From c13bf322cd56835d125f9ff005ecf2b9976cd9ad Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Wed, 22 Apr 2020 09:48:35 -0400 Subject: [PATCH 001/909] http: auditing Path() calls for safety with Pathless CONNECT (#10851) This should result in all Path() calls not altered in #10720 being safe for path-less CONNECT. The major change for this PR is that requests without a path will not be considered gRPC requests. They're still currently rejected at the HCM, but when they are allowed through they will simply not be gRPC rather than causing crashes. Risk Level: medium (L7 code refactor) Testing: new unit tests Docs Changes: n/a Release Notes: n/a Part of #1630 #1451 Signed-off-by: Alyssa Wilk --- source/common/grpc/BUILD | 1 + source/common/grpc/common.cc | 10 +++- source/common/grpc/common.h | 10 +++- source/common/http/async_client_impl.cc | 2 +- source/common/http/conn_manager_impl.cc | 7 +-- source/common/http/conn_manager_impl.h | 2 +- source/common/http/conn_manager_utility.cc | 4 +- source/common/http/conn_manager_utility.h | 3 +- source/common/http/path_utility.cc | 2 + source/common/http/path_utility.h | 2 + source/common/router/config_impl.cc | 34 ++++++------- source/common/router/router.cc | 20 +++++--- source/common/tracing/http_tracer_impl.cc | 5 +- source/common/upstream/health_checker_impl.cc | 4 +- .../http/aws_lambda/aws_lambda_filter.cc | 7 ++- .../grpc_http1_bridge/http1_bridge_filter.cc | 2 +- .../http/grpc_http1_reverse_bridge/filter.cc | 2 +- .../json_transcoder_filter.cc | 4 +- .../http/grpc_stats/grpc_stats_filter.cc | 4 +- .../filters/http/grpc_web/grpc_web_filter.cc | 3 ++ .../tracers/xray/xray_tracer_impl.cc | 4 +- source/server/http/admin_filter.cc | 3 +- test/common/grpc/common_test.cc | 21 +++++--- test/common/http/conn_manager_utility_test.cc | 10 ++++ test/common/http/header_utility_test.cc | 6 +++ test/common/router/config_impl_test.cc | 9 ++++ test/common/tracing/http_tracer_impl_test.cc | 39 ++++++++++++++- .../http/grpc_web/grpc_web_filter_test.cc | 50 +++++++++++-------- 28 files changed, 195 insertions(+), 75 deletions(-) diff --git a/source/common/grpc/BUILD b/source/common/grpc/BUILD index 29f31e66d444..e49c1c1db016 100644 --- a/source/common/grpc/BUILD +++ b/source/common/grpc/BUILD @@ -94,6 +94,7 @@ envoy_cc_library( "//source/common/common:macros", "//source/common/common:utility_lib", "//source/common/grpc:status_lib", + "//source/common/http:header_utility_lib", "//source/common/http:headers_lib", "//source/common/http:message_lib", "//source/common/http:utility_lib", diff --git a/source/common/grpc/common.cc b/source/common/grpc/common.cc index 2019f54ac874..3b74a5223b96 100644 --- a/source/common/grpc/common.cc +++ b/source/common/grpc/common.cc @@ -14,6 +14,7 @@ #include "common/common/fmt.h" #include "common/common/macros.h" #include "common/common/utility.h" +#include "common/http/header_utility.h" #include "common/http/headers.h" #include "common/http/message_impl.h" #include "common/http/utility.h" @@ -37,7 +38,14 @@ bool Common::hasGrpcContentType(const Http::RequestOrResponseHeaderMap& headers) .getStringView()[Http::Headers::get().ContentTypeValues.Grpc.size()] == '+'); } -bool Common::isGrpcResponseHeader(const Http::ResponseHeaderMap& headers, bool end_stream) { +bool Common::isGrpcRequestHeaders(const Http::RequestHeaderMap& headers) { + if (!headers.Path()) { + return false; + } + return hasGrpcContentType(headers); +} + +bool Common::isGrpcResponseHeaders(const Http::ResponseHeaderMap& headers, bool end_stream) { if (end_stream) { // 
Trailers-only response, only grpc-status is required. return headers.GrpcStatus() != nullptr; diff --git a/source/common/grpc/common.h b/source/common/grpc/common.h index b450e7817e54..cd94fe450568 100644 --- a/source/common/grpc/common.h +++ b/source/common/grpc/common.h @@ -36,12 +36,20 @@ class Common { */ static bool hasGrpcContentType(const Http::RequestOrResponseHeaderMap& headers); + /** + * @param headers the headers to parse. + * @return bool indicating whether the header is a gRPC request header. + * Currently headers are considered gRPC request headers if they have the gRPC + * content type, and have a path header. + */ + static bool isGrpcRequestHeaders(const Http::RequestHeaderMap& headers); + /** * @param headers the headers to parse. * @param bool indicating whether the header is at end_stream. * @return bool indicating whether the header is a gRPC response header */ - static bool isGrpcResponseHeader(const Http::ResponseHeaderMap& headers, bool end_stream); + static bool isGrpcResponseHeaders(const Http::ResponseHeaderMap& headers, bool end_stream); /** * Returns the GrpcStatus code from a given set of trailers, if present. diff --git a/source/common/http/async_client_impl.cc b/source/common/http/async_client_impl.cc index cc5659da7885..b2e40ab397f6 100644 --- a/source/common/http/async_client_impl.cc +++ b/source/common/http/async_client_impl.cc @@ -133,7 +133,7 @@ void AsyncStreamImpl::sendHeaders(RequestHeaderMap& headers, bool end_stream) { is_head_request_ = true; } - is_grpc_request_ = Grpc::Common::hasGrpcContentType(headers); + is_grpc_request_ = Grpc::Common::isGrpcRequestHeaders(headers); headers.setReferenceEnvoyInternalRequest(Headers::get().EnvoyInternalRequestValues.True); if (send_xff_) { Utility::appendXff(headers, *parent_.config_.local_info_.address()); diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index d6e4a55eced7..4501cbe7fd41 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -671,7 +671,7 @@ void ConnectionManagerImpl::ActiveStream::onIdleTimeout() { } else { stream_info_.setResponseFlag(StreamInfo::ResponseFlag::StreamIdleTimeout); sendLocalReply(request_headers_ != nullptr && - Grpc::Common::hasGrpcContentType(*request_headers_), + Grpc::Common::isGrpcRequestHeaders(*request_headers_), Http::Code::RequestTimeout, "stream timeout", nullptr, state_.is_head_request_, absl::nullopt, StreamInfo::ResponseCodeDetails::get().StreamIdleTimeout); } @@ -679,7 +679,8 @@ void ConnectionManagerImpl::ActiveStream::onIdleTimeout() { void ConnectionManagerImpl::ActiveStream::onRequestTimeout() { connection_manager_.stats_.named_.downstream_rq_timeout_.inc(); - sendLocalReply(request_headers_ != nullptr && Grpc::Common::hasGrpcContentType(*request_headers_), + sendLocalReply(request_headers_ != nullptr && + Grpc::Common::isGrpcRequestHeaders(*request_headers_), Http::Code::RequestTimeout, "request timeout", nullptr, state_.is_head_request_, absl::nullopt, StreamInfo::ResponseCodeDetails::get().RequestOverallTimeout); } @@ -799,7 +800,7 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& he // overload it is more important to avoid unnecessary allocation than to create the filters. 
state_.created_filter_chain_ = true; connection_manager_.stats_.named_.downstream_rq_overload_close_.inc(); - sendLocalReply(Grpc::Common::hasGrpcContentType(*request_headers_), + sendLocalReply(Grpc::Common::isGrpcRequestHeaders(*request_headers_), Http::Code::ServiceUnavailable, "envoy overloaded", nullptr, state_.is_head_request_, absl::nullopt, StreamInfo::ResponseCodeDetails::get().Overload); diff --git a/source/common/http/conn_manager_impl.h b/source/common/http/conn_manager_impl.h index 10bcc7522bd6..1e3705b57634 100644 --- a/source/common/http/conn_manager_impl.h +++ b/source/common/http/conn_manager_impl.h @@ -313,7 +313,7 @@ class ConnectionManagerImpl : Logger::Loggable, // so that we can issue gRPC local responses to gRPC requests. Filter's decodeHeaders() // called here may change the content type, so we must check it before the call. FilterHeadersStatus decodeHeaders(RequestHeaderMap& headers, bool end_stream) { - is_grpc_request_ = Grpc::Common::hasGrpcContentType(headers); + is_grpc_request_ = Grpc::Common::isGrpcRequestHeaders(headers); FilterHeadersStatus status = handle_->decodeHeaders(headers, end_stream); if (end_stream) { handle_->decodeComplete(); diff --git a/source/common/http/conn_manager_utility.cc b/source/common/http/conn_manager_utility.cc index b4a97bfa8b05..417195e1adcb 100644 --- a/source/common/http/conn_manager_utility.cc +++ b/source/common/http/conn_manager_utility.cc @@ -404,7 +404,9 @@ void ConnectionManagerUtility::mutateResponseHeaders( bool ConnectionManagerUtility::maybeNormalizePath(RequestHeaderMap& request_headers, const ConnectionManagerConfig& config) { - ASSERT(request_headers.Path()); + if (!request_headers.Path()) { + return true; // It's as valid as it is going to get. + } bool is_valid_path = true; if (config.shouldNormalizePath()) { is_valid_path = PathUtil::canonicalPath(request_headers); diff --git a/source/common/http/conn_manager_utility.h b/source/common/http/conn_manager_utility.h index 20381116162f..6bac45578eb9 100644 --- a/source/common/http/conn_manager_utility.h +++ b/source/common/http/conn_manager_utility.h @@ -63,9 +63,10 @@ class ConnectionManagerUtility { const RequestIDExtensionSharedPtr& rid_extension, const std::string& via); - // Sanitize the path in the header map if forced by config. + // Sanitize the path in the header map if the path exists and it is forced by config. // Side affect: the string view of Path header is invalidated. // Return false if error happens during the sanitization. + // Returns true if there is no path. static bool maybeNormalizePath(RequestHeaderMap& request_headers, const ConnectionManagerConfig& config); diff --git a/source/common/http/path_utility.cc b/source/common/http/path_utility.cc index cb930929e4a0..a9a905d44340 100644 --- a/source/common/http/path_utility.cc +++ b/source/common/http/path_utility.cc @@ -29,6 +29,7 @@ absl::optional canonicalizePath(absl::string_view original_path) { /* static */ bool PathUtil::canonicalPath(RequestHeaderMap& headers) { + ASSERT(headers.Path()); const auto original_path = headers.Path()->value().getStringView(); // canonicalPath is supposed to apply on path component in URL instead of :path header const auto query_pos = original_path.find('?'); @@ -54,6 +55,7 @@ bool PathUtil::canonicalPath(RequestHeaderMap& headers) { } void PathUtil::mergeSlashes(RequestHeaderMap& headers) { + ASSERT(headers.Path()); const auto original_path = headers.Path()->value().getStringView(); // Only operate on path component in URL. 
const absl::string_view::size_type query_start = original_path.find('?'); diff --git a/source/common/http/path_utility.h b/source/common/http/path_utility.h index 8df1581bad6f..62be43e2e03f 100644 --- a/source/common/http/path_utility.h +++ b/source/common/http/path_utility.h @@ -14,8 +14,10 @@ class PathUtil { public: // Returns if the normalization succeeds. // If it is successful, the path header in header path will be updated with the normalized path. + // Requires the Path header be present. static bool canonicalPath(RequestHeaderMap& headers); // Merges two or more adjacent slashes in path part of URI into one. + // Requires the Path header be present. static void mergeSlashes(RequestHeaderMap& headers); // Removes the query and/or fragment string (if present) from the input path. // For example, this function returns "/data" for the input path "/data#fragment?param=value". diff --git a/source/common/router/config_impl.cc b/source/common/router/config_impl.cc index 801cc0e00ad4..0287a5b6c974 100644 --- a/source/common/router/config_impl.cc +++ b/source/common/router/config_impl.cc @@ -59,6 +59,10 @@ convertInternalRedirectAction(const envoy::config::route::v3::RouteAction& route const std::string DEPRECATED_ROUTER_NAME = "envoy.router"; +const absl::string_view getPath(const Http::RequestHeaderMap& headers) { + return headers.Path() ? headers.Path()->value().getStringView() : ""; +} + } // namespace std::string SslRedirector::newPath(const Http::RequestHeaderMap& headers) const { @@ -430,12 +434,6 @@ bool RouteEntryImplBase::matchRoute(const Http::RequestHeaderMap& headers, uint64_t random_value) const { bool matches = true; - // TODO(mattklein123): Currently all match types require a path header. When we support CONNECT - // we will need to figure out how to safely relax this. - if (headers.Path() == nullptr) { - return false; - } - matches &= evaluateRuntimeMatch(random_value); if (!matches) { // No need to waste further cycles calculating a route match. 
@@ -443,13 +441,12 @@ bool RouteEntryImplBase::matchRoute(const Http::RequestHeaderMap& headers, } if (match_grpc_) { - matches &= Grpc::Common::hasGrpcContentType(headers); + matches &= Grpc::Common::isGrpcRequestHeaders(headers); } matches &= Http::HeaderUtility::matchHeaders(headers, config_headers_); if (!config_query_parameters_.empty()) { - Http::Utility::QueryParams query_parameters = - Http::Utility::parseQueryString(headers.Path()->value().getStringView()); + Http::Utility::QueryParams query_parameters = Http::Utility::parseQueryString(getPath(headers)); matches &= ConfigUtility::matchQueryParams(query_parameters, config_query_parameters_); } @@ -540,7 +537,7 @@ void RouteEntryImplBase::finalizePathHeader(Http::RequestHeaderMap& headers, return; } - std::string path(headers.Path()->value().getStringView()); + std::string path(getPath(headers)); if (insert_envoy_original_path) { headers.setEnvoyOriginalPath(path); } @@ -633,8 +630,7 @@ std::string RouteEntryImplBase::newPath(const Http::RequestHeaderMap& headers) c if (!path_redirect_.empty()) { final_path = path_redirect_.c_str(); } else { - ASSERT(headers.Path()); - final_path = headers.Path()->value().getStringView(); + final_path = getPath(headers); if (strip_query_) { size_t path_end = final_path.find("?"); if (path_end != absl::string_view::npos) { @@ -852,7 +848,7 @@ RouteConstSharedPtr PrefixRouteEntryImpl::matches(const Http::RequestHeaderMap& const StreamInfo::StreamInfo& stream_info, uint64_t random_value) const { if (RouteEntryImplBase::matchRoute(headers, stream_info, random_value) && - path_matcher_->match(headers.Path()->value().getStringView())) { + path_matcher_->match(getPath(headers))) { return clusterEntry(headers, random_value); } return nullptr; @@ -874,7 +870,7 @@ RouteConstSharedPtr PathRouteEntryImpl::matches(const Http::RequestHeaderMap& he const StreamInfo::StreamInfo& stream_info, uint64_t random_value) const { if (RouteEntryImplBase::matchRoute(headers, stream_info, random_value) && - path_matcher_->match(headers.Path()->value().getStringView())) { + path_matcher_->match(getPath(headers))) { return clusterEntry(headers, random_value); } @@ -902,8 +898,7 @@ RegexRouteEntryImpl::RegexRouteEntryImpl( void RegexRouteEntryImpl::rewritePathHeader(Http::RequestHeaderMap& headers, bool insert_envoy_original_path) const { - const absl::string_view path = - Http::PathUtil::removeQueryAndFragment(headers.Path()->value().getStringView()); + const absl::string_view path = Http::PathUtil::removeQueryAndFragment(getPath(headers)); // TODO(yuval-k): This ASSERT can happen if the path was changed by a filter without clearing the // route cache. We should consider if ASSERT-ing is the desired behavior in this case. ASSERT(regex_->match(path)); @@ -914,8 +909,7 @@ RouteConstSharedPtr RegexRouteEntryImpl::matches(const Http::RequestHeaderMap& h const StreamInfo::StreamInfo& stream_info, uint64_t random_value) const { if (RouteEntryImplBase::matchRoute(headers, stream_info, random_value)) { - const absl::string_view path = - Http::PathUtil::removeQueryAndFragment(headers.Path()->value().getStringView()); + const absl::string_view path = Http::PathUtil::removeQueryAndFragment(getPath(headers)); if (regex_->match(path)) { return clusterEntry(headers, random_value); } @@ -1124,6 +1118,10 @@ RouteConstSharedPtr VirtualHostImpl::getRouteFromEntries(const Http::RequestHead // Check for a route that matches the request. 
for (const RouteEntryImplBaseConstSharedPtr& route : routes_) { + if (!headers.Path()) { + // TODO(alyssawilk) allow specifically for kConnectMatcher routes. + return nullptr; + } RouteConstSharedPtr route_entry = route->matches(headers, stream_info, random_value); if (nullptr != route_entry) { return route_entry; diff --git a/source/common/router/router.cc b/source/common/router/router.cc index de4278259e65..b788a691474d 100644 --- a/source/common/router/router.cc +++ b/source/common/router/router.cc @@ -68,6 +68,9 @@ bool convertRequestHeadersForInternalRedirect(Http::RequestHeaderMap& downstream if (internal_redirect.value().getStringView().length() == 0) { return false; } + if (!downstream_headers.Path()) { + return false; + } Http::Utility::Url absolute_url; if (!absolute_url.initialize(internal_redirect.value().getStringView(), false)) { @@ -112,6 +115,10 @@ bool convertRequestHeadersForInternalRedirect(Http::RequestHeaderMap& downstream constexpr uint64_t TimeoutPrecisionFactor = 100; +const absl::string_view getPath(const Http::RequestHeaderMap& headers) { + return headers.Path() ? headers.Path()->value().getStringView() : ""; +} + } // namespace // Express percentage as [0, TimeoutPrecisionFactor] because stats do not accept floating point @@ -379,7 +386,6 @@ void Filter::chargeUpstreamCode(Http::Code code, Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, bool end_stream) { // Do a common header check. We make sure that all outgoing requests have all HTTP/2 headers. // These get stripped by HTTP/1 codec where applicable. - ASSERT(headers.Path()); ASSERT(headers.Method()); ASSERT(headers.Host()); @@ -395,7 +401,7 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, : nullptr; // TODO: Maybe add a filter API for this. - grpc_request_ = Grpc::Common::hasGrpcContentType(headers); + grpc_request_ = Grpc::Common::isGrpcRequestHeaders(headers); // Only increment rq total stat if we actually decode headers here. This does not count requests // that get handled by earlier filters. @@ -410,8 +416,7 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, route_ = callbacks_->route(); if (!route_) { config_.stats_.no_route_.inc(); - ENVOY_STREAM_LOG(debug, "no cluster match for URL '{}'", *callbacks_, - headers.Path()->value().getStringView()); + ENVOY_STREAM_LOG(debug, "no cluster match for URL '{}'", *callbacks_, getPath(headers)); callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::NoRouteFound); callbacks_->sendLocalReply(Http::Code::NotFound, "", modify_headers, absl::nullopt, @@ -428,7 +433,10 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, direct_response->responseCode(), direct_response->responseBody(), [this, direct_response, &request_headers = headers](Http::ResponseHeaderMap& response_headers) -> void { - const auto new_path = direct_response->newPath(request_headers); + std::string new_path; + if (request_headers.Path()) { + new_path = direct_response->newPath(request_headers); + } // See https://tools.ietf.org/html/rfc7231#section-7.1.2. const auto add_location = direct_response->responseCode() == Http::Code::Created || @@ -473,7 +481,7 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, // Set up stat prefixes, etc. 
request_vcluster_ = route_entry_->virtualCluster(headers); ENVOY_STREAM_LOG(debug, "cluster '{}' match for URL '{}'", *callbacks_, - route_entry_->clusterName(), headers.Path()->value().getStringView()); + route_entry_->clusterName(), getPath(headers)); if (config_.strict_check_headers_ != nullptr) { for (const auto& header : *config_.strict_check_headers_) { diff --git a/source/common/tracing/http_tracer_impl.cc b/source/common/tracing/http_tracer_impl.cc index f3f568666f60..3933b100ad23 100644 --- a/source/common/tracing/http_tracer_impl.cc +++ b/source/common/tracing/http_tracer_impl.cc @@ -36,6 +36,9 @@ static std::string valueOrDefault(const Http::HeaderEntry* header, const char* d static std::string buildUrl(const Http::RequestHeaderMap& request_headers, const uint32_t max_path_length) { + if (!request_headers.Path()) { + return ""; + } std::string path(request_headers.EnvoyOriginalPath() ? request_headers.EnvoyOriginalPath()->value().getStringView() : request_headers.Path()->value().getStringView()); @@ -184,7 +187,7 @@ void HttpTracerUtility::finalizeDownstreamSpan(Span& span, std::string(request_headers->ClientTraceId()->value().getStringView())); } - if (Grpc::Common::hasGrpcContentType(*request_headers)) { + if (Grpc::Common::isGrpcRequestHeaders(*request_headers)) { addGrpcRequestTags(span, *request_headers); } } diff --git a/source/common/upstream/health_checker_impl.cc b/source/common/upstream/health_checker_impl.cc index 5ca42738d464..16e78394248a 100644 --- a/source/common/upstream/health_checker_impl.cc +++ b/source/common/upstream/health_checker_impl.cc @@ -583,8 +583,8 @@ void GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::decodeHeaders( end_stream); return; } - if (!Grpc::Common::hasGrpcContentType(*headers)) { - onRpcComplete(Grpc::Status::WellKnownGrpcStatus::Internal, "invalid gRPC content-type", false); + if (!Grpc::Common::isGrpcResponseHeaders(*headers, end_stream)) { + onRpcComplete(Grpc::Status::WellKnownGrpcStatus::Internal, "not a gRPC request", false); return; } if (end_stream) { diff --git a/source/extensions/filters/http/aws_lambda/aws_lambda_filter.cc b/source/extensions/filters/http/aws_lambda/aws_lambda_filter.cc index f10ca5c0d0b0..89e39663e3f0 100644 --- a/source/extensions/filters/http/aws_lambda/aws_lambda_filter.cc +++ b/source/extensions/filters/http/aws_lambda/aws_lambda_filter.cc @@ -298,8 +298,11 @@ void Filter::jsonizeRequest(Http::RequestHeaderMap const& headers, const Buffer: &json_req); // Wrap the Query String - for (auto&& kv_pair : Http::Utility::parseQueryString(headers.Path()->value().getStringView())) { - json_req.mutable_query_string_parameters()->insert({kv_pair.first, kv_pair.second}); + if (headers.Path()) { + for (auto&& kv_pair : + Http::Utility::parseQueryString(headers.Path()->value().getStringView())) { + json_req.mutable_query_string_parameters()->insert({kv_pair.first, kv_pair.second}); + } } // Wrap the body diff --git a/source/extensions/filters/http/grpc_http1_bridge/http1_bridge_filter.cc b/source/extensions/filters/http/grpc_http1_bridge/http1_bridge_filter.cc index 4dc8b216ffe9..ae5332d3504a 100644 --- a/source/extensions/filters/http/grpc_http1_bridge/http1_bridge_filter.cc +++ b/source/extensions/filters/http/grpc_http1_bridge/http1_bridge_filter.cc @@ -24,7 +24,7 @@ void Http1BridgeFilter::chargeStat(const Http::ResponseHeaderOrTrailerMap& heade } Http::FilterHeadersStatus Http1BridgeFilter::decodeHeaders(Http::RequestHeaderMap& headers, bool) { - const bool grpc_request = 
Grpc::Common::hasGrpcContentType(headers); + const bool grpc_request = Grpc::Common::isGrpcRequestHeaders(headers); if (grpc_request) { setupStatTracking(headers); } diff --git a/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.cc b/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.cc index d5191716ce59..d585f84ea0c2 100644 --- a/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.cc +++ b/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.cc @@ -84,7 +84,7 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, // If this is a gRPC request we: // - mark this request as being gRPC // - change the content-type to application/x-protobuf - if (Envoy::Grpc::Common::hasGrpcContentType(headers)) { + if (Envoy::Grpc::Common::isGrpcRequestHeaders(headers)) { enabled_ = true; // We keep track of the original content-type to ensure that we handle diff --git a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc index 1e0b67aa654f..1434a3a97f1e 100644 --- a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc +++ b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc @@ -259,7 +259,7 @@ ProtobufUtil::Status JsonTranscoderConfig::createTranscoder( const Http::RequestHeaderMap& headers, ZeroCopyInputStream& request_input, google::grpc::transcoding::TranscoderInputStream& response_input, std::unique_ptr& transcoder, MethodInfoSharedPtr& method_info) { - if (Grpc::Common::hasGrpcContentType(headers)) { + if (Grpc::Common::isGrpcRequestHeaders(headers)) { return ProtobufUtil::Status(Code::INVALID_ARGUMENT, "Request headers has application/grpc content-type"); } @@ -476,7 +476,7 @@ void JsonTranscoderFilter::setDecoderFilterCallbacks( Http::FilterHeadersStatus JsonTranscoderFilter::encodeHeaders(Http::ResponseHeaderMap& headers, bool end_stream) { - if (!Grpc::Common::isGrpcResponseHeader(headers, end_stream)) { + if (!Grpc::Common::isGrpcResponseHeaders(headers, end_stream)) { error_ = true; } diff --git a/source/extensions/filters/http/grpc_stats/grpc_stats_filter.cc b/source/extensions/filters/http/grpc_stats/grpc_stats_filter.cc index 7c91093d4712..8dfbc0dc0dc4 100644 --- a/source/extensions/filters/http/grpc_stats/grpc_stats_filter.cc +++ b/source/extensions/filters/http/grpc_stats/grpc_stats_filter.cc @@ -147,7 +147,7 @@ class GrpcStatsFilter : public Http::PassThroughFilter { GrpcStatsFilter(ConfigConstSharedPtr config) : config_(config) {} Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers, bool) override { - grpc_request_ = Grpc::Common::hasGrpcContentType(headers); + grpc_request_ = Grpc::Common::isGrpcRequestHeaders(headers); if (grpc_request_) { cluster_ = decoder_callbacks_->clusterInfo(); if (cluster_) { @@ -203,7 +203,7 @@ class GrpcStatsFilter : public Http::PassThroughFilter { Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap& headers, bool end_stream) override { - grpc_response_ = Grpc::Common::isGrpcResponseHeader(headers, end_stream); + grpc_response_ = Grpc::Common::isGrpcResponseHeaders(headers, end_stream); if (doStatTracking()) { config_->context_.chargeStat(*cluster_, Grpc::Context::Protocol::Grpc, request_names_, headers.GrpcStatus()); diff --git a/source/extensions/filters/http/grpc_web/grpc_web_filter.cc b/source/extensions/filters/http/grpc_web/grpc_web_filter.cc index 7ead4cd30d55..b85e05c32962 100644 --- 
a/source/extensions/filters/http/grpc_web/grpc_web_filter.cc +++ b/source/extensions/filters/http/grpc_web/grpc_web_filter.cc @@ -39,6 +39,9 @@ const absl::flat_hash_set& GrpcWebFilter::gRpcWebContentTypes() con } bool GrpcWebFilter::isGrpcWebRequest(const Http::RequestHeaderMap& headers) { + if (!headers.Path()) { + return false; + } const Http::HeaderEntry* content_type = headers.ContentType(); if (content_type != nullptr) { return gRpcWebContentTypes().count(content_type->value().getStringView()) > 0; diff --git a/source/extensions/tracers/xray/xray_tracer_impl.cc b/source/extensions/tracers/xray/xray_tracer_impl.cc index d0ecb0684e25..50637152ead9 100644 --- a/source/extensions/tracers/xray/xray_tracer_impl.cc +++ b/source/extensions/tracers/xray/xray_tracer_impl.cc @@ -95,9 +95,11 @@ Tracing::SpanPtr Driver::startSpan(const Tracing::Config& config, } if (!should_trace.has_value()) { + const absl::string_view path = + request_headers.Path() ? request_headers.Path()->value().getStringView() : ""; const SamplingRequest request{std::string{request_headers.Host()->value().getStringView()}, std::string{request_headers.Method()->value().getStringView()}, - std::string{request_headers.Path()->value().getStringView()}}; + std::string{path}}; should_trace = sampling_strategy_->shouldTrace(request); } diff --git a/source/server/http/admin_filter.cc b/source/server/http/admin_filter.cc index 7f9cf3930974..b0565f673c5e 100644 --- a/source/server/http/admin_filter.cc +++ b/source/server/http/admin_filter.cc @@ -62,7 +62,8 @@ const Http::RequestHeaderMap& AdminFilter::getRequestHeaders() const { } void AdminFilter::onComplete() { - absl::string_view path = request_headers_->Path()->value().getStringView(); + const absl::string_view path = + request_headers_->Path() ? 
request_headers_->Path()->value().getStringView() : ""; ENVOY_STREAM_LOG(debug, "request complete: path: {}", *decoder_callbacks_, path); Buffer::OwnedImpl response; diff --git a/test/common/grpc/common_test.cc b/test/common/grpc/common_test.cc index 02f948b9c601..d96d07b8eacc 100644 --- a/test/common/grpc/common_test.cc +++ b/test/common/grpc/common_test.cc @@ -285,20 +285,29 @@ TEST(GrpcContextTest, HasGrpcContentType) { EXPECT_FALSE(isGrpcContentType("application/grpc-web+foo")); } +TEST(GrpcContextTest, IsGrpcRequestHeader) { + Http::TestRequestHeaderMapImpl is{ + {":method", "GET"}, {":path", "/"}, {"content-type", "application/grpc"}}; + EXPECT_TRUE(Common::isGrpcRequestHeaders(is)); + Http::TestRequestHeaderMapImpl is_not{{":method", "CONNECT"}, + {"content-type", "application/grpc"}}; + EXPECT_FALSE(Common::isGrpcRequestHeaders(is_not)); +} + TEST(GrpcContextTest, IsGrpcResponseHeader) { Http::TestResponseHeaderMapImpl grpc_status_only{{":status", "500"}, {"grpc-status", "14"}}; - EXPECT_TRUE(Common::isGrpcResponseHeader(grpc_status_only, true)); - EXPECT_FALSE(Common::isGrpcResponseHeader(grpc_status_only, false)); + EXPECT_TRUE(Common::isGrpcResponseHeaders(grpc_status_only, true)); + EXPECT_FALSE(Common::isGrpcResponseHeaders(grpc_status_only, false)); Http::TestResponseHeaderMapImpl grpc_response_header{{":status", "200"}, {"content-type", "application/grpc"}}; - EXPECT_FALSE(Common::isGrpcResponseHeader(grpc_response_header, true)); - EXPECT_TRUE(Common::isGrpcResponseHeader(grpc_response_header, false)); + EXPECT_FALSE(Common::isGrpcResponseHeaders(grpc_response_header, true)); + EXPECT_TRUE(Common::isGrpcResponseHeaders(grpc_response_header, false)); Http::TestResponseHeaderMapImpl json_response_header{{":status", "200"}, {"content-type", "application/json"}}; - EXPECT_FALSE(Common::isGrpcResponseHeader(json_response_header, true)); - EXPECT_FALSE(Common::isGrpcResponseHeader(json_response_header, false)); + EXPECT_FALSE(Common::isGrpcResponseHeaders(json_response_header, true)); + EXPECT_FALSE(Common::isGrpcResponseHeaders(json_response_header, false)); } TEST(GrpcContextTest, ValidateResponse) { diff --git a/test/common/http/conn_manager_utility_test.cc b/test/common/http/conn_manager_utility_test.cc index 71e7b5a40933..9d730478ec9f 100644 --- a/test/common/http/conn_manager_utility_test.cc +++ b/test/common/http/conn_manager_utility_test.cc @@ -1409,6 +1409,16 @@ TEST_F(ConnectionManagerUtilityTest, RemovesProxyResponseHeaders) { EXPECT_FALSE(response_headers.has("proxy-connection")); } +// maybeNormalizePath() returns true with an empty path. +TEST_F(ConnectionManagerUtilityTest, SanitizeEmptyPath) { + ON_CALL(config_, shouldNormalizePath()).WillByDefault(Return(false)); + TestRequestHeaderMapImpl original_headers; + + TestRequestHeaderMapImpl header_map(original_headers); + EXPECT_TRUE(ConnectionManagerUtility::maybeNormalizePath(header_map, config_)); + EXPECT_EQ(original_headers, header_map); +} + // maybeNormalizePath() does nothing by default. 
TEST_F(ConnectionManagerUtilityTest, SanitizePathDefaultOff) { ON_CALL(config_, shouldNormalizePath()).WillByDefault(Return(false)); diff --git a/test/common/http/header_utility_test.cc b/test/common/http/header_utility_test.cc index 55d2056aab78..e8383aa08be1 100644 --- a/test/common/http/header_utility_test.cc +++ b/test/common/http/header_utility_test.cc @@ -469,6 +469,12 @@ TEST(HeaderIsValidTest, AuthorityIsValid) { EXPECT_FALSE(HeaderUtility::authorityIsValid("illegal{}")); } +TEST(HeaderIsValidTest, IsConnect) { + EXPECT_TRUE(HeaderUtility::isConnect(Http::TestRequestHeaderMapImpl{{":method", "CONNECT"}})); + EXPECT_FALSE(HeaderUtility::isConnect(Http::TestRequestHeaderMapImpl{{":method", "GET"}})); + EXPECT_FALSE(HeaderUtility::isConnect(Http::TestRequestHeaderMapImpl{})); +} + TEST(HeaderAddTest, HeaderAdd) { TestHeaderMapImpl headers{{"myheader1", "123value"}}; TestHeaderMapImpl headers_to_add{{"myheader2", "456value"}}; diff --git a/test/common/router/config_impl_test.cc b/test/common/router/config_impl_test.cc index fd61d51ff961..e282f6b9ae8c 100644 --- a/test/common/router/config_impl_test.cc +++ b/test/common/router/config_impl_test.cc @@ -87,6 +87,14 @@ class TestConfigImpl : public ConfigImpl { const envoy::config::route::v3::RouteConfiguration config_; }; +Http::TestRequestHeaderMapImpl genPathlessHeaders(const std::string& host, + const std::string& method) { + return Http::TestRequestHeaderMapImpl{{":authority", host}, {":method", method}, + {"x-safe", "safe"}, {"x-global-nope", "global"}, + {"x-vhost-nope", "vhost"}, {"x-route-nope", "route"}, + {"x-forwarded-proto", "http"}}; +} + Http::TestRequestHeaderMapImpl genHeaders(const std::string& host, const std::string& path, const std::string& method) { return Http::TestRequestHeaderMapImpl{{":authority", host}, {":path", path}, @@ -368,6 +376,7 @@ TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(TestLegacyRoutes)) { config.route(genHeaders("bat2.com", "/foo", "GET"), 0)->routeEntry()->clusterName()); EXPECT_EQ("regex_default", config.route(genHeaders("bat2.com", " ", "GET"), 0)->routeEntry()->clusterName()); + EXPECT_TRUE(config.route(genPathlessHeaders("bat2.com", "GET"), 0) == nullptr); // Regular Expression matching with query string params EXPECT_EQ( diff --git a/test/common/tracing/http_tracer_impl_test.cc b/test/common/tracing/http_tracer_impl_test.cc index 3d05232cb56f..eaf7437d0bc4 100644 --- a/test/common/tracing/http_tracer_impl_test.cc +++ b/test/common/tracing/http_tracer_impl_test.cc @@ -158,6 +158,7 @@ TEST_F(HttpConnManFinalizerImplTest, OriginalAndLongPath) { Http::TestRequestHeaderMapImpl request_headers{{"x-request-id", "id"}, {"x-envoy-original-path", path}, {":method", "GET"}, + {":path", ""}, {"x-forwarded-proto", "http"}}; Http::TestResponseHeaderMapImpl response_headers; Http::TestResponseTrailerMapImpl response_trailers; @@ -189,8 +190,10 @@ TEST_F(HttpConnManFinalizerImplTest, NoGeneratedId) { const auto remote_address = Network::Address::InstanceConstSharedPtr{new Network::Address::Ipv4Instance(expected_ip, 0)}; - Http::TestRequestHeaderMapImpl request_headers{ - {"x-envoy-original-path", path}, {":method", "GET"}, {"x-forwarded-proto", "http"}}; + Http::TestRequestHeaderMapImpl request_headers{{":path", ""}, + {"x-envoy-original-path", path}, + {":method", "GET"}, + {"x-forwarded-proto", "http"}}; Http::TestResponseHeaderMapImpl response_headers; Http::TestResponseTrailerMapImpl response_trailers; @@ -213,6 +216,38 @@ TEST_F(HttpConnManFinalizerImplTest, NoGeneratedId) { &response_trailers, 
stream_info, config); } +TEST_F(HttpConnManFinalizerImplTest, Connect) { + const std::string path(300, 'a'); + const std::string path_prefix = "http://"; + const std::string expected_path(256, 'a'); + const std::string expected_ip = "10.0.0.100"; + const auto remote_address = + Network::Address::InstanceConstSharedPtr{new Network::Address::Ipv4Instance(expected_ip, 0)}; + + Http::TestRequestHeaderMapImpl request_headers{{":method", "CONNECT"}, + {"x-forwarded-proto", "http"}}; + Http::TestResponseHeaderMapImpl response_headers; + Http::TestResponseTrailerMapImpl response_trailers; + + absl::optional protocol = Http::Protocol::Http2; + EXPECT_CALL(stream_info, bytesReceived()).WillOnce(Return(10)); + EXPECT_CALL(stream_info, bytesSent()).WillOnce(Return(11)); + EXPECT_CALL(stream_info, protocol()).WillRepeatedly(ReturnPointee(&protocol)); + absl::optional response_code; + EXPECT_CALL(stream_info, responseCode()).WillRepeatedly(ReturnPointee(&response_code)); + EXPECT_CALL(stream_info, downstreamDirectRemoteAddress()) + .WillRepeatedly(ReturnPointee(&remote_address)); + + EXPECT_CALL(span, setTag(_, _)).Times(testing::AnyNumber()); + EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().HttpUrl), Eq(""))); + EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().HttpMethod), Eq("CONNECT"))); + EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().HttpProtocol), Eq("HTTP/2"))); + EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().PeerAddress), Eq(expected_ip))); + + HttpTracerUtility::finalizeDownstreamSpan(span, &request_headers, &response_headers, + &response_trailers, stream_info, config); +} + TEST_F(HttpConnManFinalizerImplTest, NullRequestHeadersAndNullRouteEntry) { EXPECT_CALL(stream_info, bytesReceived()).WillOnce(Return(10)); EXPECT_CALL(stream_info, bytesSent()).WillOnce(Return(11)); diff --git a/test/extensions/filters/http/grpc_web/grpc_web_filter_test.cc b/test/extensions/filters/http/grpc_web/grpc_web_filter_test.cc index 8828f47c62c1..817d84811061 100644 --- a/test/extensions/filters/http/grpc_web/grpc_web_filter_test.cc +++ b/test/extensions/filters/http/grpc_web/grpc_web_filter_test.cc @@ -114,6 +114,7 @@ class GrpcWebFilterTest : public testing::TestWithParam Date: Wed, 22 Apr 2020 11:28:58 -0400 Subject: [PATCH 002/909] router: fixing a watermark bug for streaming retries (#10866) Fixes an issue where, if a retry was attempted when the upstream connection was watermark-overrun, data might spool upstream but reading from downstream would not resume. This is a preexisting design flaw which manifests now that we have streaming retries (causing them to time out rather than succeed, if the upstream buffer limit is smaller than the downstream buffer limit, and it backs up due to upstream slowness) because if the whole request is read, the loop of unwinding pause between requests takes care of it. 
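As an illustration of the invariant this change restores, here is a minimal standalone sketch (the DownstreamCallbacks and UpstreamAttempt names are hypothetical stand-ins, not the actual Envoy classes): every time a per-attempt upstream request pauses downstream reads it increments a counter, and any pauses still outstanding when the attempt is torn down (for example when a retry or pool failure discards it while the upstream buffer is still over its limit) are unwound so data can flow to the next attempt.

#include <cstdint>

// Stand-in for the decoder filter callbacks that gate reading from downstream.
struct DownstreamCallbacks {
  virtual ~DownstreamCallbacks() = default;
  virtual void pauseReading() = 0;  // analogous to onDecoderFilterAboveWriteBufferHighWatermark()
  virtual void resumeReading() = 0; // analogous to onDecoderFilterBelowWriteBufferLowWatermark()
};

// Stand-in for a single upstream attempt of a (possibly retried) request.
class UpstreamAttempt {
public:
  explicit UpstreamAttempt(DownstreamCallbacks& downstream) : downstream_(downstream) {}

  // Upstream write buffer went above its high watermark: pause downstream and count it.
  void onUpstreamAboveHighWatermark() {
    downstream_.pauseReading();
    ++downstream_reads_disabled_;
  }

  // Upstream buffer drained below its low watermark: undo one pause.
  void onUpstreamBelowLowWatermark() {
    if (downstream_reads_disabled_ > 0) {
      downstream_.resumeReading();
      --downstream_reads_disabled_;
    }
  }

  // If the attempt is destroyed with pauses still outstanding (stream reset, retry, pool
  // failure), unwind them all so the downstream connection is not left read-disabled.
  ~UpstreamAttempt() {
    while (downstream_reads_disabled_ != 0) {
      downstream_.resumeReading();
      --downstream_reads_disabled_;
    }
  }

private:
  DownstreamCallbacks& downstream_;
  uint32_t downstream_reads_disabled_{0};
};
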
Risk Level: Medium (watermarks) Testing: new unit test Docs Changes: n/a Release Notes: n/a Signed-off-by: Alyssa Wilk --- source/common/router/upstream_request.cc | 12 ++++++++- source/common/router/upstream_request.h | 2 ++ test/common/router/router_test.cc | 32 +++++++++++++++++++++--- 3 files changed, 41 insertions(+), 5 deletions(-) diff --git a/source/common/router/upstream_request.cc b/source/common/router/upstream_request.cc index 818214ef8648..085e6c4a7142 100644 --- a/source/common/router/upstream_request.cc +++ b/source/common/router/upstream_request.cc @@ -98,6 +98,12 @@ UpstreamRequest::~UpstreamRequest() { upstream_log->log(parent_.downstreamHeaders(), upstream_headers_.get(), upstream_trailers_.get(), stream_info_); } + + while (downstream_data_disabled_ != 0) { + parent_.callbacks()->onDecoderFilterBelowWriteBufferLowWatermark(); + parent_.cluster()->stats().upstream_flow_control_drained_total_.inc(); + --downstream_data_disabled_; + } } void UpstreamRequest::decode100ContinueHeaders(Http::ResponseHeaderMapPtr&& headers) { @@ -421,7 +427,6 @@ void UpstreamRequest::DownstreamWatermarkManager::onAboveWriteBufferHighWatermar // can disable reads from upstream. ASSERT(!parent_.parent_.finalUpstreamRequest() || &parent_ == parent_.parent_.finalUpstreamRequest()); - // The downstream connection is overrun. Pause reads from upstream. // If there are multiple calls to readDisable either the codec (H2) or the underlying // Network::Connection (H1) will handle reference counting. @@ -451,6 +456,7 @@ void UpstreamRequest::disableDataFromDownstreamForFlowControl() { ASSERT(parent_.upstreamRequests().size() == 1 || parent_.downstreamEndStream()); parent_.cluster()->stats().upstream_flow_control_backed_up_total_.inc(); parent_.callbacks()->onDecoderFilterAboveWriteBufferHighWatermark(); + ++downstream_data_disabled_; } void UpstreamRequest::enableDataFromDownstreamForFlowControl() { @@ -466,6 +472,10 @@ void UpstreamRequest::enableDataFromDownstreamForFlowControl() { ASSERT(parent_.upstreamRequests().size() == 1 || parent_.downstreamEndStream()); parent_.cluster()->stats().upstream_flow_control_drained_total_.inc(); parent_.callbacks()->onDecoderFilterBelowWriteBufferLowWatermark(); + ASSERT(downstream_data_disabled_ != 0); + if (downstream_data_disabled_ > 0) { + --downstream_data_disabled_; + } } void HttpConnPool::newStream(GenericConnectionPoolCallbacks* callbacks) { diff --git a/source/common/router/upstream_request.h b/source/common/router/upstream_request.h index 0e85a21ddae1..a10f42163abf 100644 --- a/source/common/router/upstream_request.h +++ b/source/common/router/upstream_request.h @@ -159,6 +159,8 @@ class UpstreamRequest : public Logger::Loggable, Http::ResponseTrailerMapPtr upstream_trailers_; Http::MetadataMapVector downstream_metadata_map_vector_; + // Tracks the number of times the flow of data from downstream has been disabled. 
+ uint32_t downstream_data_disabled_{}; bool calling_encode_headers_ : 1; bool upstream_canary_ : 1; bool decode_complete_ : 1; diff --git a/test/common/router/router_test.cc b/test/common/router/router_test.cc index d5395ebd3d25..1a0b47b730c5 100644 --- a/test/common/router/router_test.cc +++ b/test/common/router/router_test.cc @@ -5674,9 +5674,10 @@ class WatermarkTest : public RouterTest { .WillOnce(Return(std::chrono::milliseconds(0))); EXPECT_CALL(callbacks_.dispatcher_, createTimer_(_)).Times(0); - EXPECT_CALL(stream_, addCallbacks(_)).WillOnce(Invoke([&](Http::StreamCallbacks& callbacks) { - stream_callbacks_ = &callbacks; - })); + EXPECT_CALL(stream_, addCallbacks(_)) + .Times(num_add_callbacks_) + .WillOnce( + Invoke([&](Http::StreamCallbacks& callbacks) { stream_callbacks_ = &callbacks; })); EXPECT_CALL(encoder_, getStream()).WillRepeatedly(ReturnRef(stream_)); EXPECT_CALL(cm_.conn_pool_, newStream(_, _)) .WillOnce(Invoke( @@ -5707,6 +5708,7 @@ class WatermarkTest : public RouterTest { Http::ResponseDecoder* response_decoder_ = nullptr; Http::TestRequestHeaderMapImpl headers_; Http::ConnectionPool::Callbacks* pool_callbacks_{nullptr}; + int num_add_callbacks_{1}; }; TEST_F(WatermarkTest, DownstreamWatermarks) { @@ -5786,7 +5788,29 @@ TEST_F(WatermarkTest, FilterWatermarks) { .value()); sendResponse(); -} // namespace Router +} + +TEST_F(WatermarkTest, FilterWatermarksUnwound) { + num_add_callbacks_ = 0; + EXPECT_CALL(callbacks_, decoderBufferLimit()).Times(3).WillRepeatedly(Return(10)); + router_.setDecoderFilterCallbacks(callbacks_); + // Send the headers sans-fin, and don't flag the pool as ready. + sendRequest(false, false); + + // Send 11 bytes of body to fill the 10 byte buffer. + Buffer::OwnedImpl data("1234567890!"); + router_.decodeData(data, false); + EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_ + .counter("upstream_flow_control_backed_up_total") + .value()); + + // Set up a pool failure, and make sure the flow control blockage is undone. + pool_callbacks_->onPoolFailure(Http::ConnectionPool::PoolFailureReason::RemoteConnectionFailure, + absl::string_view(), nullptr); + EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_ + .counter("upstream_flow_control_drained_total") + .value()); +} // Same as RetryRequestNotComplete but with decodeData larger than the buffer // limit, no retry will occur. 
From ad31f1c841b20dcbf97caf1a701f91eb05f05604 Mon Sep 17 00:00:00 2001 From: David Bell Date: Wed, 22 Apr 2020 09:01:02 -0700 Subject: [PATCH 003/909] xray: Use correct types for segment document output (#10834) Risk Level: Low Testing: unit tests, manual Docs Changes: N/A Release Notes: N/A Fixes: #10814 --- source/extensions/tracers/xray/daemon.proto | 9 ++-- source/extensions/tracers/xray/tracer.cc | 47 +++++++++++++++------ source/extensions/tracers/xray/tracer.h | 7 +-- test/extensions/tracers/xray/tracer_test.cc | 31 ++++++++------ 4 files changed, 61 insertions(+), 33 deletions(-) diff --git a/source/extensions/tracers/xray/daemon.proto b/source/extensions/tracers/xray/daemon.proto index 78594a0b5985..d19563a5ddf5 100644 --- a/source/extensions/tracers/xray/daemon.proto +++ b/source/extensions/tracers/xray/daemon.proto @@ -5,6 +5,7 @@ syntax = "proto3"; package source.extensions.tracers.xray.daemon; import "validate/validate.proto"; +import "google/protobuf/struct.proto"; // see https://docs.aws.amazon.com/xray/latest/devguide/xray-api-segmentdocuments.html message Segment { @@ -14,12 +15,12 @@ message Segment { double start_time = 4 [(validate.rules).double = {gt: 0}]; double end_time = 5 [(validate.rules).double = {gt: 0}]; string parent_id = 6; - map annotations = 7; - http_annotations http = 8; + http_annotations http = 7; message http_annotations { - map request = 1; - map response = 2; + google.protobuf.Struct request = 1; + google.protobuf.Struct response = 2; } + map annotations = 8; } message Header { diff --git a/source/extensions/tracers/xray/tracer.cc b/source/extensions/tracers/xray/tracer.cc index 4fdfb26fdfee..1d4768fcc025 100644 --- a/source/extensions/tracers/xray/tracer.cc +++ b/source/extensions/tracers/xray/tracer.cc @@ -11,6 +11,7 @@ #include "common/common/fmt.h" #include "common/common/hex.h" #include "common/protobuf/message_validator_impl.h" +#include "common/protobuf/utility.h" #include "common/runtime/runtime_impl.h" #include "source/extensions/tracers/xray/daemon.pb.validate.h" @@ -76,17 +77,25 @@ void Span::finishSpan() { s.set_end_time( time_point_cast(time_source_.systemTime()).time_since_epoch().count()); s.set_parent_id(parentId()); - using KeyValue = Protobuf::Map::value_type; - for (const auto& item : custom_annotations_) { - s.mutable_annotations()->insert(KeyValue{item.first, item.second}); + + // HTTP annotations + using StructField = Protobuf::MapPair; + + ProtobufWkt::Struct* request = s.mutable_http()->mutable_request(); + auto* request_fields = request->mutable_fields(); + for (const auto& field : http_request_annotations_) { + request_fields->insert(StructField{field.first, field.second}); } - for (const auto& item : http_request_annotations_) { - s.mutable_http()->mutable_request()->insert(KeyValue{item.first, item.second}); + ProtobufWkt::Struct* response = s.mutable_http()->mutable_response(); + auto* response_fields = response->mutable_fields(); + for (const auto& field : http_response_annotations_) { + response_fields->insert(StructField{field.first, field.second}); } - for (const auto& item : http_response_annotations_) { - s.mutable_http()->mutable_response()->insert(KeyValue{item.first, item.second}); + using KeyValue = Protobuf::Map::value_type; + for (const auto& item : custom_annotations_) { + s.mutable_annotations()->insert(KeyValue{item.first, item.second}); } const std::string json = MessageUtil::getJsonStringFromMessage( @@ -179,20 +188,30 @@ void Span::setTag(absl::string_view name, absl::string_view value) { } if (name == 
HttpUrl) { - http_request_annotations_.emplace(SpanUrl, value); + http_request_annotations_.emplace(SpanUrl, ValueUtil::stringValue(std::string(value))); } else if (name == HttpMethod) { - http_request_annotations_.emplace(SpanMethod, value); + http_request_annotations_.emplace(SpanMethod, ValueUtil::stringValue(std::string(value))); } else if (name == HttpUserAgent) { - http_request_annotations_.emplace(SpanUserAgent, value); + http_request_annotations_.emplace(SpanUserAgent, ValueUtil::stringValue(std::string(value))); } else if (name == HttpStatusCode) { - http_response_annotations_.emplace(SpanStatus, value); + uint64_t status_code; + if (!absl::SimpleAtoi(value, &status_code)) { + ENVOY_LOG(debug, "{} must be a number, given: {}", HttpStatusCode, value); + return; + } + http_response_annotations_.emplace(SpanStatus, ValueUtil::numberValue(status_code)); } else if (name == HttpResponseSize) { - http_response_annotations_.emplace(SpanContentLength, value); + uint64_t response_size; + if (!absl::SimpleAtoi(value, &response_size)) { + ENVOY_LOG(debug, "{} must be a number, given: {}", HttpResponseSize, value); + return; + } + http_response_annotations_.emplace(SpanContentLength, ValueUtil::numberValue(response_size)); } else if (name == PeerAddress) { - http_request_annotations_.emplace(SpanClientIp, value); + http_request_annotations_.emplace(SpanClientIp, ValueUtil::stringValue(std::string(value))); // In this case, PeerAddress refers to the client's actual IP address, not // the address specified in the the HTTP X-Forwarded-For header. - http_request_annotations_.emplace(SpanXForwardedFor, "false"); + http_request_annotations_.emplace(SpanXForwardedFor, ValueUtil::boolValue(false)); } else { custom_annotations_.emplace(name, value); } diff --git a/source/extensions/tracers/xray/tracer.h b/source/extensions/tracers/xray/tracer.h index f9a3818cbc79..cf7c977d8fbc 100644 --- a/source/extensions/tracers/xray/tracer.h +++ b/source/extensions/tracers/xray/tracer.h @@ -8,6 +8,7 @@ #include "envoy/tracing/http_tracer.h" #include "common/common/hex.h" +#include "common/protobuf/utility.h" #include "extensions/tracers/xray/daemon_broker.h" #include "extensions/tracers/xray/sampling_strategy.h" @@ -23,7 +24,7 @@ namespace XRay { constexpr auto XRayTraceHeader = "x-amzn-trace-id"; -class Span : public Tracing::Span { +class Span : public Tracing::Span, Logger::Loggable { public: /** * Creates a new Span. 
@@ -147,8 +148,8 @@ class Span : public Tracing::Span { std::string trace_id_; std::string parent_segment_id_; std::string name_; - absl::flat_hash_map http_request_annotations_; - absl::flat_hash_map http_response_annotations_; + absl::flat_hash_map http_request_annotations_; + absl::flat_hash_map http_response_annotations_; absl::flat_hash_map custom_annotations_; Envoy::TimeSource& time_source_; DaemonBroker& broker_; diff --git a/test/extensions/tracers/xray/tracer_test.cc b/test/extensions/tracers/xray/tracer_test.cc index ef7b721c565c..023dd00a40ec 100644 --- a/test/extensions/tracers/xray/tracer_test.cc +++ b/test/extensions/tracers/xray/tracer_test.cc @@ -13,6 +13,7 @@ #include "test/mocks/server/mocks.h" #include "test/mocks/tracing/mocks.h" +#include "absl/strings/str_format.h" #include "absl/strings/str_split.h" #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -46,10 +47,10 @@ TEST_F(XRayTracerTest, SerializeSpanTest) { constexpr auto expected_http_method = "POST"; constexpr auto expected_http_url = "/first/second"; constexpr auto expected_user_agent = "Mozilla/5.0 (Macintosh; Intel Mac OS X)"; - constexpr auto expected_status_code = "202"; - constexpr auto expected_content_length = "1337"; + constexpr uint32_t expected_status_code = 202; + constexpr uint32_t expected_content_length = 1337; constexpr auto expected_client_ip = "10.0.0.100"; - constexpr auto expected_x_forwarded_for = "false"; + constexpr auto expected_x_forwarded_for = false; constexpr auto expected_upstream_address = "10.0.0.200"; auto on_send = [&](const std::string& json) { @@ -61,13 +62,19 @@ TEST_F(XRayTracerTest, SerializeSpanTest) { ASSERT_EQ(1, s.annotations().size()); ASSERT_TRUE(s.parent_id().empty()); ASSERT_STREQ(expected_span_name, s.name().c_str()); - ASSERT_STREQ(expected_http_method, s.http().request().at("method").c_str()); - ASSERT_STREQ(expected_http_url, s.http().request().at("url").c_str()); - ASSERT_STREQ(expected_user_agent, s.http().request().at("user_agent").c_str()); - ASSERT_STREQ(expected_status_code, s.http().response().at("status").c_str()); - ASSERT_STREQ(expected_content_length, s.http().response().at("content_length").c_str()); - ASSERT_STREQ(expected_client_ip, s.http().request().at("client_ip").c_str()); - ASSERT_STREQ(expected_x_forwarded_for, s.http().request().at("x_forwarded_for").c_str()); + ASSERT_STREQ(expected_http_method, + s.http().request().fields().at("method").string_value().c_str()); + ASSERT_STREQ(expected_http_url, s.http().request().fields().at("url").string_value().c_str()); + ASSERT_STREQ(expected_user_agent, + s.http().request().fields().at("user_agent").string_value().c_str()); + ASSERT_DOUBLE_EQ(expected_status_code, + s.http().response().fields().at("status").number_value()); + ASSERT_DOUBLE_EQ(expected_content_length, + s.http().response().fields().at("content_length").number_value()); + ASSERT_STREQ(expected_client_ip, + s.http().request().fields().at("client_ip").string_value().c_str()); + ASSERT_EQ(expected_x_forwarded_for, + s.http().request().fields().at("x_forwarded_for").bool_value()); ASSERT_STREQ(expected_upstream_address, s.annotations().at("upstream_address").c_str()); }; @@ -78,8 +85,8 @@ TEST_F(XRayTracerTest, SerializeSpanTest) { span->setTag("http.method", expected_http_method); span->setTag("http.url", expected_http_url); span->setTag("user_agent", expected_user_agent); - span->setTag("http.status_code", expected_status_code); - span->setTag("response_size", expected_content_length); + span->setTag("http.status_code", 
absl::StrFormat("%d", expected_status_code)); + span->setTag("response_size", absl::StrFormat("%d", expected_content_length)); span->setTag("peer.address", expected_client_ip); span->setTag("upstream_address", expected_upstream_address); span->finishSpan(); From 5cd6ef7754b56e3ce06caaa6b76d5a75d4c67696 Mon Sep 17 00:00:00 2001 From: Ruslan Nigmatullin Date: Wed, 22 Apr 2020 09:02:50 -0700 Subject: [PATCH 004/909] request_id: Add option to always set request id in response (#10808) Signed-off-by: Ruslan Nigmatullin --- .../v3/http_connection_manager.proto | 7 +- .../v4alpha/http_connection_manager.proto | 7 +- docs/root/version_history/current.rst | 3 + .../v3/http_connection_manager.proto | 13 ++-- .../v4alpha/http_connection_manager.proto | 7 +- source/common/http/conn_manager_config.h | 5 ++ source/common/http/conn_manager_impl.cc | 5 +- source/common/http/conn_manager_utility.cc | 12 ++-- source/common/http/conn_manager_utility.h | 3 +- .../network/http_connection_manager/config.cc | 1 + .../network/http_connection_manager/config.h | 2 + source/server/http/admin.h | 1 + .../http/conn_manager_impl_fuzz_test.cc | 1 + test/common/http/conn_manager_impl_test.cc | 1 + test/common/http/conn_manager_utility_test.cc | 65 +++++++++++-------- .../http_connection_manager/config_test.cc | 31 +++++++++ 16 files changed, 119 insertions(+), 45 deletions(-) diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto b/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto index d802ec4ce774..4dd60a012a80 100644 --- a/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto +++ b/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto @@ -30,7 +30,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // HTTP connection manager :ref:`configuration overview `. // [#extension: envoy.filters.network.http_connection_manager] -// [#next-free-field: 37] +// [#next-free-field: 38] message HttpConnectionManager { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager"; @@ -433,6 +433,11 @@ message HttpConnectionManager { // is the current Envoy behaviour. This defaults to false. bool preserve_external_request_id = 32; + // If set, Envoy will always set :ref:`x-request-id ` header in response. + // If this is false or not set, the request ID is returned in responses only if tracing is forced using + // :ref:`x-envoy-force-trace ` header. + bool always_set_request_id_in_response = 37; + // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP // header. ForwardClientCertDetails forward_client_cert_details = 16 diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto index 975b71cc892f..5eaefe16037e 100644 --- a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto +++ b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto @@ -30,7 +30,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // HTTP connection manager :ref:`configuration overview `. 
// [#extension: envoy.filters.network.http_connection_manager] -// [#next-free-field: 37] +// [#next-free-field: 38] message HttpConnectionManager { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager"; @@ -433,6 +433,11 @@ message HttpConnectionManager { // is the current Envoy behaviour. This defaults to false. bool preserve_external_request_id = 32; + // If set, Envoy will always set :ref:`x-request-id ` header in response. + // If this is false or not set, the request ID is returned in responses only if tracing is forced using + // :ref:`x-envoy-force-trace ` header. + bool always_set_request_id_in_response = 37; + // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP // header. ForwardClientCertDetails forward_client_cert_details = 16 diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 7cb8b8f7bdfa..425073346f25 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -17,6 +17,9 @@ Changes Can be reverted temporarily by setting runtime feature `envoy.reloadable_features.fix_upgrade_response` to false. * logger: added :ref:`--log-format-prefix-with-location ` command line option to prefix '%v' with file path and line number. * network filters: added a :ref:`postgres proxy filter `. +* request_id: added to :ref:`always_set_request_id_in_response setting ` + to set :ref:`x-request-id ` header in response even if + tracing is not forced. * router: allow retries of streaming or incomplete requests. This removes stat `rq_retry_skipped_request_not_complete`. * tracing: tracing configuration has been made fully dynamic and every HTTP connection manager can now have a separate :ref:`tracing provider `. diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto index b04d0861c953..4be597d448b1 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto @@ -30,7 +30,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // HTTP connection manager :ref:`configuration overview `. // [#extension: envoy.filters.network.http_connection_manager] -// [#next-free-field: 37] +// [#next-free-field: 38] message HttpConnectionManager { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager"; @@ -417,19 +417,24 @@ message HttpConnectionManager { // Via header value to append to request and response headers. If this is // empty, no via header will be appended. - ForwardClientCertDetails forward_client_cert_details = 16 - [(validate.rules).enum = {defined_only: true}]; + bool always_set_request_id_in_response = 37; // Whether the connection manager will generate the :ref:`x-request-id // ` header if it does not exist. This defaults to // true. Generating a random UUID4 is expensive so in high throughput scenarios where this feature // is not desired it can be disabled. 
- SetCurrentClientCertDetails set_current_client_cert_details = 17; + ForwardClientCertDetails forward_client_cert_details = 16 + [(validate.rules).enum = {defined_only: true}]; // Whether the connection manager will keep the :ref:`x-request-id // ` header if passed for a request that is edge // (Edge request is the request from external clients to front Envoy) and not reset it, which // is the current Envoy behaviour. This defaults to false. + SetCurrentClientCertDetails set_current_client_cert_details = 17; + + // If set, Envoy will always set :ref:`x-request-id ` header in response. + // If this is false or not set, the request ID is returned in responses only if tracing is forced using + // :ref:`x-envoy-force-trace ` header. bool proxy_100_continue = 18; // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto index 975b71cc892f..5eaefe16037e 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto @@ -30,7 +30,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // HTTP connection manager :ref:`configuration overview `. // [#extension: envoy.filters.network.http_connection_manager] -// [#next-free-field: 37] +// [#next-free-field: 38] message HttpConnectionManager { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager"; @@ -433,6 +433,11 @@ message HttpConnectionManager { // is the current Envoy behaviour. This defaults to false. bool preserve_external_request_id = 32; + // If set, Envoy will always set :ref:`x-request-id ` header in response. + // If this is false or not set, the request ID is returned in responses only if tracing is forced using + // :ref:`x-envoy-force-trace ` header. + bool always_set_request_id_in_response = 37; + // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP // header. ForwardClientCertDetails forward_client_cert_details = 16 diff --git a/source/common/http/conn_manager_config.h b/source/common/http/conn_manager_config.h index 774b5e9f47c5..67c4a74ca63f 100644 --- a/source/common/http/conn_manager_config.h +++ b/source/common/http/conn_manager_config.h @@ -245,6 +245,11 @@ class ConnectionManagerConfig { */ virtual bool preserveExternalRequestId() const PURE; + /** + * @return whether the x-request-id should always be set in the response. + */ + virtual bool alwaysSetRequestIdInResponse() const PURE; + /** * @return optional idle timeout for incoming connection manager connections. */ diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index 4501cbe7fd41..b4dddcb478ff 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -1541,8 +1541,7 @@ void ConnectionManagerImpl::ActiveStream::encode100ContinueHeaders( // Strip the T-E headers etc. Defer other header additions as well as drain-close logic to the // continuation headers. 
ConnectionManagerUtility::mutateResponseHeaders(headers, request_headers_.get(), - connection_manager_.config_.requestIDExtension(), - EMPTY_STRING); + connection_manager_.config_, EMPTY_STRING); // Count both the 1xx and follow-up response code in stats. chargeStats(headers); @@ -1625,7 +1624,7 @@ void ConnectionManagerImpl::ActiveStream::encodeHeadersInternal(ResponseHeaderMa headers.setReferenceServer(connection_manager_.config_.serverName()); } ConnectionManagerUtility::mutateResponseHeaders(headers, request_headers_.get(), - connection_manager_.config_.requestIDExtension(), + connection_manager_.config_, connection_manager_.config_.via()); // See if we want to drain/close the connection. Send the go away frame prior to encoding the diff --git a/source/common/http/conn_manager_utility.cc b/source/common/http/conn_manager_utility.cc index 417195e1adcb..88596ed4bcdf 100644 --- a/source/common/http/conn_manager_utility.cc +++ b/source/common/http/conn_manager_utility.cc @@ -363,9 +363,10 @@ void ConnectionManagerUtility::mutateXfccRequestHeader(RequestHeaderMap& request } } -void ConnectionManagerUtility::mutateResponseHeaders( - ResponseHeaderMap& response_headers, const RequestHeaderMap* request_headers, - const RequestIDExtensionSharedPtr& rid_extension, const std::string& via) { +void ConnectionManagerUtility::mutateResponseHeaders(ResponseHeaderMap& response_headers, + const RequestHeaderMap* request_headers, + ConnectionManagerConfig& config, + const std::string& via) { if (request_headers != nullptr && Utility::isUpgrade(*request_headers) && Utility::isUpgrade(response_headers)) { // As in mutateRequestHeaders, Upgrade responses have special handling. @@ -391,8 +392,9 @@ void ConnectionManagerUtility::mutateResponseHeaders( response_headers.removeTransferEncoding(); - if (request_headers != nullptr && request_headers->EnvoyForceTrace()) { - rid_extension->setInResponse(response_headers, *request_headers); + if (request_headers != nullptr && + (config.alwaysSetRequestIdInResponse() || request_headers->EnvoyForceTrace())) { + config.requestIDExtension()->setInResponse(response_headers, *request_headers); } response_headers.removeKeepAlive(); response_headers.removeProxyConnection(); diff --git a/source/common/http/conn_manager_utility.h b/source/common/http/conn_manager_utility.h index 6bac45578eb9..9443b55276c3 100644 --- a/source/common/http/conn_manager_utility.h +++ b/source/common/http/conn_manager_utility.h @@ -60,8 +60,7 @@ class ConnectionManagerUtility { static void mutateResponseHeaders(ResponseHeaderMap& response_headers, const RequestHeaderMap* request_headers, - const RequestIDExtensionSharedPtr& rid_extension, - const std::string& via); + ConnectionManagerConfig& config, const std::string& via); // Sanitize the path in the header map if the path exists and it is forced by config. // Side affect: the string view of Path header is invalidated. 
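For reference, a minimal HTTP connection manager configuration that opts into the new behaviour could look like the sketch below (the stat prefix, route config and filter entries are placeholders, mirroring the YAML used in the config tests further down in this patch):

    stat_prefix: ingress_http
    always_set_request_id_in_response: true
    route_config:
      name: local_route
    http_filters:
    - name: envoy.filters.http.router

With this flag set, the x-request-id header is echoed on every response rather than only when tracing is forced via x-envoy-force-trace.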
diff --git a/source/extensions/filters/network/http_connection_manager/config.cc b/source/extensions/filters/network/http_connection_manager/config.cc index 50b4d45f13f5..7a9f7b2efc58 100644 --- a/source/extensions/filters/network/http_connection_manager/config.cc +++ b/source/extensions/filters/network/http_connection_manager/config.cc @@ -196,6 +196,7 @@ HttpConnectionManagerConfig::HttpConnectionManagerConfig( drain_timeout_(PROTOBUF_GET_MS_OR_DEFAULT(config, drain_timeout, 5000)), generate_request_id_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, generate_request_id, true)), preserve_external_request_id_(config.preserve_external_request_id()), + always_set_request_id_in_response_(config.always_set_request_id_in_response()), date_provider_(date_provider), listener_stats_(Http::ConnectionManagerImpl::generateListenerStats(stats_prefix_, context_.listenerScope())), diff --git a/source/extensions/filters/network/http_connection_manager/config.h b/source/extensions/filters/network/http_connection_manager/config.h index 59dee762513f..6f3995fc30fd 100644 --- a/source/extensions/filters/network/http_connection_manager/config.h +++ b/source/extensions/filters/network/http_connection_manager/config.h @@ -109,6 +109,7 @@ class HttpConnectionManagerConfig : Logger::Loggable, FilterChainFactory& filterFactory() override { return *this; } bool generateRequestId() const override { return generate_request_id_; } bool preserveExternalRequestId() const override { return preserve_external_request_id_; } + bool alwaysSetRequestIdInResponse() const override { return always_set_request_id_in_response_; } uint32_t maxRequestHeadersKb() const override { return max_request_headers_kb_; } uint32_t maxRequestHeadersCount() const override { return max_request_headers_count_; } absl::optional idleTimeout() const override { return idle_timeout_; } @@ -216,6 +217,7 @@ class HttpConnectionManagerConfig : Logger::Loggable, std::chrono::milliseconds drain_timeout_; bool generate_request_id_; const bool preserve_external_request_id_; + const bool always_set_request_id_in_response_; Http::DateProvider& date_provider_; Http::ConnectionManagerListenerStats listener_stats_; const bool proxy_100_continue_; diff --git a/source/server/http/admin.h b/source/server/http/admin.h index 19dada14018f..464648d1476e 100644 --- a/source/server/http/admin.h +++ b/source/server/http/admin.h @@ -115,6 +115,7 @@ class AdminImpl : public Admin, Http::FilterChainFactory& filterFactory() override { return *this; } bool generateRequestId() const override { return false; } bool preserveExternalRequestId() const override { return false; } + bool alwaysSetRequestIdInResponse() const override { return false; } absl::optional idleTimeout() const override { return idle_timeout_; } bool isRoutable() const override { return false; } absl::optional maxConnectionDuration() const override { diff --git a/test/common/http/conn_manager_impl_fuzz_test.cc b/test/common/http/conn_manager_impl_fuzz_test.cc index 38d471ded4cc..4d0512ff7dad 100644 --- a/test/common/http/conn_manager_impl_fuzz_test.cc +++ b/test/common/http/conn_manager_impl_fuzz_test.cc @@ -103,6 +103,7 @@ class FuzzConfig : public ConnectionManagerConfig { FilterChainFactory& filterFactory() override { return filter_factory_; } bool generateRequestId() const override { return true; } bool preserveExternalRequestId() const override { return false; } + bool alwaysSetRequestIdInResponse() const override { return false; } uint32_t maxRequestHeadersKb() const override { return max_request_headers_kb_; } 
uint32_t maxRequestHeadersCount() const override { return max_request_headers_count_; } absl::optional idleTimeout() const override { return idle_timeout_; } diff --git a/test/common/http/conn_manager_impl_test.cc b/test/common/http/conn_manager_impl_test.cc index 324f2c58259f..38593d73ab3e 100644 --- a/test/common/http/conn_manager_impl_test.cc +++ b/test/common/http/conn_manager_impl_test.cc @@ -293,6 +293,7 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan FilterChainFactory& filterFactory() override { return filter_factory_; } bool generateRequestId() const override { return true; } bool preserveExternalRequestId() const override { return false; } + bool alwaysSetRequestIdInResponse() const override { return false; } uint32_t maxRequestHeadersKb() const override { return max_request_headers_kb_; } uint32_t maxRequestHeadersCount() const override { return max_request_headers_count_; } absl::optional idleTimeout() const override { return idle_timeout_; } diff --git a/test/common/http/conn_manager_utility_test.cc b/test/common/http/conn_manager_utility_test.cc index 9d730478ec9f..2b7d445e8bb7 100644 --- a/test/common/http/conn_manager_utility_test.cc +++ b/test/common/http/conn_manager_utility_test.cc @@ -81,6 +81,7 @@ class MockConnectionManagerConfig : public ConnectionManagerConfig { ON_CALL(*this, generateRequestId()).WillByDefault(Return(true)); ON_CALL(*this, isRoutable()).WillByDefault(Return(true)); ON_CALL(*this, preserveExternalRequestId()).WillByDefault(Return(false)); + ON_CALL(*this, alwaysSetRequestIdInResponse()).WillByDefault(Return(false)); } // Http::ConnectionManagerConfig @@ -98,6 +99,7 @@ class MockConnectionManagerConfig : public ConnectionManagerConfig { MOCK_METHOD(FilterChainFactory&, filterFactory, ()); MOCK_METHOD(bool, generateRequestId, (), (const)); MOCK_METHOD(bool, preserveExternalRequestId, (), (const)); + MOCK_METHOD(bool, alwaysSetRequestIdInResponse, (), (const)); MOCK_METHOD(uint32_t, maxRequestHeadersKb, (), (const)); MOCK_METHOD(uint32_t, maxRequestHeadersCount, (), (const)); MOCK_METHOD(absl::optional, idleTimeout, (), (const)); @@ -384,8 +386,8 @@ TEST_F(ConnectionManagerUtilityTest, ViaEmpty) { EXPECT_FALSE(request_headers.has(Headers::get().Via)); TestResponseHeaderMapImpl response_headers; - ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, - config_.requestIDExtension(), via_); + ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, config_, + via_); EXPECT_FALSE(response_headers.has(Headers::get().Via)); } @@ -402,11 +404,10 @@ TEST_F(ConnectionManagerUtilityTest, ViaAppend) { TestResponseHeaderMapImpl response_headers; // Pretend we're doing a 100-continue transform here. - ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, - config_.requestIDExtension(), ""); + ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, config_, ""); // The actual response header processing. 
- ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, - config_.requestIDExtension(), via_); + ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, config_, + via_); EXPECT_EQ("foo", response_headers.get_(Headers::get().Via)); } @@ -753,8 +754,7 @@ TEST_F(ConnectionManagerUtilityTest, MutateResponseHeaders) { {"connection", "foo"}, {"transfer-encoding", "foo"}, {"custom_header", "custom_value"}}; TestRequestHeaderMapImpl request_headers{{"x-request-id", "request-id"}}; - ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, - config_.requestIDExtension(), ""); + ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, config_, ""); EXPECT_EQ(1UL, response_headers.size()); EXPECT_EQ("custom_value", response_headers.get_("custom_header")); @@ -771,8 +771,7 @@ TEST_F(ConnectionManagerUtilityTest, DoNotRemoveConnectionUpgradeForWebSocketRes {"upgrade", "bar"}}; EXPECT_TRUE(Utility::isUpgrade(request_headers)); EXPECT_TRUE(Utility::isUpgrade(response_headers)); - ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, - config_.requestIDExtension(), ""); + ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, config_, ""); EXPECT_EQ(3UL, response_headers.size()) << response_headers; EXPECT_EQ("upgrade", response_headers.get_("connection")); @@ -787,8 +786,7 @@ TEST_F(ConnectionManagerUtilityTest, DoNotAddConnectionLengthForWebSocket101Resp {":status", "101"}, {"connection", "upgrade"}, {"upgrade", "bar"}}; EXPECT_TRUE(Utility::isUpgrade(request_headers)); EXPECT_TRUE(Utility::isUpgrade(response_headers)); - ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, - config_.requestIDExtension(), ""); + ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, config_, ""); EXPECT_EQ(3UL, response_headers.size()) << response_headers; EXPECT_EQ("upgrade", response_headers.get_("connection")); @@ -804,8 +802,8 @@ TEST_F(ConnectionManagerUtilityTest, ClearUpgradeHeadersForNonUpgradeRequests) { {"connection", "foo"}, {"transfer-encoding", "bar"}, {"custom_header", "custom_value"}}; EXPECT_FALSE(Utility::isUpgrade(request_headers)); EXPECT_FALSE(Utility::isUpgrade(response_headers)); - ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, - config_.requestIDExtension(), ""); + ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, config_, + ""); EXPECT_EQ(1UL, response_headers.size()) << response_headers; EXPECT_EQ("custom_value", response_headers.get_("custom_header")); @@ -820,8 +818,8 @@ TEST_F(ConnectionManagerUtilityTest, ClearUpgradeHeadersForNonUpgradeRequests) { {"custom_header", "custom_value"}}; EXPECT_FALSE(Utility::isUpgrade(request_headers)); EXPECT_TRUE(Utility::isUpgrade(response_headers)); - ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, - config_.requestIDExtension(), ""); + ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, config_, + ""); EXPECT_EQ(1UL, response_headers.size()) << response_headers; EXPECT_EQ("custom_value", response_headers.get_("custom_header")); @@ -833,8 +831,8 @@ TEST_F(ConnectionManagerUtilityTest, ClearUpgradeHeadersForNonUpgradeRequests) { TestResponseHeaderMapImpl response_headers{{"transfer-encoding", "foo"}, {"upgrade", "bar"}}; EXPECT_TRUE(Utility::isUpgrade(request_headers)); 
EXPECT_FALSE(Utility::isUpgrade(response_headers)); - ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, - config_.requestIDExtension(), ""); + ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, config_, + ""); EXPECT_EQ(0UL, response_headers.size()) << response_headers; } @@ -854,8 +852,8 @@ TEST_F(ConnectionManagerUtilityTest, ClearUpgradeHeadersForNonUpgradeRequestsLeg {"custom_header", "custom_value"}}; EXPECT_FALSE(Utility::isUpgrade(request_headers)); EXPECT_TRUE(Utility::isUpgrade(response_headers)); - ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, - config_.requestIDExtension(), ""); + ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, config_, + ""); EXPECT_EQ(2UL, response_headers.size()) << response_headers; EXPECT_EQ("custom_value", response_headers.get_("custom_header")); @@ -868,8 +866,8 @@ TEST_F(ConnectionManagerUtilityTest, ClearUpgradeHeadersForNonUpgradeRequestsLeg TestResponseHeaderMapImpl response_headers{{"transfer-encoding", "foo"}, {"upgrade", "bar"}}; EXPECT_TRUE(Utility::isUpgrade(request_headers)); EXPECT_FALSE(Utility::isUpgrade(response_headers)); - ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, - config_.requestIDExtension(), ""); + ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, config_, + ""); EXPECT_EQ(1UL, response_headers.size()) << response_headers; EXPECT_EQ("bar", response_headers.get_("upgrade")); @@ -885,8 +883,7 @@ TEST_F(ConnectionManagerUtilityTest, MutateResponseHeadersReturnXRequestId) { EXPECT_CALL(*request_id_extension_, setInResponse(testing::Ref(response_headers), testing::Ref(request_headers))) .Times(1); - ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, - config_.requestIDExtension(), ""); + ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, config_, ""); EXPECT_EQ("request-id", response_headers.get_("x-request-id")); } @@ -898,11 +895,24 @@ TEST_F(ConnectionManagerUtilityTest, SkipMutateResponseHeadersReturnXRequestId) EXPECT_CALL(*request_id_extension_, setInResponse(testing::Ref(response_headers), testing::Ref(request_headers))) .Times(0); - ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, - config_.requestIDExtension(), ""); + ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, config_, ""); EXPECT_EQ("", response_headers.get_("x-request-id")); } +// Test that we do return x-request-id if we were asked to always return it even if trace is not +// forced. +TEST_F(ConnectionManagerUtilityTest, AlwaysMutateResponseHeadersReturnXRequestId) { + TestResponseHeaderMapImpl response_headers; + TestRequestHeaderMapImpl request_headers{{"x-request-id", "request-id"}}; + + EXPECT_CALL(*request_id_extension_, + setInResponse(testing::Ref(response_headers), testing::Ref(request_headers))) + .Times(1); + ON_CALL(config_, alwaysSetRequestIdInResponse()).WillByDefault(Return(true)); + ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, config_, ""); + EXPECT_EQ("request-id", response_headers.get_("x-request-id")); +} + // Test full sanitization of x-forwarded-client-cert. 
TEST_F(ConnectionManagerUtilityTest, MtlsSanitizeClientCert) { auto ssl = std::make_shared>(); @@ -1400,8 +1410,7 @@ TEST_F(ConnectionManagerUtilityTest, RemovesProxyResponseHeaders) { Http::TestResponseHeaderMapImpl response_headers{{"keep-alive", "timeout=60"}, {"proxy-connection", "proxy-header"}}; EXPECT_CALL(*request_id_extension_, setTraceStatus(_, _)).Times(0); - ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, - config_.requestIDExtension(), ""); + ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, config_, ""); EXPECT_EQ(TraceStatus::NoTrace, request_id_extension_->getTraceStatus(request_headers)); diff --git a/test/extensions/filters/network/http_connection_manager/config_test.cc b/test/extensions/filters/network/http_connection_manager/config_test.cc index 014459f6d73e..e6fa79671b60 100644 --- a/test/extensions/filters/network/http_connection_manager/config_test.cc +++ b/test/extensions/filters/network/http_connection_manager/config_test.cc @@ -1458,6 +1458,37 @@ TEST_F(HttpConnectionManagerConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtens deprecated_name)); } +TEST_F(HttpConnectionManagerConfigTest, AlwaysSetRequestIdInResponseDefault) { + const std::string yaml_string = R"EOF( + stat_prefix: ingress_http + route_config: + name: local_route + http_filters: + - name: envoy.filters.http.router + )EOF"; + + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + date_provider_, route_config_provider_manager_, + scoped_routes_config_provider_manager_, http_tracer_manager_); + EXPECT_FALSE(config.alwaysSetRequestIdInResponse()); +} + +TEST_F(HttpConnectionManagerConfigTest, AlwaysSetRequestIdInResponseConfigured) { + const std::string yaml_string = R"EOF( + stat_prefix: ingress_http + always_set_request_id_in_response: true + route_config: + name: local_route + http_filters: + - name: envoy.filters.http.router + )EOF"; + + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + date_provider_, route_config_provider_manager_, + scoped_routes_config_provider_manager_, http_tracer_manager_); + EXPECT_TRUE(config.alwaysSetRequestIdInResponse()); +} + namespace { class TestRequestIDExtension : public Http::RequestIDExtension { From 25ccc5f2442300bc31a4975ddff3d572fbc37519 Mon Sep 17 00:00:00 2001 From: Sunjay Bhatia Date: Wed, 22 Apr 2020 12:23:33 -0400 Subject: [PATCH 005/909] Remove inclusion of pthread.h, not needed for linux compilation (#10895) Signed-off-by: Sunjay Bhatia --- .../filters/network/postgres_proxy/postgres_integration_test.cc | 2 -- 1 file changed, 2 deletions(-) diff --git a/test/extensions/filters/network/postgres_proxy/postgres_integration_test.cc b/test/extensions/filters/network/postgres_proxy/postgres_integration_test.cc index 02229fc1ea1e..86ae00230c67 100644 --- a/test/extensions/filters/network/postgres_proxy/postgres_integration_test.cc +++ b/test/extensions/filters/network/postgres_proxy/postgres_integration_test.cc @@ -1,5 +1,3 @@ -#include - #include "test/integration/fake_upstream.h" #include "test/integration/integration.h" #include "test/integration/utility.h" From ae0a45a7aeb47100380cb4f2e71c682cda237ded Mon Sep 17 00:00:00 2001 From: Rama Chavali Date: Wed, 22 Apr 2020 22:11:43 +0530 Subject: [PATCH 006/909] router: retry overloaded requests (#10847) Signed-off-by: Rama Chavali --- .../configuration/http/http_filters/router_filter.rst | 8 -------- docs/root/intro/arch_overview/http/http_routing.rst 
| 6 ++++-- docs/root/version_history/current.rst | 1 + include/envoy/http/header_map.h | 1 - source/common/router/retry_state_impl.cc | 4 ---- source/common/router/router.cc | 6 ++++-- test/common/router/retry_state_impl_test.cc | 3 ++- 7 files changed, 11 insertions(+), 18 deletions(-) diff --git a/docs/root/configuration/http/http_filters/router_filter.rst b/docs/root/configuration/http/http_filters/router_filter.rst index 6575e0ed2336..43a60e990c3f 100644 --- a/docs/root/configuration/http/http_filters/router_filter.rst +++ b/docs/root/configuration/http/http_filters/router_filter.rst @@ -274,14 +274,6 @@ for the next health check interval. The host can become healthy again via standa checks. See the :ref:`health checking overview ` for more information. -.. _config_http_filters_router_x-envoy-overloaded_consumed: - -x-envoy-overloaded -^^^^^^^^^^^^^^^^^^ - -If this header is set by upstream, Envoy will not retry. Currently the value of the header is not -looked at, only its presence. - .. _config_http_filters_router_x-envoy-ratelimited: x-envoy-ratelimited diff --git a/docs/root/intro/arch_overview/http/http_routing.rst b/docs/root/intro/arch_overview/http/http_routing.rst index 95ce2b6ed796..d71ae677a348 100644 --- a/docs/root/intro/arch_overview/http/http_routing.rst +++ b/docs/root/intro/arch_overview/http/http_routing.rst @@ -122,8 +122,10 @@ headers `. The following configurat :ref:`retry priority ` can be configured to adjust the priority load used when selecting a priority for retries. -Note that retries may be disabled depending on the contents of the :ref:`x-envoy-overloaded -`. +Note that Envoy retries requests when :ref:`x-envoy-overloaded +` is present. It is recommended to either configure +:ref:`retry budgets (preferred) ` or set +:ref:`maximum active retries circuit breaker ` to an appropriate value to avoid retry storms. .. _arch_overview_http_routing_hedging: diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 425073346f25..5cc3188b5e44 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -21,6 +21,7 @@ Changes to set :ref:`x-request-id ` header in response even if tracing is not forced. * router: allow retries of streaming or incomplete requests. This removes stat `rq_retry_skipped_request_not_complete`. +* router: allow retries by default when upstream responds with :ref:`x-envoy-overloaded `. * tracing: tracing configuration has been made fully dynamic and every HTTP connection manager can now have a separate :ref:`tracing provider `. * upstream: fixed a bug where Envoy would panic when receiving a GRPC SERVICE_UNKNOWN status on the health check. 
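Because x-envoy-overloaded no longer suppresses retries, the routing docs above recommend bounding retries with retry budgets (preferred) or the maximum active retries circuit breaker. A cluster-level retry budget sketch, with illustrative placeholder values, could look like:

    circuit_breakers:
      thresholds:
      - priority: DEFAULT
        retry_budget:
          budget_percent:
            value: 20.0           # retries limited to roughly 20% of active requests
          min_retry_concurrency: 3

Alternatively, setting max_retries on the same thresholds entry caps concurrent retries at a fixed number.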
diff --git a/include/envoy/http/header_map.h b/include/envoy/http/header_map.h index 0c8ddb6adcfd..512b59c35445 100644 --- a/include/envoy/http/header_map.h +++ b/include/envoy/http/header_map.h @@ -325,7 +325,6 @@ class HeaderEntry { HEADER_FUNC(Etag) \ HEADER_FUNC(EnvoyDegraded) \ HEADER_FUNC(EnvoyImmediateHealthCheckFail) \ - HEADER_FUNC(EnvoyOverloaded) \ HEADER_FUNC(EnvoyRateLimited) \ HEADER_FUNC(EnvoyUpstreamCanary) \ HEADER_FUNC(EnvoyUpstreamHealthCheckedCluster) \ diff --git a/source/common/router/retry_state_impl.cc b/source/common/router/retry_state_impl.cc index 2952b7a34157..7d29a00332df 100644 --- a/source/common/router/retry_state_impl.cc +++ b/source/common/router/retry_state_impl.cc @@ -281,10 +281,6 @@ RetryStatus RetryStateImpl::shouldHedgeRetryPerTryTimeout(DoRetryCallback callba } bool RetryStateImpl::wouldRetryFromHeaders(const Http::ResponseHeaderMap& response_headers) { - if (response_headers.EnvoyOverloaded() != nullptr) { - return false; - } - // We never retry if the request is rate limited. if (response_headers.EnvoyRateLimited() != nullptr) { return false; diff --git a/source/common/router/router.cc b/source/common/router/router.cc index b788a691474d..e507e06a4a5c 100644 --- a/source/common/router/router.cc +++ b/source/common/router/router.cc @@ -516,7 +516,8 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, Http::Code::ServiceUnavailable, "maintenance mode", [modify_headers, this](Http::ResponseHeaderMap& headers) { if (!config_.suppress_envoy_headers_) { - headers.setReferenceEnvoyOverloaded(Http::Headers::get().EnvoyOverloadedValues.True); + headers.addReference(Http::Headers::get().EnvoyOverloaded, + Http::Headers::get().EnvoyOverloadedValues.True); } // Note: append_cluster_info does not respect suppress_envoy_headers. modify_headers(headers); @@ -1007,7 +1008,8 @@ void Filter::onUpstreamAbort(Http::Code code, StreamInfo::ResponseFlag response_ code, body, [dropped, this](Http::ResponseHeaderMap& headers) { if (dropped && !config_.suppress_envoy_headers_) { - headers.setReferenceEnvoyOverloaded(Http::Headers::get().EnvoyOverloadedValues.True); + headers.addReference(Http::Headers::get().EnvoyOverloaded, + Http::Headers::get().EnvoyOverloadedValues.True); } modify_headers_(headers); }, diff --git a/test/common/router/retry_state_impl_test.cc b/test/common/router/retry_state_impl_test.cc index 6b55caf21288..25044563d5a1 100644 --- a/test/common/router/retry_state_impl_test.cc +++ b/test/common/router/retry_state_impl_test.cc @@ -203,7 +203,8 @@ TEST_F(RouterRetryStateImplTest, Policy5xxRemote503Overloaded) { Http::TestResponseHeaderMapImpl response_headers{{":status", "503"}, {"x-envoy-overloaded", "true"}}; - EXPECT_EQ(RetryStatus::No, state_->shouldRetryHeaders(response_headers, callback_)); + expectTimerCreateAndEnable(); + EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryHeaders(response_headers, callback_)); } TEST_F(RouterRetryStateImplTest, PolicyResourceExhaustedRemoteRateLimited) { From 4ee310ec07a4e0e775626fe8c652726b4151b8e9 Mon Sep 17 00:00:00 2001 From: htuch Date: Wed, 22 Apr 2020 12:43:11 -0400 Subject: [PATCH 007/909] api/faq: add entry on incremental xDS. (#10876) Also remove some stale comments around delta xDS in ConfigSource. 
Fixes #10836 Signed-off-by: Harvey Tuch --- api/envoy/api/v2/core/config_source.proto | 4 ---- api/envoy/config/core/v3/config_source.proto | 4 ---- api/envoy/config/core/v4alpha/config_source.proto | 4 ---- docs/root/faq/api/incremental.rst | 11 +++++++++++ docs/root/faq/overview.rst | 5 +++-- .../envoy/api/v2/core/config_source.proto | 4 ---- .../envoy/config/core/v3/config_source.proto | 4 ---- .../envoy/config/core/v4alpha/config_source.proto | 4 ---- 8 files changed, 14 insertions(+), 26 deletions(-) create mode 100644 docs/root/faq/api/incremental.rst diff --git a/api/envoy/api/v2/core/config_source.proto b/api/envoy/api/v2/core/config_source.proto index fa42a7aeec1c..8bbb961c1a32 100644 --- a/api/envoy/api/v2/core/config_source.proto +++ b/api/envoy/api/v2/core/config_source.proto @@ -57,10 +57,6 @@ message ApiConfigSource { // Using the delta xDS gRPC service, i.e. DeltaDiscovery{Request,Response} // rather than Discovery{Request,Response}. Rather than sending Envoy the entire state // with every update, the xDS server only sends what has changed since the last update. - // - // DELTA_GRPC is not yet entirely implemented! Initially, only CDS is available. - // Do not use for other xDSes. - // [#comment:TODO(fredlas) update/remove this warning when appropriate.] DELTA_GRPC = 3; } diff --git a/api/envoy/config/core/v3/config_source.proto b/api/envoy/config/core/v3/config_source.proto index b56e06e6de4f..7337403bc853 100644 --- a/api/envoy/config/core/v3/config_source.proto +++ b/api/envoy/config/core/v3/config_source.proto @@ -58,10 +58,6 @@ message ApiConfigSource { // Using the delta xDS gRPC service, i.e. DeltaDiscovery{Request,Response} // rather than Discovery{Request,Response}. Rather than sending Envoy the entire state // with every update, the xDS server only sends what has changed since the last update. - // - // DELTA_GRPC is not yet entirely implemented! Initially, only CDS is available. - // Do not use for other xDSes. - // [#comment:TODO(fredlas) update/remove this warning when appropriate.] DELTA_GRPC = 3; } diff --git a/api/envoy/config/core/v4alpha/config_source.proto b/api/envoy/config/core/v4alpha/config_source.proto index be600bd0096e..253a576a46ed 100644 --- a/api/envoy/config/core/v4alpha/config_source.proto +++ b/api/envoy/config/core/v4alpha/config_source.proto @@ -59,10 +59,6 @@ message ApiConfigSource { // Using the delta xDS gRPC service, i.e. DeltaDiscovery{Request,Response} // rather than Discovery{Request,Response}. Rather than sending Envoy the entire state // with every update, the xDS server only sends what has changed since the last update. - // - // DELTA_GRPC is not yet entirely implemented! Initially, only CDS is available. - // Do not use for other xDSes. - // [#comment:TODO(fredlas) update/remove this warning when appropriate.] DELTA_GRPC = 3; } diff --git a/docs/root/faq/api/incremental.rst b/docs/root/faq/api/incremental.rst new file mode 100644 index 000000000000..e6d4d3f11bbe --- /dev/null +++ b/docs/root/faq/api/incremental.rst @@ -0,0 +1,11 @@ +What is the status of incremental xDS support? +============================================== + +The :ref:`incremental xDS ` protocol is designed to improve efficiency, +scalability and functional use of xDS updates via two mechanisms: + +* Delta xDS. Resource deltas are delivered rather than state-of-the-world. +* On-demand xDS. Resource can be lazy loaded depending on request contents. + +Currently, all xDS protocols (including ADS) support delta xDS. 
On-demand xDS is supported for +:ref:`VHDS ` only. diff --git a/docs/root/faq/overview.rst b/docs/root/faq/overview.rst index e6f4eaa275e6..b3d9de193235 100644 --- a/docs/root/faq/overview.rst +++ b/docs/root/faq/overview.rst @@ -12,8 +12,8 @@ Build build/binaries build/boringssl -API versioning --------------- +API +--- .. toctree:: :maxdepth: 2 @@ -23,6 +23,7 @@ API versioning api/control_plane api/package_naming api/why_versioning + api/incremental Performance ----------- diff --git a/generated_api_shadow/envoy/api/v2/core/config_source.proto b/generated_api_shadow/envoy/api/v2/core/config_source.proto index fa42a7aeec1c..8bbb961c1a32 100644 --- a/generated_api_shadow/envoy/api/v2/core/config_source.proto +++ b/generated_api_shadow/envoy/api/v2/core/config_source.proto @@ -57,10 +57,6 @@ message ApiConfigSource { // Using the delta xDS gRPC service, i.e. DeltaDiscovery{Request,Response} // rather than Discovery{Request,Response}. Rather than sending Envoy the entire state // with every update, the xDS server only sends what has changed since the last update. - // - // DELTA_GRPC is not yet entirely implemented! Initially, only CDS is available. - // Do not use for other xDSes. - // [#comment:TODO(fredlas) update/remove this warning when appropriate.] DELTA_GRPC = 3; } diff --git a/generated_api_shadow/envoy/config/core/v3/config_source.proto b/generated_api_shadow/envoy/config/core/v3/config_source.proto index 159542a3e909..ce896e070ac7 100644 --- a/generated_api_shadow/envoy/config/core/v3/config_source.proto +++ b/generated_api_shadow/envoy/config/core/v3/config_source.proto @@ -58,10 +58,6 @@ message ApiConfigSource { // Using the delta xDS gRPC service, i.e. DeltaDiscovery{Request,Response} // rather than Discovery{Request,Response}. Rather than sending Envoy the entire state // with every update, the xDS server only sends what has changed since the last update. - // - // DELTA_GRPC is not yet entirely implemented! Initially, only CDS is available. - // Do not use for other xDSes. - // [#comment:TODO(fredlas) update/remove this warning when appropriate.] DELTA_GRPC = 3; } diff --git a/generated_api_shadow/envoy/config/core/v4alpha/config_source.proto b/generated_api_shadow/envoy/config/core/v4alpha/config_source.proto index 0cfc7fc59b94..4f532f089869 100644 --- a/generated_api_shadow/envoy/config/core/v4alpha/config_source.proto +++ b/generated_api_shadow/envoy/config/core/v4alpha/config_source.proto @@ -59,10 +59,6 @@ message ApiConfigSource { // Using the delta xDS gRPC service, i.e. DeltaDiscovery{Request,Response} // rather than Discovery{Request,Response}. Rather than sending Envoy the entire state // with every update, the xDS server only sends what has changed since the last update. - // - // DELTA_GRPC is not yet entirely implemented! Initially, only CDS is available. - // Do not use for other xDSes. - // [#comment:TODO(fredlas) update/remove this warning when appropriate.] 
DELTA_GRPC = 3; } From 4607b33c2fc494d7cd230cce1f06e0459c3fb186 Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Wed, 22 Apr 2020 10:01:12 -0700 Subject: [PATCH 008/909] issue template: clarify security/crash reporting (#10885) Signed-off-by: Matt Klein --- ISSUES.md | 54 +++++++++++++++++++++++++++++++++++++++++ ISSUE_TEMPLATE.md | 62 +++++++---------------------------------------- 2 files changed, 63 insertions(+), 53 deletions(-) create mode 100644 ISSUES.md diff --git a/ISSUES.md b/ISSUES.md new file mode 100644 index 000000000000..70ad1b0c2b15 --- /dev/null +++ b/ISSUES.md @@ -0,0 +1,54 @@ +**If you are reporting *any* crash or *any* potential security issue, *do not* +open an issue in this repo. Please report the issue via emailing +envoy-security@googlegroups.com where the issue will be triaged appropriately.** + +**Issue Template** + +*Title*: *One line description* + +*Description*: +>Describe the issue. Please be detailed. If a feature request, please +describe the desired behaviour, what scenario it enables and how it +would be used. + +[optional *Relevant Links*:] +>Any extra documentation required to understand the issue. + +**Bug Template** + +*Title*: *One line description* + +*Description*: +>What issue is being seen? Describe what should be happening instead of +the bug, for example: Envoy should not crash, the expected value isn't +returned, etc. + +*Repro steps*: +> Include sample requests, environment, etc. All data and inputs +required to reproduce the bug. + +>**Note**: The [Envoy_collect tool](https://github.com/envoyproxy/envoy/blob/master/tools/envoy_collect/README.md) +gathers a tarball with debug logs, config and the following admin +endpoints: /stats, /clusters and /server_info. Please note if there are +privacy concerns, sanitize the data prior to sharing the tarball/pasting. + +*Admin and Stats Output*: +>Include the admin output for the following endpoints: /stats, +/clusters, /routes, /server_info. For more information, refer to the +[admin endpoint documentation.](https://www.envoyproxy.io/docs/envoy/latest/operations/admin) + +>**Note**: If there are privacy concerns, sanitize the data prior to +sharing. + +*Config*: +>Include the config used to configure Envoy. + +*Logs*: +>Include the access logs and the Envoy logs. + +>**Note**: If there are privacy concerns, sanitize the data prior to +sharing. + +*Call Stack*: +> If the Envoy binary is crashing, a call stack is **required**. +Please refer to the [Bazel Stack trace documentation](https://github.com/envoyproxy/envoy/tree/master/bazel#stack-trace-symbol-resolution). diff --git a/ISSUE_TEMPLATE.md b/ISSUE_TEMPLATE.md index 8d87611c68e8..e38a93abbb69 100644 --- a/ISSUE_TEMPLATE.md +++ b/ISSUE_TEMPLATE.md @@ -1,56 +1,12 @@ -**WARNING: If you want to report crashes, leaking of sensitive information, -and/or other security issues, please consider -[reporting them using appropriate channels](https://github.com/envoyproxy/envoy#reporting-security-vulnerabilities).** +!!!ATTENTION!!! -**Issue Template** +If you are reporting *any* crash or *any* potential security issue, *do not* +open an issue in this repo. Please report the issue via emailing +envoy-security@googlegroups.com where the issue will be triaged appropriately. +Thank you in advance for helping to keep Envoy secure. -*Title*: *One line description* +!!!ATTENTION!!! -*Description*: ->Describe the issue. Please be detailed. 
If a feature request, please -describe the desired behaviour, what scenario it enables and how it -would be used. - -[optional *Relevant Links*:] ->Any extra documentation required to understand the issue. - - - -**Bug Template** - -*Title*: *One line description* - -*Description*: ->What issue is being seen? Describe what should be happening instead of -the bug, for example: Envoy should not crash, the expected value isn't -returned, etc. - -*Repro steps*: -> Include sample requests, environment, etc. All data and inputs -required to reproduce the bug. - ->**Note**: The [Envoy_collect tool](https://github.com/envoyproxy/envoy/blob/master/tools/envoy_collect/README.md) -gathers a tarball with debug logs, config and the following admin -endpoints: /stats, /clusters and /server_info. Please note if there are -privacy concerns, sanitize the data prior to sharing the tarball/pasting. - -*Admin and Stats Output*: ->Include the admin output for the following endpoints: /stats, -/clusters, /routes, /server_info. For more information, refer to the -[admin endpoint documentation.](https://www.envoyproxy.io/docs/envoy/latest/operations/admin) - ->**Note**: If there are privacy concerns, sanitize the data prior to -sharing. - -*Config*: ->Include the config used to configure Envoy. - -*Logs*: ->Include the access logs and the Envoy logs. - ->**Note**: If there are privacy concerns, sanitize the data prior to -sharing. - -*Call Stack*: -> If the Envoy binary is crashing, a call stack is **required**. -Please refer to the [Bazel Stack trace documentation](https://github.com/envoyproxy/envoy/tree/master/bazel#stack-trace-symbol-resolution). +If this is not a crash or potential security issue please use +[ISSUES.md](https://github.com/envoyproxy/envoy/blob/master/ISSUES.md) as a +template. 
From 239620e046cc79bfcf264ed5837fdf8816f0219b Mon Sep 17 00:00:00 2001 From: Kuat Date: Wed, 22 Apr 2020 13:20:47 -0700 Subject: [PATCH 009/909] wasm: clarify how configuration is passed (#10782) Signed-off-by: Kuat Yessenov --- api/BUILD | 1 - api/envoy/config/wasm/v2alpha/BUILD | 12 --- api/envoy/config/wasm/v2alpha/wasm.proto | 83 ------------------- api/envoy/extensions/wasm/v3/BUILD | 1 - api/envoy/extensions/wasm/v3/wasm.proto | 16 ++-- api/versioning/BUILD | 1 - docs/root/api-v2/config/config.rst | 1 - docs/root/api-v2/config/wasm/wasm.rst | 8 -- generated_api_shadow/BUILD | 1 - .../envoy/config/wasm/v2alpha/BUILD | 12 --- .../envoy/config/wasm/v2alpha/wasm.proto | 83 ------------------- .../envoy/extensions/wasm/v3/BUILD | 1 - .../envoy/extensions/wasm/v3/wasm.proto | 26 +++--- 13 files changed, 19 insertions(+), 227 deletions(-) delete mode 100644 api/envoy/config/wasm/v2alpha/BUILD delete mode 100644 api/envoy/config/wasm/v2alpha/wasm.proto delete mode 100644 docs/root/api-v2/config/wasm/wasm.rst delete mode 100644 generated_api_shadow/envoy/config/wasm/v2alpha/BUILD delete mode 100644 generated_api_shadow/envoy/config/wasm/v2alpha/wasm.proto diff --git a/api/BUILD b/api/BUILD index 0dafe82267e9..97a8554bc520 100644 --- a/api/BUILD +++ b/api/BUILD @@ -101,7 +101,6 @@ proto_library( "//envoy/config/transport_socket/alts/v2alpha:pkg", "//envoy/config/transport_socket/raw_buffer/v2:pkg", "//envoy/config/transport_socket/tap/v2alpha:pkg", - "//envoy/config/wasm/v2alpha:pkg", "//envoy/data/accesslog/v2:pkg", "//envoy/data/cluster/v2alpha:pkg", "//envoy/data/core/v2alpha:pkg", diff --git a/api/envoy/config/wasm/v2alpha/BUILD b/api/envoy/config/wasm/v2alpha/BUILD deleted file mode 100644 index 69168ad0cf24..000000000000 --- a/api/envoy/config/wasm/v2alpha/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/config/wasm/v2alpha/wasm.proto b/api/envoy/config/wasm/v2alpha/wasm.proto deleted file mode 100644 index b8f050a23d2b..000000000000 --- a/api/envoy/config/wasm/v2alpha/wasm.proto +++ /dev/null @@ -1,83 +0,0 @@ -syntax = "proto3"; - -package envoy.config.wasm.v2alpha; - -import "envoy/api/v2/core/base.proto"; - -import "google/protobuf/any.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.wasm.v2alpha"; -option java_outer_classname = "WasmProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.wasm.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Wasm service] - -// Configuration for a Wasm VM. -// [#next-free-field: 6] -// [#not-implemented-hide:] pending implementation. -message VmConfig { - // An ID which will be used along with a hash of the wasm code (or the name of the registered Null - // VM plugin) to determine which VM will be used for the plugin. All plugins which use the same - // *vm_id* and code will use the same VM. May be left blank. Sharing a VM between plugins can - // reduce memory utilization and make sharing of data easier which may have security implications. - // See ref: "TODO: add ref" for details. 
- string vm_id = 1; - - // The Wasm runtime type (either "v8" or "null" for code compiled into Envoy). - string runtime = 2 [(validate.rules).string = {min_bytes: 1}]; - - // The Wasm code that Envoy will execute. - api.v2.core.AsyncDataSource code = 3; - - // The Wasm configuration used in initialization of a new VM (proxy_on_start). - google.protobuf.Any configuration = 4; - - // Allow the wasm file to include pre-compiled code on VMs which support it. - // Warning: this should only be enable for trusted sources as the precompiled code is not - // verified. - bool allow_precompiled = 5; -} - -// Base Configuration for Wasm Plugins e.g. filters and services. -// [#next-free-field: 6] -// [#not-implemented-hide:] pending implementation. -message PluginConfig { - // A unique name for a filters/services in a VM for use in identifying the filter/service if - // multiple filters/services are handled by the same *vm_id* and *group_name* and for - // logging/debugging. - string name = 1; - - // A unique ID for a set of filters/services in a VM which will share a RootContext and Contexts - // if applicable (e.g. an Wasm HttpFilter and an Wasm AccessLog). If left blank, all - // filters/services with a blank group_name with the same *vm_id* will share Context(s). - string group_name = 2; - - // Configuration for finding or starting VM. - oneof vm_config { - VmConfig inline_vm_config = 3; - // In the future add referential VM configurations. - } - - // Filter/service configuration used to configure or reconfigure a plugin - // (proxy_on_configuration). - google.protobuf.Any configuration = 5; -} - -// WasmService is configured as a built-in *envoy.wasm_service* :ref:`ServiceConfig -// `. This opaque configuration will be used to -// create a Wasm Service. -// [#not-implemented-hide:] pending implementation. -message WasmService { - // General plugin configuration. - PluginConfig config = 1; - - // If true, create a single VM rather than creating one VM per worker. Such a singleton can - // not be used with filters. - bool singleton = 2; -} diff --git a/api/envoy/extensions/wasm/v3/BUILD b/api/envoy/extensions/wasm/v3/BUILD index d29790ff5e75..2c3dad6453b6 100644 --- a/api/envoy/extensions/wasm/v3/BUILD +++ b/api/envoy/extensions/wasm/v3/BUILD @@ -7,7 +7,6 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/core/v3:pkg", - "//envoy/config/wasm/v2alpha:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/api/envoy/extensions/wasm/v3/wasm.proto b/api/envoy/extensions/wasm/v3/wasm.proto index 8cbaf20a3906..73b7959cd95d 100644 --- a/api/envoy/extensions/wasm/v3/wasm.proto +++ b/api/envoy/extensions/wasm/v3/wasm.proto @@ -21,8 +21,6 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#next-free-field: 6] // [#not-implemented-hide:] pending implementation. message VmConfig { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.wasm.v2alpha.VmConfig"; - // An ID which will be used along with a hash of the wasm code (or the name of the registered Null // VM plugin) to determine which VM will be used for the plugin. All plugins which use the same // *vm_id* and code will use the same VM. May be left blank. Sharing a VM between plugins can @@ -36,7 +34,10 @@ message VmConfig { // The Wasm code that Envoy will execute. config.core.v3.AsyncDataSource code = 3; - // The Wasm configuration used in initialization of a new VM (proxy_on_start). 
+ // The Wasm configuration used in initialization of a new VM + // (proxy_on_start). `google.protobuf.Struct` is serialized as JSON before + // passing it to the plugin. `google.protobuf.BytesValue` and + // `google.protobuf.StringValue` are passed directly without the wrapper. google.protobuf.Any configuration = 4; // Allow the wasm file to include pre-compiled code on VMs which support it. @@ -49,9 +50,6 @@ message VmConfig { // [#next-free-field: 6] // [#not-implemented-hide:] pending implementation. message PluginConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.wasm.v2alpha.PluginConfig"; - // A unique name for a filters/services in a VM for use in identifying the filter/service if // multiple filters/services are handled by the same *vm_id* and *group_name* and for // logging/debugging. @@ -70,6 +68,9 @@ message PluginConfig { // Filter/service configuration used to configure or reconfigure a plugin // (proxy_on_configuration). + // `google.protobuf.Struct` is serialized as JSON before + // passing it to the plugin. `google.protobuf.BytesValue` and + // `google.protobuf.StringValue` are passed directly without the wrapper. google.protobuf.Any configuration = 5; } @@ -78,9 +79,6 @@ message PluginConfig { // create a Wasm Service. // [#not-implemented-hide:] pending implementation. message WasmService { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.wasm.v2alpha.WasmService"; - // General plugin configuration. PluginConfig config = 1; diff --git a/api/versioning/BUILD b/api/versioning/BUILD index 0ffaf85a1cdd..bbb683d8bd08 100644 --- a/api/versioning/BUILD +++ b/api/versioning/BUILD @@ -224,7 +224,6 @@ proto_library( "//envoy/config/transport_socket/alts/v2alpha:pkg", "//envoy/config/transport_socket/raw_buffer/v2:pkg", "//envoy/config/transport_socket/tap/v2alpha:pkg", - "//envoy/config/wasm/v2alpha:pkg", "//envoy/data/accesslog/v2:pkg", "//envoy/data/cluster/v2alpha:pkg", "//envoy/data/core/v2alpha:pkg", diff --git a/docs/root/api-v2/config/config.rst b/docs/root/api-v2/config/config.rst index feaa9c5b0c9a..8fe20069ebc7 100644 --- a/docs/root/api-v2/config/config.rst +++ b/docs/root/api-v2/config/config.rst @@ -17,4 +17,3 @@ Extensions grpc_credential/grpc_credential retry/retry trace/trace - wasm/wasm diff --git a/docs/root/api-v2/config/wasm/wasm.rst b/docs/root/api-v2/config/wasm/wasm.rst deleted file mode 100644 index 8ce884b18ba5..000000000000 --- a/docs/root/api-v2/config/wasm/wasm.rst +++ /dev/null @@ -1,8 +0,0 @@ -WASM -==== - -.. toctree:: - :glob: - :maxdepth: 2 - - v2alpha/* diff --git a/generated_api_shadow/BUILD b/generated_api_shadow/BUILD index a028250022dd..6aafa3e75588 100644 --- a/generated_api_shadow/BUILD +++ b/generated_api_shadow/BUILD @@ -111,7 +111,6 @@ proto_library( "//envoy/config/transport_socket/alts/v2alpha:pkg", "//envoy/config/transport_socket/raw_buffer/v2:pkg", "//envoy/config/transport_socket/tap/v2alpha:pkg", - "//envoy/config/wasm/v2alpha:pkg", "//envoy/data/accesslog/v2:pkg", "//envoy/data/accesslog/v3:pkg", "//envoy/data/cluster/v2alpha:pkg", diff --git a/generated_api_shadow/envoy/config/wasm/v2alpha/BUILD b/generated_api_shadow/envoy/config/wasm/v2alpha/BUILD deleted file mode 100644 index 69168ad0cf24..000000000000 --- a/generated_api_shadow/envoy/config/wasm/v2alpha/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/wasm/v2alpha/wasm.proto b/generated_api_shadow/envoy/config/wasm/v2alpha/wasm.proto deleted file mode 100644 index b8f050a23d2b..000000000000 --- a/generated_api_shadow/envoy/config/wasm/v2alpha/wasm.proto +++ /dev/null @@ -1,83 +0,0 @@ -syntax = "proto3"; - -package envoy.config.wasm.v2alpha; - -import "envoy/api/v2/core/base.proto"; - -import "google/protobuf/any.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.wasm.v2alpha"; -option java_outer_classname = "WasmProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.wasm.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Wasm service] - -// Configuration for a Wasm VM. -// [#next-free-field: 6] -// [#not-implemented-hide:] pending implementation. -message VmConfig { - // An ID which will be used along with a hash of the wasm code (or the name of the registered Null - // VM plugin) to determine which VM will be used for the plugin. All plugins which use the same - // *vm_id* and code will use the same VM. May be left blank. Sharing a VM between plugins can - // reduce memory utilization and make sharing of data easier which may have security implications. - // See ref: "TODO: add ref" for details. - string vm_id = 1; - - // The Wasm runtime type (either "v8" or "null" for code compiled into Envoy). - string runtime = 2 [(validate.rules).string = {min_bytes: 1}]; - - // The Wasm code that Envoy will execute. - api.v2.core.AsyncDataSource code = 3; - - // The Wasm configuration used in initialization of a new VM (proxy_on_start). - google.protobuf.Any configuration = 4; - - // Allow the wasm file to include pre-compiled code on VMs which support it. - // Warning: this should only be enable for trusted sources as the precompiled code is not - // verified. - bool allow_precompiled = 5; -} - -// Base Configuration for Wasm Plugins e.g. filters and services. -// [#next-free-field: 6] -// [#not-implemented-hide:] pending implementation. -message PluginConfig { - // A unique name for a filters/services in a VM for use in identifying the filter/service if - // multiple filters/services are handled by the same *vm_id* and *group_name* and for - // logging/debugging. - string name = 1; - - // A unique ID for a set of filters/services in a VM which will share a RootContext and Contexts - // if applicable (e.g. an Wasm HttpFilter and an Wasm AccessLog). If left blank, all - // filters/services with a blank group_name with the same *vm_id* will share Context(s). - string group_name = 2; - - // Configuration for finding or starting VM. - oneof vm_config { - VmConfig inline_vm_config = 3; - // In the future add referential VM configurations. - } - - // Filter/service configuration used to configure or reconfigure a plugin - // (proxy_on_configuration). - google.protobuf.Any configuration = 5; -} - -// WasmService is configured as a built-in *envoy.wasm_service* :ref:`ServiceConfig -// `. This opaque configuration will be used to -// create a Wasm Service. -// [#not-implemented-hide:] pending implementation. 
-message WasmService { - // General plugin configuration. - PluginConfig config = 1; - - // If true, create a single VM rather than creating one VM per worker. Such a singleton can - // not be used with filters. - bool singleton = 2; -} diff --git a/generated_api_shadow/envoy/extensions/wasm/v3/BUILD b/generated_api_shadow/envoy/extensions/wasm/v3/BUILD index d29790ff5e75..2c3dad6453b6 100644 --- a/generated_api_shadow/envoy/extensions/wasm/v3/BUILD +++ b/generated_api_shadow/envoy/extensions/wasm/v3/BUILD @@ -7,7 +7,6 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/core/v3:pkg", - "//envoy/config/wasm/v2alpha:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/generated_api_shadow/envoy/extensions/wasm/v3/wasm.proto b/generated_api_shadow/envoy/extensions/wasm/v3/wasm.proto index 16cae01897e0..73b7959cd95d 100644 --- a/generated_api_shadow/envoy/extensions/wasm/v3/wasm.proto +++ b/generated_api_shadow/envoy/extensions/wasm/v3/wasm.proto @@ -21,8 +21,6 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#next-free-field: 6] // [#not-implemented-hide:] pending implementation. message VmConfig { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.wasm.v2alpha.VmConfig"; - // An ID which will be used along with a hash of the wasm code (or the name of the registered Null // VM plugin) to determine which VM will be used for the plugin. All plugins which use the same // *vm_id* and code will use the same VM. May be left blank. Sharing a VM between plugins can @@ -36,7 +34,10 @@ message VmConfig { // The Wasm code that Envoy will execute. config.core.v3.AsyncDataSource code = 3; - // The Wasm configuration used in initialization of a new VM (proxy_on_start). + // The Wasm configuration used in initialization of a new VM + // (proxy_on_start). `google.protobuf.Struct` is serialized as JSON before + // passing it to the plugin. `google.protobuf.BytesValue` and + // `google.protobuf.StringValue` are passed directly without the wrapper. google.protobuf.Any configuration = 4; // Allow the wasm file to include pre-compiled code on VMs which support it. @@ -49,9 +50,6 @@ message VmConfig { // [#next-free-field: 6] // [#not-implemented-hide:] pending implementation. message PluginConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.wasm.v2alpha.PluginConfig"; - // A unique name for a filters/services in a VM for use in identifying the filter/service if // multiple filters/services are handled by the same *vm_id* and *group_name* and for // logging/debugging. @@ -62,15 +60,18 @@ message PluginConfig { // filters/services with a blank group_name with the same *vm_id* will share Context(s). string group_name = 2; - google.protobuf.Any configuration = 5; - // In the future add referential VM configurations. - // Configuration for finding or starting VM. oneof vm_config { - // Filter/service configuration used to configure or reconfigure a plugin - // (proxy_on_configuration). VmConfig inline_vm_config = 3; + // In the future add referential VM configurations. } + + // Filter/service configuration used to configure or reconfigure a plugin + // (proxy_on_configuration). + // `google.protobuf.Struct` is serialized as JSON before + // passing it to the plugin. `google.protobuf.BytesValue` and + // `google.protobuf.StringValue` are passed directly without the wrapper. 
+ google.protobuf.Any configuration = 5; } // WasmService is configured as a built-in *envoy.wasm_service* :ref:`ServiceConfig @@ -78,9 +79,6 @@ message PluginConfig { // create a Wasm Service. // [#not-implemented-hide:] pending implementation. message WasmService { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.wasm.v2alpha.WasmService"; - // General plugin configuration. PluginConfig config = 1; From afc19e124c71050309193fea20572883c60d0ef1 Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Wed, 22 Apr 2020 13:24:24 -0700 Subject: [PATCH 010/909] Delete legacy connection pool code. (#10881) Fixes #10868 Signed-off-by: Greg Greenway --- docs/root/version_history/current.rst | 2 + source/common/http/BUILD | 12 - source/common/http/conn_pool_base_legacy.cc | 98 -- source/common/http/conn_pool_base_legacy.h | 80 -- source/common/http/http1/BUILD | 30 - source/common/http/http1/conn_pool.cc | 11 +- source/common/http/http1/conn_pool_legacy.cc | 356 ------- source/common/http/http1/conn_pool_legacy.h | 149 --- source/common/http/http2/BUILD | 20 - source/common/http/http2/conn_pool.cc | 11 +- source/common/http/http2/conn_pool_legacy.cc | 309 ------ source/common/http/http2/conn_pool_legacy.h | 121 --- source/common/runtime/runtime_features.cc | 2 - test/common/http/http1/BUILD | 24 - .../http/http1/conn_pool_legacy_test.cc | 972 ------------------ test/common/http/http2/BUILD | 19 - .../http/http2/conn_pool_legacy_test.cc | 810 --------------- 17 files changed, 6 insertions(+), 3020 deletions(-) delete mode 100644 source/common/http/conn_pool_base_legacy.cc delete mode 100644 source/common/http/conn_pool_base_legacy.h delete mode 100644 source/common/http/http1/conn_pool_legacy.cc delete mode 100644 source/common/http/http1/conn_pool_legacy.h delete mode 100644 source/common/http/http2/conn_pool_legacy.cc delete mode 100644 source/common/http/http2/conn_pool_legacy.h delete mode 100644 test/common/http/http1/conn_pool_legacy_test.cc delete mode 100644 test/common/http/http2/conn_pool_legacy_test.cc diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 5cc3188b5e44..7dafeef3d4a2 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -15,6 +15,8 @@ Changes `google.api.HttpBody `_. * http: fixed a bug where the upgrade header was not cleared on responses to non-upgrade requests. Can be reverted temporarily by setting runtime feature `envoy.reloadable_features.fix_upgrade_response` to false. +* http: remove legacy connection pool code and their runtime features: `envoy.reloadable_features.new_http1_connection_pool_behavior` and + `envoy.reloadable_features.new_http2_connection_pool_behavior`. * logger: added :ref:`--log-format-prefix-with-location ` command line option to prefix '%v' with file path and line number. * network filters: added a :ref:`postgres proxy filter `. 
* request_id: added to :ref:`always_set_request_id_in_response setting ` diff --git a/source/common/http/BUILD b/source/common/http/BUILD index 02521f87ea7a..c4e8f00ad335 100644 --- a/source/common/http/BUILD +++ b/source/common/http/BUILD @@ -133,18 +133,6 @@ envoy_cc_library( ], ) -envoy_cc_library( - name = "conn_pool_base_legacy_lib", - srcs = ["conn_pool_base_legacy.cc"], - hdrs = ["conn_pool_base_legacy.h"], - deps = [ - "//include/envoy/http:conn_pool_interface", - "//include/envoy/stats:timespan_interface", - "//source/common/common:linked_object", - "//source/common/stats:timespan_lib", - ], -) - envoy_cc_library( name = "conn_manager_config_interface", hdrs = ["conn_manager_config.h"], diff --git a/source/common/http/conn_pool_base_legacy.cc b/source/common/http/conn_pool_base_legacy.cc deleted file mode 100644 index d50cb871bff3..000000000000 --- a/source/common/http/conn_pool_base_legacy.cc +++ /dev/null @@ -1,98 +0,0 @@ -#include "common/http/conn_pool_base_legacy.h" - -#include "common/stats/timespan_impl.h" - -namespace Envoy { -namespace Http { -namespace Legacy { -ConnPoolImplBase::ActiveClient::ActiveClient(Event::Dispatcher& dispatcher, - const Upstream::ClusterInfo& cluster) - : connect_timer_(dispatcher.createTimer([this]() -> void { onConnectTimeout(); })) { - - conn_connect_ms_ = std::make_unique( - cluster.stats().upstream_cx_connect_ms_, dispatcher.timeSource()); - conn_length_ = std::make_unique( - cluster.stats().upstream_cx_length_ms_, dispatcher.timeSource()); - connect_timer_->enableTimer(cluster.connectTimeout()); -} - -void ConnPoolImplBase::ActiveClient::recordConnectionSetup() { - conn_connect_ms_->complete(); - conn_connect_ms_.reset(); -} - -void ConnPoolImplBase::ActiveClient::disarmConnectTimeout() { - if (connect_timer_) { - connect_timer_->disableTimer(); - connect_timer_.reset(); - } -} - -ConnPoolImplBase::ActiveClient::ConnectionState ConnPoolImplBase::ActiveClient::connectionState() { - // We don't track any failure state, as the client should be deferred destroyed once a failure - // event is handled. - if (connect_timer_) { - return Connecting; - } - - return Connected; -} - -ConnPoolImplBase::PendingRequest::PendingRequest(ConnPoolImplBase& parent, ResponseDecoder& decoder, - ConnectionPool::Callbacks& callbacks) - : parent_(parent), decoder_(decoder), callbacks_(callbacks) { - parent_.host_->cluster().stats().upstream_rq_pending_total_.inc(); - parent_.host_->cluster().stats().upstream_rq_pending_active_.inc(); - parent_.host_->cluster().resourceManager(parent_.priority_).pendingRequests().inc(); -} - -ConnPoolImplBase::PendingRequest::~PendingRequest() { - parent_.host_->cluster().stats().upstream_rq_pending_active_.dec(); - parent_.host_->cluster().resourceManager(parent_.priority_).pendingRequests().dec(); -} - -ConnectionPool::Cancellable* -ConnPoolImplBase::newPendingRequest(ResponseDecoder& decoder, - ConnectionPool::Callbacks& callbacks) { - ENVOY_LOG(debug, "queueing request due to no available connections"); - PendingRequestPtr pending_request(new PendingRequest(*this, decoder, callbacks)); - pending_request->moveIntoList(std::move(pending_request), pending_requests_); - return pending_requests_.front().get(); -} - -void ConnPoolImplBase::purgePendingRequests( - const Upstream::HostDescriptionConstSharedPtr& host_description, - absl::string_view failure_reason, bool was_remote_close) { - // NOTE: We move the existing pending requests to a temporary list. 
This is done so that - // if retry logic submits a new request to the pool, we don't fail it inline. - pending_requests_to_purge_ = std::move(pending_requests_); - while (!pending_requests_to_purge_.empty()) { - PendingRequestPtr request = - pending_requests_to_purge_.front()->removeFromList(pending_requests_to_purge_); - host_->cluster().stats().upstream_rq_pending_failure_eject_.inc(); - request->callbacks_.onPoolFailure( - was_remote_close ? ConnectionPool::PoolFailureReason::RemoteConnectionFailure - : ConnectionPool::PoolFailureReason::LocalConnectionFailure, - failure_reason, host_description); - } -} - -void ConnPoolImplBase::onPendingRequestCancel(PendingRequest& request) { - ENVOY_LOG(debug, "cancelling pending request"); - if (!pending_requests_to_purge_.empty()) { - // If pending_requests_to_purge_ is not empty, it means that we are called from - // with-in a onPoolFailure callback invoked in purgePendingRequests (i.e. purgePendingRequests - // is down in the call stack). Remove this request from the list as it is cancelled, - // and there is no need to call its onPoolFailure callback. - request.removeFromList(pending_requests_to_purge_); - } else { - request.removeFromList(pending_requests_); - } - - host_->cluster().stats().upstream_rq_cancelled_.inc(); - checkForDrained(); -} - -} // namespace Legacy -} // namespace Http -} // namespace Envoy diff --git a/source/common/http/conn_pool_base_legacy.h b/source/common/http/conn_pool_base_legacy.h deleted file mode 100644 index 7c96ec3a1aaf..000000000000 --- a/source/common/http/conn_pool_base_legacy.h +++ /dev/null @@ -1,80 +0,0 @@ -#pragma once - -#include "envoy/http/conn_pool.h" -#include "envoy/stats/timespan.h" - -#include "common/common/linked_object.h" - -#include "absl/strings/string_view.h" - -namespace Envoy { -namespace Http { -namespace Legacy { - -// Base class that handles request queueing logic shared between connection pool implementations. -class ConnPoolImplBase : protected Logger::Loggable { -protected: - ConnPoolImplBase(Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority) - : host_(host), priority_(priority) {} - virtual ~ConnPoolImplBase() = default; - - // ActiveClient provides a base class for connection pool clients that handles connection timings - // as well as managing the connection timeout. - class ActiveClient { - public: - ActiveClient(Event::Dispatcher& dispatcher, const Upstream::ClusterInfo& cluster); - virtual ~ActiveClient() { conn_length_->complete(); } - - virtual void onConnectTimeout() PURE; - - void recordConnectionSetup(); - void disarmConnectTimeout(); - - enum ConnectionState { Connecting, Connected }; - ConnectionState connectionState(); - - private: - Event::TimerPtr connect_timer_; - Stats::TimespanPtr conn_connect_ms_; - Stats::TimespanPtr conn_length_; - }; - - struct PendingRequest : LinkedObject, public ConnectionPool::Cancellable { - PendingRequest(ConnPoolImplBase& parent, ResponseDecoder& decoder, - ConnectionPool::Callbacks& callbacks); - ~PendingRequest() override; - - // ConnectionPool::Cancellable - void cancel() override { parent_.onPendingRequestCancel(*this); } - - ConnPoolImplBase& parent_; - ResponseDecoder& decoder_; - ConnectionPool::Callbacks& callbacks_; - }; - - using PendingRequestPtr = std::unique_ptr; - - // Creates a new PendingRequest and enqueues it into the request queue. 
- ConnectionPool::Cancellable* newPendingRequest(ResponseDecoder& decoder, - ConnectionPool::Callbacks& callbacks); - // Removes the PendingRequest from the list of requests. Called when the PendingRequest is - // cancelled, e.g. when the stream is reset before a connection has been established. - void onPendingRequestCancel(PendingRequest& request); - - // Fails all pending requests, calling onPoolFailure on the associated callbacks. - void purgePendingRequests(const Upstream::HostDescriptionConstSharedPtr& host_description, - absl::string_view failure_reason, bool was_remote); - - // Must be implemented by sub class. Attempts to drain inactive clients. - virtual void checkForDrained() PURE; - - const Upstream::HostConstSharedPtr host_; - const Upstream::ResourcePriority priority_; - std::list pending_requests_; - // When calling purgePendingRequests, this list will be used to hold the requests we are about - // to purge. We need this if one cancelled requests cancels a different pending request - std::list pending_requests_to_purge_; -}; -} // namespace Legacy -} // namespace Http -} // namespace Envoy diff --git a/source/common/http/http1/BUILD b/source/common/http/http1/BUILD index 2709b312976e..0608280278da 100644 --- a/source/common/http/http1/BUILD +++ b/source/common/http/http1/BUILD @@ -43,42 +43,12 @@ envoy_cc_library( ], ) -envoy_cc_library( - name = "conn_pool_legacy_lib", - srcs = ["conn_pool_legacy.cc"], - hdrs = ["conn_pool_legacy.h"], - external_deps = ["abseil_optional"], - deps = [ - "//include/envoy/event:deferred_deletable", - "//include/envoy/event:dispatcher_interface", - "//include/envoy/event:timer_interface", - "//include/envoy/http:conn_pool_interface", - "//include/envoy/http:header_map_interface", - "//include/envoy/network:connection_interface", - "//include/envoy/stats:stats_interface", - "//include/envoy/stats:timespan_interface", - "//include/envoy/upstream:upstream_interface", - "//source/common/common:linked_object", - "//source/common/common:utility_lib", - "//source/common/http:codec_client_lib", - "//source/common/http:codec_wrappers_lib", - "//source/common/http:codes_lib", - "//source/common/http:conn_pool_base_legacy_lib", - "//source/common/http:headers_lib", - "//source/common/network:utility_lib", - "//source/common/runtime:runtime_features_lib", - "//source/common/stats:timespan_lib", - "//source/common/upstream:upstream_lib", - ], -) - envoy_cc_library( name = "conn_pool_lib", srcs = ["conn_pool.cc"], hdrs = ["conn_pool.h"], external_deps = ["abseil_optional"], deps = [ - ":conn_pool_legacy_lib", "//include/envoy/event:deferred_deletable", "//include/envoy/event:dispatcher_interface", "//include/envoy/event:timer_interface", diff --git a/source/common/http/http1/conn_pool.cc b/source/common/http/http1/conn_pool.cc index 9b82d9b4d7e4..f3d75f586341 100644 --- a/source/common/http/http1/conn_pool.cc +++ b/source/common/http/http1/conn_pool.cc @@ -13,7 +13,6 @@ #include "common/http/codec_client.h" #include "common/http/codes.h" #include "common/http/headers.h" -#include "common/http/http1/conn_pool_legacy.h" #include "common/runtime/runtime_features.h" #include "absl/strings/match.h" @@ -138,14 +137,8 @@ allocateConnPool(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr hos Upstream::ResourcePriority priority, const Network::ConnectionSocket::OptionsSharedPtr& options, const Network::TransportSocketOptionsSharedPtr& transport_socket_options) { - if (Runtime::runtimeFeatureEnabled( - 
"envoy.reloadable_features.new_http1_connection_pool_behavior")) { - return std::make_unique(dispatcher, host, priority, options, - transport_socket_options); - } else { - return std::make_unique( - dispatcher, host, priority, options, transport_socket_options); - } + return std::make_unique(dispatcher, host, priority, options, + transport_socket_options); } } // namespace Http1 diff --git a/source/common/http/http1/conn_pool_legacy.cc b/source/common/http/http1/conn_pool_legacy.cc deleted file mode 100644 index da834c2c104a..000000000000 --- a/source/common/http/http1/conn_pool_legacy.cc +++ /dev/null @@ -1,356 +0,0 @@ -#include "common/http/http1/conn_pool_legacy.h" - -#include -#include -#include - -#include "envoy/event/dispatcher.h" -#include "envoy/event/timer.h" -#include "envoy/http/codec.h" -#include "envoy/http/header_map.h" -#include "envoy/upstream/upstream.h" - -#include "common/common/utility.h" -#include "common/http/codec_client.h" -#include "common/http/codes.h" -#include "common/http/headers.h" -#include "common/network/utility.h" -#include "common/stats/timespan_impl.h" -#include "common/upstream/upstream_impl.h" - -#include "absl/strings/match.h" - -namespace Envoy { -namespace Http { -namespace Legacy { -namespace Http1 { - -ConnPoolImpl::ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, - Upstream::ResourcePriority priority, - const Network::ConnectionSocket::OptionsSharedPtr& options, - const Network::TransportSocketOptionsSharedPtr& transport_socket_options) - : ConnPoolImplBase(std::move(host), std::move(priority)), dispatcher_(dispatcher), - socket_options_(options), transport_socket_options_(transport_socket_options), - upstream_ready_timer_(dispatcher_.createTimer([this]() { onUpstreamReady(); })) {} - -ConnPoolImpl::~ConnPoolImpl() { - while (!ready_clients_.empty()) { - ready_clients_.front()->codec_client_->close(); - } - - while (!busy_clients_.empty()) { - busy_clients_.front()->codec_client_->close(); - } - - // Make sure all clients are destroyed before we are destroyed. - dispatcher_.clearDeferredDeleteList(); -} - -void ConnPoolImpl::drainConnections() { - while (!ready_clients_.empty()) { - ready_clients_.front()->codec_client_->close(); - } - - // We drain busy clients by manually setting remaining requests to 1. Thus, when the next - // response completes the client will be destroyed. 
- for (const auto& client : busy_clients_) { - client->remaining_requests_ = 1; - } -} - -void ConnPoolImpl::addDrainedCallback(DrainedCb cb) { - drained_callbacks_.push_back(cb); - checkForDrained(); -} - -bool ConnPoolImpl::hasActiveConnections() const { - return !pending_requests_.empty() || !busy_clients_.empty(); -} - -void ConnPoolImpl::attachRequestToClient(ActiveClient& client, ResponseDecoder& response_decoder, - ConnectionPool::Callbacks& callbacks) { - ASSERT(!client.stream_wrapper_); - host_->cluster().stats().upstream_rq_total_.inc(); - host_->stats().rq_total_.inc(); - client.stream_wrapper_ = std::make_unique(response_decoder, client); - callbacks.onPoolReady(*client.stream_wrapper_, client.real_host_description_, - client.codec_client_->streamInfo()); -} - -void ConnPoolImpl::checkForDrained() { - if (!drained_callbacks_.empty() && pending_requests_.empty() && busy_clients_.empty()) { - while (!ready_clients_.empty()) { - ready_clients_.front()->codec_client_->close(); - } - - for (const DrainedCb& cb : drained_callbacks_) { - cb(); - } - } -} - -void ConnPoolImpl::createNewConnection() { - ENVOY_LOG(debug, "creating a new connection"); - ActiveClientPtr client(new ActiveClient(*this)); - client->moveIntoList(std::move(client), busy_clients_); -} - -ConnectionPool::Cancellable* ConnPoolImpl::newStream(ResponseDecoder& response_decoder, - ConnectionPool::Callbacks& callbacks) { - if (!ready_clients_.empty()) { - ready_clients_.front()->moveBetweenLists(ready_clients_, busy_clients_); - ENVOY_CONN_LOG(debug, "using existing connection", *busy_clients_.front()->codec_client_); - attachRequestToClient(*busy_clients_.front(), response_decoder, callbacks); - return nullptr; - } - - if (host_->cluster().resourceManager(priority_).pendingRequests().canCreate()) { - bool can_create_connection = - host_->cluster().resourceManager(priority_).connections().canCreate(); - if (!can_create_connection) { - host_->cluster().stats().upstream_cx_overflow_.inc(); - } - - // If we have no connections at all, make one no matter what so we don't starve. - if ((ready_clients_.empty() && busy_clients_.empty()) || can_create_connection) { - createNewConnection(); - } - - return newPendingRequest(response_decoder, callbacks); - } else { - ENVOY_LOG(debug, "max pending requests overflow"); - callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::Overflow, absl::string_view(), - nullptr); - host_->cluster().stats().upstream_rq_pending_overflow_.inc(); - return nullptr; - } -} - -void ConnPoolImpl::onConnectionEvent(ActiveClient& client, Network::ConnectionEvent event) { - if (event == Network::ConnectionEvent::RemoteClose || - event == Network::ConnectionEvent::LocalClose) { - // The client died. - ENVOY_CONN_LOG(debug, "client disconnected, failure reason: {}", *client.codec_client_, - client.codec_client_->connectionFailureReason()); - - Envoy::Upstream::reportUpstreamCxDestroy(host_, event); - ActiveClientPtr removed; - bool check_for_drained = true; - if (client.stream_wrapper_) { - if (!client.stream_wrapper_->decode_complete_) { - Envoy::Upstream::reportUpstreamCxDestroyActiveRequest(host_, event); - } - - // There is an active request attached to this client. The underlying codec client will - // already have "reset" the stream to fire the reset callback. All we do here is just - // destroy the client. 
- removed = client.removeFromList(busy_clients_); - } else if (client.connectionState() == - ConnPoolImplBase::ActiveClient::ConnectionState::Connected) { - removed = client.removeFromList(ready_clients_); - check_for_drained = false; - } else { - // The only time this happens is if we actually saw a connect failure. - host_->cluster().stats().upstream_cx_connect_fail_.inc(); - host_->stats().cx_connect_fail_.inc(); - - removed = client.removeFromList(busy_clients_); - - // Raw connect failures should never happen under normal circumstances. If we have an upstream - // that is behaving badly, requests can get stuck here in the pending state. If we see a - // connect failure, we purge all pending requests so that calling code can determine what to - // do with the request. - ENVOY_CONN_LOG(debug, "purge pending, failure reason: {}", *client.codec_client_, - client.codec_client_->connectionFailureReason()); - purgePendingRequests(client.real_host_description_, - client.codec_client_->connectionFailureReason(), - event == Network::ConnectionEvent::RemoteClose); - } - - dispatcher_.deferredDelete(std::move(removed)); - - // If we have pending requests and we just lost a connection we should make a new one. - if (pending_requests_.size() > (ready_clients_.size() + busy_clients_.size())) { - createNewConnection(); - } - - if (check_for_drained) { - checkForDrained(); - } - } - - client.disarmConnectTimeout(); - - // Note that the order in this function is important. Concretely, we must destroy the connect - // timer before we process a connected idle client, because if this results in an immediate - // drain/destruction event, we key off of the existence of the connect timer above to determine - // whether the client is in the ready list (connected) or the busy list (failed to connect). - if (event == Network::ConnectionEvent::Connected) { - client.recordConnectionSetup(); - processIdleClient(client, false); - } -} - -void ConnPoolImpl::onDownstreamReset(ActiveClient& client) { - // If we get a downstream reset to an attached client, we just blow it away. - client.codec_client_->close(); -} - -void ConnPoolImpl::onResponseComplete(ActiveClient& client) { - ENVOY_CONN_LOG(debug, "response complete", *client.codec_client_); - if (!client.stream_wrapper_->encode_complete_) { - ENVOY_CONN_LOG(debug, "response before request complete", *client.codec_client_); - onDownstreamReset(client); - } else if (client.stream_wrapper_->close_connection_ || client.codec_client_->remoteClosed()) { - ENVOY_CONN_LOG(debug, "saw upstream close connection", *client.codec_client_); - onDownstreamReset(client); - } else if (client.remaining_requests_ > 0 && --client.remaining_requests_ == 0) { - ENVOY_CONN_LOG(debug, "maximum requests per connection", *client.codec_client_); - host_->cluster().stats().upstream_cx_max_requests_.inc(); - onDownstreamReset(client); - } else { - // Upstream connection might be closed right after response is complete. Setting delay=true - // here to attach pending requests in next dispatcher loop to handle that case. - // https://github.com/envoyproxy/envoy/issues/2715 - processIdleClient(client, true); - } -} - -void ConnPoolImpl::onUpstreamReady() { - upstream_ready_enabled_ = false; - while (!pending_requests_.empty() && !ready_clients_.empty()) { - ActiveClient& client = *ready_clients_.front(); - ENVOY_CONN_LOG(debug, "attaching to next request", *client.codec_client_); - // There is work to do so bind a request to the client and move it to the busy list. 
Pending - // requests are pushed onto the front, so pull from the back. - attachRequestToClient(client, pending_requests_.back()->decoder_, - pending_requests_.back()->callbacks_); - pending_requests_.pop_back(); - client.moveBetweenLists(ready_clients_, busy_clients_); - } -} - -void ConnPoolImpl::processIdleClient(ActiveClient& client, bool delay) { - client.stream_wrapper_.reset(); - if (pending_requests_.empty() || delay) { - // There is nothing to service or delayed processing is requested, so just move the connection - // into the ready list. - ENVOY_CONN_LOG(debug, "moving to ready", *client.codec_client_); - client.moveBetweenLists(busy_clients_, ready_clients_); - } else { - // There is work to do immediately so bind a request to the client and move it to the busy list. - // Pending requests are pushed onto the front, so pull from the back. - ENVOY_CONN_LOG(debug, "attaching to next request", *client.codec_client_); - attachRequestToClient(client, pending_requests_.back()->decoder_, - pending_requests_.back()->callbacks_); - pending_requests_.pop_back(); - } - - if (delay && !pending_requests_.empty() && !upstream_ready_enabled_) { - upstream_ready_enabled_ = true; - upstream_ready_timer_->enableTimer(std::chrono::milliseconds(0)); - } - - checkForDrained(); -} - -ConnPoolImpl::StreamWrapper::StreamWrapper(ResponseDecoder& response_decoder, ActiveClient& parent) - : RequestEncoderWrapper(parent.codec_client_->newStream(*this)), - ResponseDecoderWrapper(response_decoder), parent_(parent) { - - RequestEncoderWrapper::inner_.getStream().addCallbacks(*this); - parent_.parent_.host_->cluster().stats().upstream_rq_active_.inc(); - parent_.parent_.host_->stats().rq_active_.inc(); - - // TODO (tonya11en): At the time of writing, there is no way to mix different versions of HTTP - // traffic in the same cluster, so incrementing the request count in the per-cluster resource - // manager will not affect circuit breaking in any unexpected ways. Ideally, outstanding requests - // counts would be tracked the same way in all HTTP versions. 
- // - // See: https://github.com/envoyproxy/envoy/issues/9215 - parent_.parent_.host_->cluster().resourceManager(parent_.parent_.priority_).requests().inc(); -} - -ConnPoolImpl::StreamWrapper::~StreamWrapper() { - parent_.parent_.host_->cluster().stats().upstream_rq_active_.dec(); - parent_.parent_.host_->stats().rq_active_.dec(); - parent_.parent_.host_->cluster().resourceManager(parent_.parent_.priority_).requests().dec(); -} - -void ConnPoolImpl::StreamWrapper::onEncodeComplete() { encode_complete_ = true; } - -void ConnPoolImpl::StreamWrapper::decodeHeaders(ResponseHeaderMapPtr&& headers, bool end_stream) { - // If Connection: close OR - // Http/1.0 and not Connection: keep-alive OR - // Proxy-Connection: close - if ((headers->Connection() && - (absl::EqualsIgnoreCase(headers->Connection()->value().getStringView(), - Headers::get().ConnectionValues.Close))) || - (parent_.codec_client_->protocol() == Protocol::Http10 && - (!headers->Connection() || - !absl::EqualsIgnoreCase(headers->Connection()->value().getStringView(), - Headers::get().ConnectionValues.KeepAlive))) || - (headers->ProxyConnection() && - (absl::EqualsIgnoreCase(headers->ProxyConnection()->value().getStringView(), - Headers::get().ConnectionValues.Close)))) { - parent_.parent_.host_->cluster().stats().upstream_cx_close_notify_.inc(); - close_connection_ = true; - } - - ResponseDecoderWrapper::decodeHeaders(std::move(headers), end_stream); -} - -void ConnPoolImpl::StreamWrapper::onDecodeComplete() { - decode_complete_ = encode_complete_; - parent_.parent_.onResponseComplete(parent_); -} - -ConnPoolImpl::ActiveClient::ActiveClient(ConnPoolImpl& parent) - : ConnPoolImplBase::ActiveClient(parent.dispatcher_, parent.host_->cluster()), parent_(parent), - remaining_requests_(parent_.host_->cluster().maxRequestsPerConnection()) { - - Upstream::Host::CreateConnectionData data = parent_.host_->createConnection( - parent_.dispatcher_, parent_.socket_options_, parent_.transport_socket_options_); - real_host_description_ = data.host_description_; - codec_client_ = parent_.createCodecClient(data); - codec_client_->addConnectionCallbacks(*this); - - parent_.host_->cluster().stats().upstream_cx_total_.inc(); - parent_.host_->cluster().stats().upstream_cx_active_.inc(); - parent_.host_->cluster().stats().upstream_cx_http1_total_.inc(); - parent_.host_->stats().cx_total_.inc(); - parent_.host_->stats().cx_active_.inc(); - parent_.host_->cluster().resourceManager(parent_.priority_).connections().inc(); - - codec_client_->setConnectionStats( - {parent_.host_->cluster().stats().upstream_cx_rx_bytes_total_, - parent_.host_->cluster().stats().upstream_cx_rx_bytes_buffered_, - parent_.host_->cluster().stats().upstream_cx_tx_bytes_total_, - parent_.host_->cluster().stats().upstream_cx_tx_bytes_buffered_, - &parent_.host_->cluster().stats().bind_errors_, nullptr}); -} - -ConnPoolImpl::ActiveClient::~ActiveClient() { - parent_.host_->cluster().stats().upstream_cx_active_.dec(); - parent_.host_->stats().cx_active_.dec(); - parent_.host_->cluster().resourceManager(parent_.priority_).connections().dec(); -} - -void ConnPoolImpl::ActiveClient::onConnectTimeout() { - // We just close the client at this point. This will result in both a timeout and a connect - // failure and will fold into all the normal connect failure logic. 
- ENVOY_CONN_LOG(debug, "connect timeout", *codec_client_); - parent_.host_->cluster().stats().upstream_cx_connect_timeout_.inc(); - codec_client_->close(); -} - -CodecClientPtr ProdConnPoolImpl::createCodecClient(Upstream::Host::CreateConnectionData& data) { - CodecClientPtr codec{new CodecClientProd(CodecClient::Type::HTTP1, std::move(data.connection_), - data.host_description_, dispatcher_)}; - return codec; -} - -} // namespace Http1 -} // namespace Legacy -} // namespace Http -} // namespace Envoy diff --git a/source/common/http/http1/conn_pool_legacy.h b/source/common/http/http1/conn_pool_legacy.h deleted file mode 100644 index be76eb5e7769..000000000000 --- a/source/common/http/http1/conn_pool_legacy.h +++ /dev/null @@ -1,149 +0,0 @@ -#pragma once - -#include -#include -#include - -#include "envoy/event/deferred_deletable.h" -#include "envoy/event/timer.h" -#include "envoy/http/codec.h" -#include "envoy/http/conn_pool.h" -#include "envoy/network/connection.h" -#include "envoy/stats/timespan.h" -#include "envoy/upstream/upstream.h" - -#include "common/common/linked_object.h" -#include "common/http/codec_client.h" -#include "common/http/codec_wrappers.h" -#include "common/http/conn_pool_base_legacy.h" - -#include "absl/types/optional.h" - -namespace Envoy { -namespace Http { -namespace Legacy { -namespace Http1 { - -/** - * A connection pool implementation for HTTP/1.1 connections. - * NOTE: The connection pool does NOT do DNS resolution. It assumes it is being given a numeric IP - * address. Higher layer code should handle resolving DNS on error and creating a new pool - * bound to a different IP address. - */ -class ConnPoolImpl : public ConnectionPool::Instance, public Legacy::ConnPoolImplBase { -public: - ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, - Upstream::ResourcePriority priority, - const Network::ConnectionSocket::OptionsSharedPtr& options, - const Network::TransportSocketOptionsSharedPtr& transport_socket_options); - - ~ConnPoolImpl() override; - - // ConnectionPool::Instance - Http::Protocol protocol() const override { return Http::Protocol::Http11; } - void addDrainedCallback(DrainedCb cb) override; - void drainConnections() override; - bool hasActiveConnections() const override; - ConnectionPool::Cancellable* newStream(ResponseDecoder& response_decoder, - ConnectionPool::Callbacks& callbacks) override; - Upstream::HostDescriptionConstSharedPtr host() const override { return host_; }; - - // ConnPoolImplBase - void checkForDrained() override; - -protected: - struct ActiveClient; - - struct StreamWrapper : public RequestEncoderWrapper, - public ResponseDecoderWrapper, - public StreamCallbacks { - StreamWrapper(ResponseDecoder& response_decoder, ActiveClient& parent); - ~StreamWrapper() override; - - // StreamEncoderWrapper - void onEncodeComplete() override; - - // StreamDecoderWrapper - void decodeHeaders(ResponseHeaderMapPtr&& headers, bool end_stream) override; - void onPreDecodeComplete() override {} - void onDecodeComplete() override; - - // Http::StreamCallbacks - void onResetStream(StreamResetReason, absl::string_view) override { - parent_.parent_.onDownstreamReset(parent_); - } - void onAboveWriteBufferHighWatermark() override {} - void onBelowWriteBufferLowWatermark() override {} - - ActiveClient& parent_; - bool encode_complete_{}; - bool close_connection_{}; - bool decode_complete_{}; - }; - - using StreamWrapperPtr = std::unique_ptr; - - struct ActiveClient : ConnPoolImplBase::ActiveClient, - LinkedObject, - public 
Network::ConnectionCallbacks, - public Event::DeferredDeletable { - ActiveClient(ConnPoolImpl& parent); - ~ActiveClient() override; - - void onConnectTimeout() override; - - // Network::ConnectionCallbacks - void onEvent(Network::ConnectionEvent event) override { - parent_.onConnectionEvent(*this, event); - } - void onAboveWriteBufferHighWatermark() override {} - void onBelowWriteBufferLowWatermark() override {} - - ConnPoolImpl& parent_; - CodecClientPtr codec_client_; - Upstream::HostDescriptionConstSharedPtr real_host_description_; - StreamWrapperPtr stream_wrapper_; - uint64_t remaining_requests_; - }; - - using ActiveClientPtr = std::unique_ptr; - - void attachRequestToClient(ActiveClient& client, ResponseDecoder& response_decoder, - ConnectionPool::Callbacks& callbacks); - virtual CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) PURE; - void createNewConnection(); - void onConnectionEvent(ActiveClient& client, Network::ConnectionEvent event); - void onDownstreamReset(ActiveClient& client); - void onResponseComplete(ActiveClient& client); - void onUpstreamReady(); - void processIdleClient(ActiveClient& client, bool delay); - - Event::Dispatcher& dispatcher_; - std::list ready_clients_; - std::list busy_clients_; - std::list drained_callbacks_; - const Network::ConnectionSocket::OptionsSharedPtr socket_options_; - const Network::TransportSocketOptionsSharedPtr transport_socket_options_; - Event::TimerPtr upstream_ready_timer_; - bool upstream_ready_enabled_{false}; -}; - -/** - * Production implementation of the ConnPoolImpl. - */ -class ProdConnPoolImpl : public ConnPoolImpl { -public: - ProdConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, - Upstream::ResourcePriority priority, - const Network::ConnectionSocket::OptionsSharedPtr& options, - const Network::TransportSocketOptionsSharedPtr& transport_socket_options) - : ConnPoolImpl(dispatcher, host, priority, options, transport_socket_options) {} - - // ConnPoolImpl - CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) override; -}; - -} // namespace Http1 -} // namespace Legacy -} // namespace Http -} // namespace Envoy diff --git a/source/common/http/http2/BUILD b/source/common/http/http2/BUILD index 0790e3ee9cf5..5b177e408969 100644 --- a/source/common/http/http2/BUILD +++ b/source/common/http/http2/BUILD @@ -60,31 +60,11 @@ envoy_cc_library( ], ) -envoy_cc_library( - name = "conn_pool_legacy_lib", - srcs = ["conn_pool_legacy.cc"], - hdrs = ["conn_pool_legacy.h"], - deps = [ - "//include/envoy/event:dispatcher_interface", - "//include/envoy/event:timer_interface", - "//include/envoy/http:conn_pool_interface", - "//include/envoy/network:connection_interface", - "//include/envoy/stats:timespan_interface", - "//include/envoy/upstream:upstream_interface", - "//source/common/http:codec_client_lib", - "//source/common/http:conn_pool_base_legacy_lib", - "//source/common/network:utility_lib", - "//source/common/stats:timespan_lib", - "//source/common/upstream:upstream_lib", - ], -) - envoy_cc_library( name = "conn_pool_lib", srcs = ["conn_pool.cc"], hdrs = ["conn_pool.h"], deps = [ - ":conn_pool_legacy_lib", "//include/envoy/event:dispatcher_interface", "//include/envoy/upstream:upstream_interface", "//source/common/http:codec_client_lib", diff --git a/source/common/http/http2/conn_pool.cc b/source/common/http/http2/conn_pool.cc index fd73d1cbb092..6107357e5158 100644 --- a/source/common/http/http2/conn_pool.cc +++ b/source/common/http/http2/conn_pool.cc @@ 
-6,7 +6,6 @@ #include "envoy/upstream/upstream.h" #include "common/http/http2/codec_impl.h" -#include "common/http/http2/conn_pool_legacy.h" #include "common/runtime/runtime_features.h" namespace Envoy { @@ -98,14 +97,8 @@ allocateConnPool(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr hos Upstream::ResourcePriority priority, const Network::ConnectionSocket::OptionsSharedPtr& options, const Network::TransportSocketOptionsSharedPtr& transport_socket_options) { - if (Runtime::runtimeFeatureEnabled( - "envoy.reloadable_features.new_http2_connection_pool_behavior")) { - return std::make_unique(dispatcher, host, priority, options, - transport_socket_options); - } else { - return std::make_unique( - dispatcher, host, priority, options, transport_socket_options); - } + return std::make_unique(dispatcher, host, priority, options, + transport_socket_options); } } // namespace Http2 diff --git a/source/common/http/http2/conn_pool_legacy.cc b/source/common/http/http2/conn_pool_legacy.cc deleted file mode 100644 index d9834e1893ba..000000000000 --- a/source/common/http/http2/conn_pool_legacy.cc +++ /dev/null @@ -1,309 +0,0 @@ -#include "common/http/http2/conn_pool_legacy.h" - -#include -#include - -#include "envoy/event/dispatcher.h" -#include "envoy/event/timer.h" -#include "envoy/upstream/upstream.h" - -#include "common/http/http2/codec_impl.h" -#include "common/network/utility.h" -#include "common/upstream/upstream_impl.h" - -namespace Envoy { -namespace Http { -namespace Legacy { -namespace Http2 { - -ConnPoolImpl::ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, - Upstream::ResourcePriority priority, - const Network::ConnectionSocket::OptionsSharedPtr& options, - const Network::TransportSocketOptionsSharedPtr& transport_socket_options) - : ConnPoolImplBase(std::move(host), std::move(priority)), dispatcher_(dispatcher), - socket_options_(options), transport_socket_options_(transport_socket_options) {} - -ConnPoolImpl::~ConnPoolImpl() { - if (primary_client_) { - primary_client_->client_->close(); - } - - if (draining_client_) { - draining_client_->client_->close(); - } - - // Make sure all clients are destroyed before we are destroyed. 
- dispatcher_.clearDeferredDeleteList(); -} - -void ConnPoolImpl::ConnPoolImpl::drainConnections() { - if (primary_client_ != nullptr) { - movePrimaryClientToDraining(); - } -} - -void ConnPoolImpl::addDrainedCallback(DrainedCb cb) { - drained_callbacks_.push_back(cb); - checkForDrained(); -} - -bool ConnPoolImpl::hasActiveConnections() const { - if (primary_client_ && primary_client_->client_->numActiveRequests() > 0) { - return true; - } - - if (draining_client_ && draining_client_->client_->numActiveRequests() > 0) { - return true; - } - - return !pending_requests_.empty(); -} - -void ConnPoolImpl::checkForDrained() { - if (drained_callbacks_.empty()) { - return; - } - - bool drained = true; - if (primary_client_) { - if (primary_client_->client_->numActiveRequests() == 0) { - primary_client_->client_->close(); - ASSERT(!primary_client_); - } else { - drained = false; - } - } - - ASSERT(!draining_client_ || (draining_client_->client_->numActiveRequests() > 0)); - if (draining_client_ && draining_client_->client_->numActiveRequests() > 0) { - drained = false; - } - - if (drained) { - ENVOY_LOG(debug, "invoking drained callbacks"); - for (const DrainedCb& cb : drained_callbacks_) { - cb(); - } - } -} - -void ConnPoolImpl::newClientStream(ResponseDecoder& response_decoder, - ConnectionPool::Callbacks& callbacks) { - if (!host_->cluster().resourceManager(priority_).requests().canCreate()) { - ENVOY_LOG(debug, "max requests overflow"); - callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::Overflow, absl::string_view(), - nullptr); - host_->cluster().stats().upstream_rq_pending_overflow_.inc(); - } else { - ENVOY_CONN_LOG(debug, "creating stream", *primary_client_->client_); - primary_client_->total_streams_++; - host_->stats().rq_total_.inc(); - host_->stats().rq_active_.inc(); - host_->cluster().stats().upstream_rq_total_.inc(); - host_->cluster().stats().upstream_rq_active_.inc(); - host_->cluster().resourceManager(priority_).requests().inc(); - callbacks.onPoolReady(primary_client_->client_->newStream(response_decoder), - primary_client_->real_host_description_, - primary_client_->client_->streamInfo()); - } -} - -ConnectionPool::Cancellable* ConnPoolImpl::newStream(ResponseDecoder& response_decoder, - ConnectionPool::Callbacks& callbacks) { - ASSERT(drained_callbacks_.empty()); - - // First see if we need to handle max streams rollover. - uint64_t max_streams = host_->cluster().maxRequestsPerConnection(); - if (max_streams == 0) { - max_streams = maxTotalStreams(); - } - - if (primary_client_ && primary_client_->total_streams_ >= max_streams) { - movePrimaryClientToDraining(); - } - - if (!primary_client_) { - primary_client_ = std::make_unique(*this); - } - - // If the primary client is not connected yet, queue up the request. - if (!primary_client_->upstream_ready_) { - // If we're not allowed to enqueue more requests, fail fast. - if (!host_->cluster().resourceManager(priority_).pendingRequests().canCreate()) { - ENVOY_LOG(debug, "max pending requests overflow"); - callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::Overflow, absl::string_view(), - nullptr); - host_->cluster().stats().upstream_rq_pending_overflow_.inc(); - return nullptr; - } - - return newPendingRequest(response_decoder, callbacks); - } - - // We already have an active client that's connected to upstream, so attempt to establish a - // new stream. 
- newClientStream(response_decoder, callbacks); - return nullptr; -} - -void ConnPoolImpl::onConnectionEvent(ActiveClient& client, Network::ConnectionEvent event) { - if (event == Network::ConnectionEvent::RemoteClose || - event == Network::ConnectionEvent::LocalClose) { - ENVOY_CONN_LOG(debug, "client disconnected", *client.client_); - - Envoy::Upstream::reportUpstreamCxDestroy(host_, event); - if (client.closed_with_active_rq_) { - Envoy::Upstream::reportUpstreamCxDestroyActiveRequest(host_, event); - } - - if (client.connectionState() == ConnPoolImplBase::ActiveClient::ConnectionState::Connecting) { - host_->cluster().stats().upstream_cx_connect_fail_.inc(); - host_->stats().cx_connect_fail_.inc(); - - // Raw connect failures should never happen under normal circumstances. If we have an upstream - // that is behaving badly, requests can get stuck here in the pending state. If we see a - // connect failure, we purge all pending requests so that calling code can determine what to - // do with the request. - // NOTE: We move the existing pending requests to a temporary list. This is done so that - // if retry logic submits a new request to the pool, we don't fail it inline. - purgePendingRequests(client.real_host_description_, client.client_->connectionFailureReason(), - event == Network::ConnectionEvent::RemoteClose); - } - - if (&client == primary_client_.get()) { - ENVOY_CONN_LOG(debug, "destroying primary client", *client.client_); - dispatcher_.deferredDelete(std::move(primary_client_)); - } else { - ENVOY_CONN_LOG(debug, "destroying draining client", *client.client_); - dispatcher_.deferredDelete(std::move(draining_client_)); - } - - if (client.closed_with_active_rq_) { - checkForDrained(); - } - } - - if (event == Network::ConnectionEvent::Connected) { - client.recordConnectionSetup(); - - client.upstream_ready_ = true; - onUpstreamReady(); - } - - client.disarmConnectTimeout(); -} - -void ConnPoolImpl::movePrimaryClientToDraining() { - ENVOY_CONN_LOG(debug, "moving primary to draining", *primary_client_->client_); - if (draining_client_) { - // This should pretty much never happen, but is possible if we start draining and then get - // a goaway for example. In this case just kill the current draining connection. It's not - // worth keeping a list. - draining_client_->client_->close(); - } - - ASSERT(!draining_client_); - if (primary_client_->client_->numActiveRequests() == 0) { - // If we are making a new connection and the primary does not have any active requests just - // close it now. 
- primary_client_->client_->close(); - } else { - draining_client_ = std::move(primary_client_); - } - - ASSERT(!primary_client_); -} - -void ConnPoolImpl::onConnectTimeout(ActiveClient& client) { - ENVOY_CONN_LOG(debug, "connect timeout", *client.client_); - host_->cluster().stats().upstream_cx_connect_timeout_.inc(); - client.client_->close(); -} - -void ConnPoolImpl::onGoAway(ActiveClient& client) { - ENVOY_CONN_LOG(debug, "remote goaway", *client.client_); - host_->cluster().stats().upstream_cx_close_notify_.inc(); - if (&client == primary_client_.get()) { - movePrimaryClientToDraining(); - } -} - -void ConnPoolImpl::onStreamDestroy(ActiveClient& client) { - ENVOY_CONN_LOG(debug, "destroying stream: {} remaining", *client.client_, - client.client_->numActiveRequests()); - host_->stats().rq_active_.dec(); - host_->cluster().stats().upstream_rq_active_.dec(); - host_->cluster().resourceManager(priority_).requests().dec(); - if (&client == draining_client_.get() && client.client_->numActiveRequests() == 0) { - // Close out the draining client if we no long have active requests. - client.client_->close(); - } - - // If we are destroying this stream because of a disconnect, do not check for drain here. We will - // wait until the connection has been fully drained of streams and then check in the connection - // event callback. - if (!client.closed_with_active_rq_) { - checkForDrained(); - } -} - -void ConnPoolImpl::onStreamReset(ActiveClient& client, Http::StreamResetReason reason) { - if (reason == StreamResetReason::ConnectionTermination || - reason == StreamResetReason::ConnectionFailure) { - host_->cluster().stats().upstream_rq_pending_failure_eject_.inc(); - client.closed_with_active_rq_ = true; - } else if (reason == StreamResetReason::LocalReset) { - host_->cluster().stats().upstream_rq_tx_reset_.inc(); - } else if (reason == StreamResetReason::RemoteReset) { - host_->cluster().stats().upstream_rq_rx_reset_.inc(); - } -} - -void ConnPoolImpl::onUpstreamReady() { - // Establishes new codec streams for each pending request. 
- while (!pending_requests_.empty()) { - newClientStream(pending_requests_.back()->decoder_, pending_requests_.back()->callbacks_); - pending_requests_.pop_back(); - } -} - -ConnPoolImpl::ActiveClient::ActiveClient(ConnPoolImpl& parent) - : ConnPoolImplBase::ActiveClient(parent.dispatcher_, parent.host_->cluster()), parent_(parent) { - Upstream::Host::CreateConnectionData data = parent_.host_->createConnection( - parent_.dispatcher_, parent_.socket_options_, parent_.transport_socket_options_); - real_host_description_ = data.host_description_; - client_ = parent_.createCodecClient(data); - client_->addConnectionCallbacks(*this); - client_->setCodecClientCallbacks(*this); - client_->setCodecConnectionCallbacks(*this); - - parent_.host_->stats().cx_total_.inc(); - parent_.host_->stats().cx_active_.inc(); - parent_.host_->cluster().stats().upstream_cx_total_.inc(); - parent_.host_->cluster().stats().upstream_cx_active_.inc(); - parent_.host_->cluster().stats().upstream_cx_http2_total_.inc(); - - client_->setConnectionStats({parent_.host_->cluster().stats().upstream_cx_rx_bytes_total_, - parent_.host_->cluster().stats().upstream_cx_rx_bytes_buffered_, - parent_.host_->cluster().stats().upstream_cx_tx_bytes_total_, - parent_.host_->cluster().stats().upstream_cx_tx_bytes_buffered_, - &parent_.host_->cluster().stats().bind_errors_, nullptr}); -} - -ConnPoolImpl::ActiveClient::~ActiveClient() { - parent_.host_->stats().cx_active_.dec(); - parent_.host_->cluster().stats().upstream_cx_active_.dec(); -} - -CodecClientPtr ProdConnPoolImpl::createCodecClient(Upstream::Host::CreateConnectionData& data) { - CodecClientPtr codec{new CodecClientProd(CodecClient::Type::HTTP2, std::move(data.connection_), - data.host_description_, dispatcher_)}; - return codec; -} - -uint32_t ProdConnPoolImpl::maxTotalStreams() { return MAX_STREAMS; } - -} // namespace Http2 -} // namespace Legacy -} // namespace Http -} // namespace Envoy diff --git a/source/common/http/http2/conn_pool_legacy.h b/source/common/http/http2/conn_pool_legacy.h deleted file mode 100644 index 0ffb2e520a8d..000000000000 --- a/source/common/http/http2/conn_pool_legacy.h +++ /dev/null @@ -1,121 +0,0 @@ -#pragma once - -#include -#include -#include - -#include "envoy/event/timer.h" -#include "envoy/http/conn_pool.h" -#include "envoy/network/connection.h" -#include "envoy/stats/timespan.h" -#include "envoy/upstream/upstream.h" - -#include "common/http/codec_client.h" -#include "common/http/conn_pool_base_legacy.h" - -namespace Envoy { -namespace Http { -namespace Legacy { -namespace Http2 { - -/** - * Implementation of a "connection pool" for HTTP/2. This mainly handles stats as well as - * shifting to a new connection if we reach max streams on the primary. This is a base class - * used for both the prod implementation as well as the testing one. 
- */ -class ConnPoolImpl : public ConnectionPool::Instance, public Legacy::ConnPoolImplBase { -public: - ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, - Upstream::ResourcePriority priority, - const Network::ConnectionSocket::OptionsSharedPtr& options, - const Network::TransportSocketOptionsSharedPtr& transport_socket_options); - ~ConnPoolImpl() override; - - // Http::ConnectionPool::Instance - Http::Protocol protocol() const override { return Http::Protocol::Http2; } - void addDrainedCallback(DrainedCb cb) override; - void drainConnections() override; - bool hasActiveConnections() const override; - ConnectionPool::Cancellable* newStream(ResponseDecoder& response_decoder, - ConnectionPool::Callbacks& callbacks) override; - Upstream::HostDescriptionConstSharedPtr host() const override { return host_; }; - -protected: - struct ActiveClient : ConnPoolImplBase::ActiveClient, - public Network::ConnectionCallbacks, - public CodecClientCallbacks, - public Event::DeferredDeletable, - public Http::ConnectionCallbacks { - ActiveClient(ConnPoolImpl& parent); - ~ActiveClient() override; - - void onConnectTimeout() override { parent_.onConnectTimeout(*this); } - - // Network::ConnectionCallbacks - void onEvent(Network::ConnectionEvent event) override { - parent_.onConnectionEvent(*this, event); - } - void onAboveWriteBufferHighWatermark() override {} - void onBelowWriteBufferLowWatermark() override {} - - // CodecClientCallbacks - void onStreamDestroy() override { parent_.onStreamDestroy(*this); } - void onStreamReset(Http::StreamResetReason reason) override { - parent_.onStreamReset(*this, reason); - } - - // Http::ConnectionCallbacks - void onGoAway() override { parent_.onGoAway(*this); } - - ConnPoolImpl& parent_; - CodecClientPtr client_; - Upstream::HostDescriptionConstSharedPtr real_host_description_; - uint64_t total_streams_{}; - bool upstream_ready_{}; - bool closed_with_active_rq_{}; - }; - - using ActiveClientPtr = std::unique_ptr; - - // Http::ConnPoolImplBase - void checkForDrained() override; - - virtual CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) PURE; - virtual uint32_t maxTotalStreams() PURE; - void movePrimaryClientToDraining(); - void onConnectionEvent(ActiveClient& client, Network::ConnectionEvent event); - void onConnectTimeout(ActiveClient& client); - void onGoAway(ActiveClient& client); - void onStreamDestroy(ActiveClient& client); - void onStreamReset(ActiveClient& client, Http::StreamResetReason reason); - void newClientStream(ResponseDecoder& response_decoder, ConnectionPool::Callbacks& callbacks); - void onUpstreamReady(); - - Event::Dispatcher& dispatcher_; - ActiveClientPtr primary_client_; - ActiveClientPtr draining_client_; - std::list drained_callbacks_; - const Network::ConnectionSocket::OptionsSharedPtr socket_options_; - const Network::TransportSocketOptionsSharedPtr transport_socket_options_; -}; - -/** - * Production implementation of the HTTP/2 connection pool. - */ -class ProdConnPoolImpl : public ConnPoolImpl { -public: - using ConnPoolImpl::ConnPoolImpl; - -private: - CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) override; - uint32_t maxTotalStreams() override; - - // All streams are 2^31. Client streams are half that, minus stream 0. Just to be on the safe - // side we do 2^29. 
- static const uint64_t MAX_STREAMS = (1 << 29); -}; - -} // namespace Http2 -} // namespace Legacy -} // namespace Http -} // namespace Envoy diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc index 1ddc80c12f3d..6b853a9dfc7b 100644 --- a/source/common/runtime/runtime_features.cc +++ b/source/common/runtime/runtime_features.cc @@ -59,8 +59,6 @@ constexpr const char* runtime_features[] = { "envoy.reloadable_features.connection_header_sanitization", "envoy.reloadable_features.strict_authority_validation", "envoy.reloadable_features.reject_unsupported_transfer_encodings", - "envoy.reloadable_features.new_http1_connection_pool_behavior", - "envoy.reloadable_features.new_http2_connection_pool_behavior", "envoy.deprecated_features.allow_deprecated_extension_names", "envoy.reloadable_features.ext_authz_http_service_enable_case_sensitive_string_matcher", "envoy.reloadable_features.fix_upgrade_response", diff --git a/test/common/http/http1/BUILD b/test/common/http/http1/BUILD index 76ef5380d85a..491fcba090ea 100644 --- a/test/common/http/http1/BUILD +++ b/test/common/http/http1/BUILD @@ -64,27 +64,3 @@ envoy_cc_test( "//test/test_common:utility_lib", ], ) - -envoy_cc_test( - name = "conn_pool_legacy_test", - srcs = ["conn_pool_legacy_test.cc"], - deps = [ - "//source/common/buffer:buffer_lib", - "//source/common/event:dispatcher_lib", - "//source/common/http:codec_client_lib", - "//source/common/http/http1:conn_pool_legacy_lib", - "//source/common/network:utility_lib", - "//source/common/upstream:upstream_includes", - "//source/common/upstream:upstream_lib", - "//test/common/http:common_lib", - "//test/common/upstream:utility_lib", - "//test/mocks/buffer:buffer_mocks", - "//test/mocks/event:event_mocks", - "//test/mocks/http:http_mocks", - "//test/mocks/network:network_mocks", - "//test/mocks/runtime:runtime_mocks", - "//test/mocks/upstream:upstream_mocks", - "//test/test_common:simulated_time_system_lib", - "//test/test_common:utility_lib", - ], -) diff --git a/test/common/http/http1/conn_pool_legacy_test.cc b/test/common/http/http1/conn_pool_legacy_test.cc deleted file mode 100644 index c657b9529c2e..000000000000 --- a/test/common/http/http1/conn_pool_legacy_test.cc +++ /dev/null @@ -1,972 +0,0 @@ -#include -#include - -#include "envoy/http/codec.h" - -#include "common/buffer/buffer_impl.h" -#include "common/event/dispatcher_impl.h" -#include "common/http/codec_client.h" -#include "common/http/http1/conn_pool_legacy.h" -#include "common/network/utility.h" -#include "common/upstream/upstream_impl.h" - -#include "test/common/http/common.h" -#include "test/common/upstream/utility.h" -#include "test/mocks/buffer/mocks.h" -#include "test/mocks/event/mocks.h" -#include "test/mocks/http/mocks.h" -#include "test/mocks/network/mocks.h" -#include "test/mocks/runtime/mocks.h" -#include "test/mocks/upstream/mocks.h" -#include "test/test_common/printers.h" -#include "test/test_common/simulated_time_system.h" -#include "test/test_common/utility.h" - -#include "gmock/gmock.h" -#include "gtest/gtest.h" - -using testing::_; -using testing::DoAll; -using testing::InSequence; -using testing::Invoke; -using testing::NiceMock; -using testing::Property; -using testing::Return; -using testing::ReturnRef; -using testing::SaveArg; - -namespace Envoy { -namespace Http { -namespace Legacy { -namespace Http1 { -namespace { - -/** - * A test version of ConnPoolImpl that allows for mocking beneath the codec clients. 
- */ -class ConnPoolImplForTest : public ConnPoolImpl { -public: - ConnPoolImplForTest(Event::MockDispatcher& dispatcher, - Upstream::ClusterInfoConstSharedPtr cluster, - NiceMock* upstream_ready_timer) - : ConnPoolImpl(dispatcher, Upstream::makeTestHost(cluster, "tcp://127.0.0.1:9000"), - Upstream::ResourcePriority::Default, nullptr, nullptr), - api_(Api::createApiForTest()), mock_dispatcher_(dispatcher), - mock_upstream_ready_timer_(upstream_ready_timer) {} - - ~ConnPoolImplForTest() override { - EXPECT_EQ(0U, ready_clients_.size()); - EXPECT_EQ(0U, busy_clients_.size()); - EXPECT_EQ(0U, pending_requests_.size()); - } - - struct TestCodecClient { - Http::MockClientConnection* codec_; - Network::MockClientConnection* connection_; - CodecClient* codec_client_; - Event::MockTimer* connect_timer_; - Event::DispatcherPtr client_dispatcher_; - }; - - CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) override { - // We expect to own the connection, but already have it, so just release it to prevent it from - // getting deleted. - data.connection_.release(); - return CodecClientPtr{createCodecClient_()}; - } - - MOCK_METHOD0(createCodecClient_, CodecClient*()); - MOCK_METHOD0(onClientDestroy, void()); - - void expectClientCreate(Protocol protocol = Protocol::Http11) { - test_clients_.emplace_back(); - TestCodecClient& test_client = test_clients_.back(); - test_client.connection_ = new NiceMock(); - test_client.codec_ = new NiceMock(); - test_client.connect_timer_ = new NiceMock(&mock_dispatcher_); - std::shared_ptr cluster{new NiceMock()}; - test_client.client_dispatcher_ = api_->allocateDispatcher("test_thread"); - Network::ClientConnectionPtr connection{test_client.connection_}; - test_client.codec_client_ = new CodecClientForTest( - CodecClient::Type::HTTP1, std::move(connection), test_client.codec_, - [this](CodecClient* codec_client) -> void { - for (auto i = test_clients_.begin(); i != test_clients_.end(); i++) { - if (i->codec_client_ == codec_client) { - onClientDestroy(); - test_clients_.erase(i); - return; - } - } - }, - Upstream::makeTestHost(cluster, "tcp://127.0.0.1:9000"), *test_client.client_dispatcher_); - EXPECT_CALL(*test_client.connect_timer_, enableTimer(_, _)); - EXPECT_CALL(mock_dispatcher_, createClientConnection_(_, _, _, _)) - .WillOnce(Return(test_client.connection_)); - EXPECT_CALL(*this, createCodecClient_()).WillOnce(Return(test_client.codec_client_)); - ON_CALL(*test_client.codec_, protocol()).WillByDefault(Return(protocol)); - } - - void expectEnableUpstreamReady() { - EXPECT_FALSE(upstream_ready_enabled_); - EXPECT_CALL(*mock_upstream_ready_timer_, enableTimer(_, _)).Times(1).RetiresOnSaturation(); - } - - void expectAndRunUpstreamReady() { - EXPECT_TRUE(upstream_ready_enabled_); - mock_upstream_ready_timer_->invokeCallback(); - EXPECT_FALSE(upstream_ready_enabled_); - } - - Api::ApiPtr api_; - Event::MockDispatcher& mock_dispatcher_; - NiceMock* mock_upstream_ready_timer_; - std::vector test_clients_; -}; - -/** - * Test fixture for all connection pool tests. 
- */ -class Http1ConnPoolImplLegacyTest : public testing::Test { -public: - Http1ConnPoolImplLegacyTest() - : upstream_ready_timer_(new NiceMock(&dispatcher_)), - conn_pool_(dispatcher_, cluster_, upstream_ready_timer_) {} - - ~Http1ConnPoolImplLegacyTest() override { - EXPECT_TRUE(TestUtility::gaugesZeroed(cluster_->stats_store_.gauges())); - } - - NiceMock dispatcher_; - std::shared_ptr cluster_{new NiceMock()}; - NiceMock* upstream_ready_timer_; - ConnPoolImplForTest conn_pool_; - NiceMock runtime_; -}; - -/** - * Helper for dealing with an active test request. - */ -struct ActiveTestRequest { - enum class Type { Pending, CreateConnection, Immediate }; - - ActiveTestRequest(Http1ConnPoolImplLegacyTest& parent, size_t client_index, Type type) - : parent_(parent), client_index_(client_index) { - uint64_t active_rq_observed = - parent_.cluster_->resourceManager(Upstream::ResourcePriority::Default).requests().count(); - uint64_t current_rq_total = parent_.cluster_->stats_.upstream_rq_total_.value(); - if (type == Type::CreateConnection) { - parent.conn_pool_.expectClientCreate(); - } - - if (type == Type::Immediate) { - expectNewStream(); - } - - handle_ = parent.conn_pool_.newStream(outer_decoder_, callbacks_); - - if (type == Type::Immediate) { - EXPECT_EQ(nullptr, handle_); - } else { - EXPECT_NE(nullptr, handle_); - } - - if (type == Type::CreateConnection) { - EXPECT_CALL(*parent_.conn_pool_.test_clients_[client_index_].connect_timer_, disableTimer()); - expectNewStream(); - parent.conn_pool_.test_clients_[client_index_].connection_->raiseEvent( - Network::ConnectionEvent::Connected); - } - if (type != Type::Pending) { - EXPECT_EQ(current_rq_total + 1, parent_.cluster_->stats_.upstream_rq_total_.value()); - EXPECT_EQ(active_rq_observed + 1, - parent_.cluster_->resourceManager(Upstream::ResourcePriority::Default) - .requests() - .count()); - } - } - - void completeResponse(bool with_body) { - // Test additional metric writes also. - Http::ResponseHeaderMapPtr response_headers( - new TestResponseHeaderMapImpl{{":status", "200"}, {"x-envoy-upstream-canary", "true"}}); - - inner_decoder_->decodeHeaders(std::move(response_headers), !with_body); - if (with_body) { - Buffer::OwnedImpl data; - inner_decoder_->decodeData(data, true); - } - } - - void expectNewStream() { - EXPECT_CALL(*parent_.conn_pool_.test_clients_[client_index_].codec_, newStream(_)) - .WillOnce(DoAll(SaveArgAddress(&inner_decoder_), ReturnRef(request_encoder_))); - EXPECT_CALL(callbacks_.pool_ready_, ready()); - } - - void startRequest() { - callbacks_.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - } - - Http1ConnPoolImplLegacyTest& parent_; - size_t client_index_; - NiceMock outer_decoder_; - Http::ConnectionPool::Cancellable* handle_{}; - NiceMock request_encoder_; - Http::ResponseDecoder* inner_decoder_{}; - ConnPoolCallbacks callbacks_; -}; - -/** - * Verify that the pool's host is a member of the cluster the pool was constructed with. - */ -TEST_F(Http1ConnPoolImplLegacyTest, Host) { - EXPECT_EQ(cluster_.get(), &conn_pool_.host()->cluster()); -} - -/** - * Verify that connections are drained when requested. 
- */ -TEST_F(Http1ConnPoolImplLegacyTest, DrainConnections) { - cluster_->resetResourceManager(2, 1024, 1024, 1, 1); - InSequence s; - - ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::CreateConnection); - r1.startRequest(); - - ActiveTestRequest r2(*this, 1, ActiveTestRequest::Type::CreateConnection); - r2.startRequest(); - - r1.completeResponse(false); - - // This will destroy the ready client and set requests remaining to 1 on the busy client. - conn_pool_.drainConnections(); - EXPECT_CALL(conn_pool_, onClientDestroy()); - dispatcher_.clearDeferredDeleteList(); - - // This will destroy the busy client when the response finishes. - r2.completeResponse(false); - EXPECT_CALL(conn_pool_, onClientDestroy()); - dispatcher_.clearDeferredDeleteList(); -} - -/** - * Test all timing stats are set. - */ -TEST_F(Http1ConnPoolImplLegacyTest, VerifyTimingStats) { - EXPECT_CALL(cluster_->stats_store_, - deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_cx_connect_ms"), _)); - EXPECT_CALL(cluster_->stats_store_, - deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_cx_length_ms"), _)); - - ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::CreateConnection); - r1.startRequest(); - r1.completeResponse(false); - - EXPECT_CALL(conn_pool_, onClientDestroy()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - dispatcher_.clearDeferredDeleteList(); -} - -/** - * Test that buffer limits are set. - */ -TEST_F(Http1ConnPoolImplLegacyTest, VerifyBufferLimits) { - NiceMock outer_decoder; - ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(); - EXPECT_CALL(*cluster_, perConnectionBufferLimitBytes()).WillOnce(Return(8192)); - EXPECT_CALL(*conn_pool_.test_clients_.back().connection_, setBufferLimits(8192)); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); - EXPECT_NE(nullptr, handle); - - EXPECT_CALL(conn_pool_, onClientDestroy()); - EXPECT_CALL(callbacks.pool_failure_, ready()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - dispatcher_.clearDeferredDeleteList(); -} - -/** - * Verify that canceling pending connections within the callback works. - */ -TEST_F(Http1ConnPoolImplLegacyTest, VerifyCancelInCallback) { - Http::ConnectionPool::Cancellable* handle1{}; - // In this scenario, all connections must succeed, so when - // one fails, the others are canceled. - // Note: We rely on the fact that the implementation cancels the second request first, - // to simplify the test. - ConnPoolCallbacks callbacks1; - EXPECT_CALL(callbacks1.pool_failure_, ready()).Times(0); - ConnPoolCallbacks callbacks2; - EXPECT_CALL(callbacks2.pool_failure_, ready()).WillOnce(Invoke([&]() -> void { - handle1->cancel(); - })); - - NiceMock outer_decoder; - // Create the first client. - conn_pool_.expectClientCreate(); - handle1 = conn_pool_.newStream(outer_decoder, callbacks1); - ASSERT_NE(nullptr, handle1); - - // Create the second client. - Http::ConnectionPool::Cancellable* handle2 = conn_pool_.newStream(outer_decoder, callbacks2); - ASSERT_NE(nullptr, handle2); - - // Simulate connection failure. - EXPECT_CALL(conn_pool_, onClientDestroy()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - dispatcher_.clearDeferredDeleteList(); -} - -/** - * Tests a request that generates a new connection, completes, and then a second request that uses - * the same connection. 
- */ -TEST_F(Http1ConnPoolImplLegacyTest, MultipleRequestAndResponse) { - InSequence s; - - // Request 1 should kick off a new connection. - ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::CreateConnection); - r1.startRequest(); - r1.completeResponse(false); - - // Request 2 should not. - ActiveTestRequest r2(*this, 0, ActiveTestRequest::Type::Immediate); - r2.startRequest(); - r2.completeResponse(true); - - // Cause the connection to go away. - EXPECT_CALL(conn_pool_, onClientDestroy()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - dispatcher_.clearDeferredDeleteList(); -} - -/** - * Test when we overflow max pending requests. - */ -TEST_F(Http1ConnPoolImplLegacyTest, MaxPendingRequests) { - cluster_->resetResourceManager(1, 1, 1024, 1, 1); - - EXPECT_EQ(0U, cluster_->circuit_breakers_stats_.rq_pending_open_.value()); - - NiceMock outer_decoder; - ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); - EXPECT_NE(nullptr, handle); - - NiceMock outer_decoder2; - ConnPoolCallbacks callbacks2; - EXPECT_CALL(callbacks2.pool_failure_, ready()); - Http::ConnectionPool::Cancellable* handle2 = conn_pool_.newStream(outer_decoder2, callbacks2); - EXPECT_EQ(nullptr, handle2); - - EXPECT_EQ(1U, cluster_->circuit_breakers_stats_.rq_pending_open_.value()); - - handle->cancel(); - - EXPECT_CALL(conn_pool_, onClientDestroy()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - dispatcher_.clearDeferredDeleteList(); - - EXPECT_EQ(1U, cluster_->stats_.upstream_rq_pending_overflow_.value()); -} - -/** - * Tests a connection failure before a request is bound which should result in the pending request - * getting purged. - */ -TEST_F(Http1ConnPoolImplLegacyTest, ConnectFailure) { - InSequence s; - - // Request 1 should kick off a new connection. - NiceMock outer_decoder; - ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); - EXPECT_NE(nullptr, handle); - - EXPECT_CALL(callbacks.pool_failure_, ready()); - EXPECT_CALL(*conn_pool_.test_clients_[0].connect_timer_, disableTimer()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - EXPECT_CALL(conn_pool_, onClientDestroy()); - dispatcher_.clearDeferredDeleteList(); - - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_connect_fail_.value()); - EXPECT_EQ(1U, cluster_->stats_.upstream_rq_pending_failure_eject_.value()); -} - -/** - * Tests that connection creation time is recorded correctly even in cases where - * there are multiple pending connection creation attempts to the same upstream. - */ -TEST_F(Http1ConnPoolImplLegacyTest, MeasureConnectTime) { - constexpr uint64_t sleep1_ms = 20; - constexpr uint64_t sleep2_ms = 10; - constexpr uint64_t sleep3_ms = 5; - Event::SimulatedTimeSystem simulated_time; - - // Allow concurrent creation of 2 upstream connections. - cluster_->resetResourceManager(2, 1024, 1024, 1, 1); - - InSequence s; - - // Start the first connect attempt. - conn_pool_.expectClientCreate(); - ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::Pending); - - // Move time forward and start the second connect attempt. 
- simulated_time.advanceTimeWait(std::chrono::milliseconds(sleep1_ms)); - conn_pool_.expectClientCreate(); - ActiveTestRequest r2(*this, 1, ActiveTestRequest::Type::Pending); - - // Move time forward, signal that the first connect completed and verify the time to connect. - uint64_t upstream_cx_connect_ms1 = 0; - simulated_time.advanceTimeWait(std::chrono::milliseconds(sleep2_ms)); - EXPECT_CALL(*conn_pool_.test_clients_[0].connect_timer_, disableTimer()); - EXPECT_CALL(cluster_->stats_store_, - deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_cx_connect_ms"), _)) - .WillOnce(SaveArg<1>(&upstream_cx_connect_ms1)); - r1.expectNewStream(); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); - EXPECT_EQ(sleep1_ms + sleep2_ms, upstream_cx_connect_ms1); - - // Move time forward, signal that the second connect completed and verify the time to connect. - uint64_t upstream_cx_connect_ms2 = 0; - simulated_time.advanceTimeWait(std::chrono::milliseconds(sleep3_ms)); - EXPECT_CALL(*conn_pool_.test_clients_[1].connect_timer_, disableTimer()); - EXPECT_CALL(cluster_->stats_store_, - deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_cx_connect_ms"), _)) - .WillOnce(SaveArg<1>(&upstream_cx_connect_ms2)); - r2.expectNewStream(); - conn_pool_.test_clients_[1].connection_->raiseEvent(Network::ConnectionEvent::Connected); - EXPECT_EQ(sleep2_ms + sleep3_ms, upstream_cx_connect_ms2); - - // Cleanup, cause the connections to go away. - for (auto& test_client : conn_pool_.test_clients_) { - EXPECT_CALL(conn_pool_, onClientDestroy()); - EXPECT_CALL( - cluster_->stats_store_, - deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_cx_length_ms"), _)); - test_client.connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - } - dispatcher_.clearDeferredDeleteList(); -} - -/** - * Tests a connect timeout. Also test that we can add a new request during ejection processing. - */ -TEST_F(Http1ConnPoolImplLegacyTest, ConnectTimeout) { - InSequence s; - - // Request 1 should kick off a new connection. - NiceMock outer_decoder1; - ConnPoolCallbacks callbacks1; - conn_pool_.expectClientCreate(); - EXPECT_NE(nullptr, conn_pool_.newStream(outer_decoder1, callbacks1)); - - NiceMock outer_decoder2; - ConnPoolCallbacks callbacks2; - EXPECT_CALL(callbacks1.pool_failure_, ready()).WillOnce(Invoke([&]() -> void { - conn_pool_.expectClientCreate(); - EXPECT_NE(nullptr, conn_pool_.newStream(outer_decoder2, callbacks2)); - })); - - conn_pool_.test_clients_[0].connect_timer_->invokeCallback(); - - EXPECT_CALL(callbacks2.pool_failure_, ready()); - conn_pool_.test_clients_[1].connect_timer_->invokeCallback(); - - EXPECT_CALL(conn_pool_, onClientDestroy()).Times(2); - dispatcher_.clearDeferredDeleteList(); - - EXPECT_EQ(0U, cluster_->stats_.upstream_rq_total_.value()); - EXPECT_EQ(2U, cluster_->stats_.upstream_cx_connect_fail_.value()); - EXPECT_EQ(2U, cluster_->stats_.upstream_cx_connect_timeout_.value()); -} - -/** - * Test cancelling before the request is bound to a connection. - */ -TEST_F(Http1ConnPoolImplLegacyTest, CancelBeforeBound) { - InSequence s; - - // Request 1 should kick off a new connection. 
- NiceMock outer_decoder; - ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); - EXPECT_NE(nullptr, handle); - - handle->cancel(); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); - - // Cause the connection to go away. - EXPECT_CALL(conn_pool_, onClientDestroy()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - dispatcher_.clearDeferredDeleteList(); -} - -/** - * Test an upstream disconnection while there is a bound request. - */ -TEST_F(Http1ConnPoolImplLegacyTest, DisconnectWhileBound) { - InSequence s; - - // Request 1 should kick off a new connection. - NiceMock outer_decoder; - ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); - EXPECT_NE(nullptr, handle); - - NiceMock request_encoder; - ResponseDecoder* inner_decoder; - EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) - .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); - EXPECT_CALL(callbacks.pool_ready_, ready()); - - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); - - // We should get a reset callback when the connection disconnects. - Http::MockStreamCallbacks stream_callbacks; - EXPECT_CALL(stream_callbacks, onResetStream(StreamResetReason::ConnectionTermination, _)); - request_encoder.getStream().addCallbacks(stream_callbacks); - - // Kill the connection while it has an active request. - EXPECT_CALL(conn_pool_, onClientDestroy()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - dispatcher_.clearDeferredDeleteList(); -} - -/** - * Test that we correctly handle reaching max connections. - */ -TEST_F(Http1ConnPoolImplLegacyTest, MaxConnections) { - InSequence s; - - EXPECT_EQ(0U, cluster_->circuit_breakers_stats_.cx_open_.value()); - - // Request 1 should kick off a new connection. - NiceMock outer_decoder1; - ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder1, callbacks); - - EXPECT_NE(nullptr, handle); - - // Request 2 should not kick off a new connection. - NiceMock outer_decoder2; - ConnPoolCallbacks callbacks2; - handle = conn_pool_.newStream(outer_decoder2, callbacks2); - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_overflow_.value()); - EXPECT_EQ(1U, cluster_->circuit_breakers_stats_.cx_open_.value()); - - EXPECT_NE(nullptr, handle); - - // Connect event will bind to request 1. - NiceMock request_encoder; - ResponseDecoder* inner_decoder; - EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) - .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); - EXPECT_CALL(callbacks.pool_ready_, ready()); - - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); - - // Finishing request 1 will immediately bind to request 2. 
- conn_pool_.expectEnableUpstreamReady(); - EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) - .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); - EXPECT_CALL(callbacks2.pool_ready_, ready()); - - callbacks.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - Http::ResponseHeaderMapPtr response_headers(new TestResponseHeaderMapImpl{{":status", "200"}}); - inner_decoder->decodeHeaders(std::move(response_headers), true); - - conn_pool_.expectAndRunUpstreamReady(); - callbacks2.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - // N.B. clang_tidy insists that we use std::make_unique which can not infer std::initialize_list. - response_headers = std::make_unique( - std::initializer_list>{{":status", "200"}}); - inner_decoder->decodeHeaders(std::move(response_headers), true); - - // Cause the connection to go away. - EXPECT_CALL(conn_pool_, onClientDestroy()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - dispatcher_.clearDeferredDeleteList(); -} - -/** - * Test when upstream closes connection without 'connection: close' like - * https://github.com/envoyproxy/envoy/pull/2715 - */ -TEST_F(Http1ConnPoolImplLegacyTest, ConnectionCloseWithoutHeader) { - InSequence s; - - // Request 1 should kick off a new connection. - NiceMock outer_decoder1; - ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder1, callbacks); - - EXPECT_NE(nullptr, handle); - - // Request 2 should not kick off a new connection. - NiceMock outer_decoder2; - ConnPoolCallbacks callbacks2; - handle = conn_pool_.newStream(outer_decoder2, callbacks2); - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_overflow_.value()); - - EXPECT_NE(nullptr, handle); - - // Connect event will bind to request 1. - NiceMock request_encoder; - ResponseDecoder* inner_decoder; - EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) - .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); - EXPECT_CALL(callbacks.pool_ready_, ready()); - - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); - - // Finishing request 1 will schedule binding the connection to request 2. - conn_pool_.expectEnableUpstreamReady(); - - callbacks.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - Http::ResponseHeaderMapPtr response_headers(new TestResponseHeaderMapImpl{{":status", "200"}}); - inner_decoder->decodeHeaders(std::move(response_headers), true); - - // Cause the connection to go away. - conn_pool_.expectClientCreate(); - EXPECT_CALL(conn_pool_, onClientDestroy()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - dispatcher_.clearDeferredDeleteList(); - - conn_pool_.expectAndRunUpstreamReady(); - - EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) - .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); - EXPECT_CALL(callbacks2.pool_ready_, ready()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); - - callbacks2.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - // N.B. clang_tidy insists that we use std::make_unique which can not infer std::initialize_list. 
- response_headers = std::make_unique( - std::initializer_list>{{":status", "200"}}); - inner_decoder->decodeHeaders(std::move(response_headers), true); - - EXPECT_CALL(conn_pool_, onClientDestroy()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - dispatcher_.clearDeferredDeleteList(); -} - -/** - * Test when upstream sends us 'connection: close' - */ -TEST_F(Http1ConnPoolImplLegacyTest, ConnectionCloseHeader) { - InSequence s; - - // Request 1 should kick off a new connection. - NiceMock outer_decoder; - ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); - - EXPECT_NE(nullptr, handle); - - NiceMock request_encoder; - ResponseDecoder* inner_decoder; - EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) - .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); - EXPECT_CALL(callbacks.pool_ready_, ready()); - - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); - callbacks.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - - // Response with 'connection: close' which should cause the connection to go away. - EXPECT_CALL(conn_pool_, onClientDestroy()); - Http::ResponseHeaderMapPtr response_headers( - new TestResponseHeaderMapImpl{{":status", "200"}, {"Connection", "Close"}}); - inner_decoder->decodeHeaders(std::move(response_headers), true); - dispatcher_.clearDeferredDeleteList(); - - EXPECT_EQ(0U, cluster_->stats_.upstream_cx_destroy_with_active_rq_.value()); -} - -/** - * Test when upstream sends us 'proxy-connection: close' - */ -TEST_F(Http1ConnPoolImplLegacyTest, ProxyConnectionCloseHeader) { - InSequence s; - - // Request 1 should kick off a new connection. - NiceMock outer_decoder; - ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); - - EXPECT_NE(nullptr, handle); - - NiceMock request_encoder; - ResponseDecoder* inner_decoder; - EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) - .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); - EXPECT_CALL(callbacks.pool_ready_, ready()); - - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); - callbacks.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - - // Response with 'proxy-connection: close' which should cause the connection to go away. - EXPECT_CALL(conn_pool_, onClientDestroy()); - Http::ResponseHeaderMapPtr response_headers( - new TestResponseHeaderMapImpl{{":status", "200"}, {"Proxy-Connection", "Close"}}); - inner_decoder->decodeHeaders(std::move(response_headers), true); - dispatcher_.clearDeferredDeleteList(); - - EXPECT_EQ(0U, cluster_->stats_.upstream_cx_destroy_with_active_rq_.value()); -} - -/** - * Test when upstream is HTTP/1.0 and does not send 'connection: keep-alive' - */ -TEST_F(Http1ConnPoolImplLegacyTest, Http10NoConnectionKeepAlive) { - InSequence s; - - // Request 1 should kick off a new connection. 
- NiceMock outer_decoder; - ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(Protocol::Http10); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); - - EXPECT_NE(nullptr, handle); - - NiceMock request_encoder; - ResponseDecoder* inner_decoder; - EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) - .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); - EXPECT_CALL(callbacks.pool_ready_, ready()); - - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); - callbacks.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - - // Response without 'connection: keep-alive' which should cause the connection to go away. - EXPECT_CALL(conn_pool_, onClientDestroy()); - Http::ResponseHeaderMapPtr response_headers( - new TestResponseHeaderMapImpl{{":protocol", "HTTP/1.0"}, {":status", "200"}}); - inner_decoder->decodeHeaders(std::move(response_headers), true); - dispatcher_.clearDeferredDeleteList(); - - EXPECT_EQ(0U, cluster_->stats_.upstream_cx_destroy_with_active_rq_.value()); -} - -/** - * Test when we reach max requests per connection. - */ -TEST_F(Http1ConnPoolImplLegacyTest, MaxRequestsPerConnection) { - InSequence s; - - cluster_->max_requests_per_connection_ = 1; - - // Request 1 should kick off a new connection. - NiceMock outer_decoder; - ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); - - EXPECT_NE(nullptr, handle); - - NiceMock request_encoder; - ResponseDecoder* inner_decoder; - EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) - .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); - EXPECT_CALL(callbacks.pool_ready_, ready()); - - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); - callbacks.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - - // Response with 'connection: close' which should cause the connection to go away. - EXPECT_CALL(conn_pool_, onClientDestroy()); - Http::ResponseHeaderMapPtr response_headers(new TestResponseHeaderMapImpl{{":status", "200"}}); - inner_decoder->decodeHeaders(std::move(response_headers), true); - dispatcher_.clearDeferredDeleteList(); - - EXPECT_EQ(0U, cluster_->stats_.upstream_cx_destroy_with_active_rq_.value()); - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_max_requests_.value()); -} - -TEST_F(Http1ConnPoolImplLegacyTest, ConcurrentConnections) { - cluster_->resetResourceManager(2, 1024, 1024, 1, 1); - InSequence s; - - ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::CreateConnection); - r1.startRequest(); - - ActiveTestRequest r2(*this, 1, ActiveTestRequest::Type::CreateConnection); - r2.startRequest(); - - ActiveTestRequest r3(*this, 0, ActiveTestRequest::Type::Pending); - - // Finish r1, which gets r3 going. - conn_pool_.expectEnableUpstreamReady(); - r3.expectNewStream(); - - r1.completeResponse(false); - conn_pool_.expectAndRunUpstreamReady(); - r3.startRequest(); - EXPECT_EQ(3U, cluster_->stats_.upstream_rq_total_.value()); - - r2.completeResponse(false); - r3.completeResponse(false); - - // Disconnect both clients. 
- EXPECT_CALL(conn_pool_, onClientDestroy()).Times(2); - conn_pool_.test_clients_[1].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - dispatcher_.clearDeferredDeleteList(); - - EXPECT_EQ(2U, cluster_->stats_.upstream_cx_destroy_.value()); - EXPECT_EQ(2U, cluster_->stats_.upstream_cx_destroy_remote_.value()); -} - -TEST_F(Http1ConnPoolImplLegacyTest, DrainCallback) { - InSequence s; - ReadyWatcher drained; - - EXPECT_CALL(drained, ready()); - conn_pool_.addDrainedCallback([&]() -> void { drained.ready(); }); - - ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::CreateConnection); - ActiveTestRequest r2(*this, 0, ActiveTestRequest::Type::Pending); - r2.handle_->cancel(); - EXPECT_EQ(1U, cluster_->stats_.upstream_rq_total_.value()); - - EXPECT_CALL(drained, ready()); - r1.startRequest(); - r1.completeResponse(false); - - EXPECT_CALL(conn_pool_, onClientDestroy()); - dispatcher_.clearDeferredDeleteList(); -} - -// Test draining a connection pool that has a pending connection. -TEST_F(Http1ConnPoolImplLegacyTest, DrainWhileConnecting) { - InSequence s; - ReadyWatcher drained; - - NiceMock outer_decoder; - ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); - EXPECT_NE(nullptr, handle); - - conn_pool_.addDrainedCallback([&]() -> void { drained.ready(); }); - handle->cancel(); - EXPECT_CALL(*conn_pool_.test_clients_[0].connection_, - close(Network::ConnectionCloseType::NoFlush)); - EXPECT_CALL(drained, ready()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); - - EXPECT_CALL(conn_pool_, onClientDestroy()); - dispatcher_.clearDeferredDeleteList(); -} - -TEST_F(Http1ConnPoolImplLegacyTest, RemoteCloseToCompleteResponse) { - InSequence s; - - NiceMock outer_decoder; - ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); - EXPECT_NE(nullptr, handle); - - NiceMock request_encoder; - ResponseDecoder* inner_decoder; - EXPECT_CALL(*conn_pool_.test_clients_[0].connect_timer_, disableTimer()); - EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) - .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); - EXPECT_CALL(callbacks.pool_ready_, ready()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); - - callbacks.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - - inner_decoder->decodeHeaders( - ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, false); - Buffer::OwnedImpl dummy_data("12345"); - inner_decoder->decodeData(dummy_data, false); - - Buffer::OwnedImpl empty_data; - EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, dispatch(BufferEqual(&empty_data))) - .WillOnce(Invoke([&](Buffer::Instance& data) -> void { - // Simulate the onResponseComplete call to decodeData since dispatch is mocked out. 
- inner_decoder->decodeData(data, true); - })); - - EXPECT_CALL(*conn_pool_.test_clients_[0].connection_, - close(Network::ConnectionCloseType::NoFlush)); - EXPECT_CALL(conn_pool_, onClientDestroy()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - dispatcher_.clearDeferredDeleteList(); - - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); -} - -TEST_F(Http1ConnPoolImplLegacyTest, NoActiveConnectionsByDefault) { - EXPECT_FALSE(conn_pool_.hasActiveConnections()); -} - -TEST_F(Http1ConnPoolImplLegacyTest, ActiveRequestHasActiveConnectionsTrue) { - ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::CreateConnection); - r1.startRequest(); - - EXPECT_TRUE(conn_pool_.hasActiveConnections()); - - // cleanup - r1.completeResponse(false); - conn_pool_.drainConnections(); - EXPECT_CALL(conn_pool_, onClientDestroy()); - dispatcher_.clearDeferredDeleteList(); -} - -TEST_F(Http1ConnPoolImplLegacyTest, ResponseCompletedConnectionReadyNoActiveConnections) { - ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::CreateConnection); - r1.startRequest(); - r1.completeResponse(false); - - EXPECT_FALSE(conn_pool_.hasActiveConnections()); - - conn_pool_.drainConnections(); - EXPECT_CALL(conn_pool_, onClientDestroy()); - dispatcher_.clearDeferredDeleteList(); -} - -TEST_F(Http1ConnPoolImplLegacyTest, PendingRequestIsConsideredActive) { - conn_pool_.expectClientCreate(); - ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::Pending); - - EXPECT_TRUE(conn_pool_.hasActiveConnections()); - - EXPECT_CALL(conn_pool_, onClientDestroy()); - r1.handle_->cancel(); - EXPECT_EQ(0U, cluster_->stats_.upstream_rq_total_.value()); - conn_pool_.drainConnections(); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - dispatcher_.clearDeferredDeleteList(); - - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); -} - -} // namespace -} // namespace Http1 -} // namespace Legacy -} // namespace Http -} // namespace Envoy diff --git a/test/common/http/http2/BUILD b/test/common/http/http2/BUILD index 870d27aced8c..241fe70d7b11 100644 --- a/test/common/http/http2/BUILD +++ b/test/common/http/http2/BUILD @@ -66,25 +66,6 @@ envoy_cc_test( ], ) -envoy_cc_test( - name = "conn_pool_legacy_test", - srcs = ["conn_pool_legacy_test.cc"], - deps = [ - "//source/common/event:dispatcher_lib", - "//source/common/http/http2:conn_pool_legacy_lib", - "//source/common/network:utility_lib", - "//source/common/upstream:upstream_includes", - "//source/common/upstream:upstream_lib", - "//test/common/http:common_lib", - "//test/common/upstream:utility_lib", - "//test/mocks/event:event_mocks", - "//test/mocks/http:http_mocks", - "//test/mocks/network:network_mocks", - "//test/mocks/runtime:runtime_mocks", - "//test/mocks/upstream:upstream_mocks", - ], -) - envoy_cc_test_library( name = "http2_frame", srcs = ["http2_frame.cc"], diff --git a/test/common/http/http2/conn_pool_legacy_test.cc b/test/common/http/http2/conn_pool_legacy_test.cc deleted file mode 100644 index ece922fd09ba..000000000000 --- a/test/common/http/http2/conn_pool_legacy_test.cc +++ /dev/null @@ -1,810 +0,0 @@ -#include -#include -#include - -#include "common/event/dispatcher_impl.h" -#include "common/http/http2/conn_pool_legacy.h" -#include "common/network/utility.h" -#include "common/upstream/upstream_impl.h" - -#include 
"test/common/http/common.h" -#include "test/common/upstream/utility.h" -#include "test/mocks/event/mocks.h" -#include "test/mocks/http/mocks.h" -#include "test/mocks/network/mocks.h" -#include "test/mocks/runtime/mocks.h" -#include "test/mocks/upstream/mocks.h" -#include "test/test_common/printers.h" - -#include "gmock/gmock.h" -#include "gtest/gtest.h" - -using testing::_; -using testing::DoAll; -using testing::InSequence; -using testing::Invoke; -using testing::NiceMock; -using testing::Property; -using testing::Return; -using testing::ReturnRef; - -namespace Envoy { -namespace Http { -namespace Legacy { -namespace Http2 { - -class TestConnPoolImpl : public ConnPoolImpl { -public: - using ConnPoolImpl::ConnPoolImpl; - - CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) override { - // We expect to own the connection, but already have it, so just release it to prevent it from - // getting deleted. - data.connection_.release(); - return CodecClientPtr{createCodecClient_(data)}; - } - - MOCK_METHOD1(createCodecClient_, CodecClient*(Upstream::Host::CreateConnectionData& data)); - - uint32_t maxTotalStreams() override { return max_streams_; } - - uint32_t max_streams_{std::numeric_limits::max()}; -}; - -class ActiveTestRequest; - -class Http2ConnPoolImplLegacyTest : public testing::Test { -public: - struct TestCodecClient { - Http::MockClientConnection* codec_; - Network::MockClientConnection* connection_; - CodecClientForTest* codec_client_; - Event::MockTimer* connect_timer_; - Event::DispatcherPtr client_dispatcher_; - }; - - Http2ConnPoolImplLegacyTest() - : api_(Api::createApiForTest(stats_store_)), - pool_(dispatcher_, host_, Upstream::ResourcePriority::Default, nullptr, nullptr) {} - - ~Http2ConnPoolImplLegacyTest() override { - EXPECT_TRUE(TestUtility::gaugesZeroed(cluster_->stats_store_.gauges())); - } - - // Creates a new test client, expecting a new connection to be created and associated - // with the new client. - void expectClientCreate(absl::optional buffer_limits = {}) { - test_clients_.emplace_back(); - TestCodecClient& test_client = test_clients_.back(); - test_client.connection_ = new NiceMock(); - test_client.codec_ = new NiceMock(); - test_client.connect_timer_ = new NiceMock(&dispatcher_); - test_client.client_dispatcher_ = api_->allocateDispatcher("test_thread"); - EXPECT_CALL(*test_client.connect_timer_, enableTimer(_, _)); - EXPECT_CALL(dispatcher_, createClientConnection_(_, _, _, _)) - .WillOnce(Return(test_client.connection_)); - auto cluster = std::make_shared>(); - Network::ClientConnectionPtr connection{test_client.connection_}; - test_client.codec_client_ = new CodecClientForTest( - CodecClient::Type::HTTP1, std::move(connection), test_client.codec_, - [this](CodecClient*) -> void { onClientDestroy(); }, - Upstream::makeTestHost(cluster, "tcp://127.0.0.1:9000"), *test_client.client_dispatcher_); - if (buffer_limits) { - EXPECT_CALL(*cluster_, perConnectionBufferLimitBytes()).WillOnce(Return(*buffer_limits)); - EXPECT_CALL(*test_clients_.back().connection_, setBufferLimits(*buffer_limits)); - } - EXPECT_CALL(pool_, createCodecClient_(_)) - .WillOnce(Invoke([this](Upstream::Host::CreateConnectionData&) -> CodecClient* { - return test_clients_.back().codec_client_; - })); - } - - // Connects a pending connection for client with the given index, asserting - // that the provided request receives onPoolReady. - void expectClientConnect(size_t index, ActiveTestRequest& r); - // Asserts that onPoolReady is called on the request. 
- void expectStreamConnect(size_t index, ActiveTestRequest& r); - - // Resets the connection belonging to the provided index, asserting that the - // provided request receives onPoolFailure. - void expectClientReset(size_t index, ActiveTestRequest& r); - // Asserts that the provided requests receives onPoolFailure. - void expectStreamReset(ActiveTestRequest& r); - - /** - * Closes a test client. - */ - void closeClient(size_t index); - - /** - * Completes an active request. Useful when this flow is not part of the main test assertions. - */ - void completeRequest(ActiveTestRequest& r); - - /** - * Completes an active request and closes the upstream connection. Useful when this flow is - * not part of the main test assertions. - */ - void completeRequestCloseUpstream(size_t index, ActiveTestRequest& r); - - MOCK_METHOD0(onClientDestroy, void()); - - Stats::IsolatedStoreImpl stats_store_; - Api::ApiPtr api_; - NiceMock dispatcher_; - std::shared_ptr cluster_{new NiceMock()}; - Upstream::HostSharedPtr host_{Upstream::makeTestHost(cluster_, "tcp://127.0.0.1:80")}; - TestConnPoolImpl pool_; - std::vector test_clients_; - NiceMock runtime_; -}; - -class ActiveTestRequest { -public: - ActiveTestRequest(Http2ConnPoolImplLegacyTest& test, size_t client_index, bool expect_connected) { - if (expect_connected) { - EXPECT_CALL(*test.test_clients_[client_index].codec_, newStream(_)) - .WillOnce(DoAll(SaveArgAddress(&inner_decoder_), ReturnRef(inner_encoder_))); - EXPECT_CALL(callbacks_.pool_ready_, ready()); - EXPECT_EQ(nullptr, test.pool_.newStream(decoder_, callbacks_)); - } else { - EXPECT_NE(nullptr, test.pool_.newStream(decoder_, callbacks_)); - } - } - - MockResponseDecoder decoder_; - ConnPoolCallbacks callbacks_; - ResponseDecoder* inner_decoder_{}; - NiceMock inner_encoder_; -}; - -void Http2ConnPoolImplLegacyTest::expectClientConnect(size_t index, ActiveTestRequest& r) { - expectStreamConnect(index, r); - EXPECT_CALL(*test_clients_[index].connect_timer_, disableTimer()); - test_clients_[index].connection_->raiseEvent(Network::ConnectionEvent::Connected); -} - -void Http2ConnPoolImplLegacyTest::expectStreamConnect(size_t index, ActiveTestRequest& r) { - EXPECT_CALL(*test_clients_[index].codec_, newStream(_)) - .WillOnce(DoAll(SaveArgAddress(&r.inner_decoder_), ReturnRef(r.inner_encoder_))); - EXPECT_CALL(r.callbacks_.pool_ready_, ready()); -} - -void Http2ConnPoolImplLegacyTest::expectClientReset(size_t index, ActiveTestRequest& r) { - expectStreamReset(r); - EXPECT_CALL(*test_clients_[0].connect_timer_, disableTimer()); - test_clients_[index].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); -} - -void Http2ConnPoolImplLegacyTest::expectStreamReset(ActiveTestRequest& r) { - EXPECT_CALL(r.callbacks_.pool_failure_, ready()); -} - -void Http2ConnPoolImplLegacyTest::closeClient(size_t index) { - test_clients_[index].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - EXPECT_CALL(*this, onClientDestroy()); - dispatcher_.clearDeferredDeleteList(); -} - -void Http2ConnPoolImplLegacyTest::completeRequest(ActiveTestRequest& r) { - EXPECT_CALL(r.inner_encoder_, encodeHeaders(_, true)); - r.callbacks_.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - EXPECT_CALL(r.decoder_, decodeHeaders_(_, true)); - r.inner_decoder_->decodeHeaders( - ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true); -} - -void Http2ConnPoolImplLegacyTest::completeRequestCloseUpstream(size_t index, ActiveTestRequest& r) { - 
completeRequest(r); - closeClient(index); -} - -/** - * Verify that the pool retains and returns the host it was constructed with. - */ -TEST_F(Http2ConnPoolImplLegacyTest, Host) { EXPECT_EQ(host_, pool_.host()); } - -/** - * Verify that connections are drained when requested. - */ -TEST_F(Http2ConnPoolImplLegacyTest, DrainConnections) { - InSequence s; - pool_.max_streams_ = 1; - - // Test drain connections call prior to any connections being created. - pool_.drainConnections(); - - expectClientCreate(); - ActiveTestRequest r1(*this, 0, false); - expectClientConnect(0, r1); - EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true)); - r1.callbacks_.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - - expectClientCreate(); - ActiveTestRequest r2(*this, 1, false); - expectClientConnect(1, r2); - EXPECT_CALL(r2.inner_encoder_, encodeHeaders(_, true)); - r2.callbacks_.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - - // This will move primary to draining and destroy draining. - pool_.drainConnections(); - EXPECT_CALL(*this, onClientDestroy()); - dispatcher_.clearDeferredDeleteList(); - - // This will destroy draining. - test_clients_[1].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - EXPECT_CALL(*this, onClientDestroy()); - dispatcher_.clearDeferredDeleteList(); - - EXPECT_EQ(2U, cluster_->stats_.upstream_cx_destroy_.value()); - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); -} - -// Verifies that requests are queued up in the conn pool until the connection becomes ready. -TEST_F(Http2ConnPoolImplLegacyTest, PendingRequests) { - InSequence s; - - // Create three requests. These should be queued up. - expectClientCreate(); - ActiveTestRequest r1(*this, 0, false); - ActiveTestRequest r2(*this, 0, false); - ActiveTestRequest r3(*this, 0, false); - - // The connection now becomes ready. This should cause all the queued requests to be sent. - expectStreamConnect(0, r1); - expectStreamConnect(0, r2); - expectClientConnect(0, r3); - - // Send a request through each stream. - EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true)); - r1.callbacks_.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - - EXPECT_CALL(r2.inner_encoder_, encodeHeaders(_, true)); - r2.callbacks_.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - - EXPECT_CALL(r3.inner_encoder_, encodeHeaders(_, true)); - r3.callbacks_.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - - // Since we now have an active connection, subsequent requests should connect immediately. - ActiveTestRequest r4(*this, 0, true); - - // Clean up everything. - test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - EXPECT_CALL(*this, onClientDestroy()); - dispatcher_.clearDeferredDeleteList(); - - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); -} - -// Verifies that requests are queued up in the conn pool and fail when the connection -// fails to be established. -TEST_F(Http2ConnPoolImplLegacyTest, PendingRequestsFailure) { - InSequence s; - pool_.max_streams_ = 10; - - // Create three requests. These should be queued up. 
- expectClientCreate(); - ActiveTestRequest r1(*this, 0, false); - ActiveTestRequest r2(*this, 0, false); - ActiveTestRequest r3(*this, 0, false); - - // The connection now becomes ready. This should cause all the queued requests to be sent. - // Note that these occur in reverse order due to the order we purge pending requests in. - expectStreamReset(r3); - expectStreamReset(r2); - expectClientReset(0, r1); - - expectClientCreate(); - // Since we have no active connection, subsequence requests will queue until - // the new connection is established. - ActiveTestRequest r4(*this, 1, false); - expectClientConnect(1, r4); - - // Clean up everything. - test_clients_[1].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - EXPECT_CALL(*this, onClientDestroy()).Times(2); - dispatcher_.clearDeferredDeleteList(); - - EXPECT_EQ(2U, cluster_->stats_.upstream_cx_destroy_.value()); - EXPECT_EQ(2U, cluster_->stats_.upstream_cx_destroy_remote_.value()); -} - -// Verifies that requests are queued up in the conn pool and respect max request circuit breaking -// when the connection is established. -TEST_F(Http2ConnPoolImplLegacyTest, PendingRequestsRequestOverflow) { - InSequence s; - - // Inflate the resource count to just under the limit. - auto& requests = host_->cluster().resourceManager(Upstream::ResourcePriority::Default).requests(); - for (uint64_t i = 0; i < requests.max() - 1; ++i) { - requests.inc(); - } - - // Create three requests. These should be queued up. - expectClientCreate(); - ActiveTestRequest r1(*this, 0, false); - ActiveTestRequest r2(*this, 0, false); - ActiveTestRequest r3(*this, 0, false); - - // We queued up three requests, but we can only afford one before hitting the circuit - // breaker. Thus, we expect to see 2 resets and one successful connect. - expectStreamConnect(0, r1); - expectStreamReset(r2); - expectStreamReset(r3); - EXPECT_CALL(*test_clients_[0].connect_timer_, disableTimer()); - test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); - - // Clean up everything. - for (uint64_t i = 0; i < requests.max() - 1; ++i) { - requests.dec(); - } - test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - EXPECT_CALL(*this, onClientDestroy()); - dispatcher_.clearDeferredDeleteList(); - - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); -} - -// Verifies that we honor the max pending requests circuit breaker. -TEST_F(Http2ConnPoolImplLegacyTest, PendingRequestsMaxPendingCircuitBreaker) { - InSequence s; - - // Inflate the resource count to just under the limit. - auto& pending_reqs = - host_->cluster().resourceManager(Upstream::ResourcePriority::Default).pendingRequests(); - for (uint64_t i = 0; i < pending_reqs.max() - 1; ++i) { - pending_reqs.inc(); - } - - // Create two requests. The first one should be enqueued, while the second one - // should fail fast due to us being above the max pending requests limit. - expectClientCreate(); - ActiveTestRequest r1(*this, 0, false); - - MockResponseDecoder decoder; - ConnPoolCallbacks callbacks; - EXPECT_CALL(callbacks.pool_failure_, ready()); - EXPECT_EQ(nullptr, pool_.newStream(decoder, callbacks)); - - expectStreamConnect(0, r1); - EXPECT_CALL(*test_clients_[0].connect_timer_, disableTimer()); - test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); - - // Clean up everything. 
- for (uint64_t i = 0; i < pending_reqs.max() - 1; ++i) { - pending_reqs.dec(); - } - test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - EXPECT_CALL(*this, onClientDestroy()); - dispatcher_.clearDeferredDeleteList(); - - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); -} - -TEST_F(Http2ConnPoolImplLegacyTest, VerifyConnectionTimingStats) { - InSequence s; - expectClientCreate(); - ActiveTestRequest r1(*this, 0, false); - EXPECT_CALL(cluster_->stats_store_, - deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_cx_connect_ms"), _)); - expectClientConnect(0, r1); - EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true)); - r1.callbacks_.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - EXPECT_CALL(r1.decoder_, decodeHeaders_(_, true)); - r1.inner_decoder_->decodeHeaders( - ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true); - - test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - EXPECT_CALL(*this, onClientDestroy()); - EXPECT_CALL(cluster_->stats_store_, - deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_cx_length_ms"), _)); - dispatcher_.clearDeferredDeleteList(); - - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); -} - -/** - * Test that buffer limits are set. - */ -TEST_F(Http2ConnPoolImplLegacyTest, VerifyBufferLimits) { - InSequence s; - expectClientCreate(8192); - ActiveTestRequest r1(*this, 0, false); - - expectClientConnect(0, r1); - EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true)); - r1.callbacks_.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - EXPECT_CALL(r1.decoder_, decodeHeaders_(_, true)); - r1.inner_decoder_->decodeHeaders( - ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true); - - test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - EXPECT_CALL(*this, onClientDestroy()); - dispatcher_.clearDeferredDeleteList(); - - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); -} - -TEST_F(Http2ConnPoolImplLegacyTest, RequestAndResponse) { - InSequence s; - - expectClientCreate(); - ActiveTestRequest r1(*this, 0, false); - expectClientConnect(0, r1); - EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true)); - r1.callbacks_.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - EXPECT_CALL(r1.decoder_, decodeHeaders_(_, true)); - r1.inner_decoder_->decodeHeaders( - ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true); - - ActiveTestRequest r2(*this, 0, true); - EXPECT_CALL(r2.inner_encoder_, encodeHeaders(_, true)); - r2.callbacks_.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - EXPECT_CALL(r2.decoder_, decodeHeaders_(_, true)); - r2.inner_decoder_->decodeHeaders( - ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true); - - test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - EXPECT_CALL(*this, onClientDestroy()); - dispatcher_.clearDeferredDeleteList(); - - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); - EXPECT_EQ(1U, 
cluster_->stats_.upstream_cx_destroy_remote_.value()); -} - -TEST_F(Http2ConnPoolImplLegacyTest, LocalReset) { - InSequence s; - - expectClientCreate(); - ActiveTestRequest r1(*this, 0, false); - expectClientConnect(0, r1); - EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, false)); - r1.callbacks_.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, false); - r1.callbacks_.outer_encoder_->getStream().resetStream(Http::StreamResetReason::LocalReset); - - test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - EXPECT_CALL(*this, onClientDestroy()); - dispatcher_.clearDeferredDeleteList(); - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); - EXPECT_EQ(1U, cluster_->stats_.upstream_rq_tx_reset_.value()); - EXPECT_EQ(0U, cluster_->circuit_breakers_stats_.rq_open_.value()); -} - -TEST_F(Http2ConnPoolImplLegacyTest, RemoteReset) { - InSequence s; - - expectClientCreate(); - ActiveTestRequest r1(*this, 0, false); - expectClientConnect(0, r1); - EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, false)); - r1.callbacks_.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, false); - r1.inner_encoder_.stream_.resetStream(Http::StreamResetReason::RemoteReset); - - test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - EXPECT_CALL(*this, onClientDestroy()); - dispatcher_.clearDeferredDeleteList(); - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); - EXPECT_EQ(1U, cluster_->stats_.upstream_rq_rx_reset_.value()); - EXPECT_EQ(0U, cluster_->circuit_breakers_stats_.rq_open_.value()); -} - -TEST_F(Http2ConnPoolImplLegacyTest, DrainDisconnectWithActiveRequest) { - InSequence s; - pool_.max_streams_ = 1; - - expectClientCreate(); - ActiveTestRequest r1(*this, 0, false); - expectClientConnect(0, r1); - EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true)); - r1.callbacks_.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - - ReadyWatcher drained; - pool_.addDrainedCallback([&]() -> void { drained.ready(); }); - - EXPECT_CALL(dispatcher_, deferredDelete_(_)); - EXPECT_CALL(drained, ready()); - test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - EXPECT_CALL(*this, onClientDestroy()); - dispatcher_.clearDeferredDeleteList(); - - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); -} - -TEST_F(Http2ConnPoolImplLegacyTest, DrainDisconnectDrainingWithActiveRequest) { - InSequence s; - pool_.max_streams_ = 1; - - expectClientCreate(); - ActiveTestRequest r1(*this, 0, false); - expectClientConnect(0, r1); - EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true)); - r1.callbacks_.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - - expectClientCreate(); - ActiveTestRequest r2(*this, 1, false); - expectClientConnect(1, r2); - EXPECT_CALL(r2.inner_encoder_, encodeHeaders(_, true)); - r2.callbacks_.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - - ReadyWatcher drained; - pool_.addDrainedCallback([&]() -> void { drained.ready(); }); - - EXPECT_CALL(dispatcher_, deferredDelete_(_)); - EXPECT_CALL(r2.decoder_, decodeHeaders_(_, true)); - 
r2.inner_decoder_->decodeHeaders( - ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true); - EXPECT_CALL(*this, onClientDestroy()); - dispatcher_.clearDeferredDeleteList(); - - EXPECT_CALL(dispatcher_, deferredDelete_(_)); - EXPECT_CALL(drained, ready()); - test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - EXPECT_CALL(*this, onClientDestroy()); - dispatcher_.clearDeferredDeleteList(); - - EXPECT_EQ(2U, cluster_->stats_.upstream_cx_destroy_.value()); - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); -} - -TEST_F(Http2ConnPoolImplLegacyTest, DrainPrimary) { - InSequence s; - pool_.max_streams_ = 1; - - expectClientCreate(); - ActiveTestRequest r1(*this, 0, false); - expectClientConnect(0, r1); - EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true)); - r1.callbacks_.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - - expectClientCreate(); - ActiveTestRequest r2(*this, 1, false); - expectClientConnect(1, r2); - EXPECT_CALL(r2.inner_encoder_, encodeHeaders(_, true)); - r2.callbacks_.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - - ReadyWatcher drained; - pool_.addDrainedCallback([&]() -> void { drained.ready(); }); - - EXPECT_CALL(dispatcher_, deferredDelete_(_)); - EXPECT_CALL(r2.decoder_, decodeHeaders_(_, true)); - r2.inner_decoder_->decodeHeaders( - ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true); - EXPECT_CALL(*this, onClientDestroy()); - dispatcher_.clearDeferredDeleteList(); - - EXPECT_CALL(dispatcher_, deferredDelete_(_)); - EXPECT_CALL(drained, ready()); - EXPECT_CALL(r1.decoder_, decodeHeaders_(_, true)); - r1.inner_decoder_->decodeHeaders( - ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true); - - EXPECT_CALL(*this, onClientDestroy()); - dispatcher_.clearDeferredDeleteList(); -} - -TEST_F(Http2ConnPoolImplLegacyTest, DrainPrimaryNoActiveRequest) { - InSequence s; - pool_.max_streams_ = 1; - - expectClientCreate(); - ActiveTestRequest r1(*this, 0, false); - expectClientConnect(0, r1); - EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true)); - r1.callbacks_.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - EXPECT_CALL(r1.decoder_, decodeHeaders_(_, true)); - r1.inner_decoder_->decodeHeaders( - ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true); - - EXPECT_CALL(dispatcher_, deferredDelete_(_)); - expectClientCreate(); - ActiveTestRequest r2(*this, 1, false); - expectClientConnect(1, r2); - EXPECT_CALL(*this, onClientDestroy()); - dispatcher_.clearDeferredDeleteList(); - EXPECT_CALL(r2.inner_encoder_, encodeHeaders(_, true)); - r2.callbacks_.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - EXPECT_CALL(r2.decoder_, decodeHeaders_(_, true)); - r2.inner_decoder_->decodeHeaders( - ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true); - - ReadyWatcher drained; - EXPECT_CALL(dispatcher_, deferredDelete_(_)); - EXPECT_CALL(drained, ready()); - pool_.addDrainedCallback([&]() -> void { drained.ready(); }); - - EXPECT_CALL(*this, onClientDestroy()); - dispatcher_.clearDeferredDeleteList(); -} - -TEST_F(Http2ConnPoolImplLegacyTest, ConnectTimeout) { - InSequence s; - - EXPECT_EQ(0U, cluster_->circuit_breakers_stats_.rq_open_.value()); - - expectClientCreate(); - ActiveTestRequest r1(*this, 0, 
false); - EXPECT_CALL(r1.callbacks_.pool_failure_, ready()); - test_clients_[0].connect_timer_->invokeCallback(); - - EXPECT_CALL(*this, onClientDestroy()); - dispatcher_.clearDeferredDeleteList(); - - EXPECT_EQ(0U, cluster_->circuit_breakers_stats_.rq_open_.value()); - - expectClientCreate(); - ActiveTestRequest r2(*this, 1, false); - expectClientConnect(1, r2); - EXPECT_CALL(r2.inner_encoder_, encodeHeaders(_, true)); - r2.callbacks_.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - EXPECT_CALL(r2.decoder_, decodeHeaders_(_, true)); - r2.inner_decoder_->decodeHeaders( - ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true); - - test_clients_[1].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - EXPECT_CALL(*this, onClientDestroy()); - dispatcher_.clearDeferredDeleteList(); - - EXPECT_EQ(1U, cluster_->stats_.upstream_rq_total_.value()); - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_connect_fail_.value()); - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_connect_timeout_.value()); - EXPECT_EQ(1U, cluster_->stats_.upstream_rq_pending_failure_eject_.value()); - EXPECT_EQ(2U, cluster_->stats_.upstream_cx_destroy_.value()); - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_local_.value()); - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); -} - -TEST_F(Http2ConnPoolImplLegacyTest, MaxGlobalRequests) { - cluster_->resetResourceManager(1024, 1024, 1, 1, 1); - InSequence s; - - expectClientCreate(); - ActiveTestRequest r1(*this, 0, false); - expectClientConnect(0, r1); - EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true)); - r1.callbacks_.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - - ConnPoolCallbacks callbacks; - MockResponseDecoder decoder; - EXPECT_CALL(callbacks.pool_failure_, ready()); - EXPECT_EQ(nullptr, pool_.newStream(decoder, callbacks)); - - test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - EXPECT_CALL(*this, onClientDestroy()); - dispatcher_.clearDeferredDeleteList(); - - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); - EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_remote_.value()); -} - -TEST_F(Http2ConnPoolImplLegacyTest, GoAway) { - InSequence s; - - expectClientCreate(); - ActiveTestRequest r1(*this, 0, false); - expectClientConnect(0, r1); - EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true)); - r1.callbacks_.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - EXPECT_CALL(r1.decoder_, decodeHeaders_(_, true)); - r1.inner_decoder_->decodeHeaders( - ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true); - - test_clients_[0].codec_client_->raiseGoAway(); - - expectClientCreate(); - ActiveTestRequest r2(*this, 1, false); - expectClientConnect(1, r2); - EXPECT_CALL(r2.inner_encoder_, encodeHeaders(_, true)); - r2.callbacks_.outer_encoder_->encodeHeaders( - TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - EXPECT_CALL(r2.decoder_, decodeHeaders_(_, true)); - r2.inner_decoder_->decodeHeaders( - ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true); - - test_clients_[1].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - EXPECT_CALL(*this, onClientDestroy()).Times(2); - dispatcher_.clearDeferredDeleteList(); - - EXPECT_EQ(1U, 
cluster_->stats_.upstream_cx_close_notify_.value()); -} - -TEST_F(Http2ConnPoolImplLegacyTest, NoActiveConnectionsByDefault) { - EXPECT_FALSE(pool_.hasActiveConnections()); -} - -// Show that an active request on the primary connection is considered active. -TEST_F(Http2ConnPoolImplLegacyTest, ActiveConnectionsHasActiveRequestsTrue) { - expectClientCreate(); - ActiveTestRequest r1(*this, 0, false); - expectClientConnect(0, r1); - - EXPECT_TRUE(pool_.hasActiveConnections()); - - completeRequestCloseUpstream(0, r1); -} - -// Show that pending requests are considered active. -TEST_F(Http2ConnPoolImplLegacyTest, PendingRequestsConsideredActive) { - expectClientCreate(); - ActiveTestRequest r1(*this, 0, false); - - EXPECT_TRUE(pool_.hasActiveConnections()); - - expectClientConnect(0, r1); - completeRequestCloseUpstream(0, r1); -} - -// Show that even if there is a primary client still, if all of its requests have completed, then it -// does not have any active connections. -TEST_F(Http2ConnPoolImplLegacyTest, ResponseCompletedConnectionReadyNoActiveConnections) { - expectClientCreate(); - ActiveTestRequest r1(*this, 0, false); - expectClientConnect(0, r1); - completeRequest(r1); - - EXPECT_FALSE(pool_.hasActiveConnections()); - - closeClient(0); -} - -// Show that if connections are draining, they're still considered active. -TEST_F(Http2ConnPoolImplLegacyTest, DrainingConnectionsConsideredActive) { - pool_.max_streams_ = 1; - expectClientCreate(); - ActiveTestRequest r1(*this, 0, false); - expectClientConnect(0, r1); - pool_.drainConnections(); - - EXPECT_TRUE(pool_.hasActiveConnections()); - - completeRequest(r1); - closeClient(0); -} - -// Show that once we've drained all connections, there are no longer any active. -TEST_F(Http2ConnPoolImplLegacyTest, DrainedConnectionsNotActive) { - pool_.max_streams_ = 1; - expectClientCreate(); - ActiveTestRequest r1(*this, 0, false); - expectClientConnect(0, r1); - pool_.drainConnections(); - completeRequest(r1); - - EXPECT_FALSE(pool_.hasActiveConnections()); - - closeClient(0); -} -} // namespace Http2 -} // namespace Legacy -} // namespace Http -} // namespace Envoy From 94f679780b3939fbb23d91f5c26549c4da87dab3 Mon Sep 17 00:00:00 2001 From: Manish Date: Thu, 23 Apr 2020 01:58:24 +0530 Subject: [PATCH 011/909] docs: breaking long word to stop content overflow. (#10880) Signed-off-by: Manish Kumar --- docs/root/_static/css/envoy.css | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/docs/root/_static/css/envoy.css b/docs/root/_static/css/envoy.css index bab090478464..c65a71f05262 100644 --- a/docs/root/_static/css/envoy.css +++ b/docs/root/_static/css/envoy.css @@ -9,3 +9,8 @@ table.docutils div.line-block { margin-left: 0; } +/* Breaking long words */ +.wy-nav-content { + overflow-wrap: break-word; + max-width: 1000px; +} From e689a30d0553709fec8cd49cca871fbb451a8853 Mon Sep 17 00:00:00 2001 From: asraa Date: Wed, 22 Apr 2020 19:16:19 -0400 Subject: [PATCH 012/909] [docs] PR template to include commit message (#10900) Signed-off-by: Asra Ali --- CONTRIBUTING.md | 6 +++++- PULL_REQUESTS.md | 20 ++++++++++++++------ PULL_REQUEST_TEMPLATE.md | 3 ++- 3 files changed, 21 insertions(+), 8 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index ffd804f25b81..2b8723439638 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -97,6 +97,8 @@ versioning guidelines: colon. Examples: * "docs: fix grammar error" * "http conn man: add new feature" +* Your PR commit message will be used as the commit message when your PR is merged. 
You should + update this field if your PR diverges during review. * Your PR description should have details on what the PR does. If it fixes an existing issue it should end with "Fixes #XXX". * When all of the tests are passing and all other conditions described herein are satisfied, a @@ -190,10 +192,12 @@ and false. organization specific shortcuts into the code. * If there is a question on who should review a PR please discuss in Slack. * Anyone is welcome to review any PR that they want, whether they are a maintainer or not. +* Please make sure that the PR title, commit message, and description are updated if the PR changes + significantly during review. * Please **clean up the title and body** before merging. By default, GitHub fills the squash merge title with the original title, and the commit body with every individual commit from the PR. The maintainer doing the merge should make sure the title follows the guidelines above and should - overwrite the body with the original extended description from the PR (cleaning it up if necessary) + overwrite the body with the original commit message from the PR (cleaning it up if necessary) while preserving the PR author's final DCO sign-off. * If a PR includes a deprecation/breaking change, notification should be sent to the [envoy-announce](https://groups.google.com/forum/#!forum/envoy-announce) email list. diff --git a/PULL_REQUESTS.md b/PULL_REQUESTS.md index deb77bb326fd..0126cf073ea6 100644 --- a/PULL_REQUESTS.md +++ b/PULL_REQUESTS.md @@ -12,13 +12,21 @@ explaining the overall change. Both the component and the explanation must be lower case. * router:add x-envoy-overloaded header * tls: add support for specifying TLS session ticket keys -### Description +### Commit Message -The description field should include a more verbose explanation of what this PR -does. If this PR causes a change in behavior it should document the behavior -before and after If fixing a bug, please describe what the original issue is and -how the change resolves it. If it is configuration controlled, it should note -how the feature is enabled etc... +The commit message field should include an explanation of what this PR +does. This will be used as the final commit message that maintainers will use to +populate the commit message when merging. If this PR causes a change in behavior +it should document the behavior before and after. If fixing a bug, please +describe what the original issue is and how the change resolves it. If it is +configuration controlled, it should note how the feature is enabled etc... + + +### Additional Description + +The additional description field should include information about what this PR does +that may be out of scope for a commit message. This could include additional +information or context useful to reviewers.
### Risk diff --git a/PULL_REQUEST_TEMPLATE.md b/PULL_REQUEST_TEMPLATE.md index f8bb15ff43e4..e16c81139d86 100644 --- a/PULL_REQUEST_TEMPLATE.md +++ b/PULL_REQUEST_TEMPLATE.md @@ -1,7 +1,8 @@ For an explanation of how to fill out the fields, please see the relevant section in [PULL_REQUESTS.md](https://github.com/envoyproxy/envoy/blob/master/PULL_REQUESTS.md) -Description: +Commit Message: +Additional Description: Risk Level: Testing: Docs Changes: From 0a7dba7933af7c5e1ffd842d76d221881dd46a24 Mon Sep 17 00:00:00 2001 From: aaron ai Date: Thu, 23 Apr 2020 11:17:56 +0800 Subject: [PATCH 013/909] rocketmq_proxy: implement rocketmq proxy Implement rocketmq proxy Description: implement rocketmq proxy Risk Level: Low Testing: Unit Tests Docs Changes: N/A Release Notes: N/A Fixes #9431 Signed-off-by: aaron-ai --- CODEOWNERS | 2 + api/BUILD | 1 + .../filters/network/rocketmq_proxy/v3/BUILD | 14 + .../network/rocketmq_proxy/v3/README.md | 1 + .../rocketmq_proxy/v3/rocketmq_proxy.proto | 36 + .../network/rocketmq_proxy/v3/route.proto | 55 ++ api/versioning/BUILD | 1 + .../network_filters/network_filters.rst | 1 + .../network_filters/rocketmq_proxy_filter.rst | 76 ++ docs/root/version_history/current.rst | 1 + generated_api_shadow/BUILD | 1 + .../filters/network/rocketmq_proxy/v3/BUILD | 14 + .../rocketmq_proxy/v3/rocketmq_proxy.proto | 36 + .../network/rocketmq_proxy/v3/route.proto | 55 ++ source/common/common/logger.h | 1 + source/extensions/extensions_build_config.bzl | 1 + .../filters/network/rocketmq_proxy/BUILD | 148 +++ .../network/rocketmq_proxy/active_message.cc | 333 +++++++ .../network/rocketmq_proxy/active_message.h | 105 ++ .../filters/network/rocketmq_proxy/codec.cc | 408 ++++++++ .../filters/network/rocketmq_proxy/codec.h | 81 ++ .../filters/network/rocketmq_proxy/config.cc | 65 ++ .../filters/network/rocketmq_proxy/config.h | 72 ++ .../network/rocketmq_proxy/conn_manager.cc | 376 +++++++ .../network/rocketmq_proxy/conn_manager.h | 215 ++++ .../filters/network/rocketmq_proxy/metadata.h | 43 + .../network/rocketmq_proxy/protocol.cc | 749 ++++++++++++++ .../filters/network/rocketmq_proxy/protocol.h | 672 +++++++++++++ .../network/rocketmq_proxy/router/BUILD | 50 + .../rocketmq_proxy/router/route_matcher.cc | 73 ++ .../rocketmq_proxy/router/route_matcher.h | 71 ++ .../network/rocketmq_proxy/router/router.h | 85 ++ .../rocketmq_proxy/router/router_impl.cc | 218 ++++ .../rocketmq_proxy/router/router_impl.h | 75 ++ .../filters/network/rocketmq_proxy/stats.h | 62 ++ .../network/rocketmq_proxy/topic_route.cc | 76 ++ .../network/rocketmq_proxy/topic_route.h | 78 ++ .../network/rocketmq_proxy/well_known_names.h | 29 + .../filters/network/well_known_names.h | 2 + .../filters/network/rocketmq_proxy/BUILD | 136 +++ .../rocketmq_proxy/active_message_test.cc | 209 ++++ .../network/rocketmq_proxy/codec_test.cc | 797 +++++++++++++++ .../network/rocketmq_proxy/config_test.cc | 170 ++++ .../rocketmq_proxy/conn_manager_test.cc | 690 +++++++++++++ .../filters/network/rocketmq_proxy/mocks.cc | 57 ++ .../filters/network/rocketmq_proxy/mocks.h | 89 ++ .../network/rocketmq_proxy/protocol_test.cc | 927 ++++++++++++++++++ .../rocketmq_proxy/route_matcher_test.cc | 74 ++ .../network/rocketmq_proxy/router_test.cc | 470 +++++++++ .../rocketmq_proxy/topic_route_test.cc | 74 ++ .../filters/network/rocketmq_proxy/utility.cc | 240 +++++ .../filters/network/rocketmq_proxy/utility.h | 33 + tools/spelling/spelling_dictionary.txt | 4 + 53 files changed, 8352 insertions(+) create mode 100644 
api/envoy/extensions/filters/network/rocketmq_proxy/v3/BUILD create mode 100644 api/envoy/extensions/filters/network/rocketmq_proxy/v3/README.md create mode 100644 api/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.proto create mode 100644 api/envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto create mode 100644 docs/root/configuration/listeners/network_filters/rocketmq_proxy_filter.rst create mode 100644 generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v3/BUILD create mode 100644 generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.proto create mode 100644 generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto create mode 100644 source/extensions/filters/network/rocketmq_proxy/BUILD create mode 100644 source/extensions/filters/network/rocketmq_proxy/active_message.cc create mode 100644 source/extensions/filters/network/rocketmq_proxy/active_message.h create mode 100644 source/extensions/filters/network/rocketmq_proxy/codec.cc create mode 100644 source/extensions/filters/network/rocketmq_proxy/codec.h create mode 100644 source/extensions/filters/network/rocketmq_proxy/config.cc create mode 100644 source/extensions/filters/network/rocketmq_proxy/config.h create mode 100644 source/extensions/filters/network/rocketmq_proxy/conn_manager.cc create mode 100644 source/extensions/filters/network/rocketmq_proxy/conn_manager.h create mode 100644 source/extensions/filters/network/rocketmq_proxy/metadata.h create mode 100644 source/extensions/filters/network/rocketmq_proxy/protocol.cc create mode 100644 source/extensions/filters/network/rocketmq_proxy/protocol.h create mode 100644 source/extensions/filters/network/rocketmq_proxy/router/BUILD create mode 100644 source/extensions/filters/network/rocketmq_proxy/router/route_matcher.cc create mode 100644 source/extensions/filters/network/rocketmq_proxy/router/route_matcher.h create mode 100644 source/extensions/filters/network/rocketmq_proxy/router/router.h create mode 100644 source/extensions/filters/network/rocketmq_proxy/router/router_impl.cc create mode 100644 source/extensions/filters/network/rocketmq_proxy/router/router_impl.h create mode 100644 source/extensions/filters/network/rocketmq_proxy/stats.h create mode 100644 source/extensions/filters/network/rocketmq_proxy/topic_route.cc create mode 100644 source/extensions/filters/network/rocketmq_proxy/topic_route.h create mode 100644 source/extensions/filters/network/rocketmq_proxy/well_known_names.h create mode 100644 test/extensions/filters/network/rocketmq_proxy/BUILD create mode 100644 test/extensions/filters/network/rocketmq_proxy/active_message_test.cc create mode 100644 test/extensions/filters/network/rocketmq_proxy/codec_test.cc create mode 100644 test/extensions/filters/network/rocketmq_proxy/config_test.cc create mode 100644 test/extensions/filters/network/rocketmq_proxy/conn_manager_test.cc create mode 100644 test/extensions/filters/network/rocketmq_proxy/mocks.cc create mode 100644 test/extensions/filters/network/rocketmq_proxy/mocks.h create mode 100644 test/extensions/filters/network/rocketmq_proxy/protocol_test.cc create mode 100644 test/extensions/filters/network/rocketmq_proxy/route_matcher_test.cc create mode 100644 test/extensions/filters/network/rocketmq_proxy/router_test.cc create mode 100644 test/extensions/filters/network/rocketmq_proxy/topic_route_test.cc create mode 100644 test/extensions/filters/network/rocketmq_proxy/utility.cc create mode 100644 
test/extensions/filters/network/rocketmq_proxy/utility.h diff --git a/CODEOWNERS b/CODEOWNERS index 37e376e77e79..4a7d6aa7cafe 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -16,6 +16,8 @@ extensions/filters/common/original_src @snowp @klarose # dubbo_proxy extension /*/extensions/filters/network/dubbo_proxy @zyfjeff @lizan +# rocketmq_proxy extension +/*/extensions/filters/network/rocketmq_proxy @aaron-ai @lizhanhui @lizan # thrift_proxy extension /*/extensions/filters/network/thrift_proxy @zuercher @brian-pane # compressor used by http compression filters diff --git a/api/BUILD b/api/BUILD index 97a8554bc520..d52653ebc4e6 100644 --- a/api/BUILD +++ b/api/BUILD @@ -214,6 +214,7 @@ proto_library( "//envoy/extensions/filters/network/ratelimit/v3:pkg", "//envoy/extensions/filters/network/rbac/v3:pkg", "//envoy/extensions/filters/network/redis_proxy/v3:pkg", + "//envoy/extensions/filters/network/rocketmq_proxy/v3:pkg", "//envoy/extensions/filters/network/sni_cluster/v3:pkg", "//envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha:pkg", "//envoy/extensions/filters/network/tcp_proxy/v3:pkg", diff --git a/api/envoy/extensions/filters/network/rocketmq_proxy/v3/BUILD b/api/envoy/extensions/filters/network/rocketmq_proxy/v3/BUILD new file mode 100644 index 000000000000..e6bc5699efc4 --- /dev/null +++ b/api/envoy/extensions/filters/network/rocketmq_proxy/v3/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v3:pkg", + "//envoy/config/route/v3:pkg", + "//envoy/type/matcher/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/filters/network/rocketmq_proxy/v3/README.md b/api/envoy/extensions/filters/network/rocketmq_proxy/v3/README.md new file mode 100644 index 000000000000..3bd849bc2530 --- /dev/null +++ b/api/envoy/extensions/filters/network/rocketmq_proxy/v3/README.md @@ -0,0 +1 @@ +Protocol buffer definitions for the Rocketmq proxy. \ No newline at end of file diff --git a/api/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.proto b/api/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.proto new file mode 100644 index 000000000000..ee77ab909592 --- /dev/null +++ b/api/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.proto @@ -0,0 +1,36 @@ +syntax = "proto3"; + +package envoy.extensions.filters.network.rocketmq_proxy.v3; + +import "envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.rocketmq_proxy.v3"; +option java_outer_classname = "RocketmqProxyProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: RocketMQ Proxy] +// RocketMQ Proxy :ref:`configuration overview `. +// [#extension: envoy.filters.network.rocketmq_proxy] + +message RocketmqProxy { + // The human readable prefix to use when emitting statistics. + string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The route table for the connection manager is specified in this property. 
+ RouteConfiguration route_config = 2; + + // The largest duration transient object expected to live, more than 10s is recommended. + google.protobuf.Duration transient_object_life_span = 3; + + // If develop_mode is enabled, this proxy plugin may work without dedicated traffic intercepting + // facility without considering backward compatibility of exiting RocketMQ client SDK. + bool develop_mode = 4; +} diff --git a/api/envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto b/api/envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto new file mode 100644 index 000000000000..5fe5d33ffacf --- /dev/null +++ b/api/envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto @@ -0,0 +1,55 @@ +syntax = "proto3"; + +package envoy.extensions.filters.network.rocketmq_proxy.v3; + +import "envoy/config/core/v3/base.proto"; +import "envoy/config/route/v3/route_components.proto"; +import "envoy/type/matcher/v3/string.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.rocketmq_proxy.v3"; +option java_outer_classname = "RouteProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Rocketmq Proxy Route Configuration] +// Rocketmq Proxy :ref:`configuration overview `. + +message RouteConfiguration { + // The name of the route configuration. + string name = 1; + + // The list of routes that will be matched, in order, against incoming requests. The first route + // that matches will be used. + repeated Route routes = 2; +} + +message Route { + // Route matching parameters. + RouteMatch match = 1 [(validate.rules).message = {required: true}]; + + // Route request to some upstream cluster. + RouteAction route = 2 [(validate.rules).message = {required: true}]; +} + +message RouteMatch { + // The name of the topic. + type.matcher.v3.StringMatcher topic = 1 [(validate.rules).message = {required: true}]; + + // Specifies a set of headers that the route should match on. The router will check the request’s + // headers against all the specified headers in the route config. A match will happen if all the + // headers in the route are present in the request with the same values (or based on presence if + // the value field is not in the config). + repeated config.route.v3.HeaderMatcher headers = 2; +} + +message RouteAction { + // Indicates the upstream cluster to which the request should be routed. + string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Optional endpoint metadata match criteria used by the subset load balancer. 
+ config.core.v3.Metadata metadata_match = 2; +} diff --git a/api/versioning/BUILD b/api/versioning/BUILD index bbb683d8bd08..f1a0d2440e14 100644 --- a/api/versioning/BUILD +++ b/api/versioning/BUILD @@ -96,6 +96,7 @@ proto_library( "//envoy/extensions/filters/network/ratelimit/v3:pkg", "//envoy/extensions/filters/network/rbac/v3:pkg", "//envoy/extensions/filters/network/redis_proxy/v3:pkg", + "//envoy/extensions/filters/network/rocketmq_proxy/v3:pkg", "//envoy/extensions/filters/network/sni_cluster/v3:pkg", "//envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha:pkg", "//envoy/extensions/filters/network/tcp_proxy/v3:pkg", diff --git a/docs/root/configuration/listeners/network_filters/network_filters.rst b/docs/root/configuration/listeners/network_filters/network_filters.rst index 65511250f84b..4c29a385acad 100644 --- a/docs/root/configuration/listeners/network_filters/network_filters.rst +++ b/docs/root/configuration/listeners/network_filters/network_filters.rst @@ -23,6 +23,7 @@ filters. rate_limit_filter rbac_filter redis_proxy_filter + rocketmq_proxy_filter tcp_proxy_filter thrift_proxy_filter sni_cluster_filter diff --git a/docs/root/configuration/listeners/network_filters/rocketmq_proxy_filter.rst b/docs/root/configuration/listeners/network_filters/rocketmq_proxy_filter.rst new file mode 100644 index 000000000000..50033efc899c --- /dev/null +++ b/docs/root/configuration/listeners/network_filters/rocketmq_proxy_filter.rst @@ -0,0 +1,76 @@ +.. _config_network_filters_rocketmq_proxy: + +RocketMQ proxy +============== + +Apache RocketMQ is a distributed messaging system, which is composed of four types of roles: producer, consumer, name +server and broker server. The former two are embedded into the user application in the form of an SDK, whilst the latter two are +standalone servers. + +A message in RocketMQ carries a topic as its destination and optionally one or more tags as application-specific labels. + +Producers are used to send messages to brokers according to their topics. Similar to many distributed systems, +producers need to know how to connect to these serving brokers. To achieve this goal, RocketMQ provides name server +clusters for producers to query. Namely, when a producer attempts to send messages with a new topic, it first +tries to look up the addresses (called route info) of the brokers that serve the topic from the name servers. Once producers +get the route info of a topic, they cache it in memory and renew it periodically thereafter. This +mechanism, though simple, effectively keeps service availability high without requiring the name server +service itself to be highly available. + +Brokers provide messaging services to end users. In addition to various messaging services, they also periodically +report their health status and the route info of the topics they currently serve to the name servers. + +The major role of the name server is to answer queries for the route info of a topic. Additionally, it also purges route info +entries once the owning brokers fail to report their health info for a configured period of time. This ensures that +clients almost always connect to brokers that are online and ready to serve. + +Consumers are used by applications to pull messages from brokers. They perform similar heartbeats to maintain their alive +status. RocketMQ brokers support two message-fetch approaches: long-pulling and pop. + +Using the first approach, consumers have to implement a load-balancing algorithm. The pop approach, from the perspective of +consumers, is stateless.
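Because the proxy answers topic route queries from its own configuration rather than from a real name server, the topic-to-cluster mapping in route.proto above is the central piece of configuration. The sketch below shows one way such a route table can be written and parsed, in the style of the unit tests added by this patch; the YAML literal, test name, and include paths are illustrative assumptions rather than excerpts from the patch:

    #include "envoy/extensions/filters/network/rocketmq_proxy/v3/route.pb.h"

    #include "test/test_common/utility.h"

    #include "gtest/gtest.h"

    // Minimal route table: requests for topic "test_topic" are routed to cluster "fake_cluster".
    TEST(RocketmqRouteConfigExample, TopicToCluster) {
      const std::string yaml = R"EOF(
      name: default_route
      routes:
      - match:
          topic:
            exact: test_topic
        route:
          cluster: fake_cluster
      )EOF";

      envoy::extensions::filters::network::rocketmq_proxy::v3::RouteConfiguration route_config;
      Envoy::TestUtility::loadFromYaml(yaml, route_config);
      EXPECT_EQ("fake_cluster", route_config.routes(0).route().cluster());
    }

The same structure, with a header matcher added under match, is what the route_matcher tests in this patch exercise when routing on consumer group or other request headers.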
+ +The Envoy RocketMQ filter proxies requests and responses between producers/consumers and brokers. Various statistical items +are collected to enhance observability. + +At present, pop-based message fetching is implemented. Long-pulling will be implemented in the next pull request. + +.. _config_network_filters_rocketmq_proxy_stats: + +Statistics +---------- + +Every configured rocketmq proxy filter has statistics rooted at *rocketmq.<stat_prefix>.* with the +following statistics: + +.. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + request, Counter, Total requests + request_decoding_error, Counter, Total decoding error requests + request_decoding_success, Counter, Total decoding success requests + response, Counter, Total responses + response_decoding_error, Counter, Total decoding error responses + response_decoding_success, Counter, Total decoding success responses + response_error, Counter, Total error responses + response_success, Counter, Total success responses + heartbeat, Counter, Total heartbeat requests + unregister, Counter, Total unregister requests + get_topic_route, Counter, Total getting topic route requests + send_message_v1, Counter, Total sending message v1 requests + send_message_v2, Counter, Total sending message v2 requests + pop_message, Counter, Total popping message requests + ack_message, Counter, Total acking message requests + get_consumer_list, Counter, Total getting consumer list requests + maintenance_failure, Counter, Total maintenance failures + request_active, Gauge, Total active requests + send_message_v1_active, Gauge, Total active sending message v1 requests + send_message_v2_active, Gauge, Total active sending message v2 requests + pop_message_active, Gauge, Total active popping message requests + get_topic_route_active, Gauge, Total active getting topic route requests + send_message_pending, Gauge, Total pending sending message requests + pop_message_pending, Gauge, Total pending popping message requests + get_topic_route_pending, Gauge, Total pending getting topic route requests + total_pending, Gauge, Total pending requests + request_time_ms, Histogram, Request time in milliseconds \ No newline at end of file diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 7dafeef3d4a2..f587abbb4aaa 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -19,6 +19,7 @@ Changes `envoy.reloadable_features.new_http2_connection_pool_behavior`. * logger: added :ref:`--log-format-prefix-with-location ` command line option to prefix '%v' with file path and line number. * network filters: added a :ref:`postgres proxy filter `. +* network filters: added a :ref:`rocketmq proxy filter `. * request_id: added to :ref:`always_set_request_id_in_response setting ` to set :ref:`x-request-id ` header in response even if tracing is not forced.
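The statistics in the table above are the kind that Envoy filters normally declare through the stats macros. The following sketch shows that pattern in abbreviated form; the macro name, struct name, and the subset of stats listed are illustrative and are not copied from the stats.h added by this patch:

    #include <string>

    #include "envoy/stats/scope.h"
    #include "envoy/stats/stats_macros.h"

    namespace Envoy {

    // Abbreviated list: one entry per stat name in the documentation table above.
    #define EXAMPLE_ROCKETMQ_FILTER_STATS(COUNTER, GAUGE, HISTOGRAM)                                 \
      COUNTER(request)                                                                               \
      COUNTER(response_success)                                                                      \
      GAUGE(request_active, Accumulate)                                                              \
      HISTOGRAM(request_time_ms, Milliseconds)

    struct ExampleRocketmqFilterStats {
      EXAMPLE_ROCKETMQ_FILTER_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT,
                                    GENERATE_HISTOGRAM_STRUCT)

      // Creates every stat in the list under the supplied prefix within the given scope.
      static ExampleRocketmqFilterStats generateStats(const std::string& prefix,
                                                      Stats::Scope& scope) {
        return ExampleRocketmqFilterStats{EXAMPLE_ROCKETMQ_FILTER_STATS(
            POOL_COUNTER_PREFIX(scope, prefix), POOL_GAUGE_PREFIX(scope, prefix),
            POOL_HISTOGRAM_PREFIX(scope, prefix))};
      }
    };

    } // namespace Envoy

The connection manager then increments the counters and gauges named in the table as requests are decoded, routed, and answered.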
diff --git a/generated_api_shadow/BUILD b/generated_api_shadow/BUILD index 6aafa3e75588..15ac05d10ced 100644 --- a/generated_api_shadow/BUILD +++ b/generated_api_shadow/BUILD @@ -77,6 +77,7 @@ proto_library( "//envoy/config/filter/network/rate_limit/v2:pkg", "//envoy/config/filter/network/rbac/v2:pkg", "//envoy/config/filter/network/redis_proxy/v2:pkg", + "//envoy/config/filter/network/rocketmq_proxy/v3:pkg", "//envoy/config/filter/network/sni_cluster/v2:pkg", "//envoy/config/filter/network/tcp_proxy/v2:pkg", "//envoy/config/filter/network/thrift_proxy/v2alpha1:pkg", diff --git a/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v3/BUILD new file mode 100644 index 000000000000..e6bc5699efc4 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v3/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v3:pkg", + "//envoy/config/route/v3:pkg", + "//envoy/type/matcher/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.proto new file mode 100644 index 000000000000..ee77ab909592 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.proto @@ -0,0 +1,36 @@ +syntax = "proto3"; + +package envoy.extensions.filters.network.rocketmq_proxy.v3; + +import "envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.rocketmq_proxy.v3"; +option java_outer_classname = "RocketmqProxyProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: RocketMQ Proxy] +// RocketMQ Proxy :ref:`configuration overview `. +// [#extension: envoy.filters.network.rocketmq_proxy] + +message RocketmqProxy { + // The human readable prefix to use when emitting statistics. + string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The route table for the connection manager is specified in this property. + RouteConfiguration route_config = 2; + + // The largest duration transient object expected to live, more than 10s is recommended. + google.protobuf.Duration transient_object_life_span = 3; + + // If develop_mode is enabled, this proxy plugin may work without dedicated traffic intercepting + // facility without considering backward compatibility of exiting RocketMQ client SDK. 
+ bool develop_mode = 4; +} diff --git a/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto b/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto new file mode 100644 index 000000000000..5fe5d33ffacf --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto @@ -0,0 +1,55 @@ +syntax = "proto3"; + +package envoy.extensions.filters.network.rocketmq_proxy.v3; + +import "envoy/config/core/v3/base.proto"; +import "envoy/config/route/v3/route_components.proto"; +import "envoy/type/matcher/v3/string.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.rocketmq_proxy.v3"; +option java_outer_classname = "RouteProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Rocketmq Proxy Route Configuration] +// Rocketmq Proxy :ref:`configuration overview `. + +message RouteConfiguration { + // The name of the route configuration. + string name = 1; + + // The list of routes that will be matched, in order, against incoming requests. The first route + // that matches will be used. + repeated Route routes = 2; +} + +message Route { + // Route matching parameters. + RouteMatch match = 1 [(validate.rules).message = {required: true}]; + + // Route request to some upstream cluster. + RouteAction route = 2 [(validate.rules).message = {required: true}]; +} + +message RouteMatch { + // The name of the topic. + type.matcher.v3.StringMatcher topic = 1 [(validate.rules).message = {required: true}]; + + // Specifies a set of headers that the route should match on. The router will check the request’s + // headers against all the specified headers in the route config. A match will happen if all the + // headers in the route are present in the request with the same values (or based on presence if + // the value field is not in the config). + repeated config.route.v3.HeaderMatcher headers = 2; +} + +message RouteAction { + // Indicates the upstream cluster to which the request should be routed. + string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Optional endpoint metadata match criteria used by the subset load balancer. 
+ config.core.v3.Metadata metadata_match = 2; +} diff --git a/source/common/common/logger.h b/source/common/common/logger.h index 30b44628076d..384564d7c620 100644 --- a/source/common/common/logger.h +++ b/source/common/common/logger.h @@ -34,6 +34,7 @@ namespace Logger { FUNCTION(conn_handler) \ FUNCTION(decompression) \ FUNCTION(dubbo) \ + FUNCTION(rocketmq) \ FUNCTION(file) \ FUNCTION(filter) \ FUNCTION(forward_proxy) \ diff --git a/source/extensions/extensions_build_config.bzl b/source/extensions/extensions_build_config.bzl index 1bb72dfb7e1d..49f603e2697c 100644 --- a/source/extensions/extensions_build_config.bzl +++ b/source/extensions/extensions_build_config.bzl @@ -96,6 +96,7 @@ EXTENSIONS = { "envoy.filters.network.ratelimit": "//source/extensions/filters/network/ratelimit:config", "envoy.filters.network.rbac": "//source/extensions/filters/network/rbac:config", "envoy.filters.network.redis_proxy": "//source/extensions/filters/network/redis_proxy:config", + "envoy.filters.network.rocketmq_proxy": "//source/extensions/filters/network/rocketmq_proxy:config", "envoy.filters.network.tcp_proxy": "//source/extensions/filters/network/tcp_proxy:config", "envoy.filters.network.thrift_proxy": "//source/extensions/filters/network/thrift_proxy:config", "envoy.filters.network.sni_cluster": "//source/extensions/filters/network/sni_cluster:config", diff --git a/source/extensions/filters/network/rocketmq_proxy/BUILD b/source/extensions/filters/network/rocketmq_proxy/BUILD new file mode 100644 index 000000000000..65c4f18be827 --- /dev/null +++ b/source/extensions/filters/network/rocketmq_proxy/BUILD @@ -0,0 +1,148 @@ +licenses(["notice"]) # Apache 2 + +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", + "envoy_cc_library", + "envoy_package", +) + +envoy_package() + +envoy_cc_library( + name = "well_known_names", + hdrs = ["well_known_names.h"], + deps = ["//source/common/singleton:const_singleton"], +) + +envoy_cc_library( + name = "stats_interface", + hdrs = ["stats.h"], + deps = [ + "//include/envoy/stats:stats_interface", + "//include/envoy/stats:stats_macros", + ], +) + +envoy_cc_library( + name = "rocketmq_interface", + hdrs = [ + "topic_route.h", + ], + deps = [ + "//source/common/protobuf:utility_lib", + ], +) + +envoy_cc_library( + name = "rocketmq_lib", + srcs = [ + "topic_route.cc", + ], + deps = [ + ":rocketmq_interface", + ], +) + +envoy_cc_library( + name = "protocol_interface", + hdrs = ["protocol.h"], + deps = [ + ":metadata_lib", + "//source/common/buffer:buffer_lib", + "//source/common/protobuf:utility_lib", + ], +) + +envoy_cc_library( + name = "protocol_lib", + srcs = ["protocol.cc"], + deps = [ + ":protocol_interface", + ":well_known_names", + "//source/common/common:enum_to_int", + ], +) + +envoy_cc_library( + name = "codec_lib", + srcs = [ + "codec.cc", + ], + hdrs = [ + "codec.h", + ], + deps = [ + ":protocol_lib", + "//include/envoy/network:filter_interface", + "//source/common/protobuf:utility_lib", + ], +) + +envoy_cc_library( + name = "conn_manager_lib", + srcs = [ + "active_message.cc", + "conn_manager.cc", + ], + hdrs = [ + "active_message.h", + "conn_manager.h", + ], + deps = [ + ":codec_lib", + ":protocol_lib", + ":rocketmq_lib", + ":stats_interface", + ":well_known_names", + "//include/envoy/buffer:buffer_interface", + "//include/envoy/event:dispatcher_interface", + "//include/envoy/network:connection_interface", + "//include/envoy/tcp:conn_pool_interface", + "//include/envoy/upstream:cluster_manager_interface", + "//source/common/buffer:buffer_lib", 
+ "//source/common/common:assert_lib", + "//source/common/common:empty_string", + "//source/common/common:enum_to_int", + "//source/common/common:linked_object", + "//source/common/protobuf:utility_lib", + "//source/common/stats:timespan_lib", + "//source/common/upstream:load_balancer_lib", + "//source/extensions/filters/network:well_known_names", + "//source/extensions/filters/network/rocketmq_proxy/router:router_interface", + "@envoy_api//envoy/extensions/filters/network/rocketmq_proxy/v3:pkg_cc_proto", + ], +) + +envoy_cc_extension( + name = "config", + srcs = [ + "config.cc", + ], + hdrs = [ + "config.h", + ], + security_posture = "requires_trusted_downstream_and_upstream", + status = "alpha", + deps = [ + ":conn_manager_lib", + "//include/envoy/registry", + "//include/envoy/server:filter_config_interface", + "//source/common/common:logger_lib", + "//source/common/common:minimal_logger_lib", + "//source/common/config:utility_lib", + "//source/extensions/filters/network/common:factory_base_lib", + "//source/extensions/filters/network/rocketmq_proxy/router:route_matcher", + "//source/extensions/filters/network/rocketmq_proxy/router:router_lib", + "@envoy_api//envoy/extensions/filters/network/rocketmq_proxy/v3:pkg_cc_proto", + ], +) + +envoy_cc_library( + name = "metadata_lib", + hdrs = ["metadata.h"], + external_deps = ["abseil_optional"], + deps = [ + "//source/common/http:header_map_lib", + ], +) diff --git a/source/extensions/filters/network/rocketmq_proxy/active_message.cc b/source/extensions/filters/network/rocketmq_proxy/active_message.cc new file mode 100644 index 000000000000..c9e3bd14c2c3 --- /dev/null +++ b/source/extensions/filters/network/rocketmq_proxy/active_message.cc @@ -0,0 +1,333 @@ +#include "extensions/filters/network/rocketmq_proxy/active_message.h" + +#include "envoy/upstream/cluster_manager.h" + +#include "common/common/empty_string.h" +#include "common/common/enum_to_int.h" +#include "common/protobuf/utility.h" + +#include "extensions/filters/network/rocketmq_proxy/conn_manager.h" +#include "extensions/filters/network/rocketmq_proxy/topic_route.h" +#include "extensions/filters/network/rocketmq_proxy/well_known_names.h" +#include "extensions/filters/network/well_known_names.h" + +#include "absl/strings/match.h" + +using Envoy::Tcp::ConnectionPool::ConnectionDataPtr; + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { + +ActiveMessage::ActiveMessage(ConnectionManager& conn_manager, RemotingCommandPtr&& request) + : connection_manager_(conn_manager), request_(std::move(request)) { + metadata_ = std::make_shared(); + MetadataHelper::parseRequest(request_, metadata_); + updateActiveRequestStats(); +} + +ActiveMessage::~ActiveMessage() { updateActiveRequestStats(false); } + +void ActiveMessage::createFilterChain() { router_ = connection_manager_.config().createRouter(); } + +void ActiveMessage::sendRequestToUpstream() { + if (!router_) { + createFilterChain(); + } + router_->sendRequestToUpstream(*this); +} + +Router::RouteConstSharedPtr ActiveMessage::route() { + if (cached_route_) { + return cached_route_.value(); + } + const std::string& topic_name = metadata_->topicName(); + ENVOY_LOG(trace, "fetch route for topic: {}", topic_name); + Router::RouteConstSharedPtr route = connection_manager_.config().routerConfig().route(*metadata_); + cached_route_ = route; + return cached_route_.value(); +} + +void ActiveMessage::onError(absl::string_view error_message) { + connection_manager_.onError(request_, error_message); +} + 
+const RemotingCommandPtr& ActiveMessage::downstreamRequest() const { return request_; } + +void ActiveMessage::fillAckMessageDirective(Buffer::Instance& buffer, const std::string& group, + const std::string& topic, + const AckMessageDirective& directive) { + int32_t cursor = 0; + const int32_t buffer_length = buffer.length(); + while (cursor < buffer_length) { + auto frame_length = buffer.peekBEInt(cursor); + std::string decoded_topic = Decoder::decodeTopic(buffer, cursor); + ENVOY_LOG(trace, "Process a message: consumer group: {}, topic: {}, messageId: {}", + decoded_topic, group, Decoder::decodeMsgId(buffer, cursor)); + if (!absl::StartsWith(decoded_topic, RetryTopicPrefix) && decoded_topic != topic) { + ENVOY_LOG(warn, + "Decoded topic from pop-response does not equal to request. Decoded topic: " + "{}, request topic: {}, message ID: {}", + decoded_topic, topic, Decoder::decodeMsgId(buffer, cursor)); + } + + /* + * Sometimes, client SDK may used -1 for queue-id in the pop request so that broker servers + * are allowed to lookup all queues it serves. So we need to use the actual queue Id from + * response body. + */ + int32_t queue_id = Decoder::decodeQueueId(buffer, cursor); + int64_t queue_offset = Decoder::decodeQueueOffset(buffer, cursor); + + std::string key = fmt::format("{}-{}-{}-{}", group, decoded_topic, queue_id, queue_offset); + connection_manager_.insertAckDirective(key, directive); + ENVOY_LOG( + debug, + "Insert an ack directive. Consumer group: {}, topic: {}, queue Id: {}, queue offset: {}", + group, topic, queue_id, queue_offset); + cursor += frame_length; + } +} + +void ActiveMessage::sendResponseToDownstream() { + if (request_->code() == enumToSignedInt(RequestCode::PopMessage)) { + // Fill ack message directive + auto pop_header = request_->typedCustomHeader(); + AckMessageDirective directive(pop_header->targetBrokerName(), pop_header->targetBrokerId(), + connection_manager_.timeSource().monotonicTime()); + ENVOY_LOG(trace, "Receive pop response from broker name: {}, broker ID: {}", + pop_header->targetBrokerName(), pop_header->targetBrokerId()); + fillAckMessageDirective(response_->body(), pop_header->consumerGroup(), pop_header->topic(), + directive); + } + + // If acknowledgment of the message is successful, we need to erase the ack directive from + // manager. + if (request_->code() == enumToSignedInt(RequestCode::AckMessage) && + response_->code() == enumToSignedInt(ResponseCode::Success)) { + auto ack_header = request_->typedCustomHeader(); + connection_manager_.eraseAckDirective(ack_header->directiveKey()); + } + + if (response_) { + response_->opaque(request_->opaque()); + connection_manager_.sendResponseToDownstream(response_); + } +} + +void ActiveMessage::fillBrokerData(std::vector& list, const std::string& cluster, + const std::string& broker_name, int64_t broker_id, + const std::string& address) { + bool found = false; + for (auto& entry : list) { + if (entry.cluster() == cluster && entry.brokerName() == broker_name) { + found = true; + if (entry.brokerAddresses().find(broker_id) != entry.brokerAddresses().end()) { + ENVOY_LOG(warn, "Duplicate broker_id found. 
Broker ID: {}, address: {}", broker_id, + address); + continue; + } else { + entry.brokerAddresses()[broker_id] = address; + } + } + } + + if (!found) { + std::unordered_map addresses; + addresses.emplace(broker_id, address); + + list.emplace_back(BrokerData(cluster, broker_name, std::move(addresses))); + } +} + +void ActiveMessage::onQueryTopicRoute() { + std::string cluster_name; + ASSERT(metadata_->hasTopicName()); + const std::string& topic_name = metadata_->topicName(); + Upstream::ThreadLocalCluster* cluster = nullptr; + Router::RouteConstSharedPtr route = this->route(); + if (route) { + cluster_name = route->routeEntry()->clusterName(); + Upstream::ClusterManager& cluster_manager = connection_manager_.config().clusterManager(); + cluster = cluster_manager.get(cluster_name); + } + if (cluster) { + ENVOY_LOG(trace, "Enovy has an operating cluster {} for topic {}", cluster_name, topic_name); + std::vector queue_data_list; + std::vector broker_data_list; + for (auto& host_set : cluster->prioritySet().hostSetsPerPriority()) { + if (host_set->hosts().empty()) { + continue; + } + for (const auto& host : host_set->hosts()) { + std::string broker_address = host->address()->asString(); + auto& filter_metadata = host->metadata()->filter_metadata(); + const auto filter_it = filter_metadata.find(NetworkFilterNames::get().RocketmqProxy); + ASSERT(filter_it != filter_metadata.end()); + const auto& metadata_fields = filter_it->second.fields(); + ASSERT(metadata_fields.contains(RocketmqConstants::get().BrokerName)); + std::string broker_name = + metadata_fields.at(RocketmqConstants::get().BrokerName).string_value(); + ASSERT(metadata_fields.contains(RocketmqConstants::get().ClusterName)); + std::string broker_cluster_name = + metadata_fields.at(RocketmqConstants::get().ClusterName).string_value(); + // Proto3 will ignore the field if the value is zero. 
+ int32_t read_queue_num = 0; + if (metadata_fields.contains(RocketmqConstants::get().ReadQueueNum)) { + read_queue_num = static_cast( + metadata_fields.at(RocketmqConstants::get().WriteQueueNum).number_value()); + } + int32_t write_queue_num = 0; + if (metadata_fields.contains(RocketmqConstants::get().WriteQueueNum)) { + write_queue_num = static_cast( + metadata_fields.at(RocketmqConstants::get().WriteQueueNum).number_value()); + } + int32_t perm = 0; + if (metadata_fields.contains(RocketmqConstants::get().Perm)) { + perm = static_cast( + metadata_fields.at(RocketmqConstants::get().Perm).number_value()); + } + int32_t broker_id = 0; + if (metadata_fields.contains(RocketmqConstants::get().BrokerId)) { + broker_id = static_cast( + metadata_fields.at(RocketmqConstants::get().BrokerId).number_value()); + } + queue_data_list.emplace_back(QueueData(broker_name, read_queue_num, write_queue_num, perm)); + if (connection_manager_.config().developMode()) { + ENVOY_LOG(trace, "Develop mode, return proxy address to replace all broker addresses so " + "that L4 network rewrite is not required"); + fillBrokerData(broker_data_list, broker_cluster_name, broker_name, broker_id, + connection_manager_.config().proxyAddress()); + } else { + fillBrokerData(broker_data_list, broker_cluster_name, broker_name, broker_id, + broker_address); + } + } + } + ENVOY_LOG(trace, "Prepare TopicRouteData for {} OK", topic_name); + TopicRouteData topic_route_data(std::move(queue_data_list), std::move(broker_data_list)); + ProtobufWkt::Struct data_struct; + topic_route_data.encode(data_struct); + std::string json = MessageUtil::getJsonStringFromMessage(data_struct); + ENVOY_LOG(trace, "Serialize TopicRouteData for {} OK:\n{}", cluster_name, json); + RemotingCommandPtr response = std::make_unique( + static_cast(ResponseCode::Success), downstreamRequest()->version(), + downstreamRequest()->opaque()); + response->markAsResponse(); + response->body().add(json); + connection_manager_.sendResponseToDownstream(response); + } else { + onError("Cluster is not available"); + ENVOY_LOG(warn, "Cluster for topic {} is not available", topic_name); + } + onReset(); +} + +void ActiveMessage::onReset() { connection_manager_.deferredDelete(*this); } + +bool ActiveMessage::onUpstreamData(Envoy::Buffer::Instance& data, bool end_stream, + ConnectionDataPtr& conn_data) { + bool underflow = false; + bool has_error = false; + response_ = Decoder::decode(data, underflow, has_error, downstreamRequest()->code()); + if (underflow && !end_stream) { + ENVOY_LOG(trace, "Wait for more data from upstream"); + return false; + } + + if (enumToSignedInt(RequestCode::PopMessage) == request_->code() && router_ != nullptr) { + recordPopRouteInfo(router_->upstreamHost()); + } + + connection_manager_.stats().response_.inc(); + if (!has_error) { + connection_manager_.stats().response_decoding_success_.inc(); + // Relay response to downstream + sendResponseToDownstream(); + } else { + ENVOY_LOG(error, "Failed to decode response for opaque: {}, close immediately.", + downstreamRequest()->opaque()); + onError("Failed to decode response from upstream"); + connection_manager_.stats().response_decoding_error_.inc(); + conn_data->connection().close(Network::ConnectionCloseType::NoFlush); + } + + if (end_stream) { + conn_data->connection().close(Network::ConnectionCloseType::NoFlush); + } + return true; +} + +void ActiveMessage::recordPopRouteInfo(Upstream::HostDescriptionConstSharedPtr host_description) { + if (host_description) { + auto host_metadata = 
host_description->metadata(); + auto filter_metadata = host_metadata->filter_metadata(); + const auto filter_it = filter_metadata.find(NetworkFilterNames::get().RocketmqProxy); + ASSERT(filter_it != filter_metadata.end()); + const auto& metadata_fields = filter_it->second.fields(); + ASSERT(metadata_fields.contains(RocketmqConstants::get().BrokerName)); + std::string broker_name = + metadata_fields.at(RocketmqConstants::get().BrokerName).string_value(); + // Proto3 will ignore the field if the value is zero. + int32_t broker_id = 0; + if (metadata_fields.contains(RocketmqConstants::get().BrokerId)) { + broker_id = static_cast( + metadata_fields.at(RocketmqConstants::get().BrokerId).number_value()); + } + // Tag the request with upstream host metadata: broker-name, broker-id + auto custom_header = request_->typedCustomHeader(); + custom_header->targetBrokerName(broker_name); + custom_header->targetBrokerId(broker_id); + } +} + +void ActiveMessage::updateActiveRequestStats(bool is_inc) { + if (is_inc) { + connection_manager_.stats().request_active_.inc(); + } else { + connection_manager_.stats().request_active_.dec(); + } + auto code = static_cast(request_->code()); + switch (code) { + case RequestCode::PopMessage: { + if (is_inc) { + connection_manager_.stats().pop_message_active_.inc(); + } else { + connection_manager_.stats().pop_message_active_.dec(); + } + break; + } + case RequestCode::SendMessage: { + if (is_inc) { + connection_manager_.stats().send_message_v1_active_.inc(); + } else { + connection_manager_.stats().send_message_v1_active_.dec(); + } + break; + } + case RequestCode::SendMessageV2: { + if (is_inc) { + connection_manager_.stats().send_message_v2_active_.inc(); + } else { + connection_manager_.stats().send_message_v2_active_.dec(); + } + break; + } + case RequestCode::GetRouteInfoByTopic: { + if (is_inc) { + connection_manager_.stats().get_topic_route_active_.inc(); + } else { + connection_manager_.stats().get_topic_route_active_.dec(); + } + break; + } + default: + break; + } +} + +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/rocketmq_proxy/active_message.h b/source/extensions/filters/network/rocketmq_proxy/active_message.h new file mode 100644 index 000000000000..566907d40dd6 --- /dev/null +++ b/source/extensions/filters/network/rocketmq_proxy/active_message.h @@ -0,0 +1,105 @@ +#pragma once + +#include "envoy/event/deferred_deletable.h" +#include "envoy/network/connection.h" +#include "envoy/network/filter.h" +#include "envoy/stats/timespan.h" + +#include "common/buffer/buffer_impl.h" +#include "common/common/linked_object.h" +#include "common/common/logger.h" + +#include "extensions/filters/network/rocketmq_proxy/codec.h" +#include "extensions/filters/network/rocketmq_proxy/protocol.h" +#include "extensions/filters/network/rocketmq_proxy/router/router.h" +#include "extensions/filters/network/rocketmq_proxy/topic_route.h" + +#include "absl/types/optional.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { + +class ConnectionManager; + +/** + * ActiveMessage represents an in-flight request from downstream that has not yet received response + * from upstream. 
+ */ +class ActiveMessage : public LinkedObject, + public Event::DeferredDeletable, + Logger::Loggable { +public: + ActiveMessage(ConnectionManager& conn_manager, RemotingCommandPtr&& request); + + ~ActiveMessage() override; + + /** + * Set up filter-chain according to configuration from bootstrap config file and dynamic + * configuration items from Pilot. + */ + void createFilterChain(); + + /** + * Relay requests from downstream to upstream cluster. If the target cluster is absent at the + * moment, it triggers cluster discovery service request and mark awaitCluster as true. + * ClusterUpdateCallback will process requests marked await-cluster once the target cluster is + * in place. + */ + void sendRequestToUpstream(); + + const RemotingCommandPtr& downstreamRequest() const; + + /** + * Parse pop response and insert ack route directive such that ack requests will be forwarded to + * the same broker host from which messages are popped. + * @param buffer Pop response body. + * @param group Consumer group name. + * @param topic Topic from which messages are popped + * @param directive ack route directive + */ + virtual void fillAckMessageDirective(Buffer::Instance& buffer, const std::string& group, + const std::string& topic, + const AckMessageDirective& directive); + + virtual void sendResponseToDownstream(); + + void onQueryTopicRoute(); + + virtual void onError(absl::string_view error_message); + + ConnectionManager& connectionManager() { return connection_manager_; } + + virtual void onReset(); + + bool onUpstreamData(Buffer::Instance& data, bool end_stream, + Tcp::ConnectionPool::ConnectionDataPtr& conn_data); + + virtual MessageMetadataSharedPtr metadata() const { return metadata_; } + + virtual Router::RouteConstSharedPtr route(); + + void recordPopRouteInfo(Upstream::HostDescriptionConstSharedPtr host_description); + + static void fillBrokerData(std::vector& list, const std::string& cluster, + const std::string& broker_name, int64_t broker_id, + const std::string& address); + +private: + ConnectionManager& connection_manager_; + RemotingCommandPtr request_; + RemotingCommandPtr response_; + MessageMetadataSharedPtr metadata_; + Router::RouterPtr router_; + absl::optional cached_route_; + + void updateActiveRequestStats(bool is_inc = true); +}; + +using ActiveMessagePtr = std::unique_ptr; + +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/filters/network/rocketmq_proxy/codec.cc b/source/extensions/filters/network/rocketmq_proxy/codec.cc new file mode 100644 index 000000000000..628fc302f99d --- /dev/null +++ b/source/extensions/filters/network/rocketmq_proxy/codec.cc @@ -0,0 +1,408 @@ +#include "extensions/filters/network/rocketmq_proxy/codec.h" + +#include + +#include "common/common/assert.h" +#include "common/common/empty_string.h" +#include "common/common/enum_to_int.h" +#include "common/common/logger.h" + +#include "extensions/filters/network/rocketmq_proxy/protocol.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { + +RemotingCommandPtr Decoder::decode(Buffer::Instance& buffer, bool& underflow, bool& has_error, + int request_code) { + // Verify there is at least some bits, which stores frame length and header length + if (buffer.length() <= MIN_FRAME_SIZE) { + underflow = true; + return nullptr; + } + + auto frame_length = buffer.peekBEInt(); + + if (frame_length > MAX_FRAME_SIZE) { + has_error = true; + return 
nullptr; + } + + if (buffer.length() < frame_length) { + underflow = true; + return nullptr; + } + buffer.drain(FRAME_LENGTH_FIELD_SIZE); + + auto mark = buffer.peekBEInt(); + uint32_t header_length = adjustHeaderLength(mark); + ASSERT(frame_length > header_length); + buffer.drain(FRAME_HEADER_LENGTH_FIELD_SIZE); + + uint32_t body_length = frame_length - 4 - header_length; + + ENVOY_LOG(debug, + "Request/Response Frame Meta: Frame Length = {}, Header Length = {}, Body Length = {}", + frame_length, header_length, body_length); + + Buffer::OwnedImpl header_buffer; + header_buffer.move(buffer, header_length); + std::string header_json = header_buffer.toString(); + ENVOY_LOG(trace, "Request/Response Header JSON: {}", header_json); + + int32_t code, version, opaque; + uint32_t flag; + if (isJsonHeader(mark)) { + ProtobufWkt::Struct header_struct; + + // Parse header JSON text + try { + MessageUtil::loadFromJson(header_json, header_struct); + } catch (std::exception& e) { + has_error = true; + ENVOY_LOG(error, "Failed to parse header JSON: {}. Error message: {}", header_json, e.what()); + return nullptr; + } + + const auto& filed_value_pair = header_struct.fields(); + if (!filed_value_pair.contains("code")) { + ENVOY_LOG(error, "Malformed frame: 'code' field is missing. Header JSON: {}", header_json); + has_error = true; + return nullptr; + } + code = filed_value_pair.at("code").number_value(); + if (!filed_value_pair.contains("version")) { + ENVOY_LOG(error, "Malformed frame: 'version' field is missing. Header JSON: {}", header_json); + has_error = true; + return nullptr; + } + version = filed_value_pair.at("version").number_value(); + if (!filed_value_pair.contains("opaque")) { + ENVOY_LOG(error, "Malformed frame: 'opaque' field is missing. Header JSON: {}", header_json); + has_error = true; + return nullptr; + } + opaque = filed_value_pair.at("opaque").number_value(); + if (!filed_value_pair.contains("flag")) { + ENVOY_LOG(error, "Malformed frame: 'flag' field is missing. Header JSON: {}", header_json); + has_error = true; + return nullptr; + } + flag = filed_value_pair.at("flag").number_value(); + RemotingCommandPtr cmd = std::make_unique(code, version, opaque); + cmd->flag(flag); + if (filed_value_pair.contains("language")) { + cmd->language(filed_value_pair.at("language").string_value()); + } + + if (filed_value_pair.contains("serializeTypeCurrentRPC")) { + cmd->serializeTypeCurrentRPC(filed_value_pair.at("serializeTypeCurrentRPC").string_value()); + } + + cmd->body_.move(buffer, body_length); + + if (RemotingCommand::isResponse(flag)) { + if (filed_value_pair.contains("remark")) { + cmd->remark(filed_value_pair.at("remark").string_value()); + } + cmd->custom_header_ = decodeResponseExtHeader(static_cast(code), header_struct, + static_cast(request_code)); + } else { + cmd->custom_header_ = decodeExtHeader(static_cast(code), header_struct); + } + return cmd; + } else { + ENVOY_LOG(warn, "Unsupported header serialization type"); + has_error = true; + return nullptr; + } +} + +bool Decoder::isComplete(Buffer::Instance& buffer, int32_t cursor) { + if (buffer.length() - cursor < 4) { + // buffer is definitely incomplete. 
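For orientation, the framing that Decoder::decode() walks through can be reproduced with the same Buffer primitives this patch already uses. The sketch below is illustrative only: buildSampleFrame is a made-up helper and every concrete value (request code, opaque, topic) is a placeholder. It shows that the frame-length field counts everything after itself (the 4-byte header-length field, the JSON header and the body), which is why decode() computes body_length = frame_length - 4 - header_length.

#include <cstdint>
#include <string>

#include "common/buffer/buffer_impl.h"

// Illustrative only: hand-assemble a minimal JSON-serialized frame of the shape
// Decoder::decode() accepts. buildSampleFrame is a made-up helper and all
// concrete values are placeholders.
void buildSampleFrame(Envoy::Buffer::Instance& frame) {
  const std::string header_json =
      R"({"code":105,"version":0,"opaque":1,"flag":0,"language":"CPP",)"
      R"("serializeTypeCurrentRPC":"JSON","extFields":{"topic":"test_topic"}})";
  const std::string body; // this particular request carries no body

  // The frame-length field counts everything after itself: the 4-byte
  // header-length field, the JSON header and the body.
  const uint32_t header_length = header_json.size();
  const uint32_t frame_length = 4 + header_length + body.size();
  frame.writeBEInt<uint32_t>(frame_length);
  // Top byte 0 marks JSON serialization; the low 24 bits hold the header length.
  frame.writeBEInt<uint32_t>(header_length);
  frame.add(header_json);
  frame.add(body);
}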
+ return false; + } + + auto total_size = buffer.peekBEInt(cursor); + return buffer.length() - cursor >= static_cast(total_size); +} + +std::string Decoder::decodeTopic(Buffer::Instance& buffer, int32_t cursor) { + if (!isComplete(buffer, cursor)) { + return EMPTY_STRING; + } + + auto magic_code = buffer.peekBEInt(cursor + 4); + + MessageVersion message_version = V1; + if (enumToSignedInt(MessageVersion::V1) == magic_code) { + message_version = V1; + } else if (enumToSignedInt(MessageVersion::V2) == magic_code) { + message_version = V2; + } + + int32_t offset = 4 /* total size */ + + 4 /* magic code */ + + 4 /* body CRC */ + + 4 /* queue Id */ + + 4 /* flag */ + + 8 /* queue offset */ + + 8 /* physical offset */ + + 4 /* sys flag */ + + 8 /* born timestamp */ + + 4 /* born host */ + + 4 /* born host port */ + + 8 /* store timestamp */ + + 4 /* store host */ + + 4 /* store host port */ + + 4 /* re-consume times */ + + 8 /* transaction offset */ + ; + auto body_size = buffer.peekBEInt(cursor + offset); + offset += 4 /* body size */ + + body_size /* body */; + int32_t topic_length; + std::string topic; + switch (message_version) { + case V1: { + topic_length = buffer.peekBEInt(cursor + offset); + topic.reserve(topic_length); + topic.resize(topic_length); + buffer.copyOut(cursor + offset + sizeof(int8_t), topic_length, &topic[0]); + break; + } + case V2: { + topic_length = buffer.peekBEInt(cursor + offset); + topic.reserve(topic_length); + topic.resize(topic_length); + buffer.copyOut(cursor + offset + sizeof(int16_t), topic_length, &topic[0]); + break; + } + } + return topic; +} + +int32_t Decoder::decodeQueueId(Buffer::Instance& buffer, int32_t cursor) { + if (!isComplete(buffer, cursor)) { + return -1; + } + + int32_t offset = 4 /* total size */ + + 4 /* magic code */ + + 4 /* body CRC */; + + return buffer.peekBEInt(cursor + offset); +} + +int64_t Decoder::decodeQueueOffset(Buffer::Instance& buffer, int32_t cursor) { + if (!isComplete(buffer, cursor)) { + return -1; + } + + int32_t offset = 4 /* total size */ + + 4 /* magic code */ + + 4 /* body CRC */ + + 4 /* queue Id */ + + 4 /* flag */; + return buffer.peekBEInt(cursor + offset); +} + +std::string Decoder::decodeMsgId(Buffer::Instance& buffer, int32_t cursor) { + if (!isComplete(buffer, cursor)) { + return EMPTY_STRING; + } + + int32_t offset = 4 /* total size */ + + 4 /* magic code */ + + 4 /* body CRC */ + + 4 /* queue Id */ + + 4 /* flag */ + + 8 /* queue offset */; + auto physical_offset = buffer.peekBEInt(cursor + offset); + offset += 8 /* physical offset */ + + 4 /* sys flag */ + + 8 /* born timestamp */ + + 4 /* born host */ + + 4 /* born host port */ + + 8 /* store timestamp */ + ; + + Buffer::OwnedImpl msg_id_buffer; + msg_id_buffer.writeBEInt(buffer.peekBEInt(cursor + offset)); + msg_id_buffer.writeBEInt(physical_offset); + std::string msg_id; + msg_id.reserve(32); + for (uint64_t i = 0; i < msg_id_buffer.length(); i++) { + auto c = msg_id_buffer.peekBEInt(); + msg_id.append(1, static_cast(c >> 4U)); + msg_id.append(1, static_cast(c & 0xFU)); + } + return msg_id; +} + +CommandCustomHeaderPtr Decoder::decodeExtHeader(RequestCode code, + ProtobufWkt::Struct& header_struct) { + const auto& filed_value_pair = header_struct.fields(); + switch (code) { + case RequestCode::SendMessage: { + ASSERT(filed_value_pair.contains("extFields")); + const auto& ext_fields = filed_value_pair.at("extFields"); + auto send_msg_ext_header = new SendMessageRequestHeader(); + send_msg_ext_header->version_ = SendMessageRequestVersion::V1; + 
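A note on the two commit-log versions handled by decodeTopic(): the magic code selects V1 or V2, and the only difference exercised here is the width of the topic-length field, one byte for V1 and a two-byte big-endian value for V2. A minimal standalone illustration in plain C++ (readTopic is a made-up helper, not part of the patch):

#include <cstdint>
#include <string>

// Illustrative only: given a pointer to the start of the topic-length field,
// V1 stores the length in one byte, V2 in a big-endian two-byte field.
std::string readTopic(const uint8_t* p, bool is_v2) {
  uint32_t topic_length;
  uint32_t length_field_size;
  if (is_v2) {
    topic_length = (static_cast<uint32_t>(p[0]) << 8) | p[1];
    length_field_size = 2;
  } else {
    topic_length = p[0];
    length_field_size = 1;
  }
  return std::string(reinterpret_cast<const char*>(p + length_field_size), topic_length);
}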
send_msg_ext_header->decode(ext_fields); + return send_msg_ext_header; + } + case RequestCode::SendMessageV2: { + ASSERT(filed_value_pair.contains("extFields")); + const auto& ext_fields = filed_value_pair.at("extFields"); + auto send_msg_ext_header = new SendMessageRequestHeader(); + send_msg_ext_header->version_ = SendMessageRequestVersion::V2; + send_msg_ext_header->decode(ext_fields); + return send_msg_ext_header; + } + + case RequestCode::GetRouteInfoByTopic: { + ASSERT(filed_value_pair.contains("extFields")); + const auto& ext_fields = filed_value_pair.at("extFields"); + auto get_route_info_request_header = new GetRouteInfoRequestHeader(); + get_route_info_request_header->decode(ext_fields); + return get_route_info_request_header; + } + + case RequestCode::UnregisterClient: { + ASSERT(filed_value_pair.contains("extFields")); + const auto& ext_fields = filed_value_pair.at("extFields"); + auto unregister_client_request_header = new UnregisterClientRequestHeader(); + unregister_client_request_header->decode(ext_fields); + return unregister_client_request_header; + } + + case RequestCode::GetConsumerListByGroup: { + ASSERT(filed_value_pair.contains("extFields")); + const auto& ext_fields = filed_value_pair.at("extFields"); + auto get_consumer_list_by_group_request_header = new GetConsumerListByGroupRequestHeader(); + get_consumer_list_by_group_request_header->decode(ext_fields); + return get_consumer_list_by_group_request_header; + } + + case RequestCode::PopMessage: { + ASSERT(filed_value_pair.contains("extFields")); + const auto& ext_fields = filed_value_pair.at("extFields"); + auto pop_message_request_header = new PopMessageRequestHeader(); + pop_message_request_header->decode(ext_fields); + return pop_message_request_header; + } + + case RequestCode::AckMessage: { + ASSERT(filed_value_pair.contains("extFields")); + const auto& ext_fields = filed_value_pair.at("extFields"); + auto ack_message_request_header = new AckMessageRequestHeader(); + ack_message_request_header->decode(ext_fields); + return ack_message_request_header; + } + + case RequestCode::HeartBeat: { + // Heartbeat does not have an extended header. + return nullptr; + } + + default: + ENVOY_LOG(warn, "Unsupported request code: {}", static_cast(code)); + return nullptr; + } +} + +CommandCustomHeaderPtr Decoder::decodeResponseExtHeader(ResponseCode response_code, + ProtobufWkt::Struct& header_struct, + RequestCode request_code) { + // No need to decode a failed response. 
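decodeExtHeader() is effectively a registry: each supported RequestCode is mapped to a typed header object whose decode() reads its fields out of extFields, and supporting an additional code means adding a case that allocates the new header and calls decode() on it, exactly as the existing cases do. The class below is a hypothetical example only (QueryConsumerOffsetRequestHeader is not part of this patch); it simply mirrors the shape of the concrete headers defined later in protocol.cc.

#include <cstdint>
#include <string>

#include "common/protobuf/utility.h"

// Hypothetical example only, NOT part of this patch: a plain header struct
// whose decode() pulls its fields out of the "extFields" struct.
struct QueryConsumerOffsetRequestHeader {
  void decode(const ProtobufWkt::Value& ext_fields) {
    const auto& members = ext_fields.struct_value().fields();
    if (members.contains("topic")) {
      topic_ = members.at("topic").string_value();
    }
    if (members.contains("queueId")) {
      queue_id_ = static_cast<int32_t>(members.at("queueId").number_value());
    }
  }

  std::string topic_;
  int32_t queue_id_{0};
};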
+ if (response_code != ResponseCode::Success && response_code != ResponseCode::SlaveNotAvailable) { + return nullptr; + } + const auto& filed_value_pair = header_struct.fields(); + switch (request_code) { + case RequestCode::SendMessage: + case RequestCode::SendMessageV2: { + auto send_message_response_header = new SendMessageResponseHeader(); + ASSERT(filed_value_pair.contains("extFields")); + auto& ext_fields = filed_value_pair.at("extFields"); + send_message_response_header->decode(ext_fields); + return send_message_response_header; + } + + case RequestCode::PopMessage: { + auto pop_message_response_header = new PopMessageResponseHeader(); + ASSERT(filed_value_pair.contains("extFields")); + const auto& ext_fields = filed_value_pair.at("extFields"); + pop_message_response_header->decode(ext_fields); + return pop_message_response_header; + } + default: + return nullptr; + } +} + +void Encoder::encode(const RemotingCommandPtr& command, Buffer::Instance& data) { + + ProtobufWkt::Struct command_struct; + auto* fields = command_struct.mutable_fields(); + + ProtobufWkt::Value code_v; + code_v.set_number_value(command->code_); + (*fields)["code"] = code_v; + + ProtobufWkt::Value language_v; + language_v.set_string_value(command->language()); + (*fields)["language"] = language_v; + + ProtobufWkt::Value version_v; + version_v.set_number_value(command->version_); + (*fields)["version"] = version_v; + + ProtobufWkt::Value opaque_v; + opaque_v.set_number_value(command->opaque_); + (*fields)["opaque"] = opaque_v; + + ProtobufWkt::Value flag_v; + flag_v.set_number_value(command->flag_); + (*fields)["flag"] = flag_v; + + if (!command->remark_.empty()) { + ProtobufWkt::Value remark_v; + remark_v.set_string_value(command->remark_); + (*fields)["remark"] = remark_v; + } + + ProtobufWkt::Value serialization_type_v; + serialization_type_v.set_string_value(command->serializeTypeCurrentRPC()); + (*fields)["serializeTypeCurrentRPC"] = serialization_type_v; + + if (command->custom_header_) { + ProtobufWkt::Value ext_fields_v; + command->custom_header_->encode(ext_fields_v); + (*fields)["extFields"] = ext_fields_v; + } + + std::string json = MessageUtil::getJsonStringFromMessage(command_struct); + + int32_t frame_length = 4; + int32_t header_length = json.size(); + frame_length += header_length; + frame_length += command->bodyLength(); + + data.writeBEInt(frame_length); + data.writeBEInt(header_length); + data.add(json); + + // add body + if (command->bodyLength() > 0) { + data.add(command->body()); + } +} + +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/filters/network/rocketmq_proxy/codec.h b/source/extensions/filters/network/rocketmq_proxy/codec.h new file mode 100644 index 000000000000..e22502f48b34 --- /dev/null +++ b/source/extensions/filters/network/rocketmq_proxy/codec.h @@ -0,0 +1,81 @@ +#pragma once + +#include +#include +#include + +#include "envoy/common/platform.h" +#include "envoy/network/filter.h" + +#include "common/buffer/buffer_impl.h" +#include "common/common/logger.h" +#include "common/protobuf/utility.h" + +#include "extensions/filters/network/rocketmq_proxy/protocol.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { + +enum MessageVersion : uint32_t { + V1 = (0xAABBCCDDU ^ 1880681586U) + 8U, + V2 = (0xAABBCCDDU ^ 1880681586U) + 4U +}; + +class Decoder : Logger::Loggable { +public: + Decoder() = default; + + ~Decoder() = 
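For reference, the header JSON that Encoder::encode() writes onto the wire for a successful response looks roughly like the constant below. remark and extFields are only emitted when present, and every concrete value here is a placeholder rather than data taken from the patch or its tests.

#include <string>

// Illustrative only: the shape of the JSON header emitted by Encoder::encode()
// for a successful SendMessage response. All values are placeholders.
const std::string kSampleResponseHeaderJson = R"({
  "code": 0,
  "language": "CPP",
  "version": 0,
  "opaque": 1,
  "flag": 1,
  "remark": "OK",
  "serializeTypeCurrentRPC": "JSON",
  "extFields": {
    "msgId": "AC1A3F9C00002A9F0000000000000123",
    "queueId": 0,
    "queueOffset": 100
  }
})";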
default; + + /** + * @param buffer Data buffer to decode. + * @param underflow Indicate if buffer contains enough data in terms of protocol frame. + * @param has_error Indicate if the decoding is successful or not. + * @param request_code Corresponding request code if applies. + * @return Decoded remote command. + */ + static RemotingCommandPtr decode(Buffer::Instance& buffer, bool& underflow, bool& has_error, + int request_code = 0); + + static std::string decodeTopic(Buffer::Instance& buffer, int32_t cursor); + + static int32_t decodeQueueId(Buffer::Instance& buffer, int32_t cursor); + + static int64_t decodeQueueOffset(Buffer::Instance& buffer, int32_t cursor); + + static std::string decodeMsgId(Buffer::Instance& buffer, int32_t cursor); + + static constexpr uint32_t MIN_FRAME_SIZE = 8; + + static constexpr uint32_t MAX_FRAME_SIZE = 4 * 1024 * 1024; + + static constexpr uint32_t FRAME_LENGTH_FIELD_SIZE = 4; + + static constexpr uint32_t FRAME_HEADER_LENGTH_FIELD_SIZE = 4; + +private: + static uint32_t adjustHeaderLength(uint32_t len) { return len & 0xFFFFFFu; } + + static bool isJsonHeader(uint32_t len) { return (len >> 24u) == 0; } + + static CommandCustomHeaderPtr decodeExtHeader(RequestCode code, + ProtobufWkt::Struct& header_struct); + + static CommandCustomHeaderPtr decodeResponseExtHeader(ResponseCode response_code, + ProtobufWkt::Struct& header_struct, + RequestCode request_code); + + static bool isComplete(Buffer::Instance& buffer, int32_t cursor); +}; + +class Encoder { +public: + static void encode(const RemotingCommandPtr& command, Buffer::Instance& buffer); +}; + +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/filters/network/rocketmq_proxy/config.cc b/source/extensions/filters/network/rocketmq_proxy/config.cc new file mode 100644 index 000000000000..02f8da69c41f --- /dev/null +++ b/source/extensions/filters/network/rocketmq_proxy/config.cc @@ -0,0 +1,65 @@ +#include "extensions/filters/network/rocketmq_proxy/config.h" + +#include + +#include "envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.pb.h" +#include "envoy/registry/registry.h" +#include "envoy/server/filter_config.h" + +#include "extensions/filters/network/rocketmq_proxy/conn_manager.h" +#include "extensions/filters/network/rocketmq_proxy/stats.h" +#include "extensions/filters/network/well_known_names.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { + +namespace rocketmq_config = envoy::extensions::filters::network::rocketmq_proxy::v3; + +Network::FilterFactoryCb RocketmqProxyFilterConfigFactory::createFilterFactoryFromProtoTyped( + const rocketmq_config::RocketmqProxy& proto_config, + Server::Configuration::FactoryContext& context) { + std::shared_ptr filter_config = std::make_shared(proto_config, context); + return [filter_config, &context](Network::FilterManager& filter_manager) -> void { + filter_manager.addReadFilter( + std::make_shared(*filter_config, context.dispatcher().timeSource())); + }; +} + +REGISTER_FACTORY(RocketmqProxyFilterConfigFactory, + Server::Configuration::NamedNetworkFilterConfigFactory); + +ConfigImpl::ConfigImpl(const RocketmqProxyConfig& config, + Server::Configuration::FactoryContext& context) + : context_(context), stats_prefix_(fmt::format("rocketmq.{}.", config.stat_prefix())), + stats_(RocketmqFilterStats::generateStats(stats_prefix_, context_.scope())), + route_matcher_(new 
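The two private helpers declared here document the layout of the 4-byte field that follows the frame length: the top byte carries the serialization type (0 means JSON) and the low 24 bits carry the actual header length. A standalone sketch of the packing and unpacking, mirroring adjustHeaderLength() and isJsonHeader() (packHeaderField is a made-up name):

#include <cstdint>

// Top byte: serialization type (0 == JSON). Low 24 bits: header length in bytes.
constexpr uint32_t packHeaderField(uint8_t serialization_type, uint32_t header_length) {
  return (static_cast<uint32_t>(serialization_type) << 24) | (header_length & 0xFFFFFFu);
}
constexpr uint32_t headerLength(uint32_t field) { return field & 0xFFFFFFu; } // adjustHeaderLength()
constexpr bool isJson(uint32_t field) { return (field >> 24u) == 0; }         // isJsonHeader()

static_assert(headerLength(packHeaderField(0, 120)) == 120, "length round-trips");
static_assert(isJson(packHeaderField(0, 120)), "type 0 is JSON");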
Router::RouteMatcher(config.route_config())), + develop_mode_(config.develop_mode()), + transient_object_life_span_(PROTOBUF_GET_MS_OR_DEFAULT(config, transient_object_life_span, + TransientObjectLifeSpan)) {} + +std::string ConfigImpl::proxyAddress() { + const LocalInfo::LocalInfo& localInfo = context_.getServerFactoryContext().localInfo(); + Network::Address::InstanceConstSharedPtr address = localInfo.address(); + if (address->type() == Network::Address::Type::Ip) { + const std::string& ip = address->ip()->addressAsString(); + std::string proxyAddr{ip}; + if (address->ip()->port()) { + return proxyAddr.append(":").append(std::to_string(address->ip()->port())); + } else { + ENVOY_LOG(trace, "Local info does not have port specified, defaulting to 10000"); + return proxyAddr.append(":10000"); + } + } + return address->asString(); +} + +Router::RouteConstSharedPtr ConfigImpl::route(const MessageMetadata& metadata) const { + return route_matcher_->route(metadata); +} + +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/filters/network/rocketmq_proxy/config.h b/source/extensions/filters/network/rocketmq_proxy/config.h new file mode 100644 index 000000000000..df5cbe7c9711 --- /dev/null +++ b/source/extensions/filters/network/rocketmq_proxy/config.h @@ -0,0 +1,72 @@ +#pragma once + +#include +#include + +#include "envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.pb.h" +#include "envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.pb.validate.h" + +#include "extensions/filters/network/common/factory_base.h" +#include "extensions/filters/network/rocketmq_proxy/conn_manager.h" +#include "extensions/filters/network/rocketmq_proxy/router/route_matcher.h" +#include "extensions/filters/network/rocketmq_proxy/router/router_impl.h" +#include "extensions/filters/network/well_known_names.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { + +class RocketmqProxyFilterConfigFactory + : public Common::FactoryBase< + envoy::extensions::filters::network::rocketmq_proxy::v3::RocketmqProxy> { +public: + RocketmqProxyFilterConfigFactory() : FactoryBase(NetworkFilterNames::get().RocketmqProxy, true) {} + +private: + Network::FilterFactoryCb createFilterFactoryFromProtoTyped( + const envoy::extensions::filters::network::rocketmq_proxy::v3::RocketmqProxy& proto_config, + Server::Configuration::FactoryContext& context) override; +}; + +class ConfigImpl : public Config, public Router::Config, Logger::Loggable { +public: + using RocketmqProxyConfig = + envoy::extensions::filters::network::rocketmq_proxy::v3::RocketmqProxy; + + ConfigImpl(const RocketmqProxyConfig& config, Server::Configuration::FactoryContext& context); + ~ConfigImpl() override = default; + + // Config + RocketmqFilterStats& stats() override { return stats_; } + Upstream::ClusterManager& clusterManager() override { return context_.clusterManager(); } + Router::RouterPtr createRouter() override { + return std::make_unique(context_.clusterManager()); + } + bool developMode() const override { return develop_mode_; } + + std::chrono::milliseconds transientObjectLifeSpan() const override { + return transient_object_life_span_; + } + + std::string proxyAddress() override; + Router::Config& routerConfig() override { return *this; } + + // Router::Config + Router::RouteConstSharedPtr route(const MessageMetadata& metadata) const override; + +private: + 
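proxyAddress() formats the local address as "ip:port" and falls back to port 10000 when the local info carries no port. A trivial standalone equivalent of that formatting rule, shown only to make the expected output shape (for example "10.0.0.1:10000") explicit; formatProxyAddress is a made-up helper:

#include <cstdint>
#include <string>

// Made-up helper mirroring ConfigImpl::proxyAddress(): "ip:port", defaulting to 10000.
std::string formatProxyAddress(const std::string& ip, uint32_t port) {
  return ip + ":" + std::to_string(port != 0 ? port : 10000);
}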
Server::Configuration::FactoryContext& context_; + const std::string stats_prefix_; + RocketmqFilterStats stats_; + Router::RouteMatcherPtr route_matcher_; + const bool develop_mode_; + std::chrono::milliseconds transient_object_life_span_; + + static constexpr uint64_t TransientObjectLifeSpan = 30 * 1000; +}; + +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/filters/network/rocketmq_proxy/conn_manager.cc b/source/extensions/filters/network/rocketmq_proxy/conn_manager.cc new file mode 100644 index 000000000000..a613998d53a0 --- /dev/null +++ b/source/extensions/filters/network/rocketmq_proxy/conn_manager.cc @@ -0,0 +1,376 @@ +#include "extensions/filters/network/rocketmq_proxy/conn_manager.h" + +#include "envoy/buffer/buffer.h" +#include "envoy/network/connection.h" + +#include "common/common/enum_to_int.h" +#include "common/protobuf/utility.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { + +ConsumerGroupMember::ConsumerGroupMember(absl::string_view client_id, + ConnectionManager& conn_manager) + : client_id_(client_id.data(), client_id.size()), connection_manager_(&conn_manager), + last_(connection_manager_->time_source_.monotonicTime()) {} + +void ConsumerGroupMember::refresh() { last_ = connection_manager_->time_source_.monotonicTime(); } + +bool ConsumerGroupMember::expired() const { + auto duration = connection_manager_->time_source_.monotonicTime() - last_; + return std::chrono::duration_cast(duration).count() > + connection_manager_->config().transientObjectLifeSpan().count(); +} + +ConnectionManager::ConnectionManager(Config& config, TimeSource& time_source) + : config_(config), time_source_(time_source), stats_(config.stats()) {} + +Envoy::Network::FilterStatus ConnectionManager::onData(Envoy::Buffer::Instance& data, + bool end_stream) { + ENVOY_CONN_LOG(trace, "rocketmq_proxy: received {} bytes.", read_callbacks_->connection(), + data.length()); + request_buffer_.move(data); + dispatch(); + if (end_stream) { + resetAllActiveMessages("Connection to downstream is closed"); + read_callbacks_->connection().close(Envoy::Network::ConnectionCloseType::FlushWrite); + } + return Network::FilterStatus::StopIteration; +} + +void ConnectionManager::dispatch() { + if (request_buffer_.length() < Decoder::MIN_FRAME_SIZE) { + ENVOY_CONN_LOG(warn, "rocketmq_proxy: request buffer length is less than min frame size: {}", + read_callbacks_->connection(), request_buffer_.length()); + return; + } + + bool underflow = false; + bool has_decode_error = false; + while (!underflow) { + RemotingCommandPtr request = Decoder::decode(request_buffer_, underflow, has_decode_error); + if (underflow) { + // Wait for more data + break; + } + stats_.request_.inc(); + + // Decode error, we need to close connection immediately. + if (has_decode_error) { + ENVOY_CONN_LOG(error, "Failed to decode request, close connection immediately", + read_callbacks_->connection()); + stats_.request_decoding_error_.inc(); + resetAllActiveMessages("Failed to decode data from downstream. 
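ConsumerGroupMember::expired() compares the time elapsed since the last heartbeat against Config::transientObjectLifeSpan(), which this config.h defaults to 30 seconds (TransientObjectLifeSpan = 30 * 1000 ms). A standalone sketch of that check with std::chrono; isExpired is a made-up name, and only the 30-second default is taken from the patch:

#include <chrono>

// Illustrative only: a member is considered expired once it has not been
// refreshed within the configured life span (30'000 ms by default in this patch).
bool isExpired(std::chrono::steady_clock::time_point last_refresh,
               std::chrono::steady_clock::time_point now,
               std::chrono::milliseconds life_span = std::chrono::milliseconds(30000)) {
  return std::chrono::duration_cast<std::chrono::milliseconds>(now - last_refresh) > life_span;
}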
Close connection immediately"); + read_callbacks_->connection().close(Envoy::Network::ConnectionCloseType::FlushWrite); + return; + } else { + stats_.request_decoding_success_.inc(); + } + + switch (static_cast(request->code())) { + case RequestCode::GetRouteInfoByTopic: { + ENVOY_CONN_LOG(trace, "GetTopicRoute request, code: {}, opaque: {}", + read_callbacks_->connection(), request->code(), request->opaque()); + onGetTopicRoute(std::move(request)); + } break; + + case RequestCode::UnregisterClient: { + ENVOY_CONN_LOG(trace, "process unregister client request, code: {}, opaque: {}", + read_callbacks_->connection(), request->code(), request->opaque()); + onUnregisterClient(std::move(request)); + } break; + + case RequestCode::SendMessage: { + ENVOY_CONN_LOG(trace, "SendMessage request, code: {}, opaque: {}", + read_callbacks_->connection(), request->code(), request->opaque()); + onSendMessage(std::move(request)); + stats_.send_message_v1_.inc(); + } break; + + case RequestCode::SendMessageV2: { + ENVOY_CONN_LOG(trace, "SendMessage request, code: {}, opaque: {}", + read_callbacks_->connection(), request->code(), request->opaque()); + onSendMessage(std::move(request)); + stats_.send_message_v2_.inc(); + } break; + + case RequestCode::GetConsumerListByGroup: { + ENVOY_CONN_LOG(trace, "GetConsumerListByGroup request, code: {}, opaque: {}", + read_callbacks_->connection(), request->code(), request->opaque()); + onGetConsumerListByGroup(std::move(request)); + } break; + + case RequestCode::PopMessage: { + ENVOY_CONN_LOG(trace, "PopMessage request, code: {}, opaque: {}", + read_callbacks_->connection(), request->code(), request->opaque()); + onPopMessage(std::move(request)); + stats_.pop_message_.inc(); + } break; + + case RequestCode::AckMessage: { + ENVOY_CONN_LOG(trace, "AckMessage request, code: {}, opaque: {}", + read_callbacks_->connection(), request->code(), request->opaque()); + onAckMessage(std::move(request)); + stats_.ack_message_.inc(); + } break; + + case RequestCode::HeartBeat: { + ENVOY_CONN_LOG(trace, "Heartbeat request, opaque: {}", read_callbacks_->connection(), + request->opaque()); + onHeartbeat(std::move(request)); + } break; + + default: { + ENVOY_CONN_LOG(warn, "Request code {} not supported yet", read_callbacks_->connection(), + request->code()); + std::string error_msg("Request not supported"); + onError(request, error_msg); + } break; + } + } +} + +void ConnectionManager::purgeDirectiveTable() { + auto current = time_source_.monotonicTime(); + for (auto it = ack_directive_table_.begin(); it != ack_directive_table_.end();) { + auto duration = current - it->second.creation_time_; + if (std::chrono::duration_cast(duration).count() > + config_.transientObjectLifeSpan().count()) { + ack_directive_table_.erase(it++); + } else { + it++; + } + } +} + +void ConnectionManager::sendResponseToDownstream(RemotingCommandPtr& response) { + Buffer::OwnedImpl buffer; + Encoder::encode(response, buffer); + if (read_callbacks_->connection().state() == Network::Connection::State::Open) { + ENVOY_CONN_LOG(trace, "Write response to downstream. 
Opaque: {}", read_callbacks_->connection(), + response->opaque()); + read_callbacks_->connection().write(buffer, false); + } else { + ENVOY_CONN_LOG(error, "Send response to downstream failed as connection is no longer open", + read_callbacks_->connection()); + } +} + +void ConnectionManager::onGetTopicRoute(RemotingCommandPtr request) { + createActiveMessage(request).onQueryTopicRoute(); + stats_.get_topic_route_.inc(); +} + +void ConnectionManager::onHeartbeat(RemotingCommandPtr request) { + const std::string& body = request->body().toString(); + + purgeDirectiveTable(); + + ProtobufWkt::Struct body_struct; + try { + MessageUtil::loadFromJson(body, body_struct); + } catch (std::exception& e) { + ENVOY_LOG(warn, "Failed to decode heartbeat body. Error message: {}", e.what()); + return; + } + + HeartbeatData heartbeatData; + if (!heartbeatData.decode(body_struct)) { + ENVOY_LOG(warn, "Failed to decode heartbeat data"); + return; + } + + for (const auto& group : heartbeatData.consumerGroups()) { + addOrUpdateGroupMember(group, heartbeatData.clientId()); + } + + RemotingCommandPtr response = std::make_unique(); + response->code(enumToSignedInt(ResponseCode::Success)); + response->opaque(request->opaque()); + response->remark("Heartbeat OK"); + response->markAsResponse(); + sendResponseToDownstream(response); + stats_.heartbeat_.inc(); +} + +void ConnectionManager::addOrUpdateGroupMember(absl::string_view group, + absl::string_view client_id) { + ENVOY_LOG(trace, "#addOrUpdateGroupMember. Group: {}, client ID: {}", group, client_id); + auto search = group_members_.find(std::string(group.data(), group.length())); + if (search == group_members_.end()) { + std::vector members; + members.emplace_back(ConsumerGroupMember(client_id, *this)); + group_members_.emplace(std::string(group.data(), group.size()), members); + } else { + std::vector& members = search->second; + for (auto it = members.begin(); it != members.end();) { + if (it->clientId() == client_id) { + it->refresh(); + ++it; + } else if (it->expired()) { + it = members.erase(it); + } else { + ++it; + } + } + if (members.empty()) { + group_members_.erase(search); + } + } +} + +void ConnectionManager::onUnregisterClient(RemotingCommandPtr request) { + auto header = request->typedCustomHeader(); + ASSERT(header != nullptr); + ASSERT(!header->clientId().empty()); + ENVOY_LOG(trace, "Unregister client ID: {}, producer group: {}, consumer group: {}", + header->clientId(), header->producerGroup(), header->consumerGroup()); + + if (!header->consumerGroup().empty()) { + auto search = group_members_.find(header->consumerGroup()); + if (search != group_members_.end()) { + std::vector& members = search->second; + for (auto it = members.begin(); it != members.end();) { + if (it->clientId() == header->clientId()) { + it = members.erase(it); + } else if (it->expired()) { + it = members.erase(it); + } else { + ++it; + } + } + if (members.empty()) { + group_members_.erase(search); + } + } + } + + RemotingCommandPtr response = std::make_unique( + enumToSignedInt(ResponseCode::Success), request->version(), request->opaque()); + response->markAsResponse(); + response->remark("Envoy unregister client OK."); + sendResponseToDownstream(response); + stats_.unregister_.inc(); +} + +void ConnectionManager::onError(RemotingCommandPtr& request, absl::string_view error_msg) { + Buffer::OwnedImpl buffer; + RemotingCommandPtr response = std::make_unique(); + response->markAsResponse(); + response->opaque(request->opaque()); + 
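addOrUpdateGroupMember() doubles as garbage collection: while looking for the heartbeating client it refreshes the matching member and erases any member whose heartbeat has expired. The standalone sketch below captures just that sweep; Member and refreshOrSweep are simplified stand-ins, not the classes from this patch.

#include <string>
#include <vector>

struct Member {
  std::string client_id;
  bool expired;
};

// Refresh the heartbeating client and drop expired members in a single pass.
void refreshOrSweep(std::vector<Member>& members, const std::string& client_id) {
  for (auto it = members.begin(); it != members.end();) {
    if (it->client_id == client_id) {
      it->expired = false; // stand-in for ConsumerGroupMember::refresh()
      ++it;
    } else if (it->expired) {
      it = members.erase(it);
    } else {
      ++it;
    }
  }
}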
response->code(enumToSignedInt(ResponseCode::SystemError)); + response->remark(error_msg); + sendResponseToDownstream(response); +} + +void ConnectionManager::onSendMessage(RemotingCommandPtr request) { + ENVOY_CONN_LOG(trace, "#onSendMessage, opaque: {}", read_callbacks_->connection(), + request->opaque()); + auto header = request->typedCustomHeader(); + header->queueId(-1); + createActiveMessage(request).sendRequestToUpstream(); +} + +void ConnectionManager::onGetConsumerListByGroup(RemotingCommandPtr request) { + auto requestExtHeader = request->typedCustomHeader(); + + ASSERT(requestExtHeader != nullptr); + ASSERT(!requestExtHeader->consumerGroup().empty()); + + ENVOY_LOG(trace, "#onGetConsumerListByGroup, consumer group: {}", + requestExtHeader->consumerGroup()); + + auto search = group_members_.find(requestExtHeader->consumerGroup()); + GetConsumerListByGroupResponseBody getConsumerListByGroupResponseBody; + if (search != group_members_.end()) { + std::vector& members = search->second; + std::sort(members.begin(), members.end()); + for (const auto& member : members) { + getConsumerListByGroupResponseBody.add(member.clientId()); + } + } else { + ENVOY_LOG(warn, "There is no consumer belongs to consumer_group: {}", + requestExtHeader->consumerGroup()); + } + ProtobufWkt::Struct body_struct; + + getConsumerListByGroupResponseBody.encode(body_struct); + + RemotingCommandPtr response = std::make_unique( + enumToSignedInt(ResponseCode::Success), request->version(), request->opaque()); + response->markAsResponse(); + std::string json = MessageUtil::getJsonStringFromMessage(body_struct); + response->body().add(json); + ENVOY_LOG(trace, "GetConsumerListByGroup respond with body: {}", json); + + sendResponseToDownstream(response); + stats_.get_consumer_list_.inc(); +} + +void ConnectionManager::onPopMessage(RemotingCommandPtr request) { + auto header = request->typedCustomHeader(); + ASSERT(header != nullptr); + ENVOY_LOG(trace, "#onPopMessage. Consumer group: {}, topic: {}", header->consumerGroup(), + header->topic()); + createActiveMessage(request).sendRequestToUpstream(); +} + +void ConnectionManager::onAckMessage(RemotingCommandPtr request) { + auto header = request->typedCustomHeader(); + ASSERT(header != nullptr); + ENVOY_LOG( + trace, + "#onAckMessage. Consumer group: {}, topic: {}, queue Id: {}, offset: {}, extra-info: {}", + header->consumerGroup(), header->topic(), header->queueId(), header->offset(), + header->extraInfo()); + + // Fill the target broker_name and broker_id routing directive + auto it = ack_directive_table_.find(header->directiveKey()); + if (it == ack_directive_table_.end()) { + ENVOY_LOG(warn, "There was no previous ack directive available, which is unexpected"); + onError(request, "No ack directive is found"); + return; + } + header->targetBrokerName(it->second.broker_name_); + header->targetBrokerId(it->second.broker_id_); + + createActiveMessage(request).sendRequestToUpstream(); +} + +ActiveMessage& ConnectionManager::createActiveMessage(RemotingCommandPtr& request) { + ENVOY_CONN_LOG(trace, "ConnectionManager#createActiveMessage. 
Code: {}, opaque: {}", + read_callbacks_->connection(), request->code(), request->opaque()); + ActiveMessagePtr active_message = std::make_unique(*this, std::move(request)); + active_message->moveIntoList(std::move(active_message), active_message_list_); + return **active_message_list_.begin(); +} + +void ConnectionManager::deferredDelete(ActiveMessage& active_message) { + read_callbacks_->connection().dispatcher().deferredDelete( + active_message.removeFromList(active_message_list_)); +} + +void ConnectionManager::resetAllActiveMessages(absl::string_view error_msg) { + while (!active_message_list_.empty()) { + ENVOY_CONN_LOG(warn, "Reset pending request {} due to error: {}", read_callbacks_->connection(), + active_message_list_.front()->downstreamRequest()->opaque(), error_msg); + active_message_list_.front()->onReset(); + stats_.response_error_.inc(); + } +} + +Envoy::Network::FilterStatus ConnectionManager::onNewConnection() { + return Network::FilterStatus::Continue; +} + +void ConnectionManager::initializeReadFilterCallbacks( + Envoy::Network::ReadFilterCallbacks& callbacks) { + read_callbacks_ = &callbacks; +} + +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/rocketmq_proxy/conn_manager.h b/source/extensions/filters/network/rocketmq_proxy/conn_manager.h new file mode 100644 index 000000000000..e69237b6cae7 --- /dev/null +++ b/source/extensions/filters/network/rocketmq_proxy/conn_manager.h @@ -0,0 +1,215 @@ +#pragma once + +#include + +#include "envoy/common/time.h" +#include "envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.pb.h" +#include "envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.pb.validate.h" +#include "envoy/network/connection.h" +#include "envoy/network/filter.h" +#include "envoy/stats/scope.h" +#include "envoy/stats/stats.h" +#include "envoy/stats/stats_macros.h" +#include "envoy/stats/timespan.h" +#include "envoy/upstream/thread_local_cluster.h" + +#include "common/buffer/buffer_impl.h" +#include "common/common/logger.h" + +#include "extensions/filters/network/rocketmq_proxy/active_message.h" +#include "extensions/filters/network/rocketmq_proxy/codec.h" +#include "extensions/filters/network/rocketmq_proxy/stats.h" + +#include "absl/container/flat_hash_map.h" +#include "absl/strings/string_view.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { + +class Config { +public: + virtual ~Config() = default; + + virtual RocketmqFilterStats& stats() PURE; + + virtual Upstream::ClusterManager& clusterManager() PURE; + + virtual Router::RouterPtr createRouter() PURE; + + /** + * Indicate whether this proxy is running in develop mode. Once set true, this proxy plugin may + * work without dedicated traffic intercepting facility without considering backward + * compatibility. + * @return true when in development mode; false otherwise. + */ + virtual bool developMode() const PURE; + + virtual std::string proxyAddress() PURE; + + virtual Router::Config& routerConfig() PURE; + + virtual std::chrono::milliseconds transientObjectLifeSpan() const PURE; +}; + +class ConnectionManager; + +/** + * This class is to ensure legacy RocketMQ SDK works. Heartbeat between client SDK and envoy is not + * necessary any more and should be removed once the lite SDK is in-place. 
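These functions define the per-connection request lifecycle: createActiveMessage() links a new ActiveMessage at the front of active_message_list_, deferredDelete() unlinks it and hands it to the connection's dispatcher so it is destroyed after the current callback unwinds, and resetAllActiveMessages() drains the list on fatal errors. A simplified stand-in for that ownership pattern follows; Message, create and deferDelete are made-up names, whereas the real code uses LinkedObject and Event::DeferredDeletable.

#include <list>
#include <memory>

// Simplified stand-in: the connection owns in-flight messages through a list of
// unique_ptr. Completing a message means unlinking it from the active list and
// destroying it later, outside the callback that may still be running on it.
struct Message {
  explicit Message(int opaque) : opaque_(opaque) {}
  int opaque_;
};

std::list<std::unique_ptr<Message>> active;
std::list<std::unique_ptr<Message>> pending_deletion;

Message& create(int opaque) {
  active.emplace_front(std::make_unique<Message>(opaque));
  return *active.front();
}

void deferDelete(std::list<std::unique_ptr<Message>>::iterator it) {
  // Unlink now, free later (cf. dispatcher().deferredDelete() above).
  pending_deletion.splice(pending_deletion.end(), active, it);
}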
+ */ +class ConsumerGroupMember { +public: + ConsumerGroupMember(absl::string_view client_id, ConnectionManager& conn_manager); + + bool operator==(const ConsumerGroupMember& other) const { return client_id_ == other.client_id_; } + + bool operator<(const ConsumerGroupMember& other) const { return client_id_ < other.client_id_; } + + void refresh(); + + bool expired() const; + + absl::string_view clientId() const { return client_id_; } + + void setLastForTest(MonotonicTime tp) { last_ = tp; } + +private: + std::string client_id_; + ConnectionManager* connection_manager_; + MonotonicTime last_; +}; + +class ConnectionManager : public Network::ReadFilter, Logger::Loggable { +public: + ConnectionManager(Config& config, TimeSource& time_source); + + ~ConnectionManager() override = default; + + /** + * Called when data is read on the connection. + * @param data supplies the read data which may be modified. + * @param end_stream supplies whether this is the last byte on the connection. This will only + * be set if the connection has half-close semantics enabled. + * @return status used by the filter manager to manage further filter iteration. + */ + Network::FilterStatus onData(Buffer::Instance& data, bool end_stream) override; + + /** + * Called when a connection is first established. Filters should do one time long term processing + * that needs to be done when a connection is established. Filter chain iteration can be stopped + * if needed. + * @return status used by the filter manager to manage further filter iteration. + */ + Network::FilterStatus onNewConnection() override; + + /** + * Initializes the read filter callbacks used to interact with the filter manager. It will be + * called by the filter manager a single time when the filter is first registered. Thus, any + * construction that requires the backing connection should take place in the context of this + * function. + * + * IMPORTANT: No outbound networking or complex processing should be done in this function. + * That should be done in the context of onNewConnection() if needed. + * + * @param callbacks supplies the callbacks. + */ + void initializeReadFilterCallbacks(Network::ReadFilterCallbacks&) override; + + /** + * Send response to downstream either when envoy proxy has received result from upstream hosts or + * the proxy itself may serve the request. + * @param response Response to write to downstream with identical opaque number. + */ + void sendResponseToDownstream(RemotingCommandPtr& response); + + void onGetTopicRoute(RemotingCommandPtr request); + + /** + * Called when downstream sends heartbeat requests. 
+ * @param request heartbeat request from downstream + */ + void onHeartbeat(RemotingCommandPtr request); + + void addOrUpdateGroupMember(absl::string_view group, absl::string_view client_id); + + void onUnregisterClient(RemotingCommandPtr request); + + void onError(RemotingCommandPtr& request, absl::string_view error_msg); + + void onSendMessage(RemotingCommandPtr request); + + void onGetConsumerListByGroup(RemotingCommandPtr request); + + void onPopMessage(RemotingCommandPtr request); + + void onAckMessage(RemotingCommandPtr request); + + ActiveMessage& createActiveMessage(RemotingCommandPtr& request); + + void deferredDelete(ActiveMessage& active_message); + + void resetAllActiveMessages(absl::string_view error_msg); + + Config& config() { return config_; } + + RocketmqFilterStats& stats() { return stats_; } + + absl::flat_hash_map>& groupMembersForTest() { + return group_members_; + } + + std::list& activeMessageList() { return active_message_list_; } + + void insertAckDirective(const std::string& key, const AckMessageDirective& directive) { + ack_directive_table_.insert(std::make_pair(key, directive)); + } + + void eraseAckDirective(const std::string& key) { + auto it = ack_directive_table_.find(key); + if (it != ack_directive_table_.end()) { + ack_directive_table_.erase(it); + } + } + + TimeSource& timeSource() const { return time_source_; } + + const absl::flat_hash_map& getAckDirectiveTableForTest() const { + return ack_directive_table_; + } + + friend class ConsumerGroupMember; + +private: + /** + * Dispatch incoming requests from downstream to run through filter chains. + */ + void dispatch(); + + /** + * Invoked by heartbeat to purge deprecated ack_directive entries. + */ + void purgeDirectiveTable(); + + Network::ReadFilterCallbacks* read_callbacks_{}; + Buffer::OwnedImpl request_buffer_; + + Config& config_; + TimeSource& time_source_; + RocketmqFilterStats& stats_; + + std::list active_message_list_; + + absl::flat_hash_map> group_members_; + + /** + * Message unique key to message acknowledge directive mapping. + * Acknowledge requests first consult this table to determine which host in the cluster to go. 
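The ack-directive table documented here is what ties a Pop response to the later Ack request: while relaying a Pop response, ActiveMessage::fillAckMessageDirective() records which broker served the messages, and onAckMessage() later looks the directive up by key so the Ack is pinned to that broker via targetBrokerName/targetBrokerId. A hedged sketch of the bookkeeping with a plain map; Directive, recordDirective and lookupDirective are stand-ins, and the real AckMessageDirective also carries a creation time used by purgeDirectiveTable().

#include <cstdint>
#include <optional>
#include <string>
#include <unordered_map>

// Stand-in for AckMessageDirective: which broker the popped messages came from.
struct Directive {
  std::string broker_name;
  int64_t broker_id;
};

std::unordered_map<std::string, Directive> ack_directives;

// Recorded while relaying a Pop response (cf. fillAckMessageDirective()).
void recordDirective(const std::string& key, Directive d) { ack_directives[key] = std::move(d); }

// Consulted when the matching Ack request arrives (cf. onAckMessage()).
std::optional<Directive> lookupDirective(const std::string& key) {
  auto it = ack_directives.find(key);
  if (it == ack_directives.end()) {
    return std::nullopt;
  }
  return it->second;
}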
+ */ + absl::flat_hash_map ack_directive_table_; +}; +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/filters/network/rocketmq_proxy/metadata.h b/source/extensions/filters/network/rocketmq_proxy/metadata.h new file mode 100644 index 000000000000..8fca6ab7811a --- /dev/null +++ b/source/extensions/filters/network/rocketmq_proxy/metadata.h @@ -0,0 +1,43 @@ +#pragma once + +#include + +#include "common/http/header_map_impl.h" + +#include "absl/types/optional.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { + +class MessageMetadata { +public: + MessageMetadata() = default; + + void setOneWay(bool oneway) { is_oneway_ = oneway; } + bool isOneWay() const { return is_oneway_; } + + bool hasTopicName() const { return topic_name_.has_value(); } + const std::string& topicName() const { return topic_name_.value(); } + void setTopicName(const std::string& topic_name) { topic_name_ = topic_name; } + + /** + * @return HeaderMap of current headers + */ + const Http::HeaderMap& headers() const { return headers_; } + Http::HeaderMap& headers() { return headers_; } + +private: + bool is_oneway_{false}; + absl::optional topic_name_{}; + + Http::HeaderMapImpl headers_; +}; + +using MessageMetadataSharedPtr = std::shared_ptr; + +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/filters/network/rocketmq_proxy/protocol.cc b/source/extensions/filters/network/rocketmq_proxy/protocol.cc new file mode 100644 index 000000000000..e16481cc453f --- /dev/null +++ b/source/extensions/filters/network/rocketmq_proxy/protocol.cc @@ -0,0 +1,749 @@ +#include "extensions/filters/network/rocketmq_proxy/protocol.h" + +#include "common/common/assert.h" +#include "common/common/enum_to_int.h" + +#include "extensions/filters/network/rocketmq_proxy/well_known_names.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { + +void SendMessageRequestHeader::encode(ProtobufWkt::Value& root) { + auto& members = *(root.mutable_struct_value()->mutable_fields()); + + switch (version_) { + case SendMessageRequestVersion::V1: { + ProtobufWkt::Value producer_group_v; + producer_group_v.set_string_value(producer_group_); + members["producerGroup"] = producer_group_v; + + ProtobufWkt::Value topic_v; + topic_v.set_string_value(topic_.c_str(), topic_.length()); + members["topic"] = topic_v; + + ProtobufWkt::Value default_topic_v; + default_topic_v.set_string_value(default_topic_); + members["defaultTopic"] = default_topic_v; + + ProtobufWkt::Value default_topic_queue_number_v; + default_topic_queue_number_v.set_number_value(default_topic_queue_number_); + members["defaultTopicQueueNums"] = default_topic_queue_number_v; + + ProtobufWkt::Value queue_id_v; + queue_id_v.set_number_value(queue_id_); + members["queueId"] = queue_id_v; + + ProtobufWkt::Value sys_flag_v; + sys_flag_v.set_number_value(sys_flag_); + members["sysFlag"] = sys_flag_v; + + ProtobufWkt::Value born_timestamp_v; + born_timestamp_v.set_number_value(born_timestamp_); + members["bornTimestamp"] = born_timestamp_v; + + ProtobufWkt::Value flag_v; + flag_v.set_number_value(flag_); + members["flag"] = flag_v; + + if (!properties_.empty()) { + ProtobufWkt::Value properties_v; + properties_v.set_string_value(properties_.c_str(), properties_.length()); + 
members["properties"] = properties_v; + } + + if (reconsume_time_ > 0) { + ProtobufWkt::Value reconsume_times_v; + reconsume_times_v.set_number_value(reconsume_time_); + members["reconsumeTimes"] = reconsume_times_v; + } + + if (unit_mode_) { + ProtobufWkt::Value unit_mode_v; + unit_mode_v.set_bool_value(unit_mode_); + members["unitMode"] = unit_mode_v; + } + + if (batch_) { + ProtobufWkt::Value batch_v; + batch_v.set_bool_value(batch_); + members["batch"] = batch_v; + } + + if (max_reconsume_time_ > 0) { + ProtobufWkt::Value max_reconsume_time_v; + max_reconsume_time_v.set_number_value(max_reconsume_time_); + members["maxReconsumeTimes"] = max_reconsume_time_v; + } + break; + } + case SendMessageRequestVersion::V2: { + ProtobufWkt::Value producer_group_v; + producer_group_v.set_string_value(producer_group_.c_str(), producer_group_.length()); + members["a"] = producer_group_v; + + ProtobufWkt::Value topic_v; + topic_v.set_string_value(topic_.c_str(), topic_.length()); + members["b"] = topic_v; + + ProtobufWkt::Value default_topic_v; + default_topic_v.set_string_value(default_topic_.c_str(), default_topic_.length()); + members["c"] = default_topic_v; + + ProtobufWkt::Value default_topic_queue_number_v; + default_topic_queue_number_v.set_number_value(default_topic_queue_number_); + members["d"] = default_topic_queue_number_v; + + ProtobufWkt::Value queue_id_v; + queue_id_v.set_number_value(queue_id_); + members["e"] = queue_id_v; + + ProtobufWkt::Value sys_flag_v; + sys_flag_v.set_number_value(sys_flag_); + members["f"] = sys_flag_v; + + ProtobufWkt::Value born_timestamp_v; + born_timestamp_v.set_number_value(born_timestamp_); + members["g"] = born_timestamp_v; + + ProtobufWkt::Value flag_v; + flag_v.set_number_value(flag_); + members["h"] = flag_v; + + if (!properties_.empty()) { + ProtobufWkt::Value properties_v; + properties_v.set_string_value(properties_.c_str(), properties_.length()); + members["i"] = properties_v; + } + + if (reconsume_time_ > 0) { + ProtobufWkt::Value reconsume_times_v; + reconsume_times_v.set_number_value(reconsume_time_); + members["j"] = reconsume_times_v; + } + + if (unit_mode_) { + ProtobufWkt::Value unit_mode_v; + unit_mode_v.set_bool_value(unit_mode_); + members["k"] = unit_mode_v; + } + + if (batch_) { + ProtobufWkt::Value batch_v; + batch_v.set_bool_value(batch_); + members["m"] = batch_v; + } + + if (max_reconsume_time_ > 0) { + ProtobufWkt::Value max_reconsume_time_v; + max_reconsume_time_v.set_number_value(max_reconsume_time_); + members["l"] = max_reconsume_time_v; + } + break; + } + default: + break; + } +} + +void SendMessageRequestHeader::decode(const ProtobufWkt::Value& ext_fields) { + const auto& members = ext_fields.struct_value().fields(); + switch (version_) { + case SendMessageRequestVersion::V1: { + ASSERT(members.contains("producerGroup")); + ASSERT(members.contains("topic")); + ASSERT(members.contains("defaultTopic")); + ASSERT(members.contains("defaultTopicQueueNums")); + ASSERT(members.contains("queueId")); + ASSERT(members.contains("sysFlag")); + ASSERT(members.contains("bornTimestamp")); + ASSERT(members.contains("flag")); + + producer_group_ = members.at("producerGroup").string_value(); + topic_ = members.at("topic").string_value(); + default_topic_ = members.at("defaultTopic").string_value(); + + if (members.at("defaultTopicQueueNums").kind_case() == ProtobufWkt::Value::kNumberValue) { + default_topic_queue_number_ = members.at("defaultTopicQueueNums").number_value(); + } else { + default_topic_queue_number_ = 
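SendMessage V2 is V1 with the JSON keys compressed to single letters; the encode()/decode() pairs in this file spell the correspondence out one field at a time. For quick reference, the mapping below is derived directly from the code above (the constant itself is illustrative and not part of the patch):

#include <array>
#include <utility>

// V1 field name -> V2 single-letter key, as used by
// SendMessageRequestHeader::encode()/decode() in this patch.
constexpr std::array<std::pair<const char*, const char*>, 13> kSendMessageV1ToV2 = {{
    {"producerGroup", "a"},
    {"topic", "b"},
    {"defaultTopic", "c"},
    {"defaultTopicQueueNums", "d"},
    {"queueId", "e"},
    {"sysFlag", "f"},
    {"bornTimestamp", "g"},
    {"flag", "h"},
    {"properties", "i"},
    {"reconsumeTimes", "j"},
    {"unitMode", "k"},
    {"maxReconsumeTimes", "l"},
    {"batch", "m"},
}};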
std::stoi(members.at("defaultTopicQueueNums").string_value()); + } + + if (members.at("queueId").kind_case() == ProtobufWkt::Value::kNumberValue) { + queue_id_ = members.at("queueId").number_value(); + } else { + queue_id_ = std::stoi(members.at("queueId").string_value()); + } + + if (members.at("sysFlag").kind_case() == ProtobufWkt::Value::kNumberValue) { + sys_flag_ = static_cast(members.at("sysFlag").number_value()); + } else { + sys_flag_ = std::stoi(members.at("sysFlag").string_value()); + } + + if (members.at("bornTimestamp").kind_case() == ProtobufWkt::Value::kNumberValue) { + born_timestamp_ = static_cast(members.at("bornTimestamp").number_value()); + } else { + born_timestamp_ = std::stoll(members.at("bornTimestamp").string_value()); + } + + if (members.at("flag").kind_case() == ProtobufWkt::Value::kNumberValue) { + flag_ = static_cast(members.at("flag").number_value()); + } else { + flag_ = std::stoi(members.at("flag").string_value()); + } + + if (members.contains("properties")) { + properties_ = members.at("properties").string_value(); + } + + if (members.contains("reconsumeTimes")) { + if (members.at("reconsumeTimes").kind_case() == ProtobufWkt::Value::kNumberValue) { + reconsume_time_ = members.at("reconsumeTimes").number_value(); + } else { + reconsume_time_ = std::stoi(members.at("reconsumeTimes").string_value()); + } + } + + if (members.contains("unitMode")) { + if (members.at("unitMode").kind_case() == ProtobufWkt::Value::kBoolValue) { + unit_mode_ = members.at("unitMode").bool_value(); + } else { + unit_mode_ = (members.at("unitMode").string_value() == std::string("true")); + } + } + + if (members.contains("batch")) { + if (members.at("batch").kind_case() == ProtobufWkt::Value::kBoolValue) { + batch_ = members.at("batch").bool_value(); + } else { + batch_ = (members.at("batch").string_value() == std::string("true")); + } + } + + if (members.contains("maxReconsumeTimes")) { + if (members.at("maxReconsumeTimes").kind_case() == ProtobufWkt::Value::kNumberValue) { + max_reconsume_time_ = static_cast(members.at("maxReconsumeTimes").number_value()); + } else { + max_reconsume_time_ = std::stoi(members.at("maxReconsumeTimes").string_value()); + } + } + break; + } + + case SendMessageRequestVersion::V2: { + ASSERT(members.contains("a")); + ASSERT(members.contains("b")); + ASSERT(members.contains("c")); + ASSERT(members.contains("d")); + ASSERT(members.contains("e")); + ASSERT(members.contains("f")); + ASSERT(members.contains("g")); + ASSERT(members.contains("h")); + + producer_group_ = members.at("a").string_value(); + topic_ = members.at("b").string_value(); + default_topic_ = members.at("c").string_value(); + + if (members.at("d").kind_case() == ProtobufWkt::Value::kNumberValue) { + default_topic_queue_number_ = members.at("d").number_value(); + } else { + default_topic_queue_number_ = std::stoi(members.at("d").string_value()); + } + + if (members.at("e").kind_case() == ProtobufWkt::Value::kNumberValue) { + queue_id_ = members.at("e").number_value(); + } else { + queue_id_ = std::stoi(members.at("e").string_value()); + } + + if (members.at("f").kind_case() == ProtobufWkt::Value::kNumberValue) { + sys_flag_ = static_cast(members.at("f").number_value()); + } else { + sys_flag_ = std::stoi(members.at("f").string_value()); + } + + if (members.at("g").kind_case() == ProtobufWkt::Value::kNumberValue) { + born_timestamp_ = static_cast(members.at("g").number_value()); + } else { + born_timestamp_ = std::stoll(members.at("g").string_value()); + } + + if (members.at("h").kind_case() == 
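Every numeric field in these decode() functions is read two ways: as a JSON number, or via std::stoi/std::stoll when the client serialized the value as a string (RocketMQ clients commonly ship extFields as a string-to-string map). A hypothetical helper that factors the pattern out; readIntField is not part of this patch:

#include <cstdint>
#include <string>

#include "common/protobuf/utility.h"

// Hypothetical helper, not in this patch: read an extFields entry that may be
// serialized either as a JSON number or as a quoted string.
int64_t readIntField(const ProtobufWkt::Value& field) {
  if (field.kind_case() == ProtobufWkt::Value::kNumberValue) {
    return static_cast<int64_t>(field.number_value());
  }
  return std::stoll(field.string_value());
}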
ProtobufWkt::Value::kNumberValue) { + flag_ = static_cast(members.at("h").number_value()); + } else { + flag_ = std::stoi(members.at("h").string_value()); + } + + if (members.contains("i")) { + properties_ = members.at("i").string_value(); + } + + if (members.contains("j")) { + if (members.at("j").kind_case() == ProtobufWkt::Value::kNumberValue) { + reconsume_time_ = members.at("j").number_value(); + } else { + reconsume_time_ = std::stoi(members.at("j").string_value()); + } + } + + if (members.contains("k")) { + if (members.at("k").kind_case() == ProtobufWkt::Value::kBoolValue) { + unit_mode_ = members.at("k").bool_value(); + } else { + unit_mode_ = (members.at("k").string_value() == std::string("true")); + } + } + + if (members.contains("m")) { + if (members.at("m").kind_case() == ProtobufWkt::Value::kBoolValue) { + batch_ = members.at("m").bool_value(); + } else { + batch_ = (members.at("m").string_value() == std::string("true")); + } + } + + if (members.contains("l")) { + if (members.at("l").kind_case() == ProtobufWkt::Value::kNumberValue) { + max_reconsume_time_ = members.at("l").number_value(); + } else { + max_reconsume_time_ = std::stoi(members.at("l").string_value()); + } + } + break; + } + default: + ENVOY_LOG(error, "Unknown SendMessageRequestVersion: {}", static_cast(version_)); + break; + } +} + +void SendMessageResponseHeader::encode(ProtobufWkt::Value& root) { + auto& members = *(root.mutable_struct_value()->mutable_fields()); + + ASSERT(!msg_id_.empty()); + ProtobufWkt::Value msg_id_v; + msg_id_v.set_string_value(msg_id_.c_str(), msg_id_.length()); + members["msgId"] = msg_id_v; + + ASSERT(queue_id_ >= 0); + ProtobufWkt::Value queue_id_v; + queue_id_v.set_number_value(queue_id_); + members["queueId"] = queue_id_v; + + ASSERT(queue_offset_ >= 0); + ProtobufWkt::Value queue_offset_v; + queue_offset_v.set_number_value(queue_offset_); + members["queueOffset"] = queue_offset_v; + + if (!transaction_id_.empty()) { + ProtobufWkt::Value transaction_id_v; + transaction_id_v.set_string_value(transaction_id_.c_str(), transaction_id_.length()); + members["transactionId"] = transaction_id_v; + } +} + +void SendMessageResponseHeader::decode(const ProtobufWkt::Value& ext_fields) { + const auto& members = ext_fields.struct_value().fields(); + ASSERT(members.contains("msgId")); + ASSERT(members.contains("queueId")); + ASSERT(members.contains("queueOffset")); + + msg_id_ = members.at("msgId").string_value(); + + if (members.at("queueId").kind_case() == ProtobufWkt::Value::kNumberValue) { + queue_id_ = members.at("queueId").number_value(); + } else { + queue_id_ = std::stoi(members.at("queueId").string_value()); + } + + if (members.at("queueOffset").kind_case() == ProtobufWkt::Value::kNumberValue) { + queue_offset_ = members.at("queueOffset").number_value(); + } else { + queue_offset_ = std::stoll(members.at("queueOffset").string_value()); + } + + if (members.contains("transactionId")) { + transaction_id_ = members.at("transactionId").string_value(); + } +} + +void GetRouteInfoRequestHeader::encode(ProtobufWkt::Value& root) { + auto& members = *(root.mutable_struct_value()->mutable_fields()); + + ProtobufWkt::Value topic_v; + topic_v.set_string_value(topic_.c_str(), topic_.length()); + members["topic"] = topic_v; +} + +void GetRouteInfoRequestHeader::decode(const ProtobufWkt::Value& ext_fields) { + const auto& members = ext_fields.struct_value().fields(); + ASSERT(members.contains("topic")); + topic_ = members.at("topic").string_value(); +} + +void 
PopMessageRequestHeader::encode(ProtobufWkt::Value& root) { + auto& members = *(root.mutable_struct_value()->mutable_fields()); + + ASSERT(!consumer_group_.empty()); + ProtobufWkt::Value consumer_group_v; + consumer_group_v.set_string_value(consumer_group_.c_str(), consumer_group_.size()); + members["consumerGroup"] = consumer_group_v; + + ASSERT(!topic_.empty()); + ProtobufWkt::Value topicNode; + topicNode.set_string_value(topic_.c_str(), topic_.length()); + members["topic"] = topicNode; + + ProtobufWkt::Value queue_id_v; + queue_id_v.set_number_value(queue_id_); + members["queueId"] = queue_id_v; + + ProtobufWkt::Value max_msg_nums_v; + max_msg_nums_v.set_number_value(max_msg_nums_); + members["maxMsgNums"] = max_msg_nums_v; + + ProtobufWkt::Value invisible_time_v; + invisible_time_v.set_number_value(invisible_time_); + members["invisibleTime"] = invisible_time_v; + + ProtobufWkt::Value poll_time_v; + poll_time_v.set_number_value(poll_time_); + members["pollTime"] = poll_time_v; + + ProtobufWkt::Value born_time_v; + born_time_v.set_number_value(born_time_); + members["bornTime"] = born_time_v; + + ProtobufWkt::Value init_mode_v; + init_mode_v.set_number_value(init_mode_); + members["initMode"] = init_mode_v; + + if (!exp_type_.empty()) { + ProtobufWkt::Value exp_type_v; + exp_type_v.set_string_value(exp_type_.c_str(), exp_type_.size()); + members["expType"] = exp_type_v; + } + + if (!exp_.empty()) { + ProtobufWkt::Value exp_v; + exp_v.set_string_value(exp_.c_str(), exp_.size()); + members["exp"] = exp_v; + } +} + +void PopMessageRequestHeader::decode(const ProtobufWkt::Value& ext_fields) { + const auto& members = ext_fields.struct_value().fields(); + ASSERT(members.contains("consumerGroup")); + ASSERT(members.contains("topic")); + ASSERT(members.contains("queueId")); + ASSERT(members.contains("maxMsgNums")); + ASSERT(members.contains("invisibleTime")); + ASSERT(members.contains("pollTime")); + ASSERT(members.contains("bornTime")); + ASSERT(members.contains("initMode")); + + consumer_group_ = members.at("consumerGroup").string_value(); + topic_ = members.at("topic").string_value(); + + if (members.at("queueId").kind_case() == ProtobufWkt::Value::kNumberValue) { + queue_id_ = members.at("queueId").number_value(); + } else { + queue_id_ = std::stoi(members.at("queueId").string_value()); + } + + if (members.at("maxMsgNums").kind_case() == ProtobufWkt::Value::kNumberValue) { + max_msg_nums_ = members.at("maxMsgNums").number_value(); + } else { + max_msg_nums_ = std::stoi(members.at("maxMsgNums").string_value()); + } + + if (members.at("invisibleTime").kind_case() == ProtobufWkt::Value::kNumberValue) { + invisible_time_ = members.at("invisibleTime").number_value(); + } else { + invisible_time_ = std::stoll(members.at("invisibleTime").string_value()); + } + + if (members.at("pollTime").kind_case() == ProtobufWkt::Value::kNumberValue) { + poll_time_ = members.at("pollTime").number_value(); + } else { + poll_time_ = std::stoll(members.at("pollTime").string_value()); + } + + if (members.at("bornTime").kind_case() == ProtobufWkt::Value::kNumberValue) { + born_time_ = members.at("bornTime").number_value(); + } else { + born_time_ = std::stoll(members.at("bornTime").string_value()); + } + + if (members.at("initMode").kind_case() == ProtobufWkt::Value::kNumberValue) { + init_mode_ = members.at("initMode").number_value(); + } else { + init_mode_ = std::stol(members.at("initMode").string_value()); + } + + if (members.contains("expType")) { + exp_type_ = members.at("expType").string_value(); + } + + if 
(members.contains("exp")) { + exp_ = members.at("exp").string_value(); + } +} + +void PopMessageResponseHeader::encode(ProtobufWkt::Value& root) { + auto& members = *(root.mutable_struct_value()->mutable_fields()); + + ProtobufWkt::Value pop_time_v; + pop_time_v.set_number_value(pop_time_); + members["popTime"] = pop_time_v; + + ProtobufWkt::Value invisible_time_v; + invisible_time_v.set_number_value(invisible_time_); + members["invisibleTime"] = invisible_time_v; + + ProtobufWkt::Value revive_qid_v; + revive_qid_v.set_number_value(revive_qid_); + members["reviveQid"] = revive_qid_v; + + ProtobufWkt::Value rest_num_v; + rest_num_v.set_number_value(rest_num_); + members["restNum"] = rest_num_v; + + if (!start_offset_info_.empty()) { + ProtobufWkt::Value start_offset_info_v; + start_offset_info_v.set_string_value(start_offset_info_.c_str(), start_offset_info_.size()); + members["startOffsetInfo"] = start_offset_info_v; + } + + if (!msg_off_set_info_.empty()) { + ProtobufWkt::Value msg_offset_info_v; + msg_offset_info_v.set_string_value(msg_off_set_info_.c_str(), msg_off_set_info_.size()); + members["msgOffsetInfo"] = msg_offset_info_v; + } + + if (!order_count_info_.empty()) { + ProtobufWkt::Value order_count_info_v; + order_count_info_v.set_string_value(order_count_info_.c_str(), order_count_info_.size()); + members["orderCountInfo"] = order_count_info_v; + } +} + +void PopMessageResponseHeader::decode(const ProtobufWkt::Value& ext_fields) { + const auto& members = ext_fields.struct_value().fields(); + ASSERT(members.contains("popTime")); + ASSERT(members.contains("invisibleTime")); + ASSERT(members.contains("reviveQid")); + ASSERT(members.contains("restNum")); + + if (members.at("popTime").kind_case() == ProtobufWkt::Value::kNumberValue) { + pop_time_ = members.at("popTime").number_value(); + } else { + pop_time_ = std::stoull(members.at("popTime").string_value()); + } + + if (members.at("invisibleTime").kind_case() == ProtobufWkt::Value::kNumberValue) { + invisible_time_ = members.at("invisibleTime").number_value(); + } else { + invisible_time_ = std::stoull(members.at("invisibleTime").string_value()); + } + + if (members.at("reviveQid").kind_case() == ProtobufWkt::Value::kNumberValue) { + revive_qid_ = members.at("reviveQid").number_value(); + } else { + revive_qid_ = std::stoul(members.at("reviveQid").string_value()); + } + + if (members.at("restNum").kind_case() == ProtobufWkt::Value::kNumberValue) { + rest_num_ = members.at("restNum").number_value(); + } else { + rest_num_ = std::stoull(members.at("restNum").string_value()); + } + + if (members.contains("startOffsetInfo")) { + start_offset_info_ = members.at("startOffsetInfo").string_value(); + } + + if (members.contains("msgOffsetInfo")) { + msg_off_set_info_ = members.at("msgOffsetInfo").string_value(); + } + + if (members.contains("orderCountInfo")) { + order_count_info_ = members.at("orderCountInfo").string_value(); + } +} + +void AckMessageRequestHeader::encode(ProtobufWkt::Value& root) { + auto& members = *(root.mutable_struct_value()->mutable_fields()); + + ASSERT(!consumer_group_.empty()); + ProtobufWkt::Value consumer_group_v; + consumer_group_v.set_string_value(consumer_group_.c_str(), consumer_group_.size()); + members["consumerGroup"] = consumer_group_v; + + ASSERT(!topic_.empty()); + ProtobufWkt::Value topic_v; + topic_v.set_string_value(topic_.c_str(), topic_.size()); + members["topic"] = topic_v; + + ASSERT(queue_id_ >= 0); + ProtobufWkt::Value queue_id_v; + queue_id_v.set_number_value(queue_id_); + members["queueId"] = 
queue_id_v; + + ASSERT(!extra_info_.empty()); + ProtobufWkt::Value extra_info_v; + extra_info_v.set_string_value(extra_info_.c_str(), extra_info_.size()); + members["extraInfo"] = extra_info_v; + + ASSERT(offset_ >= 0); + ProtobufWkt::Value offset_v; + offset_v.set_number_value(offset_); + members["offset"] = offset_v; +} + +void AckMessageRequestHeader::decode(const ProtobufWkt::Value& ext_fields) { + const auto& members = ext_fields.struct_value().fields(); + ASSERT(members.contains("consumerGroup")); + ASSERT(members.contains("topic")); + ASSERT(members.contains("queueId")); + ASSERT(members.contains("extraInfo")); + ASSERT(members.contains("offset")); + + consumer_group_ = members.at("consumerGroup").string_value(); + + topic_ = members.at("topic").string_value(); + + if (members.at("queueId").kind_case() == ProtobufWkt::Value::kNumberValue) { + queue_id_ = members.at("queueId").number_value(); + } else { + queue_id_ = std::stoi(members.at("queueId").string_value()); + } + + extra_info_ = members.at("extraInfo").string_value(); + + if (members.at("offset").kind_case() == ProtobufWkt::Value::kNumberValue) { + offset_ = members.at("offset").number_value(); + } else { + offset_ = std::stoll(members.at("offset").string_value()); + } +} + +void UnregisterClientRequestHeader::encode(ProtobufWkt::Value& root) { + auto& members = *(root.mutable_struct_value()->mutable_fields()); + + ASSERT(!client_id_.empty()); + ProtobufWkt::Value client_id_v; + client_id_v.set_string_value(client_id_.c_str(), client_id_.size()); + members["clientID"] = client_id_v; + + ASSERT(!producer_group_.empty() || !consumer_group_.empty()); + if (!producer_group_.empty()) { + ProtobufWkt::Value producer_group_v; + producer_group_v.set_string_value(producer_group_.c_str(), producer_group_.size()); + members["producerGroup"] = producer_group_v; + } + + if (!consumer_group_.empty()) { + ProtobufWkt::Value consumer_group_v; + consumer_group_v.set_string_value(consumer_group_.c_str(), consumer_group_.size()); + members["consumerGroup"] = consumer_group_v; + } +} + +void UnregisterClientRequestHeader::decode(const ProtobufWkt::Value& ext_fields) { + const auto& members = ext_fields.struct_value().fields(); + ASSERT(members.contains("clientID")); + ASSERT(members.contains("producerGroup") || members.contains("consumerGroup")); + + client_id_ = members.at("clientID").string_value(); + + if (members.contains("consumerGroup")) { + consumer_group_ = members.at("consumerGroup").string_value(); + } + + if (members.contains("producerGroup")) { + producer_group_ = members.at("producerGroup").string_value(); + } +} + +void GetConsumerListByGroupResponseBody::encode(ProtobufWkt::Struct& root) { + auto& members = *(root.mutable_fields()); + + ProtobufWkt::Value consumer_id_list_v; + auto member_list = consumer_id_list_v.mutable_list_value(); + for (const auto& consumerId : consumer_id_list_) { + auto consumer_id_v = new ProtobufWkt::Value; + consumer_id_v->set_string_value(consumerId.c_str(), consumerId.size()); + member_list->mutable_values()->AddAllocated(consumer_id_v); + } + members["consumerIdList"] = consumer_id_list_v; +} + +bool HeartbeatData::decode(ProtobufWkt::Struct& doc) { + const auto& members = doc.fields(); + if (!members.contains("clientID")) { + return false; + } + + client_id_ = members.at("clientID").string_value(); + + if (members.contains("consumerDataSet")) { + auto& consumer_data_list = members.at("consumerDataSet").list_value().values(); + for (const auto& it : consumer_data_list) { + if 
(it.struct_value().fields().contains("groupName")) { + consumer_groups_.push_back(it.struct_value().fields().at("groupName").string_value()); + } + } + } + return true; +} + +void HeartbeatData::encode(ProtobufWkt::Struct& root) { + auto& members = *(root.mutable_fields()); + + ProtobufWkt::Value client_id_v; + client_id_v.set_string_value(client_id_.c_str(), client_id_.size()); + members["clientID"] = client_id_v; +} + +void GetConsumerListByGroupRequestHeader::encode(ProtobufWkt::Value& root) { + auto& members = *(root.mutable_struct_value()->mutable_fields()); + + ProtobufWkt::Value consumer_group_v; + consumer_group_v.set_string_value(consumer_group_.c_str(), consumer_group_.size()); + members["consumerGroup"] = consumer_group_v; +} + +void GetConsumerListByGroupRequestHeader::decode(const ProtobufWkt::Value& ext_fields) { + const auto& members = ext_fields.struct_value().fields(); + ASSERT(members.contains("consumerGroup")); + + consumer_group_ = members.at("consumerGroup").string_value(); +} + +void MetadataHelper::parseRequest(RemotingCommandPtr& request, MessageMetadataSharedPtr metadata) { + metadata->setOneWay(request->isOneWay()); + CommandCustomHeader* custom_header = request->customHeader(); + + auto route_command_custom_header = request->typedCustomHeader(); + if (route_command_custom_header != nullptr) { + metadata->setTopicName(route_command_custom_header->topic()); + } + + const uint64_t code = request->code(); + metadata->headers().addCopy(Http::LowerCaseString("code"), code); + + if (enumToInt(RequestCode::AckMessage) == code) { + metadata->headers().addCopy(Http::LowerCaseString(RocketmqConstants::get().BrokerName), + custom_header->targetBrokerName()); + metadata->headers().addCopy(Http::LowerCaseString(RocketmqConstants::get().BrokerId), + custom_header->targetBrokerId()); + } +} + +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/filters/network/rocketmq_proxy/protocol.h b/source/extensions/filters/network/rocketmq_proxy/protocol.h new file mode 100644 index 000000000000..aa9c213bbc89 --- /dev/null +++ b/source/extensions/filters/network/rocketmq_proxy/protocol.h @@ -0,0 +1,672 @@ +#pragma once + +#include +#include + +#include "envoy/common/pure.h" +#include "envoy/common/time.h" + +#include "common/buffer/buffer_impl.h" +#include "common/common/logger.h" +#include "common/protobuf/protobuf.h" + +#include "extensions/filters/network/rocketmq_proxy/metadata.h" + +#include "absl/strings/string_view.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { + +/** + * Retry topic prefix + */ +constexpr absl::string_view RetryTopicPrefix = "%RETRY%"; + +/** + * RocketMQ supports two versions of sending message protocol. These two versions are identical in + * terms of functionality. But they do differ in encoding scheme. See SendMessageRequestHeader + * encode/decode functions for specific differences. + */ +enum class SendMessageRequestVersion : uint32_t { + V1 = 0, + V2 = 1, + // Only for test purpose + V3 = 2, +}; + +/** + * Command custom header are used in combination with RemotingCommand::code, to provide further + * instructions and data for the operation defined by the protocol. 
+ * In addition to the shared encode/decode functions, this class also defines target-broker-name and + * target-broker-id fields, which are helpful if the associated remoting command should be delivered + * to a specific host according to the semantics of the previous command. + */ +class CommandCustomHeader { +public: + CommandCustomHeader() = default; + + virtual ~CommandCustomHeader() = default; + + virtual void encode(ProtobufWkt::Value& root) PURE; + + virtual void decode(const ProtobufWkt::Value& ext_fields) PURE; + + const std::string& targetBrokerName() const { return target_broker_name_; } + + void targetBrokerName(absl::string_view broker_name) { + target_broker_name_ = std::string(broker_name.data(), broker_name.length()); + } + + int32_t targetBrokerId() const { return target_broker_id_; } + + void targetBrokerId(int32_t broker_id) { target_broker_id_ = broker_id; } + +protected: + /** + * If this field is not empty, RDS will employ this field and target-broker-id to direct the + * associated request to a subset of the chosen cluster. + */ + std::string target_broker_name_; + + /** + * Used along with the target-broker-name field. + */ + int32_t target_broker_id_; +}; + +using CommandCustomHeaderPtr = CommandCustomHeader*; + +/** + * This class extends CommandCustomHeader, adding a field commonly used by the various custom + * command headers which participate in the process of request routing. + */ +class RoutingCommandCustomHeader : public CommandCustomHeader { +public: + virtual const std::string& topic() const { return topic_; } + + virtual void topic(absl::string_view t) { topic_ = std::string(t.data(), t.size()); } + +protected: + std::string topic_; +}; + +/** + * This class defines the basic request/response forms used by RocketMQ among all its components. 
+ */ +class RemotingCommand { +public: + RemotingCommand() : RemotingCommand(0, 0, 0) {} + + RemotingCommand(int code, int version, int opaque) + : code_(code), version_(version), opaque_(opaque), flag_(0) {} + + ~RemotingCommand() { delete custom_header_; } + + int32_t code() const { return code_; } + + void code(int code) { code_ = code; } + + const std::string& language() const { return language_; } + + void language(absl::string_view lang) { language_ = std::string(lang.data(), lang.size()); } + + int32_t version() const { return version_; } + + void opaque(int opaque) { opaque_ = opaque; } + + int32_t opaque() const { return opaque_; } + + uint32_t flag() const { return flag_; } + + void flag(uint32_t f) { flag_ = f; } + + void customHeader(CommandCustomHeaderPtr custom_header) { custom_header_ = custom_header; } + + CommandCustomHeaderPtr customHeader() const { return custom_header_; } + + template T* typedCustomHeader() { + if (!custom_header_) { + return nullptr; + } + + return dynamic_cast(custom_header_); + } + + uint32_t bodyLength() const { return body_.length(); } + + Buffer::Instance& body() { return body_; } + + const std::string& remark() const { return remark_; } + + void remark(absl::string_view remark) { remark_ = std::string(remark.data(), remark.length()); } + + const std::string& serializeTypeCurrentRPC() const { return serialize_type_current_rpc_; } + + void serializeTypeCurrentRPC(absl::string_view serialization_type) { + serialize_type_current_rpc_ = std::string(serialization_type.data(), serialization_type.size()); + } + + bool isOneWay() const { + uint32_t marker = 1u << SHIFT_ONEWAY; + return (flag_ & marker) == marker; + } + + void markAsResponse() { flag_ |= (1u << SHIFT_RPC); } + + void markAsOneway() { flag_ |= (1u << SHIFT_ONEWAY); } + + static bool isResponse(uint32_t flag) { return (flag & (1u << SHIFT_RPC)) == (1u << SHIFT_RPC); } + +private: + /** + * Action code of this command. Possible values are defined in RequestCode enumeration. + */ + int32_t code_; + + /** + * Language used by the client. + */ + std::string language_{"CPP"}; + + /** + * Version of the client SDK. + */ + int32_t version_; + + /** + * Request ID. If the RPC is request-response form, this field is used to establish the + * association. + */ + int32_t opaque_; + + /** + * Bit-wise flag indicating RPC type, including whether it is one-way or request-response; + * a request or response command. + */ + uint32_t flag_; + + /** + * Remark is used to deliver text message in addition to code. Urgent scenarios may use this field + * to transfer diagnostic message to the counterparts when a full-fledged response is impossible. + */ + std::string remark_; + + /** + * Indicate how the custom command header is serialized. + */ + std::string serialize_type_current_rpc_{"JSON"}; + + /** + * The custom command header works with command code to provide additional protocol + * implementation. + * Generally speaking, each code has pair of request/response custom command header. + */ + CommandCustomHeaderPtr custom_header_{nullptr}; + + /** + * The command body, in form of binary. + */ + Buffer::OwnedImpl body_; + + static constexpr uint32_t SHIFT_RPC = 0; + + static constexpr uint32_t SHIFT_ONEWAY = 1; + + friend class Encoder; + friend class Decoder; +}; + +using RemotingCommandPtr = std::unique_ptr; + +/** + * Command codes used when sending requests. Meaning of each field is self-explanatory. 
+ */ +enum class RequestCode : uint32_t { + SendMessage = 10, + HeartBeat = 34, + UnregisterClient = 35, + GetConsumerListByGroup = 38, + PopMessage = 50, + AckMessage = 51, + GetRouteInfoByTopic = 105, + SendMessageV2 = 310, + // Only for test purpose + Unsupported = 999, +}; + +/** + * Command code used when sending responses. Meaning of each enum is self-explanatory. + */ +enum class ResponseCode : uint32_t { + Success = 0, + SystemError = 1, + SystemBusy = 2, + RequestCodeNotSupported = 3, + SlaveNotAvailable = 11, +}; + +/** + * Custom command header for sending messages. + */ +class SendMessageRequestHeader : public RoutingCommandCustomHeader, + Logger::Loggable { +public: + ~SendMessageRequestHeader() override = default; + + int32_t queueId() const { return queue_id_; } + + /** + * TODO(lizhanhui): Remove this write API after adding queue-id-aware route logic + * @param queue_id target queue Id. + */ + void queueId(int32_t queue_id) { queue_id_ = queue_id; } + + void producerGroup(std::string producer_group) { producer_group_ = std::move(producer_group); } + + void encode(ProtobufWkt::Value& root) override; + + void decode(const ProtobufWkt::Value& ext_fields) override; + + const std::string& producerGroup() const { return producer_group_; } + + const std::string& defaultTopic() const { return default_topic_; } + + int32_t defaultTopicQueueNumber() const { return default_topic_queue_number_; } + + int32_t sysFlag() const { return sys_flag_; } + + int32_t flag() const { return flag_; } + + int64_t bornTimestamp() const { return born_timestamp_; } + + const std::string& properties() const { return properties_; } + + int32_t reconsumeTimes() const { return reconsume_time_; } + + bool unitMode() const { return unit_mode_; } + + bool batch() const { return batch_; } + + int32_t maxReconsumeTimes() const { return max_reconsume_time_; } + + void properties(absl::string_view props) { + properties_ = std::string(props.data(), props.size()); + } + + void reconsumeTimes(int32_t reconsume_times) { reconsume_time_ = reconsume_times; } + + void unitMode(bool unit_mode) { unit_mode_ = unit_mode; } + + void batch(bool batch) { batch_ = batch; } + + void maxReconsumeTimes(int32_t max_reconsume_times) { max_reconsume_time_ = max_reconsume_times; } + + void version(SendMessageRequestVersion version) { version_ = version; } + + SendMessageRequestVersion version() const { return version_; } + +private: + std::string producer_group_; + std::string default_topic_; + int32_t default_topic_queue_number_{0}; + int32_t queue_id_{-1}; + int32_t sys_flag_{0}; + int64_t born_timestamp_{0}; + int32_t flag_{0}; + std::string properties_; + int32_t reconsume_time_{0}; + bool unit_mode_{false}; + bool batch_{false}; + int32_t max_reconsume_time_{0}; + SendMessageRequestVersion version_{SendMessageRequestVersion::V1}; + + friend class Decoder; +}; + +/** + * Custom command header to respond to a send-message-request. 
+ */ +class SendMessageResponseHeader : public CommandCustomHeader { +public: + SendMessageResponseHeader() = default; + + SendMessageResponseHeader(std::string msg_id, int32_t queue_id, int64_t queue_offset, + std::string transaction_id) + : msg_id_(std::move(msg_id)), queue_id_(queue_id), queue_offset_(queue_offset), + transaction_id_(std::move(transaction_id)) {} + + void encode(ProtobufWkt::Value& root) override; + + void decode(const ProtobufWkt::Value& ext_fields) override; + + const std::string& msgId() const { return msg_id_; } + + int32_t queueId() const { return queue_id_; } + + int64_t queueOffset() const { return queue_offset_; } + + const std::string& transactionId() const { return transaction_id_; } + + // This function is for testing only. + void msgIdForTest(absl::string_view msg_id) { + msg_id_ = std::string(msg_id.data(), msg_id.size()); + } + + void queueId(int32_t queue_id) { queue_id_ = queue_id; } + + void queueOffset(int64_t queue_offset) { queue_offset_ = queue_offset; } + + void transactionId(absl::string_view transaction_id) { + transaction_id_ = std::string(transaction_id.data(), transaction_id.size()); + } + +private: + std::string msg_id_; + int32_t queue_id_{0}; + int64_t queue_offset_{0}; + std::string transaction_id_; +}; + +/** + * Classic RocketMQ needs to know the addresses of each broker to work with. To resolve the addresses, + * the client SDK uses this command header to query name servers. + * + * This header is kept for compatibility purposes only. + */ +class GetRouteInfoRequestHeader : public RoutingCommandCustomHeader { +public: + void encode(ProtobufWkt::Value& root) override; + + void decode(const ProtobufWkt::Value& ext_fields) override; +}; + +/** + * When a client wishes to consume messages stored in brokers, it sends a pop command to the brokers. + * Brokers then send a batch of messages to the client. At the same time, the broker keeps the + * batch invisible for a configured period of time, waiting for acknowledgments from the client. + * + * If the client manages to consume the messages within the promised time interval and sends an ack + * command back to the broker, the broker will mark the acknowledged ones as consumed. Otherwise, the + * previously sent messages become visible again and may be consumed by other client instances. + * + * Through this approach, we achieve stateless message pulling, compared to the classic offset-based + * consumption progress management. This model brings some extra workload to the broker side, but + * it fits Envoy well. 
+ */ +class PopMessageRequestHeader : public RoutingCommandCustomHeader { +public: + friend class Decoder; + + void encode(ProtobufWkt::Value& root) override; + + void decode(const ProtobufWkt::Value& ext_fields) override; + + const std::string& consumerGroup() const { return consumer_group_; } + + void consumerGroup(absl::string_view consumer_group) { + consumer_group_ = std::string(consumer_group.data(), consumer_group.size()); + } + + int32_t queueId() const { return queue_id_; } + + void queueId(int32_t queue_id) { queue_id_ = queue_id; } + + int32_t maxMsgNum() const { return max_msg_nums_; } + + void maxMsgNum(int32_t max_msg_num) { max_msg_nums_ = max_msg_num; } + + int64_t invisibleTime() const { return invisible_time_; } + + void invisibleTime(int64_t invisible_time) { invisible_time_ = invisible_time; } + + int64_t pollTime() const { return poll_time_; } + + void pollTime(int64_t poll_time) { poll_time_ = poll_time; } + + int64_t bornTime() const { return born_time_; } + + void bornTime(int64_t born_time) { born_time_ = born_time; } + + int32_t initMode() const { return init_mode_; } + + void initMode(int32_t init_mode) { init_mode_ = init_mode; } + + const std::string& expType() const { return exp_type_; } + + void expType(absl::string_view exp_type) { + exp_type_ = std::string(exp_type.data(), exp_type.size()); + } + + const std::string& exp() const { return exp_; } + + void exp(absl::string_view exp) { exp_ = std::string(exp.data(), exp.size()); } + +private: + std::string consumer_group_; + int32_t queue_id_{-1}; + int32_t max_msg_nums_{32}; + int64_t invisible_time_{0}; + int64_t poll_time_{0}; + int64_t born_time_{0}; + int32_t init_mode_{0}; + std::string exp_type_; + std::string exp_; + bool order_{false}; +}; + +/** + * The pop response command header. See pop request header for how-things-work explanation. + */ +class PopMessageResponseHeader : public CommandCustomHeader { +public: + void decode(const ProtobufWkt::Value& ext_fields) override; + + void encode(ProtobufWkt::Value& root) override; + + // This function is for testing only. 
+ int64_t popTimeForTest() const { return pop_time_; } + + void popTime(int64_t pop_time) { pop_time_ = pop_time; } + + int64_t invisibleTime() const { return invisible_time_; } + + void invisibleTime(int64_t invisible_time) { invisible_time_ = invisible_time; } + + int32_t reviveQid() const { return revive_qid_; } + + void reviveQid(int32_t revive_qid) { revive_qid_ = revive_qid; } + + int64_t restNum() const { return rest_num_; } + + void restNum(int64_t rest_num) { rest_num_ = rest_num; } + + const std::string& startOffsetInfo() const { return start_offset_info_; } + + void startOffsetInfo(absl::string_view start_offset_info) { + start_offset_info_ = std::string(start_offset_info.data(), start_offset_info.size()); + } + + const std::string& msgOffsetInfo() const { return msg_off_set_info_; } + + void msgOffsetInfo(absl::string_view msg_offset_info) { + msg_off_set_info_ = std::string(msg_offset_info.data(), msg_offset_info.size()); + } + + const std::string& orderCountInfo() const { return order_count_info_; } + + void orderCountInfo(absl::string_view order_count_info) { + order_count_info_ = std::string(order_count_info.data(), order_count_info.size()); + } + +private: + int64_t pop_time_{0}; + int64_t invisible_time_{0}; + int32_t revive_qid_{0}; + int64_t rest_num_{0}; + std::string start_offset_info_; + std::string msg_off_set_info_; + std::string order_count_info_; +}; + +/** + * This command is used by the client to acknowledge message(s) that have been successfully consumed. + * Once the broker receives this request, the associated message is formally marked as consumed. + * + * Note: the ack request has to be sent to exactly the same broker from which the messages were popped. + */ +class AckMessageRequestHeader : public RoutingCommandCustomHeader { +public: + void decode(const ProtobufWkt::Value& ext_fields) override; + + void encode(ProtobufWkt::Value& root) override; + + absl::string_view consumerGroup() const { return consumer_group_; } + + int64_t offset() const { return offset_; } + + void consumerGroup(absl::string_view consumer_group) { + consumer_group_ = std::string(consumer_group.data(), consumer_group.size()); + } + + int32_t queueId() const { return queue_id_; } + void queueId(int32_t queue_id) { queue_id_ = queue_id; } + + absl::string_view extraInfo() const { return extra_info_; } + void extraInfo(absl::string_view extra_info) { + extra_info_ = std::string(extra_info.data(), extra_info.size()); + } + + void offset(int64_t offset) { offset_ = offset; } + + const std::string& directiveKey() { + if (key_.empty()) { + key_ = fmt::format("{}-{}-{}-{}", consumer_group_, topic_, queue_id_, offset_); + } + return key_; + } + +private: + std::string consumer_group_; + int32_t queue_id_{0}; + std::string extra_info_; + int64_t offset_{0}; + std::string key_; +}; + +/** + * When a client shuts down gracefully, it notifies the broker (now Envoy) of this event. 
+ */ +class UnregisterClientRequestHeader : public CommandCustomHeader { +public: + void encode(ProtobufWkt::Value& root) override; + + void decode(const ProtobufWkt::Value& ext_fields) override; + + void clientId(absl::string_view client_id) { + client_id_ = std::string(client_id.data(), client_id.length()); + } + + const std::string& clientId() const { return client_id_; } + + void producerGroup(absl::string_view producer_group) { + producer_group_ = std::string(producer_group.data(), producer_group.length()); + } + + const std::string& producerGroup() const { return producer_group_; } + + void consumerGroup(absl::string_view consumer_group) { + consumer_group_ = std::string(consumer_group.data(), consumer_group.length()); + } + + const std::string& consumerGroup() const { return consumer_group_; } + +private: + std::string client_id_; + std::string producer_group_; + std::string consumer_group_; +}; + +/** + * Classic SDK clients use client-side load balancing. This header is kept for compatibility. + */ +class GetConsumerListByGroupRequestHeader : public CommandCustomHeader { +public: + void encode(ProtobufWkt::Value& root) override; + + void decode(const ProtobufWkt::Value& ext_fields) override; + + void consumerGroup(absl::string_view consumer_group) { + consumer_group_ = std::string(consumer_group.data(), consumer_group.length()); + } + + const std::string& consumerGroup() const { return consumer_group_; } + +private: + std::string consumer_group_; +}; + +/** + * The response body of the get-consumer-list-by-group command. + */ +class GetConsumerListByGroupResponseBody { +public: + void encode(ProtobufWkt::Struct& root); + + void add(absl::string_view consumer_id) { + consumer_id_list_.emplace_back(std::string(consumer_id.data(), consumer_id.length())); + } + +private: + std::vector<std::string> consumer_id_list_; +}; + +/** + * Clients periodically send heartbeats to servers to maintain their alive status. + */ +class HeartbeatData : public Logger::Loggable { +public: + bool decode(ProtobufWkt::Struct& doc); + + const std::string& clientId() const { return client_id_; } + + const std::vector<std::string>& consumerGroups() const { return consumer_groups_; } + + void encode(ProtobufWkt::Struct& root); + + void clientId(absl::string_view client_id) { + client_id_ = std::string(client_id.data(), client_id.size()); + } + +private: + std::string client_id_; + std::vector<std::string> consumer_groups_; +}; + +class MetadataHelper { +public: + MetadataHelper() = delete; + + static void parseRequest(RemotingCommandPtr& request, MessageMetadataSharedPtr metadata); +}; + +/** + * Directive to ensure that subsequent ack requests are routed to the same broker host where the pop + * requests were made. 
+ */ +struct AckMessageDirective { + + AckMessageDirective(absl::string_view broker_name, int32_t broker_id, MonotonicTime create_time) + : broker_name_(broker_name.data(), broker_name.length()), broker_id_(broker_id), + creation_time_(create_time) {} + + std::string broker_name_; + int32_t broker_id_; + MonotonicTime creation_time_; +}; + +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/filters/network/rocketmq_proxy/router/BUILD b/source/extensions/filters/network/rocketmq_proxy/router/BUILD new file mode 100644 index 000000000000..19227abff64a --- /dev/null +++ b/source/extensions/filters/network/rocketmq_proxy/router/BUILD @@ -0,0 +1,50 @@ +licenses(["notice"]) # Apache 2 + +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) + +envoy_package() + +envoy_cc_library( + name = "router_interface", + hdrs = ["router.h"], + deps = [ + "//include/envoy/tcp:conn_pool_interface", + "//include/envoy/upstream:load_balancer_interface", + "//source/common/upstream:load_balancer_lib", + ], +) + +envoy_cc_library( + name = "router_lib", + srcs = ["router_impl.cc"], + hdrs = ["router_impl.h"], + deps = [ + ":router_interface", + "//include/envoy/upstream:cluster_manager_interface", + "//include/envoy/upstream:thread_local_cluster_interface", + "//source/extensions/filters/network:well_known_names", + "//source/extensions/filters/network/rocketmq_proxy:conn_manager_lib", + ], +) + +envoy_cc_library( + name = "route_matcher", + srcs = ["route_matcher.cc"], + hdrs = ["route_matcher.h"], + deps = [ + ":router_interface", + "//include/envoy/config:typed_config_interface", + "//include/envoy/server:filter_config_interface", + "//source/common/common:logger_lib", + "//source/common/common:matchers_lib", + "//source/common/http:header_utility_lib", + "//source/common/router:metadatamatchcriteria_lib", + "//source/extensions/filters/network:well_known_names", + "//source/extensions/filters/network/rocketmq_proxy:metadata_lib", + "@envoy_api//envoy/extensions/filters/network/rocketmq_proxy/v3:pkg_cc_proto", + ], +) diff --git a/source/extensions/filters/network/rocketmq_proxy/router/route_matcher.cc b/source/extensions/filters/network/rocketmq_proxy/router/route_matcher.cc new file mode 100644 index 000000000000..e99d6c249ebb --- /dev/null +++ b/source/extensions/filters/network/rocketmq_proxy/router/route_matcher.cc @@ -0,0 +1,73 @@ +#include "extensions/filters/network/rocketmq_proxy/router/route_matcher.h" + +#include "common/router/metadatamatchcriteria_impl.h" + +#include "extensions/filters/network/rocketmq_proxy/metadata.h" +#include "extensions/filters/network/well_known_names.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { +namespace Router { + +RouteEntryImpl::RouteEntryImpl( + const envoy::extensions::filters::network::rocketmq_proxy::v3::Route& route) + : topic_name_(route.match().topic()), cluster_name_(route.route().cluster()), + config_headers_(Http::HeaderUtility::buildHeaderDataVector(route.match().headers())) { + + if (route.route().has_metadata_match()) { + const auto filter_it = route.route().metadata_match().filter_metadata().find( + Envoy::Config::MetadataFilters::get().ENVOY_LB); + if (filter_it != route.route().metadata_match().filter_metadata().end()) { + metadata_match_criteria_ = + std::make_unique(filter_it->second); + } + } +} + +const std::string& RouteEntryImpl::clusterName() 
const { return cluster_name_; } + +const RouteEntry* RouteEntryImpl::routeEntry() const { return this; } + +RouteConstSharedPtr RouteEntryImpl::matches(const MessageMetadata& metadata) const { + if (headersMatch(metadata.headers())) { + const std::string& topic_name = metadata.topicName(); + if (topic_name_.match(topic_name)) { + return shared_from_this(); + } + } + return nullptr; +} + +bool RouteEntryImpl::headersMatch(const Http::HeaderMap& headers) const { + ENVOY_LOG(debug, "rocketmq route matcher: headers size {}, metadata headers size {}", + config_headers_.size(), headers.size()); + return Http::HeaderUtility::matchHeaders(headers, config_headers_); +} + +RouteMatcher::RouteMatcher(const RouteConfig& config) { + for (const auto& route : config.routes()) { + routes_.emplace_back(std::make_shared(route)); + } + ENVOY_LOG(debug, "rocketmq route matcher: routes list size {}", routes_.size()); +} + +RouteConstSharedPtr RouteMatcher::route(const MessageMetadata& metadata) const { + const std::string& topic_name = metadata.topicName(); + for (const auto& route : routes_) { + RouteConstSharedPtr route_entry = route->matches(metadata); + if (nullptr != route_entry) { + ENVOY_LOG(debug, "rocketmq route matcher: find cluster success for topic: {}", topic_name); + return route_entry; + } + } + ENVOY_LOG(debug, "rocketmq route matcher: find cluster failed for topic: {}", topic_name); + return nullptr; +} + +} // namespace Router +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/filters/network/rocketmq_proxy/router/route_matcher.h b/source/extensions/filters/network/rocketmq_proxy/router/route_matcher.h new file mode 100644 index 000000000000..8cd4c533a541 --- /dev/null +++ b/source/extensions/filters/network/rocketmq_proxy/router/route_matcher.h @@ -0,0 +1,71 @@ +#pragma once + +#include + +#include "envoy/config/typed_config.h" +#include "envoy/extensions/filters/network/rocketmq_proxy/v3/route.pb.h" +#include "envoy/server/filter_config.h" + +#include "common/common/logger.h" +#include "common/common/matchers.h" +#include "common/http/header_utility.h" + +#include "extensions/filters/network/rocketmq_proxy/router/router.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { + +class MessageMetadata; + +namespace Router { + +class RouteEntryImpl : public RouteEntry, + public Route, + public std::enable_shared_from_this, + public Logger::Loggable { +public: + RouteEntryImpl(const envoy::extensions::filters::network::rocketmq_proxy::v3::Route& route); + ~RouteEntryImpl() override = default; + + // Router::RouteEntry + const std::string& clusterName() const override; + const Envoy::Router::MetadataMatchCriteria* metadataMatchCriteria() const override { + return metadata_match_criteria_.get(); + } + + // Router::Route + const RouteEntry* routeEntry() const override; + + RouteConstSharedPtr matches(const MessageMetadata& metadata) const; + +private: + bool headersMatch(const Http::HeaderMap& headers) const; + + const Matchers::StringMatcherImpl topic_name_; + const std::string cluster_name_; + const std::vector config_headers_; + Envoy::Router::MetadataMatchCriteriaConstPtr metadata_match_criteria_; +}; + +using RouteEntryImplConstSharedPtr = std::shared_ptr; + +class RouteMatcher : public Logger::Loggable { +public: + using RouteConfig = envoy::extensions::filters::network::rocketmq_proxy::v3::RouteConfiguration; + 
RouteMatcher(const RouteConfig& config); + + RouteConstSharedPtr route(const MessageMetadata& metadata) const; + +private: + std::vector routes_; +}; + +using RouteMatcherPtr = std::unique_ptr; + +} // namespace Router +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/filters/network/rocketmq_proxy/router/router.h b/source/extensions/filters/network/rocketmq_proxy/router/router.h new file mode 100644 index 000000000000..6067a7295fc6 --- /dev/null +++ b/source/extensions/filters/network/rocketmq_proxy/router/router.h @@ -0,0 +1,85 @@ +#pragma once + +#include "envoy/tcp/conn_pool.h" + +#include "common/upstream/load_balancer_impl.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { + +class ActiveMessage; +class MessageMetadata; + +namespace Router { + +/** + * RouteEntry is an individual resolved route entry. + */ +class RouteEntry { +public: + virtual ~RouteEntry() = default; + + /** + * @return const std::string& the upstream cluster that owns the route. + */ + virtual const std::string& clusterName() const PURE; + + /** + * @return MetadataMatchCriteria* the metadata that a subset load balancer should match when + * selecting an upstream host + */ + virtual const Envoy::Router::MetadataMatchCriteria* metadataMatchCriteria() const PURE; +}; + +/** + * Route holds the RouteEntry for a request. + */ +class Route { +public: + virtual ~Route() = default; + + /** + * @return the route entry or nullptr if there is no matching route for the request. + */ + virtual const RouteEntry* routeEntry() const PURE; +}; + +using RouteConstSharedPtr = std::shared_ptr; +using RouteSharedPtr = std::shared_ptr; + +/** + * The router configuration. + */ +class Config { +public: + virtual ~Config() = default; + + virtual RouteConstSharedPtr route(const MessageMetadata& metadata) const PURE; +}; + +class Router : public Tcp::ConnectionPool::UpstreamCallbacks, + public Upstream::LoadBalancerContextBase { + +public: + virtual void sendRequestToUpstream(ActiveMessage& active_message) PURE; + + /** + * Release resources associated with this router. + */ + virtual void reset() PURE; + + /** + * Return host description that is eventually connected. + * @return upstream host if a connection has been established; nullptr otherwise. 
+ */ + virtual Upstream::HostDescriptionConstSharedPtr upstreamHost() PURE; +}; + +using RouterPtr = std::unique_ptr; +} // namespace Router +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/filters/network/rocketmq_proxy/router/router_impl.cc b/source/extensions/filters/network/rocketmq_proxy/router/router_impl.cc new file mode 100644 index 000000000000..425eeec687c2 --- /dev/null +++ b/source/extensions/filters/network/rocketmq_proxy/router/router_impl.cc @@ -0,0 +1,218 @@ +#include "extensions/filters/network/rocketmq_proxy/router/router_impl.h" + +#include "common/common/enum_to_int.h" + +#include "extensions/filters/network/rocketmq_proxy/active_message.h" +#include "extensions/filters/network/rocketmq_proxy/codec.h" +#include "extensions/filters/network/rocketmq_proxy/conn_manager.h" +#include "extensions/filters/network/rocketmq_proxy/protocol.h" +#include "extensions/filters/network/rocketmq_proxy/well_known_names.h" +#include "extensions/filters/network/well_known_names.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { +namespace Router { + +RouterImpl::RouterImpl(Envoy::Upstream::ClusterManager& cluster_manager) + : cluster_manager_(cluster_manager), handle_(nullptr), active_message_(nullptr) {} + +RouterImpl::~RouterImpl() { + if (handle_) { + handle_->cancel(Tcp::ConnectionPool::CancelPolicy::Default); + } +} + +Upstream::HostDescriptionConstSharedPtr RouterImpl::upstreamHost() { return upstream_host_; } + +void RouterImpl::onAboveWriteBufferHighWatermark() { + ENVOY_LOG(trace, "Above write buffer high watermark"); +} + +void RouterImpl::onBelowWriteBufferLowWatermark() { + ENVOY_LOG(trace, "Below write buffer low watermark"); +} + +void RouterImpl::onEvent(Network::ConnectionEvent event) { + switch (event) { + case Network::ConnectionEvent::RemoteClose: { + ENVOY_LOG(error, "Connection to upstream: {} is closed by remote peer", + upstream_host_->address()->asString()); + // Send local reply to downstream + active_message_->onError("Connection to upstream is closed by remote peer"); + break; + } + case Network::ConnectionEvent::LocalClose: { + ENVOY_LOG(error, "Connection to upstream: {} has been closed", + upstream_host_->address()->asString()); + // Send local reply to downstream + active_message_->onError("Connection to upstream has been closed"); + break; + } + default: + // Ignore other events for now + ENVOY_LOG(trace, "Ignore event type"); + return; + } + active_message_->onReset(); +} + +const Envoy::Router::MetadataMatchCriteria* RouterImpl::metadataMatchCriteria() { + if (route_entry_) { + return route_entry_->metadataMatchCriteria(); + } + return nullptr; +} + +void RouterImpl::onUpstreamData(Buffer::Instance& data, bool end_stream) { + ENVOY_LOG(trace, "Received some data from upstream: {} bytes, end_stream: {}", data.length(), + end_stream); + if (active_message_->onUpstreamData(data, end_stream, connection_data_)) { + reset(); + } +} + +void RouterImpl::sendRequestToUpstream(ActiveMessage& active_message) { + active_message_ = &active_message; + int opaque = active_message_->downstreamRequest()->opaque(); + ASSERT(active_message_->metadata()->hasTopicName()); + std::string topic_name = active_message_->metadata()->topicName(); + + RouteConstSharedPtr route = active_message.route(); + if (!route) { + active_message.onError("No route for current request."); + ENVOY_LOG(warn, "Can not find route 
for topic {}", topic_name); + reset(); + return; + } + + route_entry_ = route->routeEntry(); + const std::string cluster_name = route_entry_->clusterName(); + Upstream::ThreadLocalCluster* cluster = cluster_manager_.get(cluster_name); + if (!cluster) { + active_message.onError("Cluster does not exist."); + ENVOY_LOG(warn, "Cluster for {} is not available", cluster_name); + reset(); + return; + } + + cluster_info_ = cluster->info(); + if (cluster_info_->maintenanceMode()) { + ENVOY_LOG(warn, "Cluster {} is under maintenance. Opaque: {}", cluster_name, opaque); + active_message.onError("Cluster under maintenance."); + active_message.connectionManager().stats().maintenance_failure_.inc(); + reset(); + return; + } + + Tcp::ConnectionPool::Instance* conn_pool = cluster_manager_.tcpConnPoolForCluster( + cluster_name, Upstream::ResourcePriority::Default, this); + if (!conn_pool) { + ENVOY_LOG(warn, "No host available for cluster {}. Opaque: {}", cluster_name, opaque); + active_message.onError("No host available"); + reset(); + return; + } + + upstream_request_ = std::make_unique(*this); + Tcp::ConnectionPool::Cancellable* cancellable = conn_pool->newConnection(*upstream_request_); + if (cancellable) { + handle_ = cancellable; + ENVOY_LOG(trace, "No connection is available for now. Create a cancellable handle. Opaque: {}", + opaque); + } else { + /* + * UpstreamRequest#onPoolReady or #onPoolFailure should have been invoked. + */ + ENVOY_LOG(trace, + "One connection is picked up from connection pool, callback should have been " + "executed. Opaque: {}", + opaque); + } +} + +RouterImpl::UpstreamRequest::UpstreamRequest(RouterImpl& router) : router_(router) {} + +void RouterImpl::UpstreamRequest::onPoolReady(Tcp::ConnectionPool::ConnectionDataPtr&& conn, + Upstream::HostDescriptionConstSharedPtr host) { + router_.connection_data_ = std::move(conn); + router_.upstream_host_ = host; + router_.connection_data_->addUpstreamCallbacks(router_); + if (router_.handle_) { + ENVOY_LOG(trace, "#onPoolReady, reset cancellable handle to nullptr"); + router_.handle_ = nullptr; + } + ENVOY_LOG(debug, "Current chosen host address: {}", host->address()->asString()); + // TODO(lizhanhui): we may optimize out encoding in case we there is no protocol translation. + Buffer::OwnedImpl buffer; + Encoder::encode(router_.active_message_->downstreamRequest(), buffer); + router_.connection_data_->connection().write(buffer, false); + ENVOY_LOG(trace, "Write data to upstream OK. Opaque: {}", + router_.active_message_->downstreamRequest()->opaque()); + + if (router_.active_message_->metadata()->isOneWay()) { + ENVOY_LOG(trace, + "Reset ActiveMessage since data is written and the downstream request is one-way. " + "Opaque: {}", + router_.active_message_->downstreamRequest()->opaque()); + + // For one-way ack-message requests, we need erase previously stored ack-directive. 
+ if (enumToSignedInt(RequestCode::AckMessage) == + router_.active_message_->downstreamRequest()->code()) { + auto ack_header = router_.active_message_->downstreamRequest() + ->typedCustomHeader(); + router_.active_message_->connectionManager().eraseAckDirective(ack_header->directiveKey()); + } + + router_.reset(); + } +} + +void RouterImpl::UpstreamRequest::onPoolFailure(Tcp::ConnectionPool::PoolFailureReason reason, + Upstream::HostDescriptionConstSharedPtr host) { + if (router_.handle_) { + ENVOY_LOG(trace, "#onPoolFailure, reset cancellable handle to nullptr"); + router_.handle_ = nullptr; + } + switch (reason) { + case Tcp::ConnectionPool::PoolFailureReason::Overflow: { + ENVOY_LOG(error, "Unable to acquire a connection to send request to upstream"); + router_.active_message_->onError("overflow"); + } break; + + case Tcp::ConnectionPool::PoolFailureReason::RemoteConnectionFailure: { + ENVOY_LOG(error, "Failed to make request to upstream due to remote connection error. Host {}", + host->address()->asString()); + router_.active_message_->onError("remote connection failure"); + } break; + + case Tcp::ConnectionPool::PoolFailureReason::LocalConnectionFailure: { + ENVOY_LOG(error, "Failed to make request to upstream due to local connection error. Host: {}", + host->address()->asString()); + router_.active_message_->onError("local connection failure"); + } break; + + case Tcp::ConnectionPool::PoolFailureReason::Timeout: { + ENVOY_LOG(error, "Failed to make request to upstream due to timeout. Host: {}", + host->address()->asString()); + router_.active_message_->onError("timeout"); + } break; + } + + // Release resources allocated to this request. + router_.reset(); +} + +void RouterImpl::reset() { + active_message_->onReset(); + if (connection_data_) { + connection_data_.reset(nullptr); + } +} + +} // namespace Router +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/filters/network/rocketmq_proxy/router/router_impl.h b/source/extensions/filters/network/rocketmq_proxy/router/router_impl.h new file mode 100644 index 000000000000..b3eca29e1e67 --- /dev/null +++ b/source/extensions/filters/network/rocketmq_proxy/router/router_impl.h @@ -0,0 +1,75 @@ +#pragma once + +#include "envoy/tcp/conn_pool.h" +#include "envoy/upstream/cluster_manager.h" +#include "envoy/upstream/thread_local_cluster.h" + +#include "common/common/logger.h" +#include "common/upstream/load_balancer_impl.h" + +#include "extensions/filters/network/rocketmq_proxy/router/router.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { +namespace Router { + +class RouterImpl : public Router, public Logger::Loggable { +public: + explicit RouterImpl(Upstream::ClusterManager& cluster_manager); + + ~RouterImpl() override; + + // Tcp::ConnectionPool::UpstreamCallbacks + void onUpstreamData(Buffer::Instance& data, bool end_stream) override; + void onAboveWriteBufferHighWatermark() override; + void onBelowWriteBufferLowWatermark() override; + void onEvent(Network::ConnectionEvent event) override; + + // Upstream::LoadBalancerContextBase + const Envoy::Router::MetadataMatchCriteria* metadataMatchCriteria() override; + + void sendRequestToUpstream(ActiveMessage& active_message) override; + + void reset() override; + + Upstream::HostDescriptionConstSharedPtr upstreamHost() override; + +private: + class UpstreamRequest : public Tcp::ConnectionPool::Callbacks { + public: + 
UpstreamRequest(RouterImpl& router); + + void onPoolFailure(Tcp::ConnectionPool::PoolFailureReason reason, + Upstream::HostDescriptionConstSharedPtr host) override; + + void onPoolReady(Tcp::ConnectionPool::ConnectionDataPtr&& conn, + Upstream::HostDescriptionConstSharedPtr host) override; + + private: + RouterImpl& router_; + }; + using UpstreamRequestPtr = std::unique_ptr; + + Upstream::ClusterManager& cluster_manager_; + Tcp::ConnectionPool::ConnectionDataPtr connection_data_; + + /** + * On requesting connection from upstream connection pool, this handle may be assigned when no + * connection is readily available at the moment. We may cancel the request through this handle. + * + * If there are connections which can be returned immediately, this handle is assigned as nullptr. + */ + Tcp::ConnectionPool::Cancellable* handle_; + Upstream::HostDescriptionConstSharedPtr upstream_host_; + ActiveMessage* active_message_; + Upstream::ClusterInfoConstSharedPtr cluster_info_; + UpstreamRequestPtr upstream_request_; + const RouteEntry* route_entry_{}; +}; +} // namespace Router +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/filters/network/rocketmq_proxy/stats.h b/source/extensions/filters/network/rocketmq_proxy/stats.h new file mode 100644 index 000000000000..13f3122b6eff --- /dev/null +++ b/source/extensions/filters/network/rocketmq_proxy/stats.h @@ -0,0 +1,62 @@ +#pragma once + +#include + +#include "envoy/stats/scope.h" +#include "envoy/stats/stats_macros.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { + +/** + * All rocketmq filter stats. @see stats_macros.h + */ +#define ALL_ROCKETMQ_FILTER_STATS(COUNTER, GAUGE, HISTOGRAM) \ + COUNTER(request) \ + COUNTER(request_decoding_error) \ + COUNTER(request_decoding_success) \ + COUNTER(response) \ + COUNTER(response_decoding_error) \ + COUNTER(response_decoding_success) \ + COUNTER(response_error) \ + COUNTER(response_success) \ + COUNTER(heartbeat) \ + COUNTER(unregister) \ + COUNTER(get_topic_route) \ + COUNTER(send_message_v1) \ + COUNTER(send_message_v2) \ + COUNTER(pop_message) \ + COUNTER(ack_message) \ + COUNTER(get_consumer_list) \ + COUNTER(maintenance_failure) \ + GAUGE(request_active, Accumulate) \ + GAUGE(send_message_v1_active, Accumulate) \ + GAUGE(send_message_v2_active, Accumulate) \ + GAUGE(pop_message_active, Accumulate) \ + GAUGE(get_topic_route_active, Accumulate) \ + GAUGE(send_message_pending, Accumulate) \ + GAUGE(pop_message_pending, Accumulate) \ + GAUGE(get_topic_route_pending, Accumulate) \ + GAUGE(total_pending, Accumulate) \ + HISTOGRAM(request_time_ms, Milliseconds) + +/** + * Struct definition for all rocketmq proxy stats. 
@see stats_macros.h + */ +struct RocketmqFilterStats { + ALL_ROCKETMQ_FILTER_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT, + GENERATE_HISTOGRAM_STRUCT) + + static RocketmqFilterStats generateStats(const std::string& prefix, Stats::Scope& scope) { + return RocketmqFilterStats{ALL_ROCKETMQ_FILTER_STATS(POOL_COUNTER_PREFIX(scope, prefix), + POOL_GAUGE_PREFIX(scope, prefix), + POOL_HISTOGRAM_PREFIX(scope, prefix))}; + } +}; + +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/rocketmq_proxy/topic_route.cc b/source/extensions/filters/network/rocketmq_proxy/topic_route.cc new file mode 100644 index 000000000000..8c445ab1c6f0 --- /dev/null +++ b/source/extensions/filters/network/rocketmq_proxy/topic_route.cc @@ -0,0 +1,76 @@ +#include "extensions/filters/network/rocketmq_proxy/topic_route.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { + +void QueueData::encode(ProtobufWkt::Struct& data_struct) { + auto* fields = data_struct.mutable_fields(); + + ProtobufWkt::Value broker_name_v; + broker_name_v.set_string_value(broker_name_); + (*fields)["brokerName"] = broker_name_v; + + ProtobufWkt::Value read_queue_num_v; + read_queue_num_v.set_number_value(read_queue_nums_); + (*fields)["readQueueNums"] = read_queue_num_v; + + ProtobufWkt::Value write_queue_num_v; + write_queue_num_v.set_number_value(write_queue_nums_); + (*fields)["writeQueueNums"] = write_queue_num_v; + + ProtobufWkt::Value perm_v; + perm_v.set_number_value(perm_); + (*fields)["perm"] = perm_v; +} + +void BrokerData::encode(ProtobufWkt::Struct& data_struct) { + auto& members = *(data_struct.mutable_fields()); + + ProtobufWkt::Value cluster_v; + cluster_v.set_string_value(cluster_); + members["cluster"] = cluster_v; + + ProtobufWkt::Value broker_name_v; + broker_name_v.set_string_value(broker_name_); + members["brokerName"] = broker_name_v; + + if (!broker_addrs_.empty()) { + ProtobufWkt::Value brokerAddrsNode; + auto& brokerAddrsMembers = *(brokerAddrsNode.mutable_struct_value()->mutable_fields()); + for (auto& entry : broker_addrs_) { + ProtobufWkt::Value address_v; + address_v.set_string_value(entry.second); + brokerAddrsMembers[std::to_string(entry.first)] = address_v; + } + members["brokerAddrs"] = brokerAddrsNode; + } +} + +void TopicRouteData::encode(ProtobufWkt::Struct& data_struct) { + auto* fields = data_struct.mutable_fields(); + + if (!queue_data_.empty()) { + ProtobufWkt::ListValue queue_data_list_v; + for (auto& queueData : queue_data_) { + queueData.encode(data_struct); + queue_data_list_v.add_values()->mutable_struct_value()->CopyFrom(data_struct); + } + (*fields)["queueDatas"].mutable_list_value()->CopyFrom(queue_data_list_v); + } + + if (!broker_data_.empty()) { + ProtobufWkt::ListValue broker_data_list_v; + for (auto& brokerData : broker_data_) { + brokerData.encode(data_struct); + broker_data_list_v.add_values()->mutable_struct_value()->CopyFrom(data_struct); + } + (*fields)["brokerDatas"].mutable_list_value()->CopyFrom(broker_data_list_v); + } +} + +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/filters/network/rocketmq_proxy/topic_route.h b/source/extensions/filters/network/rocketmq_proxy/topic_route.h new file mode 100644 index 000000000000..2b9afdb1d526 --- /dev/null +++ 
b/source/extensions/filters/network/rocketmq_proxy/topic_route.h @@ -0,0 +1,78 @@ +#pragma once + +#include +#include +#include + +#include "common/protobuf/utility.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { +class QueueData { +public: + QueueData(const std::string& broker_name, int32_t read_queue_num, int32_t write_queue_num, + int32_t perm) + : broker_name_(broker_name), read_queue_nums_(read_queue_num), + write_queue_nums_(write_queue_num), perm_(perm) {} + + void encode(ProtobufWkt::Struct& data_struct); + + const std::string& brokerName() const { return broker_name_; } + + int32_t readQueueNum() const { return read_queue_nums_; } + + int32_t writeQueueNum() const { return write_queue_nums_; } + + int32_t perm() const { return perm_; } + +private: + std::string broker_name_; + int32_t read_queue_nums_; + int32_t write_queue_nums_; + int32_t perm_; +}; + +class BrokerData { +public: + BrokerData(const std::string& cluster, const std::string& broker_name, + std::unordered_map&& broker_addrs) + : cluster_(cluster), broker_name_(broker_name), broker_addrs_(broker_addrs) {} + + void encode(ProtobufWkt::Struct& data_struct); + + const std::string& cluster() const { return cluster_; } + + const std::string& brokerName() const { return broker_name_; } + + std::unordered_map& brokerAddresses() { return broker_addrs_; } + +private: + std::string cluster_; + std::string broker_name_; + std::unordered_map broker_addrs_; +}; + +class TopicRouteData { +public: + void encode(ProtobufWkt::Struct& data_struct); + + TopicRouteData() = default; + + TopicRouteData(std::vector&& queue_data, std::vector&& broker_data) + : queue_data_(queue_data), broker_data_(broker_data) {} + + std::vector& queueData() { return queue_data_; } + + std::vector& brokerData() { return broker_data_; } + +private: + std::vector queue_data_; + std::vector broker_data_; +}; + +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/filters/network/rocketmq_proxy/well_known_names.h b/source/extensions/filters/network/rocketmq_proxy/well_known_names.h new file mode 100644 index 000000000000..659b387db28b --- /dev/null +++ b/source/extensions/filters/network/rocketmq_proxy/well_known_names.h @@ -0,0 +1,29 @@ +#pragma once + +#include + +#include "common/singleton/const_singleton.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { + +struct RocketmqValues { + /** + * All the values below are the properties of single broker in filter_metadata. 
+ */ + const std::string ReadQueueNum = "read_queue_num"; + const std::string WriteQueueNum = "write_queue_num"; + const std::string ClusterName = "cluster_name"; + const std::string BrokerName = "broker_name"; + const std::string BrokerId = "broker_id"; + const std::string Perm = "perm"; +}; + +using RocketmqConstants = ConstSingleton; + +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/filters/network/well_known_names.h b/source/extensions/filters/network/well_known_names.h index a7577b8ffd2c..bc626950ee4c 100644 --- a/source/extensions/filters/network/well_known_names.h +++ b/source/extensions/filters/network/well_known_names.h @@ -18,6 +18,8 @@ class NetworkFilterNameValues { const std::string Echo = "envoy.filters.network.echo"; // Direct response filter const std::string DirectResponse = "envoy.filters.network.direct_response"; + // RocketMQ proxy filter + const std::string RocketmqProxy = "envoy.filters.network.rocketmq_proxy"; // Dubbo proxy filter const std::string DubboProxy = "envoy.filters.network.dubbo_proxy"; // HTTP connection manager filter diff --git a/test/extensions/filters/network/rocketmq_proxy/BUILD b/test/extensions/filters/network/rocketmq_proxy/BUILD new file mode 100644 index 000000000000..868ced554fcc --- /dev/null +++ b/test/extensions/filters/network/rocketmq_proxy/BUILD @@ -0,0 +1,136 @@ +licenses(["notice"]) # Apache 2 + +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_mock", + "envoy_cc_test_library", + "envoy_package", +) +load( + "//test/extensions:extensions_build_system.bzl", + "envoy_extension_cc_test", +) + +envoy_package() + +envoy_cc_mock( + name = "mocks_lib", + srcs = ["mocks.cc"], + hdrs = ["mocks.h"], + deps = [ + "//source/extensions/filters/network/rocketmq_proxy:config", + "//source/extensions/filters/network/rocketmq_proxy/router:router_lib", + "//test/mocks/server:server_mocks", + "//test/mocks/upstream:upstream_mocks", + ], +) + +envoy_cc_test_library( + name = "utility_lib", + srcs = ["utility.cc"], + hdrs = ["utility.h"], + deps = [ + "//source/extensions/filters/network/rocketmq_proxy:config", + "//test/mocks/server:server_mocks", + ], +) + +envoy_extension_cc_test( + name = "protocol_test", + srcs = ["protocol_test.cc"], + extension_name = "envoy.filters.network.rocketmq_proxy", + deps = [ + "//source/extensions/filters/network/rocketmq_proxy:config", + "//test/test_common:utility_lib", + ], +) + +envoy_extension_cc_test( + name = "router_test", + srcs = ["router_test.cc"], + extension_name = "envoy.filters.network.rocketmq_proxy", + deps = [ + ":mocks_lib", + ":utility_lib", + "//source/extensions/filters/network/rocketmq_proxy:config", + "//test/mocks/server:server_mocks", + "//test/test_common:utility_lib", + ], +) + +envoy_extension_cc_test( + name = "topic_route_test", + srcs = ["topic_route_test.cc"], + extension_name = "envoy.filters.network.rocketmq_proxy", + deps = [ + "//source/common/protobuf:utility_lib", + "//source/extensions/filters/network/rocketmq_proxy:config", + "//test/test_common:utility_lib", + ], +) + +envoy_extension_cc_test( + name = "conn_manager_test", + srcs = ["conn_manager_test.cc"], + extension_name = "envoy.filters.network.rocketmq_proxy", + deps = [ + ":utility_lib", + "//test/common/stats:stat_test_utility_lib", + "//test/common/upstream:utility_lib", + "//test/mocks/network:network_mocks", + "//test/mocks/server:server_mocks", + "//test/mocks/stream_info:stream_info_mocks", + 
"//test/test_common:utility_lib", + ], +) + +envoy_extension_cc_test( + name = "active_message_test", + srcs = ["active_message_test.cc"], + extension_name = "envoy.filters.network.rocketmq_proxy", + deps = [ + ":utility_lib", + "//source/extensions/filters/network/rocketmq_proxy:config", + "//test/mocks/network:network_mocks", + "//test/mocks/server:server_mocks", + "//test/mocks/stream_info:stream_info_mocks", + "//test/test_common:utility_lib", + ], +) + +envoy_extension_cc_test( + name = "config_test", + srcs = ["config_test.cc"], + extension_name = "envoy.filters.network.rocketmq_proxy", + deps = [ + "//source/extensions/filters/network/rocketmq_proxy:config", + "//test/mocks/local_info:local_info_mocks", + "//test/mocks/server:server_mocks", + "//test/test_common:registry_lib", + "@envoy_api//envoy/extensions/filters/network/rocketmq_proxy/v3:pkg_cc_proto", + ], +) + +envoy_extension_cc_test( + name = "codec_test", + srcs = ["codec_test.cc"], + extension_name = "envoy.filters.network.rocketmq_proxy", + deps = [ + ":utility_lib", + "//source/common/network:address_lib", + "//source/common/protobuf:utility_lib", + "//test/mocks/server:server_mocks", + "//test/test_common:registry_lib", + ], +) + +envoy_extension_cc_test( + name = "route_matcher_test", + srcs = ["route_matcher_test.cc"], + extension_name = "envoy.filters.network.rocketmq_proxy", + deps = [ + "//source/extensions/filters/network/rocketmq_proxy/router:route_matcher", + "//test/mocks/server:server_mocks", + "@envoy_api//envoy/extensions/filters/network/rocketmq_proxy/v3:pkg_cc_proto", + ], +) diff --git a/test/extensions/filters/network/rocketmq_proxy/active_message_test.cc b/test/extensions/filters/network/rocketmq_proxy/active_message_test.cc new file mode 100644 index 000000000000..8b87e034692b --- /dev/null +++ b/test/extensions/filters/network/rocketmq_proxy/active_message_test.cc @@ -0,0 +1,209 @@ +#include "extensions/filters/network/rocketmq_proxy/active_message.h" +#include "extensions/filters/network/rocketmq_proxy/config.h" +#include "extensions/filters/network/rocketmq_proxy/conn_manager.h" +#include "extensions/filters/network/rocketmq_proxy/protocol.h" +#include "extensions/filters/network/rocketmq_proxy/well_known_names.h" + +#include "test/extensions/filters/network/rocketmq_proxy/utility.h" +#include "test/mocks/network/mocks.h" +#include "test/mocks/server/mocks.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::Return; + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { + +class ActiveMessageTest : public testing::Test { +public: + ActiveMessageTest() + : stats_(RocketmqFilterStats::generateStats("test.", store_)), + config_(rocketmq_proxy_config_, factory_context_), + connection_manager_(config_, factory_context_.dispatcher().timeSource()) { + connection_manager_.initializeReadFilterCallbacks(filter_callbacks_); + } + + ~ActiveMessageTest() override { + filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList(); + } + +protected: + ConfigImpl::RocketmqProxyConfig rocketmq_proxy_config_; + NiceMock filter_callbacks_; + NiceMock factory_context_; + Stats::IsolatedStoreImpl store_; + RocketmqFilterStats stats_; + ConfigImpl config_; + ConnectionManager connection_manager_; +}; + +TEST_F(ActiveMessageTest, ClusterName) { + std::string json = R"EOF( + { + "opaque": 1, + "code": 35, + "version": 1, + "language": "JAVA", + "serializeTypeCurrentRPC": "JSON", + "flag": 0, + "extFields": { + "clientID": "SampleClient_01", + 
"producerGroup": "PG_Example_01", + "consumerGroup": "CG_001" + } + } + )EOF"; + + Buffer::OwnedImpl buffer; + buffer.writeBEInt(4 + 4 + json.size()); + buffer.writeBEInt(json.size()); + buffer.add(json); + + bool underflow = false; + bool has_error = false; + auto cmd = Decoder::decode(buffer, underflow, has_error); + EXPECT_FALSE(underflow); + EXPECT_FALSE(has_error); + + ActiveMessage activeMessage(connection_manager_, std::move(cmd)); + EXPECT_FALSE(activeMessage.metadata()->hasTopicName()); +} + +TEST_F(ActiveMessageTest, FillBrokerData) { + + std::unordered_map address; + address.emplace(0, "1.2.3.4:10911"); + BrokerData broker_data("DefaultCluster", "broker-a", std::move(address)); + + std::vector list; + list.push_back(broker_data); + + ActiveMessage::fillBrokerData(list, "DefaultCluster", "broker-a", 1, "localhost:10911"); + ActiveMessage::fillBrokerData(list, "DefaultCluster", "broker-a", 0, "localhost:10911"); + EXPECT_EQ(1, list.size()); + for (auto& it : list) { + auto& address = it.brokerAddresses(); + EXPECT_EQ(2, address.size()); + EXPECT_STREQ("1.2.3.4:10911", address[0].c_str()); + } +} + +TEST_F(ActiveMessageTest, FillAckMessageDirectiveSuccess) { + RemotingCommandPtr cmd = std::make_unique(); + ActiveMessage active_message(connection_manager_, std::move(cmd)); + + Buffer::OwnedImpl buffer; + // frame length + buffer.writeBEInt(98); + + // magic code + buffer.writeBEInt(enumToSignedInt(MessageVersion::V1)); + + // body CRC + buffer.writeBEInt(1); + + // queue Id + buffer.writeBEInt(2); + + // flag + buffer.writeBEInt(3); + + // queue offset + buffer.writeBEInt(4); + + // physical offset + buffer.writeBEInt(5); + + // system flag + buffer.writeBEInt(6); + + // born timestamp + buffer.writeBEInt(7); + + // born host + buffer.writeBEInt(8); + + // born host port + buffer.writeBEInt(9); + + // store timestamp + buffer.writeBEInt(10); + + // store host address ip:port --> long + Network::Address::Ipv4Instance host_address("127.0.0.1", 10911); + const sockaddr_in* sock_addr = reinterpret_cast(host_address.sockAddr()); + buffer.writeBEInt(sock_addr->sin_addr.s_addr); + buffer.writeBEInt(sock_addr->sin_port); + + // re-consume times + buffer.writeBEInt(11); + + // transaction offset + buffer.writeBEInt(12); + + // body size + buffer.writeBEInt(0); + + const std::string topic = "TopicTest"; + + // topic length + buffer.writeBEInt(topic.length()); + + // topic data + buffer.add(topic); + + AckMessageDirective directive("broker-a", 0, connection_manager_.timeSource().monotonicTime()); + const std::string group = "Group"; + active_message.fillAckMessageDirective(buffer, group, topic, directive); + + const std::string fake_topic = "FakeTopic"; + active_message.fillAckMessageDirective(buffer, group, fake_topic, directive); + + EXPECT_EQ(connection_manager_.getAckDirectiveTableForTest().size(), 1); +} + +TEST_F(ActiveMessageTest, RecordPopRouteInfo) { + auto host_description = new NiceMock(); + + auto metadata = std::make_shared(); + ProtobufWkt::Struct topic_route_data; + auto* fields = topic_route_data.mutable_fields(); + + std::string broker_name = "broker-a"; + int32_t broker_id = 0; + + (*fields)[RocketmqConstants::get().ReadQueueNum] = ValueUtil::numberValue(4); + (*fields)[RocketmqConstants::get().WriteQueueNum] = ValueUtil::numberValue(4); + (*fields)[RocketmqConstants::get().ClusterName] = ValueUtil::stringValue("DefaultCluster"); + (*fields)[RocketmqConstants::get().BrokerName] = ValueUtil::stringValue(broker_name); + (*fields)[RocketmqConstants::get().BrokerId] = 
ValueUtil::numberValue(broker_id); + (*fields)[RocketmqConstants::get().Perm] = ValueUtil::numberValue(6); + metadata->mutable_filter_metadata()->insert(Protobuf::MapPair( + NetworkFilterNames::get().RocketmqProxy, topic_route_data)); + + EXPECT_CALL(*host_description, metadata()).WillRepeatedly(Return(metadata)); + + Upstream::HostDescriptionConstSharedPtr host_description_ptr(host_description); + + Buffer::OwnedImpl buffer; + BufferUtility::fillRequestBuffer(buffer, RequestCode::PopMessage); + + bool underflow = false; + bool has_error = false; + + RemotingCommandPtr cmd = Decoder::decode(buffer, underflow, has_error); + ActiveMessage active_message(connection_manager_, std::move(cmd)); + active_message.recordPopRouteInfo(host_description_ptr); + auto custom_header = active_message.downstreamRequest()->typedCustomHeader(); + EXPECT_EQ(custom_header->targetBrokerName(), broker_name); + EXPECT_EQ(custom_header->targetBrokerId(), broker_id); +} + +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/network/rocketmq_proxy/codec_test.cc b/test/extensions/filters/network/rocketmq_proxy/codec_test.cc new file mode 100644 index 000000000000..08d8dd5021a1 --- /dev/null +++ b/test/extensions/filters/network/rocketmq_proxy/codec_test.cc @@ -0,0 +1,797 @@ +#include "common/network/address_impl.h" +#include "common/protobuf/utility.h" + +#include "extensions/filters/network/rocketmq_proxy/codec.h" + +#include "test/extensions/filters/network/rocketmq_proxy/utility.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { + +class RocketmqCodecTest : public testing::Test { +public: + RocketmqCodecTest() = default; + ~RocketmqCodecTest() override = default; +}; + +TEST_F(RocketmqCodecTest, DecodeWithMinFrameSize) { + Buffer::OwnedImpl buffer; + + buffer.add(std::string({'\x00', '\x00', '\x01', '\x8b'})); + buffer.add(std::string({'\x00', '\x00', '\x01', '\x76'})); + + bool underflow = false; + bool has_error = false; + + auto cmd = Decoder::decode(buffer, underflow, has_error); + + EXPECT_TRUE(underflow); + EXPECT_FALSE(has_error); + EXPECT_TRUE(nullptr == cmd); +} + +TEST_F(RocketmqCodecTest, DecodeWithOverMaxFrameSizeData) { + Buffer::OwnedImpl buffer; + + buffer.add(std::string({'\x00', '\x40', '\x00', '\x01'})); + buffer.add(std::string({'\x00', '\x20', '\x00', '\x00', '\x00'})); + + bool underflow = false; + bool has_error = false; + + auto cmd = Decoder::decode(buffer, underflow, has_error); + + EXPECT_FALSE(underflow); + EXPECT_TRUE(has_error); + EXPECT_TRUE(nullptr == cmd); +} + +TEST_F(RocketmqCodecTest, DecodeUnsupportHeaderSerialization) { + Buffer::OwnedImpl buffer; + std::string header = "random text suffices"; + + buffer.writeBEInt(4 + 4 + header.size()); + uint32_t mark = header.size(); + mark |= (1u << 24u); + buffer.writeBEInt(mark); + buffer.add(header); + + bool underflow = false; + bool has_error = false; + + auto cmd = Decoder::decode(buffer, underflow, has_error); + + EXPECT_FALSE(underflow); + EXPECT_TRUE(has_error); + EXPECT_TRUE(nullptr == cmd); +} + +TEST_F(RocketmqCodecTest, DecodeInvalidJson) { + Buffer::OwnedImpl buffer; + // Invalid json string. 
+ std::string invalid_json = R"EOF({a: 3)EOF"; + + buffer.writeBEInt(4 + 4 + invalid_json.size()); + buffer.writeBEInt(invalid_json.size()); + buffer.add(invalid_json); + + bool underflow = false; + bool has_error = false; + + auto cmd = Decoder::decode(buffer, underflow, has_error); + + EXPECT_FALSE(underflow); + EXPECT_TRUE(has_error); + EXPECT_TRUE(cmd == nullptr); +} + +TEST_F(RocketmqCodecTest, DecodeCodeMissing) { + Buffer::OwnedImpl buffer; + // Valid JSON, but the required 'code' field is missing, so decoding should fail. + std::string invalid_json = R"EOF({"a": 3})EOF"; + + buffer.writeBEInt(4 + 4 + invalid_json.size()); + buffer.writeBEInt(invalid_json.size()); + buffer.add(invalid_json); + + bool underflow = false; + bool has_error = false; + + auto cmd = Decoder::decode(buffer, underflow, has_error); + + EXPECT_FALSE(underflow); + EXPECT_TRUE(has_error); + EXPECT_TRUE(cmd == nullptr); +} + +TEST_F(RocketmqCodecTest, DecodeVersionMissing) { + Buffer::OwnedImpl buffer; + // Valid JSON, but the required 'version' field is missing, so decoding should fail. + std::string invalid_json = R"EOF({"code": 3})EOF"; + + buffer.writeBEInt(4 + 4 + invalid_json.size()); + buffer.writeBEInt(invalid_json.size()); + buffer.add(invalid_json); + + bool underflow = false; + bool has_error = false; + + auto cmd = Decoder::decode(buffer, underflow, has_error); + + EXPECT_FALSE(underflow); + EXPECT_TRUE(has_error); + EXPECT_TRUE(cmd == nullptr); +} + +TEST_F(RocketmqCodecTest, DecodeOpaqueMissing) { + Buffer::OwnedImpl buffer; + // Valid JSON, but the required 'opaque' field is missing, so decoding should fail. + std::string invalid_json = R"EOF( + { + "code": 3, + "version": 1 + } + )EOF"; + + buffer.writeBEInt(4 + 4 + invalid_json.size()); + buffer.writeBEInt(invalid_json.size()); + buffer.add(invalid_json); + + bool underflow = false; + bool has_error = false; + + auto cmd = Decoder::decode(buffer, underflow, has_error); + + EXPECT_FALSE(underflow); + EXPECT_TRUE(has_error); + EXPECT_TRUE(cmd == nullptr); +} + +TEST_F(RocketmqCodecTest, DecodeFlagMissing) { + Buffer::OwnedImpl buffer; + // Valid JSON, but the required 'flag' field is missing, so decoding should fail. 
+ std::string invalid_json = R"EOF( + { + "code": 3, + "version": 1, + "opaque": 1 + } + )EOF"; + + buffer.writeBEInt(4 + 4 + invalid_json.size()); + buffer.writeBEInt(invalid_json.size()); + buffer.add(invalid_json); + + bool underflow = false; + bool has_error = false; + + auto cmd = Decoder::decode(buffer, underflow, has_error); + + EXPECT_FALSE(underflow); + EXPECT_TRUE(has_error); + EXPECT_TRUE(cmd == nullptr); +} + +TEST_F(RocketmqCodecTest, DecodeRequestSendMessage) { + Buffer::OwnedImpl buffer; + BufferUtility::fillRequestBuffer(buffer, RequestCode::SendMessage); + + bool underflow = false; + bool has_error = false; + + RemotingCommandPtr request = Decoder::decode(buffer, underflow, has_error); + + EXPECT_FALSE(underflow || has_error); + EXPECT_EQ(request->opaque(), BufferUtility::opaque_); + Buffer::Instance& body = request->body(); + EXPECT_EQ(body.toString(), BufferUtility::msg_body_); + + auto header = request->typedCustomHeader(); + + EXPECT_EQ(header->topic(), BufferUtility::topic_name_); + EXPECT_EQ(header->version(), SendMessageRequestVersion::V1); + EXPECT_EQ(header->queueId(), -1); +} + +TEST_F(RocketmqCodecTest, DecodeRequestSendMessageV2) { + Buffer::OwnedImpl buffer; + + BufferUtility::fillRequestBuffer(buffer, RequestCode::SendMessageV2); + + bool underflow = false; + bool has_error = false; + + RemotingCommandPtr request = Decoder::decode(buffer, underflow, has_error); + + EXPECT_FALSE(underflow || has_error); + EXPECT_EQ(request->opaque(), BufferUtility::opaque_); + + Buffer::Instance& body = request->body(); + + EXPECT_EQ(body.toString(), BufferUtility::msg_body_); + + auto header = request->typedCustomHeader(); + + EXPECT_EQ(header->topic(), BufferUtility::topic_name_); + EXPECT_EQ(header->version(), SendMessageRequestVersion::V2); + EXPECT_EQ(header->queueId(), -1); +} + +TEST_F(RocketmqCodecTest, DecodeRequestSendMessageV1) { + std::string json = R"EOF( + { + "code": 10, + "version": 1, + "opaque": 1, + "flag": 0, + "extFields": { + "batch": false, + "bornTimestamp": 1575872212297, + "defaultTopic": "TBW102", + "defaultTopicQueueNums": 3, + "flag": 124, + "producerGroup": "FooBarGroup", + "queueId": 1, + "reconsumeTimes": 0, + "sysFlag": 0, + "topic": "FooBar", + "unitMode": false, + "properties": "mock_properties", + "maxReconsumeTimes": 32 + } + } + )EOF"; + Buffer::OwnedImpl buffer; + + buffer.writeBEInt(4 + 4 + json.size()); + buffer.writeBEInt(json.size()); + buffer.add(json); + + bool underflow = false; + bool has_error = false; + + auto cmd = Decoder::decode(buffer, underflow, has_error); + + EXPECT_FALSE(underflow); + EXPECT_FALSE(has_error); + EXPECT_TRUE(nullptr != cmd); + EXPECT_EQ(10, cmd->code()); + EXPECT_EQ(1, cmd->version()); + EXPECT_EQ(1, cmd->opaque()); +} + +TEST_F(RocketmqCodecTest, DecodeSendMessageResponseWithSystemError) { + std::string json = R"EOF( + { + "code": 1, + "language": "JAVA", + "version": 2, + "opaque": 1, + "flag": 1, + "remark": "System error", + "serializeTypeCurrentRPC": "JSON" + } + )EOF"; + Buffer::OwnedImpl buffer; + + buffer.writeBEInt(4 + 4 + json.size()); + buffer.writeBEInt(json.size()); + buffer.add(json); + + bool underflow = false; + bool has_error = false; + + auto cmd = + Decoder::decode(buffer, underflow, has_error, static_cast(RequestCode::SendMessage)); + + EXPECT_FALSE(has_error); + EXPECT_FALSE(underflow); + EXPECT_TRUE(nullptr != cmd); + EXPECT_STREQ("JAVA", cmd->language().c_str()); + EXPECT_STREQ("JSON", cmd->serializeTypeCurrentRPC().c_str()); + EXPECT_STREQ("System error", cmd->remark().c_str()); + 
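// The error response above carries no extFields, so the decoder is expected to leave the typed custom header unset. +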
EXPECT_TRUE(nullptr == cmd->customHeader()); +} + +TEST_F(RocketmqCodecTest, DecodeSendMessageResponseWithSystemBusy) { + std::string json = R"EOF( + { + "code": 2, + "language": "JAVA", + "version": 2, + "opaque": 1, + "flag": 1, + "remark": "System busy", + "serializeTypeCurrentRPC": "JSON" + } + )EOF"; + Buffer::OwnedImpl buffer; + + buffer.writeBEInt(4 + 4 + json.size()); + buffer.writeBEInt(json.size()); + buffer.add(json); + + bool underflow = false; + bool has_error = false; + + auto cmd = + Decoder::decode(buffer, underflow, has_error, static_cast(RequestCode::SendMessage)); + + EXPECT_FALSE(has_error); + EXPECT_FALSE(underflow); + EXPECT_TRUE(nullptr != cmd); + EXPECT_STREQ("JAVA", cmd->language().c_str()); + EXPECT_STREQ("JSON", cmd->serializeTypeCurrentRPC().c_str()); + EXPECT_STREQ("System busy", cmd->remark().c_str()); + EXPECT_TRUE(nullptr == cmd->customHeader()); +} + +TEST_F(RocketmqCodecTest, DecodeSendMessageResponseWithCodeNotSupported) { + std::string json = R"EOF( + { + "code": 3, + "language": "JAVA", + "version": 2, + "opaque": 1, + "flag": 1, + "remark": "Code not supported", + "serializeTypeCurrentRPC": "JSON" + } + )EOF"; + Buffer::OwnedImpl buffer; + + buffer.writeBEInt(4 + 4 + json.size()); + buffer.writeBEInt(json.size()); + buffer.add(json); + + bool underflow = false; + bool has_error = false; + + auto cmd = + Decoder::decode(buffer, underflow, has_error, static_cast(RequestCode::SendMessage)); + + EXPECT_FALSE(has_error); + EXPECT_FALSE(underflow); + EXPECT_TRUE(nullptr != cmd); + EXPECT_STREQ("JAVA", cmd->language().c_str()); + EXPECT_STREQ("JSON", cmd->serializeTypeCurrentRPC().c_str()); + EXPECT_STREQ("Code not supported", cmd->remark().c_str()); + EXPECT_TRUE(nullptr == cmd->customHeader()); +} + +TEST_F(RocketmqCodecTest, DecodeSendMessageResponseNormal) { + std::string json = R"EOF( + { + "code": 0, + "language": "JAVA", + "version": 2, + "opaque": 1, + "flag": 1, + "remark": "OK", + "serializeTypeCurrentRPC": "JSON", + "extFields": { + "msgId": "A001", + "queueId": "10", + "queueOffset": "2", + "transactionId": "" + } + } + )EOF"; + Buffer::OwnedImpl buffer; + + buffer.writeBEInt(4 + 4 + json.size()); + buffer.writeBEInt(json.size()); + buffer.add(json); + + bool underflow = false; + bool has_error = false; + + auto cmd = + Decoder::decode(buffer, underflow, has_error, static_cast(RequestCode::SendMessage)); + + EXPECT_FALSE(has_error); + EXPECT_FALSE(underflow); + EXPECT_TRUE(nullptr != cmd); + EXPECT_STREQ("JAVA", cmd->language().c_str()); + EXPECT_STREQ("JSON", cmd->serializeTypeCurrentRPC().c_str()); + EXPECT_STREQ("OK", cmd->remark().c_str()); + EXPECT_TRUE(nullptr != cmd->customHeader()); + + auto extHeader = cmd->typedCustomHeader(); + + EXPECT_STREQ("A001", extHeader->msgId().c_str()); + EXPECT_EQ(10, extHeader->queueId()); + EXPECT_EQ(2, extHeader->queueOffset()); +} + +TEST_F(RocketmqCodecTest, DecodePopMessageResponseNormal) { + std::string json = R"EOF( + { + "code": 0, + "language": "JAVA", + "version": 2, + "opaque": 1, + "flag": 1, + "remark": "OK", + "serializeTypeCurrentRPC": "JSON", + "extFields": { + "popTime": "1234", + "invisibleTime": "10", + "reviveQid": "2", + "restNum": "10", + "startOffsetInfo": "3", + "msgOffsetInfo": "mock_msg_offset_info", + "orderCountInfo": "mock_order_count_info" + } + } + )EOF"; + Buffer::OwnedImpl buffer; + + buffer.writeBEInt(4 + 4 + json.size()); + buffer.writeBEInt(json.size()); + buffer.add(json); + + bool underflow = false; + bool has_error = false; + + auto cmd = + Decoder::decode(buffer, 
underflow, has_error, static_cast(RequestCode::PopMessage)); + + EXPECT_FALSE(has_error); + EXPECT_FALSE(underflow); + EXPECT_TRUE(nullptr != cmd); + EXPECT_STREQ("JAVA", cmd->language().c_str()); + EXPECT_STREQ("JSON", cmd->serializeTypeCurrentRPC().c_str()); + EXPECT_STREQ("OK", cmd->remark().c_str()); + EXPECT_TRUE(nullptr != cmd->customHeader()); + + auto extHeader = cmd->typedCustomHeader(); + + EXPECT_EQ(1234, extHeader->popTimeForTest()); + EXPECT_EQ(10, extHeader->invisibleTime()); + EXPECT_EQ(2, extHeader->reviveQid()); + EXPECT_EQ(10, extHeader->restNum()); + EXPECT_STREQ("3", extHeader->startOffsetInfo().c_str()); + EXPECT_STREQ("mock_msg_offset_info", extHeader->msgOffsetInfo().c_str()); + EXPECT_STREQ("mock_order_count_info", extHeader->orderCountInfo().c_str()); +} + +TEST_F(RocketmqCodecTest, DecodeRequestSendMessageV2underflow) { + Buffer::OwnedImpl buffer; + + buffer.add(std::string({'\x00', '\x00', '\x01', '\x8b'})); + buffer.add(std::string({'\x00', '\x00', '\x01', '\x76'})); + + std::string header_json = R"EOF( + { + "code": 310, + "extFields": { + "a": "GID_LINGCHU_TEST_0" + } + )EOF"; + + buffer.add(header_json); + buffer.add(std::string{"_Apache_RocketMQ_"}); + + bool underflow = false; + bool has_error = false; + + RemotingCommandPtr request = Decoder::decode(buffer, underflow, has_error); + + EXPECT_EQ(underflow, true); + EXPECT_EQ(has_error, false); +} + +TEST_F(RocketmqCodecTest, EncodeResponseSendMessageSuccess) { + const int version = 285; + const int opaque = 4; + const std::string msg_id = "1E05789ABD1F18B4AAC2895B8BE60003"; + + RemotingCommandPtr response = + std::make_unique(static_cast(ResponseCode::Success), version, opaque); + + response->markAsResponse(); + + const int queue_id = 0; + const int queue_offset = 0; + + std::unique_ptr sendMessageResponseHeader = + std::make_unique(msg_id, queue_id, queue_offset, EMPTY_STRING); + CommandCustomHeaderPtr extHeader(sendMessageResponseHeader.release()); + response->customHeader(extHeader); + + Buffer::OwnedImpl response_buffer; + Encoder::encode(response, response_buffer); + + uint32_t frame_length = response_buffer.peekBEInt(); + uint32_t header_length = + response_buffer.peekBEInt(Decoder::FRAME_HEADER_LENGTH_FIELD_SIZE); + + EXPECT_EQ(header_length + Decoder::FRAME_HEADER_LENGTH_FIELD_SIZE, frame_length); + + std::unique_ptr header_data = std::make_unique(header_length); + const uint32_t frame_header_content_offset = + Decoder::FRAME_LENGTH_FIELD_SIZE + Decoder::FRAME_HEADER_LENGTH_FIELD_SIZE; + response_buffer.copyOut(frame_header_content_offset, header_length, header_data.get()); + std::string header_json(header_data.get(), header_length); + ProtobufWkt::Struct doc; + MessageUtil::loadFromJson(header_json, doc); + const auto& members = doc.fields(); + + EXPECT_EQ(members.at("code").number_value(), 0); + EXPECT_EQ(members.at("version").number_value(), version); + EXPECT_EQ(members.at("opaque").number_value(), opaque); + + const auto& extFields = members.at("extFields").struct_value().fields(); + + EXPECT_EQ(extFields.at("msgId").string_value(), msg_id); + EXPECT_EQ(extFields.at("queueId").number_value(), queue_id); + EXPECT_EQ(extFields.at("queueOffset").number_value(), queue_offset); +} + +TEST_F(RocketmqCodecTest, DecodeQueueIdWithIncompleteBuffer) { + Buffer::OwnedImpl buffer; + // incomplete buffer + buffer.add(std::string({'\x00'})); + + EXPECT_EQ(Decoder::decodeQueueId(buffer, 0), -1); +} + +TEST_F(RocketmqCodecTest, DecodeQueueIdSuccess) { + Buffer::OwnedImpl buffer; + // frame length + 
buffer.writeBEInt(16); + + for (int i = 0; i < 3; i++) { + buffer.writeBEInt(i); + } + EXPECT_EQ(Decoder::decodeQueueId(buffer, 0), 2); +} + +TEST_F(RocketmqCodecTest, DecodeQueueIdFailure) { + Buffer::OwnedImpl buffer; + buffer.writeBEInt(128); + + // Some random data, but incomplete frame + buffer.writeBEInt(12); + + EXPECT_EQ(Decoder::decodeQueueId(buffer, 0), -1); +} + +TEST_F(RocketmqCodecTest, DecodeQueueOffsetSuccess) { + Buffer::OwnedImpl buffer; + // frame length + buffer.writeBEInt(28); + + // frame data + for (int i = 0; i < 4; i++) { + buffer.writeBEInt(i); + } + // write queue offset which takes up 8 bytes + buffer.writeBEInt(4); + + EXPECT_EQ(Decoder::decodeQueueOffset(buffer, 0), 4); +} + +TEST_F(RocketmqCodecTest, DecodeQueueOffsetFailure) { + Buffer::OwnedImpl buffer; + + // Define length of the frame as 128 bytes + buffer.writeBEInt(128); + + // some random data, just make sure the frame is incomplete + for (int i = 0; i < 6; i++) { + buffer.writeBEInt(i); + } + + EXPECT_EQ(Decoder::decodeQueueOffset(buffer, 0), -1); +} + +TEST_F(RocketmqCodecTest, DecodeMsgIdSuccess) { + Buffer::OwnedImpl buffer; + + // frame length + buffer.writeBEInt(64); + + // magic code + buffer.writeBEInt(0); + + // body CRC + buffer.writeBEInt(1); + + // queue Id + buffer.writeBEInt(2); + + // flag + buffer.writeBEInt(3); + + // queue offset + buffer.writeBEInt(4); + + // physical offset + buffer.writeBEInt(5); + + // system flag + buffer.writeBEInt(6); + + // born timestamp + buffer.writeBEInt(7); + + // born host + buffer.writeBEInt(8); + + // born host port + buffer.writeBEInt(9); + + // store timestamp + buffer.writeBEInt(10); + + // store host address ip:port --> long + Network::Address::Ipv4Instance host_address("127.0.0.1", 10911); + const sockaddr_in* sock_addr = reinterpret_cast(host_address.sockAddr()); + buffer.writeBEInt(sock_addr->sin_addr.s_addr); + buffer.writeBEInt(sock_addr->sin_port); + EXPECT_EQ(Decoder::decodeMsgId(buffer, 0).empty(), false); +} + +TEST_F(RocketmqCodecTest, DecodeMsgIdFailure) { + Buffer::OwnedImpl buffer; + + // frame length + buffer.writeBEInt(101); + + // magic code + buffer.writeBEInt(0); + EXPECT_EQ(Decoder::decodeMsgId(buffer, 0).empty(), true); +} + +TEST_F(RocketmqCodecTest, DecodeTopicSuccessV1) { + Buffer::OwnedImpl buffer; + + // frame length + buffer.writeBEInt(98); + + // magic code + buffer.writeBEInt(enumToSignedInt(MessageVersion::V1)); + + // body CRC + buffer.writeBEInt(1); + + // queue Id + buffer.writeBEInt(2); + + // flag + buffer.writeBEInt(3); + + // queue offset + buffer.writeBEInt(4); + + // physical offset + buffer.writeBEInt(5); + + // system flag + buffer.writeBEInt(6); + + // born timestamp + buffer.writeBEInt(7); + + // born host + buffer.writeBEInt(8); + + // born host port + buffer.writeBEInt(9); + + // store timestamp + buffer.writeBEInt(10); + + // store host address ip:port --> long + Network::Address::Ipv4Instance host_address("127.0.0.1", 10911); + const sockaddr_in* sock_addr = reinterpret_cast(host_address.sockAddr()); + buffer.writeBEInt(sock_addr->sin_addr.s_addr); + buffer.writeBEInt(sock_addr->sin_port); + + // re-consume times + buffer.writeBEInt(11); + + // transaction offset + buffer.writeBEInt(12); + + // body size + buffer.writeBEInt(0); + + const std::string topic = "TopicTest"; + + // topic length + buffer.writeBEInt(topic.length()); + + // topic data + buffer.add(topic); + + EXPECT_STREQ(Decoder::decodeTopic(buffer, 0).c_str(), topic.c_str()); +} + +TEST_F(RocketmqCodecTest, DecodeTopicSuccessV2) { + 
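// Same store-message layout as the V1 case above, but with the MessageVersion::V2 magic code; the declared frame length grows from 98 to 99, presumably because V2 widens the topic-length field by one byte. +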
Buffer::OwnedImpl buffer; + + // frame length + buffer.writeBEInt(99); + + // magic code + buffer.writeBEInt(enumToSignedInt(MessageVersion::V2)); + + // body CRC + buffer.writeBEInt(1); + + // queue Id + buffer.writeBEInt(2); + + // flag + buffer.writeBEInt(3); + + // queue offset + buffer.writeBEInt(4); + + // physical offset + buffer.writeBEInt(5); + + // system flag + buffer.writeBEInt(6); + + // born timestamp + buffer.writeBEInt(7); + + // born host + buffer.writeBEInt(8); + + // born host port + buffer.writeBEInt(9); + + // store timestamp + buffer.writeBEInt(10); + + // store host address ip:port --> long + Network::Address::Ipv4Instance host_address("127.0.0.1", 10911); + const sockaddr_in* sock_addr = reinterpret_cast(host_address.sockAddr()); + buffer.writeBEInt(sock_addr->sin_addr.s_addr); + buffer.writeBEInt(sock_addr->sin_port); + + // re-consume times + buffer.writeBEInt(11); + + // transaction offset + buffer.writeBEInt(12); + + // body size + buffer.writeBEInt(0); + + const std::string topic = "TopicTest"; + + // topic length + buffer.writeBEInt(topic.length()); + + // topic data + buffer.add(topic); + + EXPECT_STREQ(Decoder::decodeTopic(buffer, 0).c_str(), topic.c_str()); +} + +TEST_F(RocketmqCodecTest, DecodeTopicFailure) { + Buffer::OwnedImpl buffer; + + // frame length + buffer.writeBEInt(64); + + // magic code + buffer.writeBEInt(0); + EXPECT_EQ(Decoder::decodeTopic(buffer, 0).empty(), true); +} + +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/test/extensions/filters/network/rocketmq_proxy/config_test.cc b/test/extensions/filters/network/rocketmq_proxy/config_test.cc new file mode 100644 index 000000000000..af4d5ef745e4 --- /dev/null +++ b/test/extensions/filters/network/rocketmq_proxy/config_test.cc @@ -0,0 +1,170 @@ +#include "envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.pb.h" +#include "envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.pb.validate.h" + +#include "extensions/filters/network/rocketmq_proxy/config.h" + +#include "test/mocks/local_info/mocks.h" +#include "test/mocks/server/mocks.h" +#include "test/test_common/registry.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::_; +using testing::Return; +using testing::ReturnRef; + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { + +using RocketmqProxyProto = envoy::extensions::filters::network::rocketmq_proxy::v3::RocketmqProxy; + +RocketmqProxyProto parseRocketmqProxyFromV2Yaml(const std::string& yaml) { + RocketmqProxyProto rocketmq_proxy; + TestUtility::loadFromYaml(yaml, rocketmq_proxy); + return rocketmq_proxy; +} + +class RocketmqFilterConfigTestBase { +public: + void testConfig(RocketmqProxyProto& config) { + Network::FilterFactoryCb cb; + EXPECT_NO_THROW({ cb = factory_.createFilterFactoryFromProto(config, context_); }); + Network::MockConnection connection; + EXPECT_CALL(connection, addReadFilter(_)); + cb(connection); + } + + NiceMock context_; + RocketmqProxyFilterConfigFactory factory_; +}; + +class RocketmqFilterConfigTest : public RocketmqFilterConfigTestBase, public testing::Test { +public: + ~RocketmqFilterConfigTest() override = default; +}; + +TEST_F(RocketmqFilterConfigTest, ValidateFail) { + NiceMock context; + EXPECT_THROW( + RocketmqProxyFilterConfigFactory().createFilterFactoryFromProto( + envoy::extensions::filters::network::rocketmq_proxy::v3::RocketmqProxy(), context), + 
ProtoValidationException); +} + +TEST_F(RocketmqFilterConfigTest, ValidProtoConfiguration) { + envoy::extensions::filters::network::rocketmq_proxy::v3::RocketmqProxy config{}; + config.set_stat_prefix("my_stat_prefix"); + NiceMock context; + RocketmqProxyFilterConfigFactory factory; + Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(config, context); + Network::MockConnection connection; + EXPECT_CALL(connection, addReadFilter(_)); + cb(connection); +} + +TEST_F(RocketmqFilterConfigTest, RocketmqProxyWithEmptyProto) { + NiceMock context; + RocketmqProxyFilterConfigFactory factory; + envoy::extensions::filters::network::rocketmq_proxy::v3::RocketmqProxy config = + *dynamic_cast( + factory.createEmptyConfigProto().get()); + config.set_stat_prefix("my_stat_prefix"); + Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(config, context); + Network::MockConnection connection; + EXPECT_CALL(connection, addReadFilter(_)); + cb(connection); +} + +TEST_F(RocketmqFilterConfigTest, RocketmqProxyWithFullConfig) { + const std::string yaml = R"EOF( + stat_prefix: rocketmq_incomming_stats + develop_mode: true + transient_object_life_span: + seconds: 30 + )EOF"; + RocketmqProxyProto config = parseRocketmqProxyFromV2Yaml(yaml); + testConfig(config); +} + +TEST_F(RocketmqFilterConfigTest, ProxyAddress) { + NiceMock context; + Server::Configuration::MockServerFactoryContext factory_context; + EXPECT_CALL(context, getServerFactoryContext()).WillRepeatedly(ReturnRef(factory_context)); + + LocalInfo::MockLocalInfo local_info; + EXPECT_CALL(factory_context, localInfo()).WillRepeatedly(ReturnRef(local_info)); + std::shared_ptr instance = + std::make_shared("logical", "physical"); + EXPECT_CALL(local_info, address()).WillRepeatedly(Return(instance)); + EXPECT_CALL(*instance, type()).WillRepeatedly(Return(Network::Address::Type::Ip)); + + Network::MockIp* ip = new Network::MockIp(); + EXPECT_CALL(*instance, ip()).WillRepeatedly(testing::Return(ip)); + + std::string address("1.2.3.4"); + EXPECT_CALL(*ip, addressAsString()).WillRepeatedly(ReturnRef(address)); + EXPECT_CALL(*ip, port()).WillRepeatedly(Return(1234)); + ConfigImpl::RocketmqProxyConfig proxyConfig; + ConfigImpl configImpl(proxyConfig, context); + + EXPECT_STREQ("1.2.3.4:1234", configImpl.proxyAddress().c_str()); + delete ip; +} + +TEST_F(RocketmqFilterConfigTest, ProxyAddressWithDefaultPort) { + NiceMock context; + Server::Configuration::MockServerFactoryContext factory_context; + EXPECT_CALL(context, getServerFactoryContext()).WillRepeatedly(ReturnRef(factory_context)); + + LocalInfo::MockLocalInfo local_info; + EXPECT_CALL(factory_context, localInfo()).WillRepeatedly(ReturnRef(local_info)); + std::shared_ptr instance = + std::make_shared("logical", "physical"); + EXPECT_CALL(local_info, address()).WillRepeatedly(Return(instance)); + EXPECT_CALL(*instance, type()).WillRepeatedly(Return(Network::Address::Type::Ip)); + + Network::MockIp* ip = new Network::MockIp(); + EXPECT_CALL(*instance, ip()).WillRepeatedly(testing::Return(ip)); + + std::string address("1.2.3.4"); + EXPECT_CALL(*ip, addressAsString()).WillRepeatedly(ReturnRef(address)); + EXPECT_CALL(*ip, port()).WillRepeatedly(Return(0)); + ConfigImpl::RocketmqProxyConfig proxyConfig; + ConfigImpl configImpl(proxyConfig, context); + + EXPECT_STREQ("1.2.3.4:10000", configImpl.proxyAddress().c_str()); + delete ip; +} + +TEST_F(RocketmqFilterConfigTest, ProxyAddressWithNonIpType) { + NiceMock context; + Server::Configuration::MockServerFactoryContext factory_context; + 
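// A pipe address has no ip:port form, so proxyAddress() is expected to fall back to the physical address string ("physical"), as asserted at the end of this test. +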
EXPECT_CALL(context, getServerFactoryContext()).WillRepeatedly(ReturnRef(factory_context)); + + LocalInfo::MockLocalInfo local_info; + EXPECT_CALL(factory_context, localInfo()).WillRepeatedly(ReturnRef(local_info)); + std::shared_ptr instance = + std::make_shared("logical", "physical"); + EXPECT_CALL(local_info, address()).WillRepeatedly(Return(instance)); + EXPECT_CALL(*instance, type()).WillRepeatedly(Return(Network::Address::Type::Pipe)); + + Network::MockIp* ip = new Network::MockIp(); + EXPECT_CALL(*instance, ip()).WillRepeatedly(testing::Return(ip)); + + std::string address("1.2.3.4"); + EXPECT_CALL(*ip, addressAsString()).WillRepeatedly(ReturnRef(address)); + EXPECT_CALL(*ip, port()).WillRepeatedly(Return(0)); + ConfigImpl::RocketmqProxyConfig proxyConfig; + ConfigImpl configImpl(proxyConfig, context); + + EXPECT_STREQ("physical", configImpl.proxyAddress().c_str()); + delete ip; +} + +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/test/extensions/filters/network/rocketmq_proxy/conn_manager_test.cc b/test/extensions/filters/network/rocketmq_proxy/conn_manager_test.cc new file mode 100644 index 000000000000..46f3af3adef3 --- /dev/null +++ b/test/extensions/filters/network/rocketmq_proxy/conn_manager_test.cc @@ -0,0 +1,690 @@ +#include "envoy/network/connection.h" + +#include "extensions/filters/network/rocketmq_proxy/config.h" +#include "extensions/filters/network/rocketmq_proxy/conn_manager.h" +#include "extensions/filters/network/rocketmq_proxy/well_known_names.h" + +#include "test/common/stats/stat_test_utility.h" +#include "test/common/upstream/utility.h" +#include "test/extensions/filters/network/rocketmq_proxy/utility.h" +#include "test/mocks/network/connection.h" +#include "test/mocks/network/mocks.h" +#include "test/mocks/server/mocks.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::_; +using testing::NiceMock; +using testing::Return; +using testing::ReturnRef; + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { + +using ConfigRocketmqProxy = envoy::extensions::filters::network::rocketmq_proxy::v3::RocketmqProxy; + +class TestConfigImpl : public ConfigImpl { +public: + TestConfigImpl(RocketmqProxyConfig config, Server::Configuration::MockFactoryContext& context, + RocketmqFilterStats& stats) + : ConfigImpl(config, context), stats_(stats) {} + + RocketmqFilterStats& stats() override { return stats_; } + +private: + RocketmqFilterStats stats_; +}; + +class RocketmqConnectionManagerTest : public testing::Test { +public: + RocketmqConnectionManagerTest() : stats_(RocketmqFilterStats::generateStats("test.", store_)) {} + + ~RocketmqConnectionManagerTest() override { + filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList(); + } + + void initializeFilter() { initializeFilter(""); } + + void initializeFilter(const std::string& yaml) { + if (!yaml.empty()) { + TestUtility::loadFromYaml(yaml, proto_config_); + TestUtility::validate(proto_config_); + } + config_ = std::make_unique(proto_config_, factory_context_, stats_); + conn_manager_ = + std::make_unique(*config_, factory_context_.dispatcher().timeSource()); + conn_manager_->initializeReadFilterCallbacks(filter_callbacks_); + conn_manager_->onNewConnection(); + current_ = factory_context_.dispatcher().timeSource().monotonicTime(); + } + + void initializeCluster() { + Upstream::HostVector hosts; + hosts.emplace_back(host_); + 
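// Expose the single fake host through the thread-local cluster so that topic-route requests in the tests below can resolve it and read its broker metadata. +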
priority_set_.updateHosts( + 1, + Upstream::HostSetImpl::partitionHosts(std::make_shared(hosts), + Upstream::HostsPerLocalityImpl::empty()), + nullptr, hosts, {}, 100); + ON_CALL(thread_local_cluster_, prioritySet()).WillByDefault(ReturnRef(priority_set_)); + EXPECT_CALL(factory_context_.cluster_manager_, get(_)) + .WillRepeatedly(Return(&thread_local_cluster_)); + } + + NiceMock factory_context_; + Stats::TestUtil::TestStore store_; + RocketmqFilterStats stats_; + ConfigRocketmqProxy proto_config_; + + std::unique_ptr config_; + + Buffer::OwnedImpl buffer_; + NiceMock filter_callbacks_; + std::unique_ptr conn_manager_; + + Encoder encoder_; + Decoder decoder_; + + MonotonicTime current_; + + std::shared_ptr cluster_info_{ + new NiceMock()}; + Upstream::HostSharedPtr host_{Upstream::makeTestHost(cluster_info_, "tcp://127.0.0.1:80")}; + Upstream::PrioritySetImpl priority_set_; + NiceMock thread_local_cluster_; +}; + +TEST_F(RocketmqConnectionManagerTest, OnHeartbeat) { + initializeFilter(); + + BufferUtility::fillRequestBuffer(buffer_, RequestCode::HeartBeat); + EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.counter("test.heartbeat").value()); + + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, OnHeartbeatWithDecodeError) { + initializeFilter(); + + std::string json = R"EOF( + { + "language": "JAVA", + "version": 2, + "opaque": 1, + "flag": 1, + "serializeTypeCurrentRPC": "JSON" + } + )EOF"; + + buffer_.writeBEInt(4 + 4 + json.size()); + buffer_.writeBEInt(json.size()); + buffer_.add(json); + EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.counter("test.request_decoding_error").value()); + + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, OnHeartbeatWithInvalidBodyJson) { + initializeFilter(); + + RemotingCommandPtr cmd = std::make_unique(); + cmd->code(static_cast(RequestCode::HeartBeat)); + std::string heartbeat_data = R"EOF({"clientID": "127})EOF"; + cmd->body().add(heartbeat_data); + encoder_.encode(cmd, buffer_); + + EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(0U, store_.counter("test.request_decoding_error").value()); + + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, OnHeartbeatWithBodyJsonLackofClientId) { + initializeFilter(); + + RemotingCommandPtr cmd = std::make_unique(); + cmd->code(static_cast(RequestCode::HeartBeat)); + std::string heartbeat_data = R"EOF( + { + "consumerDataSet": [{}] + } + )EOF"; + cmd->body().add(heartbeat_data); + encoder_.encode(cmd, buffer_); + + EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(0U, store_.counter("test.request_decoding_error").value()); + + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, OnHeartbeatWithGroupMembersMapExists) { + initializeFilter(); + + auto& group_members_map = conn_manager_->groupMembersForTest(); + std::vector group_members; + ConsumerGroupMember group_member("127.0.0.1@90330", *conn_manager_); + group_member.setLastForTest(current_); + group_members.emplace_back(group_member); + group_members_map["test_cg"] = group_members; + + 
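// Replay a heartbeat while the member table already holds a fresh entry; that entry is expected to survive the purge, as asserted below. +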
BufferUtility::fillRequestBuffer(buffer_, RequestCode::HeartBeat); + EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.counter("test.heartbeat").value()); + EXPECT_FALSE(group_member.expired()); + EXPECT_FALSE(group_members_map.at("test_cg").empty()); + + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, OnHeartbeatWithGroupMembersMapExistsButExpired) { + initializeFilter(); + + auto& group_members_map = conn_manager_->groupMembersForTest(); + std::vector<ConsumerGroupMember> group_members; + ConsumerGroupMember group_member("127.0.0.2@90330", *conn_manager_); + group_member.setLastForTest(current_ - std::chrono::seconds(31)); + group_members.emplace_back(group_member); + group_members_map["test_cg"] = group_members; + + BufferUtility::fillRequestBuffer(buffer_, RequestCode::HeartBeat); + EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.counter("test.heartbeat").value()); + EXPECT_TRUE(group_member.expired()); + EXPECT_TRUE(group_members_map.empty()); + + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, OnHeartbeatWithGroupMembersMapExistsButLackOfClientID) { + initializeFilter(); + + auto& group_members_map = conn_manager_->groupMembersForTest(); + std::vector<ConsumerGroupMember> group_members; + ConsumerGroupMember group_member("127.0.0.2@90330", *conn_manager_); + group_member.setLastForTest(current_); + group_members.emplace_back(group_member); + group_members_map["test_cg"] = group_members; + + BufferUtility::fillRequestBuffer(buffer_, RequestCode::HeartBeat); + EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.counter("test.heartbeat").value()); + EXPECT_FALSE(group_member.expired()); + EXPECT_FALSE(group_members_map.at("test_cg").empty()); + + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, OnHeartbeatWithDownstreamConnectionClosed) { + initializeFilter(); + + BufferUtility::fillRequestBuffer(buffer_, RequestCode::HeartBeat); + NiceMock<Network::MockConnection> connection; + EXPECT_CALL(connection, state()).Times(1).WillOnce(Invoke([&]() -> Network::Connection::State { + return Network::Connection::State::Closed; + })); + EXPECT_CALL(filter_callbacks_, connection()).WillRepeatedly(Invoke([&]() -> Network::Connection& { + return connection; + })); + EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.counter("test.heartbeat").value()); + + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, OnHeartbeatWithPurgeDirectiveTable) { + initializeFilter(); + + std::string broker_name = "broker_name"; + int32_t broker_id = 0; + std::chrono::milliseconds delay_0(31 * 1000); + AckMessageDirective directive_0(broker_name, broker_id, + conn_manager_->timeSource().monotonicTime() - delay_0); + std::string directive_key_0 = "key_0"; + conn_manager_->insertAckDirective(directive_key_0, directive_0); + + std::chrono::milliseconds delay_1(29 * 1000); + AckMessageDirective directive_1(broker_name, broker_id, + conn_manager_->timeSource().monotonicTime() - delay_1); + std::string directive_key_1 = "key_1"; + conn_manager_->insertAckDirective(directive_key_1, directive_1); + + EXPECT_EQ(2, 
conn_manager_->getAckDirectiveTableForTest().size()); + + BufferUtility::fillRequestBuffer(buffer_, RequestCode::HeartBeat); + EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.counter("test.heartbeat").value()); + + EXPECT_EQ(1, conn_manager_->getAckDirectiveTableForTest().size()); + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, OnUnregisterClient) { + initializeFilter(); + + auto& group_members_map = conn_manager_->groupMembersForTest(); + BufferUtility::fillRequestBuffer(buffer_, RequestCode::UnregisterClient); + EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.counter("test.unregister").value()); + EXPECT_TRUE(group_members_map.empty()); + + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, OnUnregisterClientWithGroupMembersMapExists) { + initializeFilter(); + + auto& group_members_map = conn_manager_->groupMembersForTest(); + std::vector group_members; + ConsumerGroupMember group_member("test_client_id", *conn_manager_); + group_member.setLastForTest(current_); + group_members.emplace_back(group_member); + group_members_map["test_cg"] = group_members; + + BufferUtility::fillRequestBuffer(buffer_, RequestCode::UnregisterClient); + EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.counter("test.unregister").value()); + EXPECT_FALSE(group_member.expired()); + EXPECT_TRUE(group_members_map.empty()); + + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, OnUnregisterClientWithGroupMembersMapExistsButExpired) { + initializeFilter(); + + auto& group_members_map = conn_manager_->groupMembersForTest(); + std::vector group_members; + ConsumerGroupMember group_member("127.0.0.2@90330", *conn_manager_); + group_member.setLastForTest(current_ - std::chrono::seconds(31)); + group_members.emplace_back(group_member); + group_members_map["test_cg"] = group_members; + + BufferUtility::fillRequestBuffer(buffer_, RequestCode::UnregisterClient); + EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.counter("test.unregister").value()); + EXPECT_TRUE(group_member.expired()); + EXPECT_TRUE(group_members_map.empty()); + + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, + OnUnregisterClientWithGroupMembersMapExistsButLackOfClientID) { + initializeFilter(); + + auto& group_members_map = conn_manager_->groupMembersForTest(); + std::vector group_members; + ConsumerGroupMember group_member("127.0.0.2@90330", *conn_manager_); + group_member.setLastForTest(current_); + group_members.emplace_back(group_member); + group_members_map["test_cg"] = group_members; + + BufferUtility::fillRequestBuffer(buffer_, RequestCode::UnregisterClient); + EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.counter("test.unregister").value()); + EXPECT_FALSE(group_member.expired()); + EXPECT_FALSE(group_members_map.empty()); + + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, OnGetTopicRoute) { + const std::string yaml = 
R"EOF( +stat_prefix: test +route_config: + name: default_route + routes: + - match: + topic: + exact: test_topic + route: + cluster: fake_cluster +)EOF"; + initializeFilter(yaml); + + auto metadata = std::make_shared(); + ProtobufWkt::Struct topic_route_data; + auto* fields = topic_route_data.mutable_fields(); + (*fields)[RocketmqConstants::get().ReadQueueNum] = ValueUtil::numberValue(4); + (*fields)[RocketmqConstants::get().WriteQueueNum] = ValueUtil::numberValue(4); + (*fields)[RocketmqConstants::get().ClusterName] = ValueUtil::stringValue("DefaultCluster"); + (*fields)[RocketmqConstants::get().BrokerName] = ValueUtil::stringValue("broker-a"); + (*fields)[RocketmqConstants::get().BrokerId] = ValueUtil::numberValue(0); + (*fields)[RocketmqConstants::get().Perm] = ValueUtil::numberValue(6); + metadata->mutable_filter_metadata()->insert(Protobuf::MapPair( + NetworkFilterNames::get().RocketmqProxy, topic_route_data)); + host_->metadata(metadata); + initializeCluster(); + + BufferUtility::fillRequestBuffer(buffer_, RequestCode::GetRouteInfoByTopic); + EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.counter("test.get_topic_route").value()); + + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, OnGetTopicRouteWithoutRoutes) { + const std::string yaml = R"EOF( +stat_prefix: test +route_config: + name: default_route + routes: + - match: + topic: + exact: test_another_topic + route: + cluster: fake_cluster +)EOF"; + initializeFilter(yaml); + + BufferUtility::fillRequestBuffer(buffer_, RequestCode::GetRouteInfoByTopic); + EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.counter("test.get_topic_route").value()); + + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, OnGetTopicRouteWithoutCluster) { + const std::string yaml = R"EOF( +stat_prefix: test +route_config: + name: default_route + routes: + - match: + topic: + exact: test_topic + route: + cluster: fake_cluster +)EOF"; + initializeFilter(yaml); + + EXPECT_CALL(factory_context_.cluster_manager_, get(_)).WillRepeatedly(Return(nullptr)); + + BufferUtility::fillRequestBuffer(buffer_, RequestCode::GetRouteInfoByTopic); + EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.counter("test.get_topic_route").value()); + + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, OnGetTopicRouteInDevelopMode) { + const std::string yaml = R"EOF( +stat_prefix: test +develop_mode: true +route_config: + name: default_route + routes: + - match: + topic: + exact: test_topic + route: + cluster: fake_cluster +)EOF"; + NiceMock server_factory_context; + NiceMock local_info; + NiceMock ip; + std::shared_ptr instance = + std::make_shared("logical", "physical"); + EXPECT_CALL(factory_context_, getServerFactoryContext()) + .WillRepeatedly(ReturnRef(server_factory_context)); + EXPECT_CALL(server_factory_context, localInfo()).WillRepeatedly(ReturnRef(local_info)); + EXPECT_CALL(local_info, address()).WillRepeatedly(Return(instance)); + EXPECT_CALL(*instance, type()).WillRepeatedly(Return(Network::Address::Type::Ip)); + EXPECT_CALL(*instance, ip()).WillRepeatedly(testing::Return(&ip)); + const std::string address{"1.2.3.4"}; + EXPECT_CALL(ip, 
addressAsString()).WillRepeatedly(ReturnRef(address)); + EXPECT_CALL(ip, port()).WillRepeatedly(Return(1234)); + initializeFilter(yaml); + + auto metadata = std::make_shared(); + ProtobufWkt::Struct topic_route_data; + auto* fields = topic_route_data.mutable_fields(); + (*fields)[RocketmqConstants::get().ReadQueueNum] = ValueUtil::numberValue(4); + (*fields)[RocketmqConstants::get().WriteQueueNum] = ValueUtil::numberValue(4); + (*fields)[RocketmqConstants::get().ClusterName] = ValueUtil::stringValue("DefaultCluster"); + (*fields)[RocketmqConstants::get().BrokerName] = ValueUtil::stringValue("broker-a"); + (*fields)[RocketmqConstants::get().BrokerId] = ValueUtil::numberValue(0); + (*fields)[RocketmqConstants::get().Perm] = ValueUtil::numberValue(6); + metadata->mutable_filter_metadata()->insert(Protobuf::MapPair( + NetworkFilterNames::get().RocketmqProxy, topic_route_data)); + host_->metadata(metadata); + initializeCluster(); + + BufferUtility::fillRequestBuffer(buffer_, RequestCode::GetRouteInfoByTopic); + EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.counter("test.get_topic_route").value()); + + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, OnGetConsumerListByGroup) { + initializeFilter(); + + BufferUtility::fillRequestBuffer(buffer_, RequestCode::GetConsumerListByGroup); + EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.counter("test.get_consumer_list").value()); + + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, OnGetConsumerListByGroupWithGroupMemberMapExists) { + initializeFilter(); + + auto& group_members_map = conn_manager_->groupMembersForTest(); + std::vector group_members; + ConsumerGroupMember group_member("127.0.0.2@90330", *conn_manager_); + group_member.setLastForTest(current_ - std::chrono::seconds(31)); + group_members.emplace_back(group_member); + group_members_map["test_cg"] = group_members; + + BufferUtility::fillRequestBuffer(buffer_, RequestCode::GetConsumerListByGroup); + EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.counter("test.get_consumer_list").value()); + + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, OnPopMessage) { + const std::string yaml = R"EOF( +stat_prefix: test +route_config: + name: default_route + routes: + - match: + topic: + exact: test_topic + route: + cluster: fake_cluster +)EOF"; + initializeFilter(yaml); + + BufferUtility::fillRequestBuffer(buffer_, RequestCode::PopMessage); + EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.counter("test.pop_message").value()); + + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, OnAckMessage) { + const std::string yaml = R"EOF( +stat_prefix: test +route_config: + name: default_route + routes: + - match: + topic: + exact: test_topic + route: + cluster: fake_cluster +)EOF"; + initializeFilter(yaml); + + BufferUtility::fillRequestBuffer(buffer_, RequestCode::AckMessage); + EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + 
EXPECT_EQ(1U, store_.counter("test.ack_message").value()); + + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, OnData) { + initializeFilter(); + + EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(0, buffer_.length()); + EXPECT_EQ(0U, store_.counter("test.request").value()); + + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, OnDataWithEndStream) { + initializeFilter(); + + Buffer::OwnedImpl buffer; + BufferUtility::fillRequestBuffer(buffer, RequestCode::SendMessageV2); + bool underflow, has_error; + RemotingCommandPtr request = Decoder::decode(buffer, underflow, has_error); + conn_manager_->createActiveMessage(request); + EXPECT_EQ(1, conn_manager_->activeMessageList().size()); + conn_manager_->onData(buffer_, true); + EXPECT_TRUE(conn_manager_->activeMessageList().empty()); + + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, OnDataWithMinFrameSize) { + initializeFilter(); + + buffer_.add(std::string({'\x00', '\x00', '\x01', '\x8b'})); + buffer_.add(std::string({'\x00', '\x00', '\x01', '\x76'})); + EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(0U, store_.counter("test.request").value()); + + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, OnDataSendMessage) { + const std::string yaml = R"EOF( +stat_prefix: test +route_config: + name: default_route + routes: + - match: + topic: + exact: test_topic + route: + cluster: fake_cluster +)EOF"; + initializeFilter(yaml); + + BufferUtility::fillRequestBuffer(buffer_, RequestCode::SendMessage); + EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.counter("test.send_message_v1").value()); + EXPECT_EQ( + 1U, + store_.gauge("test.send_message_v1_active", Stats::Gauge::ImportMode::Accumulate).value()); + + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, OnDataSendMessageV2) { + const std::string yaml = R"EOF( +stat_prefix: test +route_config: + name: default_route + routes: + - match: + topic: + exact: test_topic + route: + cluster: fake_cluster +)EOF"; + initializeFilter(yaml); + + BufferUtility::fillRequestBuffer(buffer_, RequestCode::SendMessageV2); + EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, store_.counter("test.send_message_v2").value()); + EXPECT_EQ( + 1U, + store_.gauge("test.send_message_v2_active", Stats::Gauge::ImportMode::Accumulate).value()); + + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, OnDataWithUnsupportedCode) { + initializeFilter(); + + BufferUtility::fillRequestBuffer(buffer_, RequestCode::Unsupported); + EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + + buffer_.drain(buffer_.length()); +} + +TEST_F(RocketmqConnectionManagerTest, ConsumerGroupMemberEqual) { + initializeFilter(); + + ConsumerGroupMember m1("abc", *conn_manager_); + ConsumerGroupMember m2("abc", *conn_manager_); + EXPECT_TRUE(m1 == m2); +} + +TEST_F(RocketmqConnectionManagerTest, ConsumerGroupMemberLessThan) { + initializeFilter(); + + ConsumerGroupMember m1("abc", *conn_manager_); + ConsumerGroupMember m2("def", *conn_manager_); + EXPECT_TRUE(m1 < m2); +} + 
+TEST_F(RocketmqConnectionManagerTest, ConsumerGroupMemberExpired) { + initializeFilter(); + + ConsumerGroupMember member("Mock", *conn_manager_); + EXPECT_FALSE(member.expired()); + EXPECT_STREQ("Mock", member.clientId().data()); +} + +TEST_F(RocketmqConnectionManagerTest, ConsumerGroupMemberRefresh) { + initializeFilter(); + + ConsumerGroupMember member("Mock", *conn_manager_); + EXPECT_FALSE(member.expired()); + member.setLastForTest(current_ - std::chrono::seconds(31)); + EXPECT_TRUE(member.expired()); + member.refresh(); + EXPECT_FALSE(member.expired()); +} + +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/network/rocketmq_proxy/mocks.cc b/test/extensions/filters/network/rocketmq_proxy/mocks.cc new file mode 100644 index 000000000000..d346364491d7 --- /dev/null +++ b/test/extensions/filters/network/rocketmq_proxy/mocks.cc @@ -0,0 +1,57 @@ +#include "test/extensions/filters/network/rocketmq_proxy/mocks.h" + +#include "extensions/filters/network/rocketmq_proxy/router/router_impl.h" + +#include "gtest/gtest.h" + +using testing::_; +using testing::ByMove; +using testing::Return; +using testing::ReturnRef; + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { + +MockActiveMessage::MockActiveMessage(ConnectionManager& conn_manager, RemotingCommandPtr&& request) + : ActiveMessage(conn_manager, std::move(request)) { + route_ = std::make_shared>(); + + ON_CALL(*this, onError(_)).WillByDefault(Invoke([&](absl::string_view error_message) { + ActiveMessage::onError(error_message); + })); + ON_CALL(*this, onReset()).WillByDefault(Return()); + ON_CALL(*this, sendResponseToDownstream()).WillByDefault(Invoke([&]() { + ActiveMessage::sendResponseToDownstream(); + })); + ON_CALL(*this, metadata()).WillByDefault(Invoke([&]() { return ActiveMessage::metadata(); })); + ON_CALL(*this, route()).WillByDefault(Return(route_)); +} +MockActiveMessage::~MockActiveMessage() = default; + +MockConfig::MockConfig() : stats_(RocketmqFilterStats::generateStats("test.", store_)) { + ON_CALL(*this, stats()).WillByDefault(ReturnRef(stats_)); + ON_CALL(*this, clusterManager()).WillByDefault(ReturnRef(cluster_manager_)); + ON_CALL(*this, createRouter()) + .WillByDefault(Return(ByMove(std::make_unique(cluster_manager_)))); + ON_CALL(*this, developMode()).WillByDefault(Return(false)); + ON_CALL(*this, proxyAddress()).WillByDefault(Return(std::string{"1.2.3.4:1234"})); +} + +namespace Router { + +MockRouteEntry::MockRouteEntry() { + ON_CALL(*this, clusterName()).WillByDefault(ReturnRef(cluster_name_)); +} + +MockRouteEntry::~MockRouteEntry() = default; + +MockRoute::MockRoute() { ON_CALL(*this, routeEntry()).WillByDefault(Return(&route_entry_)); } +MockRoute::~MockRoute() = default; + +} // namespace Router +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/test/extensions/filters/network/rocketmq_proxy/mocks.h b/test/extensions/filters/network/rocketmq_proxy/mocks.h new file mode 100644 index 000000000000..a6cc6a05dd4c --- /dev/null +++ b/test/extensions/filters/network/rocketmq_proxy/mocks.h @@ -0,0 +1,89 @@ +#pragma once + +#include "extensions/filters/network/rocketmq_proxy/active_message.h" +#include "extensions/filters/network/rocketmq_proxy/conn_manager.h" + +#include "test/mocks/server/mocks.h" +#include "test/mocks/upstream/mocks.h" + +#include "gmock/gmock.h" + +namespace 
Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { + +namespace Router { +class MockRoute; +} // namespace Router + +class MockActiveMessage : public ActiveMessage { +public: + MockActiveMessage(ConnectionManager& conn_manager, RemotingCommandPtr&& request); + ~MockActiveMessage() override; + + MOCK_METHOD(void, createFilterChain, ()); + MOCK_METHOD(void, sendRequestToUpstream, ()); + MOCK_METHOD(RemotingCommandPtr&, downstreamRequest, ()); + MOCK_METHOD(void, sendResponseToDownstream, ()); + MOCK_METHOD(void, onQueryTopicRoute, ()); + MOCK_METHOD(void, onError, (absl::string_view)); + MOCK_METHOD(ConnectionManager&, connectionManager, ()); + MOCK_METHOD(void, onReset, ()); + MOCK_METHOD(bool, onUpstreamData, + (Buffer::Instance&, bool, Tcp::ConnectionPool::ConnectionDataPtr&)); + MOCK_METHOD(MessageMetadataSharedPtr, metadata, (), (const)); + MOCK_METHOD(Router::RouteConstSharedPtr, route, ()); + + std::shared_ptr route_; +}; + +class MockConfig : public Config { +public: + MockConfig(); + ~MockConfig() override = default; + + MOCK_METHOD(RocketmqFilterStats&, stats, ()); + MOCK_METHOD(Upstream::ClusterManager&, clusterManager, ()); + MOCK_METHOD(Router::RouterPtr, createRouter, ()); + MOCK_METHOD(bool, developMode, (), (const)); + MOCK_METHOD(std::string, proxyAddress, ()); + MOCK_METHOD(Router::Config&, routerConfig, ()); + +private: + Stats::IsolatedStoreImpl store_; + RocketmqFilterStats stats_; + NiceMock cluster_manager_; + Router::RouterPtr router_; +}; + +namespace Router { + +class MockRouteEntry : public RouteEntry { +public: + MockRouteEntry(); + ~MockRouteEntry() override; + + // RocketmqProxy::Router::RouteEntry + MOCK_METHOD(const std::string&, clusterName, (), (const)); + MOCK_METHOD(Envoy::Router::MetadataMatchCriteria*, metadataMatchCriteria, (), (const)); + + std::string cluster_name_{"fake_cluster"}; +}; + +class MockRoute : public Route { +public: + MockRoute(); + ~MockRoute() override; + + // RocketmqProxy::Router::Route + MOCK_METHOD(const RouteEntry*, routeEntry, (), (const)); + + NiceMock route_entry_; +}; +} // namespace Router + +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/test/extensions/filters/network/rocketmq_proxy/protocol_test.cc b/test/extensions/filters/network/rocketmq_proxy/protocol_test.cc new file mode 100644 index 000000000000..ac2aa63a0d81 --- /dev/null +++ b/test/extensions/filters/network/rocketmq_proxy/protocol_test.cc @@ -0,0 +1,927 @@ +#include "common/protobuf/utility.h" + +#include "extensions/filters/network/rocketmq_proxy/protocol.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { + +class UnregisterClientRequestHeaderTest : public testing::Test { +public: + std::string client_id_{"SampleClient_01"}; + std::string producer_group_{"PG_Example_01"}; + std::string consumer_group_{"CG_001"}; +}; + +TEST_F(UnregisterClientRequestHeaderTest, Encode) { + UnregisterClientRequestHeader request_header; + request_header.clientId(client_id_); + request_header.producerGroup(producer_group_); + request_header.consumerGroup(consumer_group_); + + ProtobufWkt::Value doc; + request_header.encode(doc); + + const auto& members = doc.struct_value().fields(); + EXPECT_STREQ(client_id_.c_str(), members.at("clientID").string_value().c_str()); + EXPECT_STREQ(producer_group_.c_str(), members.at("producerGroup").string_value().c_str()); + 
EXPECT_STREQ(consumer_group_.c_str(), members.at("consumerGroup").string_value().c_str()); +} + +TEST_F(UnregisterClientRequestHeaderTest, Decode) { + + std::string json = R"EOF( + { + "clientID": "SampleClient_01", + "producerGroup": "PG_Example_01", + "consumerGroup": "CG_001" + } + )EOF"; + + ProtobufWkt::Value doc; + MessageUtil::loadFromJson(json, *(doc.mutable_struct_value())); + UnregisterClientRequestHeader unregister_client_request_header; + unregister_client_request_header.decode(doc); + EXPECT_STREQ(client_id_.c_str(), unregister_client_request_header.clientId().c_str()); + EXPECT_STREQ(producer_group_.c_str(), unregister_client_request_header.producerGroup().c_str()); + EXPECT_STREQ(consumer_group_.c_str(), unregister_client_request_header.consumerGroup().c_str()); +} + +TEST(GetConsumerListByGroupResponseBodyTest, Encode) { + GetConsumerListByGroupResponseBody response_body; + response_body.add("localhost@1"); + response_body.add("localhost@2"); + + ProtobufWkt::Struct doc; + response_body.encode(doc); + + const auto& members = doc.fields(); + EXPECT_TRUE(members.contains("consumerIdList")); + EXPECT_EQ(2, members.at("consumerIdList").list_value().values_size()); +} + +class AckMessageRequestHeaderTest : public testing::Test { +public: + std::string consumer_group{"CG_Unit_Test"}; + std::string topic{"T_UnitTest"}; + int32_t queue_id{1}; + std::string extra_info{"extra_info_UT"}; + int64_t offset{100}; +}; + +TEST_F(AckMessageRequestHeaderTest, Encode) { + AckMessageRequestHeader ack_header; + ack_header.consumerGroup(consumer_group); + ack_header.topic(topic); + ack_header.queueId(queue_id); + ack_header.extraInfo(extra_info); + ack_header.offset(offset); + + ProtobufWkt::Value doc; + ack_header.encode(doc); + + const auto& members = doc.struct_value().fields(); + + EXPECT_TRUE(members.contains("consumerGroup")); + EXPECT_STREQ(consumer_group.c_str(), members.at("consumerGroup").string_value().c_str()); + + EXPECT_TRUE(members.contains("topic")); + EXPECT_STREQ(topic.c_str(), members.at("topic").string_value().c_str()); + + EXPECT_TRUE(members.contains("queueId")); + EXPECT_EQ(queue_id, members.at("queueId").number_value()); + + EXPECT_TRUE(members.contains("extraInfo")); + EXPECT_STREQ(extra_info.c_str(), members.at("extraInfo").string_value().c_str()); + + EXPECT_TRUE(members.contains("offset")); + EXPECT_EQ(offset, members.at("offset").number_value()); +} + +TEST_F(AckMessageRequestHeaderTest, Decode) { + std::string json = R"EOF( + { + "consumerGroup": "CG_Unit_Test", + "topic": "T_UnitTest", + "queueId": 1, + "extraInfo": "extra_info_UT", + "offset": 100 + } + )EOF"; + + ProtobufWkt::Value doc; + MessageUtil::loadFromJson(json, *(doc.mutable_struct_value())); + + AckMessageRequestHeader ack_header; + ack_header.decode(doc); + ASSERT_STREQ(consumer_group.c_str(), ack_header.consumerGroup().data()); + ASSERT_STREQ(topic.c_str(), ack_header.topic().c_str()); + ASSERT_EQ(queue_id, ack_header.queueId()); + ASSERT_STREQ(extra_info.c_str(), ack_header.extraInfo().data()); + ASSERT_EQ(offset, ack_header.offset()); +} + +TEST_F(AckMessageRequestHeaderTest, DecodeNumSerializedAsString) { + std::string json = R"EOF( + { + "consumerGroup": "CG_Unit_Test", + "topic": "T_UnitTest", + "queueId": "1", + "extraInfo": "extra_info_UT", + "offset": "100" + } + )EOF"; + + ProtobufWkt::Value doc; + MessageUtil::loadFromJson(json, *(doc.mutable_struct_value())); + + AckMessageRequestHeader ack_header; + ack_header.decode(doc); + ASSERT_STREQ(consumer_group.c_str(), 
ack_header.consumerGroup().data()); + ASSERT_STREQ(topic.c_str(), ack_header.topic().c_str()); + ASSERT_EQ(queue_id, ack_header.queueId()); + ASSERT_STREQ(extra_info.c_str(), ack_header.extraInfo().data()); + ASSERT_EQ(offset, ack_header.offset()); +} + +class PopMessageRequestHeaderTest : public testing::Test { +public: + std::string consumer_group{"CG_UT"}; + std::string topic{"T_UT"}; + int32_t queue_id{1}; + int32_t max_msg_nums{2}; + int64_t invisible_time{3}; + int64_t poll_time{4}; + int64_t born_time{5}; + int32_t init_mode{6}; + + std::string exp_type{"exp_type_UT"}; + std::string exp{"exp_UT"}; +}; + +TEST_F(PopMessageRequestHeaderTest, Encode) { + PopMessageRequestHeader pop_request_header; + pop_request_header.consumerGroup(consumer_group); + pop_request_header.topic(topic); + pop_request_header.queueId(queue_id); + pop_request_header.maxMsgNum(max_msg_nums); + pop_request_header.invisibleTime(invisible_time); + pop_request_header.pollTime(poll_time); + pop_request_header.bornTime(born_time); + pop_request_header.initMode(init_mode); + pop_request_header.expType(exp_type); + pop_request_header.exp(exp); + + ProtobufWkt::Value doc; + pop_request_header.encode(doc); + + const auto& members = doc.struct_value().fields(); + + EXPECT_TRUE(members.contains("consumerGroup")); + EXPECT_STREQ(consumer_group.c_str(), members.at("consumerGroup").string_value().c_str()); + + EXPECT_TRUE(members.contains("topic")); + EXPECT_STREQ(topic.c_str(), members.at("topic").string_value().c_str()); + + EXPECT_TRUE(members.contains("queueId")); + EXPECT_EQ(queue_id, members.at("queueId").number_value()); + + EXPECT_TRUE(members.contains("maxMsgNums")); + EXPECT_EQ(max_msg_nums, members.at("maxMsgNums").number_value()); + + EXPECT_TRUE(members.contains("invisibleTime")); + EXPECT_EQ(invisible_time, members.at("invisibleTime").number_value()); + + EXPECT_TRUE(members.contains("pollTime")); + EXPECT_EQ(poll_time, members.at("pollTime").number_value()); + + EXPECT_TRUE(members.contains("bornTime")); + EXPECT_EQ(born_time, members.at("bornTime").number_value()); + + EXPECT_TRUE(members.contains("initMode")); + EXPECT_EQ(init_mode, members.at("initMode").number_value()); + + EXPECT_TRUE(members.contains("expType")); + EXPECT_STREQ(exp_type.c_str(), members.at("expType").string_value().c_str()); + + EXPECT_TRUE(members.contains("exp")); + EXPECT_STREQ(exp.c_str(), members.at("exp").string_value().c_str()); +} + +TEST_F(PopMessageRequestHeaderTest, Decode) { + std::string json = R"EOF( + { + "consumerGroup": "CG_UT", + "topic": "T_UT", + "queueId": 1, + "maxMsgNums": 2, + "invisibleTime": 3, + "pollTime": 4, + "bornTime": 5, + "initMode": 6, + "expType": "exp_type_UT", + "exp": "exp_UT" + } + )EOF"; + + ProtobufWkt::Value doc; + MessageUtil::loadFromJson(json, *(doc.mutable_struct_value())); + PopMessageRequestHeader pop_request_header; + pop_request_header.decode(doc); + + ASSERT_STREQ(consumer_group.c_str(), pop_request_header.consumerGroup().data()); + ASSERT_STREQ(topic.c_str(), pop_request_header.topic().c_str()); + ASSERT_EQ(queue_id, pop_request_header.queueId()); + ASSERT_EQ(max_msg_nums, pop_request_header.maxMsgNum()); + ASSERT_EQ(invisible_time, pop_request_header.invisibleTime()); + ASSERT_EQ(poll_time, pop_request_header.pollTime()); + ASSERT_EQ(born_time, pop_request_header.bornTime()); + ASSERT_EQ(init_mode, pop_request_header.initMode()); + ASSERT_STREQ(exp_type.c_str(), pop_request_header.expType().c_str()); + ASSERT_STREQ(exp.c_str(), pop_request_header.exp().c_str()); +} + 
+TEST_F(PopMessageRequestHeaderTest, DecodeNumSerializedAsString) { + std::string json = R"EOF( + { + "consumerGroup": "CG_UT", + "topic": "T_UT", + "queueId": "1", + "maxMsgNums": "2", + "invisibleTime": "3", + "pollTime": "4", + "bornTime": "5", + "initMode": "6", + "expType": "exp_type_UT", + "exp": "exp_UT" + } + )EOF"; + + ProtobufWkt::Value doc; + MessageUtil::loadFromJson(json, *(doc.mutable_struct_value())); + PopMessageRequestHeader pop_request_header; + pop_request_header.decode(doc); + + ASSERT_STREQ(consumer_group.c_str(), pop_request_header.consumerGroup().data()); + ASSERT_STREQ(topic.c_str(), pop_request_header.topic().c_str()); + ASSERT_EQ(queue_id, pop_request_header.queueId()); + ASSERT_EQ(max_msg_nums, pop_request_header.maxMsgNum()); + ASSERT_EQ(invisible_time, pop_request_header.invisibleTime()); + ASSERT_EQ(poll_time, pop_request_header.pollTime()); + ASSERT_EQ(born_time, pop_request_header.bornTime()); + ASSERT_EQ(init_mode, pop_request_header.initMode()); + ASSERT_STREQ(exp_type.c_str(), pop_request_header.expType().c_str()); + ASSERT_STREQ(exp.c_str(), pop_request_header.exp().c_str()); +} + +class PopMessageResponseHeaderTest : public testing::Test { +public: + int64_t pop_time{1}; + int64_t invisible_time{2}; + int32_t revive_qid{3}; + int64_t rest_num{4}; + + std::string start_offset_info{"start"}; + std::string msg_offset_info{"msg"}; + std::string order_count_info{"order"}; +}; + +TEST_F(PopMessageResponseHeaderTest, Encode) { + PopMessageResponseHeader pop_response_header; + pop_response_header.popTime(pop_time); + pop_response_header.invisibleTime(invisible_time); + pop_response_header.reviveQid(revive_qid); + pop_response_header.restNum(rest_num); + pop_response_header.startOffsetInfo(start_offset_info); + pop_response_header.msgOffsetInfo(msg_offset_info); + pop_response_header.orderCountInfo(order_count_info); + + ProtobufWkt::Value doc; + pop_response_header.encode(doc); + + const auto& members = doc.struct_value().fields(); + + EXPECT_TRUE(members.contains("popTime")); + EXPECT_TRUE(members.contains("invisibleTime")); + EXPECT_TRUE(members.contains("reviveQid")); + EXPECT_TRUE(members.contains("restNum")); + EXPECT_TRUE(members.contains("startOffsetInfo")); + EXPECT_TRUE(members.contains("msgOffsetInfo")); + EXPECT_TRUE(members.contains("orderCountInfo")); + + EXPECT_EQ(pop_time, members.at("popTime").number_value()); + EXPECT_EQ(invisible_time, members.at("invisibleTime").number_value()); + EXPECT_EQ(revive_qid, members.at("reviveQid").number_value()); + EXPECT_EQ(rest_num, members.at("restNum").number_value()); + EXPECT_STREQ(start_offset_info.c_str(), members.at("startOffsetInfo").string_value().c_str()); + EXPECT_STREQ(msg_offset_info.c_str(), members.at("msgOffsetInfo").string_value().c_str()); + EXPECT_STREQ(order_count_info.c_str(), members.at("orderCountInfo").string_value().c_str()); +} + +TEST_F(PopMessageResponseHeaderTest, Decode) { + std::string json = R"EOF( + { + "popTime": 1, + "invisibleTime": 2, + "reviveQid": 3, + "restNum": 4, + "startOffsetInfo": "start", + "msgOffsetInfo": "msg", + "orderCountInfo": "order" + } + )EOF"; + + ProtobufWkt::Value doc; + MessageUtil::loadFromJson(json, *(doc.mutable_struct_value())); + + PopMessageResponseHeader header; + header.decode(doc); + + EXPECT_EQ(pop_time, header.popTimeForTest()); + EXPECT_EQ(invisible_time, header.invisibleTime()); + EXPECT_EQ(revive_qid, header.reviveQid()); + EXPECT_EQ(rest_num, header.restNum()); + + EXPECT_STREQ(start_offset_info.c_str(), header.startOffsetInfo().data()); 
+ EXPECT_STREQ(msg_offset_info.c_str(), header.msgOffsetInfo().data()); + EXPECT_STREQ(order_count_info.c_str(), header.orderCountInfo().data()); +} + +TEST_F(PopMessageResponseHeaderTest, DecodeNumSerializedAsString) { + std::string json = R"EOF( + { + "popTime": "1", + "invisibleTime": "2", + "reviveQid": "3", + "restNum": "4", + "startOffsetInfo": "start", + "msgOffsetInfo": "msg", + "orderCountInfo": "order" + } + )EOF"; + + ProtobufWkt::Value doc; + MessageUtil::loadFromJson(json, *(doc.mutable_struct_value())); + + PopMessageResponseHeader header; + header.decode(doc); + + EXPECT_EQ(pop_time, header.popTimeForTest()); + EXPECT_EQ(invisible_time, header.invisibleTime()); + EXPECT_EQ(revive_qid, header.reviveQid()); + EXPECT_EQ(rest_num, header.restNum()); + + EXPECT_STREQ(start_offset_info.c_str(), header.startOffsetInfo().data()); + EXPECT_STREQ(msg_offset_info.c_str(), header.msgOffsetInfo().data()); + EXPECT_STREQ(order_count_info.c_str(), header.orderCountInfo().data()); +} + +class SendMessageResponseHeaderTest : public testing::Test { +public: + SendMessageResponseHeader response_header_; +}; + +TEST_F(SendMessageResponseHeaderTest, Encode) { + response_header_.msgIdForTest("MSG_ID_01"); + response_header_.queueId(1); + response_header_.queueOffset(100); + response_header_.transactionId("TX_01"); + ProtobufWkt::Value doc; + response_header_.encode(doc); + + const auto& members = doc.struct_value().fields(); + EXPECT_TRUE(members.contains("msgId")); + EXPECT_TRUE(members.contains("queueId")); + EXPECT_TRUE(members.contains("queueOffset")); + EXPECT_TRUE(members.contains("transactionId")); + + EXPECT_STREQ("MSG_ID_01", members.at("msgId").string_value().c_str()); + EXPECT_STREQ("TX_01", members.at("transactionId").string_value().c_str()); + EXPECT_EQ(1, members.at("queueId").number_value()); + EXPECT_EQ(100, members.at("queueOffset").number_value()); +} + +TEST_F(SendMessageResponseHeaderTest, Decode) { + std::string json = R"EOF( + { + "msgId": "abc", + "queueId": 1, + "queueOffset": 10, + "transactionId": "TX_1" + } + )EOF"; + ProtobufWkt::Value doc; + MessageUtil::loadFromJson(json, *(doc.mutable_struct_value())); + response_header_.decode(doc); + EXPECT_STREQ("abc", response_header_.msgId().c_str()); + EXPECT_EQ(1, response_header_.queueId()); + EXPECT_EQ(10, response_header_.queueOffset()); + EXPECT_STREQ("TX_1", response_header_.transactionId().c_str()); +} + +TEST_F(SendMessageResponseHeaderTest, DecodeNumSerializedAsString) { + std::string json = R"EOF( + { + "msgId": "abc", + "queueId": "1", + "queueOffset": "10", + "transactionId": "TX_1" + } + )EOF"; + ProtobufWkt::Value doc; + MessageUtil::loadFromJson(json, *(doc.mutable_struct_value())); + response_header_.decode(doc); + EXPECT_STREQ("abc", response_header_.msgId().c_str()); + EXPECT_EQ(1, response_header_.queueId()); + EXPECT_EQ(10, response_header_.queueOffset()); + EXPECT_STREQ("TX_1", response_header_.transactionId().c_str()); +} + +class SendMessageRequestHeaderTest : public testing::Test {}; + +TEST_F(SendMessageRequestHeaderTest, EncodeDefault) { + SendMessageRequestHeader header; + ProtobufWkt::Value doc; + header.encode(doc); + const auto& members = doc.struct_value().fields(); + EXPECT_TRUE(members.contains("producerGroup")); + EXPECT_TRUE(members.contains("topic")); + EXPECT_TRUE(members.contains("defaultTopic")); + EXPECT_TRUE(members.contains("defaultTopicQueueNums")); + EXPECT_TRUE(members.contains("queueId")); + EXPECT_TRUE(members.contains("sysFlag")); + EXPECT_TRUE(members.contains("bornTimestamp")); 
+ EXPECT_TRUE(members.contains("flag")); + EXPECT_FALSE(members.contains("properties")); + EXPECT_FALSE(members.contains("reconsumeTimes")); + EXPECT_FALSE(members.contains("unitMode")); + EXPECT_FALSE(members.contains("batch")); + EXPECT_FALSE(members.contains("maxReconsumeTimes")); +} + +TEST_F(SendMessageRequestHeaderTest, EncodeOptional) { + SendMessageRequestHeader header; + header.properties("mock"); + header.reconsumeTimes(1); + header.unitMode(true); + header.batch(true); + header.maxReconsumeTimes(32); + ProtobufWkt::Value doc; + header.encode(doc); + const auto& members = doc.struct_value().fields(); + EXPECT_TRUE(members.contains("producerGroup")); + EXPECT_TRUE(members.contains("topic")); + EXPECT_TRUE(members.contains("defaultTopic")); + EXPECT_TRUE(members.contains("defaultTopicQueueNums")); + EXPECT_TRUE(members.contains("queueId")); + EXPECT_TRUE(members.contains("sysFlag")); + EXPECT_TRUE(members.contains("bornTimestamp")); + EXPECT_TRUE(members.contains("flag")); + EXPECT_TRUE(members.contains("properties")); + EXPECT_TRUE(members.contains("reconsumeTimes")); + EXPECT_TRUE(members.contains("unitMode")); + EXPECT_TRUE(members.contains("batch")); + EXPECT_TRUE(members.contains("maxReconsumeTimes")); + + EXPECT_STREQ("mock", members.at("properties").string_value().c_str()); + EXPECT_EQ(1, members.at("reconsumeTimes").number_value()); + EXPECT_TRUE(members.at("unitMode").bool_value()); + EXPECT_TRUE(members.at("batch").bool_value()); + EXPECT_EQ(32, members.at("maxReconsumeTimes").number_value()); +} + +TEST_F(SendMessageRequestHeaderTest, EncodeDefaultV2) { + SendMessageRequestHeader header; + header.version(SendMessageRequestVersion::V2); + ProtobufWkt::Value doc; + header.encode(doc); + const auto& members = doc.struct_value().fields(); + EXPECT_TRUE(members.contains("a")); + EXPECT_TRUE(members.contains("b")); + EXPECT_TRUE(members.contains("c")); + EXPECT_TRUE(members.contains("d")); + EXPECT_TRUE(members.contains("e")); + EXPECT_TRUE(members.contains("f")); + EXPECT_TRUE(members.contains("g")); + EXPECT_TRUE(members.contains("h")); + EXPECT_FALSE(members.contains("i")); + EXPECT_FALSE(members.contains("j")); + EXPECT_FALSE(members.contains("k")); + EXPECT_FALSE(members.contains("l")); + EXPECT_FALSE(members.contains("m")); +} + +TEST_F(SendMessageRequestHeaderTest, EncodeOptionalV2) { + SendMessageRequestHeader header; + header.properties("mock"); + header.reconsumeTimes(1); + header.unitMode(true); + header.batch(true); + header.maxReconsumeTimes(32); + header.version(SendMessageRequestVersion::V2); + ProtobufWkt::Value doc; + header.encode(doc); + + const auto& members = doc.struct_value().fields(); + EXPECT_TRUE(members.contains("a")); + EXPECT_TRUE(members.contains("b")); + EXPECT_TRUE(members.contains("c")); + EXPECT_TRUE(members.contains("d")); + EXPECT_TRUE(members.contains("e")); + EXPECT_TRUE(members.contains("f")); + EXPECT_TRUE(members.contains("g")); + EXPECT_TRUE(members.contains("h")); + EXPECT_TRUE(members.contains("i")); + EXPECT_TRUE(members.contains("j")); + EXPECT_TRUE(members.contains("k")); + EXPECT_TRUE(members.contains("l")); + EXPECT_TRUE(members.contains("m")); + + EXPECT_STREQ("mock", members.at("i").string_value().c_str()); + EXPECT_EQ(1, members.at("j").number_value()); + EXPECT_TRUE(members.at("k").bool_value()); + EXPECT_TRUE(members.at("m").bool_value()); + EXPECT_EQ(32, members.at("l").number_value()); +} + +TEST_F(SendMessageRequestHeaderTest, EncodeV3) { + SendMessageRequestHeader header; + header.version(SendMessageRequestVersion::V3); 
+ ProtobufWkt::Value doc; + header.encode(doc); +} + +TEST_F(SendMessageRequestHeaderTest, DecodeV1) { + std::string json = R"EOF( + { + "batch": false, + "bornTimestamp": 1575872212297, + "defaultTopic": "TBW102", + "defaultTopicQueueNums": 3, + "flag": 124, + "producerGroup": "FooBarGroup", + "queueId": 1, + "reconsumeTimes": 0, + "sysFlag": 0, + "topic": "FooBar", + "unitMode": false + } + )EOF"; + + SendMessageRequestHeader header; + ProtobufWkt::Value doc; + MessageUtil::loadFromJson(json, *(doc.mutable_struct_value())); + header.decode(doc); + EXPECT_STREQ("FooBar", header.topic().c_str()); + EXPECT_EQ(1, header.queueId()); + EXPECT_STREQ("FooBarGroup", header.producerGroup().c_str()); + EXPECT_STREQ("TBW102", header.defaultTopic().c_str()); + EXPECT_EQ(3, header.defaultTopicQueueNumber()); + EXPECT_EQ(0, header.sysFlag()); + EXPECT_EQ(1575872212297, header.bornTimestamp()); + EXPECT_EQ(124, header.flag()); + EXPECT_STREQ("", header.properties().c_str()); + EXPECT_EQ(0, header.reconsumeTimes()); + EXPECT_FALSE(header.unitMode()); + EXPECT_FALSE(header.batch()); + EXPECT_EQ(0, header.maxReconsumeTimes()); +} + +TEST_F(SendMessageRequestHeaderTest, DecodeV1Optional) { + std::string json = R"EOF( + { + "batch": false, + "bornTimestamp": 1575872212297, + "defaultTopic": "TBW102", + "defaultTopicQueueNums": 3, + "flag": 124, + "producerGroup": "FooBarGroup", + "queueId": 1, + "reconsumeTimes": 0, + "sysFlag": 0, + "topic": "FooBar", + "unitMode": false, + "properties": "mock_properties", + "maxReconsumeTimes": 32 + } + )EOF"; + + SendMessageRequestHeader header; + ProtobufWkt::Value doc; + MessageUtil::loadFromJson(json, *(doc.mutable_struct_value())); + header.decode(doc); + EXPECT_STREQ("FooBar", header.topic().c_str()); + EXPECT_EQ(1, header.queueId()); + EXPECT_STREQ("FooBarGroup", header.producerGroup().c_str()); + EXPECT_STREQ("TBW102", header.defaultTopic().c_str()); + EXPECT_EQ(3, header.defaultTopicQueueNumber()); + EXPECT_EQ(0, header.sysFlag()); + EXPECT_EQ(1575872212297, header.bornTimestamp()); + EXPECT_EQ(124, header.flag()); + EXPECT_STREQ("mock_properties", header.properties().c_str()); + EXPECT_EQ(0, header.reconsumeTimes()); + EXPECT_FALSE(header.unitMode()); + EXPECT_FALSE(header.batch()); + EXPECT_EQ(32, header.maxReconsumeTimes()); +} + +TEST_F(SendMessageRequestHeaderTest, DecodeV1OptionalNumSerializedAsString) { + std::string json = R"EOF( + { + "batch": "false", + "bornTimestamp": "1575872212297", + "defaultTopic": "TBW102", + "defaultTopicQueueNums": "3", + "flag": "124", + "producerGroup": "FooBarGroup", + "queueId": "1", + "reconsumeTimes": "0", + "sysFlag": "0", + "topic": "FooBar", + "unitMode": "false", + "properties": "mock_properties", + "maxReconsumeTimes": "32" + } + )EOF"; + + SendMessageRequestHeader header; + ProtobufWkt::Value doc; + MessageUtil::loadFromJson(json, *(doc.mutable_struct_value())); + header.decode(doc); + EXPECT_STREQ("FooBar", header.topic().c_str()); + EXPECT_EQ(1, header.queueId()); + EXPECT_STREQ("FooBarGroup", header.producerGroup().c_str()); + EXPECT_STREQ("TBW102", header.defaultTopic().c_str()); + EXPECT_EQ(3, header.defaultTopicQueueNumber()); + EXPECT_EQ(0, header.sysFlag()); + EXPECT_EQ(1575872212297, header.bornTimestamp()); + EXPECT_EQ(124, header.flag()); + EXPECT_STREQ("mock_properties", header.properties().c_str()); + EXPECT_EQ(0, header.reconsumeTimes()); + EXPECT_FALSE(header.unitMode()); + EXPECT_FALSE(header.batch()); + EXPECT_EQ(32, header.maxReconsumeTimes()); +} + +TEST_F(SendMessageRequestHeaderTest, DecodeV2) { 
+ std::string json = R"EOF( + { + "a": "FooBarGroup", + "b": "FooBar", + "c": "TBW102", + "d": 3, + "e": 1, + "f": 0, + "g": 1575872563203, + "h": 124, + "j": 0, + "k": false, + "m": false + } + )EOF"; + + SendMessageRequestHeader header; + header.version(SendMessageRequestVersion::V2); + ProtobufWkt::Value doc; + MessageUtil::loadFromJson(json, *(doc.mutable_struct_value())); + header.decode(doc); + EXPECT_STREQ("FooBar", header.topic().c_str()); + EXPECT_EQ(1, header.queueId()); + EXPECT_STREQ("FooBarGroup", header.producerGroup().c_str()); + EXPECT_STREQ("TBW102", header.defaultTopic().c_str()); + EXPECT_EQ(3, header.defaultTopicQueueNumber()); + EXPECT_EQ(0, header.sysFlag()); + EXPECT_EQ(1575872563203, header.bornTimestamp()); + EXPECT_EQ(124, header.flag()); + EXPECT_STREQ("", header.properties().c_str()); + EXPECT_EQ(0, header.reconsumeTimes()); + EXPECT_FALSE(header.unitMode()); + EXPECT_FALSE(header.batch()); + EXPECT_EQ(0, header.maxReconsumeTimes()); +} + +TEST_F(SendMessageRequestHeaderTest, DecodeV2Optional) { + std::string json = R"EOF( + { + "a": "FooBarGroup", + "b": "FooBar", + "c": "TBW102", + "d": 3, + "e": 1, + "f": 0, + "g": 1575872563203, + "h": 124, + "i": "mock_properties", + "j": 0, + "k": false, + "l": 1, + "m": false + } + )EOF"; + + SendMessageRequestHeader header; + header.version(SendMessageRequestVersion::V2); + ProtobufWkt::Value doc; + MessageUtil::loadFromJson(json, *(doc.mutable_struct_value())); + header.decode(doc); + EXPECT_STREQ("FooBar", header.topic().c_str()); + EXPECT_EQ(1, header.queueId()); + EXPECT_STREQ("FooBarGroup", header.producerGroup().c_str()); + EXPECT_STREQ("TBW102", header.defaultTopic().c_str()); + EXPECT_EQ(3, header.defaultTopicQueueNumber()); + EXPECT_EQ(0, header.sysFlag()); + EXPECT_EQ(1575872563203, header.bornTimestamp()); + EXPECT_EQ(124, header.flag()); + EXPECT_STREQ("mock_properties", header.properties().c_str()); + EXPECT_EQ(0, header.reconsumeTimes()); + EXPECT_FALSE(header.unitMode()); + EXPECT_FALSE(header.batch()); + EXPECT_EQ(1, header.maxReconsumeTimes()); +} + +TEST_F(SendMessageRequestHeaderTest, DecodeV2OptionalNumSerializedAsString) { + std::string json = R"EOF( + { + "a": "FooBarGroup", + "b": "FooBar", + "c": "TBW102", + "d": "3", + "e": "1", + "f": "0", + "g": "1575872563203", + "h": "124", + "i": "mock_properties", + "j": "0", + "k": "false", + "l": "1", + "m": "false" + } + )EOF"; + + SendMessageRequestHeader header; + header.version(SendMessageRequestVersion::V2); + ProtobufWkt::Value doc; + MessageUtil::loadFromJson(json, *(doc.mutable_struct_value())); + header.decode(doc); + EXPECT_STREQ("FooBar", header.topic().c_str()); + EXPECT_EQ(1, header.queueId()); + EXPECT_STREQ("FooBarGroup", header.producerGroup().c_str()); + EXPECT_STREQ("TBW102", header.defaultTopic().c_str()); + EXPECT_EQ(3, header.defaultTopicQueueNumber()); + EXPECT_EQ(0, header.sysFlag()); + EXPECT_EQ(1575872563203, header.bornTimestamp()); + EXPECT_EQ(124, header.flag()); + EXPECT_STREQ("mock_properties", header.properties().c_str()); + EXPECT_EQ(0, header.reconsumeTimes()); + EXPECT_FALSE(header.unitMode()); + EXPECT_FALSE(header.batch()); + EXPECT_EQ(1, header.maxReconsumeTimes()); +} + +TEST_F(SendMessageRequestHeaderTest, DecodeV3) { + std::string json = R"EOF( + { + "batch": false, + "bornTimestamp": 1575872212297, + "defaultTopic": "TBW102", + "defaultTopicQueueNums": 3, + "flag": 124, + "producerGroup": "FooBarGroup", + "queueId": 1, + "reconsumeTimes": 0, + "sysFlag": 0, + "topic": "FooBar", + "unitMode": false + } + )EOF"; + + 
SendMessageRequestHeader header; + ProtobufWkt::Value doc; + MessageUtil::loadFromJson(json, *(doc.mutable_struct_value())); + header.version(SendMessageRequestVersion::V3); + header.decode(doc); +} + +class HeartbeatDataTest : public testing::Test { +public: + HeartbeatData data_; +}; + +TEST_F(HeartbeatDataTest, Decoding) { + std::string json = R"EOF( + { + "clientID": "127.0.0.1@23606", + "consumerDataSet": [ + { + "consumeFromWhere": "CONSUME_FROM_LAST_OFFSET", + "consumeType": "CONSUME_ACTIVELY", + "groupName": "please_rename_unique_group_name_4", + "messageModel": "CLUSTERING", + "subscriptionDataSet": [ + { + "classFilterMode": false, + "codeSet": [], + "expressionType": "TAG", + "subString": "*", + "subVersion": 0, + "tagsSet": [], + "topic": "test_topic" + } + ], + "unitMode": false + } + ], + "producerDataSet": [ + { + "groupName": "CLIENT_INNER_PRODUCER" + } + ] + } + )EOF"; + + const char* clientId = "127.0.0.1@23606"; + const char* consumerGroup = "please_rename_unique_group_name_4"; + + HeartbeatData heart_beat_data; + ProtobufWkt::Struct doc; + MessageUtil::loadFromJson(json, doc); + + heart_beat_data.decode(doc); + EXPECT_STREQ(clientId, heart_beat_data.clientId().c_str()); + EXPECT_EQ(1, heart_beat_data.consumerGroups().size()); + EXPECT_STREQ(consumerGroup, heart_beat_data.consumerGroups()[0].c_str()); +} + +TEST_F(HeartbeatDataTest, DecodeClientIdMissing) { + std::string json = R"EOF( + { + "consumerDataSet": [ + { + "consumeFromWhere": "CONSUME_FROM_LAST_OFFSET", + "consumeType": "CONSUME_ACTIVELY", + "groupName": "please_rename_unique_group_name_4", + "messageModel": "CLUSTERING", + "subscriptionDataSet": [ + { + "classFilterMode": false, + "codeSet": [], + "expressionType": "TAG", + "subString": "*", + "subVersion": 0, + "tagsSet": [], + "topic": "test_topic" + } + ], + "unitMode": false + } + ], + "producerDataSet": [ + { + "groupName": "CLIENT_INNER_PRODUCER" + } + ] + } + )EOF"; + + ProtobufWkt::Struct doc; + MessageUtil::loadFromJson(json, doc); + EXPECT_FALSE(data_.decode(doc)); +} + +TEST_F(HeartbeatDataTest, Encode) { + data_.clientId("CID_01"); + ProtobufWkt::Struct doc; + data_.encode(doc); + const auto& members = doc.fields(); + EXPECT_TRUE(members.contains("clientID")); + EXPECT_STREQ("CID_01", members.at("clientID").string_value().c_str()); +} + +class RemotingCommandTest : public testing::Test { +public: + RemotingCommand cmd_; +}; + +TEST_F(RemotingCommandTest, FlagResponse) { + cmd_.markAsResponse(); + EXPECT_EQ(1, cmd_.flag()); +} + +TEST_F(RemotingCommandTest, FlagOneway) { + cmd_.markAsOneway(); + EXPECT_EQ(2, cmd_.flag()); +} + +TEST_F(RemotingCommandTest, Remark) { + const char* remark = "OK"; + cmd_.remark(remark); + EXPECT_STREQ(remark, cmd_.remark().c_str()); +} + +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/test/extensions/filters/network/rocketmq_proxy/route_matcher_test.cc b/test/extensions/filters/network/rocketmq_proxy/route_matcher_test.cc new file mode 100644 index 000000000000..c908602fc25e --- /dev/null +++ b/test/extensions/filters/network/rocketmq_proxy/route_matcher_test.cc @@ -0,0 +1,74 @@ +#include "envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.pb.h" +#include "envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.pb.validate.h" +#include "envoy/extensions/filters/network/rocketmq_proxy/v3/route.pb.h" +#include "envoy/extensions/filters/network/rocketmq_proxy/v3/route.pb.validate.h" + +#include 
"extensions/filters/network/rocketmq_proxy/metadata.h" +#include "extensions/filters/network/rocketmq_proxy/router/route_matcher.h" + +#include "test/mocks/server/mocks.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { +namespace Router { + +using RouteConfigurationProto = + envoy::extensions::filters::network::rocketmq_proxy::v3::RouteConfiguration; + +RouteConfigurationProto parseRouteConfigurationFromV2Yaml(const std::string& yaml) { + RouteConfigurationProto route_config; + TestUtility::loadFromYaml(yaml, route_config); + TestUtility::validate(route_config); + return route_config; +} + +TEST(RocketmqRouteMatcherTest, RouteWithHeaders) { + const std::string yaml = R"EOF( +name: default_route +routes: + - match: + topic: + exact: test_topic + headers: + - name: code + exact_match: '310' + route: + cluster: fake_cluster + metadata_match: + filter_metadata: + envoy.lb: + k1: v1 +)EOF"; + + RouteConfigurationProto config = parseRouteConfigurationFromV2Yaml(yaml); + + MessageMetadata metadata; + std::string topic_name = "test_topic"; + metadata.setTopicName(topic_name); + uint64_t code = 310; + metadata.headers().addCopy(Http::LowerCaseString("code"), code); + RouteMatcher matcher(config); + const Envoy::Router::MetadataMatchCriteria* criteria = + matcher.route(metadata)->routeEntry()->metadataMatchCriteria(); + const std::vector& mmc = + criteria->metadataMatchCriteria(); + + ProtobufWkt::Value v1; + v1.set_string_value("v1"); + HashedValue hv1(v1); + + EXPECT_EQ(1, mmc.size()); + EXPECT_EQ("k1", mmc[0]->name()); + EXPECT_EQ(hv1, mmc[0]->value()); +} + +} // namespace Router +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/test/extensions/filters/network/rocketmq_proxy/router_test.cc b/test/extensions/filters/network/rocketmq_proxy/router_test.cc new file mode 100644 index 000000000000..a80d837d1b10 --- /dev/null +++ b/test/extensions/filters/network/rocketmq_proxy/router_test.cc @@ -0,0 +1,470 @@ +#include "extensions/filters/network/rocketmq_proxy/config.h" +#include "extensions/filters/network/rocketmq_proxy/conn_manager.h" +#include "extensions/filters/network/rocketmq_proxy/router/router.h" +#include "extensions/filters/network/rocketmq_proxy/well_known_names.h" + +#include "test/extensions/filters/network/rocketmq_proxy/mocks.h" +#include "test/extensions/filters/network/rocketmq_proxy/utility.h" +#include "test/mocks/server/mocks.h" + +#include "gtest/gtest.h" + +using testing::_; +using testing::ContainsRegex; +using testing::Return; + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { +namespace Router { + +class RocketmqRouterTestBase { +public: + RocketmqRouterTestBase() + : config_(rocketmq_proxy_config_, context_), + cluster_info_(std::make_shared()) { + conn_manager_ = + std::make_unique(config_, context_.dispatcher().timeSource()); + conn_manager_->initializeReadFilterCallbacks(filter_callbacks_); + } + + ~RocketmqRouterTestBase() { filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList(); } + + void initializeRouter() { + router_ = std::make_unique(context_.clusterManager()); + EXPECT_EQ(nullptr, router_->downstreamConnection()); + } + + void initSendMessageRequest(std::string topic_name = "test_topic", bool is_oneway = false) { + RemotingCommandPtr request = std::make_unique(); + 
request->code(static_cast(RequestCode::SendMessageV2)); + if (is_oneway) { + request->flag(2); + } + SendMessageRequestHeader* header = new SendMessageRequestHeader(); + absl::string_view t = topic_name; + header->topic(t); + CommandCustomHeaderPtr custom_header(header); + request->customHeader(custom_header); + active_message_ = + std::make_unique>(*conn_manager_, std::move(request)); + + // Not yet implemented: + EXPECT_EQ(nullptr, router_->metadataMatchCriteria()); + } + + void initPopMessageRequest() { + Buffer::OwnedImpl buffer; + BufferUtility::fillRequestBuffer(buffer, RequestCode::PopMessage); + + bool underflow = false; + bool has_error = false; + + RemotingCommandPtr request = Decoder::decode(buffer, underflow, has_error); + + active_message_ = + std::make_unique>(*conn_manager_, std::move(request)); + } + + void initAckMessageRequest() { + Buffer::OwnedImpl buffer; + BufferUtility::fillRequestBuffer(buffer, RequestCode::AckMessage); + + bool underflow = false; + bool has_error = false; + + RemotingCommandPtr request = Decoder::decode(buffer, underflow, has_error); + + active_message_ = + std::make_unique>(*conn_manager_, std::move(request)); + } + + void initOneWayAckMessageRequest() { + RemotingCommandPtr request = std::make_unique(); + request->code(static_cast(RequestCode::AckMessage)); + request->flag(2); + std::unique_ptr header = std::make_unique(); + header->consumerGroup("test_cg"); + header->topic("test_topic"); + header->queueId(0); + header->extraInfo("test_extra"); + header->offset(1); + CommandCustomHeaderPtr ptr(header.release()); + request->customHeader(ptr); + active_message_ = + std::make_unique>(*conn_manager_, std::move(request)); + } + + void startRequest() { router_->sendRequestToUpstream(*active_message_); } + + void connectUpstream() { + context_.cluster_manager_.tcp_conn_pool_.poolReady(upstream_connection_); + } + + void startRequestWithExistingConnection() { + EXPECT_CALL(context_.cluster_manager_.tcp_conn_pool_, newConnection(_)) + .WillOnce( + Invoke([&](Tcp::ConnectionPool::Callbacks& cb) -> Tcp::ConnectionPool::Cancellable* { + context_.cluster_manager_.tcp_conn_pool_.newConnectionImpl(cb); + context_.cluster_manager_.tcp_conn_pool_.poolReady(upstream_connection_); + return nullptr; + })); + router_->sendRequestToUpstream(*active_message_); + } + + void receiveEmptyResponse() { + Buffer::OwnedImpl buffer; + router_->onAboveWriteBufferHighWatermark(); + router_->onBelowWriteBufferLowWatermark(); + router_->onUpstreamData(buffer, false); + } + + void receiveSendMessageResponse(bool end_stream) { + Buffer::OwnedImpl buffer; + BufferUtility::fillResponseBuffer(buffer, RequestCode::SendMessageV2, ResponseCode::Success); + router_->onUpstreamData(buffer, end_stream); + } + + void receivePopMessageResponse() { + Buffer::OwnedImpl buffer; + BufferUtility::fillResponseBuffer(buffer, RequestCode::PopMessage, ResponseCode::Success); + router_->onUpstreamData(buffer, false); + } + + void receiveAckMessageResponse() { + Buffer::OwnedImpl buffer; + BufferUtility::fillResponseBuffer(buffer, RequestCode::AckMessage, ResponseCode::Success); + router_->onUpstreamData(buffer, false); + } + + NiceMock filter_callbacks_; + NiceMock context_; + ConfigImpl::RocketmqProxyConfig rocketmq_proxy_config_; + ConfigImpl config_; + std::unique_ptr conn_manager_; + + std::unique_ptr router_; + + std::unique_ptr> active_message_; + NiceMock upstream_connection_; + + std::shared_ptr cluster_info_; + NiceMock thread_local_cluster_; +}; + +class RocketmqRouterTest : public 
RocketmqRouterTestBase, public testing::Test {}; + +TEST_F(RocketmqRouterTest, PoolRemoteConnectionFailure) { + initializeRouter(); + initSendMessageRequest(); + + EXPECT_CALL(*active_message_, onError(_)) + .Times(1) + .WillOnce(Invoke([&](absl::string_view error_message) -> void { + EXPECT_THAT(error_message, ContainsRegex(".*remote connection failure*.")); + })); + + startRequest(); + context_.cluster_manager_.tcp_conn_pool_.poolFailure( + Tcp::ConnectionPool::PoolFailureReason::RemoteConnectionFailure); +} + +TEST_F(RocketmqRouterTest, PoolTimeout) { + initializeRouter(); + initSendMessageRequest(); + + EXPECT_CALL(*active_message_, onError(_)) + .Times(1) + .WillOnce(Invoke([&](absl::string_view error_message) -> void { + EXPECT_THAT(error_message, ContainsRegex(".*timeout*.")); + })); + EXPECT_CALL(*active_message_, onReset()); + + startRequest(); + context_.cluster_manager_.tcp_conn_pool_.poolFailure( + Tcp::ConnectionPool::PoolFailureReason::Timeout); +} + +TEST_F(RocketmqRouterTest, PoolLocalConnectionFailure) { + initializeRouter(); + initSendMessageRequest(); + + EXPECT_CALL(*active_message_, onError(_)) + .Times(1) + .WillOnce(Invoke([&](absl::string_view error_message) -> void { + EXPECT_THAT(error_message, ContainsRegex(".*local connection failure*.")); + })); + EXPECT_CALL(*active_message_, onReset()); + + startRequest(); + context_.cluster_manager_.tcp_conn_pool_.poolFailure( + Tcp::ConnectionPool::PoolFailureReason::LocalConnectionFailure); +} + +TEST_F(RocketmqRouterTest, PoolOverflowFailure) { + initializeRouter(); + initSendMessageRequest(); + + EXPECT_CALL(*active_message_, onError(_)) + .Times(1) + .WillOnce(Invoke([&](absl::string_view error_message) -> void { + EXPECT_THAT(error_message, ContainsRegex(".*overflow*.")); + })); + EXPECT_CALL(*active_message_, onReset()); + + startRequest(); + context_.cluster_manager_.tcp_conn_pool_.poolFailure( + Tcp::ConnectionPool::PoolFailureReason::Overflow); +} + +TEST_F(RocketmqRouterTest, ClusterMaintenanceMode) { + initializeRouter(); + initSendMessageRequest(); + + EXPECT_CALL(*active_message_, onError(_)) + .Times(1) + .WillOnce(Invoke([&](absl::string_view error_message) -> void { + EXPECT_THAT(error_message, ContainsRegex(".*Cluster under maintenance*.")); + })); + EXPECT_CALL(*context_.cluster_manager_.thread_local_cluster_.cluster_.info_, maintenanceMode()) + .WillOnce(Return(true)); + EXPECT_CALL(*active_message_, onReset()); + + startRequest(); +} + +TEST_F(RocketmqRouterTest, NoHealthyHosts) { + initializeRouter(); + initSendMessageRequest(); + + EXPECT_CALL(*active_message_, onError(_)) + .Times(1) + .WillOnce(Invoke([&](absl::string_view error_message) -> void { + EXPECT_THAT(error_message, ContainsRegex(".*No host available*.")); + })); + EXPECT_CALL(context_.cluster_manager_, tcpConnPoolForCluster("fake_cluster", _, _)) + .WillOnce(Return(nullptr)); + EXPECT_CALL(*active_message_, onReset()); + + startRequest(); +} + +TEST_F(RocketmqRouterTest, NoRouteForRequest) { + initializeRouter(); + initSendMessageRequest(); + + EXPECT_CALL(*active_message_, onError(_)) + .Times(1) + .WillOnce(Invoke([&](absl::string_view error_message) -> void { + EXPECT_THAT(error_message, ContainsRegex(".*No route for current request*.")); + })); + EXPECT_CALL(*active_message_, route()).WillRepeatedly(Return(nullptr)); + EXPECT_CALL(*active_message_, onReset()); + + startRequest(); +} + +TEST_F(RocketmqRouterTest, NoCluster) { + initializeRouter(); + initSendMessageRequest(); + + EXPECT_CALL(*active_message_, onReset()); + 
EXPECT_CALL(context_.cluster_manager_, get(_)).WillRepeatedly(Return(nullptr)); + + startRequest(); +} + +TEST_F(RocketmqRouterTest, CallWithEmptyResponse) { + initializeRouter(); + initSendMessageRequest(); + + startRequest(); + connectUpstream(); + + EXPECT_CALL(*active_message_, sendResponseToDownstream()).Times(0); + EXPECT_CALL(*active_message_, onReset()).Times(0); + + receiveEmptyResponse(); +} + +TEST_F(RocketmqRouterTest, OneWayRequest) { + initializeRouter(); + initSendMessageRequest("test_topic", true); + startRequest(); + + EXPECT_CALL(*active_message_, onReset()); + + connectUpstream(); + + EXPECT_TRUE(active_message_->metadata()->isOneWay()); +} + +TEST_F(RocketmqRouterTest, ReceiveSendMessageResponse) { + initializeRouter(); + initSendMessageRequest(); + + startRequest(); + connectUpstream(); + + EXPECT_CALL(*active_message_, sendResponseToDownstream()); + EXPECT_CALL(*active_message_, onReset()); + + receiveSendMessageResponse(false); +} + +TEST_F(RocketmqRouterTest, ReceivePopMessageResponse) { + initializeRouter(); + initPopMessageRequest(); + + startRequest(); + connectUpstream(); + + EXPECT_CALL(*active_message_, sendResponseToDownstream()); + EXPECT_CALL(*active_message_, onReset()); + + receivePopMessageResponse(); +} + +TEST_F(RocketmqRouterTest, ReceiveAckMessageResponse) { + initializeRouter(); + initAckMessageRequest(); + + startRequest(); + connectUpstream(); + + EXPECT_CALL(*active_message_, sendResponseToDownstream()); + EXPECT_CALL(*active_message_, onReset()); + + receiveAckMessageResponse(); +} + +TEST_F(RocketmqRouterTest, OneWayAckMessage) { + initializeRouter(); + initOneWayAckMessageRequest(); + + startRequest(); + + EXPECT_CALL(*active_message_, onReset()); + + connectUpstream(); +} + +TEST_F(RocketmqRouterTest, ReceivedSendMessageResponseWithDecodeError) { + initializeRouter(); + initSendMessageRequest(); + + EXPECT_CALL(*active_message_, onError(_)) + .Times(1) + .WillOnce(Invoke([&](absl::string_view error_message) -> void { + EXPECT_THAT(error_message, ContainsRegex(".*Failed to decode response*.")); + })); + + EXPECT_CALL(upstream_connection_, close(Network::ConnectionCloseType::NoFlush)); + + startRequest(); + connectUpstream(); + std::string json = R"EOF( + { + "language": "JAVA", + "version": 2, + "opaque": 1, + "flag": 1, + "serializeTypeCurrentRPC": "JSON" + } + )EOF"; + Buffer::OwnedImpl buffer; + buffer.writeBEInt(4 + 4 + json.size()); + buffer.writeBEInt(json.size()); + buffer.add(json); + + EXPECT_CALL(*active_message_, onReset()).WillRepeatedly(Invoke([&]() -> void { + conn_manager_->deferredDelete(**conn_manager_->activeMessageList().begin()); + })); + EXPECT_CALL(*active_message_, onReset()); + + active_message_->moveIntoList(std::move(active_message_), conn_manager_->activeMessageList()); + router_->onUpstreamData(buffer, false); +} + +TEST_F(RocketmqRouterTest, ReceivedSendMessageResponseWithStreamEnd) { + initializeRouter(); + initSendMessageRequest(); + + EXPECT_CALL(upstream_connection_, close(Network::ConnectionCloseType::NoFlush)); + + startRequest(); + connectUpstream(); + + EXPECT_CALL(*active_message_, sendResponseToDownstream()); + EXPECT_CALL(*active_message_, onReset()); + + receiveSendMessageResponse(true); +} + +TEST_F(RocketmqRouterTest, UpstreamRemoteCloseMidResponse) { + initializeRouter(); + initSendMessageRequest(); + + EXPECT_CALL(*active_message_, onError(_)) + .Times(1) + .WillOnce(Invoke([&](absl::string_view error_message) -> void { + EXPECT_THAT(error_message, ContainsRegex(".*Connection to upstream is 
closed*.")); + })); + + startRequest(); + connectUpstream(); + + EXPECT_CALL(*active_message_, sendResponseToDownstream()).Times(0); + EXPECT_CALL(*active_message_, onReset()); + + router_->onEvent(Network::ConnectionEvent::RemoteClose); +} + +TEST_F(RocketmqRouterTest, UpstreamLocalCloseMidResponse) { + initializeRouter(); + initSendMessageRequest(); + + EXPECT_CALL(*active_message_, onError(_)) + .Times(1) + .WillOnce(Invoke([&](absl::string_view error_message) -> void { + EXPECT_THAT(error_message, ContainsRegex(".*Connection to upstream has been closed*.")); + })); + + startRequest(); + connectUpstream(); + + EXPECT_CALL(*active_message_, sendResponseToDownstream()).Times(0); + EXPECT_CALL(*active_message_, onReset()); + + router_->onEvent(Network::ConnectionEvent::LocalClose); +} + +TEST_F(RocketmqRouterTest, UpstreamConnected) { + initializeRouter(); + initSendMessageRequest(); + + startRequest(); + connectUpstream(); + + EXPECT_CALL(*active_message_, sendResponseToDownstream()).Times(0); + EXPECT_CALL(*active_message_, onReset()).Times(0); + + router_->onEvent(Network::ConnectionEvent::Connected); +} + +TEST_F(RocketmqRouterTest, StartRequestWithExistingConnection) { + initializeRouter(); + initSendMessageRequest(); + + EXPECT_CALL(*active_message_, onError(_)).Times(0); + EXPECT_CALL(*active_message_, onReset()).Times(0); + + startRequestWithExistingConnection(); +} + +} // namespace Router +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/test/extensions/filters/network/rocketmq_proxy/topic_route_test.cc b/test/extensions/filters/network/rocketmq_proxy/topic_route_test.cc new file mode 100644 index 000000000000..a2392b0c0603 --- /dev/null +++ b/test/extensions/filters/network/rocketmq_proxy/topic_route_test.cc @@ -0,0 +1,74 @@ +#include + +#include "common/protobuf/utility.h" + +#include "extensions/filters/network/rocketmq_proxy/topic_route.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { + +TEST(TopicRouteTest, Serialization) { + QueueData queue_data("broker-a", 8, 8, 6); + ProtobufWkt::Struct doc; + queue_data.encode(doc); + + const auto& members = doc.fields(); + + ASSERT_STREQ("broker-a", members.at("brokerName").string_value().c_str()); + ASSERT_EQ(queue_data.brokerName(), members.at("brokerName").string_value()); + ASSERT_EQ(queue_data.readQueueNum(), members.at("readQueueNums").number_value()); + ASSERT_EQ(queue_data.writeQueueNum(), members.at("writeQueueNums").number_value()); + ASSERT_EQ(queue_data.perm(), members.at("perm").number_value()); +} + +TEST(BrokerDataTest, Serialization) { + std::unordered_map broker_addrs; + std::string dummy_address("127.0.0.1:10911"); + for (int64_t i = 0; i < 3; i++) { + broker_addrs[i] = dummy_address; + } + std::string cluster("DefaultCluster"); + std::string broker_name("broker-a"); + BrokerData broker_data(cluster, broker_name, std::move(broker_addrs)); + + ProtobufWkt::Struct doc; + broker_data.encode(doc); + + const auto& members = doc.fields(); + + ASSERT_STREQ(cluster.c_str(), members.at("cluster").string_value().c_str()); + ASSERT_STREQ(broker_name.c_str(), members.at("brokerName").string_value().c_str()); +} + +TEST(TopicRouteDataTest, Serialization) { + TopicRouteData topic_route_data; + + for (int i = 0; i < 16; i++) { + topic_route_data.queueData().push_back(QueueData("broker-a", 8, 8, 6)); + } + + std::string cluster("DefaultCluster"); + 
std::string broker_name("broker-a"); + std::string dummy_address("127.0.0.1:10911"); + + for (int i = 0; i < 16; i++) { + std::unordered_map broker_addrs; + for (int64_t i = 0; i < 3; i++) { + broker_addrs[i] = dummy_address; + } + topic_route_data.brokerData().emplace_back( + BrokerData(cluster, broker_name, std::move(broker_addrs))); + } + ProtobufWkt::Struct doc; + EXPECT_NO_THROW(topic_route_data.encode(doc)); + MessageUtil::getJsonStringFromMessage(doc); +} + +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/test/extensions/filters/network/rocketmq_proxy/utility.cc b/test/extensions/filters/network/rocketmq_proxy/utility.cc new file mode 100644 index 000000000000..a44f0cd0acb3 --- /dev/null +++ b/test/extensions/filters/network/rocketmq_proxy/utility.cc @@ -0,0 +1,240 @@ +#include "test/extensions/filters/network/rocketmq_proxy/utility.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { + +const std::string BufferUtility::topic_name_ = "test_topic"; +const std::string BufferUtility::client_id_ = "test_client_id"; +const std::string BufferUtility::producer_group_ = "test_pg"; +const std::string BufferUtility::consumer_group_ = "test_cg"; +const std::string BufferUtility::extra_info_ = "test_extra"; +const std::string BufferUtility::msg_body_ = "_Apache_RocketMQ_"; +const int BufferUtility::queue_id_ = 1; +int BufferUtility::opaque_ = 0; + +void BufferUtility::fillRequestBuffer(Buffer::OwnedImpl& buffer, RequestCode code) { + + RemotingCommandPtr cmd = std::make_unique(); + cmd->code(static_cast(code)); + cmd->opaque(++opaque_); + + switch (code) { + case RequestCode::SendMessage: { + std::unique_ptr header = std::make_unique(); + header->topic(topic_name_); + header->version(SendMessageRequestVersion::V1); + std::string msg_body = msg_body_; + cmd->body().add(msg_body); + CommandCustomHeaderPtr ptr(header.release()); + cmd->customHeader(ptr); + } break; + + case RequestCode::HeartBeat: { + std::string heartbeat_data = R"EOF( + { + "clientID": "127.0.0.1@90330", + "consumerDataSet": [ + { + "consumeFromWhere": "CONSUME_FROM_FIRST_OFFSET", + "consumeType": "CONSUME_PASSIVELY", + "groupName": "test_cg", + "messageModel": "CLUSTERING", + "subscriptionDataSet": [ + { + "classFilterMode": false, + "codeSet": [], + "expressionType": "TAG", + "subString": "*", + "subVersion": 1575630587925, + "tagsSet": [], + "topic": "test_topic" + }, + { + "classFilterMode": false, + "codeSet": [], + "expressionType": "TAG", + "subString": "*", + "subVersion": 1575630587945, + "tagsSet": [], + "topic": "%RETRY%please_rename_unique_group_name_4" + } + ], + "unitMode": false + } + ], + "producerDataSet": [ + { + "groupName": "CLIENT_INNER_PRODUCER" + } + ] + } + )EOF"; + cmd->body().add(heartbeat_data); + } break; + + case RequestCode::UnregisterClient: { + std::unique_ptr header = + std::make_unique(); + header->clientId(client_id_); + header->consumerGroup(consumer_group_); + CommandCustomHeaderPtr ptr(header.release()); + cmd->customHeader(ptr); + break; + } + + case RequestCode::GetRouteInfoByTopic: { + std::unique_ptr header = + std::make_unique(); + header->topic(topic_name_); + CommandCustomHeaderPtr ptr(header.release()); + cmd->customHeader(ptr); + break; + } + + case RequestCode::GetConsumerListByGroup: { + std::unique_ptr header = + std::make_unique(); + header->consumerGroup(consumer_group_); + CommandCustomHeaderPtr 
ptr(header.release()); + cmd->customHeader(ptr); + break; + } + + case RequestCode::SendMessageV2: { + std::unique_ptr header = std::make_unique(); + header->topic(topic_name_); + header->version(SendMessageRequestVersion::V2); + header->producerGroup(producer_group_); + std::string msg_body = msg_body_; + cmd->body().add(msg_body); + CommandCustomHeaderPtr ptr(header.release()); + cmd->customHeader(ptr); + break; + } + + case RequestCode::PopMessage: { + std::unique_ptr header = std::make_unique(); + header->consumerGroup(consumer_group_); + header->topic(topic_name_); + header->queueId(queue_id_); + header->maxMsgNum(32); + header->invisibleTime(6000); + header->pollTime(3000); + header->bornTime(1000); + header->initMode(4); + + CommandCustomHeaderPtr ptr(header.release()); + cmd->customHeader(ptr); + break; + } + + case RequestCode::AckMessage: { + std::unique_ptr header = std::make_unique(); + header->consumerGroup(consumer_group_); + header->topic(topic_name_); + header->queueId(queue_id_); + header->extraInfo(extra_info_); + header->offset(1); + CommandCustomHeaderPtr ptr(header.release()); + cmd->customHeader(ptr); + break; + } + + default: + break; + } + Encoder encoder_; + buffer.drain(buffer.length()); + encoder_.encode(cmd, buffer); +} + +void BufferUtility::fillResponseBuffer(Buffer::OwnedImpl& buffer, RequestCode req_code, + ResponseCode resp_code) { + RemotingCommandPtr cmd = std::make_unique(); + cmd->code(static_cast(resp_code)); + cmd->opaque(opaque_); + + switch (req_code) { + case RequestCode::SendMessageV2: { + std::unique_ptr header = + std::make_unique(); + header->msgIdForTest("MSG_ID_01"); + header->queueId(1); + header->queueOffset(100); + header->transactionId("TX_01"); + break; + } + case RequestCode::PopMessage: { + std::unique_ptr header = std::make_unique(); + header->popTime(1587386521445); + header->invisibleTime(50000); + header->reviveQid(5); + std::string msg_offset_info = "0 6 147"; + header->msgOffsetInfo(msg_offset_info); + std::string start_offset_info = "0 6 147"; + header->startOffsetInfo(start_offset_info); + CommandCustomHeaderPtr ptr(header.release()); + cmd->customHeader(ptr); + cmd->body().add(std::string({'\x00', '\x00', '\x00', '\xD5'})); + cmd->body().add(std::string({'\xDA', '\xA3', '\x20', '\xA7'})); + cmd->body().add(std::string({'\x01', '\xE5', '\x9A', '\x3E'})); + cmd->body().add(std::string({'\x00', '\x00', '\x00', '\x06'})); + cmd->body().add(std::string({'\x00', '\x00', '\x00', '\x00'})); + cmd->body().add(std::string({'\x00', '\x00', '\x00', '\x00'})); + cmd->body().add(std::string({'\x00', '\x00', '\x00', '\x93'})); + cmd->body().add(std::string({'\x00', '\x00', '\x00', '\x00'})); + cmd->body().add(std::string({'\x00', '\x4A', '\xE0', '\x46'})); + cmd->body().add(std::string({'\x00', '\x00', '\x00', '\x00'})); + cmd->body().add(std::string({'\x00', '\x00', '\x01', '\x71'})); + cmd->body().add(std::string({'\x97', '\x98', '\x71', '\xB6'})); + cmd->body().add(std::string({'\x0A', '\x65', '\xC4', '\x91'})); + cmd->body().add(std::string({'\x00', '\x00', '\x1A', '\xF4'})); + cmd->body().add(std::string({'\x00', '\x00', '\x01', '\x71'})); + cmd->body().add(std::string({'\x97', '\x98', '\x71', '\xAF'})); + cmd->body().add(std::string({'\x0A', '\x65', '\xC1', '\x2D'})); + cmd->body().add(std::string({'\x00', '\x00', '\x1F', '\x53'})); + cmd->body().add(std::string({'\x00', '\x00', '\x00', '\x00'})); + cmd->body().add(std::string({'\x00', '\x00', '\x00', '\x00'})); + cmd->body().add(std::string({'\x00', '\x00', '\x00', '\x00'})); + 
cmd->body().add(std::string({'\x00', '\x00', '\x00', '\x11'})); + cmd->body().add(std::string("Hello RocketMQ 52")); + cmd->body().add(std::string({'\x04'})); + cmd->body().add(std::string("mesh")); + cmd->body().add(std::string({'\x00', '\x65'})); + cmd->body().add(std::string("TRACE_ON")); + cmd->body().add(std::string({'\x01'})); + cmd->body().add(std::string("true")); + cmd->body().add(std::string({'\x02'})); + cmd->body().add(std::string("MSG_REGION")); + cmd->body().add(std::string({'\x01'})); + cmd->body().add(std::string("DefaultRegion")); + cmd->body().add(std::string({'\x02'})); + cmd->body().add(std::string("UNIQ_KEY")); + cmd->body().add(std::string({'\x01'})); + cmd->body().add(std::string("1EE10882893E18B4AAC2664649B60034")); + cmd->body().add(std::string({'\x02'})); + cmd->body().add(std::string("WAIT")); + cmd->body().add(std::string({'\x01'})); + cmd->body().add(std::string("true")); + cmd->body().add(std::string({'\x02'})); + cmd->body().add(std::string("TAGS")); + cmd->body().add(std::string({'\x01'})); + cmd->body().add(std::string("TagA")); + cmd->body().add(std::string({'\x02'})); + break; + } + default: + break; + } + Encoder encoder_; + buffer.drain(buffer.length()); + encoder_.encode(cmd, buffer); +} +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/test/extensions/filters/network/rocketmq_proxy/utility.h b/test/extensions/filters/network/rocketmq_proxy/utility.h new file mode 100644 index 000000000000..1dc57d5f2a76 --- /dev/null +++ b/test/extensions/filters/network/rocketmq_proxy/utility.h @@ -0,0 +1,33 @@ +#pragma once + +#include "extensions/filters/network/rocketmq_proxy/config.h" +#include "extensions/filters/network/rocketmq_proxy/conn_manager.h" + +#include "test/mocks/server/mocks.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace RocketmqProxy { + +class BufferUtility { +public: + static void fillRequestBuffer(Buffer::OwnedImpl& buffer, RequestCode code); + static void fillResponseBuffer(Buffer::OwnedImpl& buffer, RequestCode req_code, + ResponseCode resp_code); + + const static std::string topic_name_; + const static std::string client_id_; + const static std::string producer_group_; + const static std::string consumer_group_; + const static std::string msg_body_; + const static std::string extra_info_; + const static int queue_id_; + static int opaque_; +}; +} // namespace RocketmqProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/tools/spelling/spelling_dictionary.txt b/tools/spelling/spelling_dictionary.txt index 2bc47abeb63f..fdeccc570b1b 100644 --- a/tools/spelling/spelling_dictionary.txt +++ b/tools/spelling/spelling_dictionary.txt @@ -174,6 +174,7 @@ MB MD MERCHANTABILITY MGET +MQ MSET MSVC MTLS @@ -757,6 +758,7 @@ mutexes mux muxed mysql +nameserver namespace namespaces namespacing @@ -913,6 +915,7 @@ reimplements rele releasor reloadable +remoting reparse repeatability reperform @@ -934,6 +937,7 @@ resync retriable retriggers rmdir +rocketmq rollout roundtrip rpcs From 46c5de89cd85ce0b9da41ddc4c14260b63ea2222 Mon Sep 17 00:00:00 2001 From: htuch Date: Thu, 23 Apr 2020 11:00:46 -0400 Subject: [PATCH 014/909] tools: including working tree in modified_since_last_github_commit.sh diff. (#10911) This avoids proto_format.sh getting confused by uncommitted changes. 
Risk level: Low (tooling only) Testing: Manual Signed-off-by: Harvey Tuch --- tools/git/modified_since_last_github_commit.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/git/modified_since_last_github_commit.sh b/tools/git/modified_since_last_github_commit.sh index bbb9d388a239..e8e805ce9c97 100755 --- a/tools/git/modified_since_last_github_commit.sh +++ b/tools/git/modified_since_last_github_commit.sh @@ -4,4 +4,4 @@ declare -r BASE="$(dirname "$0")" declare -r TARGET_PATH=$1 declare -r EXTENSION=$2 -git diff --name-only $("${BASE}"/last_github_commit.sh)..HEAD | grep "\.${EXTENSION}$" +git diff --name-only $("${BASE}"/last_github_commit.sh) | grep "\.${EXTENSION}$" From 528287d096e9b9278a00681797340be2f1fe9661 Mon Sep 17 00:00:00 2001 From: asraa Date: Thu, 23 Apr 2020 11:13:00 -0400 Subject: [PATCH 015/909] [tools] handle commits merged without PR in deprecated script (#10723) Some commits are merged without a PR, so the deprecated script fails. Change so that issues can still be created using the commit message and sha instead of the PR title and number. Signed-off-by: Asra Ali --- tools/deprecate_version/deprecate_version.py | 49 ++++++++++++++------ 1 file changed, 34 insertions(+), 15 deletions(-) diff --git a/tools/deprecate_version/deprecate_version.py b/tools/deprecate_version/deprecate_version.py index aba1579734a0..6129ae585f01 100644 --- a/tools/deprecate_version/deprecate_version.py +++ b/tools/deprecate_version/deprecate_version.py @@ -57,9 +57,10 @@ def CreateIssues(access_token, runtime_and_pr): Args: access_token: GitHub access token (see comment at top of file). - runtime_and_pr: a list of runtime guards and the PRs they were added. + runtime_and_pr: a list of runtime guards and the PRs and commits they were added. """ - repo = github.Github(access_token).get_repo('envoyproxy/envoy') + git = github.Github(access_token) + repo = git.get_repo('envoyproxy/envoy') # Find GitHub label objects for LABELS. labels = [] @@ -70,16 +71,31 @@ def CreateIssues(access_token, runtime_and_pr): raise DeprecateVersionError('Unknown labels (expected %s, got %s)' % (LABELS, labels)) issues = [] - for runtime_guard, pr in runtime_and_pr: + for runtime_guard, pr, commit in runtime_and_pr: # Who is the author? - pr_info = repo.get_pull(pr) + if pr: + # Extract PR title, number, and author. + pr_info = repo.get_pull(pr) + change_title = pr_info.title + number = ('#%d') % pr + login = pr_info.user.login + else: + # Extract commit message, sha, and author. + # Only keep commit message title (remove description), and truncate to 50 characters. + change_title = commit.message.split('\n')[0][:50] + number = ('commit %s') % commit.hexsha + email = commit.author.email + # Use the commit author's email to search through users for their login. + search_user = git.search_users(email.split('@')[0] + " in:email") + login = search_user[0].login if search_user else None title = '%s deprecation' % (runtime_guard) - body = ('#%d (%s) introduced a runtime guarded feature. This issue ' - 'tracks source code cleanup.') % (pr, pr_info.title) + body = ('%s (%s) introduced a runtime guarded feature. This issue ' + 'tracks source code cleanup.') % (number, change_title) + print(title) print(body) - print(' >> Assigning to %s' % pr_info.user.login) + print(' >> Assigning to %s' % (login or email)) # TODO(htuch): Figure out how to do this without legacy and faster. 
exists = repo.legacy_search_issues('open', '"%s"' % title) or repo.legacy_search_issues( @@ -87,7 +103,7 @@ def CreateIssues(access_token, runtime_and_pr): if exists: print(' >> Issue already exists, not posting!') else: - issues.append((title, body, pr_info.user)) + issues.append((title, body, login)) if not issues: print('No features to deprecate in this release') @@ -95,22 +111,23 @@ def CreateIssues(access_token, runtime_and_pr): if GetConfirmation(): print('Creating issues...') - for title, body, user in issues: + for title, body, login in issues: try: - repo.create_issue(title, body=body, assignees=[user.login], labels=labels) + repo.create_issue(title, body=body, assignees=[login], labels=labels) except github.GithubException as e: try: - body += '\ncc @' + user.login + if login: + body += '\ncc @' + login repo.create_issue(title, body=body, labels=labels) print(('unable to assign issue %s to %s. Add them to the Envoy proxy org' - 'and assign it their way.') % (title, user.login)) + 'and assign it their way.') % (title, login)) except github.GithubException as e: print('GithubException while creating issue.') raise def GetRuntimeAndPr(): - """Returns a list of tuples of [runtime features to deprecate, PR the feature was added] + """Returns a list of tuples of [runtime features to deprecate, PR, commit the feature was added] """ repo = Repo(os.getcwd()) @@ -140,14 +157,16 @@ def GetRuntimeAndPr(): if runtime_guard == 'envoy.reloadable_features.test_feature_true': found_test_feature_true = True continue - pr = (int(re.search('\(#(\d+)\)', commit.message).group(1))) + pr_num = re.search('\(#(\d+)\)', commit.message) + # Some commits may not come from a PR (if they are part of a security point release). + pr = (int(pr_num.group(1))) if pr_num else None pr_date = date.fromtimestamp(commit.committed_date) removable = (pr_date < removal_date) # Add the runtime guard and PR to the list to file issues about. print('Flag ' + runtime_guard + ' added at ' + str(pr_date) + ' ' + (removable and 'and is safe to remove' or 'is not ready to remove')) if removable: - features_to_flip.append((runtime_guard, pr)) + features_to_flip.append((runtime_guard, pr, commit)) print('Failed to find test_feature_false. Script needs fixing') sys.exit(1) From 61d23c4f2dabaa2c7c631ac0c109dd32245a5bad Mon Sep 17 00:00:00 2001 From: Sunjay Bhatia Date: Thu, 23 Apr 2020 13:22:46 -0400 Subject: [PATCH 016/909] Lower heap and disk space used by kafka tests (#10915) This patch either removes mocks.h related headers altogether from tests which never required this very heavyweight dependency, or simplifies the most heavyweight server/mocks.h and elects only more specific mocks. 
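As an illustration of the pattern (a hypothetical test translation unit, not one of the files touched in this patch), a test that only exercises network and stats mocks can include exactly those headers instead of the catch-all server mocks aggregate:

    // Hypothetical example: include only the mocks the test instantiates,
    // rather than the heavyweight test/mocks/server/mocks.h aggregate.
    #include "test/mocks/network/mocks.h"
    #include "test/mocks/stats/mocks.h"

    #include "gmock/gmock.h"
    #include "gtest/gtest.h"

    namespace Envoy {

    TEST(NarrowMockIncludesExampleTest, BuildsWithSpecificMocksOnly) {
      // Only these mock types (and their transitive dependencies) are compiled
      // into the test target; that narrowing is what lowers compile-time heap use.
      testing::NiceMock<Network::MockReadFilterCallbacks> read_callbacks;
      testing::NiceMock<Stats::MockStore> store;
      EXPECT_TRUE(true); // placeholder assertion; the point is the includes.
    }

    } // namespace Envoy

The before/after heap figures below quantify the effect per target.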
Original Revised Heap space used compiling (before/after simplifying mocks.h inclusions) 3619976 283508 //test/extensions/filters/network/kafka:request_codec_integration_test 3621112 177212 //test/extensions/filters/network/kafka:response_codec_integration_test 3631680 308996 //test/extensions/filters/network/kafka:kafka_response_parser_test 3637000 309368 //test/extensions/filters/network/kafka:kafka_request_parser_test 3734736 93464 //test/extensions/filters/network/kafka:request_codec_unit_test 3735984 342468 //test/extensions/filters/network/kafka:response_codec_unit_test 4295272 2323440 //test/extensions/filters/network/kafka/broker:filter_unit_test 4339972 932984 //test/extensions/filters/network/kafka:requests_test 4366380 945936 //test/extensions/filters/network/kafka:request_codec_request_test 4410788 1020112 //test/extensions/filters/network/kafka:responses_test 4437292 1033328 //test/extensions/filters/network/kafka:response_codec_response_test Co-authored-by: William A Rowe Jr Signed-off-by: William A Rowe Jr Co-authored-by: Sunjay Bhatia Signed-off-by: Sunjay Bhatia --- test/extensions/filters/network/kafka/BUILD | 10 ---------- test/extensions/filters/network/kafka/broker/BUILD | 3 ++- .../filters/network/kafka/broker/filter_unit_test.cc | 2 +- .../filters/network/kafka/kafka_request_parser_test.cc | 1 - .../network/kafka/kafka_response_parser_test.cc | 1 - .../kafka/protocol/request_codec_request_test_cc.j2 | 1 - .../filters/network/kafka/protocol/requests_test_cc.j2 | 1 - .../kafka/protocol/response_codec_response_test_cc.j2 | 1 - .../network/kafka/protocol/responses_test_cc.j2 | 1 - .../network/kafka/request_codec_integration_test.cc | 1 - .../filters/network/kafka/request_codec_unit_test.cc | 1 - .../network/kafka/response_codec_integration_test.cc | 1 - .../filters/network/kafka/response_codec_unit_test.cc | 1 - 13 files changed, 3 insertions(+), 22 deletions(-) diff --git a/test/extensions/filters/network/kafka/BUILD b/test/extensions/filters/network/kafka/BUILD index 19ac80b6947f..d45e3702b0da 100644 --- a/test/extensions/filters/network/kafka/BUILD +++ b/test/extensions/filters/network/kafka/BUILD @@ -100,7 +100,6 @@ envoy_extension_cc_test( ":buffer_based_test_lib", ":serialization_utilities_lib", "//source/extensions/filters/network/kafka:kafka_request_parser_lib", - "//test/mocks/server:server_mocks", ], ) @@ -111,7 +110,6 @@ envoy_extension_cc_test( deps = [ ":buffer_based_test_lib", "//source/extensions/filters/network/kafka:kafka_request_codec_lib", - "//test/mocks/server:server_mocks", ], ) @@ -123,7 +121,6 @@ envoy_extension_cc_test( ":buffer_based_test_lib", ":serialization_utilities_lib", "//source/extensions/filters/network/kafka:kafka_request_codec_lib", - "//test/mocks/server:server_mocks", ], ) @@ -135,7 +132,6 @@ envoy_extension_cc_test( ":buffer_based_test_lib", ":serialization_utilities_lib", "//source/extensions/filters/network/kafka:kafka_request_codec_lib", - "//test/mocks/server:server_mocks", ], ) @@ -146,7 +142,6 @@ envoy_extension_cc_test( deps = [ ":buffer_based_test_lib", "//source/extensions/filters/network/kafka:kafka_request_codec_lib", - "//test/mocks/server:server_mocks", ], ) @@ -180,7 +175,6 @@ envoy_extension_cc_test( ":buffer_based_test_lib", ":serialization_utilities_lib", "//source/extensions/filters/network/kafka:kafka_response_parser_lib", - "//test/mocks/server:server_mocks", ], ) @@ -191,7 +185,6 @@ envoy_extension_cc_test( deps = [ ":buffer_based_test_lib", "//source/extensions/filters/network/kafka:kafka_response_codec_lib", 
- "//test/mocks/server:server_mocks", ], ) @@ -203,7 +196,6 @@ envoy_extension_cc_test( ":buffer_based_test_lib", ":serialization_utilities_lib", "//source/extensions/filters/network/kafka:kafka_response_codec_lib", - "//test/mocks/server:server_mocks", ], ) @@ -215,7 +207,6 @@ envoy_extension_cc_test( ":buffer_based_test_lib", ":serialization_utilities_lib", "//source/extensions/filters/network/kafka:kafka_response_codec_lib", - "//test/mocks/server:server_mocks", ], ) @@ -226,7 +217,6 @@ envoy_extension_cc_test( deps = [ ":buffer_based_test_lib", "//source/extensions/filters/network/kafka:kafka_response_codec_lib", - "//test/mocks/server:server_mocks", ], ) diff --git a/test/extensions/filters/network/kafka/broker/BUILD b/test/extensions/filters/network/kafka/broker/BUILD index 89664fb909b8..158c8a7cb27b 100644 --- a/test/extensions/filters/network/kafka/broker/BUILD +++ b/test/extensions/filters/network/kafka/broker/BUILD @@ -27,7 +27,8 @@ envoy_extension_cc_test( extension_name = "envoy.filters.network.kafka_broker", deps = [ "//source/extensions/filters/network/kafka:kafka_broker_filter_lib", - "//test/mocks/server:server_mocks", + "//test/mocks/network:network_mocks", + "//test/mocks/stats:stats_mocks", ], ) diff --git a/test/extensions/filters/network/kafka/broker/filter_unit_test.cc b/test/extensions/filters/network/kafka/broker/filter_unit_test.cc index 13f4c6cd271f..51e251504ced 100644 --- a/test/extensions/filters/network/kafka/broker/filter_unit_test.cc +++ b/test/extensions/filters/network/kafka/broker/filter_unit_test.cc @@ -1,7 +1,7 @@ #include "extensions/filters/network/kafka/broker/filter.h" #include "extensions/filters/network/kafka/external/requests.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/network/mocks.h" #include "test/mocks/stats/mocks.h" #include "gmock/gmock.h" diff --git a/test/extensions/filters/network/kafka/kafka_request_parser_test.cc b/test/extensions/filters/network/kafka/kafka_request_parser_test.cc index 42bd15436ac3..cb20b878098f 100644 --- a/test/extensions/filters/network/kafka/kafka_request_parser_test.cc +++ b/test/extensions/filters/network/kafka/kafka_request_parser_test.cc @@ -2,7 +2,6 @@ #include "test/extensions/filters/network/kafka/buffer_based_test.h" #include "test/extensions/filters/network/kafka/serialization_utilities.h" -#include "test/mocks/server/mocks.h" #include "gmock/gmock.h" diff --git a/test/extensions/filters/network/kafka/kafka_response_parser_test.cc b/test/extensions/filters/network/kafka/kafka_response_parser_test.cc index 4028423635c4..b905fc326f44 100644 --- a/test/extensions/filters/network/kafka/kafka_response_parser_test.cc +++ b/test/extensions/filters/network/kafka/kafka_response_parser_test.cc @@ -2,7 +2,6 @@ #include "test/extensions/filters/network/kafka/buffer_based_test.h" #include "test/extensions/filters/network/kafka/serialization_utilities.h" -#include "test/mocks/server/mocks.h" #include "gmock/gmock.h" diff --git a/test/extensions/filters/network/kafka/protocol/request_codec_request_test_cc.j2 b/test/extensions/filters/network/kafka/protocol/request_codec_request_test_cc.j2 index aa03d24ea842..4c29ff373076 100644 --- a/test/extensions/filters/network/kafka/protocol/request_codec_request_test_cc.j2 +++ b/test/extensions/filters/network/kafka/protocol/request_codec_request_test_cc.j2 @@ -14,7 +14,6 @@ #include "test/extensions/filters/network/kafka/buffer_based_test.h" #include "test/extensions/filters/network/kafka/serialization_utilities.h" -#include "test/mocks/server/mocks.h" #include 
"gtest/gtest.h" diff --git a/test/extensions/filters/network/kafka/protocol/requests_test_cc.j2 b/test/extensions/filters/network/kafka/protocol/requests_test_cc.j2 index b6af59d8962c..ec96a4d90047 100644 --- a/test/extensions/filters/network/kafka/protocol/requests_test_cc.j2 +++ b/test/extensions/filters/network/kafka/protocol/requests_test_cc.j2 @@ -7,7 +7,6 @@ #include "extensions/filters/network/kafka/request_codec.h" #include "test/extensions/filters/network/kafka/buffer_based_test.h" -#include "test/mocks/server/mocks.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/network/kafka/protocol/response_codec_response_test_cc.j2 b/test/extensions/filters/network/kafka/protocol/response_codec_response_test_cc.j2 index 060d1c64f370..f366452cf19f 100644 --- a/test/extensions/filters/network/kafka/protocol/response_codec_response_test_cc.j2 +++ b/test/extensions/filters/network/kafka/protocol/response_codec_response_test_cc.j2 @@ -14,7 +14,6 @@ #include "test/extensions/filters/network/kafka/buffer_based_test.h" #include "test/extensions/filters/network/kafka/serialization_utilities.h" -#include "test/mocks/server/mocks.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/network/kafka/protocol/responses_test_cc.j2 b/test/extensions/filters/network/kafka/protocol/responses_test_cc.j2 index f12a21f846c7..84fff592eb34 100644 --- a/test/extensions/filters/network/kafka/protocol/responses_test_cc.j2 +++ b/test/extensions/filters/network/kafka/protocol/responses_test_cc.j2 @@ -7,7 +7,6 @@ #include "extensions/filters/network/kafka/response_codec.h" #include "test/extensions/filters/network/kafka/buffer_based_test.h" -#include "test/mocks/server/mocks.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/network/kafka/request_codec_integration_test.cc b/test/extensions/filters/network/kafka/request_codec_integration_test.cc index 69e77a7f8d2b..8a7ae9b7a7a3 100644 --- a/test/extensions/filters/network/kafka/request_codec_integration_test.cc +++ b/test/extensions/filters/network/kafka/request_codec_integration_test.cc @@ -2,7 +2,6 @@ #include "test/extensions/filters/network/kafka/buffer_based_test.h" #include "test/extensions/filters/network/kafka/serialization_utilities.h" -#include "test/mocks/server/mocks.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/network/kafka/request_codec_unit_test.cc b/test/extensions/filters/network/kafka/request_codec_unit_test.cc index eec27d715215..9d9c0734262f 100644 --- a/test/extensions/filters/network/kafka/request_codec_unit_test.cc +++ b/test/extensions/filters/network/kafka/request_codec_unit_test.cc @@ -1,7 +1,6 @@ #include "extensions/filters/network/kafka/request_codec.h" #include "test/extensions/filters/network/kafka/buffer_based_test.h" -#include "test/mocks/server/mocks.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/network/kafka/response_codec_integration_test.cc b/test/extensions/filters/network/kafka/response_codec_integration_test.cc index 08be46a15645..287ba9f1855a 100644 --- a/test/extensions/filters/network/kafka/response_codec_integration_test.cc +++ b/test/extensions/filters/network/kafka/response_codec_integration_test.cc @@ -2,7 +2,6 @@ #include "test/extensions/filters/network/kafka/buffer_based_test.h" #include "test/extensions/filters/network/kafka/serialization_utilities.h" -#include "test/mocks/server/mocks.h" #include "gtest/gtest.h" diff --git 
a/test/extensions/filters/network/kafka/response_codec_unit_test.cc b/test/extensions/filters/network/kafka/response_codec_unit_test.cc index 0327b1ef738c..07dc116aab06 100644 --- a/test/extensions/filters/network/kafka/response_codec_unit_test.cc +++ b/test/extensions/filters/network/kafka/response_codec_unit_test.cc @@ -1,7 +1,6 @@ #include "extensions/filters/network/kafka/response_codec.h" #include "test/extensions/filters/network/kafka/buffer_based_test.h" -#include "test/mocks/server/mocks.h" #include "gmock/gmock.h" #include "gtest/gtest.h" From 083da8472c4dedda336f00c2ddd4366da5f27db7 Mon Sep 17 00:00:00 2001 From: Teju Nareddy Date: Thu, 23 Apr 2020 13:24:06 -0400 Subject: [PATCH 017/909] Add missing dependency on `assert.h` (#10918) PR #10777 introduces `NOT_REACHED_GCOVR_EXCL_LINE`, but this macro is defined in `assert.h` Signed-off-by: Teju Nareddy --- source/common/http/BUILD | 1 + source/common/http/status.cc | 2 ++ 2 files changed, 3 insertions(+) diff --git a/source/common/http/BUILD b/source/common/http/BUILD index c4e8f00ad335..4a8e95c24ba2 100644 --- a/source/common/http/BUILD +++ b/source/common/http/BUILD @@ -411,5 +411,6 @@ envoy_cc_library( ], deps = [ "//include/envoy/http:codes_interface", + "//source/common/common:assert_lib", ], ) diff --git a/source/common/http/status.cc b/source/common/http/status.cc index 166b154a3d2b..74c38d82d145 100644 --- a/source/common/http/status.cc +++ b/source/common/http/status.cc @@ -1,5 +1,7 @@ #include "common/http/status.h" +#include "common/common/assert.h" + #include "absl/strings/str_cat.h" namespace Envoy { From 09bf9a1ff75879986da876d0e3248630c959cf73 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Thu, 23 Apr 2020 13:29:34 -0400 Subject: [PATCH 018/909] config: adding connect matcher (unused) (#10894) Split out from #10623 Signed-off-by: Alyssa Wilk --- .../config/route/v3/route_components.proto | 29 ++++- .../route/v4alpha/route_components.proto | 34 +++++- .../config/route/v3/route_components.proto | 51 +++++++-- .../route/v4alpha/route_components.proto | 34 +++++- include/envoy/router/BUILD | 1 + include/envoy/router/router.h | 7 ++ source/common/http/async_client_impl.cc | 2 + source/common/http/async_client_impl.h | 4 + source/common/router/config_impl.cc | 36 +++++- source/common/router/config_impl.h | 31 +++++ test/common/router/config_impl_test.cc | 107 +++++++++++++++++- test/mocks/router/mocks.cc | 1 + test/mocks/router/mocks.h | 2 + 13 files changed, 320 insertions(+), 19 deletions(-) diff --git a/api/envoy/config/route/v3/route_components.proto b/api/envoy/config/route/v3/route_components.proto index 70c52010efa0..ebb5b8a01029 100644 --- a/api/envoy/config/route/v3/route_components.proto +++ b/api/envoy/config/route/v3/route_components.proto @@ -370,7 +370,7 @@ message WeightedCluster { string runtime_key_prefix = 2; } -// [#next-free-field: 12] +// [#next-free-field: 13] message RouteMatch { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RouteMatch"; @@ -392,6 +392,11 @@ message RouteMatch { google.protobuf.BoolValue validated = 2; } + // [#not-implemented-hide:] + // An extensible message for matching CONNECT requests. + message ConnectMatcher { + } + reserved 5, 3; reserved "regex"; @@ -420,6 +425,16 @@ message RouteMatch { // on :path, etc. The issue with that is it is unclear how to generically deal with query string // stripping. This needs more thought.] 
type.matcher.v3.RegexMatcher safe_regex = 10 [(validate.rules).message = {required: true}]; + + // [#not-implemented-hide:] + // If this is used as the matcher, the matcher will only match CONNECT requests. + // Note that this will not match HTTP/2 upgrade-style CONNECT requests + // (WebSocket and the like) as they are normalized in Envoy as HTTP/1.1 style + // upgrades. + // This is the only way to match CONNECT requests for HTTP/1.1. For HTTP/2, + // where CONNECT requests may have a path, the path matchers will work if + // there is a path present. + ConnectMatcher connect_matcher = 12; } // Indicates that prefix/path matching should be case insensitive. The default @@ -705,6 +720,13 @@ message RouteAction { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RouteAction.UpgradeConfig"; + // [#not-implemented-hide:] + // Configuration for sending data upstream as a raw data payload. This is used for + // CONNECT requests, when forwarding CONNECT payload as raw TCP. + message ConnectConfig { + // TODO(alyssawilk) add proxy proto configuration here. + } + // The case-insensitive name of this upgrade, e.g. "websocket". // For each upgrade type present in upgrade_configs, requests with // Upgrade: [upgrade_type] will be proxied upstream. @@ -713,6 +735,11 @@ message RouteAction { // Determines if upgrades are available on this route. Defaults to true. google.protobuf.BoolValue enabled = 2; + + // [#not-implemented-hide:] + // Configuration for sending data upstream as a raw data payload. This is used for + // CONNECT requests, when forwarding CONNECT payload as raw TCP. + ConnectConfig connect_config = 3; } reserved 12, 18, 19, 16, 22, 21, 10; diff --git a/api/envoy/config/route/v4alpha/route_components.proto b/api/envoy/config/route/v4alpha/route_components.proto index e813b632edb0..4a54ff847063 100644 --- a/api/envoy/config/route/v4alpha/route_components.proto +++ b/api/envoy/config/route/v4alpha/route_components.proto @@ -371,7 +371,7 @@ message WeightedCluster { string runtime_key_prefix = 2; } -// [#next-free-field: 12] +// [#next-free-field: 13] message RouteMatch { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RouteMatch"; @@ -393,6 +393,13 @@ message RouteMatch { google.protobuf.BoolValue validated = 2; } + // [#not-implemented-hide:] + // An extensible message for matching CONNECT requests. + message ConnectMatcher { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RouteMatch.ConnectMatcher"; + } + reserved 5, 3; reserved "regex"; @@ -421,6 +428,16 @@ message RouteMatch { // on :path, etc. The issue with that is it is unclear how to generically deal with query string // stripping. This needs more thought.] type.matcher.v3.RegexMatcher safe_regex = 10 [(validate.rules).message = {required: true}]; + + // [#not-implemented-hide:] + // If this is used as the matcher, the matcher will only match CONNECT requests. + // Note that this will not match HTTP/2 upgrade-style CONNECT requests + // (WebSocket and the like) as they are normalized in Envoy as HTTP/1.1 style + // upgrades. + // This is the only way to match CONNECT requests for HTTP/1.1. For HTTP/2, + // where CONNECT requests may have a path, the path matchers will work if + // there is a path present. + ConnectMatcher connect_matcher = 12; } // Indicates that prefix/path matching should be case insensitive. 
The default @@ -706,6 +723,16 @@ message RouteAction { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RouteAction.UpgradeConfig"; + // [#not-implemented-hide:] + // Configuration for sending data upstream as a raw data payload. This is used for + // CONNECT requests, when forwarding CONNECT payload as raw TCP. + message ConnectConfig { + // TODO(alyssawilk) add proxy proto configuration here. + + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RouteAction.UpgradeConfig.ConnectConfig"; + } + // The case-insensitive name of this upgrade, e.g. "websocket". // For each upgrade type present in upgrade_configs, requests with // Upgrade: [upgrade_type] will be proxied upstream. @@ -714,6 +741,11 @@ message RouteAction { // Determines if upgrades are available on this route. Defaults to true. google.protobuf.BoolValue enabled = 2; + + // [#not-implemented-hide:] + // Configuration for sending data upstream as a raw data payload. This is used for + // CONNECT requests, when forwarding CONNECT payload as raw TCP. + ConnectConfig connect_config = 3; } reserved 12, 18, 19, 16, 22, 21, 10; diff --git a/generated_api_shadow/envoy/config/route/v3/route_components.proto b/generated_api_shadow/envoy/config/route/v3/route_components.proto index 616e76af302e..631aa9af8602 100644 --- a/generated_api_shadow/envoy/config/route/v3/route_components.proto +++ b/generated_api_shadow/envoy/config/route/v3/route_components.proto @@ -373,7 +373,7 @@ message WeightedCluster { string runtime_key_prefix = 2; } -// [#next-free-field: 12] +// [#next-free-field: 13] message RouteMatch { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RouteMatch"; @@ -395,6 +395,11 @@ message RouteMatch { google.protobuf.BoolValue validated = 2; } + // [#not-implemented-hide:] + // An extensible message for matching CONNECT requests. + message ConnectMatcher { + } + reserved 5; // If specified, the route is a prefix rule meaning that the prefix must @@ -419,9 +424,19 @@ message RouteMatch { // stripping. This needs more thought.] repeated HeaderMatcher headers = 6; + // [#not-implemented-hide:] + // If this is used as the matcher, the matcher will only match CONNECT requests. + // Note that this will not match HTTP/2 upgrade-style CONNECT requests + // (WebSocket and the like) as they are normalized in Envoy as HTTP/1.1 style + // upgrades. + // This is the only way to match CONNECT requests for HTTP/1.1. For HTTP/2, + // where CONNECT requests may have a path, the path matchers will work if + // there is a path present. + repeated QueryParameterMatcher query_parameters = 7; + // Indicates that prefix/path matching should be case insensitive. The default // is true. - repeated QueryParameterMatcher query_parameters = 7; + GrpcRouteMatchOptions grpc = 8; // Indicates that the route should additionally match on a runtime key. Every time the route // is considered for a match, it must also fall under the percentage of matches indicated by @@ -439,35 +454,35 @@ message RouteMatch { // integer with the assumption that the value is an integral percentage out of 100. For // instance, a runtime key lookup returning the value "42" would parse as a FractionalPercent // whose numerator is 42 and denominator is HUNDRED. This preserves legacy semantics. - GrpcRouteMatchOptions grpc = 8; - - // Specifies a set of headers that the route should match on. 
The router will - // check the request’s headers against all the specified headers in the route - // config. A match will happen if all the headers in the route are present in - // the request with the same values (or based on presence if the value field - // is not in the config). TlsContextMatchOptions tls_context = 11; oneof path_specifier { option (validate.required) = true; + // Specifies a set of headers that the route should match on. The router will + // check the request’s headers against all the specified headers in the route + // config. A match will happen if all the headers in the route are present in + // the request with the same values (or based on presence if the value field + // is not in the config). + string prefix = 1; + // Specifies a set of URL query parameters on which the route should // match. The router will check the query string from the *path* header // against all the specified query parameters. If the number of specified // query parameters is nonzero, they all must match the *path* header's // query string for a match to occur. - string prefix = 1; + string path = 2; // If specified, only gRPC requests will be matched. The router will check // that the content-type header has a application/grpc or one of the various // application/grpc+ values. - string path = 2; + type.matcher.v3.RegexMatcher safe_regex = 10 [(validate.rules).message = {required: true}]; // If specified, the client tls context will be matched against the defined // match options. // // [#next-major-version: unify with RBAC] - type.matcher.v3.RegexMatcher safe_regex = 10 [(validate.rules).message = {required: true}]; + ConnectMatcher connect_matcher = 12; string hidden_envoy_deprecated_regex = 3 [ deprecated = true, @@ -716,6 +731,13 @@ message RouteAction { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RouteAction.UpgradeConfig"; + // [#not-implemented-hide:] + // Configuration for sending data upstream as a raw data payload. This is used for + // CONNECT requests, when forwarding CONNECT payload as raw TCP. + message ConnectConfig { + // TODO(alyssawilk) add proxy proto configuration here. + } + // The case-insensitive name of this upgrade, e.g. "websocket". // For each upgrade type present in upgrade_configs, requests with // Upgrade: [upgrade_type] will be proxied upstream. @@ -724,6 +746,11 @@ message RouteAction { // Determines if upgrades are available on this route. Defaults to true. google.protobuf.BoolValue enabled = 2; + + // [#not-implemented-hide:] + // Configuration for sending data upstream as a raw data payload. This is used for + // CONNECT requests, when forwarding CONNECT payload as raw TCP. + ConnectConfig connect_config = 3; } reserved 12, 18, 19, 16, 22, 21; diff --git a/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto b/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto index e813b632edb0..4a54ff847063 100644 --- a/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto +++ b/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto @@ -371,7 +371,7 @@ message WeightedCluster { string runtime_key_prefix = 2; } -// [#next-free-field: 12] +// [#next-free-field: 13] message RouteMatch { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RouteMatch"; @@ -393,6 +393,13 @@ message RouteMatch { google.protobuf.BoolValue validated = 2; } + // [#not-implemented-hide:] + // An extensible message for matching CONNECT requests. 
+ message ConnectMatcher { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RouteMatch.ConnectMatcher"; + } + reserved 5, 3; reserved "regex"; @@ -421,6 +428,16 @@ message RouteMatch { // on :path, etc. The issue with that is it is unclear how to generically deal with query string // stripping. This needs more thought.] type.matcher.v3.RegexMatcher safe_regex = 10 [(validate.rules).message = {required: true}]; + + // [#not-implemented-hide:] + // If this is used as the matcher, the matcher will only match CONNECT requests. + // Note that this will not match HTTP/2 upgrade-style CONNECT requests + // (WebSocket and the like) as they are normalized in Envoy as HTTP/1.1 style + // upgrades. + // This is the only way to match CONNECT requests for HTTP/1.1. For HTTP/2, + // where CONNECT requests may have a path, the path matchers will work if + // there is a path present. + ConnectMatcher connect_matcher = 12; } // Indicates that prefix/path matching should be case insensitive. The default @@ -706,6 +723,16 @@ message RouteAction { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RouteAction.UpgradeConfig"; + // [#not-implemented-hide:] + // Configuration for sending data upstream as a raw data payload. This is used for + // CONNECT requests, when forwarding CONNECT payload as raw TCP. + message ConnectConfig { + // TODO(alyssawilk) add proxy proto configuration here. + + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RouteAction.UpgradeConfig.ConnectConfig"; + } + // The case-insensitive name of this upgrade, e.g. "websocket". // For each upgrade type present in upgrade_configs, requests with // Upgrade: [upgrade_type] will be proxied upstream. @@ -714,6 +741,11 @@ message RouteAction { // Determines if upgrades are available on this route. Defaults to true. google.protobuf.BoolValue enabled = 2; + + // [#not-implemented-hide:] + // Configuration for sending data upstream as a raw data payload. This is used for + // CONNECT requests, when forwarding CONNECT payload as raw TCP. + ConnectConfig connect_config = 3; } reserved 12, 18, 19, 16, 22, 21, 10; diff --git a/include/envoy/router/BUILD b/include/envoy/router/BUILD index 6ed49171af71..b829997d24aa 100644 --- a/include/envoy/router/BUILD +++ b/include/envoy/router/BUILD @@ -66,6 +66,7 @@ envoy_cc_library( "//source/common/protobuf", "//source/common/protobuf:utility_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + "@envoy_api//envoy/config/route/v3:pkg_cc_proto", "@envoy_api//envoy/type/v3:pkg_cc_proto", ], ) diff --git a/include/envoy/router/router.h b/include/envoy/router/router.h index 13032173d929..39bffffa50d1 100644 --- a/include/envoy/router/router.h +++ b/include/envoy/router/router.h @@ -11,6 +11,7 @@ #include "envoy/access_log/access_log.h" #include "envoy/common/matchers.h" #include "envoy/config/core/v3/base.pb.h" +#include "envoy/config/route/v3/route_components.pb.h" #include "envoy/config/typed_metadata.h" #include "envoy/http/codec.h" #include "envoy/http/codes.h" @@ -825,6 +826,12 @@ class RouteEntry : public ResponseEntry { */ virtual const UpgradeMap& upgradeMap() const PURE; + using ConnectConfig = envoy::config::route::v3::RouteAction::UpgradeConfig::ConnectConfig; + /** + * If present, informs how to handle proxying CONNECT requests on this route. + */ + virtual const absl::optional& connectConfig() const PURE; + /** * @returns the internal redirect action which should be taken on this route. 
*/ diff --git a/source/common/http/async_client_impl.cc b/source/common/http/async_client_impl.cc index b2e40ab397f6..3ef9de94a806 100644 --- a/source/common/http/async_client_impl.cc +++ b/source/common/http/async_client_impl.cc @@ -30,6 +30,8 @@ const Config::TypedMetadataImpl AsyncStreamImpl::RouteEntryImpl::typed_metadata_({}); const AsyncStreamImpl::NullPathMatchCriterion AsyncStreamImpl::RouteEntryImpl::path_match_criterion_; +const absl::optional + AsyncStreamImpl::RouteEntryImpl::connect_config_nullopt_; const std::list AsyncStreamImpl::NullConfig::internal_only_headers_; AsyncClientImpl::AsyncClientImpl(Upstream::ClusterInfoConstSharedPtr cluster, diff --git a/source/common/http/async_client_impl.h b/source/common/http/async_client_impl.h index 608b2188dc1b..143fd8c29541 100644 --- a/source/common/http/async_client_impl.h +++ b/source/common/http/async_client_impl.h @@ -268,6 +268,9 @@ class AsyncStreamImpl : public AsyncClient::Stream, const Router::RouteSpecificFilterConfig* perFilterConfig(const std::string&) const override { return nullptr; } + const absl::optional& connectConfig() const override { + return connect_config_nullopt_; + } bool includeAttemptCountInRequest() const override { return false; } bool includeAttemptCountInResponse() const override { return false; } @@ -292,6 +295,7 @@ class AsyncStreamImpl : public AsyncClient::Stream, Router::RouteEntry::UpgradeMap upgrade_map_; const std::string& cluster_name_; absl::optional timeout_; + static const absl::optional connect_config_nullopt_; const std::string route_name_; }; diff --git a/source/common/router/config_impl.cc b/source/common/router/config_impl.cc index 0287a5b6c974..7344c12f45d1 100644 --- a/source/common/router/config_impl.cc +++ b/source/common/router/config_impl.cc @@ -386,6 +386,12 @@ RouteEntryImplBase::RouteEntryImplBase(const VirtualHostImpl& vhost, if (!success) { throw EnvoyException(absl::StrCat("Duplicate upgrade ", upgrade_config.upgrade_type())); } + if (upgrade_config.upgrade_type() == Http::Headers::get().MethodValues.Connect) { + connect_config_ = upgrade_config.connect_config(); + } else if (upgrade_config.has_connect_config()) { + throw EnvoyException(absl::StrCat("Non-CONNECT upgrade type ", upgrade_config.upgrade_type(), + " has ConnectConfig")); + } } if (route.route().has_regex_rewrite()) { @@ -917,6 +923,27 @@ RouteConstSharedPtr RegexRouteEntryImpl::matches(const Http::RequestHeaderMap& h return nullptr; } +ConnectRouteEntryImpl::ConnectRouteEntryImpl( + const VirtualHostImpl& vhost, const envoy::config::route::v3::Route& route, + Server::Configuration::ServerFactoryContext& factory_context, + ProtobufMessage::ValidationVisitor& validator) + : RouteEntryImplBase(vhost, route, factory_context, validator) {} + +void ConnectRouteEntryImpl::rewritePathHeader(Http::RequestHeaderMap& headers, + bool insert_envoy_original_path) const { + const absl::string_view path = Http::PathUtil::removeQueryAndFragment(getPath(headers)); + finalizePathHeader(headers, path, insert_envoy_original_path); +} + +RouteConstSharedPtr ConnectRouteEntryImpl::matches(const Http::RequestHeaderMap& headers, + const StreamInfo::StreamInfo&, + uint64_t random_value) const { + if (Http::HeaderUtility::isConnect(headers)) { + return clusterEntry(headers, random_value); + } + return nullptr; +} + VirtualHostImpl::VirtualHostImpl(const envoy::config::route::v3::VirtualHost& virtual_host, const ConfigImpl& global_route_config, Server::Configuration::ServerFactoryContext& factory_context, @@ -976,6 +1003,10 @@ 
VirtualHostImpl::VirtualHostImpl(const envoy::config::route::v3::VirtualHost& vi routes_.emplace_back(new RegexRouteEntryImpl(*this, route, factory_context, validator)); break; } + case envoy::config::route::v3::RouteMatch::PathSpecifierCase::kConnectMatcher: { + routes_.emplace_back(new ConnectRouteEntryImpl(*this, route, factory_context, validator)); + break; + } case envoy::config::route::v3::RouteMatch::PathSpecifierCase::PATH_SPECIFIER_NOT_SET: NOT_REACHED_GCOVR_EXCL_LINE; } @@ -1118,9 +1149,8 @@ RouteConstSharedPtr VirtualHostImpl::getRouteFromEntries(const Http::RequestHead // Check for a route that matches the request. for (const RouteEntryImplBaseConstSharedPtr& route : routes_) { - if (!headers.Path()) { - // TODO(alyssawilk) allow specifically for kConnectMatcher routes. - return nullptr; + if (!headers.Path() && !route->supportsPathlessHeaders()) { + continue; } RouteConstSharedPtr route_entry = route->matches(headers, stream_info, random_value); if (nullptr != route_entry) { diff --git a/source/common/router/config_impl.h b/source/common/router/config_impl.h index 89add8a903e5..54ed2be8a533 100644 --- a/source/common/router/config_impl.h +++ b/source/common/router/config_impl.h @@ -53,6 +53,9 @@ class Matchable { virtual RouteConstSharedPtr matches(const Http::RequestHeaderMap& headers, const StreamInfo::StreamInfo& stream_info, uint64_t random_value) const PURE; + + // By default, matchers do not support null Path headers. + virtual bool supportsPathlessHeaders() const { return false; } }; class PerFilterConfigs { @@ -465,6 +468,7 @@ class RouteEntryImplBase : public RouteEntry, bool includeAttemptCountInResponse() const override { return vhost_.includeAttemptCountInResponse(); } + const absl::optional& connectConfig() const override { return connect_config_; } const UpgradeMap& upgradeMap() const override { return upgrade_map_; } InternalRedirectAction internalRedirectAction() const override { return internal_redirect_action_; @@ -494,6 +498,7 @@ class RouteEntryImplBase : public RouteEntry, std::string regex_rewrite_substitution_; const std::string host_rewrite_; bool include_vh_rate_limits_; + absl::optional connect_config_; RouteConstSharedPtr clusterEntry(const Http::HeaderMap& headers, uint64_t random_value) const; @@ -591,6 +596,9 @@ class RouteEntryImplBase : public RouteEntry, bool includeAttemptCountInResponse() const override { return parent_->includeAttemptCountInResponse(); } + const absl::optional& connectConfig() const override { + return parent_->connectConfig(); + } const UpgradeMap& upgradeMap() const override { return parent_->upgradeMap(); } InternalRedirectAction internalRedirectAction() const override { return parent_->internalRedirectAction(); @@ -824,6 +832,29 @@ class RegexRouteEntryImpl : public RouteEntryImplBase { std::string regex_str_; }; +/** + * Route entry implementation for CONNECT requests. 
+ */ +class ConnectRouteEntryImpl : public RouteEntryImplBase { +public: + ConnectRouteEntryImpl(const VirtualHostImpl& vhost, const envoy::config::route::v3::Route& route, + Server::Configuration::ServerFactoryContext& factory_context, + ProtobufMessage::ValidationVisitor& validator); + + // Router::PathMatchCriterion + const std::string& matcher() const override { return EMPTY_STRING; } + PathMatchType matchType() const override { return PathMatchType::None; } + + // Router::Matchable + RouteConstSharedPtr matches(const Http::RequestHeaderMap& headers, + const StreamInfo::StreamInfo& stream_info, + uint64_t random_value) const override; + + // Router::DirectResponseEntry + void rewritePathHeader(Http::RequestHeaderMap&, bool) const override; + + bool supportsPathlessHeaders() const override { return true; } +}; /** * Wraps the route configuration which matches an incoming request headers to a backend cluster. * This is split out mainly to help with unit testing. diff --git a/test/common/router/config_impl_test.cc b/test/common/router/config_impl_test.cc index e282f6b9ae8c..02772af8eb86 100644 --- a/test/common/router/config_impl_test.cc +++ b/test/common/router/config_impl_test.cc @@ -441,6 +441,88 @@ TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(TestLegacyRoutes)) { } } +TEST_F(RouteMatcherTest, TestConnectRoutes) { + const std::string yaml = R"EOF( +virtual_hosts: +- name: connect + domains: + - bat3.com + routes: + - match: + safe_regex: + google_re2: {} + regex: "foobar" + route: + cluster: connect_break + - match: + connect_matcher: + {} + route: + cluster: connect_match + prefix_rewrite: "/rewrote" + - match: + safe_regex: + google_re2: {} + regex: ".*" + route: + cluster: connect_fallthrough +- name: connect2 + domains: + - bat4.com + routes: + - match: + connect_matcher: + {} + redirect: { path_redirect: /new_path } +- name: default + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: instant-server + timeout: 30s + virtual_clusters: + - headers: + - name: ":path" + safe_regex_match: + google_re2: {} + regex: "^/users/\\d+/location$" + - name: ":method" + exact_match: POST + name: ulu + )EOF"; + NiceMock stream_info; + TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + + // Connect matching + EXPECT_EQ("connect_match", + config.route(genHeaders("bat3.com", " ", "CONNECT"), 0)->routeEntry()->clusterName()); + EXPECT_EQ( + "connect_match", + config.route(genPathlessHeaders("bat3.com", "CONNECT"), 0)->routeEntry()->clusterName()); + EXPECT_EQ("connect_fallthrough", + config.route(genHeaders("bat3.com", " ", "GET"), 0)->routeEntry()->clusterName()); + + // Prefix rewrite for CONNECT with path (for HTTP/2) + { + Http::TestRequestHeaderMapImpl headers = + genHeaders("bat3.com", "/api/locations?works=true", "CONNECT"); + const RouteEntry* route = config.route(headers, 0)->routeEntry(); + route->finalizeRequestHeaders(headers, stream_info, true); + EXPECT_EQ("/rewrote?works=true", headers.get_(Http::Headers::get().Path)); + } + // Prefix rewrite for CONNECT without path (for non-crashing) + { + Http::TestRequestHeaderMapImpl headers = genPathlessHeaders("bat4.com", "CONNECT"); + const DirectResponseEntry* redirect = config.route(headers, 0)->directResponseEntry(); + ASSERT(redirect != nullptr); + redirect->rewritePathHeader(headers, true); + EXPECT_EQ("http://bat4.com/new_path", redirect->newPath(headers)); + } +} + TEST_F(RouteMatcherTest, TestRoutes) { const std::string yaml = R"EOF( virtual_hosts: @@ -696,7 +778,6 @@ 
TEST_F(RouteMatcherTest, TestRoutes) { exact_match: POST name: ulu )EOF"; - NiceMock stream_info; TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); @@ -6707,6 +6788,30 @@ name: RetriableStatusCodes EnvoyException, "Duplicate upgrade WebSocket"); } +TEST_F(RouteConfigurationV2, BadConnectConfig) { + const std::string yaml = R"EOF( +name: RetriableStatusCodes +virtual_hosts: + - name: regex + domains: [idle.lyft.com] + routes: + - match: + safe_regex: + google_re2: {} + regex: "/regex" + route: + cluster: some-cluster + upgrade_configs: + - upgrade_type: Websocket + connect_config: {} + enabled: false + )EOF"; + + EXPECT_THROW_WITH_MESSAGE( + TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), + EnvoyException, "Non-CONNECT upgrade type Websocket has ConnectConfig"); +} + // Verifies that we're creating a new instance of the retry plugins on each call instead of always // returning the same one. TEST_F(RouteConfigurationV2, RetryPluginsAreNotReused) { diff --git a/test/mocks/router/mocks.cc b/test/mocks/router/mocks.cc index 5f4f8d487c1e..32e5ce7ba79e 100644 --- a/test/mocks/router/mocks.cc +++ b/test/mocks/router/mocks.cc @@ -97,6 +97,7 @@ MockRouteEntry::MockRouteEntry() { ON_CALL(*this, upgradeMap()).WillByDefault(ReturnRef(upgrade_map_)); ON_CALL(*this, hedgePolicy()).WillByDefault(ReturnRef(hedge_policy_)); ON_CALL(*this, routeName()).WillByDefault(ReturnRef(route_name_)); + ON_CALL(*this, connectConfig()).WillByDefault(ReturnRef(connect_config_)); } MockRouteEntry::~MockRouteEntry() = default; diff --git a/test/mocks/router/mocks.h b/test/mocks/router/mocks.h index 9ed7b8ead74b..3b2ec8ee189a 100644 --- a/test/mocks/router/mocks.h +++ b/test/mocks/router/mocks.h @@ -353,6 +353,7 @@ class MockRouteEntry : public RouteEntry { MOCK_METHOD(const RouteSpecificFilterConfig*, perFilterConfig, (const std::string&), (const)); MOCK_METHOD(bool, includeAttemptCountInRequest, (), (const)); MOCK_METHOD(bool, includeAttemptCountInResponse, (), (const)); + MOCK_METHOD(const absl::optional&, connectConfig, (), (const)); MOCK_METHOD(const UpgradeMap&, upgradeMap, (), (const)); MOCK_METHOD(InternalRedirectAction, internalRedirectAction, (), (const)); MOCK_METHOD(uint32_t, maxInternalRedirects, (), (const)); @@ -374,6 +375,7 @@ class MockRouteEntry : public RouteEntry { testing::NiceMock path_match_criterion_; envoy::config::core::v3::Metadata metadata_; UpgradeMap upgrade_map_; + absl::optional connect_config_; }; class MockDecorator : public Decorator { From 62777e87edc9f16af5c7920bb86f198585789a3d Mon Sep 17 00:00:00 2001 From: Snow Pettersen Date: Thu, 23 Apr 2020 11:11:27 -0700 Subject: [PATCH 019/909] allow specifying the API version of bootstrap from the command line (#10803) Adds a --boostrap_version flag that can be used to determine which API version the bootstrap should be parsed as. 
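A minimal sketch of the selection logic (hypothetical names and stand-in types, not the code added in this patch): an explicitly requested version is parsed as-is, while an unset value falls back from previous-version-plus-upgrade to the latest version, matching the docs text below.

    // Illustrative only; stand-in types and function names, not Envoy's API.
    #include <cstdint>
    #include <stdexcept>
    #include <string>

    #include "absl/types/optional.h"

    namespace bootstrap_example {

    struct Bootstrap {}; // stand-in for the v3 bootstrap proto

    // Hypothetical parsers; real ones would convert YAML into the v2 or v3
    // bootstrap proto (upgrading v2 to v3) and throw on a parse failure.
    Bootstrap parseAsV2AndUpgrade(const std::string& /*yaml*/) { return {}; }
    Bootstrap parseAsLatest(const std::string& /*yaml*/) { return {}; }

    Bootstrap loadBootstrap(const std::string& yaml,
                            const absl::optional<uint32_t>& version) {
      if (version.has_value()) {
        // An explicit --bootstrap-version is honored without any guessing.
        switch (*version) {
        case 2:
          return parseAsV2AndUpgrade(yaml);
        case 3:
          return parseAsLatest(yaml);
        default:
          throw std::runtime_error("unsupported bootstrap version");
        }
      }
      // Unset: try the previous API version and upgrade it; if that fails,
      // fall back to parsing the config as the latest version.
      try {
        return parseAsV2AndUpgrade(yaml);
      } catch (const std::exception&) {
        return parseAsLatest(yaml);
      }
    }

    } // namespace bootstrap_example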
Risk Level: Low Testing: UTs Docs Changes: Flag docs Release Notes: n/a Fixes #10343 Signed-off-by: Snow Pettersen Co-authored-by: Snow Pettersen --- api/envoy/admin/v3/server_info.proto | 5 +- api/envoy/admin/v4alpha/server_info.proto | 5 +- docs/root/operations/cli.rst | 9 +- .../envoy/admin/v3/server_info.proto | 5 +- .../envoy/admin/v4alpha/server_info.proto | 5 +- include/envoy/server/options.h | 6 + source/common/protobuf/utility.cc | 129 ++++++++++-------- source/common/protobuf/utility.h | 9 +- source/server/BUILD | 1 + source/server/options_impl.cc | 8 ++ source/server/options_impl.h | 2 + source/server/server.cc | 36 ++++- test/common/protobuf/utility_test.cc | 26 ++++ test/mocks/server/mocks.cc | 1 + test/mocks/server/mocks.h | 2 + test/server/server_test.cc | 42 ++++++ .../valid_v3_but_invalid_v2_bootstrap.pb_text | 9 ++ 17 files changed, 232 insertions(+), 68 deletions(-) create mode 100644 test/server/test_data/server/valid_v3_but_invalid_v2_bootstrap.pb_text diff --git a/api/envoy/admin/v3/server_info.proto b/api/envoy/admin/v3/server_info.proto index ac0204428053..8e7a0ef42005 100644 --- a/api/envoy/admin/v3/server_info.proto +++ b/api/envoy/admin/v3/server_info.proto @@ -54,7 +54,7 @@ message ServerInfo { CommandLineOptions command_line_options = 6; } -// [#next-free-field: 29] +// [#next-free-field: 30] message CommandLineOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.CommandLineOptions"; @@ -153,4 +153,7 @@ message CommandLineOptions { // See :option:`--disable-extensions` for details. repeated string disabled_extensions = 28; + + // See :option:`--bootstrap-version` for details. + uint32 bootstrap_version = 29; } diff --git a/api/envoy/admin/v4alpha/server_info.proto b/api/envoy/admin/v4alpha/server_info.proto index 867a9255bc51..f32f71a8093a 100644 --- a/api/envoy/admin/v4alpha/server_info.proto +++ b/api/envoy/admin/v4alpha/server_info.proto @@ -54,7 +54,7 @@ message ServerInfo { CommandLineOptions command_line_options = 6; } -// [#next-free-field: 29] +// [#next-free-field: 30] message CommandLineOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.CommandLineOptions"; @@ -152,4 +152,7 @@ message CommandLineOptions { // See :option:`--disable-extensions` for details. repeated string disabled_extensions = 28; + + // See :option:`--bootstrap-version` for details. + uint32 bootstrap_version = 29; } diff --git a/docs/root/operations/cli.rst b/docs/root/operations/cli.rst index a4729f1e3f07..6bd462e4b26b 100644 --- a/docs/root/operations/cli.rst +++ b/docs/root/operations/cli.rst @@ -21,7 +21,7 @@ following are the command line options that Envoy supports. .. option:: --config-yaml - *(optional)* The YAML string for a v2 bootstrap configuration. If :option:`--config-path` is also set, + *(optional)* The YAML string for a bootstrap configuration. If :option:`--config-path` is also set, the values in this YAML string will override and merge with the bootstrap loaded from :option:`--config-path`. Because YAML is a superset of JSON, a JSON string may also be passed to :option:`--config-yaml`. @@ -31,6 +31,13 @@ following are the command line options that Envoy supports. ./envoy -c bootstrap.yaml --config-yaml "node: {id: 'node1'}" +.. option:: --bootstrap-version + + *(optional)* The API version to load the bootstrap as. The value should be a single integer, e.g. + to parse the bootstrap configuration as V3, specify ``--bootstrap-version 3``. 
If unset, Envoy will + attempt to load the bootstrap as the previous API version and upgrade it to the latest. If that fails, + Envoy will attempt to load the configuration as the latest version. + .. option:: --mode *(optional)* One of the operating modes for Envoy: diff --git a/generated_api_shadow/envoy/admin/v3/server_info.proto b/generated_api_shadow/envoy/admin/v3/server_info.proto index d412a7f011de..c94a001a3a66 100644 --- a/generated_api_shadow/envoy/admin/v3/server_info.proto +++ b/generated_api_shadow/envoy/admin/v3/server_info.proto @@ -54,7 +54,7 @@ message ServerInfo { CommandLineOptions command_line_options = 6; } -// [#next-free-field: 29] +// [#next-free-field: 30] message CommandLineOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.CommandLineOptions"; @@ -152,6 +152,9 @@ message CommandLineOptions { // See :option:`--disable-extensions` for details. repeated string disabled_extensions = 28; + // See :option:`--bootstrap-version` for details. + uint32 bootstrap_version = 29; + uint64 hidden_envoy_deprecated_max_stats = 20 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; diff --git a/generated_api_shadow/envoy/admin/v4alpha/server_info.proto b/generated_api_shadow/envoy/admin/v4alpha/server_info.proto index 867a9255bc51..f32f71a8093a 100644 --- a/generated_api_shadow/envoy/admin/v4alpha/server_info.proto +++ b/generated_api_shadow/envoy/admin/v4alpha/server_info.proto @@ -54,7 +54,7 @@ message ServerInfo { CommandLineOptions command_line_options = 6; } -// [#next-free-field: 29] +// [#next-free-field: 30] message CommandLineOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.CommandLineOptions"; @@ -152,4 +152,7 @@ message CommandLineOptions { // See :option:`--disable-extensions` for details. repeated string disabled_extensions = 28; + + // See :option:`--bootstrap-version` for details. + uint32 bootstrap_version = 29; } diff --git a/include/envoy/server/options.h b/include/envoy/server/options.h index 3a9ad7545ef7..96baa7fbdfef 100644 --- a/include/envoy/server/options.h +++ b/include/envoy/server/options.h @@ -9,6 +9,7 @@ #include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/network/address.h" +#include "absl/types/optional.h" #include "spdlog/spdlog.h" namespace Envoy { @@ -85,6 +86,11 @@ class Options { */ virtual const envoy::config::bootstrap::v3::Bootstrap& configProto() const PURE; + /** + * @return const absl::optional& the bootstrap version to use, if specified. + */ + virtual const absl::optional& bootstrapVersion() const PURE; + /** * @return bool allow unknown fields in the static configuration? 
*/ diff --git a/source/common/protobuf/utility.cc b/source/common/protobuf/utility.cc index e3715a13c79d..684c8e2ffab1 100644 --- a/source/common/protobuf/utility.cc +++ b/source/common/protobuf/utility.cc @@ -103,7 +103,7 @@ ProtobufWkt::Value parseYamlNode(const YAML::Node& node) { void jsonConvertInternal(const Protobuf::Message& source, ProtobufMessage::ValidationVisitor& validation_visitor, - Protobuf::Message& dest) { + Protobuf::Message& dest, bool do_boosting = true) { Protobuf::util::JsonPrintOptions json_options; json_options.preserve_proto_field_names = true; std::string json; @@ -112,7 +112,7 @@ void jsonConvertInternal(const Protobuf::Message& source, throw EnvoyException(fmt::format("Unable to convert protobuf message to JSON string: {} {}", status.ToString(), source.DebugString())); } - MessageUtil::loadFromJson(json, dest, validation_visitor); + MessageUtil::loadFromJson(json, dest, validation_visitor, do_boosting); } enum class MessageVersion { @@ -144,6 +144,7 @@ void tryWithApiBoosting(MessageXformFn f, Protobuf::Message& message) { f(message, MessageVersion::LATEST_VERSION); return; } + Protobuf::DynamicMessageFactory dmf; auto earlier_message = ProtobufTypes::MessagePtr(dmf.GetPrototype(earlier_version_desc)->New()); ASSERT(earlier_message != nullptr); @@ -270,43 +271,49 @@ size_t MessageUtil::hash(const Protobuf::Message& message) { } void MessageUtil::loadFromJson(const std::string& json, Protobuf::Message& message, - ProtobufMessage::ValidationVisitor& validation_visitor) { - tryWithApiBoosting( - [&json, &validation_visitor](Protobuf::Message& message, MessageVersion message_version) { - Protobuf::util::JsonParseOptions options; - options.case_insensitive_enum_parsing = true; - // Let's first try and get a clean parse when checking for unknown fields; - // this should be the common case. - options.ignore_unknown_fields = false; - const auto strict_status = Protobuf::util::JsonStringToMessage(json, &message, options); - if (strict_status.ok()) { - // Success, no need to do any extra work. - return; - } - // If we fail, we see if we get a clean parse when allowing unknown fields. - // This is essentially a workaround - // for https://github.com/protocolbuffers/protobuf/issues/5967. - // TODO(htuch): clean this up when protobuf supports JSON/YAML unknown field - // detection directly. - options.ignore_unknown_fields = true; - const auto relaxed_status = Protobuf::util::JsonStringToMessage(json, &message, options); - // If we still fail with relaxed unknown field checking, the error has nothing - // to do with unknown fields. - if (!relaxed_status.ok()) { - throw EnvoyException("Unable to parse JSON as proto (" + relaxed_status.ToString() + - "): " + json); - } - // We know it's an unknown field at this point. If we're at the latest - // version, then it's definitely an unknown field, otherwise we try to - // load again at a later version. 
- if (message_version == MessageVersion::LATEST_VERSION) { - validation_visitor.onUnknownField("type " + message.GetTypeName() + " reason " + - strict_status.ToString()); - } else { - throw ApiBoostRetryException("Unknown field, possibly a rename, try again."); - } - }, - message); + ProtobufMessage::ValidationVisitor& validation_visitor, + bool do_boosting) { + auto load_json = [&json, &validation_visitor](Protobuf::Message& message, + MessageVersion message_version) { + Protobuf::util::JsonParseOptions options; + options.case_insensitive_enum_parsing = true; + // Let's first try and get a clean parse when checking for unknown fields; + // this should be the common case. + options.ignore_unknown_fields = false; + const auto strict_status = Protobuf::util::JsonStringToMessage(json, &message, options); + if (strict_status.ok()) { + // Success, no need to do any extra work. + return; + } + // If we fail, we see if we get a clean parse when allowing unknown fields. + // This is essentially a workaround + // for https://github.com/protocolbuffers/protobuf/issues/5967. + // TODO(htuch): clean this up when protobuf supports JSON/YAML unknown field + // detection directly. + options.ignore_unknown_fields = true; + const auto relaxed_status = Protobuf::util::JsonStringToMessage(json, &message, options); + // If we still fail with relaxed unknown field checking, the error has nothing + // to do with unknown fields. + if (!relaxed_status.ok()) { + throw EnvoyException("Unable to parse JSON as proto (" + relaxed_status.ToString() + + "): " + json); + } + // We know it's an unknown field at this point. If we're at the latest + // version, then it's definitely an unknown field, otherwise we try to + // load again at a later version. + if (message_version == MessageVersion::LATEST_VERSION) { + validation_visitor.onUnknownField("type " + message.GetTypeName() + " reason " + + strict_status.ToString()); + } else { + throw ApiBoostRetryException("Unknown field, possibly a rename, try again."); + } + }; + + if (do_boosting) { + tryWithApiBoosting(load_json, message); + } else { + load_json(message, MessageVersion::LATEST_VERSION); + } } void MessageUtil::loadFromJson(const std::string& json, ProtobufWkt::Struct& message) { @@ -316,11 +323,12 @@ void MessageUtil::loadFromJson(const std::string& json, ProtobufWkt::Struct& mes } void MessageUtil::loadFromYaml(const std::string& yaml, Protobuf::Message& message, - ProtobufMessage::ValidationVisitor& validation_visitor) { + ProtobufMessage::ValidationVisitor& validation_visitor, + bool do_boosting) { ProtobufWkt::Value value = ValueUtil::loadFromYaml(yaml); if (value.kind_case() == ProtobufWkt::Value::kStructValue || value.kind_case() == ProtobufWkt::Value::kListValue) { - jsonConvertInternal(value, validation_visitor, message); + jsonConvertInternal(value, validation_visitor, message, do_boosting); return; } throw EnvoyException("Unable to convert YAML as JSON: " + yaml); @@ -334,7 +342,7 @@ void MessageUtil::loadFromYaml(const std::string& yaml, ProtobufWkt::Struct& mes void MessageUtil::loadFromFile(const std::string& path, Protobuf::Message& message, ProtobufMessage::ValidationVisitor& validation_visitor, - Api::Api& api) { + Api::Api& api, bool do_boosting) { const std::string contents = api.fileSystem().fileReadToEnd(path); // If the filename ends with .pb, attempt to parse it as a binary proto. 
if (absl::EndsWith(path, FileExtensions::get().ProtoBinary)) { @@ -348,26 +356,31 @@ void MessageUtil::loadFromFile(const std::string& path, Protobuf::Message& messa } // If the filename ends with .pb_text, attempt to parse it as a text proto. if (absl::EndsWith(path, FileExtensions::get().ProtoText)) { - tryWithApiBoosting( - [&contents, &path](Protobuf::Message& message, MessageVersion message_version) { - if (Protobuf::TextFormat::ParseFromString(contents, &message)) { - return; - } - if (message_version == MessageVersion::LATEST_VERSION) { - throw EnvoyException("Unable to parse file \"" + path + "\" as a text protobuf (type " + - message.GetTypeName() + ")"); - } else { - throw ApiBoostRetryException( - "Failed to parse at earlier version, trying again at later version."); - } - }, - message); + auto read_proto_text = [&contents, &path](Protobuf::Message& message, + MessageVersion message_version) { + if (Protobuf::TextFormat::ParseFromString(contents, &message)) { + return; + } + if (message_version == MessageVersion::LATEST_VERSION) { + throw EnvoyException("Unable to parse file \"" + path + "\" as a text protobuf (type " + + message.GetTypeName() + ")"); + } else { + throw ApiBoostRetryException( + "Failed to parse at earlier version, trying again at later version."); + } + }; + + if (do_boosting) { + tryWithApiBoosting(read_proto_text, message); + } else { + read_proto_text(message, MessageVersion::LATEST_VERSION); + } return; } if (absl::EndsWith(path, FileExtensions::get().Yaml)) { - loadFromYaml(contents, message, validation_visitor); + loadFromYaml(contents, message, validation_visitor, do_boosting); } else { - loadFromJson(contents, message, validation_visitor); + loadFromJson(contents, message, validation_visitor, do_boosting); } } diff --git a/source/common/protobuf/utility.h b/source/common/protobuf/utility.h index b2d4b828be1a..ffc9b4bab52d 100644 --- a/source/common/protobuf/utility.h +++ b/source/common/protobuf/utility.h @@ -216,13 +216,16 @@ class MessageUtil { static std::size_t hash(const Protobuf::Message& message); static void loadFromJson(const std::string& json, Protobuf::Message& message, - ProtobufMessage::ValidationVisitor& validation_visitor); + ProtobufMessage::ValidationVisitor& validation_visitor, + bool do_boosting = true); static void loadFromJson(const std::string& json, ProtobufWkt::Struct& message); static void loadFromYaml(const std::string& yaml, Protobuf::Message& message, - ProtobufMessage::ValidationVisitor& validation_visitor); + ProtobufMessage::ValidationVisitor& validation_visitor, + bool do_boosting = true); static void loadFromYaml(const std::string& yaml, ProtobufWkt::Struct& message); static void loadFromFile(const std::string& path, Protobuf::Message& message, - ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api); + ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api, + bool do_boosting = true); /** * Checks for use of deprecated fields in message and all sub-messages. 
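Since loadFromJson(), loadFromYaml() and loadFromFile() all default do_boosting to true, existing callers are unaffected; only callers that already know their input is the latest API version pass false. A minimal caller-side sketch (the YAML literal and cluster name are illustrative; the signatures and getStrictValidationVisitor() come from this diff):

// With do_boosting=false the YAML must already be valid v3; a v2-only field
// name is rejected instead of being upgraded.
envoy::config::cluster::v3::Cluster cluster;
MessageUtil::loadFromYaml(R"EOF(
name: some-cluster
connect_timeout: 1s
)EOF",
                          cluster, ProtobufMessage::getStrictValidationVisitor(),
                          /*do_boosting=*/false);

The LoadFromJsonNoBoosting test added below exercises the failure side of this: with boosting disabled, the deprecated v2 field drain_connections_on_host_removal is reported as an unknown field.
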
diff --git a/source/server/BUILD b/source/server/BUILD index 1a4eeb771ad8..3611191f990a 100644 --- a/source/server/BUILD +++ b/source/server/BUILD @@ -485,6 +485,7 @@ envoy_cc_library( "//source/server:overload_manager_lib", "//source/server/http:admin_lib", "@envoy_api//envoy/admin/v3:pkg_cc_proto", + "@envoy_api//envoy/config/bootstrap/v2:pkg_cc_proto", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", ], ) diff --git a/source/server/options_impl.cc b/source/server/options_impl.cc index e2ca56f39e26..f3b7d67db6a6 100644 --- a/source/server/options_impl.cc +++ b/source/server/options_impl.cc @@ -66,6 +66,11 @@ OptionsImpl::OptionsImpl(std::vector args, TCLAP::ValueArg config_yaml( "", "config-yaml", "Inline YAML configuration, merges with the contents of --config-path", false, "", "string", cmd); + TCLAP::ValueArg bootstrap_version( + "", "bootstrap-version", + "API version to parse the bootstrap config as (e.g. 3). If " + "unset, all known versions will be attempted", + false, 0, "string", cmd); TCLAP::SwitchArg allow_unknown_fields("", "allow-unknown-fields", "allow unknown fields in static configuration (DEPRECATED)", @@ -220,6 +225,9 @@ OptionsImpl::OptionsImpl(std::vector args, config_path_ = config_path.getValue(); config_yaml_ = config_yaml.getValue(); + if (bootstrap_version.getValue() != 0) { + bootstrap_version_ = bootstrap_version.getValue(); + } if (allow_unknown_fields.getValue()) { ENVOY_LOG(warn, "--allow-unknown-fields is deprecated, use --allow-unknown-static-fields instead."); diff --git a/source/server/options_impl.h b/source/server/options_impl.h index b8f3e64695c4..fb6bd08dfdb9 100644 --- a/source/server/options_impl.h +++ b/source/server/options_impl.h @@ -103,6 +103,7 @@ class OptionsImpl : public Server::Options, protected Logger::Loggable& bootstrapVersion() const override { return bootstrap_version_; } const std::string& configYaml() const override { return config_yaml_; } bool allowUnknownStaticFields() const override { return allow_unknown_static_fields_; } bool rejectUnknownDynamicFields() const override { return reject_unknown_dynamic_fields_; } @@ -156,6 +157,7 @@ class OptionsImpl : public Server::Options, protected Logger::Loggable bootstrap_version_; std::string config_yaml_; bool allow_unknown_static_fields_{false}; bool reject_unknown_dynamic_fields_{false}; diff --git a/source/server/server.cc b/source/server/server.cc index 6c8bc3a3f305..4067555bd270 100644 --- a/source/server/server.cc +++ b/source/server/server.cc @@ -8,6 +8,9 @@ #include #include "envoy/admin/v3/config_dump.pb.h" +#include "envoy/common/exception.h" +#include "envoy/config/bootstrap/v2/bootstrap.pb.h" +#include "envoy/config/bootstrap/v2/bootstrap.pb.validate.h" #include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/config/bootstrap/v3/bootstrap.pb.validate.h" #include "envoy/event/dispatcher.h" @@ -25,6 +28,7 @@ #include "common/common/utility.h" #include "common/common/version.h" #include "common/config/utility.h" +#include "common/config/version_converter.h" #include "common/http/codes.h" #include "common/local_info/local_info_impl.h" #include "common/memory/stats.h" @@ -219,6 +223,26 @@ void InstanceImpl::flushStatsInternal() { bool InstanceImpl::healthCheckFailed() { return !live_.load(); } +namespace { +// Loads a bootstrap object, potentially at a specific version (upgrading if necessary). 
+void loadBootsrap(absl::optional bootstrap_version, + envoy::config::bootstrap::v3::Bootstrap& bootstrap, + std::function load_function) { + + if (!bootstrap_version.has_value()) { + load_function(bootstrap, true); + } else if (*bootstrap_version == 3) { + load_function(bootstrap, false); + } else if (*bootstrap_version == 2) { + envoy::config::bootstrap::v2::Bootstrap bootstrap_v2; + load_function(bootstrap_v2, false); + Config::VersionConverter::upgrade(bootstrap_v2, bootstrap); + } else { + throw EnvoyException(fmt::format("Unknown bootstrap version {}.", *bootstrap_version)); + } +} +} // namespace + void InstanceUtil::loadBootstrapConfig(envoy::config::bootstrap::v3::Bootstrap& bootstrap, const Options& options, ProtobufMessage::ValidationVisitor& validation_visitor, @@ -234,11 +258,19 @@ void InstanceUtil::loadBootstrapConfig(envoy::config::bootstrap::v3::Bootstrap& } if (!config_path.empty()) { - MessageUtil::loadFromFile(config_path, bootstrap, validation_visitor, api); + loadBootsrap( + options.bootstrapVersion(), bootstrap, + [&config_path, &validation_visitor, &api](Protobuf::Message& message, bool do_boosting) { + MessageUtil::loadFromFile(config_path, message, validation_visitor, api, do_boosting); + }); } if (!config_yaml.empty()) { envoy::config::bootstrap::v3::Bootstrap bootstrap_override; - MessageUtil::loadFromYaml(config_yaml, bootstrap_override, validation_visitor); + loadBootsrap(options.bootstrapVersion(), bootstrap_override, + [&config_yaml, &validation_visitor](Protobuf::Message& message, bool do_boosting) { + MessageUtil::loadFromYaml(config_yaml, message, validation_visitor, do_boosting); + }); + // TODO(snowp): The fact that we do a merge here doesn't seem to be covered under test. bootstrap.MergeFrom(bootstrap_override); } if (config_proto.ByteSize() != 0) { diff --git a/test/common/protobuf/utility_test.cc b/test/common/protobuf/utility_test.cc index 6ba14af4d27d..34478ae6203e 100644 --- a/test/common/protobuf/utility_test.cc +++ b/test/common/protobuf/utility_test.cc @@ -257,6 +257,23 @@ TEST_F(ProtobufUtilityTest, LoadTextProtoFromFile) { EXPECT_TRUE(TestUtility::protoEqual(bootstrap, proto_from_file)); } +TEST_F(ProtobufUtilityTest, LoadJsonFromFileNoBoosting) { + envoy::config::bootstrap::v3::Bootstrap bootstrap; + bootstrap.mutable_cluster_manager() + ->mutable_upstream_bind_config() + ->mutable_source_address() + ->set_address("1.1.1.1"); + + std::string bootstrap_text; + ASSERT_TRUE(Protobuf::TextFormat::PrintToString(bootstrap, &bootstrap_text)); + const std::string filename = + TestEnvironment::writeStringToFileForTest("proto.pb_text", bootstrap_text); + + envoy::config::bootstrap::v3::Bootstrap proto_from_file; + TestUtility::loadFromFile(filename, proto_from_file, *api_); + EXPECT_TRUE(TestUtility::protoEqual(bootstrap, proto_from_file)); +} + TEST_F(ProtobufUtilityTest, DEPRECATED_FEATURE_TEST(LoadV2TextProtoFromFile)) { API_NO_BOOST(envoy::config::bootstrap::v2::Bootstrap) bootstrap; bootstrap.mutable_node()->set_build_version("foo"); @@ -1200,6 +1217,15 @@ TEST_F(ProtobufUtilityTest, LoadFromJsonSameVersion) { } } +// MessageUtility::loadFromJson() avoids boosting when version specified. 
+TEST_F(ProtobufUtilityTest, LoadFromJsonNoBoosting) { + envoy::config::cluster::v3::Cluster dst; + EXPECT_THROW_WITH_REGEX( + MessageUtil::loadFromJson("{drain_connections_on_host_removal: true}", dst, + ProtobufMessage::getStrictValidationVisitor(), false), + EnvoyException, "INVALID_ARGUMENT:drain_connections_on_host_removal: Cannot find field."); +} + // MessageUtility::loadFromJson() with API message works across version. TEST_F(ProtobufUtilityTest, LoadFromJsonNextVersion) { { diff --git a/test/mocks/server/mocks.cc b/test/mocks/server/mocks.cc index 148fd24e9da5..3054af81c799 100644 --- a/test/mocks/server/mocks.cc +++ b/test/mocks/server/mocks.cc @@ -25,6 +25,7 @@ MockOptions::MockOptions(const std::string& config_path) : config_path_(config_p ON_CALL(*this, configPath()).WillByDefault(ReturnRef(config_path_)); ON_CALL(*this, configProto()).WillByDefault(ReturnRef(config_proto_)); ON_CALL(*this, configYaml()).WillByDefault(ReturnRef(config_yaml_)); + ON_CALL(*this, bootstrapVersion()).WillByDefault(ReturnRef(bootstrap_version_)); ON_CALL(*this, allowUnknownStaticFields()).WillByDefault(Invoke([this] { return allow_unknown_static_fields_; })); diff --git a/test/mocks/server/mocks.h b/test/mocks/server/mocks.h index 3cc405465e61..9d2270c32482 100644 --- a/test/mocks/server/mocks.h +++ b/test/mocks/server/mocks.h @@ -74,6 +74,7 @@ class MockOptions : public Options { MOCK_METHOD(const std::string&, configPath, (), (const)); MOCK_METHOD(const envoy::config::bootstrap::v3::Bootstrap&, configProto, (), (const)); MOCK_METHOD(const std::string&, configYaml, (), (const)); + MOCK_METHOD(const absl::optional&, bootstrapVersion, (), (const)); MOCK_METHOD(bool, allowUnknownStaticFields, (), (const)); MOCK_METHOD(bool, rejectUnknownDynamicFields, (), (const)); MOCK_METHOD(const std::string&, adminAddressPath, (), (const)); @@ -103,6 +104,7 @@ class MockOptions : public Options { std::string config_path_; envoy::config::bootstrap::v3::Bootstrap config_proto_; std::string config_yaml_; + absl::optional bootstrap_version_; bool allow_unknown_static_fields_{}; bool reject_unknown_dynamic_fields_{}; std::string admin_address_path_; diff --git a/test/server/server_test.cc b/test/server/server_test.cc index 36ea86675dc3..47f2239be3c3 100644 --- a/test/server/server_test.cc +++ b/test/server/server_test.cc @@ -642,6 +642,48 @@ TEST_P(ServerInstanceImplTest, DEPRECATED_FEATURE_TEST(LoadsV2BootstrapFromPbTex EXPECT_FALSE(server_->localInfo().node().hidden_envoy_deprecated_build_version().empty()); } +// Validate that bootstrap v3 pb_text with new fields loads fails if V2 config is specified. +TEST_P(ServerInstanceImplTest, FailToLoadV3ConfigWhenV2SelectedFromPbText) { + options_.bootstrap_version_ = 2; + + EXPECT_THROW_WITH_REGEX( + initialize("test/server/test_data/server/valid_v3_but_invalid_v2_bootstrap.pb_text"), + EnvoyException, "Unable to parse file"); +} + +// Validate that we correctly parse a V2 file when configured to do so. +TEST_P(ServerInstanceImplTest, DEPRECATED_FEATURE_TEST(LoadsV2ConfigWhenV2SelectedFromPbText)) { + options_.bootstrap_version_ = 2; + + initialize("test/server/test_data/server/valid_v2_but_invalid_v3_bootstrap.pb_text"); + EXPECT_EQ(server_->localInfo().node().id(), "bootstrap_id"); +} + +// Validate that we correctly parse a V3 file when configured to do so. 
+TEST_P(ServerInstanceImplTest, LoadsV3ConfigWhenV2SelectedFromPbText) { + options_.bootstrap_version_ = 3; + + initialize("test/server/test_data/server/valid_v3_but_invalid_v2_bootstrap.pb_text"); +} + +// Validate that bootstrap v2 pb_text with deprecated fields loads fails if V3 config is specified. +TEST_P(ServerInstanceImplTest, FailToLoadV2ConfigWhenV3SelectedFromPbText) { + options_.bootstrap_version_ = 3; + + EXPECT_THROW_WITH_REGEX( + initialize("test/server/test_data/server/valid_v2_but_invalid_v3_bootstrap.pb_text"), + EnvoyException, "Unable to parse file"); +} + +// Validate that we blow up on invalid version number. +TEST_P(ServerInstanceImplTest, InvalidBootstrapVersion) { + options_.bootstrap_version_ = 1; + + EXPECT_THROW_WITH_REGEX( + initialize("test/server/test_data/server/valid_v2_but_invalid_v3_bootstrap.pb_text"), + EnvoyException, "Unknown bootstrap version 1."); +} + TEST_P(ServerInstanceImplTest, LoadsBootstrapFromConfigProtoOptions) { options_.config_proto_.mutable_node()->set_id("foo"); initialize("test/server/test_data/server/node_bootstrap.yaml"); diff --git a/test/server/test_data/server/valid_v3_but_invalid_v2_bootstrap.pb_text b/test/server/test_data/server/valid_v3_but_invalid_v2_bootstrap.pb_text new file mode 100644 index 000000000000..f2134dd754fa --- /dev/null +++ b/test/server/test_data/server/valid_v3_but_invalid_v2_bootstrap.pb_text @@ -0,0 +1,9 @@ +static_resources { + clusters { + name: "cluster" + ignore_health_on_host_removal: true + connect_timeout { + seconds: 1 + } + } +} From 7f165e85a77c021b72d681f85d24f97bbbee5c99 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Guti=C3=A9rrez=20Segal=C3=A9s?= Date: Thu, 23 Apr 2020 17:43:01 -0400 Subject: [PATCH 020/909] Revert "init: order dynamic resource initialization to make RTDS always be first (#10362)" (#10919) This reverts commit aaba08195cf6b8f426527924d84cf3ade1755915. Signed-off-by: Raul Gutierrez Segales --- include/envoy/runtime/runtime.h | 8 -- include/envoy/upstream/cluster_manager.h | 15 --- source/common/runtime/BUILD | 2 - source/common/runtime/runtime_impl.cc | 19 +--- source/common/runtime/runtime_impl.h | 12 +- .../common/upstream/cluster_manager_impl.cc | 32 ++---- source/common/upstream/cluster_manager_impl.h | 22 ++-- source/server/server.cc | 55 +++------- source/server/server.h | 5 +- test/common/protobuf/utility_test.cc | 7 +- test/common/runtime/runtime_impl_test.cc | 43 +++++--- .../upstream/cluster_manager_impl_test.cc | 20 +--- .../clusters/aggregate/cluster_update_test.cc | 16 +-- test/integration/ads_integration_test.cc | 103 ------------------ test/mocks/runtime/mocks.h | 1 - test/mocks/upstream/mocks.h | 2 - test/server/server_test.cc | 17 +-- .../test_data/server/runtime_bootstrap.yaml | 9 ++ .../server/runtime_bootstrap_ads_eds.yaml | 38 ------- .../server/runtime_bootstrap_eds.yaml | 35 ------ test/test_common/test_runtime.h | 5 +- 21 files changed, 93 insertions(+), 373 deletions(-) delete mode 100644 test/server/test_data/server/runtime_bootstrap_ads_eds.yaml delete mode 100644 test/server/test_data/server/runtime_bootstrap_eds.yaml diff --git a/include/envoy/runtime/runtime.h b/include/envoy/runtime/runtime.h index 52abc0e50616..2df95731398b 100644 --- a/include/envoy/runtime/runtime.h +++ b/include/envoy/runtime/runtime.h @@ -259,8 +259,6 @@ class Loader { public: virtual ~Loader() = default; - using ReadyCallback = std::function; - /** * Post-construction initialization. 
Runtime will be generally available after * the constructor is finished, with the exception of dynamic RTDS layers, @@ -288,12 +286,6 @@ class Loader { * @param values the values to merge */ virtual void mergeValues(const std::unordered_map& values) PURE; - - /** - * Initiate all RTDS subscriptions. The `on_done` callback is invoked when all RTDS requests - * have either received and applied their responses or timed out. - */ - virtual void startRtdsSubscriptions(ReadyCallback on_done) PURE; }; using LoaderPtr = std::unique_ptr; diff --git a/include/envoy/upstream/cluster_manager.h b/include/envoy/upstream/cluster_manager.h index 4bfe98beee6b..047bf2aafd48 100644 --- a/include/envoy/upstream/cluster_manager.h +++ b/include/envoy/upstream/cluster_manager.h @@ -73,13 +73,6 @@ class ClusterManagerFactory; /** * Manages connection pools and load balancing for upstream clusters. The cluster manager is * persistent and shared among multiple ongoing requests/connections. - * Cluster manager is initialized in two phases. In the first phase which begins at the construction - * all primary clusters (i.e. with endpoint assignments provisioned statically in bootstrap, - * discovered through DNS or file based CDS) are initialized. - * After the first phase has completed the server instance initializes services (i.e. RTDS) needed - * to successfully deploy the rest of dynamic configuration. - * In the second phase all secondary clusters (with endpoint assignments provisioned by xDS servers) - * are initialized and then the rest of the configuration provisioned through xDS. */ class ClusterManager { public: @@ -103,14 +96,6 @@ class ClusterManager { */ virtual void setInitializedCb(std::function callback) PURE; - /** - * Start initialization of secondary clusters and then dynamically configured clusters. - * The "initialized callback" set in the method above is invoked when secondary and - * dynamically provisioned clusters have finished initializing. 
- */ - virtual void - initializeSecondaryClusters(const envoy::config::bootstrap::v3::Bootstrap& bootstrap) PURE; - using ClusterInfoMap = std::unordered_map>; /** diff --git a/source/common/runtime/BUILD b/source/common/runtime/BUILD index ddeb069e3e5a..dbab335cf0a7 100644 --- a/source/common/runtime/BUILD +++ b/source/common/runtime/BUILD @@ -67,9 +67,7 @@ envoy_cc_library( "//source/common/config:subscription_base_interface", "//source/common/filesystem:directory_lib", "//source/common/grpc:common_lib", - "//source/common/init:manager_lib", "//source/common/init:target_lib", - "//source/common/init:watcher_lib", "//source/common/protobuf:message_validator_lib", "//source/common/protobuf:utility_lib", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", diff --git a/source/common/runtime/runtime_impl.cc b/source/common/runtime/runtime_impl.cc index 4ca76e9172e2..1ed9faf2c432 100644 --- a/source/common/runtime/runtime_impl.cc +++ b/source/common/runtime/runtime_impl.cc @@ -465,12 +465,11 @@ void ProtoLayer::walkProtoValue(const ProtobufWkt::Value& v, const std::string& LoaderImpl::LoaderImpl(Event::Dispatcher& dispatcher, ThreadLocal::SlotAllocator& tls, const envoy::config::bootstrap::v3::LayeredRuntime& config, - const LocalInfo::LocalInfo& local_info, Stats::Store& store, - RandomGenerator& generator, + const LocalInfo::LocalInfo& local_info, Init::Manager& init_manager, + Stats::Store& store, RandomGenerator& generator, ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api) : generator_(generator), stats_(generateStats(store)), tls_(tls.allocateSlot()), - config_(config), service_cluster_(local_info.clusterName()), api_(api), - init_watcher_("RDTS", [this]() { onRdtsReady(); }) { + config_(config), service_cluster_(local_info.clusterName()), api_(api) { std::unordered_set layer_names; for (const auto& layer : config_.layers()) { auto ret = layer_names.insert(layer.name()); @@ -498,7 +497,7 @@ LoaderImpl::LoaderImpl(Event::Dispatcher& dispatcher, ThreadLocal::SlotAllocator case envoy::config::bootstrap::v3::RuntimeLayer::LayerSpecifierCase::kRtdsLayer: subscriptions_.emplace_back( std::make_unique(*this, layer.rtds_layer(), store, validation_visitor)); - init_manager_.add(subscriptions_.back()->init_target_); + init_manager.add(subscriptions_.back()->init_target_); break; default: NOT_REACHED_GCOVR_EXCL_LINE; @@ -510,16 +509,6 @@ LoaderImpl::LoaderImpl(Event::Dispatcher& dispatcher, ThreadLocal::SlotAllocator void LoaderImpl::initialize(Upstream::ClusterManager& cm) { cm_ = &cm; } -void LoaderImpl::startRtdsSubscriptions(ReadyCallback on_done) { - on_rtds_initialized_ = on_done; - init_manager_.initialize(init_watcher_); -} - -void LoaderImpl::onRdtsReady() { - ENVOY_LOG(info, "RTDS has finished initialization"); - on_rtds_initialized_(); -} - RtdsSubscription::RtdsSubscription( LoaderImpl& parent, const envoy::config::bootstrap::v3::RuntimeLayer::RtdsLayer& rtds_layer, Stats::Store& store, ProtobufMessage::ValidationVisitor& validation_visitor) diff --git a/source/common/runtime/runtime_impl.h b/source/common/runtime/runtime_impl.h index e59afd9c7361..8fa838e88cd8 100644 --- a/source/common/runtime/runtime_impl.h +++ b/source/common/runtime/runtime_impl.h @@ -24,7 +24,6 @@ #include "common/common/logger.h" #include "common/common/thread.h" #include "common/config/subscription_base.h" -#include "common/init/manager_impl.h" #include "common/init/target_impl.h" #include "common/singleton/threadsafe_singleton.h" @@ -243,16 +242,15 @@ class LoaderImpl : public Loader, 
Logger::Loggable { public: LoaderImpl(Event::Dispatcher& dispatcher, ThreadLocal::SlotAllocator& tls, const envoy::config::bootstrap::v3::LayeredRuntime& config, - const LocalInfo::LocalInfo& local_info, Stats::Store& store, - RandomGenerator& generator, ProtobufMessage::ValidationVisitor& validation_visitor, - Api::Api& api); + const LocalInfo::LocalInfo& local_info, Init::Manager& init_manager, + Stats::Store& store, RandomGenerator& generator, + ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api); // Runtime::Loader void initialize(Upstream::ClusterManager& cm) override; const Snapshot& snapshot() override; std::shared_ptr threadsafeSnapshot() override; void mergeValues(const std::unordered_map& values) override; - void startRtdsSubscriptions(ReadyCallback on_done) override; private: friend RtdsSubscription; @@ -262,7 +260,6 @@ class LoaderImpl : public Loader, Logger::Loggable { // Load a new Snapshot into TLS void loadNewSnapshot(); RuntimeStats generateStats(Stats::Store& store); - void onRdtsReady(); RandomGenerator& generator_; RuntimeStats stats_; @@ -272,9 +269,6 @@ class LoaderImpl : public Loader, Logger::Loggable { const std::string service_cluster_; Filesystem::WatcherPtr watcher_; Api::Api& api_; - ReadyCallback on_rtds_initialized_; - Init::WatcherImpl init_watcher_; - Init::ManagerImpl init_manager_{"RTDS"}; std::vector subscriptions_; Upstream::ClusterManager* cm_{}; diff --git a/source/common/upstream/cluster_manager_impl.cc b/source/common/upstream/cluster_manager_impl.cc index 40f1cb07d33e..8e4d6f9975c3 100644 --- a/source/common/upstream/cluster_manager_impl.cc +++ b/source/common/upstream/cluster_manager_impl.cc @@ -123,12 +123,12 @@ void ClusterManagerInitHelper::maybeFinishInitialize() { // Do not do anything if we are still doing the initial static load or if we are waiting for // CDS initialize. ENVOY_LOG(debug, "maybe finish initialize state: {}", enumToInt(state_)); - if (state_ == State::Loading || state_ == State::WaitingToStartCdsInitialization) { + if (state_ == State::Loading || state_ == State::WaitingForCdsInitialize) { return; } // If we are still waiting for primary clusters to initialize, do nothing. - ASSERT(state_ == State::WaitingToStartSecondaryInitialization || state_ == State::CdsInitialized); + ASSERT(state_ == State::WaitingForStaticInitialize || state_ == State::CdsInitialized); ENVOY_LOG(debug, "maybe finish initialize primary init clusters empty: {}", primary_init_clusters_.empty()); if (!primary_init_clusters_.empty()) { @@ -162,9 +162,9 @@ void ClusterManagerInitHelper::maybeFinishInitialize() { // directly to initialized. started_secondary_initialize_ = false; ENVOY_LOG(debug, "maybe finish initialize cds api ready: {}", cds_ != nullptr); - if (state_ == State::WaitingToStartSecondaryInitialization && cds_) { + if (state_ == State::WaitingForStaticInitialize && cds_) { ENVOY_LOG(info, "cm init: initializing cds"); - state_ = State::WaitingToStartCdsInitialization; + state_ = State::WaitingForCdsInitialize; cds_->initialize(); } else { ENVOY_LOG(info, "cm init: all clusters initialized"); @@ -177,14 +177,7 @@ void ClusterManagerInitHelper::maybeFinishInitialize() { void ClusterManagerInitHelper::onStaticLoadComplete() { ASSERT(state_ == State::Loading); - // After initialization of primary clusters has completed, transition to - // waiting for signal to initialize secondary clusters and then CDS. 
- state_ = State::WaitingToStartSecondaryInitialization; -} - -void ClusterManagerInitHelper::startInitializingSecondaryClusters() { - ASSERT(state_ == State::WaitingToStartSecondaryInitialization); - ENVOY_LOG(debug, "continue initializing secondary clusters"); + state_ = State::WaitingForStaticInitialize; maybeFinishInitialize(); } @@ -193,7 +186,7 @@ void ClusterManagerInitHelper::setCds(CdsApi* cds) { cds_ = cds; if (cds_) { cds_->setInitializedCb([this]() -> void { - ASSERT(state_ == State::WaitingToStartCdsInitialization); + ASSERT(state_ == State::WaitingForCdsInitialize); state_ = State::CdsInitialized; maybeFinishInitialize(); }); @@ -353,22 +346,15 @@ ClusterManagerImpl::ClusterManagerImpl( init_helper_.onStaticLoadComplete(); ads_mux_->start(); -} -void ClusterManagerImpl::initializeSecondaryClusters( - const envoy::config::bootstrap::v3::Bootstrap& bootstrap) { - init_helper_.startInitializingSecondaryClusters(); - - const auto& cm_config = bootstrap.cluster_manager(); if (cm_config.has_load_stats_config()) { const auto& load_stats_config = cm_config.load_stats_config(); - load_stats_reporter_ = std::make_unique( - local_info_, *this, stats_, + local_info, *this, stats, Config::Utility::factoryForGrpcApiConfigSource(*async_client_manager_, load_stats_config, - stats_, false) + stats, false) ->create(), - load_stats_config.transport_api_version(), dispatcher_); + load_stats_config.transport_api_version(), main_thread_dispatcher); } } diff --git a/source/common/upstream/cluster_manager_impl.h b/source/common/upstream/cluster_manager_impl.h index 4f89e443acee..707cc1ca476e 100644 --- a/source/common/upstream/cluster_manager_impl.h +++ b/source/common/upstream/cluster_manager_impl.h @@ -110,18 +110,15 @@ class ClusterManagerInitHelper : Logger::Loggable { : cm_(cm), per_cluster_init_callback_(per_cluster_init_callback) {} enum class State { - // Initial state. During this state all static clusters are loaded. Any primary clusters + // Initial state. During this state all static clusters are loaded. Any phase 1 clusters // are immediately initialized. Loading, - // During this state cluster manager waits to start initializing secondary clusters. In this - // state all - // primary clusters have completed initialization. Initialization of the secondary clusters - // is started by the `initializeSecondaryClusters` method. - WaitingToStartSecondaryInitialization, - // In this state cluster manager waits for all secondary clusters (if configured) to finish - // initialization. Then, if CDS is configured, this state tracks waiting for the first CDS - // response to populate dynamically configured clusters. - WaitingToStartCdsInitialization, + // During this state we wait for all static clusters to fully initialize. This requires + // completing phase 1 clusters, initializing phase 2 clusters, and then waiting for them. + WaitingForStaticInitialize, + // If CDS is configured, this state tracks waiting for the first CDS response to populate + // clusters. + WaitingForCdsInitialize, // During this state, all CDS populated clusters are undergoing either phase 1 or phase 2 // initialization. CdsInitialized, @@ -136,8 +133,6 @@ class ClusterManagerInitHelper : Logger::Loggable { void setInitializedCb(std::function callback); State state() const { return state_; } - void startInitializingSecondaryClusters(); - private: // To enable invariant assertions on the cluster lists. 
friend ClusterManagerImpl; @@ -247,9 +242,6 @@ class ClusterManagerImpl : public ClusterManager, Logger::LoggableinitializeStats(stats_store_, "server."); } - // The broad order of initialization from this point on is the following: - // 1. Statically provisioned configuration (bootstrap) are loaded. - // 2. Cluster manager is created and all primary clusters (i.e. with endpoint assignments - // provisioned statically in bootstrap, discovered through DNS or file based CDS) are - // initialized. - // 3. Various services are initialized and configured using the bootstrap config. - // 4. RTDS is initialized using primary clusters. This allows runtime overrides to be fully - // configured before the rest of xDS configuration is provisioned. - // 5. Secondary clusters (with endpoint assignments provisioned by xDS servers) are initialized. - // 6. The rest of the dynamic configuration is provisioned. - // - // Please note: this order requires that RTDS is provisioned using a primary cluster. If RTDS is - // provisioned through ADS then ADS must use primary cluster as well. This invariant is enforced - // during RTDS initialization and invalid configuration will be rejected. - // Runtime gets initialized before the main configuration since during main configuration // load things may grab a reference to the loader for later use. runtime_singleton_ = std::make_unique( @@ -459,27 +444,6 @@ void InstanceImpl::initialize(const Options& options, // instantiated (which in turn relies on runtime...). Runtime::LoaderSingleton::get().initialize(clusterManager()); - // If RTDS was not configured the `onRuntimeReady` callback is immediately invoked. - Runtime::LoaderSingleton::get().startRtdsSubscriptions([this]() { onRuntimeReady(); }); - - for (Stats::SinkPtr& sink : config_.statsSinks()) { - stats_store_.addSink(*sink); - } - - // Some of the stat sinks may need dispatcher support so don't flush until the main loop starts. - // Just setup the timer. - stat_flush_timer_ = dispatcher_->createTimer([this]() -> void { flushStats(); }); - stat_flush_timer_->enableTimer(config_.statsFlushInterval()); - - // GuardDog (deadlock detection) object and thread setup before workers are - // started and before our own run() loop runs. - guard_dog_ = std::make_unique(stats_store_, config_, *api_); -} - -void InstanceImpl::onRuntimeReady() { - // Begin initializing secondary clusters after RTDS configuration has been applied. - clusterManager().initializeSecondaryClusters(bootstrap_); - if (bootstrap_.has_hds_config()) { const auto& hds_config = bootstrap_.hds_config(); async_client_manager_ = std::make_unique( @@ -494,6 +458,19 @@ void InstanceImpl::onRuntimeReady() { *config_.clusterManager(), *local_info_, *admin_, *singleton_manager_, thread_local_, messageValidationContext().dynamicValidationVisitor(), *api_); } + + for (Stats::SinkPtr& sink : config_.statsSinks()) { + stats_store_.addSink(*sink); + } + + // Some of the stat sinks may need dispatcher support so don't flush until the main loop starts. + // Just setup the timer. + stat_flush_timer_ = dispatcher_->createTimer([this]() -> void { flushStats(); }); + stat_flush_timer_->enableTimer(config_.statsFlushInterval()); + + // GuardDog (deadlock detection) object and thread setup before workers are + // started and before our own run() loop runs. 
+ guard_dog_ = std::make_unique(stats_store_, config_, *api_); } void InstanceImpl::startWorkers() { @@ -513,8 +490,8 @@ Runtime::LoaderPtr InstanceUtil::createRuntime(Instance& server, ENVOY_LOG(info, "runtime: {}", MessageUtil::getYamlStringFromMessage(config.runtime())); return std::make_unique( server.dispatcher(), server.threadLocal(), config.runtime(), server.localInfo(), - server.stats(), server.random(), server.messageValidationContext().dynamicValidationVisitor(), - server.api()); + server.initManager(), server.stats(), server.random(), + server.messageValidationContext().dynamicValidationVisitor(), server.api()); } void InstanceImpl::loadServerFlags(const absl::optional& flags_path) { @@ -735,4 +712,4 @@ ProtobufTypes::MessagePtr InstanceImpl::dumpBootstrapConfig() { } } // namespace Server -} // namespace Envoy +} // namespace Envoy \ No newline at end of file diff --git a/source/server/server.h b/source/server/server.h index 7670ff08ab7d..c5016887700a 100644 --- a/source/server/server.h +++ b/source/server/server.h @@ -285,7 +285,6 @@ class InstanceImpl final : Logger::Loggable, void terminate(); void notifyCallbacksForStage( Stage stage, Event::PostCb completion_cb = [] {}); - void onRuntimeReady(); using LifecycleNotifierCallbacks = std::list; using LifecycleNotifierCompletionCallbacks = std::list; @@ -306,9 +305,6 @@ class InstanceImpl final : Logger::Loggable, const Options& options_; ProtobufMessage::ProdValidationContextImpl validation_context_; TimeSource& time_source_; - // Delete local_info_ as late as possible as some members below may reference it during their - // destruction. - LocalInfo::LocalInfoPtr local_info_; HotRestart& restarter_; const time_t start_time_; time_t original_start_time_; @@ -332,6 +328,7 @@ class InstanceImpl final : Logger::Loggable, Configuration::MainImpl config_; Network::DnsResolverSharedPtr dns_resolver_; Event::TimerPtr stat_flush_timer_; + LocalInfo::LocalInfoPtr local_info_; DrainManagerPtr drain_manager_; AccessLog::AccessLogManagerImpl access_log_manager_; std::unique_ptr cluster_manager_factory_; diff --git a/test/common/protobuf/utility_test.cc b/test/common/protobuf/utility_test.cc index 34478ae6203e..eedc2c433eb8 100644 --- a/test/common/protobuf/utility_test.cc +++ b/test/common/protobuf/utility_test.cc @@ -1399,9 +1399,9 @@ class DeprecatedFieldsTest : public testing::TestWithParam { runtime_deprecated_feature_use_(store_.counter("runtime.deprecated_feature_use")) { envoy::config::bootstrap::v3::LayeredRuntime config; config.add_layers()->mutable_admin_layer(); - loader_ = std::make_unique( - Runtime::LoaderPtr{new Runtime::LoaderImpl(dispatcher_, tls_, config, local_info_, store_, - generator_, validation_visitor_, *api_)}); + loader_ = std::make_unique(Runtime::LoaderPtr{ + new Runtime::LoaderImpl(dispatcher_, tls_, config, local_info_, init_manager_, store_, + generator_, validation_visitor_, *api_)}); } void checkForDeprecation(const Protobuf::Message& message) { @@ -1425,6 +1425,7 @@ class DeprecatedFieldsTest : public testing::TestWithParam { std::unique_ptr loader_; Stats::Counter& runtime_deprecated_feature_use_; NiceMock local_info_; + Init::MockManager init_manager_; NiceMock validation_visitor_; }; diff --git a/test/common/runtime/runtime_impl_test.cc b/test/common/runtime/runtime_impl_test.cc index 4701f5412834..1d9d7076a372 100644 --- a/test/common/runtime/runtime_impl_test.cc +++ b/test/common/runtime/runtime_impl_test.cc @@ -28,7 +28,6 @@ using testing::_; using testing::Invoke; using testing::InvokeWithoutArgs; 
-using testing::MockFunction; using testing::NiceMock; using testing::Return; @@ -119,6 +118,7 @@ class LoaderImplTest : public testing::Test { Api::ApiPtr api_; Upstream::MockClusterManager cm_; NiceMock local_info_; + Init::MockManager init_manager_; std::vector on_changed_cbs_; NiceMock validation_visitor_; std::string expected_watch_root_; @@ -145,8 +145,9 @@ class DiskLoaderImplTest : public LoaderImplTest { envoy::config::bootstrap::v3::LayeredRuntime layered_runtime; Config::translateRuntime(runtime, layered_runtime); - loader_ = std::make_unique(dispatcher_, tls_, layered_runtime, local_info_, store_, - generator_, validation_visitor_, *api_); + loader_ = + std::make_unique(dispatcher_, tls_, layered_runtime, local_info_, init_manager_, + store_, generator_, validation_visitor_, *api_); } void write(const std::string& path, const std::string& value) { @@ -556,8 +557,8 @@ TEST_F(DiskLoaderImplTest, MultipleAdminLayersFail) { layer->mutable_admin_layer(); } EXPECT_THROW_WITH_MESSAGE( - std::make_unique(dispatcher_, tls_, layered_runtime, local_info_, store_, - generator_, validation_visitor_, *api_), + std::make_unique(dispatcher_, tls_, layered_runtime, local_info_, init_manager_, + store_, generator_, validation_visitor_, *api_), EnvoyException, "Too many admin layers specified in LayeredRuntime, at most one may be specified"); } @@ -577,8 +578,9 @@ class StaticLoaderImplTest : public LoaderImplTest { layer->set_name("admin"); layer->mutable_admin_layer(); } - loader_ = std::make_unique(dispatcher_, tls_, layered_runtime, local_info_, store_, - generator_, validation_visitor_, *api_); + loader_ = + std::make_unique(dispatcher_, tls_, layered_runtime, local_info_, init_manager_, + store_, generator_, validation_visitor_, *api_); } ProtobufWkt::Struct base_; @@ -863,6 +865,9 @@ class RtdsLoaderImplTest : public LoaderImplTest { rtds_layer->mutable_rtds_config(); } EXPECT_CALL(cm_, subscriptionFactory()).Times(layers_.size()); + EXPECT_CALL(init_manager_, add(_)).WillRepeatedly(Invoke([this](const Init::Target& target) { + init_target_handles_.emplace_back(target.createHandle("test")); + })); ON_CALL(cm_.subscription_factory_, subscriptionFromConfigSource(_, _, _, _)) .WillByDefault(testing::Invoke( [this](const envoy::config::core::v3::ConfigSource&, absl::string_view, Stats::Scope&, @@ -872,14 +877,15 @@ class RtdsLoaderImplTest : public LoaderImplTest { rtds_callbacks_.push_back(&callbacks); return ret; })); - loader_ = std::make_unique(dispatcher_, tls_, config, local_info_, store_, - generator_, validation_visitor_, *api_); + loader_ = std::make_unique(dispatcher_, tls_, config, local_info_, init_manager_, + store_, generator_, validation_visitor_, *api_); loader_->initialize(cm_); for (auto* sub : rtds_subscriptions_) { EXPECT_CALL(*sub, start(_)); } - - loader_->startRtdsSubscriptions(rtds_init_callback_.AsStdFunction()); + for (auto& handle : init_target_handles_) { + handle->initialize(init_watcher_); + } // Validate that the layer name is set properly for dynamic layers. EXPECT_EQ(layers_[0], loader_->snapshot().getLayers()[1]->name()); @@ -915,7 +921,8 @@ class RtdsLoaderImplTest : public LoaderImplTest { std::vector layers_{"some_resource"}; std::vector rtds_callbacks_; std::vector rtds_subscriptions_; - MockFunction rtds_init_callback_; + Init::ExpectableWatcherImpl init_watcher_; + std::vector init_target_handles_; }; // Empty resource lists are rejected. 
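For orientation, the runtime discovery updates these RTDS tests push through onConfigUpdate() carry a single Runtime resource built with the same TestUtility::parseYaml helper used elsewhere in this diff; the fully qualified proto type is inferred here (the template argument is elided in the text above), and the layer contents mirror the tests:

// A hedged sketch of the single RTDS resource an update is expected to carry.
// Its name matches the configured rtds_layer ("some_resource" in these tests)
// and its layer becomes the dynamic runtime layer.
const auto runtime = TestUtility::parseYaml<envoy::service::runtime::v3::Runtime>(R"EOF(
name: some_resource
layer:
  foo: bar
  baz: meh
)EOF");
// Updates carrying zero or more than one resource are rejected, as the
// UnexpectedSizeEmpty and UnexpectedSizeTooMany tests just below verify.
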
@@ -924,7 +931,7 @@ TEST_F(RtdsLoaderImplTest, UnexpectedSizeEmpty) { Protobuf::RepeatedPtrField runtimes; - EXPECT_CALL(rtds_init_callback_, Call()); + EXPECT_CALL(init_watcher_, ready()); EXPECT_THROW_WITH_MESSAGE(rtds_callbacks_[0]->onConfigUpdate(runtimes, ""), EnvoyException, "Unexpected RTDS resource length: 0"); @@ -942,7 +949,7 @@ TEST_F(RtdsLoaderImplTest, UnexpectedSizeTooMany) { runtimes.Add(); runtimes.Add(); - EXPECT_CALL(rtds_init_callback_, Call()); + EXPECT_CALL(init_watcher_, ready()); EXPECT_THROW_WITH_MESSAGE(rtds_callbacks_[0]->onConfigUpdate(runtimes, ""), EnvoyException, "Unexpected RTDS resource length: 2"); @@ -956,7 +963,7 @@ TEST_F(RtdsLoaderImplTest, UnexpectedSizeTooMany) { TEST_F(RtdsLoaderImplTest, FailureSubscription) { setup(); - EXPECT_CALL(rtds_init_callback_, Call()); + EXPECT_CALL(init_watcher_, ready()); // onConfigUpdateFailed() should not be called for gRPC stream connection failure rtds_callbacks_[0]->onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::FetchTimedout, {}); @@ -1002,7 +1009,7 @@ TEST_F(RtdsLoaderImplTest, OnConfigUpdateSuccess) { foo: bar baz: meh )EOF"); - EXPECT_CALL(rtds_init_callback_, Call()); + EXPECT_CALL(init_watcher_, ready()); doOnConfigUpdateVerifyNoThrow(runtime); EXPECT_EQ("bar", loader_->snapshot().get("foo").value().get()); @@ -1041,7 +1048,7 @@ TEST_F(RtdsLoaderImplTest, DeltaOnConfigUpdateSuccess) { foo: bar baz: meh )EOF"); - EXPECT_CALL(rtds_init_callback_, Call()); + EXPECT_CALL(init_watcher_, ready()); doDeltaOnConfigUpdateVerifyNoThrow(runtime); EXPECT_EQ("bar", loader_->snapshot().get("foo").value().get()); @@ -1085,7 +1092,7 @@ TEST_F(RtdsLoaderImplTest, MultipleRtdsLayers) { foo: bar baz: meh )EOF"); - EXPECT_CALL(rtds_init_callback_, Call()).Times(1); + EXPECT_CALL(init_watcher_, ready()).Times(2); doOnConfigUpdateVerifyNoThrow(runtime, 0); EXPECT_EQ("bar", loader_->snapshot().get("foo").value().get()); diff --git a/test/common/upstream/cluster_manager_impl_test.cc b/test/common/upstream/cluster_manager_impl_test.cc index 95d0766b7850..2e300b5b2844 100644 --- a/test/common/upstream/cluster_manager_impl_test.cc +++ b/test/common/upstream/cluster_manager_impl_test.cc @@ -41,7 +41,6 @@ class ClusterManagerImplTest : public testing::Test { bootstrap, factory_, factory_.stats_, factory_.tls_, factory_.runtime_, factory_.random_, factory_.local_info_, log_manager_, factory_.dispatcher_, admin_, validation_context_, *api_, http_context_, grpc_context_); - cluster_manager_->initializeSecondaryClusters(bootstrap); } void createWithLocalClusterUpdate(const bool enable_merge_window = true) { @@ -2830,7 +2829,6 @@ TEST_F(ClusterManagerInitHelperTest, ImmediateInitialize) { cluster1.initialize_callback_(); init_helper_.onStaticLoadComplete(); - init_helper_.startInitializingSecondaryClusters(); ReadyWatcher cm_initialized; EXPECT_CALL(cm_initialized, ready()); @@ -2851,10 +2849,8 @@ TEST_F(ClusterManagerInitHelperTest, StaticSdsInitialize) { ON_CALL(cluster1, initializePhase()).WillByDefault(Return(Cluster::InitializePhase::Secondary)); init_helper_.addCluster(cluster1); - init_helper_.onStaticLoadComplete(); - EXPECT_CALL(cluster1, initialize(_)); - init_helper_.startInitializingSecondaryClusters(); + init_helper_.onStaticLoadComplete(); ReadyWatcher cm_initialized; init_helper_.setInitializedCb([&]() -> void { cm_initialized.ready(); }); @@ -2905,9 +2901,8 @@ TEST_F(ClusterManagerInitHelperTest, InitSecondaryWithoutEdsPaused) { ON_CALL(cluster1, 
initializePhase()).WillByDefault(Return(Cluster::InitializePhase::Secondary)); init_helper_.addCluster(cluster1); - init_helper_.onStaticLoadComplete(); EXPECT_CALL(cluster1, initialize(_)); - init_helper_.startInitializingSecondaryClusters(); + init_helper_.onStaticLoadComplete(); EXPECT_CALL(*this, onClusterInit(Ref(cluster1))); EXPECT_CALL(cm_initialized, ready()); @@ -2928,10 +2923,8 @@ TEST_F(ClusterManagerInitHelperTest, InitSecondaryWithEdsPaused) { ON_CALL(cluster1, initializePhase()).WillByDefault(Return(Cluster::InitializePhase::Secondary)); init_helper_.addCluster(cluster1); - init_helper_.onStaticLoadComplete(); - EXPECT_CALL(cluster1, initialize(_)); - init_helper_.startInitializingSecondaryClusters(); + init_helper_.onStaticLoadComplete(); EXPECT_CALL(*this, onClusterInit(Ref(cluster1))); EXPECT_CALL(cm_initialized, ready()); @@ -2979,9 +2972,6 @@ TEST_F(ClusterManagerInitHelperTest, RemoveClusterWithinInitLoop) { ON_CALL(cluster, initializePhase()).WillByDefault(Return(Cluster::InitializePhase::Secondary)); init_helper_.addCluster(cluster); - // onStaticLoadComplete() must not initialize secondary clusters - init_helper_.onStaticLoadComplete(); - // Set up the scenario seen in Issue 903 where initialize() ultimately results // in the removeCluster() call. In the real bug this was a long and complex call // chain. @@ -2989,9 +2979,9 @@ TEST_F(ClusterManagerInitHelperTest, RemoveClusterWithinInitLoop) { init_helper_.removeCluster(cluster); })); - // Now call initializeSecondaryClusters which will exercise maybeFinishInitialize() + // Now call onStaticLoadComplete which will exercise maybeFinishInitialize() // which calls initialize() on the members of the secondary init list. - init_helper_.startInitializingSecondaryClusters(); + init_helper_.onStaticLoadComplete(); } // Validate that when options are set in the ClusterManager and/or Cluster, we see the socket option diff --git a/test/extensions/clusters/aggregate/cluster_update_test.cc b/test/extensions/clusters/aggregate/cluster_update_test.cc index e7cbbcb4311d..f040c6b88c5d 100644 --- a/test/extensions/clusters/aggregate/cluster_update_test.cc +++ b/test/extensions/clusters/aggregate/cluster_update_test.cc @@ -35,12 +35,10 @@ class AggregateClusterUpdateTest : public testing::Test { : http_context_(stats_store_.symbolTable()), grpc_context_(stats_store_.symbolTable()) {} void initialize(const std::string& yaml_config) { - auto bootstrap = parseBootstrapFromV2Yaml(yaml_config); cluster_manager_ = std::make_unique( - bootstrap, factory_, factory_.stats_, factory_.tls_, factory_.runtime_, factory_.random_, - factory_.local_info_, log_manager_, factory_.dispatcher_, admin_, validation_context_, - *api_, http_context_, grpc_context_); - cluster_manager_->initializeSecondaryClusters(bootstrap); + parseBootstrapFromV2Yaml(yaml_config), factory_, factory_.stats_, factory_.tls_, + factory_.runtime_, factory_.random_, factory_.local_info_, log_manager_, + factory_.dispatcher_, admin_, validation_context_, *api_, http_context_, grpc_context_); EXPECT_EQ(cluster_manager_->activeClusters().size(), 1); cluster_ = cluster_manager_->get("aggregate_cluster"); } @@ -259,12 +257,10 @@ TEST_F(AggregateClusterUpdateTest, InitializeAggregateClusterAfterOtherClusters) - secondary )EOF"; - auto bootstrap = parseBootstrapFromV2Yaml(config); cluster_manager_ = std::make_unique( - bootstrap, factory_, factory_.stats_, factory_.tls_, factory_.runtime_, factory_.random_, - factory_.local_info_, log_manager_, factory_.dispatcher_, admin_, 
validation_context_, *api_, - http_context_, grpc_context_); - cluster_manager_->initializeSecondaryClusters(bootstrap); + parseBootstrapFromV2Yaml(config), factory_, factory_.stats_, factory_.tls_, factory_.runtime_, + factory_.random_, factory_.local_info_, log_manager_, factory_.dispatcher_, admin_, + validation_context_, *api_, http_context_, grpc_context_); EXPECT_EQ(cluster_manager_->activeClusters().size(), 2); cluster_ = cluster_manager_->get("aggregate_cluster"); auto primary = cluster_manager_->get("primary"); diff --git a/test/integration/ads_integration_test.cc b/test/integration/ads_integration_test.cc index 9153bce772a0..42de78c81fae 100644 --- a/test/integration/ads_integration_test.cc +++ b/test/integration/ads_integration_test.cc @@ -889,107 +889,4 @@ TEST_P(AdsClusterFromFileIntegrationTest, BasicTestWidsAdsEndpointLoadedFromFile {"ads_eds_cluster"}, {}, {})); } -class AdsIntegrationTestWithRtds : public AdsIntegrationTest { -public: - AdsIntegrationTestWithRtds() = default; - - void initialize() override { - config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { - auto* layered_runtime = bootstrap.mutable_layered_runtime(); - auto* layer = layered_runtime->add_layers(); - layer->set_name("foobar"); - auto* rtds_layer = layer->mutable_rtds_layer(); - rtds_layer->set_name("ads_rtds_layer"); - auto* rtds_config = rtds_layer->mutable_rtds_config(); - rtds_config->mutable_ads(); - - auto* ads_config = bootstrap.mutable_dynamic_resources()->mutable_ads_config(); - ads_config->set_set_node_on_first_message_only(true); - }); - AdsIntegrationTest::initialize(); - } - - void testBasicFlow() { - // Test that runtime discovery request comes first and cluster discovery request comes after - // runtime was loaded. - EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Runtime, "", {"ads_rtds_layer"}, - {"ads_rtds_layer"}, {}, true)); - auto some_rtds_layer = TestUtility::parseYaml(R"EOF( - name: ads_rtds_layer - layer: - foo: bar - baz: meh - )EOF"); - sendDiscoveryResponse( - Config::TypeUrl::get().Runtime, {some_rtds_layer}, {some_rtds_layer}, {}, "1"); - - test_server_->waitForCounterGe("runtime.load_success", 1); - EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "", {}, {}, {}, false)); - EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Runtime, "1", {"ads_rtds_layer"}, {}, - {}, false)); - } -}; - -INSTANTIATE_TEST_SUITE_P(IpVersionsClientTypeDelta, AdsIntegrationTestWithRtds, - DELTA_SOTW_GRPC_CLIENT_INTEGRATION_PARAMS); - -TEST_P(AdsIntegrationTestWithRtds, Basic) { - initialize(); - testBasicFlow(); -} - -class AdsIntegrationTestWithRtdsAndSecondaryClusters : public AdsIntegrationTestWithRtds { -public: - AdsIntegrationTestWithRtdsAndSecondaryClusters() = default; - - void initialize() override { - config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { - // Add secondary cluster to the list of static resources. - auto* eds_cluster = bootstrap.mutable_static_resources()->add_clusters(); - eds_cluster->set_name("eds_cluster"); - eds_cluster->set_type(envoy::config::cluster::v3::Cluster::EDS); - auto* eds_cluster_config = eds_cluster->mutable_eds_cluster_config(); - eds_cluster_config->mutable_eds_config()->mutable_ads(); - }); - AdsIntegrationTestWithRtds::initialize(); - } - - void testBasicFlow() { - // Test that runtime discovery request comes first followed by the cluster load assignment - // discovery request for secondary cluster and then CDS discovery request. 
- EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Runtime, "", {"ads_rtds_layer"}, - {"ads_rtds_layer"}, {}, true)); - auto some_rtds_layer = TestUtility::parseYaml(R"EOF( - name: ads_rtds_layer - layer: - foo: bar - baz: meh - )EOF"); - sendDiscoveryResponse( - Config::TypeUrl::get().Runtime, {some_rtds_layer}, {some_rtds_layer}, {}, "1"); - - test_server_->waitForCounterGe("runtime.load_success", 1); - EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, "", - {"eds_cluster"}, {"eds_cluster"}, {}, false)); - sendDiscoveryResponse( - Config::TypeUrl::get().ClusterLoadAssignment, {buildClusterLoadAssignment("eds_cluster")}, - {buildClusterLoadAssignment("eds_cluster")}, {}, "1"); - - EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Runtime, "1", {"ads_rtds_layer"}, {}, - {}, false)); - EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "", {}, {}, {}, false)); - sendDiscoveryResponse( - Config::TypeUrl::get().Cluster, {buildCluster("cluster_0")}, {buildCluster("cluster_0")}, - {}, "1"); - } -}; - -INSTANTIATE_TEST_SUITE_P(IpVersionsClientTypeDelta, AdsIntegrationTestWithRtdsAndSecondaryClusters, - DELTA_SOTW_GRPC_CLIENT_INTEGRATION_PARAMS); - -TEST_P(AdsIntegrationTestWithRtdsAndSecondaryClusters, Basic) { - initialize(); - testBasicFlow(); -} - } // namespace Envoy diff --git a/test/mocks/runtime/mocks.h b/test/mocks/runtime/mocks.h index d73bb3eb5317..532c5650e3a1 100644 --- a/test/mocks/runtime/mocks.h +++ b/test/mocks/runtime/mocks.h @@ -74,7 +74,6 @@ class MockLoader : public Loader { MOCK_METHOD(const Snapshot&, snapshot, ()); MOCK_METHOD(std::shared_ptr, threadsafeSnapshot, ()); MOCK_METHOD(void, mergeValues, ((const std::unordered_map&))); - MOCK_METHOD(void, startRtdsSubscriptions, (ReadyCallback)); testing::NiceMock snapshot_; }; diff --git a/test/mocks/upstream/mocks.h b/test/mocks/upstream/mocks.h index 2e659cd59464..9a8ca01b00f8 100644 --- a/test/mocks/upstream/mocks.h +++ b/test/mocks/upstream/mocks.h @@ -309,8 +309,6 @@ class MockClusterManager : public ClusterManager { (const envoy::config::cluster::v3::Cluster& cluster, const std::string& version_info)); MOCK_METHOD(void, setInitializedCb, (std::function)); - MOCK_METHOD(void, initializeSecondaryClusters, - (const envoy::config::bootstrap::v3::Bootstrap& bootstrap)); MOCK_METHOD(ClusterInfoMap, clusters, ()); MOCK_METHOD(ThreadLocalCluster*, get, (absl::string_view cluster)); MOCK_METHOD(Http::ConnectionPool::Instance*, httpConnPoolForCluster, diff --git a/test/server/server_test.cc b/test/server/server_test.cc index 47f2239be3c3..5d230027e080 100644 --- a/test/server/server_test.cc +++ b/test/server/server_test.cc @@ -723,6 +723,7 @@ TEST_P(ServerInstanceImplTest, BootstrapRuntime) { EXPECT_EQ("bar", server_->runtime().snapshot().get("foo").value().get()); // This should access via the override/some_service overlay. 
EXPECT_EQ("fozz", server_->runtime().snapshot().get("fizz").value().get()); + EXPECT_EQ("foobar", server_->runtime().snapshot().getLayers()[3]->name()); } // Validate that a runtime absent an admin layer will fail mutating operations @@ -741,22 +742,6 @@ TEST_P(ServerInstanceImplTest, RuntimeNoAdminLayer) { EXPECT_EQ("No admin layer specified", response_body); } -// Verify that bootstrap fails if RTDS is configured through an EDS cluster -TEST_P(ServerInstanceImplTest, BootstrapRtdsThroughEdsFails) { - options_.service_cluster_name_ = "some_service"; - options_.service_node_name_ = "some_node_name"; - EXPECT_THROW_WITH_REGEX(initialize("test/server/test_data/server/runtime_bootstrap_eds.yaml"), - EnvoyException, "must have a statically defined non-EDS cluster"); -} - -// Verify that bootstrap fails if RTDS is configured through an ADS using EDS cluster -TEST_P(ServerInstanceImplTest, BootstrapRtdsThroughAdsViaEdsFails) { - options_.service_cluster_name_ = "some_service"; - options_.service_node_name_ = "some_node_name"; - EXPECT_THROW_WITH_REGEX(initialize("test/server/test_data/server/runtime_bootstrap_ads_eds.yaml"), - EnvoyException, "Unknown gRPC client cluster"); -} - TEST_P(ServerInstanceImplTest, DEPRECATED_FEATURE_TEST(InvalidLegacyBootstrapRuntime)) { EXPECT_THROW_WITH_MESSAGE( initialize("test/server/test_data/server/invalid_runtime_bootstrap.yaml"), EnvoyException, diff --git a/test/server/test_data/server/runtime_bootstrap.yaml b/test/server/test_data/server/runtime_bootstrap.yaml index e92c3fd5a903..ab26028ef183 100644 --- a/test/server/test_data/server/runtime_bootstrap.yaml +++ b/test/server/test_data/server/runtime_bootstrap.yaml @@ -7,3 +7,12 @@ layered_runtime: disk_layer: { symlink_root: {{ test_rundir }}/test/server/test_data/runtime/primary } - name: overlay_disk_layer disk_layer: { symlink_root: {{ test_rundir }}/test/server/test_data/runtime/override, append_service_cluster: true } + - name: foobar + rtds_layer: + name: foobar + rtds_config: + api_config_source: + api_type: GRPC + grpc_services: + envoy_grpc: + cluster_name: xds_cluster diff --git a/test/server/test_data/server/runtime_bootstrap_ads_eds.yaml b/test/server/test_data/server/runtime_bootstrap_ads_eds.yaml deleted file mode 100644 index 9bd1730bf927..000000000000 --- a/test/server/test_data/server/runtime_bootstrap_ads_eds.yaml +++ /dev/null @@ -1,38 +0,0 @@ -static_resources: - clusters: - - name: dummy_cluster - connect_timeout: 1s - load_assignment: - cluster_name: dummy_cluster - endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: - address: {{ ntop_ip_loopback_address }} - port_value: 0 - - name: ads_cluster - connect_timeout: 1s - type: EDS - eds_cluster_config: - eds_config: - api_config_source: - api_type: GRPC - grpc_services: - envoy_grpc: - cluster_name: "dummy_cluster" -dynamic_resources: - ads_config: - api_type: GRPC - grpc_services: - envoy_grpc: - cluster_name: ads_cluster - set_node_on_first_message_only: true -layered_runtime: - layers: - - name: foobar - rtds_layer: - name: foobar - rtds_config: - ads: {} - diff --git a/test/server/test_data/server/runtime_bootstrap_eds.yaml b/test/server/test_data/server/runtime_bootstrap_eds.yaml deleted file mode 100644 index c74b692288e1..000000000000 --- a/test/server/test_data/server/runtime_bootstrap_eds.yaml +++ /dev/null @@ -1,35 +0,0 @@ -static_resources: - clusters: - - name: dummy_cluster - connect_timeout: 1s - load_assignment: - cluster_name: dummy_cluster - endpoints: - - lb_endpoints: - - endpoint: - address: - 
socket_address: - address: {{ ntop_ip_loopback_address }} - port_value: 0 - - name: rtds_cluster - connect_timeout: 1s - type: EDS - eds_cluster_config: - eds_config: - api_config_source: - api_type: GRPC - grpc_services: - envoy_grpc: - cluster_name: "dummy_cluster" -layered_runtime: - layers: - - name: foobar - rtds_layer: - name: foobar - rtds_config: - api_config_source: - api_type: GRPC - grpc_services: - envoy_grpc: - cluster_name: rtds_cluster - diff --git a/test/test_common/test_runtime.h b/test/test_common/test_runtime.h index 0532b5529f9f..93bc51876dde 100644 --- a/test/test_common/test_runtime.h +++ b/test/test_common/test_runtime.h @@ -35,8 +35,8 @@ class TestScopedRuntime { config.add_layers()->mutable_admin_layer(); loader_ = std::make_unique( - std::make_unique(dispatcher_, tls_, config, local_info_, store_, - generator_, validation_visitor_, *api_)); + std::make_unique(dispatcher_, tls_, config, local_info_, init_manager_, + store_, generator_, validation_visitor_, *api_)); } private: @@ -46,6 +46,7 @@ class TestScopedRuntime { Runtime::MockRandomGenerator generator_; Api::ApiPtr api_; testing::NiceMock local_info_; + Init::MockManager init_manager_; testing::NiceMock validation_visitor_; std::unique_ptr loader_; }; From 5dcfa7b44999f8493a343796ba9dc7194c9f05c4 Mon Sep 17 00:00:00 2001 From: Marco Magdy Date: Thu, 23 Apr 2020 15:33:08 -0700 Subject: [PATCH 021/909] tracer: Improve test coverage for x-ray (#10890) - Removed unused functions that were showing up untested - Renamed a function to match the style guide - Small refactor (now that I understand how to use buffers) - Added a test that exercises the sending bytes to the x-ray daemon Signed-off-by: Marco Magdy --- .../extensions/tracers/xray/daemon_broker.cc | 10 ++-- .../tracers/xray/localized_sampling.h | 6 --- source/extensions/tracers/xray/tracer.cc | 22 +++------ source/extensions/tracers/xray/tracer.h | 2 +- test/extensions/tracers/xray/BUILD | 2 + test/extensions/tracers/xray/tracer_test.cc | 49 +++++++++++++++++-- 6 files changed, 60 insertions(+), 31 deletions(-) diff --git a/source/extensions/tracers/xray/daemon_broker.cc b/source/extensions/tracers/xray/daemon_broker.cc index d5667c423aa8..39d7de50ef99 100644 --- a/source/extensions/tracers/xray/daemon_broker.cc +++ b/source/extensions/tracers/xray/daemon_broker.cc @@ -4,6 +4,7 @@ #include "common/buffer/buffer_impl.h" #include "common/network/utility.h" +#include "common/protobuf/utility.h" #include "source/extensions/tracers/xray/daemon.pb.h" @@ -20,13 +21,8 @@ std::string createHeader(const std::string& format, uint32_t version) { source::extensions::tracers::xray::daemon::Header header; header.set_format(format); header.set_version(version); - - Protobuf::util::JsonPrintOptions json_options; - json_options.preserve_proto_field_names = true; - std::string json; - const auto status = Protobuf::util::MessageToJsonString(header, &json, json_options); - ASSERT(status.ok()); - return json; + return MessageUtil::getJsonStringFromMessage(header, false /* pretty_print */, + false /* always_print_primitive_fields */); } } // namespace diff --git a/source/extensions/tracers/xray/localized_sampling.h b/source/extensions/tracers/xray/localized_sampling.h index 709ec144a32b..aefcd795b058 100644 --- a/source/extensions/tracers/xray/localized_sampling.h +++ b/source/extensions/tracers/xray/localized_sampling.h @@ -74,13 +74,7 @@ class LocalizedSamplingRule { * Set the percentage of requests to sample _after_ sampling |fixed_target| requests per second. 
*/ void setRate(double rate) { rate_ = rate; } - - const std::string& host() const { return host_; } - const std::string& httpMethod() const { return http_method_; } - const std::string& urlPath() const { return url_path_; } - uint32_t fixedTarget() const { return fixed_target_; } double rate() const { return rate_; } - const Reservoir& reservoir() const { return reservoir_; } Reservoir& reservoir() { return reservoir_; } private: diff --git a/source/extensions/tracers/xray/tracer.cc b/source/extensions/tracers/xray/tracer.cc index 1d4768fcc025..d28fd8ff3066 100644 --- a/source/extensions/tracers/xray/tracer.cc +++ b/source/extensions/tracers/xray/tracer.cc @@ -71,31 +71,25 @@ void Span::finishSpan() { daemon::Segment s; s.set_name(name()); - s.set_id(Id()); + s.set_id(id()); s.set_trace_id(traceId()); s.set_start_time(time_point_cast(startTime()).time_since_epoch().count()); s.set_end_time( time_point_cast(time_source_.systemTime()).time_since_epoch().count()); s.set_parent_id(parentId()); - // HTTP annotations - using StructField = Protobuf::MapPair; - - ProtobufWkt::Struct* request = s.mutable_http()->mutable_request(); - auto* request_fields = request->mutable_fields(); + auto* request_fields = s.mutable_http()->mutable_request()->mutable_fields(); for (const auto& field : http_request_annotations_) { - request_fields->insert(StructField{field.first, field.second}); + request_fields->insert({field.first, field.second}); } - ProtobufWkt::Struct* response = s.mutable_http()->mutable_response(); - auto* response_fields = response->mutable_fields(); + auto* response_fields = s.mutable_http()->mutable_response()->mutable_fields(); for (const auto& field : http_response_annotations_) { - response_fields->insert(StructField{field.first, field.second}); + response_fields->insert({field.first, field.second}); } - using KeyValue = Protobuf::Map::value_type; for (const auto& item : custom_annotations_) { - s.mutable_annotations()->insert(KeyValue{item.first, item.second}); + s.mutable_annotations()->insert({item.first, item.second}); } const std::string json = MessageUtil::getJsonStringFromMessage( @@ -106,7 +100,7 @@ void Span::finishSpan() { void Span::injectContext(Http::RequestHeaderMap& request_headers) { const std::string xray_header_value = - fmt::format("Root={};Parent={};Sampled={}", traceId(), Id(), sampled() ? "1" : "0"); + fmt::format("Root={};Parent={};Sampled={}", traceId(), id(), sampled() ? "1" : "0"); request_headers.setCopy(Http::LowerCaseString(XRayTraceHeader), xray_header_value); } @@ -118,7 +112,7 @@ Tracing::SpanPtr Span::spawnChild(const Tracing::Config&, const std::string& ope child_span->setName(name()); child_span->setOperation(operation_name); child_span->setStartTime(start_time); - child_span->setParentId(Id()); + child_span->setParentId(id()); child_span->setTraceId(traceId()); child_span->setSampled(sampled()); return child_span; diff --git a/source/extensions/tracers/xray/tracer.h b/source/extensions/tracers/xray/tracer.h index cf7c977d8fbc..b0f92da9c941 100644 --- a/source/extensions/tracers/xray/tracer.h +++ b/source/extensions/tracers/xray/tracer.h @@ -113,7 +113,7 @@ class Span : public Tracing::Span, Logger::Loggable { /** * Gets this Span's ID. 
*/ - const std::string& Id() const { return id_; } + const std::string& id() const { return id_; } const std::string& parentId() const { return parent_segment_id_; } diff --git a/test/extensions/tracers/xray/BUILD b/test/extensions/tracers/xray/BUILD index e00d7e395bb9..8d1d57c436be 100644 --- a/test/extensions/tracers/xray/BUILD +++ b/test/extensions/tracers/xray/BUILD @@ -31,6 +31,8 @@ envoy_extension_cc_test( "//test/mocks/stats:stats_mocks", "//test/mocks/thread_local:thread_local_mocks", "//test/mocks/tracing:tracing_mocks", + "//test/test_common:environment_lib", + "//test/test_common:network_utility_lib", "//test/test_common:simulated_time_system_lib", ], ) diff --git a/test/extensions/tracers/xray/tracer_test.cc b/test/extensions/tracers/xray/tracer_test.cc index 023dd00a40ec..5323122f2e9e 100644 --- a/test/extensions/tracers/xray/tracer_test.cc +++ b/test/extensions/tracers/xray/tracer_test.cc @@ -12,6 +12,8 @@ #include "test/mocks/server/mocks.h" #include "test/mocks/tracing/mocks.h" +#include "test/test_common/environment.h" +#include "test/test_common/network_utility.h" #include "absl/strings/str_format.h" #include "absl/strings/str_split.h" @@ -108,15 +110,15 @@ TEST_F(XRayTracerTest, ChildSpanHasParentInfo) { absl::nullopt /*headers*/); const XRay::Span* xray_parent_span = static_cast(parent_span.get()); - const std::string expected_parent_id = xray_parent_span->Id(); - auto on_send = [&](const std::string& json) { + const std::string expected_parent_id = xray_parent_span->id(); + auto on_send = [xray_parent_span, expected_parent_id](const std::string& json) { ASSERT_FALSE(json.empty()); daemon::Segment s; MessageUtil::loadFromJson(json, s, ProtobufMessage::getNullValidationVisitor()); ASSERT_STREQ(expected_parent_id.c_str(), s.parent_id().c_str()); ASSERT_STREQ(expected_span_name, s.name().c_str()); ASSERT_STREQ(xray_parent_span->traceId().c_str(), s.trace_id().c_str()); - ASSERT_STRNE(xray_parent_span->Id().c_str(), s.id().c_str()); + ASSERT_STRNE(xray_parent_span->id().c_str(), s.id().c_str()); }; EXPECT_CALL(broker, send(_)).WillOnce(Invoke(on_send)); @@ -183,6 +185,47 @@ TEST_F(XRayTracerTest, TraceIDFormatTest) { ASSERT_EQ(24, parts[2].length()); } +class XRayDaemonTest : public testing::TestWithParam {}; + +INSTANTIATE_TEST_SUITE_P(IpVersions, XRayDaemonTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); + +TEST_P(XRayDaemonTest, VerifyUdpPacketContents) { + NiceMock server; + Network::Test::UdpSyncPeer xray_fake_daemon(GetParam()); + const std::string daemon_endpoint = xray_fake_daemon.localAddress()->asString(); + Tracer tracer{"my_segment", std::make_unique(daemon_endpoint), + server.timeSource()}; + auto span = tracer.startSpan("ingress" /*operation name*/, server.timeSource().systemTime(), + absl::nullopt /*headers*/); + + span->setTag("http.status_code", "202"); + span->finishSpan(); + + Network::UdpRecvData datagram; + xray_fake_daemon.recv(datagram); + + const std::string header_json = R"EOF({"format":"json","version":1})EOF"; + // The UDP datagram contains two independent, consecutive JSON documents; a header and a body. + const std::string payload = datagram.buffer_->toString(); + // Make sure the payload has enough data. + ASSERT_GT(payload.length(), header_json.length()); + // Skip the header since we're only interested in the body. + const std::string body = payload.substr(header_json.length()); + + EXPECT_EQ(0, payload.find(header_json)); + + // Deserialize the body to verify it. 
+ source::extensions::tracers::xray::daemon::Segment seg; + MessageUtil::loadFromJson(body, seg, ProtobufMessage::getNullValidationVisitor()); + EXPECT_STREQ("my_segment", seg.name().c_str()); + for (auto&& f : seg.http().request().fields()) { + // there should only be a single field + EXPECT_EQ(202, f.second.number_value()); + } +} + } // namespace } // namespace XRay } // namespace Tracers From bc2d1d383b87e26e1a6ac8042a1dc880b00624a3 Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Fri, 24 Apr 2020 13:14:14 -0700 Subject: [PATCH 022/909] ci: update before purge in cleanup (#10938) Signed-off-by: Lizan Zhou --- .azure-pipelines/cleanup.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.azure-pipelines/cleanup.sh b/.azure-pipelines/cleanup.sh index 0a3807d56ddd..72a9bbf9fa18 100755 --- a/.azure-pipelines/cleanup.sh +++ b/.azure-pipelines/cleanup.sh @@ -3,7 +3,7 @@ set -e # Temporary script to remove tools from Azure pipelines agent to create more disk space room. - +sudo apt-get -y update sudo apt-get purge -y 'ghc-*' 'zulu-*-azure-jdk' 'libllvm*' 'mysql-*' 'dotnet-*' 'cpp-*' dpkg-query -Wf '${Installed-Size}\t${Package}\n' | sort -rn From 72904d17d9415f3a579fb61268807fc37495ba4d Mon Sep 17 00:00:00 2001 From: Sunjay Bhatia Date: Fri, 24 Apr 2020 16:50:06 -0400 Subject: [PATCH 023/909] xray: expected_span_name is not captured by the lambda with MSVC (#10934) Ensure expected_span_name is captured by lamda with MSVC Signed-off-by: Sunjay Bhatia --- test/extensions/tracers/xray/tracer_test.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/extensions/tracers/xray/tracer_test.cc b/test/extensions/tracers/xray/tracer_test.cc index 5323122f2e9e..40191027feb0 100644 --- a/test/extensions/tracers/xray/tracer_test.cc +++ b/test/extensions/tracers/xray/tracer_test.cc @@ -111,7 +111,7 @@ TEST_F(XRayTracerTest, ChildSpanHasParentInfo) { const XRay::Span* xray_parent_span = static_cast(parent_span.get()); const std::string expected_parent_id = xray_parent_span->id(); - auto on_send = [xray_parent_span, expected_parent_id](const std::string& json) { + auto on_send = [&](const std::string& json) { ASSERT_FALSE(json.empty()); daemon::Segment s; MessageUtil::loadFromJson(json, s, ProtobufMessage::getNullValidationVisitor()); From 100c95753974c31a2c3dbc035a98e5d2e0e54807 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Guti=C3=A9rrez=20Segal=C3=A9s?= Date: Fri, 24 Apr 2020 16:51:53 -0400 Subject: [PATCH 024/909] compressor filter: add benchmark (#10464) * compressor filter: add benchmark I am debugging a case of slow responses possibly due to compression, so throwing in some benchmarks to get some perf data about this filter. ``` $ ./bazel-bin/test/extensions/filters/http/common/compressor/compressor_filter_speed_test 2020-03-20 00:15:07 Running ./bazel-bin/test/extensions/filters/http/common/compressor/compressor_filter_speed_test Run on (8 X 2300 MHz CPU s) CPU Caches: L1 Data 32K (x4) L1 Instruction 32K (x4) L2 Unified 262K (x4) L3 Unified 6291K (x1) Load Average: 2.17, 2.14, 2.00 ***WARNING*** Library was built as DEBUG. Timings may be affected. 
---------------------------------------------------------
Benchmark Time CPU Iterations
---------------------------------------------------------
FilterCompress 24674140 ns 24616586 ns 29
```

Signed-off-by: Raul Gutierrez Segales

* Fix format

Signed-off-by: Raul Gutierrez Segales

* Replace boilerplate with macro

Signed-off-by: Raul Gutierrez Segales

* Test a broader combination of params

Signed-off-by: Raul Gutierrez Segales

* Trailing semicolon

Signed-off-by: Raul Gutierrez Segales

* Fixes

* spelling

* const-ness

Signed-off-by: Raul Gutierrez Segales

* Test different chunk sizes (8k, 1k)

Signed-off-by: Raul Gutierrez Segales

* Add test for 4k

Signed-off-by: Raul Gutierrez Segales

* Reuse MockStreamDecoderFilterCallbacks

Turns out, instantiating this Mock takes ~6ms (!!).

Signed-off-by: Raul Gutierrez Segales

* Update benchmark output comment

Signed-off-by: Raul Gutierrez Segales

* Move 8k chunks test to the top

Signed-off-by: Raul Gutierrez Segales

* Update comment with 8k result

Signed-off-by: Raul Gutierrez Segales

* Move test buffers creation out of hot path

This brings time down to < 5ms (way better!). Turns out we were using a lot of time calling TestUtility::feedBufferWithRandomCharacters().

Signed-off-by: Raul Gutierrez Segales

* Properly move chunks

Signed-off-by: Raul Gutierrez Segales

* Fixes

Signed-off-by: Raul Gutierrez Segales

* Format

Signed-off-by: Raul Gutierrez Segales

* clang-tidy

Signed-off-by: Raul Gutierrez Segales

* Add test for 16kb sized chunk

Per: https://github.com/envoyproxy/envoy/blob/master/source/common/network/raw_buffer_socket.cc#L21

Envoy reads 16kb at a time from upstream.

Signed-off-by: Raul Gutierrez Segales

* Use manual timings to skip random data generation

Signed-off-by: Raul Gutierrez Segales

* Save results, useful for debugging

Signed-off-by: Raul Gutierrez Segales

* Use the same input for all tests

This actually makes the differences between the different chunk sizes mostly disappear. I suspect the previous differences were due to chunks being the same, since the seed for the random data isn't changing.
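For reference, the manual-timing approach mentioned above reduces to the following minimal, self-contained sketch (illustrative only; the buffer size and the dummy summing workload are stand-ins, not code from the filter benchmark itself): generate the input inside the iteration but outside the measured region, and report only the measured work via SetIterationTime().

```cpp
#include <chrono>
#include <cstdint>
#include <vector>

#include "benchmark/benchmark.h"

// Data setup happens before the clock starts, so it is excluded from the
// reported time. UseManualTime() tells the library to use the value passed
// to SetIterationTime() instead of wall-clock time for the whole iteration.
static void BM_ManualTiming(benchmark::State& state) {
  for (auto _ : state) {
    std::vector<uint8_t> data(122880, 'a'); // setup, excluded from the measurement
    const auto start = std::chrono::high_resolution_clock::now();
    uint64_t sum = 0;
    for (uint8_t byte : data) {
      sum += byte; // stand-in for the compression work under test
    }
    benchmark::DoNotOptimize(sum);
    const auto end = std::chrono::high_resolution_clock::now();
    state.SetIterationTime(std::chrono::duration<double>(end - start).count());
  }
}
BENCHMARK(BM_ManualTiming)->UseManualTime()->Unit(benchmark::kMillisecond);

BENCHMARK_MAIN();
```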
Signed-off-by: Raul Gutierrez Segales * Fix format Signed-off-by: Raul Gutierrez Segales --- .../filters/http/common/compressor/BUILD | 21 ++ .../compressor_filter_speed_test.cc | 295 ++++++++++++++++++ 2 files changed, 316 insertions(+) create mode 100644 test/extensions/filters/http/common/compressor/compressor_filter_speed_test.cc diff --git a/test/extensions/filters/http/common/compressor/BUILD b/test/extensions/filters/http/common/compressor/BUILD index b03a3cf39122..688f9164d800 100644 --- a/test/extensions/filters/http/common/compressor/BUILD +++ b/test/extensions/filters/http/common/compressor/BUILD @@ -3,6 +3,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", + "envoy_cc_test_binary", "envoy_package", ) @@ -21,3 +22,23 @@ envoy_cc_test( "@envoy_api//envoy/extensions/filters/http/compressor/v3:pkg_cc_proto", ], ) + +envoy_cc_test_binary( + name = "compressor_filter_speed_test", + srcs = ["compressor_filter_speed_test.cc"], + external_deps = [ + "benchmark", + "googletest", + ], + deps = [ + "//source/common/compressor:compressor_lib", + "//source/common/protobuf:utility_lib", + "//source/extensions/filters/http/common/compressor:compressor_lib", + "//test/mocks/http:http_mocks", + "//test/mocks/protobuf:protobuf_mocks", + "//test/mocks/runtime:runtime_mocks", + "//test/test_common:printers_lib", + "//test/test_common:utility_lib", + "@envoy_api//envoy/extensions/filters/http/compressor/v3:pkg_cc_proto", + ], +) diff --git a/test/extensions/filters/http/common/compressor/compressor_filter_speed_test.cc b/test/extensions/filters/http/common/compressor/compressor_filter_speed_test.cc new file mode 100644 index 000000000000..54103190e308 --- /dev/null +++ b/test/extensions/filters/http/common/compressor/compressor_filter_speed_test.cc @@ -0,0 +1,295 @@ +#include "envoy/extensions/filters/http/compressor/v3/compressor.pb.h" + +#include "common/compressor/zlib_compressor_impl.h" + +#include "extensions/filters/http/common/compressor/compressor.h" + +#include "test/mocks/http/mocks.h" +#include "test/mocks/runtime/mocks.h" +#include "test/mocks/stats/mocks.h" + +#include "benchmark/benchmark.h" +#include "gmock/gmock.h" + +using testing::Return; + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace Common { +namespace Compressors { + +class MockCompressorFilterConfig : public CompressorFilterConfig { +public: + MockCompressorFilterConfig( + const envoy::extensions::filters::http::compressor::v3::Compressor& compressor, + const std::string& stats_prefix, Stats::Scope& scope, Runtime::Loader& runtime, + const std::string& compressor_name, + Envoy::Compressor::ZlibCompressorImpl::CompressionLevel level, + Envoy::Compressor::ZlibCompressorImpl::CompressionStrategy strategy, int64_t window_bits, + uint64_t memory_level) + : CompressorFilterConfig(compressor, stats_prefix + compressor_name + ".", scope, runtime, + compressor_name), + level_(level), strategy_(strategy), window_bits_(window_bits), memory_level_(memory_level) { + } + + std::unique_ptr makeCompressor() override { + auto compressor = std::make_unique(); + compressor->init(level_, strategy_, window_bits_, memory_level_); + return compressor; + } + + const Envoy::Compressor::ZlibCompressorImpl::CompressionLevel level_; + const Envoy::Compressor::ZlibCompressorImpl::CompressionStrategy strategy_; + const int64_t window_bits_; + const uint64_t memory_level_; +}; + +using CompressionParams = + std::tuple; + +static constexpr uint64_t TestDataSize = 122880; + 
+Buffer::OwnedImpl generateTestData() { + Buffer::OwnedImpl data; + TestUtility::feedBufferWithRandomCharacters(data, TestDataSize); + return data; +} + +const Buffer::OwnedImpl& testData() { + CONSTRUCT_ON_FIRST_USE(Buffer::OwnedImpl, generateTestData()); +} + +static std::vector generateChunks(const uint64_t chunk_count, + const uint64_t chunk_size) { + std::vector vec; + vec.reserve(chunk_count); + + const auto& test_data = testData(); + uint64_t added = 0; + + for (uint64_t i = 0; i < chunk_count; ++i) { + Buffer::OwnedImpl chunk; + std::unique_ptr data(new char[chunk_size]); + + test_data.copyOut(added, chunk_size, data.get()); + chunk.add(absl::string_view(data.get(), chunk_size)); + vec.push_back(std::move(chunk)); + + added += chunk_size; + } + + return vec; +} + +struct Result { + uint64_t total_uncompressed_bytes = 0; + uint64_t total_compressed_bytes = 0; +}; + +static Result compressWith(std::vector&& chunks, CompressionParams params, + NiceMock& decoder_callbacks, + benchmark::State& state) { + auto start = std::chrono::high_resolution_clock::now(); + Stats::IsolatedStoreImpl stats; + testing::NiceMock runtime; + envoy::extensions::filters::http::compressor::v3::Compressor compressor; + + const auto level = std::get<0>(params); + const auto strategy = std::get<1>(params); + const auto window_bits = std::get<2>(params); + const auto memory_level = std::get<3>(params); + CompressorFilterConfigSharedPtr config = std::make_shared( + compressor, "test.", stats, runtime, "gzip", level, strategy, window_bits, memory_level); + + ON_CALL(runtime.snapshot_, featureEnabled("test.filter_enabled", 100)) + .WillByDefault(Return(true)); + + auto filter = std::make_unique(config); + filter->setDecoderFilterCallbacks(decoder_callbacks); + + Http::TestRequestHeaderMapImpl headers = {{":method", "get"}, {"accept-encoding", "gzip"}}; + filter->decodeHeaders(headers, false); + + Http::TestResponseHeaderMapImpl response_headers = { + {":method", "get"}, + {"content-length", "122880"}, + {"content-type", "application/json;charset=utf-8"}}; + filter->encodeHeaders(response_headers, false); + + uint64_t idx = 0; + Result res; + for (auto& data : chunks) { + res.total_uncompressed_bytes += data.length(); + + if (idx == (chunks.size() - 1)) { + filter->encodeData(data, true); + } else { + filter->encodeData(data, false); + } + + res.total_compressed_bytes += data.length(); + ++idx; + } + + EXPECT_EQ(res.total_uncompressed_bytes, + stats.counterFromString("test.gzip.total_uncompressed_bytes").value()); + EXPECT_EQ(res.total_compressed_bytes, + stats.counterFromString("test.gzip.total_compressed_bytes").value()); + + EXPECT_EQ(1U, stats.counterFromString("test.gzip.compressed").value()); + auto end = std::chrono::high_resolution_clock::now(); + const auto elapsed = std::chrono::duration_cast>(end - start); + state.SetIterationTime(elapsed.count()); + + return res; +} + +// SPELLCHECKER(off) +/* +Running ./bazel-bin/test/extensions/filters/http/common/compressor/compressor_filter_speed_test +Run on (8 X 2300 MHz CPU s) +CPU Caches: +L1 Data 32K (x4) +L1 Instruction 32K (x4) +L2 Unified 262K (x4) +L3 Unified 6291K (x1) +Load Average: 1.82, 1.72, 1.74 +***WARNING*** Library was built as DEBUG. Timings may be affected. +------------------------------------------------------------ +Benchmark Time CPU Iterations +------------------------------------------------------------ +.... 
+compressFull/0/manual_time 14.1 ms 14.3 ms 48 +compressFull/1/manual_time 7.06 ms 7.22 ms 104 +compressFull/2/manual_time 5.17 ms 5.33 ms 123 +compressFull/3/manual_time 15.4 ms 15.5 ms 45 +compressFull/4/manual_time 10.1 ms 10.3 ms 69 +compressFull/5/manual_time 15.8 ms 16.0 ms 40 +compressFull/6/manual_time 15.3 ms 15.5 ms 42 +compressFull/7/manual_time 9.91 ms 10.1 ms 71 +compressFull/8/manual_time 15.8 ms 16.0 ms 45 +compressChunks16384/0/manual_time 13.4 ms 13.5 ms 52 +compressChunks16384/1/manual_time 6.33 ms 6.48 ms 111 +compressChunks16384/2/manual_time 5.09 ms 5.27 ms 147 +compressChunks16384/3/manual_time 15.1 ms 15.3 ms 46 +compressChunks16384/4/manual_time 9.61 ms 9.78 ms 71 +compressChunks16384/5/manual_time 14.5 ms 14.6 ms 47 +compressChunks16384/6/manual_time 14.0 ms 14.1 ms 48 +compressChunks16384/7/manual_time 9.20 ms 9.36 ms 76 +compressChunks16384/8/manual_time 14.5 ms 14.6 ms 48 +compressChunks8192/0/manual_time 14.3 ms 14.5 ms 50 +compressChunks8192/1/manual_time 6.80 ms 6.96 ms 100 +compressChunks8192/2/manual_time 5.21 ms 5.36 ms 135 +compressChunks8192/3/manual_time 14.9 ms 15.0 ms 47 +compressChunks8192/4/manual_time 9.71 ms 9.87 ms 68 +compressChunks8192/5/manual_time 15.9 ms 16.1 ms 45 +.... +*/ +// SPELLCHECKER(on) + +static std::vector compression_params = { + // Speed + Standard + Small Window + Low mem level + {Envoy::Compressor::ZlibCompressorImpl::CompressionLevel::Speed, + Envoy::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, 9, 1}, + + // Speed + Standard + Med window + Med mem level + {Envoy::Compressor::ZlibCompressorImpl::CompressionLevel::Speed, + Envoy::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, 12, 5}, + + // Speed + Standard + Big window + High mem level + {Envoy::Compressor::ZlibCompressorImpl::CompressionLevel::Speed, + Envoy::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, 15, 9}, + + // Standard + Standard + Small window + Low mem level + {Envoy::Compressor::ZlibCompressorImpl::CompressionLevel::Standard, + Envoy::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, 9, 1}, + + // Standard + Standard + Med window + Med mem level + {Envoy::Compressor::ZlibCompressorImpl::CompressionLevel::Standard, + Envoy::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, 12, 5}, + + // Standard + Standard + High window + High mem level + {Envoy::Compressor::ZlibCompressorImpl::CompressionLevel::Standard, + Envoy::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, 15, 9}, + + // Best + Standard + Small window + Low mem level + {Envoy::Compressor::ZlibCompressorImpl::CompressionLevel::Best, + Envoy::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, 9, 1}, + + // Best + Standard + Med window + Med mem level + {Envoy::Compressor::ZlibCompressorImpl::CompressionLevel::Best, + Envoy::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, 12, 5}, + + // Best + Standard + High window + High mem level + {Envoy::Compressor::ZlibCompressorImpl::CompressionLevel::Best, + Envoy::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, 15, 9}}; + +static void compressFull(benchmark::State& state) { + NiceMock decoder_callbacks; + const auto idx = state.range(0); + const auto& params = compression_params[idx]; + + for (auto _ : state) { + std::vector chunks = generateChunks(1, 122880); + compressWith(std::move(chunks), params, decoder_callbacks, state); + } +} +BENCHMARK(compressFull)->DenseRange(0, 8, 1)->UseManualTime()->Unit(benchmark::kMillisecond); + +static 
void compressChunks16384(benchmark::State& state) { + NiceMock decoder_callbacks; + const auto idx = state.range(0); + const auto& params = compression_params[idx]; + + for (auto _ : state) { + std::vector chunks = generateChunks(7, 16384); + compressWith(std::move(chunks), params, decoder_callbacks, state); + } +} +BENCHMARK(compressChunks16384)->DenseRange(0, 8, 1)->UseManualTime()->Unit(benchmark::kMillisecond); + +static void compressChunks8192(benchmark::State& state) { + NiceMock decoder_callbacks; + const auto idx = state.range(0); + const auto& params = compression_params[idx]; + + for (auto _ : state) { + std::vector chunks = generateChunks(15, 8192); + compressWith(std::move(chunks), params, decoder_callbacks, state); + } +} +BENCHMARK(compressChunks8192)->DenseRange(0, 8, 1)->UseManualTime()->Unit(benchmark::kMillisecond); + +static void compressChunks4096(benchmark::State& state) { + NiceMock decoder_callbacks; + const auto idx = state.range(0); + const auto& params = compression_params[idx]; + + for (auto _ : state) { + std::vector chunks = generateChunks(30, 4096); + compressWith(std::move(chunks), params, decoder_callbacks, state); + } +} +BENCHMARK(compressChunks4096)->DenseRange(0, 8, 1)->UseManualTime()->Unit(benchmark::kMillisecond); + +static void compressChunks1024(benchmark::State& state) { + NiceMock decoder_callbacks; + const auto idx = state.range(0); + const auto& params = compression_params[idx]; + + for (auto _ : state) { + std::vector chunks = generateChunks(120, 1024); + compressWith(std::move(chunks), params, decoder_callbacks, state); + } +} +BENCHMARK(compressChunks1024)->DenseRange(0, 8, 1)->UseManualTime()->Unit(benchmark::kMillisecond); + +} // namespace Compressors +} // namespace Common +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy + +BENCHMARK_MAIN(); From 09d3d0076a62715e69f7226d5b829d46e7c6397e Mon Sep 17 00:00:00 2001 From: Ruslan Nigmatullin Date: Fri, 24 Apr 2020 14:43:10 -0700 Subject: [PATCH 025/909] path: Fix merge slash for paths ending with slash and present query args (#10922) Order of path suffix and query string was wrong, so the ending slash was moved to a query. Tests did not cover this scenario so add a new one. Signed-off-by: Ruslan Nigmatullin --- docs/root/version_history/current.rst | 1 + source/common/http/path_utility.cc | 9 +++++---- test/common/http/path_utility_test.cc | 1 + 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index f587abbb4aaa..23614a066953 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -13,6 +13,7 @@ Changes Disabled by default and can be enabled via :ref:`enable_upstream_stats `. * grpc-json: added support for streaming response using `google.api.HttpBody `_. +* http: fixed a bug where in some cases slash was moved from path to query string when :ref:`merging of adjacent slashes` is enabled. * http: fixed a bug where the upgrade header was not cleared on responses to non-upgrade requests. Can be reverted temporarily by setting runtime feature `envoy.reloadable_features.fix_upgrade_response` to false. 
* http: remove legacy connection pool code and their runtime features: `envoy.reloadable_features.new_http1_connection_pool_behavior` and diff --git a/source/common/http/path_utility.cc b/source/common/http/path_utility.cc index a9a905d44340..b1e5b60986b5 100644 --- a/source/common/http/path_utility.cc +++ b/source/common/http/path_utility.cc @@ -64,10 +64,11 @@ void PathUtil::mergeSlashes(RequestHeaderMap& headers) { if (path.find("//") == absl::string_view::npos) { return; } - const absl::string_view prefix = absl::StartsWith(path, "/") ? "/" : absl::string_view(); - const absl::string_view suffix = absl::EndsWith(path, "/") ? "/" : absl::string_view(); - headers.setPath(absl::StrCat( - prefix, absl::StrJoin(absl::StrSplit(path, '/', absl::SkipEmpty()), "/"), query, suffix)); + const absl::string_view path_prefix = absl::StartsWith(path, "/") ? "/" : absl::string_view(); + const absl::string_view path_suffix = absl::EndsWith(path, "/") ? "/" : absl::string_view(); + headers.setPath(absl::StrCat(path_prefix, + absl::StrJoin(absl::StrSplit(path, '/', absl::SkipEmpty()), "/"), + path_suffix, query)); } absl::string_view PathUtil::removeQueryAndFragment(const absl::string_view path) { diff --git a/test/common/http/path_utility_test.cc b/test/common/http/path_utility_test.cc index 0cd17e324c6d..bcf93f76d349 100644 --- a/test/common/http/path_utility_test.cc +++ b/test/common/http/path_utility_test.cc @@ -105,6 +105,7 @@ TEST_F(PathUtilityTest, MergeSlashes) { EXPECT_EQ("/a/b/c", mergeSlashes("/a////b/c")); // quadruple / in the middle EXPECT_EQ("/a/b?a=///c", mergeSlashes("/a//b?a=///c")); // slashes in the query are ignored EXPECT_EQ("/a/b?", mergeSlashes("/a//b?")); // empty query + EXPECT_EQ("/a/?b", mergeSlashes("//a/?b")); // ends with slash + query } TEST_F(PathUtilityTest, RemoveQueryAndFragment) { From 710306ef02948506156dfc730d83aeb0481775de Mon Sep 17 00:00:00 2001 From: yanavlasov Date: Fri, 24 Apr 2020 17:44:25 -0400 Subject: [PATCH 026/909] status: Fix ASAN error in Status payload handling (#10906) Signed-off-by: Yan Avlasov --- bazel/repository_locations.bzl | 8 ++++---- source/common/http/status.cc | 36 ++++++++++++++++++++++------------ 2 files changed, 27 insertions(+), 17 deletions(-) diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 58a5c8878608..d497f5bede6b 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -43,10 +43,10 @@ REPOSITORY_LOCATIONS = dict( urls = ["https://commondatastorage.googleapis.com/chromium-boringssl-docs/fips/boringssl-66005f41fbc3529ffe8d007708756720529da20d.tar.xz"], ), com_google_absl = dict( - sha256 = "2693730730247afb0e7cb2d41664ac2af3ad75c79944efd266be40ba944179b9", - strip_prefix = "abseil-cpp-06f0e767d13d4d68071c4fc51e25724e0fc8bc74", - # 2020-03-03 - urls = ["https://github.com/abseil/abseil-cpp/archive/06f0e767d13d4d68071c4fc51e25724e0fc8bc74.tar.gz"], + sha256 = "14ee08e2089c2a9b6bf27e1d10abc5629c69c4d0bab4b78ec5b65a29ea1c2af7", + strip_prefix = "abseil-cpp-cf3a1998e9d41709d4141e2f13375993cba1130e", + # 2020-03-05 + urls = ["https://github.com/abseil/abseil-cpp/archive/cf3a1998e9d41709d4141e2f13375993cba1130e.tar.gz"], ), com_github_apache_thrift = dict( sha256 = "7d59ac4fdcb2c58037ebd4a9da5f9a49e3e034bf75b3f26d9fe48ba3d8806e6b", diff --git a/source/common/http/status.cc b/source/common/http/status.cc index 74c38d82d145..d2b4ea122d35 100644 --- a/source/common/http/status.cc +++ b/source/common/http/status.cc @@ -39,17 +39,27 @@ struct 
PrematureResponsePayload : public EnvoyStatusPayload { }; template void storePayload(absl::Status& status, const T& payload) { - status.SetPayload( - EnvoyPayloadUrl, - absl::Cord(absl::string_view(reinterpret_cast(&payload), sizeof(payload)))); + absl::Cord cord(absl::string_view(reinterpret_cast(&payload), sizeof(payload))); + cord.Flatten(); // Flatten ahead of time for easier access later. + status.SetPayload(EnvoyPayloadUrl, std::move(cord)); } -template const T* getPayload(const absl::Status& status) { - auto payload = status.GetPayload(EnvoyPayloadUrl); - ASSERT(payload.has_value(), "Must have payload"); - auto data = payload.value().Flatten(); - ASSERT(data.length() >= sizeof(T), "Invalid payload length"); - return reinterpret_cast(data.data()); +template const T& getPayload(const absl::Status& status) { + // The only way to get a reference to the payload owned by the absl::Status is through the + // ForEachPayload method. All other methods create a copy of the payload, which is not convenient + // for peeking at the payload value. + const T* payload = nullptr; + status.ForEachPayload([&payload](absl::string_view url, const absl::Cord& cord) { + if (url == EnvoyPayloadUrl) { + ASSERT(!payload); // Status API guarantees to have one payload with given URL + auto data = cord.TryFlat(); + ASSERT(data.has_value()); // EnvoyPayloadUrl cords are flattened ahead of time + ASSERT(data.value().length() >= sizeof(T), "Invalid payload length"); + payload = reinterpret_cast(data.value().data()); + } + }); + ASSERT(payload); + return *payload; } } // namespace @@ -96,7 +106,7 @@ Status codecClientError(absl::string_view message) { // Methods for checking and extracting error information StatusCode getStatusCode(const Status& status) { - return status.ok() ? StatusCode::Ok : getPayload(status)->status_code_; + return status.ok() ? StatusCode::Ok : getPayload(status).status_code_; } bool isCodecProtocolError(const Status& status) { @@ -112,10 +122,10 @@ bool isPrematureResponseError(const Status& status) { } Http::Code getPrematureResponseHttpCode(const Status& status) { - const auto* payload = getPayload(status); - ASSERT(payload->status_code_ == StatusCode::PrematureResponseError, + const auto& payload = getPayload(status); + ASSERT(payload.status_code_ == StatusCode::PrematureResponseError, "Must be PrematureResponseError"); - return payload->http_code_; + return payload.http_code_; } bool isCodecClientError(const Status& status) { From 6516c8820c48e52b60235af2f69e330d8a3da4fd Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Fri, 24 Apr 2020 16:07:46 -0700 Subject: [PATCH 027/909] prometheus stats: Correctly group lines of the same metric name. (#10833) The prometheus exposition format requires all metrics of the same name (without tags) be grouped contiguously in the output. Additionally, it specifies that it is preferred for the stats to be output in the same order every time they are produced. Fix Envoy to comply with both of these constraints by taking an extra pass to collect all the stats so that they can be sorted before the final output is generated. 
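As a rough illustration of the grouping these constraints require (the metric names and values below are examples for exposition, not output captured from this change), every series that shares a metric name is emitted contiguously under a single TYPE annotation, and in a stable order across scrapes:

```
# TYPE envoy_cluster_upstream_cx_total counter
envoy_cluster_upstream_cx_total{envoy_cluster_name="cluster_a"} 12
envoy_cluster_upstream_cx_total{envoy_cluster_name="cluster_b"} 7

# TYPE envoy_cluster_membership_healthy gauge
envoy_cluster_membership_healthy{envoy_cluster_name="cluster_a"} 1
envoy_cluster_membership_healthy{envoy_cluster_name="cluster_b"} 1
```

rather than lines of the same metric being interleaved with lines of other metrics.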
Fixes #10073 Signed-off-by: Greg Greenway --- docs/root/version_history/current.rst | 1 + source/server/http/BUILD | 12 + source/server/http/prometheus_stats.cc | 218 +++++++ source/server/http/prometheus_stats.h | 41 ++ source/server/http/stats_handler.cc | 105 +--- source/server/http/stats_handler.h | 45 -- test/mocks/stats/mocks.h | 13 + test/server/http/BUILD | 9 + test/server/http/prometheus_stats_test.cc | 667 ++++++++++++++++++++++ test/server/http/stats_handler_test.cc | 437 -------------- tools/code_format/check_format.py | 3 +- 11 files changed, 964 insertions(+), 587 deletions(-) create mode 100644 source/server/http/prometheus_stats.cc create mode 100644 source/server/http/prometheus_stats.h create mode 100644 test/server/http/prometheus_stats_test.cc diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 23614a066953..b2cc90a89d19 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -21,6 +21,7 @@ Changes * logger: added :ref:`--log-format-prefix-with-location ` command line option to prefix '%v' with file path and line number. * network filters: added a :ref:`postgres proxy filter `. * network filters: added a :ref:`rocketmq proxy filter `. +* prometheus stats: fix the sort order of output lines to comply with the standard. * request_id: added to :ref:`always_set_request_id_in_response setting ` to set :ref:`x-request-id ` header in response even if tracing is not forced. diff --git a/source/server/http/BUILD b/source/server/http/BUILD index dbbb828c8930..7d46ea39d930 100644 --- a/source/server/http/BUILD +++ b/source/server/http/BUILD @@ -90,6 +90,7 @@ envoy_cc_library( srcs = ["stats_handler.cc"], hdrs = ["stats_handler.h"], deps = [ + ":prometheus_stats_lib", ":utils_lib", "//include/envoy/http:codes_interface", "//include/envoy/server:admin_interface", @@ -102,6 +103,17 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "prometheus_stats_lib", + srcs = ["prometheus_stats.cc"], + hdrs = ["prometheus_stats.h"], + deps = [ + ":utils_lib", + "//source/common/buffer:buffer_lib", + "//source/common/stats:histogram_lib", + ], +) + envoy_cc_library( name = "utils_lib", srcs = ["utils.cc"], diff --git a/source/server/http/prometheus_stats.cc b/source/server/http/prometheus_stats.cc new file mode 100644 index 000000000000..e04edeccf9cf --- /dev/null +++ b/source/server/http/prometheus_stats.cc @@ -0,0 +1,218 @@ +#include "server/http/prometheus_stats.h" + +#include "common/common/empty_string.h" +#include "common/stats/histogram_impl.h" + +#include "absl/strings/str_cat.h" + +namespace Envoy { +namespace Server { + +namespace { + +const std::regex& promRegex() { CONSTRUCT_ON_FIRST_USE(std::regex, "[^a-zA-Z0-9_]"); } + +/** + * Take a string and sanitize it according to Prometheus conventions. + */ +std::string sanitizeName(const std::string& name) { + // The name must match the regex [a-zA-Z_][a-zA-Z0-9_]* as required by + // prometheus. Refer to https://prometheus.io/docs/concepts/data_model/. + std::string stats_name = std::regex_replace(name, promRegex(), "_"); + if (stats_name[0] >= '0' && stats_name[0] <= '9') { + return absl::StrCat("_", stats_name); + } else { + return stats_name; + } +} + +/* + * Determine whether a metric has never been emitted and choose to + * not show it if we only wanted used metrics. 
+ */ +template +static bool shouldShowMetric(const StatType& metric, const bool used_only, + const absl::optional& regex) { + return ((!used_only || metric.used()) && + (!regex.has_value() || std::regex_search(metric.name(), regex.value()))); +} + +/* + * Comparator for Stats::Metric that does not require a string representation + * to make the comparison, for memory efficiency. + */ +struct MetricLessThan { + bool operator()(const Stats::Metric* a, const Stats::Metric* b) const { + ASSERT(&a->constSymbolTable() == &b->constSymbolTable()); + return a->constSymbolTable().lessThan(a->statName(), b->statName()); + } +}; + +/** + * Processes a stat type (counter, gauge, histogram) by generating all output lines, sorting + * them by tag-extracted metric name, and then outputting them in the correct sorted order into + * response. + * + * @param response The buffer to put the output into. + * @param used_only Whether to only output stats that are used. + * @param regex A filter on which stats to output. + * @param metrics The metrics to output stats for. This must contain all stats of the given type + * to be included in the same output. + * @param generate_output A function which returns the output text for this metric. + * @param type The name of the prometheus metric type for used in TYPE annotations. + */ +template +uint64_t outputStatType( + Buffer::Instance& response, const bool used_only, const absl::optional& regex, + const std::vector>& metrics, + const std::function& generate_output, + absl::string_view type) { + + /* + * From + * https:*github.com/prometheus/docs/blob/master/content/docs/instrumenting/exposition_formats.md#grouping-and-sorting: + * + * All lines for a given metric must be provided as one single group, with the optional HELP and + * TYPE lines first (in no particular order). Beyond that, reproducible sorting in repeated + * expositions is preferred but not required, i.e. do not sort if the computational cost is + * prohibitive. + */ + + // This is an unsorted collection of dumb-pointers (no need to increment then decrement every + // refcount; ownership is held throughout by `metrics`). It is unsorted for efficiency, but will + // be sorted before producing the final output to satisfy the "preferred" ordering from the + // prometheus spec: metrics will be sorted by their tags' textual representation, which will be + // consistent across calls. + using StatTypeUnsortedCollection = std::vector; + + // Return early to avoid crashing when getting the symbol table from the first metric. + if (metrics.empty()) { + return 0; + } + + // There should only be one symbol table for all of the stats in the admin + // interface. If this assumption changes, the name comparisons in this function + // will have to change to compare to convert all StatNames to strings before + // comparison. + const Stats::SymbolTable& global_symbol_table = metrics.front()->constSymbolTable(); + + // Sorted collection of metrics sorted by their tagExtractedName, to satisfy the requirements + // of the exposition format. 
+ std::map groups( + global_symbol_table); + + for (const auto& metric : metrics) { + ASSERT(&global_symbol_table == &metric->constSymbolTable()); + + if (!shouldShowMetric(*metric, used_only, regex)) { + continue; + } + + groups[metric->tagExtractedStatName()].push_back(metric.get()); + } + + for (auto& group : groups) { + const std::string prefixed_tag_extracted_name = + PrometheusStatsFormatter::metricName(global_symbol_table.toString(group.first)); + response.add(fmt::format("# TYPE {0} {1}\n", prefixed_tag_extracted_name, type)); + + // Sort before producing the final output to satisfy the "preferred" ordering from the + // prometheus spec: metrics will be sorted by their tags' textual representation, which will + // be consistent across calls. + std::sort(group.second.begin(), group.second.end(), MetricLessThan()); + + for (const auto& metric : group.second) { + response.add(generate_output(*metric, prefixed_tag_extracted_name)); + } + response.add("\n"); + } + return groups.size(); +} + +/* + * Return the prometheus output for a numeric Stat (Counter or Gauge). + */ +template +std::string generateNumericOutput(const StatType& metric, + const std::string& prefixed_tag_extracted_name) { + const std::string tags = PrometheusStatsFormatter::formattedTags(metric.tags()); + return fmt::format("{0}{{{1}}} {2}\n", prefixed_tag_extracted_name, tags, metric.value()); +} + +/* + * Returns the prometheus output for a histogram. The output is a multi-line string (with embedded + * newlines) that contains all the individual bucket counts and sum/count for a single histogram + * (metric_name plus all tags). + */ +std::string generateHistogramOutput(const Stats::ParentHistogram& histogram, + const std::string& prefixed_tag_extracted_name) { + const std::string tags = PrometheusStatsFormatter::formattedTags(histogram.tags()); + const std::string hist_tags = histogram.tags().empty() ? EMPTY_STRING : (tags + ","); + + const Stats::HistogramStatistics& stats = histogram.cumulativeStatistics(); + const std::vector& supported_buckets = stats.supportedBuckets(); + const std::vector& computed_buckets = stats.computedBuckets(); + std::string output; + for (size_t i = 0; i < supported_buckets.size(); ++i) { + double bucket = supported_buckets[i]; + uint64_t value = computed_buckets[i]; + // We want to print the bucket in a fixed point (non-scientific) format. 
The fmt library + // doesn't have a specific modifier to format as a fixed-point value only so we use the + // 'g' operator which prints the number in general fixed point format or scientific format + // with precision 50 to round the number up to 32 significant digits in fixed point format + // which should cover pretty much all cases + output.append(fmt::format("{0}_bucket{{{1}le=\"{2:.32g}\"}} {3}\n", prefixed_tag_extracted_name, + hist_tags, bucket, value)); + } + + output.append(fmt::format("{0}_bucket{{{1}le=\"+Inf\"}} {2}\n", prefixed_tag_extracted_name, + hist_tags, stats.sampleCount())); + output.append(fmt::format("{0}_sum{{{1}}} {2:.32g}\n", prefixed_tag_extracted_name, tags, + stats.sampleSum())); + output.append(fmt::format("{0}_count{{{1}}} {2}\n", prefixed_tag_extracted_name, tags, + stats.sampleCount())); + + return output; +}; + +} // namespace + +std::string PrometheusStatsFormatter::formattedTags(const std::vector& tags) { + std::vector buf; + buf.reserve(tags.size()); + for (const Stats::Tag& tag : tags) { + buf.push_back(fmt::format("{}=\"{}\"", sanitizeName(tag.name_), tag.value_)); + } + return absl::StrJoin(buf, ","); +} + +std::string PrometheusStatsFormatter::metricName(const std::string& extracted_name) { + // Add namespacing prefix to avoid conflicts, as per best practice: + // https://prometheus.io/docs/practices/naming/#metric-names + // Also, naming conventions on https://prometheus.io/docs/concepts/data_model/ + return sanitizeName(fmt::format("envoy_{0}", extracted_name)); +} + +// TODO(efimki): Add support of text readouts stats. +uint64_t PrometheusStatsFormatter::statsAsPrometheus( + const std::vector& counters, + const std::vector& gauges, + const std::vector& histograms, Buffer::Instance& response, + const bool used_only, const absl::optional& regex) { + + uint64_t metric_name_count = 0; + metric_name_count += outputStatType( + response, used_only, regex, counters, generateNumericOutput, "counter"); + + metric_name_count += outputStatType(response, used_only, regex, gauges, + generateNumericOutput, "gauge"); + + metric_name_count += outputStatType( + response, used_only, regex, histograms, generateHistogramOutput, "histogram"); + + return metric_name_count; +} + +} // namespace Server +} // namespace Envoy diff --git a/source/server/http/prometheus_stats.h b/source/server/http/prometheus_stats.h new file mode 100644 index 000000000000..e748d051d47b --- /dev/null +++ b/source/server/http/prometheus_stats.h @@ -0,0 +1,41 @@ +#pragma once + +#include +#include + +#include "envoy/buffer/buffer.h" +#include "envoy/stats/histogram.h" +#include "envoy/stats/stats.h" + +namespace Envoy { +namespace Server { +/** + * Formatter for metric/labels exported to Prometheus. + * + * See: https://prometheus.io/docs/concepts/data_model + */ +class PrometheusStatsFormatter { +public: + /** + * Extracts counters and gauges and relevant tags, appending them to + * the response buffer after sanitizing the metric / label names. + * @return uint64_t total number of metric types inserted in response. + */ + static uint64_t statsAsPrometheus(const std::vector& counters, + const std::vector& gauges, + const std::vector& histograms, + Buffer::Instance& response, const bool used_only, + const absl::optional& regex); + /** + * Format the given tags, returning a string as a comma-separated list + * of ="" pairs. + */ + static std::string formattedTags(const std::vector& tags); + /** + * Format the given metric name, prefixed with "envoy_". 
+ */ + static std::string metricName(const std::string& extracted_name); +}; + +} // namespace Server +} // namespace Envoy diff --git a/source/server/http/stats_handler.cc b/source/server/http/stats_handler.cc index 0d37267cf66f..a437bd2ac395 100644 --- a/source/server/http/stats_handler.cc +++ b/source/server/http/stats_handler.cc @@ -5,6 +5,7 @@ #include "common/http/headers.h" #include "common/http/utility.h" +#include "server/http/prometheus_stats.h" #include "server/http/utils.h" namespace Envoy { @@ -12,10 +13,6 @@ namespace Server { const uint64_t RecentLookupsCapacity = 100; -namespace { -const std::regex& promRegex() { CONSTRUCT_ON_FIRST_USE(std::regex, "[^a-zA-Z0-9_]"); } -} // namespace - Http::Code StatsHandler::handlerResetCounters(absl::string_view, Http::ResponseHeaderMap&, Buffer::Instance& response, AdminStream&, Server::Instance& server) { @@ -157,106 +154,6 @@ Http::Code StatsHandler::handlerPrometheusStats(absl::string_view path_and_query return Http::Code::OK; } -std::string PrometheusStatsFormatter::sanitizeName(const std::string& name) { - // The name must match the regex [a-zA-Z_][a-zA-Z0-9_]* as required by - // prometheus. Refer to https://prometheus.io/docs/concepts/data_model/. - std::string stats_name = std::regex_replace(name, promRegex(), "_"); - if (stats_name[0] >= '0' && stats_name[0] <= '9') { - return absl::StrCat("_", stats_name); - } else { - return stats_name; - } -} - -std::string PrometheusStatsFormatter::formattedTags(const std::vector& tags) { - std::vector buf; - buf.reserve(tags.size()); - for (const Stats::Tag& tag : tags) { - buf.push_back(fmt::format("{}=\"{}\"", sanitizeName(tag.name_), tag.value_)); - } - return absl::StrJoin(buf, ","); -} - -std::string PrometheusStatsFormatter::metricName(const std::string& extracted_name) { - // Add namespacing prefix to avoid conflicts, as per best practice: - // https://prometheus.io/docs/practices/naming/#metric-names - // Also, naming conventions on https://prometheus.io/docs/concepts/data_model/ - return sanitizeName(fmt::format("envoy_{0}", extracted_name)); -} - -// TODO(efimki): Add support of text readouts stats. 
-uint64_t PrometheusStatsFormatter::statsAsPrometheus( - const std::vector& counters, - const std::vector& gauges, - const std::vector& histograms, Buffer::Instance& response, - const bool used_only, const absl::optional& regex) { - std::unordered_set metric_type_tracker; - for (const auto& counter : counters) { - if (!shouldShowMetric(*counter, used_only, regex)) { - continue; - } - - const std::string tags = formattedTags(counter->tags()); - const std::string metric_name = metricName(counter->tagExtractedName()); - if (metric_type_tracker.find(metric_name) == metric_type_tracker.end()) { - metric_type_tracker.insert(metric_name); - response.add(fmt::format("# TYPE {0} counter\n", metric_name)); - } - response.add(fmt::format("{0}{{{1}}} {2}\n", metric_name, tags, counter->value())); - } - - for (const auto& gauge : gauges) { - if (!shouldShowMetric(*gauge, used_only, regex)) { - continue; - } - - const std::string tags = formattedTags(gauge->tags()); - const std::string metric_name = metricName(gauge->tagExtractedName()); - if (metric_type_tracker.find(metric_name) == metric_type_tracker.end()) { - metric_type_tracker.insert(metric_name); - response.add(fmt::format("# TYPE {0} gauge\n", metric_name)); - } - response.add(fmt::format("{0}{{{1}}} {2}\n", metric_name, tags, gauge->value())); - } - - for (const auto& histogram : histograms) { - if (!shouldShowMetric(*histogram, used_only, regex)) { - continue; - } - - const std::string tags = formattedTags(histogram->tags()); - const std::string hist_tags = histogram->tags().empty() ? EMPTY_STRING : (tags + ","); - - const std::string metric_name = metricName(histogram->tagExtractedName()); - if (metric_type_tracker.find(metric_name) == metric_type_tracker.end()) { - metric_type_tracker.insert(metric_name); - response.add(fmt::format("# TYPE {0} histogram\n", metric_name)); - } - - const Stats::HistogramStatistics& stats = histogram->cumulativeStatistics(); - const std::vector& supported_buckets = stats.supportedBuckets(); - const std::vector& computed_buckets = stats.computedBuckets(); - for (size_t i = 0; i < supported_buckets.size(); ++i) { - double bucket = supported_buckets[i]; - uint64_t value = computed_buckets[i]; - // We want to print the bucket in a fixed point (non-scientific) format. The fmt library - // doesn't have a specific modifier to format as a fixed-point value only so we use the - // 'g' operator which prints the number in general fixed point format or scientific format - // with precision 50 to round the number up to 32 significant digits in fixed point format - // which should cover pretty much all cases - response.add(fmt::format("{0}_bucket{{{1}le=\"{2:.32g}\"}} {3}\n", metric_name, hist_tags, - bucket, value)); - } - - response.add(fmt::format("{0}_bucket{{{1}le=\"+Inf\"}} {2}\n", metric_name, hist_tags, - stats.sampleCount())); - response.add(fmt::format("{0}_sum{{{1}}} {2:.32g}\n", metric_name, tags, stats.sampleSum())); - response.add(fmt::format("{0}_count{{{1}}} {2}\n", metric_name, tags, stats.sampleCount())); - } - - return metric_type_tracker.size(); -} - std::string StatsHandler::statsAsJson(const std::map& all_stats, const std::map& text_readouts, diff --git a/source/server/http/stats_handler.h b/source/server/http/stats_handler.h index ad4272c1d193..4103660689a3 100644 --- a/source/server/http/stats_handler.h +++ b/source/server/http/stats_handler.h @@ -66,50 +66,5 @@ class StatsHandler { bool pretty_print = false); }; -/** - * Formatter for metric/labels exported to Prometheus. 
- * - * See: https://prometheus.io/docs/concepts/data_model - */ -class PrometheusStatsFormatter { -public: - /** - * Extracts counters and gauges and relevant tags, appending them to - * the response buffer after sanitizing the metric / label names. - * @return uint64_t total number of metric types inserted in response. - */ - static uint64_t statsAsPrometheus(const std::vector& counters, - const std::vector& gauges, - const std::vector& histograms, - Buffer::Instance& response, const bool used_only, - const absl::optional& regex); - /** - * Format the given tags, returning a string as a comma-separated list - * of ="" pairs. - */ - static std::string formattedTags(const std::vector& tags); - /** - * Format the given metric name, prefixed with "envoy_". - */ - static std::string metricName(const std::string& extracted_name); - -private: - /** - * Take a string and sanitize it according to Prometheus conventions. - */ - static std::string sanitizeName(const std::string& name); - - /* - * Determine whether a metric has never been emitted and choose to - * not show it if we only wanted used metrics. - */ - template - static bool shouldShowMetric(const StatType& metric, const bool used_only, - const absl::optional& regex) { - return ((!used_only || metric.used()) && - (!regex.has_value() || std::regex_search(metric.name(), regex.value()))); - } -}; - } // namespace Server } // namespace Envoy diff --git a/test/mocks/stats/mocks.h b/test/mocks/stats/mocks.h index 06a81e7c8cf5..17491916545f 100644 --- a/test/mocks/stats/mocks.h +++ b/test/mocks/stats/mocks.h @@ -110,12 +110,25 @@ template class MockMetric : public BaseClass { void setTags(const TagVector& tags) { tag_pool_.clear(); + tag_names_and_values_.clear(); tags_ = tags; for (const Tag& tag : tags) { tag_names_and_values_.push_back(tag_pool_.add(tag.name_)); tag_names_and_values_.push_back(tag_pool_.add(tag.value_)); } } + + void setTags(const Stats::StatNameTagVector& tags) { + tag_pool_.clear(); + tag_names_and_values_.clear(); + tags_.clear(); + for (const StatNameTag& tag : tags) { + tag_names_and_values_.push_back(tag.first); + tag_names_and_values_.push_back(tag.second); + tags_.push_back(Tag{symbol_table_->toString(tag.first), symbol_table_->toString(tag.second)}); + } + } + void addTag(const Tag& tag) { tags_.emplace_back(tag); tag_names_and_values_.push_back(tag_pool_.add(tag.name_)); diff --git a/test/server/http/BUILD b/test/server/http/BUILD index 51d73c734688..9a706a07cc6b 100644 --- a/test/server/http/BUILD +++ b/test/server/http/BUILD @@ -73,6 +73,15 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "prometheus_stats_test", + srcs = ["prometheus_stats_test.cc"], + deps = [ + "//source/server/http:prometheus_stats_lib", + "//test/test_common:utility_lib", + ], +) + envoy_cc_test( name = "config_tracker_impl_test", srcs = ["config_tracker_impl_test.cc"], diff --git a/test/server/http/prometheus_stats_test.cc b/test/server/http/prometheus_stats_test.cc new file mode 100644 index 000000000000..fb6f16b958a9 --- /dev/null +++ b/test/server/http/prometheus_stats_test.cc @@ -0,0 +1,667 @@ +#include + +#include "server/http/prometheus_stats.h" + +#include "test/mocks/stats/mocks.h" +#include "test/test_common/utility.h" + +using testing::NiceMock; + +namespace Envoy { +namespace Server { + +class HistogramWrapper { +public: + HistogramWrapper() : histogram_(hist_alloc()) {} + + ~HistogramWrapper() { hist_free(histogram_); } + + const histogram_t* getHistogram() { return histogram_; } + + void setHistogramValues(const std::vector& 
values) { + for (uint64_t value : values) { + hist_insert_intscale(histogram_, value, 0, 1); + } + } + + void setHistogramValuesWithCounts(const std::vector>& values) { + for (std::pair cv : values) { + hist_insert_intscale(histogram_, cv.first, 0, cv.second); + } + } + +private: + histogram_t* histogram_; +}; + +class PrometheusStatsFormatterTest : public testing::Test { +protected: + PrometheusStatsFormatterTest() + : symbol_table_(Stats::SymbolTableCreator::makeSymbolTable()), alloc_(*symbol_table_), + pool_(*symbol_table_) {} + + ~PrometheusStatsFormatterTest() override { clearStorage(); } + + void addCounter(const std::string& name, Stats::StatNameTagVector cluster_tags) { + Stats::StatNameManagedStorage name_storage(baseName(name, cluster_tags), *symbol_table_); + Stats::StatNameManagedStorage tag_extracted_name_storage(name, *symbol_table_); + counters_.push_back(alloc_.makeCounter(name_storage.statName(), + tag_extracted_name_storage.statName(), cluster_tags)); + } + + void addGauge(const std::string& name, Stats::StatNameTagVector cluster_tags) { + Stats::StatNameManagedStorage name_storage(baseName(name, cluster_tags), *symbol_table_); + Stats::StatNameManagedStorage tag_extracted_name_storage(name, *symbol_table_); + gauges_.push_back(alloc_.makeGauge(name_storage.statName(), + tag_extracted_name_storage.statName(), cluster_tags, + Stats::Gauge::ImportMode::Accumulate)); + } + + using MockHistogramSharedPtr = Stats::RefcountPtr>; + void addHistogram(MockHistogramSharedPtr histogram) { histograms_.push_back(histogram); } + + MockHistogramSharedPtr makeHistogram(const std::string& name, + Stats::StatNameTagVector cluster_tags) { + auto histogram = MockHistogramSharedPtr(new NiceMock()); + histogram->name_ = baseName(name, cluster_tags); + histogram->setTagExtractedName(name); + histogram->setTags(cluster_tags); + histogram->used_ = true; + return histogram; + } + + Stats::StatName makeStat(absl::string_view name) { return pool_.add(name); } + + // Format tags into the name to create a unique stat_name for each name:tag combination. + // If the same stat_name is passed to makeGauge() or makeCounter(), even with different + // tags, a copy of the previous metric will be returned. 
+ std::string baseName(const std::string& name, Stats::StatNameTagVector cluster_tags) { + std::string result = name; + for (const auto& name_tag : cluster_tags) { + result.append(fmt::format("<{}:{}>", symbol_table_->toString(name_tag.first), + symbol_table_->toString(name_tag.second))); + } + return result; + } + + void clearStorage() { + pool_.clear(); + counters_.clear(); + gauges_.clear(); + histograms_.clear(); + EXPECT_EQ(0, symbol_table_->numSymbols()); + } + + Stats::SymbolTablePtr symbol_table_; + Stats::AllocatorImpl alloc_; + Stats::StatNamePool pool_; + std::vector counters_; + std::vector gauges_; + std::vector histograms_; +}; + +TEST_F(PrometheusStatsFormatterTest, MetricName) { + std::string raw = "vulture.eats-liver"; + std::string expected = "envoy_vulture_eats_liver"; + auto actual = PrometheusStatsFormatter::metricName(raw); + EXPECT_EQ(expected, actual); +} + +TEST_F(PrometheusStatsFormatterTest, SanitizeMetricName) { + std::string raw = "An.artist.plays-violin@019street"; + std::string expected = "envoy_An_artist_plays_violin_019street"; + auto actual = PrometheusStatsFormatter::metricName(raw); + EXPECT_EQ(expected, actual); +} + +TEST_F(PrometheusStatsFormatterTest, SanitizeMetricNameDigitFirst) { + std::string raw = "3.artists.play-violin@019street"; + std::string expected = "envoy_3_artists_play_violin_019street"; + auto actual = PrometheusStatsFormatter::metricName(raw); + EXPECT_EQ(expected, actual); +} + +TEST_F(PrometheusStatsFormatterTest, FormattedTags) { + std::vector tags; + Stats::Tag tag1 = {"a.tag-name", "a.tag-value"}; + Stats::Tag tag2 = {"another_tag_name", "another_tag-value"}; + tags.push_back(tag1); + tags.push_back(tag2); + std::string expected = "a_tag_name=\"a.tag-value\",another_tag_name=\"another_tag-value\""; + auto actual = PrometheusStatsFormatter::formattedTags(tags); + EXPECT_EQ(expected, actual); +} + +TEST_F(PrometheusStatsFormatterTest, MetricNameCollison) { + + // Create two counters and two gauges with each pair having the same name, + // but having different tag names and values. + //`statsAsPrometheus()` should return two implying it found two unique stat names + + addCounter("cluster.test_cluster_1.upstream_cx_total", + {{makeStat("a.tag-name"), makeStat("a.tag-value")}}); + addCounter("cluster.test_cluster_1.upstream_cx_total", + {{makeStat("another_tag_name"), makeStat("another_tag-value")}}); + addGauge("cluster.test_cluster_2.upstream_cx_total", + {{makeStat("another_tag_name_3"), makeStat("another_tag_3-value")}}); + addGauge("cluster.test_cluster_2.upstream_cx_total", + {{makeStat("another_tag_name_4"), makeStat("another_tag_4-value")}}); + + Buffer::OwnedImpl response; + auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, + false, absl::nullopt); + EXPECT_EQ(2UL, size); +} + +TEST_F(PrometheusStatsFormatterTest, UniqueMetricName) { + + // Create two counters and two gauges, all with unique names. + // statsAsPrometheus() should return four implying it found + // four unique stat names. 
+ + addCounter("cluster.test_cluster_1.upstream_cx_total", + {{makeStat("a.tag-name"), makeStat("a.tag-value")}}); + addCounter("cluster.test_cluster_2.upstream_cx_total", + {{makeStat("another_tag_name"), makeStat("another_tag-value")}}); + addGauge("cluster.test_cluster_3.upstream_cx_total", + {{makeStat("another_tag_name_3"), makeStat("another_tag_3-value")}}); + addGauge("cluster.test_cluster_4.upstream_cx_total", + {{makeStat("another_tag_name_4"), makeStat("another_tag_4-value")}}); + + Buffer::OwnedImpl response; + auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, + false, absl::nullopt); + EXPECT_EQ(4UL, size); +} + +TEST_F(PrometheusStatsFormatterTest, HistogramWithNoValuesAndNoTags) { + HistogramWrapper h1_cumulative; + h1_cumulative.setHistogramValues(std::vector(0)); + Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram()); + + auto histogram = makeHistogram("histogram1", {}); + ON_CALL(*histogram, cumulativeStatistics()) + .WillByDefault(testing::ReturnRef(h1_cumulative_statistics)); + + addHistogram(histogram); + + Buffer::OwnedImpl response; + auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, + false, absl::nullopt); + EXPECT_EQ(1UL, size); + + const std::string expected_output = R"EOF(# TYPE envoy_histogram1 histogram +envoy_histogram1_bucket{le="0.5"} 0 +envoy_histogram1_bucket{le="1"} 0 +envoy_histogram1_bucket{le="5"} 0 +envoy_histogram1_bucket{le="10"} 0 +envoy_histogram1_bucket{le="25"} 0 +envoy_histogram1_bucket{le="50"} 0 +envoy_histogram1_bucket{le="100"} 0 +envoy_histogram1_bucket{le="250"} 0 +envoy_histogram1_bucket{le="500"} 0 +envoy_histogram1_bucket{le="1000"} 0 +envoy_histogram1_bucket{le="2500"} 0 +envoy_histogram1_bucket{le="5000"} 0 +envoy_histogram1_bucket{le="10000"} 0 +envoy_histogram1_bucket{le="30000"} 0 +envoy_histogram1_bucket{le="60000"} 0 +envoy_histogram1_bucket{le="300000"} 0 +envoy_histogram1_bucket{le="600000"} 0 +envoy_histogram1_bucket{le="1800000"} 0 +envoy_histogram1_bucket{le="3600000"} 0 +envoy_histogram1_bucket{le="+Inf"} 0 +envoy_histogram1_sum{} 0 +envoy_histogram1_count{} 0 + +)EOF"; + + EXPECT_EQ(expected_output, response.toString()); +} + +TEST_F(PrometheusStatsFormatterTest, HistogramWithHighCounts) { + HistogramWrapper h1_cumulative; + + // Force large counts to prove that the +Inf bucket doesn't overflow to scientific notation. 
+ h1_cumulative.setHistogramValuesWithCounts(std::vector>({ + {1, 100000}, + {100, 1000000}, + {1000, 100000000}, + })); + + Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram()); + + auto histogram = makeHistogram("histogram1", {}); + ON_CALL(*histogram, cumulativeStatistics()) + .WillByDefault(testing::ReturnRef(h1_cumulative_statistics)); + + addHistogram(histogram); + + Buffer::OwnedImpl response; + auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, + false, absl::nullopt); + EXPECT_EQ(1UL, size); + + const std::string expected_output = R"EOF(# TYPE envoy_histogram1 histogram +envoy_histogram1_bucket{le="0.5"} 0 +envoy_histogram1_bucket{le="1"} 0 +envoy_histogram1_bucket{le="5"} 100000 +envoy_histogram1_bucket{le="10"} 100000 +envoy_histogram1_bucket{le="25"} 100000 +envoy_histogram1_bucket{le="50"} 100000 +envoy_histogram1_bucket{le="100"} 100000 +envoy_histogram1_bucket{le="250"} 1100000 +envoy_histogram1_bucket{le="500"} 1100000 +envoy_histogram1_bucket{le="1000"} 1100000 +envoy_histogram1_bucket{le="2500"} 101100000 +envoy_histogram1_bucket{le="5000"} 101100000 +envoy_histogram1_bucket{le="10000"} 101100000 +envoy_histogram1_bucket{le="30000"} 101100000 +envoy_histogram1_bucket{le="60000"} 101100000 +envoy_histogram1_bucket{le="300000"} 101100000 +envoy_histogram1_bucket{le="600000"} 101100000 +envoy_histogram1_bucket{le="1800000"} 101100000 +envoy_histogram1_bucket{le="3600000"} 101100000 +envoy_histogram1_bucket{le="+Inf"} 101100000 +envoy_histogram1_sum{} 105105105000 +envoy_histogram1_count{} 101100000 + +)EOF"; + + EXPECT_EQ(expected_output, response.toString()); +} + +TEST_F(PrometheusStatsFormatterTest, OutputWithAllMetricTypes) { + addCounter("cluster.test_1.upstream_cx_total", + {{makeStat("a.tag-name"), makeStat("a.tag-value")}}); + addCounter("cluster.test_2.upstream_cx_total", + {{makeStat("another_tag_name"), makeStat("another_tag-value")}}); + addGauge("cluster.test_3.upstream_cx_total", + {{makeStat("another_tag_name_3"), makeStat("another_tag_3-value")}}); + addGauge("cluster.test_4.upstream_cx_total", + {{makeStat("another_tag_name_4"), makeStat("another_tag_4-value")}}); + + const std::vector h1_values = {50, 20, 30, 70, 100, 5000, 200}; + HistogramWrapper h1_cumulative; + h1_cumulative.setHistogramValues(h1_values); + Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram()); + + auto histogram1 = + makeHistogram("cluster.test_1.upstream_rq_time", {{makeStat("key1"), makeStat("value1")}, + {makeStat("key2"), makeStat("value2")}}); + histogram1->unit_ = Stats::Histogram::Unit::Milliseconds; + addHistogram(histogram1); + EXPECT_CALL(*histogram1, cumulativeStatistics()) + .WillOnce(testing::ReturnRef(h1_cumulative_statistics)); + + Buffer::OwnedImpl response; + auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, + false, absl::nullopt); + EXPECT_EQ(5UL, size); + + const std::string expected_output = R"EOF(# TYPE envoy_cluster_test_1_upstream_cx_total counter +envoy_cluster_test_1_upstream_cx_total{a_tag_name="a.tag-value"} 0 + +# TYPE envoy_cluster_test_2_upstream_cx_total counter +envoy_cluster_test_2_upstream_cx_total{another_tag_name="another_tag-value"} 0 + +# TYPE envoy_cluster_test_3_upstream_cx_total gauge +envoy_cluster_test_3_upstream_cx_total{another_tag_name_3="another_tag_3-value"} 0 + +# TYPE envoy_cluster_test_4_upstream_cx_total gauge 
+envoy_cluster_test_4_upstream_cx_total{another_tag_name_4="another_tag_4-value"} 0 + +# TYPE envoy_cluster_test_1_upstream_rq_time histogram +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="0.5"} 0 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="1"} 0 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="5"} 0 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="10"} 0 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="25"} 1 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="50"} 2 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="100"} 4 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="250"} 6 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="500"} 6 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="1000"} 6 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="2500"} 6 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="5000"} 6 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="10000"} 7 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="30000"} 7 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="60000"} 7 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="300000"} 7 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="600000"} 7 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="1800000"} 7 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="3600000"} 7 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="+Inf"} 7 +envoy_cluster_test_1_upstream_rq_time_sum{key1="value1",key2="value2"} 5532 +envoy_cluster_test_1_upstream_rq_time_count{key1="value1",key2="value2"} 7 + +)EOF"; + + EXPECT_EQ(expected_output, response.toString()); +} + +// Test that output groups all metrics of the same name (with different tags) together, +// as required by the Prometheus exposition format spec. Additionally, groups of metrics +// should be sorted by their tags; the format specifies that it is preferred that metrics +// are always grouped in the same order, and sorting is an easy way to ensure this. +TEST_F(PrometheusStatsFormatterTest, OutputSortedByMetricName) { + const std::vector h1_values = {50, 20, 30, 70, 100, 5000, 200}; + HistogramWrapper h1_cumulative; + h1_cumulative.setHistogramValues(h1_values); + Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram()); + + // Create the 3 clusters in non-sorted order to exercise the sorting. + // Create two of each metric type (counter, gauge, histogram) so that + // the output for each needs to be collected together. 
+ for (const char* cluster : {"ccc", "aaa", "bbb"}) { + const Stats::StatNameTagVector tags{{makeStat("cluster"), makeStat(cluster)}}; + addCounter("cluster.upstream_cx_total", tags); + addCounter("cluster.upstream_cx_connect_fail", tags); + addGauge("cluster.upstream_cx_active", tags); + addGauge("cluster.upstream_rq_active", tags); + + for (const char* hist_name : {"cluster.upstream_rq_time", "cluster.upstream_response_time"}) { + auto histogram1 = makeHistogram(hist_name, tags); + histogram1->unit_ = Stats::Histogram::Unit::Milliseconds; + addHistogram(histogram1); + EXPECT_CALL(*histogram1, cumulativeStatistics()) + .WillOnce(testing::ReturnRef(h1_cumulative_statistics)); + } + } + + Buffer::OwnedImpl response; + auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, + false, absl::nullopt); + EXPECT_EQ(6UL, size); + + const std::string expected_output = R"EOF(# TYPE envoy_cluster_upstream_cx_connect_fail counter +envoy_cluster_upstream_cx_connect_fail{cluster="aaa"} 0 +envoy_cluster_upstream_cx_connect_fail{cluster="bbb"} 0 +envoy_cluster_upstream_cx_connect_fail{cluster="ccc"} 0 + +# TYPE envoy_cluster_upstream_cx_total counter +envoy_cluster_upstream_cx_total{cluster="aaa"} 0 +envoy_cluster_upstream_cx_total{cluster="bbb"} 0 +envoy_cluster_upstream_cx_total{cluster="ccc"} 0 + +# TYPE envoy_cluster_upstream_cx_active gauge +envoy_cluster_upstream_cx_active{cluster="aaa"} 0 +envoy_cluster_upstream_cx_active{cluster="bbb"} 0 +envoy_cluster_upstream_cx_active{cluster="ccc"} 0 + +# TYPE envoy_cluster_upstream_rq_active gauge +envoy_cluster_upstream_rq_active{cluster="aaa"} 0 +envoy_cluster_upstream_rq_active{cluster="bbb"} 0 +envoy_cluster_upstream_rq_active{cluster="ccc"} 0 + +# TYPE envoy_cluster_upstream_response_time histogram +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="0.5"} 0 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="1"} 0 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="5"} 0 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="10"} 0 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="25"} 1 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="50"} 2 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="100"} 4 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="250"} 6 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="500"} 6 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="1000"} 6 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="2500"} 6 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="5000"} 6 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="10000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="30000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="60000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="300000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="600000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="1800000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="3600000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="+Inf"} 7 +envoy_cluster_upstream_response_time_sum{cluster="aaa"} 5532 +envoy_cluster_upstream_response_time_count{cluster="aaa"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="0.5"} 0 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="1"} 0 
+envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="5"} 0 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="10"} 0 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="25"} 1 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="50"} 2 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="100"} 4 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="250"} 6 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="500"} 6 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="1000"} 6 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="2500"} 6 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="5000"} 6 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="10000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="30000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="60000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="300000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="600000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="1800000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="3600000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="+Inf"} 7 +envoy_cluster_upstream_response_time_sum{cluster="bbb"} 5532 +envoy_cluster_upstream_response_time_count{cluster="bbb"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="0.5"} 0 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="1"} 0 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="5"} 0 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="10"} 0 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="25"} 1 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="50"} 2 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="100"} 4 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="250"} 6 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="500"} 6 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="1000"} 6 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="2500"} 6 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="5000"} 6 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="10000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="30000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="60000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="300000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="600000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="1800000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="3600000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="+Inf"} 7 +envoy_cluster_upstream_response_time_sum{cluster="ccc"} 5532 +envoy_cluster_upstream_response_time_count{cluster="ccc"} 7 + +# TYPE envoy_cluster_upstream_rq_time histogram +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="0.5"} 0 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="1"} 0 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="5"} 0 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="10"} 0 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="25"} 1 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="50"} 2 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="100"} 4 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="250"} 6 
+envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="500"} 6 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="1000"} 6 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="2500"} 6 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="5000"} 6 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="10000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="30000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="60000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="300000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="600000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="1800000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="3600000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="+Inf"} 7 +envoy_cluster_upstream_rq_time_sum{cluster="aaa"} 5532 +envoy_cluster_upstream_rq_time_count{cluster="aaa"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="0.5"} 0 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="1"} 0 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="5"} 0 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="10"} 0 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="25"} 1 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="50"} 2 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="100"} 4 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="250"} 6 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="500"} 6 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="1000"} 6 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="2500"} 6 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="5000"} 6 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="10000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="30000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="60000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="300000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="600000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="1800000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="3600000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="+Inf"} 7 +envoy_cluster_upstream_rq_time_sum{cluster="bbb"} 5532 +envoy_cluster_upstream_rq_time_count{cluster="bbb"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="0.5"} 0 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="1"} 0 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="5"} 0 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="10"} 0 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="25"} 1 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="50"} 2 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="100"} 4 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="250"} 6 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="500"} 6 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="1000"} 6 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="2500"} 6 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="5000"} 6 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="10000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="30000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="60000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="300000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="600000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="1800000"} 7 
+envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="3600000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="+Inf"} 7 +envoy_cluster_upstream_rq_time_sum{cluster="ccc"} 5532 +envoy_cluster_upstream_rq_time_count{cluster="ccc"} 7 + +)EOF"; + + EXPECT_EQ(expected_output, response.toString()); +} + +TEST_F(PrometheusStatsFormatterTest, OutputWithUsedOnly) { + addCounter("cluster.test_1.upstream_cx_total", + {{makeStat("a.tag-name"), makeStat("a.tag-value")}}); + addCounter("cluster.test_2.upstream_cx_total", + {{makeStat("another_tag_name"), makeStat("another_tag-value")}}); + addGauge("cluster.test_3.upstream_cx_total", + {{makeStat("another_tag_name_3"), makeStat("another_tag_3-value")}}); + addGauge("cluster.test_4.upstream_cx_total", + {{makeStat("another_tag_name_4"), makeStat("another_tag_4-value")}}); + + const std::vector h1_values = {50, 20, 30, 70, 100, 5000, 200}; + HistogramWrapper h1_cumulative; + h1_cumulative.setHistogramValues(h1_values); + Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram()); + + auto histogram1 = + makeHistogram("cluster.test_1.upstream_rq_time", {{makeStat("key1"), makeStat("value1")}, + {makeStat("key2"), makeStat("value2")}}); + histogram1->unit_ = Stats::Histogram::Unit::Milliseconds; + addHistogram(histogram1); + EXPECT_CALL(*histogram1, cumulativeStatistics()) + .WillOnce(testing::ReturnRef(h1_cumulative_statistics)); + + Buffer::OwnedImpl response; + auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, + true, absl::nullopt); + EXPECT_EQ(1UL, size); + + const std::string expected_output = R"EOF(# TYPE envoy_cluster_test_1_upstream_rq_time histogram +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="0.5"} 0 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="1"} 0 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="5"} 0 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="10"} 0 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="25"} 1 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="50"} 2 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="100"} 4 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="250"} 6 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="500"} 6 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="1000"} 6 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="2500"} 6 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="5000"} 6 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="10000"} 7 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="30000"} 7 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="60000"} 7 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="300000"} 7 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="600000"} 7 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="1800000"} 7 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="3600000"} 7 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="+Inf"} 7 +envoy_cluster_test_1_upstream_rq_time_sum{key1="value1",key2="value2"} 5532 
+envoy_cluster_test_1_upstream_rq_time_count{key1="value1",key2="value2"} 7 + +)EOF"; + + EXPECT_EQ(expected_output, response.toString()); +} + +TEST_F(PrometheusStatsFormatterTest, OutputWithUsedOnlyHistogram) { + const std::vector h1_values = {}; + HistogramWrapper h1_cumulative; + h1_cumulative.setHistogramValues(h1_values); + Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram()); + + auto histogram1 = + makeHistogram("cluster.test_1.upstream_rq_time", {{makeStat("key1"), makeStat("value1")}, + {makeStat("key2"), makeStat("value2")}}); + histogram1->unit_ = Stats::Histogram::Unit::Milliseconds; + histogram1->used_ = false; + addHistogram(histogram1); + + { + const bool used_only = true; + EXPECT_CALL(*histogram1, cumulativeStatistics()).Times(0); + + Buffer::OwnedImpl response; + auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, + response, used_only, absl::nullopt); + EXPECT_EQ(0UL, size); + } + + { + const bool used_only = false; + EXPECT_CALL(*histogram1, cumulativeStatistics()) + .WillOnce(testing::ReturnRef(h1_cumulative_statistics)); + + Buffer::OwnedImpl response; + auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, + response, used_only, absl::nullopt); + EXPECT_EQ(1UL, size); + } +} + +TEST_F(PrometheusStatsFormatterTest, OutputWithRegexp) { + addCounter("cluster.test_1.upstream_cx_total", + {{makeStat("a.tag-name"), makeStat("a.tag-value")}}); + addCounter("cluster.test_2.upstream_cx_total", + {{makeStat("another_tag_name"), makeStat("another_tag-value")}}); + addGauge("cluster.test_3.upstream_cx_total", + {{makeStat("another_tag_name_3"), makeStat("another_tag_3-value")}}); + addGauge("cluster.test_4.upstream_cx_total", + {{makeStat("another_tag_name_4"), makeStat("another_tag_4-value")}}); + + const std::vector h1_values = {50, 20, 30, 70, 100, 5000, 200}; + HistogramWrapper h1_cumulative; + h1_cumulative.setHistogramValues(h1_values); + Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram()); + + auto histogram1 = + makeHistogram("cluster.test_1.upstream_rq_time", {{makeStat("key1"), makeStat("value1")}, + {makeStat("key2"), makeStat("value2")}}); + histogram1->unit_ = Stats::Histogram::Unit::Milliseconds; + addHistogram(histogram1); + + Buffer::OwnedImpl response; + auto size = PrometheusStatsFormatter::statsAsPrometheus( + counters_, gauges_, histograms_, response, false, + absl::optional{std::regex("cluster.test_1.upstream_cx_total")}); + EXPECT_EQ(1UL, size); + + const std::string expected_output = + R"EOF(# TYPE envoy_cluster_test_1_upstream_cx_total counter +envoy_cluster_test_1_upstream_cx_total{a_tag_name="a.tag-value"} 0 + +)EOF"; + + EXPECT_EQ(expected_output, response.toString()); +} + +} // namespace Server +} // namespace Envoy diff --git a/test/server/http/stats_handler_test.cc b/test/server/http/stats_handler_test.cc index 8c02e3846ffa..6ce1b0cfae48 100644 --- a/test/server/http/stats_handler_test.cc +++ b/test/server/http/stats_handler_test.cc @@ -571,442 +571,5 @@ TEST_P(AdminInstanceTest, RecentLookups) { // fake symbol table. However we cover this solidly in integration tests. 
} -class HistogramWrapper { -public: - HistogramWrapper() : histogram_(hist_alloc()) {} - - ~HistogramWrapper() { hist_free(histogram_); } - - const histogram_t* getHistogram() { return histogram_; } - - void setHistogramValues(const std::vector& values) { - for (uint64_t value : values) { - hist_insert_intscale(histogram_, value, 0, 1); - } - } - - void setHistogramValuesWithCounts(const std::vector>& values) { - for (std::pair cv : values) { - hist_insert_intscale(histogram_, cv.first, 0, cv.second); - } - } - -private: - histogram_t* histogram_; -}; - -class PrometheusStatsFormatterTest : public testing::Test { -protected: - PrometheusStatsFormatterTest() - : symbol_table_(Stats::SymbolTableCreator::makeSymbolTable()), alloc_(*symbol_table_), - pool_(*symbol_table_) {} - - ~PrometheusStatsFormatterTest() override { clearStorage(); } - - void addCounter(const std::string& name, Stats::StatNameTagVector cluster_tags) { - Stats::StatNameManagedStorage storage(name, *symbol_table_); - Stats::StatName stat_name = storage.statName(); - counters_.push_back(alloc_.makeCounter(stat_name, stat_name, cluster_tags)); - } - - void addGauge(const std::string& name, Stats::StatNameTagVector cluster_tags) { - Stats::StatNameManagedStorage storage(name, *symbol_table_); - Stats::StatName stat_name = storage.statName(); - gauges_.push_back( - alloc_.makeGauge(stat_name, stat_name, cluster_tags, Stats::Gauge::ImportMode::Accumulate)); - } - - void addHistogram(const Stats::ParentHistogramSharedPtr histogram) { - histograms_.push_back(histogram); - } - - using MockHistogramSharedPtr = Stats::RefcountPtr>; - MockHistogramSharedPtr makeHistogram() { - return MockHistogramSharedPtr(new NiceMock()); - } - - Stats::StatName makeStat(absl::string_view name) { return pool_.add(name); } - - void clearStorage() { - pool_.clear(); - counters_.clear(); - gauges_.clear(); - histograms_.clear(); - EXPECT_EQ(0, symbol_table_->numSymbols()); - } - - Stats::SymbolTablePtr symbol_table_; - Stats::AllocatorImpl alloc_; - Stats::StatNamePool pool_; - std::vector counters_; - std::vector gauges_; - std::vector histograms_; -}; - -TEST_F(PrometheusStatsFormatterTest, MetricName) { - std::string raw = "vulture.eats-liver"; - std::string expected = "envoy_vulture_eats_liver"; - auto actual = PrometheusStatsFormatter::metricName(raw); - EXPECT_EQ(expected, actual); -} - -TEST_F(PrometheusStatsFormatterTest, SanitizeMetricName) { - std::string raw = "An.artist.plays-violin@019street"; - std::string expected = "envoy_An_artist_plays_violin_019street"; - auto actual = PrometheusStatsFormatter::metricName(raw); - EXPECT_EQ(expected, actual); -} - -TEST_F(PrometheusStatsFormatterTest, SanitizeMetricNameDigitFirst) { - std::string raw = "3.artists.play-violin@019street"; - std::string expected = "envoy_3_artists_play_violin_019street"; - auto actual = PrometheusStatsFormatter::metricName(raw); - EXPECT_EQ(expected, actual); -} - -TEST_F(PrometheusStatsFormatterTest, FormattedTags) { - std::vector tags; - Stats::Tag tag1 = {"a.tag-name", "a.tag-value"}; - Stats::Tag tag2 = {"another_tag_name", "another_tag-value"}; - tags.push_back(tag1); - tags.push_back(tag2); - std::string expected = "a_tag_name=\"a.tag-value\",another_tag_name=\"another_tag-value\""; - auto actual = PrometheusStatsFormatter::formattedTags(tags); - EXPECT_EQ(expected, actual); -} - -TEST_F(PrometheusStatsFormatterTest, MetricNameCollison) { - - // Create two counters and two gauges with each pair having the same name, - // but having different tag names and values. 
- //`statsAsPrometheus()` should return two implying it found two unique stat names - - addCounter("cluster.test_cluster_1.upstream_cx_total", - {{makeStat("a.tag-name"), makeStat("a.tag-value")}}); - addCounter("cluster.test_cluster_1.upstream_cx_total", - {{makeStat("another_tag_name"), makeStat("another_tag-value")}}); - addGauge("cluster.test_cluster_2.upstream_cx_total", - {{makeStat("another_tag_name_3"), makeStat("another_tag_3-value")}}); - addGauge("cluster.test_cluster_2.upstream_cx_total", - {{makeStat("another_tag_name_4"), makeStat("another_tag_4-value")}}); - - Buffer::OwnedImpl response; - auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, - false, absl::nullopt); - EXPECT_EQ(2UL, size); -} - -TEST_F(PrometheusStatsFormatterTest, UniqueMetricName) { - - // Create two counters and two gauges, all with unique names. - // statsAsPrometheus() should return four implying it found - // four unique stat names. - - addCounter("cluster.test_cluster_1.upstream_cx_total", - {{makeStat("a.tag-name"), makeStat("a.tag-value")}}); - addCounter("cluster.test_cluster_2.upstream_cx_total", - {{makeStat("another_tag_name"), makeStat("another_tag-value")}}); - addGauge("cluster.test_cluster_3.upstream_cx_total", - {{makeStat("another_tag_name_3"), makeStat("another_tag_3-value")}}); - addGauge("cluster.test_cluster_4.upstream_cx_total", - {{makeStat("another_tag_name_4"), makeStat("another_tag_4-value")}}); - - Buffer::OwnedImpl response; - auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, - false, absl::nullopt); - EXPECT_EQ(4UL, size); -} - -TEST_F(PrometheusStatsFormatterTest, HistogramWithNoValuesAndNoTags) { - HistogramWrapper h1_cumulative; - h1_cumulative.setHistogramValues(std::vector(0)); - Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram()); - - auto histogram = makeHistogram(); - histogram->name_ = "histogram1"; - histogram->used_ = true; - ON_CALL(*histogram, cumulativeStatistics()) - .WillByDefault(testing::ReturnRef(h1_cumulative_statistics)); - - addHistogram(histogram); - - Buffer::OwnedImpl response; - auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, - false, absl::nullopt); - EXPECT_EQ(1UL, size); - - const std::string expected_output = R"EOF(# TYPE envoy_histogram1 histogram -envoy_histogram1_bucket{le="0.5"} 0 -envoy_histogram1_bucket{le="1"} 0 -envoy_histogram1_bucket{le="5"} 0 -envoy_histogram1_bucket{le="10"} 0 -envoy_histogram1_bucket{le="25"} 0 -envoy_histogram1_bucket{le="50"} 0 -envoy_histogram1_bucket{le="100"} 0 -envoy_histogram1_bucket{le="250"} 0 -envoy_histogram1_bucket{le="500"} 0 -envoy_histogram1_bucket{le="1000"} 0 -envoy_histogram1_bucket{le="2500"} 0 -envoy_histogram1_bucket{le="5000"} 0 -envoy_histogram1_bucket{le="10000"} 0 -envoy_histogram1_bucket{le="30000"} 0 -envoy_histogram1_bucket{le="60000"} 0 -envoy_histogram1_bucket{le="300000"} 0 -envoy_histogram1_bucket{le="600000"} 0 -envoy_histogram1_bucket{le="1800000"} 0 -envoy_histogram1_bucket{le="3600000"} 0 -envoy_histogram1_bucket{le="+Inf"} 0 -envoy_histogram1_sum{} 0 -envoy_histogram1_count{} 0 -)EOF"; - - EXPECT_EQ(expected_output, response.toString()); -} - -TEST_F(PrometheusStatsFormatterTest, HistogramWithHighCounts) { - HistogramWrapper h1_cumulative; - - // Force large counts to prove that the +Inf bucket doesn't overflow to scientific notation. 
- h1_cumulative.setHistogramValuesWithCounts(std::vector>({ - {1, 100000}, - {100, 1000000}, - {1000, 100000000}, - })); - - Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram()); - - auto histogram = makeHistogram(); - histogram->name_ = "histogram1"; - histogram->used_ = true; - ON_CALL(*histogram, cumulativeStatistics()) - .WillByDefault(testing::ReturnRef(h1_cumulative_statistics)); - - addHistogram(histogram); - - Buffer::OwnedImpl response; - auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, - false, absl::nullopt); - EXPECT_EQ(1UL, size); - - const std::string expected_output = R"EOF(# TYPE envoy_histogram1 histogram -envoy_histogram1_bucket{le="0.5"} 0 -envoy_histogram1_bucket{le="1"} 0 -envoy_histogram1_bucket{le="5"} 100000 -envoy_histogram1_bucket{le="10"} 100000 -envoy_histogram1_bucket{le="25"} 100000 -envoy_histogram1_bucket{le="50"} 100000 -envoy_histogram1_bucket{le="100"} 100000 -envoy_histogram1_bucket{le="250"} 1100000 -envoy_histogram1_bucket{le="500"} 1100000 -envoy_histogram1_bucket{le="1000"} 1100000 -envoy_histogram1_bucket{le="2500"} 101100000 -envoy_histogram1_bucket{le="5000"} 101100000 -envoy_histogram1_bucket{le="10000"} 101100000 -envoy_histogram1_bucket{le="30000"} 101100000 -envoy_histogram1_bucket{le="60000"} 101100000 -envoy_histogram1_bucket{le="300000"} 101100000 -envoy_histogram1_bucket{le="600000"} 101100000 -envoy_histogram1_bucket{le="1800000"} 101100000 -envoy_histogram1_bucket{le="3600000"} 101100000 -envoy_histogram1_bucket{le="+Inf"} 101100000 -envoy_histogram1_sum{} 105105105000 -envoy_histogram1_count{} 101100000 -)EOF"; - - EXPECT_EQ(expected_output, response.toString()); -} - -TEST_F(PrometheusStatsFormatterTest, OutputWithAllMetricTypes) { - addCounter("cluster.test_1.upstream_cx_total", - {{makeStat("a.tag-name"), makeStat("a.tag-value")}}); - addCounter("cluster.test_2.upstream_cx_total", - {{makeStat("another_tag_name"), makeStat("another_tag-value")}}); - addGauge("cluster.test_3.upstream_cx_total", - {{makeStat("another_tag_name_3"), makeStat("another_tag_3-value")}}); - addGauge("cluster.test_4.upstream_cx_total", - {{makeStat("another_tag_name_4"), makeStat("another_tag_4-value")}}); - - const std::vector h1_values = {50, 20, 30, 70, 100, 5000, 200}; - HistogramWrapper h1_cumulative; - h1_cumulative.setHistogramValues(h1_values); - Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram()); - - auto histogram1 = makeHistogram(); - histogram1->name_ = "cluster.test_1.upstream_rq_time"; - histogram1->unit_ = Stats::Histogram::Unit::Milliseconds; - histogram1->used_ = true; - histogram1->setTags({Stats::Tag{"key1", "value1"}, Stats::Tag{"key2", "value2"}}); - addHistogram(histogram1); - EXPECT_CALL(*histogram1, cumulativeStatistics()) - .WillOnce(testing::ReturnRef(h1_cumulative_statistics)); - - Buffer::OwnedImpl response; - auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, - false, absl::nullopt); - EXPECT_EQ(5UL, size); - - const std::string expected_output = R"EOF(# TYPE envoy_cluster_test_1_upstream_cx_total counter -envoy_cluster_test_1_upstream_cx_total{a_tag_name="a.tag-value"} 0 -# TYPE envoy_cluster_test_2_upstream_cx_total counter -envoy_cluster_test_2_upstream_cx_total{another_tag_name="another_tag-value"} 0 -# TYPE envoy_cluster_test_3_upstream_cx_total gauge -envoy_cluster_test_3_upstream_cx_total{another_tag_name_3="another_tag_3-value"} 0 -# TYPE 
envoy_cluster_test_4_upstream_cx_total gauge -envoy_cluster_test_4_upstream_cx_total{another_tag_name_4="another_tag_4-value"} 0 -# TYPE envoy_cluster_test_1_upstream_rq_time histogram -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="0.5"} 0 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="1"} 0 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="5"} 0 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="10"} 0 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="25"} 1 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="50"} 2 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="100"} 4 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="250"} 6 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="500"} 6 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="1000"} 6 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="2500"} 6 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="5000"} 6 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="10000"} 7 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="30000"} 7 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="60000"} 7 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="300000"} 7 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="600000"} 7 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="1800000"} 7 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="3600000"} 7 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="+Inf"} 7 -envoy_cluster_test_1_upstream_rq_time_sum{key1="value1",key2="value2"} 5532 -envoy_cluster_test_1_upstream_rq_time_count{key1="value1",key2="value2"} 7 -)EOF"; - - EXPECT_EQ(expected_output, response.toString()); -} - -TEST_F(PrometheusStatsFormatterTest, OutputWithUsedOnly) { - addCounter("cluster.test_1.upstream_cx_total", - {{makeStat("a.tag-name"), makeStat("a.tag-value")}}); - addCounter("cluster.test_2.upstream_cx_total", - {{makeStat("another_tag_name"), makeStat("another_tag-value")}}); - addGauge("cluster.test_3.upstream_cx_total", - {{makeStat("another_tag_name_3"), makeStat("another_tag_3-value")}}); - addGauge("cluster.test_4.upstream_cx_total", - {{makeStat("another_tag_name_4"), makeStat("another_tag_4-value")}}); - - const std::vector h1_values = {50, 20, 30, 70, 100, 5000, 200}; - HistogramWrapper h1_cumulative; - h1_cumulative.setHistogramValues(h1_values); - Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram()); - - auto histogram1 = makeHistogram(); - histogram1->name_ = "cluster.test_1.upstream_rq_time"; - histogram1->unit_ = Stats::Histogram::Unit::Milliseconds; - histogram1->used_ = true; - histogram1->setTags({Stats::Tag{"key1", "value1"}, Stats::Tag{"key2", "value2"}}); - addHistogram(histogram1); - EXPECT_CALL(*histogram1, cumulativeStatistics()) - .WillOnce(testing::ReturnRef(h1_cumulative_statistics)); - - Buffer::OwnedImpl response; - auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, - true, absl::nullopt); - EXPECT_EQ(1UL, size); - - const std::string expected_output = R"EOF(# TYPE 
envoy_cluster_test_1_upstream_rq_time histogram -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="0.5"} 0 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="1"} 0 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="5"} 0 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="10"} 0 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="25"} 1 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="50"} 2 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="100"} 4 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="250"} 6 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="500"} 6 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="1000"} 6 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="2500"} 6 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="5000"} 6 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="10000"} 7 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="30000"} 7 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="60000"} 7 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="300000"} 7 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="600000"} 7 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="1800000"} 7 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="3600000"} 7 -envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="+Inf"} 7 -envoy_cluster_test_1_upstream_rq_time_sum{key1="value1",key2="value2"} 5532 -envoy_cluster_test_1_upstream_rq_time_count{key1="value1",key2="value2"} 7 -)EOF"; - - EXPECT_EQ(expected_output, response.toString()); -} - -TEST_F(PrometheusStatsFormatterTest, OutputWithUsedOnlyHistogram) { - const std::vector h1_values = {}; - HistogramWrapper h1_cumulative; - h1_cumulative.setHistogramValues(h1_values); - Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram()); - - auto histogram1 = makeHistogram(); - histogram1->name_ = "cluster.test_1.upstream_rq_time"; - histogram1->unit_ = Stats::Histogram::Unit::Milliseconds; - histogram1->used_ = false; - histogram1->setTags({Stats::Tag{"key1", "value1"}, Stats::Tag{"key2", "value2"}}); - addHistogram(histogram1); - - { - const bool used_only = true; - EXPECT_CALL(*histogram1, cumulativeStatistics()).Times(0); - - Buffer::OwnedImpl response; - auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, - response, used_only, absl::nullopt); - EXPECT_EQ(0UL, size); - } - - { - const bool used_only = false; - EXPECT_CALL(*histogram1, cumulativeStatistics()) - .WillOnce(testing::ReturnRef(h1_cumulative_statistics)); - - Buffer::OwnedImpl response; - auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, - response, used_only, absl::nullopt); - EXPECT_EQ(1UL, size); - } -} - -TEST_F(PrometheusStatsFormatterTest, OutputWithRegexp) { - addCounter("cluster.test_1.upstream_cx_total", - {{makeStat("a.tag-name"), makeStat("a.tag-value")}}); - addCounter("cluster.test_2.upstream_cx_total", - {{makeStat("another_tag_name"), makeStat("another_tag-value")}}); - addGauge("cluster.test_3.upstream_cx_total", - 
{{makeStat("another_tag_name_3"), makeStat("another_tag_3-value")}}); - addGauge("cluster.test_4.upstream_cx_total", - {{makeStat("another_tag_name_4"), makeStat("another_tag_4-value")}}); - - const std::vector h1_values = {50, 20, 30, 70, 100, 5000, 200}; - HistogramWrapper h1_cumulative; - h1_cumulative.setHistogramValues(h1_values); - Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram()); - - auto histogram1 = makeHistogram(); - histogram1->name_ = "cluster.test_1.upstream_rq_time"; - histogram1->unit_ = Stats::Histogram::Unit::Milliseconds; - histogram1->setTags({Stats::Tag{"key1", "value1"}, Stats::Tag{"key2", "value2"}}); - addHistogram(histogram1); - - Buffer::OwnedImpl response; - auto size = PrometheusStatsFormatter::statsAsPrometheus( - counters_, gauges_, histograms_, response, false, - absl::optional{std::regex("cluster.test_1.upstream_cx_total")}); - EXPECT_EQ(1UL, size); - - const std::string expected_output = - R"EOF(# TYPE envoy_cluster_test_1_upstream_cx_total counter -envoy_cluster_test_1_upstream_cx_total{a_tag_name="a.tag-value"} 0 -)EOF"; - - EXPECT_EQ(expected_output, response.toString()); -} - } // namespace Server } // namespace Envoy diff --git a/tools/code_format/check_format.py b/tools/code_format/check_format.py index c9683f8cce4d..27bc32dbe926 100755 --- a/tools/code_format/check_format.py +++ b/tools/code_format/check_format.py @@ -79,7 +79,8 @@ "./source/extensions/filters/http/squash/squash_filter.h", "./source/extensions/filters/http/squash/squash_filter.cc", "./source/server/http/utils.h", "./source/server/http/utils.cc", "./source/server/http/stats_handler.h", - "./source/server/http/stats_handler.cc", "./tools/clang_tools/api_booster/main.cc", + "./source/server/http/stats_handler.cc", "./source/server/http/prometheus_stats.h", + "./source/server/http/prometheus_stats.cc", "./tools/clang_tools/api_booster/main.cc", "./tools/clang_tools/api_booster/proto_cxx_utils.cc", "./source/common/common/version.cc") # Only one C++ file should instantiate grpc_init From b9ca1fa22f924410f79d9b470999422b1d27b76f Mon Sep 17 00:00:00 2001 From: htuch Date: Fri, 24 Apr 2020 19:53:13 -0400 Subject: [PATCH 028/909] hcm: avoid invoking 100-continue handling on decode filter. (#10929) The 100-continue state tracking variables were checking in commonContinue() (on both decode/encode paths), conditioning do100ContinueHeaders(). This makes no sense on the decode path, and can lead to crashes as per #10923 when the decode pipeline is resumed, so refactored the logic out to just the encode path. Risk level: Low Testing: Unit and integration regression tests added, as well as corpus entry. 
Fixes oss-fuzz issue https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=18461 Fixes #10923 Signed-off-by: Harvey Tuch --- source/common/http/conn_manager_impl.cc | 2 +- source/common/http/conn_manager_impl.h | 5 + ...nn_manager_impl_fuzz_test-5674283828772864 | 120 ++++++++++++++++++ test/common/http/conn_manager_impl_test.cc | 65 ++++++++++ test/integration/BUILD | 1 + test/integration/integration_test.cc | 10 ++ 6 files changed, 202 insertions(+), 1 deletion(-) create mode 100644 test/common/http/conn_manager_impl_corpus/clusterfuzz-testcase-minimized-conn_manager_impl_fuzz_test-5674283828772864 diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index b4dddcb478ff..75c8bed64cd2 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -2067,7 +2067,7 @@ void ConnectionManagerImpl::ActiveStreamFilterBase::commonContinue() { allowIteration(); // Only resume with do100ContinueHeaders() if we've actually seen a 100-Continue. - if (parent_.state_.has_continue_headers_ && !continue_headers_continued_) { + if (has100Continueheaders()) { continue_headers_continued_ = true; do100ContinueHeaders(); // If the response headers have not yet come in, don't continue on with diff --git a/source/common/http/conn_manager_impl.h b/source/common/http/conn_manager_impl.h index 1e3705b57634..705f60e27e4c 100644 --- a/source/common/http/conn_manager_impl.h +++ b/source/common/http/conn_manager_impl.h @@ -139,6 +139,7 @@ class ConnectionManagerImpl : Logger::Loggable, virtual Buffer::WatermarkBufferPtr createBuffer() PURE; virtual Buffer::WatermarkBufferPtr& bufferedData() PURE; virtual bool complete() PURE; + virtual bool has100Continueheaders() PURE; virtual void do100ContinueHeaders() PURE; virtual void doHeaders(bool end_stream) PURE; virtual void doData(bool end_stream) PURE; @@ -237,6 +238,7 @@ class ConnectionManagerImpl : Logger::Loggable, Buffer::WatermarkBufferPtr createBuffer() override; Buffer::WatermarkBufferPtr& bufferedData() override { return parent_.buffered_request_data_; } bool complete() override { return parent_.state_.remote_complete_; } + bool has100Continueheaders() override { return false; } void do100ContinueHeaders() override { NOT_REACHED_GCOVR_EXCL_LINE; } void doHeaders(bool end_stream) override { parent_.decodeHeaders(this, *parent_.request_headers_, end_stream); @@ -349,6 +351,9 @@ class ConnectionManagerImpl : Logger::Loggable, Buffer::WatermarkBufferPtr createBuffer() override; Buffer::WatermarkBufferPtr& bufferedData() override { return parent_.buffered_response_data_; } bool complete() override { return parent_.state_.local_complete_; } + bool has100Continueheaders() override { + return parent_.state_.has_continue_headers_ && !continue_headers_continued_; + } void do100ContinueHeaders() override { parent_.encode100ContinueHeaders(this, *parent_.continue_headers_); } diff --git a/test/common/http/conn_manager_impl_corpus/clusterfuzz-testcase-minimized-conn_manager_impl_fuzz_test-5674283828772864 b/test/common/http/conn_manager_impl_corpus/clusterfuzz-testcase-minimized-conn_manager_impl_fuzz_test-5674283828772864 new file mode 100644 index 000000000000..64081c27010c --- /dev/null +++ b/test/common/http/conn_manager_impl_corpus/clusterfuzz-testcase-minimized-conn_manager_impl_fuzz_test-5674283828772864 @@ -0,0 +1,120 @@ +actions { + stream_action { + request { + trailers { + headers { + headers { + key: "foo" + value: "bar" + } + } + decoder_filter_callback_action { + add_decoded_data { + 
size: 1000000 + } + } + } + } + } +} +actions { + new_stream { + request_headers { + headers { + key: ":method" + value: "GET" + } + headers { + key: ":path" + value: "/" + } + headers { + key: ":scheme" + value: "http" + } + headers { + key: ":authority" + value: "foo.com" + } + headers { + key: "blah" + value: "nosniff" + } + headers { + key: "cookie" + value: "foo=bar" + } + headers { + key: "cookie" + value: "foo2=bar2" + } + } + } +} +actions { + stream_action { + request { + data { + size: 3000000 + status: DATA_STOP_ITERATION_AND_BUFFER + decoder_filter_callback_action { + add_decoded_data { + size: 1000000 + } + } + } + } + } +} +actions { + stream_action { + response { + trailers { + headers { + key: "foo" + value: "bar" + } + } + } + } +} +actions { + stream_action { + stream_id: 5505024 + } +} +actions { + stream_action { + response { + continue_headers { + } + } + } +} +actions { + stream_action { + request { + continue_decoding { + } + } + } +} +actions { + stream_action { + response { + data: 5 + } + } +} +actions { + stream_action { + response { + headers { + headers { + key: ":status" + value: "200" + } + } + } + } +} diff --git a/test/common/http/conn_manager_impl_test.cc b/test/common/http/conn_manager_impl_test.cc index 38593d73ab3e..4853bb378c7e 100644 --- a/test/common/http/conn_manager_impl_test.cc +++ b/test/common/http/conn_manager_impl_test.cc @@ -617,6 +617,71 @@ TEST_F(HttpConnectionManagerImplTest, PauseResume100Continue) { decoder_filters_[1]->callbacks_->encodeHeaders(std::move(response_headers), false); } +// Regression test for https://github.com/envoyproxy/envoy/issues/10923. +TEST_F(HttpConnectionManagerImplTest, 100ContinueResponseWithDecoderPause) { + proxy_100_continue_ = true; + setup(false, "envoy-custom-server", false); + + std::shared_ptr filter(new NiceMock()); + + // Allow headers to pass. + EXPECT_CALL(*filter, decodeHeaders(_, false)) + .WillRepeatedly( + InvokeWithoutArgs([]() -> FilterHeadersStatus { return FilterHeadersStatus::Continue; })); + // Pause and then resume the decode pipeline, this is key to triggering #10923. + EXPECT_CALL(*filter, decodeData(_, false)).WillOnce(InvokeWithoutArgs([]() -> FilterDataStatus { + return FilterDataStatus::StopIterationAndBuffer; + })); + EXPECT_CALL(*filter, decodeData(_, true)) + .WillRepeatedly( + InvokeWithoutArgs([]() -> FilterDataStatus { return FilterDataStatus::Continue; })); + + EXPECT_CALL(*filter, setDecoderFilterCallbacks(_)); + + EXPECT_CALL(filter_factory_, createFilterChain(_)) + .WillRepeatedly(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void { + callbacks.addStreamDecoderFilter(filter); + })); + + EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)); + + RequestDecoder* decoder = nullptr; + NiceMock encoder; + EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance& data) -> void { + decoder = &conn_manager_->newStream(encoder); + + // Test not charging stats on the second call. + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; + decoder->decodeHeaders(std::move(headers), false); + // Allow the decode pipeline to pause. + decoder->decodeData(data, false); + + ResponseHeaderMapPtr continue_headers{new TestResponseHeaderMapImpl{{":status", "100"}}}; + filter->callbacks_->encode100ContinueHeaders(std::move(continue_headers)); + + // Resume decode pipeline after encoding 100 continue headers, we're now ready to trigger + // #10923. 
+ decoder->decodeData(data, true); + + ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + filter->callbacks_->encodeHeaders(std::move(response_headers), true); + + data.drain(4); + })); + + // Kick off the incoming data. + Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); + + EXPECT_EQ(1U, stats_.named_.downstream_rq_1xx_.value()); + EXPECT_EQ(1U, listener_stats_.downstream_rq_1xx_.value()); + EXPECT_EQ(1U, stats_.named_.downstream_rq_2xx_.value()); + EXPECT_EQ(1U, listener_stats_.downstream_rq_2xx_.value()); + EXPECT_EQ(2U, stats_.named_.downstream_rq_completed_.value()); + EXPECT_EQ(2U, listener_stats_.downstream_rq_completed_.value()); +} + // By default, Envoy will set the server header to the server name, here "custom-value" TEST_F(HttpConnectionManagerImplTest, ServerHeaderOverwritten) { setup(false, "custom-value", false); diff --git a/test/integration/BUILD b/test/integration/BUILD index ad714ee65167..5c3d08b0ef16 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -596,6 +596,7 @@ envoy_cc_test( "//test/integration/filters:clear_route_cache_filter_lib", "//test/integration/filters:encoder_decoder_buffer_filter_lib", "//test/integration/filters:process_context_lib", + "//test/integration/filters:stop_iteration_and_continue", "//test/mocks/http:http_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", diff --git a/test/integration/integration_test.cc b/test/integration/integration_test.cc index 1ae41217b1a0..3cddecf87bfd 100644 --- a/test/integration/integration_test.cc +++ b/test/integration/integration_test.cc @@ -256,6 +256,16 @@ TEST_P(IntegrationTest, EnvoyProxyingLate100ContinueWithEncoderFilter) { testEnvoyProxying100Continue(false, true); } +// Regression test for https://github.com/envoyproxy/envoy/issues/10923. +TEST_P(IntegrationTest, EnvoyProxying100ContinueWithDecodeDataPause) { + config_helper_.addFilter(R"EOF( + name: stop-iteration-and-continue-filter + typed_config: + "@type": type.googleapis.com/google.protobuf.Empty + )EOF"); + testEnvoyProxying100Continue(true); +} + // This is a regression for https://github.com/envoyproxy/envoy/issues/2715 and validates that a // pending request is not sent on a connection that has been half-closed. TEST_P(IntegrationTest, UpstreamDisconnectWithTwoRequests) { From a7f411c22199e2d7445a2a055c97de2ed291a473 Mon Sep 17 00:00:00 2001 From: Joshua Marantz Date: Sat, 25 Apr 2020 10:04:21 -0400 Subject: [PATCH 029/909] stats: add utilities to create stats from a vector of tokens, mixing dynamic and symbolic elements. (#10735) Description: Creating joined stat names is a bit awkward due to the way the memory management needs to work, and having dynamic components to the name adds further complexity. This PR adds some utility helper methods to make this much easier and terse to express. There's one subtle semantic change in the grpc status, which is to use a Dynamic stat name rather than building a locally controlled mutex-protected map from strings to symbolic stat names. Risk Level: low -- this is mostly a refactoring Testing: //test/... 
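For illustration only (this snippet is not part of the patch), the element-based helpers declared in source/common/stats/utility.h are meant to be used roughly as below. The wrapper chargeExampleCounter and the tokens "requests" and "total" are invented for the sketch; the call pattern follows test/common/stats/utility_test.cc.

    #include "common/stats/utility.h"

    namespace Envoy {

    // Sketch: charge a counter whose name mixes a symbolic prefix with a
    // service name that is only discovered on the request path.
    void chargeExampleCounter(Stats::Scope& scope, absl::string_view service) {
      Stats::StatNamePool pool(scope.symbolTable()); // normally a long-lived member
      const Stats::StatName prefix = pool.add("requests"); // symbolic token
      const Stats::StatName total = pool.add("total");     // symbolic token
      // The per-request service name is passed as a DynamicName; the helper joins
      // the pieces into "requests.<service>.total" and hides the joined storage.
      Stats::Utility::counterFromElements(
          scope, {prefix, Stats::DynamicName(service), total})
          .inc();
    }

    } // namespace Envoy

The join and its temporary storage are handled inside counterFromElements, which is the terseness gain the description above refers to.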
Docs Changes: n/a Release Notes: n/a Signed-off-by: Joshua Marantz --- source/common/grpc/BUILD | 1 + source/common/grpc/context_impl.cc | 103 +++-------- source/common/grpc/context_impl.h | 29 +-- source/common/http/BUILD | 1 + source/common/http/user_agent.cc | 20 +-- source/common/stats/utility.cc | 83 +++++++++ source/common/stats/utility.h | 170 ++++++++++++++++++ source/extensions/filters/http/dynamo/BUILD | 1 + .../filters/http/dynamo/dynamo_stats.cc | 29 ++- .../filters/http/dynamo/dynamo_stats.h | 8 +- source/extensions/filters/http/fault/BUILD | 1 + .../filters/http/fault/fault_filter.cc | 6 +- .../filters/network/common/redis/BUILD | 1 + .../common/redis/redis_command_stats.cc | 31 +--- .../common/redis/redis_command_stats.h | 3 - .../filters/network/mongo_proxy/BUILD | 1 + .../network/mongo_proxy/mongo_stats.cc | 18 +- .../filters/network/mongo_proxy/mongo_stats.h | 7 +- .../filters/network/mongo_proxy/proxy.cc | 26 ++- .../filters/network/mongo_proxy/proxy.h | 4 +- .../filters/network/zookeeper_proxy/filter.cc | 29 ++- source/extensions/transport_sockets/tls/BUILD | 1 + .../transport_sockets/tls/context_impl.cc | 8 +- test/common/grpc/context_impl_test.cc | 4 +- test/common/stats/BUILD | 9 + test/common/stats/utility_test.cc | 112 ++++++++++++ 26 files changed, 500 insertions(+), 206 deletions(-) create mode 100644 test/common/stats/utility_test.cc diff --git a/source/common/grpc/BUILD b/source/common/grpc/BUILD index e49c1c1db016..8534b6f0c67c 100644 --- a/source/common/grpc/BUILD +++ b/source/common/grpc/BUILD @@ -115,6 +115,7 @@ envoy_cc_library( "//include/envoy/stats:stats_interface", "//source/common/common:hash_lib", "//source/common/stats:symbol_table_lib", + "//source/common/stats:utility_lib", ], ) diff --git a/source/common/grpc/context_impl.cc b/source/common/grpc/context_impl.cc index f612f71cc074..4c0e2f91ebc3 100644 --- a/source/common/grpc/context_impl.cc +++ b/source/common/grpc/context_impl.cc @@ -4,48 +4,29 @@ #include #include "common/grpc/common.h" +#include "common/stats/utility.h" namespace Envoy { namespace Grpc { ContextImpl::ContextImpl(Stats::SymbolTable& symbol_table) - : symbol_table_(symbol_table), stat_name_pool_(symbol_table), - grpc_(stat_name_pool_.add("grpc")), grpc_web_(stat_name_pool_.add("grpc-web")), - success_(stat_name_pool_.add("success")), failure_(stat_name_pool_.add("failure")), - total_(stat_name_pool_.add("total")), zero_(stat_name_pool_.add("0")), + : stat_name_pool_(symbol_table), grpc_(stat_name_pool_.add("grpc")), + grpc_web_(stat_name_pool_.add("grpc-web")), success_(stat_name_pool_.add("success")), + failure_(stat_name_pool_.add("failure")), total_(stat_name_pool_.add("total")), + zero_(stat_name_pool_.add("0")), request_message_count_(stat_name_pool_.add("request_message_count")), response_message_count_(stat_name_pool_.add("response_message_count")), upstream_rq_time_(stat_name_pool_.add("upstream_rq_time")), stat_names_(symbol_table) {} -// Makes a stat name from a string, if we don't already have one for it. -// This always takes a lock on mutex_, and if we haven't seen the name -// before, it also takes a lock on the symbol table. -// -// TODO(jmarantz): See https://github.com/envoyproxy/envoy/pull/7008 for -// a lock-free approach to creating dynamic stat-names based on requests. 
-Stats::StatName ContextImpl::makeDynamicStatName(absl::string_view name) { - Thread::LockGuard lock(mutex_); - auto iter = stat_name_map_.find(name); - if (iter != stat_name_map_.end()) { - return iter->second; - } - const Stats::StatName stat_name = stat_name_pool_.add(name); - stat_name_map_[std::string(name)] = stat_name; - return stat_name; -} - // Gets the stat prefix and underlying storage, depending on whether request_names is empty -std::pair -ContextImpl::getPrefix(Protocol protocol, const absl::optional& request_names) { +Stats::ElementVec ContextImpl::statElements(Protocol protocol, + const absl::optional& request_names, + Stats::Element suffix) { const Stats::StatName protocolName = protocolStatName(protocol); if (request_names) { - Stats::SymbolTable::StoragePtr prefix_storage = - symbol_table_.join({protocolName, request_names->service_, request_names->method_}); - Stats::StatName prefix = Stats::StatName(prefix_storage.get()); - return {prefix, std::move(prefix_storage)}; - } else { - return {protocolName, nullptr}; + return Stats::ElementVec{protocolName, request_names->service_, request_names->method_, suffix}; } + return Stats::ElementVec{protocolName, suffix}; } void ContextImpl::chargeStat(const Upstream::ClusterInfo& cluster, Protocol protocol, @@ -57,28 +38,20 @@ void ContextImpl::chargeStat(const Upstream::ClusterInfo& cluster, Protocol prot absl::string_view status_str = grpc_status->value().getStringView(); auto iter = stat_names_.status_names_.find(status_str); - const Stats::StatName status_stat_name = - (iter != stat_names_.status_names_.end()) ? iter->second : makeDynamicStatName(status_str); - const Stats::SymbolTable::StoragePtr stat_name_storage = - request_names ? symbol_table_.join({protocolStatName(protocol), request_names->service_, - request_names->method_, status_stat_name}) - : symbol_table_.join({protocolStatName(protocol), status_stat_name}); - - cluster.statsScope().counterFromStatName(Stats::StatName(stat_name_storage.get())).inc(); + Stats::ElementVec elements = + statElements(protocol, request_names, + (iter != stat_names_.status_names_.end()) ? 
Stats::Element(iter->second) + : Stats::DynamicName(status_str)); + Stats::Utility::counterFromElements(cluster.statsScope(), elements).inc(); chargeStat(cluster, protocol, request_names, (status_str == "0")); } void ContextImpl::chargeStat(const Upstream::ClusterInfo& cluster, Protocol protocol, const absl::optional& request_names, bool success) { - auto prefix_and_storage = getPrefix(protocol, request_names); - Stats::StatName prefix = prefix_and_storage.first; - - const Stats::SymbolTable::StoragePtr status = - symbol_table_.join({prefix, successStatName(success)}); - const Stats::SymbolTable::StoragePtr total = symbol_table_.join({prefix, total_}); - - cluster.statsScope().counterFromStatName(Stats::StatName(status.get())).inc(); - cluster.statsScope().counterFromStatName(Stats::StatName(total.get())).inc(); + Stats::ElementVec elements = statElements(protocol, request_names, successStatName(success)); + Stats::Utility::counterFromElements(cluster.statsScope(), elements).inc(); + elements.back() = total_; + Stats::Utility::counterFromElements(cluster.statsScope(), elements).inc(); } void ContextImpl::chargeStat(const Upstream::ClusterInfo& cluster, @@ -89,43 +62,23 @@ void ContextImpl::chargeStat(const Upstream::ClusterInfo& cluster, void ContextImpl::chargeRequestMessageStat(const Upstream::ClusterInfo& cluster, const absl::optional& request_names, uint64_t amount) { - auto prefix_and_storage = getPrefix(Protocol::Grpc, request_names); - Stats::StatName prefix = prefix_and_storage.first; - - const Stats::SymbolTable::StoragePtr request_message_count = - symbol_table_.join({prefix, request_message_count_}); - - cluster.statsScope() - .counterFromStatName(Stats::StatName(request_message_count.get())) - .add(amount); + Stats::ElementVec elements = statElements(Protocol::Grpc, request_names, request_message_count_); + Stats::Utility::counterFromElements(cluster.statsScope(), elements).add(amount); } void ContextImpl::chargeResponseMessageStat(const Upstream::ClusterInfo& cluster, const absl::optional& request_names, uint64_t amount) { - auto prefix_and_storage = getPrefix(Protocol::Grpc, request_names); - Stats::StatName prefix = prefix_and_storage.first; - - const Stats::SymbolTable::StoragePtr response_message_count = - symbol_table_.join({prefix, response_message_count_}); - - cluster.statsScope() - .counterFromStatName(Stats::StatName(response_message_count.get())) - .add(amount); + Stats::ElementVec elements = statElements(Protocol::Grpc, request_names, response_message_count_); + Stats::Utility::counterFromElements(cluster.statsScope(), elements).add(amount); } void ContextImpl::chargeUpstreamStat(const Upstream::ClusterInfo& cluster, const absl::optional& request_names, std::chrono::milliseconds duration) { - auto prefix_and_storage = getPrefix(Protocol::Grpc, request_names); - Stats::StatName prefix = prefix_and_storage.first; - - const Stats::SymbolTable::StoragePtr upstream_rq_time = - symbol_table_.join({prefix, upstream_rq_time_}); - - cluster.statsScope() - .histogramFromStatName(Stats::StatName(upstream_rq_time.get()), - Stats::Histogram::Unit::Milliseconds) + Stats::ElementVec elements = statElements(Protocol::Grpc, request_names, upstream_rq_time_); + Stats::Utility::histogramFromElements(cluster.statsScope(), elements, + Stats::Histogram::Unit::Milliseconds) .recordValue(duration.count()); } @@ -136,8 +89,8 @@ ContextImpl::resolveDynamicServiceAndMethod(const Http::HeaderEntry* path) { return {}; } - const Stats::StatName service = 
makeDynamicStatName(request_names->service_); - const Stats::StatName method = makeDynamicStatName(request_names->method_); + Stats::Element service = Stats::DynamicName(request_names->service_); + Stats::Element method = Stats::DynamicName(request_names->method_); return RequestStatNames{service, method}; } diff --git a/source/common/grpc/context_impl.h b/source/common/grpc/context_impl.h index 9d3ddc731458..98a34695235b 100644 --- a/source/common/grpc/context_impl.h +++ b/source/common/grpc/context_impl.h @@ -9,6 +9,7 @@ #include "common/common/hash.h" #include "common/grpc/stat_names.h" #include "common/stats/symbol_table_impl.h" +#include "common/stats/utility.h" #include "absl/types/optional.h" @@ -16,8 +17,8 @@ namespace Envoy { namespace Grpc { struct Context::RequestStatNames { - Stats::StatName service_; // supplies the service name. - Stats::StatName method_; // supplies the method name. + Stats::Element service_; // supplies the service name. + Stats::Element method_; // supplies the method name. }; class ContextImpl : public Context { @@ -59,25 +60,13 @@ class ContextImpl : public Context { StatNames& statNames() override { return stat_names_; } private: - // Makes a stat name from a string, if we don't already have one for it. - // This always takes a lock on mutex_, and if we haven't seen the name - // before, it also takes a lock on the symbol table. - // - // TODO(jmarantz): See https://github.com/envoyproxy/envoy/pull/7008 for - // a lock-free approach to creating dynamic stat-names based on requests. - Stats::StatName makeDynamicStatName(absl::string_view name); - - // Gets the stat prefix and underlying storage, depending on whether request_names is empty - // or not. - // Prefix will be "" if request_names is empty, or - // ".." if it is not empty. - std::pair - getPrefix(Protocol protocol, const absl::optional& request_names); + // Creates an array of stat-name elements, comprising the protocol, optional + // service and method, and a suffix. 
+ Stats::ElementVec statElements(Protocol protocol, + const absl::optional& request_names, + Stats::Element suffix); - Stats::SymbolTable& symbol_table_; - mutable Thread::MutexBasicLockable mutex_; - Stats::StatNamePool stat_name_pool_ ABSL_GUARDED_BY(mutex_); - StringMap stat_name_map_ ABSL_GUARDED_BY(mutex_); + Stats::StatNamePool stat_name_pool_; const Stats::StatName grpc_; const Stats::StatName grpc_web_; const Stats::StatName success_; diff --git a/source/common/http/BUILD b/source/common/http/BUILD index 4a8e95c24ba2..6597946f813f 100644 --- a/source/common/http/BUILD +++ b/source/common/http/BUILD @@ -316,6 +316,7 @@ envoy_cc_library( "//include/envoy/stats:stats_macros", "//include/envoy/stats:timespan_interface", "//source/common/stats:symbol_table_lib", + "//source/common/stats:utility_lib", ], ) diff --git a/source/common/http/user_agent.cc b/source/common/http/user_agent.cc index ca2774376751..d6804f245ec5 100644 --- a/source/common/http/user_agent.cc +++ b/source/common/http/user_agent.cc @@ -10,6 +10,7 @@ #include "common/http/headers.h" #include "common/stats/symbol_table_impl.h" +#include "common/stats/utility.h" namespace Envoy { namespace Http { @@ -30,17 +31,14 @@ void UserAgent::completeConnectionLength(Stats::Timespan& span) { UserAgentStats::UserAgentStats(Stats::StatName prefix, Stats::StatName device, Stats::Scope& scope, const UserAgentContext& context) - : downstream_cx_total_(scope.counterFromStatName(Stats::StatName( - context.symbol_table_.join({prefix, device, context.downstream_cx_total_}).get()))), - downstream_cx_destroy_remote_active_rq_(scope.counterFromStatName(Stats::StatName( - context.symbol_table_ - .join({prefix, device, context.downstream_cx_destroy_remote_active_rq_}) - .get()))), - downstream_rq_total_(scope.counterFromStatName(Stats::StatName( - context.symbol_table_.join({prefix, device, context.downstream_rq_total_}).get()))), - downstream_cx_length_ms_(scope.histogramFromStatName( - Stats::StatName( - context.symbol_table_.join({prefix, device, context.downstream_cx_length_ms_}).get()), + : downstream_cx_total_(Stats::Utility::counterFromElements( + scope, {prefix, device, context.downstream_cx_total_})), + downstream_cx_destroy_remote_active_rq_(Stats::Utility::counterFromElements( + scope, {prefix, device, context.downstream_cx_destroy_remote_active_rq_})), + downstream_rq_total_(Stats::Utility::counterFromElements( + scope, {prefix, device, context.downstream_rq_total_})), + downstream_cx_length_ms_(Stats::Utility::histogramFromElements( + scope, {prefix, device, context.downstream_cx_length_ms_}, Stats::Histogram::Unit::Milliseconds)) { downstream_cx_total_.inc(); downstream_rq_total_.inc(); diff --git a/source/common/stats/utility.cc b/source/common/stats/utility.cc index 18441355fd3d..ee3944172c0d 100644 --- a/source/common/stats/utility.cc +++ b/source/common/stats/utility.cc @@ -4,6 +4,7 @@ #include #include "absl/strings/match.h" +#include "absl/types/optional.h" namespace Envoy { namespace Stats { @@ -34,5 +35,87 @@ absl::optional Utility::findTag(const Metric& metric, StatName find_ta return value; } +namespace { + +// Helper class for the three Utility::*FromElements implementations to build up +// a joined StatName from a mix of StatName and string_view. 
+struct ElementVisitor { + ElementVisitor(SymbolTable& symbol_table, const ElementVec& elements) + : symbol_table_(symbol_table), pool_(symbol_table) { + stat_names_.resize(elements.size()); + for (const Element& element : elements) { + absl::visit(*this, element); + } + joined_ = symbol_table_.join(stat_names_); + } + + // Overloads provides for absl::visit to call. + void operator()(StatName stat_name) { stat_names_.push_back(stat_name); } + void operator()(absl::string_view name) { stat_names_.push_back(pool_.add(name)); } + + /** + * @return the StatName constructed by joining the elements. + */ + StatName statName() { return StatName(joined_.get()); } + + SymbolTable& symbol_table_; + StatNameVec stat_names_; + StatNameDynamicPool pool_; + SymbolTable::StoragePtr joined_; +}; + +} // namespace + +Counter& Utility::counterFromElements(Scope& scope, const ElementVec& elements, + StatNameTagVectorOptConstRef tags) { + ElementVisitor visitor(scope.symbolTable(), elements); + return scope.counterFromStatNameWithTags(visitor.statName(), tags); +} + +Counter& Utility::counterFromStatNames(Scope& scope, const StatNameVec& elements, + StatNameTagVectorOptConstRef tags) { + SymbolTable::StoragePtr joined = scope.symbolTable().join(elements); + return scope.counterFromStatNameWithTags(StatName(joined.get()), tags); +} + +Gauge& Utility::gaugeFromElements(Scope& scope, const ElementVec& elements, + Gauge::ImportMode import_mode, + StatNameTagVectorOptConstRef tags) { + ElementVisitor visitor(scope.symbolTable(), elements); + return scope.gaugeFromStatNameWithTags(visitor.statName(), tags, import_mode); +} + +Gauge& Utility::gaugeFromStatNames(Scope& scope, const StatNameVec& elements, + Gauge::ImportMode import_mode, + StatNameTagVectorOptConstRef tags) { + SymbolTable::StoragePtr joined = scope.symbolTable().join(elements); + return scope.gaugeFromStatNameWithTags(StatName(joined.get()), tags, import_mode); +} + +Histogram& Utility::histogramFromElements(Scope& scope, const ElementVec& elements, + Histogram::Unit unit, StatNameTagVectorOptConstRef tags) { + ElementVisitor visitor(scope.symbolTable(), elements); + return scope.histogramFromStatNameWithTags(visitor.statName(), tags, unit); +} + +Histogram& Utility::histogramFromStatNames(Scope& scope, const StatNameVec& elements, + Histogram::Unit unit, + StatNameTagVectorOptConstRef tags) { + SymbolTable::StoragePtr joined = scope.symbolTable().join(elements); + return scope.histogramFromStatNameWithTags(StatName(joined.get()), tags, unit); +} + +TextReadout& Utility::textReadoutFromElements(Scope& scope, const ElementVec& elements, + StatNameTagVectorOptConstRef tags) { + ElementVisitor visitor(scope.symbolTable(), elements); + return scope.textReadoutFromStatNameWithTags(visitor.statName(), tags); +} + +TextReadout& Utility::textReadoutFromStatNames(Scope& scope, const StatNameVec& elements, + StatNameTagVectorOptConstRef tags) { + SymbolTable::StoragePtr joined = scope.symbolTable().join(elements); + return scope.textReadoutFromStatNameWithTags(StatName(joined.get()), tags); +} + } // namespace Stats } // namespace Envoy diff --git a/source/common/stats/utility.h b/source/common/stats/utility.h index 0d0ed4b21bc0..46b72234da3a 100644 --- a/source/common/stats/utility.h +++ b/source/common/stats/utility.h @@ -2,16 +2,44 @@ #include +#include "envoy/stats/scope.h" #include "envoy/stats/stats.h" #include "common/stats/symbol_table_impl.h" +#include "absl/container/inlined_vector.h" #include "absl/strings/string_view.h" #include 
"absl/types/optional.h" namespace Envoy { namespace Stats { +/** + * Represents a dynamically created stat name token based on absl::string_view. + * This class wrapper is used in the 'Element' variant so that call-sites + * can express explicit intent to create dynamic stat names, which are more + * expensive than symbolic stat names. We use dynamic stat names only for + * building stats based on names discovered in the line of a request. + */ +class DynamicName : public absl::string_view { +public: + // This is intentionally left as an implicit conversion from string_view to + // make call-sites easier to read, e.g. + // Utility::counterFromElements(*scope, {DynamicName("a"), DynamicName("b")}); + DynamicName(absl::string_view str) : absl::string_view(str) {} +}; + +/** + * Holds either a symbolic StatName or a dynamic string, for the purpose of + * composing a vector to pass to Utility::counterFromElements, etc. This is + * a programming convenience to create joined stat names. It is easier to + * call the above helpers than to use SymbolTable::join(), because the helpers + * hide the memory management of the joined storage, and they allow easier + * co-mingling of symbolic and dynamic stat-name components. + */ +using Element = absl::variant; +using ElementVec = absl::InlinedVector; + /** * Common stats utility routines. */ @@ -34,6 +62,148 @@ class Utility { * @return The value of the tag, if found. */ static absl::optional findTag(const Metric& metric, StatName find_tag_name); + + /** + * Creates a counter from a vector of tokens which are used to create the + * name. The tokens can be specified as DynamicName or StatName. For + * tokens specified as DynamicName, a dynamic StatName will be created. See + * https://github.com/envoyproxy/envoy/blob/master/source/docs/stats.md#dynamic-stat-tokens + * for more detail on why symbolic StatNames are preferred when possible. + * + * See also counterFromStatNames, which is slightly faster but does not allow + * passing DynamicName(string)s as names. + * + * @param scope The scope in which to create the counter. + * @param elements The vector of mixed DynamicName and StatName + * @param tags optionally specified tags. + * @return A counter named using the joined elements. + */ + static Counter& counterFromElements(Scope& scope, const ElementVec& elements, + StatNameTagVectorOptConstRef tags = absl::nullopt); + + /** + * Creates a counter from a vector of tokens which are used to create the + * name. The tokens must be of type StatName. + * + * See also counterFromElements, which is slightly slower, but allows + * passing DynamicName(string)s as elements. + * + * @param scope The scope in which to create the counter. + * @param names The vector of StatNames + * @param tags optionally specified tags. + * @return A counter named using the joined elements. + */ + static Counter& counterFromStatNames(Scope& scope, const StatNameVec& names, + StatNameTagVectorOptConstRef tags = absl::nullopt); + + /** + * Creates a gauge from a vector of tokens which are used to create the + * name. The tokens can be specified as DynamicName or StatName. For + * tokens specified as DynamicName, a dynamic StatName will be created. See + * https://github.com/envoyproxy/envoy/blob/master/source/docs/stats.md#dynamic-stat-tokens + * for more detail on why symbolic StatNames are preferred when possible. + * + * See also gaugeFromStatNames, which is slightly faster but does not allow + * passing DynamicName(string)s as names. 
+ * + * @param scope The scope in which to create the counter. + * @param elements The vector of mixed DynamicName and StatName + * @param import_mode Whether hot-restart should accumulate this value. + * @param tags optionally specified tags. + * @return A gauge named using the joined elements. + */ + static Gauge& gaugeFromElements(Scope& scope, const ElementVec& elements, + Gauge::ImportMode import_mode, + StatNameTagVectorOptConstRef tags = absl::nullopt); + + /** + * Creates a gauge from a vector of tokens which are used to create the + * name. The tokens must be of type StatName. + * + * See also gaugeFromElements, which is slightly slower, but allows + * passing DynamicName(string)s as elements. + * + * @param scope The scope in which to create the counter. + * @param names The vector of StatNames + * @param import_mode Whether hot-restart should accumulate this value. + * @param tags optionally specified tags. + * @return A gauge named using the joined elements. + */ + static Gauge& gaugeFromStatNames(Scope& scope, const StatNameVec& elements, + Gauge::ImportMode import_mode, + StatNameTagVectorOptConstRef tags = absl::nullopt); + + /** + * Creates a histogram from a vector of tokens which are used to create the + * name. The tokens can be specified as DynamicName or StatName. For + * tokens specified as DynamicName, a dynamic StatName will be created. See + * https://github.com/envoyproxy/envoy/blob/master/source/docs/stats.md#dynamic-stat-tokens + * for more detail on why symbolic StatNames are preferred when possible. + * + * See also histogramFromStatNames, which is slightly faster but does not allow + * passing DynamicName(string)s as names. + * + * @param scope The scope in which to create the counter. + * @param elements The vector of mixed DynamicName and StatName + * @param unit The unit of measurement. + * @param tags optionally specified tags. + * @return A histogram named using the joined elements. + */ + static Histogram& histogramFromElements(Scope& scope, const ElementVec& elements, + Histogram::Unit unit, + StatNameTagVectorOptConstRef tags = absl::nullopt); + + /** + * Creates a histogram from a vector of tokens which are used to create the + * name. The tokens must be of type StatName. + * + * See also histogramFromElements, which is slightly slower, but allows + * passing DynamicName(string)s as elements. + * + * @param scope The scope in which to create the counter. + * @param elements The vector of mixed DynamicName and StatName + * @param unit The unit of measurement. + * @param tags optionally specified tags. + * @return A histogram named using the joined elements. + */ + static Histogram& histogramFromStatNames(Scope& scope, const StatNameVec& elements, + Histogram::Unit unit, + StatNameTagVectorOptConstRef tags = absl::nullopt); + + /** + * Creates a TextReadout from a vector of tokens which are used to create the + * name. The tokens can be specified as DynamicName or StatName. For + * tokens specified as DynamicName, a dynamic StatName will be created. See + * https://github.com/envoyproxy/envoy/blob/master/source/docs/stats.md#dynamic-stat-tokens + * for more detail on why symbolic StatNames are preferred when possible. + * + * See also TextReadoutFromStatNames, which is slightly faster but does not allow + * passing DynamicName(string)s as names. + * + * @param scope The scope in which to create the counter. + * @param elements The vector of mixed DynamicName and StatName + * @param unit The unit of measurement. 
+ * @param tags optionally specified tags. + * @return A TextReadout named using the joined elements. + */ + static TextReadout& textReadoutFromElements(Scope& scope, const ElementVec& elements, + StatNameTagVectorOptConstRef tags = absl::nullopt); + + /** + * Creates a TextReadout from a vector of tokens which are used to create the + * name. The tokens must be of type StatName. + * + * See also TextReadoutFromElements, which is slightly slower, but allows + * passing DynamicName(string)s as elements. + * + * @param scope The scope in which to create the counter. + * @param elements The vector of mixed DynamicName and StatName + * @param unit The unit of measurement. + * @param tags optionally specified tags. + * @return A TextReadout named using the joined elements. + */ + static TextReadout& textReadoutFromStatNames(Scope& scope, const StatNameVec& elements, + StatNameTagVectorOptConstRef tags = absl::nullopt); }; } // namespace Stats diff --git a/source/extensions/filters/http/dynamo/BUILD b/source/extensions/filters/http/dynamo/BUILD index 9eac6935f330..79296db8818f 100644 --- a/source/extensions/filters/http/dynamo/BUILD +++ b/source/extensions/filters/http/dynamo/BUILD @@ -61,5 +61,6 @@ envoy_cc_library( ":dynamo_request_parser_lib", "//include/envoy/stats:stats_interface", "//source/common/stats:symbol_table_lib", + "//source/common/stats:utility_lib", ], ) diff --git a/source/extensions/filters/http/dynamo/dynamo_stats.cc b/source/extensions/filters/http/dynamo/dynamo_stats.cc index 468c77f0a959..06f3770a688e 100644 --- a/source/extensions/filters/http/dynamo/dynamo_stats.cc +++ b/source/extensions/filters/http/dynamo/dynamo_stats.cc @@ -46,25 +46,21 @@ DynamoStats::DynamoStats(Stats::Scope& scope, const std::string& prefix) stat_name_set_->rememberBuiltins({"operation", "table"}); } -Stats::SymbolTable::StoragePtr DynamoStats::addPrefix(const Stats::StatNameVec& names) { - Stats::StatNameVec names_with_prefix; +Stats::ElementVec DynamoStats::addPrefix(const Stats::ElementVec& names) { + Stats::ElementVec names_with_prefix; names_with_prefix.reserve(1 + names.size()); names_with_prefix.push_back(prefix_); names_with_prefix.insert(names_with_prefix.end(), names.begin(), names.end()); - return scope_.symbolTable().join(names_with_prefix); + return names_with_prefix; } -void DynamoStats::incCounter(const Stats::StatNameVec& names) { - const Stats::SymbolTable::StoragePtr stat_name_storage = addPrefix(names); - scope_.counterFromStatName(Stats::StatName(stat_name_storage.get())).inc(); +void DynamoStats::incCounter(const Stats::ElementVec& names) { + Stats::Utility::counterFromElements(scope_, addPrefix(names)).inc(); } -void DynamoStats::recordHistogram(const Stats::StatNameVec& names, Stats::Histogram::Unit unit, +void DynamoStats::recordHistogram(const Stats::ElementVec& names, Stats::Histogram::Unit unit, uint64_t value) { - const Stats::SymbolTable::StoragePtr stat_name_storage = addPrefix(names); - Stats::Histogram& histogram = - scope_.histogramFromStatName(Stats::StatName(stat_name_storage.get()), unit); - histogram.recordValue(value); + Stats::Utility::histogramFromElements(scope_, addPrefix(names), unit).recordValue(value); } Stats::Counter& DynamoStats::buildPartitionStatCounter(const std::string& table_name, @@ -72,12 +68,11 @@ Stats::Counter& DynamoStats::buildPartitionStatCounter(const std::string& table_ const std::string& partition_id) { // Use the last 7 characters of the partition id. 
absl::string_view id_last_7 = absl::string_view(partition_id).substr(partition_id.size() - 7); - Stats::StatNameDynamicPool dynamic(scope_.symbolTable()); - const Stats::StatName partition = dynamic.add(absl::StrCat("__partition_id=", id_last_7)); - const Stats::SymbolTable::StoragePtr stat_name_storage = - addPrefix({table_, dynamic.add(table_name), capacity_, - getBuiltin(operation, unknown_operation_), partition}); - return scope_.counterFromStatName(Stats::StatName(stat_name_storage.get())); + std::string partition = absl::StrCat("__partition_id=", id_last_7); + return Stats::Utility::counterFromElements( + scope_, + addPrefix({table_, Stats::DynamicName(table_name), capacity_, + getBuiltin(operation, unknown_operation_), Stats::DynamicName(partition)})); } size_t DynamoStats::groupIndex(uint64_t status) { diff --git a/source/extensions/filters/http/dynamo/dynamo_stats.h b/source/extensions/filters/http/dynamo/dynamo_stats.h index 4241ec5dd711..48399e4f4d23 100644 --- a/source/extensions/filters/http/dynamo/dynamo_stats.h +++ b/source/extensions/filters/http/dynamo/dynamo_stats.h @@ -6,6 +6,7 @@ #include "envoy/stats/scope.h" #include "common/stats/symbol_table_impl.h" +#include "common/stats/utility.h" namespace Envoy { namespace Extensions { @@ -16,9 +17,8 @@ class DynamoStats { public: DynamoStats(Stats::Scope& scope, const std::string& prefix); - void incCounter(const Stats::StatNameVec& names); - void recordHistogram(const Stats::StatNameVec& names, Stats::Histogram::Unit unit, - uint64_t value); + void incCounter(const Stats::ElementVec& names); + void recordHistogram(const Stats::ElementVec& names, Stats::Histogram::Unit unit, uint64_t value); /** * Creates the partition id stats string. The stats format is @@ -42,7 +42,7 @@ class DynamoStats { Stats::SymbolTable& symbolTable() { return scope_.symbolTable(); } private: - Stats::SymbolTable::StoragePtr addPrefix(const Stats::StatNameVec& names); + Stats::ElementVec addPrefix(const Stats::ElementVec& names); Stats::Scope& scope_; Stats::StatNameSetPtr stat_name_set_; diff --git a/source/extensions/filters/http/fault/BUILD b/source/extensions/filters/http/fault/BUILD index 3c6e1775235d..749e04b67a4c 100644 --- a/source/extensions/filters/http/fault/BUILD +++ b/source/extensions/filters/http/fault/BUILD @@ -33,6 +33,7 @@ envoy_cc_library( "//source/common/http:header_utility_lib", "//source/common/http:headers_lib", "//source/common/protobuf:utility_lib", + "//source/common/stats:utility_lib", "//source/extensions/filters/common/fault:fault_config_lib", "//source/extensions/filters/http:well_known_names", "@envoy_api//envoy/extensions/filters/http/fault/v3:pkg_cc_proto", diff --git a/source/extensions/filters/http/fault/fault_filter.cc b/source/extensions/filters/http/fault/fault_filter.cc index f3e277edfe6b..a34e833bb12c 100644 --- a/source/extensions/filters/http/fault/fault_filter.cc +++ b/source/extensions/filters/http/fault/fault_filter.cc @@ -19,6 +19,7 @@ #include "common/http/headers.h" #include "common/http/utility.h" #include "common/protobuf/utility.h" +#include "common/stats/utility.h" #include "extensions/filters/http/well_known_names.h" @@ -85,9 +86,8 @@ FaultFilterConfig::FaultFilterConfig( stats_prefix_(stat_name_set_->add(absl::StrCat(stats_prefix, "fault"))) {} void FaultFilterConfig::incCounter(Stats::StatName downstream_cluster, Stats::StatName stat_name) { - Stats::SymbolTable::StoragePtr storage = - scope_.symbolTable().join({stats_prefix_, downstream_cluster, stat_name}); - 
scope_.counterFromStatName(Stats::StatName(storage.get())).inc(); + Stats::Utility::counterFromStatNames(scope_, {stats_prefix_, downstream_cluster, stat_name}) + .inc(); } FaultFilter::FaultFilter(FaultFilterConfigSharedPtr config) : config_(config) {} diff --git a/source/extensions/filters/network/common/redis/BUILD b/source/extensions/filters/network/common/redis/BUILD index a7adc168788f..d648832e0b1e 100644 --- a/source/extensions/filters/network/common/redis/BUILD +++ b/source/extensions/filters/network/common/redis/BUILD @@ -97,5 +97,6 @@ envoy_cc_library( "//source/common/common:utility_lib", "//source/common/stats:symbol_table_lib", "//source/common/stats:timespan_lib", + "//source/common/stats:utility_lib", ], ) diff --git a/source/extensions/filters/network/common/redis/redis_command_stats.cc b/source/extensions/filters/network/common/redis/redis_command_stats.cc index 02307dc9c1c2..5a6509cf3ae6 100644 --- a/source/extensions/filters/network/common/redis/redis_command_stats.cc +++ b/source/extensions/filters/network/common/redis/redis_command_stats.cc @@ -1,6 +1,7 @@ #include "extensions/filters/network/common/redis/redis_command_stats.h" #include "common/stats/timespan_impl.h" +#include "common/stats/utility.h" #include "extensions/filters/network/common/redis/supported_commands.h" @@ -32,33 +33,20 @@ RedisCommandStats::RedisCommandStats(Stats::SymbolTable& symbol_table, const std Extensions::NetworkFilters::Common::Redis::SupportedCommands::mset()); } -Stats::Counter& RedisCommandStats::counter(Stats::Scope& scope, - const Stats::StatNameVec& stat_names) { - const Stats::SymbolTable::StoragePtr storage_ptr = symbol_table_.join(stat_names); - Stats::StatName full_stat_name = Stats::StatName(storage_ptr.get()); - return scope.counterFromStatName(full_stat_name); -} - -Stats::Histogram& RedisCommandStats::histogram(Stats::Scope& scope, - const Stats::StatNameVec& stat_names, - Stats::Histogram::Unit unit) { - const Stats::SymbolTable::StoragePtr storage_ptr = symbol_table_.join(stat_names); - Stats::StatName full_stat_name = Stats::StatName(storage_ptr.get()); - return scope.histogramFromStatName(full_stat_name, unit); -} - Stats::TimespanPtr RedisCommandStats::createCommandTimer(Stats::Scope& scope, Stats::StatName command, Envoy::TimeSource& time_source) { return std::make_unique( - histogram(scope, {prefix_, command, latency_}, Stats::Histogram::Unit::Microseconds), + Stats::Utility::histogramFromStatNames(scope, {prefix_, command, latency_}, + Stats::Histogram::Unit::Microseconds), time_source); } Stats::TimespanPtr RedisCommandStats::createAggregateTimer(Stats::Scope& scope, Envoy::TimeSource& time_source) { return std::make_unique( - histogram(scope, {prefix_, upstream_rq_time_}, Stats::Histogram::Unit::Microseconds), + Stats::Utility::histogramFromStatNames(scope, {prefix_, upstream_rq_time_}, + Stats::Histogram::Unit::Microseconds), time_source); } @@ -84,16 +72,13 @@ Stats::StatName RedisCommandStats::getCommandFromRequest(const RespValue& reques } void RedisCommandStats::updateStatsTotal(Stats::Scope& scope, Stats::StatName command) { - counter(scope, {prefix_, command, total_}).inc(); + Stats::Utility::counterFromStatNames(scope, {prefix_, command, total_}).inc(); } void RedisCommandStats::updateStats(Stats::Scope& scope, Stats::StatName command, const bool success) { - if (success) { - counter(scope, {prefix_, command, success_}).inc(); - } else { - counter(scope, {prefix_, command, failure_}).inc(); - } + Stats::StatName status = success ? 
success_ : failure_; + Stats::Utility::counterFromStatNames(scope, {prefix_, command, status}).inc(); } } // namespace Redis diff --git a/source/extensions/filters/network/common/redis/redis_command_stats.h b/source/extensions/filters/network/common/redis/redis_command_stats.h index a2870ea4003e..5dddb9f8303c 100644 --- a/source/extensions/filters/network/common/redis/redis_command_stats.h +++ b/source/extensions/filters/network/common/redis/redis_command_stats.h @@ -28,9 +28,6 @@ class RedisCommandStats { return std::make_shared(symbol_table, "upstream_commands"); } - Stats::Counter& counter(Stats::Scope& scope, const Stats::StatNameVec& stat_names); - Stats::Histogram& histogram(Stats::Scope& scope, const Stats::StatNameVec& stat_names, - Stats::Histogram::Unit unit); Stats::TimespanPtr createCommandTimer(Stats::Scope& scope, Stats::StatName command, Envoy::TimeSource& time_source); Stats::TimespanPtr createAggregateTimer(Stats::Scope& scope, Envoy::TimeSource& time_source); diff --git a/source/extensions/filters/network/mongo_proxy/BUILD b/source/extensions/filters/network/mongo_proxy/BUILD index c4c08d4a6bc2..e471803285ad 100644 --- a/source/extensions/filters/network/mongo_proxy/BUILD +++ b/source/extensions/filters/network/mongo_proxy/BUILD @@ -89,6 +89,7 @@ envoy_cc_library( deps = [ "//include/envoy/stats:stats_interface", "//source/common/stats:symbol_table_lib", + "//source/common/stats:utility_lib", ], ) diff --git a/source/extensions/filters/network/mongo_proxy/mongo_stats.cc b/source/extensions/filters/network/mongo_proxy/mongo_stats.cc index bf9e90ce105c..6059b461f94c 100644 --- a/source/extensions/filters/network/mongo_proxy/mongo_stats.cc +++ b/source/extensions/filters/network/mongo_proxy/mongo_stats.cc @@ -31,23 +31,21 @@ MongoStats::MongoStats(Stats::Scope& scope, absl::string_view prefix) stat_name_set_->rememberBuiltins({"insert", "query", "update", "delete"}); } -Stats::SymbolTable::StoragePtr MongoStats::addPrefix(const std::vector& names) { - std::vector names_with_prefix; +Stats::ElementVec MongoStats::addPrefix(const Stats::ElementVec& names) { + Stats::ElementVec names_with_prefix; names_with_prefix.reserve(1 + names.size()); names_with_prefix.push_back(prefix_); names_with_prefix.insert(names_with_prefix.end(), names.begin(), names.end()); - return scope_.symbolTable().join(names_with_prefix); + return names_with_prefix; } -void MongoStats::incCounter(const std::vector& names) { - const Stats::SymbolTable::StoragePtr stat_name_storage = addPrefix(names); - scope_.counterFromStatName(Stats::StatName(stat_name_storage.get())).inc(); +void MongoStats::incCounter(const Stats::ElementVec& names) { + Stats::Utility::counterFromElements(scope_, addPrefix(names)).inc(); } -void MongoStats::recordHistogram(const std::vector& names, - Stats::Histogram::Unit unit, uint64_t sample) { - const Stats::SymbolTable::StoragePtr stat_name_storage = addPrefix(names); - scope_.histogramFromStatName(Stats::StatName(stat_name_storage.get()), unit).recordValue(sample); +void MongoStats::recordHistogram(const Stats::ElementVec& names, Stats::Histogram::Unit unit, + uint64_t sample) { + Stats::Utility::histogramFromElements(scope_, addPrefix(names), unit).recordValue(sample); } } // namespace MongoProxy diff --git a/source/extensions/filters/network/mongo_proxy/mongo_stats.h b/source/extensions/filters/network/mongo_proxy/mongo_stats.h index f49d4d34e7bf..3571c19bbca2 100644 --- a/source/extensions/filters/network/mongo_proxy/mongo_stats.h +++ 
b/source/extensions/filters/network/mongo_proxy/mongo_stats.h @@ -7,6 +7,7 @@ #include "envoy/stats/scope.h" #include "common/stats/symbol_table_impl.h" +#include "common/stats/utility.h" namespace Envoy { namespace Extensions { @@ -17,8 +18,8 @@ class MongoStats { public: MongoStats(Stats::Scope& scope, absl::string_view prefix); - void incCounter(const std::vector& names); - void recordHistogram(const std::vector& names, Stats::Histogram::Unit unit, + void incCounter(const Stats::ElementVec& names); + void recordHistogram(const Stats::ElementVec& names, Stats::Histogram::Unit unit, uint64_t sample); /** @@ -34,7 +35,7 @@ class MongoStats { Stats::SymbolTable& symbolTable() { return scope_.symbolTable(); } private: - Stats::SymbolTable::StoragePtr addPrefix(const std::vector& names); + Stats::ElementVec addPrefix(const Stats::ElementVec& names); Stats::Scope& scope_; Stats::StatNameSetPtr stat_name_set_; diff --git a/source/extensions/filters/network/mongo_proxy/proxy.cc b/source/extensions/filters/network/mongo_proxy/proxy.cc index fcbd3f6c52bb..c764c618df1b 100644 --- a/source/extensions/filters/network/mongo_proxy/proxy.cc +++ b/source/extensions/filters/network/mongo_proxy/proxy.cc @@ -152,17 +152,16 @@ void ProxyFilter::decodeQuery(QueryMessagePtr&& message) { } else { // Normal query, get stats on a per collection basis first. QueryMessageInfo::QueryType query_type = active_query->query_info_.type(); - Stats::StatNameVec names; + Stats::ElementVec names; names.reserve(6); // 2 entries are added by chargeQueryStats(). names.push_back(mongo_stats_->collection_); - Stats::StatNameDynamicPool dynamic(mongo_stats_->symbolTable()); - names.push_back(dynamic.add(active_query->query_info_.collection())); + names.push_back(Stats::DynamicName(active_query->query_info_.collection())); chargeQueryStats(names, query_type); // Callsite stats if we have it. if (!active_query->query_info_.callsite().empty()) { names.push_back(mongo_stats_->callsite_); - names.push_back(dynamic.add(active_query->query_info_.callsite())); + names.push_back(Stats::DynamicName(active_query->query_info_.callsite())); chargeQueryStats(names, query_type); } @@ -180,7 +179,7 @@ void ProxyFilter::decodeQuery(QueryMessagePtr&& message) { active_query_list_.emplace_back(std::move(active_query)); } -void ProxyFilter::chargeQueryStats(Stats::StatNameVec& names, +void ProxyFilter::chargeQueryStats(Stats::ElementVec& names, QueryMessageInfo::QueryType query_type) { // names come in containing {"collection", collection}. Report stats for 1 or // 2 variations on this array, and then return with the array in the same @@ -224,16 +223,15 @@ void ProxyFilter::decodeReply(ReplyMessagePtr&& message) { } if (!active_query.query_info_.command().empty()) { - Stats::StatNameVec names{mongo_stats_->cmd_, - mongo_stats_->getBuiltin(active_query.query_info_.command(), - mongo_stats_->unknown_command_)}; + Stats::ElementVec names{mongo_stats_->cmd_, + mongo_stats_->getBuiltin(active_query.query_info_.command(), + mongo_stats_->unknown_command_)}; chargeReplyStats(active_query, names, *message); } else { // Collection stats first. - Stats::StatNameDynamicPool dynamic(mongo_stats_->symbolTable()); - Stats::StatNameVec names{mongo_stats_->collection_, - dynamic.add(active_query.query_info_.collection()), - mongo_stats_->query_}; + Stats::ElementVec names{mongo_stats_->collection_, + Stats::DynamicName(active_query.query_info_.collection()), + mongo_stats_->query_}; chargeReplyStats(active_query, names, *message); // Callsite stats if we have it. 
@@ -242,7 +240,7 @@ void ProxyFilter::decodeReply(ReplyMessagePtr&& message) { // to mutate the array to {"collection", collection, "callsite", callsite, "query"}. ASSERT(names.size() == 3); names.back() = mongo_stats_->callsite_; // Replaces "query". - names.push_back(dynamic.add(active_query.query_info_.callsite())); + names.push_back(Stats::DynamicName(active_query.query_info_.callsite())); names.push_back(mongo_stats_->query_); chargeReplyStats(active_query, names, *message); } @@ -292,7 +290,7 @@ void ProxyFilter::onDrainClose() { read_callbacks_->connection().close(Network::ConnectionCloseType::FlushWrite); } -void ProxyFilter::chargeReplyStats(ActiveQuery& active_query, Stats::StatNameVec& names, +void ProxyFilter::chargeReplyStats(ActiveQuery& active_query, Stats::ElementVec& names, const ReplyMessage& message) { uint64_t reply_documents_byte_size = 0; for (const Bson::DocumentSharedPtr& document : message.documents()) { diff --git a/source/extensions/filters/network/mongo_proxy/proxy.h b/source/extensions/filters/network/mongo_proxy/proxy.h index 0da6146f418e..c54308f1ae38 100644 --- a/source/extensions/filters/network/mongo_proxy/proxy.h +++ b/source/extensions/filters/network/mongo_proxy/proxy.h @@ -167,12 +167,12 @@ class ProxyFilter : public Network::Filter, // Increment counters related to queries. 'names' is passed by non-const // reference so the implementation can mutate it without copying, though it // always restores it to its prior state prior to return. - void chargeQueryStats(Stats::StatNameVec& names, QueryMessageInfo::QueryType query_type); + void chargeQueryStats(Stats::ElementVec& names, QueryMessageInfo::QueryType query_type); // Add samples to histograms related to replies. 'names' is passed by // non-const reference so the implementation can mutate it without copying, // though it always restores it to its prior state prior to return. 
- void chargeReplyStats(ActiveQuery& active_query, Stats::StatNameVec& names, + void chargeReplyStats(ActiveQuery& active_query, Stats::ElementVec& names, const ReplyMessage& message); void doDecode(Buffer::Instance& buffer); diff --git a/source/extensions/filters/network/zookeeper_proxy/filter.cc b/source/extensions/filters/network/zookeeper_proxy/filter.cc index 331d8a476e69..b6c38c0ec297 100644 --- a/source/extensions/filters/network/zookeeper_proxy/filter.cc +++ b/source/extensions/filters/network/zookeeper_proxy/filter.cc @@ -154,11 +154,11 @@ void ZooKeeperFilter::onPing() { } void ZooKeeperFilter::onAuthRequest(const std::string& scheme) { - Stats::SymbolTable::StoragePtr storage = config_->scope_.symbolTable().join( - {config_->stat_prefix_, config_->auth_, - config_->stat_name_set_->getBuiltin(absl::StrCat(scheme, "_rq"), - config_->unknown_scheme_rq_)}); - config_->scope_.counterFromStatName(Stats::StatName(storage.get())).inc(); + Stats::Counter& counter = Stats::Utility::counterFromStatNames( + config_->scope_, {config_->stat_prefix_, config_->auth_, + config_->stat_name_set_->getBuiltin(absl::StrCat(scheme, "_rq"), + config_->unknown_scheme_rq_)}); + counter.inc(); setDynamicMetadata("opname", "auth"); } @@ -290,11 +290,10 @@ void ZooKeeperFilter::onConnectResponse(const int32_t proto_version, const int32 const std::chrono::milliseconds& latency) { config_->stats_.connect_resp_.inc(); - Stats::SymbolTable::StoragePtr storage = - config_->scope_.symbolTable().join({config_->stat_prefix_, config_->connect_latency_}); - config_->scope_ - .histogramFromStatName(Stats::StatName(storage.get()), Stats::Histogram::Unit::Milliseconds) - .recordValue(latency.count()); + Stats::Histogram& histogram = Stats::Utility::histogramFromElements( + config_->scope_, {config_->stat_prefix_, config_->connect_latency_}, + Stats::Histogram::Unit::Milliseconds); + histogram.recordValue(latency.count()); setDynamicMetadata({{"opname", "connect_response"}, {"protocol_version", std::to_string(proto_version)}, @@ -313,11 +312,11 @@ void ZooKeeperFilter::onResponse(const OpCodes opcode, const int32_t xid, const opname = opcode_info.opname_; opcode_latency = opcode_info.latency_name_; } - Stats::SymbolTable::StoragePtr storage = - config_->scope_.symbolTable().join({config_->stat_prefix_, opcode_latency}); - config_->scope_ - .histogramFromStatName(Stats::StatName(storage.get()), Stats::Histogram::Unit::Milliseconds) - .recordValue(latency.count()); + + Stats::Histogram& histogram = Stats::Utility::histogramFromStatNames( + config_->scope_, {config_->stat_prefix_, opcode_latency}, + Stats::Histogram::Unit::Milliseconds); + histogram.recordValue(latency.count()); setDynamicMetadata({{"opname", opname}, {"xid", std::to_string(xid)}, diff --git a/source/extensions/transport_sockets/tls/BUILD b/source/extensions/transport_sockets/tls/BUILD index 748c7b99559f..7cf2407b61fb 100644 --- a/source/extensions/transport_sockets/tls/BUILD +++ b/source/extensions/transport_sockets/tls/BUILD @@ -110,6 +110,7 @@ envoy_cc_library( "//source/common/network:address_lib", "//source/common/protobuf:utility_lib", "//source/common/stats:symbol_table_lib", + "//source/common/stats:utility_lib", "//source/extensions/transport_sockets/tls/private_key:private_key_manager_lib", "@envoy_api//envoy/admin/v3:pkg_cc_proto", "@envoy_api//envoy/type/matcher/v3:pkg_cc_proto", diff --git a/source/extensions/transport_sockets/tls/context_impl.cc b/source/extensions/transport_sockets/tls/context_impl.cc index 7292bba9b005..2aaec7f14b88 100644 
--- a/source/extensions/transport_sockets/tls/context_impl.cc +++ b/source/extensions/transport_sockets/tls/context_impl.cc @@ -19,6 +19,7 @@ #include "common/common/utility.h" #include "common/network/address_impl.h" #include "common/protobuf/utility.h" +#include "common/stats/utility.h" #include "extensions/transport_sockets/tls/utility.h" @@ -594,10 +595,9 @@ Envoy::Ssl::ClientValidationStatus ContextImpl::verifyCertificate( void ContextImpl::incCounter(const Stats::StatName name, absl::string_view value, const Stats::StatName fallback) const { - Stats::SymbolTable& symbol_table = scope_.symbolTable(); - Stats::SymbolTable::StoragePtr storage = - symbol_table.join({name, stat_name_set_->getBuiltin(value, fallback)}); - scope_.counterFromStatName(Stats::StatName(storage.get())).inc(); + Stats::Counter& counter = Stats::Utility::counterFromElements( + scope_, {name, stat_name_set_->getBuiltin(value, fallback)}); + counter.inc(); #ifdef LOG_BUILTIN_STAT_NAMES std::cerr << absl::StrCat("Builtin ", symbol_table.toString(name), ": ", value, "\n") diff --git a/test/common/grpc/context_impl_test.cc b/test/common/grpc/context_impl_test.cc index c1fa773b25d3..d412dd87920f 100644 --- a/test/common/grpc/context_impl_test.cc +++ b/test/common/grpc/context_impl_test.cc @@ -73,8 +73,8 @@ TEST(GrpcContextTest, ResolveServiceAndMethod) { absl::optional request_names = context.resolveDynamicServiceAndMethod(path); EXPECT_TRUE(request_names); - EXPECT_EQ("service_name", symbol_table->toString(request_names->service_)); - EXPECT_EQ("method_name", symbol_table->toString(request_names->method_)); + EXPECT_EQ("service_name", absl::get(request_names->service_)); + EXPECT_EQ("method_name", absl::get(request_names->method_)); headers.setPath(""); EXPECT_FALSE(context.resolveDynamicServiceAndMethod(path)); headers.setPath("/"); diff --git a/test/common/stats/BUILD b/test/common/stats/BUILD index ad8e7885cd1c..5e38a6268836 100644 --- a/test/common/stats/BUILD +++ b/test/common/stats/BUILD @@ -241,3 +241,12 @@ envoy_cc_test_binary( "@envoy_api//envoy/config/metrics/v3:pkg_cc_proto", ], ) + +envoy_cc_test( + name = "utility_test", + srcs = ["utility_test.cc"], + deps = [ + "//source/common/stats:isolated_store_lib", + "//source/common/stats:utility_lib", + ], +) diff --git a/test/common/stats/utility_test.cc b/test/common/stats/utility_test.cc new file mode 100644 index 000000000000..8f4ec260d3bb --- /dev/null +++ b/test/common/stats/utility_test.cc @@ -0,0 +1,112 @@ +#include + +#include "envoy/stats/stats_macros.h" + +#include "common/stats/isolated_store_impl.h" +#include "common/stats/null_counter.h" +#include "common/stats/null_gauge.h" +#include "common/stats/symbol_table_creator.h" + +#include "absl/strings/str_cat.h" +#include "absl/strings/string_view.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Stats { +namespace { + +class StatsUtilityTest : public testing::Test { +protected: + StatsUtilityTest() + : symbol_table_(SymbolTableCreator::makeSymbolTable()), + store_(std::make_unique(*symbol_table_)), pool_(*symbol_table_), + tags_( + {{pool_.add("tag1"), pool_.add("value1")}, {pool_.add("tag2"), pool_.add("value2")}}) {} + + ~StatsUtilityTest() override { + pool_.clear(); + store_.reset(); + EXPECT_EQ(0, symbol_table_->numSymbols()); + } + + SymbolTablePtr symbol_table_; + std::unique_ptr store_; + StatNamePool pool_; + StatNameTagVector tags_; +}; + +TEST_F(StatsUtilityTest, Counters) { + ScopePtr scope = store_->createScope("scope."); + Counter& c1 = Utility::counterFromElements(*scope, 
{DynamicName("a"), DynamicName("b")}); + EXPECT_EQ("scope.a.b", c1.name()); + StatName token = pool_.add("token"); + Counter& c2 = Utility::counterFromElements(*scope, {DynamicName("a"), token, DynamicName("b")}); + EXPECT_EQ("scope.a.token.b", c2.name()); + StatName suffix = pool_.add("suffix"); + Counter& c3 = Utility::counterFromElements(*scope, {token, suffix}); + EXPECT_EQ("scope.token.suffix", c3.name()); + Counter& c4 = Utility::counterFromStatNames(*scope, {token, suffix}); + EXPECT_EQ("scope.token.suffix", c4.name()); + EXPECT_EQ(&c3, &c4); + + Counter& ctags = + Utility::counterFromElements(*scope, {DynamicName("x"), token, DynamicName("y")}, tags_); + EXPECT_EQ("scope.x.token.y.tag1.value1.tag2.value2", ctags.name()); +} + +TEST_F(StatsUtilityTest, Gauges) { + ScopePtr scope = store_->createScope("scope."); + Gauge& g1 = Utility::gaugeFromElements(*scope, {DynamicName("a"), DynamicName("b")}, + Gauge::ImportMode::NeverImport); + EXPECT_EQ("scope.a.b", g1.name()); + EXPECT_EQ(Gauge::ImportMode::NeverImport, g1.importMode()); + StatName token = pool_.add("token"); + Gauge& g2 = Utility::gaugeFromElements(*scope, {DynamicName("a"), token, DynamicName("b")}, + Gauge::ImportMode::Accumulate); + EXPECT_EQ("scope.a.token.b", g2.name()); + EXPECT_EQ(Gauge::ImportMode::Accumulate, g2.importMode()); + StatName suffix = pool_.add("suffix"); + Gauge& g3 = Utility::gaugeFromElements(*scope, {token, suffix}, Gauge::ImportMode::NeverImport); + EXPECT_EQ("scope.token.suffix", g3.name()); + Gauge& g4 = Utility::gaugeFromStatNames(*scope, {token, suffix}, Gauge::ImportMode::NeverImport); + EXPECT_EQ("scope.token.suffix", g4.name()); + EXPECT_EQ(&g3, &g4); +} + +TEST_F(StatsUtilityTest, Histograms) { + ScopePtr scope = store_->createScope("scope."); + Histogram& h1 = Utility::histogramFromElements(*scope, {DynamicName("a"), DynamicName("b")}, + Histogram::Unit::Milliseconds); + EXPECT_EQ("scope.a.b", h1.name()); + EXPECT_EQ(Histogram::Unit::Milliseconds, h1.unit()); + StatName token = pool_.add("token"); + Histogram& h2 = Utility::histogramFromElements( + *scope, {DynamicName("a"), token, DynamicName("b")}, Histogram::Unit::Microseconds); + EXPECT_EQ("scope.a.token.b", h2.name()); + EXPECT_EQ(Histogram::Unit::Microseconds, h2.unit()); + StatName suffix = pool_.add("suffix"); + Histogram& h3 = Utility::histogramFromElements(*scope, {token, suffix}, Histogram::Unit::Bytes); + EXPECT_EQ("scope.token.suffix", h3.name()); + EXPECT_EQ(Histogram::Unit::Bytes, h3.unit()); + Histogram& h4 = Utility::histogramFromStatNames(*scope, {token, suffix}, Histogram::Unit::Bytes); + EXPECT_EQ(&h3, &h4); +} + +TEST_F(StatsUtilityTest, TextReadouts) { + ScopePtr scope = store_->createScope("scope."); + TextReadout& t1 = Utility::textReadoutFromElements(*scope, {DynamicName("a"), DynamicName("b")}); + EXPECT_EQ("scope.a.b", t1.name()); + StatName token = pool_.add("token"); + TextReadout& t2 = + Utility::textReadoutFromElements(*scope, {DynamicName("a"), token, DynamicName("b")}); + EXPECT_EQ("scope.a.token.b", t2.name()); + StatName suffix = pool_.add("suffix"); + TextReadout& t3 = Utility::textReadoutFromElements(*scope, {token, suffix}); + EXPECT_EQ("scope.token.suffix", t3.name()); + TextReadout& t4 = Utility::textReadoutFromStatNames(*scope, {token, suffix}); + EXPECT_EQ(&t3, &t4); +} + +} // namespace +} // namespace Stats +} // namespace Envoy From 52ce75f6ee389e3f192129ff9b429029d594fc43 Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Sat, 25 Apr 2020 15:02:55 -0700 Subject: [PATCH 030/909] http: replace 
vector/reserve with InlinedVector in codec helper (#10941) Signed-off-by: Matt Klein --- source/common/http/codec_helper.h | 17 ++++++----------- source/common/http/conn_manager_impl.cc | 2 +- source/common/http/http1/codec_impl.h | 4 ++-- source/common/http/http2/codec_impl.h | 4 ++-- .../quic_listeners/quiche/envoy_quic_stream.h | 6 ++++-- 5 files changed, 15 insertions(+), 18 deletions(-) diff --git a/source/common/http/codec_helper.h b/source/common/http/codec_helper.h index 3cc6d5bd6580..5128891e4105 100644 --- a/source/common/http/codec_helper.h +++ b/source/common/http/codec_helper.h @@ -1,11 +1,11 @@ #pragma once -#include <vector> - #include "envoy/http/codec.h" #include "common/common/assert.h" +#include "absl/container/inlined_vector.h" + namespace Envoy { namespace Http { @@ -54,12 +54,7 @@ class StreamCallbackHelper { bool local_end_stream_{}; protected: - StreamCallbackHelper() { - // Set space for 8 callbacks (64 bytes). - callbacks_.reserve(8); - } - - void addCallbacks_(StreamCallbacks& callbacks) { + void addCallbacksHelper(StreamCallbacks& callbacks) { ASSERT(!reset_callbacks_started_ && !local_end_stream_); callbacks_.push_back(&callbacks); for (uint32_t i = 0; i < high_watermark_callbacks_; ++i) { @@ -67,12 +62,12 @@ class StreamCallbackHelper { } } - void removeCallbacks_(StreamCallbacks& callbacks) { + void removeCallbacksHelper(StreamCallbacks& callbacks) { // For performance reasons we just clear the callback and do not resize the vector. // Reset callbacks scale with the number of filters per request and do not get added and // removed multiple times. // The vector may not be safely resized without making sure the run.*Callbacks() helper - // functions above still handle removeCallbacks_() calls mid-loop. + // functions above still handle removeCallbacksHelper() calls mid-loop. for (auto& callback : callbacks_) { if (callback == &callbacks) { callback = nullptr; @@ -82,7 +77,7 @@ class StreamCallbackHelper { } private: - std::vector<StreamCallbacks*> callbacks_; + absl::InlinedVector<StreamCallbacks*, 8> callbacks_; bool reset_callbacks_started_{}; uint32_t high_watermark_callbacks_{}; }; diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index 75c8bed64cd2..7ab746ad5d27 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -272,7 +272,7 @@ RequestDecoder& ConnectionManagerImpl::newStream(ResponseEncoder& response_encod new_stream->response_encoder_->getStream().addCallbacks(*new_stream); new_stream->buffer_limit_ = new_stream->response_encoder_->getStream().bufferLimit(); // If the network connection is backed up, the stream should be made aware of it on creation. - // Both HTTP/1.x and HTTP/2 codecs handle this in StreamCallbackHelper::addCallbacksHelper.
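For readers unfamiliar with absl::InlinedVector, the codec_helper.h hunk above swaps a heap-allocated std::vector (plus a reserve(8) in the constructor) for a vector whose first eight pointer slots live inside the StreamCallbackHelper object itself, so the common case of a handful of stream callbacks never allocates. Below is a minimal standalone sketch of that storage pattern, assuming Abseil is available; the Callbacks struct and CallbackHolder class are illustrative stand-ins, not Envoy code.

// Sketch of the callback storage pattern introduced above: up to 8 pointers are
// stored inline, and removal nulls out the slot instead of resizing so that any
// iteration already in progress stays valid.
#include <cstddef>
#include <iostream>

#include "absl/container/inlined_vector.h"

struct Callbacks {
  int id;
};

class CallbackHolder {
public:
  void add(Callbacks& cb) { callbacks_.push_back(&cb); }

  // Mirror removeCallbacksHelper(): clear the slot, never erase or resize.
  void remove(Callbacks& cb) {
    for (auto& entry : callbacks_) {
      if (entry == &cb) {
        entry = nullptr;
        return;
      }
    }
  }

  size_t active() const {
    size_t count = 0;
    for (const auto* entry : callbacks_) {
      if (entry != nullptr) {
        ++count;
      }
    }
    return count;
  }

private:
  // Only the ninth callback onwards would spill into a heap allocation.
  absl::InlinedVector<Callbacks*, 8> callbacks_;
};

int main() {
  CallbackHolder holder;
  Callbacks a{1}, b{2};
  holder.add(a);
  holder.add(b);
  holder.remove(a);
  std::cout << holder.active() << "\n"; // prints 1
  return 0;
}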
ASSERT(read_callbacks_->connection().aboveHighWatermark() == false || new_stream->high_watermark_count_ > 0); new_stream->moveIntoList(std::move(new_stream), streams_); diff --git a/source/common/http/http1/codec_impl.h b/source/common/http/http1/codec_impl.h index 12901e447e9a..f41824755b57 100644 --- a/source/common/http/http1/codec_impl.h +++ b/source/common/http/http1/codec_impl.h @@ -61,8 +61,8 @@ class StreamEncoderImpl : public virtual StreamEncoder, void disableChunkEncoding() override { disable_chunk_encoding_ = true; } // Http::Stream - void addCallbacks(StreamCallbacks& callbacks) override { addCallbacks_(callbacks); } - void removeCallbacks(StreamCallbacks& callbacks) override { removeCallbacks_(callbacks); } + void addCallbacks(StreamCallbacks& callbacks) override { addCallbacksHelper(callbacks); } + void removeCallbacks(StreamCallbacks& callbacks) override { removeCallbacksHelper(callbacks); } // After this is called, for the HTTP/1 codec, the connection should be closed, i.e. no further // progress may be made with the codec. void resetStream(StreamResetReason reason) override; diff --git a/source/common/http/http2/codec_impl.h b/source/common/http/http2/codec_impl.h index ed579f6ff7ff..23c670debca7 100644 --- a/source/common/http/http2/codec_impl.h +++ b/source/common/http/http2/codec_impl.h @@ -208,8 +208,8 @@ class ConnectionImpl : public virtual Connection, protected Logger::LoggablelocalAddress(); From 0b0213fdc38eb0d0346659cf90bd4c502cadb00c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Guti=C3=A9rrez=20Segal=C3=A9s?= Date: Sat, 25 Apr 2020 18:03:49 -0400 Subject: [PATCH 031/909] gzip filter: allow setting zlib compressor's chunk size (#10508) Signed-off-by: Raul Gutierrez Segales --- .../extensions/filters/http/gzip/v3/gzip.proto | 7 ++++++- docs/root/version_history/current.rst | 1 + .../extensions/filters/http/gzip/v3/gzip.proto | 7 ++++++- .../extensions/filters/http/gzip/gzip_filter.cc | 5 +++-- source/extensions/filters/http/gzip/gzip_filter.h | 6 ++++-- .../filters/http/gzip/gzip_filter_test.cc | 15 +++++++++++++++ 6 files changed, 35 insertions(+), 6 deletions(-) diff --git a/api/envoy/extensions/filters/http/gzip/v3/gzip.proto b/api/envoy/extensions/filters/http/gzip/v3/gzip.proto index eb8a69f083ba..20cae5c400d3 100644 --- a/api/envoy/extensions/filters/http/gzip/v3/gzip.proto +++ b/api/envoy/extensions/filters/http/gzip/v3/gzip.proto @@ -19,7 +19,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Gzip :ref:`configuration overview `. // [#extension: envoy.filters.http.gzip] -// [#next-free-field: 11] +// [#next-free-field: 12] message Gzip { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.gzip.v2.Gzip"; @@ -76,4 +76,9 @@ message Gzip { // the fields `content_length`, `content_type`, `disable_on_etag_header` and // `remove_accept_encoding_header` are ignored. compressor.v3.Compressor compressor = 10; + + // Value for Zlib's next output buffer. If not set, defaults to 4096. + // See https://www.zlib.net/manual.html for more details. Also see + // https://github.com/envoyproxy/envoy/issues/8448 for context on this filter's performance. 
+ google.protobuf.UInt32Value chunk_size = 11 [(validate.rules).uint32 = {lte: 65536 gte: 4096}]; } diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index b2cc90a89d19..2449c881fa10 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -13,6 +13,7 @@ Changes Disabled by default and can be enabled via :ref:`enable_upstream_stats `. * grpc-json: added support for streaming response using `google.api.HttpBody `_. +* gzip filter: added option to set zlib's next output buffer size. * http: fixed a bug where in some cases slash was moved from path to query string when :ref:`merging of adjacent slashes` is enabled. * http: fixed a bug where the upgrade header was not cleared on responses to non-upgrade requests. Can be reverted temporarily by setting runtime feature `envoy.reloadable_features.fix_upgrade_response` to false. diff --git a/generated_api_shadow/envoy/extensions/filters/http/gzip/v3/gzip.proto b/generated_api_shadow/envoy/extensions/filters/http/gzip/v3/gzip.proto index 3206037723de..e711827481a3 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/gzip/v3/gzip.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/gzip/v3/gzip.proto @@ -19,7 +19,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Gzip :ref:`configuration overview `. // [#extension: envoy.filters.http.gzip] -// [#next-free-field: 11] +// [#next-free-field: 12] message Gzip { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.gzip.v2.Gzip"; @@ -72,6 +72,11 @@ message Gzip { // `remove_accept_encoding_header` are ignored. compressor.v3.Compressor compressor = 10; + // Value for Zlib's next output buffer. If not set, defaults to 4096. + // See https://www.zlib.net/manual.html for more details. Also see + // https://github.com/envoyproxy/envoy/issues/8448 for context on this filter's performance. 
+ google.protobuf.UInt32Value chunk_size = 11 [(validate.rules).uint32 = {lte: 65536 gte: 4096}]; + google.protobuf.UInt32Value hidden_envoy_deprecated_content_length = 2 [deprecated = true]; repeated string hidden_envoy_deprecated_content_type = 6 [deprecated = true]; diff --git a/source/extensions/filters/http/gzip/gzip_filter.cc b/source/extensions/filters/http/gzip/gzip_filter.cc index 846500cd6dab..f9c3572a1a35 100644 --- a/source/extensions/filters/http/gzip/gzip_filter.cc +++ b/source/extensions/filters/http/gzip/gzip_filter.cc @@ -28,10 +28,11 @@ GzipFilterConfig::GzipFilterConfig(const envoy::extensions::filters::http::gzip: compression_level_(compressionLevelEnum(gzip.compression_level())), compression_strategy_(compressionStrategyEnum(gzip.compression_strategy())), memory_level_(memoryLevelUint(gzip.memory_level().value())), - window_bits_(windowBitsUint(gzip.window_bits().value())) {} + window_bits_(windowBitsUint(gzip.window_bits().value())), + chunk_size_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(gzip, chunk_size, 4096)) {} std::unique_ptr GzipFilterConfig::makeCompressor() { - auto compressor = std::make_unique(); + auto compressor = std::make_unique(chunk_size_); compressor->init(compressionLevel(), compressionStrategy(), windowBits(), memoryLevel()); return compressor; } diff --git a/source/extensions/filters/http/gzip/gzip_filter.h b/source/extensions/filters/http/gzip/gzip_filter.h index a7c6406c2dc4..ec56b28ddf92 100644 --- a/source/extensions/filters/http/gzip/gzip_filter.h +++ b/source/extensions/filters/http/gzip/gzip_filter.h @@ -31,6 +31,7 @@ class GzipFilterConfig : public Common::Compressors::CompressorFilterConfig { uint64_t memoryLevel() const { return memory_level_; } uint64_t windowBits() const { return window_bits_; } + uint32_t chunkSize() const { return chunk_size_; } private: static Compressor::ZlibCompressorImpl::CompressionLevel compressionLevelEnum( @@ -48,8 +49,9 @@ class GzipFilterConfig : public Common::Compressors::CompressorFilterConfig { Compressor::ZlibCompressorImpl::CompressionLevel compression_level_; Compressor::ZlibCompressorImpl::CompressionStrategy compression_strategy_; - int32_t memory_level_; - int32_t window_bits_; + const int32_t memory_level_; + const int32_t window_bits_; + const uint32_t chunk_size_; }; } // namespace Gzip diff --git a/test/extensions/filters/http/gzip/gzip_filter_test.cc b/test/extensions/filters/http/gzip/gzip_filter_test.cc index 24f715b569e7..565b84de2523 100644 --- a/test/extensions/filters/http/gzip/gzip_filter_test.cc +++ b/test/extensions/filters/http/gzip/gzip_filter_test.cc @@ -417,6 +417,21 @@ TEST(GzipFilterConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName deprecated_name)); } +// Test setting zlib's chunk size. +TEST_F(GzipFilterTest, ChunkSize) { + // Default + setUpFilter("{}"); + EXPECT_EQ(config_->chunkSize(), 4096); + + // Override + setUpFilter(R"EOF( +{ + "chunk_size": 8192 +} +)EOF"); + EXPECT_EQ(config_->chunkSize(), 8192); +} + } // namespace Gzip } // namespace HttpFilters } // namespace Extensions From bad35c86655c77c155303c1c4b9f651f7de42714 Mon Sep 17 00:00:00 2001 From: htuch Date: Mon, 27 Apr 2020 11:45:15 -0400 Subject: [PATCH 032/909] fuzz: improve header/data stop/continue modeling in HCM fuzzer. (#10931) Previously we weren't tracking the status returned from the mock decodeData(), leading to incorrect invocation of continueDecoding(). This resulted in the underlying fuzz bug that this patch fixes. 
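Put differently, continueDecoding() only makes sense after some filter callback has actually stopped iteration; calling it after the filter simply returned Continue is the incorrect invocation described above. The following is a rough standalone sketch of the gating condition, using local stand-in enums rather than Envoy's FilterHeadersStatus/FilterDataStatus and omitting the watermark variants.

// Illustrative check: only allow a continue-decoding action when the recorded
// header/data callback statuses show that iteration was actually stopped.
#include <iostream>

#include "absl/types/optional.h"

enum class HeaderStatus { Continue, StopIteration, StopAllIterationAndBuffer };
enum class DataStatus { Continue, StopIterationAndBuffer, StopIterationNoBuffer };

bool mayContinueDecoding(absl::optional<HeaderStatus> header_status,
                         absl::optional<DataStatus> data_status) {
  if (header_status == HeaderStatus::StopAllIterationAndBuffer) {
    return true;
  }
  if (header_status == HeaderStatus::StopIteration &&
      (data_status == DataStatus::StopIterationAndBuffer ||
       data_status == DataStatus::StopIterationNoBuffer)) {
    return true;
  }
  return false;
}

int main() {
  // A stream whose filter returned plain Continue must not be continued again.
  std::cout << mayContinueDecoding(HeaderStatus::Continue, absl::nullopt) << "\n"; // 0
  // A stream stopped at headers may legitimately be continued.
  std::cout << mayContinueDecoding(HeaderStatus::StopAllIterationAndBuffer, absl::nullopt) << "\n"; // 1
  return 0;
}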
In addition to improving status state tracking, this PR also adds the ability for the mock codec to return stop/continue from decodeHeaders() and the full range of stop/continue values for decodeData(). Risk level: Low Testing: Corpus entry added. Fixes oss-fuzz issue https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=18378. Signed-off-by: Harvey Tuch --- ...nn_manager_impl_fuzz_test-5669833168912384 | 1 + test/common/http/conn_manager_impl_fuzz.proto | 5 ++- .../http/conn_manager_impl_fuzz_test.cc | 35 +++++++++++++++---- 3 files changed, 34 insertions(+), 7 deletions(-) create mode 100644 test/common/http/conn_manager_impl_corpus/clusterfuzz-testcase-minimized-conn_manager_impl_fuzz_test-5669833168912384 diff --git a/test/common/http/conn_manager_impl_corpus/clusterfuzz-testcase-minimized-conn_manager_impl_fuzz_test-5669833168912384 b/test/common/http/conn_manager_impl_corpus/clusterfuzz-testcase-minimized-conn_manager_impl_fuzz_test-5669833168912384 new file mode 100644 index 000000000000..a25a96c91dbd --- /dev/null +++ b/test/common/http/conn_manager_impl_corpus/clusterfuzz-testcase-minimized-conn_manager_impl_fuzz_test-5669833168912384 @@ -0,0 +1 @@ +actions { new_stream { request_headers { headers { key: ":path" value: "/" } headers { key: ":authority" } } } } actions { stream_action { request { continue_decoding { } } } } diff --git a/test/common/http/conn_manager_impl_fuzz.proto b/test/common/http/conn_manager_impl_fuzz.proto index a6a3617d0165..5cc690eb838a 100644 --- a/test/common/http/conn_manager_impl_fuzz.proto +++ b/test/common/http/conn_manager_impl_fuzz.proto @@ -11,12 +11,15 @@ import "test/fuzz/common.proto"; message NewStream { test.fuzz.Headers request_headers = 1; bool end_stream = 2; - // TODO(htuch): Support stop/continue status with headers. + HeaderStatus status = 3; } enum HeaderStatus { HEADER_CONTINUE = 0; HEADER_STOP_ITERATION = 1; + HEADER_CONTINUE_AND_END_STREAM = 2; + HEADER_STOP_ALL_ITERATION_AND_BUFFER = 3; + HEADER_STOP_ALL_ITERATION_AND_WATERMARK = 4; } enum DataStatus { diff --git a/test/common/http/conn_manager_impl_fuzz_test.cc b/test/common/http/conn_manager_impl_fuzz_test.cc index 4d0512ff7dad..43db08612d9d 100644 --- a/test/common/http/conn_manager_impl_fuzz_test.cc +++ b/test/common/http/conn_manager_impl_fuzz_test.cc @@ -217,11 +217,14 @@ class FuzzStream { enum class StreamState { PendingHeaders, PendingDataOrTrailers, Closed }; FuzzStream(ConnectionManagerImpl& conn_manager, FuzzConfig& config, - const HeaderMap& request_headers, bool end_stream) + const HeaderMap& request_headers, + test::common::http::HeaderStatus decode_header_status, bool end_stream) : conn_manager_(conn_manager), config_(config) { config_.newStream(); request_state_ = end_stream ?
StreamState::Closed : StreamState::PendingDataOrTrailers; response_state_ = StreamState::PendingHeaders; + decoder_filter_ = config.decoder_filter_; + encoder_filter_ = config.encoder_filter_; EXPECT_CALL(*config_.codec_, dispatch(_)) .WillOnce(InvokeWithoutArgs([this, &request_headers, end_stream] { decoder_ = &conn_manager_.newStream(encoder_); @@ -244,9 +247,13 @@ class FuzzStream { })); decoder_->decodeHeaders(std::move(headers), end_stream); })); + ON_CALL(*decoder_filter_, decodeHeaders(_, _)) + .WillByDefault( + InvokeWithoutArgs([this, decode_header_status]() -> Http::FilterHeadersStatus { + header_status_ = fromHeaderStatus(decode_header_status); + return *header_status_; + })); fakeOnData(); - decoder_filter_ = config.decoder_filter_; - encoder_filter_ = config.encoder_filter_; FUZZ_ASSERT(testing::Mock::VerifyAndClearExpectations(config_.codec_)); } @@ -261,6 +268,12 @@ class FuzzStream { return Http::FilterHeadersStatus::Continue; case test::common::http::HeaderStatus::HEADER_STOP_ITERATION: return Http::FilterHeadersStatus::StopIteration; + case test::common::http::HeaderStatus::HEADER_CONTINUE_AND_END_STREAM: + return Http::FilterHeadersStatus::ContinueAndEndStream; + case test::common::http::HeaderStatus::HEADER_STOP_ALL_ITERATION_AND_BUFFER: + return Http::FilterHeadersStatus::StopAllIterationAndBuffer; + case test::common::http::HeaderStatus::HEADER_STOP_ALL_ITERATION_AND_WATERMARK: + return Http::FilterHeadersStatus::StopAllIterationAndWatermark; default: return Http::FilterHeadersStatus::Continue; } @@ -320,7 +333,8 @@ class FuzzStream { if (data_action.has_decoder_filter_callback_action()) { decoderFilterCallbackAction(data_action.decoder_filter_callback_action()); } - return fromDataStatus(data_action.status()); + data_status_ = fromDataStatus(data_action.status()); + return *data_status_; })); EXPECT_CALL(*config_.codec_, dispatch(_)).WillOnce(InvokeWithoutArgs([this, &data_action] { Buffer::OwnedImpl buf(std::string(data_action.size() % (1024 * 1024), 'a')); @@ -355,7 +369,14 @@ class FuzzStream { break; } case test::common::http::RequestAction::kContinueDecoding: { - decoder_filter_->callbacks_->continueDecoding(); + if (header_status_ == FilterHeadersStatus::StopAllIterationAndBuffer || + header_status_ == FilterHeadersStatus::StopAllIterationAndWatermark || + (header_status_ == FilterHeadersStatus::StopIteration && + (data_status_ == FilterDataStatus::StopIterationAndBuffer || + data_status_ == FilterDataStatus::StopIterationAndWatermark || + data_status_ == FilterDataStatus::StopIterationNoBuffer))) { + decoder_filter_->callbacks_->continueDecoding(); + } break; } case test::common::http::RequestAction::kThrowDecoderException: { @@ -450,6 +471,8 @@ class FuzzStream { MockStreamEncoderFilter* encoder_filter_{}; StreamState request_state_; StreamState response_state_; + absl::optional header_status_; + absl::optional data_status_; }; using FuzzStreamPtr = std::unique_ptr; @@ -501,7 +524,7 @@ DEFINE_PROTO_FUZZER(const test::common::http::ConnManagerImplTestCase& input) { streams.emplace_back(new FuzzStream( conn_manager, config, Fuzz::fromHeaders(action.new_stream().request_headers()), - action.new_stream().end_stream())); + action.new_stream().status(), action.new_stream().end_stream())); break; } case test::common::http::Action::kStreamAction: { From 2785310cf6f1b698948cce5a3552bcd297be1783 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabr=C3=ADzio=20de=20Royes=20Mello?= Date: Mon, 27 Apr 2020 15:20:48 -0300 Subject: [PATCH 033/909] Fix typo on Postgres Proxy 
documentation. (#10930) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix typo on Postgres proxy documentation per comment on pull request #10602. Risk level: Low Testing: N/A Docs changes: minor typo fix Release Notes: N/A Signed-off-by: Fabrízio de Royes Mello --- .../listeners/network_filters/postgres_proxy_filter.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/root/configuration/listeners/network_filters/postgres_proxy_filter.rst b/docs/root/configuration/listeners/network_filters/postgres_proxy_filter.rst index dd7b489fd344..f2bdf3391467 100644 --- a/docs/root/configuration/listeners/network_filters/postgres_proxy_filter.rst +++ b/docs/root/configuration/listeners/network_filters/postgres_proxy_filter.rst @@ -4,7 +4,7 @@ Postgres proxy ================ The Postgres proxy filter decodes the wire protocol between a Postgres client (downstream) and a Postgres server -(upstream). The decoded information is currently used only to produce Postgres level statistics like sesions, +(upstream). The decoded information is currently used only to produce Postgres level statistics like sessions, statements or transactions executed, among others. This current version does not decode SQL queries. Future versions may add more statistics and more advanced capabilities. When the Postgres filter detects that a session is encrypted, the messages are ignored and no decoding takes place. More information: From 8951b3f7b26cbdf21c734d4372555188a42aad65 Mon Sep 17 00:00:00 2001 From: Sunjay Bhatia Date: Mon, 27 Apr 2020 14:25:35 -0400 Subject: [PATCH 034/909] tests: tag tests that fail on Windows with fails_on_windows (#10940) * 139 tests tagged in total now, a few of which are flaky and don't fail every invocation * To enable contributors to work on fixing these failing tests and to give a view into what progress still needs to be made * When we are able to get CI working to build/run tests on Windows, we can ignore the tagged tests and untag them as they are fixed * It appears a large portion of the tests will be fixable with a few patches * there are also assumptions in test setup/teardown etc.
that are not compatible with Windows at the moment * a large percentage of the failing tests are integration tests Risk Level: Low, windows-only Testing: N/A Docs Changes: N/A Release Notes: N/A Signed-off-by: Sunjay Bhatia --- test/common/access_log/BUILD | 2 + test/common/config/BUILD | 2 + test/common/filesystem/BUILD | 1 + test/common/http/BUILD | 2 + test/common/http/http1/BUILD | 2 + test/common/http/http2/BUILD | 3 ++ test/common/network/BUILD | 7 ++++ test/common/router/BUILD | 3 ++ test/common/secret/BUILD | 1 + test/common/upstream/BUILD | 3 ++ test/config_test/BUILD | 1 + test/dependencies/BUILD | 1 + test/extensions/access_loggers/grpc/BUILD | 2 + test/extensions/clusters/aggregate/BUILD | 1 + test/extensions/clusters/redis/BUILD | 1 + test/extensions/common/aws/BUILD | 1 + test/extensions/common/proxy_protocol/BUILD | 1 + test/extensions/filters/common/lua/BUILD | 1 + .../filters/http/adaptive_concurrency/BUILD | 1 + test/extensions/filters/http/aws_lambda/BUILD | 1 + test/extensions/filters/http/buffer/BUILD | 1 + test/extensions/filters/http/cache/BUILD | 1 + test/extensions/filters/http/cors/BUILD | 1 + test/extensions/filters/http/csrf/BUILD | 1 + .../filters/http/dynamic_forward_proxy/BUILD | 1 + test/extensions/filters/http/ext_authz/BUILD | 1 + test/extensions/filters/http/fault/BUILD | 1 + .../http/grpc_http1_reverse_bridge/BUILD | 1 + .../filters/http/grpc_json_transcoder/BUILD | 1 + test/extensions/filters/http/gzip/BUILD | 1 + test/extensions/filters/http/jwt_authn/BUILD | 1 + test/extensions/filters/http/lua/BUILD | 3 ++ test/extensions/filters/http/rbac/BUILD | 1 + test/extensions/filters/http/router/BUILD | 1 + test/extensions/filters/http/squash/BUILD | 1 + test/extensions/filters/http/tap/BUILD | 1 + .../filters/listener/http_inspector/BUILD | 1 + .../filters/listener/original_src/BUILD | 1 + .../filters/listener/proxy_protocol/BUILD | 1 + .../filters/network/local_ratelimit/BUILD | 1 + .../filters/network/mysql_proxy/BUILD | 2 + .../filters/network/postgres_proxy/BUILD | 1 + test/extensions/filters/network/rbac/BUILD | 1 + .../filters/network/redis_proxy/BUILD | 2 + .../filters/network/rocketmq_proxy/BUILD | 1 + .../network/sni_dynamic_forward_proxy/BUILD | 1 + .../filters/network/thrift_proxy/BUILD | 2 + test/extensions/filters/udp/udp_proxy/BUILD | 1 + .../extensions/grpc_credentials/aws_iam/BUILD | 1 + .../file_based_metadata/BUILD | 1 + .../resource_monitors/injected_resource/BUILD | 1 + .../stats_sinks/common/statsd/BUILD | 1 + test/extensions/stats_sinks/hystrix/BUILD | 1 + .../stats_sinks/metrics_service/BUILD | 1 + test/extensions/transport_sockets/alts/BUILD | 1 + .../transport_sockets/tls/integration/BUILD | 1 + test/integration/BUILD | 41 +++++++++++++++++++ test/server/BUILD | 4 ++ test/server/config_validation/BUILD | 1 + 59 files changed, 124 insertions(+) diff --git a/test/common/access_log/BUILD b/test/common/access_log/BUILD index 836975e9ae34..06da33256f87 100644 --- a/test/common/access_log/BUILD +++ b/test/common/access_log/BUILD @@ -36,6 +36,7 @@ envoy_cc_fuzz_test( envoy_cc_test( name = "access_log_formatter_test", srcs = ["access_log_formatter_test.cc"], + tags = ["fails_on_windows"], deps = [ "//source/common/access_log:access_log_formatter_lib", "//source/common/common:utility_lib", @@ -108,4 +109,5 @@ envoy_cc_benchmark_binary( envoy_benchmark_test( name = "access_log_formatter_speed_test_benchmark_test", benchmark_binary = "access_log_formatter_speed_test", + tags = ["fails_on_windows"], ) diff --git a/test/common/config/BUILD 
b/test/common/config/BUILD index 390374dad542..360f8b623ede 100644 --- a/test/common/config/BUILD +++ b/test/common/config/BUILD @@ -72,6 +72,7 @@ envoy_cc_test( envoy_cc_test( name = "filesystem_subscription_impl_test", srcs = ["filesystem_subscription_impl_test.cc"], + tags = ["fails_on_windows"], deps = [ ":filesystem_subscription_test_harness", "//test/mocks/event:event_mocks", @@ -271,6 +272,7 @@ envoy_cc_test( envoy_cc_test( name = "subscription_impl_test", srcs = ["subscription_impl_test.cc"], + tags = ["fails_on_windows"], deps = [ ":delta_subscription_test_harness", ":filesystem_subscription_test_harness", diff --git a/test/common/filesystem/BUILD b/test/common/filesystem/BUILD index 6f385036615a..de9c92bb4c04 100644 --- a/test/common/filesystem/BUILD +++ b/test/common/filesystem/BUILD @@ -29,6 +29,7 @@ envoy_cc_test( envoy_cc_test( name = "watcher_impl_test", srcs = ["watcher_impl_test.cc"], + tags = ["fails_on_windows"], deps = [ "//source/common/common:assert_lib", "//source/common/event:dispatcher_includes", diff --git a/test/common/http/BUILD b/test/common/http/BUILD index 91fc3ec75460..d51e20b33e23 100644 --- a/test/common/http/BUILD +++ b/test/common/http/BUILD @@ -16,6 +16,7 @@ envoy_package() envoy_cc_test( name = "async_client_impl_test", srcs = ["async_client_impl_test.cc"], + tags = ["fails_on_windows"], deps = [ ":common_lib", "//source/common/buffer:buffer_lib", @@ -191,6 +192,7 @@ envoy_cc_fuzz_test( envoy_cc_test( name = "conn_manager_impl_test", srcs = ["conn_manager_impl_test.cc"], + tags = ["fails_on_windows"], deps = [ "//include/envoy/access_log:access_log_interface", "//include/envoy/buffer:buffer_interface", diff --git a/test/common/http/http1/BUILD b/test/common/http/http1/BUILD index 491fcba090ea..4fa2b974dfca 100644 --- a/test/common/http/http1/BUILD +++ b/test/common/http/http1/BUILD @@ -19,6 +19,7 @@ envoy_cc_test( envoy_cc_test( name = "codec_impl_test", srcs = ["codec_impl_test.cc"], + tags = ["fails_on_windows"], deps = [ "//include/envoy/buffer:buffer_interface", "//include/envoy/event:dispatcher_interface", @@ -44,6 +45,7 @@ envoy_cc_test( envoy_cc_test( name = "conn_pool_test", srcs = ["conn_pool_test.cc"], + tags = ["fails_on_windows"], deps = [ "//source/common/buffer:buffer_lib", "//source/common/event:dispatcher_lib", diff --git a/test/common/http/http2/BUILD b/test/common/http/http2/BUILD index 241fe70d7b11..1d5ba73e5a52 100644 --- a/test/common/http/http2/BUILD +++ b/test/common/http/http2/BUILD @@ -14,6 +14,7 @@ envoy_cc_test( name = "codec_impl_test", srcs = ["codec_impl_test.cc"], shard_count = 5, + tags = ["fails_on_windows"], deps = [ ":codec_impl_test_util", "//source/common/event:dispatcher_lib", @@ -50,6 +51,7 @@ envoy_cc_test_library( envoy_cc_test( name = "conn_pool_test", srcs = ["conn_pool_test.cc"], + tags = ["fails_on_windows"], deps = [ "//source/common/event:dispatcher_lib", "//source/common/http/http2:conn_pool_lib", @@ -102,6 +104,7 @@ envoy_cc_test( "response_header_corpus/simple_example_huffman", "response_header_corpus/simple_example_plain", ], + tags = ["fails_on_windows"], deps = [":frame_replay_lib"], ) diff --git a/test/common/network/BUILD b/test/common/network/BUILD index 3f9b79c5e7a6..c3c5bc8ba451 100644 --- a/test/common/network/BUILD +++ b/test/common/network/BUILD @@ -33,6 +33,7 @@ envoy_cc_test_library( envoy_cc_test( name = "address_impl_test", srcs = ["address_impl_test.cc"], + tags = ["fails_on_windows"], deps = [ "//source/common/network:address_lib", "//source/common/network:utility_lib", @@ -58,6 +59,7 
@@ envoy_cc_benchmark_binary( envoy_benchmark_test( name = "address_impl_speed_test_benchmark_test", benchmark_binary = "address_impl_speed_test", + tags = ["fails_on_windows"], ) envoy_cc_test( @@ -100,6 +102,7 @@ envoy_cc_test( envoy_cc_test( name = "dns_impl_test", srcs = ["dns_impl_test.cc"], + tags = ["fails_on_windows"], deps = [ "//include/envoy/event:dispatcher_interface", "//include/envoy/network:address_interface", @@ -164,6 +167,7 @@ envoy_cc_test( envoy_cc_test( name = "listen_socket_impl_test", srcs = ["listen_socket_impl_test.cc"], + tags = ["fails_on_windows"], deps = [ "//source/common/network:address_lib", "//source/common/network:listen_socket_lib", @@ -200,6 +204,7 @@ envoy_cc_test( envoy_cc_test( name = "udp_listener_impl_test", srcs = ["udp_listener_impl_test.cc"], + tags = ["fails_on_windows"], deps = [ "//source/common/event:dispatcher_lib", "//source/common/network:address_lib", @@ -261,6 +266,7 @@ envoy_cc_test( name = "socket_option_factory_test", srcs = ["socket_option_factory_test.cc"], external_deps = ["abseil_str_format"], + tags = ["fails_on_windows"], deps = [ "//source/common/network:address_lib", "//source/common/network:socket_option_factory_lib", @@ -276,6 +282,7 @@ envoy_cc_test( envoy_cc_test( name = "addr_family_aware_socket_option_impl_test", srcs = ["addr_family_aware_socket_option_impl_test.cc"], + tags = ["fails_on_windows"], deps = [ ":socket_option_test", "//source/common/network:addr_family_aware_socket_option_lib", diff --git a/test/common/router/BUILD b/test/common/router/BUILD index c3cab9845d2c..ed1e8e1adc28 100644 --- a/test/common/router/BUILD +++ b/test/common/router/BUILD @@ -15,6 +15,7 @@ envoy_package() envoy_cc_test( name = "config_impl_test", + tags = ["fails_on_windows"], deps = [":config_impl_test_lib"], ) @@ -252,6 +253,7 @@ envoy_cc_test( envoy_cc_test( name = "router_test", srcs = ["router_test.cc"], + tags = ["fails_on_windows"], deps = [ "//source/common/buffer:buffer_lib", "//source/common/http:context_lib", @@ -283,6 +285,7 @@ envoy_cc_test( name = "router_upstream_log_test", srcs = ["router_upstream_log_test.cc"], external_deps = ["abseil_optional"], + tags = ["fails_on_windows"], deps = [ "//source/common/buffer:buffer_lib", "//source/common/network:utility_lib", diff --git a/test/common/secret/BUILD b/test/common/secret/BUILD index d0d72d1b22ff..80ce11317c43 100644 --- a/test/common/secret/BUILD +++ b/test/common/secret/BUILD @@ -14,6 +14,7 @@ envoy_cc_test( data = [ "//test/extensions/transport_sockets/tls/test_data:certs", ], + tags = ["fails_on_windows"], deps = [ "//source/common/secret:sds_api_lib", "//source/common/secret:secret_manager_impl_lib", diff --git a/test/common/upstream/BUILD b/test/common/upstream/BUILD index 789cb5401925..9c87ce42c6ce 100644 --- a/test/common/upstream/BUILD +++ b/test/common/upstream/BUILD @@ -99,6 +99,7 @@ envoy_cc_test( envoy_cc_test( name = "health_checker_impl_test", srcs = ["health_checker_impl_test.cc"], + tags = ["fails_on_windows"], deps = [ ":utility_lib", "//source/common/buffer:buffer_lib", @@ -181,6 +182,7 @@ envoy_cc_test( envoy_cc_test( name = "load_stats_reporter_test", srcs = ["load_stats_reporter_test.cc"], + tags = ["fails_on_windows"], deps = [ "//source/common/stats:stats_lib", "//source/common/upstream:load_stats_reporter_lib", @@ -362,6 +364,7 @@ envoy_benchmark_test( name = "load_balancer_benchmark_test", timeout = "long", benchmark_binary = "load_balancer_benchmark", + tags = ["fails_on_windows"], ) envoy_cc_test( diff --git a/test/config_test/BUILD 
b/test/config_test/BUILD index a00de6bb96c4..e8ccfd04daad 100644 --- a/test/config_test/BUILD +++ b/test/config_test/BUILD @@ -22,6 +22,7 @@ envoy_cc_test( "example_configs_test_setup.sh", "//configs:example_configs", ], + tags = ["fails_on_windows"], deps = [ ":config_test_lib", "//test/test_common:environment_lib", diff --git a/test/dependencies/BUILD b/test/dependencies/BUILD index 2e6ae296b760..3d68e338f2b5 100644 --- a/test/dependencies/BUILD +++ b/test/dependencies/BUILD @@ -14,4 +14,5 @@ envoy_cc_test( external_deps = [ "curl", ], + tags = ["fails_on_windows"], ) diff --git a/test/extensions/access_loggers/grpc/BUILD b/test/extensions/access_loggers/grpc/BUILD index c79d11539665..934921e13a1e 100644 --- a/test/extensions/access_loggers/grpc/BUILD +++ b/test/extensions/access_loggers/grpc/BUILD @@ -77,6 +77,7 @@ envoy_extension_cc_test( name = "http_grpc_access_log_integration_test", srcs = ["http_grpc_access_log_integration_test.cc"], extension_name = "envoy.access_loggers.http_grpc", + tags = ["fails_on_windows"], deps = [ "//source/common/buffer:zero_copy_input_stream_lib", "//source/common/grpc:codec_lib", @@ -96,6 +97,7 @@ envoy_extension_cc_test( name = "tcp_grpc_access_log_integration_test", srcs = ["tcp_grpc_access_log_integration_test.cc"], extension_name = "envoy.access_loggers.http_grpc", + tags = ["fails_on_windows"], deps = [ "//source/common/buffer:zero_copy_input_stream_lib", "//source/common/grpc:codec_lib", diff --git a/test/extensions/clusters/aggregate/BUILD b/test/extensions/clusters/aggregate/BUILD index f5885980ab36..815a755de3ef 100644 --- a/test/extensions/clusters/aggregate/BUILD +++ b/test/extensions/clusters/aggregate/BUILD @@ -53,6 +53,7 @@ envoy_extension_cc_test( name = "cluster_integration_test", srcs = ["cluster_integration_test.cc"], extension_name = "envoy.clusters.aggregate", + tags = ["fails_on_windows"], deps = [ "//source/common/config:protobuf_link_hacks", "//source/common/protobuf:utility_lib", diff --git a/test/extensions/clusters/redis/BUILD b/test/extensions/clusters/redis/BUILD index 80476cd921ee..1fc6aa3a6a08 100644 --- a/test/extensions/clusters/redis/BUILD +++ b/test/extensions/clusters/redis/BUILD @@ -88,6 +88,7 @@ envoy_extension_cc_test( size = "small", srcs = ["redis_cluster_integration_test.cc"], extension_name = "envoy.clusters.redis", + tags = ["fails_on_windows"], deps = [ "//source/extensions/clusters/redis:redis_cluster", "//source/extensions/clusters/redis:redis_cluster_lb", diff --git a/test/extensions/common/aws/BUILD b/test/extensions/common/aws/BUILD index a6c37b700101..0f014b4bf458 100644 --- a/test/extensions/common/aws/BUILD +++ b/test/extensions/common/aws/BUILD @@ -76,6 +76,7 @@ envoy_cc_test( srcs = [ "aws_metadata_fetcher_integration_test.cc", ], + tags = ["fails_on_windows"], deps = [ "//source/common/common:fmt_lib", "//source/extensions/common/aws:utility_lib", diff --git a/test/extensions/common/proxy_protocol/BUILD b/test/extensions/common/proxy_protocol/BUILD index bd269493ddf5..59871f2d17a6 100644 --- a/test/extensions/common/proxy_protocol/BUILD +++ b/test/extensions/common/proxy_protocol/BUILD @@ -21,6 +21,7 @@ envoy_cc_test( envoy_cc_test( name = "proxy_protocol_regression_test", srcs = ["proxy_protocol_regression_test.cc"], + tags = ["fails_on_windows"], deps = [ "//source/common/buffer:buffer_lib", "//source/common/event:dispatcher_includes", diff --git a/test/extensions/filters/common/lua/BUILD b/test/extensions/filters/common/lua/BUILD index df115309d63c..17adba2c3145 100644 --- 
a/test/extensions/filters/common/lua/BUILD +++ b/test/extensions/filters/common/lua/BUILD @@ -23,6 +23,7 @@ envoy_cc_test( envoy_cc_test( name = "wrappers_test", srcs = ["wrappers_test.cc"], + tags = ["fails_on_windows"], deps = [ ":lua_wrappers_lib", "//source/common/buffer:buffer_lib", diff --git a/test/extensions/filters/http/adaptive_concurrency/BUILD b/test/extensions/filters/http/adaptive_concurrency/BUILD index fbe81cc26327..5dd177f945bd 100644 --- a/test/extensions/filters/http/adaptive_concurrency/BUILD +++ b/test/extensions/filters/http/adaptive_concurrency/BUILD @@ -34,6 +34,7 @@ envoy_extension_cc_test( "adaptive_concurrency_filter_integration_test.h", ], extension_name = "envoy.filters.http.adaptive_concurrency", + tags = ["fails_on_windows"], deps = [ "//source/extensions/filters/http/adaptive_concurrency:config", "//source/extensions/filters/http/fault:config", diff --git a/test/extensions/filters/http/aws_lambda/BUILD b/test/extensions/filters/http/aws_lambda/BUILD index 4486b02c1279..f20dd5e903aa 100644 --- a/test/extensions/filters/http/aws_lambda/BUILD +++ b/test/extensions/filters/http/aws_lambda/BUILD @@ -27,6 +27,7 @@ envoy_extension_cc_test( name = "aws_lambda_filter_integration_test", srcs = ["aws_lambda_filter_integration_test.cc"], extension_name = "envoy.filters.http.aws_lambda", + tags = ["fails_on_windows"], deps = [ "//source/common/http:header_map_lib", "//source/extensions/filters/http/aws_lambda:aws_lambda_filter_lib", diff --git a/test/extensions/filters/http/buffer/BUILD b/test/extensions/filters/http/buffer/BUILD index 20351ba4a1fb..50a68a4f2489 100644 --- a/test/extensions/filters/http/buffer/BUILD +++ b/test/extensions/filters/http/buffer/BUILD @@ -36,6 +36,7 @@ envoy_extension_cc_test( name = "buffer_filter_integration_test", srcs = ["buffer_filter_integration_test.cc"], extension_name = "envoy.filters.http.buffer", + tags = ["fails_on_windows"], deps = [ "//source/extensions/filters/http/buffer:config", "//test/config:utility_lib", diff --git a/test/extensions/filters/http/cache/BUILD b/test/extensions/filters/http/cache/BUILD index faa47807b25f..82eb2ff8dc12 100644 --- a/test/extensions/filters/http/cache/BUILD +++ b/test/extensions/filters/http/cache/BUILD @@ -65,6 +65,7 @@ envoy_extension_cc_test( "cache_filter_integration_test.cc", ], extension_name = "envoy.filters.http.cache", + tags = ["fails_on_windows"], deps = [ "//source/extensions/filters/http/cache:config", "//source/extensions/filters/http/cache:http_cache_lib", diff --git a/test/extensions/filters/http/cors/BUILD b/test/extensions/filters/http/cors/BUILD index 6aab69d2f6d9..9320855a4af2 100644 --- a/test/extensions/filters/http/cors/BUILD +++ b/test/extensions/filters/http/cors/BUILD @@ -30,6 +30,7 @@ envoy_extension_cc_test( name = "cors_filter_integration_test", srcs = ["cors_filter_integration_test.cc"], extension_name = "envoy.filters.http.cors", + tags = ["fails_on_windows"], deps = [ "//source/common/buffer:buffer_lib", "//source/common/http:header_map_lib", diff --git a/test/extensions/filters/http/csrf/BUILD b/test/extensions/filters/http/csrf/BUILD index 984cccc2b122..2cde41859068 100644 --- a/test/extensions/filters/http/csrf/BUILD +++ b/test/extensions/filters/http/csrf/BUILD @@ -31,6 +31,7 @@ envoy_extension_cc_test( name = "csrf_filter_integration_test", srcs = ["csrf_filter_integration_test.cc"], extension_name = "envoy.filters.http.csrf", + tags = ["fails_on_windows"], deps = [ "//source/extensions/filters/http/csrf:config", "//test/config:utility_lib", diff --git 
a/test/extensions/filters/http/dynamic_forward_proxy/BUILD b/test/extensions/filters/http/dynamic_forward_proxy/BUILD index 6902c9dd718f..71650b6b24d6 100644 --- a/test/extensions/filters/http/dynamic_forward_proxy/BUILD +++ b/test/extensions/filters/http/dynamic_forward_proxy/BUILD @@ -32,6 +32,7 @@ envoy_extension_cc_test( "//test/config/integration/certs", ], extension_name = "envoy.filters.http.dynamic_forward_proxy", + tags = ["fails_on_windows"], deps = [ "//source/extensions/clusters/dynamic_forward_proxy:cluster", "//source/extensions/filters/http/dynamic_forward_proxy:config", diff --git a/test/extensions/filters/http/ext_authz/BUILD b/test/extensions/filters/http/ext_authz/BUILD index 75bd5bec2fab..ef1b8be3cd89 100644 --- a/test/extensions/filters/http/ext_authz/BUILD +++ b/test/extensions/filters/http/ext_authz/BUILD @@ -57,6 +57,7 @@ envoy_extension_cc_test( name = "ext_authz_integration_test", srcs = ["ext_authz_integration_test.cc"], extension_name = "envoy.filters.http.ext_authz", + tags = ["fails_on_windows"], deps = [ "//source/extensions/filters/http/ext_authz:config", "//test/integration:http_integration_lib", diff --git a/test/extensions/filters/http/fault/BUILD b/test/extensions/filters/http/fault/BUILD index 04a48ea06dc5..1578099273ed 100644 --- a/test/extensions/filters/http/fault/BUILD +++ b/test/extensions/filters/http/fault/BUILD @@ -54,6 +54,7 @@ envoy_extension_cc_test( name = "fault_filter_integration_test", srcs = ["fault_filter_integration_test.cc"], extension_name = "envoy.filters.http.fault", + tags = ["fails_on_windows"], deps = [ "//source/extensions/filters/http/fault:config", "//test/integration:http_protocol_integration_lib", diff --git a/test/extensions/filters/http/grpc_http1_reverse_bridge/BUILD b/test/extensions/filters/http/grpc_http1_reverse_bridge/BUILD index 648f2e473dc7..b84c405bee88 100644 --- a/test/extensions/filters/http/grpc_http1_reverse_bridge/BUILD +++ b/test/extensions/filters/http/grpc_http1_reverse_bridge/BUILD @@ -28,6 +28,7 @@ envoy_extension_cc_test( name = "reverse_bridge_integration_test", srcs = ["reverse_bridge_integration_test.cc"], extension_name = "envoy.filters.http.grpc_http1_reverse_bridge", + tags = ["fails_on_windows"], deps = [ "//source/common/buffer:buffer_lib", "//source/common/http:header_map_lib", diff --git a/test/extensions/filters/http/grpc_json_transcoder/BUILD b/test/extensions/filters/http/grpc_json_transcoder/BUILD index 977669d16fd8..bac0bd17b1db 100644 --- a/test/extensions/filters/http/grpc_json_transcoder/BUILD +++ b/test/extensions/filters/http/grpc_json_transcoder/BUILD @@ -61,6 +61,7 @@ envoy_extension_cc_test( "//test/proto:bookstore_proto_descriptor", ], extension_name = "envoy.filters.http.grpc_json_transcoder", + tags = ["fails_on_windows"], deps = [ "//source/common/grpc:codec_lib", "//source/common/http:header_map_lib", diff --git a/test/extensions/filters/http/gzip/BUILD b/test/extensions/filters/http/gzip/BUILD index 72941b500b9f..3c2bb9059338 100644 --- a/test/extensions/filters/http/gzip/BUILD +++ b/test/extensions/filters/http/gzip/BUILD @@ -34,6 +34,7 @@ envoy_extension_cc_test( "gzip_filter_integration_test.cc", ], extension_name = "envoy.filters.http.gzip", + tags = ["fails_on_windows"], deps = [ "//source/common/decompressor:decompressor_lib", "//source/extensions/filters/http/gzip:config", diff --git a/test/extensions/filters/http/jwt_authn/BUILD b/test/extensions/filters/http/jwt_authn/BUILD index c3641befd989..e8339bdcb473 100644 --- a/test/extensions/filters/http/jwt_authn/BUILD 
+++ b/test/extensions/filters/http/jwt_authn/BUILD @@ -117,6 +117,7 @@ envoy_extension_cc_test( name = "filter_integration_test", srcs = ["filter_integration_test.cc"], extension_name = "envoy.filters.http.jwt_authn", + tags = ["fails_on_windows"], deps = [ "//source/common/router:string_accessor_lib", "//source/extensions/filters/http/common:pass_through_filter_lib", diff --git a/test/extensions/filters/http/lua/BUILD b/test/extensions/filters/http/lua/BUILD index ea708b4420f8..eba9a4ad2e09 100644 --- a/test/extensions/filters/http/lua/BUILD +++ b/test/extensions/filters/http/lua/BUILD @@ -15,6 +15,7 @@ envoy_extension_cc_test( name = "lua_filter_test", srcs = ["lua_filter_test.cc"], extension_name = "envoy.filters.http.lua", + tags = ["fails_on_windows"], deps = [ "//source/common/stream_info:stream_info_lib", "//source/extensions/filters/http/lua:lua_filter_lib", @@ -33,6 +34,7 @@ envoy_extension_cc_test( name = "wrappers_test", srcs = ["wrappers_test.cc"], extension_name = "envoy.filters.http.lua", + tags = ["fails_on_windows"], deps = [ "//source/common/stream_info:stream_info_lib", "//source/extensions/filters/http/lua:wrappers_lib", @@ -47,6 +49,7 @@ envoy_extension_cc_test( name = "lua_integration_test", srcs = ["lua_integration_test.cc"], extension_name = "envoy.filters.http.lua", + tags = ["fails_on_windows"], deps = [ "//source/extensions/filters/http/lua:config", "//test/integration:http_integration_lib", diff --git a/test/extensions/filters/http/rbac/BUILD b/test/extensions/filters/http/rbac/BUILD index 372b2110d0cf..6097f22589a8 100644 --- a/test/extensions/filters/http/rbac/BUILD +++ b/test/extensions/filters/http/rbac/BUILD @@ -44,6 +44,7 @@ envoy_extension_cc_test( name = "rbac_filter_integration_test", srcs = ["rbac_filter_integration_test.cc"], extension_name = "envoy.filters.http.rbac", + tags = ["fails_on_windows"], deps = [ "//source/extensions/filters/http/rbac:config", "//test/config:utility_lib", diff --git a/test/extensions/filters/http/router/BUILD b/test/extensions/filters/http/router/BUILD index 0fedbe15d3ae..e44c981792f3 100644 --- a/test/extensions/filters/http/router/BUILD +++ b/test/extensions/filters/http/router/BUILD @@ -30,6 +30,7 @@ envoy_extension_cc_test( "//test/config/integration/certs", ], extension_name = "envoy.filters.http.router", + tags = ["fails_on_windows"], deps = [ "//source/extensions/filters/http/router:config", "//test/integration:http_integration_lib", diff --git a/test/extensions/filters/http/squash/BUILD b/test/extensions/filters/http/squash/BUILD index ae0eb427ea84..2954bd98b425 100644 --- a/test/extensions/filters/http/squash/BUILD +++ b/test/extensions/filters/http/squash/BUILD @@ -32,6 +32,7 @@ envoy_extension_cc_test( name = "squash_filter_integration_test", srcs = ["squash_filter_integration_test.cc"], extension_name = "envoy.filters.http.squash", + tags = ["fails_on_windows"], deps = [ "//source/extensions/filters/http/squash:config", "//test/integration:http_integration_lib", diff --git a/test/extensions/filters/http/tap/BUILD b/test/extensions/filters/http/tap/BUILD index b5aaea4b1480..95f895888972 100644 --- a/test/extensions/filters/http/tap/BUILD +++ b/test/extensions/filters/http/tap/BUILD @@ -52,6 +52,7 @@ envoy_extension_cc_test( name = "tap_filter_integration_test", srcs = ["tap_filter_integration_test.cc"], extension_name = "envoy.filters.http.tap", + tags = ["fails_on_windows"], deps = [ "//source/extensions/filters/http/tap:config", "//test/integration:http_integration_lib", diff --git 
a/test/extensions/filters/listener/http_inspector/BUILD b/test/extensions/filters/listener/http_inspector/BUILD index ddfac953f6b9..afb64b5eee2b 100644 --- a/test/extensions/filters/listener/http_inspector/BUILD +++ b/test/extensions/filters/listener/http_inspector/BUILD @@ -15,6 +15,7 @@ envoy_extension_cc_test( name = "http_inspector_test", srcs = ["http_inspector_test.cc"], extension_name = "envoy.filters.listener.http_inspector", + tags = ["fails_on_windows"], deps = [ "//source/common/common:hex_lib", "//source/extensions/filters/listener/http_inspector:http_inspector_lib", diff --git a/test/extensions/filters/listener/original_src/BUILD b/test/extensions/filters/listener/original_src/BUILD index 0961acdb2594..7a62688540fe 100644 --- a/test/extensions/filters/listener/original_src/BUILD +++ b/test/extensions/filters/listener/original_src/BUILD @@ -25,6 +25,7 @@ envoy_extension_cc_test( name = "original_src_config_factory_test", srcs = ["original_src_config_factory_test.cc"], extension_name = "envoy.filters.listener.original_src", + tags = ["fails_on_windows"], deps = [ "//source/extensions/filters/listener/original_src:config", "//source/extensions/filters/listener/original_src:config_lib", diff --git a/test/extensions/filters/listener/proxy_protocol/BUILD b/test/extensions/filters/listener/proxy_protocol/BUILD index 0eb0eb4983eb..f07771bbb91e 100644 --- a/test/extensions/filters/listener/proxy_protocol/BUILD +++ b/test/extensions/filters/listener/proxy_protocol/BUILD @@ -15,6 +15,7 @@ envoy_extension_cc_test( name = "proxy_protocol_test", srcs = ["proxy_protocol_test.cc"], extension_name = "envoy.filters.listener.proxy_protocol", + tags = ["fails_on_windows"], deps = [ "//source/common/buffer:buffer_lib", "//source/common/event:dispatcher_includes", diff --git a/test/extensions/filters/network/local_ratelimit/BUILD b/test/extensions/filters/network/local_ratelimit/BUILD index 854bfae8c8bc..291f6726ddc4 100644 --- a/test/extensions/filters/network/local_ratelimit/BUILD +++ b/test/extensions/filters/network/local_ratelimit/BUILD @@ -28,6 +28,7 @@ envoy_extension_cc_test( name = "local_ratelimit_integration_test", srcs = ["local_ratelimit_integration_test.cc"], extension_name = "envoy.filters.network.local_ratelimit", + tags = ["fails_on_windows"], deps = [ "//source/extensions/filters/network/local_ratelimit:config", "//source/extensions/filters/network/tcp_proxy:config", diff --git a/test/extensions/filters/network/mysql_proxy/BUILD b/test/extensions/filters/network/mysql_proxy/BUILD index afdb5f84a2c5..073312b3eff4 100644 --- a/test/extensions/filters/network/mysql_proxy/BUILD +++ b/test/extensions/filters/network/mysql_proxy/BUILD @@ -40,6 +40,7 @@ envoy_extension_cc_test( "mysql_filter_test.cc", ], extension_name = "envoy.filters.network.mysql_proxy", + tags = ["fails_on_windows"], deps = [ ":mysql_test_utils_lib", "//source/extensions/filters/network/mysql_proxy:config", @@ -56,6 +57,7 @@ envoy_extension_cc_test( "mysql_test_config.yaml", ], extension_name = "envoy.filters.network.mysql_proxy", + tags = ["fails_on_windows"], deps = [ ":mysql_test_utils_lib", "//source/common/tcp_proxy", diff --git a/test/extensions/filters/network/postgres_proxy/BUILD b/test/extensions/filters/network/postgres_proxy/BUILD index f319540a50e9..afb0b1415014 100644 --- a/test/extensions/filters/network/postgres_proxy/BUILD +++ b/test/extensions/filters/network/postgres_proxy/BUILD @@ -57,6 +57,7 @@ envoy_extension_cc_test( "postgres_test_config.yaml", ], extension_name = 
"envoy.filters.network.postgres_proxy", + tags = ["fails_on_windows"], deps = [ "//source/common/tcp_proxy", "//source/extensions/filters/network/postgres_proxy:config", diff --git a/test/extensions/filters/network/rbac/BUILD b/test/extensions/filters/network/rbac/BUILD index 9e2c4fec27fd..fb4195d62562 100644 --- a/test/extensions/filters/network/rbac/BUILD +++ b/test/extensions/filters/network/rbac/BUILD @@ -41,6 +41,7 @@ envoy_extension_cc_test( name = "integration_test", srcs = ["integration_test.cc"], extension_name = "envoy.filters.network.rbac", + tags = ["fails_on_windows"], deps = [ "//source/extensions/filters/network/echo:config", "//source/extensions/filters/network/rbac:config", diff --git a/test/extensions/filters/network/redis_proxy/BUILD b/test/extensions/filters/network/redis_proxy/BUILD index eb74d4d17bc8..0697c8c39d90 100644 --- a/test/extensions/filters/network/redis_proxy/BUILD +++ b/test/extensions/filters/network/redis_proxy/BUILD @@ -35,6 +35,7 @@ envoy_extension_cc_test( name = "conn_pool_impl_test", srcs = ["conn_pool_impl_test.cc"], extension_name = "envoy.filters.network.redis_proxy", + tags = ["fails_on_windows"], deps = [ ":redis_mocks", "//source/common/event:dispatcher_lib", @@ -145,6 +146,7 @@ envoy_extension_cc_test( name = "redis_proxy_integration_test", srcs = ["redis_proxy_integration_test.cc"], extension_name = "envoy.filters.network.redis_proxy", + tags = ["fails_on_windows"], deps = [ "//source/extensions/filters/network/redis_proxy:config", "//test/integration:integration_lib", diff --git a/test/extensions/filters/network/rocketmq_proxy/BUILD b/test/extensions/filters/network/rocketmq_proxy/BUILD index 868ced554fcc..2e719d6b145b 100644 --- a/test/extensions/filters/network/rocketmq_proxy/BUILD +++ b/test/extensions/filters/network/rocketmq_proxy/BUILD @@ -49,6 +49,7 @@ envoy_extension_cc_test( name = "router_test", srcs = ["router_test.cc"], extension_name = "envoy.filters.network.rocketmq_proxy", + tags = ["fails_on_windows"], deps = [ ":mocks_lib", ":utility_lib", diff --git a/test/extensions/filters/network/sni_dynamic_forward_proxy/BUILD b/test/extensions/filters/network/sni_dynamic_forward_proxy/BUILD index a931923403d2..059a4a0ef6ff 100644 --- a/test/extensions/filters/network/sni_dynamic_forward_proxy/BUILD +++ b/test/extensions/filters/network/sni_dynamic_forward_proxy/BUILD @@ -32,6 +32,7 @@ envoy_extension_cc_test( "//test/config/integration/certs", ], extension_name = "envoy.filters.network.sni_dynamic_forward_proxy", + tags = ["fails_on_windows"], deps = [ "//source/extensions/clusters/dynamic_forward_proxy:cluster", "//source/extensions/filters/listener/tls_inspector:config", diff --git a/test/extensions/filters/network/thrift_proxy/BUILD b/test/extensions/filters/network/thrift_proxy/BUILD index ac219f01fae9..c07318e6cb9a 100644 --- a/test/extensions/filters/network/thrift_proxy/BUILD +++ b/test/extensions/filters/network/thrift_proxy/BUILD @@ -333,6 +333,7 @@ envoy_extension_cc_test( "//test/extensions/filters/network/thrift_proxy/driver:generate_fixture", ], extension_name = "envoy.filters.network.thrift_proxy", + tags = ["fails_on_windows"], deps = [ ":integration_lib", ":utility_lib", @@ -349,6 +350,7 @@ envoy_extension_cc_test( "//test/extensions/filters/network/thrift_proxy/driver:generate_fixture", ], extension_name = "envoy.filters.network.thrift_proxy", + tags = ["fails_on_windows"], deps = [ ":integration_lib", ":utility_lib", diff --git a/test/extensions/filters/udp/udp_proxy/BUILD 
b/test/extensions/filters/udp/udp_proxy/BUILD index f08621cfc022..c5dfaa36f194 100644 --- a/test/extensions/filters/udp/udp_proxy/BUILD +++ b/test/extensions/filters/udp/udp_proxy/BUILD @@ -27,6 +27,7 @@ envoy_extension_cc_test( name = "udp_proxy_integration_test", srcs = ["udp_proxy_integration_test.cc"], extension_name = "envoy.filters.udp_listener.udp_proxy", + tags = ["fails_on_windows"], deps = [ "//source/extensions/filters/udp/udp_proxy:config", "//test/integration:integration_lib", diff --git a/test/extensions/grpc_credentials/aws_iam/BUILD b/test/extensions/grpc_credentials/aws_iam/BUILD index 07afc2edd971..656bfd76357c 100644 --- a/test/extensions/grpc_credentials/aws_iam/BUILD +++ b/test/extensions/grpc_credentials/aws_iam/BUILD @@ -13,6 +13,7 @@ envoy_cc_test( name = "aws_iam_grpc_credentials_test", srcs = envoy_select_google_grpc(["aws_iam_grpc_credentials_test.cc"]), data = ["//test/config/integration/certs"], + tags = ["fails_on_windows"], deps = [ "//source/extensions/grpc_credentials:well_known_names", "//source/extensions/grpc_credentials/aws_iam:config", diff --git a/test/extensions/grpc_credentials/file_based_metadata/BUILD b/test/extensions/grpc_credentials/file_based_metadata/BUILD index 74d148c643d1..ccb2fd8263b3 100644 --- a/test/extensions/grpc_credentials/file_based_metadata/BUILD +++ b/test/extensions/grpc_credentials/file_based_metadata/BUILD @@ -13,6 +13,7 @@ envoy_cc_test( name = "file_based_metadata_grpc_credentials_test", srcs = ["file_based_metadata_grpc_credentials_test.cc"], data = ["//test/config/integration/certs"], + tags = ["fails_on_windows"], deps = [ "//source/extensions/grpc_credentials:well_known_names", "//source/extensions/grpc_credentials/file_based_metadata:config", diff --git a/test/extensions/resource_monitors/injected_resource/BUILD b/test/extensions/resource_monitors/injected_resource/BUILD index 034d9b1365f0..e15aa3821a7a 100644 --- a/test/extensions/resource_monitors/injected_resource/BUILD +++ b/test/extensions/resource_monitors/injected_resource/BUILD @@ -15,6 +15,7 @@ envoy_package() envoy_cc_test( name = "injected_resource_monitor_test", srcs = ["injected_resource_monitor_test.cc"], + tags = ["fails_on_windows"], deps = [ "//source/common/event:dispatcher_lib", "//source/common/stats:isolated_store_lib", diff --git a/test/extensions/stats_sinks/common/statsd/BUILD b/test/extensions/stats_sinks/common/statsd/BUILD index fdd7b8b28df2..93d7505ab94c 100644 --- a/test/extensions/stats_sinks/common/statsd/BUILD +++ b/test/extensions/stats_sinks/common/statsd/BUILD @@ -30,6 +30,7 @@ envoy_cc_test( envoy_cc_test( name = "udp_statsd_test", srcs = ["udp_statsd_test.cc"], + tags = ["fails_on_windows"], deps = [ "//source/common/network:address_lib", "//source/common/network:utility_lib", diff --git a/test/extensions/stats_sinks/hystrix/BUILD b/test/extensions/stats_sinks/hystrix/BUILD index 148a62fad7c5..36f35b971491 100644 --- a/test/extensions/stats_sinks/hystrix/BUILD +++ b/test/extensions/stats_sinks/hystrix/BUILD @@ -44,6 +44,7 @@ envoy_extension_cc_test( name = "hystrix_integration_test", srcs = ["hystrix_integration_test.cc"], extension_name = "envoy.stat_sinks.hystrix", + tags = ["fails_on_windows"], deps = [ "//source/extensions/stat_sinks/hystrix:config", "//test/integration:http_protocol_integration_lib", diff --git a/test/extensions/stats_sinks/metrics_service/BUILD b/test/extensions/stats_sinks/metrics_service/BUILD index e8ff78cc02c5..ae60b006f1e2 100644 --- a/test/extensions/stats_sinks/metrics_service/BUILD +++ 
b/test/extensions/stats_sinks/metrics_service/BUILD @@ -45,6 +45,7 @@ envoy_extension_cc_test( name = "metrics_service_integration_test", srcs = ["metrics_service_integration_test.cc"], extension_name = "envoy.stat_sinks.metrics_service", + tags = ["fails_on_windows"], deps = [ "//source/common/buffer:zero_copy_input_stream_lib", "//source/common/grpc:codec_lib", diff --git a/test/extensions/transport_sockets/alts/BUILD b/test/extensions/transport_sockets/alts/BUILD index 8c7cbfa6f27e..ce1deb5e0c4f 100644 --- a/test/extensions/transport_sockets/alts/BUILD +++ b/test/extensions/transport_sockets/alts/BUILD @@ -76,6 +76,7 @@ envoy_extension_cc_test( external_deps = [ "grpc_alts_fake_handshaker_server", ], + tags = ["fails_on_windows"], deps = [ "//source/common/common:utility_lib", "//source/common/event:dispatcher_includes", diff --git a/test/extensions/transport_sockets/tls/integration/BUILD b/test/extensions/transport_sockets/tls/integration/BUILD index 830d37771bd3..2f806978c8c6 100644 --- a/test/extensions/transport_sockets/tls/integration/BUILD +++ b/test/extensions/transport_sockets/tls/integration/BUILD @@ -17,6 +17,7 @@ envoy_cc_test( data = [ "//test/config/integration/certs", ], + tags = ["fails_on_windows"], deps = [ "//source/common/event:dispatcher_includes", "//source/common/event:dispatcher_lib", diff --git a/test/integration/BUILD b/test/integration/BUILD index 5c3d08b0ef16..912e82a9627c 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -49,6 +49,7 @@ envoy_cc_test_library( envoy_cc_test( name = "ads_integration_test", srcs = ["ads_integration_test.cc"], + tags = ["fails_on_windows"], deps = [ ":ads_integration_lib", ":http_integration_lib", @@ -69,6 +70,7 @@ envoy_cc_test( envoy_cc_test( name = "api_listener_integration_test", srcs = ["api_listener_integration_test.cc"], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "//test/mocks/http:stream_encoder_mock", @@ -79,6 +81,7 @@ envoy_cc_test( envoy_cc_test( name = "api_version_integration_test", srcs = ["api_version_integration_test.cc"], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "@envoy_api//envoy/api/v2:pkg_cc_proto", @@ -109,6 +112,7 @@ envoy_cc_test( data = [ "//test/config/integration/certs", ], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "//source/common/config:protobuf_link_hacks", @@ -154,6 +158,7 @@ envoy_cc_test( srcs = [ "filter_manager_integration_test.cc", ], + tags = ["fails_on_windows"], deps = [ ":filter_manager_integration_proto_cc_proto", ":http_integration_lib", @@ -173,6 +178,7 @@ envoy_cc_test( envoy_cc_test( name = "cluster_filter_integration_test", srcs = ["cluster_filter_integration_test.cc"], + tags = ["fails_on_windows"], deps = [ ":integration_lib", "//include/envoy/network:filter_interface", @@ -186,6 +192,7 @@ envoy_cc_test( envoy_cc_test( name = "custom_cluster_integration_test", srcs = ["custom_cluster_integration_test.cc"], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "//source/common/upstream:load_balancer_lib", @@ -203,6 +210,7 @@ envoy_cc_test( data = [ "//test/config/integration/certs", ], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "//source/common/config:protobuf_link_hacks", @@ -251,6 +259,7 @@ envoy_cc_test( srcs = [ "header_integration_test.cc", ], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "//source/common/config:api_version_lib", @@ -271,6 +280,7 @@ envoy_cc_test( "http2_integration_test.cc", "http2_integration_test.h", ], + tags = 
["fails_on_windows"], deps = [ ":http_integration_lib", "//source/common/buffer:buffer_lib", @@ -296,6 +306,7 @@ envoy_cc_test( srcs = [ "http_subset_lb_integration_test.cc", ], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "//test/common/upstream:utility_lib", @@ -314,6 +325,7 @@ envoy_cc_test( data = [ "//test/config/integration/certs", ], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "//source/extensions/transport_sockets/tls:context_lib", @@ -330,6 +342,7 @@ envoy_cc_test( srcs = [ "header_casing_integration_test.cc", ], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", @@ -343,6 +356,7 @@ envoy_cc_test( "http_timeout_integration_test.cc", "http_timeout_integration_test.h", ], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "@envoy_api//envoy/extensions/filters/http/router/v3:pkg_cc_proto", @@ -358,6 +372,7 @@ envoy_cc_test( # As this test has many H1/H2/v4/v6 tests it takes a while to run. # Shard it enough to bring the run time in line with other integration tests. shard_count = 5, + tags = ["fails_on_windows"], deps = [ ":http_protocol_integration_lib", "//source/common/http:header_map_lib", @@ -378,6 +393,7 @@ envoy_cc_test( "http2_upstream_integration_test.cc", "http2_upstream_integration_test.h", ], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "//source/common/http:header_map_lib", @@ -398,6 +414,7 @@ envoy_cc_test( "integration_admin_test.cc", "integration_admin_test.h", ], + tags = ["fails_on_windows"], deps = [ ":http_protocol_integration_lib", "//include/envoy/http:header_map_interface", @@ -475,6 +492,7 @@ envoy_cc_test( # As this test has many pauses for idle timeouts, it takes a while to run. # Shard it enough to bring the run time in line with other integration tests. shard_count = 2, + tags = ["fails_on_windows"], deps = [ ":http_protocol_integration_lib", "//test/test_common:test_time_lib", @@ -610,6 +628,7 @@ envoy_cc_test( srcs = [ "redirect_integration_test.cc", ], + tags = ["fails_on_windows"], deps = [ ":http_protocol_integration_lib", "//source/common/http:header_map_lib", @@ -666,6 +685,7 @@ envoy_cc_test( # The symbol table cluster memory tests take a while to run specially under tsan. # Shard it to avoid test timeout. 
shard_count = 2, + tags = ["fails_on_windows"], deps = [ ":integration_lib", "//source/common/memory:stats_lib", @@ -683,6 +703,7 @@ envoy_cc_test( envoy_cc_test( name = "load_stats_integration_test", srcs = ["load_stats_integration_test.cc"], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "//test/config:utility_lib", @@ -700,6 +721,7 @@ envoy_cc_test( envoy_cc_test( name = "hds_integration_test", srcs = ["hds_integration_test.cc"], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", ":integration_lib", @@ -723,6 +745,7 @@ envoy_cc_test( name = "header_prefix_integration_test", srcs = ["header_prefix_integration_test.cc"], coverage = False, + tags = ["fails_on_windows"], deps = [ ":http_protocol_integration_lib", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", @@ -732,6 +755,7 @@ envoy_cc_test( envoy_cc_test( name = "overload_integration_test", srcs = ["overload_integration_test.cc"], + tags = ["fails_on_windows"], deps = [ ":http_protocol_integration_lib", "//source/extensions/resource_monitors/injected_resource:config", @@ -746,6 +770,7 @@ envoy_cc_test( "proxy_proto_integration_test.cc", "proxy_proto_integration_test.h", ], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "//source/common/buffer:buffer_lib", @@ -760,6 +785,7 @@ envoy_cc_test( envoy_cc_test( name = "ratelimit_integration_test", srcs = ["ratelimit_integration_test.cc"], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "//source/common/buffer:zero_copy_input_stream_lib", @@ -778,6 +804,7 @@ envoy_cc_test( envoy_cc_test( name = "rtds_integration_test", srcs = ["rtds_integration_test.cc"], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "//test/common/grpc:grpc_client_integration_lib", @@ -799,6 +826,7 @@ envoy_cc_test( data = [ "//test/config/integration/certs", ], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "//source/common/event:dispatcher_includes", @@ -823,6 +851,7 @@ envoy_cc_test( data = [ "//test/config/integration/certs", ], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "//source/common/config:api_version_lib", @@ -852,6 +881,7 @@ envoy_cc_test( srcs = [ "sds_generic_secret_integration_test.cc", ], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "//include/envoy/registry", @@ -873,6 +903,7 @@ envoy_cc_test( data = [ "//test/config/integration/certs", ], + tags = ["fails_on_windows"], deps = [ ":integration_lib", "//source/common/config:api_version_lib", @@ -904,6 +935,7 @@ envoy_cc_test( data = [ "//test/config/integration/certs", ], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", ":http_protocol_integration_lib", @@ -918,6 +950,7 @@ envoy_cc_test( srcs = [ "tcp_conn_pool_integration_test.cc", ], + tags = ["fails_on_windows"], deps = [ ":integration_lib", "//include/envoy/server:filter_config_interface", @@ -944,6 +977,7 @@ envoy_cc_test( "uds_integration_test.cc", "uds_integration_test.h", ], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "//source/common/event:dispatcher_includes", @@ -958,6 +992,7 @@ envoy_cc_test( envoy_cc_test( name = "version_integration_test", srcs = ["version_integration_test.cc"], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "//source/extensions/filters/http/ip_tagging:config", @@ -968,6 +1003,7 @@ envoy_cc_test( name = "dynamic_validation_integration_test", srcs = ["dynamic_validation_integration_test.cc"], data = ["//test/config/integration:server_xds_files"], + tags = ["fails_on_windows"], 
deps = [ ":http_integration_lib", "//source/common/stats:stats_lib", @@ -980,6 +1016,7 @@ envoy_cc_test( name = "xds_integration_test", srcs = ["xds_integration_test.cc"], data = ["//test/config/integration:server_xds_files"], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", ":http_protocol_integration_lib", @@ -999,6 +1036,7 @@ envoy_cc_test( data = [ "//test/config/integration/certs", ], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "//source/common/http:header_map_lib", @@ -1077,6 +1115,7 @@ envoy_cc_test( srcs = [ "scoped_rds_integration_test.cc", ], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "//source/common/config:api_version_lib", @@ -1100,6 +1139,7 @@ envoy_cc_test( srcs = [ "listener_lds_integration_test.cc", ], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "//source/common/config:api_version_lib", @@ -1126,6 +1166,7 @@ envoy_cc_test( data = [ "//test/config/integration/certs", ], + tags = ["fails_on_windows"], deps = [ ":integration_lib", "//source/common/config:api_version_lib", diff --git a/test/server/BUILD b/test/server/BUILD index eb90f57bfaaa..cc513b6ab6ab 100644 --- a/test/server/BUILD +++ b/test/server/BUILD @@ -164,6 +164,7 @@ envoy_cc_test( envoy_cc_test( name = "overload_manager_impl_test", srcs = ["overload_manager_impl_test.cc"], + tags = ["fails_on_windows"], deps = [ "//include/envoy/registry", "//source/common/stats:isolated_store_lib", @@ -222,6 +223,7 @@ envoy_cc_test_library( envoy_cc_test( name = "listener_manager_impl_test", srcs = ["listener_manager_impl_test.cc"], + tags = ["fails_on_windows"], deps = [ ":listener_manager_impl_test_lib", ":utility_lib", @@ -343,6 +345,7 @@ envoy_cc_test( ":server_test_data", ":static_validation_test_data", ], + tags = ["fails_on_windows"], deps = [ "//source/common/common:version_lib", "//source/extensions/access_loggers/file:config", @@ -428,4 +431,5 @@ envoy_benchmark_test( name = "filter_chain_benchmark_test_benchmark_test", timeout = "long", benchmark_binary = "filter_chain_benchmark_test", + tags = ["fails_on_windows"], ) diff --git a/test/server/config_validation/BUILD b/test/server/config_validation/BUILD index f785888ac01c..e99e37a684b3 100644 --- a/test/server/config_validation/BUILD +++ b/test/server/config_validation/BUILD @@ -55,6 +55,7 @@ envoy_cc_test( "//configs:example_configs", "//test/config_test:example_configs_test_setup.sh", ], + tags = ["fails_on_windows"], deps = [ "//source/extensions/filters/http/router:config", "//source/extensions/filters/network/http_connection_manager:config", From fd366655fd8617a1c34562d8ea2e3306064904bc Mon Sep 17 00:00:00 2001 From: Vijay Rajput <48493084+vijayrajput1@users.noreply.github.com> Date: Mon, 27 Apr 2020 13:19:43 -0700 Subject: [PATCH 035/909] fault injection: add support for setting gRPC status (#10841) Signed-off-by: Vijay Rajput --- .../filters/http/fault/v3/fault.proto | 10 +- .../http/http_filters/fault_filter.rst | 25 ++- docs/root/version_history/current.rst | 2 + .../filters/http/fault/v3/fault.proto | 12 +- .../filters/common/fault/fault_config.cc | 29 +++- .../filters/common/fault/fault_config.h | 76 ++++++--- .../filters/http/fault/fault_filter.cc | 84 +++++++--- .../filters/http/fault/fault_filter.h | 12 +- .../filters/common/fault/fault_config_test.cc | 30 +++- .../fault/fault_filter_integration_test.cc | 88 ++++++++++ .../filters/http/fault/fault_filter_test.cc | 156 ++++++++++++++++++ 11 files changed, 468 insertions(+), 56 deletions(-) diff --git 
a/api/envoy/extensions/filters/http/fault/v3/fault.proto b/api/envoy/extensions/filters/http/fault/v3/fault.proto index 534a0da35b16..d28ed28b1110 100644 --- a/api/envoy/extensions/filters/http/fault/v3/fault.proto +++ b/api/envoy/extensions/filters/http/fault/v3/fault.proto @@ -21,6 +21,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Fault Injection :ref:`configuration overview `. // [#extension: envoy.filters.http.fault] +// [#next-free-field: 6] message FaultAbort { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.fault.v2.FaultAbort"; @@ -41,6 +42,9 @@ message FaultAbort { // HTTP status code to use to abort the HTTP request. uint32 http_status = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}]; + // gRPC status code to use to abort the gRPC request. + uint32 grpc_status = 5; + // Fault aborts are controlled via an HTTP header (if applicable). HeaderAbort header_abort = 4; } @@ -50,7 +54,7 @@ message FaultAbort { type.v3.FractionalPercent percentage = 3; } -// [#next-free-field: 14] +// [#next-free-field: 15] message HTTPFault { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.fault.v2.HTTPFault"; @@ -133,4 +137,8 @@ message HTTPFault { // The runtime key to override the :ref:`default ` // runtime. The default is: fault.http.rate_limit.response_percent string response_rate_limit_percent_runtime = 13; + + // The runtime key to override the :ref:`default ` + // runtime. The default is: fault.http.abort.grpc_status + string abort_grpc_status_runtime = 14; } diff --git a/docs/root/configuration/http/http_filters/fault_filter.rst b/docs/root/configuration/http/http_filters/fault_filter.rst index 80678714db69..f79fc4d44a7a 100644 --- a/docs/root/configuration/http/http_filters/fault_filter.rst +++ b/docs/root/configuration/http/http_filters/fault_filter.rst @@ -42,17 +42,27 @@ x-envoy-fault-abort-request In order for the header to work, :ref:`header_abort ` needs to be set. +x-envoy-fault-abort-grpc-request + gRPC status code to abort a request with. The header value should be a non-negative integer that specifies + the gRPC status code to return in response to a request. Its value range is [0, UInt32.Max] instead of [0, 16] + to allow testing even not well-defined gRPC status codes. When this header is set, the HTTP response status code + will be set to 200. In order for the header to work, :ref:`header_abort + ` needs to be set. If both + *x-envoy-fault-abort-request* and *x-envoy-fault-abort-grpc-request* headers are set then + *x-envoy-fault-abort-grpc-request* header will be **ignored** and fault response http status code will be + set to *x-envoy-fault-abort-request* header value. + x-envoy-fault-abort-request-percentage The percentage of requests that should be failed with a status code that's defined - by the value of *x-envoy-fault-abort-request* HTTP header. The header value should be an integer - that specifies the numerator of the percentage of request to apply aborts to and must be greater - or equal to 0 and its maximum value is capped by the value of the numerator of + by the value of *x-envoy-fault-abort-request* or *x-envoy-fault-abort-grpc-request* HTTP headers. + The header value should be an integer that specifies the numerator of the percentage of request to apply aborts + to and must be greater or equal to 0 and its maximum value is capped by the value of the numerator of :ref:`percentage ` field. 
Percentage's denominator is equal to default percentage's denominator :ref:`percentage ` field. In order for the header to work, :ref:`header_abort ` needs to be set and - *x-envoy-fault-abort-request* HTTP header needs to be a part of a request. + either *x-envoy-fault-abort-request* or *x-envoy-fault-abort-grpc-request* HTTP header needs to be a part of the request. x-envoy-fault-delay-request The duration to delay a request by. The header value should be an integer that specifies the number @@ -144,6 +154,13 @@ fault.http.abort.http_status available regardless of whether the filter is :ref:`configured for abort `. +fault.http.abort.grpc_status + gRPC status code that will be used as the response status code of requests that will be + aborted if the headers match. Defaults to the gRPC status code specified in the config. + If this field is missing from both the runtime and the config, gRPC status code in the response + will be derived from *fault.http.abort.http_status* field. This runtime key is only available when + the filter is :ref:`configured for abort `. + fault.http.delay.fixed_delay_percent % of requests that will be delayed if the headers match. Defaults to the *delay_percent* specified in the config or 0 otherwise. This runtime key is only available when diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 2449c881fa10..c19000140d66 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -9,6 +9,8 @@ Changes * dynamic forward proxy: added :ref:`SNI based dynamic forward proxy ` support. * fault: added support for controlling the percentage of requests that abort, delay and response rate limits faults are applied to using :ref:`HTTP headers ` to the HTTP fault filter. +* fault: added support for specifying grpc_status code in abort faults using + :ref:`HTTP header ` or abort fault configuration in HTTP fault filter. * filter: add `upstram_rq_time` stats to the GPRC stats filter. Disabled by default and can be enabled via :ref:`enable_upstream_stats `. * grpc-json: added support for streaming response using diff --git a/generated_api_shadow/envoy/extensions/filters/http/fault/v3/fault.proto b/generated_api_shadow/envoy/extensions/filters/http/fault/v3/fault.proto index 07996a9507ff..9bba2f134cdf 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/fault/v3/fault.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/fault/v3/fault.proto @@ -21,6 +21,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Fault Injection :ref:`configuration overview `. // [#extension: envoy.filters.http.fault] +// [#next-free-field: 6] message FaultAbort { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.fault.v2.FaultAbort"; @@ -41,16 +42,19 @@ message FaultAbort { oneof error_type { option (validate.required) = true; - // Fault aborts are controlled via an HTTP header (if applicable). + // gRPC status code to use to abort the gRPC request. uint32 http_status = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}]; + // Fault aborts are controlled via an HTTP header (if applicable). + uint32 grpc_status = 5; + // The percentage of requests/operations/connections that will be aborted with the error code // provided. 
HeaderAbort header_abort = 4; } } -// [#next-free-field: 14] +// [#next-free-field: 15] message HTTPFault { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.fault.v2.HTTPFault"; @@ -133,4 +137,8 @@ message HTTPFault { // The runtime key to override the :ref:`default ` // runtime. The default is: fault.http.rate_limit.response_percent string response_rate_limit_percent_runtime = 13; + + // The runtime key to override the :ref:`default ` + // runtime. The default is: fault.http.abort.grpc_status + string abort_grpc_status_runtime = 14; } diff --git a/source/extensions/filters/common/fault/fault_config.cc b/source/extensions/filters/common/fault/fault_config.cc index 7bcdd765a3ef..ebbb86e2fd95 100644 --- a/source/extensions/filters/common/fault/fault_config.cc +++ b/source/extensions/filters/common/fault/fault_config.cc @@ -34,7 +34,13 @@ FaultAbortConfig::FaultAbortConfig( switch (abort_config.error_type_case()) { case envoy::extensions::filters::http::fault::v3::FaultAbort::ErrorTypeCase::kHttpStatus: provider_ = - std::make_unique(abort_config.http_status(), abort_config.percentage()); + std::make_unique(static_cast(abort_config.http_status()), + absl::nullopt, abort_config.percentage()); + break; + case envoy::extensions::filters::http::fault::v3::FaultAbort::ErrorTypeCase::kGrpcStatus: + provider_ = std::make_unique( + absl::nullopt, static_cast(abort_config.grpc_status()), + abort_config.percentage()); break; case envoy::extensions::filters::http::fault::v3::FaultAbort::ErrorTypeCase::kHeaderAbort: provider_ = std::make_unique(abort_config.percentage()); @@ -44,10 +50,10 @@ FaultAbortConfig::FaultAbortConfig( } } -absl::optional FaultAbortConfig::HeaderAbortProvider::statusCode( +absl::optional FaultAbortConfig::HeaderAbortProvider::httpStatusCode( const Http::RequestHeaderMap* request_headers) const { - absl::optional ret; - const auto header = request_headers->get(HeaderNames::get().AbortRequest); + absl::optional ret = absl::nullopt; + auto header = request_headers->get(Filters::Common::Fault::HeaderNames::get().AbortRequest); if (header == nullptr) { return ret; } @@ -64,6 +70,21 @@ absl::optional FaultAbortConfig::HeaderAbortProvider::statusCode( return ret; } +absl::optional FaultAbortConfig::HeaderAbortProvider::grpcStatusCode( + const Http::RequestHeaderMap* request_headers) const { + auto header = request_headers->get(Filters::Common::Fault::HeaderNames::get().AbortGrpcRequest); + if (header == nullptr) { + return absl::nullopt; + } + + uint64_t code; + if (!absl::SimpleAtoi(header->value().getStringView(), &code)) { + return absl::nullopt; + } + + return static_cast(code); +} + FaultDelayConfig::FaultDelayConfig( const envoy::extensions::filters::common::fault::v3::FaultDelay& delay_config) { switch (delay_config.fault_delay_secifier_case()) { diff --git a/source/extensions/filters/common/fault/fault_config.h b/source/extensions/filters/common/fault/fault_config.h index 2bf80a1e67d2..e253814273b3 100644 --- a/source/extensions/filters/common/fault/fault_config.h +++ b/source/extensions/filters/common/fault/fault_config.h @@ -2,6 +2,7 @@ #include "envoy/extensions/filters/common/fault/v3/fault.pb.h" #include "envoy/extensions/filters/http/fault/v3/fault.pb.h" +#include "envoy/grpc/status.h" #include "envoy/http/header_map.h" #include "envoy/type/v3/percent.pb.h" @@ -22,6 +23,7 @@ class HeaderNameValues { const Http::LowerCaseString AbortRequest{absl::StrCat(prefix(), "-fault-abort-request")}; const Http::LowerCaseString 
AbortRequestPercentage{ absl::StrCat(prefix(), "-fault-abort-request-percentage")}; + const Http::LowerCaseString AbortGrpcRequest{absl::StrCat(prefix(), "-fault-abort-grpc-request")}; const Http::LowerCaseString DelayRequest{absl::StrCat(prefix(), "-fault-delay-request")}; const Http::LowerCaseString DelayRequestPercentage{ absl::StrCat(prefix(), "-fault-delay-request-percentage")}; @@ -53,8 +55,12 @@ class FaultAbortConfig { public: FaultAbortConfig(const envoy::extensions::filters::http::fault::v3::FaultAbort& abort_config); - absl::optional statusCode(const Http::RequestHeaderMap* request_headers) const { - return provider_->statusCode(request_headers); + absl::optional httpStatusCode(const Http::RequestHeaderMap* request_headers) const { + return provider_->httpStatusCode(request_headers); + } + absl::optional + grpcStatusCode(const Http::RequestHeaderMap* request_headers) const { + return provider_->grpcStatusCode(request_headers); } envoy::type::v3::FractionalPercent @@ -71,22 +77,35 @@ class FaultAbortConfig { // Return the HTTP status code to use. Optionally passed HTTP headers that may contain the // HTTP status code depending on the provider implementation. virtual absl::optional - statusCode(const Http::RequestHeaderMap* request_headers) const PURE; + httpStatusCode(const Http::RequestHeaderMap* request_headers) const PURE; + + // Return the gRPC status code to use. Optionally passed an HTTP header that may contain the + // gRPC status code depending on the provider implementation. + virtual absl::optional + grpcStatusCode(const Http::RequestHeaderMap* request_headers) const PURE; + // Return what percentage of requests abort faults should be applied to. Optionally passed // HTTP headers that may contain the percentage depending on the provider implementation. virtual envoy::type::v3::FractionalPercent percentage(const Http::RequestHeaderMap* request_headers) const PURE; }; - // Delay provider that uses a fixed abort status code. + // Abort provider that uses a fixed abort status code. class FixedAbortProvider : public AbortProvider { public: - FixedAbortProvider(uint64_t status_code, const envoy::type::v3::FractionalPercent& percentage) - : status_code_(status_code), percentage_(percentage) {} + FixedAbortProvider(absl::optional http_status_code, + absl::optional grpc_status_code, + const envoy::type::v3::FractionalPercent& percentage) + : http_status_code_(http_status_code), grpc_status_code_(grpc_status_code), + percentage_(percentage) {} - // AbortProvider - absl::optional statusCode(const Http::RequestHeaderMap*) const override { - return static_cast(status_code_); + absl::optional httpStatusCode(const Http::RequestHeaderMap*) const override { + return http_status_code_; + } + + absl::optional + grpcStatusCode(const Http::RequestHeaderMap*) const override { + return grpc_status_code_; } envoy::type::v3::FractionalPercent percentage(const Http::RequestHeaderMap*) const override { @@ -94,23 +113,30 @@ class FaultAbortConfig { } private: - const uint64_t status_code_; + const absl::optional http_status_code_; + const absl::optional grpc_status_code_; const envoy::type::v3::FractionalPercent percentage_; }; // Abort provider the reads a status code from an HTTP header. 
- class HeaderAbortProvider : public AbortProvider, public HeaderPercentageProvider { + class HeaderAbortProvider : public AbortProvider { public: HeaderAbortProvider(const envoy::type::v3::FractionalPercent& percentage) - : HeaderPercentageProvider(HeaderNames::get().AbortRequestPercentage, percentage) {} - // AbortProvider + : header_percentage_provider_(HeaderNames::get().AbortRequestPercentage, percentage) {} + absl::optional - statusCode(const Http::RequestHeaderMap* request_headers) const override; + httpStatusCode(const Http::RequestHeaderMap* request_headers) const override; + + absl::optional + grpcStatusCode(const Http::RequestHeaderMap* request_headers) const override; envoy::type::v3::FractionalPercent percentage(const Http::RequestHeaderMap* request_headers) const override { - return HeaderPercentageProvider::percentage(request_headers); + return header_percentage_provider_.percentage(request_headers); } + + private: + HeaderPercentageProvider header_percentage_provider_; }; using AbortProviderPtr = std::unique_ptr; @@ -176,18 +202,22 @@ class FaultDelayConfig { }; // Delay provider the reads a delay from an HTTP header. - class HeaderDelayProvider : public DelayProvider, public HeaderPercentageProvider { + class HeaderDelayProvider : public DelayProvider { public: HeaderDelayProvider(const envoy::type::v3::FractionalPercent& percentage) - : HeaderPercentageProvider(HeaderNames::get().DelayRequestPercentage, percentage) {} + : header_percentage_provider_(HeaderNames::get().DelayRequestPercentage, percentage) {} + // DelayProvider absl::optional duration(const Http::RequestHeaderMap* request_headers) const override; envoy::type::v3::FractionalPercent percentage(const Http::RequestHeaderMap* request_headers) const override { - return HeaderPercentageProvider::percentage(request_headers); + return header_percentage_provider_.percentage(request_headers); } + + private: + HeaderPercentageProvider header_percentage_provider_; }; using DelayProviderPtr = std::unique_ptr; @@ -252,16 +282,20 @@ class FaultRateLimitConfig { }; // Rate limit provider that reads the rate limit from an HTTP header. 
- class HeaderRateLimitProvider : public RateLimitProvider, public HeaderPercentageProvider { + class HeaderRateLimitProvider : public RateLimitProvider { public: HeaderRateLimitProvider(const envoy::type::v3::FractionalPercent& percentage) - : HeaderPercentageProvider(HeaderNames::get().ThroughputResponsePercentage, percentage) {} + : header_percentage_provider_(HeaderNames::get().ThroughputResponsePercentage, percentage) { + } // RateLimitProvider absl::optional rateKbps(const Http::RequestHeaderMap* request_headers) const override; envoy::type::v3::FractionalPercent percentage(const Http::RequestHeaderMap* request_headers) const override { - return HeaderPercentageProvider::percentage(request_headers); + return header_percentage_provider_.percentage(request_headers); } + + private: + HeaderPercentageProvider header_percentage_provider_; }; using RateLimitProviderPtr = std::unique_ptr; diff --git a/source/extensions/filters/http/fault/fault_filter.cc b/source/extensions/filters/http/fault/fault_filter.cc index a34e833bb12c..67d1f9ce8f17 100644 --- a/source/extensions/filters/http/fault/fault_filter.cc +++ b/source/extensions/filters/http/fault/fault_filter.cc @@ -44,6 +44,8 @@ FaultSettings::FaultSettings(const envoy::extensions::filters::http::fault::v3:: RuntimeKeys::get().DelayDurationKey)), abort_http_status_runtime_(PROTOBUF_GET_STRING_OR_DEFAULT( fault, abort_http_status_runtime, RuntimeKeys::get().AbortHttpStatusKey)), + abort_grpc_status_runtime_(PROTOBUF_GET_STRING_OR_DEFAULT( + fault, abort_grpc_status_runtime, RuntimeKeys::get().AbortGrpcStatusKey)), max_active_faults_runtime_(PROTOBUF_GET_STRING_OR_DEFAULT( fault, max_active_faults_runtime, RuntimeKeys::get().MaxActiveFaultsKey)), response_rate_limit_percent_runtime_( @@ -149,6 +151,8 @@ Http::FilterHeadersStatus FaultFilter::decodeHeaders(Http::RequestHeaderMap& hea fmt::format("fault.http.{}.delay.fixed_duration_ms", downstream_cluster_); downstream_cluster_abort_http_status_key_ = fmt::format("fault.http.{}.abort.http_status", downstream_cluster_); + downstream_cluster_abort_grpc_status_key_ = + fmt::format("fault.http.{}.abort.grpc_status", downstream_cluster_); } maybeSetupResponseRateLimit(headers); @@ -164,9 +168,12 @@ Http::FilterHeadersStatus FaultFilter::decodeHeaders(Http::RequestHeaderMap& hea return Http::FilterHeadersStatus::StopIteration; } - const auto abort_code = abortHttpStatus(headers); - if (abort_code.has_value()) { - abortWithHTTPStatus(abort_code.value()); + absl::optional http_status; + absl::optional grpc_status; + std::tie(http_status, grpc_status) = abortStatus(headers); + + if (http_status.has_value()) { + abortWithStatus(http_status.value(), grpc_status); return Http::FilterHeadersStatus::StopIteration; } @@ -284,29 +291,64 @@ FaultFilter::delayDuration(const Http::RequestHeaderMap& request_headers) { return ret; } -absl::optional -FaultFilter::abortHttpStatus(const Http::RequestHeaderMap& request_headers) { +AbortHttpAndGrpcStatus FaultFilter::abortStatus(const Http::RequestHeaderMap& request_headers) { if (!isAbortEnabled(request_headers)) { - return absl::nullopt; + return AbortHttpAndGrpcStatus{absl::nullopt, absl::nullopt}; + } + + auto http_status = abortHttpStatus(request_headers); + // If http status code is set, then gRPC status won't be used. 
+ if (http_status.has_value()) { + return AbortHttpAndGrpcStatus{http_status, absl::nullopt}; } + auto grpc_status = abortGrpcStatus(request_headers); + // If gRPC status code is set, then http status will be set to Http::Code::OK (200) + if (grpc_status.has_value()) { + return AbortHttpAndGrpcStatus{Http::Code::OK, grpc_status}; + } + + return AbortHttpAndGrpcStatus{absl::nullopt, absl::nullopt}; +} + +absl::optional +FaultFilter::abortHttpStatus(const Http::RequestHeaderMap& request_headers) { // See if the configured abort provider has a default status code, if not there is no abort status // code (e.g., header configuration and no/invalid header). - const auto config_abort = fault_settings_->requestAbort()->statusCode(&request_headers); - if (!config_abort.has_value()) { + auto http_status = fault_settings_->requestAbort()->httpStatusCode(&request_headers); + if (!http_status.has_value()) { return absl::nullopt; } - auto status_code = static_cast(config_abort.value()); - auto code = static_cast(config_->runtime().snapshot().getInteger( - fault_settings_->abortHttpStatusRuntime(), status_code)); + auto default_http_status_code = static_cast(http_status.value()); + auto runtime_http_status_code = config_->runtime().snapshot().getInteger( + fault_settings_->abortHttpStatusRuntime(), default_http_status_code); if (!downstream_cluster_abort_http_status_key_.empty()) { - code = static_cast(config_->runtime().snapshot().getInteger( - downstream_cluster_abort_http_status_key_, status_code)); + runtime_http_status_code = config_->runtime().snapshot().getInteger( + downstream_cluster_abort_http_status_key_, default_http_status_code); + } + + return static_cast(runtime_http_status_code); +} + +absl::optional +FaultFilter::abortGrpcStatus(const Http::RequestHeaderMap& request_headers) { + auto grpc_status = fault_settings_->requestAbort()->grpcStatusCode(&request_headers); + if (!grpc_status.has_value()) { + return absl::nullopt; } - return code; + auto default_grpc_status_code = static_cast(grpc_status.value()); + auto runtime_grpc_status_code = config_->runtime().snapshot().getInteger( + fault_settings_->abortGrpcStatusRuntime(), default_grpc_status_code); + + if (!downstream_cluster_abort_grpc_status_key_.empty()) { + runtime_grpc_status_code = config_->runtime().snapshot().getInteger( + downstream_cluster_abort_grpc_status_key_, default_grpc_status_code); + } + + return static_cast(runtime_grpc_status_code); } void FaultFilter::recordDelaysInjectedStats() { @@ -375,18 +417,22 @@ void FaultFilter::postDelayInjection(const Http::RequestHeaderMap& request_heade resetTimerState(); // Delays can be followed by aborts - const auto abort_code = abortHttpStatus(request_headers); - if (abort_code.has_value()) { - abortWithHTTPStatus(abort_code.value()); + absl::optional http_status; + absl::optional grpc_status; + std::tie(http_status, grpc_status) = abortStatus(request_headers); + + if (http_status.has_value()) { + abortWithStatus(http_status.value(), grpc_status); } else { // Continue request processing. 
decoder_callbacks_->continueDecoding(); } } -void FaultFilter::abortWithHTTPStatus(Http::Code abort_code) { +void FaultFilter::abortWithStatus(Http::Code http_status_code, + absl::optional grpc_status) { decoder_callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::FaultInjected); - decoder_callbacks_->sendLocalReply(abort_code, "fault filter abort", nullptr, absl::nullopt, + decoder_callbacks_->sendLocalReply(http_status_code, "fault filter abort", nullptr, grpc_status, RcDetails::get().FaultAbort); recordAbortsInjectedStats(); } diff --git a/source/extensions/filters/http/fault/fault_filter.h b/source/extensions/filters/http/fault/fault_filter.h index bdbcbd975282..2dfed7c9167d 100644 --- a/source/extensions/filters/http/fault/fault_filter.h +++ b/source/extensions/filters/http/fault/fault_filter.h @@ -67,6 +67,7 @@ class FaultSettings : public Router::RouteSpecificFilterConfig { const std::string& abortPercentRuntime() const { return abort_percent_runtime_; } const std::string& delayPercentRuntime() const { return delay_percent_runtime_; } const std::string& abortHttpStatusRuntime() const { return abort_http_status_runtime_; } + const std::string& abortGrpcStatusRuntime() const { return abort_grpc_status_runtime_; } const std::string& delayDurationRuntime() const { return delay_duration_runtime_; } const std::string& maxActiveFaultsRuntime() const { return max_active_faults_runtime_; } const std::string& responseRateLimitPercentRuntime() const { @@ -80,6 +81,7 @@ class FaultSettings : public Router::RouteSpecificFilterConfig { const std::string AbortPercentKey = "fault.http.abort.abort_percent"; const std::string DelayDurationKey = "fault.http.delay.fixed_duration_ms"; const std::string AbortHttpStatusKey = "fault.http.abort.http_status"; + const std::string AbortGrpcStatusKey = "fault.http.abort.grpc_status"; const std::string MaxActiveFaultsKey = "fault.http.max_active_faults"; const std::string ResponseRateLimitPercentKey = "fault.http.rate_limit.response_percent"; }; @@ -98,6 +100,7 @@ class FaultSettings : public Router::RouteSpecificFilterConfig { const std::string abort_percent_runtime_; const std::string delay_duration_runtime_; const std::string abort_http_status_runtime_; + const std::string abort_grpc_status_runtime_; const std::string max_active_faults_runtime_; const std::string response_rate_limit_percent_runtime_; }; @@ -203,6 +206,8 @@ class StreamRateLimiter : Logger::Loggable { Buffer::WatermarkBuffer buffer_; }; +using AbortHttpAndGrpcStatus = + std::pair, absl::optional>; /** * A filter that is capable of faulting an entire request before dispatching it upstream. */ @@ -245,7 +250,8 @@ class FaultFilter : public Http::StreamFilter, Logger::Loggable grpc_status_code); bool matchesTargetUpstreamCluster(); bool matchesDownstreamNodes(const Http::RequestHeaderMap& headers); bool isAbortEnabled(const Http::RequestHeaderMap& request_headers); @@ -253,7 +259,10 @@ class FaultFilter : public Http::StreamFilter, Logger::Loggable delayDuration(const Http::RequestHeaderMap& request_headers); + AbortHttpAndGrpcStatus abortStatus(const Http::RequestHeaderMap& request_headers); absl::optional abortHttpStatus(const Http::RequestHeaderMap& request_headers); + absl::optional + abortGrpcStatus(const Http::RequestHeaderMap& request_headers); void maybeIncActiveFaults(); void maybeSetupResponseRateLimit(const Http::RequestHeaderMap& request_headers); @@ -270,6 +279,7 @@ class FaultFilter : public Http::StreamFilter, Logger::Loggable 16). 
+ Http::TestRequestHeaderMapImpl too_high_headers{{"x-envoy-fault-abort-grpc-request", "100"}}; + EXPECT_EQ(100, config.grpcStatusCode(&too_high_headers)); } TEST(FaultConfigTest, FaultAbortPercentageHeaderConfig) { diff --git a/test/extensions/filters/http/fault/fault_filter_integration_test.cc b/test/extensions/filters/http/fault/fault_filter_integration_test.cc index 6059287a66c7..0864793d136f 100644 --- a/test/extensions/filters/http/fault/fault_filter_integration_test.cc +++ b/test/extensions/filters/http/fault/fault_filter_integration_test.cc @@ -46,6 +46,17 @@ name: fault percentage: numerator: 100 )EOF"; + + const std::string abort_grpc_fault_config_ = + R"EOF( +name: fault +typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.fault.v3.HTTPFault + abort: + grpc_status: 5 + percentage: + numerator: 100 +)EOF"; }; // Fault integration tests that should run with all protocols, useful for testing various @@ -214,6 +225,83 @@ TEST_P(FaultIntegrationTestAllProtocols, HeaderFaultConfigNoHeaders) { EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.response_rl_injected")->value()); } +// Request abort with grpc status, controlled via header configuration. +TEST_P(FaultIntegrationTestAllProtocols, HeaderFaultAbortGrpcConfig) { + initializeFilter(header_fault_config_); + codec_client_ = makeHttpConnection(makeClientConnection(lookupPort("http"))); + + auto response = codec_client_->makeHeaderOnlyRequest( + Http::TestRequestHeaderMapImpl{{":method", "GET"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"x-envoy-fault-abort-grpc-request", "5"}, + {"content-type", "application/grpc"}}); + response->waitForEndStream(); + + EXPECT_TRUE(response->complete()); + EXPECT_THAT(response->headers(), Envoy::Http::HttpStatusIs("200")); + EXPECT_THAT(response->headers(), + HeaderValueOf(Http::Headers::get().ContentType, "application/grpc")); + EXPECT_THAT(response->headers(), HeaderValueOf(Http::Headers::get().GrpcStatus, "5")); + EXPECT_THAT(response->headers(), + HeaderValueOf(Http::Headers::get().GrpcMessage, "fault filter abort")); + EXPECT_EQ(nullptr, response->trailers()); + + EXPECT_EQ(1UL, test_server_->counter("http.config_test.fault.aborts_injected")->value()); + EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.delays_injected")->value()); + EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.response_rl_injected")->value()); +} + +// Request abort with grpc status, controlled via header configuration. 
+TEST_P(FaultIntegrationTestAllProtocols, HeaderFaultAbortGrpcConfig0PercentageHeader) { + initializeFilter(header_fault_config_); + codec_client_ = makeHttpConnection(makeClientConnection(lookupPort("http"))); + + auto response = codec_client_->makeHeaderOnlyRequest( + Http::TestRequestHeaderMapImpl{{":method", "GET"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"x-envoy-fault-abort-grpc-request", "5"}, + {"x-envoy-fault-abort-request-percentage", "0"}, + {"content-type", "application/grpc"}}); + waitForNextUpstreamRequest(); + upstream_request_->encodeHeaders(default_response_headers_, true); + response->waitForEndStream(); + + EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.aborts_injected")->value()); + EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.delays_injected")->value()); + EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.response_rl_injected")->value()); +} + +// Request abort with grpc status, controlled via configuration. +TEST_P(FaultIntegrationTestAllProtocols, FaultAbortGrpcConfig) { + initializeFilter(abort_grpc_fault_config_); + codec_client_ = makeHttpConnection(makeClientConnection(lookupPort("http"))); + + auto response = codec_client_->makeHeaderOnlyRequest( + Http::TestRequestHeaderMapImpl{{":method", "GET"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"content-type", "application/grpc"}}); + response->waitForEndStream(); + + EXPECT_TRUE(response->complete()); + EXPECT_THAT(response->headers(), Envoy::Http::HttpStatusIs("200")); + EXPECT_THAT(response->headers(), + HeaderValueOf(Http::Headers::get().ContentType, "application/grpc")); + EXPECT_THAT(response->headers(), HeaderValueOf(Http::Headers::get().GrpcStatus, "5")); + EXPECT_THAT(response->headers(), + HeaderValueOf(Http::Headers::get().GrpcMessage, "fault filter abort")); + EXPECT_EQ(nullptr, response->trailers()); + + EXPECT_EQ(1UL, test_server_->counter("http.config_test.fault.aborts_injected")->value()); + EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.delays_injected")->value()); + EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.response_rl_injected")->value()); +} + // Fault integration tests that run with HTTP/2 only, used for fully testing trailers. 
class FaultIntegrationTestHttp2 : public FaultIntegrationTest {}; INSTANTIATE_TEST_SUITE_P(Protocols, FaultIntegrationTestHttp2, diff --git a/test/extensions/filters/http/fault/fault_filter_test.cc b/test/extensions/filters/http/fault/fault_filter_test.cc index ce783bdca8e5..a29c5cc85816 100644 --- a/test/extensions/filters/http/fault/fault_filter_test.cc +++ b/test/extensions/filters/http/fault/fault_filter_test.cc @@ -326,6 +326,159 @@ TEST_F(FaultFilterTest, HeaderAbortWithHttpStatus) { EXPECT_EQ("fault_filter_abort", decoder_filter_callbacks_.details_); } +TEST_F(FaultFilterTest, AbortWithGrpcStatus) { + decoder_filter_callbacks_.is_grpc_request_ = true; + + envoy::extensions::filters::http::fault::v3::HTTPFault fault; + fault.mutable_abort()->mutable_percentage()->set_numerator(100); + fault.mutable_abort()->mutable_percentage()->set_denominator( + envoy::type::v3::FractionalPercent::HUNDRED); + fault.mutable_abort()->set_grpc_status(5); + SetUpTest(fault); + + EXPECT_CALL(runtime_.snapshot_, + getInteger("fault.http.max_active_faults", std::numeric_limits::max())) + .WillOnce(Return(std::numeric_limits::max())); + + EXPECT_CALL(decoder_filter_callbacks_, continueDecoding()).Times(0); + EXPECT_CALL(decoder_filter_callbacks_.stream_info_, + setResponseFlag(StreamInfo::ResponseFlag::DelayInjected)) + .Times(0); + + // Abort related calls + EXPECT_CALL(runtime_.snapshot_, + featureEnabled("fault.http.abort.abort_percent", + Matcher(Percent(100)))) + .WillOnce(Return(true)); + + EXPECT_CALL(runtime_.snapshot_, getInteger("fault.http.abort.grpc_status", 5)) + .WillOnce(Return(5)); + + Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}, + {"content-type", "application/grpc"}, + {"grpc-status", "5"}, + {"grpc-message", "fault filter abort"}}; + EXPECT_CALL(decoder_filter_callbacks_, + encodeHeaders_(HeaderMapEqualRef(&response_headers), true)); + + EXPECT_CALL(decoder_filter_callbacks_.stream_info_, + setResponseFlag(StreamInfo::ResponseFlag::FaultInjected)); + + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_->decodeHeaders(request_headers_, false)); + Http::MetadataMap metadata_map{{"metadata", "metadata"}}; + EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_->decodeMetadata(metadata_map)); + EXPECT_EQ(1UL, config_->stats().active_faults_.value()); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false)); + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_)); + filter_->onDestroy(); + + EXPECT_EQ(0UL, config_->stats().delays_injected_.value()); + EXPECT_EQ(1UL, config_->stats().aborts_injected_.value()); + EXPECT_EQ(0UL, config_->stats().active_faults_.value()); + EXPECT_EQ("fault_filter_abort", decoder_filter_callbacks_.details_); +} + +TEST_F(FaultFilterTest, HeaderAbortWithGrpcStatus) { + decoder_filter_callbacks_.is_grpc_request_ = true; + SetUpTest(header_abort_only_yaml); + + request_headers_.addCopy("x-envoy-fault-abort-grpc-request", "5"); + + EXPECT_CALL(runtime_.snapshot_, + getInteger("fault.http.max_active_faults", std::numeric_limits::max())) + .WillOnce(Return(std::numeric_limits::max())); + + EXPECT_CALL(decoder_filter_callbacks_, continueDecoding()).Times(0); + EXPECT_CALL(decoder_filter_callbacks_.stream_info_, + setResponseFlag(StreamInfo::ResponseFlag::DelayInjected)) + .Times(0); + + // Abort related calls + EXPECT_CALL(runtime_.snapshot_, + featureEnabled("fault.http.abort.abort_percent", + Matcher(Percent(100)))) + .WillOnce(Return(true)); + + 
EXPECT_CALL(runtime_.snapshot_, getInteger("fault.http.abort.grpc_status", 5)) + .WillOnce(Return(5)); + + Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}, + {"content-type", "application/grpc"}, + {"grpc-status", "5"}, + {"grpc-message", "fault filter abort"}}; + + EXPECT_CALL(decoder_filter_callbacks_, + encodeHeaders_(HeaderMapEqualRef(&response_headers), true)); + + EXPECT_CALL(decoder_filter_callbacks_.stream_info_, + setResponseFlag(StreamInfo::ResponseFlag::FaultInjected)); + + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_->decodeHeaders(request_headers_, false)); + Http::MetadataMap metadata_map{{"metadata", "metadata"}}; + EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_->decodeMetadata(metadata_map)); + EXPECT_EQ(1UL, config_->stats().active_faults_.value()); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false)); + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_)); + filter_->onDestroy(); + + EXPECT_EQ(0UL, config_->stats().delays_injected_.value()); + EXPECT_EQ(1UL, config_->stats().aborts_injected_.value()); + EXPECT_EQ(0UL, config_->stats().active_faults_.value()); + EXPECT_EQ("fault_filter_abort", decoder_filter_callbacks_.details_); +} + +TEST_F(FaultFilterTest, HeaderAbortWithHttpAndGrpcStatus) { + SetUpTest(header_abort_only_yaml); + + request_headers_.addCopy("x-envoy-fault-abort-request", "429"); + request_headers_.addCopy("x-envoy-fault-abort-grpc-request", "5"); + + EXPECT_CALL(runtime_.snapshot_, + getInteger("fault.http.max_active_faults", std::numeric_limits::max())) + .WillOnce(Return(std::numeric_limits::max())); + + EXPECT_CALL(decoder_filter_callbacks_, continueDecoding()).Times(0); + EXPECT_CALL(decoder_filter_callbacks_.stream_info_, + setResponseFlag(StreamInfo::ResponseFlag::DelayInjected)) + .Times(0); + + // Abort related calls + EXPECT_CALL(runtime_.snapshot_, + featureEnabled("fault.http.abort.abort_percent", + Matcher(Percent(100)))) + .WillOnce(Return(true)); + + EXPECT_CALL(runtime_.snapshot_, getInteger("fault.http.abort.http_status", 429)) + .WillOnce(Return(429)); + + EXPECT_CALL(runtime_.snapshot_, getInteger("fault.http.abort.grpc_status", 5)).Times(0); + + Http::TestResponseHeaderMapImpl response_headers{ + {":status", "429"}, {"content-length", "18"}, {"content-type", "text/plain"}}; + EXPECT_CALL(decoder_filter_callbacks_, + encodeHeaders_(HeaderMapEqualRef(&response_headers), false)); + EXPECT_CALL(decoder_filter_callbacks_, encodeData(_, true)); + + EXPECT_CALL(decoder_filter_callbacks_.stream_info_, + setResponseFlag(StreamInfo::ResponseFlag::FaultInjected)); + + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_->decodeHeaders(request_headers_, false)); + Http::MetadataMap metadata_map{{"metadata", "metadata"}}; + EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_->decodeMetadata(metadata_map)); + EXPECT_EQ(1UL, config_->stats().active_faults_.value()); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false)); + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_)); + filter_->onDestroy(); + + EXPECT_EQ(0UL, config_->stats().delays_injected_.value()); + EXPECT_EQ(1UL, config_->stats().aborts_injected_.value()); + EXPECT_EQ(0UL, config_->stats().active_faults_.value()); + EXPECT_EQ("fault_filter_abort", decoder_filter_callbacks_.details_); +} + TEST_F(FaultFilterTest, FixedDelayZeroDuration) { SetUpTest(fixed_delay_only_yaml); @@ -1096,6 +1249,7 @@ 
TEST_F(FaultFilterSettingsTest, CheckDefaultRuntimeKeys) { EXPECT_EQ("fault.http.abort.abort_percent", settings.abortPercentRuntime()); EXPECT_EQ("fault.http.delay.fixed_duration_ms", settings.delayDurationRuntime()); EXPECT_EQ("fault.http.abort.http_status", settings.abortHttpStatusRuntime()); + EXPECT_EQ("fault.http.abort.grpc_status", settings.abortGrpcStatusRuntime()); EXPECT_EQ("fault.http.max_active_faults", settings.maxActiveFaultsRuntime()); EXPECT_EQ("fault.http.rate_limit.response_percent", settings.responseRateLimitPercentRuntime()); } @@ -1105,6 +1259,7 @@ TEST_F(FaultFilterSettingsTest, CheckOverrideRuntimeKeys) { fault.set_abort_percent_runtime(std::string("fault.abort_percent_runtime")); fault.set_delay_percent_runtime(std::string("fault.delay_percent_runtime")); fault.set_abort_http_status_runtime(std::string("fault.abort_http_status_runtime")); + fault.set_abort_grpc_status_runtime(std::string("fault.abort_grpc_status_runtime")); fault.set_delay_duration_runtime(std::string("fault.delay_duration_runtime")); fault.set_max_active_faults_runtime(std::string("fault.max_active_faults_runtime")); fault.set_response_rate_limit_percent_runtime( @@ -1116,6 +1271,7 @@ TEST_F(FaultFilterSettingsTest, CheckOverrideRuntimeKeys) { EXPECT_EQ("fault.abort_percent_runtime", settings.abortPercentRuntime()); EXPECT_EQ("fault.delay_duration_runtime", settings.delayDurationRuntime()); EXPECT_EQ("fault.abort_http_status_runtime", settings.abortHttpStatusRuntime()); + EXPECT_EQ("fault.abort_grpc_status_runtime", settings.abortGrpcStatusRuntime()); EXPECT_EQ("fault.max_active_faults_runtime", settings.maxActiveFaultsRuntime()); EXPECT_EQ("fault.response_rate_limit_percent_runtime", settings.responseRateLimitPercentRuntime()); From e9066038f46b08ca41df8061b359acc22d948861 Mon Sep 17 00:00:00 2001 From: Keith Smiley Date: Mon, 27 Apr 2020 14:46:16 -0700 Subject: [PATCH 036/909] ci: Disable homebrew auto-updating (#10963) This can introduce flakiness when GitHub returns error codes. The VMs' brew environments should be up to date "enough" anyways. Signed-off-by: Keith Smiley --- ci/mac_ci_setup.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ci/mac_ci_setup.sh b/ci/mac_ci_setup.sh index b58c3a3eeed4..378b36787588 100755 --- a/ci/mac_ci_setup.sh +++ b/ci/mac_ci_setup.sh @@ -6,6 +6,8 @@ # https://github.com/actions/virtual-environments/blob/master/images/macos/macos-10.15-Readme.md for # a list of pre-installed tools in the macOS image. +export HOMEBREW_NO_AUTO_UPDATE=1 + function is_installed { brew ls --versions "$1" >/dev/null } From 9e3dfc3f8899434d17c2e1268be747bf5471d00c Mon Sep 17 00:00:00 2001 From: danzh Date: Mon, 27 Apr 2020 19:33:02 -0400 Subject: [PATCH 037/909] quiche: enable some quiche tests in CI (#10461) Enable tests with tag "test_included" in @com_googlesource_quiche:// in all the CI. Risk Level: Medium, quiche test failure will fail all the CIs. Signed-off-by: Dan Zhang --- bazel/external/quiche.BUILD | 15 +++++++++++---- ci/do_ci.sh | 11 +++++++---- .../quiche/platform/string_utils.cc | 2 +- 3 files changed, 19 insertions(+), 9 deletions(-) diff --git a/bazel/external/quiche.BUILD b/bazel/external/quiche.BUILD index eefb54e2fcd2..770eb720b9a3 100644 --- a/bazel/external/quiche.BUILD +++ b/bazel/external/quiche.BUILD @@ -59,15 +59,21 @@ quiche_copts = select({ # Remove these after upstream fix. 
"-Wno-unused-parameter", "-Wno-unused-function", - "-Wno-unused-const-variable", - "-Wno-type-limits", # quic_inlined_frame.h uses offsetof() to optimize memory usage in frames. "-Wno-invalid-offsetof", - "-Wno-type-limits", - "-Wno-return-type", ], }) +test_suite( + name = "ci_tests", + tests = [ + "http2_platform_api_test", + "quic_platform_api_test", + "quiche_common_test", + "spdy_platform_api_test", + ], +) + envoy_cc_test_library( name = "http2_test_tools_random", srcs = ["quiche/http2/test_tools/http2_random.cc"], @@ -3553,6 +3559,7 @@ envoy_cc_test( name = "spdy_core_header_block_test", srcs = ["quiche/spdy/core/spdy_header_block_test.cc"], copts = quiche_copts, + coverage = False, repository = "@envoy", tags = ["nofips"], deps = [ diff --git a/ci/do_ci.sh b/ci/do_ci.sh index cc321a6dffca..05cedd54d8da 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -95,9 +95,12 @@ CI_TARGET=$1 if [[ $# -gt 1 ]]; then shift - TEST_TARGETS=$* + COVERAGE_TEST_TARGETS=$* + TEST_TARGETS="$COVERAGE_TEST_TARGETS" else - TEST_TARGETS=//test/... + # Coverage test will add QUICHE tests by itself. + COVERAGE_TEST_TARGETS=//test/... + TEST_TARGETS="${COVERAGE_TEST_TARGETS} @com_googlesource_quiche//:ci_tests" fi if [[ "$CI_TARGET" == "bazel.release" ]]; then @@ -259,14 +262,14 @@ elif [[ "$CI_TARGET" == "bazel.api" ]]; then exit 0 elif [[ "$CI_TARGET" == "bazel.coverage" ]]; then setup_clang_toolchain - echo "bazel coverage build with tests ${TEST_TARGETS}" + echo "bazel coverage build with tests ${COVERAGE_TEST_TARGETS}" # Reduce the amount of memory Bazel tries to use to prevent it from launching too many subprocesses. # This should prevent the system from running out of memory and killing tasks. See discussion on # https://github.com/envoyproxy/envoy/pull/5611. 
[ -z "$CIRCLECI" ] || export BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS} --local_ram_resources=12288" - test/run_envoy_bazel_coverage.sh ${TEST_TARGETS} + test/run_envoy_bazel_coverage.sh ${COVERAGE_TEST_TARGETS} collect_build_profile coverage exit 0 elif [[ "$CI_TARGET" == "bazel.clang_tidy" ]]; then diff --git a/source/extensions/quic_listeners/quiche/platform/string_utils.cc b/source/extensions/quic_listeners/quiche/platform/string_utils.cc index 85452204d9ca..957c61fc3d7e 100644 --- a/source/extensions/quic_listeners/quiche/platform/string_utils.cc +++ b/source/extensions/quic_listeners/quiche/platform/string_utils.cc @@ -88,7 +88,7 @@ bool HexDecodeToUInt32(absl::string_view data, uint32_t* out) { std::string byte_string = absl::HexStringToBytes(data_padded); - RELEASE_ASSERT(byte_string.size() == 4u, "padded dtat is not 4 byte long."); + RELEASE_ASSERT(byte_string.size() == 4u, "padded data is not 4 byte long."); uint32_t bytes; memcpy(&bytes, byte_string.data(), byte_string.length()); *out = ntohl(bytes); From 5649b832c86f00abe8caea82e9731bd7a8281faf Mon Sep 17 00:00:00 2001 From: Dmitri Dolguikh Date: Mon, 27 Apr 2020 21:14:34 -0700 Subject: [PATCH 038/909] Fixed test build failures under gcc 9 (#10965) Signed-off-by: Dmitri Dolguikh --- test/common/stats/stat_merger_fuzz_test.cc | 2 +- .../http/common/fuzz/filter_fuzz_test.cc | 48 +++++++++---------- test/server/filter_chain_benchmark_test.cc | 2 + 3 files changed, 27 insertions(+), 25 deletions(-) diff --git a/test/common/stats/stat_merger_fuzz_test.cc b/test/common/stats/stat_merger_fuzz_test.cc index 44077aa82e24..7fdcb55033bd 100644 --- a/test/common/stats/stat_merger_fuzz_test.cc +++ b/test/common/stats/stat_merger_fuzz_test.cc @@ -31,7 +31,7 @@ void testDynamicEncoding(absl::string_view data, SymbolTable& symbol_table) { // TODO(#10008): We should remove the "1 +" below, so we can get empty // segments, which trigger some inconsistent handling as described in that // bug. - uint32_t num_bytes = 1 + data[index] & 0x7; + uint32_t num_bytes = (1 + data[index]) & 0x7; num_bytes = std::min(static_cast(data.size() - 1), num_bytes); // restrict number up to the size of data diff --git a/test/extensions/filters/http/common/fuzz/filter_fuzz_test.cc b/test/extensions/filters/http/common/fuzz/filter_fuzz_test.cc index 6b076f0da300..8a20604d7a03 100644 --- a/test/extensions/filters/http/common/fuzz/filter_fuzz_test.cc +++ b/test/extensions/filters/http/common/fuzz/filter_fuzz_test.cc @@ -13,30 +13,30 @@ namespace Extensions { namespace HttpFilters { DEFINE_PROTO_FUZZER(const test::extensions::filters::http::FilterFuzzTestCase& input) { - static PostProcessorRegistration reg = {[](test::extensions::filters::http::FilterFuzzTestCase* - input, - unsigned int seed) { - // This ensures that the mutated configs all have valid filter names and type_urls. The list of - // names and type_urls is pulled from the NamedHttpFilterConfigFactory. All Envoy extensions are - // built with this test (see BUILD file). - // This post-processor mutation is applied only when libprotobuf-mutator calls mutate on an - // input, and *not* during fuzz target execution. Replaying a corpus through the fuzzer will not - // be affected by the post-processor mutation. - static const std::vector filter_names = Registry::FactoryRegistry< - Server::Configuration::NamedHttpFilterConfigFactory>::registeredNames(); - static const auto factories = - Registry::FactoryRegistry::factories(); - // Choose a valid filter name. 
- if (std::find(filter_names.begin(), filter_names.end(), input->config().name()) == - std::end(filter_names)) { - absl::string_view filter_name = filter_names[seed % filter_names.size()]; - input->mutable_config()->set_name(std::string(filter_name)); - } - // Set the corresponding type_url for Any. - auto& factory = factories.at(input->config().name()); - input->mutable_config()->mutable_typed_config()->set_type_url(absl::StrCat( - "type.googleapis.com/", factory->createEmptyConfigProto()->GetDescriptor()->full_name())); - }}; + ABSL_ATTRIBUTE_UNUSED static PostProcessorRegistration reg = { + [](test::extensions::filters::http::FilterFuzzTestCase* input, unsigned int seed) { + // This ensures that the mutated configs all have valid filter names and type_urls. The list + // of names and type_urls is pulled from the NamedHttpFilterConfigFactory. All Envoy + // extensions are built with this test (see BUILD file). This post-processor mutation is + // applied only when libprotobuf-mutator calls mutate on an input, and *not* during fuzz + // target execution. Replaying a corpus through the fuzzer will not be affected by the + // post-processor mutation. + static const std::vector filter_names = Registry::FactoryRegistry< + Server::Configuration::NamedHttpFilterConfigFactory>::registeredNames(); + static const auto factories = Registry::FactoryRegistry< + Server::Configuration::NamedHttpFilterConfigFactory>::factories(); + // Choose a valid filter name. + if (std::find(filter_names.begin(), filter_names.end(), input->config().name()) == + std::end(filter_names)) { + absl::string_view filter_name = filter_names[seed % filter_names.size()]; + input->mutable_config()->set_name(std::string(filter_name)); + } + // Set the corresponding type_url for Any. + auto& factory = factories.at(input->config().name()); + input->mutable_config()->mutable_typed_config()->set_type_url( + absl::StrCat("type.googleapis.com/", + factory->createEmptyConfigProto()->GetDescriptor()->full_name())); + }}; try { // Catch invalid header characters. diff --git a/test/server/filter_chain_benchmark_test.cc b/test/server/filter_chain_benchmark_test.cc index d3e7b58b1499..322a3de92cc2 100644 --- a/test/server/filter_chain_benchmark_test.cc +++ b/test/server/filter_chain_benchmark_test.cc @@ -166,6 +166,8 @@ const char YamlSingleDstPortBottom[] = R"EOF( class FilterChainBenchmarkFixture : public benchmark::Fixture { public: + using benchmark::Fixture::SetUp; + void SetUp(::benchmark::State& state) override { int64_t input_size = state.range(0); std::vector port_chains; From ed99d310f1dc23af3d09ed54f9f3f1cbee77886d Mon Sep 17 00:00:00 2001 From: Marco Magdy Date: Mon, 27 Apr 2020 21:16:36 -0700 Subject: [PATCH 039/909] aws_lambda filter: add last chunk when encoding data (#10959) This is a correction of my previous misunderstanding of how filters encoding/decoding work with buffering. 
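For reference, the buffering pattern the fix below relies on can be summarized in a short sketch. The free-function form and the name encodeDataSketch are illustrative only (the real code is a member of the Lambda filter), and it assumes Envoy's standard Http::StreamEncoderFilterCallbacks API as used in the diff:

#include "common/buffer/buffer_impl.h"
#include "envoy/http/filter.h"

// Hedged sketch: every chunk, including the final one (end_stream == true), must be
// added to the encoding buffer before the buffered JSON payload is transformed.
// Skipping addEncodedData() whenever a buffer already exists silently drops the last chunk.
Http::FilterDataStatus encodeDataSketch(Http::StreamEncoderFilterCallbacks& callbacks,
                                        Buffer::Instance& data, bool end_stream) {
  if (!end_stream) {
    // Keep buffering until the complete payload has arrived.
    return Http::FilterDataStatus::StopIterationAndBuffer;
  }
  // Always append the last chunk before transforming the buffered payload.
  callbacks.addEncodedData(data, false);
  callbacks.modifyEncodingBuffer([](Buffer::Instance& enc_buf) {
    Buffer::OwnedImpl transformed;
    // ... build the HTTP response body from the buffered JSON payload ...
    enc_buf.drain(enc_buf.length());
    enc_buf.move(transformed);
  });
  return Http::FilterDataStatus::Continue;
}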
Signed-off-by: Marco Magdy --- .../filters/http/aws_lambda/aws_lambda_filter.cc | 4 +--- .../http/aws_lambda/aws_lambda_filter_test.cc | 13 +++++++++++++ 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/source/extensions/filters/http/aws_lambda/aws_lambda_filter.cc b/source/extensions/filters/http/aws_lambda/aws_lambda_filter.cc index 89e39663e3f0..46dbb33a2624 100644 --- a/source/extensions/filters/http/aws_lambda/aws_lambda_filter.cc +++ b/source/extensions/filters/http/aws_lambda/aws_lambda_filter.cc @@ -251,9 +251,7 @@ Http::FilterDataStatus Filter::encodeData(Buffer::Instance& data, bool end_strea } ENVOY_LOG(trace, "Tranforming JSON payload to HTTP response."); - if (!encoder_callbacks_->encodingBuffer()) { - encoder_callbacks_->addEncodedData(data, false); - } + encoder_callbacks_->addEncodedData(data, false); const Buffer::Instance& encoding_buffer = *encoder_callbacks_->encodingBuffer(); encoder_callbacks_->modifyEncodingBuffer([this](Buffer::Instance& enc_buf) { Buffer::OwnedImpl body; diff --git a/test/extensions/filters/http/aws_lambda/aws_lambda_filter_test.cc b/test/extensions/filters/http/aws_lambda/aws_lambda_filter_test.cc index de30165175b2..3d2a61a50d43 100644 --- a/test/extensions/filters/http/aws_lambda/aws_lambda_filter_test.cc +++ b/test/extensions/filters/http/aws_lambda/aws_lambda_filter_test.cc @@ -471,6 +471,19 @@ TEST_F(AwsLambdaFilterTest, EncodeDataJsonModeStopIterationAndBuffer) { EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, result); } +TEST_F(AwsLambdaFilterTest, EncodeDataAddsLastChunk) { + setupFilter({arn_, InvocationMode::Synchronous, false /*passthrough*/}); + filter_->resolveSettings(); + Http::TestResponseHeaderMapImpl headers; + headers.setStatus(200); + filter_->encodeHeaders(headers, false /*end_stream*/); + + Buffer::OwnedImpl buf(std::string("foobar")); + EXPECT_CALL(encoder_callbacks_, addEncodedData(_, false)); + EXPECT_CALL(encoder_callbacks_, encodingBuffer).WillRepeatedly(Return(&buf)); + filter_->encodeData(buf, true /*end_stream*/); +} + /** * encodeData() data in JSON mode without a 'body' key should translate the 'headers' key to HTTP * headers while ignoring any HTTP/2 pseudo-headers. From 61d5aa761097bc0468bd8eccf6ff8e4149dbe021 Mon Sep 17 00:00:00 2001 From: Stephan Zuercher Date: Mon, 27 Apr 2020 21:17:33 -0700 Subject: [PATCH 040/909] server: initialize runtime loader singleton so that deprecations fail validate mode (#10759) Prior to this change the config validation mode (--mode validate) would not detect deprecated fields used in the bootstrap config. 
Signed-off-by: Stephan Zuercher --- source/server/config_validation/server.cc | 5 +- source/server/config_validation/server.h | 4 +- test/server/config_validation/BUILD | 7 ++ test/server/config_validation/server_test.cc | 82 +++++++++++++++++-- .../test_data/runtime_config.yaml | 27 ++++++ 5 files changed, 116 insertions(+), 9 deletions(-) create mode 100644 test/server/config_validation/test_data/runtime_config.yaml diff --git a/source/server/config_validation/server.cc b/source/server/config_validation/server.cc index 1ae5c43dc798..e7faf1a999cc 100644 --- a/source/server/config_validation/server.cc +++ b/source/server/config_validation/server.cc @@ -93,7 +93,8 @@ void ValidationInstance::initialize(const Options& options, messageValidationContext().staticValidationVisitor(), *api_); listener_manager_ = std::make_unique(*this, *this, *this, false); thread_local_.registerThread(*dispatcher_, true); - runtime_loader_ = component_factory.createRuntime(*this, initial_config); + runtime_singleton_ = std::make_unique( + component_factory.createRuntime(*this, initial_config)); secret_manager_ = std::make_unique(admin().getConfigTracker()); ssl_context_manager_ = createContextManager("ssl_context_manager", api_->timeSource()); cluster_manager_factory_ = std::make_unique( @@ -101,7 +102,7 @@ void ValidationInstance::initialize(const Options& options, dispatcher(), localInfo(), *secret_manager_, messageValidationContext(), *api_, http_context_, grpc_context_, accessLogManager(), singletonManager(), time_system_); config_.initialize(bootstrap, *this, *cluster_manager_factory_); - runtime_loader_->initialize(clusterManager()); + runtime().initialize(clusterManager()); clusterManager().setInitializedCb([this]() -> void { init_manager_.initialize(init_watcher_); }); } diff --git a/source/server/config_validation/server.h b/source/server/config_validation/server.h index 9111dea8a3a8..2cfb3d673dd1 100644 --- a/source/server/config_validation/server.h +++ b/source/server/config_validation/server.h @@ -85,7 +85,7 @@ class ValidationInstance final : Logger::Loggable, ListenerManager& listenerManager() override { return *listener_manager_; } Secret::SecretManager& secretManager() override { return *secret_manager_; } Runtime::RandomGenerator& random() override { return random_generator_; } - Runtime::Loader& runtime() override { return *runtime_loader_; } + Runtime::Loader& runtime() override { return Runtime::LoaderSingleton::get(); } void shutdown() override; bool isShutdown() override { return false; } void shutdownAdmin() override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } @@ -192,7 +192,7 @@ class ValidationInstance final : Logger::Loggable, Event::DispatcherPtr dispatcher_; Server::ValidationAdmin admin_; Singleton::ManagerPtr singleton_manager_; - Runtime::LoaderPtr runtime_loader_; + std::unique_ptr runtime_singleton_; Runtime::RandomGeneratorImpl random_generator_; std::unique_ptr ssl_context_manager_; Configuration::MainImpl config_; diff --git a/test/server/config_validation/BUILD b/test/server/config_validation/BUILD index e99e37a684b3..f4d848a58303 100644 --- a/test/server/config_validation/BUILD +++ b/test/server/config_validation/BUILD @@ -48,10 +48,16 @@ envoy_cc_test( ], ) +filegroup( + name = "server_test_data", + srcs = glob(["test_data/**"]), +) + envoy_cc_test( name = "server_test", srcs = ["server_test.cc"], data = [ + ":server_test_data", "//configs:example_configs", "//test/config_test:example_configs_test_setup.sh", ], @@ -65,6 +71,7 @@ envoy_cc_test( "//test/mocks/server:server_mocks", 
"//test/mocks/stats:stats_mocks", "//test/test_common:environment_lib", + "//test/test_common:registry_lib", "//test/test_common:utility_lib", ], ) diff --git a/test/server/config_validation/server_test.cc b/test/server/config_validation/server_test.cc index 98def0389851..cd310eaefadd 100644 --- a/test/server/config_validation/server_test.cc +++ b/test/server/config_validation/server_test.cc @@ -1,11 +1,14 @@ #include +#include "envoy/server/filter_config.h" + #include "server/config_validation/server.h" #include "test/integration/server.h" #include "test/mocks/server/mocks.h" #include "test/mocks/stats/mocks.h" #include "test/test_common/environment.h" +#include "test/test_common/registry.h" namespace Envoy { namespace Server { @@ -14,14 +17,14 @@ namespace { // Test param is the path to the config file to validate. class ValidationServerTest : public testing::TestWithParam { public: - static void SetupTestDirectory() { + static void setupTestDirectory() { TestEnvironment::exec( {TestEnvironment::runfilesPath("test/config_test/example_configs_test_setup.sh")}); directory_ = TestEnvironment::temporaryDirectory() + "/test/config_test/"; } static void SetUpTestSuite() { // NOLINT(readability-identifier-naming) - SetupTestDirectory(); + setupTestDirectory(); } protected: @@ -38,8 +41,34 @@ std::string ValidationServerTest::directory_ = ""; // tests than set of tests for ValidationServerTest. class ValidationServerTest_1 : public ValidationServerTest { public: - static const std::vector GetAllConfigFiles() { - SetupTestDirectory(); + static const std::vector getAllConfigFiles() { + setupTestDirectory(); + + auto files = TestUtility::listFiles(ValidationServerTest::directory_, false); + + // Strip directory part. options_ adds it for each test. + for (auto& file : files) { + file = file.substr(directory_.length() + 1); + } + return files; + } +}; + +// RuntimeFeatureValidationServerTest is used to test validation with non-default runtime +// values. +class RuntimeFeatureValidationServerTest : public ValidationServerTest { +public: + static void SetUpTestSuite() { // NOLINT(readability-identifier-naming) + setupTestDirectory(); + } + + static void setupTestDirectory() { + directory_ = + TestEnvironment::runfilesDirectory("envoy/test/server/config_validation/test_data/"); + } + + static const std::vector getAllConfigFiles() { + setupTestDirectory(); auto files = TestUtility::listFiles(ValidationServerTest::directory_, false); @@ -49,6 +78,33 @@ class ValidationServerTest_1 : public ValidationServerTest { } return files; } + + class TestConfigFactory : public Configuration::NamedNetworkFilterConfigFactory { + public: + std::string name() const override { return "envoy.filters.network.test"; } + + Network::FilterFactoryCb createFilterFactoryFromProto(const Protobuf::Message&, + Configuration::FactoryContext&) override { + // Validate that the validation server loaded the runtime data and installed the singleton. 
+ auto* runtime = Runtime::LoaderSingleton::getExisting(); + if (runtime == nullptr) { + throw EnvoyException("Runtime::LoaderSingleton == nullptr"); + } + + if (!runtime->threadsafeSnapshot()->getBoolean("test.runtime.loaded", false)) { + throw EnvoyException( + "Found Runtime::LoaderSingleton, got wrong value for test.runtime.loaded"); + } + + return [](Network::FilterManager&) {}; + } + + ProtobufTypes::MessagePtr createEmptyConfigProto() override { + return ProtobufTypes::MessagePtr{new ProtobufWkt::Struct()}; + } + + bool isTerminalFilter() override { return true; } + }; }; TEST_P(ValidationServerTest, Validate) { @@ -91,7 +147,23 @@ TEST_P(ValidationServerTest_1, RunWithoutCrash) { } INSTANTIATE_TEST_SUITE_P(AllConfigs, ValidationServerTest_1, - ::testing::ValuesIn(ValidationServerTest_1::GetAllConfigFiles())); + ::testing::ValuesIn(ValidationServerTest_1::getAllConfigFiles())); + +TEST_P(RuntimeFeatureValidationServerTest, ValidRuntimeLoaderSingleton) { + TestConfigFactory factory; + Registry::InjectFactory registration(factory); + + auto local_address = Network::Utility::getLocalAddress(options_.localAddressIpVersion()); + + // If this fails, it's likely because TestConfigFactory threw an exception related to the + // runtime loader. + ASSERT_TRUE(validateConfig(options_, local_address, component_factory_, + Thread::threadFactoryForTest(), Filesystem::fileSystemForTest())); +} + +INSTANTIATE_TEST_SUITE_P( + AllConfigs, RuntimeFeatureValidationServerTest, + ::testing::ValuesIn(RuntimeFeatureValidationServerTest::getAllConfigFiles())); } // namespace } // namespace Server diff --git a/test/server/config_validation/test_data/runtime_config.yaml b/test/server/config_validation/test_data/runtime_config.yaml new file mode 100644 index 000000000000..e4b29bc158be --- /dev/null +++ b/test/server/config_validation/test_data/runtime_config.yaml @@ -0,0 +1,27 @@ +--- +node: + id: "test" +layered_runtime: + layers: + - name: static-layer + static_layer: + "test.runtime.loaded": true +static_resources: + listeners: + - name: "test.listener" + address: + socket_address: + protocol: TCP + address: 0.0.0.0 + port_value: 0 + filter_chains: + - filters: + - name: envoy.filters.network.test + typed_config: + "@type": type.googleapis.com/google.protobuf.Struct +admin: + access_log_path: "/dev/null" + address: + socket_address: + address: 0.0.0.0 + port_value: 9000 From c46c0a2ef829b0e7fcb099c969c6ef573e8b758f Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Tue, 28 Apr 2020 09:39:57 -0400 Subject: [PATCH 041/909] router: adding CONNECT support to upstreams (not prod-ready) (#10623) CONNECT won't be very useful without proxy protocol support, and needs some pausing for security reasons, both coming soon. 
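The router refactor below keys upstream selection off an absl::variant holding either an HTTP or a TCP connection pool pointer. As a minimal, self-contained reminder of that idiom (the pool types here are generic placeholders, not Envoy's classes):

#include <iostream>

#include "absl/types/variant.h"

struct HttpPool {};
struct TcpPool {};
using PoolVariant = absl::variant<HttpPool*, TcpPool*>;

int main() {
  HttpPool http;
  PoolVariant pool = &http;
  // holds_alternative tells us which pool type is active; absl::get extracts it.
  if (absl::holds_alternative<HttpPool*>(pool)) {
    std::cout << "dispatching via the HTTP pool: " << absl::get<HttpPool*>(pool) << "\n";
  } else {
    std::cout << "dispatching via the TCP pool\n";
  }
  return 0;
}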
Risk Level: Medium (router refactor) Testing: unit tests and integration tests Docs Changes: some Release Notes: n/a (still hidden) Part of #1630 #1451 Signed-off-by: Alyssa Wilk --- docs/root/intro/arch_overview/http/http.rst | 2 +- .../http/{websocket.rst => upgrades.rst} | 44 +++- source/common/router/router.cc | 63 ++++- source/common/router/router.h | 6 + source/common/router/upstream_request.cc | 73 ++++++ source/common/router/upstream_request.h | 59 +++++ test/common/router/BUILD | 20 ++ test/common/router/upstream_request_test.cc | 245 ++++++++++++++++++ .../tcp_tunneling_integration_test.cc | 32 ++- 9 files changed, 509 insertions(+), 35 deletions(-) rename docs/root/intro/arch_overview/http/{websocket.rst => upgrades.rst} (63%) create mode 100644 test/common/router/upstream_request_test.cc diff --git a/docs/root/intro/arch_overview/http/http.rst b/docs/root/intro/arch_overview/http/http.rst index f5729560e0f6..33b7ebffa6fa 100644 --- a/docs/root/intro/arch_overview/http/http.rst +++ b/docs/root/intro/arch_overview/http/http.rst @@ -7,5 +7,5 @@ HTTP http_connection_management http_filters http_routing - websocket + upgrades http_proxy diff --git a/docs/root/intro/arch_overview/http/websocket.rst b/docs/root/intro/arch_overview/http/upgrades.rst similarity index 63% rename from docs/root/intro/arch_overview/http/websocket.rst rename to docs/root/intro/arch_overview/http/upgrades.rst index fa4e0b1f055d..6f3b88728273 100644 --- a/docs/root/intro/arch_overview/http/websocket.rst +++ b/docs/root/intro/arch_overview/http/upgrades.rst @@ -1,19 +1,19 @@ .. _arch_overview_websocket: -WebSocket and HTTP upgrades +HTTP upgrades =========================== -Envoy Upgrade support is intended mainly for WebSocket but may be used for non-WebSocket -upgrades as well. Upgrades pass both the HTTP headers and the upgrade payload +Envoy Upgrade support is intended mainly for WebSocket and CONNECT support, but may be used for +arbitrary upgrades as well. Upgrades pass both the HTTP headers and the upgrade payload through an HTTP filter chain. One may configure the :ref:`upgrade_configs ` with or without custom filter chains. If only the :ref:`upgrade_type ` -is specified, both the upgrade headers, any request and response body, and WebSocket payload will +is specified, both the upgrade headers, any request and response body, and HTTP data payload will pass through the default HTTP filter chain. To avoid the use of HTTP-only filters for upgrade payload, one can set up custom :ref:`filters ` -for the given upgrade type, up to and including only using the router filter to send the WebSocket +for the given upgrade type, up to and including only using the router filter to send the HTTP data upstream. Upgrades can be enabled or disabled on a :ref:`per-route ` basis. 
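The upgrade_configs described above are plain fields on the HttpConnectionManager proto, so they can be populated programmatically as well as via YAML. A hedged sketch in the style of the integration tests later in this series, using "websocket" as an illustrative upgrade type and assuming the standard generated proto header layout:

#include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h"

// Enable the default upgrade filter chain for WebSocket upgrades on an HCM config.
void enableWebSocketUpgrades(
    envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& hcm) {
  // Each upgrade_configs entry turns on upgrade handling for one upgrade type;
  // upgrades can additionally be enabled or disabled per route.
  auto* upgrade = hcm.add_upgrade_configs();
  upgrade->set_upgrade_type("websocket");
}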
@@ -32,12 +32,12 @@ laid out below, but custom filter chains can only be configured on a per-HttpCon | F | F | F | +-----------------------+-------------------------+-------------------+ -Note that the statistics for upgrades are all bundled together so WebSocket +Note that the statistics for upgrades are all bundled together so WebSocket and other upgrades :ref:`statistics ` are tracked by stats such as downstream_cx_upgrades_total and downstream_cx_upgrades_active -Handling HTTP/2 hops -^^^^^^^^^^^^^^^^^^^^ +Websocket over HTTP/2 hops +^^^^^^^^^^^^^^^^^^^^^^^^^^ While HTTP/2 support for WebSockets is off by default, Envoy does support tunneling WebSockets over HTTP/2 streams for deployments that prefer a uniform HTTP/2 mesh throughout; this enables, for example, @@ -61,3 +61,31 @@ a GET method on the final Envoy-Upstream hop. Note that the HTTP/2 upgrade path has very strict HTTP/1.1 compliance, so will not proxy WebSocket upgrade requests or responses with bodies. + +.. TODO(alyssawilk) unhide this when unhiding config +.. CONNECT support +.. ^^^^^^^^^^^^^^^ + +.. Envoy CONNECT support is off by default (Envoy will send an internally generated 403 in response to +.. CONNECT requests). CONNECT support can be enabled via the upgrade options described above, setting +.. the upgrade value to the special keyword "CONNECT". + +.. While a CONNECT request may have a path for HTTP/2, in general (and for HTTP/1.1) CONNECT requests do +.. not have a path, and can only be matched using a +.. :ref:`connect_matcher ` +.. +.. Envoy can handle CONNECT in one of two ways, either proxying the CONNECT headers through as if they +.. were any other request, and letting the upstream terminate the CONNECT request, or by terminating the +.. CONNECT request, and forwarding the payload as raw TCP data. When CONNECT upgrade configuration is +.. set up, the default behavior is to proxy the CONNECT request, treating it like any other request using
+.. the upgrade path.
+.. If termination is desired, this can be accomplished by setting
+.. :ref:`connect_config `
+.. If that message is present for CONNECT requests, the router filter will strip the request headers,
+.. and forward the HTTP payload upstream. On receipt of initial TCP data from upstream, the router
+.. will synthesize 200 response headers, and then forward the TCP data as the HTTP response body.
+
+.. .. warning::
+.. This mode of CONNECT support can create major security holes if configured incorrectly, as the upstream
+.. will be forwarded *unsanitized* headers if they are in the body payload. Please use with caution.
+
 diff --git a/source/common/router/router.cc b/source/common/router/router.cc index e507e06a4a5c..140e44f5c0a7 100644 --- a/source/common/router/router.cc +++ b/source/common/router/router.cc @@ -115,6 +115,16 @@ bool convertRequestHeadersForInternalRedirect(Http::RequestHeaderMap& downstream constexpr uint64_t TimeoutPrecisionFactor = 100; +Http::ConnectionPool::Instance* +httpPool(absl::variant pool) { + return absl::get(pool); +} + +Tcp::ConnectionPool::Instance* +tcpPool(absl::variant pool) { + return absl::get(pool); +} + const absl::string_view getPath(const Http::RequestHeaderMap& headers) { return headers.Path() ? 
headers.Path()->value().getStringView() : ""; } @@ -549,12 +559,10 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, } } - Http::ConnectionPool::Instance* http_pool = getHttpConnPool(); Upstream::HostDescriptionConstSharedPtr host; + Filter::HttpOrTcpPool conn_pool = createConnPool(host); - if (http_pool) { - host = http_pool->host(); - } else { + if (!host) { sendNoHealthyUpstreamResponse(); return Http::FilterHeadersStatus::StopIteration; } @@ -644,8 +652,7 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, // Hang onto the modify_headers function for later use in handling upstream responses. modify_headers_ = modify_headers; - UpstreamRequestPtr upstream_request = - std::make_unique(*this, std::make_unique(*http_pool)); + UpstreamRequestPtr upstream_request = createUpstreamRequest(conn_pool); upstream_request->moveIntoList(std::move(upstream_request), upstream_requests_); upstream_requests_.front()->encodeHeaders(end_stream); if (end_stream) { @@ -655,6 +662,38 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, return Http::FilterHeadersStatus::StopIteration; } +Filter::HttpOrTcpPool Filter::createConnPool(Upstream::HostDescriptionConstSharedPtr& host) { + Filter::HttpOrTcpPool conn_pool; + bool should_tcp_proxy = route_entry_->connectConfig().has_value() && + downstream_headers_->Method()->value().getStringView() == + Http::Headers::get().MethodValues.Connect; + + if (!should_tcp_proxy) { + conn_pool = getHttpConnPool(); + if (httpPool(conn_pool)) { + host = httpPool(conn_pool)->host(); + } + } else { + transport_socket_options_ = Network::TransportSocketOptionsUtility::fromFilterState( + *callbacks_->streamInfo().filterState()); + conn_pool = config_.cm_.tcpConnPoolForCluster(route_entry_->clusterName(), + Upstream::ResourcePriority::Default, this); + if (tcpPool(conn_pool)) { + host = tcpPool(conn_pool)->host(); + } + } + return conn_pool; +} + +UpstreamRequestPtr Filter::createUpstreamRequest(Filter::HttpOrTcpPool conn_pool) { + if (absl::holds_alternative(conn_pool)) { + return std::make_unique(*this, + std::make_unique(*httpPool(conn_pool))); + } + return std::make_unique(*this, + std::make_unique(tcpPool(conn_pool))); +} + Http::ConnectionPool::Instance* Filter::getHttpConnPool() { // Choose protocol based on cluster configuration and downstream connection // Note: Cluster may downgrade HTTP2 to HTTP1 based on runtime configuration. 
@@ -1454,19 +1493,15 @@ void Filter::doRetry() { attempt_count_++; ASSERT(pending_retries_ > 0); pending_retries_--; - UpstreamRequestPtr upstream_request; - - Http::ConnectionPool::Instance* conn_pool = getHttpConnPool(); - if (conn_pool) { - upstream_request = - std::make_unique(*this, std::make_unique(*conn_pool)); - } - if (!upstream_request) { + Upstream::HostDescriptionConstSharedPtr host; + Filter::HttpOrTcpPool conn_pool = createConnPool(host); + if (!host) { sendNoHealthyUpstreamResponse(); cleanup(); return; } + UpstreamRequestPtr upstream_request = createUpstreamRequest(conn_pool); if (include_attempt_count_in_request_) { downstream_headers_->setEnvoyAttemptCount(attempt_count_); diff --git a/source/common/router/router.h b/source/common/router/router.h index 9f532d08fb5f..c74a33323377 100644 --- a/source/common/router/router.h +++ b/source/common/router/router.h @@ -467,6 +467,12 @@ class Filter : Logger::Loggable, const Upstream::ClusterInfo& cluster, const VirtualCluster* vcluster, Runtime::Loader& runtime, Runtime::RandomGenerator& random, Event::Dispatcher& dispatcher, Upstream::ResourcePriority priority) PURE; + + using HttpOrTcpPool = + absl::variant; + HttpOrTcpPool createConnPool(Upstream::HostDescriptionConstSharedPtr& host); + UpstreamRequestPtr createUpstreamRequest(Filter::HttpOrTcpPool conn_pool); + Http::ConnectionPool::Instance* getHttpConnPool(); void maybeDoShadowing(); bool maybeRetryReset(Http::StreamResetReason reset_reason, UpstreamRequest& upstream_request); diff --git a/source/common/router/upstream_request.cc b/source/common/router/upstream_request.cc index 085e6c4a7142..ed6a0c7e6905 100644 --- a/source/common/router/upstream_request.cc +++ b/source/common/router/upstream_request.cc @@ -490,6 +490,16 @@ void HttpConnPool::newStream(GenericConnectionPoolCallbacks* callbacks) { } } +void TcpConnPool::onPoolReady(Tcp::ConnectionPool::ConnectionDataPtr&& conn_data, + Upstream::HostDescriptionConstSharedPtr host) { + upstream_handle_ = nullptr; + Network::Connection& latched_conn = conn_data->connection(); + auto upstream = + std::make_unique(callbacks_->upstreamRequest(), std::move(conn_data)); + callbacks_->onPoolReady(std::move(upstream), host, latched_conn.localAddress(), + latched_conn.streamInfo()); +} + bool HttpConnPool::cancelAnyPendingRequest() { if (conn_pool_stream_handle_) { conn_pool_stream_handle_->cancel(); @@ -516,5 +526,68 @@ void HttpConnPool::onPoolReady(Http::RequestEncoder& request_encoder, request_encoder.getStream().connectionLocalAddress(), info); } +TcpUpstream::TcpUpstream(UpstreamRequest* upstream_request, + Tcp::ConnectionPool::ConnectionDataPtr&& upstream) + : upstream_request_(upstream_request), upstream_conn_data_(std::move(upstream)) { + upstream_conn_data_->connection().enableHalfClose(true); + upstream_conn_data_->addUpstreamCallbacks(*this); +} + +void TcpUpstream::encodeData(Buffer::Instance& data, bool end_stream) { + upstream_conn_data_->connection().write(data, end_stream); +} + +void TcpUpstream::encodeHeaders(const Http::RequestHeaderMap&, bool end_stream) { + if (end_stream) { + Buffer::OwnedImpl data; + upstream_conn_data_->connection().write(data, true); + } +} + +void TcpUpstream::encodeTrailers(const Http::RequestTrailerMap&) { + Buffer::OwnedImpl data; + upstream_conn_data_->connection().write(data, true); +} + +void TcpUpstream::readDisable(bool disable) { + if (upstream_conn_data_->connection().state() != Network::Connection::State::Open) { + return; + } + 
upstream_conn_data_->connection().readDisable(disable); +} + +void TcpUpstream::resetStream() { + upstream_request_ = nullptr; + upstream_conn_data_->connection().close(Network::ConnectionCloseType::NoFlush); +} + +void TcpUpstream::onUpstreamData(Buffer::Instance& data, bool end_stream) { + if (!sent_headers_) { + Http::ResponseHeaderMapPtr headers{ + Http::createHeaderMap({{Http::Headers::get().Status, "200"}})}; + upstream_request_->decodeHeaders(std::move(headers), false); + sent_headers_ = true; + } + upstream_request_->decodeData(data, end_stream); +} + +void TcpUpstream::onEvent(Network::ConnectionEvent event) { + if (event != Network::ConnectionEvent::Connected && upstream_request_) { + upstream_request_->onResetStream(Http::StreamResetReason::ConnectionTermination, ""); + } +} + +void TcpUpstream::onAboveWriteBufferHighWatermark() { + if (upstream_request_) { + upstream_request_->disableDataFromDownstreamForFlowControl(); + } +} + +void TcpUpstream::onBelowWriteBufferLowWatermark() { + if (upstream_request_) { + upstream_request_->enableDataFromDownstreamForFlowControl(); + } +} + } // namespace Router } // namespace Envoy diff --git a/source/common/router/upstream_request.h b/source/common/router/upstream_request.h index a10f42163abf..660d6e17b8b1 100644 --- a/source/common/router/upstream_request.h +++ b/source/common/router/upstream_request.h @@ -202,6 +202,41 @@ class HttpConnPool : public GenericConnPool, public Http::ConnectionPool::Callba GenericConnectionPoolCallbacks* callbacks_{}; }; +class TcpConnPool : public GenericConnPool, public Tcp::ConnectionPool::Callbacks { +public: + TcpConnPool(Tcp::ConnectionPool::Instance* conn_pool) : conn_pool_(conn_pool) {} + + void newStream(GenericConnectionPoolCallbacks* callbacks) override { + callbacks_ = callbacks; + upstream_handle_ = conn_pool_->newConnection(*this); + } + + bool cancelAnyPendingRequest() override { + if (upstream_handle_) { + upstream_handle_->cancel(Tcp::ConnectionPool::CancelPolicy::Default); + upstream_handle_ = nullptr; + return true; + } + return false; + } + absl::optional protocol() const override { return absl::nullopt; } + + // Tcp::ConnectionPool::Callbacks + void onPoolFailure(ConnectionPool::PoolFailureReason reason, + Upstream::HostDescriptionConstSharedPtr host) override { + upstream_handle_ = nullptr; + callbacks_->onPoolFailure(reason, "", host); + } + + void onPoolReady(Tcp::ConnectionPool::ConnectionDataPtr&& conn_data, + Upstream::HostDescriptionConstSharedPtr host) override; + +private: + Tcp::ConnectionPool::Instance* conn_pool_; + Tcp::ConnectionPool::Cancellable* upstream_handle_{}; + GenericConnectionPoolCallbacks* callbacks_{}; +}; + // A generic API which covers common functionality between HTTP and TCP upstreams. 
class GenericUpstream { public: @@ -261,5 +296,29 @@ class HttpUpstream : public GenericUpstream, public Http::StreamCallbacks { Http::RequestEncoder* request_encoder_{}; }; +class TcpUpstream : public GenericUpstream, public Tcp::ConnectionPool::UpstreamCallbacks { +public: + TcpUpstream(UpstreamRequest* upstream_request, Tcp::ConnectionPool::ConnectionDataPtr&& upstream); + + // GenericUpstream + void encodeData(Buffer::Instance& data, bool end_stream) override; + void encodeMetadata(const Http::MetadataMapVector&) override {} + void encodeHeaders(const Http::RequestHeaderMap&, bool end_stream) override; + void encodeTrailers(const Http::RequestTrailerMap&) override; + void readDisable(bool disable) override; + void resetStream() override; + + // Tcp::ConnectionPool::UpstreamCallbacks + void onUpstreamData(Buffer::Instance& data, bool end_stream) override; + void onEvent(Network::ConnectionEvent event) override; + void onAboveWriteBufferHighWatermark() override; + void onBelowWriteBufferLowWatermark() override; + +private: + UpstreamRequest* upstream_request_; + Tcp::ConnectionPool::ConnectionDataPtr upstream_conn_data_; + bool sent_headers_{}; +}; + } // namespace Router } // namespace Envoy diff --git a/test/common/router/BUILD b/test/common/router/BUILD index ed1e8e1adc28..500ec54046a1 100644 --- a/test/common/router/BUILD +++ b/test/common/router/BUILD @@ -322,6 +322,26 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "upstream_request_test", + srcs = ["upstream_request_test.cc"], + deps = [ + "//source/common/buffer:buffer_lib", + "//source/common/router:router_lib", + "//source/common/upstream:upstream_includes", + "//source/common/upstream:upstream_lib", + "//test/common/http:common_lib", + "//test/mocks/http:http_mocks", + "//test/mocks/network:network_mocks", + "//test/mocks/router:router_mocks", + "//test/mocks/server:server_mocks", + "//test/mocks/upstream:upstream_mocks", + "//test/test_common:environment_lib", + "//test/test_common:simulated_time_system_lib", + "//test/test_common:utility_lib", + ], +) + envoy_cc_test( name = "header_formatter_test", srcs = ["header_formatter_test.cc"], diff --git a/test/common/router/upstream_request_test.cc b/test/common/router/upstream_request_test.cc new file mode 100644 index 000000000000..d9900dc2a588 --- /dev/null +++ b/test/common/router/upstream_request_test.cc @@ -0,0 +1,245 @@ +#include "common/buffer/buffer_impl.h" +#include "common/router/config_impl.h" +#include "common/router/router.h" +#include "common/router/upstream_request.h" + +#include "test/common/http/common.h" +#include "test/mocks/http/mocks.h" +#include "test/mocks/router/mocks.h" +#include "test/mocks/server/mocks.h" +#include "test/mocks/tcp/mocks.h" +#include "test/test_common/utility.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::_; +using testing::AnyNumber; +using testing::NiceMock; +using testing::Return; +using testing::ReturnRef; + +namespace Envoy { +namespace Router { +namespace { + +class MockGenericConnPool : public GenericConnPool { + MOCK_METHOD(void, newStream, (GenericConnectionPoolCallbacks * request)); + MOCK_METHOD(bool, cancelAnyPendingRequest, ()); + MOCK_METHOD(absl::optional, protocol, (), (const)); +}; + +class MockGenericConnectionPoolCallbacks : public GenericConnectionPoolCallbacks { +public: + MOCK_METHOD(void, onPoolFailure, + (Http::ConnectionPool::PoolFailureReason reason, + absl::string_view transport_failure_reason, + Upstream::HostDescriptionConstSharedPtr host)); + MOCK_METHOD(void, onPoolReady, + 
(std::unique_ptr && upstream, + Upstream::HostDescriptionConstSharedPtr host, + const Network::Address::InstanceConstSharedPtr& upstream_local_address, + const StreamInfo::StreamInfo& info)); + MOCK_METHOD(UpstreamRequest*, upstreamRequest, ()); +}; + +class MockRouterFilterInterface : public RouterFilterInterface { +public: + MockRouterFilterInterface() + : config_("prefix.", context_, ShadowWriterPtr(new MockShadowWriter()), router_proto) { + auto cluster_info = new NiceMock(); + cluster_info->timeout_budget_stats_ = absl::nullopt; + cluster_info_.reset(cluster_info); + ON_CALL(*this, callbacks()).WillByDefault(Return(&callbacks_)); + ON_CALL(*this, config()).WillByDefault(ReturnRef(config_)); + ON_CALL(*this, cluster()).WillByDefault(Return(cluster_info_)); + ON_CALL(*this, upstreamRequests()).WillByDefault(ReturnRef(requests_)); + EXPECT_CALL(callbacks_.dispatcher_, setTrackedObject(_)).Times(AnyNumber()); + } + + MOCK_METHOD(void, onUpstream100ContinueHeaders, + (Http::ResponseHeaderMapPtr && headers, UpstreamRequest& upstream_request)); + MOCK_METHOD(void, onUpstreamHeaders, + (uint64_t response_code, Http::ResponseHeaderMapPtr&& headers, + UpstreamRequest& upstream_request, bool end_stream)); + MOCK_METHOD(void, onUpstreamData, + (Buffer::Instance & data, UpstreamRequest& upstream_request, bool end_stream)); + MOCK_METHOD(void, onUpstreamTrailers, + (Http::ResponseTrailerMapPtr && trailers, UpstreamRequest& upstream_request)); + MOCK_METHOD(void, onUpstreamMetadata, (Http::MetadataMapPtr && metadata_map)); + MOCK_METHOD(void, onUpstreamReset, + (Http::StreamResetReason reset_reason, absl::string_view transport_failure, + UpstreamRequest& upstream_request)); + MOCK_METHOD(void, onUpstreamHostSelected, (Upstream::HostDescriptionConstSharedPtr host)); + MOCK_METHOD(void, onPerTryTimeout, (UpstreamRequest & upstream_request)); + + MOCK_METHOD(Http::StreamDecoderFilterCallbacks*, callbacks, ()); + MOCK_METHOD(Upstream::ClusterInfoConstSharedPtr, cluster, ()); + MOCK_METHOD(FilterConfig&, config, ()); + MOCK_METHOD(FilterUtility::TimeoutData, timeout, ()); + MOCK_METHOD(Http::RequestHeaderMap*, downstreamHeaders, ()); + MOCK_METHOD(Http::RequestTrailerMap*, downstreamTrailers, ()); + MOCK_METHOD(bool, downstreamResponseStarted, (), (const)); + MOCK_METHOD(bool, downstreamEndStream, (), (const)); + MOCK_METHOD(uint32_t, attemptCount, (), (const)); + MOCK_METHOD(const VirtualCluster*, requestVcluster, (), (const)); + MOCK_METHOD(const RouteEntry*, routeEntry, (), (const)); + MOCK_METHOD(const std::list&, upstreamRequests, (), (const)); + MOCK_METHOD(const UpstreamRequest*, finalUpstreamRequest, (), (const)); + MOCK_METHOD(TimeSource&, timeSource, ()); + + NiceMock callbacks_; + + envoy::extensions::filters::http::router::v3::Router router_proto; + NiceMock context_; + FilterConfig config_; + Upstream::ClusterInfoConstSharedPtr cluster_info_; + std::list requests_; +}; + +class TcpConnPoolTest : public ::testing::Test { +public: + TcpConnPoolTest() + : conn_pool_(&mock_pool_), host_(std::make_shared>()) {} + + TcpConnPool conn_pool_; + Tcp::ConnectionPool::MockInstance mock_pool_; + MockGenericConnectionPoolCallbacks mock_generic_callbacks_; + std::shared_ptr> host_; + NiceMock cancellable_; +}; + +TEST_F(TcpConnPoolTest, Basic) { + NiceMock connection; + + EXPECT_CALL(mock_pool_, newConnection(_)).WillOnce(Return(&cancellable_)); + conn_pool_.newStream(&mock_generic_callbacks_); + + EXPECT_CALL(mock_generic_callbacks_, upstreamRequest()); + EXPECT_CALL(mock_generic_callbacks_, 
onPoolReady(_, _, _, _)); + auto data = std::make_unique>(); + EXPECT_CALL(*data, connection()).Times(AnyNumber()).WillRepeatedly(ReturnRef(connection)); + conn_pool_.onPoolReady(std::move(data), host_); +} + +TEST_F(TcpConnPoolTest, OnPoolFailure) { + EXPECT_CALL(mock_pool_, newConnection(_)).WillOnce(Return(&cancellable_)); + conn_pool_.newStream(&mock_generic_callbacks_); + + EXPECT_CALL(mock_generic_callbacks_, onPoolFailure(_, _, _)); + conn_pool_.onPoolFailure(Tcp::ConnectionPool::PoolFailureReason::LocalConnectionFailure, host_); + + // Make sure that the pool failure nulled out the pending request. + EXPECT_FALSE(conn_pool_.cancelAnyPendingRequest()); +} + +TEST_F(TcpConnPoolTest, Cancel) { + // Initially cancel should fail as there is no pending request. + EXPECT_FALSE(conn_pool_.cancelAnyPendingRequest()); + + EXPECT_CALL(mock_pool_, newConnection(_)).WillOnce(Return(&cancellable_)); + conn_pool_.newStream(&mock_generic_callbacks_); + + // Canceling should now return true as there was an active request. + EXPECT_TRUE(conn_pool_.cancelAnyPendingRequest()); + + // A second cancel should return false as there is not a pending request. + EXPECT_FALSE(conn_pool_.cancelAnyPendingRequest()); +} + +class TcpUpstreamTest : public ::testing::Test { +public: + TcpUpstreamTest() { + mock_router_filter_.requests_.push_back(std::make_unique( + mock_router_filter_, std::make_unique>())); + auto data = std::make_unique>(); + EXPECT_CALL(*data, connection()).Times(AnyNumber()).WillRepeatedly(ReturnRef(connection_)); + tcp_upstream_ = + std::make_unique(mock_router_filter_.requests_.front().get(), std::move(data)); + } + ~TcpUpstreamTest() override { EXPECT_CALL(mock_router_filter_, config()).Times(AnyNumber()); } + +protected: + NiceMock connection_; + NiceMock mock_router_filter_; + Tcp::ConnectionPool::MockConnectionData* mock_connection_data_; + std::unique_ptr tcp_upstream_; + Http::TestRequestHeaderMapImpl request_{{":method", "CONNECT"}, + {":path", "/"}, + {":protocol", "bytestream"}, + {":scheme", "https"}, + {":authority", "host"}}; +}; + +TEST_F(TcpUpstreamTest, Basic) { + // Swallow the headers. + tcp_upstream_->encodeHeaders(request_, false); + + // Proxy the data. + EXPECT_CALL(connection_, write(BufferStringEqual("foo"), false)); + Buffer::OwnedImpl buffer("foo"); + tcp_upstream_->encodeData(buffer, false); + + // Metadata is swallowed. + Http::MetadataMapVector metadata_map_vector; + tcp_upstream_->encodeMetadata(metadata_map_vector); + + // On initial data payload, fake response headers, and forward data. + Buffer::OwnedImpl response1("bar"); + EXPECT_CALL(mock_router_filter_, onUpstreamHeaders(200, _, _, false)); + EXPECT_CALL(mock_router_filter_, onUpstreamData(BufferStringEqual("bar"), _, false)); + tcp_upstream_->onUpstreamData(response1, false); + + // On the next batch of payload there won't be additional headers. + Buffer::OwnedImpl response2("eep"); + EXPECT_CALL(mock_router_filter_, onUpstreamHeaders(_, _, _, _)).Times(0); + EXPECT_CALL(mock_router_filter_, onUpstreamData(BufferStringEqual("eep"), _, false)); + tcp_upstream_->onUpstreamData(response2, false); +} + +TEST_F(TcpUpstreamTest, TrailersEndStream) { + // Swallow the headers. 
+ tcp_upstream_->encodeHeaders(request_, false); + + EXPECT_CALL(connection_, write(BufferStringEqual(""), true)); + Http::TestRequestTrailerMapImpl trailers{{"foo", "bar"}}; + tcp_upstream_->encodeTrailers(trailers); +} + +TEST_F(TcpUpstreamTest, HeaderEndStreamHalfClose) { + EXPECT_CALL(connection_, write(BufferStringEqual(""), true)); + tcp_upstream_->encodeHeaders(request_, true); +} + +TEST_F(TcpUpstreamTest, ReadDisable) { + EXPECT_CALL(connection_, readDisable(true)); + tcp_upstream_->readDisable(true); + + EXPECT_CALL(connection_, readDisable(false)); + tcp_upstream_->readDisable(false); + + // Once the connection is closed, don't touch it. + connection_.state_ = Network::Connection::State::Closed; + EXPECT_CALL(connection_, readDisable(_)).Times(0); + tcp_upstream_->readDisable(true); +} + +TEST_F(TcpUpstreamTest, UpstreamEvent) { + // Make sure upstream disconnects result in stream reset. + EXPECT_CALL(mock_router_filter_, + onUpstreamReset(Http::StreamResetReason::ConnectionTermination, "", _)); + tcp_upstream_->onEvent(Network::ConnectionEvent::RemoteClose); +} + +TEST_F(TcpUpstreamTest, Watermarks) { + EXPECT_CALL(mock_router_filter_, callbacks()).Times(AnyNumber()); + EXPECT_CALL(mock_router_filter_.callbacks_, onDecoderFilterAboveWriteBufferHighWatermark()); + tcp_upstream_->onAboveWriteBufferHighWatermark(); + + EXPECT_CALL(mock_router_filter_.callbacks_, onDecoderFilterBelowWriteBufferLowWatermark()); + tcp_upstream_->onBelowWriteBufferLowWatermark(); +} + +} // namespace +} // namespace Router +} // namespace Envoy diff --git a/test/integration/tcp_tunneling_integration_test.cc b/test/integration/tcp_tunneling_integration_test.cc index 2851fd79a494..3ce0f1cec06a 100644 --- a/test/integration/tcp_tunneling_integration_test.cc +++ b/test/integration/tcp_tunneling_integration_test.cc @@ -22,12 +22,20 @@ class ConnectTerminationIntegrationTest } void initialize() override { - auto host = config_helper_.createVirtualHost("host", "/"); - // host.mutable_proxying_config(); - config_helper_.addVirtualHost(host); config_helper_.addConfigModifier( [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& - hcm) -> void { + hcm) { + auto* route_config = hcm.mutable_route_config(); + ASSERT_EQ(1, route_config->virtual_hosts_size()); + auto* route = route_config->mutable_virtual_hosts(0)->mutable_routes(0); + auto* match = route->mutable_match(); + match->Clear(); + match->mutable_connect_matcher(); + + auto* upgrade = route->mutable_route()->add_upgrade_configs(); + upgrade->set_upgrade_type("CONNECT"); + upgrade->mutable_connect_config(); + hcm.add_upgrade_configs()->set_upgrade_type("CONNECT"); hcm.mutable_http2_protocol_options()->set_allow_connect(true); @@ -66,7 +74,7 @@ class ConnectTerminationIntegrationTest {":path", "/"}, {":protocol", "bytestream"}, {":scheme", "https"}, - {":authority", "host"}}; + {":authority", "host:80"}}; FakeRawConnectionPtr fake_raw_upstream_connection_; IntegrationStreamDecoderPtr response_; bool enable_timeout_{}; @@ -74,7 +82,7 @@ class ConnectTerminationIntegrationTest // TODO(alyssawilk) make sure that if data is sent with the connect it does not go upstream // until the 200 headers are sent before unhiding ANY config. 
-TEST_P(ConnectTerminationIntegrationTest, DISABLED_Basic) { +TEST_P(ConnectTerminationIntegrationTest, Basic) { initialize(); setUpConnection(); @@ -92,7 +100,7 @@ TEST_P(ConnectTerminationIntegrationTest, DISABLED_Basic) { ASSERT_FALSE(response_->reset()); } -TEST_P(ConnectTerminationIntegrationTest, DISABLED_DownstreamClose) { +TEST_P(ConnectTerminationIntegrationTest, DownstreamClose) { initialize(); setUpConnection(); @@ -103,7 +111,7 @@ TEST_P(ConnectTerminationIntegrationTest, DISABLED_DownstreamClose) { ASSERT_TRUE(fake_raw_upstream_connection_->waitForHalfClose()); } -TEST_P(ConnectTerminationIntegrationTest, DISABLED_DownstreamReset) { +TEST_P(ConnectTerminationIntegrationTest, DownstreamReset) { initialize(); setUpConnection(); @@ -114,7 +122,7 @@ TEST_P(ConnectTerminationIntegrationTest, DISABLED_DownstreamReset) { ASSERT_TRUE(fake_raw_upstream_connection_->waitForHalfClose()); } -TEST_P(ConnectTerminationIntegrationTest, DISABLED_UpstreamClose) { +TEST_P(ConnectTerminationIntegrationTest, UpstreamClose) { initialize(); setUpConnection(); @@ -125,7 +133,7 @@ TEST_P(ConnectTerminationIntegrationTest, DISABLED_UpstreamClose) { response_->waitForReset(); } -TEST_P(ConnectTerminationIntegrationTest, DISABLED_TestTimeout) { +TEST_P(ConnectTerminationIntegrationTest, TestTimeout) { enable_timeout_ = true; initialize(); @@ -136,7 +144,7 @@ TEST_P(ConnectTerminationIntegrationTest, DISABLED_TestTimeout) { ASSERT_TRUE(fake_raw_upstream_connection_->waitForHalfClose()); } -TEST_P(ConnectTerminationIntegrationTest, DISABLED_BuggyHeaders) { +TEST_P(ConnectTerminationIntegrationTest, BuggyHeaders) { initialize(); // It's possible that the FIN is received before we set half close on the // upstream connection, so allow unexpected disconnects. @@ -150,7 +158,7 @@ TEST_P(ConnectTerminationIntegrationTest, DISABLED_BuggyHeaders) { {":path", "/"}, {":protocol", "bytestream"}, {":scheme", "https"}, - {":authority", "host"}}); + {":authority", "host:80"}}); // If the connection is established (created, set to half close, and then the // FIN arrives), make sure the FIN arrives, and send a FIN from upstream. if (fake_upstreams_[0]->waitForRawConnection(fake_raw_upstream_connection_) && From 1c28302b62bc4f5b93f2826eb06d2878069d9081 Mon Sep 17 00:00:00 2001 From: Weston Carlson Date: Tue, 28 Apr 2020 07:42:25 -0600 Subject: [PATCH 042/909] config: add proxy protocol config api message. (#10845) Description: This PR creates a common PROXY protocol config API message. It will be used for CONNECT work as well as in the transport socket for my upstream proxy proto work. This message could be extended to include TLVs in the future. 
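Because ProxyProtocolConfig is a plain protobuf message, the generated C++ API follows the usual codegen conventions. A hedged sketch of selecting the binary V2 header format (the include path assumes the standard generated layout for envoy.config.core.v3):

#include <iostream>

#include "envoy/config/core/v3/proxy_protocol.pb.h"

int main() {
  envoy::config::core::v3::ProxyProtocolConfig config;
  // V1 (value 0) is the human readable format and the proto default;
  // request the binary V2 format explicitly.
  config.set_version(envoy::config::core::v3::ProxyProtocolConfig::V2);
  std::cout << config.DebugString();
  return 0;
}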
Risk Level: Low Testing: None Docs Changes: None Release Notes: None Discussed in: #10682 (my draft PR to discuss the upstream implementation) Signed-off-by: Weston Carlson --- api/envoy/config/core/v3/proxy_protocol.proto | 26 ++++++++++++++++ .../config/core/v4alpha/proxy_protocol.proto | 30 +++++++++++++++++++ .../common_messages/common_messages.rst | 1 + .../envoy/config/core/v3/proxy_protocol.proto | 26 ++++++++++++++++ .../config/core/v4alpha/proxy_protocol.proto | 30 +++++++++++++++++++ 5 files changed, 113 insertions(+) create mode 100644 api/envoy/config/core/v3/proxy_protocol.proto create mode 100644 api/envoy/config/core/v4alpha/proxy_protocol.proto create mode 100644 generated_api_shadow/envoy/config/core/v3/proxy_protocol.proto create mode 100644 generated_api_shadow/envoy/config/core/v4alpha/proxy_protocol.proto diff --git a/api/envoy/config/core/v3/proxy_protocol.proto b/api/envoy/config/core/v3/proxy_protocol.proto new file mode 100644 index 000000000000..225a8971f23a --- /dev/null +++ b/api/envoy/config/core/v3/proxy_protocol.proto @@ -0,0 +1,26 @@ +syntax = "proto3"; + +package envoy.config.core.v3; + +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v3"; +option java_outer_classname = "ProxyProtocolProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Proxy Protocol] + +message ProxyProtocolConfig { + enum Version { + // PROXY protocol version 1. Human readable format. + V1 = 0; + + // PROXY protocol version 2. Binary format. + V2 = 1; + } + + // The PROXY protocol version to use. See https://www.haproxy.org/download/2.1/doc/proxy-protocol.txt for details + Version version = 1; +} diff --git a/api/envoy/config/core/v4alpha/proxy_protocol.proto b/api/envoy/config/core/v4alpha/proxy_protocol.proto new file mode 100644 index 000000000000..c7a8d1f454dd --- /dev/null +++ b/api/envoy/config/core/v4alpha/proxy_protocol.proto @@ -0,0 +1,30 @@ +syntax = "proto3"; + +package envoy.config.core.v4alpha; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; +option java_outer_classname = "ProxyProtocolProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Proxy Protocol] + +message ProxyProtocolConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.ProxyProtocolConfig"; + + enum Version { + // PROXY protocol version 1. Human readable format. + V1 = 0; + + // PROXY protocol version 2. Binary format. + V2 = 1; + } + + // The PROXY protocol version to use. 
See https://www.haproxy.org/download/2.1/doc/proxy-protocol.txt for details + Version version = 1; +} diff --git a/docs/root/api-v3/common_messages/common_messages.rst b/docs/root/api-v3/common_messages/common_messages.rst index faea72f757d6..6e3a5ed33f88 100644 --- a/docs/root/api-v3/common_messages/common_messages.rst +++ b/docs/root/api-v3/common_messages/common_messages.rst @@ -9,6 +9,7 @@ Common messages ../config/core/v3/address.proto ../config/core/v3/backoff.proto ../config/core/v3/protocol.proto + ../config/core/v3/proxy_protocol.proto ../service/discovery/v3/discovery.proto ../config/core/v3/config_source.proto ../config/core/v3/grpc_service.proto diff --git a/generated_api_shadow/envoy/config/core/v3/proxy_protocol.proto b/generated_api_shadow/envoy/config/core/v3/proxy_protocol.proto new file mode 100644 index 000000000000..225a8971f23a --- /dev/null +++ b/generated_api_shadow/envoy/config/core/v3/proxy_protocol.proto @@ -0,0 +1,26 @@ +syntax = "proto3"; + +package envoy.config.core.v3; + +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v3"; +option java_outer_classname = "ProxyProtocolProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Proxy Protocol] + +message ProxyProtocolConfig { + enum Version { + // PROXY protocol version 1. Human readable format. + V1 = 0; + + // PROXY protocol version 2. Binary format. + V2 = 1; + } + + // The PROXY protocol version to use. See https://www.haproxy.org/download/2.1/doc/proxy-protocol.txt for details + Version version = 1; +} diff --git a/generated_api_shadow/envoy/config/core/v4alpha/proxy_protocol.proto b/generated_api_shadow/envoy/config/core/v4alpha/proxy_protocol.proto new file mode 100644 index 000000000000..c7a8d1f454dd --- /dev/null +++ b/generated_api_shadow/envoy/config/core/v4alpha/proxy_protocol.proto @@ -0,0 +1,30 @@ +syntax = "proto3"; + +package envoy.config.core.v4alpha; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; +option java_outer_classname = "ProxyProtocolProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Proxy Protocol] + +message ProxyProtocolConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.ProxyProtocolConfig"; + + enum Version { + // PROXY protocol version 1. Human readable format. + V1 = 0; + + // PROXY protocol version 2. Binary format. + V2 = 1; + } + + // The PROXY protocol version to use. See https://www.haproxy.org/download/2.1/doc/proxy-protocol.txt for details + Version version = 1; +} From b6c8bb3a4ac6bcce221643a4924befd5eefd6815 Mon Sep 17 00:00:00 2001 From: Spencer Lewis Date: Tue, 28 Apr 2020 11:44:29 -0400 Subject: [PATCH 043/909] healthcheck: support transport socket matching (#10862) Users can specify metadata in a health check's config that will be used to select a matching transport socket from those configured in a cluster's transport_socket_matches. This allows users to configure a different transport socket for health check connections than the one that is used to create a connection to an endpoint for proxying. 
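Since the new transport_socket_match_criteria field is a google.protobuf.Struct, the match criteria can be filled in from C++ through the well-known Struct API. A hedged sketch, reusing the "useMTLS" key from the proto documentation below (the include path and everything else is illustrative of the standard codegen, not code from this change):

#include <iostream>

#include "envoy/config/core/v3/health_check.pb.h"

int main() {
  envoy::config::core::v3::HealthCheck hc;
  // Key/value pairs set here are matched against the keys declared in the cluster's
  // transport_socket_matches; a match selects that transport socket for health checks.
  auto& fields = *hc.mutable_transport_socket_match_criteria()->mutable_fields();
  fields["useMTLS"].set_bool_value(true);
  std::cout << hc.DebugString();
  return 0;
}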
Risk Level: low; small optional feature Testing: added unit tests Docs Changes: updated health check and cluster proto docs with an explanation and example. Release Notes: added Fixes #10575 Signed-off-by: Spencer Lewis --- api/envoy/config/cluster/v3/cluster.proto | 4 + .../config/cluster/v4alpha/cluster.proto | 4 + api/envoy/config/core/v3/health_check.proto | 34 ++++- .../config/core/v4alpha/health_check.proto | 34 ++++- docs/root/version_history/current.rst | 1 + .../envoy/config/cluster/v3/cluster.proto | 4 + .../config/cluster/v4alpha/cluster.proto | 4 + .../envoy/config/core/v3/health_check.proto | 46 +++++-- .../config/core/v4alpha/health_check.proto | 34 ++++- include/envoy/upstream/upstream.h | 7 +- .../upstream/health_checker_base_impl.cc | 17 ++- .../upstream/health_checker_base_impl.h | 6 + source/common/upstream/health_checker_impl.cc | 10 +- source/common/upstream/upstream_impl.cc | 12 +- source/common/upstream/upstream_impl.h | 11 +- .../upstream/health_checker_impl_test.cc | 116 ++++++++++++++++++ test/mocks/upstream/host.h | 3 +- 17 files changed, 320 insertions(+), 27 deletions(-) diff --git a/api/envoy/config/cluster/v3/cluster.proto b/api/envoy/config/cluster/v3/cluster.proto index 06de8bbbead0..0b3a4fbc61c4 100644 --- a/api/envoy/config/cluster/v3/cluster.proto +++ b/api/envoy/config/cluster/v3/cluster.proto @@ -547,6 +547,10 @@ message Cluster { // *TransportSocketMatch* in this field. Other client Envoys receive CDS without // *transport_socket_match* set, and still send plain text traffic to the same cluster. // + // This field can be used to specify custom transport socket configurations for health + // checks by adding matching key/value pairs in a health check's + // :ref:`transport socket match criteria ` field. + // // [#comment:TODO(incfly): add a detailed architecture doc on intended usage.] repeated TransportSocketMatch transport_socket_matches = 43; diff --git a/api/envoy/config/cluster/v4alpha/cluster.proto b/api/envoy/config/cluster/v4alpha/cluster.proto index 887ef9c3fe33..89c206f2c7b0 100644 --- a/api/envoy/config/cluster/v4alpha/cluster.proto +++ b/api/envoy/config/cluster/v4alpha/cluster.proto @@ -548,6 +548,10 @@ message Cluster { // *TransportSocketMatch* in this field. Other client Envoys receive CDS without // *transport_socket_match* set, and still send plain text traffic to the same cluster. // + // This field can be used to specify custom transport socket configurations for health + // checks by adding matching key/value pairs in a health check's + // :ref:`transport socket match criteria ` field. + // // [#comment:TODO(incfly): add a detailed architecture doc on intended usage.] repeated TransportSocketMatch transport_socket_matches = 43; diff --git a/api/envoy/config/core/v3/health_check.proto b/api/envoy/config/core/v3/health_check.proto index f4ef02e0f966..c6b4acfa937a 100644 --- a/api/envoy/config/core/v3/health_check.proto +++ b/api/envoy/config/core/v3/health_check.proto @@ -54,7 +54,7 @@ enum HealthStatus { DEGRADED = 5; } -// [#next-free-field: 23] +// [#next-free-field: 24] message HealthCheck { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.HealthCheck"; @@ -323,4 +323,36 @@ message HealthCheck { // This allows overriding the cluster TLS settings, just for health check connections. TlsOptions tls_options = 21; + + // Optional key/value pairs that will be used to match a transport socket from those specified in the cluster's + // :ref:`tranport socket matches `. 
+ // For example, the following match criteria + // + // .. code-block:: yaml + // + // transport_socket_match_criteria: + // useMTLS: true + // + // Will match the following :ref:`cluster socket match ` + // + // .. code-block:: yaml + // + // transport_socket_matches: + // - name: "useMTLS" + // match: + // useMTLS: true + // transport_socket: + // name: envoy.transport_sockets.tls + // config: { ... } # tls socket configuration + // + // If this field is set, then for health checks it will supersede an entry of *envoy.transport_socket* in the + // :ref:`LbEndpoint.Metadata `. + // This allows using different transport socket capabilities for health checking versus proxying to the + // endpoint. + // + // If the key/values pairs specified do not match any + // :ref:`transport socket matches `, + // the cluster's :ref:`transport socket ` + // will be used for health check socket configuration. + google.protobuf.Struct transport_socket_match_criteria = 23; } diff --git a/api/envoy/config/core/v4alpha/health_check.proto b/api/envoy/config/core/v4alpha/health_check.proto index 1975c309a7de..7f823da97c5e 100644 --- a/api/envoy/config/core/v4alpha/health_check.proto +++ b/api/envoy/config/core/v4alpha/health_check.proto @@ -54,7 +54,7 @@ enum HealthStatus { DEGRADED = 5; } -// [#next-free-field: 23] +// [#next-free-field: 24] message HealthCheck { option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.HealthCheck"; @@ -323,4 +323,36 @@ message HealthCheck { // This allows overriding the cluster TLS settings, just for health check connections. TlsOptions tls_options = 21; + + // Optional key/value pairs that will be used to match a transport socket from those specified in the cluster's + // :ref:`tranport socket matches `. + // For example, the following match criteria + // + // .. code-block:: yaml + // + // transport_socket_match_criteria: + // useMTLS: true + // + // Will match the following :ref:`cluster socket match ` + // + // .. code-block:: yaml + // + // transport_socket_matches: + // - name: "useMTLS" + // match: + // useMTLS: true + // transport_socket: + // name: envoy.transport_sockets.tls + // config: { ... } # tls socket configuration + // + // If this field is set, then for health checks it will supersede an entry of *envoy.transport_socket* in the + // :ref:`LbEndpoint.Metadata `. + // This allows using different transport socket capabilities for health checking versus proxying to the + // endpoint. + // + // If the key/values pairs specified do not match any + // :ref:`transport socket matches `, + // the cluster's :ref:`transport socket ` + // will be used for health check socket configuration. + google.protobuf.Struct transport_socket_match_criteria = 23; } diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index c19000140d66..df174452fa96 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -16,6 +16,7 @@ Changes * grpc-json: added support for streaming response using `google.api.HttpBody `_. * gzip filter: added option to set zlib's next output buffer size. +* health checks: allow configuring health check transport sockets by specifying :ref:`transport socket match criteria `. * http: fixed a bug where in some cases slash was moved from path to query string when :ref:`merging of adjacent slashes` is enabled. * http: fixed a bug where the upgrade header was not cleared on responses to non-upgrade requests. 
Can be reverted temporarily by setting runtime feature `envoy.reloadable_features.fix_upgrade_response` to false. diff --git a/generated_api_shadow/envoy/config/cluster/v3/cluster.proto b/generated_api_shadow/envoy/config/cluster/v3/cluster.proto index e8e451de8e6b..0e0b3a1be9f1 100644 --- a/generated_api_shadow/envoy/config/cluster/v3/cluster.proto +++ b/generated_api_shadow/envoy/config/cluster/v3/cluster.proto @@ -545,6 +545,10 @@ message Cluster { // *TransportSocketMatch* in this field. Other client Envoys receive CDS without // *transport_socket_match* set, and still send plain text traffic to the same cluster. // + // This field can be used to specify custom transport socket configurations for health + // checks by adding matching key/value pairs in a health check's + // :ref:`transport socket match criteria ` field. + // // [#comment:TODO(incfly): add a detailed architecture doc on intended usage.] repeated TransportSocketMatch transport_socket_matches = 43; diff --git a/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto b/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto index 887ef9c3fe33..89c206f2c7b0 100644 --- a/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto +++ b/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto @@ -548,6 +548,10 @@ message Cluster { // *TransportSocketMatch* in this field. Other client Envoys receive CDS without // *transport_socket_match* set, and still send plain text traffic to the same cluster. // + // This field can be used to specify custom transport socket configurations for health + // checks by adding matching key/value pairs in a health check's + // :ref:`transport socket match criteria ` field. + // // [#comment:TODO(incfly): add a detailed architecture doc on intended usage.] repeated TransportSocketMatch transport_socket_matches = 43; diff --git a/generated_api_shadow/envoy/config/core/v3/health_check.proto b/generated_api_shadow/envoy/config/core/v3/health_check.proto index 5b95ebe39de3..2bc8d1488172 100644 --- a/generated_api_shadow/envoy/config/core/v3/health_check.proto +++ b/generated_api_shadow/envoy/config/core/v3/health_check.proto @@ -54,7 +54,7 @@ enum HealthStatus { DEGRADED = 5; } -// [#next-free-field: 23] +// [#next-free-field: 24] message HealthCheck { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.HealthCheck"; @@ -302,24 +302,56 @@ message HealthCheck { // The default value for "healthy edge interval" is the same as the default interval. TlsOptions tls_options = 21; + // Specifies the path to the :ref:`health check event log `. + // If empty, no event log will be written. + google.protobuf.Struct transport_socket_match_criteria = 23; + oneof health_checker { option (validate.required) = true; - // Specifies the path to the :ref:`health check event log `. - // If empty, no event log will be written. - HttpHealthCheck http_health_check = 8; - // [#not-implemented-hide:] // The gRPC service for the health check event service. // If empty, health check events won't be sent to a remote endpoint. - TcpHealthCheck tcp_health_check = 9; + HttpHealthCheck http_health_check = 8; // If set to true, health check failure events will always be logged. If set to false, only the // initial health check failure event will be logged. // The default value is false. - GrpcHealthCheck grpc_health_check = 11; + TcpHealthCheck tcp_health_check = 9; // This allows overriding the cluster TLS settings, just for health check connections. 
+ GrpcHealthCheck grpc_health_check = 11; + + // Optional key/value pairs that will be used to match a transport socket from those specified in the cluster's + // :ref:`tranport socket matches `. + // For example, the following match criteria + // + // .. code-block:: yaml + // + // transport_socket_match_criteria: + // useMTLS: true + // + // Will match the following :ref:`cluster socket match ` + // + // .. code-block:: yaml + // + // transport_socket_matches: + // - name: "useMTLS" + // match: + // useMTLS: true + // transport_socket: + // name: envoy.transport_sockets.tls + // config: { ... } # tls socket configuration + // + // If this field is set, then for health checks it will supersede an entry of *envoy.transport_socket* in the + // :ref:`LbEndpoint.Metadata `. + // This allows using different transport socket capabilities for health checking versus proxying to the + // endpoint. + // + // If the key/values pairs specified do not match any + // :ref:`transport socket matches `, + // the cluster's :ref:`transport socket ` + // will be used for health check socket configuration. CustomHealthCheck custom_health_check = 13; } } diff --git a/generated_api_shadow/envoy/config/core/v4alpha/health_check.proto b/generated_api_shadow/envoy/config/core/v4alpha/health_check.proto index 1975c309a7de..7f823da97c5e 100644 --- a/generated_api_shadow/envoy/config/core/v4alpha/health_check.proto +++ b/generated_api_shadow/envoy/config/core/v4alpha/health_check.proto @@ -54,7 +54,7 @@ enum HealthStatus { DEGRADED = 5; } -// [#next-free-field: 23] +// [#next-free-field: 24] message HealthCheck { option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.HealthCheck"; @@ -323,4 +323,36 @@ message HealthCheck { // This allows overriding the cluster TLS settings, just for health check connections. TlsOptions tls_options = 21; + + // Optional key/value pairs that will be used to match a transport socket from those specified in the cluster's + // :ref:`tranport socket matches `. + // For example, the following match criteria + // + // .. code-block:: yaml + // + // transport_socket_match_criteria: + // useMTLS: true + // + // Will match the following :ref:`cluster socket match ` + // + // .. code-block:: yaml + // + // transport_socket_matches: + // - name: "useMTLS" + // match: + // useMTLS: true + // transport_socket: + // name: envoy.transport_sockets.tls + // config: { ... } # tls socket configuration + // + // If this field is set, then for health checks it will supersede an entry of *envoy.transport_socket* in the + // :ref:`LbEndpoint.Metadata `. + // This allows using different transport socket capabilities for health checking versus proxying to the + // endpoint. + // + // If the key/values pairs specified do not match any + // :ref:`transport socket matches `, + // the cluster's :ref:`transport socket ` + // will be used for health check socket configuration. + google.protobuf.Struct transport_socket_match_criteria = 23; } diff --git a/include/envoy/upstream/upstream.h b/include/envoy/upstream/upstream.h index 553f9b33fd8b..7ed52ca584da 100644 --- a/include/envoy/upstream/upstream.h +++ b/include/envoy/upstream/upstream.h @@ -107,9 +107,10 @@ class Host : virtual public HostDescription { * connection. * @return the connection data. 
*/ - virtual CreateConnectionData createHealthCheckConnection( - Event::Dispatcher& dispatcher, - Network::TransportSocketOptionsSharedPtr transport_socket_options) const PURE; + virtual CreateConnectionData + createHealthCheckConnection(Event::Dispatcher& dispatcher, + Network::TransportSocketOptionsSharedPtr transport_socket_options, + const envoy::config::core::v3::Metadata* metadata) const PURE; /** * @return host specific gauges. diff --git a/source/common/upstream/health_checker_base_impl.cc b/source/common/upstream/health_checker_base_impl.cc index 06e3efb3f3aa..7146b20bb3d2 100644 --- a/source/common/upstream/health_checker_base_impl.cc +++ b/source/common/upstream/health_checker_base_impl.cc @@ -35,7 +35,8 @@ HealthCheckerImplBase::HealthCheckerImplBase(const Cluster& cluster, PROTOBUF_GET_MS_OR_DEFAULT(config, unhealthy_edge_interval, unhealthy_interval_.count())), healthy_edge_interval_( PROTOBUF_GET_MS_OR_DEFAULT(config, healthy_edge_interval, interval_.count())), - transport_socket_options_(initTransportSocketOptions(config)) { + transport_socket_options_(initTransportSocketOptions(config)), + transport_socket_match_metadata_(initTransportSocketMatchMetadata(config)) { cluster_.prioritySet().addMemberUpdateCb( [this](const HostVector& hosts_added, const HostVector& hosts_removed) -> void { onClusterMemberUpdate(hosts_added, hosts_removed); @@ -55,6 +56,20 @@ HealthCheckerImplBase::initTransportSocketOptions( return std::make_shared(); } +MetadataConstSharedPtr HealthCheckerImplBase::initTransportSocketMatchMetadata( + const envoy::config::core::v3::HealthCheck& config) { + if (config.has_transport_socket_match_criteria()) { + std::shared_ptr metadata = + std::make_shared(); + (*metadata->mutable_filter_metadata())[Envoy::Config::MetadataFilters::get() + .ENVOY_TRANSPORT_SOCKET_MATCH] = + config.transport_socket_match_criteria(); + return metadata; + } + + return nullptr; +} + HealthCheckerImplBase::~HealthCheckerImplBase() { // ASSERTs inside the session destructor check to make sure we have been previously deferred // deleted. Unify that logic here before actual destruction happens. 
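Putting the two halves of the YAML example from the new proto comments together, a cluster that opts its health checks into mTLS via the new field could look roughly like the sketch below. The cluster fragment shows only the fields relevant to health check transport socket matching; the health check timings and the SNI value are illustrative placeholders rather than values taken from this patch.

.. code-block:: yaml

   # Cluster fragment (illustrative).
   transport_socket_matches:
   - name: "useMTLS"
     match:
       useMTLS: true
     transport_socket:
       name: envoy.transport_sockets.tls
       typed_config:
         "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext
         sni: backend.internal  # placeholder
   health_checks:
   - timeout: 1s
     interval: 5s
     unhealthy_threshold: 2
     healthy_threshold: 2
     http_health_check:
       path: /healthcheck
     # New in this patch: selects the "useMTLS" transport socket match above
     # for the health check connection only.
     transport_socket_match_criteria:
       useMTLS: true

If the criteria match no entry in *transport_socket_matches*, the health check falls back to the cluster's default transport socket, per the proto comment above.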
diff --git a/source/common/upstream/health_checker_base_impl.h b/source/common/upstream/health_checker_base_impl.h index 2aed0b19c85d..a69765fbbd0f 100644 --- a/source/common/upstream/health_checker_base_impl.h +++ b/source/common/upstream/health_checker_base_impl.h @@ -49,6 +49,9 @@ class HealthCheckerImplBase : public HealthChecker, std::shared_ptr transportSocketOptions() const { return transport_socket_options_; } + MetadataConstSharedPtr transportSocketMatchMetadata() const { + return transport_socket_match_metadata_; + } protected: class ActiveHealthCheckSession : public Event::DeferredDeletable { @@ -137,6 +140,8 @@ class HealthCheckerImplBase : public HealthChecker, void setUnhealthyCrossThread(const HostSharedPtr& host); static std::shared_ptr initTransportSocketOptions(const envoy::config::core::v3::HealthCheck& config); + static MetadataConstSharedPtr + initTransportSocketMatchMetadata(const envoy::config::core::v3::HealthCheck& config); static const std::chrono::milliseconds NO_TRAFFIC_INTERVAL; @@ -153,6 +158,7 @@ class HealthCheckerImplBase : public HealthChecker, uint64_t local_process_healthy_{}; uint64_t local_process_degraded_{}; const std::shared_ptr transport_socket_options_; + const MetadataConstSharedPtr transport_socket_match_metadata_; }; class HealthCheckEventLoggerImpl : public HealthCheckEventLogger { diff --git a/source/common/upstream/health_checker_impl.cc b/source/common/upstream/health_checker_impl.cc index 16e78394248a..51928547973f 100644 --- a/source/common/upstream/health_checker_impl.cc +++ b/source/common/upstream/health_checker_impl.cc @@ -250,7 +250,8 @@ void HttpHealthCheckerImpl::HttpActiveHealthCheckSession::onEvent(Network::Conne void HttpHealthCheckerImpl::HttpActiveHealthCheckSession::onInterval() { if (!client_) { Upstream::Host::CreateConnectionData conn = - host_->createHealthCheckConnection(parent_.dispatcher_, parent_.transportSocketOptions()); + host_->createHealthCheckConnection(parent_.dispatcher_, parent_.transportSocketOptions(), + parent_.transportSocketMatchMetadata().get()); client_.reset(parent_.createCodecClient(conn)); client_->addConnectionCallbacks(connection_callback_impl_); expect_reset_ = false; @@ -505,7 +506,9 @@ void TcpHealthCheckerImpl::TcpActiveHealthCheckSession::onEvent(Network::Connect void TcpHealthCheckerImpl::TcpActiveHealthCheckSession::onInterval() { if (!client_) { client_ = - host_->createHealthCheckConnection(parent_.dispatcher_, parent_.transportSocketOptions()) + host_ + ->createHealthCheckConnection(parent_.dispatcher_, parent_.transportSocketOptions(), + parent_.transportSocketMatchMetadata().get()) .connection_; session_callbacks_ = std::make_shared(*this); client_->addConnectionCallbacks(*session_callbacks_); @@ -659,7 +662,8 @@ void GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::onEvent(Network::Conne void GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::onInterval() { if (!client_) { Upstream::Host::CreateConnectionData conn = - host_->createHealthCheckConnection(parent_.dispatcher_, parent_.transportSocketOptions()); + host_->createHealthCheckConnection(parent_.dispatcher_, parent_.transportSocketOptions(), + parent_.transportSocketMatchMetadata().get()); client_ = parent_.createCodecClient(conn); client_->addConnectionCallbacks(connection_callback_impl_); client_->setCodecConnectionCallbacks(http_connection_callback_impl_); diff --git a/source/common/upstream/upstream_impl.cc b/source/common/upstream/upstream_impl.cc index 49e56528834f..0be39442ef83 100644 --- 
a/source/common/upstream/upstream_impl.cc +++ b/source/common/upstream/upstream_impl.cc @@ -19,6 +19,7 @@ #include "envoy/event/dispatcher.h" #include "envoy/event/timer.h" #include "envoy/network/dns.h" +#include "envoy/network/transport_socket.h" #include "envoy/secret/secret_manager.h" #include "envoy/server/filter_config.h" #include "envoy/server/transport_socket_config.h" @@ -268,7 +269,7 @@ HostDescriptionImpl::HostDescriptionImpl( Network::TransportSocketFactory& HostDescriptionImpl::resolveTransportSocketFactory( const Network::Address::InstanceConstSharedPtr& dest_address, - const envoy::config::core::v3::Metadata* metadata) { + const envoy::config::core::v3::Metadata* metadata) const { auto match = cluster_->transportSocketMatcher().resolve(metadata); match.stats_.total_match_count_.inc(); ENVOY_LOG(debug, "transport socket match, socket {} selected for host with address {}", @@ -305,8 +306,13 @@ void HostImpl::setEdsHealthFlag(envoy::config::core::v3::HealthStatus health_sta Host::CreateConnectionData HostImpl::createHealthCheckConnection( Event::Dispatcher& dispatcher, - Network::TransportSocketOptionsSharedPtr transport_socket_options) const { - return {createConnection(dispatcher, *cluster_, healthCheckAddress(), socket_factory_, nullptr, + Network::TransportSocketOptionsSharedPtr transport_socket_options, + const envoy::config::core::v3::Metadata* metadata) const { + + Network::TransportSocketFactory& factory = + (metadata != nullptr) ? resolveTransportSocketFactory(healthCheckAddress(), metadata) + : socket_factory_; + return {createConnection(dispatcher, *cluster_, healthCheckAddress(), factory, nullptr, transport_socket_options), shared_from_this()}; } diff --git a/source/common/upstream/upstream_impl.h b/source/common/upstream/upstream_impl.h index bdb40c9d6841..b9c871ccd94a 100644 --- a/source/common/upstream/upstream_impl.h +++ b/source/common/upstream/upstream_impl.h @@ -132,11 +132,9 @@ class HostDescriptionImpl : virtual public HostDescription, } uint32_t priority() const override { return priority_; } void priority(uint32_t priority) override { priority_ = priority; } - -private: Network::TransportSocketFactory& resolveTransportSocketFactory(const Network::Address::InstanceConstSharedPtr& dest_address, - const envoy::config::core::v3::Metadata* metadata); + const envoy::config::core::v3::Metadata* metadata) const; protected: ClusterInfoConstSharedPtr cluster_; @@ -183,9 +181,10 @@ class HostImpl : public HostDescriptionImpl, CreateConnectionData createConnection( Event::Dispatcher& dispatcher, const Network::ConnectionSocket::OptionsSharedPtr& options, Network::TransportSocketOptionsSharedPtr transport_socket_options) const override; - CreateConnectionData createHealthCheckConnection( - Event::Dispatcher& dispatcher, - Network::TransportSocketOptionsSharedPtr transport_socket_options) const override; + CreateConnectionData + createHealthCheckConnection(Event::Dispatcher& dispatcher, + Network::TransportSocketOptionsSharedPtr transport_socket_options, + const envoy::config::core::v3::Metadata* metadata) const override; std::vector> gauges() const override { diff --git a/test/common/upstream/health_checker_impl_test.cc b/test/common/upstream/health_checker_impl_test.cc index 3ea1592bb9f9..1bcacc7973bb 100644 --- a/test/common/upstream/health_checker_impl_test.cc +++ b/test/common/upstream/health_checker_impl_test.cc @@ -2467,6 +2467,122 @@ TEST_F(HttpHealthCheckerImplTest, Http2ClusterUseHttp2CodecClient) { EXPECT_EQ(Http::CodecClient::Type::HTTP2, 
health_checker_->codecClientType()); } +MATCHER_P(MetadataEq, expected, "") { + const envoy::config::core::v3::Metadata* metadata = arg; + if (!metadata) { + return false; + } + EXPECT_TRUE(Envoy::Protobuf::util::MessageDifferencer::Equals(*metadata, expected)); + return true; +} + +TEST_F(HttpHealthCheckerImplTest, TransportSocketMatchCriteria) { + const std::string host = "fake_cluster"; + const std::string path = "/healthcheck"; + const std::string yaml = R"EOF( + timeout: 1s + interval: 1s + no_traffic_interval: 1s + interval_jitter_percent: 40 + unhealthy_threshold: 2 + healthy_threshold: 2 + http_health_check: + service_name_matcher: + prefix: locations + path: /healthcheck + transport_socket_match_criteria: + key: value + )EOF"; + + auto default_socket_factory = std::make_unique(); + // We expect that this default_socket_factory will NOT be used to create a transport socket for + // the health check connection. + EXPECT_CALL(*default_socket_factory, createTransportSocket(_)).Times(0); + EXPECT_CALL(*default_socket_factory, implementsSecureTransport()); + auto transport_socket_match = + std::make_unique(std::move(default_socket_factory)); + + auto metadata = TestUtility::parseYaml( + R"EOF( + filter_metadata: + envoy.transport_socket_match: + key: value + )EOF"); + + Stats::IsolatedStoreImpl stats_store; + auto health_transport_socket_stats = TransportSocketMatchStats{ + ALL_TRANSPORT_SOCKET_MATCH_STATS(POOL_COUNTER_PREFIX(stats_store, "test"))}; + auto health_check_only_socket_factory = std::make_unique(); + + // We expect resolve() to be called twice, once for endpoint socket matching (with no metadata in + // this test) and once for health check socket matching. In the latter we expect metadata that + // matches the above object. + EXPECT_CALL(*transport_socket_match, resolve(nullptr)); + EXPECT_CALL(*transport_socket_match, resolve(MetadataEq(metadata))) + .WillOnce(Return(TransportSocketMatcher::MatchData( + *health_check_only_socket_factory, health_transport_socket_stats, "health_check_only"))); + // The health_check_only_socket_factory should be used to create a transport socket for the health + // check connection. + EXPECT_CALL(*health_check_only_socket_factory, createTransportSocket(_)); + + cluster_->info_->transport_socket_matcher_ = std::move(transport_socket_match); + + health_checker_ = std::make_shared( + *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, + HealthCheckEventLoggerPtr(event_logger_)); + + cluster_->prioritySet().getMockHostSet(0)->hosts_ = { + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + cluster_->info_->stats().upstream_cx_total_.inc(); + expectSessionCreate(); + expectStreamCreate(0); + EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); + health_checker_->start(); + EXPECT_EQ(health_transport_socket_stats.total_match_count_.value(), 1); +} + +TEST_F(HttpHealthCheckerImplTest, NoTransportSocketMatchCriteria) { + const std::string host = "fake_cluster"; + const std::string path = "/healthcheck"; + const std::string yaml = R"EOF( + timeout: 1s + interval: 1s + no_traffic_interval: 1s + interval_jitter_percent: 40 + unhealthy_threshold: 2 + healthy_threshold: 2 + http_health_check: + service_name_matcher: + prefix: locations + path: /healthcheck + )EOF"; + + auto default_socket_factory = std::make_unique(); + // The default_socket_factory should be used to create a transport socket for the health check + // connection. 
+ EXPECT_CALL(*default_socket_factory, createTransportSocket(_)); + EXPECT_CALL(*default_socket_factory, implementsSecureTransport()); + auto transport_socket_match = + std::make_unique(std::move(default_socket_factory)); + // We expect resolve() to be called exactly once for endpoint socket matching. We should not + // attempt to match again for health checks since there is no match criteria in the config. + EXPECT_CALL(*transport_socket_match, resolve(nullptr)); + + cluster_->info_->transport_socket_matcher_ = std::move(transport_socket_match); + + health_checker_ = std::make_shared( + *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, + HealthCheckEventLoggerPtr(event_logger_)); + + cluster_->prioritySet().getMockHostSet(0)->hosts_ = { + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + cluster_->info_->stats().upstream_cx_total_.inc(); + expectSessionCreate(); + expectStreamCreate(0); + EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); + health_checker_->start(); +} + class TestProdHttpHealthChecker : public ProdHttpHealthCheckerImpl { public: using ProdHttpHealthCheckerImpl::ProdHttpHealthCheckerImpl; diff --git a/test/mocks/upstream/host.h b/test/mocks/upstream/host.h index 0316440e5389..3c927b0208aa 100644 --- a/test/mocks/upstream/host.h +++ b/test/mocks/upstream/host.h @@ -135,7 +135,8 @@ class MockHost : public Host { CreateConnectionData createHealthCheckConnection(Event::Dispatcher& dispatcher, - Network::TransportSocketOptionsSharedPtr) const override { + Network::TransportSocketOptionsSharedPtr, + const envoy::config::core::v3::Metadata*) const override { MockCreateConnectionData data = createConnection_(dispatcher, nullptr); return {Network::ClientConnectionPtr{data.connection_}, data.host_description_}; }
From ab32f5fd01ca8b23ee16dcffb55b1276e55bf1fa Mon Sep 17 00:00:00 2001 From: htuch Date: Tue, 28 Apr 2020 17:03:05 -0400 Subject: [PATCH 044/909] tls/api: factor transport socket out of cert.proto. (#10910) This is necessary to provide TLS transport socket docs and to be able to have the TLS transport socket added to the threat model docs (via its security_posture tag). I did both v2/v3, since this is not technically a change to v2, just a file re-org, and the shadowing machinery prefers file consistency across versions. Risk level: Low (refactoring) Testing: Docs generation and manual inspection.
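For orientation, the messages relocated into tls.proto (UpstreamTlsContext/DownstreamTlsContext) are what a *transport_socket* entry references in a v3 config. A minimal downstream sketch, with certificate file paths as placeholders, might look like:

.. code-block:: yaml

   # Listener filter chain fragment (illustrative).
   filter_chains:
   - transport_socket:
       name: envoy.transport_sockets.tls
       typed_config:
         "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext
         common_tls_context:
           tls_certificates:
           - certificate_chain: { filename: "/etc/envoy/certs/server.pem" }
             private_key: { filename: "/etc/envoy/certs/server.key" }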
Signed-off-by: Harvey Tuch --- api/envoy/api/v2/auth/cert.proto | 479 +---------------- api/envoy/api/v2/auth/common.proto | 327 +++++++++++ api/envoy/api/v2/auth/secret.proto | 50 ++ api/envoy/api/v2/auth/tls.proto | 152 ++++++ api/envoy/api/v2/cluster.proto | 2 +- .../api/v2/listener/listener_components.proto | 2 +- api/envoy/config/bootstrap/v2/bootstrap.proto | 2 +- api/envoy/config/bootstrap/v3/bootstrap.proto | 2 +- .../config/bootstrap/v4alpha/bootstrap.proto | 2 +- .../transport_sockets/tls/v3/cert.proto | 506 +----------------- .../transport_sockets/tls/v3/common.proto | 334 ++++++++++++ .../transport_sockets/tls/v3/secret.proto | 54 ++ .../transport_sockets/tls/v3/tls.proto | 161 ++++++ .../tls/v4alpha/{cert.proto => common.proto} | 184 +------ .../tls/v4alpha/secret.proto | 57 ++ .../transport_sockets/tls/v4alpha/tls.proto | 163 ++++++ docs/generate_extension_db.py | 3 + .../common_messages/common_messages.rst | 3 +- .../transport_socket/transport_socket.rst | 1 + .../envoy/api/v2/auth/cert.proto | 479 +---------------- .../envoy/api/v2/auth/common.proto | 327 +++++++++++ .../envoy/api/v2/auth/secret.proto | 50 ++ .../envoy/api/v2/auth/tls.proto | 152 ++++++ .../envoy/api/v2/cluster.proto | 2 +- .../api/v2/listener/listener_components.proto | 2 +- .../envoy/config/bootstrap/v2/bootstrap.proto | 2 +- .../envoy/config/bootstrap/v3/bootstrap.proto | 2 +- .../config/bootstrap/v4alpha/bootstrap.proto | 2 +- .../envoy/config/cluster/v3/cluster.proto | 2 +- .../listener/v3/listener_components.proto | 2 +- .../transport_sockets/tls/v3/cert.proto | 503 +---------------- .../transport_sockets/tls/v3/common.proto | 331 ++++++++++++ .../transport_sockets/tls/v3/secret.proto | 54 ++ .../transport_sockets/tls/v3/tls.proto | 161 ++++++ .../tls/v4alpha/{cert.proto => common.proto} | 184 +------ .../tls/v4alpha/secret.proto | 57 ++ .../transport_sockets/tls/v4alpha/tls.proto | 163 ++++++ source/common/secret/sds_api.cc | 2 +- .../quiche/quic_transport_socket_factory.cc | 2 +- .../transport_sockets/tls/config.cc | 2 +- .../tls/context_impl_test.cc | 2 +- 41 files changed, 2633 insertions(+), 2334 deletions(-) create mode 100644 api/envoy/api/v2/auth/common.proto create mode 100644 api/envoy/api/v2/auth/secret.proto create mode 100644 api/envoy/api/v2/auth/tls.proto create mode 100644 api/envoy/extensions/transport_sockets/tls/v3/common.proto create mode 100644 api/envoy/extensions/transport_sockets/tls/v3/secret.proto create mode 100644 api/envoy/extensions/transport_sockets/tls/v3/tls.proto rename api/envoy/extensions/transport_sockets/tls/v4alpha/{cert.proto => common.proto} (64%) create mode 100644 api/envoy/extensions/transport_sockets/tls/v4alpha/secret.proto create mode 100644 api/envoy/extensions/transport_sockets/tls/v4alpha/tls.proto create mode 100644 generated_api_shadow/envoy/api/v2/auth/common.proto create mode 100644 generated_api_shadow/envoy/api/v2/auth/secret.proto create mode 100644 generated_api_shadow/envoy/api/v2/auth/tls.proto create mode 100644 generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/common.proto create mode 100644 generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/secret.proto create mode 100644 generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/tls.proto rename generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/{cert.proto => common.proto} (64%) create mode 100644 generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/secret.proto create mode 100644 
generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/tls.proto diff --git a/api/envoy/api/v2/auth/cert.proto b/api/envoy/api/v2/auth/cert.proto index a1642318e043..49e8b8c70fa2 100644 --- a/api/envoy/api/v2/auth/cert.proto +++ b/api/envoy/api/v2/auth/cert.proto @@ -2,486 +2,15 @@ syntax = "proto3"; package envoy.api.v2.auth; -import "envoy/api/v2/core/base.proto"; -import "envoy/api/v2/core/config_source.proto"; -import "envoy/type/matcher/string.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - import "udpa/annotations/migrate.proto"; -import "udpa/annotations/sensitive.proto"; import "udpa/annotations/status.proto"; -import "validate/validate.proto"; + +import public "envoy/api/v2/auth/common.proto"; +import public "envoy/api/v2/auth/secret.proto"; +import public "envoy/api/v2/auth/tls.proto"; option java_package = "io.envoyproxy.envoy.api.v2.auth"; option java_outer_classname = "CertProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.transport_sockets.tls.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Common TLS configuration] - -message TlsParameters { - enum TlsProtocol { - // Envoy will choose the optimal TLS version. - TLS_AUTO = 0; - - // TLS 1.0 - TLSv1_0 = 1; - - // TLS 1.1 - TLSv1_1 = 2; - - // TLS 1.2 - TLSv1_2 = 3; - - // TLS 1.3 - TLSv1_3 = 4; - } - - // Minimum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_0`` for - // servers. - TlsProtocol tls_minimum_protocol_version = 1 [(validate.rules).enum = {defined_only: true}]; - - // Maximum TLS protocol version. By default, it's ``TLSv1_3`` for servers in non-FIPS builds, and - // ``TLSv1_2`` for clients and for servers using :ref:`BoringSSL FIPS `. - TlsProtocol tls_maximum_protocol_version = 2 [(validate.rules).enum = {defined_only: true}]; - - // If specified, the TLS listener will only support the specified `cipher list - // `_ - // when negotiating TLS 1.0-1.2 (this setting has no effect when negotiating TLS 1.3). If not - // specified, the default list will be used. - // - // In non-FIPS builds, the default cipher list is: - // - // .. code-block:: none - // - // [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305] - // [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305] - // ECDHE-ECDSA-AES128-SHA - // ECDHE-RSA-AES128-SHA - // AES128-GCM-SHA256 - // AES128-SHA - // ECDHE-ECDSA-AES256-GCM-SHA384 - // ECDHE-RSA-AES256-GCM-SHA384 - // ECDHE-ECDSA-AES256-SHA - // ECDHE-RSA-AES256-SHA - // AES256-GCM-SHA384 - // AES256-SHA - // - // In builds using :ref:`BoringSSL FIPS `, the default cipher list is: - // - // .. code-block:: none - // - // ECDHE-ECDSA-AES128-GCM-SHA256 - // ECDHE-RSA-AES128-GCM-SHA256 - // ECDHE-ECDSA-AES128-SHA - // ECDHE-RSA-AES128-SHA - // AES128-GCM-SHA256 - // AES128-SHA - // ECDHE-ECDSA-AES256-GCM-SHA384 - // ECDHE-RSA-AES256-GCM-SHA384 - // ECDHE-ECDSA-AES256-SHA - // ECDHE-RSA-AES256-SHA - // AES256-GCM-SHA384 - // AES256-SHA - repeated string cipher_suites = 3; - - // If specified, the TLS connection will only support the specified ECDH - // curves. If not specified, the default curves will be used. - // - // In non-FIPS builds, the default curves are: - // - // .. code-block:: none - // - // X25519 - // P-256 - // - // In builds using :ref:`BoringSSL FIPS `, the default curve is: - // - // .. 
code-block:: none - // - // P-256 - repeated string ecdh_curves = 4; -} - -// BoringSSL private key method configuration. The private key methods are used for external -// (potentially asynchronous) signing and decryption operations. Some use cases for private key -// methods would be TPM support and TLS acceleration. -message PrivateKeyProvider { - // Private key method provider name. The name must match a - // supported private key method provider type. - string provider_name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Private key method provider specific configuration. - oneof config_type { - google.protobuf.Struct config = 2 [deprecated = true, (udpa.annotations.sensitive) = true]; - - google.protobuf.Any typed_config = 3 [(udpa.annotations.sensitive) = true]; - } -} - -// [#next-free-field: 7] -message TlsCertificate { - // The TLS certificate chain. - core.DataSource certificate_chain = 1; - - // The TLS private key. - core.DataSource private_key = 2 [(udpa.annotations.sensitive) = true]; - - // BoringSSL private key method provider. This is an alternative to :ref:`private_key - // ` field. This can't be - // marked as ``oneof`` due to API compatibility reasons. Setting both :ref:`private_key - // ` and - // :ref:`private_key_provider - // ` fields will result in an - // error. - PrivateKeyProvider private_key_provider = 6; - - // The password to decrypt the TLS private key. If this field is not set, it is assumed that the - // TLS private key is not password encrypted. - core.DataSource password = 3 [(udpa.annotations.sensitive) = true]; - - // [#not-implemented-hide:] - core.DataSource ocsp_staple = 4; - - // [#not-implemented-hide:] - repeated core.DataSource signed_certificate_timestamp = 5; -} - -message TlsSessionTicketKeys { - // Keys for encrypting and decrypting TLS session tickets. The - // first key in the array contains the key to encrypt all new sessions created by this context. - // All keys are candidates for decrypting received tickets. This allows for easy rotation of keys - // by, for example, putting the new key first, and the previous key second. - // - // If :ref:`session_ticket_keys ` - // is not specified, the TLS library will still support resuming sessions via tickets, but it will - // use an internally-generated and managed key, so sessions cannot be resumed across hot restarts - // or on different hosts. - // - // Each key must contain exactly 80 bytes of cryptographically-secure random data. For - // example, the output of ``openssl rand 80``. - // - // .. attention:: - // - // Using this feature has serious security considerations and risks. Improper handling of keys - // may result in loss of secrecy in connections, even if ciphers supporting perfect forward - // secrecy are used. See https://www.imperialviolet.org/2013/06/27/botchingpfs.html for some - // discussion. To minimize the risk, you must: - // - // * Keep the session ticket keys at least as secure as your TLS certificate private keys - // * Rotate session ticket keys at least daily, and preferably hourly - // * Always generate keys using a cryptographically-secure random data source - repeated core.DataSource keys = 1 - [(validate.rules).repeated = {min_items: 1}, (udpa.annotations.sensitive) = true]; -} - -// [#next-free-field: 11] -message CertificateValidationContext { - // Peer certificate verification mode. 
- enum TrustChainVerification { - // Perform default certificate verification (e.g., against CA / verification lists) - VERIFY_TRUST_CHAIN = 0; - - // Connections where the certificate fails verification will be permitted. - // For HTTP connections, the result of certificate verification can be used in route matching. ( - // see :ref:`validated ` ). - ACCEPT_UNTRUSTED = 1; - } - - // TLS certificate data containing certificate authority certificates to use in verifying - // a presented peer certificate (e.g. server certificate for clusters or client certificate - // for listeners). If not specified and a peer certificate is presented it will not be - // verified. By default, a client certificate is optional, unless one of the additional - // options (:ref:`require_client_certificate - // `, - // :ref:`verify_certificate_spki - // `, - // :ref:`verify_certificate_hash - // `, or - // :ref:`match_subject_alt_names - // `) is also - // specified. - // - // It can optionally contain certificate revocation lists, in which case Envoy will verify - // that the presented peer certificate has not been revoked by one of the included CRLs. - // - // See :ref:`the TLS overview ` for a list of common - // system CA locations. - core.DataSource trusted_ca = 1; - - // An optional list of base64-encoded SHA-256 hashes. If specified, Envoy will verify that the - // SHA-256 of the DER-encoded Subject Public Key Information (SPKI) of the presented certificate - // matches one of the specified values. - // - // A base64-encoded SHA-256 of the Subject Public Key Information (SPKI) of the certificate - // can be generated with the following command: - // - // .. code-block:: bash - // - // $ openssl x509 -in path/to/client.crt -noout -pubkey - // | openssl pkey -pubin -outform DER - // | openssl dgst -sha256 -binary - // | openssl enc -base64 - // NvqYIYSbgK2vCJpQhObf77vv+bQWtc5ek5RIOwPiC9A= - // - // This is the format used in HTTP Public Key Pinning. - // - // When both: - // :ref:`verify_certificate_hash - // ` and - // :ref:`verify_certificate_spki - // ` are specified, - // a hash matching value from either of the lists will result in the certificate being accepted. - // - // .. attention:: - // - // This option is preferred over :ref:`verify_certificate_hash - // `, - // because SPKI is tied to a private key, so it doesn't change when the certificate - // is renewed using the same private key. - repeated string verify_certificate_spki = 3 - [(validate.rules).repeated = {items {string {min_bytes: 44 max_bytes: 44}}}]; - - // An optional list of hex-encoded SHA-256 hashes. If specified, Envoy will verify that - // the SHA-256 of the DER-encoded presented certificate matches one of the specified values. - // - // A hex-encoded SHA-256 of the certificate can be generated with the following command: - // - // .. code-block:: bash - // - // $ openssl x509 -in path/to/client.crt -outform DER | openssl dgst -sha256 | cut -d" " -f2 - // df6ff72fe9116521268f6f2dd4966f51df479883fe7037b39f75916ac3049d1a - // - // A long hex-encoded and colon-separated SHA-256 (a.k.a. "fingerprint") of the certificate - // can be generated with the following command: - // - // .. code-block:: bash - // - // $ openssl x509 -in path/to/client.crt -noout -fingerprint -sha256 | cut -d"=" -f2 - // DF:6F:F7:2F:E9:11:65:21:26:8F:6F:2D:D4:96:6F:51:DF:47:98:83:FE:70:37:B3:9F:75:91:6A:C3:04:9D:1A - // - // Both of those formats are acceptable. 
- // - // When both: - // :ref:`verify_certificate_hash - // ` and - // :ref:`verify_certificate_spki - // ` are specified, - // a hash matching value from either of the lists will result in the certificate being accepted. - repeated string verify_certificate_hash = 2 - [(validate.rules).repeated = {items {string {min_bytes: 64 max_bytes: 95}}}]; - - // An optional list of Subject Alternative Names. If specified, Envoy will verify that the - // Subject Alternative Name of the presented certificate matches one of the specified values. - // - // .. attention:: - // - // Subject Alternative Names are easily spoofable and verifying only them is insecure, - // therefore this option must be used together with :ref:`trusted_ca - // `. - repeated string verify_subject_alt_name = 4 [deprecated = true]; - - // An optional list of Subject Alternative name matchers. Envoy will verify that the - // Subject Alternative Name of the presented certificate matches one of the specified matches. - // - // When a certificate has wildcard DNS SAN entries, to match a specific client, it should be - // configured with exact match type in the :ref:`string matcher `. - // For example if the certificate has "\*.example.com" as DNS SAN entry, to allow only "api.example.com", - // it should be configured as shown below. - // - // .. code-block:: yaml - // - // match_subject_alt_names: - // exact: "api.example.com" - // - // .. attention:: - // - // Subject Alternative Names are easily spoofable and verifying only them is insecure, - // therefore this option must be used together with :ref:`trusted_ca - // `. - repeated type.matcher.StringMatcher match_subject_alt_names = 9; - - // [#not-implemented-hide:] Must present a signed time-stamped OCSP response. - google.protobuf.BoolValue require_ocsp_staple = 5; - - // [#not-implemented-hide:] Must present signed certificate time-stamp. - google.protobuf.BoolValue require_signed_certificate_timestamp = 6; - - // An optional `certificate revocation list - // `_ - // (in PEM format). If specified, Envoy will verify that the presented peer - // certificate has not been revoked by this CRL. If this DataSource contains - // multiple CRLs, all of them will be used. - core.DataSource crl = 7; - - // If specified, Envoy will not reject expired certificates. - bool allow_expired_certificate = 8; - - // Certificate trust chain verification mode. - TrustChainVerification trust_chain_verification = 10 - [(validate.rules).enum = {defined_only: true}]; -} - -// TLS context shared by both client and server TLS contexts. -// [#next-free-field: 9] -message CommonTlsContext { - message CombinedCertificateValidationContext { - // How to validate peer certificates. - CertificateValidationContext default_validation_context = 1 - [(validate.rules).message = {required: true}]; - - // Config for fetching validation context via SDS API. - SdsSecretConfig validation_context_sds_secret_config = 2 - [(validate.rules).message = {required: true}]; - } - - reserved 5; - - // TLS protocol versions, cipher suites etc. - TlsParameters tls_params = 1; - - // :ref:`Multiple TLS certificates ` can be associated with the - // same context to allow both RSA and ECDSA certificates. - // - // Only a single TLS certificate is supported in client contexts. In server contexts, the first - // RSA certificate is used for clients that only support RSA and the first ECDSA certificate is - // used for clients that support ECDSA. 
- repeated TlsCertificate tls_certificates = 2; - - // Configs for fetching TLS certificates via SDS API. - repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6 - [(validate.rules).repeated = {max_items: 1}]; - - oneof validation_context_type { - // How to validate peer certificates. - CertificateValidationContext validation_context = 3; - - // Config for fetching validation context via SDS API. - SdsSecretConfig validation_context_sds_secret_config = 7; - - // Combined certificate validation context holds a default CertificateValidationContext - // and SDS config. When SDS server returns dynamic CertificateValidationContext, both dynamic - // and default CertificateValidationContext are merged into a new CertificateValidationContext - // for validation. This merge is done by Message::MergeFrom(), so dynamic - // CertificateValidationContext overwrites singular fields in default - // CertificateValidationContext, and concatenates repeated fields to default - // CertificateValidationContext, and logical OR is applied to boolean fields. - CombinedCertificateValidationContext combined_validation_context = 8; - } - - // Supplies the list of ALPN protocols that the listener should expose. In - // practice this is likely to be set to one of two values (see the - // :ref:`codec_type - // ` - // parameter in the HTTP connection manager for more information): - // - // * "h2,http/1.1" If the listener is going to support both HTTP/2 and HTTP/1.1. - // * "http/1.1" If the listener is only going to support HTTP/1.1. - // - // There is no default for this parameter. If empty, Envoy will not expose ALPN. - repeated string alpn_protocols = 4; -} - -message UpstreamTlsContext { - // Common TLS context settings. - // - // .. attention:: - // - // Server certificate verification is not enabled by default. Configure - // :ref:`trusted_ca` to enable - // verification. - CommonTlsContext common_tls_context = 1; - - // SNI string to use when creating TLS backend connections. - string sni = 2 [(validate.rules).string = {max_bytes: 255}]; - - // If true, server-initiated TLS renegotiation will be allowed. - // - // .. attention:: - // - // TLS renegotiation is considered insecure and shouldn't be used unless absolutely necessary. - bool allow_renegotiation = 3; - - // Maximum number of session keys (Pre-Shared Keys for TLSv1.3+, Session IDs and Session Tickets - // for TLSv1.2 and older) to store for the purpose of session resumption. - // - // Defaults to 1, setting this to 0 disables session resumption. - google.protobuf.UInt32Value max_session_keys = 4; -} - -// [#next-free-field: 8] -message DownstreamTlsContext { - // Common TLS context settings. - CommonTlsContext common_tls_context = 1; - - // If specified, Envoy will reject connections without a valid client - // certificate. - google.protobuf.BoolValue require_client_certificate = 2; - - // If specified, Envoy will reject connections without a valid and matching SNI. - // [#not-implemented-hide:] - google.protobuf.BoolValue require_sni = 3; - - oneof session_ticket_keys_type { - // TLS session ticket key settings. - TlsSessionTicketKeys session_ticket_keys = 4; - - // Config for fetching TLS session ticket keys via SDS API. - SdsSecretConfig session_ticket_keys_sds_secret_config = 5; - - // Config for controlling stateless TLS session resumption: setting this to true will cause the TLS - // server to not issue TLS session tickets for the purposes of stateless TLS session resumption. 
- // If set to false, the TLS server will issue TLS session tickets and encrypt/decrypt them using - // the keys specified through either :ref:`session_ticket_keys ` - // or :ref:`session_ticket_keys_sds_secret_config `. - // If this config is set to false and no keys are explicitly configured, the TLS server will issue - // TLS session tickets and encrypt/decrypt them using an internally-generated and managed key, with the - // implication that sessions cannot be resumed across hot restarts or on different hosts. - bool disable_stateless_session_resumption = 7; - } - - // If specified, session_timeout will change maximum lifetime (in seconds) of TLS session - // Currently this value is used as a hint to `TLS session ticket lifetime (for TLSv1.2) - // ` - // only seconds could be specified (fractional seconds are going to be ignored). - google.protobuf.Duration session_timeout = 6 [(validate.rules).duration = { - lt {seconds: 4294967296} - gte {} - }]; -} - -message GenericSecret { - // Secret of generic type and is available to filters. - core.DataSource secret = 1 [(udpa.annotations.sensitive) = true]; -} - -message SdsSecretConfig { - // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. - // When both name and config are specified, then secret can be fetched and/or reloaded via - // SDS. When only name is specified, then secret will be loaded from static resources. - string name = 1; - - core.ConfigSource sds_config = 2; -} - -// [#next-free-field: 6] -message Secret { - // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. - string name = 1; - - oneof type { - TlsCertificate tls_certificate = 2; - - TlsSessionTicketKeys session_ticket_keys = 3; - - CertificateValidationContext validation_context = 4; - - GenericSecret generic_secret = 5; - } -} diff --git a/api/envoy/api/v2/auth/common.proto b/api/envoy/api/v2/auth/common.proto new file mode 100644 index 000000000000..ab4b9c13493d --- /dev/null +++ b/api/envoy/api/v2/auth/common.proto @@ -0,0 +1,327 @@ +syntax = "proto3"; + +package envoy.api.v2.auth; + +import "envoy/api/v2/core/base.proto"; +import "envoy/type/matcher/string.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/migrate.proto"; +import "udpa/annotations/sensitive.proto"; +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.api.v2.auth"; +option java_outer_classname = "CommonProto"; +option java_multiple_files = true; +option (udpa.annotations.file_migrate).move_to_package = + "envoy.extensions.transport_sockets.tls.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; + +// [#protodoc-title: Common TLS configuration] + +message TlsParameters { + enum TlsProtocol { + // Envoy will choose the optimal TLS version. + TLS_AUTO = 0; + + // TLS 1.0 + TLSv1_0 = 1; + + // TLS 1.1 + TLSv1_1 = 2; + + // TLS 1.2 + TLSv1_2 = 3; + + // TLS 1.3 + TLSv1_3 = 4; + } + + // Minimum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_0`` for + // servers. + TlsProtocol tls_minimum_protocol_version = 1 [(validate.rules).enum = {defined_only: true}]; + + // Maximum TLS protocol version. By default, it's ``TLSv1_3`` for servers in non-FIPS builds, and + // ``TLSv1_2`` for clients and for servers using :ref:`BoringSSL FIPS `. 
+ TlsProtocol tls_maximum_protocol_version = 2 [(validate.rules).enum = {defined_only: true}]; + + // If specified, the TLS listener will only support the specified `cipher list + // `_ + // when negotiating TLS 1.0-1.2 (this setting has no effect when negotiating TLS 1.3). If not + // specified, the default list will be used. + // + // In non-FIPS builds, the default cipher list is: + // + // .. code-block:: none + // + // [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305] + // [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305] + // ECDHE-ECDSA-AES128-SHA + // ECDHE-RSA-AES128-SHA + // AES128-GCM-SHA256 + // AES128-SHA + // ECDHE-ECDSA-AES256-GCM-SHA384 + // ECDHE-RSA-AES256-GCM-SHA384 + // ECDHE-ECDSA-AES256-SHA + // ECDHE-RSA-AES256-SHA + // AES256-GCM-SHA384 + // AES256-SHA + // + // In builds using :ref:`BoringSSL FIPS `, the default cipher list is: + // + // .. code-block:: none + // + // ECDHE-ECDSA-AES128-GCM-SHA256 + // ECDHE-RSA-AES128-GCM-SHA256 + // ECDHE-ECDSA-AES128-SHA + // ECDHE-RSA-AES128-SHA + // AES128-GCM-SHA256 + // AES128-SHA + // ECDHE-ECDSA-AES256-GCM-SHA384 + // ECDHE-RSA-AES256-GCM-SHA384 + // ECDHE-ECDSA-AES256-SHA + // ECDHE-RSA-AES256-SHA + // AES256-GCM-SHA384 + // AES256-SHA + repeated string cipher_suites = 3; + + // If specified, the TLS connection will only support the specified ECDH + // curves. If not specified, the default curves will be used. + // + // In non-FIPS builds, the default curves are: + // + // .. code-block:: none + // + // X25519 + // P-256 + // + // In builds using :ref:`BoringSSL FIPS `, the default curve is: + // + // .. code-block:: none + // + // P-256 + repeated string ecdh_curves = 4; +} + +// BoringSSL private key method configuration. The private key methods are used for external +// (potentially asynchronous) signing and decryption operations. Some use cases for private key +// methods would be TPM support and TLS acceleration. +message PrivateKeyProvider { + // Private key method provider name. The name must match a + // supported private key method provider type. + string provider_name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Private key method provider specific configuration. + oneof config_type { + google.protobuf.Struct config = 2 [deprecated = true, (udpa.annotations.sensitive) = true]; + + google.protobuf.Any typed_config = 3 [(udpa.annotations.sensitive) = true]; + } +} + +// [#next-free-field: 7] +message TlsCertificate { + // The TLS certificate chain. + core.DataSource certificate_chain = 1; + + // The TLS private key. + core.DataSource private_key = 2 [(udpa.annotations.sensitive) = true]; + + // BoringSSL private key method provider. This is an alternative to :ref:`private_key + // ` field. This can't be + // marked as ``oneof`` due to API compatibility reasons. Setting both :ref:`private_key + // ` and + // :ref:`private_key_provider + // ` fields will result in an + // error. + PrivateKeyProvider private_key_provider = 6; + + // The password to decrypt the TLS private key. If this field is not set, it is assumed that the + // TLS private key is not password encrypted. + core.DataSource password = 3 [(udpa.annotations.sensitive) = true]; + + // [#not-implemented-hide:] + core.DataSource ocsp_staple = 4; + + // [#not-implemented-hide:] + repeated core.DataSource signed_certificate_timestamp = 5; +} + +message TlsSessionTicketKeys { + // Keys for encrypting and decrypting TLS session tickets. 
The + // first key in the array contains the key to encrypt all new sessions created by this context. + // All keys are candidates for decrypting received tickets. This allows for easy rotation of keys + // by, for example, putting the new key first, and the previous key second. + // + // If :ref:`session_ticket_keys ` + // is not specified, the TLS library will still support resuming sessions via tickets, but it will + // use an internally-generated and managed key, so sessions cannot be resumed across hot restarts + // or on different hosts. + // + // Each key must contain exactly 80 bytes of cryptographically-secure random data. For + // example, the output of ``openssl rand 80``. + // + // .. attention:: + // + // Using this feature has serious security considerations and risks. Improper handling of keys + // may result in loss of secrecy in connections, even if ciphers supporting perfect forward + // secrecy are used. See https://www.imperialviolet.org/2013/06/27/botchingpfs.html for some + // discussion. To minimize the risk, you must: + // + // * Keep the session ticket keys at least as secure as your TLS certificate private keys + // * Rotate session ticket keys at least daily, and preferably hourly + // * Always generate keys using a cryptographically-secure random data source + repeated core.DataSource keys = 1 + [(validate.rules).repeated = {min_items: 1}, (udpa.annotations.sensitive) = true]; +} + +// [#next-free-field: 11] +message CertificateValidationContext { + // Peer certificate verification mode. + enum TrustChainVerification { + // Perform default certificate verification (e.g., against CA / verification lists) + VERIFY_TRUST_CHAIN = 0; + + // Connections where the certificate fails verification will be permitted. + // For HTTP connections, the result of certificate verification can be used in route matching. ( + // see :ref:`validated ` ). + ACCEPT_UNTRUSTED = 1; + } + + // TLS certificate data containing certificate authority certificates to use in verifying + // a presented peer certificate (e.g. server certificate for clusters or client certificate + // for listeners). If not specified and a peer certificate is presented it will not be + // verified. By default, a client certificate is optional, unless one of the additional + // options (:ref:`require_client_certificate + // `, + // :ref:`verify_certificate_spki + // `, + // :ref:`verify_certificate_hash + // `, or + // :ref:`match_subject_alt_names + // `) is also + // specified. + // + // It can optionally contain certificate revocation lists, in which case Envoy will verify + // that the presented peer certificate has not been revoked by one of the included CRLs. + // + // See :ref:`the TLS overview ` for a list of common + // system CA locations. + core.DataSource trusted_ca = 1; + + // An optional list of base64-encoded SHA-256 hashes. If specified, Envoy will verify that the + // SHA-256 of the DER-encoded Subject Public Key Information (SPKI) of the presented certificate + // matches one of the specified values. + // + // A base64-encoded SHA-256 of the Subject Public Key Information (SPKI) of the certificate + // can be generated with the following command: + // + // .. code-block:: bash + // + // $ openssl x509 -in path/to/client.crt -noout -pubkey + // | openssl pkey -pubin -outform DER + // | openssl dgst -sha256 -binary + // | openssl enc -base64 + // NvqYIYSbgK2vCJpQhObf77vv+bQWtc5ek5RIOwPiC9A= + // + // This is the format used in HTTP Public Key Pinning. 
+ // + // When both: + // :ref:`verify_certificate_hash + // ` and + // :ref:`verify_certificate_spki + // ` are specified, + // a hash matching value from either of the lists will result in the certificate being accepted. + // + // .. attention:: + // + // This option is preferred over :ref:`verify_certificate_hash + // `, + // because SPKI is tied to a private key, so it doesn't change when the certificate + // is renewed using the same private key. + repeated string verify_certificate_spki = 3 + [(validate.rules).repeated = {items {string {min_bytes: 44 max_bytes: 44}}}]; + + // An optional list of hex-encoded SHA-256 hashes. If specified, Envoy will verify that + // the SHA-256 of the DER-encoded presented certificate matches one of the specified values. + // + // A hex-encoded SHA-256 of the certificate can be generated with the following command: + // + // .. code-block:: bash + // + // $ openssl x509 -in path/to/client.crt -outform DER | openssl dgst -sha256 | cut -d" " -f2 + // df6ff72fe9116521268f6f2dd4966f51df479883fe7037b39f75916ac3049d1a + // + // A long hex-encoded and colon-separated SHA-256 (a.k.a. "fingerprint") of the certificate + // can be generated with the following command: + // + // .. code-block:: bash + // + // $ openssl x509 -in path/to/client.crt -noout -fingerprint -sha256 | cut -d"=" -f2 + // DF:6F:F7:2F:E9:11:65:21:26:8F:6F:2D:D4:96:6F:51:DF:47:98:83:FE:70:37:B3:9F:75:91:6A:C3:04:9D:1A + // + // Both of those formats are acceptable. + // + // When both: + // :ref:`verify_certificate_hash + // ` and + // :ref:`verify_certificate_spki + // ` are specified, + // a hash matching value from either of the lists will result in the certificate being accepted. + repeated string verify_certificate_hash = 2 + [(validate.rules).repeated = {items {string {min_bytes: 64 max_bytes: 95}}}]; + + // An optional list of Subject Alternative Names. If specified, Envoy will verify that the + // Subject Alternative Name of the presented certificate matches one of the specified values. + // + // .. attention:: + // + // Subject Alternative Names are easily spoofable and verifying only them is insecure, + // therefore this option must be used together with :ref:`trusted_ca + // `. + repeated string verify_subject_alt_name = 4 [deprecated = true]; + + // An optional list of Subject Alternative name matchers. Envoy will verify that the + // Subject Alternative Name of the presented certificate matches one of the specified matches. + // + // When a certificate has wildcard DNS SAN entries, to match a specific client, it should be + // configured with exact match type in the :ref:`string matcher `. + // For example if the certificate has "\*.example.com" as DNS SAN entry, to allow only "api.example.com", + // it should be configured as shown below. + // + // .. code-block:: yaml + // + // match_subject_alt_names: + // exact: "api.example.com" + // + // .. attention:: + // + // Subject Alternative Names are easily spoofable and verifying only them is insecure, + // therefore this option must be used together with :ref:`trusted_ca + // `. + repeated type.matcher.StringMatcher match_subject_alt_names = 9; + + // [#not-implemented-hide:] Must present a signed time-stamped OCSP response. + google.protobuf.BoolValue require_ocsp_staple = 5; + + // [#not-implemented-hide:] Must present signed certificate time-stamp. + google.protobuf.BoolValue require_signed_certificate_timestamp = 6; + + // An optional `certificate revocation list + // `_ + // (in PEM format). 
If specified, Envoy will verify that the presented peer + // certificate has not been revoked by this CRL. If this DataSource contains + // multiple CRLs, all of them will be used. + core.DataSource crl = 7; + + // If specified, Envoy will not reject expired certificates. + bool allow_expired_certificate = 8; + + // Certificate trust chain verification mode. + TrustChainVerification trust_chain_verification = 10 + [(validate.rules).enum = {defined_only: true}]; +} diff --git a/api/envoy/api/v2/auth/secret.proto b/api/envoy/api/v2/auth/secret.proto new file mode 100644 index 000000000000..3a6d8cf7dcb6 --- /dev/null +++ b/api/envoy/api/v2/auth/secret.proto @@ -0,0 +1,50 @@ +syntax = "proto3"; + +package envoy.api.v2.auth; + +import "envoy/api/v2/auth/common.proto"; +import "envoy/api/v2/core/base.proto"; +import "envoy/api/v2/core/config_source.proto"; + +import "udpa/annotations/migrate.proto"; +import "udpa/annotations/sensitive.proto"; +import "udpa/annotations/status.proto"; + +option java_package = "io.envoyproxy.envoy.api.v2.auth"; +option java_outer_classname = "SecretProto"; +option java_multiple_files = true; +option (udpa.annotations.file_migrate).move_to_package = + "envoy.extensions.transport_sockets.tls.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; + +// [#protodoc-title: Secrets configuration] + +message GenericSecret { + // Secret of generic type and is available to filters. + core.DataSource secret = 1 [(udpa.annotations.sensitive) = true]; +} + +message SdsSecretConfig { + // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. + // When both name and config are specified, then secret can be fetched and/or reloaded via + // SDS. When only name is specified, then secret will be loaded from static resources. + string name = 1; + + core.ConfigSource sds_config = 2; +} + +// [#next-free-field: 6] +message Secret { + // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. + string name = 1; + + oneof type { + TlsCertificate tls_certificate = 2; + + TlsSessionTicketKeys session_ticket_keys = 3; + + CertificateValidationContext validation_context = 4; + + GenericSecret generic_secret = 5; + } +} diff --git a/api/envoy/api/v2/auth/tls.proto b/api/envoy/api/v2/auth/tls.proto new file mode 100644 index 000000000000..201973a2b9de --- /dev/null +++ b/api/envoy/api/v2/auth/tls.proto @@ -0,0 +1,152 @@ +syntax = "proto3"; + +package envoy.api.v2.auth; + +import "envoy/api/v2/auth/common.proto"; +import "envoy/api/v2/auth/secret.proto"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.api.v2.auth"; +option java_outer_classname = "TlsProto"; +option java_multiple_files = true; +option (udpa.annotations.file_migrate).move_to_package = + "envoy.extensions.transport_sockets.tls.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; + +// [#protodoc-title: TLS transport socket] +// [#extension: envoy.transport_sockets.tls] +// The TLS contexts below provide the transport socket configuration for upstream/downstream TLS. + +message UpstreamTlsContext { + // Common TLS context settings. + // + // .. attention:: + // + // Server certificate verification is not enabled by default. Configure + // :ref:`trusted_ca` to enable + // verification. 
+ CommonTlsContext common_tls_context = 1; + + // SNI string to use when creating TLS backend connections. + string sni = 2 [(validate.rules).string = {max_bytes: 255}]; + + // If true, server-initiated TLS renegotiation will be allowed. + // + // .. attention:: + // + // TLS renegotiation is considered insecure and shouldn't be used unless absolutely necessary. + bool allow_renegotiation = 3; + + // Maximum number of session keys (Pre-Shared Keys for TLSv1.3+, Session IDs and Session Tickets + // for TLSv1.2 and older) to store for the purpose of session resumption. + // + // Defaults to 1, setting this to 0 disables session resumption. + google.protobuf.UInt32Value max_session_keys = 4; +} + +// [#next-free-field: 8] +message DownstreamTlsContext { + // Common TLS context settings. + CommonTlsContext common_tls_context = 1; + + // If specified, Envoy will reject connections without a valid client + // certificate. + google.protobuf.BoolValue require_client_certificate = 2; + + // If specified, Envoy will reject connections without a valid and matching SNI. + // [#not-implemented-hide:] + google.protobuf.BoolValue require_sni = 3; + + oneof session_ticket_keys_type { + // TLS session ticket key settings. + TlsSessionTicketKeys session_ticket_keys = 4; + + // Config for fetching TLS session ticket keys via SDS API. + SdsSecretConfig session_ticket_keys_sds_secret_config = 5; + + // Config for controlling stateless TLS session resumption: setting this to true will cause the TLS + // server to not issue TLS session tickets for the purposes of stateless TLS session resumption. + // If set to false, the TLS server will issue TLS session tickets and encrypt/decrypt them using + // the keys specified through either :ref:`session_ticket_keys ` + // or :ref:`session_ticket_keys_sds_secret_config `. + // If this config is set to false and no keys are explicitly configured, the TLS server will issue + // TLS session tickets and encrypt/decrypt them using an internally-generated and managed key, with the + // implication that sessions cannot be resumed across hot restarts or on different hosts. + bool disable_stateless_session_resumption = 7; + } + + // If specified, session_timeout will change maximum lifetime (in seconds) of TLS session + // Currently this value is used as a hint to `TLS session ticket lifetime (for TLSv1.2) + // ` + // only seconds could be specified (fractional seconds are going to be ignored). + google.protobuf.Duration session_timeout = 6 [(validate.rules).duration = { + lt {seconds: 4294967296} + gte {} + }]; +} + +// TLS context shared by both client and server TLS contexts. +// [#next-free-field: 9] +message CommonTlsContext { + message CombinedCertificateValidationContext { + // How to validate peer certificates. + CertificateValidationContext default_validation_context = 1 + [(validate.rules).message = {required: true}]; + + // Config for fetching validation context via SDS API. + SdsSecretConfig validation_context_sds_secret_config = 2 + [(validate.rules).message = {required: true}]; + } + + reserved 5; + + // TLS protocol versions, cipher suites etc. + TlsParameters tls_params = 1; + + // :ref:`Multiple TLS certificates ` can be associated with the + // same context to allow both RSA and ECDSA certificates. + // + // Only a single TLS certificate is supported in client contexts. In server contexts, the first + // RSA certificate is used for clients that only support RSA and the first ECDSA certificate is + // used for clients that support ECDSA. 
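As a rough sketch of how the downstream fields above fit together, a ``DownstreamTlsContext`` that serves a certificate, pins the minimum TLS version, and requires client certificates could be configured along these lines (the certificate paths are placeholders):

.. code-block:: yaml

   common_tls_context:
     tls_params:
       tls_minimum_protocol_version: TLSv1_2
     tls_certificates:
     - certificate_chain:
         filename: /etc/envoy/server.crt
       private_key:
         filename: /etc/envoy/server.key
   require_client_certificate: true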
+ repeated TlsCertificate tls_certificates = 2; + + // Configs for fetching TLS certificates via SDS API. + repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6 + [(validate.rules).repeated = {max_items: 1}]; + + oneof validation_context_type { + // How to validate peer certificates. + CertificateValidationContext validation_context = 3; + + // Config for fetching validation context via SDS API. + SdsSecretConfig validation_context_sds_secret_config = 7; + + // Combined certificate validation context holds a default CertificateValidationContext + // and SDS config. When SDS server returns dynamic CertificateValidationContext, both dynamic + // and default CertificateValidationContext are merged into a new CertificateValidationContext + // for validation. This merge is done by Message::MergeFrom(), so dynamic + // CertificateValidationContext overwrites singular fields in default + // CertificateValidationContext, and concatenates repeated fields to default + // CertificateValidationContext, and logical OR is applied to boolean fields. + CombinedCertificateValidationContext combined_validation_context = 8; + } + + // Supplies the list of ALPN protocols that the listener should expose. In + // practice this is likely to be set to one of two values (see the + // :ref:`codec_type + // ` + // parameter in the HTTP connection manager for more information): + // + // * "h2,http/1.1" If the listener is going to support both HTTP/2 and HTTP/1.1. + // * "http/1.1" If the listener is only going to support HTTP/1.1. + // + // There is no default for this parameter. If empty, Envoy will not expose ALPN. + repeated string alpn_protocols = 4; +} diff --git a/api/envoy/api/v2/cluster.proto b/api/envoy/api/v2/cluster.proto index 5de5c20df570..8d9ead00f1cd 100644 --- a/api/envoy/api/v2/cluster.proto +++ b/api/envoy/api/v2/cluster.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package envoy.api.v2; -import "envoy/api/v2/auth/cert.proto"; +import "envoy/api/v2/auth/tls.proto"; import "envoy/api/v2/cluster/circuit_breaker.proto"; import "envoy/api/v2/cluster/filter.proto"; import "envoy/api/v2/cluster/outlier_detection.proto"; diff --git a/api/envoy/api/v2/listener/listener_components.proto b/api/envoy/api/v2/listener/listener_components.proto index fe449c63358a..a6791c86cd0b 100644 --- a/api/envoy/api/v2/listener/listener_components.proto +++ b/api/envoy/api/v2/listener/listener_components.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package envoy.api.v2.listener; -import "envoy/api/v2/auth/cert.proto"; +import "envoy/api/v2/auth/tls.proto"; import "envoy/api/v2/core/address.proto"; import "envoy/api/v2/core/base.proto"; import "envoy/type/range.proto"; diff --git a/api/envoy/config/bootstrap/v2/bootstrap.proto b/api/envoy/config/bootstrap/v2/bootstrap.proto index 622304483eb2..da88dce786ae 100644 --- a/api/envoy/config/bootstrap/v2/bootstrap.proto +++ b/api/envoy/config/bootstrap/v2/bootstrap.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package envoy.config.bootstrap.v2; -import "envoy/api/v2/auth/cert.proto"; +import "envoy/api/v2/auth/secret.proto"; import "envoy/api/v2/cluster.proto"; import "envoy/api/v2/core/address.proto"; import "envoy/api/v2/core/base.proto"; diff --git a/api/envoy/config/bootstrap/v3/bootstrap.proto b/api/envoy/config/bootstrap/v3/bootstrap.proto index c8219d1b22e3..c20109884d90 100644 --- a/api/envoy/config/bootstrap/v3/bootstrap.proto +++ b/api/envoy/config/bootstrap/v3/bootstrap.proto @@ -12,7 +12,7 @@ import "envoy/config/listener/v3/listener.proto"; import 
"envoy/config/metrics/v3/stats.proto"; import "envoy/config/overload/v3/overload.proto"; import "envoy/config/trace/v3/http_tracer.proto"; -import "envoy/extensions/transport_sockets/tls/v3/cert.proto"; +import "envoy/extensions/transport_sockets/tls/v3/secret.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; diff --git a/api/envoy/config/bootstrap/v4alpha/bootstrap.proto b/api/envoy/config/bootstrap/v4alpha/bootstrap.proto index e76695c4b644..0207967b4b0f 100644 --- a/api/envoy/config/bootstrap/v4alpha/bootstrap.proto +++ b/api/envoy/config/bootstrap/v4alpha/bootstrap.proto @@ -11,7 +11,7 @@ import "envoy/config/core/v4alpha/socket_option.proto"; import "envoy/config/listener/v3/listener.proto"; import "envoy/config/metrics/v3/stats.proto"; import "envoy/config/overload/v3/overload.proto"; -import "envoy/extensions/transport_sockets/tls/v4alpha/cert.proto"; +import "envoy/extensions/transport_sockets/tls/v4alpha/secret.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; diff --git a/api/envoy/extensions/transport_sockets/tls/v3/cert.proto b/api/envoy/extensions/transport_sockets/tls/v3/cert.proto index ea4bc1475c47..cf5dc597aafb 100644 --- a/api/envoy/extensions/transport_sockets/tls/v3/cert.proto +++ b/api/envoy/extensions/transport_sockets/tls/v3/cert.proto @@ -2,510 +2,12 @@ syntax = "proto3"; package envoy.extensions.transport_sockets.tls.v3; -import "envoy/config/core/v3/base.proto"; -import "envoy/config/core/v3/config_source.proto"; -import "envoy/type/matcher/v3/string.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/sensitive.proto"; import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; + +import public "envoy/extensions/transport_sockets/tls/v3/common.proto"; +import public "envoy/extensions/transport_sockets/tls/v3/secret.proto"; +import public "envoy/extensions/transport_sockets/tls/v3/tls.proto"; option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v3"; option java_outer_classname = "CertProto"; option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Common TLS configuration] - -message TlsParameters { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.TlsParameters"; - - enum TlsProtocol { - // Envoy will choose the optimal TLS version. - TLS_AUTO = 0; - - // TLS 1.0 - TLSv1_0 = 1; - - // TLS 1.1 - TLSv1_1 = 2; - - // TLS 1.2 - TLSv1_2 = 3; - - // TLS 1.3 - TLSv1_3 = 4; - } - - // Minimum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_0`` for - // servers. - TlsProtocol tls_minimum_protocol_version = 1 [(validate.rules).enum = {defined_only: true}]; - - // Maximum TLS protocol version. By default, it's ``TLSv1_3`` for servers in non-FIPS builds, and - // ``TLSv1_2`` for clients and for servers using :ref:`BoringSSL FIPS `. - TlsProtocol tls_maximum_protocol_version = 2 [(validate.rules).enum = {defined_only: true}]; - - // If specified, the TLS listener will only support the specified `cipher list - // `_ - // when negotiating TLS 1.0-1.2 (this setting has no effect when negotiating TLS 1.3). If not - // specified, the default list will be used. - // - // In non-FIPS builds, the default cipher list is: - // - // .. 
code-block:: none - // - // [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305] - // [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305] - // ECDHE-ECDSA-AES128-SHA - // ECDHE-RSA-AES128-SHA - // AES128-GCM-SHA256 - // AES128-SHA - // ECDHE-ECDSA-AES256-GCM-SHA384 - // ECDHE-RSA-AES256-GCM-SHA384 - // ECDHE-ECDSA-AES256-SHA - // ECDHE-RSA-AES256-SHA - // AES256-GCM-SHA384 - // AES256-SHA - // - // In builds using :ref:`BoringSSL FIPS `, the default cipher list is: - // - // .. code-block:: none - // - // ECDHE-ECDSA-AES128-GCM-SHA256 - // ECDHE-RSA-AES128-GCM-SHA256 - // ECDHE-ECDSA-AES128-SHA - // ECDHE-RSA-AES128-SHA - // AES128-GCM-SHA256 - // AES128-SHA - // ECDHE-ECDSA-AES256-GCM-SHA384 - // ECDHE-RSA-AES256-GCM-SHA384 - // ECDHE-ECDSA-AES256-SHA - // ECDHE-RSA-AES256-SHA - // AES256-GCM-SHA384 - // AES256-SHA - repeated string cipher_suites = 3; - - // If specified, the TLS connection will only support the specified ECDH - // curves. If not specified, the default curves will be used. - // - // In non-FIPS builds, the default curves are: - // - // .. code-block:: none - // - // X25519 - // P-256 - // - // In builds using :ref:`BoringSSL FIPS `, the default curve is: - // - // .. code-block:: none - // - // P-256 - repeated string ecdh_curves = 4; -} - -// BoringSSL private key method configuration. The private key methods are used for external -// (potentially asynchronous) signing and decryption operations. Some use cases for private key -// methods would be TPM support and TLS acceleration. -message PrivateKeyProvider { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.auth.PrivateKeyProvider"; - - reserved 2; - - reserved "config"; - - // Private key method provider name. The name must match a - // supported private key method provider type. - string provider_name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Private key method provider specific configuration. - oneof config_type { - google.protobuf.Any typed_config = 3 [(udpa.annotations.sensitive) = true]; - } -} - -// [#next-free-field: 7] -message TlsCertificate { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.TlsCertificate"; - - // The TLS certificate chain. - config.core.v3.DataSource certificate_chain = 1; - - // The TLS private key. - config.core.v3.DataSource private_key = 2 [(udpa.annotations.sensitive) = true]; - - // BoringSSL private key method provider. This is an alternative to :ref:`private_key - // ` field. This can't be - // marked as ``oneof`` due to API compatibility reasons. Setting both :ref:`private_key - // ` and - // :ref:`private_key_provider - // ` fields will result in an - // error. - PrivateKeyProvider private_key_provider = 6; - - // The password to decrypt the TLS private key. If this field is not set, it is assumed that the - // TLS private key is not password encrypted. - config.core.v3.DataSource password = 3 [(udpa.annotations.sensitive) = true]; - - // [#not-implemented-hide:] - config.core.v3.DataSource ocsp_staple = 4; - - // [#not-implemented-hide:] - repeated config.core.v3.DataSource signed_certificate_timestamp = 5; -} - -message TlsSessionTicketKeys { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.auth.TlsSessionTicketKeys"; - - // Keys for encrypting and decrypting TLS session tickets. The - // first key in the array contains the key to encrypt all new sessions created by this context. - // All keys are candidates for decrypting received tickets. 
This allows for easy rotation of keys - // by, for example, putting the new key first, and the previous key second. - // - // If :ref:`session_ticket_keys ` - // is not specified, the TLS library will still support resuming sessions via tickets, but it will - // use an internally-generated and managed key, so sessions cannot be resumed across hot restarts - // or on different hosts. - // - // Each key must contain exactly 80 bytes of cryptographically-secure random data. For - // example, the output of ``openssl rand 80``. - // - // .. attention:: - // - // Using this feature has serious security considerations and risks. Improper handling of keys - // may result in loss of secrecy in connections, even if ciphers supporting perfect forward - // secrecy are used. See https://www.imperialviolet.org/2013/06/27/botchingpfs.html for some - // discussion. To minimize the risk, you must: - // - // * Keep the session ticket keys at least as secure as your TLS certificate private keys - // * Rotate session ticket keys at least daily, and preferably hourly - // * Always generate keys using a cryptographically-secure random data source - repeated config.core.v3.DataSource keys = 1 - [(validate.rules).repeated = {min_items: 1}, (udpa.annotations.sensitive) = true]; -} - -// [#next-free-field: 11] -message CertificateValidationContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.auth.CertificateValidationContext"; - - // Peer certificate verification mode. - enum TrustChainVerification { - // Perform default certificate verification (e.g., against CA / verification lists) - VERIFY_TRUST_CHAIN = 0; - - // Connections where the certificate fails verification will be permitted. - // For HTTP connections, the result of certificate verification can be used in route matching. ( - // see :ref:`validated ` ). - ACCEPT_UNTRUSTED = 1; - } - - reserved 4; - - reserved "verify_subject_alt_name"; - - // TLS certificate data containing certificate authority certificates to use in verifying - // a presented peer certificate (e.g. server certificate for clusters or client certificate - // for listeners). If not specified and a peer certificate is presented it will not be - // verified. By default, a client certificate is optional, unless one of the additional - // options (:ref:`require_client_certificate - // `, - // :ref:`verify_certificate_spki - // `, - // :ref:`verify_certificate_hash - // `, or - // :ref:`match_subject_alt_names - // `) is also - // specified. - // - // It can optionally contain certificate revocation lists, in which case Envoy will verify - // that the presented peer certificate has not been revoked by one of the included CRLs. - // - // See :ref:`the TLS overview ` for a list of common - // system CA locations. - config.core.v3.DataSource trusted_ca = 1; - - // An optional list of base64-encoded SHA-256 hashes. If specified, Envoy will verify that the - // SHA-256 of the DER-encoded Subject Public Key Information (SPKI) of the presented certificate - // matches one of the specified values. - // - // A base64-encoded SHA-256 of the Subject Public Key Information (SPKI) of the certificate - // can be generated with the following command: - // - // .. code-block:: bash - // - // $ openssl x509 -in path/to/client.crt -noout -pubkey - // | openssl pkey -pubin -outform DER - // | openssl dgst -sha256 -binary - // | openssl enc -base64 - // NvqYIYSbgK2vCJpQhObf77vv+bQWtc5ek5RIOwPiC9A= - // - // This is the format used in HTTP Public Key Pinning. 
- // - // When both: - // :ref:`verify_certificate_hash - // ` and - // :ref:`verify_certificate_spki - // ` are specified, - // a hash matching value from either of the lists will result in the certificate being accepted. - // - // .. attention:: - // - // This option is preferred over :ref:`verify_certificate_hash - // `, - // because SPKI is tied to a private key, so it doesn't change when the certificate - // is renewed using the same private key. - repeated string verify_certificate_spki = 3 - [(validate.rules).repeated = {items {string {min_bytes: 44 max_bytes: 44}}}]; - - // An optional list of hex-encoded SHA-256 hashes. If specified, Envoy will verify that - // the SHA-256 of the DER-encoded presented certificate matches one of the specified values. - // - // A hex-encoded SHA-256 of the certificate can be generated with the following command: - // - // .. code-block:: bash - // - // $ openssl x509 -in path/to/client.crt -outform DER | openssl dgst -sha256 | cut -d" " -f2 - // df6ff72fe9116521268f6f2dd4966f51df479883fe7037b39f75916ac3049d1a - // - // A long hex-encoded and colon-separated SHA-256 (a.k.a. "fingerprint") of the certificate - // can be generated with the following command: - // - // .. code-block:: bash - // - // $ openssl x509 -in path/to/client.crt -noout -fingerprint -sha256 | cut -d"=" -f2 - // DF:6F:F7:2F:E9:11:65:21:26:8F:6F:2D:D4:96:6F:51:DF:47:98:83:FE:70:37:B3:9F:75:91:6A:C3:04:9D:1A - // - // Both of those formats are acceptable. - // - // When both: - // :ref:`verify_certificate_hash - // ` and - // :ref:`verify_certificate_spki - // ` are specified, - // a hash matching value from either of the lists will result in the certificate being accepted. - repeated string verify_certificate_hash = 2 - [(validate.rules).repeated = {items {string {min_bytes: 64 max_bytes: 95}}}]; - - // An optional list of Subject Alternative name matchers. Envoy will verify that the - // Subject Alternative Name of the presented certificate matches one of the specified matches. - // - // When a certificate has wildcard DNS SAN entries, to match a specific client, it should be - // configured with exact match type in the :ref:`string matcher `. - // For example if the certificate has "\*.example.com" as DNS SAN entry, to allow only "api.example.com", - // it should be configured as shown below. - // - // .. code-block:: yaml - // - // match_subject_alt_names: - // exact: "api.example.com" - // - // .. attention:: - // - // Subject Alternative Names are easily spoofable and verifying only them is insecure, - // therefore this option must be used together with :ref:`trusted_ca - // `. - repeated type.matcher.v3.StringMatcher match_subject_alt_names = 9; - - // [#not-implemented-hide:] Must present a signed time-stamped OCSP response. - google.protobuf.BoolValue require_ocsp_staple = 5; - - // [#not-implemented-hide:] Must present signed certificate time-stamp. - google.protobuf.BoolValue require_signed_certificate_timestamp = 6; - - // An optional `certificate revocation list - // `_ - // (in PEM format). If specified, Envoy will verify that the presented peer - // certificate has not been revoked by this CRL. If this DataSource contains - // multiple CRLs, all of them will be used. - config.core.v3.DataSource crl = 7; - - // If specified, Envoy will not reject expired certificates. - bool allow_expired_certificate = 8; - - // Certificate trust chain verification mode. 
- TrustChainVerification trust_chain_verification = 10 - [(validate.rules).enum = {defined_only: true}]; -} - -// TLS context shared by both client and server TLS contexts. -// [#next-free-field: 9] -message CommonTlsContext { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.CommonTlsContext"; - - message CombinedCertificateValidationContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.auth.CommonTlsContext.CombinedCertificateValidationContext"; - - // How to validate peer certificates. - CertificateValidationContext default_validation_context = 1 - [(validate.rules).message = {required: true}]; - - // Config for fetching validation context via SDS API. - SdsSecretConfig validation_context_sds_secret_config = 2 - [(validate.rules).message = {required: true}]; - } - - reserved 5; - - // TLS protocol versions, cipher suites etc. - TlsParameters tls_params = 1; - - // :ref:`Multiple TLS certificates ` can be associated with the - // same context to allow both RSA and ECDSA certificates. - // - // Only a single TLS certificate is supported in client contexts. In server contexts, the first - // RSA certificate is used for clients that only support RSA and the first ECDSA certificate is - // used for clients that support ECDSA. - repeated TlsCertificate tls_certificates = 2; - - // Configs for fetching TLS certificates via SDS API. - repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6 - [(validate.rules).repeated = {max_items: 1}]; - - oneof validation_context_type { - // How to validate peer certificates. - CertificateValidationContext validation_context = 3; - - // Config for fetching validation context via SDS API. - SdsSecretConfig validation_context_sds_secret_config = 7; - - // Combined certificate validation context holds a default CertificateValidationContext - // and SDS config. When SDS server returns dynamic CertificateValidationContext, both dynamic - // and default CertificateValidationContext are merged into a new CertificateValidationContext - // for validation. This merge is done by Message::MergeFrom(), so dynamic - // CertificateValidationContext overwrites singular fields in default - // CertificateValidationContext, and concatenates repeated fields to default - // CertificateValidationContext, and logical OR is applied to boolean fields. - CombinedCertificateValidationContext combined_validation_context = 8; - } - - // Supplies the list of ALPN protocols that the listener should expose. In - // practice this is likely to be set to one of two values (see the - // :ref:`codec_type - // ` - // parameter in the HTTP connection manager for more information): - // - // * "h2,http/1.1" If the listener is going to support both HTTP/2 and HTTP/1.1. - // * "http/1.1" If the listener is only going to support HTTP/1.1. - // - // There is no default for this parameter. If empty, Envoy will not expose ALPN. - repeated string alpn_protocols = 4; -} - -message UpstreamTlsContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.auth.UpstreamTlsContext"; - - // Common TLS context settings. - // - // .. attention:: - // - // Server certificate verification is not enabled by default. Configure - // :ref:`trusted_ca` to enable - // verification. - CommonTlsContext common_tls_context = 1; - - // SNI string to use when creating TLS backend connections. - string sni = 2 [(validate.rules).string = {max_bytes: 255}]; - - // If true, server-initiated TLS renegotiation will be allowed. 
- // - // .. attention:: - // - // TLS renegotiation is considered insecure and shouldn't be used unless absolutely necessary. - bool allow_renegotiation = 3; - - // Maximum number of session keys (Pre-Shared Keys for TLSv1.3+, Session IDs and Session Tickets - // for TLSv1.2 and older) to store for the purpose of session resumption. - // - // Defaults to 1, setting this to 0 disables session resumption. - google.protobuf.UInt32Value max_session_keys = 4; -} - -// [#next-free-field: 8] -message DownstreamTlsContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.auth.DownstreamTlsContext"; - - // Common TLS context settings. - CommonTlsContext common_tls_context = 1; - - // If specified, Envoy will reject connections without a valid client - // certificate. - google.protobuf.BoolValue require_client_certificate = 2; - - // If specified, Envoy will reject connections without a valid and matching SNI. - // [#not-implemented-hide:] - google.protobuf.BoolValue require_sni = 3; - - oneof session_ticket_keys_type { - // TLS session ticket key settings. - TlsSessionTicketKeys session_ticket_keys = 4; - - // Config for fetching TLS session ticket keys via SDS API. - SdsSecretConfig session_ticket_keys_sds_secret_config = 5; - - // Config for controlling stateless TLS session resumption: setting this to true will cause the TLS - // server to not issue TLS session tickets for the purposes of stateless TLS session resumption. - // If set to false, the TLS server will issue TLS session tickets and encrypt/decrypt them using - // the keys specified through either :ref:`session_ticket_keys ` - // or :ref:`session_ticket_keys_sds_secret_config `. - // If this config is set to false and no keys are explicitly configured, the TLS server will issue - // TLS session tickets and encrypt/decrypt them using an internally-generated and managed key, with the - // implication that sessions cannot be resumed across hot restarts or on different hosts. - bool disable_stateless_session_resumption = 7; - } - - // If specified, session_timeout will change maximum lifetime (in seconds) of TLS session - // Currently this value is used as a hint to `TLS session ticket lifetime (for TLSv1.2) - // ` - // only seconds could be specified (fractional seconds are going to be ignored). - google.protobuf.Duration session_timeout = 6 [(validate.rules).duration = { - lt {seconds: 4294967296} - gte {} - }]; -} - -message GenericSecret { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.GenericSecret"; - - // Secret of generic type and is available to filters. - config.core.v3.DataSource secret = 1 [(udpa.annotations.sensitive) = true]; -} - -message SdsSecretConfig { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.SdsSecretConfig"; - - // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. - // When both name and config are specified, then secret can be fetched and/or reloaded via - // SDS. When only name is specified, then secret will be loaded from static resources. - string name = 1; - - config.core.v3.ConfigSource sds_config = 2; -} - -// [#next-free-field: 6] -message Secret { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.Secret"; - - // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. 
- string name = 1; - - oneof type { - TlsCertificate tls_certificate = 2; - - TlsSessionTicketKeys session_ticket_keys = 3; - - CertificateValidationContext validation_context = 4; - - GenericSecret generic_secret = 5; - } -} diff --git a/api/envoy/extensions/transport_sockets/tls/v3/common.proto b/api/envoy/extensions/transport_sockets/tls/v3/common.proto new file mode 100644 index 000000000000..b468f5b7e412 --- /dev/null +++ b/api/envoy/extensions/transport_sockets/tls/v3/common.proto @@ -0,0 +1,334 @@ +syntax = "proto3"; + +package envoy.extensions.transport_sockets.tls.v3; + +import "envoy/config/core/v3/base.proto"; +import "envoy/type/matcher/v3/string.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/sensitive.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v3"; +option java_outer_classname = "CommonProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Common TLS configuration] + +message TlsParameters { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.TlsParameters"; + + enum TlsProtocol { + // Envoy will choose the optimal TLS version. + TLS_AUTO = 0; + + // TLS 1.0 + TLSv1_0 = 1; + + // TLS 1.1 + TLSv1_1 = 2; + + // TLS 1.2 + TLSv1_2 = 3; + + // TLS 1.3 + TLSv1_3 = 4; + } + + // Minimum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_0`` for + // servers. + TlsProtocol tls_minimum_protocol_version = 1 [(validate.rules).enum = {defined_only: true}]; + + // Maximum TLS protocol version. By default, it's ``TLSv1_3`` for servers in non-FIPS builds, and + // ``TLSv1_2`` for clients and for servers using :ref:`BoringSSL FIPS `. + TlsProtocol tls_maximum_protocol_version = 2 [(validate.rules).enum = {defined_only: true}]; + + // If specified, the TLS listener will only support the specified `cipher list + // `_ + // when negotiating TLS 1.0-1.2 (this setting has no effect when negotiating TLS 1.3). If not + // specified, the default list will be used. + // + // In non-FIPS builds, the default cipher list is: + // + // .. code-block:: none + // + // [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305] + // [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305] + // ECDHE-ECDSA-AES128-SHA + // ECDHE-RSA-AES128-SHA + // AES128-GCM-SHA256 + // AES128-SHA + // ECDHE-ECDSA-AES256-GCM-SHA384 + // ECDHE-RSA-AES256-GCM-SHA384 + // ECDHE-ECDSA-AES256-SHA + // ECDHE-RSA-AES256-SHA + // AES256-GCM-SHA384 + // AES256-SHA + // + // In builds using :ref:`BoringSSL FIPS `, the default cipher list is: + // + // .. code-block:: none + // + // ECDHE-ECDSA-AES128-GCM-SHA256 + // ECDHE-RSA-AES128-GCM-SHA256 + // ECDHE-ECDSA-AES128-SHA + // ECDHE-RSA-AES128-SHA + // AES128-GCM-SHA256 + // AES128-SHA + // ECDHE-ECDSA-AES256-GCM-SHA384 + // ECDHE-RSA-AES256-GCM-SHA384 + // ECDHE-ECDSA-AES256-SHA + // ECDHE-RSA-AES256-SHA + // AES256-GCM-SHA384 + // AES256-SHA + repeated string cipher_suites = 3; + + // If specified, the TLS connection will only support the specified ECDH + // curves. If not specified, the default curves will be used. + // + // In non-FIPS builds, the default curves are: + // + // .. 
code-block:: none + // + // X25519 + // P-256 + // + // In builds using :ref:`BoringSSL FIPS `, the default curve is: + // + // .. code-block:: none + // + // P-256 + repeated string ecdh_curves = 4; +} + +// BoringSSL private key method configuration. The private key methods are used for external +// (potentially asynchronous) signing and decryption operations. Some use cases for private key +// methods would be TPM support and TLS acceleration. +message PrivateKeyProvider { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.auth.PrivateKeyProvider"; + + reserved 2; + + reserved "config"; + + // Private key method provider name. The name must match a + // supported private key method provider type. + string provider_name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Private key method provider specific configuration. + oneof config_type { + google.protobuf.Any typed_config = 3 [(udpa.annotations.sensitive) = true]; + } +} + +// [#next-free-field: 7] +message TlsCertificate { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.TlsCertificate"; + + // The TLS certificate chain. + config.core.v3.DataSource certificate_chain = 1; + + // The TLS private key. + config.core.v3.DataSource private_key = 2 [(udpa.annotations.sensitive) = true]; + + // BoringSSL private key method provider. This is an alternative to :ref:`private_key + // ` field. This can't be + // marked as ``oneof`` due to API compatibility reasons. Setting both :ref:`private_key + // ` and + // :ref:`private_key_provider + // ` fields will result in an + // error. + PrivateKeyProvider private_key_provider = 6; + + // The password to decrypt the TLS private key. If this field is not set, it is assumed that the + // TLS private key is not password encrypted. + config.core.v3.DataSource password = 3 [(udpa.annotations.sensitive) = true]; + + // [#not-implemented-hide:] + config.core.v3.DataSource ocsp_staple = 4; + + // [#not-implemented-hide:] + repeated config.core.v3.DataSource signed_certificate_timestamp = 5; +} + +message TlsSessionTicketKeys { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.auth.TlsSessionTicketKeys"; + + // Keys for encrypting and decrypting TLS session tickets. The + // first key in the array contains the key to encrypt all new sessions created by this context. + // All keys are candidates for decrypting received tickets. This allows for easy rotation of keys + // by, for example, putting the new key first, and the previous key second. + // + // If :ref:`session_ticket_keys ` + // is not specified, the TLS library will still support resuming sessions via tickets, but it will + // use an internally-generated and managed key, so sessions cannot be resumed across hot restarts + // or on different hosts. + // + // Each key must contain exactly 80 bytes of cryptographically-secure random data. For + // example, the output of ``openssl rand 80``. + // + // .. attention:: + // + // Using this feature has serious security considerations and risks. Improper handling of keys + // may result in loss of secrecy in connections, even if ciphers supporting perfect forward + // secrecy are used. See https://www.imperialviolet.org/2013/06/27/botchingpfs.html for some + // discussion. 
To minimize the risk, you must: + // + // * Keep the session ticket keys at least as secure as your TLS certificate private keys + // * Rotate session ticket keys at least daily, and preferably hourly + // * Always generate keys using a cryptographically-secure random data source + repeated config.core.v3.DataSource keys = 1 + [(validate.rules).repeated = {min_items: 1}, (udpa.annotations.sensitive) = true]; +} + +// [#next-free-field: 11] +message CertificateValidationContext { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.auth.CertificateValidationContext"; + + // Peer certificate verification mode. + enum TrustChainVerification { + // Perform default certificate verification (e.g., against CA / verification lists) + VERIFY_TRUST_CHAIN = 0; + + // Connections where the certificate fails verification will be permitted. + // For HTTP connections, the result of certificate verification can be used in route matching. ( + // see :ref:`validated ` ). + ACCEPT_UNTRUSTED = 1; + } + + reserved 4; + + reserved "verify_subject_alt_name"; + + // TLS certificate data containing certificate authority certificates to use in verifying + // a presented peer certificate (e.g. server certificate for clusters or client certificate + // for listeners). If not specified and a peer certificate is presented it will not be + // verified. By default, a client certificate is optional, unless one of the additional + // options (:ref:`require_client_certificate + // `, + // :ref:`verify_certificate_spki + // `, + // :ref:`verify_certificate_hash + // `, or + // :ref:`match_subject_alt_names + // `) is also + // specified. + // + // It can optionally contain certificate revocation lists, in which case Envoy will verify + // that the presented peer certificate has not been revoked by one of the included CRLs. + // + // See :ref:`the TLS overview ` for a list of common + // system CA locations. + config.core.v3.DataSource trusted_ca = 1; + + // An optional list of base64-encoded SHA-256 hashes. If specified, Envoy will verify that the + // SHA-256 of the DER-encoded Subject Public Key Information (SPKI) of the presented certificate + // matches one of the specified values. + // + // A base64-encoded SHA-256 of the Subject Public Key Information (SPKI) of the certificate + // can be generated with the following command: + // + // .. code-block:: bash + // + // $ openssl x509 -in path/to/client.crt -noout -pubkey + // | openssl pkey -pubin -outform DER + // | openssl dgst -sha256 -binary + // | openssl enc -base64 + // NvqYIYSbgK2vCJpQhObf77vv+bQWtc5ek5RIOwPiC9A= + // + // This is the format used in HTTP Public Key Pinning. + // + // When both: + // :ref:`verify_certificate_hash + // ` and + // :ref:`verify_certificate_spki + // ` are specified, + // a hash matching value from either of the lists will result in the certificate being accepted. + // + // .. attention:: + // + // This option is preferred over :ref:`verify_certificate_hash + // `, + // because SPKI is tied to a private key, so it doesn't change when the certificate + // is renewed using the same private key. + repeated string verify_certificate_spki = 3 + [(validate.rules).repeated = {items {string {min_bytes: 44 max_bytes: 44}}}]; + + // An optional list of hex-encoded SHA-256 hashes. If specified, Envoy will verify that + // the SHA-256 of the DER-encoded presented certificate matches one of the specified values. + // + // A hex-encoded SHA-256 of the certificate can be generated with the following command: + // + // .. 
code-block:: bash + // + // $ openssl x509 -in path/to/client.crt -outform DER | openssl dgst -sha256 | cut -d" " -f2 + // df6ff72fe9116521268f6f2dd4966f51df479883fe7037b39f75916ac3049d1a + // + // A long hex-encoded and colon-separated SHA-256 (a.k.a. "fingerprint") of the certificate + // can be generated with the following command: + // + // .. code-block:: bash + // + // $ openssl x509 -in path/to/client.crt -noout -fingerprint -sha256 | cut -d"=" -f2 + // DF:6F:F7:2F:E9:11:65:21:26:8F:6F:2D:D4:96:6F:51:DF:47:98:83:FE:70:37:B3:9F:75:91:6A:C3:04:9D:1A + // + // Both of those formats are acceptable. + // + // When both: + // :ref:`verify_certificate_hash + // ` and + // :ref:`verify_certificate_spki + // ` are specified, + // a hash matching value from either of the lists will result in the certificate being accepted. + repeated string verify_certificate_hash = 2 + [(validate.rules).repeated = {items {string {min_bytes: 64 max_bytes: 95}}}]; + + // An optional list of Subject Alternative name matchers. Envoy will verify that the + // Subject Alternative Name of the presented certificate matches one of the specified matches. + // + // When a certificate has wildcard DNS SAN entries, to match a specific client, it should be + // configured with exact match type in the :ref:`string matcher `. + // For example if the certificate has "\*.example.com" as DNS SAN entry, to allow only "api.example.com", + // it should be configured as shown below. + // + // .. code-block:: yaml + // + // match_subject_alt_names: + // exact: "api.example.com" + // + // .. attention:: + // + // Subject Alternative Names are easily spoofable and verifying only them is insecure, + // therefore this option must be used together with :ref:`trusted_ca + // `. + repeated type.matcher.v3.StringMatcher match_subject_alt_names = 9; + + // [#not-implemented-hide:] Must present a signed time-stamped OCSP response. + google.protobuf.BoolValue require_ocsp_staple = 5; + + // [#not-implemented-hide:] Must present signed certificate time-stamp. + google.protobuf.BoolValue require_signed_certificate_timestamp = 6; + + // An optional `certificate revocation list + // `_ + // (in PEM format). If specified, Envoy will verify that the presented peer + // certificate has not been revoked by this CRL. If this DataSource contains + // multiple CRLs, all of them will be used. + config.core.v3.DataSource crl = 7; + + // If specified, Envoy will not reject expired certificates. + bool allow_expired_certificate = 8; + + // Certificate trust chain verification mode. 
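Putting the validation options above together, a hedged example of a ``CertificateValidationContext`` that pins an SPKI hash and restricts the accepted SAN (reusing the sample values from the comments above; the CA path is a placeholder) could look like:

.. code-block:: yaml

   validation_context:
     trusted_ca:
       filename: /etc/envoy/ca.crt
     verify_certificate_spki:
     - "NvqYIYSbgK2vCJpQhObf77vv+bQWtc5ek5RIOwPiC9A="
     match_subject_alt_names:
     - exact: "api.example.com"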
+ TrustChainVerification trust_chain_verification = 10 + [(validate.rules).enum = {defined_only: true}]; +} diff --git a/api/envoy/extensions/transport_sockets/tls/v3/secret.proto b/api/envoy/extensions/transport_sockets/tls/v3/secret.proto new file mode 100644 index 000000000000..2a77ec765c8f --- /dev/null +++ b/api/envoy/extensions/transport_sockets/tls/v3/secret.proto @@ -0,0 +1,54 @@ +syntax = "proto3"; + +package envoy.extensions.transport_sockets.tls.v3; + +import "envoy/config/core/v3/base.proto"; +import "envoy/config/core/v3/config_source.proto"; +import "envoy/extensions/transport_sockets/tls/v3/common.proto"; + +import "udpa/annotations/sensitive.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v3"; +option java_outer_classname = "SecretProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Secrets configuration] + +message GenericSecret { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.GenericSecret"; + + // Secret of generic type and is available to filters. + config.core.v3.DataSource secret = 1 [(udpa.annotations.sensitive) = true]; +} + +message SdsSecretConfig { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.SdsSecretConfig"; + + // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. + // When both name and config are specified, then secret can be fetched and/or reloaded via + // SDS. When only name is specified, then secret will be loaded from static resources. + string name = 1; + + config.core.v3.ConfigSource sds_config = 2; +} + +// [#next-free-field: 6] +message Secret { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.Secret"; + + // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. + string name = 1; + + oneof type { + TlsCertificate tls_certificate = 2; + + TlsSessionTicketKeys session_ticket_keys = 3; + + CertificateValidationContext validation_context = 4; + + GenericSecret generic_secret = 5; + } +} diff --git a/api/envoy/extensions/transport_sockets/tls/v3/tls.proto b/api/envoy/extensions/transport_sockets/tls/v3/tls.proto new file mode 100644 index 000000000000..a6fc2d62b97c --- /dev/null +++ b/api/envoy/extensions/transport_sockets/tls/v3/tls.proto @@ -0,0 +1,161 @@ +syntax = "proto3"; + +package envoy.extensions.transport_sockets.tls.v3; + +import "envoy/extensions/transport_sockets/tls/v3/common.proto"; +import "envoy/extensions/transport_sockets/tls/v3/secret.proto"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v3"; +option java_outer_classname = "TlsProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: TLS transport socket] +// [#extension: envoy.transport_sockets.tls] +// The TLS contexts below provide the transport socket configuration for upstream/downstream TLS. + +message UpstreamTlsContext { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.auth.UpstreamTlsContext"; + + // Common TLS context settings. + // + // .. 
attention:: + // + // Server certificate verification is not enabled by default. Configure + // :ref:`trusted_ca` to enable + // verification. + CommonTlsContext common_tls_context = 1; + + // SNI string to use when creating TLS backend connections. + string sni = 2 [(validate.rules).string = {max_bytes: 255}]; + + // If true, server-initiated TLS renegotiation will be allowed. + // + // .. attention:: + // + // TLS renegotiation is considered insecure and shouldn't be used unless absolutely necessary. + bool allow_renegotiation = 3; + + // Maximum number of session keys (Pre-Shared Keys for TLSv1.3+, Session IDs and Session Tickets + // for TLSv1.2 and older) to store for the purpose of session resumption. + // + // Defaults to 1, setting this to 0 disables session resumption. + google.protobuf.UInt32Value max_session_keys = 4; +} + +// [#next-free-field: 8] +message DownstreamTlsContext { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.auth.DownstreamTlsContext"; + + // Common TLS context settings. + CommonTlsContext common_tls_context = 1; + + // If specified, Envoy will reject connections without a valid client + // certificate. + google.protobuf.BoolValue require_client_certificate = 2; + + // If specified, Envoy will reject connections without a valid and matching SNI. + // [#not-implemented-hide:] + google.protobuf.BoolValue require_sni = 3; + + oneof session_ticket_keys_type { + // TLS session ticket key settings. + TlsSessionTicketKeys session_ticket_keys = 4; + + // Config for fetching TLS session ticket keys via SDS API. + SdsSecretConfig session_ticket_keys_sds_secret_config = 5; + + // Config for controlling stateless TLS session resumption: setting this to true will cause the TLS + // server to not issue TLS session tickets for the purposes of stateless TLS session resumption. + // If set to false, the TLS server will issue TLS session tickets and encrypt/decrypt them using + // the keys specified through either :ref:`session_ticket_keys ` + // or :ref:`session_ticket_keys_sds_secret_config `. + // If this config is set to false and no keys are explicitly configured, the TLS server will issue + // TLS session tickets and encrypt/decrypt them using an internally-generated and managed key, with the + // implication that sessions cannot be resumed across hot restarts or on different hosts. + bool disable_stateless_session_resumption = 7; + } + + // If specified, session_timeout will change maximum lifetime (in seconds) of TLS session + // Currently this value is used as a hint to `TLS session ticket lifetime (for TLSv1.2) + // ` + // only seconds could be specified (fractional seconds are going to be ignored). + google.protobuf.Duration session_timeout = 6 [(validate.rules).duration = { + lt {seconds: 4294967296} + gte {} + }]; +} + +// TLS context shared by both client and server TLS contexts. +// [#next-free-field: 9] +message CommonTlsContext { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.CommonTlsContext"; + + message CombinedCertificateValidationContext { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.auth.CommonTlsContext.CombinedCertificateValidationContext"; + + // How to validate peer certificates. + CertificateValidationContext default_validation_context = 1 + [(validate.rules).message = {required: true}]; + + // Config for fetching validation context via SDS API. 
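For orientation only, a ``combined_validation_context`` that merges a static default with a validation context fetched over SDS might be sketched as below; the secret name and the SDS cluster name are hypothetical:

.. code-block:: yaml

   combined_validation_context:
     default_validation_context:
       match_subject_alt_names:
       - exact: "api.example.com"
     validation_context_sds_secret_config:
       name: validation_context
       sds_config:
         api_config_source:
           api_type: GRPC
           grpc_services:
           - envoy_grpc: {cluster_name: sds_server}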
+ SdsSecretConfig validation_context_sds_secret_config = 2 + [(validate.rules).message = {required: true}]; + } + + reserved 5; + + // TLS protocol versions, cipher suites etc. + TlsParameters tls_params = 1; + + // :ref:`Multiple TLS certificates ` can be associated with the + // same context to allow both RSA and ECDSA certificates. + // + // Only a single TLS certificate is supported in client contexts. In server contexts, the first + // RSA certificate is used for clients that only support RSA and the first ECDSA certificate is + // used for clients that support ECDSA. + repeated TlsCertificate tls_certificates = 2; + + // Configs for fetching TLS certificates via SDS API. + repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6 + [(validate.rules).repeated = {max_items: 1}]; + + oneof validation_context_type { + // How to validate peer certificates. + CertificateValidationContext validation_context = 3; + + // Config for fetching validation context via SDS API. + SdsSecretConfig validation_context_sds_secret_config = 7; + + // Combined certificate validation context holds a default CertificateValidationContext + // and SDS config. When SDS server returns dynamic CertificateValidationContext, both dynamic + // and default CertificateValidationContext are merged into a new CertificateValidationContext + // for validation. This merge is done by Message::MergeFrom(), so dynamic + // CertificateValidationContext overwrites singular fields in default + // CertificateValidationContext, and concatenates repeated fields to default + // CertificateValidationContext, and logical OR is applied to boolean fields. + CombinedCertificateValidationContext combined_validation_context = 8; + } + + // Supplies the list of ALPN protocols that the listener should expose. In + // practice this is likely to be set to one of two values (see the + // :ref:`codec_type + // ` + // parameter in the HTTP connection manager for more information): + // + // * "h2,http/1.1" If the listener is going to support both HTTP/2 and HTTP/1.1. + // * "http/1.1" If the listener is only going to support HTTP/1.1. + // + // There is no default for this parameter. If empty, Envoy will not expose ALPN. 
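As a small illustrative fragment, a listener-side ``CommonTlsContext`` that advertises both HTTP/2 and HTTP/1.1 over ALPN could set:

.. code-block:: yaml

   common_tls_context:
     alpn_protocols: ["h2", "http/1.1"]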
+ repeated string alpn_protocols = 4; +} diff --git a/api/envoy/extensions/transport_sockets/tls/v4alpha/cert.proto b/api/envoy/extensions/transport_sockets/tls/v4alpha/common.proto similarity index 64% rename from api/envoy/extensions/transport_sockets/tls/v4alpha/cert.proto rename to api/envoy/extensions/transport_sockets/tls/v4alpha/common.proto index febb6d665240..9028e380d092 100644 --- a/api/envoy/extensions/transport_sockets/tls/v4alpha/cert.proto +++ b/api/envoy/extensions/transport_sockets/tls/v4alpha/common.proto @@ -3,11 +3,9 @@ syntax = "proto3"; package envoy.extensions.transport_sockets.tls.v4alpha; import "envoy/config/core/v4alpha/base.proto"; -import "envoy/config/core/v4alpha/config_source.proto"; import "envoy/type/matcher/v3/string.proto"; import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; @@ -17,7 +15,7 @@ import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v4alpha"; -option java_outer_classname = "CertProto"; +option java_outer_classname = "CommonProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; @@ -336,183 +334,3 @@ message CertificateValidationContext { TrustChainVerification trust_chain_verification = 10 [(validate.rules).enum = {defined_only: true}]; } - -// TLS context shared by both client and server TLS contexts. -// [#next-free-field: 9] -message CommonTlsContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.CommonTlsContext"; - - message CombinedCertificateValidationContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.CommonTlsContext." - "CombinedCertificateValidationContext"; - - // How to validate peer certificates. - CertificateValidationContext default_validation_context = 1 - [(validate.rules).message = {required: true}]; - - // Config for fetching validation context via SDS API. - SdsSecretConfig validation_context_sds_secret_config = 2 - [(validate.rules).message = {required: true}]; - } - - reserved 5; - - // TLS protocol versions, cipher suites etc. - TlsParameters tls_params = 1; - - // :ref:`Multiple TLS certificates ` can be associated with the - // same context to allow both RSA and ECDSA certificates. - // - // Only a single TLS certificate is supported in client contexts. In server contexts, the first - // RSA certificate is used for clients that only support RSA and the first ECDSA certificate is - // used for clients that support ECDSA. - repeated TlsCertificate tls_certificates = 2; - - // Configs for fetching TLS certificates via SDS API. - repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6 - [(validate.rules).repeated = {max_items: 1}]; - - oneof validation_context_type { - // How to validate peer certificates. - CertificateValidationContext validation_context = 3; - - // Config for fetching validation context via SDS API. - SdsSecretConfig validation_context_sds_secret_config = 7; - - // Combined certificate validation context holds a default CertificateValidationContext - // and SDS config. When SDS server returns dynamic CertificateValidationContext, both dynamic - // and default CertificateValidationContext are merged into a new CertificateValidationContext - // for validation. 
This merge is done by Message::MergeFrom(), so dynamic - // CertificateValidationContext overwrites singular fields in default - // CertificateValidationContext, and concatenates repeated fields to default - // CertificateValidationContext, and logical OR is applied to boolean fields. - CombinedCertificateValidationContext combined_validation_context = 8; - } - - // Supplies the list of ALPN protocols that the listener should expose. In - // practice this is likely to be set to one of two values (see the - // :ref:`codec_type - // ` - // parameter in the HTTP connection manager for more information): - // - // * "h2,http/1.1" If the listener is going to support both HTTP/2 and HTTP/1.1. - // * "http/1.1" If the listener is only going to support HTTP/1.1. - // - // There is no default for this parameter. If empty, Envoy will not expose ALPN. - repeated string alpn_protocols = 4; -} - -message UpstreamTlsContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext"; - - // Common TLS context settings. - // - // .. attention:: - // - // Server certificate verification is not enabled by default. Configure - // :ref:`trusted_ca` to enable - // verification. - CommonTlsContext common_tls_context = 1; - - // SNI string to use when creating TLS backend connections. - string sni = 2 [(validate.rules).string = {max_bytes: 255}]; - - // If true, server-initiated TLS renegotiation will be allowed. - // - // .. attention:: - // - // TLS renegotiation is considered insecure and shouldn't be used unless absolutely necessary. - bool allow_renegotiation = 3; - - // Maximum number of session keys (Pre-Shared Keys for TLSv1.3+, Session IDs and Session Tickets - // for TLSv1.2 and older) to store for the purpose of session resumption. - // - // Defaults to 1, setting this to 0 disables session resumption. - google.protobuf.UInt32Value max_session_keys = 4; -} - -// [#next-free-field: 8] -message DownstreamTlsContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext"; - - // Common TLS context settings. - CommonTlsContext common_tls_context = 1; - - // If specified, Envoy will reject connections without a valid client - // certificate. - google.protobuf.BoolValue require_client_certificate = 2; - - // If specified, Envoy will reject connections without a valid and matching SNI. - // [#not-implemented-hide:] - google.protobuf.BoolValue require_sni = 3; - - oneof session_ticket_keys_type { - // TLS session ticket key settings. - TlsSessionTicketKeys session_ticket_keys = 4; - - // Config for fetching TLS session ticket keys via SDS API. - SdsSecretConfig session_ticket_keys_sds_secret_config = 5; - - // Config for controlling stateless TLS session resumption: setting this to true will cause the TLS - // server to not issue TLS session tickets for the purposes of stateless TLS session resumption. - // If set to false, the TLS server will issue TLS session tickets and encrypt/decrypt them using - // the keys specified through either :ref:`session_ticket_keys ` - // or :ref:`session_ticket_keys_sds_secret_config `. - // If this config is set to false and no keys are explicitly configured, the TLS server will issue - // TLS session tickets and encrypt/decrypt them using an internally-generated and managed key, with the - // implication that sessions cannot be resumed across hot restarts or on different hosts. 
- bool disable_stateless_session_resumption = 7; - } - - // If specified, session_timeout will change maximum lifetime (in seconds) of TLS session - // Currently this value is used as a hint to `TLS session ticket lifetime (for TLSv1.2) - // ` - // only seconds could be specified (fractional seconds are going to be ignored). - google.protobuf.Duration session_timeout = 6 [(validate.rules).duration = { - lt {seconds: 4294967296} - gte {} - }]; -} - -message GenericSecret { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.GenericSecret"; - - // Secret of generic type and is available to filters. - config.core.v4alpha.DataSource secret = 1 [(udpa.annotations.sensitive) = true]; -} - -message SdsSecretConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.SdsSecretConfig"; - - // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. - // When both name and config are specified, then secret can be fetched and/or reloaded via - // SDS. When only name is specified, then secret will be loaded from static resources. - string name = 1; - - config.core.v4alpha.ConfigSource sds_config = 2; -} - -// [#next-free-field: 6] -message Secret { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.Secret"; - - // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. - string name = 1; - - oneof type { - TlsCertificate tls_certificate = 2; - - TlsSessionTicketKeys session_ticket_keys = 3; - - CertificateValidationContext validation_context = 4; - - GenericSecret generic_secret = 5; - } -} diff --git a/api/envoy/extensions/transport_sockets/tls/v4alpha/secret.proto b/api/envoy/extensions/transport_sockets/tls/v4alpha/secret.proto new file mode 100644 index 000000000000..001c1d2901bd --- /dev/null +++ b/api/envoy/extensions/transport_sockets/tls/v4alpha/secret.proto @@ -0,0 +1,57 @@ +syntax = "proto3"; + +package envoy.extensions.transport_sockets.tls.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/core/v4alpha/config_source.proto"; +import "envoy/extensions/transport_sockets/tls/v4alpha/common.proto"; + +import "udpa/annotations/sensitive.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v4alpha"; +option java_outer_classname = "SecretProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Secrets configuration] + +message GenericSecret { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.GenericSecret"; + + // Secret of generic type and is available to filters. + config.core.v4alpha.DataSource secret = 1 [(udpa.annotations.sensitive) = true]; +} + +message SdsSecretConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.SdsSecretConfig"; + + // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. + // When both name and config are specified, then secret can be fetched and/or reloaded via + // SDS. When only name is specified, then secret will be loaded from static resources. 
+ string name = 1; + + config.core.v4alpha.ConfigSource sds_config = 2; +} + +// [#next-free-field: 6] +message Secret { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.Secret"; + + // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. + string name = 1; + + oneof type { + TlsCertificate tls_certificate = 2; + + TlsSessionTicketKeys session_ticket_keys = 3; + + CertificateValidationContext validation_context = 4; + + GenericSecret generic_secret = 5; + } +} diff --git a/api/envoy/extensions/transport_sockets/tls/v4alpha/tls.proto b/api/envoy/extensions/transport_sockets/tls/v4alpha/tls.proto new file mode 100644 index 000000000000..8797f36db18f --- /dev/null +++ b/api/envoy/extensions/transport_sockets/tls/v4alpha/tls.proto @@ -0,0 +1,163 @@ +syntax = "proto3"; + +package envoy.extensions.transport_sockets.tls.v4alpha; + +import "envoy/extensions/transport_sockets/tls/v4alpha/common.proto"; +import "envoy/extensions/transport_sockets/tls/v4alpha/secret.proto"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v4alpha"; +option java_outer_classname = "TlsProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: TLS transport socket] +// [#extension: envoy.transport_sockets.tls] +// The TLS contexts below provide the transport socket configuration for upstream/downstream TLS. + +message UpstreamTlsContext { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext"; + + // Common TLS context settings. + // + // .. attention:: + // + // Server certificate verification is not enabled by default. Configure + // :ref:`trusted_ca` to enable + // verification. + CommonTlsContext common_tls_context = 1; + + // SNI string to use when creating TLS backend connections. + string sni = 2 [(validate.rules).string = {max_bytes: 255}]; + + // If true, server-initiated TLS renegotiation will be allowed. + // + // .. attention:: + // + // TLS renegotiation is considered insecure and shouldn't be used unless absolutely necessary. + bool allow_renegotiation = 3; + + // Maximum number of session keys (Pre-Shared Keys for TLSv1.3+, Session IDs and Session Tickets + // for TLSv1.2 and older) to store for the purpose of session resumption. + // + // Defaults to 1, setting this to 0 disables session resumption. + google.protobuf.UInt32Value max_session_keys = 4; +} + +// [#next-free-field: 8] +message DownstreamTlsContext { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext"; + + // Common TLS context settings. + CommonTlsContext common_tls_context = 1; + + // If specified, Envoy will reject connections without a valid client + // certificate. + google.protobuf.BoolValue require_client_certificate = 2; + + // If specified, Envoy will reject connections without a valid and matching SNI. + // [#not-implemented-hide:] + google.protobuf.BoolValue require_sni = 3; + + oneof session_ticket_keys_type { + // TLS session ticket key settings. 
+ TlsSessionTicketKeys session_ticket_keys = 4; + + // Config for fetching TLS session ticket keys via SDS API. + SdsSecretConfig session_ticket_keys_sds_secret_config = 5; + + // Config for controlling stateless TLS session resumption: setting this to true will cause the TLS + // server to not issue TLS session tickets for the purposes of stateless TLS session resumption. + // If set to false, the TLS server will issue TLS session tickets and encrypt/decrypt them using + // the keys specified through either :ref:`session_ticket_keys ` + // or :ref:`session_ticket_keys_sds_secret_config `. + // If this config is set to false and no keys are explicitly configured, the TLS server will issue + // TLS session tickets and encrypt/decrypt them using an internally-generated and managed key, with the + // implication that sessions cannot be resumed across hot restarts or on different hosts. + bool disable_stateless_session_resumption = 7; + } + + // If specified, session_timeout will change maximum lifetime (in seconds) of TLS session + // Currently this value is used as a hint to `TLS session ticket lifetime (for TLSv1.2) + // ` + // only seconds could be specified (fractional seconds are going to be ignored). + google.protobuf.Duration session_timeout = 6 [(validate.rules).duration = { + lt {seconds: 4294967296} + gte {} + }]; +} + +// TLS context shared by both client and server TLS contexts. +// [#next-free-field: 9] +message CommonTlsContext { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.CommonTlsContext"; + + message CombinedCertificateValidationContext { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.CommonTlsContext." + "CombinedCertificateValidationContext"; + + // How to validate peer certificates. + CertificateValidationContext default_validation_context = 1 + [(validate.rules).message = {required: true}]; + + // Config for fetching validation context via SDS API. + SdsSecretConfig validation_context_sds_secret_config = 2 + [(validate.rules).message = {required: true}]; + } + + reserved 5; + + // TLS protocol versions, cipher suites etc. + TlsParameters tls_params = 1; + + // :ref:`Multiple TLS certificates ` can be associated with the + // same context to allow both RSA and ECDSA certificates. + // + // Only a single TLS certificate is supported in client contexts. In server contexts, the first + // RSA certificate is used for clients that only support RSA and the first ECDSA certificate is + // used for clients that support ECDSA. + repeated TlsCertificate tls_certificates = 2; + + // Configs for fetching TLS certificates via SDS API. + repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6 + [(validate.rules).repeated = {max_items: 1}]; + + oneof validation_context_type { + // How to validate peer certificates. + CertificateValidationContext validation_context = 3; + + // Config for fetching validation context via SDS API. + SdsSecretConfig validation_context_sds_secret_config = 7; + + // Combined certificate validation context holds a default CertificateValidationContext + // and SDS config. When SDS server returns dynamic CertificateValidationContext, both dynamic + // and default CertificateValidationContext are merged into a new CertificateValidationContext + // for validation. 
This merge is done by Message::MergeFrom(), so dynamic + // CertificateValidationContext overwrites singular fields in default + // CertificateValidationContext, and concatenates repeated fields to default + // CertificateValidationContext, and logical OR is applied to boolean fields. + CombinedCertificateValidationContext combined_validation_context = 8; + } + + // Supplies the list of ALPN protocols that the listener should expose. In + // practice this is likely to be set to one of two values (see the + // :ref:`codec_type + // ` + // parameter in the HTTP connection manager for more information): + // + // * "h2,http/1.1" If the listener is going to support both HTTP/2 and HTTP/1.1. + // * "http/1.1" If the listener is only going to support HTTP/1.1. + // + // There is no default for this parameter. If empty, Envoy will not expose ALPN. + repeated string alpn_protocols = 4; +} diff --git a/docs/generate_extension_db.py b/docs/generate_extension_db.py index d021b75e48ab..ebcb94307493 100755 --- a/docs/generate_extension_db.py +++ b/docs/generate_extension_db.py @@ -53,5 +53,8 @@ def GetExtensionMetadata(target): extension_db = {} for extension, target in extensions_build_config.EXTENSIONS.items(): extension_db[extension] = GetExtensionMetadata(target) + # The TLS transport extension is not in source/extensions/extensions_build_config.bzl + extension_db['envoy.transport_sockets.tls'] = GetExtensionMetadata( + '//source/extensions/transport_sockets/tls:config') pathlib.Path(output_path).write_text(json.dumps(extension_db)) diff --git a/docs/root/api-v2/common_messages/common_messages.rst b/docs/root/api-v2/common_messages/common_messages.rst index 853c1604f8cc..d2d2a0a8ac62 100644 --- a/docs/root/api-v2/common_messages/common_messages.rst +++ b/docs/root/api-v2/common_messages/common_messages.rst @@ -15,5 +15,6 @@ Common messages ../api/v2/core/grpc_method_list.proto ../api/v2/core/http_uri.proto ../api/v2/core/socket_option.proto - ../api/v2/auth/cert.proto + ../api/v2/auth/common.proto + ../api/v2/auth/secret.proto ../api/v2/ratelimit/ratelimit.proto diff --git a/docs/root/api-v2/config/transport_socket/transport_socket.rst b/docs/root/api-v2/config/transport_socket/transport_socket.rst index defee4f8cc81..f664acf6d29e 100644 --- a/docs/root/api-v2/config/transport_socket/transport_socket.rst +++ b/docs/root/api-v2/config/transport_socket/transport_socket.rst @@ -7,3 +7,4 @@ Transport sockets */v2alpha/* */v2/* + ../../api/v2/auth/tls.proto diff --git a/generated_api_shadow/envoy/api/v2/auth/cert.proto b/generated_api_shadow/envoy/api/v2/auth/cert.proto index a1642318e043..49e8b8c70fa2 100644 --- a/generated_api_shadow/envoy/api/v2/auth/cert.proto +++ b/generated_api_shadow/envoy/api/v2/auth/cert.proto @@ -2,486 +2,15 @@ syntax = "proto3"; package envoy.api.v2.auth; -import "envoy/api/v2/core/base.proto"; -import "envoy/api/v2/core/config_source.proto"; -import "envoy/type/matcher/string.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - import "udpa/annotations/migrate.proto"; -import "udpa/annotations/sensitive.proto"; import "udpa/annotations/status.proto"; -import "validate/validate.proto"; + +import public "envoy/api/v2/auth/common.proto"; +import public "envoy/api/v2/auth/secret.proto"; +import public "envoy/api/v2/auth/tls.proto"; option java_package = "io.envoyproxy.envoy.api.v2.auth"; option java_outer_classname = "CertProto"; option java_multiple_files = 
true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.transport_sockets.tls.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Common TLS configuration] - -message TlsParameters { - enum TlsProtocol { - // Envoy will choose the optimal TLS version. - TLS_AUTO = 0; - - // TLS 1.0 - TLSv1_0 = 1; - - // TLS 1.1 - TLSv1_1 = 2; - - // TLS 1.2 - TLSv1_2 = 3; - - // TLS 1.3 - TLSv1_3 = 4; - } - - // Minimum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_0`` for - // servers. - TlsProtocol tls_minimum_protocol_version = 1 [(validate.rules).enum = {defined_only: true}]; - - // Maximum TLS protocol version. By default, it's ``TLSv1_3`` for servers in non-FIPS builds, and - // ``TLSv1_2`` for clients and for servers using :ref:`BoringSSL FIPS `. - TlsProtocol tls_maximum_protocol_version = 2 [(validate.rules).enum = {defined_only: true}]; - - // If specified, the TLS listener will only support the specified `cipher list - // `_ - // when negotiating TLS 1.0-1.2 (this setting has no effect when negotiating TLS 1.3). If not - // specified, the default list will be used. - // - // In non-FIPS builds, the default cipher list is: - // - // .. code-block:: none - // - // [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305] - // [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305] - // ECDHE-ECDSA-AES128-SHA - // ECDHE-RSA-AES128-SHA - // AES128-GCM-SHA256 - // AES128-SHA - // ECDHE-ECDSA-AES256-GCM-SHA384 - // ECDHE-RSA-AES256-GCM-SHA384 - // ECDHE-ECDSA-AES256-SHA - // ECDHE-RSA-AES256-SHA - // AES256-GCM-SHA384 - // AES256-SHA - // - // In builds using :ref:`BoringSSL FIPS `, the default cipher list is: - // - // .. code-block:: none - // - // ECDHE-ECDSA-AES128-GCM-SHA256 - // ECDHE-RSA-AES128-GCM-SHA256 - // ECDHE-ECDSA-AES128-SHA - // ECDHE-RSA-AES128-SHA - // AES128-GCM-SHA256 - // AES128-SHA - // ECDHE-ECDSA-AES256-GCM-SHA384 - // ECDHE-RSA-AES256-GCM-SHA384 - // ECDHE-ECDSA-AES256-SHA - // ECDHE-RSA-AES256-SHA - // AES256-GCM-SHA384 - // AES256-SHA - repeated string cipher_suites = 3; - - // If specified, the TLS connection will only support the specified ECDH - // curves. If not specified, the default curves will be used. - // - // In non-FIPS builds, the default curves are: - // - // .. code-block:: none - // - // X25519 - // P-256 - // - // In builds using :ref:`BoringSSL FIPS `, the default curve is: - // - // .. code-block:: none - // - // P-256 - repeated string ecdh_curves = 4; -} - -// BoringSSL private key method configuration. The private key methods are used for external -// (potentially asynchronous) signing and decryption operations. Some use cases for private key -// methods would be TPM support and TLS acceleration. -message PrivateKeyProvider { - // Private key method provider name. The name must match a - // supported private key method provider type. - string provider_name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Private key method provider specific configuration. - oneof config_type { - google.protobuf.Struct config = 2 [deprecated = true, (udpa.annotations.sensitive) = true]; - - google.protobuf.Any typed_config = 3 [(udpa.annotations.sensitive) = true]; - } -} - -// [#next-free-field: 7] -message TlsCertificate { - // The TLS certificate chain. - core.DataSource certificate_chain = 1; - - // The TLS private key. - core.DataSource private_key = 2 [(udpa.annotations.sensitive) = true]; - - // BoringSSL private key method provider. 
This is an alternative to :ref:`private_key - // ` field. This can't be - // marked as ``oneof`` due to API compatibility reasons. Setting both :ref:`private_key - // ` and - // :ref:`private_key_provider - // ` fields will result in an - // error. - PrivateKeyProvider private_key_provider = 6; - - // The password to decrypt the TLS private key. If this field is not set, it is assumed that the - // TLS private key is not password encrypted. - core.DataSource password = 3 [(udpa.annotations.sensitive) = true]; - - // [#not-implemented-hide:] - core.DataSource ocsp_staple = 4; - - // [#not-implemented-hide:] - repeated core.DataSource signed_certificate_timestamp = 5; -} - -message TlsSessionTicketKeys { - // Keys for encrypting and decrypting TLS session tickets. The - // first key in the array contains the key to encrypt all new sessions created by this context. - // All keys are candidates for decrypting received tickets. This allows for easy rotation of keys - // by, for example, putting the new key first, and the previous key second. - // - // If :ref:`session_ticket_keys ` - // is not specified, the TLS library will still support resuming sessions via tickets, but it will - // use an internally-generated and managed key, so sessions cannot be resumed across hot restarts - // or on different hosts. - // - // Each key must contain exactly 80 bytes of cryptographically-secure random data. For - // example, the output of ``openssl rand 80``. - // - // .. attention:: - // - // Using this feature has serious security considerations and risks. Improper handling of keys - // may result in loss of secrecy in connections, even if ciphers supporting perfect forward - // secrecy are used. See https://www.imperialviolet.org/2013/06/27/botchingpfs.html for some - // discussion. To minimize the risk, you must: - // - // * Keep the session ticket keys at least as secure as your TLS certificate private keys - // * Rotate session ticket keys at least daily, and preferably hourly - // * Always generate keys using a cryptographically-secure random data source - repeated core.DataSource keys = 1 - [(validate.rules).repeated = {min_items: 1}, (udpa.annotations.sensitive) = true]; -} - -// [#next-free-field: 11] -message CertificateValidationContext { - // Peer certificate verification mode. - enum TrustChainVerification { - // Perform default certificate verification (e.g., against CA / verification lists) - VERIFY_TRUST_CHAIN = 0; - - // Connections where the certificate fails verification will be permitted. - // For HTTP connections, the result of certificate verification can be used in route matching. ( - // see :ref:`validated ` ). - ACCEPT_UNTRUSTED = 1; - } - - // TLS certificate data containing certificate authority certificates to use in verifying - // a presented peer certificate (e.g. server certificate for clusters or client certificate - // for listeners). If not specified and a peer certificate is presented it will not be - // verified. By default, a client certificate is optional, unless one of the additional - // options (:ref:`require_client_certificate - // `, - // :ref:`verify_certificate_spki - // `, - // :ref:`verify_certificate_hash - // `, or - // :ref:`match_subject_alt_names - // `) is also - // specified. - // - // It can optionally contain certificate revocation lists, in which case Envoy will verify - // that the presented peer certificate has not been revoked by one of the included CRLs. - // - // See :ref:`the TLS overview ` for a list of common - // system CA locations. 
- core.DataSource trusted_ca = 1; - - // An optional list of base64-encoded SHA-256 hashes. If specified, Envoy will verify that the - // SHA-256 of the DER-encoded Subject Public Key Information (SPKI) of the presented certificate - // matches one of the specified values. - // - // A base64-encoded SHA-256 of the Subject Public Key Information (SPKI) of the certificate - // can be generated with the following command: - // - // .. code-block:: bash - // - // $ openssl x509 -in path/to/client.crt -noout -pubkey - // | openssl pkey -pubin -outform DER - // | openssl dgst -sha256 -binary - // | openssl enc -base64 - // NvqYIYSbgK2vCJpQhObf77vv+bQWtc5ek5RIOwPiC9A= - // - // This is the format used in HTTP Public Key Pinning. - // - // When both: - // :ref:`verify_certificate_hash - // ` and - // :ref:`verify_certificate_spki - // ` are specified, - // a hash matching value from either of the lists will result in the certificate being accepted. - // - // .. attention:: - // - // This option is preferred over :ref:`verify_certificate_hash - // `, - // because SPKI is tied to a private key, so it doesn't change when the certificate - // is renewed using the same private key. - repeated string verify_certificate_spki = 3 - [(validate.rules).repeated = {items {string {min_bytes: 44 max_bytes: 44}}}]; - - // An optional list of hex-encoded SHA-256 hashes. If specified, Envoy will verify that - // the SHA-256 of the DER-encoded presented certificate matches one of the specified values. - // - // A hex-encoded SHA-256 of the certificate can be generated with the following command: - // - // .. code-block:: bash - // - // $ openssl x509 -in path/to/client.crt -outform DER | openssl dgst -sha256 | cut -d" " -f2 - // df6ff72fe9116521268f6f2dd4966f51df479883fe7037b39f75916ac3049d1a - // - // A long hex-encoded and colon-separated SHA-256 (a.k.a. "fingerprint") of the certificate - // can be generated with the following command: - // - // .. code-block:: bash - // - // $ openssl x509 -in path/to/client.crt -noout -fingerprint -sha256 | cut -d"=" -f2 - // DF:6F:F7:2F:E9:11:65:21:26:8F:6F:2D:D4:96:6F:51:DF:47:98:83:FE:70:37:B3:9F:75:91:6A:C3:04:9D:1A - // - // Both of those formats are acceptable. - // - // When both: - // :ref:`verify_certificate_hash - // ` and - // :ref:`verify_certificate_spki - // ` are specified, - // a hash matching value from either of the lists will result in the certificate being accepted. - repeated string verify_certificate_hash = 2 - [(validate.rules).repeated = {items {string {min_bytes: 64 max_bytes: 95}}}]; - - // An optional list of Subject Alternative Names. If specified, Envoy will verify that the - // Subject Alternative Name of the presented certificate matches one of the specified values. - // - // .. attention:: - // - // Subject Alternative Names are easily spoofable and verifying only them is insecure, - // therefore this option must be used together with :ref:`trusted_ca - // `. - repeated string verify_subject_alt_name = 4 [deprecated = true]; - - // An optional list of Subject Alternative name matchers. Envoy will verify that the - // Subject Alternative Name of the presented certificate matches one of the specified matches. - // - // When a certificate has wildcard DNS SAN entries, to match a specific client, it should be - // configured with exact match type in the :ref:`string matcher `. - // For example if the certificate has "\*.example.com" as DNS SAN entry, to allow only "api.example.com", - // it should be configured as shown below. - // - // .. 
code-block:: yaml - // - // match_subject_alt_names: - // exact: "api.example.com" - // - // .. attention:: - // - // Subject Alternative Names are easily spoofable and verifying only them is insecure, - // therefore this option must be used together with :ref:`trusted_ca - // `. - repeated type.matcher.StringMatcher match_subject_alt_names = 9; - - // [#not-implemented-hide:] Must present a signed time-stamped OCSP response. - google.protobuf.BoolValue require_ocsp_staple = 5; - - // [#not-implemented-hide:] Must present signed certificate time-stamp. - google.protobuf.BoolValue require_signed_certificate_timestamp = 6; - - // An optional `certificate revocation list - // `_ - // (in PEM format). If specified, Envoy will verify that the presented peer - // certificate has not been revoked by this CRL. If this DataSource contains - // multiple CRLs, all of them will be used. - core.DataSource crl = 7; - - // If specified, Envoy will not reject expired certificates. - bool allow_expired_certificate = 8; - - // Certificate trust chain verification mode. - TrustChainVerification trust_chain_verification = 10 - [(validate.rules).enum = {defined_only: true}]; -} - -// TLS context shared by both client and server TLS contexts. -// [#next-free-field: 9] -message CommonTlsContext { - message CombinedCertificateValidationContext { - // How to validate peer certificates. - CertificateValidationContext default_validation_context = 1 - [(validate.rules).message = {required: true}]; - - // Config for fetching validation context via SDS API. - SdsSecretConfig validation_context_sds_secret_config = 2 - [(validate.rules).message = {required: true}]; - } - - reserved 5; - - // TLS protocol versions, cipher suites etc. - TlsParameters tls_params = 1; - - // :ref:`Multiple TLS certificates ` can be associated with the - // same context to allow both RSA and ECDSA certificates. - // - // Only a single TLS certificate is supported in client contexts. In server contexts, the first - // RSA certificate is used for clients that only support RSA and the first ECDSA certificate is - // used for clients that support ECDSA. - repeated TlsCertificate tls_certificates = 2; - - // Configs for fetching TLS certificates via SDS API. - repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6 - [(validate.rules).repeated = {max_items: 1}]; - - oneof validation_context_type { - // How to validate peer certificates. - CertificateValidationContext validation_context = 3; - - // Config for fetching validation context via SDS API. - SdsSecretConfig validation_context_sds_secret_config = 7; - - // Combined certificate validation context holds a default CertificateValidationContext - // and SDS config. When SDS server returns dynamic CertificateValidationContext, both dynamic - // and default CertificateValidationContext are merged into a new CertificateValidationContext - // for validation. This merge is done by Message::MergeFrom(), so dynamic - // CertificateValidationContext overwrites singular fields in default - // CertificateValidationContext, and concatenates repeated fields to default - // CertificateValidationContext, and logical OR is applied to boolean fields. - CombinedCertificateValidationContext combined_validation_context = 8; - } - - // Supplies the list of ALPN protocols that the listener should expose. 
In - // practice this is likely to be set to one of two values (see the - // :ref:`codec_type - // ` - // parameter in the HTTP connection manager for more information): - // - // * "h2,http/1.1" If the listener is going to support both HTTP/2 and HTTP/1.1. - // * "http/1.1" If the listener is only going to support HTTP/1.1. - // - // There is no default for this parameter. If empty, Envoy will not expose ALPN. - repeated string alpn_protocols = 4; -} - -message UpstreamTlsContext { - // Common TLS context settings. - // - // .. attention:: - // - // Server certificate verification is not enabled by default. Configure - // :ref:`trusted_ca` to enable - // verification. - CommonTlsContext common_tls_context = 1; - - // SNI string to use when creating TLS backend connections. - string sni = 2 [(validate.rules).string = {max_bytes: 255}]; - - // If true, server-initiated TLS renegotiation will be allowed. - // - // .. attention:: - // - // TLS renegotiation is considered insecure and shouldn't be used unless absolutely necessary. - bool allow_renegotiation = 3; - - // Maximum number of session keys (Pre-Shared Keys for TLSv1.3+, Session IDs and Session Tickets - // for TLSv1.2 and older) to store for the purpose of session resumption. - // - // Defaults to 1, setting this to 0 disables session resumption. - google.protobuf.UInt32Value max_session_keys = 4; -} - -// [#next-free-field: 8] -message DownstreamTlsContext { - // Common TLS context settings. - CommonTlsContext common_tls_context = 1; - - // If specified, Envoy will reject connections without a valid client - // certificate. - google.protobuf.BoolValue require_client_certificate = 2; - - // If specified, Envoy will reject connections without a valid and matching SNI. - // [#not-implemented-hide:] - google.protobuf.BoolValue require_sni = 3; - - oneof session_ticket_keys_type { - // TLS session ticket key settings. - TlsSessionTicketKeys session_ticket_keys = 4; - - // Config for fetching TLS session ticket keys via SDS API. - SdsSecretConfig session_ticket_keys_sds_secret_config = 5; - - // Config for controlling stateless TLS session resumption: setting this to true will cause the TLS - // server to not issue TLS session tickets for the purposes of stateless TLS session resumption. - // If set to false, the TLS server will issue TLS session tickets and encrypt/decrypt them using - // the keys specified through either :ref:`session_ticket_keys ` - // or :ref:`session_ticket_keys_sds_secret_config `. - // If this config is set to false and no keys are explicitly configured, the TLS server will issue - // TLS session tickets and encrypt/decrypt them using an internally-generated and managed key, with the - // implication that sessions cannot be resumed across hot restarts or on different hosts. - bool disable_stateless_session_resumption = 7; - } - - // If specified, session_timeout will change maximum lifetime (in seconds) of TLS session - // Currently this value is used as a hint to `TLS session ticket lifetime (for TLSv1.2) - // ` - // only seconds could be specified (fractional seconds are going to be ignored). - google.protobuf.Duration session_timeout = 6 [(validate.rules).duration = { - lt {seconds: 4294967296} - gte {} - }]; -} - -message GenericSecret { - // Secret of generic type and is available to filters. - core.DataSource secret = 1 [(udpa.annotations.sensitive) = true]; -} - -message SdsSecretConfig { - // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. 
- // When both name and config are specified, then secret can be fetched and/or reloaded via - // SDS. When only name is specified, then secret will be loaded from static resources. - string name = 1; - - core.ConfigSource sds_config = 2; -} - -// [#next-free-field: 6] -message Secret { - // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. - string name = 1; - - oneof type { - TlsCertificate tls_certificate = 2; - - TlsSessionTicketKeys session_ticket_keys = 3; - - CertificateValidationContext validation_context = 4; - - GenericSecret generic_secret = 5; - } -} diff --git a/generated_api_shadow/envoy/api/v2/auth/common.proto b/generated_api_shadow/envoy/api/v2/auth/common.proto new file mode 100644 index 000000000000..ab4b9c13493d --- /dev/null +++ b/generated_api_shadow/envoy/api/v2/auth/common.proto @@ -0,0 +1,327 @@ +syntax = "proto3"; + +package envoy.api.v2.auth; + +import "envoy/api/v2/core/base.proto"; +import "envoy/type/matcher/string.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/migrate.proto"; +import "udpa/annotations/sensitive.proto"; +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.api.v2.auth"; +option java_outer_classname = "CommonProto"; +option java_multiple_files = true; +option (udpa.annotations.file_migrate).move_to_package = + "envoy.extensions.transport_sockets.tls.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; + +// [#protodoc-title: Common TLS configuration] + +message TlsParameters { + enum TlsProtocol { + // Envoy will choose the optimal TLS version. + TLS_AUTO = 0; + + // TLS 1.0 + TLSv1_0 = 1; + + // TLS 1.1 + TLSv1_1 = 2; + + // TLS 1.2 + TLSv1_2 = 3; + + // TLS 1.3 + TLSv1_3 = 4; + } + + // Minimum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_0`` for + // servers. + TlsProtocol tls_minimum_protocol_version = 1 [(validate.rules).enum = {defined_only: true}]; + + // Maximum TLS protocol version. By default, it's ``TLSv1_3`` for servers in non-FIPS builds, and + // ``TLSv1_2`` for clients and for servers using :ref:`BoringSSL FIPS `. + TlsProtocol tls_maximum_protocol_version = 2 [(validate.rules).enum = {defined_only: true}]; + + // If specified, the TLS listener will only support the specified `cipher list + // `_ + // when negotiating TLS 1.0-1.2 (this setting has no effect when negotiating TLS 1.3). If not + // specified, the default list will be used. + // + // In non-FIPS builds, the default cipher list is: + // + // .. code-block:: none + // + // [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305] + // [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305] + // ECDHE-ECDSA-AES128-SHA + // ECDHE-RSA-AES128-SHA + // AES128-GCM-SHA256 + // AES128-SHA + // ECDHE-ECDSA-AES256-GCM-SHA384 + // ECDHE-RSA-AES256-GCM-SHA384 + // ECDHE-ECDSA-AES256-SHA + // ECDHE-RSA-AES256-SHA + // AES256-GCM-SHA384 + // AES256-SHA + // + // In builds using :ref:`BoringSSL FIPS `, the default cipher list is: + // + // .. 
code-block:: none + // + // ECDHE-ECDSA-AES128-GCM-SHA256 + // ECDHE-RSA-AES128-GCM-SHA256 + // ECDHE-ECDSA-AES128-SHA + // ECDHE-RSA-AES128-SHA + // AES128-GCM-SHA256 + // AES128-SHA + // ECDHE-ECDSA-AES256-GCM-SHA384 + // ECDHE-RSA-AES256-GCM-SHA384 + // ECDHE-ECDSA-AES256-SHA + // ECDHE-RSA-AES256-SHA + // AES256-GCM-SHA384 + // AES256-SHA + repeated string cipher_suites = 3; + + // If specified, the TLS connection will only support the specified ECDH + // curves. If not specified, the default curves will be used. + // + // In non-FIPS builds, the default curves are: + // + // .. code-block:: none + // + // X25519 + // P-256 + // + // In builds using :ref:`BoringSSL FIPS `, the default curve is: + // + // .. code-block:: none + // + // P-256 + repeated string ecdh_curves = 4; +} + +// BoringSSL private key method configuration. The private key methods are used for external +// (potentially asynchronous) signing and decryption operations. Some use cases for private key +// methods would be TPM support and TLS acceleration. +message PrivateKeyProvider { + // Private key method provider name. The name must match a + // supported private key method provider type. + string provider_name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Private key method provider specific configuration. + oneof config_type { + google.protobuf.Struct config = 2 [deprecated = true, (udpa.annotations.sensitive) = true]; + + google.protobuf.Any typed_config = 3 [(udpa.annotations.sensitive) = true]; + } +} + +// [#next-free-field: 7] +message TlsCertificate { + // The TLS certificate chain. + core.DataSource certificate_chain = 1; + + // The TLS private key. + core.DataSource private_key = 2 [(udpa.annotations.sensitive) = true]; + + // BoringSSL private key method provider. This is an alternative to :ref:`private_key + // ` field. This can't be + // marked as ``oneof`` due to API compatibility reasons. Setting both :ref:`private_key + // ` and + // :ref:`private_key_provider + // ` fields will result in an + // error. + PrivateKeyProvider private_key_provider = 6; + + // The password to decrypt the TLS private key. If this field is not set, it is assumed that the + // TLS private key is not password encrypted. + core.DataSource password = 3 [(udpa.annotations.sensitive) = true]; + + // [#not-implemented-hide:] + core.DataSource ocsp_staple = 4; + + // [#not-implemented-hide:] + repeated core.DataSource signed_certificate_timestamp = 5; +} + +message TlsSessionTicketKeys { + // Keys for encrypting and decrypting TLS session tickets. The + // first key in the array contains the key to encrypt all new sessions created by this context. + // All keys are candidates for decrypting received tickets. This allows for easy rotation of keys + // by, for example, putting the new key first, and the previous key second. + // + // If :ref:`session_ticket_keys ` + // is not specified, the TLS library will still support resuming sessions via tickets, but it will + // use an internally-generated and managed key, so sessions cannot be resumed across hot restarts + // or on different hosts. + // + // Each key must contain exactly 80 bytes of cryptographically-secure random data. For + // example, the output of ``openssl rand 80``. + // + // .. attention:: + // + // Using this feature has serious security considerations and risks. Improper handling of keys + // may result in loss of secrecy in connections, even if ciphers supporting perfect forward + // secrecy are used. 
See https://www.imperialviolet.org/2013/06/27/botchingpfs.html for some + // discussion. To minimize the risk, you must: + // + // * Keep the session ticket keys at least as secure as your TLS certificate private keys + // * Rotate session ticket keys at least daily, and preferably hourly + // * Always generate keys using a cryptographically-secure random data source + repeated core.DataSource keys = 1 + [(validate.rules).repeated = {min_items: 1}, (udpa.annotations.sensitive) = true]; +} + +// [#next-free-field: 11] +message CertificateValidationContext { + // Peer certificate verification mode. + enum TrustChainVerification { + // Perform default certificate verification (e.g., against CA / verification lists) + VERIFY_TRUST_CHAIN = 0; + + // Connections where the certificate fails verification will be permitted. + // For HTTP connections, the result of certificate verification can be used in route matching. ( + // see :ref:`validated ` ). + ACCEPT_UNTRUSTED = 1; + } + + // TLS certificate data containing certificate authority certificates to use in verifying + // a presented peer certificate (e.g. server certificate for clusters or client certificate + // for listeners). If not specified and a peer certificate is presented it will not be + // verified. By default, a client certificate is optional, unless one of the additional + // options (:ref:`require_client_certificate + // `, + // :ref:`verify_certificate_spki + // `, + // :ref:`verify_certificate_hash + // `, or + // :ref:`match_subject_alt_names + // `) is also + // specified. + // + // It can optionally contain certificate revocation lists, in which case Envoy will verify + // that the presented peer certificate has not been revoked by one of the included CRLs. + // + // See :ref:`the TLS overview ` for a list of common + // system CA locations. + core.DataSource trusted_ca = 1; + + // An optional list of base64-encoded SHA-256 hashes. If specified, Envoy will verify that the + // SHA-256 of the DER-encoded Subject Public Key Information (SPKI) of the presented certificate + // matches one of the specified values. + // + // A base64-encoded SHA-256 of the Subject Public Key Information (SPKI) of the certificate + // can be generated with the following command: + // + // .. code-block:: bash + // + // $ openssl x509 -in path/to/client.crt -noout -pubkey + // | openssl pkey -pubin -outform DER + // | openssl dgst -sha256 -binary + // | openssl enc -base64 + // NvqYIYSbgK2vCJpQhObf77vv+bQWtc5ek5RIOwPiC9A= + // + // This is the format used in HTTP Public Key Pinning. + // + // When both: + // :ref:`verify_certificate_hash + // ` and + // :ref:`verify_certificate_spki + // ` are specified, + // a hash matching value from either of the lists will result in the certificate being accepted. + // + // .. attention:: + // + // This option is preferred over :ref:`verify_certificate_hash + // `, + // because SPKI is tied to a private key, so it doesn't change when the certificate + // is renewed using the same private key. + repeated string verify_certificate_spki = 3 + [(validate.rules).repeated = {items {string {min_bytes: 44 max_bytes: 44}}}]; + + // An optional list of hex-encoded SHA-256 hashes. If specified, Envoy will verify that + // the SHA-256 of the DER-encoded presented certificate matches one of the specified values. + // + // A hex-encoded SHA-256 of the certificate can be generated with the following command: + // + // .. 
code-block:: bash + // + // $ openssl x509 -in path/to/client.crt -outform DER | openssl dgst -sha256 | cut -d" " -f2 + // df6ff72fe9116521268f6f2dd4966f51df479883fe7037b39f75916ac3049d1a + // + // A long hex-encoded and colon-separated SHA-256 (a.k.a. "fingerprint") of the certificate + // can be generated with the following command: + // + // .. code-block:: bash + // + // $ openssl x509 -in path/to/client.crt -noout -fingerprint -sha256 | cut -d"=" -f2 + // DF:6F:F7:2F:E9:11:65:21:26:8F:6F:2D:D4:96:6F:51:DF:47:98:83:FE:70:37:B3:9F:75:91:6A:C3:04:9D:1A + // + // Both of those formats are acceptable. + // + // When both: + // :ref:`verify_certificate_hash + // ` and + // :ref:`verify_certificate_spki + // ` are specified, + // a hash matching value from either of the lists will result in the certificate being accepted. + repeated string verify_certificate_hash = 2 + [(validate.rules).repeated = {items {string {min_bytes: 64 max_bytes: 95}}}]; + + // An optional list of Subject Alternative Names. If specified, Envoy will verify that the + // Subject Alternative Name of the presented certificate matches one of the specified values. + // + // .. attention:: + // + // Subject Alternative Names are easily spoofable and verifying only them is insecure, + // therefore this option must be used together with :ref:`trusted_ca + // `. + repeated string verify_subject_alt_name = 4 [deprecated = true]; + + // An optional list of Subject Alternative name matchers. Envoy will verify that the + // Subject Alternative Name of the presented certificate matches one of the specified matches. + // + // When a certificate has wildcard DNS SAN entries, to match a specific client, it should be + // configured with exact match type in the :ref:`string matcher `. + // For example if the certificate has "\*.example.com" as DNS SAN entry, to allow only "api.example.com", + // it should be configured as shown below. + // + // .. code-block:: yaml + // + // match_subject_alt_names: + // exact: "api.example.com" + // + // .. attention:: + // + // Subject Alternative Names are easily spoofable and verifying only them is insecure, + // therefore this option must be used together with :ref:`trusted_ca + // `. + repeated type.matcher.StringMatcher match_subject_alt_names = 9; + + // [#not-implemented-hide:] Must present a signed time-stamped OCSP response. + google.protobuf.BoolValue require_ocsp_staple = 5; + + // [#not-implemented-hide:] Must present signed certificate time-stamp. + google.protobuf.BoolValue require_signed_certificate_timestamp = 6; + + // An optional `certificate revocation list + // `_ + // (in PEM format). If specified, Envoy will verify that the presented peer + // certificate has not been revoked by this CRL. If this DataSource contains + // multiple CRLs, all of them will be used. + core.DataSource crl = 7; + + // If specified, Envoy will not reject expired certificates. + bool allow_expired_certificate = 8; + + // Certificate trust chain verification mode. 
+ TrustChainVerification trust_chain_verification = 10 + [(validate.rules).enum = {defined_only: true}]; +} diff --git a/generated_api_shadow/envoy/api/v2/auth/secret.proto b/generated_api_shadow/envoy/api/v2/auth/secret.proto new file mode 100644 index 000000000000..3a6d8cf7dcb6 --- /dev/null +++ b/generated_api_shadow/envoy/api/v2/auth/secret.proto @@ -0,0 +1,50 @@ +syntax = "proto3"; + +package envoy.api.v2.auth; + +import "envoy/api/v2/auth/common.proto"; +import "envoy/api/v2/core/base.proto"; +import "envoy/api/v2/core/config_source.proto"; + +import "udpa/annotations/migrate.proto"; +import "udpa/annotations/sensitive.proto"; +import "udpa/annotations/status.proto"; + +option java_package = "io.envoyproxy.envoy.api.v2.auth"; +option java_outer_classname = "SecretProto"; +option java_multiple_files = true; +option (udpa.annotations.file_migrate).move_to_package = + "envoy.extensions.transport_sockets.tls.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; + +// [#protodoc-title: Secrets configuration] + +message GenericSecret { + // Secret of generic type and is available to filters. + core.DataSource secret = 1 [(udpa.annotations.sensitive) = true]; +} + +message SdsSecretConfig { + // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. + // When both name and config are specified, then secret can be fetched and/or reloaded via + // SDS. When only name is specified, then secret will be loaded from static resources. + string name = 1; + + core.ConfigSource sds_config = 2; +} + +// [#next-free-field: 6] +message Secret { + // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. + string name = 1; + + oneof type { + TlsCertificate tls_certificate = 2; + + TlsSessionTicketKeys session_ticket_keys = 3; + + CertificateValidationContext validation_context = 4; + + GenericSecret generic_secret = 5; + } +} diff --git a/generated_api_shadow/envoy/api/v2/auth/tls.proto b/generated_api_shadow/envoy/api/v2/auth/tls.proto new file mode 100644 index 000000000000..201973a2b9de --- /dev/null +++ b/generated_api_shadow/envoy/api/v2/auth/tls.proto @@ -0,0 +1,152 @@ +syntax = "proto3"; + +package envoy.api.v2.auth; + +import "envoy/api/v2/auth/common.proto"; +import "envoy/api/v2/auth/secret.proto"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.api.v2.auth"; +option java_outer_classname = "TlsProto"; +option java_multiple_files = true; +option (udpa.annotations.file_migrate).move_to_package = + "envoy.extensions.transport_sockets.tls.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; + +// [#protodoc-title: TLS transport socket] +// [#extension: envoy.transport_sockets.tls] +// The TLS contexts below provide the transport socket configuration for upstream/downstream TLS. + +message UpstreamTlsContext { + // Common TLS context settings. + // + // .. attention:: + // + // Server certificate verification is not enabled by default. Configure + // :ref:`trusted_ca` to enable + // verification. + CommonTlsContext common_tls_context = 1; + + // SNI string to use when creating TLS backend connections. + string sni = 2 [(validate.rules).string = {max_bytes: 255}]; + + // If true, server-initiated TLS renegotiation will be allowed. + // + // .. 
attention:: + // + // TLS renegotiation is considered insecure and shouldn't be used unless absolutely necessary. + bool allow_renegotiation = 3; + + // Maximum number of session keys (Pre-Shared Keys for TLSv1.3+, Session IDs and Session Tickets + // for TLSv1.2 and older) to store for the purpose of session resumption. + // + // Defaults to 1, setting this to 0 disables session resumption. + google.protobuf.UInt32Value max_session_keys = 4; +} + +// [#next-free-field: 8] +message DownstreamTlsContext { + // Common TLS context settings. + CommonTlsContext common_tls_context = 1; + + // If specified, Envoy will reject connections without a valid client + // certificate. + google.protobuf.BoolValue require_client_certificate = 2; + + // If specified, Envoy will reject connections without a valid and matching SNI. + // [#not-implemented-hide:] + google.protobuf.BoolValue require_sni = 3; + + oneof session_ticket_keys_type { + // TLS session ticket key settings. + TlsSessionTicketKeys session_ticket_keys = 4; + + // Config for fetching TLS session ticket keys via SDS API. + SdsSecretConfig session_ticket_keys_sds_secret_config = 5; + + // Config for controlling stateless TLS session resumption: setting this to true will cause the TLS + // server to not issue TLS session tickets for the purposes of stateless TLS session resumption. + // If set to false, the TLS server will issue TLS session tickets and encrypt/decrypt them using + // the keys specified through either :ref:`session_ticket_keys ` + // or :ref:`session_ticket_keys_sds_secret_config `. + // If this config is set to false and no keys are explicitly configured, the TLS server will issue + // TLS session tickets and encrypt/decrypt them using an internally-generated and managed key, with the + // implication that sessions cannot be resumed across hot restarts or on different hosts. + bool disable_stateless_session_resumption = 7; + } + + // If specified, session_timeout will change maximum lifetime (in seconds) of TLS session + // Currently this value is used as a hint to `TLS session ticket lifetime (for TLSv1.2) + // ` + // only seconds could be specified (fractional seconds are going to be ignored). + google.protobuf.Duration session_timeout = 6 [(validate.rules).duration = { + lt {seconds: 4294967296} + gte {} + }]; +} + +// TLS context shared by both client and server TLS contexts. +// [#next-free-field: 9] +message CommonTlsContext { + message CombinedCertificateValidationContext { + // How to validate peer certificates. + CertificateValidationContext default_validation_context = 1 + [(validate.rules).message = {required: true}]; + + // Config for fetching validation context via SDS API. + SdsSecretConfig validation_context_sds_secret_config = 2 + [(validate.rules).message = {required: true}]; + } + + reserved 5; + + // TLS protocol versions, cipher suites etc. + TlsParameters tls_params = 1; + + // :ref:`Multiple TLS certificates ` can be associated with the + // same context to allow both RSA and ECDSA certificates. + // + // Only a single TLS certificate is supported in client contexts. In server contexts, the first + // RSA certificate is used for clients that only support RSA and the first ECDSA certificate is + // used for clients that support ECDSA. + repeated TlsCertificate tls_certificates = 2; + + // Configs for fetching TLS certificates via SDS API. 
+ repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6 + [(validate.rules).repeated = {max_items: 1}]; + + oneof validation_context_type { + // How to validate peer certificates. + CertificateValidationContext validation_context = 3; + + // Config for fetching validation context via SDS API. + SdsSecretConfig validation_context_sds_secret_config = 7; + + // Combined certificate validation context holds a default CertificateValidationContext + // and SDS config. When SDS server returns dynamic CertificateValidationContext, both dynamic + // and default CertificateValidationContext are merged into a new CertificateValidationContext + // for validation. This merge is done by Message::MergeFrom(), so dynamic + // CertificateValidationContext overwrites singular fields in default + // CertificateValidationContext, and concatenates repeated fields to default + // CertificateValidationContext, and logical OR is applied to boolean fields. + CombinedCertificateValidationContext combined_validation_context = 8; + } + + // Supplies the list of ALPN protocols that the listener should expose. In + // practice this is likely to be set to one of two values (see the + // :ref:`codec_type + // ` + // parameter in the HTTP connection manager for more information): + // + // * "h2,http/1.1" If the listener is going to support both HTTP/2 and HTTP/1.1. + // * "http/1.1" If the listener is only going to support HTTP/1.1. + // + // There is no default for this parameter. If empty, Envoy will not expose ALPN. + repeated string alpn_protocols = 4; +} diff --git a/generated_api_shadow/envoy/api/v2/cluster.proto b/generated_api_shadow/envoy/api/v2/cluster.proto index 5de5c20df570..8d9ead00f1cd 100644 --- a/generated_api_shadow/envoy/api/v2/cluster.proto +++ b/generated_api_shadow/envoy/api/v2/cluster.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package envoy.api.v2; -import "envoy/api/v2/auth/cert.proto"; +import "envoy/api/v2/auth/tls.proto"; import "envoy/api/v2/cluster/circuit_breaker.proto"; import "envoy/api/v2/cluster/filter.proto"; import "envoy/api/v2/cluster/outlier_detection.proto"; diff --git a/generated_api_shadow/envoy/api/v2/listener/listener_components.proto b/generated_api_shadow/envoy/api/v2/listener/listener_components.proto index fe449c63358a..a6791c86cd0b 100644 --- a/generated_api_shadow/envoy/api/v2/listener/listener_components.proto +++ b/generated_api_shadow/envoy/api/v2/listener/listener_components.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package envoy.api.v2.listener; -import "envoy/api/v2/auth/cert.proto"; +import "envoy/api/v2/auth/tls.proto"; import "envoy/api/v2/core/address.proto"; import "envoy/api/v2/core/base.proto"; import "envoy/type/range.proto"; diff --git a/generated_api_shadow/envoy/config/bootstrap/v2/bootstrap.proto b/generated_api_shadow/envoy/config/bootstrap/v2/bootstrap.proto index 622304483eb2..da88dce786ae 100644 --- a/generated_api_shadow/envoy/config/bootstrap/v2/bootstrap.proto +++ b/generated_api_shadow/envoy/config/bootstrap/v2/bootstrap.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package envoy.config.bootstrap.v2; -import "envoy/api/v2/auth/cert.proto"; +import "envoy/api/v2/auth/secret.proto"; import "envoy/api/v2/cluster.proto"; import "envoy/api/v2/core/address.proto"; import "envoy/api/v2/core/base.proto"; diff --git a/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto b/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto index 3b0861d81850..994af34c7ac2 100644 --- a/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto 
+++ b/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto @@ -12,7 +12,7 @@ import "envoy/config/listener/v3/listener.proto"; import "envoy/config/metrics/v3/stats.proto"; import "envoy/config/overload/v3/overload.proto"; import "envoy/config/trace/v3/http_tracer.proto"; -import "envoy/extensions/transport_sockets/tls/v3/cert.proto"; +import "envoy/extensions/transport_sockets/tls/v3/secret.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; diff --git a/generated_api_shadow/envoy/config/bootstrap/v4alpha/bootstrap.proto b/generated_api_shadow/envoy/config/bootstrap/v4alpha/bootstrap.proto index 9177f186f6b5..b9086b771981 100644 --- a/generated_api_shadow/envoy/config/bootstrap/v4alpha/bootstrap.proto +++ b/generated_api_shadow/envoy/config/bootstrap/v4alpha/bootstrap.proto @@ -12,7 +12,7 @@ import "envoy/config/listener/v3/listener.proto"; import "envoy/config/metrics/v3/stats.proto"; import "envoy/config/overload/v3/overload.proto"; import "envoy/config/trace/v4alpha/http_tracer.proto"; -import "envoy/extensions/transport_sockets/tls/v4alpha/cert.proto"; +import "envoy/extensions/transport_sockets/tls/v4alpha/secret.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; diff --git a/generated_api_shadow/envoy/config/cluster/v3/cluster.proto b/generated_api_shadow/envoy/config/cluster/v3/cluster.proto index 0e0b3a1be9f1..c058c421eec4 100644 --- a/generated_api_shadow/envoy/config/cluster/v3/cluster.proto +++ b/generated_api_shadow/envoy/config/cluster/v3/cluster.proto @@ -11,7 +11,7 @@ import "envoy/config/core/v3/config_source.proto"; import "envoy/config/core/v3/health_check.proto"; import "envoy/config/core/v3/protocol.proto"; import "envoy/config/endpoint/v3/endpoint.proto"; -import "envoy/extensions/transport_sockets/tls/v3/cert.proto"; +import "envoy/extensions/transport_sockets/tls/v3/tls.proto"; import "envoy/type/v3/percent.proto"; import "google/protobuf/any.proto"; diff --git a/generated_api_shadow/envoy/config/listener/v3/listener_components.proto b/generated_api_shadow/envoy/config/listener/v3/listener_components.proto index 25d39e24620e..b42f11cd6f5e 100644 --- a/generated_api_shadow/envoy/config/listener/v3/listener_components.proto +++ b/generated_api_shadow/envoy/config/listener/v3/listener_components.proto @@ -4,7 +4,7 @@ package envoy.config.listener.v3; import "envoy/config/core/v3/address.proto"; import "envoy/config/core/v3/base.proto"; -import "envoy/extensions/transport_sockets/tls/v3/cert.proto"; +import "envoy/extensions/transport_sockets/tls/v3/tls.proto"; import "envoy/type/v3/range.proto"; import "google/protobuf/any.proto"; diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/cert.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/cert.proto index 4121297ec1c3..cf5dc597aafb 100644 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/cert.proto +++ b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/cert.proto @@ -2,507 +2,12 @@ syntax = "proto3"; package envoy.extensions.transport_sockets.tls.v3; -import "envoy/config/core/v3/base.proto"; -import "envoy/config/core/v3/config_source.proto"; -import "envoy/type/matcher/v3/string.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/sensitive.proto"; import "udpa/annotations/status.proto"; -import 
"udpa/annotations/versioning.proto"; -import "validate/validate.proto"; + +import public "envoy/extensions/transport_sockets/tls/v3/common.proto"; +import public "envoy/extensions/transport_sockets/tls/v3/secret.proto"; +import public "envoy/extensions/transport_sockets/tls/v3/tls.proto"; option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v3"; option java_outer_classname = "CertProto"; option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Common TLS configuration] - -message TlsParameters { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.TlsParameters"; - - enum TlsProtocol { - // Envoy will choose the optimal TLS version. - TLS_AUTO = 0; - - // TLS 1.0 - TLSv1_0 = 1; - - // TLS 1.1 - TLSv1_1 = 2; - - // TLS 1.2 - TLSv1_2 = 3; - - // TLS 1.3 - TLSv1_3 = 4; - } - - // Minimum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_0`` for - // servers. - TlsProtocol tls_minimum_protocol_version = 1 [(validate.rules).enum = {defined_only: true}]; - - // Maximum TLS protocol version. By default, it's ``TLSv1_3`` for servers in non-FIPS builds, and - // ``TLSv1_2`` for clients and for servers using :ref:`BoringSSL FIPS `. - TlsProtocol tls_maximum_protocol_version = 2 [(validate.rules).enum = {defined_only: true}]; - - // If specified, the TLS listener will only support the specified `cipher list - // `_ - // when negotiating TLS 1.0-1.2 (this setting has no effect when negotiating TLS 1.3). If not - // specified, the default list will be used. - // - // In non-FIPS builds, the default cipher list is: - // - // .. code-block:: none - // - // [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305] - // [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305] - // ECDHE-ECDSA-AES128-SHA - // ECDHE-RSA-AES128-SHA - // AES128-GCM-SHA256 - // AES128-SHA - // ECDHE-ECDSA-AES256-GCM-SHA384 - // ECDHE-RSA-AES256-GCM-SHA384 - // ECDHE-ECDSA-AES256-SHA - // ECDHE-RSA-AES256-SHA - // AES256-GCM-SHA384 - // AES256-SHA - // - // In builds using :ref:`BoringSSL FIPS `, the default cipher list is: - // - // .. code-block:: none - // - // ECDHE-ECDSA-AES128-GCM-SHA256 - // ECDHE-RSA-AES128-GCM-SHA256 - // ECDHE-ECDSA-AES128-SHA - // ECDHE-RSA-AES128-SHA - // AES128-GCM-SHA256 - // AES128-SHA - // ECDHE-ECDSA-AES256-GCM-SHA384 - // ECDHE-RSA-AES256-GCM-SHA384 - // ECDHE-ECDSA-AES256-SHA - // ECDHE-RSA-AES256-SHA - // AES256-GCM-SHA384 - // AES256-SHA - repeated string cipher_suites = 3; - - // If specified, the TLS connection will only support the specified ECDH - // curves. If not specified, the default curves will be used. - // - // In non-FIPS builds, the default curves are: - // - // .. code-block:: none - // - // X25519 - // P-256 - // - // In builds using :ref:`BoringSSL FIPS `, the default curve is: - // - // .. code-block:: none - // - // P-256 - repeated string ecdh_curves = 4; -} - -// BoringSSL private key method configuration. The private key methods are used for external -// (potentially asynchronous) signing and decryption operations. Some use cases for private key -// methods would be TPM support and TLS acceleration. -message PrivateKeyProvider { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.auth.PrivateKeyProvider"; - - // Private key method provider name. The name must match a - // supported private key method provider type. 
- string provider_name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Private key method provider specific configuration. - oneof config_type { - google.protobuf.Any typed_config = 3 [(udpa.annotations.sensitive) = true]; - - google.protobuf.Struct hidden_envoy_deprecated_config = 2 - [deprecated = true, (udpa.annotations.sensitive) = true]; - } -} - -// [#next-free-field: 7] -message TlsCertificate { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.TlsCertificate"; - - // The TLS certificate chain. - config.core.v3.DataSource certificate_chain = 1; - - // The TLS private key. - config.core.v3.DataSource private_key = 2 [(udpa.annotations.sensitive) = true]; - - // BoringSSL private key method provider. This is an alternative to :ref:`private_key - // ` field. This can't be - // marked as ``oneof`` due to API compatibility reasons. Setting both :ref:`private_key - // ` and - // :ref:`private_key_provider - // ` fields will result in an - // error. - PrivateKeyProvider private_key_provider = 6; - - // The password to decrypt the TLS private key. If this field is not set, it is assumed that the - // TLS private key is not password encrypted. - config.core.v3.DataSource password = 3 [(udpa.annotations.sensitive) = true]; - - // [#not-implemented-hide:] - config.core.v3.DataSource ocsp_staple = 4; - - // [#not-implemented-hide:] - repeated config.core.v3.DataSource signed_certificate_timestamp = 5; -} - -message TlsSessionTicketKeys { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.auth.TlsSessionTicketKeys"; - - // Keys for encrypting and decrypting TLS session tickets. The - // first key in the array contains the key to encrypt all new sessions created by this context. - // All keys are candidates for decrypting received tickets. This allows for easy rotation of keys - // by, for example, putting the new key first, and the previous key second. - // - // If :ref:`session_ticket_keys ` - // is not specified, the TLS library will still support resuming sessions via tickets, but it will - // use an internally-generated and managed key, so sessions cannot be resumed across hot restarts - // or on different hosts. - // - // Each key must contain exactly 80 bytes of cryptographically-secure random data. For - // example, the output of ``openssl rand 80``. - // - // .. attention:: - // - // Using this feature has serious security considerations and risks. Improper handling of keys - // may result in loss of secrecy in connections, even if ciphers supporting perfect forward - // secrecy are used. See https://www.imperialviolet.org/2013/06/27/botchingpfs.html for some - // discussion. To minimize the risk, you must: - // - // * Keep the session ticket keys at least as secure as your TLS certificate private keys - // * Rotate session ticket keys at least daily, and preferably hourly - // * Always generate keys using a cryptographically-secure random data source - repeated config.core.v3.DataSource keys = 1 - [(validate.rules).repeated = {min_items: 1}, (udpa.annotations.sensitive) = true]; -} - -// [#next-free-field: 11] -message CertificateValidationContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.auth.CertificateValidationContext"; - - // Peer certificate verification mode. 
- enum TrustChainVerification { - // Perform default certificate verification (e.g., against CA / verification lists) - VERIFY_TRUST_CHAIN = 0; - - // Connections where the certificate fails verification will be permitted. - // For HTTP connections, the result of certificate verification can be used in route matching. ( - // see :ref:`validated ` ). - ACCEPT_UNTRUSTED = 1; - } - - // TLS certificate data containing certificate authority certificates to use in verifying - // a presented peer certificate (e.g. server certificate for clusters or client certificate - // for listeners). If not specified and a peer certificate is presented it will not be - // verified. By default, a client certificate is optional, unless one of the additional - // options (:ref:`require_client_certificate - // `, - // :ref:`verify_certificate_spki - // `, - // :ref:`verify_certificate_hash - // `, or - // :ref:`match_subject_alt_names - // `) is also - // specified. - // - // It can optionally contain certificate revocation lists, in which case Envoy will verify - // that the presented peer certificate has not been revoked by one of the included CRLs. - // - // See :ref:`the TLS overview ` for a list of common - // system CA locations. - config.core.v3.DataSource trusted_ca = 1; - - // An optional list of base64-encoded SHA-256 hashes. If specified, Envoy will verify that the - // SHA-256 of the DER-encoded Subject Public Key Information (SPKI) of the presented certificate - // matches one of the specified values. - // - // A base64-encoded SHA-256 of the Subject Public Key Information (SPKI) of the certificate - // can be generated with the following command: - // - // .. code-block:: bash - // - // $ openssl x509 -in path/to/client.crt -noout -pubkey - // | openssl pkey -pubin -outform DER - // | openssl dgst -sha256 -binary - // | openssl enc -base64 - // NvqYIYSbgK2vCJpQhObf77vv+bQWtc5ek5RIOwPiC9A= - // - // This is the format used in HTTP Public Key Pinning. - // - // When both: - // :ref:`verify_certificate_hash - // ` and - // :ref:`verify_certificate_spki - // ` are specified, - // a hash matching value from either of the lists will result in the certificate being accepted. - // - // .. attention:: - // - // This option is preferred over :ref:`verify_certificate_hash - // `, - // because SPKI is tied to a private key, so it doesn't change when the certificate - // is renewed using the same private key. - repeated string verify_certificate_spki = 3 - [(validate.rules).repeated = {items {string {min_bytes: 44 max_bytes: 44}}}]; - - // An optional list of hex-encoded SHA-256 hashes. If specified, Envoy will verify that - // the SHA-256 of the DER-encoded presented certificate matches one of the specified values. - // - // A hex-encoded SHA-256 of the certificate can be generated with the following command: - // - // .. code-block:: bash - // - // $ openssl x509 -in path/to/client.crt -outform DER | openssl dgst -sha256 | cut -d" " -f2 - // df6ff72fe9116521268f6f2dd4966f51df479883fe7037b39f75916ac3049d1a - // - // A long hex-encoded and colon-separated SHA-256 (a.k.a. "fingerprint") of the certificate - // can be generated with the following command: - // - // .. code-block:: bash - // - // $ openssl x509 -in path/to/client.crt -noout -fingerprint -sha256 | cut -d"=" -f2 - // DF:6F:F7:2F:E9:11:65:21:26:8F:6F:2D:D4:96:6F:51:DF:47:98:83:FE:70:37:B3:9F:75:91:6A:C3:04:9D:1A - // - // Both of those formats are acceptable. 
- // - // When both: - // :ref:`verify_certificate_hash - // ` and - // :ref:`verify_certificate_spki - // ` are specified, - // a hash matching value from either of the lists will result in the certificate being accepted. - repeated string verify_certificate_hash = 2 - [(validate.rules).repeated = {items {string {min_bytes: 64 max_bytes: 95}}}]; - - // An optional list of Subject Alternative name matchers. Envoy will verify that the - // Subject Alternative Name of the presented certificate matches one of the specified matches. - // - // When a certificate has wildcard DNS SAN entries, to match a specific client, it should be - // configured with exact match type in the :ref:`string matcher `. - // For example if the certificate has "\*.example.com" as DNS SAN entry, to allow only "api.example.com", - // it should be configured as shown below. - // - // .. code-block:: yaml - // - // match_subject_alt_names: - // exact: "api.example.com" - // - // .. attention:: - // - // Subject Alternative Names are easily spoofable and verifying only them is insecure, - // therefore this option must be used together with :ref:`trusted_ca - // `. - repeated type.matcher.v3.StringMatcher match_subject_alt_names = 9; - - // [#not-implemented-hide:] Must present a signed time-stamped OCSP response. - google.protobuf.BoolValue require_ocsp_staple = 5; - - // [#not-implemented-hide:] Must present signed certificate time-stamp. - google.protobuf.BoolValue require_signed_certificate_timestamp = 6; - - // An optional `certificate revocation list - // `_ - // (in PEM format). If specified, Envoy will verify that the presented peer - // certificate has not been revoked by this CRL. If this DataSource contains - // multiple CRLs, all of them will be used. - config.core.v3.DataSource crl = 7; - - // If specified, Envoy will not reject expired certificates. - bool allow_expired_certificate = 8; - - // Certificate trust chain verification mode. - TrustChainVerification trust_chain_verification = 10 - [(validate.rules).enum = {defined_only: true}]; - - repeated string hidden_envoy_deprecated_verify_subject_alt_name = 4 [deprecated = true]; -} - -// TLS context shared by both client and server TLS contexts. -// [#next-free-field: 9] -message CommonTlsContext { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.CommonTlsContext"; - - message CombinedCertificateValidationContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.auth.CommonTlsContext.CombinedCertificateValidationContext"; - - // How to validate peer certificates. - CertificateValidationContext default_validation_context = 1 - [(validate.rules).message = {required: true}]; - - // Config for fetching validation context via SDS API. - SdsSecretConfig validation_context_sds_secret_config = 2 - [(validate.rules).message = {required: true}]; - } - - reserved 5; - - // TLS protocol versions, cipher suites etc. - TlsParameters tls_params = 1; - - // :ref:`Multiple TLS certificates ` can be associated with the - // same context to allow both RSA and ECDSA certificates. - // - // Only a single TLS certificate is supported in client contexts. In server contexts, the first - // RSA certificate is used for clients that only support RSA and the first ECDSA certificate is - // used for clients that support ECDSA. - repeated TlsCertificate tls_certificates = 2; - - // Configs for fetching TLS certificates via SDS API. 
- repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6 - [(validate.rules).repeated = {max_items: 1}]; - - // How to validate peer certificates. - repeated string alpn_protocols = 4; - - oneof validation_context_type { - // Config for fetching validation context via SDS API. - CertificateValidationContext validation_context = 3; - - // Combined certificate validation context holds a default CertificateValidationContext - // and SDS config. When SDS server returns dynamic CertificateValidationContext, both dynamic - // and default CertificateValidationContext are merged into a new CertificateValidationContext - // for validation. This merge is done by Message::MergeFrom(), so dynamic - // CertificateValidationContext overwrites singular fields in default - // CertificateValidationContext, and concatenates repeated fields to default - // CertificateValidationContext, and logical OR is applied to boolean fields. - SdsSecretConfig validation_context_sds_secret_config = 7; - - // Supplies the list of ALPN protocols that the listener should expose. In - // practice this is likely to be set to one of two values (see the - // :ref:`codec_type - // ` - // parameter in the HTTP connection manager for more information): - // - // * "h2,http/1.1" If the listener is going to support both HTTP/2 and HTTP/1.1. - // * "http/1.1" If the listener is only going to support HTTP/1.1. - // - // There is no default for this parameter. If empty, Envoy will not expose ALPN. - CombinedCertificateValidationContext combined_validation_context = 8; - } -} - -message UpstreamTlsContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.auth.UpstreamTlsContext"; - - // Common TLS context settings. - // - // .. attention:: - // - // Server certificate verification is not enabled by default. Configure - // :ref:`trusted_ca` to enable - // verification. - CommonTlsContext common_tls_context = 1; - - // SNI string to use when creating TLS backend connections. - string sni = 2 [(validate.rules).string = {max_bytes: 255}]; - - // If true, server-initiated TLS renegotiation will be allowed. - // - // .. attention:: - // - // TLS renegotiation is considered insecure and shouldn't be used unless absolutely necessary. - bool allow_renegotiation = 3; - - // Maximum number of session keys (Pre-Shared Keys for TLSv1.3+, Session IDs and Session Tickets - // for TLSv1.2 and older) to store for the purpose of session resumption. - // - // Defaults to 1, setting this to 0 disables session resumption. - google.protobuf.UInt32Value max_session_keys = 4; -} - -// [#next-free-field: 8] -message DownstreamTlsContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.auth.DownstreamTlsContext"; - - // Common TLS context settings. - CommonTlsContext common_tls_context = 1; - - // If specified, Envoy will reject connections without a valid client - // certificate. - google.protobuf.BoolValue require_client_certificate = 2; - - // If specified, Envoy will reject connections without a valid and matching SNI. - // [#not-implemented-hide:] - google.protobuf.BoolValue require_sni = 3; - - // TLS session ticket key settings. - google.protobuf.Duration session_timeout = 6 [(validate.rules).duration = { - lt {seconds: 4294967296} - gte {} - }]; - - oneof session_ticket_keys_type { - // Config for fetching TLS session ticket keys via SDS API. 
- TlsSessionTicketKeys session_ticket_keys = 4; - - // Config for controlling stateless TLS session resumption: setting this to true will cause the TLS - // server to not issue TLS session tickets for the purposes of stateless TLS session resumption. - // If set to false, the TLS server will issue TLS session tickets and encrypt/decrypt them using - // the keys specified through either :ref:`session_ticket_keys ` - // or :ref:`session_ticket_keys_sds_secret_config `. - // If this config is set to false and no keys are explicitly configured, the TLS server will issue - // TLS session tickets and encrypt/decrypt them using an internally-generated and managed key, with the - // implication that sessions cannot be resumed across hot restarts or on different hosts. - SdsSecretConfig session_ticket_keys_sds_secret_config = 5; - - // If specified, session_timeout will change maximum lifetime (in seconds) of TLS session - // Currently this value is used as a hint to `TLS session ticket lifetime (for TLSv1.2) - // ` - // only seconds could be specified (fractional seconds are going to be ignored). - bool disable_stateless_session_resumption = 7; - } -} - -message GenericSecret { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.GenericSecret"; - - // Secret of generic type and is available to filters. - config.core.v3.DataSource secret = 1 [(udpa.annotations.sensitive) = true]; -} - -message SdsSecretConfig { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.SdsSecretConfig"; - - // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. - // When both name and config are specified, then secret can be fetched and/or reloaded via - // SDS. When only name is specified, then secret will be loaded from static resources. - string name = 1; - - config.core.v3.ConfigSource sds_config = 2; -} - -// [#next-free-field: 6] -message Secret { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.Secret"; - - // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. 
- string name = 1; - - oneof type { - TlsCertificate tls_certificate = 2; - - TlsSessionTicketKeys session_ticket_keys = 3; - - CertificateValidationContext validation_context = 4; - - GenericSecret generic_secret = 5; - } -} diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/common.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/common.proto new file mode 100644 index 000000000000..a54ba1faeb97 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/common.proto @@ -0,0 +1,331 @@ +syntax = "proto3"; + +package envoy.extensions.transport_sockets.tls.v3; + +import "envoy/config/core/v3/base.proto"; +import "envoy/type/matcher/v3/string.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/sensitive.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v3"; +option java_outer_classname = "CommonProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Common TLS configuration] + +message TlsParameters { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.TlsParameters"; + + enum TlsProtocol { + // Envoy will choose the optimal TLS version. + TLS_AUTO = 0; + + // TLS 1.0 + TLSv1_0 = 1; + + // TLS 1.1 + TLSv1_1 = 2; + + // TLS 1.2 + TLSv1_2 = 3; + + // TLS 1.3 + TLSv1_3 = 4; + } + + // Minimum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_0`` for + // servers. + TlsProtocol tls_minimum_protocol_version = 1 [(validate.rules).enum = {defined_only: true}]; + + // Maximum TLS protocol version. By default, it's ``TLSv1_3`` for servers in non-FIPS builds, and + // ``TLSv1_2`` for clients and for servers using :ref:`BoringSSL FIPS `. + TlsProtocol tls_maximum_protocol_version = 2 [(validate.rules).enum = {defined_only: true}]; + + // If specified, the TLS listener will only support the specified `cipher list + // `_ + // when negotiating TLS 1.0-1.2 (this setting has no effect when negotiating TLS 1.3). If not + // specified, the default list will be used. + // + // In non-FIPS builds, the default cipher list is: + // + // .. code-block:: none + // + // [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305] + // [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305] + // ECDHE-ECDSA-AES128-SHA + // ECDHE-RSA-AES128-SHA + // AES128-GCM-SHA256 + // AES128-SHA + // ECDHE-ECDSA-AES256-GCM-SHA384 + // ECDHE-RSA-AES256-GCM-SHA384 + // ECDHE-ECDSA-AES256-SHA + // ECDHE-RSA-AES256-SHA + // AES256-GCM-SHA384 + // AES256-SHA + // + // In builds using :ref:`BoringSSL FIPS `, the default cipher list is: + // + // .. code-block:: none + // + // ECDHE-ECDSA-AES128-GCM-SHA256 + // ECDHE-RSA-AES128-GCM-SHA256 + // ECDHE-ECDSA-AES128-SHA + // ECDHE-RSA-AES128-SHA + // AES128-GCM-SHA256 + // AES128-SHA + // ECDHE-ECDSA-AES256-GCM-SHA384 + // ECDHE-RSA-AES256-GCM-SHA384 + // ECDHE-ECDSA-AES256-SHA + // ECDHE-RSA-AES256-SHA + // AES256-GCM-SHA384 + // AES256-SHA + repeated string cipher_suites = 3; + + // If specified, the TLS connection will only support the specified ECDH + // curves. If not specified, the default curves will be used. + // + // In non-FIPS builds, the default curves are: + // + // .. 
code-block:: none + // + // X25519 + // P-256 + // + // In builds using :ref:`BoringSSL FIPS `, the default curve is: + // + // .. code-block:: none + // + // P-256 + repeated string ecdh_curves = 4; +} + +// BoringSSL private key method configuration. The private key methods are used for external +// (potentially asynchronous) signing and decryption operations. Some use cases for private key +// methods would be TPM support and TLS acceleration. +message PrivateKeyProvider { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.auth.PrivateKeyProvider"; + + // Private key method provider name. The name must match a + // supported private key method provider type. + string provider_name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Private key method provider specific configuration. + oneof config_type { + google.protobuf.Any typed_config = 3 [(udpa.annotations.sensitive) = true]; + + google.protobuf.Struct hidden_envoy_deprecated_config = 2 + [deprecated = true, (udpa.annotations.sensitive) = true]; + } +} + +// [#next-free-field: 7] +message TlsCertificate { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.TlsCertificate"; + + // The TLS certificate chain. + config.core.v3.DataSource certificate_chain = 1; + + // The TLS private key. + config.core.v3.DataSource private_key = 2 [(udpa.annotations.sensitive) = true]; + + // BoringSSL private key method provider. This is an alternative to :ref:`private_key + // ` field. This can't be + // marked as ``oneof`` due to API compatibility reasons. Setting both :ref:`private_key + // ` and + // :ref:`private_key_provider + // ` fields will result in an + // error. + PrivateKeyProvider private_key_provider = 6; + + // The password to decrypt the TLS private key. If this field is not set, it is assumed that the + // TLS private key is not password encrypted. + config.core.v3.DataSource password = 3 [(udpa.annotations.sensitive) = true]; + + // [#not-implemented-hide:] + config.core.v3.DataSource ocsp_staple = 4; + + // [#not-implemented-hide:] + repeated config.core.v3.DataSource signed_certificate_timestamp = 5; +} + +message TlsSessionTicketKeys { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.auth.TlsSessionTicketKeys"; + + // Keys for encrypting and decrypting TLS session tickets. The + // first key in the array contains the key to encrypt all new sessions created by this context. + // All keys are candidates for decrypting received tickets. This allows for easy rotation of keys + // by, for example, putting the new key first, and the previous key second. + // + // If :ref:`session_ticket_keys ` + // is not specified, the TLS library will still support resuming sessions via tickets, but it will + // use an internally-generated and managed key, so sessions cannot be resumed across hot restarts + // or on different hosts. + // + // Each key must contain exactly 80 bytes of cryptographically-secure random data. For + // example, the output of ``openssl rand 80``. + // + // .. attention:: + // + // Using this feature has serious security considerations and risks. Improper handling of keys + // may result in loss of secrecy in connections, even if ciphers supporting perfect forward + // secrecy are used. See https://www.imperialviolet.org/2013/06/27/botchingpfs.html for some + // discussion. 
To minimize the risk, you must: + // + // * Keep the session ticket keys at least as secure as your TLS certificate private keys + // * Rotate session ticket keys at least daily, and preferably hourly + // * Always generate keys using a cryptographically-secure random data source + repeated config.core.v3.DataSource keys = 1 + [(validate.rules).repeated = {min_items: 1}, (udpa.annotations.sensitive) = true]; +} + +// [#next-free-field: 11] +message CertificateValidationContext { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.auth.CertificateValidationContext"; + + // Peer certificate verification mode. + enum TrustChainVerification { + // Perform default certificate verification (e.g., against CA / verification lists) + VERIFY_TRUST_CHAIN = 0; + + // Connections where the certificate fails verification will be permitted. + // For HTTP connections, the result of certificate verification can be used in route matching. ( + // see :ref:`validated ` ). + ACCEPT_UNTRUSTED = 1; + } + + // TLS certificate data containing certificate authority certificates to use in verifying + // a presented peer certificate (e.g. server certificate for clusters or client certificate + // for listeners). If not specified and a peer certificate is presented it will not be + // verified. By default, a client certificate is optional, unless one of the additional + // options (:ref:`require_client_certificate + // `, + // :ref:`verify_certificate_spki + // `, + // :ref:`verify_certificate_hash + // `, or + // :ref:`match_subject_alt_names + // `) is also + // specified. + // + // It can optionally contain certificate revocation lists, in which case Envoy will verify + // that the presented peer certificate has not been revoked by one of the included CRLs. + // + // See :ref:`the TLS overview ` for a list of common + // system CA locations. + config.core.v3.DataSource trusted_ca = 1; + + // An optional list of base64-encoded SHA-256 hashes. If specified, Envoy will verify that the + // SHA-256 of the DER-encoded Subject Public Key Information (SPKI) of the presented certificate + // matches one of the specified values. + // + // A base64-encoded SHA-256 of the Subject Public Key Information (SPKI) of the certificate + // can be generated with the following command: + // + // .. code-block:: bash + // + // $ openssl x509 -in path/to/client.crt -noout -pubkey + // | openssl pkey -pubin -outform DER + // | openssl dgst -sha256 -binary + // | openssl enc -base64 + // NvqYIYSbgK2vCJpQhObf77vv+bQWtc5ek5RIOwPiC9A= + // + // This is the format used in HTTP Public Key Pinning. + // + // When both: + // :ref:`verify_certificate_hash + // ` and + // :ref:`verify_certificate_spki + // ` are specified, + // a hash matching value from either of the lists will result in the certificate being accepted. + // + // .. attention:: + // + // This option is preferred over :ref:`verify_certificate_hash + // `, + // because SPKI is tied to a private key, so it doesn't change when the certificate + // is renewed using the same private key. + repeated string verify_certificate_spki = 3 + [(validate.rules).repeated = {items {string {min_bytes: 44 max_bytes: 44}}}]; + + // An optional list of hex-encoded SHA-256 hashes. If specified, Envoy will verify that + // the SHA-256 of the DER-encoded presented certificate matches one of the specified values. + // + // A hex-encoded SHA-256 of the certificate can be generated with the following command: + // + // .. 
code-block:: bash + // + // $ openssl x509 -in path/to/client.crt -outform DER | openssl dgst -sha256 | cut -d" " -f2 + // df6ff72fe9116521268f6f2dd4966f51df479883fe7037b39f75916ac3049d1a + // + // A long hex-encoded and colon-separated SHA-256 (a.k.a. "fingerprint") of the certificate + // can be generated with the following command: + // + // .. code-block:: bash + // + // $ openssl x509 -in path/to/client.crt -noout -fingerprint -sha256 | cut -d"=" -f2 + // DF:6F:F7:2F:E9:11:65:21:26:8F:6F:2D:D4:96:6F:51:DF:47:98:83:FE:70:37:B3:9F:75:91:6A:C3:04:9D:1A + // + // Both of those formats are acceptable. + // + // When both: + // :ref:`verify_certificate_hash + // ` and + // :ref:`verify_certificate_spki + // ` are specified, + // a hash matching value from either of the lists will result in the certificate being accepted. + repeated string verify_certificate_hash = 2 + [(validate.rules).repeated = {items {string {min_bytes: 64 max_bytes: 95}}}]; + + // An optional list of Subject Alternative name matchers. Envoy will verify that the + // Subject Alternative Name of the presented certificate matches one of the specified matches. + // + // When a certificate has wildcard DNS SAN entries, to match a specific client, it should be + // configured with exact match type in the :ref:`string matcher `. + // For example if the certificate has "\*.example.com" as DNS SAN entry, to allow only "api.example.com", + // it should be configured as shown below. + // + // .. code-block:: yaml + // + // match_subject_alt_names: + // exact: "api.example.com" + // + // .. attention:: + // + // Subject Alternative Names are easily spoofable and verifying only them is insecure, + // therefore this option must be used together with :ref:`trusted_ca + // `. + repeated type.matcher.v3.StringMatcher match_subject_alt_names = 9; + + // [#not-implemented-hide:] Must present a signed time-stamped OCSP response. + google.protobuf.BoolValue require_ocsp_staple = 5; + + // [#not-implemented-hide:] Must present signed certificate time-stamp. + google.protobuf.BoolValue require_signed_certificate_timestamp = 6; + + // An optional `certificate revocation list + // `_ + // (in PEM format). If specified, Envoy will verify that the presented peer + // certificate has not been revoked by this CRL. If this DataSource contains + // multiple CRLs, all of them will be used. + config.core.v3.DataSource crl = 7; + + // If specified, Envoy will not reject expired certificates. + bool allow_expired_certificate = 8; + + // Certificate trust chain verification mode. 
+ TrustChainVerification trust_chain_verification = 10 + [(validate.rules).enum = {defined_only: true}]; + + repeated string hidden_envoy_deprecated_verify_subject_alt_name = 4 [deprecated = true]; +} diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/secret.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/secret.proto new file mode 100644 index 000000000000..2a77ec765c8f --- /dev/null +++ b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/secret.proto @@ -0,0 +1,54 @@ +syntax = "proto3"; + +package envoy.extensions.transport_sockets.tls.v3; + +import "envoy/config/core/v3/base.proto"; +import "envoy/config/core/v3/config_source.proto"; +import "envoy/extensions/transport_sockets/tls/v3/common.proto"; + +import "udpa/annotations/sensitive.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v3"; +option java_outer_classname = "SecretProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Secrets configuration] + +message GenericSecret { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.GenericSecret"; + + // Secret of generic type and is available to filters. + config.core.v3.DataSource secret = 1 [(udpa.annotations.sensitive) = true]; +} + +message SdsSecretConfig { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.SdsSecretConfig"; + + // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. + // When both name and config are specified, then secret can be fetched and/or reloaded via + // SDS. When only name is specified, then secret will be loaded from static resources. + string name = 1; + + config.core.v3.ConfigSource sds_config = 2; +} + +// [#next-free-field: 6] +message Secret { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.Secret"; + + // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. + string name = 1; + + oneof type { + TlsCertificate tls_certificate = 2; + + TlsSessionTicketKeys session_ticket_keys = 3; + + CertificateValidationContext validation_context = 4; + + GenericSecret generic_secret = 5; + } +} diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/tls.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/tls.proto new file mode 100644 index 000000000000..97bb3fe64e7f --- /dev/null +++ b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/tls.proto @@ -0,0 +1,161 @@ +syntax = "proto3"; + +package envoy.extensions.transport_sockets.tls.v3; + +import "envoy/extensions/transport_sockets/tls/v3/common.proto"; +import "envoy/extensions/transport_sockets/tls/v3/secret.proto"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v3"; +option java_outer_classname = "TlsProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: TLS transport socket] +// [#extension: envoy.transport_sockets.tls] +// The TLS contexts below provide the transport socket configuration for upstream/downstream TLS. 
+ +message UpstreamTlsContext { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.auth.UpstreamTlsContext"; + + // Common TLS context settings. + // + // .. attention:: + // + // Server certificate verification is not enabled by default. Configure + // :ref:`trusted_ca` to enable + // verification. + CommonTlsContext common_tls_context = 1; + + // SNI string to use when creating TLS backend connections. + string sni = 2 [(validate.rules).string = {max_bytes: 255}]; + + // If true, server-initiated TLS renegotiation will be allowed. + // + // .. attention:: + // + // TLS renegotiation is considered insecure and shouldn't be used unless absolutely necessary. + bool allow_renegotiation = 3; + + // Maximum number of session keys (Pre-Shared Keys for TLSv1.3+, Session IDs and Session Tickets + // for TLSv1.2 and older) to store for the purpose of session resumption. + // + // Defaults to 1, setting this to 0 disables session resumption. + google.protobuf.UInt32Value max_session_keys = 4; +} + +// [#next-free-field: 8] +message DownstreamTlsContext { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.auth.DownstreamTlsContext"; + + // Common TLS context settings. + CommonTlsContext common_tls_context = 1; + + // If specified, Envoy will reject connections without a valid client + // certificate. + google.protobuf.BoolValue require_client_certificate = 2; + + // If specified, Envoy will reject connections without a valid and matching SNI. + // [#not-implemented-hide:] + google.protobuf.BoolValue require_sni = 3; + + // TLS session ticket key settings. + google.protobuf.Duration session_timeout = 6 [(validate.rules).duration = { + lt {seconds: 4294967296} + gte {} + }]; + + oneof session_ticket_keys_type { + // Config for fetching TLS session ticket keys via SDS API. + TlsSessionTicketKeys session_ticket_keys = 4; + + // Config for controlling stateless TLS session resumption: setting this to true will cause the TLS + // server to not issue TLS session tickets for the purposes of stateless TLS session resumption. + // If set to false, the TLS server will issue TLS session tickets and encrypt/decrypt them using + // the keys specified through either :ref:`session_ticket_keys ` + // or :ref:`session_ticket_keys_sds_secret_config `. + // If this config is set to false and no keys are explicitly configured, the TLS server will issue + // TLS session tickets and encrypt/decrypt them using an internally-generated and managed key, with the + // implication that sessions cannot be resumed across hot restarts or on different hosts. + SdsSecretConfig session_ticket_keys_sds_secret_config = 5; + + // If specified, session_timeout will change maximum lifetime (in seconds) of TLS session + // Currently this value is used as a hint to `TLS session ticket lifetime (for TLSv1.2) + // ` + // only seconds could be specified (fractional seconds are going to be ignored). + bool disable_stateless_session_resumption = 7; + } +} + +// TLS context shared by both client and server TLS contexts. +// [#next-free-field: 9] +message CommonTlsContext { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.CommonTlsContext"; + + message CombinedCertificateValidationContext { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.auth.CommonTlsContext.CombinedCertificateValidationContext"; + + // How to validate peer certificates. 
+ CertificateValidationContext default_validation_context = 1 + [(validate.rules).message = {required: true}]; + + // Config for fetching validation context via SDS API. + SdsSecretConfig validation_context_sds_secret_config = 2 + [(validate.rules).message = {required: true}]; + } + + reserved 5; + + // TLS protocol versions, cipher suites etc. + TlsParameters tls_params = 1; + + // :ref:`Multiple TLS certificates ` can be associated with the + // same context to allow both RSA and ECDSA certificates. + // + // Only a single TLS certificate is supported in client contexts. In server contexts, the first + // RSA certificate is used for clients that only support RSA and the first ECDSA certificate is + // used for clients that support ECDSA. + repeated TlsCertificate tls_certificates = 2; + + // Configs for fetching TLS certificates via SDS API. + repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6 + [(validate.rules).repeated = {max_items: 1}]; + + // How to validate peer certificates. + repeated string alpn_protocols = 4; + + oneof validation_context_type { + // Config for fetching validation context via SDS API. + CertificateValidationContext validation_context = 3; + + // Combined certificate validation context holds a default CertificateValidationContext + // and SDS config. When SDS server returns dynamic CertificateValidationContext, both dynamic + // and default CertificateValidationContext are merged into a new CertificateValidationContext + // for validation. This merge is done by Message::MergeFrom(), so dynamic + // CertificateValidationContext overwrites singular fields in default + // CertificateValidationContext, and concatenates repeated fields to default + // CertificateValidationContext, and logical OR is applied to boolean fields. + SdsSecretConfig validation_context_sds_secret_config = 7; + + // Supplies the list of ALPN protocols that the listener should expose. In + // practice this is likely to be set to one of two values (see the + // :ref:`codec_type + // ` + // parameter in the HTTP connection manager for more information): + // + // * "h2,http/1.1" If the listener is going to support both HTTP/2 and HTTP/1.1. + // * "http/1.1" If the listener is only going to support HTTP/1.1. + // + // There is no default for this parameter. If empty, Envoy will not expose ALPN. 
+ CombinedCertificateValidationContext combined_validation_context = 8; + } +} diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/cert.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/common.proto similarity index 64% rename from generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/cert.proto rename to generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/common.proto index febb6d665240..9028e380d092 100644 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/cert.proto +++ b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/common.proto @@ -3,11 +3,9 @@ syntax = "proto3"; package envoy.extensions.transport_sockets.tls.v4alpha; import "envoy/config/core/v4alpha/base.proto"; -import "envoy/config/core/v4alpha/config_source.proto"; import "envoy/type/matcher/v3/string.proto"; import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; @@ -17,7 +15,7 @@ import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v4alpha"; -option java_outer_classname = "CertProto"; +option java_outer_classname = "CommonProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; @@ -336,183 +334,3 @@ message CertificateValidationContext { TrustChainVerification trust_chain_verification = 10 [(validate.rules).enum = {defined_only: true}]; } - -// TLS context shared by both client and server TLS contexts. -// [#next-free-field: 9] -message CommonTlsContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.CommonTlsContext"; - - message CombinedCertificateValidationContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.CommonTlsContext." - "CombinedCertificateValidationContext"; - - // How to validate peer certificates. - CertificateValidationContext default_validation_context = 1 - [(validate.rules).message = {required: true}]; - - // Config for fetching validation context via SDS API. - SdsSecretConfig validation_context_sds_secret_config = 2 - [(validate.rules).message = {required: true}]; - } - - reserved 5; - - // TLS protocol versions, cipher suites etc. - TlsParameters tls_params = 1; - - // :ref:`Multiple TLS certificates ` can be associated with the - // same context to allow both RSA and ECDSA certificates. - // - // Only a single TLS certificate is supported in client contexts. In server contexts, the first - // RSA certificate is used for clients that only support RSA and the first ECDSA certificate is - // used for clients that support ECDSA. - repeated TlsCertificate tls_certificates = 2; - - // Configs for fetching TLS certificates via SDS API. - repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6 - [(validate.rules).repeated = {max_items: 1}]; - - oneof validation_context_type { - // How to validate peer certificates. - CertificateValidationContext validation_context = 3; - - // Config for fetching validation context via SDS API. - SdsSecretConfig validation_context_sds_secret_config = 7; - - // Combined certificate validation context holds a default CertificateValidationContext - // and SDS config. 
When SDS server returns dynamic CertificateValidationContext, both dynamic - // and default CertificateValidationContext are merged into a new CertificateValidationContext - // for validation. This merge is done by Message::MergeFrom(), so dynamic - // CertificateValidationContext overwrites singular fields in default - // CertificateValidationContext, and concatenates repeated fields to default - // CertificateValidationContext, and logical OR is applied to boolean fields. - CombinedCertificateValidationContext combined_validation_context = 8; - } - - // Supplies the list of ALPN protocols that the listener should expose. In - // practice this is likely to be set to one of two values (see the - // :ref:`codec_type - // ` - // parameter in the HTTP connection manager for more information): - // - // * "h2,http/1.1" If the listener is going to support both HTTP/2 and HTTP/1.1. - // * "http/1.1" If the listener is only going to support HTTP/1.1. - // - // There is no default for this parameter. If empty, Envoy will not expose ALPN. - repeated string alpn_protocols = 4; -} - -message UpstreamTlsContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext"; - - // Common TLS context settings. - // - // .. attention:: - // - // Server certificate verification is not enabled by default. Configure - // :ref:`trusted_ca` to enable - // verification. - CommonTlsContext common_tls_context = 1; - - // SNI string to use when creating TLS backend connections. - string sni = 2 [(validate.rules).string = {max_bytes: 255}]; - - // If true, server-initiated TLS renegotiation will be allowed. - // - // .. attention:: - // - // TLS renegotiation is considered insecure and shouldn't be used unless absolutely necessary. - bool allow_renegotiation = 3; - - // Maximum number of session keys (Pre-Shared Keys for TLSv1.3+, Session IDs and Session Tickets - // for TLSv1.2 and older) to store for the purpose of session resumption. - // - // Defaults to 1, setting this to 0 disables session resumption. - google.protobuf.UInt32Value max_session_keys = 4; -} - -// [#next-free-field: 8] -message DownstreamTlsContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext"; - - // Common TLS context settings. - CommonTlsContext common_tls_context = 1; - - // If specified, Envoy will reject connections without a valid client - // certificate. - google.protobuf.BoolValue require_client_certificate = 2; - - // If specified, Envoy will reject connections without a valid and matching SNI. - // [#not-implemented-hide:] - google.protobuf.BoolValue require_sni = 3; - - oneof session_ticket_keys_type { - // TLS session ticket key settings. - TlsSessionTicketKeys session_ticket_keys = 4; - - // Config for fetching TLS session ticket keys via SDS API. - SdsSecretConfig session_ticket_keys_sds_secret_config = 5; - - // Config for controlling stateless TLS session resumption: setting this to true will cause the TLS - // server to not issue TLS session tickets for the purposes of stateless TLS session resumption. - // If set to false, the TLS server will issue TLS session tickets and encrypt/decrypt them using - // the keys specified through either :ref:`session_ticket_keys ` - // or :ref:`session_ticket_keys_sds_secret_config `. 
- // If this config is set to false and no keys are explicitly configured, the TLS server will issue - // TLS session tickets and encrypt/decrypt them using an internally-generated and managed key, with the - // implication that sessions cannot be resumed across hot restarts or on different hosts. - bool disable_stateless_session_resumption = 7; - } - - // If specified, session_timeout will change maximum lifetime (in seconds) of TLS session - // Currently this value is used as a hint to `TLS session ticket lifetime (for TLSv1.2) - // ` - // only seconds could be specified (fractional seconds are going to be ignored). - google.protobuf.Duration session_timeout = 6 [(validate.rules).duration = { - lt {seconds: 4294967296} - gte {} - }]; -} - -message GenericSecret { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.GenericSecret"; - - // Secret of generic type and is available to filters. - config.core.v4alpha.DataSource secret = 1 [(udpa.annotations.sensitive) = true]; -} - -message SdsSecretConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.SdsSecretConfig"; - - // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. - // When both name and config are specified, then secret can be fetched and/or reloaded via - // SDS. When only name is specified, then secret will be loaded from static resources. - string name = 1; - - config.core.v4alpha.ConfigSource sds_config = 2; -} - -// [#next-free-field: 6] -message Secret { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.Secret"; - - // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. - string name = 1; - - oneof type { - TlsCertificate tls_certificate = 2; - - TlsSessionTicketKeys session_ticket_keys = 3; - - CertificateValidationContext validation_context = 4; - - GenericSecret generic_secret = 5; - } -} diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/secret.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/secret.proto new file mode 100644 index 000000000000..001c1d2901bd --- /dev/null +++ b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/secret.proto @@ -0,0 +1,57 @@ +syntax = "proto3"; + +package envoy.extensions.transport_sockets.tls.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/core/v4alpha/config_source.proto"; +import "envoy/extensions/transport_sockets/tls/v4alpha/common.proto"; + +import "udpa/annotations/sensitive.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v4alpha"; +option java_outer_classname = "SecretProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Secrets configuration] + +message GenericSecret { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.GenericSecret"; + + // Secret of generic type and is available to filters. 
+ config.core.v4alpha.DataSource secret = 1 [(udpa.annotations.sensitive) = true]; +} + +message SdsSecretConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.SdsSecretConfig"; + + // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. + // When both name and config are specified, then secret can be fetched and/or reloaded via + // SDS. When only name is specified, then secret will be loaded from static resources. + string name = 1; + + config.core.v4alpha.ConfigSource sds_config = 2; +} + +// [#next-free-field: 6] +message Secret { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.Secret"; + + // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. + string name = 1; + + oneof type { + TlsCertificate tls_certificate = 2; + + TlsSessionTicketKeys session_ticket_keys = 3; + + CertificateValidationContext validation_context = 4; + + GenericSecret generic_secret = 5; + } +} diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/tls.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/tls.proto new file mode 100644 index 000000000000..8797f36db18f --- /dev/null +++ b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/tls.proto @@ -0,0 +1,163 @@ +syntax = "proto3"; + +package envoy.extensions.transport_sockets.tls.v4alpha; + +import "envoy/extensions/transport_sockets/tls/v4alpha/common.proto"; +import "envoy/extensions/transport_sockets/tls/v4alpha/secret.proto"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v4alpha"; +option java_outer_classname = "TlsProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: TLS transport socket] +// [#extension: envoy.transport_sockets.tls] +// The TLS contexts below provide the transport socket configuration for upstream/downstream TLS. + +message UpstreamTlsContext { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext"; + + // Common TLS context settings. + // + // .. attention:: + // + // Server certificate verification is not enabled by default. Configure + // :ref:`trusted_ca` to enable + // verification. + CommonTlsContext common_tls_context = 1; + + // SNI string to use when creating TLS backend connections. + string sni = 2 [(validate.rules).string = {max_bytes: 255}]; + + // If true, server-initiated TLS renegotiation will be allowed. + // + // .. attention:: + // + // TLS renegotiation is considered insecure and shouldn't be used unless absolutely necessary. + bool allow_renegotiation = 3; + + // Maximum number of session keys (Pre-Shared Keys for TLSv1.3+, Session IDs and Session Tickets + // for TLSv1.2 and older) to store for the purpose of session resumption. + // + // Defaults to 1, setting this to 0 disables session resumption. 
+ google.protobuf.UInt32Value max_session_keys = 4; +} + +// [#next-free-field: 8] +message DownstreamTlsContext { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext"; + + // Common TLS context settings. + CommonTlsContext common_tls_context = 1; + + // If specified, Envoy will reject connections without a valid client + // certificate. + google.protobuf.BoolValue require_client_certificate = 2; + + // If specified, Envoy will reject connections without a valid and matching SNI. + // [#not-implemented-hide:] + google.protobuf.BoolValue require_sni = 3; + + oneof session_ticket_keys_type { + // TLS session ticket key settings. + TlsSessionTicketKeys session_ticket_keys = 4; + + // Config for fetching TLS session ticket keys via SDS API. + SdsSecretConfig session_ticket_keys_sds_secret_config = 5; + + // Config for controlling stateless TLS session resumption: setting this to true will cause the TLS + // server to not issue TLS session tickets for the purposes of stateless TLS session resumption. + // If set to false, the TLS server will issue TLS session tickets and encrypt/decrypt them using + // the keys specified through either :ref:`session_ticket_keys ` + // or :ref:`session_ticket_keys_sds_secret_config `. + // If this config is set to false and no keys are explicitly configured, the TLS server will issue + // TLS session tickets and encrypt/decrypt them using an internally-generated and managed key, with the + // implication that sessions cannot be resumed across hot restarts or on different hosts. + bool disable_stateless_session_resumption = 7; + } + + // If specified, session_timeout will change maximum lifetime (in seconds) of TLS session + // Currently this value is used as a hint to `TLS session ticket lifetime (for TLSv1.2) + // ` + // only seconds could be specified (fractional seconds are going to be ignored). + google.protobuf.Duration session_timeout = 6 [(validate.rules).duration = { + lt {seconds: 4294967296} + gte {} + }]; +} + +// TLS context shared by both client and server TLS contexts. +// [#next-free-field: 9] +message CommonTlsContext { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.CommonTlsContext"; + + message CombinedCertificateValidationContext { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.CommonTlsContext." + "CombinedCertificateValidationContext"; + + // How to validate peer certificates. + CertificateValidationContext default_validation_context = 1 + [(validate.rules).message = {required: true}]; + + // Config for fetching validation context via SDS API. + SdsSecretConfig validation_context_sds_secret_config = 2 + [(validate.rules).message = {required: true}]; + } + + reserved 5; + + // TLS protocol versions, cipher suites etc. + TlsParameters tls_params = 1; + + // :ref:`Multiple TLS certificates ` can be associated with the + // same context to allow both RSA and ECDSA certificates. + // + // Only a single TLS certificate is supported in client contexts. In server contexts, the first + // RSA certificate is used for clients that only support RSA and the first ECDSA certificate is + // used for clients that support ECDSA. + repeated TlsCertificate tls_certificates = 2; + + // Configs for fetching TLS certificates via SDS API. 
+ repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6 + [(validate.rules).repeated = {max_items: 1}]; + + oneof validation_context_type { + // How to validate peer certificates. + CertificateValidationContext validation_context = 3; + + // Config for fetching validation context via SDS API. + SdsSecretConfig validation_context_sds_secret_config = 7; + + // Combined certificate validation context holds a default CertificateValidationContext + // and SDS config. When SDS server returns dynamic CertificateValidationContext, both dynamic + // and default CertificateValidationContext are merged into a new CertificateValidationContext + // for validation. This merge is done by Message::MergeFrom(), so dynamic + // CertificateValidationContext overwrites singular fields in default + // CertificateValidationContext, and concatenates repeated fields to default + // CertificateValidationContext, and logical OR is applied to boolean fields. + CombinedCertificateValidationContext combined_validation_context = 8; + } + + // Supplies the list of ALPN protocols that the listener should expose. In + // practice this is likely to be set to one of two values (see the + // :ref:`codec_type + // ` + // parameter in the HTTP connection manager for more information): + // + // * "h2,http/1.1" If the listener is going to support both HTTP/2 and HTTP/1.1. + // * "http/1.1" If the listener is only going to support HTTP/1.1. + // + // There is no default for this parameter. If empty, Envoy will not expose ALPN. + repeated string alpn_protocols = 4; +} diff --git a/source/common/secret/sds_api.cc b/source/common/secret/sds_api.cc index deab859adafd..655eb3726d52 100644 --- a/source/common/secret/sds_api.cc +++ b/source/common/secret/sds_api.cc @@ -5,7 +5,7 @@ #include "envoy/api/v2/auth/cert.pb.h" #include "envoy/config/core/v3/config_source.pb.h" #include "envoy/extensions/transport_sockets/tls/v3/cert.pb.h" -#include "envoy/extensions/transport_sockets/tls/v3/cert.pb.validate.h" +#include "envoy/extensions/transport_sockets/tls/v3/secret.pb.validate.h" #include "envoy/service/discovery/v3/discovery.pb.h" #include "common/common/assert.h" diff --git a/source/extensions/quic_listeners/quiche/quic_transport_socket_factory.cc b/source/extensions/quic_listeners/quiche/quic_transport_socket_factory.cc index 6d1bf0a15691..e604a7be7aa4 100644 --- a/source/extensions/quic_listeners/quiche/quic_transport_socket_factory.cc +++ b/source/extensions/quic_listeners/quiche/quic_transport_socket_factory.cc @@ -1,7 +1,7 @@ #include "extensions/quic_listeners/quiche/quic_transport_socket_factory.h" #include "envoy/extensions/transport_sockets/tls/v3/cert.pb.h" -#include "envoy/extensions/transport_sockets/tls/v3/cert.pb.validate.h" +#include "envoy/extensions/transport_sockets/tls/v3/tls.pb.validate.h" #include "extensions/transport_sockets/tls/context_config_impl.h" diff --git a/source/extensions/transport_sockets/tls/config.cc b/source/extensions/transport_sockets/tls/config.cc index 655ac5724dc8..c743f5f6def8 100644 --- a/source/extensions/transport_sockets/tls/config.cc +++ b/source/extensions/transport_sockets/tls/config.cc @@ -1,7 +1,7 @@ #include "extensions/transport_sockets/tls/config.h" #include "envoy/extensions/transport_sockets/tls/v3/cert.pb.h" -#include "envoy/extensions/transport_sockets/tls/v3/cert.pb.validate.h" +#include "envoy/extensions/transport_sockets/tls/v3/tls.pb.validate.h" #include "common/protobuf/utility.h" diff --git a/test/extensions/transport_sockets/tls/context_impl_test.cc 
b/test/extensions/transport_sockets/tls/context_impl_test.cc index 2b6c67057c28..e2213fbf7e29 100644 --- a/test/extensions/transport_sockets/tls/context_impl_test.cc +++ b/test/extensions/transport_sockets/tls/context_impl_test.cc @@ -3,7 +3,7 @@ #include "envoy/admin/v3/certs.pb.h" #include "envoy/extensions/transport_sockets/tls/v3/cert.pb.h" -#include "envoy/extensions/transport_sockets/tls/v3/cert.pb.validate.h" +#include "envoy/extensions/transport_sockets/tls/v3/tls.pb.validate.h" #include "envoy/type/matcher/v3/string.pb.h" #include "common/json/json_loader.h" From 048f4231310fbbead0cbe03d43ffb4307fff0517 Mon Sep 17 00:00:00 2001 From: Jose Ulises Nino Rivera Date: Tue, 28 Apr 2020 14:42:37 -0700 Subject: [PATCH 045/909] metrics service sink: add config option to report counters as deltas (#10889) Description: this PR adds the ability to configure the metrics service stats sink to report counters as deltas between flushing intervals. This is the expected representation for some stats aggregations backends. Similar behavior is seen, for instance, in the statsd sink. Risk Level: low, previous behavior is left unchanged, and is the default. Testing: updated unit test. Docs Changes: left comments in the field definition Release Notes: updated version history. Signed-off-by: Jose Nino --- .../config/metrics/v3/metrics_service.proto | 8 ++ docs/root/version_history/current.rst | 3 +- .../config/metrics/v3/metrics_service.proto | 8 ++ .../stat_sinks/metrics_service/config.cc | 4 +- .../grpc_metrics_service_impl.cc | 19 ++-- .../grpc_metrics_service_impl.h | 5 +- .../grpc_metrics_service_impl_test.cc | 90 +++++++++++++------ 7 files changed, 98 insertions(+), 39 deletions(-) diff --git a/api/envoy/config/metrics/v3/metrics_service.proto b/api/envoy/config/metrics/v3/metrics_service.proto index ad9879055ba3..0e078c0916f8 100644 --- a/api/envoy/config/metrics/v3/metrics_service.proto +++ b/api/envoy/config/metrics/v3/metrics_service.proto @@ -4,6 +4,8 @@ package envoy.config.metrics.v3; import "envoy/config/core/v3/grpc_service.proto"; +import "google/protobuf/wrappers.proto"; + import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -25,4 +27,10 @@ message MetricsServiceConfig { // The upstream gRPC cluster that hosts the metrics service. core.v3.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}]; + + // If true, counters are reported as the delta between flushing intervals. Otherwise, the current + // counter value is reported. Defaults to false. + // Eventually (https://github.com/envoyproxy/envoy/issues/10968) if this value is not set, the + // sink will take updates from the :ref:`MetricsResponse `. + google.protobuf.BoolValue report_counters_as_deltas = 2; } diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index df174452fa96..d4b992a99486 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -7,7 +7,7 @@ Changes * access loggers: added GRPC_STATUS operator on logging format. * access loggers: extened specifier for FilterStateFormatter to output :ref:`unstructured log string `. * dynamic forward proxy: added :ref:`SNI based dynamic forward proxy ` support. 
-* fault: added support for controlling the percentage of requests that abort, delay and response rate limits faults +* fault: added support for controlling the percentage of requests that abort, delay and response rate limits faults are applied to using :ref:`HTTP headers ` to the HTTP fault filter. * fault: added support for specifying grpc_status code in abort faults using :ref:`HTTP header ` or abort fault configuration in HTTP fault filter. @@ -31,6 +31,7 @@ Changes tracing is not forced. * router: allow retries of streaming or incomplete requests. This removes stat `rq_retry_skipped_request_not_complete`. * router: allow retries by default when upstream responds with :ref:`x-envoy-overloaded `. +* stats: added the option to :ref:`report counters as deltas ` to the metrics service stats sink. * tracing: tracing configuration has been made fully dynamic and every HTTP connection manager can now have a separate :ref:`tracing provider `. * upstream: fixed a bug where Envoy would panic when receiving a GRPC SERVICE_UNKNOWN status on the health check. diff --git a/generated_api_shadow/envoy/config/metrics/v3/metrics_service.proto b/generated_api_shadow/envoy/config/metrics/v3/metrics_service.proto index ad9879055ba3..0e078c0916f8 100644 --- a/generated_api_shadow/envoy/config/metrics/v3/metrics_service.proto +++ b/generated_api_shadow/envoy/config/metrics/v3/metrics_service.proto @@ -4,6 +4,8 @@ package envoy.config.metrics.v3; import "envoy/config/core/v3/grpc_service.proto"; +import "google/protobuf/wrappers.proto"; + import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -25,4 +27,10 @@ message MetricsServiceConfig { // The upstream gRPC cluster that hosts the metrics service. core.v3.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}]; + + // If true, counters are reported as the delta between flushing intervals. Otherwise, the current + // counter value is reported. Defaults to false. + // Eventually (https://github.com/envoyproxy/envoy/issues/10968) if this value is not set, the + // sink will take updates from the :ref:`MetricsResponse `. 
+ google.protobuf.BoolValue report_counters_as_deltas = 2; } diff --git a/source/extensions/stat_sinks/metrics_service/config.cc b/source/extensions/stat_sinks/metrics_service/config.cc index 4f8402e201b0..69f0860d228b 100644 --- a/source/extensions/stat_sinks/metrics_service/config.cc +++ b/source/extensions/stat_sinks/metrics_service/config.cc @@ -33,7 +33,9 @@ Stats::SinkPtr MetricsServiceSinkFactory::createStatsSink(const Protobuf::Messag grpc_service, server.stats(), false), server.localInfo()); - return std::make_unique(grpc_metrics_streamer, server.timeSource()); + return std::make_unique( + grpc_metrics_streamer, server.timeSource(), + PROTOBUF_GET_WRAPPED_OR_DEFAULT(sink_config, report_counters_as_deltas, false)); } ProtobufTypes::MessagePtr MetricsServiceSinkFactory::createEmptyConfigProto() { diff --git a/source/extensions/stat_sinks/metrics_service/grpc_metrics_service_impl.cc b/source/extensions/stat_sinks/metrics_service/grpc_metrics_service_impl.cc index cfea996f40d7..85f2a63b7fb3 100644 --- a/source/extensions/stat_sinks/metrics_service/grpc_metrics_service_impl.cc +++ b/source/extensions/stat_sinks/metrics_service/grpc_metrics_service_impl.cc @@ -34,19 +34,26 @@ void GrpcMetricsStreamerImpl::send(envoy::service::metrics::v3::StreamMetricsMes } MetricsServiceSink::MetricsServiceSink(const GrpcMetricsStreamerSharedPtr& grpc_metrics_streamer, - TimeSource& time_source) - : grpc_metrics_streamer_(grpc_metrics_streamer), time_source_(time_source) {} + TimeSource& time_source, + const bool report_counters_as_deltas) + : grpc_metrics_streamer_(grpc_metrics_streamer), time_source_(time_source), + report_counters_as_deltas_(report_counters_as_deltas) {} -void MetricsServiceSink::flushCounter(const Stats::Counter& counter) { +void MetricsServiceSink::flushCounter( + const Stats::MetricSnapshot::CounterSnapshot& counter_snapshot) { io::prometheus::client::MetricFamily* metrics_family = message_.add_envoy_metrics(); metrics_family->set_type(io::prometheus::client::MetricType::COUNTER); - metrics_family->set_name(counter.name()); + metrics_family->set_name(counter_snapshot.counter_.get().name()); auto* metric = metrics_family->add_metric(); metric->set_timestamp_ms(std::chrono::duration_cast( time_source_.systemTime().time_since_epoch()) .count()); auto* counter_metric = metric->mutable_counter(); - counter_metric->set_value(counter.value()); + if (report_counters_as_deltas_) { + counter_metric->set_value(counter_snapshot.delta_); + } else { + counter_metric->set_value(counter_snapshot.counter_.get().value()); + } } void MetricsServiceSink::flushGauge(const Stats::Gauge& gauge) { @@ -110,7 +117,7 @@ void MetricsServiceSink::flush(Stats::MetricSnapshot& snapshot) { snapshot.histograms().size()); for (const auto& counter : snapshot.counters()) { if (counter.counter_.get().used()) { - flushCounter(counter.counter_.get()); + flushCounter(counter); } } diff --git a/source/extensions/stat_sinks/metrics_service/grpc_metrics_service_impl.h b/source/extensions/stat_sinks/metrics_service/grpc_metrics_service_impl.h index f8d500a05849..84c2d19695f4 100644 --- a/source/extensions/stat_sinks/metrics_service/grpc_metrics_service_impl.h +++ b/source/extensions/stat_sinks/metrics_service/grpc_metrics_service_impl.h @@ -73,11 +73,11 @@ class MetricsServiceSink : public Stats::Sink { public: // MetricsService::Sink MetricsServiceSink(const GrpcMetricsStreamerSharedPtr& grpc_metrics_streamer, - TimeSource& time_system); + TimeSource& time_system, const bool report_counters_as_deltas); void 
flush(Stats::MetricSnapshot& snapshot) override; void onHistogramComplete(const Stats::Histogram&, uint64_t) override {} - void flushCounter(const Stats::Counter& counter); + void flushCounter(const Stats::MetricSnapshot::CounterSnapshot& counter_snapshot); void flushGauge(const Stats::Gauge& gauge); void flushHistogram(const Stats::ParentHistogram& envoy_histogram); @@ -85,6 +85,7 @@ class MetricsServiceSink : public Stats::Sink { GrpcMetricsStreamerSharedPtr grpc_metrics_streamer_; envoy::service::metrics::v3::StreamMetricsMessage message_; TimeSource& time_source_; + const bool report_counters_as_deltas_; }; } // namespace MetricsService diff --git a/test/extensions/stats_sinks/metrics_service/grpc_metrics_service_impl_test.cc b/test/extensions/stats_sinks/metrics_service/grpc_metrics_service_impl_test.cc index c7543903d0f9..d0e5325607e9 100644 --- a/test/extensions/stats_sinks/metrics_service/grpc_metrics_service_impl_test.cc +++ b/test/extensions/stats_sinks/metrics_service/grpc_metrics_service_impl_test.cc @@ -90,35 +90,29 @@ class MockGrpcMetricsStreamer : public GrpcMetricsStreamer { MOCK_METHOD(void, send, (envoy::service::metrics::v3::StreamMetricsMessage & message)); }; -class TestGrpcMetricsStreamer : public GrpcMetricsStreamer { +class MetricsServiceSinkTest : public testing::Test { public: - int metric_count; - // GrpcMetricsStreamer - void send(envoy::service::metrics::v3::StreamMetricsMessage& message) override { - metric_count = message.envoy_metrics_size(); - } -}; + MetricsServiceSinkTest() = default; -class MetricsServiceSinkTest : public testing::Test {}; - -TEST(MetricsServiceSinkTest, CheckSendCall) { - NiceMock snapshot; - Event::SimulatedTimeSystem time_system; + NiceMock snapshot_; + Event::SimulatedTimeSystem time_system_; std::shared_ptr streamer_{new MockGrpcMetricsStreamer()}; +}; - MetricsServiceSink sink(streamer_, time_system); +TEST_F(MetricsServiceSinkTest, CheckSendCall) { + MetricsServiceSink sink(streamer_, time_system_, false); auto counter = std::make_shared>(); counter->name_ = "test_counter"; counter->latch_ = 1; counter->used_ = true; - snapshot.counters_.push_back({1, *counter}); + snapshot_.counters_.push_back({1, *counter}); auto gauge = std::make_shared>(); gauge->name_ = "test_gauge"; gauge->value_ = 1; gauge->used_ = true; - snapshot.gauges_.push_back(*gauge); + snapshot_.gauges_.push_back(*gauge); auto histogram = std::make_shared>(); histogram->name_ = "test_histogram"; @@ -126,35 +120,73 @@ TEST(MetricsServiceSinkTest, CheckSendCall) { EXPECT_CALL(*streamer_, send(_)); - sink.flush(snapshot); + sink.flush(snapshot_); } -TEST(MetricsServiceSinkTest, CheckStatsCount) { - NiceMock snapshot; - Event::SimulatedTimeSystem time_system; - std::shared_ptr streamer_{new TestGrpcMetricsStreamer()}; - - MetricsServiceSink sink(streamer_, time_system); +TEST_F(MetricsServiceSinkTest, CheckStatsCount) { + MetricsServiceSink sink(streamer_, time_system_, false); auto counter = std::make_shared>(); counter->name_ = "test_counter"; - counter->latch_ = 1; + counter->value_ = 100; counter->used_ = true; - snapshot.counters_.push_back({1, *counter}); + snapshot_.counters_.push_back({1, *counter}); auto gauge = std::make_shared>(); gauge->name_ = "test_gauge"; gauge->value_ = 1; gauge->used_ = true; - snapshot.gauges_.push_back(*gauge); + snapshot_.gauges_.push_back(*gauge); - sink.flush(snapshot); - EXPECT_EQ(2, (*streamer_).metric_count); + EXPECT_CALL(*streamer_, send(_)) + .WillOnce(Invoke([](envoy::service::metrics::v3::StreamMetricsMessage& message) { + 
EXPECT_EQ(2, message.envoy_metrics_size());
+ }));
+ sink.flush(snapshot_);
// Verify only newly added metrics come after endFlush call.
gauge->used_ = false;
- sink.flush(snapshot);
- EXPECT_EQ(1, (*streamer_).metric_count);
+ EXPECT_CALL(*streamer_, send(_))
+ .WillOnce(Invoke([](envoy::service::metrics::v3::StreamMetricsMessage& message) {
+ EXPECT_EQ(1, message.envoy_metrics_size());
+ }));
+ sink.flush(snapshot_);
+}
+
+// Test that verifies counters are correctly reported as current value when configured to do so.
+TEST_F(MetricsServiceSinkTest, ReportCountersValues) {
+ MetricsServiceSink sink(streamer_, time_system_, false);
+
+ auto counter = std::make_shared>();
+ counter->name_ = "test_counter";
+ counter->value_ = 100;
+ counter->used_ = true;
+ snapshot_.counters_.push_back({1, *counter});
+
+ EXPECT_CALL(*streamer_, send(_))
+ .WillOnce(Invoke([](envoy::service::metrics::v3::StreamMetricsMessage& message) {
+ EXPECT_EQ(1, message.envoy_metrics_size());
+ EXPECT_EQ(100, message.envoy_metrics(0).metric(0).counter().value());
+ }));
+ sink.flush(snapshot_);
+}
+
+// Test that verifies counters are reported as the delta between flushes when configured to do so.
+TEST_F(MetricsServiceSinkTest, ReportCountersAsDeltas) {
+ MetricsServiceSink sink(streamer_, time_system_, true);
+
+ auto counter = std::make_shared>();
+ counter->name_ = "test_counter";
+ counter->value_ = 100;
+ counter->used_ = true;
+ snapshot_.counters_.push_back({1, *counter});
+
+ EXPECT_CALL(*streamer_, send(_))
+ .WillOnce(Invoke([](envoy::service::metrics::v3::StreamMetricsMessage& message) {
+ EXPECT_EQ(1, message.envoy_metrics_size());
+ EXPECT_EQ(1, message.envoy_metrics(0).metric(0).counter().value());
+ }));
+ sink.flush(snapshot_);
}
} // namespace
From c630d590c9aa874d2296eaed846dd026d2039ace Mon Sep 17 00:00:00 2001
From: Sunjay Bhatia
Date: Tue, 28 Apr 2020 18:41:38 -0400
Subject: [PATCH 046/909] tests: Reference sh test script to be run with ./ (#10977)
Enable sh tests to run on Windows by referencing scripts with ./
Risk Level: Low
Testing: N/A
Docs Changes: N/A
Release Notes: N/A
Signed-off-by: Sunjay Bhatia
---
bazel/sh_test_wrapper.sh | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/bazel/sh_test_wrapper.sh b/bazel/sh_test_wrapper.sh
index 262a12286463..9e2f1138dea7 100755
--- a/bazel/sh_test_wrapper.sh
+++ b/bazel/sh_test_wrapper.sh
@@ -6,4 +6,6 @@
cd $(dirname "$0")
-"$@"
+if [ $# -gt 0 ]; then
+ "./$@"
+fi
From c4b8dafdce27d3aa8e786eb3882103a6b2679342 Mon Sep 17 00:00:00 2001
From: Joshua Marantz
Date: Tue, 28 Apr 2020 18:44:37 -0400
Subject: [PATCH 047/909] switch StatNameVec from std::vector to absl::InlinedVector, for a modest speed-up. (#10954)
Switch StatNameVec from std::vector to absl::InlinedVector, which has a very modest speedup; about a 10% speed improvement on symbol_table_speed_test's BM_JoinStatNames. Also benchmarks BM_JoinElements, which has even more modest gains.
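For intuition, here is a minimal standalone sketch of the difference (illustrative only, not code from this patch; the element type and the inline capacity of 8 are assumptions made for the example):

    #include <cstdint>
    #include <vector>

    #include "absl/container/inlined_vector.h"

    // Stand-in element type; purely for illustration.
    struct Name {
      uint64_t id;
    };

    int main() {
      // Up to 8 elements live in inline (on-object) storage, so a typical short
      // join of stat-name fragments performs no heap allocation.
      absl::InlinedVector<Name, 8> inlined;
      // std::vector allocates on the heap as soon as it becomes non-empty.
      std::vector<Name> heap;
      for (uint64_t i = 0; i < 5; ++i) {
        inlined.push_back(Name{i}); // stays in inline storage: no allocation
        heap.push_back(Name{i});    // may allocate or reallocate
      }
      return inlined.size() == heap.size() ? 0 : 1;
    }

Avoiding that per-join heap allocation on hot stat-lookup paths is presumably where the benchmark improvement comes from.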
Signed-off-by: Joshua Marantz --- include/envoy/stats/BUILD | 2 ++ include/envoy/stats/symbol_table.h | 3 +- source/common/stats/fake_symbol_table_impl.h | 2 +- source/common/stats/stat_merger.cc | 2 +- source/common/stats/tag_utility.cc | 4 +-- source/common/stats/thread_local_store.cc | 2 +- test/common/stats/BUILD | 8 ++++- test/common/stats/make_elements_helper.cc | 15 ++++++++ test/common/stats/make_elements_helper.h | 15 ++++++++ test/common/stats/stat_merger_fuzz_test.cc | 2 +- test/common/stats/stat_merger_test.cc | 4 +-- test/common/stats/symbol_table_speed_test.cc | 36 ++++++++++++++++++++ test/mocks/stats/mocks.h | 2 +- 13 files changed, 86 insertions(+), 11 deletions(-) create mode 100644 test/common/stats/make_elements_helper.cc create mode 100644 test/common/stats/make_elements_helper.h diff --git a/include/envoy/stats/BUILD b/include/envoy/stats/BUILD index fcba981cd242..4a3a6948aad5 100644 --- a/include/envoy/stats/BUILD +++ b/include/envoy/stats/BUILD @@ -29,6 +29,7 @@ envoy_cc_library( "tag_extractor.h", "tag_producer.h", ], + external_deps = ["abseil_inlined_vector"], deps = [ ":refcount_ptr_interface", ":symbol_table_interface", @@ -50,6 +51,7 @@ envoy_cc_library( envoy_cc_library( name = "symbol_table_interface", hdrs = ["symbol_table.h"], + external_deps = ["abseil_inlined_vector"], deps = [ "//source/common/common:hash_lib", ], diff --git a/include/envoy/stats/symbol_table.h b/include/envoy/stats/symbol_table.h index 3463e5c7688c..b84d340f79d1 100644 --- a/include/envoy/stats/symbol_table.h +++ b/include/envoy/stats/symbol_table.h @@ -7,6 +7,7 @@ #include "envoy/common/pure.h" +#include "absl/container/inlined_vector.h" #include "absl/strings/string_view.h" namespace Envoy { @@ -20,7 +21,7 @@ namespace Stats { * declaration for StatName is in source/common/stats/symbol_table_impl.h */ class StatName; -using StatNameVec = std::vector; +using StatNameVec = absl::InlinedVector; class StatNameList; class StatNameSet; diff --git a/source/common/stats/fake_symbol_table_impl.h b/source/common/stats/fake_symbol_table_impl.h index b9639ef44f4a..9e4c5422f7a5 100644 --- a/source/common/stats/fake_symbol_table_impl.h +++ b/source/common/stats/fake_symbol_table_impl.h @@ -95,7 +95,7 @@ class FakeSymbolTableImpl : public SymbolTable { void incRefCount(const StatName&) override {} StoragePtr encode(absl::string_view name) override { return encodeHelper(name); } StoragePtr makeDynamicStorage(absl::string_view name) override { return encodeHelper(name); } - SymbolTable::StoragePtr join(const std::vector& names) const override { + SymbolTable::StoragePtr join(const StatNameVec& names) const override { std::vector strings; for (StatName name : names) { if (!name.empty()) { diff --git a/source/common/stats/stat_merger.cc b/source/common/stats/stat_merger.cc index b32ff6d7f332..e8e2c8b7b55b 100644 --- a/source/common/stats/stat_merger.cc +++ b/source/common/stats/stat_merger.cc @@ -20,7 +20,7 @@ StatName StatMerger::DynamicContext::makeDynamicStatName(const std::string& name // Name has embedded dynamic segments; we'll need to join together the // static/dynamic StatName segments. 
- std::vector segments; + StatNameVec segments; uint32_t segment_index = 0; std::vector dynamic_segments; diff --git a/source/common/stats/tag_utility.cc b/source/common/stats/tag_utility.cc index 6875f046d72c..7710277fd5ba 100644 --- a/source/common/stats/tag_utility.cc +++ b/source/common/stats/tag_utility.cc @@ -37,7 +37,7 @@ TagStatNameJoiner::TagStatNameJoiner(StatName stat_name, SymbolTable::StoragePtr TagStatNameJoiner::joinNameAndTags(StatName name, const StatNameTagVector& tags, SymbolTable& symbol_table) { - std::vector stat_names; + StatNameVec stat_names; stat_names.reserve(1 + 2 * tags.size()); stat_names.emplace_back(name); @@ -50,4 +50,4 @@ SymbolTable::StoragePtr TagStatNameJoiner::joinNameAndTags(StatName name, } } // namespace TagUtility } // namespace Stats -} // namespace Envoy \ No newline at end of file +} // namespace Envoy diff --git a/source/common/stats/thread_local_store.cc b/source/common/stats/thread_local_store.cc index 5a9a47f912fa..e526537d453a 100644 --- a/source/common/stats/thread_local_store.cc +++ b/source/common/stats/thread_local_store.cc @@ -57,7 +57,7 @@ void ThreadLocalStoreImpl::setStatsMatcher(StatsMatcherPtr&& stats_matcher) { template void ThreadLocalStoreImpl::removeRejectedStats(StatMapClass& map, StatListClass& list) { - std::vector remove_list; + StatNameVec remove_list; for (auto& stat : map) { if (rejects(stat.first)) { remove_list.push_back(stat.first); diff --git a/test/common/stats/BUILD b/test/common/stats/BUILD index 5e38a6268836..72e04a9c0815 100644 --- a/test/common/stats/BUILD +++ b/test/common/stats/BUILD @@ -165,7 +165,11 @@ envoy_cc_fuzz_test( envoy_cc_test_binary( name = "symbol_table_speed_test", - srcs = ["symbol_table_speed_test.cc"], + srcs = [ + "make_elements_helper.cc", + "make_elements_helper.h", + "symbol_table_speed_test.cc", + ], external_deps = [ "abseil_strings", "benchmark", @@ -173,7 +177,9 @@ envoy_cc_test_binary( deps = [ ":stat_test_utility_lib", "//source/common/memory:stats_lib", + "//source/common/stats:isolated_store_lib", "//source/common/stats:symbol_table_lib", + "//source/common/stats:utility_lib", "//test/mocks/stats:stats_mocks", "//test/test_common:logging_lib", "//test/test_common:utility_lib", diff --git a/test/common/stats/make_elements_helper.cc b/test/common/stats/make_elements_helper.cc new file mode 100644 index 000000000000..d7d453824804 --- /dev/null +++ b/test/common/stats/make_elements_helper.cc @@ -0,0 +1,15 @@ +#include "common/stats/utility.h" + +namespace Envoy { +namespace Stats { + +ElementVec makeElements(Element a, Element b, Element c, Element d, Element e) { + return ElementVec{a, b, c, d, e}; +} + +StatNameVec makeStatNames(StatName a, StatName b, StatName c, StatName d, StatName e) { + return StatNameVec{a, b, c, d, e}; +} + +} // namespace Stats +} // namespace Envoy diff --git a/test/common/stats/make_elements_helper.h b/test/common/stats/make_elements_helper.h new file mode 100644 index 000000000000..218fa6f7aec6 --- /dev/null +++ b/test/common/stats/make_elements_helper.h @@ -0,0 +1,15 @@ +#pragma once + +#include "common/stats/utility.h" + +namespace Envoy { +namespace Stats { + +// These two trivial functions are broken out into a separate compilation unit +// to make sure the optimizer cannot hoist vector-creation out of the loop. They +// simply create vectors based on their 5 inputs. 
+ElementVec makeElements(Element a, Element b, Element c, Element d, Element e); +StatNameVec makeStatNames(StatName a, StatName b, StatName c, StatName d, StatName e); + +} // namespace Stats +} // namespace Envoy diff --git a/test/common/stats/stat_merger_fuzz_test.cc b/test/common/stats/stat_merger_fuzz_test.cc index 7fdcb55033bd..de2ebf6a9a4b 100644 --- a/test/common/stats/stat_merger_fuzz_test.cc +++ b/test/common/stats/stat_merger_fuzz_test.cc @@ -15,7 +15,7 @@ namespace Fuzz { void testDynamicEncoding(absl::string_view data, SymbolTable& symbol_table) { StatNameDynamicPool dynamic_pool(symbol_table); StatNamePool symbolic_pool(symbol_table); - std::vector stat_names; + StatNameVec stat_names; // This local string is write-only; it's used to help when debugging // a crash. If a crash is found, you can print the unit_test_encoding diff --git a/test/common/stats/stat_merger_test.cc b/test/common/stats/stat_merger_test.cc index bb47651bcdc9..b348e1f97389 100644 --- a/test/common/stats/stat_merger_test.cc +++ b/test/common/stats/stat_merger_test.cc @@ -34,7 +34,7 @@ class StatMergerTest : public testing::Test { // Encode the input name into a joined StatName, using "D:" to indicate // a dynamic component. - std::vector components; + StatNameVec components; StatNamePool symbolic_pool(symbol_table); StatNameDynamicPool dynamic_pool(symbol_table); @@ -233,7 +233,7 @@ class StatMergerDynamicTest : public testing::Test { uint32_t dynamicEncodeDecodeTest(absl::string_view input_descriptor) { // Encode the input name into a joined StatName, using "D:" to indicate // a dynamic component. - std::vector components; + StatNameVec components; StatNamePool symbolic_pool(*symbol_table_); StatNameDynamicPool dynamic_pool(*symbol_table_); diff --git a/test/common/stats/symbol_table_speed_test.cc b/test/common/stats/symbol_table_speed_test.cc index 1ef4d3f63684..a32f0d8c05b1 100644 --- a/test/common/stats/symbol_table_speed_test.cc +++ b/test/common/stats/symbol_table_speed_test.cc @@ -5,13 +5,17 @@ #include "common/common/logger.h" #include "common/common/thread.h" +#include "common/stats/isolated_store_impl.h" #include "common/stats/symbol_table_impl.h" +#include "common/stats/utility.h" +#include "test/common/stats/make_elements_helper.h" #include "test/test_common/utility.h" #include "absl/synchronization/blocking_counter.h" #include "benchmark/benchmark.h" +// NOLINTNEXTLINE(readability-identifier-naming) static void BM_CreateRace(benchmark::State& state) { Envoy::Thread::ThreadFactory& thread_factory = Envoy::Thread::threadFactoryForTest(); @@ -54,6 +58,38 @@ static void BM_CreateRace(benchmark::State& state) { } BENCHMARK(BM_CreateRace); +// NOLINTNEXTLINE(readability-identifier-naming) +static void BM_JoinStatNames(benchmark::State& state) { + Envoy::Stats::SymbolTableImpl symbol_table; + Envoy::Stats::IsolatedStoreImpl store(symbol_table); + Envoy::Stats::StatNamePool pool(symbol_table); + Envoy::Stats::StatName a = pool.add("a"); + Envoy::Stats::StatName b = pool.add("b"); + Envoy::Stats::StatName c = pool.add("c"); + Envoy::Stats::StatName d = pool.add("d"); + Envoy::Stats::StatName e = pool.add("e"); + for (auto _ : state) { + Envoy::Stats::Utility::counterFromStatNames(store, Envoy::Stats::makeStatNames(a, b, c, d, e)); + } +} +BENCHMARK(BM_JoinStatNames); + +// NOLINTNEXTLINE(readability-identifier-naming) +static void BM_JoinElements(benchmark::State& state) { + Envoy::Stats::SymbolTableImpl symbol_table; + Envoy::Stats::IsolatedStoreImpl store(symbol_table); + Envoy::Stats::StatNamePool 
pool(symbol_table); + Envoy::Stats::StatName a = pool.add("a"); + Envoy::Stats::StatName b = pool.add("b"); + Envoy::Stats::StatName c = pool.add("c"); + Envoy::Stats::StatName e = pool.add("e"); + for (auto _ : state) { + Envoy::Stats::Utility::counterFromElements( + store, Envoy::Stats::makeElements(a, b, c, Envoy::Stats::DynamicName("d"), e)); + } +} +BENCHMARK(BM_JoinElements); + int main(int argc, char** argv) { Envoy::Thread::MutexBasicLockable lock; Envoy::Logger::Context logger_context(spdlog::level::warn, diff --git a/test/mocks/stats/mocks.h b/test/mocks/stats/mocks.h index 17491916545f..1ee96aaf8a9d 100644 --- a/test/mocks/stats/mocks.h +++ b/test/mocks/stats/mocks.h @@ -137,7 +137,7 @@ template class MockMetric : public BaseClass { private: TagVector tags_; - std::vector tag_names_and_values_; + StatNameVec tag_names_and_values_; std::string tag_extracted_name_; StatNamePool tag_pool_; std::unique_ptr tag_extracted_stat_name_; From 7a643a49d97dbc12132eb53e2bc2afcd23658cf2 Mon Sep 17 00:00:00 2001 From: cmiller-sq <64046472+cmiller-sq@users.noreply.github.com> Date: Tue, 28 Apr 2020 18:45:20 -0400 Subject: [PATCH 048/909] docs: missing quote in lua example (#10947) Risk Level: Low Testing: n/a Docs Changes: fix typo Release Notes: n/a Signed-off-by: Chad MILLER --- docs/root/configuration/http/http_filters/lua_filter.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/root/configuration/http/http_filters/lua_filter.rst b/docs/root/configuration/http/http_filters/lua_filter.rst index 8868a39d15b7..4be29a1ffe52 100644 --- a/docs/root/configuration/http/http_filters/lua_filter.rst +++ b/docs/root/configuration/http/http_filters/lua_filter.rst @@ -522,7 +522,7 @@ its keys can only be *string* or *numeric*. function envoy_on_request(request_handle) local headers = request_handle:headers() request_handle:streamInfo():dynamicMetadata():set("envoy.filters.http.lua", "request.info", { - auth: headers:get("authorization), + auth: headers:get("authorization"), token: headers:get("x-request-token"), }) end From 74c8054e84b37e8c315835cc5c2cf39a80631f26 Mon Sep 17 00:00:00 2001 From: cmiller-sq <64046472+cmiller-sq@users.noreply.github.com> Date: Tue, 28 Apr 2020 18:46:18 -0400 Subject: [PATCH 049/909] docs: lua examples use local variables (#10949) Emphasize that global variables aren't available in coroutines by not creating any in examples. Risk Level: Low Testing: n/a Docs Changes: add local modifier Release Notes: n/a Signed-off-by: Chad MILLER --- .../http/http_filters/lua_filter.rst | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/docs/root/configuration/http/http_filters/lua_filter.rst b/docs/root/configuration/http/http_filters/lua_filter.rst index 4be29a1ffe52..303b764693e3 100644 --- a/docs/root/configuration/http/http_filters/lua_filter.rst +++ b/docs/root/configuration/http/http_filters/lua_filter.rst @@ -167,7 +167,7 @@ headers() .. code-block:: lua - headers = handle:headers() + local headers = handle:headers() Returns the stream's headers. The headers can be modified as long as they have not been sent to the next filter in the header chain. For example, they can be modified after an *httpCall()* or @@ -181,7 +181,7 @@ body() .. code-block:: lua - body = handle:body() + local body = handle:body() Returns the stream's body. This call will cause Envoy to yield the script until the entire body has been buffered. Note that all buffering must adhere to the flow control policies in place. 
@@ -194,7 +194,7 @@ bodyChunks() .. code-block:: lua - iterator = handle:bodyChunks() + local iterator = handle:bodyChunks() Returns an iterator that can be used to iterate through all received body chunks as they arrive. Envoy will yield the script in between chunks, but *will not buffer* them. This can be used by @@ -213,7 +213,7 @@ trailers() .. code-block:: lua - trailers = handle:trailers() + local trailers = handle:trailers() Returns the stream's trailers. May return nil if there are no trailers. The trailers may be modified before they are sent to the next filter. @@ -239,7 +239,7 @@ httpCall() .. code-block:: lua - headers, body = handle:httpCall(cluster, headers, body, timeout, asynchronous) + local headers, body = handle:httpCall(cluster, headers, body, timeout, asynchronous) Makes an HTTP call to an upstream host. *cluster* is a string which maps to a configured cluster manager cluster. *headers* is a table of key/value pairs to send (the value can be a string or table of strings). Note that @@ -283,7 +283,7 @@ metadata() .. code-block:: lua - metadata = handle:metadata() + local metadata = handle:metadata() Returns the current route entry metadata. Note that the metadata should be specified under the filter name i.e. *envoy.filters.http.lua*. Below is an example of a *metadata* in a @@ -306,7 +306,7 @@ streamInfo() .. code-block:: lua - streamInfo = handle:streamInfo() + local streamInfo = handle:streamInfo() Returns :repo:`information ` related to the current request. @@ -317,7 +317,7 @@ connection() .. code-block:: lua - connection = handle:connection() + local connection = handle:connection() Returns the current request's underlying :repo:`connection `. @@ -328,7 +328,7 @@ importPublicKey() .. code-block:: lua - pubkey = handle:importPublicKey(keyder, keyderLength) + local pubkey = handle:importPublicKey(keyder, keyderLength) Returns public key which is used by :ref:`verifySignature ` to verify digital signature. @@ -339,7 +339,7 @@ verifySignature() .. code-block:: lua - ok, error = verifySignature(hashFunction, pubkey, signature, signatureLength, data, dataLength) + local ok, error = verifySignature(hashFunction, pubkey, signature, signatureLength, data, dataLength) Verify signature using provided parameters. *hashFunction* is the variable for hash function which be used for verifying signature. *SHA1*, *SHA224*, *SHA256*, *SHA384* and *SHA512* are supported. @@ -420,7 +420,7 @@ length() .. code-block:: lua - size = buffer:length() + local size = buffer:length() Gets the size of the buffer in bytes. Returns an integer. From f8fe08bc2be45692e2c7064e5fd75cdebd53dc3a Mon Sep 17 00:00:00 2001 From: cmiller-sq <64046472+cmiller-sq@users.noreply.github.com> Date: Wed, 29 Apr 2020 12:52:45 -0400 Subject: [PATCH 050/909] docs: describe the asynchronisity of Lua API calls more naturally (#10948) Originally, the doc used the phrase "yield the script" many times. A reader might interpret that as the normal definition of yield, being to produce an object, or to give way under pressure. But it does not produce the script. Instead of using the transitive verb yield in an intransitive sense, just describe what it's doing in a less jargony way. 
Risk Level: Low Testing: n/a Docs Changes: clarify docs Release Notes: n/a Signed-off-by: Chad MILLER --- .../http/http_filters/lua_filter.rst | 20 ++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/docs/root/configuration/http/http_filters/lua_filter.rst b/docs/root/configuration/http/http_filters/lua_filter.rst index 303b764693e3..c55bfd5b4519 100644 --- a/docs/root/configuration/http/http_filters/lua_filter.rst +++ b/docs/root/configuration/http/http_filters/lua_filter.rst @@ -35,7 +35,7 @@ The design of the filter and Lua support at a high level is as follows: * All scripts are run as coroutines. This means that they are written in a synchronous style even though they may perform complex asynchronous tasks. This makes the scripts substantially easier to write. All network/async processing is performed by Envoy via a set of APIs. Envoy will - yield the script as appropriate and resume it when async tasks are complete. + suspend execution of the script as appropriate and resume it when async tasks are complete. * **Do not perform blocking operations from scripts.** It is critical for performance that Envoy APIs are used for all IO. @@ -151,8 +151,9 @@ script defines: end A script can define either or both of these functions. During the request path, Envoy will -run *envoy_on_request* as a coroutine, passing an API handle. During the response path, Envoy will -run *envoy_on_response* as a coroutine, passing an API handle. +run *envoy_on_request* as a coroutine, passing a handle to the request API. During the +response path, Envoy will run *envoy_on_response* as a coroutine, passing handle to the +response API. .. attention:: @@ -183,9 +184,10 @@ body() local body = handle:body() -Returns the stream's body. This call will cause Envoy to yield the script until the entire body -has been buffered. Note that all buffering must adhere to the flow control policies in place. -Envoy will not buffer more data than is allowed by the connection manager. +Returns the stream's body. This call will cause Envoy to suspend execution of the script until +the entire body has been received in a buffer. Note that all buffering must adhere to the +flow-control policies in place. Envoy will not buffer more data than is allowed by the connection +manager. Returns a :ref:`buffer object `. @@ -197,8 +199,8 @@ bodyChunks() local iterator = handle:bodyChunks() Returns an iterator that can be used to iterate through all received body chunks as they arrive. -Envoy will yield the script in between chunks, but *will not buffer* them. This can be used by -a script to inspect data as it is streaming by. +Envoy will suspend executing the script in between chunks, but *will not buffer* them. This can be +used by a script to inspect data as it is streaming by. .. code-block:: lua @@ -247,7 +249,7 @@ the *:method*, *:path*, and *:authority* headers must be set. *body* is an optio data to send. *timeout* is an integer that specifies the call timeout in milliseconds. *asynchronous* is a boolean flag. If asynchronous is set to true, Envoy will make the HTTP request and continue, -regardless of response success or failure. If this is set to false, or not set, Envoy will yield the script +regardless of response success or failure. If this is set to false, or not set, Envoy will suspend executing the script until the call completes or has an error. Returns *headers* which is a table of response headers. 
Returns *body* which is the string response From c2a63735145ef1ce056ca2a50927e3e6682177eb Mon Sep 17 00:00:00 2001 From: Koki Tomoshige <36136133+tomocy@users.noreply.github.com> Date: Thu, 30 Apr 2020 02:12:43 +0900 Subject: [PATCH 051/909] docs: fix typo in draining doc (#10993) Risk Level: Low Docs Changes: fix typo Signed-off-by: tomocy --- docs/root/intro/arch_overview/operations/draining.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/root/intro/arch_overview/operations/draining.rst b/docs/root/intro/arch_overview/operations/draining.rst index 2c9045a85395..eeb203a21935 100644 --- a/docs/root/intro/arch_overview/operations/draining.rst +++ b/docs/root/intro/arch_overview/operations/draining.rst @@ -35,5 +35,5 @@ level. Currently the only filters that support graceful draining are :ref:`Mongo `. Listeners can also be stopped via :ref:`drain_listeners `. In this case, -they are directly stopped (with out going through the actual draining process) on worker threads, +they are directly stopped (without going through the actual draining process) on worker threads, so that they will not accept any new requests. From 77ad73c26584c218842cd4df3a38ee43d846d2a2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Guti=C3=A9rrez=20Segal=C3=A9s?= Date: Wed, 29 Apr 2020 17:08:16 -0400 Subject: [PATCH 052/909] header-to-metadata: properly lookup most specific config (#10992) Use Http::Utility::resolveMostSpecificPerFilterConfig() to fetch the correct config (route entry, route or vhost level) instead of doing it manually (and wrong). Signed-off-by: Raul Gutierrez Segales --- .../filters/http/header_to_metadata/BUILD | 1 + .../header_to_metadata_filter.cc | 16 +++------------- .../header_to_metadata_filter.h | 1 - 3 files changed, 4 insertions(+), 14 deletions(-) diff --git a/source/extensions/filters/http/header_to_metadata/BUILD b/source/extensions/filters/http/header_to_metadata/BUILD index 1c9cbed21a84..d22a182a2dfd 100644 --- a/source/extensions/filters/http/header_to_metadata/BUILD +++ b/source/extensions/filters/http/header_to_metadata/BUILD @@ -19,6 +19,7 @@ envoy_cc_library( deps = [ "//include/envoy/server:filter_config_interface", "//source/common/common:base64_lib", + "//source/common/http:utility_lib", "//source/extensions/filters/http:well_known_names", "@envoy_api//envoy/extensions/filters/http/header_to_metadata/v3:pkg_cc_proto", ], diff --git a/source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.cc b/source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.cc index 3a5d2dc6725f..92f7728114d8 100644 --- a/source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.cc +++ b/source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.cc @@ -4,6 +4,7 @@ #include "common/common/base64.h" #include "common/config/well_known_names.h" +#include "common/http/utility.h" #include "common/protobuf/protobuf.h" #include "extensions/filters/http/well_known_names.h" @@ -206,18 +207,6 @@ void HeaderToMetadataFilter::writeHeaderToMetadata(Http::HeaderMap& headers, } } -const Config* HeaderToMetadataFilter::getRouteConfig() const { - if (!decoder_callbacks_->route() || !decoder_callbacks_->route()->routeEntry()) { - return nullptr; - } - - const auto* entry = decoder_callbacks_->route()->routeEntry(); - const auto* per_filter_config = - entry->virtualHost().perFilterConfig(HttpFilterNames::get().HeaderToMetadata); - - return dynamic_cast(per_filter_config); -} - // TODO(rgs1): this belongs in one of 
the filter interfaces, see issue #10164.
const Config* HeaderToMetadataFilter::getConfig() const {
// Cached config pointer.
@@ -225,7 +214,8 @@ const Config* HeaderToMetadataFilter::getConfig() const {
return effective_config_;
}
- effective_config_ = getRouteConfig();
+ effective_config_ = Http::Utility::resolveMostSpecificPerFilterConfig(
+ HttpFilterNames::get().HeaderToMetadata, decoder_callbacks_->route());
if (effective_config_) {
return effective_config_;
}
diff --git a/source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.h b/source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.h
index 6c02dfb75b07..29614e6704d8 100644
--- a/source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.h
+++ b/source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.h
@@ -129,7 +129,6 @@ class HeaderToMetadataFilter : public Http::StreamFilter,
ValueEncode) const;
const std::string& decideNamespace(const std::string& nspace) const;
const Config* getConfig() const;
- const Config* getRouteConfig() const;
};
} // namespace HeaderToMetadataFilter
From e11b310664283246c7bbc7ac0fa6a82cfe5ca8d6 Mon Sep 17 00:00:00 2001
From: alyssawilk
Date: Wed, 29 Apr 2020 17:09:12 -0400
Subject: [PATCH 053/909] test: deflaking a QUIC test (#10981)
Signed-off-by: Alyssa Wilk
---
test/integration/http_integration.cc | 12 ++++++++----
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/test/integration/http_integration.cc b/test/integration/http_integration.cc
index f485f3083c64..a736718e5df5 100644
--- a/test/integration/http_integration.cc
+++ b/test/integration/http_integration.cc
@@ -1203,10 +1203,14 @@ void HttpIntegrationTest::testAdminDrain(Http::CodecClient::Type admin_request_t
test_server_->waitForCounterEq("listener_manager.listener_stopped", 1);
// Validate that port is closed and can be bound by other sockets.
- EXPECT_NO_THROW(Network::TcpListenSocket(
- Network::Utility::getAddressWithPort(*Network::Test::getCanonicalLoopbackAddress(version_),
- http_port),
- nullptr, true));
+ // This does not work for HTTP/3 because the port is not closed until the listener is completely
+ // destroyed. TODO(danzh) Match TCP behavior as much as possible.
+ if (downstreamProtocol() != Http::CodecClient::Type::HTTP3) {
+ EXPECT_NO_THROW(Network::TcpListenSocket(
+ Network::Utility::getAddressWithPort(*Network::Test::getCanonicalLoopbackAddress(version_),
+ http_port),
+ nullptr, true));
+ }
}
std::string HttpIntegrationTest::listenerStatPrefix(const std::string& stat_name) {
From 97d3966737627cb3d4131c959a838acd5d31316b Mon Sep 17 00:00:00 2001
From: pyrl <476839241@qq.com>
Date: Thu, 30 Apr 2020 05:10:18 +0800
Subject: [PATCH 054/909] [thrift] fix decoding of large list or map values when the socket cannot read them all at once and frame remaining_ is decremented prematurely (#10893)
When a client sends a large number of list or map values and the socket cannot read them all in one pass, the state machine may have decoded only some of the values. If frame.remaining_ is decremented anyway, the container comes up one value short (length - 1) and the state machine falls out of sync. This change decrements remaining_ only after a value has actually been decoded. It also uses an index rather than a reference because the stack_ vector may grow and reallocate, invalidating a reference obtained from stack_.back().
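To illustrate the reference-invalidation pitfall mentioned above, a minimal standalone sketch (not the decoder code; plain ints stand in for decoder frames):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    int main() {
      std::vector<int> stack = {10, 20, 30};
      // Remember the position of the current frame rather than keeping a
      // reference to it.
      const std::size_t index = stack.size() - 1;
      // Growing the vector (as decoding a nested value would push a new frame)
      // may reallocate its storage; a reference previously taken from
      // stack.back() would now dangle.
      stack.push_back(40);
      // Indexing again after the potential reallocation is still well defined.
      stack[index] -= 1;
      assert(stack[index] == 29);
      return 0;
    }
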
Signed-off-by: Guang Yang --- .../filters/network/thrift_proxy/decoder.cc | 33 ++++--- .../network/thrift_proxy/decoder_test.cc | 92 +++++++++++++++++++ 2 files changed, 113 insertions(+), 12 deletions(-) diff --git a/source/extensions/filters/network/thrift_proxy/decoder.cc b/source/extensions/filters/network/thrift_proxy/decoder.cc index 487e026232f6..c02a4b1dc062 100644 --- a/source/extensions/filters/network/thrift_proxy/decoder.cc +++ b/source/extensions/filters/network/thrift_proxy/decoder.cc @@ -111,13 +111,16 @@ DecoderStateMachine::DecoderStatus DecoderStateMachine::listBegin(Buffer::Instan // ListValue -> ListEnd DecoderStateMachine::DecoderStatus DecoderStateMachine::listValue(Buffer::Instance& buffer) { ASSERT(!stack_.empty()); - Frame& frame = stack_.back(); - if (frame.remaining_ == 0) { + const uint32_t index = stack_.size() - 1; + if (stack_[index].remaining_ == 0) { return {popReturnState(), FilterStatus::Continue}; } - frame.remaining_--; + DecoderStatus status = handleValue(buffer, stack_[index].elem_type_, ProtocolState::ListValue); + if (status.next_state_ != ProtocolState::WaitForData) { + stack_[index].remaining_--; + } - return handleValue(buffer, frame.elem_type_, ProtocolState::ListValue); + return status; } // ListEnd -> stack's return state @@ -159,11 +162,14 @@ DecoderStateMachine::DecoderStatus DecoderStateMachine::mapKey(Buffer::Instance& // MapValue -> MapKey DecoderStateMachine::DecoderStatus DecoderStateMachine::mapValue(Buffer::Instance& buffer) { ASSERT(!stack_.empty()); - Frame& frame = stack_.back(); - ASSERT(frame.remaining_ != 0); - frame.remaining_--; + const uint32_t index = stack_.size() - 1; + ASSERT(stack_[index].remaining_ != 0); + DecoderStatus status = handleValue(buffer, stack_[index].value_type_, ProtocolState::MapKey); + if (status.next_state_ != ProtocolState::WaitForData) { + stack_[index].remaining_--; + } - return handleValue(buffer, frame.value_type_, ProtocolState::MapKey); + return status; } // MapEnd -> stack's return state @@ -193,13 +199,16 @@ DecoderStateMachine::DecoderStatus DecoderStateMachine::setBegin(Buffer::Instanc // SetValue -> SetEnd DecoderStateMachine::DecoderStatus DecoderStateMachine::setValue(Buffer::Instance& buffer) { ASSERT(!stack_.empty()); - Frame& frame = stack_.back(); - if (frame.remaining_ == 0) { + const uint32_t index = stack_.size() - 1; + if (stack_[index].remaining_ == 0) { return {popReturnState(), FilterStatus::Continue}; } - frame.remaining_--; + DecoderStatus status = handleValue(buffer, stack_[index].elem_type_, ProtocolState::SetValue); + if (status.next_state_ != ProtocolState::WaitForData) { + stack_[index].remaining_--; + } - return handleValue(buffer, frame.elem_type_, ProtocolState::SetValue); + return status; } // SetEnd -> stack's return state diff --git a/test/extensions/filters/network/thrift_proxy/decoder_test.cc b/test/extensions/filters/network/thrift_proxy/decoder_test.cc index 1dc42a1a116b..4699f3d94a91 100644 --- a/test/extensions/filters/network/thrift_proxy/decoder_test.cc +++ b/test/extensions/filters/network/thrift_proxy/decoder_test.cc @@ -336,6 +336,28 @@ TEST_P(DecoderStateMachineValueTest, ListValue) { EXPECT_EQ(dsm.currentState(), ProtocolState::ListEnd); } +TEST_P(DecoderStateMachineValueTest, IncompleteListValue) { + FieldType field_type = GetParam(); + Buffer::OwnedImpl buffer; + InSequence dummy; + + EXPECT_CALL(proto_, readListBegin(Ref(buffer), _, _)) + .WillOnce(DoAll(SetArgReferee<1>(field_type), SetArgReferee<2>(1), Return(true))); + + expectValue(proto_, 
handler_, field_type, false); + + DecoderStateMachine dsm(proto_, metadata_, handler_); + + dsm.setCurrentState(ProtocolState::ListBegin); + EXPECT_EQ(dsm.run(buffer), ProtocolState::WaitForData); + EXPECT_EQ(dsm.currentState(), ProtocolState::ListValue); + + expectValue(proto_, handler_, field_type); + + EXPECT_EQ(dsm.run(buffer), ProtocolState::WaitForData); + EXPECT_EQ(dsm.currentState(), ProtocolState::ListEnd); +} + TEST_P(DecoderStateMachineValueTest, MultipleListValues) { FieldType field_type = GetParam(); Buffer::OwnedImpl buffer; @@ -448,6 +470,54 @@ TEST_P(DecoderStateMachineValueTest, MapValueValue) { EXPECT_EQ(dsm.currentState(), ProtocolState::MapEnd); } +TEST_P(DecoderStateMachineValueTest, IncompleteMapKey) { + FieldType field_type = GetParam(); + Buffer::OwnedImpl buffer; + InSequence dummy; + + EXPECT_CALL(proto_, readMapBegin(Ref(buffer), _, _, _)) + .WillOnce(DoAll(SetArgReferee<1>(field_type), SetArgReferee<2>(FieldType::I32), + SetArgReferee<3>(1), Return(true))); + + expectValue(proto_, handler_, field_type, false); // key + + DecoderStateMachine dsm(proto_, metadata_, handler_); + + dsm.setCurrentState(ProtocolState::MapBegin); + EXPECT_EQ(dsm.run(buffer), ProtocolState::WaitForData); + EXPECT_EQ(dsm.currentState(), ProtocolState::MapKey); + + expectValue(proto_, handler_, field_type); // key + expectValue(proto_, handler_, FieldType::I32); // value + + EXPECT_EQ(dsm.run(buffer), ProtocolState::WaitForData); + EXPECT_EQ(dsm.currentState(), ProtocolState::MapEnd); +} + +TEST_P(DecoderStateMachineValueTest, IncompleteMapValue) { + FieldType field_type = GetParam(); + Buffer::OwnedImpl buffer; + InSequence dummy; + + EXPECT_CALL(proto_, readMapBegin(Ref(buffer), _, _, _)) + .WillOnce(DoAll(SetArgReferee<1>(FieldType::I32), SetArgReferee<2>(field_type), + SetArgReferee<3>(1), Return(true))); + + expectValue(proto_, handler_, FieldType::I32); // key + expectValue(proto_, handler_, field_type, false); // value + + DecoderStateMachine dsm(proto_, metadata_, handler_); + + dsm.setCurrentState(ProtocolState::MapBegin); + EXPECT_EQ(dsm.run(buffer), ProtocolState::WaitForData); + EXPECT_EQ(dsm.currentState(), ProtocolState::MapValue); + + expectValue(proto_, handler_, field_type); // value + + EXPECT_EQ(dsm.run(buffer), ProtocolState::WaitForData); + EXPECT_EQ(dsm.currentState(), ProtocolState::MapEnd); +} + TEST_P(DecoderStateMachineValueTest, MultipleMapKeyValues) { FieldType field_type = GetParam(); Buffer::OwnedImpl buffer; @@ -520,6 +590,28 @@ TEST_P(DecoderStateMachineValueTest, SetValue) { EXPECT_EQ(dsm.currentState(), ProtocolState::SetEnd); } +TEST_P(DecoderStateMachineValueTest, IncompleteSetValue) { + FieldType field_type = GetParam(); + Buffer::OwnedImpl buffer; + InSequence dummy; + + EXPECT_CALL(proto_, readSetBegin(Ref(buffer), _, _)) + .WillOnce(DoAll(SetArgReferee<1>(field_type), SetArgReferee<2>(1), Return(true))); + + expectValue(proto_, handler_, field_type, false); + + DecoderStateMachine dsm(proto_, metadata_, handler_); + + dsm.setCurrentState(ProtocolState::SetBegin); + EXPECT_EQ(dsm.run(buffer), ProtocolState::WaitForData); + EXPECT_EQ(dsm.currentState(), ProtocolState::SetValue); + + expectValue(proto_, handler_, field_type); + + EXPECT_EQ(dsm.run(buffer), ProtocolState::WaitForData); + EXPECT_EQ(dsm.currentState(), ProtocolState::SetEnd); +} + TEST_P(DecoderStateMachineValueTest, MultipleSetValues) { FieldType field_type = GetParam(); Buffer::OwnedImpl buffer; From decfa39dd3c881970435c144c72b2b64d1c8a4b2 Mon Sep 17 00:00:00 2001 From: alyssawilk 
Date: Wed, 29 Apr 2020 17:10:57 -0400 Subject: [PATCH 055/909] grpc: bounding access logs based on configured buffer limits. (#10882) If the underlying HTTP/2 connection is backed up, drop gRPC logs and increment a stat. Signed-off-by: Alyssa Wilk --- .../observability/access_log/access_log.rst | 9 ++ .../observability/access_log/overview.rst | 6 ++ .../observability/access_log/stats.rst | 35 ++++++++ .../{access_log.rst => access_log/usage.rst} | 0 .../observability/observability.rst | 2 +- .../observability/statistics.rst | 17 ---- docs/root/version_history/current.rst | 1 + docs/root/version_history/v1.12.0.rst | 2 +- include/envoy/grpc/async_client.h | 6 ++ include/envoy/http/async_client.h | 6 ++ source/common/grpc/async_client_impl.h | 3 + .../common/grpc/google_async_client_impl.cc | 10 +-- source/common/grpc/google_async_client_impl.h | 4 +- source/common/grpc/typed_async_client.h | 3 + source/common/http/async_client_impl.h | 11 ++- .../grpc/grpc_access_log_impl.cc | 39 +++++++-- .../grpc/grpc_access_log_impl.h | 27 ++++-- .../access_loggers/grpc/http_config.cc | 6 +- .../grpc/http_grpc_access_log_impl.cc | 7 +- .../grpc/http_grpc_access_log_impl.h | 4 +- .../access_loggers/grpc/tcp_config.cc | 3 +- .../grpc/tcp_grpc_access_log_impl.cc | 7 +- .../grpc/tcp_grpc_access_log_impl.h | 4 +- test/common/http/async_client_impl_test.cc | 6 ++ .../grpc/grpc_access_log_impl_test.cc | 82 +++++++++++++++++-- .../grpc/http_grpc_access_log_impl_test.cc | 9 +- test/mocks/grpc/mocks.h | 1 + test/mocks/http/mocks.h | 1 + 28 files changed, 243 insertions(+), 68 deletions(-) create mode 100644 docs/root/configuration/observability/access_log/access_log.rst create mode 100644 docs/root/configuration/observability/access_log/overview.rst create mode 100644 docs/root/configuration/observability/access_log/stats.rst rename docs/root/configuration/observability/{access_log.rst => access_log/usage.rst} (100%) diff --git a/docs/root/configuration/observability/access_log/access_log.rst b/docs/root/configuration/observability/access_log/access_log.rst new file mode 100644 index 000000000000..f1d24152257a --- /dev/null +++ b/docs/root/configuration/observability/access_log/access_log.rst @@ -0,0 +1,9 @@ +Access Logs +=========== + +.. toctree:: + :maxdepth: 2 + + overview + stats + usage diff --git a/docs/root/configuration/observability/access_log/overview.rst b/docs/root/configuration/observability/access_log/overview.rst new file mode 100644 index 000000000000..33b29018b912 --- /dev/null +++ b/docs/root/configuration/observability/access_log/overview.rst @@ -0,0 +1,6 @@ +Overview +======== + +* Access logging :ref:`architecture overview ` +* :ref:`Configuration overview ` +* :ref:`v2 API reference ` diff --git a/docs/root/configuration/observability/access_log/stats.rst b/docs/root/configuration/observability/access_log/stats.rst new file mode 100644 index 000000000000..9ea5d26ccec0 --- /dev/null +++ b/docs/root/configuration/observability/access_log/stats.rst @@ -0,0 +1,35 @@ +.. _config_access_log_stats: + +Statistics +========== + +Currently only the gRPC and file based access logs have statistics. + +gRPC access log statistics +-------------------------- + +The gRPC access log has statistics rooted at *access_logs.grpc_access_log.* with the following statistics: + +.. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + logs_written, Counter, Total log entries sent to the logger which were not dropped. This does not imply the logs have been flushed to the gRPC endpoint yet. 
+ logs_dropped, Counter, Total log entries dropped due to network or HTTP/2 back up. + + +File access log statistics +-------------------------- + +The file access log has statistics rooted at the *filesystem.* namespace. + +.. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + write_buffered, Counter, Total number of times file data is moved to Envoy's internal flush buffer + write_completed, Counter, Total number of times a file was successfully written + write_failed, Counter, Total number of times an error occurred during a file write operation + flushed_by_timer, Counter, Total number of times internal flush buffers are written to a file due to flush timeout + reopen_failed, Counter, Total number of times a file was failed to be opened + write_total_buffered, Gauge, Current total size of internal flush buffer in bytes diff --git a/docs/root/configuration/observability/access_log.rst b/docs/root/configuration/observability/access_log/usage.rst similarity index 100% rename from docs/root/configuration/observability/access_log.rst rename to docs/root/configuration/observability/access_log/usage.rst diff --git a/docs/root/configuration/observability/observability.rst b/docs/root/configuration/observability/observability.rst index ae77507e99f3..1e314881d6ba 100644 --- a/docs/root/configuration/observability/observability.rst +++ b/docs/root/configuration/observability/observability.rst @@ -6,4 +6,4 @@ Observability statistics application_logging - access_log + access_log/access_log.rst diff --git a/docs/root/configuration/observability/statistics.rst b/docs/root/configuration/observability/statistics.rst index a3a3feab1a9c..9e99227518d3 100644 --- a/docs/root/configuration/observability/statistics.rst +++ b/docs/root/configuration/observability/statistics.rst @@ -32,20 +32,3 @@ Server related statistics are rooted at *server.* with following statistics: static_unknown_fields, Counter, Number of messages in static configuration with unknown fields dynamic_unknown_fields, Counter, Number of messages in dynamic configuration with unknown fields -.. _filesystem_stats: - -File system ------------ - -Statistics related to file system are emitted in the *filesystem.* namespace. - -.. csv-table:: - :header: Name, Type, Description - :widths: 1, 1, 2 - - write_buffered, Counter, Total number of times file data is moved to Envoy's internal flush buffer - write_completed, Counter, Total number of times a file was successfully written - write_failed, Counter, Total number of times an error occurred during a file write operation - flushed_by_timer, Counter, Total number of times internal flush buffers are written to a file due to flush timeout - reopen_failed, Counter, Total number of times a file was failed to be opened - write_total_buffered, Gauge, Current total size of internal flush buffer in bytes diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index d4b992a99486..012f1eb07bbb 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -5,6 +5,7 @@ Changes ------- * access loggers: added GRPC_STATUS operator on logging format. +* access loggers: applied existing buffer limits to the non-google gRPC access logs, as well as :ref:`stats ` for logged / dropped logs. * access loggers: extened specifier for FilterStateFormatter to output :ref:`unstructured log string `. * dynamic forward proxy: added :ref:`SNI based dynamic forward proxy ` support. 
* fault: added support for controlling the percentage of requests that abort, delay and response rate limits faults diff --git a/docs/root/version_history/v1.12.0.rst b/docs/root/version_history/v1.12.0.rst index da2930e8e479..9bc7510639ec 100644 --- a/docs/root/version_history/v1.12.0.rst +++ b/docs/root/version_history/v1.12.0.rst @@ -8,7 +8,7 @@ Changes * access log: added :ref:`buffering ` and :ref:`periodical flushing ` support to gRPC access logger. Defaults to 16KB buffer and flushing every 1 second. * access log: added DOWNSTREAM_DIRECT_REMOTE_ADDRESS and DOWNSTREAM_DIRECT_REMOTE_ADDRESS_WITHOUT_PORT :ref:`access log formatters ` and gRPC access logger. * access log: gRPC Access Log Service (ALS) support added for :ref:`TCP access logs `. -* access log: reintroduced :ref:`filesystem ` stats and added the `write_failed` counter to track failed log writes. +* access log: reintroduced :ref:`filesystem ` stats and added the `write_failed` counter to track failed log writes. * admin: added ability to configure listener :ref:`socket options `. * admin: added config dump support for Secret Discovery Service :ref:`SecretConfigDump `. * admin: added support for :ref:`draining ` listeners via admin interface. diff --git a/include/envoy/grpc/async_client.h b/include/envoy/grpc/async_client.h index c7ab0d219b89..b2005723fab2 100644 --- a/include/envoy/grpc/async_client.h +++ b/include/envoy/grpc/async_client.h @@ -58,6 +58,12 @@ class RawAsyncStream { * stream object and no further callbacks will be invoked. */ virtual void resetStream() PURE; + + /*** + * @returns if the stream has enough buffered outbound data to be over the configured buffer + * limits + */ + virtual bool isAboveWriteBufferHighWatermark() const PURE; }; class RawAsyncRequestCallbacks { diff --git a/include/envoy/http/async_client.h b/include/envoy/http/async_client.h index 9e95df1cc2f7..65aa72dc8c74 100644 --- a/include/envoy/http/async_client.h +++ b/include/envoy/http/async_client.h @@ -150,6 +150,12 @@ class AsyncClient { * Reset the stream. 
*/ virtual void reset() PURE; + + /*** + * @returns if the stream has enough buffered outbound data to be over the configured buffer + * limits + */ + virtual bool isAboveWriteBufferHighWatermark() const PURE; }; virtual ~AsyncClient() = default; diff --git a/source/common/grpc/async_client_impl.h b/source/common/grpc/async_client_impl.h index f27cf8436431..750183afd4f5 100644 --- a/source/common/grpc/async_client_impl.h +++ b/source/common/grpc/async_client_impl.h @@ -65,6 +65,9 @@ class AsyncStreamImpl : public RawAsyncStream, void sendMessageRaw(Buffer::InstancePtr&& request, bool end_stream) override; void closeStream() override; void resetStream() override; + bool isAboveWriteBufferHighWatermark() const override { + return stream_ && stream_->isAboveWriteBufferHighWatermark(); + } bool hasResetStream() const { return http_reset_; } diff --git a/source/common/grpc/google_async_client_impl.cc b/source/common/grpc/google_async_client_impl.cc index ea22f07c9451..9b23529c114c 100644 --- a/source/common/grpc/google_async_client_impl.cc +++ b/source/common/grpc/google_async_client_impl.cc @@ -109,7 +109,7 @@ AsyncRequest* GoogleAsyncClientImpl::sendRaw(absl::string_view service_full_name std::unique_ptr grpc_stream{async_request}; grpc_stream->initialize(true); - if (grpc_stream->call_failed()) { + if (grpc_stream->callFailed()) { return nullptr; } @@ -125,7 +125,7 @@ RawAsyncStream* GoogleAsyncClientImpl::startRaw(absl::string_view service_full_n callbacks, options); grpc_stream->initialize(false); - if (grpc_stream->call_failed()) { + if (grpc_stream->callFailed()) { return nullptr; } @@ -412,16 +412,16 @@ GoogleAsyncRequestImpl::GoogleAsyncRequestImpl( void GoogleAsyncRequestImpl::initialize(bool buffer_body_for_retry) { GoogleAsyncStreamImpl::initialize(buffer_body_for_retry); - if (this->call_failed()) { + if (callFailed()) { return; } - this->sendMessageRaw(std::move(request_), true); + sendMessageRaw(std::move(request_), true); } void GoogleAsyncRequestImpl::cancel() { current_span_->setTag(Tracing::Tags::get().Status, Tracing::Tags::get().Canceled); current_span_->finishSpan(); - this->resetStream(); + resetStream(); } void GoogleAsyncRequestImpl::onCreateInitialMetadata(Http::RequestHeaderMap& metadata) { diff --git a/source/common/grpc/google_async_client_impl.h b/source/common/grpc/google_async_client_impl.h index 19b2059420d6..3f4c48c3e911 100644 --- a/source/common/grpc/google_async_client_impl.h +++ b/source/common/grpc/google_async_client_impl.h @@ -219,9 +219,11 @@ class GoogleAsyncStreamImpl : public RawAsyncStream, void sendMessageRaw(Buffer::InstancePtr&& request, bool end_stream) override; void closeStream() override; void resetStream() override; + // The GoogleAsyncClientImpl doesn't do Envoy watermark based flow control. 
+ bool isAboveWriteBufferHighWatermark() const override { return false; } protected: - bool call_failed() const { return call_failed_; } + bool callFailed() const { return call_failed_; } private: // Process queued events in completed_ops_ with handleOpCompletion() on diff --git a/source/common/grpc/typed_async_client.h b/source/common/grpc/typed_async_client.h index 72907e42e611..39435f65c827 100644 --- a/source/common/grpc/typed_async_client.h +++ b/source/common/grpc/typed_async_client.h @@ -38,6 +38,9 @@ template class AsyncStream /* : public RawAsyncStream */ { } void closeStream() { stream_->closeStream(); } void resetStream() { stream_->resetStream(); } + bool isAboveWriteBufferHighWatermark() const { + return stream_->isAboveWriteBufferHighWatermark(); + } AsyncStream* operator->() { return this; } AsyncStream operator=(RawAsyncStream* stream) { stream_ = stream; diff --git a/source/common/http/async_client_impl.h b/source/common/http/async_client_impl.h index 143fd8c29541..77f05f8c3bc6 100644 --- a/source/common/http/async_client_impl.h +++ b/source/common/http/async_client_impl.h @@ -54,9 +54,7 @@ class AsyncClientImpl final : public AsyncClient { // Http::AsyncClient Request* send(RequestMessagePtr&& request, Callbacks& callbacks, const AsyncClient::RequestOptions& options) override; - Stream* start(StreamCallbacks& callbacks, const AsyncClient::StreamOptions& options) override; - Event::Dispatcher& dispatcher() override { return dispatcher_; } private: @@ -94,6 +92,7 @@ class AsyncStreamImpl : public AsyncClient::Stream, void sendData(Buffer::Instance& data, bool end_stream) override; void sendTrailers(RequestTrailerMap& trailers) override; void reset() override; + bool isAboveWriteBufferHighWatermark() const override { return high_watermark_calls_ > 0; } protected: bool remoteClosed() { return remote_closed_; } @@ -371,8 +370,11 @@ class AsyncStreamImpl : public AsyncClient::Stream, void encodeData(Buffer::Instance& data, bool end_stream) override; void encodeTrailers(ResponseTrailerMapPtr&& trailers) override; void encodeMetadata(MetadataMapPtr&&) override {} - void onDecoderFilterAboveWriteBufferHighWatermark() override {} - void onDecoderFilterBelowWriteBufferLowWatermark() override {} + void onDecoderFilterAboveWriteBufferHighWatermark() override { ++high_watermark_calls_; } + void onDecoderFilterBelowWriteBufferLowWatermark() override { + ASSERT(high_watermark_calls_ != 0); + --high_watermark_calls_; + } void addDownstreamWatermarkCallbacks(DownstreamWatermarkCallbacks&) override {} void removeDownstreamWatermarkCallbacks(DownstreamWatermarkCallbacks&) override {} void setDecoderBufferLimit(uint32_t) override {} @@ -396,6 +398,7 @@ class AsyncStreamImpl : public AsyncClient::Stream, Tracing::NullSpan active_span_; const Tracing::Config& tracing_config_; std::shared_ptr route_; + uint32_t high_watermark_calls_{}; bool local_closed_{}; bool remote_closed_{}; Buffer::InstancePtr buffered_body_; diff --git a/source/extensions/access_loggers/grpc/grpc_access_log_impl.cc b/source/extensions/access_loggers/grpc/grpc_access_log_impl.cc index 86a4a50bcbf8..6b980a358677 100644 --- a/source/extensions/access_loggers/grpc/grpc_access_log_impl.cc +++ b/source/extensions/access_loggers/grpc/grpc_access_log_impl.cc @@ -25,23 +25,43 @@ void GrpcAccessLoggerImpl::LocalStream::onRemoteClose(Grpc::Status::GrpcStatus, GrpcAccessLoggerImpl::GrpcAccessLoggerImpl(Grpc::RawAsyncClientPtr&& client, std::string log_name, std::chrono::milliseconds buffer_flush_interval_msec, - uint64_t 
buffer_size_bytes, + uint64_t max_buffer_size_bytes, Event::Dispatcher& dispatcher, - const LocalInfo::LocalInfo& local_info) - : client_(std::move(client)), log_name_(log_name), + const LocalInfo::LocalInfo& local_info, + Stats::Scope& scope) + : stats_({ALL_GRPC_ACCESS_LOGGER_STATS( + POOL_COUNTER_PREFIX(scope, "access_logs.grpc_access_log."))}), + client_(std::move(client)), log_name_(log_name), buffer_flush_interval_msec_(buffer_flush_interval_msec), flush_timer_(dispatcher.createTimer([this]() { flush(); flush_timer_->enableTimer(buffer_flush_interval_msec_); })), - buffer_size_bytes_(buffer_size_bytes), local_info_(local_info) { + max_buffer_size_bytes_(max_buffer_size_bytes), local_info_(local_info) { flush_timer_->enableTimer(buffer_flush_interval_msec_); } +bool GrpcAccessLoggerImpl::canLogMore() { + if (max_buffer_size_bytes_ == 0 || approximate_message_size_bytes_ < max_buffer_size_bytes_) { + stats_.logs_written_.inc(); + return true; + } + flush(); + if (approximate_message_size_bytes_ < max_buffer_size_bytes_) { + stats_.logs_written_.inc(); + return true; + } + stats_.logs_dropped_.inc(); + return false; +} + void GrpcAccessLoggerImpl::log(envoy::data::accesslog::v3::HTTPAccessLogEntry&& entry) { + if (!canLogMore()) { + return; + } approximate_message_size_bytes_ += entry.ByteSizeLong(); message_.mutable_http_logs()->mutable_log_entry()->Add(std::move(entry)); - if (approximate_message_size_bytes_ >= buffer_size_bytes_) { + if (approximate_message_size_bytes_ >= max_buffer_size_bytes_) { flush(); } } @@ -49,7 +69,7 @@ void GrpcAccessLoggerImpl::log(envoy::data::accesslog::v3::HTTPAccessLogEntry&& void GrpcAccessLoggerImpl::log(envoy::data::accesslog::v3::TCPAccessLogEntry&& entry) { approximate_message_size_bytes_ += entry.ByteSizeLong(); message_.mutable_tcp_logs()->mutable_log_entry()->Add(std::move(entry)); - if (approximate_message_size_bytes_ >= buffer_size_bytes_) { + if (approximate_message_size_bytes_ >= max_buffer_size_bytes_) { flush(); } } @@ -76,6 +96,9 @@ void GrpcAccessLoggerImpl::flush() { } if (stream_->stream_ != nullptr) { + if (stream_->stream_->isAboveWriteBufferHighWatermark()) { + return; + } stream_->stream_->sendMessage(message_, false); } else { // Clear out the stream data due to stream creation failure. @@ -99,7 +122,7 @@ GrpcAccessLoggerCacheImpl::GrpcAccessLoggerCacheImpl(Grpc::AsyncClientManager& a GrpcAccessLoggerSharedPtr GrpcAccessLoggerCacheImpl::getOrCreateLogger( const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig& config, - GrpcAccessLoggerType logger_type) { + GrpcAccessLoggerType logger_type, Stats::Scope& scope) { // TODO(euroelessar): Consider cleaning up loggers. 
auto& cache = tls_slot_->getTyped(); const auto cache_key = std::make_pair(MessageUtil::hash(config), logger_type); @@ -113,7 +136,7 @@ GrpcAccessLoggerSharedPtr GrpcAccessLoggerCacheImpl::getOrCreateLogger( factory->create(), config.log_name(), std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(config, buffer_flush_interval, 1000)), PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, buffer_size_bytes, 16384), cache.dispatcher_, - local_info_); + local_info_, scope); cache.access_loggers_.emplace(cache_key, logger); return logger; } diff --git a/source/extensions/access_loggers/grpc/grpc_access_log_impl.h b/source/extensions/access_loggers/grpc/grpc_access_log_impl.h index 4dd4b5948586..c913522cd967 100644 --- a/source/extensions/access_loggers/grpc/grpc_access_log_impl.h +++ b/source/extensions/access_loggers/grpc/grpc_access_log_impl.h @@ -21,7 +21,19 @@ namespace Extensions { namespace AccessLoggers { namespace GrpcCommon { -// TODO(mattklein123): Stats +/** + * All stats for the grpc access logger. @see stats_macros.h + */ +#define ALL_GRPC_ACCESS_LOGGER_STATS(COUNTER) \ + COUNTER(logs_written) \ + COUNTER(logs_dropped) + +/** + * Wrapper struct for the access log stats. @see stats_macros.h + */ +struct GrpcAccessLoggerStats { + ALL_GRPC_ACCESS_LOGGER_STATS(GENERATE_COUNTER_STRUCT) +}; /** * Interface for an access logger. The logger provides abstraction on top of gRPC stream, deals with @@ -63,7 +75,7 @@ class GrpcAccessLoggerCache { */ virtual GrpcAccessLoggerSharedPtr getOrCreateLogger( const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig& config, - GrpcAccessLoggerType logger_type) PURE; + GrpcAccessLoggerType logger_type, Stats::Scope& scope) PURE; }; using GrpcAccessLoggerCacheSharedPtr = std::shared_ptr; @@ -72,8 +84,8 @@ class GrpcAccessLoggerImpl : public GrpcAccessLogger { public: GrpcAccessLoggerImpl(Grpc::RawAsyncClientPtr&& client, std::string log_name, std::chrono::milliseconds buffer_flush_interval_msec, - uint64_t buffer_size_bytes, Event::Dispatcher& dispatcher, - const LocalInfo::LocalInfo& local_info); + uint64_t max_buffer_size_bytes, Event::Dispatcher& dispatcher, + const LocalInfo::LocalInfo& local_info, Stats::Scope& scope); // Extensions::AccessLoggers::GrpcCommon::GrpcAccessLogger void log(envoy::data::accesslog::v3::HTTPAccessLogEntry&& entry) override; @@ -98,13 +110,16 @@ class GrpcAccessLoggerImpl : public GrpcAccessLogger { void flush(); + bool canLogMore(); + + GrpcAccessLoggerStats stats_; Grpc::AsyncClient client_; const std::string log_name_; const std::chrono::milliseconds buffer_flush_interval_msec_; const Event::TimerPtr flush_timer_; - const uint64_t buffer_size_bytes_; + const uint64_t max_buffer_size_bytes_; uint64_t approximate_message_size_bytes_ = 0; envoy::service::accesslog::v3::StreamAccessLogsMessage message_; absl::optional stream_; @@ -119,7 +134,7 @@ class GrpcAccessLoggerCacheImpl : public Singleton::Instance, public GrpcAccessL GrpcAccessLoggerSharedPtr getOrCreateLogger( const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig& config, - GrpcAccessLoggerType logger_type) override; + GrpcAccessLoggerType logger_type, Stats::Scope& scope) override; private: /** diff --git a/source/extensions/access_loggers/grpc/http_config.cc b/source/extensions/access_loggers/grpc/http_config.cc index 6655c9f61515..830ba54b2d0e 100644 --- a/source/extensions/access_loggers/grpc/http_config.cc +++ b/source/extensions/access_loggers/grpc/http_config.cc @@ -30,9 +30,9 @@ 
HttpGrpcAccessLogFactory::createAccessLogInstance(const Protobuf::Message& confi const envoy::extensions::access_loggers::grpc::v3::HttpGrpcAccessLogConfig&>( config, context.messageValidationVisitor()); - return std::make_shared( - std::move(filter), proto_config, context.threadLocal(), - GrpcCommon::getGrpcAccessLoggerCacheSingleton(context)); + return std::make_shared(std::move(filter), proto_config, context.threadLocal(), + GrpcCommon::getGrpcAccessLoggerCacheSingleton(context), + context.scope()); } ProtobufTypes::MessagePtr HttpGrpcAccessLogFactory::createEmptyConfigProto() { diff --git a/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.cc b/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.cc index 6f07a77a44ec..c08606f30c30 100644 --- a/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.cc +++ b/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.cc @@ -22,8 +22,9 @@ HttpGrpcAccessLog::ThreadLocalLogger::ThreadLocalLogger( HttpGrpcAccessLog::HttpGrpcAccessLog( AccessLog::FilterPtr&& filter, envoy::extensions::access_loggers::grpc::v3::HttpGrpcAccessLogConfig config, - ThreadLocal::SlotAllocator& tls, GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache) - : Common::ImplBase(std::move(filter)), config_(std::move(config)), + ThreadLocal::SlotAllocator& tls, GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache, + Stats::Scope& scope) + : Common::ImplBase(std::move(filter)), scope_(scope), config_(std::move(config)), tls_slot_(tls.allocateSlot()), access_logger_cache_(std::move(access_logger_cache)) { for (const auto& header : config_.additional_request_headers_to_log()) { request_headers_to_log_.emplace_back(header); @@ -39,7 +40,7 @@ HttpGrpcAccessLog::HttpGrpcAccessLog( tls_slot_->set([this](Event::Dispatcher&) { return std::make_shared(access_logger_cache_->getOrCreateLogger( - config_.common_config(), GrpcCommon::GrpcAccessLoggerType::HTTP)); + config_.common_config(), GrpcCommon::GrpcAccessLoggerType::HTTP, scope_)); }); } diff --git a/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.h b/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.h index 7d15bbb7dbae..0c1a80180fa9 100644 --- a/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.h +++ b/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.h @@ -30,7 +30,8 @@ class HttpGrpcAccessLog : public Common::ImplBase { HttpGrpcAccessLog(AccessLog::FilterPtr&& filter, envoy::extensions::access_loggers::grpc::v3::HttpGrpcAccessLogConfig config, ThreadLocal::SlotAllocator& tls, - GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache); + GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache, + Stats::Scope& scope); private: /** @@ -48,6 +49,7 @@ class HttpGrpcAccessLog : public Common::ImplBase { const Http::ResponseTrailerMap& response_trailers, const StreamInfo::StreamInfo& stream_info) override; + Stats::Scope& scope_; const envoy::extensions::access_loggers::grpc::v3::HttpGrpcAccessLogConfig config_; const ThreadLocal::SlotPtr tls_slot_; const GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache_; diff --git a/source/extensions/access_loggers/grpc/tcp_config.cc b/source/extensions/access_loggers/grpc/tcp_config.cc index 80d985dce2d6..268a4653a35b 100644 --- a/source/extensions/access_loggers/grpc/tcp_config.cc +++ b/source/extensions/access_loggers/grpc/tcp_config.cc @@ -31,7 +31,8 @@ TcpGrpcAccessLogFactory::createAccessLogInstance(const Protobuf::Message& config config, 
context.messageValidationVisitor()); return std::make_shared(std::move(filter), proto_config, context.threadLocal(), - GrpcCommon::getGrpcAccessLoggerCacheSingleton(context)); + GrpcCommon::getGrpcAccessLoggerCacheSingleton(context), + context.scope()); } ProtobufTypes::MessagePtr TcpGrpcAccessLogFactory::createEmptyConfigProto() { diff --git a/source/extensions/access_loggers/grpc/tcp_grpc_access_log_impl.cc b/source/extensions/access_loggers/grpc/tcp_grpc_access_log_impl.cc index 2ff4f524e447..a77d182b03b4 100644 --- a/source/extensions/access_loggers/grpc/tcp_grpc_access_log_impl.cc +++ b/source/extensions/access_loggers/grpc/tcp_grpc_access_log_impl.cc @@ -20,12 +20,13 @@ TcpGrpcAccessLog::ThreadLocalLogger::ThreadLocalLogger(GrpcCommon::GrpcAccessLog TcpGrpcAccessLog::TcpGrpcAccessLog( AccessLog::FilterPtr&& filter, envoy::extensions::access_loggers::grpc::v3::TcpGrpcAccessLogConfig config, - ThreadLocal::SlotAllocator& tls, GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache) - : Common::ImplBase(std::move(filter)), config_(std::move(config)), + ThreadLocal::SlotAllocator& tls, GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache, + Stats::Scope& scope) + : Common::ImplBase(std::move(filter)), scope_(scope), config_(std::move(config)), tls_slot_(tls.allocateSlot()), access_logger_cache_(std::move(access_logger_cache)) { tls_slot_->set([this](Event::Dispatcher&) { return std::make_shared(access_logger_cache_->getOrCreateLogger( - config_.common_config(), GrpcCommon::GrpcAccessLoggerType::TCP)); + config_.common_config(), GrpcCommon::GrpcAccessLoggerType::TCP, scope_)); }); } diff --git a/source/extensions/access_loggers/grpc/tcp_grpc_access_log_impl.h b/source/extensions/access_loggers/grpc/tcp_grpc_access_log_impl.h index 8f9369da7586..7a7260df7248 100644 --- a/source/extensions/access_loggers/grpc/tcp_grpc_access_log_impl.h +++ b/source/extensions/access_loggers/grpc/tcp_grpc_access_log_impl.h @@ -30,7 +30,8 @@ class TcpGrpcAccessLog : public Common::ImplBase { TcpGrpcAccessLog(AccessLog::FilterPtr&& filter, envoy::extensions::access_loggers::grpc::v3::TcpGrpcAccessLogConfig config, ThreadLocal::SlotAllocator& tls, - GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache); + GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache, + Stats::Scope& scope); private: /** @@ -48,6 +49,7 @@ class TcpGrpcAccessLog : public Common::ImplBase { const Http::ResponseTrailerMap& response_trailers, const StreamInfo::StreamInfo& stream_info) override; + Stats::Scope& scope_; const envoy::extensions::access_loggers::grpc::v3::TcpGrpcAccessLogConfig config_; const ThreadLocal::SlotPtr tls_slot_; const GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache_; diff --git a/test/common/http/async_client_impl_test.cc b/test/common/http/async_client_impl_test.cc index fc46f8a5ef43..66f8e3f3e94d 100644 --- a/test/common/http/async_client_impl_test.cc +++ b/test/common/http/async_client_impl_test.cc @@ -1316,7 +1316,13 @@ TEST_F(AsyncClientImplTest, WatermarkCallbacks) { Http::StreamDecoderFilterCallbacks* filter_callbacks = static_cast(stream); filter_callbacks->onDecoderFilterAboveWriteBufferHighWatermark(); + EXPECT_TRUE(stream->isAboveWriteBufferHighWatermark()); + filter_callbacks->onDecoderFilterAboveWriteBufferHighWatermark(); + EXPECT_TRUE(stream->isAboveWriteBufferHighWatermark()); + filter_callbacks->onDecoderFilterBelowWriteBufferLowWatermark(); + EXPECT_TRUE(stream->isAboveWriteBufferHighWatermark()); 
filter_callbacks->onDecoderFilterBelowWriteBufferLowWatermark(); + EXPECT_FALSE(stream->isAboveWriteBufferHighWatermark()); EXPECT_CALL(stream_callbacks_, onReset()); } diff --git a/test/extensions/access_loggers/grpc/grpc_access_log_impl_test.cc b/test/extensions/access_loggers/grpc/grpc_access_log_impl_test.cc index 1a5ac2d7f61e..7dfcbad143ad 100644 --- a/test/extensions/access_loggers/grpc/grpc_access_log_impl_test.cc +++ b/test/extensions/access_loggers/grpc/grpc_access_log_impl_test.cc @@ -21,6 +21,7 @@ using testing::_; using testing::InSequence; using testing::Invoke; using testing::NiceMock; +using testing::Return; namespace Envoy { namespace Extensions { @@ -39,9 +40,9 @@ class GrpcAccessLoggerImplTest : public testing::Test { void initLogger(std::chrono::milliseconds buffer_flush_interval_msec, size_t buffer_size_bytes) { timer_ = new Event::MockTimer(&dispatcher_); EXPECT_CALL(*timer_, enableTimer(buffer_flush_interval_msec, _)); - logger_ = std::make_unique(Grpc::RawAsyncClientPtr{async_client_}, - log_name_, buffer_flush_interval_msec, - buffer_size_bytes, dispatcher_, local_info_); + logger_ = std::make_unique( + Grpc::RawAsyncClientPtr{async_client_}, log_name_, buffer_flush_interval_msec, + buffer_size_bytes, dispatcher_, local_info_, stats_store_); } void expectStreamStart(MockAccessLogStream& stream, AccessLogCallbacks** callbacks_to_set) { @@ -57,6 +58,7 @@ class GrpcAccessLoggerImplTest : public testing::Test { void expectStreamMessage(MockAccessLogStream& stream, const std::string& expected_message_yaml) { envoy::service::accesslog::v3::StreamAccessLogsMessage expected_message; TestUtility::loadFromYaml(expected_message_yaml, expected_message); + EXPECT_CALL(stream, isAboveWriteBufferHighWatermark()).WillOnce(Return(false)); EXPECT_CALL(stream, sendMessageRaw_(_, false)) .WillOnce(Invoke([expected_message](Buffer::InstancePtr& request, bool) { envoy::service::accesslog::v3::StreamAccessLogsMessage message; @@ -66,6 +68,7 @@ class GrpcAccessLoggerImplTest : public testing::Test { })); } + Stats::IsolatedStoreImpl stats_store_; std::string log_name_ = "test_log_name"; LocalInfo::MockLocalInfo local_info_; Event::MockTimer* timer_ = nullptr; @@ -100,6 +103,9 @@ TEST_F(GrpcAccessLoggerImplTest, BasicFlow) { envoy::data::accesslog::v3::HTTPAccessLogEntry entry; entry.mutable_request()->set_path("/test/path1"); logger_->log(envoy::data::accesslog::v3::HTTPAccessLogEntry(entry)); + EXPECT_EQ( + 1, + TestUtility::findCounter(stats_store_, "access_logs.grpc_access_log.logs_written")->value()); expectStreamMessage(stream, R"EOF( http_logs: @@ -109,6 +115,9 @@ TEST_F(GrpcAccessLoggerImplTest, BasicFlow) { )EOF"); entry.mutable_request()->set_path("/test/path2"); logger_->log(envoy::data::accesslog::v3::HTTPAccessLogEntry(entry)); + EXPECT_EQ( + 2, + TestUtility::findCounter(stats_store_, "access_logs.grpc_access_log.logs_written")->value()); // Verify that sending an empty response message doesn't do anything bad. 
callbacks->onReceiveMessage( @@ -133,6 +142,60 @@ TEST_F(GrpcAccessLoggerImplTest, BasicFlow) { )EOF"); entry.mutable_request()->set_path("/test/path3"); logger_->log(envoy::data::accesslog::v3::HTTPAccessLogEntry(entry)); + EXPECT_EQ( + 0, + TestUtility::findCounter(stats_store_, "access_logs.grpc_access_log.logs_dropped")->value()); + EXPECT_EQ( + 3, + TestUtility::findCounter(stats_store_, "access_logs.grpc_access_log.logs_written")->value()); +} + +TEST_F(GrpcAccessLoggerImplTest, WatermarksOverrun) { + InSequence s; + initLogger(FlushInterval, 1); + + // Start a stream for the first log. + MockAccessLogStream stream; + AccessLogCallbacks* callbacks; + expectStreamStart(stream, &callbacks); + EXPECT_CALL(local_info_, node()); + + // Fail to flush, so the log stays buffered up. + envoy::data::accesslog::v3::HTTPAccessLogEntry entry; + entry.mutable_request()->set_path("/test/path1"); + EXPECT_CALL(stream, isAboveWriteBufferHighWatermark()).WillOnce(Return(true)); + EXPECT_CALL(stream, sendMessageRaw_(_, false)).Times(0); + logger_->log(envoy::data::accesslog::v3::HTTPAccessLogEntry(entry)); + EXPECT_EQ( + 1, + TestUtility::findCounter(stats_store_, "access_logs.grpc_access_log.logs_written")->value()); + EXPECT_EQ( + 0, + TestUtility::findCounter(stats_store_, "access_logs.grpc_access_log.logs_dropped")->value()); + + // Now canLogMore will fail, and the next log will be dropped. + EXPECT_CALL(stream, isAboveWriteBufferHighWatermark()).WillOnce(Return(true)); + EXPECT_CALL(stream, sendMessageRaw_(_, _)).Times(0); + logger_->log(envoy::data::accesslog::v3::HTTPAccessLogEntry(entry)); + EXPECT_EQ( + 1, + TestUtility::findCounter(stats_store_, "access_logs.grpc_access_log.logs_written")->value()); + EXPECT_EQ( + 1, + TestUtility::findCounter(stats_store_, "access_logs.grpc_access_log.logs_dropped")->value()); + + // Now allow the flush to happen. The stored log will get logged, and the next log will succeed. + EXPECT_CALL(stream, isAboveWriteBufferHighWatermark()).WillOnce(Return(false)); + EXPECT_CALL(stream, sendMessageRaw_(_, _)).Times(1); + EXPECT_CALL(stream, isAboveWriteBufferHighWatermark()).WillOnce(Return(false)); + EXPECT_CALL(stream, sendMessageRaw_(_, _)).Times(1); + logger_->log(envoy::data::accesslog::v3::HTTPAccessLogEntry(entry)); + EXPECT_EQ( + 2, + TestUtility::findCounter(stats_store_, "access_logs.grpc_access_log.logs_written")->value()); + EXPECT_EQ( + 1, + TestUtility::findCounter(stats_store_, "access_logs.grpc_access_log.logs_dropped")->value()); } // Test that stream failure is handled correctly. 
@@ -270,6 +333,7 @@ class GrpcAccessLoggerCacheImplTest : public testing::Test { }; TEST_F(GrpcAccessLoggerCacheImplTest, Deduplication) { + Stats::IsolatedStoreImpl scope; InSequence s; envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig config; @@ -278,25 +342,25 @@ TEST_F(GrpcAccessLoggerCacheImplTest, Deduplication) { expectClientCreation(); GrpcAccessLoggerSharedPtr logger1 = - logger_cache_->getOrCreateLogger(config, GrpcAccessLoggerType::HTTP); - EXPECT_EQ(logger1, logger_cache_->getOrCreateLogger(config, GrpcAccessLoggerType::HTTP)); + logger_cache_->getOrCreateLogger(config, GrpcAccessLoggerType::HTTP, scope); + EXPECT_EQ(logger1, logger_cache_->getOrCreateLogger(config, GrpcAccessLoggerType::HTTP, scope)); // Do not deduplicate different types of logger expectClientCreation(); - EXPECT_NE(logger1, logger_cache_->getOrCreateLogger(config, GrpcAccessLoggerType::TCP)); + EXPECT_NE(logger1, logger_cache_->getOrCreateLogger(config, GrpcAccessLoggerType::TCP, scope)); // Changing log name leads to another logger. config.set_log_name("log-2"); expectClientCreation(); - EXPECT_NE(logger1, logger_cache_->getOrCreateLogger(config, GrpcAccessLoggerType::HTTP)); + EXPECT_NE(logger1, logger_cache_->getOrCreateLogger(config, GrpcAccessLoggerType::HTTP, scope)); config.set_log_name("log-1"); - EXPECT_EQ(logger1, logger_cache_->getOrCreateLogger(config, GrpcAccessLoggerType::HTTP)); + EXPECT_EQ(logger1, logger_cache_->getOrCreateLogger(config, GrpcAccessLoggerType::HTTP, scope)); // Changing cluster name leads to another logger. config.mutable_grpc_service()->mutable_envoy_grpc()->set_cluster_name("cluster-2"); expectClientCreation(); - EXPECT_NE(logger1, logger_cache_->getOrCreateLogger(config, GrpcAccessLoggerType::HTTP)); + EXPECT_NE(logger1, logger_cache_->getOrCreateLogger(config, GrpcAccessLoggerType::HTTP, scope)); } } // namespace diff --git a/test/extensions/access_loggers/grpc/http_grpc_access_log_impl_test.cc b/test/extensions/access_loggers/grpc/http_grpc_access_log_impl_test.cc index 13e0b1641cf0..c39fb8165545 100644 --- a/test/extensions/access_loggers/grpc/http_grpc_access_log_impl_test.cc +++ b/test/extensions/access_loggers/grpc/http_grpc_access_log_impl_test.cc @@ -45,7 +45,7 @@ class MockGrpcAccessLoggerCache : public GrpcCommon::GrpcAccessLoggerCache { // GrpcAccessLoggerCache MOCK_METHOD(GrpcCommon::GrpcAccessLoggerSharedPtr, getOrCreateLogger, (const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig& config, - GrpcCommon::GrpcAccessLoggerType logger_type)); + GrpcCommon::GrpcAccessLoggerType logger_type, Stats::Scope& scope)); }; class HttpGrpcAccessLogTest : public testing::Test { @@ -55,17 +55,17 @@ class HttpGrpcAccessLogTest : public testing::Test { config_.mutable_common_config()->set_log_name("hello_log"); config_.mutable_common_config()->add_filter_state_objects_to_log("string_accessor"); config_.mutable_common_config()->add_filter_state_objects_to_log("serialized"); - EXPECT_CALL(*logger_cache_, getOrCreateLogger(_, _)) + EXPECT_CALL(*logger_cache_, getOrCreateLogger(_, _, _)) .WillOnce( [this](const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig& config, - GrpcCommon::GrpcAccessLoggerType logger_type) { + GrpcCommon::GrpcAccessLoggerType logger_type, Stats::Scope&) { EXPECT_EQ(config.DebugString(), config_.common_config().DebugString()); EXPECT_EQ(GrpcCommon::GrpcAccessLoggerType::HTTP, logger_type); return logger_; }); access_log_ = std::make_unique(AccessLog::FilterPtr{filter_}, config_, tls_, - 
logger_cache_); + logger_cache_, scope_); } void expectLog(const std::string& expected_log_entry_yaml) { @@ -116,6 +116,7 @@ response: {{}} access_log_->log(&request_headers, nullptr, nullptr, stream_info); } + Stats::IsolatedStoreImpl scope_; AccessLog::MockFilter* filter_{new NiceMock()}; NiceMock tls_; envoy::extensions::access_loggers::grpc::v3::HttpGrpcAccessLogConfig config_; diff --git a/test/mocks/grpc/mocks.h b/test/mocks/grpc/mocks.h index bbf8f6d1b60d..0a13f5a5fb08 100644 --- a/test/mocks/grpc/mocks.h +++ b/test/mocks/grpc/mocks.h @@ -36,6 +36,7 @@ class MockAsyncStream : public RawAsyncStream { MOCK_METHOD(void, sendMessageRaw_, (Buffer::InstancePtr & request, bool end_stream)); MOCK_METHOD(void, closeStream, ()); MOCK_METHOD(void, resetStream, ()); + MOCK_METHOD(bool, isAboveWriteBufferHighWatermark, (), (const)); }; template diff --git a/test/mocks/http/mocks.h b/test/mocks/http/mocks.h index 0edb60a75980..e9fb9e45e6d2 100644 --- a/test/mocks/http/mocks.h +++ b/test/mocks/http/mocks.h @@ -382,6 +382,7 @@ class MockAsyncClientStream : public AsyncClient::Stream { MOCK_METHOD(void, sendData, (Buffer::Instance & data, bool end_stream)); MOCK_METHOD(void, sendTrailers, (RequestTrailerMap & trailers)); MOCK_METHOD(void, reset, ()); + MOCK_METHOD(bool, isAboveWriteBufferHighWatermark, (), (const)); }; class MockFilterChainFactoryCallbacks : public Http::FilterChainFactoryCallbacks { From e5286114c3c30e09855908cc502dea48a0a3e9f5 Mon Sep 17 00:00:00 2001 From: Misha Efimov Date: Wed, 29 Apr 2020 17:12:09 -0400 Subject: [PATCH 056/909] Add TextReadout to Envoy::Stats::MetricSnapshot. (#10961) Add TextReadout to Envoy::Stats::MetricSnapshot. Signed-off-by: Misha Efimov --- include/envoy/stats/sink.h | 6 +++++- source/server/server.cc | 6 ++++++ source/server/server.h | 5 +++++ test/common/stats/stat_test_utility.h | 1 + test/mocks/stats/mocks.h | 3 ++- test/server/server_test.cc | 6 ++++++ 6 files changed, 25 insertions(+), 2 deletions(-) diff --git a/include/envoy/stats/sink.h b/include/envoy/stats/sink.h index f0ce08c1dd05..1303c9fd67b8 100644 --- a/include/envoy/stats/sink.h +++ b/include/envoy/stats/sink.h @@ -35,7 +35,11 @@ class MetricSnapshot { * @return a snapshot of all histograms. */ virtual const std::vector>& histograms() PURE; - // TODO(efimki): Add support of text readouts stats. + + /** + * @return a snapshot of all text readouts. 
+ */ + virtual const std::vector>& textReadouts() PURE; }; /** diff --git a/source/server/server.cc b/source/server/server.cc index 1593af869995..b76a2676ac3d 100644 --- a/source/server/server.cc +++ b/source/server/server.cc @@ -162,6 +162,12 @@ MetricSnapshotImpl::MetricSnapshotImpl(Stats::Store& store) { for (const auto& histogram : snapped_histograms_) { histograms_.push_back(*histogram); } + + snapped_text_readouts_ = store.textReadouts(); + text_readouts_.reserve(snapped_text_readouts_.size()); + for (const auto& text_readout : snapped_text_readouts_) { + text_readouts_.push_back(*text_readout); + } } void InstanceUtil::flushMetricsToSinks(const std::list& sinks, diff --git a/source/server/server.h b/source/server/server.h index c5016887700a..107de3eb30e2 100644 --- a/source/server/server.h +++ b/source/server/server.h @@ -381,6 +381,9 @@ class MetricSnapshotImpl : public Stats::MetricSnapshot { const std::vector>& histograms() override { return histograms_; } + const std::vector>& textReadouts() override { + return text_readouts_; + } private: std::vector snapped_counters_; @@ -389,6 +392,8 @@ class MetricSnapshotImpl : public Stats::MetricSnapshot { std::vector> gauges_; std::vector snapped_histograms_; std::vector> histograms_; + std::vector snapped_text_readouts_; + std::vector> text_readouts_; }; } // namespace Server diff --git a/test/common/stats/stat_test_utility.h b/test/common/stats/stat_test_utility.h index b0ccd31763b5..6b46a0f05aea 100644 --- a/test/common/stats/stat_test_utility.h +++ b/test/common/stats/stat_test_utility.h @@ -102,6 +102,7 @@ class TestStore : public IsolatedStoreImpl { Histogram& histogram(const std::string& name, Histogram::Unit unit) { return histogramFromString(name, unit); } + TextReadout& textReadout(const std::string& name) { return textReadoutFromString(name); } // Override the Stats::Store methods for name-based lookup of stats, to use // and update the string-maps in this class. 
Note that IsolatedStoreImpl diff --git a/test/mocks/stats/mocks.h b/test/mocks/stats/mocks.h index 1ee96aaf8a9d..2530132ff53e 100644 --- a/test/mocks/stats/mocks.h +++ b/test/mocks/stats/mocks.h @@ -279,11 +279,12 @@ class MockMetricSnapshot : public MetricSnapshot { MOCK_METHOD(const std::vector&, counters, ()); MOCK_METHOD(const std::vector>&, gauges, ()); MOCK_METHOD(const std::vector>&, histograms, ()); - MOCK_METHOD(const std::vector&, textReadouts, ()); + MOCK_METHOD(const std::vector>&, textReadouts, ()); std::vector counters_; std::vector> gauges_; std::vector> histograms_; + std::vector> text_readouts_; }; class MockSink : public Sink { diff --git a/test/server/server_test.cc b/test/server/server_test.cc index 5d230027e080..38d753a559da 100644 --- a/test/server/server_test.cc +++ b/test/server/server_test.cc @@ -47,6 +47,7 @@ TEST(ServerInstanceUtil, flushHelper) { c.inc(); store.gauge("world", Stats::Gauge::ImportMode::Accumulate).set(5); store.histogram("histogram", Stats::Histogram::Unit::Unspecified); + store.textReadout("text").set("is important"); std::list sinks; InstanceUtil::flushMetricsToSinks(sinks, store); @@ -64,6 +65,10 @@ TEST(ServerInstanceUtil, flushHelper) { ASSERT_EQ(snapshot.gauges().size(), 1); EXPECT_EQ(snapshot.gauges()[0].get().name(), "world"); EXPECT_EQ(snapshot.gauges()[0].get().value(), 5); + + ASSERT_EQ(snapshot.textReadouts().size(), 1); + EXPECT_EQ(snapshot.textReadouts()[0].get().name(), "text"); + EXPECT_EQ(snapshot.textReadouts()[0].get().value(), "is important"); })); c.inc(); InstanceUtil::flushMetricsToSinks(sinks, store); @@ -77,6 +82,7 @@ TEST(ServerInstanceUtil, flushHelper) { EXPECT_TRUE(snapshot.counters().empty()); EXPECT_TRUE(snapshot.gauges().empty()); EXPECT_EQ(snapshot.histograms().size(), 1); + EXPECT_TRUE(snapshot.textReadouts().empty()); })); InstanceUtil::flushMetricsToSinks(sinks, mock_store); } From 941356bfba69840c7da61deb127e18afc0a57b12 Mon Sep 17 00:00:00 2001 From: Snow Pettersen Date: Wed, 29 Apr 2020 14:21:24 -0700 Subject: [PATCH 057/909] docs: add overview of certificate management options (#10937) Signed-off-by: Snow Pettersen --- docs/root/operations/certificates.rst | 15 +++++++++++++++ docs/root/operations/operations.rst | 1 + 2 files changed, 16 insertions(+) create mode 100644 docs/root/operations/certificates.rst diff --git a/docs/root/operations/certificates.rst b/docs/root/operations/certificates.rst new file mode 100644 index 000000000000..0dff05aa9360 --- /dev/null +++ b/docs/root/operations/certificates.rst @@ -0,0 +1,15 @@ +.. _operations_certificates: + +Certificate Management +====================== + +Envoy provides several mechanisms for cert management. At a high level they can be broken into + +1. Static :ref:`CommonTlsContext ` referenced certificates. + These will *not* reload automatically, and requires either a restart of the proxy or + reloading the clusters/listeners that reference them. + :ref:`Hot restarting ` can be used here to pick up the new + certificates without dropping traffic. +2. :ref:`Secret Discovery Service ` referenced certificates. + By using SDS, certificates can either be referenced as files (reloading the certs when the + parent directory is moved) or through an external SDS server that can push new certificates. 
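As a minimal sketch of option 2 (the secret name and file paths below are illustrative, not taken
from any patch in this series), a filesystem-backed SDS secret and the ``CommonTlsContext`` that
consumes it might look like:

.. code-block:: yaml

  # /etc/envoy/sds/server_cert.yaml -- illustrative path for the watched secret file.
  resources:
  - "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret
    name: server_cert
    tls_certificate:
      certificate_chain:
        filename: /etc/envoy/certs/servercert.pem
      private_key:
        filename: /etc/envoy/certs/serverkey.pem

.. code-block:: yaml

  # Downstream TLS context referencing the secret above by name (sketch only).
  common_tls_context:
    tls_certificate_sds_secret_configs:
    - name: server_cert
      sds_config:
        path: /etc/envoy/sds/server_cert.yaml

Rotating the certificate then amounts to writing new files and moving the parent directory into
place; Envoy picks up the new secret without a process restart.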
diff --git a/docs/root/operations/operations.rst b/docs/root/operations/operations.rst index 3f1ada49c1ae..c4dee98ce876 100644 --- a/docs/root/operations/operations.rst +++ b/docs/root/operations/operations.rst @@ -13,4 +13,5 @@ Operations and administration runtime fs_flags traffic_tapping + certificates performance From b962a8ec63bbb2385ef0577de2ce019ec2ae3437 Mon Sep 17 00:00:00 2001 From: antonio Date: Wed, 29 Apr 2020 17:33:45 -0400 Subject: [PATCH 058/909] [http1] Preserve LWS from the middle of HTTP1 header values that require multiple dispatch calls to process (#10886) Correctly preserve linear whitespace in the middle of HTTP1 header values. The fix in 6a95a21 trimmed away both leading and trailing whitespace when accepting header value fragments which can result in inner LWS in header values being stripped away if the LWS lands at the beginning or end of a buffer slice. Signed-off-by: Antonio Vicente --- include/envoy/http/header_map.h | 6 ++ source/common/http/header_map_impl.cc | 9 +++ source/common/http/http1/codec_impl.cc | 15 ++++- test/common/http/header_map_impl_test.cc | 13 ++++ test/common/http/http1/codec_impl_test.cc | 62 ++++++++++++++++--- test/integration/protocol_integration_test.cc | 36 +++++++++++ 6 files changed, 129 insertions(+), 12 deletions(-) diff --git a/include/envoy/http/header_map.h b/include/envoy/http/header_map.h index 512b59c35445..72257770d09e 100644 --- a/include/envoy/http/header_map.h +++ b/include/envoy/http/header_map.h @@ -148,6 +148,12 @@ class HeaderString { absl::get(buffer_).begin(), unary_op); } + /** + * Trim trailing whitespaces from the HeaderString. Only supported by the "Inline" HeaderString + * representation. + */ + void rtrim(); + /** * Get an absl::string_view. It will NOT be NUL terminated! * diff --git a/source/common/http/header_map_impl.cc b/source/common/http/header_map_impl.cc index 2f407c435a48..1779eef2c304 100644 --- a/source/common/http/header_map_impl.cc +++ b/source/common/http/header_map_impl.cc @@ -87,6 +87,15 @@ void HeaderString::append(const char* data, uint32_t data_size) { get_in_vec(buffer_).insert(get_in_vec(buffer_).end(), data, data + data_size); } +void HeaderString::rtrim() { + ASSERT(type() == Type::Inline); + absl::string_view original = getStringView(); + absl::string_view rtrimmed = StringUtil::rtrim(original); + if (original.size() != rtrimmed.size()) { + get_in_vec(buffer_).resize(rtrimmed.size()); + } +} + absl::string_view HeaderString::getStringView() const { if (type() == Type::Reference) { return get_str_view(buffer_); diff --git a/source/common/http/http1/codec_impl.cc b/source/common/http/http1/codec_impl.cc index 9f5fb06f53d1..348f87324045 100644 --- a/source/common/http/http1/codec_impl.cc +++ b/source/common/http/http1/codec_impl.cc @@ -457,6 +457,10 @@ void ConnectionImpl::completeLastHeader() { auto& headers_or_trailers = headersOrTrailers(); if (!current_header_field_.empty()) { current_header_field_.inlineTransform([](char c) { return absl::ascii_tolower(c); }); + // Strip trailing whitespace of the current header value if any. Leading whitespace was trimmed + // in ConnectionImpl::onHeaderValue. 
http_parser does not strip leading or trailing whitespace + // as the spec requires: https://tools.ietf.org/html/rfc7230#section-3.2.4 + current_header_value_.rtrim(); headers_or_trailers.addViaMove(std::move(current_header_field_), std::move(current_header_value_)); } @@ -562,9 +566,7 @@ void ConnectionImpl::onHeaderValue(const char* data, size_t length) { maybeAllocTrailers(); } - // Work around a bug in http_parser where trailing whitespace is not trimmed - // as the spec requires: https://tools.ietf.org/html/rfc7230#section-3.2.4 - const absl::string_view header_value = StringUtil::trim(absl::string_view(data, length)); + absl::string_view header_value{data, length}; if (strict_header_validation_) { if (!Http::HeaderUtility::headerValueIsValid(header_value)) { @@ -576,6 +578,13 @@ void ConnectionImpl::onHeaderValue(const char* data, size_t length) { } header_parsing_state_ = HeaderParsingState::Value; + if (current_header_value_.empty()) { + // Strip leading whitespace if the current header value input contains the first bytes of the + // encoded header value. Trailing whitespace is stripped once the full header value is known in + // ConnectionImpl::completeLastHeader. http_parser does not strip leading or trailing whitespace + // as the spec requires: https://tools.ietf.org/html/rfc7230#section-3.2.4 . + header_value = StringUtil::ltrim(header_value); + } current_header_value_.append(header_value.data(), header_value.length()); const uint32_t total = diff --git a/test/common/http/header_map_impl_test.cc b/test/common/http/header_map_impl_test.cc index b233e9e94821..b28c7a016283 100644 --- a/test/common/http/header_map_impl_test.cc +++ b/test/common/http/header_map_impl_test.cc @@ -105,6 +105,19 @@ TEST(HeaderStringTest, All) { EXPECT_EQ("HELLO", string.getStringView()); } + // Inline rtrim removes trailing whitespace only. + { + const std::string data_with_leading_lws = " \t\f\v data"; + const std::string data_with_leading_and_trailing_lws = data_with_leading_lws + " \t\f\v"; + HeaderString string; + string.append(data_with_leading_and_trailing_lws.data(), + data_with_leading_and_trailing_lws.size()); + EXPECT_EQ(data_with_leading_and_trailing_lws, string.getStringView()); + string.rtrim(); + EXPECT_NE(data_with_leading_and_trailing_lws, string.getStringView()); + EXPECT_EQ(data_with_leading_lws, string.getStringView()); + } + // Static clear() does nothing. { std::string static_string("HELLO"); diff --git a/test/common/http/http1/codec_impl_test.cc b/test/common/http/http1/codec_impl_test.cc index 5110bd6ccd93..82a6c76b0f83 100644 --- a/test/common/http/http1/codec_impl_test.cc +++ b/test/common/http/http1/codec_impl_test.cc @@ -44,11 +44,13 @@ std::string createHeaderFragment(int num_headers) { return headers; } -Buffer::OwnedImpl createBufferWithOneByteSlices(absl::string_view input) { +Buffer::OwnedImpl createBufferWithNByteSlices(absl::string_view input, size_t max_slice_size) { Buffer::OwnedImpl buffer; - for (const char& c : input) { - buffer.appendSliceForTest(&c, 1); + for (size_t offset = 0; offset < input.size(); offset += max_slice_size) { + buffer.appendSliceForTest(input.substr(offset, max_slice_size)); } + // Verify that the buffer contains the right number of slices. + ASSERT(buffer.getRawSlices().size() == (input.size() + max_slice_size - 1) / max_slice_size); return buffer; } } // namespace @@ -80,6 +82,12 @@ class Http1ServerConnectionImplTest : public testing::Test { // Then send a response just to clean up. 
void sendAndValidateRequestAndSendResponse(absl::string_view raw_request, const TestHeaderMapImpl& expected_request_headers) { + Buffer::OwnedImpl buffer(raw_request); + sendAndValidateRequestAndSendResponse(buffer, expected_request_headers); + } + + void sendAndValidateRequestAndSendResponse(Buffer::Instance& buffer, + const TestHeaderMapImpl& expected_request_headers) { NiceMock decoder; Http::ResponseEncoder* response_encoder = nullptr; EXPECT_CALL(callbacks_, newStream(_, _)) @@ -88,7 +96,6 @@ class Http1ServerConnectionImplTest : public testing::Test { return decoder; })); EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_request_headers), true)); - Buffer::OwnedImpl buffer(raw_request); codec_->dispatch(buffer); EXPECT_EQ(0U, buffer.length()); response_encoder->encodeHeaders(TestResponseHeaderMapImpl{{":status", "200"}}, true); @@ -401,10 +408,11 @@ TEST_F(Http1ServerConnectionImplTest, ChunkedBodyFragmentedBuffer) { EXPECT_CALL(decoder, decodeData(_, true)); Buffer::OwnedImpl buffer = - createBufferWithOneByteSlices("POST / HTTP/1.1\r\ntransfer-encoding: chunked\r\n\r\n" - "6\r\nHello \r\n" - "5\r\nWorld\r\n" - "0\r\n\r\n"); + createBufferWithNByteSlices("POST / HTTP/1.1\r\ntransfer-encoding: chunked\r\n\r\n" + "6\r\nHello \r\n" + "5\r\nWorld\r\n" + "0\r\n\r\n", + 1); codec_->dispatch(buffer); EXPECT_EQ(0U, buffer.length()); } @@ -490,6 +498,42 @@ TEST_F(Http1ServerConnectionImplTest, HostWithLWS) { "GET / HTTP/1.1\r\nHost: host \r\n\r\n", expected_headers); } +// Regression test for https://github.com/envoyproxy/envoy/issues/10270. Linear whitespace at the +// beginning and end of a header value should be stripped. Whitespace in the middle should be +// preserved. +TEST_F(Http1ServerConnectionImplTest, InnerLWSIsPreserved) { + initialize(); + + // Header with many spaces surrounded by non-whitespace characters to ensure that dispatching is + // split across multiple dispatch calls. The threshold used here comes from Envoy preferring 16KB + // reads, but the important part is that the header value is split such that the pieces have + // leading and trailing whitespace characters. 
+ const std::string header_value_with_inner_lws = "v" + std::string(32 * 1024, ' ') + "v"; + TestHeaderMapImpl expected_headers{{":authority", "host"}, + {":path", "/"}, + {":method", "GET"}, + {"header_field", header_value_with_inner_lws}}; + + { + // Regression test spaces in the middle are preserved + Buffer::OwnedImpl header_buffer = createBufferWithNByteSlices( + "GET / HTTP/1.1\r\nHost: host\r\nheader_field: " + header_value_with_inner_lws + "\r\n\r\n", + 16 * 1024); + EXPECT_EQ(3, header_buffer.getRawSlices().size()); + sendAndValidateRequestAndSendResponse(header_buffer, expected_headers); + } + + { + // Regression test spaces before and after are removed + Buffer::OwnedImpl header_buffer = createBufferWithNByteSlices( + "GET / HTTP/1.1\r\nHost: host\r\nheader_field: " + header_value_with_inner_lws + + " \r\n\r\n", + 16 * 1024); + EXPECT_EQ(3, header_buffer.getRawSlices().size()); + sendAndValidateRequestAndSendResponse(header_buffer, expected_headers); + } +} + TEST_F(Http1ServerConnectionImplTest, Http10) { initialize(); @@ -1104,7 +1148,7 @@ TEST_F(Http1ServerConnectionImplTest, PostWithContentLengthFragmentedBuffer) { EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data2), true)); Buffer::OwnedImpl buffer = - createBufferWithOneByteSlices("POST / HTTP/1.1\r\ncontent-length: 5\r\n\r\n12345"); + createBufferWithNByteSlices("POST / HTTP/1.1\r\ncontent-length: 5\r\n\r\n12345", 1); codec_->dispatch(buffer); EXPECT_EQ(0U, buffer.length()); } diff --git a/test/integration/protocol_integration_test.cc b/test/integration/protocol_integration_test.cc index f3251638f4a7..6b9590b70e6b 100644 --- a/test/integration/protocol_integration_test.cc +++ b/test/integration/protocol_integration_test.cc @@ -266,6 +266,42 @@ TEST_P(ProtocolIntegrationTest, ResponseWithHostHeader) { response->headers().get(Http::LowerCaseString("host"))->value().getStringView()); } +// Regression test for https://github.com/envoyproxy/envoy/issues/10270 +TEST_P(ProtocolIntegrationTest, LongHeaderValueWithSpaces) { + // Header with at least 20kb of spaces surrounded by non-whitespace characters to ensure that + // dispatching is split across 2 dispatch calls. This threshold comes from Envoy preferring 16KB + // reads, which the buffer rounds up to about 20KB when allocating slices in + // Buffer::OwnedImpl::reserve(). 
+  const std::string long_header_value_with_inner_lws = "v" + std::string(32 * 1024, ' ') + "v";
+
+  initialize();
+  codec_client_ = makeHttpConnection(lookupPort("http"));
+  auto response = codec_client_->makeHeaderOnlyRequest(
+      Http::TestRequestHeaderMapImpl{{":method", "GET"},
+                                     {":path", "/test/long/url"},
+                                     {":scheme", "http"},
+                                     {":authority", "host"},
+                                     {"longrequestvalue", long_header_value_with_inner_lws}});
+  waitForNextUpstreamRequest();
+  EXPECT_EQ(long_header_value_with_inner_lws, upstream_request_->headers()
+                                                  .get(Http::LowerCaseString("longrequestvalue"))
+                                                  ->value()
+                                                  .getStringView());
+  upstream_request_->encodeHeaders(
+      Http::TestResponseHeaderMapImpl{{":status", "200"},
+                                      {"host", "host"},
+                                      {"longresponsevalue", long_header_value_with_inner_lws}},
+      true);
+  response->waitForEndStream();
+  EXPECT_TRUE(response->complete());
+  EXPECT_EQ("200", response->headers().Status()->value().getStringView());
+  EXPECT_EQ("host",
+            response->headers().get(Http::LowerCaseString("host"))->value().getStringView());
+  EXPECT_EQ(
+      long_header_value_with_inner_lws,
+      response->headers().get(Http::LowerCaseString("longresponsevalue"))->value().getStringView());
+}
+
 TEST_P(ProtocolIntegrationTest, Retry) {
   initialize();
   codec_client_ = makeHttpConnection(lookupPort("http"));

From 2fd323bd0e35d14a753f392a4406e1e9f40676b1 Mon Sep 17 00:00:00 2001
From: Snow Pettersen
Date: Wed, 29 Apr 2020 14:56:40 -0700
Subject: [PATCH 059/909] docs: add section re runtime flags usage for risky changes to operations (#10994)

Signed-off-by: Snow Pettersen
---
 docs/root/operations/runtime.rst | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/docs/root/operations/runtime.rst b/docs/root/operations/runtime.rst
index 4fdb15ddf70d..fae342182a66 100644
--- a/docs/root/operations/runtime.rst
+++ b/docs/root/operations/runtime.rst
@@ -6,3 +6,13 @@ Runtime
 :ref:`Runtime configuration ` can be used to modify various server settings
 without restarting Envoy. The runtime settings that are available depend on how the server is
 configured. They are documented in the relevant sections of the :ref:`configuration guide
 `.
+
+Runtime guards are also used as a mechanism to disable new behavior or risky changes not otherwise
+guarded by configuration. Such changes will tend to introduce a runtime guard that can be used to
+disable the new behavior/code path. The names of these runtime guards will be included in the
+release notes alongside an explanation of the change that warranted the runtime guard.
+
+Due to this usage of runtime guards, some deployments might find it useful to set up
+dynamic runtime configuration as a safety measure to be able to quickly disable the new behavior
+without having to revert to an older version of Envoy or redeploy it with a new set of static
+runtime flags.
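To make that suggestion concrete, a bootstrap could combine a static runtime layer with an admin
layer; the guard name below is a placeholder for whatever flag the release notes call out, not one
introduced by this change:

.. code-block:: yaml

  layered_runtime:
    layers:
    - name: static_layer
      static_layer:
        # Placeholder guard name; substitute the one named in the release notes.
        envoy.reloadable_features.example_new_behavior: false
    - name: admin_layer
      admin_layer: {}

Because later layers take precedence, keeping the admin layer last allows the same key to be
flipped back on through the admin interface without redeploying or restarting Envoy.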
From 1221cce68ae973fcdea38f27e6f017e492214e28 Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Wed, 29 Apr 2020 14:58:59 -0700 Subject: [PATCH 060/909] docs: fix name typo for `envoy.retry_priorities.previous_priorities` (#10999) Signed-off-by: Greg Greenway --- .../intro/arch_overview/http/http_connection_management.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/root/intro/arch_overview/http/http_connection_management.rst b/docs/root/intro/arch_overview/http/http_connection_management.rst index 93c4ebb33600..c2b2c76e3e33 100644 --- a/docs/root/intro/arch_overview/http/http_connection_management.rst +++ b/docs/root/intro/arch_overview/http/http_connection_management.rst @@ -73,7 +73,7 @@ can be used to modify this behavior, and they fall into two categories: Envoy supports the following built-in priority predicates - * *envoy.retry_priority.previous_priorities*: This will keep track of previously attempted priorities, + * *envoy.retry_priorities.previous_priorities*: This will keep track of previously attempted priorities, and adjust the priority load such that other priorities will be targeted in subsequent retry attempts. Host selection will continue until either the configured predicates accept the host or a configurable @@ -117,7 +117,7 @@ To reject a host based on its metadata, ``envoy.retry_host_predicates.omit_host_ This will reject any host with matching (key, value) in its metadata. To configure retries to attempt other priorities during retries, the built-in -``envoy.retry_priority.previous_priorities`` can be used. +``envoy.retry_priorities.previous_priorities`` can be used. .. code-block:: yaml From 3975ca9c1718b99b6c94c879617c844a1dad15d2 Mon Sep 17 00:00:00 2001 From: Konstantin Belyalov Date: Wed, 29 Apr 2020 22:23:09 -0700 Subject: [PATCH 061/909] grpc_transcoder/httpBody: process all gRPC frames in streaming mode (#10903) Description: Fix transcoder to process all gRPC frames from buffer in httpBody streaming mode Risk Level: Low (kinda bug fix) Testing: added unit / integration test Docs Changes: not changed because of HttpBody streaming support was introduced in the same version. 
Release Notes: not added bcz of ^^ Signed-off-by: Konstantin Belyalov --- .../json_transcoder_filter.cc | 2 +- .../grpc_json_transcoder_integration_test.cc | 46 +++++++++++++++++++ .../json_transcoder_filter_test.cc | 11 +++++ tools/spelling/spelling_dictionary.txt | 1 + 4 files changed, 59 insertions(+), 1 deletion(-) diff --git a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc index 1434a3a97f1e..78ae748fbc58 100644 --- a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc +++ b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc @@ -688,12 +688,12 @@ void JsonTranscoderFilter::buildResponseFromHttpBodyOutput( // Non streaming case: single message with content type / length response_headers.setContentType(http_body.content_type()); response_headers.setContentLength(body.size()); + return; } else if (!http_body_response_headers_set_) { // Streaming case: set content type only once from first HttpBody message response_headers.setContentType(http_body.content_type()); http_body_response_headers_set_ = true; } - return; } } } diff --git a/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc b/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc index 3760cbb6cb41..27bf41e4a661 100644 --- a/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc +++ b/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc @@ -341,6 +341,52 @@ TEST_P(GrpcJsonTranscoderIntegrationTest, StreamGetHttpBody) { R"(Hello!)"); } +TEST_P(GrpcJsonTranscoderIntegrationTest, StreamGetHttpBodyMultipleFramesInData) { + HttpIntegrationTest::initialize(); + + // testTranscoding() does not provide grpc multiframe support. + // Since this is one-off it does not make sense to even more + // complicate this function. 
+ // + // Make request to gRPC upstream + codec_client_ = makeHttpConnection(lookupPort("http")); + auto response = codec_client_->makeHeaderOnlyRequest(Http::TestRequestHeaderMapImpl{ + {":method", "GET"}, + {":path", "/indexStream"}, + {":authority", "host"}, + }); + waitForNextUpstreamRequest(); + + // Send multi-framed gRPC response + // Headers + Http::TestResponseHeaderMapImpl response_headers; + response_headers.setStatus(200); + response_headers.setContentType("application/grpc"); + upstream_request_->encodeHeaders(response_headers, false); + // Payload + google::api::HttpBody grpcMsg; + EXPECT_TRUE(TextFormat::ParseFromString(R"(content_type: "text/plain" data: "Hello")", &grpcMsg)); + Buffer::OwnedImpl response_buffer; + for (size_t i = 0; i < 3; i++) { + auto frame = Grpc::Common::serializeToGrpcFrame(grpcMsg); + response_buffer.add(*frame); + } + upstream_request_->encodeData(response_buffer, false); + // Trailers + Http::TestResponseTrailerMapImpl response_trailers; + auto grpc_status = Status(); + response_trailers.setGrpcStatus(static_cast(grpc_status.error_code())); + response_trailers.setGrpcMessage( + absl::string_view(grpc_status.error_message().data(), grpc_status.error_message().size())); + upstream_request_->encodeTrailers(response_trailers); + EXPECT_TRUE(upstream_request_->complete()); + + // Wait for complete / check body to have 3 frames joined + response->waitForEndStream(); + EXPECT_TRUE(response->complete()); + EXPECT_EQ(response->body(), "HelloHelloHello"); +} + TEST_P(GrpcJsonTranscoderIntegrationTest, UnaryEchoHttpBody) { HttpIntegrationTest::initialize(); testTranscoding( diff --git a/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc b/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc index 2170a79cf38e..05977528ad16 100644 --- a/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc +++ b/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc @@ -884,6 +884,17 @@ TEST_F(GrpcJsonTranscoderFilterTest, TranscodingStreamWithHttpBodyAsOutput) { EXPECT_EQ(nullptr, response_headers.ContentLength()); EXPECT_EQ(response.data(), response_data->toString()); + // "Send" 3rd multiframe message ("msgmsgmsg") + Buffer::OwnedImpl multiframe_data; + response.set_data("msg"); + for (size_t i = 0; i < 3; i++) { + auto frame = Grpc::Common::serializeToGrpcFrame(response); + multiframe_data.add(*frame); + } + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(multiframe_data, false)); + // 3 grpc frames joined + EXPECT_EQ("msgmsgmsg", multiframe_data.toString()); + Http::TestRequestTrailerMapImpl request_trailers; EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers)); } diff --git a/tools/spelling/spelling_dictionary.txt b/tools/spelling/spelling_dictionary.txt index fdeccc570b1b..02f996cbbdfa 100644 --- a/tools/spelling/spelling_dictionary.txt +++ b/tools/spelling/spelling_dictionary.txt @@ -179,6 +179,7 @@ MSET MSVC MTLS MTU +MULTIFRAME NACK NACKed NACKs From ebf3501e6c7fed3540f6e88445edd1ab5f67593e Mon Sep 17 00:00:00 2001 From: htuch Date: Thu, 30 Apr 2020 01:23:43 -0400 Subject: [PATCH 062/909] tools: strengthen v2 API package regexes in proto_build_targets_gen.py. (#11008) Some weak regexes were causing v4alpha protos to end up in the v2 docs in https://github.com/envoyproxy/envoy/pull/10971, this PR fixes. 
Signed-off-by: Harvey Tuch --- tools/type_whisperer/proto_build_targets_gen.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/type_whisperer/proto_build_targets_gen.py b/tools/type_whisperer/proto_build_targets_gen.py index c1fcda72ee3d..e7dcfdd26994 100644 --- a/tools/type_whisperer/proto_build_targets_gen.py +++ b/tools/type_whisperer/proto_build_targets_gen.py @@ -15,8 +15,8 @@ map(re.compile, [ r'envoy[\w\.]*\.(v1alpha\d?|v1)', r'envoy[\w\.]*\.(v2alpha\d?|v2)', - r'envoy\.type\.matcher', - r'envoy\.type', + r'envoy\.type\.matcher$', + r'envoy\.type$', r'envoy\.config\.cluster\.redis', r'envoy\.config\.retry\.previous_priorities', ])) From aa3d627a3bf7367f74515439bc1201f8060c36ba Mon Sep 17 00:00:00 2001 From: asraa Date: Thu, 30 Apr 2020 08:28:22 -0400 Subject: [PATCH 063/909] [fuzz] Add bookstore proto descriptor to GrpcJsonTranscoder filter fuzz configs (#10798) Modifies fuzzed GrpcJsonTranscoder configs to have a valid service name and proto descriptor so that fuzzing can explore the filter code. Splits UberFilter code into source file and per_filter set-up to use for other filter-specific input clean-ups. Risk level: Low Testing: * Local corpus generation + coverage run shows 25% coverage in json_transcoder_filter.h. Previous 0%. Many methods are on the encode path as well, so we will lack coverage of that until encode paths are fuzzed. * Corpus entry added that successfully decodes headers and decodes data for the filter. * Manual debug assertions show that the decodeHeaders code is run at multiple locations, including at the very end of the method Signed-off-by: Asra Ali --- .../extensions/filters/http/common/fuzz/BUILD | 7 + ...h-bb74d7280823776808e881b20c0a9c87f7a2163b | 16 +++ .../http/common/fuzz/filter_corpus/grpc_json | 23 ++++ .../filters/http/common/fuzz/uber_filter.cc | 106 ++++++++++++++ .../filters/http/common/fuzz/uber_filter.h | 130 ++---------------- .../http/common/fuzz/uber_per_filter.cc | 77 +++++++++++ 6 files changed, 241 insertions(+), 118 deletions(-) create mode 100644 test/extensions/filters/http/common/fuzz/filter_corpus/crash-bb74d7280823776808e881b20c0a9c87f7a2163b create mode 100644 test/extensions/filters/http/common/fuzz/filter_corpus/grpc_json create mode 100644 test/extensions/filters/http/common/fuzz/uber_filter.cc create mode 100644 test/extensions/filters/http/common/fuzz/uber_per_filter.cc diff --git a/test/extensions/filters/http/common/fuzz/BUILD b/test/extensions/filters/http/common/fuzz/BUILD index 0e99f5e76546..929cb14dfec6 100644 --- a/test/extensions/filters/http/common/fuzz/BUILD +++ b/test/extensions/filters/http/common/fuzz/BUILD @@ -25,15 +25,22 @@ envoy_proto_library( envoy_cc_test_library( name = "uber_filter_lib", + srcs = [ + "uber_filter.cc", + "uber_per_filter.cc", + ], hdrs = ["uber_filter.h"], deps = [ ":filter_fuzz_proto_cc_proto", "//source/common/config:utility_lib", "//source/common/protobuf:utility_lib", + "//source/extensions/filters/http:well_known_names", "//test/fuzz:utility_lib", "//test/mocks/buffer:buffer_mocks", "//test/mocks/http:http_mocks", "//test/mocks/server:server_mocks", + "//test/proto:bookstore_proto_cc_proto", + "@envoy_api//envoy/extensions/filters/http/grpc_json_transcoder/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/crash-bb74d7280823776808e881b20c0a9c87f7a2163b b/test/extensions/filters/http/common/fuzz/filter_corpus/crash-bb74d7280823776808e881b20c0a9c87f7a2163b new file mode 100644 index 000000000000..3eea853ad21e --- 
/dev/null +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/crash-bb74d7280823776808e881b20c0a9c87f7a2163b @@ -0,0 +1,16 @@ +config { + name: "envoy.grpc_json_transcoder" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.http.grpc_json_transcoder.v3.GrpcJsonTranscoder" + value: "\n\001%8\001" + } +} +data { + data: "\001\000\000\t" + trailers { + headers { + key: "0" + value: "||||||||||||||||||||||||||||||||||||||||" + } + } +} diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/grpc_json b/test/extensions/filters/http/common/fuzz/filter_corpus/grpc_json new file mode 100644 index 000000000000..3846826fa9d8 --- /dev/null +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/grpc_json @@ -0,0 +1,23 @@ +config { +name: "envoy.filters.http.grpc_json_transcoder" +typed_config: { +} +} + +data { +headers { +headers { + key: "content-type" + value: "application/json" +} +headers { + key: ":method" + value: "POST" +} +headers { + key: ":path" + value: "/bookstore.Bookstore/CreateShelfWithPackageServiceAndMethod" +} +} +data: "{\"theme\": \"Children\"}" +} \ No newline at end of file diff --git a/test/extensions/filters/http/common/fuzz/uber_filter.cc b/test/extensions/filters/http/common/fuzz/uber_filter.cc new file mode 100644 index 000000000000..7ec9b022b99a --- /dev/null +++ b/test/extensions/filters/http/common/fuzz/uber_filter.cc @@ -0,0 +1,106 @@ +#include "test/extensions/filters/http/common/fuzz/uber_filter.h" + +#include "common/config/utility.h" +#include "common/config/version_converter.h" +#include "common/http/message_impl.h" +#include "common/protobuf/protobuf.h" +#include "common/protobuf/utility.h" + +#include "test/test_common/utility.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { + +UberFilterFuzzer::UberFilterFuzzer() { + // Need to set for both a decoder filter and an encoder/decoder filter. + ON_CALL(filter_callback_, addStreamDecoderFilter(_)) + .WillByDefault(Invoke([&](std::shared_ptr filter) -> void { + filter_ = filter; + filter_->setDecoderFilterCallbacks(callbacks_); + })); + ON_CALL(filter_callback_, addStreamFilter(_)) + .WillByDefault(Invoke([&](std::shared_ptr filter) -> void { + filter_ = filter; + filter_->setDecoderFilterCallbacks(callbacks_); + })); + // Set expectations for particular filters that may get fuzzed. 
+ perFilterSetup(); +} + +void UberFilterFuzzer::decode(Http::StreamDecoderFilter* filter, const test::fuzz::HttpData& data) { + bool end_stream = false; + + auto headers = Fuzz::fromHeaders(data.headers()); + if (headers.Path() == nullptr) { + headers.setPath("/foo"); + } + if (headers.Method() == nullptr) { + headers.setMethod("GET"); + } + if (headers.Host() == nullptr) { + headers.setHost("foo.com"); + } + + if (data.data().empty() && !data.has_trailers()) { + end_stream = true; + } + ENVOY_LOG_MISC(debug, "Decoding headers: {} ", data.headers().DebugString()); + const auto& headersStatus = filter->decodeHeaders(headers, end_stream); + if (headersStatus != Http::FilterHeadersStatus::Continue && + headersStatus != Http::FilterHeadersStatus::StopIteration) { + return; + } + + for (int i = 0; i < data.data().size(); i++) { + if (i == data.data().size() - 1 && !data.has_trailers()) { + end_stream = true; + } + Buffer::OwnedImpl buffer(data.data().Get(i)); + ENVOY_LOG_MISC(debug, "Decoding data: {} ", buffer.toString()); + if (filter->decodeData(buffer, end_stream) != Http::FilterDataStatus::Continue) { + return; + } + } + + if (data.has_trailers()) { + ENVOY_LOG_MISC(debug, "Decoding trailers: {} ", data.trailers().DebugString()); + auto trailers = Fuzz::fromHeaders(data.trailers()); + filter->decodeTrailers(trailers); + } +} + +void UberFilterFuzzer::fuzz( + const envoy::extensions::filters::network::http_connection_manager::v3::HttpFilter& + proto_config, + const test::fuzz::HttpData& data) { + try { + // Try to create the filter. Exit early if the config is invalid or violates PGV constraints. + ENVOY_LOG_MISC(info, "filter name {}", proto_config.name()); + auto& factory = Config::Utility::getAndCheckFactoryByName< + Server::Configuration::NamedHttpFilterConfigFactory>(proto_config.name()); + ProtobufTypes::MessagePtr message = Config::Utility::translateToFactoryConfig( + proto_config, factory_context_.messageValidationVisitor(), factory); + // Clean-up config with filter-specific logic. + cleanFuzzedConfig(proto_config.name(), message.get()); + cb_ = factory.createFilterFactoryFromProto(*message, "stats", factory_context_); + cb_(filter_callback_); + } catch (const EnvoyException& e) { + ENVOY_LOG_MISC(debug, "Controlled exception {}", e.what()); + return; + } + + decode(filter_.get(), data); + reset(); +} + +void UberFilterFuzzer::reset() { + if (filter_ != nullptr) { + filter_->onDestroy(); + } + filter_.reset(); +} + +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/http/common/fuzz/uber_filter.h b/test/extensions/filters/http/common/fuzz/uber_filter.h index 2df4483a4a61..a18d1ae8057b 100644 --- a/test/extensions/filters/http/common/fuzz/uber_filter.h +++ b/test/extensions/filters/http/common/fuzz/uber_filter.h @@ -1,7 +1,3 @@ -#include "common/config/utility.h" -#include "common/config/version_converter.h" -#include "common/protobuf/utility.h" - #include "test/fuzz/utility.h" #include "test/mocks/buffer/mocks.h" #include "test/mocks/http/mocks.h" @@ -13,126 +9,24 @@ namespace HttpFilters { class UberFilterFuzzer { public: - UberFilterFuzzer() { - // Need to set for both a decoder filter and an encoder/decoder filter. 
- ON_CALL(filter_callback_, addStreamDecoderFilter(_)) - .WillByDefault( - Invoke([&](std::shared_ptr filter) -> void { - filter_ = filter; - filter_->setDecoderFilterCallbacks(callbacks_); - })); - ON_CALL(filter_callback_, addStreamFilter(_)) - .WillByDefault( - Invoke([&](std::shared_ptr filter) -> void { - filter_ = filter; - filter_->setDecoderFilterCallbacks(callbacks_); - })); - setExpectations(); - } - - void setExpectations() { - // Ext-authz setup - prepareExtAuthz(); - prepareCache(); - prepareTap(); - } - - void prepareExtAuthz() { - // Preparing the expectations for the ext_authz filter. - addr_ = std::make_shared("1.2.3.4", 1111); - ON_CALL(connection_, remoteAddress()).WillByDefault(testing::ReturnRef(addr_)); - ON_CALL(connection_, localAddress()).WillByDefault(testing::ReturnRef(addr_)); - ON_CALL(callbacks_, connection()).WillByDefault(testing::Return(&connection_)); - ON_CALL(callbacks_, activeSpan()) - .WillByDefault(testing::ReturnRef(Tracing::NullSpan::instance())); - callbacks_.stream_info_.protocol_ = Envoy::Http::Protocol::Http2; - } - - void prepareCache() { - // Prepare expectations for dynamic forward proxy. - ON_CALL(factory_context_.dispatcher_, createDnsResolver(_, _)) - .WillByDefault(testing::Return(resolver_)); - } - - void prepareTap() { - ON_CALL(factory_context_, admin()).WillByDefault(testing::ReturnRef(factory_context_.admin_)); - ON_CALL(factory_context_.admin_, addHandler(_, _, _, _, _)) - .WillByDefault(testing::Return(true)); - ON_CALL(factory_context_.admin_, removeHandler(_)).WillByDefault(testing::Return(true)); - } - - // This executes the decode methods to be fuzzed. - void decode(Http::StreamDecoderFilter* filter, const test::fuzz::HttpData& data) { - bool end_stream = false; - - auto headers = Fuzz::fromHeaders(data.headers()); - if (headers.Path() == nullptr) { - headers.setPath("/foo"); - } - if (headers.Method() == nullptr) { - headers.setMethod("GET"); - } - if (headers.Host() == nullptr) { - headers.setHost("foo.com"); - } - - if (data.data().empty() && !data.has_trailers()) { - end_stream = true; - } - ENVOY_LOG_MISC(debug, "Decoding headers: {} ", data.headers().DebugString()); - const auto& headersStatus = filter->decodeHeaders(headers, end_stream); - if (headersStatus != Http::FilterHeadersStatus::Continue && - headersStatus != Http::FilterHeadersStatus::StopIteration) { - return; - } + UberFilterFuzzer(); - for (int i = 0; i < data.data().size(); i++) { - if (i == data.data().size() - 1 && !data.has_trailers()) { - end_stream = true; - } - Buffer::OwnedImpl buffer(data.data().Get(i)); - ENVOY_LOG_MISC(debug, "Decoding data: {} ", buffer.toString()); - if (filter->decodeData(buffer, end_stream) != Http::FilterDataStatus::Continue) { - return; - } - } - - if (data.has_trailers()) { - ENVOY_LOG_MISC(debug, "Decoding trailers: {} ", data.trailers().DebugString()); - auto trailers = Fuzz::fromHeaders(data.trailers()); - filter->decodeTrailers(trailers); - } - } - - // This creates the filter config and runs decode. + // This creates the filter config and runs the decode methods. void fuzz(const envoy::extensions::filters::network::http_connection_manager::v3::HttpFilter& proto_config, - const test::fuzz::HttpData& data) { - try { - // Try to create the filter. Exit early if the config is invalid or violates PGV constraints. 
- ENVOY_LOG_MISC(info, "filter name {}", proto_config.name()); - auto& factory = Config::Utility::getAndCheckFactoryByName< - Server::Configuration::NamedHttpFilterConfigFactory>(proto_config.name()); - ProtobufTypes::MessagePtr message = Config::Utility::translateToFactoryConfig( - proto_config, factory_context_.messageValidationVisitor(), factory); - cb_ = factory.createFilterFactoryFromProto(*message, "stats", factory_context_); - cb_(filter_callback_); - } catch (const EnvoyException& e) { - ENVOY_LOG_MISC(debug, "Controlled exception {}", e.what()); - return; - } + const test::fuzz::HttpData& data); - decode(filter_.get(), data); - reset(); - } +protected: + // Set-up filter specific mock expectations in constructor. + void perFilterSetup(); + // Filter specific input cleanup. + void cleanFuzzedConfig(absl::string_view filter_name, Protobuf::Message* message); - void reset() { - if (filter_ != nullptr) { - filter_->onDestroy(); - } - filter_.reset(); - } + // This executes the decode methods to be fuzzed. + void decode(Http::StreamDecoderFilter* filter, const test::fuzz::HttpData& data); + void reset(); +private: NiceMock factory_context_; NiceMock callbacks_; NiceMock filter_callback_; diff --git a/test/extensions/filters/http/common/fuzz/uber_per_filter.cc b/test/extensions/filters/http/common/fuzz/uber_per_filter.cc new file mode 100644 index 000000000000..55ba9d253eae --- /dev/null +++ b/test/extensions/filters/http/common/fuzz/uber_per_filter.cc @@ -0,0 +1,77 @@ +#include "envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.pb.h" + +#include "extensions/filters/http/well_known_names.h" + +#include "test/extensions/filters/http/common/fuzz/uber_filter.h" +#include "test/proto/bookstore.pb.h" + +// This file contains any filter-specific setup and input clean-up needed in the generic filter fuzz +// target. + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace { + +void addFileDescriptorsRecursively(const Protobuf::FileDescriptor& descriptor, + Protobuf::FileDescriptorSet& set, + absl::flat_hash_set& added_descriptors) { + if (!added_descriptors.insert(descriptor.name()).second) { + // Already added. + return; + } + for (int i = 0; i < descriptor.dependency_count(); i++) { + addFileDescriptorsRecursively(*descriptor.dependency(i), set, added_descriptors); + } + descriptor.CopyTo(set.add_file()); +} + +void addBookstoreProtoDescriptor(Protobuf::Message* message) { + envoy::extensions::filters::http::grpc_json_transcoder::v3::GrpcJsonTranscoder& config = + dynamic_cast( + *message); + config.clear_services(); + config.add_services("bookstore.Bookstore"); + + Protobuf::FileDescriptorSet descriptor_set; + const auto* file_descriptor = + Protobuf::DescriptorPool::generated_pool()->FindFileByName("test/proto/bookstore.proto"); + ASSERT(file_descriptor != nullptr); + // Create a set to keep track of descriptors as they are added. + absl::flat_hash_set added_descriptors; + addFileDescriptorsRecursively(*file_descriptor, descriptor_set, added_descriptors); + descriptor_set.SerializeToString(config.mutable_proto_descriptor_bin()); +} +} // namespace + +void UberFilterFuzzer::cleanFuzzedConfig(absl::string_view filter_name, + Protobuf::Message* message) { + // Map filter name to clean-up function. + if (filter_name == HttpFilterNames::get().GrpcJsonTranscoder) { + addBookstoreProtoDescriptor(message); + } +} + +void UberFilterFuzzer::perFilterSetup() { + // Prepare expectations for the ext_authz filter. 
+ addr_ = std::make_shared("1.2.3.4", 1111); + ON_CALL(connection_, remoteAddress()).WillByDefault(testing::ReturnRef(addr_)); + ON_CALL(connection_, localAddress()).WillByDefault(testing::ReturnRef(addr_)); + ON_CALL(callbacks_, connection()).WillByDefault(testing::Return(&connection_)); + ON_CALL(callbacks_, activeSpan()) + .WillByDefault(testing::ReturnRef(Tracing::NullSpan::instance())); + callbacks_.stream_info_.protocol_ = Envoy::Http::Protocol::Http2; + + // Prepare expectations for dynamic forward proxy. + ON_CALL(factory_context_.dispatcher_, createDnsResolver(_, _)) + .WillByDefault(testing::Return(resolver_)); + + // Prepare expectations for TAP config. + ON_CALL(factory_context_, admin()).WillByDefault(testing::ReturnRef(factory_context_.admin_)); + ON_CALL(factory_context_.admin_, addHandler(_, _, _, _, _)).WillByDefault(testing::Return(true)); + ON_CALL(factory_context_.admin_, removeHandler(_)).WillByDefault(testing::Return(true)); +} + +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy From 43fba72dab0f05fdb1902227450772d55c43b569 Mon Sep 17 00:00:00 2001 From: yanavlasov Date: Thu, 30 Apr 2020 08:30:44 -0400 Subject: [PATCH 064/909] build: Add the use_category attribute to the list of external dependecies (#10896) Add required use_category attribute to the list of external dependencies for describing how the dependency is being used. This attribute is used for automatic tracking of security posture of Envoy's dependencies Risk Level: Low Testing: Build/Unit Test Docs Changes: N/A Release Notes: N/A Signed-off-by: Yan Avlasov --- bazel/repositories.bzl | 56 +++++++++++++++------- bazel/repository_locations.bzl | 85 +++++++++++++++++++++++++++++++++- 2 files changed, 124 insertions(+), 17 deletions(-) diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index 90cefccf2094..66874b179224 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -2,7 +2,7 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") load(":dev_binding.bzl", "envoy_dev_binding") load(":genrule_repository.bzl", "genrule_repository") load("@envoy_api//bazel:envoy_http_archive.bzl", "envoy_http_archive") -load(":repository_locations.bzl", "REPOSITORY_LOCATIONS") +load(":repository_locations.bzl", "DEPENDENCY_REPOSITORIES", "USE_CATEGORIES") load("@com_google_googleapis//:repository_rules.bzl", "switched_rules_by_language") PPC_SKIP_TARGETS = ["envoy.filters.http.lua"] @@ -18,6 +18,30 @@ WINDOWS_SKIP_TARGETS = [ # archives, e.g. cares. BUILD_ALL_CONTENT = """filegroup(name = "all", srcs = glob(["**"]), visibility = ["//visibility:public"])""" +# Method for verifying content of the DEPENDENCY_REPOSITORIES defined in bazel/repository_locations.bzl +# Verification is here so that bazel/repository_locations.bzl can be loaded into other tools written in Python, +# and as such needs to be free of bazel specific constructs. +def _repository_locations(): + locations = dict(DEPENDENCY_REPOSITORIES) + for key, location in locations.items(): + if "use_category" not in location: + fail("The 'use_category' attribute must be defined for external dependecy " + str(location["urls"])) + + for category in location["use_category"]: + if category not in USE_CATEGORIES: + fail("Unknown use_category value '" + category + "' for dependecy " + str(location["urls"])) + + return locations + +REPOSITORY_LOCATIONS = _repository_locations() + +# To initialize http_archive REPOSITORY_LOCATIONS dictionaries must be stripped of annotations. 
+# See repository_locations.bzl for the list of annotation attributes. +def _get_location(dependency): + stripped = dict(REPOSITORY_LOCATIONS[dependency]) + stripped.pop("use_category", None) + return stripped + def _repository_impl(name, **kwargs): envoy_http_archive( name, @@ -211,7 +235,7 @@ def _com_github_circonus_labs_libcircllhist(): ) def _com_github_c_ares_c_ares(): - location = REPOSITORY_LOCATIONS["com_github_c_ares_c_ares"] + location = _get_location("com_github_c_ares_c_ares") http_archive( name = "com_github_c_ares_c_ares", patches = ["@envoy//bazel/foreign_cc:cares-win32-nameser.patch"], @@ -280,7 +304,7 @@ def _com_github_gabime_spdlog(): ) def _com_github_google_benchmark(): - location = REPOSITORY_LOCATIONS["com_github_google_benchmark"] + location = _get_location("com_github_google_benchmark") http_archive( name = "com_github_google_benchmark", **location @@ -297,7 +321,7 @@ def _com_github_google_libprotobuf_mutator(): ) def _com_github_jbeder_yaml_cpp(): - location = REPOSITORY_LOCATIONS["com_github_jbeder_yaml_cpp"] + location = _get_location("com_github_jbeder_yaml_cpp") http_archive( name = "com_github_jbeder_yaml_cpp", build_file_content = BUILD_ALL_CONTENT, @@ -309,7 +333,7 @@ def _com_github_jbeder_yaml_cpp(): ) def _com_github_libevent_libevent(): - location = REPOSITORY_LOCATIONS["com_github_libevent_libevent"] + location = _get_location("com_github_libevent_libevent") http_archive( name = "com_github_libevent_libevent", build_file_content = BUILD_ALL_CONTENT, @@ -323,7 +347,7 @@ def _com_github_libevent_libevent(): ) def _net_zlib(): - location = REPOSITORY_LOCATIONS["net_zlib"] + location = _get_location("net_zlib") http_archive( name = "net_zlib", @@ -346,7 +370,7 @@ def _com_google_cel_cpp(): _repository_impl("com_google_cel_cpp") def _com_github_nghttp2_nghttp2(): - location = REPOSITORY_LOCATIONS["com_github_nghttp2_nghttp2"] + location = _get_location("com_github_nghttp2_nghttp2") http_archive( name = "com_github_nghttp2_nghttp2", build_file_content = BUILD_ALL_CONTENT, @@ -560,7 +584,7 @@ def _com_google_protobuf(): ) def _io_opencensus_cpp(): - location = REPOSITORY_LOCATIONS["io_opencensus_cpp"] + location = _get_location("io_opencensus_cpp") http_archive( name = "io_opencensus_cpp", **location @@ -604,7 +628,7 @@ def _io_opencensus_cpp(): def _com_github_curl(): # Used by OpenCensus Zipkin exporter. 
- location = REPOSITORY_LOCATIONS["com_github_curl"] + location = _get_location("com_github_curl") http_archive( name = "com_github_curl", build_file_content = BUILD_ALL_CONTENT + """ @@ -620,7 +644,7 @@ cc_library(name = "curl", visibility = ["//visibility:public"], deps = ["@envoy/ ) def _com_googlesource_chromium_v8(): - location = REPOSITORY_LOCATIONS["com_googlesource_chromium_v8"] + location = _get_location("com_googlesource_chromium_v8") genrule_repository( name = "com_googlesource_chromium_v8", genrule_cmd_file = "@envoy//bazel/external:wee8.genrule_cmd", @@ -738,7 +762,7 @@ def _com_github_google_jwt_verify(): ) def _com_github_luajit_luajit(): - location = REPOSITORY_LOCATIONS["com_github_luajit_luajit"] + location = _get_location("com_github_luajit_luajit") http_archive( name = "com_github_luajit_luajit", build_file_content = BUILD_ALL_CONTENT, @@ -754,7 +778,7 @@ def _com_github_luajit_luajit(): ) def _com_github_moonjit_moonjit(): - location = REPOSITORY_LOCATIONS["com_github_moonjit_moonjit"] + location = _get_location("com_github_moonjit_moonjit") http_archive( name = "com_github_moonjit_moonjit", build_file_content = BUILD_ALL_CONTENT, @@ -770,7 +794,7 @@ def _com_github_moonjit_moonjit(): ) def _com_github_gperftools_gperftools(): - location = REPOSITORY_LOCATIONS["com_github_gperftools_gperftools"] + location = _get_location("com_github_gperftools_gperftools") http_archive( name = "com_github_gperftools_gperftools", build_file_content = BUILD_ALL_CONTENT, @@ -802,7 +826,7 @@ filegroup( name = "kafka_source", build_file_content = KAFKASOURCE_BUILD_CONTENT, patches = ["@envoy//bazel/external:kafka_int32.patch"], - **REPOSITORY_LOCATIONS["kafka_source"] + **_get_location("kafka_source") ) # This archive provides Kafka (and Zookeeper) binaries, that are used during Kafka integration @@ -810,7 +834,7 @@ filegroup( http_archive( name = "kafka_server_binary", build_file_content = BUILD_ALL_CONTENT, - **REPOSITORY_LOCATIONS["kafka_server_binary"] + **_get_location("kafka_server_binary") ) # This archive provides Kafka client in Python, so we can use it to interact with Kafka server @@ -818,7 +842,7 @@ filegroup( http_archive( name = "kafka_python_client", build_file_content = BUILD_ALL_CONTENT, - **REPOSITORY_LOCATIONS["kafka_python_client"] + **_get_location("kafka_python_client") ) def _foreign_cc_dependencies(): diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index d497f5bede6b..17a3eab8cd70 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -1,12 +1,39 @@ -REPOSITORY_LOCATIONS = dict( +# Envoy dependencies may be annotated with the following attributes: +# +# use_category - list of the categories describing how the dependency is being used. This attribute is used +# for automatic tracking of security posture of Envoy's dependencies. +# Possible values are documented in the USE_CATEGORIES list. + +# NOTE: If a dependency use case is either dataplane or controlplane, the other uses are not needed +# to be declared. +USE_CATEGORIES = [ + # This dependency is used in build process. + "build", + # This dependency is used for unit tests. + "test", + # This dependency is used in API protos. + "api", + # This dependency is used in processing downstream or upstream requests. + "dataplane", + # This dependency is used to process xDS requests. + "controlplane", + # This dependecy is used for logging, metrics or tracing. It may process unstrusted input. 
+ "observability", + # This dependency does not handle untrusted data and is used for various utility purposes. + "other", +] + +DEPENDENCY_REPOSITORIES = dict( bazel_compdb = dict( sha256 = "87e376a685eacfb27bcc0d0cdf5ded1d0b99d868390ac50f452ba6ed781caffe", strip_prefix = "bazel-compilation-database-0.4.2", urls = ["https://github.com/grailbio/bazel-compilation-database/archive/0.4.2.tar.gz"], + use_category = ["build"], ), bazel_gazelle = dict( sha256 = "86c6d481b3f7aedc1d60c1c211c6f76da282ae197c3b3160f54bd3a8f847896f", urls = ["https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.19.1/bazel-gazelle-v0.19.1.tar.gz"], + use_category = ["build"], ), bazel_toolchains = dict( sha256 = "239a1a673861eabf988e9804f45da3b94da28d1aff05c373b013193c315d9d9e", @@ -15,16 +42,19 @@ REPOSITORY_LOCATIONS = dict( "https://github.com/bazelbuild/bazel-toolchains/releases/download/3.0.1/bazel-toolchains-3.0.1.tar.gz", "https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/archive/3.0.1.tar.gz", ], + use_category = ["build"], ), build_bazel_rules_apple = dict( sha256 = "7a7afdd4869bb201c9352eed2daf37294d42b093579b70423490c1b4d4f6ce42", urls = ["https://github.com/bazelbuild/rules_apple/releases/download/0.19.0/rules_apple.0.19.0.tar.gz"], + use_category = ["build"], ), envoy_build_tools = dict( sha256 = "9d348f92ae8fb2495393109aac28aea314ad1fb013cdec1ab7b1224f804be1b7", strip_prefix = "envoy-build-tools-823c2e9386eee5117f7ef9e3d7c90e784cd0d047", # 2020-04-07 urls = ["https://github.com/envoyproxy/envoy-build-tools/archive/823c2e9386eee5117f7ef9e3d7c90e784cd0d047.tar.gz"], + use_category = ["build"], ), boringssl = dict( sha256 = "a3d4de4f03cb321ef943678d72a045c9a19d26b23d6f4e313f97600c65201a27", @@ -36,22 +66,26 @@ REPOSITORY_LOCATIONS = dict( # # chromium-81.0.4044.69 urls = ["https://github.com/google/boringssl/archive/1c2769383f027befac5b75b6cedd25daf3bf4dcf.tar.gz"], + use_category = ["dataplane"], ), boringssl_fips = dict( sha256 = "b12ad676ee533824f698741bd127f6fbc82c46344398a6d78d25e62c6c418c73", # fips-20180730 urls = ["https://commondatastorage.googleapis.com/chromium-boringssl-docs/fips/boringssl-66005f41fbc3529ffe8d007708756720529da20d.tar.xz"], + use_category = ["dataplane"], ), com_google_absl = dict( sha256 = "14ee08e2089c2a9b6bf27e1d10abc5629c69c4d0bab4b78ec5b65a29ea1c2af7", strip_prefix = "abseil-cpp-cf3a1998e9d41709d4141e2f13375993cba1130e", # 2020-03-05 urls = ["https://github.com/abseil/abseil-cpp/archive/cf3a1998e9d41709d4141e2f13375993cba1130e.tar.gz"], + use_category = ["dataplane", "controlplane"], ), com_github_apache_thrift = dict( sha256 = "7d59ac4fdcb2c58037ebd4a9da5f9a49e3e034bf75b3f26d9fe48ba3d8806e6b", strip_prefix = "thrift-0.11.0", urls = ["https://files.pythonhosted.org/packages/c6/b4/510617906f8e0c5660e7d96fbc5585113f83ad547a3989b80297ac72a74c/thrift-0.11.0.tar.gz"], + use_category = ["dataplane"], ), com_github_c_ares_c_ares = dict( sha256 = "bbaab13d6ad399a278d476f533e4d88a7ec7d729507348bb9c2e3b207ba4c606", @@ -61,44 +95,52 @@ REPOSITORY_LOCATIONS = dict( # Use getaddrinfo to query DNS record and TTL. # TODO(crazyxy): Update to release-1.16.0 when it is released. 
urls = ["https://github.com/c-ares/c-ares/archive/d7e070e7283f822b1d2787903cce3615536c5610.tar.gz"], + use_category = ["dataplane"], ), com_github_circonus_labs_libcircllhist = dict( sha256 = "8165aa25e529d7d4b9ae849d3bf30371255a99d6db0421516abcff23214cdc2c", strip_prefix = "libcircllhist-63a16dd6f2fc7bc841bb17ff92be8318df60e2e1", # 2019-02-11 urls = ["https://github.com/circonus-labs/libcircllhist/archive/63a16dd6f2fc7bc841bb17ff92be8318df60e2e1.tar.gz"], + use_category = ["observability"], ), com_github_cyan4973_xxhash = dict( sha256 = "952ebbf5b11fbf59ae5d760a562d1e9112278f244340ad7714e8556cbe54f7f7", strip_prefix = "xxHash-0.7.3", urls = ["https://github.com/Cyan4973/xxHash/archive/v0.7.3.tar.gz"], + use_category = ["dataplane", "controlplane"], ), com_github_envoyproxy_sqlparser = dict( sha256 = "b2d3882698cf85b64c87121e208ce0b24d5fe2a00a5d058cf4571f1b25b45403", strip_prefix = "sql-parser-b14d010afd4313f2372a1cc96aa2327e674cc798", # 2020-01-10 urls = ["https://github.com/envoyproxy/sql-parser/archive/b14d010afd4313f2372a1cc96aa2327e674cc798.tar.gz"], + use_category = ["dataplane"], ), com_github_mirror_tclap = dict( sha256 = "f0ede0721dddbb5eba3a47385a6e8681b14f155e1129dd39d1a959411935098f", strip_prefix = "tclap-tclap-1-2-1-release-final", urls = ["https://github.com/mirror/tclap/archive/tclap-1-2-1-release-final.tar.gz"], + use_category = ["other"], ), com_github_fmtlib_fmt = dict( sha256 = "f1907a58d5e86e6c382e51441d92ad9e23aea63827ba47fd647eacc0d3a16c78", strip_prefix = "fmt-6.0.0", urls = ["https://github.com/fmtlib/fmt/archive/6.0.0.tar.gz"], + use_category = ["observability"], ), com_github_gabime_spdlog = dict( sha256 = "afd18f62d1bc466c60bef088e6b637b0284be88c515cedc59ad4554150af6043", strip_prefix = "spdlog-1.4.0", urls = ["https://github.com/gabime/spdlog/archive/v1.4.0.tar.gz"], + use_category = ["observability"], ), com_github_google_libprotobuf_mutator = dict( sha256 = "", strip_prefix = "libprotobuf-mutator-3521f47a2828da9ace403e4ecc4aece1a84feb36", # 2020-02-04 urls = ["https://github.com/google/libprotobuf-mutator/archive/3521f47a2828da9ace403e4ecc4aece1a84feb36.tar.gz"], + use_category = ["test"], ), com_github_gperftools_gperftools = dict( # TODO(cmluciano): Bump to release 2.8 @@ -107,6 +149,7 @@ REPOSITORY_LOCATIONS = dict( sha256 = "97f0bc2b389c29305f5d1d8cc4d95e9212c33b55827ae65476fc761d78e3ec5d", strip_prefix = "gperftools-gperftools-2.7.90", urls = ["https://github.com/gperftools/gperftools/archive/gperftools-2.7.90.tar.gz"], + use_category = ["test"], ), com_github_grpc_grpc = dict( # TODO(JimmyCYJ): Bump to release 1.27 @@ -114,42 +157,50 @@ REPOSITORY_LOCATIONS = dict( sha256 = "bbc8f020f4e85ec029b047fab939b8c81f3d67254b5c724e1003a2bc49ddd123", strip_prefix = "grpc-d8f4928fa779f6005a7fe55a176bdb373b0f910f", urls = ["https://github.com/grpc/grpc/archive/d8f4928fa779f6005a7fe55a176bdb373b0f910f.tar.gz"], + use_category = ["dataplane", "controlplane"], ), com_github_luajit_luajit = dict( sha256 = "409f7fe570d3c16558e594421c47bdd130238323c9d6fd6c83dedd2aaeb082a8", strip_prefix = "LuaJIT-2.1.0-beta3", urls = ["https://github.com/LuaJIT/LuaJIT/archive/v2.1.0-beta3.tar.gz"], + use_category = ["dataplane"], ), com_github_moonjit_moonjit = dict( sha256 = "83deb2c880488dfe7dd8ebf09e3b1e7613ef4b8420de53de6f712f01aabca2b6", strip_prefix = "moonjit-2.2.0", urls = ["https://github.com/moonjit/moonjit/archive/2.2.0.tar.gz"], + use_category = ["dataplane"], ), 
com_github_nghttp2_nghttp2 = dict( sha256 = "eb9d9046495a49dd40c7ef5d6c9907b51e5a6b320ea6e2add11eb8b52c982c47", strip_prefix = "nghttp2-1.40.0", urls = ["https://github.com/nghttp2/nghttp2/releases/download/v1.40.0/nghttp2-1.40.0.tar.gz"], + use_category = ["dataplane"], ), io_opentracing_cpp = dict( sha256 = "015c4187f7a6426a2b5196f0ccd982aa87f010cf61f507ae3ce5c90523f92301", strip_prefix = "opentracing-cpp-1.5.1", urls = ["https://github.com/opentracing/opentracing-cpp/archive/v1.5.1.tar.gz"], + use_category = ["observability"], ), com_lightstep_tracer_cpp = dict( sha256 = "0e99716598c010e56bc427ea3482be5ad2c534be8b039d172564deec1264a213", strip_prefix = "lightstep-tracer-cpp-3efe2372ee3d7c2138d6b26e542d757494a7938d", # 2020-03-24 urls = ["https://github.com/lightstep/lightstep-tracer-cpp/archive/3efe2372ee3d7c2138d6b26e542d757494a7938d.tar.gz"], + use_category = ["observability"], ), com_github_datadog_dd_opentracing_cpp = dict( sha256 = "6dc1088ab7f788b6c849fbaa6300517c8fdf88991a70b778be79c284c36857bf", strip_prefix = "dd-opentracing-cpp-1.1.3", urls = ["https://github.com/DataDog/dd-opentracing-cpp/archive/v1.1.3.tar.gz"], + use_category = ["observability"], ), com_github_google_benchmark = dict( sha256 = "3c6a165b6ecc948967a1ead710d4a181d7b0fbcaa183ef7ea84604994966221a", strip_prefix = "benchmark-1.5.0", urls = ["https://github.com/google/benchmark/archive/v1.5.0.tar.gz"], + use_category = ["test"], ), com_github_libevent_libevent = dict( sha256 = "549d34065eb2485dfad6c8de638caaa6616ed130eec36dd978f73b6bdd5af113", @@ -162,6 +213,7 @@ REPOSITORY_LOCATIONS = dict( strip_prefix = "libevent-0d7d85c2083f7a4c9efe01c061486f332b576d28", # 2019-07-02 urls = ["https://github.com/libevent/libevent/archive/0d7d85c2083f7a4c9efe01c061486f332b576d28.tar.gz"], + use_category = ["dataplane"], ), net_zlib = dict( # Use the dev branch of zlib to resolve fuzz bugs and out of bound @@ -171,130 +223,155 @@ REPOSITORY_LOCATIONS = dict( strip_prefix = "zlib-79baebe50e4d6b73ae1f8b603f0ef41300110aa3", # 2019-04-14 development branch urls = ["https://github.com/madler/zlib/archive/79baebe50e4d6b73ae1f8b603f0ef41300110aa3.tar.gz"], + use_category = ["dataplane"], ), com_github_jbeder_yaml_cpp = dict( sha256 = "77ea1b90b3718aa0c324207cb29418f5bced2354c2e483a9523d98c3460af1ed", strip_prefix = "yaml-cpp-yaml-cpp-0.6.3", urls = ["https://github.com/jbeder/yaml-cpp/archive/yaml-cpp-0.6.3.tar.gz"], + use_category = ["dataplane"], ), com_github_msgpack_msgpack_c = dict( sha256 = "433cbcd741e1813db9ae4b2e192b83ac7b1d2dd7968a3e11470eacc6f4ab58d2", strip_prefix = "msgpack-3.2.1", urls = ["https://github.com/msgpack/msgpack-c/releases/download/cpp-3.2.1/msgpack-3.2.1.tar.gz"], + use_category = ["observability"], ), com_github_google_jwt_verify = dict( sha256 = "d422a6eadd4bcdd0f9b122cd843a4015f8b18aebea6e1deb004bd4d401a8ef92", strip_prefix = "jwt_verify_lib-40e2cc938f4bcd059a97dc6c73f59ecfa5a71bac", # 2020-02-11 urls = ["https://github.com/google/jwt_verify_lib/archive/40e2cc938f4bcd059a97dc6c73f59ecfa5a71bac.tar.gz"], + use_category = ["dataplane"], ), com_github_nodejs_http_parser = dict( sha256 = "8fa0ab8770fd8425a9b431fdbf91623c4d7a9cdb842b9339289bd2b0b01b0d3d", strip_prefix = "http-parser-2.9.3", urls = ["https://github.com/nodejs/http-parser/archive/v2.9.3.tar.gz"], + use_category = ["dataplane"], ), com_github_pallets_jinja = dict( sha256 = "db49236731373e4f3118af880eb91bb0aa6978bc0cf8b35760f6a026f1a9ffc4", strip_prefix = 
"jinja-2.10.3", urls = ["https://github.com/pallets/jinja/archive/2.10.3.tar.gz"], + use_category = ["build"], ), com_github_pallets_markupsafe = dict( sha256 = "222a10e3237d92a9cd45ed5ea882626bc72bc5e0264d3ed0f2c9129fa69fc167", strip_prefix = "markupsafe-1.1.1/src", urls = ["https://github.com/pallets/markupsafe/archive/1.1.1.tar.gz"], + use_category = ["build"], ), com_github_tencent_rapidjson = dict( sha256 = "a2faafbc402394df0fa94602df4b5e4befd734aad6bb55dfef46f62fcaf1090b", strip_prefix = "rapidjson-dfbe1db9da455552f7a9ad5d2aea17dd9d832ac1", # Changes through 2019-12-02 urls = ["https://github.com/Tencent/rapidjson/archive/dfbe1db9da455552f7a9ad5d2aea17dd9d832ac1.tar.gz"], + use_category = ["dataplane"], ), com_github_twitter_common_lang = dict( sha256 = "56d1d266fd4767941d11c27061a57bc1266a3342e551bde3780f9e9eb5ad0ed1", strip_prefix = "twitter.common.lang-0.3.9/src", urls = ["https://files.pythonhosted.org/packages/08/bc/d6409a813a9dccd4920a6262eb6e5889e90381453a5f58938ba4cf1d9420/twitter.common.lang-0.3.9.tar.gz"], + use_category = ["dataplane"], ), com_github_twitter_common_rpc = dict( sha256 = "0792b63fb2fb32d970c2e9a409d3d00633190a22eb185145fe3d9067fdaa4514", strip_prefix = "twitter.common.rpc-0.3.9/src", urls = ["https://files.pythonhosted.org/packages/be/97/f5f701b703d0f25fbf148992cd58d55b4d08d3db785aad209255ee67e2d0/twitter.common.rpc-0.3.9.tar.gz"], + use_category = ["dataplane"], ), com_github_twitter_common_finagle_thrift = dict( sha256 = "1e3a57d11f94f58745e6b83348ecd4fa74194618704f45444a15bc391fde497a", strip_prefix = "twitter.common.finagle-thrift-0.3.9/src", urls = ["https://files.pythonhosted.org/packages/f9/e7/4f80d582578f8489226370762d2cf6bc9381175d1929eba1754e03f70708/twitter.common.finagle-thrift-0.3.9.tar.gz"], + use_category = ["dataplane"], ), com_google_googletest = dict( sha256 = "9dc9157a9a1551ec7a7e43daea9a694a0bb5fb8bec81235d8a1e6ef64c716dcb", strip_prefix = "googletest-release-1.10.0", urls = ["https://github.com/google/googletest/archive/release-1.10.0.tar.gz"], + use_category = ["test"], ), com_google_protobuf = dict( sha256 = "d7cfd31620a352b2ee8c1ed883222a0d77e44346643458e062e86b1d069ace3e", strip_prefix = "protobuf-3.10.1", urls = ["https://github.com/protocolbuffers/protobuf/releases/download/v3.10.1/protobuf-all-3.10.1.tar.gz"], + use_category = ["dataplane", "controlplane"], ), grpc_httpjson_transcoding = dict( sha256 = "62c8cb5ea2cca1142cde9d4a0778c52c6022345c3268c60ef81666946b958ad5", strip_prefix = "grpc-httpjson-transcoding-faf8af1e9788cd4385b94c8f85edab5ea5d4b2d6", # 2020-03-02 urls = ["https://github.com/grpc-ecosystem/grpc-httpjson-transcoding/archive/faf8af1e9788cd4385b94c8f85edab5ea5d4b2d6.tar.gz"], + use_category = ["dataplane"], ), io_bazel_rules_go = dict( sha256 = "e88471aea3a3a4f19ec1310a55ba94772d087e9ce46e41ae38ecebe17935de7b", urls = ["https://github.com/bazelbuild/rules_go/releases/download/v0.20.3/rules_go-v0.20.3.tar.gz"], + use_category = ["build"], ), rules_foreign_cc = dict( sha256 = "3184c244b32e65637a74213fc448964b687390eeeca42a36286f874c046bba15", strip_prefix = "rules_foreign_cc-7bc4be735b0560289f6b86ab6136ee25d20b65b7", # 2019-09-26 urls = ["https://github.com/bazelbuild/rules_foreign_cc/archive/7bc4be735b0560289f6b86ab6136ee25d20b65b7.tar.gz"], + use_category = ["build"], ), rules_python = dict( sha256 = "aa96a691d3a8177f3215b14b0edc9641787abaaa30363a080165d06ab65e1161", urls = 
["https://github.com/bazelbuild/rules_python/releases/download/0.0.1/rules_python-0.0.1.tar.gz"], + use_category = ["build"], ), six = dict( sha256 = "d16a0141ec1a18405cd4ce8b4613101da75da0e9a7aec5bdd4fa804d0e0eba73", urls = ["https://files.pythonhosted.org/packages/dd/bf/4138e7bfb757de47d1f4b6994648ec67a51efe58fa907c1e11e350cddfca/six-1.12.0.tar.gz"], + use_category = ["other"], ), io_opencensus_cpp = dict( sha256 = "193ffb4e13bd7886757fd22b61b7f7a400634412ad8e7e1071e73f57bedd7fc6", strip_prefix = "opencensus-cpp-04ed0211931f12b03c1a76b3907248ca4db7bc90", # 2020-03-24 urls = ["https://github.com/census-instrumentation/opencensus-cpp/archive/04ed0211931f12b03c1a76b3907248ca4db7bc90.tar.gz"], + use_category = ["observability"], ), com_github_curl = dict( sha256 = "01ae0c123dee45b01bbaef94c0bc00ed2aec89cb2ee0fd598e0d302a6b5e0a98", strip_prefix = "curl-7.69.1", urls = ["https://github.com/curl/curl/releases/download/curl-7_69_1/curl-7.69.1.tar.gz"], + use_category = ["dataplane"], ), com_googlesource_chromium_v8 = dict( # This archive was created using https://storage.googleapis.com/envoyproxy-wee8/wee8-archive.sh # and contains complete checkout of V8 with all dependencies necessary to build wee8. sha256 = "03ff00e41cf259db473dfade9548493e4a2372c0b701a66cd7ff76215bd55a64", urls = ["https://storage.googleapis.com/envoyproxy-wee8/wee8-8.1.307.28.tar.gz"], + use_category = ["dataplane"], ), com_googlesource_quiche = dict( # Static snapshot of https://quiche.googlesource.com/quiche/+archive/41c9fdead26b31deefae3c325a2cf1a873688ba3.tar.gz sha256 = "75af53154402e1654cfd32d8aaeed5fab4dbb79d3cab8c9866019d5369c1889e", urls = ["https://storage.googleapis.com/quiche-envoy-integration/41c9fdead26b31deefae3c325a2cf1a873688ba3.tar.gz"], + use_category = ["dataplane"], ), com_googlesource_googleurl = dict( # Static snapshot of https://quiche.googlesource.com/quiche/+archive/googleurl_dbf5ad147f60afc125e99db7549402af49a5eae8.tar.gz sha256 = "b40cd22cadba577b7281a76db66f6a66dd744edbad8cc2c861c2c976ef721e4d", urls = ["https://storage.googleapis.com/quiche-envoy-integration/googleurl_dbf5ad147f60afc125e99db7549402af49a5eae8.tar.gz"], + use_category = ["dataplane"], ), com_google_cel_cpp = dict( sha256 = "326ec397b55e39f48bd5380ccded1af5b04653ee96e769cd4d694f9a3bacef50", strip_prefix = "cel-cpp-80e1cca533190d537a780ad007e8db64164c582e", # 2020-02-26 urls = ["https://github.com/google/cel-cpp/archive/80e1cca533190d537a780ad007e8db64164c582e.tar.gz"], + use_category = ["dataplane"], ), com_googlesource_code_re2 = dict( sha256 = "04ee2aaebaa5038554683329afc494e684c30f82f2a1e47eb62450e59338f84d", strip_prefix = "re2-2020-03-03", urls = ["https://github.com/google/re2/archive/2020-03-03.tar.gz"], + use_category = ["dataplane"], ), # Included to access FuzzedDataProvider.h. This is compiler agnostic but # provided as part of the compiler-rt source distribution. We can't use the @@ -304,30 +381,36 @@ REPOSITORY_LOCATIONS = dict( # Only allow peeking at fuzzer related files for now. 
strip_prefix = "compiler-rt-9.0.0.src/lib", urls = ["http://releases.llvm.org/9.0.0/compiler-rt-9.0.0.src.tar.xz"], + use_category = ["test"], ), fuzzit_linux = dict( sha256 = "9ca76ac1c22d9360936006efddf992977ebf8e4788ded8e5f9d511285c9ac774", urls = ["https://github.com/fuzzitdev/fuzzit/releases/download/v2.4.76/fuzzit_Linux_x86_64.zip"], + use_category = ["build", "test"], ), upb = dict( sha256 = "e9f281c56ab1eb1f97a80ca8a83bb7ef73d230eabb8591f83876f4e7b85d9b47", strip_prefix = "upb-8a3ae1ef3e3e3f26b45dec735c5776737fc7247f", # 2019-11-19 urls = ["https://github.com/protocolbuffers/upb/archive/8a3ae1ef3e3e3f26b45dec735c5776737fc7247f.tar.gz"], + use_category = ["dataplane", "controlplane"], ), kafka_source = dict( sha256 = "e7b748a62e432b5770db6dbb3b034c68c0ea212812cb51603ee7f3a8a35f06be", strip_prefix = "kafka-2.4.0/clients/src/main/resources/common/message", urls = ["https://github.com/apache/kafka/archive/2.4.0.zip"], + use_category = ["dataplane"], ), kafka_server_binary = dict( sha256 = "b9582bab0c3e8d131953b1afa72d6885ca1caae0061c2623071e7f396f2ccfee", strip_prefix = "kafka_2.12-2.4.0", urls = ["http://us.mirrors.quenda.co/apache/kafka/2.4.0/kafka_2.12-2.4.0.tgz"], + use_category = ["test"], ), kafka_python_client = dict( sha256 = "454bf3aafef9348017192417b7f0828a347ec2eaf3efba59336f3a3b68f10094", strip_prefix = "kafka-python-2.0.0", urls = ["https://github.com/dpkp/kafka-python/archive/2.0.0.tar.gz"], + use_category = ["test"], ), ) From 21f88af609ad58672ebaae4fe36fbeae7ac8e33e Mon Sep 17 00:00:00 2001 From: Koki Tomoshige <36136133+tomocy@users.noreply.github.com> Date: Fri, 1 May 2020 01:39:05 +0900 Subject: [PATCH 065/909] docs: fix typo in deprecation faq (#11010) Fix grammar error in FAQ of deprecation. Risk Level: Low Docs Changes: typo Signed-off-by: tomocy --- docs/root/faq/configuration/deprecation.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/root/faq/configuration/deprecation.rst b/docs/root/faq/configuration/deprecation.rst index c71ee63645a8..7d503bde03c1 100644 --- a/docs/root/faq/configuration/deprecation.rst +++ b/docs/root/faq/configuration/deprecation.rst @@ -11,5 +11,5 @@ annotated in the API proto itself and explained in detail in the For the first 3 months following deprecation, use of deprecated fields will result in a logged warning and incrementing the :ref:`deprecated_feature_use ` counter. After that point, the field will be annotated as fatal-by-default and further use of the field -will will be treated as invalid configuration unless +will be treated as invalid configuration unless :ref:`runtime overrides ` are employed to re-enable use. 
From 35bc4fc7ed1eaceaaf5a2d413d75f0a91d973d3f Mon Sep 17 00:00:00 2001 From: yanavlasov Date: Thu, 30 Apr 2020 12:40:41 -0400 Subject: [PATCH 066/909] test: remove unused dependency on the dynamo filter from the server_test (#11013) Risk Level: Low Testing: Unit Tests Docs Changes: N/A Release Notes: N/A Signed-off-by: Yan Avlasov --- test/server/BUILD | 1 - 1 file changed, 1 deletion(-) diff --git a/test/server/BUILD b/test/server/BUILD index cc513b6ab6ab..dba3d54d1c57 100644 --- a/test/server/BUILD +++ b/test/server/BUILD @@ -350,7 +350,6 @@ envoy_cc_test( "//source/common/common:version_lib", "//source/extensions/access_loggers/file:config", "//source/extensions/filters/http/buffer:config", - "//source/extensions/filters/http/dynamo:config", "//source/extensions/filters/http/grpc_http1_bridge:config", "//source/extensions/filters/http/health_check:config", "//source/extensions/filters/http/ratelimit:config", From 3f3bfa21aa14806ee0cd80d406efd29e307728e4 Mon Sep 17 00:00:00 2001 From: Dmitri Dolguikh Date: Thu, 30 Apr 2020 09:43:17 -0700 Subject: [PATCH 067/909] Fixed a test failure due to the the oder in which destructors were called. (#11001) Commit Message: Fixed a test failure due to the the order in which destructors were called. MockIsolatedStatsStore is being destroyed before ScopePrefixer, which holds a reference to it, resulting in a "virtual method called" error. Signed-off-by: Dmitri Dolguikh --- test/mocks/server/mocks.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/mocks/server/mocks.h b/test/mocks/server/mocks.h index 9d2270c32482..0992ff6520ea 100644 --- a/test/mocks/server/mocks.h +++ b/test/mocks/server/mocks.h @@ -432,8 +432,8 @@ class MockInstance : public Instance { TimeSource& timeSource() override { return time_system_; } - testing::NiceMock thread_local_; NiceMock stats_store_; + testing::NiceMock thread_local_; std::shared_ptr> dns_resolver_{ new testing::NiceMock()}; testing::NiceMock api_; From 62a4d3cc73fe7cefff2882e4c630133e0bbb7528 Mon Sep 17 00:00:00 2001 From: Misha Efimov Date: Thu, 30 Apr 2020 13:13:16 -0400 Subject: [PATCH 068/909] Reflect xDS version as a TextReadout stats. (#10969) Add SubscriptionStats::version_text field with text representation of the xDS version. Signed-off-by: Misha Efimov --- .../configuration/overview/mgmt_server.rst | 1 + docs/root/version_history/current.rst | 1 + include/envoy/config/subscription.h | 8 +-- include/envoy/stats/stats.h | 2 +- .../config/filesystem_subscription_impl.cc | 1 + .../common/config/grpc_subscription_impl.cc | 2 + .../common/config/http_subscription_impl.cc | 1 + source/common/config/utility.h | 3 +- source/common/stats/allocator_impl.cc | 5 +- source/common/stats/null_text_readout.h | 2 +- .../filesystem_subscription_impl_test.cc | 18 +++---- .../filesystem_subscription_test_harness.h | 6 +-- .../config/grpc_subscription_impl_test.cc | 38 ++++++------- .../config/http_subscription_impl_test.cc | 32 +++++------ test/common/config/subscription_impl_test.cc | 54 ++++++++++--------- .../common/config/subscription_test_harness.h | 7 ++- test/mocks/stats/mocks.h | 2 +- 17 files changed, 100 insertions(+), 83 deletions(-) diff --git a/docs/root/configuration/overview/mgmt_server.rst b/docs/root/configuration/overview/mgmt_server.rst index 3004ff329163..7661f9d513fc 100644 --- a/docs/root/configuration/overview/mgmt_server.rst +++ b/docs/root/configuration/overview/mgmt_server.rst @@ -53,4 +53,5 @@ The following statistics are generated for all subscriptions. 
update_rejected, Counter, Total API fetches that failed because of schema/validation errors update_time, Gauge, Timestamp of the last successful API fetch attempt as milliseconds since the epoch. Refreshed even after a trivial configuration reload that contained no configuration changes. version, Gauge, Hash of the contents from the last successful API fetch + version_text, TextReadout, The version text from the last successful API fetch control_plane.connected_state, Gauge, A boolean (1 for connected and 0 for disconnected) that indicates the current connection state with management server diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 012f1eb07bbb..9c8cdd087788 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -7,6 +7,7 @@ Changes * access loggers: added GRPC_STATUS operator on logging format. * access loggers: applied existing buffer limits to the non-google gRPC access logs, as well as :ref:`stats ` for logged / dropped logs. * access loggers: extened specifier for FilterStateFormatter to output :ref:`unstructured log string `. +* config: added :ref:`version_text ` stat that reflects xDS version. * dynamic forward proxy: added :ref:`SNI based dynamic forward proxy ` support. * fault: added support for controlling the percentage of requests that abort, delay and response rate limits faults are applied to using :ref:`HTTP headers ` to the HTTP fault filter. diff --git a/include/envoy/config/subscription.h b/include/envoy/config/subscription.h index d56242600758..5b041f2464e4 100644 --- a/include/envoy/config/subscription.h +++ b/include/envoy/config/subscription.h @@ -97,20 +97,22 @@ using SubscriptionPtr = std::unique_ptr; /** * Per subscription stats. @see stats_macros.h */ -#define ALL_SUBSCRIPTION_STATS(COUNTER, GAUGE) \ +#define ALL_SUBSCRIPTION_STATS(COUNTER, GAUGE, TEXT_READOUT) \ COUNTER(init_fetch_timeout) \ COUNTER(update_attempt) \ COUNTER(update_failure) \ COUNTER(update_rejected) \ COUNTER(update_success) \ GAUGE(update_time, NeverImport) \ - GAUGE(version, NeverImport) + GAUGE(version, NeverImport) \ + TEXT_READOUT(version_text) /** * Struct definition for per subscription stats. @see stats_macros.h */ struct SubscriptionStats { - ALL_SUBSCRIPTION_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT) + ALL_SUBSCRIPTION_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT, + GENERATE_TEXT_READOUT_STRUCT) }; } // namespace Config diff --git a/include/envoy/stats/stats.h b/include/envoy/stats/stats.h index c723152ab549..eb7677bef742 100644 --- a/include/envoy/stats/stats.h +++ b/include/envoy/stats/stats.h @@ -174,7 +174,7 @@ class TextReadout : public virtual Metric { * Sets the value of this TextReadout by moving the input |value| to minimize * buffer copies under the lock. */ - virtual void set(std::string&& value) PURE; + virtual void set(absl::string_view value) PURE; /** * @return the copy of this TextReadout value. 
*/ diff --git a/source/common/config/filesystem_subscription_impl.cc b/source/common/config/filesystem_subscription_impl.cc index ea9b8173bac4..8a42d1b42884 100644 --- a/source/common/config/filesystem_subscription_impl.cc +++ b/source/common/config/filesystem_subscription_impl.cc @@ -54,6 +54,7 @@ void FilesystemSubscriptionImpl::refresh() { callbacks_.onConfigUpdate(message.resources(), message.version_info()); stats_.update_time_.set(DateUtil::nowToMilliseconds(api_.timeSource())); stats_.version_.set(HashUtil::xxHash64(message.version_info())); + stats_.version_text_.set(message.version_info()); stats_.update_success_.inc(); ENVOY_LOG(debug, "Filesystem config update accepted for {}: {}", path_, message.DebugString()); } catch (const ProtobufMessage::UnknownProtoFieldException& e) { diff --git a/source/common/config/grpc_subscription_impl.cc b/source/common/config/grpc_subscription_impl.cc index 22b9a468af99..83ddcb5bccad 100644 --- a/source/common/config/grpc_subscription_impl.cc +++ b/source/common/config/grpc_subscription_impl.cc @@ -64,6 +64,7 @@ void GrpcSubscriptionImpl::onConfigUpdate( stats_.update_attempt_.inc(); stats_.update_time_.set(DateUtil::nowToMilliseconds(dispatcher_.timeSource())); stats_.version_.set(HashUtil::xxHash64(version_info)); + stats_.version_text_.set(version_info); ENVOY_LOG(debug, "gRPC config for {} accepted with {} resources with version {}", type_url_, resources.size(), version_info); } @@ -78,6 +79,7 @@ void GrpcSubscriptionImpl::onConfigUpdate( stats_.update_success_.inc(); stats_.update_time_.set(DateUtil::nowToMilliseconds(dispatcher_.timeSource())); stats_.version_.set(HashUtil::xxHash64(system_version_info)); + stats_.version_text_.set(system_version_info); } void GrpcSubscriptionImpl::onConfigUpdateFailed(ConfigUpdateFailureReason reason, diff --git a/source/common/config/http_subscription_impl.cc b/source/common/config/http_subscription_impl.cc index ab3974f9880e..223f09d6ea3f 100644 --- a/source/common/config/http_subscription_impl.cc +++ b/source/common/config/http_subscription_impl.cc @@ -89,6 +89,7 @@ void HttpSubscriptionImpl::parseResponse(const Http::ResponseMessage& response) request_.set_version_info(message.version_info()); stats_.update_time_.set(DateUtil::nowToMilliseconds(dispatcher_.timeSource())); stats_.version_.set(HashUtil::xxHash64(request_.version_info())); + stats_.version_text_.set(request_.version_info()); stats_.update_success_.inc(); } catch (const EnvoyException& e) { handleFailure(Config::ConfigUpdateFailureReason::UpdateRejected, &e); diff --git a/source/common/config/utility.h b/source/common/config/utility.h index 363e12ab1982..3c44f9d98eaf 100644 --- a/source/common/config/utility.h +++ b/source/common/config/utility.h @@ -190,7 +190,8 @@ class Utility { * @return SubscriptionStats for scope. 
*/ static SubscriptionStats generateStats(Stats::Scope& scope) { - return {ALL_SUBSCRIPTION_STATS(POOL_COUNTER(scope), POOL_GAUGE(scope))}; + return { + ALL_SUBSCRIPTION_STATS(POOL_COUNTER(scope), POOL_GAUGE(scope), POOL_TEXT_READOUT(scope))}; } /** diff --git a/source/common/stats/allocator_impl.cc b/source/common/stats/allocator_impl.cc index 06db3ee37f52..4f41b208e4ad 100644 --- a/source/common/stats/allocator_impl.cc +++ b/source/common/stats/allocator_impl.cc @@ -240,9 +240,10 @@ class TextReadoutImpl : public StatsSharedImpl { } // Stats::TextReadout - void set(std::string&& value) override { + void set(absl::string_view value) override { + std::string value_copy(value); absl::MutexLock lock(&mutex_); - value_ = std::move(value); + value_ = std::move(value_copy); } std::string value() const override { absl::MutexLock lock(&mutex_); diff --git a/source/common/stats/null_text_readout.h b/source/common/stats/null_text_readout.h index d3e9cc832e6b..da6c0976abf7 100644 --- a/source/common/stats/null_text_readout.h +++ b/source/common/stats/null_text_readout.h @@ -23,7 +23,7 @@ class NullTextReadoutImpl : public MetricImpl { MetricImpl::clear(symbol_table_); } - void set(std::string&&) override {} + void set(absl::string_view) override {} std::string value() const override { return std::string(); } // Metric diff --git a/test/common/config/filesystem_subscription_impl_test.cc b/test/common/config/filesystem_subscription_impl_test.cc index ddbf73b72d6c..798c29b93821 100644 --- a/test/common/config/filesystem_subscription_impl_test.cc +++ b/test/common/config/filesystem_subscription_impl_test.cc @@ -21,20 +21,20 @@ class FilesystemSubscriptionImplTest : public testing::Test, // Validate that the client can recover from bad JSON responses. TEST_F(FilesystemSubscriptionImplTest, BadJsonRecovery) { startSubscription({"cluster0", "cluster1"}); - EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0)); + EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, "")); EXPECT_CALL(callbacks_, onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::UpdateRejected, _)); updateFile(";!@#badjso n"); - EXPECT_TRUE(statsAre(2, 0, 0, 1, 0, 0, 0)); + EXPECT_TRUE(statsAre(2, 0, 0, 1, 0, 0, 0, "")); deliverConfigUpdate({"cluster0", "cluster1"}, "0", true); - EXPECT_TRUE(statsAre(3, 1, 0, 1, 0, TEST_TIME_MILLIS, 7148434200721666028)); + EXPECT_TRUE(statsAre(3, 1, 0, 1, 0, TEST_TIME_MILLIS, 7148434200721666028, "0")); } // Validate that a file that is initially available results in a successful update. TEST_F(FilesystemSubscriptionImplTest, InitialFile) { updateFile("{\"versionInfo\": \"0\", \"resources\": []}", false); startSubscription({"cluster0", "cluster1"}); - EXPECT_TRUE(statsAre(1, 1, 0, 0, 0, TEST_TIME_MILLIS, 7148434200721666028)); + EXPECT_TRUE(statsAre(1, 1, 0, 0, 0, TEST_TIME_MILLIS, 7148434200721666028, "0")); } // Validate that if we fail to set a watch, we get a sensible warning. @@ -57,24 +57,24 @@ TEST(MiscFilesystemSubscriptionImplTest, BadWatch) { // rejected. 
TEST_F(FilesystemSubscriptionImplTest, UpdateTimeNotChangedOnUpdateReject) { startSubscription({"cluster0", "cluster1"}); - EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0)); + EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, "")); EXPECT_CALL(callbacks_, onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::UpdateRejected, _)); updateFile(";!@#badjso n"); - EXPECT_TRUE(statsAre(2, 0, 0, 1, 0, 0, 0)); + EXPECT_TRUE(statsAre(2, 0, 0, 1, 0, 0, 0, "")); } // Validate that the update_time statistic is changed after a trivial configuration update // (update that resulted in no change). TEST_F(FilesystemSubscriptionImplTest, UpdateTimeChangedOnUpdateSuccess) { startSubscription({"cluster0", "cluster1"}); - EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0)); + EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, "")); deliverConfigUpdate({"cluster0", "cluster1"}, "0", true); - EXPECT_TRUE(statsAre(2, 1, 0, 0, 0, TEST_TIME_MILLIS, 7148434200721666028)); + EXPECT_TRUE(statsAre(2, 1, 0, 0, 0, TEST_TIME_MILLIS, 7148434200721666028, "0")); // Advance the simulated time. simTime().setSystemTime(SystemTime(std::chrono::milliseconds(TEST_TIME_MILLIS + 1))); deliverConfigUpdate({"cluster0", "cluster1"}, "0", true); - EXPECT_TRUE(statsAre(3, 2, 0, 0, 0, TEST_TIME_MILLIS + 1, 7148434200721666028)); + EXPECT_TRUE(statsAre(3, 2, 0, 0, 0, TEST_TIME_MILLIS + 1, 7148434200721666028, "0")); } } // namespace diff --git a/test/common/config/filesystem_subscription_test_harness.h b/test/common/config/filesystem_subscription_test_harness.h index 08de45f776b1..42bf2913e4a1 100644 --- a/test/common/config/filesystem_subscription_test_harness.h +++ b/test/common/config/filesystem_subscription_test_harness.h @@ -85,12 +85,12 @@ class FilesystemSubscriptionTestHarness : public SubscriptionTestHarness { } AssertionResult statsAre(uint32_t attempt, uint32_t success, uint32_t rejected, uint32_t failure, - uint32_t init_fetch_timeout, uint64_t update_time, - uint64_t version) override { + uint32_t init_fetch_timeout, uint64_t update_time, uint64_t version, + absl::string_view version_text) override { // The first attempt always fail unless there was a file there to begin with. return SubscriptionTestHarness::statsAre(attempt, success, rejected, failure + (file_at_start_ ? 0 : 1), init_fetch_timeout, - update_time, version); + update_time, version, version_text); } void expectConfigUpdateFailed() override { stats_.update_failure_.inc(); } diff --git a/test/common/config/grpc_subscription_impl_test.cc b/test/common/config/grpc_subscription_impl_test.cc index 2036e69bf336..eb51a0d051d7 100644 --- a/test/common/config/grpc_subscription_impl_test.cc +++ b/test/common/config/grpc_subscription_impl_test.cc @@ -22,7 +22,7 @@ TEST_F(GrpcSubscriptionImplTest, StreamCreationFailure) { EXPECT_CALL(random_, random()); EXPECT_CALL(*timer_, enableTimer(_, _)); subscription_->start({"cluster0", "cluster1"}); - EXPECT_TRUE(statsAre(2, 0, 0, 1, 0, 0, 0)); + EXPECT_TRUE(statsAre(2, 0, 0, 1, 0, 0, 0, "")); // Ensure this doesn't cause an issue by sending a request, since we don't // have a gRPC stream. subscription_->updateResourceInterest({"cluster2"}); @@ -32,14 +32,14 @@ TEST_F(GrpcSubscriptionImplTest, StreamCreationFailure) { expectSendMessage({"cluster2"}, "", true); timer_cb_(); - EXPECT_TRUE(statsAre(3, 0, 0, 1, 0, 0, 0)); + EXPECT_TRUE(statsAre(3, 0, 0, 1, 0, 0, 0, "")); verifyControlPlaneStats(1); } // Validate that the client can recover from a remote stream closure via retry. 
TEST_F(GrpcSubscriptionImplTest, RemoteStreamClose) { startSubscription({"cluster0", "cluster1"}); - EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0)); + EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, "")); // onConfigUpdateFailed() should not be called for gRPC stream connection failure EXPECT_CALL(callbacks_, onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure, _)) @@ -47,14 +47,14 @@ TEST_F(GrpcSubscriptionImplTest, RemoteStreamClose) { EXPECT_CALL(*timer_, enableTimer(_, _)); EXPECT_CALL(random_, random()); mux_->grpcStreamForTest().onRemoteClose(Grpc::Status::WellKnownGrpcStatus::Canceled, ""); - EXPECT_TRUE(statsAre(2, 0, 0, 1, 0, 0, 0)); + EXPECT_TRUE(statsAre(2, 0, 0, 1, 0, 0, 0, "")); verifyControlPlaneStats(0); // Retry and succeed. EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); expectSendMessage({"cluster0", "cluster1"}, "", true); timer_cb_(); - EXPECT_TRUE(statsAre(2, 0, 0, 1, 0, 0, 0)); + EXPECT_TRUE(statsAre(2, 0, 0, 1, 0, 0, 0, "")); } // Validate that When the management server gets multiple requests for the same version, it can @@ -62,43 +62,43 @@ TEST_F(GrpcSubscriptionImplTest, RemoteStreamClose) { TEST_F(GrpcSubscriptionImplTest, RepeatedNonce) { InSequence s; startSubscription({"cluster0", "cluster1"}); - EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0)); + EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, "")); // First with the initial, empty version update to "0". updateResourceInterest({"cluster2"}); - EXPECT_TRUE(statsAre(2, 0, 0, 0, 0, 0, 0)); + EXPECT_TRUE(statsAre(2, 0, 0, 0, 0, 0, 0, "")); deliverConfigUpdate({"cluster0", "cluster2"}, "0", false); - EXPECT_TRUE(statsAre(3, 0, 1, 0, 0, 0, 0)); + EXPECT_TRUE(statsAre(3, 0, 1, 0, 0, 0, 0, "")); deliverConfigUpdate({"cluster0", "cluster2"}, "0", true); - EXPECT_TRUE(statsAre(4, 1, 1, 0, 0, TEST_TIME_MILLIS, 7148434200721666028)); + EXPECT_TRUE(statsAre(4, 1, 1, 0, 0, TEST_TIME_MILLIS, 7148434200721666028, "0")); // Now with version "0" update to "1". 
updateResourceInterest({"cluster3"}); - EXPECT_TRUE(statsAre(5, 1, 1, 0, 0, TEST_TIME_MILLIS, 7148434200721666028)); - deliverConfigUpdate({"cluster3"}, "1", false); - EXPECT_TRUE(statsAre(6, 1, 2, 0, 0, TEST_TIME_MILLIS, 7148434200721666028)); - deliverConfigUpdate({"cluster3"}, "1", true); - EXPECT_TRUE(statsAre(7, 2, 2, 0, 0, TEST_TIME_MILLIS, 13237225503670494420U)); + EXPECT_TRUE(statsAre(5, 1, 1, 0, 0, TEST_TIME_MILLIS, 7148434200721666028, "0")); + deliverConfigUpdate({"cluster3"}, "42", false); + EXPECT_TRUE(statsAre(6, 1, 2, 0, 0, TEST_TIME_MILLIS, 7148434200721666028, "0")); + deliverConfigUpdate({"cluster3"}, "42", true); + EXPECT_TRUE(statsAre(7, 2, 2, 0, 0, TEST_TIME_MILLIS, 7919287270473417401, "42")); } TEST_F(GrpcSubscriptionImplTest, UpdateTimeNotChangedOnUpdateReject) { InSequence s; startSubscription({"cluster0", "cluster1"}); - EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0)); + EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, "")); deliverConfigUpdate({"cluster0", "cluster2"}, "0", false); - EXPECT_TRUE(statsAre(2, 0, 1, 0, 0, 0, 0)); + EXPECT_TRUE(statsAre(2, 0, 1, 0, 0, 0, 0, "")); } TEST_F(GrpcSubscriptionImplTest, UpdateTimeChangedOnUpdateSuccess) { InSequence s; startSubscription({"cluster0", "cluster1"}); - EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0)); + EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, "")); deliverConfigUpdate({"cluster0", "cluster2"}, "0", true); - EXPECT_TRUE(statsAre(2, 1, 0, 0, 0, TEST_TIME_MILLIS, 7148434200721666028)); + EXPECT_TRUE(statsAre(2, 1, 0, 0, 0, TEST_TIME_MILLIS, 7148434200721666028, "0")); // Advance the simulated time and verify that a trivial update (no change) also changes the update // time. simTime().setSystemTime(SystemTime(std::chrono::milliseconds(TEST_TIME_MILLIS + 1))); deliverConfigUpdate({"cluster0", "cluster2"}, "0", true); - EXPECT_TRUE(statsAre(2, 2, 0, 0, 0, TEST_TIME_MILLIS + 1, 7148434200721666028)); + EXPECT_TRUE(statsAre(2, 2, 0, 0, 0, TEST_TIME_MILLIS + 1, 7148434200721666028, "0")); } } // namespace diff --git a/test/common/config/http_subscription_impl_test.cc b/test/common/config/http_subscription_impl_test.cc index d79884ef1915..abda847f03c4 100644 --- a/test/common/config/http_subscription_impl_test.cc +++ b/test/common/config/http_subscription_impl_test.cc @@ -19,11 +19,11 @@ TEST_F(HttpSubscriptionImplTest, OnRequestReset) { onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure, _)) .Times(0); http_callbacks_->onFailure(http_request_, Http::AsyncClient::FailureReason::Reset); - EXPECT_TRUE(statsAre(1, 0, 0, 1, 0, 0, 0)); + EXPECT_TRUE(statsAre(1, 0, 0, 1, 0, 0, 0, "")); timerTick(); - EXPECT_TRUE(statsAre(2, 0, 0, 1, 0, 0, 0)); - deliverConfigUpdate({"cluster0", "cluster1"}, "0", true); - EXPECT_TRUE(statsAre(3, 1, 0, 1, 0, TEST_TIME_MILLIS, 7148434200721666028)); + EXPECT_TRUE(statsAre(2, 0, 0, 1, 0, 0, 0, "")); + deliverConfigUpdate({"cluster0", "cluster1"}, "42", true); + EXPECT_TRUE(statsAre(3, 1, 0, 1, 0, TEST_TIME_MILLIS, 7919287270473417401, "42")); } // Validate that the client can recover from bad JSON responses. 
@@ -38,48 +38,48 @@ TEST_F(HttpSubscriptionImplTest, BadJsonRecovery) { EXPECT_CALL(callbacks_, onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::UpdateRejected, _)); http_callbacks_->onSuccess(http_request_, std::move(message)); - EXPECT_TRUE(statsAre(1, 0, 1, 0, 0, 0, 0)); + EXPECT_TRUE(statsAre(1, 0, 1, 0, 0, 0, 0, "")); request_in_progress_ = false; timerTick(); - EXPECT_TRUE(statsAre(2, 0, 1, 0, 0, 0, 0)); + EXPECT_TRUE(statsAre(2, 0, 1, 0, 0, 0, 0, "")); deliverConfigUpdate({"cluster0", "cluster1"}, "0", true); - EXPECT_TRUE(statsAre(3, 1, 1, 0, 0, TEST_TIME_MILLIS, 7148434200721666028)); + EXPECT_TRUE(statsAre(3, 1, 1, 0, 0, TEST_TIME_MILLIS, 7148434200721666028, "0")); } TEST_F(HttpSubscriptionImplTest, ConfigNotModified) { startSubscription({"cluster0", "cluster1"}); - EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0)); + EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, "")); timerTick(); - EXPECT_TRUE(statsAre(2, 0, 0, 0, 0, 0, 0)); + EXPECT_TRUE(statsAre(2, 0, 0, 0, 0, 0, 0, "")); // accept and modify. deliverConfigUpdate({"cluster0", "cluster1"}, "0", true, true, "200"); - EXPECT_TRUE(statsAre(3, 1, 0, 0, 0, TEST_TIME_MILLIS, 7148434200721666028)); + EXPECT_TRUE(statsAre(3, 1, 0, 0, 0, TEST_TIME_MILLIS, 7148434200721666028, "0")); // accept and does not modify. deliverConfigUpdate({"cluster0", "cluster1"}, "0", true, false, "304"); - EXPECT_TRUE(statsAre(4, 1, 0, 0, 0, TEST_TIME_MILLIS, 7148434200721666028)); + EXPECT_TRUE(statsAre(4, 1, 0, 0, 0, TEST_TIME_MILLIS, 7148434200721666028, "0")); } TEST_F(HttpSubscriptionImplTest, UpdateTimeNotChangedOnUpdateReject) { startSubscription({"cluster0", "cluster1"}); - EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0)); + EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, "")); deliverConfigUpdate({"cluster0", "cluster1"}, "0", false); - EXPECT_TRUE(statsAre(2, 0, 1, 0, 0, 0, 0)); + EXPECT_TRUE(statsAre(2, 0, 1, 0, 0, 0, 0, "")); } TEST_F(HttpSubscriptionImplTest, UpdateTimeChangedOnUpdateSuccess) { startSubscription({"cluster0", "cluster1"}); - EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0)); + EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, "")); deliverConfigUpdate({"cluster0", "cluster1"}, "0", true); - EXPECT_TRUE(statsAre(2, 1, 0, 0, 0, TEST_TIME_MILLIS, 7148434200721666028)); + EXPECT_TRUE(statsAre(2, 1, 0, 0, 0, TEST_TIME_MILLIS, 7148434200721666028, "0")); // Advance the simulated time and verify that a trivial update (no change) also changes the update // time. 
simTime().setSystemTime(SystemTime(std::chrono::milliseconds(TEST_TIME_MILLIS + 1))); deliverConfigUpdate({"cluster0", "cluster1"}, "0", true); - EXPECT_TRUE(statsAre(3, 2, 0, 0, 0, TEST_TIME_MILLIS + 1, 7148434200721666028)); + EXPECT_TRUE(statsAre(3, 2, 0, 0, 0, TEST_TIME_MILLIS + 1, 7148434200721666028, "0")); } } // namespace diff --git a/test/common/config/subscription_impl_test.cc b/test/common/config/subscription_impl_test.cc index cbda19812c0a..d8e48bcb820c 100644 --- a/test/common/config/subscription_impl_test.cc +++ b/test/common/config/subscription_impl_test.cc @@ -55,9 +55,10 @@ class SubscriptionImplTest : public testing::TestWithParam { } AssertionResult statsAre(uint32_t attempt, uint32_t success, uint32_t rejected, uint32_t failure, - uint32_t init_fetch_timeout, uint64_t update_time, uint64_t version) { + uint32_t init_fetch_timeout, uint64_t update_time, uint64_t version, + std::string version_text) { return test_harness_->statsAre(attempt, success, rejected, failure, init_fetch_timeout, - update_time, version); + update_time, version, version_text); } void deliverConfigUpdate(const std::vector cluster_names, const std::string& version, @@ -92,57 +93,58 @@ INSTANTIATE_TEST_SUITE_P(SubscriptionImplTest, SubscriptionImplInitFetchTimeoutT // Validate basic request-response succeeds. TEST_P(SubscriptionImplTest, InitialRequestResponse) { startSubscription({"cluster0", "cluster1"}); - EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0)); - deliverConfigUpdate({"cluster0", "cluster1"}, "0", true); - EXPECT_TRUE(statsAre(2, 1, 0, 0, 0, TEST_TIME_MILLIS, 7148434200721666028)); + EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, "")); + deliverConfigUpdate({"cluster0", "cluster1"}, "v25-ubuntu18-beta", true); + EXPECT_TRUE( + statsAre(2, 1, 0, 0, 0, TEST_TIME_MILLIS, 18202868392629624077U, "v25-ubuntu18-beta")); } // Validate that multiple streamed updates succeed. TEST_P(SubscriptionImplTest, ResponseStream) { startSubscription({"cluster0", "cluster1"}); - EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0)); - deliverConfigUpdate({"cluster0", "cluster1"}, "0", true); - EXPECT_TRUE(statsAre(2, 1, 0, 0, 0, TEST_TIME_MILLIS, 7148434200721666028)); - deliverConfigUpdate({"cluster0", "cluster1"}, "1", true); - EXPECT_TRUE(statsAre(3, 2, 0, 0, 0, TEST_TIME_MILLIS, 13237225503670494420U)); + EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, "")); + deliverConfigUpdate({"cluster0", "cluster1"}, "1.2.3.4", true); + EXPECT_TRUE(statsAre(2, 1, 0, 0, 0, TEST_TIME_MILLIS, 14026795738668939420U, "1.2.3.4")); + deliverConfigUpdate({"cluster0", "cluster1"}, "5_6_7", true); + EXPECT_TRUE(statsAre(3, 2, 0, 0, 0, TEST_TIME_MILLIS, 7612520132475921171U, "5_6_7")); } // Validate that the client can reject a config. TEST_P(SubscriptionImplTest, RejectConfig) { startSubscription({"cluster0", "cluster1"}); - EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0)); + EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, "")); deliverConfigUpdate({"cluster0", "cluster1"}, "0", false); - EXPECT_TRUE(statsAre(2, 0, 1, 0, 0, 0, 0)); + EXPECT_TRUE(statsAre(2, 0, 1, 0, 0, 0, 0, "")); } // Validate that the client can reject a config and accept the same config later. 
TEST_P(SubscriptionImplTest, RejectAcceptConfig) { startSubscription({"cluster0", "cluster1"}); - EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0)); + EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, "")); deliverConfigUpdate({"cluster0", "cluster1"}, "0", false); - EXPECT_TRUE(statsAre(2, 0, 1, 0, 0, 0, 0)); + EXPECT_TRUE(statsAre(2, 0, 1, 0, 0, 0, 0, "")); deliverConfigUpdate({"cluster0", "cluster1"}, "0", true); - EXPECT_TRUE(statsAre(3, 1, 1, 0, 0, TEST_TIME_MILLIS, 7148434200721666028)); + EXPECT_TRUE(statsAre(3, 1, 1, 0, 0, TEST_TIME_MILLIS, 7148434200721666028, "0")); } // Validate that the client can reject a config and accept another config later. TEST_P(SubscriptionImplTest, RejectAcceptNextConfig) { startSubscription({"cluster0", "cluster1"}); - EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0)); + EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, "")); deliverConfigUpdate({"cluster0", "cluster1"}, "0", false); - EXPECT_TRUE(statsAre(2, 0, 1, 0, 0, 0, 0)); + EXPECT_TRUE(statsAre(2, 0, 1, 0, 0, 0, 0, "")); deliverConfigUpdate({"cluster0", "cluster1"}, "1", true); - EXPECT_TRUE(statsAre(3, 1, 1, 0, 0, TEST_TIME_MILLIS, 13237225503670494420U)); + EXPECT_TRUE(statsAre(3, 1, 1, 0, 0, TEST_TIME_MILLIS, 13237225503670494420U, "1")); } // Validate that stream updates send a message with the updated resources. TEST_P(SubscriptionImplTest, UpdateResources) { startSubscription({"cluster0", "cluster1"}); - EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0)); - deliverConfigUpdate({"cluster0", "cluster1"}, "0", true); - EXPECT_TRUE(statsAre(2, 1, 0, 0, 0, TEST_TIME_MILLIS, 7148434200721666028)); + EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, "")); + deliverConfigUpdate({"cluster0", "cluster1"}, "42", true); + EXPECT_TRUE(statsAre(2, 1, 0, 0, 0, TEST_TIME_MILLIS, 7919287270473417401, "42")); updateResourceInterest({"cluster2"}); - EXPECT_TRUE(statsAre(3, 1, 0, 0, 0, TEST_TIME_MILLIS, 7148434200721666028)); + EXPECT_TRUE(statsAre(3, 1, 0, 0, 0, TEST_TIME_MILLIS, 7919287270473417401, "42")); } // Validate that initial fetch timer is created and calls callback on timeout @@ -153,14 +155,14 @@ TEST_P(SubscriptionImplInitFetchTimeoutTest, InitialFetchTimeout) { InSequence s; expectEnableInitFetchTimeoutTimer(std::chrono::milliseconds(1000)); startSubscription({"cluster0", "cluster1"}); - EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0)); + EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, "")); if (GetParam() == SubscriptionType::Http) { expectDisableInitFetchTimeoutTimer(); } expectConfigUpdateFailed(); callInitFetchTimeoutCb(); - EXPECT_TRUE(statsAre(1, 0, 0, 0, 1, 0, 0)); + EXPECT_TRUE(statsAre(1, 0, 0, 0, 1, 0, 0, "")); } // Validate that initial fetch timer is disabled on config update @@ -168,7 +170,7 @@ TEST_P(SubscriptionImplInitFetchTimeoutTest, DisableInitTimeoutOnSuccess) { InSequence s; expectEnableInitFetchTimeoutTimer(std::chrono::milliseconds(1000)); startSubscription({"cluster0", "cluster1"}); - EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0)); + EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, "")); expectDisableInitFetchTimeoutTimer(); deliverConfigUpdate({"cluster0", "cluster1"}, "0", true); } @@ -178,7 +180,7 @@ TEST_P(SubscriptionImplInitFetchTimeoutTest, DisableInitTimeoutOnFail) { InSequence s; expectEnableInitFetchTimeoutTimer(std::chrono::milliseconds(1000)); startSubscription({"cluster0", "cluster1"}); - EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0)); + EXPECT_TRUE(statsAre(1, 0, 0, 0, 0, 0, 0, "")); expectDisableInitFetchTimeoutTimer(); deliverConfigUpdate({"cluster0", "cluster1"}, "0", false); } diff --git 
a/test/common/config/subscription_test_harness.h b/test/common/config/subscription_test_harness.h index e3d13e37cacc..4653a6b646bf 100644 --- a/test/common/config/subscription_test_harness.h +++ b/test/common/config/subscription_test_harness.h @@ -57,7 +57,8 @@ class SubscriptionTestHarness : public Event::TestUsingSimulatedTime { virtual testing::AssertionResult statsAre(uint32_t attempt, uint32_t success, uint32_t rejected, uint32_t failure, uint32_t init_fetch_timeout, - uint64_t update_time, uint64_t version) { + uint64_t update_time, uint64_t version, + absl::string_view version_text) { // TODO(fredlas) rework update_success_ to make sense across all xDS carriers. Its value in // statsAre() calls in many tests will probably have to be changed. UNREFERENCED_PARAMETER(attempt); @@ -85,6 +86,10 @@ class SubscriptionTestHarness : public Event::TestUsingSimulatedTime { return testing::AssertionFailure() << "version: expected " << version << ", got " << stats_.version_.value(); } + if (version_text != stats_.version_text_.value()) { + return testing::AssertionFailure() + << "version_text: expected " << version_text << ", got " << stats_.version_text_.value(); + } return testing::AssertionSuccess(); } diff --git a/test/mocks/stats/mocks.h b/test/mocks/stats/mocks.h index 2530132ff53e..f5337f27aeaa 100644 --- a/test/mocks/stats/mocks.h +++ b/test/mocks/stats/mocks.h @@ -263,7 +263,7 @@ class MockTextReadout : public MockMetric { MockTextReadout(); ~MockTextReadout() override; - MOCK_METHOD1(set, void(std::string&& value)); + MOCK_METHOD1(set, void(absl::string_view value)); MOCK_CONST_METHOD0(used, bool()); MOCK_CONST_METHOD0(value, std::string()); From 49f6e88427cd350dba360ae6a0b930b8c84ce1e0 Mon Sep 17 00:00:00 2001 From: Keith Smiley Date: Thu, 30 Apr 2020 10:30:36 -0700 Subject: [PATCH 069/909] bazel: add option for disabling debug info (#10987) When building [envoy-mobile](https://github.com/lyft/envoy-mobile), we don't particularly care about having significant debug info, but we want to build with `--compilation_mode=opt`. Previously, because opt builds include `-ggdb3` by default, the produced multi-arch archive was too large to work with rules_apple's framework zipping. Signed-off-by: Keith Smiley --- bazel/BUILD | 5 +++++ bazel/README.md | 10 ++++++++++ bazel/envoy_internal.bzl | 3 +++ 3 files changed, 18 insertions(+) diff --git a/bazel/BUILD b/bazel/BUILD index 8c2b74a4428a..8e4ce5d7c5ff 100644 --- a/bazel/BUILD +++ b/bazel/BUILD @@ -107,6 +107,11 @@ config_setting( values = {"compilation_mode": "dbg"}, ) +config_setting( + name = "no_debug_info", + values = {"define": "no_debug_info=1"}, +) + config_setting( name = "asan_build", values = {"define": "ENVOY_CONFIG_ASAN=1"}, diff --git a/bazel/README.md b/bazel/README.md index 60aa5c9eb915..f7318376b9d0 100644 --- a/bazel/README.md +++ b/bazel/README.md @@ -215,6 +215,16 @@ By default Clang drops some debug symbols that are required for pretty printing More information can be found [here](https://bugs.llvm.org/show_bug.cgi?id=24202). The easy solution is to set ```--copt=-fno-limit-debug-info``` on the CLI or in your .bazelrc file. +## Removing debug info + +If you don't want your debug or release binaries to contain debug info +(for example, to reduce binary size), pass `--define=no_debug_info=1` when building. +This is primarily useful when building envoy as a static library.
When +building a linked envoy binary you can build the implicit `.stripped` +target from [`cc_binary`](https://docs.bazel.build/versions/master/be/c-cpp.html#cc_binary) +or pass [`--strip=always`](https://docs.bazel.build/versions/master/command-line-reference.html#flag--strip) +instead. + # Testing Envoy with Bazel All the Envoy tests can be built and run with: diff --git a/bazel/envoy_internal.bzl b/bazel/envoy_internal.bzl index 4406f08c4eda..1603523e575c 100644 --- a/bazel/envoy_internal.bzl +++ b/bazel/envoy_internal.bzl @@ -51,6 +51,9 @@ def envoy_copts(repository, test = False): repository + "//bazel:clang_build": ["-fno-limit-debug-info", "-Wgnu-conditional-omitted-operand"], repository + "//bazel:gcc_build": ["-Wno-maybe-uninitialized"], "//conditions:default": [], + }) + select({ + repository + "//bazel:no_debug_info": ["-g0"], + "//conditions:default": [], }) + select({ repository + "//bazel:disable_tcmalloc": ["-DABSL_MALLOC_HOOK_MMAP_DISABLE"], "//conditions:default": ["-DTCMALLOC"], From c86c679dabe6813d779f32e0225abcd83a56b32f Mon Sep 17 00:00:00 2001 From: Yuchen Dai Date: Thu, 30 Apr 2020 13:37:28 -0700 Subject: [PATCH 070/909] listener: in place filter chain update (#10662) Provide new listener update path which could update the filter chains without draining all the connections of the old listener. The in place filter chain update flow is an optimization of listener update. If the supportUpdateFilterChain() passes and runtime "envoy.reloadable_features.listener_in_place_filterchain_update" is not explicitly disabled, the existing connections may not be drained if the owning filter chains are not updated in the new listener config. Signed-off-by: Yuchen Dai --- docs/root/configuration/listeners/stats.rst | 20 +- docs/root/version_history/current.rst | 3 + include/envoy/network/connection_handler.h | 2 +- source/common/runtime/runtime_features.cc | 1 + source/server/connection_handler_impl.cc | 24 +- source/server/connection_handler_impl.h | 2 - source/server/filter_chain_manager_impl.h | 4 + source/server/listener_impl.cc | 308 +++++-- source/server/listener_impl.h | 51 ++ source/server/listener_manager_impl.cc | 74 +- source/server/listener_manager_impl.h | 40 +- .../integration/quic_http_integration_test.cc | 2 +- test/integration/BUILD | 11 +- test/integration/http2_integration_test.cc | 8 +- test/integration/http_integration.h | 2 +- test/integration/integration_test.cc | 6 +- test/integration/utility.h | 2 +- test/integration/xds_integration_test.cc | 343 +++++++- test/server/BUILD | 2 + test/server/listener_manager_impl_test.cc | 790 +++++++++++++++--- test/server/listener_manager_impl_test.h | 47 +- 21 files changed, 1467 insertions(+), 275 deletions(-) diff --git a/docs/root/configuration/listeners/stats.rst b/docs/root/configuration/listeners/stats.rst index 58bc2f57e297..e9aa8f04487d 100644 --- a/docs/root/configuration/listeners/stats.rst +++ b/docs/root/configuration/listeners/stats.rst @@ -64,13 +64,15 @@ statistics. Any ``:`` character in the stats name is replaced with ``_``. 
:header: Name, Type, Description :widths: 1, 1, 2 - listener_added, Counter, Total listeners added (either via static config or LDS) - listener_modified, Counter, Total listeners modified (via LDS) - listener_removed, Counter, Total listeners removed (via LDS) - listener_stopped, Counter, Total listeners stopped - listener_create_success, Counter, Total listener objects successfully added to workers - listener_create_failure, Counter, Total failed listener object additions to workers - total_listeners_warming, Gauge, Number of currently warming listeners - total_listeners_active, Gauge, Number of currently active listeners - total_listeners_draining, Gauge, Number of currently draining listeners + listener_added, Counter, Total listeners added (either via static config or LDS). + listener_modified, Counter, Total listeners modified (via LDS). + listener_removed, Counter, Total listeners removed (via LDS). + listener_stopped, Counter, Total listeners stopped. + listener_create_success, Counter, Total listener objects successfully added to workers. + listener_create_failure, Counter, Total failed listener object additions to workers. + listener_in_place_updated, Counter, Total listener objects created to execute the in-place filter chain update path. + total_filter_chains_draining, Gauge, Number of currently draining filter chains. + total_listeners_warming, Gauge, Number of currently warming listeners. + total_listeners_active, Gauge, Number of currently active listeners. + total_listeners_draining, Gauge, Number of currently draining listeners. workers_started, Gauge, A boolean (1 if started and 0 otherwise) that indicates whether listeners have been initialized on workers. diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 9c8cdd087788..d2fd148d8a5a 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -24,6 +24,9 @@ Changes Can be reverted temporarily by setting runtime feature `envoy.reloadable_features.fix_upgrade_response` to false. * http: remove legacy connection pool code and their runtime features: `envoy.reloadable_features.new_http1_connection_pool_behavior` and `envoy.reloadable_features.new_http2_connection_pool_behavior`. +* listener: added an in-place filter chain update flow for TCP listener updates, which avoids closing connections whose network filter chain is unchanged in the updated listener config. Can be disabled by setting runtime feature `envoy.reloadable_features.listener_in_place_filterchain_update` to false. Also added stats to the :ref:`listener manager ` to track the number of draining filter chains and the number of in-place update attempts. * logger: added :ref:`--log-format-prefix-with-location ` command line option to prefix '%v' with file path and line number. * network filters: added a :ref:`postgres proxy filter `. * network filters: added a :ref:`rocketmq proxy filter `.
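A hedged illustration, not part of the patch, of what the release note above means by the filter chains being the only thing allowed to change: it uses ListenerMessageUtil::filterChainOnlyChange, the helper this patch adds to source/server/listener_impl.h further below, plus ordinary generated protobuf setters. The listener fields and values here are made up purely for the example.

#include "common/common/assert.h"
#include "envoy/config/listener/v3/listener.pb.h"
#include "server/listener_impl.h"

void inPlaceUpdateCandidateExample() {
  envoy::config::listener::v3::Listener old_config;
  old_config.set_name("listener_foo");
  old_config.mutable_address()->mutable_socket_address()->set_port_value(10000);
  old_config.add_filter_chains()->mutable_filter_chain_match()->add_server_names("a.example.com");

  // Same listener, only the filter chains differ: a candidate for the in-place update path.
  envoy::config::listener::v3::Listener new_config = old_config;
  new_config.clear_filter_chains();
  new_config.add_filter_chains()->mutable_filter_chain_match()->add_server_names("b.example.com");
  ASSERT(Envoy::Server::ListenerMessageUtil::filterChainOnlyChange(old_config, new_config));

  // Any other field change (here, the port) forces the usual full update and drain path.
  new_config.mutable_address()->mutable_socket_address()->set_port_value(10001);
  ASSERT(!Envoy::Server::ListenerMessageUtil::filterChainOnlyChange(old_config, new_config));
}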
diff --git a/include/envoy/network/connection_handler.h b/include/envoy/network/connection_handler.h index b8787df14ef8..58f672c04641 100644 --- a/include/envoy/network/connection_handler.h +++ b/include/envoy/network/connection_handler.h @@ -159,4 +159,4 @@ class ActiveUdpListenerFactory { using ActiveUdpListenerFactoryPtr = std::unique_ptr; } // namespace Network -} // namespace Envoy +} // namespace Envoy \ No newline at end of file diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc index 6b853a9dfc7b..4725229e9b39 100644 --- a/source/common/runtime/runtime_features.cc +++ b/source/common/runtime/runtime_features.cc @@ -62,6 +62,7 @@ constexpr const char* runtime_features[] = { "envoy.deprecated_features.allow_deprecated_extension_names", "envoy.reloadable_features.ext_authz_http_service_enable_case_sensitive_string_matcher", "envoy.reloadable_features.fix_upgrade_response", + "envoy.reloadable_features.listener_in_place_filterchain_update", }; // This is a section for officially sanctioned runtime features which are too diff --git a/source/server/connection_handler_impl.cc b/source/server/connection_handler_impl.cc index 2547ede9f35d..060e940ec1f7 100644 --- a/source/server/connection_handler_impl.cc +++ b/source/server/connection_handler_impl.cc @@ -67,9 +67,7 @@ void ConnectionHandlerImpl::removeListeners(uint64_t listener_tag) { void ConnectionHandlerImpl::removeFilterChains( uint64_t listener_tag, const std::list& filter_chains, std::function completion) { - // TODO(lambdai): Merge the optimistic path and the pessimistic path. for (auto& listener : listeners_) { - // Optimistic path: The listener tag provided by arg is not stale. if (listener.second.listener_->listenerTag() == listener_tag) { listener.second.tcp_listener_->get().deferredRemoveFilterChains(filter_chains); // Completion is deferred because the above removeFilterChains() may defer delete connection. @@ -77,17 +75,7 @@ void ConnectionHandlerImpl::removeFilterChains( return; } } - // Fallback to iterate over all listeners. The reason is that the target listener might have began - // another update and the previous tag is lost. - // TODO(lambdai): Remove this once we decide to use the same listener tag during intelligent - // update. - for (auto& listener : listeners_) { - if (listener.second.tcp_listener_.has_value()) { - listener.second.tcp_listener_->get().deferredRemoveFilterChains(filter_chains); - } - } - // Completion is deferred because the above removeFilterChains() may defer delete connection. 
- Event::DeferredTaskUtil::deferredRun(dispatcher_, std::move(completion)); + NOT_REACHED_GCOVR_EXCL_LINE; } void ConnectionHandlerImpl::stopListeners(uint64_t listener_tag) { @@ -399,7 +387,7 @@ void ConnectionHandlerImpl::ActiveTcpListener::newConnection( std::move(socket), std::move(transport_socket), *stream_info); ActiveTcpConnectionPtr active_connection( new ActiveTcpConnection(active_connections, std::move(server_conn_ptr), - parent_.dispatcher_.timeSource(), *config_, std::move(stream_info))); + parent_.dispatcher_.timeSource(), std::move(stream_info))); active_connection->connection_->setBufferLimits(config_->perConnectionBufferLimitBytes()); const bool empty_filter_chain = !config_->filterChainFactory().createNetworkFilterChain( @@ -498,13 +486,11 @@ ConnectionHandlerImpl::ActiveConnections::~ActiveConnections() { ConnectionHandlerImpl::ActiveTcpConnection::ActiveTcpConnection( ActiveConnections& active_connections, Network::ConnectionPtr&& new_connection, - TimeSource& time_source, Network::ListenerConfig& config, - std::unique_ptr&& stream_info) + TimeSource& time_source, std::unique_ptr&& stream_info) : stream_info_(std::move(stream_info)), active_connections_(active_connections), connection_(std::move(new_connection)), conn_length_(new Stats::HistogramCompletableTimespanImpl( - active_connections_.listener_.stats_.downstream_cx_length_ms_, time_source)), - config_(config) { + active_connections_.listener_.stats_.downstream_cx_length_ms_, time_source)) { // We just universally set no delay on connections. Theoretically we might at some point want // to make this configurable. connection_->noDelay(true); @@ -521,7 +507,7 @@ ConnectionHandlerImpl::ActiveTcpConnection::ActiveTcpConnection( } ConnectionHandlerImpl::ActiveTcpConnection::~ActiveTcpConnection() { - emitLogs(config_, *stream_info_); + emitLogs(*active_connections_.listener_.config_, *stream_info_); active_connections_.listener_.stats_.downstream_cx_active_.dec(); active_connections_.listener_.stats_.downstream_cx_destroy_.inc(); diff --git a/source/server/connection_handler_impl.h b/source/server/connection_handler_impl.h index c65ddf397da3..821e628995c9 100644 --- a/source/server/connection_handler_impl.h +++ b/source/server/connection_handler_impl.h @@ -199,7 +199,6 @@ class ConnectionHandlerImpl : public Network::ConnectionHandler, public Network::ConnectionCallbacks { ActiveTcpConnection(ActiveConnections& active_connections, Network::ConnectionPtr&& new_connection, TimeSource& time_system, - Network::ListenerConfig& config, std::unique_ptr&& stream_info); ~ActiveTcpConnection() override; @@ -218,7 +217,6 @@ class ConnectionHandlerImpl : public Network::ConnectionHandler, ActiveConnections& active_connections_; Network::ConnectionPtr connection_; Stats::TimespanPtr conn_length_; - Network::ListenerConfig& config_; }; /** diff --git a/source/server/filter_chain_manager_impl.h b/source/server/filter_chain_manager_impl.h index 681876a5cb1e..4acdc52470c0 100644 --- a/source/server/filter_chain_manager_impl.h +++ b/source/server/filter_chain_manager_impl.h @@ -193,6 +193,10 @@ class FilterChainManagerImpl : public Network::FilterChainManager, FilterChainFactoryBuilder& b, FilterChainFactoryContextCreator& context_creator); static bool isWildcardServerName(const std::string& name); + // Return the current view of filter chains, keyed by filter chain message. Used by the owning + // listener to calculate the intersection of filter chains with another listener. 
+ const FcContextMap& filterChainsByMessage() const { return fc_contexts_; } + private: void convertIPsToTries(); using SourcePortsMap = absl::flat_hash_map; diff --git a/source/server/listener_impl.cc b/source/server/listener_impl.cc index 149d8d1fba21..4b36fc4a20df 100644 --- a/source/server/listener_impl.cc +++ b/source/server/listener_impl.cc @@ -16,6 +16,7 @@ #include "common/network/socket_option_factory.h" #include "common/network/utility.h" #include "common/protobuf/utility.h" +#include "common/runtime/runtime_features.h" #include "server/configuration_impl.h" #include "server/drain_manager_impl.h" @@ -30,6 +31,26 @@ namespace Envoy { namespace Server { +namespace { +bool needTlsInspector(const envoy::config::listener::v3::Listener& config) { + return std::any_of(config.filter_chains().begin(), config.filter_chains().end(), + [](const auto& filter_chain) { + const auto& matcher = filter_chain.filter_chain_match(); + return matcher.transport_protocol() == "tls" || + (matcher.transport_protocol().empty() && + (!matcher.server_names().empty() || + !matcher.application_protocols().empty())); + }) && + !std::any_of( + config.listener_filters().begin(), config.listener_filters().end(), + [](const auto& filter) { + return filter.name() == + Extensions::ListenerFilters::ListenerFilterNames::get().TlsInspector || + filter.name() == "envoy.listener.tls_inspector"; + }); +} +} // namespace + ListenSocketFactoryImpl::ListenSocketFactoryImpl(ListenerComponentFactory& factory, Network::Address::InstanceConstSharedPtr address, Network::Address::SocketType socket_type, @@ -232,30 +253,87 @@ ListenerImpl::ListenerImpl(const envoy::config::listener::v3::Listener& config, listener_init_target_.ready(); } }) { - Network::Address::SocketType socket_type = - Network::Utility::protobufAddressSocketType(config.address()); - if (PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, transparent, false)) { - addListenSocketOptions(Network::SocketOptionFactory::buildIpTransparentOptions()); - } - if (PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, freebind, false)) { - addListenSocketOptions(Network::SocketOptionFactory::buildIpFreebindOptions()); + buildAccessLog(); + auto socket_type = Network::Utility::protobufAddressSocketType(config.address()); + buildListenSocketOptions(socket_type); + buildUdpListenerFactory(socket_type, concurrency); + createListenerFilterFactories(socket_type); + validateFilterChains(socket_type); + buildFilterChains(); + if (socket_type == Network::Address::SocketType::Datagram) { + return; } - if (config.reuse_port()) { - addListenSocketOptions(Network::SocketOptionFactory::buildReusePortOptions()); - } else if (socket_type == Network::Address::SocketType::Datagram && concurrency > 1) { - ENVOY_LOG(warn, "Listening on UDP without SO_REUSEPORT socket option may result to unstable " - "packet proxying. Consider configuring the reuse_port listener option."); + buildSocketOptions(); + buildOriginalDstListenerFilter(); + buildProxyProtocolListenerFilter(); + buildTlsInspectorListenerFilter(); + if (!workers_started_) { + // Initialize dynamic_init_manager_ from Server's init manager if it's not initialized. + // NOTE: listener_init_target_ should be added to parent's initManager at the end of the + // listener constructor so that this listener's children entities could register their targets + // with their parent's initManager. 
+ parent_.server_.initManager().add(listener_init_target_); } - if (!config.socket_options().empty()) { - addListenSocketOptions( - Network::SocketOptionFactory::buildLiteralOptions(config.socket_options())); +} + +ListenerImpl::ListenerImpl(const ListenerImpl& origin, + const envoy::config::listener::v3::Listener& config, + const std::string& version_info, ListenerManagerImpl& parent, + const std::string& name, bool added_via_api, bool workers_started, + uint64_t hash, uint32_t concurrency) + : parent_(parent), address_(origin.address_), + bind_to_port_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(config.deprecated_v1(), bind_to_port, true)), + hand_off_restored_destination_connections_( + PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, hidden_envoy_deprecated_use_original_dst, false)), + per_connection_buffer_limit_bytes_( + PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, per_connection_buffer_limit_bytes, 1024 * 1024)), + listener_tag_(origin.listener_tag_), name_(name), added_via_api_(added_via_api), + workers_started_(workers_started), hash_(hash), + validation_visitor_( + added_via_api_ ? parent_.server_.messageValidationContext().dynamicValidationVisitor() + : parent_.server_.messageValidationContext().staticValidationVisitor()), + // listener_init_target_ is not used during in place update because we expect server started. + listener_init_target_("", nullptr), + dynamic_init_manager_(std::make_unique( + fmt::format("Listener-local-init-manager {} {}", name, hash))), + config_(config), version_info_(version_info), + listener_filters_timeout_( + PROTOBUF_GET_MS_OR_DEFAULT(config, listener_filters_timeout, 15000)), + continue_on_listener_filters_timeout_(config.continue_on_listener_filters_timeout()), + listener_factory_context_(std::make_shared( + origin.listener_factory_context_->listener_factory_context_base_, this, *this)), + filter_chain_manager_(address_, origin.listener_factory_context_->parentFactoryContext(), + initManager(), origin.filter_chain_manager_), + local_init_watcher_(fmt::format("Listener-local-init-watcher {}", name), [this] { + ASSERT(workers_started_); + parent_.inPlaceFilterChainUpdate(*this); + }) { + buildAccessLog(); + auto socket_type = Network::Utility::protobufAddressSocketType(config.address()); + buildListenSocketOptions(socket_type); + buildUdpListenerFactory(socket_type, concurrency); + createListenerFilterFactories(socket_type); + validateFilterChains(socket_type); + buildFilterChains(); + // In place update is tcp only so it's safe to apply below tcp only initialization. + buildSocketOptions(); + buildOriginalDstListenerFilter(); + buildProxyProtocolListenerFilter(); + buildTlsInspectorListenerFilter(); +} + +void ListenerImpl::buildAccessLog() { + for (const auto& access_log : config_.access_log()) { + AccessLog::InstanceSharedPtr current_access_log = + AccessLog::AccessLogFactory::fromProto(access_log, *listener_factory_context_); + access_logs_.push_back(current_access_log); } +} + +void ListenerImpl::buildUdpListenerFactory(Network::Address::SocketType socket_type, + uint32_t concurrency) { if (socket_type == Network::Address::SocketType::Datagram) { - // Needed for recvmsg to return destination address in IP header. - addListenSocketOptions(Network::SocketOptionFactory::buildIpPacketInfoOptions()); - // Needed to return receive buffer overflown indicator. 
- addListenSocketOptions(Network::SocketOptionFactory::buildRxQueueOverFlowOptions()); - auto udp_config = config.udp_listener_config(); + auto udp_config = config_.udp_listener_config(); if (udp_config.udp_listener_name().empty()) { udp_config.set_udp_listener_name(UdpListenerNames::get().RawUdp); } @@ -265,44 +343,68 @@ ListenerImpl::ListenerImpl(const envoy::config::listener::v3::Listener& config, ProtobufTypes::MessagePtr message = Config::Utility::translateToFactoryConfig(udp_config, validation_visitor_, config_factory); udp_listener_factory_ = config_factory.createActiveUdpListenerFactory(*message, concurrency); + if (!config_.reuse_port() && concurrency > 1) { + ENVOY_LOG(warn, "Listening on UDP without SO_REUSEPORT socket option may result to unstable " + "packet proxying. Consider configuring the reuse_port listener option."); + } } +} - if (!config.listener_filters().empty()) { +void ListenerImpl::buildListenSocketOptions(Network::Address::SocketType socket_type) { + if (PROTOBUF_GET_WRAPPED_OR_DEFAULT(config_, transparent, false)) { + addListenSocketOptions(Network::SocketOptionFactory::buildIpTransparentOptions()); + } + if (PROTOBUF_GET_WRAPPED_OR_DEFAULT(config_, freebind, false)) { + addListenSocketOptions(Network::SocketOptionFactory::buildIpFreebindOptions()); + } + if (config_.reuse_port()) { + addListenSocketOptions(Network::SocketOptionFactory::buildReusePortOptions()); + } + if (!config_.socket_options().empty()) { + addListenSocketOptions( + Network::SocketOptionFactory::buildLiteralOptions(config_.socket_options())); + } + if (socket_type == Network::Address::SocketType::Datagram) { + // Needed for recvmsg to return destination address in IP header. + addListenSocketOptions(Network::SocketOptionFactory::buildIpPacketInfoOptions()); + // Needed to return receive buffer overflown indicator. + addListenSocketOptions(Network::SocketOptionFactory::buildRxQueueOverFlowOptions()); + } +} + +void ListenerImpl::createListenerFilterFactories(Network::Address::SocketType socket_type) { + if (!config_.listener_filters().empty()) { switch (socket_type) { case Network::Address::SocketType::Datagram: - if (config.listener_filters().size() > 1) { - // Currently supports only 1 UDP listener - throw EnvoyException( - fmt::format("error adding listener '{}': Only 1 UDP filter per listener supported", - address_->asString())); + if (config_.listener_filters().size() > 1) { + // Currently supports only 1 UDP listener filter. 
+ throw EnvoyException(fmt::format( + "error adding listener '{}': Only 1 UDP listener filter per listener supported", + address_->asString())); } udp_listener_filter_factories_ = parent_.factory_.createUdpListenerFilterFactoryList( - config.listener_filters(), *listener_factory_context_); + config_.listener_filters(), *listener_factory_context_); break; case Network::Address::SocketType::Stream: listener_filter_factories_ = parent_.factory_.createListenerFilterFactoryList( - config.listener_filters(), *listener_factory_context_); + config_.listener_filters(), *listener_factory_context_); break; default: NOT_REACHED_GCOVR_EXCL_LINE; } } +} - for (const auto& access_log : config.access_log()) { - AccessLog::InstanceSharedPtr current_access_log = - AccessLog::AccessLogFactory::fromProto(access_log, *listener_factory_context_); - access_logs_.push_back(current_access_log); - } - - if (config.filter_chains().empty() && (socket_type == Network::Address::SocketType::Stream || - !udp_listener_factory_->isTransportConnectionless())) { +void ListenerImpl::validateFilterChains(Network::Address::SocketType socket_type) { + if (config_.filter_chains().empty() && (socket_type == Network::Address::SocketType::Stream || + !udp_listener_factory_->isTransportConnectionless())) { // If we got here, this is a tcp listener or connection-oriented udp listener, so ensure there // is a filter chain specified throw EnvoyException(fmt::format("error adding listener '{}': no filter chains specified", address_->asString())); } else if (udp_listener_factory_ != nullptr && !udp_listener_factory_->isTransportConnectionless()) { - for (auto& filter_chain : config.filter_chains()) { + for (auto& filter_chain : config_.filter_chains()) { // Early fail if any filter chain doesn't have transport socket configured. if (!filter_chain.has_transport_socket()) { throw EnvoyException(fmt::format("error adding listener '{}': no transport socket " @@ -311,7 +413,9 @@ ListenerImpl::ListenerImpl(const envoy::config::listener::v3::Listener& config, } } } +} +void ListenerImpl::buildFilterChains() { Server::Configuration::TransportSocketFactoryContextImpl transport_factory_context( parent_.server_.admin(), parent_.server_.sslContextManager(), listenerScope(), parent_.server_.clusterManager(), parent_.server_.localInfo(), parent_.server_.dispatcher(), @@ -322,28 +426,28 @@ ListenerImpl::ListenerImpl(const envoy::config::listener::v3::Listener& config, // network filter chain update. // TODO(lambdai): create builder from filter_chain_manager to obtain the init manager ListenerFilterChainFactoryBuilder builder(*this, transport_factory_context); - filter_chain_manager_.addFilterChain(config.filter_chains(), builder, filter_chain_manager_); - - if (socket_type == Network::Address::SocketType::Datagram) { - return; - } + filter_chain_manager_.addFilterChain(config_.filter_chains(), builder, filter_chain_manager_); +} +void ListenerImpl::buildSocketOptions() { // TCP specific setup. - if (config.has_connection_balance_config()) { + if (config_.has_connection_balance_config()) { // Currently exact balance is the only supported type and there are no options. 
- ASSERT(config.connection_balance_config().has_exact_balance()); + ASSERT(config_.connection_balance_config().has_exact_balance()); connection_balancer_ = std::make_unique(); } else { connection_balancer_ = std::make_unique(); } - if (config.has_tcp_fast_open_queue_length()) { + if (config_.has_tcp_fast_open_queue_length()) { addListenSocketOptions(Network::SocketOptionFactory::buildTcpFastOpenOptions( - config.tcp_fast_open_queue_length().value())); + config_.tcp_fast_open_queue_length().value())); } +} +void ListenerImpl::buildOriginalDstListenerFilter() { // Add original dst listener filter if 'use_original_dst' flag is set. - if (PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, hidden_envoy_deprecated_use_original_dst, false)) { + if (PROTOBUF_GET_WRAPPED_OR_DEFAULT(config_, hidden_envoy_deprecated_use_original_dst, false)) { auto& factory = Config::Utility::getAndCheckFactoryByName( Extensions::ListenerFilters::ListenerFilterNames::get().OriginalDst); @@ -352,11 +456,14 @@ ListenerImpl::ListenerImpl(const envoy::config::listener::v3::Listener& config, Envoy::ProtobufWkt::Empty(), /*listener_filter_matcher=*/nullptr, *listener_factory_context_)); } +} + +void ListenerImpl::buildProxyProtocolListenerFilter() { // Add proxy protocol listener filter if 'use_proxy_proto' flag is set. // TODO(jrajahalme): This is the last listener filter on purpose. When filter chain matching // is implemented, this needs to be run after the filter chain has been // selected. - if (PROTOBUF_GET_WRAPPED_OR_DEFAULT(config.filter_chains()[0], use_proxy_proto, false)) { + if (PROTOBUF_GET_WRAPPED_OR_DEFAULT(config_.filter_chains()[0], use_proxy_proto, false)) { auto& factory = Config::Utility::getAndCheckFactoryByName( Extensions::ListenerFilters::ListenerFilterNames::get().ProxyProtocol); @@ -364,24 +471,11 @@ ListenerImpl::ListenerImpl(const envoy::config::listener::v3::Listener& config, Envoy::ProtobufWkt::Empty(), /*listener_filter_matcher=*/nullptr, *listener_factory_context_)); } +} +void ListenerImpl::buildTlsInspectorListenerFilter() { // TODO(zuercher) remove the deprecated TLS inspector name when the deprecated names are removed. - const bool need_tls_inspector = - std::any_of( - config.filter_chains().begin(), config.filter_chains().end(), - [](const auto& filter_chain) { - const auto& matcher = filter_chain.filter_chain_match(); - return matcher.transport_protocol() == "tls" || - (matcher.transport_protocol().empty() && - (!matcher.server_names().empty() || !matcher.application_protocols().empty())); - }) && - !std::any_of( - config.listener_filters().begin(), config.listener_filters().end(), - [](const auto& filter) { - return filter.name() == - Extensions::ListenerFilters::ListenerFilterNames::get().TlsInspector || - filter.name() == "envoy.listener.tls_inspector"; - }); + const bool need_tls_inspector = needTlsInspector(config_); // Automatically inject TLS Inspector if it wasn't configured explicitly and it's needed. if (need_tls_inspector) { const std::string message = @@ -398,14 +492,6 @@ ListenerImpl::ListenerImpl(const envoy::config::listener::v3::Listener& config, Envoy::ProtobufWkt::Empty(), /*listener_filter_matcher=*/nullptr, *listener_factory_context_)); } - - if (!workers_started_) { - // Initialize dynamic_init_manager_ from Server's init manager if it's not initialized. - // NOTE: listener_init_target_ should be added to parent's initManager at the end of the - // listener constructor so that this listener's children entities could register their targets - // with their parent's initManager. 
- parent_.server_.initManager().add(listener_init_target_); - } } AccessLog::AccessLogManager& PerListenerFactoryContextImpl::accessLogManager() { @@ -515,8 +601,8 @@ void ListenerImpl::initialize() { // by resetting the watcher. if (workers_started_) { ENVOY_LOG_MISC(debug, "Initialize listener {} local-init-manager.", name_); - // If workers_started_ is true, dynamic_init_manager_ should be initialized by listener manager - // directly. + // If workers_started_ is true, dynamic_init_manager_ should be initialized by listener + // manager directly. dynamic_init_manager_->initialize(local_init_watcher_); } } @@ -536,5 +622,79 @@ void ListenerImpl::setSocketFactory(const Network::ListenSocketFactorySharedPtr& socket_factory_ = socket_factory; } +bool ListenerImpl::supportUpdateFilterChain(const envoy::config::listener::v3::Listener& config, + bool worker_started) { + if (!Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.listener_in_place_filterchain_update")) { + return false; + } + + // The in place update needs the active listener in worker thread. worker_started guarantees the + // existence of that active listener. + if (!worker_started) { + return false; + } + + // Currently we only support TCP filter chain update. + if (Network::Utility::protobufAddressSocketType(config_.address()) != + Network::Address::SocketType::Stream || + Network::Utility::protobufAddressSocketType(config.address()) != + Network::Address::SocketType::Stream) { + return false; + } + + // Full listener update currently rejects tcp listener having 0 filter chain. + // In place filter chain update could survive under zero filter chain but we should keep the same + // behavior for now. This also guards the below filter chain access. + if (config.filter_chains_size() == 0) { + return false; + } + + // See buildProxyProtocolListenerFilter(). Full listener update guarantees at least 1 filter chain + // at tcp listener. + if (PROTOBUF_GET_WRAPPED_OR_DEFAULT(config_.filter_chains()[0], use_proxy_proto, false) ^ + PROTOBUF_GET_WRAPPED_OR_DEFAULT(config.filter_chains()[0], use_proxy_proto, false)) { + return false; + } + + // See buildTlsInspectorListenerFilter(). + if (needTlsInspector(config_) ^ needTlsInspector(config)) { + return false; + } + return ListenerMessageUtil::filterChainOnlyChange(config_, config); +} + +ListenerImplPtr +ListenerImpl::newListenerWithFilterChain(const envoy::config::listener::v3::Listener& config, + bool workers_started, uint64_t hash) { + // Use WrapUnique since the constructor is private. + return absl::WrapUnique( + new ListenerImpl(*this, config, version_info_, parent_, name_, added_via_api_, + /* new new workers started state */ workers_started, + /* use new hash */ hash, parent_.server_.options().concurrency())); +} + +void ListenerImpl::diffFilterChain(const ListenerImpl& another_listener, + std::function callback) { + for (const auto& message_and_filter_chain : filter_chain_manager_.filterChainsByMessage()) { + if (another_listener.filter_chain_manager_.filterChainsByMessage().find( + message_and_filter_chain.first) == + another_listener.filter_chain_manager_.filterChainsByMessage().end()) { + // The filter chain exists in `this` listener but not in the listener passed in. 
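+        // These are exactly the filter chains an in-place update has to drain; chains that are
+        // also present in the new config keep serving their existing connections.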
+ callback(*message_and_filter_chain.second); + } + } +} + +bool ListenerMessageUtil::filterChainOnlyChange(const envoy::config::listener::v3::Listener& lhs, + const envoy::config::listener::v3::Listener& rhs) { + Protobuf::util::MessageDifferencer differencer; + differencer.set_message_field_comparison(Protobuf::util::MessageDifferencer::EQUIVALENT); + differencer.set_repeated_field_comparison(Protobuf::util::MessageDifferencer::AS_SET); + differencer.IgnoreField( + envoy::config::listener::v3::Listener::GetDescriptor()->FindFieldByName("filter_chains")); + return differencer.Compare(lhs, rhs); +} + } // namespace Server } // namespace Envoy diff --git a/source/server/listener_impl.h b/source/server/listener_impl.h index 53e27b02024b..2ad37379e957 100644 --- a/source/server/listener_impl.h +++ b/source/server/listener_impl.h @@ -24,6 +24,15 @@ namespace Envoy { namespace Server { +class ListenerMessageUtil { +public: + /** + * @return true if listener message lhs and rhs are the same if ignoring filter_chains field. + */ + static bool filterChainOnlyChange(const envoy::config::listener::v3::Listener& lhs, + const envoy::config::listener::v3::Listener& rhs); +}; + class ListenerManagerImpl; class ListenSocketFactoryImpl : public Network::ListenSocketFactory, @@ -218,6 +227,28 @@ class ListenerImpl final : public Network::ListenerConfig, bool workers_started, uint64_t hash, uint32_t concurrency); ~ListenerImpl() override; + // TODO(lambdai): Explore using the same ListenerImpl object to execute in place filter chain + // update. + /** + * Execute in place filter chain update. The filter chain update is less expensive than full + * listener update because connections may not need to be drained. + */ + std::unique_ptr + newListenerWithFilterChain(const envoy::config::listener::v3::Listener& config, + bool workers_started, uint64_t hash); + /** + * Determine if in place filter chain update could be executed at this moment. + */ + bool supportUpdateFilterChain(const envoy::config::listener::v3::Listener& config, + bool worker_started); + + /** + * Run the callback on each filter chain that exists in this listener but not in the passed + * listener config. + */ + void diffFilterChain(const ListenerImpl& another_listener, + std::function callback); + /** * Helper functions to determine whether a listener is blocked for update or remove. */ @@ -296,6 +327,26 @@ class ListenerImpl final : public Network::ListenerConfig, SystemTime last_updated_; private: + /** + * Create a new listener from an existing listener and the new config message if the in place + * filter chain update is decided. Should be called only by newListenerWithFilterChain(). + */ + ListenerImpl(const ListenerImpl& origin, const envoy::config::listener::v3::Listener& config, + const std::string& version_info, ListenerManagerImpl& parent, + const std::string& name, bool added_via_api, bool workers_started, uint64_t hash, + uint32_t concurrency); + // Helpers for constructor. 
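+  // Shared by the public constructor and the in-place-update constructor declared above, so both
+  // update paths build their pieces the same way.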
+ void buildAccessLog(); + void buildUdpListenerFactory(Network::Address::SocketType socket_type, uint32_t concurrency); + void buildListenSocketOptions(Network::Address::SocketType socket_type); + void createListenerFilterFactories(Network::Address::SocketType socket_type); + void validateFilterChains(Network::Address::SocketType socket_type); + void buildFilterChains(); + void buildSocketOptions(); + void buildOriginalDstListenerFilter(); + void buildProxyProtocolListenerFilter(); + void buildTlsInspectorListenerFilter(); + void addListenSocketOption(const Network::Socket::OptionConstSharedPtr& option) { ensureSocketOptions(); listen_socket_options_->emplace_back(std::move(option)); diff --git a/source/server/listener_manager_impl.cc b/source/server/listener_manager_impl.cc index d6c8de15d828..c81fd1da6225 100644 --- a/source/server/listener_manager_impl.cc +++ b/source/server/listener_manager_impl.cc @@ -403,9 +403,23 @@ bool ListenerManagerImpl::addOrUpdateListenerInternal( return false; } - ListenerImplPtr new_listener(new ListenerImpl(config, version_info, *this, name, added_via_api, - workers_started_, hash, - server_.options().concurrency())); + ListenerImplPtr new_listener = nullptr; + + // In place filter chain update depends on the active listener at worker. + if (existing_active_listener != active_listeners_.end() && + (*existing_active_listener)->supportUpdateFilterChain(config, workers_started_)) { + ENVOY_LOG(debug, "use in place update filter chain update path for listener name={} hash={}", + name, hash); + new_listener = + (*existing_active_listener)->newListenerWithFilterChain(config, workers_started_, hash); + stats_.listener_in_place_updated_.inc(); + } else { + ENVOY_LOG(debug, "use full listener update path for listener name={} hash={}", name, hash); + new_listener = + std::make_unique(config, version_info, *this, name, added_via_api, + workers_started_, hash, server_.options().concurrency()); + } + ListenerImpl& new_listener_ref = *new_listener; // We mandate that a listener with the same name must have the same configured address. This @@ -694,20 +708,47 @@ void ListenerManagerImpl::onListenerWarmed(ListenerImpl& listener) { updateWarmingActiveGauges(); } -void ListenerManagerImpl::drainFilterChains(ListenerImplPtr&& listener) { +void ListenerManagerImpl::inPlaceFilterChainUpdate(ListenerImpl& listener) { + auto existing_active_listener = getListenerByName(active_listeners_, listener.name()); + auto existing_warming_listener = getListenerByName(warming_listeners_, listener.name()); + ASSERT(existing_warming_listener != warming_listeners_.end()); + ASSERT(*existing_warming_listener != nullptr); + + (*existing_warming_listener)->debugLog("execute in place filter chain update"); + + // Now that in place filter chain update was decided, the replaced listener must be in active + // list. It requires stop/remove listener procedure cancelling the in placed update if any. + ASSERT(existing_active_listener != active_listeners_.end()); + ASSERT(*existing_active_listener != nullptr); + + for (const auto& worker : workers_) { + // Explicitly override the existing listener with a new listener config. + addListenerToWorker(*worker, listener.listenerTag(), listener, nullptr); + } + + auto previous_listener = std::move(*existing_active_listener); + *existing_active_listener = std::move(*existing_warming_listener); + // Finish active_listeners_ transformation before calling `drainFilterChains` as it depends on + // their state. 
+  drainFilterChains(std::move(previous_listener), **existing_active_listener);
+
+  warming_listeners_.erase(existing_warming_listener);
+  updateWarmingActiveGauges();
+}
+
+void ListenerManagerImpl::drainFilterChains(ListenerImplPtr&& draining_listener,
+                                            ListenerImpl& new_listener) {
   // First add the listener to the draining list.
   std::list<DrainingFilterChainsManager>::iterator draining_group =
       draining_filter_chains_manager_.emplace(draining_filter_chains_manager_.begin(),
-                                              std::move(listener), workers_.size());
-  int filter_chain_size = draining_group->getDrainingFilterChains().size();
-
-  // Using set() avoids a multiple modifiers problem during the multiple processes phase of hot
-  // restart. Same below inside the lambda.
-  // TODO(lambdai): Currently the number of DrainFilterChains objects are tracked:
-  // len(filter_chains). What we really need is accumulate(filter_chains, filter_chains:
-  // len(filter_chains))
-  stats_.total_filter_chains_draining_.set(draining_filter_chains_manager_.size());
-
+                                              std::move(draining_listener), workers_.size());
+  draining_group->getDrainingListener().diffFilterChain(
+      new_listener, [&draining_group](Network::DrainableFilterChain& filter_chain) mutable {
+        filter_chain.startDraining();
+        draining_group->addFilterChainToDrain(filter_chain);
+      });
+  auto filter_chain_size = draining_group->numDrainingFilterChains();
+  stats_.total_filter_chains_draining_.add(filter_chain_size);
   draining_group->getDrainingListener().debugLog(
       absl::StrCat("draining ", filter_chain_size, " filter chains in listener ",
                    draining_group->getDrainingListener().name()));
@@ -733,15 +774,14 @@
               draining_group->getDrainingListener().debugLog(
                   absl::StrCat("draining filter chains from listener ",
                                draining_group->getDrainingListener().name(), " complete"));
+              stats_.total_filter_chains_draining_.sub(
+                  draining_group->numDrainingFilterChains());
               draining_filter_chains_manager_.erase(draining_group);
-              stats_.total_filter_chains_draining_.set(
-                  draining_filter_chains_manager_.size());
             }
           });
         });
       }
     });
-
   updateWarmingActiveGauges();
 }
diff --git a/source/server/listener_manager_impl.h b/source/server/listener_manager_impl.h
index cfa2603322c7..fa278a2d3806 100644
--- a/source/server/listener_manager_impl.h
+++ b/source/server/listener_manager_impl.h
@@ -112,6 +112,7 @@ using ListenerImplPtr = std::unique_ptr<ListenerImpl>;
   COUNTER(listener_added)                                                                        \
   COUNTER(listener_create_failure)                                                               \
   COUNTER(listener_create_success)                                                               \
+  COUNTER(listener_in_place_updated)                                                             \
   COUNTER(listener_modified)                                                                     \
   COUNTER(listener_removed)                                                                      \
   COUNTER(listener_stopped)                                                                      \
@@ -152,6 +153,12 @@ class DrainingFilterChainsManager {
     drain_timer_->enableTimer(drain_time);
   }
 
+  void addFilterChainToDrain(const Network::FilterChain& filter_chain) {
+    draining_filter_chains_.push_back(&filter_chain);
+  }
+
+  uint32_t numDrainingFilterChains() const { return draining_filter_chains_.size(); }
+
 private:
   ListenerImplPtr draining_listener_;
   std::list<const Network::FilterChain*> draining_filter_chains_;
@@ -170,6 +177,7 @@ class ListenerManagerImpl : public ListenerManager, Logger::Loggable overridden_listener,
-                               ListenerImpl& listener,
-                               std::function completion_callback) {
-    addListenerToWorker(worker, overridden_listener, listener, completion_callback);
-  }
-  // Erase the the listener draining filter chain from active listeners and then start the drain
-  // sequence.
- void drainFilterChainsForTest(ListenerImpl* listener_raw_ptr) { - auto iter = std::find_if(active_listeners_.begin(), active_listeners_.end(), - [listener_raw_ptr](const ListenerImplPtr& ptr) { - return ptr != nullptr && ptr.get() == listener_raw_ptr; - }); - ASSERT(iter != active_listeners_.end()); - - ListenerImplPtr listener_impl_ptr = std::move(*iter); - active_listeners_.erase(iter); - drainFilterChains(std::move(listener_impl_ptr)); - } - Instance& server_; ListenerComponentFactory& factory_; @@ -264,11 +251,14 @@ class ListenerManagerImpl : public ListenerManager, Logger::LoggablewaitForEndStream(); // The delayed close timeout should trigger since client is not closing the connection. EXPECT_TRUE(codec_client_->waitForDisconnect(std::chrono::milliseconds(5000))); - EXPECT_EQ(codec_client_->last_connection_event(), Network::ConnectionEvent::RemoteClose); + EXPECT_EQ(codec_client_->lastConnectionEvent(), Network::ConnectionEvent::RemoteClose); EXPECT_EQ(test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value(), 1); } diff --git a/test/integration/BUILD b/test/integration/BUILD index 912e82a9627c..f145741f4f64 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -1015,11 +1015,20 @@ envoy_cc_test( envoy_cc_test( name = "xds_integration_test", srcs = ["xds_integration_test.cc"], - data = ["//test/config/integration:server_xds_files"], + data = [ + "//test/config/integration:server_xds_files", + "//test/config/integration/certs", + ], tags = ["fails_on_windows"], deps = [ ":http_integration_lib", ":http_protocol_integration_lib", + "//source/extensions/filters/listener/tls_inspector:config", + "//source/extensions/filters/listener/tls_inspector:tls_inspector_lib", + "//source/extensions/filters/network/tcp_proxy:config", + "//source/extensions/transport_sockets/tls:config", + "//source/extensions/transport_sockets/tls:context_config_lib", + "//source/extensions/transport_sockets/tls:context_lib", "//test/test_common:environment_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", diff --git a/test/integration/http2_integration_test.cc b/test/integration/http2_integration_test.cc index 2ca27de3d0c3..e205d512baa8 100644 --- a/test/integration/http2_integration_test.cc +++ b/test/integration/http2_integration_test.cc @@ -1140,10 +1140,10 @@ TEST_P(Http2IntegrationTest, DelayedCloseAfterBadFrame) { // Envoy server), it's possible the delayed close timer could fire and close the server socket // prior to the data callback above firing. Therefore, we may either still be connected, or have // received a remote close. - if (connection.last_connection_event() == Network::ConnectionEvent::Connected) { + if (connection.lastConnectionEvent() == Network::ConnectionEvent::Connected) { connection.run(); } - EXPECT_EQ(connection.last_connection_event(), Network::ConnectionEvent::RemoteClose); + EXPECT_EQ(connection.lastConnectionEvent(), Network::ConnectionEvent::RemoteClose); EXPECT_EQ(test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value(), 1); } @@ -1169,10 +1169,10 @@ TEST_P(Http2IntegrationTest, DelayedCloseDisabled) { // Due to the multiple dispatchers involved (one for the RawConnectionDriver and another for the // Envoy server), it's possible for the 'connection' to receive the data and exit the dispatcher // prior to the FIN being received from the server. 
- if (connection.last_connection_event() == Network::ConnectionEvent::Connected) { + if (connection.lastConnectionEvent() == Network::ConnectionEvent::Connected) { connection.run(); } - EXPECT_EQ(connection.last_connection_event(), Network::ConnectionEvent::RemoteClose); + EXPECT_EQ(connection.lastConnectionEvent(), Network::ConnectionEvent::RemoteClose); EXPECT_EQ(test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value(), 0); } diff --git a/test/integration/http_integration.h b/test/integration/http_integration.h index 99f2c8850521..359fe1bc38d3 100644 --- a/test/integration/http_integration.h +++ b/test/integration/http_integration.h @@ -40,7 +40,7 @@ class IntegrationCodecClient : public Http::CodecClientProd { startRequest(const Http::RequestHeaderMap& headers); bool waitForDisconnect(std::chrono::milliseconds time_to_wait = std::chrono::milliseconds(0)); Network::ClientConnection* connection() const { return connection_.get(); } - Network::ConnectionEvent last_connection_event() const { return last_connection_event_; } + Network::ConnectionEvent lastConnectionEvent() const { return last_connection_event_; } Network::Connection& rawConnection() { return *connection_; } bool disconnected() { return disconnected_; } diff --git a/test/integration/integration_test.cc b/test/integration/integration_test.cc index 3cddecf87bfd..e16d01583c71 100644 --- a/test/integration/integration_test.cc +++ b/test/integration/integration_test.cc @@ -993,7 +993,7 @@ TEST_P(IntegrationTest, TestDelayedConnectionTeardownOnGracefulClose) { // Issue a local close and check that the client did not pick up a remote close which can happen // when delayed close semantics are disabled. codec_client_->connection()->close(Network::ConnectionCloseType::NoFlush); - EXPECT_EQ(codec_client_->last_connection_event(), Network::ConnectionEvent::LocalClose); + EXPECT_EQ(codec_client_->lastConnectionEvent(), Network::ConnectionEvent::LocalClose); } // Test configuration of the delayed close timeout on downstream HTTP/1.1 connections. A value of 0 @@ -1030,7 +1030,7 @@ TEST_P(IntegrationTest, TestDelayedConnectionTeardownConfig) { // Therefore, avoid checking response code/payload here and instead simply look for the remote // close. EXPECT_TRUE(codec_client_->waitForDisconnect(std::chrono::milliseconds(500))); - EXPECT_EQ(codec_client_->last_connection_event(), Network::ConnectionEvent::RemoteClose); + EXPECT_EQ(codec_client_->lastConnectionEvent(), Network::ConnectionEvent::RemoteClose); } // Test that delay closed connections are eventually force closed when the timeout triggers. @@ -1064,7 +1064,7 @@ TEST_P(IntegrationTest, TestDelayedConnectionTeardownTimeoutTrigger) { response->waitForEndStream(); // The delayed close timeout should trigger since client is not closing the connection. 
EXPECT_TRUE(codec_client_->waitForDisconnect(std::chrono::milliseconds(2000))); - EXPECT_EQ(codec_client_->last_connection_event(), Network::ConnectionEvent::RemoteClose); + EXPECT_EQ(codec_client_->lastConnectionEvent(), Network::ConnectionEvent::RemoteClose); EXPECT_EQ(test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value(), 1); } diff --git a/test/integration/utility.h b/test/integration/utility.h index 707acf5dd0aa..c34c12ff2a51 100644 --- a/test/integration/utility.h +++ b/test/integration/utility.h @@ -70,7 +70,7 @@ class RawConnectionDriver { bool connecting() { return callbacks_->connecting_; } void run(Event::Dispatcher::RunType run_type = Event::Dispatcher::RunType::Block); void close(); - Network::ConnectionEvent last_connection_event() const { + Network::ConnectionEvent lastConnectionEvent() const { return callbacks_->last_connection_event_; } diff --git a/test/integration/xds_integration_test.cc b/test/integration/xds_integration_test.cc index 9442b682c7a3..3acc2ab486a4 100644 --- a/test/integration/xds_integration_test.cc +++ b/test/integration/xds_integration_test.cc @@ -3,6 +3,7 @@ #include "test/integration/http_integration.h" #include "test/integration/http_protocol_integration.h" +#include "test/integration/ssl_utility.h" #include "test/test_common/environment.h" #include "test/test_common/utility.h" @@ -72,6 +73,346 @@ TEST_P(XdsIntegrationTestTypedStruct, RouterRequestAndResponseWithBodyNoBuffer) testRouterRequestAndResponseWithBody(1024, 512, false); } +// TODO(lambdai): Extend RawConnectionDriver with SSL and delete this one. +class SslClient { +public: + SslClient(Network::ClientConnectionPtr ssl_conn, Event::Dispatcher& dispatcher) + : ssl_conn_(std::move(ssl_conn)), + payload_reader_(std::make_shared(dispatcher)) { + ssl_conn_->addConnectionCallbacks(connect_callbacks_); + ssl_conn_->addReadFilter(payload_reader_); + ssl_conn_->connect(); + while (!connect_callbacks_.connected()) { + dispatcher.run(Event::Dispatcher::RunType::NonBlock); + } + } + Network::ClientConnectionPtr ssl_conn_; + MockWatermarkBuffer* client_write_buffer; + std::shared_ptr payload_reader_; + ConnectionStatusCallbacks connect_callbacks_; +}; + +class LdsInplaceUpdateTcpProxyIntegrationTest + : public testing::TestWithParam, + public BaseIntegrationTest { +public: + LdsInplaceUpdateTcpProxyIntegrationTest() + : BaseIntegrationTest(GetParam(), ConfigHelper::baseConfig() + R"EOF( + filter_chains: + - filter_chain_match: + application_protocols: ["alpn0"] + filters: + - name: envoy.filters.network.tcp_proxy + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.tcp_proxy.v2.TcpProxy + stat_prefix: tcp_stats + cluster: cluster_0 + - filter_chain_match: + application_protocols: ["alpn1"] + filters: + - name: envoy.filters.network.tcp_proxy + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.tcp_proxy.v2.TcpProxy + stat_prefix: tcp_stats + cluster: cluster_1 +)EOF") {} + + void initialize() override { + config_helper_.renameListener("tcp"); + std::string tls_inspector_config = ConfigHelper::tlsInspectorFilter(); + config_helper_.addListenerFilter(tls_inspector_config); + + config_helper_.addSslConfig(); + config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + auto* filter_chain_0 = + bootstrap.mutable_static_resources()->mutable_listeners(0)->mutable_filter_chains(0); + auto* filter_chain_1 = + bootstrap.mutable_static_resources()->mutable_listeners(0)->mutable_filter_chains(1); + 
filter_chain_1->mutable_transport_socket()->MergeFrom( + *filter_chain_0->mutable_transport_socket()); + + bootstrap.mutable_static_resources()->mutable_clusters()->Add()->MergeFrom( + *bootstrap.mutable_static_resources()->mutable_clusters(0)); + bootstrap.mutable_static_resources()->mutable_clusters(1)->set_name("cluster_1"); + }); + + BaseIntegrationTest::initialize(); + + context_manager_ = + std::make_unique(timeSystem()); + context_ = Ssl::createClientSslTransportSocketFactory({}, *context_manager_, *api_); + } + + std::unique_ptr connect(const std::string& alpn) { + Network::Address::InstanceConstSharedPtr address = + Ssl::getSslAddress(version_, lookupPort("tcp")); + auto ssl_conn = dispatcher_->createClientConnection( + address, Network::Address::InstanceConstSharedPtr(), + context_->createTransportSocket(std::make_shared( + absl::string_view(""), std::vector(), std::vector{alpn})), + nullptr); + return std::make_unique(std::move(ssl_conn), *dispatcher_); + } + std::unique_ptr context_manager_; + Network::TransportSocketFactoryPtr context_; + testing::NiceMock secret_manager_; +}; + +// Verify that tcp connection 1 is closed while client 0 survives when deleting filter chain 1. +TEST_P(LdsInplaceUpdateTcpProxyIntegrationTest, ReloadConfigDeletingFilterChain) { + setUpstreamCount(2); + initialize(); + + auto client_0 = connect("alpn0"); + FakeRawConnectionPtr fake_upstream_connection_0; + ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection_0)); + + auto client_1 = connect("alpn1"); + FakeRawConnectionPtr fake_upstream_connection_1; + ASSERT_TRUE(fake_upstreams_[1]->waitForRawConnection(fake_upstream_connection_1)); + + ConfigHelper new_config_helper(version_, *api_, + MessageUtil::getJsonStringFromMessage(config_helper_.bootstrap())); + new_config_helper.addConfigModifier( + [&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { + auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0); + listener->mutable_filter_chains()->RemoveLast(); + }); + new_config_helper.setLds("1"); + test_server_->waitForCounterGe("listener_manager.listener_in_place_updated", 1); + + while (!client_1->connect_callbacks_.closed()) { + dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + } + + Buffer::OwnedImpl buffer("hello"); + client_0->ssl_conn_->write(buffer, false); + client_0->ssl_conn_->dispatcher().run(Event::Dispatcher::RunType::NonBlock); + std::string observed_data_0; + ASSERT_TRUE(fake_upstream_connection_0->waitForData(5, &observed_data_0)); + EXPECT_EQ("hello", observed_data_0); + + ASSERT_TRUE(fake_upstream_connection_0->write("world")); + client_0->payload_reader_->set_data_to_wait_for("world"); + client_0->ssl_conn_->dispatcher().run(Event::Dispatcher::RunType::Block); + client_0->ssl_conn_->close(Network::ConnectionCloseType::NoFlush); + + while (!client_0->connect_callbacks_.closed()) { + dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + } +} + +// Verify that tcp connection of filter chain 0 survives if new listener config adds new filter +// chain 2. 
+TEST_P(LdsInplaceUpdateTcpProxyIntegrationTest, ReloadConfigAddingFilterChain) { + setUpstreamCount(2); + initialize(); + + auto client_0 = connect("alpn0"); + FakeRawConnectionPtr fake_upstream_connection_0; + ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection_0)); + + ConfigHelper new_config_helper(version_, *api_, + MessageUtil::getJsonStringFromMessage(config_helper_.bootstrap())); + new_config_helper.addConfigModifier( + [&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { + auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0); + listener->mutable_filter_chains()->Add()->MergeFrom(*listener->mutable_filter_chains(1)); + *listener->mutable_filter_chains(2) + ->mutable_filter_chain_match() + ->mutable_application_protocols(0) = "alpn2"; + }); + new_config_helper.setLds("1"); + test_server_->waitForCounterGe("listener_manager.listener_in_place_updated", 1); + test_server_->waitForCounterGe("listener_manager.listener_create_success", 2); + + auto client_2 = connect("alpn2"); + FakeRawConnectionPtr fake_upstream_connection_2; + ASSERT_TRUE(fake_upstreams_[1]->waitForRawConnection(fake_upstream_connection_2)); + + Buffer::OwnedImpl buffer_2("hello"); + client_2->ssl_conn_->write(buffer_2, false); + client_2->ssl_conn_->dispatcher().run(Event::Dispatcher::RunType::NonBlock); + std::string observed_data_2; + ASSERT_TRUE(fake_upstream_connection_2->waitForData(5, &observed_data_2)); + EXPECT_EQ("hello", observed_data_2); + + ASSERT_TRUE(fake_upstream_connection_2->write("world")); + client_2->payload_reader_->set_data_to_wait_for("world"); + client_2->ssl_conn_->dispatcher().run(Event::Dispatcher::RunType::Block); + client_2->ssl_conn_->close(Network::ConnectionCloseType::NoFlush); + + while (!client_2->connect_callbacks_.closed()) { + dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + } + + Buffer::OwnedImpl buffer_0("hello"); + client_0->ssl_conn_->write(buffer_0, false); + client_0->ssl_conn_->dispatcher().run(Event::Dispatcher::RunType::NonBlock); + std::string observed_data_0; + ASSERT_TRUE(fake_upstream_connection_0->waitForData(5, &observed_data_0)); + EXPECT_EQ("hello", observed_data_0); + + ASSERT_TRUE(fake_upstream_connection_0->write("world")); + client_0->payload_reader_->set_data_to_wait_for("world"); + client_0->ssl_conn_->dispatcher().run(Event::Dispatcher::RunType::Block); + client_0->ssl_conn_->close(Network::ConnectionCloseType::NoFlush); + + while (!client_0->connect_callbacks_.closed()) { + dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + } +} + +class LdsInplaceUpdateHttpIntegrationTest + : public testing::TestWithParam, + public HttpIntegrationTest { +public: + LdsInplaceUpdateHttpIntegrationTest() + : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, GetParam()) {} + + void initialize() override { + autonomous_upstream_ = true; + setUpstreamCount(2); + + config_helper_.renameListener("http"); + std::string tls_inspector_config = ConfigHelper::tlsInspectorFilter(); + config_helper_.addListenerFilter(tls_inspector_config); + config_helper_.addSslConfig(); + config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + auto* filter_chain_0 = + bootstrap.mutable_static_resources()->mutable_listeners(0)->mutable_filter_chains(0); + *filter_chain_0->mutable_filter_chain_match()->mutable_application_protocols()->Add() = + "alpn0"; + auto* filter_chain_1 = bootstrap.mutable_static_resources() + ->mutable_listeners(0) + ->mutable_filter_chains() + ->Add(); + 
filter_chain_1->MergeFrom(*filter_chain_0); + + // filter chain 1 + // alpn1, route to cluster_1 + *filter_chain_1->mutable_filter_chain_match()->mutable_application_protocols(0) = "alpn1"; + + auto* config_blob = filter_chain_1->mutable_filters(0)->mutable_typed_config(); + + ASSERT_TRUE(config_blob->Is()); + auto hcm_config = MessageUtil::anyConvert< + envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager>( + *config_blob); + hcm_config.mutable_route_config() + ->mutable_virtual_hosts(0) + ->mutable_routes(0) + ->mutable_route() + ->set_cluster("cluster_1"); + config_blob->PackFrom(hcm_config); + bootstrap.mutable_static_resources()->mutable_clusters()->Add()->MergeFrom( + *bootstrap.mutable_static_resources()->mutable_clusters(0)); + bootstrap.mutable_static_resources()->mutable_clusters(1)->set_name("cluster_1"); + }); + + BaseIntegrationTest::initialize(); + + context_manager_ = + std::make_unique(timeSystem()); + context_ = Ssl::createClientSslTransportSocketFactory({}, *context_manager_, *api_); + address_ = Ssl::getSslAddress(version_, lookupPort("http")); + } + + IntegrationCodecClientPtr createHttpCodec(const std::string& alpn) { + auto ssl_conn = dispatcher_->createClientConnection( + address_, Network::Address::InstanceConstSharedPtr(), + context_->createTransportSocket(std::make_shared( + absl::string_view(""), std::vector(), std::vector{alpn})), + nullptr); + return makeHttpConnection(std::move(ssl_conn)); + } + + void expectResponseHeaderConnectionClose(IntegrationCodecClient& codec_client, + bool expect_close) { + IntegrationStreamDecoderPtr response = + codec_client.makeHeaderOnlyRequest(default_request_headers_); + + response->waitForEndStream(); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + if (expect_close) { + EXPECT_EQ("close", response->headers().Connection()->value().getStringView()); + + } else { + EXPECT_EQ(nullptr, response->headers().Connection()); + } + } + + std::unique_ptr context_manager_; + Network::TransportSocketFactoryPtr context_; + testing::NiceMock secret_manager_; + Network::Address::InstanceConstSharedPtr address_; +}; + +// Verify that http response on filter chain 0 has "Connection: close" header when filter chain 0 +// is deleted during the listener update. +TEST_P(LdsInplaceUpdateHttpIntegrationTest, ReloadConfigDeletingFilterChain) { + initialize(); + + auto codec_client_1 = createHttpCodec("alpn1"); + auto codec_client_0 = createHttpCodec("alpn0"); + Cleanup cleanup([c1 = codec_client_1.get(), c0 = codec_client_0.get()]() { + c1->close(); + c0->close(); + }); + ConfigHelper new_config_helper(version_, *api_, + MessageUtil::getJsonStringFromMessage(config_helper_.bootstrap())); + new_config_helper.addConfigModifier( + [&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { + auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0); + listener->mutable_filter_chains()->RemoveLast(); + }); + + new_config_helper.setLds("1"); + test_server_->waitForCounterGe("listener_manager.listener_in_place_updated", 1); + test_server_->waitForGaugeGe("listener_manager.total_filter_chains_draining", 1); + + expectResponseHeaderConnectionClose(*codec_client_1, true); + test_server_->waitForGaugeGe("listener_manager.total_filter_chains_draining", 0); + expectResponseHeaderConnectionClose(*codec_client_0, false); +} + +// Verify that http clients of filter chain 0 survives if new listener config adds new filter +// chain 2. 
+TEST_P(LdsInplaceUpdateHttpIntegrationTest, ReloadConfigAddingFilterChain) { + initialize(); + + auto codec_client_0 = createHttpCodec("alpn0"); + Cleanup cleanup0([c0 = codec_client_0.get()]() { c0->close(); }); + ConfigHelper new_config_helper(version_, *api_, + MessageUtil::getJsonStringFromMessage(config_helper_.bootstrap())); + new_config_helper.addConfigModifier( + [&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { + auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0); + listener->mutable_filter_chains()->Add()->MergeFrom(*listener->mutable_filter_chains(1)); + *listener->mutable_filter_chains(2) + ->mutable_filter_chain_match() + ->mutable_application_protocols(0) = "alpn2"; + }); + new_config_helper.setLds("1"); + test_server_->waitForCounterGe("listener_manager.listener_in_place_updated", 1); + test_server_->waitForCounterGe("listener_manager.listener_create_success", 2); + + auto codec_client_2 = createHttpCodec("alpn2"); + Cleanup cleanup2([c2 = codec_client_2.get()]() { c2->close(); }); + expectResponseHeaderConnectionClose(*codec_client_2, false); + expectResponseHeaderConnectionClose(*codec_client_0, false); +} + +INSTANTIATE_TEST_SUITE_P(IpVersions, LdsInplaceUpdateHttpIntegrationTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); + +INSTANTIATE_TEST_SUITE_P(IpVersions, LdsInplaceUpdateTcpProxyIntegrationTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); + using LdsIntegrationTest = HttpProtocolIntegrationTest; INSTANTIATE_TEST_SUITE_P(Protocols, LdsIntegrationTest, @@ -106,6 +447,7 @@ TEST_P(LdsIntegrationTest, ReloadConfig) { // Create an LDS response with the new config, and reload config. new_config_helper.setLds("1"); + test_server_->waitForCounterGe("listener_manager.listener_in_place_updated", 1); test_server_->waitForCounterGe("listener_manager.lds.update_success", 2); // HTTP 1.0 should now be enabled. 
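These LDS reload tests take the in-place path because the updated Listener differs from the
active one only in its filter_chains. A minimal sketch of the filter-chain-only check that gates
that path (alongside the other conditions in supportUpdateFilterChain); the chain names and port
value here are illustrative, and the assertions mirror the ListenerMessageUtilTest cases added in
listener_manager_impl_test.cc later in this patch:

// Sketch: classify an update as filter-chain-only vs. full listener update.
envoy::config::listener::v3::Listener active_config;
envoy::config::listener::v3::Listener new_config;
active_config.set_name("http");
new_config.set_name("http");
// The configs differ only in filter_chains, which filterChainOnlyChange() ignores, so the
// update is eligible for the in-place path and listener_in_place_updated increments.
active_config.add_filter_chains()->set_name("chain_a");
new_config.add_filter_chains()->set_name("chain_b");
EXPECT_TRUE(Server::ListenerMessageUtil::filterChainOnlyChange(active_config, new_config));
// Any change outside filter_chains (here, the listening port) forces the full update path.
new_config.mutable_address()->mutable_socket_address()->set_port_value(8080);
EXPECT_FALSE(Server::ListenerMessageUtil::filterChainOnlyChange(active_config, new_config));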
@@ -125,6 +467,5 @@ TEST_P(LdsIntegrationTest, FailConfigLoad) { EXPECT_DEATH_LOG_TO_STDERR(initialize(), "Didn't find a registered implementation for name: 'grewgragra'"); } - } // namespace } // namespace Envoy diff --git a/test/server/BUILD b/test/server/BUILD index dba3d54d1c57..ce027a2e46f6 100644 --- a/test/server/BUILD +++ b/test/server/BUILD @@ -212,6 +212,7 @@ envoy_cc_test_library( "//test/mocks/server:server_mocks", "//test/test_common:environment_lib", "//test/test_common:simulated_time_system_lib", + "//test/test_common:test_runtime_lib", "//test/test_common:test_time_lib", "//test/test_common:threadsafe_singleton_injector_lib", "@envoy_api//envoy/admin/v3:pkg_cc_proto", @@ -235,6 +236,7 @@ envoy_cc_test( "//source/common/network:utility_lib", "//source/common/protobuf", "//source/extensions/filters/listener/original_dst:config", + "//source/extensions/filters/listener/proxy_protocol:config", "//source/extensions/filters/listener/tls_inspector:config", "//source/extensions/filters/network/http_connection_manager:config", "//source/extensions/filters/network/tcp_proxy:config", diff --git a/test/server/listener_manager_impl_test.cc b/test/server/listener_manager_impl_test.cc index a032037daf9b..40b9f45eac5d 100644 --- a/test/server/listener_manager_impl_test.cc +++ b/test/server/listener_manager_impl_test.cc @@ -89,6 +89,65 @@ class ListenerManagerImplWithRealFiltersTest : public ListenerManagerImplTest { } }; +class ListenerManagerImplForInPlaceFilterChainUpdateTest : public ListenerManagerImplTest { +public: + envoy::config::listener::v3::Listener createDefaultListener() { + envoy::config::listener::v3::Listener listener_proto; + Protobuf::TextFormat::ParseFromString(R"EOF( + name: "foo" + address: { + socket_address: { + address: "127.0.0.1" + port_value: 1234 + } + } + filter_chains: {} + )EOF", + &listener_proto); + return listener_proto; + } + + void expectAddListener(const envoy::config::listener::v3::Listener& listener_proto, + ListenerHandle*) { + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(*worker_, addListener(_, _, _)); + manager_->addOrUpdateListener(listener_proto, "", true); + worker_->callAddCompletion(true); + EXPECT_EQ(1UL, manager_->listeners().size()); + checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); + } + + void expectUpdateToThenDrain(const envoy::config::listener::v3::Listener& new_listener_proto, + ListenerHandle* old_listener_handle) { + EXPECT_CALL(*worker_, addListener(_, _, _)); + EXPECT_CALL(*worker_, stopListener(_, _)); + EXPECT_CALL(*old_listener_handle->drain_manager_, startDrainSequence(_)); + + EXPECT_TRUE(manager_->addOrUpdateListener(new_listener_proto, "", true)); + + EXPECT_CALL(*worker_, removeListener(_, _)); + old_listener_handle->drain_manager_->drain_sequence_completion_(); + + EXPECT_CALL(*old_listener_handle, onDestroy()); + worker_->callRemovalCompletion(); + } + + void expectRemove(const envoy::config::listener::v3::Listener& listener_proto, + ListenerHandle* listener_handle) { + + EXPECT_CALL(*worker_, stopListener(_, _)); + EXPECT_CALL(*listener_factory_.socket_, close()); + EXPECT_CALL(*listener_handle->drain_manager_, startDrainSequence(_)); + EXPECT_TRUE(manager_->removeListener(listener_proto.name())); + + EXPECT_CALL(*worker_, removeListener(_, _)); + listener_handle->drain_manager_->drain_sequence_completion_(); + + EXPECT_CALL(*listener_handle, onDestroy()); + worker_->callRemovalCompletion(); + } +}; + class MockLdsApi : public LdsApi { public: MOCK_METHOD(std::string, versionInfo, (), 
(const)); @@ -324,7 +383,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, BadListenerConfig2UDPListenerFilt )EOF"; EXPECT_THROW_WITH_REGEX(manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true), - EnvoyException, "Only 1 UDP filter per listener supported"); + EnvoyException, "Only 1 UDP listener filter per listener supported"); } TEST_F(ListenerManagerImplWithRealFiltersTest, BadFilterConfig) { @@ -502,7 +561,7 @@ TEST_F(ListenerManagerImplTest, ModifyOnlyDrainType) { expectListenerCreate(false, true, envoy::config::listener::v3::Listener::MODIFY_ONLY); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); - checkStats(1, 0, 0, 0, 1, 0); + checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); EXPECT_CALL(*listener_foo, onDestroy()); } @@ -526,7 +585,7 @@ drain_type: default ListenerHandle* listener_foo = expectListenerCreate(false, true); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); - checkStats(1, 0, 0, 0, 1, 0); + checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); // Update foo listener, but with a different address. Should throw. const std::string listener_foo_different_address_yaml = R"EOF( @@ -581,7 +640,7 @@ drain_type: default EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); - checkStats(1, 0, 0, 0, 1, 0); + checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); EXPECT_CALL(*listener_foo, onDestroy()); } @@ -613,7 +672,7 @@ drain_type: default EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); - checkStats(1, 0, 0, 0, 1, 0); + checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); EXPECT_CALL(*listener_foo, onDestroy()); } @@ -637,7 +696,7 @@ name: foo ListenerHandle* listener_foo = expectListenerCreate(false, false); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", false)); - checkStats(1, 0, 0, 0, 1, 0); + checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); checkConfigDump(R"EOF( static_listeners: listener: @@ -668,11 +727,11 @@ name: foo EXPECT_FALSE( manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_update1_yaml), "", false)); - checkStats(1, 0, 0, 0, 1, 0); + checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); // Remove foo listener. Should be blocked. EXPECT_FALSE(manager_->removeListener("foo")); - checkStats(1, 0, 0, 0, 1, 0); + checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); EXPECT_CALL(*listener_foo, onDestroy()); } @@ -710,7 +769,7 @@ filter_chains: {} EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "version1", true)); - checkStats(1, 0, 0, 0, 1, 0); + checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); EXPECT_CALL(*lds_api, versionInfo()).WillOnce(Return("version1")); checkConfigDump(R"EOF( @@ -764,7 +823,7 @@ version_info: version1 EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "version2", true)); // Version2 is in warming list as listener_foo2->target_ is not ready yet. 
- checkStats(/*added=*/2, 0, /*removed=*/1, /*warming=*/1, 0, 0); + checkStats(__LINE__, /*added=*/2, 0, /*removed=*/1, /*warming=*/1, 0, 0, 0); EXPECT_CALL(*lds_api, versionInfo()).WillOnce(Return("version2")); checkConfigDump(R"EOF( version_info: version2 @@ -792,10 +851,9 @@ version_info: version1 } TEST_F(ListenerManagerImplTest, OverrideListener) { - time_system_.setSystemTime(std::chrono::milliseconds(1001001001001)); - InSequence s; + time_system_.setSystemTime(std::chrono::milliseconds(1001001001001)); auto* lds_api = new MockLdsApi(); EXPECT_CALL(listener_factory_, createLdsApi_(_)).WillOnce(Return(lds_api)); envoy::config::core::v3::ConfigSource lds_config; @@ -815,7 +873,7 @@ filter_chains: {} EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); EXPECT_TRUE( manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "version1", true)); - checkStats(1, 0, 0, 0, 1, 0); + checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); // Start workers and capture ListenerImpl. Network::ListenerConfig* listener_config = nullptr; @@ -829,75 +887,40 @@ filter_chains: {} manager_->startWorkers(guard_dog_); EXPECT_EQ(0, server_.stats_store_.counter("listener_manager.listener_create_success").value()); + checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); - // TODO(lambdai): No need to invoke `addListenerToWorkerForTest` explicitly when intelligent warm - // up procedure is added. - ListenerImpl* listener_impl = dynamic_cast(listener_config); - auto overridden_listener = absl::make_optional(1); - EXPECT_CALL(*worker_, addListener(_, _, _)) - .WillOnce(Invoke([](absl::optional, Network::ListenerConfig&, - auto completion) -> void { completion(true); })); - manager_->addListenerToWorkerForTest(*worker_, overridden_listener, *listener_impl, nullptr); - - EXPECT_EQ(1, server_.stats_store_.counter("listener_manager.listener_create_success").value()); - EXPECT_CALL(*listener_foo, onDestroy()); -} - -TEST_F(ListenerManagerImplTest, DrainFilterChains) { - time_system_.setSystemTime(std::chrono::milliseconds(1001001001001)); - - InSequence s; - - auto* lds_api = new MockLdsApi(); - EXPECT_CALL(listener_factory_, createLdsApi_(_)).WillOnce(Return(lds_api)); - envoy::config::core::v3::ConfigSource lds_config; - manager_->createLdsApi(lds_config); - - // Add foo listener. - const std::string listener_foo_yaml = R"EOF( -name: "foo" + // Update foo into warming. + const std::string listener_foo_update1_yaml = R"EOF( +name: foo address: socket_address: - address: "127.0.0.1" + address: 127.0.0.1 port_value: 1234 filter_chains: - filters: - - name: fake - config: {} + filter_chain_match: + destination_port: 1234 )EOF"; - ListenerHandle* listener_foo = expectListenerCreate(false, true); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + ListenerHandle* listener_foo_update1 = expectListenerOverridden(false); + EXPECT_CALL(*worker_, addListener(_, _, _)); + auto* timer = new Event::MockTimer(dynamic_cast(&server_.dispatcher())); + EXPECT_CALL(*timer, enableTimer(_, _)); EXPECT_TRUE( - manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "version1", true)); - checkStats(1, 0, 0, 0, 1, 0); - - // Start workers and capture ListenerImpl. 
- Network::ListenerConfig* listener_config = nullptr; - EXPECT_CALL(*worker_, addListener(_, _, _)) - .WillOnce(Invoke([&listener_config](auto, Network::ListenerConfig& config, auto) -> void { - listener_config = &config; - })) - .RetiresOnSaturation(); - - EXPECT_CALL(*worker_, start(_)); - manager_->startWorkers(guard_dog_); - - ENVOY_LOG_MISC(debug, "lambdai: config ptr {}", static_cast(listener_config)); + manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_update1_yaml), "", true)); + EXPECT_EQ(1UL, manager_->listeners().size()); - EXPECT_EQ(0, server_.stats_store_.counter("listener_manager.listener_create_success").value()); + worker_->callAddCompletion(true); + checkStats(__LINE__, 1, 1, 0, 0, 1, 0, 1); - // TODO(lambdai): No need to invoke `drainFilterChains` explicitly when intelligent warm - // up procedure is added. - ListenerImpl* listener_impl = dynamic_cast(listener_config); - ASSERT(listener_impl != nullptr); - auto* timer = new Event::MockTimer(dynamic_cast(&server_.dispatcher())); - EXPECT_CALL(*timer, enableTimer(_, _)); - manager_->drainFilterChainsForTest(listener_impl); EXPECT_CALL(*worker_, removeFilterChains(_, _, _)); timer->invokeCallback(); EXPECT_CALL(*listener_foo, onDestroy()); worker_->callDrainFilterChainsComplete(); + + EXPECT_EQ(1UL, manager_->listeners().size()); + EXPECT_CALL(*listener_foo_update1, onDestroy()); + EXPECT_EQ(1, server_.stats_store_.counter("listener_manager.listener_create_success").value()); } TEST_F(ListenerManagerImplTest, AddOrUpdateListener) { @@ -929,7 +952,7 @@ filter_chains: {} EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); EXPECT_TRUE( manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "version1", true)); - checkStats(1, 0, 0, 0, 1, 0); + checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); EXPECT_CALL(*lds_api, versionInfo()).WillOnce(Return("version1")); checkConfigDump(R"EOF( version_info: version1 @@ -953,7 +976,7 @@ version_info: version1 // Update duplicate should be a NOP. EXPECT_FALSE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); - checkStats(1, 0, 0, 0, 1, 0); + checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); // Update foo listener. Should share socket. const std::string listener_foo_update1_yaml = R"EOF( @@ -972,7 +995,7 @@ per_connection_buffer_limit_bytes: 10 EXPECT_CALL(*listener_foo, onDestroy()); EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_update1_yaml), "version2", true)); - checkStats(1, 1, 0, 0, 1, 0); + checkStats(__LINE__, 1, 1, 0, 0, 1, 0, 0); EXPECT_CALL(*lds_api, versionInfo()).WillOnce(Return("version2")); checkConfigDump(R"EOF( version_info: version2 @@ -1020,7 +1043,7 @@ version_info: version2 // Update duplicate should be a NOP. 
EXPECT_FALSE( manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_update1_yaml), "", true)); - checkStats(1, 1, 0, 0, 1, 0); + checkStats(__LINE__, 1, 1, 0, 0, 1, 0, 0); time_system_.setSystemTime(std::chrono::milliseconds(3003003003003)); @@ -1033,7 +1056,7 @@ version_info: version2 EXPECT_TRUE( manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "version3", true)); worker_->callAddCompletion(true); - checkStats(1, 2, 0, 0, 1, 1); + checkStats(__LINE__, 1, 2, 0, 0, 1, 1, 0); EXPECT_CALL(*lds_api, versionInfo()).WillOnce(Return("version3")); checkConfigDump(R"EOF( version_info: version3 @@ -1071,10 +1094,10 @@ version_info: version3 EXPECT_CALL(*worker_, removeListener(_, _)); listener_foo_update1->drain_manager_->drain_sequence_completion_(); - checkStats(1, 2, 0, 0, 1, 1); + checkStats(__LINE__, 1, 2, 0, 0, 1, 1, 0); EXPECT_CALL(*listener_foo_update1, onDestroy()); worker_->callRemovalCompletion(); - checkStats(1, 2, 0, 0, 1, 0); + checkStats(__LINE__, 1, 2, 0, 0, 1, 0, 0); time_system_.setSystemTime(std::chrono::milliseconds(4004004004004)); @@ -1095,7 +1118,7 @@ filter_chains: {} manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_bar_yaml), "version4", true)); EXPECT_EQ(2UL, manager_->listeners().size()); worker_->callAddCompletion(true); - checkStats(2, 2, 0, 0, 2, 0); + checkStats(__LINE__, 2, 2, 0, 0, 2, 0, 0); time_system_.setSystemTime(std::chrono::milliseconds(5005005005005)); @@ -1115,7 +1138,7 @@ filter_chains: {} EXPECT_TRUE( manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_baz_yaml), "version5", true)); EXPECT_EQ(2UL, manager_->listeners().size()); - checkStats(3, 2, 0, 1, 2, 0); + checkStats(__LINE__, 3, 2, 0, 1, 2, 0, 0); EXPECT_CALL(*lds_api, versionInfo()).WillOnce(Return("version5")); checkConfigDump(R"EOF( version_info: version5 @@ -1166,7 +1189,7 @@ version_info: version5 // Update a duplicate baz that is currently warming. EXPECT_FALSE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_baz_yaml), "", true)); - checkStats(3, 2, 0, 1, 2, 0); + checkStats(__LINE__, 3, 2, 0, 1, 2, 0, 0); // Update baz while it is warming. const std::string listener_baz_update1_yaml = R"EOF( @@ -1190,14 +1213,14 @@ name: baz EXPECT_TRUE( manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_baz_update1_yaml), "", true)); EXPECT_EQ(2UL, manager_->listeners().size()); - checkStats(3, 3, 0, 1, 2, 0); + checkStats(__LINE__, 3, 3, 0, 1, 2, 0, 0); // Finish initialization for baz which should make it active. EXPECT_CALL(*worker_, addListener(_, _, _)); listener_baz_update1->target_.ready(); EXPECT_EQ(3UL, manager_->listeners().size()); worker_->callAddCompletion(true); - checkStats(3, 3, 0, 0, 3, 0); + checkStats(__LINE__, 3, 3, 0, 0, 3, 0, 0); EXPECT_CALL(*listener_foo_update2, onDestroy()); EXPECT_CALL(*listener_bar, onDestroy()); @@ -1230,7 +1253,7 @@ name: foo EXPECT_CALL(*worker_, addListener(_, _, _)); EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); worker_->callAddCompletion(true); - checkStats(1, 0, 0, 0, 1, 0); + checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); // Remove foo into draining. 
std::function stop_completion; @@ -1242,24 +1265,24 @@ name: foo })); EXPECT_CALL(*listener_foo->drain_manager_, startDrainSequence(_)); EXPECT_TRUE(manager_->removeListener("foo")); - checkStats(1, 0, 1, 0, 0, 1); + checkStats(__LINE__, 1, 0, 1, 0, 0, 1, 0); EXPECT_CALL(*worker_, removeListener(_, _)); listener_foo->drain_manager_->drain_sequence_completion_(); - checkStats(1, 0, 1, 0, 0, 1); + checkStats(__LINE__, 1, 0, 1, 0, 0, 1, 0); // Add foo again. We should use the socket from draining. ListenerHandle* listener_foo2 = expectListenerCreate(false, true); EXPECT_CALL(*worker_, addListener(_, _, _)); EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); worker_->callAddCompletion(true); - checkStats(2, 0, 1, 0, 1, 1); + checkStats(__LINE__, 2, 0, 1, 0, 1, 1, 0); EXPECT_CALL(*listener_factory_.socket_, close()).Times(0); stop_completion(); EXPECT_CALL(*listener_foo, onDestroy()); worker_->callRemovalCompletion(); - checkStats(2, 0, 1, 0, 1, 0); + checkStats(__LINE__, 2, 0, 1, 0, 1, 0, 0); EXPECT_CALL(*listener_foo2, onDestroy()); } @@ -1290,17 +1313,17 @@ name: foo EXPECT_CALL(*worker_, addListener(_, _, _)); EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); worker_->callAddCompletion(true); - checkStats(1, 0, 0, 0, 1, 0); + checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); // Remove foo into draining. EXPECT_CALL(*worker_, stopListener(_, _)); EXPECT_CALL(*listener_factory_.socket_, close()); EXPECT_CALL(*listener_foo->drain_manager_, startDrainSequence(_)); EXPECT_TRUE(manager_->removeListener("foo")); - checkStats(1, 0, 1, 0, 0, 1); + checkStats(__LINE__, 1, 0, 1, 0, 0, 1, 0); EXPECT_CALL(*worker_, removeListener(_, _)); listener_foo->drain_manager_->drain_sequence_completion_(); - checkStats(1, 0, 1, 0, 0, 1); + checkStats(__LINE__, 1, 0, 1, 0, 0, 1, 0); // Add foo again. We should use the socket from draining. ListenerHandle* listener_foo2 = expectListenerCreate(false, true); @@ -1308,11 +1331,11 @@ name: foo EXPECT_CALL(*worker_, addListener(_, _, _)); EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); worker_->callAddCompletion(true); - checkStats(2, 0, 1, 0, 1, 1); + checkStats(__LINE__, 2, 0, 1, 0, 1, 1, 0); EXPECT_CALL(*listener_foo, onDestroy()); worker_->callRemovalCompletion(); - checkStats(2, 0, 1, 0, 1, 0); + checkStats(__LINE__, 2, 0, 1, 0, 1, 0, 0); EXPECT_CALL(*listener_foo2, onDestroy()); } @@ -1510,7 +1533,7 @@ name: foo EXPECT_CALL(*worker_, addListener(_, _, _)); EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); worker_->callAddCompletion(true); - checkStats(1, 0, 0, 0, 1, 0); + checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); EXPECT_CALL(*listener_foo->drain_manager_, drainClose()).WillOnce(Return(false)); EXPECT_CALL(server_.drain_manager_, drainClose()).WillOnce(Return(false)); @@ -1519,7 +1542,7 @@ name: foo EXPECT_CALL(*worker_, stopListener(_, _)); EXPECT_CALL(*listener_foo->drain_manager_, startDrainSequence(_)); EXPECT_TRUE(manager_->removeListener("foo")); - checkStats(1, 0, 1, 0, 0, 1); + checkStats(__LINE__, 1, 0, 1, 0, 0, 1, 0); // NOTE: || short circuit here prevents the server drain manager from getting called. 
EXPECT_CALL(*listener_foo->drain_manager_, drainClose()).WillOnce(Return(true)); @@ -1527,7 +1550,7 @@ name: foo EXPECT_CALL(*worker_, removeListener(_, _)); listener_foo->drain_manager_->drain_sequence_completion_(); - checkStats(1, 0, 1, 0, 0, 1); + checkStats(__LINE__, 1, 0, 1, 0, 0, 1, 0); EXPECT_CALL(*listener_foo->drain_manager_, drainClose()).WillOnce(Return(false)); EXPECT_CALL(server_.drain_manager_, drainClose()).WillOnce(Return(true)); @@ -1536,7 +1559,7 @@ name: foo EXPECT_CALL(*listener_foo, onDestroy()); worker_->callRemovalCompletion(); EXPECT_EQ(0UL, manager_->listeners().size()); - checkStats(1, 0, 1, 0, 0, 0); + checkStats(__LINE__, 1, 0, 1, 0, 0, 0, 0); } TEST_F(ListenerManagerImplTest, RemoveListener) { @@ -1564,25 +1587,25 @@ name: foo EXPECT_CALL(listener_foo->target_, initialize()); EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); EXPECT_EQ(0UL, manager_->listeners().size()); - checkStats(1, 0, 0, 1, 0, 0); + checkStats(__LINE__, 1, 0, 0, 1, 0, 0, 0); // Remove foo. EXPECT_CALL(*listener_foo, onDestroy()); EXPECT_TRUE(manager_->removeListener("foo")); EXPECT_EQ(0UL, manager_->listeners().size()); - checkStats(1, 0, 1, 0, 0, 0); + checkStats(__LINE__, 1, 0, 1, 0, 0, 0, 0); // Add foo again and initialize it. listener_foo = expectListenerCreate(true, true); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); EXPECT_CALL(listener_foo->target_, initialize()); EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); - checkStats(2, 0, 1, 1, 0, 0); + checkStats(__LINE__, 2, 0, 1, 1, 0, 0, 0); EXPECT_CALL(*worker_, addListener(_, _, _)); listener_foo->target_.ready(); worker_->callAddCompletion(true); EXPECT_EQ(1UL, manager_->listeners().size()); - checkStats(2, 0, 1, 0, 1, 0); + checkStats(__LINE__, 2, 0, 1, 0, 1, 0, 0); // Update foo into warming. const std::string listener_foo_update1_yaml = R"EOF( @@ -1591,10 +1614,9 @@ name: foo socket_address: address: 127.0.0.1 port_value: 1234 +per_connection_buffer_limit_bytes: 999 filter_chains: -- filters: - - name: fake - config: {} +- filters: [] )EOF"; ListenerHandle* listener_foo_update1 = expectListenerCreate(true, true); @@ -1602,7 +1624,7 @@ name: foo EXPECT_TRUE( manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_update1_yaml), "", true)); EXPECT_EQ(1UL, manager_->listeners().size()); - checkStats(2, 1, 1, 1, 1, 0); + checkStats(__LINE__, 2, 1, 1, 1, 1, 0, 0); // Remove foo which should remove both warming and active. 
EXPECT_CALL(*listener_foo_update1, onDestroy()); @@ -1610,14 +1632,14 @@ name: foo EXPECT_CALL(*listener_factory_.socket_, close()); EXPECT_CALL(*listener_foo->drain_manager_, startDrainSequence(_)); EXPECT_TRUE(manager_->removeListener("foo")); - checkStats(2, 1, 2, 0, 0, 1); + checkStats(__LINE__, 2, 1, 2, 0, 0, 1, 0); EXPECT_CALL(*worker_, removeListener(_, _)); listener_foo->drain_manager_->drain_sequence_completion_(); - checkStats(2, 1, 2, 0, 0, 1); + checkStats(__LINE__, 2, 1, 2, 0, 0, 1, 0); EXPECT_CALL(*listener_foo, onDestroy()); worker_->callRemovalCompletion(); EXPECT_EQ(0UL, manager_->listeners().size()); - checkStats(2, 1, 2, 0, 0, 0); + checkStats(__LINE__, 2, 1, 2, 0, 0, 0, 0); } // Validates that StopListener functionality works correctly when only inbound listeners are @@ -1643,13 +1665,14 @@ traffic_direction: INBOUND ListenerHandle* listener_foo = expectListenerCreate(true, true); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); EXPECT_CALL(listener_foo->target_, initialize()); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); - checkStats(1, 0, 0, 1, 0, 0); + auto foo_inbound_proto = parseListenerFromV2Yaml(listener_foo_yaml); + EXPECT_TRUE(manager_->addOrUpdateListener(foo_inbound_proto, "", true)); + checkStats(__LINE__, 1, 0, 0, 1, 0, 0, 0); EXPECT_CALL(*worker_, addListener(_, _, _)); listener_foo->target_.ready(); worker_->callAddCompletion(true); EXPECT_EQ(1UL, manager_->listeners().size()); - checkStats(1, 0, 0, 0, 1, 0); + checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); // Add a listener in outbound direction. const std::string listener_foo_outbound_yaml = R"EOF( @@ -1711,6 +1734,15 @@ traffic_direction: INBOUND - filters: [] )EOF"; EXPECT_FALSE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_bar_yaml), "", true)); + + // Explicitly validate that in place filter chain update is not allowed. 
+ auto in_place_foo_inbound_proto = foo_inbound_proto; + in_place_foo_inbound_proto.mutable_filter_chains(0) + ->mutable_filter_chain_match() + ->mutable_destination_port() + ->set_value(9999); + + EXPECT_FALSE(manager_->addOrUpdateListener(in_place_foo_inbound_proto, "", true)); EXPECT_CALL(*listener_foo, onDestroy()); EXPECT_CALL(*listener_foo_outbound, onDestroy()); EXPECT_CALL(*listener_bar_outbound, onDestroy()); @@ -1738,12 +1770,12 @@ name: foo EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); EXPECT_CALL(listener_foo->target_, initialize()); EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); - checkStats(1, 0, 0, 1, 0, 0); + checkStats(__LINE__, 1, 0, 0, 1, 0, 0, 0); EXPECT_CALL(*worker_, addListener(_, _, _)); listener_foo->target_.ready(); worker_->callAddCompletion(true); EXPECT_EQ(1UL, manager_->listeners().size()); - checkStats(1, 0, 0, 0, 1, 0); + checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); EXPECT_CALL(*worker_, stopListener(_, _)); EXPECT_CALL(*listener_factory_.socket_, close()); @@ -1787,12 +1819,12 @@ traffic_direction: INBOUND EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); EXPECT_CALL(listener_foo->target_, initialize()); EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); - checkStats(1, 0, 0, 1, 0, 0); + checkStats(__LINE__, 1, 0, 0, 1, 0, 0, 0); EXPECT_CALL(*worker_, addListener(_, _, _)); listener_foo->target_.ready(); worker_->callAddCompletion(true); EXPECT_EQ(1UL, manager_->listeners().size()); - checkStats(1, 0, 0, 0, 1, 0); + checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); // Update foo into warming. const std::string listener_foo_update1_yaml = R"EOF( @@ -1802,10 +1834,9 @@ traffic_direction: INBOUND socket_address: address: 127.0.0.1 port_value: 1234 +per_connection_buffer_limit_bytes: 999 filter_chains: -- filters: - - name: fake - config: {} +- filters: [] )EOF"; ListenerHandle* listener_foo_update1 = expectListenerCreate(true, true); @@ -4064,6 +4095,537 @@ name: test_api_listener_2 EXPECT_EQ("test_api_listener", manager_->apiListener()->get().name()); } +TEST_F(ListenerManagerImplTest, StopInplaceWarmingListener) { + InSequence s; + + EXPECT_CALL(*worker_, start(_)); + manager_->startWorkers(guard_dog_); + + // Add foo listener into warming. + const std::string listener_foo_yaml = R"EOF( +name: foo +traffic_direction: INBOUND +address: + socket_address: + address: 127.0.0.1 + port_value: 1234 +filter_chains: +- filters: [] + )EOF"; + + ListenerHandle* listener_foo = expectListenerCreate(true, true); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_foo->target_, initialize()); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); + EXPECT_EQ(0, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value()); + + checkStats(__LINE__, 1, 0, 0, 1, 0, 0, 0); + EXPECT_CALL(*worker_, addListener(_, _, _)); + listener_foo->target_.ready(); + worker_->callAddCompletion(true); + EXPECT_EQ(1UL, manager_->listeners().size()); + checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); + + // Update foo into warming. 
+ const std::string listener_foo_update1_yaml = R"EOF( +name: foo +traffic_direction: INBOUND +address: + socket_address: + address: 127.0.0.1 + port_value: 1234 +filter_chains: +- filters: + filter_chain_match: + destination_port: 1234 + )EOF"; + + ListenerHandle* listener_foo_update1 = expectListenerOverridden(true); + EXPECT_CALL(listener_foo_update1->target_, initialize()); + EXPECT_TRUE( + manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_update1_yaml), "", true)); + EXPECT_EQ(1, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value()); + + EXPECT_EQ(1UL, manager_->listeners().size()); + + // Stop foo which should remove warming listener. + EXPECT_CALL(*listener_foo_update1, onDestroy()); + EXPECT_CALL(*worker_, stopListener(_, _)); + EXPECT_CALL(*listener_factory_.socket_, close()); + EXPECT_CALL(*listener_foo, onDestroy()); + manager_->stopListeners(ListenerManager::StopListenersType::InboundOnly); + EXPECT_EQ(1, server_.stats_store_.counter("listener_manager.listener_stopped").value()); +} + +TEST_F(ListenerManagerImplTest, RemoveInplaceUpdatingListener) { + InSequence s; + + EXPECT_CALL(*worker_, start(_)); + manager_->startWorkers(guard_dog_); + + // Add foo listener into warming. + const std::string listener_foo_yaml = R"EOF( +name: foo +traffic_direction: INBOUND +address: + socket_address: + address: 127.0.0.1 + port_value: 1234 +filter_chains: +- filters: [] + )EOF"; + + ListenerHandle* listener_foo = expectListenerCreate(true, true); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_foo->target_, initialize()); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); + EXPECT_EQ(0, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value()); + + checkStats(__LINE__, 1, 0, 0, 1, 0, 0, 0); + EXPECT_CALL(*worker_, addListener(_, _, _)); + listener_foo->target_.ready(); + worker_->callAddCompletion(true); + EXPECT_EQ(1UL, manager_->listeners().size()); + + // Update foo into warming. + const std::string listener_foo_update1_yaml = R"EOF( +name: foo +traffic_direction: INBOUND +address: + socket_address: + address: 127.0.0.1 + port_value: 1234 +filter_chains: +- filters: + filter_chain_match: + destination_port: 1234 + )EOF"; + + ListenerHandle* listener_foo_update1 = expectListenerOverridden(true); + EXPECT_CALL(listener_foo_update1->target_, initialize()); + EXPECT_TRUE( + manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_update1_yaml), "", true)); + EXPECT_EQ(1, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value()); + + EXPECT_EQ(1UL, manager_->listeners().size()); + checkStats(__LINE__, 1, 1, 0, 1, 1, 0, 0); + + // Remove foo which should remove both warming and active. 
+ EXPECT_CALL(*listener_foo_update1, onDestroy()); + EXPECT_CALL(*worker_, stopListener(_, _)); + EXPECT_CALL(*listener_factory_.socket_, close()); + EXPECT_CALL(*listener_foo->drain_manager_, startDrainSequence(_)); + EXPECT_TRUE(manager_->removeListener("foo")); + checkStats(__LINE__, 1, 1, 1, 0, 0, 1, 0); + EXPECT_CALL(*worker_, removeListener(_, _)); + listener_foo->drain_manager_->drain_sequence_completion_(); + checkStats(__LINE__, 1, 1, 1, 0, 0, 1, 0); + EXPECT_CALL(*listener_foo, onDestroy()); + worker_->callRemovalCompletion(); + EXPECT_EQ(0UL, manager_->listeners().size()); + checkStats(__LINE__, 1, 1, 1, 0, 0, 0, 0); +} + +TEST_F(ListenerManagerImplTest, UpdateInplaceWarmingListener) { + InSequence s; + + EXPECT_CALL(*worker_, start(_)); + manager_->startWorkers(guard_dog_); + + // Add foo listener into warming. + const std::string listener_foo_yaml = R"EOF( +name: foo +traffic_direction: INBOUND +address: + socket_address: + address: 127.0.0.1 + port_value: 1234 +filter_chains: +- filters: [] + )EOF"; + + ListenerHandle* listener_foo = expectListenerCreate(true, true); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_foo->target_, initialize()); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); + EXPECT_EQ(0, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value()); + + checkStats(__LINE__, 1, 0, 0, 1, 0, 0, 0); + EXPECT_CALL(*worker_, addListener(_, _, _)); + listener_foo->target_.ready(); + worker_->callAddCompletion(true); + EXPECT_EQ(1UL, manager_->listeners().size()); + + // Update foo into warming. + const std::string listener_foo_update1_yaml = R"EOF( +name: foo +traffic_direction: INBOUND +address: + socket_address: + address: 127.0.0.1 + port_value: 1234 +filter_chains: +- filters: + filter_chain_match: + destination_port: 1234 + )EOF"; + + ListenerHandle* listener_foo_update1 = expectListenerOverridden(true); + EXPECT_CALL(listener_foo_update1->target_, initialize()); + EXPECT_TRUE( + manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_update1_yaml), "", true)); + EXPECT_EQ(1, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value()); + + EXPECT_EQ(1UL, manager_->listeners().size()); + checkStats(__LINE__, 1, 1, 0, 1, 1, 0, 0); + + // Listener warmed up. + EXPECT_CALL(*worker_, addListener(_, _, _)); + EXPECT_CALL(*listener_foo, onDestroy()); + listener_foo_update1->target_.ready(); + worker_->callAddCompletion(true); + EXPECT_EQ(1UL, manager_->listeners().size()); + + EXPECT_CALL(*listener_foo_update1, onDestroy()); +} + +TEST_F(ListenerManagerImplTest, DrainageDuringInplaceUpdate) { + InSequence s; + + EXPECT_CALL(*worker_, start(_)); + manager_->startWorkers(guard_dog_); + + // Add foo listener into warming. 
+  const std::string listener_foo_yaml = R"EOF(
+name: foo
+traffic_direction: INBOUND
+address:
+  socket_address:
+    address: 127.0.0.1
+    port_value: 1234
+filter_chains:
+- filters: []
+  )EOF";
+
+  ListenerHandle* listener_foo = expectListenerCreate(true, true);
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
+  EXPECT_CALL(listener_foo->target_, initialize());
+  EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true));
+  checkStats(__LINE__, 1, 0, 0, 1, 0, 0, 0);
+  EXPECT_CALL(*worker_, addListener(_, _, _));
+  listener_foo->target_.ready();
+  worker_->callAddCompletion(true);
+  EXPECT_EQ(1UL, manager_->listeners().size());
+
+  // Update foo into warming.
+  const std::string listener_foo_update1_yaml = R"EOF(
+name: foo
+traffic_direction: INBOUND
+address:
+  socket_address:
+    address: 127.0.0.1
+    port_value: 1234
+filter_chains:
+- filters:
+  filter_chain_match:
+    destination_port: 1234
+  )EOF";
+
+  ListenerHandle* listener_foo_update1 = expectListenerOverridden(true);
+  EXPECT_CALL(listener_foo_update1->target_, initialize());
+  EXPECT_TRUE(
+      manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_update1_yaml), "", true));
+  EXPECT_EQ(1UL, manager_->listeners().size());
+  EXPECT_EQ(1, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value());
+  checkStats(__LINE__, 1, 1, 0, 1, 1, 0, 0);
+
+  // Once the new listener has warmed up, the drain timer for the old filter chains starts.
+  EXPECT_CALL(*worker_, addListener(_, _, _));
+  EXPECT_CALL(server_.options_, drainTime()).WillOnce(Return(std::chrono::seconds(600)));
+  Event::MockTimer* filter_chain_drain_timer = new Event::MockTimer(&server_.dispatcher_);
+  EXPECT_CALL(*filter_chain_drain_timer, enableTimer(std::chrono::milliseconds(600000), _));
+  listener_foo_update1->target_.ready();
+  checkStats(__LINE__, 1, 1, 0, 0, 1, 0, 1);
+
+  // When the timer expires, the worker closes any remaining connections.
+  EXPECT_CALL(*worker_, removeFilterChains(_, _, _));
+  filter_chain_drain_timer->invokeCallback();
+
+  // Once the worker cleanup is done, it's safe for the master thread to remove the original listener.
+ EXPECT_CALL(*listener_foo, onDestroy()); + worker_->callDrainFilterChainsComplete(); + checkStats(__LINE__, 1, 1, 0, 0, 1, 0, 0); + + EXPECT_CALL(*listener_foo_update1, onDestroy()); +} + +TEST(ListenerMessageUtilTest, ListenerMessageSameAreEquivalent) { + envoy::config::listener::v3::Listener listener1; + envoy::config::listener::v3::Listener listener2; + EXPECT_TRUE(Server::ListenerMessageUtil::filterChainOnlyChange(listener1, listener2)); +} + +TEST(ListenerMessageUtilTest, ListenerMessageHaveDifferentNameNotEquivalent) { + envoy::config::listener::v3::Listener listener1; + listener1.set_name("listener1"); + envoy::config::listener::v3::Listener listener2; + listener2.set_name("listener2"); + EXPECT_FALSE(Server::ListenerMessageUtil::filterChainOnlyChange(listener1, listener2)); +} + +TEST(ListenerMessageUtilTest, ListenerMessageHaveDifferentFilterChainsAreEquivalent) { + envoy::config::listener::v3::Listener listener1; + listener1.set_name("common"); + auto add_filter_chain_1 = listener1.add_filter_chains(); + add_filter_chain_1->set_name("127.0.0.1"); + + envoy::config::listener::v3::Listener listener2; + listener2.set_name("common"); + auto add_filter_chain_2 = listener2.add_filter_chains(); + add_filter_chain_2->set_name("127.0.0.2"); + + EXPECT_TRUE(Server::ListenerMessageUtil::filterChainOnlyChange(listener1, listener2)); +} + +TEST_F(ListenerManagerImplForInPlaceFilterChainUpdateTest, TraditionalUpdateIfWorkerNotStarted) { + // Worker is not started yet. + auto listener_proto = createDefaultListener(); + ListenerHandle* listener_foo = expectListenerCreate(false, true); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + manager_->addOrUpdateListener(listener_proto, "", true); + EXPECT_EQ(1u, manager_->listeners().size()); + + // Mutate the listener message as filter chain change only. 
+ auto new_listener_proto = listener_proto; + new_listener_proto.mutable_filter_chains(0) + ->mutable_filter_chain_match() + ->mutable_destination_port() + ->set_value(9999); + + EXPECT_CALL(*listener_foo, onDestroy()); + ListenerHandle* listener_foo_update1 = expectListenerCreate(false, true); + manager_->addOrUpdateListener(new_listener_proto, "", true); + EXPECT_CALL(*listener_foo_update1, onDestroy()); + EXPECT_EQ(0, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value()); +} + +TEST_F(ListenerManagerImplForInPlaceFilterChainUpdateTest, TraditionalUpdateIfAnyListenerIsNotTcp) { + EXPECT_CALL(*worker_, start(_)); + manager_->startWorkers(guard_dog_); + + auto listener_proto = createDefaultListener(); + + ListenerHandle* listener_foo = expectListenerCreate(false, true); + + expectAddListener(listener_proto, listener_foo); + + auto new_listener_proto = listener_proto; + new_listener_proto.mutable_address()->mutable_socket_address()->set_protocol( + envoy::config::core::v3::SocketAddress_Protocol::SocketAddress_Protocol_UDP); + + ListenerHandle* listener_foo_update1 = expectListenerCreate(false, true); + expectUpdateToThenDrain(new_listener_proto, listener_foo); + + expectRemove(new_listener_proto, listener_foo_update1); + + EXPECT_EQ(0UL, manager_->listeners().size()); + EXPECT_EQ(0, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value()); +} + +TEST_F(ListenerManagerImplForInPlaceFilterChainUpdateTest, + TraditionalUpdateIfImplicitTlsInspectorChanges) { + + EXPECT_CALL(*worker_, start(_)); + manager_->startWorkers(guard_dog_); + + auto listener_proto = createDefaultListener(); + + ListenerHandle* listener_foo = expectListenerCreate(false, true); + expectAddListener(listener_proto, listener_foo); + + ListenerHandle* listener_foo_update1 = expectListenerCreate(false, true); + + auto new_listener_proto = listener_proto; + *new_listener_proto.mutable_filter_chains(0) + ->mutable_filter_chain_match() + ->mutable_application_protocols() + ->Add() = "alpn"; + expectUpdateToThenDrain(new_listener_proto, listener_foo); + + expectRemove(new_listener_proto, listener_foo_update1); + + EXPECT_EQ(0UL, manager_->listeners().size()); + EXPECT_EQ(0, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value()); +} + +TEST_F(ListenerManagerImplForInPlaceFilterChainUpdateTest, + TraditionalUpdateIfImplicitProxyProtocolChanges) { + + EXPECT_CALL(*worker_, start(_)); + manager_->startWorkers(guard_dog_); + + auto listener_proto = createDefaultListener(); + + ListenerHandle* listener_foo = expectListenerCreate(false, true); + expectAddListener(listener_proto, listener_foo); + + ListenerHandle* listener_foo_update1 = expectListenerCreate(false, true); + + auto new_listener_proto = listener_proto; + new_listener_proto.mutable_filter_chains(0)->mutable_use_proxy_proto()->set_value(true); + + expectUpdateToThenDrain(new_listener_proto, listener_foo); + expectRemove(new_listener_proto, listener_foo_update1); + EXPECT_EQ(0UL, manager_->listeners().size()); + EXPECT_EQ(0, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value()); +} + +TEST_F(ListenerManagerImplForInPlaceFilterChainUpdateTest, TraditionalUpdateOnZeroFilterChain) { + EXPECT_CALL(*worker_, start(_)); + manager_->startWorkers(guard_dog_); + + auto listener_proto = createDefaultListener(); + + ListenerHandle* listener_foo = expectListenerCreate(false, true); + expectAddListener(listener_proto, listener_foo); + + auto new_listener_proto = 
listener_proto;
+  new_listener_proto.clear_filter_chains();
+  EXPECT_CALL(server_.validation_context_, staticValidationVisitor()).Times(0);
+  EXPECT_CALL(server_.validation_context_, dynamicValidationVisitor());
+  EXPECT_CALL(listener_factory_, createDrainManager_(_));
+  EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(new_listener_proto, "", true),
+                            EnvoyException,
+                            "error adding listener '127.0.0.1:1234': no filter chains specified");
+
+  expectRemove(listener_proto, listener_foo);
+  EXPECT_EQ(0UL, manager_->listeners().size());
+  EXPECT_EQ(0, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value());
+}
+
+TEST_F(ListenerManagerImplForInPlaceFilterChainUpdateTest,
+       TraditionalUpdateIfListenerConfigHasUpdateOtherThanFilterChain) {
+  EXPECT_CALL(*worker_, start(_));
+  manager_->startWorkers(guard_dog_);
+
+  auto listener_proto = createDefaultListener();
+
+  ListenerHandle* listener_foo = expectListenerCreate(false, true);
+  expectAddListener(listener_proto, listener_foo);
+
+  ListenerHandle* listener_foo_update1 = expectListenerCreate(false, true);
+
+  auto new_listener_proto = listener_proto;
+  new_listener_proto.set_traffic_direction(::envoy::config::core::v3::TrafficDirection::INBOUND);
+  expectUpdateToThenDrain(new_listener_proto, listener_foo);
+
+  expectRemove(new_listener_proto, listener_foo_update1);
+
+  EXPECT_EQ(0UL, manager_->listeners().size());
+  EXPECT_EQ(0, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value());
+}
+
+// This test executes an in-place update first, then a traditional listener update.
+// The second update is forced onto the traditional path by a runtime override.
+TEST_F(ListenerManagerImplTest, RuntimeDisabledInPlaceUpdateFallbacksToTraditionalUpdate) {
+  InSequence s;
+  EXPECT_CALL(*worker_, start(_));
+  manager_->startWorkers(guard_dog_);
+
+  // Add foo listener.
+  const std::string listener_foo_yaml = R"EOF(
+name: foo
+address:
+  socket_address:
+    address: 127.0.0.1
+    port_value: 1234
+filter_chains:
+- filters: []
+  )EOF";
+
+  ListenerHandle* listener_foo = expectListenerCreate(false, true);
+  EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true}));
+
+  EXPECT_CALL(*worker_, addListener(_, _, _));
+
+  EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true));
+  EXPECT_EQ(0, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value());
+
+  worker_->callAddCompletion(true);
+  EXPECT_EQ(1UL, manager_->listeners().size());
+  checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0);
+
+  // Add foo listener again. This will execute the in-place filter chain update path.
+ const std::string listener_foo_update1_yaml = R"EOF( + name: foo + address: + socket_address: + address: 127.0.0.1 + port_value: 1234 + filter_chains: + - filters: [] + filter_chain_match: + destination_port: 1234 + )EOF"; + + ListenerHandle* listener_foo_update1 = expectListenerOverridden(false, listener_foo); + EXPECT_CALL(*worker_, addListener(_, _, _)); + auto* timer = new Event::MockTimer(dynamic_cast(&server_.dispatcher())); + EXPECT_CALL(*timer, enableTimer(_, _)); + EXPECT_TRUE( + manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_update1_yaml), "", true)); + EXPECT_EQ(1, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value()); + + EXPECT_EQ(1UL, manager_->listeners().size()); + worker_->callAddCompletion(true); + + EXPECT_CALL(*worker_, removeFilterChains(_, _, _)); + timer->invokeCallback(); + EXPECT_CALL(*listener_foo, onDestroy()); + worker_->callDrainFilterChainsComplete(); + + // Update foo again. This time we disable in place filter chain update in runtime. + // The traditional full listener update path is used. + auto in_place_update_disabled_guard = disableInplaceUpdateForThisTest(); + const std::string listener_foo_update2_yaml = R"EOF( + name: foo + address: + socket_address: + address: 127.0.0.1 + port_value: 1234 + filter_chains: + - filters: + filter_chain_match: + destination_port: 2345 + )EOF"; + + ListenerHandle* listener_foo_update2 = expectListenerCreate(false, true); + EXPECT_CALL(*worker_, addListener(_, _, _)); + EXPECT_CALL(*worker_, stopListener(_, _)); + EXPECT_CALL(*listener_foo_update1->drain_manager_, startDrainSequence(_)); + EXPECT_TRUE( + manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_update2_yaml), "", true)); + EXPECT_EQ(1, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value()); + + EXPECT_CALL(*worker_, removeListener(_, _)); + listener_foo_update1->drain_manager_->drain_sequence_completion_(); + + EXPECT_CALL(*listener_foo_update1, onDestroy()); + worker_->callRemovalCompletion(); + + EXPECT_CALL(*worker_, stopListener(_, _)); + EXPECT_CALL(*listener_factory_.socket_, close()); + EXPECT_CALL(*listener_foo_update2->drain_manager_, startDrainSequence(_)); + EXPECT_TRUE(manager_->removeListener("foo")); + + EXPECT_CALL(*worker_, removeListener(_, _)); + listener_foo_update2->drain_manager_->drain_sequence_completion_(); + + EXPECT_CALL(*listener_foo_update2, onDestroy()); + worker_->callRemovalCompletion(); + EXPECT_EQ(0UL, manager_->listeners().size()); +} + } // namespace } // namespace Server } // namespace Envoy diff --git a/test/server/listener_manager_impl_test.h b/test/server/listener_manager_impl_test.h index 49a29cc4a6dc..5bf7491a4b9d 100644 --- a/test/server/listener_manager_impl_test.h +++ b/test/server/listener_manager_impl_test.h @@ -15,6 +15,7 @@ #include "test/mocks/server/mocks.h" #include "test/test_common/environment.h" #include "test/test_common/simulated_time_system.h" +#include "test/test_common/test_runtime.h" #include "test/test_common/threadsafe_singleton_injector.h" #include "gmock/gmock.h" @@ -133,6 +134,33 @@ class ListenerManagerImplTest : public testing::Test { return raw_listener; } + ListenerHandle* expectListenerOverridden(bool need_init, ListenerHandle* origin = nullptr) { + auto raw_listener = new ListenerHandle(false); + // Simulate ListenerImpl: drain manager is copied from origin. + if (origin != nullptr) { + raw_listener->drain_manager_ = origin->drain_manager_; + } + // Overridden listener is always added by api. 
+ EXPECT_CALL(server_.validation_context_, staticValidationVisitor()).Times(0); + EXPECT_CALL(server_.validation_context_, dynamicValidationVisitor()); + + EXPECT_CALL(listener_factory_, createNetworkFilterFactoryList(_, _)) + .WillOnce(Invoke( + [raw_listener, need_init]( + const Protobuf::RepeatedPtrField&, + Server::Configuration::FilterChainFactoryContext& filter_chain_factory_context) + -> std::vector { + std::shared_ptr notifier(raw_listener); + raw_listener->context_ = &filter_chain_factory_context; + if (need_init) { + filter_chain_factory_context.initManager().add(notifier->target_); + } + return {[notifier](Network::FilterManager&) -> void {}}; + })); + + return raw_listener; + } + const Network::FilterChain* findFilterChain(uint16_t destination_port, const std::string& destination_address, const std::string& server_name, const std::string& transport_protocol, @@ -199,8 +227,11 @@ class ListenerManagerImplTest : public testing::Test { })); } - void checkStats(uint64_t added, uint64_t modified, uint64_t removed, uint64_t warming, - uint64_t active, uint64_t draining) { + void checkStats(int line_num, uint64_t added, uint64_t modified, uint64_t removed, + uint64_t warming, uint64_t active, uint64_t draining, + uint64_t draining_filter_chains) { + SCOPED_TRACE(line_num); + EXPECT_EQ(added, server_.stats_store_.counter("listener_manager.listener_added").value()); EXPECT_EQ(modified, server_.stats_store_.counter("listener_manager.listener_modified").value()); EXPECT_EQ(removed, server_.stats_store_.counter("listener_manager.listener_removed").value()); @@ -216,6 +247,10 @@ class ListenerManagerImplTest : public testing::Test { .gauge("listener_manager.total_listeners_draining", Stats::Gauge::ImportMode::NeverImport) .value()); + EXPECT_EQ(draining_filter_chains, server_.stats_store_ + .gauge("listener_manager.total_filter_chains_draining", + Stats::Gauge::ImportMode::NeverImport) + .value()); } void checkConfigDump(const std::string& expected_dump_yaml) { @@ -228,6 +263,14 @@ class ListenerManagerImplTest : public testing::Test { EXPECT_EQ(expected_listeners_config_dump.DebugString(), listeners_config_dump.DebugString()); } + ABSL_MUST_USE_RESULT + auto disableInplaceUpdateForThisTest() { + auto scoped_runtime = std::make_unique(); + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.listener_in_place_filterchain_update", "false"}}); + return scoped_runtime; + } + NiceMock os_sys_calls_; TestThreadsafeSingletonInjector os_calls_{&os_sys_calls_}; Api::OsSysCallsImpl os_sys_calls_actual_; From fd9325dbd81de76162dda1115ea9821ae2c65524 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Thu, 30 Apr 2020 17:43:50 -0400 Subject: [PATCH 071/909] docs: updating runtime guard policy (#10983) Codifying that most L7 changes should be runtime guarded, updating deprecation timeline as we changed it a while back. Signed-off-by: Alyssa Wilk --- CONTRIBUTING.md | 13 +++++++++---- PULL_REQUESTS.md | 10 ++++++++++ PULL_REQUEST_TEMPLATE.md | 1 + 3 files changed, 20 insertions(+), 4 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 2b8723439638..b89f748a9f4c 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -125,10 +125,14 @@ versioning guidelines: # Runtime guarding -Some high risk changes in Envoy are deemed worthy of runtime guarding. Instead of just replacing +Some changes in Envoy are deemed worthy of runtime guarding. 
Instead of just replacing old code with new code, both code paths are supported for between one Envoy release (if it is guarded due to performance concerns) and a full deprecation cycle (if it is a high risk behavioral -change). +change). Generally as a community we try to guard both high risk changes (major +refactors such as replacing Envoy's buffer implementation) and most user-visible +non-config-guarded changes to protocol processing (for example additions or changes to HTTP headers or +how HTTP is serialized out) for non-alpha features. Feel free to tag @envoyproxy/maintainers +if you aren't sure if a given change merits runtime guarding. The canonical way to runtime guard a feature is ``` @@ -156,8 +160,9 @@ time. Runtime guarded features may either set true (running the new code by default) in the initial PR, after a testing interval, or during the next release cycle, at the PR author's and reviewing maintainer's discretion. Generally all runtime guarded features will be set true when a -release is cut, and the old code path will be deprecated at that time. Runtime features -are set true by default by inclusion in +release is cut. Old code paths for refactors can be cleaned up after a release and there has been +some production run time. Old code for behavioral changes will be deprecated after six months. +Runtime features are set true by default by inclusion in [source/common/runtime/runtime_features.h](https://github.com/envoyproxy/envoy/blob/master/source/common/runtime/runtime_features.h) There are four suggested options for testing new runtime features: diff --git a/PULL_REQUESTS.md b/PULL_REQUESTS.md index 0126cf073ea6..91211e3ff415 100644 --- a/PULL_REQUESTS.md +++ b/PULL_REQUESTS.md @@ -67,6 +67,16 @@ current version. Please include any relevant links. Each release note should be relevant subsystem in **alphabetical order** (see existing examples as a guide) and include links to relevant parts of the documentation. Thank you! Please write in N/A if there are no release notes. +### Runtime guard + +If this PR has a user-visible behavioral change, or otherwise falls under the +guidelines for runtime guarding in the [contributing doc](CONTRIBUTING.md.md) +it should have a runtime guard, which should be documented both in the release +notes and here in the PR description. + +For new feature additions guarded by configs, no-op refactors, docs changes etc. +this field can be disregarded and/or removed. 
+ ### Issues If this PR fixes an outstanding issue, please add a line of the form: diff --git a/PULL_REQUEST_TEMPLATE.md b/PULL_REQUEST_TEMPLATE.md index e16c81139d86..d72e0564dbf1 100644 --- a/PULL_REQUEST_TEMPLATE.md +++ b/PULL_REQUEST_TEMPLATE.md @@ -7,5 +7,6 @@ Risk Level: Testing: Docs Changes: Release Notes: +[Optional Runtime guard:] [Optional Fixes #Issue] [Optional Deprecated:] From 8654ea213c98274bb507026f57abae21418c3f66 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Thu, 30 Apr 2020 17:44:14 -0400 Subject: [PATCH 072/909] http: allowing an optional proxy proto header when terminating CONNECT requests (#10975) Signed-off-by: Alyssa Wilk --- .../config/route/v3/route_components.proto | 4 +- .../route/v4alpha/route_components.proto | 6 ++- .../config/route/v3/route_components.proto | 4 +- .../route/v4alpha/route_components.proto | 6 ++- source/common/router/BUILD | 1 + source/common/router/upstream_request.cc | 17 ++++-- source/common/router/upstream_request.h | 1 + source/extensions/common/proxy_protocol/BUILD | 2 + .../proxy_protocol/proxy_protocol_header.cc | 25 ++++++++- .../proxy_protocol/proxy_protocol_header.h | 14 ++++- test/common/router/upstream_request_test.cc | 54 +++++++++++++++++++ test/extensions/common/proxy_protocol/BUILD | 1 + .../proxy_protocol_header_test.cc | 23 +++++++- 13 files changed, 146 insertions(+), 12 deletions(-) diff --git a/api/envoy/config/route/v3/route_components.proto b/api/envoy/config/route/v3/route_components.proto index ebb5b8a01029..3f82fbd80fb0 100644 --- a/api/envoy/config/route/v3/route_components.proto +++ b/api/envoy/config/route/v3/route_components.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package envoy.config.route.v3; import "envoy/config/core/v3/base.proto"; +import "envoy/config/core/v3/proxy_protocol.proto"; import "envoy/type/matcher/v3/regex.proto"; import "envoy/type/matcher/v3/string.proto"; import "envoy/type/tracing/v3/custom_tag.proto"; @@ -724,7 +725,8 @@ message RouteAction { // Configuration for sending data upstream as a raw data payload. This is used for // CONNECT requests, when forwarding CONNECT payload as raw TCP. message ConnectConfig { - // TODO(alyssawilk) add proxy proto configuration here. + // If present, the proxy protocol header will be prepended to the CONNECT payload sent upstream. + core.v3.ProxyProtocolConfig proxy_protocol_config = 1; } // The case-insensitive name of this upgrade, e.g. "websocket". diff --git a/api/envoy/config/route/v4alpha/route_components.proto b/api/envoy/config/route/v4alpha/route_components.proto index 4a54ff847063..5fb31112b34e 100644 --- a/api/envoy/config/route/v4alpha/route_components.proto +++ b/api/envoy/config/route/v4alpha/route_components.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package envoy.config.route.v4alpha; import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/core/v4alpha/proxy_protocol.proto"; import "envoy/type/matcher/v3/regex.proto"; import "envoy/type/matcher/v3/string.proto"; import "envoy/type/tracing/v3/custom_tag.proto"; @@ -727,10 +728,11 @@ message RouteAction { // Configuration for sending data upstream as a raw data payload. This is used for // CONNECT requests, when forwarding CONNECT payload as raw TCP. message ConnectConfig { - // TODO(alyssawilk) add proxy proto configuration here. - option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RouteAction.UpgradeConfig.ConnectConfig"; + + // If present, the proxy protocol header will be prepended to the CONNECT payload sent upstream. 
+ core.v4alpha.ProxyProtocolConfig proxy_protocol_config = 1; } // The case-insensitive name of this upgrade, e.g. "websocket". diff --git a/generated_api_shadow/envoy/config/route/v3/route_components.proto b/generated_api_shadow/envoy/config/route/v3/route_components.proto index 631aa9af8602..b5da703c2936 100644 --- a/generated_api_shadow/envoy/config/route/v3/route_components.proto +++ b/generated_api_shadow/envoy/config/route/v3/route_components.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package envoy.config.route.v3; import "envoy/config/core/v3/base.proto"; +import "envoy/config/core/v3/proxy_protocol.proto"; import "envoy/type/matcher/v3/regex.proto"; import "envoy/type/matcher/v3/string.proto"; import "envoy/type/tracing/v3/custom_tag.proto"; @@ -735,7 +736,8 @@ message RouteAction { // Configuration for sending data upstream as a raw data payload. This is used for // CONNECT requests, when forwarding CONNECT payload as raw TCP. message ConnectConfig { - // TODO(alyssawilk) add proxy proto configuration here. + // If present, the proxy protocol header will be prepended to the CONNECT payload sent upstream. + core.v3.ProxyProtocolConfig proxy_protocol_config = 1; } // The case-insensitive name of this upgrade, e.g. "websocket". diff --git a/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto b/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto index 4a54ff847063..5fb31112b34e 100644 --- a/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto +++ b/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package envoy.config.route.v4alpha; import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/core/v4alpha/proxy_protocol.proto"; import "envoy/type/matcher/v3/regex.proto"; import "envoy/type/matcher/v3/string.proto"; import "envoy/type/tracing/v3/custom_tag.proto"; @@ -727,10 +728,11 @@ message RouteAction { // Configuration for sending data upstream as a raw data payload. This is used for // CONNECT requests, when forwarding CONNECT payload as raw TCP. message ConnectConfig { - // TODO(alyssawilk) add proxy proto configuration here. - option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RouteAction.UpgradeConfig.ConnectConfig"; + + // If present, the proxy protocol header will be prepended to the CONNECT payload sent upstream. + core.v4alpha.ProxyProtocolConfig proxy_protocol_config = 1; } // The case-insensitive name of this upgrade, e.g. "websocket". 
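To make the new field concrete, a hedged sketch of the route action that would exercise it is shown below, written in the raw-string style the tests in this series use. The cluster name and the exact field layout are assumptions based on RouteAction.UpgradeConfig.ConnectConfig above, not text taken from this patch.

```
// Illustrative only: a CONNECT upgrade config asking for a PROXY protocol v1 header to be
// prepended to the payload sent upstream. "cluster_0" is a placeholder cluster name.
const std::string connect_route_action_yaml = R"EOF(
cluster: cluster_0
upgrade_configs:
- upgrade_type: CONNECT
  connect_config:
    proxy_protocol_config:
      version: V1
)EOF";
```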
diff --git a/source/common/router/BUILD b/source/common/router/BUILD index 2786beb3bccc..3fb3a37fb915 100644 --- a/source/common/router/BUILD +++ b/source/common/router/BUILD @@ -309,6 +309,7 @@ envoy_cc_library( "//source/common/stream_info:uint32_accessor_lib", "//source/common/tracing:http_tracer_lib", "//source/common/upstream:load_balancer_lib", + "//source/extensions/common/proxy_protocol:proxy_protocol_header_lib", "@envoy_api//envoy/extensions/filters/http/router/v3:pkg_cc_proto", ], ) diff --git a/source/common/router/upstream_request.cc b/source/common/router/upstream_request.cc index ed6a0c7e6905..86e563d382ba 100644 --- a/source/common/router/upstream_request.cc +++ b/source/common/router/upstream_request.cc @@ -35,6 +35,7 @@ #include "common/stream_info/uint32_accessor_impl.h" #include "common/tracing/http_tracer_impl.h" +#include "extensions/common/proxy_protocol/proxy_protocol_header.h" #include "extensions/filters/http/well_known_names.h" namespace Envoy { @@ -538,9 +539,19 @@ void TcpUpstream::encodeData(Buffer::Instance& data, bool end_stream) { } void TcpUpstream::encodeHeaders(const Http::RequestHeaderMap&, bool end_stream) { - if (end_stream) { - Buffer::OwnedImpl data; - upstream_conn_data_->connection().write(data, true); + // Headers should only happen once, so use this opportunity to add the proxy + // proto header, if configured. + ASSERT(upstream_request_->parent().routeEntry()->connectConfig().has_value()); + Buffer::OwnedImpl data; + auto& connect_config = upstream_request_->parent().routeEntry()->connectConfig().value(); + if (connect_config.has_proxy_protocol_config()) { + const Network::Connection& connection = *upstream_request_->parent().callbacks()->connection(); + Extensions::Common::ProxyProtocol::generateProxyProtoHeader( + connect_config.proxy_protocol_config(), connection, data); + } + + if (data.length() != 0 || end_stream) { + upstream_conn_data_->connection().write(data, end_stream); } } diff --git a/source/common/router/upstream_request.h b/source/common/router/upstream_request.h index 660d6e17b8b1..938b47327318 100644 --- a/source/common/router/upstream_request.h +++ b/source/common/router/upstream_request.h @@ -138,6 +138,7 @@ class UpstreamRequest : public Logger::Loggable, bool createPerTryTimeoutOnRequestComplete() { return create_per_try_timeout_on_request_complete_; } + RouterFilterInterface& parent() { return parent_; } private: RouterFilterInterface& parent_; diff --git a/source/extensions/common/proxy_protocol/BUILD b/source/extensions/common/proxy_protocol/BUILD index 755af8cae0d0..7eb374f5a5bf 100644 --- a/source/extensions/common/proxy_protocol/BUILD +++ b/source/extensions/common/proxy_protocol/BUILD @@ -15,6 +15,8 @@ envoy_cc_library( deps = [ "//include/envoy/buffer:buffer_interface", "//include/envoy/network:address_interface", + "//include/envoy/network:connection_interface", "//source/common/network:address_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/common/proxy_protocol/proxy_protocol_header.cc b/source/extensions/common/proxy_protocol/proxy_protocol_header.cc index c6f9d0a9f060..0342f3d1aff3 100644 --- a/source/extensions/common/proxy_protocol/proxy_protocol_header.cc +++ b/source/extensions/common/proxy_protocol/proxy_protocol_header.cc @@ -35,6 +35,12 @@ void generateV1Header(const std::string& src_addr, const std::string& dst_addr, out.add(stream.str()); } +void generateV1Header(const Network::Address::Ip& source_address, + const Network::Address::Ip& dest_address, 
Buffer::Instance& out) { + generateV1Header(source_address.addressAsString(), dest_address.addressAsString(), + source_address.port(), dest_address.port(), source_address.version(), out); +} + void generateV2Header(const std::string& src_addr, const std::string& dst_addr, uint32_t src_port, uint32_t dst_port, Network::Address::IpVersion ip_version, Buffer::Instance& out) { @@ -95,6 +101,23 @@ void generateV2Header(const std::string& src_addr, const std::string& dst_addr, out.add(ports, 4); } +void generateV2Header(const Network::Address::Ip& source_address, + const Network::Address::Ip& dest_address, Buffer::Instance& out) { + generateV2Header(source_address.addressAsString(), dest_address.addressAsString(), + source_address.port(), dest_address.port(), source_address.version(), out); +} + +void generateProxyProtoHeader(const envoy::config::core::v3::ProxyProtocolConfig& config, + const Network::Connection& connection, Buffer::Instance& out) { + const Network::Address::Ip& dest_address = *connection.localAddress()->ip(); + const Network::Address::Ip& source_address = *connection.remoteAddress()->ip(); + if (config.version() == envoy::config::core::v3::ProxyProtocolConfig::V1) { + generateV1Header(source_address, dest_address, out); + } else if (config.version() == envoy::config::core::v3::ProxyProtocolConfig::V2) { + generateV2Header(source_address, dest_address, out); + } +} + void generateV2LocalHeader(Buffer::Instance& out) { out.add(PROXY_PROTO_V2_SIGNATURE, PROXY_PROTO_V2_SIGNATURE_LEN); const uint8_t addr_fam_protocol_and_length[4]{PROXY_PROTO_V2_VERSION << 4, 0, 0, 0}; @@ -104,4 +127,4 @@ void generateV2LocalHeader(Buffer::Instance& out) { } // namespace ProxyProtocol } // namespace Common } // namespace Extensions -} // namespace Envoy \ No newline at end of file +} // namespace Envoy diff --git a/source/extensions/common/proxy_protocol/proxy_protocol_header.h b/source/extensions/common/proxy_protocol/proxy_protocol_header.h index 81d9dc1f8951..013c842ced20 100644 --- a/source/extensions/common/proxy_protocol/proxy_protocol_header.h +++ b/source/extensions/common/proxy_protocol/proxy_protocol_header.h @@ -1,7 +1,9 @@ #pragma once #include "envoy/buffer/buffer.h" +#include "envoy/config/core/v3/proxy_protocol.pb.h" #include "envoy/network/address.h" +#include "envoy/network/connection.h" namespace Envoy { namespace Extensions { @@ -41,15 +43,25 @@ constexpr uint32_t PROXY_PROTO_V2_ADDR_LEN_UNIX = 216; void generateV1Header(const std::string& src_addr, const std::string& dst_addr, uint32_t src_port, uint32_t dst_port, Network::Address::IpVersion ip_version, Buffer::Instance& out); +void generateV1Header(const Network::Address::Ip& source_address, + const Network::Address::Ip& dest_address, Buffer::Instance& out); + // Generates the v2 PROXY protocol header and adds it to the specified buffer // TCP is assumed as the transport protocol void generateV2Header(const std::string& src_addr, const std::string& dst_addr, uint32_t src_port, uint32_t dst_port, Network::Address::IpVersion ip_version, Buffer::Instance& out); +void generateV2Header(const Network::Address::Ip& source_address, + const Network::Address::Ip& dest_address, Buffer::Instance& out); + +// Generates the appropriate proxy proto header and appends it to the supplied buffer. 
+void generateProxyProtoHeader(const envoy::config::core::v3::ProxyProtocolConfig& config, + const Network::Connection& connection, Buffer::Instance& out); + // Generates the v2 PROXY protocol local command header and adds it to the specified buffer void generateV2LocalHeader(Buffer::Instance& out); } // namespace ProxyProtocol } // namespace Common } // namespace Extensions -} // namespace Envoy \ No newline at end of file +} // namespace Envoy diff --git a/test/common/router/upstream_request_test.cc b/test/common/router/upstream_request_test.cc index d9900dc2a588..91c2a00cee76 100644 --- a/test/common/router/upstream_request_test.cc +++ b/test/common/router/upstream_request_test.cc @@ -3,6 +3,8 @@ #include "common/router/router.h" #include "common/router/upstream_request.h" +#include "extensions/common/proxy_protocol/proxy_protocol_header.h" + #include "test/common/http/common.h" #include "test/mocks/http/mocks.h" #include "test/mocks/router/mocks.h" @@ -55,6 +57,9 @@ class MockRouterFilterInterface : public RouterFilterInterface { ON_CALL(*this, cluster()).WillByDefault(Return(cluster_info_)); ON_CALL(*this, upstreamRequests()).WillByDefault(ReturnRef(requests_)); EXPECT_CALL(callbacks_.dispatcher_, setTrackedObject(_)).Times(AnyNumber()); + ON_CALL(*this, routeEntry()).WillByDefault(Return(&route_entry_)); + ON_CALL(callbacks_, connection()).WillByDefault(Return(&client_connection_)); + route_entry_.connect_config_.emplace(RouteEntry::ConnectConfig()); } MOCK_METHOD(void, onUpstream100ContinueHeaders, @@ -89,6 +94,8 @@ class MockRouterFilterInterface : public RouterFilterInterface { MOCK_METHOD(TimeSource&, timeSource, ()); NiceMock callbacks_; + NiceMock route_entry_; + NiceMock client_connection_; envoy::extensions::filters::http::router::v3::Router router_proto; NiceMock context_; @@ -173,6 +180,7 @@ class TcpUpstreamTest : public ::testing::Test { TEST_F(TcpUpstreamTest, Basic) { // Swallow the headers. + EXPECT_CALL(connection_, write(_, false)).Times(0); tcp_upstream_->encodeHeaders(request_, false); // Proxy the data. @@ -197,6 +205,52 @@ TEST_F(TcpUpstreamTest, Basic) { tcp_upstream_->onUpstreamData(response2, false); } +TEST_F(TcpUpstreamTest, V1Header) { + envoy::config::core::v3::ProxyProtocolConfig* proxy_config = + mock_router_filter_.route_entry_.connect_config_->mutable_proxy_protocol_config(); + proxy_config->set_version(envoy::config::core::v3::ProxyProtocolConfig::V1); + mock_router_filter_.client_connection_.remote_address_ = + std::make_shared("1.2.3.4", 5); + mock_router_filter_.client_connection_.local_address_ = + std::make_shared("4.5.6.7", 8); + + Buffer::OwnedImpl expected_data; + Extensions::Common::ProxyProtocol::generateProxyProtoHeader( + *proxy_config, mock_router_filter_.client_connection_, expected_data); + + // encodeHeaders now results in the proxy proto header being sent. + EXPECT_CALL(connection_, write(BufferEqual(&expected_data), false)); + tcp_upstream_->encodeHeaders(request_, false); + + // Data is proxied as usual. 
+ EXPECT_CALL(connection_, write(BufferStringEqual("foo"), false)); + Buffer::OwnedImpl buffer("foo"); + tcp_upstream_->encodeData(buffer, false); +} + +TEST_F(TcpUpstreamTest, V2Header) { + envoy::config::core::v3::ProxyProtocolConfig* proxy_config = + mock_router_filter_.route_entry_.connect_config_->mutable_proxy_protocol_config(); + proxy_config->set_version(envoy::config::core::v3::ProxyProtocolConfig::V2); + mock_router_filter_.client_connection_.remote_address_ = + std::make_shared("1.2.3.4", 5); + mock_router_filter_.client_connection_.local_address_ = + std::make_shared("4.5.6.7", 8); + + Buffer::OwnedImpl expected_data; + Extensions::Common::ProxyProtocol::generateProxyProtoHeader( + *proxy_config, mock_router_filter_.client_connection_, expected_data); + + // encodeHeaders now results in the proxy proto header being sent. + EXPECT_CALL(connection_, write(BufferEqual(&expected_data), false)); + tcp_upstream_->encodeHeaders(request_, false); + + // Data is proxied as usual. + EXPECT_CALL(connection_, write(BufferStringEqual("foo"), false)); + Buffer::OwnedImpl buffer("foo"); + tcp_upstream_->encodeData(buffer, false); +} + TEST_F(TcpUpstreamTest, TrailersEndStream) { // Swallow the headers. tcp_upstream_->encodeHeaders(request_, false); diff --git a/test/extensions/common/proxy_protocol/BUILD b/test/extensions/common/proxy_protocol/BUILD index 59871f2d17a6..e96325c56faf 100644 --- a/test/extensions/common/proxy_protocol/BUILD +++ b/test/extensions/common/proxy_protocol/BUILD @@ -14,6 +14,7 @@ envoy_cc_test( deps = [ "//source/common/buffer:buffer_lib", "//source/extensions/common/proxy_protocol:proxy_protocol_header_lib", + "//test/mocks/network:connection_mocks", "//test/test_common:utility_lib", ], ) diff --git a/test/extensions/common/proxy_protocol/proxy_protocol_header_test.cc b/test/extensions/common/proxy_protocol/proxy_protocol_header_test.cc index 052544a4a99a..61ac2f70946b 100644 --- a/test/extensions/common/proxy_protocol/proxy_protocol_header_test.cc +++ b/test/extensions/common/proxy_protocol/proxy_protocol_header_test.cc @@ -4,6 +4,7 @@ #include "extensions/common/proxy_protocol/proxy_protocol_header.h" +#include "test/mocks/network/connection.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" @@ -28,6 +29,16 @@ TEST(ProxyProtocolHeaderTest, GeneratesV1IPv4Header) { generateV1Header(src_addr, dst_addr, src_port, dst_port, version, buff); EXPECT_TRUE(TestUtility::buffersEqual(expectedBuff, buff)); + + // Make sure the wrapper utility generates the same output. + testing::NiceMock connection; + connection.remote_address_ = Network::Utility::resolveUrl("tcp://174.2.2.222:50000"); + connection.local_address_ = Network::Utility::resolveUrl("tcp://172.0.0.1:80"); + Buffer::OwnedImpl util_buf; + envoy::config::core::v3::ProxyProtocolConfig config; + config.set_version(envoy::config::core::v3::ProxyProtocolConfig::V1); + generateProxyProtoHeader(config, connection, util_buf); + EXPECT_TRUE(TestUtility::buffersEqual(expectedBuff, util_buf)); } TEST(ProxyProtocolHeaderTest, GeneratesV1IPv6Header) { @@ -79,6 +90,16 @@ TEST(ProxyProtocolHeaderTest, GeneratesV2IPv6Header) { generateV2Header(src_addr, dst_addr, src_port, dst_port, version, buff); EXPECT_TRUE(TestUtility::buffersEqual(expectedBuff, buff)); + + // Make sure the wrapper utility generates the same output. 
+ testing::NiceMock connection; + connection.remote_address_ = Network::Utility::resolveUrl("tcp://[1:2:3::4]:8"); + connection.local_address_ = Network::Utility::resolveUrl("tcp://[1:100:200:3::]:2"); + Buffer::OwnedImpl util_buf; + envoy::config::core::v3::ProxyProtocolConfig config; + config.set_version(envoy::config::core::v3::ProxyProtocolConfig::V2); + generateProxyProtoHeader(config, connection, util_buf); + EXPECT_TRUE(TestUtility::buffersEqual(expectedBuff, util_buf)); } TEST(ProxyProtocolHeaderTest, GeneratesV2LocalHeader) { @@ -96,4 +117,4 @@ TEST(ProxyProtocolHeaderTest, GeneratesV2LocalHeader) { } // namespace ProxyProtocol } // namespace Common } // namespace Extensions -} // namespace Envoy \ No newline at end of file +} // namespace Envoy From 3750bc9342a5c5f35a44a1c4d91195265e6e53cc Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Thu, 30 Apr 2020 17:44:35 -0400 Subject: [PATCH 073/909] http: pausing when proxying CONNECT requests for security reasons (#10974) Signed-off-by: Alyssa Wilk --- source/common/router/router.cc | 6 +- source/common/router/upstream_request.cc | 32 ++++++--- source/common/router/upstream_request.h | 11 ++++ test/common/router/router_test.cc | 65 +++++++++++++++++++ test/integration/integration_test.cc | 9 ++- .../tcp_tunneling_integration_test.cc | 2 - 6 files changed, 109 insertions(+), 16 deletions(-) diff --git a/source/common/router/router.cc b/source/common/router/router.cc index 140e44f5c0a7..8d1ab9647bb8 100644 --- a/source/common/router/router.cc +++ b/source/common/router/router.cc @@ -664,9 +664,9 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, Filter::HttpOrTcpPool Filter::createConnPool(Upstream::HostDescriptionConstSharedPtr& host) { Filter::HttpOrTcpPool conn_pool; - bool should_tcp_proxy = route_entry_->connectConfig().has_value() && - downstream_headers_->Method()->value().getStringView() == - Http::Headers::get().MethodValues.Connect; + const bool should_tcp_proxy = route_entry_->connectConfig().has_value() && + downstream_headers_->Method()->value().getStringView() == + Http::Headers::get().MethodValues.Connect; if (!should_tcp_proxy) { conn_pool = getHttpConnPool(); diff --git a/source/common/router/upstream_request.cc b/source/common/router/upstream_request.cc index 86e563d382ba..6130d64de9df 100644 --- a/source/common/router/upstream_request.cc +++ b/source/common/router/upstream_request.cc @@ -49,7 +49,7 @@ UpstreamRequest::UpstreamRequest(RouterFilterInterface& parent, calling_encode_headers_(false), upstream_canary_(false), decode_complete_(false), encode_complete_(false), encode_trailers_(false), retried_(false), awaiting_headers_(true), outlier_detection_timeout_recorded_(false), - create_per_try_timeout_on_request_complete_(false), + create_per_try_timeout_on_request_complete_(false), paused_for_connect_(false), record_timeout_budget_(parent_.cluster()->timeoutBudgetStats().has_value()) { if (parent_.config().start_child_span_) { span_ = parent_.callbacks()->activeSpan().spawnChild( @@ -128,6 +128,12 @@ void UpstreamRequest::decodeHeaders(Http::ResponseHeaderMapPtr&& headers, bool e } const uint64_t response_code = Http::Utility::getResponseStatus(*headers); stream_info_.response_code_ = static_cast(response_code); + + if (paused_for_connect_ && response_code == 200) { + encodeBodyAndTrailers(); + paused_for_connect_ = false; + } + parent_.onUpstreamHeaders(response_code, std::move(headers), *this, end_stream); } @@ -178,7 +184,7 @@ void UpstreamRequest::encodeData(Buffer::Instance& 
data, bool end_stream) { ASSERT(!encode_complete_); encode_complete_ = end_stream; - if (!upstream_) { + if (!upstream_ || paused_for_connect_) { ENVOY_STREAM_LOG(trace, "buffering {} bytes", *parent_.callbacks(), data.length()); if (!buffered_request_body_) { buffered_request_body_ = std::make_unique( @@ -358,6 +364,7 @@ void UpstreamRequest::onPoolReady( parent_.callbacks()->addDownstreamWatermarkCallbacks(downstream_watermark_manager_); calling_encode_headers_ = true; + auto* headers = parent_.downstreamHeaders(); if (parent_.routeEntry()->autoHostRewrite() && !host->hostname().empty()) { parent_.downstreamHeaders()->setHost(host->hostname()); } @@ -368,13 +375,22 @@ void UpstreamRequest::onPoolReady( upstream_timing_.onFirstUpstreamTxByteSent(parent_.callbacks()->dispatcher().timeSource()); - const bool end_stream = !buffered_request_body_ && encode_complete_ && !encode_trailers_; - // If end_stream is set in headers, and there are metadata to send, delays end_stream. The case - // only happens when decoding headers filters return ContinueAndEndStream. - const bool delay_headers_end_stream = end_stream && !downstream_metadata_map_vector_.empty(); - upstream_->encodeHeaders(*parent_.downstreamHeaders(), end_stream && !delay_headers_end_stream); + // Make sure that when we are forwarding CONNECT payload we do not do so until + // the upstream has accepted the CONNECT request. + if (conn_pool_->protocol().has_value() && headers->Method() && + headers->Method()->value().getStringView() == Http::Headers::get().MethodValues.Connect) { + paused_for_connect_ = true; + } + + upstream_->encodeHeaders(*parent_.downstreamHeaders(), shouldSendEndStream()); calling_encode_headers_ = false; + if (!paused_for_connect_) { + encodeBodyAndTrailers(); + } +} + +void UpstreamRequest::encodeBodyAndTrailers() { // It is possible to get reset in the middle of an encodeHeaders() call. This happens for // example in the HTTP/2 codec if the frame cannot be encoded for some reason. This should never // happen but it's unclear if we have covered all cases so protect against it and test for it. @@ -389,7 +405,7 @@ void UpstreamRequest::onPoolReady( downstream_metadata_map_vector_); upstream_->encodeMetadata(downstream_metadata_map_vector_); downstream_metadata_map_vector_.clear(); - if (delay_headers_end_stream) { + if (shouldSendEndStream()) { Buffer::OwnedImpl empty_data(""); upstream_->encodeData(empty_data, true); } diff --git a/source/common/router/upstream_request.h b/source/common/router/upstream_request.h index 938b47327318..3dd852fab2f4 100644 --- a/source/common/router/upstream_request.h +++ b/source/common/router/upstream_request.h @@ -119,6 +119,7 @@ class UpstreamRequest : public Logger::Loggable, }; void readEnable(); + void encodeBodyAndTrailers(); // Getters and setters Upstream::HostDescriptionConstSharedPtr& upstreamHost() { return upstream_host_; } @@ -141,6 +142,13 @@ class UpstreamRequest : public Logger::Loggable, RouterFilterInterface& parent() { return parent_; } private: + bool shouldSendEndStream() { + // Only encode end stream if the full request has been received, the body + // has been sent, and any trailers or metadata have also been sent. 
+ return encode_complete_ && !buffered_request_body_ && !encode_trailers_ && + downstream_metadata_map_vector_.empty(); + } + RouterFilterInterface& parent_; std::unique_ptr conn_pool_; bool grpc_rq_success_deferred_; @@ -173,6 +181,9 @@ class UpstreamRequest : public Logger::Loggable, // Tracks whether we deferred a per try timeout because the downstream request // had not been completed yet. bool create_per_try_timeout_on_request_complete_ : 1; + // True if the CONNECT headers have been sent but proxying payload is paused + // waiting for response headers. + bool paused_for_connect_ : 1; // Sentinel to indicate if timeout budget tracking is configured for the cluster, // and if so, if the per-try histogram should record a value. diff --git a/test/common/router/router_test.cc b/test/common/router/router_test.cc index 1a0b47b730c5..886004887a2a 100644 --- a/test/common/router/router_test.cc +++ b/test/common/router/router_test.cc @@ -5667,6 +5667,71 @@ TEST_F(RouterTest, ApplicationProtocols) { callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); } +// Verify that CONNECT payload is not sent upstream until :200 response headers +// are received. +TEST_F(RouterTest, ConnectPauseAndResume) { + NiceMock encoder; + Http::ResponseDecoder* response_decoder = nullptr; + EXPECT_CALL(cm_.conn_pool_, newStream(_, _)) + .WillOnce(Invoke( + [&](Http::ResponseDecoder& decoder, + Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* { + response_decoder = &decoder; + callbacks.onPoolReady(encoder, cm_.conn_pool_.host_, upstream_stream_info_); + return nullptr; + })); + expectResponseTimerCreate(); + + EXPECT_CALL(encoder, encodeHeaders(_, false)); + Http::TestRequestHeaderMapImpl headers; + HttpTestUtility::addDefaultHeaders(headers); + headers.setMethod("CONNECT"); + router_.decodeHeaders(headers, false); + + // Make sure any early data does not go upstream. + EXPECT_CALL(encoder, encodeData(_, _)).Times(0); + Buffer::OwnedImpl data; + router_.decodeData(data, true); + + // Now send the response headers, and ensure the deferred payload is proxied. + EXPECT_CALL(encoder, encodeData(_, _)); + Http::ResponseHeaderMapPtr response_headers( + new Http::TestResponseHeaderMapImpl{{":status", "200"}}); + response_decoder->decodeHeaders(std::move(response_headers), true); +} + +// Verify that CONNECT payload is not sent upstream if non-200 response headers are received. +TEST_F(RouterTest, ConnectPauseNoResume) { + NiceMock encoder; + Http::ResponseDecoder* response_decoder = nullptr; + EXPECT_CALL(cm_.conn_pool_, newStream(_, _)) + .WillOnce(Invoke( + [&](Http::ResponseDecoder& decoder, + Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* { + response_decoder = &decoder; + callbacks.onPoolReady(encoder, cm_.conn_pool_.host_, upstream_stream_info_); + return nullptr; + })); + expectResponseTimerCreate(); + + EXPECT_CALL(encoder, encodeHeaders(_, false)); + Http::TestRequestHeaderMapImpl headers; + HttpTestUtility::addDefaultHeaders(headers); + headers.setMethod("CONNECT"); + router_.decodeHeaders(headers, false); + + // Make sure any early data does not go upstream. + EXPECT_CALL(encoder, encodeData(_, _)).Times(0); + Buffer::OwnedImpl data; + router_.decodeData(data, true); + + // Now send the response headers, and ensure the deferred payload is not proxied. 
+ EXPECT_CALL(encoder, encodeData(_, _)).Times(0); + Http::ResponseHeaderMapPtr response_headers( + new Http::TestResponseHeaderMapImpl{{":status", "400"}}); + response_decoder->decodeHeaders(std::move(response_headers), true); +} + class WatermarkTest : public RouterTest { public: void sendRequest(bool header_only_request = true, bool pool_ready = true) { diff --git a/test/integration/integration_test.cc b/test/integration/integration_test.cc index e16d01583c71..6e2255d6516e 100644 --- a/test/integration/integration_test.cc +++ b/test/integration/integration_test.cc @@ -1336,8 +1336,10 @@ TEST_P(IntegrationTest, ConnectWithChunkedBody) { }); initialize(); + // Send the payload early so we can regression test that body data does not + // get proxied until after the response headers are sent. IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("http")); - tcp_client->write("CONNECT host.com:80 HTTP/1.1\r\nHost: host\r\n\r\n", false); + tcp_client->write("CONNECT host.com:80 HTTP/1.1\r\nHost: host\r\n\r\npayload", false); FakeRawConnectionPtr fake_upstream_connection; ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); @@ -1347,6 +1349,8 @@ TEST_P(IntegrationTest, ConnectWithChunkedBody) { // No transfer-encoding: chunked or connection: close EXPECT_FALSE(absl::StrContains(data, "hunked")) << data; EXPECT_FALSE(absl::StrContains(data, "onnection")) << data; + // The payload should not be present as the response headers have not been sent. + EXPECT_FALSE(absl::StrContains(data, "payload")) << data; ASSERT_TRUE(fake_upstream_connection->write( "HTTP/1.1 200 OK\r\ntransfer-encoding: chunked\r\n\r\nb\r\nHello World\r\n0\r\n\r\n")); @@ -1356,8 +1360,7 @@ TEST_P(IntegrationTest, ConnectWithChunkedBody) { EXPECT_TRUE(absl::StrContains(tcp_client->data(), "\r\n\r\nb\r\nHello World\r\n0\r\n\r\n")) << tcp_client->data(); - // Make sure the following payload is proxied without chunks or any other modifications. - tcp_client->write("payload"); + // Make sure the early payload is proxied without chunks or any other modifications. ASSERT_TRUE(fake_upstream_connection->waitForData( FakeRawConnection::waitForInexactMatch("\r\n\r\npayload"))); diff --git a/test/integration/tcp_tunneling_integration_test.cc b/test/integration/tcp_tunneling_integration_test.cc index 3ce0f1cec06a..3cdaa3074770 100644 --- a/test/integration/tcp_tunneling_integration_test.cc +++ b/test/integration/tcp_tunneling_integration_test.cc @@ -80,8 +80,6 @@ class ConnectTerminationIntegrationTest bool enable_timeout_{}; }; -// TODO(alyssawilk) make sure that if data is sent with the connect it does not go upstream -// until the 200 headers are sent before unhiding ANY config. 
TEST_P(ConnectTerminationIntegrationTest, Basic) { initialize(); From a800613288c8569432e033c23919144991eb3bfa Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Thu, 30 Apr 2020 18:01:00 -0700 Subject: [PATCH 074/909] build: fix zlib build for crosscompiling (#11022) Signed-off-by: Lizan Zhou --- bazel/foreign_cc/BUILD | 2 ++ bazel/foreign_cc/zlib.patch | 44 +++++++++++++++++++++++++++++++++++++ bazel/repositories.bzl | 7 +++--- 3 files changed, 49 insertions(+), 4 deletions(-) create mode 100644 bazel/foreign_cc/zlib.patch diff --git a/bazel/foreign_cc/BUILD b/bazel/foreign_cc/BUILD index b2ee9cb74d7b..09d67b2a4d28 100644 --- a/bazel/foreign_cc/BUILD +++ b/bazel/foreign_cc/BUILD @@ -212,6 +212,8 @@ envoy_cmake_external( cache_entries = { "BUILD_SHARED_LIBS": "off", "CMAKE_CXX_COMPILER_FORCED": "on", + "CMAKE_C_COMPILER_FORCED": "on", + "SKIP_BUILD_EXAMPLES": "on", }, lib_source = "@net_zlib//:all", static_libraries = select({ diff --git a/bazel/foreign_cc/zlib.patch b/bazel/foreign_cc/zlib.patch new file mode 100644 index 000000000000..d8a7354dc6da --- /dev/null +++ b/bazel/foreign_cc/zlib.patch @@ -0,0 +1,44 @@ +diff --git a/CMakeLists.txt b/CMakeLists.txt +index 0fe939d..2f0475a 100644 +--- a/CMakeLists.txt ++++ b/CMakeLists.txt +@@ -229,21 +229,22 @@ endif() + #============================================================================ + # Example binaries + #============================================================================ +- +-add_executable(example test/example.c) +-target_link_libraries(example zlib) +-add_test(example example) +- +-add_executable(minigzip test/minigzip.c) +-target_link_libraries(minigzip zlib) +- +-if(HAVE_OFF64_T) +- add_executable(example64 test/example.c) +- target_link_libraries(example64 zlib) +- set_target_properties(example64 PROPERTIES COMPILE_FLAGS "-D_FILE_OFFSET_BITS=64") +- add_test(example64 example64) +- +- add_executable(minigzip64 test/minigzip.c) +- target_link_libraries(minigzip64 zlib) +- set_target_properties(minigzip64 PROPERTIES COMPILE_FLAGS "-D_FILE_OFFSET_BITS=64") ++if(NOT SKIP_BUILD_EXAMPLES) ++ add_executable(example test/example.c) ++ target_link_libraries(example zlib) ++ add_test(example example) ++ ++ add_executable(minigzip test/minigzip.c) ++ target_link_libraries(minigzip zlib) ++ ++ if(HAVE_OFF64_T) ++ add_executable(example64 test/example.c) ++ target_link_libraries(example64 zlib) ++ set_target_properties(example64 PROPERTIES COMPILE_FLAGS "-D_FILE_OFFSET_BITS=64") ++ add_test(example64 example64) ++ ++ add_executable(minigzip64 test/minigzip.c) ++ target_link_libraries(minigzip64 zlib) ++ set_target_properties(minigzip64 PROPERTIES COMPILE_FLAGS "-D_FILE_OFFSET_BITS=64") ++ endif() + endif() diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index 66874b179224..a880eea1a2bb 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -347,12 +347,11 @@ def _com_github_libevent_libevent(): ) def _net_zlib(): - location = _get_location("net_zlib") - - http_archive( + _repository_impl( name = "net_zlib", build_file_content = BUILD_ALL_CONTENT, - **location + patch_args = ["-p1"], + patches = ["@envoy//bazel/foreign_cc:zlib.patch"], ) native.bind( From 85a67843fff09668cfc053e177db165530ebc30b Mon Sep 17 00:00:00 2001 From: Wayne Zhang Date: Thu, 30 Apr 2020 18:07:52 -0700 Subject: [PATCH 075/909] Cleanup: add const for access_log_filter::evaluate function (#11002) Add const to the function so that const object can be used to call const AccessLogFilter& filter; filter.evaluate(...) 
; Signed-off-by: Wayne Zhang --- include/envoy/access_log/access_log.h | 2 +- source/common/access_log/access_log_impl.cc | 26 +++++++++++-------- source/common/access_log/access_log_impl.h | 22 ++++++++-------- .../common/access_log/access_log_impl_test.cc | 4 +-- test/mocks/access_log/mocks.h | 3 ++- 5 files changed, 31 insertions(+), 26 deletions(-) diff --git a/include/envoy/access_log/access_log.h b/include/envoy/access_log/access_log.h index ec58fb541f96..4f1d4ee0fc0e 100644 --- a/include/envoy/access_log/access_log.h +++ b/include/envoy/access_log/access_log.h @@ -67,7 +67,7 @@ class Filter { virtual bool evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap& response_headers, - const Http::ResponseTrailerMap& response_trailers) PURE; + const Http::ResponseTrailerMap& response_trailers) const PURE; }; using FilterPtr = std::unique_ptr; diff --git a/source/common/access_log/access_log_impl.cc b/source/common/access_log/access_log_impl.cc index a0f69d1cf90e..42f5ee2d84bc 100644 --- a/source/common/access_log/access_log_impl.cc +++ b/source/common/access_log/access_log_impl.cc @@ -32,7 +32,7 @@ ComparisonFilter::ComparisonFilter(const envoy::config::accesslog::v3::Compariso Runtime::Loader& runtime) : config_(config), runtime_(runtime) {} -bool ComparisonFilter::compareAgainstValue(uint64_t lhs) { +bool ComparisonFilter::compareAgainstValue(uint64_t lhs) const { uint64_t value = config_.value().default_value(); if (!config_.value().runtime_key().empty()) { @@ -92,14 +92,15 @@ FilterPtr FilterFactory::fromProto(const envoy::config::accesslog::v3::AccessLog bool TraceableRequestFilter::evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap&, - const Http::ResponseTrailerMap&) { + const Http::ResponseTrailerMap&) const { Tracing::Decision decision = Tracing::HttpTracerUtility::isTracing(info, request_headers); return decision.traced && decision.reason == Tracing::Reason::ServiceForced; } bool StatusCodeFilter::evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap&, - const Http::ResponseHeaderMap&, const Http::ResponseTrailerMap&) { + const Http::ResponseHeaderMap&, + const Http::ResponseTrailerMap&) const { if (!info.responseCode()) { return compareAgainstValue(0ULL); } @@ -108,7 +109,8 @@ bool StatusCodeFilter::evaluate(const StreamInfo::StreamInfo& info, const Http:: } bool DurationFilter::evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap&, - const Http::ResponseHeaderMap&, const Http::ResponseTrailerMap&) { + const Http::ResponseHeaderMap&, + const Http::ResponseTrailerMap&) const { absl::optional final = info.requestComplete(); ASSERT(final); @@ -124,7 +126,8 @@ RuntimeFilter::RuntimeFilter(const envoy::config::accesslog::v3::RuntimeFilter& bool RuntimeFilter::evaluate(const StreamInfo::StreamInfo& stream_info, const Http::RequestHeaderMap& request_headers, - const Http::ResponseHeaderMap&, const Http::ResponseTrailerMap&) { + const Http::ResponseHeaderMap&, + const Http::ResponseTrailerMap&) const { auto rid_extension = stream_info.getRequestIDExtension(); uint64_t random_value; if (use_independent_randomness_ || @@ -161,7 +164,7 @@ AndFilter::AndFilter(const envoy::config::accesslog::v3::AndFilter& config, bool OrFilter::evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap& response_headers, - const Http::ResponseTrailerMap& 
response_trailers) { + const Http::ResponseTrailerMap& response_trailers) const { bool result = false; for (auto& filter : filters_) { result |= filter->evaluate(info, request_headers, response_headers, response_trailers); @@ -177,7 +180,7 @@ bool OrFilter::evaluate(const StreamInfo::StreamInfo& info, bool AndFilter::evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap& response_headers, - const Http::ResponseTrailerMap& response_trailers) { + const Http::ResponseTrailerMap& response_trailers) const { bool result = true; for (auto& filter : filters_) { result &= filter->evaluate(info, request_headers, response_headers, response_trailers); @@ -192,7 +195,7 @@ bool AndFilter::evaluate(const StreamInfo::StreamInfo& info, bool NotHealthCheckFilter::evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, - const Http::ResponseTrailerMap&) { + const Http::ResponseTrailerMap&) const { return !info.healthCheck(); } @@ -201,7 +204,7 @@ HeaderFilter::HeaderFilter(const envoy::config::accesslog::v3::HeaderFilter& con bool HeaderFilter::evaluate(const StreamInfo::StreamInfo&, const Http::RequestHeaderMap& request_headers, - const Http::ResponseHeaderMap&, const Http::ResponseTrailerMap&) { + const Http::ResponseHeaderMap&, const Http::ResponseTrailerMap&) const { return Http::HeaderUtility::matchHeaders(request_headers, *header_data_); } @@ -217,7 +220,8 @@ ResponseFlagFilter::ResponseFlagFilter( } bool ResponseFlagFilter::evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap&, - const Http::ResponseHeaderMap&, const Http::ResponseTrailerMap&) { + const Http::ResponseHeaderMap&, + const Http::ResponseTrailerMap&) const { if (configured_flags_ != 0) { return info.intersectResponseFlags(configured_flags_); } @@ -234,7 +238,7 @@ GrpcStatusFilter::GrpcStatusFilter(const envoy::config::accesslog::v3::GrpcStatu bool GrpcStatusFilter::evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap&, const Http::ResponseHeaderMap& response_headers, - const Http::ResponseTrailerMap& response_trailers) { + const Http::ResponseTrailerMap& response_trailers) const { Grpc::Status::GrpcStatus status = Grpc::Status::WellKnownGrpcStatus::Unknown; const auto& optional_status = diff --git a/source/common/access_log/access_log_impl.h b/source/common/access_log/access_log_impl.h index 512a957e1dc2..518687e77967 100644 --- a/source/common/access_log/access_log_impl.h +++ b/source/common/access_log/access_log_impl.h @@ -42,7 +42,7 @@ class ComparisonFilter : public Filter { ComparisonFilter(const envoy::config::accesslog::v3::ComparisonFilter& config, Runtime::Loader& runtime); - bool compareAgainstValue(uint64_t lhs); + bool compareAgainstValue(uint64_t lhs) const; envoy::config::accesslog::v3::ComparisonFilter config_; Runtime::Loader& runtime_; @@ -60,7 +60,7 @@ class StatusCodeFilter : public ComparisonFilter { // AccessLog::Filter bool evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap& response_headers, - const Http::ResponseTrailerMap& response_trailers) override; + const Http::ResponseTrailerMap& response_trailers) const override; }; /** @@ -75,7 +75,7 @@ class DurationFilter : public ComparisonFilter { // AccessLog::Filter bool evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap& response_headers, - const 
Http::ResponseTrailerMap& response_trailers) override; + const Http::ResponseTrailerMap& response_trailers) const override; }; /** @@ -104,7 +104,7 @@ class AndFilter : public OperatorFilter { // AccessLog::Filter bool evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap& response_headers, - const Http::ResponseTrailerMap& response_trailers) override; + const Http::ResponseTrailerMap& response_trailers) const override; }; /** @@ -119,7 +119,7 @@ class OrFilter : public OperatorFilter { // AccessLog::Filter bool evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap& response_headers, - const Http::ResponseTrailerMap& response_trailers) override; + const Http::ResponseTrailerMap& response_trailers) const override; }; /** @@ -132,7 +132,7 @@ class NotHealthCheckFilter : public Filter { // AccessLog::Filter bool evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap& response_headers, - const Http::ResponseTrailerMap& response_trailers) override; + const Http::ResponseTrailerMap& response_trailers) const override; }; /** @@ -143,7 +143,7 @@ class TraceableRequestFilter : public Filter { // AccessLog::Filter bool evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap& response_headers, - const Http::ResponseTrailerMap& response_trailers) override; + const Http::ResponseTrailerMap& response_trailers) const override; }; /** @@ -157,7 +157,7 @@ class RuntimeFilter : public Filter { // AccessLog::Filter bool evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap& response_headers, - const Http::ResponseTrailerMap& response_trailers) override; + const Http::ResponseTrailerMap& response_trailers) const override; private: Runtime::Loader& runtime_; @@ -177,7 +177,7 @@ class HeaderFilter : public Filter { // AccessLog::Filter bool evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap& response_headers, - const Http::ResponseTrailerMap& response_trailers) override; + const Http::ResponseTrailerMap& response_trailers) const override; private: const Http::HeaderUtility::HeaderDataPtr header_data_; @@ -193,7 +193,7 @@ class ResponseFlagFilter : public Filter { // AccessLog::Filter bool evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap& response_headers, - const Http::ResponseTrailerMap& response_trailers) override; + const Http::ResponseTrailerMap& response_trailers) const override; private: uint64_t configured_flags_{}; @@ -214,7 +214,7 @@ class GrpcStatusFilter : public Filter { // AccessLog::Filter bool evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap& response_headers, - const Http::ResponseTrailerMap& response_trailers) override; + const Http::ResponseTrailerMap& response_trailers) const override; private: GrpcStatusHashSet statuses_; diff --git a/test/common/access_log/access_log_impl_test.cc b/test/common/access_log/access_log_impl_test.cc index c1194630dcee..7e5b54ab2bce 100644 --- a/test/common/access_log/access_log_impl_test.cc +++ b/test/common/access_log/access_log_impl_test.cc @@ -1317,7 +1317,7 @@ class SampleExtensionFilter : public Filter { // 
AccessLog::Filter bool evaluate(const StreamInfo::StreamInfo&, const Http::RequestHeaderMap&, - const Http::ResponseHeaderMap&, const Http::ResponseTrailerMap&) override { + const Http::ResponseHeaderMap&, const Http::ResponseTrailerMap&) const override { if (current_++ == 0) { return true; } @@ -1328,7 +1328,7 @@ class SampleExtensionFilter : public Filter { } private: - uint32_t current_ = 0; + mutable uint32_t current_ = 0; uint32_t sample_rate_; }; diff --git a/test/mocks/access_log/mocks.h b/test/mocks/access_log/mocks.h index ce5539207fa9..8d30128564ec 100644 --- a/test/mocks/access_log/mocks.h +++ b/test/mocks/access_log/mocks.h @@ -30,7 +30,8 @@ class MockFilter : public Filter { MOCK_METHOD(bool, evaluate, (const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap& response_headers, - const Http::ResponseTrailerMap& response_trailers)); + const Http::ResponseTrailerMap& response_trailers), + (const)); }; class MockAccessLogManager : public AccessLogManager { From 54cd4d49e895befb8ecb10ebb14585cd8fc71ee7 Mon Sep 17 00:00:00 2001 From: Alvin Baptiste <11775386+abaptiste@users.noreply.github.com> Date: Fri, 1 May 2020 09:48:28 -0700 Subject: [PATCH 076/909] dns_filter: Add request parsing (#10697) This change adds DNS Request Parsing to the DNS filter. The filter will parse and decode DNS requests for A and AAAA records. Tests simply validate that the filter can consume queries. Signed-off-by: Alvin Baptiste --- api/BUILD | 3 +- .../filter/udp/dns_filter/v2alpha/BUILD | 13 - .../udp/dns_filter/v2alpha/dns_filter.proto | 48 ---- .../udp/dns_filter/v3alpha/BUILD | 1 - .../udp/dns_filter/v3alpha/dns_filter.proto | 40 ++- api/versioning/BUILD | 3 +- docs/root/api-v3/config/filter/udp/udp.rst | 2 +- .../listeners/udp_filters/dns_filter.rst | 108 ++++++-- .../arch_overview/listeners/dns_filter.rst | 2 +- .../filter/udp/dns_filter/v2alpha/BUILD | 13 - .../udp/dns_filter/v2alpha/dns_filter.proto | 48 ---- .../udp/dns_filter/v3alpha/BUILD | 1 - .../udp/dns_filter/v3alpha/dns_filter.proto | 40 ++- .../extensions/filters/udp/dns_filter/BUILD | 19 +- .../filters/udp/dns_filter/config.cc | 4 +- .../filters/udp/dns_filter/config.h | 4 +- .../filters/udp/dns_filter/dns_filter.cc | 78 ++++-- .../filters/udp/dns_filter/dns_filter.h | 82 ++++++- .../filters/udp/dns_filter/dns_parser.cc | 232 ++++++++++++++++++ .../filters/udp/dns_filter/dns_parser.h | 183 ++++++++++++++ test/extensions/filters/udp/dns_filter/BUILD | 15 +- .../filters/udp/dns_filter/dns_filter_test.cc | 107 ++++++-- .../udp/dns_filter/dns_filter_test_utils.cc | 67 +++++ .../udp/dns_filter/dns_filter_test_utils.h | 19 ++ tools/spelling/spelling_dictionary.txt | 1 + 25 files changed, 899 insertions(+), 234 deletions(-) delete mode 100644 api/envoy/config/filter/udp/dns_filter/v2alpha/BUILD delete mode 100644 api/envoy/config/filter/udp/dns_filter/v2alpha/dns_filter.proto rename api/envoy/extensions/{filter => filters}/udp/dns_filter/v3alpha/BUILD (84%) rename {generated_api_shadow/envoy/extensions/filter => api/envoy/extensions/filters}/udp/dns_filter/v3alpha/dns_filter.proto (50%) delete mode 100644 generated_api_shadow/envoy/config/filter/udp/dns_filter/v2alpha/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/udp/dns_filter/v2alpha/dns_filter.proto rename generated_api_shadow/envoy/extensions/{filter => filters}/udp/dns_filter/v3alpha/BUILD (84%) rename {api/envoy/extensions/filter => 
generated_api_shadow/envoy/extensions/filters}/udp/dns_filter/v3alpha/dns_filter.proto (50%) create mode 100644 source/extensions/filters/udp/dns_filter/dns_parser.cc create mode 100644 source/extensions/filters/udp/dns_filter/dns_parser.h create mode 100644 test/extensions/filters/udp/dns_filter/dns_filter_test_utils.cc create mode 100644 test/extensions/filters/udp/dns_filter/dns_filter_test_utils.h diff --git a/api/BUILD b/api/BUILD index d52653ebc4e6..fe373e4533d5 100644 --- a/api/BUILD +++ b/api/BUILD @@ -81,7 +81,6 @@ proto_library( "//envoy/config/filter/network/zookeeper_proxy/v1alpha1:pkg", "//envoy/config/filter/thrift/rate_limit/v2alpha1:pkg", "//envoy/config/filter/thrift/router/v2alpha1:pkg", - "//envoy/config/filter/udp/dns_filter/v2alpha:pkg", "//envoy/config/filter/udp/udp_proxy/v2alpha:pkg", "//envoy/config/grpc_credential/v2alpha:pkg", "//envoy/config/health_checker/redis/v2:pkg", @@ -162,7 +161,6 @@ proto_library( "//envoy/extensions/common/dynamic_forward_proxy/v3:pkg", "//envoy/extensions/common/ratelimit/v3:pkg", "//envoy/extensions/common/tap/v3:pkg", - "//envoy/extensions/filter/udp/dns_filter/v3alpha:pkg", "//envoy/extensions/filters/common/fault/v3:pkg", "//envoy/extensions/filters/http/adaptive_concurrency/v3:pkg", "//envoy/extensions/filters/http/aws_lambda/v3:pkg", @@ -221,6 +219,7 @@ proto_library( "//envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3:pkg", "//envoy/extensions/filters/network/thrift_proxy/v3:pkg", "//envoy/extensions/filters/network/zookeeper_proxy/v3:pkg", + "//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg", "//envoy/extensions/retry/host/omit_host_metadata/v3:pkg", "//envoy/extensions/retry/priority/previous_priorities/v3:pkg", "//envoy/extensions/transport_sockets/alts/v3:pkg", diff --git a/api/envoy/config/filter/udp/dns_filter/v2alpha/BUILD b/api/envoy/config/filter/udp/dns_filter/v2alpha/BUILD deleted file mode 100644 index c6f01577c828..000000000000 --- a/api/envoy/config/filter/udp/dns_filter/v2alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "//envoy/data/dns/v2alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/config/filter/udp/dns_filter/v2alpha/dns_filter.proto b/api/envoy/config/filter/udp/dns_filter/v2alpha/dns_filter.proto deleted file mode 100644 index de2608d44306..000000000000 --- a/api/envoy/config/filter/udp/dns_filter/v2alpha/dns_filter.proto +++ /dev/null @@ -1,48 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.udp.dns_filter.v2alpha; - -import "envoy/api/v2/core/base.proto"; -import "envoy/data/dns/v2alpha/dns_table.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.udp.dns_filter.v2alpha"; -option java_outer_classname = "DnsFilterProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filter.udp.dns_filter.v3alpha"; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: DNS Filter] -// DNS Filter :ref:`configuration overview `. -// [#extension: envoy.filters.udp_listener.dns_filter] - -// Configuration for the DNS filter. 
-message DnsFilterConfig { - // This message contains the configuration for the Dns Filter operating - // in a server context. This message will contain the virtual hosts and - // associated addresses with which Envoy will respond to queries - message ServerContextConfig { - oneof config_source { - option (validate.required) = true; - - // Load the configuration specified from the control plane - data.dns.v2alpha.DnsTable inline_dns_table = 1; - - // Seed the filter configuration from an external path. This source - // is a yaml formatted file that contains the DnsTable driving Envoy's - // responses to DNS queries - api.v2.core.DataSource external_dns_table = 2; - } - } - - // The stat prefix used when emitting DNS filter statistics - string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - // Server context configuration - ServerContextConfig server_config = 2; -} diff --git a/api/envoy/extensions/filter/udp/dns_filter/v3alpha/BUILD b/api/envoy/extensions/filters/udp/dns_filter/v3alpha/BUILD similarity index 84% rename from api/envoy/extensions/filter/udp/dns_filter/v3alpha/BUILD rename to api/envoy/extensions/filters/udp/dns_filter/v3alpha/BUILD index d011b4d830ad..dbf0a33e662e 100644 --- a/api/envoy/extensions/filter/udp/dns_filter/v3alpha/BUILD +++ b/api/envoy/extensions/filters/udp/dns_filter/v3alpha/BUILD @@ -7,7 +7,6 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/core/v3:pkg", - "//envoy/config/filter/udp/dns_filter/v2alpha:pkg", "//envoy/data/dns/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], diff --git a/generated_api_shadow/envoy/extensions/filter/udp/dns_filter/v3alpha/dns_filter.proto b/api/envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.proto similarity index 50% rename from generated_api_shadow/envoy/extensions/filter/udp/dns_filter/v3alpha/dns_filter.proto rename to api/envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.proto index 38a8872d323e..ed9d1c27d04e 100644 --- a/generated_api_shadow/envoy/extensions/filter/udp/dns_filter/v3alpha/dns_filter.proto +++ b/api/envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.proto @@ -1,15 +1,16 @@ syntax = "proto3"; -package envoy.extensions.filter.udp.dns_filter.v3alpha; +package envoy.extensions.filters.udp.dns_filter.v3alpha; import "envoy/config/core/v3/base.proto"; import "envoy/data/dns/v3/dns_table.proto"; +import "google/protobuf/duration.proto"; + import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; -option java_package = "io.envoyproxy.envoy.extensions.filter.udp.dns_filter.v3alpha"; +option java_package = "io.envoyproxy.envoy.extensions.filters.udp.dns_filter.v3alpha"; option java_outer_classname = "DnsFilterProto"; option java_multiple_files = true; option (udpa.annotations.file_status).work_in_progress = true; @@ -21,16 +22,10 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Configuration for the DNS filter. message DnsFilterConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.udp.dns_filter.v2alpha.DnsFilterConfig"; - - // This message contains the configuration for the Dns Filter operating + // This message contains the configuration for the DNS Filter operating // in a server context. 
This message will contain the virtual hosts and // associated addresses with which Envoy will respond to queries message ServerContextConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.udp.dns_filter.v2alpha.DnsFilterConfig.ServerContextConfig"; - oneof config_source { option (validate.required) = true; @@ -44,9 +39,32 @@ message DnsFilterConfig { } } + // This message contains the configuration for the DNS Filter operating + // in a client context. This message will contain the timeouts, retry, + // and forwarding configuration for Envoy to make DNS requests to other + // resolvers + message ClientContextConfig { + // Sets the maximum time we will wait for the upstream query to complete + // We allow 5s for the upstream resolution to complete, so the minimum + // value here is 5 + google.protobuf.Duration resolver_timeout = 1 [(validate.rules).duration = {gte {seconds: 5}}]; + + // A list of DNS servers to which we can forward queries + repeated string upstream_resolvers = 2 [(validate.rules).repeated = { + min_items: 1 + items {string {min_len: 3}} + }]; + } + // The stat prefix used when emitting DNS filter statistics string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - // Server context configuration + // Server context configuration contains the data that the filter uses to respond + // to DNS requests. ServerContextConfig server_config = 2; + + // Client context configuration controls Envoy's behavior when it must use external + // resolvers to answer a query. This object is optional and if omitted instructs + // the filter to resolve queries from the data in the server_config + ClientContextConfig client_config = 3; } diff --git a/api/versioning/BUILD b/api/versioning/BUILD index f1a0d2440e14..992e9a33342b 100644 --- a/api/versioning/BUILD +++ b/api/versioning/BUILD @@ -44,7 +44,6 @@ proto_library( "//envoy/extensions/common/dynamic_forward_proxy/v3:pkg", "//envoy/extensions/common/ratelimit/v3:pkg", "//envoy/extensions/common/tap/v3:pkg", - "//envoy/extensions/filter/udp/dns_filter/v3alpha:pkg", "//envoy/extensions/filters/common/fault/v3:pkg", "//envoy/extensions/filters/http/adaptive_concurrency/v3:pkg", "//envoy/extensions/filters/http/aws_lambda/v3:pkg", @@ -103,6 +102,7 @@ proto_library( "//envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3:pkg", "//envoy/extensions/filters/network/thrift_proxy/v3:pkg", "//envoy/extensions/filters/network/zookeeper_proxy/v3:pkg", + "//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg", "//envoy/extensions/retry/host/omit_host_metadata/v3:pkg", "//envoy/extensions/retry/priority/previous_priorities/v3:pkg", "//envoy/extensions/transport_sockets/alts/v3:pkg", @@ -211,7 +211,6 @@ proto_library( "//envoy/config/filter/network/thrift_proxy/v2alpha1:pkg", "//envoy/config/filter/network/zookeeper_proxy/v1alpha1:pkg", "//envoy/config/filter/thrift/rate_limit/v2alpha1:pkg", - "//envoy/config/filter/udp/dns_filter/v2alpha:pkg", "//envoy/config/grpc_credential/v2alpha:pkg", "//envoy/config/listener/v2:pkg", "//envoy/config/metrics/v2:pkg", diff --git a/docs/root/api-v3/config/filter/udp/udp.rst b/docs/root/api-v3/config/filter/udp/udp.rst index beaeaf857fc6..45a9d0a2b97a 100644 --- a/docs/root/api-v3/config/filter/udp/udp.rst +++ b/docs/root/api-v3/config/filter/udp/udp.rst @@ -6,4 +6,4 @@ UDP listener filters :maxdepth: 2 */v2alpha/* - ../../../extensions/filter/udp/*/v3alpha/* + ../../../extensions/filters/udp/*/v3alpha/* diff --git 
a/docs/root/configuration/listeners/udp_filters/dns_filter.rst b/docs/root/configuration/listeners/udp_filters/dns_filter.rst index 25b667ff40f2..2232eb687c17 100644 --- a/docs/root/configuration/listeners/udp_filters/dns_filter.rst +++ b/docs/root/configuration/listeners/udp_filters/dns_filter.rst @@ -7,18 +7,23 @@ DNS Filter DNS Filter is under active development and should be considered alpha and not production ready. -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.udp_listener.dns_filter* Overview -------- -The DNS filter allows Envoy to respond to DNS queries as an authoritative server for any configured -domains. The filter's configuration specifies the names and addresses for which Envoy will answer -as well as the configuration needed to send queries externally for unknown domains. +The DNS filter allows Envoy to resolve forward DNS queries as an authoritative server for all +configured domains. The filter's configuration specifies the names and addresses for which Envoy +will answer as well as the configuration needed to send queries externally for unknown domains. + +The filter supports local and external DNS resolution. If a lookup for a name does not match a +statically configured domain, or a provisioned cluster name, Envoy can refer the query to an +external resolver for an answer. Users have the option of specifying the DNS servers that Envoy +will use for external resolution. The filter supports :ref:`per-filter configuration -`. +`. An Example configuration follows that illustrates how the filter can be used. Example Configuration @@ -29,22 +34,26 @@ Example Configuration listener_filters: name: "envoy.filters.udp.dns_filter" typed_config: - "@type": "type.googleapis.com/envoy.config.filter.udp.dns_filter.v2alpha.DnsFilterConfig" + "@type": "type.googleapis.com/envoy.extensions.filters.udp.dns_filter.v3alpha.DnsFilterConfig" stat_prefix: "dns_filter_prefix" + client_config: + resolution_timeout: 5s + upstream_resolvers: + - "8.8.8.8" + - "8.8.4.4" server_config: inline_dns_table: - external_retry_count: 3 known_suffixes: - - suffix: "domain1.com" - - suffix: "domain2.com" - - suffix: "domain3.com" + - suffix: "domain1.com" + - suffix: "domain2.com" + - suffix: "domain3.com" virtual_domains: - - name: "www.domain1.com" - endpoint: - address_list: - address: - - 10.0.0.1 - - 10.0.0.2 + - name: "www.domain1.com" + endpoint: + address_list: + address: + - 10.0.0.1 + - 10.0.0.2 - name: "www.domain2.com" endpoint: address_list: @@ -54,8 +63,71 @@ Example Configuration endpoint: address_list: address: - - 10.0.3.1 + - 10.0.3.1 In this example, Envoy is configured to respond to client queries for three domains. For any -other query, it will forward upstream to external resolvers. +other query, it will forward upstream to external resolvers. The filter will return an address +matching the input query type. If the query is for type A records and no A records are configured, +Envoy will return no addresses and set the response code appropriately. Conversely, if there are +matching records for the query type, each configured address is returned. This is also true for +AAAA records. Only A and AAAA records are supported. If the filter parses other queries for other +record types, the filter immediately responds indicating that the query is not supported. + +To disable external resolution, one can omit the `client_config` section of the config. 
Envoy interprets +this configuration to mean that name resolution should be done only from the data appearing in the +`server_config` section. A query for a name not appearing in the DNS table will receive a "No Answer" +DNS response. + +The filter can also consume its configuration from an external dns table. The same configuration +that appears in the static configuration can be stored in a Proto3-conformant JSON file and +referenced in the configuration using the :ref:`external_dns_table DataSource ` +directive: + +Example External DnsTable Configuration +--------------------------------------- + +.. code-block:: yaml + + listener_filters: + name: "envoy.filters.udp.dns_filter" + typed_config: + '@type': 'type.googleapis.com/envoy.extensions.filters.udp.dns_filter.v3alpha.DnsFilterConfig' + stat_prefix: "my_prefix" + server_config: + external_dns_table: + filename: "/home/ubuntu/configs/dns_table.json" + +In the file, the table can be defined as follows: + +DnsTable JSON Configuration +--------------------------- + +.. code-block:: text + + known_suffixes: [ + { suffix: "suffix1.com" }, + { suffix: "suffix2.com" } + ], + virtual_domains: [ + { + name: "www.suffix1.com", + endpoint: { + address_list: { + address: [ "10.0.0.1", "10.0.0.2" ] + } + } + }, + { + name: "www.suffix2.com", + endpoint: { + address_list: { + address: [ "2001:8a:c1::2800:7" ] + } + } + } + ] + + +By utilizing this configuration, the DNS responses can be configured separately from the Envoy +configuration. diff --git a/docs/root/intro/arch_overview/listeners/dns_filter.rst b/docs/root/intro/arch_overview/listeners/dns_filter.rst index f6090c577ff9..219b3106cab0 100644 --- a/docs/root/intro/arch_overview/listeners/dns_filter.rst +++ b/docs/root/intro/arch_overview/listeners/dns_filter.rst @@ -1,5 +1,5 @@ DNS Filter ========== -Envoy supports DNS responses via a :ref:`UDP listener DNS Filter +Envoy supports responding to DNS requests by configuring a :ref:`UDP listener DNS Filter `. diff --git a/generated_api_shadow/envoy/config/filter/udp/dns_filter/v2alpha/BUILD b/generated_api_shadow/envoy/config/filter/udp/dns_filter/v2alpha/BUILD deleted file mode 100644 index c6f01577c828..000000000000 --- a/generated_api_shadow/envoy/config/filter/udp/dns_filter/v2alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "//envoy/data/dns/v2alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/udp/dns_filter/v2alpha/dns_filter.proto b/generated_api_shadow/envoy/config/filter/udp/dns_filter/v2alpha/dns_filter.proto deleted file mode 100644 index de2608d44306..000000000000 --- a/generated_api_shadow/envoy/config/filter/udp/dns_filter/v2alpha/dns_filter.proto +++ /dev/null @@ -1,48 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.udp.dns_filter.v2alpha; - -import "envoy/api/v2/core/base.proto"; -import "envoy/data/dns/v2alpha/dns_table.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.udp.dns_filter.v2alpha"; -option java_outer_classname = "DnsFilterProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filter.udp.dns_filter.v3alpha"; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: DNS Filter] -// DNS Filter :ref:`configuration overview `. -// [#extension: envoy.filters.udp_listener.dns_filter] - -// Configuration for the DNS filter. -message DnsFilterConfig { - // This message contains the configuration for the Dns Filter operating - // in a server context. This message will contain the virtual hosts and - // associated addresses with which Envoy will respond to queries - message ServerContextConfig { - oneof config_source { - option (validate.required) = true; - - // Load the configuration specified from the control plane - data.dns.v2alpha.DnsTable inline_dns_table = 1; - - // Seed the filter configuration from an external path. 
This source - // is a yaml formatted file that contains the DnsTable driving Envoy's - // responses to DNS queries - api.v2.core.DataSource external_dns_table = 2; - } - } - - // The stat prefix used when emitting DNS filter statistics - string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - // Server context configuration - ServerContextConfig server_config = 2; -} diff --git a/generated_api_shadow/envoy/extensions/filter/udp/dns_filter/v3alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v3alpha/BUILD similarity index 84% rename from generated_api_shadow/envoy/extensions/filter/udp/dns_filter/v3alpha/BUILD rename to generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v3alpha/BUILD index d011b4d830ad..dbf0a33e662e 100644 --- a/generated_api_shadow/envoy/extensions/filter/udp/dns_filter/v3alpha/BUILD +++ b/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v3alpha/BUILD @@ -7,7 +7,6 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/core/v3:pkg", - "//envoy/config/filter/udp/dns_filter/v2alpha:pkg", "//envoy/data/dns/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], diff --git a/api/envoy/extensions/filter/udp/dns_filter/v3alpha/dns_filter.proto b/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.proto similarity index 50% rename from api/envoy/extensions/filter/udp/dns_filter/v3alpha/dns_filter.proto rename to generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.proto index 38a8872d323e..ed9d1c27d04e 100644 --- a/api/envoy/extensions/filter/udp/dns_filter/v3alpha/dns_filter.proto +++ b/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.proto @@ -1,15 +1,16 @@ syntax = "proto3"; -package envoy.extensions.filter.udp.dns_filter.v3alpha; +package envoy.extensions.filters.udp.dns_filter.v3alpha; import "envoy/config/core/v3/base.proto"; import "envoy/data/dns/v3/dns_table.proto"; +import "google/protobuf/duration.proto"; + import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; -option java_package = "io.envoyproxy.envoy.extensions.filter.udp.dns_filter.v3alpha"; +option java_package = "io.envoyproxy.envoy.extensions.filters.udp.dns_filter.v3alpha"; option java_outer_classname = "DnsFilterProto"; option java_multiple_files = true; option (udpa.annotations.file_status).work_in_progress = true; @@ -21,16 +22,10 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Configuration for the DNS filter. message DnsFilterConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.udp.dns_filter.v2alpha.DnsFilterConfig"; - - // This message contains the configuration for the Dns Filter operating + // This message contains the configuration for the DNS Filter operating // in a server context. This message will contain the virtual hosts and // associated addresses with which Envoy will respond to queries message ServerContextConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.udp.dns_filter.v2alpha.DnsFilterConfig.ServerContextConfig"; - oneof config_source { option (validate.required) = true; @@ -44,9 +39,32 @@ message DnsFilterConfig { } } + // This message contains the configuration for the DNS Filter operating + // in a client context. 
This message will contain the timeouts, retry, + // and forwarding configuration for Envoy to make DNS requests to other + // resolvers + message ClientContextConfig { + // Sets the maximum time we will wait for the upstream query to complete + // We allow 5s for the upstream resolution to complete, so the minimum + // value here is 5 + google.protobuf.Duration resolver_timeout = 1 [(validate.rules).duration = {gte {seconds: 5}}]; + + // A list of DNS servers to which we can forward queries + repeated string upstream_resolvers = 2 [(validate.rules).repeated = { + min_items: 1 + items {string {min_len: 3}} + }]; + } + // The stat prefix used when emitting DNS filter statistics string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - // Server context configuration + // Server context configuration contains the data that the filter uses to respond + // to DNS requests. ServerContextConfig server_config = 2; + + // Client context configuration controls Envoy's behavior when it must use external + // resolvers to answer a query. This object is optional and if omitted instructs + // the filter to resolve queries from the data in the server_config + ClientContextConfig client_config = 3; } diff --git a/source/extensions/filters/udp/dns_filter/BUILD b/source/extensions/filters/udp/dns_filter/BUILD index 3020f321940e..8886bc279d35 100644 --- a/source/extensions/filters/udp/dns_filter/BUILD +++ b/source/extensions/filters/udp/dns_filter/BUILD @@ -11,22 +11,29 @@ envoy_package() envoy_cc_library( name = "dns_filter_lib", - srcs = ["dns_filter.cc"], - hdrs = ["dns_filter.h"], + srcs = [ + "dns_filter.cc", + "dns_parser.cc", + ], + hdrs = [ + "dns_filter.h", + "dns_parser.h", + ], + external_deps = ["ares"], deps = [ "//include/envoy/buffer:buffer_interface", "//include/envoy/event:file_event_interface", - "//include/envoy/event:timer_interface", "//include/envoy/network:address_interface", "//include/envoy/network:filter_interface", "//include/envoy/network:listener_interface", "//source/common/buffer:buffer_lib", "//source/common/common:empty_string", + "//source/common/common:matchers_lib", "//source/common/config:config_provider_lib", "//source/common/network:address_lib", "//source/common/network:utility_lib", - "//source/common/router:rds_lib", - "@envoy_api//envoy/config/filter/udp/dns_filter/v2alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/type/matcher/v3:pkg_cc_proto", ], ) @@ -40,6 +47,6 @@ envoy_cc_extension( ":dns_filter_lib", "//include/envoy/registry", "//include/envoy/server:filter_config_interface", - "@envoy_api//envoy/config/filter/udp/dns_filter/v2alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg_cc_proto", ], ) diff --git a/source/extensions/filters/udp/dns_filter/config.cc b/source/extensions/filters/udp/dns_filter/config.cc index f5bae1c6ec0e..242bbab75e9c 100644 --- a/source/extensions/filters/udp/dns_filter/config.cc +++ b/source/extensions/filters/udp/dns_filter/config.cc @@ -9,7 +9,7 @@ Network::UdpListenerFilterFactoryCb DnsFilterConfigFactory::createFilterFactoryF const Protobuf::Message& config, Server::Configuration::ListenerFactoryContext& context) { auto shared_config = std::make_shared( context, MessageUtil::downcastAndValidate< - const envoy::config::filter::udp::dns_filter::v2alpha::DnsFilterConfig&>( + const envoy::extensions::filters::udp::dns_filter::v3alpha::DnsFilterConfig&>( config, context.messageValidationVisitor())); return 
[shared_config](Network::UdpListenerFilterManager& filter_manager, @@ -19,7 +19,7 @@ Network::UdpListenerFilterFactoryCb DnsFilterConfigFactory::createFilterFactoryF } ProtobufTypes::MessagePtr DnsFilterConfigFactory::createEmptyConfigProto() { - return std::make_unique(); + return std::make_unique(); } std::string DnsFilterConfigFactory::name() const { return "envoy.filters.udp.dns_filter"; } diff --git a/source/extensions/filters/udp/dns_filter/config.h b/source/extensions/filters/udp/dns_filter/config.h index 8031f450a092..421feb786675 100644 --- a/source/extensions/filters/udp/dns_filter/config.h +++ b/source/extensions/filters/udp/dns_filter/config.h @@ -1,7 +1,7 @@ #pragma once -#include "envoy/config/filter/udp/dns_filter/v2alpha/dns_filter.pb.h" -#include "envoy/config/filter/udp/dns_filter/v2alpha/dns_filter.pb.validate.h" +#include "envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.pb.h" +#include "envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.pb.validate.h" #include "envoy/server/filter_config.h" #include "extensions/filters/udp/dns_filter/dns_filter.h" diff --git a/source/extensions/filters/udp/dns_filter/dns_filter.cc b/source/extensions/filters/udp/dns_filter/dns_filter.cc index f2eeaaada0cb..c55955d986c3 100644 --- a/source/extensions/filters/udp/dns_filter/dns_filter.cc +++ b/source/extensions/filters/udp/dns_filter/dns_filter.cc @@ -1,6 +1,9 @@ #include "extensions/filters/udp/dns_filter/dns_filter.h" #include "envoy/network/listener.h" +#include "envoy/type/matcher/v3/string.pb.h" + +#include "common/network/address_impl.h" namespace Envoy { namespace Extensions { @@ -9,45 +12,86 @@ namespace DnsFilter { DnsFilterEnvoyConfig::DnsFilterEnvoyConfig( Server::Configuration::ListenerFactoryContext& context, - const envoy::config::filter::udp::dns_filter::v2alpha::DnsFilterConfig& config) + const envoy::extensions::filters::udp::dns_filter::v3alpha::DnsFilterConfig& config) : root_scope_(context.scope()), stats_(generateStats(config.stat_prefix(), root_scope_)) { + using envoy::extensions::filters::udp::dns_filter::v3alpha::DnsFilterConfig; - using envoy::config::filter::udp::dns_filter::v2alpha::DnsFilterConfig; + static constexpr std::chrono::milliseconds DEFAULT_RESOLVER_TIMEOUT{500}; + static constexpr std::chrono::seconds DEFAULT_RESOLVER_TTL{300}; - // store configured data for server context const auto& server_config = config.server_config(); + // TODO(abaptiste): Read the external DataSource if (server_config.has_inline_dns_table()) { + const auto& dns_table = server_config.inline_dns_table(); + const size_t entries = dns_table.virtual_domains().size(); - const auto& cfg = server_config.inline_dns_table(); - const size_t entries = cfg.virtual_domains().size(); - - // TODO (abaptiste): Check that the domain configured here appears - // in the known domains list virtual_domains_.reserve(entries); - for (const auto& virtual_domain : cfg.virtual_domains()) { - DnsAddressList addresses{}; - + for (const auto& virtual_domain : dns_table.virtual_domains()) { + AddressConstPtrVec addrs{}; if (virtual_domain.endpoint().has_address_list()) { const auto& address_list = virtual_domain.endpoint().address_list().address(); - addresses.reserve(address_list.size()); + addrs.reserve(address_list.size()); + // This will throw an exception if the configured_address string is malformed for (const auto& configured_address : address_list) { - addresses.push_back(configured_address); + const auto ipaddr = + Network::Utility::parseInternetAddress(configured_address, 0 /* 
port */); + addrs.push_back(ipaddr); } } + virtual_domains_.emplace(virtual_domain.name(), std::move(addrs)); + std::chrono::seconds ttl = virtual_domain.has_answer_ttl() + ? std::chrono::seconds(virtual_domain.answer_ttl().seconds()) + : DEFAULT_RESOLVER_TTL; + domain_ttl_.emplace(virtual_domain.name(), ttl); + } + + // Add known domains + known_suffixes_.reserve(dns_table.known_suffixes().size()); + for (const auto& suffix : dns_table.known_suffixes()) { + auto matcher_ptr = std::make_unique(suffix); + known_suffixes_.push_back(std::move(matcher_ptr)); + } + } - virtual_domains_.emplace(virtual_domain.name(), std::move(addresses)); + forward_queries_ = config.has_client_config(); + if (forward_queries_) { + const auto& client_config = config.client_config(); + const auto& upstream_resolvers = client_config.upstream_resolvers(); + resolvers_.reserve(upstream_resolvers.size()); + for (const auto& resolver : upstream_resolvers) { + auto ipaddr = Network::Utility::parseInternetAddress(resolver, 0 /* port */); + resolvers_.push_back(std::move(ipaddr)); } + resolver_timeout_ = std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT( + client_config, resolver_timeout, DEFAULT_RESOLVER_TIMEOUT.count())); } } void DnsFilter::onData(Network::UdpRecvData& client_request) { - // Handle incoming request and respond with an answer - UNREFERENCED_PARAMETER(client_request); + // Parse the query, if it fails return an response to the client + DnsQueryContextPtr query_context = message_parser_.createQueryContext(client_request); + if (!query_context->parse_status_) { + sendDnsResponse(std::move(query_context)); + return; + } + + // TODO(abaptiste): Resolve the requested name + + // Send an answer to the client + sendDnsResponse(std::move(query_context)); +} + +void DnsFilter::sendDnsResponse(DnsQueryContextPtr query_context) { + Buffer::OwnedImpl response; + // TODO(abaptiste): serialize and return a response to the client + + Network::UdpSendData response_data{query_context->local_->ip(), *(query_context->peer_), + response}; + listener_.send(response_data); } void DnsFilter::onReceiveError(Api::IoError::IoErrorCode error_code) { - // Increment error stats UNREFERENCED_PARAMETER(error_code); } diff --git a/source/extensions/filters/udp/dns_filter/dns_filter.h b/source/extensions/filters/udp/dns_filter/dns_filter.h index f62d0c8162ba..248af66a0f59 100644 --- a/source/extensions/filters/udp/dns_filter/dns_filter.h +++ b/source/extensions/filters/udp/dns_filter/dns_filter.h @@ -1,12 +1,17 @@ #pragma once -#include "envoy/config/filter/udp/dns_filter/v2alpha/dns_filter.pb.h" +#include "envoy/event/file_event.h" +#include "envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.pb.h" #include "envoy/network/filter.h" #include "common/buffer/buffer_impl.h" +#include "common/common/matchers.h" #include "common/config/config_provider_impl.h" #include "common/network/utility.h" -#include "common/runtime/runtime_impl.h" + +#include "extensions/filters/udp/dns_filter/dns_parser.h" + +#include "absl/container/flat_hash_set.h" namespace Envoy { namespace Extensions { @@ -14,7 +19,7 @@ namespace UdpFilters { namespace DnsFilter { /** - * All Dns Filter stats. @see stats_macros.h + * All DNS Filter stats. @see stats_macros.h * Track the number of answered and un-answered queries for A and AAAA records */ #define ALL_DNS_FILTER_STATS(COUNTER) \ @@ -26,23 +31,32 @@ namespace DnsFilter { COUNTER(answers_aaaa_record) /** - * Struct definition for all Dns Filter stats. 
@see stats_macros.h + * Struct definition for all DNS Filter stats. @see stats_macros.h */ struct DnsFilterStats { ALL_DNS_FILTER_STATS(GENERATE_COUNTER_STRUCT) }; -using DnsAddressList = std::vector; -using DnsVirtualDomainConfig = absl::flat_hash_map; +using DnsVirtualDomainConfig = absl::flat_hash_map; +/** + * DnsFilter configuration class abstracting access to data necessary for the filter's operation + */ class DnsFilterEnvoyConfig { public: DnsFilterEnvoyConfig( Server::Configuration::ListenerFactoryContext& context, - const envoy::config::filter::udp::dns_filter::v2alpha::DnsFilterConfig& config); + const envoy::extensions::filters::udp::dns_filter::v3alpha::DnsFilterConfig& config); DnsFilterStats& stats() const { return stats_; } - DnsVirtualDomainConfig& domains() const { return virtual_domains_; } + const DnsVirtualDomainConfig& domains() const { return virtual_domains_; } + const std::vector& knownSuffixes() const { return known_suffixes_; } + const absl::flat_hash_map& domainTtl() const { + return domain_ttl_; + } + const AddressConstPtrVec& resolvers() const { return resolvers_; } + bool forwardQueries() const { return forward_queries_; } + const std::chrono::milliseconds resolverTimeout() const { return resolver_timeout_; } private: static DnsFilterStats generateStats(const std::string& stat_prefix, Stats::Scope& scope) { @@ -52,11 +66,23 @@ class DnsFilterEnvoyConfig { Stats::Scope& root_scope_; mutable DnsFilterStats stats_; - mutable DnsVirtualDomainConfig virtual_domains_; + DnsVirtualDomainConfig virtual_domains_; + std::vector known_suffixes_; + absl::flat_hash_map domain_ttl_; + bool forward_queries_; + AddressConstPtrVec resolvers_; + std::chrono::milliseconds resolver_timeout_; }; using DnsFilterEnvoyConfigSharedPtr = std::shared_ptr; +enum class DnsLookupResponseCode { Success, Failure, External }; + +/** + * This class is responsible for handling incoming DNS datagrams and responding to the queries. + * The filter will attempt to resolve the query via its configuration or direct to an external + * resolver when necessary + */ class DnsFilter : public Network::UdpListenerReadFilter, Logger::Loggable { public: DnsFilter(Network::UdpReadFilterCallbacks& callbacks, const DnsFilterEnvoyConfigSharedPtr& config) @@ -66,10 +92,46 @@ class DnsFilter : public Network::UdpListenerReadFilter, Logger::Loggable +#include + +#include "envoy/network/address.h" + +#include "common/common/empty_string.h" +#include "common/network/address_impl.h" +#include "common/network/utility.h" + +// TODO(abaptiste): add fuzzing tests for DNS message parsing +#include "ares.h" + +namespace Envoy { +namespace Extensions { +namespace UdpFilters { +namespace DnsFilter { + +void BaseDnsRecord::serializeName(Buffer::OwnedImpl& output) { + // Iterate over a name e.g. "www.domain.com" once and produce a buffer containing each name + // segment prefixed by its length + static constexpr char SEPARATOR('.'); + + size_t last = 0; + size_t count = name_.find_first_of(SEPARATOR); + auto iter = name_.begin(); + + while (count != std::string::npos) { + count -= last; + output.writeBEInt(count); + for (size_t i = 0; i < count; i++) { + output.writeByte(*iter); + ++iter; + } + + // periods are not serialized. Skip to the next character + if (*iter == SEPARATOR) { + ++iter; + } + + // Move our last marker to the first position after where we stopped. 
Search for the next name + // separator + last += count; + ++last; + count = name_.find_first_of(SEPARATOR, last); + } + + // Write the remaining segment prepended by its length + count = name_.size() - last; + output.writeBEInt(count); + for (size_t i = 0; i < count; i++) { + output.writeByte(*iter); + ++iter; + } + + // Terminate the name record with a null byte + output.writeByte(0x00); +} + +// Serialize a DNS Query Record +void DnsQueryRecord::serialize(Buffer::OwnedImpl& output) { + serializeName(output); + output.writeBEInt(type_); + output.writeBEInt(class_); +} + +DnsQueryContextPtr DnsMessageParser::createQueryContext(Network::UdpRecvData& client_request) { + DnsQueryContextPtr query_context = std::make_unique( + client_request.addresses_.local_, client_request.addresses_.peer_); + + query_context->parse_status_ = parseDnsObject(query_context, client_request.buffer_); + if (!query_context->parse_status_) { + ENVOY_LOG(debug, "Unable to parse query buffer from '{}' into a DNS object.", + client_request.addresses_.peer_->ip()->addressAsString()); + } + + return query_context; +} + +bool DnsMessageParser::parseDnsObject(DnsQueryContextPtr& context, + const Buffer::InstancePtr& buffer) { + static constexpr uint64_t field_size = sizeof(uint16_t); + size_t available_bytes = buffer->length(); + uint64_t offset = 0; + uint16_t data; + DnsQueryParseState state{DnsQueryParseState::Init}; + + header_ = {}; + while (state != DnsQueryParseState::Finish) { + // Ensure that we have enough data remaining in the buffer to parse the query + if (available_bytes < field_size) { + ENVOY_LOG(debug, + "Exhausted available bytes in the buffer. Insufficient data to parse query field."); + return false; + } + + // Each aggregate DNS header field is 2 bytes wide. + data = buffer->peekBEInt(offset); + offset += field_size; + available_bytes -= field_size; + + if (offset > buffer->length()) { + ENVOY_LOG(debug, "Buffer read offset [{}] is beyond buffer length [{}].", offset, + buffer->length()); + return false; + } + + switch (state) { + case DnsQueryParseState::Init: + header_.id = data; + state = DnsQueryParseState::Flags; + break; + + case DnsQueryParseState::Flags: + ::memcpy(static_cast(&header_.flags), &data, sizeof(uint16_t)); + state = DnsQueryParseState::Questions; + break; + + case DnsQueryParseState::Questions: + header_.questions = data; + state = DnsQueryParseState::Answers; + break; + + case DnsQueryParseState::Answers: + header_.answers = data; + state = DnsQueryParseState::Authority; + break; + + case DnsQueryParseState::Authority: + header_.authority_rrs = data; + state = DnsQueryParseState::Authority2; + break; + + case DnsQueryParseState::Authority2: + header_.additional_rrs = data; + state = DnsQueryParseState::Finish; + break; + + case DnsQueryParseState::Finish: + break; + } + } + + // TODO(abaptiste): Verify that queries do not contain answer records + // Verify that we still have available data in the buffer to read answer and query records + if (offset > buffer->length()) { + ENVOY_LOG(debug, "Buffer read offset[{}] is larget than buffer length [{}].", offset, + buffer->length()); + return false; + } + + context->id_ = static_cast(header_.id); + + // Almost always, we will have only one query here. 
Per the RFC, QDCOUNT is usually 1 + context->queries_.reserve(header_.questions); + for (auto index = 0; index < header_.questions; index++) { + ENVOY_LOG(trace, "Parsing [{}/{}] questions", index, header_.questions); + auto rec = parseDnsQueryRecord(buffer, &offset); + if (rec == nullptr) { + ENVOY_LOG(debug, "Couldn't parse query record from buffer"); + return false; + } + context->queries_.push_back(std::move(rec)); + } + + return true; +} + +const std::string DnsMessageParser::parseDnsNameRecord(const Buffer::InstancePtr& buffer, + uint64_t* available_bytes, + uint64_t* name_offset) { + void* buf = buffer->linearize(static_cast(buffer->length())); + const unsigned char* linearized_data = static_cast(buf); + const unsigned char* record = linearized_data + *name_offset; + long encoded_len; + char* output; + + int result = ares_expand_name(record, linearized_data, buffer->length(), &output, &encoded_len); + if (result != ARES_SUCCESS) { + return EMPTY_STRING; + } + + std::string name(output); + ares_free_string(output); + *name_offset += encoded_len; + *available_bytes -= encoded_len; + + return name; +} + +DnsQueryRecordPtr DnsMessageParser::parseDnsQueryRecord(const Buffer::InstancePtr& buffer, + uint64_t* offset) { + uint64_t name_offset = *offset; + uint64_t available_bytes = buffer->length() - name_offset; + + const std::string record_name = parseDnsNameRecord(buffer, &available_bytes, &name_offset); + if (record_name.empty()) { + ENVOY_LOG(debug, "Unable to parse name record from buffer"); + return nullptr; + } + + if (available_bytes < 2 * sizeof(uint16_t)) { + ENVOY_LOG(debug, "Insufficient data in buffer to read query record type and class. "); + return nullptr; + } + + // Read the record type (A or AAAA) + uint16_t record_type; + record_type = buffer->peekBEInt(name_offset); + name_offset += sizeof(record_type); + + // Read the record class. This value is almost always 1 for internet address records + uint16_t record_class; + record_class = buffer->peekBEInt(name_offset); + name_offset += sizeof(record_class); + + auto rec = std::make_unique(record_name, record_type, record_class); + + // stop reading he buffer here since we aren't parsing additional records + ENVOY_LOG(trace, "Extracted query record. Name: {} type: {} class: {}", rec->name_, rec->type_, + rec->class_); + + *offset = name_offset; + + return rec; +} + +} // namespace DnsFilter +} // namespace UdpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/udp/dns_filter/dns_parser.h b/source/extensions/filters/udp/dns_filter/dns_parser.h new file mode 100644 index 000000000000..6b41321561d2 --- /dev/null +++ b/source/extensions/filters/udp/dns_filter/dns_parser.h @@ -0,0 +1,183 @@ +#pragma once + +#include "envoy/buffer/buffer.h" +#include "envoy/common/platform.h" +#include "envoy/network/address.h" +#include "envoy/network/listener.h" + +#include "common/buffer/buffer_impl.h" + +namespace Envoy { +namespace Extensions { +namespace UdpFilters { +namespace DnsFilter { + +constexpr uint16_t DNS_RECORD_CLASS_IN = 1; +constexpr uint16_t DNS_RECORD_TYPE_A = 1; +constexpr uint16_t DNS_RECORD_TYPE_AAAA = 28; + +constexpr uint16_t DNS_RESPONSE_CODE_NO_ERROR = 0; +constexpr uint16_t DNS_RESPONSE_CODE_FORMAT_ERROR = 1; +constexpr uint16_t DNS_RESPONSE_CODE_SERVER_FAILURE = 2; +constexpr uint16_t DNS_RESPONSE_CODE_NAME_ERROR = 3; +constexpr uint16_t DNS_RESPONSE_CODE_NOT_IMPLEMENTED = 4; + +/** + * BaseDnsRecord contains the fields and functions common to both query and answer records. 
+ */ +class BaseDnsRecord { +public: + BaseDnsRecord(const std::string& rec_name, const uint16_t rec_type, const uint16_t rec_class) + : name_(rec_name), type_(rec_type), class_(rec_class){}; + + virtual ~BaseDnsRecord() = default; + void serializeName(Buffer::OwnedImpl& output); + virtual void serialize(Buffer::OwnedImpl& output) PURE; + + const std::string name_; + const uint16_t type_; + const uint16_t class_; +}; + +/** + * DnsQueryRecord represents a query record parsed from a DNS request from a client. Each record + * contains the ID, domain requested and the flags dictating the type of record that is sought. + */ +class DnsQueryRecord : public BaseDnsRecord { +public: + DnsQueryRecord(const std::string& rec_name, const uint16_t rec_type, const uint16_t rec_class) + : BaseDnsRecord(rec_name, rec_type, rec_class) {} + void serialize(Buffer::OwnedImpl& output) override; +}; + +using DnsQueryRecordPtr = std::unique_ptr; +using DnsQueryPtrVec = std::vector; + +using AddressConstPtrVec = std::vector; +using AnswerCallback = std::function; + +/** + * DnsAnswerRecord represents a single answer record for a name that is to be serialized and sent to + * a client. This class differs from the BaseDnsRecord and DnsQueryRecord because it contains + * additional fields for the TTL and address. + */ +class DnsAnswerRecord : public BaseDnsRecord { +public: + DnsAnswerRecord(const std::string& query_name, const uint16_t rec_type, const uint16_t rec_class, + const uint32_t ttl, Network::Address::InstanceConstSharedPtr ipaddr) + : BaseDnsRecord(query_name, rec_type, rec_class), ttl_(ttl), ip_addr_(ipaddr) {} + void serialize(Buffer::OwnedImpl& output) override { UNREFERENCED_PARAMETER(output); } + + const uint32_t ttl_; + const Network::Address::InstanceConstSharedPtr ip_addr_; +}; + +using DnsAnswerRecordPtr = std::unique_ptr; +using DnsAnswerMap = std::unordered_multimap; + +/** + * DnsQueryContext contains all the data associated with a query. The filter uses this object to + * generate a response and determine where it should be transmitted. + */ +class DnsQueryContext { +public: + DnsQueryContext(Network::Address::InstanceConstSharedPtr local, + Network::Address::InstanceConstSharedPtr peer) + : local_(std::move(local)), peer_(std::move(peer)), parse_status_(false), id_() {} + + const Network::Address::InstanceConstSharedPtr local_; + const Network::Address::InstanceConstSharedPtr peer_; + bool parse_status_; + uint16_t id_; + DnsQueryPtrVec queries_; + DnsAnswerMap answers_; +}; + +using DnsQueryContextPtr = std::unique_ptr; + +/** + * This class orchestrates parsing a DNS query and building the response to be sent to a client. + */ +class DnsMessageParser : public Logger::Loggable { +public: + enum class DnsQueryParseState { + Init = 0, + Flags, // 2 bytes + Questions, // 2 bytes + Answers, // 2 bytes + Authority, // 2 bytes + Authority2, // 2 bytes + Finish + }; + + // These flags have been verified with dig. 
The flag order does not match the RFC, but takes byte + // ordering into account so that serialization does not need bitwise operations + PACKED_STRUCT(struct DnsHeaderFlags { + unsigned rcode : 4; // return code + unsigned cd : 1; // checking disabled + unsigned ad : 1; // authenticated data + unsigned z : 1; // z - bit (must be zero in queries per RFC1035) + unsigned ra : 1; // recursion available + unsigned rd : 1; // recursion desired + unsigned tc : 1; // truncated response + unsigned aa : 1; // authoritative answer + unsigned opcode : 4; // operation code + unsigned qr : 1; // query or response + }); + + /** + * Structure representing the DNS header as it appears in a packet + * See https://www.ietf.org/rfc/rfc1035.txt for more details + */ + PACKED_STRUCT(struct DnsHeader { + uint16_t id; + struct DnsHeaderFlags flags; + uint16_t questions; + uint16_t answers; + uint16_t authority_rrs; + uint16_t additional_rrs; + }); + + /** + * @brief parse a single query record from a client request + * + * @param buffer a reference to the incoming request object received by the listener + * @param offset the buffer offset at which parsing is to begin. This parameter is updated when + * one record is parsed from the buffer and returned to the caller. + * @return DnsQueryRecordPtr a pointer to a DnsQueryRecord object containing all query data parsed + * from the buffer + */ + DnsQueryRecordPtr parseDnsQueryRecord(const Buffer::InstancePtr& buffer, uint64_t* offset); + + /** + * @return uint16_t the response code flag value from a parsed dns object + */ + uint16_t getQueryResponseCode() { return static_cast(header_.flags.rcode); } + + /** + * @brief Create a context object for handling a DNS Query + * + * @param client_request the context containing the client addressing and the buffer with the DNS + * query contents + */ + DnsQueryContextPtr createQueryContext(Network::UdpRecvData& client_request); + +private: + /** + * @param buffer a reference to the incoming request object received by the listener + * @return bool true if all DNS records and flags were successfully parsed from the buffer + */ + bool parseDnsObject(DnsQueryContextPtr& context, const Buffer::InstancePtr& buffer); + + const std::string parseDnsNameRecord(const Buffer::InstancePtr& buffer, uint64_t* available_bytes, + uint64_t* name_offset); + + DnsHeader header_; +}; + +using DnsMessageParserPtr = std::unique_ptr; + +} // namespace DnsFilter +} // namespace UdpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/udp/dns_filter/BUILD b/test/extensions/filters/udp/dns_filter/BUILD index a7c21842cd4c..f19a863d0dd9 100644 --- a/test/extensions/filters/udp/dns_filter/BUILD +++ b/test/extensions/filters/udp/dns_filter/BUILD @@ -7,19 +7,32 @@ load( load( "//test/extensions:extensions_build_system.bzl", "envoy_extension_cc_test", + "envoy_extension_cc_test_library", ) envoy_package() +envoy_extension_cc_test_library( + name = "dns_filter_test_lib", + srcs = ["dns_filter_test_utils.cc"], + hdrs = ["dns_filter_test_utils.h"], + extension_name = "envoy.filters.udp_listener.dns_filter", + deps = [ + "//source/extensions/filters/udp/dns_filter:dns_filter_lib", + "//test/test_common:environment_lib", + ], +) + envoy_extension_cc_test( name = "dns_filter_test", srcs = ["dns_filter_test.cc"], extension_name = "envoy.filters.udp_listener.dns_filter", deps = [ + ":dns_filter_test_lib", "//source/extensions/filters/udp/dns_filter:dns_filter_lib", "//test/mocks/server:server_mocks", 
"//test/mocks/upstream:upstream_mocks", "//test/test_common:environment_lib", - "@envoy_api//envoy/config/filter/udp/dns_filter/v2alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg_cc_proto", ], ) diff --git a/test/extensions/filters/udp/dns_filter/dns_filter_test.cc b/test/extensions/filters/udp/dns_filter/dns_filter_test.cc index 72f349ff196a..9b852cfb1873 100644 --- a/test/extensions/filters/udp/dns_filter/dns_filter_test.cc +++ b/test/extensions/filters/udp/dns_filter/dns_filter_test.cc @@ -1,13 +1,13 @@ -#include "envoy/config/filter/udp/dns_filter/v2alpha/dns_filter.pb.h" -#include "envoy/config/filter/udp/dns_filter/v2alpha/dns_filter.pb.validate.h" +#include "envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.pb.h" +#include "envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.pb.validate.h" #include "common/common/logger.h" -#include "extensions/filters/udp/dns_filter/dns_filter.h" - +#include "test/mocks/event/mocks.h" #include "test/mocks/server/mocks.h" #include "test/test_common/environment.h" +#include "dns_filter_test_utils.h" #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -21,28 +21,55 @@ namespace UdpFilters { namespace DnsFilter { namespace { +Api::IoCallUint64Result makeNoError(uint64_t rc) { + auto no_error = Api::ioCallUint64ResultNoError(); + no_error.rc_ = rc; + return no_error; +} + class DnsFilterTest : public testing::Test { public: DnsFilterTest() : listener_address_(Network::Utility::parseInternetAddressAndPort("127.0.2.1:5353")) { + response_parser_ = std::make_unique(); - Logger::Registry::setLogLevel(spdlog::level::info); + client_request_.addresses_.local_ = listener_address_; + client_request_.addresses_.peer_ = listener_address_; + client_request_.buffer_ = std::make_unique(); EXPECT_CALL(callbacks_, udpListener()).Times(AtLeast(0)); + EXPECT_CALL(callbacks_.udp_listener_, send(_)) + .WillRepeatedly( + Invoke([this](const Network::UdpSendData& send_data) -> Api::IoCallUint64Result { + client_request_.buffer_->move(send_data.buffer_); + return makeNoError(client_request_.buffer_->length()); + })); + EXPECT_CALL(callbacks_.udp_listener_, dispatcher()).WillRepeatedly(ReturnRef(dispatcher_)); } ~DnsFilterTest() override { EXPECT_CALL(callbacks_.udp_listener_, onDestroy()); } void setup(const std::string& yaml) { - envoy::config::filter::udp::dns_filter::v2alpha::DnsFilterConfig config; + envoy::extensions::filters::udp::dns_filter::v3alpha::DnsFilterConfig config; TestUtility::loadFromYamlAndValidate(yaml, config); auto store = stats_store_.createScope("dns_scope"); EXPECT_CALL(listener_factory_, scope()).WillOnce(ReturnRef(*store)); + EXPECT_CALL(listener_factory_, dispatcher()).Times(AtLeast(0)); + EXPECT_CALL(listener_factory_, clusterManager()).Times(AtLeast(0)); config_ = std::make_shared(listener_factory_, config); filter_ = std::make_unique(callbacks_, config_); } + void sendQueryFromClient(const std::string& peer_address, const std::string& buffer) { + Network::UdpRecvData data; + data.addresses_.peer_ = Network::Utility::parseInternetAddressAndPort(peer_address); + data.addresses_.local_ = listener_address_; + data.buffer_ = std::make_unique(buffer); + data.receive_time_ = MonotonicTime(std::chrono::seconds(0)); + filter_->onData(data); + } + const Network::Address::InstanceConstSharedPtr listener_address_; Server::Configuration::MockListenerFactoryContext listener_factory_; DnsFilterEnvoyConfigSharedPtr config_; @@ -50,37 +77,65 @@ class DnsFilterTest : public testing::Test { std::unique_ptr 
filter_; Network::MockUdpReadFilterCallbacks callbacks_; Stats::IsolatedStoreImpl stats_store_; - Runtime::RandomGeneratorImpl rng_; + Network::UdpRecvData client_request_; + + std::unique_ptr response_parser_; + Event::MockDispatcher dispatcher_; - const std::string config_yaml = R"EOF( + DnsQueryContextPtr query_ctx_; + + const std::string forward_query_off_config = R"EOF( stat_prefix: "my_prefix" server_config: inline_dns_table: external_retry_count: 3 + known_suffixes: + - suffix: foo1.com + - suffix: foo2.com virtual_domains: - - name: "www.foo1.com" - endpoint: - address_list: - address: - - 10.0.0.1 - - 10.0.0.2 - - name: "www.foo2.com" - endpoint: - address_list: - address: - - 2001:8a:c1::2800:7 - - name: "www.foo3.com" - endpoint: - address_list: - address: - - 10.0.3.1 + - name: "www.foo1.com" + endpoint: + address_list: + address: + - "10.0.0.1" + - "10.0.0.2" + - name: "www.foo2.com" + endpoint: + address_list: + address: + - "2001:8a:c1::2800:7" + - "2001:8a:c1::2800:8" + - "2001:8a:c1::2800:9" + - name: "www.foo3.com" + endpoint: + address_list: + address: + - "10.0.3.1" )EOF"; }; -TEST_F(DnsFilterTest, TestConfig) { +TEST_F(DnsFilterTest, InvalidQuery) { + InSequence s; + + setup(forward_query_off_config); + sendQueryFromClient("10.0.0.1:1000", "hello"); + query_ctx_ = response_parser_->createQueryContext(client_request_); + ASSERT_FALSE(query_ctx_->parse_status_); +} + +TEST_F(DnsFilterTest, SingleTypeAQuery) { InSequence s; - setup(config_yaml); + setup(forward_query_off_config); + const std::string domain("www.foo3.com"); + const std::string query = + Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN); + ASSERT_FALSE(query.empty()); + sendQueryFromClient("10.0.0.1:1000", query); + query_ctx_ = response_parser_->createQueryContext(client_request_); + + // This will fail since the response generation is not being done yet + ASSERT_FALSE(query_ctx_->parse_status_); } } // namespace diff --git a/test/extensions/filters/udp/dns_filter/dns_filter_test_utils.cc b/test/extensions/filters/udp/dns_filter/dns_filter_test_utils.cc new file mode 100644 index 000000000000..3efbeeefdbb3 --- /dev/null +++ b/test/extensions/filters/udp/dns_filter/dns_filter_test_utils.cc @@ -0,0 +1,67 @@ +#include "dns_filter_test_utils.h" + +#include "common/runtime/runtime_impl.h" + +#include "test/test_common/utility.h" + +namespace Envoy { +namespace Extensions { +namespace UdpFilters { +namespace DnsFilter { +namespace Utils { + +std::string buildQueryForDomain(const std::string& name, uint16_t rec_type, uint16_t rec_class) { + Runtime::RandomGeneratorImpl random_; + struct DnsMessageParser::DnsHeader query {}; + uint16_t id = random_.random() & 0xFFFF; + + // Generate a random query ID + query.id = id; + + // Signify that this is a query + query.flags.qr = 0; + + // This should usually be zero + query.flags.opcode = 0; + + query.flags.aa = 0; + query.flags.tc = 0; + + // Set Recursion flags (at least one bit set so that the flags are not all zero) + query.flags.rd = 1; + query.flags.ra = 0; + + // reserved flag is not set + query.flags.z = 0; + + // Set the authenticated flags to zero + query.flags.ad = 0; + query.flags.cd = 0; + + query.questions = 1; + query.answers = 0; + query.authority_rrs = 0; + query.additional_rrs = 0; + + Buffer::OwnedImpl buffer_; + buffer_.writeBEInt(query.id); + + uint16_t flags; + ::memcpy(&flags, static_cast(&query.flags), sizeof(uint16_t)); + buffer_.writeBEInt(flags); + + buffer_.writeBEInt(query.questions); + 
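+ // The remaining three 16-bit counts (ANCOUNT, NSCOUNT and ARCOUNT in RFC 1035 terms) are zero
+ // for a request: a simple query carries a single question and no answer, authority or
+ // additional records.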
buffer_.writeBEInt(query.answers); + buffer_.writeBEInt(query.authority_rrs); + buffer_.writeBEInt(query.additional_rrs); + + DnsQueryRecord query_rec(name, rec_type, rec_class); + query_rec.serialize(buffer_); + return buffer_.toString(); +} + +} // namespace Utils +} // namespace DnsFilter +} // namespace UdpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/udp/dns_filter/dns_filter_test_utils.h b/test/extensions/filters/udp/dns_filter/dns_filter_test_utils.h new file mode 100644 index 000000000000..d27f5e000438 --- /dev/null +++ b/test/extensions/filters/udp/dns_filter/dns_filter_test_utils.h @@ -0,0 +1,19 @@ +#pragma once + +#include "extensions/filters/udp/dns_filter/dns_filter.h" + +namespace Envoy { +namespace Extensions { +namespace UdpFilters { +namespace DnsFilter { +namespace Utils { + +static constexpr uint64_t MAX_UDP_DNS_SIZE{512}; + +std::string buildQueryForDomain(const std::string& name, uint16_t rec_type, uint16_t rec_class); + +} // namespace Utils +} // namespace DnsFilter +} // namespace UdpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/tools/spelling/spelling_dictionary.txt b/tools/spelling/spelling_dictionary.txt index 02f996cbbdfa..e5af042bf9cd 100644 --- a/tools/spelling/spelling_dictionary.txt +++ b/tools/spelling/spelling_dictionary.txt @@ -228,6 +228,7 @@ PROT Postgre Postgres Prereq +QDCOUNT QUIC QoS RAII From 8b1889640751af0ee8098e900c22ccae5b77dd83 Mon Sep 17 00:00:00 2001 From: rulex123 <29862113+rulex123@users.noreply.github.com> Date: Fri, 1 May 2020 20:13:57 +0200 Subject: [PATCH 077/909] admin: class that holding context for handlers (#11029) Create a class that captures context info for admin handlers, and use it as a base class for StatsHandler. Part of refactoring for #5505. 
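As a rough sketch of how the new base class is meant to be reused (the RuntimeHandler name and its endpoint are hypothetical examples, chosen only for illustration), any future group of admin handlers can derive from HandlerContextBase and be bound with MAKE_ADMIN_HANDLER the same way StatsHandler is below:

#include "server/http/handler_ctx.h"

namespace Envoy {
namespace Server {

// Hypothetical handler group reusing the shared context introduced by this change.
class RuntimeHandler : public HandlerContextBase {
public:
  RuntimeHandler(Server::Instance& server) : HandlerContextBase(server) {}

  // server_ comes from HandlerContextBase, so the handler signature no longer needs a
  // Server::Instance& parameter.
  Http::Code handlerRuntimeReset(absl::string_view, Http::ResponseHeaderMap&,
                                 Buffer::Instance& response, AdminStream&) {
    response.add("OK\n");
    return Http::Code::OK;
  }
};

} // namespace Server
} // namespace Envoy

Registration in AdminImpl would then use MAKE_ADMIN_HANDLER(runtime_handler_.handlerRuntimeReset), matching the stats handler entries updated in this patch.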
Signed-off-by: Erica Manno --- source/server/http/BUILD | 9 +++++ source/server/http/admin.cc | 27 ++++++--------- source/server/http/admin.h | 19 ++--------- source/server/http/handler_ctx.h | 17 +++++++++ source/server/http/stats_handler.cc | 52 +++++++++++++--------------- source/server/http/stats_handler.h | 53 ++++++++++++++--------------- 6 files changed, 88 insertions(+), 89 deletions(-) create mode 100644 source/server/http/handler_ctx.h diff --git a/source/server/http/BUILD b/source/server/http/BUILD index 7d46ea39d930..e849e1b2bfb8 100644 --- a/source/server/http/BUILD +++ b/source/server/http/BUILD @@ -85,11 +85,20 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "handler_ctx_lib", + hdrs = ["handler_ctx.h"], + deps = [ + "//include/envoy/server:instance_interface", + ], +) + envoy_cc_library( name = "stats_handler_lib", srcs = ["stats_handler.cc"], hdrs = ["stats_handler.h"], deps = [ + ":handler_ctx_lib", ":prometheus_stats_lib", ":utils_lib", "//include/envoy/http:codes_interface", diff --git a/source/server/http/admin.cc b/source/server/http/admin.cc index 3a8f7a563ce1..a9bcab103f4c 100644 --- a/source/server/http/admin.cc +++ b/source/server/http/admin.cc @@ -50,7 +50,6 @@ #include "common/router/config_impl.h" #include "common/upstream/host_utility.h" -#include "server/http/stats_handler.h" #include "server/http/utils.h" #include "extensions/access_loggers/file/file_access_log_impl.h" @@ -992,7 +991,7 @@ AdminImpl::AdminImpl(const std::string& profile_path, Server::Instance& server) tracing_stats_( Http::ConnectionManagerImpl::generateTracingStats("http.admin.", no_op_store_)), route_config_provider_(server.timeSource()), - scoped_route_config_provider_(server.timeSource()), + scoped_route_config_provider_(server.timeSource()), stats_handler_(server), // TODO(jsedgwick) add /runtime_reset endpoint that removes all admin-set values handlers_{ {"/", "Admin home page", MAKE_ADMIN_HANDLER(handlerAdminHome), false, false}, @@ -1021,25 +1020,26 @@ AdminImpl::AdminImpl(const std::string& profile_path, Server::Instance& server) false, false}, {"/quitquitquit", "exit the server", MAKE_ADMIN_HANDLER(handlerQuitQuitQuit), false, true}, - {"/reset_counters", "reset all counters to zero", StatsHandler::handlerResetCounters, - false, true}, + {"/reset_counters", "reset all counters to zero", + MAKE_ADMIN_HANDLER(stats_handler_.handlerResetCounters), false, true}, {"/drain_listeners", "drain listeners", MAKE_ADMIN_HANDLER(handlerDrainListeners), false, true}, {"/server_info", "print server version/status information", MAKE_ADMIN_HANDLER(handlerServerInfo), false, false}, {"/ready", "print server state, return 200 if LIVE, otherwise return 503", MAKE_ADMIN_HANDLER(handlerReady), false, false}, - {"/stats", "print server stats", StatsHandler::handlerStats, false, false}, + {"/stats", "print server stats", MAKE_ADMIN_HANDLER(stats_handler_.handlerStats), false, + false}, {"/stats/prometheus", "print server stats in prometheus format", - StatsHandler::handlerPrometheusStats, false, false}, + MAKE_ADMIN_HANDLER(stats_handler_.handlerPrometheusStats), false, false}, {"/stats/recentlookups", "Show recent stat-name lookups", - StatsHandler::handlerStatsRecentLookups, false, false}, + MAKE_ADMIN_HANDLER(stats_handler_.handlerStatsRecentLookups), false, false}, {"/stats/recentlookups/clear", "clear list of stat-name lookups and counter", - StatsHandler::handlerStatsRecentLookupsClear, false, true}, + MAKE_ADMIN_HANDLER(stats_handler_.handlerStatsRecentLookupsClear), false, true}, 
{"/stats/recentlookups/disable", "disable recording of reset stat-name lookup names", - StatsHandler::handlerStatsRecentLookupsDisable, false, true}, + MAKE_ADMIN_HANDLER(stats_handler_.handlerStatsRecentLookupsDisable), false, true}, {"/stats/recentlookups/enable", "enable recording of reset stat-name lookup names", - StatsHandler::handlerStatsRecentLookupsEnable, false, true}, + MAKE_ADMIN_HANDLER(stats_handler_.handlerStatsRecentLookupsEnable), false, true}, {"/listeners", "print listener info", MAKE_ADMIN_HANDLER(handlerListenerInfo), false, false}, {"/runtime", "print runtime values", MAKE_ADMIN_HANDLER(handlerRuntime), false, false}, @@ -1101,12 +1101,7 @@ Http::Code AdminImpl::runCallback(absl::string_view path_and_query, break; } } - if (handler.requires_server_) { - code = handler.handler_with_server_(path_and_query, response_headers, response, - admin_stream, server_); - } else { - code = handler.handler_(path_and_query, response_headers, response, admin_stream); - } + code = handler.handler_(path_and_query, response_headers, response, admin_stream); Memory::Utils::tryShrinkHeap(); break; } diff --git a/source/server/http/admin.h b/source/server/http/admin.h index 464648d1476e..ca80e8b5bc16 100644 --- a/source/server/http/admin.h +++ b/source/server/http/admin.h @@ -39,6 +39,7 @@ #include "server/http/admin_filter.h" #include "server/http/config_tracker_impl.h" +#include "server/http/stats_handler.h" #include "extensions/filters/http/common/pass_through_filter.h" @@ -179,33 +180,16 @@ class AdminImpl : public Admin, }; } - using HandlerWithServerCb = std::function; - private: /** * Individual admin handler including prefix, help text, and callback. */ struct UrlHandler { - UrlHandler(std::string prefix, std::string help_text, HandlerCb handler, bool removable, - bool mutates_server_state) - : prefix_(prefix), help_text_(help_text), handler_(handler), removable_(removable), - mutates_server_state_(mutates_server_state), requires_server_(false) {} - - UrlHandler(std::string prefix, std::string help_text, HandlerWithServerCb handler_with_server, - bool removable, bool mutates_server_state) - : prefix_(prefix), help_text_(help_text), handler_with_server_(handler_with_server), - removable_(removable), mutates_server_state_(mutates_server_state), - requires_server_(true) {} - const std::string prefix_; const std::string help_text_; const HandlerCb handler_; - const HandlerWithServerCb handler_with_server_; const bool removable_; const bool mutates_server_state_; - const bool requires_server_; }; /** @@ -458,6 +442,7 @@ class AdminImpl : public Admin, Http::ConnectionManagerTracingStats tracing_stats_; NullRouteConfigProvider route_config_provider_; NullScopedRouteConfigProvider scoped_route_config_provider_; + Server::StatsHandler stats_handler_; std::list handlers_; const uint32_t max_request_headers_kb_{Http::DEFAULT_MAX_REQUEST_HEADERS_KB}; const uint32_t max_request_headers_count_{Http::DEFAULT_MAX_HEADERS_COUNT}; diff --git a/source/server/http/handler_ctx.h b/source/server/http/handler_ctx.h new file mode 100644 index 000000000000..6ac5098213de --- /dev/null +++ b/source/server/http/handler_ctx.h @@ -0,0 +1,17 @@ +#pragma once + +#include "envoy/server/instance.h" + +namespace Envoy { +namespace Server { + +class HandlerContextBase { +public: + HandlerContextBase(Server::Instance& server) : server_(server) {} + +protected: + Server::Instance& server_; +}; + +} // namespace Server +} // namespace Envoy diff --git a/source/server/http/stats_handler.cc 
b/source/server/http/stats_handler.cc index a437bd2ac395..c792c61cfd63 100644 --- a/source/server/http/stats_handler.cc +++ b/source/server/http/stats_handler.cc @@ -13,21 +13,21 @@ namespace Server { const uint64_t RecentLookupsCapacity = 100; +StatsHandler::StatsHandler(Server::Instance& server) : HandlerContextBase(server) {} + Http::Code StatsHandler::handlerResetCounters(absl::string_view, Http::ResponseHeaderMap&, - Buffer::Instance& response, AdminStream&, - Server::Instance& server) { - for (const Stats::CounterSharedPtr& counter : server.stats().counters()) { + Buffer::Instance& response, AdminStream&) { + for (const Stats::CounterSharedPtr& counter : server_.stats().counters()) { counter->reset(); } - server.stats().symbolTable().clearRecentLookups(); + server_.stats().symbolTable().clearRecentLookups(); response.add("OK\n"); return Http::Code::OK; } Http::Code StatsHandler::handlerStatsRecentLookups(absl::string_view, Http::ResponseHeaderMap&, - Buffer::Instance& response, AdminStream&, - Server::Instance& server) { - Stats::SymbolTable& symbol_table = server.stats().symbolTable(); + Buffer::Instance& response, AdminStream&) { + Stats::SymbolTable& symbol_table = server_.stats().symbolTable(); std::string table; const uint64_t total = symbol_table.getRecentLookups([&table](absl::string_view name, uint64_t count) { @@ -43,35 +43,32 @@ Http::Code StatsHandler::handlerStatsRecentLookups(absl::string_view, Http::Resp } Http::Code StatsHandler::handlerStatsRecentLookupsClear(absl::string_view, Http::ResponseHeaderMap&, - Buffer::Instance& response, AdminStream&, - Server::Instance& server) { - server.stats().symbolTable().clearRecentLookups(); + Buffer::Instance& response, AdminStream&) { + server_.stats().symbolTable().clearRecentLookups(); response.add("OK\n"); return Http::Code::OK; } Http::Code StatsHandler::handlerStatsRecentLookupsDisable(absl::string_view, Http::ResponseHeaderMap&, - Buffer::Instance& response, AdminStream&, - Server::Instance& server) { - server.stats().symbolTable().setRecentLookupCapacity(0); + Buffer::Instance& response, + AdminStream&) { + server_.stats().symbolTable().setRecentLookupCapacity(0); response.add("OK\n"); return Http::Code::OK; } Http::Code StatsHandler::handlerStatsRecentLookupsEnable(absl::string_view, Http::ResponseHeaderMap&, - Buffer::Instance& response, AdminStream&, - Server::Instance& server) { - server.stats().symbolTable().setRecentLookupCapacity(RecentLookupsCapacity); + Buffer::Instance& response, AdminStream&) { + server_.stats().symbolTable().setRecentLookupCapacity(RecentLookupsCapacity); response.add("OK\n"); return Http::Code::OK; } Http::Code StatsHandler::handlerStats(absl::string_view url, Http::ResponseHeaderMap& response_headers, - Buffer::Instance& response, AdminStream& admin_stream, - Server::Instance& server) { + Buffer::Instance& response, AdminStream& admin_stream) { Http::Code rc = Http::Code::OK; const Http::Utility::QueryParams params = Http::Utility::parseQueryString(url); @@ -82,13 +79,13 @@ Http::Code StatsHandler::handlerStats(absl::string_view url, } std::map all_stats; - for (const Stats::CounterSharedPtr& counter : server.stats().counters()) { + for (const Stats::CounterSharedPtr& counter : server_.stats().counters()) { if (shouldShowMetric(*counter, used_only, regex)) { all_stats.emplace(counter->name(), counter->value()); } } - for (const Stats::GaugeSharedPtr& gauge : server.stats().gauges()) { + for (const Stats::GaugeSharedPtr& gauge : server_.stats().gauges()) { if (shouldShowMetric(*gauge, 
used_only, regex)) { ASSERT(gauge->importMode() != Stats::Gauge::ImportMode::Uninitialized); all_stats.emplace(gauge->name(), gauge->value()); @@ -96,7 +93,7 @@ Http::Code StatsHandler::handlerStats(absl::string_view url, } std::map text_readouts; - for (const auto& text_readout : server.stats().textReadouts()) { + for (const auto& text_readout : server_.stats().textReadouts()) { if (shouldShowMetric(*text_readout, used_only, regex)) { text_readouts.emplace(text_readout->name(), text_readout->value()); } @@ -106,9 +103,9 @@ Http::Code StatsHandler::handlerStats(absl::string_view url, if (format_value.value() == "json") { response_headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Json); response.add( - statsAsJson(all_stats, text_readouts, server.stats().histograms(), used_only, regex)); + statsAsJson(all_stats, text_readouts, server_.stats().histograms(), used_only, regex)); } else if (format_value.value() == "prometheus") { - return handlerPrometheusStats(url, response_headers, response, admin_stream, server); + return handlerPrometheusStats(url, response_headers, response, admin_stream); } else { response.add("usage: /stats?format=json or /stats?format=prometheus \n"); response.add("\n"); @@ -126,7 +123,7 @@ Http::Code StatsHandler::handlerStats(absl::string_view url, // multimap here. This makes sure that duplicate histograms get output. When shared storage is // implemented this can be switched back to a normal map. std::multimap all_histograms; - for (const Stats::ParentHistogramSharedPtr& histogram : server.stats().histograms()) { + for (const Stats::ParentHistogramSharedPtr& histogram : server_.stats().histograms()) { if (shouldShowMetric(*histogram, used_only, regex)) { all_histograms.emplace(histogram->name(), histogram->quantileSummary()); } @@ -140,16 +137,15 @@ Http::Code StatsHandler::handlerStats(absl::string_view url, Http::Code StatsHandler::handlerPrometheusStats(absl::string_view path_and_query, Http::ResponseHeaderMap&, - Buffer::Instance& response, AdminStream&, - Server::Instance& server) { + Buffer::Instance& response, AdminStream&) { const Http::Utility::QueryParams params = Http::Utility::parseQueryString(path_and_query); const bool used_only = params.find("usedonly") != params.end(); absl::optional regex; if (!Utility::filterParam(params, response, regex)) { return Http::Code::BadRequest; } - PrometheusStatsFormatter::statsAsPrometheus(server.stats().counters(), server.stats().gauges(), - server.stats().histograms(), response, used_only, + PrometheusStatsFormatter::statsAsPrometheus(server_.stats().counters(), server_.stats().gauges(), + server_.stats().histograms(), response, used_only, regex); return Http::Code::OK; } diff --git a/source/server/http/stats_handler.h b/source/server/http/stats_handler.h index 4103660689a3..104a06fc6b9e 100644 --- a/source/server/http/stats_handler.h +++ b/source/server/http/stats_handler.h @@ -11,42 +11,39 @@ #include "common/stats/histogram_impl.h" +#include "server/http/handler_ctx.h" + #include "absl/strings/string_view.h" namespace Envoy { namespace Server { -class StatsHandler { +class StatsHandler : public HandlerContextBase { public: - static Http::Code handlerResetCounters(absl::string_view path_and_query, - Http::ResponseHeaderMap& response_headers, - Buffer::Instance& response, AdminStream&, - Server::Instance& server); - static Http::Code handlerStatsRecentLookups(absl::string_view path_and_query, + StatsHandler(Server::Instance& server); + + Http::Code handlerResetCounters(absl::string_view 
path_and_query, + Http::ResponseHeaderMap& response_headers, + Buffer::Instance& response, AdminStream&); + Http::Code handlerStatsRecentLookups(absl::string_view path_and_query, + Http::ResponseHeaderMap& response_headers, + Buffer::Instance& response, AdminStream&); + Http::Code handlerStatsRecentLookupsClear(absl::string_view path_and_query, + Http::ResponseHeaderMap& response_headers, + Buffer::Instance& response, AdminStream&); + Http::Code handlerStatsRecentLookupsDisable(absl::string_view path_and_query, Http::ResponseHeaderMap& response_headers, - Buffer::Instance& response, AdminStream&, - Server::Instance& server); - static Http::Code handlerStatsRecentLookupsClear(absl::string_view path_and_query, - Http::ResponseHeaderMap& response_headers, - Buffer::Instance& response, AdminStream&, - Server::Instance& server); - static Http::Code handlerStatsRecentLookupsDisable(absl::string_view path_and_query, - Http::ResponseHeaderMap& response_headers, - Buffer::Instance& response, AdminStream&, - Server::Instance& server); - static Http::Code handlerStatsRecentLookupsEnable(absl::string_view path_and_query, - Http::ResponseHeaderMap& response_headers, - Buffer::Instance& response, AdminStream&, - Server::Instance& server); - static Http::Code handlerStats(absl::string_view path_and_query, - Http::ResponseHeaderMap& response_headers, - Buffer::Instance& response, AdminStream&, - Server::Instance& server); - static Http::Code handlerPrometheusStats(absl::string_view path_and_query, - Http::ResponseHeaderMap& response_headers, - Buffer::Instance& response, AdminStream&, - Server::Instance& server); + Buffer::Instance& response, AdminStream&); + Http::Code handlerStatsRecentLookupsEnable(absl::string_view path_and_query, + Http::ResponseHeaderMap& response_headers, + Buffer::Instance& response, AdminStream&); + Http::Code handlerStats(absl::string_view path_and_query, + Http::ResponseHeaderMap& response_headers, Buffer::Instance& response, + AdminStream&); + Http::Code handlerPrometheusStats(absl::string_view path_and_query, + Http::ResponseHeaderMap& response_headers, + Buffer::Instance& response, AdminStream&); private: template From b0f45ed98a7a0e6a6c02b28d54bb29e96fe6e6fc Mon Sep 17 00:00:00 2001 From: "Mark D. Roth" Date: Fri, 1 May 2020 13:35:08 -0700 Subject: [PATCH 078/909] api: Add send_all_clusters field to LRS response. (#10613) Add a send_all_clusters field to LRS response, triggered by a new client capability. This avoids the need for the server to enumerate the full list of clusters if it always wants data for all clusters. Risk Level: Low Testing: N/A Docs Changes: Included in PR. Release Notes: N/A Signed-off-by: Mark D. Roth --- api/envoy/service/load_stats/v2/lrs.proto | 8 +++- api/envoy/service/load_stats/v3/lrs.proto | 8 +++- docs/root/api/client_features.rst | 5 ++ docs/root/version_history/current.rst | 4 ++ .../envoy/service/load_stats/v2/lrs.proto | 8 +++- .../envoy/service/load_stats/v3/lrs.proto | 8 +++- source/common/upstream/load_stats_reporter.cc | 46 ++++++++++++++++--- .../upstream/load_stats_reporter_test.cc | 1 + .../load_stats_integration_test.cc | 33 ++++++++++--- 9 files changed, 104 insertions(+), 17 deletions(-) diff --git a/api/envoy/service/load_stats/v2/lrs.proto b/api/envoy/service/load_stats/v2/lrs.proto index a71039e7ceeb..d8707bd62cb2 100644 --- a/api/envoy/service/load_stats/v2/lrs.proto +++ b/api/envoy/service/load_stats/v2/lrs.proto @@ -66,7 +66,13 @@ message LoadStatsRequest { // [#not-implemented-hide:] Not configuration. 
TBD how to doc proto APIs. message LoadStatsResponse { // Clusters to report stats for. - repeated string clusters = 1 [(validate.rules).repeated = {min_items: 1}]; + // Not populated if *send_all_clusters* is true. + repeated string clusters = 1; + + // If true, the client should send all clusters it knows about. + // Only clients that advertise the "envoy.lrs.supports_send_all_clusters" capability in their + // :ref:`client_features` field will honor this field. + bool send_all_clusters = 4; // The minimum interval of time to collect stats over. This is only a minimum for two reasons: // 1. There may be some delay from when the timer fires until stats sampling occurs. diff --git a/api/envoy/service/load_stats/v3/lrs.proto b/api/envoy/service/load_stats/v3/lrs.proto index ce48574826a9..d76356884a7a 100644 --- a/api/envoy/service/load_stats/v3/lrs.proto +++ b/api/envoy/service/load_stats/v3/lrs.proto @@ -73,7 +73,13 @@ message LoadStatsResponse { "envoy.service.load_stats.v2.LoadStatsResponse"; // Clusters to report stats for. - repeated string clusters = 1 [(validate.rules).repeated = {min_items: 1}]; + // Not populated if *send_all_clusters* is true. + repeated string clusters = 1; + + // If true, the client should send all clusters it knows about. + // Only clients that advertise the "envoy.lrs.supports_send_all_clusters" capability in their + // :ref:`client_features` field will honor this field. + bool send_all_clusters = 4; // The minimum interval of time to collect stats over. This is only a minimum for two reasons: // 1. There may be some delay from when the timer fires until stats sampling occurs. diff --git a/docs/root/api/client_features.rst b/docs/root/api/client_features.rst index a233f7e7448d..4cd6594f0bdc 100644 --- a/docs/root/api/client_features.rst +++ b/docs/root/api/client_features.rst @@ -10,6 +10,8 @@ Client features use reverse DNS naming scheme, for example `com.acme.feature`. Currently Defined Client Features --------------------------------- +.. It would be nice to use an RST ref here for service.load_stats.v2.LoadStatsResponse.send_all_clusters, but we can't due to https://github.com/envoyproxy/envoy/issues/3091. + - **envoy.config.require-any-fields-contain-struct**: This feature indicates that xDS client requires that the configuration entries of type *google.protobuf.Any* contain messages of type *udpa.type.v1.TypedStruct* only. @@ -18,3 +20,6 @@ Currently Defined Client Features :ref:`overprovisioning_factor` field. If graceful failover functionality is required, it must be supplied by the management server. +- **envoy.lrs.supports_send_all_clusters**: This feature indicates that the client supports + the *envoy_api_field_service.load_stats.v2.LoadStatsResponse.send_all_clusters* + field in the LRS response. diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index d2fd148d8a5a..7b6436448254 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -28,6 +28,10 @@ Changes Can be disabled by setting runtime feature `envoy.reloadable_features.listener_in_place_filterchain_update` to false. Also added additional draining filter chain stat for :ref:`listener manager ` to track the number of draining filter chains and the number of in place update attempts. * logger: added :ref:`--log-format-prefix-with-location ` command line option to prefix '%v' with file path and line number. 
+* lrs: added new *envoy_api_field_service.load_stats.v2.LoadStatsResponse.send_all_clusters* field + in LRS response, which allows management servers to avoid explicitly listing all clusters it is + interested in; behavior is allowed based on new "envoy.lrs.supports_send_all_clusters" capability + in :ref:`client_features` field. * network filters: added a :ref:`postgres proxy filter `. * network filters: added a :ref:`rocketmq proxy filter `. * prometheus stats: fix the sort order of output lines to comply with the standard. diff --git a/generated_api_shadow/envoy/service/load_stats/v2/lrs.proto b/generated_api_shadow/envoy/service/load_stats/v2/lrs.proto index a71039e7ceeb..d8707bd62cb2 100644 --- a/generated_api_shadow/envoy/service/load_stats/v2/lrs.proto +++ b/generated_api_shadow/envoy/service/load_stats/v2/lrs.proto @@ -66,7 +66,13 @@ message LoadStatsRequest { // [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. message LoadStatsResponse { // Clusters to report stats for. - repeated string clusters = 1 [(validate.rules).repeated = {min_items: 1}]; + // Not populated if *send_all_clusters* is true. + repeated string clusters = 1; + + // If true, the client should send all clusters it knows about. + // Only clients that advertise the "envoy.lrs.supports_send_all_clusters" capability in their + // :ref:`client_features` field will honor this field. + bool send_all_clusters = 4; // The minimum interval of time to collect stats over. This is only a minimum for two reasons: // 1. There may be some delay from when the timer fires until stats sampling occurs. diff --git a/generated_api_shadow/envoy/service/load_stats/v3/lrs.proto b/generated_api_shadow/envoy/service/load_stats/v3/lrs.proto index ce48574826a9..d76356884a7a 100644 --- a/generated_api_shadow/envoy/service/load_stats/v3/lrs.proto +++ b/generated_api_shadow/envoy/service/load_stats/v3/lrs.proto @@ -73,7 +73,13 @@ message LoadStatsResponse { "envoy.service.load_stats.v2.LoadStatsResponse"; // Clusters to report stats for. - repeated string clusters = 1 [(validate.rules).repeated = {min_items: 1}]; + // Not populated if *send_all_clusters* is true. + repeated string clusters = 1; + + // If true, the client should send all clusters it knows about. + // Only clients that advertise the "envoy.lrs.supports_send_all_clusters" capability in their + // :ref:`client_features` field will honor this field. + bool send_all_clusters = 4; // The minimum interval of time to collect stats over. This is only a minimum for two reasons: // 1. There may be some delay from when the timer fires until stats sampling occurs. 
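To make the new field concrete, the following is an illustrative sketch using the generated v3 classes (the buildResponse helper, the cluster name and the interval value are examples only, not part of the API change) of how a management server could opt into the new behavior instead of enumerating cluster names:

#include "envoy/service/load_stats/v3/lrs.pb.h"

// Illustrative only: a server that wants reports for every cluster the client knows
// about sets send_all_clusters rather than listing each cluster by name.
envoy::service::load_stats::v3::LoadStatsResponse buildResponse(bool client_supports_send_all) {
  envoy::service::load_stats::v3::LoadStatsResponse response;
  if (client_supports_send_all) {
    // The client advertised "envoy.lrs.supports_send_all_clusters" in Node.client_features.
    response.set_send_all_clusters(true);
  } else {
    // Fall back to explicit enumeration for clients without the capability.
    response.add_clusters("cluster_0");
  }
  response.mutable_load_reporting_interval()->set_seconds(10);
  return response;
}

A client that did not advertise the capability simply ignores the flag, which is why the explicit cluster list remains the fallback path.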
diff --git a/source/common/upstream/load_stats_reporter.cc b/source/common/upstream/load_stats_reporter.cc index 14c707e1c613..c2f997050f98 100644 --- a/source/common/upstream/load_stats_reporter.cc +++ b/source/common/upstream/load_stats_reporter.cc @@ -21,6 +21,7 @@ LoadStatsReporter::LoadStatsReporter(const LocalInfo::LocalInfo& local_info, "envoy.service.load_stats.v2.LoadReportingService.StreamLoadStats")), time_source_(dispatcher.timeSource()) { request_.mutable_node()->MergeFrom(local_info.node()); + request_.mutable_node()->add_client_features("envoy.lrs.supports_send_all_clusters"); retry_timer_ = dispatcher.createTimer([this]() -> void { establishNewStream(); }); response_timer_ = dispatcher.createTimer([this]() -> void { sendLoadStatsRequest(); }); establishNewStream(); @@ -44,6 +45,20 @@ void LoadStatsReporter::establishNewStream() { } void LoadStatsReporter::sendLoadStatsRequest() { + // TODO(htuch): This sends load reports for only the set of clusters in clusters_, which + // was initialized in startLoadReportPeriod() the last time we either sent a load report + // or received a new LRS response (whichever happened more recently). The code in + // startLoadReportPeriod() adds to clusters_ only those clusters that exist in the + // ClusterManager at the moment when startLoadReportPeriod() runs. This means that if + // a cluster is selected by the LRS server (either by being explicitly listed or by using + // the send_all_clusters field), if that cluster was added to the ClusterManager since the + // last time startLoadReportPeriod() was invoked, we will not report its load here. In + // practice, this means that for any newly created cluster, we will always drop the data for + // the initial load report period. This seems sub-optimal. + // + // One possible way to deal with this would be to get a notification whenever a new cluster is + // added to the cluster manager. When we get the notification, we record the current time in + // clusters_ as the start time for the load reporting window for that cluster. request_.mutable_cluster_stats()->Clear(); for (const auto& cluster_name_and_timestamp : clusters_) { const std::string& cluster_name = cluster_name_and_timestamp.first; @@ -136,25 +151,34 @@ void LoadStatsReporter::startLoadReportPeriod() { // internal string type. Consider this optimization when the string types // converge. std::unordered_map existing_clusters; - for (const std::string& cluster_name : message_->clusters()) { - if (clusters_.count(cluster_name) > 0) { - existing_clusters.emplace(cluster_name, clusters_[cluster_name]); + if (message_->send_all_clusters()) { + for (const auto& p : cm_.clusters()) { + const std::string& cluster_name = p.first; + if (clusters_.count(cluster_name) > 0) { + existing_clusters.emplace(cluster_name, clusters_[cluster_name]); + } + } + } else { + for (const std::string& cluster_name : message_->clusters()) { + if (clusters_.count(cluster_name) > 0) { + existing_clusters.emplace(cluster_name, clusters_[cluster_name]); + } } } clusters_.clear(); // Reset stats for all hosts in clusters we are tracking. - for (const std::string& cluster_name : message_->clusters()) { + auto handle_cluster_func = [this, &existing_clusters](const std::string& cluster_name) { clusters_.emplace(cluster_name, existing_clusters.count(cluster_name) > 0 ? 
existing_clusters[cluster_name] : time_source_.monotonicTime().time_since_epoch()); auto cluster_info_map = cm_.clusters(); auto it = cluster_info_map.find(cluster_name); if (it == cluster_info_map.end()) { - continue; + return; } // Don't reset stats for existing tracked clusters. if (existing_clusters.count(cluster_name) > 0) { - continue; + return; } auto& cluster = it->second.get(); for (auto& host_set : cluster.prioritySet().hostSetsPerPriority()) { @@ -165,6 +189,16 @@ void LoadStatsReporter::startLoadReportPeriod() { } } cluster.info()->loadReportStats().upstream_rq_dropped_.latch(); + }; + if (message_->send_all_clusters()) { + for (const auto& p : cm_.clusters()) { + const std::string& cluster_name = p.first; + handle_cluster_func(cluster_name); + } + } else { + for (const std::string& cluster_name : message_->clusters()) { + handle_cluster_func(cluster_name); + } } response_timer_->enableTimer(std::chrono::milliseconds( DurationUtil::durationToMilliseconds(message_->load_reporting_interval()))); diff --git a/test/common/upstream/load_stats_reporter_test.cc b/test/common/upstream/load_stats_reporter_test.cc index b87b9e751f1d..c22593a84f5c 100644 --- a/test/common/upstream/load_stats_reporter_test.cc +++ b/test/common/upstream/load_stats_reporter_test.cc @@ -52,6 +52,7 @@ class LoadStatsReporterTest : public testing::Test { const std::vector& expected_cluster_stats) { envoy::service::load_stats::v3::LoadStatsRequest expected_request; expected_request.mutable_node()->MergeFrom(local_info_.node()); + expected_request.mutable_node()->add_client_features("envoy.lrs.supports_send_all_clusters"); std::copy(expected_cluster_stats.begin(), expected_cluster_stats.end(), Protobuf::RepeatedPtrFieldBackInserter(expected_request.mutable_cluster_stats())); EXPECT_CALL(async_stream_, sendMessageRaw_(Grpc::ProtoBufferEq(expected_request), false)); diff --git a/test/integration/load_stats_integration_test.cc b/test/integration/load_stats_integration_test.cc index d917259e4a9a..8e79f61d684d 100644 --- a/test/integration/load_stats_integration_test.cc +++ b/test/integration/load_stats_integration_test.cc @@ -158,11 +158,20 @@ class LoadStatsIntegrationTest : public testing::TestWithParambegin(); + it != local_loadstats_request.mutable_cluster_stats()->end(); ++it) { + if (it->cluster_name() == "load_report") { + local_loadstats_request.mutable_cluster_stats()->erase(it); + break; + } + } + + ASSERT_LE(loadstats_request.cluster_stats_size(), 1) << loadstats_request.DebugString(); + ASSERT_LE(local_loadstats_request.cluster_stats_size(), 1) + << local_loadstats_request.DebugString(); if (local_loadstats_request.cluster_stats_size() == 0) { return; @@ -254,6 +263,11 @@ class LoadStatsIntegrationTest : public testing::TestWithParamwaitForGrpcMessage(*dispatcher_, local_loadstats_request); RELEASE_ASSERT(result, result.message()); + // Check that "envoy.lrs.supports_send_all_clusters" client feature is set. + if (local_loadstats_request.has_node()) { + EXPECT_THAT(local_loadstats_request.node().client_features(), + ::testing::ElementsAre("envoy.lrs.supports_send_all_clusters")); + } // Sanity check and clear the measured load report interval. 
for (auto& cluster_stats : *local_loadstats_request.mutable_cluster_stats()) { const uint32_t actual_load_report_interval_ms = @@ -299,13 +313,17 @@ class LoadStatsIntegrationTest : public testing::TestWithParambody().size()); } - void requestLoadStatsResponse(const std::vector& clusters) { + void requestLoadStatsResponse(const std::vector& clusters, + bool send_all_clusters = false) { envoy::service::load_stats::v3::LoadStatsResponse loadstats_response; loadstats_response.mutable_load_reporting_interval()->MergeFrom( Protobuf::util::TimeUtil::MillisecondsToDuration(load_report_interval_ms_)); for (const auto& cluster : clusters) { loadstats_response.add_clusters(cluster); } + if (send_all_clusters) { + loadstats_response.set_send_all_clusters(true); + } loadstats_stream_->sendGrpcMessage(loadstats_response); // Wait until the request has been received by Envoy. test_server_->waitForCounterGe("load_reporter.requests", ++load_requests_); @@ -394,7 +412,8 @@ TEST_P(LoadStatsIntegrationTest, Success) { // 33%/67% split between dragon/winter primary localities. updateClusterLoadAssignment({{0}}, {{1, 2}}, {}, {{4}}); - requestLoadStatsResponse({"cluster_0"}); + // Verify that send_all_clusters works. + requestLoadStatsResponse({}, true); for (uint32_t i = 0; i < 6; ++i) { sendAndReceiveUpstream((4 + i) % 3); From cf12e8ab8bbbeebf9d003173ee8fb21f5a2428dd Mon Sep 17 00:00:00 2001 From: htuch Date: Fri, 1 May 2020 17:24:26 -0400 Subject: [PATCH 079/909] api: new major versioning cut policy. (#10958) After extended discussion in #10852, Slack and offline, this patch proposes a revision to the API major versioning policy where we will: * Not mechanically cut a new major version at EOY, instead wait for enough tech debt. * Point to future minor versioning and client capabilities to help deal with tech debt. Fixes #10852. Signed-off-by: Harvey Tuch --- api/API_VERSIONING.md | 62 ++++++++++++++++++++++++++++++++++--------- 1 file changed, 50 insertions(+), 12 deletions(-) diff --git a/api/API_VERSIONING.md b/api/API_VERSIONING.md index 4684ed3e86e9..52b0ea04c751 100644 --- a/api/API_VERSIONING.md +++ b/api/API_VERSIONING.md @@ -76,9 +76,20 @@ implementations within a major version should set explicit values for these fiel # API lifecycle -The API lifecycle follows a calendar clock. At the end of Q4 each year, a major API version -increment may occur for any Envoy API package, in concert with the quarterly Envoy release. - +A new major version is a significant event in the xDS API ecosystem, inevitably requiring support +from clients (Envoy, gRPC) and a large number of control planes, ranging from simple in-house custom +management servers to xDS-as-a-service offerings run by vendors. The [xDS API +shepherds](https://github.com/orgs/envoyproxy/teams/api-shepherds) will make the decision to add a +new major version subject to the following constraints: +* There exists sufficient technical debt in the xDS APIs in the existing supported major version + to justify the cost burden for xDS client/server implementations. +* At least one year has elapsed since the last major version was cut. +* Consultation with the Envoy community (via Envoy community call, `#xds` channel on Slack), as + well as gRPC OSS community (via reaching out to language maintainers) is made. This is not a veto + process; the API shepherds retain the right to move forward with a new major API version after + weighing this input with the first two considerations above. 
+ +Following the release of a new major version, the API lifecycle follows a deprecation clock. Envoy will support at most three major versions of any API package at all times: * The current stable major version, e.g. v3. * The previous stable major version, e.g. v2. This is needed to ensure that we provide at least 1 @@ -94,16 +105,26 @@ Envoy will support at most three major versions of any API package at all times: current stable major version, making use of annotations such as `deprecated = true`. This is not a human editable artifact. -An example of how this might play out is that at the end of September in 2020, we will freeze -`envoy.config.bootstrap.v4alpha` and this package will become the current stable major version +An example of how this might play out is that at the end of December in 2020, if a v4 major version +is justified, we might freeze +`envoy.config.bootstrap.v4alpha` and this package would then become the current stable major version `envoy.config.bootstrap.v4`. The `envoy.config.bootstrap.v3` package will become the previous stable major version and support for `envoy.config.bootstrap.v2` will be dropped from the Envoy implementation. Note that some transitively referenced package, e.g. `envoy.config.filter.network.foo.v2` may remain at version 2 during this release, if no changes were -made to the referenced package. +made to the referenced package. If no major version is justified at this point, the decision to cut +v4 might occur at some point in 2021 or beyond. + +The implication of this API lifecycle and clock is that any deprecated feature in the Envoy API will +retain implementation support for at least 1-2 years. -The implication of this API lifecycle and clock is that any deprecated feature in the Envoy API will retain -implementation support for 1-2 years (1.5 years on average). +We are currently working on a strategy to introduce minor versions +(https://github.com/envoyproxy/envoy/issues/8416). This will bump the xDS API minor version on every +deprecation and field introduction/modification. This will provide an opportunity for the control +plane to condition on client and major/minor API version support. Currently under discussion, but +not finalized will be the sunsetting of Envoy client support for deprecated features after a year +of support within a major version. Please post to https://github.com/envoyproxy/envoy/issues/8416 +any thoughts around this. # New API features @@ -152,10 +173,27 @@ candidate for this class of change. The following steps are required: 3. The old message/enum/field/enum value should be annotated as deprecated. 4. At the next major version, `protoxform` will remove the deprecated version automatically. -This approach ensures that API major version releases are predictable and mechanical, and has the -bulk of the Envoy code and test changes owned by feature developers, rather than the API owners. -There will be no major `vN` initiative to address technical debt beyond that enabled by the above -process. +This make-before-break approach ensures that API major version releases are predictable and +mechanical, and has the bulk of the Envoy code and test changes owned by feature developers, rather +than the API owners. There will be no major `vN` initiative to address technical debt beyond that +enabled by the above process. + +# Client features + +Not all clients will support all fields and features in a given major API version. 
In general, it is +preferable to use Protobuf semantics to support this, for example: +* Ignoring a field's contents is sufficient to indicate that the support is missing in a client. +* Setting both deprecated and the new method for expressing a field if support for a range of + clients is desired (where this does not involve huge overhead or gymnastics). + +This approach does not always work, for example: +* A route matcher conjunct condition should not be ignored just because the client is missing the + ability to implement the match; this might result in route policy bypass. +* A client may expect the server to provide a response in a certain format or encoding, for example + a JSON encoded `Struct`-in-`Any` representation of opaque extension configuration. + +For this purpose, we have [client +features](https://www.envoyproxy.io/docs/envoy/latest/api/client_features). # One Definition Rule (ODR) From 28af32e1eb0748521ab596d7793bf254363afcb0 Mon Sep 17 00:00:00 2001 From: asraa Date: Sun, 3 May 2020 17:22:27 -0400 Subject: [PATCH 080/909] [tools] initial fuzz coverage script (#10289) fuzz coverage script to run all fuzz targets (or ones specified) for 1 min creating a temporary corpus for testcases with new coverage, and then runs a coverage script over all the binaries produced from bazel coverage. usage: test/run_envoy_fuzz_coverage.sh (runs all fuzz targets) test/run_envoy_fuzz_coverage.sh //test/common/common:base64_fuzz_test //test/common/common:hash_fuzz_test (runs just specified) Risk level: Low Testing: Local Signed-off-by: Asra Ali --- bazel/envoy_test.bzl | 2 +- test/build_and_run_fuzz_targets.sh | 48 ++++++++++++++++++++++++ test/run_envoy_bazel_coverage.sh | 59 +++++++++++++++++++++++++----- 3 files changed, 99 insertions(+), 10 deletions(-) create mode 100755 test/build_and_run_fuzz_targets.sh diff --git a/bazel/envoy_test.bzl b/bazel/envoy_test.bzl index ca0b430c16f0..222cb99e60e5 100644 --- a/bazel/envoy_test.bzl +++ b/bazel/envoy_test.bzl @@ -121,7 +121,7 @@ def envoy_cc_fuzz_test( ], }), size = size, - tags = tags, + tags = ["fuzz_target"] + tags, ) # This target exists only for diff --git a/test/build_and_run_fuzz_targets.sh b/test/build_and_run_fuzz_targets.sh new file mode 100755 index 000000000000..516b85fe1a6a --- /dev/null +++ b/test/build_and_run_fuzz_targets.sh @@ -0,0 +1,48 @@ +#!/bin/bash + +if [[ $# -gt 0 ]]; then + FUZZ_TARGETS=$* +else + echo "This script should be called from tools/run_envoy_bazel_coverage.sh" +fi + +LIBFUZZER_TARGETS="" +# Build all fuzz targets to run instrumented with libfuzzer in sequence. +for t in ${FUZZ_TARGETS} +do + LIBFUZZER_TARGETS+="${t}_with_libfuzzer " +done + +bazel build ${BAZEL_BUILD_OPTIONS} ${LIBFUZZER_TARGETS} --config asan-fuzzer -c opt + +# Now run each fuzz target in parallel for 60 seconds. +PIDS="" +TMPDIR="${FUZZ_TEMPDIR}" + +for t in ${FUZZ_TARGETS} +do + # Make a temporary corpus for this fuzz target. + TARGET_BINARY="${t/://}" + TEMP_CORPUS_PATH="${TARGET_BINARY:2}" + CORPUS_DIR="${TMPDIR}/${TEMP_CORPUS_PATH////_}_corpus" + mkdir -v "${CORPUS_DIR}" + # Get the original corpus for the fuzz target + CORPUS_LOCATION="$(bazel query "labels(data, ${t})" | head -1)" + ORIGINAL_CORPUS="$(bazel query "labels(srcs, ${CORPUS_LOCATION})" | head -1)" + ORIGINAL_CORPUS="${ORIGINAL_CORPUS/://}" + ORIGINAL_CORPUS="$(dirname ${ORIGINAL_CORPUS})" + # Copy entries in original corpus into temp. + cp -r "$(pwd)${ORIGINAL_CORPUS:1}" "${CORPUS_DIR}" + # Run fuzzing process. 
+ bazel-bin/"${TARGET_BINARY:2}"_with_libfuzzer -max_total_time=60 "${CORPUS_DIR}" & + # Add pid to pids list + PIDS="${PIDS} $!" +done + +# Wait for background process to run. +for pid in ${PIDS}; do + wait $pid + if [ $? -ne 0 ]; then + echo "${pid} FAILED" + fi +done diff --git a/test/run_envoy_bazel_coverage.sh b/test/run_envoy_bazel_coverage.sh index 825803f28f97..6eaf8c469a5a 100755 --- a/test/run_envoy_bazel_coverage.sh +++ b/test/run_envoy_bazel_coverage.sh @@ -4,11 +4,13 @@ set -e [[ -z "${SRCDIR}" ]] && SRCDIR="${PWD}" [[ -z "${VALIDATE_COVERAGE}" ]] && VALIDATE_COVERAGE=true +[[ -z "${FUZZ_COVERAGE}" ]] && FUZZ_COVERAGE=false echo "Starting run_envoy_bazel_coverage.sh..." echo " PWD=$(pwd)" echo " SRCDIR=${SRCDIR}" echo " VALIDATE_COVERAGE=${VALIDATE_COVERAGE}" +echo " FUZZ_COVERAGE=${FUZZ_COVERAGE}" # This is the target that will be run to generate coverage data. It can be overridden by consumer # projects that want to run coverage on a different/combined target. @@ -18,18 +20,56 @@ if [[ $# -gt 0 ]]; then elif [[ -n "${COVERAGE_TARGET}" ]]; then COVERAGE_TARGETS=${COVERAGE_TARGET} else - COVERAGE_TARGETS=//test/... + # For fuzz builds, this overrides to just fuzz targets. + COVERAGE_TARGETS=//test/... && [[ ${FUZZ_COVERAGE} == "true" ]] && + COVERAGE_TARGETS="$(bazel query 'attr("tags", "fuzz_target", //test/...)')" fi -# Make sure //test/coverage:coverage_tests is up-to-date. SCRIPT_DIR="$(realpath "$(dirname "$0")")" -"${SCRIPT_DIR}"/coverage/gen_build.sh ${COVERAGE_TARGETS} +TEMP_CORPORA="" +if [ "$FUZZ_COVERAGE" == "true" ] +then + # Build and run libfuzzer linked target, grab collect temp directories. + FUZZ_TEMPDIR="$(mktemp -d)" + FUZZ_TEMPDIR=${FUZZ_TEMPDIR} "${SCRIPT_DIR}"/build_and_run_fuzz_targets.sh ${COVERAGE_TARGETS} +else + # Make sure //test/coverage:coverage_tests is up-to-date. + "${SCRIPT_DIR}"/coverage/gen_build.sh ${COVERAGE_TARGETS} +fi + +# Set the bazel targets to run. +BAZEL_TARGET=//test/coverage:coverage_tests && [[ ${FUZZ_COVERAGE} == "true" ]] && BAZEL_TARGET=${COVERAGE_TARGETS} + +# Add binaries to OBJECTS to pass in to llvm-cov +OBJECTS="" +# For nornaml builds, BAZEL_TARGET only contains //test/coverage:coverage_tests +for t in ${BAZEL_TARGET} +do + # Set test args. If normal coverage run, this is --log-path /dev/null + if [ "$FUZZ_COVERAGE" == "true" ] + then + # If this is a fuzz target, set args to be the temp corpus. + TARGET_BINARY="${t/://}" + CORPUS_LOCATION="${TARGET_BINARY:2}" + TEST_ARGS=(--test_arg="${FUZZ_TEMPDIR}/${CORPUS_LOCATION////_}_corpus" --test_arg="-runs=0") + if [[ -z "${OBJECTS}" ]]; then + # The first object needs to be passed without -object= flag. + OBJECTS="bazel-bin/${TARGET_BINARY:2}_with_libfuzzer" + else + OBJECTS="$OBJECTS -object=bazel-bin/${TARGET_BINARY:2}_with_libfuzzer" + fi + TARGET="${t}_with_libfuzzer" + else + TEST_ARGS=(--test_arg="--log-path /dev/null" --test_arg="-l trace") + OBJECTS="bazel-bin/test/coverage/coverage_tests" + TARGET="${t}" + fi -BAZEL_USE_LLVM_NATIVE_COVERAGE=1 GCOV=llvm-profdata bazel coverage ${BAZEL_BUILD_OPTIONS} \ + BAZEL_USE_LLVM_NATIVE_COVERAGE=1 GCOV=llvm-profdata bazel coverage ${BAZEL_BUILD_OPTIONS} \ -c fastbuild --copt=-DNDEBUG --instrumentation_filter=//source/...,//include/... 
\ --test_timeout=2000 --cxxopt="-DENVOY_CONFIG_COVERAGE=1" --test_output=errors \ - --test_arg="--log-path /dev/null" --test_arg="-l trace" --test_env=HEAPCHECK= \ - //test/coverage:coverage_tests + "${TEST_ARGS[@]}" --test_env=HEAPCHECK= ${TARGET} +done COVERAGE_DIR="${SRCDIR}"/generated/coverage mkdir -p "${COVERAGE_DIR}" @@ -39,10 +79,11 @@ COVERAGE_BINARY="bazel-bin/test/coverage/coverage_tests" COVERAGE_DATA="${COVERAGE_DIR}/coverage.dat" echo "Merging coverage data..." -llvm-profdata merge -sparse -o ${COVERAGE_DATA} $(find -L bazel-out/k8-fastbuild/testlogs/test/coverage/coverage_tests/ -name coverage.dat) +BAZEL_OUT=test/coverage/coverage_tests/ && [[ ${FUZZ_COVERAGE} ]] && BAZEL_OUT=test/ +llvm-profdata merge -sparse -o ${COVERAGE_DATA} $(find -L bazel-out/k8-fastbuild/testlogs/${BAZEL_OUT} -name coverage.dat) echo "Generating report..." -llvm-cov show "${COVERAGE_BINARY}" -instr-profile="${COVERAGE_DATA}" -Xdemangler=c++filt \ +llvm-cov show -instr-profile="${COVERAGE_DATA}" ${OBJECTS} -Xdemangler=c++filt \ -ignore-filename-regex="${COVERAGE_IGNORE_REGEX}" -output-dir=${COVERAGE_DIR} -format=html sed -i -e 's|>proc/self/cwd/|>|g' "${COVERAGE_DIR}/index.html" sed -i -e 's|>bazel-out/[^/]*/bin/\([^/]*\)/[^<]*/_virtual_includes/[^/]*|>\1|g' "${COVERAGE_DIR}/index.html" @@ -51,7 +92,7 @@ sed -i -e 's|>bazel-out/[^/]*/bin/\([^/]*\)/[^<]*/_virtual_includes/[^/]*|>\1|g' if [ "$VALIDATE_COVERAGE" == "true" ] then - COVERAGE_VALUE=$(llvm-cov export "${COVERAGE_BINARY}" -instr-profile="${COVERAGE_DATA}" \ + COVERAGE_VALUE=$(llvm-cov export "${OBJECTS}" -instr-profile="${COVERAGE_DATA}" \ -ignore-filename-regex="${COVERAGE_IGNORE_REGEX}" -summary-only | \ python3 -c "import sys, json; print(json.load(sys.stdin)['data'][0]['totals']['lines']['percent'])") COVERAGE_THRESHOLD=97.0 From cb08d542389d455069f83f20dc543176ffb484fb Mon Sep 17 00:00:00 2001 From: Raghavendra Balgi Date: Mon, 4 May 2020 03:14:41 +0530 Subject: [PATCH 081/909] docs: Fixes a mismatch in metadata attribute name (used by transport_socket_matches) (#11040) Fixes a mismatch in metadata attribute name used by transport_socket_matches Risk Level: Low Testing: Manual review Docs Changes: Not applicable Fixes #10660 Signed-off-by: Raghavendra Balgi --- api/envoy/api/v2/cluster.proto | 6 +++--- api/envoy/config/cluster/v3/cluster.proto | 6 +++--- api/envoy/config/cluster/v4alpha/cluster.proto | 6 +++--- generated_api_shadow/envoy/api/v2/cluster.proto | 6 +++--- generated_api_shadow/envoy/config/cluster/v3/cluster.proto | 6 +++--- .../envoy/config/cluster/v4alpha/cluster.proto | 6 +++--- 6 files changed, 18 insertions(+), 18 deletions(-) diff --git a/api/envoy/api/v2/cluster.proto b/api/envoy/api/v2/cluster.proto index 8d9ead00f1cd..c95de62c128d 100644 --- a/api/envoy/api/v2/cluster.proto +++ b/api/envoy/api/v2/cluster.proto @@ -471,7 +471,7 @@ message Cluster { reserved 12, 15; // Configuration to use different transport sockets for different endpoints. - // The entry of *envoy.transport_socket* in the + // The entry of *envoy.transport_socket_match* in the // :ref:`LbEndpoint.Metadata ` // is used to match against the transport sockets as they appear in the list. The first // :ref:`match ` is used. 
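For readers wiring this up, a hedged sketch (assumed v3 types; the makeMtlsEndpoint helper and the literal string value are illustrative) of how an endpoint is labeled so that the corrected *envoy.transport_socket_match* metadata key can select a TransportSocketMatch entry:

#include "envoy/config/endpoint/v3/endpoint_components.pb.h"

// Illustrative only: tag an endpoint so the "envoy.transport_socket_match" metadata key
// carries the criteria a Cluster.TransportSocketMatch entry will match against.
envoy::config::endpoint::v3::LbEndpoint makeMtlsEndpoint() {
  envoy::config::endpoint::v3::LbEndpoint endpoint;
  auto& match_metadata = (*endpoint.mutable_metadata()
                               ->mutable_filter_metadata())["envoy.transport_socket_match"];
  // Mirrors the "acceptMTLS"/"true" pair described in the comment above.
  (*match_metadata.mutable_fields())["acceptMTLS"].set_string_value("true");
  return endpoint;
}

A TransportSocketMatch whose match block contains the same acceptMTLS entry would then apply its transport socket to this endpoint, as the surrounding proto comment describes.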
@@ -491,14 +491,14 @@ message Cluster { // transport_socket: // name: envoy.transport_sockets.raw_buffer // - // Connections to the endpoints whose metadata value under *envoy.transport_socket* + // Connections to the endpoints whose metadata value under *envoy.transport_socket_match* // having "acceptMTLS"/"true" key/value pair use the "enableMTLS" socket configuration. // // If a :ref:`socket match ` with empty match // criteria is provided, that always match any endpoint. For example, the "defaultToPlaintext" // socket match in case above. // - // If an endpoint metadata's value under *envoy.transport_socket* does not match any + // If an endpoint metadata's value under *envoy.transport_socket_match* does not match any // *TransportSocketMatch*, socket configuration fallbacks to use the *tls_context* or // *transport_socket* specified in this cluster. // diff --git a/api/envoy/config/cluster/v3/cluster.proto b/api/envoy/config/cluster/v3/cluster.proto index 0b3a4fbc61c4..be7710815b70 100644 --- a/api/envoy/config/cluster/v3/cluster.proto +++ b/api/envoy/config/cluster/v3/cluster.proto @@ -504,7 +504,7 @@ message Cluster { reserved "hosts", "tls_context", "extension_protocol_options"; // Configuration to use different transport sockets for different endpoints. - // The entry of *envoy.transport_socket* in the + // The entry of *envoy.transport_socket_match* in the // :ref:`LbEndpoint.Metadata ` // is used to match against the transport sockets as they appear in the list. The first // :ref:`match ` is used. @@ -524,14 +524,14 @@ message Cluster { // transport_socket: // name: envoy.transport_sockets.raw_buffer // - // Connections to the endpoints whose metadata value under *envoy.transport_socket* + // Connections to the endpoints whose metadata value under *envoy.transport_socket_match* // having "acceptMTLS"/"true" key/value pair use the "enableMTLS" socket configuration. // // If a :ref:`socket match ` with empty match // criteria is provided, that always match any endpoint. For example, the "defaultToPlaintext" // socket match in case above. // - // If an endpoint metadata's value under *envoy.transport_socket* does not match any + // If an endpoint metadata's value under *envoy.transport_socket_match* does not match any // *TransportSocketMatch*, socket configuration fallbacks to use the *tls_context* or // *transport_socket* specified in this cluster. // diff --git a/api/envoy/config/cluster/v4alpha/cluster.proto b/api/envoy/config/cluster/v4alpha/cluster.proto index 89c206f2c7b0..2b044b2c6437 100644 --- a/api/envoy/config/cluster/v4alpha/cluster.proto +++ b/api/envoy/config/cluster/v4alpha/cluster.proto @@ -505,7 +505,7 @@ message Cluster { reserved "hosts", "tls_context", "extension_protocol_options"; // Configuration to use different transport sockets for different endpoints. - // The entry of *envoy.transport_socket* in the + // The entry of *envoy.transport_socket_match* in the // :ref:`LbEndpoint.Metadata ` // is used to match against the transport sockets as they appear in the list. The first // :ref:`match ` is used. @@ -525,14 +525,14 @@ message Cluster { // transport_socket: // name: envoy.transport_sockets.raw_buffer // - // Connections to the endpoints whose metadata value under *envoy.transport_socket* + // Connections to the endpoints whose metadata value under *envoy.transport_socket_match* // having "acceptMTLS"/"true" key/value pair use the "enableMTLS" socket configuration. 
// // If a :ref:`socket match ` with empty match // criteria is provided, that always match any endpoint. For example, the "defaultToPlaintext" // socket match in case above. // - // If an endpoint metadata's value under *envoy.transport_socket* does not match any + // If an endpoint metadata's value under *envoy.transport_socket_match* does not match any // *TransportSocketMatch*, socket configuration fallbacks to use the *tls_context* or // *transport_socket* specified in this cluster. // diff --git a/generated_api_shadow/envoy/api/v2/cluster.proto b/generated_api_shadow/envoy/api/v2/cluster.proto index 8d9ead00f1cd..c95de62c128d 100644 --- a/generated_api_shadow/envoy/api/v2/cluster.proto +++ b/generated_api_shadow/envoy/api/v2/cluster.proto @@ -471,7 +471,7 @@ message Cluster { reserved 12, 15; // Configuration to use different transport sockets for different endpoints. - // The entry of *envoy.transport_socket* in the + // The entry of *envoy.transport_socket_match* in the // :ref:`LbEndpoint.Metadata ` // is used to match against the transport sockets as they appear in the list. The first // :ref:`match ` is used. @@ -491,14 +491,14 @@ message Cluster { // transport_socket: // name: envoy.transport_sockets.raw_buffer // - // Connections to the endpoints whose metadata value under *envoy.transport_socket* + // Connections to the endpoints whose metadata value under *envoy.transport_socket_match* // having "acceptMTLS"/"true" key/value pair use the "enableMTLS" socket configuration. // // If a :ref:`socket match ` with empty match // criteria is provided, that always match any endpoint. For example, the "defaultToPlaintext" // socket match in case above. // - // If an endpoint metadata's value under *envoy.transport_socket* does not match any + // If an endpoint metadata's value under *envoy.transport_socket_match* does not match any // *TransportSocketMatch*, socket configuration fallbacks to use the *tls_context* or // *transport_socket* specified in this cluster. // diff --git a/generated_api_shadow/envoy/config/cluster/v3/cluster.proto b/generated_api_shadow/envoy/config/cluster/v3/cluster.proto index c058c421eec4..eaa40527c2c3 100644 --- a/generated_api_shadow/envoy/config/cluster/v3/cluster.proto +++ b/generated_api_shadow/envoy/config/cluster/v3/cluster.proto @@ -502,7 +502,7 @@ message Cluster { reserved 12, 15; // Configuration to use different transport sockets for different endpoints. - // The entry of *envoy.transport_socket* in the + // The entry of *envoy.transport_socket_match* in the // :ref:`LbEndpoint.Metadata ` // is used to match against the transport sockets as they appear in the list. The first // :ref:`match ` is used. @@ -522,14 +522,14 @@ message Cluster { // transport_socket: // name: envoy.transport_sockets.raw_buffer // - // Connections to the endpoints whose metadata value under *envoy.transport_socket* + // Connections to the endpoints whose metadata value under *envoy.transport_socket_match* // having "acceptMTLS"/"true" key/value pair use the "enableMTLS" socket configuration. // // If a :ref:`socket match ` with empty match // criteria is provided, that always match any endpoint. For example, the "defaultToPlaintext" // socket match in case above. 
// - // If an endpoint metadata's value under *envoy.transport_socket* does not match any + // If an endpoint metadata's value under *envoy.transport_socket_match* does not match any // *TransportSocketMatch*, socket configuration fallbacks to use the *tls_context* or // *transport_socket* specified in this cluster. // diff --git a/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto b/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto index 89c206f2c7b0..2b044b2c6437 100644 --- a/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto +++ b/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto @@ -505,7 +505,7 @@ message Cluster { reserved "hosts", "tls_context", "extension_protocol_options"; // Configuration to use different transport sockets for different endpoints. - // The entry of *envoy.transport_socket* in the + // The entry of *envoy.transport_socket_match* in the // :ref:`LbEndpoint.Metadata ` // is used to match against the transport sockets as they appear in the list. The first // :ref:`match ` is used. @@ -525,14 +525,14 @@ message Cluster { // transport_socket: // name: envoy.transport_sockets.raw_buffer // - // Connections to the endpoints whose metadata value under *envoy.transport_socket* + // Connections to the endpoints whose metadata value under *envoy.transport_socket_match* // having "acceptMTLS"/"true" key/value pair use the "enableMTLS" socket configuration. // // If a :ref:`socket match ` with empty match // criteria is provided, that always match any endpoint. For example, the "defaultToPlaintext" // socket match in case above. // - // If an endpoint metadata's value under *envoy.transport_socket* does not match any + // If an endpoint metadata's value under *envoy.transport_socket_match* does not match any // *TransportSocketMatch*, socket configuration fallbacks to use the *tls_context* or // *transport_socket* specified in this cluster. // From 10125161be0d0a759c3ffb02ddcdf8abc0bc6060 Mon Sep 17 00:00:00 2001 From: "Mark D. Roth" Date: Sun, 3 May 2020 14:50:11 -0700 Subject: [PATCH 082/909] api: GoogleRE2 max_program_size should be checked by server, not client. (#10971) Deprecates GoogleRE2.max_program_size field so that client does not need to check this. Risk Level: Low Testing: N/A Docs Changes: Included in PR Release Notes: Included in PR Signed-off-by: Mark D. 
Roth --- api/envoy/admin/v4alpha/BUILD | 2 +- api/envoy/admin/v4alpha/tap.proto | 4 +- api/envoy/config/accesslog/v4alpha/BUILD | 15 + .../config/accesslog/v4alpha/accesslog.proto | 298 ++++++++++ api/envoy/config/bootstrap/v4alpha/BUILD | 4 +- .../config/bootstrap/v4alpha/bootstrap.proto | 18 +- api/envoy/config/core/v4alpha/BUILD | 2 +- api/envoy/config/core/v4alpha/address.proto | 2 +- .../config/core/v4alpha/health_check.proto | 8 +- api/envoy/config/listener/v4alpha/BUILD | 15 + .../listener/v4alpha/api_listener.proto | 32 ++ .../config/listener/v4alpha/listener.proto | 241 ++++++++ .../v4alpha/listener_components.proto | 298 ++++++++++ .../config/listener/v4alpha/quic_config.proto | 35 ++ .../v4alpha/udp_listener_config.proto | 42 ++ api/envoy/config/metrics/v4alpha/BUILD | 14 + .../metrics/v4alpha/metrics_service.proto | 36 ++ api/envoy/config/metrics/v4alpha/stats.proto | 361 ++++++++++++ api/envoy/config/rbac/v4alpha/BUILD | 2 +- api/envoy/config/rbac/v4alpha/rbac.proto | 20 +- api/envoy/config/route/v4alpha/BUILD | 2 +- .../route/v4alpha/route_components.proto | 19 +- api/envoy/config/tap/v4alpha/BUILD | 14 + api/envoy/config/tap/v4alpha/common.proto | 225 ++++++++ api/envoy/data/dns/v4alpha/BUILD | 13 + api/envoy/data/dns/v4alpha/dns_table.proto | 85 +++ api/envoy/extensions/common/tap/v4alpha/BUILD | 2 +- .../common/tap/v4alpha/common.proto | 4 +- .../filters/http/cache/v4alpha/BUILD | 14 + .../filters/http/cache/v4alpha/cache.proto | 84 +++ .../filters/http/csrf/v4alpha/BUILD | 14 + .../filters/http/csrf/v4alpha/csrf.proto | 54 ++ .../filters/http/ext_authz/v4alpha/BUILD | 16 + .../http/ext_authz/v4alpha/ext_authz.proto | 245 ++++++++ .../filters/http/fault/v4alpha/BUILD | 15 + .../filters/http/fault/v4alpha/fault.proto | 144 +++++ .../filters/http/health_check/v4alpha/BUILD | 14 + .../health_check/v4alpha/health_check.proto | 47 ++ .../filters/http/jwt_authn/v4alpha/BUILD | 14 + .../http/jwt_authn/v4alpha/config.proto | 531 ++++++++++++++++++ .../filters/http/router/v4alpha/BUILD | 13 + .../filters/http/router/v4alpha/router.proto | 81 +++ .../filters/network/dubbo_proxy/v4alpha/BUILD | 15 + .../dubbo_proxy/v4alpha/dubbo_proxy.proto | 70 +++ .../network/dubbo_proxy/v4alpha/route.proto | 121 ++++ .../http_connection_manager/v4alpha/BUILD | 2 +- .../v4alpha/http_connection_manager.proto | 4 +- .../network/rocketmq_proxy/v4alpha/BUILD | 15 + .../v4alpha/rocketmq_proxy.proto | 39 ++ .../rocketmq_proxy/v4alpha/route.proto | 67 +++ .../filters/network/tcp_proxy/v4alpha/BUILD | 15 + .../network/tcp_proxy/v4alpha/tcp_proxy.proto | 137 +++++ .../network/thrift_proxy/v4alpha/BUILD | 14 + .../network/thrift_proxy/v4alpha/route.proto | 157 ++++++ .../thrift_proxy/v4alpha/thrift_proxy.proto | 130 +++++ .../filters/udp/dns_filter/v4alpha/BUILD | 14 + .../udp/dns_filter/v4alpha/dns_filter.proto | 80 +++ .../transport_sockets/tls/v4alpha/BUILD | 2 +- .../tls/v4alpha/common.proto | 6 +- api/envoy/service/health/v4alpha/BUILD | 15 + api/envoy/service/health/v4alpha/hds.proto | 160 ++++++ api/envoy/service/status/v4alpha/BUILD | 16 + api/envoy/service/status/v4alpha/csds.proto | 102 ++++ api/envoy/service/tap/v4alpha/BUILD | 17 + api/envoy/service/tap/v4alpha/tap.proto | 64 +++ api/envoy/service/tap/v4alpha/tapds.proto | 48 ++ api/envoy/type/matcher/regex.proto | 5 +- api/envoy/type/matcher/v3/regex.proto | 5 +- api/envoy/type/matcher/v4alpha/BUILD | 14 + api/envoy/type/matcher/v4alpha/metadata.proto | 105 ++++ api/envoy/type/matcher/v4alpha/node.proto | 28 + 
api/envoy/type/matcher/v4alpha/number.proto | 33 ++ api/envoy/type/matcher/v4alpha/path.proto | 30 + api/envoy/type/matcher/v4alpha/regex.proto | 72 +++ api/envoy/type/matcher/v4alpha/string.proto | 71 +++ api/envoy/type/matcher/v4alpha/struct.proto | 91 +++ api/envoy/type/matcher/v4alpha/value.proto | 71 +++ docs/root/version_history/current.rst | 3 + .../envoy/admin/v4alpha/BUILD | 2 +- .../envoy/admin/v4alpha/tap.proto | 4 +- .../envoy/config/accesslog/v4alpha/BUILD | 15 + .../config/accesslog/v4alpha/accesslog.proto | 298 ++++++++++ .../envoy/config/bootstrap/v4alpha/BUILD | 4 +- .../config/bootstrap/v4alpha/bootstrap.proto | 18 +- .../envoy/config/core/v4alpha/BUILD | 2 +- .../envoy/config/core/v4alpha/address.proto | 2 +- .../config/core/v4alpha/health_check.proto | 8 +- .../envoy/config/listener/v4alpha/BUILD | 15 + .../listener/v4alpha/api_listener.proto | 32 ++ .../config/listener/v4alpha/listener.proto | 241 ++++++++ .../v4alpha/listener_components.proto | 298 ++++++++++ .../config/listener/v4alpha/quic_config.proto | 35 ++ .../v4alpha/udp_listener_config.proto | 42 ++ .../envoy/config/metrics/v4alpha/BUILD | 14 + .../metrics/v4alpha/metrics_service.proto | 36 ++ .../envoy/config/metrics/v4alpha/stats.proto | 361 ++++++++++++ .../envoy/config/rbac/v4alpha/BUILD | 2 +- .../envoy/config/rbac/v4alpha/rbac.proto | 20 +- .../envoy/config/route/v4alpha/BUILD | 2 +- .../route/v4alpha/route_components.proto | 19 +- .../envoy/config/tap/v4alpha/BUILD | 14 + .../envoy/config/tap/v4alpha/common.proto | 225 ++++++++ .../envoy/data/dns/v4alpha/BUILD | 13 + .../envoy/data/dns/v4alpha/dns_table.proto | 85 +++ .../envoy/extensions/common/tap/v4alpha/BUILD | 2 +- .../common/tap/v4alpha/common.proto | 4 +- .../filters/http/cache/v4alpha/BUILD | 14 + .../filters/http/cache/v4alpha/cache.proto | 84 +++ .../filters/http/csrf/v4alpha/BUILD | 14 + .../filters/http/csrf/v4alpha/csrf.proto | 54 ++ .../filters/http/ext_authz/v4alpha/BUILD | 16 + .../http/ext_authz/v4alpha/ext_authz.proto | 245 ++++++++ .../filters/http/fault/v4alpha/BUILD | 15 + .../filters/http/fault/v4alpha/fault.proto | 144 +++++ .../filters/http/health_check/v4alpha/BUILD | 14 + .../health_check/v4alpha/health_check.proto | 47 ++ .../filters/http/jwt_authn/v4alpha/BUILD | 14 + .../http/jwt_authn/v4alpha/config.proto | 531 ++++++++++++++++++ .../filters/http/router/v4alpha/BUILD | 13 + .../filters/http/router/v4alpha/router.proto | 81 +++ .../filters/network/dubbo_proxy/v4alpha/BUILD | 15 + .../dubbo_proxy/v4alpha/dubbo_proxy.proto | 70 +++ .../network/dubbo_proxy/v4alpha/route.proto | 121 ++++ .../http_connection_manager/v4alpha/BUILD | 2 +- .../v4alpha/http_connection_manager.proto | 4 +- .../network/rocketmq_proxy/v4alpha/BUILD | 15 + .../v4alpha/rocketmq_proxy.proto | 39 ++ .../rocketmq_proxy/v4alpha/route.proto | 67 +++ .../filters/network/tcp_proxy/v4alpha/BUILD | 15 + .../network/tcp_proxy/v4alpha/tcp_proxy.proto | 137 +++++ .../network/thrift_proxy/v4alpha/BUILD | 14 + .../network/thrift_proxy/v4alpha/route.proto | 157 ++++++ .../thrift_proxy/v4alpha/thrift_proxy.proto | 130 +++++ .../filters/udp/dns_filter/v4alpha/BUILD | 14 + .../udp/dns_filter/v4alpha/dns_filter.proto | 80 +++ .../transport_sockets/tls/v4alpha/BUILD | 2 +- .../tls/v4alpha/common.proto | 6 +- .../envoy/service/health/v4alpha/BUILD | 15 + .../envoy/service/health/v4alpha/hds.proto | 160 ++++++ .../envoy/service/status/v4alpha/BUILD | 16 + .../envoy/service/status/v4alpha/csds.proto | 102 ++++ .../envoy/service/tap/v4alpha/BUILD | 17 + 
.../envoy/service/tap/v4alpha/tap.proto | 64 +++ .../envoy/service/tap/v4alpha/tapds.proto | 48 ++ .../envoy/type/matcher/regex.proto | 5 +- .../envoy/type/matcher/v3/regex.proto | 5 +- .../envoy/type/matcher/v4alpha/BUILD | 14 + .../envoy/type/matcher/v4alpha/metadata.proto | 105 ++++ .../envoy/type/matcher/v4alpha/node.proto | 28 + .../envoy/type/matcher/v4alpha/number.proto | 33 ++ .../envoy/type/matcher/v4alpha/path.proto | 30 + .../envoy/type/matcher/v4alpha/regex.proto | 77 +++ .../envoy/type/matcher/v4alpha/string.proto | 71 +++ .../envoy/type/matcher/v4alpha/struct.proto | 91 +++ .../envoy/type/matcher/v4alpha/value.proto | 71 +++ tools/api_boost/testdata/deprecate.cc.gold | 4 +- 156 files changed, 9772 insertions(+), 108 deletions(-) create mode 100644 api/envoy/config/accesslog/v4alpha/BUILD create mode 100644 api/envoy/config/accesslog/v4alpha/accesslog.proto create mode 100644 api/envoy/config/listener/v4alpha/BUILD create mode 100644 api/envoy/config/listener/v4alpha/api_listener.proto create mode 100644 api/envoy/config/listener/v4alpha/listener.proto create mode 100644 api/envoy/config/listener/v4alpha/listener_components.proto create mode 100644 api/envoy/config/listener/v4alpha/quic_config.proto create mode 100644 api/envoy/config/listener/v4alpha/udp_listener_config.proto create mode 100644 api/envoy/config/metrics/v4alpha/BUILD create mode 100644 api/envoy/config/metrics/v4alpha/metrics_service.proto create mode 100644 api/envoy/config/metrics/v4alpha/stats.proto create mode 100644 api/envoy/config/tap/v4alpha/BUILD create mode 100644 api/envoy/config/tap/v4alpha/common.proto create mode 100644 api/envoy/data/dns/v4alpha/BUILD create mode 100644 api/envoy/data/dns/v4alpha/dns_table.proto create mode 100644 api/envoy/extensions/filters/http/cache/v4alpha/BUILD create mode 100644 api/envoy/extensions/filters/http/cache/v4alpha/cache.proto create mode 100644 api/envoy/extensions/filters/http/csrf/v4alpha/BUILD create mode 100644 api/envoy/extensions/filters/http/csrf/v4alpha/csrf.proto create mode 100644 api/envoy/extensions/filters/http/ext_authz/v4alpha/BUILD create mode 100644 api/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto create mode 100644 api/envoy/extensions/filters/http/fault/v4alpha/BUILD create mode 100644 api/envoy/extensions/filters/http/fault/v4alpha/fault.proto create mode 100644 api/envoy/extensions/filters/http/health_check/v4alpha/BUILD create mode 100644 api/envoy/extensions/filters/http/health_check/v4alpha/health_check.proto create mode 100644 api/envoy/extensions/filters/http/jwt_authn/v4alpha/BUILD create mode 100644 api/envoy/extensions/filters/http/jwt_authn/v4alpha/config.proto create mode 100644 api/envoy/extensions/filters/http/router/v4alpha/BUILD create mode 100644 api/envoy/extensions/filters/http/router/v4alpha/router.proto create mode 100644 api/envoy/extensions/filters/network/dubbo_proxy/v4alpha/BUILD create mode 100644 api/envoy/extensions/filters/network/dubbo_proxy/v4alpha/dubbo_proxy.proto create mode 100644 api/envoy/extensions/filters/network/dubbo_proxy/v4alpha/route.proto create mode 100644 api/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/BUILD create mode 100644 api/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/rocketmq_proxy.proto create mode 100644 api/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/route.proto create mode 100644 api/envoy/extensions/filters/network/tcp_proxy/v4alpha/BUILD create mode 100644 api/envoy/extensions/filters/network/tcp_proxy/v4alpha/tcp_proxy.proto 
create mode 100644 api/envoy/extensions/filters/network/thrift_proxy/v4alpha/BUILD create mode 100644 api/envoy/extensions/filters/network/thrift_proxy/v4alpha/route.proto create mode 100644 api/envoy/extensions/filters/network/thrift_proxy/v4alpha/thrift_proxy.proto create mode 100644 api/envoy/extensions/filters/udp/dns_filter/v4alpha/BUILD create mode 100644 api/envoy/extensions/filters/udp/dns_filter/v4alpha/dns_filter.proto create mode 100644 api/envoy/service/health/v4alpha/BUILD create mode 100644 api/envoy/service/health/v4alpha/hds.proto create mode 100644 api/envoy/service/status/v4alpha/BUILD create mode 100644 api/envoy/service/status/v4alpha/csds.proto create mode 100644 api/envoy/service/tap/v4alpha/BUILD create mode 100644 api/envoy/service/tap/v4alpha/tap.proto create mode 100644 api/envoy/service/tap/v4alpha/tapds.proto create mode 100644 api/envoy/type/matcher/v4alpha/BUILD create mode 100644 api/envoy/type/matcher/v4alpha/metadata.proto create mode 100644 api/envoy/type/matcher/v4alpha/node.proto create mode 100644 api/envoy/type/matcher/v4alpha/number.proto create mode 100644 api/envoy/type/matcher/v4alpha/path.proto create mode 100644 api/envoy/type/matcher/v4alpha/regex.proto create mode 100644 api/envoy/type/matcher/v4alpha/string.proto create mode 100644 api/envoy/type/matcher/v4alpha/struct.proto create mode 100644 api/envoy/type/matcher/v4alpha/value.proto create mode 100644 generated_api_shadow/envoy/config/accesslog/v4alpha/BUILD create mode 100644 generated_api_shadow/envoy/config/accesslog/v4alpha/accesslog.proto create mode 100644 generated_api_shadow/envoy/config/listener/v4alpha/BUILD create mode 100644 generated_api_shadow/envoy/config/listener/v4alpha/api_listener.proto create mode 100644 generated_api_shadow/envoy/config/listener/v4alpha/listener.proto create mode 100644 generated_api_shadow/envoy/config/listener/v4alpha/listener_components.proto create mode 100644 generated_api_shadow/envoy/config/listener/v4alpha/quic_config.proto create mode 100644 generated_api_shadow/envoy/config/listener/v4alpha/udp_listener_config.proto create mode 100644 generated_api_shadow/envoy/config/metrics/v4alpha/BUILD create mode 100644 generated_api_shadow/envoy/config/metrics/v4alpha/metrics_service.proto create mode 100644 generated_api_shadow/envoy/config/metrics/v4alpha/stats.proto create mode 100644 generated_api_shadow/envoy/config/tap/v4alpha/BUILD create mode 100644 generated_api_shadow/envoy/config/tap/v4alpha/common.proto create mode 100644 generated_api_shadow/envoy/data/dns/v4alpha/BUILD create mode 100644 generated_api_shadow/envoy/data/dns/v4alpha/dns_table.proto create mode 100644 generated_api_shadow/envoy/extensions/filters/http/cache/v4alpha/BUILD create mode 100644 generated_api_shadow/envoy/extensions/filters/http/cache/v4alpha/cache.proto create mode 100644 generated_api_shadow/envoy/extensions/filters/http/csrf/v4alpha/BUILD create mode 100644 generated_api_shadow/envoy/extensions/filters/http/csrf/v4alpha/csrf.proto create mode 100644 generated_api_shadow/envoy/extensions/filters/http/ext_authz/v4alpha/BUILD create mode 100644 generated_api_shadow/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto create mode 100644 generated_api_shadow/envoy/extensions/filters/http/fault/v4alpha/BUILD create mode 100644 generated_api_shadow/envoy/extensions/filters/http/fault/v4alpha/fault.proto create mode 100644 generated_api_shadow/envoy/extensions/filters/http/health_check/v4alpha/BUILD create mode 100644 
generated_api_shadow/envoy/extensions/filters/http/health_check/v4alpha/health_check.proto create mode 100644 generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v4alpha/BUILD create mode 100644 generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v4alpha/config.proto create mode 100644 generated_api_shadow/envoy/extensions/filters/http/router/v4alpha/BUILD create mode 100644 generated_api_shadow/envoy/extensions/filters/http/router/v4alpha/router.proto create mode 100644 generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v4alpha/BUILD create mode 100644 generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v4alpha/dubbo_proxy.proto create mode 100644 generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v4alpha/route.proto create mode 100644 generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/BUILD create mode 100644 generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/rocketmq_proxy.proto create mode 100644 generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/route.proto create mode 100644 generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v4alpha/BUILD create mode 100644 generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v4alpha/tcp_proxy.proto create mode 100644 generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v4alpha/BUILD create mode 100644 generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v4alpha/route.proto create mode 100644 generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v4alpha/thrift_proxy.proto create mode 100644 generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v4alpha/BUILD create mode 100644 generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v4alpha/dns_filter.proto create mode 100644 generated_api_shadow/envoy/service/health/v4alpha/BUILD create mode 100644 generated_api_shadow/envoy/service/health/v4alpha/hds.proto create mode 100644 generated_api_shadow/envoy/service/status/v4alpha/BUILD create mode 100644 generated_api_shadow/envoy/service/status/v4alpha/csds.proto create mode 100644 generated_api_shadow/envoy/service/tap/v4alpha/BUILD create mode 100644 generated_api_shadow/envoy/service/tap/v4alpha/tap.proto create mode 100644 generated_api_shadow/envoy/service/tap/v4alpha/tapds.proto create mode 100644 generated_api_shadow/envoy/type/matcher/v4alpha/BUILD create mode 100644 generated_api_shadow/envoy/type/matcher/v4alpha/metadata.proto create mode 100644 generated_api_shadow/envoy/type/matcher/v4alpha/node.proto create mode 100644 generated_api_shadow/envoy/type/matcher/v4alpha/number.proto create mode 100644 generated_api_shadow/envoy/type/matcher/v4alpha/path.proto create mode 100644 generated_api_shadow/envoy/type/matcher/v4alpha/regex.proto create mode 100644 generated_api_shadow/envoy/type/matcher/v4alpha/string.proto create mode 100644 generated_api_shadow/envoy/type/matcher/v4alpha/struct.proto create mode 100644 generated_api_shadow/envoy/type/matcher/v4alpha/value.proto diff --git a/api/envoy/admin/v4alpha/BUILD b/api/envoy/admin/v4alpha/BUILD index 6da5b60bad28..d64c4f6a0816 100644 --- a/api/envoy/admin/v4alpha/BUILD +++ b/api/envoy/admin/v4alpha/BUILD @@ -10,7 +10,7 @@ api_proto_package( "//envoy/annotations:pkg", "//envoy/config/bootstrap/v4alpha:pkg", "//envoy/config/core/v4alpha:pkg", - "//envoy/config/tap/v3:pkg", + "//envoy/config/tap/v4alpha:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], diff 
--git a/api/envoy/admin/v4alpha/tap.proto b/api/envoy/admin/v4alpha/tap.proto index c47b308d6ee6..039dfcfeb812 100644 --- a/api/envoy/admin/v4alpha/tap.proto +++ b/api/envoy/admin/v4alpha/tap.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package envoy.admin.v4alpha; -import "envoy/config/tap/v3/common.proto"; +import "envoy/config/tap/v4alpha/common.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -24,5 +24,5 @@ message TapRequest { string config_id = 1 [(validate.rules).string = {min_bytes: 1}]; // The tap configuration to load. - config.tap.v3.TapConfig tap_config = 2 [(validate.rules).message = {required: true}]; + config.tap.v4alpha.TapConfig tap_config = 2 [(validate.rules).message = {required: true}]; } diff --git a/api/envoy/config/accesslog/v4alpha/BUILD b/api/envoy/config/accesslog/v4alpha/BUILD new file mode 100644 index 000000000000..4ed75a69ea09 --- /dev/null +++ b/api/envoy/config/accesslog/v4alpha/BUILD @@ -0,0 +1,15 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/accesslog/v3:pkg", + "//envoy/config/core/v4alpha:pkg", + "//envoy/config/route/v4alpha:pkg", + "//envoy/type/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/config/accesslog/v4alpha/accesslog.proto b/api/envoy/config/accesslog/v4alpha/accesslog.proto new file mode 100644 index 000000000000..56911ca19185 --- /dev/null +++ b/api/envoy/config/accesslog/v4alpha/accesslog.proto @@ -0,0 +1,298 @@ +syntax = "proto3"; + +package envoy.config.accesslog.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/route/v4alpha/route_components.proto"; +import "envoy/type/v3/percent.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/struct.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.accesslog.v4alpha"; +option java_outer_classname = "AccesslogProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Common access log types] + +message AccessLog { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.AccessLog"; + + reserved 3; + + reserved "config"; + + // The name of the access log implementation to instantiate. The name must + // match a statically registered access log. Current built-in loggers include: + // + // #. "envoy.access_loggers.file" + // #. "envoy.access_loggers.http_grpc" + // #. "envoy.access_loggers.tcp_grpc" + string name = 1; + + // Filter which is used to determine if the access log needs to be written. + AccessLogFilter filter = 2; + + // Custom configuration that depends on the access log being instantiated. Built-in + // configurations include: + // + // #. "envoy.access_loggers.file": :ref:`FileAccessLog + // ` + // #. "envoy.access_loggers.http_grpc": :ref:`HttpGrpcAccessLogConfig + // ` + // #. 
"envoy.access_loggers.tcp_grpc": :ref:`TcpGrpcAccessLogConfig + // ` + oneof config_type { + google.protobuf.Any typed_config = 4; + } +} + +// [#next-free-field: 12] +message AccessLogFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.AccessLogFilter"; + + oneof filter_specifier { + option (validate.required) = true; + + // Status code filter. + StatusCodeFilter status_code_filter = 1; + + // Duration filter. + DurationFilter duration_filter = 2; + + // Not health check filter. + NotHealthCheckFilter not_health_check_filter = 3; + + // Traceable filter. + TraceableFilter traceable_filter = 4; + + // Runtime filter. + RuntimeFilter runtime_filter = 5; + + // And filter. + AndFilter and_filter = 6; + + // Or filter. + OrFilter or_filter = 7; + + // Header filter. + HeaderFilter header_filter = 8; + + // Response flag filter. + ResponseFlagFilter response_flag_filter = 9; + + // gRPC status filter. + GrpcStatusFilter grpc_status_filter = 10; + + // Extension filter. + ExtensionFilter extension_filter = 11; + } +} + +// Filter on an integer comparison. +message ComparisonFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.ComparisonFilter"; + + enum Op { + // = + EQ = 0; + + // >= + GE = 1; + + // <= + LE = 2; + } + + // Comparison operator. + Op op = 1 [(validate.rules).enum = {defined_only: true}]; + + // Value to compare against. + core.v4alpha.RuntimeUInt32 value = 2; +} + +// Filters on HTTP response/status code. +message StatusCodeFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.StatusCodeFilter"; + + // Comparison. + ComparisonFilter comparison = 1 [(validate.rules).message = {required: true}]; +} + +// Filters on total request duration in milliseconds. +message DurationFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.DurationFilter"; + + // Comparison. + ComparisonFilter comparison = 1 [(validate.rules).message = {required: true}]; +} + +// Filters for requests that are not health check requests. A health check +// request is marked by the health check filter. +message NotHealthCheckFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.NotHealthCheckFilter"; +} + +// Filters for requests that are traceable. See the tracing overview for more +// information on how a request becomes traceable. +message TraceableFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.TraceableFilter"; +} + +// Filters for random sampling of requests. +message RuntimeFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.RuntimeFilter"; + + // Runtime key to get an optional overridden numerator for use in the *percent_sampled* field. + // If found in runtime, this value will replace the default numerator. + string runtime_key = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The default sampling percentage. If not specified, defaults to 0% with denominator of 100. + type.v3.FractionalPercent percent_sampled = 2; + + // By default, sampling pivots on the header + // :ref:`x-request-id` being present. If + // :ref:`x-request-id` is present, the filter will + // consistently sample across multiple hosts based on the runtime key value and the value + // extracted from :ref:`x-request-id`. 
If it is + // missing, or *use_independent_randomness* is set to true, the filter will randomly sample based + // on the runtime key value alone. *use_independent_randomness* can be used for logging kill + // switches within complex nested :ref:`AndFilter + // ` and :ref:`OrFilter + // ` blocks that are easier to reason about + // from a probability perspective (i.e., setting to true will cause the filter to behave like + // an independent random variable when composed within logical operator filters). + bool use_independent_randomness = 3; +} + +// Performs a logical “and” operation on the result of each filter in filters. +// Filters are evaluated sequentially and if one of them returns false, the +// filter returns false immediately. +message AndFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.AndFilter"; + + repeated AccessLogFilter filters = 1 [(validate.rules).repeated = {min_items: 2}]; +} + +// Performs a logical “or” operation on the result of each individual filter. +// Filters are evaluated sequentially and if one of them returns true, the +// filter returns true immediately. +message OrFilter { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.accesslog.v3.OrFilter"; + + repeated AccessLogFilter filters = 2 [(validate.rules).repeated = {min_items: 2}]; +} + +// Filters requests based on the presence or value of a request header. +message HeaderFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.HeaderFilter"; + + // Only requests with a header which matches the specified HeaderMatcher will pass the filter + // check. + route.v4alpha.HeaderMatcher header = 1 [(validate.rules).message = {required: true}]; +} + +// Filters requests that received responses with an Envoy response flag set. +// A list of the response flags can be found +// in the access log formatter :ref:`documentation`. +message ResponseFlagFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.ResponseFlagFilter"; + + // Only responses with the any of the flags listed in this field will be logged. + // This field is optional. If it is not specified, then any response flag will pass + // the filter check. + repeated string flags = 1 [(validate.rules).repeated = { + items { + string { + in: "LH" + in: "UH" + in: "UT" + in: "LR" + in: "UR" + in: "UF" + in: "UC" + in: "UO" + in: "NR" + in: "DI" + in: "FI" + in: "RL" + in: "UAEX" + in: "RLSE" + in: "DC" + in: "URX" + in: "SI" + in: "IH" + in: "DPE" + } + } + }]; +} + +// Filters gRPC requests based on their response status. If a gRPC status is not provided, the +// filter will infer the status from the HTTP status code. +message GrpcStatusFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.GrpcStatusFilter"; + + enum Status { + OK = 0; + CANCELED = 1; + UNKNOWN = 2; + INVALID_ARGUMENT = 3; + DEADLINE_EXCEEDED = 4; + NOT_FOUND = 5; + ALREADY_EXISTS = 6; + PERMISSION_DENIED = 7; + RESOURCE_EXHAUSTED = 8; + FAILED_PRECONDITION = 9; + ABORTED = 10; + OUT_OF_RANGE = 11; + UNIMPLEMENTED = 12; + INTERNAL = 13; + UNAVAILABLE = 14; + DATA_LOSS = 15; + UNAUTHENTICATED = 16; + } + + // Logs only responses that have any one of the gRPC statuses in this field. 
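The filter messages above compose with one another. As a rough illustration (the file logger, status threshold, and runtime keys are assumptions for illustration, not taken from this patch), an access log that records only sampled 5xx responses could be wired up roughly like this:

.. code-block:: yaml

  access_log:
  - name: envoy.access_loggers.file
    typed_config:
      "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog
      path: /dev/stdout
    filter:
      and_filter:
        filters:
        # Log only when the response status is >= 500 (overridable via runtime).
        - status_code_filter:
            comparison:
              op: GE
              value:
                default_value: 500
                runtime_key: access_log.min_status
        # ...and sample roughly 10% of those responses.
        - runtime_filter:
            runtime_key: access_log.sample
            percent_sampled:
              numerator: 10
              denominator: HUNDRED

Note that *and_filter* needs at least two entries, matching the `min_items: 2` validation on *AndFilter.filters* above.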
+ repeated Status statuses = 1 [(validate.rules).repeated = {items {enum {defined_only: true}}}]; + + // If included and set to true, the filter will instead block all responses with a gRPC status or + // inferred gRPC status enumerated in statuses, and allow all other responses. + bool exclude = 2; +} + +// Extension filter is statically registered at runtime. +message ExtensionFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.ExtensionFilter"; + + reserved 2; + + reserved "config"; + + // The name of the filter implementation to instantiate. The name must + // match a statically registered filter. + string name = 1; + + // Custom configuration that depends on the filter being instantiated. + oneof config_type { + google.protobuf.Any typed_config = 3; + } +} diff --git a/api/envoy/config/bootstrap/v4alpha/BUILD b/api/envoy/config/bootstrap/v4alpha/BUILD index 884b942b2dac..2bb0248a4772 100644 --- a/api/envoy/config/bootstrap/v4alpha/BUILD +++ b/api/envoy/config/bootstrap/v4alpha/BUILD @@ -10,8 +10,8 @@ api_proto_package( "//envoy/config/bootstrap/v3:pkg", "//envoy/config/cluster/v4alpha:pkg", "//envoy/config/core/v4alpha:pkg", - "//envoy/config/listener/v3:pkg", - "//envoy/config/metrics/v3:pkg", + "//envoy/config/listener/v4alpha:pkg", + "//envoy/config/metrics/v4alpha:pkg", "//envoy/config/overload/v3:pkg", "//envoy/extensions/transport_sockets/tls/v4alpha:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", diff --git a/api/envoy/config/bootstrap/v4alpha/bootstrap.proto b/api/envoy/config/bootstrap/v4alpha/bootstrap.proto index 0207967b4b0f..ce6aa147fba2 100644 --- a/api/envoy/config/bootstrap/v4alpha/bootstrap.proto +++ b/api/envoy/config/bootstrap/v4alpha/bootstrap.proto @@ -8,8 +8,8 @@ import "envoy/config/core/v4alpha/base.proto"; import "envoy/config/core/v4alpha/config_source.proto"; import "envoy/config/core/v4alpha/event_service_config.proto"; import "envoy/config/core/v4alpha/socket_option.proto"; -import "envoy/config/listener/v3/listener.proto"; -import "envoy/config/metrics/v3/stats.proto"; +import "envoy/config/listener/v4alpha/listener.proto"; +import "envoy/config/metrics/v4alpha/stats.proto"; import "envoy/config/overload/v3/overload.proto"; import "envoy/extensions/transport_sockets/tls/v4alpha/secret.proto"; @@ -42,9 +42,9 @@ message Bootstrap { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.Bootstrap.StaticResources"; - // Static :ref:`Listeners `. These listeners are + // Static :ref:`Listeners `. These listeners are // available regardless of LDS configuration. - repeated listener.v3.Listener listeners = 1; + repeated listener.v4alpha.Listener listeners = 1; // If a network based configuration source is specified for :ref:`cds_config // `, it's necessary @@ -65,7 +65,7 @@ message Bootstrap { reserved 4; - // All :ref:`Listeners ` are provided by a single + // All :ref:`Listeners ` are provided by a single // :ref:`LDS ` configuration source. core.v4alpha.ConfigSource lds_config = 1; @@ -110,10 +110,10 @@ message Bootstrap { string flags_path = 5; // Optional set of stats sinks. - repeated metrics.v3.StatsSink stats_sinks = 6; + repeated metrics.v4alpha.StatsSink stats_sinks = 6; // Configuration for internal processing of stats. - metrics.v3.StatsConfig stats_config = 13; + metrics.v4alpha.StatsConfig stats_config = 13; // Optional duration between flushes to configured stats sinks. 
For // performance reasons Envoy latches counters and only flushes counters and @@ -142,7 +142,7 @@ message Bootstrap { // Enable :ref:`stats for event dispatcher `, defaults to false. // Note that this records a value for each iteration of the event loop on every thread. This // should normally be minimal overhead, but when using - // :ref:`statsd `, it will send each observed value + // :ref:`statsd `, it will send each observed value // over the wire individually because the statsd protocol doesn't have any way to represent a // histogram summary. Be aware that this can be a very large volume of data. bool enable_dispatcher_stats = 16; @@ -160,7 +160,7 @@ message Bootstrap { // Optional proxy version which will be used to set the value of :ref:`server.version statistic // ` if specified. Envoy will not process this value, it will be sent as is to - // :ref:`stats sinks `. + // :ref:`stats sinks `. google.protobuf.UInt64Value stats_server_version_override = 19; // Always use TCP queries instead of UDP queries for DNS lookups. diff --git a/api/envoy/config/core/v4alpha/BUILD b/api/envoy/config/core/v4alpha/BUILD index aeac38ac2833..ef6414dadc09 100644 --- a/api/envoy/config/core/v4alpha/BUILD +++ b/api/envoy/config/core/v4alpha/BUILD @@ -8,7 +8,7 @@ api_proto_package( deps = [ "//envoy/annotations:pkg", "//envoy/config/core/v3:pkg", - "//envoy/type/matcher/v3:pkg", + "//envoy/type/matcher/v4alpha:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], diff --git a/api/envoy/config/core/v4alpha/address.proto b/api/envoy/config/core/v4alpha/address.proto index a2e6070103ae..ffade4bed75b 100644 --- a/api/envoy/config/core/v4alpha/address.proto +++ b/api/envoy/config/core/v4alpha/address.proto @@ -45,7 +45,7 @@ message SocketAddress { // to the address. An empty address is not allowed. Specify ``0.0.0.0`` or ``::`` // to bind to any address. [#comment:TODO(zuercher) reinstate when implemented: // It is possible to distinguish a Listener address via the prefix/suffix matching - // in :ref:`FilterChainMatch `.] When used + // in :ref:`FilterChainMatch `.] When used // within an upstream :ref:`BindConfig `, the address // controls the source address of outbound connections. For :ref:`clusters // `, the cluster type determines whether the diff --git a/api/envoy/config/core/v4alpha/health_check.proto b/api/envoy/config/core/v4alpha/health_check.proto index 7f823da97c5e..39badc334b01 100644 --- a/api/envoy/config/core/v4alpha/health_check.proto +++ b/api/envoy/config/core/v4alpha/health_check.proto @@ -4,7 +4,7 @@ package envoy.config.core.v4alpha; import "envoy/config/core/v4alpha/base.proto"; import "envoy/config/core/v4alpha/event_service_config.proto"; -import "envoy/type/matcher/v3/string.proto"; +import "envoy/type/matcher/v4alpha/string.proto"; import "envoy/type/v3/http.proto"; import "envoy/type/v3/range.proto"; @@ -125,9 +125,9 @@ message HealthCheck { // An optional service name parameter which is used to validate the identity of // the health checked cluster using a :ref:`StringMatcher - // `. See the :ref:`architecture overview + // `. See the :ref:`architecture overview // ` for more information. - type.matcher.v3.StringMatcher service_name_matcher = 11; + type.matcher.v4alpha.StringMatcher service_name_matcher = 11; } message TcpHealthCheck { @@ -206,7 +206,7 @@ message HealthCheck { // Specifies the ALPN protocols for health check connections. 
This is useful if the // corresponding upstream is using ALPN-based :ref:`FilterChainMatch - // ` along with different protocols for health checks + // ` along with different protocols for health checks // versus data connections. If empty, no ALPN protocols will be set on health check connections. repeated string alpn_protocols = 1; } diff --git a/api/envoy/config/listener/v4alpha/BUILD b/api/envoy/config/listener/v4alpha/BUILD new file mode 100644 index 000000000000..1d1761a3e941 --- /dev/null +++ b/api/envoy/config/listener/v4alpha/BUILD @@ -0,0 +1,15 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/accesslog/v4alpha:pkg", + "//envoy/config/core/v4alpha:pkg", + "//envoy/config/listener/v3:pkg", + "//envoy/type/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/config/listener/v4alpha/api_listener.proto b/api/envoy/config/listener/v4alpha/api_listener.proto new file mode 100644 index 000000000000..b8d076c36583 --- /dev/null +++ b/api/envoy/config/listener/v4alpha/api_listener.proto @@ -0,0 +1,32 @@ +syntax = "proto3"; + +package envoy.config.listener.v4alpha; + +import "google/protobuf/any.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.config.listener.v4alpha"; +option java_outer_classname = "ApiListenerProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: API listener] + +// Describes a type of API listener, which is used in non-proxy clients. The type of API +// exposed to the non-proxy application depends on the type of API listener. +message ApiListener { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.ApiListener"; + + // The type in this field determines the type of API listener. At present, the following + // types are supported: + // envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager (HTTP) + // [#next-major-version: In the v3 API, replace this Any field with a oneof containing the + // specific config message for each type of API listener. We could not do this in v2 because + // it would have caused circular dependencies for go protos: lds.proto depends on this file, + // and http_connection_manager.proto depends on rds.proto, which is in the same directory as + // lds.proto, so lds.proto cannot depend on this file.] 
+ google.protobuf.Any api_listener = 1; +} diff --git a/api/envoy/config/listener/v4alpha/listener.proto b/api/envoy/config/listener/v4alpha/listener.proto new file mode 100644 index 000000000000..4438bd2974d4 --- /dev/null +++ b/api/envoy/config/listener/v4alpha/listener.proto @@ -0,0 +1,241 @@ +syntax = "proto3"; + +package envoy.config.listener.v4alpha; + +import "envoy/config/accesslog/v4alpha/accesslog.proto"; +import "envoy/config/core/v4alpha/address.proto"; +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/core/v4alpha/socket_option.proto"; +import "envoy/config/listener/v4alpha/api_listener.proto"; +import "envoy/config/listener/v4alpha/listener_components.proto"; +import "envoy/config/listener/v4alpha/udp_listener_config.proto"; + +import "google/api/annotations.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.listener.v4alpha"; +option java_outer_classname = "ListenerProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Listener configuration] +// Listener :ref:`configuration overview ` + +// [#next-free-field: 23] +message Listener { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.listener.v3.Listener"; + + enum DrainType { + // Drain in response to calling /healthcheck/fail admin endpoint (along with the health check + // filter), listener removal/modification, and hot restart. + DEFAULT = 0; + + // Drain in response to listener removal/modification and hot restart. This setting does not + // include /healthcheck/fail. This setting may be desirable if Envoy is hosting both ingress + // and egress listeners. + MODIFY_ONLY = 1; + } + + // [#not-implemented-hide:] + message DeprecatedV1 { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.Listener.DeprecatedV1"; + + // Whether the listener should bind to the port. A listener that doesn't + // bind can only receive connections redirected from other listeners that + // set use_original_dst parameter to true. Default is true. + // + // This is deprecated in v2, all Listeners will bind to their port. An + // additional filter chain must be created for every original destination + // port this listener may redirect to in v2, with the original port + // specified in the FilterChainMatch destination_port field. + // + // [#comment:TODO(PiotrSikora): Remove this once verified that we no longer need it.] + google.protobuf.BoolValue bind_to_port = 1; + } + + // Configuration for listener connection balancing. + message ConnectionBalanceConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.Listener.ConnectionBalanceConfig"; + + // A connection balancer implementation that does exact balancing. This means that a lock is + // held during balancing so that connection counts are nearly exactly balanced between worker + // threads. This is "nearly" exact in the sense that a connection might close in parallel thus + // making the counts incorrect, but this should be rectified on the next accept. This balancer + // sacrifices accept throughput for accuracy and should be used when there are a small number of + // connections that rarely cycle (e.g., service mesh gRPC egress). 
+ message ExactBalance { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.Listener.ConnectionBalanceConfig.ExactBalance"; + } + + oneof balance_type { + option (validate.required) = true; + + // If specified, the listener will use the exact connection balancer. + ExactBalance exact_balance = 1; + } + } + + reserved 14, 4; + + reserved "use_original_dst"; + + // The unique name by which this listener is known. If no name is provided, + // Envoy will allocate an internal UUID for the listener. If the listener is to be dynamically + // updated or removed via :ref:`LDS ` a unique name must be provided. + string name = 1; + + // The address that the listener should listen on. In general, the address must be unique, though + // that is governed by the bind rules of the OS. E.g., multiple listeners can listen on port 0 on + // Linux as the actual port will be allocated by the OS. + core.v4alpha.Address address = 2 [(validate.rules).message = {required: true}]; + + // A list of filter chains to consider for this listener. The + // :ref:`FilterChain ` with the most specific + // :ref:`FilterChainMatch ` criteria is used on a + // connection. + // + // Example using SNI for filter chain selection can be found in the + // :ref:`FAQ entry `. + repeated FilterChain filter_chains = 3; + + // Soft limit on size of the listener’s new connection read and write buffers. + // If unspecified, an implementation defined default is applied (1MiB). + google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5; + + // Listener metadata. + core.v4alpha.Metadata metadata = 6; + + // [#not-implemented-hide:] + DeprecatedV1 deprecated_v1 = 7; + + // The type of draining to perform at a listener-wide level. + DrainType drain_type = 8; + + // Listener filters have the opportunity to manipulate and augment the connection metadata that + // is used in connection filter chain matching, for example. These filters are run before any in + // :ref:`filter_chains `. Order matters as the + // filters are processed sequentially right after a socket has been accepted by the listener, and + // before a connection is created. + // UDP Listener filters can be specified when the protocol in the listener socket address in + // :ref:`protocol ` is :ref:`UDP + // `. + // UDP listeners currently support a single filter. + repeated ListenerFilter listener_filters = 9; + + // The timeout to wait for all listener filters to complete operation. If the timeout is reached, + // the accepted socket is closed without a connection being created unless + // `continue_on_listener_filters_timeout` is set to true. Specify 0 to disable the + // timeout. If not specified, a default timeout of 15s is used. + google.protobuf.Duration listener_filters_timeout = 15; + + // Whether a connection should be created when listener filters timeout. Default is false. + // + // .. attention:: + // + // Some listener filters, such as :ref:`Proxy Protocol filter + // `, should not be used with this option. It will cause + // unexpected behavior when a connection is created. + bool continue_on_listener_filters_timeout = 17; + + // Whether the listener should be set as a transparent socket. + // When this flag is set to true, connections can be redirected to the listener using an + // *iptables* *TPROXY* target, in which case the original source and destination addresses and + // ports are preserved on accepted connections. 
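Taken together, the listener fields above correspond to configuration along these lines; the port, the TCP proxy filter, and the cluster name are illustrative assumptions rather than values implied by this patch:

.. code-block:: yaml

  listeners:
  - name: ingress_tcp
    address:
      socket_address: { address: 0.0.0.0, port_value: 10000 }
    # Listener filter timeout and behavior on timeout, as documented above.
    listener_filters_timeout: 15s
    continue_on_listener_filters_timeout: false
    filter_chains:
    - filters:
      - name: envoy.filters.network.tcp_proxy
        typed_config:
          "@type": type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy
          stat_prefix: ingress_tcp
          cluster: some_backend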
This flag should be used in combination with + // :ref:`an original_dst ` :ref:`listener filter + // ` to mark the connections' local addresses as + // "restored." This can be used to hand off each redirected connection to another listener + // associated with the connection's destination address. Direct connections to the socket without + // using *TPROXY* cannot be distinguished from connections redirected using *TPROXY* and are + // therefore treated as if they were redirected. + // When this flag is set to false, the listener's socket is explicitly reset as non-transparent. + // Setting this flag requires Envoy to run with the *CAP_NET_ADMIN* capability. + // When this flag is not set (default), the socket is not modified, i.e. the transparent option + // is neither set nor reset. + google.protobuf.BoolValue transparent = 10; + + // Whether the listener should set the *IP_FREEBIND* socket option. When this + // flag is set to true, listeners can be bound to an IP address that is not + // configured on the system running Envoy. When this flag is set to false, the + // option *IP_FREEBIND* is disabled on the socket. When this flag is not set + // (default), the socket is not modified, i.e. the option is neither enabled + // nor disabled. + google.protobuf.BoolValue freebind = 11; + + // Additional socket options that may not be present in Envoy source code or + // precompiled binaries. + repeated core.v4alpha.SocketOption socket_options = 13; + + // Whether the listener should accept TCP Fast Open (TFO) connections. + // When this flag is set to a value greater than 0, the option TCP_FASTOPEN is enabled on + // the socket, with a queue length of the specified size + // (see `details in RFC7413 `_). + // When this flag is set to 0, the option TCP_FASTOPEN is disabled on the socket. + // When this flag is not set (default), the socket is not modified, + // i.e. the option is neither enabled nor disabled. + // + // On Linux, the net.ipv4.tcp_fastopen kernel parameter must include flag 0x2 to enable + // TCP_FASTOPEN. + // See `ip-sysctl.txt `_. + // + // On macOS, only values of 0, 1, and unset are valid; other values may result in an error. + // To set the queue length on macOS, set the net.inet.tcp.fastopen_backlog kernel parameter. + google.protobuf.UInt32Value tcp_fast_open_queue_length = 12; + + // Specifies the intended direction of the traffic relative to the local Envoy. + core.v4alpha.TrafficDirection traffic_direction = 16; + + // If the protocol in the listener socket address in :ref:`protocol + // ` is :ref:`UDP + // `, this field specifies the actual udp + // listener to create, i.e. :ref:`udp_listener_name + // ` = "raw_udp_listener" for + // creating a packet-oriented UDP listener. If not present, treat it as "raw_udp_listener". + UdpListenerConfig udp_listener_config = 18; + + // Used to represent an API listener, which is used in non-proxy clients. The type of API + // exposed to the non-proxy application depends on the type of API listener. + // When this field is set, no other field except for :ref:`name` + // should be set. + // + // .. note:: + // + // Currently only one ApiListener can be installed; and it can only be done via bootstrap config, + // not LDS. 
+ // + // [#next-major-version: In the v3 API, instead of this messy approach where the socket + // listener fields are directly in the top-level Listener message and the API listener types + // are in the ApiListener message, the socket listener messages should be in their own message, + // and the top-level Listener should essentially be a oneof that selects between the + // socket listener and the various types of API listener. That way, a given Listener message + // can structurally only contain the fields of the relevant type.] + ApiListener api_listener = 19; + + // The listener's connection balancer configuration, currently only applicable to TCP listeners. + // If no configuration is specified, Envoy will not attempt to balance active connections between + // worker threads. + ConnectionBalanceConfig connection_balance_config = 20; + + // When this flag is set to true, listeners set the *SO_REUSEPORT* socket option and + // create one socket for each worker thread. This makes inbound connections + // distribute among worker threads roughly evenly in cases where there are a high number + // of connections. When this flag is set to false, all worker threads share one socket. + // + // Before Linux v4.19-rc1, new TCP connections may be rejected during hot restart + // (see `3rd paragraph in 'soreuseport' commit message + // `_). + // This issue was fixed by `tcp: Avoid TCP syncookie rejected by SO_REUSEPORT socket + // `_. + bool reuse_port = 21; + + // Configuration for :ref:`access logs ` + // emitted by this listener. + repeated accesslog.v4alpha.AccessLog access_log = 22; +} diff --git a/api/envoy/config/listener/v4alpha/listener_components.proto b/api/envoy/config/listener/v4alpha/listener_components.proto new file mode 100644 index 000000000000..6900cde39016 --- /dev/null +++ b/api/envoy/config/listener/v4alpha/listener_components.proto @@ -0,0 +1,298 @@ +syntax = "proto3"; + +package envoy.config.listener.v4alpha; + +import "envoy/config/core/v4alpha/address.proto"; +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/type/v3/range.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.listener.v4alpha"; +option java_outer_classname = "ListenerComponentsProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Listener components] +// Listener :ref:`configuration overview ` + +message Filter { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.listener.v3.Filter"; + + reserved 3, 2; + + reserved "config"; + + // The name of the filter to instantiate. The name must match a + // :ref:`supported filter `. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Filter specific configuration which depends on the filter being + // instantiated. See the supported filters for further documentation. + oneof config_type { + google.protobuf.Any typed_config = 4; + } +} + +// Specifies the match criteria for selecting a specific filter chain for a +// listener. +// +// In order for a filter chain to be selected, *ALL* of its criteria must be +// fulfilled by the incoming connection, properties of which are set by the +// networking stack and/or listener filters. 
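Before turning to the match criteria below, the listener-level connection balancing, port reuse, and access logging fields described above might be set like so (a sketch with assumed values, not part of this patch):

.. code-block:: yaml

  # Exact connection balancing across worker threads, per-worker listen
  # sockets via SO_REUSEPORT, and a listener-scoped access log.
  connection_balance_config:
    exact_balance: {}
  reuse_port: true
  access_log:
  - name: envoy.access_loggers.file
    typed_config:
      "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog
      path: /var/log/envoy/listener_access.log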
+// +// The following order applies: +// +// 1. Destination port. +// 2. Destination IP address. +// 3. Server name (e.g. SNI for TLS protocol), +// 4. Transport protocol. +// 5. Application protocols (e.g. ALPN for TLS protocol). +// 6. Source type (e.g. any, local or external network). +// 7. Source IP address. +// 8. Source port. +// +// For criteria that allow ranges or wildcards, the most specific value in any +// of the configured filter chains that matches the incoming connection is going +// to be used (e.g. for SNI ``www.example.com`` the most specific match would be +// ``www.example.com``, then ``*.example.com``, then ``*.com``, then any filter +// chain without ``server_names`` requirements). +// +// [#comment: Implemented rules are kept in the preference order, with deprecated fields +// listed at the end, because that's how we want to list them in the docs. +// +// [#comment:TODO(PiotrSikora): Add support for configurable precedence of the rules] +// [#next-free-field: 13] +message FilterChainMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.FilterChainMatch"; + + enum ConnectionSourceType { + // Any connection source matches. + ANY = 0; + + // Match a connection originating from the same host. + SAME_IP_OR_LOOPBACK = 1; + + // Match a connection originating from a different host. + EXTERNAL = 2; + } + + reserved 1; + + // Optional destination port to consider when use_original_dst is set on the + // listener in determining a filter chain match. + google.protobuf.UInt32Value destination_port = 8 [(validate.rules).uint32 = {lte: 65535 gte: 1}]; + + // If non-empty, an IP address and prefix length to match addresses when the + // listener is bound to 0.0.0.0/:: or when use_original_dst is specified. + repeated core.v4alpha.CidrRange prefix_ranges = 3; + + // If non-empty, an IP address and suffix length to match addresses when the + // listener is bound to 0.0.0.0/:: or when use_original_dst is specified. + // [#not-implemented-hide:] + string address_suffix = 4; + + // [#not-implemented-hide:] + google.protobuf.UInt32Value suffix_len = 5; + + // Specifies the connection source IP match type. Can be any, local or external network. + ConnectionSourceType source_type = 12 [(validate.rules).enum = {defined_only: true}]; + + // The criteria is satisfied if the source IP address of the downstream + // connection is contained in at least one of the specified subnets. If the + // parameter is not specified or the list is empty, the source IP address is + // ignored. + repeated core.v4alpha.CidrRange source_prefix_ranges = 6; + + // The criteria is satisfied if the source port of the downstream connection + // is contained in at least one of the specified ports. If the parameter is + // not specified, the source port is ignored. + repeated uint32 source_ports = 7 + [(validate.rules).repeated = {items {uint32 {lte: 65535 gte: 1}}}]; + + // If non-empty, a list of server names (e.g. SNI for TLS protocol) to consider when determining + // a filter chain match. Those values will be compared against the server names of a new + // connection, when detected by one of the listener filters. + // + // The server name will be matched against all wildcard domains, i.e. ``www.example.com`` + // will be first matched against ``www.example.com``, then ``*.example.com``, then ``*.com``. + // + // Note that partial wildcards are not supported, and values like ``*w.example.com`` are invalid. + // + // .. 
attention:: + // + // See the :ref:`FAQ entry ` on how to configure SNI for more + // information. + repeated string server_names = 11; + + // If non-empty, a transport protocol to consider when determining a filter chain match. + // This value will be compared against the transport protocol of a new connection, when + // it's detected by one of the listener filters. + // + // Suggested values include: + // + // * ``raw_buffer`` - default, used when no transport protocol is detected, + // * ``tls`` - set by :ref:`envoy.filters.listener.tls_inspector ` + // when TLS protocol is detected. + string transport_protocol = 9; + + // If non-empty, a list of application protocols (e.g. ALPN for TLS protocol) to consider when + // determining a filter chain match. Those values will be compared against the application + // protocols of a new connection, when detected by one of the listener filters. + // + // Suggested values include: + // + // * ``http/1.1`` - set by :ref:`envoy.filters.listener.tls_inspector + // `, + // * ``h2`` - set by :ref:`envoy.filters.listener.tls_inspector ` + // + // .. attention:: + // + // Currently, only :ref:`TLS Inspector ` provides + // application protocol detection based on the requested + // `ALPN `_ values. + // + // However, the use of ALPN is pretty much limited to the HTTP/2 traffic on the Internet, + // and matching on values other than ``h2`` is going to lead to a lot of false negatives, + // unless all connecting clients are known to use ALPN. + repeated string application_protocols = 10; +} + +// A filter chain wraps a set of match criteria, an option TLS context, a set of filters, and +// various other parameters. +// [#next-free-field: 8] +message FilterChain { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.FilterChain"; + + reserved 2; + + reserved "tls_context"; + + // The criteria to use when matching a connection to this filter chain. + FilterChainMatch filter_chain_match = 1; + + // A list of individual network filters that make up the filter chain for + // connections established with the listener. Order matters as the filters are + // processed sequentially as connection events happen. Note: If the filter + // list is empty, the connection will close by default. + repeated Filter filters = 3; + + // Whether the listener should expect a PROXY protocol V1 header on new + // connections. If this option is enabled, the listener will assume that that + // remote address of the connection is the one specified in the header. Some + // load balancers including the AWS ELB support this option. If the option is + // absent or set to false, Envoy will use the physical peer address of the + // connection as the remote address. + google.protobuf.BoolValue use_proxy_proto = 4; + + // [#not-implemented-hide:] filter chain metadata. + core.v4alpha.Metadata metadata = 5; + + // Optional custom transport socket implementation to use for downstream connections. + // To setup TLS, set a transport socket with name `tls` and + // :ref:`DownstreamTlsContext ` in the `typed_config`. + // If no transport socket configuration is specified, new connections + // will be set up with plaintext. + core.v4alpha.TransportSocket transport_socket = 6; + + // [#not-implemented-hide:] The unique name (or empty) by which this filter chain is known. If no + // name is provided, Envoy will allocate an internal UUID for the filter chain. If the filter + // chain is to be dynamically updated or removed via FCDS a unique name must be provided. 
+ string name = 7; +} + +// Listener filter chain match configuration. This is a recursive structure which allows complex +// nested match configurations to be built using various logical operators. +// +// Examples: +// +// * Matches if the destination port is 3306. +// +// .. code-block:: yaml +// +// destination_port_range: +// start: 3306 +// end: 3307 +// +// * Matches if the destination port is 3306 or 15000. +// +// .. code-block:: yaml +// +// or_match: +// rules: +// - destination_port_range: +// start: 3306 +// end: 3306 +// - destination_port_range: +// start: 15000 +// end: 15001 +// +// [#next-free-field: 6] +message ListenerFilterChainMatchPredicate { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.ListenerFilterChainMatchPredicate"; + + // A set of match configurations used for logical operations. + message MatchSet { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.ListenerFilterChainMatchPredicate.MatchSet"; + + // The list of rules that make up the set. + repeated ListenerFilterChainMatchPredicate rules = 1 + [(validate.rules).repeated = {min_items: 2}]; + } + + oneof rule { + option (validate.required) = true; + + // A set that describes a logical OR. If any member of the set matches, the match configuration + // matches. + MatchSet or_match = 1; + + // A set that describes a logical AND. If all members of the set match, the match configuration + // matches. + MatchSet and_match = 2; + + // A negation match. The match configuration will match if the negated match condition matches. + ListenerFilterChainMatchPredicate not_match = 3; + + // The match configuration will always match. + bool any_match = 4 [(validate.rules).bool = {const: true}]; + + // Match destination port. Particularly, the match evaluation must use the recovered local port if + // the owning listener filter is after :ref:`an original_dst listener filter `. + type.v3.Int32Range destination_port_range = 5; + } +} + +message ListenerFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.ListenerFilter"; + + reserved 2; + + reserved "config"; + + // The name of the filter to instantiate. The name must match a + // :ref:`supported filter `. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Filter specific configuration which depends on the filter being instantiated. + // See the supported filters for further documentation. + oneof config_type { + google.protobuf.Any typed_config = 3; + } + + // Optional match predicate used to disable the filter. The filter is enabled when this field is empty. + // See :ref:`ListenerFilterChainMatchPredicate ` + // for further examples. 
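+ //
+ // As an illustrative sketch (the filter name and port below are hypothetical), disabling the
+ // TLS inspector for connections arriving on port 80 could look like:
+ //
+ // .. code-block:: yaml
+ //
+ //   listener_filters:
+ //   - name: envoy.filters.listener.tls_inspector
+ //     filter_disabled:
+ //       destination_port_range:
+ //         start: 80
+ //         end: 81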
+ ListenerFilterChainMatchPredicate filter_disabled = 4; +} diff --git a/api/envoy/config/listener/v4alpha/quic_config.proto b/api/envoy/config/listener/v4alpha/quic_config.proto new file mode 100644 index 000000000000..97866e4b6ed8 --- /dev/null +++ b/api/envoy/config/listener/v4alpha/quic_config.proto @@ -0,0 +1,35 @@ +syntax = "proto3"; + +package envoy.config.listener.v4alpha; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.config.listener.v4alpha"; +option java_outer_classname = "QuicConfigProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: QUIC listener Config] + +// Configuration specific to the QUIC protocol. +// Next id: 4 +message QuicProtocolOptions { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.QuicProtocolOptions"; + + // Maximum number of streams that the client can negotiate per connection. 100 + // if not specified. + google.protobuf.UInt32Value max_concurrent_streams = 1; + + // Maximum number of milliseconds that connection will be alive when there is + // no network activity. 300000ms if not specified. + google.protobuf.Duration idle_timeout = 2; + + // Connection timeout in milliseconds before the crypto handshake is finished. + // 20000ms if not specified. + google.protobuf.Duration crypto_handshake_timeout = 3; +} diff --git a/api/envoy/config/listener/v4alpha/udp_listener_config.proto b/api/envoy/config/listener/v4alpha/udp_listener_config.proto new file mode 100644 index 000000000000..7e40e9529f99 --- /dev/null +++ b/api/envoy/config/listener/v4alpha/udp_listener_config.proto @@ -0,0 +1,42 @@ +syntax = "proto3"; + +package envoy.config.listener.v4alpha; + +import "google/protobuf/any.proto"; +import "google/protobuf/struct.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.config.listener.v4alpha"; +option java_outer_classname = "UdpListenerConfigProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: UDP Listener Config] +// Listener :ref:`configuration overview ` + +message UdpListenerConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.UdpListenerConfig"; + + reserved 2; + + reserved "config"; + + // Used to look up UDP listener factory, matches "raw_udp_listener" or + // "quic_listener" to create a specific udp listener. + // If not specified, treat as "raw_udp_listener". + string udp_listener_name = 1; + + // Used to create a specific listener factory. To some factory, e.g. + // "raw_udp_listener", config is not needed. + oneof config_type { + google.protobuf.Any typed_config = 3; + } +} + +message ActiveRawUdpListenerConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.ActiveRawUdpListenerConfig"; +} diff --git a/api/envoy/config/metrics/v4alpha/BUILD b/api/envoy/config/metrics/v4alpha/BUILD new file mode 100644 index 000000000000..4b70ffb4110a --- /dev/null +++ b/api/envoy/config/metrics/v4alpha/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v4alpha:pkg", + "//envoy/config/metrics/v3:pkg", + "//envoy/type/matcher/v4alpha:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/config/metrics/v4alpha/metrics_service.proto b/api/envoy/config/metrics/v4alpha/metrics_service.proto new file mode 100644 index 000000000000..e4da16c56bfd --- /dev/null +++ b/api/envoy/config/metrics/v4alpha/metrics_service.proto @@ -0,0 +1,36 @@ +syntax = "proto3"; + +package envoy.config.metrics.v4alpha; + +import "envoy/config/core/v4alpha/grpc_service.proto"; + +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.metrics.v4alpha"; +option java_outer_classname = "MetricsServiceProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Metrics service] + +// Metrics Service is configured as a built-in *envoy.stat_sinks.metrics_service* :ref:`StatsSink +// `. This opaque configuration will be used to create +// Metrics Service. +// [#extension: envoy.stat_sinks.metrics_service] +message MetricsServiceConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.metrics.v3.MetricsServiceConfig"; + + // The upstream gRPC cluster that hosts the metrics service. + core.v4alpha.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}]; + + // If true, counters are reported as the delta between flushing intervals. Otherwise, the current + // counter value is reported. Defaults to false. + // Eventually (https://github.com/envoyproxy/envoy/issues/10968) if this value is not set, the + // sink will take updates from the :ref:`MetricsResponse `. + google.protobuf.BoolValue report_counters_as_deltas = 2; +} diff --git a/api/envoy/config/metrics/v4alpha/stats.proto b/api/envoy/config/metrics/v4alpha/stats.proto new file mode 100644 index 000000000000..f9a4549746c6 --- /dev/null +++ b/api/envoy/config/metrics/v4alpha/stats.proto @@ -0,0 +1,361 @@ +syntax = "proto3"; + +package envoy.config.metrics.v4alpha; + +import "envoy/config/core/v4alpha/address.proto"; +import "envoy/type/matcher/v4alpha/string.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.metrics.v4alpha"; +option java_outer_classname = "StatsProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Stats] +// Statistics :ref:`architecture overview `. + +// Configuration for pluggable stats sinks. +message StatsSink { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.metrics.v3.StatsSink"; + + reserved 2; + + reserved "config"; + + // The name of the stats sink to instantiate. The name must match a supported + // stats sink. 
The built-in stats sinks are: + // + // * :ref:`envoy.stat_sinks.statsd ` + // * :ref:`envoy.stat_sinks.dog_statsd ` + // * :ref:`envoy.stat_sinks.metrics_service ` + // * :ref:`envoy.stat_sinks.hystrix ` + // + // Sinks optionally support tagged/multiple dimensional metrics. + string name = 1; + + // Stats sink specific configuration which depends on the sink being instantiated. See + // :ref:`StatsdSink ` for an example. + oneof config_type { + google.protobuf.Any typed_config = 3; + } +} + +// Statistics configuration such as tagging. +message StatsConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.metrics.v3.StatsConfig"; + + // Each stat name is iteratively processed through these tag specifiers. + // When a tag is matched, the first capture group is removed from the name so + // later :ref:`TagSpecifiers ` cannot match that + // same portion of the match. + repeated TagSpecifier stats_tags = 1; + + // Use all default tag regexes specified in Envoy. These can be combined with + // custom tags specified in :ref:`stats_tags + // `. They will be processed before + // the custom tags. + // + // .. note:: + // + // If any default tags are specified twice, the config will be considered + // invalid. + // + // See :repo:`well_known_names.h ` for a list of the + // default tags in Envoy. + // + // If not provided, the value is assumed to be true. + google.protobuf.BoolValue use_all_default_tags = 2; + + // Inclusion/exclusion matcher for stat name creation. If not provided, all stats are instantiated + // as normal. Preventing the instantiation of certain families of stats can improve memory + // performance for Envoys running especially large configs. + // + // .. warning:: + // Excluding stats may affect Envoy's behavior in undocumented ways. See + // `issue #8771 `_ for more information. + // If any unexpected behavior changes are observed, please open a new issue immediately. + StatsMatcher stats_matcher = 3; +} + +// Configuration for disabling stat instantiation. +message StatsMatcher { + // The instantiation of stats is unrestricted by default. If the goal is to configure Envoy to + // instantiate all stats, there is no need to construct a StatsMatcher. + // + // However, StatsMatcher can be used to limit the creation of families of stats in order to + // conserve memory. Stats can either be disabled entirely, or they can be + // limited by either an exclusion or an inclusion list of :ref:`StringMatcher + // ` protos: + // + // * If `reject_all` is set to `true`, no stats will be instantiated. If `reject_all` is set to + // `false`, all stats will be instantiated. + // + // * If an exclusion list is supplied, any stat name matching *any* of the StringMatchers in the + // list will not instantiate. + // + // * If an inclusion list is supplied, no stats will instantiate, except those matching *any* of + // the StringMatchers in the list. + // + // + // A StringMatcher can be used to match against an exact string, a suffix / prefix, or a regex. + // **NB:** For performance reasons, it is highly recommended to use a prefix- or suffix-based + // matcher rather than a regex-based matcher. + // + // Example 1. Excluding all stats. + // + // .. code-block:: json + // + // { + // "statsMatcher": { + // "rejectAll": "true" + // } + // } + // + // Example 2. Excluding all cluster-specific stats, but not cluster-manager stats: + // + // .. 
code-block:: json + // + // { + // "statsMatcher": { + // "exclusionList": { + // "patterns": [ + // { + // "prefix": "cluster." + // } + // ] + // } + // } + // } + // + // Example 3. Including only manager-related stats: + // + // .. code-block:: json + // + // { + // "statsMatcher": { + // "inclusionList": { + // "patterns": [ + // { + // "prefix": "cluster_manager." + // }, + // { + // "prefix": "listener_manager." + // } + // ] + // } + // } + // } + // + + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.metrics.v3.StatsMatcher"; + + oneof stats_matcher { + option (validate.required) = true; + + // If `reject_all` is true, then all stats are disabled. If `reject_all` is false, then all + // stats are enabled. + bool reject_all = 1; + + // Exclusive match. All stats are enabled except for those matching one of the supplied + // StringMatcher protos. + type.matcher.v4alpha.ListStringMatcher exclusion_list = 2; + + // Inclusive match. No stats are enabled except for those matching one of the supplied + // StringMatcher protos. + type.matcher.v4alpha.ListStringMatcher inclusion_list = 3; + } +} + +// Designates a tag name and value pair. The value may be either a fixed value +// or a regex providing the value via capture groups. The specified tag will be +// unconditionally set if a fixed value, otherwise it will only be set if one +// or more capture groups in the regex match. +message TagSpecifier { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.metrics.v3.TagSpecifier"; + + // Attaches an identifier to the tag values to identify the tag being in the + // sink. Envoy has a set of default names and regexes to extract dynamic + // portions of existing stats, which can be found in :repo:`well_known_names.h + // ` in the Envoy repository. If a :ref:`tag_name + // ` is provided in the config and + // neither :ref:`regex ` or + // :ref:`fixed_value ` were specified, + // Envoy will attempt to find that name in its set of defaults and use the accompanying regex. + // + // .. note:: + // + // It is invalid to specify the same tag name twice in a config. + string tag_name = 1; + + oneof tag_value { + // Designates a tag to strip from the tag extracted name and provide as a named + // tag value for all statistics. This will only occur if any part of the name + // matches the regex provided with one or more capture groups. + // + // The first capture group identifies the portion of the name to remove. The + // second capture group (which will normally be nested inside the first) will + // designate the value of the tag for the statistic. If no second capture + // group is provided, the first will also be used to set the value of the tag. + // All other capture groups will be ignored. + // + // Example 1. a stat name ``cluster.foo_cluster.upstream_rq_timeout`` and + // one tag specifier: + // + // .. code-block:: json + // + // { + // "tag_name": "envoy.cluster_name", + // "regex": "^cluster\.((.+?)\.)" + // } + // + // Note that the regex will remove ``foo_cluster.`` making the tag extracted + // name ``cluster.upstream_rq_timeout`` and the tag value for + // ``envoy.cluster_name`` will be ``foo_cluster`` (note: there will be no + // ``.`` character because of the second capture group). + // + // Example 2. a stat name + // ``http.connection_manager_1.user_agent.ios.downstream_cx_total`` and two + // tag specifiers: + // + // .. 
code-block:: json + // + // [ + // { + // "tag_name": "envoy.http_user_agent", + // "regex": "^http(?=\.).*?\.user_agent\.((.+?)\.)\w+?$" + // }, + // { + // "tag_name": "envoy.http_conn_manager_prefix", + // "regex": "^http\.((.*?)\.)" + // } + // ] + // + // The two regexes of the specifiers will be processed in the definition order. + // + // The first regex will remove ``ios.``, leaving the tag extracted name + // ``http.connection_manager_1.user_agent.downstream_cx_total``. The tag + // ``envoy.http_user_agent`` will be added with tag value ``ios``. + // + // The second regex will remove ``connection_manager_1.`` from the tag + // extracted name produced by the first regex + // ``http.connection_manager_1.user_agent.downstream_cx_total``, leaving + // ``http.user_agent.downstream_cx_total`` as the tag extracted name. The tag + // ``envoy.http_conn_manager_prefix`` will be added with the tag value + // ``connection_manager_1``. + string regex = 2 [(validate.rules).string = {max_bytes: 1024}]; + + // Specifies a fixed tag value for the ``tag_name``. + string fixed_value = 3; + } +} + +// Stats configuration proto schema for built-in *envoy.stat_sinks.statsd* sink. This sink does not support +// tagged metrics. +// [#extension: envoy.stat_sinks.statsd] +message StatsdSink { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.metrics.v3.StatsdSink"; + + oneof statsd_specifier { + option (validate.required) = true; + + // The UDP address of a running `statsd `_ + // compliant listener. If specified, statistics will be flushed to this + // address. + core.v4alpha.Address address = 1; + + // The name of a cluster that is running a TCP `statsd + // `_ compliant listener. If specified, + // Envoy will connect to this cluster to flush statistics. + string tcp_cluster_name = 2; + } + + // Optional custom prefix for StatsdSink. If + // specified, this will override the default prefix. + // For example: + // + // .. code-block:: json + // + // { + // "prefix" : "envoy-prod" + // } + // + // will change emitted stats to + // + // .. code-block:: cpp + // + // envoy-prod.test_counter:1|c + // envoy-prod.test_timer:5|ms + // + // Note that the default prefix, "envoy", will be used if a prefix is not + // specified. + // + // Stats with default prefix: + // + // .. code-block:: cpp + // + // envoy.test_counter:1|c + // envoy.test_timer:5|ms + string prefix = 3; +} + +// Stats configuration proto schema for built-in *envoy.stat_sinks.dog_statsd* sink. +// The sink emits stats with `DogStatsD `_ +// compatible tags. Tags are configurable via :ref:`StatsConfig +// `. +// [#extension: envoy.stat_sinks.dog_statsd] +message DogStatsdSink { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.metrics.v3.DogStatsdSink"; + + reserved 2; + + oneof dog_statsd_specifier { + option (validate.required) = true; + + // The UDP address of a running DogStatsD compliant listener. If specified, + // statistics will be flushed to this address. + core.v4alpha.Address address = 1; + } + + // Optional custom metric name prefix. See :ref:`StatsdSink's prefix field + // ` for more details. + string prefix = 3; +} + +// Stats configuration proto schema for built-in *envoy.stat_sinks.hystrix* sink. +// The sink emits stats in `text/event-stream +// `_ +// formatted stream for use by `Hystrix dashboard +// `_. +// +// Note that only a single HystrixSink should be configured. +// +// Streaming is started through an admin endpoint :http:get:`/hystrix_event_stream`. 
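+//
+// As an illustrative sketch (the bucket count is arbitrary and the v3 type URL is used only for
+// concreteness), the sink could be added to the bootstrap ``stats_sinks`` list as:
+//
+// .. code-block:: yaml
+//
+//   stats_sinks:
+//   - name: envoy.stat_sinks.hystrix
+//     typed_config:
+//       "@type": type.googleapis.com/envoy.config.metrics.v3.HystrixSink
+//       num_buckets: 10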
+// [#extension: envoy.stat_sinks.hystrix] +message HystrixSink { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.metrics.v3.HystrixSink"; + + // The number of buckets the rolling statistical window is divided into. + // + // Each time the sink is flushed, all relevant Envoy statistics are sampled and + // added to the rolling window (removing the oldest samples in the window + // in the process). The sink then outputs the aggregate statistics across the + // current rolling window to the event stream(s). + // + // rolling_window(ms) = stats_flush_interval(ms) * num_of_buckets + // + // More detailed explanation can be found in `Hystrix wiki + // `_. + int64 num_buckets = 1; +} diff --git a/api/envoy/config/rbac/v4alpha/BUILD b/api/envoy/config/rbac/v4alpha/BUILD index dbfa8be4f36f..f0707bae6eae 100644 --- a/api/envoy/config/rbac/v4alpha/BUILD +++ b/api/envoy/config/rbac/v4alpha/BUILD @@ -9,7 +9,7 @@ api_proto_package( "//envoy/config/core/v4alpha:pkg", "//envoy/config/rbac/v3:pkg", "//envoy/config/route/v4alpha:pkg", - "//envoy/type/matcher/v3:pkg", + "//envoy/type/matcher/v4alpha:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto", ], diff --git a/api/envoy/config/rbac/v4alpha/rbac.proto b/api/envoy/config/rbac/v4alpha/rbac.proto index 097231282f45..3ca9f7f08a72 100644 --- a/api/envoy/config/rbac/v4alpha/rbac.proto +++ b/api/envoy/config/rbac/v4alpha/rbac.proto @@ -4,9 +4,9 @@ package envoy.config.rbac.v4alpha; import "envoy/config/core/v4alpha/address.proto"; import "envoy/config/route/v4alpha/route_components.proto"; -import "envoy/type/matcher/v3/metadata.proto"; -import "envoy/type/matcher/v3/path.proto"; -import "envoy/type/matcher/v3/string.proto"; +import "envoy/type/matcher/v4alpha/metadata.proto"; +import "envoy/type/matcher/v4alpha/path.proto"; +import "envoy/type/matcher/v4alpha/string.proto"; import "google/api/expr/v1alpha1/syntax.proto"; @@ -140,7 +140,7 @@ message Permission { route.v4alpha.HeaderMatcher header = 4; // A URL path on the incoming HTTP request. Only available for HTTP. - type.matcher.v3.PathMatcher url_path = 10; + type.matcher.v4alpha.PathMatcher url_path = 10; // A CIDR block that describes the destination IP. core.v4alpha.CidrRange destination_ip = 5; @@ -149,7 +149,7 @@ message Permission { uint32 destination_port = 6 [(validate.rules).uint32 = {lte: 65535}]; // Metadata that describes additional information about the action. - type.matcher.v3.MetadataMatcher metadata = 7; + type.matcher.v4alpha.MetadataMatcher metadata = 7; // Negates matching the provided permission. For instance, if the value of `not_rule` would // match, this permission would not match. Conversely, if the value of `not_rule` would not @@ -166,7 +166,7 @@ message Permission { // // * If the :ref:`TLS Inspector ` // filter is not added, and if a `FilterChainMatch` is not defined for - // the :ref:`server name `, + // the :ref:`server name `, // a TLS connection's requested SNI server name will be treated as if it // wasn't present. // @@ -175,7 +175,7 @@ message Permission { // // Please refer to :ref:`this FAQ entry ` to learn to // setup SNI. - type.matcher.v3.StringMatcher requested_server_name = 9; + type.matcher.v4alpha.StringMatcher requested_server_name = 9; } } @@ -203,7 +203,7 @@ message Principal { // The name of the principal. If set, The URI SAN or DNS SAN in that order is used from the // certificate, otherwise the subject field is used. 
If unset, it applies to any user that is // authenticated. - type.matcher.v3.StringMatcher principal_name = 2; + type.matcher.v4alpha.StringMatcher principal_name = 2; } reserved 5; @@ -245,10 +245,10 @@ message Principal { route.v4alpha.HeaderMatcher header = 6; // A URL path on the incoming HTTP request. Only available for HTTP. - type.matcher.v3.PathMatcher url_path = 9; + type.matcher.v4alpha.PathMatcher url_path = 9; // Metadata that describes additional information about the principal. - type.matcher.v3.MetadataMatcher metadata = 7; + type.matcher.v4alpha.MetadataMatcher metadata = 7; // Negates matching the provided principal. For instance, if the value of `not_id` would match, // this principal would not match. Conversely, if the value of `not_id` would not match, this diff --git a/api/envoy/config/route/v4alpha/BUILD b/api/envoy/config/route/v4alpha/BUILD index 507bedd76bdf..13dd451d1b4a 100644 --- a/api/envoy/config/route/v4alpha/BUILD +++ b/api/envoy/config/route/v4alpha/BUILD @@ -9,7 +9,7 @@ api_proto_package( "//envoy/annotations:pkg", "//envoy/config/core/v4alpha:pkg", "//envoy/config/route/v3:pkg", - "//envoy/type/matcher/v3:pkg", + "//envoy/type/matcher/v4alpha:pkg", "//envoy/type/tracing/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", diff --git a/api/envoy/config/route/v4alpha/route_components.proto b/api/envoy/config/route/v4alpha/route_components.proto index 5fb31112b34e..6e1b1f9f5a0a 100644 --- a/api/envoy/config/route/v4alpha/route_components.proto +++ b/api/envoy/config/route/v4alpha/route_components.proto @@ -4,8 +4,8 @@ package envoy.config.route.v4alpha; import "envoy/config/core/v4alpha/base.proto"; import "envoy/config/core/v4alpha/proxy_protocol.proto"; -import "envoy/type/matcher/v3/regex.proto"; -import "envoy/type/matcher/v3/string.proto"; +import "envoy/type/matcher/v4alpha/regex.proto"; +import "envoy/type/matcher/v4alpha/string.proto"; import "envoy/type/tracing/v3/custom_tag.proto"; import "envoy/type/v3/percent.proto"; import "envoy/type/v3/range.proto"; @@ -143,7 +143,7 @@ message VirtualHost { // will see the attempt count as perceived by the second Envoy. Defaults to false. // This header is unaffected by the // :ref:`suppress_envoy_headers - // ` flag. + // ` flag. // // [#next-major-version: rename to include_attempt_count_in_request.] bool include_request_attempt_count = 14; @@ -155,7 +155,7 @@ message VirtualHost { // will see the attempt count as perceived by the Envoy closest upstream from itself. Defaults to false. // This header is unaffected by the // :ref:`suppress_envoy_headers - // ` flag. + // ` flag. bool include_attempt_count_in_response = 19; // Indicates the retry policy for all routes in this virtual host. Note that setting a @@ -428,7 +428,7 @@ message RouteMatch { // path_specifier entirely and just rely on a set of header matchers which can already match // on :path, etc. The issue with that is it is unclear how to generically deal with query string // stripping. This needs more thought.] - type.matcher.v3.RegexMatcher safe_regex = 10 [(validate.rules).message = {required: true}]; + type.matcher.v4alpha.RegexMatcher safe_regex = 10 [(validate.rules).message = {required: true}]; // [#not-implemented-hide:] // If this is used as the matcher, the matcher will only match CONNECT requests. @@ -499,7 +499,7 @@ message CorsPolicy { // Specifies string patterns that match allowed origins. An origin is allowed if any of the // string matchers match. 
- repeated type.matcher.v3.StringMatcher allow_origin_string_match = 11; + repeated type.matcher.v4alpha.StringMatcher allow_origin_string_match = 11; // Specifies the content for the *access-control-allow-methods* header. string allow_methods = 2; @@ -855,7 +855,7 @@ message RouteAction { // * The pattern ``(?i)/xxx/`` paired with a substitution string of ``/yyy/`` // would do a case-insensitive match and transform path ``/aaa/XxX/bbb`` to // ``/aaa/yyy/bbb``. - type.matcher.v3.RegexMatchAndSubstitute regex_rewrite = 32; + type.matcher.v4alpha.RegexMatchAndSubstitute regex_rewrite = 32; oneof host_rewrite_specifier { // Indicates that during forwarding, the host header will be swapped with @@ -1534,7 +1534,7 @@ message HeaderMatcher { // If specified, this regex string is a regular expression rule which implies the entire request // header value must match the regex. The rule will not match if only a subsequence of the // request header value matches the regex. - type.matcher.v3.RegexMatcher safe_regex_match = 11; + type.matcher.v4alpha.RegexMatcher safe_regex_match = 11; // If specified, header match will be performed based on range. // The rule will match if the request header value is within this range. @@ -1596,7 +1596,8 @@ message QueryParameterMatcher { oneof query_parameter_match_specifier { // Specifies whether a query parameter value should match against a string. - type.matcher.v3.StringMatcher string_match = 5 [(validate.rules).message = {required: true}]; + type.matcher.v4alpha.StringMatcher string_match = 5 + [(validate.rules).message = {required: true}]; // Specifies whether a query parameter should be present. bool present_match = 6; diff --git a/api/envoy/config/tap/v4alpha/BUILD b/api/envoy/config/tap/v4alpha/BUILD new file mode 100644 index 000000000000..cb06389f0186 --- /dev/null +++ b/api/envoy/config/tap/v4alpha/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v4alpha:pkg", + "//envoy/config/route/v4alpha:pkg", + "//envoy/config/tap/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/config/tap/v4alpha/common.proto b/api/envoy/config/tap/v4alpha/common.proto new file mode 100644 index 000000000000..b8e8dac291f3 --- /dev/null +++ b/api/envoy/config/tap/v4alpha/common.proto @@ -0,0 +1,225 @@ +syntax = "proto3"; + +package envoy.config.tap.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/core/v4alpha/grpc_service.proto"; +import "envoy/config/route/v4alpha/route_components.proto"; + +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.tap.v4alpha"; +option java_outer_classname = "CommonProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Common tap configuration] + +// Tap configuration. +message TapConfig { + // [#comment:TODO(mattklein123): Rate limiting] + + option (udpa.annotations.versioning).previous_message_type = "envoy.config.tap.v3.TapConfig"; + + // The match configuration. If the configuration matches the data source being tapped, a tap will + // occur, with the result written to the configured output. 
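+ //
+ // As an illustrative sketch (the output path is hypothetical), a tap that matches every request
+ // and writes one file per tapped stream could be configured as:
+ //
+ // .. code-block:: yaml
+ //
+ //   match_config:
+ //     any_match: true
+ //   output_config:
+ //     sinks:
+ //     - format: PROTO_BINARY
+ //       file_per_tap:
+ //         path_prefix: /tmp/tap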
+ MatchPredicate match_config = 1 [(validate.rules).message = {required: true}]; + + // The tap output configuration. If a match configuration matches a data source being tapped, + // a tap will occur and the data will be written to the configured output. + OutputConfig output_config = 2 [(validate.rules).message = {required: true}]; + + // [#not-implemented-hide:] Specify if Tap matching is enabled. The % of requests\connections for + // which the tap matching is enabled. When not enabled, the request\connection will not be + // recorded. + // + // .. note:: + // + // This field defaults to 100/:ref:`HUNDRED + // `. + core.v4alpha.RuntimeFractionalPercent tap_enabled = 3; +} + +// Tap match configuration. This is a recursive structure which allows complex nested match +// configurations to be built using various logical operators. +// [#next-free-field: 9] +message MatchPredicate { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.tap.v3.MatchPredicate"; + + // A set of match configurations used for logical operations. + message MatchSet { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.tap.v3.MatchPredicate.MatchSet"; + + // The list of rules that make up the set. + repeated MatchPredicate rules = 1 [(validate.rules).repeated = {min_items: 2}]; + } + + oneof rule { + option (validate.required) = true; + + // A set that describes a logical OR. If any member of the set matches, the match configuration + // matches. + MatchSet or_match = 1; + + // A set that describes a logical AND. If all members of the set match, the match configuration + // matches. + MatchSet and_match = 2; + + // A negation match. The match configuration will match if the negated match condition matches. + MatchPredicate not_match = 3; + + // The match configuration will always match. + bool any_match = 4 [(validate.rules).bool = {const: true}]; + + // HTTP request headers match configuration. + HttpHeadersMatch http_request_headers_match = 5; + + // HTTP request trailers match configuration. + HttpHeadersMatch http_request_trailers_match = 6; + + // HTTP response headers match configuration. + HttpHeadersMatch http_response_headers_match = 7; + + // HTTP response trailers match configuration. + HttpHeadersMatch http_response_trailers_match = 8; + } +} + +// HTTP headers match configuration. +message HttpHeadersMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.tap.v3.HttpHeadersMatch"; + + // HTTP headers to match. + repeated route.v4alpha.HeaderMatcher headers = 1; +} + +// Tap output configuration. +message OutputConfig { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.tap.v3.OutputConfig"; + + // Output sinks for tap data. Currently a single sink is allowed in the list. Once multiple + // sink types are supported this constraint will be relaxed. + repeated OutputSink sinks = 1 [(validate.rules).repeated = {min_items: 1 max_items: 1}]; + + // For buffered tapping, the maximum amount of received body that will be buffered prior to + // truncation. If truncation occurs, the :ref:`truncated + // ` field will be set. If not specified, the + // default is 1KiB. + google.protobuf.UInt32Value max_buffered_rx_bytes = 2; + + // For buffered tapping, the maximum amount of transmitted body that will be buffered prior to + // truncation. If truncation occurs, the :ref:`truncated + // ` field will be set. If not specified, the + // default is 1KiB. 
+ google.protobuf.UInt32Value max_buffered_tx_bytes = 3; + + // Indicates whether taps produce a single buffered message per tap, or multiple streamed + // messages per tap in the emitted :ref:`TraceWrapper + // ` messages. Note that streamed tapping does not + // mean that no buffering takes place. Buffering may be required if data is processed before a + // match can be determined. See the HTTP tap filter :ref:`streaming + // ` documentation for more information. + bool streaming = 4; +} + +// Tap output sink configuration. +message OutputSink { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.tap.v3.OutputSink"; + + // Output format. All output is in the form of one or more :ref:`TraceWrapper + // ` messages. This enumeration indicates + // how those messages are written. Note that not all sinks support all output formats. See + // individual sink documentation for more information. + enum Format { + // Each message will be written as JSON. Any :ref:`body ` + // data will be present in the :ref:`as_bytes + // ` field. This means that body data will be + // base64 encoded as per the `proto3 JSON mappings + // `_. + JSON_BODY_AS_BYTES = 0; + + // Each message will be written as JSON. Any :ref:`body ` + // data will be present in the :ref:`as_string + // ` field. This means that body data will be + // string encoded as per the `proto3 JSON mappings + // `_. This format type is + // useful when it is known that that body is human readable (e.g., JSON over HTTP) and the + // user wishes to view it directly without being forced to base64 decode the body. + JSON_BODY_AS_STRING = 1; + + // Binary proto format. Note that binary proto is not self-delimiting. If a sink writes + // multiple binary messages without any length information the data stream will not be + // useful. However, for certain sinks that are self-delimiting (e.g., one message per file) + // this output format makes consumption simpler. + PROTO_BINARY = 2; + + // Messages are written as a sequence tuples, where each tuple is the message length encoded + // as a `protobuf 32-bit varint + // `_ + // followed by the binary message. The messages can be read back using the language specific + // protobuf coded stream implementation to obtain the message length and the message. + PROTO_BINARY_LENGTH_DELIMITED = 3; + + // Text proto format. + PROTO_TEXT = 4; + } + + // Sink output format. + Format format = 1 [(validate.rules).enum = {defined_only: true}]; + + oneof output_sink_type { + option (validate.required) = true; + + // Tap output will be streamed out the :http:post:`/tap` admin endpoint. + // + // .. attention:: + // + // It is only allowed to specify the streaming admin output sink if the tap is being + // configured from the :http:post:`/tap` admin endpoint. Thus, if an extension has + // been configured to receive tap configuration from some other source (e.g., static + // file, XDS, etc.) configuring the streaming admin output type will fail. + StreamingAdminSink streaming_admin = 2; + + // Tap output will be written to a file per tap sink. + FilePerTapSink file_per_tap = 3; + + // [#not-implemented-hide:] + // GrpcService to stream data to. The format argument must be PROTO_BINARY. + StreamingGrpcSink streaming_grpc = 4; + } +} + +// Streaming admin sink configuration. +message StreamingAdminSink { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.tap.v3.StreamingAdminSink"; +} + +// The file per tap sink outputs a discrete file for every tapped stream. 
+message FilePerTapSink { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.tap.v3.FilePerTapSink"; + + // Path prefix. The output file will be of the form _.pb, where is an + // identifier distinguishing the recorded trace for stream instances (the Envoy + // connection ID, HTTP stream ID, etc.). + string path_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; +} + +// [#not-implemented-hide:] Streaming gRPC sink configuration sends the taps to an external gRPC +// server. +message StreamingGrpcSink { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.tap.v3.StreamingGrpcSink"; + + // Opaque identifier, that will be sent back to the streaming grpc server. + string tap_id = 1; + + // The gRPC server that hosts the Tap Sink Service. + core.v4alpha.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}]; +} diff --git a/api/envoy/data/dns/v4alpha/BUILD b/api/envoy/data/dns/v4alpha/BUILD new file mode 100644 index 000000000000..bc8958ceab0b --- /dev/null +++ b/api/envoy/data/dns/v4alpha/BUILD @@ -0,0 +1,13 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/data/dns/v3:pkg", + "//envoy/type/matcher/v4alpha:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/data/dns/v4alpha/dns_table.proto b/api/envoy/data/dns/v4alpha/dns_table.proto new file mode 100644 index 000000000000..83edc20088de --- /dev/null +++ b/api/envoy/data/dns/v4alpha/dns_table.proto @@ -0,0 +1,85 @@ +syntax = "proto3"; + +package envoy.data.dns.v4alpha; + +import "envoy/type/matcher/v4alpha/string.proto"; + +import "google/protobuf/duration.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.data.dns.v4alpha"; +option java_outer_classname = "DnsTableProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: DNS Filter Table Data] +// :ref:`DNS Filter config overview `. + +// This message contains the configuration for the DNS Filter if populated +// from the control plane +message DnsTable { + option (udpa.annotations.versioning).previous_message_type = "envoy.data.dns.v3.DnsTable"; + + // This message contains a list of IP addresses returned for a query for a known name + message AddressList { + option (udpa.annotations.versioning).previous_message_type = + "envoy.data.dns.v3.DnsTable.AddressList"; + + // This field contains a well formed IP address that is returned + // in the answer for a name query. The address field can be an + // IPv4 or IPv6 address. Address family detection is done automatically + // when Envoy parses the string. Since this field is repeated, + // Envoy will return one randomly chosen entry from this list in the + // DNS response. 
The random index will vary per query so that we prevent + // clients pinning on a single address for a configured domain + repeated string address = 1 [(validate.rules).repeated = { + min_items: 1 + items {string {min_len: 3}} + }]; + } + + // This message type is extensible and can contain a list of addresses + // or dictate some other method for resolving the addresses for an + // endpoint + message DnsEndpoint { + option (udpa.annotations.versioning).previous_message_type = + "envoy.data.dns.v3.DnsTable.DnsEndpoint"; + + oneof endpoint_config { + option (validate.required) = true; + + AddressList address_list = 1; + } + } + + message DnsVirtualDomain { + option (udpa.annotations.versioning).previous_message_type = + "envoy.data.dns.v3.DnsTable.DnsVirtualDomain"; + + // The domain name for which Envoy will respond to query requests + string name = 1 [(validate.rules).string = {min_len: 2 well_known_regex: HTTP_HEADER_NAME}]; + + // The configuration containing the method to determine the address + // of this endpoint + DnsEndpoint endpoint = 2; + + // Sets the TTL in dns answers from Envoy returned to the client + google.protobuf.Duration answer_ttl = 3 [(validate.rules).duration = {gt {}}]; + } + + // Control how many times envoy makes an attempt to forward a query to + // an external server + uint32 external_retry_count = 1; + + // Fully qualified domain names for which Envoy will respond to queries + repeated DnsVirtualDomain virtual_domains = 2 [(validate.rules).repeated = {min_items: 1}]; + + // This field serves to help Envoy determine whether it can authoritatively + // answer a query for a name matching a suffix in this list. If the query + // name does not match a suffix in this list, Envoy will forward + // the query to an upstream DNS server + repeated type.matcher.v4alpha.StringMatcher known_suffixes = 3; +} diff --git a/api/envoy/extensions/common/tap/v4alpha/BUILD b/api/envoy/extensions/common/tap/v4alpha/BUILD index d1fe49142a8e..a6fffecd9621 100644 --- a/api/envoy/extensions/common/tap/v4alpha/BUILD +++ b/api/envoy/extensions/common/tap/v4alpha/BUILD @@ -7,7 +7,7 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/core/v4alpha:pkg", - "//envoy/config/tap/v3:pkg", + "//envoy/config/tap/v4alpha:pkg", "//envoy/extensions/common/tap/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], diff --git a/api/envoy/extensions/common/tap/v4alpha/common.proto b/api/envoy/extensions/common/tap/v4alpha/common.proto index 63de14a3d6f6..f37889b90212 100644 --- a/api/envoy/extensions/common/tap/v4alpha/common.proto +++ b/api/envoy/extensions/common/tap/v4alpha/common.proto @@ -3,7 +3,7 @@ syntax = "proto3"; package envoy.extensions.common.tap.v4alpha; import "envoy/config/core/v4alpha/config_source.proto"; -import "envoy/config/tap/v3/common.proto"; +import "envoy/config/tap/v4alpha/common.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -42,7 +42,7 @@ message CommonExtensionConfig { // If specified, the tap filter will be configured via a static configuration that cannot be // changed. - config.tap.v3.TapConfig static_config = 2; + config.tap.v4alpha.TapConfig static_config = 2; // [#not-implemented-hide:] Configuration to use for TapDS updates for the filter. 
TapDSConfig tapds_config = 3; diff --git a/api/envoy/extensions/filters/http/cache/v4alpha/BUILD b/api/envoy/extensions/filters/http/cache/v4alpha/BUILD new file mode 100644 index 000000000000..63033acab5cf --- /dev/null +++ b/api/envoy/extensions/filters/http/cache/v4alpha/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/route/v4alpha:pkg", + "//envoy/extensions/filters/http/cache/v3alpha:pkg", + "//envoy/type/matcher/v4alpha:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/filters/http/cache/v4alpha/cache.proto b/api/envoy/extensions/filters/http/cache/v4alpha/cache.proto new file mode 100644 index 000000000000..7cb48d4d6c26 --- /dev/null +++ b/api/envoy/extensions/filters/http/cache/v4alpha/cache.proto @@ -0,0 +1,84 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.cache.v4alpha; + +import "envoy/config/route/v4alpha/route_components.proto"; +import "envoy/type/matcher/v4alpha/string.proto"; + +import "google/protobuf/any.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.cache.v4alpha"; +option java_outer_classname = "CacheProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).work_in_progress = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: HTTP Cache Filter] +// [#extension: envoy.filters.http.cache] + +message CacheConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.cache.v3alpha.CacheConfig"; + + // [#not-implemented-hide:] + // Modifies cache key creation by restricting which parts of the URL are included. + message KeyCreatorParams { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.cache.v3alpha.CacheConfig.KeyCreatorParams"; + + // If true, exclude the URL scheme from the cache key. Set to true if your origins always + // produce the same response for http and https requests. + bool exclude_scheme = 1; + + // If true, exclude the host from the cache key. Set to true if your origins' responses don't + // ever depend on host. + bool exclude_host = 2; + + // If *query_parameters_included* is nonempty, only query parameters matched + // by one or more of its matchers are included in the cache key. Any other + // query params will not affect cache lookup. + repeated config.route.v4alpha.QueryParameterMatcher query_parameters_included = 3; + + // If *query_parameters_excluded* is nonempty, query parameters matched by one + // or more of its matchers are excluded from the cache key (even if also + // matched by *query_parameters_included*), and will not affect cache lookup. + repeated config.route.v4alpha.QueryParameterMatcher query_parameters_excluded = 4; + } + + // Config specific to the cache storage implementation. + google.protobuf.Any typed_config = 1 [(validate.rules).any = {required: true}]; + + // [#not-implemented-hide:] + // + // + // List of allowed *Vary* headers. + // + // The *vary* response header holds a list of header names that affect the + // contents of a response, as described by + // https://httpwg.org/specs/rfc7234.html#caching.negotiated.responses. 
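+ //
+ // As an illustrative sketch, allowing cached responses to vary only on ``accept-encoding`` could
+ // be written as:
+ //
+ // .. code-block:: yaml
+ //
+ //   allowed_vary_headers:
+ //   - exact: accept-encoding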
+ // + // During insertion, *allowed_vary_headers* acts as a whitelist: if a + // response's *vary* header mentions any header names that aren't in + // *allowed_vary_headers*, that response will not be cached. + // + // During lookup, *allowed_vary_headers* controls what request headers will be + // sent to the cache storage implementation. + repeated type.matcher.v4alpha.StringMatcher allowed_vary_headers = 2; + + // [#not-implemented-hide:] + // + // + // Modifies cache key creation by restricting which parts of the URL are included. + KeyCreatorParams key_creator_params = 3; + + // [#not-implemented-hide:] + // + // + // Max body size the cache filter will insert into a cache. 0 means unlimited (though the cache + // storage implementation may have its own limit beyond which it will reject insertions). + uint32 max_body_bytes = 4; +} diff --git a/api/envoy/extensions/filters/http/csrf/v4alpha/BUILD b/api/envoy/extensions/filters/http/csrf/v4alpha/BUILD new file mode 100644 index 000000000000..72211218ff52 --- /dev/null +++ b/api/envoy/extensions/filters/http/csrf/v4alpha/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v4alpha:pkg", + "//envoy/extensions/filters/http/csrf/v3:pkg", + "//envoy/type/matcher/v4alpha:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/filters/http/csrf/v4alpha/csrf.proto b/api/envoy/extensions/filters/http/csrf/v4alpha/csrf.proto new file mode 100644 index 000000000000..dda915a059af --- /dev/null +++ b/api/envoy/extensions/filters/http/csrf/v4alpha/csrf.proto @@ -0,0 +1,54 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.csrf.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/type/matcher/v4alpha/string.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.csrf.v4alpha"; +option java_outer_classname = "CsrfProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: CSRF] +// Cross-Site Request Forgery :ref:`configuration overview `. +// [#extension: envoy.filters.http.csrf] + +// CSRF filter config. +message CsrfPolicy { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.csrf.v3.CsrfPolicy"; + + // Specifies the % of requests for which the CSRF filter is enabled. + // + // If :ref:`runtime_key ` is specified, + // Envoy will lookup the runtime key to get the percentage of requests to filter. + // + // .. note:: + // + // This field defaults to 100/:ref:`HUNDRED + // `. + config.core.v4alpha.RuntimeFractionalPercent filter_enabled = 1 + [(validate.rules).message = {required: true}]; + + // Specifies that CSRF policies will be evaluated and tracked, but not enforced. + // + // This is intended to be used when ``filter_enabled`` is off and will be ignored otherwise. + // + // If :ref:`runtime_key ` is specified, + // Envoy will lookup the runtime key to get the percentage of requests for which it will evaluate + // and track the request's *Origin* and *Destination* to determine if it's valid, but will not + // enforce any policies. 
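+ //
+ // As an illustrative sketch (percentages chosen arbitrarily), evaluating and tracking all
+ // requests without enforcing could look like:
+ //
+ // .. code-block:: yaml
+ //
+ //   filter_enabled:
+ //     default_value:
+ //       numerator: 0
+ //       denominator: HUNDRED
+ //   shadow_enabled:
+ //     default_value:
+ //       numerator: 100
+ //       denominator: HUNDRED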
+ config.core.v4alpha.RuntimeFractionalPercent shadow_enabled = 2; + + // Specifies additional source origins that will be allowed in addition to + // the destination origin. + // + // More information on how this can be configured via runtime can be found + // :ref:`here `. + repeated type.matcher.v4alpha.StringMatcher additional_origins = 3; +} diff --git a/api/envoy/extensions/filters/http/ext_authz/v4alpha/BUILD b/api/envoy/extensions/filters/http/ext_authz/v4alpha/BUILD new file mode 100644 index 000000000000..9a3d8a574a9b --- /dev/null +++ b/api/envoy/extensions/filters/http/ext_authz/v4alpha/BUILD @@ -0,0 +1,16 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/annotations:pkg", + "//envoy/config/core/v4alpha:pkg", + "//envoy/extensions/filters/http/ext_authz/v3:pkg", + "//envoy/type/matcher/v4alpha:pkg", + "//envoy/type/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto b/api/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto new file mode 100644 index 000000000000..b39a2d56d00d --- /dev/null +++ b/api/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto @@ -0,0 +1,245 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.ext_authz.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/core/v4alpha/grpc_service.proto"; +import "envoy/config/core/v4alpha/http_uri.proto"; +import "envoy/type/matcher/v4alpha/string.proto"; +import "envoy/type/v3/http_status.proto"; + +import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.ext_authz.v4alpha"; +option java_outer_classname = "ExtAuthzProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: External Authorization] +// External Authorization :ref:`configuration overview `. +// [#extension: envoy.filters.http.ext_authz] + +// [#next-free-field: 11] +message ExtAuthz { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.ext_authz.v3.ExtAuthz"; + + reserved 4; + + reserved "use_alpha"; + + // External authorization service configuration. + oneof services { + // gRPC service configuration (default timeout: 200ms). + config.core.v4alpha.GrpcService grpc_service = 1; + + // HTTP service configuration (default timeout: 200ms). + HttpService http_service = 3; + } + + // Changes filter's behaviour on errors: + // + // 1. When set to true, the filter will *accept* client request even if the communication with + // the authorization service has failed, or if the authorization service has returned a HTTP 5xx + // error. + // + // 2. When set to false, ext-authz will *reject* client requests and return a *Forbidden* + // response if the communication with the authorization service has failed, or if the + // authorization service has returned a HTTP 5xx error. + // + // Note that errors can be *always* tracked in the :ref:`stats + // `. + bool failure_mode_allow = 2; + + // Enables filter to buffer the client request body and send it within the authorization request. 
+ // A ``x-envoy-auth-partial-body: false|true`` metadata header will be added to the authorization + // request message indicating if the body data is partial. + BufferSettings with_request_body = 5; + + // Clears route cache in order to allow the external authorization service to correctly affect + // routing decisions. Filter clears all cached routes when: + // + // 1. The field is set to *true*. + // + // 2. The status returned from the authorization service is a HTTP 200 or gRPC 0. + // + // 3. At least one *authorization response header* is added to the client request, or is used for + // altering another client request header. + // + bool clear_route_cache = 6; + + // Sets the HTTP status that is returned to the client when there is a network error between the + // filter and the authorization server. The default status is HTTP 403 Forbidden. + type.v3.HttpStatus status_on_error = 7; + + // Specifies a list of metadata namespaces whose values, if present, will be passed to the + // ext_authz service as an opaque *protobuf::Struct*. + // + // For example, if the *jwt_authn* filter is used and :ref:`payload_in_metadata + // ` is set, + // then the following will pass the jwt payload to the authorization server. + // + // .. code-block:: yaml + // + // metadata_context_namespaces: + // - envoy.filters.http.jwt_authn + // + repeated string metadata_context_namespaces = 8; + + // Specifies if the filter is enabled. + // + // If :ref:`runtime_key ` is specified, + // Envoy will lookup the runtime key to get the percentage of requests to filter. + // + // If this field is not specified, the filter will be enabled for all requests. + config.core.v4alpha.RuntimeFractionalPercent filter_enabled = 9; + + // Specifies if the peer certificate is sent to the external service. + // + // When this field is true, Envoy will include the peer X.509 certificate, if available, in the + // :ref:`certificate`. + bool include_peer_certificate = 10; +} + +// Configuration for buffering the request data. +message BufferSettings { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.ext_authz.v3.BufferSettings"; + + // Sets the maximum size of a message body that the filter will hold in memory. Envoy will return + // *HTTP 413* and will *not* initiate the authorization process when buffer reaches the number + // set in this field. Note that this setting will have precedence over :ref:`failure_mode_allow + // `. + uint32 max_request_bytes = 1 [(validate.rules).uint32 = {gt: 0}]; + + // When this field is true, Envoy will buffer the message until *max_request_bytes* is reached. + // The authorization request will be dispatched and no 413 HTTP error will be returned by the + // filter. + bool allow_partial_message = 2; +} + +// HttpService is used for raw HTTP communication between the filter and the authorization service. +// When configured, the filter will parse the client request and use these attributes to call the +// authorization server. Depending on the response, the filter may reject or accept the client +// request. Note that in any of these events, metadata can be added, removed or overridden by the +// filter: +// +// *On authorization request*, a list of allowed request headers may be supplied. See +// :ref:`allowed_headers +// ` +// for details. Additional headers metadata may be added to the authorization request. See +// :ref:`headers_to_add +// ` for +// details. 
+// +// On authorization response status HTTP 200 OK, the filter will allow traffic to the upstream and +// additional headers metadata may be added to the original client request. See +// :ref:`allowed_upstream_headers +// ` +// for details. +// +// On other authorization response statuses, the filter will not allow traffic. Additional headers +// metadata as well as body may be added to the client's response. See :ref:`allowed_client_headers +// ` +// for details. +// [#next-free-field: 9] +message HttpService { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.ext_authz.v3.HttpService"; + + reserved 3, 4, 5, 6; + + // Sets the HTTP server URI which the authorization requests must be sent to. + config.core.v4alpha.HttpUri server_uri = 1; + + // Sets a prefix to the value of authorization request header *Path*. + string path_prefix = 2; + + // Settings used for controlling authorization request metadata. + AuthorizationRequest authorization_request = 7; + + // Settings used for controlling authorization response metadata. + AuthorizationResponse authorization_response = 8; +} + +message AuthorizationRequest { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.ext_authz.v3.AuthorizationRequest"; + + // Authorization request will include the client request headers that have a correspondent match + // in the :ref:`list `. Note that in addition to the + // user's supplied matchers: + // + // 1. *Host*, *Method*, *Path* and *Content-Length* are automatically included to the list. + // + // 2. *Content-Length* will be set to 0 and the request to the authorization service will not have + // a message body. However, the authorization request can include the buffered client request body + // (controlled by :ref:`with_request_body + // ` setting), + // consequently the value of *Content-Length* of the authorization request reflects the size of + // its payload size. + // + type.matcher.v4alpha.ListStringMatcher allowed_headers = 1; + + // Sets a list of headers that will be included to the request to authorization service. Note that + // client request of the same key will be overridden. + repeated config.core.v4alpha.HeaderValue headers_to_add = 2; +} + +message AuthorizationResponse { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.ext_authz.v3.AuthorizationResponse"; + + // When this :ref:`list ` is set, authorization + // response headers that have a correspondent match will be added to the original client request. + // Note that coexistent headers will be overridden. + type.matcher.v4alpha.ListStringMatcher allowed_upstream_headers = 1; + + // When this :ref:`list `. is set, authorization + // response headers that have a correspondent match will be added to the client's response. Note + // that when this list is *not* set, all the authorization response headers, except *Authority + // (Host)* will be in the response to the client. When a header is included in this list, *Path*, + // *Status*, *Content-Length*, *WWWAuthenticate* and *Location* are automatically added. + type.matcher.v4alpha.ListStringMatcher allowed_client_headers = 2; +} + +// Extra settings on a per virtualhost/route/weighted-cluster level. 
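+//
+// As an illustrative sketch (the extension key and value are hypothetical), a route-level
+// override could pass extra context to the authorization service with:
+//
+// .. code-block:: yaml
+//
+//   check_settings:
+//     context_extensions:
+//       virtual_host: vhost_a   # hypothetical key/value
+//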
+message ExtAuthzPerRoute { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.ext_authz.v3.ExtAuthzPerRoute"; + + oneof override { + option (validate.required) = true; + + // Disable the ext auth filter for this particular vhost or route. + // If disabled is specified in multiple per-filter-configs, the most specific one will be used. + bool disabled = 1 [(validate.rules).bool = {const: true}]; + + // Check request settings for this route. + CheckSettings check_settings = 2 [(validate.rules).message = {required: true}]; + } +} + +// Extra settings for the check request. You can use this to provide extra context for the +// external authorization server on specific virtual hosts \ routes. For example, adding a context +// extension on the virtual host level can give the ext-authz server information on what virtual +// host is used without needing to parse the host header. If CheckSettings is specified in multiple +// per-filter-configs, they will be merged in order, and the result will be used. +message CheckSettings { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.ext_authz.v3.CheckSettings"; + + // Context extensions to set on the CheckRequest's + // :ref:`AttributeContext.context_extensions` + // + // Merge semantics for this field are such that keys from more specific configs override. + // + // .. note:: + // + // These settings are only applied to a filter configured with a + // :ref:`grpc_service`. + map context_extensions = 1; +} diff --git a/api/envoy/extensions/filters/http/fault/v4alpha/BUILD b/api/envoy/extensions/filters/http/fault/v4alpha/BUILD new file mode 100644 index 000000000000..936ee4414038 --- /dev/null +++ b/api/envoy/extensions/filters/http/fault/v4alpha/BUILD @@ -0,0 +1,15 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/route/v4alpha:pkg", + "//envoy/extensions/filters/common/fault/v3:pkg", + "//envoy/extensions/filters/http/fault/v3:pkg", + "//envoy/type/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/filters/http/fault/v4alpha/fault.proto b/api/envoy/extensions/filters/http/fault/v4alpha/fault.proto new file mode 100644 index 000000000000..7dd4f48aa476 --- /dev/null +++ b/api/envoy/extensions/filters/http/fault/v4alpha/fault.proto @@ -0,0 +1,144 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.fault.v4alpha; + +import "envoy/config/route/v4alpha/route_components.proto"; +import "envoy/extensions/filters/common/fault/v3/fault.proto"; +import "envoy/type/v3/percent.proto"; + +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.fault.v4alpha"; +option java_outer_classname = "FaultProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Fault Injection] +// Fault Injection :ref:`configuration overview `. 
+// [#extension: envoy.filters.http.fault] + +// [#next-free-field: 6] +message FaultAbort { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.fault.v3.FaultAbort"; + + // Fault aborts are controlled via an HTTP header (if applicable). See the + // :ref:`HTTP fault filter ` documentation for + // more information. + message HeaderAbort { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.fault.v3.FaultAbort.HeaderAbort"; + } + + reserved 1; + + oneof error_type { + option (validate.required) = true; + + // HTTP status code to use to abort the HTTP request. + uint32 http_status = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}]; + + // gRPC status code to use to abort the gRPC request. + uint32 grpc_status = 5; + + // Fault aborts are controlled via an HTTP header (if applicable). + HeaderAbort header_abort = 4; + } + + // The percentage of requests/operations/connections that will be aborted with the error code + // provided. + type.v3.FractionalPercent percentage = 3; +} + +// [#next-free-field: 15] +message HTTPFault { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.fault.v3.HTTPFault"; + + // If specified, the filter will inject delays based on the values in the + // object. + common.fault.v3.FaultDelay delay = 1; + + // If specified, the filter will abort requests based on the values in + // the object. At least *abort* or *delay* must be specified. + FaultAbort abort = 2; + + // Specifies the name of the (destination) upstream cluster that the + // filter should match on. Fault injection will be restricted to requests + // bound to the specific upstream cluster. + string upstream_cluster = 3; + + // Specifies a set of headers that the filter should match on. The fault + // injection filter can be applied selectively to requests that match a set of + // headers specified in the fault filter config. The chances of actual fault + // injection further depend on the value of the :ref:`percentage + // ` field. + // The filter will check the request's headers against all the specified + // headers in the filter config. A match will happen if all the headers in the + // config are present in the request with the same values (or based on + // presence if the *value* field is not in the config). + repeated config.route.v4alpha.HeaderMatcher headers = 4; + + // Faults are injected for the specified list of downstream hosts. If this + // setting is not set, faults are injected for all downstream nodes. + // Downstream node name is taken from :ref:`the HTTP + // x-envoy-downstream-service-node + // ` header and compared + // against downstream_nodes list. + repeated string downstream_nodes = 5; + + // The maximum number of faults that can be active at a single time via the configured fault + // filter. Note that because this setting can be overridden at the route level, it's possible + // for the number of active faults to be greater than this value (if injected via a different + // route). If not specified, defaults to unlimited. This setting can be overridden via + // `runtime ` and any faults that are not injected + // due to overflow will be indicated via the `faults_overflow + // ` stat. + // + // .. attention:: + // Like other :ref:`circuit breakers ` in Envoy, this is a fuzzy + // limit. It's possible for the number of active faults to rise slightly above the configured + // amount due to the implementation details. 
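+ //
+ // For illustration only (the limit shown is arbitrary), a cap on concurrent faults could be
+ // configured as:
+ //
+ // .. code-block:: yaml
+ //
+ //   max_active_faults: 250
+ //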
+ google.protobuf.UInt32Value max_active_faults = 6; + + // The response rate limit to be applied to the response body of the stream. When configured, + // the percentage can be overridden by the :ref:`fault.http.rate_limit.response_percent + // ` runtime key. + // + // .. attention:: + // This is a per-stream limit versus a connection level limit. This means that concurrent streams + // will each get an independent limit. + common.fault.v3.FaultRateLimit response_rate_limit = 7; + + // The runtime key to override the :ref:`default ` + // runtime. The default is: fault.http.delay.fixed_delay_percent + string delay_percent_runtime = 8; + + // The runtime key to override the :ref:`default ` + // runtime. The default is: fault.http.abort.abort_percent + string abort_percent_runtime = 9; + + // The runtime key to override the :ref:`default ` + // runtime. The default is: fault.http.delay.fixed_duration_ms + string delay_duration_runtime = 10; + + // The runtime key to override the :ref:`default ` + // runtime. The default is: fault.http.abort.http_status + string abort_http_status_runtime = 11; + + // The runtime key to override the :ref:`default ` + // runtime. The default is: fault.http.max_active_faults + string max_active_faults_runtime = 12; + + // The runtime key to override the :ref:`default ` + // runtime. The default is: fault.http.rate_limit.response_percent + string response_rate_limit_percent_runtime = 13; + + // The runtime key to override the :ref:`default ` + // runtime. The default is: fault.http.abort.grpc_status + string abort_grpc_status_runtime = 14; +} diff --git a/api/envoy/extensions/filters/http/health_check/v4alpha/BUILD b/api/envoy/extensions/filters/http/health_check/v4alpha/BUILD new file mode 100644 index 000000000000..97b6ad2feb2d --- /dev/null +++ b/api/envoy/extensions/filters/http/health_check/v4alpha/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/route/v4alpha:pkg", + "//envoy/extensions/filters/http/health_check/v3:pkg", + "//envoy/type/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/filters/http/health_check/v4alpha/health_check.proto b/api/envoy/extensions/filters/http/health_check/v4alpha/health_check.proto new file mode 100644 index 000000000000..f530363e2380 --- /dev/null +++ b/api/envoy/extensions/filters/http/health_check/v4alpha/health_check.proto @@ -0,0 +1,47 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.health_check.v4alpha; + +import "envoy/config/route/v4alpha/route_components.proto"; +import "envoy/type/v3/percent.proto"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.health_check.v4alpha"; +option java_outer_classname = "HealthCheckProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Health check] +// Health check :ref:`configuration overview `. 
+// [#extension: envoy.filters.http.health_check] + +// [#next-free-field: 6] +message HealthCheck { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.health_check.v3.HealthCheck"; + + reserved 2; + + // Specifies whether the filter operates in pass through mode or not. + google.protobuf.BoolValue pass_through_mode = 1 [(validate.rules).message = {required: true}]; + + // If operating in pass through mode, the amount of time in milliseconds + // that the filter should cache the upstream response. + google.protobuf.Duration cache_time = 3; + + // If operating in non-pass-through mode, specifies a set of upstream cluster + // names and the minimum percentage of servers in each of those clusters that + // must be healthy or degraded in order for the filter to return a 200. + map cluster_min_healthy_percentages = 4; + + // Specifies a set of health check request headers to match on. The health check filter will + // check a request’s headers against all the specified headers. To specify the health check + // endpoint, set the ``:path`` header to match on. + repeated config.route.v4alpha.HeaderMatcher headers = 5; +} diff --git a/api/envoy/extensions/filters/http/jwt_authn/v4alpha/BUILD b/api/envoy/extensions/filters/http/jwt_authn/v4alpha/BUILD new file mode 100644 index 000000000000..a9f9b8bc44c3 --- /dev/null +++ b/api/envoy/extensions/filters/http/jwt_authn/v4alpha/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v4alpha:pkg", + "//envoy/config/route/v4alpha:pkg", + "//envoy/extensions/filters/http/jwt_authn/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/filters/http/jwt_authn/v4alpha/config.proto b/api/envoy/extensions/filters/http/jwt_authn/v4alpha/config.proto new file mode 100644 index 000000000000..302cf7253dde --- /dev/null +++ b/api/envoy/extensions/filters/http/jwt_authn/v4alpha/config.proto @@ -0,0 +1,531 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.jwt_authn.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/core/v4alpha/http_uri.proto"; +import "envoy/config/route/v4alpha/route_components.proto"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/empty.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.jwt_authn.v4alpha"; +option java_outer_classname = "ConfigProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: JWT Authentication] +// JWT Authentication :ref:`configuration overview `. +// [#extension: envoy.filters.http.jwt_authn] + +// Please see following for JWT authentication flow: +// +// * `JSON Web Token (JWT) `_ +// * `The OAuth 2.0 Authorization Framework `_ +// * `OpenID Connect `_ +// +// A JwtProvider message specifies how a JSON Web Token (JWT) can be verified. It specifies: +// +// * issuer: the principal that issues the JWT. It has to match the one from the token. +// * allowed audiences: the ones in the token have to be listed here. +// * how to fetch public key JWKS to verify the token signature. +// * how to extract JWT token in the request. 
+// * how to pass successfully verified token payload. +// +// Example: +// +// .. code-block:: yaml +// +// issuer: https://example.com +// audiences: +// - bookstore_android.apps.googleusercontent.com +// - bookstore_web.apps.googleusercontent.com +// remote_jwks: +// http_uri: +// uri: https://example.com/.well-known/jwks.json +// cluster: example_jwks_cluster +// cache_duration: +// seconds: 300 +// +// [#next-free-field: 10] +message JwtProvider { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.jwt_authn.v3.JwtProvider"; + + // Specify the `principal `_ that issued + // the JWT, usually a URL or an email address. + // + // Example: https://securetoken.google.com + // Example: 1234567-compute@developer.gserviceaccount.com + // + string issuer = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The list of JWT `audiences `_ are + // allowed to access. A JWT containing any of these audiences will be accepted. If not specified, + // will not check audiences in the token. + // + // Example: + // + // .. code-block:: yaml + // + // audiences: + // - bookstore_android.apps.googleusercontent.com + // - bookstore_web.apps.googleusercontent.com + // + repeated string audiences = 2; + + // `JSON Web Key Set (JWKS) `_ is needed to + // validate signature of a JWT. This field specifies where to fetch JWKS. + oneof jwks_source_specifier { + option (validate.required) = true; + + // JWKS can be fetched from remote server via HTTP/HTTPS. This field specifies the remote HTTP + // URI and how the fetched JWKS should be cached. + // + // Example: + // + // .. code-block:: yaml + // + // remote_jwks: + // http_uri: + // uri: https://www.googleapis.com/oauth2/v1/certs + // cluster: jwt.www.googleapis.com|443 + // cache_duration: + // seconds: 300 + // + RemoteJwks remote_jwks = 3; + + // JWKS is in local data source. It could be either in a local file or embedded in the + // inline_string. + // + // Example: local file + // + // .. code-block:: yaml + // + // local_jwks: + // filename: /etc/envoy/jwks/jwks1.txt + // + // Example: inline_string + // + // .. code-block:: yaml + // + // local_jwks: + // inline_string: ACADADADADA + // + config.core.v4alpha.DataSource local_jwks = 4; + } + + // If false, the JWT is removed in the request after a success verification. If true, the JWT is + // not removed in the request. Default value is false. + bool forward = 5; + + // Two fields below define where to extract the JWT from an HTTP request. + // + // If no explicit location is specified, the following default locations are tried in order: + // + // 1. The Authorization header using the `Bearer schema + // `_. Example:: + // + // Authorization: Bearer . + // + // 2. `access_token `_ query parameter. + // + // Multiple JWTs can be verified for a request. Each JWT has to be extracted from the locations + // its provider specified or from the default locations. + // + // Specify the HTTP headers to extract JWT token. For examples, following config: + // + // .. code-block:: yaml + // + // from_headers: + // - name: x-goog-iap-jwt-assertion + // + // can be used to extract token from header:: + // + // ``x-goog-iap-jwt-assertion: ``. + // + repeated JwtHeader from_headers = 6; + + // JWT is sent in a query parameter. `jwt_params` represents the query parameter names. + // + // For example, if config is: + // + // .. 
code-block:: yaml + // + // from_params: + // - jwt_token + // + // The JWT format in query parameter is:: + // + // /path?jwt_token= + // + repeated string from_params = 7; + + // This field specifies the header name to forward a successfully verified JWT payload to the + // backend. The forwarded data is:: + // + // base64url_encoded(jwt_payload_in_JSON) + // + // If it is not specified, the payload will not be forwarded. + string forward_payload_header = 8 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; + + // If non empty, successfully verified JWT payloads will be written to StreamInfo DynamicMetadata + // in the format as: *namespace* is the jwt_authn filter name as **envoy.filters.http.jwt_authn** + // The value is the *protobuf::Struct*. The value of this field will be the key for its *fields* + // and the value is the *protobuf::Struct* converted from JWT JSON payload. + // + // For example, if payload_in_metadata is *my_payload*: + // + // .. code-block:: yaml + // + // envoy.filters.http.jwt_authn: + // my_payload: + // iss: https://example.com + // sub: test@example.com + // aud: https://example.com + // exp: 1501281058 + // + string payload_in_metadata = 9; +} + +// This message specifies how to fetch JWKS from remote and how to cache it. +message RemoteJwks { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.jwt_authn.v3.RemoteJwks"; + + // The HTTP URI to fetch the JWKS. For example: + // + // .. code-block:: yaml + // + // http_uri: + // uri: https://www.googleapis.com/oauth2/v1/certs + // cluster: jwt.www.googleapis.com|443 + // + config.core.v4alpha.HttpUri http_uri = 1; + + // Duration after which the cached JWKS should be expired. If not specified, default cache + // duration is 5 minutes. + google.protobuf.Duration cache_duration = 2; +} + +// This message specifies a header location to extract JWT token. +message JwtHeader { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.jwt_authn.v3.JwtHeader"; + + // The HTTP header name. + string name = 1 + [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; + + // The value prefix. The value format is "value_prefix" + // For example, for "Authorization: Bearer ", value_prefix="Bearer " with a space at the + // end. + string value_prefix = 2 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; +} + +// Specify a required provider with audiences. +message ProviderWithAudiences { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.jwt_authn.v3.ProviderWithAudiences"; + + // Specify a required provider name. + string provider_name = 1; + + // This field overrides the one specified in the JwtProvider. + repeated string audiences = 2; +} + +// This message specifies a Jwt requirement. An empty message means JWT verification is not +// required. Here are some config examples: +// +// .. 
code-block:: yaml +// +// # Example 1: not required with an empty message +// +// # Example 2: require A +// provider_name: provider-A +// +// # Example 3: require A or B +// requires_any: +// requirements: +// - provider_name: provider-A +// - provider_name: provider-B +// +// # Example 4: require A and B +// requires_all: +// requirements: +// - provider_name: provider-A +// - provider_name: provider-B +// +// # Example 5: require A and (B or C) +// requires_all: +// requirements: +// - provider_name: provider-A +// - requires_any: +// requirements: +// - provider_name: provider-B +// - provider_name: provider-C +// +// # Example 6: require A or (B and C) +// requires_any: +// requirements: +// - provider_name: provider-A +// - requires_all: +// requirements: +// - provider_name: provider-B +// - provider_name: provider-C +// +// # Example 7: A is optional (if token from A is provided, it must be valid, but also allows +// missing token.) +// requires_any: +// requirements: +// - provider_name: provider-A +// - allow_missing: {} +// +// # Example 8: A is optional and B is required. +// requires_all: +// requirements: +// - requires_any: +// requirements: +// - provider_name: provider-A +// - allow_missing: {} +// - provider_name: provider-B +// +// [#next-free-field: 7] +message JwtRequirement { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.jwt_authn.v3.JwtRequirement"; + + oneof requires_type { + // Specify a required provider name. + string provider_name = 1; + + // Specify a required provider with audiences. + ProviderWithAudiences provider_and_audiences = 2; + + // Specify list of JwtRequirement. Their results are OR-ed. + // If any one of them passes, the result is passed. + JwtRequirementOrList requires_any = 3; + + // Specify list of JwtRequirement. Their results are AND-ed. + // All of them must pass, if one of them fails or missing, it fails. + JwtRequirementAndList requires_all = 4; + + // The requirement is always satisfied even if JWT is missing or the JWT + // verification fails. A typical usage is: this filter is used to only verify + // JWTs and pass the verified JWT payloads to another filter, the other filter + // will make decision. In this mode, all JWT tokens will be verified. + google.protobuf.Empty allow_missing_or_failed = 5; + + // The requirement is satisfied if JWT is missing, but failed if JWT is + // presented but invalid. Similar to allow_missing_or_failed, this is used + // to only verify JWTs and pass the verified payload to another filter. The + // different is this mode will reject requests with invalid tokens. + google.protobuf.Empty allow_missing = 6; + } +} + +// This message specifies a list of RequiredProvider. +// Their results are OR-ed; if any one of them passes, the result is passed +message JwtRequirementOrList { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.jwt_authn.v3.JwtRequirementOrList"; + + // Specify a list of JwtRequirement. + repeated JwtRequirement requirements = 1 [(validate.rules).repeated = {min_items: 2}]; +} + +// This message specifies a list of RequiredProvider. +// Their results are AND-ed; all of them must pass, if one of them fails or missing, it fails. +message JwtRequirementAndList { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.jwt_authn.v3.JwtRequirementAndList"; + + // Specify a list of JwtRequirement. 
+ repeated JwtRequirement requirements = 1 [(validate.rules).repeated = {min_items: 2}]; +} + +// This message specifies a Jwt requirement for a specific Route condition. +// Example 1: +// +// .. code-block:: yaml +// +// - match: +// prefix: /healthz +// +// In above example, "requires" field is empty for /healthz prefix match, +// it means that requests matching the path prefix don't require JWT authentication. +// +// Example 2: +// +// .. code-block:: yaml +// +// - match: +// prefix: / +// requires: { provider_name: provider-A } +// +// In above example, all requests matched the path prefix require jwt authentication +// from "provider-A". +message RequirementRule { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.jwt_authn.v3.RequirementRule"; + + // The route matching parameter. Only when the match is satisfied, the "requires" field will + // apply. + // + // For example: following match will match all requests. + // + // .. code-block:: yaml + // + // match: + // prefix: / + // + config.route.v4alpha.RouteMatch match = 1 [(validate.rules).message = {required: true}]; + + // Specify a Jwt Requirement. Please detail comment in message JwtRequirement. + JwtRequirement requires = 2; +} + +// This message specifies Jwt requirements based on stream_info.filterState. +// This FilterState should use `Router::StringAccessor` object to set a string value. +// Other HTTP filters can use it to specify Jwt requirements dynamically. +// +// Example: +// +// .. code-block:: yaml +// +// name: jwt_selector +// requires: +// issuer_1: +// provider_name: issuer1 +// issuer_2: +// provider_name: issuer2 +// +// If a filter set "jwt_selector" with "issuer_1" to FilterState for a request, +// jwt_authn filter will use JwtRequirement{"provider_name": "issuer1"} to verify. +message FilterStateRule { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.jwt_authn.v3.FilterStateRule"; + + // The filter state name to retrieve the `Router::StringAccessor` object. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // A map of string keys to requirements. The string key is the string value + // in the FilterState with the name specified in the *name* field above. + map requires = 3; +} + +// This is the Envoy HTTP filter config for JWT authentication. +// +// For example: +// +// .. code-block:: yaml +// +// providers: +// provider1: +// issuer: issuer1 +// audiences: +// - audience1 +// - audience2 +// remote_jwks: +// http_uri: +// uri: https://example.com/.well-known/jwks.json +// cluster: example_jwks_cluster +// provider2: +// issuer: issuer2 +// local_jwks: +// inline_string: jwks_string +// +// rules: +// # Not jwt verification is required for /health path +// - match: +// prefix: /health +// +// # Jwt verification for provider1 is required for path prefixed with "prefix" +// - match: +// prefix: /prefix +// requires: +// provider_name: provider1 +// +// # Jwt verification for either provider1 or provider2 is required for all other requests. +// - match: +// prefix: / +// requires: +// requires_any: +// requirements: +// - provider_name: provider1 +// - provider_name: provider2 +// +message JwtAuthentication { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.jwt_authn.v3.JwtAuthentication"; + + // Map of provider names to JwtProviders. + // + // .. 
code-block:: yaml + // + // providers: + // provider1: + // issuer: issuer1 + // audiences: + // - audience1 + // - audience2 + // remote_jwks: + // http_uri: + // uri: https://example.com/.well-known/jwks.json + // cluster: example_jwks_cluster + // provider2: + // issuer: provider2 + // local_jwks: + // inline_string: jwks_string + // + map providers = 1; + + // Specifies requirements based on the route matches. The first matched requirement will be + // applied. If there are overlapped match conditions, please put the most specific match first. + // + // Examples + // + // .. code-block:: yaml + // + // rules: + // - match: + // prefix: /healthz + // - match: + // prefix: /baz + // requires: + // provider_name: provider1 + // - match: + // prefix: /foo + // requires: + // requires_any: + // requirements: + // - provider_name: provider1 + // - provider_name: provider2 + // - match: + // prefix: /bar + // requires: + // requires_all: + // requirements: + // - provider_name: provider1 + // - provider_name: provider2 + // + repeated RequirementRule rules = 2; + + // This message specifies Jwt requirements based on stream_info.filterState. + // Other HTTP filters can use it to specify Jwt requirements dynamically. + // The *rules* field above is checked first, if it could not find any matches, + // check this one. + FilterStateRule filter_state_rules = 3; + + // When set to true, bypass the `CORS preflight request + // `_ regardless of JWT + // requirements specified in the rules. + bool bypass_cors_preflight = 4; +} diff --git a/api/envoy/extensions/filters/http/router/v4alpha/BUILD b/api/envoy/extensions/filters/http/router/v4alpha/BUILD new file mode 100644 index 000000000000..df329be54230 --- /dev/null +++ b/api/envoy/extensions/filters/http/router/v4alpha/BUILD @@ -0,0 +1,13 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/accesslog/v4alpha:pkg", + "//envoy/extensions/filters/http/router/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/filters/http/router/v4alpha/router.proto b/api/envoy/extensions/filters/http/router/v4alpha/router.proto new file mode 100644 index 000000000000..d0baaab84a39 --- /dev/null +++ b/api/envoy/extensions/filters/http/router/v4alpha/router.proto @@ -0,0 +1,81 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.router.v4alpha; + +import "envoy/config/accesslog/v4alpha/accesslog.proto"; + +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.router.v4alpha"; +option java_outer_classname = "RouterProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Router] +// Router :ref:`configuration overview `. +// [#extension: envoy.filters.http.router] + +// [#next-free-field: 7] +message Router { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.router.v3.Router"; + + // Whether the router generates dynamic cluster statistics. Defaults to + // true. Can be disabled in high performance scenarios. + google.protobuf.BoolValue dynamic_stats = 1; + + // Whether to start a child span for egress routed calls. 
This can be + // useful in scenarios where other filters (auth, ratelimit, etc.) make + // outbound calls and have child spans rooted at the same ingress + // parent. Defaults to false. + bool start_child_span = 2; + + // Configuration for HTTP upstream logs emitted by the router. Upstream logs + // are configured in the same way as access logs, but each log entry represents + // an upstream request. Presuming retries are configured, multiple upstream + // requests may be made for each downstream (inbound) request. + repeated config.accesslog.v4alpha.AccessLog upstream_log = 3; + + // Do not add any additional *x-envoy-* headers to requests or responses. This + // only affects the :ref:`router filter generated *x-envoy-* headers + // `, other Envoy filters and the HTTP + // connection manager may continue to set *x-envoy-* headers. + bool suppress_envoy_headers = 4; + + // Specifies a list of HTTP headers to strictly validate. Envoy will reject a + // request and respond with HTTP status 400 if the request contains an invalid + // value for any of the headers listed in this field. Strict header checking + // is only supported for the following headers: + // + // Value must be a ','-delimited list (i.e. no spaces) of supported retry + // policy values: + // + // * :ref:`config_http_filters_router_x-envoy-retry-grpc-on` + // * :ref:`config_http_filters_router_x-envoy-retry-on` + // + // Value must be an integer: + // + // * :ref:`config_http_filters_router_x-envoy-max-retries` + // * :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms` + // * :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` + repeated string strict_check_headers = 5 [(validate.rules).repeated = { + items { + string { + in: "x-envoy-upstream-rq-timeout-ms" + in: "x-envoy-upstream-rq-per-try-timeout-ms" + in: "x-envoy-max-retries" + in: "x-envoy-retry-grpc-on" + in: "x-envoy-retry-on" + } + } + }]; + + // If not set, ingress Envoy will ignore + // :ref:`config_http_filters_router_x-envoy-expected-rq-timeout-ms` header, populated by egress + // Envoy, when deriving timeout for upstream cluster. + bool respect_expected_rq_timeout = 6; +} diff --git a/api/envoy/extensions/filters/network/dubbo_proxy/v4alpha/BUILD b/api/envoy/extensions/filters/network/dubbo_proxy/v4alpha/BUILD new file mode 100644 index 000000000000..663eb0d52d25 --- /dev/null +++ b/api/envoy/extensions/filters/network/dubbo_proxy/v4alpha/BUILD @@ -0,0 +1,15 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/route/v4alpha:pkg", + "//envoy/extensions/filters/network/dubbo_proxy/v3:pkg", + "//envoy/type/matcher/v4alpha:pkg", + "//envoy/type/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/filters/network/dubbo_proxy/v4alpha/dubbo_proxy.proto b/api/envoy/extensions/filters/network/dubbo_proxy/v4alpha/dubbo_proxy.proto new file mode 100644 index 000000000000..4894c7693fd7 --- /dev/null +++ b/api/envoy/extensions/filters/network/dubbo_proxy/v4alpha/dubbo_proxy.proto @@ -0,0 +1,70 @@ +syntax = "proto3"; + +package envoy.extensions.filters.network.dubbo_proxy.v4alpha; + +import "envoy/extensions/filters/network/dubbo_proxy/v4alpha/route.proto"; + +import "google/protobuf/any.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.dubbo_proxy.v4alpha"; +option java_outer_classname = "DubboProxyProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Dubbo Proxy] +// Dubbo Proxy :ref:`configuration overview `. +// [#extension: envoy.filters.network.dubbo_proxy] + +// Dubbo Protocol types supported by Envoy. +enum ProtocolType { + // the default protocol. + Dubbo = 0; +} + +// Dubbo Serialization types supported by Envoy. +enum SerializationType { + // the default serialization protocol. + Hessian2 = 0; +} + +// [#next-free-field: 6] +message DubboProxy { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.dubbo_proxy.v3.DubboProxy"; + + // The human readable prefix to use when emitting statistics. + string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Configure the protocol used. + ProtocolType protocol_type = 2 [(validate.rules).enum = {defined_only: true}]; + + // Configure the serialization protocol used. + SerializationType serialization_type = 3 [(validate.rules).enum = {defined_only: true}]; + + // The route table for the connection manager is static and is specified in this property. + repeated RouteConfiguration route_config = 4; + + // A list of individual Dubbo filters that make up the filter chain for requests made to the + // Dubbo proxy. Order matters as the filters are processed sequentially. For backwards + // compatibility, if no dubbo_filters are specified, a default Dubbo router filter + // (`envoy.filters.dubbo.router`) is used. + repeated DubboFilter dubbo_filters = 5; +} + +// DubboFilter configures a Dubbo filter. +message DubboFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.dubbo_proxy.v3.DubboFilter"; + + // The name of the filter to instantiate. The name must match a supported + // filter. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Filter specific configuration which depends on the filter being + // instantiated. See the supported filters for further documentation. 
+ google.protobuf.Any config = 2; +} diff --git a/api/envoy/extensions/filters/network/dubbo_proxy/v4alpha/route.proto b/api/envoy/extensions/filters/network/dubbo_proxy/v4alpha/route.proto new file mode 100644 index 000000000000..c2ff03b33fb1 --- /dev/null +++ b/api/envoy/extensions/filters/network/dubbo_proxy/v4alpha/route.proto @@ -0,0 +1,121 @@ +syntax = "proto3"; + +package envoy.extensions.filters.network.dubbo_proxy.v4alpha; + +import "envoy/config/route/v4alpha/route_components.proto"; +import "envoy/type/matcher/v4alpha/string.proto"; +import "envoy/type/v3/range.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.dubbo_proxy.v4alpha"; +option java_outer_classname = "RouteProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Dubbo Proxy Route Configuration] +// Dubbo Proxy :ref:`configuration overview `. + +// [#next-free-field: 6] +message RouteConfiguration { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.dubbo_proxy.v3.RouteConfiguration"; + + // The name of the route configuration. Reserved for future use in asynchronous route discovery. + string name = 1; + + // The interface name of the service. + string interface = 2; + + // Which group does the interface belong to. + string group = 3; + + // The version number of the interface. + string version = 4; + + // The list of routes that will be matched, in order, against incoming requests. The first route + // that matches will be used. + repeated Route routes = 5; +} + +message Route { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.dubbo_proxy.v3.Route"; + + // Route matching parameters. + RouteMatch match = 1 [(validate.rules).message = {required: true}]; + + // Route request to some upstream cluster. + RouteAction route = 2 [(validate.rules).message = {required: true}]; +} + +message RouteMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.dubbo_proxy.v3.RouteMatch"; + + // Method level routing matching. + MethodMatch method = 1; + + // Specifies a set of headers that the route should match on. The router will check the request’s + // headers against all the specified headers in the route config. A match will happen if all the + // headers in the route are present in the request with the same values (or based on presence if + // the value field is not in the config). + repeated config.route.v4alpha.HeaderMatcher headers = 2; +} + +message RouteAction { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.dubbo_proxy.v3.RouteAction"; + + oneof cluster_specifier { + option (validate.required) = true; + + // Indicates the upstream cluster to which the request should be routed. + string cluster = 1; + + // Multiple upstream clusters can be specified for a given route. The + // request is routed to one of the upstream clusters based on weights + // assigned to each cluster. + // Currently ClusterWeight only supports the name and weight fields. 
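+ //
+ // An illustrative sketch of a weighted split (cluster names and weights are hypothetical):
+ //
+ // .. code-block:: yaml
+ //
+ //   weighted_clusters:
+ //     clusters:
+ //     - name: cluster_a   # hypothetical cluster
+ //       weight: 80
+ //     - name: cluster_b   # hypothetical cluster
+ //       weight: 20
+ //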
+ config.route.v4alpha.WeightedCluster weighted_clusters = 2; + } +} + +message MethodMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.dubbo_proxy.v3.MethodMatch"; + + // The parameter matching type. + message ParameterMatchSpecifier { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.dubbo_proxy.v3.MethodMatch.ParameterMatchSpecifier"; + + oneof parameter_match_specifier { + // If specified, header match will be performed based on the value of the header. + string exact_match = 3; + + // If specified, header match will be performed based on range. + // The rule will match if the request header value is within this range. + // The entire request header value must represent an integer in base 10 notation: consisting + // of an optional plus or minus sign followed by a sequence of digits. The rule will not match + // if the header value does not represent an integer. Match will fail for empty values, + // floating point numbers or if only a subsequence of the header value is an integer. + // + // Examples: + // + // * For range [-10,0), route will match for header value -1, but not for 0, + // "somestring", 10.9, "-1somestring" + type.v3.Int64Range range_match = 4; + } + } + + // The name of the method. + type.matcher.v4alpha.StringMatcher name = 1; + + // Method parameter definition. + // The key is the parameter index, starting from 0. + // The value is the parameter matching type. + map params_match = 2; +} diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/BUILD b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/BUILD index 792ccf7ab677..57c9eebb5b19 100644 --- a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/BUILD +++ b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/BUILD @@ -7,7 +7,7 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/annotations:pkg", - "//envoy/config/accesslog/v3:pkg", + "//envoy/config/accesslog/v4alpha:pkg", "//envoy/config/core/v4alpha:pkg", "//envoy/config/route/v4alpha:pkg", "//envoy/config/trace/v4alpha:pkg", diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto index 5eaefe16037e..03a15d832732 100644 --- a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto +++ b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package envoy.extensions.filters.network.http_connection_manager.v4alpha; -import "envoy/config/accesslog/v3/accesslog.proto"; +import "envoy/config/accesslog/v4alpha/accesslog.proto"; import "envoy/config/core/v4alpha/config_source.proto"; import "envoy/config/core/v4alpha/protocol.proto"; import "envoy/config/route/v4alpha/route.proto"; @@ -383,7 +383,7 @@ message HttpConnectionManager { // Configuration for :ref:`HTTP access logs ` // emitted by the connection manager. 
- repeated config.accesslog.v3.AccessLog access_log = 13; + repeated config.accesslog.v4alpha.AccessLog access_log = 13; // If set to true, the connection manager will use the real remote address // of the client connection when determining internal versus external origin and manipulating diff --git a/api/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/BUILD b/api/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/BUILD new file mode 100644 index 000000000000..d8d88f7f3bb4 --- /dev/null +++ b/api/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/BUILD @@ -0,0 +1,15 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v4alpha:pkg", + "//envoy/config/route/v4alpha:pkg", + "//envoy/extensions/filters/network/rocketmq_proxy/v3:pkg", + "//envoy/type/matcher/v4alpha:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/rocketmq_proxy.proto b/api/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/rocketmq_proxy.proto new file mode 100644 index 000000000000..a765734e66db --- /dev/null +++ b/api/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/rocketmq_proxy.proto @@ -0,0 +1,39 @@ +syntax = "proto3"; + +package envoy.extensions.filters.network.rocketmq_proxy.v4alpha; + +import "envoy/extensions/filters/network/rocketmq_proxy/v4alpha/route.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.rocketmq_proxy.v4alpha"; +option java_outer_classname = "RocketmqProxyProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: RocketMQ Proxy] +// RocketMQ Proxy :ref:`configuration overview `. +// [#extension: envoy.filters.network.rocketmq_proxy] + +message RocketmqProxy { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.rocketmq_proxy.v3.RocketmqProxy"; + + // The human readable prefix to use when emitting statistics. + string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The route table for the connection manager is specified in this property. + RouteConfiguration route_config = 2; + + // The largest duration transient object expected to live, more than 10s is recommended. + google.protobuf.Duration transient_object_life_span = 3; + + // If develop_mode is enabled, this proxy plugin may work without dedicated traffic intercepting + // facility without considering backward compatibility of exiting RocketMQ client SDK. 
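+ //
+ // Illustrative only; enabling it for local development could look like:
+ //
+ // .. code-block:: yaml
+ //
+ //   develop_mode: true
+ //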
+ bool develop_mode = 4; +} diff --git a/api/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/route.proto b/api/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/route.proto new file mode 100644 index 000000000000..995e8bcb05e3 --- /dev/null +++ b/api/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/route.proto @@ -0,0 +1,67 @@ +syntax = "proto3"; + +package envoy.extensions.filters.network.rocketmq_proxy.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/route/v4alpha/route_components.proto"; +import "envoy/type/matcher/v4alpha/string.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.rocketmq_proxy.v4alpha"; +option java_outer_classname = "RouteProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Rocketmq Proxy Route Configuration] +// Rocketmq Proxy :ref:`configuration overview `. + +message RouteConfiguration { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.rocketmq_proxy.v3.RouteConfiguration"; + + // The name of the route configuration. + string name = 1; + + // The list of routes that will be matched, in order, against incoming requests. The first route + // that matches will be used. + repeated Route routes = 2; +} + +message Route { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.rocketmq_proxy.v3.Route"; + + // Route matching parameters. + RouteMatch match = 1 [(validate.rules).message = {required: true}]; + + // Route request to some upstream cluster. + RouteAction route = 2 [(validate.rules).message = {required: true}]; +} + +message RouteMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.rocketmq_proxy.v3.RouteMatch"; + + // The name of the topic. + type.matcher.v4alpha.StringMatcher topic = 1 [(validate.rules).message = {required: true}]; + + // Specifies a set of headers that the route should match on. The router will check the request’s + // headers against all the specified headers in the route config. A match will happen if all the + // headers in the route are present in the request with the same values (or based on presence if + // the value field is not in the config). + repeated config.route.v4alpha.HeaderMatcher headers = 2; +} + +message RouteAction { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.rocketmq_proxy.v3.RouteAction"; + + // Indicates the upstream cluster to which the request should be routed. + string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Optional endpoint metadata match criteria used by the subset load balancer. + config.core.v4alpha.Metadata metadata_match = 2; +} diff --git a/api/envoy/extensions/filters/network/tcp_proxy/v4alpha/BUILD b/api/envoy/extensions/filters/network/tcp_proxy/v4alpha/BUILD new file mode 100644 index 000000000000..3825be9a8afc --- /dev/null +++ b/api/envoy/extensions/filters/network/tcp_proxy/v4alpha/BUILD @@ -0,0 +1,15 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/accesslog/v4alpha:pkg", + "//envoy/config/core/v4alpha:pkg", + "//envoy/extensions/filters/network/tcp_proxy/v3:pkg", + "//envoy/type/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/filters/network/tcp_proxy/v4alpha/tcp_proxy.proto b/api/envoy/extensions/filters/network/tcp_proxy/v4alpha/tcp_proxy.proto new file mode 100644 index 000000000000..1857f2abcd4e --- /dev/null +++ b/api/envoy/extensions/filters/network/tcp_proxy/v4alpha/tcp_proxy.proto @@ -0,0 +1,137 @@ +syntax = "proto3"; + +package envoy.extensions.filters.network.tcp_proxy.v4alpha; + +import "envoy/config/accesslog/v4alpha/accesslog.proto"; +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/type/v3/hash_policy.proto"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.tcp_proxy.v4alpha"; +option java_outer_classname = "TcpProxyProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: TCP Proxy] +// TCP Proxy :ref:`configuration overview `. +// [#extension: envoy.filters.network.tcp_proxy] + +// [#next-free-field: 13] +message TcpProxy { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy"; + + // Allows for specification of multiple upstream clusters along with weights + // that indicate the percentage of traffic to be forwarded to each cluster. + // The router selects an upstream cluster based on these weights. + message WeightedCluster { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy.WeightedCluster"; + + message ClusterWeight { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy.WeightedCluster.ClusterWeight"; + + // Name of the upstream cluster. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // When a request matches the route, the choice of an upstream cluster is + // determined by its weight. The sum of weights across all entries in the + // clusters array determines the total weight. + uint32 weight = 2 [(validate.rules).uint32 = {gte: 1}]; + + // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints + // in the upstream cluster with metadata matching what is set in this field will be considered + // for load balancing. Note that this will be merged with what's provided in + // :ref:`TcpProxy.metadata_match + // `, with values + // here taking precedence. The filter name should be specified as *envoy.lb*. + config.core.v4alpha.Metadata metadata_match = 3; + } + + // Specifies one or more upstream clusters associated with the route. + repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}]; + } + + // Configuration for tunneling TCP over other transports or application layers. + // Currently, only HTTP/2 is supported. When other options exist, HTTP/2 will + // remain the default. 
+ message TunnelingConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy.TunnelingConfig"; + + // The hostname to send in the synthesized CONNECT headers to the upstream proxy. + string hostname = 1 [(validate.rules).string = {min_bytes: 1}]; + } + + reserved 6; + + reserved "deprecated_v1"; + + // The prefix to use when emitting :ref:`statistics + // `. + string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + + oneof cluster_specifier { + option (validate.required) = true; + + // The upstream cluster to connect to. + string cluster = 2; + + // Multiple upstream clusters can be specified for a given route. The + // request is routed to one of the upstream clusters based on weights + // assigned to each cluster. + WeightedCluster weighted_clusters = 10; + } + + // Optional endpoint metadata match criteria. Only endpoints in the upstream + // cluster with metadata matching that set in metadata_match will be + // considered. The filter name should be specified as *envoy.lb*. + config.core.v4alpha.Metadata metadata_match = 9; + + // The idle timeout for connections managed by the TCP proxy filter. The idle timeout + // is defined as the period in which there are no bytes sent or received on either + // the upstream or downstream connection. If not set, the default idle timeout is 1 hour. If set + // to 0s, the timeout will be disabled. + // + // .. warning:: + // Disabling this timeout has a highly likelihood of yielding connection leaks due to lost TCP + // FIN packets, etc. + google.protobuf.Duration idle_timeout = 8; + + // [#not-implemented-hide:] The idle timeout for connections managed by the TCP proxy + // filter. The idle timeout is defined as the period in which there is no + // active traffic. If not set, there is no idle timeout. When the idle timeout + // is reached the connection will be closed. The distinction between + // downstream_idle_timeout/upstream_idle_timeout provides a means to set + // timeout based on the last byte sent on the downstream/upstream connection. + google.protobuf.Duration downstream_idle_timeout = 3; + + // [#not-implemented-hide:] + google.protobuf.Duration upstream_idle_timeout = 4; + + // Configuration for :ref:`access logs ` + // emitted by the this tcp_proxy. + repeated config.accesslog.v4alpha.AccessLog access_log = 5; + + // The maximum number of unsuccessful connection attempts that will be made before + // giving up. If the parameter is not specified, 1 connection attempt will be made. + google.protobuf.UInt32Value max_connect_attempts = 7 [(validate.rules).uint32 = {gte: 1}]; + + // Optional configuration for TCP proxy hash policy. If hash_policy is not set, the hash-based + // load balancing algorithms will select a host randomly. Currently the number of hash policies is + // limited to 1. + repeated type.v3.HashPolicy hash_policy = 11 [(validate.rules).repeated = {max_items: 1}]; + + // [#not-implemented-hide:] feature in progress + // If set, this configures tunneling, e.g. configuration options to tunnel multiple TCP + // payloads over a shared HTTP/2 tunnel. If this message is absent, the payload + // will be proxied upstream as per usual. 
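For orientation, a minimal sketch of how the fields above might be used in a listener filter entry, assuming the v3 package (the v4alpha package in this file is still a release-candidate shadow) and invented cluster names; the in-progress tunneling_config is omitted:

.. code-block:: yaml

  name: envoy.filters.network.tcp_proxy
  typed_config:
    "@type": type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy
    stat_prefix: ingress_tcp
    idle_timeout: 600s
    max_connect_attempts: 3
    weighted_clusters:
      clusters:
      - name: backend_a
        weight: 80
      - name: backend_b
        weight: 20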
+ TunnelingConfig tunneling_config = 12; +} diff --git a/api/envoy/extensions/filters/network/thrift_proxy/v4alpha/BUILD b/api/envoy/extensions/filters/network/thrift_proxy/v4alpha/BUILD new file mode 100644 index 000000000000..9ec74c0a9b83 --- /dev/null +++ b/api/envoy/extensions/filters/network/thrift_proxy/v4alpha/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v4alpha:pkg", + "//envoy/config/route/v4alpha:pkg", + "//envoy/extensions/filters/network/thrift_proxy/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/filters/network/thrift_proxy/v4alpha/route.proto b/api/envoy/extensions/filters/network/thrift_proxy/v4alpha/route.proto new file mode 100644 index 000000000000..9b847d645a65 --- /dev/null +++ b/api/envoy/extensions/filters/network/thrift_proxy/v4alpha/route.proto @@ -0,0 +1,157 @@ +syntax = "proto3"; + +package envoy.extensions.filters.network.thrift_proxy.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/route/v4alpha/route_components.proto"; + +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.thrift_proxy.v4alpha"; +option java_outer_classname = "RouteProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Thrift Proxy Route Configuration] +// Thrift Proxy :ref:`configuration overview `. + +message RouteConfiguration { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.thrift_proxy.v3.RouteConfiguration"; + + // The name of the route configuration. Reserved for future use in asynchronous route discovery. + string name = 1; + + // The list of routes that will be matched, in order, against incoming requests. The first route + // that matches will be used. + repeated Route routes = 2; +} + +message Route { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.thrift_proxy.v3.Route"; + + // Route matching parameters. + RouteMatch match = 1 [(validate.rules).message = {required: true}]; + + // Route request to some upstream cluster. + RouteAction route = 2 [(validate.rules).message = {required: true}]; +} + +message RouteMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.thrift_proxy.v3.RouteMatch"; + + oneof match_specifier { + option (validate.required) = true; + + // If specified, the route must exactly match the request method name. As a special case, an + // empty string matches any request method name. + string method_name = 1; + + // If specified, the route must have the service name as the request method name prefix. As a + // special case, an empty string matches any service name. Only relevant when service + // multiplexing. + string service_name = 2; + } + + // Inverts whatever matching is done in the :ref:`method_name + // ` or + // :ref:`service_name + // ` fields. + // Cannot be combined with wildcard matching as that would result in routes never being matched. + // + // .. 
note:: + // + // This does not invert matching done as part of the :ref:`headers field + // ` field. To + // invert header matching, see :ref:`invert_match + // `. + bool invert = 3; + + // Specifies a set of headers that the route should match on. The router will check the request’s + // headers against all the specified headers in the route config. A match will happen if all the + // headers in the route are present in the request with the same values (or based on presence if + // the value field is not in the config). Note that this only applies for Thrift transports and/or + // protocols that support headers. + repeated config.route.v4alpha.HeaderMatcher headers = 4; +} + +// [#next-free-field: 7] +message RouteAction { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.thrift_proxy.v3.RouteAction"; + + oneof cluster_specifier { + option (validate.required) = true; + + // Indicates a single upstream cluster to which the request should be routed + // to. + string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Multiple upstream clusters can be specified for a given route. The + // request is routed to one of the upstream clusters based on weights + // assigned to each cluster. + WeightedCluster weighted_clusters = 2; + + // Envoy will determine the cluster to route to by reading the value of the + // Thrift header named by cluster_header from the request headers. If the + // header is not found or the referenced cluster does not exist Envoy will + // respond with an unknown method exception or an internal error exception, + // respectively. + string cluster_header = 6 [(validate.rules).string = {min_bytes: 1}]; + } + + // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in + // the upstream cluster with metadata matching what is set in this field will be considered. + // Note that this will be merged with what's provided in :ref:`WeightedCluster.metadata_match + // `, + // with values there taking precedence. Keys and values should be provided under the "envoy.lb" + // metadata key. + config.core.v4alpha.Metadata metadata_match = 3; + + // Specifies a set of rate limit configurations that could be applied to the route. + // N.B. Thrift service or method name matching can be achieved by specifying a RequestHeaders + // action with the header name ":method-name". + repeated config.route.v4alpha.RateLimit rate_limits = 4; + + // Strip the service prefix from the method name, if there's a prefix. For + // example, the method call Service:method would end up being just method. + bool strip_service_name = 5; +} + +// Allows for specification of multiple upstream clusters along with weights that indicate the +// percentage of traffic to be forwarded to each cluster. The router selects an upstream cluster +// based on these weights. +message WeightedCluster { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.thrift_proxy.v3.WeightedCluster"; + + message ClusterWeight { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.thrift_proxy.v3.WeightedCluster.ClusterWeight"; + + // Name of the upstream cluster. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // When a request matches the route, the choice of an upstream cluster is determined by its + // weight. The sum of weights across all entries in the clusters array determines the total + // weight. 
+ google.protobuf.UInt32Value weight = 2 [(validate.rules).uint32 = {gte: 1}]; + + // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in + // the upstream cluster with metadata matching what is set in this field, combined with what's + // provided in :ref:`RouteAction's metadata_match + // `, + // will be considered. Values here will take precedence. Keys and values should be provided + // under the "envoy.lb" metadata key. + config.core.v4alpha.Metadata metadata_match = 3; + } + + // Specifies one or more upstream clusters associated with the route. + repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}]; +} diff --git a/api/envoy/extensions/filters/network/thrift_proxy/v4alpha/thrift_proxy.proto b/api/envoy/extensions/filters/network/thrift_proxy/v4alpha/thrift_proxy.proto new file mode 100644 index 000000000000..6bf055da3ce6 --- /dev/null +++ b/api/envoy/extensions/filters/network/thrift_proxy/v4alpha/thrift_proxy.proto @@ -0,0 +1,130 @@ +syntax = "proto3"; + +package envoy.extensions.filters.network.thrift_proxy.v4alpha; + +import "envoy/extensions/filters/network/thrift_proxy/v4alpha/route.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/struct.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.thrift_proxy.v4alpha"; +option java_outer_classname = "ThriftProxyProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Thrift Proxy] +// Thrift Proxy :ref:`configuration overview `. +// [#extension: envoy.filters.network.thrift_proxy] + +// Thrift transport types supported by Envoy. +enum TransportType { + // For downstream connections, the Thrift proxy will attempt to determine which transport to use. + // For upstream connections, the Thrift proxy will use same transport as the downstream + // connection. + AUTO_TRANSPORT = 0; + + // The Thrift proxy will use the Thrift framed transport. + FRAMED = 1; + + // The Thrift proxy will use the Thrift unframed transport. + UNFRAMED = 2; + + // The Thrift proxy will assume the client is using the Thrift header transport. + HEADER = 3; +} + +// Thrift Protocol types supported by Envoy. +enum ProtocolType { + // For downstream connections, the Thrift proxy will attempt to determine which protocol to use. + // Note that the older, non-strict (or lax) binary protocol is not included in automatic protocol + // detection. For upstream connections, the Thrift proxy will use the same protocol as the + // downstream connection. + AUTO_PROTOCOL = 0; + + // The Thrift proxy will use the Thrift binary protocol. + BINARY = 1; + + // The Thrift proxy will use Thrift non-strict binary protocol. + LAX_BINARY = 2; + + // The Thrift proxy will use the Thrift compact protocol. + COMPACT = 3; + + // The Thrift proxy will use the Thrift "Twitter" protocol implemented by the finagle library. + TWITTER = 4; +} + +// [#next-free-field: 6] +message ThriftProxy { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.thrift_proxy.v3.ThriftProxy"; + + // Supplies the type of transport that the Thrift proxy should use. Defaults to + // :ref:`AUTO_TRANSPORT`. 
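As a sketch of how the thrift route messages above combine with the ThriftProxy fields defined below, a hypothetical filter configuration might look as follows; the cluster, service, and method names are invented, and the v3 package is assumed since the v4alpha package here is still a release candidate:

.. code-block:: yaml

  name: envoy.filters.network.thrift_proxy
  typed_config:
    "@type": type.googleapis.com/envoy.extensions.filters.network.thrift_proxy.v3.ThriftProxy
    stat_prefix: thrift_ingress
    transport: FRAMED
    protocol: BINARY
    route_config:
      name: local_route
      routes:
      - match:
          method_name: getUser
        route:
          cluster: user_service
      - match:
          service_name: Accounts
        route:
          weighted_clusters:
            clusters:
            - name: accounts_v1
              weight: 90
            - name: accounts_v2
              weight: 10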
+ TransportType transport = 2 [(validate.rules).enum = {defined_only: true}]; + + // Supplies the type of protocol that the Thrift proxy should use. Defaults to + // :ref:`AUTO_PROTOCOL`. + ProtocolType protocol = 3 [(validate.rules).enum = {defined_only: true}]; + + // The human readable prefix to use when emitting statistics. + string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The route table for the connection manager is static and is specified in this property. + RouteConfiguration route_config = 4; + + // A list of individual Thrift filters that make up the filter chain for requests made to the + // Thrift proxy. Order matters as the filters are processed sequentially. For backwards + // compatibility, if no thrift_filters are specified, a default Thrift router filter + // (`envoy.filters.thrift.router`) is used. + repeated ThriftFilter thrift_filters = 5; +} + +// ThriftFilter configures a Thrift filter. +message ThriftFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.thrift_proxy.v3.ThriftFilter"; + + reserved 2; + + reserved "config"; + + // The name of the filter to instantiate. The name must match a supported + // filter. The built-in filters are: + // + // [#comment:TODO(zuercher): Auto generate the following list] + // * :ref:`envoy.filters.thrift.router ` + // * :ref:`envoy.filters.thrift.rate_limit ` + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Filter specific configuration which depends on the filter being instantiated. See the supported + // filters for further documentation. + oneof config_type { + google.protobuf.Any typed_config = 3; + } +} + +// ThriftProtocolOptions specifies Thrift upstream protocol options. This object is used in +// in +// :ref:`typed_extension_protocol_options`, +// keyed by the name `envoy.filters.network.thrift_proxy`. +message ThriftProtocolOptions { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.thrift_proxy.v3.ThriftProtocolOptions"; + + // Supplies the type of transport that the Thrift proxy should use for upstream connections. + // Selecting + // :ref:`AUTO_TRANSPORT`, + // which is the default, causes the proxy to use the same transport as the downstream connection. + TransportType transport = 1 [(validate.rules).enum = {defined_only: true}]; + + // Supplies the type of protocol that the Thrift proxy should use for upstream connections. + // Selecting + // :ref:`AUTO_PROTOCOL`, + // which is the default, causes the proxy to use the same protocol as the downstream connection. + ProtocolType protocol = 2 [(validate.rules).enum = {defined_only: true}]; +} diff --git a/api/envoy/extensions/filters/udp/dns_filter/v4alpha/BUILD b/api/envoy/extensions/filters/udp/dns_filter/v4alpha/BUILD new file mode 100644 index 000000000000..f869cf5ac123 --- /dev/null +++ b/api/envoy/extensions/filters/udp/dns_filter/v4alpha/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v4alpha:pkg", + "//envoy/data/dns/v4alpha:pkg", + "//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/filters/udp/dns_filter/v4alpha/dns_filter.proto b/api/envoy/extensions/filters/udp/dns_filter/v4alpha/dns_filter.proto new file mode 100644 index 000000000000..be78ebf40c18 --- /dev/null +++ b/api/envoy/extensions/filters/udp/dns_filter/v4alpha/dns_filter.proto @@ -0,0 +1,80 @@ +syntax = "proto3"; + +package envoy.extensions.filters.udp.dns_filter.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/data/dns/v4alpha/dns_table.proto"; + +import "google/protobuf/duration.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.udp.dns_filter.v4alpha"; +option java_outer_classname = "DnsFilterProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).work_in_progress = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: DNS Filter] +// DNS Filter :ref:`configuration overview `. +// [#extension: envoy.filters.udp_listener.dns_filter] + +// Configuration for the DNS filter. +message DnsFilterConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.udp.dns_filter.v3alpha.DnsFilterConfig"; + + // This message contains the configuration for the DNS Filter operating + // in a server context. This message will contain the virtual hosts and + // associated addresses with which Envoy will respond to queries + message ServerContextConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.udp.dns_filter.v3alpha.DnsFilterConfig.ServerContextConfig"; + + oneof config_source { + option (validate.required) = true; + + // Load the configuration specified from the control plane + data.dns.v4alpha.DnsTable inline_dns_table = 1; + + // Seed the filter configuration from an external path. This source + // is a yaml formatted file that contains the DnsTable driving Envoy's + // responses to DNS queries + config.core.v4alpha.DataSource external_dns_table = 2; + } + } + + // This message contains the configuration for the DNS Filter operating + // in a client context. 
This message will contain the timeouts, retry, + // and forwarding configuration for Envoy to make DNS requests to other + // resolvers + message ClientContextConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.udp.dns_filter.v3alpha.DnsFilterConfig.ClientContextConfig"; + + // Sets the maximum time we will wait for the upstream query to complete + // We allow 5s for the upstream resolution to complete, so the minimum + // value here is 5 + google.protobuf.Duration resolver_timeout = 1 [(validate.rules).duration = {gte {seconds: 5}}]; + + // A list of DNS servers to which we can forward queries + repeated string upstream_resolvers = 2 [(validate.rules).repeated = { + min_items: 1 + items {string {min_len: 3}} + }]; + } + + // The stat prefix used when emitting DNS filter statistics + string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; + + // Server context configuration contains the data that the filter uses to respond + // to DNS requests. + ServerContextConfig server_config = 2; + + // Client context configuration controls Envoy's behavior when it must use external + // resolvers to answer a query. This object is optional and if omitted instructs + // the filter to resolve queries from the data in the server_config + ClientContextConfig client_config = 3; +} diff --git a/api/envoy/extensions/transport_sockets/tls/v4alpha/BUILD b/api/envoy/extensions/transport_sockets/tls/v4alpha/BUILD index e56544584bfe..d294b69de40c 100644 --- a/api/envoy/extensions/transport_sockets/tls/v4alpha/BUILD +++ b/api/envoy/extensions/transport_sockets/tls/v4alpha/BUILD @@ -8,7 +8,7 @@ api_proto_package( deps = [ "//envoy/config/core/v4alpha:pkg", "//envoy/extensions/transport_sockets/tls/v3:pkg", - "//envoy/type/matcher/v3:pkg", + "//envoy/type/matcher/v4alpha:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/api/envoy/extensions/transport_sockets/tls/v4alpha/common.proto b/api/envoy/extensions/transport_sockets/tls/v4alpha/common.proto index 9028e380d092..f81442f4dbcd 100644 --- a/api/envoy/extensions/transport_sockets/tls/v4alpha/common.proto +++ b/api/envoy/extensions/transport_sockets/tls/v4alpha/common.proto @@ -3,7 +3,7 @@ syntax = "proto3"; package envoy.extensions.transport_sockets.tls.v4alpha; import "envoy/config/core/v4alpha/base.proto"; -import "envoy/type/matcher/v3/string.proto"; +import "envoy/type/matcher/v4alpha/string.proto"; import "google/protobuf/any.proto"; import "google/protobuf/struct.proto"; @@ -298,7 +298,7 @@ message CertificateValidationContext { // Subject Alternative Name of the presented certificate matches one of the specified matches. // // When a certificate has wildcard DNS SAN entries, to match a specific client, it should be - // configured with exact match type in the :ref:`string matcher `. + // configured with exact match type in the :ref:`string matcher `. // For example if the certificate has "\*.example.com" as DNS SAN entry, to allow only "api.example.com", // it should be configured as shown below. // @@ -312,7 +312,7 @@ message CertificateValidationContext { // Subject Alternative Names are easily spoofable and verifying only them is insecure, // therefore this option must be used together with :ref:`trusted_ca // `. - repeated type.matcher.v3.StringMatcher match_subject_alt_names = 9; + repeated type.matcher.v4alpha.StringMatcher match_subject_alt_names = 9; // [#not-implemented-hide:] Must present a signed time-stamped OCSP response. 
google.protobuf.BoolValue require_ocsp_staple = 5; diff --git a/api/envoy/service/health/v4alpha/BUILD b/api/envoy/service/health/v4alpha/BUILD new file mode 100644 index 000000000000..b7b2a13bd495 --- /dev/null +++ b/api/envoy/service/health/v4alpha/BUILD @@ -0,0 +1,15 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + has_services = True, + deps = [ + "//envoy/config/core/v4alpha:pkg", + "//envoy/config/endpoint/v3:pkg", + "//envoy/service/health/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/service/health/v4alpha/hds.proto b/api/envoy/service/health/v4alpha/hds.proto new file mode 100644 index 000000000000..826d5eeb0301 --- /dev/null +++ b/api/envoy/service/health/v4alpha/hds.proto @@ -0,0 +1,160 @@ +syntax = "proto3"; + +package envoy.service.health.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/core/v4alpha/health_check.proto"; +import "envoy/config/endpoint/v3/endpoint_components.proto"; + +import "google/api/annotations.proto"; +import "google/protobuf/duration.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.service.health.v4alpha"; +option java_outer_classname = "HdsProto"; +option java_multiple_files = true; +option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Health Discovery Service (HDS)] + +// HDS is Health Discovery Service. It compliments Envoy’s health checking +// service by designating this Envoy to be a healthchecker for a subset of hosts +// in the cluster. The status of these health checks will be reported to the +// management server, where it can be aggregated etc and redistributed back to +// Envoy through EDS. +service HealthDiscoveryService { + // 1. Envoy starts up and if its can_healthcheck option in the static + // bootstrap config is enabled, sends HealthCheckRequest to the management + // server. It supplies its capabilities (which protocol it can health check + // with, what zone it resides in, etc.). + // 2. In response to (1), the management server designates this Envoy as a + // healthchecker to health check a subset of all upstream hosts for a given + // cluster (for example upstream Host 1 and Host 2). It streams + // HealthCheckSpecifier messages with cluster related configuration for all + // clusters this Envoy is designated to health check. Subsequent + // HealthCheckSpecifier message will be sent on changes to: + // a. Endpoints to health checks + // b. Per cluster configuration change + // 3. Envoy creates a health probe based on the HealthCheck config and sends + // it to endpoint(ip:port) of Host 1 and 2. Based on the HealthCheck + // configuration Envoy waits upon the arrival of the probe response and + // looks at the content of the response to decide whether the endpoint is + // healthy or not. If a response hasn't been received within the timeout + // interval, the endpoint health status is considered TIMEOUT. + // 4. Envoy reports results back in an EndpointHealthResponse message. + // Envoy streams responses as often as the interval configured by the + // management server in HealthCheckSpecifier. + // 5. 
The management Server collects health statuses for all endpoints in the + // cluster (for all clusters) and uses this information to construct + // EndpointDiscoveryResponse messages. + // 6. Once Envoy has a list of upstream endpoints to send traffic to, it load + // balances traffic to them without additional health checking. It may + // use inline healthcheck (i.e. consider endpoint UNHEALTHY if connection + // failed to a particular endpoint to account for health status propagation + // delay between HDS and EDS). + // By default, can_healthcheck is true. If can_healthcheck is false, Cluster + // configuration may not contain HealthCheck message. + // TODO(htuch): How is can_healthcheck communicated to CDS to ensure the above + // invariant? + // TODO(htuch): Add @amb67's diagram. + rpc StreamHealthCheck(stream HealthCheckRequestOrEndpointHealthResponse) + returns (stream HealthCheckSpecifier) { + } + + // TODO(htuch): Unlike the gRPC version, there is no stream-based binding of + // request/response. Should we add an identifier to the HealthCheckSpecifier + // to bind with the response? + rpc FetchHealthCheck(HealthCheckRequestOrEndpointHealthResponse) returns (HealthCheckSpecifier) { + option (google.api.http).post = "/v3/discovery:health_check"; + option (google.api.http).body = "*"; + } +} + +// Defines supported protocols etc, so the management server can assign proper +// endpoints to healthcheck. +message Capability { + option (udpa.annotations.versioning).previous_message_type = "envoy.service.health.v3.Capability"; + + // Different Envoy instances may have different capabilities (e.g. Redis) + // and/or have ports enabled for different protocols. + enum Protocol { + HTTP = 0; + TCP = 1; + REDIS = 2; + } + + repeated Protocol health_check_protocols = 1; +} + +message HealthCheckRequest { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.health.v3.HealthCheckRequest"; + + config.core.v4alpha.Node node = 1; + + Capability capability = 2; +} + +message EndpointHealth { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.health.v3.EndpointHealth"; + + config.endpoint.v3.Endpoint endpoint = 1; + + config.core.v4alpha.HealthStatus health_status = 2; +} + +message EndpointHealthResponse { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.health.v3.EndpointHealthResponse"; + + repeated EndpointHealth endpoints_health = 1; +} + +message HealthCheckRequestOrEndpointHealthResponse { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.health.v3.HealthCheckRequestOrEndpointHealthResponse"; + + oneof request_type { + HealthCheckRequest health_check_request = 1; + + EndpointHealthResponse endpoint_health_response = 2; + } +} + +message LocalityEndpoints { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.health.v3.LocalityEndpoints"; + + config.core.v4alpha.Locality locality = 1; + + repeated config.endpoint.v3.Endpoint endpoints = 2; +} + +// The cluster name and locality is provided to Envoy for the endpoints that it +// health checks to support statistics reporting, logging and debugging by the +// Envoy instance (outside of HDS). For maximum usefulness, it should match the +// same cluster structure as that provided by EDS. 
+message ClusterHealthCheck { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.health.v3.ClusterHealthCheck"; + + string cluster_name = 1; + + repeated config.core.v4alpha.HealthCheck health_checks = 2; + + repeated LocalityEndpoints locality_endpoints = 3; +} + +message HealthCheckSpecifier { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.health.v3.HealthCheckSpecifier"; + + repeated ClusterHealthCheck cluster_health_checks = 1; + + // The default is 1 second. + google.protobuf.Duration interval = 2; +} diff --git a/api/envoy/service/status/v4alpha/BUILD b/api/envoy/service/status/v4alpha/BUILD new file mode 100644 index 000000000000..fb238648fbca --- /dev/null +++ b/api/envoy/service/status/v4alpha/BUILD @@ -0,0 +1,16 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + has_services = True, + deps = [ + "//envoy/admin/v4alpha:pkg", + "//envoy/config/core/v4alpha:pkg", + "//envoy/service/status/v3:pkg", + "//envoy/type/matcher/v4alpha:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/service/status/v4alpha/csds.proto b/api/envoy/service/status/v4alpha/csds.proto new file mode 100644 index 000000000000..f6f5fa654d70 --- /dev/null +++ b/api/envoy/service/status/v4alpha/csds.proto @@ -0,0 +1,102 @@ +syntax = "proto3"; + +package envoy.service.status.v4alpha; + +import "envoy/admin/v4alpha/config_dump.proto"; +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/type/matcher/v4alpha/node.proto"; + +import "google/api/annotations.proto"; +import "google/protobuf/struct.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.service.status.v4alpha"; +option java_outer_classname = "CsdsProto"; +option java_multiple_files = true; +option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Client Status Discovery Service (CSDS)] + +// CSDS is Client Status Discovery Service. It can be used to get the status of +// an xDS-compliant client from the management server's point of view. In the +// future, it can potentially be used as an interface to get the current +// state directly from the client. +service ClientStatusDiscoveryService { + rpc StreamClientStatus(stream ClientStatusRequest) returns (stream ClientStatusResponse) { + } + + rpc FetchClientStatus(ClientStatusRequest) returns (ClientStatusResponse) { + option (google.api.http).post = "/v3/discovery:client_status"; + option (google.api.http).body = "*"; + } +} + +// Status of a config. +enum ConfigStatus { + // Status info is not available/unknown. + UNKNOWN = 0; + + // Management server has sent the config to client and received ACK. + SYNCED = 1; + + // Config is not sent. + NOT_SENT = 2; + + // Management server has sent the config to client but hasn’t received + // ACK/NACK. + STALE = 3; + + // Management server has sent the config to client but received NACK. + ERROR = 4; +} + +// Request for client status of clients identified by a list of NodeMatchers. +message ClientStatusRequest { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.status.v3.ClientStatusRequest"; + + // Management server can use these match criteria to identify clients. + // The match follows OR semantics. 
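For illustration only, a ClientStatusRequest selecting clients by node id prefix and by a node metadata key could be rendered in YAML roughly as below; the prefix and metadata values are invented, and the NodeMatcher and StructMatcher messages it relies on are defined later in this patch:

.. code-block:: yaml

  node_matchers:
  - node_id:
      prefix: envoy-edge-
    node_metadatas:
    - path:
      - key: cluster
      value:
        string_match:
          exact: edge-proxies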
+ repeated type.matcher.v4alpha.NodeMatcher node_matchers = 1; +} + +// Detailed config (per xDS) with status. +// [#next-free-field: 6] +message PerXdsConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.status.v3.PerXdsConfig"; + + ConfigStatus status = 1; + + oneof per_xds_config { + admin.v4alpha.ListenersConfigDump listener_config = 2; + + admin.v4alpha.ClustersConfigDump cluster_config = 3; + + admin.v4alpha.RoutesConfigDump route_config = 4; + + admin.v4alpha.ScopedRoutesConfigDump scoped_route_config = 5; + } +} + +// All xds configs for a particular client. +message ClientConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.status.v3.ClientConfig"; + + // Node for a particular client. + config.core.v4alpha.Node node = 1; + + repeated PerXdsConfig xds_config = 2; +} + +message ClientStatusResponse { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.status.v3.ClientStatusResponse"; + + // Client configs for the clients specified in the ClientStatusRequest. + repeated ClientConfig config = 1; +} diff --git a/api/envoy/service/tap/v4alpha/BUILD b/api/envoy/service/tap/v4alpha/BUILD new file mode 100644 index 000000000000..5f75886cd068 --- /dev/null +++ b/api/envoy/service/tap/v4alpha/BUILD @@ -0,0 +1,17 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + has_services = True, + deps = [ + "//envoy/config/core/v4alpha:pkg", + "//envoy/config/tap/v4alpha:pkg", + "//envoy/data/tap/v3:pkg", + "//envoy/service/discovery/v3:pkg", + "//envoy/service/tap/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/service/tap/v4alpha/tap.proto b/api/envoy/service/tap/v4alpha/tap.proto new file mode 100644 index 000000000000..a1654d18bebb --- /dev/null +++ b/api/envoy/service/tap/v4alpha/tap.proto @@ -0,0 +1,64 @@ +syntax = "proto3"; + +package envoy.service.tap.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/data/tap/v3/wrapper.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.service.tap.v4alpha"; +option java_outer_classname = "TapProto"; +option java_multiple_files = true; +option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Tap Sink Service] + +// [#not-implemented-hide:] A tap service to receive incoming taps. Envoy will call +// StreamTaps to deliver captured taps to the server +service TapSinkService { + // Envoy will connect and send StreamTapsRequest messages forever. It does not expect any + // response to be sent as nothing would be done in the case of failure. The server should + // disconnect if it expects Envoy to reconnect. + rpc StreamTaps(stream StreamTapsRequest) returns (StreamTapsResponse) { + } +} + +// [#not-implemented-hide:] Stream message for the Tap API. Envoy will open a stream to the server +// and stream taps without ever expecting a response. 
+message StreamTapsRequest { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.tap.v3.StreamTapsRequest"; + + message Identifier { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.tap.v3.StreamTapsRequest.Identifier"; + + // The node sending taps over the stream. + config.core.v4alpha.Node node = 1 [(validate.rules).message = {required: true}]; + + // The opaque identifier that was set in the :ref:`output config + // `. + string tap_id = 2; + } + + // Identifier data effectively is a structured metadata. As a performance optimization this will + // only be sent in the first message on the stream. + Identifier identifier = 1; + + // The trace id. this can be used to merge together a streaming trace. Note that the trace_id + // is not guaranteed to be spatially or temporally unique. + uint64 trace_id = 2; + + // The trace data. + data.tap.v3.TraceWrapper trace = 3; +} + +// [#not-implemented-hide:] +message StreamTapsResponse { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.tap.v3.StreamTapsResponse"; +} diff --git a/api/envoy/service/tap/v4alpha/tapds.proto b/api/envoy/service/tap/v4alpha/tapds.proto new file mode 100644 index 000000000000..855fde8c8e63 --- /dev/null +++ b/api/envoy/service/tap/v4alpha/tapds.proto @@ -0,0 +1,48 @@ +syntax = "proto3"; + +package envoy.service.tap.v4alpha; + +import "envoy/config/tap/v4alpha/common.proto"; +import "envoy/service/discovery/v3/discovery.proto"; + +import "google/api/annotations.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.service.tap.v4alpha"; +option java_outer_classname = "TapdsProto"; +option java_multiple_files = true; +option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Tap discovery service] + +// [#not-implemented-hide:] Tap discovery service. +service TapDiscoveryService { + rpc StreamTapConfigs(stream discovery.v3.DiscoveryRequest) + returns (stream discovery.v3.DiscoveryResponse) { + } + + rpc DeltaTapConfigs(stream discovery.v3.DeltaDiscoveryRequest) + returns (stream discovery.v3.DeltaDiscoveryResponse) { + } + + rpc FetchTapConfigs(discovery.v3.DiscoveryRequest) returns (discovery.v3.DiscoveryResponse) { + option (google.api.http).post = "/v3/discovery:tap_configs"; + option (google.api.http).body = "*"; + } +} + +// [#not-implemented-hide:] A tap resource is essentially a tap configuration with a name +// The filter TapDS config references this name. +message TapResource { + option (udpa.annotations.versioning).previous_message_type = "envoy.service.tap.v3.TapResource"; + + // The name of the tap configuration. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Tap config to apply + config.tap.v4alpha.TapConfig config = 2; +} diff --git a/api/envoy/type/matcher/regex.proto b/api/envoy/type/matcher/regex.proto index 78b4a2c1d61e..9e41637ab70c 100644 --- a/api/envoy/type/matcher/regex.proto +++ b/api/envoy/type/matcher/regex.proto @@ -24,7 +24,10 @@ message RegexMatcher { // compiled regex is to evaluate. A regex that has a program size greater than the configured // value will fail to compile. In this case, the configured max program size can be increased // or the regex can be simplified. If not specified, the default is 100. 
- google.protobuf.UInt32Value max_program_size = 1; + // + // This field is deprecated; regexp validation should be performed on the management server + // instead of being done by each individual client. + google.protobuf.UInt32Value max_program_size = 1 [deprecated = true]; } oneof engine_type { diff --git a/api/envoy/type/matcher/v3/regex.proto b/api/envoy/type/matcher/v3/regex.proto index 393274794abf..e318cb5457d9 100644 --- a/api/envoy/type/matcher/v3/regex.proto +++ b/api/envoy/type/matcher/v3/regex.proto @@ -30,7 +30,10 @@ message RegexMatcher { // compiled regex is to evaluate. A regex that has a program size greater than the configured // value will fail to compile. In this case, the configured max program size can be increased // or the regex can be simplified. If not specified, the default is 100. - google.protobuf.UInt32Value max_program_size = 1; + // + // This field is deprecated; regexp validation should be performed on the management server + // instead of being done by each individual client. + google.protobuf.UInt32Value max_program_size = 1 [deprecated = true]; } oneof engine_type { diff --git a/api/envoy/type/matcher/v4alpha/BUILD b/api/envoy/type/matcher/v4alpha/BUILD new file mode 100644 index 000000000000..e63f52b2baa5 --- /dev/null +++ b/api/envoy/type/matcher/v4alpha/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/annotations:pkg", + "//envoy/type/matcher/v3:pkg", + "//envoy/type/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/type/matcher/v4alpha/metadata.proto b/api/envoy/type/matcher/v4alpha/metadata.proto new file mode 100644 index 000000000000..8abe14e7b667 --- /dev/null +++ b/api/envoy/type/matcher/v4alpha/metadata.proto @@ -0,0 +1,105 @@ +syntax = "proto3"; + +package envoy.type.matcher.v4alpha; + +import "envoy/type/matcher/v4alpha/value.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; +option java_outer_classname = "MetadataProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Metadata matcher] + +// MetadataMatcher provides a general interface to check if a given value is matched in +// :ref:`Metadata `. It uses `filter` and `path` to retrieve the value +// from the Metadata and then check if it's matched to the specified value. +// +// For example, for the following Metadata: +// +// .. code-block:: yaml +// +// filter_metadata: +// envoy.filters.http.rbac: +// fields: +// a: +// struct_value: +// fields: +// b: +// struct_value: +// fields: +// c: +// string_value: pro +// t: +// list_value: +// values: +// - string_value: m +// - string_value: n +// +// The following MetadataMatcher is matched as the path [a, b, c] will retrieve a string value "pro" +// from the Metadata which is matched to the specified prefix match. +// +// .. code-block:: yaml +// +// filter: envoy.filters.http.rbac +// path: +// - key: a +// - key: b +// - key: c +// value: +// string_match: +// prefix: pr +// +// The following MetadataMatcher is matched as the code will match one of the string values in the +// list at the path [a, t]. +// +// .. 
code-block:: yaml +// +// filter: envoy.filters.http.rbac +// path: +// - key: a +// - key: t +// value: +// list_match: +// one_of: +// string_match: +// exact: m +// +// An example use of MetadataMatcher is specifying additional metadata in envoy.filters.http.rbac to +// enforce access control based on dynamic metadata in a request. See :ref:`Permission +// ` and :ref:`Principal +// `. + +// [#next-major-version: MetadataMatcher should use StructMatcher] +message MetadataMatcher { + option (udpa.annotations.versioning).previous_message_type = + "envoy.type.matcher.v3.MetadataMatcher"; + + // Specifies the segment in a path to retrieve value from Metadata. + // Note: Currently it's not supported to retrieve a value from a list in Metadata. This means that + // if the segment key refers to a list, it has to be the last segment in a path. + message PathSegment { + option (udpa.annotations.versioning).previous_message_type = + "envoy.type.matcher.v3.MetadataMatcher.PathSegment"; + + oneof segment { + option (validate.required) = true; + + // If specified, use the key to retrieve the value in a Struct. + string key = 1 [(validate.rules).string = {min_bytes: 1}]; + } + } + + // The filter name to retrieve the Struct from the Metadata. + string filter = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The path to retrieve the Value from the Struct. + repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}]; + + // The MetadataMatcher is matched if the value retrieved by path is matched to this value. + ValueMatcher value = 3 [(validate.rules).message = {required: true}]; +} diff --git a/api/envoy/type/matcher/v4alpha/node.proto b/api/envoy/type/matcher/v4alpha/node.proto new file mode 100644 index 000000000000..a74bf808f05a --- /dev/null +++ b/api/envoy/type/matcher/v4alpha/node.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; + +package envoy.type.matcher.v4alpha; + +import "envoy/type/matcher/v4alpha/string.proto"; +import "envoy/type/matcher/v4alpha/struct.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; +option java_outer_classname = "NodeProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Node matcher] + +// Specifies the way to match a Node. +// The match follows AND semantics. +message NodeMatcher { + option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.v3.NodeMatcher"; + + // Specifies match criteria on the node id. + StringMatcher node_id = 1; + + // Specifies match criteria on the node metadata. + repeated StructMatcher node_metadatas = 2; +} diff --git a/api/envoy/type/matcher/v4alpha/number.proto b/api/envoy/type/matcher/v4alpha/number.proto new file mode 100644 index 000000000000..b168af19ab50 --- /dev/null +++ b/api/envoy/type/matcher/v4alpha/number.proto @@ -0,0 +1,33 @@ +syntax = "proto3"; + +package envoy.type.matcher.v4alpha; + +import "envoy/type/v3/range.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; +option java_outer_classname = "NumberProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Number matcher] + +// Specifies the way to match a double value. 
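A brief, hypothetical sketch of a double match as it would be embedded in a ValueMatcher (defined later in this patch); the bounds are illustrative and follow the half-open [start, end) semantics noted below:

.. code-block:: yaml

  value:
    double_match:
      range:
        start: 100
        end: 200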
+message DoubleMatcher { + option (udpa.annotations.versioning).previous_message_type = + "envoy.type.matcher.v3.DoubleMatcher"; + + oneof match_pattern { + option (validate.required) = true; + + // If specified, the input double value must be in the range specified here. + // Note: The range is using half-open interval semantics [start, end). + v3.DoubleRange range = 1; + + // If specified, the input double value must be equal to the value specified here. + double exact = 2; + } +} diff --git a/api/envoy/type/matcher/v4alpha/path.proto b/api/envoy/type/matcher/v4alpha/path.proto new file mode 100644 index 000000000000..9150939bf2ee --- /dev/null +++ b/api/envoy/type/matcher/v4alpha/path.proto @@ -0,0 +1,30 @@ +syntax = "proto3"; + +package envoy.type.matcher.v4alpha; + +import "envoy/type/matcher/v4alpha/string.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; +option java_outer_classname = "PathProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Path matcher] + +// Specifies the way to match a path on HTTP request. +message PathMatcher { + option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.v3.PathMatcher"; + + oneof rule { + option (validate.required) = true; + + // The `path` must match the URL path portion of the :path header. The query and fragment + // string (if present) are removed in the URL path portion. + // For example, the path */data* will match the *:path* header */data#fragment?param=value*. + StringMatcher path = 1 [(validate.rules).message = {required: true}]; + } +} diff --git a/api/envoy/type/matcher/v4alpha/regex.proto b/api/envoy/type/matcher/v4alpha/regex.proto new file mode 100644 index 000000000000..f94a85e778e4 --- /dev/null +++ b/api/envoy/type/matcher/v4alpha/regex.proto @@ -0,0 +1,72 @@ +syntax = "proto3"; + +package envoy.type.matcher.v4alpha; + +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; +option java_outer_classname = "RegexProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Regex matcher] + +// A regex matcher designed for safety when used with untrusted input. +message RegexMatcher { + option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.v3.RegexMatcher"; + + // Google's `RE2 `_ regex engine. The regex string must adhere to + // the documented `syntax `_. The engine is designed + // to complete execution in linear time as well as limit the amount of memory used. + message GoogleRE2 { + option (udpa.annotations.versioning).previous_message_type = + "envoy.type.matcher.v3.RegexMatcher.GoogleRE2"; + + reserved 1; + + reserved "max_program_size"; + } + + oneof engine_type { + option (validate.required) = true; + + // Google's RE2 regex engine. + GoogleRE2 google_re2 = 1 [(validate.rules).message = {required: true}]; + } + + // The regex match string. The string must be supported by the configured engine. 
+ string regex = 2 [(validate.rules).string = {min_bytes: 1}]; +} + +// Describes how to match a string and then produce a new string using a regular +// expression and a substitution string. +message RegexMatchAndSubstitute { + option (udpa.annotations.versioning).previous_message_type = + "envoy.type.matcher.v3.RegexMatchAndSubstitute"; + + // The regular expression used to find portions of a string (hereafter called + // the "subject string") that should be replaced. When a new string is + // produced during the substitution operation, the new string is initially + // the same as the subject string, but then all matches in the subject string + // are replaced by the substitution string. If replacing all matches isn't + // desired, regular expression anchors can be used to ensure a single match, + // so as to replace just one occurrence of a pattern. Capture groups can be + // used in the pattern to extract portions of the subject string, and then + // referenced in the substitution string. + RegexMatcher pattern = 1; + + // The string that should be substituted into matching portions of the + // subject string during a substitution operation to produce a new string. + // Capture groups in the pattern can be referenced in the substitution + // string. Note, however, that the syntax for referring to capture groups is + // defined by the chosen regular expression engine. Google's `RE2 + // `_ regular expression engine uses a + // backslash followed by the capture group number to denote a numbered + // capture group. E.g., ``\1`` refers to capture group 1, and ``\2`` refers + // to capture group 2. + string substitution = 2; +} diff --git a/api/envoy/type/matcher/v4alpha/string.proto b/api/envoy/type/matcher/v4alpha/string.proto new file mode 100644 index 000000000000..8ce0b12f9e2a --- /dev/null +++ b/api/envoy/type/matcher/v4alpha/string.proto @@ -0,0 +1,71 @@ +syntax = "proto3"; + +package envoy.type.matcher.v4alpha; + +import "envoy/type/matcher/v4alpha/regex.proto"; + +import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; +option java_outer_classname = "StringProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: String matcher] + +// Specifies the way to match a string. +// [#next-free-field: 7] +message StringMatcher { + option (udpa.annotations.versioning).previous_message_type = + "envoy.type.matcher.v3.StringMatcher"; + + reserved 4; + + reserved "regex"; + + oneof match_pattern { + option (validate.required) = true; + + // The input string must match exactly the string specified here. + // + // Examples: + // + // * *abc* only matches the value *abc*. + string exact = 1; + + // The input string must have the prefix specified here. + // Note: empty prefix is not allowed, please use regex instead. + // + // Examples: + // + // * *abc* matches the value *abc.xyz* + string prefix = 2 [(validate.rules).string = {min_bytes: 1}]; + + // The input string must have the suffix specified here. + // Note: empty prefix is not allowed, please use regex instead. + // + // Examples: + // + // * *abc* matches the value *xyz.abc* + string suffix = 3 [(validate.rules).string = {min_bytes: 1}]; + + // The input string must match the regular expression specified here. 
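To show the oneof in use, a few hypothetical StringMatcher values as they might appear wherever a StringMatcher is accepted (for instance the match_subject_alt_names field earlier in this patch); the hostnames are invented, and safe_regex and ignore_case are the fields defined just below:

.. code-block:: yaml

  - exact: api.example.com
  - prefix: "api."
    ignore_case: true
  - safe_regex:
      google_re2: {}
      regex: '^api\.v[0-9]+\.example\.com$'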
+ RegexMatcher safe_regex = 5 [(validate.rules).message = {required: true}]; + } + + // If true, indicates the exact/prefix/suffix matching should be case insensitive. This has no + // effect for the safe_regex match. + // For example, the matcher *data* will match both input string *Data* and *data* if set to true. + bool ignore_case = 6; +} + +// Specifies a list of ways to match a string. +message ListStringMatcher { + option (udpa.annotations.versioning).previous_message_type = + "envoy.type.matcher.v3.ListStringMatcher"; + + repeated StringMatcher patterns = 1 [(validate.rules).repeated = {min_items: 1}]; +} diff --git a/api/envoy/type/matcher/v4alpha/struct.proto b/api/envoy/type/matcher/v4alpha/struct.proto new file mode 100644 index 000000000000..643cc5a47570 --- /dev/null +++ b/api/envoy/type/matcher/v4alpha/struct.proto @@ -0,0 +1,91 @@ +syntax = "proto3"; + +package envoy.type.matcher.v4alpha; + +import "envoy/type/matcher/v4alpha/value.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; +option java_outer_classname = "StructProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Struct matcher] + +// StructMatcher provides a general interface to check if a given value is matched in +// google.protobuf.Struct. It uses `path` to retrieve the value +// from the struct and then check if it's matched to the specified value. +// +// For example, for the following Struct: +// +// .. code-block:: yaml +// +// fields: +// a: +// struct_value: +// fields: +// b: +// struct_value: +// fields: +// c: +// string_value: pro +// t: +// list_value: +// values: +// - string_value: m +// - string_value: n +// +// The following MetadataMatcher is matched as the path [a, b, c] will retrieve a string value "pro" +// from the Metadata which is matched to the specified prefix match. +// +// .. code-block:: yaml +// +// path: +// - key: a +// - key: b +// - key: c +// value: +// string_match: +// prefix: pr +// +// The following StructMatcher is matched as the code will match one of the string values in the +// list at the path [a, t]. +// +// .. code-block:: yaml +// +// path: +// - key: a +// - key: t +// value: +// list_match: +// one_of: +// string_match: +// exact: m +// +// An example use of StructMatcher is to match metadata in envoy.v*.core.Node. +message StructMatcher { + option (udpa.annotations.versioning).previous_message_type = + "envoy.type.matcher.v3.StructMatcher"; + + // Specifies the segment in a path to retrieve value from Struct. + message PathSegment { + option (udpa.annotations.versioning).previous_message_type = + "envoy.type.matcher.v3.StructMatcher.PathSegment"; + + oneof segment { + option (validate.required) = true; + + // If specified, use the key to retrieve the value in a Struct. + string key = 1 [(validate.rules).string = {min_bytes: 1}]; + } + } + + // The path to retrieve the Value from the Struct. + repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}]; + + // The StructMatcher is matched if the value retrieved by path is matched to this value. 
+ ValueMatcher value = 3 [(validate.rules).message = {required: true}]; +} diff --git a/api/envoy/type/matcher/v4alpha/value.proto b/api/envoy/type/matcher/v4alpha/value.proto new file mode 100644 index 000000000000..6e509d460109 --- /dev/null +++ b/api/envoy/type/matcher/v4alpha/value.proto @@ -0,0 +1,71 @@ +syntax = "proto3"; + +package envoy.type.matcher.v4alpha; + +import "envoy/type/matcher/v4alpha/number.proto"; +import "envoy/type/matcher/v4alpha/string.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; +option java_outer_classname = "ValueProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Value matcher] + +// Specifies the way to match a ProtobufWkt::Value. Primitive values and ListValue are supported. +// StructValue is not supported and is always not matched. +// [#next-free-field: 7] +message ValueMatcher { + option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.v3.ValueMatcher"; + + // NullMatch is an empty message to specify a null value. + message NullMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.type.matcher.v3.ValueMatcher.NullMatch"; + } + + // Specifies how to match a value. + oneof match_pattern { + option (validate.required) = true; + + // If specified, a match occurs if and only if the target value is a NullValue. + NullMatch null_match = 1; + + // If specified, a match occurs if and only if the target value is a double value and is + // matched to this field. + DoubleMatcher double_match = 2; + + // If specified, a match occurs if and only if the target value is a string value and is + // matched to this field. + StringMatcher string_match = 3; + + // If specified, a match occurs if and only if the target value is a bool value and is equal + // to this field. + bool bool_match = 4; + + // If specified, value match will be performed based on whether the path is referring to a + // valid primitive value in the metadata. If the path is referring to a non-primitive value, + // the result is always not matched. + bool present_match = 5; + + // If specified, a match occurs if and only if the target value is a list value and + // is matched to this field. + ListMatcher list_match = 6; + } +} + +// Specifies the way to match a list value. +message ListMatcher { + option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.v3.ListMatcher"; + + oneof match_pattern { + option (validate.required) = true; + + // If specified, at least one of the values in the list must match the value specified. + ValueMatcher one_of = 1; + } +} diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 7b6436448254..c408bf6ea747 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -51,3 +51,6 @@ Deprecated * Tracing provider configuration as part of :ref:`bootstrap config ` has been deprecated in favor of configuration as part of :ref:`HTTP connection manager `. +* The * :ref:`GoogleRE2.max_program_size` + field is now deprecated. Management servers are expected to validate regexp program sizes + instead of expecting the client to do it. 
diff --git a/generated_api_shadow/envoy/admin/v4alpha/BUILD b/generated_api_shadow/envoy/admin/v4alpha/BUILD index 6da5b60bad28..d64c4f6a0816 100644 --- a/generated_api_shadow/envoy/admin/v4alpha/BUILD +++ b/generated_api_shadow/envoy/admin/v4alpha/BUILD @@ -10,7 +10,7 @@ api_proto_package( "//envoy/annotations:pkg", "//envoy/config/bootstrap/v4alpha:pkg", "//envoy/config/core/v4alpha:pkg", - "//envoy/config/tap/v3:pkg", + "//envoy/config/tap/v4alpha:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], diff --git a/generated_api_shadow/envoy/admin/v4alpha/tap.proto b/generated_api_shadow/envoy/admin/v4alpha/tap.proto index c47b308d6ee6..039dfcfeb812 100644 --- a/generated_api_shadow/envoy/admin/v4alpha/tap.proto +++ b/generated_api_shadow/envoy/admin/v4alpha/tap.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package envoy.admin.v4alpha; -import "envoy/config/tap/v3/common.proto"; +import "envoy/config/tap/v4alpha/common.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -24,5 +24,5 @@ message TapRequest { string config_id = 1 [(validate.rules).string = {min_bytes: 1}]; // The tap configuration to load. - config.tap.v3.TapConfig tap_config = 2 [(validate.rules).message = {required: true}]; + config.tap.v4alpha.TapConfig tap_config = 2 [(validate.rules).message = {required: true}]; } diff --git a/generated_api_shadow/envoy/config/accesslog/v4alpha/BUILD b/generated_api_shadow/envoy/config/accesslog/v4alpha/BUILD new file mode 100644 index 000000000000..4ed75a69ea09 --- /dev/null +++ b/generated_api_shadow/envoy/config/accesslog/v4alpha/BUILD @@ -0,0 +1,15 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/accesslog/v3:pkg", + "//envoy/config/core/v4alpha:pkg", + "//envoy/config/route/v4alpha:pkg", + "//envoy/type/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/config/accesslog/v4alpha/accesslog.proto b/generated_api_shadow/envoy/config/accesslog/v4alpha/accesslog.proto new file mode 100644 index 000000000000..56911ca19185 --- /dev/null +++ b/generated_api_shadow/envoy/config/accesslog/v4alpha/accesslog.proto @@ -0,0 +1,298 @@ +syntax = "proto3"; + +package envoy.config.accesslog.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/route/v4alpha/route_components.proto"; +import "envoy/type/v3/percent.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/struct.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.accesslog.v4alpha"; +option java_outer_classname = "AccesslogProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Common access log types] + +message AccessLog { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.AccessLog"; + + reserved 3; + + reserved "config"; + + // The name of the access log implementation to instantiate. The name must + // match a statically registered access log. Current built-in loggers include: + // + // #. "envoy.access_loggers.file" + // #. "envoy.access_loggers.http_grpc" + // #. 
"envoy.access_loggers.tcp_grpc" + string name = 1; + + // Filter which is used to determine if the access log needs to be written. + AccessLogFilter filter = 2; + + // Custom configuration that depends on the access log being instantiated. Built-in + // configurations include: + // + // #. "envoy.access_loggers.file": :ref:`FileAccessLog + // ` + // #. "envoy.access_loggers.http_grpc": :ref:`HttpGrpcAccessLogConfig + // ` + // #. "envoy.access_loggers.tcp_grpc": :ref:`TcpGrpcAccessLogConfig + // ` + oneof config_type { + google.protobuf.Any typed_config = 4; + } +} + +// [#next-free-field: 12] +message AccessLogFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.AccessLogFilter"; + + oneof filter_specifier { + option (validate.required) = true; + + // Status code filter. + StatusCodeFilter status_code_filter = 1; + + // Duration filter. + DurationFilter duration_filter = 2; + + // Not health check filter. + NotHealthCheckFilter not_health_check_filter = 3; + + // Traceable filter. + TraceableFilter traceable_filter = 4; + + // Runtime filter. + RuntimeFilter runtime_filter = 5; + + // And filter. + AndFilter and_filter = 6; + + // Or filter. + OrFilter or_filter = 7; + + // Header filter. + HeaderFilter header_filter = 8; + + // Response flag filter. + ResponseFlagFilter response_flag_filter = 9; + + // gRPC status filter. + GrpcStatusFilter grpc_status_filter = 10; + + // Extension filter. + ExtensionFilter extension_filter = 11; + } +} + +// Filter on an integer comparison. +message ComparisonFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.ComparisonFilter"; + + enum Op { + // = + EQ = 0; + + // >= + GE = 1; + + // <= + LE = 2; + } + + // Comparison operator. + Op op = 1 [(validate.rules).enum = {defined_only: true}]; + + // Value to compare against. + core.v4alpha.RuntimeUInt32 value = 2; +} + +// Filters on HTTP response/status code. +message StatusCodeFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.StatusCodeFilter"; + + // Comparison. + ComparisonFilter comparison = 1 [(validate.rules).message = {required: true}]; +} + +// Filters on total request duration in milliseconds. +message DurationFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.DurationFilter"; + + // Comparison. + ComparisonFilter comparison = 1 [(validate.rules).message = {required: true}]; +} + +// Filters for requests that are not health check requests. A health check +// request is marked by the health check filter. +message NotHealthCheckFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.NotHealthCheckFilter"; +} + +// Filters for requests that are traceable. See the tracing overview for more +// information on how a request becomes traceable. +message TraceableFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.TraceableFilter"; +} + +// Filters for random sampling of requests. +message RuntimeFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.RuntimeFilter"; + + // Runtime key to get an optional overridden numerator for use in the *percent_sampled* field. + // If found in runtime, this value will replace the default numerator. + string runtime_key = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The default sampling percentage. 
If not specified, defaults to 0% with denominator of 100. + type.v3.FractionalPercent percent_sampled = 2; + + // By default, sampling pivots on the header + // :ref:`x-request-id` being present. If + // :ref:`x-request-id` is present, the filter will + // consistently sample across multiple hosts based on the runtime key value and the value + // extracted from :ref:`x-request-id`. If it is + // missing, or *use_independent_randomness* is set to true, the filter will randomly sample based + // on the runtime key value alone. *use_independent_randomness* can be used for logging kill + // switches within complex nested :ref:`AndFilter + // ` and :ref:`OrFilter + // ` blocks that are easier to reason about + // from a probability perspective (i.e., setting to true will cause the filter to behave like + // an independent random variable when composed within logical operator filters). + bool use_independent_randomness = 3; +} + +// Performs a logical “and” operation on the result of each filter in filters. +// Filters are evaluated sequentially and if one of them returns false, the +// filter returns false immediately. +message AndFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.AndFilter"; + + repeated AccessLogFilter filters = 1 [(validate.rules).repeated = {min_items: 2}]; +} + +// Performs a logical “or” operation on the result of each individual filter. +// Filters are evaluated sequentially and if one of them returns true, the +// filter returns true immediately. +message OrFilter { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.accesslog.v3.OrFilter"; + + repeated AccessLogFilter filters = 2 [(validate.rules).repeated = {min_items: 2}]; +} + +// Filters requests based on the presence or value of a request header. +message HeaderFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.HeaderFilter"; + + // Only requests with a header which matches the specified HeaderMatcher will pass the filter + // check. + route.v4alpha.HeaderMatcher header = 1 [(validate.rules).message = {required: true}]; +} + +// Filters requests that received responses with an Envoy response flag set. +// A list of the response flags can be found +// in the access log formatter :ref:`documentation`. +message ResponseFlagFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.ResponseFlagFilter"; + + // Only responses with the any of the flags listed in this field will be logged. + // This field is optional. If it is not specified, then any response flag will pass + // the filter check. + repeated string flags = 1 [(validate.rules).repeated = { + items { + string { + in: "LH" + in: "UH" + in: "UT" + in: "LR" + in: "UR" + in: "UF" + in: "UC" + in: "UO" + in: "NR" + in: "DI" + in: "FI" + in: "RL" + in: "UAEX" + in: "RLSE" + in: "DC" + in: "URX" + in: "SI" + in: "IH" + in: "DPE" + } + } + }]; +} + +// Filters gRPC requests based on their response status. If a gRPC status is not provided, the +// filter will infer the status from the HTTP status code. 
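+// For example (the statuses chosen below are illustrative), an access log filter that only
+// logs responses whose gRPC status is NOT_FOUND or UNAVAILABLE could be configured as:
+//
+// .. code-block:: yaml
+//
+//   filter:
+//     grpc_status_filter:
+//       statuses:
+//       - NOT_FOUND
+//       - UNAVAILABLE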
+message GrpcStatusFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.GrpcStatusFilter"; + + enum Status { + OK = 0; + CANCELED = 1; + UNKNOWN = 2; + INVALID_ARGUMENT = 3; + DEADLINE_EXCEEDED = 4; + NOT_FOUND = 5; + ALREADY_EXISTS = 6; + PERMISSION_DENIED = 7; + RESOURCE_EXHAUSTED = 8; + FAILED_PRECONDITION = 9; + ABORTED = 10; + OUT_OF_RANGE = 11; + UNIMPLEMENTED = 12; + INTERNAL = 13; + UNAVAILABLE = 14; + DATA_LOSS = 15; + UNAUTHENTICATED = 16; + } + + // Logs only responses that have any one of the gRPC statuses in this field. + repeated Status statuses = 1 [(validate.rules).repeated = {items {enum {defined_only: true}}}]; + + // If included and set to true, the filter will instead block all responses with a gRPC status or + // inferred gRPC status enumerated in statuses, and allow all other responses. + bool exclude = 2; +} + +// Extension filter is statically registered at runtime. +message ExtensionFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.ExtensionFilter"; + + reserved 2; + + reserved "config"; + + // The name of the filter implementation to instantiate. The name must + // match a statically registered filter. + string name = 1; + + // Custom configuration that depends on the filter being instantiated. + oneof config_type { + google.protobuf.Any typed_config = 3; + } +} diff --git a/generated_api_shadow/envoy/config/bootstrap/v4alpha/BUILD b/generated_api_shadow/envoy/config/bootstrap/v4alpha/BUILD index 005603632b4c..eb87a71ad68e 100644 --- a/generated_api_shadow/envoy/config/bootstrap/v4alpha/BUILD +++ b/generated_api_shadow/envoy/config/bootstrap/v4alpha/BUILD @@ -10,8 +10,8 @@ api_proto_package( "//envoy/config/bootstrap/v3:pkg", "//envoy/config/cluster/v4alpha:pkg", "//envoy/config/core/v4alpha:pkg", - "//envoy/config/listener/v3:pkg", - "//envoy/config/metrics/v3:pkg", + "//envoy/config/listener/v4alpha:pkg", + "//envoy/config/metrics/v4alpha:pkg", "//envoy/config/overload/v3:pkg", "//envoy/config/trace/v4alpha:pkg", "//envoy/extensions/transport_sockets/tls/v4alpha:pkg", diff --git a/generated_api_shadow/envoy/config/bootstrap/v4alpha/bootstrap.proto b/generated_api_shadow/envoy/config/bootstrap/v4alpha/bootstrap.proto index b9086b771981..cd05d6f4e46d 100644 --- a/generated_api_shadow/envoy/config/bootstrap/v4alpha/bootstrap.proto +++ b/generated_api_shadow/envoy/config/bootstrap/v4alpha/bootstrap.proto @@ -8,8 +8,8 @@ import "envoy/config/core/v4alpha/base.proto"; import "envoy/config/core/v4alpha/config_source.proto"; import "envoy/config/core/v4alpha/event_service_config.proto"; import "envoy/config/core/v4alpha/socket_option.proto"; -import "envoy/config/listener/v3/listener.proto"; -import "envoy/config/metrics/v3/stats.proto"; +import "envoy/config/listener/v4alpha/listener.proto"; +import "envoy/config/metrics/v4alpha/stats.proto"; import "envoy/config/overload/v3/overload.proto"; import "envoy/config/trace/v4alpha/http_tracer.proto"; import "envoy/extensions/transport_sockets/tls/v4alpha/secret.proto"; @@ -43,9 +43,9 @@ message Bootstrap { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.Bootstrap.StaticResources"; - // Static :ref:`Listeners `. These listeners are + // Static :ref:`Listeners `. These listeners are // available regardless of LDS configuration. 
- repeated listener.v3.Listener listeners = 1; + repeated listener.v4alpha.Listener listeners = 1; // If a network based configuration source is specified for :ref:`cds_config // `, it's necessary @@ -66,7 +66,7 @@ message Bootstrap { reserved 4; - // All :ref:`Listeners ` are provided by a single + // All :ref:`Listeners ` are provided by a single // :ref:`LDS ` configuration source. core.v4alpha.ConfigSource lds_config = 1; @@ -111,10 +111,10 @@ message Bootstrap { string flags_path = 5; // Optional set of stats sinks. - repeated metrics.v3.StatsSink stats_sinks = 6; + repeated metrics.v4alpha.StatsSink stats_sinks = 6; // Configuration for internal processing of stats. - metrics.v3.StatsConfig stats_config = 13; + metrics.v4alpha.StatsConfig stats_config = 13; // Optional duration between flushes to configured stats sinks. For // performance reasons Envoy latches counters and only flushes counters and @@ -150,7 +150,7 @@ message Bootstrap { // Enable :ref:`stats for event dispatcher `, defaults to false. // Note that this records a value for each iteration of the event loop on every thread. This // should normally be minimal overhead, but when using - // :ref:`statsd `, it will send each observed value + // :ref:`statsd `, it will send each observed value // over the wire individually because the statsd protocol doesn't have any way to represent a // histogram summary. Be aware that this can be a very large volume of data. bool enable_dispatcher_stats = 16; @@ -168,7 +168,7 @@ message Bootstrap { // Optional proxy version which will be used to set the value of :ref:`server.version statistic // ` if specified. Envoy will not process this value, it will be sent as is to - // :ref:`stats sinks `. + // :ref:`stats sinks `. google.protobuf.UInt64Value stats_server_version_override = 19; // Always use TCP queries instead of UDP queries for DNS lookups. diff --git a/generated_api_shadow/envoy/config/core/v4alpha/BUILD b/generated_api_shadow/envoy/config/core/v4alpha/BUILD index aeac38ac2833..ef6414dadc09 100644 --- a/generated_api_shadow/envoy/config/core/v4alpha/BUILD +++ b/generated_api_shadow/envoy/config/core/v4alpha/BUILD @@ -8,7 +8,7 @@ api_proto_package( deps = [ "//envoy/annotations:pkg", "//envoy/config/core/v3:pkg", - "//envoy/type/matcher/v3:pkg", + "//envoy/type/matcher/v4alpha:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], diff --git a/generated_api_shadow/envoy/config/core/v4alpha/address.proto b/generated_api_shadow/envoy/config/core/v4alpha/address.proto index a2e6070103ae..ffade4bed75b 100644 --- a/generated_api_shadow/envoy/config/core/v4alpha/address.proto +++ b/generated_api_shadow/envoy/config/core/v4alpha/address.proto @@ -45,7 +45,7 @@ message SocketAddress { // to the address. An empty address is not allowed. Specify ``0.0.0.0`` or ``::`` // to bind to any address. [#comment:TODO(zuercher) reinstate when implemented: // It is possible to distinguish a Listener address via the prefix/suffix matching - // in :ref:`FilterChainMatch `.] When used + // in :ref:`FilterChainMatch `.] When used // within an upstream :ref:`BindConfig `, the address // controls the source address of outbound connections. 
For :ref:`clusters // `, the cluster type determines whether the diff --git a/generated_api_shadow/envoy/config/core/v4alpha/health_check.proto b/generated_api_shadow/envoy/config/core/v4alpha/health_check.proto index 7f823da97c5e..39badc334b01 100644 --- a/generated_api_shadow/envoy/config/core/v4alpha/health_check.proto +++ b/generated_api_shadow/envoy/config/core/v4alpha/health_check.proto @@ -4,7 +4,7 @@ package envoy.config.core.v4alpha; import "envoy/config/core/v4alpha/base.proto"; import "envoy/config/core/v4alpha/event_service_config.proto"; -import "envoy/type/matcher/v3/string.proto"; +import "envoy/type/matcher/v4alpha/string.proto"; import "envoy/type/v3/http.proto"; import "envoy/type/v3/range.proto"; @@ -125,9 +125,9 @@ message HealthCheck { // An optional service name parameter which is used to validate the identity of // the health checked cluster using a :ref:`StringMatcher - // `. See the :ref:`architecture overview + // `. See the :ref:`architecture overview // ` for more information. - type.matcher.v3.StringMatcher service_name_matcher = 11; + type.matcher.v4alpha.StringMatcher service_name_matcher = 11; } message TcpHealthCheck { @@ -206,7 +206,7 @@ message HealthCheck { // Specifies the ALPN protocols for health check connections. This is useful if the // corresponding upstream is using ALPN-based :ref:`FilterChainMatch - // ` along with different protocols for health checks + // ` along with different protocols for health checks // versus data connections. If empty, no ALPN protocols will be set on health check connections. repeated string alpn_protocols = 1; } diff --git a/generated_api_shadow/envoy/config/listener/v4alpha/BUILD b/generated_api_shadow/envoy/config/listener/v4alpha/BUILD new file mode 100644 index 000000000000..1d1761a3e941 --- /dev/null +++ b/generated_api_shadow/envoy/config/listener/v4alpha/BUILD @@ -0,0 +1,15 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/accesslog/v4alpha:pkg", + "//envoy/config/core/v4alpha:pkg", + "//envoy/config/listener/v3:pkg", + "//envoy/type/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/config/listener/v4alpha/api_listener.proto b/generated_api_shadow/envoy/config/listener/v4alpha/api_listener.proto new file mode 100644 index 000000000000..b8d076c36583 --- /dev/null +++ b/generated_api_shadow/envoy/config/listener/v4alpha/api_listener.proto @@ -0,0 +1,32 @@ +syntax = "proto3"; + +package envoy.config.listener.v4alpha; + +import "google/protobuf/any.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.config.listener.v4alpha"; +option java_outer_classname = "ApiListenerProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: API listener] + +// Describes a type of API listener, which is used in non-proxy clients. The type of API +// exposed to the non-proxy application depends on the type of API listener. +message ApiListener { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.ApiListener"; + + // The type in this field determines the type of API listener. 
At present, the following + // types are supported: + // envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager (HTTP) + // [#next-major-version: In the v3 API, replace this Any field with a oneof containing the + // specific config message for each type of API listener. We could not do this in v2 because + // it would have caused circular dependencies for go protos: lds.proto depends on this file, + // and http_connection_manager.proto depends on rds.proto, which is in the same directory as + // lds.proto, so lds.proto cannot depend on this file.] + google.protobuf.Any api_listener = 1; +} diff --git a/generated_api_shadow/envoy/config/listener/v4alpha/listener.proto b/generated_api_shadow/envoy/config/listener/v4alpha/listener.proto new file mode 100644 index 000000000000..4438bd2974d4 --- /dev/null +++ b/generated_api_shadow/envoy/config/listener/v4alpha/listener.proto @@ -0,0 +1,241 @@ +syntax = "proto3"; + +package envoy.config.listener.v4alpha; + +import "envoy/config/accesslog/v4alpha/accesslog.proto"; +import "envoy/config/core/v4alpha/address.proto"; +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/core/v4alpha/socket_option.proto"; +import "envoy/config/listener/v4alpha/api_listener.proto"; +import "envoy/config/listener/v4alpha/listener_components.proto"; +import "envoy/config/listener/v4alpha/udp_listener_config.proto"; + +import "google/api/annotations.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.listener.v4alpha"; +option java_outer_classname = "ListenerProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Listener configuration] +// Listener :ref:`configuration overview ` + +// [#next-free-field: 23] +message Listener { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.listener.v3.Listener"; + + enum DrainType { + // Drain in response to calling /healthcheck/fail admin endpoint (along with the health check + // filter), listener removal/modification, and hot restart. + DEFAULT = 0; + + // Drain in response to listener removal/modification and hot restart. This setting does not + // include /healthcheck/fail. This setting may be desirable if Envoy is hosting both ingress + // and egress listeners. + MODIFY_ONLY = 1; + } + + // [#not-implemented-hide:] + message DeprecatedV1 { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.Listener.DeprecatedV1"; + + // Whether the listener should bind to the port. A listener that doesn't + // bind can only receive connections redirected from other listeners that + // set use_original_dst parameter to true. Default is true. + // + // This is deprecated in v2, all Listeners will bind to their port. An + // additional filter chain must be created for every original destination + // port this listener may redirect to in v2, with the original port + // specified in the FilterChainMatch destination_port field. + // + // [#comment:TODO(PiotrSikora): Remove this once verified that we no longer need it.] + google.protobuf.BoolValue bind_to_port = 1; + } + + // Configuration for listener connection balancing. 
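+  // For example, exact connection balancing can be requested with the following illustrative
+  // listener fragment:
+  //
+  // .. code-block:: yaml
+  //
+  //   connection_balance_config:
+  //     exact_balance: {}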
+ message ConnectionBalanceConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.Listener.ConnectionBalanceConfig"; + + // A connection balancer implementation that does exact balancing. This means that a lock is + // held during balancing so that connection counts are nearly exactly balanced between worker + // threads. This is "nearly" exact in the sense that a connection might close in parallel thus + // making the counts incorrect, but this should be rectified on the next accept. This balancer + // sacrifices accept throughput for accuracy and should be used when there are a small number of + // connections that rarely cycle (e.g., service mesh gRPC egress). + message ExactBalance { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.Listener.ConnectionBalanceConfig.ExactBalance"; + } + + oneof balance_type { + option (validate.required) = true; + + // If specified, the listener will use the exact connection balancer. + ExactBalance exact_balance = 1; + } + } + + reserved 14, 4; + + reserved "use_original_dst"; + + // The unique name by which this listener is known. If no name is provided, + // Envoy will allocate an internal UUID for the listener. If the listener is to be dynamically + // updated or removed via :ref:`LDS ` a unique name must be provided. + string name = 1; + + // The address that the listener should listen on. In general, the address must be unique, though + // that is governed by the bind rules of the OS. E.g., multiple listeners can listen on port 0 on + // Linux as the actual port will be allocated by the OS. + core.v4alpha.Address address = 2 [(validate.rules).message = {required: true}]; + + // A list of filter chains to consider for this listener. The + // :ref:`FilterChain ` with the most specific + // :ref:`FilterChainMatch ` criteria is used on a + // connection. + // + // Example using SNI for filter chain selection can be found in the + // :ref:`FAQ entry `. + repeated FilterChain filter_chains = 3; + + // Soft limit on size of the listener’s new connection read and write buffers. + // If unspecified, an implementation defined default is applied (1MiB). + google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5; + + // Listener metadata. + core.v4alpha.Metadata metadata = 6; + + // [#not-implemented-hide:] + DeprecatedV1 deprecated_v1 = 7; + + // The type of draining to perform at a listener-wide level. + DrainType drain_type = 8; + + // Listener filters have the opportunity to manipulate and augment the connection metadata that + // is used in connection filter chain matching, for example. These filters are run before any in + // :ref:`filter_chains `. Order matters as the + // filters are processed sequentially right after a socket has been accepted by the listener, and + // before a connection is created. + // UDP Listener filters can be specified when the protocol in the listener socket address in + // :ref:`protocol ` is :ref:`UDP + // `. + // UDP listeners currently support a single filter. + repeated ListenerFilter listener_filters = 9; + + // The timeout to wait for all listener filters to complete operation. If the timeout is reached, + // the accepted socket is closed without a connection being created unless + // `continue_on_listener_filters_timeout` is set to true. Specify 0 to disable the + // timeout. If not specified, a default timeout of 15s is used. 
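+  // For instance, the timeout can be disabled entirely with the illustrative fragment below:
+  //
+  // .. code-block:: yaml
+  //
+  //   listener_filters_timeout: 0s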
+ google.protobuf.Duration listener_filters_timeout = 15; + + // Whether a connection should be created when listener filters timeout. Default is false. + // + // .. attention:: + // + // Some listener filters, such as :ref:`Proxy Protocol filter + // `, should not be used with this option. It will cause + // unexpected behavior when a connection is created. + bool continue_on_listener_filters_timeout = 17; + + // Whether the listener should be set as a transparent socket. + // When this flag is set to true, connections can be redirected to the listener using an + // *iptables* *TPROXY* target, in which case the original source and destination addresses and + // ports are preserved on accepted connections. This flag should be used in combination with + // :ref:`an original_dst ` :ref:`listener filter + // ` to mark the connections' local addresses as + // "restored." This can be used to hand off each redirected connection to another listener + // associated with the connection's destination address. Direct connections to the socket without + // using *TPROXY* cannot be distinguished from connections redirected using *TPROXY* and are + // therefore treated as if they were redirected. + // When this flag is set to false, the listener's socket is explicitly reset as non-transparent. + // Setting this flag requires Envoy to run with the *CAP_NET_ADMIN* capability. + // When this flag is not set (default), the socket is not modified, i.e. the transparent option + // is neither set nor reset. + google.protobuf.BoolValue transparent = 10; + + // Whether the listener should set the *IP_FREEBIND* socket option. When this + // flag is set to true, listeners can be bound to an IP address that is not + // configured on the system running Envoy. When this flag is set to false, the + // option *IP_FREEBIND* is disabled on the socket. When this flag is not set + // (default), the socket is not modified, i.e. the option is neither enabled + // nor disabled. + google.protobuf.BoolValue freebind = 11; + + // Additional socket options that may not be present in Envoy source code or + // precompiled binaries. + repeated core.v4alpha.SocketOption socket_options = 13; + + // Whether the listener should accept TCP Fast Open (TFO) connections. + // When this flag is set to a value greater than 0, the option TCP_FASTOPEN is enabled on + // the socket, with a queue length of the specified size + // (see `details in RFC7413 `_). + // When this flag is set to 0, the option TCP_FASTOPEN is disabled on the socket. + // When this flag is not set (default), the socket is not modified, + // i.e. the option is neither enabled nor disabled. + // + // On Linux, the net.ipv4.tcp_fastopen kernel parameter must include flag 0x2 to enable + // TCP_FASTOPEN. + // See `ip-sysctl.txt `_. + // + // On macOS, only values of 0, 1, and unset are valid; other values may result in an error. + // To set the queue length on macOS, set the net.inet.tcp.fastopen_backlog kernel parameter. + google.protobuf.UInt32Value tcp_fast_open_queue_length = 12; + + // Specifies the intended direction of the traffic relative to the local Envoy. + core.v4alpha.TrafficDirection traffic_direction = 16; + + // If the protocol in the listener socket address in :ref:`protocol + // ` is :ref:`UDP + // `, this field specifies the actual udp + // listener to create, i.e. :ref:`udp_listener_name + // ` = "raw_udp_listener" for + // creating a packet-oriented UDP listener. If not present, treat it as "raw_udp_listener". 
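+  // A minimal illustrative fragment for a plain (non-QUIC) UDP listener might be:
+  //
+  // .. code-block:: yaml
+  //
+  //   udp_listener_config:
+  //     udp_listener_name: raw_udp_listener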
+ UdpListenerConfig udp_listener_config = 18; + + // Used to represent an API listener, which is used in non-proxy clients. The type of API + // exposed to the non-proxy application depends on the type of API listener. + // When this field is set, no other field except for :ref:`name` + // should be set. + // + // .. note:: + // + // Currently only one ApiListener can be installed; and it can only be done via bootstrap config, + // not LDS. + // + // [#next-major-version: In the v3 API, instead of this messy approach where the socket + // listener fields are directly in the top-level Listener message and the API listener types + // are in the ApiListener message, the socket listener messages should be in their own message, + // and the top-level Listener should essentially be a oneof that selects between the + // socket listener and the various types of API listener. That way, a given Listener message + // can structurally only contain the fields of the relevant type.] + ApiListener api_listener = 19; + + // The listener's connection balancer configuration, currently only applicable to TCP listeners. + // If no configuration is specified, Envoy will not attempt to balance active connections between + // worker threads. + ConnectionBalanceConfig connection_balance_config = 20; + + // When this flag is set to true, listeners set the *SO_REUSEPORT* socket option and + // create one socket for each worker thread. This makes inbound connections + // distribute among worker threads roughly evenly in cases where there are a high number + // of connections. When this flag is set to false, all worker threads share one socket. + // + // Before Linux v4.19-rc1, new TCP connections may be rejected during hot restart + // (see `3rd paragraph in 'soreuseport' commit message + // `_). + // This issue was fixed by `tcp: Avoid TCP syncookie rejected by SO_REUSEPORT socket + // `_. + bool reuse_port = 21; + + // Configuration for :ref:`access logs ` + // emitted by this listener. + repeated accesslog.v4alpha.AccessLog access_log = 22; +} diff --git a/generated_api_shadow/envoy/config/listener/v4alpha/listener_components.proto b/generated_api_shadow/envoy/config/listener/v4alpha/listener_components.proto new file mode 100644 index 000000000000..6900cde39016 --- /dev/null +++ b/generated_api_shadow/envoy/config/listener/v4alpha/listener_components.proto @@ -0,0 +1,298 @@ +syntax = "proto3"; + +package envoy.config.listener.v4alpha; + +import "envoy/config/core/v4alpha/address.proto"; +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/type/v3/range.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.listener.v4alpha"; +option java_outer_classname = "ListenerComponentsProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Listener components] +// Listener :ref:`configuration overview ` + +message Filter { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.listener.v3.Filter"; + + reserved 3, 2; + + reserved "config"; + + // The name of the filter to instantiate. The name must match a + // :ref:`supported filter `. 
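+  // As an illustrative sketch (the cluster name and stat prefix are placeholders, and the
+  // *tcp_proxy* typed_config type URL is assumed), a network filter entry might look like:
+  //
+  // .. code-block:: yaml
+  //
+  //   name: envoy.filters.network.tcp_proxy
+  //   typed_config:
+  //     "@type": type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy
+  //     stat_prefix: ingress_tcp
+  //     cluster: some_backend_cluster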
+ string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Filter specific configuration which depends on the filter being + // instantiated. See the supported filters for further documentation. + oneof config_type { + google.protobuf.Any typed_config = 4; + } +} + +// Specifies the match criteria for selecting a specific filter chain for a +// listener. +// +// In order for a filter chain to be selected, *ALL* of its criteria must be +// fulfilled by the incoming connection, properties of which are set by the +// networking stack and/or listener filters. +// +// The following order applies: +// +// 1. Destination port. +// 2. Destination IP address. +// 3. Server name (e.g. SNI for TLS protocol), +// 4. Transport protocol. +// 5. Application protocols (e.g. ALPN for TLS protocol). +// 6. Source type (e.g. any, local or external network). +// 7. Source IP address. +// 8. Source port. +// +// For criteria that allow ranges or wildcards, the most specific value in any +// of the configured filter chains that matches the incoming connection is going +// to be used (e.g. for SNI ``www.example.com`` the most specific match would be +// ``www.example.com``, then ``*.example.com``, then ``*.com``, then any filter +// chain without ``server_names`` requirements). +// +// [#comment: Implemented rules are kept in the preference order, with deprecated fields +// listed at the end, because that's how we want to list them in the docs. +// +// [#comment:TODO(PiotrSikora): Add support for configurable precedence of the rules] +// [#next-free-field: 13] +message FilterChainMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.FilterChainMatch"; + + enum ConnectionSourceType { + // Any connection source matches. + ANY = 0; + + // Match a connection originating from the same host. + SAME_IP_OR_LOOPBACK = 1; + + // Match a connection originating from a different host. + EXTERNAL = 2; + } + + reserved 1; + + // Optional destination port to consider when use_original_dst is set on the + // listener in determining a filter chain match. + google.protobuf.UInt32Value destination_port = 8 [(validate.rules).uint32 = {lte: 65535 gte: 1}]; + + // If non-empty, an IP address and prefix length to match addresses when the + // listener is bound to 0.0.0.0/:: or when use_original_dst is specified. + repeated core.v4alpha.CidrRange prefix_ranges = 3; + + // If non-empty, an IP address and suffix length to match addresses when the + // listener is bound to 0.0.0.0/:: or when use_original_dst is specified. + // [#not-implemented-hide:] + string address_suffix = 4; + + // [#not-implemented-hide:] + google.protobuf.UInt32Value suffix_len = 5; + + // Specifies the connection source IP match type. Can be any, local or external network. + ConnectionSourceType source_type = 12 [(validate.rules).enum = {defined_only: true}]; + + // The criteria is satisfied if the source IP address of the downstream + // connection is contained in at least one of the specified subnets. If the + // parameter is not specified or the list is empty, the source IP address is + // ignored. + repeated core.v4alpha.CidrRange source_prefix_ranges = 6; + + // The criteria is satisfied if the source port of the downstream connection + // is contained in at least one of the specified ports. If the parameter is + // not specified, the source port is ignored. 
+ repeated uint32 source_ports = 7 + [(validate.rules).repeated = {items {uint32 {lte: 65535 gte: 1}}}]; + + // If non-empty, a list of server names (e.g. SNI for TLS protocol) to consider when determining + // a filter chain match. Those values will be compared against the server names of a new + // connection, when detected by one of the listener filters. + // + // The server name will be matched against all wildcard domains, i.e. ``www.example.com`` + // will be first matched against ``www.example.com``, then ``*.example.com``, then ``*.com``. + // + // Note that partial wildcards are not supported, and values like ``*w.example.com`` are invalid. + // + // .. attention:: + // + // See the :ref:`FAQ entry ` on how to configure SNI for more + // information. + repeated string server_names = 11; + + // If non-empty, a transport protocol to consider when determining a filter chain match. + // This value will be compared against the transport protocol of a new connection, when + // it's detected by one of the listener filters. + // + // Suggested values include: + // + // * ``raw_buffer`` - default, used when no transport protocol is detected, + // * ``tls`` - set by :ref:`envoy.filters.listener.tls_inspector ` + // when TLS protocol is detected. + string transport_protocol = 9; + + // If non-empty, a list of application protocols (e.g. ALPN for TLS protocol) to consider when + // determining a filter chain match. Those values will be compared against the application + // protocols of a new connection, when detected by one of the listener filters. + // + // Suggested values include: + // + // * ``http/1.1`` - set by :ref:`envoy.filters.listener.tls_inspector + // `, + // * ``h2`` - set by :ref:`envoy.filters.listener.tls_inspector ` + // + // .. attention:: + // + // Currently, only :ref:`TLS Inspector ` provides + // application protocol detection based on the requested + // `ALPN `_ values. + // + // However, the use of ALPN is pretty much limited to the HTTP/2 traffic on the Internet, + // and matching on values other than ``h2`` is going to lead to a lot of false negatives, + // unless all connecting clients are known to use ALPN. + repeated string application_protocols = 10; +} + +// A filter chain wraps a set of match criteria, an option TLS context, a set of filters, and +// various other parameters. +// [#next-free-field: 8] +message FilterChain { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.FilterChain"; + + reserved 2; + + reserved "tls_context"; + + // The criteria to use when matching a connection to this filter chain. + FilterChainMatch filter_chain_match = 1; + + // A list of individual network filters that make up the filter chain for + // connections established with the listener. Order matters as the filters are + // processed sequentially as connection events happen. Note: If the filter + // list is empty, the connection will close by default. + repeated Filter filters = 3; + + // Whether the listener should expect a PROXY protocol V1 header on new + // connections. If this option is enabled, the listener will assume that that + // remote address of the connection is the one specified in the header. Some + // load balancers including the AWS ELB support this option. If the option is + // absent or set to false, Envoy will use the physical peer address of the + // connection as the remote address. + google.protobuf.BoolValue use_proxy_proto = 4; + + // [#not-implemented-hide:] filter chain metadata. 
+ core.v4alpha.Metadata metadata = 5; + + // Optional custom transport socket implementation to use for downstream connections. + // To setup TLS, set a transport socket with name `tls` and + // :ref:`DownstreamTlsContext ` in the `typed_config`. + // If no transport socket configuration is specified, new connections + // will be set up with plaintext. + core.v4alpha.TransportSocket transport_socket = 6; + + // [#not-implemented-hide:] The unique name (or empty) by which this filter chain is known. If no + // name is provided, Envoy will allocate an internal UUID for the filter chain. If the filter + // chain is to be dynamically updated or removed via FCDS a unique name must be provided. + string name = 7; +} + +// Listener filter chain match configuration. This is a recursive structure which allows complex +// nested match configurations to be built using various logical operators. +// +// Examples: +// +// * Matches if the destination port is 3306. +// +// .. code-block:: yaml +// +// destination_port_range: +// start: 3306 +// end: 3307 +// +// * Matches if the destination port is 3306 or 15000. +// +// .. code-block:: yaml +// +// or_match: +// rules: +// - destination_port_range: +// start: 3306 +// end: 3306 +// - destination_port_range: +// start: 15000 +// end: 15001 +// +// [#next-free-field: 6] +message ListenerFilterChainMatchPredicate { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.ListenerFilterChainMatchPredicate"; + + // A set of match configurations used for logical operations. + message MatchSet { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.ListenerFilterChainMatchPredicate.MatchSet"; + + // The list of rules that make up the set. + repeated ListenerFilterChainMatchPredicate rules = 1 + [(validate.rules).repeated = {min_items: 2}]; + } + + oneof rule { + option (validate.required) = true; + + // A set that describes a logical OR. If any member of the set matches, the match configuration + // matches. + MatchSet or_match = 1; + + // A set that describes a logical AND. If all members of the set match, the match configuration + // matches. + MatchSet and_match = 2; + + // A negation match. The match configuration will match if the negated match condition matches. + ListenerFilterChainMatchPredicate not_match = 3; + + // The match configuration will always match. + bool any_match = 4 [(validate.rules).bool = {const: true}]; + + // Match destination port. Particularly, the match evaluation must use the recovered local port if + // the owning listener filter is after :ref:`an original_dst listener filter `. + type.v3.Int32Range destination_port_range = 5; + } +} + +message ListenerFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.ListenerFilter"; + + reserved 2; + + reserved "config"; + + // The name of the filter to instantiate. The name must match a + // :ref:`supported filter `. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Filter specific configuration which depends on the filter being instantiated. + // See the supported filters for further documentation. + oneof config_type { + google.protobuf.Any typed_config = 3; + } + + // Optional match predicate used to disable the filter. The filter is enabled when this field is empty. + // See :ref:`ListenerFilterChainMatchPredicate ` + // for further examples. 
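+  // For example (illustrative only), the TLS inspector could be skipped for plaintext traffic
+  // on port 80 with:
+  //
+  // .. code-block:: yaml
+  //
+  //   name: envoy.filters.listener.tls_inspector
+  //   filter_disabled:
+  //     destination_port_range:
+  //       start: 80
+  //       end: 81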
+ ListenerFilterChainMatchPredicate filter_disabled = 4; +} diff --git a/generated_api_shadow/envoy/config/listener/v4alpha/quic_config.proto b/generated_api_shadow/envoy/config/listener/v4alpha/quic_config.proto new file mode 100644 index 000000000000..97866e4b6ed8 --- /dev/null +++ b/generated_api_shadow/envoy/config/listener/v4alpha/quic_config.proto @@ -0,0 +1,35 @@ +syntax = "proto3"; + +package envoy.config.listener.v4alpha; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.config.listener.v4alpha"; +option java_outer_classname = "QuicConfigProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: QUIC listener Config] + +// Configuration specific to the QUIC protocol. +// Next id: 4 +message QuicProtocolOptions { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.QuicProtocolOptions"; + + // Maximum number of streams that the client can negotiate per connection. 100 + // if not specified. + google.protobuf.UInt32Value max_concurrent_streams = 1; + + // Maximum number of milliseconds that connection will be alive when there is + // no network activity. 300000ms if not specified. + google.protobuf.Duration idle_timeout = 2; + + // Connection timeout in milliseconds before the crypto handshake is finished. + // 20000ms if not specified. + google.protobuf.Duration crypto_handshake_timeout = 3; +} diff --git a/generated_api_shadow/envoy/config/listener/v4alpha/udp_listener_config.proto b/generated_api_shadow/envoy/config/listener/v4alpha/udp_listener_config.proto new file mode 100644 index 000000000000..7e40e9529f99 --- /dev/null +++ b/generated_api_shadow/envoy/config/listener/v4alpha/udp_listener_config.proto @@ -0,0 +1,42 @@ +syntax = "proto3"; + +package envoy.config.listener.v4alpha; + +import "google/protobuf/any.proto"; +import "google/protobuf/struct.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.config.listener.v4alpha"; +option java_outer_classname = "UdpListenerConfigProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: UDP Listener Config] +// Listener :ref:`configuration overview ` + +message UdpListenerConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.UdpListenerConfig"; + + reserved 2; + + reserved "config"; + + // Used to look up UDP listener factory, matches "raw_udp_listener" or + // "quic_listener" to create a specific udp listener. + // If not specified, treat as "raw_udp_listener". + string udp_listener_name = 1; + + // Used to create a specific listener factory. To some factory, e.g. + // "raw_udp_listener", config is not needed. 
+ oneof config_type { + google.protobuf.Any typed_config = 3; + } +} + +message ActiveRawUdpListenerConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.ActiveRawUdpListenerConfig"; +} diff --git a/generated_api_shadow/envoy/config/metrics/v4alpha/BUILD b/generated_api_shadow/envoy/config/metrics/v4alpha/BUILD new file mode 100644 index 000000000000..4b70ffb4110a --- /dev/null +++ b/generated_api_shadow/envoy/config/metrics/v4alpha/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v4alpha:pkg", + "//envoy/config/metrics/v3:pkg", + "//envoy/type/matcher/v4alpha:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/config/metrics/v4alpha/metrics_service.proto b/generated_api_shadow/envoy/config/metrics/v4alpha/metrics_service.proto new file mode 100644 index 000000000000..e4da16c56bfd --- /dev/null +++ b/generated_api_shadow/envoy/config/metrics/v4alpha/metrics_service.proto @@ -0,0 +1,36 @@ +syntax = "proto3"; + +package envoy.config.metrics.v4alpha; + +import "envoy/config/core/v4alpha/grpc_service.proto"; + +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.metrics.v4alpha"; +option java_outer_classname = "MetricsServiceProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Metrics service] + +// Metrics Service is configured as a built-in *envoy.stat_sinks.metrics_service* :ref:`StatsSink +// `. This opaque configuration will be used to create +// Metrics Service. +// [#extension: envoy.stat_sinks.metrics_service] +message MetricsServiceConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.metrics.v3.MetricsServiceConfig"; + + // The upstream gRPC cluster that hosts the metrics service. + core.v4alpha.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}]; + + // If true, counters are reported as the delta between flushing intervals. Otherwise, the current + // counter value is reported. Defaults to false. + // Eventually (https://github.com/envoyproxy/envoy/issues/10968) if this value is not set, the + // sink will take updates from the :ref:`MetricsResponse `. 
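+  // An illustrative configuration (the cluster name is a placeholder, and *envoy_grpc* is
+  // assumed as the gRPC client type) could be:
+  //
+  // .. code-block:: yaml
+  //
+  //   grpc_service:
+  //     envoy_grpc:
+  //       cluster_name: metrics_service_cluster
+  //   report_counters_as_deltas: true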
+ google.protobuf.BoolValue report_counters_as_deltas = 2; +} diff --git a/generated_api_shadow/envoy/config/metrics/v4alpha/stats.proto b/generated_api_shadow/envoy/config/metrics/v4alpha/stats.proto new file mode 100644 index 000000000000..f9a4549746c6 --- /dev/null +++ b/generated_api_shadow/envoy/config/metrics/v4alpha/stats.proto @@ -0,0 +1,361 @@ +syntax = "proto3"; + +package envoy.config.metrics.v4alpha; + +import "envoy/config/core/v4alpha/address.proto"; +import "envoy/type/matcher/v4alpha/string.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.metrics.v4alpha"; +option java_outer_classname = "StatsProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Stats] +// Statistics :ref:`architecture overview `. + +// Configuration for pluggable stats sinks. +message StatsSink { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.metrics.v3.StatsSink"; + + reserved 2; + + reserved "config"; + + // The name of the stats sink to instantiate. The name must match a supported + // stats sink. The built-in stats sinks are: + // + // * :ref:`envoy.stat_sinks.statsd ` + // * :ref:`envoy.stat_sinks.dog_statsd ` + // * :ref:`envoy.stat_sinks.metrics_service ` + // * :ref:`envoy.stat_sinks.hystrix ` + // + // Sinks optionally support tagged/multiple dimensional metrics. + string name = 1; + + // Stats sink specific configuration which depends on the sink being instantiated. See + // :ref:`StatsdSink ` for an example. + oneof config_type { + google.protobuf.Any typed_config = 3; + } +} + +// Statistics configuration such as tagging. +message StatsConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.metrics.v3.StatsConfig"; + + // Each stat name is iteratively processed through these tag specifiers. + // When a tag is matched, the first capture group is removed from the name so + // later :ref:`TagSpecifiers ` cannot match that + // same portion of the match. + repeated TagSpecifier stats_tags = 1; + + // Use all default tag regexes specified in Envoy. These can be combined with + // custom tags specified in :ref:`stats_tags + // `. They will be processed before + // the custom tags. + // + // .. note:: + // + // If any default tags are specified twice, the config will be considered + // invalid. + // + // See :repo:`well_known_names.h ` for a list of the + // default tags in Envoy. + // + // If not provided, the value is assumed to be true. + google.protobuf.BoolValue use_all_default_tags = 2; + + // Inclusion/exclusion matcher for stat name creation. If not provided, all stats are instantiated + // as normal. Preventing the instantiation of certain families of stats can improve memory + // performance for Envoys running especially large configs. + // + // .. warning:: + // Excluding stats may affect Envoy's behavior in undocumented ways. See + // `issue #8771 `_ for more information. + // If any unexpected behavior changes are observed, please open a new issue immediately. + StatsMatcher stats_matcher = 3; +} + +// Configuration for disabling stat instantiation. +message StatsMatcher { + // The instantiation of stats is unrestricted by default. 
If the goal is to configure Envoy to + // instantiate all stats, there is no need to construct a StatsMatcher. + // + // However, StatsMatcher can be used to limit the creation of families of stats in order to + // conserve memory. Stats can either be disabled entirely, or they can be + // limited by either an exclusion or an inclusion list of :ref:`StringMatcher + // ` protos: + // + // * If `reject_all` is set to `true`, no stats will be instantiated. If `reject_all` is set to + // `false`, all stats will be instantiated. + // + // * If an exclusion list is supplied, any stat name matching *any* of the StringMatchers in the + // list will not instantiate. + // + // * If an inclusion list is supplied, no stats will instantiate, except those matching *any* of + // the StringMatchers in the list. + // + // + // A StringMatcher can be used to match against an exact string, a suffix / prefix, or a regex. + // **NB:** For performance reasons, it is highly recommended to use a prefix- or suffix-based + // matcher rather than a regex-based matcher. + // + // Example 1. Excluding all stats. + // + // .. code-block:: json + // + // { + // "statsMatcher": { + // "rejectAll": "true" + // } + // } + // + // Example 2. Excluding all cluster-specific stats, but not cluster-manager stats: + // + // .. code-block:: json + // + // { + // "statsMatcher": { + // "exclusionList": { + // "patterns": [ + // { + // "prefix": "cluster." + // } + // ] + // } + // } + // } + // + // Example 3. Including only manager-related stats: + // + // .. code-block:: json + // + // { + // "statsMatcher": { + // "inclusionList": { + // "patterns": [ + // { + // "prefix": "cluster_manager." + // }, + // { + // "prefix": "listener_manager." + // } + // ] + // } + // } + // } + // + + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.metrics.v3.StatsMatcher"; + + oneof stats_matcher { + option (validate.required) = true; + + // If `reject_all` is true, then all stats are disabled. If `reject_all` is false, then all + // stats are enabled. + bool reject_all = 1; + + // Exclusive match. All stats are enabled except for those matching one of the supplied + // StringMatcher protos. + type.matcher.v4alpha.ListStringMatcher exclusion_list = 2; + + // Inclusive match. No stats are enabled except for those matching one of the supplied + // StringMatcher protos. + type.matcher.v4alpha.ListStringMatcher inclusion_list = 3; + } +} + +// Designates a tag name and value pair. The value may be either a fixed value +// or a regex providing the value via capture groups. The specified tag will be +// unconditionally set if a fixed value, otherwise it will only be set if one +// or more capture groups in the regex match. +message TagSpecifier { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.metrics.v3.TagSpecifier"; + + // Attaches an identifier to the tag values to identify the tag being in the + // sink. Envoy has a set of default names and regexes to extract dynamic + // portions of existing stats, which can be found in :repo:`well_known_names.h + // ` in the Envoy repository. If a :ref:`tag_name + // ` is provided in the config and + // neither :ref:`regex ` or + // :ref:`fixed_value ` were specified, + // Envoy will attempt to find that name in its set of defaults and use the accompanying regex. + // + // .. note:: + // + // It is invalid to specify the same tag name twice in a config. 
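As a sketch of how these pieces combine (values are illustrative), a bootstrap ``stats_config`` can mix a built-in tag name, a fixed-value tag, and an exclusion matcher:

.. code-block:: yaml

  stats_config:
    use_all_default_tags: false
    stats_tags:
    # Built-in name: Envoy supplies the default extraction regex for it.
    - tag_name: envoy.cluster_name
    # Custom tag attached unconditionally to every stat.
    - tag_name: deployment
      fixed_value: production
    stats_matcher:
      exclusion_list:
        patterns:
        - prefix: "http."   # do not instantiate stats under this prefix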
+ string tag_name = 1; + + oneof tag_value { + // Designates a tag to strip from the tag extracted name and provide as a named + // tag value for all statistics. This will only occur if any part of the name + // matches the regex provided with one or more capture groups. + // + // The first capture group identifies the portion of the name to remove. The + // second capture group (which will normally be nested inside the first) will + // designate the value of the tag for the statistic. If no second capture + // group is provided, the first will also be used to set the value of the tag. + // All other capture groups will be ignored. + // + // Example 1. a stat name ``cluster.foo_cluster.upstream_rq_timeout`` and + // one tag specifier: + // + // .. code-block:: json + // + // { + // "tag_name": "envoy.cluster_name", + // "regex": "^cluster\.((.+?)\.)" + // } + // + // Note that the regex will remove ``foo_cluster.`` making the tag extracted + // name ``cluster.upstream_rq_timeout`` and the tag value for + // ``envoy.cluster_name`` will be ``foo_cluster`` (note: there will be no + // ``.`` character because of the second capture group). + // + // Example 2. a stat name + // ``http.connection_manager_1.user_agent.ios.downstream_cx_total`` and two + // tag specifiers: + // + // .. code-block:: json + // + // [ + // { + // "tag_name": "envoy.http_user_agent", + // "regex": "^http(?=\.).*?\.user_agent\.((.+?)\.)\w+?$" + // }, + // { + // "tag_name": "envoy.http_conn_manager_prefix", + // "regex": "^http\.((.*?)\.)" + // } + // ] + // + // The two regexes of the specifiers will be processed in the definition order. + // + // The first regex will remove ``ios.``, leaving the tag extracted name + // ``http.connection_manager_1.user_agent.downstream_cx_total``. The tag + // ``envoy.http_user_agent`` will be added with tag value ``ios``. + // + // The second regex will remove ``connection_manager_1.`` from the tag + // extracted name produced by the first regex + // ``http.connection_manager_1.user_agent.downstream_cx_total``, leaving + // ``http.user_agent.downstream_cx_total`` as the tag extracted name. The tag + // ``envoy.http_conn_manager_prefix`` will be added with the tag value + // ``connection_manager_1``. + string regex = 2 [(validate.rules).string = {max_bytes: 1024}]; + + // Specifies a fixed tag value for the ``tag_name``. + string fixed_value = 3; + } +} + +// Stats configuration proto schema for built-in *envoy.stat_sinks.statsd* sink. This sink does not support +// tagged metrics. +// [#extension: envoy.stat_sinks.statsd] +message StatsdSink { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.metrics.v3.StatsdSink"; + + oneof statsd_specifier { + option (validate.required) = true; + + // The UDP address of a running `statsd `_ + // compliant listener. If specified, statistics will be flushed to this + // address. + core.v4alpha.Address address = 1; + + // The name of a cluster that is running a TCP `statsd + // `_ compliant listener. If specified, + // Envoy will connect to this cluster to flush statistics. + string tcp_cluster_name = 2; + } + + // Optional custom prefix for StatsdSink. If + // specified, this will override the default prefix. + // For example: + // + // .. code-block:: json + // + // { + // "prefix" : "envoy-prod" + // } + // + // will change emitted stats to + // + // .. 
code-block:: cpp + // + // envoy-prod.test_counter:1|c + // envoy-prod.test_timer:5|ms + // + // Note that the default prefix, "envoy", will be used if a prefix is not + // specified. + // + // Stats with default prefix: + // + // .. code-block:: cpp + // + // envoy.test_counter:1|c + // envoy.test_timer:5|ms + string prefix = 3; +} + +// Stats configuration proto schema for built-in *envoy.stat_sinks.dog_statsd* sink. +// The sink emits stats with `DogStatsD `_ +// compatible tags. Tags are configurable via :ref:`StatsConfig +// `. +// [#extension: envoy.stat_sinks.dog_statsd] +message DogStatsdSink { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.metrics.v3.DogStatsdSink"; + + reserved 2; + + oneof dog_statsd_specifier { + option (validate.required) = true; + + // The UDP address of a running DogStatsD compliant listener. If specified, + // statistics will be flushed to this address. + core.v4alpha.Address address = 1; + } + + // Optional custom metric name prefix. See :ref:`StatsdSink's prefix field + // ` for more details. + string prefix = 3; +} + +// Stats configuration proto schema for built-in *envoy.stat_sinks.hystrix* sink. +// The sink emits stats in `text/event-stream +// `_ +// formatted stream for use by `Hystrix dashboard +// `_. +// +// Note that only a single HystrixSink should be configured. +// +// Streaming is started through an admin endpoint :http:get:`/hystrix_event_stream`. +// [#extension: envoy.stat_sinks.hystrix] +message HystrixSink { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.metrics.v3.HystrixSink"; + + // The number of buckets the rolling statistical window is divided into. + // + // Each time the sink is flushed, all relevant Envoy statistics are sampled and + // added to the rolling window (removing the oldest samples in the window + // in the process). The sink then outputs the aggregate statistics across the + // current rolling window to the event stream(s). + // + // rolling_window(ms) = stats_flush_interval(ms) * num_of_buckets + // + // More detailed explanation can be found in `Hystrix wiki + // `_. 
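To make the rolling-window arithmetic concrete: with the default bootstrap ``stats_flush_interval`` of 5s and ``num_buckets: 20``, the window spans 5000 ms * 20 = 100 s of samples. An illustrative sink entry (v3 type URL assumed) is:

.. code-block:: yaml

  stats_flush_interval: 5s
  stats_sinks:
  - name: envoy.stat_sinks.hystrix
    typed_config:
      "@type": type.googleapis.com/envoy.config.metrics.v3.HystrixSink
      num_buckets: 20   # rolling_window = 5000 ms * 20 = 100 s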
+ int64 num_buckets = 1; +} diff --git a/generated_api_shadow/envoy/config/rbac/v4alpha/BUILD b/generated_api_shadow/envoy/config/rbac/v4alpha/BUILD index dbfa8be4f36f..f0707bae6eae 100644 --- a/generated_api_shadow/envoy/config/rbac/v4alpha/BUILD +++ b/generated_api_shadow/envoy/config/rbac/v4alpha/BUILD @@ -9,7 +9,7 @@ api_proto_package( "//envoy/config/core/v4alpha:pkg", "//envoy/config/rbac/v3:pkg", "//envoy/config/route/v4alpha:pkg", - "//envoy/type/matcher/v3:pkg", + "//envoy/type/matcher/v4alpha:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto", ], diff --git a/generated_api_shadow/envoy/config/rbac/v4alpha/rbac.proto b/generated_api_shadow/envoy/config/rbac/v4alpha/rbac.proto index cdbeb5bf2eef..8bab830607b6 100644 --- a/generated_api_shadow/envoy/config/rbac/v4alpha/rbac.proto +++ b/generated_api_shadow/envoy/config/rbac/v4alpha/rbac.proto @@ -4,9 +4,9 @@ package envoy.config.rbac.v4alpha; import "envoy/config/core/v4alpha/address.proto"; import "envoy/config/route/v4alpha/route_components.proto"; -import "envoy/type/matcher/v3/metadata.proto"; -import "envoy/type/matcher/v3/path.proto"; -import "envoy/type/matcher/v3/string.proto"; +import "envoy/type/matcher/v4alpha/metadata.proto"; +import "envoy/type/matcher/v4alpha/path.proto"; +import "envoy/type/matcher/v4alpha/string.proto"; import "google/api/expr/v1alpha1/syntax.proto"; @@ -140,7 +140,7 @@ message Permission { route.v4alpha.HeaderMatcher header = 4; // A URL path on the incoming HTTP request. Only available for HTTP. - type.matcher.v3.PathMatcher url_path = 10; + type.matcher.v4alpha.PathMatcher url_path = 10; // A CIDR block that describes the destination IP. core.v4alpha.CidrRange destination_ip = 5; @@ -149,7 +149,7 @@ message Permission { uint32 destination_port = 6 [(validate.rules).uint32 = {lte: 65535}]; // Metadata that describes additional information about the action. - type.matcher.v3.MetadataMatcher metadata = 7; + type.matcher.v4alpha.MetadataMatcher metadata = 7; // Negates matching the provided permission. For instance, if the value of `not_rule` would // match, this permission would not match. Conversely, if the value of `not_rule` would not @@ -166,7 +166,7 @@ message Permission { // // * If the :ref:`TLS Inspector ` // filter is not added, and if a `FilterChainMatch` is not defined for - // the :ref:`server name `, + // the :ref:`server name `, // a TLS connection's requested SNI server name will be treated as if it // wasn't present. // @@ -175,7 +175,7 @@ message Permission { // // Please refer to :ref:`this FAQ entry ` to learn to // setup SNI. - type.matcher.v3.StringMatcher requested_server_name = 9; + type.matcher.v4alpha.StringMatcher requested_server_name = 9; } } @@ -203,7 +203,7 @@ message Principal { // The name of the principal. If set, The URI SAN or DNS SAN in that order is used from the // certificate, otherwise the subject field is used. If unset, it applies to any user that is // authenticated. - type.matcher.v3.StringMatcher principal_name = 2; + type.matcher.v4alpha.StringMatcher principal_name = 2; } oneof identifier { @@ -245,10 +245,10 @@ message Principal { route.v4alpha.HeaderMatcher header = 6; // A URL path on the incoming HTTP request. Only available for HTTP. - type.matcher.v3.PathMatcher url_path = 9; + type.matcher.v4alpha.PathMatcher url_path = 9; // Metadata that describes additional information about the principal. 
- type.matcher.v3.MetadataMatcher metadata = 7; + type.matcher.v4alpha.MetadataMatcher metadata = 7; // Negates matching the provided principal. For instance, if the value of `not_id` would match, // this principal would not match. Conversely, if the value of `not_id` would not match, this diff --git a/generated_api_shadow/envoy/config/route/v4alpha/BUILD b/generated_api_shadow/envoy/config/route/v4alpha/BUILD index 507bedd76bdf..13dd451d1b4a 100644 --- a/generated_api_shadow/envoy/config/route/v4alpha/BUILD +++ b/generated_api_shadow/envoy/config/route/v4alpha/BUILD @@ -9,7 +9,7 @@ api_proto_package( "//envoy/annotations:pkg", "//envoy/config/core/v4alpha:pkg", "//envoy/config/route/v3:pkg", - "//envoy/type/matcher/v3:pkg", + "//envoy/type/matcher/v4alpha:pkg", "//envoy/type/tracing/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", diff --git a/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto b/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto index 5fb31112b34e..6e1b1f9f5a0a 100644 --- a/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto +++ b/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto @@ -4,8 +4,8 @@ package envoy.config.route.v4alpha; import "envoy/config/core/v4alpha/base.proto"; import "envoy/config/core/v4alpha/proxy_protocol.proto"; -import "envoy/type/matcher/v3/regex.proto"; -import "envoy/type/matcher/v3/string.proto"; +import "envoy/type/matcher/v4alpha/regex.proto"; +import "envoy/type/matcher/v4alpha/string.proto"; import "envoy/type/tracing/v3/custom_tag.proto"; import "envoy/type/v3/percent.proto"; import "envoy/type/v3/range.proto"; @@ -143,7 +143,7 @@ message VirtualHost { // will see the attempt count as perceived by the second Envoy. Defaults to false. // This header is unaffected by the // :ref:`suppress_envoy_headers - // ` flag. + // ` flag. // // [#next-major-version: rename to include_attempt_count_in_request.] bool include_request_attempt_count = 14; @@ -155,7 +155,7 @@ message VirtualHost { // will see the attempt count as perceived by the Envoy closest upstream from itself. Defaults to false. // This header is unaffected by the // :ref:`suppress_envoy_headers - // ` flag. + // ` flag. bool include_attempt_count_in_response = 19; // Indicates the retry policy for all routes in this virtual host. Note that setting a @@ -428,7 +428,7 @@ message RouteMatch { // path_specifier entirely and just rely on a set of header matchers which can already match // on :path, etc. The issue with that is it is unclear how to generically deal with query string // stripping. This needs more thought.] - type.matcher.v3.RegexMatcher safe_regex = 10 [(validate.rules).message = {required: true}]; + type.matcher.v4alpha.RegexMatcher safe_regex = 10 [(validate.rules).message = {required: true}]; // [#not-implemented-hide:] // If this is used as the matcher, the matcher will only match CONNECT requests. @@ -499,7 +499,7 @@ message CorsPolicy { // Specifies string patterns that match allowed origins. An origin is allowed if any of the // string matchers match. - repeated type.matcher.v3.StringMatcher allow_origin_string_match = 11; + repeated type.matcher.v4alpha.StringMatcher allow_origin_string_match = 11; // Specifies the content for the *access-control-allow-methods* header. 
string allow_methods = 2; @@ -855,7 +855,7 @@ message RouteAction { // * The pattern ``(?i)/xxx/`` paired with a substitution string of ``/yyy/`` // would do a case-insensitive match and transform path ``/aaa/XxX/bbb`` to // ``/aaa/yyy/bbb``. - type.matcher.v3.RegexMatchAndSubstitute regex_rewrite = 32; + type.matcher.v4alpha.RegexMatchAndSubstitute regex_rewrite = 32; oneof host_rewrite_specifier { // Indicates that during forwarding, the host header will be swapped with @@ -1534,7 +1534,7 @@ message HeaderMatcher { // If specified, this regex string is a regular expression rule which implies the entire request // header value must match the regex. The rule will not match if only a subsequence of the // request header value matches the regex. - type.matcher.v3.RegexMatcher safe_regex_match = 11; + type.matcher.v4alpha.RegexMatcher safe_regex_match = 11; // If specified, header match will be performed based on range. // The rule will match if the request header value is within this range. @@ -1596,7 +1596,8 @@ message QueryParameterMatcher { oneof query_parameter_match_specifier { // Specifies whether a query parameter value should match against a string. - type.matcher.v3.StringMatcher string_match = 5 [(validate.rules).message = {required: true}]; + type.matcher.v4alpha.StringMatcher string_match = 5 + [(validate.rules).message = {required: true}]; // Specifies whether a query parameter should be present. bool present_match = 6; diff --git a/generated_api_shadow/envoy/config/tap/v4alpha/BUILD b/generated_api_shadow/envoy/config/tap/v4alpha/BUILD new file mode 100644 index 000000000000..cb06389f0186 --- /dev/null +++ b/generated_api_shadow/envoy/config/tap/v4alpha/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v4alpha:pkg", + "//envoy/config/route/v4alpha:pkg", + "//envoy/config/tap/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/config/tap/v4alpha/common.proto b/generated_api_shadow/envoy/config/tap/v4alpha/common.proto new file mode 100644 index 000000000000..b8e8dac291f3 --- /dev/null +++ b/generated_api_shadow/envoy/config/tap/v4alpha/common.proto @@ -0,0 +1,225 @@ +syntax = "proto3"; + +package envoy.config.tap.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/core/v4alpha/grpc_service.proto"; +import "envoy/config/route/v4alpha/route_components.proto"; + +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.tap.v4alpha"; +option java_outer_classname = "CommonProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Common tap configuration] + +// Tap configuration. +message TapConfig { + // [#comment:TODO(mattklein123): Rate limiting] + + option (udpa.annotations.versioning).previous_message_type = "envoy.config.tap.v3.TapConfig"; + + // The match configuration. If the configuration matches the data source being tapped, a tap will + // occur, with the result written to the configured output. + MatchPredicate match_config = 1 [(validate.rules).message = {required: true}]; + + // The tap output configuration. 
If a match configuration matches a data source being tapped, + // a tap will occur and the data will be written to the configured output. + OutputConfig output_config = 2 [(validate.rules).message = {required: true}]; + + // [#not-implemented-hide:] Specify if Tap matching is enabled. The % of requests\connections for + // which the tap matching is enabled. When not enabled, the request\connection will not be + // recorded. + // + // .. note:: + // + // This field defaults to 100/:ref:`HUNDRED + // `. + core.v4alpha.RuntimeFractionalPercent tap_enabled = 3; +} + +// Tap match configuration. This is a recursive structure which allows complex nested match +// configurations to be built using various logical operators. +// [#next-free-field: 9] +message MatchPredicate { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.tap.v3.MatchPredicate"; + + // A set of match configurations used for logical operations. + message MatchSet { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.tap.v3.MatchPredicate.MatchSet"; + + // The list of rules that make up the set. + repeated MatchPredicate rules = 1 [(validate.rules).repeated = {min_items: 2}]; + } + + oneof rule { + option (validate.required) = true; + + // A set that describes a logical OR. If any member of the set matches, the match configuration + // matches. + MatchSet or_match = 1; + + // A set that describes a logical AND. If all members of the set match, the match configuration + // matches. + MatchSet and_match = 2; + + // A negation match. The match configuration will match if the negated match condition matches. + MatchPredicate not_match = 3; + + // The match configuration will always match. + bool any_match = 4 [(validate.rules).bool = {const: true}]; + + // HTTP request headers match configuration. + HttpHeadersMatch http_request_headers_match = 5; + + // HTTP request trailers match configuration. + HttpHeadersMatch http_request_trailers_match = 6; + + // HTTP response headers match configuration. + HttpHeadersMatch http_response_headers_match = 7; + + // HTTP response trailers match configuration. + HttpHeadersMatch http_response_trailers_match = 8; + } +} + +// HTTP headers match configuration. +message HttpHeadersMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.tap.v3.HttpHeadersMatch"; + + // HTTP headers to match. + repeated route.v4alpha.HeaderMatcher headers = 1; +} + +// Tap output configuration. +message OutputConfig { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.tap.v3.OutputConfig"; + + // Output sinks for tap data. Currently a single sink is allowed in the list. Once multiple + // sink types are supported this constraint will be relaxed. + repeated OutputSink sinks = 1 [(validate.rules).repeated = {min_items: 1 max_items: 1}]; + + // For buffered tapping, the maximum amount of received body that will be buffered prior to + // truncation. If truncation occurs, the :ref:`truncated + // ` field will be set. If not specified, the + // default is 1KiB. + google.protobuf.UInt32Value max_buffered_rx_bytes = 2; + + // For buffered tapping, the maximum amount of transmitted body that will be buffered prior to + // truncation. If truncation occurs, the :ref:`truncated + // ` field will be set. If not specified, the + // default is 1KiB. 
+ google.protobuf.UInt32Value max_buffered_tx_bytes = 3; + + // Indicates whether taps produce a single buffered message per tap, or multiple streamed + // messages per tap in the emitted :ref:`TraceWrapper + // ` messages. Note that streamed tapping does not + // mean that no buffering takes place. Buffering may be required if data is processed before a + // match can be determined. See the HTTP tap filter :ref:`streaming + // ` documentation for more information. + bool streaming = 4; +} + +// Tap output sink configuration. +message OutputSink { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.tap.v3.OutputSink"; + + // Output format. All output is in the form of one or more :ref:`TraceWrapper + // ` messages. This enumeration indicates + // how those messages are written. Note that not all sinks support all output formats. See + // individual sink documentation for more information. + enum Format { + // Each message will be written as JSON. Any :ref:`body ` + // data will be present in the :ref:`as_bytes + // ` field. This means that body data will be + // base64 encoded as per the `proto3 JSON mappings + // `_. + JSON_BODY_AS_BYTES = 0; + + // Each message will be written as JSON. Any :ref:`body ` + // data will be present in the :ref:`as_string + // ` field. This means that body data will be + // string encoded as per the `proto3 JSON mappings + // `_. This format type is + // useful when it is known that that body is human readable (e.g., JSON over HTTP) and the + // user wishes to view it directly without being forced to base64 decode the body. + JSON_BODY_AS_STRING = 1; + + // Binary proto format. Note that binary proto is not self-delimiting. If a sink writes + // multiple binary messages without any length information the data stream will not be + // useful. However, for certain sinks that are self-delimiting (e.g., one message per file) + // this output format makes consumption simpler. + PROTO_BINARY = 2; + + // Messages are written as a sequence tuples, where each tuple is the message length encoded + // as a `protobuf 32-bit varint + // `_ + // followed by the binary message. The messages can be read back using the language specific + // protobuf coded stream implementation to obtain the message length and the message. + PROTO_BINARY_LENGTH_DELIMITED = 3; + + // Text proto format. + PROTO_TEXT = 4; + } + + // Sink output format. + Format format = 1 [(validate.rules).enum = {defined_only: true}]; + + oneof output_sink_type { + option (validate.required) = true; + + // Tap output will be streamed out the :http:post:`/tap` admin endpoint. + // + // .. attention:: + // + // It is only allowed to specify the streaming admin output sink if the tap is being + // configured from the :http:post:`/tap` admin endpoint. Thus, if an extension has + // been configured to receive tap configuration from some other source (e.g., static + // file, XDS, etc.) configuring the streaming admin output type will fail. + StreamingAdminSink streaming_admin = 2; + + // Tap output will be written to a file per tap sink. + FilePerTapSink file_per_tap = 3; + + // [#not-implemented-hide:] + // GrpcService to stream data to. The format argument must be PROTO_BINARY. + StreamingGrpcSink streaming_grpc = 4; + } +} + +// Streaming admin sink configuration. +message StreamingAdminSink { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.tap.v3.StreamingAdminSink"; +} + +// The file per tap sink outputs a discrete file for every tapped stream. 
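Putting the tap messages together, a sketch of a complete TapConfig that matches every stream and writes one file per tap via the FilePerTapSink defined next (the output path is a placeholder):

.. code-block:: yaml

  match_config:
    any_match: true            # tap every stream
  output_config:
    sinks:
    - format: JSON_BODY_AS_STRING
      file_per_tap:
        path_prefix: /tmp/tap  # placeholder; one output file per tapped stream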
+message FilePerTapSink { + option (udpa.annotations.versioning).previous_message_type = "envoy.config.tap.v3.FilePerTapSink"; + + // Path prefix. The output file will be of the form _.pb, where is an + // identifier distinguishing the recorded trace for stream instances (the Envoy + // connection ID, HTTP stream ID, etc.). + string path_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; +} + +// [#not-implemented-hide:] Streaming gRPC sink configuration sends the taps to an external gRPC +// server. +message StreamingGrpcSink { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.tap.v3.StreamingGrpcSink"; + + // Opaque identifier, that will be sent back to the streaming grpc server. + string tap_id = 1; + + // The gRPC server that hosts the Tap Sink Service. + core.v4alpha.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}]; +} diff --git a/generated_api_shadow/envoy/data/dns/v4alpha/BUILD b/generated_api_shadow/envoy/data/dns/v4alpha/BUILD new file mode 100644 index 000000000000..bc8958ceab0b --- /dev/null +++ b/generated_api_shadow/envoy/data/dns/v4alpha/BUILD @@ -0,0 +1,13 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/data/dns/v3:pkg", + "//envoy/type/matcher/v4alpha:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/data/dns/v4alpha/dns_table.proto b/generated_api_shadow/envoy/data/dns/v4alpha/dns_table.proto new file mode 100644 index 000000000000..83edc20088de --- /dev/null +++ b/generated_api_shadow/envoy/data/dns/v4alpha/dns_table.proto @@ -0,0 +1,85 @@ +syntax = "proto3"; + +package envoy.data.dns.v4alpha; + +import "envoy/type/matcher/v4alpha/string.proto"; + +import "google/protobuf/duration.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.data.dns.v4alpha"; +option java_outer_classname = "DnsTableProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: DNS Filter Table Data] +// :ref:`DNS Filter config overview `. + +// This message contains the configuration for the DNS Filter if populated +// from the control plane +message DnsTable { + option (udpa.annotations.versioning).previous_message_type = "envoy.data.dns.v3.DnsTable"; + + // This message contains a list of IP addresses returned for a query for a known name + message AddressList { + option (udpa.annotations.versioning).previous_message_type = + "envoy.data.dns.v3.DnsTable.AddressList"; + + // This field contains a well formed IP address that is returned + // in the answer for a name query. The address field can be an + // IPv4 or IPv6 address. Address family detection is done automatically + // when Envoy parses the string. Since this field is repeated, + // Envoy will return one randomly chosen entry from this list in the + // DNS response. 
The random index will vary per query so that we prevent + // clients pinning on a single address for a configured domain + repeated string address = 1 [(validate.rules).repeated = { + min_items: 1 + items {string {min_len: 3}} + }]; + } + + // This message type is extensible and can contain a list of addresses + // or dictate some other method for resolving the addresses for an + // endpoint + message DnsEndpoint { + option (udpa.annotations.versioning).previous_message_type = + "envoy.data.dns.v3.DnsTable.DnsEndpoint"; + + oneof endpoint_config { + option (validate.required) = true; + + AddressList address_list = 1; + } + } + + message DnsVirtualDomain { + option (udpa.annotations.versioning).previous_message_type = + "envoy.data.dns.v3.DnsTable.DnsVirtualDomain"; + + // The domain name for which Envoy will respond to query requests + string name = 1 [(validate.rules).string = {min_len: 2 well_known_regex: HTTP_HEADER_NAME}]; + + // The configuration containing the method to determine the address + // of this endpoint + DnsEndpoint endpoint = 2; + + // Sets the TTL in dns answers from Envoy returned to the client + google.protobuf.Duration answer_ttl = 3 [(validate.rules).duration = {gt {}}]; + } + + // Control how many times envoy makes an attempt to forward a query to + // an external server + uint32 external_retry_count = 1; + + // Fully qualified domain names for which Envoy will respond to queries + repeated DnsVirtualDomain virtual_domains = 2 [(validate.rules).repeated = {min_items: 1}]; + + // This field serves to help Envoy determine whether it can authoritatively + // answer a query for a name matching a suffix in this list. If the query + // name does not match a suffix in this list, Envoy will forward + // the query to an upstream DNS server + repeated type.matcher.v4alpha.StringMatcher known_suffixes = 3; +} diff --git a/generated_api_shadow/envoy/extensions/common/tap/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/common/tap/v4alpha/BUILD index d1fe49142a8e..a6fffecd9621 100644 --- a/generated_api_shadow/envoy/extensions/common/tap/v4alpha/BUILD +++ b/generated_api_shadow/envoy/extensions/common/tap/v4alpha/BUILD @@ -7,7 +7,7 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/core/v4alpha:pkg", - "//envoy/config/tap/v3:pkg", + "//envoy/config/tap/v4alpha:pkg", "//envoy/extensions/common/tap/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], diff --git a/generated_api_shadow/envoy/extensions/common/tap/v4alpha/common.proto b/generated_api_shadow/envoy/extensions/common/tap/v4alpha/common.proto index 63de14a3d6f6..f37889b90212 100644 --- a/generated_api_shadow/envoy/extensions/common/tap/v4alpha/common.proto +++ b/generated_api_shadow/envoy/extensions/common/tap/v4alpha/common.proto @@ -3,7 +3,7 @@ syntax = "proto3"; package envoy.extensions.common.tap.v4alpha; import "envoy/config/core/v4alpha/config_source.proto"; -import "envoy/config/tap/v3/common.proto"; +import "envoy/config/tap/v4alpha/common.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -42,7 +42,7 @@ message CommonExtensionConfig { // If specified, the tap filter will be configured via a static configuration that cannot be // changed. - config.tap.v3.TapConfig static_config = 2; + config.tap.v4alpha.TapConfig static_config = 2; // [#not-implemented-hide:] Configuration to use for TapDS updates for the filter. 
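For the DnsTable message above, a small illustrative table (domains, addresses, and the TTL are placeholders) could be:

.. code-block:: yaml

  external_retry_count: 3
  known_suffixes:
  - suffix: example.com
  virtual_domains:
  - name: www.example.com
    answer_ttl: 30s
    endpoint:
      address_list:
        address:          # one of these is returned, chosen at random per query
        - 10.0.0.1
        - 10.0.0.2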
TapDSConfig tapds_config = 3; diff --git a/generated_api_shadow/envoy/extensions/filters/http/cache/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/http/cache/v4alpha/BUILD new file mode 100644 index 000000000000..63033acab5cf --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/http/cache/v4alpha/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/route/v4alpha:pkg", + "//envoy/extensions/filters/http/cache/v3alpha:pkg", + "//envoy/type/matcher/v4alpha:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/filters/http/cache/v4alpha/cache.proto b/generated_api_shadow/envoy/extensions/filters/http/cache/v4alpha/cache.proto new file mode 100644 index 000000000000..7cb48d4d6c26 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/http/cache/v4alpha/cache.proto @@ -0,0 +1,84 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.cache.v4alpha; + +import "envoy/config/route/v4alpha/route_components.proto"; +import "envoy/type/matcher/v4alpha/string.proto"; + +import "google/protobuf/any.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.cache.v4alpha"; +option java_outer_classname = "CacheProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).work_in_progress = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: HTTP Cache Filter] +// [#extension: envoy.filters.http.cache] + +message CacheConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.cache.v3alpha.CacheConfig"; + + // [#not-implemented-hide:] + // Modifies cache key creation by restricting which parts of the URL are included. + message KeyCreatorParams { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.cache.v3alpha.CacheConfig.KeyCreatorParams"; + + // If true, exclude the URL scheme from the cache key. Set to true if your origins always + // produce the same response for http and https requests. + bool exclude_scheme = 1; + + // If true, exclude the host from the cache key. Set to true if your origins' responses don't + // ever depend on host. + bool exclude_host = 2; + + // If *query_parameters_included* is nonempty, only query parameters matched + // by one or more of its matchers are included in the cache key. Any other + // query params will not affect cache lookup. + repeated config.route.v4alpha.QueryParameterMatcher query_parameters_included = 3; + + // If *query_parameters_excluded* is nonempty, query parameters matched by one + // or more of its matchers are excluded from the cache key (even if also + // matched by *query_parameters_included*), and will not affect cache lookup. + repeated config.route.v4alpha.QueryParameterMatcher query_parameters_excluded = 4; + } + + // Config specific to the cache storage implementation. + google.protobuf.Any typed_config = 1 [(validate.rules).any = {required: true}]; + + // [#not-implemented-hide:] + // + // + // List of allowed *Vary* headers. 
+ // + // The *vary* response header holds a list of header names that affect the + // contents of a response, as described by + // https://httpwg.org/specs/rfc7234.html#caching.negotiated.responses. + // + // During insertion, *allowed_vary_headers* acts as a whitelist: if a + // response's *vary* header mentions any header names that aren't in + // *allowed_vary_headers*, that response will not be cached. + // + // During lookup, *allowed_vary_headers* controls what request headers will be + // sent to the cache storage implementation. + repeated type.matcher.v4alpha.StringMatcher allowed_vary_headers = 2; + + // [#not-implemented-hide:] + // + // + // Modifies cache key creation by restricting which parts of the URL are included. + KeyCreatorParams key_creator_params = 3; + + // [#not-implemented-hide:] + // + // + // Max body size the cache filter will insert into a cache. 0 means unlimited (though the cache + // storage implementation may have its own limit beyond which it will reject insertions). + uint32 max_body_bytes = 4; +} diff --git a/generated_api_shadow/envoy/extensions/filters/http/csrf/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/http/csrf/v4alpha/BUILD new file mode 100644 index 000000000000..72211218ff52 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/http/csrf/v4alpha/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v4alpha:pkg", + "//envoy/extensions/filters/http/csrf/v3:pkg", + "//envoy/type/matcher/v4alpha:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/filters/http/csrf/v4alpha/csrf.proto b/generated_api_shadow/envoy/extensions/filters/http/csrf/v4alpha/csrf.proto new file mode 100644 index 000000000000..dda915a059af --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/http/csrf/v4alpha/csrf.proto @@ -0,0 +1,54 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.csrf.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/type/matcher/v4alpha/string.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.csrf.v4alpha"; +option java_outer_classname = "CsrfProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: CSRF] +// Cross-Site Request Forgery :ref:`configuration overview `. +// [#extension: envoy.filters.http.csrf] + +// CSRF filter config. +message CsrfPolicy { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.csrf.v3.CsrfPolicy"; + + // Specifies the % of requests for which the CSRF filter is enabled. + // + // If :ref:`runtime_key ` is specified, + // Envoy will lookup the runtime key to get the percentage of requests to filter. + // + // .. note:: + // + // This field defaults to 100/:ref:`HUNDRED + // `. + config.core.v4alpha.RuntimeFractionalPercent filter_enabled = 1 + [(validate.rules).message = {required: true}]; + + // Specifies that CSRF policies will be evaluated and tracked, but not enforced. + // + // This is intended to be used when ``filter_enabled`` is off and will be ignored otherwise. 
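An illustrative CsrfPolicy instance enforcing the filter for all requests and allowing one extra source origin (the runtime key and origin are placeholders; the v3 type URL is assumed):

.. code-block:: yaml

  http_filters:
  - name: envoy.filters.http.csrf
    typed_config:
      "@type": type.googleapis.com/envoy.extensions.filters.http.csrf.v3.CsrfPolicy
      filter_enabled:
        runtime_key: csrf.enabled        # placeholder runtime key
        default_value:
          numerator: 100
          denominator: HUNDRED
      additional_origins:
      - suffix: trusted.example.com      # allowed in addition to the destination origin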
+ // + // If :ref:`runtime_key ` is specified, + // Envoy will lookup the runtime key to get the percentage of requests for which it will evaluate + // and track the request's *Origin* and *Destination* to determine if it's valid, but will not + // enforce any policies. + config.core.v4alpha.RuntimeFractionalPercent shadow_enabled = 2; + + // Specifies additional source origins that will be allowed in addition to + // the destination origin. + // + // More information on how this can be configured via runtime can be found + // :ref:`here `. + repeated type.matcher.v4alpha.StringMatcher additional_origins = 3; +} diff --git a/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v4alpha/BUILD new file mode 100644 index 000000000000..9a3d8a574a9b --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v4alpha/BUILD @@ -0,0 +1,16 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/annotations:pkg", + "//envoy/config/core/v4alpha:pkg", + "//envoy/extensions/filters/http/ext_authz/v3:pkg", + "//envoy/type/matcher/v4alpha:pkg", + "//envoy/type/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto b/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto new file mode 100644 index 000000000000..b39a2d56d00d --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto @@ -0,0 +1,245 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.ext_authz.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/core/v4alpha/grpc_service.proto"; +import "envoy/config/core/v4alpha/http_uri.proto"; +import "envoy/type/matcher/v4alpha/string.proto"; +import "envoy/type/v3/http_status.proto"; + +import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.ext_authz.v4alpha"; +option java_outer_classname = "ExtAuthzProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: External Authorization] +// External Authorization :ref:`configuration overview `. +// [#extension: envoy.filters.http.ext_authz] + +// [#next-free-field: 11] +message ExtAuthz { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.ext_authz.v3.ExtAuthz"; + + reserved 4; + + reserved "use_alpha"; + + // External authorization service configuration. + oneof services { + // gRPC service configuration (default timeout: 200ms). + config.core.v4alpha.GrpcService grpc_service = 1; + + // HTTP service configuration (default timeout: 200ms). + HttpService http_service = 3; + } + + // Changes filter's behaviour on errors: + // + // 1. When set to true, the filter will *accept* client request even if the communication with + // the authorization service has failed, or if the authorization service has returned a HTTP 5xx + // error. + // + // 2. 
When set to false, ext-authz will *reject* client requests and return a *Forbidden* + // response if the communication with the authorization service has failed, or if the + // authorization service has returned a HTTP 5xx error. + // + // Note that errors can be *always* tracked in the :ref:`stats + // `. + bool failure_mode_allow = 2; + + // Enables filter to buffer the client request body and send it within the authorization request. + // A ``x-envoy-auth-partial-body: false|true`` metadata header will be added to the authorization + // request message indicating if the body data is partial. + BufferSettings with_request_body = 5; + + // Clears route cache in order to allow the external authorization service to correctly affect + // routing decisions. Filter clears all cached routes when: + // + // 1. The field is set to *true*. + // + // 2. The status returned from the authorization service is a HTTP 200 or gRPC 0. + // + // 3. At least one *authorization response header* is added to the client request, or is used for + // altering another client request header. + // + bool clear_route_cache = 6; + + // Sets the HTTP status that is returned to the client when there is a network error between the + // filter and the authorization server. The default status is HTTP 403 Forbidden. + type.v3.HttpStatus status_on_error = 7; + + // Specifies a list of metadata namespaces whose values, if present, will be passed to the + // ext_authz service as an opaque *protobuf::Struct*. + // + // For example, if the *jwt_authn* filter is used and :ref:`payload_in_metadata + // ` is set, + // then the following will pass the jwt payload to the authorization server. + // + // .. code-block:: yaml + // + // metadata_context_namespaces: + // - envoy.filters.http.jwt_authn + // + repeated string metadata_context_namespaces = 8; + + // Specifies if the filter is enabled. + // + // If :ref:`runtime_key ` is specified, + // Envoy will lookup the runtime key to get the percentage of requests to filter. + // + // If this field is not specified, the filter will be enabled for all requests. + config.core.v4alpha.RuntimeFractionalPercent filter_enabled = 9; + + // Specifies if the peer certificate is sent to the external service. + // + // When this field is true, Envoy will include the peer X.509 certificate, if available, in the + // :ref:`certificate`. + bool include_peer_certificate = 10; +} + +// Configuration for buffering the request data. +message BufferSettings { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.ext_authz.v3.BufferSettings"; + + // Sets the maximum size of a message body that the filter will hold in memory. Envoy will return + // *HTTP 413* and will *not* initiate the authorization process when buffer reaches the number + // set in this field. Note that this setting will have precedence over :ref:`failure_mode_allow + // `. + uint32 max_request_bytes = 1 [(validate.rules).uint32 = {gt: 0}]; + + // When this field is true, Envoy will buffer the message until *max_request_bytes* is reached. + // The authorization request will be dispatched and no 413 HTTP error will be returned by the + // filter. + bool allow_partial_message = 2; +} + +// HttpService is used for raw HTTP communication between the filter and the authorization service. +// When configured, the filter will parse the client request and use these attributes to call the +// authorization server. Depending on the response, the filter may reject or accept the client +// request. 
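A sketch of the gRPC form of this filter's configuration (the cluster name is a placeholder; the v3 type URL is assumed since the v4alpha shadow is not directly loadable):

.. code-block:: yaml

  http_filters:
  - name: envoy.filters.http.ext_authz
    typed_config:
      "@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz
      grpc_service:
        envoy_grpc:
          cluster_name: ext_authz_cluster  # placeholder cluster running the authorization service
        timeout: 0.25s
      failure_mode_allow: false            # reject client requests when the authz service errors
      with_request_body:
        max_request_bytes: 1024
        allow_partial_message: true
      include_peer_certificate: true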
Note that in any of these events, metadata can be added, removed or overridden by the +// filter: +// +// *On authorization request*, a list of allowed request headers may be supplied. See +// :ref:`allowed_headers +// ` +// for details. Additional headers metadata may be added to the authorization request. See +// :ref:`headers_to_add +// ` for +// details. +// +// On authorization response status HTTP 200 OK, the filter will allow traffic to the upstream and +// additional headers metadata may be added to the original client request. See +// :ref:`allowed_upstream_headers +// ` +// for details. +// +// On other authorization response statuses, the filter will not allow traffic. Additional headers +// metadata as well as body may be added to the client's response. See :ref:`allowed_client_headers +// ` +// for details. +// [#next-free-field: 9] +message HttpService { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.ext_authz.v3.HttpService"; + + reserved 3, 4, 5, 6; + + // Sets the HTTP server URI which the authorization requests must be sent to. + config.core.v4alpha.HttpUri server_uri = 1; + + // Sets a prefix to the value of authorization request header *Path*. + string path_prefix = 2; + + // Settings used for controlling authorization request metadata. + AuthorizationRequest authorization_request = 7; + + // Settings used for controlling authorization response metadata. + AuthorizationResponse authorization_response = 8; +} + +message AuthorizationRequest { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.ext_authz.v3.AuthorizationRequest"; + + // Authorization request will include the client request headers that have a correspondent match + // in the :ref:`list `. Note that in addition to the + // user's supplied matchers: + // + // 1. *Host*, *Method*, *Path* and *Content-Length* are automatically included to the list. + // + // 2. *Content-Length* will be set to 0 and the request to the authorization service will not have + // a message body. However, the authorization request can include the buffered client request body + // (controlled by :ref:`with_request_body + // ` setting), + // consequently the value of *Content-Length* of the authorization request reflects the size of + // its payload size. + // + type.matcher.v4alpha.ListStringMatcher allowed_headers = 1; + + // Sets a list of headers that will be included to the request to authorization service. Note that + // client request of the same key will be overridden. + repeated config.core.v4alpha.HeaderValue headers_to_add = 2; +} + +message AuthorizationResponse { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.ext_authz.v3.AuthorizationResponse"; + + // When this :ref:`list ` is set, authorization + // response headers that have a correspondent match will be added to the original client request. + // Note that coexistent headers will be overridden. + type.matcher.v4alpha.ListStringMatcher allowed_upstream_headers = 1; + + // When this :ref:`list `. is set, authorization + // response headers that have a correspondent match will be added to the client's response. Note + // that when this list is *not* set, all the authorization response headers, except *Authority + // (Host)* will be in the response to the client. When a header is included in this list, *Path*, + // *Status*, *Content-Length*, *WWWAuthenticate* and *Location* are automatically added. 
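And the raw HTTP variant might be sketched as follows (URI, cluster, and header names are placeholders):

.. code-block:: yaml

  http_service:
    server_uri:
      uri: http://authz.internal:10003   # placeholder authorization server
      cluster: ext_authz_http_cluster
      timeout: 0.25s
    path_prefix: /check                  # prepended to the authorization request path
    authorization_request:
      allowed_headers:
        patterns:
        - exact: x-auth-token            # forwarded in addition to Host/Method/Path/Content-Length
    authorization_response:
      allowed_upstream_headers:
        patterns:
        - exact: x-auth-user             # copied from the authz response onto the upstream request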
+ type.matcher.v4alpha.ListStringMatcher allowed_client_headers = 2; +} + +// Extra settings on a per virtualhost/route/weighted-cluster level. +message ExtAuthzPerRoute { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.ext_authz.v3.ExtAuthzPerRoute"; + + oneof override { + option (validate.required) = true; + + // Disable the ext auth filter for this particular vhost or route. + // If disabled is specified in multiple per-filter-configs, the most specific one will be used. + bool disabled = 1 [(validate.rules).bool = {const: true}]; + + // Check request settings for this route. + CheckSettings check_settings = 2 [(validate.rules).message = {required: true}]; + } +} + +// Extra settings for the check request. You can use this to provide extra context for the +// external authorization server on specific virtual hosts \ routes. For example, adding a context +// extension on the virtual host level can give the ext-authz server information on what virtual +// host is used without needing to parse the host header. If CheckSettings is specified in multiple +// per-filter-configs, they will be merged in order, and the result will be used. +message CheckSettings { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.ext_authz.v3.CheckSettings"; + + // Context extensions to set on the CheckRequest's + // :ref:`AttributeContext.context_extensions` + // + // Merge semantics for this field are such that keys from more specific configs override. + // + // .. note:: + // + // These settings are only applied to a filter configured with a + // :ref:`grpc_service`. + map context_extensions = 1; +} diff --git a/generated_api_shadow/envoy/extensions/filters/http/fault/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/http/fault/v4alpha/BUILD new file mode 100644 index 000000000000..936ee4414038 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/http/fault/v4alpha/BUILD @@ -0,0 +1,15 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/route/v4alpha:pkg", + "//envoy/extensions/filters/common/fault/v3:pkg", + "//envoy/extensions/filters/http/fault/v3:pkg", + "//envoy/type/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/filters/http/fault/v4alpha/fault.proto b/generated_api_shadow/envoy/extensions/filters/http/fault/v4alpha/fault.proto new file mode 100644 index 000000000000..7dd4f48aa476 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/http/fault/v4alpha/fault.proto @@ -0,0 +1,144 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.fault.v4alpha; + +import "envoy/config/route/v4alpha/route_components.proto"; +import "envoy/extensions/filters/common/fault/v3/fault.proto"; +import "envoy/type/v3/percent.proto"; + +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.fault.v4alpha"; +option java_outer_classname = "FaultProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Fault Injection] +// Fault Injection :ref:`configuration overview `. 
+// [#extension: envoy.filters.http.fault] + +// [#next-free-field: 6] +message FaultAbort { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.fault.v3.FaultAbort"; + + // Fault aborts are controlled via an HTTP header (if applicable). See the + // :ref:`HTTP fault filter ` documentation for + // more information. + message HeaderAbort { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.fault.v3.FaultAbort.HeaderAbort"; + } + + reserved 1; + + oneof error_type { + option (validate.required) = true; + + // HTTP status code to use to abort the HTTP request. + uint32 http_status = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}]; + + // gRPC status code to use to abort the gRPC request. + uint32 grpc_status = 5; + + // Fault aborts are controlled via an HTTP header (if applicable). + HeaderAbort header_abort = 4; + } + + // The percentage of requests/operations/connections that will be aborted with the error code + // provided. + type.v3.FractionalPercent percentage = 3; +} + +// [#next-free-field: 15] +message HTTPFault { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.fault.v3.HTTPFault"; + + // If specified, the filter will inject delays based on the values in the + // object. + common.fault.v3.FaultDelay delay = 1; + + // If specified, the filter will abort requests based on the values in + // the object. At least *abort* or *delay* must be specified. + FaultAbort abort = 2; + + // Specifies the name of the (destination) upstream cluster that the + // filter should match on. Fault injection will be restricted to requests + // bound to the specific upstream cluster. + string upstream_cluster = 3; + + // Specifies a set of headers that the filter should match on. The fault + // injection filter can be applied selectively to requests that match a set of + // headers specified in the fault filter config. The chances of actual fault + // injection further depend on the value of the :ref:`percentage + // ` field. + // The filter will check the request's headers against all the specified + // headers in the filter config. A match will happen if all the headers in the + // config are present in the request with the same values (or based on + // presence if the *value* field is not in the config). + repeated config.route.v4alpha.HeaderMatcher headers = 4; + + // Faults are injected for the specified list of downstream hosts. If this + // setting is not set, faults are injected for all downstream nodes. + // Downstream node name is taken from :ref:`the HTTP + // x-envoy-downstream-service-node + // ` header and compared + // against downstream_nodes list. + repeated string downstream_nodes = 5; + + // The maximum number of faults that can be active at a single time via the configured fault + // filter. Note that because this setting can be overridden at the route level, it's possible + // for the number of active faults to be greater than this value (if injected via a different + // route). If not specified, defaults to unlimited. This setting can be overridden via + // `runtime ` and any faults that are not injected + // due to overflow will be indicated via the `faults_overflow + // ` stat. + // + // .. attention:: + // Like other :ref:`circuit breakers ` in Envoy, this is a fuzzy + // limit. It's possible for the number of active faults to rise slightly above the configured + // amount due to the implementation details. 
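A minimal illustrative fault configuration combining a delay and an abort (percentages, status, and cluster are placeholders; ``fixed_delay`` comes from the common fault proto imported above):

.. code-block:: yaml

  http_filters:
  - name: envoy.filters.http.fault
    typed_config:
      "@type": type.googleapis.com/envoy.extensions.filters.http.fault.v3.HTTPFault
      max_active_faults: 100       # fuzzy cap on concurrently injected faults
      upstream_cluster: backend    # restrict injection to requests bound for this cluster
      delay:
        fixed_delay: 2s            # 2s delay for 5% of requests
        percentage:
          numerator: 5
          denominator: HUNDRED
      abort:
        http_status: 503           # 503 abort for 1% of requests
        percentage:
          numerator: 1
          denominator: HUNDRED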
+ google.protobuf.UInt32Value max_active_faults = 6; + + // The response rate limit to be applied to the response body of the stream. When configured, + // the percentage can be overridden by the :ref:`fault.http.rate_limit.response_percent + // ` runtime key. + // + // .. attention:: + // This is a per-stream limit versus a connection level limit. This means that concurrent streams + // will each get an independent limit. + common.fault.v3.FaultRateLimit response_rate_limit = 7; + + // The runtime key to override the :ref:`default ` + // runtime. The default is: fault.http.delay.fixed_delay_percent + string delay_percent_runtime = 8; + + // The runtime key to override the :ref:`default ` + // runtime. The default is: fault.http.abort.abort_percent + string abort_percent_runtime = 9; + + // The runtime key to override the :ref:`default ` + // runtime. The default is: fault.http.delay.fixed_duration_ms + string delay_duration_runtime = 10; + + // The runtime key to override the :ref:`default ` + // runtime. The default is: fault.http.abort.http_status + string abort_http_status_runtime = 11; + + // The runtime key to override the :ref:`default ` + // runtime. The default is: fault.http.max_active_faults + string max_active_faults_runtime = 12; + + // The runtime key to override the :ref:`default ` + // runtime. The default is: fault.http.rate_limit.response_percent + string response_rate_limit_percent_runtime = 13; + + // The runtime key to override the :ref:`default ` + // runtime. The default is: fault.http.abort.grpc_status + string abort_grpc_status_runtime = 14; +} diff --git a/generated_api_shadow/envoy/extensions/filters/http/health_check/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/http/health_check/v4alpha/BUILD new file mode 100644 index 000000000000..97b6ad2feb2d --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/http/health_check/v4alpha/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/route/v4alpha:pkg", + "//envoy/extensions/filters/http/health_check/v3:pkg", + "//envoy/type/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/filters/http/health_check/v4alpha/health_check.proto b/generated_api_shadow/envoy/extensions/filters/http/health_check/v4alpha/health_check.proto new file mode 100644 index 000000000000..f530363e2380 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/http/health_check/v4alpha/health_check.proto @@ -0,0 +1,47 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.health_check.v4alpha; + +import "envoy/config/route/v4alpha/route_components.proto"; +import "envoy/type/v3/percent.proto"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.health_check.v4alpha"; +option java_outer_classname = "HealthCheckProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Health check] +// Health check :ref:`configuration overview `. 
+// [#extension: envoy.filters.http.health_check] + +// [#next-free-field: 6] +message HealthCheck { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.health_check.v3.HealthCheck"; + + reserved 2; + + // Specifies whether the filter operates in pass through mode or not. + google.protobuf.BoolValue pass_through_mode = 1 [(validate.rules).message = {required: true}]; + + // If operating in pass through mode, the amount of time in milliseconds + // that the filter should cache the upstream response. + google.protobuf.Duration cache_time = 3; + + // If operating in non-pass-through mode, specifies a set of upstream cluster + // names and the minimum percentage of servers in each of those clusters that + // must be healthy or degraded in order for the filter to return a 200. + map cluster_min_healthy_percentages = 4; + + // Specifies a set of health check request headers to match on. The health check filter will + // check a request’s headers against all the specified headers. To specify the health check + // endpoint, set the ``:path`` header to match on. + repeated config.route.v4alpha.HeaderMatcher headers = 5; +} diff --git a/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v4alpha/BUILD new file mode 100644 index 000000000000..a9f9b8bc44c3 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v4alpha/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v4alpha:pkg", + "//envoy/config/route/v4alpha:pkg", + "//envoy/extensions/filters/http/jwt_authn/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v4alpha/config.proto b/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v4alpha/config.proto new file mode 100644 index 000000000000..302cf7253dde --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v4alpha/config.proto @@ -0,0 +1,531 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.jwt_authn.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/core/v4alpha/http_uri.proto"; +import "envoy/config/route/v4alpha/route_components.proto"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/empty.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.jwt_authn.v4alpha"; +option java_outer_classname = "ConfigProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: JWT Authentication] +// JWT Authentication :ref:`configuration overview `. +// [#extension: envoy.filters.http.jwt_authn] + +// Please see following for JWT authentication flow: +// +// * `JSON Web Token (JWT) `_ +// * `The OAuth 2.0 Authorization Framework `_ +// * `OpenID Connect `_ +// +// A JwtProvider message specifies how a JSON Web Token (JWT) can be verified. It specifies: +// +// * issuer: the principal that issues the JWT. It has to match the one from the token. +// * allowed audiences: the ones in the token have to be listed here. 
+// * how to fetch public key JWKS to verify the token signature. +// * how to extract JWT token in the request. +// * how to pass successfully verified token payload. +// +// Example: +// +// .. code-block:: yaml +// +// issuer: https://example.com +// audiences: +// - bookstore_android.apps.googleusercontent.com +// - bookstore_web.apps.googleusercontent.com +// remote_jwks: +// http_uri: +// uri: https://example.com/.well-known/jwks.json +// cluster: example_jwks_cluster +// cache_duration: +// seconds: 300 +// +// [#next-free-field: 10] +message JwtProvider { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.jwt_authn.v3.JwtProvider"; + + // Specify the `principal `_ that issued + // the JWT, usually a URL or an email address. + // + // Example: https://securetoken.google.com + // Example: 1234567-compute@developer.gserviceaccount.com + // + string issuer = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The list of JWT `audiences `_ are + // allowed to access. A JWT containing any of these audiences will be accepted. If not specified, + // will not check audiences in the token. + // + // Example: + // + // .. code-block:: yaml + // + // audiences: + // - bookstore_android.apps.googleusercontent.com + // - bookstore_web.apps.googleusercontent.com + // + repeated string audiences = 2; + + // `JSON Web Key Set (JWKS) `_ is needed to + // validate signature of a JWT. This field specifies where to fetch JWKS. + oneof jwks_source_specifier { + option (validate.required) = true; + + // JWKS can be fetched from remote server via HTTP/HTTPS. This field specifies the remote HTTP + // URI and how the fetched JWKS should be cached. + // + // Example: + // + // .. code-block:: yaml + // + // remote_jwks: + // http_uri: + // uri: https://www.googleapis.com/oauth2/v1/certs + // cluster: jwt.www.googleapis.com|443 + // cache_duration: + // seconds: 300 + // + RemoteJwks remote_jwks = 3; + + // JWKS is in local data source. It could be either in a local file or embedded in the + // inline_string. + // + // Example: local file + // + // .. code-block:: yaml + // + // local_jwks: + // filename: /etc/envoy/jwks/jwks1.txt + // + // Example: inline_string + // + // .. code-block:: yaml + // + // local_jwks: + // inline_string: ACADADADADA + // + config.core.v4alpha.DataSource local_jwks = 4; + } + + // If false, the JWT is removed in the request after a success verification. If true, the JWT is + // not removed in the request. Default value is false. + bool forward = 5; + + // Two fields below define where to extract the JWT from an HTTP request. + // + // If no explicit location is specified, the following default locations are tried in order: + // + // 1. The Authorization header using the `Bearer schema + // `_. Example:: + // + // Authorization: Bearer . + // + // 2. `access_token `_ query parameter. + // + // Multiple JWTs can be verified for a request. Each JWT has to be extracted from the locations + // its provider specified or from the default locations. + // + // Specify the HTTP headers to extract JWT token. For examples, following config: + // + // .. code-block:: yaml + // + // from_headers: + // - name: x-goog-iap-jwt-assertion + // + // can be used to extract token from header:: + // + // ``x-goog-iap-jwt-assertion: ``. + // + repeated JwtHeader from_headers = 6; + + // JWT is sent in a query parameter. `jwt_params` represents the query parameter names. + // + // For example, if config is: + // + // .. 
code-block:: yaml + // + // from_params: + // - jwt_token + // + // The JWT format in query parameter is:: + // + // /path?jwt_token= + // + repeated string from_params = 7; + + // This field specifies the header name to forward a successfully verified JWT payload to the + // backend. The forwarded data is:: + // + // base64url_encoded(jwt_payload_in_JSON) + // + // If it is not specified, the payload will not be forwarded. + string forward_payload_header = 8 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; + + // If non empty, successfully verified JWT payloads will be written to StreamInfo DynamicMetadata + // in the format as: *namespace* is the jwt_authn filter name as **envoy.filters.http.jwt_authn** + // The value is the *protobuf::Struct*. The value of this field will be the key for its *fields* + // and the value is the *protobuf::Struct* converted from JWT JSON payload. + // + // For example, if payload_in_metadata is *my_payload*: + // + // .. code-block:: yaml + // + // envoy.filters.http.jwt_authn: + // my_payload: + // iss: https://example.com + // sub: test@example.com + // aud: https://example.com + // exp: 1501281058 + // + string payload_in_metadata = 9; +} + +// This message specifies how to fetch JWKS from remote and how to cache it. +message RemoteJwks { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.jwt_authn.v3.RemoteJwks"; + + // The HTTP URI to fetch the JWKS. For example: + // + // .. code-block:: yaml + // + // http_uri: + // uri: https://www.googleapis.com/oauth2/v1/certs + // cluster: jwt.www.googleapis.com|443 + // + config.core.v4alpha.HttpUri http_uri = 1; + + // Duration after which the cached JWKS should be expired. If not specified, default cache + // duration is 5 minutes. + google.protobuf.Duration cache_duration = 2; +} + +// This message specifies a header location to extract JWT token. +message JwtHeader { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.jwt_authn.v3.JwtHeader"; + + // The HTTP header name. + string name = 1 + [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; + + // The value prefix. The value format is "value_prefix" + // For example, for "Authorization: Bearer ", value_prefix="Bearer " with a space at the + // end. + string value_prefix = 2 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; +} + +// Specify a required provider with audiences. +message ProviderWithAudiences { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.jwt_authn.v3.ProviderWithAudiences"; + + // Specify a required provider name. + string provider_name = 1; + + // This field overrides the one specified in the JwtProvider. + repeated string audiences = 2; +} + +// This message specifies a Jwt requirement. An empty message means JWT verification is not +// required. Here are some config examples: +// +// .. 
code-block:: yaml +// +// # Example 1: not required with an empty message +// +// # Example 2: require A +// provider_name: provider-A +// +// # Example 3: require A or B +// requires_any: +// requirements: +// - provider_name: provider-A +// - provider_name: provider-B +// +// # Example 4: require A and B +// requires_all: +// requirements: +// - provider_name: provider-A +// - provider_name: provider-B +// +// # Example 5: require A and (B or C) +// requires_all: +// requirements: +// - provider_name: provider-A +// - requires_any: +// requirements: +// - provider_name: provider-B +// - provider_name: provider-C +// +// # Example 6: require A or (B and C) +// requires_any: +// requirements: +// - provider_name: provider-A +// - requires_all: +// requirements: +// - provider_name: provider-B +// - provider_name: provider-C +// +// # Example 7: A is optional (if token from A is provided, it must be valid, but also allows +// missing token.) +// requires_any: +// requirements: +// - provider_name: provider-A +// - allow_missing: {} +// +// # Example 8: A is optional and B is required. +// requires_all: +// requirements: +// - requires_any: +// requirements: +// - provider_name: provider-A +// - allow_missing: {} +// - provider_name: provider-B +// +// [#next-free-field: 7] +message JwtRequirement { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.jwt_authn.v3.JwtRequirement"; + + oneof requires_type { + // Specify a required provider name. + string provider_name = 1; + + // Specify a required provider with audiences. + ProviderWithAudiences provider_and_audiences = 2; + + // Specify list of JwtRequirement. Their results are OR-ed. + // If any one of them passes, the result is passed. + JwtRequirementOrList requires_any = 3; + + // Specify list of JwtRequirement. Their results are AND-ed. + // All of them must pass, if one of them fails or missing, it fails. + JwtRequirementAndList requires_all = 4; + + // The requirement is always satisfied even if JWT is missing or the JWT + // verification fails. A typical usage is: this filter is used to only verify + // JWTs and pass the verified JWT payloads to another filter, the other filter + // will make decision. In this mode, all JWT tokens will be verified. + google.protobuf.Empty allow_missing_or_failed = 5; + + // The requirement is satisfied if JWT is missing, but failed if JWT is + // presented but invalid. Similar to allow_missing_or_failed, this is used + // to only verify JWTs and pass the verified payload to another filter. The + // different is this mode will reject requests with invalid tokens. + google.protobuf.Empty allow_missing = 6; + } +} + +// This message specifies a list of RequiredProvider. +// Their results are OR-ed; if any one of them passes, the result is passed +message JwtRequirementOrList { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.jwt_authn.v3.JwtRequirementOrList"; + + // Specify a list of JwtRequirement. + repeated JwtRequirement requirements = 1 [(validate.rules).repeated = {min_items: 2}]; +} + +// This message specifies a list of RequiredProvider. +// Their results are AND-ed; all of them must pass, if one of them fails or missing, it fails. +message JwtRequirementAndList { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.jwt_authn.v3.JwtRequirementAndList"; + + // Specify a list of JwtRequirement. 
+ repeated JwtRequirement requirements = 1 [(validate.rules).repeated = {min_items: 2}]; +} + +// This message specifies a Jwt requirement for a specific Route condition. +// Example 1: +// +// .. code-block:: yaml +// +// - match: +// prefix: /healthz +// +// In above example, "requires" field is empty for /healthz prefix match, +// it means that requests matching the path prefix don't require JWT authentication. +// +// Example 2: +// +// .. code-block:: yaml +// +// - match: +// prefix: / +// requires: { provider_name: provider-A } +// +// In above example, all requests matched the path prefix require jwt authentication +// from "provider-A". +message RequirementRule { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.jwt_authn.v3.RequirementRule"; + + // The route matching parameter. Only when the match is satisfied, the "requires" field will + // apply. + // + // For example: following match will match all requests. + // + // .. code-block:: yaml + // + // match: + // prefix: / + // + config.route.v4alpha.RouteMatch match = 1 [(validate.rules).message = {required: true}]; + + // Specify a Jwt Requirement. Please detail comment in message JwtRequirement. + JwtRequirement requires = 2; +} + +// This message specifies Jwt requirements based on stream_info.filterState. +// This FilterState should use `Router::StringAccessor` object to set a string value. +// Other HTTP filters can use it to specify Jwt requirements dynamically. +// +// Example: +// +// .. code-block:: yaml +// +// name: jwt_selector +// requires: +// issuer_1: +// provider_name: issuer1 +// issuer_2: +// provider_name: issuer2 +// +// If a filter set "jwt_selector" with "issuer_1" to FilterState for a request, +// jwt_authn filter will use JwtRequirement{"provider_name": "issuer1"} to verify. +message FilterStateRule { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.jwt_authn.v3.FilterStateRule"; + + // The filter state name to retrieve the `Router::StringAccessor` object. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // A map of string keys to requirements. The string key is the string value + // in the FilterState with the name specified in the *name* field above. + map requires = 3; +} + +// This is the Envoy HTTP filter config for JWT authentication. +// +// For example: +// +// .. code-block:: yaml +// +// providers: +// provider1: +// issuer: issuer1 +// audiences: +// - audience1 +// - audience2 +// remote_jwks: +// http_uri: +// uri: https://example.com/.well-known/jwks.json +// cluster: example_jwks_cluster +// provider2: +// issuer: issuer2 +// local_jwks: +// inline_string: jwks_string +// +// rules: +// # Not jwt verification is required for /health path +// - match: +// prefix: /health +// +// # Jwt verification for provider1 is required for path prefixed with "prefix" +// - match: +// prefix: /prefix +// requires: +// provider_name: provider1 +// +// # Jwt verification for either provider1 or provider2 is required for all other requests. +// - match: +// prefix: / +// requires: +// requires_any: +// requirements: +// - provider_name: provider1 +// - provider_name: provider2 +// +message JwtAuthentication { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.jwt_authn.v3.JwtAuthentication"; + + // Map of provider names to JwtProviders. + // + // .. 
code-block:: yaml + // + // providers: + // provider1: + // issuer: issuer1 + // audiences: + // - audience1 + // - audience2 + // remote_jwks: + // http_uri: + // uri: https://example.com/.well-known/jwks.json + // cluster: example_jwks_cluster + // provider2: + // issuer: provider2 + // local_jwks: + // inline_string: jwks_string + // + map providers = 1; + + // Specifies requirements based on the route matches. The first matched requirement will be + // applied. If there are overlapped match conditions, please put the most specific match first. + // + // Examples + // + // .. code-block:: yaml + // + // rules: + // - match: + // prefix: /healthz + // - match: + // prefix: /baz + // requires: + // provider_name: provider1 + // - match: + // prefix: /foo + // requires: + // requires_any: + // requirements: + // - provider_name: provider1 + // - provider_name: provider2 + // - match: + // prefix: /bar + // requires: + // requires_all: + // requirements: + // - provider_name: provider1 + // - provider_name: provider2 + // + repeated RequirementRule rules = 2; + + // This message specifies Jwt requirements based on stream_info.filterState. + // Other HTTP filters can use it to specify Jwt requirements dynamically. + // The *rules* field above is checked first, if it could not find any matches, + // check this one. + FilterStateRule filter_state_rules = 3; + + // When set to true, bypass the `CORS preflight request + // `_ regardless of JWT + // requirements specified in the rules. + bool bypass_cors_preflight = 4; +} diff --git a/generated_api_shadow/envoy/extensions/filters/http/router/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/http/router/v4alpha/BUILD new file mode 100644 index 000000000000..df329be54230 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/http/router/v4alpha/BUILD @@ -0,0 +1,13 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/accesslog/v4alpha:pkg", + "//envoy/extensions/filters/http/router/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/filters/http/router/v4alpha/router.proto b/generated_api_shadow/envoy/extensions/filters/http/router/v4alpha/router.proto new file mode 100644 index 000000000000..d0baaab84a39 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/http/router/v4alpha/router.proto @@ -0,0 +1,81 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.router.v4alpha; + +import "envoy/config/accesslog/v4alpha/accesslog.proto"; + +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.router.v4alpha"; +option java_outer_classname = "RouterProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Router] +// Router :ref:`configuration overview `. +// [#extension: envoy.filters.http.router] + +// [#next-free-field: 7] +message Router { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.router.v3.Router"; + + // Whether the router generates dynamic cluster statistics. Defaults to + // true. Can be disabled in high performance scenarios. 
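+  //
+  // As an illustrative, non-normative example, disabling dynamic cluster statistics in the
+  // router filter configuration could be expressed as below (the surrounding filter wiring is
+  // assumed and omitted):
+  //
+  // .. code-block:: yaml
+  //
+  //   dynamic_stats: false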
+ google.protobuf.BoolValue dynamic_stats = 1; + + // Whether to start a child span for egress routed calls. This can be + // useful in scenarios where other filters (auth, ratelimit, etc.) make + // outbound calls and have child spans rooted at the same ingress + // parent. Defaults to false. + bool start_child_span = 2; + + // Configuration for HTTP upstream logs emitted by the router. Upstream logs + // are configured in the same way as access logs, but each log entry represents + // an upstream request. Presuming retries are configured, multiple upstream + // requests may be made for each downstream (inbound) request. + repeated config.accesslog.v4alpha.AccessLog upstream_log = 3; + + // Do not add any additional *x-envoy-* headers to requests or responses. This + // only affects the :ref:`router filter generated *x-envoy-* headers + // `, other Envoy filters and the HTTP + // connection manager may continue to set *x-envoy-* headers. + bool suppress_envoy_headers = 4; + + // Specifies a list of HTTP headers to strictly validate. Envoy will reject a + // request and respond with HTTP status 400 if the request contains an invalid + // value for any of the headers listed in this field. Strict header checking + // is only supported for the following headers: + // + // Value must be a ','-delimited list (i.e. no spaces) of supported retry + // policy values: + // + // * :ref:`config_http_filters_router_x-envoy-retry-grpc-on` + // * :ref:`config_http_filters_router_x-envoy-retry-on` + // + // Value must be an integer: + // + // * :ref:`config_http_filters_router_x-envoy-max-retries` + // * :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms` + // * :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` + repeated string strict_check_headers = 5 [(validate.rules).repeated = { + items { + string { + in: "x-envoy-upstream-rq-timeout-ms" + in: "x-envoy-upstream-rq-per-try-timeout-ms" + in: "x-envoy-max-retries" + in: "x-envoy-retry-grpc-on" + in: "x-envoy-retry-on" + } + } + }]; + + // If not set, ingress Envoy will ignore + // :ref:`config_http_filters_router_x-envoy-expected-rq-timeout-ms` header, populated by egress + // Envoy, when deriving timeout for upstream cluster. + bool respect_expected_rq_timeout = 6; +} diff --git a/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v4alpha/BUILD new file mode 100644 index 000000000000..663eb0d52d25 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v4alpha/BUILD @@ -0,0 +1,15 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/route/v4alpha:pkg", + "//envoy/extensions/filters/network/dubbo_proxy/v3:pkg", + "//envoy/type/matcher/v4alpha:pkg", + "//envoy/type/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v4alpha/dubbo_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v4alpha/dubbo_proxy.proto new file mode 100644 index 000000000000..4894c7693fd7 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v4alpha/dubbo_proxy.proto @@ -0,0 +1,70 @@ +syntax = "proto3"; + +package envoy.extensions.filters.network.dubbo_proxy.v4alpha; + +import "envoy/extensions/filters/network/dubbo_proxy/v4alpha/route.proto"; + +import "google/protobuf/any.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.dubbo_proxy.v4alpha"; +option java_outer_classname = "DubboProxyProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Dubbo Proxy] +// Dubbo Proxy :ref:`configuration overview `. +// [#extension: envoy.filters.network.dubbo_proxy] + +// Dubbo Protocol types supported by Envoy. +enum ProtocolType { + // the default protocol. + Dubbo = 0; +} + +// Dubbo Serialization types supported by Envoy. +enum SerializationType { + // the default serialization protocol. + Hessian2 = 0; +} + +// [#next-free-field: 6] +message DubboProxy { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.dubbo_proxy.v3.DubboProxy"; + + // The human readable prefix to use when emitting statistics. + string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Configure the protocol used. + ProtocolType protocol_type = 2 [(validate.rules).enum = {defined_only: true}]; + + // Configure the serialization protocol used. + SerializationType serialization_type = 3 [(validate.rules).enum = {defined_only: true}]; + + // The route table for the connection manager is static and is specified in this property. + repeated RouteConfiguration route_config = 4; + + // A list of individual Dubbo filters that make up the filter chain for requests made to the + // Dubbo proxy. Order matters as the filters are processed sequentially. For backwards + // compatibility, if no dubbo_filters are specified, a default Dubbo router filter + // (`envoy.filters.dubbo.router`) is used. + repeated DubboFilter dubbo_filters = 5; +} + +// DubboFilter configures a Dubbo filter. +message DubboFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.dubbo_proxy.v3.DubboFilter"; + + // The name of the filter to instantiate. The name must match a supported + // filter. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Filter specific configuration which depends on the filter being + // instantiated. See the supported filters for further documentation. 
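+  //
+  // As a hedged illustration only, explicitly listing the default router filter mentioned
+  // above (with no filter-specific configuration supplied) could look like:
+  //
+  // .. code-block:: yaml
+  //
+  //   dubbo_filters:
+  //   - name: envoy.filters.dubbo.router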
+ google.protobuf.Any config = 2; +} diff --git a/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v4alpha/route.proto b/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v4alpha/route.proto new file mode 100644 index 000000000000..c2ff03b33fb1 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v4alpha/route.proto @@ -0,0 +1,121 @@ +syntax = "proto3"; + +package envoy.extensions.filters.network.dubbo_proxy.v4alpha; + +import "envoy/config/route/v4alpha/route_components.proto"; +import "envoy/type/matcher/v4alpha/string.proto"; +import "envoy/type/v3/range.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.dubbo_proxy.v4alpha"; +option java_outer_classname = "RouteProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Dubbo Proxy Route Configuration] +// Dubbo Proxy :ref:`configuration overview `. + +// [#next-free-field: 6] +message RouteConfiguration { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.dubbo_proxy.v3.RouteConfiguration"; + + // The name of the route configuration. Reserved for future use in asynchronous route discovery. + string name = 1; + + // The interface name of the service. + string interface = 2; + + // Which group does the interface belong to. + string group = 3; + + // The version number of the interface. + string version = 4; + + // The list of routes that will be matched, in order, against incoming requests. The first route + // that matches will be used. + repeated Route routes = 5; +} + +message Route { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.dubbo_proxy.v3.Route"; + + // Route matching parameters. + RouteMatch match = 1 [(validate.rules).message = {required: true}]; + + // Route request to some upstream cluster. + RouteAction route = 2 [(validate.rules).message = {required: true}]; +} + +message RouteMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.dubbo_proxy.v3.RouteMatch"; + + // Method level routing matching. + MethodMatch method = 1; + + // Specifies a set of headers that the route should match on. The router will check the request’s + // headers against all the specified headers in the route config. A match will happen if all the + // headers in the route are present in the request with the same values (or based on presence if + // the value field is not in the config). + repeated config.route.v4alpha.HeaderMatcher headers = 2; +} + +message RouteAction { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.dubbo_proxy.v3.RouteAction"; + + oneof cluster_specifier { + option (validate.required) = true; + + // Indicates the upstream cluster to which the request should be routed. + string cluster = 1; + + // Multiple upstream clusters can be specified for a given route. The + // request is routed to one of the upstream clusters based on weights + // assigned to each cluster. + // Currently ClusterWeight only supports the name and weight fields. 
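+    //
+    // As an illustrative sketch (the cluster names are hypothetical), an 80/20 split between
+    // two upstream clusters could be expressed as:
+    //
+    // .. code-block:: yaml
+    //
+    //   weighted_clusters:
+    //     clusters:
+    //     - name: cluster_a
+    //       weight: 80
+    //     - name: cluster_b
+    //       weight: 20
+    //
+    // The weights here sum to 100, matching the default total weight of the referenced
+    // WeightedCluster message.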
+ config.route.v4alpha.WeightedCluster weighted_clusters = 2; + } +} + +message MethodMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.dubbo_proxy.v3.MethodMatch"; + + // The parameter matching type. + message ParameterMatchSpecifier { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.dubbo_proxy.v3.MethodMatch.ParameterMatchSpecifier"; + + oneof parameter_match_specifier { + // If specified, header match will be performed based on the value of the header. + string exact_match = 3; + + // If specified, header match will be performed based on range. + // The rule will match if the request header value is within this range. + // The entire request header value must represent an integer in base 10 notation: consisting + // of an optional plus or minus sign followed by a sequence of digits. The rule will not match + // if the header value does not represent an integer. Match will fail for empty values, + // floating point numbers or if only a subsequence of the header value is an integer. + // + // Examples: + // + // * For range [-10,0), route will match for header value -1, but not for 0, + // "somestring", 10.9, "-1somestring" + type.v3.Int64Range range_match = 4; + } + } + + // The name of the method. + type.matcher.v4alpha.StringMatcher name = 1; + + // Method parameter definition. + // The key is the parameter index, starting from 0. + // The value is the parameter matching type. + map params_match = 2; +} diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/BUILD index 792ccf7ab677..57c9eebb5b19 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/BUILD +++ b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/BUILD @@ -7,7 +7,7 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/annotations:pkg", - "//envoy/config/accesslog/v3:pkg", + "//envoy/config/accesslog/v4alpha:pkg", "//envoy/config/core/v4alpha:pkg", "//envoy/config/route/v4alpha:pkg", "//envoy/config/trace/v4alpha:pkg", diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto index 5eaefe16037e..03a15d832732 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package envoy.extensions.filters.network.http_connection_manager.v4alpha; -import "envoy/config/accesslog/v3/accesslog.proto"; +import "envoy/config/accesslog/v4alpha/accesslog.proto"; import "envoy/config/core/v4alpha/config_source.proto"; import "envoy/config/core/v4alpha/protocol.proto"; import "envoy/config/route/v4alpha/route.proto"; @@ -383,7 +383,7 @@ message HttpConnectionManager { // Configuration for :ref:`HTTP access logs ` // emitted by the connection manager. 
- repeated config.accesslog.v3.AccessLog access_log = 13; + repeated config.accesslog.v4alpha.AccessLog access_log = 13; // If set to true, the connection manager will use the real remote address // of the client connection when determining internal versus external origin and manipulating diff --git a/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/BUILD new file mode 100644 index 000000000000..d8d88f7f3bb4 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/BUILD @@ -0,0 +1,15 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v4alpha:pkg", + "//envoy/config/route/v4alpha:pkg", + "//envoy/extensions/filters/network/rocketmq_proxy/v3:pkg", + "//envoy/type/matcher/v4alpha:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/rocketmq_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/rocketmq_proxy.proto new file mode 100644 index 000000000000..a765734e66db --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/rocketmq_proxy.proto @@ -0,0 +1,39 @@ +syntax = "proto3"; + +package envoy.extensions.filters.network.rocketmq_proxy.v4alpha; + +import "envoy/extensions/filters/network/rocketmq_proxy/v4alpha/route.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.rocketmq_proxy.v4alpha"; +option java_outer_classname = "RocketmqProxyProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: RocketMQ Proxy] +// RocketMQ Proxy :ref:`configuration overview `. +// [#extension: envoy.filters.network.rocketmq_proxy] + +message RocketmqProxy { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.rocketmq_proxy.v3.RocketmqProxy"; + + // The human readable prefix to use when emitting statistics. + string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The route table for the connection manager is specified in this property. + RouteConfiguration route_config = 2; + + // The largest duration transient object expected to live, more than 10s is recommended. + google.protobuf.Duration transient_object_life_span = 3; + + // If develop_mode is enabled, this proxy plugin may work without dedicated traffic intercepting + // facility without considering backward compatibility of exiting RocketMQ client SDK. 
+ bool develop_mode = 4; +} diff --git a/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/route.proto b/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/route.proto new file mode 100644 index 000000000000..995e8bcb05e3 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/route.proto @@ -0,0 +1,67 @@ +syntax = "proto3"; + +package envoy.extensions.filters.network.rocketmq_proxy.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/route/v4alpha/route_components.proto"; +import "envoy/type/matcher/v4alpha/string.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.rocketmq_proxy.v4alpha"; +option java_outer_classname = "RouteProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Rocketmq Proxy Route Configuration] +// Rocketmq Proxy :ref:`configuration overview `. + +message RouteConfiguration { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.rocketmq_proxy.v3.RouteConfiguration"; + + // The name of the route configuration. + string name = 1; + + // The list of routes that will be matched, in order, against incoming requests. The first route + // that matches will be used. + repeated Route routes = 2; +} + +message Route { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.rocketmq_proxy.v3.Route"; + + // Route matching parameters. + RouteMatch match = 1 [(validate.rules).message = {required: true}]; + + // Route request to some upstream cluster. + RouteAction route = 2 [(validate.rules).message = {required: true}]; +} + +message RouteMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.rocketmq_proxy.v3.RouteMatch"; + + // The name of the topic. + type.matcher.v4alpha.StringMatcher topic = 1 [(validate.rules).message = {required: true}]; + + // Specifies a set of headers that the route should match on. The router will check the request’s + // headers against all the specified headers in the route config. A match will happen if all the + // headers in the route are present in the request with the same values (or based on presence if + // the value field is not in the config). + repeated config.route.v4alpha.HeaderMatcher headers = 2; +} + +message RouteAction { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.rocketmq_proxy.v3.RouteAction"; + + // Indicates the upstream cluster to which the request should be routed. + string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Optional endpoint metadata match criteria used by the subset load balancer. + config.core.v4alpha.Metadata metadata_match = 2; +} diff --git a/generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v4alpha/BUILD new file mode 100644 index 000000000000..3825be9a8afc --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v4alpha/BUILD @@ -0,0 +1,15 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/accesslog/v4alpha:pkg", + "//envoy/config/core/v4alpha:pkg", + "//envoy/extensions/filters/network/tcp_proxy/v3:pkg", + "//envoy/type/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v4alpha/tcp_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v4alpha/tcp_proxy.proto new file mode 100644 index 000000000000..1857f2abcd4e --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v4alpha/tcp_proxy.proto @@ -0,0 +1,137 @@ +syntax = "proto3"; + +package envoy.extensions.filters.network.tcp_proxy.v4alpha; + +import "envoy/config/accesslog/v4alpha/accesslog.proto"; +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/type/v3/hash_policy.proto"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.tcp_proxy.v4alpha"; +option java_outer_classname = "TcpProxyProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: TCP Proxy] +// TCP Proxy :ref:`configuration overview `. +// [#extension: envoy.filters.network.tcp_proxy] + +// [#next-free-field: 13] +message TcpProxy { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy"; + + // Allows for specification of multiple upstream clusters along with weights + // that indicate the percentage of traffic to be forwarded to each cluster. + // The router selects an upstream cluster based on these weights. + message WeightedCluster { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy.WeightedCluster"; + + message ClusterWeight { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy.WeightedCluster.ClusterWeight"; + + // Name of the upstream cluster. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // When a request matches the route, the choice of an upstream cluster is + // determined by its weight. The sum of weights across all entries in the + // clusters array determines the total weight. + uint32 weight = 2 [(validate.rules).uint32 = {gte: 1}]; + + // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints + // in the upstream cluster with metadata matching what is set in this field will be considered + // for load balancing. Note that this will be merged with what's provided in + // :ref:`TcpProxy.metadata_match + // `, with values + // here taking precedence. The filter name should be specified as *envoy.lb*. + config.core.v4alpha.Metadata metadata_match = 3; + } + + // Specifies one or more upstream clusters associated with the route. + repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}]; + } + + // Configuration for tunneling TCP over other transports or application layers. + // Currently, only HTTP/2 is supported. When other options exist, HTTP/2 will + // remain the default. 
+ message TunnelingConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy.TunnelingConfig"; + + // The hostname to send in the synthesized CONNECT headers to the upstream proxy. + string hostname = 1 [(validate.rules).string = {min_bytes: 1}]; + } + + reserved 6; + + reserved "deprecated_v1"; + + // The prefix to use when emitting :ref:`statistics + // `. + string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + + oneof cluster_specifier { + option (validate.required) = true; + + // The upstream cluster to connect to. + string cluster = 2; + + // Multiple upstream clusters can be specified for a given route. The + // request is routed to one of the upstream clusters based on weights + // assigned to each cluster. + WeightedCluster weighted_clusters = 10; + } + + // Optional endpoint metadata match criteria. Only endpoints in the upstream + // cluster with metadata matching that set in metadata_match will be + // considered. The filter name should be specified as *envoy.lb*. + config.core.v4alpha.Metadata metadata_match = 9; + + // The idle timeout for connections managed by the TCP proxy filter. The idle timeout + // is defined as the period in which there are no bytes sent or received on either + // the upstream or downstream connection. If not set, the default idle timeout is 1 hour. If set + // to 0s, the timeout will be disabled. + // + // .. warning:: + // Disabling this timeout has a highly likelihood of yielding connection leaks due to lost TCP + // FIN packets, etc. + google.protobuf.Duration idle_timeout = 8; + + // [#not-implemented-hide:] The idle timeout for connections managed by the TCP proxy + // filter. The idle timeout is defined as the period in which there is no + // active traffic. If not set, there is no idle timeout. When the idle timeout + // is reached the connection will be closed. The distinction between + // downstream_idle_timeout/upstream_idle_timeout provides a means to set + // timeout based on the last byte sent on the downstream/upstream connection. + google.protobuf.Duration downstream_idle_timeout = 3; + + // [#not-implemented-hide:] + google.protobuf.Duration upstream_idle_timeout = 4; + + // Configuration for :ref:`access logs ` + // emitted by the this tcp_proxy. + repeated config.accesslog.v4alpha.AccessLog access_log = 5; + + // The maximum number of unsuccessful connection attempts that will be made before + // giving up. If the parameter is not specified, 1 connection attempt will be made. + google.protobuf.UInt32Value max_connect_attempts = 7 [(validate.rules).uint32 = {gte: 1}]; + + // Optional configuration for TCP proxy hash policy. If hash_policy is not set, the hash-based + // load balancing algorithms will select a host randomly. Currently the number of hash policies is + // limited to 1. + repeated type.v3.HashPolicy hash_policy = 11 [(validate.rules).repeated = {max_items: 1}]; + + // [#not-implemented-hide:] feature in progress + // If set, this configures tunneling, e.g. configuration options to tunnel multiple TCP + // payloads over a shared HTTP/2 tunnel. If this message is absent, the payload + // will be proxied upstream as per usual. 
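+  //
+  // As a speculative sketch only (the feature is marked in-progress above, and the hostname
+  // value is a placeholder), tunneling configuration might look like:
+  //
+  // .. code-block:: yaml
+  //
+  //   tunneling_config:
+  //     hostname: host.example.com:443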
+ TunnelingConfig tunneling_config = 12; +} diff --git a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v4alpha/BUILD new file mode 100644 index 000000000000..9ec74c0a9b83 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v4alpha/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v4alpha:pkg", + "//envoy/config/route/v4alpha:pkg", + "//envoy/extensions/filters/network/thrift_proxy/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v4alpha/route.proto b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v4alpha/route.proto new file mode 100644 index 000000000000..9b847d645a65 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v4alpha/route.proto @@ -0,0 +1,157 @@ +syntax = "proto3"; + +package envoy.extensions.filters.network.thrift_proxy.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/route/v4alpha/route_components.proto"; + +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.thrift_proxy.v4alpha"; +option java_outer_classname = "RouteProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Thrift Proxy Route Configuration] +// Thrift Proxy :ref:`configuration overview `. + +message RouteConfiguration { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.thrift_proxy.v3.RouteConfiguration"; + + // The name of the route configuration. Reserved for future use in asynchronous route discovery. + string name = 1; + + // The list of routes that will be matched, in order, against incoming requests. The first route + // that matches will be used. + repeated Route routes = 2; +} + +message Route { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.thrift_proxy.v3.Route"; + + // Route matching parameters. + RouteMatch match = 1 [(validate.rules).message = {required: true}]; + + // Route request to some upstream cluster. + RouteAction route = 2 [(validate.rules).message = {required: true}]; +} + +message RouteMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.thrift_proxy.v3.RouteMatch"; + + oneof match_specifier { + option (validate.required) = true; + + // If specified, the route must exactly match the request method name. As a special case, an + // empty string matches any request method name. + string method_name = 1; + + // If specified, the route must have the service name as the request method name prefix. As a + // special case, an empty string matches any service name. Only relevant when service + // multiplexing. + string service_name = 2; + } + + // Inverts whatever matching is done in the :ref:`method_name + // ` or + // :ref:`service_name + // ` fields. 
+ // Cannot be combined with wildcard matching as that would result in routes never being matched. + // + // .. note:: + // + // This does not invert matching done as part of the :ref:`headers field + // ` field. To + // invert header matching, see :ref:`invert_match + // `. + bool invert = 3; + + // Specifies a set of headers that the route should match on. The router will check the request’s + // headers against all the specified headers in the route config. A match will happen if all the + // headers in the route are present in the request with the same values (or based on presence if + // the value field is not in the config). Note that this only applies for Thrift transports and/or + // protocols that support headers. + repeated config.route.v4alpha.HeaderMatcher headers = 4; +} + +// [#next-free-field: 7] +message RouteAction { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.thrift_proxy.v3.RouteAction"; + + oneof cluster_specifier { + option (validate.required) = true; + + // Indicates a single upstream cluster to which the request should be routed + // to. + string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Multiple upstream clusters can be specified for a given route. The + // request is routed to one of the upstream clusters based on weights + // assigned to each cluster. + WeightedCluster weighted_clusters = 2; + + // Envoy will determine the cluster to route to by reading the value of the + // Thrift header named by cluster_header from the request headers. If the + // header is not found or the referenced cluster does not exist Envoy will + // respond with an unknown method exception or an internal error exception, + // respectively. + string cluster_header = 6 [(validate.rules).string = {min_bytes: 1}]; + } + + // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in + // the upstream cluster with metadata matching what is set in this field will be considered. + // Note that this will be merged with what's provided in :ref:`WeightedCluster.metadata_match + // `, + // with values there taking precedence. Keys and values should be provided under the "envoy.lb" + // metadata key. + config.core.v4alpha.Metadata metadata_match = 3; + + // Specifies a set of rate limit configurations that could be applied to the route. + // N.B. Thrift service or method name matching can be achieved by specifying a RequestHeaders + // action with the header name ":method-name". + repeated config.route.v4alpha.RateLimit rate_limits = 4; + + // Strip the service prefix from the method name, if there's a prefix. For + // example, the method call Service:method would end up being just method. + bool strip_service_name = 5; +} + +// Allows for specification of multiple upstream clusters along with weights that indicate the +// percentage of traffic to be forwarded to each cluster. The router selects an upstream cluster +// based on these weights. +message WeightedCluster { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.thrift_proxy.v3.WeightedCluster"; + + message ClusterWeight { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.thrift_proxy.v3.WeightedCluster.ClusterWeight"; + + // Name of the upstream cluster. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // When a request matches the route, the choice of an upstream cluster is determined by its + // weight. 
The sum of weights across all entries in the clusters array determines the total + // weight. + google.protobuf.UInt32Value weight = 2 [(validate.rules).uint32 = {gte: 1}]; + + // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in + // the upstream cluster with metadata matching what is set in this field, combined with what's + // provided in :ref:`RouteAction's metadata_match + // `, + // will be considered. Values here will take precedence. Keys and values should be provided + // under the "envoy.lb" metadata key. + config.core.v4alpha.Metadata metadata_match = 3; + } + + // Specifies one or more upstream clusters associated with the route. + repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}]; +} diff --git a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v4alpha/thrift_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v4alpha/thrift_proxy.proto new file mode 100644 index 000000000000..6bf055da3ce6 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v4alpha/thrift_proxy.proto @@ -0,0 +1,130 @@ +syntax = "proto3"; + +package envoy.extensions.filters.network.thrift_proxy.v4alpha; + +import "envoy/extensions/filters/network/thrift_proxy/v4alpha/route.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/struct.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.thrift_proxy.v4alpha"; +option java_outer_classname = "ThriftProxyProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Thrift Proxy] +// Thrift Proxy :ref:`configuration overview `. +// [#extension: envoy.filters.network.thrift_proxy] + +// Thrift transport types supported by Envoy. +enum TransportType { + // For downstream connections, the Thrift proxy will attempt to determine which transport to use. + // For upstream connections, the Thrift proxy will use same transport as the downstream + // connection. + AUTO_TRANSPORT = 0; + + // The Thrift proxy will use the Thrift framed transport. + FRAMED = 1; + + // The Thrift proxy will use the Thrift unframed transport. + UNFRAMED = 2; + + // The Thrift proxy will assume the client is using the Thrift header transport. + HEADER = 3; +} + +// Thrift Protocol types supported by Envoy. +enum ProtocolType { + // For downstream connections, the Thrift proxy will attempt to determine which protocol to use. + // Note that the older, non-strict (or lax) binary protocol is not included in automatic protocol + // detection. For upstream connections, the Thrift proxy will use the same protocol as the + // downstream connection. + AUTO_PROTOCOL = 0; + + // The Thrift proxy will use the Thrift binary protocol. + BINARY = 1; + + // The Thrift proxy will use Thrift non-strict binary protocol. + LAX_BINARY = 2; + + // The Thrift proxy will use the Thrift compact protocol. + COMPACT = 3; + + // The Thrift proxy will use the Thrift "Twitter" protocol implemented by the finagle library. + TWITTER = 4; +} + +// [#next-free-field: 6] +message ThriftProxy { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.thrift_proxy.v3.ThriftProxy"; + + // Supplies the type of transport that the Thrift proxy should use. 
Defaults to + // :ref:`AUTO_TRANSPORT`. + TransportType transport = 2 [(validate.rules).enum = {defined_only: true}]; + + // Supplies the type of protocol that the Thrift proxy should use. Defaults to + // :ref:`AUTO_PROTOCOL`. + ProtocolType protocol = 3 [(validate.rules).enum = {defined_only: true}]; + + // The human readable prefix to use when emitting statistics. + string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The route table for the connection manager is static and is specified in this property. + RouteConfiguration route_config = 4; + + // A list of individual Thrift filters that make up the filter chain for requests made to the + // Thrift proxy. Order matters as the filters are processed sequentially. For backwards + // compatibility, if no thrift_filters are specified, a default Thrift router filter + // (`envoy.filters.thrift.router`) is used. + repeated ThriftFilter thrift_filters = 5; +} + +// ThriftFilter configures a Thrift filter. +message ThriftFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.thrift_proxy.v3.ThriftFilter"; + + reserved 2; + + reserved "config"; + + // The name of the filter to instantiate. The name must match a supported + // filter. The built-in filters are: + // + // [#comment:TODO(zuercher): Auto generate the following list] + // * :ref:`envoy.filters.thrift.router ` + // * :ref:`envoy.filters.thrift.rate_limit ` + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Filter specific configuration which depends on the filter being instantiated. See the supported + // filters for further documentation. + oneof config_type { + google.protobuf.Any typed_config = 3; + } +} + +// ThriftProtocolOptions specifies Thrift upstream protocol options. This object is used in +// in +// :ref:`typed_extension_protocol_options`, +// keyed by the name `envoy.filters.network.thrift_proxy`. +message ThriftProtocolOptions { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.thrift_proxy.v3.ThriftProtocolOptions"; + + // Supplies the type of transport that the Thrift proxy should use for upstream connections. + // Selecting + // :ref:`AUTO_TRANSPORT`, + // which is the default, causes the proxy to use the same transport as the downstream connection. + TransportType transport = 1 [(validate.rules).enum = {defined_only: true}]; + + // Supplies the type of protocol that the Thrift proxy should use for upstream connections. + // Selecting + // :ref:`AUTO_PROTOCOL`, + // which is the default, causes the proxy to use the same protocol as the downstream connection. + ProtocolType protocol = 2 [(validate.rules).enum = {defined_only: true}]; +} diff --git a/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v4alpha/BUILD new file mode 100644 index 000000000000..f869cf5ac123 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v4alpha/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v4alpha:pkg", + "//envoy/data/dns/v4alpha:pkg", + "//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v4alpha/dns_filter.proto b/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v4alpha/dns_filter.proto new file mode 100644 index 000000000000..be78ebf40c18 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v4alpha/dns_filter.proto @@ -0,0 +1,80 @@ +syntax = "proto3"; + +package envoy.extensions.filters.udp.dns_filter.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/data/dns/v4alpha/dns_table.proto"; + +import "google/protobuf/duration.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.udp.dns_filter.v4alpha"; +option java_outer_classname = "DnsFilterProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).work_in_progress = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: DNS Filter] +// DNS Filter :ref:`configuration overview `. +// [#extension: envoy.filters.udp_listener.dns_filter] + +// Configuration for the DNS filter. +message DnsFilterConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.udp.dns_filter.v3alpha.DnsFilterConfig"; + + // This message contains the configuration for the DNS Filter operating + // in a server context. This message will contain the virtual hosts and + // associated addresses with which Envoy will respond to queries + message ServerContextConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.udp.dns_filter.v3alpha.DnsFilterConfig.ServerContextConfig"; + + oneof config_source { + option (validate.required) = true; + + // Load the configuration specified from the control plane + data.dns.v4alpha.DnsTable inline_dns_table = 1; + + // Seed the filter configuration from an external path. This source + // is a yaml formatted file that contains the DnsTable driving Envoy's + // responses to DNS queries + config.core.v4alpha.DataSource external_dns_table = 2; + } + } + + // This message contains the configuration for the DNS Filter operating + // in a client context. 
This message will contain the timeouts, retry, + // and forwarding configuration for Envoy to make DNS requests to other + // resolvers + message ClientContextConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.udp.dns_filter.v3alpha.DnsFilterConfig.ClientContextConfig"; + + // Sets the maximum time we will wait for the upstream query to complete + // We allow 5s for the upstream resolution to complete, so the minimum + // value here is 5 + google.protobuf.Duration resolver_timeout = 1 [(validate.rules).duration = {gte {seconds: 5}}]; + + // A list of DNS servers to which we can forward queries + repeated string upstream_resolvers = 2 [(validate.rules).repeated = { + min_items: 1 + items {string {min_len: 3}} + }]; + } + + // The stat prefix used when emitting DNS filter statistics + string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; + + // Server context configuration contains the data that the filter uses to respond + // to DNS requests. + ServerContextConfig server_config = 2; + + // Client context configuration controls Envoy's behavior when it must use external + // resolvers to answer a query. This object is optional and if omitted instructs + // the filter to resolve queries from the data in the server_config + ClientContextConfig client_config = 3; +} diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/BUILD index e56544584bfe..d294b69de40c 100644 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/BUILD +++ b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/BUILD @@ -8,7 +8,7 @@ api_proto_package( deps = [ "//envoy/config/core/v4alpha:pkg", "//envoy/extensions/transport_sockets/tls/v3:pkg", - "//envoy/type/matcher/v3:pkg", + "//envoy/type/matcher/v4alpha:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/common.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/common.proto index 9028e380d092..f81442f4dbcd 100644 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/common.proto +++ b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/common.proto @@ -3,7 +3,7 @@ syntax = "proto3"; package envoy.extensions.transport_sockets.tls.v4alpha; import "envoy/config/core/v4alpha/base.proto"; -import "envoy/type/matcher/v3/string.proto"; +import "envoy/type/matcher/v4alpha/string.proto"; import "google/protobuf/any.proto"; import "google/protobuf/struct.proto"; @@ -298,7 +298,7 @@ message CertificateValidationContext { // Subject Alternative Name of the presented certificate matches one of the specified matches. // // When a certificate has wildcard DNS SAN entries, to match a specific client, it should be - // configured with exact match type in the :ref:`string matcher `. + // configured with exact match type in the :ref:`string matcher `. // For example if the certificate has "\*.example.com" as DNS SAN entry, to allow only "api.example.com", // it should be configured as shown below. // @@ -312,7 +312,7 @@ message CertificateValidationContext { // Subject Alternative Names are easily spoofable and verifying only them is insecure, // therefore this option must be used together with :ref:`trusted_ca // `. 
- repeated type.matcher.v3.StringMatcher match_subject_alt_names = 9; + repeated type.matcher.v4alpha.StringMatcher match_subject_alt_names = 9; // [#not-implemented-hide:] Must present a signed time-stamped OCSP response. google.protobuf.BoolValue require_ocsp_staple = 5; diff --git a/generated_api_shadow/envoy/service/health/v4alpha/BUILD b/generated_api_shadow/envoy/service/health/v4alpha/BUILD new file mode 100644 index 000000000000..b7b2a13bd495 --- /dev/null +++ b/generated_api_shadow/envoy/service/health/v4alpha/BUILD @@ -0,0 +1,15 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + has_services = True, + deps = [ + "//envoy/config/core/v4alpha:pkg", + "//envoy/config/endpoint/v3:pkg", + "//envoy/service/health/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/service/health/v4alpha/hds.proto b/generated_api_shadow/envoy/service/health/v4alpha/hds.proto new file mode 100644 index 000000000000..826d5eeb0301 --- /dev/null +++ b/generated_api_shadow/envoy/service/health/v4alpha/hds.proto @@ -0,0 +1,160 @@ +syntax = "proto3"; + +package envoy.service.health.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/core/v4alpha/health_check.proto"; +import "envoy/config/endpoint/v3/endpoint_components.proto"; + +import "google/api/annotations.proto"; +import "google/protobuf/duration.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.service.health.v4alpha"; +option java_outer_classname = "HdsProto"; +option java_multiple_files = true; +option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Health Discovery Service (HDS)] + +// HDS is Health Discovery Service. It compliments Envoy’s health checking +// service by designating this Envoy to be a healthchecker for a subset of hosts +// in the cluster. The status of these health checks will be reported to the +// management server, where it can be aggregated etc and redistributed back to +// Envoy through EDS. +service HealthDiscoveryService { + // 1. Envoy starts up and if its can_healthcheck option in the static + // bootstrap config is enabled, sends HealthCheckRequest to the management + // server. It supplies its capabilities (which protocol it can health check + // with, what zone it resides in, etc.). + // 2. In response to (1), the management server designates this Envoy as a + // healthchecker to health check a subset of all upstream hosts for a given + // cluster (for example upstream Host 1 and Host 2). It streams + // HealthCheckSpecifier messages with cluster related configuration for all + // clusters this Envoy is designated to health check. Subsequent + // HealthCheckSpecifier message will be sent on changes to: + // a. Endpoints to health checks + // b. Per cluster configuration change + // 3. Envoy creates a health probe based on the HealthCheck config and sends + // it to endpoint(ip:port) of Host 1 and 2. Based on the HealthCheck + // configuration Envoy waits upon the arrival of the probe response and + // looks at the content of the response to decide whether the endpoint is + // healthy or not. If a response hasn't been received within the timeout + // interval, the endpoint health status is considered TIMEOUT. 
+ // 4. Envoy reports results back in an EndpointHealthResponse message. + // Envoy streams responses as often as the interval configured by the + // management server in HealthCheckSpecifier. + // 5. The management Server collects health statuses for all endpoints in the + // cluster (for all clusters) and uses this information to construct + // EndpointDiscoveryResponse messages. + // 6. Once Envoy has a list of upstream endpoints to send traffic to, it load + // balances traffic to them without additional health checking. It may + // use inline healthcheck (i.e. consider endpoint UNHEALTHY if connection + // failed to a particular endpoint to account for health status propagation + // delay between HDS and EDS). + // By default, can_healthcheck is true. If can_healthcheck is false, Cluster + // configuration may not contain HealthCheck message. + // TODO(htuch): How is can_healthcheck communicated to CDS to ensure the above + // invariant? + // TODO(htuch): Add @amb67's diagram. + rpc StreamHealthCheck(stream HealthCheckRequestOrEndpointHealthResponse) + returns (stream HealthCheckSpecifier) { + } + + // TODO(htuch): Unlike the gRPC version, there is no stream-based binding of + // request/response. Should we add an identifier to the HealthCheckSpecifier + // to bind with the response? + rpc FetchHealthCheck(HealthCheckRequestOrEndpointHealthResponse) returns (HealthCheckSpecifier) { + option (google.api.http).post = "/v3/discovery:health_check"; + option (google.api.http).body = "*"; + } +} + +// Defines supported protocols etc, so the management server can assign proper +// endpoints to healthcheck. +message Capability { + option (udpa.annotations.versioning).previous_message_type = "envoy.service.health.v3.Capability"; + + // Different Envoy instances may have different capabilities (e.g. Redis) + // and/or have ports enabled for different protocols. + enum Protocol { + HTTP = 0; + TCP = 1; + REDIS = 2; + } + + repeated Protocol health_check_protocols = 1; +} + +message HealthCheckRequest { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.health.v3.HealthCheckRequest"; + + config.core.v4alpha.Node node = 1; + + Capability capability = 2; +} + +message EndpointHealth { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.health.v3.EndpointHealth"; + + config.endpoint.v3.Endpoint endpoint = 1; + + config.core.v4alpha.HealthStatus health_status = 2; +} + +message EndpointHealthResponse { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.health.v3.EndpointHealthResponse"; + + repeated EndpointHealth endpoints_health = 1; +} + +message HealthCheckRequestOrEndpointHealthResponse { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.health.v3.HealthCheckRequestOrEndpointHealthResponse"; + + oneof request_type { + HealthCheckRequest health_check_request = 1; + + EndpointHealthResponse endpoint_health_response = 2; + } +} + +message LocalityEndpoints { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.health.v3.LocalityEndpoints"; + + config.core.v4alpha.Locality locality = 1; + + repeated config.endpoint.v3.Endpoint endpoints = 2; +} + +// The cluster name and locality is provided to Envoy for the endpoints that it +// health checks to support statistics reporting, logging and debugging by the +// Envoy instance (outside of HDS). For maximum usefulness, it should match the +// same cluster structure as that provided by EDS. 
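The ClusterHealthCheck and HealthCheckSpecifier messages below carry that per-cluster assignment. As a rough, illustrative sketch only (the cluster name, addresses, and the core HealthCheck fields shown are assumptions for illustration, not taken from this proto), a management server could stream a specifier along these lines:

.. code-block:: yaml

   # one health checking assignment covering a single cluster and locality
   cluster_health_checks:
   - cluster_name: local_service
     health_checks:
     - timeout: 1s
       interval: 5s
       http_health_check:
         path: /healthz
     locality_endpoints:
     - locality:
         region: us-east-1
         zone: us-east-1a
       endpoints:
       - address:
           socket_address:
             address: 10.0.0.1
             port_value: 8080
   # how often Envoy should report EndpointHealthResponse messages back
   interval: 1s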
+message ClusterHealthCheck { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.health.v3.ClusterHealthCheck"; + + string cluster_name = 1; + + repeated config.core.v4alpha.HealthCheck health_checks = 2; + + repeated LocalityEndpoints locality_endpoints = 3; +} + +message HealthCheckSpecifier { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.health.v3.HealthCheckSpecifier"; + + repeated ClusterHealthCheck cluster_health_checks = 1; + + // The default is 1 second. + google.protobuf.Duration interval = 2; +} diff --git a/generated_api_shadow/envoy/service/status/v4alpha/BUILD b/generated_api_shadow/envoy/service/status/v4alpha/BUILD new file mode 100644 index 000000000000..fb238648fbca --- /dev/null +++ b/generated_api_shadow/envoy/service/status/v4alpha/BUILD @@ -0,0 +1,16 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + has_services = True, + deps = [ + "//envoy/admin/v4alpha:pkg", + "//envoy/config/core/v4alpha:pkg", + "//envoy/service/status/v3:pkg", + "//envoy/type/matcher/v4alpha:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/service/status/v4alpha/csds.proto b/generated_api_shadow/envoy/service/status/v4alpha/csds.proto new file mode 100644 index 000000000000..f6f5fa654d70 --- /dev/null +++ b/generated_api_shadow/envoy/service/status/v4alpha/csds.proto @@ -0,0 +1,102 @@ +syntax = "proto3"; + +package envoy.service.status.v4alpha; + +import "envoy/admin/v4alpha/config_dump.proto"; +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/type/matcher/v4alpha/node.proto"; + +import "google/api/annotations.proto"; +import "google/protobuf/struct.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.service.status.v4alpha"; +option java_outer_classname = "CsdsProto"; +option java_multiple_files = true; +option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Client Status Discovery Service (CSDS)] + +// CSDS is Client Status Discovery Service. It can be used to get the status of +// an xDS-compliant client from the management server's point of view. In the +// future, it can potentially be used as an interface to get the current +// state directly from the client. +service ClientStatusDiscoveryService { + rpc StreamClientStatus(stream ClientStatusRequest) returns (stream ClientStatusResponse) { + } + + rpc FetchClientStatus(ClientStatusRequest) returns (ClientStatusResponse) { + option (google.api.http).post = "/v3/discovery:client_status"; + option (google.api.http).body = "*"; + } +} + +// Status of a config. +enum ConfigStatus { + // Status info is not available/unknown. + UNKNOWN = 0; + + // Management server has sent the config to client and received ACK. + SYNCED = 1; + + // Config is not sent. + NOT_SENT = 2; + + // Management server has sent the config to client but hasn’t received + // ACK/NACK. + STALE = 3; + + // Management server has sent the config to client but received NACK. + ERROR = 4; +} + +// Request for client status of clients identified by a list of NodeMatchers. 
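As a hypothetical example (the node id and metadata key below are invented for illustration), a caller could ask for the status of one named client by populating the request with a single matcher; NodeMatcher itself is introduced in type/matcher/v4alpha/node.proto later in this change:

.. code-block:: yaml

   # select the client whose node id is exactly "front-proxy-1"
   # and whose node metadata has cluster == "edge"
   node_matchers:
   - node_id:
       exact: front-proxy-1
     node_metadatas:
     - path:
       - key: cluster
       value:
         string_match:
           exact: edge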
+message ClientStatusRequest { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.status.v3.ClientStatusRequest"; + + // Management server can use these match criteria to identify clients. + // The match follows OR semantics. + repeated type.matcher.v4alpha.NodeMatcher node_matchers = 1; +} + +// Detailed config (per xDS) with status. +// [#next-free-field: 6] +message PerXdsConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.status.v3.PerXdsConfig"; + + ConfigStatus status = 1; + + oneof per_xds_config { + admin.v4alpha.ListenersConfigDump listener_config = 2; + + admin.v4alpha.ClustersConfigDump cluster_config = 3; + + admin.v4alpha.RoutesConfigDump route_config = 4; + + admin.v4alpha.ScopedRoutesConfigDump scoped_route_config = 5; + } +} + +// All xds configs for a particular client. +message ClientConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.status.v3.ClientConfig"; + + // Node for a particular client. + config.core.v4alpha.Node node = 1; + + repeated PerXdsConfig xds_config = 2; +} + +message ClientStatusResponse { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.status.v3.ClientStatusResponse"; + + // Client configs for the clients specified in the ClientStatusRequest. + repeated ClientConfig config = 1; +} diff --git a/generated_api_shadow/envoy/service/tap/v4alpha/BUILD b/generated_api_shadow/envoy/service/tap/v4alpha/BUILD new file mode 100644 index 000000000000..5f75886cd068 --- /dev/null +++ b/generated_api_shadow/envoy/service/tap/v4alpha/BUILD @@ -0,0 +1,17 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + has_services = True, + deps = [ + "//envoy/config/core/v4alpha:pkg", + "//envoy/config/tap/v4alpha:pkg", + "//envoy/data/tap/v3:pkg", + "//envoy/service/discovery/v3:pkg", + "//envoy/service/tap/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/service/tap/v4alpha/tap.proto b/generated_api_shadow/envoy/service/tap/v4alpha/tap.proto new file mode 100644 index 000000000000..a1654d18bebb --- /dev/null +++ b/generated_api_shadow/envoy/service/tap/v4alpha/tap.proto @@ -0,0 +1,64 @@ +syntax = "proto3"; + +package envoy.service.tap.v4alpha; + +import "envoy/config/core/v4alpha/base.proto"; +import "envoy/data/tap/v3/wrapper.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.service.tap.v4alpha"; +option java_outer_classname = "TapProto"; +option java_multiple_files = true; +option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Tap Sink Service] + +// [#not-implemented-hide:] A tap service to receive incoming taps. Envoy will call +// StreamTaps to deliver captured taps to the server +service TapSinkService { + // Envoy will connect and send StreamTapsRequest messages forever. It does not expect any + // response to be sent as nothing would be done in the case of failure. The server should + // disconnect if it expects Envoy to reconnect. + rpc StreamTaps(stream StreamTapsRequest) returns (StreamTapsResponse) { + } +} + +// [#not-implemented-hide:] Stream message for the Tap API. 
Envoy will open a stream to the server +// and stream taps without ever expecting a response. +message StreamTapsRequest { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.tap.v3.StreamTapsRequest"; + + message Identifier { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.tap.v3.StreamTapsRequest.Identifier"; + + // The node sending taps over the stream. + config.core.v4alpha.Node node = 1 [(validate.rules).message = {required: true}]; + + // The opaque identifier that was set in the :ref:`output config + // `. + string tap_id = 2; + } + + // Identifier data effectively is a structured metadata. As a performance optimization this will + // only be sent in the first message on the stream. + Identifier identifier = 1; + + // The trace id. this can be used to merge together a streaming trace. Note that the trace_id + // is not guaranteed to be spatially or temporally unique. + uint64 trace_id = 2; + + // The trace data. + data.tap.v3.TraceWrapper trace = 3; +} + +// [#not-implemented-hide:] +message StreamTapsResponse { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.tap.v3.StreamTapsResponse"; +} diff --git a/generated_api_shadow/envoy/service/tap/v4alpha/tapds.proto b/generated_api_shadow/envoy/service/tap/v4alpha/tapds.proto new file mode 100644 index 000000000000..855fde8c8e63 --- /dev/null +++ b/generated_api_shadow/envoy/service/tap/v4alpha/tapds.proto @@ -0,0 +1,48 @@ +syntax = "proto3"; + +package envoy.service.tap.v4alpha; + +import "envoy/config/tap/v4alpha/common.proto"; +import "envoy/service/discovery/v3/discovery.proto"; + +import "google/api/annotations.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.service.tap.v4alpha"; +option java_outer_classname = "TapdsProto"; +option java_multiple_files = true; +option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Tap discovery service] + +// [#not-implemented-hide:] Tap discovery service. +service TapDiscoveryService { + rpc StreamTapConfigs(stream discovery.v3.DiscoveryRequest) + returns (stream discovery.v3.DiscoveryResponse) { + } + + rpc DeltaTapConfigs(stream discovery.v3.DeltaDiscoveryRequest) + returns (stream discovery.v3.DeltaDiscoveryResponse) { + } + + rpc FetchTapConfigs(discovery.v3.DiscoveryRequest) returns (discovery.v3.DiscoveryResponse) { + option (google.api.http).post = "/v3/discovery:tap_configs"; + option (google.api.http).body = "*"; + } +} + +// [#not-implemented-hide:] A tap resource is essentially a tap configuration with a name +// The filter TapDS config references this name. +message TapResource { + option (udpa.annotations.versioning).previous_message_type = "envoy.service.tap.v3.TapResource"; + + // The name of the tap configuration. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Tap config to apply + config.tap.v4alpha.TapConfig config = 2; +} diff --git a/generated_api_shadow/envoy/type/matcher/regex.proto b/generated_api_shadow/envoy/type/matcher/regex.proto index 78b4a2c1d61e..9e41637ab70c 100644 --- a/generated_api_shadow/envoy/type/matcher/regex.proto +++ b/generated_api_shadow/envoy/type/matcher/regex.proto @@ -24,7 +24,10 @@ message RegexMatcher { // compiled regex is to evaluate. 
A regex that has a program size greater than the configured // value will fail to compile. In this case, the configured max program size can be increased // or the regex can be simplified. If not specified, the default is 100. - google.protobuf.UInt32Value max_program_size = 1; + // + // This field is deprecated; regexp validation should be performed on the management server + // instead of being done by each individual client. + google.protobuf.UInt32Value max_program_size = 1 [deprecated = true]; } oneof engine_type { diff --git a/generated_api_shadow/envoy/type/matcher/v3/regex.proto b/generated_api_shadow/envoy/type/matcher/v3/regex.proto index 1b10df3ff1ba..5de7fd9baf54 100644 --- a/generated_api_shadow/envoy/type/matcher/v3/regex.proto +++ b/generated_api_shadow/envoy/type/matcher/v3/regex.proto @@ -30,7 +30,10 @@ message RegexMatcher { // compiled regex is to evaluate. A regex that has a program size greater than the configured // value will fail to compile. In this case, the configured max program size can be increased // or the regex can be simplified. If not specified, the default is 100. - google.protobuf.UInt32Value max_program_size = 1; + // + // This field is deprecated; regexp validation should be performed on the management server + // instead of being done by each individual client. + google.protobuf.UInt32Value max_program_size = 1 [deprecated = true]; } // Google's RE2 regex engine. diff --git a/generated_api_shadow/envoy/type/matcher/v4alpha/BUILD b/generated_api_shadow/envoy/type/matcher/v4alpha/BUILD new file mode 100644 index 000000000000..e63f52b2baa5 --- /dev/null +++ b/generated_api_shadow/envoy/type/matcher/v4alpha/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/annotations:pkg", + "//envoy/type/matcher/v3:pkg", + "//envoy/type/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/type/matcher/v4alpha/metadata.proto b/generated_api_shadow/envoy/type/matcher/v4alpha/metadata.proto new file mode 100644 index 000000000000..8abe14e7b667 --- /dev/null +++ b/generated_api_shadow/envoy/type/matcher/v4alpha/metadata.proto @@ -0,0 +1,105 @@ +syntax = "proto3"; + +package envoy.type.matcher.v4alpha; + +import "envoy/type/matcher/v4alpha/value.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; +option java_outer_classname = "MetadataProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Metadata matcher] + +// MetadataMatcher provides a general interface to check if a given value is matched in +// :ref:`Metadata `. It uses `filter` and `path` to retrieve the value +// from the Metadata and then check if it's matched to the specified value. +// +// For example, for the following Metadata: +// +// .. 
code-block:: yaml +// +// filter_metadata: +// envoy.filters.http.rbac: +// fields: +// a: +// struct_value: +// fields: +// b: +// struct_value: +// fields: +// c: +// string_value: pro +// t: +// list_value: +// values: +// - string_value: m +// - string_value: n +// +// The following MetadataMatcher is matched as the path [a, b, c] will retrieve a string value "pro" +// from the Metadata which is matched to the specified prefix match. +// +// .. code-block:: yaml +// +// filter: envoy.filters.http.rbac +// path: +// - key: a +// - key: b +// - key: c +// value: +// string_match: +// prefix: pr +// +// The following MetadataMatcher is matched as the code will match one of the string values in the +// list at the path [a, t]. +// +// .. code-block:: yaml +// +// filter: envoy.filters.http.rbac +// path: +// - key: a +// - key: t +// value: +// list_match: +// one_of: +// string_match: +// exact: m +// +// An example use of MetadataMatcher is specifying additional metadata in envoy.filters.http.rbac to +// enforce access control based on dynamic metadata in a request. See :ref:`Permission +// ` and :ref:`Principal +// `. + +// [#next-major-version: MetadataMatcher should use StructMatcher] +message MetadataMatcher { + option (udpa.annotations.versioning).previous_message_type = + "envoy.type.matcher.v3.MetadataMatcher"; + + // Specifies the segment in a path to retrieve value from Metadata. + // Note: Currently it's not supported to retrieve a value from a list in Metadata. This means that + // if the segment key refers to a list, it has to be the last segment in a path. + message PathSegment { + option (udpa.annotations.versioning).previous_message_type = + "envoy.type.matcher.v3.MetadataMatcher.PathSegment"; + + oneof segment { + option (validate.required) = true; + + // If specified, use the key to retrieve the value in a Struct. + string key = 1 [(validate.rules).string = {min_bytes: 1}]; + } + } + + // The filter name to retrieve the Struct from the Metadata. + string filter = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The path to retrieve the Value from the Struct. + repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}]; + + // The MetadataMatcher is matched if the value retrieved by path is matched to this value. + ValueMatcher value = 3 [(validate.rules).message = {required: true}]; +} diff --git a/generated_api_shadow/envoy/type/matcher/v4alpha/node.proto b/generated_api_shadow/envoy/type/matcher/v4alpha/node.proto new file mode 100644 index 000000000000..a74bf808f05a --- /dev/null +++ b/generated_api_shadow/envoy/type/matcher/v4alpha/node.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; + +package envoy.type.matcher.v4alpha; + +import "envoy/type/matcher/v4alpha/string.proto"; +import "envoy/type/matcher/v4alpha/struct.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; +option java_outer_classname = "NodeProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Node matcher] + +// Specifies the way to match a Node. +// The match follows AND semantics. +message NodeMatcher { + option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.v3.NodeMatcher"; + + // Specifies match criteria on the node id. + StringMatcher node_id = 1; + + // Specifies match criteria on the node metadata. 
+ repeated StructMatcher node_metadatas = 2; +} diff --git a/generated_api_shadow/envoy/type/matcher/v4alpha/number.proto b/generated_api_shadow/envoy/type/matcher/v4alpha/number.proto new file mode 100644 index 000000000000..b168af19ab50 --- /dev/null +++ b/generated_api_shadow/envoy/type/matcher/v4alpha/number.proto @@ -0,0 +1,33 @@ +syntax = "proto3"; + +package envoy.type.matcher.v4alpha; + +import "envoy/type/v3/range.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; +option java_outer_classname = "NumberProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Number matcher] + +// Specifies the way to match a double value. +message DoubleMatcher { + option (udpa.annotations.versioning).previous_message_type = + "envoy.type.matcher.v3.DoubleMatcher"; + + oneof match_pattern { + option (validate.required) = true; + + // If specified, the input double value must be in the range specified here. + // Note: The range is using half-open interval semantics [start, end). + v3.DoubleRange range = 1; + + // If specified, the input double value must be equal to the value specified here. + double exact = 2; + } +} diff --git a/generated_api_shadow/envoy/type/matcher/v4alpha/path.proto b/generated_api_shadow/envoy/type/matcher/v4alpha/path.proto new file mode 100644 index 000000000000..9150939bf2ee --- /dev/null +++ b/generated_api_shadow/envoy/type/matcher/v4alpha/path.proto @@ -0,0 +1,30 @@ +syntax = "proto3"; + +package envoy.type.matcher.v4alpha; + +import "envoy/type/matcher/v4alpha/string.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; +option java_outer_classname = "PathProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Path matcher] + +// Specifies the way to match a path on HTTP request. +message PathMatcher { + option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.v3.PathMatcher"; + + oneof rule { + option (validate.required) = true; + + // The `path` must match the URL path portion of the :path header. The query and fragment + // string (if present) are removed in the URL path portion. + // For example, the path */data* will match the *:path* header */data#fragment?param=value*. 
+ StringMatcher path = 1 [(validate.rules).message = {required: true}]; + } +} diff --git a/generated_api_shadow/envoy/type/matcher/v4alpha/regex.proto b/generated_api_shadow/envoy/type/matcher/v4alpha/regex.proto new file mode 100644 index 000000000000..ed038ec3abb4 --- /dev/null +++ b/generated_api_shadow/envoy/type/matcher/v4alpha/regex.proto @@ -0,0 +1,77 @@ +syntax = "proto3"; + +package envoy.type.matcher.v4alpha; + +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; +option java_outer_classname = "RegexProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Regex matcher] + +// A regex matcher designed for safety when used with untrusted input. +message RegexMatcher { + option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.v3.RegexMatcher"; + + // Google's `RE2 `_ regex engine. The regex string must adhere to + // the documented `syntax `_. The engine is designed + // to complete execution in linear time as well as limit the amount of memory used. + message GoogleRE2 { + option (udpa.annotations.versioning).previous_message_type = + "envoy.type.matcher.v3.RegexMatcher.GoogleRE2"; + + // This field controls the RE2 "program size" which is a rough estimate of how complex a + // compiled regex is to evaluate. A regex that has a program size greater than the configured + // value will fail to compile. In this case, the configured max program size can be increased + // or the regex can be simplified. If not specified, the default is 100. + // + // This field is deprecated; regexp validation should be performed on the management server + // instead of being done by each individual client. + google.protobuf.UInt32Value hidden_envoy_deprecated_max_program_size = 1 [deprecated = true]; + } + + oneof engine_type { + option (validate.required) = true; + + // Google's RE2 regex engine. + GoogleRE2 google_re2 = 1 [(validate.rules).message = {required: true}]; + } + + // The regex match string. The string must be supported by the configured engine. + string regex = 2 [(validate.rules).string = {min_bytes: 1}]; +} + +// Describes how to match a string and then produce a new string using a regular +// expression and a substitution string. +message RegexMatchAndSubstitute { + option (udpa.annotations.versioning).previous_message_type = + "envoy.type.matcher.v3.RegexMatchAndSubstitute"; + + // The regular expression used to find portions of a string (hereafter called + // the "subject string") that should be replaced. When a new string is + // produced during the substitution operation, the new string is initially + // the same as the subject string, but then all matches in the subject string + // are replaced by the substitution string. If replacing all matches isn't + // desired, regular expression anchors can be used to ensure a single match, + // so as to replace just one occurrence of a pattern. Capture groups can be + // used in the pattern to extract portions of the subject string, and then + // referenced in the substitution string. + RegexMatcher pattern = 1; + + // The string that should be substituted into matching portions of the + // subject string during a substitution operation to produce a new string. 
+ // Capture groups in the pattern can be referenced in the substitution + // string. Note, however, that the syntax for referring to capture groups is + // defined by the chosen regular expression engine. Google's `RE2 + // `_ regular expression engine uses a + // backslash followed by the capture group number to denote a numbered + // capture group. E.g., ``\1`` refers to capture group 1, and ``\2`` refers + // to capture group 2. + string substitution = 2; +} diff --git a/generated_api_shadow/envoy/type/matcher/v4alpha/string.proto b/generated_api_shadow/envoy/type/matcher/v4alpha/string.proto new file mode 100644 index 000000000000..8ce0b12f9e2a --- /dev/null +++ b/generated_api_shadow/envoy/type/matcher/v4alpha/string.proto @@ -0,0 +1,71 @@ +syntax = "proto3"; + +package envoy.type.matcher.v4alpha; + +import "envoy/type/matcher/v4alpha/regex.proto"; + +import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; +option java_outer_classname = "StringProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: String matcher] + +// Specifies the way to match a string. +// [#next-free-field: 7] +message StringMatcher { + option (udpa.annotations.versioning).previous_message_type = + "envoy.type.matcher.v3.StringMatcher"; + + reserved 4; + + reserved "regex"; + + oneof match_pattern { + option (validate.required) = true; + + // The input string must match exactly the string specified here. + // + // Examples: + // + // * *abc* only matches the value *abc*. + string exact = 1; + + // The input string must have the prefix specified here. + // Note: empty prefix is not allowed, please use regex instead. + // + // Examples: + // + // * *abc* matches the value *abc.xyz* + string prefix = 2 [(validate.rules).string = {min_bytes: 1}]; + + // The input string must have the suffix specified here. + // Note: empty prefix is not allowed, please use regex instead. + // + // Examples: + // + // * *abc* matches the value *xyz.abc* + string suffix = 3 [(validate.rules).string = {min_bytes: 1}]; + + // The input string must match the regular expression specified here. + RegexMatcher safe_regex = 5 [(validate.rules).message = {required: true}]; + } + + // If true, indicates the exact/prefix/suffix matching should be case insensitive. This has no + // effect for the safe_regex match. + // For example, the matcher *data* will match both input string *Data* and *data* if set to true. + bool ignore_case = 6; +} + +// Specifies a list of ways to match a string. 
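ListStringMatcher simply wraps a repeated set of these matchers. For instance (the paths and the regex are made-up values, shown only to illustrate the exact/prefix/safe_regex variants and ignore_case), a list matcher could be written as:

.. code-block:: yaml

   patterns:
   # case-insensitive exact match
   - exact: /healthz
     ignore_case: true
   # prefix match
   - prefix: /api/
   # RE2-based regex match
   - safe_regex:
       google_re2: {}
       regex: ^/api/v[0-9]+/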
+message ListStringMatcher { + option (udpa.annotations.versioning).previous_message_type = + "envoy.type.matcher.v3.ListStringMatcher"; + + repeated StringMatcher patterns = 1 [(validate.rules).repeated = {min_items: 1}]; +} diff --git a/generated_api_shadow/envoy/type/matcher/v4alpha/struct.proto b/generated_api_shadow/envoy/type/matcher/v4alpha/struct.proto new file mode 100644 index 000000000000..643cc5a47570 --- /dev/null +++ b/generated_api_shadow/envoy/type/matcher/v4alpha/struct.proto @@ -0,0 +1,91 @@ +syntax = "proto3"; + +package envoy.type.matcher.v4alpha; + +import "envoy/type/matcher/v4alpha/value.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; +option java_outer_classname = "StructProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Struct matcher] + +// StructMatcher provides a general interface to check if a given value is matched in +// google.protobuf.Struct. It uses `path` to retrieve the value +// from the struct and then check if it's matched to the specified value. +// +// For example, for the following Struct: +// +// .. code-block:: yaml +// +// fields: +// a: +// struct_value: +// fields: +// b: +// struct_value: +// fields: +// c: +// string_value: pro +// t: +// list_value: +// values: +// - string_value: m +// - string_value: n +// +// The following MetadataMatcher is matched as the path [a, b, c] will retrieve a string value "pro" +// from the Metadata which is matched to the specified prefix match. +// +// .. code-block:: yaml +// +// path: +// - key: a +// - key: b +// - key: c +// value: +// string_match: +// prefix: pr +// +// The following StructMatcher is matched as the code will match one of the string values in the +// list at the path [a, t]. +// +// .. code-block:: yaml +// +// path: +// - key: a +// - key: t +// value: +// list_match: +// one_of: +// string_match: +// exact: m +// +// An example use of StructMatcher is to match metadata in envoy.v*.core.Node. +message StructMatcher { + option (udpa.annotations.versioning).previous_message_type = + "envoy.type.matcher.v3.StructMatcher"; + + // Specifies the segment in a path to retrieve value from Struct. + message PathSegment { + option (udpa.annotations.versioning).previous_message_type = + "envoy.type.matcher.v3.StructMatcher.PathSegment"; + + oneof segment { + option (validate.required) = true; + + // If specified, use the key to retrieve the value in a Struct. + string key = 1 [(validate.rules).string = {min_bytes: 1}]; + } + } + + // The path to retrieve the Value from the Struct. + repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}]; + + // The StructMatcher is matched if the value retrieved by path is matched to this value. 
+ ValueMatcher value = 3 [(validate.rules).message = {required: true}]; +} diff --git a/generated_api_shadow/envoy/type/matcher/v4alpha/value.proto b/generated_api_shadow/envoy/type/matcher/v4alpha/value.proto new file mode 100644 index 000000000000..6e509d460109 --- /dev/null +++ b/generated_api_shadow/envoy/type/matcher/v4alpha/value.proto @@ -0,0 +1,71 @@ +syntax = "proto3"; + +package envoy.type.matcher.v4alpha; + +import "envoy/type/matcher/v4alpha/number.proto"; +import "envoy/type/matcher/v4alpha/string.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; +option java_outer_classname = "ValueProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Value matcher] + +// Specifies the way to match a ProtobufWkt::Value. Primitive values and ListValue are supported. +// StructValue is not supported and is always not matched. +// [#next-free-field: 7] +message ValueMatcher { + option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.v3.ValueMatcher"; + + // NullMatch is an empty message to specify a null value. + message NullMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.type.matcher.v3.ValueMatcher.NullMatch"; + } + + // Specifies how to match a value. + oneof match_pattern { + option (validate.required) = true; + + // If specified, a match occurs if and only if the target value is a NullValue. + NullMatch null_match = 1; + + // If specified, a match occurs if and only if the target value is a double value and is + // matched to this field. + DoubleMatcher double_match = 2; + + // If specified, a match occurs if and only if the target value is a string value and is + // matched to this field. + StringMatcher string_match = 3; + + // If specified, a match occurs if and only if the target value is a bool value and is equal + // to this field. + bool bool_match = 4; + + // If specified, value match will be performed based on whether the path is referring to a + // valid primitive value in the metadata. If the path is referring to a non-primitive value, + // the result is always not matched. + bool present_match = 5; + + // If specified, a match occurs if and only if the target value is a list value and + // is matched to this field. + ListMatcher list_match = 6; + } +} + +// Specifies the way to match a list value. +message ListMatcher { + option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.v3.ListMatcher"; + + oneof match_pattern { + option (validate.required) = true; + + // If specified, at least one of the values in the list must match the value specified. 
+ ValueMatcher one_of = 1; + } +} diff --git a/tools/api_boost/testdata/deprecate.cc.gold b/tools/api_boost/testdata/deprecate.cc.gold index bee1dacfe56e..0158efa26d9a 100644 --- a/tools/api_boost/testdata/deprecate.cc.gold +++ b/tools/api_boost/testdata/deprecate.cc.gold @@ -1,11 +1,11 @@ #include "envoy/config/cluster/v4alpha/cluster.pb.h" #include "envoy/config/route/v4alpha/route_components.pb.h" -#include "envoy/type/matcher/v3/string.pb.h" +#include "envoy/type/matcher/v4alpha/string.pb.h" void test() { envoy::config::route::v4alpha::VirtualHost vhost; vhost.hidden_envoy_deprecated_per_filter_config(); vhost.mutable_hidden_envoy_deprecated_per_filter_config(); - static_cast(envoy::type::matcher::v3::StringMatcher::MatchPatternCase::kHiddenEnvoyDeprecatedRegex); + static_cast(envoy::type::matcher::v4alpha::StringMatcher::MatchPatternCase::kHiddenEnvoyDeprecatedRegex); static_cast(envoy::config::cluster::v4alpha::Cluster::hidden_envoy_deprecated_ORIGINAL_DST_LB); } From a011189376b08fff4ff7e274e490e4a535b45d16 Mon Sep 17 00:00:00 2001 From: Rama Chavali Date: Mon, 4 May 2020 18:41:59 +0530 Subject: [PATCH 083/909] ext_authz: Log ext_authz gRPC messages at trace level (#11041) Commit Message: log external auth request/response proto messages Additional Description: Logs ext auth request/response proto messages for debugging at trace level. Risk Level: Low Testing: N/A Docs Changes: N/A Release Notes: N/A Signed-off-by: Rama Chavali --- source/common/common/logger.h | 1 + .../filters/common/ext_authz/ext_authz_grpc_impl.cc | 4 ++++ .../extensions/filters/common/ext_authz/ext_authz_grpc_impl.h | 4 +++- 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/source/common/common/logger.h b/source/common/common/logger.h index 384564d7c620..b7a095eb8c9e 100644 --- a/source/common/common/logger.h +++ b/source/common/common/logger.h @@ -34,6 +34,7 @@ namespace Logger { FUNCTION(conn_handler) \ FUNCTION(decompression) \ FUNCTION(dubbo) \ + FUNCTION(ext_authz) \ FUNCTION(rocketmq) \ FUNCTION(file) \ FUNCTION(filter) \ diff --git a/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc b/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc index d96756eaf43b..b5ca79aeb1b5 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc +++ b/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc @@ -41,12 +41,14 @@ void GrpcClientImpl::check(RequestCallbacks& callbacks, ASSERT(callbacks_ == nullptr); callbacks_ = &callbacks; + ENVOY_LOG(trace, "Sending CheckRequest: {}", request.DebugString()); request_ = async_client_->send(service_method_, request, *this, parent_span, Http::AsyncClient::RequestOptions().setTimeout(timeout_)); } void GrpcClientImpl::onSuccess(std::unique_ptr&& response, Tracing::Span& span) { + ENVOY_LOG(trace, "Received CheckResponse: {}", response->DebugString()); ResponsePtr authz_response = std::make_unique(Response{}); if (response->status().code() == Grpc::Status::WellKnownGrpcStatus::Ok) { span.setTag(TracingConstants::get().TraceStatus, TracingConstants::get().TraceOk); @@ -73,6 +75,8 @@ void GrpcClientImpl::onSuccess(std::unique_ptr { public: // TODO(gsagula): remove `use_alpha` param when V2Alpha gets deprecated. 
GrpcClientImpl(Grpc::RawAsyncClientPtr&& async_client, From bbdae2c755d3daa6c118b38abd3d5012df9c1529 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Guti=C3=A9rrez=20Segal=C3=A9s?= Date: Mon, 4 May 2020 11:35:08 -0400 Subject: [PATCH 084/909] add new runtime gauge: deprecated_feature_active (#10884) Additional Description: This is preferred to deprecated_feature_use -- which might some day be deprecated (no pun intended) -- since it does not get carried over on hot restarts. Risk Level: Low (new stat) Testing: enhanced tests Docs Changes: new stat documented Release Notes: n/a Signed-off-by: Raul Gutierrez Segales --- docs/root/configuration/operations/runtime.rst | 1 + source/common/runtime/runtime_impl.cc | 4 ++++ source/common/runtime/runtime_impl.h | 1 + test/common/protobuf/utility_test.cc | 8 +++++++- 4 files changed, 13 insertions(+), 1 deletion(-) diff --git a/docs/root/configuration/operations/runtime.rst b/docs/root/configuration/operations/runtime.rst index 9546f1970b08..f0160e778347 100644 --- a/docs/root/configuration/operations/runtime.rst +++ b/docs/root/configuration/operations/runtime.rst @@ -274,6 +274,7 @@ The file system runtime provider emits some statistics in the *runtime.* namespa admin_overrides_active, Gauge, 1 if any admin overrides are active otherwise 0 deprecated_feature_use, Counter, Total number of times deprecated features were used. Detailed information about the feature used will be logged to warning logs in the form "Using deprecated option 'X' from file Y". + deprecated_feature_seen_since_process_start, Gauge, Number of times deprecated features were used. This is not carried over during hot restarts. load_error, Counter, Total number of load attempts that resulted in an error in any layer load_success, Counter, Total number of load attempts that were successful at all layers num_keys, Gauge, Number of keys currently loaded diff --git a/source/common/runtime/runtime_impl.cc b/source/common/runtime/runtime_impl.cc index 1ed9faf2c432..89d69295242c 100644 --- a/source/common/runtime/runtime_impl.cc +++ b/source/common/runtime/runtime_impl.cc @@ -168,6 +168,10 @@ bool SnapshotImpl::deprecatedFeatureEnabled(absl::string_view key, bool default_ // The feature is allowed. It is assumed this check is called when the feature // is about to be used, so increment the feature use stat. stats_.deprecated_feature_use_.inc(); + + // Similar to the above, but a gauge that isn't imported during a hot restart. 
+ stats_.deprecated_feature_seen_since_process_start_.inc(); + #ifdef ENVOY_DISABLE_DEPRECATED_FEATURES return false; #endif diff --git a/source/common/runtime/runtime_impl.h b/source/common/runtime/runtime_impl.h index 8fa838e88cd8..10151cf4dbd5 100644 --- a/source/common/runtime/runtime_impl.h +++ b/source/common/runtime/runtime_impl.h @@ -57,6 +57,7 @@ class RandomGeneratorImpl : public RandomGenerator { COUNTER(override_dir_exists) \ COUNTER(override_dir_not_exists) \ GAUGE(admin_overrides_active, NeverImport) \ + GAUGE(deprecated_feature_seen_since_process_start, NeverImport) \ GAUGE(num_keys, NeverImport) \ GAUGE(num_layers, NeverImport) diff --git a/test/common/protobuf/utility_test.cc b/test/common/protobuf/utility_test.cc index eedc2c433eb8..bffc82971a08 100644 --- a/test/common/protobuf/utility_test.cc +++ b/test/common/protobuf/utility_test.cc @@ -1396,7 +1396,10 @@ class DeprecatedFieldsTest : public testing::TestWithParam { protected: DeprecatedFieldsTest() : with_upgrade_(GetParam()), api_(Api::createApiForTest(store_)), - runtime_deprecated_feature_use_(store_.counter("runtime.deprecated_feature_use")) { + runtime_deprecated_feature_use_(store_.counter("runtime.deprecated_feature_use")), + deprecated_feature_seen_since_process_start_( + store_.gauge("runtime.deprecated_feature_seen_since_process_start", + Stats::Gauge::ImportMode::NeverImport)) { envoy::config::bootstrap::v3::LayeredRuntime config; config.add_layers()->mutable_admin_layer(); loader_ = std::make_unique(Runtime::LoaderPtr{ @@ -1424,6 +1427,7 @@ class DeprecatedFieldsTest : public testing::TestWithParam { Runtime::MockRandomGenerator rand_; std::unique_ptr loader_; Stats::Counter& runtime_deprecated_feature_use_; + Stats::Gauge& deprecated_feature_seen_since_process_start_; NiceMock local_info_; Init::MockManager init_manager_; NiceMock validation_visitor_; @@ -1446,6 +1450,7 @@ TEST_P(DeprecatedFieldsTest, NoErrorWhenDeprecatedFieldsUnused) { // Fatal checks for a non-deprecated field should cause no problem. checkForDeprecation(base); EXPECT_EQ(0, runtime_deprecated_feature_use_.value()); + EXPECT_EQ(0, deprecated_feature_seen_since_process_start_.value()); } TEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(IndividualFieldDeprecated)) { @@ -1456,6 +1461,7 @@ TEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(IndividualFieldDeprecated)) "Using deprecated option 'envoy.test.deprecation_test.Base.is_deprecated'", checkForDeprecation(base)); EXPECT_EQ(1, runtime_deprecated_feature_use_.value()); + EXPECT_EQ(1, deprecated_feature_seen_since_process_start_.value()); } // Use of a deprecated and disallowed field should result in an exception. From 1aed8d28c29fefeaa531f50ea3e1ff6d41d4fae3 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Mon, 4 May 2020 14:16:17 -0400 Subject: [PATCH 085/909] connection: refactoring watermark logic (#10997) Commit Message: cleaning up connection watermark logic: intended as a no-op refactor. Additional Description: I think when the connection went from boolean to reference counted, we (probably I) should have replaced the bool with an int instead of extending the bool with an int. IMO it's much easier to reason about this way. Risk Level: Medium (intended as no-op, but watermarks...) Testing: enhanced existing tests. 
Docs Changes: n/a Release Notes: n/a Signed-off-by: Alyssa Wilk --- source/common/network/connection_impl.cc | 55 ++++++++-------- source/common/network/connection_impl.h | 9 ++- test/common/network/connection_impl_test.cc | 62 ++++++++++++------- test/integration/filters/pause_filter.cc | 4 +- .../filters/random_pause_filter.cc | 4 +- 5 files changed, 75 insertions(+), 59 deletions(-) diff --git a/source/common/network/connection_impl.cc b/source/common/network/connection_impl.cc index fed4bc8aa320..f44aee154ead 100644 --- a/source/common/network/connection_impl.cc +++ b/source/common/network/connection_impl.cc @@ -48,15 +48,15 @@ ConnectionImpl::ConnectionImpl(Event::Dispatcher& dispatcher, ConnectionSocketPt : ConnectionImplBase(dispatcher, next_global_id_++), transport_socket_(std::move(transport_socket)), socket_(std::move(socket)), stream_info_(stream_info), filter_manager_(*this), - write_buffer_( - dispatcher.getWatermarkFactory().create([this]() -> void { this->onLowWatermark(); }, - [this]() -> void { this->onHighWatermark(); })), - read_enabled_(true), above_high_watermark_(false), detect_early_close_(true), + write_buffer_(dispatcher.getWatermarkFactory().create( + [this]() -> void { this->onWriteBufferLowWatermark(); }, + [this]() -> void { this->onWriteBufferHighWatermark(); })), + write_buffer_above_high_watermark_(false), detect_early_close_(true), enable_half_close_(false), read_end_stream_raised_(false), read_end_stream_(false), write_end_stream_(false), current_write_end_stream_(false), dispatch_buffered_data_(false) { // Treat the lack of a valid fd (which in practice only happens if we run out of FDs) as an OOM // condition and just crash. - RELEASE_ASSERT(SOCKET_VALID(ioHandle().fd()), ""); + RELEASE_ASSERT(SOCKET_VALID(ConnectionImpl::ioHandle().fd()), ""); if (!connected) { connecting_ = true; @@ -71,8 +71,8 @@ ConnectionImpl::ConnectionImpl(Event::Dispatcher& dispatcher, ConnectionSocketPt // We never ask for both early close and read at the same time. If we are reading, we want to // consume all available data. file_event_ = dispatcher_.createFileEvent( - ioHandle().fd(), [this](uint32_t events) -> void { onFileEvent(events); }, trigger, - Event::FileReadyType::Read | Event::FileReadyType::Write); + ConnectionImpl::ioHandle().fd(), [this](uint32_t events) -> void { onFileEvent(events); }, + trigger, Event::FileReadyType::Read | Event::FileReadyType::Write); transport_socket_->setTransportSocketCallbacks(*this); } @@ -268,7 +268,7 @@ void ConnectionImpl::noDelay(bool enable) { } void ConnectionImpl::onRead(uint64_t read_buffer_size) { - if (!read_enabled_ || inDelayedClose()) { + if (read_disable_count_ != 0 || inDelayedClose()) { return; } ASSERT(ioHandle().isOpen()); @@ -301,7 +301,7 @@ void ConnectionImpl::enableHalfClose(bool enabled) { // This code doesn't correctly ensure that EV_CLOSE isn't set if reading is disabled // when enabling half-close. This could be fixed, but isn't needed right now, so just // ASSERT that it doesn't happen. 
- ASSERT(!enabled || read_enabled_); + ASSERT(!enabled || read_disable_count_ == 0); enable_half_close_ = enabled; } @@ -311,8 +311,8 @@ void ConnectionImpl::readDisable(bool disable) { ASSERT(state() == State::Open); ASSERT(file_event_ != nullptr); - ENVOY_CONN_LOG(trace, "readDisable: enabled={} disable={} state={}", *this, read_enabled_, - disable, static_cast(state())); + ENVOY_CONN_LOG(trace, "readDisable: enabled={} disable_count={} state={}", *this, + read_disable_count_, disable, static_cast(state())); // When we disable reads, we still allow for early close notifications (the equivalent of // EPOLLRDHUP for an epoll backend). For backends that support it, this allows us to apply @@ -322,17 +322,16 @@ void ConnectionImpl::readDisable(bool disable) { // closed TCP connections in the sense that we assume that a remote FIN means the remote intends a // full close. if (disable) { - if (!read_enabled_) { - ++read_disable_count_; - return; - } - ASSERT(read_enabled_); - read_enabled_ = false; + ++read_disable_count_; if (state() != State::Open || file_event_ == nullptr) { // If readDisable is called on a closed connection, do not crash. return; } + if (read_disable_count_ > 1) { + // The socket has already been read disabled. + return; + } // If half-close semantics are enabled, we never want early close notifications; we // always want to read all available data, even if the other side has closed. @@ -342,13 +341,11 @@ void ConnectionImpl::readDisable(bool disable) { file_event_->setEnabled(Event::FileReadyType::Write); } } else { - if (read_disable_count_ > 0) { - --read_disable_count_; + --read_disable_count_; + if (read_disable_count_ != 0) { + // The socket should stay disabled. return; } - ASSERT(!read_enabled_); - read_enabled_ = true; - if (state() != State::Open || file_event_ == nullptr) { // If readDisable is called on a closed connection, do not crash. return; @@ -383,7 +380,7 @@ bool ConnectionImpl::readEnabled() const { // Calls to readEnabled on a closed socket are considered to be an error. 
ASSERT(state() == State::Open); ASSERT(file_event_ != nullptr); - return read_enabled_; + return read_disable_count_ == 0; } void ConnectionImpl::addBytesSentCallback(BytesSentCb cb) { @@ -471,19 +468,19 @@ void ConnectionImpl::setBufferLimits(uint32_t limit) { } } -void ConnectionImpl::onLowWatermark() { +void ConnectionImpl::onWriteBufferLowWatermark() { ENVOY_CONN_LOG(debug, "onBelowWriteBufferLowWatermark", *this); - ASSERT(above_high_watermark_); - above_high_watermark_ = false; + ASSERT(write_buffer_above_high_watermark_); + write_buffer_above_high_watermark_ = false; for (ConnectionCallbacks* callback : callbacks_) { callback->onBelowWriteBufferLowWatermark(); } } -void ConnectionImpl::onHighWatermark() { +void ConnectionImpl::onWriteBufferHighWatermark() { ENVOY_CONN_LOG(debug, "onAboveWriteBufferHighWatermark", *this); - ASSERT(!above_high_watermark_); - above_high_watermark_ = true; + ASSERT(!write_buffer_above_high_watermark_); + write_buffer_above_high_watermark_ = true; for (ConnectionCallbacks* callback : callbacks_) { callback->onAboveWriteBufferHighWatermark(); } diff --git a/source/common/network/connection_impl.h b/source/common/network/connection_impl.h index 26d04eae9348..6e8c1eb65518 100644 --- a/source/common/network/connection_impl.h +++ b/source/common/network/connection_impl.h @@ -82,7 +82,7 @@ class ConnectionImpl : public ConnectionImplBase, public TransportSocketCallback void setBufferLimits(uint32_t limit) override; uint32_t bufferLimit() const override { return read_buffer_limit_; } bool localAddressRestored() const override { return socket_->localAddressRestored(); } - bool aboveHighWatermark() const override { return above_high_watermark_; } + bool aboveHighWatermark() const override { return write_buffer_above_high_watermark_; } const ConnectionSocket::OptionsSharedPtr& socketOptions() const override { return socket_->options(); } @@ -127,8 +127,8 @@ class ConnectionImpl : public ConnectionImplBase, public TransportSocketCallback void closeSocket(ConnectionEvent close_type); - void onLowWatermark(); - void onHighWatermark(); + void onWriteBufferLowWatermark(); + void onWriteBufferHighWatermark(); TransportSocketPtr transport_socket_; ConnectionSocketPtr socket_; @@ -174,8 +174,7 @@ class ConnectionImpl : public ConnectionImplBase, public TransportSocketCallback uint64_t last_write_buffer_size_{}; Buffer::Instance* current_write_buffer_{}; uint32_t read_disable_count_{0}; - bool read_enabled_ : 1; - bool above_high_watermark_ : 1; + bool write_buffer_above_high_watermark_ : 1; bool detect_early_close_ : 1; bool enable_half_close_ : 1; bool read_end_stream_raised_ : 1; diff --git a/test/common/network/connection_impl_test.cc b/test/common/network/connection_impl_test.cc index ba7c7092c7de..fbeb06519b09 100644 --- a/test/common/network/connection_impl_test.cc +++ b/test/common/network/connection_impl_test.cc @@ -90,7 +90,7 @@ TEST_P(ConnectionImplDeathTest, BadFd) { ConnectionImpl(*dispatcher, std::make_unique(std::move(io_handle), nullptr, nullptr), Network::Test::createRawBufferSocket(), stream_info, false), - ".*assert failure: SOCKET_VALID\\(ioHandle\\(\\)\\.fd\\(\\)\\).*"); + ".*assert failure: SOCKET_VALID\\(ConnectionImpl::ioHandle\\(\\)\\.fd\\(\\)\\).*"); } class ConnectionImplTest : public testing::TestWithParam { @@ -188,7 +188,7 @@ class ConnectionImplTest : public testing::TestWithParam { Event::FileReadyCb* file_ready_cb_; }; - ConnectionMocks createConnectionMocks() { + ConnectionMocks createConnectionMocks(bool create_timer = true) { auto 
dispatcher = std::make_unique>(); EXPECT_CALL(dispatcher->buffer_factory_, create_(_, _)) .WillRepeatedly(Invoke([](std::function below_low, @@ -198,9 +198,12 @@ class ConnectionImplTest : public testing::TestWithParam { return new Buffer::WatermarkBuffer(below_low, above_high); })); - // This timer will be returned (transferring ownership) to the ConnectionImpl when createTimer() - // is called to allocate the delayed close timer. - Event::MockTimer* timer = new Event::MockTimer(dispatcher.get()); + Event::MockTimer* timer = nullptr; + if (create_timer) { + // This timer will be returned (transferring ownership) to the ConnectionImpl when + // createTimer() is called to allocate the delayed close timer. + timer = new Event::MockTimer(dispatcher.get()); + } NiceMock* file_event = new NiceMock; EXPECT_CALL(*dispatcher, createFileEvent_(0, _, _, _)) @@ -482,24 +485,41 @@ TEST_P(ConnectionImplTest, ConnectionStats) { // Ensure the new counter logic in ReadDisable avoids tripping asserts in ReadDisable guarding // against actual enabling twice in a row. TEST_P(ConnectionImplTest, ReadDisable) { - setUpBasicConnection(); - - client_connection_->readDisable(true); - client_connection_->readDisable(false); - - client_connection_->readDisable(true); - client_connection_->readDisable(true); - client_connection_->readDisable(false); - client_connection_->readDisable(false); + ConnectionMocks mocks = createConnectionMocks(false); + IoHandlePtr io_handle = std::make_unique(0); + auto connection = std::make_unique( + *mocks.dispatcher_, + std::make_unique(std::move(io_handle), nullptr, nullptr), + std::move(mocks.transport_socket_), stream_info_, true); - client_connection_->readDisable(true); - client_connection_->readDisable(true); - client_connection_->readDisable(false); - client_connection_->readDisable(true); - client_connection_->readDisable(false); - client_connection_->readDisable(false); + EXPECT_CALL(*mocks.file_event_, setEnabled(_)); + connection->readDisable(true); + EXPECT_CALL(*mocks.file_event_, setEnabled(_)); + connection->readDisable(false); + + EXPECT_CALL(*mocks.file_event_, setEnabled(_)); + connection->readDisable(true); + EXPECT_CALL(*mocks.file_event_, setEnabled(_)).Times(0); + connection->readDisable(true); + EXPECT_CALL(*mocks.file_event_, setEnabled(_)).Times(0); + connection->readDisable(false); + EXPECT_CALL(*mocks.file_event_, setEnabled(_)); + connection->readDisable(false); + + EXPECT_CALL(*mocks.file_event_, setEnabled(_)); + connection->readDisable(true); + EXPECT_CALL(*mocks.file_event_, setEnabled(_)).Times(0); + connection->readDisable(true); + EXPECT_CALL(*mocks.file_event_, setEnabled(_)).Times(0); + connection->readDisable(false); + EXPECT_CALL(*mocks.file_event_, setEnabled(_)).Times(0); + connection->readDisable(true); + EXPECT_CALL(*mocks.file_event_, setEnabled(_)).Times(0); + connection->readDisable(false); + EXPECT_CALL(*mocks.file_event_, setEnabled(_)); + connection->readDisable(false); - disconnect(false); + connection->close(ConnectionCloseType::NoFlush); } // The HTTP/1 codec handles pipelined connections by relying on readDisable(false) resulting in the diff --git a/test/integration/filters/pause_filter.cc b/test/integration/filters/pause_filter.cc index 45eaeefbb593..b7f9aa1a3c36 100644 --- a/test/integration/filters/pause_filter.cc +++ b/test/integration/filters/pause_filter.cc @@ -30,7 +30,7 @@ class TestPauseFilter : public Http::PassThroughFilter { // If this is the second stream to decode headers and we're at high watermark. 
force low // watermark state if (number_of_decode_calls_ref_ == 2 && connection()->aboveHighWatermark()) { - connection()->onLowWatermark(); + connection()->onWriteBufferLowWatermark(); } } return PassThroughFilter::decodeData(buf, end_stream); @@ -43,7 +43,7 @@ class TestPauseFilter : public Http::PassThroughFilter { // If this is the first stream to encode headers and we're not at high watermark, force high // watermark state. if (number_of_encode_calls_ref_ == 1 && !connection()->aboveHighWatermark()) { - connection()->onHighWatermark(); + connection()->onWriteBufferHighWatermark(); } } return PassThroughFilter::encodeData(buf, end_stream); diff --git a/test/integration/filters/random_pause_filter.cc b/test/integration/filters/random_pause_filter.cc index 6b883bef5341..e1a370ca015c 100644 --- a/test/integration/filters/random_pause_filter.cc +++ b/test/integration/filters/random_pause_filter.cc @@ -25,9 +25,9 @@ class RandomPauseFilter : public Http::PassThroughFilter { // Roughly every 5th encode (5 being arbitrary) swap the watermark state. if (random % 5 == 0) { if (connection()->aboveHighWatermark()) { - connection()->onLowWatermark(); + connection()->onWriteBufferLowWatermark(); } else { - connection()->onHighWatermark(); + connection()->onWriteBufferHighWatermark(); } } return Http::PassThroughFilter::encodeData(buf, end_stream); From fd67664afb5b45374701ec873a9f467e9b943dd3 Mon Sep 17 00:00:00 2001 From: rulex123 <29862113+rulex123@users.noreply.github.com> Date: Mon, 4 May 2020 20:30:02 +0200 Subject: [PATCH 086/909] [admin]: extract runtime and listeners handlers to separate files (#10872) extract runtime and listeners handlers from admin.h|cc and into separate classes (part of #5505 ) Signed-off-by: Erica Manno --- source/server/http/BUILD | 36 ++++- source/server/http/admin.cc | 163 ++--------------------- source/server/http/admin.h | 24 +--- source/server/http/listeners_handler.cc | 61 +++++++++ source/server/http/listeners_handler.h | 38 ++++++ source/server/http/runtime_handler.cc | 120 +++++++++++++++++ source/server/http/runtime_handler.h | 33 +++++ test/server/http/BUILD | 6 + test/server/http/admin_test.cc | 109 --------------- test/server/http/runtime_handler_test.cc | 119 +++++++++++++++++ 10 files changed, 425 insertions(+), 284 deletions(-) create mode 100644 source/server/http/listeners_handler.cc create mode 100644 source/server/http/listeners_handler.h create mode 100644 source/server/http/runtime_handler.cc create mode 100644 source/server/http/runtime_handler.h create mode 100644 test/server/http/runtime_handler_test.cc diff --git a/source/server/http/BUILD b/source/server/http/BUILD index e849e1b2bfb8..2e4d47c4c694 100644 --- a/source/server/http/BUILD +++ b/source/server/http/BUILD @@ -15,6 +15,8 @@ envoy_cc_library( deps = [ ":admin_filter_lib", ":config_tracker_lib", + ":listeners_handler_lib", + ":runtime_handler_lib", ":stats_handler_lib", ":utils_lib", "//include/envoy/filesystem:filesystem_interface", @@ -22,7 +24,6 @@ envoy_cc_library( "//include/envoy/http:request_id_extension_interface", "//include/envoy/network:filter_interface", "//include/envoy/network:listen_socket_interface", - "//include/envoy/runtime:runtime_interface", "//include/envoy/server:admin_interface", "//include/envoy/server:hot_restart_interface", "//include/envoy/server:instance_interface", @@ -123,6 +124,39 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "listeners_handler_lib", + srcs = ["listeners_handler.cc"], + hdrs = ["listeners_handler.h"], + deps = [ + 
":handler_ctx_lib", + ":utils_lib", + "//include/envoy/http:codes_interface", + "//include/envoy/server:admin_interface", + "//include/envoy/server:instance_interface", + "//source/common/buffer:buffer_lib", + "//source/common/http:codes_lib", + "//source/common/http:header_map_lib", + "@envoy_api//envoy/admin/v3:pkg_cc_proto", + ], +) + +envoy_cc_library( + name = "runtime_handler_lib", + srcs = ["runtime_handler.cc"], + hdrs = ["runtime_handler.h"], + deps = [ + ":handler_ctx_lib", + ":utils_lib", + "//include/envoy/http:codes_interface", + "//include/envoy/server:admin_interface", + "//include/envoy/server:instance_interface", + "//source/common/buffer:buffer_lib", + "//source/common/http:codes_lib", + "//source/common/http:header_map_lib", + ], +) + envoy_cc_library( name = "utils_lib", srcs = ["utils.cc"], diff --git a/source/server/http/admin.cc b/source/server/http/admin.cc index a9bcab103f4c..d2a0bd87c317 100644 --- a/source/server/http/admin.cc +++ b/source/server/http/admin.cc @@ -4,7 +4,6 @@ #include #include #include -#include #include #include #include @@ -12,14 +11,12 @@ #include "envoy/admin/v3/certs.pb.h" #include "envoy/admin/v3/clusters.pb.h" #include "envoy/admin/v3/config_dump.pb.h" -#include "envoy/admin/v3/listeners.pb.h" #include "envoy/admin/v3/memory.pb.h" #include "envoy/admin/v3/metrics.pb.h" #include "envoy/admin/v3/mutex_stats.pb.h" #include "envoy/admin/v3/server_info.pb.h" #include "envoy/config/core/v3/health_check.pb.h" #include "envoy/filesystem/filesystem.h" -#include "envoy/runtime/runtime.h" #include "envoy/server/hot_restart.h" #include "envoy/server/instance.h" #include "envoy/server/options.h" @@ -487,24 +484,6 @@ void AdminImpl::writeClustersAsText(Buffer::Instance& response) { } } -void AdminImpl::writeListenersAsJson(Buffer::Instance& response) { - envoy::admin::v3::Listeners listeners; - for (const auto& listener : server_.listenerManager().listeners()) { - envoy::admin::v3::ListenerStatus& listener_status = *listeners.add_listener_statuses(); - listener_status.set_name(listener.get().name()); - Network::Utility::addressToProtobufAddress(*listener.get().listenSocketFactory().localAddress(), - *listener_status.mutable_local_address()); - } - response.add(MessageUtil::getJsonStringFromMessage(listeners, true)); // pretty-print -} - -void AdminImpl::writeListenersAsText(Buffer::Instance& response) { - for (const auto& listener : server_.listenerManager().listeners()) { - response.add(fmt::format("{}::{}\n", listener.get().name(), - listener.get().listenSocketFactory().localAddress()->asString())); - } -} - Http::Code AdminImpl::handlerClusters(absl::string_view url, Http::ResponseHeaderMap& response_headers, Buffer::Instance& response, AdminStream&) { @@ -756,17 +735,6 @@ Http::Code AdminImpl::handlerMemory(absl::string_view, Http::ResponseHeaderMap& return Http::Code::OK; } -Http::Code AdminImpl::handlerDrainListeners(absl::string_view url, Http::ResponseHeaderMap&, - Buffer::Instance& response, AdminStream&) { - const Http::Utility::QueryParams params = Http::Utility::parseQueryString(url); - ListenerManager::StopListenersType stop_listeners_type = - params.find("inboundonly") != params.end() ? 
ListenerManager::StopListenersType::InboundOnly - : ListenerManager::StopListenersType::All; - server_.listenerManager().stopListeners(stop_listeners_type); - response.add("OK\n"); - return Http::Code::OK; -} - Http::Code AdminImpl::handlerServerInfo(absl::string_view, Http::ResponseHeaderMap& headers, Buffer::Instance& response, AdminStream&) { const std::time_t current_time = @@ -811,21 +779,6 @@ Http::Code AdminImpl::handlerQuitQuitQuit(absl::string_view, Http::ResponseHeade return Http::Code::OK; } -Http::Code AdminImpl::handlerListenerInfo(absl::string_view url, - Http::ResponseHeaderMap& response_headers, - Buffer::Instance& response, AdminStream&) { - const Http::Utility::QueryParams query_params = Http::Utility::parseQueryString(url); - const auto format_value = Utility::formatParam(query_params); - - if (format_value.has_value() && format_value.value() == "json") { - writeListenersAsJson(response); - response_headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Json); - } else { - writeListenersAsText(response); - } - return Http::Code::OK; -} - Http::Code AdminImpl::handlerCerts(absl::string_view, Http::ResponseHeaderMap& response_headers, Buffer::Instance& response, AdminStream&) { // This set is used to track distinct certificates. We may have multiple listeners, upstreams, etc @@ -847,106 +800,6 @@ Http::Code AdminImpl::handlerCerts(absl::string_view, Http::ResponseHeaderMap& r return Http::Code::OK; } -Http::Code AdminImpl::handlerRuntime(absl::string_view url, - Http::ResponseHeaderMap& response_headers, - Buffer::Instance& response, AdminStream&) { - const Http::Utility::QueryParams params = Http::Utility::parseQueryString(url); - response_headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Json); - - // TODO(jsedgwick): Use proto to structure this output instead of arbitrary JSON. - const auto& layers = server_.runtime().snapshot().getLayers(); - - std::vector layer_names; - layer_names.reserve(layers.size()); - std::map> entries; - for (const auto& layer : layers) { - layer_names.push_back(ValueUtil::stringValue(layer->name())); - for (const auto& value : layer->values()) { - const auto found = entries.find(value.first); - if (found == entries.end()) { - entries.emplace(value.first, std::vector{}); - } - } - } - - for (const auto& layer : layers) { - for (auto& entry : entries) { - const auto found = layer->values().find(entry.first); - const auto& entry_value = - found == layer->values().end() ? 
EMPTY_STRING : found->second.raw_string_value_; - entry.second.push_back(entry_value); - } - } - - ProtobufWkt::Struct layer_entries; - auto* layer_entry_fields = layer_entries.mutable_fields(); - for (const auto& entry : entries) { - std::vector layer_entry_values; - layer_entry_values.reserve(entry.second.size()); - std::string final_value; - for (const auto& value : entry.second) { - if (!value.empty()) { - final_value = value; - } - layer_entry_values.push_back(ValueUtil::stringValue(value)); - } - - ProtobufWkt::Struct layer_entry_value; - auto* layer_entry_value_fields = layer_entry_value.mutable_fields(); - - (*layer_entry_value_fields)["final_value"] = ValueUtil::stringValue(final_value); - (*layer_entry_value_fields)["layer_values"] = ValueUtil::listValue(layer_entry_values); - (*layer_entry_fields)[entry.first] = ValueUtil::structValue(layer_entry_value); - } - - ProtobufWkt::Struct runtime; - auto* fields = runtime.mutable_fields(); - - (*fields)["layers"] = ValueUtil::listValue(layer_names); - (*fields)["entries"] = ValueUtil::structValue(layer_entries); - - response.add(MessageUtil::getJsonStringFromMessage(runtime, true, true)); - return Http::Code::OK; -} - -bool AdminImpl::isFormUrlEncoded(const Http::HeaderEntry* content_type) const { - if (content_type == nullptr) { - return false; - } - - return content_type->value().getStringView() == - Http::Headers::get().ContentTypeValues.FormUrlEncoded; -} - -Http::Code AdminImpl::handlerRuntimeModify(absl::string_view url, Http::ResponseHeaderMap&, - Buffer::Instance& response, AdminStream& admin_stream) { - Http::Utility::QueryParams params = Http::Utility::parseQueryString(url); - if (params.empty()) { - // Check if the params are in the request's body. - if (admin_stream.getRequestBody() != nullptr && - isFormUrlEncoded(admin_stream.getRequestHeaders().ContentType())) { - params = Http::Utility::parseFromBody(admin_stream.getRequestBody()->toString()); - } - - if (params.empty()) { - response.add("usage: /runtime_modify?key1=value1&key2=value2&keyN=valueN\n"); - response.add(" or send the parameters as form values\n"); - response.add("use an empty value to remove a previously added override"); - return Http::Code::BadRequest; - } - } - std::unordered_map overrides; - overrides.insert(params.begin(), params.end()); - try { - server_.runtime().mergeValues(overrides); - } catch (const EnvoyException& e) { - response.add(e.what()); - return Http::Code::ServiceUnavailable; - } - response.add("OK\n"); - return Http::Code::OK; -} - Http::Code AdminImpl::handlerReopenLogs(absl::string_view, Http::ResponseHeaderMap&, Buffer::Instance& response, AdminStream&) { server_.accessLogManager().reopen(); @@ -992,6 +845,7 @@ AdminImpl::AdminImpl(const std::string& profile_path, Server::Instance& server) Http::ConnectionManagerImpl::generateTracingStats("http.admin.", no_op_store_)), route_config_provider_(server.timeSource()), scoped_route_config_provider_(server.timeSource()), stats_handler_(server), + runtime_handler_(server), listeners_handler_(server), // TODO(jsedgwick) add /runtime_reset endpoint that removes all admin-set values handlers_{ {"/", "Admin home page", MAKE_ADMIN_HANDLER(handlerAdminHome), false, false}, @@ -1022,8 +876,8 @@ AdminImpl::AdminImpl(const std::string& profile_path, Server::Instance& server) true}, {"/reset_counters", "reset all counters to zero", MAKE_ADMIN_HANDLER(stats_handler_.handlerResetCounters), false, true}, - {"/drain_listeners", "drain listeners", MAKE_ADMIN_HANDLER(handlerDrainListeners), false, - true}, 
+ {"/drain_listeners", "drain listeners", + MAKE_ADMIN_HANDLER(listeners_handler_.handlerDrainListeners), false, true}, {"/server_info", "print server version/status information", MAKE_ADMIN_HANDLER(handlerServerInfo), false, false}, {"/ready", "print server state, return 200 if LIVE, otherwise return 503", @@ -1040,11 +894,12 @@ AdminImpl::AdminImpl(const std::string& profile_path, Server::Instance& server) MAKE_ADMIN_HANDLER(stats_handler_.handlerStatsRecentLookupsDisable), false, true}, {"/stats/recentlookups/enable", "enable recording of reset stat-name lookup names", MAKE_ADMIN_HANDLER(stats_handler_.handlerStatsRecentLookupsEnable), false, true}, - {"/listeners", "print listener info", MAKE_ADMIN_HANDLER(handlerListenerInfo), false, - false}, - {"/runtime", "print runtime values", MAKE_ADMIN_HANDLER(handlerRuntime), false, false}, - {"/runtime_modify", "modify runtime values", MAKE_ADMIN_HANDLER(handlerRuntimeModify), - false, true}, + {"/listeners", "print listener info", + MAKE_ADMIN_HANDLER(listeners_handler_.handlerListenerInfo), false, false}, + {"/runtime", "print runtime values", MAKE_ADMIN_HANDLER(runtime_handler_.handlerRuntime), + false, false}, + {"/runtime_modify", "modify runtime values", + MAKE_ADMIN_HANDLER(runtime_handler_.handlerRuntimeModify), false, true}, {"/reopen_logs", "reopen access logs", MAKE_ADMIN_HANDLER(handlerReopenLogs), false, true}, }, diff --git a/source/server/http/admin.h b/source/server/http/admin.h index ca80e8b5bc16..15946837b3ff 100644 --- a/source/server/http/admin.h +++ b/source/server/http/admin.h @@ -17,7 +17,6 @@ #include "envoy/http/request_id_extension.h" #include "envoy/network/filter.h" #include "envoy/network/listen_socket.h" -#include "envoy/runtime/runtime.h" #include "envoy/server/admin.h" #include "envoy/server/instance.h" #include "envoy/server/listener_manager.h" @@ -39,6 +38,8 @@ #include "server/http/admin_filter.h" #include "server/http/config_tracker_impl.h" +#include "server/http/listeners_handler.h" +#include "server/http/runtime_handler.h" #include "server/http/stats_handler.h" #include "extensions/filters/http/common/pass_through_filter.h" @@ -253,12 +254,6 @@ class AdminImpl : public Admin, void writeClustersAsJson(Buffer::Instance& response); void writeClustersAsText(Buffer::Instance& response); - /** - * Helper methods for the /listeners url handler. - */ - void writeListenersAsJson(Buffer::Instance& response); - void writeListenersAsText(Buffer::Instance& response); - /** * Helper methods for the /config_dump url handler. 
*/ @@ -311,9 +306,6 @@ class AdminImpl : public Admin, Http::Code handlerHotRestartVersion(absl::string_view path_and_query, Http::ResponseHeaderMap& response_headers, Buffer::Instance& response, AdminStream&); - Http::Code handlerListenerInfo(absl::string_view path_and_query, - Http::ResponseHeaderMap& response_headers, - Buffer::Instance& response, AdminStream&); Http::Code handlerLogging(absl::string_view path_and_query, Http::ResponseHeaderMap& response_headers, Buffer::Instance& response, AdminStream&); @@ -324,25 +316,15 @@ class AdminImpl : public Admin, Http::Code handlerQuitQuitQuit(absl::string_view path_and_query, Http::ResponseHeaderMap& response_headers, Buffer::Instance& response, AdminStream&); - Http::Code handlerDrainListeners(absl::string_view path_and_query, - Http::ResponseHeaderMap& response_headers, - Buffer::Instance& response, AdminStream&); Http::Code handlerServerInfo(absl::string_view path_and_query, Http::ResponseHeaderMap& response_headers, Buffer::Instance& response, AdminStream&); Http::Code handlerReady(absl::string_view path_and_query, Http::ResponseHeaderMap& response_headers, Buffer::Instance& response, AdminStream&); - Http::Code handlerRuntime(absl::string_view path_and_query, - Http::ResponseHeaderMap& response_headers, Buffer::Instance& response, - AdminStream&); - Http::Code handlerRuntimeModify(absl::string_view path_and_query, - Http::ResponseHeaderMap& response_headers, - Buffer::Instance& response, AdminStream&); Http::Code handlerReopenLogs(absl::string_view path_and_query, Http::ResponseHeaderMap& response_headers, Buffer::Instance& response, AdminStream&); - bool isFormUrlEncoded(const Http::HeaderEntry* content_type) const; class AdminListenSocketFactory : public Network::ListenSocketFactory { public: @@ -443,6 +425,8 @@ class AdminImpl : public Admin, NullRouteConfigProvider route_config_provider_; NullScopedRouteConfigProvider scoped_route_config_provider_; Server::StatsHandler stats_handler_; + Server::RuntimeHandler runtime_handler_; + Server::ListenersHandler listeners_handler_; std::list handlers_; const uint32_t max_request_headers_kb_{Http::DEFAULT_MAX_REQUEST_HEADERS_KB}; const uint32_t max_request_headers_count_{Http::DEFAULT_MAX_HEADERS_COUNT}; diff --git a/source/server/http/listeners_handler.cc b/source/server/http/listeners_handler.cc new file mode 100644 index 000000000000..7751aa78cf82 --- /dev/null +++ b/source/server/http/listeners_handler.cc @@ -0,0 +1,61 @@ +#include "server/http/listeners_handler.h" + +#include "envoy/admin/v3/listeners.pb.h" + +#include "common/http/headers.h" +#include "common/http/utility.h" +#include "common/network/utility.h" + +#include "server/http/utils.h" + +namespace Envoy { +namespace Server { + +ListenersHandler::ListenersHandler(Server::Instance& server) : HandlerContextBase(server) {} + +Http::Code ListenersHandler::handlerDrainListeners(absl::string_view url, Http::ResponseHeaderMap&, + Buffer::Instance& response, AdminStream&) { + const Http::Utility::QueryParams params = Http::Utility::parseQueryString(url); + ListenerManager::StopListenersType stop_listeners_type = + params.find("inboundonly") != params.end() ? 
ListenerManager::StopListenersType::InboundOnly + : ListenerManager::StopListenersType::All; + server_.listenerManager().stopListeners(stop_listeners_type); + response.add("OK\n"); + return Http::Code::OK; +} + +Http::Code ListenersHandler::handlerListenerInfo(absl::string_view url, + Http::ResponseHeaderMap& response_headers, + Buffer::Instance& response, AdminStream&) { + const Http::Utility::QueryParams query_params = Http::Utility::parseQueryString(url); + const auto format_value = Utility::formatParam(query_params); + + if (format_value.has_value() && format_value.value() == "json") { + writeListenersAsJson(response); + response_headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Json); + } else { + writeListenersAsText(response); + } + return Http::Code::OK; +} + +void ListenersHandler::writeListenersAsJson(Buffer::Instance& response) { + envoy::admin::v3::Listeners listeners; + for (const auto& listener : server_.listenerManager().listeners()) { + envoy::admin::v3::ListenerStatus& listener_status = *listeners.add_listener_statuses(); + listener_status.set_name(listener.get().name()); + Network::Utility::addressToProtobufAddress(*listener.get().listenSocketFactory().localAddress(), + *listener_status.mutable_local_address()); + } + response.add(MessageUtil::getJsonStringFromMessage(listeners, true)); // pretty-print +} + +void ListenersHandler::writeListenersAsText(Buffer::Instance& response) { + for (const auto& listener : server_.listenerManager().listeners()) { + response.add(fmt::format("{}::{}\n", listener.get().name(), + listener.get().listenSocketFactory().localAddress()->asString())); + } +} + +} // namespace Server +} // namespace Envoy diff --git a/source/server/http/listeners_handler.h b/source/server/http/listeners_handler.h new file mode 100644 index 000000000000..b16dada0eae7 --- /dev/null +++ b/source/server/http/listeners_handler.h @@ -0,0 +1,38 @@ +#pragma once + +#include "envoy/buffer/buffer.h" +#include "envoy/http/codes.h" +#include "envoy/http/header_map.h" +#include "envoy/server/admin.h" +#include "envoy/server/instance.h" + +#include "server/http/handler_ctx.h" + +#include "absl/strings/string_view.h" + +namespace Envoy { +namespace Server { + +class ListenersHandler : public HandlerContextBase { + +public: + ListenersHandler(Server::Instance& server); + + Http::Code handlerDrainListeners(absl::string_view path_and_query, + Http::ResponseHeaderMap& response_headers, + Buffer::Instance& response, AdminStream&); + + Http::Code handlerListenerInfo(absl::string_view path_and_query, + Http::ResponseHeaderMap& response_headers, + Buffer::Instance& response, AdminStream&); + +private: + /** + * Helper methods for the /listeners url handler. 
+ */ + void writeListenersAsJson(Buffer::Instance& response); + void writeListenersAsText(Buffer::Instance& response); +}; + +} // namespace Server +} // namespace Envoy diff --git a/source/server/http/runtime_handler.cc b/source/server/http/runtime_handler.cc new file mode 100644 index 000000000000..7f7f58f20d05 --- /dev/null +++ b/source/server/http/runtime_handler.cc @@ -0,0 +1,120 @@ +#include "server/http/runtime_handler.h" + +#include +#include +#include + +#include "common/common/empty_string.h" +#include "common/http/headers.h" +#include "common/http/utility.h" + +#include "server/http/utils.h" + +namespace Envoy { +namespace Server { + +RuntimeHandler::RuntimeHandler(Server::Instance& server) : HandlerContextBase(server) {} + +Http::Code RuntimeHandler::handlerRuntime(absl::string_view url, + Http::ResponseHeaderMap& response_headers, + Buffer::Instance& response, AdminStream&) { + const Http::Utility::QueryParams params = Http::Utility::parseQueryString(url); + response_headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Json); + + // TODO(jsedgwick): Use proto to structure this output instead of arbitrary JSON. + const auto& layers = server_.runtime().snapshot().getLayers(); + + std::vector layer_names; + layer_names.reserve(layers.size()); + std::map> entries; + for (const auto& layer : layers) { + layer_names.push_back(ValueUtil::stringValue(layer->name())); + for (const auto& value : layer->values()) { + const auto found = entries.find(value.first); + if (found == entries.end()) { + entries.emplace(value.first, std::vector{}); + } + } + } + + for (const auto& layer : layers) { + for (auto& entry : entries) { + const auto found = layer->values().find(entry.first); + const auto& entry_value = + found == layer->values().end() ? EMPTY_STRING : found->second.raw_string_value_; + entry.second.push_back(entry_value); + } + } + + ProtobufWkt::Struct layer_entries; + auto* layer_entry_fields = layer_entries.mutable_fields(); + for (const auto& entry : entries) { + std::vector layer_entry_values; + layer_entry_values.reserve(entry.second.size()); + std::string final_value; + for (const auto& value : entry.second) { + if (!value.empty()) { + final_value = value; + } + layer_entry_values.push_back(ValueUtil::stringValue(value)); + } + + ProtobufWkt::Struct layer_entry_value; + auto* layer_entry_value_fields = layer_entry_value.mutable_fields(); + + (*layer_entry_value_fields)["final_value"] = ValueUtil::stringValue(final_value); + (*layer_entry_value_fields)["layer_values"] = ValueUtil::listValue(layer_entry_values); + (*layer_entry_fields)[entry.first] = ValueUtil::structValue(layer_entry_value); + } + + ProtobufWkt::Struct runtime; + auto* fields = runtime.mutable_fields(); + + (*fields)["layers"] = ValueUtil::listValue(layer_names); + (*fields)["entries"] = ValueUtil::structValue(layer_entries); + + response.add(MessageUtil::getJsonStringFromMessage(runtime, true, true)); + return Http::Code::OK; +} + +Http::Code RuntimeHandler::handlerRuntimeModify(absl::string_view url, Http::ResponseHeaderMap&, + Buffer::Instance& response, + AdminStream& admin_stream) { + Http::Utility::QueryParams params = Http::Utility::parseQueryString(url); + if (params.empty()) { + // Check if the params are in the request's body. 
+ if (admin_stream.getRequestBody() != nullptr && + isFormUrlEncoded(admin_stream.getRequestHeaders().ContentType())) { + params = Http::Utility::parseFromBody(admin_stream.getRequestBody()->toString()); + } + + if (params.empty()) { + response.add("usage: /runtime_modify?key1=value1&key2=value2&keyN=valueN\n"); + response.add(" or send the parameters as form values\n"); + response.add("use an empty value to remove a previously added override"); + return Http::Code::BadRequest; + } + } + std::unordered_map overrides; + overrides.insert(params.begin(), params.end()); + try { + server_.runtime().mergeValues(overrides); + } catch (const EnvoyException& e) { + response.add(e.what()); + return Http::Code::ServiceUnavailable; + } + response.add("OK\n"); + return Http::Code::OK; +} + +bool RuntimeHandler::isFormUrlEncoded(const Http::HeaderEntry* content_type) { + if (content_type == nullptr) { + return false; + } + + return content_type->value().getStringView() == + Http::Headers::get().ContentTypeValues.FormUrlEncoded; +} + +} // namespace Server +} // namespace Envoy diff --git a/source/server/http/runtime_handler.h b/source/server/http/runtime_handler.h new file mode 100644 index 000000000000..0afc3250a2fe --- /dev/null +++ b/source/server/http/runtime_handler.h @@ -0,0 +1,33 @@ +#pragma once + +#include "envoy/buffer/buffer.h" +#include "envoy/http/codes.h" +#include "envoy/http/header_map.h" +#include "envoy/server/admin.h" +#include "envoy/server/instance.h" + +#include "server/http/handler_ctx.h" + +#include "absl/strings/string_view.h" + +namespace Envoy { +namespace Server { + +class RuntimeHandler : public HandlerContextBase { + +public: + RuntimeHandler(Server::Instance& server); + + Http::Code handlerRuntime(absl::string_view path_and_query, + Http::ResponseHeaderMap& response_headers, Buffer::Instance& response, + AdminStream&); + Http::Code handlerRuntimeModify(absl::string_view path_and_query, + Http::ResponseHeaderMap& response_headers, + Buffer::Instance& response, AdminStream&); + +private: + bool isFormUrlEncoded(const Http::HeaderEntry* content_type); +}; + +} // namespace Server +} // namespace Envoy diff --git a/test/server/http/BUILD b/test/server/http/BUILD index 9a706a07cc6b..d2f1d5c03018 100644 --- a/test/server/http/BUILD +++ b/test/server/http/BUILD @@ -73,6 +73,12 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "runtime_handler_test", + srcs = ["runtime_handler_test.cc"], + deps = [":admin_instance_lib"], +) + envoy_cc_test( name = "prometheus_stats_test", srcs = ["prometheus_stats_test.cc"], diff --git a/test/server/http/admin_test.cc b/test/server/http/admin_test.cc index 6c3dcf15e3f9..3368e6d200bf 100644 --- a/test/server/http/admin_test.cc +++ b/test/server/http/admin_test.cc @@ -11,7 +11,6 @@ #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" #include "envoy/extensions/transport_sockets/tls/v3/cert.pb.h" #include "envoy/json/json_object.h" -#include "envoy/runtime/runtime.h" #include "common/http/message_impl.h" #include "common/json/json_loader.h" @@ -498,114 +497,6 @@ TEST_P(AdminInstanceTest, ContextThatReturnsNullCertDetails) { EXPECT_EQ(expected_empty_json, response.toString()); } -TEST_P(AdminInstanceTest, Runtime) { - Http::ResponseHeaderMapImpl header_map; - Buffer::OwnedImpl response; - - Runtime::MockSnapshot snapshot; - Runtime::MockLoader loader; - auto layer1 = std::make_unique>(); - auto layer2 = std::make_unique>(); - Runtime::Snapshot::EntryMap entries2{{"string_key", {"override", {}, {}, {}, {}}}, 
- {"extra_key", {"bar", {}, {}, {}, {}}}}; - Runtime::Snapshot::EntryMap entries1{{"string_key", {"foo", {}, {}, {}, {}}}, - {"int_key", {"1", 1, {}, {}, {}}}, - {"other_key", {"bar", {}, {}, {}, {}}}}; - - ON_CALL(*layer1, name()).WillByDefault(testing::ReturnRefOfCopy(std::string{"layer1"})); - ON_CALL(*layer1, values()).WillByDefault(testing::ReturnRef(entries1)); - ON_CALL(*layer2, name()).WillByDefault(testing::ReturnRefOfCopy(std::string{"layer2"})); - ON_CALL(*layer2, values()).WillByDefault(testing::ReturnRef(entries2)); - - std::vector layers; - layers.push_back(std::move(layer1)); - layers.push_back(std::move(layer2)); - EXPECT_CALL(snapshot, getLayers()).WillRepeatedly(testing::ReturnRef(layers)); - - const std::string expected_json = R"EOF({ - "layers": [ - "layer1", - "layer2" - ], - "entries": { - "extra_key": { - "layer_values": [ - "", - "bar" - ], - "final_value": "bar" - }, - "int_key": { - "layer_values": [ - "1", - "" - ], - "final_value": "1" - }, - "other_key": { - "layer_values": [ - "bar", - "" - ], - "final_value": "bar" - }, - "string_key": { - "layer_values": [ - "foo", - "override" - ], - "final_value": "override" - } - } -})EOF"; - - EXPECT_CALL(loader, snapshot()).WillRepeatedly(testing::ReturnPointee(&snapshot)); - EXPECT_CALL(server_, runtime()).WillRepeatedly(testing::ReturnPointee(&loader)); - EXPECT_EQ(Http::Code::OK, getCallback("/runtime", header_map, response)); - EXPECT_THAT(expected_json, JsonStringEq(response.toString())); -} - -TEST_P(AdminInstanceTest, RuntimeModify) { - Http::ResponseHeaderMapImpl header_map; - Buffer::OwnedImpl response; - - Runtime::MockLoader loader; - EXPECT_CALL(server_, runtime()).WillRepeatedly(testing::ReturnPointee(&loader)); - - std::unordered_map overrides; - overrides["foo"] = "bar"; - overrides["x"] = "42"; - overrides["nothing"] = ""; - EXPECT_CALL(loader, mergeValues(overrides)).Times(1); - EXPECT_EQ(Http::Code::OK, - postCallback("/runtime_modify?foo=bar&x=42¬hing=", header_map, response)); - EXPECT_EQ("OK\n", response.toString()); -} - -TEST_P(AdminInstanceTest, RuntimeModifyParamsInBody) { - Runtime::MockLoader loader; - EXPECT_CALL(server_, runtime()).WillRepeatedly(testing::ReturnPointee(&loader)); - - const std::string key = "routing.traffic_shift.foo"; - const std::string value = "numerator: 1\ndenominator: TEN_THOUSAND\n"; - const std::unordered_map overrides = {{key, value}}; - EXPECT_CALL(loader, mergeValues(overrides)).Times(1); - - const std::string body = fmt::format("{}={}", key, value); - Http::ResponseHeaderMapImpl header_map; - Buffer::OwnedImpl response; - EXPECT_EQ(Http::Code::OK, runCallback("/runtime_modify", header_map, response, "POST", body)); - EXPECT_EQ("OK\n", response.toString()); -} - -TEST_P(AdminInstanceTest, RuntimeModifyNoArguments) { - Http::ResponseHeaderMapImpl header_map; - Buffer::OwnedImpl response; - - EXPECT_EQ(Http::Code::BadRequest, postCallback("/runtime_modify", header_map, response)); - EXPECT_TRUE(absl::StartsWith(response.toString(), "usage:")); -} - TEST_P(AdminInstanceTest, ReopenLogs) { Http::ResponseHeaderMapImpl header_map; Buffer::OwnedImpl response; diff --git a/test/server/http/runtime_handler_test.cc b/test/server/http/runtime_handler_test.cc new file mode 100644 index 000000000000..c9a7e7b2937e --- /dev/null +++ b/test/server/http/runtime_handler_test.cc @@ -0,0 +1,119 @@ +#include "test/server/http/admin_instance.h" + +namespace Envoy { +namespace Server { + +INSTANTIATE_TEST_SUITE_P(IpVersions, AdminInstanceTest, + 
testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); + +TEST_P(AdminInstanceTest, Runtime) { + Http::ResponseHeaderMapImpl header_map; + Buffer::OwnedImpl response; + + Runtime::MockSnapshot snapshot; + Runtime::MockLoader loader; + auto layer1 = std::make_unique>(); + auto layer2 = std::make_unique>(); + Runtime::Snapshot::EntryMap entries2{{"string_key", {"override", {}, {}, {}, {}}}, + {"extra_key", {"bar", {}, {}, {}, {}}}}; + Runtime::Snapshot::EntryMap entries1{{"string_key", {"foo", {}, {}, {}, {}}}, + {"int_key", {"1", 1, {}, {}, {}}}, + {"other_key", {"bar", {}, {}, {}, {}}}}; + + ON_CALL(*layer1, name()).WillByDefault(testing::ReturnRefOfCopy(std::string{"layer1"})); + ON_CALL(*layer1, values()).WillByDefault(testing::ReturnRef(entries1)); + ON_CALL(*layer2, name()).WillByDefault(testing::ReturnRefOfCopy(std::string{"layer2"})); + ON_CALL(*layer2, values()).WillByDefault(testing::ReturnRef(entries2)); + + std::vector layers; + layers.push_back(std::move(layer1)); + layers.push_back(std::move(layer2)); + EXPECT_CALL(snapshot, getLayers()).WillRepeatedly(testing::ReturnRef(layers)); + + const std::string expected_json = R"EOF({ + "layers": [ + "layer1", + "layer2" + ], + "entries": { + "extra_key": { + "layer_values": [ + "", + "bar" + ], + "final_value": "bar" + }, + "int_key": { + "layer_values": [ + "1", + "" + ], + "final_value": "1" + }, + "other_key": { + "layer_values": [ + "bar", + "" + ], + "final_value": "bar" + }, + "string_key": { + "layer_values": [ + "foo", + "override" + ], + "final_value": "override" + } + } +})EOF"; + + EXPECT_CALL(loader, snapshot()).WillRepeatedly(testing::ReturnPointee(&snapshot)); + EXPECT_CALL(server_, runtime()).WillRepeatedly(testing::ReturnPointee(&loader)); + EXPECT_EQ(Http::Code::OK, getCallback("/runtime", header_map, response)); + EXPECT_THAT(expected_json, JsonStringEq(response.toString())); +} + +TEST_P(AdminInstanceTest, RuntimeModify) { + Http::ResponseHeaderMapImpl header_map; + Buffer::OwnedImpl response; + + Runtime::MockLoader loader; + EXPECT_CALL(server_, runtime()).WillRepeatedly(testing::ReturnPointee(&loader)); + + std::unordered_map overrides; + overrides["foo"] = "bar"; + overrides["x"] = "42"; + overrides["nothing"] = ""; + EXPECT_CALL(loader, mergeValues(overrides)).Times(1); + EXPECT_EQ(Http::Code::OK, + postCallback("/runtime_modify?foo=bar&x=42¬hing=", header_map, response)); + EXPECT_EQ("OK\n", response.toString()); +} + +TEST_P(AdminInstanceTest, RuntimeModifyParamsInBody) { + Runtime::MockLoader loader; + EXPECT_CALL(server_, runtime()).WillRepeatedly(testing::ReturnPointee(&loader)); + + const std::string key = "routing.traffic_shift.foo"; + const std::string value = "numerator: 1\ndenominator: TEN_THOUSAND\n"; + const std::unordered_map overrides = {{key, value}}; + EXPECT_CALL(loader, mergeValues(overrides)).Times(1); + + const std::string body = fmt::format("{}={}", key, value); + Http::ResponseHeaderMapImpl header_map; + Buffer::OwnedImpl response; + EXPECT_EQ(Http::Code::OK, runCallback("/runtime_modify", header_map, response, "POST", body)); + EXPECT_EQ("OK\n", response.toString()); +} + +TEST_P(AdminInstanceTest, RuntimeModifyNoArguments) { + Http::ResponseHeaderMapImpl header_map; + Buffer::OwnedImpl response; + + EXPECT_EQ(Http::Code::BadRequest, postCallback("/runtime_modify", header_map, response)); + EXPECT_TRUE(absl::StartsWith(response.toString(), "usage:")); +} + +} // namespace Server +} // namespace Envoy From f2b81c1c5797904a70abf2da23a5164cc76ad843 
Mon Sep 17 00:00:00 2001 From: asraa Date: Mon, 4 May 2020 14:44:22 -0400 Subject: [PATCH 087/909] [fuzz] fix invalid header related issues in fuzz tests (#10933) * fix invalid header related issues in fuzz tests Signed-off-by: Asra Ali * add comment Signed-off-by: Asra Ali * Add comments, TODO to remove adding host Signed-off-by: Asra Ali * address comments Signed-off-by: Asra Ali --- include/envoy/http/codec.h | 3 +- source/common/http/http1/codec_impl.cc | 3 + .../http/codec_impl_corpus/method_connect | 10 ++ test/common/http/codec_impl_fuzz_test.cc | 11 ++ .../conn_manager_impl_corpus/invalid_host | 100 ++++++++++++++++++ .../http/conn_manager_impl_fuzz_test.cc | 3 +- test/fuzz/utility.h | 20 ++-- 7 files changed, 140 insertions(+), 10 deletions(-) create mode 100644 test/common/http/codec_impl_corpus/method_connect create mode 100644 test/common/http/conn_manager_impl_corpus/invalid_host diff --git a/include/envoy/http/codec.h b/include/envoy/http/codec.h index 39e11b195449..24cfdacf49b9 100644 --- a/include/envoy/http/codec.h +++ b/include/envoy/http/codec.h @@ -85,8 +85,7 @@ class StreamEncoder { class RequestEncoder : public virtual StreamEncoder { public: /** - * Encode headers, optionally indicating end of stream. Response headers must - * have a valid :status set. + * Encode headers, optionally indicating end of stream. * @param headers supplies the header map to encode. * @param end_stream supplies whether this is a header only request. */ diff --git a/source/common/http/http1/codec_impl.cc b/source/common/http/http1/codec_impl.cc index 348f87324045..b383ad6bd3b8 100644 --- a/source/common/http/http1/codec_impl.cc +++ b/source/common/http/http1/codec_impl.cc @@ -362,6 +362,9 @@ void RequestEncoderImpl::encodeHeaders(const RequestHeaderMap& headers, bool end bool is_connect = HeaderUtility::isConnect(headers); if (!method || (!path && !is_connect)) { + // TODO(#10878): This exception does not occur during dispatch and would not be triggered under + // normal circumstances since inputs would fail parsing at ingress. Replace with proper error + // handling when exceptions are removed. Include missing host header for CONNECT. throw CodecClientException(":method and :path must be specified"); } if (method->value() == Headers::get().MethodValues.Head) { diff --git a/test/common/http/codec_impl_corpus/method_connect b/test/common/http/codec_impl_corpus/method_connect new file mode 100644 index 000000000000..d3682266d5e7 --- /dev/null +++ b/test/common/http/codec_impl_corpus/method_connect @@ -0,0 +1,10 @@ +actions { + new_stream { + request_headers { + headers { + key: ":method" + value: "CONNECT" + } + } + } +} \ No newline at end of file diff --git a/test/common/http/codec_impl_fuzz_test.cc b/test/common/http/codec_impl_fuzz_test.cc index 33856e11a6ac..c2424db5ade4 100644 --- a/test/common/http/codec_impl_fuzz_test.cc +++ b/test/common/http/codec_impl_fuzz_test.cc @@ -42,6 +42,17 @@ template T fromSanitizedHeaders(const test::fuzz::Headers& headers) { return Fuzz::fromHeaders(headers, {"transfer-encoding"}); } +// Template specialization for TestRequestHeaderMapImpl to include a Host header. This guards +// against missing host headers in CONNECT requests that would have failed parsing on ingress. +// TODO(#10878): When proper error handling is introduced for non-dispatching codec calls, remove +// this and fail gracefully. 
+template <> +TestRequestHeaderMapImpl +fromSanitizedHeaders(const test::fuzz::Headers& headers) { + return Fuzz::fromHeaders(headers, {"transfer-encoding"}, + {":authority"}); +} + // Convert from test proto Http1ServerSettings to Http1Settings. Http1Settings fromHttp1Settings(const test::common::http::Http1ServerSettings& settings) { Http1Settings h1_settings; diff --git a/test/common/http/conn_manager_impl_corpus/invalid_host b/test/common/http/conn_manager_impl_corpus/invalid_host new file mode 100644 index 000000000000..7ce2011b9668 --- /dev/null +++ b/test/common/http/conn_manager_impl_corpus/invalid_host @@ -0,0 +1,100 @@ +actions { + new_stream { + request_headers { + headers { + key: ":method" + value: "G?T" + } + headers { + key: ":path" + value: "/" + } + headers { + key: "cookie" + value: "http" + } + headers { + key: ":authority" + value: "foo.c/m" + } + headers { + key: ":path" + value: "foo-968957191215689797641957=bar1" + } + } + } +} +actions { + new_stream { + } +} +actions { + new_stream { + end_stream: true + } +} +actions { + new_stream { + } +} +actions { + new_stream { + } +} +actions { + new_stream { + } +} +actions { + new_stream { + } +} +actions { + new_stream { + end_stream: true + } +} +actions { + new_stream { + end_stream: true + } +} +actions { + new_stream { + end_stream: true + } +} +actions { + stream_action { + stream_id: 67108864 + request { + data { + size: 67108864 + } + } + } +} +actions { + new_stream { + } +} +actions { + new_stream { + } +} +actions { + new_stream { + } +} +actions { + new_stream { + } +} +actions { + new_stream { + } +} +actions { + new_stream { + } +} \ No newline at end of file diff --git a/test/common/http/conn_manager_impl_fuzz_test.cc b/test/common/http/conn_manager_impl_fuzz_test.cc index 43db08612d9d..87be21091269 100644 --- a/test/common/http/conn_manager_impl_fuzz_test.cc +++ b/test/common/http/conn_manager_impl_fuzz_test.cc @@ -523,7 +523,8 @@ DEFINE_PROTO_FUZZER(const test::common::http::ConnManagerImplTestCase& input) { case test::common::http::Action::kNewStream: { streams.emplace_back(new FuzzStream( conn_manager, config, - Fuzz::fromHeaders(action.new_stream().request_headers()), + Fuzz::fromHeaders(action.new_stream().request_headers(), + /* ignore_headers =*/{}, {":authority"}), action.new_stream().status(), action.new_stream().end_stream())); break; } diff --git a/test/fuzz/utility.h b/test/fuzz/utility.h index ac1354648b64..bb9f6020a0e1 100644 --- a/test/fuzz/utility.h +++ b/test/fuzz/utility.h @@ -12,6 +12,8 @@ #include "test/mocks/upstream/host.h" #include "test/test_common/utility.h" +#include "nghttp2/nghttp2.h" + // Strong assertion that applies across all compilation modes and doesn't rely // on gtest, which only provides soft fails that don't trip oss-fuzz failures. 
#define FUZZ_ASSERT(x) RELEASE_ASSERT(x, "") @@ -49,13 +51,11 @@ inline std::string replaceInvalidCharacters(absl::string_view string) { inline std::string replaceInvalidHostCharacters(absl::string_view string) { std::string filtered; filtered.reserve(string.length()); - for (const char& c : string) { - switch (c) { - case ' ': + for (const uint8_t* c = reinterpret_cast(string.data()); *c; ++c) { + if (nghttp2_check_authority(c, 1)) { + filtered.push_back(*c); + } else { filtered.push_back('0'); - break; - default: - filtered.push_back(c); } } return filtered; @@ -83,12 +83,18 @@ replaceInvalidStringValues(const envoy::config::core::v3::Metadata& upstream_met template inline T fromHeaders( const test::fuzz::Headers& headers, - const std::unordered_set& ignore_headers = std::unordered_set()) { + const std::unordered_set& ignore_headers = std::unordered_set(), + std::unordered_set include_headers = std::unordered_set()) { T header_map; for (const auto& header : headers.headers()) { if (ignore_headers.find(absl::AsciiStrToLower(header.key())) == ignore_headers.end()) { header_map.addCopy(header.key(), header.value()); } + include_headers.erase(absl::AsciiStrToLower(header.key())); + } + // Add dummy headers for non-present headers that must be included. + for (const auto& header : include_headers) { + header_map.addCopy(header, "dummy"); } return header_map; } From 31915818188fab5c095b86aa307a50c3caf68c6a Mon Sep 17 00:00:00 2001 From: htuch Date: Mon, 4 May 2020 15:45:12 -0400 Subject: [PATCH 088/909] repokitteh: allow envoyproxy/api-watchers to monitor API changes. (#11043) Signed-off-by: Harvey Tuch --- repokitteh.star | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/repokitteh.star b/repokitteh.star index 79f6bbee624b..e115b2eae20b 100644 --- a/repokitteh.star +++ b/repokitteh.star @@ -12,6 +12,10 @@ use( "path": "api/", "label": "api", }, + { + "owner": "envoyproxy/api-watchers", + "path": "api/", + }, ], ) From f64ae9c134794ca29b36e48158718a711d2fd4d3 Mon Sep 17 00:00:00 2001 From: Dmitri Dolguikh Date: Mon, 4 May 2020 13:04:10 -0700 Subject: [PATCH 089/909] Updated documentation and example configs to v3 API (#10644) Signed-off-by: Dmitri Dolguikh --- .../configuration/best_practices/edge.rst | 42 ++++++---- .../best_practices/level_two.rst | 4 +- .../http/http_conn_man/header_casing.rst | 4 +- .../http/http_conn_man/header_sanitizing.rst | 4 +- .../http/http_conn_man/headers.rst | 32 ++++---- .../http/http_conn_man/overview.rst | 4 +- .../configuration/http/http_conn_man/rds.rst | 2 +- .../http/http_conn_man/route_matching.rst | 6 +- .../http/http_conn_man/runtime.rst | 2 +- .../http/http_conn_man/stats.rst | 20 ++--- .../http/http_conn_man/traffic_splitting.rst | 10 +-- .../configuration/http/http_conn_man/vhds.rst | 28 +++---- .../adaptive_concurrency_filter.rst | 10 +-- .../http/http_filters/aws_lambda_filter.rst | 12 +-- .../aws_request_signing_filter.rst | 6 +- .../http/http_filters/buffer_filter.rst | 4 +- .../http/http_filters/cors_filter.rst | 10 +-- .../http/http_filters/csrf_filter.rst | 10 +-- .../dynamic_forward_proxy_filter.rst | 27 ++++--- .../http/http_filters/dynamodb_filter.rst | 4 +- .../http/http_filters/ext_authz_filter.rst | 22 ++--- .../http/http_filters/fault_filter.rst | 48 +++++------ .../http_filters/grpc_http1_bridge_filter.rst | 2 +- .../grpc_http1_reverse_bridge_filter.rst | 24 +++--- .../grpc_json_transcoder_filter.rst | 6 +- .../http/http_filters/grpc_stats_filter.rst | 8 +- .../http/http_filters/grpc_web_filter.rst | 2 +- 
.../http/http_filters/gzip_filter.rst | 2 +- .../header_to_metadata_filter.rst | 7 +- .../http/http_filters/health_check_filter.rst | 2 +- .../http/http_filters/ip_tagging_filter.rst | 2 +- .../http/http_filters/jwt_authn_filter.rst | 8 +- .../http/http_filters/lua_filter.rst | 4 +- .../http_filters/on_demand_updates_filter.rst | 8 +- .../http/http_filters/original_src_filter.rst | 8 +- .../http/http_filters/rate_limit_filter.rst | 14 ++-- .../http/http_filters/rbac_filter.rst | 6 +- .../http/http_filters/router_filter.rst | 64 +++++++-------- .../http/http_filters/squash_filter.rst | 2 +- .../http/http_filters/tap_filter.rst | 38 ++++----- docs/root/configuration/listeners/lds.rst | 6 +- .../listener_filters/http_inspector.rst | 6 +- .../listener_filters/original_dst_filter.rst | 4 +- .../listener_filters/original_src_filter.rst | 10 ++- .../listener_filters/proxy_protocol.rst | 2 +- .../listener_filters/tls_inspector.rst | 10 +-- .../client_ssl_auth_filter.rst | 2 +- .../direct_response_filter.rst | 2 +- .../network_filters/dubbo_proxy_filter.rst | 6 +- .../listeners/network_filters/echo_filter.rst | 2 +- .../network_filters/ext_authz_filter.rst | 8 +- .../network_filters/kafka_broker_filter.rst | 19 +++-- .../local_rate_limit_filter.rst | 8 +- .../network_filters/mongo_proxy_filter.rst | 6 +- .../network_filters/mysql_proxy_filter.rst | 10 +-- .../network_filters/rate_limit_filter.rst | 4 +- .../listeners/network_filters/rbac_filter.rst | 2 +- .../network_filters/redis_proxy_filter.rst | 4 +- .../network_filters/sni_cluster_filter.rst | 2 +- .../sni_dynamic_forward_proxy_filter.rst | 6 +- .../network_filters/tcp_proxy_filter.rst | 10 +-- .../network_filters/thrift_proxy_filter.rst | 14 ++-- .../zookeeper_proxy_filter.rst | 4 +- .../root/configuration/listeners/overview.rst | 2 +- .../listeners/udp_filters/dns_filter.rst | 2 +- .../listeners/udp_filters/udp_proxy.rst | 4 +- .../observability/access_log/usage.rst | 16 ++-- .../observability/statistics.rst | 4 +- .../overload_manager/overload_manager.rst | 2 +- .../root/configuration/operations/runtime.rst | 26 +++--- .../operations/tools/router_check.rst | 2 +- .../other_features/rate_limit.rst | 4 +- .../dubbo_filters/router_filter.rst | 4 +- .../thrift_filters/rate_limit_filter.rst | 8 +- .../thrift_filters/router_filter.rst | 4 +- .../root/configuration/overview/bootstrap.rst | 12 +-- docs/root/configuration/overview/examples.rst | 22 ++--- .../root/configuration/overview/extension.rst | 8 +- .../configuration/overview/mgmt_server.rst | 2 +- docs/root/configuration/overview/xds_api.rst | 80 +++++++++---------- docs/root/configuration/security/secret.rst | 24 +++--- .../upstream/cluster_manager/cds.rst | 2 +- .../cluster_circuit_breakers.rst | 4 +- .../upstream/cluster_manager/cluster_hc.rst | 2 +- .../cluster_manager/cluster_runtime.rst | 60 +++++++------- .../cluster_manager/cluster_stats.rst | 12 +-- .../upstream/cluster_manager/overview.rst | 2 +- .../upstream/health_checkers/redis.rst | 8 +- docs/root/faq/configuration/flow_control.rst | 25 +++--- docs/root/faq/configuration/sni.rst | 16 ++-- docs/root/faq/configuration/timeouts.rst | 26 +++--- .../faq/configuration/zone_aware_routing.rst | 6 +- .../disable_circuit_breaking.rst | 2 +- .../tools/schema_validator_check_tool.rst | 4 +- .../advanced/data_sharing_between_filters.rst | 12 +-- .../http/http_connection_management.rst | 22 ++--- .../intro/arch_overview/http/http_proxy.rst | 8 +- .../intro/arch_overview/http/http_routing.rst | 54 ++++++------- 
.../intro/arch_overview/http/upgrades.rst | 10 +-- .../arch_overview/intro/threading_model.rst | 2 +- .../arch_overview/listeners/listeners.rst | 4 +- .../arch_overview/listeners/tcp_proxy.rst | 2 +- .../observability/access_logging.rst | 10 +-- .../observability/statistics.rst | 6 +- .../arch_overview/observability/tracing.rst | 12 +-- .../arch_overview/operations/draining.rst | 2 +- .../operations/dynamic_configuration.rst | 6 +- .../intro/arch_overview/operations/init.rst | 8 +- .../arch_overview/other_protocols/grpc.rst | 4 +- .../arch_overview/other_protocols/redis.rst | 10 +-- .../security/ext_authz_filter.rst | 6 +- .../arch_overview/security/rbac_filter.rst | 8 +- .../root/intro/arch_overview/security/ssl.rst | 35 ++++---- .../upstream/aggregate_cluster.rst | 8 +- .../upstream/circuit_breaking.rst | 6 +- .../upstream/connection_pooling.rst | 4 +- .../upstream/health_checking.rst | 32 ++++---- .../load_balancing/load_balancers.rst | 12 +-- .../load_balancing/locality_weight.rst | 8 +- .../upstream/load_balancing/original_dst.rst | 4 +- .../load_balancing/overprovisioning.rst | 2 +- .../load_balancing/panic_threshold.rst | 4 +- .../upstream/load_balancing/priority.rst | 2 +- .../upstream/load_balancing/subsets.rst | 9 +-- .../intro/arch_overview/upstream/outlier.rst | 68 ++++++++-------- .../upstream/service_discovery.rst | 26 +++--- docs/root/intro/what_is_envoy.rst | 2 +- docs/root/operations/admin.rst | 48 +++++------ docs/root/operations/cli.rst | 22 ++--- docs/root/operations/fs_flags.rst | 2 +- docs/root/operations/performance.rst | 6 +- docs/root/operations/traffic_tapping.rst | 34 ++++---- docs/root/start/start.rst | 16 ++-- 133 files changed, 814 insertions(+), 782 deletions(-) diff --git a/docs/root/configuration/best_practices/edge.rst b/docs/root/configuration/best_practices/edge.rst index d70345971f01..0a3efe8307bd 100644 --- a/docs/root/configuration/best_practices/edge.rst +++ b/docs/root/configuration/best_practices/edge.rst @@ -11,19 +11,19 @@ TCP proxies should configure: * restrict access to the admin endpoint, * :ref:`overload_manager `, -* :ref:`listener buffer limits ` to 32 KiB, -* :ref:`cluster buffer limits ` to 32 KiB. +* :ref:`listener buffer limits ` to 32 KiB, +* :ref:`cluster buffer limits ` to 32 KiB. HTTP proxies should additionally configure: -* :ref:`use_remote_address ` +* :ref:`use_remote_address ` to true (to avoid consuming HTTP headers from external clients, see :ref:`HTTP header sanitizing ` for details), * :ref:`connection and stream timeouts `, -* :ref:`HTTP/2 maximum concurrent streams limit ` to 100, -* :ref:`HTTP/2 initial stream window size limit ` to 64 KiB, -* :ref:`HTTP/2 initial connection window size limit ` to 1 MiB. -* :ref:`headers_with_underscores_action setting ` to REJECT_REQUEST, to protect upstream services that treat '_' and '-' as interchangeable. +* :ref:`HTTP/2 maximum concurrent streams limit ` to 100, +* :ref:`HTTP/2 initial stream window size limit ` to 64 KiB, +* :ref:`HTTP/2 initial connection window size limit ` to 1 MiB. +* :ref:`headers_with_underscores_action setting ` to REJECT_REQUEST, to protect upstream services that treat '_' and '-' as interchangeable. The following is a YAML example of the above recommendation. @@ -69,17 +69,20 @@ The following is a YAML example of the above recommendation. 
filter_chains: - filter_chain_match: server_names: ["example.com", "www.example.com"] - tls_context: - common_tls_context: - tls_certificates: - - certificate_chain: { filename: "example_com_cert.pem" } - private_key: { filename: "example_com_key.pem" } + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: { filename: "example_com_cert.pem" } + private_key: { filename: "example_com_key.pem" } # Uncomment if Envoy is behind a load balancer that exposes client IP address using the PROXY protocol. # use_proxy_proto: true filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager stat_prefix: ingress_http use_remote_address: true common_http_protocol_options: @@ -104,10 +107,15 @@ The following is a YAML example of the above recommendation. name: service_foo connect_timeout: 15s per_connection_buffer_limit_bytes: 32768 # 32 KiB - hosts: - socket_address: - address: 127.0.0.1 - port_value: 8080 + load_assignment: + cluster_name: some_service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 8080 http2_protocol_options: initial_stream_window_size: 65536 # 64 KiB initial_connection_window_size: 1048576 # 1 MiB diff --git a/docs/root/configuration/best_practices/level_two.rst b/docs/root/configuration/best_practices/level_two.rst index a7a0b6e7e49d..c38dae7cdb68 100644 --- a/docs/root/configuration/best_practices/level_two.rst +++ b/docs/root/configuration/best_practices/level_two.rst @@ -12,7 +12,7 @@ edge use case may need to be adjusted when using Envoy in a multi-level deployme **In summary, if you run level two Envoy version 1.11.1 or greater which terminates HTTP/2, we strongly advise you to change the HTTP/2 configuration of your level two Envoy, by setting its downstream** -:ref:`validation of HTTP/2 messaging option ` +:ref:`validation of HTTP/2 messaging option ` **to true.** If there is an invalid HTTP/2 request and this option is not set, the Envoy in @@ -30,7 +30,7 @@ user has insight into what traffic will bypass level one checks, they could spra traffic. Please note that the -:ref:`validation of HTTP/2 messaging option ` +:ref:`validation of HTTP/2 messaging option ` is planned to be deprecated and replaced with mandatory configuration in the HttpConnectionManager, to ensure that what is now an easily overlooked option would need to be configured, ideally appropriately for the given Envoy deployment. Please refer to the diff --git a/docs/root/configuration/http/http_conn_man/header_casing.rst b/docs/root/configuration/http/http_conn_man/header_casing.rst index 7bdc11616284..e5476513810e 100644 --- a/docs/root/configuration/http/http_conn_man/header_casing.rst +++ b/docs/root/configuration/http/http_conn_man/header_casing.rst @@ -7,5 +7,5 @@ existing systems that might rely on specific header casing. To support these use cases, Envoy allows configuring a formatting scheme for the headers, which will have Envoy transform the header keys during serialization. To configure this formatting on -response headers, specify the format in the :ref:`http_protocol_options `. 
-To configure this for upstream request headers, specify the formatting on the :ref:`Cluster `. +response headers, specify the format in the :ref:`http_protocol_options `. +To configure this for upstream request headers, specify the formatting on the :ref:`Cluster `. diff --git a/docs/root/configuration/http/http_conn_man/header_sanitizing.rst b/docs/root/configuration/http/http_conn_man/header_sanitizing.rst index db0a55f886fa..47040620f757 100644 --- a/docs/root/configuration/http/http_conn_man/header_sanitizing.rst +++ b/docs/root/configuration/http/http_conn_man/header_sanitizing.rst @@ -9,10 +9,10 @@ result in addition, removal, or modification. Ultimately, whether the request is or external is governed by the :ref:`x-forwarded-for ` header (please read the linked section carefully as how Envoy populates the header is complex and depends on the :ref:`use_remote_address -` +` setting). In addition, the :ref:`internal_address_config -` +` setting can be used to configure the internal/external determination. Envoy will potentially sanitize the following headers: diff --git a/docs/root/configuration/http/http_conn_man/headers.rst b/docs/root/configuration/http/http_conn_man/headers.rst index 44764759a3bc..48f681d802c2 100644 --- a/docs/root/configuration/http/http_conn_man/headers.rst +++ b/docs/root/configuration/http/http_conn_man/headers.rst @@ -15,7 +15,7 @@ user-agent ---------- The *user-agent* header may be set by the connection manager during decoding if the :ref:`add_user_agent -` option is +` option is enabled. The header is only modified if it is not already set. If the connection manager does set the header, the value is determined by the :option:`--service-cluster` command line option. @@ -25,7 +25,7 @@ server ------ The *server* header will be set during encoding to the value in the :ref:`server_name -` option. +` option. .. _config_http_conn_man_headers_x-client-trace-id: @@ -49,7 +49,7 @@ that in the current implementation, this should be considered a hint as it is se could be easily spoofed by any internal entity. In the future Envoy will support a mutual authentication TLS mesh which will make this header fully secure. Like *user-agent*, the value is determined by the :option:`--service-cluster` command line option. In order to enable this -feature you need to set the :ref:`user_agent ` option to true. +feature you need to set the :ref:`user_agent ` option to true. .. _config_http_conn_man_headers_downstream-service-node: @@ -108,7 +108,7 @@ The header used to override destination address when using the load balancing policy. It is ignored, unless the use of it is enabled via -:ref:`use_http_header `. +:ref:`use_http_header `. .. _config_http_conn_man_headers_x-forwarded-client-cert: @@ -149,9 +149,9 @@ Some examples of the XFCC header are: 3. For one client certificate with both URI type and DNS type Subject Alternative Name: ``x-forwarded-client-cert: By=http://frontend.lyft.com;Hash=468ed33be74eee6556d90c0149c1309e9ba61d6425303443c0748a02dd8de688;Subject="/C=US/ST=CA/L=San Francisco/OU=Lyft/CN=Test Client";URI=http://testclient.lyft.com;DNS=lyft.com;DNS=www.lyft.com`` How Envoy processes XFCC is specified by the -:ref:`forward_client_cert_details` +:ref:`forward_client_cert_details` and the -:ref:`set_current_client_cert_details` +:ref:`set_current_client_cert_details` HTTP connection manager options. If *forward_client_cert_details* is unset, the XFCC header will be sanitized by default. 
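The two XFCC options referenced above are plain fields on the HTTP connection manager. A minimal sketch of setting them, assuming only the v3 field names shown in this patch (the particular enum value and booleans below are illustrative choices, not taken from this change):

.. code-block:: yaml

   forward_client_cert_details: APPEND_FORWARD   # illustrative; when unset the header is sanitized
   set_current_client_cert_details:
     subject: true   # include the Subject of the current client certificate in XFCC
     uri: true       # include the URI-type Subject Alternative Name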
@@ -169,9 +169,9 @@ address of the nearest client to the XFF list before proxying the request. Some 3. ``x-forwarded-for: 50.0.0.1, 10.0.0.1`` (internal proxy hop) Envoy will only append to XFF if the :ref:`use_remote_address -` +` HTTP connection manager option is set to true and the :ref:`skip_xff_append -` +` is set false. This means that if *use_remote_address* is false (which is the default) or *skip_xff_append* is true, the connection manager operates in a transparent mode where it does not modify XFF. @@ -318,7 +318,7 @@ A few very important notes about XFF: * **NOTE**: If an internal service proxies an external request to another internal service, and includes the original XFF header, Envoy will append to it on egress if - :ref:`use_remote_address ` is set. This will cause + :ref:`use_remote_address ` is set. This will cause the other side to think the request is external. Generally, this is what is intended if XFF is being forwarded. If it is not intended, do not forward XFF, and forward :ref:`config_http_conn_man_headers_x-envoy-internal` instead. @@ -352,7 +352,7 @@ is out of scope for this documentation. If *x-request-id* is propagated across a following features are available: * Stable :ref:`access logging ` via the - :ref:`v2 API runtime filter`. + :ref:`v3 API runtime filter`. * Stable tracing when performing random sampling via the :ref:`tracing.random_sampling ` runtime setting or via forced tracing using the :ref:`config_http_conn_man_headers_x-envoy-force-trace` and @@ -467,13 +467,13 @@ Custom request/response headers Custom request/response headers can be added to a request/response at the weighted cluster, route, virtual host, and/or global route configuration level. See the -:ref:`v2 ` API documentation. +:ref:`v3 ` API documentation. No *:-prefixed* pseudo-header may be modified via this mechanism. The *:path* and *:authority* headers may instead be modified via mechanisms such as -:ref:`prefix_rewrite `, -:ref:`regex_rewrite `, and -:ref:`host_rewrite `. +:ref:`prefix_rewrite `, +:ref:`regex_rewrite `, and +:ref:`host_rewrite `. Headers are appended to requests/responses in the following order: weighted cluster level headers, route level headers, virtual host level headers and finally global level headers. @@ -496,7 +496,7 @@ Supported variable names are: .. note:: This may not be the physical remote address of the peer if the address has been inferred from - :ref:`proxy proto ` or :ref:`x-forwarded-for + :ref:`proxy proto ` or :ref:`x-forwarded-for `. %DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT% @@ -603,7 +603,7 @@ Supported variable names are: :ref:`x-forwarded-proto ` request header. %UPSTREAM_METADATA(["namespace", "key", ...])% - Populates the header with :ref:`EDS endpoint metadata ` from the + Populates the header with :ref:`EDS endpoint metadata ` from the upstream host selected by the router. Metadata may be selected from any namespace. In general, metadata values may be strings, numbers, booleans, lists, nested structures, or null. Upstream metadata values may be selected from nested structs by specifying multiple keys. 
Otherwise, diff --git a/docs/root/configuration/http/http_conn_man/overview.rst b/docs/root/configuration/http/http_conn_man/overview.rst index dbb8fbc8c46d..280008b4f77b 100644 --- a/docs/root/configuration/http/http_conn_man/overview.rst +++ b/docs/root/configuration/http/http_conn_man/overview.rst @@ -3,5 +3,5 @@ Overview * HTTP connection manager :ref:`architecture overview ` * HTTP protocols :ref:`architecture overview ` -* :ref:`v2 API reference - ` +* :ref:`v3 API reference + ` diff --git a/docs/root/configuration/http/http_conn_man/rds.rst b/docs/root/configuration/http/http_conn_man/rds.rst index 516d0832868d..11f8e367a04d 100644 --- a/docs/root/configuration/http/http_conn_man/rds.rst +++ b/docs/root/configuration/http/http_conn_man/rds.rst @@ -4,7 +4,7 @@ Route discovery service (RDS) ============================= The route discovery service (RDS) API is an optional API that Envoy will call to dynamically fetch -:ref:`route configurations `. A route configuration includes both +:ref:`route configurations `. A route configuration includes both HTTP header modifications, virtual hosts, and the individual route entries contained within each virtual host. Each :ref:`HTTP connection manager filter ` can independently fetch its own route configuration via the API. Optionally, the diff --git a/docs/root/configuration/http/http_conn_man/route_matching.rst b/docs/root/configuration/http/http_conn_man/route_matching.rst index 5f425fb31d8e..9cd71e2b2bbb 100644 --- a/docs/root/configuration/http/http_conn_man/route_matching.rst +++ b/docs/root/configuration/http/http_conn_man/route_matching.rst @@ -6,9 +6,9 @@ Route matching When Envoy matches a route, it uses the following procedure: #. The HTTP request's *host* or *:authority* header is matched to a :ref:`virtual host - `. -#. Each :ref:`route entry ` in the virtual host is checked, + `. +#. Each :ref:`route entry ` in the virtual host is checked, *in order*. If there is a match, the route is used and no further route checks are made. -#. Independently, each :ref:`virtual cluster ` in the +#. Independently, each :ref:`virtual cluster ` in the virtual host is checked, *in order*. If there is a match, the virtual cluster is used and no further virtual cluster checks are made. diff --git a/docs/root/configuration/http/http_conn_man/runtime.rst b/docs/root/configuration/http/http_conn_man/runtime.rst index dcc85412c631..2c104c806508 100644 --- a/docs/root/configuration/http/http_conn_man/runtime.rst +++ b/docs/root/configuration/http/http_conn_man/runtime.rst @@ -9,7 +9,7 @@ The HTTP connection manager supports the following runtime settings: http_connection_manager.normalize_path % of requests that will have path normalization applied if not already configured in - :ref:`normalize_path `. + :ref:`normalize_path `. This is evaluated at configuration load time and will apply to all requests for a given configuration. diff --git a/docs/root/configuration/http/http_conn_man/stats.rst b/docs/root/configuration/http/http_conn_man/stats.rst index d68abc8ce8e6..b81d3ea06045 100644 --- a/docs/root/configuration/http/http_conn_man/stats.rst +++ b/docs/root/configuration/http/http_conn_man/stats.rst @@ -111,10 +111,10 @@ All http1 statistics are rooted at *http1.* :header: Name, Type, Description :widths: 1, 1, 2 - dropped_headers_with_underscores, Counter, Total number of dropped headers with names containing underscores. This action is configured by setting the :ref:`headers_with_underscores_action config setting `. 
+ dropped_headers_with_underscores, Counter, Total number of dropped headers with names containing underscores. This action is configured by setting the :ref:`headers_with_underscores_action config setting `. metadata_not_supported_error, Counter, Total number of metadata dropped during HTTP/1 encoding response_flood, Counter, Total number of connections closed due to response flooding - requests_rejected_with_underscores_in_headers, Counter, Total numbers of rejected requests due to header names containing underscores. This action is configured by setting the :ref:`headers_with_underscores_action config setting `. + requests_rejected_with_underscores_in_headers, Counter, Total numbers of rejected requests due to header names containing underscores. This action is configured by setting the :ref:`headers_with_underscores_action config setting `. Http2 codec statistics ~~~~~~~~~~~~~~~~~~~~~~ @@ -125,15 +125,15 @@ All http2 statistics are rooted at *http2.* :header: Name, Type, Description :widths: 1, 1, 2 - dropped_headers_with_underscores, Counter, Total number of dropped headers with names containing underscores. This action is configured by setting the :ref:`headers_with_underscores_action config setting `. - header_overflow, Counter, Total number of connections reset due to the headers being larger than the :ref:`configured value `. + dropped_headers_with_underscores, Counter, Total number of dropped headers with names containing underscores. This action is configured by setting the :ref:`headers_with_underscores_action config setting `. + header_overflow, Counter, Total number of connections reset due to the headers being larger than the :ref:`configured value `. headers_cb_no_stream, Counter, Total number of errors where a header callback is called without an associated stream. This tracks an unexpected occurrence due to an as yet undiagnosed bug - inbound_empty_frames_flood, Counter, Total number of connections terminated for exceeding the limit on consecutive inbound frames with an empty payload and no end stream flag. The limit is configured by setting the :ref:`max_consecutive_inbound_frames_with_empty_payload config setting `. - inbound_priority_frames_flood, Counter, Total number of connections terminated for exceeding the limit on inbound frames of type PRIORITY. The limit is configured by setting the :ref:`max_inbound_priority_frames_per_stream config setting `. - inbound_window_update_frames_flood, Counter, Total number of connections terminated for exceeding the limit on inbound frames of type WINDOW_UPDATE. The limit is configured by setting the :ref:`max_inbound_window_updateframes_per_data_frame_sent config setting `. - outbound_flood, Counter, Total number of connections terminated for exceeding the limit on outbound frames of all types. The limit is configured by setting the :ref:`max_outbound_frames config setting `. - outbound_control_flood, Counter, "Total number of connections terminated for exceeding the limit on outbound frames of types PING, SETTINGS and RST_STREAM. The limit is configured by setting the :ref:`max_outbound_control_frames config setting `." - requests_rejected_with_underscores_in_headers, Counter, Total numbers of rejected requests due to header names containing underscores. This action is configured by setting the :ref:`headers_with_underscores_action config setting `. + inbound_empty_frames_flood, Counter, Total number of connections terminated for exceeding the limit on consecutive inbound frames with an empty payload and no end stream flag. 
The limit is configured by setting the :ref:`max_consecutive_inbound_frames_with_empty_payload config setting `. + inbound_priority_frames_flood, Counter, Total number of connections terminated for exceeding the limit on inbound frames of type PRIORITY. The limit is configured by setting the :ref:`max_inbound_priority_frames_per_stream config setting `. + inbound_window_update_frames_flood, Counter, Total number of connections terminated for exceeding the limit on inbound frames of type WINDOW_UPDATE. The limit is configured by setting the :ref:`max_inbound_window_updateframes_per_data_frame_sent config setting `. + outbound_flood, Counter, Total number of connections terminated for exceeding the limit on outbound frames of all types. The limit is configured by setting the :ref:`max_outbound_frames config setting `. + outbound_control_flood, Counter, "Total number of connections terminated for exceeding the limit on outbound frames of types PING, SETTINGS and RST_STREAM. The limit is configured by setting the :ref:`max_outbound_control_frames config setting `." + requests_rejected_with_underscores_in_headers, Counter, Total numbers of rejected requests due to header names containing underscores. This action is configured by setting the :ref:`headers_with_underscores_action config setting `. rx_messaging_error, Counter, Total number of invalid received frames that violated `section 8 `_ of the HTTP/2 spec. This will result in a *tx_reset* rx_reset, Counter, Total number of reset stream frames received by Envoy too_many_header_frames, Counter, Total number of times an HTTP2 connection is reset due to receiving too many headers frames. Envoy currently supports proxying at most one header frame for 100-Continue one non-100 response code header frame and one frame with trailers diff --git a/docs/root/configuration/http/http_conn_man/traffic_splitting.rst b/docs/root/configuration/http/http_conn_man/traffic_splitting.rst index bfbe0c191986..eab4577f5301 100644 --- a/docs/root/configuration/http/http_conn_man/traffic_splitting.rst +++ b/docs/root/configuration/http/http_conn_man/traffic_splitting.rst @@ -26,7 +26,7 @@ section describes this scenario in more detail. Traffic shifting between two upstreams -------------------------------------- -The :ref:`runtime ` object +The :ref:`runtime ` object in the route configuration determines the probability of selecting a particular route (and hence its cluster). By using the *runtime_fraction* configuration, traffic to a particular route in a virtual host can be @@ -59,7 +59,7 @@ envoy configuration file. Envoy matches routes with a :ref:`first match ` policy. If the route has a runtime_fraction object, the request will be additionally matched based on the runtime_fraction -:ref:`value ` +:ref:`value ` (or the default, if no value is specified). Thus, by placing routes back-to-back in the above example and specifying a runtime_fraction object in the first route, traffic shifting can be accomplished by changing the runtime_fraction @@ -93,8 +93,8 @@ v3) instead of two. To split traffic evenly across the three versions specify the weight for each upstream cluster. Unlike the previous example, a **single** :ref:`route -` entry is sufficient. The -:ref:`weighted_clusters ` +` entry is sufficient. The +:ref:`weighted_clusters ` configuration block in a route can be used to specify multiple upstream clusters along with weights that indicate the **percentage** of traffic to be sent to each upstream cluster. @@ -120,7 +120,7 @@ to each upstream cluster. 
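As a concrete illustration of the weighted_clusters block described above, a single route could split traffic across the three versions roughly as follows (the cluster names and the 33/33/34 split are assumptions for this sketch, not part of this change):

.. code-block:: yaml

   routes:
   - match: { prefix: "/" }
     route:
       weighted_clusters:
         clusters:
         - name: helloworld_v1
           weight: 33
         - name: helloworld_v2
           weight: 33
         - name: helloworld_v3
           weight: 34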
By default, the weights must sum to exactly 100. In the V2 API, the -:ref:`total weight ` defaults to 100, but can +:ref:`total weight ` defaults to 100, but can be modified to allow finer granularity. The weights assigned to each cluster can be dynamically adjusted using the diff --git a/docs/root/configuration/http/http_conn_man/vhds.rst b/docs/root/configuration/http/http_conn_man/vhds.rst index 73d9a14f0ec9..f9bcdf517bec 100644 --- a/docs/root/configuration/http/http_conn_man/vhds.rst +++ b/docs/root/configuration/http/http_conn_man/vhds.rst @@ -4,7 +4,7 @@ Virtual Host Discovery Service (VHDS) ===================================== The virtual host discovery service (VHDS) API is an optional API that Envoy will call to -dynamically fetch :ref:`virtual hosts `. A virtual host includes +dynamically fetch :ref:`virtual hosts `. A virtual host includes a name and set of domains that get routed to it based on the incoming request's host header. By default in RDS, all routes for a cluster are sent to every Envoy instance in the mesh. This @@ -32,20 +32,20 @@ a route configuration name can. Subscribing to Resources ^^^^^^^^^^^^^^^^^^^^^^^^ VHDS allows resources to be :ref:`subscribed ` to using a -:ref:`DeltaDiscoveryRequest ` with the -:ref:`type_url ` set to -`type.googleapis.com/envoy.api.v2.route.VirtualHost` -and :ref:`resource_names_subscribe ` +:ref:`DeltaDiscoveryRequest ` with the +:ref:`type_url ` set to +`type.googleapis.com/envoy.config.route.v3.VirtualHost` +and :ref:`resource_names_subscribe ` set to a list of virtual host resource names for which it would like configuration. If a route for the contents of a host/authority header cannot be resolved, the active stream is paused while a -:ref:`DeltaDiscoveryRequest ` is sent. -When a :ref:`DeltaDiscoveryResponse ` is received where one of -the :ref:`aliases ` or the -:ref:`name ` in the response exactly matches the -:ref:`resource_names_subscribe ` -entry from the :ref:`DeltaDiscoveryRequest `, the route +:ref:`DeltaDiscoveryRequest ` is sent. +When a :ref:`DeltaDiscoveryResponse ` is received where one of +the :ref:`aliases ` or the +:ref:`name ` in the response exactly matches the +:ref:`resource_names_subscribe ` +entry from the :ref:`DeltaDiscoveryRequest `, the route configuration is updated, the stream is resumed, and processing of the filter chain continues. Updates to virtual hosts occur in two ways. If a virtual host was originally sent over RDS, then the @@ -53,19 +53,19 @@ virtual host should be updated over RDS. If a virtual host was subscribed to ove will take place over VHDS. When a route configuration entry is updated, if the -:ref:`vhds field ` has changed, the virtual host table for +:ref:`vhds field ` has changed, the virtual host table for that route configuration is cleared, which will require that all virtual hosts be sent again. Compatibility with Scoped RDS ----------------------------- VHDS shouldn't present any compatibility issues with -:ref:`scoped RDS `. +:ref:`scoped RDS `. Route configuration names can still be used for virtual host matching, but with scoped RDS configured it would point to a scoped route configuration. However, it is important to note that using -on-demand :ref:`scoped RDS ` +on-demand :ref:`scoped RDS ` and VHDS together will require two on-demand subscriptions per routing scope. 
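To make the VHDS subscription flow above concrete, the delta request Envoy sends while the stream is paused carries roughly the following fields, shown here in YAML form. The type_url and field names come directly from the text above; the resource name is a hypothetical example, since naming depends on the route configuration in use:

.. code-block:: yaml

   # Sketch of a DeltaDiscoveryRequest issued for an unresolved host header.
   type_url: type.googleapis.com/envoy.config.route.v3.VirtualHost
   resource_names_subscribe:
   - example_route_config/www.example.com   # hypothetical virtual host resource name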
diff --git a/docs/root/configuration/http/http_filters/adaptive_concurrency_filter.rst b/docs/root/configuration/http/http_filters/adaptive_concurrency_filter.rst index f5f1467a738e..19c7d0c58872 100644 --- a/docs/root/configuration/http/http_filters/adaptive_concurrency_filter.rst +++ b/docs/root/configuration/http/http_filters/adaptive_concurrency_filter.rst @@ -9,7 +9,7 @@ Adaptive Concurrency This filter should be configured with the name `envoy.filters.http.adaptive_concurrency`. -See the :ref:`v2 API reference ` for details on each configuration parameter. +See the :ref:`v3 API reference ` for details on each configuration parameter. Overview -------- @@ -28,7 +28,7 @@ Gradient Controller The gradient controller makes forwarding decisions based on a periodically measured ideal round-trip time (minRTT) for an upstream. -:ref:`v2 API reference ` +:ref:`v3 API reference ` Calculating the minRTT ^^^^^^^^^^^^^^^^^^^^^^ @@ -73,7 +73,7 @@ Notice that *B*, the buffer value added to the minRTT, allows for normal varianc latencies by requiring the sampled latencies the exceed the minRTT by some configurable threshold before decreasing the gradient value. -The buffer will be a percentage of the measured minRTT value whose value is modified via the buffer field in the :ref:`minRTT calculation parameters `. The buffer is calculated as follows: +The buffer will be a percentage of the measured minRTT value whose value is modified via the buffer field in the :ref:`minRTT calculation parameters `. The buffer is calculated as follows: .. math:: @@ -118,7 +118,7 @@ fields can be overridden via runtime settings. name: envoy.filters.http.adaptive_concurrency typed_config: - "@type": type.googleapis.com/envoy.config.filter.http.adaptive_concurrency.v2alpha.AdaptiveConcurrency + "@type": type.googleapis.com/envoy.extensions.filters.http.adaptive_concurrency.v3.AdaptiveConcurrency gradient_controller_config: sample_aggregate_percentile: value: 90 @@ -191,7 +191,7 @@ Statistics ---------- The adaptive concurrency filter outputs statistics in the *http..adaptive_concurrency.* namespace. The :ref:`stat prefix -` +` comes from the owning HTTP connection manager. Statistics are specific to the concurrency controllers. diff --git a/docs/root/configuration/http/http_filters/aws_lambda_filter.rst b/docs/root/configuration/http/http_filters/aws_lambda_filter.rst index 28bfe645ea93..d281de9b0ab7 100644 --- a/docs/root/configuration/http/http_filters/aws_lambda_filter.rst +++ b/docs/root/configuration/http/http_filters/aws_lambda_filter.rst @@ -4,7 +4,7 @@ AWS Lambda ========== -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.http.aws_lambda*. .. attention:: @@ -15,11 +15,11 @@ The HTTP AWS Lambda filter is used to trigger an AWS Lambda function from a stan It supports a few options to control whether to pass through the HTTP request payload as is or to wrap it in a JSON schema. -If :ref:`payload_passthrough ` is set to +If :ref:`payload_passthrough ` is set to ``true``, then the payload is sent to Lambda without any transformations. *Note*: This means you lose access to all the HTTP headers in the Lambda function. -However, if :ref:`payload_passthrough ` +However, if :ref:`payload_passthrough ` is set to ``false``, then the HTTP request is transformed to a JSON payload with the following schema: .. code-block:: @@ -81,7 +81,7 @@ On the other end, the response of the Lambda function must conform to the follow .. 
_regional Lambda endpoint: https://docs.aws.amazon.com/general/latest/gr/lambda-service.html The filter supports :ref:`per-filter configuration -`. +`. If you use the per-filter configuration, the target cluster _must_ have the following metadata: @@ -132,7 +132,7 @@ in us-west-2: transport_socket: name: envoy.transport_sockets.tls typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext sni: "*.amazonaws.com" @@ -179,7 +179,7 @@ An example with the Lambda metadata applied to a weighted-cluster: transport_socket: name: envoy.transport_sockets.tls typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext sni: "*.amazonaws.com" diff --git a/docs/root/configuration/http/http_filters/aws_request_signing_filter.rst b/docs/root/configuration/http/http_filters/aws_request_signing_filter.rst index 4c9e097b879f..0280a012a05d 100644 --- a/docs/root/configuration/http/http_filters/aws_request_signing_filter.rst +++ b/docs/root/configuration/http/http_filters/aws_request_signing_filter.rst @@ -4,7 +4,7 @@ AWS Request Signing =================== -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.http.aws_request_signing*. .. attention:: @@ -24,7 +24,7 @@ Example filter configuration: name: envoy.filters.http.aws_request_signing typed_config: - "@type": type.googleapis.com/envoy.config.filter.http.aws_request_signing.v2alpha.AwsRequestSigning + "@type": type.googleapis.com/envoy.extensions.filters.http.aws_request_signing.v3.AwsRequestSigning service_name: s3 region: us-west-2 @@ -33,7 +33,7 @@ Statistics ---------- The AWS request signing filter outputs statistics in the *http..aws_request_signing.* namespace. The -:ref:`stat prefix ` +:ref:`stat prefix ` comes from the owning HTTP connection manager. .. csv-table:: diff --git a/docs/root/configuration/http/http_filters/buffer_filter.rst b/docs/root/configuration/http/http_filters/buffer_filter.rst index d1e55ac3138e..d2665c58c137 100644 --- a/docs/root/configuration/http/http_filters/buffer_filter.rst +++ b/docs/root/configuration/http/http_filters/buffer_filter.rst @@ -11,12 +11,12 @@ If enabled the buffer filter populates content-length header if it is not presen already. The behavior can be disabled using the runtime feature `envoy.reloadable_features.buffer_filter_populate_content_length`. -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.http.buffer*. Per-Route Configuration ----------------------- The buffer filter configuration can be overridden or disabled on a per-route basis by providing a -:ref:`BufferPerRoute ` configuration on +:ref:`BufferPerRoute ` configuration on the virtual host, route, or weighted cluster. diff --git a/docs/root/configuration/http/http_filters/cors_filter.rst b/docs/root/configuration/http/http_filters/cors_filter.rst index d51cbd923b54..f7109ef6eaa9 100644 --- a/docs/root/configuration/http/http_filters/cors_filter.rst +++ b/docs/root/configuration/http/http_filters/cors_filter.rst @@ -8,7 +8,7 @@ For the meaning of the headers please refer to the pages below. 
* https://developer.mozilla.org/en-US/docs/Web/HTTP/Access_control_CORS * https://www.w3.org/TR/cors/ -* :ref:`v2 API reference ` +* :ref:`v2 API reference ` * This filter should be configured with the name *envoy.filters.http.cors*. .. _cors-runtime: @@ -16,12 +16,12 @@ For the meaning of the headers please refer to the pages below. Runtime ------- The fraction of requests for which the filter is enabled can be configured via the :ref:`runtime_key -` value of the :ref:`filter_enabled -` field. +` value of the :ref:`filter_enabled +` field. The fraction of requests for which the filter is enabled in shadow-only mode can be configured via -the :ref:`runtime_key ` value of the -:ref:`shadow_enabled ` field. When enabled in +the :ref:`runtime_key ` value of the +:ref:`shadow_enabled ` field. When enabled in shadow-only mode, the filter will evaluate the request's *Origin* to determine if it's valid but will not enforce any policies. diff --git a/docs/root/configuration/http/http_filters/csrf_filter.rst b/docs/root/configuration/http/http_filters/csrf_filter.rst index fb8c770d03da..4e01e413f595 100644 --- a/docs/root/configuration/http/http_filters/csrf_filter.rst +++ b/docs/root/configuration/http/http_filters/csrf_filter.rst @@ -41,7 +41,7 @@ For more information on CSRF please refer to the pages below. * https://www.owasp.org/index.php/Cross-Site_Request_Forgery_%28CSRF%29 * https://seclab.stanford.edu/websec/csrf/csrf.pdf -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` .. note:: @@ -76,12 +76,12 @@ Runtime ------- The fraction of requests for which the filter is enabled can be configured via the :ref:`runtime_key -` value of the :ref:`filter_enabled -` field. +` value of the :ref:`filter_enabled +` field. The fraction of requests for which the filter is enabled in shadow-only mode can be configured via -the :ref:`runtime_key ` value of the -:ref:`shadow_enabled ` field. +the :ref:`runtime_key ` value of the +:ref:`shadow_enabled ` field. When enabled in shadow-only mode, the filter will evaluate the request's *Origin* and *Destination* to determine if it's valid but will not enforce any policies. diff --git a/docs/root/configuration/http/http_filters/dynamic_forward_proxy_filter.rst b/docs/root/configuration/http/http_filters/dynamic_forward_proxy_filter.rst index 376796423028..54613fdde1a9 100644 --- a/docs/root/configuration/http/http_filters/dynamic_forward_proxy_filter.rst +++ b/docs/root/configuration/http/http_filters/dynamic_forward_proxy_filter.rst @@ -8,26 +8,26 @@ Dynamic forward proxy HTTP dynamic forward proxy support should be considered alpha and not production ready. * HTTP dynamic forward proxy :ref:`architecture overview ` -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.http.dynamic_forward_proxy* The following is a complete configuration that configures both the :ref:`dynamic forward proxy HTTP filter -` +` as well as the :ref:`dynamic forward proxy cluster -`. Both filter and cluster +`. Both filter and cluster must be configured together and point to the same DNS cache parameters for Envoy to operate as an HTTP dynamic forward proxy. -This filter supports :ref:`host rewrite ` -via the :ref:`virtual host's per_filter_config ` or the -:ref:`route's per_filter_config `. This can be used to rewrite +This filter supports :ref:`host rewrite ` +via the :ref:`virtual host's typed_per_filter_config ` or the +:ref:`route's typed_per_filter_config `. 
This can be used to rewrite the host header with the provided value before DNS lookup, thus allowing to route traffic to the rewritten host when forwarding. See the example below within the configured routes. .. note:: - Configuring a :ref:`tls_context ` on the cluster with + Configuring a :ref:`transport_socket with name envoy.transport_sockets.tls ` on the cluster with *trusted_ca* certificates instructs Envoy to use TLS when connecting to upstream hosts and verify the certificate chain. Additionally, Envoy will automatically perform SAN verification for the resolved host name as well as specify the host name via SNI. @@ -53,7 +53,7 @@ host when forwarding. See the example below within the configured routes. - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager stat_prefix: ingress_http route_config: name: local_route @@ -65,9 +65,10 @@ host when forwarding. See the example below within the configured routes. prefix: "/force-host-rewrite" route: cluster: dynamic_forward_proxy_cluster - per_filter_config: + typed_per_filter_config: envoy.filters.http.dynamic_forward_proxy: - host_rewrite: www.example.org + "@type": type.googleapis.com/envoy.extensions.filters.http.dynamic_forward_proxy.v3.PerRouteConfig + host_rewrite_literal: www.example.org - match: prefix: "/" route: @@ -80,6 +81,8 @@ host when forwarding. See the example below within the configured routes. name: dynamic_forward_proxy_cache_config dns_lookup_family: V4_ONLY - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router clusters: - name: dynamic_forward_proxy_cluster connect_timeout: 1s @@ -87,14 +90,14 @@ host when forwarding. See the example below within the configured routes. cluster_type: name: envoy.clusters.dynamic_forward_proxy typed_config: - "@type": type.googleapis.com/envoy.config.cluster.dynamic_forward_proxy.v2alpha.ClusterConfig + "@type": type.googleapis.com/envoy.extensions.clusters.dynamic_forward_proxy.v3.ClusterConfig dns_cache_config: name: dynamic_forward_proxy_cache_config dns_lookup_family: V4_ONLY transport_socket: name: envoy.transport_sockets.tls typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext common_tls_context: validation_context: trusted_ca: {filename: /etc/ssl/certs/ca-certificates.crt} diff --git a/docs/root/configuration/http/http_filters/dynamodb_filter.rst b/docs/root/configuration/http/http_filters/dynamodb_filter.rst index df06e05ef6c0..c66c474e0bc7 100644 --- a/docs/root/configuration/http/http_filters/dynamodb_filter.rst +++ b/docs/root/configuration/http/http_filters/dynamodb_filter.rst @@ -4,14 +4,14 @@ DynamoDB ======== * DynamoDB :ref:`architecture overview ` -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.http.dynamo*. Statistics ---------- The DynamoDB filter outputs statistics in the *http..dynamodb.* namespace. The :ref:`stat prefix -` comes from the +` comes from the owning HTTP connection manager. 
Per operation stats can be found in the *http..dynamodb.operation..* diff --git a/docs/root/configuration/http/http_filters/ext_authz_filter.rst b/docs/root/configuration/http/http_filters/ext_authz_filter.rst index bc1c69c611c8..44334684abe9 100644 --- a/docs/root/configuration/http/http_filters/ext_authz_filter.rst +++ b/docs/root/configuration/http/http_filters/ext_authz_filter.rst @@ -3,23 +3,23 @@ External Authorization ====================== * External authorization :ref:`architecture overview ` -* :ref:`HTTP filter v2 API reference ` +* :ref:`HTTP filter v3 API reference ` * This filter should be configured with the name *envoy.filters.http.ext_authz*. The external authorization filter calls an external gRPC or HTTP service to check whether an incoming HTTP request is authorized or not. If the request is deemed unauthorized, then the request will be denied normally with 403 (Forbidden) response. Note that sending additional custom metadata from the authorization service to the upstream, to the downstream or to the authorization service is -also possible. This is explained in more details at :ref:`HTTP filter `. +also possible. This is explained in more details at :ref:`HTTP filter `. The content of the requests that are passed to an authorization service is specified by -:ref:`CheckRequest `. +:ref:`CheckRequest `. .. _config_http_filters_ext_authz_http_configuration: The HTTP filter, using a gRPC/HTTP service, can be configured as follows. You can see all the configuration options at -:ref:`HTTP filter `. +:ref:`HTTP filter `. Configuration Examples ----------------------------- @@ -31,7 +31,7 @@ A sample filter configuration for a gRPC authorization server: http_filters: - name: envoy.filters.http.ext_authz typed_config: - "@type": type.googleapis.com/envoy.config.filter.http.ext_authz.v2.ExtAuthz + "@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz grpc_service: envoy_grpc: cluster_name: ext-authz @@ -67,7 +67,7 @@ A sample filter configuration for a raw HTTP authorization server: http_filters: - name: envoy.filters.http.ext_authz typed_config: - "@type": type.googleapis.com/envoy.config.filter.http.ext_authz.v2.ExtAuthz + "@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz http_service: server_uri: uri: 127.0.0.1:10003 @@ -106,16 +106,18 @@ In this example we add additional context on the virtual host, and disabled the virtual_hosts: - name: local_service domains: ["*"] - per_filter_config: + typed_per_filter_config: envoy.filters.http.ext_authz: + "@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthzPerRoute check_settings: context_extensions: virtual_host: local_service routes: - match: { prefix: "/static" } route: { cluster: some_service } - per_filter_config: + typed_per_filter_config: envoy.filters.http.ext_authz: + "@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthzPerRoute disabled: true - match: { prefix: "/" } route: { cluster: some_service } @@ -139,5 +141,5 @@ The HTTP filter outputs statistics in the *cluster..ext_au Runtime ------- The fraction of requests for which the filter is enabled can be configured via the :ref:`runtime_key -` value of the :ref:`filter_enabled -` field. +` value of the :ref:`filter_enabled +` field. 
diff --git a/docs/root/configuration/http/http_filters/fault_filter.rst b/docs/root/configuration/http/http_filters/fault_filter.rst index f79fc4d44a7a..62b9cd9e28c8 100644 --- a/docs/root/configuration/http/http_filters/fault_filter.rst +++ b/docs/root/configuration/http/http_filters/fault_filter.rst @@ -24,7 +24,7 @@ Configuration The fault injection filter must be inserted before any other filter, including the router filter. -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.http.fault*. .. _config_http_filters_fault_injection_http_header: @@ -40,7 +40,7 @@ x-envoy-fault-abort-request HTTP status code to abort a request with. The header value should be an integer that specifies the HTTP status code to return in response to a request and must be in the range [200, 600). In order for the header to work, :ref:`header_abort - ` needs to be set. + ` needs to be set. x-envoy-fault-abort-grpc-request gRPC status code to abort a request with. The header value should be a non-negative integer that specifies @@ -57,52 +57,52 @@ x-envoy-fault-abort-request-percentage by the value of *x-envoy-fault-abort-request* or *x-envoy-fault-abort-grpc-request* HTTP headers. The header value should be an integer that specifies the numerator of the percentage of request to apply aborts to and must be greater or equal to 0 and its maximum value is capped by the value of the numerator of - :ref:`percentage ` field. + :ref:`percentage ` field. Percentage's denominator is equal to default percentage's denominator - :ref:`percentage ` field. + :ref:`percentage ` field. In order for the header to work, :ref:`header_abort - ` needs to be set and + ` needs to be set and either *x-envoy-fault-abort-request* or *x-envoy-fault-abort-grpc-request* HTTP header needs to be a part of the request. x-envoy-fault-delay-request The duration to delay a request by. The header value should be an integer that specifies the number of milliseconds to throttle the latency for. In order for the header to work, :ref:`header_delay - ` needs to be set. + ` needs to be set. x-envoy-fault-delay-request-percentage The percentage of requests that should be delayed by a duration that's defined by the value of *x-envoy-fault-delay-request* HTTP header. The header value should be an integer that specifies the percentage of request to apply delays to and must be greater or equal to 0 and its maximum value is capped by the value of the numerator of - :ref:`percentage ` field. + :ref:`percentage ` field. Percentage's denominator is equal to default percentage's denominator - :ref:`percentage ` field. + :ref:`percentage ` field. In order for the header to work, :ref:`header_delay - ` needs to be set and + ` needs to be set and *x-envoy-fault-delay-request* HTTP header needs to be a part of a request. x-envoy-fault-throughput-response The rate limit to use when a response to a caller is sent. The header value should be an integer that specifies the limit in KiB/s and must be > 0. In order for the header to work, :ref:`header_limit - ` needs to be set. + ` needs to be set. x-envoy-fault-throughput-response-percentage The percentage of requests whose response rate should be limited to the value of *x-envoy-fault-throughput-response* HTTP header. The header value should be an integer that specifies the percentage of request to apply delays to and must be greater or equal to 0 and its maximum value is capped by the value of the numerator of - :ref:`percentage ` field. 
+ :ref:`percentage ` field. Percentage's denominator is equal to default percentage's denominator - :ref:`percentage ` field. + :ref:`percentage ` field. In order for the header to work, :ref:`header_limit - ` needs to be set and + ` needs to be set and *x-envoy-fault-delay-request* HTTP header needs to be a part of a request. .. attention:: Allowing header control is inherently dangerous if exposed to untrusted clients. In this case, it is suggested to use the :ref:`max_active_faults - ` setting to limit the + ` setting to limit the maximum concurrent faults that can be active at any given time. The following is an example configuration that enables header control for both of the above @@ -112,7 +112,7 @@ options: name: envoy.filters.http.fault typed_config: - "@type": type.googleapis.com/envoy.config.filter.http.fault.v2.HTTPFault + "@type": type.googleapis.com/envoy.extensions.filters.http.fault.v3.HTTPFault max_active_faults: 100 abort: header_abort: {} @@ -144,7 +144,7 @@ fault.http.abort.abort_percent *abort_percent* specified in config. If the config does not contain an *abort* block, then *abort_percent* defaults to 0. For historic reasons, this runtime key is available regardless of whether the filter is :ref:`configured for abort - `. + `. fault.http.abort.http_status HTTP status code that will be used as the response status code of requests that will be @@ -152,7 +152,7 @@ fault.http.abort.http_status in the config. If the config does not contain an *abort* block, then *http_status* defaults to 0. For historic reasons, this runtime key is available regardless of whether the filter is :ref:`configured for abort - `. + `. fault.http.abort.grpc_status gRPC status code that will be used as the response status code of requests that will be @@ -165,27 +165,27 @@ fault.http.delay.fixed_delay_percent % of requests that will be delayed if the headers match. Defaults to the *delay_percent* specified in the config or 0 otherwise. This runtime key is only available when the filter is :ref:`configured for delay - `. + `. fault.http.delay.fixed_duration_ms The delay duration in milliseconds. If not specified, the *fixed_duration_ms* specified in the config will be used. If this field is missing from both the runtime and the config, no delays will be injected. This runtime key is only available when the filter is :ref:`configured for delay - `. + `. fault.http.max_active_faults The maximum number of active faults (of all types) that Envoy will will inject via the fault filter. This can be used in cases where it is desired that faults are 100% injected, but the user wants to avoid a situation in which too many unexpected concurrent faulting requests cause resource constraint issues. If not specified, the :ref:`max_active_faults - ` setting will be used. + ` setting will be used. fault.http.rate_limit.response_percent % of requests which will have a response rate limit fault injected. Defaults to the value set in - the :ref:`percentage ` field. + the :ref:`percentage ` field. This runtime key is only available when the filter is :ref:`configured for response rate limiting - `. + `. *Note*, fault filter runtime settings for the specific downstream cluster override the default ones if present. The following are downstream specific @@ -207,7 +207,7 @@ Statistics ---------- The fault filter outputs statistics in the *http..fault.* namespace. The :ref:`stat prefix -` comes from the +` comes from the owning HTTP connection manager. .. csv-table:: @@ -217,7 +217,7 @@ owning HTTP connection manager. 
delays_injected, Counter, Total requests that were delayed aborts_injected, Counter, Total requests that were aborted response_rl_injected, Counter, "Total requests that had a response rate limit selected for injection (actually injection may not occur due to disconnect, reset, no body, etc.)" - faults_overflow, Counter, Total number of faults that were not injected due to overflowing the :ref:`max_active_faults ` setting + faults_overflow, Counter, Total number of faults that were not injected due to overflowing the :ref:`max_active_faults ` setting active_faults, Gauge, Total number of faults active at the current time .delays_injected, Counter, Total delayed requests for the given downstream cluster .aborts_injected, Counter, Total aborted requests for the given downstream cluster diff --git a/docs/root/configuration/http/http_filters/grpc_http1_bridge_filter.rst b/docs/root/configuration/http/http_filters/grpc_http1_bridge_filter.rst index 47039f2b6fcd..5454468c5ac2 100644 --- a/docs/root/configuration/http/http_filters/grpc_http1_bridge_filter.rst +++ b/docs/root/configuration/http/http_filters/grpc_http1_bridge_filter.rst @@ -4,7 +4,7 @@ gRPC HTTP/1.1 bridge ==================== * gRPC :ref:`architecture overview ` -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.http.grpc_http1_bridge*. This is a simple filter which enables the bridging of an HTTP/1.1 client which does not support diff --git a/docs/root/configuration/http/http_filters/grpc_http1_reverse_bridge_filter.rst b/docs/root/configuration/http/http_filters/grpc_http1_reverse_bridge_filter.rst index ace79b2813ad..ed668b936a3f 100644 --- a/docs/root/configuration/http/http_filters/grpc_http1_reverse_bridge_filter.rst +++ b/docs/root/configuration/http/http_filters/grpc_http1_reverse_bridge_filter.rst @@ -4,7 +4,7 @@ gRPC HTTP/1.1 reverse bridge ============================ * gRPC :ref:`architecture overview ` -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.http.grpc_http1_reverse_bridge*. 
This is a filter that enables converting an incoming gRPC request into a HTTP/1.1 request to allow @@ -61,11 +61,11 @@ How to disable HTTP/1.1 reverse bridge filter per route - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager access_log: - name: envoy.access_loggers.file typed_config: - "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog + "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog path: /dev/stdout stat_prefix: ingress_http route_config: @@ -81,8 +81,9 @@ How to disable HTTP/1.1 reverse bridge filter per route cluster: grpc timeout: 5.00s # per_filter_config disables the filter for this route - per_filter_config: + typed_per_filter_config: envoy.filters.http.grpc_http1_reverse_bridge: + "@type": type.googleapis.com/envoy.extensions.filters.http.grpc_http1_reverse_bridge.v3.FilterConfigPerRoute disabled: true - match: prefix: "/route-with-filter-enabled" @@ -93,7 +94,7 @@ How to disable HTTP/1.1 reverse bridge filter per route http_filters: - name: envoy.filters.http.grpc_http1_reverse_bridge typed_config: - "@type": type.googleapis.com/envoy.config.filter.http.grpc_http1_reverse_bridge.v2alpha1.FilterConfig + "@type": type.googleapis.com/envoy.extensions.filters.http.grpc_http1_reverse_bridge.v3.FilterConfig content_type: application/grpc+proto withhold_grpc_frames: true - name: envoy.filters.http.router @@ -104,10 +105,15 @@ How to disable HTTP/1.1 reverse bridge filter per route type: LOGICAL_DNS dns_lookup_family: V4_ONLY lb_policy: ROUND_ROBIN - hosts: - - socket_address: - address: localhost - port_value: 4630 + load_assignment: + cluster_name: some_service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: localhost + port_value: 4630 - name: grpc connect_timeout: 5.00s type: strict_dns diff --git a/docs/root/configuration/http/http_filters/grpc_json_transcoder_filter.rst b/docs/root/configuration/http/http_filters/grpc_json_transcoder_filter.rst index a1fdfdcccdf5..3e01c544e819 100644 --- a/docs/root/configuration/http/http_filters/grpc_json_transcoder_filter.rst +++ b/docs/root/configuration/http/http_filters/grpc_json_transcoder_filter.rst @@ -4,7 +4,7 @@ gRPC-JSON transcoder ==================== * gRPC :ref:`architecture overview ` -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.http.grpc_json_transcoder*. This is a filter which allows a RESTful JSON API client to send requests to Envoy over HTTP @@ -110,7 +110,7 @@ gRPC or RESTful JSON requests to localhost:51051. - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager stat_prefix: grpc_json codec_type: AUTO route_config: @@ -126,7 +126,7 @@ gRPC or RESTful JSON requests to localhost:51051. 
http_filters: - name: envoy.filters.http.grpc_json_transcoder typed_config: - "@type": type.googleapis.com/envoy.config.filter.http.transcoder.v2.GrpcJsonTranscoder + "@type": type.googleapis.com/envoy.extensions.filters.http.grpc_json_transcoder.v3.GrpcJsonTranscoder proto_descriptor: "/tmp/envoy/proto.pb" services: ["helloworld.Greeter"] print_options: diff --git a/docs/root/configuration/http/http_filters/grpc_stats_filter.rst b/docs/root/configuration/http/http_filters/grpc_stats_filter.rst index 984a2f9348d7..80458525b72b 100644 --- a/docs/root/configuration/http/http_filters/grpc_stats_filter.rst +++ b/docs/root/configuration/http/http_filters/grpc_stats_filter.rst @@ -4,10 +4,10 @@ gRPC Statistics =============== * gRPC :ref:`architecture overview ` -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.http.grpc_stats*. * This filter can be enabled to emit a :ref:`filter state object - ` + ` This is a filter which enables telemetry of gRPC calls. Additionally, the filter detects message boundaries in streaming gRPC calls and emits the message @@ -18,8 +18,8 @@ More info: wire format in `gRPC over HTTP/2 .grpc.* namespace. Depending on the configuration, the stats may be prefixed with `..`; the stats in the table below are shown in this form. See the documentation for -:ref:`individual_method_stats_allowlist ` -and :ref:`stats_for_all_methods `. +:ref:`individual_method_stats_allowlist ` +and :ref:`stats_for_all_methods `. To enable *upstream_rq_time* (v3 API only) see :ref:`enable_upstream_stats `. diff --git a/docs/root/configuration/http/http_filters/grpc_web_filter.rst b/docs/root/configuration/http/http_filters/grpc_web_filter.rst index fe4dea6f4b09..8241262a7c02 100644 --- a/docs/root/configuration/http/http_filters/grpc_web_filter.rst +++ b/docs/root/configuration/http/http_filters/grpc_web_filter.rst @@ -4,7 +4,7 @@ gRPC-Web ======== * gRPC :ref:`architecture overview ` -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.http.grpc_web*. This is a filter which enables the bridging of a gRPC-Web client to a compliant gRPC server by diff --git a/docs/root/configuration/http/http_filters/gzip_filter.rst b/docs/root/configuration/http/http_filters/gzip_filter.rst index 0251012244f1..9267e3ead6e3 100644 --- a/docs/root/configuration/http/http_filters/gzip_filter.rst +++ b/docs/root/configuration/http/http_filters/gzip_filter.rst @@ -9,7 +9,7 @@ compromising the response time. Configuration ------------- -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.http.gzip*. .. attention:: diff --git a/docs/root/configuration/http/http_filters/header_to_metadata_filter.rst b/docs/root/configuration/http/http_filters/header_to_metadata_filter.rst index 38a55736861e..e482545a481f 100644 --- a/docs/root/configuration/http/http_filters/header_to_metadata_filter.rst +++ b/docs/root/configuration/http/http_filters/header_to_metadata_filter.rst @@ -2,7 +2,7 @@ Envoy Header-To-Metadata Filter =============================== -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.http.header_to_metadata*. This filter is configured with rules that will be matched against requests and responses. 
@@ -25,7 +25,7 @@ absence of a version header could be: http_filters: - name: envoy.filters.http.header_to_metadata typed_config: - "@type": type.googleapis.com/envoy.config.filter.http.header_to_metadata.v2.Config + "@type": type.googleapis.com/envoy.extensions.filters.http.header_to_metadata.v3.Config request_rules: - header: x-version on_header_present: @@ -72,8 +72,9 @@ Note that this filter also supports per route configuration: routes: - match: { prefix: "/version-to-metadata" } route: { cluster: service } - per_filter_config: + typed_per_filter_config: envoy.filters.http.header_to_metadata: + "@type": type.googleapis.com/envoy.extensions.filters.http.header_to_metadata.v3.Config request_rules: - header: x-version on_header_present: diff --git a/docs/root/configuration/http/http_filters/health_check_filter.rst b/docs/root/configuration/http/http_filters/health_check_filter.rst index 14b35114adb4..809b1fd42e98 100644 --- a/docs/root/configuration/http/http_filters/health_check_filter.rst +++ b/docs/root/configuration/http/http_filters/health_check_filter.rst @@ -4,7 +4,7 @@ Health check ============ * Health check filter :ref:`architecture overview ` -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.http.health_check*. .. note:: diff --git a/docs/root/configuration/http/http_filters/ip_tagging_filter.rst b/docs/root/configuration/http/http_filters/ip_tagging_filter.rst index 45bb1efefcfc..a991c1f65f24 100644 --- a/docs/root/configuration/http/http_filters/ip_tagging_filter.rst +++ b/docs/root/configuration/http/http_filters/ip_tagging_filter.rst @@ -16,7 +16,7 @@ G. Karlsson. Configuration ------------- -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.http.ip_tagging*. Statistics diff --git a/docs/root/configuration/http/http_filters/jwt_authn_filter.rst b/docs/root/configuration/http/http_filters/jwt_authn_filter.rst index 8fe9b9c16f7c..118f21946b95 100644 --- a/docs/root/configuration/http/http_filters/jwt_authn_filter.rst +++ b/docs/root/configuration/http/http_filters/jwt_authn_filter.rst @@ -15,7 +15,7 @@ Configuration This filter should be configured with the name *envoy.filters.http.jwt_authn*. -This HTTP :ref:`filter config ` has two fields: +This HTTP :ref:`filter config ` has two fields: * Field *providers* specifies how a JWT should be verified, such as where to extract the token, where to fetch the public key (JWKS) and where to output its payload. * Field *rules* specifies matching rules and their requirements. If a request matches a rule, its requirement applies. The requirement specifies which JWT providers should be used. @@ -23,7 +23,7 @@ This HTTP :ref:`filter config ` specifies how a JWT should be verified. It has the following fields: +:ref:`JwtProvider ` specifies how a JWT should be verified. It has the following fields: * *issuer*: the principal that issued the JWT, usually a URL or an email address. * *audiences*: a list of JWT audiences allowed to access. A JWT containing any of these audiences will be accepted. @@ -46,7 +46,7 @@ If fails to extract a JWT from above header, then check query parameter key *acc /path?access_token= -In the :ref:`filter config `, *providers* is a map, to map *provider_name* to a :ref:`JwtProvider `. The *provider_name* must be unique, it is referred in the `JwtRequirement ` in its *provider_name* field. 
+In the :ref:`filter config `, *providers* is a map, to map *provider_name* to a :ref:`JwtProvider `. The *provider_name* must be unique, it is referred in the `JwtRequirement ` in its *provider_name* field. .. important:: For *remote_jwks*, a **jwks_cluster** cluster is required. @@ -119,7 +119,7 @@ JWT payload will be added to the request header as following format:: RequirementRule ~~~~~~~~~~~~~~~ -:ref:`RequirementRule ` has two fields: +:ref:`RequirementRule ` has two fields: * Field *match* specifies how a request can be matched; e.g. by HTTP headers, or by query parameters, or by path prefixes. * Field *requires* specifies the JWT requirement, e.g. which provider is required. diff --git a/docs/root/configuration/http/http_filters/lua_filter.rst b/docs/root/configuration/http/http_filters/lua_filter.rst index c55bfd5b4519..7e381a69b2d5 100644 --- a/docs/root/configuration/http/http_filters/lua_filter.rst +++ b/docs/root/configuration/http/http_filters/lua_filter.rst @@ -60,7 +60,7 @@ API. Configuration ------------- -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.http.lua*. Script examples @@ -289,7 +289,7 @@ metadata() Returns the current route entry metadata. Note that the metadata should be specified under the filter name i.e. *envoy.filters.http.lua*. Below is an example of a *metadata* in a -:ref:`route entry `. +:ref:`route entry `. .. code-block:: yaml diff --git a/docs/root/configuration/http/http_filters/on_demand_updates_filter.rst b/docs/root/configuration/http/http_filters/on_demand_updates_filter.rst index ee39fa06aa5f..d856d3e7597c 100644 --- a/docs/root/configuration/http/http_filters/on_demand_updates_filter.rst +++ b/docs/root/configuration/http/http_filters/on_demand_updates_filter.rst @@ -3,16 +3,16 @@ On-demand VHDS Updates ====================== -The on-demand VHDS filter is used to request a :ref:`virtual host ` -data if it's not already present in the :ref:`Route Configuration `. The +The on-demand VHDS filter is used to request a :ref:`virtual host ` +data if it's not already present in the :ref:`Route Configuration `. The contents of the *Host* or *:authority* header is used to create the on-demand request. For an on-demand -request to be created, :ref:`VHDS ` must be enabled and either *Host* +request to be created, :ref:`VHDS ` must be enabled and either *Host* or *:authority* header be present. On-demand VHDS cannot be used with SRDS at this point. Configuration ------------- -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.http.on_demand*. * The filter should be placed before *envoy.filters.http.router* filter in the HttpConnectionManager's filter chain. diff --git a/docs/root/configuration/http/http_filters/original_src_filter.rst b/docs/root/configuration/http/http_filters/original_src_filter.rst index 0dc13c650606..2103d7e5e97d 100644 --- a/docs/root/configuration/http/http_filters/original_src_filter.rst +++ b/docs/root/configuration/http/http_filters/original_src_filter.rst @@ -3,7 +3,7 @@ Original Source =============== -* :ref:`HTTP filter v2 API reference ` +* :ref:`HTTP filter v3 API reference ` * This filter should be configured with the name *envoy.filters.http.original_src*. 
The original source http filter replicates the downstream remote address of the connection on @@ -32,10 +32,10 @@ to forcefully route any traffic whose IP was replicated by Envoy back through th If Envoy and the upstream are on the same host -- e.g. in an sidecar deployment --, then iptables and routing rules can be used to ensure correct behaviour. The filter has an unsigned integer configuration, -:ref:`mark `. Setting +:ref:`mark `. Setting this to *X* causes Envoy to *mark* all upstream packets originating from this http with value *X*. Note that if -:ref:`mark ` is set +:ref:`mark ` is set to 0, Envoy will not mark upstream packets. We can use the following set of commands to ensure that all ipv4 and ipv6 traffic marked with *X* @@ -66,7 +66,7 @@ The following example configures Envoy to use the original source for all connec http_filters: - name: envoy.filters.http.original_src typed_config: - "@type": type.googleapis.com/envoy.config.filter.http.original_src.v2alpha1.OriginalSrc + "@type": type.googleapis.com/envoy.extensions.filters.listener.original_src.v3.OriginalSrc mark: 123 - name: envoy.filters.http.router typed_config: {} diff --git a/docs/root/configuration/http/http_filters/rate_limit_filter.rst b/docs/root/configuration/http/http_filters/rate_limit_filter.rst index e76dda3d6f02..51850ba45640 100644 --- a/docs/root/configuration/http/http_filters/rate_limit_filter.rst +++ b/docs/root/configuration/http/http_filters/rate_limit_filter.rst @@ -4,19 +4,19 @@ Rate limit ========== * Global rate limiting :ref:`architecture overview ` -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.http.ratelimit*. The HTTP rate limit filter will call the rate limit service when the request's route or virtual host -has one or more :ref:`rate limit configurations` -that match the filter stage setting. The :ref:`route` +has one or more :ref:`rate limit configurations` +that match the filter stage setting. The :ref:`route` can optionally include the virtual host rate limit configurations. More than one configuration can apply to a request. Each configuration results in a descriptor being sent to the rate limit service. If the rate limit service is called, and the response for any of the descriptors is over limit, a 429 response is returned. The rate limit filter also sets the :ref:`x-envoy-ratelimited` header. -If there is an error in calling rate limit service or rate limit service returns an error and :ref:`failure_mode_deny ` is +If there is an error in calling rate limit service or rate limit service returns an error and :ref:`failure_mode_deny ` is set to true, a 500 response is returned. .. _config_http_filters_rate_limit_composing_actions: @@ -24,7 +24,7 @@ set to true, a 500 response is returned. Composing Actions ----------------- -Each :ref:`rate limit action ` on the route or +Each :ref:`rate limit action ` on the route or virtual host populates a descriptor entry. A vector of descriptor entries compose a descriptor. To create more complex rate limit descriptors, actions can be composed in any order. The descriptor will be populated in the order the actions are specified in the configuration. @@ -90,7 +90,7 @@ The rate limit filter outputs statistics in the *cluster.. 
error, Counter, Total errors contacting the rate limit service over_limit, Counter, total over limit responses from the rate limit service failure_mode_allowed, Counter, "Total requests that were error(s) but were allowed through because - of :ref:`failure_mode_deny ` set to false." + of :ref:`failure_mode_deny ` set to false." Runtime ------- @@ -106,4 +106,4 @@ ratelimit.http_filter_enforcing ratelimit..http_filter_enabled % of requests that will call the rate limit service for a given *route_key* specified in the - :ref:`rate limit configuration `. Defaults to 100. + :ref:`rate limit configuration `. Defaults to 100. diff --git a/docs/root/configuration/http/http_filters/rbac_filter.rst b/docs/root/configuration/http/http_filters/rbac_filter.rst index a905b2a6c99f..d6068bbdcc6c 100644 --- a/docs/root/configuration/http/http_filters/rbac_filter.rst +++ b/docs/root/configuration/http/http_filters/rbac_filter.rst @@ -11,21 +11,21 @@ as well as the incoming request's HTTP headers. This filter also supports policy and shadow mode, shadow mode won't effect real users, it is used to test that a new set of policies work before rolling out to production. -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.http.rbac*. Per-Route Configuration ----------------------- The RBAC filter configuration can be overridden or disabled on a per-route basis by providing a -:ref:`RBACPerRoute ` configuration on +:ref:`RBACPerRoute ` configuration on the virtual host, route, or weighted cluster. Statistics ---------- The RBAC filter outputs statistics in the *http..rbac.* namespace. The :ref:`stat prefix -` comes from the +` comes from the owning HTTP connection manager. .. csv-table:: diff --git a/docs/root/configuration/http/http_filters/router_filter.rst b/docs/root/configuration/http/http_filters/router_filter.rst index 43a60e990c3f..5bf42b4ac7da 100644 --- a/docs/root/configuration/http/http_filters/router_filter.rst +++ b/docs/root/configuration/http/http_filters/router_filter.rst @@ -5,10 +5,10 @@ Router The router filter implements HTTP forwarding. It will be used in almost all HTTP proxy scenarios that Envoy is deployed for. The filter's main job is to follow the instructions specified in the -configured :ref:`route table `. In addition to forwarding and +configured :ref:`route table `. In addition to forwarding and redirection, the filter also handles retry, statistics, etc. -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.http.router*. .. _config_http_filters_router_headers_consumed: @@ -26,8 +26,8 @@ ingress/response path. They are documented in this section. x-envoy-max-retries ^^^^^^^^^^^^^^^^^^^ -If a :ref:`route config retry policy ` or a -:ref:`virtual host retry policy ` is in place, Envoy will default to retrying +If a :ref:`route config retry policy ` or a +:ref:`virtual host retry policy ` is in place, Envoy will default to retrying one time unless explicitly specified. The number of retries can be explicitly set in the virtual host retry config, the route retry config, or by using this header. If this header is used, its value takes precedence over the number of retries set in either retry policy. 
If a retry policy is not configured and :ref:`config_http_filters_router_x-envoy-retry-on` @@ -36,9 +36,9 @@ or :ref:`config_http_filters_router_x-envoy-retry-grpc-on` headers are not speci A few notes on how Envoy does retries: * The route timeout (set via :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms` or the - :ref:`timeout ` in route configuration or set via + :ref:`timeout ` in route configuration or set via `grpc-timeout header `_ by specifying - :ref:`max_grpc_timeout ` in route configuration) **includes** all + :ref:`max_grpc_timeout ` in route configuration) **includes** all retries. Thus if the request timeout is set to 3s, and the first request attempt takes 2.7s, the retry (including back-off) has .3s to complete. This is by design to avoid an exponential retry/timeout explosion. @@ -50,7 +50,7 @@ A few notes on how Envoy does retries: The default base interval (and therefore the maximum interval) can be manipulated by setting the upstream.base_retry_backoff_ms runtime parameter. The back-off intervals can also be modified by configuring the retry policy's - :ref:`retry back-off `. + :ref:`retry back-off `. * If max retries is set both by header as well as in the route configuration, the maximum value is taken when determining the max retries to use for the request. @@ -62,7 +62,7 @@ x-envoy-retry-on Setting this header on egress requests will cause Envoy to attempt to retry failed requests (number of retries defaults to 1 and can be controlled by :ref:`x-envoy-max-retries ` header or the :ref:`route config retry policy -` or the :ref:`virtual host retry policy `). +` or the :ref:`virtual host retry policy `). The value to which the x-envoy-retry-on header is set indicates the retry policy. One or more policies can be specified using a ',' delimited list. The supported policies are: @@ -91,8 +91,8 @@ connect-failure * **NOTE:** A connection failure/timeout is a the TCP level, not the request level. This does not include upstream request timeouts specified via :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms` or via :ref:`route - configuration ` or via - :ref:`virtual host retry policy `. + configuration ` or via + :ref:`virtual host retry policy `. retriable-4xx Envoy will attempt a retry if the upstream server responds with a retriable 4xx response code. @@ -109,22 +109,22 @@ refused-stream retriable-status-codes Envoy will attempt a retry if the upstream server responds with any response code matching one defined - in either :ref:`the retry policy ` + in either :ref:`the retry policy ` or in the :ref:`config_http_filters_router_x-envoy-retriable-status-codes` header. retriable-headers Envoy will attempt a retry if the upstream server response includes any headers matching in either - :ref:`the retry policy ` or in the + :ref:`the retry policy ` or in the :ref:`config_http_filters_router_x-envoy-retriable-header-names` header. The number of retries can be controlled via the :ref:`config_http_filters_router_x-envoy-max-retries` header or via the :ref:`route -configuration ` or via the -:ref:`virtual host retry policy `. +configuration ` or via the +:ref:`virtual host retry policy `. Note that retry policies can also be applied at the :ref:`route level -` or the -:ref:`virtual host level `. +` or the +:ref:`virtual host level `. By default, Envoy will *not* perform retries unless you've configured them per above. 
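As a concrete illustration of the retry behaviour discussed above, a route-level retry policy can be expressed roughly as follows. This is a minimal sketch rather than part of this patch; the field names follow the v3 route API, and the cluster name is a placeholder.

.. code-block:: yaml

  route_config:
    virtual_hosts:
    - name: backend
      domains: ["*"]
      routes:
      - match:
          prefix: "/"
        route:
          cluster: some_service   # placeholder cluster name
          retry_policy:
            # Retry on TCP connect failures and on the listed response codes.
            retry_on: connect-failure,retriable-status-codes
            retriable_status_codes: [503]
            num_retries: 3
            per_try_timeout: 1s

If *x-envoy-max-retries* is also sent on the request, the larger of the header value and *num_retries* applies, per the note above.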
@@ -135,8 +135,8 @@ x-envoy-retry-grpc-on Setting this header on egress requests will cause Envoy to attempt to retry failed requests (number of retries defaults to 1, and can be controlled by :ref:`x-envoy-max-retries ` -header or the :ref:`route config retry policy `) or the -:ref:`virtual host retry policy `. +header or the :ref:`route config retry policy `) or the +:ref:`virtual host retry policy `. gRPC retries are currently only supported for gRPC status codes in response headers. gRPC status codes in trailers will not trigger retry logic. One or more policies can be specified using a ',' delimited list. The supported policies are: @@ -160,8 +160,8 @@ As with the x-envoy-retry-grpc-on header, the number of retries can be controlle :ref:`config_http_filters_router_x-envoy-max-retries` header Note that retry policies can also be applied at the :ref:`route level -` or the -:ref:`virtual host level `. +` or the +:ref:`virtual host level `. By default, Envoy will *not* perform retries unless you've configured them per above. @@ -180,7 +180,7 @@ is enabled. Header names are case-insensitive. Only the names of retriable response headers can be specified via the request header. A more sophisticated retry policy based on the response headers can be specified by using arbitrary header matching rules -via :ref:`retry policy configuration `. +via :ref:`retry policy configuration `. This header will only be honored for requests from internal clients. @@ -207,7 +207,7 @@ Setting this header on egress requests will cause Envoy to emit upstream respons statistics to a dual stat tree. This can be useful for application level categories that Envoy doesn't know about. The output tree is documented :ref:`here `. -This should not be confused with :ref:`alt_stat_name ` which +This should not be confused with :ref:`alt_stat_name ` which is specified while defining the cluster and when provided specifies an alternative name for the cluster at the root of the statistic tree. @@ -232,9 +232,9 @@ x-envoy-upstream-rq-timeout-ms ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Setting this header on egress requests will cause Envoy to override the :ref:`route configuration timeout -` or gRPC client timeout set via `grpc-timeout header +` or gRPC client timeout set via `grpc-timeout header `_ by specifying :ref:`max_grpc_timeout -`. The timeout must be specified in millisecond +`. The timeout must be specified in millisecond units. See also :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`. .. _config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms: @@ -255,7 +255,7 @@ x-envoy-hedge-on-per-try-timeout Setting this header on egress requests will cause Envoy to use a request hedging strategy in the case of a per try timeout. This overrides the value set in the :ref:`route configuration -`. This means that a retry +`. This means that a retry will be issued without resetting the original request, leaving multiple upstream requests in flight. @@ -311,13 +311,13 @@ x-envoy-attempt-count Sent to the upstream to indicate which attempt the current request is in a series of retries. The value will be "1" on the initial request, incrementing by one for each retry. Only set if the -:ref:`include_request_attempt_count ` +:ref:`include_request_attempt_count ` flag is set to true. Sent to the downstream to indicate how many upstream requests took place. The header will be absent if the router did not send any upstream requests. 
The value will be "1" if only the original upstream request was sent, incrementing by one for each retry. Only set if the -:ref:`include_attempt_count_in_response ` +:ref:`include_attempt_count_in_response ` flag is set to true. .. _config_http_filters_router_x-envoy-expected-rq-timeout-ms: @@ -329,7 +329,7 @@ This is the time in milliseconds the router expects the request to be completed. header so that the upstream host receiving the request can make decisions based on the request timeout, e.g., early exit. This is set on internal requests and is either taken from the :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms` header or the :ref:`route timeout -`, in that order. +`, in that order. x-envoy-upstream-service-time ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -343,8 +343,8 @@ responses. x-envoy-original-path ^^^^^^^^^^^^^^^^^^^^^ -If the route utilizes :ref:`prefix_rewrite ` -or :ref:`regex_rewrite `, +If the route utilizes :ref:`prefix_rewrite ` +or :ref:`regex_rewrite `, Envoy will put the original path header in this header. This can be useful for logging and debugging. @@ -367,7 +367,7 @@ The router outputs many statistics in the cluster namespace (depending on the cl the chosen route). See :ref:`here ` for more information. The router filter outputs statistics in the *http..* namespace. The :ref:`stat prefix -` comes from the +` comes from the owning HTTP connection manager. .. csv-table:: @@ -398,7 +398,7 @@ statistics: upstream_rq_<\*>, Counter, "Specific HTTP response codes (e.g., 201, 302, etc.)" upstream_rq_retry, Counter, Total request retries upstream_rq_retry_limit_exceeded, Counter, Total requests not retried due to exceeding :ref:`the configured number of maximum retries ` - upstream_rq_retry_overflow, Counter, Total requests not retried due to circuit breaking or exceeding the :ref:`retry budgets ` + upstream_rq_retry_overflow, Counter, Total requests not retried due to circuit breaking or exceeding the :ref:`retry budgets ` upstream_rq_retry_success, Counter, Total request retry successes upstream_rq_time, Histogram, Request time milliseconds upstream_rq_timeout, Counter, Total requests that timed out waiting for a response diff --git a/docs/root/configuration/http/http_filters/squash_filter.rst b/docs/root/configuration/http/http_filters/squash_filter.rst index 006d7612417f..494f05f03b43 100644 --- a/docs/root/configuration/http/http_filters/squash_filter.rst +++ b/docs/root/configuration/http/http_filters/squash_filter.rst @@ -20,7 +20,7 @@ request, before the request arrive to the application code, without any changes Configuration ------------- -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.http.squash*. How it works diff --git a/docs/root/configuration/http/http_filters/tap_filter.rst b/docs/root/configuration/http/http_filters/tap_filter.rst index 5bcae0a77659..7db1c47c4bad 100644 --- a/docs/root/configuration/http/http_filters/tap_filter.rst +++ b/docs/root/configuration/http/http_filters/tap_filter.rst @@ -3,7 +3,7 @@ Tap === -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.http.tap*. .. attention:: @@ -15,9 +15,9 @@ Tap The HTTP tap filter is used to interpose on and record HTTP traffic. At a high level, the configuration is composed of two pieces: -1. :ref:`Match configuration `: a list of +1. 
:ref:`Match configuration `: a list of conditions under which the filter will match an HTTP request and begin a tap session. -2. :ref:`Output configuration `: a list of output +2. :ref:`Output configuration `: a list of output sinks that the filter will write the matched and tapped data to. Each of these concepts will be covered incrementally over the course of several example @@ -32,7 +32,7 @@ Example filter configuration: name: envoy.filters.http.tap typed_config: - "@type": type.googleapis.com/envoy.config.filter.http.tap.v2alpha.Tap + "@type": type.googleapis.com/envoy.extensions.filters.http.tap.v3.Tap common_config: admin_config: config_id: test_config_id @@ -46,15 +46,15 @@ Admin handler ------------- When the HTTP filter specifies an :ref:`admin_config -`, it is configured for admin control and +`, it is configured for admin control and the :http:post:`/tap` admin handler will be installed. The admin handler can be used for live tapping and debugging of HTTP traffic. It works as follows: 1. A POST request is used to provide a valid tap configuration. The POST request body can be either the JSON or YAML representation of the :ref:`TapConfig - ` message. + ` message. 2. If the POST request is accepted, Envoy will stream :ref:`HttpBufferedTrace - ` messages (serialized to JSON) until the admin + ` messages (serialized to JSON) until the admin request is terminated. An example POST body: @@ -126,16 +126,16 @@ Output format ------------- Each output sink has an associated :ref:`format -`. The default format is +`. The default format is :ref:`JSON_BODY_AS_BYTES -`. This format is +`. This format is easy to read JSON, but has the downside that body data is base64 encoded. In the case that the tap is known to be on human readable data, the :ref:`JSON_BODY_AS_STRING -` format may be +` format may be more user friendly. See the reference documentation for more information on other available formats. An example of a streaming admin tap configuration that uses the :ref:`JSON_BODY_AS_STRING -` format: +` format: .. code-block:: yaml @@ -154,9 +154,9 @@ Buffered body limits For buffered taps, Envoy will limit the amount of body data that is tapped to avoid OOM situations. The default limit is 1KiB for both received (request) and transmitted (response) data. This is configurable via the :ref:`max_buffered_rx_bytes -` and +` and :ref:`max_buffered_tx_bytes -` settings. +` settings. .. _config_http_filters_tap_streaming: @@ -169,18 +169,18 @@ first the request headers will be matched, then the request body if present, the trailers if present, then the response headers if present, etc. The filter additionally supports optional streamed output which is governed by the :ref:`streaming -` setting. If this setting is false +` setting. If this setting is false (the default), Envoy will emit :ref:`fully buffered traces -`. Users are likely to find this format easier +`. Users are likely to find this format easier to interact with for simple cases. In cases where fully buffered traces are not practical (e.g., very large request and responses, long lived streaming APIs, etc.), the streaming setting can be set to true, and Envoy will emit -multiple :ref:`streamed trace segments ` for +multiple :ref:`streamed trace segments ` for each tap. In this case, it is required that post-processing is performed to stitch all of the trace segments back together into a usable form. Also note that binary protobuf is not a self-delimiting format. 
If binary protobuf output is desired, the :ref:`PROTO_BINARY_LENGTH_DELIMITED -` output +` output format should be used. An static filter configuration to enable streaming output looks like: @@ -189,7 +189,7 @@ An static filter configuration to enable streaming output looks like: name: envoy.filters.http.tap typed_config: - "@type": type.googleapis.com/envoy.config.filter.http.tap.v2alpha.Tap + "@type": type.googleapis.com/envoy.extensions.filters.http.tap.v3.Tap common_config: static_config: match_config: @@ -232,7 +232,7 @@ Statistics ---------- The tap filter outputs statistics in the *http..tap.* namespace. The :ref:`stat prefix -` +` comes from the owning HTTP connection manager. .. csv-table:: diff --git a/docs/root/configuration/listeners/lds.rst b/docs/root/configuration/listeners/lds.rst index 94511a1cb221..a54c9ab89b0f 100644 --- a/docs/root/configuration/listeners/lds.rst +++ b/docs/root/configuration/listeners/lds.rst @@ -9,7 +9,7 @@ depending on what is required. The semantics of listener updates are as follows: -* Every listener must have a unique :ref:`name `. If a name is not +* Every listener must have a unique :ref:`name `. If a name is not provided, Envoy will create a UUID. Listeners that are to be dynamically updated should have a unique name supplied by the management server. * When a listener is added, it will be "warmed" before taking traffic. For example, if the listener @@ -31,9 +31,9 @@ The semantics of listener updates are as follows: Configuration ------------- -* :ref:`v2 LDS API ` +* :ref:`v3 LDS API ` Statistics ---------- -LDS has a :ref:`statistics ` tree rooted at *listener_manager.lds.* \ No newline at end of file +LDS has a :ref:`statistics ` tree rooted at *listener_manager.lds.* diff --git a/docs/root/configuration/listeners/listener_filters/http_inspector.rst b/docs/root/configuration/listeners/listener_filters/http_inspector.rst index 7fc1b620414e..0c744531c914 100644 --- a/docs/root/configuration/listeners/listener_filters/http_inspector.rst +++ b/docs/root/configuration/listeners/listener_filters/http_inspector.rst @@ -5,10 +5,10 @@ HTTP Inspector HTTP Inspector listener filter allows detecting whether the application protocol appears to be HTTP, and if it is HTTP, it detects the HTTP protocol (HTTP/1.x or HTTP/2) further. This can be used to select a -:ref:`FilterChain ` via the :ref:`application_protocols ` -of a :ref:`FilterChainMatch `. +:ref:`FilterChain ` via the :ref:`application_protocols ` +of a :ref:`FilterChainMatch `. -* :ref:`Listener filter v2 API reference ` +* :ref:`Listener filter v3 API reference ` * This filter should be configured with the name *envoy.filters.listener.http_inspector*. Example diff --git a/docs/root/configuration/listeners/listener_filters/original_dst_filter.rst b/docs/root/configuration/listeners/listener_filters/original_dst_filter.rst index fba1d5cb4817..5d764068a518 100644 --- a/docs/root/configuration/listeners/listener_filters/original_dst_filter.rst +++ b/docs/root/configuration/listeners/listener_filters/original_dst_filter.rst @@ -5,11 +5,11 @@ Original Destination Original destination listener filter reads the SO_ORIGINAL_DST socket option set when a connection has been redirected by an iptables REDIRECT target, or by an iptables TPROXY target in combination -with setting the listener's :ref:`transparent ` option. +with setting the listener's :ref:`transparent ` option. 
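For reference, wiring the HTTP Inspector into a listener with the v3 typed config might look like the sketch below. This block is not taken from the patched document; the type URLs follow the *envoy.extensions* naming used throughout this change, and the matched protocol value is illustrative.

.. code-block:: yaml

  listener_filters:
  - name: envoy.filters.listener.http_inspector
    typed_config:
      "@type": type.googleapis.com/envoy.extensions.filters.listener.http_inspector.v3.HttpInspector
  filter_chains:
  - filter_chain_match:
      # Select this chain only when the inspector detected plain HTTP/1.1.
      application_protocols: ["http/1.1"]
    filters:
    - name: envoy.filters.network.echo
      typed_config:
        "@type": type.googleapis.com/envoy.extensions.filters.network.echo.v3.Echo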
Later processing in Envoy sees the restored destination address as the connection's local address, rather than the address at which the listener is listening at. Furthermore, :ref:`an original destination cluster ` may be used to forward HTTP requests or TCP connections to the restored destination address. -* :ref:`v2 API reference ` +* :ref:`v2 API reference ` * This filter should be configured with the name *envoy.filters.listener.original_dst*. diff --git a/docs/root/configuration/listeners/listener_filters/original_src_filter.rst b/docs/root/configuration/listeners/listener_filters/original_src_filter.rst index 72f98dd97e18..8aa4a679678d 100644 --- a/docs/root/configuration/listeners/listener_filters/original_src_filter.rst +++ b/docs/root/configuration/listeners/listener_filters/original_src_filter.rst @@ -3,7 +3,7 @@ Original Source =============== -* :ref:`Listener filter v2 API reference ` +* :ref:`Listener filter v3 API reference ` * This filter should be configured with the name *envoy.filters.listener.original_src*. The original source listener filter replicates the downstream remote address of the connection on @@ -33,10 +33,10 @@ to forcefully route any traffic whose IP was replicated by Envoy back through th If Envoy and the upstream are on the same host -- e.g. in an sidecar deployment --, then iptables and routing rules can be used to ensure correct behaviour. The filter has an unsigned integer configuration, -:ref:`mark `. Setting +:ref:`mark `. Setting this to *X* causes Envoy to *mark* all upstream packets originating from this listener with value *X*. Note that if -:ref:`mark ` is set +:ref:`mark ` is set to 0, Envoy will not mark upstream packets. We can use the following set of commands to ensure that all ipv4 and ipv6 traffic marked with *X* @@ -72,7 +72,9 @@ marked with 123. port_value: 8888 listener_filters: - name: envoy.filters.listener.proxy_protocol + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.listener.proxy_protocol.v3.ProxyProtocol - name: envoy.filters.listener.original_src typed_config: - "@type": type.googleapis.com/envoy.config.filter.listener.original_src.v2alpha1.OriginalSrc + "@type": type.googleapis.com/envoy.extensions.filters.listener.original_src.v3.OriginalSrc mark: 123 diff --git a/docs/root/configuration/listeners/listener_filters/proxy_protocol.rst b/docs/root/configuration/listeners/listener_filters/proxy_protocol.rst index e679607debb7..257977b8627f 100644 --- a/docs/root/configuration/listeners/listener_filters/proxy_protocol.rst +++ b/docs/root/configuration/listeners/listener_filters/proxy_protocol.rst @@ -23,7 +23,7 @@ the standard does not allow parsing to determine if it is present or not. If there is a protocol error or an unsupported address family (e.g. AF_UNIX) the connection will be closed and an error thrown. -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.listener.proxy_protocol*. Statistics diff --git a/docs/root/configuration/listeners/listener_filters/tls_inspector.rst b/docs/root/configuration/listeners/listener_filters/tls_inspector.rst index 5aba1cea0b2c..2b7d30cd434c 100644 --- a/docs/root/configuration/listeners/listener_filters/tls_inspector.rst +++ b/docs/root/configuration/listeners/listener_filters/tls_inspector.rst @@ -9,13 +9,13 @@ TLS or plaintext, and if it is TLS, it detects the and/or `Application-Layer Protocol Negotiation `_ from the client. 
This can be used to select a -:ref:`FilterChain ` via the -:ref:`server_names ` and/or -:ref:`application_protocols ` -of a :ref:`FilterChainMatch `. +:ref:`FilterChain ` via the +:ref:`server_names ` and/or +:ref:`application_protocols ` +of a :ref:`FilterChainMatch `. * :ref:`SNI ` -* :ref:`v2 API reference ` +* :ref:`v2 API reference ` * This filter should be configured with the name *envoy.filters.listener.tls_inspector*. Example diff --git a/docs/root/configuration/listeners/network_filters/client_ssl_auth_filter.rst b/docs/root/configuration/listeners/network_filters/client_ssl_auth_filter.rst index d42a235953af..c415e7b118c3 100644 --- a/docs/root/configuration/listeners/network_filters/client_ssl_auth_filter.rst +++ b/docs/root/configuration/listeners/network_filters/client_ssl_auth_filter.rst @@ -4,7 +4,7 @@ Client TLS authentication ========================= * Client TLS authentication filter :ref:`architecture overview ` -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.network.client_ssl_auth*. .. _config_network_filters_client_ssl_auth_stats: diff --git a/docs/root/configuration/listeners/network_filters/direct_response_filter.rst b/docs/root/configuration/listeners/network_filters/direct_response_filter.rst index de8ae26e3a8a..c8d4750123f4 100644 --- a/docs/root/configuration/listeners/network_filters/direct_response_filter.rst +++ b/docs/root/configuration/listeners/network_filters/direct_response_filter.rst @@ -9,4 +9,4 @@ can be used, for example, as a terminal filter in filter chains to collect telemetry for blocked traffic. This filter should be configured with the name *envoy.filters.network.direct_response*. -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` diff --git a/docs/root/configuration/listeners/network_filters/dubbo_proxy_filter.rst b/docs/root/configuration/listeners/network_filters/dubbo_proxy_filter.rst index 28e730cd37e8..02ae9a74ac0f 100644 --- a/docs/root/configuration/listeners/network_filters/dubbo_proxy_filter.rst +++ b/docs/root/configuration/listeners/network_filters/dubbo_proxy_filter.rst @@ -9,7 +9,7 @@ the metadata includes the basic request ID, request type, serialization type, and the required service name, method name, parameter name, and parameter value for routing. -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.network.dubbo_proxy*. .. 
_config_network_filters_dubbo_proxy_stats: @@ -60,7 +60,7 @@ the second step is to add your configuration, configuration method refer to the - filters: - name: envoy.filters.network.dubbo_proxy typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.dubbo_proxy.v2alpha1.DubboProxy + "@type": type.googleapis.com/envoy.extensions.filters.network.dubbo_proxy.v3.DubboProxy stat_prefix: dubbo_incomming_stats protocol_type: Dubbo serialization_type: Hessian2 @@ -80,4 +80,4 @@ the second step is to add your configuration, configuration method refer to the "@type": type.googleapis.com/google.protobuf.Struct value: name: test_service - - name: envoy.filters.dubbo.router \ No newline at end of file + - name: envoy.filters.dubbo.router diff --git a/docs/root/configuration/listeners/network_filters/echo_filter.rst b/docs/root/configuration/listeners/network_filters/echo_filter.rst index ff1fdfa70166..7d9dc21e5fe7 100644 --- a/docs/root/configuration/listeners/network_filters/echo_filter.rst +++ b/docs/root/configuration/listeners/network_filters/echo_filter.rst @@ -7,4 +7,4 @@ The echo is a trivial network filter mainly meant to demonstrate the network fil installed it will echo (write) all received data back to the connected downstream client. This filter should be configured with the name *envoy.filters.network.echo*. -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` diff --git a/docs/root/configuration/listeners/network_filters/ext_authz_filter.rst b/docs/root/configuration/listeners/network_filters/ext_authz_filter.rst index 65c25788a3c7..9df08d8f4c2b 100644 --- a/docs/root/configuration/listeners/network_filters/ext_authz_filter.rst +++ b/docs/root/configuration/listeners/network_filters/ext_authz_filter.rst @@ -4,7 +4,7 @@ External Authorization ====================== * External authorization :ref:`architecture overview ` -* :ref:`Network filter v2 API reference ` +* :ref:`Network filter v3 API reference ` * This filter should be configured with the name *envoy.filters.network.ext_authz*. The external authorization network filter calls an external authorization service to check if the @@ -16,12 +16,12 @@ then the connection will be closed. authorized prior to rest of the filters processing the request. The content of the request that are passed to an authorization service is specified by -:ref:`CheckRequest `. +:ref:`CheckRequest `. .. _config_network_filters_ext_authz_network_configuration: The network filter, gRPC service, can be configured as follows. You can see all the configuration -options at :ref:`Network filter `. +options at :ref:`Network filter `. 
Example ------- @@ -33,7 +33,7 @@ A sample filter configuration could be: filters: - name: envoy.filters.network.ext_authz typed_config: - "@type": type.googleapis.com/envoy.config.filter.http.ext_authz.v2.ExtAuthz + "@type": type.googleapis.com/envoy.extensions.filters.network.ext_authz.v3.ExtAuthz stat_prefix: ext_authz grpc_service: envoy_grpc: diff --git a/docs/root/configuration/listeners/network_filters/kafka_broker_filter.rst b/docs/root/configuration/listeners/network_filters/kafka_broker_filter.rst index 8f7ef37427fe..4753f3845a78 100644 --- a/docs/root/configuration/listeners/network_filters/kafka_broker_filter.rst +++ b/docs/root/configuration/listeners/network_filters/kafka_broker_filter.rst @@ -11,7 +11,7 @@ The filter attempts not to influence the communication between client and broker that could not be decoded (due to Kafka client or broker running a newer version than supported by this filter) are forwarded as-is. -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.network.kafka_broker*. .. attention:: @@ -38,11 +38,11 @@ in the configuration snippet below: - filters: - name: envoy.filters.network.kafka_broker typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.kafka_broker.v2alpha1.KafkaBroker + "@type": type.googleapis.com/envoy.extensions.filters.network.kafka_broker.v3.KafkaBroker stat_prefix: exampleprefix - name: envoy.filters.network.tcp_proxy typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.tcp_proxy.v2.TcpProxy + "@type": type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy stat_prefix: tcp cluster: localkafka clusters: @@ -50,10 +50,15 @@ in the configuration snippet below: connect_timeout: 0.25s type: strict_dns lb_policy: round_robin - hosts: - - socket_address: - address: 127.0.0.1 # Kafka broker's host. - port_value: 9092 # Kafka broker's port. + load_assignment: + cluster_name: some_service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 # Kafka broker's host + port_value: 9092 # Kafka broker's port. The Kafka broker needs to advertise the Envoy listener port instead of its own. diff --git a/docs/root/configuration/listeners/network_filters/local_rate_limit_filter.rst b/docs/root/configuration/listeners/network_filters/local_rate_limit_filter.rst index 5939a63ae7d1..4ab02eb7145a 100644 --- a/docs/root/configuration/listeners/network_filters/local_rate_limit_filter.rst +++ b/docs/root/configuration/listeners/network_filters/local_rate_limit_filter.rst @@ -4,8 +4,8 @@ Local rate limit ================ * Local rate limiting :ref:`architecture overview ` -* :ref:`v2 API reference - ` +* :ref:`v3 API reference + ` * This filter should be configured with the name *envoy.filters.network.local_ratelimit*. .. note:: @@ -16,7 +16,7 @@ Overview -------- The local rate limit filter applies a :ref:`token bucket -` rate +` rate limit to incoming connections that are processed by the filter's filter chain. Each connection processed by the filter utilizes a single token, and if no tokens are available, the connection will be immediately closed without further filter iteration. @@ -42,5 +42,5 @@ Runtime ------- The local rate limit filter can be runtime feature flagged via the :ref:`enabled -` +` configuration field. 
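To make the token-bucket behaviour above concrete, a minimal filter entry could look roughly like the following sketch (field names follow the v3 *local_ratelimit* proto referenced above; the stat prefix and runtime key are placeholders):

.. code-block:: yaml

  filters:
  - name: envoy.filters.network.local_ratelimit
    typed_config:
      "@type": type.googleapis.com/envoy.extensions.filters.network.local_ratelimit.v3.LocalRateLimit
      stat_prefix: local_rate_limit
      token_bucket:
        max_tokens: 10        # each new connection consumes one token
        tokens_per_fill: 10   # refill 10 tokens...
        fill_interval: 1s     # ...every second
      runtime_enabled:
        default_value: true
        runtime_key: local_rate_limit_enabled

Connections that arrive while the bucket is empty are closed immediately, as described above.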
diff --git a/docs/root/configuration/listeners/network_filters/mongo_proxy_filter.rst b/docs/root/configuration/listeners/network_filters/mongo_proxy_filter.rst index 9aa734c69ec5..8c5a451ba4ce 100644 --- a/docs/root/configuration/listeners/network_filters/mongo_proxy_filter.rst +++ b/docs/root/configuration/listeners/network_filters/mongo_proxy_filter.rst @@ -4,7 +4,7 @@ Mongo proxy =========== * MongoDB :ref:`architecture overview ` -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.network.mongo_proxy*. .. _config_network_filters_mongo_proxy_fault_injection: @@ -12,7 +12,7 @@ Mongo proxy Fault injection --------------- -The Mongo proxy filter supports fault injection. See the v2 API reference for how to +The Mongo proxy filter supports fault injection. See the v3 API reference for how to configure. .. _config_network_filters_mongo_proxy_stats: @@ -181,7 +181,7 @@ Dynamic Metadata ---------------- The Mongo filter emits the following dynamic metadata when enabled via the -:ref:`configuration `. +:ref:`configuration `. This dynamic metadata is available as key-value pairs where the key represents the database and the collection being accessed, and the value is a list of operations performed on the collection. diff --git a/docs/root/configuration/listeners/network_filters/mysql_proxy_filter.rst b/docs/root/configuration/listeners/network_filters/mysql_proxy_filter.rst index 750e3b30e31e..24eb5cb12878 100644 --- a/docs/root/configuration/listeners/network_filters/mysql_proxy_filter.rst +++ b/docs/root/configuration/listeners/network_filters/mysql_proxy_filter.rst @@ -34,11 +34,11 @@ in the configuration snippet below: - filters: - name: envoy.filters.network.mysql_proxy typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.mysql_proxy.v1alpha1.MySQLProxy + "@type": type.googleapis.com/envoy.extensions.filters.network.mysql_proxy.v3.MySQLProxy stat_prefix: mysql - name: envoy.filters.network.tcp_proxy typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.tcp_proxy.v2.TcpProxy + "@type": type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy stat_prefix: tcp cluster: ... @@ -96,11 +96,11 @@ _catalog_ table in the _productdb_ database. - filters: - name: envoy.filters.network.mysql_proxy typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.mysql_proxy.v1alpha1.MySQLProxy + "@type": type.googleapis.com/envoy.extensions.filters.network.mysql_proxy.v3.MySQLProxy stat_prefix: mysql - name: envoy.filters.network.rbac typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.rbac.v2.RBAC + "@type": type.googleapis.com/envoy.extensions.filters.network.rbac.v3.RBAC stat_prefix: rbac rules: action: DENY @@ -120,6 +120,6 @@ _catalog_ table in the _productdb_ database. 
- any: true - name: envoy.filters.network.tcp_proxy typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.tcp_proxy.v2.TcpProxy + "@type": type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy stat_prefix: tcp cluster: mysql diff --git a/docs/root/configuration/listeners/network_filters/rate_limit_filter.rst b/docs/root/configuration/listeners/network_filters/rate_limit_filter.rst index 4196956ff00c..1a94053f0c5a 100644 --- a/docs/root/configuration/listeners/network_filters/rate_limit_filter.rst +++ b/docs/root/configuration/listeners/network_filters/rate_limit_filter.rst @@ -4,7 +4,7 @@ Rate limit ========== * Global rate limiting :ref:`architecture overview ` -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.network.ratelimit*. .. note:: @@ -30,7 +30,7 @@ following statistics: cx_closed, Counter, Total connections closed due to an over limit response from the rate limit service active, Gauge, Total active requests to the rate limit service failure_mode_allowed, Counter, "Total requests that were error(s) but were allowed through because - of :ref:`failure_mode_deny ` set to false." + of :ref:`failure_mode_deny ` set to false." Runtime ------- diff --git a/docs/root/configuration/listeners/network_filters/rbac_filter.rst b/docs/root/configuration/listeners/network_filters/rbac_filter.rst index 9d9821c41af5..d07417492045 100644 --- a/docs/root/configuration/listeners/network_filters/rbac_filter.rst +++ b/docs/root/configuration/listeners/network_filters/rbac_filter.rst @@ -10,7 +10,7 @@ block-list (DENY) set of policies based on properties of the connection (IPs, po This filter also supports policy in both enforcement and shadow modes. Shadow mode won't effect real users, it is used to test that a new set of policies work before rolling out to production. -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.network.rbac*. Statistics diff --git a/docs/root/configuration/listeners/network_filters/redis_proxy_filter.rst b/docs/root/configuration/listeners/network_filters/redis_proxy_filter.rst index 666bd3bb1b02..3c3fb77f3861 100644 --- a/docs/root/configuration/listeners/network_filters/redis_proxy_filter.rst +++ b/docs/root/configuration/listeners/network_filters/redis_proxy_filter.rst @@ -4,7 +4,7 @@ Redis proxy =========== * Redis :ref:`architecture overview ` -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.network.redis_proxy*. .. _config_network_filters_redis_proxy_stats: @@ -49,7 +49,7 @@ Per command statistics The Redis filter will gather statistics for commands in the *redis..command..* namespace. By default latency stats are in milliseconds and can be -changed to microseconds by setting the configuration parameter :ref:`latency_in_micros ` to true. +changed to microseconds by setting the configuration parameter :ref:`latency_in_micros ` to true. .. csv-table:: :header: Name, Type, Description diff --git a/docs/root/configuration/listeners/network_filters/sni_cluster_filter.rst b/docs/root/configuration/listeners/network_filters/sni_cluster_filter.rst index 1ad5d26f946f..207b5932b7d5 100644 --- a/docs/root/configuration/listeners/network_filters/sni_cluster_filter.rst +++ b/docs/root/configuration/listeners/network_filters/sni_cluster_filter.rst @@ -11,4 +11,4 @@ with the name *envoy.filters.network.sni_cluster*. 
This filter has no configuration. It must be installed before the :ref:`tcp_proxy ` filter. -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` diff --git a/docs/root/configuration/listeners/network_filters/sni_dynamic_forward_proxy_filter.rst b/docs/root/configuration/listeners/network_filters/sni_dynamic_forward_proxy_filter.rst index 4751b3a614e5..1e01ec592240 100644 --- a/docs/root/configuration/listeners/network_filters/sni_dynamic_forward_proxy_filter.rst +++ b/docs/root/configuration/listeners/network_filters/sni_dynamic_forward_proxy_filter.rst @@ -48,14 +48,14 @@ SNI dynamic forward proxy. - filters: - name: envoy.filters.network.sni_dynamic_forward_proxy typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.sni_dynamic_forward_proxy.v2alpha.FilterConfig + "@type": type.googleapis.com/envoy.extensions.filters.network.sni_dynamic_forward_proxy.v3alpha.FilterConfig port_value: 443 dns_cache_config: name: dynamic_forward_proxy_cache_config dns_lookup_family: V4_ONLY - name: envoy.tcp_proxy typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.tcp_proxy.v2.TcpProxy + "@type": type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy stat_prefix: tcp cluster: dynamic_forward_proxy_cluster clusters: @@ -65,7 +65,7 @@ SNI dynamic forward proxy. cluster_type: name: envoy.clusters.dynamic_forward_proxy typed_config: - "@type": type.googleapis.com/envoy.config.cluster.dynamic_forward_proxy.v2alpha.ClusterConfig + "@type": type.googleapis.com/envoy.extensions.clusters.dynamic_forward_proxy.v3.ClusterConfig dns_cache_config: name: dynamic_forward_proxy_cache_config dns_lookup_family: V4_ONLY diff --git a/docs/root/configuration/listeners/network_filters/tcp_proxy_filter.rst b/docs/root/configuration/listeners/network_filters/tcp_proxy_filter.rst index e137dfc58ff6..1822e08715c9 100644 --- a/docs/root/configuration/listeners/network_filters/tcp_proxy_filter.rst +++ b/docs/root/configuration/listeners/network_filters/tcp_proxy_filter.rst @@ -4,7 +4,7 @@ TCP proxy ========= * TCP proxy :ref:`architecture overview ` -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.network.tcp_proxy*. .. _config_network_filters_tcp_proxy_dynamic_cluster: @@ -26,12 +26,12 @@ TCP proxy can be configured to route to a subset of hosts within an upstream clu To define metadata that a suitable upstream host must match, use one of the following fields: -#. Use :ref:`TcpProxy.metadata_match` +#. Use :ref:`TcpProxy.metadata_match` to define required metadata for a single upstream cluster. -#. Use :ref:`ClusterWeight.metadata_match` +#. Use :ref:`ClusterWeight.metadata_match` to define required metadata for a weighted upstream cluster. -#. Use combination of :ref:`TcpProxy.metadata_match` - and :ref:`ClusterWeight.metadata_match` +#. Use combination of :ref:`TcpProxy.metadata_match` + and :ref:`ClusterWeight.metadata_match` to define required metadata for a weighted upstream cluster (metadata from the latter will be merged on top of the former). .. 
_config_network_filters_tcp_proxy_stats: diff --git a/docs/root/configuration/listeners/network_filters/thrift_proxy_filter.rst b/docs/root/configuration/listeners/network_filters/thrift_proxy_filter.rst index ba7060160a1b..504f6f873752 100644 --- a/docs/root/configuration/listeners/network_filters/thrift_proxy_filter.rst +++ b/docs/root/configuration/listeners/network_filters/thrift_proxy_filter.rst @@ -3,23 +3,23 @@ Thrift proxy ============ -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.network.thrift_proxy*. Cluster Protocol Options ------------------------ Thrift connections to upstream hosts can be configured by adding an entry to the appropriate -Cluster's :ref:`extension_protocol_options` +Cluster's :ref:`extension_protocol_options` keyed by `envoy.filters.network.thrift_proxy`. The -:ref:`ThriftProtocolOptions` +:ref:`ThriftProtocolOptions` message describes the available options. Thrift Request Metadata ----------------------- -The :ref:`HEADER transport` -and :ref:`TWITTER protocol` +The :ref:`HEADER transport` +and :ref:`TWITTER protocol` support metadata. In particular, the `Header transport `_ supports informational key/value pairs and the Twitter protocol transmits @@ -29,13 +29,13 @@ Header Transport Metadata ~~~~~~~~~~~~~~~~~~~~~~~~~ Header transport key/value pairs are available for routing as -:ref:`headers `. +:ref:`headers `. Twitter Protocol Metadata ~~~~~~~~~~~~~~~~~~~~~~~~~ Twitter protocol request contexts are converted into headers which are available for routing as -:ref:`headers `. +:ref:`headers `. In addition, the following fields are presented as headers: Client Identifier diff --git a/docs/root/configuration/listeners/network_filters/zookeeper_proxy_filter.rst b/docs/root/configuration/listeners/network_filters/zookeeper_proxy_filter.rst index 587ebc7f7730..426634fefde5 100644 --- a/docs/root/configuration/listeners/network_filters/zookeeper_proxy_filter.rst +++ b/docs/root/configuration/listeners/network_filters/zookeeper_proxy_filter.rst @@ -29,11 +29,11 @@ in the configuration snippet below: - filters: - name: envoy.filters.network.zookeeper_proxy typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.zookeeper_proxy.v1alpha1.ZooKeeperProxy + "@type": type.googleapis.com/envoy.extensions.filters.network.zookeeper_proxy.v3.ZooKeeperProxy stat_prefix: zookeeper - name: envoy.filters.network.tcp_proxy typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.tcp_proxy.v2.TcpProxy + "@type": type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy stat_prefix: tcp cluster: ... diff --git a/docs/root/configuration/listeners/overview.rst b/docs/root/configuration/listeners/overview.rst index 8b4549b45648..06c19f698ae8 100644 --- a/docs/root/configuration/listeners/overview.rst +++ b/docs/root/configuration/listeners/overview.rst @@ -4,4 +4,4 @@ Overview The top level Envoy configuration contains a list of :ref:`listeners `. Each individual listener configuration has the following format: -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` diff --git a/docs/root/configuration/listeners/udp_filters/dns_filter.rst b/docs/root/configuration/listeners/udp_filters/dns_filter.rst index 2232eb687c17..90a768c908f6 100644 --- a/docs/root/configuration/listeners/udp_filters/dns_filter.rst +++ b/docs/root/configuration/listeners/udp_filters/dns_filter.rst @@ -32,7 +32,7 @@ Example Configuration .. 
code-block:: yaml listener_filters: - name: "envoy.filters.udp.dns_filter" + name: envoy.filters.udp.dns_filter typed_config: "@type": "type.googleapis.com/envoy.extensions.filters.udp.dns_filter.v3alpha.DnsFilterConfig" stat_prefix: "dns_filter_prefix" diff --git a/docs/root/configuration/listeners/udp_filters/udp_proxy.rst b/docs/root/configuration/listeners/udp_filters/udp_proxy.rst index e5a4bfdb245e..1929fc3c2a7e 100644 --- a/docs/root/configuration/listeners/udp_filters/udp_proxy.rst +++ b/docs/root/configuration/listeners/udp_filters/udp_proxy.rst @@ -7,7 +7,7 @@ UDP proxy UDP proxy support should be considered alpha and not production ready. -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.udp_listener.udp_proxy* Overview @@ -22,7 +22,7 @@ Because UDP is not a connection oriented protocol, Envoy must keep track of a cl such that the response datagrams from an upstream server can be routed back to the correct client. Each session is index by the 4-tuple consisting of source IP/port and local IP/port that the datagram is received on. Sessions last until the :ref:`idle timeout -` is reached. +` is reached. Load balancing and unhealthy host handling ------------------------------------------ diff --git a/docs/root/configuration/observability/access_log/usage.rst b/docs/root/configuration/observability/access_log/usage.rst index 46637c05ec4b..7ac3def7cdec 100644 --- a/docs/root/configuration/observability/access_log/usage.rst +++ b/docs/root/configuration/observability/access_log/usage.rst @@ -9,7 +9,7 @@ Configuration Access logs are configured as part of the :ref:`HTTP connection manager config ` or :ref:`TCP Proxy `. -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` .. _config_access_log_format: @@ -257,7 +257,7 @@ The following command operators are supported: * **UF**: Upstream connection failure in addition to 503 response code. * **UO**: Upstream overflow (:ref:`circuit breaking `) in addition to 503 response code. * **NR**: No :ref:`route configured ` for a given request in addition to 404 response code, or no matching filter chain for a downstream connection. - * **URX**: The request was rejected because the :ref:`upstream retry limit (HTTP) ` or :ref:`maximum connect attempts (TCP) ` was reached. + * **URX**: The request was rejected because the :ref:`upstream retry limit (HTTP) ` or :ref:`maximum connect attempts (TCP) ` was reached. HTTP only * **DC**: Downstream connection termination. * **LH**: Local service failed :ref:`health check request ` in addition to 503 response code. @@ -271,7 +271,7 @@ The following command operators are supported: * **UAEX**: The request was denied by the external authorization service. * **RLSE**: The request was rejected because there was an error in rate limit service. * **IH**: The request was rejected because it set an invalid value for a - :ref:`strictly-checked header ` in addition to 400 response code. + :ref:`strictly-checked header ` in addition to 400 response code. * **SI**: Stream idle timeout in addition to 408 response code. * **DPE**: The downstream request had an HTTP protocol error. @@ -306,7 +306,7 @@ The following command operators are supported: .. note:: This may not be the physical remote address of the peer if the address has been inferred from - :ref:`proxy proto ` or :ref:`x-forwarded-for + :ref:`proxy proto ` or :ref:`x-forwarded-for `. 
%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT% @@ -316,7 +316,7 @@ The following command operators are supported: .. note:: This may not be the physical remote address of the peer if the address has been inferred from - :ref:`proxy proto ` or :ref:`x-forwarded-for + :ref:`proxy proto ` or :ref:`x-forwarded-for `. %DOWNSTREAM_DIRECT_REMOTE_ADDRESS% @@ -326,7 +326,7 @@ The following command operators are supported: .. note:: This is always the physical remote address of the peer even if the downstream remote address has - been inferred from :ref:`proxy proto ` + been inferred from :ref:`proxy proto ` or :ref:`x-forwarded-for `. %DOWNSTREAM_DIRECT_REMOTE_ADDRESS_WITHOUT_PORT% @@ -336,7 +336,7 @@ The following command operators are supported: .. note:: This is always the physical remote address of the peer even if the downstream remote address has - been inferred from :ref:`proxy proto ` + been inferred from :ref:`proxy proto ` or :ref:`x-forwarded-for `. %DOWNSTREAM_LOCAL_ADDRESS% @@ -383,7 +383,7 @@ The following command operators are supported: %DYNAMIC_METADATA(NAMESPACE:KEY*):Z% HTTP - :ref:`Dynamic Metadata ` info, + :ref:`Dynamic Metadata ` info, where NAMESPACE is the filter namespace used when setting the metadata, KEY is an optional lookup up key in the namespace with the option of specifying nested keys separated by ':', and Z is an optional parameter denoting string truncation up to Z characters long. Dynamic Metadata diff --git a/docs/root/configuration/observability/statistics.rst b/docs/root/configuration/observability/statistics.rst index 9e99227518d3..e8f92643c029 100644 --- a/docs/root/configuration/observability/statistics.rst +++ b/docs/root/configuration/observability/statistics.rst @@ -20,10 +20,10 @@ Server related statistics are rooted at *server.* with following statistics: memory_heap_size, Gauge, Current reserved heap size in bytes. New Envoy process heap size on hot restart. memory_physical_size, Gauge, Current estimate of total bytes of the physical memory. New Envoy process physical memory size on hot restart. live, Gauge, "1 if the server is not currently draining, 0 otherwise" - state, Gauge, Current :ref:`State ` of the Server. + state, Gauge, Current :ref:`State ` of the Server. parent_connections, Gauge, Total connections of the old Envoy process on hot restart total_connections, Gauge, Total connections of both new and old Envoy processes - version, Gauge, Integer represented version number based on SCM revision or :ref:`stats_server_version_override ` if set. + version, Gauge, Integer represented version number based on SCM revision or :ref:`stats_server_version_override ` if set. days_until_first_cert_expiring, Gauge, Number of days until the next certificate being managed will expire hot_restart_epoch, Gauge, Current hot restart epoch -- an integer passed via command line flag `--restart-epoch` usually indicating generation. hot_restart_generation, Gauge, Current hot restart generation -- like hot_restart_epoch but computed automatically by incrementing from parent. 
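To make the access log command operators described above concrete, the following is a minimal sketch of a file
access logger attached to the HTTP connection manager. The logger name, the ``FileAccessLog`` type URL, and the
output path are illustrative assumptions, not values taken from this change:

.. code-block:: yaml

  access_log:
  - name: envoy.access_loggers.file
    typed_config:
      "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog
      # Hypothetical output path; any writable path works.
      path: /var/log/envoy/access.log
      # Each operator below is covered in the command operator list above.
      format: "[%START_TIME%] %REQ(:METHOD)% %RESPONSE_CODE% %RESPONSE_FLAGS% %DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%\n"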
diff --git a/docs/root/configuration/operations/overload_manager/overload_manager.rst b/docs/root/configuration/operations/overload_manager/overload_manager.rst index c1eb8fc7a96a..8d28935536e2 100644 --- a/docs/root/configuration/operations/overload_manager/overload_manager.rst +++ b/docs/root/configuration/operations/overload_manager/overload_manager.rst @@ -4,7 +4,7 @@ Overload manager ================ The :ref:`overload manager ` is configured in the Bootstrap -:ref:`overload_manager ` +:ref:`overload_manager ` field. An example configuration of the overload manager is shown below. It shows a configuration to diff --git a/docs/root/configuration/operations/runtime.rst b/docs/root/configuration/operations/runtime.rst index f0160e778347..45b1f7263480 100644 --- a/docs/root/configuration/operations/runtime.rst +++ b/docs/root/configuration/operations/runtime.rst @@ -7,7 +7,7 @@ The :ref:`runtime configuration ` specifies a virtual fil contains re-loadable configuration elements. This virtual file system can be realized via a series of local file system, static bootstrap configuration, RTDS and admin console derived overlays. -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` .. _config_virtual_filesystem: @@ -20,7 +20,7 @@ Layering ++++++++ The runtime can be viewed as a virtual file system consisting of multiple layers. The :ref:`layered -runtime ` bootstrap configuration specifies this +runtime ` bootstrap configuration specifies this layering. Runtime settings in later layers override earlier layers. A typical configuration might be: @@ -38,7 +38,7 @@ be: - name: admin_layer_0 admin_layer: {} -In the deprecated :ref:`runtime ` bootstrap +In the deprecated :ref:`runtime ` bootstrap configuration, the layering was implicit and fixed: 1. :ref:`Static bootstrap configuration ` @@ -69,7 +69,7 @@ Static bootstrap ++++++++++++++++ A static base runtime may be specified in the :ref:`bootstrap configuration -` via a :ref:`protobuf JSON representation +` via a :ref:`protobuf JSON representation `. .. _config_runtime_local_disk: @@ -90,9 +90,9 @@ Overrides ~~~~~~~~~ An arbitrary number of disk file system layers can be overlaid in the :ref:`layered -runtime ` bootstrap configuration. +runtime ` bootstrap configuration. -In the deprecated :ref:`runtime ` bootstrap configuration, +In the deprecated :ref:`runtime ` bootstrap configuration, there was a distinguished file system override. Assume that the folder ``/srv/runtime/v1`` points to the actual file system path where global runtime configurations are stored. The following would be a typical configuration setting for runtime: @@ -108,7 +108,7 @@ Where ``/srv/runtime/current`` is a symbolic link to ``/srv/runtime/v1``. Cluster-specific subdirectories ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -In the deprecated :ref:`runtime ` bootstrap configuration, +In the deprecated :ref:`runtime ` bootstrap configuration, the *override_subdirectory* is used along with the :option:`--service-cluster` CLI option. Assume that :option:`--service-cluster` has been set to ``my-cluster``. Envoy will first look for the *health_check.min_interval* key in the following full file system path: @@ -118,9 +118,9 @@ that :option:`--service-cluster` has been set to ``my-cluster``. Envoy will firs If found, the value will override any value found in the primary lookup path. This allows the user to customize the runtime values for individual clusters on top of global defaults. 
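The layered-runtime equivalent of this disk-based layout, discussed next, might look like the following minimal
sketch (field names assumed from the v3 bootstrap API; the runtime key and symlink path echo the examples above):

.. code-block:: yaml

  layered_runtime:
    layers:
    - name: static_layer_0
      static_layer:
        health_check:
          min_interval: 5
    - name: disk_layer_0
      disk_layer:
        symlink_root: /srv/runtime/current
        # Appends the --service-cluster value (e.g. my-cluster) to the disk path,
        # mirroring the per-cluster override behavior of the deprecated runtime.
        append_service_cluster: true
    - name: admin_layer_0
      admin_layer: {}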
-With the :ref:`layered runtime ` bootstrap +With the :ref:`layered runtime ` bootstrap configuration, it is possible to specialize on service cluster via the :ref:`append_service_cluster -` option at any +` option at any disk layer. .. _config_runtime_symbolic_link_swap: @@ -144,10 +144,10 @@ Runtime Discovery Service (RTDS) ++++++++++++++++++++++++++++++++ One or more runtime layers may be specified and delivered by specifying a :ref:`rtds_layer -`. This points the runtime layer at a +`. This points the runtime layer at a regular :ref:`xDS ` endpoint, subscribing to a single xDS resource for the given layer. The resource type for these layers is a :ref:`Runtime message -`. +`. .. _config_runtime_admin: @@ -167,7 +167,7 @@ built into the code, except for any values added via `/runtime_modify`. secured `. At most one admin layer may be specified. If a non-empty :ref:`layered runtime -` bootstrap configuration is specified with an +` bootstrap configuration is specified with an absent admin layer, any mutating admin console actions will elicit a 503 response. .. _config_runtime_atomicity: @@ -201,7 +201,7 @@ modeling a JSON object with the following rules: * Dot separators map to tree edges. * Scalar leaves (integer, strings, booleans, doubles) are represented with their respective JSON type. -* :ref:`FractionalPercent ` is represented with via its +* :ref:`FractionalPercent ` is represented with via its `canonical JSON encoding `_. An example representation of a setting for the *health_check.min_interval* key in YAML is: diff --git a/docs/root/configuration/operations/tools/router_check.rst b/docs/root/configuration/operations/tools/router_check.rst index 1752b084b55f..5ac7902eeaea 100644 --- a/docs/root/configuration/operations/tools/router_check.rst +++ b/docs/root/configuration/operations/tools/router_check.rst @@ -10,7 +10,7 @@ Route table check tool file. The following specifies input to the route table check tool. The route table check tool checks if -the route returned by a :ref:`router ` matches what is expected. +the route returned by a :ref:`router ` matches what is expected. The tool can be used to check cluster name, virtual cluster name, virtual host name, manual path rewrite, manual host rewrite, path redirect, and header field matches. Extensions for other test cases can be added. Details about installing the tool diff --git a/docs/root/configuration/other_features/rate_limit.rst b/docs/root/configuration/other_features/rate_limit.rst index a4c456257a2a..d3503a899878 100644 --- a/docs/root/configuration/other_features/rate_limit.rst +++ b/docs/root/configuration/other_features/rate_limit.rst @@ -7,12 +7,12 @@ The :ref:`rate limit service ` configuration sp limit service Envoy should talk to when it needs to make global rate limit decisions. If no rate limit service is configured, a "null" service will be used which will always return OK if called. -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` gRPC service IDL ---------------- Envoy expects the rate limit service to support the gRPC IDL specified in -:ref:`rls.proto `. See the IDL documentation +:ref:`rls.proto `. See the IDL documentation for more information on how the API works. See Lyft's reference implementation `here `_. 
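As a concrete illustration, a network rate limit filter pointing Envoy at such a service might be configured as
sketched below. The cluster name, domain, descriptor entries, and the v3 type URL are assumptions for this
example, not values taken from this change:

.. code-block:: yaml

  - name: envoy.filters.network.ratelimit
    typed_config:
      "@type": type.googleapis.com/envoy.extensions.filters.network.ratelimit.v3.RateLimit
      stat_prefix: ratelimit
      domain: some_domain
      descriptors:
      - entries:
        - key: some_key
          value: some_value
      # When false, errors from the rate limit service are allowed through
      # (counted as failure_mode_allowed in the filter statistics above).
      failure_mode_deny: false
      rate_limit_service:
        grpc_service:
          envoy_grpc:
            # Cluster that addresses the external rate limit service.
            cluster_name: rate_limit_cluster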
diff --git a/docs/root/configuration/other_protocols/dubbo_filters/router_filter.rst b/docs/root/configuration/other_protocols/dubbo_filters/router_filter.rst index b51a7dd455b4..615a0b03da27 100644 --- a/docs/root/configuration/other_protocols/dubbo_filters/router_filter.rst +++ b/docs/root/configuration/other_protocols/dubbo_filters/router_filter.rst @@ -5,7 +5,7 @@ Router The router filter implements Dubbo forwarding. It will be used in almost all Dubbo proxying scenarios. The filter's main job is to follow the instructions specified in the configured -:ref:`route table `. +:ref:`route table `. -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.dubbo.router*. diff --git a/docs/root/configuration/other_protocols/thrift_filters/rate_limit_filter.rst b/docs/root/configuration/other_protocols/thrift_filters/rate_limit_filter.rst index 4fa27e08febd..366059c65a0d 100644 --- a/docs/root/configuration/other_protocols/thrift_filters/rate_limit_filter.rst +++ b/docs/root/configuration/other_protocols/thrift_filters/rate_limit_filter.rst @@ -4,12 +4,12 @@ Rate limit ========== * Global rate limiting :ref:`architecture overview ` -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.thrift.rate_limit*. The Thrift rate limit filter will call the rate limit service when the request's route has one or more :ref:`rate limit configurations -` that +` that match the filter's stage setting. More than one configuration can apply to a request. Each configuration results in a descriptor being sent to the rate limit service. @@ -18,7 +18,7 @@ application exception indicating an internal error is returned. If there is an error in calling the rate limit service or it returns an error and :ref:`failure_mode_deny -` is set to +` is set to true, an application exception indicating an internal error is returned. .. _config_thrift_filters_rate_limit_stats: @@ -37,5 +37,5 @@ The filter outputs statistics in the *cluster..ratelimit.* over_limit, Counter, Total over limit responses from the rate limit service. failure_mode_allowed, Counter, "Total requests that were error(s) but were allowed through because of :ref:`failure_mode_deny - ` set to + ` set to false." diff --git a/docs/root/configuration/other_protocols/thrift_filters/router_filter.rst b/docs/root/configuration/other_protocols/thrift_filters/router_filter.rst index c9ced73dd5d3..22ce7bcbf137 100644 --- a/docs/root/configuration/other_protocols/thrift_filters/router_filter.rst +++ b/docs/root/configuration/other_protocols/thrift_filters/router_filter.rst @@ -5,9 +5,9 @@ Router The router filter implements Thrift forwarding. It will be used in almost all Thrift proxying scenarios. The filter's main job is to follow the instructions specified in the configured -:ref:`route table `. +:ref:`route table `. -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.thrift.router*. Statistics diff --git a/docs/root/configuration/overview/bootstrap.rst b/docs/root/configuration/overview/bootstrap.rst index 03019bfc86ae..ec75f71f7a4a 100644 --- a/docs/root/configuration/overview/bootstrap.rst +++ b/docs/root/configuration/overview/bootstrap.rst @@ -14,12 +14,12 @@ the :option:`-c` flag, i.e.: where the extension reflects the underlying config representation. -The :ref:`Bootstrap ` message is the root of the -configuration. 
A key concept in the :ref:`Bootstrap ` +The :ref:`Bootstrap ` message is the root of the +configuration. A key concept in the :ref:`Bootstrap ` message is the distinction between static and dynamic resources. Resources such -as a :ref:`Listener ` or :ref:`Cluster -` may be supplied either statically in -:ref:`static_resources ` or have +as a :ref:`Listener ` or :ref:`Cluster +` may be supplied either statically in +:ref:`static_resources ` or have an xDS service such as :ref:`LDS ` or :ref:`CDS ` configured in -:ref:`dynamic_resources `. +:ref:`dynamic_resources `. diff --git a/docs/root/configuration/overview/examples.rst b/docs/root/configuration/overview/examples.rst index f26345a3138e..a4758cb15104 100644 --- a/docs/root/configuration/overview/examples.rst +++ b/docs/root/configuration/overview/examples.rst @@ -25,7 +25,7 @@ A minimal fully static bootstrap config is provided below: - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager stat_prefix: ingress_http codec_type: AUTO route_config: @@ -58,7 +58,7 @@ Mostly static with dynamic EDS A bootstrap config that continues from the above example with :ref:`dynamic endpoint discovery ` via an -:ref:`EDS` gRPC management server listening +:ref:`EDS` gRPC management server listening on 127.0.0.1:5678 is provided below: .. code-block:: yaml @@ -77,7 +77,7 @@ on 127.0.0.1:5678 is provided below: - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager stat_prefix: ingress_http codec_type: AUTO route_config: @@ -125,18 +125,18 @@ Notice above that *xds_cluster* is defined to point Envoy at the management serv an otherwise completely dynamic configurations, some static resources need to be defined to point Envoy at its xDS management server(s). -It's important to set appropriate :ref:`TCP Keep-Alive options ` +It's important to set appropriate :ref:`TCP Keep-Alive options ` in the `tcp_keepalive` block. This will help detect TCP half open connections to the xDS management server and re-establish a full connection. In the above example, the EDS management server could then return a proto encoding of a -:ref:`DiscoveryResponse `: +:ref:`DiscoveryResponse `: .. 
code-block:: yaml version_info: "0" resources: - - "@type": type.googleapis.com/envoy.api.v2.ClusterLoadAssignment + - "@type": type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment cluster_name: some_service endpoints: - lb_endpoints: @@ -207,7 +207,7 @@ The management server could respond to LDS requests with: version_info: "0" resources: - - "@type": type.googleapis.com/envoy.api.v2.Listener + - "@type": type.googleapis.com/envoy.config.listener.v3.Listener name: listener_0 address: socket_address: @@ -217,7 +217,7 @@ The management server could respond to LDS requests with: - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager stat_prefix: ingress_http codec_type: AUTO rds: @@ -237,7 +237,7 @@ The management server could respond to RDS requests with: version_info: "0" resources: - - "@type": type.googleapis.com/envoy.api.v2.RouteConfiguration + - "@type": type.googleapis.com/envoy.config.route.v3.RouteConfiguration name: local_route virtual_hosts: - name: local_service @@ -252,7 +252,7 @@ The management server could respond to CDS requests with: version_info: "0" resources: - - "@type": type.googleapis.com/envoy.api.v2.Cluster + - "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster name: some_service connect_timeout: 0.25s lb_policy: ROUND_ROBIN @@ -271,7 +271,7 @@ The management server could respond to EDS requests with: version_info: "0" resources: - - "@type": type.googleapis.com/envoy.api.v2.ClusterLoadAssignment + - "@type": type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment cluster_name: some_service endpoints: - lb_endpoints: diff --git a/docs/root/configuration/overview/extension.rst b/docs/root/configuration/overview/extension.rst index e131a7515cc3..37f58b8ecad7 100644 --- a/docs/root/configuration/overview/extension.rst +++ b/docs/root/configuration/overview/extension.rst @@ -15,7 +15,7 @@ filter configuration snippet is permitted: name: front-http-proxy typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager stat_prefix: ingress_http codec_type: AUTO rds: @@ -29,7 +29,7 @@ filter configuration snippet is permitted: http_filters: - name: front-router typed_config: - "@type": type.googleapis.com/envoy.config.filter.http.router.v2.Router + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router dynamic_stats: true In case the control plane lacks the schema definitions for an extension, @@ -43,7 +43,7 @@ follows: name: front-http-proxy typed_config: "@type": type.googleapis.com/udpa.type.v1.TypedStruct - type_url: type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + type_url: type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager value: stat_prefix: ingress_http codec_type: AUTO @@ -59,5 +59,5 @@ follows: - name: front-router typed_config: "@type": type.googleapis.com/udpa.type.v1.TypedStruct - type_url: type.googleapis.com/envoy.config.filter.http.router.v2.Router + type_url: type.googleapis.com/envoy.extensions.filters.http.router.v3Router diff --git a/docs/root/configuration/overview/mgmt_server.rst 
b/docs/root/configuration/overview/mgmt_server.rst index 7661f9d513fc..febc770bb207 100644 --- a/docs/root/configuration/overview/mgmt_server.rst +++ b/docs/root/configuration/overview/mgmt_server.rst @@ -46,7 +46,7 @@ The following statistics are generated for all subscriptions. :widths: 1, 1, 2 config_reload, Counter, Total API fetches that resulted in a config reload due to a different config - init_fetch_timeout, Counter, Total :ref:`initial fetch timeouts ` + init_fetch_timeout, Counter, Total :ref:`initial fetch timeouts ` update_attempt, Counter, Total API fetches attempted update_success, Counter, Total API fetches completed successfully update_failure, Counter, Total API fetches that failed because of network errors diff --git a/docs/root/configuration/overview/xds_api.rst b/docs/root/configuration/overview/xds_api.rst index dc28631d87c5..428575afda8f 100644 --- a/docs/root/configuration/overview/xds_api.rst +++ b/docs/root/configuration/overview/xds_api.rst @@ -5,8 +5,8 @@ xDS API endpoints An xDS management server will implement the below endpoints as required for gRPC and/or REST serving. In both streaming gRPC and -REST-JSON cases, a :ref:`DiscoveryRequest ` is sent and a -:ref:`DiscoveryResponse ` received following the +REST-JSON cases, a :ref:`DiscoveryRequest ` is sent and a +:ref:`DiscoveryResponse ` received following the :ref:`xDS protocol `. Below we describe endpoints for the v2 and v3 transport API versions. @@ -19,7 +19,7 @@ gRPC streaming endpoints .. http:post:: /envoy.api.v2.ClusterDiscoveryService/StreamClusters .. http:post:: /envoy.service.cluster.v3.ClusterDiscoveryService/StreamClusters -See :repo:`cds.proto ` for the service definition. This is used by Envoy +See :repo:`cds.proto ` for the service definition. This is used by Envoy as a client when .. code-block:: yaml @@ -33,14 +33,14 @@ as a client when cluster_name: some_xds_cluster is set in the :ref:`dynamic_resources -` of the :ref:`Bootstrap -` config. +` of the :ref:`Bootstrap +` config. .. http:post:: /envoy.api.v2.EndpointDiscoveryService/StreamEndpoints .. http:post:: /envoy.service.endpoint.v3.EndpointDiscoveryService/StreamEndpoints See :repo:`eds.proto -` +` for the service definition. This is used by Envoy as a client when .. code-block:: yaml @@ -54,14 +54,14 @@ for the service definition. This is used by Envoy as a client when cluster_name: some_xds_cluster is set in the :ref:`eds_cluster_config -` field of the :ref:`Cluster -` config. +` field of the :ref:`Cluster +` config. .. http:post:: /envoy.api.v2.ListenerDiscoveryService/StreamListeners .. http:post:: /envoy.service.listener.v3.ListenerDiscoveryService/StreamListeners See :repo:`lds.proto -` +` for the service definition. This is used by Envoy as a client when .. code-block:: yaml @@ -75,14 +75,14 @@ for the service definition. This is used by Envoy as a client when cluster_name: some_xds_cluster is set in the :ref:`dynamic_resources -` of the :ref:`Bootstrap -` config. +` of the :ref:`Bootstrap +` config. .. http:post:: /envoy.api.v2.RouteDiscoveryService/StreamRoutes .. http:post:: /envoy.service.route.v3.RouteDiscoveryService/StreamRoutes See :repo:`rds.proto -` +` for the service definition. This is used by Envoy as a client when .. code-block:: yaml @@ -97,15 +97,15 @@ for the service definition. This is used by Envoy as a client when cluster_name: some_xds_cluster is set in the :ref:`rds -` field +` field of the :ref:`HttpConnectionManager -` config. +` config. .. 
http:post:: /envoy.api.v2.ScopedRoutesDiscoveryService/StreamScopedRoutes .. http:post:: /envoy.service.route.v3.ScopedRoutesDiscoveryService/StreamScopedRoutes See :repo:`srds.proto -` +` for the service definition. This is used by Envoy as a client when .. code-block:: yaml @@ -121,15 +121,15 @@ for the service definition. This is used by Envoy as a client when cluster_name: some_xds_cluster is set in the :ref:`scoped_routes -` +` field of the :ref:`HttpConnectionManager -` config. +` config. .. http:post:: /envoy.service.discovery.v2.SecretDiscoveryService/StreamSecrets .. http:post:: /envoy.service.secret.v3.SecretDiscoveryService/StreamSecrets See :repo:`sds.proto -` +` for the service definition. This is used by Envoy as a client when .. code-block:: yaml @@ -143,14 +143,14 @@ for the service definition. This is used by Envoy as a client when envoy_grpc: cluster_name: some_xds_cluster -is set inside a :ref:`SdsSecretConfig ` message. This message -is used in various places such as the :ref:`CommonTlsContext `. +is set inside a :ref:`SdsSecretConfig ` message. This message +is used in various places such as the :ref:`CommonTlsContext `. .. http:post:: /envoy.service.discovery.v2.RuntimeDiscoveryService/StreamRuntime .. http:post:: /envoy.service.runtime.v3.RuntimeDiscoveryService/StreamRuntime See :repo:`rtds.proto -` +` for the service definition. This is used by Envoy as a client when .. code-block:: yaml @@ -164,7 +164,7 @@ for the service definition. This is used by Envoy as a client when envoy_grpc: cluster_name: some_xds_cluster -is set inside the :ref:`rtds_layer ` +is set inside the :ref:`rtds_layer ` field. REST endpoints @@ -174,7 +174,7 @@ REST endpoints .. http:post:: /v3/discovery:clusters See :repo:`cds.proto -` +` for the service definition. This is used by Envoy as a client when .. code-block:: yaml @@ -186,14 +186,14 @@ for the service definition. This is used by Envoy as a client when cluster_names: [some_xds_cluster] is set in the :ref:`dynamic_resources -` of the :ref:`Bootstrap -` config. +` of the :ref:`Bootstrap +` config. .. http:post:: /v2/discovery:endpoints .. http:post:: /v3/discovery:endpoints See :repo:`eds.proto -` +` for the service definition. This is used by Envoy as a client when .. code-block:: yaml @@ -205,14 +205,14 @@ for the service definition. This is used by Envoy as a client when cluster_names: [some_xds_cluster] is set in the :ref:`eds_cluster_config -` field of the :ref:`Cluster -` config. +` field of the :ref:`Cluster +` config. .. http:post:: /v2/discovery:listeners .. http:post:: /v3/discovery:listeners See :repo:`lds.proto -` +` for the service definition. This is used by Envoy as a client when .. code-block:: yaml @@ -224,14 +224,14 @@ for the service definition. This is used by Envoy as a client when cluster_names: [some_xds_cluster] is set in the :ref:`dynamic_resources -` of the :ref:`Bootstrap -` config. +` of the :ref:`Bootstrap +` config. .. http:post:: /v2/discovery:routes .. http:post:: /v3/discovery:routes See :repo:`rds.proto -` +` for the service definition. This is used by Envoy as a client when .. code-block:: yaml @@ -244,8 +244,8 @@ for the service definition. This is used by Envoy as a client when cluster_names: [some_xds_cluster] is set in the :ref:`rds -` field of the :ref:`HttpConnectionManager -` config. +` field of the :ref:`HttpConnectionManager +` config. .. note:: @@ -288,7 +288,7 @@ document. The gRPC endpoint is: .. 
http:post:: /envoy.service.discovery.v3.AggregatedDiscoveryService/StreamAggregatedResources See :repo:`discovery.proto -` +` for the service definition. This is used by Envoy as a client when .. code-block:: yaml @@ -301,8 +301,8 @@ for the service definition. This is used by Envoy as a client when cluster_name: some_ads_cluster is set in the :ref:`dynamic_resources -` of the :ref:`Bootstrap -` config. +` of the :ref:`Bootstrap +` config. When this is set, any of the configuration sources :ref:`above ` can be set to use the ADS channel. For example, a LDS config could be changed from @@ -336,7 +336,7 @@ churn, these state-of-the-world updates can be cumbersome. As of 1.12.0, Envoy supports a "delta" variant of xDS (including ADS), where updates only contain resources added/changed/removed. Delta xDS is a gRPC (only) protocol. Delta uses different request/response protos than SotW (DeltaDiscovery{Request,Response}); see -:repo:`discovery.proto `. Conceptually, delta should be viewed as +:repo:`discovery.proto `. Conceptually, delta should be viewed as a new xDS transport type: there is static, filesystem, REST, gRPC-SotW, and now gRPC-delta. (Envoy's implementation of the gRPC-SotW/delta client happens to share most of its code between the two, and something similar is likely possible on the server side. However, they are in fact @@ -344,7 +344,7 @@ incompatible protocols. :ref:`The specification of the delta xDS protocol's behavior is here `.) To use delta, simply set the api_type field of your -:ref:`ApiConfigSource ` proto(s) to DELTA_GRPC. +:ref:`ApiConfigSource ` proto(s) to DELTA_GRPC. That works for both xDS and ADS; for ADS, it's the api_type field of -:ref:`DynamicResources.ads_config `, +:ref:`DynamicResources.ads_config `, as described in the previous section. diff --git a/docs/root/configuration/security/secret.rst b/docs/root/configuration/security/secret.rst index b1b3e1ec33fc..060fcb79b53f 100644 --- a/docs/root/configuration/security/secret.rst +++ b/docs/root/configuration/security/secret.rst @@ -4,7 +4,7 @@ Secret discovery service (SDS) ============================== TLS certificates, the secrets, can be specified in the bootstrap.static_resource -:ref:`secrets `. +:ref:`secrets `. But they can also be fetched remotely by secret discovery service (SDS). The most important benefit of SDS is to simplify the certificate management. Without this feature, in k8s deployment, certificates must be created as secrets and mounted into the proxy containers. If certificates are expired, the secrets need to be updated and the proxy containers need to be re-deployed. With SDS, a central SDS server will push certificates to all Envoy instances. If certificates are expired, the server just pushes new certificates to Envoy instances, Envoy will use the new ones right away without re-deployment. @@ -23,15 +23,15 @@ The connection between Envoy proxy and SDS server has to be secure. One option i SDS server ---------- -A SDS server needs to implement the gRPC service :repo:`SecretDiscoveryService `. +A SDS server needs to implement the gRPC service :repo:`SecretDiscoveryService `. It follows the same protocol as other :ref:`xDS `. SDS Configuration ----------------- -:ref:`SdsSecretConfig ` is used to specify the secret. Its field *name* is a required field. If its *sds_config* field is empty, the *name* field specifies the secret in the bootstrap static_resource :ref:`secrets `. Otherwise, it specifies the SDS server as :ref:`ConfigSource `. 
Only gRPC is supported for the SDS service so its *api_config_source* must specify a **grpc_service**. +:ref:`SdsSecretConfig ` is used to specify the secret. Its field *name* is a required field. If its *sds_config* field is empty, the *name* field specifies the secret in the bootstrap static_resource :ref:`secrets `. Otherwise, it specifies the SDS server as :ref:`ConfigSource `. Only gRPC is supported for the SDS service so its *api_config_source* must specify a **grpc_service**. -*SdsSecretConfig* is used in two fields in :ref:`CommonTlsContext `. The first field is *tls_certificate_sds_secret_configs* to use SDS to get :ref:`TlsCertificate `. The second field is *validation_context_sds_secret_config* to use SDS to get :ref:`CertificateValidationContext `. +*SdsSecretConfig* is used in two fields in :ref:`CommonTlsContext `. The first field is *tls_certificate_sds_secret_configs* to use SDS to get :ref:`TlsCertificate `. The second field is *validation_context_sds_secret_config* to use SDS to get :ref:`CertificateValidationContext `. Example one: static_resource ----------------------------- @@ -68,7 +68,7 @@ This example show how to configure secrets in the static_resource: transport_socket: name: envoy.transport_sockets.tls typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext common_tls_context: tls_certificate_sds_secret_configs: - name: client_cert @@ -78,7 +78,7 @@ This example show how to configure secrets in the static_resource: transport_socket: name: envoy.transport_sockets.tls typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext common_tls_context: tls_certificate_sds_secret_configs: - name: server_cert @@ -112,7 +112,7 @@ This example shows how to configure secrets fetched from remote SDS servers: transport_socket: name: envoy.transport_sockets.tls typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext common_tls_context: - tls_certificate: certificate_chain: @@ -137,7 +137,7 @@ This example shows how to configure secrets fetched from remote SDS servers: transport_socket: name: envoy.transport_sockets.tls typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext common_tls_context: tls_certificate_sds_secret_configs: - name: client_cert @@ -153,7 +153,7 @@ This example shows how to configure secrets fetched from remote SDS servers: - transport_socket: name: envoy.transport_sockets.tls typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext common_tls_context: tls_certificate_sds_secret_configs: - name: server_cert @@ -205,7 +205,7 @@ In contrast, :ref:`sds_server_example` requires a restart to reload xDS certific transport_socket: name: "envoy.transport_sockets.tls" typed_config: - "@type": "type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext" + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext" common_tls_context: tls_certificate_sds_secret_configs: sds_config: @@ -219,7 +219,7 @@ Paths to client certificate, including client's 
certificate chain and private ke .. code-block:: yaml resources: - - "@type": "type.googleapis.com/envoy.api.v2.auth.Secret" + - "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret" tls_certificate: certificate_chain: filename: /certs/sds_cert.pem @@ -231,7 +231,7 @@ Path to CA certificate bundle for validating the xDS server certificate is given .. code-block:: yaml resources: - - "@type": "type.googleapis.com/envoy.api.v2.auth.Secret" + - "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret" validation_context: trusted_ca: filename: /certs/cacert.pem diff --git a/docs/root/configuration/upstream/cluster_manager/cds.rst b/docs/root/configuration/upstream/cluster_manager/cds.rst index dcea74d79710..9d747f4c8349 100644 --- a/docs/root/configuration/upstream/cluster_manager/cds.rst +++ b/docs/root/configuration/upstream/cluster_manager/cds.rst @@ -12,7 +12,7 @@ clusters depending on what is required. Any clusters that are statically defined within the Envoy configuration cannot be modified or removed via the CDS API. -* :ref:`v2 CDS API ` +* :ref:`v3 CDS API ` Statistics ---------- diff --git a/docs/root/configuration/upstream/cluster_manager/cluster_circuit_breakers.rst b/docs/root/configuration/upstream/cluster_manager/cluster_circuit_breakers.rst index 53a08ca497e5..9f765173658b 100644 --- a/docs/root/configuration/upstream/cluster_manager/cluster_circuit_breakers.rst +++ b/docs/root/configuration/upstream/cluster_manager/cluster_circuit_breakers.rst @@ -4,7 +4,7 @@ Circuit breaking ================ * Circuit Breaking :ref:`architecture overview `. -* :ref:`v2 API documentation `. +* :ref:`v3 API documentation `. The following is an example circuit breaker configuration: @@ -26,5 +26,5 @@ Runtime All circuit breaking settings are runtime configurable for all defined priorities based on cluster name. They follow the following naming scheme ``circuit_breakers...``. ``cluster_name`` is the name field in each cluster's configuration, which is set in the Envoy -:ref:`config file `. Available runtime settings will override +:ref:`config file `. Available runtime settings will override settings set in the Envoy config file. diff --git a/docs/root/configuration/upstream/cluster_manager/cluster_hc.rst b/docs/root/configuration/upstream/cluster_manager/cluster_hc.rst index 683a89cad800..5c73695597f2 100644 --- a/docs/root/configuration/upstream/cluster_manager/cluster_hc.rst +++ b/docs/root/configuration/upstream/cluster_manager/cluster_hc.rst @@ -6,7 +6,7 @@ Health checking * Health checking :ref:`architecture overview `. * If health checking is configured for a cluster, additional statistics are emitted. They are documented :ref:`here `. -* :ref:`v2 API documentation `. +* :ref:`v3 API documentation `. .. _config_cluster_manager_cluster_hc_tcp_health_checking: diff --git a/docs/root/configuration/upstream/cluster_manager/cluster_runtime.rst b/docs/root/configuration/upstream/cluster_manager/cluster_runtime.rst index 34e0cb9058eb..ae138196d141 100644 --- a/docs/root/configuration/upstream/cluster_manager/cluster_runtime.rst +++ b/docs/root/configuration/upstream/cluster_manager/cluster_runtime.rst @@ -9,18 +9,18 @@ Active health checking ---------------------- health_check.min_interval - Min value for the health checking :ref:`interval `. + Min value for the health checking :ref:`interval `. Default value is 1 ms. The effective health check interval will be no less than 1ms. 
The health checking interval will be between *min_interval* and *max_interval*. health_check.max_interval - Max value for the health checking :ref:`interval `. + Max value for the health checking :ref:`interval `. Default value is MAX_INT. The effective health check interval will be no less than 1ms. The health checking interval will be between *min_interval* and *max_interval*. health_check.verify_cluster What % of health check requests will be verified against the :ref:`expected upstream service - ` as the :ref:`health check filter + ` as the :ref:`health check filter ` will write the remote service cluster into the response. .. _config_cluster_manager_cluster_runtime_outlier_detection: @@ -30,101 +30,101 @@ Outlier detection See the outlier detection :ref:`architecture overview ` for more information on outlier detection. The runtime parameters supported by outlier detection are the -same as the :ref:`static configuration parameters `, namely: +same as the :ref:`static configuration parameters `, namely: outlier_detection.consecutive_5xx :ref:`consecutive_5XX - ` + ` setting in outlier detection outlier_detection.consecutive_gateway_failure :ref:`consecutive_gateway_failure - ` + ` setting in outlier detection outlier_detection.consecutive_local_origin_failure :ref:`consecutive_local_origin_failure - ` + ` setting in outlier detection outlier_detection.interval_ms :ref:`interval_ms - ` + ` setting in outlier detection outlier_detection.base_ejection_time_ms :ref:`base_ejection_time_ms - ` + ` setting in outlier detection outlier_detection.max_ejection_percent :ref:`max_ejection_percent - ` + ` setting in outlier detection outlier_detection.enforcing_consecutive_5xx :ref:`enforcing_consecutive_5xx - ` + ` setting in outlier detection outlier_detection.enforcing_consecutive_gateway_failure :ref:`enforcing_consecutive_gateway_failure - ` + ` setting in outlier detection outlier_detection.enforcing_consecutive_local_origin_failure :ref:`enforcing_consecutive_local_origin_failure - ` + ` setting in outlier detection outlier_detection.enforcing_success_rate :ref:`enforcing_success_rate - ` + ` setting in outlier detection outlier_detection.enforcing_local_origin_success_rate :ref:`enforcing_local_origin_success_rate - ` + ` setting in outlier detection outlier_detection.success_rate_minimum_hosts :ref:`success_rate_minimum_hosts - ` + ` setting in outlier detection outlier_detection.success_rate_request_volume :ref:`success_rate_request_volume - ` + ` setting in outlier detection outlier_detection.success_rate_stdev_factor :ref:`success_rate_stdev_factor - ` + ` setting in outlier detection outlier_detection.enforcing_failure_percentage :ref:`enforcing_failure_percentage - ` + ` setting in outlier detection outlier_detection.enforcing_failure_percentage_local_origin :ref:`enforcing_failure_percentage_local_origin - ` + ` setting in outlier detection outlier_detection.failure_percentage_request_volume :ref:`failure_percentage_request_volume - ` + ` setting in outlier detection outlier_detection.failure_percentage_minimum_hosts :ref:`failure_percentage_minimum_hosts - ` + ` setting in outlier detection outlier_detection.failure_percentage_threshold :ref:`failure_percentage_threshold - ` + ` setting in outlier detection Core @@ -135,7 +135,7 @@ upstream.healthy_panic_threshold Defaults to 50%. upstream.use_http2 - Whether the cluster utilizes the *http2* :ref:`protocol options ` + Whether the cluster utilizes the *http2* :ref:`protocol options ` if configured. 
Set to 0 to disable HTTP/2 even if the feature is configured. Defaults to enabled. .. _config_cluster_manager_cluster_runtime_zone_routing: @@ -155,19 +155,19 @@ Circuit breaking ---------------- circuit_breakers...max_connections - :ref:`Max connections circuit breaker setting ` + :ref:`Max connections circuit breaker setting ` circuit_breakers...max_pending_requests - :ref:`Max pending requests circuit breaker setting ` + :ref:`Max pending requests circuit breaker setting ` circuit_breakers...max_requests - :ref:`Max requests circuit breaker setting ` + :ref:`Max requests circuit breaker setting ` circuit_breakers...max_retries - :ref:`Max retries circuit breaker setting ` + :ref:`Max retries circuit breaker setting ` circuit_breakers...retry_budget.budget_percent - :ref:`Max retries circuit breaker setting ` + :ref:`Max retries circuit breaker setting ` circuit_breakers...retry_budget.min_retry_concurrency - :ref:`Max retries circuit breaker setting ` + :ref:`Max retries circuit breaker setting ` diff --git a/docs/root/configuration/upstream/cluster_manager/cluster_stats.rst b/docs/root/configuration/upstream/cluster_manager/cluster_stats.rst index e58a1d32c90c..61f506f29453 100644 --- a/docs/root/configuration/upstream/cluster_manager/cluster_stats.rst +++ b/docs/root/configuration/upstream/cluster_manager/cluster_stats.rst @@ -75,7 +75,7 @@ Every cluster has a statistics tree rooted at *cluster..* with the followi upstream_rq_retry, Counter, Total request retries upstream_rq_retry_limit_exceeded, Counter, Total requests not retried due to exceeding :ref:`the configured number of maximum retries ` upstream_rq_retry_success, Counter, Total request retry successes - upstream_rq_retry_overflow, Counter, Total requests not retried due to circuit breaking or exceeding the :ref:`retry budget ` + upstream_rq_retry_overflow, Counter, Total requests not retried due to circuit breaking or exceeding the :ref:`retry budget ` upstream_flow_control_paused_reading_total, Counter, Total number of times flow control paused reading from upstream upstream_flow_control_resumed_reading_total, Counter, Total number of times flow control resumed reading from upstream upstream_flow_control_backed_up_total, Counter, Total number of times the upstream connection backed up and paused reads from downstream @@ -134,16 +134,16 @@ statistics will be rooted at *cluster..outlier_detection.* and contain the ejections_overflow, Counter, Number of ejections aborted due to the max ejection % ejections_enforced_consecutive_5xx, Counter, Number of enforced consecutive 5xx ejections ejections_detected_consecutive_5xx, Counter, Number of detected consecutive 5xx ejections (even if unenforced) - ejections_enforced_success_rate, Counter, Number of enforced success rate outlier ejections. Exact meaning of this counter depends on :ref:`outlier_detection.split_external_local_origin_errors` config item. Refer to :ref:`Outlier Detection documentation` for details. - ejections_detected_success_rate, Counter, Number of detected success rate outlier ejections (even if unenforced). Exact meaning of this counter depends on :ref:`outlier_detection.split_external_local_origin_errors` config item. Refer to :ref:`Outlier Detection documentation` for details. + ejections_enforced_success_rate, Counter, Number of enforced success rate outlier ejections. Exact meaning of this counter depends on :ref:`outlier_detection.split_external_local_origin_errors` config item. Refer to :ref:`Outlier Detection documentation` for details. 
+ ejections_detected_success_rate, Counter, Number of detected success rate outlier ejections (even if unenforced). Exact meaning of this counter depends on :ref:`outlier_detection.split_external_local_origin_errors` config item. Refer to :ref:`Outlier Detection documentation` for details. ejections_enforced_consecutive_gateway_failure, Counter, Number of enforced consecutive gateway failure ejections ejections_detected_consecutive_gateway_failure, Counter, Number of detected consecutive gateway failure ejections (even if unenforced) ejections_enforced_consecutive_local_origin_failure, Counter, Number of enforced consecutive local origin failure ejections ejections_detected_consecutive_local_origin_failure, Counter, Number of detected consecutive local origin failure ejections (even if unenforced) ejections_enforced_local_origin_success_rate, Counter, Number of enforced success rate outlier ejections for locally originated failures ejections_detected_local_origin_success_rate, Counter, Number of detected success rate outlier ejections for locally originated failures (even if unenforced) - ejections_enforced_failure_percentage, Counter, Number of enforced failure percentage outlier ejections. Exact meaning of this counter depends on :ref:`outlier_detection.split_external_local_origin_errors` config item. Refer to :ref:`Outlier Detection documentation` for details. - ejections_detected_failure_percentage, Counter, Number of detected failure percentage outlier ejections (even if unenforced). Exact meaning of this counter depends on :ref:`outlier_detection.split_external_local_origin_errors` config item. Refer to :ref:`Outlier Detection documentation` for details. + ejections_enforced_failure_percentage, Counter, Number of enforced failure percentage outlier ejections. Exact meaning of this counter depends on :ref:`outlier_detection.split_external_local_origin_errors` config item. Refer to :ref:`Outlier Detection documentation` for details. + ejections_detected_failure_percentage, Counter, Number of detected failure percentage outlier ejections (even if unenforced). Exact meaning of this counter depends on :ref:`outlier_detection.split_external_local_origin_errors` config item. Refer to :ref:`Outlier Detection documentation` for details. ejections_enforced_failure_percentage_local_origin, Counter, Number of enforced failure percentage outlier ejections for locally originated failures ejections_detected_failure_percentage_local_origin, Counter, Number of detected failure percentage outlier ejections for locally originated failures (even if unenforced) ejections_total, Counter, Deprecated. Number of ejections due to any outlier type (even if unenforced) @@ -175,7 +175,7 @@ Circuit breakers statistics will be rooted at *cluster..circuit_breakers.< Timeout budget statistics ------------------------- -If :ref:`timeout budget statistic tracking ` is +If :ref:`timeout budget statistic tracking ` is turned on, statistics will be added to *cluster.* and contain the following: .. 
csv-table:: diff --git a/docs/root/configuration/upstream/cluster_manager/overview.rst b/docs/root/configuration/upstream/cluster_manager/overview.rst index d54fc5806c15..899c16b27bb8 100644 --- a/docs/root/configuration/upstream/cluster_manager/overview.rst +++ b/docs/root/configuration/upstream/cluster_manager/overview.rst @@ -2,4 +2,4 @@ Overview ======== * Cluster manager :ref:`architecture overview ` -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` diff --git a/docs/root/configuration/upstream/health_checkers/redis.rst b/docs/root/configuration/upstream/health_checkers/redis.rst index 6fb7112327b6..03ad07741e67 100644 --- a/docs/root/configuration/upstream/health_checkers/redis.rst +++ b/docs/root/configuration/upstream/health_checkers/redis.rst @@ -8,10 +8,10 @@ which checks Redis upstream hosts. It sends a Redis PING command and expect a PO Redis server can respond with anything other than PONG to cause an immediate active health check failure. Optionally, Envoy can perform EXISTS on a user-specified key. If the key does not exist it is considered a passing health check. This allows the user to mark a Redis instance for maintenance by setting the -specified :ref:`key ` to any value and waiting +specified :ref:`key ` to any value and waiting for traffic to drain. -An example setting for :ref:`custom_health_check ` as a +An example setting for :ref:`custom_health_check ` as a Redis health checker is shown below: .. code-block:: yaml @@ -19,7 +19,7 @@ Redis health checker is shown below: custom_health_check: name: envoy.health_checkers.redis typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.redis_proxy.v2.RedisProxy + "@type": type.googleapis.com/envoy.extensions.filters.network.redis_proxy.v3.RedisProxy key: foo -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` diff --git a/docs/root/faq/configuration/flow_control.rst b/docs/root/faq/configuration/flow_control.rst index 7d3f9e775fed..6f7b90163d11 100644 --- a/docs/root/faq/configuration/flow_control.rst +++ b/docs/root/faq/configuration/flow_control.rst @@ -10,9 +10,9 @@ response body must be buffered and exceeds the limit, Envoy will increment the (if headers have already been sent downstream) or send a 500 response. There are three knobs for configuring Envoy flow control: -:ref:`listener limits `, -:ref:`cluster limits ` and -:ref:`http2 stream limits ` +:ref:`listener limits `, +:ref:`cluster limits ` and +:ref:`http2 stream limits ` The listener limits apply to how much raw data will be read per read() call from downstream, as well as how much data may be buffered in userspace between Envoy @@ -22,7 +22,7 @@ The listener limits are also propogated to the HttpConnectionManager, and applie basis to HTTP/1.1 L7 buffers described below. As such they limit the size of HTTP/1 requests and response bodies that can be buffered. For HTTP/2, as many streams can be multiplexed over one TCP connection, the L7 and L4 buffer limits can be tuned separately, and the configuration option -:ref:`http2 stream limits ` +:ref:`http2 stream limits ` is applied to all of the L7 buffers. Note that for both HTTP/1 and HTTP/2 Envoy can and will proxy arbitrarily large bodies on routes where all L7 filters are streaming, but many filters such as the transcoder or buffer filters require the full HTTP body to @@ -33,7 +33,7 @@ well as how much data may be buffered in userspace between Envoy and upstream. 
The following code block shows how to adjust all three fields mentioned above, though generally the only one which needs to be amended is the listener -:ref:`per_connection_buffer_limit_bytes ` +:ref:`per_connection_buffer_limit_bytes ` .. code-block:: yaml @@ -48,7 +48,7 @@ the only one which needs to be amended is the listener filters: name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager http2_protocol_options: initial_stream_window_size: 65535 route_config: {} @@ -60,7 +60,12 @@ the only one which needs to be amended is the listener name: cluster_0 connect_timeout: 5s per_connection_buffer_limit_bytes: 1024 - hosts: - socket_address: - address: '::1' - port_value: 46685 + load_assignment: + cluster_name: some_service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: ::1 + port_value: 46685 diff --git a/docs/root/faq/configuration/sni.rst b/docs/root/faq/configuration/sni.rst index 1e8da3f9c6d5..7ef61ef565e3 100644 --- a/docs/root/faq/configuration/sni.rst +++ b/docs/root/faq/configuration/sni.rst @@ -3,7 +3,7 @@ How do I configure SNI for listeners? ===================================== -`SNI `_ is only supported in the :ref:`v2 +`SNI `_ is only supported in the :ref:`v3 configuration/API `. .. attention:: @@ -26,7 +26,7 @@ The following is a YAML example of the above requirement. transport_socket: name: envoy.transport_sockets.tls typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext common_tls_context: tls_certificates: - certificate_chain: { filename: "example_com_cert.pem" } @@ -34,7 +34,7 @@ The following is a YAML example of the above requirement. filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager stat_prefix: ingress_http route_config: virtual_hosts: @@ -48,7 +48,7 @@ The following is a YAML example of the above requirement. transport_socket: name: envoy.transport_sockets.tls typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext common_tls_context: tls_certificates: - certificate_chain: { filename: "api_example_com_cert.pem" } @@ -56,7 +56,7 @@ The following is a YAML example of the above requirement. filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager stat_prefix: ingress_http route_config: virtual_hosts: @@ -70,9 +70,9 @@ The following is a YAML example of the above requirement. How do I configure SNI for clusters? ==================================== -For clusters, a fixed SNI can be set in :ref:`UpstreamTlsContext `. +For clusters, a fixed SNI can be set in :ref:`UpstreamTlsContext `. 
To derive SNI from HTTP `host` or `:authority` header, turn on -:ref:`auto_sni ` to override the fixed SNI in +:ref:`auto_sni ` to override the fixed SNI in `UpstreamTlsContext`. If upstream will present certificates with the hostname in SAN, turn on -:ref:`auto_san_validation ` too. +:ref:`auto_san_validation ` too. It still needs a trust CA in validation context in `UpstreamTlsContext` for trust anchor. diff --git a/docs/root/faq/configuration/timeouts.rst b/docs/root/faq/configuration/timeouts.rst index 4cdca3a57167..4cf5aa16aa29 100644 --- a/docs/root/faq/configuration/timeouts.rst +++ b/docs/root/faq/configuration/timeouts.rst @@ -19,16 +19,16 @@ Connection timeouts Connection timeouts apply to the entire HTTP connection and all streams the connection carries. -* The HTTP protocol :ref:`idle timeout ` +* The HTTP protocol :ref:`idle timeout ` is defined in a generic message used by both the HTTP connection manager as well as upstream cluster HTTP connections. The idle timeout is the time at which a downstream or upstream connection will be terminated if there are no active streams. The default idle timeout if not otherwise specified is *1 hour*. To modify the idle timeout for downstream connections use the :ref:`common_http_protocol_options - ` + ` field in the HTTP connection manager configuration. To modify the idle timeout for upstream connections use the - :ref:`common_http_protocol_options ` field + :ref:`common_http_protocol_options ` field in the cluster configuration. Stream timeouts @@ -39,7 +39,7 @@ an HTTP/2 and HTTP/3 concept, however internally Envoy maps HTTP/1 requests to s context request/stream is interchangeable. * The HTTP connection manager :ref:`request_timeout - ` + ` is the amount of time the connection manager will allow for the *entire request stream* to be received from the client. @@ -49,15 +49,15 @@ context request/stream is interchangeable. (requests that never end). See the stream idle timeout that follows. However, if using the :ref:`buffer filter `, it is recommended to configure this timeout. * The HTTP connection manager :ref:`stream_idle_timeout - ` + ` is the amount of time that the connection manager will allow a stream to exist with no upstream or downstream activity. The default stream idle timeout is *5 minutes*. This timeout is strongly recommended for streaming APIs (requests or responses that never end). -* The HTTP protocol :ref:`max_stream_duration ` +* The HTTP protocol :ref:`max_stream_duration ` is defined in a generic message used by the HTTP connection manager. The max stream duration is the maximum time that a stream's lifetime will span. You can use this functionality when you want to reset HTTP request/response streams periodically. You can't use :ref:`request_timeout - ` + ` in this situation because this timer will be disarmed if a response header is received on the request/response streams. .. attention:: @@ -70,7 +70,7 @@ Route timeouts Envoy supports additional stream timeouts at the route level, as well as overriding some of the stream timeouts already introduced above. -* A route :ref:`timeout ` is the amount of time that +* A route :ref:`timeout ` is the amount of time that Envoy will wait for the upstream to respond with a complete response. *This timeout does not start until the entire downstream request stream has been received*. @@ -79,11 +79,11 @@ stream timeouts already introduced above. 
This timeout defaults to *15 seconds*, however, it is not compatible with streaming responses (responses that never end), and will need to be disabled. Stream idle timeouts should be used in the case of streaming APIs as described elsewhere on this page. -* The route :ref:`idle_timeout ` allows overriding +* The route :ref:`idle_timeout ` allows overriding of the HTTP connection manager :ref:`stream_idle_timeout - ` + ` and does the same thing. -* The route :ref:`per_try_timeout ` can be +* The route :ref:`per_try_timeout ` can be configured when using retries so that individual tries using a shorter timeout than the overall request timeout described above. This timeout only applies before any part of the response is sent to the downstream, which normally happens after the upstream has sent response headers. @@ -93,7 +93,7 @@ stream timeouts already introduced above. TCP --- -* The cluster :ref:`connect_timeout ` specifies the amount +* The cluster :ref:`connect_timeout ` specifies the amount of time Envoy will wait for an upstream TCP connection to be established. This timeout has no default, but is required in the configuration. @@ -101,6 +101,6 @@ TCP For TLS connections, the connect timeout includes the TLS handshake. * The TCP proxy :ref:`idle_timeout - ` + ` is the amount of time that the TCP proxy will allow a connection to exist with no upstream or downstream activity. The default idle timeout if not otherwise specified is *1 hour*. diff --git a/docs/root/faq/configuration/zone_aware_routing.rst b/docs/root/faq/configuration/zone_aware_routing.rst index 65c7967e0142..78b502a02ea1 100644 --- a/docs/root/faq/configuration/zone_aware_routing.rst +++ b/docs/root/faq/configuration/zone_aware_routing.rst @@ -12,8 +12,8 @@ This section describes the specific configuration for the Envoy running side by These are the requirements: * Envoy must be launched with :option:`--service-zone` option which defines the zone for the current host. -* Both definitions of the source and the destination clusters must have :ref:`EDS ` type. -* :ref:`local_cluster_name ` must be set to the +* Both definitions of the source and the destination clusters must have :ref:`EDS ` type. +* :ref:`local_cluster_name ` must be set to the source cluster. Only essential parts are listed in the configuration below for the cluster manager. @@ -35,7 +35,7 @@ Envoy configuration on the destination service ---------------------------------------------- It's not necessary to run Envoy side by side with the destination service, but it's important that each host in the destination cluster registers with the discovery service :ref:`queried by the source service Envoy -`. :ref:`Zone ` +`. :ref:`Zone ` information must be available as part of that response. Only zone related data is listed in the response below. diff --git a/docs/root/faq/load_balancing/disable_circuit_breaking.rst b/docs/root/faq/load_balancing/disable_circuit_breaking.rst index 764de5158f71..00182f8d8407 100644 --- a/docs/root/faq/load_balancing/disable_circuit_breaking.rst +++ b/docs/root/faq/load_balancing/disable_circuit_breaking.rst @@ -1,7 +1,7 @@ Is there a way to disable circuit breaking? =========================================== -Envoy comes with :ref:`certain defaults ` +Envoy comes with :ref:`certain defaults ` for each kind of circuit breaking. Currently, there isn't a switch to turn circuit breaking off completely; however, you could achieve a similar behavior by setting these thresholds very high, for example, to `std::numeric_limits::max()`. 
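As a rough sketch of the workaround described above, the per-cluster thresholds can simply be raised toward the uint32 maximum. The cluster name and values below are illustrative only, not a recommended production setting:

.. code-block:: yaml

  clusters:
  - name: some_service
    connect_timeout: 5s
    circuit_breakers:
      thresholds:
      - priority: DEFAULT
        max_connections: 4294967295
        max_pending_requests: 4294967295
        max_requests: 4294967295
        max_retries: 4294967295

Even with the other limits effectively removed, it is usually still worth keeping *max_retries* bounded so that retry storms cannot amplify an outage.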
diff --git a/docs/root/install/tools/schema_validator_check_tool.rst b/docs/root/install/tools/schema_validator_check_tool.rst index 067ebabc128f..a3e1a7c7bc41 100644 --- a/docs/root/install/tools/schema_validator_check_tool.rst +++ b/docs/root/install/tools/schema_validator_check_tool.rst @@ -13,8 +13,8 @@ Input 1. The schema type to check the passed in configuration against. The supported types are: - * `route` - for :ref:`route configuration` validation. - * `discovery_response` for :ref:`discovery response` validation. + * `route` - for :ref:`route configuration` validation. + * `discovery_response` for :ref:`discovery response` validation. 2. The path to the configuration file. diff --git a/docs/root/intro/arch_overview/advanced/data_sharing_between_filters.rst b/docs/root/intro/arch_overview/advanced/data_sharing_between_filters.rst index 3ef220bb89a5..326d9e1bc9bc 100644 --- a/docs/root/intro/arch_overview/advanced/data_sharing_between_filters.rst +++ b/docs/root/intro/arch_overview/advanced/data_sharing_between_filters.rst @@ -17,7 +17,7 @@ Metadata -------- Several parts of Envoy configuration (e.g. listeners, routes, clusters) -contain a :ref:`metadata ` where arbitrary +contain a :ref:`metadata ` where arbitrary key-value pairs can be encoded. The typical pattern is to use the filter names in reverse DNS format as the key and encode filter specific configuration metadata in the value. This metadata is immutable and shared @@ -31,7 +31,7 @@ weighted cluster to select appropriate endpoints in a cluster Typed Metadata -------------- -:ref:`Metadata ` as such is untyped. Before +:ref:`Metadata ` as such is untyped. Before acting on the metadata, callers typically convert it to a typed class object. The cost of conversion becomes non-negligible when performed repeatedly (e.g., for each request stream or connection). Typed Metadata @@ -57,8 +57,8 @@ is specified as part of the configuration. A `FilterState::Object` implements HTTP Per-Route Filter Configuration ----------------------------------- -In HTTP routes, :ref:`per_filter_config -` allows HTTP filters +In HTTP routes, :ref:`typed_per_filter_config +` allows HTTP filters to have virtualhost/route-specific configuration in addition to a global filter config common to all virtual hosts. This configuration is converted and embedded into the route table. It is up to the HTTP filter @@ -66,9 +66,9 @@ implementation to treat the route-specific filter config as a replacement to global config or an enhancement. For example, the HTTP fault filter uses this technique to provide per-route fault configuration. -`per_filter_config` is a `map`. The Connection +`typed_per_filter_config` is a `map`. The Connection manager iterates over this map and invokes the filter factory interface -`createRouteSpecificFilterConfig` to parse/validate the struct value and +`createRouteSpecificFilterConfigTyped` to parse/validate the struct value and convert it into a typed class object that’s stored with the route itself. HTTP filters can then query the route-specific filter config during request processing. 
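As a rough sketch of what this map looks like in a route configuration, the fragment below attaches a per-route override for the HTTP fault filter; the route, cluster and fault values are placeholders, and the fault filter is simply one example of a filter that implements route-specific config:

.. code-block:: yaml

  virtual_hosts:
  - name: local_service
    domains: ["*"]
    routes:
    - match: { prefix: "/" }
      route: { cluster: some_service }
      typed_per_filter_config:
        envoy.filters.http.fault:
          "@type": type.googleapis.com/envoy.extensions.filters.http.fault.v3.HTTPFault
          abort:
            http_status: 503
            percentage: { numerator: 5, denominator: HUNDRED }

The map key is the filter name as it appears in the filter chain, which is how the connection manager knows which filter factory should parse and validate the value.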
diff --git a/docs/root/intro/arch_overview/http/http_connection_management.rst b/docs/root/intro/arch_overview/http/http_connection_management.rst index c2b2c76e3e33..b69c587b17d4 100644 --- a/docs/root/intro/arch_overview/http/http_connection_management.rst +++ b/docs/root/intro/arch_overview/http/http_connection_management.rst @@ -51,7 +51,7 @@ Retry plugin configuration Normally during retries, host selection follows the same process as the original request. Retry plugins can be used to modify this behavior, and they fall into two categories: -* :ref:`Host Predicates `: +* :ref:`Host Predicates `: These predicates can be used to "reject" a host, which will cause host selection to be reattempted. Any number of these predicates can be specified, and the host will be rejected if any of the predicates reject the host. @@ -62,12 +62,12 @@ can be used to modify this behavior, and they fall into two categories: * *envoy.retry_host_predicates.omit_canary_hosts*: This will reject any host that is a marked as canary host. Hosts are marked by setting ``canary: true`` for the ``envoy.lb`` filter in the endpoint's filter metadata. - See :ref:`LbEndpoint ` for more details. + See :ref:`LbEndpoint ` for more details. * *envoy.retry_host_predicates.omit_host_metadata*: This will reject any host based on predefined metadata match criteria. See the configuration example below for more details. -* :ref:`Priority Predicates`: These predicates can +* :ref:`Priority Predicates`: These predicates can be used to adjust the priority load used when selecting a priority for a retry attempt. Only one such predicate may be specified. @@ -77,7 +77,7 @@ can be used to modify this behavior, and they fall into two categories: and adjust the priority load such that other priorities will be targeted in subsequent retry attempts. Host selection will continue until either the configured predicates accept the host or a configurable -:ref:`max attempts ` has been reached. +:ref:`max attempts ` has been reached. These plugins can be combined to affect both host selection and priority load. Envoy can also be extended with custom retry plugins similar to how custom filters can be added. @@ -108,7 +108,7 @@ To reject a host based on its metadata, ``envoy.retry_host_predicates.omit_host_ retry_host_predicate: - name: envoy.retry_host_predicates.omit_host_metadata typed_config: - "@type": type.googleapis.com/envoy.config.retry.omit_host_metadata.v2.OmitHostMetadataConfig + "@type": type.googleapis.com/envoy.extensions.retry.host.omit_host_metadata.v3.OmitHostMetadataConfig metadata_match: filter_metadata: envoy.lb: @@ -125,7 +125,7 @@ To configure retries to attempt other priorities during retries, the built-in retry_priority: name: envoy.retry_priorities.previous_priorities typed_config: - "@type": type.googleapis.com/envoy.config.retry.previous_priorities.PreviousPrioritiesConfig + "@type": type.googleapis.com/envoy.extensions.retry.priority.previous_priorities.v3.PreviousPrioritiesConfig update_frequency: 2 This will target priorities in subsequent retry attempts that haven't been already used. The ``update_frequency`` parameter decides how @@ -143,7 +143,7 @@ previously attempted priorities. retry_priority: name: envoy.retry_priorities.previous_priorities typed_config: - "@type": type.googleapis.com/envoy.config.retry.previous_priorities.PreviousPrioritiesConfig + "@type": type.googleapis.com/envoy.extensions.retry.priority.previous_priorities.v3.PreviousPrioritiesConfig update_frequency: 2 .. 
_arch_overview_internal_redirects: @@ -156,8 +156,8 @@ synthesizing a new request, sending it to the upstream specified by the new rout returning the redirected response as the response to the original request. Internal redirects are configured via the ref:`internal redirect action -` field and -`max internal redirects ` field in +` field and +`max internal redirects ` field in route configuration. When redirect handling is on, any 302 response from upstream is subject to the redirect being handled by Envoy. @@ -168,7 +168,7 @@ For a redirect to be handled successfully it must pass the following checks: 3. The request must have been fully processed by Envoy. 4. The request must not have a body. 5. The number of previously handled internal redirect within a given downstream request does not exceed - `max internal redirects ` of the route + `max internal redirects ` of the route that the request or redirected request is hitting. Any failure will result in redirect being passed downstream instead. @@ -177,7 +177,7 @@ Since a redirected request may be bounced between different routes, any route in 1. does not have internal redirect enabled 2. or has a `max internal redirects - ` + ` smaller or equal to the redirect chain length when the redirect chain hits it will cause the redirect to be passed downstream. diff --git a/docs/root/intro/arch_overview/http/http_proxy.rst b/docs/root/intro/arch_overview/http/http_proxy.rst index 2ed691203abb..50dc9ce2ec01 100644 --- a/docs/root/intro/arch_overview/http/http_proxy.rst +++ b/docs/root/intro/arch_overview/http/http_proxy.rst @@ -8,7 +8,7 @@ HTTP dynamic forward proxy HTTP dynamic forward proxy support should be considered alpha and not production ready. Through the combination of both an :ref:`HTTP filter ` and -:ref:`custom cluster `, +:ref:`custom cluster `, Envoy supports HTTP dynamic forward proxy. This means that Envoy can perform the role of an HTTP proxy without prior knowledge of all configured DNS addresses, while still retaining the vast majority of Envoy's benefits including asynchronous DNS resolution. The implementation works as @@ -49,15 +49,15 @@ Memory usage detail's for Envoy's dynamic forward proxy support are as follows: * Hosts removed via TTL are purged once all active connections stop referring to them and all used memory is regained. * The :ref:`max_hosts - ` field can + ` field can be used to limit the number of hosts that the DNS cache will store at any given time. * The cluster's :ref:`max_pending_requests - ` circuit breaker can + ` circuit breaker can be used to limit the number of requests that are pending waiting for the DNS cache to load a host. * Long lived upstream connections can have the underlying logical host expire via TTL while the connection is still open. Upstream requests and connections are still bound by other cluster circuit breakers such as :ref:`max_requests - `. The current assumption is that + `. The current assumption is that host data shared between connections uses a marginal amount of memory compared to the connections and requests themselves, making it not worth controlling independently. diff --git a/docs/root/intro/arch_overview/http/http_routing.rst b/docs/root/intro/arch_overview/http/http_routing.rst index d71ae677a348..ff24b0fd512e 100644 --- a/docs/root/intro/arch_overview/http/http_routing.rst +++ b/docs/root/intro/arch_overview/http/http_routing.rst @@ -15,41 +15,41 @@ request. 
The router filter supports the following features: * Virtual hosts that map domains/authorities to a set of routing rules. * Prefix and exact path matching rules (both :ref:`case sensitive - ` and case insensitive). Regex/slug + ` and case insensitive). Regex/slug matching is not currently supported, mainly because it makes it difficult/impossible to programmatically determine whether routing rules conflict with each other. For this reason we don’t recommend regex/slug routing at the reverse proxy level, however we may add support in the future depending on demand. -* :ref:`TLS redirection ` at the virtual host +* :ref:`TLS redirection ` at the virtual host level. -* :ref:`Path `/:ref:`host - ` redirection at the route level. +* :ref:`Path `/:ref:`host + ` redirection at the route level. * :ref:`Direct (non-proxied) HTTP responses ` at the route level. -* :ref:`Explicit host rewriting `. -* :ref:`Automatic host rewriting ` based on +* :ref:`Explicit host rewriting `. +* :ref:`Automatic host rewriting ` based on the DNS name of the selected upstream host. -* :ref:`Prefix rewriting `. -* :ref:`Path rewriting using a regular expression and capture groups `. +* :ref:`Prefix rewriting `. +* :ref:`Path rewriting using a regular expression and capture groups `. * :ref:`Request retries ` specified either via HTTP header or via route configuration. * Request timeout specified either via :ref:`HTTP header ` or via :ref:`route configuration - `. + `. * :ref:`Request hedging ` for retries in response to a request (per try) timeout. * Traffic shifting from one upstream cluster to another via :ref:`runtime values - ` (see :ref:`traffic shifting/splitting + ` (see :ref:`traffic shifting/splitting `). * Traffic splitting across multiple upstream clusters using :ref:`weight/percentage-based routing - ` (see :ref:`traffic shifting/splitting + ` (see :ref:`traffic shifting/splitting `). -* Arbitrary header matching :ref:`routing rules `. +* Arbitrary header matching :ref:`routing rules `. * Virtual cluster specifications. A virtual cluster is specified at the virtual host level and is used by Envoy to generate additional statistics on top of the standard cluster level ones. Virtual clusters can use regex matching. * :ref:`Priority ` based routing. -* :ref:`Hash policy ` based routing. -* :ref:`Absolute urls ` are supported for non-tls forward proxies. +* :ref:`Hash policy ` based routing. +* :ref:`Absolute urls ` are supported for non-tls forward proxies. .. _arch_overview_http_routing_route_scope: @@ -60,8 +60,8 @@ Scoped routing enables Envoy to put constraints on search space of domains and r A :ref:`Route Scope` associates a key with a :ref:`route table `. For each request, a scope key is computed dynamically by the HTTP connection manager to pick the :ref:`route table`. -The Scoped RDS (SRDS) API contains a set of :ref:`Scopes ` resources, each defining independent routing configuration, -along with a :ref:`ScopeKeyBuilder ` +The Scoped RDS (SRDS) API contains a set of :ref:`Scopes ` resources, each defining independent routing configuration, +along with a :ref:`ScopeKeyBuilder ` defining the key construction algorithm used by Envoy to look up the scope corresponding to each request. For example, for the following scoped route configuration, Envoy will look into the "addr" header value, split the header value by ";" first, and use the first value for key 'x-foo-key' as the scope key. @@ -80,8 +80,8 @@ If the "addr" header value is "foo=1;x-foo-key=127.0.0.1;x-bar-key=1.1.1.1", the .. 
_arch_overview_http_routing_route_table: -For a key to match a :ref:`ScopedRouteConfiguration`, the number of fragments in the computed key has to match that of -the :ref:`ScopedRouteConfiguration`. +For a key to match a :ref:`ScopedRouteConfiguration`, the number of fragments in the computed key has to match that of +the :ref:`ScopedRouteConfiguration`. Then fragments are matched in order. A missing fragment (treated as NULL) in the built key makes the request unable to match any scope, i.e. no route entry can be found for the request. @@ -89,7 +89,7 @@ Route table ----------- The :ref:`configuration ` for the HTTP connection manager owns the :ref:`route -table ` that is used by all configured HTTP filters. Although the +table ` that is used by all configured HTTP filters. Although the router filter is the primary consumer of the route table, other filters also have access in case they want to make decisions based on the ultimate destination of the request. For example, the built in rate limit filter consults the route table to determine whether the global rate limit service @@ -103,7 +103,7 @@ Retry semantics --------------- Envoy allows retries to be configured both in the :ref:`route configuration -` as well as for specific requests via :ref:`request +` as well as for specific requests via :ref:`request headers `. The following configurations are possible: * **Maximum number of retries**: Envoy will continue to retry any number of times. An exponential @@ -112,14 +112,14 @@ headers `. The following configurat * **Retry conditions**: Envoy can retry on different types of conditions depending on application requirements. For example, network failure, all 5xx response codes, idempotent 4xx response codes, etc. -* **Retry budgets**: Envoy can limit the proportion of active requests via :ref:`retry budgets ` that can be retries to +* **Retry budgets**: Envoy can limit the proportion of active requests via :ref:`retry budgets ` that can be retries, to prevent their contribution to large increases in traffic volume. * **Host selection retry plugins**: Envoy can be configured to apply additional logic to the host selection logic when selecting hosts for retries. Specifying a - :ref:`retry host predicate ` + :ref:`retry host predicate ` allows for reattempting host selection when certain hosts are selected (e.g. when an already attempted host is selected), while a - :ref:`retry priority ` can be + :ref:`retry priority ` can be configured to adjust the priority load used when selecting a priority for retries. Note that Envoy retries requests when :ref:`x-envoy-overloaded @@ -133,7 +133,7 @@ Request Hedging --------------- Envoy supports request hedging which can be enabled by specifying a :ref:`hedge -policy `. This means that Envoy will race +policy `. This means that Envoy will race multiple simultaneous upstream requests and return the response associated with the first acceptable response headers to the downstream. The retry policy is used to determine whether a response should be returned or whether more @@ -153,7 +153,7 @@ response, creating two retriable events. Priority routing ---------------- -Envoy supports priority routing at the :ref:`route ` level. +Envoy supports priority routing at the :ref:`route ` level. The current priority implementation uses different :ref:`connection pool ` and :ref:`circuit breaking ` settings for each priority level. 
This means that even for HTTP/2 requests, two physical connections will be used to @@ -172,9 +172,9 @@ that do not require proxying to an upstream server. There are two ways to specify a direct response in a Route: -* Set the :ref:`direct_response ` field. +* Set the :ref:`direct_response ` field. This works for all HTTP response statuses. -* Set the :ref:`redirect ` field. This works for +* Set the :ref:`redirect ` field. This works for redirect response statuses only, but it simplifies the setting of the *Location* header. A direct response has an HTTP status code and an optional body. The Route configuration diff --git a/docs/root/intro/arch_overview/http/upgrades.rst b/docs/root/intro/arch_overview/http/upgrades.rst index 6f3b88728273..5d0620d44bb6 100644 --- a/docs/root/intro/arch_overview/http/upgrades.rst +++ b/docs/root/intro/arch_overview/http/upgrades.rst @@ -6,17 +6,17 @@ HTTP upgrades Envoy Upgrade support is intended mainly for WebSocket and CONNECT support, but may be used for arbitrary upgrades as well. Upgrades pass both the HTTP headers and the upgrade payload through an HTTP filter chain. One may configure the -:ref:`upgrade_configs ` +:ref:`upgrade_configs ` with or without custom filter chains. If only the -:ref:`upgrade_type ` +:ref:`upgrade_type ` is specified, both the upgrade headers, any request and response body, and HTTP data payload will pass through the default HTTP filter chain. To avoid the use of HTTP-only filters for upgrade payload, one can set up custom -:ref:`filters ` +:ref:`filters ` for the given upgrade type, up to and including only using the router filter to send the HTTP data upstream. -Upgrades can be enabled or disabled on a :ref:`per-route ` basis. +Upgrades can be enabled or disabled on a :ref:`per-route ` basis. Any per-route enabling/disabling automatically overrides HttpConnectionManager configuration as laid out below, but custom filter chains can only be configured on a per-HttpConnectionManager basis. @@ -49,7 +49,7 @@ In this case, if a client is for example using WebSocket, we want the Websocket upstream server functionally intact, which means it needs to traverse the HTTP/2 hop. This is accomplished via `extended CONNECT `_ support, -turned on by setting :ref:`allow_connect ` +turned on by setting :ref:`allow_connect ` true at the second layer Envoy. The WebSocket request will be transformed into an HTTP/2 CONNECT stream, with :protocol header indicating the original upgrade, traverse the HTTP/2 hop, and be downgraded back into an HTTP/1 diff --git a/docs/root/intro/arch_overview/intro/threading_model.rst b/docs/root/intro/arch_overview/intro/threading_model.rst index 110660c80c73..7b86791a4a4a 100644 --- a/docs/root/intro/arch_overview/intro/threading_model.rst +++ b/docs/root/intro/arch_overview/intro/threading_model.rst @@ -22,5 +22,5 @@ balancing incoming connections. However, for some workloads, particularly those number of very long lived connections (e.g., service mesh HTTP2/gRPC egress), it may be desirable to have Envoy forcibly balance connections between worker threads. To support this behavior, Envoy allows for different types of :ref:`connection balancing -` to be configured on each :ref:`listener +` to be configured on each :ref:`listener `. 
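For reference, a listener fragment that opts into exact connection balancing might look roughly like the following; the listener name, address and port are placeholders and the filter chains are omitted:

.. code-block:: yaml

  listeners:
  - name: listener_0
    address:
      socket_address: { address: 0.0.0.0, port_value: 10000 }
    connection_balance_config:
      exact_balance: {}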
diff --git a/docs/root/intro/arch_overview/listeners/listeners.rst b/docs/root/intro/arch_overview/listeners/listeners.rst index a6e201fdb116..8802a1dcb776 100644 --- a/docs/root/intro/arch_overview/listeners/listeners.rst +++ b/docs/root/intro/arch_overview/listeners/listeners.rst @@ -12,8 +12,8 @@ TCP --- Each listener is independently configured with some number :ref:`filter chains -`, where an individual chain is selected based on its -:ref:`match criteria `. An individual filter chain is +`, where an individual chain is selected based on its +:ref:`match criteria `. An individual filter chain is composed of one or more network level (L3/L4) :ref:`filters `. When a new connection is received on a listener, the appropriate filter chain is selected, and the configured connection local filter stack is instantiated and begins processing subsequent events. diff --git a/docs/root/intro/arch_overview/listeners/tcp_proxy.rst b/docs/root/intro/arch_overview/listeners/tcp_proxy.rst index a8f5a9686f29..cceb6abe8064 100644 --- a/docs/root/intro/arch_overview/listeners/tcp_proxy.rst +++ b/docs/root/intro/arch_overview/listeners/tcp_proxy.rst @@ -10,7 +10,7 @@ such as the :ref:`MongoDB filter ` or the :ref:`rate limit ` filter. The TCP proxy filter will respect the -:ref:`connection limits ` +:ref:`connection limits ` imposed by each upstream cluster's global resource manager. The TCP proxy filter checks with the upstream cluster's resource manager if it can create a connection without going over that cluster's maximum number of connections, if it can't the TCP proxy will not make the connection. diff --git a/docs/root/intro/arch_overview/observability/access_logging.rst b/docs/root/intro/arch_overview/observability/access_logging.rst index fa41bd0d377b..46ce106d89f4 100644 --- a/docs/root/intro/arch_overview/observability/access_logging.rst +++ b/docs/root/intro/arch_overview/observability/access_logging.rst @@ -12,7 +12,7 @@ features: to different access logs. Downstream connection access logging can be enabled using :ref:`listener access -logs`. The listener access logs complement +logs`. The listener access logs complement HTTP request access logging and can be enabled separately and independently from filter access logs. @@ -22,8 +22,8 @@ Access log filters ------------------ Envoy supports several built-in -:ref:`access log filters` and -:ref:`extension filters` +:ref:`access log filters` and +:ref:`extension filters` that are registered at runtime. Access logging sinks @@ -48,6 +48,6 @@ Further reading --------------- * Access log :ref:`configuration `. -* File :ref:`access log sink `. -* gRPC :ref:`Access Log Service (ALS) ` +* File :ref:`access log sink `. +* gRPC :ref:`Access Log Service (ALS) ` sink. diff --git a/docs/root/intro/arch_overview/observability/statistics.rst b/docs/root/intro/arch_overview/observability/statistics.rst index 8cab36f9c98f..1ff0d8957811 100644 --- a/docs/root/intro/arch_overview/observability/statistics.rst +++ b/docs/root/intro/arch_overview/observability/statistics.rst @@ -19,12 +19,12 @@ mesh give a very detailed picture of each hop and overall network health. The st documented in detail in the operations guide. As of the v2 API, Envoy has the ability to support custom, pluggable sinks. :ref:`A -few standard sink implementations` are included in Envoy. +few standard sink implementations` are included in Envoy. Some sinks also support emitting statistics with tags/dimensions. 
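As an illustration of plugging in one of the standard sinks, a bootstrap fragment along these lines would flush stats to a local statsd agent; the address and port are placeholders:

.. code-block:: yaml

  stats_sinks:
  - name: envoy.stat_sinks.statsd
    typed_config:
      "@type": type.googleapis.com/envoy.config.metrics.v3.StatsdSink
      address:
        socket_address: { address: 127.0.0.1, port_value: 8125 }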
Within Envoy and throughout the documentation, statistics are identified by a canonical string representation. The dynamic portions of these strings are stripped to become tags. Users can -configure this behavior via :ref:`the Tag Specifier configuration `. +configure this behavior via :ref:`the Tag Specifier configuration `. Envoy emits three types of values as statistics: @@ -37,4 +37,4 @@ Internally, counters and gauges are batched and periodically flushed to improve Histograms are written as they are received. Note: what were previously referred to as timers have become histograms as the only difference between the two representations was the units. -* :ref:`v2 API reference `. +* :ref:`v3 API reference `. diff --git a/docs/root/intro/arch_overview/observability/tracing.rst b/docs/root/intro/arch_overview/observability/tracing.rst index 74657ff4ca99..26f057468d76 100644 --- a/docs/root/intro/arch_overview/observability/tracing.rst +++ b/docs/root/intro/arch_overview/observability/tracing.rst @@ -11,7 +11,7 @@ sources of latency. Envoy supports three features related to system wide tracing * **Request ID generation**: Envoy will generate UUIDs when needed and populate the :ref:`config_http_conn_man_headers_x-request-id` HTTP header. Applications can forward the - x-request-id header for unified logging as well as tracing. The behavior can be configured on per :ref:`HTTP connection manager` basis using an extension. + x-request-id header for unified logging as well as tracing. The behavior can be configured on per :ref:`HTTP connection manager` basis using an extension. * **Client trace ID joining**: The :ref:`config_http_conn_man_headers_x-client-trace-id` header can be used to join untrusted request IDs to the trusted internal :ref:`config_http_conn_man_headers_x-request-id`. @@ -28,7 +28,7 @@ Support for other tracing providers would not be difficult to add. How to initiate a trace ----------------------- The HTTP connection manager that handles the request must have the :ref:`tracing -` object set. There are several ways tracing can be +` object set. There are several ways tracing can be initiated: * By an external client via the :ref:`config_http_conn_man_headers_x-client-trace-id` @@ -39,7 +39,7 @@ initiated: runtime setting. The router filter is also capable of creating a child span for egress calls via the -:ref:`start_child_span ` option. +:ref:`start_child_span ` option. Trace context propagation ------------------------- @@ -95,7 +95,7 @@ associated with it. Each span generated by Envoy contains the following data: header. * HTTP request URL, method, protocol and user-agent. * Additional custom tags set via :ref:`custom_tags - `. + `. * Upstream cluster name and address. * HTTP response status code. * GRPC response status and message (if available). @@ -103,7 +103,7 @@ associated with it. Each span generated by Envoy contains the following data: * Tracing system-specific metadata. The span also includes a name (or operation) which by default is defined as the host of the invoked -service. However this can be customized using a :ref:`envoy_api_msg_route.Decorator` on +service. However this can be customized using a :ref:`envoy_v3_api_msg_config.route.v3.Decorator` on the route. The name can also be overridden using the :ref:`config_http_filters_router_x-envoy-decorator-operation` header. @@ -111,5 +111,5 @@ Envoy automatically sends spans to tracing collectors. 
Depending on the tracing multiple spans are stitched together using common information such as the globally unique request ID :ref:`config_http_conn_man_headers_x-request-id` (LightStep) or the trace ID configuration (Zipkin and Datadog). See -:ref:`v2 API reference ` +:ref:`v3 API reference ` for more information on how to setup tracing in Envoy. diff --git a/docs/root/intro/arch_overview/operations/draining.rst b/docs/root/intro/arch_overview/operations/draining.rst index eeb203a21935..0a0932e57a92 100644 --- a/docs/root/intro/arch_overview/operations/draining.rst +++ b/docs/root/intro/arch_overview/operations/draining.rst @@ -14,7 +14,7 @@ various events. Draining occurs at the following times: `. Each :ref:`configured listener ` has a :ref:`drain_type -` setting which controls when draining takes place. The currently +` setting which controls when draining takes place. The currently supported values are: default diff --git a/docs/root/intro/arch_overview/operations/dynamic_configuration.rst b/docs/root/intro/arch_overview/operations/dynamic_configuration.rst index e2c0a00ea14d..458a4589d008 100644 --- a/docs/root/intro/arch_overview/operations/dynamic_configuration.rst +++ b/docs/root/intro/arch_overview/operations/dynamic_configuration.rst @@ -13,14 +13,14 @@ overview of the options currently available. * Top level configuration :ref:`reference `. * :ref:`Reference configurations `. -* Envoy :ref:`v2 API overview `. +* Envoy :ref:`v3 API overview `. * :ref:`xDS API endpoints `. Fully static ------------ In a fully static configuration, the implementor provides a set of :ref:`listeners -` (and :ref:`filter chains `), :ref:`clusters +` (and :ref:`filter chains `), :ref:`clusters `, etc. Dynamic host discovery is only possible via DNS based :ref:`service discovery `. Configuration reloads must take place via the built in :ref:`hot restart ` mechanism. @@ -50,7 +50,7 @@ and remove clusters as specified by the API. This API allows implementors to bui which Envoy does not need to be aware of all upstream clusters at initial configuration time. Typically, when doing HTTP routing along with CDS (but without route discovery service), implementors will make use of the router's ability to forward requests to a cluster specified in an -:ref:`HTTP request header `. +:ref:`HTTP request header `. Although it is possible to use CDS without EDS by specifying fully static clusters, we recommend still using the EDS API for clusters specified via CDS. Internally, when a cluster definition is diff --git a/docs/root/intro/arch_overview/operations/init.rst b/docs/root/intro/arch_overview/operations/init.rst index 4ce245d78f51..51effe697b3a 100644 --- a/docs/root/intro/arch_overview/operations/init.rst +++ b/docs/root/intro/arch_overview/operations/init.rst @@ -11,16 +11,16 @@ accepting new connections. multi-phase initialization where it first initializes static/DNS clusters, then predefined :ref:`EDS ` clusters. Then it initializes :ref:`CDS ` if applicable, waits for one response (or failure) - for a :ref:`bounded period of time `, + for a :ref:`bounded period of time `, and does the same primary/secondary initialization of CDS provided clusters. * If clusters use :ref:`active health checking `, Envoy also does a single active health check round. * Once cluster manager initialization is done, :ref:`RDS ` and :ref:`LDS ` initialize (if applicable). The server waits - for a :ref:`bounded period of time ` + for a :ref:`bounded period of time ` for at least one response (or failure) for LDS/RDS requests. 
After which, it starts accepting connections. * If LDS itself returns a listener that needs an RDS response, Envoy further waits for - a :ref:`bounded period of time ` until an RDS + a :ref:`bounded period of time ` until an RDS response (or failure) is received. Note that this process takes place on every future listener addition via LDS and is known as :ref:`listener warming `. * After all of the previous steps have taken place, the listeners start accepting new connections. @@ -28,6 +28,6 @@ accepting new connections. processing new connections before the draining of the old process begins. A key design principle of initialization is that an Envoy is always guaranteed to initialize within -:ref:`initial_fetch_timeout `, +:ref:`initial_fetch_timeout `, with a best effort made to obtain the complete set of xDS configuration within that subject to the management server availability. diff --git a/docs/root/intro/arch_overview/other_protocols/grpc.rst b/docs/root/intro/arch_overview/other_protocols/grpc.rst index 87ae32984c88..01275183b155 100644 --- a/docs/root/intro/arch_overview/other_protocols/grpc.rst +++ b/docs/root/intro/arch_overview/other_protocols/grpc.rst @@ -47,8 +47,8 @@ control plane, where it :ref:`fetches configuration from management server(s) *gRPC services*. When specifying gRPC services, it's necessary to specify the use of either the -:ref:`Envoy gRPC client ` or the -:ref:`Google C++ gRPC client `. We +:ref:`Envoy gRPC client ` or the +:ref:`Google C++ gRPC client `. We discuss the tradeoffs in this choice below. The Envoy gRPC client is a minimal custom implementation of gRPC that makes use diff --git a/docs/root/intro/arch_overview/other_protocols/redis.rst b/docs/root/intro/arch_overview/other_protocols/redis.rst index d95fc35db179..10c85d05a287 100644 --- a/docs/root/intro/arch_overview/other_protocols/redis.rst +++ b/docs/root/intro/arch_overview/other_protocols/redis.rst @@ -27,7 +27,7 @@ The Redis project offers a thorough reference on partitioning as it relates to R * Prefix routing. * Separate downstream client and upstream server authentication. * Request mirroring for all requests or write requests only. -* Control :ref:`read requests routing`. This only works with Redis Cluster. +* Control :ref:`read requests routing`. This only works with Redis Cluster. **Planned future enhancements**: @@ -47,11 +47,11 @@ For filter configuration details, see the Redis proxy filter :ref:`configuration reference `. The corresponding cluster definition should be configured with -:ref:`ring hash load balancing `. +:ref:`ring hash load balancing `. If :ref:`active health checking ` is desired, the cluster should be configured with a :ref:`custom health check -` which configured as a +` which configured as a :ref:`Redis health checker `. If passive healthchecking is desired, also configure @@ -82,7 +82,7 @@ following information: * Nodes entering or leaving the cluster. For topology configuration details, see the Redis Cluster -:ref:`v2 API reference `. +:ref:`v2 API reference `. Every Redis cluster has its own extra statistics tree rooted at *cluster..redis_cluster.* with the following statistics: @@ -96,7 +96,7 @@ Every Redis cluster has its own extra statistics tree rooted at *cluster.. .. _arch_overview_redis_cluster_command_stats: -Per-cluster command statistics can be enabled via the setting :ref:`enable_command_stats `: +Per-cluster command statistics can be enabled via the setting :ref:`enable_command_stats `: .. 
csv-table:: :header: Name, Type, Description diff --git a/docs/root/intro/arch_overview/security/ext_authz_filter.rst b/docs/root/intro/arch_overview/security/ext_authz_filter.rst index aaa8b2a3610c..b6935e96412d 100644 --- a/docs/root/intro/arch_overview/security/ext_authz_filter.rst +++ b/docs/root/intro/arch_overview/security/ext_authz_filter.rst @@ -21,8 +21,8 @@ The external authorization service cluster may be either statically configured o the :ref:`Cluster Discovery Service `. If the external service is not available when a request comes in then whether the request is authorized or not is defined by the configuration setting of *failure_mode_allow* configuration in the applicable -:ref:`network filter ` or -:ref:`HTTP filter `. If it is set to +:ref:`network filter ` or +:ref:`HTTP filter `. If it is set to true then the request will be permitted (fail open) otherwise it will be denied. The default setting is *false*. @@ -32,7 +32,7 @@ Service Definition The context of the traffic is passed on to an external authorization service using the service definition listed here. The content of the request that are passed to an authorization service is specified by -:ref:`CheckRequest `. +:ref:`CheckRequest `. .. toctree:: :glob: diff --git a/docs/root/intro/arch_overview/security/rbac_filter.rst b/docs/root/intro/arch_overview/security/rbac_filter.rst index f5c22ebfef81..abccdbee6dd5 100644 --- a/docs/root/intro/arch_overview/security/rbac_filter.rst +++ b/docs/root/intro/arch_overview/security/rbac_filter.rst @@ -19,9 +19,9 @@ Policy ------ The RBAC filter checks the request based on a list of -:ref:`policies `. A policy consists of a list of -:ref:`permissions ` and -:ref:`principals `. The permission specifies the actions of +:ref:`policies `. A policy consists of a list of +:ref:`permissions ` and +:ref:`principals `. The permission specifies the actions of the request, for example, the method and path of a HTTP request. The principal specifies the downstream client identities of the request, for example, the URI SAN of the downstream client certificate. A policy is matched if its permissions and principals are matched at the same time. @@ -30,7 +30,7 @@ Shadow Policy ------------- The filter can be configured with a -:ref:`shadow policy ` that doesn't +:ref:`shadow policy ` that doesn't have any effect (i.e. not deny the request) but only emit stats and log the result. This is useful for testing a rule before applying in production. diff --git a/docs/root/intro/arch_overview/security/ssl.rst b/docs/root/intro/arch_overview/security/ssl.rst index ebcb2e8f6838..eee933e3b8e8 100644 --- a/docs/root/intro/arch_overview/security/ssl.rst +++ b/docs/root/intro/arch_overview/security/ssl.rst @@ -3,8 +3,8 @@ TLS === -Envoy supports both :ref:`TLS termination ` in listeners as well as -:ref:`TLS origination ` when making connections to upstream +Envoy supports both :ref:`TLS termination ` in listeners as well as +:ref:`TLS origination ` when making connections to upstream clusters. Support is sufficient for Envoy to perform standard edge proxy duties for modern web services as well as to initiate connections with external services that have advanced TLS requirements (TLS1.2, SNI, etc.). Envoy supports the following TLS features: @@ -15,7 +15,7 @@ requirements (TLS1.2, SNI, etc.). Envoy supports the following TLS features: * **Certificate verification and pinning**: Certificate verification options include basic chain verification, subject name verification, and hash pinning. 
* **Certificate revocation**: Envoy can check peer certificates against a certificate revocation list - (CRL) if one is :ref:`provided `. + (CRL) if one is :ref:`provided `. * **ALPN**: TLS listeners support ALPN. The HTTP connection manager uses this information (in addition to protocol inference) to determine whether a client is speaking HTTP/1.1 or HTTP/2. * **SNI**: SNI is supported for both server (listener) and client (upstream) connections. @@ -83,7 +83,7 @@ Example configuration transport_socket: name: envoy.transport_sockets.tls typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext common_tls_context: validation_context: trusted_ca: @@ -105,19 +105,20 @@ Example configuration transport_socket: name: envoy.transport_sockets.tls typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext common_tls_context: tls_certificates: certificate_chain: { "filename": "/cert.crt" } private_key: { "filename": "/cert.key" } validation_context: - verify_subject_alt_name: [ foo ] + match_subject_alt_names: + exact: "foo" trusted_ca: filename: /etc/ssl/certs/ca-certificates.crt */etc/ssl/certs/ca-certificates.crt* is the default path for the system CA bundle on Debian systems. -:ref:`trusted_ca ` along with -:ref:`verify_subject_alt_name ` +:ref:`trusted_ca ` along with +:ref:`match_subject_alt_names ` makes Envoy verify the server identity of *127.0.0.2:1234* as "foo" in the same way as e.g. cURL does on standard Debian installations. Common paths for system CA bundles on Linux and BSD are: @@ -128,12 +129,12 @@ does on standard Debian installations. Common paths for system CA bundles on Lin * /usr/local/etc/ssl/cert.pem (FreeBSD) * /etc/ssl/cert.pem (OpenBSD) -See the reference for :ref:`UpstreamTlsContexts ` and -:ref:`DownstreamTlsContexts ` for other TLS options. +See the reference for :ref:`UpstreamTlsContexts ` and +:ref:`DownstreamTlsContexts ` for other TLS options. .. attention:: - If only :ref:`trusted_ca ` is + If only :ref:`trusted_ca ` is specified, Envoy will verify the certificate chain of the presented certificate, but not its subject name, hash, etc. Other validation context configuration is typically required depending on the deployment. @@ -143,22 +144,22 @@ See the reference for :ref:`UpstreamTlsContexts ` support multiple TLS +:ref:`DownstreamTlsContexts ` support multiple TLS certificates. These may be a mix of RSA and P-256 ECDSA certificates. The following rules apply: * Only one certificate of a particular type (RSA or ECDSA) may be specified. * Non-P-256 server ECDSA certificates are rejected. * If the client supports P-256 ECDSA, a P-256 ECDSA certificate will be selected if present in the - :ref:`DownstreamTlsContext `. + :ref:`DownstreamTlsContext `. * If the client only supports RSA certificates, a RSA certificate will be selected if present in the - :ref:`DownstreamTlsContext `. + :ref:`DownstreamTlsContext `. * Otherwise, the first certificate listed is used. This will result in a failed handshake if the client only supports RSA certificates and the server only has ECDSA certificates. * Static and SDS certificates may not be mixed in a given :ref:`DownstreamTlsContext - `. + `. Only a single TLS certificate is supported today for :ref:`UpstreamTlsContexts -`. +`. 
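A rough sketch of a listener transport socket offering both certificate types, so that the selection rules above have something to choose between, might look like this (file names are placeholders):

.. code-block:: yaml

  transport_socket:
    name: envoy.transport_sockets.tls
    typed_config:
      "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext
      common_tls_context:
        tls_certificates:
        - certificate_chain: { filename: "rsa_cert.pem" }
          private_key: { filename: "rsa_key.pem" }
        - certificate_chain: { filename: "ecdsa_p256_cert.pem" }
          private_key: { filename: "ecdsa_p256_key.pem" }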
Secret discovery service (SDS) ------------------------------ @@ -188,7 +189,7 @@ Trouble shooting When Envoy originates TLS when making connections to upstream clusters, any errors will be logged into :ref:`UPSTREAM_TRANSPORT_FAILURE_REASON` field or -:ref:`AccessLogCommon.upstream_transport_failure_reason` field. +:ref:`AccessLogCommon.upstream_transport_failure_reason` field. Common errors are: * ``Secret is not supplied by SDS``: Envoy is still waiting SDS to deliver key/cert or root CA. diff --git a/docs/root/intro/arch_overview/upstream/aggregate_cluster.rst b/docs/root/intro/arch_overview/upstream/aggregate_cluster.rst index 96216c565145..e51ea4a1a829 100644 --- a/docs/root/intro/arch_overview/upstream/aggregate_cluster.rst +++ b/docs/root/intro/arch_overview/upstream/aggregate_cluster.rst @@ -7,8 +7,8 @@ Aggregate cluster is used for failover between clusters with different configura upstream cluster to STRICT_DNS upstream cluster, from cluster using ROUND_ROBIN load balancing policy to cluster using MAGLEV, from cluster with 0.1s connection timeout to cluster with 1s connection timeout, etc. Aggregate cluster loosely couples multiple clusters by referencing their -name in the :ref:`configuration `. The -fallback priority is defined implicitly by the ordering in the :ref:`clusters list `. +name in the :ref:`configuration `. The +fallback priority is defined implicitly by the ordering in the :ref:`clusters list `. Aggregate cluster uses tiered load balancing. The load balancer chooses cluster and priority first and then delegates the load balancing to the load balancer of the selected cluster. The top level load balancer reuses the existing load balancing algorithm by linearizing the priority set of @@ -54,14 +54,14 @@ A sample aggregate cluster configuration could be: cluster_type: name: envoy.clusters.aggregate typed_config: - "@type": type.googleapis.com/envoy.config.cluster.aggregate.v2alpha.ClusterConfig + "@type": type.googleapis.com/envoy.extensions.clusters.aggregate.v3.ClusterConfig clusters: # cluster primary, secondary and tertiary should be defined outside. - primary - secondary - tertiary -Note: :ref:`PriorityLoad retry plugins ` won't +Note: :ref:`PriorityLoad retry plugins ` won't work for aggregate cluster because the aggregate load balancer will override the *PriorityLoad* during load balancing. diff --git a/docs/root/intro/arch_overview/upstream/circuit_breaking.rst b/docs/root/intro/arch_overview/upstream/circuit_breaking.rst index 532d9645bd56..f0a51e344ccf 100644 --- a/docs/root/intro/arch_overview/upstream/circuit_breaking.rst +++ b/docs/root/intro/arch_overview/upstream/circuit_breaking.rst @@ -17,8 +17,8 @@ configure and code each application independently. Envoy supports various types * **Cluster maximum pending requests**: The maximum number of requests that will be queued while waiting for a ready connection pool connection. Requests are added to the list of pending requests whenever there aren't enough upstream connections available to immediately dispatch - the request. For HTTP/2 connections, if :ref:`max concurrent streams ` - and :ref:`max requests per connection ` are not + the request. For HTTP/2 connections, if :ref:`max concurrent streams ` + and :ref:`max requests per connection ` are not configured, all requests will be multiplexed over the same connection so this circuit breaker will only be hit when no connection is already established. 
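A cluster fragment that bounds both dimensions could look roughly like the following; the numbers are arbitrary and only meant to make the relationship between the two limits concrete:

.. code-block:: yaml

  clusters:
  - name: some_service
    connect_timeout: 5s
    max_requests_per_connection: 1000
    http2_protocol_options:
      max_concurrent_streams: 100

With these settings a single upstream connection carries at most 100 concurrent streams, and is drained and replaced once it has served 1000 requests in total.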
If this circuit breaker overflows the :ref:`upstream_rq_pending_overflow ` counter for the cluster will @@ -27,7 +27,7 @@ configure and code each application independently. Envoy supports various types in a cluster at any given time. If this circuit breaker overflows the :ref:`upstream_rq_pending_overflow ` counter for the cluster will increment. * **Cluster maximum active retries**: The maximum number of retries that can be outstanding to all - hosts in a cluster at any given time. In general we recommend using :ref:`retry budgets `; however, if static circuit breaking is preferred it should aggressively circuit break + hosts in a cluster at any given time. In general we recommend using :ref:`retry budgets `; however, if static circuit breaking is preferred it should aggressively circuit break retries. This is so that retries for sporadic failures are allowed, but the overall retry volume cannot explode and cause large scale cascading failure. If this circuit breaker overflows the :ref:`upstream_rq_retry_overflow ` counter for the cluster diff --git a/docs/root/intro/arch_overview/upstream/connection_pooling.rst b/docs/root/intro/arch_overview/upstream/connection_pooling.rst index ebb0031e4e97..ee597f4d067d 100644 --- a/docs/root/intro/arch_overview/upstream/connection_pooling.rst +++ b/docs/root/intro/arch_overview/upstream/connection_pooling.rst @@ -21,8 +21,8 @@ HTTP/2 ------ The HTTP/2 connection pool multiplexes multiple requests over a single connection, up to the limits -imposed by :ref:`max concurrent streams ` -and :ref:`max requests per connection `. +imposed by :ref:`max concurrent streams ` +and :ref:`max requests per connection `. The HTTP/2 connection pool establishes only as many connections as are needed to serve the current requests. With no limits, this will be only a single connection. If a GOAWAY frame is received or if the connection reaches the maximum stream limit, the connection pool will drain the existing one. diff --git a/docs/root/intro/arch_overview/upstream/health_checking.rst b/docs/root/intro/arch_overview/upstream/health_checking.rst index c63b59ece132..d6b7bf9cc618 100644 --- a/docs/root/intro/arch_overview/upstream/health_checking.rst +++ b/docs/root/intro/arch_overview/upstream/health_checking.rst @@ -13,7 +13,7 @@ unhealthy, successes required before marking a host healthy, etc.): * **HTTP**: During HTTP health checking Envoy will send an HTTP request to the upstream host. By default, it expects a 200 response if the host is healthy. Expected response codes are - :ref:`configurable `. The + :ref:`configurable `. The upstream host can return 503 if it wants to immediately notify downstream hosts to no longer forward traffic to it. * **L3/L4**: During L3/L4 health checking, Envoy will send a configurable byte buffer to the @@ -24,13 +24,13 @@ unhealthy, successes required before marking a host healthy, etc.): failure. Optionally, Envoy can perform EXISTS on a user-specified key. If the key does not exist it is considered a passing healthcheck. This allows the user to mark a Redis instance for maintenance by setting the specified key to any value and waiting for traffic to drain. See - :ref:`redis_key `. + :ref:`redis_key `. Health checks occur over the transport socket specified for the cluster. This implies that if a cluster is using a TLS-enabled transport socket, the health check will also occur over TLS. 
The -:ref:`TLS options ` used for health check connections +:ref:`TLS options ` used for health check connections can be specified, which is useful if the corresponding upstream is using ALPN-based -:ref:`FilterChainMatch ` with different protocols for +:ref:`FilterChainMatch ` with different protocols for health checks versus data connections. .. _arch_overview_per_cluster_health_check_config: @@ -40,14 +40,14 @@ Per cluster member health check config If active health checking is configured for an upstream cluster, a specific additional configuration for each registered member can be specified by setting the -:ref:`HealthCheckConfig` -in the :ref:`Endpoint` of an :ref:`LbEndpoint` -of each defined :ref:`LocalityLbEndpoints` in a -:ref:`ClusterLoadAssignment`. +:ref:`HealthCheckConfig` +in the :ref:`Endpoint` of an :ref:`LbEndpoint` +of each defined :ref:`LocalityLbEndpoints` in a +:ref:`ClusterLoadAssignment`. -An example of setting up :ref:`health check config` -to set a :ref:`cluster member`'s alternative health check -:ref:`port` is: +An example of setting up :ref:`health check config` +to set a :ref:`cluster member`'s alternative health check +:ref:`port` is: .. code-block:: yaml @@ -68,12 +68,12 @@ Health check event logging -------------------------- A per-healthchecker log of ejection and addition events can optionally be produced by Envoy by -specifying a log file path in :ref:`the HealthCheck config `. +specifying a log file path in :ref:`the HealthCheck config `. The log is structured as JSON dumps of -:ref:`HealthCheckEvent messages `. +:ref:`HealthCheckEvent messages `. Envoy can be configured to log all health check failure events by setting the :ref:`always_log_health_check_failures -flag ` to true. +flag ` to true. Passive health checking ----------------------- @@ -100,7 +100,7 @@ operation: Envoy will respond with a 200 or a 503 depending on the current draining state of the server. * **No pass through, computed from upstream cluster health**: In this mode, the health checking filter will return a 200 or a 503 depending on whether at least a :ref:`specified percentage - ` + ` of the servers are available (healthy + degraded) in one or more upstream clusters. (If the Envoy server is in a draining state, though, it will respond with a 503 regardless of the upstream cluster health.) @@ -151,7 +151,7 @@ is having a different HTTP health checking URL for every service type. The downs is that overall configuration becomes more complicated as every health check URL is fully custom. The Envoy HTTP health checker supports the :ref:`service_name_matcher -` option. If this option is set, +` option. If this option is set, the health checker additionally compares the value of the *x-envoy-upstream-healthchecked-cluster* response header to *service_name_matcher*. If the values do not match, the health check does not pass. The upstream health check filter appends *x-envoy-upstream-healthchecked-cluster* to the response headers. diff --git a/docs/root/intro/arch_overview/upstream/load_balancing/load_balancers.rst b/docs/root/intro/arch_overview/upstream/load_balancing/load_balancers.rst index 48605a7ad6d1..5336dccb14c3 100644 --- a/docs/root/intro/arch_overview/upstream/load_balancing/load_balancers.rst +++ b/docs/root/intro/arch_overview/upstream/load_balancing/load_balancers.rst @@ -6,10 +6,10 @@ Supported load balancers When a filter needs to acquire a connection to a host in an upstream cluster, the cluster manager uses a load balancing policy to determine which host is selected. 
The load balancing policies are pluggable and are specified on a per upstream cluster basis in the :ref:`configuration -`. Note that if no active health checking policy is :ref:`configured +`. Note that if no active health checking policy is :ref:`configured ` for a cluster, all upstream cluster members are considered healthy, unless otherwise specified through -:ref:`health_status `. +:ref:`health_status `. .. _arch_overview_load_balancing_types_round_robin: @@ -18,7 +18,7 @@ Weighted round robin This is a simple policy in which each available upstream host is selected in round robin order. If :ref:`weights -` are assigned to +` are assigned to endpoints in a locality, then a weighted round robin schedule is used, where higher weighted endpoints will appear more often in the rotation to achieve the effective weighting. @@ -32,7 +32,7 @@ The least request load balancer uses different algorithms depending on whether h same or different weights. * *all weights equal*: An O(1) algorithm which selects N random available hosts as specified in the - :ref:`configuration ` (2 by default) and picks the + :ref:`configuration ` (2 by default) and picks the host which has the fewest active requests (`Mitzenmacher et al. `_ has shown that this approach is nearly as good as an O(N) full scan). This is also known as P2C (power of two @@ -66,8 +66,8 @@ partitioning of the circle, however, since the computed hashes could be coincide one another; so it is necessary to multiply the number of hashes per host---for example inserting 100 entries on the ring for host A and 200 entries for host B---to better approximate the desired distribution. Best practice is to explicitly set -:ref:`minimum_ring_size` and -:ref:`maximum_ring_size`, and monitor +:ref:`minimum_ring_size` and +:ref:`maximum_ring_size`, and monitor the :ref:`min_hashes_per_host and max_hashes_per_host gauges` to ensure good distribution. With the ring partitioned appropriately, the addition or removal of one host from a set of N hosts will diff --git a/docs/root/intro/arch_overview/upstream/load_balancing/locality_weight.rst b/docs/root/intro/arch_overview/upstream/load_balancing/locality_weight.rst index 1003d98418ed..f435b7abce6d 100644 --- a/docs/root/intro/arch_overview/upstream/load_balancing/locality_weight.rst +++ b/docs/root/intro/arch_overview/upstream/load_balancing/locality_weight.rst @@ -5,7 +5,7 @@ Locality weighted load balancing One approach to determining how to weight assignments across different zones and geographical locations is by using explicit weights supplied via EDS in the -:ref:`LocalityLbEndpoints ` message. +:ref:`LocalityLbEndpoints ` message. This approach is mutually exclusive with :ref:`zone aware routing `, since in the case of locality aware LB, we rely on the management server to provide the @@ -59,10 +59,10 @@ picked. The load balancer follows these steps: Locality weighted load balancing is configured by setting :ref:`locality_weighted_lb_config -` in the +` in the cluster configuration and providing weights in :ref:`LocalityLbEndpoints -` via :ref:`load_balancing_weight -`. +` via :ref:`load_balancing_weight +`. 
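A condensed sketch of that wiring is shown below: the empty ``locality_weighted_lb_config`` message turns the feature on, and each ``LocalityLbEndpoints`` entry carries its own ``load_balancing_weight``. The localities, weights and addresses are illustrative, and in practice the weights would normally arrive via EDS rather than a static ``load_assignment``.

.. code-block:: yaml

  common_lb_config:
    locality_weighted_lb_config: {}
  load_assignment:
    cluster_name: example_cluster
    endpoints:
    - locality: { region: us-east-1, zone: us-east-1a }
      load_balancing_weight: 3
      lb_endpoints:
      - endpoint:
          address:
            socket_address: { address: 10.0.0.1, port_value: 8080 }
    - locality: { region: us-east-1, zone: us-east-1b }
      load_balancing_weight: 1
      lb_endpoints:
      - endpoint:
          address:
            socket_address: { address: 10.0.0.2, port_value: 8080 }

With these weights, the first locality would receive roughly three quarters of the traffic while both localities are fully healthy.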
This feature is not compatible with :ref:`load balancer subsetting `, since it is not straightforward to diff --git a/docs/root/intro/arch_overview/upstream/load_balancing/original_dst.rst b/docs/root/intro/arch_overview/upstream/load_balancing/original_dst.rst index 212a7e9e645b..38b489476442 100644 --- a/docs/root/intro/arch_overview/upstream/load_balancing/original_dst.rst +++ b/docs/root/intro/arch_overview/upstream/load_balancing/original_dst.rst @@ -8,8 +8,8 @@ cluster `. Upstream based on the downstream connection metadata, i.e., connections are opened to the same address as the destination address of the incoming connection was before the connection was redirected to Envoy. New destinations are added to the cluster by the load balancer on-demand, and the cluster -:ref:`periodically ` cleans out unused hosts -from the cluster. No other :ref:`load balancing policy ` can +:ref:`periodically ` cleans out unused hosts +from the cluster. No other :ref:`load balancing policy ` can be used with original destination clusters. .. _arch_overview_load_balancing_types_original_destination_request_header: diff --git a/docs/root/intro/arch_overview/upstream/load_balancing/overprovisioning.rst b/docs/root/intro/arch_overview/upstream/load_balancing/overprovisioning.rst index e04cf5572213..7162b68f3387 100644 --- a/docs/root/intro/arch_overview/upstream/load_balancing/overprovisioning.rst +++ b/docs/root/intro/arch_overview/upstream/load_balancing/overprovisioning.rst @@ -3,7 +3,7 @@ Overprovisioning Factor ----------------------- Priority levels and localities are considered overprovisioned with -:ref:`this percentage `. +:ref:`this percentage `. Envoy doesn't consider a priority level or locality unavailable until the percentage of available hosts multiplied by the overprovisioning factor drops below 100. The default value is 1.4, so a priority level or locality will not be diff --git a/docs/root/intro/arch_overview/upstream/load_balancing/panic_threshold.rst b/docs/root/intro/arch_overview/upstream/load_balancing/panic_threshold.rst index 2fcfafb4e057..86996cfb891a 100644 --- a/docs/root/intro/arch_overview/upstream/load_balancing/panic_threshold.rst +++ b/docs/root/intro/arch_overview/upstream/load_balancing/panic_threshold.rst @@ -8,13 +8,13 @@ an upstream cluster. However, if the percentage of available hosts in the cluste Envoy will disregard health status and balance either amongst all hosts or no hosts. This is known as the *panic threshold*. The default panic threshold is 50%. This is :ref:`configurable ` via runtime as well as in the -:ref:`cluster configuration `. +:ref:`cluster configuration `. The panic threshold is used to avoid a situation in which host failures cascade throughout the cluster as load increases. There are two modes Envoy can choose from when in a panic state: traffic will either be sent to all hosts, or will be sent to no hosts (and therefore will always fail). This is configured in the -:ref:`cluster configuration `. +:ref:`cluster configuration `. Choosing to fail traffic during panic scenarios can help avoid overwhelming potentially failing upstream services, as it will reduce the load on the upstream service before all hosts have been determined to be unhealthy. 
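As a sketch, both knobs live under the cluster's ``common_lb_config``; the 70% threshold below is illustrative, and the ``fail_traffic_on_panic`` flag (assumed here to sit under ``zone_aware_lb_config``) selects the fail-traffic behaviour instead of the default of routing to all hosts.

.. code-block:: yaml

  common_lb_config:
    healthy_panic_threshold:
      value: 70.0
    zone_aware_lb_config:
      # Fail requests during panic instead of spreading them across all hosts.
      fail_traffic_on_panic: true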
However, it eliminates the possibility of _some_ requests succeeding diff --git a/docs/root/intro/arch_overview/upstream/load_balancing/priority.rst b/docs/root/intro/arch_overview/upstream/load_balancing/priority.rst index 23bc5fb6f22d..0ce7f090ca87 100644 --- a/docs/root/intro/arch_overview/upstream/load_balancing/priority.rst +++ b/docs/root/intro/arch_overview/upstream/load_balancing/priority.rst @@ -4,7 +4,7 @@ Priority levels ------------------ During load balancing, Envoy will generally only consider hosts configured at the highest priority -level. For each EDS :ref:`LocalityLbEndpoints` an optional +level. For each EDS :ref:`LocalityLbEndpoints` an optional priority may also be specified. When endpoints at the highest priority level (P=0) are healthy, all traffic will land on endpoints in that priority level. As endpoints for the highest priority level become unhealthy, traffic will begin to trickle to lower priority levels. diff --git a/docs/root/intro/arch_overview/upstream/load_balancing/subsets.rst b/docs/root/intro/arch_overview/upstream/load_balancing/subsets.rst index 6543ca31ad69..789bf822e187 100644 --- a/docs/root/intro/arch_overview/upstream/load_balancing/subsets.rst +++ b/docs/root/intro/arch_overview/upstream/load_balancing/subsets.rst @@ -13,7 +13,7 @@ not be used with subsets because the upstream hosts are not known in advance. Su with zone aware routing, but be aware that the use of subsets may easily violate the minimum hosts condition described above. -If subsets are :ref:`configured ` and a route +If subsets are :ref:`configured ` and a route specifies no metadata or no subset matching the metadata exists, the subset load balancer initiates its fallback policy. The default policy is ``NO_FALLBACK``, in which case the request fails as if the cluster had no hosts. Conversely, the ``ANY_ENDPOINT`` fallback policy load balances across all @@ -36,8 +36,8 @@ balancing to occur. This feature can only be enabled using the V2 configuration API. Furthermore, host metadata is only supported when hosts are defined using -:ref:`ClusterLoadAssignments `. ClusterLoadAssignments are -available via EDS or the Cluster :ref:`load_assignment ` +:ref:`ClusterLoadAssignments `. ClusterLoadAssignments are +available via EDS or the Cluster :ref:`load_assignment ` field. Host metadata for subset load balancing must be placed under the filter name ``"envoy.lb"``. Similarly, route metadata match criteria use ``"envoy.lb"`` filter name. Host metadata may be hierarchical (e.g., the value for a top-level key may be a structured value or list), but the @@ -46,8 +46,7 @@ values, a route's match criteria will only match if an identical structured valu host's metadata. Finally, note that subset load balancing is not available for the -:ref:`ORIGINAL_DST_LB ` or -:ref:`CLUSTER_PROVIDED ` load balancer +:ref:`CLUSTER_PROVIDED ` load balancer policies. Examples diff --git a/docs/root/intro/arch_overview/upstream/outlier.rst b/docs/root/intro/arch_overview/upstream/outlier.rst index a2ac3ed5fcee..d8f35540513a 100644 --- a/docs/root/intro/arch_overview/upstream/outlier.rst +++ b/docs/root/intro/arch_overview/upstream/outlier.rst @@ -10,7 +10,7 @@ such as consecutive failures, temporal success rate, temporal latency, etc. Outl form of *passive* health checking. Envoy also supports :ref:`active health checking `. *Passive* and *active* health checking can be enabled together or independently, and form the basis for an overall upstream health checking solution. 
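Ahead of the detailed discussion below, a minimal outlier detection block on a cluster might look like the following sketch; every threshold shown is an illustrative value rather than a recommendation.

.. code-block:: yaml

  outlier_detection:
    consecutive_5xx: 5
    interval: 10s
    base_ejection_time: 30s
    max_ejection_percent: 50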
-Outlier detection is part of :ref:`cluster configuration ` +Outlier detection is part of :ref:`cluster configuration ` and it needs filters to report errors, timeouts, resets. Currently the following filters support outlier detection: :ref:`http router `, :ref:`tcp proxy ` and :ref:`redis proxy `. @@ -27,18 +27,18 @@ transaction with the server may fail. On the contrary, :ref:`tcp proxy ` filter does not understand any protocol above TCP layer and reports only locally originated errors. -In default configuration (:ref:`outlier_detection.split_external_local_origin_errors` is *false*) +In default configuration (:ref:`outlier_detection.split_external_local_origin_errors` is *false*) locally originated errors are not distinguished from externally generated (transaction) errors and all end up in the same bucket and are compared against -:ref:`outlier_detection.consecutive_5xx`, -:ref:`outlier_detection.consecutive_gateway_failure` and -:ref:`outlier_detection.success_rate_stdev_factor` +:ref:`outlier_detection.consecutive_5xx`, +:ref:`outlier_detection.consecutive_gateway_failure` and +:ref:`outlier_detection.success_rate_stdev_factor` configuration items. For example, if connection to an upstream HTTP server fails twice because of timeout and then, after successful connection, the server returns error code 500, the total error count will be 3. Outlier detection may also be configured to distinguish locally originated errors from externally originated (transaction) errors. It is done via -:ref:`outlier_detection.split_external_local_origin_errors` configuration item. +:ref:`outlier_detection.split_external_local_origin_errors` configuration item. In that mode locally originated errors are tracked by separate counters than externally originated (transaction) errors and the outlier detector may be configured to react to locally originated errors and ignore externally originated errors @@ -58,13 +58,13 @@ ejection algorithm works as follows: #. A host is determined to be an outlier. #. If no hosts have been ejected, Envoy will eject the host immediately. Otherwise, it checks to make sure the number of ejected hosts is below the allowed threshold (specified via the - :ref:`outlier_detection.max_ejection_percent` + :ref:`outlier_detection.max_ejection_percent` setting). If the number of ejected hosts is above the threshold, the host is not ejected. #. The host is ejected for some number of milliseconds. Ejection means that the host is marked unhealthy and will not be used during load balancing unless the load balancer is in a :ref:`panic ` scenario. The number of milliseconds is equal to the :ref:`outlier_detection.base_ejection_time_ms - ` value + ` value multiplied by the number of times the host has been ejected. This causes hosts to get ejected for longer and longer periods if they continue to fail. #. An ejected host will automatically be brought back into service after the ejection time has @@ -79,12 +79,12 @@ Envoy supports the following outlier detection types: Consecutive 5xx ^^^^^^^^^^^^^^^ -In default mode (:ref:`outlier_detection.split_external_local_origin_errors` is *false*) this detection type takes into account all generated errors: locally +In default mode (:ref:`outlier_detection.split_external_local_origin_errors` is *false*) this detection type takes into account all generated errors: locally originated and externally originated (transaction) type of errors. 
Errors generated by non-HTTP filters, like :ref:`tcp proxy ` or :ref:`redis proxy ` are internally mapped to HTTP 5xx codes and treated as such. -In split mode (:ref:`outlier_detection.split_external_local_origin_errors` is *true*) this detection type takes into account only externally originated (transaction) errors ignoring locally originated errors. +In split mode (:ref:`outlier_detection.split_external_local_origin_errors` is *true*) this detection type takes into account only externally originated (transaction) errors ignoring locally originated errors. If an upstream host is HTTP-server, only 5xx types of error are taken into account (see :ref:`Consecutive Gateway Failure` for exceptions). For redis servers, served via :ref:`redis proxy ` only malformed responses from the server are taken into account. @@ -92,7 +92,7 @@ Properly formatted responses, even when they carry operational error (like index If an upstream host returns some number of errors which are treated as consecutive 5xx type errors, it will be ejected. The number of consecutive 5xx required for ejection is controlled by -the :ref:`outlier_detection.consecutive_5xx` value. +the :ref:`outlier_detection.consecutive_5xx` value. .. _consecutive_gateway_failure: @@ -106,17 +106,17 @@ If an upstream host returns some number of consecutive "gateway errors" (502, 50 code), it will be ejected. The number of consecutive gateway failures required for ejection is controlled by the :ref:`outlier_detection.consecutive_gateway_failure -` value. +` value. Consecutive Local Origin Failure ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -This detection type is enabled only when :ref:`outlier_detection.split_external_local_origin_errors` is *true* and takes into account only locally originated errors (timeout, reset, etc). +This detection type is enabled only when :ref:`outlier_detection.split_external_local_origin_errors` is *true* and takes into account only locally originated errors (timeout, reset, etc). If Envoy repeatedly cannot connect to an upstream host or communication with the upstream host is repeatedly interrupted, it will be ejected. Various locally originated problems are detected: timeout, TCP reset, ICMP errors, etc. The number of consecutive locally originated failures required for ejection is controlled by the :ref:`outlier_detection.consecutive_local_origin_failure -` value. +` value. This detection type is supported by :ref:`http router `, :ref:`tcp proxy ` and :ref:`redis proxy `. @@ -126,24 +126,24 @@ Success Rate Success Rate based outlier ejection aggregates success rate data from every host in a cluster. Then at given intervals ejects hosts based on statistical outlier detection. Success Rate outlier ejection will not be calculated for a host if its request volume over the aggregation interval is less than the -:ref:`outlier_detection.success_rate_request_volume` +:ref:`outlier_detection.success_rate_request_volume` value. Moreover, detection will not be performed for a cluster if the number of hosts with the minimum required request volume in an interval is less than the -:ref:`outlier_detection.success_rate_minimum_hosts` +:ref:`outlier_detection.success_rate_minimum_hosts` value. -In default configuration mode (:ref:`outlier_detection.split_external_local_origin_errors` is *false*) +In default configuration mode (:ref:`outlier_detection.split_external_local_origin_errors` is *false*) this detection type takes into account all type of errors: locally and externally originated. 
-:ref:`outlier_detection.enforcing_local_origin_success` config item is ignored. +:ref:`outlier_detection.enforcing_local_origin_success` config item is ignored. -In split mode (:ref:`outlier_detection.split_external_local_origin_errors` is *true*), +In split mode (:ref:`outlier_detection.split_external_local_origin_errors` is *true*), locally originated errors and externally originated (transaction) errors are counted and treated separately. Most configuration items, namely -:ref:`outlier_detection.success_rate_minimum_hosts`, -:ref:`outlier_detection.success_rate_request_volume`, -:ref:`outlier_detection.success_rate_stdev_factor` apply to both -types of errors, but :ref:`outlier_detection.enforcing_success_rate` applies -to externally originated errors only and :ref:`outlier_detection.enforcing_local_origin_success_rate` applies to locally originated errors only. +:ref:`outlier_detection.success_rate_minimum_hosts`, +:ref:`outlier_detection.success_rate_request_volume`, +:ref:`outlier_detection.success_rate_stdev_factor` apply to both +types of errors, but :ref:`outlier_detection.enforcing_success_rate` applies +to externally originated errors only and :ref:`outlier_detection.enforcing_local_origin_success_rate` applies to locally originated errors only. .. _arch_overview_outlier_detection_failure_percentage: @@ -154,22 +154,22 @@ Failure Percentage based outlier ejection functions similarly to the success rat that it relies on success rate data from each host in a cluster. However, rather than compare those values to the mean success rate of the cluster as a whole, they are compared to a flat user-configured threshold. This threshold is configured via the -:ref:`outlier_detection.failure_percentage_threshold` +:ref:`outlier_detection.failure_percentage_threshold` field. The other configuration fields for failure percentage based ejection are similar to the fields for success rate ejection. Failure percentage based ejection also obeys -:ref:`outlier_detection.split_external_local_origin_errors`; +:ref:`outlier_detection.split_external_local_origin_errors`; the enforcement percentages for externally- and locally-originated errors are controlled by -:ref:`outlier_detection.enforcing_failure_percentage` +:ref:`outlier_detection.enforcing_failure_percentage` and -:ref:`outlier_detection.enforcing_failure_percentage_local_origin`, +:ref:`outlier_detection.enforcing_failure_percentage_local_origin`, respectively. As with success rate detection, detection will not be performed for a host if its request volume over the aggregation interval is less than the -:ref:`outlier_detection.failure_percentage_request_volume` +:ref:`outlier_detection.failure_percentage_request_volume` value. Detection also will not be performed for a cluster if the number of hosts with the minimum required request volume in an interval is less than the -:ref:`outlier_detection.failure_percentage_minimum_hosts` +:ref:`outlier_detection.failure_percentage_minimum_hosts` value. .. _arch_overview_outlier_detection_grpc: @@ -188,13 +188,13 @@ Ejection event logging A log of outlier ejection events can optionally be produced by Envoy. This is extremely useful during daily operations since global stats do not provide enough information on which hosts are being ejected and for what reasons. The log is structured as protobuf-based dumps of -:ref:`OutlierDetectionEvent messages `. -Ejection event logging is configured in the Cluster manager :ref:`outlier detection configuration `. +:ref:`OutlierDetectionEvent messages `. 
+Ejection event logging is configured in the Cluster manager :ref:`outlier detection configuration `. Configuration reference ----------------------- -* Cluster manager :ref:`global configuration ` -* Per cluster :ref:`configuration ` +* Cluster manager :ref:`global configuration ` +* Per cluster :ref:`configuration ` * Runtime :ref:`settings ` * Statistics :ref:`reference ` diff --git a/docs/root/intro/arch_overview/upstream/service_discovery.rst b/docs/root/intro/arch_overview/upstream/service_discovery.rst index 678877ff81de..5fd032b57312 100644 --- a/docs/root/intro/arch_overview/upstream/service_discovery.rst +++ b/docs/root/intro/arch_overview/upstream/service_discovery.rst @@ -3,7 +3,7 @@ Service discovery ================= -When an upstream cluster is defined in the :ref:`configuration `, +When an upstream cluster is defined in the :ref:`configuration `, Envoy needs to know how to resolve the members of the cluster. This is known as *service discovery*. .. _arch_overview_service_discovery_types: @@ -41,11 +41,11 @@ This means that care should be taken if active health checking is used with DNS to the same IPs: if an IP is repeated many times between DNS names it might cause undue load on the upstream host. -If :ref:`respect_dns_ttl ` is enabled, DNS record TTLs and -:ref:`dns_refresh_rate ` are used to control DNS refresh rate. -For strict DNS cluster, if the minimum of all record TTLs is 0, :ref:`dns_refresh_rate ` -will be used as the cluster's DNS refresh rate. :ref:`dns_refresh_rate ` -defaults to 5000ms if not specified. The :ref:`dns_failure_refresh_rate ` +If :ref:`respect_dns_ttl ` is enabled, DNS record TTLs and +:ref:`dns_refresh_rate ` are used to control DNS refresh rate. +For strict DNS cluster, if the minimum of all record TTLs is 0, :ref:`dns_refresh_rate ` +will be used as the cluster's DNS refresh rate. :ref:`dns_refresh_rate ` +defaults to 5000ms if not specified. The :ref:`dns_failure_refresh_rate ` controls the refresh frequency during failures, and, if not configured, the DNS refresh rate will be used. .. _arch_overview_service_discovery_types_logical_dns: @@ -70,11 +70,11 @@ When interacting with large scale web services, this is the best of all possible asynchronous/eventually consistent DNS resolution, long lived connections, and zero blocking in the forwarding path. -If :ref:`respect_dns_ttl ` is enabled, DNS record TTLs and -:ref:`dns_refresh_rate ` are used to control DNS refresh rate. -For logical DNS cluster, if the TTL of first record is 0, :ref:`dns_refresh_rate ` -will be used as the cluster's DNS refresh rate. :ref:`dns_refresh_rate ` -defaults to 5000ms if not specified. The :ref:`dns_failure_refresh_rate ` +If :ref:`respect_dns_ttl ` is enabled, DNS record TTLs and +:ref:`dns_refresh_rate ` are used to control DNS refresh rate. +For logical DNS cluster, if the TTL of first record is 0, :ref:`dns_refresh_rate ` +will be used as the cluster's DNS refresh rate. :ref:`dns_refresh_rate ` +defaults to 5000ms if not specified. The :ref:`dns_failure_refresh_rate ` controls the refresh frequency during failures, and, if not configured, the DNS refresh rate will be used. .. _arch_overview_service_discovery_types_original_destination: @@ -87,7 +87,7 @@ via an iptables REDIRECT or TPROXY target or with Proxy Protocol. In these cases to an original destination cluster are forwarded to upstream hosts as addressed by the redirection metadata, without any explicit host configuration or upstream host discovery. 
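A sketch of such a cluster is below; the cluster type and the ``CLUSTER_PROVIDED`` load balancing policy are the essential parts, while the name, connect timeout and cleanup interval are placeholder values.

.. code-block:: yaml

  clusters:
  - name: original_dst_cluster
    connect_timeout: 1s
    type: ORIGINAL_DST
    lb_policy: CLUSTER_PROVIDED
    cleanup_interval: 10s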
Connections to upstream hosts are pooled and unused hosts are flushed out when they have been idle longer than -:ref:`cleanup_interval `, which defaults to +:ref:`cleanup_interval `, which defaults to 5000ms. If the original destination address is not available, no upstream connection is opened. Envoy can also pickup the original destination from a :ref:`HTTP header `. @@ -121,7 +121,7 @@ Custom cluster ^^^^^^^^^^^^^^ Envoy also supports custom cluster discovery mechanism. Custom clusters are specified using -:ref:`cluster_type field ` on the cluster configuration. +:ref:`cluster_type field ` on the cluster configuration. Generally active health checking is used in conjunction with the eventually consistent service discovery service data to making load balancing and routing decisions. This is discussed further in diff --git a/docs/root/intro/what_is_envoy.rst b/docs/root/intro/what_is_envoy.rst index 2de7b36d775c..d9a6e62a3b64 100644 --- a/docs/root/intro/what_is_envoy.rst +++ b/docs/root/intro/what_is_envoy.rst @@ -97,7 +97,7 @@ instead of a library, it is able to implement advanced load balancing techniques and have them be accessible to any application. Currently Envoy includes support for :ref:`automatic retries `, :ref:`circuit breaking `, :ref:`global rate limiting ` via an external rate limiting service, -:ref:`request shadowing `, and +:ref:`request shadowing `, and :ref:`outlier detection `. Future support is planned for request racing. diff --git a/docs/root/operations/admin.rst b/docs/root/operations/admin.rst index 3ce7ff5725c6..c4a3cd81ee13 100644 --- a/docs/root/operations/admin.rst +++ b/docs/root/operations/admin.rst @@ -6,7 +6,7 @@ Administration interface Envoy exposes a local administration interface that can be used to query and modify different aspects of the server: -* :ref:`v2 API reference ` +* :ref:`v3 API reference ` .. _operations_admin_interface_security: @@ -48,7 +48,7 @@ modify different aspects of the server: .. http:get:: /certs List out all loaded TLS certificates, including file name, serial number, subject alternate names and days until - expiration in JSON format conforming to the :ref:`certificate proto definition `. + expiration in JSON format conforming to the :ref:`certificate proto definition `. .. _operations_admin_interface_clusters: @@ -68,10 +68,10 @@ modify different aspects of the server: - :ref:`circuit breakers` settings for all priority settings. - Information about :ref:`outlier detection` if a detector is installed. Currently - :ref:`average success rate `, - and :ref:`ejection threshold` + :ref:`average success rate `, + and :ref:`ejection threshold` are presented. Both of these values could be ``-1`` if there was not enough data to calculate them in the last - :ref:`interval`. + :ref/`interval`. - ``added_via_api`` flag -- ``false`` if the cluster was added via static configuration, ``true`` if it was added via the :ref:`CDS` api. @@ -94,8 +94,8 @@ modify different aspects of the server: zone, String, Service zone canary, Boolean, Whether the host is a canary success_rate, Double, "Request success rate (0-100). -1 if there was not enough - :ref:`request volume` - in the :ref:`interval` + :ref:`request volume` + in the :ref:`interval` to calculate it" Host health status @@ -115,18 +115,18 @@ modify different aspects of the server: .. http:get:: /clusters?format=json Dump the */clusters* output in a JSON-serialized proto. See the - :ref:`definition ` for more information. + :ref:`definition ` for more information. .. 
_operations_admin_interface_config_dump: .. http:get:: /config_dump Dump currently loaded configuration from various Envoy components as JSON-serialized proto - messages. See the :ref:`response definition ` for more + messages. See the :ref:`response definition ` for more information. .. warning:: - Configuration may include :ref:`TLS certificates `. Before + Configuration may include :ref:`TLS certificates `. Before dumping the configuration, Envoy will attempt to redact the ``private_key`` and ``password`` fields from any certificates it finds. This relies on the configuration being a strongly-typed protobuf message. If your Envoy configuration uses deprecated ``config`` fields (of type @@ -143,8 +143,8 @@ modify different aspects of the server: Specify a subset of fields that you would like to be returned. The mask is parsed as a ``ProtobufWkt::FieldMask`` and applied to each top level dump such as - :ref:`BootstrapConfigDump ` and - :ref:`ClustersConfigDump `. + :ref:`BootstrapConfigDump ` and + :ref:`ClustersConfigDump `. This behavior changes if both resource and mask query parameters are specified. See below for details. @@ -154,10 +154,10 @@ modify different aspects of the server: Dump only the currently loaded configuration that matches the specified resource. The resource must be a repeated field in one of the top level config dumps such as - :ref:`static_listeners ` from - :ref:`ListenersConfigDump ` or - :ref:`dynamic_active_clusters ` from - :ref:`ClustersConfigDump `. If you need a non-repeated + :ref:`static_listeners ` from + :ref:`ListenersConfigDump ` or + :ref:`dynamic_active_clusters ` from + :ref:`ClustersConfigDump `. If you need a non-repeated field, use the mask query parameter documented above. If you want only a subset of fields from the repeated resource, use both as documented below. @@ -174,7 +174,7 @@ modify different aspects of the server: .. http:get:: /contention - Dump current Envoy mutex contention stats (:ref:`MutexStats `) in JSON + Dump current Envoy mutex contention stats (:ref:`MutexStats `) in JSON format, if mutex tracing is enabled. See :option:`--enable-mutex-tracing`. .. http:post:: /cpuprofiler @@ -216,7 +216,7 @@ modify different aspects of the server: .. http:get:: /listeners?format=json Dump the */listeners* output in a JSON-serialized proto. See the - :ref:`definition ` for more information. + :ref:`definition ` for more information. .. _operations_admin_interface_logging: @@ -255,7 +255,7 @@ modify different aspects of the server: .. http:post:: /drain_listeners?inboundonly :ref:`Drains ` all inbound listeners. `traffic_direction` field in - :ref:`Listener ` is used to determine whether a listener + :ref:`Listener ` is used to determine whether a listener is inbound or outbound. .. attention:: @@ -304,7 +304,7 @@ modify different aspects of the server: "uptime_all_epochs": "6s" } - See the :ref:`ServerInfo proto ` for an + See the :ref:`ServerInfo proto ` for an explanation of the output. .. http:get:: /ready @@ -318,7 +318,7 @@ modify different aspects of the server: LIVE - See the `state` field of the :ref:`ServerInfo proto ` for an + See the `state` field of the :ref:`ServerInfo proto ` for an explanation of the output. .. _operations_admin_interface_stats: @@ -527,11 +527,11 @@ modify different aspects of the server: format, as expected by the Hystrix dashboard. 
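A bootstrap sketch for the sink that feeds this handler might look like the following, assuming the v3 ``HystrixSink`` extension with its ``num_buckets`` option; the bucket count and flush interval are illustrative.

.. code-block:: yaml

  stats_sinks:
  - name: envoy.stat_sinks.hystrix
    typed_config:
      "@type": type.googleapis.com/envoy.config.metrics.v3.HystrixSink
      num_buckets: 10
  stats_flush_interval: 1s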
If invoked from a browser or a terminal, the response will be shown as a continuous stream, - sent in intervals defined by the :ref:`Bootstrap ` - :ref:`stats_flush_interval ` + sent in intervals defined by the :ref:`Bootstrap ` + :ref:`stats_flush_interval ` This handler is enabled only when a Hystrix sink is enabled in the config file as documented - :ref:`here `. + :ref:`here `. As Envoy's and Hystrix resiliency mechanisms differ, some of the statistics shown in the dashboard had to be adapted: diff --git a/docs/root/operations/cli.rst b/docs/root/operations/cli.rst index 6bd462e4b26b..877f4e2d866a 100644 --- a/docs/root/operations/cli.rst +++ b/docs/root/operations/cli.rst @@ -183,15 +183,15 @@ following are the command line options that Envoy supports. *(optional)* Defines the local service cluster name where Envoy is running. The local service cluster name is first sourced from the :ref:`Bootstrap node - ` message's :ref:`cluster - ` field. This CLI option provides an alternative + ` message's :ref:`cluster + ` field. This CLI option provides an alternative method for specifying this value and will override any value set in bootstrap configuration. It should be set if any of the following features are used: :ref:`statsd `, :ref:`health check cluster - verification `, - :ref:`runtime override directory `, + verification `, + :ref:`runtime override directory `, :ref:`user agent addition - `, + `, :ref:`HTTP global rate limiting `, :ref:`CDS `, and :ref:`HTTP tracing `, either via this CLI option or in the bootstrap @@ -201,8 +201,8 @@ following are the command line options that Envoy supports. *(optional)* Defines the local service node name where Envoy is running. The local service node name is first sourced from the :ref:`Bootstrap node - ` message's :ref:`id - ` field. This CLI option provides an alternative + ` message's :ref:`id + ` field. This CLI option provides an alternative method for specifying this value and will override any value set in bootstrap configuration. It should be set if any of the following features are used: :ref:`statsd `, :ref:`CDS @@ -214,12 +214,12 @@ following are the command line options that Envoy supports. *(optional)* Defines the local service zone where Envoy is running. The local service zone is first sourced from the :ref:`Bootstrap node - ` message's :ref:`locality.zone - ` field. This CLI option provides an + ` message's :ref:`locality.zone + ` field. This CLI option provides an alternative method for specifying this value and will override any value set in bootstrap configuration. It should be set if discovery service routing is used and the discovery service exposes :ref:`zone data - `, either via this CLI option or in + `, either via this CLI option or in the bootstrap configuration. The meaning of zone is context dependent, e.g. `Availability Zone (AZ) `_ @@ -261,7 +261,7 @@ following are the command line options that Envoy supports. .. option:: --enable-mutex-tracing *(optional)* This flag enables the collection of mutex contention statistics - (:ref:`MutexStats `) as well as a contention endpoint + (:ref:`MutexStats `) as well as a contention endpoint (:http:get:`/contention`). Mutex tracing is not enabled by default, since it incurs a slight performance penalty for those Envoys which already experience mutex contention. 
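The ``--service-cluster``, ``--service-node`` and ``--service-zone`` options above mirror fields of the bootstrap ``node`` message, so the same identity can instead be declared in configuration; the values in this sketch are placeholders.

.. code-block:: yaml

  node:
    id: front-proxy-1
    cluster: front-proxy
    locality:
      zone: us-east-1a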
diff --git a/docs/root/operations/fs_flags.rst b/docs/root/operations/fs_flags.rst index a4c154bd207a..ba322a31ef32 100644 --- a/docs/root/operations/fs_flags.rst +++ b/docs/root/operations/fs_flags.rst @@ -6,7 +6,7 @@ File system flags Envoy supports file system "flags" that alter state at startup. This is used to persist changes between restarts if necessary. The flag files should be placed in the directory specified in the :ref:`flags_path -` configuration +` configuration option. The currently supported flag files are: drain diff --git a/docs/root/operations/performance.rst b/docs/root/operations/performance.rst index 555e7a03b5eb..01acce4acc1f 100644 --- a/docs/root/operations/performance.rst +++ b/docs/root/operations/performance.rst @@ -23,14 +23,14 @@ Envoy exposes two statistics to monitor performance of the event loops on all th running---but if this number elevates substantially above its normal observed baseline, it likely indicates kernel scheduler delays. -These statistics can be enabled by setting :ref:`enable_dispatcher_stats ` +These statistics can be enabled by setting :ref:`enable_dispatcher_stats ` to true. .. warning:: Note that enabling dispatcher stats records a value for each iteration of the event loop on every thread. This should normally be minimal overhead, but when using - :ref:`statsd `, it will send each observed value over + :ref:`statsd `, it will send each observed value over the wire individually because the statsd protocol doesn't have any way to represent a histogram summary. Be aware that this can be a very large volume of data. @@ -56,7 +56,7 @@ Watchdog -------- In addition to event loop statistics, Envoy also include a configurable -:ref:`watchdog ` system that can increment +:ref:`watchdog ` system that can increment statistics when Envoy is not responsive and optionally kill the server. The statistics are useful for understanding at a high level whether Envoy's event loop is not responsive either because it is doing too much work, blocking, or not being scheduled by the OS. diff --git a/docs/root/operations/traffic_tapping.rst b/docs/root/operations/traffic_tapping.rst index c67e3848ff0c..d15eee5cffc9 100644 --- a/docs/root/operations/traffic_tapping.rst +++ b/docs/root/operations/traffic_tapping.rst @@ -7,9 +7,9 @@ Envoy currently provides two experimental extensions that can tap traffic: * :ref:`HTTP tap filter `. See the linked filter documentation for more information. - * :ref:`Tap transport socket extension ` that can intercept + * :ref:`Tap transport socket extension ` that can intercept traffic and write to a :ref:`protobuf trace file - `. The remainder of this document describes + `. The remainder of this document describes the configuration of the tap transport socket. Tap transport socket configuration @@ -22,12 +22,12 @@ Tap transport socket configuration Capabilities will be expanded over time and the configuration structures are likely to change. Tapping can be configured on :ref:`Listener -` and :ref:`Cluster -` transport sockets, providing the ability to interpose on +` and :ref:`Cluster +` transport sockets, providing the ability to interpose on downstream and upstream L4 connections respectively. To configure traffic tapping, add an `envoy.transport_sockets.tap` transport socket -:ref:`configuration ` to the listener +:ref:`configuration ` to the listener or cluster. For a plain text socket this might look like: .. code-block:: yaml @@ -35,7 +35,7 @@ or cluster. 
For a plain text socket this might look like: transport_socket: name: envoy.transport_sockets.tap typed_config: - "@type": type.googleapis.com/envoy.config.transport_socket.tap.v2alpha.Tap + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tap.v3.Tap common_config: static_config: match_config: @@ -55,7 +55,7 @@ For a TLS socket, this will be: transport_socket: name: envoy.transport_sockets.tap typed_config: - "@type": type.googleapis.com/envoy.config.transport_socket.tap.v2alpha.Tap + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tap.v3.Tap common_config: static_config: match_config: @@ -70,8 +70,8 @@ For a TLS socket, this will be: typed_config: where the TLS context configuration replaces any existing :ref:`downstream -` or :ref:`upstream -` +` or :ref:`upstream +` TLS configuration on the listener or cluster, respectively. Each unique socket instance will generate a trace file prefixed with `path_prefix`. E.g. @@ -83,22 +83,22 @@ Buffered data limits For buffered socket taps, Envoy will limit the amount of body data that is tapped to avoid OOM situations. The default limit is 1KiB for both received and transmitted data. This is configurable via the :ref:`max_buffered_rx_bytes -` and +` and :ref:`max_buffered_tx_bytes -` settings. When a buffered +` settings. When a buffered socket tap is truncated, the trace will indicate truncation via the :ref:`read_truncated -` and :ref:`write_truncated -` fields as well as the body -:ref:`truncated ` field. +` and :ref:`write_truncated +` fields as well as the body +:ref:`truncated ` field. Streaming --------- The tap transport socket supports both buffered and streaming, controlled by the :ref:`streaming -` setting. When buffering, -:ref:`SocketBufferedTrace ` messages are +` setting. When buffering, +:ref:`SocketBufferedTrace ` messages are emitted. When streaming, a series of :ref:`SocketStreamedTraceSegment -` are emitted. +` are emitted. See the :ref:`HTTP tap filter streaming ` documentation for more information. Most of the concepts overlap between the HTTP filter and the transport socket. diff --git a/docs/root/start/start.rst b/docs/root/start/start.rst index a8aa1d24139c..b07ccd95c8f9 100644 --- a/docs/root/start/start.rst +++ b/docs/root/start/start.rst @@ -8,7 +8,7 @@ This section gets you started with a very simple configuration and provides some The fastest way to get started using Envoy is :ref:`installing pre-built binaries `. You can also :ref:`build it ` from source. -These examples use the :ref:`v2 Envoy API `, but use only the static configuration +These examples use the :ref:`v3 Envoy API `, but use only the static configuration feature of the API, which is most useful for simple requirements. For more complex requirements :ref:`Dynamic Configuration ` is supported. @@ -38,9 +38,9 @@ Simple Configuration Envoy can be configured using a single YAML file passed in as an argument on the command line. -The :ref:`admin message ` is required to configure +The :ref:`admin message ` is required to configure the administration server. The `address` key specifies the -listening :ref:`address ` +listening :ref:`address ` which in this case is simply `0.0.0.0:9901`. .. code-block:: yaml @@ -50,7 +50,7 @@ which in this case is simply `0.0.0.0:9901`. 
address: socket_address: { address: 0.0.0.0, port_value: 9901 } -The :ref:`static_resources ` contains everything that is configured statically when Envoy starts, +The :ref:`static_resources ` contains everything that is configured statically when Envoy starts, as opposed to the means of configuring resources dynamically when Envoy is running. The :ref:`v2 API Overview ` describes this. @@ -58,7 +58,7 @@ The :ref:`v2 API Overview ` describes this. static_resources: -The specification of the :ref:`listeners `. +The specification of the :ref:`listeners `. .. code-block:: yaml @@ -70,7 +70,7 @@ The specification of the :ref:`listeners `. +The specification of the :ref:`clusters `. .. code-block:: yaml @@ -107,7 +107,7 @@ The specification of the :ref:`clusters ` transport_socket: name: envoy.transport_sockets.tls typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext sni: www.google.com From 6151a69f9c0dc4aa7938d987036ec00eedb818d5 Mon Sep 17 00:00:00 2001 From: Rei Shimizu Date: Tue, 5 May 2020 05:05:37 +0900 Subject: [PATCH 090/909] http: max stream duration upstream support (#10531) To resolve #10274, adding max stream duration for upstream connection. Signed-off-by: shikugawa --- api/envoy/api/v2/core/protocol.proto | 2 - api/envoy/config/accesslog/v3/accesslog.proto | 1 + .../config/accesslog/v4alpha/accesslog.proto | 1 + api/envoy/config/core/v3/protocol.proto | 2 - api/envoy/config/core/v4alpha/protocol.proto | 2 - api/envoy/data/accesslog/v3/accesslog.proto | 5 +- clang-tidy-fixes.yaml | 0 .../observability/access_log/usage.rst | 1 + .../cluster_manager/cluster_stats.rst | 1 + docs/root/faq/configuration/timeouts.rst | 5 +- .../envoy/api/v2/core/protocol.proto | 2 - .../envoy/config/accesslog/v3/accesslog.proto | 1 + .../config/accesslog/v4alpha/accesslog.proto | 1 + .../envoy/config/core/v3/protocol.proto | 2 - .../envoy/config/core/v4alpha/protocol.proto | 2 - .../envoy/data/accesslog/v3/accesslog.proto | 5 +- include/envoy/stream_info/stream_info.h | 6 +- include/envoy/upstream/upstream.h | 7 + source/common/router/router.cc | 23 +++ source/common/router/router.h | 2 + source/common/router/upstream_request.cc | 21 +++ source/common/router/upstream_request.h | 3 + source/common/stream_info/utility.cc | 8 +- source/common/stream_info/utility.h | 1 + source/common/upstream/upstream_impl.cc | 1 + source/common/upstream/upstream_impl.h | 4 + .../grpc/grpc_access_log_utils.cc | 5 +- .../common/access_log/access_log_impl_test.cc | 9 +- test/common/router/router_test.cc | 154 ++++++++++++++++++ test/common/router/upstream_request_test.cc | 1 + test/common/stream_info/utility_test.cc | 8 +- .../grpc/grpc_access_log_utils_test.cc | 2 + test/integration/http_integration.cc | 87 ++++++++++ test/integration/http_integration.h | 4 +- test/integration/protocol_integration_test.cc | 10 ++ test/integration/stats_integration_test.cc | 10 +- .../tcp_tunneling_integration_test.cc | 24 +++ test/mocks/upstream/cluster_info.cc | 2 + test/mocks/upstream/cluster_info.h | 4 + 39 files changed, 395 insertions(+), 34 deletions(-) create mode 100644 clang-tidy-fixes.yaml diff --git a/api/envoy/api/v2/core/protocol.proto b/api/envoy/api/v2/core/protocol.proto index 5838ca744075..9c47e388ee1a 100644 --- a/api/envoy/api/v2/core/protocol.proto +++ b/api/envoy/api/v2/core/protocol.proto @@ -85,8 +85,6 @@ message HttpProtocolOptions { // Total duration to keep alive an HTTP request/response stream. 
If the time limit is reached the stream will be // reset independent of any other timeouts. If not specified, this value is not set. - // The current implementation implements this timeout on downstream connections only. - // [#comment:TODO(shikugawa): add this functionality to upstream.] google.protobuf.Duration max_stream_duration = 4; // Action to take when a client request with a header name containing underscore characters is received. diff --git a/api/envoy/config/accesslog/v3/accesslog.proto b/api/envoy/config/accesslog/v3/accesslog.proto index f5732ba3f8e4..218ad5bda4b8 100644 --- a/api/envoy/config/accesslog/v3/accesslog.proto +++ b/api/envoy/config/accesslog/v3/accesslog.proto @@ -240,6 +240,7 @@ message ResponseFlagFilter { in: "SI" in: "IH" in: "DPE" + in: "UMSDR" } } }]; diff --git a/api/envoy/config/accesslog/v4alpha/accesslog.proto b/api/envoy/config/accesslog/v4alpha/accesslog.proto index 56911ca19185..5900f62f4ffe 100644 --- a/api/envoy/config/accesslog/v4alpha/accesslog.proto +++ b/api/envoy/config/accesslog/v4alpha/accesslog.proto @@ -239,6 +239,7 @@ message ResponseFlagFilter { in: "SI" in: "IH" in: "DPE" + in: "UMSDR" } } }]; diff --git a/api/envoy/config/core/v3/protocol.proto b/api/envoy/config/core/v3/protocol.proto index 400b0dd95a94..7866b87999e4 100644 --- a/api/envoy/config/core/v3/protocol.proto +++ b/api/envoy/config/core/v3/protocol.proto @@ -92,8 +92,6 @@ message HttpProtocolOptions { // Total duration to keep alive an HTTP request/response stream. If the time limit is reached the stream will be // reset independent of any other timeouts. If not specified, this value is not set. - // The current implementation implements this timeout on downstream connections only. - // [#comment:TODO(shikugawa): add this functionality to upstream.] google.protobuf.Duration max_stream_duration = 4; // Action to take when a client request with a header name containing underscore characters is received. diff --git a/api/envoy/config/core/v4alpha/protocol.proto b/api/envoy/config/core/v4alpha/protocol.proto index dcb205444524..773aa184bdba 100644 --- a/api/envoy/config/core/v4alpha/protocol.proto +++ b/api/envoy/config/core/v4alpha/protocol.proto @@ -92,8 +92,6 @@ message HttpProtocolOptions { // Total duration to keep alive an HTTP request/response stream. If the time limit is reached the stream will be // reset independent of any other timeouts. If not specified, this value is not set. - // The current implementation implements this timeout on downstream connections only. - // [#comment:TODO(shikugawa): add this functionality to upstream.] google.protobuf.Duration max_stream_duration = 4; // Action to take when a client request with a header name containing underscore characters is received. diff --git a/api/envoy/data/accesslog/v3/accesslog.proto b/api/envoy/data/accesslog/v3/accesslog.proto index 374569d937f2..c97e2f4acef0 100644 --- a/api/envoy/data/accesslog/v3/accesslog.proto +++ b/api/envoy/data/accesslog/v3/accesslog.proto @@ -186,7 +186,7 @@ message AccessLogCommon { } // Flags indicating occurrences during request/response processing. -// [#next-free-field: 20] +// [#next-free-field: 21] message ResponseFlags { option (udpa.annotations.versioning).previous_message_type = "envoy.data.accesslog.v2.ResponseFlags"; @@ -263,6 +263,9 @@ message ResponseFlags { // Indicates there was an HTTP protocol error on the downstream request. bool downstream_protocol_error = 19; + + // Indicates there was a max stream duration reached on the upstream request. 
+ bool upstream_max_stream_duration_reached = 20; } // Properties of a negotiated TLS connection. diff --git a/clang-tidy-fixes.yaml b/clang-tidy-fixes.yaml new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/docs/root/configuration/observability/access_log/usage.rst b/docs/root/configuration/observability/access_log/usage.rst index 7ac3def7cdec..920e7619efff 100644 --- a/docs/root/configuration/observability/access_log/usage.rst +++ b/docs/root/configuration/observability/access_log/usage.rst @@ -274,6 +274,7 @@ The following command operators are supported: :ref:`strictly-checked header ` in addition to 400 response code. * **SI**: Stream idle timeout in addition to 408 response code. * **DPE**: The downstream request had an HTTP protocol error. + * **UMSDR**: The upstream request reached to max stream duration. %ROUTE_NAME% Name of the route. diff --git a/docs/root/configuration/upstream/cluster_manager/cluster_stats.rst b/docs/root/configuration/upstream/cluster_manager/cluster_stats.rst index 61f506f29453..e318a9778cce 100644 --- a/docs/root/configuration/upstream/cluster_manager/cluster_stats.rst +++ b/docs/root/configuration/upstream/cluster_manager/cluster_stats.rst @@ -69,6 +69,7 @@ Every cluster has a statistics tree rooted at *cluster..* with the followi upstream_rq_cancelled, Counter, Total requests cancelled before obtaining a connection pool connection upstream_rq_maintenance_mode, Counter, Total requests that resulted in an immediate 503 due to :ref:`maintenance mode` upstream_rq_timeout, Counter, Total requests that timed out waiting for a response + upstream_rq_max_duration_reached, Counter, Total requests closed due to max duration reached upstream_rq_per_try_timeout, Counter, Total requests that hit the per try timeout upstream_rq_rx_reset, Counter, Total requests that were reset remotely upstream_rq_tx_reset, Counter, Total requests that were reset locally diff --git a/docs/root/faq/configuration/timeouts.rst b/docs/root/faq/configuration/timeouts.rst index 4cf5aa16aa29..11f6ae366f1d 100644 --- a/docs/root/faq/configuration/timeouts.rst +++ b/docs/root/faq/configuration/timeouts.rst @@ -59,10 +59,7 @@ context request/stream is interchangeable. HTTP request/response streams periodically. You can't use :ref:`request_timeout ` in this situation because this timer will be disarmed if a response header is received on the request/response streams. - - .. attention:: - - The current implementation implements this timeout on downstream connections only. + This timeout is available on both upstream and downstream connections. Route timeouts ^^^^^^^^^^^^^^ diff --git a/generated_api_shadow/envoy/api/v2/core/protocol.proto b/generated_api_shadow/envoy/api/v2/core/protocol.proto index 5838ca744075..9c47e388ee1a 100644 --- a/generated_api_shadow/envoy/api/v2/core/protocol.proto +++ b/generated_api_shadow/envoy/api/v2/core/protocol.proto @@ -85,8 +85,6 @@ message HttpProtocolOptions { // Total duration to keep alive an HTTP request/response stream. If the time limit is reached the stream will be // reset independent of any other timeouts. If not specified, this value is not set. - // The current implementation implements this timeout on downstream connections only. - // [#comment:TODO(shikugawa): add this functionality to upstream.] google.protobuf.Duration max_stream_duration = 4; // Action to take when a client request with a header name containing underscore characters is received. 
diff --git a/generated_api_shadow/envoy/config/accesslog/v3/accesslog.proto b/generated_api_shadow/envoy/config/accesslog/v3/accesslog.proto index da29f198802f..1edd34407635 100644 --- a/generated_api_shadow/envoy/config/accesslog/v3/accesslog.proto +++ b/generated_api_shadow/envoy/config/accesslog/v3/accesslog.proto @@ -238,6 +238,7 @@ message ResponseFlagFilter { in: "SI" in: "IH" in: "DPE" + in: "UMSDR" } } }]; diff --git a/generated_api_shadow/envoy/config/accesslog/v4alpha/accesslog.proto b/generated_api_shadow/envoy/config/accesslog/v4alpha/accesslog.proto index 56911ca19185..5900f62f4ffe 100644 --- a/generated_api_shadow/envoy/config/accesslog/v4alpha/accesslog.proto +++ b/generated_api_shadow/envoy/config/accesslog/v4alpha/accesslog.proto @@ -239,6 +239,7 @@ message ResponseFlagFilter { in: "SI" in: "IH" in: "DPE" + in: "UMSDR" } } }]; diff --git a/generated_api_shadow/envoy/config/core/v3/protocol.proto b/generated_api_shadow/envoy/config/core/v3/protocol.proto index 400b0dd95a94..7866b87999e4 100644 --- a/generated_api_shadow/envoy/config/core/v3/protocol.proto +++ b/generated_api_shadow/envoy/config/core/v3/protocol.proto @@ -92,8 +92,6 @@ message HttpProtocolOptions { // Total duration to keep alive an HTTP request/response stream. If the time limit is reached the stream will be // reset independent of any other timeouts. If not specified, this value is not set. - // The current implementation implements this timeout on downstream connections only. - // [#comment:TODO(shikugawa): add this functionality to upstream.] google.protobuf.Duration max_stream_duration = 4; // Action to take when a client request with a header name containing underscore characters is received. diff --git a/generated_api_shadow/envoy/config/core/v4alpha/protocol.proto b/generated_api_shadow/envoy/config/core/v4alpha/protocol.proto index dcb205444524..773aa184bdba 100644 --- a/generated_api_shadow/envoy/config/core/v4alpha/protocol.proto +++ b/generated_api_shadow/envoy/config/core/v4alpha/protocol.proto @@ -92,8 +92,6 @@ message HttpProtocolOptions { // Total duration to keep alive an HTTP request/response stream. If the time limit is reached the stream will be // reset independent of any other timeouts. If not specified, this value is not set. - // The current implementation implements this timeout on downstream connections only. - // [#comment:TODO(shikugawa): add this functionality to upstream.] google.protobuf.Duration max_stream_duration = 4; // Action to take when a client request with a header name containing underscore characters is received. diff --git a/generated_api_shadow/envoy/data/accesslog/v3/accesslog.proto b/generated_api_shadow/envoy/data/accesslog/v3/accesslog.proto index 374569d937f2..c97e2f4acef0 100644 --- a/generated_api_shadow/envoy/data/accesslog/v3/accesslog.proto +++ b/generated_api_shadow/envoy/data/accesslog/v3/accesslog.proto @@ -186,7 +186,7 @@ message AccessLogCommon { } // Flags indicating occurrences during request/response processing. -// [#next-free-field: 20] +// [#next-free-field: 21] message ResponseFlags { option (udpa.annotations.versioning).previous_message_type = "envoy.data.accesslog.v2.ResponseFlags"; @@ -263,6 +263,9 @@ message ResponseFlags { // Indicates there was an HTTP protocol error on the downstream request. bool downstream_protocol_error = 19; + + // Indicates there was a max stream duration reached on the upstream request. + bool upstream_max_stream_duration_reached = 20; } // Properties of a negotiated TLS connection. 
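With this change the upstream cap is read from the cluster's common HTTP protocol options (exposed through the new ``commonHttpProtocolOptions()`` accessor added later in this patch); a configuration sketch, with an illustrative 30 second limit, might be:

.. code-block:: yaml

  clusters:
  - name: some_service
    connect_timeout: 0.25s
    common_http_protocol_options:
      max_stream_duration: 30s

When the limit fires before any response headers have been sent downstream, the request is answered locally and the ``UMSDR`` response flag described above is set; otherwise the stream is reset.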
diff --git a/include/envoy/stream_info/stream_info.h b/include/envoy/stream_info/stream_info.h index 89824f4190f4..bb4a2e73382d 100644 --- a/include/envoy/stream_info/stream_info.h +++ b/include/envoy/stream_info/stream_info.h @@ -72,8 +72,10 @@ enum ResponseFlag { InvalidEnvoyRequestHeaders = 0x20000, // Downstream request had an HTTP protocol error DownstreamProtocolError = 0x40000, + // Upstream request reached to user defined max stream duration. + UpstreamMaxStreamDurationReached = 0x80000, // ATTENTION: MAKE SURE THIS REMAINS EQUAL TO THE LAST FLAG. - LastFlag = DownstreamProtocolError + LastFlag = UpstreamMaxStreamDurationReached }; /** @@ -139,6 +141,8 @@ struct ResponseCodeDetailValues { const std::string UpstreamTimeout = "upstream_response_timeout"; // The final upstream try timed out const std::string UpstreamPerTryTimeout = "upstream_per_try_timeout"; + // The request was destroyed because of user defined max stream duration. + const std::string UpstreamMaxStreamDurationReached = "upstream_max_stream_duration_reached"; // The upstream connection was reset before a response was started. This // will generally be accompanied by details about why the reset occurred. const std::string EarlyUpstreamReset = "upstream_reset_before_response_started"; diff --git a/include/envoy/upstream/upstream.h b/include/envoy/upstream/upstream.h index 7ed52ca584da..5e8aa41e2ff5 100644 --- a/include/envoy/upstream/upstream.h +++ b/include/envoy/upstream/upstream.h @@ -571,6 +571,7 @@ class PrioritySet { COUNTER(upstream_rq_cancelled) \ COUNTER(upstream_rq_completed) \ COUNTER(upstream_rq_maintenance_mode) \ + COUNTER(upstream_rq_max_duration_reached) \ COUNTER(upstream_rq_pending_failure_eject) \ COUNTER(upstream_rq_pending_overflow) \ COUNTER(upstream_rq_pending_total) \ @@ -728,6 +729,12 @@ class ClusterInfo { */ virtual const envoy::config::core::v3::Http2ProtocolOptions& http2Options() const PURE; + /** + * @return const envoy::config::core::v3::HttpProtocolOptions for all of HTTP versions. 
+ */ + virtual const envoy::config::core::v3::HttpProtocolOptions& + commonHttpProtocolOptions() const PURE; + /** * @param name std::string containing the well-known name of the extension for which protocol * options are desired diff --git a/source/common/router/router.cc b/source/common/router/router.cc index 8d1ab9647bb8..fb676ed514e5 100644 --- a/source/common/router/router.cc +++ b/source/common/router/router.cc @@ -984,6 +984,29 @@ void Filter::onPerTryTimeout(UpstreamRequest& upstream_request) { StreamInfo::ResponseCodeDetails::get().UpstreamPerTryTimeout); } +void Filter::onStreamMaxDurationReached(UpstreamRequest& upstream_request) { + upstream_request.resetStream(); + + if (maybeRetryReset(Http::StreamResetReason::LocalReset, upstream_request)) { + return; + } + + upstream_request.removeFromList(upstream_requests_); + cleanup(); + + if (downstream_response_started_) { + callbacks_->streamInfo().setResponseCodeDetails( + StreamInfo::ResponseCodeDetails::get().UpstreamMaxStreamDurationReached); + callbacks_->resetStream(); + } else { + callbacks_->streamInfo().setResponseFlag( + StreamInfo::ResponseFlag::UpstreamMaxStreamDurationReached); + callbacks_->sendLocalReply( + Http::Code::RequestTimeout, "upstream max stream duration reached", modify_headers_, + absl::nullopt, StreamInfo::ResponseCodeDetails::get().UpstreamMaxStreamDurationReached); + } +} + void Filter::updateOutlierDetection(Upstream::Outlier::Result result, UpstreamRequest& upstream_request, absl::optional code) { diff --git a/source/common/router/router.h b/source/common/router/router.h index c74a33323377..058e82bdc540 100644 --- a/source/common/router/router.h +++ b/source/common/router/router.h @@ -265,6 +265,7 @@ class RouterFilterInterface { UpstreamRequest& upstream_request) PURE; virtual void onUpstreamHostSelected(Upstream::HostDescriptionConstSharedPtr host) PURE; virtual void onPerTryTimeout(UpstreamRequest& upstream_request) PURE; + virtual void onStreamMaxDurationReached(UpstreamRequest& upstream_request) PURE; virtual Http::StreamDecoderFilterCallbacks* callbacks() PURE; virtual Upstream::ClusterInfoConstSharedPtr cluster() PURE; @@ -432,6 +433,7 @@ class Filter : Logger::Loggable, UpstreamRequest& upstream_request) override; void onUpstreamHostSelected(Upstream::HostDescriptionConstSharedPtr host) override; void onPerTryTimeout(UpstreamRequest& upstream_request) override; + void onStreamMaxDurationReached(UpstreamRequest& upstream_request) override; Http::StreamDecoderFilterCallbacks* callbacks() override { return callbacks_; } Upstream::ClusterInfoConstSharedPtr cluster() override { return cluster_; } FilterConfig& config() override { return config_; } diff --git a/source/common/router/upstream_request.cc b/source/common/router/upstream_request.cc index 6130d64de9df..be15ec7a8e67 100644 --- a/source/common/router/upstream_request.cc +++ b/source/common/router/upstream_request.cc @@ -78,6 +78,9 @@ UpstreamRequest::~UpstreamRequest() { // Allows for testing. 
per_try_timeout_->disableTimer(); } + if (max_stream_duration_timer_ != nullptr) { + max_stream_duration_timer_->disableTimer(); + } clearRequestEncoder(); // If desired, fire the per-try histogram when the UpstreamRequest @@ -382,7 +385,18 @@ void UpstreamRequest::onPoolReady( paused_for_connect_ = true; } + if (upstream_host_->cluster().commonHttpProtocolOptions().has_max_stream_duration()) { + const auto max_stream_duration = std::chrono::milliseconds(DurationUtil::durationToMilliseconds( + upstream_host_->cluster().commonHttpProtocolOptions().max_stream_duration())); + if (max_stream_duration.count()) { + max_stream_duration_timer_ = parent_.callbacks()->dispatcher().createTimer( + [this]() -> void { onStreamMaxDurationReached(); }); + max_stream_duration_timer_->enableTimer(max_stream_duration); + } + } + upstream_->encodeHeaders(*parent_.downstreamHeaders(), shouldSendEndStream()); + calling_encode_headers_ = false; if (!paused_for_connect_) { @@ -426,6 +440,13 @@ void UpstreamRequest::encodeBodyAndTrailers() { } } +void UpstreamRequest::onStreamMaxDurationReached() { + upstream_host_->cluster().stats().upstream_rq_max_duration_reached_.inc(); + + // The upstream had closed then try to retry along with retry policy. + parent_.onStreamMaxDurationReached(*this); +} + void UpstreamRequest::clearRequestEncoder() { // Before clearing the encoder, unsubscribe from callbacks. if (upstream_) { diff --git a/source/common/router/upstream_request.h b/source/common/router/upstream_request.h index 3dd852fab2f4..c215f9d45647 100644 --- a/source/common/router/upstream_request.h +++ b/source/common/router/upstream_request.h @@ -107,6 +107,7 @@ class UpstreamRequest : public Logger::Loggable, UpstreamRequest* upstreamRequest() override { return this; } void clearRequestEncoder(); + void onStreamMaxDurationReached(); struct DownstreamWatermarkManager : public Http::DownstreamWatermarkCallbacks { DownstreamWatermarkManager(UpstreamRequest& parent) : parent_(parent) {} @@ -188,6 +189,8 @@ class UpstreamRequest : public Logger::Loggable, // Sentinel to indicate if timeout budget tracking is configured for the cluster, // and if so, if the per-try histogram should record a value. bool record_timeout_budget_ : 1; + + Event::TimerPtr max_stream_duration_timer_; }; class HttpConnPool : public GenericConnPool, public Http::ConnectionPool::Callbacks { diff --git a/source/common/stream_info/utility.cc b/source/common/stream_info/utility.cc index ccd24cb1acf7..2a173e9dd504 100644 --- a/source/common/stream_info/utility.cc +++ b/source/common/stream_info/utility.cc @@ -25,6 +25,7 @@ const std::string ResponseFlagUtils::RATELIMIT_SERVICE_ERROR = "RLSE"; const std::string ResponseFlagUtils::STREAM_IDLE_TIMEOUT = "SI"; const std::string ResponseFlagUtils::INVALID_ENVOY_REQUEST_HEADERS = "IH"; const std::string ResponseFlagUtils::DOWNSTREAM_PROTOCOL_ERROR = "DPE"; +const std::string ResponseFlagUtils::UPSTREAM_MAX_STREAM_DURATION_REACHED = "UMSDR"; void ResponseFlagUtils::appendString(std::string& result, const std::string& append) { if (result.empty()) { @@ -37,7 +38,7 @@ void ResponseFlagUtils::appendString(std::string& result, const std::string& app const std::string ResponseFlagUtils::toShortString(const StreamInfo& stream_info) { std::string result; - static_assert(ResponseFlag::LastFlag == 0x40000, "A flag has been added. Fix this code."); + static_assert(ResponseFlag::LastFlag == 0x80000, "A flag has been added. 
Fix this code."); if (stream_info.hasResponseFlag(ResponseFlag::FailedLocalHealthCheck)) { appendString(result, FAILED_LOCAL_HEALTH_CHECK); @@ -114,6 +115,9 @@ const std::string ResponseFlagUtils::toShortString(const StreamInfo& stream_info appendString(result, DOWNSTREAM_PROTOCOL_ERROR); } + if (stream_info.hasResponseFlag(ResponseFlag::UpstreamMaxStreamDurationReached)) { + appendString(result, UPSTREAM_MAX_STREAM_DURATION_REACHED); + } return result.empty() ? NONE : result; } @@ -140,6 +144,8 @@ absl::optional ResponseFlagUtils::toResponseFlag(const std::string {ResponseFlagUtils::STREAM_IDLE_TIMEOUT, ResponseFlag::StreamIdleTimeout}, {ResponseFlagUtils::INVALID_ENVOY_REQUEST_HEADERS, ResponseFlag::InvalidEnvoyRequestHeaders}, {ResponseFlagUtils::DOWNSTREAM_PROTOCOL_ERROR, ResponseFlag::DownstreamProtocolError}, + {ResponseFlagUtils::UPSTREAM_MAX_STREAM_DURATION_REACHED, + ResponseFlag::UpstreamMaxStreamDurationReached}, }; const auto& it = map.find(flag); if (it != map.end()) { diff --git a/source/common/stream_info/utility.h b/source/common/stream_info/utility.h index fe8059b89643..85285d1d2f80 100644 --- a/source/common/stream_info/utility.h +++ b/source/common/stream_info/utility.h @@ -40,6 +40,7 @@ class ResponseFlagUtils { const static std::string STREAM_IDLE_TIMEOUT; const static std::string INVALID_ENVOY_REQUEST_HEADERS; const static std::string DOWNSTREAM_PROTOCOL_ERROR; + const static std::string UPSTREAM_MAX_STREAM_DURATION_REACHED; }; /** diff --git a/source/common/upstream/upstream_impl.cc b/source/common/upstream/upstream_impl.cc index 0be39442ef83..bd88b40b5314 100644 --- a/source/common/upstream/upstream_impl.cc +++ b/source/common/upstream/upstream_impl.cc @@ -685,6 +685,7 @@ ClusterInfoImpl::ClusterInfoImpl( features_(parseFeatures(config)), http1_settings_(Http::Utility::parseHttp1Settings(config.http_protocol_options())), http2_options_(Http2::Utility::initializeAndValidateOptions(config.http2_protocol_options())), + common_http_protocol_options_(config.common_http_protocol_options()), extension_protocol_options_(parseExtensionProtocolOptions(config, validation_visitor)), resource_managers_(config, runtime, name_, *stats_scope_), maintenance_mode_runtime_key_(absl::StrCat("upstream.maintenance_mode.", name_)), diff --git a/source/common/upstream/upstream_impl.h b/source/common/upstream/upstream_impl.h index b9c871ccd94a..382605591d9d 100644 --- a/source/common/upstream/upstream_impl.h +++ b/source/common/upstream/upstream_impl.h @@ -539,6 +539,9 @@ class ClusterInfoImpl : public ClusterInfo, protected Logger::Loggable extension_protocol_options_; mutable ResourceManagers resource_managers_; const std::string maintenance_mode_runtime_key_; diff --git a/source/extensions/access_loggers/grpc/grpc_access_log_utils.cc b/source/extensions/access_loggers/grpc/grpc_access_log_utils.cc index 1d187bc29985..65ace2eb7ede 100644 --- a/source/extensions/access_loggers/grpc/grpc_access_log_utils.cc +++ b/source/extensions/access_loggers/grpc/grpc_access_log_utils.cc @@ -37,7 +37,7 @@ void Utility::responseFlagsToAccessLogResponseFlags( envoy::data::accesslog::v3::AccessLogCommon& common_access_log, const StreamInfo::StreamInfo& stream_info) { - static_assert(StreamInfo::ResponseFlag::LastFlag == 0x40000, + static_assert(StreamInfo::ResponseFlag::LastFlag == 0x80000, "A flag has been added. 
Fix this code."); if (stream_info.hasResponseFlag(StreamInfo::ResponseFlag::FailedLocalHealthCheck)) { @@ -116,6 +116,9 @@ void Utility::responseFlagsToAccessLogResponseFlags( if (stream_info.hasResponseFlag(StreamInfo::ResponseFlag::DownstreamProtocolError)) { common_access_log.mutable_response_flags()->set_downstream_protocol_error(true); } + if (stream_info.hasResponseFlag(StreamInfo::ResponseFlag::UpstreamMaxStreamDurationReached)) { + common_access_log.mutable_response_flags()->set_upstream_max_stream_duration_reached(true); + } } void Utility::extractCommonAccessLogProperties( diff --git a/test/common/access_log/access_log_impl_test.cc b/test/common/access_log/access_log_impl_test.cc index 7e5b54ab2bce..74010eaef8ba 100644 --- a/test/common/access_log/access_log_impl_test.cc +++ b/test/common/access_log/access_log_impl_test.cc @@ -945,12 +945,13 @@ name: accesslog - SI - IH - DPE + - UMSDR typed_config: "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog path: /dev/null )EOF"; - static_assert(StreamInfo::ResponseFlag::LastFlag == 0x40000, + static_assert(StreamInfo::ResponseFlag::LastFlag == 0x80000, "A flag has been added. Fix this code."); const std::vector all_response_flags = { @@ -973,7 +974,7 @@ name: accesslog StreamInfo::ResponseFlag::StreamIdleTimeout, StreamInfo::ResponseFlag::InvalidEnvoyRequestHeaders, StreamInfo::ResponseFlag::DownstreamProtocolError, - }; + StreamInfo::ResponseFlag::UpstreamMaxStreamDurationReached}; InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); @@ -1005,7 +1006,7 @@ name: accesslog "[\"embedded message failed validation\"] | caused by " "ResponseFlagFilterValidationError.Flags[i]: [\"value must be in list \" [\"LH\" \"UH\" " "\"UT\" \"LR\" \"UR\" \"UF\" \"UC\" \"UO\" \"NR\" \"DI\" \"FI\" \"RL\" \"UAEX\" \"RLSE\" " - "\"DC\" \"URX\" \"SI\" \"IH\" \"DPE\"]]): name: \"accesslog\"\nfilter {\n " + "\"DC\" \"URX\" \"SI\" \"IH\" \"DPE\" \"UMSDR\"]]): name: \"accesslog\"\nfilter {\n " "response_flag_filter {\n flags: \"UnsupportedFlag\"\n }\n}\ntyped_config {\n " "[type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog] {\n path: \"/dev/null\"\n " "}\n}\n"); @@ -1031,7 +1032,7 @@ name: accesslog "[\"embedded message failed validation\"] | caused by " "ResponseFlagFilterValidationError.Flags[i]: [\"value must be in list \" [\"LH\" \"UH\" " "\"UT\" \"LR\" \"UR\" \"UF\" \"UC\" \"UO\" \"NR\" \"DI\" \"FI\" \"RL\" \"UAEX\" \"RLSE\" " - "\"DC\" \"URX\" \"SI\" \"IH\" \"DPE\"]]): name: \"accesslog\"\nfilter {\n " + "\"DC\" \"URX\" \"SI\" \"IH\" \"DPE\" \"UMSDR\"]]): name: \"accesslog\"\nfilter {\n " "response_flag_filter {\n flags: \"UnsupportedFlag\"\n }\n}\ntyped_config {\n " "[type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog] {\n path: \"/dev/null\"\n " "}\n}\n"); diff --git a/test/common/router/router_test.cc b/test/common/router/router_test.cc index 886004887a2a..058f7a580d00 100644 --- a/test/common/router/router_test.cc +++ b/test/common/router/router_test.cc @@ -125,6 +125,12 @@ class RouterTestBase : public testing::Test { EXPECT_CALL(*per_try_timeout_, disableTimer()); } + void expectMaxStreamDurationTimerCreate() { + max_stream_duration_timer_ = new Event::MockTimer(&callbacks_.dispatcher_); + EXPECT_CALL(*max_stream_duration_timer_, enableTimer(_, _)); + EXPECT_CALL(*max_stream_duration_timer_, disableTimer()); + } + AssertionResult verifyHostUpstreamStats(uint64_t success, uint64_t error) { if (success != cm_.conn_pool_.host_->stats_.rq_success_.value()) { return 
AssertionFailure() << fmt::format("rq_success {} does not match expected {}", @@ -317,6 +323,13 @@ class RouterTestBase : public testing::Test { .WillByDefault(Return(include)); } + void setUpstreamMaxStreamDuration(uint32_t seconds) { + common_http_protocol_options_.mutable_max_stream_duration()->MergeFrom( + ProtobufUtil::TimeUtil::MillisecondsToDuration(seconds)); + ON_CALL(cm_.conn_pool_.host_->cluster_, commonHttpProtocolOptions()) + .WillByDefault(ReturnRef(common_http_protocol_options_)); + } + void enableHedgeOnPerTryTimeout() { callbacks_.route_->route_entry_.hedge_policy_.hedge_on_per_try_timeout_ = true; callbacks_.route_->route_entry_.hedge_policy_.additional_request_chance_ = @@ -334,6 +347,7 @@ class RouterTestBase : public testing::Test { Event::SimulatedTimeSystem test_time_; std::string upstream_zone_{"to_az"}; envoy::config::core::v3::Locality upstream_locality_; + envoy::config::core::v3::HttpProtocolOptions common_http_protocol_options_; NiceMock stats_store_; NiceMock cm_; NiceMock runtime_; @@ -347,6 +361,7 @@ class RouterTestBase : public testing::Test { RouterTestFilter router_; Event::MockTimer* response_timeout_{}; Event::MockTimer* per_try_timeout_{}; + Event::MockTimer* max_stream_duration_timer_{}; Network::Address::InstanceConstSharedPtr host_address_{ Network::Utility::resolveUrl("tcp://10.0.0.5:9211")}; NiceMock original_encoder_; @@ -3888,6 +3903,145 @@ TEST_F(RouterTest, RetryTimeoutDuringRetryDelay) { EXPECT_TRUE(verifyHostUpstreamStats(0, 1)); } +TEST_F(RouterTest, MaxStreamDurationValidlyConfiguredWithoutRetryPolicy) { + NiceMock encoder1; + Http::ResponseDecoder* response_decoder = nullptr; + setUpstreamMaxStreamDuration(500); + EXPECT_CALL(cm_.conn_pool_, newStream(_, _)) + .WillOnce(Invoke( + [&](Http::ResponseDecoder& decoder, + Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* { + response_decoder = &decoder; + callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_); + return nullptr; + })); + expectMaxStreamDurationTimerCreate(); + + Http::TestRequestHeaderMapImpl headers; + HttpTestUtility::addDefaultHeaders(headers); + router_.decodeHeaders(headers, false); + max_stream_duration_timer_->invokeCallback(); + + router_.onDestroy(); + EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); +} + +TEST_F(RouterTest, MaxStreamDurationDisabledIfSetToZero) { + NiceMock encoder1; + Http::ResponseDecoder* response_decoder = nullptr; + setUpstreamMaxStreamDuration(0); + EXPECT_CALL(cm_.conn_pool_, newStream(_, _)) + .WillOnce(Invoke( + [&](Http::ResponseDecoder& decoder, + Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* { + response_decoder = &decoder; + callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_); + return nullptr; + })); + + // not to be called timer creation. 
+ EXPECT_CALL(callbacks_.dispatcher_, createTimer_).Times(0); + + Http::TestRequestHeaderMapImpl headers; + HttpTestUtility::addDefaultHeaders(headers); + router_.decodeHeaders(headers, false); + + router_.onDestroy(); + EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); +} + +TEST_F(RouterTest, MaxStreamDurationCallbackNotCalled) { + NiceMock encoder1; + Http::ResponseDecoder* response_decoder = nullptr; + setUpstreamMaxStreamDuration(5000); + EXPECT_CALL(cm_.conn_pool_, newStream(_, _)) + .WillOnce(Invoke( + [&](Http::ResponseDecoder& decoder, + Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* { + response_decoder = &decoder; + callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_); + return nullptr; + })); + expectMaxStreamDurationTimerCreate(); + + Http::TestRequestHeaderMapImpl headers; + HttpTestUtility::addDefaultHeaders(headers); + router_.decodeHeaders(headers, false); + + router_.onDestroy(); + EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); +} + +TEST_F(RouterTest, MaxStreamDurationWhenDownstreamAlreadyStartedWithoutRetryPolicy) { + NiceMock encoder1; + Http::ResponseDecoder* response_decoder = nullptr; + setUpstreamMaxStreamDuration(500); + EXPECT_CALL(cm_.conn_pool_, newStream(_, _)) + .WillOnce(Invoke( + [&](Http::ResponseDecoder& decoder, + Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* { + response_decoder = &decoder; + callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_); + return nullptr; + })); + expectMaxStreamDurationTimerCreate(); + + Http::TestRequestHeaderMapImpl headers; + HttpTestUtility::addDefaultHeaders(headers); + router_.decodeHeaders(headers, false); + Http::ResponseHeaderMapPtr response_headers( + new Http::TestResponseHeaderMapImpl{{":status", "200"}}); + response_decoder->decodeHeaders(std::move(response_headers), false); + max_stream_duration_timer_->invokeCallback(); + + router_.onDestroy(); + EXPECT_TRUE(verifyHostUpstreamStats(1, 0)); +} + +TEST_F(RouterTest, MaxStreamDurationWithRetryPolicy) { + // First upstream request + NiceMock encoder1; + Http::ResponseDecoder* response_decoder = nullptr; + setUpstreamMaxStreamDuration(500); + EXPECT_CALL(cm_.conn_pool_, newStream(_, _)) + .WillOnce(Invoke( + [&](Http::ResponseDecoder& decoder, + Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* { + response_decoder = &decoder; + callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_); + return nullptr; + })); + expectMaxStreamDurationTimerCreate(); + + Http::TestRequestHeaderMapImpl headers{{"x-envoy-retry-on", "reset"}, + {"x-envoy-internal", "true"}}; + HttpTestUtility::addDefaultHeaders(headers); + router_.decodeHeaders(headers, false); + + router_.retry_state_->expectResetRetry(); + max_stream_duration_timer_->invokeCallback(); + + // Second upstream request + NiceMock encoder2; + setUpstreamMaxStreamDuration(500); + EXPECT_CALL(cm_.conn_pool_, newStream(_, _)) + .WillOnce(Invoke( + [&](Http::ResponseDecoder& decoder, + Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* { + response_decoder = &decoder; + callbacks.onPoolReady(encoder2, cm_.conn_pool_.host_, upstream_stream_info_); + return nullptr; + })); + expectMaxStreamDurationTimerCreate(); + router_.retry_state_->callback_(); + + EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _)).WillOnce(Return(RetryStatus::No)); + Http::ResponseHeaderMapPtr response_headers( + new Http::TestResponseHeaderMapImpl{{":status", 
"200"}}); + response_decoder->decodeHeaders(std::move(response_headers), true); + EXPECT_TRUE(verifyHostUpstreamStats(1, 1)); +} + TEST_F(RouterTest, RetryTimeoutDuringRetryDelayWithUpstreamRequestNoHost) { NiceMock encoder1; Http::ResponseDecoder* response_decoder = nullptr; diff --git a/test/common/router/upstream_request_test.cc b/test/common/router/upstream_request_test.cc index 91c2a00cee76..e228e6057ded 100644 --- a/test/common/router/upstream_request_test.cc +++ b/test/common/router/upstream_request_test.cc @@ -77,6 +77,7 @@ class MockRouterFilterInterface : public RouterFilterInterface { UpstreamRequest& upstream_request)); MOCK_METHOD(void, onUpstreamHostSelected, (Upstream::HostDescriptionConstSharedPtr host)); MOCK_METHOD(void, onPerTryTimeout, (UpstreamRequest & upstream_request)); + MOCK_METHOD(void, onStreamMaxDurationReached, (UpstreamRequest & upstream_request)); MOCK_METHOD(Http::StreamDecoderFilterCallbacks*, callbacks, ()); MOCK_METHOD(Upstream::ClusterInfoConstSharedPtr, cluster, ()); diff --git a/test/common/stream_info/utility_test.cc b/test/common/stream_info/utility_test.cc index b3a02d18f117..5b1b73760375 100644 --- a/test/common/stream_info/utility_test.cc +++ b/test/common/stream_info/utility_test.cc @@ -15,7 +15,7 @@ namespace StreamInfo { namespace { TEST(ResponseFlagUtilsTest, toShortStringConversion) { - static_assert(ResponseFlag::LastFlag == 0x40000, "A flag has been added. Fix this code."); + static_assert(ResponseFlag::LastFlag == 0x80000, "A flag has been added. Fix this code."); std::vector> expected = { std::make_pair(ResponseFlag::FailedLocalHealthCheck, "LH"), @@ -37,7 +37,7 @@ TEST(ResponseFlagUtilsTest, toShortStringConversion) { std::make_pair(ResponseFlag::StreamIdleTimeout, "SI"), std::make_pair(ResponseFlag::InvalidEnvoyRequestHeaders, "IH"), std::make_pair(ResponseFlag::DownstreamProtocolError, "DPE"), - }; + std::make_pair(ResponseFlag::UpstreamMaxStreamDurationReached, "UMSDR")}; for (const auto& test_case : expected) { NiceMock stream_info; @@ -65,7 +65,7 @@ TEST(ResponseFlagUtilsTest, toShortStringConversion) { } TEST(ResponseFlagsUtilsTest, toResponseFlagConversion) { - static_assert(ResponseFlag::LastFlag == 0x40000, "A flag has been added. Fix this code."); + static_assert(ResponseFlag::LastFlag == 0x80000, "A flag has been added. 
Fix this code."); std::vector> expected = { std::make_pair("LH", ResponseFlag::FailedLocalHealthCheck), @@ -87,7 +87,7 @@ TEST(ResponseFlagsUtilsTest, toResponseFlagConversion) { std::make_pair("SI", ResponseFlag::StreamIdleTimeout), std::make_pair("IH", ResponseFlag::InvalidEnvoyRequestHeaders), std::make_pair("DPE", ResponseFlag::DownstreamProtocolError), - }; + std::make_pair("UMSDR", ResponseFlag::UpstreamMaxStreamDurationReached)}; EXPECT_FALSE(ResponseFlagUtils::toResponseFlag("NonExistentFlag").has_value()); diff --git a/test/extensions/access_loggers/grpc/grpc_access_log_utils_test.cc b/test/extensions/access_loggers/grpc/grpc_access_log_utils_test.cc index 90d18811c43a..1519369ffbdb 100644 --- a/test/extensions/access_loggers/grpc/grpc_access_log_utils_test.cc +++ b/test/extensions/access_loggers/grpc/grpc_access_log_utils_test.cc @@ -40,6 +40,8 @@ TEST(UtilityResponseFlagsToAccessLogResponseFlagsTest, All) { common_access_log_expected.mutable_response_flags()->set_stream_idle_timeout(true); common_access_log_expected.mutable_response_flags()->set_invalid_envoy_request_headers(true); common_access_log_expected.mutable_response_flags()->set_downstream_protocol_error(true); + common_access_log_expected.mutable_response_flags()->set_upstream_max_stream_duration_reached( + true); EXPECT_EQ(common_access_log_expected.DebugString(), common_access_log.DebugString()); } diff --git a/test/integration/http_integration.cc b/test/integration/http_integration.cc index a736718e5df5..8dc0dedf59bb 100644 --- a/test/integration/http_integration.cc +++ b/test/integration/http_integration.cc @@ -1213,6 +1213,93 @@ void HttpIntegrationTest::testAdminDrain(Http::CodecClient::Type admin_request_t } } +void HttpIntegrationTest::testMaxStreamDuration() { + config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + auto* static_resources = bootstrap.mutable_static_resources(); + auto* cluster = static_resources->mutable_clusters(0); + auto* http_protocol_options = cluster->mutable_common_http_protocol_options(); + http_protocol_options->mutable_max_stream_duration()->MergeFrom( + ProtobufUtil::TimeUtil::MillisecondsToDuration(200)); + }); + + initialize(); + fake_upstreams_[0]->set_allow_unexpected_disconnects(true); + codec_client_ = makeHttpConnection(lookupPort("http")); + + auto encoder_decoder = codec_client_->startRequest(default_request_headers_); + request_encoder_ = &encoder_decoder.first; + auto response = std::move(encoder_decoder.second); + + ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + + test_server_->waitForCounterGe("cluster.cluster_0.upstream_rq_max_duration_reached", 1); + + if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { + codec_client_->waitForDisconnect(); + } else { + response->waitForReset(); + codec_client_->close(); + } +} + +void HttpIntegrationTest::testMaxStreamDurationWithRetry(bool invoke_retry_upstream_disconnect) { + config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + auto* static_resources = bootstrap.mutable_static_resources(); + auto* cluster = static_resources->mutable_clusters(0); + auto* http_protocol_options = cluster->mutable_common_http_protocol_options(); + http_protocol_options->mutable_max_stream_duration()->MergeFrom( + ProtobufUtil::TimeUtil::MillisecondsToDuration(1000)); + }); + + Http::TestRequestHeaderMapImpl retriable_header = 
Http::TestRequestHeaderMapImpl{ + {":method", "POST"}, {":path", "/test/long/url"}, {":scheme", "http"}, + {":authority", "host"}, {"x-forwarded-for", "10.0.0.1"}, {"x-envoy-retry-on", "5xx"}}; + initialize(); + fake_upstreams_[0]->set_allow_unexpected_disconnects(true); + codec_client_ = makeHttpConnection(lookupPort("http")); + + auto encoder_decoder = codec_client_->startRequest(retriable_header); + request_encoder_ = &encoder_decoder.first; + auto response = std::move(encoder_decoder.second); + + ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); + + if (fake_upstreams_[0]->httpType() == FakeHttpConnection::Type::HTTP1) { + ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); + ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + } else { + ASSERT_TRUE(upstream_request_->waitForReset()); + } + + test_server_->waitForCounterGe("cluster.cluster_0.upstream_rq_max_duration_reached", 1); + + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + + if (invoke_retry_upstream_disconnect) { + test_server_->waitForCounterGe("cluster.cluster_0.upstream_rq_max_duration_reached", 2); + if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { + codec_client_->waitForDisconnect(); + } else { + response->waitForReset(); + codec_client_->close(); + } + + EXPECT_EQ("408", response->headers().Status()->value().getStringView()); + } else { + Http::TestHeaderMapImpl response_headers{{":status", "200"}}; + upstream_request_->encodeHeaders(response_headers, true); + + response->waitForHeaders(); + codec_client_->close(); + + EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + } +} + std::string HttpIntegrationTest::listenerStatPrefix(const std::string& stat_name) { if (version_ == Network::Address::IpVersion::v4) { return "listener.127.0.0.1_0." + stat_name; diff --git a/test/integration/http_integration.h b/test/integration/http_integration.h index 359fe1bc38d3..85448db6d672 100644 --- a/test/integration/http_integration.h +++ b/test/integration/http_integration.h @@ -220,7 +220,9 @@ class HttpIntegrationTest : public BaseIntegrationTest { bool response_trailers_present); // Test /drain_listener from admin portal. void testAdminDrain(Http::CodecClient::Type admin_request_type); - + // Test max stream duration. + void testMaxStreamDuration(); + void testMaxStreamDurationWithRetry(bool invoke_retry_upstream_disconnect); Http::CodecClient::Type downstreamProtocol() const { return downstream_protocol_; } // Prefix listener stat with IP:port, including IP version dependent loopback address. 
std::string listenerStatPrefix(const std::string& stat_name); diff --git a/test/integration/protocol_integration_test.cc b/test/integration/protocol_integration_test.cc index 6b9590b70e6b..b65c3a53312b 100644 --- a/test/integration/protocol_integration_test.cc +++ b/test/integration/protocol_integration_test.cc @@ -782,6 +782,16 @@ TEST_P(ProtocolIntegrationTest, TwoRequests) { testTwoRequests(); } TEST_P(ProtocolIntegrationTest, TwoRequestsWithForcedBackup) { testTwoRequests(true); } +TEST_P(ProtocolIntegrationTest, BasicMaxStreamDuration) { testMaxStreamDuration(); } + +TEST_P(ProtocolIntegrationTest, MaxStreamDurationWithRetryPolicy) { + testMaxStreamDurationWithRetry(false); +} + +TEST_P(ProtocolIntegrationTest, MaxStreamDurationWithRetryPolicyWhenRetryUpstreamDisconnection) { + testMaxStreamDurationWithRetry(true); +} + // Verify that headers with underscores in their names are dropped from client requests // but remain in upstream responses. TEST_P(ProtocolIntegrationTest, HeadersWithUnderscoresDropped) { diff --git a/test/integration/stats_integration_test.cc b/test/integration/stats_integration_test.cc index 75012bd02116..ea7fa20fd23b 100644 --- a/test/integration/stats_integration_test.cc +++ b/test/integration/stats_integration_test.cc @@ -271,6 +271,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithFakeSymbolTable) { // 2020/03/24 10501 44261 44600 upstream: upstream_rq_retry_limit_exceeded. // 2020/04/02 10624 43356 44000 Use 100 clusters rather than 1000 to avoid timeouts // 2020/04/07 10661 43349 44000 fix clang tidy on master + // 2020/04/23 10531 44169 44600 http: max stream duration upstream support. // Note: when adjusting this value: EXPECT_MEMORY_EQ is active only in CI // 'release' builds, where we control the platform and tool-chain. So you @@ -284,8 +285,8 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithFakeSymbolTable) { // If you encounter a failure here, please see // https://github.com/envoyproxy/envoy/blob/master/source/docs/stats.md#stats-memory-tests // for details on how to fix. - EXPECT_MEMORY_EQ(m_per_cluster, 43993); - EXPECT_MEMORY_LE(m_per_cluster, 44100); + EXPECT_MEMORY_EQ(m_per_cluster, 44169); + EXPECT_MEMORY_LE(m_per_cluster, 44600); } TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithRealSymbolTable) { @@ -329,6 +330,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithRealSymbolTable) { // 2020/03/24 10501 36300 36800 upstream: upstream_rq_retry_limit_exceeded. // 2020/04/02 10624 35564 36000 Use 100 clusters rather than 1000 to avoid timeouts // 2020/04/07 10661 35557 36000 fix clang tidy on master + // 2020/04/23 10531 36281 36800 http: max stream duration upstream support. // Note: when adjusting this value: EXPECT_MEMORY_EQ is active only in CI // 'release' builds, where we control the platform and tool-chain. So you @@ -342,8 +344,8 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithRealSymbolTable) { // If you encounter a failure here, please see // https://github.com/envoyproxy/envoy/blob/master/source/docs/stats.md#stats-memory-tests // for details on how to fix. 
- EXPECT_MEMORY_EQ(m_per_cluster, 36201); - EXPECT_MEMORY_LE(m_per_cluster, 36300); + EXPECT_MEMORY_EQ(m_per_cluster, 36281); + EXPECT_MEMORY_LE(m_per_cluster, 36800); } TEST_P(ClusterMemoryTestRunner, MemoryLargeHostSizeWithStats) { diff --git a/test/integration/tcp_tunneling_integration_test.cc b/test/integration/tcp_tunneling_integration_test.cc index 3cdaa3074770..497fc5ee5b46 100644 --- a/test/integration/tcp_tunneling_integration_test.cc +++ b/test/integration/tcp_tunneling_integration_test.cc @@ -171,6 +171,30 @@ TEST_P(ConnectTerminationIntegrationTest, BuggyHeaders) { ASSERT_FALSE(response_->reset()); } +TEST_P(ConnectTerminationIntegrationTest, BasicMaxStreamDuration) { + config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + auto* static_resources = bootstrap.mutable_static_resources(); + auto* cluster = static_resources->mutable_clusters(0); + auto* http_protocol_options = cluster->mutable_common_http_protocol_options(); + http_protocol_options->mutable_max_stream_duration()->MergeFrom( + ProtobufUtil::TimeUtil::MillisecondsToDuration(1000)); + }); + + initialize(); + fake_upstreams_[0]->set_allow_unexpected_disconnects(true); + setUpConnection(); + sendBidirectionalData(); + + test_server_->waitForCounterGe("cluster.cluster_0.upstream_rq_max_duration_reached", 1); + + if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { + codec_client_->waitForDisconnect(); + } else { + response_->waitForReset(); + codec_client_->close(); + } +} + // For this class, forward the CONNECT request upstream class ProxyingConnectIntegrationTest : public HttpProtocolIntegrationTest { public: diff --git a/test/mocks/upstream/cluster_info.cc b/test/mocks/upstream/cluster_info.cc index 55b0fffbb2b8..215368b58c47 100644 --- a/test/mocks/upstream/cluster_info.cc +++ b/test/mocks/upstream/cluster_info.cc @@ -55,6 +55,8 @@ MockClusterInfo::MockClusterInfo() ON_CALL(*this, eds_service_name()).WillByDefault(ReturnPointee(&eds_service_name_)); ON_CALL(*this, http1Settings()).WillByDefault(ReturnRef(http1_settings_)); ON_CALL(*this, http2Options()).WillByDefault(ReturnRef(http2_options_)); + ON_CALL(*this, commonHttpProtocolOptions()) + .WillByDefault(ReturnRef(common_http_protocol_options_)); ON_CALL(*this, extensionProtocolOptions(_)).WillByDefault(Return(extension_protocol_options_)); ON_CALL(*this, maxResponseHeadersCount()) .WillByDefault(ReturnPointee(&max_response_headers_count_)); diff --git a/test/mocks/upstream/cluster_info.h b/test/mocks/upstream/cluster_info.h index 6e2e8c3b113f..9ef5fcc14618 100644 --- a/test/mocks/upstream/cluster_info.h +++ b/test/mocks/upstream/cluster_info.h @@ -90,6 +90,8 @@ class MockClusterInfo : public ClusterInfo { MOCK_METHOD(uint64_t, features, (), (const)); MOCK_METHOD(const Http::Http1Settings&, http1Settings, (), (const)); MOCK_METHOD(const envoy::config::core::v3::Http2ProtocolOptions&, http2Options, (), (const)); + MOCK_METHOD(const envoy::config::core::v3::HttpProtocolOptions&, commonHttpProtocolOptions, (), + (const)); MOCK_METHOD(ProtocolOptionsConfigConstSharedPtr, extensionProtocolOptions, (const std::string&), (const)); MOCK_METHOD(const envoy::config::cluster::v3::Cluster::CommonLbConfig&, lbConfig, (), (const)); @@ -131,6 +133,7 @@ class MockClusterInfo : public ClusterInfo { absl::optional eds_service_name_; Http::Http1Settings http1_settings_; envoy::config::core::v3::Http2ProtocolOptions http2_options_; + envoy::config::core::v3::HttpProtocolOptions common_http_protocol_options_; 
ProtocolOptionsConfigConstSharedPtr extension_protocol_options_; uint64_t max_requests_per_connection_{}; uint32_t max_response_headers_count_{Http::DEFAULT_MAX_HEADERS_COUNT}; @@ -158,6 +161,7 @@ class MockClusterInfo : public ClusterInfo { envoy::config::cluster::v3::Cluster::CommonLbConfig lb_config_; envoy::config::core::v3::Metadata metadata_; std::unique_ptr typed_metadata_; + absl::optional max_stream_duration_; }; class MockIdleTimeEnabledClusterInfo : public MockClusterInfo { From 1d03d302ba9670d04cc8c731393be90ff7b47f2f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Guti=C3=A9rrez=20Segal=C3=A9s?= Date: Mon, 4 May 2020 17:51:00 -0400 Subject: [PATCH 091/909] Add release note for #10884 (#11046) Signed-off-by: Raul Gutierrez Segales --- docs/root/version_history/current.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index c408bf6ea747..a8b6e0f694c3 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -40,6 +40,7 @@ Changes tracing is not forced. * router: allow retries of streaming or incomplete requests. This removes stat `rq_retry_skipped_request_not_complete`. * router: allow retries by default when upstream responds with :ref:`x-envoy-overloaded `. +* runtime: add new gauge :ref:`deprecated_feature_seen_since_process_start ` that gets reset across hot restarts. * stats: added the option to :ref:`report counters as deltas ` to the metrics service stats sink. * tracing: tracing configuration has been made fully dynamic and every HTTP connection manager can now have a separate :ref:`tracing provider `. From b204cdbaac95cf41e0bd8471e1d4bbb45542ffdc Mon Sep 17 00:00:00 2001 From: yanavlasov Date: Mon, 4 May 2020 21:20:27 -0400 Subject: [PATCH 092/909] build: Add NIST CPE IDs of envoy dependencies (#11017) Add NIST CPE IDs to Envoy dependencies. 
Risk Level: Low (build flags only) Testing: Unit Tests Signed-off-by: Yan Avlasov --- bazel/repositories.bzl | 8 +++-- bazel/repository_locations.bzl | 63 +++++++++++++++++++++++++++++++--- 2 files changed, 65 insertions(+), 6 deletions(-) diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index a880eea1a2bb..30046a9333c5 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -2,7 +2,7 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") load(":dev_binding.bzl", "envoy_dev_binding") load(":genrule_repository.bzl", "genrule_repository") load("@envoy_api//bazel:envoy_http_archive.bzl", "envoy_http_archive") -load(":repository_locations.bzl", "DEPENDENCY_REPOSITORIES", "USE_CATEGORIES") +load(":repository_locations.bzl", "DEPENDENCY_ANNOTATIONS", "DEPENDENCY_REPOSITORIES", "USE_CATEGORIES", "USE_CATEGORIES_WITH_CPE_OPTIONAL") load("@com_google_googleapis//:repository_rules.bzl", "switched_rules_by_language") PPC_SKIP_TARGETS = ["envoy.filters.http.lua"] @@ -27,6 +27,9 @@ def _repository_locations(): if "use_category" not in location: fail("The 'use_category' attribute must be defined for external dependecy " + str(location["urls"])) + if "cpe" not in location and not [category for category in USE_CATEGORIES_WITH_CPE_OPTIONAL if category in location["use_category"]]: + fail("The 'cpe' attribute must be defined for external dependecy " + str(location["urls"])) + for category in location["use_category"]: if category not in USE_CATEGORIES: fail("Unknown use_category value '" + category + "' for dependecy " + str(location["urls"])) @@ -39,7 +42,8 @@ REPOSITORY_LOCATIONS = _repository_locations() # See repository_locations.bzl for the list of annotation attributes. def _get_location(dependency): stripped = dict(REPOSITORY_LOCATIONS[dependency]) - stripped.pop("use_category", None) + for attribute in DEPENDENCY_ANNOTATIONS: + stripped.pop(attribute, None) return stripped def _repository_impl(name, **kwargs): diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 17a3eab8cd70..f84209eebe9c 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -1,8 +1,22 @@ +# Validation of content in this file is done on the bazel/repositories.bzl file to make it free of bazel +# constructs. This is to allow this file to be loaded into Python based build and maintenance tools. + # Envoy dependencies may be annotated with the following attributes: -# -# use_category - list of the categories describing how the dependency is being used. This attribute is used -# for automatic tracking of security posture of Envoy's dependencies. -# Possible values are documented in the USE_CATEGORIES list. +DEPENDENCY_ANNOTATIONS = [ + # List of the categories describing how the dependency is being used. This attribute is used + # for automatic tracking of security posture of Envoy's dependencies. + # Possible values are documented in the USE_CATEGORIES list below. + # This attribute is mandatory for each dependecy. + "use_category", + + # Attribute specifying CPE (Common Platform Enumeration, see https://nvd.nist.gov/products/cpe) ID + # of the dependency. The ID may be in v2.3 or v2.2 format, although v2.3 is prefferred. See + # https://nvd.nist.gov/products/cpe for CPE format. Use single wildcard '*' for version and vector elements + # i.e. 'cpe:2.3:a:nghttp2:nghttp2:*'. Use "N/A" for dependencies without CPE assigned. 
+ # This attribute is optional for components with use categories listed in the + # USE_CATEGORIES_WITH_CPE_OPTIONAL + "cpe", +] # NOTE: If a dependency use case is either dataplane or controlplane, the other uses are not needed # to be declared. @@ -23,6 +37,9 @@ USE_CATEGORIES = [ "other", ] +# Components with these use categories are not required to specify the 'cpe' annotation. +USE_CATEGORIES_WITH_CPE_OPTIONAL = ["build", "test", "other"] + DEPENDENCY_REPOSITORIES = dict( bazel_compdb = dict( sha256 = "87e376a685eacfb27bcc0d0cdf5ded1d0b99d868390ac50f452ba6ed781caffe", @@ -67,12 +84,14 @@ DEPENDENCY_REPOSITORIES = dict( # chromium-81.0.4044.69 urls = ["https://github.com/google/boringssl/archive/1c2769383f027befac5b75b6cedd25daf3bf4dcf.tar.gz"], use_category = ["dataplane"], + cpe = "N/A", ), boringssl_fips = dict( sha256 = "b12ad676ee533824f698741bd127f6fbc82c46344398a6d78d25e62c6c418c73", # fips-20180730 urls = ["https://commondatastorage.googleapis.com/chromium-boringssl-docs/fips/boringssl-66005f41fbc3529ffe8d007708756720529da20d.tar.xz"], use_category = ["dataplane"], + cpe = "N/A", ), com_google_absl = dict( sha256 = "14ee08e2089c2a9b6bf27e1d10abc5629c69c4d0bab4b78ec5b65a29ea1c2af7", @@ -80,12 +99,14 @@ DEPENDENCY_REPOSITORIES = dict( # 2020-03-05 urls = ["https://github.com/abseil/abseil-cpp/archive/cf3a1998e9d41709d4141e2f13375993cba1130e.tar.gz"], use_category = ["dataplane", "controlplane"], + cpe = "N/A", ), com_github_apache_thrift = dict( sha256 = "7d59ac4fdcb2c58037ebd4a9da5f9a49e3e034bf75b3f26d9fe48ba3d8806e6b", strip_prefix = "thrift-0.11.0", urls = ["https://files.pythonhosted.org/packages/c6/b4/510617906f8e0c5660e7d96fbc5585113f83ad547a3989b80297ac72a74c/thrift-0.11.0.tar.gz"], use_category = ["dataplane"], + cpe = "cpe:2.3:a:apache:thrift:*", ), com_github_c_ares_c_ares = dict( sha256 = "bbaab13d6ad399a278d476f533e4d88a7ec7d729507348bb9c2e3b207ba4c606", @@ -96,6 +117,7 @@ DEPENDENCY_REPOSITORIES = dict( # TODO(crazyxy): Update to release-1.16.0 when it is released. 
urls = ["https://github.com/c-ares/c-ares/archive/d7e070e7283f822b1d2787903cce3615536c5610.tar.gz"], use_category = ["dataplane"], + cpe = "cpe:2.3:a:c-ares_project:c-ares:*", ), com_github_circonus_labs_libcircllhist = dict( sha256 = "8165aa25e529d7d4b9ae849d3bf30371255a99d6db0421516abcff23214cdc2c", @@ -103,12 +125,14 @@ DEPENDENCY_REPOSITORIES = dict( # 2019-02-11 urls = ["https://github.com/circonus-labs/libcircllhist/archive/63a16dd6f2fc7bc841bb17ff92be8318df60e2e1.tar.gz"], use_category = ["observability"], + cpe = "N/A", ), com_github_cyan4973_xxhash = dict( sha256 = "952ebbf5b11fbf59ae5d760a562d1e9112278f244340ad7714e8556cbe54f7f7", strip_prefix = "xxHash-0.7.3", urls = ["https://github.com/Cyan4973/xxHash/archive/v0.7.3.tar.gz"], use_category = ["dataplane", "controlplane"], + cpe = "N/A", ), com_github_envoyproxy_sqlparser = dict( sha256 = "b2d3882698cf85b64c87121e208ce0b24d5fe2a00a5d058cf4571f1b25b45403", @@ -116,6 +140,7 @@ DEPENDENCY_REPOSITORIES = dict( # 2020-01-10 urls = ["https://github.com/envoyproxy/sql-parser/archive/b14d010afd4313f2372a1cc96aa2327e674cc798.tar.gz"], use_category = ["dataplane"], + cpe = "N/A", ), com_github_mirror_tclap = dict( sha256 = "f0ede0721dddbb5eba3a47385a6e8681b14f155e1129dd39d1a959411935098f", @@ -128,12 +153,14 @@ DEPENDENCY_REPOSITORIES = dict( strip_prefix = "fmt-6.0.0", urls = ["https://github.com/fmtlib/fmt/archive/6.0.0.tar.gz"], use_category = ["observability"], + cpe = "N/A", ), com_github_gabime_spdlog = dict( sha256 = "afd18f62d1bc466c60bef088e6b637b0284be88c515cedc59ad4554150af6043", strip_prefix = "spdlog-1.4.0", urls = ["https://github.com/gabime/spdlog/archive/v1.4.0.tar.gz"], use_category = ["observability"], + cpe = "N/A", ), com_github_google_libprotobuf_mutator = dict( sha256 = "", @@ -158,30 +185,35 @@ DEPENDENCY_REPOSITORIES = dict( strip_prefix = "grpc-d8f4928fa779f6005a7fe55a176bdb373b0f910f", urls = ["https://github.com/grpc/grpc/archive/d8f4928fa779f6005a7fe55a176bdb373b0f910f.tar.gz"], use_category = ["dataplane", "controlplane"], + cpe = "cpe:2.3:a:grpc:grpc:*", ), com_github_luajit_luajit = dict( sha256 = "409f7fe570d3c16558e594421c47bdd130238323c9d6fd6c83dedd2aaeb082a8", strip_prefix = "LuaJIT-2.1.0-beta3", urls = ["https://github.com/LuaJIT/LuaJIT/archive/v2.1.0-beta3.tar.gz"], use_category = ["dataplane"], + cpe = "N/A", ), com_github_moonjit_moonjit = dict( sha256 = "83deb2c880488dfe7dd8ebf09e3b1e7613ef4b8420de53de6f712f01aabca2b6", strip_prefix = "moonjit-2.2.0", urls = ["https://github.com/moonjit/moonjit/archive/2.2.0.tar.gz"], use_category = ["dataplane"], + cpe = "N/A", ), com_github_nghttp2_nghttp2 = dict( sha256 = "eb9d9046495a49dd40c7ef5d6c9907b51e5a6b320ea6e2add11eb8b52c982c47", strip_prefix = "nghttp2-1.40.0", urls = ["https://github.com/nghttp2/nghttp2/releases/download/v1.40.0/nghttp2-1.40.0.tar.gz"], use_category = ["dataplane"], + cpe = "cpe:2.3:a:nghttp2:nghttp2:*", ), io_opentracing_cpp = dict( sha256 = "015c4187f7a6426a2b5196f0ccd982aa87f010cf61f507ae3ce5c90523f92301", strip_prefix = "opentracing-cpp-1.5.1", urls = ["https://github.com/opentracing/opentracing-cpp/archive/v1.5.1.tar.gz"], use_category = ["observability"], + cpe = "N/A", ), com_lightstep_tracer_cpp = dict( sha256 = "0e99716598c010e56bc427ea3482be5ad2c534be8b039d172564deec1264a213", @@ -189,12 +221,14 @@ DEPENDENCY_REPOSITORIES = dict( # 2020-03-24 urls = 
["https://github.com/lightstep/lightstep-tracer-cpp/archive/3efe2372ee3d7c2138d6b26e542d757494a7938d.tar.gz"], use_category = ["observability"], + cpe = "N/A", ), com_github_datadog_dd_opentracing_cpp = dict( sha256 = "6dc1088ab7f788b6c849fbaa6300517c8fdf88991a70b778be79c284c36857bf", strip_prefix = "dd-opentracing-cpp-1.1.3", urls = ["https://github.com/DataDog/dd-opentracing-cpp/archive/v1.1.3.tar.gz"], use_category = ["observability"], + cpe = "N/A", ), com_github_google_benchmark = dict( sha256 = "3c6a165b6ecc948967a1ead710d4a181d7b0fbcaa183ef7ea84604994966221a", @@ -214,6 +248,7 @@ DEPENDENCY_REPOSITORIES = dict( # 2019-07-02 urls = ["https://github.com/libevent/libevent/archive/0d7d85c2083f7a4c9efe01c061486f332b576d28.tar.gz"], use_category = ["dataplane"], + cpe = "cpe:2.3:a:libevent_project:libevent:*", ), net_zlib = dict( # Use the dev branch of zlib to resolve fuzz bugs and out of bound @@ -224,18 +259,21 @@ DEPENDENCY_REPOSITORIES = dict( # 2019-04-14 development branch urls = ["https://github.com/madler/zlib/archive/79baebe50e4d6b73ae1f8b603f0ef41300110aa3.tar.gz"], use_category = ["dataplane"], + cpe = "cpe:2.3:a:gnu:zlib:*", ), com_github_jbeder_yaml_cpp = dict( sha256 = "77ea1b90b3718aa0c324207cb29418f5bced2354c2e483a9523d98c3460af1ed", strip_prefix = "yaml-cpp-yaml-cpp-0.6.3", urls = ["https://github.com/jbeder/yaml-cpp/archive/yaml-cpp-0.6.3.tar.gz"], use_category = ["dataplane"], + cpe = "N/A", ), com_github_msgpack_msgpack_c = dict( sha256 = "433cbcd741e1813db9ae4b2e192b83ac7b1d2dd7968a3e11470eacc6f4ab58d2", strip_prefix = "msgpack-3.2.1", urls = ["https://github.com/msgpack/msgpack-c/releases/download/cpp-3.2.1/msgpack-3.2.1.tar.gz"], use_category = ["observability"], + cpe = "N/A", ), com_github_google_jwt_verify = dict( sha256 = "d422a6eadd4bcdd0f9b122cd843a4015f8b18aebea6e1deb004bd4d401a8ef92", @@ -243,12 +281,14 @@ DEPENDENCY_REPOSITORIES = dict( # 2020-02-11 urls = ["https://github.com/google/jwt_verify_lib/archive/40e2cc938f4bcd059a97dc6c73f59ecfa5a71bac.tar.gz"], use_category = ["dataplane"], + cpe = "N/A", ), com_github_nodejs_http_parser = dict( sha256 = "8fa0ab8770fd8425a9b431fdbf91623c4d7a9cdb842b9339289bd2b0b01b0d3d", strip_prefix = "http-parser-2.9.3", urls = ["https://github.com/nodejs/http-parser/archive/v2.9.3.tar.gz"], use_category = ["dataplane"], + cpe = "N/A", ), com_github_pallets_jinja = dict( sha256 = "db49236731373e4f3118af880eb91bb0aa6978bc0cf8b35760f6a026f1a9ffc4", @@ -268,24 +308,28 @@ DEPENDENCY_REPOSITORIES = dict( # Changes through 2019-12-02 urls = ["https://github.com/Tencent/rapidjson/archive/dfbe1db9da455552f7a9ad5d2aea17dd9d832ac1.tar.gz"], use_category = ["dataplane"], + cpe = "cpe:2.3:a:tencent:rapidjson:*", ), com_github_twitter_common_lang = dict( sha256 = "56d1d266fd4767941d11c27061a57bc1266a3342e551bde3780f9e9eb5ad0ed1", strip_prefix = "twitter.common.lang-0.3.9/src", urls = ["https://files.pythonhosted.org/packages/08/bc/d6409a813a9dccd4920a6262eb6e5889e90381453a5f58938ba4cf1d9420/twitter.common.lang-0.3.9.tar.gz"], use_category = ["dataplane"], + cpe = "N/A", ), com_github_twitter_common_rpc = dict( sha256 = "0792b63fb2fb32d970c2e9a409d3d00633190a22eb185145fe3d9067fdaa4514", strip_prefix = "twitter.common.rpc-0.3.9/src", urls = ["https://files.pythonhosted.org/packages/be/97/f5f701b703d0f25fbf148992cd58d55b4d08d3db785aad209255ee67e2d0/twitter.common.rpc-0.3.9.tar.gz"], use_category = ["dataplane"], + cpe = "N/A", ), com_github_twitter_common_finagle_thrift 
= dict( sha256 = "1e3a57d11f94f58745e6b83348ecd4fa74194618704f45444a15bc391fde497a", strip_prefix = "twitter.common.finagle-thrift-0.3.9/src", urls = ["https://files.pythonhosted.org/packages/f9/e7/4f80d582578f8489226370762d2cf6bc9381175d1929eba1754e03f70708/twitter.common.finagle-thrift-0.3.9.tar.gz"], use_category = ["dataplane"], + cpe = "N/A", ), com_google_googletest = dict( sha256 = "9dc9157a9a1551ec7a7e43daea9a694a0bb5fb8bec81235d8a1e6ef64c716dcb", @@ -298,6 +342,7 @@ DEPENDENCY_REPOSITORIES = dict( strip_prefix = "protobuf-3.10.1", urls = ["https://github.com/protocolbuffers/protobuf/releases/download/v3.10.1/protobuf-all-3.10.1.tar.gz"], use_category = ["dataplane", "controlplane"], + cpe = "N/A", ), grpc_httpjson_transcoding = dict( sha256 = "62c8cb5ea2cca1142cde9d4a0778c52c6022345c3268c60ef81666946b958ad5", @@ -305,6 +350,7 @@ DEPENDENCY_REPOSITORIES = dict( # 2020-03-02 urls = ["https://github.com/grpc-ecosystem/grpc-httpjson-transcoding/archive/faf8af1e9788cd4385b94c8f85edab5ea5d4b2d6.tar.gz"], use_category = ["dataplane"], + cpe = "N/A", ), io_bazel_rules_go = dict( sha256 = "e88471aea3a3a4f19ec1310a55ba94772d087e9ce46e41ae38ecebe17935de7b", @@ -334,12 +380,14 @@ DEPENDENCY_REPOSITORIES = dict( # 2020-03-24 urls = ["https://github.com/census-instrumentation/opencensus-cpp/archive/04ed0211931f12b03c1a76b3907248ca4db7bc90.tar.gz"], use_category = ["observability"], + cpe = "N/A", ), com_github_curl = dict( sha256 = "01ae0c123dee45b01bbaef94c0bc00ed2aec89cb2ee0fd598e0d302a6b5e0a98", strip_prefix = "curl-7.69.1", urls = ["https://github.com/curl/curl/releases/download/curl-7_69_1/curl-7.69.1.tar.gz"], use_category = ["dataplane"], + cpe = "N/A", ), com_googlesource_chromium_v8 = dict( # This archive was created using https://storage.googleapis.com/envoyproxy-wee8/wee8-archive.sh @@ -347,18 +395,21 @@ DEPENDENCY_REPOSITORIES = dict( sha256 = "03ff00e41cf259db473dfade9548493e4a2372c0b701a66cd7ff76215bd55a64", urls = ["https://storage.googleapis.com/envoyproxy-wee8/wee8-8.1.307.28.tar.gz"], use_category = ["dataplane"], + cpe = "N/A", ), com_googlesource_quiche = dict( # Static snapshot of https://quiche.googlesource.com/quiche/+archive/41c9fdead26b31deefae3c325a2cf1a873688ba3.tar.gz sha256 = "75af53154402e1654cfd32d8aaeed5fab4dbb79d3cab8c9866019d5369c1889e", urls = ["https://storage.googleapis.com/quiche-envoy-integration/41c9fdead26b31deefae3c325a2cf1a873688ba3.tar.gz"], use_category = ["dataplane"], + cpe = "N/A", ), com_googlesource_googleurl = dict( # Static snapshot of https://quiche.googlesource.com/quiche/+archive/googleurl_dbf5ad147f60afc125e99db7549402af49a5eae8.tar.gz sha256 = "b40cd22cadba577b7281a76db66f6a66dd744edbad8cc2c861c2c976ef721e4d", urls = ["https://storage.googleapis.com/quiche-envoy-integration/googleurl_dbf5ad147f60afc125e99db7549402af49a5eae8.tar.gz"], use_category = ["dataplane"], + cpe = "N/A", ), com_google_cel_cpp = dict( sha256 = "326ec397b55e39f48bd5380ccded1af5b04653ee96e769cd4d694f9a3bacef50", @@ -366,12 +417,14 @@ DEPENDENCY_REPOSITORIES = dict( # 2020-02-26 urls = ["https://github.com/google/cel-cpp/archive/80e1cca533190d537a780ad007e8db64164c582e.tar.gz"], use_category = ["dataplane"], + cpe = "N/A", ), com_googlesource_code_re2 = dict( sha256 = "04ee2aaebaa5038554683329afc494e684c30f82f2a1e47eb62450e59338f84d", strip_prefix = "re2-2020-03-03", urls = ["https://github.com/google/re2/archive/2020-03-03.tar.gz"], use_category = ["dataplane"], + cpe = "N/A", ), # Included to access 
FuzzedDataProvider.h. This is compiler agnostic but # provided as part of the compiler-rt source distribution. We can't use the @@ -394,12 +447,14 @@ DEPENDENCY_REPOSITORIES = dict( # 2019-11-19 urls = ["https://github.com/protocolbuffers/upb/archive/8a3ae1ef3e3e3f26b45dec735c5776737fc7247f.tar.gz"], use_category = ["dataplane", "controlplane"], + cpe = "N/A", ), kafka_source = dict( sha256 = "e7b748a62e432b5770db6dbb3b034c68c0ea212812cb51603ee7f3a8a35f06be", strip_prefix = "kafka-2.4.0/clients/src/main/resources/common/message", urls = ["https://github.com/apache/kafka/archive/2.4.0.zip"], use_category = ["dataplane"], + cpe = "cpe:2.3:a:apache:kafka:*", ), kafka_server_binary = dict( sha256 = "b9582bab0c3e8d131953b1afa72d6885ca1caae0061c2623071e7f396f2ccfee", From ebc9e356b785c64123852c44f0058ba237052f4e Mon Sep 17 00:00:00 2001 From: asraa Date: Mon, 4 May 2020 21:48:39 -0400 Subject: [PATCH 093/909] remove fuzzit code (#11047) Remove leftover unused fuzzit code Risk level: Low Signed-off-by: Asra Ali --- README.md | 1 - bazel/BUILD | 6 ------ bazel/fuzzit_wrapper.sh | 19 ------------------- bazel/repositories.bzl | 7 ------- bazel/repository_locations.bzl | 5 ----- ci/do_ci.sh | 8 -------- 6 files changed, 46 deletions(-) delete mode 100755 bazel/fuzzit_wrapper.sh diff --git a/README.md b/README.md index 8c88a1fffa42..290119f82e23 100644 --- a/README.md +++ b/README.md @@ -12,7 +12,6 @@ involved and how Envoy plays a role, read the CNCF [![Azure Pipelines](https://dev.azure.com/cncf/envoy/_apis/build/status/11?branchName=master)](https://dev.azure.com/cncf/envoy/_build/latest?definitionId=11&branchName=master) [![CircleCI](https://circleci.com/gh/envoyproxy/envoy/tree/master.svg?style=shield)](https://circleci.com/gh/envoyproxy/envoy/tree/master) [![Fuzzing Status](https://oss-fuzz-build-logs.storage.googleapis.com/badges/envoy.svg)](https://bugs.chromium.org/p/oss-fuzz/issues/list?sort=-opened&can=1&q=proj:envoy) -[![fuzzit](https://app.fuzzit.dev/badge?org_id=envoyproxy)](https://app.fuzzit.dev/orgs/envoyproxy/dashboard) [![Jenkins](https://img.shields.io/jenkins/s/https/powerci.osuosl.org/job/build-envoy-master/badge/icon/.svg?label=ppc64le%20build)](http://powerci.osuosl.org/job/build-envoy-master/) ## Documentation diff --git a/bazel/BUILD b/bazel/BUILD index 8e4ce5d7c5ff..f94bc1da4433 100644 --- a/bazel/BUILD +++ b/bazel/BUILD @@ -43,12 +43,6 @@ genrule( stamp = 1, ) -sh_binary( - name = "fuzzit_wrapper", - srcs = ["fuzzit_wrapper.sh"], - data = ["@fuzzit_linux//:fuzzit"], -) - # A target to optionally link C++ standard library dynamically in sanitizer runs. # TSAN doesn't support libc/libstdc++ static linking per doc: # http://releases.llvm.org/8.0.1/tools/clang/docs/ThreadSanitizer.html diff --git a/bazel/fuzzit_wrapper.sh b/bazel/fuzzit_wrapper.sh deleted file mode 100755 index 5f66247bb867..000000000000 --- a/bazel/fuzzit_wrapper.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash - -set -e - -# run fuzzing regression or upload to Fuzzit for long running fuzzing job depending on whether FUZZIT_API_KEY is set - -FUZZIT="${TEST_SRCDIR}/fuzzit_linux/fuzzit" - -FUZZER_BINARY=$1 -FUZZIT_TARGET_NAME="$(basename $1 | sed -e s/_fuzz_test_with_libfuzzer$// -e s/_/-/g)" - -if [[ ! 
-z "${FUZZIT_API_KEY}" ]]; then - "${FUZZIT}" create target --skip-if-exists --public-corpus envoyproxy/"${FUZZIT_TARGET_NAME}" - - # Run fuzzing first so this is not affected by local-regression timeout - "${FUZZIT}" create job --skip-if-not-exists --host "${ENVOY_BUILD_IMAGE}" --type fuzzing envoyproxy/"${FUZZIT_TARGET_NAME}" "${FUZZER_BINARY}" -fi - -"${FUZZIT}" create job --skip-if-not-exists --host "${ENVOY_BUILD_IMAGE}" --type local-regression envoyproxy/"${FUZZIT_TARGET_NAME}" "${FUZZER_BINARY}" diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index 30046a9333c5..18230f79d954 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -190,7 +190,6 @@ def envoy_dependencies(skip_targets = []): # Unconditional, since we use this only for compiler-agnostic fuzzing utils. _org_llvm_releases_compiler_rt() - _fuzzit_linux() _python_deps() _cc_deps() @@ -705,12 +704,6 @@ def _org_llvm_releases_compiler_rt(): build_file = "@envoy//bazel/external:compiler_rt.BUILD", ) -def _fuzzit_linux(): - _repository_impl( - name = "fuzzit_linux", - build_file_content = "exports_files([\"fuzzit\"])", - ) - def _com_github_grpc_grpc(): _repository_impl("com_github_grpc_grpc") _repository_impl("build_bazel_rules_apple") diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index f84209eebe9c..9561aae50d6f 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -436,11 +436,6 @@ DEPENDENCY_REPOSITORIES = dict( urls = ["http://releases.llvm.org/9.0.0/compiler-rt-9.0.0.src.tar.xz"], use_category = ["test"], ), - fuzzit_linux = dict( - sha256 = "9ca76ac1c22d9360936006efddf992977ebf8e4788ded8e5f9d511285c9ac774", - urls = ["https://github.com/fuzzitdev/fuzzit/releases/download/v2.4.76/fuzzit_Linux_x86_64.zip"], - use_category = ["build", "test"], - ), upb = dict( sha256 = "e9f281c56ab1eb1f97a80ca8a83bb7ef73d230eabb8591f83876f4e7b85d9b47", strip_prefix = "upb-8a3ae1ef3e3e3f26b45dec735c5776737fc7247f", diff --git a/ci/do_ci.sh b/ci/do_ci.sh index 05cedd54d8da..cee7405cf7ea 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -300,14 +300,6 @@ elif [[ "$CI_TARGET" == "bazel.fuzz" ]]; then echo "Building envoy fuzzers and executing 100 fuzz iterations..." bazel_with_collection test ${BAZEL_BUILD_OPTIONS} --config=asan-fuzzer ${FUZZ_TEST_TARGETS} --test_arg="-runs=10" exit 0 -elif [[ "$CI_TARGET" == "bazel.fuzzit" ]]; then - setup_clang_toolchain - FUZZ_TEST_TARGETS="$(bazel query "attr('tags','fuzzer',${TEST_TARGETS})")" - echo "bazel ASAN libFuzzer build with fuzz tests ${FUZZ_TEST_TARGETS}" - echo "Building fuzzers and run under Fuzzit" - bazel_with_collection test ${BAZEL_BUILD_OPTIONS} --config=asan-fuzzer ${FUZZ_TEST_TARGETS} \ - --test_env=FUZZIT_API_KEY --test_env=ENVOY_BUILD_IMAGE --test_timeout=1200 --run_under=//bazel:fuzzit_wrapper - exit 0 elif [[ "$CI_TARGET" == "fix_format" ]]; then # proto_format.sh needs to build protobuf. 
setup_clang_toolchain From bf5436a0c1cef70ecd0929f995ee26c07beb723d Mon Sep 17 00:00:00 2001 From: Henry Yang <4411287+HenryYYang@users.noreply.github.com> Date: Mon, 4 May 2020 19:29:58 -0700 Subject: [PATCH 094/909] redis: mark cluster refresh manager as flaky test (#11051) Mark cluster refresh manager as flaky test Signed-off-by: Henry Yang --- test/extensions/common/redis/BUILD | 1 + test/extensions/common/redis/cluster_refresh_manager_test.cc | 1 + 2 files changed, 2 insertions(+) diff --git a/test/extensions/common/redis/BUILD b/test/extensions/common/redis/BUILD index a2185d580451..fc6009a41d71 100644 --- a/test/extensions/common/redis/BUILD +++ b/test/extensions/common/redis/BUILD @@ -26,6 +26,7 @@ envoy_extension_cc_test( name = "cluster_refresh_manager_test", srcs = ["cluster_refresh_manager_test.cc"], extension_name = "envoy.filters.network.redis_proxy", + flaky = True, deps = [ "//source/common/common:lock_guard_lib", "//source/common/common:thread_lib", diff --git a/test/extensions/common/redis/cluster_refresh_manager_test.cc b/test/extensions/common/redis/cluster_refresh_manager_test.cc index d4bca7edeade..e58f6d6ca728 100644 --- a/test/extensions/common/redis/cluster_refresh_manager_test.cc +++ b/test/extensions/common/redis/cluster_refresh_manager_test.cc @@ -25,6 +25,7 @@ namespace Extensions { namespace Common { namespace Redis { +// TODO: rewrite the tests to fix the flaky test class ClusterRefreshManagerTest : public testing::Test { public: ClusterRefreshManagerTest() From 690660eddd2643477bab98c010322ddce34b164c Mon Sep 17 00:00:00 2001 From: Yuchen Dai Date: Mon, 4 May 2020 19:30:37 -0700 Subject: [PATCH 095/909] test: extend raw connection driver (#10998) Avoid dispatcher allocation. Allow passing ssl transport socket. Add waitForConnected helper method. Signed-off-by: Yuchen Dai --- ci/filter_example_setup.sh | 2 +- .../direct_response_integration_test.cc | 15 +-- .../hystrix/hystrix_integration_test.cc | 17 ++- test/integration/echo_integration_test.cc | 46 +++----- test/integration/http2_integration_test.cc | 59 ++++------ test/integration/integration.cc | 13 +-- test/integration/integration.h | 15 +++ test/integration/integration_test.cc | 49 ++++---- test/integration/utility.cc | 24 +++- test/integration/utility.h | 24 +++- test/integration/xds_integration_test.cc | 108 ++++++++---------- 11 files changed, 184 insertions(+), 188 deletions(-) diff --git a/ci/filter_example_setup.sh b/ci/filter_example_setup.sh index df1ec083664f..1991ea1244da 100644 --- a/ci/filter_example_setup.sh +++ b/ci/filter_example_setup.sh @@ -5,7 +5,7 @@ set -e # This is the hash on https://github.com/envoyproxy/envoy-filter-example.git we pin to. 
-ENVOY_FILTER_EXAMPLE_GITSHA="c6c986cca7ad676cc1c33f2df7515cbbd2e02502" +ENVOY_FILTER_EXAMPLE_GITSHA="bb2e91fde758446fbccc3f8fedffce1827a47bcb" ENVOY_FILTER_EXAMPLE_SRCDIR="${BUILD_DIR}/envoy-filter-example" export ENVOY_FILTER_EXAMPLE_TESTS="//:echo2_integration_test //http-filter-example:http_filter_integration_test //:envoy_binary_test" diff --git a/test/extensions/filters/network/direct_response/direct_response_integration_test.cc b/test/extensions/filters/network/direct_response/direct_response_integration_test.cc index 6b6272165ae3..4b08edb73f47 100644 --- a/test/extensions/filters/network/direct_response/direct_response_integration_test.cc +++ b/test/extensions/filters/network/direct_response/direct_response_integration_test.cc @@ -43,17 +43,14 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, DirectResponseIntegrationTest, TestUtility::ipTestParamsToString); TEST_P(DirectResponseIntegrationTest, Hello) { - Buffer::OwnedImpl buffer("hello"); std::string response; - RawConnectionDriver connection( - lookupPort("listener_0"), buffer, - [&](Network::ClientConnection&, const Buffer::Instance& data) -> void { + auto connection = createConnectionDriver( + lookupPort("listener_0"), "hello", + [&response](Network::ClientConnection& conn, const Buffer::Instance& data) -> void { response.append(data.toString()); - connection.close(); - }, - version_); - - connection.run(); + conn.close(Network::ConnectionCloseType::FlushWrite); + }); + connection->run(); EXPECT_EQ("hello, world!\n", response); EXPECT_THAT(waitForAccessLog(listener_access_log_name_), testing::HasSubstr(StreamInfo::ResponseCodeDetails::get().DirectResponse)); diff --git a/test/extensions/stats_sinks/hystrix/hystrix_integration_test.cc b/test/extensions/stats_sinks/hystrix/hystrix_integration_test.cc index 9a5667e6d581..221201bff5c8 100644 --- a/test/extensions/stats_sinks/hystrix/hystrix_integration_test.cc +++ b/test/extensions/stats_sinks/hystrix/hystrix_integration_test.cc @@ -25,26 +25,23 @@ TEST_P(HystrixIntegrationTest, NoChunkEncoding) { if (downstreamProtocol() == Http::CodecClient::Type::HTTP1) { // For HTTP/1.1 we use a raw client to make absolutely sure there is no chunk encoding. - Buffer::OwnedImpl buffer("GET /hystrix_event_stream HTTP/1.1\r\nHost: admin\r\n\r\n"); std::string response; - RawConnectionDriver connection( - lookupPort("admin"), buffer, - [&](Network::ClientConnection& client, const Buffer::Instance& data) -> void { + auto connection = createConnectionDriver( + lookupPort("admin"), "GET /hystrix_event_stream HTTP/1.1\r\nHost: admin\r\n\r\n", + [&response](Network::ClientConnection& conn, const Buffer::Instance& data) -> void { response.append(data.toString()); - // Wait until there is a flush. if (response.find("rollingCountCollapsedRequests") != std::string::npos) { - client.close(Network::ConnectionCloseType::NoFlush); + conn.close(Network::ConnectionCloseType::NoFlush); } - }, - version_); - connection.run(); + }); + connection->run(); EXPECT_THAT(response, StartsWith("HTTP/1.1 200 OK\r\n")); // Make sure that the response is not actually chunk encoded, but it does have the hystrix flush // trailer. 
EXPECT_THAT(response, Not(HasSubstr("chunked"))); EXPECT_THAT(response, Not(HasSubstr("3\r\n:\n\n"))); EXPECT_THAT(response, HasSubstr(":\n\n")); - connection.close(); + connection->close(); } else { codec_client_ = makeHttpConnection(lookupPort("admin")); auto response = codec_client_->makeHeaderOnlyRequest( diff --git a/test/integration/echo_integration_test.cc b/test/integration/echo_integration_test.cc index 4009e6cacdb0..62c3f66037af 100644 --- a/test/integration/echo_integration_test.cc +++ b/test/integration/echo_integration_test.cc @@ -47,17 +47,14 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, EchoIntegrationTest, TestUtility::ipTestParamsToString); TEST_P(EchoIntegrationTest, Hello) { - Buffer::OwnedImpl buffer("hello"); std::string response; - RawConnectionDriver connection( - lookupPort("listener_0"), buffer, - [&](Network::ClientConnection&, const Buffer::Instance& data) -> void { + auto connection = createConnectionDriver( + lookupPort("listener_0"), "hello", + [&response](Network::ClientConnection& conn, const Buffer::Instance& data) -> void { response.append(data.toString()); - connection.close(); - }, - version_); - - connection.run(); + conn.close(Network::ConnectionCloseType::FlushWrite); + }); + connection->run(); EXPECT_EQ("hello", response); } @@ -97,16 +94,14 @@ name: new_listener ->ip() ->port(); - Buffer::OwnedImpl buffer("hello"); std::string response; - RawConnectionDriver connection( - new_listener_port, buffer, - [&](Network::ClientConnection&, const Buffer::Instance& data) -> void { + auto connection = createConnectionDriver( + lookupPort("listener_0"), "hello", + [&response](Network::ClientConnection& conn, const Buffer::Instance& data) -> void { response.append(data.toString()); - connection.close(); - }, - version_); - connection.run(); + conn.close(Network::ConnectionCloseType::FlushWrite); + }); + connection->run(); EXPECT_EQ("hello", response); // Remove the listener. @@ -128,20 +123,15 @@ name: new_listener // connect would unexpectedly succeed. bool connect_fail = false; for (int i = 0; i < 10; ++i) { - RawConnectionDriver connection2( - new_listener_port, buffer, - [&](Network::ClientConnection&, const Buffer::Instance&) -> void { FAIL(); }, version_); - while (connection2.connecting()) { - // Don't busy loop, but macOS often needs a moment to decide this connection isn't happening. 
- timeSystem().advanceTimeWait(std::chrono::milliseconds(10)); - - connection2.run(Event::Dispatcher::RunType::NonBlock); - } - if (connection2.connection().state() == Network::Connection::State::Closed) { + auto connection2 = createConnectionDriver( + new_listener_port, "hello", + [](Network::ClientConnection&, const Buffer::Instance&) -> void { FAIL(); }); + connection2->waitForConnection(); + if (connection2->connection().state() == Network::Connection::State::Closed) { connect_fail = true; break; } else { - connection2.close(); + connection2->close(); } } ASSERT_TRUE(connect_fail); diff --git a/test/integration/http2_integration_test.cc b/test/integration/http2_integration_test.cc index e205d512baa8..12a1d93711a2 100644 --- a/test/integration/http2_integration_test.cc +++ b/test/integration/http2_integration_test.cc @@ -823,31 +823,25 @@ TEST_P(Http2IntegrationTest, CodecErrorAfterStreamStart) { TEST_P(Http2IntegrationTest, BadMagic) { initialize(); - Buffer::OwnedImpl buffer("hello"); std::string response; - RawConnectionDriver connection( - lookupPort("http"), buffer, - [&](Network::ClientConnection&, const Buffer::Instance& data) -> void { + auto connection = createConnectionDriver( + lookupPort("http"), "hello", + [&response](Network::ClientConnection&, const Buffer::Instance& data) -> void { response.append(data.toString()); - }, - version_); - - connection.run(); + }); + connection->run(); EXPECT_EQ("", response); } TEST_P(Http2IntegrationTest, BadFrame) { initialize(); - Buffer::OwnedImpl buffer("PRI * HTTP/2.0\r\n\r\nSM\r\n\r\nhelloworldcauseanerror"); std::string response; - RawConnectionDriver connection( - lookupPort("http"), buffer, - [&](Network::ClientConnection&, const Buffer::Instance& data) -> void { + auto connection = createConnectionDriver( + lookupPort("http"), "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\nhelloworldcauseanerror", + [&response](Network::ClientConnection&, const Buffer::Instance& data) -> void { response.append(data.toString()); - }, - version_); - - connection.run(); + }); + connection->run(); EXPECT_TRUE(response.find("SETTINGS expected") != std::string::npos); } @@ -1124,26 +1118,25 @@ TEST_P(Http2IntegrationTest, SimultaneousRequestWithBufferLimits) { // Test downstream connection delayed close processing. TEST_P(Http2IntegrationTest, DelayedCloseAfterBadFrame) { initialize(); - Buffer::OwnedImpl buffer("PRI * HTTP/2.0\r\n\r\nSM\r\n\r\nhelloworldcauseanerror"); std::string response; - RawConnectionDriver connection( - lookupPort("http"), buffer, + + auto connection = createConnectionDriver( + lookupPort("http"), "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\nhelloworldcauseanerror", [&](Network::ClientConnection& connection, const Buffer::Instance& data) -> void { response.append(data.toString()); connection.dispatcher().exit(); - }, - version_); + }); - connection.run(); + connection->run(); EXPECT_THAT(response, HasSubstr("SETTINGS expected")); // Due to the multiple dispatchers involved (one for the RawConnectionDriver and another for the // Envoy server), it's possible the delayed close timer could fire and close the server socket // prior to the data callback above firing. Therefore, we may either still be connected, or have // received a remote close. 
- if (connection.lastConnectionEvent() == Network::ConnectionEvent::Connected) { - connection.run(); + if (connection->lastConnectionEvent() == Network::ConnectionEvent::Connected) { + connection->run(); } - EXPECT_EQ(connection.lastConnectionEvent(), Network::ConnectionEvent::RemoteClose); + EXPECT_EQ(connection->lastConnectionEvent(), Network::ConnectionEvent::RemoteClose); EXPECT_EQ(test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value(), 1); } @@ -1154,25 +1147,23 @@ TEST_P(Http2IntegrationTest, DelayedCloseDisabled) { [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& hcm) { hcm.mutable_delayed_close_timeout()->set_seconds(0); }); initialize(); - Buffer::OwnedImpl buffer("PRI * HTTP/2.0\r\n\r\nSM\r\n\r\nhelloworldcauseanerror"); std::string response; - RawConnectionDriver connection( - lookupPort("http"), buffer, + auto connection = createConnectionDriver( + lookupPort("http"), "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\nhelloworldcauseanerror", [&](Network::ClientConnection& connection, const Buffer::Instance& data) -> void { response.append(data.toString()); connection.dispatcher().exit(); - }, - version_); + }); - connection.run(); + connection->run(); EXPECT_THAT(response, HasSubstr("SETTINGS expected")); // Due to the multiple dispatchers involved (one for the RawConnectionDriver and another for the // Envoy server), it's possible for the 'connection' to receive the data and exit the dispatcher // prior to the FIN being received from the server. - if (connection.lastConnectionEvent() == Network::ConnectionEvent::Connected) { - connection.run(); + if (connection->lastConnectionEvent() == Network::ConnectionEvent::Connected) { + connection->run(); } - EXPECT_EQ(connection.lastConnectionEvent(), Network::ConnectionEvent::RemoteClose); + EXPECT_EQ(connection->lastConnectionEvent(), Network::ConnectionEvent::RemoteClose); EXPECT_EQ(test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value(), 0); } diff --git a/test/integration/integration.cc b/test/integration/integration.cc index ac40c2628be0..5f2f57a5e8b3 100644 --- a/test/integration/integration.cc +++ b/test/integration/integration.cc @@ -518,18 +518,17 @@ void BaseIntegrationTest::createTestServer(const std::string& json_path, void BaseIntegrationTest::sendRawHttpAndWaitForResponse(int port, const char* raw_http, std::string* response, bool disconnect_after_headers_complete) { - Buffer::OwnedImpl buffer(raw_http); - RawConnectionDriver connection( - port, buffer, - [&](Network::ClientConnection& client, const Buffer::Instance& data) -> void { + auto connection = createConnectionDriver( + port, raw_http, + [response, disconnect_after_headers_complete](Network::ClientConnection& client, + const Buffer::Instance& data) -> void { response->append(data.toString()); if (disconnect_after_headers_complete && response->find("\r\n\r\n") != std::string::npos) { client.close(Network::ConnectionCloseType::NoFlush); } - }, - version_); + }); - connection.run(); + connection->run(); } IntegrationTestServerPtr BaseIntegrationTest::createIntegrationTestServer( diff --git a/test/integration/integration.h b/test/integration/integration.h index 8c388b46314f..83fcccedb169 100644 --- a/test/integration/integration.h +++ b/test/integration/integration.h @@ -358,6 +358,21 @@ class BaseIntegrationTest : protected Logger::Loggable { void sendRawHttpAndWaitForResponse(int port, const char* raw_http, std::string* response, bool disconnect_after_headers_complete = false); 
+ /** + * Helper to create ConnectionDriver. + * + * @param port the port to connect to. + * @param initial_data the data to send. + * @param data_callback the callback on the received data. + **/ + std::unique_ptr createConnectionDriver( + uint32_t port, const std::string& initial_data, + std::function&& data_callback) { + Buffer::OwnedImpl buffer(initial_data); + return std::make_unique(port, buffer, data_callback, version_, + *dispatcher_); + } + protected: // Create the envoy server in another thread and start it. // Will not return until that server is listening. diff --git a/test/integration/integration_test.cc b/test/integration/integration_test.cc index 6e2255d6516e..f359c82df7b9 100644 --- a/test/integration/integration_test.cc +++ b/test/integration/integration_test.cc @@ -590,25 +590,23 @@ TEST_P(IntegrationTest, Pipeline) { initialize(); std::string response; - Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\nHost: host\r\n\r\nGET / HTTP/1.1\r\n\r\n"); - RawConnectionDriver connection( - lookupPort("http"), buffer, + auto connection = createConnectionDriver( + lookupPort("http"), "GET / HTTP/1.1\r\nHost: host\r\n\r\nGET / HTTP/1.1\r\n\r\n", [&](Network::ClientConnection&, const Buffer::Instance& data) -> void { response.append(data.toString()); - }, - version_); + }); // First response should be success. while (response.find("200") == std::string::npos) { - connection.run(Event::Dispatcher::RunType::NonBlock); + connection->run(Event::Dispatcher::RunType::NonBlock); } EXPECT_THAT(response, HasSubstr("HTTP/1.1 200 OK\r\n")); // Second response should be 400 (no host) while (response.find("400") == std::string::npos) { - connection.run(Event::Dispatcher::RunType::NonBlock); + connection->run(Event::Dispatcher::RunType::NonBlock); } EXPECT_THAT(response, HasSubstr("HTTP/1.1 400 Bad Request\r\n")); - connection.close(); + connection->close(); } // Checks to ensure that we reject the third request that is pipelined in the @@ -639,30 +637,27 @@ TEST_P(IntegrationTest, PipelineWithTrailers) { "trailer2:t3\r\n" "\r\n"); - Buffer::OwnedImpl buffer(absl::StrCat(good_request, good_request, bad_request)); - - RawConnectionDriver connection( - lookupPort("http"), buffer, - [&](Network::ClientConnection&, const Buffer::Instance& data) -> void { + auto connection = createConnectionDriver( + lookupPort("http"), absl::StrCat(good_request, good_request, bad_request), + [&response](Network::ClientConnection&, const Buffer::Instance& data) -> void { response.append(data.toString()); - }, - version_); + }); // First response should be success. 
size_t pos; while ((pos = response.find("200")) == std::string::npos) { - connection.run(Event::Dispatcher::RunType::NonBlock); + connection->run(Event::Dispatcher::RunType::NonBlock); } EXPECT_THAT(response, HasSubstr("HTTP/1.1 200 OK\r\n")); while (response.find("200", pos + 1) == std::string::npos) { - connection.run(Event::Dispatcher::RunType::NonBlock); + connection->run(Event::Dispatcher::RunType::NonBlock); } while (response.find("400") == std::string::npos) { - connection.run(Event::Dispatcher::RunType::NonBlock); + connection->run(Event::Dispatcher::RunType::NonBlock); } EXPECT_THAT(response, HasSubstr("HTTP/1.1 400 Bad Request\r\n")); - connection.close(); + connection->close(); } // Add a pipeline test where complete request headers in the first request merit @@ -673,24 +668,22 @@ TEST_P(IntegrationTest, PipelineInline) { initialize(); std::string response; - Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\n\r\nGET / HTTP/1.0\r\n\r\n"); - RawConnectionDriver connection( - lookupPort("http"), buffer, - [&](Network::ClientConnection&, const Buffer::Instance& data) -> void { + auto connection = createConnectionDriver( + lookupPort("http"), "GET / HTTP/1.1\r\n\r\nGET / HTTP/1.0\r\n\r\n", + [&response](Network::ClientConnection&, const Buffer::Instance& data) -> void { response.append(data.toString()); - }, - version_); + }); while (response.find("400") == std::string::npos) { - connection.run(Event::Dispatcher::RunType::NonBlock); + connection->run(Event::Dispatcher::RunType::NonBlock); } EXPECT_THAT(response, HasSubstr("HTTP/1.1 400 Bad Request\r\n")); while (response.find("426") == std::string::npos) { - connection.run(Event::Dispatcher::RunType::NonBlock); + connection->run(Event::Dispatcher::RunType::NonBlock); } EXPECT_THAT(response, HasSubstr("HTTP/1.1 426 Upgrade Required\r\n")); - connection.close(); + connection->close(); } TEST_P(IntegrationTest, NoHost) { diff --git a/test/integration/utility.cc b/test/integration/utility.cc index d23124563567..5a0ffa0ab577 100644 --- a/test/integration/utility.cc +++ b/test/integration/utility.cc @@ -114,15 +114,22 @@ IntegrationUtil::makeSingleRequest(uint32_t port, const std::string& method, con RawConnectionDriver::RawConnectionDriver(uint32_t port, Buffer::Instance& initial_data, ReadCallback data_callback, - Network::Address::IpVersion version) { + Network::Address::IpVersion version, + Event::Dispatcher& dispatcher, + Network::TransportSocketPtr transport_socket) + : dispatcher_(dispatcher) { api_ = Api::createApiForTest(stats_store_); Event::GlobalTimeSystem time_system; - dispatcher_ = api_->allocateDispatcher("test_thread"); callbacks_ = std::make_unique(); - client_ = dispatcher_->createClientConnection( + + if (transport_socket == nullptr) { + transport_socket = Network::Test::createRawBufferSocket(); + } + + client_ = dispatcher_.createClientConnection( Network::Utility::resolveUrl( fmt::format("tcp://{}:{}", Network::Test::getLoopbackAddressUrlString(version), port)), - Network::Address::InstanceConstSharedPtr(), Network::Test::createRawBufferSocket(), nullptr); + Network::Address::InstanceConstSharedPtr(), std::move(transport_socket), nullptr); client_->addConnectionCallbacks(*callbacks_); client_->addReadFilter(Network::ReadFilterSharedPtr{new ForwardingFilter(*this, data_callback)}); client_->write(initial_data, false); @@ -131,7 +138,14 @@ RawConnectionDriver::RawConnectionDriver(uint32_t port, Buffer::Instance& initia RawConnectionDriver::~RawConnectionDriver() = default; -void 
RawConnectionDriver::run(Event::Dispatcher::RunType run_type) { dispatcher_->run(run_type); } +void RawConnectionDriver::waitForConnection() { + while (!callbacks_->connected() && !callbacks_->closed()) { + Event::GlobalTimeSystem().timeSystem().advanceTimeWait(std::chrono::milliseconds(10)); + dispatcher_.run(Event::Dispatcher::RunType::NonBlock); + } +} + +void RawConnectionDriver::run(Event::Dispatcher::RunType run_type) { dispatcher_.run(run_type); } void RawConnectionDriver::close() { client_->close(Network::ConnectionCloseType::FlushWrite); } diff --git a/test/integration/utility.h b/test/integration/utility.h index c34c12ff2a51..21235c2e2b42 100644 --- a/test/integration/utility.h +++ b/test/integration/utility.h @@ -64,15 +64,19 @@ class RawConnectionDriver { using ReadCallback = std::function; RawConnectionDriver(uint32_t port, Buffer::Instance& initial_data, ReadCallback data_callback, - Network::Address::IpVersion version); + Network::Address::IpVersion version, Event::Dispatcher& dispatcher, + Network::TransportSocketPtr transport_socket = nullptr); ~RawConnectionDriver(); const Network::Connection& connection() { return *client_; } - bool connecting() { return callbacks_->connecting_; } void run(Event::Dispatcher::RunType run_type = Event::Dispatcher::RunType::Block); void close(); Network::ConnectionEvent lastConnectionEvent() const { return callbacks_->last_connection_event_; } + // Wait until connected or closed(). + void waitForConnection(); + + bool closed() { return callbacks_->closed(); } private: struct ForwardingFilter : public Network::ReadFilterBaseImpl { @@ -91,20 +95,30 @@ class RawConnectionDriver { }; struct ConnectionCallbacks : public Network::ConnectionCallbacks { + + bool connected() const { return connected_; } + bool closed() const { return closed_; } + + // Network::ConnectionCallbacks void onEvent(Network::ConnectionEvent event) override { last_connection_event_ = event; - connecting_ = false; + closed_ |= (event == Network::ConnectionEvent::RemoteClose || + event == Network::ConnectionEvent::LocalClose); + connected_ |= (event == Network::ConnectionEvent::Connected); } void onAboveWriteBufferHighWatermark() override {} void onBelowWriteBufferLowWatermark() override {} - bool connecting_{true}; Network::ConnectionEvent last_connection_event_; + + private: + bool connected_{false}; + bool closed_{false}; }; Stats::IsolatedStoreImpl stats_store_; Api::ApiPtr api_; - Event::DispatcherPtr dispatcher_; + Event::Dispatcher& dispatcher_; std::unique_ptr callbacks_; Network::ClientConnectionPtr client_; }; diff --git a/test/integration/xds_integration_test.cc b/test/integration/xds_integration_test.cc index 3acc2ab486a4..5bf1380a2951 100644 --- a/test/integration/xds_integration_test.cc +++ b/test/integration/xds_integration_test.cc @@ -1,6 +1,8 @@ #include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" +#include "common/buffer/buffer_impl.h" + #include "test/integration/http_integration.h" #include "test/integration/http_protocol_integration.h" #include "test/integration/ssl_utility.h" @@ -8,6 +10,7 @@ #include "test/test_common/utility.h" #include "gtest/gtest.h" +#include "utility.h" namespace Envoy { namespace { @@ -73,25 +76,6 @@ TEST_P(XdsIntegrationTestTypedStruct, RouterRequestAndResponseWithBodyNoBuffer) testRouterRequestAndResponseWithBody(1024, 512, false); } -// TODO(lambdai): Extend RawConnectionDriver with SSL and delete this one. 
-class SslClient { -public: - SslClient(Network::ClientConnectionPtr ssl_conn, Event::Dispatcher& dispatcher) - : ssl_conn_(std::move(ssl_conn)), - payload_reader_(std::make_shared(dispatcher)) { - ssl_conn_->addConnectionCallbacks(connect_callbacks_); - ssl_conn_->addReadFilter(payload_reader_); - ssl_conn_->connect(); - while (!connect_callbacks_.connected()) { - dispatcher.run(Event::Dispatcher::RunType::NonBlock); - } - } - Network::ClientConnectionPtr ssl_conn_; - MockWatermarkBuffer* client_write_buffer; - std::shared_ptr payload_reader_; - ConnectionStatusCallbacks connect_callbacks_; -}; - class LdsInplaceUpdateTcpProxyIntegrationTest : public testing::TestWithParam, public BaseIntegrationTest { @@ -143,16 +127,20 @@ class LdsInplaceUpdateTcpProxyIntegrationTest context_ = Ssl::createClientSslTransportSocketFactory({}, *context_manager_, *api_); } - std::unique_ptr connect(const std::string& alpn) { - Network::Address::InstanceConstSharedPtr address = - Ssl::getSslAddress(version_, lookupPort("tcp")); - auto ssl_conn = dispatcher_->createClientConnection( - address, Network::Address::InstanceConstSharedPtr(), + std::unique_ptr createConnectionAndWrite(const std::string& alpn, + const std::string& request, + std::string& response) { + Buffer::OwnedImpl buffer(request); + return std::make_unique( + lookupPort("tcp"), buffer, + [&response](Network::ClientConnection&, const Buffer::Instance& data) -> void { + response.append(data.toString()); + }, + version_, *dispatcher_, context_->createTransportSocket(std::make_shared( - absl::string_view(""), std::vector(), std::vector{alpn})), - nullptr); - return std::make_unique(std::move(ssl_conn), *dispatcher_); + absl::string_view(""), std::vector(), std::vector{alpn}))); } + std::unique_ptr context_manager_; Network::TransportSocketFactoryPtr context_; testing::NiceMock secret_manager_; @@ -162,12 +150,15 @@ class LdsInplaceUpdateTcpProxyIntegrationTest TEST_P(LdsInplaceUpdateTcpProxyIntegrationTest, ReloadConfigDeletingFilterChain) { setUpstreamCount(2); initialize(); - - auto client_0 = connect("alpn0"); + std::string response_0; + auto client_conn_0 = createConnectionAndWrite("alpn0", "hello", response_0); + client_conn_0->waitForConnection(); FakeRawConnectionPtr fake_upstream_connection_0; ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection_0)); - auto client_1 = connect("alpn1"); + std::string response_1; + auto client_conn_1 = createConnectionAndWrite("alpn1", "dummy", response_1); + client_conn_1->waitForConnection(); FakeRawConnectionPtr fake_upstream_connection_1; ASSERT_TRUE(fake_upstreams_[1]->waitForRawConnection(fake_upstream_connection_1)); @@ -181,23 +172,21 @@ TEST_P(LdsInplaceUpdateTcpProxyIntegrationTest, ReloadConfigDeletingFilterChain) new_config_helper.setLds("1"); test_server_->waitForCounterGe("listener_manager.listener_in_place_updated", 1); - while (!client_1->connect_callbacks_.closed()) { + while (!client_conn_1->closed()) { dispatcher_->run(Event::Dispatcher::RunType::NonBlock); } + ASSERT_EQ(response_1, ""); - Buffer::OwnedImpl buffer("hello"); - client_0->ssl_conn_->write(buffer, false); - client_0->ssl_conn_->dispatcher().run(Event::Dispatcher::RunType::NonBlock); std::string observed_data_0; ASSERT_TRUE(fake_upstream_connection_0->waitForData(5, &observed_data_0)); EXPECT_EQ("hello", observed_data_0); ASSERT_TRUE(fake_upstream_connection_0->write("world")); - client_0->payload_reader_->set_data_to_wait_for("world"); - 
client_0->ssl_conn_->dispatcher().run(Event::Dispatcher::RunType::Block); - client_0->ssl_conn_->close(Network::ConnectionCloseType::NoFlush); - - while (!client_0->connect_callbacks_.closed()) { + while (response_0.find("world") == std::string::npos) { + client_conn_0->run(Event::Dispatcher::RunType::NonBlock); + } + client_conn_0->close(); + while (!client_conn_0->closed()) { dispatcher_->run(Event::Dispatcher::RunType::NonBlock); } } @@ -208,7 +197,9 @@ TEST_P(LdsInplaceUpdateTcpProxyIntegrationTest, ReloadConfigAddingFilterChain) { setUpstreamCount(2); initialize(); - auto client_0 = connect("alpn0"); + std::string response_0; + auto client_conn_0 = createConnectionAndWrite("alpn0", "hello", response_0); + client_conn_0->waitForConnection(); FakeRawConnectionPtr fake_upstream_connection_0; ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection_0)); @@ -226,39 +217,34 @@ TEST_P(LdsInplaceUpdateTcpProxyIntegrationTest, ReloadConfigAddingFilterChain) { test_server_->waitForCounterGe("listener_manager.listener_in_place_updated", 1); test_server_->waitForCounterGe("listener_manager.listener_create_success", 2); - auto client_2 = connect("alpn2"); + std::string response_2; + auto client_conn_2 = createConnectionAndWrite("alpn2", "hello2", response_2); + client_conn_2->waitForConnection(); FakeRawConnectionPtr fake_upstream_connection_2; ASSERT_TRUE(fake_upstreams_[1]->waitForRawConnection(fake_upstream_connection_2)); - - Buffer::OwnedImpl buffer_2("hello"); - client_2->ssl_conn_->write(buffer_2, false); - client_2->ssl_conn_->dispatcher().run(Event::Dispatcher::RunType::NonBlock); std::string observed_data_2; - ASSERT_TRUE(fake_upstream_connection_2->waitForData(5, &observed_data_2)); - EXPECT_EQ("hello", observed_data_2); + ASSERT_TRUE(fake_upstream_connection_2->waitForData(6, &observed_data_2)); + EXPECT_EQ("hello2", observed_data_2); - ASSERT_TRUE(fake_upstream_connection_2->write("world")); - client_2->payload_reader_->set_data_to_wait_for("world"); - client_2->ssl_conn_->dispatcher().run(Event::Dispatcher::RunType::Block); - client_2->ssl_conn_->close(Network::ConnectionCloseType::NoFlush); - - while (!client_2->connect_callbacks_.closed()) { + ASSERT_TRUE(fake_upstream_connection_2->write("world2")); + while (response_2.find("world2") == std::string::npos) { + client_conn_2->run(Event::Dispatcher::RunType::NonBlock); + } + client_conn_2->close(); + while (!client_conn_2->closed()) { dispatcher_->run(Event::Dispatcher::RunType::NonBlock); } - Buffer::OwnedImpl buffer_0("hello"); - client_0->ssl_conn_->write(buffer_0, false); - client_0->ssl_conn_->dispatcher().run(Event::Dispatcher::RunType::NonBlock); std::string observed_data_0; ASSERT_TRUE(fake_upstream_connection_0->waitForData(5, &observed_data_0)); EXPECT_EQ("hello", observed_data_0); ASSERT_TRUE(fake_upstream_connection_0->write("world")); - client_0->payload_reader_->set_data_to_wait_for("world"); - client_0->ssl_conn_->dispatcher().run(Event::Dispatcher::RunType::Block); - client_0->ssl_conn_->close(Network::ConnectionCloseType::NoFlush); - - while (!client_0->connect_callbacks_.closed()) { + while (response_0.find("world") == std::string::npos) { + client_conn_0->run(Event::Dispatcher::RunType::NonBlock); + } + client_conn_0->close(); + while (!client_conn_0->closed()) { dispatcher_->run(Event::Dispatcher::RunType::NonBlock); } } From 78eaac391b519a69900ebcdccea668f690593e9a Mon Sep 17 00:00:00 2001 From: Konstantin Belyalov Date: Mon, 4 May 2020 21:47:44 -0700 Subject: [PATCH 096/909] 
grpc/transcoding: add support for fragmented httpBody (#11060) Additional Description: As mentioned in #10673 when transcoding of httpBody in streaming mode and grpc frame gets fragmented (e.g. large enough) - http header Content-Type may not be set to correct value from content-type of httpBody. Risk Level: low Testing: new unit/integration test Docs Changes: not changed bcz of this feature was introduced in the same release Release Notes: not added bcz of this feature was introduced in the same release Signed-off-by: Konstantin Belyalov --- .../json_transcoder_filter.cc | 13 +++-- .../json_transcoder_filter.h | 6 ++- .../grpc_json_transcoder_integration_test.cc | 47 +++++++++++++++++++ .../json_transcoder_filter_test.cc | 35 ++++++++++++++ 4 files changed, 96 insertions(+), 5 deletions(-) diff --git a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc index 78ae748fbc58..980a614a5efb 100644 --- a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc +++ b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc @@ -519,10 +519,13 @@ Http::FilterDataStatus JsonTranscoderFilter::encodeData(Buffer::Instance& data, has_body_ = true; if (method_->response_type_is_http_body_) { - buildResponseFromHttpBodyOutput(*response_headers_, data); + bool frame_processed = buildResponseFromHttpBodyOutput(*response_headers_, data); if (!method_->descriptor_->server_streaming()) { return Http::FilterDataStatus::StopIterationAndBuffer; } + if (!http_body_response_headers_set_ && !frame_processed) { + return Http::FilterDataStatus::StopIterationAndBuffer; + } return Http::FilterDataStatus::Continue; } @@ -667,12 +670,12 @@ void JsonTranscoderFilter::maybeSendHttpBodyRequestMessage() { first_request_sent_ = true; } -void JsonTranscoderFilter::buildResponseFromHttpBodyOutput( +bool JsonTranscoderFilter::buildResponseFromHttpBodyOutput( Http::ResponseHeaderMap& response_headers, Buffer::Instance& data) { std::vector frames; decoder_.decode(data, frames); if (frames.empty()) { - return; + return false; } google::api::HttpBody http_body; @@ -688,7 +691,7 @@ void JsonTranscoderFilter::buildResponseFromHttpBodyOutput( // Non streaming case: single message with content type / length response_headers.setContentType(http_body.content_type()); response_headers.setContentLength(body.size()); - return; + return true; } else if (!http_body_response_headers_set_) { // Streaming case: set content type only once from first HttpBody message response_headers.setContentType(http_body.content_type()); @@ -696,6 +699,8 @@ void JsonTranscoderFilter::buildResponseFromHttpBodyOutput( } } } + + return true; } bool JsonTranscoderFilter::maybeConvertGrpcStatus(Grpc::Status::GrpcStatus grpc_status, diff --git a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.h b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.h index 5c271dafde24..31180556ac54 100644 --- a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.h +++ b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.h @@ -162,7 +162,11 @@ class JsonTranscoderFilter : public Http::StreamFilter, public Logger::Loggable< bool checkIfTranscoderFailed(const std::string& details); bool readToBuffer(Protobuf::io::ZeroCopyInputStream& stream, Buffer::Instance& data); void maybeSendHttpBodyRequestMessage(); - void 
buildResponseFromHttpBodyOutput(Http::ResponseHeaderMap& response_headers, + /** + * Builds response from HttpBody protobuf. + * Returns true if at least one gRPC frame has processed. + */ + bool buildResponseFromHttpBodyOutput(Http::ResponseHeaderMap& response_headers, Buffer::Instance& data); bool maybeConvertGrpcStatus(Grpc::Status::GrpcStatus grpc_status, Http::ResponseHeaderOrTrailerMap& trailers); diff --git a/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc b/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc index 27bf41e4a661..982a3dffa707 100644 --- a/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc +++ b/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc @@ -387,6 +387,53 @@ TEST_P(GrpcJsonTranscoderIntegrationTest, StreamGetHttpBodyMultipleFramesInData) EXPECT_EQ(response->body(), "HelloHelloHello"); } +TEST_P(GrpcJsonTranscoderIntegrationTest, StreamGetHttpBodyFragmented) { + HttpIntegrationTest::initialize(); + + // Make request to gRPC upstream + codec_client_ = makeHttpConnection(lookupPort("http")); + auto response = codec_client_->makeHeaderOnlyRequest(Http::TestRequestHeaderMapImpl{ + {":method", "GET"}, + {":path", "/indexStream"}, + {":authority", "host"}, + }); + waitForNextUpstreamRequest(); + + // Send fragmented gRPC response + // Headers + Http::TestResponseHeaderMapImpl response_headers; + response_headers.setStatus(200); + response_headers.setContentType("application/grpc"); + upstream_request_->encodeHeaders(response_headers, false); + // Fragmented payload + google::api::HttpBody http_body; + http_body.set_content_type("text/plain"); + http_body.set_data(std::string(1024, 'a')); + // Fragment gRPC frame into 2 buffers equally divided + Buffer::OwnedImpl fragment1; + auto fragment2 = Grpc::Common::serializeToGrpcFrame(http_body); + fragment1.move(*fragment2, fragment2->length() / 2); + upstream_request_->encodeData(fragment1, false); + upstream_request_->encodeData(*fragment2, false); + // Trailers + Http::TestResponseTrailerMapImpl response_trailers; + auto grpc_status = Status(); + response_trailers.setGrpcStatus(static_cast(grpc_status.error_code())); + response_trailers.setGrpcMessage( + absl::string_view(grpc_status.error_message().data(), grpc_status.error_message().size())); + upstream_request_->encodeTrailers(response_trailers); + EXPECT_TRUE(upstream_request_->complete()); + + // Wait for complete + response->waitForEndStream(); + EXPECT_TRUE(response->complete()); + // Ensure that body was actually replaced + EXPECT_EQ(response->body(), http_body.data()); + // As well as content-type header + auto content_type = response->headers().get(Http::LowerCaseString("content-type")); + EXPECT_EQ("text/plain", content_type->value().getStringView()); +} + TEST_P(GrpcJsonTranscoderIntegrationTest, UnaryEchoHttpBody) { HttpIntegrationTest::initialize(); testTranscoding( diff --git a/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc b/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc index 05977528ad16..20acafd6f472 100644 --- a/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc +++ b/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc @@ -899,6 +899,41 @@ TEST_F(GrpcJsonTranscoderFilterTest, TranscodingStreamWithHttpBodyAsOutput) { EXPECT_EQ(Http::FilterTrailersStatus::Continue, 
filter_.decodeTrailers(request_trailers)); } +TEST_F(GrpcJsonTranscoderFilterTest, TranscodingStreamWithFragmentedHttpBody) { + Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, {":path", "/indexStream"}}; + + EXPECT_CALL(decoder_callbacks_, clearRouteCache()); + + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false)); + EXPECT_EQ("application/grpc", request_headers.get_("content-type")); + EXPECT_EQ("/indexStream", request_headers.get_("x-envoy-original-path")); + EXPECT_EQ("/bookstore.Bookstore/GetIndexStream", request_headers.get_(":path")); + EXPECT_EQ("trailers", request_headers.get_("te")); + + Http::TestResponseHeaderMapImpl response_headers{{"content-type", "application/grpc"}, + {":status", "200"}}; + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_.encodeHeaders(response_headers, false)); + + // "Send" one fragmented gRPC frame + google::api::HttpBody http_body; + http_body.set_content_type("text/html"); + http_body.set_data("
Fragmented Message!
"); + auto fragment2 = Grpc::Common::serializeToGrpcFrame(http_body); + Buffer::OwnedImpl fragment1; + fragment1.move(*fragment2, fragment2->length() / 2); + EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_.encodeData(fragment1, false)); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(*fragment2, false)); + + // Ensure that content-type is correct (taken from httpBody) + EXPECT_EQ("text/html", response_headers.get_("content-type")); + + // Fragment1 is buffered by transcoder + EXPECT_EQ(0, fragment1.length()); + // Second fragment contains entire body + EXPECT_EQ(http_body.data(), fragment2->toString()); +} + class GrpcJsonTranscoderFilterGrpcStatusTest : public GrpcJsonTranscoderFilterTest { public: GrpcJsonTranscoderFilterGrpcStatusTest( From 0cb6bbf59d46ee60b938f9fb4c3eb51ed62ea41f Mon Sep 17 00:00:00 2001 From: Yuchen Dai Date: Tue, 5 May 2020 06:51:09 -0700 Subject: [PATCH 097/909] test: optimize waitForNextUpstreamRequest() (#11026) Commit Message: //test/integration:protocol_integration_test After: Stats over 50 runs: max = 11.8s, min = 1.2s, avg = 4.0s, dev = 4.3s Before: Stats over 50 runs: max = 2.6s, min = 1.4s, avg = 2.4s, dev = 0.3s Signed-off-by: Yuchen Dai silentdai@gmail.com Additional Description: Risk Level: LOW Testing: test Docs Changes: n/a Release Notes: n/a Signed-off-by: Yuchen Dai --- test/integration/http_integration.cc | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/test/integration/http_integration.cc b/test/integration/http_integration.cc index 8dc0dedf59bb..034a143266e1 100644 --- a/test/integration/http_integration.cc +++ b/test/integration/http_integration.cc @@ -357,16 +357,24 @@ HttpIntegrationTest::waitForNextUpstreamRequest(const std::vector& ups absl::optional upstream_with_request; // If there is no upstream connection, wait for it to be established. if (!fake_upstream_connection_) { - AssertionResult result = AssertionFailure(); - for (auto upstream_index : upstream_indices) { - result = fake_upstreams_[upstream_index]->waitForHttpConnection( - *dispatcher_, fake_upstream_connection_, connection_wait_timeout, max_request_headers_kb_, - max_request_headers_count_); + int upstream_index = 0; + Event::TestTimeSystem& time_system = timeSystem(); + auto end_time = time_system.monotonicTime() + connection_wait_timeout; + // Loop over the upstreams until the call times out or an upstream request is received. + while (!result) { + upstream_index = upstream_index % upstream_indices.size(); + result = fake_upstreams_[upstream_indices[upstream_index]]->waitForHttpConnection( + *dispatcher_, fake_upstream_connection_, std::chrono::milliseconds(5), + max_request_headers_kb_, max_request_headers_count_); if (result) { upstream_with_request = upstream_index; break; + } else if (time_system.monotonicTime() >= end_time) { + result = (AssertionFailure() << "Timed out waiting for new connection."); + break; } + ++upstream_index; } RELEASE_ASSERT(result, result.message()); } From 698df2c5f1b9b71d49c183d902a6cde59a335da7 Mon Sep 17 00:00:00 2001 From: Yuchen Dai Date: Tue, 5 May 2020 07:03:33 -0700 Subject: [PATCH 098/909] doc: clarify --concurrency 0 (#11036) Risk Level: n/a Testing: n/a Docs Changes: Clarify doc that concurrency 0 and 1 are equivalent. 
Release Notes: n/a Signed-off-by: Yuchen Dai --- docs/root/operations/cli.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/root/operations/cli.rst b/docs/root/operations/cli.rst index 877f4e2d866a..f24e52ce6b5f 100644 --- a/docs/root/operations/cli.rst +++ b/docs/root/operations/cli.rst @@ -69,7 +69,8 @@ following are the command line options that Envoy supports. .. option:: --concurrency *(optional)* The number of :ref:`worker threads ` to run. If not - specified defaults to the number of hardware threads on the machine. + specified defaults to the number of hardware threads on the machine. If set to zero, Envoy will + still run one worker thread. .. option:: -l , --log-level From 893ed58ef0f77e71b46df964b90d3e4e4165e757 Mon Sep 17 00:00:00 2001 From: yanavlasov Date: Tue, 5 May 2020 14:30:38 -0400 Subject: [PATCH 099/909] Move ratelimit integration test into its filter specific directory (#11067) Move ratelimit integration test into its filter specific directory. Remove unused dependencies on this filter as well. Signed-off-by: Yan Avlasov --- test/extensions/filters/http/ratelimit/BUILD | 20 +++++++++++++++++++ .../ratelimit}/ratelimit_integration_test.cc | 0 .../network/http_connection_manager/BUILD | 1 - test/integration/BUILD | 19 ------------------ test/server/BUILD | 1 - 5 files changed, 20 insertions(+), 21 deletions(-) rename test/{integration => extensions/filters/http/ratelimit}/ratelimit_integration_test.cc (100%) diff --git a/test/extensions/filters/http/ratelimit/BUILD b/test/extensions/filters/http/ratelimit/BUILD index becbc98059fb..fe03b1f6eb30 100644 --- a/test/extensions/filters/http/ratelimit/BUILD +++ b/test/extensions/filters/http/ratelimit/BUILD @@ -45,3 +45,23 @@ envoy_extension_cc_test( "@envoy_api//envoy/extensions/filters/http/ratelimit/v3:pkg_cc_proto", ], ) + +envoy_extension_cc_test( + name = "ratelimit_integration_test", + srcs = ["ratelimit_integration_test.cc"], + extension_name = "envoy.filters.http.ratelimit", + tags = ["fails_on_windows"], + deps = [ + "//source/common/buffer:zero_copy_input_stream_lib", + "//source/common/grpc:codec_lib", + "//source/common/grpc:common_lib", + "//source/extensions/filters/http/ratelimit:config", + "//test/common/grpc:grpc_client_integration_lib", + "//test/integration:http_integration_lib", + "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", + "@envoy_api//envoy/config/listener/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/ratelimit/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", + "@envoy_api//envoy/service/ratelimit/v3:pkg_cc_proto", + ], +) diff --git a/test/integration/ratelimit_integration_test.cc b/test/extensions/filters/http/ratelimit/ratelimit_integration_test.cc similarity index 100% rename from test/integration/ratelimit_integration_test.cc rename to test/extensions/filters/http/ratelimit/ratelimit_integration_test.cc diff --git a/test/extensions/filters/network/http_connection_manager/BUILD b/test/extensions/filters/network/http_connection_manager/BUILD index 0b8e125be0c5..ebaaf0038867 100644 --- a/test/extensions/filters/network/http_connection_manager/BUILD +++ b/test/extensions/filters/network/http_connection_manager/BUILD @@ -27,7 +27,6 @@ envoy_extension_cc_test( "//source/common/event:dispatcher_lib", "//source/extensions/access_loggers/file:config", "//source/extensions/filters/http/health_check:config", - "//source/extensions/filters/http/ratelimit:config", 
"//source/extensions/filters/http/router:config", "//source/extensions/filters/network/http_connection_manager:config", "//test/integration/filters:encoder_decoder_buffer_filter_lib", diff --git a/test/integration/BUILD b/test/integration/BUILD index f145741f4f64..d18b58487f89 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -782,25 +782,6 @@ envoy_cc_test( ], ) -envoy_cc_test( - name = "ratelimit_integration_test", - srcs = ["ratelimit_integration_test.cc"], - tags = ["fails_on_windows"], - deps = [ - ":http_integration_lib", - "//source/common/buffer:zero_copy_input_stream_lib", - "//source/common/grpc:codec_lib", - "//source/common/grpc:common_lib", - "//source/extensions/filters/http/ratelimit:config", - "//test/common/grpc:grpc_client_integration_lib", - "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", - "@envoy_api//envoy/config/listener/v3:pkg_cc_proto", - "@envoy_api//envoy/extensions/filters/http/ratelimit/v3:pkg_cc_proto", - "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", - "@envoy_api//envoy/service/ratelimit/v3:pkg_cc_proto", - ], -) - envoy_cc_test( name = "rtds_integration_test", srcs = ["rtds_integration_test.cc"], diff --git a/test/server/BUILD b/test/server/BUILD index ce027a2e46f6..bdab7e6a7e4c 100644 --- a/test/server/BUILD +++ b/test/server/BUILD @@ -354,7 +354,6 @@ envoy_cc_test( "//source/extensions/filters/http/buffer:config", "//source/extensions/filters/http/grpc_http1_bridge:config", "//source/extensions/filters/http/health_check:config", - "//source/extensions/filters/http/ratelimit:config", "//source/extensions/filters/http/router:config", "//source/extensions/filters/network/http_connection_manager:config", "//source/extensions/filters/network/redis_proxy:config", From 4abe685446316f79ba1627cf830f0c0d398abdaf Mon Sep 17 00:00:00 2001 From: Jose Ulises Nino Rivera Date: Tue, 5 May 2020 12:38:55 -0700 Subject: [PATCH 100/909] docs: clarity around http filter ordering (#11057) I still had an arcane mental model (pre https://github.com/envoyproxy/envoy/pull/5955) where both decoder and encoder filters where invoked in the same order as the configuration order. My mind was so used to this I even failed to notice the code that prepends encoder filters into the encoder_filters_ list in the conn manager. These documentation additions are trying to make the behavior as explicit as possible so others are not confused. Risk Level: low - doc updates, no behavior change. Docs Changes: updated inline comments in the code I failed to notice, and in the project docs. 
Signed-off-by: Jose Nino --- .../v2/http_connection_manager.proto | 4 ++-- .../v3/http_connection_manager.proto | 4 ++-- .../v4alpha/http_connection_manager.proto | 4 ++-- .../intro/arch_overview/http/http_filters.rst | 23 +++++++++++++++++++ .../v2/http_connection_manager.proto | 4 ++-- .../v3/http_connection_manager.proto | 4 ++-- .../v4alpha/http_connection_manager.proto | 4 ++-- source/common/http/conn_manager_impl.cc | 16 +++++++++++++ 8 files changed, 51 insertions(+), 12 deletions(-) diff --git a/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto b/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto index c78e69b2ae30..56a05eed758b 100644 --- a/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto +++ b/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto @@ -262,8 +262,8 @@ message HttpConnectionManager { } // A list of individual HTTP filters that make up the filter chain for - // requests made to the connection manager. Order matters as the filters are - // processed sequentially as request events happen. + // requests made to the connection manager. :ref:`Order matters ` + // as the filters are processed sequentially as request events happen. repeated HttpFilter http_filters = 5; // Whether the connection manager manipulates the :ref:`config_http_conn_man_headers_user-agent` diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto b/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto index 4dd60a012a80..a3dcaa2f815f 100644 --- a/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto +++ b/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto @@ -263,8 +263,8 @@ message HttpConnectionManager { } // A list of individual HTTP filters that make up the filter chain for - // requests made to the connection manager. Order matters as the filters are - // processed sequentially as request events happen. + // requests made to the connection manager. :ref:`Order matters ` + // as the filters are processed sequentially as request events happen. repeated HttpFilter http_filters = 5; // Whether the connection manager manipulates the :ref:`config_http_conn_man_headers_user-agent` diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto index 03a15d832732..860a951b90f6 100644 --- a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto +++ b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto @@ -263,8 +263,8 @@ message HttpConnectionManager { } // A list of individual HTTP filters that make up the filter chain for - // requests made to the connection manager. Order matters as the filters are - // processed sequentially as request events happen. + // requests made to the connection manager. :ref:`Order matters ` + // as the filters are processed sequentially as request events happen. 
repeated HttpFilter http_filters = 5; // Whether the connection manager manipulates the :ref:`config_http_conn_man_headers_user-agent` diff --git a/docs/root/intro/arch_overview/http/http_filters.rst b/docs/root/intro/arch_overview/http/http_filters.rst index 36f2ab3228a0..32f177d8e5f7 100644 --- a/docs/root/intro/arch_overview/http/http_filters.rst +++ b/docs/root/intro/arch_overview/http/http_filters.rst @@ -25,3 +25,26 @@ themselves within the context of a single request stream. Refer to :ref:`data sh between filters ` for more details. Envoy already includes several HTTP level filters that are documented in this architecture overview as well as the :ref:`configuration reference `. + +.. _arch_overview_http_filters_ordering: + +Filter ordering +--------------- + +Filter ordering in the :ref:`http_filters field ` +matters. If filters are configured in the following order (and assuming all three filters are +decoder/encoder filters): + +.. code-block:: yaml + + http_filters: + - A + - B + # The last configured filter has to be a terminal filter, as determined by the + # NamedHttpFilterConfigFactory::isTerminalFilter() function. This is most likely the router + # filter. + - C + +The connection manager will invoke decoder filters in the order: ``A``, ``B``, ``C``. +On the other hand, the connection manager will invoke encoder filters in the **reverse** +order: ``C``, ``B``, ``A``. diff --git a/generated_api_shadow/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto b/generated_api_shadow/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto index c78e69b2ae30..56a05eed758b 100644 --- a/generated_api_shadow/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto +++ b/generated_api_shadow/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto @@ -262,8 +262,8 @@ message HttpConnectionManager { } // A list of individual HTTP filters that make up the filter chain for - // requests made to the connection manager. Order matters as the filters are - // processed sequentially as request events happen. + // requests made to the connection manager. :ref:`Order matters ` + // as the filters are processed sequentially as request events happen. repeated HttpFilter http_filters = 5; // Whether the connection manager manipulates the :ref:`config_http_conn_man_headers_user-agent` diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto index 4be597d448b1..11555c3bee29 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto @@ -261,8 +261,8 @@ message HttpConnectionManager { Tracing tracing = 7; // A list of individual HTTP filters that make up the filter chain for - // requests made to the connection manager. Order matters as the filters are - // processed sequentially as request events happen. + // requests made to the connection manager. :ref:`Order matters ` + // as the filters are processed sequentially as request events happen. 
config.core.v3.HttpProtocolOptions common_http_protocol_options = 35; // Whether the connection manager manipulates the :ref:`config_http_conn_man_headers_user-agent` diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto index 03a15d832732..860a951b90f6 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto @@ -263,8 +263,8 @@ message HttpConnectionManager { } // A list of individual HTTP filters that make up the filter chain for - // requests made to the connection manager. Order matters as the filters are - // processed sequentially as request events happen. + // requests made to the connection manager. :ref:`Order matters ` + // as the filters are processed sequentially as request events happen. repeated HttpFilter http_filters = 5; // Whether the connection manager manipulates the :ref:`config_http_conn_man_headers_user-agent` diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index 7ab746ad5d27..d81eb04f0f2c 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -695,6 +695,14 @@ void ConnectionManagerImpl::ActiveStream::addStreamDecoderFilterWorker( StreamDecoderFilterSharedPtr filter, bool dual_filter) { ActiveStreamDecoderFilterPtr wrapper(new ActiveStreamDecoderFilter(*this, filter, dual_filter)); filter->setDecoderFilterCallbacks(*wrapper); + // Note: configured decoder filters are appended to decoder_filters_. + // This means that if filters are configured in the following order (assume all three filters are + // both decoder/encoder filters): + // http_filters: + // - A + // - B + // - C + // The decoder filter chain will iterate through filters A, B, C. wrapper->moveIntoListBack(std::move(wrapper), decoder_filters_); } @@ -702,6 +710,14 @@ void ConnectionManagerImpl::ActiveStream::addStreamEncoderFilterWorker( StreamEncoderFilterSharedPtr filter, bool dual_filter) { ActiveStreamEncoderFilterPtr wrapper(new ActiveStreamEncoderFilter(*this, filter, dual_filter)); filter->setEncoderFilterCallbacks(*wrapper); + // Note: configured encoder filters are prepended to encoder_filters_. + // This means that if filters are configured in the following order (assume all three filters are + // both decoder/encoder filters): + // http_filters: + // - A + // - B + // - C + // The encoder filter chain will iterate through filters C, B, A. wrapper->moveIntoList(std::move(wrapper), encoder_filters_); } From 23185deb6c9e2376f7449127ad77f838ee6b41f4 Mon Sep 17 00:00:00 2001 From: asraa Date: Tue, 5 May 2020 18:05:44 -0400 Subject: [PATCH 101/909] [http] Introduce error status return type for dispatch (no-op) (#10879) (This is the merge-able portion of https://github.com/envoyproxy/envoy/pull/10484. It does not include the behavior changes in H/1. Only the necessary changes are there for H/1 and H/2 exception removal to happen separately.) Description: This introduces a new return value, `Envoy::Http::Status` for `Connection::dispatch`. Currently, dispatch will always return an OK status. This facilitates the migration to remove exceptions in codecs and replace them with error statuses. 
The HCM and codec client can now handle statuses returned from dispatch, although they will continue to support exceptions (by translating them to the corresponding status) until legacy codecs are deprecated. Following this, PRs will stage exception removal in H/1 and H/2 codecs, codecs will be forked to have a legacy version, and a runtime override will allow users to continue to use legacy codecs during the migration. See issue https://github.com/envoyproxy/envoy/issues/10878 for a full detailed plan and overview. Risk: Medium-high. A no-op, but touches sensitive code. Testing: All tests pass, this is a no-op Issue: https://github.com/envoyproxy/envoy/issues/10878 Signed-off-by: Asra Ali --- include/envoy/http/BUILD | 1 + include/envoy/http/codec.h | 6 +- source/common/http/BUILD | 2 + source/common/http/codec_client.cc | 13 +- source/common/http/conn_manager_impl.cc | 16 +- source/common/http/conn_manager_impl.h | 2 +- source/common/http/http1/BUILD | 2 + source/common/http/http1/codec_impl.cc | 13 +- source/common/http/http1/codec_impl.h | 13 +- source/common/http/http2/BUILD | 1 + source/common/http/http2/codec_impl.cc | 27 +- source/common/http/http2/codec_impl.h | 16 +- source/common/http/utility.cc | 22 + source/common/http/utility.h | 13 + .../quic_listeners/quiche/codec_impl.h | 2 +- test/common/http/codec_client_test.cc | 38 +- test/common/http/codec_impl_fuzz_test.cc | 177 ++-- .../http/conn_manager_impl_fuzz_test.cc | 8 +- test/common/http/conn_manager_impl_test.cc | 865 ++++++++++-------- test/common/http/http1/codec_impl_test.cc | 352 ++++--- test/common/http/http1/conn_pool_test.cc | 3 +- test/common/http/http2/codec_impl_test.cc | 64 +- test/common/http/http2/frame_replay.cc | 9 +- test/common/http/http2/frame_replay.h | 2 +- test/common/http/http2/frame_replay_test.cc | 80 +- .../http/http2/request_header_fuzz_test.cc | 12 +- .../http/http2/response_header_fuzz_test.cc | 10 +- test/integration/fake_upstream.h | 8 +- test/mocks/http/mocks.h | 4 +- 29 files changed, 1084 insertions(+), 697 deletions(-) diff --git a/include/envoy/http/BUILD b/include/envoy/http/BUILD index e0bc196ac5b8..d1839dce3bf4 100644 --- a/include/envoy/http/BUILD +++ b/include/envoy/http/BUILD @@ -36,6 +36,7 @@ envoy_cc_library( ":protocol_interface", "//include/envoy/buffer:buffer_interface", "//include/envoy/network:address_interface", + "//source/common/http:status_lib", ], ) diff --git a/include/envoy/http/codec.h b/include/envoy/http/codec.h index 24cfdacf49b9..769c042d6b9b 100644 --- a/include/envoy/http/codec.h +++ b/include/envoy/http/codec.h @@ -11,6 +11,8 @@ #include "envoy/http/protocol.h" #include "envoy/network/address.h" +#include "common/http/status.h" + namespace Envoy { namespace Http { @@ -358,8 +360,10 @@ class Connection { /** * Dispatch incoming connection data. * @param data supplies the data to dispatch. The codec will drain as many bytes as it processes. + * @return Status indicating the status of the codec. Holds any errors encountered while + * processing the incoming data. */ - virtual void dispatch(Buffer::Instance& data) PURE; + virtual Status dispatch(Buffer::Instance& data) PURE; /** * Indicate "go away" to the remote. No new streams can be created beyond this point. 
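
For reference, the caller-side pattern this new return type enables looks roughly like the sketch below. It is illustrative only and not part of the patch: the classification helpers (okStatus, isBufferFloodError, isCodecProtocolError, isPrematureResponseError, getPrematureResponseHttpCode) are the ones added and used elsewhere in this change, but the wrapping function, its name, and the logging are hypothetical, and the sketch assumes code living in the Envoy::Http namespace so the helpers resolve unqualified.

// Hypothetical caller sketch (not part of the patch): consume the Status
// returned by Connection::dispatch() instead of catching codec exceptions.
Network::FilterStatus dispatchAndClassify(Connection& codec, Buffer::Instance& data) {
  const Status status = codec.dispatch(data);
  if (status.ok()) {
    return Network::FilterStatus::Continue;
  }
  if (isBufferFloodError(status) || isCodecProtocolError(status)) {
    // Terminal codec errors carry a message, mirroring what the old
    // exceptions carried in what().
    ENVOY_LOG_MISC(debug, "dispatch error: {}", status.message());
    return Network::FilterStatus::StopIteration;
  }
  // Premature responses additionally carry the HTTP code of the early response.
  ASSERT(isPrematureResponseError(status));
  ENVOY_LOG_MISC(debug, "premature response, code {}",
                 static_cast<int>(getPrematureResponseHttpCode(status)));
  return Network::FilterStatus::StopIteration;
}

The real call sites in this patch (CodecClient::onData and ConnectionManagerImpl::onData) follow this shape, with the added wrinkle that a premature 408 with no active requests is deliberately not counted as a protocol error.
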
diff --git a/source/common/http/BUILD b/source/common/http/BUILD index 6597946f813f..8fd46d8d58dc 100644 --- a/source/common/http/BUILD +++ b/source/common/http/BUILD @@ -51,6 +51,7 @@ envoy_cc_library( deps = [ ":codec_wrappers_lib", ":exception_lib", + ":status_lib", ":utility_lib", "//include/envoy/event:deferred_deletable", "//include/envoy/http:codec_interface", @@ -167,6 +168,7 @@ envoy_cc_library( ":header_utility_lib", ":headers_lib", ":path_utility_lib", + ":status_lib", ":user_agent_lib", ":utility_lib", "//include/envoy/access_log:access_log_interface", diff --git a/source/common/http/codec_client.cc b/source/common/http/codec_client.cc index 6d8011e22485..60725a27b3fd 100644 --- a/source/common/http/codec_client.cc +++ b/source/common/http/codec_client.cc @@ -12,6 +12,7 @@ #include "common/http/http2/codec_impl.h" #include "common/http/http3/quic_codec_factory.h" #include "common/http/http3/well_known_names.h" +#include "common/http/status.h" #include "common/http/utility.h" namespace Envoy { @@ -121,18 +122,18 @@ void CodecClient::onReset(ActiveRequest& request, StreamResetReason reason) { void CodecClient::onData(Buffer::Instance& data) { bool protocol_error = false; - try { - codec_->dispatch(data); - } catch (CodecProtocolException& e) { - ENVOY_CONN_LOG(debug, "protocol error: {}", *connection_, e.what()); + const Status status = codec_->dispatch(data); + + if (isCodecProtocolError(status)) { + ENVOY_CONN_LOG(debug, "protocol error: {}", *connection_, status.message()); close(); protocol_error = true; - } catch (PrematureResponseException& e) { + } else if (isPrematureResponseError(status)) { ENVOY_CONN_LOG(debug, "premature response", *connection_); close(); // Don't count 408 responses where we have no active requests as protocol errors - if (!active_requests_.empty() || e.responseCode() != Code::RequestTimeout) { + if (!active_requests_.empty() || getPrematureResponseHttpCode(status) != Code::RequestTimeout) { protocol_error = true; } } diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index d81eb04f0f2c..f36d5976e2f5 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -34,6 +34,7 @@ #include "common/http/http1/codec_impl.h" #include "common/http/http2/codec_impl.h" #include "common/http/path_utility.h" +#include "common/http/status.h" #include "common/http/utility.h" #include "common/network/utility.h" #include "common/router/config_impl.h" @@ -279,7 +280,7 @@ RequestDecoder& ConnectionManagerImpl::newStream(ResponseEncoder& response_encod return **streams_.begin(); } -void ConnectionManagerImpl::handleCodecException(const char* error) { +void ConnectionManagerImpl::handleCodecError(absl::string_view error) { ENVOY_CONN_LOG(debug, "dispatch error: {}", read_callbacks_->connection(), error); read_callbacks_->connection().streamInfo().setResponseCodeDetails( absl::StrCat("codec error: ", error)); @@ -323,14 +324,15 @@ Network::FilterStatus ConnectionManagerImpl::onData(Buffer::Instance& data, bool do { redispatch = false; - try { - codec_->dispatch(data); - } catch (const FrameFloodException& e) { - handleCodecException(e.what()); + const Status status = codec_->dispatch(data); + + ASSERT(!isPrematureResponseError(status)); + if (isBufferFloodError(status)) { + handleCodecError(status.message()); return Network::FilterStatus::StopIteration; - } catch (const CodecProtocolException& e) { + } else if (isCodecProtocolError(status)) { 
stats_.named_.downstream_cx_protocol_error_.inc(); - handleCodecException(e.what()); + handleCodecError(status.message()); return Network::FilterStatus::StopIteration; } diff --git a/source/common/http/conn_manager_impl.h b/source/common/http/conn_manager_impl.h index 705f60e27e4c..c33d752d38a4 100644 --- a/source/common/http/conn_manager_impl.h +++ b/source/common/http/conn_manager_impl.h @@ -758,7 +758,7 @@ class ConnectionManagerImpl : Logger::Loggable, void onDrainTimeout(); void startDrainSequence(); Tracing::HttpTracer& tracer() { return *config_.tracer(); } - void handleCodecException(const char* error); + void handleCodecError(absl::string_view error); void doConnectionClose(absl::optional close_type, absl::optional response_flag); diff --git a/source/common/http/http1/BUILD b/source/common/http/http1/BUILD index 0608280278da..d9b00a317157 100644 --- a/source/common/http/http1/BUILD +++ b/source/common/http/http1/BUILD @@ -29,6 +29,7 @@ envoy_cc_library( "//source/common/buffer:buffer_lib", "//source/common/buffer:watermark_buffer_lib", "//source/common/common:assert_lib", + "//source/common/common:statusor_lib", "//source/common/common:utility_lib", "//source/common/http:codec_helper_lib", "//source/common/http:codes_lib", @@ -36,6 +37,7 @@ envoy_cc_library( "//source/common/http:header_map_lib", "//source/common/http:header_utility_lib", "//source/common/http:headers_lib", + "//source/common/http:status_lib", "//source/common/http:utility_lib", "//source/common/http/http1:header_formatter_lib", "//source/common/runtime:runtime_features_lib", diff --git a/source/common/http/http1/codec_impl.cc b/source/common/http/http1/codec_impl.cc index b383ad6bd3b8..d4aacd3a8ccc 100644 --- a/source/common/http/http1/codec_impl.cc +++ b/source/common/http/http1/codec_impl.cc @@ -494,12 +494,20 @@ bool ConnectionImpl::maybeDirectDispatch(Buffer::Instance& data) { return true; } -void ConnectionImpl::dispatch(Buffer::Instance& data) { +Http::Status ConnectionImpl::dispatch(Buffer::Instance& data) { + // TODO(#10878): Remove this wrapper when exception removal is complete. innerDispatch may either + // throw an exception or return an error status. The utility wrapper catches exceptions and + // converts them to error statuses. + return Utility::exceptionToStatus( + [&](Buffer::Instance& data) -> Http::Status { return innerDispatch(data); }, data); +} + +Http::Status ConnectionImpl::innerDispatch(Buffer::Instance& data) { ENVOY_CONN_LOG(trace, "parsing {} bytes", connection_, data.length()); ASSERT(buffered_body_.length() == 0); if (maybeDirectDispatch(data)) { - return; + return Http::okStatus(); } // Always unpause before dispatch. @@ -528,6 +536,7 @@ void ConnectionImpl::dispatch(Buffer::Instance& data) { // If an upgrade has been handled and there is body data or early upgrade // payload to send on, send it on. 
maybeDirectDispatch(data); + return Http::okStatus(); } size_t ConnectionImpl::dispatchSlice(const char* slice, size_t len) { diff --git a/source/common/http/http1/codec_impl.h b/source/common/http/http1/codec_impl.h index f41824755b57..d3fa827e029a 100644 --- a/source/common/http/http1/codec_impl.h +++ b/source/common/http/http1/codec_impl.h @@ -15,10 +15,12 @@ #include "common/buffer/watermark_buffer.h" #include "common/common/assert.h" +#include "common/common/statusor.h" #include "common/http/codec_helper.h" #include "common/http/codes.h" #include "common/http/header_map_impl.h" #include "common/http/http1/header_formatter.h" +#include "common/http/status.h" namespace Envoy { namespace Http { @@ -205,7 +207,7 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable Http::Status { return innerDispatch(data); }, data); +} + +Http::Status ConnectionImpl::innerDispatch(Buffer::Instance& data) { ENVOY_CONN_LOG(trace, "dispatching {} bytes", connection_, data.length()); + // Make sure that dispatching_ is set to false after dispatching, even when + // ConnectionImpl::dispatch returns early or throws an exception (consider removing if there is a + // single return after exception removal (#10878)). + Cleanup cleanup([this]() { dispatching_ = false; }); for (const Buffer::RawSlice& slice : data.getRawSlices()) { dispatching_ = true; ssize_t rc = @@ -489,6 +501,7 @@ void ConnectionImpl::dispatch(Buffer::Instance& data) { // Decoding incoming frames can generate outbound frames so flush pending. sendPendingFrames(); + return Http::okStatus(); } ConnectionImpl::StreamImpl* ConnectionImpl::getStream(int32_t stream_id) { @@ -1339,7 +1352,15 @@ void ServerConnectionImpl::checkOutboundQueueLimits() { } } -void ServerConnectionImpl::dispatch(Buffer::Instance& data) { +Http::Status ServerConnectionImpl::dispatch(Buffer::Instance& data) { + // TODO(#10878): Remove this wrapper when exception removal is complete. innerDispatch may either + // throw an exception or return an error status. The utility wrapper catches exceptions and + // converts them to error statuses. + return Http::Utility::exceptionToStatus( + [&](Buffer::Instance& data) -> Http::Status { return innerDispatch(data); }, data); +} + +Http::Status ServerConnectionImpl::innerDispatch(Buffer::Instance& data) { ASSERT(!dispatching_downstream_data_); dispatching_downstream_data_ = true; @@ -1350,7 +1371,7 @@ void ServerConnectionImpl::dispatch(Buffer::Instance& data) { // Make sure downstream outbound queue was not flooded by the upstream frames. checkOutboundQueueLimits(); - ConnectionImpl::dispatch(data); + return ConnectionImpl::innerDispatch(data); } absl::optional diff --git a/source/common/http/http2/codec_impl.h b/source/common/http/http2/codec_impl.h index 23c670debca7..c69c8816ac74 100644 --- a/source/common/http/http2/codec_impl.h +++ b/source/common/http/http2/codec_impl.h @@ -21,6 +21,7 @@ #include "common/http/header_map_impl.h" #include "common/http/http2/metadata_decoder.h" #include "common/http/http2/metadata_encoder.h" +#include "common/http/status.h" #include "common/http/utility.h" #include "absl/types/optional.h" @@ -121,7 +122,7 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable dispatch, + Buffer::Instance& data) { + Http::Status status; + try { + status = dispatch(data); + // TODO(#10878): Remove this when exception removal is complete. It is currently in migration, + // so dispatch may either return an error status or throw an exception. 
Soon we won't need to + // catch these exceptions, as all codec errors will be migrated to using error statuses that are + // returned from dispatch. + } catch (FrameFloodException& e) { + status = bufferFloodError(e.what()); + } catch (CodecProtocolException& e) { + status = codecProtocolError(e.what()); + } catch (PrematureResponseException& e) { + status = prematureResponseError(e.what(), e.responseCode()); + } + return status; +} +} // namespace Utility +} // namespace Http namespace Http2 { namespace Utility { diff --git a/source/common/http/utility.h b/source/common/http/utility.h index 241962466c82..cc6a2a3e4b7c 100644 --- a/source/common/http/utility.h +++ b/source/common/http/utility.h @@ -13,6 +13,8 @@ #include "envoy/http/metadata_interface.h" #include "envoy/http/query_params.h" +#include "common/http/exception.h" +#include "common/http/status.h" #include "common/json/json_loader.h" #include "absl/strings/string_view.h" @@ -20,6 +22,17 @@ #include "nghttp2/nghttp2.h" namespace Envoy { +namespace Http { +namespace Utility { + +// This is a wrapper around dispatch calls that may throw an exception or may return an error status +// while exception removal is in migration. +// TODO(#10878): Remove this. +Http::Status exceptionToStatus(std::function dispatch, + Buffer::Instance& data); +} // namespace Utility +} // namespace Http + namespace Http2 { namespace Utility { diff --git a/source/extensions/quic_listeners/quiche/codec_impl.h b/source/extensions/quic_listeners/quiche/codec_impl.h index 732a8aa8e5ab..58098ecd9ce5 100644 --- a/source/extensions/quic_listeners/quiche/codec_impl.h +++ b/source/extensions/quic_listeners/quiche/codec_impl.h @@ -22,7 +22,7 @@ class QuicHttpConnectionImplBase : public virtual Http::Connection, : quic_session_(quic_session) {} // Http::Connection - void dispatch(Buffer::Instance& /*data*/) override { + Http::Status dispatch(Buffer::Instance& /*data*/) override { // Bypassed. QUIC connection already hands all data to streams. 
NOT_REACHED_GCOVR_EXCL_LINE; } diff --git a/test/common/http/codec_client_test.cc b/test/common/http/codec_client_test.cc index 607cdb8cd612..3316380030b9 100644 --- a/test/common/http/codec_client_test.cc +++ b/test/common/http/codec_client_test.cc @@ -229,7 +229,7 @@ TEST_F(CodecClientTest, IdleTimerClientLocalCloseWithActiveRequests) { } TEST_F(CodecClientTest, ProtocolError) { - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Throw(CodecProtocolException("protocol error"))); + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Return(codecProtocolError("protocol error"))); EXPECT_CALL(*connection_, close(Network::ConnectionCloseType::NoFlush)); Buffer::OwnedImpl data; @@ -239,10 +239,8 @@ TEST_F(CodecClientTest, ProtocolError) { } TEST_F(CodecClientTest, 408Response) { - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([](Buffer::Instance&) -> void { - throw PrematureResponseException(Code::RequestTimeout); - })); - + EXPECT_CALL(*codec_, dispatch(_)) + .WillOnce(Return(prematureResponseError("", Code::RequestTimeout))); EXPECT_CALL(*connection_, close(Network::ConnectionCloseType::NoFlush)); Buffer::OwnedImpl data; @@ -252,10 +250,7 @@ TEST_F(CodecClientTest, 408Response) { } TEST_F(CodecClientTest, PrematureResponse) { - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([](Buffer::Instance&) -> void { - throw PrematureResponseException(Code::OK); - })); - + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Return(prematureResponseError("", Code::OK))); EXPECT_CALL(*connection_, close(Network::ConnectionCloseType::NoFlush)); Buffer::OwnedImpl data; @@ -375,9 +370,10 @@ TEST_P(CodecNetworkTest, SendData) { const std::string full_data = "HTTP/1.1 200 OK\r\ncontent-length: 0\r\n"; Buffer::OwnedImpl data(full_data); upstream_connection_->write(data, false); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { EXPECT_EQ(full_data, data.toString()); dispatcher_->exit(); + return Http::okStatus(); })); dispatcher_->run(Event::Dispatcher::RunType::Block); @@ -397,9 +393,14 @@ TEST_P(CodecNetworkTest, SendHeadersAndClose) { upstream_connection_->close(Network::ConnectionCloseType::FlushWrite); EXPECT_CALL(*codec_, dispatch(_)) .Times(2) - .WillOnce( - Invoke([&](Buffer::Instance& data) -> void { EXPECT_EQ(full_data, data.toString()); })) - .WillOnce(Invoke([&](Buffer::Instance& data) -> void { EXPECT_EQ("", data.toString()); })); + .WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { + EXPECT_EQ(full_data, data.toString()); + return Http::okStatus(); + })) + .WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { + EXPECT_EQ("", data.toString()); + return Http::okStatus(); + })); // Because the headers are not complete, the disconnect will reset the stream. 
// Note even if the final \r\n were appended to the header data, enough of the // codec state is mocked out that the data would not be framed and the stream @@ -430,9 +431,14 @@ TEST_P(CodecNetworkTest, SendHeadersAndCloseUnderReadDisable) { EXPECT_CALL(*codec_, dispatch(_)) .Times(2) - .WillOnce( - Invoke([&](Buffer::Instance& data) -> void { EXPECT_EQ(full_data, data.toString()); })) - .WillOnce(Invoke([&](Buffer::Instance& data) -> void { EXPECT_EQ("", data.toString()); })); + .WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { + EXPECT_EQ(full_data, data.toString()); + return Http::okStatus(); + })) + .WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { + EXPECT_EQ("", data.toString()); + return Http::okStatus(); + })); EXPECT_CALL(inner_encoder_.stream_, resetStream(_)).WillOnce(InvokeWithoutArgs([&]() -> void { for (auto callbacks : inner_encoder_.stream_.callbacks_) { callbacks->onResetStream(StreamResetReason::RemoteReset, absl::string_view()); diff --git a/test/common/http/codec_impl_fuzz_test.cc b/test/common/http/codec_impl_fuzz_test.cc index c2424db5ade4..2beb87908c6a 100644 --- a/test/common/http/codec_impl_fuzz_test.cc +++ b/test/common/http/codec_impl_fuzz_test.cc @@ -356,14 +356,20 @@ class ReorderBuffer { bufs_.back().move(data); } - void drain() { + Http::Status drain() { + Status status = Http::okStatus(); while (!bufs_.empty()) { Buffer::OwnedImpl& buf = bufs_.front(); while (buf.length() > 0) { - connection_.dispatch(buf); + status = connection_.dispatch(buf); + if (!status.ok()) { + ENVOY_LOG_MISC(trace, "Error status: {}", status.message()); + return status; + } } bufs_.pop_front(); } + return status; } void mutate(uint32_t buffer, uint32_t offset, uint8_t value) { @@ -483,11 +489,19 @@ void codecFuzz(const test::common::http::CodecImplFuzzTestCase& input, HttpVersi return stream->request_.request_decoder_; })); - const auto client_server_buf_drain = [&client_write_buf, &server_write_buf] { + auto client_server_buf_drain = [&client_write_buf, &server_write_buf] { + Http::Status status = Http::okStatus(); while (!client_write_buf.empty() || !server_write_buf.empty()) { - client_write_buf.drain(); - server_write_buf.drain(); + status = client_write_buf.drain(); + if (!status.ok()) { + return status; + } + status = server_write_buf.drain(); + if (!status.ok()) { + return status; + } } + return status; }; // We track whether the connection should be closed for HTTP/1, since stream resets imply @@ -495,92 +509,103 @@ void codecFuzz(const test::common::http::CodecImplFuzzTestCase& input, HttpVersi bool should_close_connection = false; constexpr auto max_actions = 1024; - try { - for (int i = 0; i < std::min(max_actions, input.actions().size()) && !should_close_connection; - ++i) { - const auto& action = input.actions(i); - ENVOY_LOG_MISC(trace, "action {} with {} streams", action.DebugString(), streams.size()); - switch (action.action_selector_case()) { - case test::common::http::Action::kNewStream: { - if (!http2) { - // HTTP/1 codec needs to have existing streams complete, so make it - // easier to achieve a successful multi-stream example by flushing. - client_server_buf_drain(); - // HTTP/1 client codec can only have a single active stream. 
- if (!pending_streams.empty() || (!streams.empty() && streams.back()->active())) { - ENVOY_LOG_MISC(trace, "Skipping new stream as HTTP/1 and already have existing stream"); - continue; - } - } - HttpStreamPtr stream = std::make_unique( - *client, - fromSanitizedHeaders(action.new_stream().request_headers()), - action.new_stream().end_stream(), [&should_close_connection, http2]() { - // HTTP/1 codec has stream reset implying connection close. - if (!http2) { - should_close_connection = true; - } - }); - stream->moveIntoListBack(std::move(stream), pending_streams); - break; - } - case test::common::http::Action::kStreamAction: { - const auto& stream_action = action.stream_action(); - if (streams.empty()) { + bool codec_error = false; + for (int i = 0; i < std::min(max_actions, input.actions().size()) && !should_close_connection && + !codec_error; + ++i) { + const auto& action = input.actions(i); + ENVOY_LOG_MISC(trace, "action {} with {} streams", action.DebugString(), streams.size()); + switch (action.action_selector_case()) { + case test::common::http::Action::kNewStream: { + if (!http2) { + // HTTP/1 codec needs to have existing streams complete, so make it + // easier to achieve a successful multi-stream example by flushing. + if (!client_server_buf_drain().ok()) { + codec_error = true; break; } - // Index into list of created streams (not HTTP/2 level stream ID). - const uint32_t stream_id = stream_action.stream_id() % streams.size(); - ENVOY_LOG_MISC(trace, "action for stream index {}", stream_id); - (*std::next(streams.begin(), stream_id))->streamAction(stream_action); - break; - } - case test::common::http::Action::kMutate: { - const auto& mutate = action.mutate(); - ReorderBuffer& write_buf = mutate.server() ? server_write_buf : client_write_buf; - write_buf.mutate(mutate.buffer(), mutate.offset(), mutate.value()); - break; + // HTTP/1 client codec can only have a single active stream. + if (!pending_streams.empty() || (!streams.empty() && streams.back()->active())) { + ENVOY_LOG_MISC(trace, "Skipping new stream as HTTP/1 and already have existing stream"); + continue; + } } - case test::common::http::Action::kSwapBuffer: { - const auto& swap_buffer = action.swap_buffer(); - ReorderBuffer& write_buf = swap_buffer.server() ? server_write_buf : client_write_buf; - write_buf.swap(swap_buffer.buffer()); + HttpStreamPtr stream = std::make_unique( + *client, + fromSanitizedHeaders(action.new_stream().request_headers()), + action.new_stream().end_stream(), [&should_close_connection, http2]() { + // HTTP/1 codec has stream reset implying connection close. + if (!http2) { + should_close_connection = true; + } + }); + stream->moveIntoListBack(std::move(stream), pending_streams); + break; + } + case test::common::http::Action::kStreamAction: { + const auto& stream_action = action.stream_action(); + if (streams.empty()) { break; } - case test::common::http::Action::kClientDrain: { - client_write_buf.drain(); + // Index into list of created streams (not HTTP/2 level stream ID). + const uint32_t stream_id = stream_action.stream_id() % streams.size(); + ENVOY_LOG_MISC(trace, "action for stream index {}", stream_id); + (*std::next(streams.begin(), stream_id))->streamAction(stream_action); + break; + } + case test::common::http::Action::kMutate: { + const auto& mutate = action.mutate(); + ReorderBuffer& write_buf = mutate.server() ? 
server_write_buf : client_write_buf; + write_buf.mutate(mutate.buffer(), mutate.offset(), mutate.value()); + break; + } + case test::common::http::Action::kSwapBuffer: { + const auto& swap_buffer = action.swap_buffer(); + ReorderBuffer& write_buf = swap_buffer.server() ? server_write_buf : client_write_buf; + write_buf.swap(swap_buffer.buffer()); + break; + } + case test::common::http::Action::kClientDrain: { + if (!client_write_buf.drain().ok()) { + codec_error = true; break; } - case test::common::http::Action::kServerDrain: { - server_write_buf.drain(); + break; + } + case test::common::http::Action::kServerDrain: { + if (!server_write_buf.drain().ok()) { + codec_error = true; break; } - case test::common::http::Action::kQuiesceDrain: { - client_server_buf_drain(); + break; + } + case test::common::http::Action::kQuiesceDrain: { + if (!client_server_buf_drain().ok()) { + codec_error = true; break; } - default: - // Maybe nothing is set? + break; + } + default: + // Maybe nothing is set? + break; + } + if (DebugMode && !should_close_connection && !codec_error) { + if (!client_server_buf_drain().ok()) { + codec_error = true; break; } - if (DebugMode && !should_close_connection) { - client_server_buf_drain(); - } - } - // Drain all remaining buffers, unless the connection is effectively closed. - if (!should_close_connection) { - client_server_buf_drain(); } - if (http2) { - dynamic_cast(*client).goAway(); - dynamic_cast(*server).goAway(); + } + // Drain all remaining buffers, unless the connection is effectively closed. + if (!should_close_connection && !codec_error) { + if (!client_server_buf_drain().ok()) { + codec_error = true; } - } catch (CodecProtocolException& e) { - ENVOY_LOG_MISC(debug, "CodecProtocolException {}", e.what()); - } catch (CodecClientException& e) { - ENVOY_LOG_MISC(debug, "CodecClientException {}", e.what()); - } catch (PrematureResponseException& e) { - ENVOY_LOG_MISC(debug, "PrematureResponseException {}", e.what()); + } + if (!codec_error && http2) { + dynamic_cast(*client).goAway(); + dynamic_cast(*server).goAway(); } } diff --git a/test/common/http/conn_manager_impl_fuzz_test.cc b/test/common/http/conn_manager_impl_fuzz_test.cc index 87be21091269..bd72be7f7ff1 100644 --- a/test/common/http/conn_manager_impl_fuzz_test.cc +++ b/test/common/http/conn_manager_impl_fuzz_test.cc @@ -246,6 +246,7 @@ class FuzzStream { end_stream ? 
StreamState::Closed : StreamState::PendingDataOrTrailers; })); decoder_->decodeHeaders(std::move(headers), end_stream); + return Http::okStatus(); })); ON_CALL(*decoder_filter_, decodeHeaders(_, _)) .WillByDefault( @@ -339,6 +340,7 @@ class FuzzStream { EXPECT_CALL(*config_.codec_, dispatch(_)).WillOnce(InvokeWithoutArgs([this, &data_action] { Buffer::OwnedImpl buf(std::string(data_action.size() % (1024 * 1024), 'a')); decoder_->decodeData(buf, data_action.end_stream()); + return Http::okStatus(); })); fakeOnData(); FUZZ_ASSERT(testing::Mock::VerifyAndClearExpectations(config_.codec_)); @@ -361,6 +363,7 @@ class FuzzStream { .WillOnce(InvokeWithoutArgs([this, &trailers_action] { decoder_->decodeTrailers(std::make_unique( Fuzz::fromHeaders(trailers_action.headers()))); + return Http::okStatus(); })); fakeOnData(); FUZZ_ASSERT(testing::Mock::VerifyAndClearExpectations(config_.codec_)); @@ -381,9 +384,8 @@ class FuzzStream { } case test::common::http::RequestAction::kThrowDecoderException: { if (state == StreamState::PendingDataOrTrailers) { - EXPECT_CALL(*config_.codec_, dispatch(_)).WillOnce(InvokeWithoutArgs([] { - throw CodecProtocolException("blah"); - })); + EXPECT_CALL(*config_.codec_, dispatch(_)) + .WillOnce(testing::Throw(CodecProtocolException("blah"))); fakeOnData(); FUZZ_ASSERT(testing::Mock::VerifyAndClearExpectations(config_.codec_)); state = StreamState::Closed; diff --git a/test/common/http/conn_manager_impl_test.cc b/test/common/http/conn_manager_impl_test.cc index 4853bb378c7e..aab9651f96d6 100644 --- a/test/common/http/conn_manager_impl_test.cc +++ b/test/common/http/conn_manager_impl_test.cc @@ -202,7 +202,7 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan void setUpEncoderAndDecoder(bool request_with_data_and_trailers, bool decode_headers_stop_all) { setUpBufferLimits(); EXPECT_CALL(*codec_, dispatch(_)) - .WillOnce(Invoke([&, request_with_data_and_trailers](Buffer::Instance&) -> void { + .WillOnce(Invoke([&, request_with_data_and_trailers](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ {":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; @@ -217,6 +217,7 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan } else { decoder->decodeHeaders(std::move(headers), true); } + return Http::okStatus(); })); setupFilterChain(2, 2); @@ -454,7 +455,7 @@ TEST_F(HttpConnectionManagerImplTest, HeaderOnlyRequestAndResponse) { NiceMock encoder; EXPECT_CALL(*codec_, dispatch(_)) .Times(2) - .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> void { + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { decoder = &conn_manager_->newStream(encoder); // Test not charging stats on the second call. @@ -473,6 +474,7 @@ TEST_F(HttpConnectionManagerImplTest, HeaderOnlyRequestAndResponse) { // Drain 2 so that on the 2nd iteration we will hit zero. data.drain(2); + return Http::okStatus(); })); // Kick off the incoming data. Use extra data which should cause a redispatch. @@ -512,21 +514,23 @@ TEST_F(HttpConnectionManagerImplTest, 100ContinueResponse) { // only request into it. Then we respond into the filter. 
RequestDecoder* decoder = nullptr; NiceMock encoder; - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance& data) -> void { - decoder = &conn_manager_->newStream(encoder); + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + decoder = &conn_manager_->newStream(encoder); - // Test not charging stats on the second call. - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; - decoder->decodeHeaders(std::move(headers), true); + // Test not charging stats on the second call. + RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ + {":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; + decoder->decodeHeaders(std::move(headers), true); - ResponseHeaderMapPtr continue_headers{new TestResponseHeaderMapImpl{{":status", "100"}}}; - filter->callbacks_->encode100ContinueHeaders(std::move(continue_headers)); - ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; - filter->callbacks_->encodeHeaders(std::move(response_headers), true); + ResponseHeaderMapPtr continue_headers{new TestResponseHeaderMapImpl{{":status", "100"}}}; + filter->callbacks_->encode100ContinueHeaders(std::move(continue_headers)); + ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + filter->callbacks_->encodeHeaders(std::move(response_headers), true); - data.drain(4); - })); + data.drain(4); + return Http::okStatus(); + })); // Kick off the incoming data. Buffer::OwnedImpl fake_input("1234"); @@ -647,28 +651,30 @@ TEST_F(HttpConnectionManagerImplTest, 100ContinueResponseWithDecoderPause) { RequestDecoder* decoder = nullptr; NiceMock encoder; - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance& data) -> void { - decoder = &conn_manager_->newStream(encoder); + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + decoder = &conn_manager_->newStream(encoder); - // Test not charging stats on the second call. - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; - decoder->decodeHeaders(std::move(headers), false); - // Allow the decode pipeline to pause. - decoder->decodeData(data, false); + // Test not charging stats on the second call. + RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ + {":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; + decoder->decodeHeaders(std::move(headers), false); + // Allow the decode pipeline to pause. + decoder->decodeData(data, false); - ResponseHeaderMapPtr continue_headers{new TestResponseHeaderMapImpl{{":status", "100"}}}; - filter->callbacks_->encode100ContinueHeaders(std::move(continue_headers)); + ResponseHeaderMapPtr continue_headers{new TestResponseHeaderMapImpl{{":status", "100"}}}; + filter->callbacks_->encode100ContinueHeaders(std::move(continue_headers)); - // Resume decode pipeline after encoding 100 continue headers, we're now ready to trigger - // #10923. - decoder->decodeData(data, true); + // Resume decode pipeline after encoding 100 continue headers, we're now + // ready to trigger #10923. 
+ decoder->decodeData(data, true); - ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; - filter->callbacks_->encodeHeaders(std::move(response_headers), true); + ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + filter->callbacks_->encodeHeaders(std::move(response_headers), true); - data.drain(4); - })); + data.drain(4); + return Http::okStatus(); + })); // Kick off the incoming data. Buffer::OwnedImpl fake_input("1234"); @@ -745,12 +751,13 @@ TEST_F(HttpConnectionManagerImplTest, InvalidPathWithDualFilter) { InSequence s; setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ {":authority", "host"}, {":path", "http://api.lyft.com/"}, {":method", "GET"}}}; decoder->decodeHeaders(std::move(headers), true); data.drain(4); + return Http::okStatus(); })); // This test also verifies that decoder/encoder filters have onDestroy() called only once. @@ -782,7 +789,7 @@ TEST_F(HttpConnectionManagerImplTest, PathFailedtoSanitize) { // Enable path sanitizer normalize_path_ = true; - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ {":authority", "host"}, @@ -790,6 +797,7 @@ TEST_F(HttpConnectionManagerImplTest, PathFailedtoSanitize) { {":method", "GET"}}}; decoder->decodeHeaders(std::move(headers), true); data.drain(4); + return Http::okStatus(); })); // This test also verifies that decoder/encoder filters have onDestroy() called only once. @@ -838,11 +846,12 @@ TEST_F(HttpConnectionManagerImplTest, FilterShouldUseSantizedPath) { EXPECT_CALL(*filter, setDecoderFilterCallbacks(_)); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ {":authority", "host"}, {":path", original_path}, {":method", "GET"}}}; decoder->decodeHeaders(std::move(headers), true); + return Http::okStatus(); })); // Kick off the incoming data. 
@@ -859,11 +868,12 @@ TEST_F(HttpConnectionManagerImplTest, RouteShouldUseSantizedPath) { const std::string original_path = "/x/%2E%2e/z"; const std::string normalized_path = "/z"; - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ {":authority", "host"}, {":path", original_path}, {":method", "GET"}}}; decoder->decodeHeaders(std::move(headers), true); + return Http::okStatus(); })); const std::string fake_cluster_name = "fake_cluster"; @@ -1015,21 +1025,23 @@ TEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlow) { RequestDecoder* decoder = nullptr; NiceMock encoder; - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance& data) -> void { - decoder = &conn_manager_->newStream(encoder); + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + decoder = &conn_manager_->newStream(encoder); - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":method", "GET"}, - {":authority", "host"}, - {":path", "/"}, - {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; - decoder->decodeHeaders(std::move(headers), true); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":method", "GET"}, + {":authority", "host"}, + {":path", "/"}, + {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; + decoder->decodeHeaders(std::move(headers), true); - ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; - filter->callbacks_->encodeHeaders(std::move(response_headers), true); - filter->callbacks_->activeSpan().setTag("service-cluster", "scoobydoo"); - data.drain(4); - })); + ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + filter->callbacks_->encodeHeaders(std::move(response_headers), true); + filter->callbacks_->activeSpan().setTag("service-cluster", "scoobydoo"); + data.drain(4); + return Http::okStatus(); + })); // Should be no 'x-envoy-decorator-operation' response header. 
EXPECT_CALL(encoder, encodeHeaders(_, true)) @@ -1083,21 +1095,23 @@ TEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlowIngressDecorat RequestDecoder* decoder = nullptr; NiceMock encoder; - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance& data) -> void { - decoder = &conn_manager_->newStream(encoder); + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + decoder = &conn_manager_->newStream(encoder); - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":method", "GET"}, - {":authority", "host"}, - {":path", "/"}, - {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; - decoder->decodeHeaders(std::move(headers), true); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":method", "GET"}, + {":authority", "host"}, + {":path", "/"}, + {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; + decoder->decodeHeaders(std::move(headers), true); - ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; - filter->callbacks_->encodeHeaders(std::move(response_headers), true); - filter->callbacks_->activeSpan().setTag("service-cluster", "scoobydoo"); - data.drain(4); - })); + ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + filter->callbacks_->encodeHeaders(std::move(response_headers), true); + filter->callbacks_->activeSpan().setTag("service-cluster", "scoobydoo"); + data.drain(4); + return Http::okStatus(); + })); // Verify decorator operation response header has been defined. EXPECT_CALL(encoder, encodeHeaders(_, true)) @@ -1149,21 +1163,23 @@ TEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlowIngressDecorat RequestDecoder* decoder = nullptr; NiceMock encoder; - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance& data) -> void { - decoder = &conn_manager_->newStream(encoder); + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + decoder = &conn_manager_->newStream(encoder); - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":method", "GET"}, - {":authority", "host"}, - {":path", "/"}, - {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; - decoder->decodeHeaders(std::move(headers), true); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":method", "GET"}, + {":authority", "host"}, + {":path", "/"}, + {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; + decoder->decodeHeaders(std::move(headers), true); - ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; - filter->callbacks_->encodeHeaders(std::move(response_headers), true); - filter->callbacks_->activeSpan().setTag("service-cluster", "scoobydoo"); - data.drain(4); - })); + ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + filter->callbacks_->encodeHeaders(std::move(response_headers), true); + filter->callbacks_->activeSpan().setTag("service-cluster", "scoobydoo"); + data.drain(4); + return Http::okStatus(); + })); // Verify decorator operation response header has NOT been defined (i.e. not propagated). 
EXPECT_CALL(encoder, encodeHeaders(_, true)) @@ -1213,23 +1229,25 @@ TEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlowIngressDecorat RequestDecoder* decoder = nullptr; NiceMock encoder; - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance& data) -> void { - decoder = &conn_manager_->newStream(encoder); + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + decoder = &conn_manager_->newStream(encoder); - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":method", "GET"}, - {":authority", "host"}, - {":path", "/"}, - {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}, - {"x-envoy-decorator-operation", "testOp"}}}; - decoder->decodeHeaders(std::move(headers), true); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":method", "GET"}, + {":authority", "host"}, + {":path", "/"}, + {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}, + {"x-envoy-decorator-operation", "testOp"}}}; + decoder->decodeHeaders(std::move(headers), true); - ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; - filter->callbacks_->encodeHeaders(std::move(response_headers), true); - filter->callbacks_->activeSpan().setTag("service-cluster", "scoobydoo"); + ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + filter->callbacks_->encodeHeaders(std::move(response_headers), true); + filter->callbacks_->activeSpan().setTag("service-cluster", "scoobydoo"); - data.drain(4); - })); + data.drain(4); + return Http::okStatus(); + })); // Should be no 'x-envoy-decorator-operation' response header, as decorator // was overridden by request header. @@ -1294,22 +1312,24 @@ TEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlowEgressDecorato RequestDecoder* decoder = nullptr; NiceMock encoder; - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance& data) -> void { - decoder = &conn_manager_->newStream(encoder); + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + decoder = &conn_manager_->newStream(encoder); - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":method", "GET"}, - {":authority", "host"}, - {":path", "/"}, - {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; - decoder->decodeHeaders(std::move(headers), true); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":method", "GET"}, + {":authority", "host"}, + {":path", "/"}, + {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; + decoder->decodeHeaders(std::move(headers), true); - ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; - filter->callbacks_->encodeHeaders(std::move(response_headers), true); - filter->callbacks_->activeSpan().setTag("service-cluster", "scoobydoo"); + ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + filter->callbacks_->encodeHeaders(std::move(response_headers), true); + filter->callbacks_->activeSpan().setTag("service-cluster", "scoobydoo"); - data.drain(4); - })); + data.drain(4); + return Http::okStatus(); + })); EXPECT_CALL(*filter, decodeHeaders(_, true)) .WillOnce(Invoke([](RequestHeaderMap& headers, bool) -> FilterHeadersStatus { @@ -1376,22 +1396,24 @@ TEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlowEgressDecorato RequestDecoder* decoder = nullptr; NiceMock encoder; - EXPECT_CALL(*codec_, 
dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance& data) -> void { - decoder = &conn_manager_->newStream(encoder); + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + decoder = &conn_manager_->newStream(encoder); - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":method", "GET"}, - {":authority", "host"}, - {":path", "/"}, - {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; - decoder->decodeHeaders(std::move(headers), true); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":method", "GET"}, + {":authority", "host"}, + {":path", "/"}, + {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; + decoder->decodeHeaders(std::move(headers), true); - ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; - filter->callbacks_->encodeHeaders(std::move(response_headers), true); - filter->callbacks_->activeSpan().setTag("service-cluster", "scoobydoo"); + ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + filter->callbacks_->encodeHeaders(std::move(response_headers), true); + filter->callbacks_->activeSpan().setTag("service-cluster", "scoobydoo"); - data.drain(4); - })); + data.drain(4); + return Http::okStatus(); + })); // Verify that decorator operation has NOT been set as request header (propagate is false) EXPECT_CALL(*filter, decodeHeaders(_, true)) @@ -1456,23 +1478,25 @@ TEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlowEgressDecorato RequestDecoder* decoder = nullptr; NiceMock encoder; - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance& data) -> void { - decoder = &conn_manager_->newStream(encoder); + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + decoder = &conn_manager_->newStream(encoder); - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":method", "GET"}, - {":authority", "host"}, - {":path", "/"}, - {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; - decoder->decodeHeaders(std::move(headers), true); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":method", "GET"}, + {":authority", "host"}, + {":path", "/"}, + {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; + decoder->decodeHeaders(std::move(headers), true); - ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{ - {":status", "200"}, {"x-envoy-decorator-operation", "testOp"}}}; - filter->callbacks_->encodeHeaders(std::move(response_headers), true); - filter->callbacks_->activeSpan().setTag("service-cluster", "scoobydoo"); + ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{ + {":status", "200"}, {"x-envoy-decorator-operation", "testOp"}}}; + filter->callbacks_->encodeHeaders(std::move(response_headers), true); + filter->callbacks_->activeSpan().setTag("service-cluster", "scoobydoo"); - data.drain(4); - })); + data.drain(4); + return Http::okStatus(); + })); Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); @@ -1512,22 +1536,24 @@ TEST_F(HttpConnectionManagerImplTest, RequestDecoder* decoder = nullptr; NiceMock encoder; - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance& data) -> void { - decoder = &conn_manager_->newStream(encoder); + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + decoder = &conn_manager_->newStream(encoder); - RequestHeaderMapPtr headers{ - 
new TestRequestHeaderMapImpl{{":method", "GET"}, - {":authority", "host"}, - {":path", "/"}, - {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; - decoder->decodeHeaders(std::move(headers), true); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":method", "GET"}, + {":authority", "host"}, + {":path", "/"}, + {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; + decoder->decodeHeaders(std::move(headers), true); - ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{ - {":status", "200"}, {"x-envoy-decorator-operation", "testOp"}}}; - filter->callbacks_->encodeHeaders(std::move(response_headers), true); + ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{ + {":status", "200"}, {"x-envoy-decorator-operation", "testOp"}}}; + filter->callbacks_->encodeHeaders(std::move(response_headers), true); - data.drain(4); - })); + data.drain(4); + return Http::okStatus(); + })); Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); @@ -1568,22 +1594,24 @@ TEST_F(HttpConnectionManagerImplTest, TestAccessLog) { RequestDecoder* decoder = nullptr; NiceMock encoder; - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance& data) -> void { - decoder = &conn_manager_->newStream(encoder); + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + decoder = &conn_manager_->newStream(encoder); - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":method", "GET"}, - {":authority", "host"}, - {":path", "/"}, - {"x-forwarded-for", xff_address}, - {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; - decoder->decodeHeaders(std::move(headers), true); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":method", "GET"}, + {":authority", "host"}, + {":path", "/"}, + {"x-forwarded-for", xff_address}, + {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; + decoder->decodeHeaders(std::move(headers), true); - ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; - filter->callbacks_->encodeHeaders(std::move(response_headers), true); + ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + filter->callbacks_->encodeHeaders(std::move(response_headers), true); - data.drain(4); - })); + data.drain(4); + return Http::okStatus(); + })); Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); @@ -1612,15 +1640,17 @@ TEST_F(HttpConnectionManagerImplTest, TestDownstreamDisconnectAccessLog) { RequestDecoder* decoder = nullptr; NiceMock encoder; - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance& data) -> void { - decoder = &conn_manager_->newStream(encoder); + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + decoder = &conn_manager_->newStream(encoder); - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":method", "GET"}, {":authority", "host"}, {":path", "/"}}}; - decoder->decodeHeaders(std::move(headers), true); + RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ + {":method", "GET"}, {":authority", "host"}, {":path", "/"}}}; + decoder->decodeHeaders(std::move(headers), true); - data.drain(4); - })); + data.drain(4); + return Http::okStatus(); + })); Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); @@ -1651,24 +1681,26 @@ TEST_F(HttpConnectionManagerImplTest, TestAccessLogWithTrailers) { RequestDecoder* 
decoder = nullptr; NiceMock encoder; - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance& data) -> void { - decoder = &conn_manager_->newStream(encoder); + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + decoder = &conn_manager_->newStream(encoder); - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":method", "GET"}, - {":authority", "host"}, - {":path", "/"}, - {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; - decoder->decodeHeaders(std::move(headers), true); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":method", "GET"}, + {":authority", "host"}, + {":path", "/"}, + {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; + decoder->decodeHeaders(std::move(headers), true); - ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; - filter->callbacks_->encodeHeaders(std::move(response_headers), false); + ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + filter->callbacks_->encodeHeaders(std::move(response_headers), false); - ResponseTrailerMapPtr response_trailers{new TestResponseTrailerMapImpl{{"x-trailer", "1"}}}; - filter->callbacks_->encodeTrailers(std::move(response_trailers)); + ResponseTrailerMapPtr response_trailers{new TestResponseTrailerMapImpl{{"x-trailer", "1"}}}; + filter->callbacks_->encodeTrailers(std::move(response_trailers)); - data.drain(4); - })); + data.drain(4); + return Http::okStatus(); + })); Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); @@ -1700,14 +1732,17 @@ TEST_F(HttpConnectionManagerImplTest, TestAccessLogWithInvalidRequest) { RequestDecoder* decoder = nullptr; NiceMock encoder; - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance& data) -> void { - decoder = &conn_manager_->newStream(encoder); + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + decoder = &conn_manager_->newStream(encoder); - // These request headers are missing the necessary ":host" - RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{{":method", "GET"}, {":path", "/"}}}; - decoder->decodeHeaders(std::move(headers), true); - data.drain(0); - })); + // These request headers are missing the necessary ":host" + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":method", "GET"}, {":path", "/"}}}; + decoder->decodeHeaders(std::move(headers), true); + data.drain(0); + return Http::okStatus(); + })); Buffer::OwnedImpl fake_input; conn_manager_->onData(fake_input, false); @@ -1739,24 +1774,26 @@ TEST_F(HttpConnectionManagerImplTest, TestAccessLogSsl) { RequestDecoder* decoder = nullptr; NiceMock encoder; - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance& data) -> void { - decoder = &conn_manager_->newStream(encoder); + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + decoder = &conn_manager_->newStream(encoder); - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":method", "GET"}, - {":authority", "host"}, - {":path", "/"}, - {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; - decoder->decodeHeaders(std::move(headers), true); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":method", "GET"}, + {":authority", "host"}, + {":path", "/"}, + {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; + decoder->decodeHeaders(std::move(headers), 
true); - ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; - filter->callbacks_->encodeHeaders(std::move(response_headers), false); + ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + filter->callbacks_->encodeHeaders(std::move(response_headers), false); - ResponseTrailerMapPtr response_trailers{new TestResponseTrailerMapImpl{{"x-trailer", "1"}}}; - filter->callbacks_->encodeTrailers(std::move(response_trailers)); + ResponseTrailerMapPtr response_trailers{new TestResponseTrailerMapImpl{{"x-trailer", "1"}}}; + filter->callbacks_->encodeTrailers(std::move(response_trailers)); - data.drain(4); - })); + data.drain(4); + return Http::okStatus(); + })); Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); @@ -1782,21 +1819,23 @@ TEST_F(HttpConnectionManagerImplTest, DoNotStartSpanIfTracingIsNotEnabled) { RequestDecoder* decoder = nullptr; NiceMock encoder; - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance& data) -> void { - decoder = &conn_manager_->newStream(encoder); + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + decoder = &conn_manager_->newStream(encoder); - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":method", "GET"}, - {":authority", "host"}, - {":path", "/"}, - {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; - decoder->decodeHeaders(std::move(headers), true); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":method", "GET"}, + {":authority", "host"}, + {":path", "/"}, + {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; + decoder->decodeHeaders(std::move(headers), true); - ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; - filter->callbacks_->encodeHeaders(std::move(response_headers), true); + ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + filter->callbacks_->encodeHeaders(std::move(response_headers), true); - data.drain(4); - })); + data.drain(4); + return Http::okStatus(); + })); Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); @@ -1807,12 +1846,13 @@ TEST_F(HttpConnectionManagerImplTest, NoPath) { RequestDecoder* decoder = nullptr; NiceMock encoder; - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { decoder = &conn_manager_->newStream(encoder); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":method", "NOT_CONNECT"}}}; decoder->decodeHeaders(std::move(headers), true); data.drain(4); + return Http::okStatus(); })); EXPECT_CALL(encoder, encodeHeaders(_, true)) @@ -1831,15 +1871,17 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutNotConfigured) { setup(false, ""); EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, createTimer_(_)).Times(0); - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance& data) -> void { - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; - 
decoder->decodeHeaders(std::move(headers), false); + RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ + {":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; + decoder->decodeHeaders(std::move(headers), false); - data.drain(4); - })); + data.drain(4); + return Http::okStatus(); + })); Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); @@ -1853,7 +1895,7 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutGlobal) { stream_idle_timeout_ = std::chrono::milliseconds(10); setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance&) -> Http::Status { Event::MockTimer* idle_timer = setUpTimer(); EXPECT_CALL(*idle_timer, enableTimer(std::chrono::milliseconds(10), _)); conn_manager_->newStream(response_encoder_); @@ -1863,6 +1905,7 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutGlobal) { EXPECT_CALL(*idle_timer, enableTimer(_, _)).Times(2); EXPECT_CALL(*idle_timer, disableTimer()); idle_timer->invokeCallback(); + return Http::okStatus(); })); // 408 direct response after timeout. @@ -1891,7 +1934,7 @@ TEST_F(HttpConnectionManagerImplTest, AccessEncoderRouteBeforeHeadersArriveOnIdl callbacks.addStreamEncoderFilter(filter); })); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { Event::MockTimer* idle_timer = setUpTimer(); EXPECT_CALL(*idle_timer, enableTimer(std::chrono::milliseconds(10), _)); conn_manager_->newStream(response_encoder_); @@ -1902,6 +1945,7 @@ TEST_F(HttpConnectionManagerImplTest, AccessEncoderRouteBeforeHeadersArriveOnIdl EXPECT_CALL(*idle_timer, disableTimer()); // Simulate and idle timeout so that the filter chain gets created. idle_timer->invokeCallback(); + return Http::okStatus(); })); // This should not be called as we don't have request headers. 
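The hunks above and below all apply the same mechanical change: the mocked codec dispatch callbacks in these tests now return Http::Status instead of void, so each lambda body gains a trailing return of Http::okStatus(). A minimal sketch of the new-style expectation, assuming the fixture members (codec_, conn_manager_, response_encoder_) used throughout this test file:

    // The dispatch mock now returns Http::Status; a successful parse ends with okStatus().
    EXPECT_CALL(*codec_, dispatch(_))
        .WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status {
          RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_);
          RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{
              {":authority", "host"}, {":path", "/"}, {":method", "GET"}}};
          decoder->decodeHeaders(std::move(headers), true);
          data.drain(4);
          return Http::okStatus(); // previously the lambda returned void
        }));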
@@ -1932,7 +1976,7 @@ TEST_F(HttpConnectionManagerImplTest, TestStreamIdleAccessLog) { setup(false, ""); NiceMock encoder; - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance&) -> Http::Status { Event::MockTimer* idle_timer = setUpTimer(); EXPECT_CALL(*idle_timer, enableTimer(std::chrono::milliseconds(10), _)); conn_manager_->newStream(response_encoder_); @@ -1942,6 +1986,7 @@ TEST_F(HttpConnectionManagerImplTest, TestStreamIdleAccessLog) { EXPECT_CALL(*idle_timer, enableTimer(_, _)).Times(2); EXPECT_CALL(*idle_timer, disableTimer()); idle_timer->invokeCallback(); + return Http::okStatus(); })); std::shared_ptr filter(new NiceMock()); @@ -1984,18 +2029,20 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutRouteOverride) { ON_CALL(route_config_provider_.route_config_->route_->route_entry_, idleTimeout()) .WillByDefault(Return(std::chrono::milliseconds(30))); - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance& data) -> void { - Event::MockTimer* idle_timer = setUpTimer(); - EXPECT_CALL(*idle_timer, enableTimer(std::chrono::milliseconds(10), _)); - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); - - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; - EXPECT_CALL(*idle_timer, enableTimer(std::chrono::milliseconds(30), _)); - decoder->decodeHeaders(std::move(headers), false); - - data.drain(4); - })); + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + Event::MockTimer* idle_timer = setUpTimer(); + EXPECT_CALL(*idle_timer, enableTimer(std::chrono::milliseconds(10), _)); + RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); + + RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ + {":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; + EXPECT_CALL(*idle_timer, enableTimer(std::chrono::milliseconds(30), _)); + decoder->decodeHeaders(std::move(headers), false); + + data.drain(4); + return Http::okStatus(); + })); Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); @@ -2010,18 +2057,20 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutRouteZeroOverride) { ON_CALL(route_config_provider_.route_config_->route_->route_entry_, idleTimeout()) .WillByDefault(Return(std::chrono::milliseconds(0))); - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance& data) -> void { - Event::MockTimer* idle_timer = setUpTimer(); - EXPECT_CALL(*idle_timer, enableTimer(std::chrono::milliseconds(10), _)); - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); - - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; - EXPECT_CALL(*idle_timer, disableTimer()); - decoder->decodeHeaders(std::move(headers), false); - - data.drain(4); - })); + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + Event::MockTimer* idle_timer = setUpTimer(); + EXPECT_CALL(*idle_timer, enableTimer(std::chrono::milliseconds(10), _)); + RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); + + RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ + {":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; + EXPECT_CALL(*idle_timer, disableTimer()); + 
decoder->decodeHeaders(std::move(headers), false); + + data.drain(4); + return Http::okStatus(); + })); Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); @@ -2036,7 +2085,7 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutAfterDownstreamHeaders .WillByDefault(Return(std::chrono::milliseconds(10))); // Codec sends downstream request headers. - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); Event::MockTimer* idle_timer = setUpTimer(); @@ -2052,6 +2101,7 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutAfterDownstreamHeaders idle_timer->invokeCallback(); data.drain(4); + return Http::okStatus(); })); // 408 direct response after timeout. @@ -2077,7 +2127,7 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutNormalTermination) { // Codec sends downstream request headers. Event::MockTimer* idle_timer = setUpTimer(); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ @@ -2086,6 +2136,7 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutNormalTermination) { decoder->decodeHeaders(std::move(headers), false); data.drain(4); + return Http::okStatus(); })); Buffer::OwnedImpl fake_input("1234"); @@ -2105,7 +2156,7 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutAfterDownstreamHeaders .WillByDefault(Return(std::chrono::milliseconds(10))); // Codec sends downstream request headers. - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); Event::MockTimer* idle_timer = setUpTimer(); @@ -2124,6 +2175,7 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutAfterDownstreamHeaders idle_timer->invokeCallback(); data.drain(4); + return Http::okStatus(); })); // 408 direct response after timeout. @@ -2158,7 +2210,7 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutAfterUpstreamHeaders) // Codec sends downstream request headers, upstream response headers are // encoded. - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); Event::MockTimer* idle_timer = setUpTimer(); @@ -2175,6 +2227,7 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutAfterUpstreamHeaders) idle_timer->invokeCallback(); data.drain(4); + return Http::okStatus(); })); // 200 upstream response. @@ -2209,7 +2262,7 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutAfterBidiData) { // encoded, data events happen in various directions. 
Event::MockTimer* idle_timer = setUpTimer(); RequestDecoder* decoder; - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; @@ -2240,6 +2293,7 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutAfterBidiData) { idle_timer->invokeCallback(); data.drain(4); + return Http::okStatus(); })); // 100 continue. @@ -2264,9 +2318,10 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutAfterBidiData) { TEST_F(HttpConnectionManagerImplTest, RequestTimeoutDisabledByDefault) { setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, createTimer_).Times(0); conn_manager_->newStream(response_encoder_); + return Http::okStatus(); })); Buffer::OwnedImpl fake_input("1234"); @@ -2277,9 +2332,10 @@ TEST_F(HttpConnectionManagerImplTest, RequestTimeoutDisabledIfSetToZero) { request_timeout_ = std::chrono::milliseconds(0); setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, createTimer_).Times(0); conn_manager_->newStream(response_encoder_); + return Http::okStatus(); })); Buffer::OwnedImpl fake_input("1234"); @@ -2290,11 +2346,12 @@ TEST_F(HttpConnectionManagerImplTest, RequestTimeoutValidlyConfigured) { request_timeout_ = std::chrono::milliseconds(10); setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { Event::MockTimer* request_timer = setUpTimer(); EXPECT_CALL(*request_timer, enableTimer(request_timeout_, _)); conn_manager_->newStream(response_encoder_); + return Http::okStatus(); })); Buffer::OwnedImpl fake_input("1234"); @@ -2306,7 +2363,7 @@ TEST_F(HttpConnectionManagerImplTest, RequestTimeoutCallbackDisarmsAndReturns408 setup(false, ""); std::string response_body; - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { Event::MockTimer* request_timer = setUpTimer(); EXPECT_CALL(*request_timer, enableTimer(request_timeout_, _)).Times(1); EXPECT_CALL(*request_timer, disableTimer()).Times(AtLeast(1)); @@ -2320,6 +2377,7 @@ TEST_F(HttpConnectionManagerImplTest, RequestTimeoutCallbackDisarmsAndReturns408 conn_manager_->newStream(response_encoder_); EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, setTrackedObject(_)).Times(2); request_timer->invokeCallback(); + return Http::okStatus(); })); Buffer::OwnedImpl fake_input("1234"); @@ -2333,7 +2391,7 @@ TEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsNotDisarmedOnIncompleteReq request_timeout_ = std::chrono::milliseconds(10); setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { Event::MockTimer* request_timer = setUpTimer(); 
EXPECT_CALL(*request_timer, enableTimer(request_timeout_, _)).Times(1); EXPECT_CALL(*request_timer, disableTimer()).Times(0); @@ -2344,6 +2402,7 @@ TEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsNotDisarmedOnIncompleteReq // the second parameter 'false' leaves the stream open decoder->decodeHeaders(std::move(headers), false); + return Http::okStatus(); })); Buffer::OwnedImpl fake_input("1234"); @@ -2356,7 +2415,7 @@ TEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsDisarmedOnCompleteRequestW request_timeout_ = std::chrono::milliseconds(10); setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { Event::MockTimer* request_timer = setUpTimer(); EXPECT_CALL(*request_timer, enableTimer(request_timeout_, _)).Times(1); @@ -2366,6 +2425,7 @@ TEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsDisarmedOnCompleteRequestW EXPECT_CALL(*request_timer, disableTimer()).Times(1); decoder->decodeHeaders(std::move(headers), true); + return Http::okStatus(); })); Buffer::OwnedImpl fake_input("1234"); @@ -2378,7 +2438,7 @@ TEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsDisarmedOnCompleteRequestW request_timeout_ = std::chrono::milliseconds(10); setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { Event::MockTimer* request_timer = setUpTimer(); EXPECT_CALL(*request_timer, enableTimer(request_timeout_, _)).Times(1); @@ -2389,6 +2449,7 @@ TEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsDisarmedOnCompleteRequestW EXPECT_CALL(*request_timer, disableTimer()).Times(1); decoder->decodeData(data, true); + return Http::okStatus(); })); Buffer::OwnedImpl fake_input("1234"); @@ -2401,7 +2462,7 @@ TEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsDisarmedOnCompleteRequestW request_timeout_ = std::chrono::milliseconds(10); setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { Event::MockTimer* request_timer = setUpTimer(); EXPECT_CALL(*request_timer, enableTimer(request_timeout_, _)).Times(1); RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); @@ -2414,6 +2475,7 @@ TEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsDisarmedOnCompleteRequestW EXPECT_CALL(*request_timer, disableTimer()).Times(1); RequestTrailerMapPtr trailers{new TestRequestTrailerMapImpl{{"foo", "bar"}}}; decoder->decodeTrailers(std::move(trailers)); + return Http::okStatus(); })); Buffer::OwnedImpl fake_input("1234"); @@ -2432,7 +2494,7 @@ TEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsDisarmedOnEncodeHeaders) { })); EXPECT_CALL(response_encoder_, encodeHeaders(_, _)); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { Event::MockTimer* request_timer = setUpTimer(); EXPECT_CALL(*request_timer, enableTimer(request_timeout_, _)).Times(1); @@ -2445,6 +2507,7 @@ TEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsDisarmedOnEncodeHeaders) { EXPECT_CALL(*request_timer, disableTimer()).Times(1); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; 
filter->callbacks_->encodeHeaders(std::move(response_headers), false); + return Http::okStatus(); })); Buffer::OwnedImpl fake_input("1234"); @@ -2458,12 +2521,13 @@ TEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsDisarmedOnConnectionTermin setup(false, ""); Event::MockTimer* request_timer = setUpTimer(); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; decoder->decodeHeaders(std::move(headers), false); + return Http::okStatus(); })); Buffer::OwnedImpl fake_input("1234"); @@ -2481,9 +2545,10 @@ TEST_F(HttpConnectionManagerImplTest, MaxStreamDurationDisabledIfSetToZero) { max_stream_duration_ = std::chrono::milliseconds(0); setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, createTimer_).Times(0); conn_manager_->newStream(response_encoder_); + return Http::okStatus(); })); Buffer::OwnedImpl fake_input("1234"); @@ -2494,11 +2559,12 @@ TEST_F(HttpConnectionManagerImplTest, MaxStreamDurationValidlyConfigured) { max_stream_duration_ = std::chrono::milliseconds(10); setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { Event::MockTimer* duration_timer = setUpTimer(); EXPECT_CALL(*duration_timer, enableTimer(max_stream_duration_.value(), _)); conn_manager_->newStream(response_encoder_); + return Http::okStatus(); })); Buffer::OwnedImpl fake_input("1234"); @@ -2510,9 +2576,10 @@ TEST_F(HttpConnectionManagerImplTest, MaxStreamDurationCallbackResetStream) { setup(false, ""); Event::MockTimer* duration_timer = setUpTimer(); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { EXPECT_CALL(*duration_timer, enableTimer(max_stream_duration_.value(), _)).Times(1); conn_manager_->newStream(response_encoder_); + return Http::okStatus(); })); Buffer::OwnedImpl fake_input("1234"); @@ -2530,9 +2597,10 @@ TEST_F(HttpConnectionManagerImplTest, MaxStreamDurationCallbackNotCalledIfResetS setup(false, ""); Event::MockTimer* duration_timer = setUpTimer(); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { EXPECT_CALL(*duration_timer, enableTimer(max_stream_duration_.value(), _)).Times(1); conn_manager_->newStream(response_encoder_); + return Http::okStatus(); })); Buffer::OwnedImpl fake_input("1234"); @@ -2549,7 +2617,7 @@ TEST_F(HttpConnectionManagerImplTest, RejectWebSocketOnNonWebSocketRoute) { setup(false, ""); RequestDecoder* decoder = nullptr; NiceMock encoder; - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { decoder = &conn_manager_->newStream(encoder); RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{{":authority", "host"}, {":method", "GET"}, @@ -2558,6 +2626,7 
@@ TEST_F(HttpConnectionManagerImplTest, RejectWebSocketOnNonWebSocketRoute) { {"upgrade", "websocket"}}}; decoder->decodeHeaders(std::move(headers), true); data.drain(4); + return Http::okStatus(); })); EXPECT_CALL(encoder, encodeHeaders(_, true)) @@ -2608,22 +2677,24 @@ TEST_F(HttpConnectionManagerImplTest, FooUpgradeDrainClose) { // When dispatch is called on the codec, we pretend to get a new stream and then fire a headers // only request into it. Then we respond into the filter. RequestDecoder* decoder = nullptr; - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance& data) -> void { - decoder = &conn_manager_->newStream(encoder); + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + decoder = &conn_manager_->newStream(encoder); - RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{{":authority", "host"}, - {":method", "GET"}, - {":path", "/"}, - {"connection", "Upgrade"}, - {"upgrade", "foo"}}}; - decoder->decodeHeaders(std::move(headers), false); + RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{{":authority", "host"}, + {":method", "GET"}, + {":path", "/"}, + {"connection", "Upgrade"}, + {"upgrade", "foo"}}}; + decoder->decodeHeaders(std::move(headers), false); - ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{ - {":status", "101"}, {"Connection", "upgrade"}, {"upgrade", "foo"}}}; - filter->decoder_callbacks_->encodeHeaders(std::move(response_headers), false); + ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{ + {":status", "101"}, {"Connection", "upgrade"}, {"upgrade", "foo"}}}; + filter->decoder_callbacks_->encodeHeaders(std::move(response_headers), false); - data.drain(4); - })); + data.drain(4); + return Http::okStatus(); + })); // Kick off the incoming data. Use extra data which should cause a redispatch. Buffer::OwnedImpl fake_input("1234"); @@ -2640,13 +2711,15 @@ TEST_F(HttpConnectionManagerImplTest, ConnectAsUpgrade) { EXPECT_CALL(filter_factory_, createUpgradeFilterChain("CONNECT", _, _)) .WillRepeatedly(Return(true)); - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance& data) -> void { - decoder = &conn_manager_->newStream(encoder); - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":authority", "host"}, {":method", "CONNECT"}}}; - decoder->decodeHeaders(std::move(headers), false); - data.drain(4); - })); + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + decoder = &conn_manager_->newStream(encoder); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":authority", "host"}, {":method", "CONNECT"}}}; + decoder->decodeHeaders(std::move(headers), false); + data.drain(4); + return Http::okStatus(); + })); // Kick off the incoming data. Use extra data which should cause a redispatch. 
Buffer::OwnedImpl fake_input("1234"); @@ -2660,11 +2733,12 @@ TEST_F(HttpConnectionManagerImplTest, DrainCloseRaceWithClose) { RequestDecoder* decoder = nullptr; NiceMock encoder; - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { decoder = &conn_manager_->newStream(encoder); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; decoder->decodeHeaders(std::move(headers), true); + return Http::okStatus(); })); setupFilterChain(1, 0); @@ -2686,9 +2760,7 @@ TEST_F(HttpConnectionManagerImplTest, DrainCloseRaceWithClose) { // Fake a protocol error that races with the drain timeout. This will cause a local close. // Also fake the local close not closing immediately. - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { - throw CodecProtocolException("protocol error"); - })); + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Return(codecProtocolError("protocol error"))); EXPECT_CALL(*drain_timer, disableTimer()); EXPECT_CALL(filter_callbacks_.connection_, close(Network::ConnectionCloseType::FlushWriteAndDelay)) @@ -2717,11 +2789,12 @@ TEST_F(HttpConnectionManagerImplTest, DrainClose) { RequestDecoder* decoder = nullptr; NiceMock encoder; - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { decoder = &conn_manager_->newStream(encoder); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; decoder->decodeHeaders(std::move(headers), true); + return Http::okStatus(); })); Buffer::OwnedImpl fake_input; @@ -2752,11 +2825,12 @@ TEST_F(HttpConnectionManagerImplTest, ResponseBeforeRequestComplete) { InSequence s; setup(false, "envoy-server-test"); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; decoder->decodeHeaders(std::move(headers), false); + return Http::okStatus(); })); setupFilterChain(1, 0); @@ -2784,11 +2858,12 @@ TEST_F(HttpConnectionManagerImplTest, DisconnectOnProxyConnectionDisconnect) { InSequence s; setup(false, "envoy-server-test"); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ {":authority", "host"}, {":path", "/"}, {":method", "GET"}, {"proxy-connection", "close"}}}; decoder->decodeHeaders(std::move(headers), false); + return Http::okStatus(); })); setupFilterChain(1, 0); @@ -2830,11 +2905,12 @@ TEST_F(HttpConnectionManagerImplTest, ResponseStartBeforeRequestComplete) { // Start the request RequestDecoder* decoder = nullptr; NiceMock encoder; - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { decoder = &conn_manager_->newStream(encoder); RequestHeaderMapPtr headers{ new 
TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; decoder->decodeHeaders(std::move(headers), false); + return Http::okStatus(); })); Buffer::OwnedImpl fake_input("hello"); @@ -2851,8 +2927,9 @@ TEST_F(HttpConnectionManagerImplTest, ResponseStartBeforeRequestComplete) { // Finish the request. EXPECT_CALL(*filter, decodeData(_, true)); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { decoder->decodeData(data, true); + return Http::okStatus(); })); conn_manager_->onData(fake_input, false); @@ -2871,9 +2948,10 @@ TEST_F(HttpConnectionManagerImplTest, DownstreamDisconnect) { setup(false, ""); NiceMock encoder; - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { conn_manager_->newStream(encoder); data.drain(2); + return Http::okStatus(); })); EXPECT_CALL(filter_factory_, createFilterChain(_)).Times(0); @@ -2890,9 +2968,9 @@ TEST_F(HttpConnectionManagerImplTest, DownstreamProtocolError) { InSequence s; setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { conn_manager_->newStream(response_encoder_); - throw CodecProtocolException("protocol error"); + return codecProtocolError("protocol error"); })); EXPECT_CALL(response_encoder_.stream_, removeCallbacks(_)); @@ -2924,9 +3002,9 @@ TEST_F(HttpConnectionManagerImplTest, TestDownstreamProtocolErrorAccessLog) { RequestDecoder* decoder = nullptr; NiceMock encoder; - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance&) -> Http::Status { decoder = &conn_manager_->newStream(encoder); - throw CodecProtocolException("protocol error"); + return codecProtocolError("protocol error"); })); Buffer::OwnedImpl fake_input("1234"); @@ -2955,14 +3033,14 @@ TEST_F(HttpConnectionManagerImplTest, TestDownstreamProtocolErrorAfterHeadersAcc RequestDecoder* decoder = nullptr; NiceMock encoder; - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance&) -> Http::Status { decoder = &conn_manager_->newStream(encoder); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":method", "GET"}, {":authority", "host"}, {":path", "/"}}}; decoder->decodeHeaders(std::move(headers), true); - throw CodecProtocolException("protocol error"); + return codecProtocolError("protocol error"); })); Buffer::OwnedImpl fake_input("1234"); @@ -2974,9 +3052,9 @@ TEST_F(HttpConnectionManagerImplTest, FrameFloodError) { InSequence s; setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { conn_manager_->newStream(response_encoder_); - throw FrameFloodException("too many outbound frames."); + return bufferFloodError("too many outbound frames."); })); EXPECT_CALL(response_encoder_.stream_, removeCallbacks(_)); @@ -3026,7 +3104,7 @@ TEST_F(HttpConnectionManagerImplTest, IdleTimeout) { NiceMock encoder; RequestDecoder* decoder = nullptr; - EXPECT_CALL(*codec_, 
dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { decoder = &conn_manager_->newStream(encoder); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; @@ -3034,6 +3112,7 @@ TEST_F(HttpConnectionManagerImplTest, IdleTimeout) { Buffer::OwnedImpl fake_data("hello"); decoder->decodeData(fake_data, true); + return Http::okStatus(); })); EXPECT_CALL(*idle_timer, disableTimer()); @@ -3095,7 +3174,7 @@ TEST_F(HttpConnectionManagerImplTest, ConnectionDuration) { NiceMock encoder; RequestDecoder* decoder = nullptr; - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { decoder = &conn_manager_->newStream(encoder); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; @@ -3103,6 +3182,7 @@ TEST_F(HttpConnectionManagerImplTest, ConnectionDuration) { Buffer::OwnedImpl fake_data("hello"); decoder->decodeData(fake_data, true); + return Http::okStatus(); })); EXPECT_CALL(*filter, decodeHeaders(_, false)) @@ -3135,7 +3215,7 @@ TEST_F(HttpConnectionManagerImplTest, IntermediateBufferingEarlyResponse) { InSequence s; setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; @@ -3143,6 +3223,7 @@ TEST_F(HttpConnectionManagerImplTest, IntermediateBufferingEarlyResponse) { Buffer::OwnedImpl fake_data("hello"); decoder->decodeData(fake_data, true); + return Http::okStatus(); })); setupFilterChain(2, 0); @@ -3181,12 +3262,13 @@ TEST_F(HttpConnectionManagerImplTest, DoubleBuffering) { // The data will get moved so we need to have a copy to compare against. 
Buffer::OwnedImpl fake_data("hello"); Buffer::OwnedImpl fake_data_copy("hello"); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; decoder->decodeHeaders(std::move(headers), false); decoder->decodeData(fake_data, true); + return Http::okStatus(); })); setupFilterChain(3, 0); @@ -3224,11 +3306,12 @@ TEST_F(HttpConnectionManagerImplTest, ZeroByteDataFiltering) { setup(false, ""); RequestDecoder* decoder = nullptr; - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; decoder->decodeHeaders(std::move(headers), false); + return Http::okStatus(); })); setupFilterChain(2, 0); @@ -3263,7 +3346,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterAddTrailersInTrailersCallback) { InSequence s; setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; @@ -3274,6 +3357,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterAddTrailersInTrailersCallback) { RequestTrailerMapPtr trailers{new TestRequestTrailerMapImpl{{"bazzz", "bar"}}}; decoder->decodeTrailers(std::move(trailers)); + return Http::okStatus(); })); setupFilterChain(2, 2); @@ -3349,7 +3433,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterAddTrailersInDataCallbackNoTrailers) InSequence s; setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; @@ -3357,6 +3441,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterAddTrailersInDataCallbackNoTrailers) Buffer::OwnedImpl fake_data("hello"); decoder->decodeData(fake_data, true); + return Http::okStatus(); })); setupFilterChain(2, 2); @@ -3440,7 +3525,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterAddBodyInTrailersCallback) { InSequence s; setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; @@ -3451,6 +3536,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterAddBodyInTrailersCallback) { RequestTrailerMapPtr trailers{new TestRequestTrailerMapImpl{{"foo", "bar"}}}; decoder->decodeTrailers(std::move(trailers)); + return Http::okStatus(); })); setupFilterChain(2, 2); @@ -3521,7 +3607,7 @@ TEST_F(HttpConnectionManagerImplTest, 
FilterAddBodyInTrailersCallback_NoDataFram InSequence s; setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; @@ -3529,6 +3615,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterAddBodyInTrailersCallback_NoDataFram RequestTrailerMapPtr trailers{new TestRequestTrailerMapImpl{{"foo", "bar"}}}; decoder->decodeTrailers(std::move(trailers)); + return Http::okStatus(); })); setupFilterChain(2, 1); @@ -3580,7 +3667,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterAddBodyInTrailersCallback_ContinueAf InSequence s; setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; @@ -3588,6 +3675,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterAddBodyInTrailersCallback_ContinueAf RequestTrailerMapPtr trailers{new TestRequestTrailerMapImpl{{"foo", "bar"}}}; decoder->decodeTrailers(std::move(trailers)); + return Http::okStatus(); })); setupFilterChain(2, 1); @@ -3645,7 +3733,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterAddBodyDuringDecodeData) { InSequence s; setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; @@ -3656,6 +3744,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterAddBodyDuringDecodeData) { Buffer::OwnedImpl data2("world"); decoder->decodeData(data2, true); + return Http::okStatus(); })); setupFilterChain(2, 2); @@ -3713,11 +3802,12 @@ TEST_F(HttpConnectionManagerImplTest, FilterAddBodyInline) { InSequence s; setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; decoder->decodeHeaders(std::move(headers), true); + return Http::okStatus(); })); setupFilterChain(2, 2); @@ -3763,11 +3853,12 @@ TEST_F(HttpConnectionManagerImplTest, FilterAddBodyInline) { TEST_F(HttpConnectionManagerImplTest, FilterClearRouteCache) { setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; decoder->decodeHeaders(std::move(headers), true); + return Http::okStatus(); })); setupFilterChain(3, 2); @@ -3889,10 +3980,11 @@ TEST_F(HttpConnectionManagerImplTest, 
UnderlyingConnectionWatermarksPassedOnWith RequestDecoder* decoder; { setUpBufferLimits(); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { decoder = &conn_manager_->newStream(response_encoder_); // Call the high buffer callbacks as the codecs do. stream_callbacks_->onAboveWriteBufferHighWatermark(); + return Http::okStatus(); })); // Send fake data to kick off newStream being created. @@ -3905,10 +3997,11 @@ TEST_F(HttpConnectionManagerImplTest, UnderlyingConnectionWatermarksPassedOnWith { setupFilterChain(2, 2); EXPECT_CALL(filter_callbacks_.connection_, aboveHighWatermark()).Times(0); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; decoder->decodeHeaders(std::move(headers), true); + return Http::okStatus(); })); EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true)) .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus { @@ -3949,10 +4042,11 @@ TEST_F(HttpConnectionManagerImplTest, UnderlyingConnectionWatermarksUnwoundWithL RequestDecoder* decoder; { setUpBufferLimits(); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { decoder = &conn_manager_->newStream(response_encoder_); // Call the high buffer callbacks as the codecs do. stream_callbacks_->onAboveWriteBufferHighWatermark(); + return Http::okStatus(); })); // Send fake data to kick off newStream being created. 
@@ -3972,10 +4066,11 @@ TEST_F(HttpConnectionManagerImplTest, UnderlyingConnectionWatermarksUnwoundWithL { setupFilterChain(2, 2); EXPECT_CALL(filter_callbacks_.connection_, aboveHighWatermark()).Times(0); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; decoder->decodeHeaders(std::move(headers), true); + return Http::okStatus(); })); EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true)) .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus { @@ -4096,7 +4191,7 @@ TEST_F(HttpConnectionManagerImplTest, HitRequestBufferLimitsIntermediateFilter) initial_buffer_limit_ = 10; setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; @@ -4107,6 +4202,7 @@ TEST_F(HttpConnectionManagerImplTest, HitRequestBufferLimitsIntermediateFilter) Buffer::OwnedImpl fake_data2("world world"); decoder->decodeData(fake_data2, true); + return Http::okStatus(); })); setUpBufferLimits(); @@ -4204,12 +4300,13 @@ TEST_F(HttpConnectionManagerImplTest, FilterHeadReply) { InSequence s; setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "HEAD"}}}; decoder->decodeHeaders(std::move(headers), true); data.drain(4); + return Http::okStatus(); })); setupFilterChain(1, 1); @@ -4243,13 +4340,15 @@ TEST_F(HttpConnectionManagerImplTest, ResetWithStoppedFilter) { InSequence s; setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> void { - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; - decoder->decodeHeaders(std::move(headers), true); - data.drain(4); - })); + EXPECT_CALL(*codec_, dispatch(_)) + .WillOnce(Invoke([&](Buffer::Instance& data) -> Envoy::Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); + RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ + {":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; + decoder->decodeHeaders(std::move(headers), true); + data.drain(4); + return Http::okStatus(); + })); setupFilterChain(1, 1); @@ -4287,12 +4386,13 @@ TEST_F(HttpConnectionManagerImplTest, FilterContinueAndEndStreamHeaders) { InSequence s; setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); auto headers = std::make_unique( std::initializer_list>( {{":authority", "host"}, {":path", "/"}, {":method", "GET"}})); decoder->decodeHeaders(std::move(headers), false); + return 
Http::okStatus(); })); setupFilterChain(2, 2); @@ -4328,7 +4428,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterContinueAndEndStreamData) { InSequence s; setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); auto headers = makeHeaderMap( {{":authority", "host"}, {":path", "/"}, {":method", "GET"}}); @@ -4336,6 +4436,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterContinueAndEndStreamData) { Buffer::OwnedImpl fake_data("hello"); decoder->decodeData(fake_data, true); + return Http::okStatus(); })); setupFilterChain(2, 2); @@ -4370,7 +4471,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterContinueAndEndStreamTrailers) { InSequence s; setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); auto headers = makeHeaderMap( {{":authority", "host"}, {":path", "/"}, {":method", "GET"}}); @@ -4381,6 +4482,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterContinueAndEndStreamTrailers) { auto trailers = makeHeaderMap({{"foo", "bar"}}); decoder->decodeTrailers(std::move(trailers)); + return Http::okStatus(); })); setupFilterChain(2, 2); @@ -4418,11 +4520,12 @@ TEST_F(HttpConnectionManagerImplTest, FilterAddBodyContinuation) { InSequence s; setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; decoder->decodeHeaders(std::move(headers), true); + return Http::okStatus(); })); setupFilterChain(2, 2); @@ -4503,11 +4606,12 @@ TEST_F(HttpConnectionManagerImplTest, AddDataWithAllContinue) { InSequence s; setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; decoder->decodeHeaders(std::move(headers), true); + return Http::okStatus(); })); setupFilterChain(3, 3); @@ -4605,11 +4709,12 @@ TEST_F(HttpConnectionManagerImplTest, AddDataWithStopAndContinue) { InSequence s; setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; decoder->decodeHeaders(std::move(headers), true); + return Http::okStatus(); })); setupFilterChain(3, 3); @@ -4680,7 +4785,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterDirectDecodeEncodeDataNoTrailers) { InSequence s; setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) 
-> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; @@ -4688,6 +4793,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterDirectDecodeEncodeDataNoTrailers) { Buffer::OwnedImpl fake_data("hello"); decoder->decodeData(fake_data, true); + return Http::okStatus(); })); EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _)); @@ -4760,7 +4866,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterDirectDecodeEncodeDataTrailers) { InSequence s; setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; @@ -4771,6 +4877,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterDirectDecodeEncodeDataTrailers) { RequestTrailerMapPtr trailers{new TestRequestTrailerMapImpl{{"foo", "bar"}}}; decoder->decodeTrailers(std::move(trailers)); + return Http::okStatus(); })); EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _)); @@ -4854,7 +4961,7 @@ TEST_F(HttpConnectionManagerImplTest, MultipleFilters) { InSequence s; setup(false, ""); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; @@ -4865,6 +4972,7 @@ TEST_F(HttpConnectionManagerImplTest, MultipleFilters) { Buffer::OwnedImpl fake_data2("world"); decoder->decodeData(fake_data2, true); + return Http::okStatus(); })); EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _)); @@ -4966,11 +5074,12 @@ TEST_F(HttpConnectionManagerImplTest, NoNewStreamWhenOverloaded) { Server::OverloadActionNames::get().StopAcceptingRequests, Server::OverloadActionState::Active); - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; decoder->decodeHeaders(std::move(headers), false); + return Http::okStatus(); })); // 503 direct response when overloaded. 
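The error-path hunks earlier in this file (DrainCloseRaceWithClose, DownstreamProtocolError, FrameFloodError) show the companion change: codec failures are no longer thrown out of dispatch as exceptions but returned as an error Status via helpers such as codecProtocolError() and bufferFloodError(). A sketch of the before/after under the same fixture assumptions:

    // Old style: the mock threw from inside dispatch.
    //   EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void {
    //     conn_manager_->newStream(response_encoder_);
    //     throw CodecProtocolException("protocol error");
    //   }));
    // New style: the mock returns an error Status instead.
    EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status {
      conn_manager_->newStream(response_encoder_);
      return codecProtocolError("protocol error");
    }));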
@@ -5000,17 +5109,21 @@ TEST_F(HttpConnectionManagerImplTest, DisableKeepAliveWhenOverloaded) { callbacks.addStreamDecoderFilter(StreamDecoderFilterSharedPtr{filter}); })); - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance& data) -> void { - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); - RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ - {":authority", "host"}, {":path", "/"}, {":method", "GET"}, {"connection", "keep-alive"}}}; - decoder->decodeHeaders(std::move(headers), true); + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); + RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{{":authority", "host"}, + {":path", "/"}, + {":method", "GET"}, + {"connection", "keep-alive"}}}; + decoder->decodeHeaders(std::move(headers), true); - ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; - filter->callbacks_->encodeHeaders(std::move(response_headers), true); + ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + filter->callbacks_->encodeHeaders(std::move(response_headers), true); - data.drain(4); - })); + data.drain(4); + return Http::okStatus(); + })); EXPECT_CALL(response_encoder_, encodeHeaders(_, true)) .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { @@ -5119,17 +5232,21 @@ TEST_F(HttpConnectionManagerImplTest, DisableKeepAliveWhenDraining) { callbacks.addStreamDecoderFilter(StreamDecoderFilterSharedPtr{filter}); })); - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance& data) -> void { - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); - RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ - {":authority", "host"}, {":path", "/"}, {":method", "GET"}, {"connection", "keep-alive"}}}; - decoder->decodeHeaders(std::move(headers), true); + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); + RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{{":authority", "host"}, + {":path", "/"}, + {":method", "GET"}, + {"connection", "keep-alive"}}}; + decoder->decodeHeaders(std::move(headers), true); - ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; - filter->callbacks_->encodeHeaders(std::move(response_headers), true); + ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + filter->callbacks_->encodeHeaders(std::move(response_headers), true); - data.drain(4); - })); + data.drain(4); + return Http::okStatus(); + })); EXPECT_CALL(response_encoder_, encodeHeaders(_, true)) .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { @@ -5144,9 +5261,11 @@ TEST_F(HttpConnectionManagerImplTest, TestSessionTrace) { setup(false, ""); // Set up the codec. 
- EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance& data) -> void { - data.drain(4); - })); + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + data.drain(4); + return Http::okStatus(); + })); Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); @@ -5211,12 +5330,13 @@ TEST_F(HttpConnectionManagerImplTest, TestSrdsRouteNotFound) { getRouteConfig(_)) .Times(2) .WillRepeatedly(Return(nullptr)); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ {":authority", "host"}, {":method", "GET"}, {":path", "/foo"}}}; decoder->decodeHeaders(std::move(headers), true); data.drain(4); + return Http::okStatus(); })); EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true)) @@ -5241,12 +5361,13 @@ TEST_F(HttpConnectionManagerImplTest, TestSrdsUpdate) { .WillOnce(Return(nullptr)) .WillOnce(Return(nullptr)) // refreshCachedRoute first time. .WillOnce(Return(route_config_)); // triggered by callbacks_->route(), SRDS now updated. - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ {":authority", "host"}, {":method", "GET"}, {":path", "/foo"}}}; decoder->decodeHeaders(std::move(headers), true); data.drain(4); + return Http::okStatus(); })); const std::string fake_cluster1_name = "fake_cluster1"; std::shared_ptr route1 = std::make_shared>(); @@ -5304,12 +5425,13 @@ TEST_F(HttpConnectionManagerImplTest, TestSrdsCrossScopeReroute) { } return route_config2; })); - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ {":authority", "host"}, {":method", "GET"}, {"scope_key", "foo"}, {":path", "/foo"}}}; decoder->decodeHeaders(std::move(headers), false); data.drain(4); + return Http::okStatus(); })); setupFilterChain(2, 0); EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false)) @@ -5357,12 +5479,13 @@ TEST_F(HttpConnectionManagerImplTest, TestSrdsRouteFound) { route(_, _, _)) .WillOnce(Return(route1)); RequestDecoder* decoder = nullptr; - EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ {":authority", "host"}, {":method", "GET"}, {":path", "/foo"}}}; decoder->decodeHeaders(std::move(headers), true); data.drain(4); + return Http::okStatus(); })); EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true)) .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus { @@ -5460,12 +5583,15 @@ TEST_F(HttpConnectionManagerImplTest, ConnectionFilterState) { setupFilterChain(1, 0, /* num_requests = */ 3); - EXPECT_CALL(*codec_, 
dispatch(_)).Times(2).WillRepeatedly(Invoke([&](Buffer::Instance&) -> void { - RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); - RequestHeaderMapPtr headers{ - new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; - decoder->decodeHeaders(std::move(headers), true); - })); + EXPECT_CALL(*codec_, dispatch(_)) + .Times(2) + .WillRepeatedly(Invoke([&](Buffer::Instance&) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); + RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ + {":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; + decoder->decodeHeaders(std::move(headers), true); + return Http::okStatus(); + })); { InSequence s; EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true)) @@ -5541,8 +5667,9 @@ TEST_F(HttpConnectionManagerImplDeathTest, InvalidConnectionManagerConfig) { setup(false, ""); Buffer::OwnedImpl fake_input("1234"); - EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance&) -> void { + EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance&) -> Http::Status { conn_manager_->newStream(response_encoder_); + return Http::okStatus(); })); // Either RDS or SRDS should be set. EXPECT_DEBUG_DEATH(conn_manager_->onData(fake_input, false), diff --git a/test/common/http/http1/codec_impl_test.cc b/test/common/http/http1/codec_impl_test.cc index 82a6c76b0f83..b07b812df650 100644 --- a/test/common/http/http1/codec_impl_test.cc +++ b/test/common/http/http1/codec_impl_test.cc @@ -96,7 +96,8 @@ class Http1ServerConnectionImplTest : public testing::Test { return decoder; })); EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_request_headers), true)); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); response_encoder->encodeHeaders(TestResponseHeaderMapImpl{{":status", "200"}}, true); } @@ -132,7 +133,8 @@ void Http1ServerConnectionImplTest::expect400(Protocol p, bool allow_absolute_ur return decoder; })); - EXPECT_THROW(codec_->dispatch(buffer), CodecProtocolException); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(isCodecProtocolError(status)); EXPECT_EQ("HTTP/1.1 400 Bad Request\r\ncontent-length: 0\r\nconnection: close\r\n\r\n", output); EXPECT_EQ(p, codec_->protocol()); if (!details.empty()) { @@ -157,7 +159,8 @@ void Http1ServerConnectionImplTest::expectHeadersTest(Protocol p, bool allow_abs EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), true)); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); EXPECT_EQ(p, codec_->protocol()); } @@ -189,11 +192,13 @@ void Http1ServerConnectionImplTest::expectTrailersTest(bool enable_trailers) { EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data), false)); EXPECT_CALL(decoder, decodeData(_, true)); } + Buffer::OwnedImpl buffer("POST / HTTP/1.1\r\ntransfer-encoding: chunked\r\n\r\n" "6\r\nHello \r\n" "5\r\nWorld\r\n" "0\r\nhello: world\r\nsecond: header\r\n\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); } @@ -223,15 +228,18 @@ void Http1ServerConnectionImplTest::testTrailersExceedLimit(std::string trailer_ "Transfer-Encoding: chunked\r\n\r\n" "4\r\n" "body\r\n0\r\n"); - codec_->dispatch(buffer); + auto status = 
codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); buffer = Buffer::OwnedImpl(trailer_string + "\r\n\r\n"); if (enable_trailers) { - EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), EnvoyException, - "trailers size exceeds limit"); + status = codec_->dispatch(buffer); + EXPECT_TRUE(isCodecProtocolError(status)); + EXPECT_EQ(status.message(), "trailers size exceeds limit"); } else { // If trailers are not enabled, we expect Envoy to simply skip over the large // trailers as if nothing has happened! - codec_->dispatch(buffer); + status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); } } void Http1ServerConnectionImplTest::testRequestHeadersExceedLimit(std::string header_string, @@ -248,9 +256,12 @@ void Http1ServerConnectionImplTest::testRequestHeadersExceedLimit(std::string he })); Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); buffer = Buffer::OwnedImpl(header_string + "\r\n"); - EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), EnvoyException, "headers size exceeds limit"); + status = codec_->dispatch(buffer); + EXPECT_TRUE(isCodecProtocolError(status)); + EXPECT_EQ(status.message(), "headers size exceeds limit"); if (!details.empty()) { EXPECT_EQ(details, response_encoder->getStream().responseDetails()); } @@ -268,9 +279,10 @@ void Http1ServerConnectionImplTest::testRequestHeadersAccepted(std::string heade })); Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); buffer = Buffer::OwnedImpl(header_string + "\r\n"); - codec_->dispatch(buffer); + status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); } TEST_F(Http1ServerConnectionImplTest, EmptyHeader) { @@ -290,7 +302,8 @@ TEST_F(Http1ServerConnectionImplTest, EmptyHeader) { EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), true)); Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\nTest:\r\nHello: World\r\n\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); } @@ -305,8 +318,9 @@ TEST_F(Http1ServerConnectionImplTest, IdentityEncodingNoChunked) { EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\ntransfer-encoding: identity\r\n\r\n"); - EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), CodecProtocolException, - "http/1.1 protocol error: unsupported transfer encoding"); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(isCodecProtocolError(status)); + EXPECT_EQ(status.message(), "http/1.1 protocol error: unsupported transfer encoding"); } TEST_F(Http1ServerConnectionImplTest, UnsupportedEncoding) { @@ -318,8 +332,9 @@ TEST_F(Http1ServerConnectionImplTest, UnsupportedEncoding) { EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\ntransfer-encoding: gzip\r\n\r\n"); - EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), CodecProtocolException, - "http/1.1 protocol error: unsupported transfer encoding"); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(isCodecProtocolError(status)); + EXPECT_EQ(status.message(), "http/1.1 protocol error: unsupported transfer encoding"); } // Verify that data in the two body chunks is merged before the call to decodeData. 
@@ -347,7 +362,8 @@ TEST_F(Http1ServerConnectionImplTest, ChunkedBody) { "6\r\nHello \r\n" "5\r\nWorld\r\n" "0\r\n\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); } @@ -373,7 +389,8 @@ TEST_F(Http1ServerConnectionImplTest, ChunkedBodySplitOverTwoDispatches) { Buffer::OwnedImpl buffer("POST / HTTP/1.1\r\ntransfer-encoding: chunked\r\n\r\n" "6\r\nHello \r\n" "5\r\nWorl"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); // Process the rest of the body and final chunk. @@ -383,7 +400,8 @@ TEST_F(Http1ServerConnectionImplTest, ChunkedBodySplitOverTwoDispatches) { Buffer::OwnedImpl buffer2("d\r\n" "0\r\n\r\n"); - codec_->dispatch(buffer2); + status = codec_->dispatch(buffer2); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer2.length()); } @@ -413,7 +431,8 @@ TEST_F(Http1ServerConnectionImplTest, ChunkedBodyFragmentedBuffer) { "5\r\nWorld\r\n" "0\r\n\r\n", 1); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); } @@ -437,7 +456,8 @@ TEST_F(Http1ServerConnectionImplTest, ChunkedBodyCase) { Buffer::OwnedImpl buffer( "POST / HTTP/1.1\r\ntransfer-encoding: Chunked\r\n\r\nb\r\nHello World\r\n0\r\n\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); } @@ -463,8 +483,9 @@ TEST_F(Http1ServerConnectionImplTest, InvalidChunkHeader) { "6\r\nHello \r\n" "invalid\r\nWorl"); - EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), CodecProtocolException, - "http/1.1 protocol error: HPE_INVALID_CHUNK_SIZE"); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(isCodecProtocolError(status)); + EXPECT_EQ(status.message(), "http/1.1 protocol error: HPE_INVALID_CHUNK_SIZE"); } TEST_F(Http1ServerConnectionImplTest, IdentityAndChunkedBody) { @@ -477,8 +498,10 @@ TEST_F(Http1ServerConnectionImplTest, IdentityAndChunkedBody) { Buffer::OwnedImpl buffer("POST / HTTP/1.1\r\ntransfer-encoding: " "identity,chunked\r\n\r\nb\r\nHello World\r\n0\r\n\r\n"); - EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), CodecProtocolException, - "http/1.1 protocol error: unsupported transfer encoding"); + + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(isCodecProtocolError(status)); + EXPECT_EQ(status.message(), "http/1.1 protocol error: unsupported transfer encoding"); } TEST_F(Http1ServerConnectionImplTest, HostWithLWS) { @@ -546,7 +569,8 @@ TEST_F(Http1ServerConnectionImplTest, Http10) { EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), true)); Buffer::OwnedImpl buffer("GET / HTTP/1.0\r\n\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); EXPECT_EQ(Protocol::Http10, codec_->protocol()); } @@ -584,7 +608,8 @@ TEST_F(Http1ServerConnectionImplTest, Http10MultipleResponses) { })); EXPECT_CALL(decoder, decodeHeaders_(_, true)); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); std::string output; ON_CALL(connection_, write(_, _)).WillByDefault(AddBufferToString(&output)); @@ -607,7 +632,8 @@ TEST_F(Http1ServerConnectionImplTest, Http10MultipleResponses) { return decoder; })); EXPECT_CALL(decoder, decodeHeaders_(_, true)); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(Protocol::Http11, 
codec_->protocol()); } } @@ -679,7 +705,9 @@ TEST_F(Http1ServerConnectionImplTest, Http11InvalidTrailerPost) { "4\r\n" "body\r\n0\r\n" "badtrailer\r\n\r\n"); - EXPECT_THROW(codec_->dispatch(buffer), CodecProtocolException); + + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(isCodecProtocolError(status)); EXPECT_EQ("HTTP/1.1 400 Bad Request\r\ncontent-length: 0\r\nconnection: close\r\n\r\n", output); } @@ -744,7 +772,8 @@ TEST_F(Http1ServerConnectionImplTest, SimpleGet) { EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), true)); Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\n\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); } @@ -755,7 +784,8 @@ TEST_F(Http1ServerConnectionImplTest, BadRequestNoStream) { ON_CALL(connection_, write(_, _)).WillByDefault(AddBufferToString(&output)); Buffer::OwnedImpl buffer("bad"); - EXPECT_THROW(codec_->dispatch(buffer), CodecProtocolException); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(isCodecProtocolError(status)); EXPECT_EQ("HTTP/1.1 400 Bad Request\r\ncontent-length: 0\r\nconnection: close\r\n\r\n", output); } @@ -771,7 +801,8 @@ TEST_F(Http1ServerConnectionImplTest, RejectInvalidMethod) { ON_CALL(connection_, write(_, _)).WillByDefault(AddBufferToString(&output)); Buffer::OwnedImpl buffer("BAD / HTTP/1.1\r\nHost: foo\r\n"); - EXPECT_THROW(codec_->dispatch(buffer), CodecProtocolException); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(isCodecProtocolError(status)); EXPECT_EQ("HTTP/1.1 400 Bad Request\r\ncontent-length: 0\r\nconnection: close\r\n\r\n", output); } @@ -785,10 +816,12 @@ TEST_F(Http1ServerConnectionImplTest, BadRequestStartedStream) { EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); Buffer::OwnedImpl buffer("G"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); Buffer::OwnedImpl buffer2("g"); - EXPECT_THROW(codec_->dispatch(buffer2), CodecProtocolException); + status = codec_->dispatch(buffer2); + EXPECT_TRUE(isCodecProtocolError(status)); EXPECT_EQ("HTTP/1.1 400 Bad Request\r\ncontent-length: 0\r\nconnection: close\r\n\r\n", output); } @@ -808,7 +841,8 @@ TEST_F(Http1ServerConnectionImplTest, FloodProtection) { })); Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\n\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); // In most tests the write output is serialized to a buffer here it is @@ -835,8 +869,9 @@ })); Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\n\r\n"); - EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), FrameFloodException, - "Too many responses queued."); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(isBufferFloodError(status)); + EXPECT_EQ(status.message(), "Too many responses queued."); EXPECT_EQ(1, store_.counter("http1.response_flood").value()); } } @@ -859,7 +894,8 @@ TEST_F(Http1ServerConnectionImplTest, FloodProtectionOff) { })); Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\n\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); // In most tests the write output is serialized to a buffer here it is @@ -888,7 +924,8 @@ TEST_F(Http1ServerConnectionImplTest, HostHeaderTranslation) { EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), true)); Buffer::OwnedImpl buffer("GET / 
HTTP/1.1\r\nHOST: hello\r\n\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); } @@ -908,7 +945,8 @@ TEST_F(Http1ServerConnectionImplTest, HeaderInvalidCharsRuntimeGuard) { Buffer::OwnedImpl buffer( absl::StrCat("GET / HTTP/1.1\r\nHOST: h.com\r\nfoo: ", std::string(1, 3), "\r\n")); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); } // Ensures that requests with invalid HTTP header values are properly rejected @@ -931,8 +969,9 @@ TEST_F(Http1ServerConnectionImplTest, HeaderInvalidCharsRejection) { })); Buffer::OwnedImpl buffer( absl::StrCat("GET / HTTP/1.1\r\nHOST: h.com\r\nfoo: ", std::string(1, 3), "\r\n")); - EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), CodecProtocolException, - "http/1.1 protocol error: header value contains invalid chars"); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(isCodecProtocolError(status)); + EXPECT_EQ(status.message(), "http/1.1 protocol error: header value contains invalid chars"); EXPECT_EQ("http1.invalid_characters", response_encoder->getStream().responseDetails()); } @@ -954,7 +993,8 @@ TEST_F(Http1ServerConnectionImplTest, HeaderNameWithUnderscoreAllowed) { EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), true)); Buffer::OwnedImpl buffer(absl::StrCat("GET / HTTP/1.1\r\nHOST: h.com\r\nfoo_bar: bar\r\n\r\n")); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); EXPECT_EQ(0, store_.counter("http1.dropped_headers_with_underscores").value()); } @@ -976,7 +1016,8 @@ TEST_F(Http1ServerConnectionImplTest, HeaderNameWithUnderscoreAreDropped) { EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), true)); Buffer::OwnedImpl buffer(absl::StrCat("GET / HTTP/1.1\r\nHOST: h.com\r\nfoo_bar: bar\r\n\r\n")); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); EXPECT_EQ(1, store_.counter("http1.dropped_headers_with_underscores").value()); } @@ -996,8 +1037,9 @@ TEST_F(Http1ServerConnectionImplTest, HeaderNameWithUnderscoreCauseRequestReject })); Buffer::OwnedImpl buffer(absl::StrCat("GET / HTTP/1.1\r\nHOST: h.com\r\nfoo_bar: bar\r\n\r\n")); - EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), EnvoyException, - "http/1.1 protocol error: header name contains underscores"); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(isCodecProtocolError(status)); + EXPECT_EQ(status.message(), "http/1.1 protocol error: header name contains underscores"); EXPECT_EQ("http1.invalid_characters", response_encoder->getStream().responseDetails()); EXPECT_EQ(1, store_.counter("http1.requests_rejected_with_underscores_in_headers").value()); } @@ -1015,9 +1057,10 @@ TEST_F(Http1ServerConnectionImplTest, HeaderInvalidAuthority) { return decoder; })); Buffer::OwnedImpl buffer(absl::StrCat("GET / HTTP/1.1\r\nHOST: h.\"com\r\n\r\n")); - EXPECT_THROW_WITH_MESSAGE( - codec_->dispatch(buffer), CodecProtocolException, - "http/1.1 protocol error: request headers failed spec compliance checks"); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(isCodecProtocolError(status)); + EXPECT_EQ(status.message(), + "http/1.1 protocol error: request headers failed spec compliance checks"); EXPECT_EQ("http.invalid_authority", response_encoder->getStream().responseDetails()); } @@ -1036,8 +1079,9 @@ TEST_F(Http1ServerConnectionImplTest, HeaderEmbeddedNulRejection) 
{ Buffer::OwnedImpl buffer( absl::StrCat("GET / HTTP/1.1\r\nHOST: h.com\r\nfoo: bar", std::string(1, '\0'), "baz\r\n")); - EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), CodecProtocolException, - "http/1.1 protocol error: HPE_INVALID_HEADER_TOKEN"); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(isCodecProtocolError(status)); + EXPECT_EQ(status.message(), "http/1.1 protocol error: HPE_INVALID_HEADER_TOKEN"); } // Mutate an HTTP GET with embedded NULs, this should always be rejected in some @@ -1055,12 +1099,14 @@ TEST_F(Http1ServerConnectionImplTest, HeaderMutateEmbeddedNul) { Buffer::OwnedImpl buffer( absl::StrCat(example_input.substr(0, n), std::string(1, '\0'), example_input.substr(n))); - EXPECT_THROW_WITH_REGEX(codec_->dispatch(buffer), CodecProtocolException, - "http/1.1 protocol error:"); + auto status = codec_->dispatch(buffer); + EXPECT_FALSE(status.ok()); + EXPECT_TRUE(isCodecProtocolError(status)); + EXPECT_THAT(status.message(), testing::HasSubstr("http/1.1 protocol error:")); } } -// Mutate an HTTP GET with CR or LF. These can cause an exception or maybe +// Mutate an HTTP GET with CR or LF. These can cause an error status or maybe // result in a valid decodeHeaders(). In any case, the validHeaderString() // ASSERTs should validate we never have any embedded CR or LF. TEST_F(Http1ServerConnectionImplTest, HeaderMutateEmbeddedCRLF) { @@ -1077,10 +1123,8 @@ TEST_F(Http1ServerConnectionImplTest, HeaderMutateEmbeddedCRLF) { Buffer::OwnedImpl buffer( absl::StrCat(example_input.substr(0, n), std::string(1, c), example_input.substr(n))); - try { - codec_->dispatch(buffer); - } catch (CodecProtocolException&) { - } + // May or may not cause an error status, but should never trip on a debug ASSERT. + auto status = codec_->dispatch(buffer); } } } @@ -1102,7 +1146,8 @@ TEST_F(Http1ServerConnectionImplTest, CloseDuringHeadersComplete) { EXPECT_CALL(decoder, decodeData(_, _)).Times(0); Buffer::OwnedImpl buffer("POST / HTTP/1.1\r\ncontent-length: 5\r\n\r\n12345"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_NE(0U, buffer.length()); } @@ -1124,7 +1169,8 @@ TEST_F(Http1ServerConnectionImplTest, PostWithContentLength) { EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data2), true)); Buffer::OwnedImpl buffer("POST / HTTP/1.1\r\ncontent-length: 5\r\n\r\n12345"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); } @@ -1149,7 +1195,8 @@ TEST_F(Http1ServerConnectionImplTest, PostWithContentLengthFragmentedBuffer) { Buffer::OwnedImpl buffer = createBufferWithNByteSlices("POST / HTTP/1.1\r\ncontent-length: 5\r\n\r\n12345", 1); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); } @@ -1165,7 +1212,8 @@ TEST_F(Http1ServerConnectionImplTest, HeaderOnlyResponse) { })); Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\n\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); std::string output; @@ -1190,7 +1238,8 @@ TEST_F(Http1ServerConnectionImplTest, LargeHeaderResponseEncode) { })); Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\n\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); std::string output; @@ -1216,7 +1265,8 @@ TEST_F(Http1ServerConnectionImplTest, HeaderOnlyResponseTrainProperHeaders) { })); 
Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\n\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); std::string output; @@ -1241,7 +1291,8 @@ TEST_F(Http1ServerConnectionImplTest, HeaderOnlyResponseWith204) { })); Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\n\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); std::string output; @@ -1264,7 +1315,8 @@ TEST_F(Http1ServerConnectionImplTest, HeaderOnlyResponseWith100Then200) { })); Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\n\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); std::string output; @@ -1293,7 +1345,8 @@ TEST_F(Http1ServerConnectionImplTest, MetadataTest) { return decoder; })); Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\n\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); MetadataMap metadata_map = {{"key", "value"}}; @@ -1316,7 +1369,8 @@ TEST_F(Http1ServerConnectionImplTest, ChunkedResponse) { })); Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\n\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); std::string output; @@ -1351,7 +1405,8 @@ TEST_F(Http1ServerConnectionImplTest, ChunkedResponseWithTrailers) { })); Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\n\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); std::string output; @@ -1383,7 +1438,8 @@ TEST_F(Http1ServerConnectionImplTest, ContentLengthResponse) { })); Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\n\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); std::string output; @@ -1409,7 +1465,8 @@ TEST_F(Http1ServerConnectionImplTest, HeadRequestResponse) { })); Buffer::OwnedImpl buffer("HEAD / HTTP/1.1\r\n\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); std::string output; @@ -1432,7 +1489,8 @@ TEST_F(Http1ServerConnectionImplTest, HeadChunkedRequestResponse) { })); Buffer::OwnedImpl buffer("HEAD / HTTP/1.1\r\n\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); std::string output; @@ -1459,12 +1517,13 @@ TEST_F(Http1ServerConnectionImplTest, DoubleRequest) { Buffer::OwnedImpl buffer(request); buffer.add(request); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); EXPECT_EQ(request.size(), buffer.length()); response_encoder->encodeHeaders(TestResponseHeaderMapImpl{{":status", "200"}}, true); - codec_->dispatch(buffer); + status = codec_->dispatch(buffer); EXPECT_EQ(0U, buffer.length()); } @@ -1519,17 +1578,18 @@ TEST_F(Http1ServerConnectionImplTest, UpgradeRequest) { EXPECT_CALL(decoder, decodeHeaders_(_, false)); Buffer::OwnedImpl buffer( "POST / HTTP/1.1\r\nConnection: upgrade\r\nUpgrade: foo\r\ncontent-length:5\r\n\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); Buffer::OwnedImpl expected_data1("12345"); Buffer::OwnedImpl body("12345"); EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data1), false)); - codec_->dispatch(body); + status = 
codec_->dispatch(body); Buffer::OwnedImpl expected_data2("abcd"); Buffer::OwnedImpl websocket_payload("abcd"); EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data2), false)); - codec_->dispatch(websocket_payload); + status = codec_->dispatch(websocket_payload); + EXPECT_TRUE(status.ok()); } TEST_F(Http1ServerConnectionImplTest, UpgradeRequestWithEarlyData) { @@ -1544,7 +1604,8 @@ TEST_F(Http1ServerConnectionImplTest, UpgradeRequestWithEarlyData) { EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data), false)); Buffer::OwnedImpl buffer("POST / HTTP/1.1\r\nConnection: upgrade\r\nUpgrade: " "foo\r\ncontent-length:5\r\n\r\n12345abcd"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); } TEST_F(Http1ServerConnectionImplTest, UpgradeRequestWithTEChunked) { @@ -1561,7 +1622,8 @@ TEST_F(Http1ServerConnectionImplTest, UpgradeRequestWithTEChunked) { EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data), false)); Buffer::OwnedImpl buffer("POST / HTTP/1.1\r\nConnection: upgrade\r\nUpgrade: " "foo\r\ntransfer-encoding: chunked\r\n\r\n12345abcd"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); } TEST_F(Http1ServerConnectionImplTest, UpgradeRequestWithNoBody) { @@ -1578,7 +1640,8 @@ TEST_F(Http1ServerConnectionImplTest, UpgradeRequestWithNoBody) { EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data), false)); Buffer::OwnedImpl buffer( "GET / HTTP/1.1\r\nConnection: upgrade\r\nUpgrade: foo\r\ncontent-length: 0\r\n\r\nabcd"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); } TEST_F(Http1ServerConnectionImplTest, ConnectRequestNoContentLength) { @@ -1594,12 +1657,13 @@ TEST_F(Http1ServerConnectionImplTest, ConnectRequestNoContentLength) { }; EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), false)); Buffer::OwnedImpl buffer("CONNECT host:80 HTTP/1.1\r\n\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); Buffer::OwnedImpl expected_data("abcd"); Buffer::OwnedImpl connect_payload("abcd"); EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data), false)); - codec_->dispatch(connect_payload); + status = codec_->dispatch(connect_payload); + EXPECT_TRUE(status.ok()); } // We use the absolute URL parsing code for CONNECT requests, but it does not @@ -1612,7 +1676,8 @@ TEST_F(Http1ServerConnectionImplTest, ConnectRequestAbsoluteURLNotallowed) { EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); Buffer::OwnedImpl buffer("CONNECT http://host:80 HTTP/1.1\r\n\r\n"); - EXPECT_THROW(codec_->dispatch(buffer), CodecProtocolException); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(isCodecProtocolError(status)); } TEST_F(Http1ServerConnectionImplTest, ConnectRequestWithEarlyData) { @@ -1626,7 +1691,8 @@ TEST_F(Http1ServerConnectionImplTest, ConnectRequestWithEarlyData) { EXPECT_CALL(decoder, decodeHeaders_(_, false)); EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data), false)); Buffer::OwnedImpl buffer("CONNECT host:80 HTTP/1.1\r\n\r\nabcd"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); } TEST_F(Http1ServerConnectionImplTest, ConnectRequestWithTEChunked) { @@ -1645,7 +1711,8 @@ TEST_F(Http1ServerConnectionImplTest, ConnectRequestWithTEChunked) { EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data), false)); Buffer::OwnedImpl buffer( "CONNECT host:80 HTTP/1.1\r\ntransfer-encoding: chunked\r\n\r\n12345abcd"); 
- codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); } TEST_F(Http1ServerConnectionImplTest, ConnectRequestWithContentLength) { @@ -1661,7 +1728,8 @@ TEST_F(Http1ServerConnectionImplTest, ConnectRequestWithContentLength) { EXPECT_CALL(decoder, decodeHeaders_(_, false)); EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data), false)); Buffer::OwnedImpl buffer("CONNECT host:80 HTTP/1.1\r\ncontent-length: 0\r\n\r\nabcd"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); } TEST_F(Http1ServerConnectionImplTest, WatermarkTest) { @@ -1677,7 +1745,7 @@ TEST_F(Http1ServerConnectionImplTest, WatermarkTest) { })); Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\n\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); Http::MockStreamCallbacks stream_callbacks; response_encoder->getStream().addCallbacks(stream_callbacks); @@ -1799,14 +1867,16 @@ TEST_F(Http1ClientConnectionImplTest, FlowControlReadDisabledReenable) { // Response. EXPECT_CALL(response_decoder, decodeHeaders_(_, true)); Buffer::OwnedImpl response("HTTP/1.1 503 Service Unavailable\r\nContent-Length: 0\r\n\r\n"); - codec_->dispatch(response); + auto status = codec_->dispatch(response); + EXPECT_TRUE(status.ok()); } TEST_F(Http1ClientConnectionImplTest, PrematureResponse) { initialize(); Buffer::OwnedImpl response("HTTP/1.1 408 Request Timeout\r\nConnection: Close\r\n\r\n"); - EXPECT_THROW(codec_->dispatch(response), PrematureResponseException); + auto status = codec_->dispatch(response); + EXPECT_TRUE(isPrematureResponseError(status)); } TEST_F(Http1ClientConnectionImplTest, EmptyBodyResponse503) { @@ -1819,7 +1889,8 @@ TEST_F(Http1ClientConnectionImplTest, EmptyBodyResponse503) { EXPECT_CALL(response_decoder, decodeHeaders_(_, true)); Buffer::OwnedImpl response("HTTP/1.1 503 Service Unavailable\r\nContent-Length: 0\r\n\r\n"); - codec_->dispatch(response); + auto status = codec_->dispatch(response); + EXPECT_TRUE(status.ok()); } TEST_F(Http1ClientConnectionImplTest, EmptyBodyResponse200) { @@ -1832,7 +1903,8 @@ TEST_F(Http1ClientConnectionImplTest, EmptyBodyResponse200) { EXPECT_CALL(response_decoder, decodeHeaders_(_, true)); Buffer::OwnedImpl response("HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n"); - codec_->dispatch(response); + auto status = codec_->dispatch(response); + EXPECT_TRUE(status.ok()); } TEST_F(Http1ClientConnectionImplTest, HeadRequest) { @@ -1845,7 +1917,8 @@ TEST_F(Http1ClientConnectionImplTest, HeadRequest) { EXPECT_CALL(response_decoder, decodeHeaders_(_, true)); Buffer::OwnedImpl response("HTTP/1.1 200 OK\r\nContent-Length: 20\r\n\r\n"); - codec_->dispatch(response); + auto status = codec_->dispatch(response); + EXPECT_TRUE(status.ok()); } TEST_F(Http1ClientConnectionImplTest, 204Response) { @@ -1858,7 +1931,8 @@ TEST_F(Http1ClientConnectionImplTest, 204Response) { EXPECT_CALL(response_decoder, decodeHeaders_(_, true)); Buffer::OwnedImpl response("HTTP/1.1 204 OK\r\nContent-Length: 20\r\n\r\n"); - codec_->dispatch(response); + auto status = codec_->dispatch(response); + EXPECT_TRUE(status.ok()); } TEST_F(Http1ClientConnectionImplTest, 100Response) { @@ -1872,12 +1946,13 @@ TEST_F(Http1ClientConnectionImplTest, 100Response) { EXPECT_CALL(response_decoder, decode100ContinueHeaders_(_)); EXPECT_CALL(response_decoder, decodeData(_, _)).Times(0); Buffer::OwnedImpl initial_response("HTTP/1.1 100 Continue\r\n\r\n"); - codec_->dispatch(initial_response); + auto status = codec_->dispatch(initial_response); 
EXPECT_CALL(response_decoder, decodeHeaders_(_, false)); EXPECT_CALL(response_decoder, decodeData(_, _)).Times(0); Buffer::OwnedImpl response("HTTP/1.1 200 OK\r\nContent-Length: 20\r\n\r\n"); - codec_->dispatch(response); + status = codec_->dispatch(response); + EXPECT_TRUE(status.ok()); } TEST_F(Http1ClientConnectionImplTest, BadEncodeParams) { @@ -1908,10 +1983,11 @@ TEST_F(Http1ClientConnectionImplTest, NoContentLengthResponse) { EXPECT_CALL(response_decoder, decodeData(BufferEqual(&expected_data2), true)); Buffer::OwnedImpl response("HTTP/1.1 200 OK\r\n\r\nHello World"); - codec_->dispatch(response); + auto status = codec_->dispatch(response); Buffer::OwnedImpl empty; - codec_->dispatch(empty); + status = codec_->dispatch(empty); + EXPECT_TRUE(status.ok()); } TEST_F(Http1ClientConnectionImplTest, ResponseWithTrailers) { @@ -1924,8 +2000,9 @@ TEST_F(Http1ClientConnectionImplTest, ResponseWithTrailers) { Buffer::OwnedImpl response("HTTP/1.1 200 OK\r\n\r\ntransfer-encoding: chunked\r\n\r\nb\r\nHello " "World\r\n0\r\nhello: world\r\nsecond: header\r\n\r\n"); - codec_->dispatch(response); + auto status = codec_->dispatch(response); EXPECT_EQ(0UL, response.length()); + EXPECT_TRUE(status.ok()); } TEST_F(Http1ClientConnectionImplTest, GiantPath) { @@ -1939,7 +2016,8 @@ TEST_F(Http1ClientConnectionImplTest, GiantPath) { EXPECT_CALL(response_decoder, decodeHeaders_(_, false)); Buffer::OwnedImpl response("HTTP/1.1 200 OK\r\nContent-Length: 20\r\n\r\n"); - codec_->dispatch(response); + auto status = codec_->dispatch(response); + EXPECT_TRUE(status.ok()); } TEST_F(Http1ClientConnectionImplTest, PrematureUpgradeResponse) { @@ -1948,7 +2026,8 @@ TEST_F(Http1ClientConnectionImplTest, PrematureUpgradeResponse) { // make sure upgradeAllowed doesn't cause crashes if run with no pending response. 
Buffer::OwnedImpl response( "HTTP/1.1 200 OK\r\nContent-Length: 5\r\nConnection: upgrade\r\nUpgrade: websocket\r\n\r\n"); - EXPECT_THROW(codec_->dispatch(response), PrematureResponseException); + auto status = codec_->dispatch(response); + EXPECT_TRUE(isPrematureResponseError(status)); } TEST_F(Http1ClientConnectionImplTest, UpgradeResponse) { @@ -1969,19 +2048,20 @@ TEST_F(Http1ClientConnectionImplTest, UpgradeResponse) { EXPECT_CALL(response_decoder, decodeHeaders_(_, false)); Buffer::OwnedImpl response( "HTTP/1.1 200 OK\r\nContent-Length: 5\r\nConnection: upgrade\r\nUpgrade: websocket\r\n\r\n"); - codec_->dispatch(response); + auto status = codec_->dispatch(response); // Send body payload Buffer::OwnedImpl expected_data1("12345"); Buffer::OwnedImpl body("12345"); EXPECT_CALL(response_decoder, decodeData(BufferEqual(&expected_data1), false)); - codec_->dispatch(body); + status = codec_->dispatch(body); // Send websocket payload Buffer::OwnedImpl expected_data2("abcd"); Buffer::OwnedImpl websocket_payload("abcd"); EXPECT_CALL(response_decoder, decodeData(BufferEqual(&expected_data2), false)); - codec_->dispatch(websocket_payload); + status = codec_->dispatch(websocket_payload); + EXPECT_TRUE(status.ok()); } // Same data as above, but make sure directDispatch immediately hands off any @@ -2006,7 +2086,8 @@ TEST_F(Http1ClientConnectionImplTest, UpgradeResponseWithEarlyData) { EXPECT_CALL(response_decoder, decodeData(BufferEqual(&expected_data), false)); Buffer::OwnedImpl response("HTTP/1.1 200 OK\r\nContent-Length: 5\r\nConnection: " "upgrade\r\nUpgrade: websocket\r\n\r\n12345abcd"); - codec_->dispatch(response); + auto status = codec_->dispatch(response); + EXPECT_TRUE(status.ok()); } TEST_F(Http1ClientConnectionImplTest, ConnectResponse) { @@ -2022,19 +2103,20 @@ TEST_F(Http1ClientConnectionImplTest, ConnectResponse) { // Send response headers EXPECT_CALL(response_decoder, decodeHeaders_(_, false)); Buffer::OwnedImpl response("HTTP/1.1 200 OK\r\nContent-Length: 5\r\n\r\n"); - codec_->dispatch(response); + auto status = codec_->dispatch(response); // Send body payload Buffer::OwnedImpl expected_data1("12345"); Buffer::OwnedImpl body("12345"); EXPECT_CALL(response_decoder, decodeData(BufferEqual(&expected_data1), false)); - codec_->dispatch(body); + status = codec_->dispatch(body); // Send connect payload Buffer::OwnedImpl expected_data2("abcd"); Buffer::OwnedImpl connect_payload("abcd"); EXPECT_CALL(response_decoder, decodeData(BufferEqual(&expected_data2), false)); - codec_->dispatch(connect_payload); + status = codec_->dispatch(connect_payload); + EXPECT_TRUE(status.ok()); } // Same data as above, but make sure directDispatch immediately hands off any @@ -2054,7 +2136,8 @@ TEST_F(Http1ClientConnectionImplTest, ConnectResponseWithEarlyData) { Buffer::OwnedImpl expected_data("12345abcd"); EXPECT_CALL(response_decoder, decodeData(BufferEqual(&expected_data), false)).Times(1); Buffer::OwnedImpl response("HTTP/1.1 200 OK\r\n\r\n12345abcd"); - codec_->dispatch(response); + auto status = codec_->dispatch(response); + EXPECT_TRUE(status.ok()); } TEST_F(Http1ClientConnectionImplTest, ConnectRejected) { @@ -2071,7 +2154,8 @@ TEST_F(Http1ClientConnectionImplTest, ConnectRejected) { Buffer::OwnedImpl expected_data("12345abcd"); EXPECT_CALL(response_decoder, decodeData(BufferEqual(&expected_data), false)); Buffer::OwnedImpl response("HTTP/1.1 400 OK\r\n\r\n12345abcd"); - codec_->dispatch(response); + auto status = codec_->dispatch(response); + EXPECT_TRUE(status.ok()); } 
TEST_F(Http1ClientConnectionImplTest, WatermarkTest) { @@ -2129,10 +2213,11 @@ TEST_F(Http1ClientConnectionImplTest, HighwatermarkMultipleResponses) { EXPECT_CALL(response_decoder, decodeHeaders_(_, true)); Buffer::OwnedImpl response("HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n"); - codec_->dispatch(response); + auto status = codec_->dispatch(response); Buffer::OwnedImpl response2("HTTP/1.1 400 Bad Request\r\nContent-Length: 0\r\n\r\n"); - EXPECT_THROW(codec_->dispatch(response2), PrematureResponseException); + status = codec_->dispatch(response2); + EXPECT_TRUE(isPrematureResponseError(status)); // Fake a call for going below the low watermark. Make sure no stream callbacks get called. EXPECT_CALL(stream_callbacks, onBelowWriteBufferLowWatermark()).Times(0); @@ -2168,7 +2253,8 @@ TEST_F(Http1ClientConnectionImplTest, LowWatermarkDuringClose) { ->onUnderlyingConnectionBelowWriteBufferLowWatermark(); })); Buffer::OwnedImpl response("HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n"); - codec_->dispatch(response); + auto status = codec_->dispatch(response); + EXPECT_TRUE(status.ok()); } TEST_F(Http1ServerConnectionImplTest, LargeTrailersRejected) { @@ -2198,7 +2284,7 @@ TEST_F(Http1ServerConnectionImplTest, ManyTrailersIgnored) { TEST_F(Http1ServerConnectionImplTest, LargeRequestHeadersRejected) { // Default limit of 60 KiB std::string long_string = "big: " + std::string(60 * 1024, 'q') + "\r\n"; - testRequestHeadersExceedLimit(long_string); + testRequestHeadersExceedLimit(long_string, ""); } // Tests that the default limit for the number of request headers is 100. @@ -2220,16 +2306,18 @@ TEST_F(Http1ServerConnectionImplTest, LargeRequestHeadersSplitRejected) { return decoder; })); Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); std::string long_string = std::string(1024, 'q'); for (int i = 0; i < 59; i++) { buffer = Buffer::OwnedImpl(fmt::format("big: {}\r\n", long_string)); - codec_->dispatch(buffer); + status = codec_->dispatch(buffer); } // the 60th 1kb header should induce overflow buffer = Buffer::OwnedImpl(fmt::format("big: {}\r\n", long_string)); - EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), EnvoyException, "headers size exceeds limit"); + status = codec_->dispatch(buffer); + EXPECT_TRUE(isCodecProtocolError(status)); + EXPECT_EQ(status.message(), "headers size exceeds limit"); EXPECT_EQ("http1.headers_too_large", response_encoder->getStream().responseDetails()); } @@ -2248,15 +2336,17 @@ TEST_F(Http1ServerConnectionImplTest, ManyRequestHeadersSplitRejected) { return decoder; })); Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); // Dispatch 100 headers. buffer = Buffer::OwnedImpl(createHeaderFragment(100)); - codec_->dispatch(buffer); + status = codec_->dispatch(buffer); // The final 101th header should induce overflow. 
buffer = Buffer::OwnedImpl("header101:\r\n\r\n"); - EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), EnvoyException, "headers size exceeds limit"); + status = codec_->dispatch(buffer); + EXPECT_TRUE(isCodecProtocolError(status)); + EXPECT_EQ(status.message(), "headers size exceeds limit"); } TEST_F(Http1ServerConnectionImplTest, LargeRequestHeadersAccepted) { @@ -2288,10 +2378,13 @@ TEST_F(Http1ClientConnectionImplTest, LargeResponseHeadersRejected) { request_encoder.encodeHeaders(headers, true); Buffer::OwnedImpl buffer("HTTP/1.1 200 OK\r\nContent-Length: 0\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); std::string long_header = "big: " + std::string(80 * 1024, 'q') + "\r\n"; buffer = Buffer::OwnedImpl(long_header); - EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), EnvoyException, "headers size exceeds limit"); + status = codec_->dispatch(buffer); + EXPECT_TRUE(isCodecProtocolError(status)); + EXPECT_EQ(status.message(), "headers size exceeds limit"); } // Tests that the size of response headers for HTTP/1 must be under 80 kB. @@ -2304,10 +2397,11 @@ TEST_F(Http1ClientConnectionImplTest, LargeResponseHeadersAccepted) { request_encoder.encodeHeaders(headers, true); Buffer::OwnedImpl buffer("HTTP/1.1 200 OK\r\nContent-Length: 0\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); std::string long_header = "big: " + std::string(79 * 1024, 'q') + "\r\n"; buffer = Buffer::OwnedImpl(long_header); - codec_->dispatch(buffer); + status = codec_->dispatch(buffer); } // Regression test for CVE-2019-18801. Large method headers should not trigger @@ -2372,9 +2466,11 @@ TEST_F(Http1ClientConnectionImplTest, ManyResponseHeadersRejected) { request_encoder.encodeHeaders(headers, true); Buffer::OwnedImpl buffer("HTTP/1.1 200 OK\r\nContent-Length: 0\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); buffer = Buffer::OwnedImpl(createHeaderFragment(101) + "\r\n"); - EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), EnvoyException, "headers size exceeds limit"); + status = codec_->dispatch(buffer); + EXPECT_TRUE(isCodecProtocolError(status)); + EXPECT_EQ(status.message(), "headers size exceeds limit"); } // Tests that the number of response headers is configurable. @@ -2389,10 +2485,10 @@ TEST_F(Http1ClientConnectionImplTest, ManyResponseHeadersAccepted) { request_encoder.encodeHeaders(headers, true); Buffer::OwnedImpl buffer("HTTP/1.1 200 OK\r\nContent-Length: 0\r\n"); - codec_->dispatch(buffer); + auto status = codec_->dispatch(buffer); // Response already contains one header. buffer = Buffer::OwnedImpl(createHeaderFragment(150) + "\r\n"); - codec_->dispatch(buffer); + status = codec_->dispatch(buffer); } } // namespace Http1 diff --git a/test/common/http/http1/conn_pool_test.cc b/test/common/http/http1/conn_pool_test.cc index beb7344b5333..bc74a1b884a4 100644 --- a/test/common/http/http1/conn_pool_test.cc +++ b/test/common/http/http1/conn_pool_test.cc @@ -901,9 +901,10 @@ TEST_F(Http1ConnPoolImplTest, RemoteCloseToCompleteResponse) { Buffer::OwnedImpl empty_data; EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, dispatch(BufferEqual(&empty_data))) - .WillOnce(Invoke([&](Buffer::Instance& data) -> void { + .WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { // Simulate the onResponseComplete call to decodeData since dispatch is mocked out. 
inner_decoder->decodeData(data, true); + return Http::okStatus(); })); EXPECT_CALL(*conn_pool_.test_clients_[0].connection_, diff --git a/test/common/http/http2/codec_impl_test.cc b/test/common/http/http2/codec_impl_test.cc index 6ee56bf7234f..cce3fca74316 100644 --- a/test/common/http/http2/codec_impl_test.cc +++ b/test/common/http/http2/codec_impl_test.cc @@ -46,12 +46,38 @@ namespace CommonUtility = ::Envoy::Http2::Utility; class Http2CodecImplTestFixture { public: struct ConnectionWrapper { - void dispatch(const Buffer::Instance& data, ConnectionImpl& connection) { + Http::Status dispatch(const Buffer::Instance& data, ConnectionImpl& connection) { + Http::Status status = Http::okStatus(); buffer_.add(data); if (!dispatching_) { while (buffer_.length() > 0) { dispatching_ = true; - connection.dispatch(buffer_); + status = connection.dispatch(buffer_); + if (!status.ok()) { + // Exit early if we hit an error status. + return status; + } + dispatching_ = false; + } + } + return status; + } + + // TODO(#10878): This test uses the innerDispatch which may throw exceptions while + // exception removal is in progress. Tests override MockConnection's write with this + // method. Connection::write can be called while dispatching data in a codec callback, or + // outside a dispatching context (for example, in RequestEncoder::encodeHeaders) where they are + // not caught like in Connection::dispatch. In practice, these would never be triggered since + // these inputs would fail parsing on ingress. + // This should be removed, and the throws that are expected outside of dispatching context + // should be replaced with error handling. + void innerDispatch(const Buffer::Instance& data, ConnectionImpl& connection) { + Http::Status status; + buffer_.add(data); + if (!dispatching_) { + while (buffer_.length() > 0) { + dispatching_ = true; + status = connection.innerDispatch(buffer_); dispatching_ = false; } } @@ -100,11 +126,11 @@ class Http2CodecImplTestFixture { if (corrupt_metadata_frame_) { corruptMetadataFramePayload(data); } - server_wrapper_.dispatch(data, *server_); + server_wrapper_.innerDispatch(data, *server_); })); ON_CALL(server_connection_, write(_, _)) .WillByDefault(Invoke([&](Buffer::Instance& data, bool) -> void { - client_wrapper_.dispatch(data, *client_); + client_wrapper_.innerDispatch(data, *client_); })); } @@ -341,7 +367,8 @@ TEST_P(Http2CodecImplTest, InvalidContinueWithFinAllowed) { // Flush pending data. EXPECT_CALL(request_callbacks, onResetStream(StreamResetReason::LocalReset, _)); setupDefaultConnectionMocks(); - client_wrapper_.dispatch(Buffer::OwnedImpl(), *client_); + auto status = client_wrapper_.dispatch(Buffer::OwnedImpl(), *client_); + EXPECT_TRUE(status.ok()); EXPECT_EQ(1, stats_store_.counter("http2.rx_messaging_error").value()); expectDetailsRequest("http2.violation.of.messaging.rule"); @@ -389,7 +416,8 @@ TEST_P(Http2CodecImplTest, InvalidRepeatContinueAllowed) { // Flush pending data. 
EXPECT_CALL(request_callbacks, onResetStream(StreamResetReason::LocalReset, _)); setupDefaultConnectionMocks(); - client_wrapper_.dispatch(Buffer::OwnedImpl(), *client_); + auto status = client_wrapper_.dispatch(Buffer::OwnedImpl(), *client_); + EXPECT_TRUE(status.ok()); EXPECT_EQ(1, stats_store_.counter("http2.rx_messaging_error").value()); expectDetailsRequest("http2.violation.of.messaging.rule"); @@ -469,7 +497,8 @@ TEST_P(Http2CodecImplTest, Invalid204WithContentLengthAllowed) { EXPECT_CALL(request_callbacks, onResetStream(StreamResetReason::LocalReset, _)); EXPECT_CALL(server_stream_callbacks_, onResetStream(StreamResetReason::RemoteReset, _)); setupDefaultConnectionMocks(); - client_wrapper_.dispatch(Buffer::OwnedImpl(), *client_); + auto status = client_wrapper_.dispatch(Buffer::OwnedImpl(), *client_); + EXPECT_TRUE(status.ok()); EXPECT_EQ(1, stats_store_.counter("http2.rx_messaging_error").value()); expectDetailsRequest("http2.invalid.header.field"); @@ -513,7 +542,8 @@ TEST_P(Http2CodecImplTest, InvalidHeadersFrameAllowed) { request_encoder_->encodeHeaders(TestRequestHeaderMapImpl{}, true); EXPECT_CALL(server_stream_callbacks_, onResetStream(StreamResetReason::LocalReset, _)); EXPECT_CALL(request_callbacks, onResetStream(StreamResetReason::RemoteReset, _)); - server_wrapper_.dispatch(Buffer::OwnedImpl(), *server_); + auto status = server_wrapper_.dispatch(Buffer::OwnedImpl(), *server_); + EXPECT_TRUE(status.ok()); expectDetailsResponse("http2.violation.of.messaging.rule"); } @@ -560,7 +590,8 @@ TEST_P(Http2CodecImplTest, TrailingHeadersLargeBody) { // Flush pending data. setupDefaultConnectionMocks(); - server_wrapper_.dispatch(Buffer::OwnedImpl(), *server_); + auto status = server_wrapper_.dispatch(Buffer::OwnedImpl(), *server_); + EXPECT_TRUE(status.ok()); TestResponseHeaderMapImpl response_headers{{":status", "200"}}; EXPECT_CALL(response_decoder_, decodeHeaders_(_, false)); @@ -720,7 +751,8 @@ TEST_P(Http2CodecImplDeferredResetTest, DeferredResetClient) { EXPECT_CALL(server_stream_callbacks_, onResetStream(StreamResetReason::RemoteReset, _)); setupDefaultConnectionMocks(); - server_wrapper_.dispatch(Buffer::OwnedImpl(), *server_); + auto status = server_wrapper_.dispatch(Buffer::OwnedImpl(), *server_); + EXPECT_TRUE(status.ok()); } TEST_P(Http2CodecImplDeferredResetTest, DeferredResetServer) { @@ -751,7 +783,8 @@ TEST_P(Http2CodecImplDeferredResetTest, DeferredResetServer) { EXPECT_CALL(response_decoder_, decodeData(_, false)).Times(AtLeast(1)); EXPECT_CALL(client_stream_callbacks, onResetStream(StreamResetReason::RemoteReset, _)); setupDefaultConnectionMocks(); - client_wrapper_.dispatch(Buffer::OwnedImpl(), *client_); + auto status = client_wrapper_.dispatch(Buffer::OwnedImpl(), *client_); + EXPECT_TRUE(status.ok()); } class Http2CodecImplFlowControlTest : public Http2CodecImplTest {}; @@ -1769,7 +1802,7 @@ TEST_P(Http2CodecImplTest, EmptyDataFlood) { Buffer::OwnedImpl data; emptyDataFlood(data); EXPECT_CALL(request_decoder_, decodeData(_, false)); - EXPECT_THROW(server_wrapper_.dispatch(data, *server_), FrameFloodException); + EXPECT_THROW(server_wrapper_.innerDispatch(data, *server_), FrameFloodException); } TEST_P(Http2CodecImplTest, EmptyDataFloodOverride) { @@ -1780,7 +1813,8 @@ TEST_P(Http2CodecImplTest, EmptyDataFloodOverride) { .Times( CommonUtility::OptionsLimits::DEFAULT_MAX_CONSECUTIVE_INBOUND_FRAMES_WITH_EMPTY_PAYLOAD + 1); - EXPECT_NO_THROW(server_wrapper_.dispatch(data, *server_)); + auto status = server_wrapper_.dispatch(data, *server_); + 
EXPECT_TRUE(status.ok()); } // CONNECT without upgrade type gets tagged with "bytestream" @@ -1899,11 +1933,11 @@ class Http2CodecMetadataTest : public Http2CodecImplTestFixture, public ::testin max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_); ON_CALL(client_connection_, write(_, _)) .WillByDefault(Invoke([&](Buffer::Instance& data, bool) -> void { - server_wrapper_.dispatch(data, *server_); + server_wrapper_.innerDispatch(data, *server_); })); ON_CALL(server_connection_, write(_, _)) .WillByDefault(Invoke([&](Buffer::Instance& data, bool) -> void { - client_wrapper_.dispatch(data, *client_); + client_wrapper_.innerDispatch(data, *client_); })); } diff --git a/test/common/http/http2/frame_replay.cc b/test/common/http/http2/frame_replay.cc index 7f63fd514160..b0aa61e14ff0 100644 --- a/test/common/http/http2/frame_replay.cc +++ b/test/common/http/http2/frame_replay.cc @@ -89,13 +89,16 @@ ServerCodecFrameInjector::ServerCodecFrameInjector() : CodecFrameInjector("clien })); } -void CodecFrameInjector::write(const Frame& frame, Http::Connection& connection) { +Http::Status CodecFrameInjector::write(const Frame& frame, Http::Connection& connection) { Buffer::OwnedImpl buffer; buffer.add(frame.data(), frame.size()); ENVOY_LOG_MISC(trace, "{} write: {}", injector_name_, Hex::encode(frame.data(), frame.size())); - while (buffer.length() > 0) { - connection.dispatch(buffer); + auto status = Http::okStatus(); + while (buffer.length() > 0 && status.ok()) { + status = connection.dispatch(buffer); } + ENVOY_LOG_MISC(trace, "Status: {}", status.message()); + return status; } } // namespace Http2 diff --git a/test/common/http/http2/frame_replay.h b/test/common/http/http2/frame_replay.h index 7024c292cb4e..3a6e89c6ca5b 100644 --- a/test/common/http/http2/frame_replay.h +++ b/test/common/http/http2/frame_replay.h @@ -53,7 +53,7 @@ class CodecFrameInjector { CodecFrameInjector(const std::string& injector_name); // Writes the data using the Http::Connection's nghttp2 session. 
- void write(const Frame& frame, Http::Connection& connection); + Http::Status write(const Frame& frame, Http::Connection& connection); envoy::config::core::v3::Http2ProtocolOptions options_; Stats::IsolatedStoreImpl stats_store_; diff --git a/test/common/http/http2/frame_replay_test.cc b/test/common/http/http2/frame_replay_test.cc index c6afd9a8be5e..5554f19bcc5f 100644 --- a/test/common/http/http2/frame_replay_test.cc +++ b/test/common/http/http2/frame_replay_test.cc @@ -60,14 +60,14 @@ TEST_F(RequestFrameCommentTest, SimpleExampleHuffman) { codec.server_connection_, codec.server_callbacks_, codec.stats_store_, codec.options_, Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, envoy::config::core::v3::HttpProtocolOptions::ALLOW); - codec.write(WellKnownFrames::clientConnectionPrefaceFrame(), connection); - codec.write(WellKnownFrames::defaultSettingsFrame(), connection); - codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection); + EXPECT_TRUE(codec.write(WellKnownFrames::clientConnectionPrefaceFrame(), connection).ok()); + EXPECT_TRUE(codec.write(WellKnownFrames::defaultSettingsFrame(), connection).ok()); + EXPECT_TRUE(codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection).ok()); TestRequestHeaderMapImpl expected_headers; HttpTestUtility::addDefaultHeaders(expected_headers); expected_headers.addCopy("foo", "barbaz"); EXPECT_CALL(codec.request_decoder_, decodeHeaders_(HeaderMapEqual(&expected_headers), true)); - codec.write(header.frame(), connection); + EXPECT_TRUE(codec.write(header.frame(), connection).ok()); } // Validate that a simple Huffman encoded response HEADERS frame can be decoded. @@ -95,13 +95,13 @@ TEST_F(ResponseFrameCommentTest, SimpleExampleHuffman) { ProdNghttp2SessionFactory::get()); setupStream(codec, connection); - codec.write(WellKnownFrames::defaultSettingsFrame(), connection); - codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection); + EXPECT_TRUE(codec.write(WellKnownFrames::defaultSettingsFrame(), connection).ok()); + EXPECT_TRUE(codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection).ok()); TestHeaderMapImpl expected_headers; expected_headers.addCopy(":status", "200"); expected_headers.addCopy("compression", "test"); EXPECT_CALL(codec.response_decoder_, decodeHeaders_(HeaderMapEqual(&expected_headers), true)); - codec.write(header.frame(), connection); + EXPECT_TRUE(codec.write(header.frame(), connection).ok()); } // Validate that a simple non-Huffman request HEADERS frame with no static table user either can be @@ -138,14 +138,14 @@ TEST_F(RequestFrameCommentTest, SimpleExamplePlain) { codec.server_connection_, codec.server_callbacks_, codec.stats_store_, codec.options_, Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, envoy::config::core::v3::HttpProtocolOptions::ALLOW); - codec.write(WellKnownFrames::clientConnectionPrefaceFrame(), connection); - codec.write(WellKnownFrames::defaultSettingsFrame(), connection); - codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection); + EXPECT_TRUE(codec.write(WellKnownFrames::clientConnectionPrefaceFrame(), connection).ok()); + EXPECT_TRUE(codec.write(WellKnownFrames::defaultSettingsFrame(), connection).ok()); + EXPECT_TRUE(codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection).ok()); TestRequestHeaderMapImpl expected_headers; HttpTestUtility::addDefaultHeaders(expected_headers); expected_headers.addCopy("foo", "barbaz"); EXPECT_CALL(codec.request_decoder_, 
decodeHeaders_(HeaderMapEqual(&expected_headers), true)); - codec.write(header.frame(), connection); + EXPECT_TRUE(codec.write(header.frame(), connection).ok()); } // Validate that a simple non-Huffman response HEADERS frame with no static table user either can be @@ -175,13 +175,13 @@ TEST_F(ResponseFrameCommentTest, SimpleExamplePlain) { ProdNghttp2SessionFactory::get()); setupStream(codec, connection); - codec.write(WellKnownFrames::defaultSettingsFrame(), connection); - codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection); + EXPECT_TRUE(codec.write(WellKnownFrames::defaultSettingsFrame(), connection).ok()); + EXPECT_TRUE(codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection).ok()); TestHeaderMapImpl expected_headers; expected_headers.addCopy(":status", "200"); expected_headers.addCopy("compression", "test"); EXPECT_CALL(codec.response_decoder_, decodeHeaders_(HeaderMapEqual(&expected_headers), true)); - codec.write(header.frame(), connection); + EXPECT_TRUE(codec.write(header.frame(), connection).ok()); } // Validate that corrupting any single byte with {NUL, CR, LF} in a HEADERS frame doesn't crash or @@ -203,15 +203,14 @@ TEST_F(RequestFrameCommentTest, SingleByteNulCrLfInHeaderFrame) { codec.server_connection_, codec.server_callbacks_, codec.stats_store_, codec.options_, Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, envoy::config::core::v3::HttpProtocolOptions::ALLOW); - codec.write(WellKnownFrames::clientConnectionPrefaceFrame(), connection); - codec.write(WellKnownFrames::defaultSettingsFrame(), connection); - codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection); - try { - EXPECT_CALL(codec.request_decoder_, decodeHeaders_(_, _)).Times(AnyNumber()); - EXPECT_CALL(codec.server_stream_callbacks_, onResetStream(_, _)).Times(AnyNumber()); - codec.write(header.frame(), connection); - } catch (const CodecProtocolException& e) { - ENVOY_LOG_MISC(trace, "CodecProtocolException: {}", e.what()); + EXPECT_TRUE(codec.write(WellKnownFrames::clientConnectionPrefaceFrame(), connection).ok()); + EXPECT_TRUE(codec.write(WellKnownFrames::defaultSettingsFrame(), connection).ok()); + EXPECT_TRUE(codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection).ok()); + EXPECT_CALL(codec.request_decoder_, decodeHeaders_(_, _)).Times(AnyNumber()); + EXPECT_CALL(codec.server_stream_callbacks_, onResetStream(_, _)).Times(AnyNumber()); + auto status = codec.write(header.frame(), connection); + if (isCodecProtocolError(status)) { + ENVOY_LOG_MISC(trace, "CodecProtocolError: {}", status.message()); } header.frame()[offset] = original; } @@ -239,14 +238,13 @@ TEST_F(ResponseFrameCommentTest, SingleByteNulCrLfInHeaderFrame) { ProdNghttp2SessionFactory::get()); setupStream(codec, connection); - codec.write(WellKnownFrames::defaultSettingsFrame(), connection); - codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection); - try { - EXPECT_CALL(codec.response_decoder_, decodeHeaders_(_, _)).Times(AnyNumber()); - EXPECT_CALL(codec.client_stream_callbacks_, onResetStream(_, _)).Times(AnyNumber()); - codec.write(header.frame(), connection); - } catch (const CodecProtocolException& e) { - ENVOY_LOG_MISC(trace, "CodecProtocolException: {}", e.what()); + EXPECT_TRUE(codec.write(WellKnownFrames::defaultSettingsFrame(), connection).ok()); + EXPECT_TRUE(codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection).ok()); + EXPECT_CALL(codec.response_decoder_, decodeHeaders_(_, _)).Times(AnyNumber()); + 
EXPECT_CALL(codec.client_stream_callbacks_, onResetStream(_, _)).Times(AnyNumber()); + auto status = codec.write(header.frame(), connection); + if (isCodecProtocolError(status)) { + ENVOY_LOG_MISC(trace, "CodecProtocolError: {}", status.message()); } header.frame()[offset] = original; } @@ -273,17 +271,16 @@ TEST_F(RequestFrameCommentTest, SingleByteNulCrLfInHeaderField) { codec.server_connection_, codec.server_callbacks_, codec.stats_store_, codec.options_, Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, envoy::config::core::v3::HttpProtocolOptions::ALLOW); - codec.write(WellKnownFrames::clientConnectionPrefaceFrame(), connection); - codec.write(WellKnownFrames::defaultSettingsFrame(), connection); - codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection); + EXPECT_TRUE(codec.write(WellKnownFrames::clientConnectionPrefaceFrame(), connection).ok()); + EXPECT_TRUE(codec.write(WellKnownFrames::defaultSettingsFrame(), connection).ok()); + EXPECT_TRUE(codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection).ok()); bool stream_reset = false; EXPECT_CALL(codec.request_decoder_, decodeHeaders_(_, _)).Times(0); EXPECT_CALL(codec.server_stream_callbacks_, onResetStream(_, _)) .WillRepeatedly(InvokeWithoutArgs([&stream_reset] { stream_reset = true; })); bool codec_exception = false; - try { - codec.write(header.frame(), connection); - } catch (const CodecProtocolException& e) { + auto status = codec.write(header.frame(), connection); + if (isCodecProtocolError(status)) { codec_exception = true; } EXPECT_TRUE(stream_reset || codec_exception); @@ -314,16 +311,15 @@ TEST_F(ResponseFrameCommentTest, SingleByteNulCrLfInHeaderField) { ProdNghttp2SessionFactory::get()); setupStream(codec, connection); - codec.write(WellKnownFrames::defaultSettingsFrame(), connection); - codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection); + EXPECT_TRUE(codec.write(WellKnownFrames::defaultSettingsFrame(), connection).ok()); + EXPECT_TRUE(codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection).ok()); bool stream_reset = false; EXPECT_CALL(codec.response_decoder_, decodeHeaders_(_, _)).Times(0); EXPECT_CALL(codec.client_stream_callbacks_, onResetStream(_, _)) .WillRepeatedly(InvokeWithoutArgs([&stream_reset] { stream_reset = true; })); bool codec_exception = false; - try { - codec.write(header.frame(), connection); - } catch (const CodecProtocolException& e) { + auto status = codec.write(header.frame(), connection); + if (isCodecProtocolError(status)) { codec_exception = true; } EXPECT_TRUE(stream_reset || codec_exception); diff --git a/test/common/http/http2/request_header_fuzz_test.cc b/test/common/http/http2/request_header_fuzz_test.cc index 9ac05cbfbe94..5dc75d58ebbb 100644 --- a/test/common/http/http2/request_header_fuzz_test.cc +++ b/test/common/http/http2/request_header_fuzz_test.cc @@ -18,13 +18,11 @@ void Replay(const Frame& frame, ServerCodecFrameInjector& codec) { codec.server_connection_, codec.server_callbacks_, codec.stats_store_, codec.options_, Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, envoy::config::core::v3::HttpProtocolOptions::ALLOW); - codec.write(WellKnownFrames::clientConnectionPrefaceFrame(), connection); - codec.write(WellKnownFrames::defaultSettingsFrame(), connection); - codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection); - try { - codec.write(frame, connection); - } catch (const CodecProtocolException& e) { - } + Http::Status status = Http::okStatus(); + status = 
codec.write(WellKnownFrames::clientConnectionPrefaceFrame(), connection); + status = codec.write(WellKnownFrames::defaultSettingsFrame(), connection); + status = codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection); + status = codec.write(frame, connection); } DEFINE_FUZZER(const uint8_t* buf, size_t len) { diff --git a/test/common/http/http2/response_header_fuzz_test.cc b/test/common/http/http2/response_header_fuzz_test.cc index 756af3860c3f..8b1a5d3d0797 100644 --- a/test/common/http/http2/response_header_fuzz_test.cc +++ b/test/common/http/http2/response_header_fuzz_test.cc @@ -20,6 +20,7 @@ void Replay(const Frame& frame, ClientCodecFrameInjector& codec) { Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, ProdNghttp2SessionFactory::get()); // Create a new stream. + Http::Status status = Http::okStatus(); codec.request_encoder_ = &connection.newStream(codec.response_decoder_); codec.request_encoder_->getStream().addCallbacks(codec.client_stream_callbacks_); // Setup a single stream to inject frames as a reply to. @@ -28,12 +29,9 @@ void Replay(const Frame& frame, ClientCodecFrameInjector& codec) { codec.request_encoder_->encodeHeaders(request_headers, true); // Send frames. - codec.write(WellKnownFrames::defaultSettingsFrame(), connection); - codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection); - try { - codec.write(frame, connection); - } catch (const CodecProtocolException& e) { - } + status = codec.write(WellKnownFrames::defaultSettingsFrame(), connection); + status = codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection); + status = codec.write(frame, connection); } DEFINE_FUZZER(const uint8_t* buf, size_t len) { diff --git a/test/integration/fake_upstream.h b/test/integration/fake_upstream.h index 26379a3d31f4..8a2e1ee20335 100644 --- a/test/integration/fake_upstream.h +++ b/test/integration/fake_upstream.h @@ -446,10 +446,10 @@ class FakeHttpConnection : public Http::ServerConnectionCallbacks, public FakeCo // Network::ReadFilter Network::FilterStatus onData(Buffer::Instance& data, bool) override { - try { - parent_.codec_->dispatch(data); - } catch (const Http::CodecProtocolException& e) { - ENVOY_LOG(debug, "FakeUpstream dispatch error: {}", e.what()); + Http::Status status = parent_.codec_->dispatch(data); + + if (Http::isCodecProtocolError(status)) { + ENVOY_LOG(debug, "FakeUpstream dispatch error: {}", status.message()); // We don't do a full stream shutdown like HCM, but just shutdown the // connection for now. 
read_filter_callbacks_->connection().close( diff --git a/test/mocks/http/mocks.h b/test/mocks/http/mocks.h index e9fb9e45e6d2..4bbf40e7d657 100644 --- a/test/mocks/http/mocks.h +++ b/test/mocks/http/mocks.h @@ -75,7 +75,7 @@ class MockServerConnection : public ServerConnection { ~MockServerConnection() override; // Http::Connection - MOCK_METHOD(void, dispatch, (Buffer::Instance & data)); + MOCK_METHOD(Status, dispatch, (Buffer::Instance & data)); MOCK_METHOD(void, goAway, ()); MOCK_METHOD(Protocol, protocol, ()); MOCK_METHOD(void, shutdownNotice, ()); @@ -92,7 +92,7 @@ class MockClientConnection : public ClientConnection { ~MockClientConnection() override; // Http::Connection - MOCK_METHOD(void, dispatch, (Buffer::Instance & data)); + MOCK_METHOD(Status, dispatch, (Buffer::Instance & data)); MOCK_METHOD(void, goAway, ()); MOCK_METHOD(Protocol, protocol, ()); MOCK_METHOD(void, shutdownNotice, ()); From f0ebefc2a36746dbf12301d3c258e2f1b66cfe82 Mon Sep 17 00:00:00 2001 From: yanavlasov Date: Tue, 5 May 2020 18:16:18 -0400 Subject: [PATCH 102/909] init: order dynamic resource initialization to make RTDS always be first. Take 2. (#10989) Order dynamic resource initialization such that RTDS is completely initialized before starting to load rest of xDS resources. Signed-off-by: Yan Avlasov --- include/envoy/runtime/runtime.h | 8 ++ include/envoy/upstream/cluster_manager.h | 28 ++++- source/common/runtime/BUILD | 2 + source/common/runtime/runtime_impl.cc | 19 +++- source/common/runtime/runtime_impl.h | 12 +- .../common/upstream/cluster_manager_impl.cc | 59 ++++++++-- source/common/upstream/cluster_manager_impl.h | 41 +++++-- source/server/server.cc | 60 +++++++--- source/server/server.h | 6 +- test/common/protobuf/utility_test.cc | 7 +- test/common/runtime/runtime_impl_test.cc | 43 +++----- .../upstream/cluster_manager_impl_test.cc | 42 ++++++- .../clusters/aggregate/cluster_update_test.cc | 16 ++- test/integration/ads_integration_test.cc | 103 ++++++++++++++++++ test/integration/rtds_integration_test.cc | 75 +++++++++++-- test/mocks/runtime/mocks.h | 1 + test/mocks/upstream/mocks.h | 5 +- test/server/server_test.cc | 17 ++- .../test_data/server/runtime_bootstrap.yaml | 9 -- .../server/runtime_bootstrap_ads_eds.yaml | 38 +++++++ .../server/runtime_bootstrap_eds.yaml | 35 ++++++ test/test_common/test_runtime.h | 5 +- 22 files changed, 519 insertions(+), 112 deletions(-) create mode 100644 test/server/test_data/server/runtime_bootstrap_ads_eds.yaml create mode 100644 test/server/test_data/server/runtime_bootstrap_eds.yaml diff --git a/include/envoy/runtime/runtime.h b/include/envoy/runtime/runtime.h index 2df95731398b..52abc0e50616 100644 --- a/include/envoy/runtime/runtime.h +++ b/include/envoy/runtime/runtime.h @@ -259,6 +259,8 @@ class Loader { public: virtual ~Loader() = default; + using ReadyCallback = std::function; + /** * Post-construction initialization. Runtime will be generally available after * the constructor is finished, with the exception of dynamic RTDS layers, @@ -286,6 +288,12 @@ class Loader { * @param values the values to merge */ virtual void mergeValues(const std::unordered_map& values) PURE; + + /** + * Initiate all RTDS subscriptions. The `on_done` callback is invoked when all RTDS requests + * have either received and applied their responses or timed out. 
+ */ + virtual void startRtdsSubscriptions(ReadyCallback on_done) PURE; }; using LoaderPtr = std::unique_ptr; diff --git a/include/envoy/upstream/cluster_manager.h b/include/envoy/upstream/cluster_manager.h index 047bf2aafd48..e049faf57abd 100644 --- a/include/envoy/upstream/cluster_manager.h +++ b/include/envoy/upstream/cluster_manager.h @@ -73,9 +73,22 @@ class ClusterManagerFactory; /** * Manages connection pools and load balancing for upstream clusters. The cluster manager is * persistent and shared among multiple ongoing requests/connections. + * Cluster manager is initialized in two phases. In the first phase which begins at the construction + * all primary clusters (i.e. with endpoint assignments provisioned statically in bootstrap, + * discovered through DNS or file based CDS) are initialized. This phase may complete synchronously + * with cluster manager construction iff all clusters are STATIC and without health checks + * configured. At the completion of the first phase cluster manager invokes callback set through the + * `setPrimaryClustersInitializedCb` method. + * After the first phase has completed the server instance initializes services (i.e. RTDS) needed + * to successfully deploy the rest of dynamic configuration. + * In the second phase all secondary clusters (with endpoint assignments provisioned by xDS servers) + * are initialized and then the rest of the configuration provisioned through xDS. */ class ClusterManager { public: + using PrimaryClustersReadyCallback = std::function; + using InitializationCompleteCallback = std::function; + virtual ~ClusterManager() = default; /** @@ -91,10 +104,23 @@ class ClusterManager { virtual bool addOrUpdateCluster(const envoy::config::cluster::v3::Cluster& cluster, const std::string& version_info) PURE; + /** + * Set a callback that will be invoked when all primary clusters have been initialized. + */ + virtual void setPrimaryClustersInitializedCb(PrimaryClustersReadyCallback callback) PURE; + /** * Set a callback that will be invoked when all owned clusters have been initialized. */ - virtual void setInitializedCb(std::function callback) PURE; + virtual void setInitializedCb(InitializationCompleteCallback callback) PURE; + + /** + * Start initialization of secondary clusters and then dynamically configured clusters. + * The "initialized callback" set in the method above is invoked when secondary and + * dynamically provisioned clusters have finished initializing. 
+ */ + virtual void + initializeSecondaryClusters(const envoy::config::bootstrap::v3::Bootstrap& bootstrap) PURE; using ClusterInfoMap = std::unordered_map>; diff --git a/source/common/runtime/BUILD b/source/common/runtime/BUILD index dbab335cf0a7..ddeb069e3e5a 100644 --- a/source/common/runtime/BUILD +++ b/source/common/runtime/BUILD @@ -67,7 +67,9 @@ envoy_cc_library( "//source/common/config:subscription_base_interface", "//source/common/filesystem:directory_lib", "//source/common/grpc:common_lib", + "//source/common/init:manager_lib", "//source/common/init:target_lib", + "//source/common/init:watcher_lib", "//source/common/protobuf:message_validator_lib", "//source/common/protobuf:utility_lib", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", diff --git a/source/common/runtime/runtime_impl.cc b/source/common/runtime/runtime_impl.cc index 89d69295242c..17efeeab1d53 100644 --- a/source/common/runtime/runtime_impl.cc +++ b/source/common/runtime/runtime_impl.cc @@ -469,11 +469,12 @@ void ProtoLayer::walkProtoValue(const ProtobufWkt::Value& v, const std::string& LoaderImpl::LoaderImpl(Event::Dispatcher& dispatcher, ThreadLocal::SlotAllocator& tls, const envoy::config::bootstrap::v3::LayeredRuntime& config, - const LocalInfo::LocalInfo& local_info, Init::Manager& init_manager, - Stats::Store& store, RandomGenerator& generator, + const LocalInfo::LocalInfo& local_info, Stats::Store& store, + RandomGenerator& generator, ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api) : generator_(generator), stats_(generateStats(store)), tls_(tls.allocateSlot()), - config_(config), service_cluster_(local_info.clusterName()), api_(api) { + config_(config), service_cluster_(local_info.clusterName()), api_(api), + init_watcher_("RDTS", [this]() { onRdtsReady(); }) { std::unordered_set layer_names; for (const auto& layer : config_.layers()) { auto ret = layer_names.insert(layer.name()); @@ -501,7 +502,7 @@ LoaderImpl::LoaderImpl(Event::Dispatcher& dispatcher, ThreadLocal::SlotAllocator case envoy::config::bootstrap::v3::RuntimeLayer::LayerSpecifierCase::kRtdsLayer: subscriptions_.emplace_back( std::make_unique(*this, layer.rtds_layer(), store, validation_visitor)); - init_manager.add(subscriptions_.back()->init_target_); + init_manager_.add(subscriptions_.back()->init_target_); break; default: NOT_REACHED_GCOVR_EXCL_LINE; @@ -513,6 +514,16 @@ LoaderImpl::LoaderImpl(Event::Dispatcher& dispatcher, ThreadLocal::SlotAllocator void LoaderImpl::initialize(Upstream::ClusterManager& cm) { cm_ = &cm; } +void LoaderImpl::startRtdsSubscriptions(ReadyCallback on_done) { + on_rtds_initialized_ = on_done; + init_manager_.initialize(init_watcher_); +} + +void LoaderImpl::onRdtsReady() { + ENVOY_LOG(info, "RTDS has finished initialization"); + on_rtds_initialized_(); +} + RtdsSubscription::RtdsSubscription( LoaderImpl& parent, const envoy::config::bootstrap::v3::RuntimeLayer::RtdsLayer& rtds_layer, Stats::Store& store, ProtobufMessage::ValidationVisitor& validation_visitor) diff --git a/source/common/runtime/runtime_impl.h b/source/common/runtime/runtime_impl.h index 10151cf4dbd5..c3047d55099c 100644 --- a/source/common/runtime/runtime_impl.h +++ b/source/common/runtime/runtime_impl.h @@ -24,6 +24,7 @@ #include "common/common/logger.h" #include "common/common/thread.h" #include "common/config/subscription_base.h" +#include "common/init/manager_impl.h" #include "common/init/target_impl.h" #include "common/singleton/threadsafe_singleton.h" @@ -243,15 +244,16 @@ class LoaderImpl : public Loader, 
Logger::Loggable { public: LoaderImpl(Event::Dispatcher& dispatcher, ThreadLocal::SlotAllocator& tls, const envoy::config::bootstrap::v3::LayeredRuntime& config, - const LocalInfo::LocalInfo& local_info, Init::Manager& init_manager, - Stats::Store& store, RandomGenerator& generator, - ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api); + const LocalInfo::LocalInfo& local_info, Stats::Store& store, + RandomGenerator& generator, ProtobufMessage::ValidationVisitor& validation_visitor, + Api::Api& api); // Runtime::Loader void initialize(Upstream::ClusterManager& cm) override; const Snapshot& snapshot() override; std::shared_ptr threadsafeSnapshot() override; void mergeValues(const std::unordered_map& values) override; + void startRtdsSubscriptions(ReadyCallback on_done) override; private: friend RtdsSubscription; @@ -261,6 +263,7 @@ class LoaderImpl : public Loader, Logger::Loggable { // Load a new Snapshot into TLS void loadNewSnapshot(); RuntimeStats generateStats(Stats::Store& store); + void onRdtsReady(); RandomGenerator& generator_; RuntimeStats stats_; @@ -270,6 +273,9 @@ class LoaderImpl : public Loader, Logger::Loggable { const std::string service_cluster_; Filesystem::WatcherPtr watcher_; Api::Api& api_; + ReadyCallback on_rtds_initialized_; + Init::WatcherImpl init_watcher_; + Init::ManagerImpl init_manager_{"RTDS"}; std::vector subscriptions_; Upstream::ClusterManager* cm_{}; diff --git a/source/common/upstream/cluster_manager_impl.cc b/source/common/upstream/cluster_manager_impl.cc index 8e4d6f9975c3..9be913e4e7aa 100644 --- a/source/common/upstream/cluster_manager_impl.cc +++ b/source/common/upstream/cluster_manager_impl.cc @@ -123,16 +123,24 @@ void ClusterManagerInitHelper::maybeFinishInitialize() { // Do not do anything if we are still doing the initial static load or if we are waiting for // CDS initialize. ENVOY_LOG(debug, "maybe finish initialize state: {}", enumToInt(state_)); - if (state_ == State::Loading || state_ == State::WaitingForCdsInitialize) { + if (state_ == State::Loading || state_ == State::WaitingToStartCdsInitialization) { return; } - // If we are still waiting for primary clusters to initialize, do nothing. - ASSERT(state_ == State::WaitingForStaticInitialize || state_ == State::CdsInitialized); + ASSERT(state_ == State::WaitingToStartSecondaryInitialization || + state_ == State::CdsInitialized || + state_ == State::WaitingForPrimaryInitializationToComplete); ENVOY_LOG(debug, "maybe finish initialize primary init clusters empty: {}", primary_init_clusters_.empty()); + // If we are still waiting for primary clusters to initialize, do nothing. if (!primary_init_clusters_.empty()) { return; + } else if (state_ == State::WaitingForPrimaryInitializationToComplete) { + state_ = State::WaitingToStartSecondaryInitialization; + if (primary_clusters_initialized_callback_) { + primary_clusters_initialized_callback_(); + } + return; } // If we are still waiting for secondary clusters to initialize, see if we need to first call @@ -162,9 +170,9 @@ void ClusterManagerInitHelper::maybeFinishInitialize() { // directly to initialized. 
started_secondary_initialize_ = false; ENVOY_LOG(debug, "maybe finish initialize cds api ready: {}", cds_ != nullptr); - if (state_ == State::WaitingForStaticInitialize && cds_) { + if (state_ == State::WaitingToStartSecondaryInitialization && cds_) { ENVOY_LOG(info, "cm init: initializing cds"); - state_ = State::WaitingForCdsInitialize; + state_ = State::WaitingToStartCdsInitialization; cds_->initialize(); } else { ENVOY_LOG(info, "cm init: all clusters initialized"); @@ -177,7 +185,15 @@ void ClusterManagerInitHelper::maybeFinishInitialize() { void ClusterManagerInitHelper::onStaticLoadComplete() { ASSERT(state_ == State::Loading); - state_ = State::WaitingForStaticInitialize; + // After initialization of primary clusters has completed, transition to + // waiting for signal to initialize secondary clusters and then CDS. + state_ = State::WaitingForPrimaryInitializationToComplete; + maybeFinishInitialize(); +} + +void ClusterManagerInitHelper::startInitializingSecondaryClusters() { + ASSERT(state_ == State::WaitingToStartSecondaryInitialization); + ENVOY_LOG(debug, "continue initializing secondary clusters"); maybeFinishInitialize(); } @@ -186,14 +202,15 @@ void ClusterManagerInitHelper::setCds(CdsApi* cds) { cds_ = cds; if (cds_) { cds_->setInitializedCb([this]() -> void { - ASSERT(state_ == State::WaitingForCdsInitialize); + ASSERT(state_ == State::WaitingToStartCdsInitialization); state_ = State::CdsInitialized; maybeFinishInitialize(); }); } } -void ClusterManagerInitHelper::setInitializedCb(std::function callback) { +void ClusterManagerInitHelper::setInitializedCb( + ClusterManager::InitializationCompleteCallback callback) { if (state_ == State::AllClustersInitialized) { callback(); } else { @@ -201,6 +218,19 @@ void ClusterManagerInitHelper::setInitializedCb(std::function callback) } } +void ClusterManagerInitHelper::setPrimaryClustersInitializedCb( + ClusterManager::PrimaryClustersReadyCallback callback) { + // The callback must be set before or at the `WaitingToStartSecondaryInitialization` state. + ASSERT(state_ == State::WaitingToStartSecondaryInitialization || + state_ == State::WaitingForPrimaryInitializationToComplete || state_ == State::Loading); + if (state_ == State::WaitingToStartSecondaryInitialization) { + // This is the case where all clusters are STATIC and without health checking. 
+ callback(); + } else { + primary_clusters_initialized_callback_ = callback; + } +} + ClusterManagerImpl::ClusterManagerImpl( const envoy::config::bootstrap::v3::Bootstrap& bootstrap, ClusterManagerFactory& factory, Stats::Store& stats, ThreadLocal::Instance& tls, Runtime::Loader& runtime, @@ -346,15 +376,22 @@ ClusterManagerImpl::ClusterManagerImpl( init_helper_.onStaticLoadComplete(); ads_mux_->start(); +} +void ClusterManagerImpl::initializeSecondaryClusters( + const envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + init_helper_.startInitializingSecondaryClusters(); + + const auto& cm_config = bootstrap.cluster_manager(); if (cm_config.has_load_stats_config()) { const auto& load_stats_config = cm_config.load_stats_config(); + load_stats_reporter_ = std::make_unique( - local_info, *this, stats, + local_info_, *this, stats_, Config::Utility::factoryForGrpcApiConfigSource(*async_client_manager_, load_stats_config, - stats, false) + stats_, false) ->create(), - load_stats_config.transport_api_version(), main_thread_dispatcher); + load_stats_config.transport_api_version(), dispatcher_); } } diff --git a/source/common/upstream/cluster_manager_impl.h b/source/common/upstream/cluster_manager_impl.h index 707cc1ca476e..2e5800ecb952 100644 --- a/source/common/upstream/cluster_manager_impl.h +++ b/source/common/upstream/cluster_manager_impl.h @@ -110,15 +110,22 @@ class ClusterManagerInitHelper : Logger::Loggable { : cm_(cm), per_cluster_init_callback_(per_cluster_init_callback) {} enum class State { - // Initial state. During this state all static clusters are loaded. Any phase 1 clusters - // are immediately initialized. + // Initial state. During this state all static clusters are loaded. Any primary clusters + // immediately begin initialization. Loading, - // During this state we wait for all static clusters to fully initialize. This requires - // completing phase 1 clusters, initializing phase 2 clusters, and then waiting for them. - WaitingForStaticInitialize, - // If CDS is configured, this state tracks waiting for the first CDS response to populate - // clusters. - WaitingForCdsInitialize, + // In this state cluster manager waits for all primary clusters to finish initialization. + // This state may immediately transition to the next state iff all clusters are STATIC and + // without health checks enabled or health checks have failed immediately, since their + // initialization completes immediately. + WaitingForPrimaryInitializationToComplete, + // During this state cluster manager waits to start initializing secondary clusters. In this + // state all primary clusters have completed initialization. Initialization of the + // secondary clusters is started by the `initializeSecondaryClusters` method. + WaitingToStartSecondaryInitialization, + // In this state cluster manager waits for all secondary clusters (if configured) to finish + // initialization. Then, if CDS is configured, this state tracks waiting for the first CDS + // response to populate dynamically configured clusters. + WaitingToStartCdsInitialization, // During this state, all CDS populated clusters are undergoing either phase 1 or phase 2 // initialization. 
CdsInitialized, @@ -130,9 +137,12 @@ class ClusterManagerInitHelper : Logger::Loggable { void onStaticLoadComplete(); void removeCluster(Cluster& cluster); void setCds(CdsApi* cds); - void setInitializedCb(std::function callback); + void setPrimaryClustersInitializedCb(ClusterManager::PrimaryClustersReadyCallback callback); + void setInitializedCb(ClusterManager::InitializationCompleteCallback callback); State state() const { return state_; } + void startInitializingSecondaryClusters(); + private: // To enable invariant assertions on the cluster lists. friend ClusterManagerImpl; @@ -144,7 +154,8 @@ class ClusterManagerInitHelper : Logger::Loggable { ClusterManager& cm_; std::function per_cluster_init_callback_; CdsApi* cds_{}; - std::function initialized_callback_; + ClusterManager::PrimaryClustersReadyCallback primary_clusters_initialized_callback_; + ClusterManager::InitializationCompleteCallback initialized_callback_; std::list primary_init_clusters_; std::list secondary_init_clusters_; State state_{State::Loading}; @@ -192,7 +203,12 @@ class ClusterManagerImpl : public ClusterManager, Logger::Loggable callback) override { + + void setPrimaryClustersInitializedCb(PrimaryClustersReadyCallback callback) override { + init_helper_.setPrimaryClustersInitializedCb(callback); + } + + void setInitializedCb(InitializationCompleteCallback callback) override { init_helper_.setInitializedCb(callback); } @@ -242,6 +258,9 @@ class ClusterManagerImpl : public ClusterManager, Logger::LoggableinitializeStats(stats_store_, "server."); } + // The broad order of initialization from this point on is the following: + // 1. Statically provisioned configuration (bootstrap) are loaded. + // 2. Cluster manager is created and all primary clusters (i.e. with endpoint assignments + // provisioned statically in bootstrap, discovered through DNS or file based CDS) are + // initialized. + // 3. Various services are initialized and configured using the bootstrap config. + // 4. RTDS is initialized using primary clusters. This allows runtime overrides to be fully + // configured before the rest of xDS configuration is provisioned. + // 5. Secondary clusters (with endpoint assignments provisioned by xDS servers) are initialized. + // 6. The rest of the dynamic configuration is provisioned. + // + // Please note: this order requires that RTDS is provisioned using a primary cluster. If RTDS is + // provisioned through ADS then ADS must use primary cluster as well. This invariant is enforced + // during RTDS initialization and invalid configuration will be rejected. + // Runtime gets initialized before the main configuration since during main configuration // load things may grab a reference to the loader for later use. runtime_singleton_ = std::make_unique( @@ -450,6 +465,32 @@ void InstanceImpl::initialize(const Options& options, // instantiated (which in turn relies on runtime...). Runtime::LoaderSingleton::get().initialize(clusterManager()); + clusterManager().setPrimaryClustersInitializedCb( + [this]() { onClusterManagerPrimaryInitializationComplete(); }); + + for (Stats::SinkPtr& sink : config_.statsSinks()) { + stats_store_.addSink(*sink); + } + + // Some of the stat sinks may need dispatcher support so don't flush until the main loop starts. + // Just setup the timer. 
+ stat_flush_timer_ = dispatcher_->createTimer([this]() -> void { flushStats(); }); + stat_flush_timer_->enableTimer(config_.statsFlushInterval()); + + // GuardDog (deadlock detection) object and thread setup before workers are + // started and before our own run() loop runs. + guard_dog_ = std::make_unique(stats_store_, config_, *api_); +} + +void InstanceImpl::onClusterManagerPrimaryInitializationComplete() { + // If RTDS was not configured the `onRuntimeReady` callback is immediately invoked. + Runtime::LoaderSingleton::get().startRtdsSubscriptions([this]() { onRuntimeReady(); }); +} + +void InstanceImpl::onRuntimeReady() { + // Begin initializing secondary clusters after RTDS configuration has been applied. + clusterManager().initializeSecondaryClusters(bootstrap_); + if (bootstrap_.has_hds_config()) { const auto& hds_config = bootstrap_.hds_config(); async_client_manager_ = std::make_unique( @@ -464,19 +505,6 @@ void InstanceImpl::initialize(const Options& options, *config_.clusterManager(), *local_info_, *admin_, *singleton_manager_, thread_local_, messageValidationContext().dynamicValidationVisitor(), *api_); } - - for (Stats::SinkPtr& sink : config_.statsSinks()) { - stats_store_.addSink(*sink); - } - - // Some of the stat sinks may need dispatcher support so don't flush until the main loop starts. - // Just setup the timer. - stat_flush_timer_ = dispatcher_->createTimer([this]() -> void { flushStats(); }); - stat_flush_timer_->enableTimer(config_.statsFlushInterval()); - - // GuardDog (deadlock detection) object and thread setup before workers are - // started and before our own run() loop runs. - guard_dog_ = std::make_unique(stats_store_, config_, *api_); } void InstanceImpl::startWorkers() { @@ -496,8 +524,8 @@ Runtime::LoaderPtr InstanceUtil::createRuntime(Instance& server, ENVOY_LOG(info, "runtime: {}", MessageUtil::getYamlStringFromMessage(config.runtime())); return std::make_unique( server.dispatcher(), server.threadLocal(), config.runtime(), server.localInfo(), - server.initManager(), server.stats(), server.random(), - server.messageValidationContext().dynamicValidationVisitor(), server.api()); + server.stats(), server.random(), server.messageValidationContext().dynamicValidationVisitor(), + server.api()); } void InstanceImpl::loadServerFlags(const absl::optional& flags_path) { @@ -718,4 +746,4 @@ ProtobufTypes::MessagePtr InstanceImpl::dumpBootstrapConfig() { } } // namespace Server -} // namespace Envoy \ No newline at end of file +} // namespace Envoy diff --git a/source/server/server.h b/source/server/server.h index 107de3eb30e2..f10db7332af3 100644 --- a/source/server/server.h +++ b/source/server/server.h @@ -285,6 +285,8 @@ class InstanceImpl final : Logger::Loggable, void terminate(); void notifyCallbacksForStage( Stage stage, Event::PostCb completion_cb = [] {}); + void onRuntimeReady(); + void onClusterManagerPrimaryInitializationComplete(); using LifecycleNotifierCallbacks = std::list; using LifecycleNotifierCompletionCallbacks = std::list; @@ -305,6 +307,9 @@ class InstanceImpl final : Logger::Loggable, const Options& options_; ProtobufMessage::ProdValidationContextImpl validation_context_; TimeSource& time_source_; + // Delete local_info_ as late as possible as some members below may reference it during their + // destruction. 
+ LocalInfo::LocalInfoPtr local_info_; HotRestart& restarter_; const time_t start_time_; time_t original_start_time_; @@ -328,7 +333,6 @@ class InstanceImpl final : Logger::Loggable, Configuration::MainImpl config_; Network::DnsResolverSharedPtr dns_resolver_; Event::TimerPtr stat_flush_timer_; - LocalInfo::LocalInfoPtr local_info_; DrainManagerPtr drain_manager_; AccessLog::AccessLogManagerImpl access_log_manager_; std::unique_ptr cluster_manager_factory_; diff --git a/test/common/protobuf/utility_test.cc b/test/common/protobuf/utility_test.cc index bffc82971a08..35b634d1edaa 100644 --- a/test/common/protobuf/utility_test.cc +++ b/test/common/protobuf/utility_test.cc @@ -1402,9 +1402,9 @@ class DeprecatedFieldsTest : public testing::TestWithParam { Stats::Gauge::ImportMode::NeverImport)) { envoy::config::bootstrap::v3::LayeredRuntime config; config.add_layers()->mutable_admin_layer(); - loader_ = std::make_unique(Runtime::LoaderPtr{ - new Runtime::LoaderImpl(dispatcher_, tls_, config, local_info_, init_manager_, store_, - generator_, validation_visitor_, *api_)}); + loader_ = std::make_unique( + Runtime::LoaderPtr{new Runtime::LoaderImpl(dispatcher_, tls_, config, local_info_, store_, + generator_, validation_visitor_, *api_)}); } void checkForDeprecation(const Protobuf::Message& message) { @@ -1429,7 +1429,6 @@ class DeprecatedFieldsTest : public testing::TestWithParam { Stats::Counter& runtime_deprecated_feature_use_; Stats::Gauge& deprecated_feature_seen_since_process_start_; NiceMock local_info_; - Init::MockManager init_manager_; NiceMock validation_visitor_; }; diff --git a/test/common/runtime/runtime_impl_test.cc b/test/common/runtime/runtime_impl_test.cc index 1d9d7076a372..4701f5412834 100644 --- a/test/common/runtime/runtime_impl_test.cc +++ b/test/common/runtime/runtime_impl_test.cc @@ -28,6 +28,7 @@ using testing::_; using testing::Invoke; using testing::InvokeWithoutArgs; +using testing::MockFunction; using testing::NiceMock; using testing::Return; @@ -118,7 +119,6 @@ class LoaderImplTest : public testing::Test { Api::ApiPtr api_; Upstream::MockClusterManager cm_; NiceMock local_info_; - Init::MockManager init_manager_; std::vector on_changed_cbs_; NiceMock validation_visitor_; std::string expected_watch_root_; @@ -145,9 +145,8 @@ class DiskLoaderImplTest : public LoaderImplTest { envoy::config::bootstrap::v3::LayeredRuntime layered_runtime; Config::translateRuntime(runtime, layered_runtime); - loader_ = - std::make_unique(dispatcher_, tls_, layered_runtime, local_info_, init_manager_, - store_, generator_, validation_visitor_, *api_); + loader_ = std::make_unique(dispatcher_, tls_, layered_runtime, local_info_, store_, + generator_, validation_visitor_, *api_); } void write(const std::string& path, const std::string& value) { @@ -557,8 +556,8 @@ TEST_F(DiskLoaderImplTest, MultipleAdminLayersFail) { layer->mutable_admin_layer(); } EXPECT_THROW_WITH_MESSAGE( - std::make_unique(dispatcher_, tls_, layered_runtime, local_info_, init_manager_, - store_, generator_, validation_visitor_, *api_), + std::make_unique(dispatcher_, tls_, layered_runtime, local_info_, store_, + generator_, validation_visitor_, *api_), EnvoyException, "Too many admin layers specified in LayeredRuntime, at most one may be specified"); } @@ -578,9 +577,8 @@ class StaticLoaderImplTest : public LoaderImplTest { layer->set_name("admin"); layer->mutable_admin_layer(); } - loader_ = - std::make_unique(dispatcher_, tls_, layered_runtime, local_info_, init_manager_, - store_, generator_, validation_visitor_, 
*api_); + loader_ = std::make_unique(dispatcher_, tls_, layered_runtime, local_info_, store_, + generator_, validation_visitor_, *api_); } ProtobufWkt::Struct base_; @@ -865,9 +863,6 @@ class RtdsLoaderImplTest : public LoaderImplTest { rtds_layer->mutable_rtds_config(); } EXPECT_CALL(cm_, subscriptionFactory()).Times(layers_.size()); - EXPECT_CALL(init_manager_, add(_)).WillRepeatedly(Invoke([this](const Init::Target& target) { - init_target_handles_.emplace_back(target.createHandle("test")); - })); ON_CALL(cm_.subscription_factory_, subscriptionFromConfigSource(_, _, _, _)) .WillByDefault(testing::Invoke( [this](const envoy::config::core::v3::ConfigSource&, absl::string_view, Stats::Scope&, @@ -877,15 +872,14 @@ class RtdsLoaderImplTest : public LoaderImplTest { rtds_callbacks_.push_back(&callbacks); return ret; })); - loader_ = std::make_unique(dispatcher_, tls_, config, local_info_, init_manager_, - store_, generator_, validation_visitor_, *api_); + loader_ = std::make_unique(dispatcher_, tls_, config, local_info_, store_, + generator_, validation_visitor_, *api_); loader_->initialize(cm_); for (auto* sub : rtds_subscriptions_) { EXPECT_CALL(*sub, start(_)); } - for (auto& handle : init_target_handles_) { - handle->initialize(init_watcher_); - } + + loader_->startRtdsSubscriptions(rtds_init_callback_.AsStdFunction()); // Validate that the layer name is set properly for dynamic layers. EXPECT_EQ(layers_[0], loader_->snapshot().getLayers()[1]->name()); @@ -921,8 +915,7 @@ class RtdsLoaderImplTest : public LoaderImplTest { std::vector layers_{"some_resource"}; std::vector rtds_callbacks_; std::vector rtds_subscriptions_; - Init::ExpectableWatcherImpl init_watcher_; - std::vector init_target_handles_; + MockFunction rtds_init_callback_; }; // Empty resource lists are rejected. 
@@ -931,7 +924,7 @@ TEST_F(RtdsLoaderImplTest, UnexpectedSizeEmpty) { Protobuf::RepeatedPtrField runtimes; - EXPECT_CALL(init_watcher_, ready()); + EXPECT_CALL(rtds_init_callback_, Call()); EXPECT_THROW_WITH_MESSAGE(rtds_callbacks_[0]->onConfigUpdate(runtimes, ""), EnvoyException, "Unexpected RTDS resource length: 0"); @@ -949,7 +942,7 @@ TEST_F(RtdsLoaderImplTest, UnexpectedSizeTooMany) { runtimes.Add(); runtimes.Add(); - EXPECT_CALL(init_watcher_, ready()); + EXPECT_CALL(rtds_init_callback_, Call()); EXPECT_THROW_WITH_MESSAGE(rtds_callbacks_[0]->onConfigUpdate(runtimes, ""), EnvoyException, "Unexpected RTDS resource length: 2"); @@ -963,7 +956,7 @@ TEST_F(RtdsLoaderImplTest, UnexpectedSizeTooMany) { TEST_F(RtdsLoaderImplTest, FailureSubscription) { setup(); - EXPECT_CALL(init_watcher_, ready()); + EXPECT_CALL(rtds_init_callback_, Call()); // onConfigUpdateFailed() should not be called for gRPC stream connection failure rtds_callbacks_[0]->onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::FetchTimedout, {}); @@ -1009,7 +1002,7 @@ TEST_F(RtdsLoaderImplTest, OnConfigUpdateSuccess) { foo: bar baz: meh )EOF"); - EXPECT_CALL(init_watcher_, ready()); + EXPECT_CALL(rtds_init_callback_, Call()); doOnConfigUpdateVerifyNoThrow(runtime); EXPECT_EQ("bar", loader_->snapshot().get("foo").value().get()); @@ -1048,7 +1041,7 @@ TEST_F(RtdsLoaderImplTest, DeltaOnConfigUpdateSuccess) { foo: bar baz: meh )EOF"); - EXPECT_CALL(init_watcher_, ready()); + EXPECT_CALL(rtds_init_callback_, Call()); doDeltaOnConfigUpdateVerifyNoThrow(runtime); EXPECT_EQ("bar", loader_->snapshot().get("foo").value().get()); @@ -1092,7 +1085,7 @@ TEST_F(RtdsLoaderImplTest, MultipleRtdsLayers) { foo: bar baz: meh )EOF"); - EXPECT_CALL(init_watcher_, ready()).Times(2); + EXPECT_CALL(rtds_init_callback_, Call()).Times(1); doOnConfigUpdateVerifyNoThrow(runtime, 0); EXPECT_EQ("bar", loader_->snapshot().get("foo").value().get()); diff --git a/test/common/upstream/cluster_manager_impl_test.cc b/test/common/upstream/cluster_manager_impl_test.cc index 2e300b5b2844..82b158706c8d 100644 --- a/test/common/upstream/cluster_manager_impl_test.cc +++ b/test/common/upstream/cluster_manager_impl_test.cc @@ -41,6 +41,8 @@ class ClusterManagerImplTest : public testing::Test { bootstrap, factory_, factory_.stats_, factory_.tls_, factory_.runtime_, factory_.random_, factory_.local_info_, log_manager_, factory_.dispatcher_, admin_, validation_context_, *api_, http_context_, grpc_context_); + cluster_manager_->setPrimaryClustersInitializedCb( + [this, bootstrap]() { cluster_manager_->initializeSecondaryClusters(bootstrap); }); } void createWithLocalClusterUpdate(const bool enable_merge_window = true) { @@ -2829,6 +2831,7 @@ TEST_F(ClusterManagerInitHelperTest, ImmediateInitialize) { cluster1.initialize_callback_(); init_helper_.onStaticLoadComplete(); + init_helper_.startInitializingSecondaryClusters(); ReadyWatcher cm_initialized; EXPECT_CALL(cm_initialized, ready()); @@ -2849,9 +2852,11 @@ TEST_F(ClusterManagerInitHelperTest, StaticSdsInitialize) { ON_CALL(cluster1, initializePhase()).WillByDefault(Return(Cluster::InitializePhase::Secondary)); init_helper_.addCluster(cluster1); - EXPECT_CALL(cluster1, initialize(_)); init_helper_.onStaticLoadComplete(); + EXPECT_CALL(cluster1, initialize(_)); + init_helper_.startInitializingSecondaryClusters(); + ReadyWatcher cm_initialized; init_helper_.setInitializedCb([&]() -> void { cm_initialized.ready(); }); @@ -2863,6 +2868,9 @@ TEST_F(ClusterManagerInitHelperTest, StaticSdsInitialize) { 
TEST_F(ClusterManagerInitHelperTest, UpdateAlreadyInitialized) { InSequence s; + ReadyWatcher primary_clusters_initialized; + init_helper_.setPrimaryClustersInitializedCb( + [&]() -> void { primary_clusters_initialized.ready(); }); ReadyWatcher cm_initialized; init_helper_.setInitializedCb([&]() -> void { cm_initialized.ready(); }); @@ -2883,8 +2891,11 @@ TEST_F(ClusterManagerInitHelperTest, UpdateAlreadyInitialized) { init_helper_.removeCluster(cluster1); EXPECT_CALL(*this, onClusterInit(Ref(cluster2))); - EXPECT_CALL(cm_initialized, ready()); + EXPECT_CALL(primary_clusters_initialized, ready()); cluster2.initialize_callback_(); + + EXPECT_CALL(cm_initialized, ready()); + init_helper_.startInitializingSecondaryClusters(); } // If secondary clusters initialization triggered outside of CdsApiImpl::onConfigUpdate()'s @@ -2894,6 +2905,9 @@ TEST_F(ClusterManagerInitHelperTest, UpdateAlreadyInitialized) { TEST_F(ClusterManagerInitHelperTest, InitSecondaryWithoutEdsPaused) { InSequence s; + ReadyWatcher primary_clusters_initialized; + init_helper_.setPrimaryClustersInitializedCb( + [&]() -> void { primary_clusters_initialized.ready(); }); ReadyWatcher cm_initialized; init_helper_.setInitializedCb([&]() -> void { cm_initialized.ready(); }); @@ -2901,8 +2915,10 @@ TEST_F(ClusterManagerInitHelperTest, InitSecondaryWithoutEdsPaused) { ON_CALL(cluster1, initializePhase()).WillByDefault(Return(Cluster::InitializePhase::Secondary)); init_helper_.addCluster(cluster1); - EXPECT_CALL(cluster1, initialize(_)); + EXPECT_CALL(primary_clusters_initialized, ready()); init_helper_.onStaticLoadComplete(); + EXPECT_CALL(cluster1, initialize(_)); + init_helper_.startInitializingSecondaryClusters(); EXPECT_CALL(*this, onClusterInit(Ref(cluster1))); EXPECT_CALL(cm_initialized, ready()); @@ -2916,6 +2932,9 @@ TEST_F(ClusterManagerInitHelperTest, InitSecondaryWithoutEdsPaused) { TEST_F(ClusterManagerInitHelperTest, InitSecondaryWithEdsPaused) { InSequence s; + ReadyWatcher primary_clusters_initialized; + init_helper_.setPrimaryClustersInitializedCb( + [&]() -> void { primary_clusters_initialized.ready(); }); ReadyWatcher cm_initialized; init_helper_.setInitializedCb([&]() -> void { cm_initialized.ready(); }); @@ -2923,9 +2942,12 @@ TEST_F(ClusterManagerInitHelperTest, InitSecondaryWithEdsPaused) { ON_CALL(cluster1, initializePhase()).WillByDefault(Return(Cluster::InitializePhase::Secondary)); init_helper_.addCluster(cluster1); - EXPECT_CALL(cluster1, initialize(_)); + EXPECT_CALL(primary_clusters_initialized, ready()); init_helper_.onStaticLoadComplete(); + EXPECT_CALL(cluster1, initialize(_)); + init_helper_.startInitializingSecondaryClusters(); + EXPECT_CALL(*this, onClusterInit(Ref(cluster1))); EXPECT_CALL(cm_initialized, ready()); cluster1.initialize_callback_(); @@ -2934,6 +2956,9 @@ TEST_F(ClusterManagerInitHelperTest, InitSecondaryWithEdsPaused) { TEST_F(ClusterManagerInitHelperTest, AddSecondaryAfterSecondaryInit) { InSequence s; + ReadyWatcher primary_clusters_initialized; + init_helper_.setPrimaryClustersInitializedCb( + [&]() -> void { primary_clusters_initialized.ready(); }); ReadyWatcher cm_initialized; init_helper_.setInitializedCb([&]() -> void { cm_initialized.ready(); }); @@ -2949,8 +2974,10 @@ TEST_F(ClusterManagerInitHelperTest, AddSecondaryAfterSecondaryInit) { init_helper_.onStaticLoadComplete(); EXPECT_CALL(*this, onClusterInit(Ref(cluster1))); + EXPECT_CALL(primary_clusters_initialized, ready()); EXPECT_CALL(cluster2, initialize(_)); cluster1.initialize_callback_(); + 
init_helper_.startInitializingSecondaryClusters(); NiceMock cluster3; ON_CALL(cluster3, initializePhase()).WillByDefault(Return(Cluster::InitializePhase::Secondary)); @@ -2972,6 +2999,9 @@ TEST_F(ClusterManagerInitHelperTest, RemoveClusterWithinInitLoop) { ON_CALL(cluster, initializePhase()).WillByDefault(Return(Cluster::InitializePhase::Secondary)); init_helper_.addCluster(cluster); + // onStaticLoadComplete() must not initialize secondary clusters + init_helper_.onStaticLoadComplete(); + // Set up the scenario seen in Issue 903 where initialize() ultimately results // in the removeCluster() call. In the real bug this was a long and complex call // chain. @@ -2979,9 +3009,9 @@ TEST_F(ClusterManagerInitHelperTest, RemoveClusterWithinInitLoop) { init_helper_.removeCluster(cluster); })); - // Now call onStaticLoadComplete which will exercise maybeFinishInitialize() + // Now call initializeSecondaryClusters which will exercise maybeFinishInitialize() // which calls initialize() on the members of the secondary init list. - init_helper_.onStaticLoadComplete(); + init_helper_.startInitializingSecondaryClusters(); } // Validate that when options are set in the ClusterManager and/or Cluster, we see the socket option diff --git a/test/extensions/clusters/aggregate/cluster_update_test.cc b/test/extensions/clusters/aggregate/cluster_update_test.cc index f040c6b88c5d..e7cbbcb4311d 100644 --- a/test/extensions/clusters/aggregate/cluster_update_test.cc +++ b/test/extensions/clusters/aggregate/cluster_update_test.cc @@ -35,10 +35,12 @@ class AggregateClusterUpdateTest : public testing::Test { : http_context_(stats_store_.symbolTable()), grpc_context_(stats_store_.symbolTable()) {} void initialize(const std::string& yaml_config) { + auto bootstrap = parseBootstrapFromV2Yaml(yaml_config); cluster_manager_ = std::make_unique( - parseBootstrapFromV2Yaml(yaml_config), factory_, factory_.stats_, factory_.tls_, - factory_.runtime_, factory_.random_, factory_.local_info_, log_manager_, - factory_.dispatcher_, admin_, validation_context_, *api_, http_context_, grpc_context_); + bootstrap, factory_, factory_.stats_, factory_.tls_, factory_.runtime_, factory_.random_, + factory_.local_info_, log_manager_, factory_.dispatcher_, admin_, validation_context_, + *api_, http_context_, grpc_context_); + cluster_manager_->initializeSecondaryClusters(bootstrap); EXPECT_EQ(cluster_manager_->activeClusters().size(), 1); cluster_ = cluster_manager_->get("aggregate_cluster"); } @@ -257,10 +259,12 @@ TEST_F(AggregateClusterUpdateTest, InitializeAggregateClusterAfterOtherClusters) - secondary )EOF"; + auto bootstrap = parseBootstrapFromV2Yaml(config); cluster_manager_ = std::make_unique( - parseBootstrapFromV2Yaml(config), factory_, factory_.stats_, factory_.tls_, factory_.runtime_, - factory_.random_, factory_.local_info_, log_manager_, factory_.dispatcher_, admin_, - validation_context_, *api_, http_context_, grpc_context_); + bootstrap, factory_, factory_.stats_, factory_.tls_, factory_.runtime_, factory_.random_, + factory_.local_info_, log_manager_, factory_.dispatcher_, admin_, validation_context_, *api_, + http_context_, grpc_context_); + cluster_manager_->initializeSecondaryClusters(bootstrap); EXPECT_EQ(cluster_manager_->activeClusters().size(), 2); cluster_ = cluster_manager_->get("aggregate_cluster"); auto primary = cluster_manager_->get("primary"); diff --git a/test/integration/ads_integration_test.cc b/test/integration/ads_integration_test.cc index 42de78c81fae..9153bce772a0 100644 --- 
a/test/integration/ads_integration_test.cc +++ b/test/integration/ads_integration_test.cc @@ -889,4 +889,107 @@ TEST_P(AdsClusterFromFileIntegrationTest, BasicTestWidsAdsEndpointLoadedFromFile {"ads_eds_cluster"}, {}, {})); } +class AdsIntegrationTestWithRtds : public AdsIntegrationTest { +public: + AdsIntegrationTestWithRtds() = default; + + void initialize() override { + config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + auto* layered_runtime = bootstrap.mutable_layered_runtime(); + auto* layer = layered_runtime->add_layers(); + layer->set_name("foobar"); + auto* rtds_layer = layer->mutable_rtds_layer(); + rtds_layer->set_name("ads_rtds_layer"); + auto* rtds_config = rtds_layer->mutable_rtds_config(); + rtds_config->mutable_ads(); + + auto* ads_config = bootstrap.mutable_dynamic_resources()->mutable_ads_config(); + ads_config->set_set_node_on_first_message_only(true); + }); + AdsIntegrationTest::initialize(); + } + + void testBasicFlow() { + // Test that runtime discovery request comes first and cluster discovery request comes after + // runtime was loaded. + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Runtime, "", {"ads_rtds_layer"}, + {"ads_rtds_layer"}, {}, true)); + auto some_rtds_layer = TestUtility::parseYaml(R"EOF( + name: ads_rtds_layer + layer: + foo: bar + baz: meh + )EOF"); + sendDiscoveryResponse( + Config::TypeUrl::get().Runtime, {some_rtds_layer}, {some_rtds_layer}, {}, "1"); + + test_server_->waitForCounterGe("runtime.load_success", 1); + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "", {}, {}, {}, false)); + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Runtime, "1", {"ads_rtds_layer"}, {}, + {}, false)); + } +}; + +INSTANTIATE_TEST_SUITE_P(IpVersionsClientTypeDelta, AdsIntegrationTestWithRtds, + DELTA_SOTW_GRPC_CLIENT_INTEGRATION_PARAMS); + +TEST_P(AdsIntegrationTestWithRtds, Basic) { + initialize(); + testBasicFlow(); +} + +class AdsIntegrationTestWithRtdsAndSecondaryClusters : public AdsIntegrationTestWithRtds { +public: + AdsIntegrationTestWithRtdsAndSecondaryClusters() = default; + + void initialize() override { + config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + // Add secondary cluster to the list of static resources. + auto* eds_cluster = bootstrap.mutable_static_resources()->add_clusters(); + eds_cluster->set_name("eds_cluster"); + eds_cluster->set_type(envoy::config::cluster::v3::Cluster::EDS); + auto* eds_cluster_config = eds_cluster->mutable_eds_cluster_config(); + eds_cluster_config->mutable_eds_config()->mutable_ads(); + }); + AdsIntegrationTestWithRtds::initialize(); + } + + void testBasicFlow() { + // Test that runtime discovery request comes first followed by the cluster load assignment + // discovery request for secondary cluster and then CDS discovery request. 
+ EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Runtime, "", {"ads_rtds_layer"}, + {"ads_rtds_layer"}, {}, true)); + auto some_rtds_layer = TestUtility::parseYaml(R"EOF( + name: ads_rtds_layer + layer: + foo: bar + baz: meh + )EOF"); + sendDiscoveryResponse( + Config::TypeUrl::get().Runtime, {some_rtds_layer}, {some_rtds_layer}, {}, "1"); + + test_server_->waitForCounterGe("runtime.load_success", 1); + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, "", + {"eds_cluster"}, {"eds_cluster"}, {}, false)); + sendDiscoveryResponse( + Config::TypeUrl::get().ClusterLoadAssignment, {buildClusterLoadAssignment("eds_cluster")}, + {buildClusterLoadAssignment("eds_cluster")}, {}, "1"); + + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Runtime, "1", {"ads_rtds_layer"}, {}, + {}, false)); + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "", {}, {}, {}, false)); + sendDiscoveryResponse( + Config::TypeUrl::get().Cluster, {buildCluster("cluster_0")}, {buildCluster("cluster_0")}, + {}, "1"); + } +}; + +INSTANTIATE_TEST_SUITE_P(IpVersionsClientTypeDelta, AdsIntegrationTestWithRtdsAndSecondaryClusters, + DELTA_SOTW_GRPC_CLIENT_INTEGRATION_PARAMS); + +TEST_P(AdsIntegrationTestWithRtdsAndSecondaryClusters, Basic) { + initialize(); + testBasicFlow(); +} + } // namespace Envoy diff --git a/test/integration/rtds_integration_test.cc b/test/integration/rtds_integration_test.cc index b68124c563db..3456a3ad9dd4 100644 --- a/test/integration/rtds_integration_test.cc +++ b/test/integration/rtds_integration_test.cc @@ -15,6 +15,7 @@ std::string tdsBootstrapConfig(absl::string_view api_type) { static_resources: clusters: - name: dummy_cluster + http2_protocol_options: {{}} load_assignment: cluster_name: dummy_cluster endpoints: @@ -86,25 +87,19 @@ class RtdsIntegrationTest : public Grpc::DeltaSotwIntegrationParamTest, public H setUpstreamCount(1); setUpstreamProtocol(FakeHttpConnection::Type::HTTP2); HttpIntegrationTest::initialize(); - // Initial RTDS connection. - createXdsConnection(); - AssertionResult result = xds_connection_->waitForNewStream(*dispatcher_, xds_stream_); - RELEASE_ASSERT(result, result.message()); - xds_stream_->startGrpcStream(); // Register admin port. registerTestServerPorts({}); initial_load_success_ = test_server_->counter("runtime.load_success")->value(); initial_keys_ = test_server_->gauge("runtime.num_keys")->value(); + fake_upstreams_[0]->set_allow_unexpected_disconnects(true); } void acceptXdsConnection() { - AssertionResult result = // xds_connection_ is filled with the new FakeHttpConnection. - fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, xds_connection_); - RELEASE_ASSERT(result, result.message()); - result = xds_connection_->waitForNewStream(*dispatcher_, xds_stream_); + // Initial RTDS connection. 
+ createXdsConnection(); + AssertionResult result = xds_connection_->waitForNewStream(*dispatcher_, xds_stream_); RELEASE_ASSERT(result, result.message()); xds_stream_->startGrpcStream(); - fake_upstreams_[0]->set_allow_unexpected_disconnects(true); } std::string getRuntimeKey(const std::string& key) { @@ -129,6 +124,7 @@ INSTANTIATE_TEST_SUITE_P(IpVersionsClientTypeDelta, RtdsIntegrationTest, TEST_P(RtdsIntegrationTest, RtdsReload) { initialize(); + acceptXdsConnection(); EXPECT_EQ("whatevs", getRuntimeKey("foo")); EXPECT_EQ("yar", getRuntimeKey("bar")); @@ -176,5 +172,64 @@ TEST_P(RtdsIntegrationTest, RtdsReload) { EXPECT_EQ(3, test_server_->gauge("runtime.num_layers")->value()); } +// Verify that RTDS initialization starts only after initialization of all primary clusters has +// completed. Primary cluster initialization completes asynchronously when some of the clusters use +// DNS for endpoint discovery or when health check is configured. +// This test uses health checking of the first cluster to make primary cluster initialization to +// complete asynchronously. +TEST_P(RtdsIntegrationTest, RtdsAfterAsyncPrimaryClusterInitialization) { + config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + // Enable health checking for the first cluster. + auto* dummy_cluster = bootstrap.mutable_static_resources()->mutable_clusters(0); + auto* health_check = dummy_cluster->add_health_checks(); + health_check->mutable_timeout()->set_seconds(30); + health_check->mutable_interval()->CopyFrom( + Protobuf::util::TimeUtil::MillisecondsToDuration(100)); + health_check->mutable_no_traffic_interval()->CopyFrom( + Protobuf::util::TimeUtil::MillisecondsToDuration(100)); + health_check->mutable_unhealthy_threshold()->set_value(1); + health_check->mutable_healthy_threshold()->set_value(1); + health_check->mutable_http_health_check()->set_path("/healthcheck"); + health_check->mutable_http_health_check()->set_codec_client_type( + envoy::type::v3::CodecClientType::HTTP2); + }); + + initialize(); + + // Make sure statically provisioned runtime values were loaded. + EXPECT_EQ("whatevs", getRuntimeKey("foo")); + EXPECT_EQ("yar", getRuntimeKey("bar")); + EXPECT_EQ("", getRuntimeKey("baz")); + + // Respond to the initial health check, which should complete initialization of primary clusters. + waitForNextUpstreamRequest(); + upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, true); + test_server_->waitForGaugeEq("cluster.dummy_cluster.membership_healthy", 1); + + // After this xDS connection should be established. Verify that dynamic runtime values are loaded. 
+ acceptXdsConnection(); + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Runtime, "", {"some_rtds_layer"}, + {"some_rtds_layer"}, {}, true)); + auto some_rtds_layer = TestUtility::parseYaml(R"EOF( + name: some_rtds_layer + layer: + foo: bar + baz: meh + )EOF"); + sendDiscoveryResponse( + Config::TypeUrl::get().Runtime, {some_rtds_layer}, {some_rtds_layer}, {}, "1"); + test_server_->waitForCounterGe("runtime.load_success", initial_load_success_ + 1); + + EXPECT_EQ("bar", getRuntimeKey("foo")); + EXPECT_EQ("yar", getRuntimeKey("bar")); + EXPECT_EQ("meh", getRuntimeKey("baz")); + + EXPECT_EQ(0, test_server_->counter("runtime.load_error")->value()); + EXPECT_EQ(initial_load_success_ + 1, test_server_->counter("runtime.load_success")->value()); + EXPECT_EQ(initial_keys_ + 1, test_server_->gauge("runtime.num_keys")->value()); + EXPECT_EQ(3, test_server_->gauge("runtime.num_layers")->value()); + cleanupUpstreamAndDownstream(); +} + } // namespace } // namespace Envoy diff --git a/test/mocks/runtime/mocks.h b/test/mocks/runtime/mocks.h index 532c5650e3a1..d73bb3eb5317 100644 --- a/test/mocks/runtime/mocks.h +++ b/test/mocks/runtime/mocks.h @@ -74,6 +74,7 @@ class MockLoader : public Loader { MOCK_METHOD(const Snapshot&, snapshot, ()); MOCK_METHOD(std::shared_ptr, threadsafeSnapshot, ()); MOCK_METHOD(void, mergeValues, ((const std::unordered_map&))); + MOCK_METHOD(void, startRtdsSubscriptions, (ReadyCallback)); testing::NiceMock snapshot_; }; diff --git a/test/mocks/upstream/mocks.h b/test/mocks/upstream/mocks.h index 9a8ca01b00f8..9ee59ff15d20 100644 --- a/test/mocks/upstream/mocks.h +++ b/test/mocks/upstream/mocks.h @@ -308,7 +308,10 @@ class MockClusterManager : public ClusterManager { MOCK_METHOD(bool, addOrUpdateCluster, (const envoy::config::cluster::v3::Cluster& cluster, const std::string& version_info)); - MOCK_METHOD(void, setInitializedCb, (std::function)); + MOCK_METHOD(void, setPrimaryClustersInitializedCb, (PrimaryClustersReadyCallback)); + MOCK_METHOD(void, setInitializedCb, (InitializationCompleteCallback)); + MOCK_METHOD(void, initializeSecondaryClusters, + (const envoy::config::bootstrap::v3::Bootstrap& bootstrap)); MOCK_METHOD(ClusterInfoMap, clusters, ()); MOCK_METHOD(ThreadLocalCluster*, get, (absl::string_view cluster)); MOCK_METHOD(Http::ConnectionPool::Instance*, httpConnPoolForCluster, diff --git a/test/server/server_test.cc b/test/server/server_test.cc index 38d753a559da..4a12a62dc3a2 100644 --- a/test/server/server_test.cc +++ b/test/server/server_test.cc @@ -729,7 +729,6 @@ TEST_P(ServerInstanceImplTest, BootstrapRuntime) { EXPECT_EQ("bar", server_->runtime().snapshot().get("foo").value().get()); // This should access via the override/some_service overlay. 
EXPECT_EQ("fozz", server_->runtime().snapshot().get("fizz").value().get()); - EXPECT_EQ("foobar", server_->runtime().snapshot().getLayers()[3]->name()); } // Validate that a runtime absent an admin layer will fail mutating operations @@ -748,6 +747,22 @@ TEST_P(ServerInstanceImplTest, RuntimeNoAdminLayer) { EXPECT_EQ("No admin layer specified", response_body); } +// Verify that bootstrap fails if RTDS is configured through an EDS cluster +TEST_P(ServerInstanceImplTest, BootstrapRtdsThroughEdsFails) { + options_.service_cluster_name_ = "some_service"; + options_.service_node_name_ = "some_node_name"; + EXPECT_THROW_WITH_REGEX(initialize("test/server/test_data/server/runtime_bootstrap_eds.yaml"), + EnvoyException, "must have a statically defined non-EDS cluster"); +} + +// Verify that bootstrap fails if RTDS is configured through an ADS using EDS cluster +TEST_P(ServerInstanceImplTest, BootstrapRtdsThroughAdsViaEdsFails) { + options_.service_cluster_name_ = "some_service"; + options_.service_node_name_ = "some_node_name"; + EXPECT_THROW_WITH_REGEX(initialize("test/server/test_data/server/runtime_bootstrap_ads_eds.yaml"), + EnvoyException, "Unknown gRPC client cluster"); +} + TEST_P(ServerInstanceImplTest, DEPRECATED_FEATURE_TEST(InvalidLegacyBootstrapRuntime)) { EXPECT_THROW_WITH_MESSAGE( initialize("test/server/test_data/server/invalid_runtime_bootstrap.yaml"), EnvoyException, diff --git a/test/server/test_data/server/runtime_bootstrap.yaml b/test/server/test_data/server/runtime_bootstrap.yaml index ab26028ef183..e92c3fd5a903 100644 --- a/test/server/test_data/server/runtime_bootstrap.yaml +++ b/test/server/test_data/server/runtime_bootstrap.yaml @@ -7,12 +7,3 @@ layered_runtime: disk_layer: { symlink_root: {{ test_rundir }}/test/server/test_data/runtime/primary } - name: overlay_disk_layer disk_layer: { symlink_root: {{ test_rundir }}/test/server/test_data/runtime/override, append_service_cluster: true } - - name: foobar - rtds_layer: - name: foobar - rtds_config: - api_config_source: - api_type: GRPC - grpc_services: - envoy_grpc: - cluster_name: xds_cluster diff --git a/test/server/test_data/server/runtime_bootstrap_ads_eds.yaml b/test/server/test_data/server/runtime_bootstrap_ads_eds.yaml new file mode 100644 index 000000000000..9bd1730bf927 --- /dev/null +++ b/test/server/test_data/server/runtime_bootstrap_ads_eds.yaml @@ -0,0 +1,38 @@ +static_resources: + clusters: + - name: dummy_cluster + connect_timeout: 1s + load_assignment: + cluster_name: dummy_cluster + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: {{ ntop_ip_loopback_address }} + port_value: 0 + - name: ads_cluster + connect_timeout: 1s + type: EDS + eds_cluster_config: + eds_config: + api_config_source: + api_type: GRPC + grpc_services: + envoy_grpc: + cluster_name: "dummy_cluster" +dynamic_resources: + ads_config: + api_type: GRPC + grpc_services: + envoy_grpc: + cluster_name: ads_cluster + set_node_on_first_message_only: true +layered_runtime: + layers: + - name: foobar + rtds_layer: + name: foobar + rtds_config: + ads: {} + diff --git a/test/server/test_data/server/runtime_bootstrap_eds.yaml b/test/server/test_data/server/runtime_bootstrap_eds.yaml new file mode 100644 index 000000000000..c74b692288e1 --- /dev/null +++ b/test/server/test_data/server/runtime_bootstrap_eds.yaml @@ -0,0 +1,35 @@ +static_resources: + clusters: + - name: dummy_cluster + connect_timeout: 1s + load_assignment: + cluster_name: dummy_cluster + endpoints: + - lb_endpoints: + - endpoint: + address: + 
socket_address: + address: {{ ntop_ip_loopback_address }} + port_value: 0 + - name: rtds_cluster + connect_timeout: 1s + type: EDS + eds_cluster_config: + eds_config: + api_config_source: + api_type: GRPC + grpc_services: + envoy_grpc: + cluster_name: "dummy_cluster" +layered_runtime: + layers: + - name: foobar + rtds_layer: + name: foobar + rtds_config: + api_config_source: + api_type: GRPC + grpc_services: + envoy_grpc: + cluster_name: rtds_cluster + diff --git a/test/test_common/test_runtime.h b/test/test_common/test_runtime.h index 93bc51876dde..0532b5529f9f 100644 --- a/test/test_common/test_runtime.h +++ b/test/test_common/test_runtime.h @@ -35,8 +35,8 @@ class TestScopedRuntime { config.add_layers()->mutable_admin_layer(); loader_ = std::make_unique( - std::make_unique(dispatcher_, tls_, config, local_info_, init_manager_, - store_, generator_, validation_visitor_, *api_)); + std::make_unique(dispatcher_, tls_, config, local_info_, store_, + generator_, validation_visitor_, *api_)); } private: @@ -46,7 +46,6 @@ class TestScopedRuntime { Runtime::MockRandomGenerator generator_; Api::ApiPtr api_; testing::NiceMock local_info_; - Init::MockManager init_manager_; testing::NiceMock validation_visitor_; std::unique_ptr loader_; }; From 311aee4cbe2b8779070f2f0729dfa6ae4f2dc80e Mon Sep 17 00:00:00 2001 From: htuch Date: Wed, 6 May 2020 13:07:59 -0400 Subject: [PATCH 103/909] api: clarify that v2 is removed at EOY 2020 regardless. (#11083) There was somewhat ambiguous wording after recent changes to the major versioning policy. Signed-off-by: Harvey Tuch --- api/API_VERSIONING.md | 8 +++++--- docs/root/faq/api/envoy_v2_support.rst | 6 ++++++ docs/root/faq/overview.rst | 1 + 3 files changed, 12 insertions(+), 3 deletions(-) create mode 100644 docs/root/faq/api/envoy_v2_support.rst diff --git a/api/API_VERSIONING.md b/api/API_VERSIONING.md index 52b0ea04c751..25e80aaa8407 100644 --- a/api/API_VERSIONING.md +++ b/api/API_VERSIONING.md @@ -95,8 +95,9 @@ Envoy will support at most three major versions of any API package at all times: * The previous stable major version, e.g. v2. This is needed to ensure that we provide at least 1 year for a supported major version to sunset. By supporting two stable major versions simultaneously, this makes it easier to coordinate control plane and Envoy - rollouts as well. This previous stable major version will be supported for 1 - year after the introduction of the new current stable major version. + rollouts as well. This previous stable major version will be supported for exactly 1 + year after the introduction of the new current stable major version, after which it will be + removed from the Envoy implementation. * Optionally, the next experimental alpha major version, e.g. v4alpha. This is a release candidate for the next stable major version. This is only generated when the current stable major version requires a breaking change at the next cycle, e.g. a deprecation or field rename. This release @@ -113,7 +114,8 @@ major version and support for `envoy.config.bootstrap.v2` will be dropped from t implementation. Note that some transitively referenced package, e.g. `envoy.config.filter.network.foo.v2` may remain at version 2 during this release, if no changes were made to the referenced package. If no major version is justified at this point, the decision to cut -v4 might occur at some point in 2021 or beyond. +v4 might occur at some point in 2021 or beyond, however v2 support will still be removed at the end +of 2020. 
The implication of this API lifecycle and clock is that any deprecated feature in the Envoy API
will retain implementation support for at least 1-2 years.

diff --git a/docs/root/faq/api/envoy_v2_support.rst b/docs/root/faq/api/envoy_v2_support.rst
new file mode 100644
index 000000000000..f9a2f9778646
--- /dev/null
+++ b/docs/root/faq/api/envoy_v2_support.rst
@@ -0,0 +1,6 @@
+How long will the v2 APIs be supported?
+=======================================
+
+The v2 xDS APIs are deprecated and will be removed from Envoy at the end of 2020, as per the
+:repo:`API versioning policy
`. + diff --git a/docs/root/faq/overview.rst b/docs/root/faq/overview.rst index b3d9de193235..c3a32d963c52 100644 --- a/docs/root/faq/overview.rst +++ b/docs/root/faq/overview.rst @@ -17,6 +17,7 @@ API .. toctree:: :maxdepth: 2 + api/envoy_v2_support api/envoy_v3 api/envoy_upgrade_v3 api/extensions From 612b15f9a2ba286c80ddc51f0392d7ff2cdbdfab Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Wed, 6 May 2020 15:07:32 -0400 Subject: [PATCH 104/909] http: moving HTTP/1.1 watermark logic into the codec (#10978) This gets rid of the read enable loop and ensures that entities only undo read disable calls they instantiated. It has the advantage of greatly simplifying envoyproxy/envoy-setec#100 (which we agreed we can fix in the open) Risk Level: quite high (watermark code) Testing: updated unit tests, leveraging existing HTTP/1.1 pipeline tests Docs Changes: n/a Release Notes: n/a Signed-off-by: Alyssa Wilk --- source/common/http/conn_manager_impl.cc | 13 ---------- source/common/http/http1/codec_impl.cc | 29 ++++++++++++---------- source/common/http/http1/codec_impl.h | 27 +++++++++++++++----- test/common/http/conn_manager_impl_test.cc | 8 ------ test/common/http/http1/codec_impl_test.cc | 14 +++++------ test/integration/fake_upstream.cc | 26 ++++++++++++++++++- 6 files changed, 69 insertions(+), 48 deletions(-) diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index f36d5976e2f5..863f40f9bac5 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -219,15 +219,6 @@ void ConnectionManagerImpl::doEndStream(ActiveStream& stream) { } checkForDeferredClose(); - - // Reading may have been disabled for the non-multiplexing case, so enable it again. - // Also be sure to unwind any read-disable done by the prior downstream - // connection. 
- if (drain_state_ != DrainState::Closing && codec_->protocol() < Protocol::Http2) { - while (!read_callbacks_->connection().readEnabled()) { - read_callbacks_->connection().readDisable(false); - } - } } void ConnectionManagerImpl::doDeferredStreamDestroy(ActiveStream& stream) { @@ -348,10 +339,6 @@ Network::FilterStatus ConnectionManagerImpl::onData(Buffer::Instance& data, bool data.length() > 0 && streams_.empty()) { redispatch = true; } - - if (!streams_.empty() && streams_.front()->state_.remote_complete_) { - read_callbacks_->connection().readDisable(true); - } } } while (redispatch); diff --git a/source/common/http/http1/codec_impl.cc b/source/common/http/http1/codec_impl.cc index d4aacd3a8ccc..658596cc6057 100644 --- a/source/common/http/http1/codec_impl.cc +++ b/source/common/http/http1/codec_impl.cc @@ -304,7 +304,17 @@ void StreamEncoderImpl::resetStream(StreamResetReason reason) { connection_.onResetStreamBase(reason); } -void StreamEncoderImpl::readDisable(bool disable) { connection_.readDisable(disable); } +void StreamEncoderImpl::readDisable(bool disable) { + if (disable) { + ++read_disable_calls_; + } else { + ASSERT(read_disable_calls_ != 0); + if (read_disable_calls_ != 0) { + --read_disable_calls_; + } + } + connection_.readDisable(disable); +} uint32_t StreamEncoderImpl::bufferLimit() { return connection_.bufferLimit(); } @@ -322,7 +332,7 @@ void ResponseEncoderImpl::encodeHeaders(const ResponseHeaderMap& headers, bool e ASSERT(headers.Status() != nullptr); uint64_t numeric_status = Utility::getResponseStatus(headers); - if (connection_.protocol() == Protocol::Http10 && connection_.supports_http_10()) { + if (connection_.protocol() == Protocol::Http10 && connection_.supportsHttp10()) { connection_.copyToBuffer(HTTP_10_RESPONSE_PREFIX, sizeof(HTTP_10_RESPONSE_PREFIX) - 1); } else { connection_.copyToBuffer(RESPONSE_PREFIX, sizeof(RESPONSE_PREFIX) - 1); @@ -901,6 +911,10 @@ void ServerConnectionImpl::onMessageComplete() { ASSERT(!handling_upgrade_); if (active_request_.has_value()) { auto& active_request = active_request_.value(); + + if (active_request.request_decoder_) { + active_request.response_encoder_.readDisable(true); + } active_request.remote_complete_ = true; if (deferred_end_stream_headers_) { active_request.request_decoder_->decodeHeaders( @@ -1088,17 +1102,6 @@ void ClientConnectionImpl::onMessageComplete() { // be reset just yet. Preserve the state in pending_response_done_ instead. pending_response_done_ = true; - // Streams are responsible for unwinding any outstanding readDisable(true) - // calls done on the underlying connection as they are destroyed. As this is - // the only place a HTTP/1 stream is destroyed where the Network::Connection is - // reused, unwind any outstanding readDisable() calls here. Do this before we dispatch - // end_stream in case the caller immediately reuses the connection. 
- if (connection_.state() == Network::Connection::State::Open) { - while (!connection_.readEnabled()) { - connection_.readDisable(false); - } - } - if (deferred_end_stream_headers_) { response.decoder_->decodeHeaders( std::move(absl::get(headers_or_trailers_)), true); diff --git a/source/common/http/http1/codec_impl.h b/source/common/http/http1/codec_impl.h index d3fa827e029a..8b53d4c374f5 100644 --- a/source/common/http/http1/codec_impl.h +++ b/source/common/http/http1/codec_impl.h @@ -53,6 +53,12 @@ class StreamEncoderImpl : public virtual StreamEncoder, public StreamCallbackHelper, public Http1StreamEncoderOptions { public: + ~StreamEncoderImpl() override { + // When the stream goes away, undo any read blocks to resume reading. + while (read_disable_calls_ != 0) { + StreamEncoderImpl::readDisable(false); + } + } // Http::StreamEncoder void encodeData(Buffer::Instance& data, bool end_stream) override; void encodeMetadata(const MetadataMapVector&) override; @@ -77,6 +83,8 @@ class StreamEncoderImpl : public virtual StreamEncoder, void setIsResponseToConnectRequest(bool value) { is_response_to_connect_request_ = value; } void setDetails(absl::string_view details) { details_ = details; } + void clearReadDisableCallsForTests() { read_disable_calls_ = 0; } + protected: StreamEncoderImpl(ConnectionImpl& connection, HeaderKeyFormatter* header_key_formatter); void setIsContentLengthAllowed(bool value) { is_content_length_allowed_ = value; } @@ -87,6 +95,7 @@ class StreamEncoderImpl : public virtual StreamEncoder, static const std::string LAST_CHUNK; ConnectionImpl& connection_; + uint32_t read_disable_calls_{}; bool disable_chunk_encoding_ : 1; bool chunk_encoding_ : 1; bool processing_100_continue_ : 1; @@ -198,9 +207,13 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable& activeRequest() { return active_request_; } + // ConnectionImpl + void onMessageComplete() override; +private: /** * Manipulate the request's first line, parsing the url and converting to a relative path if * necessary. Compute Host / :authority headers based on 7230#5.7 and 7230#6 @@ -445,7 +461,6 @@ class ServerConnectionImpl : public ServerConnection, public ConnectionImpl { // If upgrade behavior is not allowed, the HCM will have sanitized the headers out. bool upgradeAllowed() const override { return true; } void onBody(Buffer::Instance& data) override; - void onMessageComplete() override; void onResetStream(StreamResetReason reason) override; void sendProtocolError(absl::string_view details) override; void onAboveHighWatermark() override; diff --git a/test/common/http/conn_manager_impl_test.cc b/test/common/http/conn_manager_impl_test.cc index aab9651f96d6..a537ac211e0d 100644 --- a/test/common/http/conn_manager_impl_test.cc +++ b/test/common/http/conn_manager_impl_test.cc @@ -3949,14 +3949,6 @@ TEST_F(HttpConnectionManagerImplTest, UpstreamWatermarkCallbacks) { EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, true)); EXPECT_CALL(*encoder_filters_[1], encodeComplete()); EXPECT_CALL(response_encoder_, encodeHeaders(_, true)); - // When the stream ends, the manager should check to see if the connection is - // read disabled, and keep calling readDisable(false) until readEnabled() - // returns true. 
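The expectations deleted below covered exactly the unwind loop this change removes. With the new codec-level bookkeeping, each HTTP/1 stream counts its own readDisable(true) calls, undoes only those when it is destroyed, and the server codec read-disables the connection itself once a request is complete. A small self-contained sketch of the counting pattern, using toy types rather than the real codec or Network::Connection:

#include <cassert>
#include <cstdint>
#include <iostream>

// Toy connection: tracks a nesting count of read-disable requests.
class ToyConnection {
public:
  void readDisable(bool disable) {
    if (disable) {
      ++disable_count_;
    } else {
      --disable_count_;
    }
  }
  bool readEnabled() const { return disable_count_ == 0; }

private:
  int disable_count_{0};
};

// Mirrors the idea behind StreamEncoderImpl's read_disable_calls_: remember how
// many read-disables this stream issued and unwind only those on destruction.
class ToyStreamEncoder {
public:
  explicit ToyStreamEncoder(ToyConnection& connection) : connection_(connection) {}
  ~ToyStreamEncoder() {
    while (read_disable_calls_ != 0) {
      readDisable(false);
    }
  }

  void readDisable(bool disable) {
    if (disable) {
      ++read_disable_calls_;
    } else {
      assert(read_disable_calls_ != 0);
      --read_disable_calls_;
    }
    connection_.readDisable(disable);
  }

private:
  ToyConnection& connection_;
  uint32_t read_disable_calls_{0};
};

int main() {
  ToyConnection connection;
  connection.readDisable(true); // disabled by some other component
  {
    ToyStreamEncoder stream(connection);
    stream.readDisable(true); // the stream applies its own backpressure
    std::cout << "read enabled while stream blocked: " << connection.readEnabled() << "\n"; // 0
  }
  // The stream destructor undid only its own call; the other component's
  // read-disable is still in effect.
  std::cout << "read enabled after stream destroyed: " << connection.readEnabled() << "\n"; // 0
  return 0;
}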
- EXPECT_CALL(filter_callbacks_.connection_, readEnabled()) - .Times(2) - .WillOnce(Return(false)) - .WillRepeatedly(Return(true)); - EXPECT_CALL(filter_callbacks_.connection_, readDisable(false)); expectOnDestroy(); decoder_filters_[1]->callbacks_->encodeHeaders( ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true); diff --git a/test/common/http/http1/codec_impl_test.cc b/test/common/http/http1/codec_impl_test.cc index b07b812df650..ca755e94dc5e 100644 --- a/test/common/http/http1/codec_impl_test.cc +++ b/test/common/http/http1/codec_impl_test.cc @@ -1846,6 +1846,11 @@ TEST_F(Http1ClientConnectionImplTest, FlowControlReadDisabledReenable) { MockResponseDecoder response_decoder; Http::RequestEncoder* request_encoder = &codec_->newStream(response_decoder); + // Manually read disable. + EXPECT_CALL(connection_, readDisable(true)).Times(2); + RequestEncoderImpl* encoder = dynamic_cast(request_encoder); + encoder->readDisable(true); + encoder->readDisable(true); std::string output; ON_CALL(connection_, write(_, _)).WillByDefault(AddBufferToString(&output)); @@ -1856,13 +1861,8 @@ TEST_F(Http1ClientConnectionImplTest, FlowControlReadDisabledReenable) { EXPECT_EQ("GET / HTTP/1.1\r\nhost: host\r\ncontent-length: 0\r\n\r\n", output); output.clear(); - // Simulate the underlying connection being backed up. Ensure that it is - // read-enabled when the final response completes. - EXPECT_CALL(connection_, readEnabled()) - .Times(2) - .WillOnce(Return(false)) - .WillRepeatedly(Return(true)); - EXPECT_CALL(connection_, readDisable(false)); + // When the response is sent, the read disable should be unwound. + EXPECT_CALL(connection_, readDisable(false)).Times(2); // Response. EXPECT_CALL(response_decoder, decodeHeaders_(_, true)); diff --git a/test/integration/fake_upstream.cc b/test/integration/fake_upstream.cc index 8562edc01ca3..8b232ed351de 100644 --- a/test/integration/fake_upstream.cc +++ b/test/integration/fake_upstream.cc @@ -221,6 +221,30 @@ void FakeStream::finishGrpcStream(Grpc::Status::GrpcStatus status) { Http::TestHeaderMapImpl{{"grpc-status", std::to_string(static_cast(status))}}); } +// The TestHttp1ServerConnectionImpl outlives its underlying Network::Connection +// so must not access the Connection on teardown. To achieve this, clear the +// read disable calls to avoid checking / editing the Connection blocked state. +class TestHttp1ServerConnectionImpl : public Http::Http1::ServerConnectionImpl { +public: + using Http::Http1::ServerConnectionImpl::ServerConnectionImpl; + + void onMessageComplete() override { + ServerConnectionImpl::onMessageComplete(); + + if (activeRequest().has_value() && activeRequest().value().request_decoder_) { + // Undo the read disable from the base class - we have many tests which + // waitForDisconnect after a full request has been read which will not + // receive the disconnect if reading is disabled. 
+ activeRequest().value().response_encoder_.readDisable(false); + } + } + ~TestHttp1ServerConnectionImpl() override { + if (activeRequest().has_value()) { + activeRequest().value().response_encoder_.clearReadDisableCallsForTests(); + } + } +}; + FakeHttpConnection::FakeHttpConnection( SharedConnectionWrapper& shared_connection, Stats::Store& store, Type type, Event::TestTimeSystem& time_system, uint32_t max_request_headers_kb, @@ -232,7 +256,7 @@ FakeHttpConnection::FakeHttpConnection( Http::Http1Settings http1_settings; // For the purpose of testing, we always have the upstream encode the trailers if any http1_settings.enable_trailers_ = true; - codec_ = std::make_unique( + codec_ = std::make_unique( shared_connection_.connection(), store, *this, http1_settings, max_request_headers_kb, max_request_headers_count, headers_with_underscores_action); } else { From 62735bc93ef5019147472dfb156803e8d1ea5d63 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Wed, 6 May 2020 15:52:52 -0400 Subject: [PATCH 105/909] test: coverage improvements (#11019) Additional Description: this includes one minor refactor - we moved error handling to avoid creating multiple filter chains into createFilterChainn and didn't remove the error handling at once call site. Risk Level: low (one no-op refactor) Testing: yep Docs Changes: no Release Notes: no Signed-off-by: Alyssa Wilk --- source/common/http/conn_manager_impl.cc | 8 ++++---- test/common/http/codes_test.cc | 1 + test/common/http/conn_manager_impl_test.cc | 10 +++++++++- test/integration/integration_test.cc | 15 +++++++++++++++ 4 files changed, 29 insertions(+), 5 deletions(-) diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index 863f40f9bac5..eed3c641bf82 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -1493,10 +1493,10 @@ void ConnectionManagerImpl::ActiveStream::sendLocalReply( ENVOY_STREAM_LOG(debug, "Sending local reply with details {}", *this, details); ASSERT(response_headers_ == nullptr); // For early error handling, do a best-effort attempt to create a filter chain - // to ensure access logging. - if (!state_.created_filter_chain_) { - createFilterChain(); - } + // to ensure access logging. If the filter chain already exists this will be + // a no-op. 
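In other words, createFilterChain() is now safe to call unconditionally on the local-reply path because repeated calls are no-ops once the chain exists. A tiny sketch of that kind of create-once guard, illustrative only and not the connection manager's actual state handling:

#include <iostream>

class ToyStream {
public:
  // Safe to call from both the normal decode path and the early local-reply
  // path; the second and later calls do nothing.
  void createFilterChain() {
    if (created_filter_chain_) {
      return;
    }
    created_filter_chain_ = true;
    std::cout << "filter chain created\n";
  }

private:
  bool created_filter_chain_{false};
};

int main() {
  ToyStream stream;
  stream.createFilterChain(); // normal request handling
  stream.createFilterChain(); // best-effort call from a local reply: no-op
  return 0;
}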
+ createFilterChain(); + stream_info_.setResponseCodeDetails(details); Utility::sendLocalReply( is_grpc_request, diff --git a/test/common/http/codes_test.cc b/test/common/http/codes_test.cc index 136519f60c5a..9a071f8c122f 100644 --- a/test/common/http/codes_test.cc +++ b/test/common/http/codes_test.cc @@ -53,6 +53,7 @@ class CodeUtilityTest : public testing::Test { }; TEST_F(CodeUtilityTest, GroupStrings) { + EXPECT_EQ("1xx", CodeUtility::groupStringForResponseCode(Code::SwitchingProtocols)); EXPECT_EQ("2xx", CodeUtility::groupStringForResponseCode(Code::OK)); EXPECT_EQ("3xx", CodeUtility::groupStringForResponseCode(Code::Found)); EXPECT_EQ("4xx", CodeUtility::groupStringForResponseCode(Code::NotFound)); diff --git a/test/common/http/conn_manager_impl_test.cc b/test/common/http/conn_manager_impl_test.cc index a537ac211e0d..89ba46e56df4 100644 --- a/test/common/http/conn_manager_impl_test.cc +++ b/test/common/http/conn_manager_impl_test.cc @@ -2624,7 +2624,12 @@ TEST_F(HttpConnectionManagerImplTest, RejectWebSocketOnNonWebSocketRoute) { {":path", "/"}, {"connection", "Upgrade"}, {"upgrade", "websocket"}}}; - decoder->decodeHeaders(std::move(headers), true); + decoder->decodeHeaders(std::move(headers), false); + // Try sending trailers after the headers which will be rejected, just to + // test the HCM logic that further decoding will not be passed to the + // filters once the early response path is kicked off. + RequestTrailerMapPtr trailers{new TestRequestTrailerMapImpl{{"bazzz", "bar"}}}; + decoder->decodeTrailers(std::move(trailers)); data.drain(4); return Http::okStatus(); })); @@ -5057,6 +5062,9 @@ TEST(HttpConnectionManagerTracingStatsTest, verifyTracingStats) { ConnectionManagerImpl::chargeTracingStats(Tracing::Reason::NotTraceableRequestId, tracing_stats); EXPECT_EQ(1UL, tracing_stats.not_traceable_.value()); + + ConnectionManagerImpl::chargeTracingStats(Tracing::Reason::Sampling, tracing_stats); + EXPECT_EQ(1UL, tracing_stats.random_sampling_.value()); } TEST_F(HttpConnectionManagerImplTest, NoNewStreamWhenOverloaded) { diff --git a/test/integration/integration_test.cc b/test/integration/integration_test.cc index f359c82df7b9..ae11e48dafab 100644 --- a/test/integration/integration_test.cc +++ b/test/integration/integration_test.cc @@ -493,6 +493,21 @@ TEST_P(IntegrationTest, Http09Enabled) { EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("HTTP/1.0")); } +TEST_P(IntegrationTest, Http09WithKeepalive) { + useAccessLog(); + autonomous_upstream_ = true; + config_helper_.addConfigModifier(&setAllowHttp10WithDefaultHost); + initialize(); + reinterpret_cast(fake_upstreams_.front().get()) + ->setResponseHeaders(std::make_unique( + Http::TestHeaderMapImpl({{":status", "200"}, {"content-length", "0"}}))); + std::string response; + sendRawHttpAndWaitForResponse(lookupPort("http"), "GET /\r\nConnection: keep-alive\r\n\r\n", + &response, true); + EXPECT_THAT(response, HasSubstr("HTTP/1.0 200 OK\r\n")); + EXPECT_THAT(response, HasSubstr("connection: keep-alive\r\n")); +} + // Turn HTTP/1.0 support on and verify the request is proxied and the default host is sent upstream. TEST_P(IntegrationTest, Http10Enabled) { autonomous_upstream_ = true; From 09170bdbbc629df144b2bd4659c65c7899898848 Mon Sep 17 00:00:00 2001 From: htuch Date: Wed, 6 May 2020 16:34:46 -0400 Subject: [PATCH 106/909] ci: fork repokitteh ownerscheck.star. 
(#11090) As per @itayd suggestion, forking ownerscheck.star so that we can add some improvements (regex matching, disabling of global approval) to support v2 freeze enforcement. There are no functional changes in this PR, it's just the baseline for later work. Signed-off-by: Harvey Tuch --- ci/repokitteh/modules/ownerscheck.star | 224 +++++++++++++++++++++++++ repokitteh.star | 2 +- 2 files changed, 225 insertions(+), 1 deletion(-) create mode 100644 ci/repokitteh/modules/ownerscheck.star diff --git a/ci/repokitteh/modules/ownerscheck.star b/ci/repokitteh/modules/ownerscheck.star new file mode 100644 index 000000000000..2eb379a9ccfd --- /dev/null +++ b/ci/repokitteh/modules/ownerscheck.star @@ -0,0 +1,224 @@ +# Ownership specified by list of specs, like so: +# +# use( +# "github.com/repokitteh/modules/ownerscheck.star", +# paths=[ +# { +# "owner": "envoyproxy/api-shepherds!", +# "path": "api/", +# "label": "api", +# }, +# ], +# ) +# +# This module will maintain a commit status per specified path (also aka as spec). +# +# Two types of approvals: +# 1. Global approvals, done by approving the PR using Github's review approval feature. +# 2. Partial approval, done by commenting "/lgtm [label]" where label is the label +# associated with the path. This does not affect GitHub's PR approve status, only +# this module's maintained commit status. This approval is automatically revoked +# if any further changes are done to the relevant files in this spec. + +load("github.com/repokitteh/modules/lib/utils.star", "react") + +def _store_partial_approval(who, files): + for f in files: + store_put('ownerscheck/partial/%s:%s' % (who, f['filename']), f['sha']) + + +def _is_partially_approved(who, files): + for f in files: + sha = store_get('ownerscheck/partial/%s:%s' % (who, f['filename'])) + if sha != f['sha']: + return False + + return True + + +def _get_relevant_specs(specs, changed_files): + if not specs: + print("no specs") + return [] + + relevant = [] + + for spec in specs: + prefix = spec["path"] + + files = [f for f in changed_files if f['filename'].startswith(prefix)] + if files: + relevant.append(struct(files=files, prefix=prefix, **spec)) + + print("specs: %s" % relevant) + + return relevant + + +def _get_global_approvers(): # -> List[str] (owners) + reviews = [{'login': r['user']['login'], 'state': r['state']} for r in github.pr_list_reviews()] + + print("reviews=%s" % reviews) + + return [r['login'] for r in reviews if r['state'] == 'APPROVED'] + + +def _is_approved(spec, approvers): + owner = spec.owner + + if owner[-1] == '!': + owner = owner[:-1] + + required = [owner] + + if '/' in owner: + team_name = owner.split('/')[1] + + # this is a team, parse it. 
+ team_id = github.team_get_by_name(team_name)['id'] + required = [m['login'] for m in github.team_list_members(team_id)] + + print("team %s(%d) = %s" % (team_name, team_id, required)) + + for r in required: + if any([a for a in approvers if a == r]): + print("global approver: %s" % r) + return True + + if _is_partially_approved(r, spec.files): + print("partial approval: %s" % r) + return True + + return False + + +def _update_status(owner, prefix, approved): + github.create_status( + state=approved and 'success' or 'pending', + context='%s must approve' % owner, + description='changes to %s' % (prefix or '/'), + ) + +def _get_specs(config): + return _get_relevant_specs(config.get('paths', []), github.pr_list_files()) + +def _reconcile(config, specs=None): + specs = specs or _get_specs(config) + + if not specs: + return [] + + approvers = _get_global_approvers() + + print("approvers: %s" % approvers) + + results = [] + + for spec in specs: + approved = _is_approved(spec, approvers) + + print("%s -> %s" % (spec, approved)) + + results.append((spec, approved)) + + if spec.owner[-1] == '!': + _update_status(spec.owner[:-1], spec.prefix, approved) + + if hasattr(spec, 'label'): + if approved: + github.issue_unlabel(spec.label) + else: + github.issue_label(spec.label) + elif hasattr(spec, 'label'): # fyis + github.issue_label(spec.label) + + return results + + +def _comment(config, results, force=False): + lines = [] + + for spec, approved in results: + if approved: + continue + + mention = spec.owner + + if mention[0] != '@': + mention = '@' + mention + + if mention[-1] == '!': + mention = mention[:-1] + + prefix = spec.prefix + if prefix: + prefix = ' for changes made to `' + prefix + '`' + + mode = spec.owner[-1] == '!' and 'approval' or 'fyi' + + key = "ownerscheck/%s/%s" % (spec.owner, spec.prefix) + + if (not force) and (store_get(key) == mode): + mode = 'skip' + else: + store_put(key, mode) + + if mode == 'approval': + lines.append('CC %s: Your approval is needed%s.' % (mention, prefix)) + elif mode == 'fyi': + lines.append('CC %s: FYI only%s.' % (mention, prefix)) + + if lines: + github.issue_create_comment('\n'.join(lines)) + + +def _reconcile_and_comment(config): + _comment(config, _reconcile(config)) + + +def _force_reconcile_and_comment(config): + _comment(config, _reconcile(config), force=True) + + +def _pr(action, config): + if action in ['synchronize', 'opened']: + _reconcile_and_comment(config) + + +def _pr_review(action, review_state, config): + if action != 'submitted' or not review_state: + return + + _reconcile(config) + + +# Partial approvals are done by commenting "/lgtm [label]". 
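Concretely, a partial approval records the SHA of each changed file under the owned path for the approving user, and it stays valid only while every one of those SHAs is unchanged, so any later commit touching the files effectively revokes it. A compact sketch of that bookkeeping, written in C++ for consistency with the other examples here and with the store layout simplified relative to the module's store_put/store_get keys:

#include <iostream>
#include <map>
#include <string>
#include <vector>

struct ChangedFile {
  std::string filename;
  std::string sha;
};

// Key: "approver:filename" -> sha recorded at approval time.
using ApprovalStore = std::map<std::string, std::string>;

void storePartialApproval(ApprovalStore& store, const std::string& who,
                          const std::vector<ChangedFile>& files) {
  for (const auto& f : files) {
    store[who + ":" + f.filename] = f.sha;
  }
}

bool isPartiallyApproved(const ApprovalStore& store, const std::string& who,
                         const std::vector<ChangedFile>& files) {
  for (const auto& f : files) {
    const auto it = store.find(who + ":" + f.filename);
    if (it == store.end() || it->second != f.sha) {
      return false; // the file changed (or was never approved) since the /lgtm
    }
  }
  return true;
}

int main() {
  ApprovalStore store;
  std::vector<ChangedFile> files{{"api/foo.proto", "abc123"}};

  storePartialApproval(store, "api-shepherd", files);
  std::cout << isPartiallyApproved(store, "api-shepherd", files) << "\n"; // 1

  files[0].sha = "def456"; // a new commit touches the file
  std::cout << isPartiallyApproved(store, "api-shepherd", files) << "\n"; // 0, approval revoked
  return 0;
}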
+def _lgtm_by_comment(config, comment_id, command, sender, sha): + labels = command.args + + if len(labels) != 1: + react(comment_id, 'please specify a single label can be specified') + return + + label = labels[0] + + specs = [s for s in _get_specs(config) if hasattr(s, 'label') and s.label == label] + + if len(specs) == 0: + react(comment_id, 'no relevant owners for "%s"' % label) + return + + for spec in specs: + _store_partial_approval(sender, spec.files) + + react(comment_id, None) + + _reconcile(config, specs) + + +handlers.pull_request(func=_pr) +handlers.pull_request_review(func=_pr_review) + +handlers.command(name='checkowners', func=_reconcile) +handlers.command(name='checkowners!', func=_force_reconcile_and_comment) +handlers.command(name='lgtm', func=_lgtm_by_comment) diff --git a/repokitteh.star b/repokitteh.star index e115b2eae20b..a6c42ef909ec 100644 --- a/repokitteh.star +++ b/repokitteh.star @@ -5,7 +5,7 @@ use("github.com/repokitteh/modules/review.star") use("github.com/repokitteh/modules/wait.star") use("github.com/repokitteh/modules/circleci.star", secret_token=get_secret('circle_token')) use( - "github.com/repokitteh/modules/ownerscheck.star", + "github.com/envoyproxy/envoy/ci/repokitteh/modules/ownerscheck.star", paths=[ { "owner": "envoyproxy/api-shepherds!", From c89996bcbfecbeda49c587966b019c3296637837 Mon Sep 17 00:00:00 2001 From: Sunjay Bhatia Date: Wed, 6 May 2020 18:00:27 -0400 Subject: [PATCH 107/909] Windows build: Run CI via docker (#11070) Run Windows CI build in envoy-build-windows2019 docker container Additional Description: This will enable us to control dependencies more closely and enable us to provision CI workers outside of AZP hosted workers more easily Risk Level: Low Testing: N/A Docs Changes: N/A Release Notes: N/A Signed-off-by: Sunjay Bhatia Co-authored-by: William A Rowe Jr --- .azure-pipelines/pipelines.yml | 17 +-------------- ci/run_envoy_docker_windows.sh | 28 ++++++++++++++++++++++++ ci/windows_ci_setup.ps1 | 40 ---------------------------------- 3 files changed, 29 insertions(+), 56 deletions(-) create mode 100644 ci/run_envoy_docker_windows.sh delete mode 100644 ci/windows_ci_setup.ps1 diff --git a/.azure-pipelines/pipelines.yml b/.azure-pipelines/pipelines.yml index 3395229a721c..9f58e32fac0c 100644 --- a/.azure-pipelines/pipelines.yml +++ b/.azure-pipelines/pipelines.yml @@ -130,24 +130,9 @@ jobs: pool: vmImage: "windows-latest" steps: - - powershell: | - .\ci\windows_ci_setup.ps1 - Write-Host "##vso[task.prependpath]$env:TOOLS_BIN_DIR\usr\bin" - Write-Host "##vso[task.prependpath]$env:VC_TOOLS_BIN_X64;$env:VC_CMAKE_PATH\CMake\bin;$env:VC_CMAKE_PATH\Ninja" - Write-Host "##vso[task.prependpath]$env:TOOLS_BIN_DIR" - displayName: "Install dependencies" - env: - TOOLS_BIN_DIR: $(Pipeline.Workspace)\bin - VC_CMAKE_PATH: "C:\\Program Files (x86)\\Microsoft Visual Studio\\2019\\Enterprise\\Common7\\IDE\\CommonExtensions\\Microsoft\\CMake" - VC_TOOLS_BIN_X64: "C:\\Program Files (x86)\\Microsoft Visual Studio\\2019\\Enterprise\\VC\\Tools\\MSVC\\14.25.28610\\bin\\HostX64\\x64" - - - bash: ci/windows_ci_steps.sh + - bash: ci/run_envoy_docker_windows.sh ci/windows_ci_steps.sh displayName: "Run Windows CI" env: - BAZEL_VC: "C:\\Program Files (x86)\\Microsoft Visual Studio\\2019\\Enterprise\\VC" - BAZEL_SH: $(Pipeline.Workspace)/bin/usr/bin/bash.exe BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance GCP_SERVICE_ACCOUNT_KEY: 
$(GcpServiceAccountKey) - MSYS2_ARG_CONV_EXCL: "*" - TMPDIR: $(Agent.TempDirectory) diff --git a/ci/run_envoy_docker_windows.sh b/ci/run_envoy_docker_windows.sh new file mode 100644 index 000000000000..023234e21b71 --- /dev/null +++ b/ci/run_envoy_docker_windows.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +set -e + +# TODO(sunjayBhatia, wrowe): update this note once we have an RBE toolchain generated for Windows +# NOTE: Update this from the latest pushed image here: https://hub.docker.com/r/envoyproxy/envoy-build-windows2019/tags +ENVOY_BUILD_SHA="3cbc11e373dc4e3a523b9273ed010c5e0f197874" + +[[ -z "${IMAGE_NAME}" ]] && IMAGE_NAME="envoyproxy/envoy-build-windows2019" +# The IMAGE_ID defaults to the CI hash but can be set to an arbitrary image ID (found with 'docker +# images'). +[[ -z "${IMAGE_ID}" ]] && IMAGE_ID="${ENVOY_BUILD_SHA}" + +ENVOY_SOURCE_DIR=$(echo "${PWD}" | sed -E "s#/([a-zA-Z])/#\1:/#") + +[[ -f .git ]] && [[ ! -d .git ]] && GIT_VOLUME_OPTION="-v $(git rev-parse --git-common-dir):$(git rev-parse --git-common-dir)" + +[[ -t 1 ]] && DOCKER_TTY_OPTION=-it + +export ENVOY_BUILD_IMAGE="${IMAGE_NAME}:${IMAGE_ID}" + +# Since we specify an explicit hash, docker-run will pull from the remote repo if missing. +docker run --rm ${DOCKER_TTY_OPTION} -e HTTP_PROXY=${http_proxy} -e HTTPS_PROXY=${https_proxy} \ + ${GIT_VOLUME_OPTION} -e BAZEL_BUILD_EXTRA_OPTIONS -e BAZEL_EXTRA_TEST_OPTIONS -e BAZEL_REMOTE_CACHE \ + -e ENVOY_STDLIB -e BUILD_REASON -e BAZEL_REMOTE_INSTANCE -e GCP_SERVICE_ACCOUNT_KEY -e NUM_CPUS -e ENVOY_RBE \ + -e ENVOY_BUILD_IMAGE -e ENVOY_SRCDIR -e ENVOY_BUILD_TARGET -e SYSTEM_PULLREQUEST_TARGETBRANCH -v ${ENVOY_SOURCE_DIR}:C:/source \ + "${ENVOY_BUILD_IMAGE}" \ + bash -c "cd source && $*" diff --git a/ci/windows_ci_setup.ps1 b/ci/windows_ci_setup.ps1 deleted file mode 100644 index c39f2c083e22..000000000000 --- a/ci/windows_ci_setup.ps1 +++ /dev/null @@ -1,40 +0,0 @@ -# This file only installs dependencies needed in additio to Azure pipelines hosted image. -# The list of installed software can be found at: -# https://github.com/actions/virtual-environments/blob/master/images/win/Windows2019-Readme.md - -function Checksum -{ - param([string]$filepath, [string]$expected, [string]$algorithm) - - $actual = Get-FileHash -Path $filePath -Algorithm $algorithm; - if ($actual.Hash -eq $expected) { - Write-Host "$filepath is valid"; - } else { - Write-Host "$filepath is invalid, expected: $expected, but got: $actual"; - exit 1 - } -} - -mkdir "$env:TOOLS_BIN_DIR" -$wc = New-Object System.Net.WebClient -$wc.DownloadFile("https://github.com/bazelbuild/bazelisk/releases/download/v1.0/bazelisk-windows-amd64.exe", "$env:TOOLS_BIN_DIR\bazel.exe") -# See https://sourceforge.net/projects/msys2/files/Base/x86_64/ for msys2 download source -$wc.DownloadFile("http://repo.msys2.org/distrib/x86_64/msys2-base-x86_64-20190524.tar.xz", "$env:TEMP\msys2.tar.xz") - -# Check the SHA256 file hash of each downloaded file. 
-Checksum $env:TOOLS_BIN_DIR\bazel.exe 96395ee9e3fb9f4499fcaffa8a94dd72b0748f495f366bc4be44dbf09d6827fc SHA256 -Checksum $env:TEMP\msys2.tar.xz 168e156fa9f00d90a8445676c023c63be6e82f71487f4e2688ab5cb13b345383 SHA256 - -# Unpack and install msys2 and required packages -$tarpath="$env:ProgramFiles\Git\usr\bin\tar.exe" -$msys2TarPathClean = "/$env:TEMP/msys2.tar.xz".replace(':', '').replace('\', '/') -$outDirClean = "/$env:TOOLS_BIN_DIR".replace(':', '').replace('\', '/') -&"$tarpath" -Jxf $msys2TarPathClean -C $outDirClean --strip-components=1 -# Add utils to the path for msys2 setup -$env:PATH = "$env:TOOLS_BIN_DIR\usr\bin;$env:TOOLS_BIN_DIR\mingw64\bin;$env:PATH" -bash.exe -c "pacman-key --init 2>&1" -bash.exe -c "pacman-key --populate msys2 2>&1" -bash.exe -c "pacman.exe -Syyuu --noconfirm 2>&1" -bash.exe -c "pacman.exe -Syuu --noconfirm 2>&1" -bash.exe -c "pacman.exe -S --noconfirm --needed compression diffutils patch 2>&1" -bash.exe -c "pacman.exe -Scc --noconfirm 2>&1" From 02e8a26193a79d7c023648902ebd9eb00475a374 Mon Sep 17 00:00:00 2001 From: htuch Date: Thu, 7 May 2020 08:48:49 -0400 Subject: [PATCH 108/909] tools: fix comment reordering in merge_active_shadow.py. (#11075) Previously, we were resorting oneof fields (even when no deprecatinos occurred), causing comments to become out of whack. This is fine when consuming inside Envoy, but we end up using the shadows during Google import, so it's a real problem. This PR adds fixup to the FileDescriptorProto source code info, where the comments exist. Any time we need to reorder a shadowed oneof field, the corresponding path inside the source code info is fixed up. Risk level: Low Testing: Unit test added, manual inspection of route_components.proto. Signed-off-by: Harvey Tuch --- .../envoy/config/cluster/v3/cluster.proto | 184 +++--- .../envoy/config/core/v3/address.proto | 32 +- .../envoy/config/core/v3/base.proto | 30 +- .../envoy/config/core/v3/config_source.proto | 66 +-- .../envoy/config/core/v3/grpc_service.proto | 30 +- .../envoy/config/core/v3/health_check.proto | 116 ++-- .../envoy/config/core/v3/http_uri.proto | 30 +- .../envoy/config/core/v3/socket_option.proto | 12 +- .../endpoint/v3/endpoint_components.proto | 48 +- .../listener/v3/listener_components.proto | 10 +- .../envoy/config/metrics/v3/stats.proto | 78 +-- .../config/route/v3/route_components.proto | 558 +++++++++--------- .../data/core/v3/health_check_event.proto | 16 +- .../envoy/data/tap/v3/common.proto | 22 +- .../filters/common/fault/v3/fault.proto | 36 +- .../v3/adaptive_concurrency.proto | 10 +- .../filters/http/ext_authz/v3/ext_authz.proto | 40 +- .../filters/http/fault/v3/fault.proto | 14 +- .../grpc_json_transcoder/v3/transcoder.proto | 102 ++-- .../filters/http/grpc_stats/v3/config.proto | 26 +- .../filters/http/jwt_authn/v3/config.proto | 134 ++--- .../v3/http_connection_manager.proto | 124 ++-- .../network/tcp_proxy/v3/tcp_proxy.proto | 46 +- .../network/thrift_proxy/v3/route.proto | 106 ++-- .../transport_sockets/tls/v3/tls.proto | 58 +- .../envoy/type/matcher/v3/regex.proto | 8 +- .../envoy/type/matcher/v3/string.proto | 28 +- tools/protoxform/BUILD | 2 + tools/protoxform/merge_active_shadow.py | 109 +++- tools/protoxform/merge_active_shadow_test.py | 300 +++++++++- 30 files changed, 1362 insertions(+), 1013 deletions(-) diff --git a/generated_api_shadow/envoy/config/cluster/v3/cluster.proto b/generated_api_shadow/envoy/config/cluster/v3/cluster.proto index eaa40527c2c3..f512cbcc9d22 100644 --- 
a/generated_api_shadow/envoy/config/cluster/v3/cluster.proto +++ b/generated_api_shadow/envoy/config/cluster/v3/cluster.proto @@ -430,9 +430,11 @@ message Cluster { // The specified percent will be truncated to the nearest 1%. type.v3.Percent healthy_panic_threshold = 1; - google.protobuf.Duration update_merge_window = 4; + oneof locality_config_specifier { + ZoneAwareLbConfig zone_aware_lb_config = 2; - bool ignore_new_hosts_until_first_hc = 5; + LocalityWeightedLbConfig locality_weighted_lb_config = 3; + } // If set, all health check/weight/metadata updates that happen within this duration will be // merged and delivered in one shot when the duration expires. The start of the duration is when @@ -448,7 +450,7 @@ message Cluster { // Note: merging does not apply to cluster membership changes (e.g.: adds/removes); this is // because merging those updates isn't currently safe. See // https://github.com/envoyproxy/envoy/pull/3941. - bool close_connections_on_host_set_change = 6; + google.protobuf.Duration update_merge_window = 4; // If set to true, Envoy will not consider new hosts when computing load balancing weights until // they have been health checked for the first time. This will have no effect unless @@ -469,16 +471,14 @@ message Cluster { // // If panic mode is triggered, new hosts are still eligible for traffic; they simply do not // contribute to the calculation when deciding whether panic mode is enabled or not. - ConsistentHashingLbConfig consistent_hashing_lb_config = 7; + bool ignore_new_hosts_until_first_hc = 5; - oneof locality_config_specifier { - // If set to `true`, the cluster manager will drain all existing - // connections to upstream hosts whenever hosts are added or removed from the cluster. - ZoneAwareLbConfig zone_aware_lb_config = 2; + // If set to `true`, the cluster manager will drain all existing + // connections to upstream hosts whenever hosts are added or removed from the cluster. + bool close_connections_on_host_set_change = 6; - //Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.) - LocalityWeightedLbConfig locality_weighted_lb_config = 3; - } + //Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.) + ConsistentHashingLbConfig consistent_hashing_lb_config = 7; } message RefreshRate { @@ -565,26 +565,28 @@ message Cluster { // `. string alt_stat_name = 28; - // The :ref:`service discovery type ` - // to use for resolving the cluster. - EdsClusterConfig eds_cluster_config = 3; + oneof cluster_discovery_type { + // The :ref:`service discovery type ` + // to use for resolving the cluster. + DiscoveryType type = 2 [(validate.rules).enum = {defined_only: true}]; - // The custom cluster type. - google.protobuf.Duration connect_timeout = 4 [(validate.rules).duration = {gt {}}]; + // The custom cluster type. + CustomClusterType cluster_type = 38; + } // Configuration to use for EDS updates for the Cluster. - google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5; + EdsClusterConfig eds_cluster_config = 3; // The timeout for new network connections to hosts in the cluster. - LbPolicy lb_policy = 6 [(validate.rules).enum = {defined_only: true}]; + google.protobuf.Duration connect_timeout = 4 [(validate.rules).duration = {gt {}}]; // Soft limit on size of the cluster’s connections read and write buffers. If // unspecified, an implementation defined default is applied (1MiB). 
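These shadow-proto moves keep the original field numbers and semantics; only the ordering and the attached comments change. For orientation, here is a hedged sketch of how a few of the fields discussed in this message are populated through the protobuf-generated C++ API, assuming the standard generated accessors for envoy.config.cluster.v3.Cluster and not attempting a complete or validated configuration:

#include "envoy/config/cluster/v3/cluster.pb.h"

#include <iostream>

int main() {
  envoy::config::cluster::v3::Cluster cluster;
  cluster.set_name("backend");

  // cluster_discovery_type oneof: setting `type` here; setting `cluster_type`
  // instead would clear it, and vice versa.
  cluster.set_type(envoy::config::cluster::v3::Cluster::STRICT_DNS);

  // The timeout for new network connections to hosts in the cluster.
  cluster.mutable_connect_timeout()->set_seconds(5);

  // Soft limit on the per-connection read/write buffer size (1 MiB here).
  cluster.mutable_per_connection_buffer_limit_bytes()->set_value(1024 * 1024);

  cluster.set_lb_policy(envoy::config::cluster::v3::Cluster::ROUND_ROBIN);

  std::cout << cluster.DebugString();
  return 0;
}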
- endpoint.v3.ClusterLoadAssignment load_assignment = 33; + google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5; // The :ref:`load balancer type ` to use // when picking a host in the cluster. - repeated core.v3.HealthCheck health_checks = 8; + LbPolicy lb_policy = 6 [(validate.rules).enum = {defined_only: true}]; // Setting this is required for specifying members of // :ref:`STATIC`, @@ -597,33 +599,33 @@ message Cluster { // Setting this allows non-EDS cluster types to contain embedded EDS equivalent // :ref:`endpoint assignments`. // - google.protobuf.UInt32Value max_requests_per_connection = 9; + endpoint.v3.ClusterLoadAssignment load_assignment = 33; // Optional :ref:`active health checking ` // configuration for the cluster. If no // configuration is specified no health checking will be done and all cluster // members will be considered healthy at all times. - CircuitBreakers circuit_breakers = 10; + repeated core.v3.HealthCheck health_checks = 8; // Optional maximum requests for a single upstream connection. This parameter // is respected by both the HTTP/1.1 and HTTP/2 connection pool // implementations. If not specified, there is no limit. Setting this // parameter to 1 will effectively disable keep alive. - core.v3.UpstreamHttpProtocolOptions upstream_http_protocol_options = 46; + google.protobuf.UInt32Value max_requests_per_connection = 9; // Optional :ref:`circuit breaking ` for the cluster. - core.v3.HttpProtocolOptions common_http_protocol_options = 29; + CircuitBreakers circuit_breakers = 10; // HTTP protocol options that are applied only to upstream HTTP connections. // These options apply to all HTTP versions. - core.v3.Http1ProtocolOptions http_protocol_options = 13; + core.v3.UpstreamHttpProtocolOptions upstream_http_protocol_options = 46; // Additional options when handling HTTP requests upstream. These options will be applicable to // both HTTP1 and HTTP2 requests. - core.v3.Http2ProtocolOptions http2_protocol_options = 14; + core.v3.HttpProtocolOptions common_http_protocol_options = 29; // Additional options when handling HTTP1 requests. - map typed_extension_protocol_options = 36; + core.v3.Http1ProtocolOptions http_protocol_options = 13; // Even if default HTTP2 protocol options are desired, this field must be // set so that Envoy will assume that the upstream supports HTTP/2 when @@ -631,14 +633,13 @@ message Cluster { // supports prior knowledge for upstream connections. Even if TLS is used // with ALPN, `http2_protocol_options` must be specified. As an aside this allows HTTP/2 // connections to happen over plain text. - google.protobuf.Duration dns_refresh_rate = 16 - [(validate.rules).duration = {gt {nanos: 1000000}}]; + core.v3.Http2ProtocolOptions http2_protocol_options = 14; // The extension_protocol_options field is used to provide extension-specific protocol options // for upstream connections. The key should match the extension filter name, such as // "envoy.filters.network.thrift_proxy". See the extension's documentation for details on // specific options. - RefreshRate dns_failure_refresh_rate = 44; + map typed_extension_protocol_options = 36; // If the DNS refresh rate is specified and the cluster type is either // :ref:`STRICT_DNS`, @@ -649,7 +650,8 @@ message Cluster { // :ref:`STRICT_DNS` // and :ref:`LOGICAL_DNS` // this setting is ignored. 
- bool respect_dns_ttl = 39; + google.protobuf.Duration dns_refresh_rate = 16 + [(validate.rules).duration = {gt {nanos: 1000000}}]; // If the DNS failure refresh rate is specified and the cluster type is either // :ref:`STRICT_DNS`, @@ -659,17 +661,17 @@ message Cluster { // other than :ref:`STRICT_DNS` and // :ref:`LOGICAL_DNS` this setting is // ignored. - DnsLookupFamily dns_lookup_family = 17 [(validate.rules).enum = {defined_only: true}]; + RefreshRate dns_failure_refresh_rate = 44; // Optional configuration for setting cluster's DNS refresh rate. If the value is set to true, // cluster's DNS refresh rate will be set to resource record's TTL which comes from DNS // resolution. - repeated core.v3.Address dns_resolvers = 18; + bool respect_dns_ttl = 39; // The DNS IP address resolution policy. If this setting is not specified, the // value defaults to // :ref:`AUTO`. - bool use_tcp_for_dns_lookups = 45; + DnsLookupFamily dns_lookup_family = 17 [(validate.rules).enum = {defined_only: true}]; // If DNS resolvers are specified and the cluster type is either // :ref:`STRICT_DNS`, @@ -681,16 +683,16 @@ message Cluster { // :ref:`STRICT_DNS` // and :ref:`LOGICAL_DNS` // this setting is ignored. - OutlierDetection outlier_detection = 19; + repeated core.v3.Address dns_resolvers = 18; // [#next-major-version: Reconcile DNS options in a single message.] // Always use TCP queries instead of UDP queries for DNS lookups. - google.protobuf.Duration cleanup_interval = 20 [(validate.rules).duration = {gt {}}]; + bool use_tcp_for_dns_lookups = 45; // If specified, outlier detection will be enabled for this upstream cluster. // Each of the configuration values can be overridden via // :ref:`runtime values `. - core.v3.BindConfig upstream_bind_config = 21; + OutlierDetection outlier_detection = 19; // The interval for removing stale hosts from a cluster type // :ref:`ORIGINAL_DST`. @@ -705,47 +707,56 @@ message Cluster { // value defaults to 5000ms. For cluster types other than // :ref:`ORIGINAL_DST` // this setting is ignored. - LbSubsetConfig lb_subset_config = 22; + google.protobuf.Duration cleanup_interval = 20 [(validate.rules).duration = {gt {}}]; // Optional configuration used to bind newly established upstream connections. // This overrides any bind_config specified in the bootstrap proto. // If the address and port are empty, no bind will be performed. - CommonLbConfig common_lb_config = 27; + core.v3.BindConfig upstream_bind_config = 21; // Configuration for load balancing subsetting. - core.v3.TransportSocket transport_socket = 24; + LbSubsetConfig lb_subset_config = 22; - // Optional configuration for the Ring Hash load balancing policy. - core.v3.Metadata metadata = 25; + // Optional configuration for the load balancing algorithm selected by + // LbPolicy. Currently only + // :ref:`RING_HASH` and + // :ref:`LEAST_REQUEST` + // has additional configuration options. + // Specifying ring_hash_lb_config or least_request_lb_config without setting the corresponding + // LbPolicy will generate an error at runtime. + oneof lb_config { + // Optional configuration for the Ring Hash load balancing policy. + RingHashLbConfig ring_hash_lb_config = 23; - // Optional configuration for the Original Destination load balancing policy. - ClusterProtocolSelection protocol_selection = 26; + // Optional configuration for the Original Destination load balancing policy. + OriginalDstLbConfig original_dst_lb_config = 34; - // Optional configuration for the LeastRequest load balancing policy. 
- UpstreamConnectionOptions upstream_connection_options = 30; + // Optional configuration for the LeastRequest load balancing policy. + LeastRequestLbConfig least_request_lb_config = 37; + } // Common configuration for all load balancer implementations. - bool close_connections_on_host_health_failure = 31; + CommonLbConfig common_lb_config = 27; // Optional custom transport socket implementation to use for upstream connections. // To setup TLS, set a transport socket with name `tls` and // :ref:`UpstreamTlsContexts ` in the `typed_config`. // If no transport socket configuration is specified, new connections // will be set up with plaintext. - bool ignore_health_on_host_removal = 32; + core.v3.TransportSocket transport_socket = 24; // The Metadata field can be used to provide additional information about the // cluster. It can be used for stats, logging, and varying filter behavior. // Fields should use reverse DNS notation to denote which entity within Envoy // will need the information. For instance, if the metadata is intended for // the Router filter, the filter name should be specified as *envoy.filters.http.router*. - repeated Filter filters = 40; + core.v3.Metadata metadata = 25; // Determines how Envoy selects the protocol used to speak to upstream hosts. - LoadBalancingPolicy load_balancing_policy = 41; + ClusterProtocolSelection protocol_selection = 26; // Optional options for upstream connections. - core.v3.ConfigSource lrs_server = 42; + UpstreamConnectionOptions upstream_connection_options = 30; // If an upstream host becomes unhealthy (as determined by the configured health checks // or outlier detection), immediately close all connections to the failed host. @@ -760,64 +771,53 @@ message Cluster { // the unhealthy status is detected. If there are a large number of connections open // to an upstream host that becomes unhealthy, Envoy may spend a substantial amount of // time exclusively closing these connections, and not processing any other traffic. - bool track_timeout_budgets = 47; + bool close_connections_on_host_health_failure = 31; // If set to true, Envoy will ignore the health value of a host when processing its removal // from service discovery. This means that if active health checking is used, Envoy will *not* // wait for the endpoint to go unhealthy before removing it. - repeated core.v3.Address hidden_envoy_deprecated_hosts = 7 [deprecated = true]; + bool ignore_health_on_host_removal = 32; // An (optional) network filter chain, listed in the order the filters should be applied. // The chain will be applied to all outgoing connections that Envoy makes to the upstream // servers of this cluster. - envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext hidden_envoy_deprecated_tls_context = - 11 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; + repeated Filter filters = 40; // [#not-implemented-hide:] New mechanism for LB policy configuration. Used only if the // :ref:`lb_policy` field has the value // :ref:`LOAD_BALANCING_POLICY_CONFIG`. - map hidden_envoy_deprecated_extension_protocol_options = 35 - [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; + LoadBalancingPolicy load_balancing_policy = 41; - oneof cluster_discovery_type { - // [#not-implemented-hide:] - // If present, tells the client where to send load reports via LRS. 
If not present, the - // client will fall back to a client-side default, which may be either (a) don't send any - // load reports or (b) send load reports for all clusters to a single default server - // (which may be configured in the bootstrap file). - // - // Note that if multiple clusters point to the same LRS server, the client may choose to - // create a separate stream for each cluster or it may choose to coalesce the data for - // multiple clusters onto a single stream. Either way, the client must make sure to send - // the data for any given cluster on no more than one stream. - // - // [#next-major-version: In the v3 API, we should consider restructuring this somehow, - // maybe by allowing LRS to go on the ADS stream, or maybe by moving some of the negotiation - // from the LRS stream here.] - DiscoveryType type = 2 [(validate.rules).enum = {defined_only: true}]; + // [#not-implemented-hide:] + // If present, tells the client where to send load reports via LRS. If not present, the + // client will fall back to a client-side default, which may be either (a) don't send any + // load reports or (b) send load reports for all clusters to a single default server + // (which may be configured in the bootstrap file). + // + // Note that if multiple clusters point to the same LRS server, the client may choose to + // create a separate stream for each cluster or it may choose to coalesce the data for + // multiple clusters onto a single stream. Either way, the client must make sure to send + // the data for any given cluster on no more than one stream. + // + // [#next-major-version: In the v3 API, we should consider restructuring this somehow, + // maybe by allowing LRS to go on the ADS stream, or maybe by moving some of the negotiation + // from the LRS stream here.] + core.v3.ConfigSource lrs_server = 42; - // If track_timeout_budgets is true, the :ref:`timeout budget histograms - // ` will be published for each - // request. These show what percentage of a request's per try and global timeout was used. A value - // of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value - // of 100 would indicate that the request took the entirety of the timeout given to it. - CustomClusterType cluster_type = 38; - } + // If track_timeout_budgets is true, the :ref:`timeout budget histograms + // ` will be published for each + // request. These show what percentage of a request's per try and global timeout was used. A value + // of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value + // of 100 would indicate that the request took the entirety of the timeout given to it. + bool track_timeout_budgets = 47; - // Optional configuration for the load balancing algorithm selected by - // LbPolicy. Currently only - // :ref:`RING_HASH` and - // :ref:`LEAST_REQUEST` - // has additional configuration options. - // Specifying ring_hash_lb_config or least_request_lb_config without setting the corresponding - // LbPolicy will generate an error at runtime. 
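Because these three policies share a single oneof, at most one of them can be present, and consumers branch on the generated case enum rather than probing each field. A short sketch of that pattern, again assuming the standard protobuf-generated C++ API:

#include "envoy/config/cluster/v3/cluster.pb.h"

#include <iostream>

int main() {
  envoy::config::cluster::v3::Cluster cluster;
  cluster.set_lb_policy(envoy::config::cluster::v3::Cluster::RING_HASH);
  // Populating ring_hash_lb_config selects that member of the lb_config oneof.
  cluster.mutable_ring_hash_lb_config()->mutable_minimum_ring_size()->set_value(1024);

  switch (cluster.lb_config_case()) {
  case envoy::config::cluster::v3::Cluster::kRingHashLbConfig:
    std::cout << "ring hash, minimum ring size "
              << cluster.ring_hash_lb_config().minimum_ring_size().value() << "\n";
    break;
  case envoy::config::cluster::v3::Cluster::kOriginalDstLbConfig:
    std::cout << "original destination\n";
    break;
  case envoy::config::cluster::v3::Cluster::kLeastRequestLbConfig:
    std::cout << "least request\n";
    break;
  default:
    std::cout << "no lb_config set\n";
    break;
  }
  return 0;
}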
- oneof lb_config { - RingHashLbConfig ring_hash_lb_config = 23; + repeated core.v3.Address hidden_envoy_deprecated_hosts = 7 [deprecated = true]; - OriginalDstLbConfig original_dst_lb_config = 34; + envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext hidden_envoy_deprecated_tls_context = + 11 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - LeastRequestLbConfig least_request_lb_config = 37; - } + map hidden_envoy_deprecated_extension_protocol_options = 35 + [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; } // [#not-implemented-hide:] Extensible load balancing policy configuration. diff --git a/generated_api_shadow/envoy/config/core/v3/address.proto b/generated_api_shadow/envoy/config/core/v3/address.proto index a9dc3c6e1e30..5102c2d57591 100644 --- a/generated_api_shadow/envoy/config/core/v3/address.proto +++ b/generated_api_shadow/envoy/config/core/v3/address.proto @@ -54,29 +54,29 @@ message SocketAddress { // via :ref:`resolver_name `. string address = 2 [(validate.rules).string = {min_bytes: 1}]; - string resolver_name = 5; - - // This is only valid if :ref:`resolver_name - // ` is specified below and the - // named resolver is capable of named port resolution. - bool ipv4_compat = 6; - oneof port_specifier { option (validate.required) = true; - // The name of the custom resolver. This must have been registered with Envoy. If - // this is empty, a context dependent default applies. If the address is a concrete - // IP address, no resolution will occur. If address is a hostname this - // should be set for resolution other than DNS. Specifying a custom resolver with - // *STRICT_DNS* or *LOGICAL_DNS* will generate an error at runtime. uint32 port_value = 3 [(validate.rules).uint32 = {lte: 65535}]; - // When binding to an IPv6 address above, this enables `IPv4 compatibility - // `_. Binding to ``::`` will - // allow both IPv4 and IPv6 connections, with peer IPv4 addresses mapped into - // IPv6 space as ``::FFFF:``. + // This is only valid if :ref:`resolver_name + // ` is specified below and the + // named resolver is capable of named port resolution. string named_port = 4; } + + // The name of the custom resolver. This must have been registered with Envoy. If + // this is empty, a context dependent default applies. If the address is a concrete + // IP address, no resolution will occur. If address is a hostname this + // should be set for resolution other than DNS. Specifying a custom resolver with + // *STRICT_DNS* or *LOGICAL_DNS* will generate an error at runtime. + string resolver_name = 5; + + // When binding to an IPv6 address above, this enables `IPv4 compatibility + // `_. Binding to ``::`` will + // allow both IPv4 and IPv6 connections, with peer IPv4 addresses mapped into + // IPv6 space as ``::FFFF:``. + bool ipv4_compat = 6; } message TcpKeepalive { diff --git a/generated_api_shadow/envoy/config/core/v3/base.proto b/generated_api_shadow/envoy/config/core/v3/base.proto index f9d7759cc7fa..6f8c1129ac0f 100644 --- a/generated_api_shadow/envoy/config/core/v3/base.proto +++ b/generated_api_shadow/envoy/config/core/v3/base.proto @@ -168,32 +168,32 @@ message Node { // E.g. "envoy" or "grpc" string user_agent_name = 6; - // Free-form string that identifies the version of the entity requesting config. - // E.g. "1.12.2" or "abcd1234", or "SpecialEnvoyBuild" - repeated Extension extensions = 9; + oneof user_agent_version_type { + // Free-form string that identifies the version of the entity requesting config. + // E.g. 
"1.12.2" or "abcd1234", or "SpecialEnvoyBuild" + string user_agent_version = 7; - // Structured version of the entity requesting config. - repeated string client_features = 10; + // Structured version of the entity requesting config. + BuildVersion user_agent_build_version = 8; + } // List of extensions and their versions supported by the node. - repeated Address listening_addresses = 11; + repeated Extension extensions = 9; // Client feature support list. These are well known features described // in the Envoy API repository for a given major version of an API. Client features // use reverse DNS naming scheme, for example `com.acme.feature`. // See :ref:`the list of features ` that xDS client may // support. - string hidden_envoy_deprecated_build_version = 5 [deprecated = true]; + repeated string client_features = 10; - oneof user_agent_version_type { - // Known listening ports on the node as a generic hint to the management server - // for filtering :ref:`listeners ` to be returned. For example, - // if there is a listener bound to port 80, the list can optionally contain the - // SocketAddress `(0.0.0.0,80)`. The field is optional and just a hint. - string user_agent_version = 7; + // Known listening ports on the node as a generic hint to the management server + // for filtering :ref:`listeners ` to be returned. For example, + // if there is a listener bound to port 80, the list can optionally contain the + // SocketAddress `(0.0.0.0,80)`. The field is optional and just a hint. + repeated Address listening_addresses = 11; - BuildVersion user_agent_build_version = 8; - } + string hidden_envoy_deprecated_build_version = 5 [deprecated = true]; } // Metadata provides additional inputs to filters based on matched listeners, diff --git a/generated_api_shadow/envoy/config/core/v3/config_source.proto b/generated_api_shadow/envoy/config/core/v3/config_source.proto index ce896e070ac7..363f4ef91f90 100644 --- a/generated_api_shadow/envoy/config/core/v3/config_source.proto +++ b/generated_api_shadow/envoy/config/core/v3/config_source.proto @@ -136,30 +136,30 @@ message RateLimitSettings { message ConfigSource { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.ConfigSource"; - // Path on the filesystem to source and watch for configuration updates. - // When sourcing configuration for :ref:`secret `, - // the certificate and key files are also watched for updates. - // - // .. note:: - // - // The path to the source must exist at config load time. - // - // .. note:: - // - // Envoy will only watch the file path for *moves.* This is because in general only moves - // are atomic. The same method of swapping files as is demonstrated in the - // :ref:`runtime documentation ` can be used here also. - google.protobuf.Duration initial_fetch_timeout = 4; - - // API configuration source. - ApiVersion resource_api_version = 6 [(validate.rules).enum = {defined_only: true}]; - oneof config_source_specifier { option (validate.required) = true; + // Path on the filesystem to source and watch for configuration updates. + // When sourcing configuration for :ref:`secret `, + // the certificate and key files are also watched for updates. + // + // .. note:: + // + // The path to the source must exist at config load time. + // + // .. note:: + // + // Envoy will only watch the file path for *moves.* This is because in general only moves + // are atomic. The same method of swapping files as is demonstrated in the + // :ref:`runtime documentation ` can be used here also. 
+ string path = 1; + + // API configuration source. + ApiConfigSource api_config_source = 2; + // When set, ADS will be used to fetch resources. The ADS API configuration // source in the bootstrap configuration is used. - string path = 1; + AggregatedConfigSource ads = 3; // [#not-implemented-hide:] // When set, the client will access the resources from the same server it got the @@ -172,20 +172,20 @@ message ConfigSource { // [#next-major-version: In xDS v3, consider replacing the ads field with this one, since // this field can implicitly mean to use the same stream in the case where the ConfigSource // is provided via ADS and the specified data can also be obtained via ADS.] - ApiConfigSource api_config_source = 2; - - // When this timeout is specified, Envoy will wait no longer than the specified time for first - // config response on this xDS subscription during the :ref:`initialization process - // `. After reaching the timeout, Envoy will move to the next - // initialization phase, even if the first config is not delivered yet. The timer is activated - // when the xDS API subscription starts, and is disarmed on first config update or on error. 0 - // means no timeout - Envoy will wait indefinitely for the first xDS config (unless another - // timeout applies). The default is 15s. - AggregatedConfigSource ads = 3; - - // API version for xDS resources. This implies the type URLs that the client - // will request for resources and the resource type that the client will in - // turn expect to be delivered. SelfConfigSource self = 5; } + + // When this timeout is specified, Envoy will wait no longer than the specified time for first + // config response on this xDS subscription during the :ref:`initialization process + // `. After reaching the timeout, Envoy will move to the next + // initialization phase, even if the first config is not delivered yet. The timer is activated + // when the xDS API subscription starts, and is disarmed on first config update or on error. 0 + // means no timeout - Envoy will wait indefinitely for the first xDS config (unless another + // timeout applies). The default is 15s. + google.protobuf.Duration initial_fetch_timeout = 4; + + // API version for xDS resources. This implies the type URLs that the client + // will request for resources and the resource type that the client will in + // turn expect to be delivered. + ApiVersion resource_api_version = 6 [(validate.rules).enum = {defined_only: true}]; } diff --git a/generated_api_shadow/envoy/config/core/v3/grpc_service.proto b/generated_api_shadow/envoy/config/core/v3/grpc_service.proto index 654d3ed81b56..5cd8562f5783 100644 --- a/generated_api_shadow/envoy/config/core/v3/grpc_service.proto +++ b/generated_api_shadow/envoy/config/core/v3/grpc_service.proto @@ -234,26 +234,26 @@ message GrpcService { reserved 4; - // Envoy's in-built gRPC client. - // See the :ref:`gRPC services overview ` - // documentation for discussion on gRPC client selection. - google.protobuf.Duration timeout = 3; - - // `Google C++ gRPC client `_ - // See the :ref:`gRPC services overview ` - // documentation for discussion on gRPC client selection. - repeated HeaderValue initial_metadata = 5; - oneof target_specifier { option (validate.required) = true; - // The timeout for the gRPC request. This is the timeout for a specific - // request. + // Envoy's in-built gRPC client. + // See the :ref:`gRPC services overview ` + // documentation for discussion on gRPC client selection. 
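As a minimal sketch, a GrpcService selecting the envoy_grpc variant of the target_specifier oneof together with the per-request timeout (the cluster name is hypothetical and would be defined elsewhere in the bootstrap):

.. code-block:: yaml

  grpc_service:
    envoy_grpc:
      cluster_name: xds_cluster   # hypothetical cluster name
    timeout: 5s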
EnvoyGrpc envoy_grpc = 1; - // Additional metadata to include in streams initiated to the GrpcService. - // This can be used for scenarios in which additional ad hoc authorization - // headers (e.g. ``x-foo-bar: baz-key``) are to be injected. + // `Google C++ gRPC client `_ + // See the :ref:`gRPC services overview ` + // documentation for discussion on gRPC client selection. GoogleGrpc google_grpc = 2; } + + // The timeout for the gRPC request. This is the timeout for a specific + // request. + google.protobuf.Duration timeout = 3; + + // Additional metadata to include in streams initiated to the GrpcService. + // This can be used for scenarios in which additional ad hoc authorization + // headers (e.g. ``x-foo-bar: baz-key``) are to be injected. + repeated HeaderValue initial_metadata = 5; } diff --git a/generated_api_shadow/envoy/config/core/v3/health_check.proto b/generated_api_shadow/envoy/config/core/v3/health_check.proto index 2bc8d1488172..05af0a8cef06 100644 --- a/generated_api_shadow/envoy/config/core/v3/health_check.proto +++ b/generated_api_shadow/envoy/config/core/v3/health_check.proto @@ -258,17 +258,21 @@ message HealthCheck { // Reuse health check connection between health checks. Default is true. google.protobuf.BoolValue reuse_connection = 7; - // HTTP health check. - google.protobuf.Duration no_traffic_interval = 12 [(validate.rules).duration = {gt {}}]; + oneof health_checker { + option (validate.required) = true; - // TCP health check. - google.protobuf.Duration unhealthy_interval = 14 [(validate.rules).duration = {gt {}}]; + // HTTP health check. + HttpHealthCheck http_health_check = 8; - // gRPC health check. - google.protobuf.Duration unhealthy_edge_interval = 15 [(validate.rules).duration = {gt {}}]; + // TCP health check. + TcpHealthCheck tcp_health_check = 9; - // Custom health check. - google.protobuf.Duration healthy_edge_interval = 16 [(validate.rules).duration = {gt {}}]; + // gRPC health check. + GrpcHealthCheck grpc_health_check = 11; + + // Custom health check. + CustomHealthCheck custom_health_check = 13; + } // The "no traffic interval" is a special health check interval that is used when a cluster has // never had traffic routed to it. This lower interval allows cluster information to be kept up to @@ -278,14 +282,14 @@ message HealthCheck { // any other. // // The default value for "no traffic interval" is 60 seconds. - string event_log_path = 17; + google.protobuf.Duration no_traffic_interval = 12 [(validate.rules).duration = {gt {}}]; // The "unhealthy interval" is a health check interval that is used for hosts that are marked as // unhealthy. As soon as the host is marked as healthy, Envoy will shift back to using the // standard health check interval that is defined. // // The default value for "unhealthy interval" is the same as "interval". - EventServiceConfig event_service = 22; + google.protobuf.Duration unhealthy_interval = 14 [(validate.rules).duration = {gt {}}]; // The "unhealthy edge interval" is a special health check interval that is used for the first // health check right after a host is marked as unhealthy. For subsequent health checks @@ -293,65 +297,61 @@ message HealthCheck { // check interval that is defined. // // The default value for "unhealthy edge interval" is the same as "unhealthy interval". 
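For example, a cluster health check selecting the http_health_check variant of the health_checker oneof, combined with the interval fields documented above, might look like this (intervals, thresholds, and path are illustrative):

.. code-block:: yaml

  health_checks:
  - timeout: 1s
    interval: 10s
    unhealthy_threshold: 3
    healthy_threshold: 2
    no_traffic_interval: 60s
    http_health_check:
      path: /healthz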
- bool always_log_health_check_failures = 19; + google.protobuf.Duration unhealthy_edge_interval = 15 [(validate.rules).duration = {gt {}}]; // The "healthy edge interval" is a special health check interval that is used for the first // health check right after a host is marked as healthy. For subsequent health checks // Envoy will shift back to using the standard health check interval that is defined. // // The default value for "healthy edge interval" is the same as the default interval. - TlsOptions tls_options = 21; + google.protobuf.Duration healthy_edge_interval = 16 [(validate.rules).duration = {gt {}}]; // Specifies the path to the :ref:`health check event log `. // If empty, no event log will be written. - google.protobuf.Struct transport_socket_match_criteria = 23; - - oneof health_checker { - option (validate.required) = true; + string event_log_path = 17; - // [#not-implemented-hide:] - // The gRPC service for the health check event service. - // If empty, health check events won't be sent to a remote endpoint. - HttpHealthCheck http_health_check = 8; + // [#not-implemented-hide:] + // The gRPC service for the health check event service. + // If empty, health check events won't be sent to a remote endpoint. + EventServiceConfig event_service = 22; - // If set to true, health check failure events will always be logged. If set to false, only the - // initial health check failure event will be logged. - // The default value is false. - TcpHealthCheck tcp_health_check = 9; + // If set to true, health check failure events will always be logged. If set to false, only the + // initial health check failure event will be logged. + // The default value is false. + bool always_log_health_check_failures = 19; - // This allows overriding the cluster TLS settings, just for health check connections. - GrpcHealthCheck grpc_health_check = 11; + // This allows overriding the cluster TLS settings, just for health check connections. + TlsOptions tls_options = 21; - // Optional key/value pairs that will be used to match a transport socket from those specified in the cluster's - // :ref:`tranport socket matches `. - // For example, the following match criteria - // - // .. code-block:: yaml - // - // transport_socket_match_criteria: - // useMTLS: true - // - // Will match the following :ref:`cluster socket match ` - // - // .. code-block:: yaml - // - // transport_socket_matches: - // - name: "useMTLS" - // match: - // useMTLS: true - // transport_socket: - // name: envoy.transport_sockets.tls - // config: { ... } # tls socket configuration - // - // If this field is set, then for health checks it will supersede an entry of *envoy.transport_socket* in the - // :ref:`LbEndpoint.Metadata `. - // This allows using different transport socket capabilities for health checking versus proxying to the - // endpoint. - // - // If the key/values pairs specified do not match any - // :ref:`transport socket matches `, - // the cluster's :ref:`transport socket ` - // will be used for health check socket configuration. - CustomHealthCheck custom_health_check = 13; - } + // Optional key/value pairs that will be used to match a transport socket from those specified in the cluster's + // :ref:`tranport socket matches `. + // For example, the following match criteria + // + // .. code-block:: yaml + // + // transport_socket_match_criteria: + // useMTLS: true + // + // Will match the following :ref:`cluster socket match ` + // + // .. 
code-block:: yaml + // + // transport_socket_matches: + // - name: "useMTLS" + // match: + // useMTLS: true + // transport_socket: + // name: envoy.transport_sockets.tls + // config: { ... } # tls socket configuration + // + // If this field is set, then for health checks it will supersede an entry of *envoy.transport_socket* in the + // :ref:`LbEndpoint.Metadata `. + // This allows using different transport socket capabilities for health checking versus proxying to the + // endpoint. + // + // If the key/values pairs specified do not match any + // :ref:`transport socket matches `, + // the cluster's :ref:`transport socket ` + // will be used for health check socket configuration. + google.protobuf.Struct transport_socket_match_criteria = 23; } diff --git a/generated_api_shadow/envoy/config/core/v3/http_uri.proto b/generated_api_shadow/envoy/config/core/v3/http_uri.proto index 6cc4d36d3944..42bcd4f61572 100644 --- a/generated_api_shadow/envoy/config/core/v3/http_uri.proto +++ b/generated_api_shadow/envoy/config/core/v3/http_uri.proto @@ -29,20 +29,6 @@ message HttpUri { // string uri = 1 [(validate.rules).string = {min_bytes: 1}]; - // A cluster is created in the Envoy "cluster_manager" config - // section. This field specifies the cluster name. - // - // Example: - // - // .. code-block:: yaml - // - // cluster: jwks_cluster - // - google.protobuf.Duration timeout = 3 [(validate.rules).duration = { - required: true - gte {} - }]; - // Specify how `uri` is to be fetched. Today, this requires an explicit // cluster, but in the future we may support dynamic cluster creation or // inline DNS resolution. See `issue @@ -50,7 +36,21 @@ message HttpUri { oneof http_upstream_type { option (validate.required) = true; - // Sets the maximum duration in milliseconds that a response can take to arrive upon request. + // A cluster is created in the Envoy "cluster_manager" config + // section. This field specifies the cluster name. + // + // Example: + // + // .. code-block:: yaml + // + // cluster: jwks_cluster + // string cluster = 2 [(validate.rules).string = {min_bytes: 1}]; } + + // Sets the maximum duration in milliseconds that a response can take to arrive upon request. + google.protobuf.Duration timeout = 3 [(validate.rules).duration = { + required: true + gte {} + }]; } diff --git a/generated_api_shadow/envoy/config/core/v3/socket_option.proto b/generated_api_shadow/envoy/config/core/v3/socket_option.proto index 836b8f553813..b22169b86aeb 100644 --- a/generated_api_shadow/envoy/config/core/v3/socket_option.proto +++ b/generated_api_shadow/envoy/config/core/v3/socket_option.proto @@ -40,17 +40,17 @@ message SocketOption { // The numeric name as passed to setsockopt int64 name = 3; - // Because many sockopts take an int value. - SocketState state = 6 [(validate.rules).enum = {defined_only: true}]; - oneof value { option (validate.required) = true; - // Otherwise it's a byte buffer. + // Because many sockopts take an int value. int64 int_value = 4; - // The state in which the option will be applied. When used in BindConfig - // STATE_PREBIND is currently the only valid value. + // Otherwise it's a byte buffer. bytes buf_value = 5; } + + // The state in which the option will be applied. When used in BindConfig + // STATE_PREBIND is currently the only valid value. 
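A minimal sketch of a socket option entry using the int_value variant and the state field described above; the numeric level/name shown are the Linux values for SOL_SOCKET/SO_KEEPALIVE and are platform dependent:

.. code-block:: yaml

  socket_options:
  - description: enable keepalive   # free-form, for debugging only
    level: 1                        # SOL_SOCKET on Linux
    name: 9                         # SO_KEEPALIVE on Linux
    int_value: 1
    state: STATE_PREBIND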
+ SocketState state = 6 [(validate.rules).enum = {defined_only: true}]; } diff --git a/generated_api_shadow/envoy/config/endpoint/v3/endpoint_components.proto b/generated_api_shadow/envoy/config/endpoint/v3/endpoint_components.proto index 8e800745df3f..b880a38d1a3e 100644 --- a/generated_api_shadow/envoy/config/endpoint/v3/endpoint_components.proto +++ b/generated_api_shadow/envoy/config/endpoint/v3/endpoint_components.proto @@ -76,36 +76,36 @@ message Endpoint { message LbEndpoint { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.endpoint.LbEndpoint"; - core.v3.HealthStatus health_status = 2; - - // [#not-implemented-hide:] - core.v3.Metadata metadata = 3; - - // Optional health status when known and supplied by EDS server. - google.protobuf.UInt32Value load_balancing_weight = 4 [(validate.rules).uint32 = {gte: 1}]; - // Upstream host identifier or a named reference. oneof host_identifier { - // The endpoint metadata specifies values that may be used by the load - // balancer to select endpoints in a cluster for a given request. The filter - // name should be specified as *envoy.lb*. An example boolean key-value pair - // is *canary*, providing the optional canary status of the upstream host. - // This may be matched against in a route's - // :ref:`RouteAction ` metadata_match field - // to subset the endpoints considered in cluster load balancing. Endpoint endpoint = 1; - // The optional load balancing weight of the upstream host; at least 1. - // Envoy uses the load balancing weight in some of the built in load - // balancers. The load balancing weight for an endpoint is divided by the sum - // of the weights of all endpoints in the endpoint's locality to produce a - // percentage of traffic for the endpoint. This percentage is then further - // weighted by the endpoint's locality's load balancing weight from - // LocalityLbEndpoints. If unspecified, each host is presumed to have equal - // weight in a locality. The sum of the weights of all endpoints in the - // endpoint's locality must not exceed uint32_t maximal value (4294967295). + // [#not-implemented-hide:] string endpoint_name = 5; } + + // Optional health status when known and supplied by EDS server. + core.v3.HealthStatus health_status = 2; + + // The endpoint metadata specifies values that may be used by the load + // balancer to select endpoints in a cluster for a given request. The filter + // name should be specified as *envoy.lb*. An example boolean key-value pair + // is *canary*, providing the optional canary status of the upstream host. + // This may be matched against in a route's + // :ref:`RouteAction ` metadata_match field + // to subset the endpoints considered in cluster load balancing. + core.v3.Metadata metadata = 3; + + // The optional load balancing weight of the upstream host; at least 1. + // Envoy uses the load balancing weight in some of the built in load + // balancers. The load balancing weight for an endpoint is divided by the sum + // of the weights of all endpoints in the endpoint's locality to produce a + // percentage of traffic for the endpoint. This percentage is then further + // weighted by the endpoint's locality's load balancing weight from + // LocalityLbEndpoints. If unspecified, each host is presumed to have equal + // weight in a locality. The sum of the weights of all endpoints in the + // endpoint's locality must not exceed uint32_t maximal value (4294967295). 
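An illustrative LocalityLbEndpoints entry combining the endpoint host_identifier with the per-endpoint load_balancing_weight described above (addresses and weights are hypothetical):

.. code-block:: yaml

  endpoints:
  - lb_endpoints:
    - endpoint:
        address:
          socket_address:
            address: 10.0.0.1
            port_value: 8080
      load_balancing_weight: 3
    - endpoint:
        address:
          socket_address:
            address: 10.0.0.2
            port_value: 8080
      load_balancing_weight: 1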
+ google.protobuf.UInt32Value load_balancing_weight = 4 [(validate.rules).uint32 = {gte: 1}]; } // A group of endpoints belonging to a Locality. diff --git a/generated_api_shadow/envoy/config/listener/v3/listener_components.proto b/generated_api_shadow/envoy/config/listener/v3/listener_components.proto index b42f11cd6f5e..138b168ce5d4 100644 --- a/generated_api_shadow/envoy/config/listener/v3/listener_components.proto +++ b/generated_api_shadow/envoy/config/listener/v3/listener_components.proto @@ -280,16 +280,16 @@ message ListenerFilter { // :ref:`supported filter `. string name = 1 [(validate.rules).string = {min_bytes: 1}]; - ListenerFilterChainMatchPredicate filter_disabled = 4; - // Filter specific configuration which depends on the filter being instantiated. // See the supported filters for further documentation. oneof config_type { - // Optional match predicate used to disable the filter. The filter is enabled when this field is empty. - // See :ref:`ListenerFilterChainMatchPredicate ` - // for further examples. google.protobuf.Any typed_config = 3; google.protobuf.Struct hidden_envoy_deprecated_config = 2 [deprecated = true]; } + + // Optional match predicate used to disable the filter. The filter is enabled when this field is empty. + // See :ref:`ListenerFilterChainMatchPredicate ` + // for further examples. + ListenerFilterChainMatchPredicate filter_disabled = 4; } diff --git a/generated_api_shadow/envoy/config/metrics/v3/stats.proto b/generated_api_shadow/envoy/config/metrics/v3/stats.proto index bd5e0e8c4973..c6295b8326ac 100644 --- a/generated_api_shadow/envoy/config/metrics/v3/stats.proto +++ b/generated_api_shadow/envoy/config/metrics/v3/stats.proto @@ -263,47 +263,47 @@ message TagSpecifier { message StatsdSink { option (udpa.annotations.versioning).previous_message_type = "envoy.config.metrics.v2.StatsdSink"; - // The UDP address of a running `statsd `_ - // compliant listener. If specified, statistics will be flushed to this - // address. - string prefix = 3; - oneof statsd_specifier { option (validate.required) = true; + // The UDP address of a running `statsd `_ + // compliant listener. If specified, statistics will be flushed to this + // address. + core.v3.Address address = 1; + // The name of a cluster that is running a TCP `statsd // `_ compliant listener. If specified, // Envoy will connect to this cluster to flush statistics. - core.v3.Address address = 1; - - // Optional custom prefix for StatsdSink. If - // specified, this will override the default prefix. - // For example: - // - // .. code-block:: json - // - // { - // "prefix" : "envoy-prod" - // } - // - // will change emitted stats to - // - // .. code-block:: cpp - // - // envoy-prod.test_counter:1|c - // envoy-prod.test_timer:5|ms - // - // Note that the default prefix, "envoy", will be used if a prefix is not - // specified. - // - // Stats with default prefix: - // - // .. code-block:: cpp - // - // envoy.test_counter:1|c - // envoy.test_timer:5|ms string tcp_cluster_name = 2; } + + // Optional custom prefix for StatsdSink. If + // specified, this will override the default prefix. + // For example: + // + // .. code-block:: json + // + // { + // "prefix" : "envoy-prod" + // } + // + // will change emitted stats to + // + // .. code-block:: cpp + // + // envoy-prod.test_counter:1|c + // envoy-prod.test_timer:5|ms + // + // Note that the default prefix, "envoy", will be used if a prefix is not + // specified. + // + // Stats with default prefix: + // + // .. 
code-block:: cpp + // + // envoy.test_counter:1|c + // envoy.test_timer:5|ms + string prefix = 3; } // Stats configuration proto schema for built-in *envoy.stat_sinks.dog_statsd* sink. @@ -317,17 +317,17 @@ message DogStatsdSink { reserved 2; - // The UDP address of a running DogStatsD compliant listener. If specified, - // statistics will be flushed to this address. - string prefix = 3; - oneof dog_statsd_specifier { option (validate.required) = true; - // Optional custom metric name prefix. See :ref:`StatsdSink's prefix field - // ` for more details. + // The UDP address of a running DogStatsD compliant listener. If specified, + // statistics will be flushed to this address. core.v3.Address address = 1; } + + // Optional custom metric name prefix. See :ref:`StatsdSink's prefix field + // ` for more details. + string prefix = 3; } // Stats configuration proto schema for built-in *envoy.stat_sinks.hystrix* sink. diff --git a/generated_api_shadow/envoy/config/route/v3/route_components.proto b/generated_api_shadow/envoy/config/route/v3/route_components.proto index b5da703c2936..00d4f5e628a7 100644 --- a/generated_api_shadow/envoy/config/route/v3/route_components.proto +++ b/generated_api_shadow/envoy/config/route/v3/route_components.proto @@ -208,40 +208,40 @@ message Route { // Route matching parameters. RouteMatch match = 1 [(validate.rules).message = {required: true}]; - // Route request to some upstream cluster. - core.v3.Metadata metadata = 4; + oneof action { + option (validate.required) = true; - // Return a redirect. - Decorator decorator = 5; + // Route request to some upstream cluster. + RouteAction route = 2; - // Return an arbitrary HTTP response directly, without proxying. - map typed_per_filter_config = 13; + // Return a redirect. + RedirectAction redirect = 3; - // [#not-implemented-hide:] - // If true, a filter will define the action (e.g., it could dynamically generate the - // RouteAction). - repeated core.v3.HeaderValueOption request_headers_to_add = 9 - [(validate.rules).repeated = {max_items: 1000}]; + // Return an arbitrary HTTP response directly, without proxying. + DirectResponseAction direct_response = 7; + + // [#not-implemented-hide:] + // If true, a filter will define the action (e.g., it could dynamically generate the + // RouteAction). + FilterAction filter_action = 17; + } // The Metadata field can be used to provide additional information // about the route. It can be used for configuration, stats, and logging. // The metadata should go under the filter namespace that will need it. // For instance, if the metadata is intended for the Router filter, // the filter name should be specified as *envoy.filters.http.router*. - repeated string request_headers_to_remove = 12 [(validate.rules).repeated = { - items {string {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} - }]; + core.v3.Metadata metadata = 4; // Decorator for the matched route. - repeated core.v3.HeaderValueOption response_headers_to_add = 10 - [(validate.rules).repeated = {max_items: 1000}]; + Decorator decorator = 5; // The typed_per_filter_config field can be used to provide route-specific // configurations for filters. The key should match the filter name, such as // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter // specific; see the :ref:`HTTP filter documentation ` for // if and how it is utilized. 
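For example, a route selecting the direct_response variant of the action oneof documented above (status and body are illustrative):

.. code-block:: yaml

  routes:
  - match:
      prefix: "/forbidden"
    direct_response:
      status: 403
      body:
        inline_string: "access denied"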
- repeated string response_headers_to_remove = 11; + map typed_per_filter_config = 13; // Specifies a set of headers that will be added to requests matching this // route. Headers specified at this level are applied before headers from the @@ -249,11 +249,14 @@ message Route { // :ref:`envoy_api_msg_config.route.v3.RouteConfiguration`. For more information, including details on // header value syntax, see the documentation on :ref:`custom request headers // `. - Tracing tracing = 15; + repeated core.v3.HeaderValueOption request_headers_to_add = 9 + [(validate.rules).repeated = {max_items: 1000}]; // Specifies a list of HTTP headers that should be removed from each request // matching this route. - google.protobuf.UInt32Value per_request_buffer_limit_bytes = 16; + repeated string request_headers_to_remove = 12 [(validate.rules).repeated = { + items {string {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} + }]; // Specifies a set of headers that will be added to responses to requests // matching this route. Headers specified at this level are applied before @@ -261,27 +264,24 @@ message Route { // :ref:`envoy_api_msg_config.route.v3.RouteConfiguration`. For more information, including // details on header value syntax, see the documentation on // :ref:`custom request headers `. - map hidden_envoy_deprecated_per_filter_config = 8 - [deprecated = true]; - - oneof action { - option (validate.required) = true; + repeated core.v3.HeaderValueOption response_headers_to_add = 10 + [(validate.rules).repeated = {max_items: 1000}]; - // Specifies a list of HTTP headers that should be removed from each response - // to requests matching this route. - RouteAction route = 2; + // Specifies a list of HTTP headers that should be removed from each response + // to requests matching this route. + repeated string response_headers_to_remove = 11; - // Presence of the object defines whether the connection manager's tracing configuration - // is overridden by this route specific instance. - RedirectAction redirect = 3; + // Presence of the object defines whether the connection manager's tracing configuration + // is overridden by this route specific instance. + Tracing tracing = 15; - // The maximum bytes which will be buffered for retries and shadowing. - // If set, the bytes actually buffered will be the minimum value of this and the - // listener per_connection_buffer_limit_bytes. - DirectResponseAction direct_response = 7; + // The maximum bytes which will be buffered for retries and shadowing. + // If set, the bytes actually buffered will be the minimum value of this and the + // listener per_connection_buffer_limit_bytes. + google.protobuf.UInt32Value per_request_buffer_limit_bytes = 16; - FilterAction filter_action = 17; - } + map hidden_envoy_deprecated_per_filter_config = 8 + [deprecated = true]; } // Compared to the :ref:`cluster ` field that specifies a @@ -403,41 +403,51 @@ message RouteMatch { reserved 5; - // If specified, the route is a prefix rule meaning that the prefix must - // match the beginning of the *:path* header. - google.protobuf.BoolValue case_sensitive = 4; + oneof path_specifier { + option (validate.required) = true; - // If specified, the route is an exact path rule meaning that the path must - // exactly match the *:path* header once the query string is removed. - core.v3.RuntimeFractionalPercent runtime_fraction = 9; + // If specified, the route is a prefix rule meaning that the prefix must + // match the beginning of the *:path* header. 
+ string prefix = 1; - // If specified, the route is a regular expression rule meaning that the - // regex must match the *:path* header once the query string is removed. The entire path - // (without the query string) must match the regex. The rule will not match if only a - // subsequence of the *:path* header matches the regex. - // - // [#next-major-version: In the v3 API we should redo how path specification works such - // that we utilize StringMatcher, and additionally have consistent options around whether we - // strip query strings, do a case sensitive match, etc. In the interim it will be too disruptive - // to deprecate the existing options. We should even consider whether we want to do away with - // path_specifier entirely and just rely on a set of header matchers which can already match - // on :path, etc. The issue with that is it is unclear how to generically deal with query string - // stripping. This needs more thought.] - repeated HeaderMatcher headers = 6; + // If specified, the route is an exact path rule meaning that the path must + // exactly match the *:path* header once the query string is removed. + string path = 2; - // [#not-implemented-hide:] - // If this is used as the matcher, the matcher will only match CONNECT requests. - // Note that this will not match HTTP/2 upgrade-style CONNECT requests - // (WebSocket and the like) as they are normalized in Envoy as HTTP/1.1 style - // upgrades. - // This is the only way to match CONNECT requests for HTTP/1.1. For HTTP/2, - // where CONNECT requests may have a path, the path matchers will work if - // there is a path present. - repeated QueryParameterMatcher query_parameters = 7; + // If specified, the route is a regular expression rule meaning that the + // regex must match the *:path* header once the query string is removed. The entire path + // (without the query string) must match the regex. The rule will not match if only a + // subsequence of the *:path* header matches the regex. + // + // [#next-major-version: In the v3 API we should redo how path specification works such + // that we utilize StringMatcher, and additionally have consistent options around whether we + // strip query strings, do a case sensitive match, etc. In the interim it will be too disruptive + // to deprecate the existing options. We should even consider whether we want to do away with + // path_specifier entirely and just rely on a set of header matchers which can already match + // on :path, etc. The issue with that is it is unclear how to generically deal with query string + // stripping. This needs more thought.] + type.matcher.v3.RegexMatcher safe_regex = 10 [(validate.rules).message = {required: true}]; + + // [#not-implemented-hide:] + // If this is used as the matcher, the matcher will only match CONNECT requests. + // Note that this will not match HTTP/2 upgrade-style CONNECT requests + // (WebSocket and the like) as they are normalized in Envoy as HTTP/1.1 style + // upgrades. + // This is the only way to match CONNECT requests for HTTP/1.1. For HTTP/2, + // where CONNECT requests may have a path, the path matchers will work if + // there is a path present. + ConnectMatcher connect_matcher = 12; + + string hidden_envoy_deprecated_regex = 3 [ + deprecated = true, + (validate.rules).string = {max_bytes: 1024}, + (envoy.annotations.disallowed_by_default) = true + ]; + } // Indicates that prefix/path matching should be case insensitive. The default // is true. 
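A minimal sketch of the prefix variant of path_specifier together with case_sensitive (the cluster name is hypothetical):

.. code-block:: yaml

  routes:
  - match:
      prefix: "/api"
      case_sensitive: false
    route:
      cluster: backend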
- GrpcRouteMatchOptions grpc = 8; + google.protobuf.BoolValue case_sensitive = 4; // Indicates that the route should additionally match on a runtime key. Every time the route // is considered for a match, it must also fall under the percentage of matches indicated by @@ -455,42 +465,32 @@ message RouteMatch { // integer with the assumption that the value is an integral percentage out of 100. For // instance, a runtime key lookup returning the value "42" would parse as a FractionalPercent // whose numerator is 42 and denominator is HUNDRED. This preserves legacy semantics. - TlsContextMatchOptions tls_context = 11; - - oneof path_specifier { - option (validate.required) = true; - - // Specifies a set of headers that the route should match on. The router will - // check the request’s headers against all the specified headers in the route - // config. A match will happen if all the headers in the route are present in - // the request with the same values (or based on presence if the value field - // is not in the config). - string prefix = 1; + core.v3.RuntimeFractionalPercent runtime_fraction = 9; - // Specifies a set of URL query parameters on which the route should - // match. The router will check the query string from the *path* header - // against all the specified query parameters. If the number of specified - // query parameters is nonzero, they all must match the *path* header's - // query string for a match to occur. - string path = 2; + // Specifies a set of headers that the route should match on. The router will + // check the request’s headers against all the specified headers in the route + // config. A match will happen if all the headers in the route are present in + // the request with the same values (or based on presence if the value field + // is not in the config). + repeated HeaderMatcher headers = 6; - // If specified, only gRPC requests will be matched. The router will check - // that the content-type header has a application/grpc or one of the various - // application/grpc+ values. - type.matcher.v3.RegexMatcher safe_regex = 10 [(validate.rules).message = {required: true}]; + // Specifies a set of URL query parameters on which the route should + // match. The router will check the query string from the *path* header + // against all the specified query parameters. If the number of specified + // query parameters is nonzero, they all must match the *path* header's + // query string for a match to occur. + repeated QueryParameterMatcher query_parameters = 7; - // If specified, the client tls context will be matched against the defined - // match options. - // - // [#next-major-version: unify with RBAC] - ConnectMatcher connect_matcher = 12; + // If specified, only gRPC requests will be matched. The router will check + // that the content-type header has a application/grpc or one of the various + // application/grpc+ values. + GrpcRouteMatchOptions grpc = 8; - string hidden_envoy_deprecated_regex = 3 [ - deprecated = true, - (validate.rules).string = {max_bytes: 1024}, - (envoy.annotations.disallowed_by_default) = true - ]; - } + // If specified, the client tls context will be matched against the defined + // match options. + // + // [#next-major-version: unify with RBAC] + TlsContextMatchOptions tls_context = 11; } // [#next-free-field: 12] @@ -516,14 +516,19 @@ message CorsPolicy { // Specifies whether the resource allows credentials. google.protobuf.BoolValue allow_credentials = 6; - // Specifies the % of requests for which the CORS filter is enabled. 
- // - // If neither ``enabled``, ``filter_enabled``, nor ``shadow_enabled`` are specified, the CORS - // filter will be enabled for 100% of the requests. - // - // If :ref:`runtime_key ` is - // specified, Envoy will lookup the runtime key to get the percentage of requests to filter. - core.v3.RuntimeFractionalPercent shadow_enabled = 10; + oneof enabled_specifier { + // Specifies the % of requests for which the CORS filter is enabled. + // + // If neither ``enabled``, ``filter_enabled``, nor ``shadow_enabled`` are specified, the CORS + // filter will be enabled for 100% of the requests. + // + // If :ref:`runtime_key ` is + // specified, Envoy will lookup the runtime key to get the percentage of requests to filter. + core.v3.RuntimeFractionalPercent filter_enabled = 9; + + google.protobuf.BoolValue hidden_envoy_deprecated_enabled = 7 + [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; + } // Specifies the % of requests for which the CORS policies will be evaluated and tracked, but not // enforced. @@ -534,18 +539,13 @@ message CorsPolicy { // If :ref:`runtime_key ` is specified, // Envoy will lookup the runtime key to get the percentage of requests for which it will evaluate // and track the request's *Origin* to determine if it's valid but will not enforce any policies. + core.v3.RuntimeFractionalPercent shadow_enabled = 10; + repeated string hidden_envoy_deprecated_allow_origin = 1 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; repeated string hidden_envoy_deprecated_allow_origin_regex = 8 [deprecated = true, (validate.rules).repeated = {items {string {max_bytes: 1024}}}]; - - oneof enabled_specifier { - core.v3.RuntimeFractionalPercent filter_enabled = 9; - - google.protobuf.BoolValue hidden_envoy_deprecated_enabled = 7 - [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - } } // [#next-free-field: 34] @@ -681,45 +681,45 @@ message RouteAction { string key = 1 [(validate.rules).string = {min_bytes: 1}]; } - // Header hash policy. - bool terminal = 4; - oneof policy_specifier { option (validate.required) = true; - // Cookie hash policy. + // Header hash policy. Header header = 1; - // Connection properties hash policy. + // Cookie hash policy. Cookie cookie = 2; - // Query parameter hash policy. + // Connection properties hash policy. ConnectionProperties connection_properties = 3; - // Filter state hash policy. + // Query parameter hash policy. QueryParameter query_parameter = 5; - // The flag that short-circuits the hash computing. This field provides a - // 'fallback' style of configuration: "if a terminal policy doesn't work, - // fallback to rest of the policy list", it saves time when the terminal - // policy works. - // - // If true, and there is already a hash computed, ignore rest of the - // list of hash polices. - // For example, if the following hash methods are configured: - // - // ========= ======== - // specifier terminal - // ========= ======== - // Header A true - // Header B false - // Header C false - // ========= ======== - // - // The generateHash process ends if policy "header A" generates a hash, as - // it's a terminal policy. + // Filter state hash policy. FilterState filter_state = 6; } + + // The flag that short-circuits the hash computing. This field provides a + // 'fallback' style of configuration: "if a terminal policy doesn't work, + // fallback to rest of the policy list", it saves time when the terminal + // policy works. 
+ // + // If true, and there is already a hash computed, ignore rest of the + // list of hash polices. + // For example, if the following hash methods are configured: + // + // ========= ======== + // specifier terminal + // ========= ======== + // Header A true + // Header B false + // Header C false + // ========= ======== + // + // The generateHash process ends if policy "header A" generates a hash, as + // it's a terminal policy. + bool terminal = 4; } // Allows enabling and disabling upgrades on a per-route basis. @@ -757,40 +757,44 @@ message RouteAction { reserved 12, 18, 19, 16, 22, 21; - // Indicates the upstream cluster to which the request should be routed - // to. - ClusterNotFoundResponseCode cluster_not_found_response_code = 20 - [(validate.rules).enum = {defined_only: true}]; + oneof cluster_specifier { + option (validate.required) = true; - // Envoy will determine the cluster to route to by reading the value of the - // HTTP header named by cluster_header from the request headers. If the - // header is not found or the referenced cluster does not exist, Envoy will - // return a 404 response. - // - // .. attention:: - // - // Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 - // *Host* header. Thus, if attempting to match on *Host*, match on *:authority* instead. - core.v3.Metadata metadata_match = 4; + // Indicates the upstream cluster to which the request should be routed + // to. + string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; - // Multiple upstream clusters can be specified for a given route. The - // request is routed to one of the upstream clusters based on weights - // assigned to each cluster. See - // :ref:`traffic splitting ` - // for additional documentation. - string prefix_rewrite = 5 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; + // Envoy will determine the cluster to route to by reading the value of the + // HTTP header named by cluster_header from the request headers. If the + // header is not found or the referenced cluster does not exist, Envoy will + // return a 404 response. + // + // .. attention:: + // + // Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 + // *Host* header. Thus, if attempting to match on *Host*, match on *:authority* instead. + string cluster_header = 2 + [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; + + // Multiple upstream clusters can be specified for a given route. The + // request is routed to one of the upstream clusters based on weights + // assigned to each cluster. See + // :ref:`traffic splitting ` + // for additional documentation. + WeightedCluster weighted_clusters = 3; + } // The HTTP status code to use when configured cluster is not found. // The default response code is 503 Service Unavailable. - type.matcher.v3.RegexMatchAndSubstitute regex_rewrite = 32; + ClusterNotFoundResponseCode cluster_not_found_response_code = 20 + [(validate.rules).enum = {defined_only: true}]; // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints // in the upstream cluster with metadata matching what's set in this field will be considered // for load balancing. If using :ref:`weighted_clusters // `, metadata will be merged, with values // provided there taking precedence. The filter name should be specified as *envoy.lb*. 
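An illustrative hash_policy list using the terminal short-circuit described above: if the header generates a hash, the source-IP policy is skipped (header name and cluster are hypothetical):

.. code-block:: yaml

  route:
    cluster: backend
    hash_policy:
    - header:
        header_name: x-user-id
      terminal: true
    - connection_properties:
        source_ip: true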
- google.protobuf.Duration timeout = 8; + core.v3.Metadata metadata_match = 4; // Indicates that during forwarding, the matched prefix (or path) should be // swapped with this value. This option allows application URLs to be rooted @@ -823,7 +827,8 @@ message RouteAction { // // Having above entries in the config, requests to */prefix* will be stripped to */*, while // requests to */prefix/etc* will be stripped to */etc*. - google.protobuf.Duration idle_timeout = 24; + string prefix_rewrite = 5 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; // Indicates that during forwarding, portions of the path that match the // pattern should be rewritten, even allowing the substitution of capture @@ -853,28 +858,32 @@ message RouteAction { // * The pattern ``(?i)/xxx/`` paired with a substitution string of ``/yyy/`` // would do a case-insensitive match and transform path ``/aaa/XxX/bbb`` to // ``/aaa/yyy/bbb``. - RetryPolicy retry_policy = 9; + type.matcher.v3.RegexMatchAndSubstitute regex_rewrite = 32; - // Indicates that during forwarding, the host header will be swapped with - // this value. - google.protobuf.Any retry_policy_typed_config = 33; + oneof host_rewrite_specifier { + // Indicates that during forwarding, the host header will be swapped with + // this value. + string host_rewrite_literal = 6 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - // Indicates that during forwarding, the host header will be swapped with - // the hostname of the upstream host chosen by the cluster manager. This - // option is applicable only when the destination cluster for a route is of - // type *strict_dns* or *logical_dns*. Setting this to true with other cluster - // types has no effect. - repeated RequestMirrorPolicy request_mirror_policies = 30; + // Indicates that during forwarding, the host header will be swapped with + // the hostname of the upstream host chosen by the cluster manager. This + // option is applicable only when the destination cluster for a route is of + // type *strict_dns* or *logical_dns*. Setting this to true with other cluster + // types has no effect. + google.protobuf.BoolValue auto_host_rewrite = 7; - // Indicates that during forwarding, the host header will be swapped with the content of given - // downstream or :ref:`custom ` header. - // If header value is empty, host header is left intact. - // - // .. attention:: - // - // Pay attention to the potential security implications of using this option. Provided header - // must come from trusted source. - core.v3.RoutingPriority priority = 11 [(validate.rules).enum = {defined_only: true}]; + // Indicates that during forwarding, the host header will be swapped with the content of given + // downstream or :ref:`custom ` header. + // If header value is empty, host header is left intact. + // + // .. attention:: + // + // Pay attention to the potential security implications of using this option. Provided header + // must come from trusted source. + string host_rewrite_header = 29 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; + } // Specifies the upstream timeout for the route. If not specified, the default is 15s. This // spans between the point at which the entire downstream request (i.e. 
end-of-stream) has been @@ -887,7 +896,7 @@ message RouteAction { // :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the // :ref:`retry overview `. - repeated RateLimit rate_limits = 13; + google.protobuf.Duration timeout = 8; // Specifies the idle timeout for the route. If not specified, there is no per-route idle timeout, // although the connection manager wide :ref:`stream_idle_timeout @@ -907,35 +916,35 @@ message RouteAction { // fires, the stream is terminated with a 408 Request Timeout error code if no // upstream response header has been received, otherwise a stream reset // occurs. - google.protobuf.BoolValue include_vh_rate_limits = 14; + google.protobuf.Duration idle_timeout = 24; // Indicates that the route has a retry policy. Note that if this is set, // it'll take precedence over the virtual host level retry policy entirely // (e.g.: policies are not merged, most internal one becomes the enforced policy). - repeated HashPolicy hash_policy = 15; + RetryPolicy retry_policy = 9; // [#not-implemented-hide:] // Specifies the configuration for retry policy extension. Note that if this is set, it'll take // precedence over the virtual host level retry policy entirely (e.g.: policies are not merged, // most internal one becomes the enforced policy). :ref:`Retry policy ` // should not be set if this field is used. - CorsPolicy cors = 17; + google.protobuf.Any retry_policy_typed_config = 33; // Indicates that the route has request mirroring policies. - google.protobuf.Duration max_grpc_timeout = 23; + repeated RequestMirrorPolicy request_mirror_policies = 30; // Optionally specifies the :ref:`routing priority `. - google.protobuf.Duration grpc_timeout_offset = 28; + core.v3.RoutingPriority priority = 11 [(validate.rules).enum = {defined_only: true}]; // Specifies a set of rate limit configurations that could be applied to the // route. - repeated UpgradeConfig upgrade_configs = 25; + repeated RateLimit rate_limits = 13; // Specifies if the rate limit filter should include the virtual host rate // limits. By default, if the route configured rate limits, the virtual host // :ref:`rate_limits ` are not applied to the // request. - InternalRedirectAction internal_redirect_action = 26; + google.protobuf.BoolValue include_vh_rate_limits = 14; // Specifies a list of hash policies to use for ring hash load balancing. Each // hash policy is evaluated individually and the combined result is used to @@ -949,10 +958,10 @@ message RouteAction { // backend). If a hash policy has the "terminal" attribute set to true, and // there is already a hash generated, the hash is returned immediately, // ignoring the rest of the hash policy list. - google.protobuf.UInt32Value max_internal_redirects = 31; + repeated HashPolicy hash_policy = 15; // Indicates that the route has a CORS policy. - HedgePolicy hedge_policy = 27; + CorsPolicy cors = 17; // If present, and the request is a gRPC request, use the // `grpc-timeout header `_, @@ -973,52 +982,43 @@ message RouteAction { // :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the // :ref:`retry overview `. 
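A sketch of a RouteAction combining the upstream timeout, the host_rewrite_literal variant of host_rewrite_specifier, and a retry policy (all values are illustrative):

.. code-block:: yaml

  route:
    cluster: backend
    timeout: 15s
    idle_timeout: 300s
    host_rewrite_literal: internal.example.com
    retry_policy:
      retry_on: 5xx
      num_retries: 2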
- RequestMirrorPolicy hidden_envoy_deprecated_request_mirror_policy = 10 [deprecated = true]; - - oneof cluster_specifier { - option (validate.required) = true; + google.protobuf.Duration max_grpc_timeout = 23; - // If present, Envoy will adjust the timeout provided by the `grpc-timeout` header by subtracting - // the provided duration from the header. This is useful in allowing Envoy to set its global - // timeout to be less than that of the deadline imposed by the calling client, which makes it more - // likely that Envoy will handle the timeout instead of having the call canceled by the client. - // The offset will only be applied if the provided grpc_timeout is greater than the offset. This - // ensures that the offset will only ever decrease the timeout and never set it to 0 (meaning - // infinity). - string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + // If present, Envoy will adjust the timeout provided by the `grpc-timeout` header by subtracting + // the provided duration from the header. This is useful in allowing Envoy to set its global + // timeout to be less than that of the deadline imposed by the calling client, which makes it more + // likely that Envoy will handle the timeout instead of having the call canceled by the client. + // The offset will only be applied if the provided grpc_timeout is greater than the offset. This + // ensures that the offset will only ever decrease the timeout and never set it to 0 (meaning + // infinity). + google.protobuf.Duration grpc_timeout_offset = 28; - string cluster_header = 2 - [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; + repeated UpgradeConfig upgrade_configs = 25; - WeightedCluster weighted_clusters = 3; - } + InternalRedirectAction internal_redirect_action = 26; - oneof host_rewrite_specifier { - // An internal redirect is handled, iff the number of previous internal redirects that a - // downstream request has encountered is lower than this value, and - // :ref:`internal_redirect_action ` - // is set to :ref:`HANDLE_INTERNAL_REDIRECT - // ` - // In the case where a downstream request is bounced among multiple routes by internal redirect, - // the first route that hits this threshold, or has - // :ref:`internal_redirect_action ` - // set to - // :ref:`PASS_THROUGH_INTERNAL_REDIRECT - // ` - // will pass the redirect back to downstream. - // - // If not specified, at most one redirect will be followed. - string host_rewrite_literal = 6 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; + // An internal redirect is handled, iff the number of previous internal redirects that a + // downstream request has encountered is lower than this value, and + // :ref:`internal_redirect_action ` + // is set to :ref:`HANDLE_INTERNAL_REDIRECT + // ` + // In the case where a downstream request is bounced among multiple routes by internal redirect, + // the first route that hits this threshold, or has + // :ref:`internal_redirect_action ` + // set to + // :ref:`PASS_THROUGH_INTERNAL_REDIRECT + // ` + // will pass the redirect back to downstream. + // + // If not specified, at most one redirect will be followed. + google.protobuf.UInt32Value max_internal_redirects = 31; - // Indicates that the route has a hedge policy. Note that if this is set, - // it'll take precedence over the virtual host level hedge policy entirely - // (e.g.: policies are not merged, most internal one becomes the enforced policy). 
- google.protobuf.BoolValue auto_host_rewrite = 7; + // Indicates that the route has a hedge policy. Note that if this is set, + // it'll take precedence over the virtual host level hedge policy entirely + // (e.g.: policies are not merged, most internal one becomes the enforced policy). + HedgePolicy hedge_policy = 27; - string host_rewrite_header = 29 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; - } + RequestMirrorPolicy hidden_envoy_deprecated_request_mirror_policy = 10 [deprecated = true]; } // HTTP retry :ref:`architecture overview `. @@ -1178,28 +1178,31 @@ message RedirectAction { PERMANENT_REDIRECT = 4; } - // The scheme portion of the URL will be swapped with "https". - string host_redirect = 1 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // The scheme portion of the URL will be swapped with this value. - uint32 port_redirect = 8; - - // The host portion of the URL will be swapped with this value. - RedirectResponseCode response_code = 3 [(validate.rules).enum = {defined_only: true}]; - - // The port value of the URL will be swapped with this value. - bool strip_query = 6; - // When the scheme redirection take place, the following rules apply: // 1. If the source URI scheme is `http` and the port is explicitly // set to `:80`, the port will be removed after the redirection // 2. If the source URI scheme is `https` and the port is explicitly // set to `:443`, the port will be removed after the redirection oneof scheme_rewrite_specifier { - // The path portion of the URL will be swapped with this value. + // The scheme portion of the URL will be swapped with "https". bool https_redirect = 4; + // The scheme portion of the URL will be swapped with this value. + string scheme_redirect = 7; + } + + // The host portion of the URL will be swapped with this value. + string host_redirect = 1 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; + + // The port value of the URL will be swapped with this value. + uint32 port_redirect = 8; + + oneof path_rewrite_specifier { + // The path portion of the URL will be swapped with this value. + string path_redirect = 2 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; + // Indicates that during redirection, the matched prefix (or path) // should be swapped with this value. This option allows redirect URLs be dynamically created // based on the request. @@ -1208,20 +1211,17 @@ message RedirectAction { // // Pay attention to the use of trailing slashes as mentioned in // :ref:`RouteAction's prefix_rewrite `. - string scheme_redirect = 7; - } - - oneof path_rewrite_specifier { - // The HTTP status code to use in the redirect response. The default response - // code is MOVED_PERMANENTLY (301). - string path_redirect = 2 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // Indicates that during redirection, the query portion of the URL will - // be removed. Default value is false. string prefix_rewrite = 5 [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; } + + // The HTTP status code to use in the redirect response. The default response + // code is MOVED_PERMANENTLY (301). + RedirectResponseCode response_code = 3 [(validate.rules).enum = {defined_only: true}]; + + // Indicates that during redirection, the query portion of the URL will + // be removed. Default value is false. 
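An illustrative RedirectAction combining the scheme, host, and path rewrites with strip_query (host and path are hypothetical):

.. code-block:: yaml

  redirect:
    https_redirect: true
    host_redirect: www.example.com
    path_redirect: /new-path
    response_code: PERMANENT_REDIRECT
    strip_query: true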
+ bool strip_query = 6; } message DirectResponseAction { @@ -1527,15 +1527,15 @@ message HeaderMatcher { string name = 1 [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; - // If specified, header match will be performed based on the value of the header. - bool invert_match = 8; - // Specifies how the header match will be performed to route the request. oneof header_match_specifier { + // If specified, header match will be performed based on the value of the header. + string exact_match = 4; + // If specified, this regex string is a regular expression rule which implies the entire request // header value must match the regex. The rule will not match if only a subsequence of the // request header value matches the regex. - string exact_match = 4; + type.matcher.v3.RegexMatcher safe_regex_match = 11; // If specified, header match will be performed based on range. // The rule will match if the request header value is within this range. @@ -1548,11 +1548,11 @@ message HeaderMatcher { // // * For range [-10,0), route will match for header value -1, but not for 0, "somestring", 10.9, // "-1somestring" - type.matcher.v3.RegexMatcher safe_regex_match = 11; + type.v3.Int64Range range_match = 6; // If specified, header match will be performed based on whether the header is in the // request. - type.v3.Int64Range range_match = 6; + bool present_match = 7; // If specified, header match will be performed based on the prefix of the header value. // Note: empty prefix is not allowed, please use present_match instead. @@ -1560,7 +1560,7 @@ message HeaderMatcher { // Examples: // // * The prefix *abcd* matches the value *abcdxyz*, but not for *abcxyz*. - bool present_match = 7; + string prefix_match = 9 [(validate.rules).string = {min_bytes: 1}]; // If specified, header match will be performed based on the suffix of the header value. // Note: empty suffix is not allowed, please use present_match instead. @@ -1568,14 +1568,6 @@ message HeaderMatcher { // Examples: // // * The suffix *abcd* matches the value *xyzabcd*, but not for *xyzbcd*. - string prefix_match = 9 [(validate.rules).string = {min_bytes: 1}]; - - // If specified, the match result will be inverted before checking. Defaults to false. - // - // Examples: - // - // * The regex ``\d{3}`` does not match the value *1234*, so it will match when inverted. - // * The range [-10,0) will match the value -1, so it will not match when inverted. string suffix_match = 10 [(validate.rules).string = {min_bytes: 1}]; string hidden_envoy_deprecated_regex_match = 5 [ @@ -1584,6 +1576,14 @@ message HeaderMatcher { (envoy.annotations.disallowed_by_default) = true ]; } + + // If specified, the match result will be inverted before checking. Defaults to false. + // + // Examples: + // + // * The regex ``\d{3}`` does not match the value *1234*, so it will match when inverted. + // * The range [-10,0) will match the value -1, so it will not match when inverted. + bool invert_match = 8; } // Query parameter matching treats the query string of a request's :path header @@ -1597,17 +1597,17 @@ message QueryParameterMatcher { // *path*'s query string. string name = 1 [(validate.rules).string = {min_bytes: 1 max_bytes: 1024}]; - // Specifies whether a query parameter value should match against a string. - string hidden_envoy_deprecated_value = 3 - [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - - // Specifies whether a query parameter should be present. 
- google.protobuf.BoolValue hidden_envoy_deprecated_regex = 4 - [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - oneof query_parameter_match_specifier { + // Specifies whether a query parameter value should match against a string. type.matcher.v3.StringMatcher string_match = 5 [(validate.rules).message = {required: true}]; + // Specifies whether a query parameter should be present. bool present_match = 6; } + + string hidden_envoy_deprecated_value = 3 + [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; + + google.protobuf.BoolValue hidden_envoy_deprecated_regex = 4 + [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; } diff --git a/generated_api_shadow/envoy/data/core/v3/health_check_event.proto b/generated_api_shadow/envoy/data/core/v3/health_check_event.proto index cff0e381bd19..88b195b92b3d 100644 --- a/generated_api_shadow/envoy/data/core/v3/health_check_event.proto +++ b/generated_api_shadow/envoy/data/core/v3/health_check_event.proto @@ -42,27 +42,27 @@ message HealthCheckEvent { string cluster_name = 3 [(validate.rules).string = {min_bytes: 1}]; - // Host ejection. - google.protobuf.Timestamp timestamp = 6; - oneof event { option (validate.required) = true; - // Host addition. + // Host ejection. HealthCheckEjectUnhealthy eject_unhealthy_event = 4; - // Host failure. + // Host addition. HealthCheckAddHealthy add_healthy_event = 5; - // Healthy host became degraded. + // Host failure. HealthCheckFailure health_check_failure_event = 7; - // A degraded host returned to being healthy. + // Healthy host became degraded. DegradedHealthyHost degraded_healthy_host = 8; - // Timestamp for event. + // A degraded host returned to being healthy. NoLongerDegradedHost no_longer_degraded_host = 9; } + + // Timestamp for event. + google.protobuf.Timestamp timestamp = 6; } message HealthCheckEjectUnhealthy { diff --git a/generated_api_shadow/envoy/data/tap/v3/common.proto b/generated_api_shadow/envoy/data/tap/v3/common.proto index c954b1b6747d..861da12e20c1 100644 --- a/generated_api_shadow/envoy/data/tap/v3/common.proto +++ b/generated_api_shadow/envoy/data/tap/v3/common.proto @@ -17,21 +17,21 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; message Body { option (udpa.annotations.versioning).previous_message_type = "envoy.data.tap.v2alpha.Body"; - // Body data as bytes. By default, tap body data will be present in this field, as the proto - // `bytes` type can contain any valid byte. - bool truncated = 3; - oneof body_type { + // Body data as bytes. By default, tap body data will be present in this field, as the proto + // `bytes` type can contain any valid byte. + bytes as_bytes = 1; + // Body data as string. This field is only used when the :ref:`JSON_BODY_AS_STRING // ` sink // format type is selected. See the documentation for that option for why this is useful. - bytes as_bytes = 1; - - // Specifies whether body data has been truncated to fit within the specified - // :ref:`max_buffered_rx_bytes - // ` and - // :ref:`max_buffered_tx_bytes - // ` settings. string as_string = 2; } + + // Specifies whether body data has been truncated to fit within the specified + // :ref:`max_buffered_rx_bytes + // ` and + // :ref:`max_buffered_tx_bytes + // ` settings. 
+ bool truncated = 3; } diff --git a/generated_api_shadow/envoy/extensions/filters/common/fault/v3/fault.proto b/generated_api_shadow/envoy/extensions/filters/common/fault/v3/fault.proto index a5a688468fb4..f8df4c3d16e6 100644 --- a/generated_api_shadow/envoy/extensions/filters/common/fault/v3/fault.proto +++ b/generated_api_shadow/envoy/extensions/filters/common/fault/v3/fault.proto @@ -40,26 +40,26 @@ message FaultDelay { reserved 2; - // Add a fixed delay before forwarding the operation upstream. See - // https://developers.google.com/protocol-buffers/docs/proto3#json for - // the JSON/YAML Duration mapping. For HTTP/Mongo/Redis, the specified - // delay will be injected before a new request/operation. For TCP - // connections, the proxying of the connection upstream will be delayed - // for the specified period. This is required if type is FIXED. - type.v3.FractionalPercent percentage = 4; - - // Fault delays are controlled via an HTTP header (if applicable). - FaultDelayType hidden_envoy_deprecated_type = 1 - [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - oneof fault_delay_secifier { option (validate.required) = true; - // The percentage of operations/connections/requests on which the delay will be injected. + // Add a fixed delay before forwarding the operation upstream. See + // https://developers.google.com/protocol-buffers/docs/proto3#json for + // the JSON/YAML Duration mapping. For HTTP/Mongo/Redis, the specified + // delay will be injected before a new request/operation. For TCP + // connections, the proxying of the connection upstream will be delayed + // for the specified period. This is required if type is FIXED. google.protobuf.Duration fixed_delay = 3 [(validate.rules).duration = {gt {}}]; + // Fault delays are controlled via an HTTP header (if applicable). HeaderDelay header_delay = 5; } + + // The percentage of operations/connections/requests on which the delay will be injected. + type.v3.FractionalPercent percentage = 4; + + FaultDelayType hidden_envoy_deprecated_type = 1 + [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; } // Describes a rate limit to be applied. @@ -84,16 +84,16 @@ message FaultRateLimit { "envoy.config.filter.fault.v2.FaultRateLimit.HeaderLimit"; } - // A fixed rate limit. - type.v3.FractionalPercent percentage = 2; - oneof limit_type { option (validate.required) = true; - // Rate limits are controlled via an HTTP header (if applicable). + // A fixed rate limit. FixedLimit fixed_limit = 1; - // The percentage of operations/connections/requests on which the rate limit will be injected. + // Rate limits are controlled via an HTTP header (if applicable). HeaderLimit header_limit = 3; } + + // The percentage of operations/connections/requests on which the rate limit will be injected. 
+ type.v3.FractionalPercent percentage = 2; } diff --git a/generated_api_shadow/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto b/generated_api_shadow/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto index 7ff9bb6a0f5f..3d2ef3e96d96 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto @@ -93,15 +93,15 @@ message AdaptiveConcurrency { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.adaptive_concurrency.v2alpha.AdaptiveConcurrency"; - // Gradient concurrency control will be used. - config.core.v3.RuntimeFeatureFlag enabled = 2; - oneof concurrency_controller_config { option (validate.required) = true; - // If set to false, the adaptive concurrency filter will operate as a pass-through filter. If the - // message is unspecified, the filter will be enabled. + // Gradient concurrency control will be used. GradientControllerConfig gradient_controller_config = 1 [(validate.rules).message = {required: true}]; } + + // If set to false, the adaptive concurrency filter will operate as a pass-through filter. If the + // message is unspecified, the filter will be enabled. + config.core.v3.RuntimeFeatureFlag enabled = 2; } diff --git a/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto b/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto index 64e82c7b1614..4ede2bd5abf8 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto @@ -27,11 +27,14 @@ message ExtAuthz { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.ext_authz.v2.ExtAuthz"; - // gRPC service configuration (default timeout: 200ms). - bool failure_mode_allow = 2; + // External authorization service configuration. + oneof services { + // gRPC service configuration (default timeout: 200ms). + config.core.v3.GrpcService grpc_service = 1; - // HTTP service configuration (default timeout: 200ms). - BufferSettings with_request_body = 5; + // HTTP service configuration (default timeout: 200ms). + HttpService http_service = 3; + } // Changes filter's behaviour on errors: // @@ -45,12 +48,12 @@ message ExtAuthz { // // Note that errors can be *always* tracked in the :ref:`stats // `. - bool clear_route_cache = 6; + bool failure_mode_allow = 2; // Enables filter to buffer the client request body and send it within the authorization request. // A ``x-envoy-auth-partial-body: false|true`` metadata header will be added to the authorization // request message indicating if the body data is partial. - type.v3.HttpStatus status_on_error = 7; + BufferSettings with_request_body = 5; // Clears route cache in order to allow the external authorization service to correctly affect // routing decisions. Filter clears all cached routes when: @@ -62,11 +65,11 @@ message ExtAuthz { // 3. At least one *authorization response header* is added to the client request, or is used for // altering another client request header. // - repeated string metadata_context_namespaces = 8; + bool clear_route_cache = 6; // Sets the HTTP status that is returned to the client when there is a network error between the // filter and the authorization server. The default status is HTTP 403 Forbidden. 
- config.core.v3.RuntimeFractionalPercent filter_enabled = 9; + type.v3.HttpStatus status_on_error = 7; // Specifies a list of metadata namespaces whose values, if present, will be passed to the // ext_authz service as an opaque *protobuf::Struct*. @@ -80,7 +83,7 @@ message ExtAuthz { // metadata_context_namespaces: // - envoy.filters.http.jwt_authn // - bool include_peer_certificate = 10; + repeated string metadata_context_namespaces = 8; // Specifies if the filter is enabled. // @@ -88,19 +91,16 @@ message ExtAuthz { // Envoy will lookup the runtime key to get the percentage of requests to filter. // // If this field is not specified, the filter will be enabled for all requests. - bool hidden_envoy_deprecated_use_alpha = 4 - [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; + config.core.v3.RuntimeFractionalPercent filter_enabled = 9; - // External authorization service configuration. - oneof services { - // Specifies if the peer certificate is sent to the external service. - // - // When this field is true, Envoy will include the peer X.509 certificate, if available, in the - // :ref:`certificate`. - config.core.v3.GrpcService grpc_service = 1; + // Specifies if the peer certificate is sent to the external service. + // + // When this field is true, Envoy will include the peer X.509 certificate, if available, in the + // :ref:`certificate`. + bool include_peer_certificate = 10; - HttpService http_service = 3; - } + bool hidden_envoy_deprecated_use_alpha = 4 + [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; } // Configuration for buffering the request data. diff --git a/generated_api_shadow/envoy/extensions/filters/http/fault/v3/fault.proto b/generated_api_shadow/envoy/extensions/filters/http/fault/v3/fault.proto index 9bba2f134cdf..d28ed28b1110 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/fault/v3/fault.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/fault/v3/fault.proto @@ -36,22 +36,22 @@ message FaultAbort { reserved 1; - // HTTP status code to use to abort the HTTP request. - type.v3.FractionalPercent percentage = 3; - oneof error_type { option (validate.required) = true; - // gRPC status code to use to abort the gRPC request. + // HTTP status code to use to abort the HTTP request. uint32 http_status = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}]; - // Fault aborts are controlled via an HTTP header (if applicable). + // gRPC status code to use to abort the gRPC request. uint32 grpc_status = 5; - // The percentage of requests/operations/connections that will be aborted with the error code - // provided. + // Fault aborts are controlled via an HTTP header (if applicable). HeaderAbort header_abort = 4; } + + // The percentage of requests/operations/connections that will be aborted with the error code + // provided. + type.v3.FractionalPercent percentage = 3; } // [#next-free-field: 15] diff --git a/generated_api_shadow/envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.proto b/generated_api_shadow/envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.proto index da27441f2aca..3082089202ee 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.proto @@ -45,32 +45,36 @@ message GrpcJsonTranscoder { bool preserve_proto_field_names = 4; } - // Supplies the filename of - // :ref:`the proto descriptor set ` for the gRPC - // services. 
- repeated string services = 2 [(validate.rules).repeated = {min_items: 1}]; + oneof descriptor_set { + option (validate.required) = true; - // Supplies the binary content of - // :ref:`the proto descriptor set ` for the gRPC - // services. - PrintOptions print_options = 3; + // Supplies the filename of + // :ref:`the proto descriptor set ` for the gRPC + // services. + string proto_descriptor = 1; + + // Supplies the binary content of + // :ref:`the proto descriptor set ` for the gRPC + // services. + bytes proto_descriptor_bin = 4; + } // A list of strings that // supplies the fully qualified service names (i.e. "package_name.service_name") that // the transcoder will translate. If the service name doesn't exist in ``proto_descriptor``, // Envoy will fail at startup. The ``proto_descriptor`` may contain more services than // the service names specified here, but they won't be translated. - bool match_incoming_request_route = 5; + repeated string services = 2 [(validate.rules).repeated = {min_items: 1}]; // Control options for response JSON. These options are passed directly to // `JsonPrintOptions `_. - repeated string ignored_query_parameters = 6; + PrintOptions print_options = 3; // Whether to keep the incoming request route after the outgoing headers have been transformed to // the match the upstream gRPC service. Note: This means that routes for gRPC services that are // not transcoded cannot be used in combination with *match_incoming_request_route*. - bool auto_mapping = 7; + bool match_incoming_request_route = 5; // A list of query parameters to be ignored for transcoding method mapping. // By default, the transcoder filter will not transcode a request if there are any @@ -97,7 +101,7 @@ message GrpcJsonTranscoder { // The request ``/shelves/100?foo=bar`` will not be mapped to ``GetShelf``` because variable // binding for ``foo`` is not defined. Adding ``foo`` to ``ignored_query_parameters`` will allow // the same request to be mapped to ``GetShelf``. - bool ignore_unknown_query_parameters = 8; + repeated string ignored_query_parameters = 6; // Whether to route methods without the ``google.api.http`` option. // @@ -119,45 +123,41 @@ message GrpcJsonTranscoder { // // The client could ``post`` a json body ``{"shelf": 1234}`` with the path of // ``/bookstore.Bookstore/GetShelfRequest`` to call ``GetShelfRequest``. - bool convert_grpc_status = 9; - - oneof descriptor_set { - option (validate.required) = true; + bool auto_mapping = 7; - // Whether to ignore query parameters that cannot be mapped to a corresponding - // protobuf field. Use this if you cannot control the query parameters and do - // not know them beforehand. Otherwise use ``ignored_query_parameters``. - // Defaults to false. - string proto_descriptor = 1; + // Whether to ignore query parameters that cannot be mapped to a corresponding + // protobuf field. Use this if you cannot control the query parameters and do + // not know them beforehand. Otherwise use ``ignored_query_parameters``. + // Defaults to false. + bool ignore_unknown_query_parameters = 8; - // Whether to convert gRPC status headers to JSON. - // When trailer indicates a gRPC error and there was no HTTP body, take ``google.rpc.Status`` - // from the ``grpc-status-details-bin`` header and use it as JSON body. - // If there was no such header, make ``google.rpc.Status`` out of the ``grpc-status`` and - // ``grpc-message`` headers. - // The error details types must be present in the ``proto_descriptor``. 
- // - // For example, if an upstream server replies with headers: - // - // .. code-block:: none - // - // grpc-status: 5 - // grpc-status-details-bin: - // CAUaMwoqdHlwZS5nb29nbGVhcGlzLmNvbS9nb29nbGUucnBjLlJlcXVlc3RJbmZvEgUKA3ItMQ - // - // The ``grpc-status-details-bin`` header contains a base64-encoded protobuf message - // ``google.rpc.Status``. It will be transcoded into: - // - // .. code-block:: none - // - // HTTP/1.1 404 Not Found - // content-type: application/json - // - // {"code":5,"details":[{"@type":"type.googleapis.com/google.rpc.RequestInfo","requestId":"r-1"}]} - // - // In order to transcode the message, the ``google.rpc.RequestInfo`` type from - // the ``google/rpc/error_details.proto`` should be included in the configured - // :ref:`proto descriptor set `. - bytes proto_descriptor_bin = 4; - } + // Whether to convert gRPC status headers to JSON. + // When trailer indicates a gRPC error and there was no HTTP body, take ``google.rpc.Status`` + // from the ``grpc-status-details-bin`` header and use it as JSON body. + // If there was no such header, make ``google.rpc.Status`` out of the ``grpc-status`` and + // ``grpc-message`` headers. + // The error details types must be present in the ``proto_descriptor``. + // + // For example, if an upstream server replies with headers: + // + // .. code-block:: none + // + // grpc-status: 5 + // grpc-status-details-bin: + // CAUaMwoqdHlwZS5nb29nbGVhcGlzLmNvbS9nb29nbGUucnBjLlJlcXVlc3RJbmZvEgUKA3ItMQ + // + // The ``grpc-status-details-bin`` header contains a base64-encoded protobuf message + // ``google.rpc.Status``. It will be transcoded into: + // + // .. code-block:: none + // + // HTTP/1.1 404 Not Found + // content-type: application/json + // + // {"code":5,"details":[{"@type":"type.googleapis.com/google.rpc.RequestInfo","requestId":"r-1"}]} + // + // In order to transcode the message, the ``google.rpc.RequestInfo`` type from + // the ``google/rpc/error_details.proto`` should be included in the configured + // :ref:`proto descriptor set `. + bool convert_grpc_status = 9; } diff --git a/generated_api_shadow/envoy/extensions/filters/http/grpc_stats/v3/config.proto b/generated_api_shadow/envoy/extensions/filters/http/grpc_stats/v3/config.proto index d5aca14ea530..ff56066410cb 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/grpc_stats/v3/config.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/grpc_stats/v3/config.proto @@ -28,12 +28,12 @@ message FilterConfig { // counts. bool emit_filter_state = 1; - // If set, specifies an allowlist of service/methods that will have individual stats - // emitted for them. Any call that does not match the allowlist will be counted - // in a stat with no method specifier: `cluster..grpc.*`. - bool enable_upstream_stats = 4; - oneof per_method_stat_specifier { + // If set, specifies an allowlist of service/methods that will have individual stats + // emitted for them. Any call that does not match the allowlist will be counted + // in a stat with no method specifier: `cluster..grpc.*`. + config.core.v3.GrpcMethodList individual_method_stats_allowlist = 2; + // If set to true, emit stats for all service/method names. // // If set to false, emit stats for all service/message types to the same stats without including @@ -52,16 +52,16 @@ message FilterConfig { // `stats_for_all_methods=false` in order to be safe by default. This behavior can be // controlled with runtime override // `envoy.deprecated_features.grpc_stats_filter_enable_stats_for_all_methods_by_default`. 
- config.core.v3.GrpcMethodList individual_method_stats_allowlist = 2; - - // If true, the filter will gather a histogram for the request time of the upstream. - // It works with :ref:`stats_for_all_methods - // ` - // and :ref:`individual_method_stats_allowlist - // ` the same way - // request_message_count and response_message_count works. google.protobuf.BoolValue stats_for_all_methods = 3; } + + // If true, the filter will gather a histogram for the request time of the upstream. + // It works with :ref:`stats_for_all_methods + // ` + // and :ref:`individual_method_stats_allowlist + // ` the same way + // request_message_count and response_message_count works. + bool enable_upstream_stats = 4; } // gRPC statistics filter state object in protobuf form. diff --git a/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v3/config.proto b/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v3/config.proto index 592610819bdc..39fe6187f64f 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v3/config.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v3/config.proto @@ -78,44 +78,50 @@ message JwtProvider { // repeated string audiences = 2; - // JWKS can be fetched from remote server via HTTP/HTTPS. This field specifies the remote HTTP - // URI and how the fetched JWKS should be cached. - // - // Example: - // - // .. code-block:: yaml - // - // remote_jwks: - // http_uri: - // uri: https://www.googleapis.com/oauth2/v1/certs - // cluster: jwt.www.googleapis.com|443 - // cache_duration: - // seconds: 300 - // - bool forward = 5; + // `JSON Web Key Set (JWKS) `_ is needed to + // validate signature of a JWT. This field specifies where to fetch JWKS. + oneof jwks_source_specifier { + option (validate.required) = true; - // JWKS is in local data source. It could be either in a local file or embedded in the - // inline_string. - // - // Example: local file - // - // .. code-block:: yaml - // - // local_jwks: - // filename: /etc/envoy/jwks/jwks1.txt - // - // Example: inline_string - // - // .. code-block:: yaml - // - // local_jwks: - // inline_string: ACADADADADA - // - repeated JwtHeader from_headers = 6; + // JWKS can be fetched from remote server via HTTP/HTTPS. This field specifies the remote HTTP + // URI and how the fetched JWKS should be cached. + // + // Example: + // + // .. code-block:: yaml + // + // remote_jwks: + // http_uri: + // uri: https://www.googleapis.com/oauth2/v1/certs + // cluster: jwt.www.googleapis.com|443 + // cache_duration: + // seconds: 300 + // + RemoteJwks remote_jwks = 3; + + // JWKS is in local data source. It could be either in a local file or embedded in the + // inline_string. + // + // Example: local file + // + // .. code-block:: yaml + // + // local_jwks: + // filename: /etc/envoy/jwks/jwks1.txt + // + // Example: inline_string + // + // .. code-block:: yaml + // + // local_jwks: + // inline_string: ACADADADADA + // + config.core.v3.DataSource local_jwks = 4; + } // If false, the JWT is removed in the request after a success verification. If true, the JWT is // not removed in the request. Default value is false. - repeated string from_params = 7; + bool forward = 5; // Two fields below define where to extract the JWT from an HTTP request. // @@ -142,8 +148,7 @@ message JwtProvider { // // ``x-goog-iap-jwt-assertion: ``. 
// - string forward_payload_header = 8 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; + repeated JwtHeader from_headers = 6; // JWT is sent in a query parameter. `jwt_params` represents the query parameter names. // @@ -158,39 +163,34 @@ message JwtProvider { // // /path?jwt_token= // - string payload_in_metadata = 9; - - // `JSON Web Key Set (JWKS) `_ is needed to - // validate signature of a JWT. This field specifies where to fetch JWKS. - oneof jwks_source_specifier { - option (validate.required) = true; + repeated string from_params = 7; - // This field specifies the header name to forward a successfully verified JWT payload to the - // backend. The forwarded data is:: - // - // base64url_encoded(jwt_payload_in_JSON) - // - // If it is not specified, the payload will not be forwarded. - RemoteJwks remote_jwks = 3; + // This field specifies the header name to forward a successfully verified JWT payload to the + // backend. The forwarded data is:: + // + // base64url_encoded(jwt_payload_in_JSON) + // + // If it is not specified, the payload will not be forwarded. + string forward_payload_header = 8 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; - // If non empty, successfully verified JWT payloads will be written to StreamInfo DynamicMetadata - // in the format as: *namespace* is the jwt_authn filter name as **envoy.filters.http.jwt_authn** - // The value is the *protobuf::Struct*. The value of this field will be the key for its *fields* - // and the value is the *protobuf::Struct* converted from JWT JSON payload. - // - // For example, if payload_in_metadata is *my_payload*: - // - // .. code-block:: yaml - // - // envoy.filters.http.jwt_authn: - // my_payload: - // iss: https://example.com - // sub: test@example.com - // aud: https://example.com - // exp: 1501281058 - // - config.core.v3.DataSource local_jwks = 4; - } + // If non empty, successfully verified JWT payloads will be written to StreamInfo DynamicMetadata + // in the format as: *namespace* is the jwt_authn filter name as **envoy.filters.http.jwt_authn** + // The value is the *protobuf::Struct*. The value of this field will be the key for its *fields* + // and the value is the *protobuf::Struct* converted from JWT JSON payload. + // + // For example, if payload_in_metadata is *my_payload*: + // + // .. code-block:: yaml + // + // envoy.filters.http.jwt_authn: + // my_payload: + // iss: https://example.com + // sub: test@example.com + // aud: https://example.com + // exp: 1501281058 + // + string payload_in_metadata = 9; } // This message specifies how to fetch JWKS from remote and how to cache it. diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto index 11555c3bee29..d3f5fb927ffa 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto @@ -249,59 +249,63 @@ message HttpConnectionManager { // more information. string stat_prefix = 2 [(validate.rules).string = {min_bytes: 1}]; - // The connection manager’s route table will be dynamically loaded via the RDS API. 
- repeated HttpFilter http_filters = 5; + oneof route_specifier { + option (validate.required) = true; - // The route table for the connection manager is static and is specified in this property. - google.protobuf.BoolValue add_user_agent = 6; + // The connection manager’s route table will be dynamically loaded via the RDS API. + Rds rds = 3; - // A route table will be dynamically assigned to each request based on request attributes - // (e.g., the value of a header). The "routing scopes" (i.e., route tables) and "scope keys" are - // specified in this message. - Tracing tracing = 7; + // The route table for the connection manager is static and is specified in this property. + config.route.v3.RouteConfiguration route_config = 4; + + // A route table will be dynamically assigned to each request based on request attributes + // (e.g., the value of a header). The "routing scopes" (i.e., route tables) and "scope keys" are + // specified in this message. + ScopedRoutes scoped_routes = 31; + } // A list of individual HTTP filters that make up the filter chain for // requests made to the connection manager. :ref:`Order matters ` // as the filters are processed sequentially as request events happen. - config.core.v3.HttpProtocolOptions common_http_protocol_options = 35; + repeated HttpFilter http_filters = 5; // Whether the connection manager manipulates the :ref:`config_http_conn_man_headers_user-agent` // and :ref:`config_http_conn_man_headers_downstream-service-cluster` headers. See the linked // documentation for more information. Defaults to false. - config.core.v3.Http1ProtocolOptions http_protocol_options = 8; + google.protobuf.BoolValue add_user_agent = 6; // Presence of the object defines whether the connection manager // emits :ref:`tracing ` data to the :ref:`configured tracing provider // `. - config.core.v3.Http2ProtocolOptions http2_protocol_options = 9; + Tracing tracing = 7; // Additional settings for HTTP requests handled by the connection manager. These will be // applicable to both HTTP1 and HTTP2 requests. - string server_name = 10; + config.core.v3.HttpProtocolOptions common_http_protocol_options = 35; // Additional HTTP/1 settings that are passed to the HTTP/1 codec. - ServerHeaderTransformation server_header_transformation = 34 - [(validate.rules).enum = {defined_only: true}]; + config.core.v3.Http1ProtocolOptions http_protocol_options = 8; // Additional HTTP/2 settings that are passed directly to the HTTP/2 codec. - google.protobuf.UInt32Value max_request_headers_kb = 29 - [(validate.rules).uint32 = {lte: 96 gt: 0}]; + config.core.v3.Http2ProtocolOptions http2_protocol_options = 9; // An optional override that the connection manager will write to the server // header in responses. If not set, the default is *envoy*. - google.protobuf.Duration stream_idle_timeout = 24; + string server_name = 10; // Defines the action to be applied to the Server header on the response path. // By default, Envoy will overwrite the header with the value specified in // server_name. - google.protobuf.Duration request_timeout = 28; + ServerHeaderTransformation server_header_transformation = 34 + [(validate.rules).enum = {defined_only: true}]; // The maximum request headers size for incoming connections. // If unconfigured, the default max request headers allowed is 60 KiB. // Requests that exceed this limit will receive a 431 response. // The max configurable limit is 96 KiB, based on current implementation // constraints. 
- google.protobuf.Duration drain_timeout = 12; + google.protobuf.UInt32Value max_request_headers_kb = 29 + [(validate.rules).uint32 = {lte: 96 gt: 0}]; // The stream idle timeout for connections managed by the connection manager. // If not specified, this defaults to 5 minutes. The default value was selected @@ -328,13 +332,13 @@ message HttpConnectionManager { // // A value of 0 will completely disable the connection manager stream idle // timeout, although per-route idle timeout overrides will continue to apply. - google.protobuf.Duration delayed_close_timeout = 26; + google.protobuf.Duration stream_idle_timeout = 24; // The amount of time that Envoy will wait for the entire request to be received. // The timer is activated when the request is initiated, and is disarmed when the last byte of the // request is sent upstream (i.e. all decoding filters have processed the request), OR when the // response is initiated. If not specified or set to 0, this timeout is disabled. - repeated config.accesslog.v3.AccessLog access_log = 13; + google.protobuf.Duration request_timeout = 28; // The time that Envoy will wait between sending an HTTP/2 “shutdown // notification” (GOAWAY frame with max stream ID) and a final GOAWAY frame. @@ -345,7 +349,7 @@ message HttpConnectionManager { // both when a connection hits the idle timeout or during general server // draining. The default grace period is 5000 milliseconds (5 seconds) if this // option is not specified. - google.protobuf.BoolValue use_remote_address = 14; + google.protobuf.Duration drain_timeout = 12; // The delayed close timeout is for downstream connections managed by the HTTP connection manager. // It is defined as a grace period after connection close processing has been locally initiated @@ -377,11 +381,11 @@ message HttpConnectionManager { // A value of 0 will completely disable delayed close processing. When disabled, the downstream // connection's socket will be closed immediately after the write flush is completed or will // never close if the write flush does not complete. - uint32 xff_num_trusted_hops = 19; + google.protobuf.Duration delayed_close_timeout = 26; // Configuration for :ref:`HTTP access logs ` // emitted by the connection manager. - InternalAddressConfig internal_address_config = 25; + repeated config.accesslog.v3.AccessLog access_log = 13; // If set to true, the connection manager will use the real remote address // of the client connection when determining internal versus external origin and manipulating @@ -390,20 +394,20 @@ message HttpConnectionManager { // :ref:`config_http_conn_man_headers_x-forwarded-for`, // :ref:`config_http_conn_man_headers_x-envoy-internal`, and // :ref:`config_http_conn_man_headers_x-envoy-external-address` for more information. - bool skip_xff_append = 21; + google.protobuf.BoolValue use_remote_address = 14; // The number of additional ingress proxy hops from the right side of the // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header to trust when // determining the origin client's IP address. The default is zero if this option // is not specified. See the documentation for // :ref:`config_http_conn_man_headers_x-forwarded-for` for more information. - string via = 22; + uint32 xff_num_trusted_hops = 19; // Configures what network addresses are considered internal for stats and header sanitation // purposes. If unspecified, only RFC1918 IP addresses will be considered internal. 
// See the documentation for :ref:`config_http_conn_man_headers_x-envoy-internal` for more // information about internal/external addresses. - google.protobuf.BoolValue generate_request_id = 15; + InternalAddressConfig internal_address_config = 25; // If set, Envoy will not append the remote address to the // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. This may be used in @@ -413,33 +417,33 @@ message HttpConnectionManager { // will also suppress XFF addition, it has consequences for logging and other // Envoy uses of the remote address, so *skip_xff_append* should be used // when only an elision of XFF addition is intended. - bool preserve_external_request_id = 32; + bool skip_xff_append = 21; // Via header value to append to request and response headers. If this is // empty, no via header will be appended. - bool always_set_request_id_in_response = 37; + string via = 22; // Whether the connection manager will generate the :ref:`x-request-id // ` header if it does not exist. This defaults to // true. Generating a random UUID4 is expensive so in high throughput scenarios where this feature // is not desired it can be disabled. - ForwardClientCertDetails forward_client_cert_details = 16 - [(validate.rules).enum = {defined_only: true}]; + google.protobuf.BoolValue generate_request_id = 15; // Whether the connection manager will keep the :ref:`x-request-id // ` header if passed for a request that is edge // (Edge request is the request from external clients to front Envoy) and not reset it, which // is the current Envoy behaviour. This defaults to false. - SetCurrentClientCertDetails set_current_client_cert_details = 17; + bool preserve_external_request_id = 32; // If set, Envoy will always set :ref:`x-request-id ` header in response. // If this is false or not set, the request ID is returned in responses only if tracing is forced using // :ref:`x-envoy-force-trace ` header. - bool proxy_100_continue = 18; + bool always_set_request_id_in_response = 37; // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP // header. - bool represent_ipv4_remote_address_as_ipv4_mapped_ipv6 = 20; + ForwardClientCertDetails forward_client_cert_details = 16 + [(validate.rules).enum = {defined_only: true}]; // This field is valid only when :ref:`forward_client_cert_details // ` @@ -448,13 +452,13 @@ message HttpConnectionManager { // :ref:`config_http_conn_man_headers_x-forwarded-client-cert` header, *Hash* is always set, and // *By* is always set when the client certificate presents the URI type Subject Alternative Name // value. - repeated UpgradeConfig upgrade_configs = 23; + SetCurrentClientCertDetails set_current_client_cert_details = 17; // If proxy_100_continue is true, Envoy will proxy incoming "Expect: // 100-continue" headers upstream, and forward "100 Continue" responses // downstream. If this is false or not set, Envoy will instead strip the // "Expect: 100-continue" header, and send a "100 Continue" response itself. - google.protobuf.BoolValue normalize_path = 30; + bool proxy_100_continue = 18; // If // :ref:`use_remote_address @@ -469,9 +473,9 @@ message HttpConnectionManager { // ` for runtime // control. // [#not-implemented-hide:] - bool merge_slashes = 33; + bool represent_ipv4_remote_address_as_ipv4_mapped_ipv6 = 20; - RequestIDExtension request_id_extension = 36; + repeated UpgradeConfig upgrade_configs = 23; // Should paths be normalized according to RFC 3986 before any processing of // requests by HTTP filters or routing? 
This affects the upstream *:path* header @@ -484,33 +488,29 @@ message HttpConnectionManager { // for details of normalization. // Note that Envoy does not perform // `case normalization ` - google.protobuf.Duration hidden_envoy_deprecated_idle_timeout = 11 - [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - - oneof route_specifier { - option (validate.required) = true; + google.protobuf.BoolValue normalize_path = 30; - // Determines if adjacent slashes in the path are merged into one before any processing of - // requests by HTTP filters or routing. This affects the upstream *:path* header as well. Without - // setting this option, incoming requests with path `//dir///file` will not match against route - // with `prefix` match set to `/dir`. Defaults to `false`. Note that slash merging is not part of - // `HTTP spec ` and is provided for convenience. - Rds rds = 3; + // Determines if adjacent slashes in the path are merged into one before any processing of + // requests by HTTP filters or routing. This affects the upstream *:path* header as well. Without + // setting this option, incoming requests with path `//dir///file` will not match against route + // with `prefix` match set to `/dir`. Defaults to `false`. Note that slash merging is not part of + // `HTTP spec ` and is provided for convenience. + bool merge_slashes = 33; - // The configuration of the request ID extension. This includes operations such as - // generation, validation, and associated tracing operations. - // - // If not set, Envoy uses the default UUID-based behavior: - // - // 1. Request ID is propagated using *x-request-id* header. - // - // 2. Request ID is a universally unique identifier (UUID). - // - // 3. Tracing decision (sampled, forced, etc) is set in 14th byte of the UUID. - config.route.v3.RouteConfiguration route_config = 4; + // The configuration of the request ID extension. This includes operations such as + // generation, validation, and associated tracing operations. + // + // If not set, Envoy uses the default UUID-based behavior: + // + // 1. Request ID is propagated using *x-request-id* header. + // + // 2. Request ID is a universally unique identifier (UUID). + // + // 3. Tracing decision (sampled, forced, etc) is set in 14th byte of the UUID. + RequestIDExtension request_id_extension = 36; - ScopedRoutes scoped_routes = 31; - } + google.protobuf.Duration hidden_envoy_deprecated_idle_timeout = 11 + [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; } message Rds { diff --git a/generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.proto index 6024a6d552bc..27d187ed2c33 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.proto @@ -99,18 +99,22 @@ message TcpProxy { // `. string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; - // The upstream cluster to connect to. - config.core.v3.Metadata metadata_match = 9; + oneof cluster_specifier { + option (validate.required) = true; - // Multiple upstream clusters can be specified for a given route. The - // request is routed to one of the upstream clusters based on weights - // assigned to each cluster. - google.protobuf.Duration idle_timeout = 8; + // The upstream cluster to connect to. 
+ string cluster = 2; + + // Multiple upstream clusters can be specified for a given route. The + // request is routed to one of the upstream clusters based on weights + // assigned to each cluster. + WeightedCluster weighted_clusters = 10; + } // Optional endpoint metadata match criteria. Only endpoints in the upstream // cluster with metadata matching that set in metadata_match will be // considered. The filter name should be specified as *envoy.lb*. - google.protobuf.Duration downstream_idle_timeout = 3; + config.core.v3.Metadata metadata_match = 9; // The idle timeout for connections managed by the TCP proxy filter. The idle timeout // is defined as the period in which there are no bytes sent or received on either @@ -120,7 +124,7 @@ message TcpProxy { // .. warning:: // Disabling this timeout has a highly likelihood of yielding connection leaks due to lost TCP // FIN packets, etc. - google.protobuf.Duration upstream_idle_timeout = 4; + google.protobuf.Duration idle_timeout = 8; // [#not-implemented-hide:] The idle timeout for connections managed by the TCP proxy // filter. The idle timeout is defined as the period in which there is no @@ -128,33 +132,29 @@ message TcpProxy { // is reached the connection will be closed. The distinction between // downstream_idle_timeout/upstream_idle_timeout provides a means to set // timeout based on the last byte sent on the downstream/upstream connection. - repeated config.accesslog.v3.AccessLog access_log = 5; + google.protobuf.Duration downstream_idle_timeout = 3; // [#not-implemented-hide:] - google.protobuf.UInt32Value max_connect_attempts = 7 [(validate.rules).uint32 = {gte: 1}]; + google.protobuf.Duration upstream_idle_timeout = 4; // Configuration for :ref:`access logs ` // emitted by the this tcp_proxy. - repeated type.v3.HashPolicy hash_policy = 11 [(validate.rules).repeated = {max_items: 1}]; + repeated config.accesslog.v3.AccessLog access_log = 5; // The maximum number of unsuccessful connection attempts that will be made before // giving up. If the parameter is not specified, 1 connection attempt will be made. - TunnelingConfig tunneling_config = 12; + google.protobuf.UInt32Value max_connect_attempts = 7 [(validate.rules).uint32 = {gte: 1}]; // Optional configuration for TCP proxy hash policy. If hash_policy is not set, the hash-based // load balancing algorithms will select a host randomly. Currently the number of hash policies is // limited to 1. - DeprecatedV1 hidden_envoy_deprecated_deprecated_v1 = 6 [deprecated = true]; - - oneof cluster_specifier { - option (validate.required) = true; + repeated type.v3.HashPolicy hash_policy = 11 [(validate.rules).repeated = {max_items: 1}]; - // [#not-implemented-hide:] feature in progress - // If set, this configures tunneling, e.g. configuration options to tunnel multiple TCP - // payloads over a shared HTTP/2 tunnel. If this message is absent, the payload - // will be proxied upstream as per usual. - string cluster = 2; + // [#not-implemented-hide:] feature in progress + // If set, this configures tunneling, e.g. configuration options to tunnel multiple TCP + // payloads over a shared HTTP/2 tunnel. If this message is absent, the payload + // will be proxied upstream as per usual. 
+ TunnelingConfig tunneling_config = 12; - WeightedCluster weighted_clusters = 10; - } + DeprecatedV1 hidden_envoy_deprecated_deprecated_v1 = 6 [deprecated = true]; } diff --git a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/route.proto b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/route.proto index 3eeae0cba594..5ce18fd06233 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/route.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/route.proto @@ -46,39 +46,39 @@ message RouteMatch { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.thrift_proxy.v2alpha1.RouteMatch"; - // If specified, the route must exactly match the request method name. As a special case, an - // empty string matches any request method name. - bool invert = 3; - - // If specified, the route must have the service name as the request method name prefix. As a - // special case, an empty string matches any service name. Only relevant when service - // multiplexing. - repeated config.route.v3.HeaderMatcher headers = 4; - oneof match_specifier { option (validate.required) = true; - // Inverts whatever matching is done in the :ref:`method_name - // ` or - // :ref:`service_name - // ` fields. - // Cannot be combined with wildcard matching as that would result in routes never being matched. - // - // .. note:: - // - // This does not invert matching done as part of the :ref:`headers field - // ` field. To - // invert header matching, see :ref:`invert_match - // `. + // If specified, the route must exactly match the request method name. As a special case, an + // empty string matches any request method name. string method_name = 1; - // Specifies a set of headers that the route should match on. The router will check the request’s - // headers against all the specified headers in the route config. A match will happen if all the - // headers in the route are present in the request with the same values (or based on presence if - // the value field is not in the config). Note that this only applies for Thrift transports and/or - // protocols that support headers. + // If specified, the route must have the service name as the request method name prefix. As a + // special case, an empty string matches any service name. Only relevant when service + // multiplexing. string service_name = 2; } + + // Inverts whatever matching is done in the :ref:`method_name + // ` or + // :ref:`service_name + // ` fields. + // Cannot be combined with wildcard matching as that would result in routes never being matched. + // + // .. note:: + // + // This does not invert matching done as part of the :ref:`headers field + // ` field. To + // invert header matching, see :ref:`invert_match + // `. + bool invert = 3; + + // Specifies a set of headers that the route should match on. The router will check the request’s + // headers against all the specified headers in the route config. A match will happen if all the + // headers in the route are present in the request with the same values (or based on presence if + // the value field is not in the config). Note that this only applies for Thrift transports and/or + // protocols that support headers. 
+ repeated config.route.v3.HeaderMatcher headers = 4; } // [#next-free-field: 7] @@ -86,42 +86,42 @@ message RouteAction { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.thrift_proxy.v2alpha1.RouteAction"; - // Indicates a single upstream cluster to which the request should be routed - // to. - config.core.v3.Metadata metadata_match = 3; - - // Multiple upstream clusters can be specified for a given route. The - // request is routed to one of the upstream clusters based on weights - // assigned to each cluster. - repeated config.route.v3.RateLimit rate_limits = 4; - - // Envoy will determine the cluster to route to by reading the value of the - // Thrift header named by cluster_header from the request headers. If the - // header is not found or the referenced cluster does not exist Envoy will - // respond with an unknown method exception or an internal error exception, - // respectively. - bool strip_service_name = 5; - oneof cluster_specifier { option (validate.required) = true; - // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in - // the upstream cluster with metadata matching what is set in this field will be considered. - // Note that this will be merged with what's provided in :ref:`WeightedCluster.metadata_match - // `, - // with values there taking precedence. Keys and values should be provided under the "envoy.lb" - // metadata key. + // Indicates a single upstream cluster to which the request should be routed + // to. string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; - // Specifies a set of rate limit configurations that could be applied to the route. - // N.B. Thrift service or method name matching can be achieved by specifying a RequestHeaders - // action with the header name ":method-name". + // Multiple upstream clusters can be specified for a given route. The + // request is routed to one of the upstream clusters based on weights + // assigned to each cluster. WeightedCluster weighted_clusters = 2; - // Strip the service prefix from the method name, if there's a prefix. For - // example, the method call Service:method would end up being just method. + // Envoy will determine the cluster to route to by reading the value of the + // Thrift header named by cluster_header from the request headers. If the + // header is not found or the referenced cluster does not exist Envoy will + // respond with an unknown method exception or an internal error exception, + // respectively. string cluster_header = 6 [(validate.rules).string = {min_bytes: 1}]; } + + // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in + // the upstream cluster with metadata matching what is set in this field will be considered. + // Note that this will be merged with what's provided in :ref:`WeightedCluster.metadata_match + // `, + // with values there taking precedence. Keys and values should be provided under the "envoy.lb" + // metadata key. + config.core.v3.Metadata metadata_match = 3; + + // Specifies a set of rate limit configurations that could be applied to the route. + // N.B. Thrift service or method name matching can be achieved by specifying a RequestHeaders + // action with the header name ":method-name". + repeated config.route.v3.RateLimit rate_limits = 4; + + // Strip the service prefix from the method name, if there's a prefix. For + // example, the method call Service:method would end up being just method. 
+ bool strip_service_name = 5; } // Allows for specification of multiple upstream clusters along with weights that indicate the diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/tls.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/tls.proto index 97bb3fe64e7f..a6fc2d62b97c 100644 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/tls.proto +++ b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/tls.proto @@ -67,16 +67,13 @@ message DownstreamTlsContext { // [#not-implemented-hide:] google.protobuf.BoolValue require_sni = 3; - // TLS session ticket key settings. - google.protobuf.Duration session_timeout = 6 [(validate.rules).duration = { - lt {seconds: 4294967296} - gte {} - }]; - oneof session_ticket_keys_type { - // Config for fetching TLS session ticket keys via SDS API. + // TLS session ticket key settings. TlsSessionTicketKeys session_ticket_keys = 4; + // Config for fetching TLS session ticket keys via SDS API. + SdsSecretConfig session_ticket_keys_sds_secret_config = 5; + // Config for controlling stateless TLS session resumption: setting this to true will cause the TLS // server to not issue TLS session tickets for the purposes of stateless TLS session resumption. // If set to false, the TLS server will issue TLS session tickets and encrypt/decrypt them using @@ -85,14 +82,17 @@ message DownstreamTlsContext { // If this config is set to false and no keys are explicitly configured, the TLS server will issue // TLS session tickets and encrypt/decrypt them using an internally-generated and managed key, with the // implication that sessions cannot be resumed across hot restarts or on different hosts. - SdsSecretConfig session_ticket_keys_sds_secret_config = 5; - - // If specified, session_timeout will change maximum lifetime (in seconds) of TLS session - // Currently this value is used as a hint to `TLS session ticket lifetime (for TLSv1.2) - // ` - // only seconds could be specified (fractional seconds are going to be ignored). bool disable_stateless_session_resumption = 7; } + + // If specified, session_timeout will change maximum lifetime (in seconds) of TLS session + // Currently this value is used as a hint to `TLS session ticket lifetime (for TLSv1.2) + // ` + // only seconds could be specified (fractional seconds are going to be ignored). + google.protobuf.Duration session_timeout = 6 [(validate.rules).duration = { + lt {seconds: 4294967296} + gte {} + }]; } // TLS context shared by both client and server TLS contexts. @@ -130,13 +130,13 @@ message CommonTlsContext { repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6 [(validate.rules).repeated = {max_items: 1}]; - // How to validate peer certificates. - repeated string alpn_protocols = 4; - oneof validation_context_type { - // Config for fetching validation context via SDS API. + // How to validate peer certificates. CertificateValidationContext validation_context = 3; + // Config for fetching validation context via SDS API. + SdsSecretConfig validation_context_sds_secret_config = 7; + // Combined certificate validation context holds a default CertificateValidationContext // and SDS config. 
When SDS server returns dynamic CertificateValidationContext, both dynamic // and default CertificateValidationContext are merged into a new CertificateValidationContext @@ -144,18 +144,18 @@ message CommonTlsContext { // CertificateValidationContext overwrites singular fields in default // CertificateValidationContext, and concatenates repeated fields to default // CertificateValidationContext, and logical OR is applied to boolean fields. - SdsSecretConfig validation_context_sds_secret_config = 7; - - // Supplies the list of ALPN protocols that the listener should expose. In - // practice this is likely to be set to one of two values (see the - // :ref:`codec_type - // ` - // parameter in the HTTP connection manager for more information): - // - // * "h2,http/1.1" If the listener is going to support both HTTP/2 and HTTP/1.1. - // * "http/1.1" If the listener is only going to support HTTP/1.1. - // - // There is no default for this parameter. If empty, Envoy will not expose ALPN. CombinedCertificateValidationContext combined_validation_context = 8; } + + // Supplies the list of ALPN protocols that the listener should expose. In + // practice this is likely to be set to one of two values (see the + // :ref:`codec_type + // ` + // parameter in the HTTP connection manager for more information): + // + // * "h2,http/1.1" If the listener is going to support both HTTP/2 and HTTP/1.1. + // * "http/1.1" If the listener is only going to support HTTP/1.1. + // + // There is no default for this parameter. If empty, Envoy will not expose ALPN. + repeated string alpn_protocols = 4; } diff --git a/generated_api_shadow/envoy/type/matcher/v3/regex.proto b/generated_api_shadow/envoy/type/matcher/v3/regex.proto index 5de7fd9baf54..e318cb5457d9 100644 --- a/generated_api_shadow/envoy/type/matcher/v3/regex.proto +++ b/generated_api_shadow/envoy/type/matcher/v3/regex.proto @@ -36,15 +36,15 @@ message RegexMatcher { google.protobuf.UInt32Value max_program_size = 1 [deprecated = true]; } - // Google's RE2 regex engine. - string regex = 2 [(validate.rules).string = {min_bytes: 1}]; - oneof engine_type { option (validate.required) = true; - // The regex match string. The string must be supported by the configured engine. + // Google's RE2 regex engine. GoogleRE2 google_re2 = 1 [(validate.rules).message = {required: true}]; } + + // The regex match string. The string must be supported by the configured engine. + string regex = 2 [(validate.rules).string = {min_bytes: 1}]; } // Describes how to match a string and then produce a new string using a regular diff --git a/generated_api_shadow/envoy/type/matcher/v3/string.proto b/generated_api_shadow/envoy/type/matcher/v3/string.proto index 2f9d43de40dc..1c55202a7b77 100644 --- a/generated_api_shadow/envoy/type/matcher/v3/string.proto +++ b/generated_api_shadow/envoy/type/matcher/v3/string.proto @@ -21,23 +21,23 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; message StringMatcher { option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.StringMatcher"; - // The input string must match exactly the string specified here. - // - // Examples: - // - // * *abc* only matches the value *abc*. - bool ignore_case = 6; - oneof match_pattern { option (validate.required) = true; + // The input string must match exactly the string specified here. + // + // Examples: + // + // * *abc* only matches the value *abc*. + string exact = 1; + // The input string must have the prefix specified here. 
// Note: empty prefix is not allowed, please use regex instead. // // Examples: // // * *abc* matches the value *abc.xyz* - string exact = 1; + string prefix = 2 [(validate.rules).string = {min_bytes: 1}]; // The input string must have the suffix specified here. // Note: empty prefix is not allowed, please use regex instead. @@ -45,14 +45,9 @@ message StringMatcher { // Examples: // // * *abc* matches the value *xyz.abc* - string prefix = 2 [(validate.rules).string = {min_bytes: 1}]; - - // The input string must match the regular expression specified here. string suffix = 3 [(validate.rules).string = {min_bytes: 1}]; - // If true, indicates the exact/prefix/suffix matching should be case insensitive. This has no - // effect for the safe_regex match. - // For example, the matcher *data* will match both input string *Data* and *data* if set to true. + // The input string must match the regular expression specified here. RegexMatcher safe_regex = 5 [(validate.rules).message = {required: true}]; string hidden_envoy_deprecated_regex = 4 [ @@ -61,6 +56,11 @@ message StringMatcher { (envoy.annotations.disallowed_by_default) = true ]; } + + // If true, indicates the exact/prefix/suffix matching should be case insensitive. This has no + // effect for the safe_regex match. + // For example, the matcher *data* will match both input string *Data* and *data* if set to true. + bool ignore_case = 6; } // Specifies a list of ways to match a string. diff --git a/tools/protoxform/BUILD b/tools/protoxform/BUILD index d5d46bce81ee..631a4f9585e4 100644 --- a/tools/protoxform/BUILD +++ b/tools/protoxform/BUILD @@ -4,6 +4,7 @@ py_binary( name = "merge_active_shadow", srcs = ["merge_active_shadow.py"], deps = [ + "//tools/api_proto_plugin", "@com_envoyproxy_protoc_gen_validate//validate:validate_py", "@com_github_cncf_udpa//udpa/annotations:pkg_py_proto", "@com_google_googleapis//google/api:annotations_py_proto", @@ -17,6 +18,7 @@ py_test( srcs = ["merge_active_shadow_test.py"], deps = [ ":merge_active_shadow", + "//tools/api_proto_plugin", "@com_google_protobuf//:protobuf_python", ], ) diff --git a/tools/protoxform/merge_active_shadow.py b/tools/protoxform/merge_active_shadow.py index 13f6f8c63bcc..5d2cd029526c 100644 --- a/tools/protoxform/merge_active_shadow.py +++ b/tools/protoxform/merge_active_shadow.py @@ -4,10 +4,13 @@ # 2. Recovering deprecated (sub)message types. # 3. Misc. fixups for oneof metadata and reserved ranges/names. +from collections import defaultdict import copy import pathlib import sys +from tools.api_proto_plugin import type_context as api_type_context + from google.protobuf import descriptor_pb2 from google.protobuf import text_format @@ -22,7 +25,7 @@ from udpa.annotations import versioning_pb2 as _ -# Set reserved_range in target_proto to reflex previous_reserved_range skipping +# Set reserved_range in target_proto to reflect previous_reserved_range skipping # skip_reserved_numbers. def AdjustReservedRange(target_proto, previous_reserved_range, skip_reserved_numbers): del target_proto.reserved_range[:] @@ -61,49 +64,112 @@ def MergeActiveShadowEnum(active_proto, shadow_proto, target_proto): tv.CopyFrom(sv) +# Adjust source code info comments path to reflect insertions of oneof fields +# inside the middle of an existing collection of fields. 
+def AdjustSourceCodeInfo(type_context, field_index, field_adjustment): + + def HasPathPrefix(s, t): + return len(s) <= len(t) and all(p[0] == p[1] for p in zip(s, t)) + + for loc in type_context.source_code_info.proto.location: + if HasPathPrefix(type_context.path + [2], loc.path): + path_field_index = len(type_context.path) + 1 + if path_field_index < len(loc.path) and loc.path[path_field_index] >= field_index: + loc.path[path_field_index] += field_adjustment + + # Merge active/shadow DescriptorProtos to a fresh target DescriptorProto. -def MergeActiveShadowMessage(active_proto, shadow_proto, target_proto): +def MergeActiveShadowMessage(type_context, active_proto, shadow_proto, target_proto): target_proto.MergeFrom(active_proto) if not shadow_proto: return shadow_fields = {f.name: f for f in shadow_proto.field} skip_reserved_numbers = [] # For every reserved name, check to see if it's in the shadow, and if so, - # reintroduce in target_proto. + # reintroduce in target_proto. We track both the normal fields we need to add + # back in (extra_simple_fields) and those that belong to oneofs + # (extra_oneof_fields). The latter require special treatment, as we can't just + # append them to the end of the message, they need to be reordered. + extra_simple_fields = [] + extra_oneof_fields = defaultdict(list) # oneof index -> list of fields del target_proto.reserved_name[:] for n in active_proto.reserved_name: hidden_n = 'hidden_envoy_deprecated_' + n if hidden_n in shadow_fields: f = shadow_fields[hidden_n] skip_reserved_numbers.append(f.number) - missing_field = target_proto.field.add() - missing_field.MergeFrom(f) + missing_field = copy.deepcopy(f) # oneof fields from the shadow need to have their index set to the # corresponding index in active/target_proto. if missing_field.HasField('oneof_index'): oneof_name = shadow_proto.oneof_decl[missing_field.oneof_index].name missing_oneof_index = None - for oneof_index, oneof_decl in enumerate(active_proto.oneof_decl): + for oneof_index, oneof_decl in enumerate(target_proto.oneof_decl): if oneof_decl.name == oneof_name: missing_oneof_index = oneof_index - assert (missing_oneof_index is not None) + if missing_oneof_index is None: + missing_oneof_index = len(target_proto.oneof_decl) + target_proto.oneof_decl.add().MergeFrom( + shadow_proto.oneof_decl[missing_field.oneof_index]) missing_field.oneof_index = missing_oneof_index + extra_oneof_fields[missing_oneof_index].append(missing_field) + else: + extra_simple_fields.append(missing_field) else: target_proto.reserved_name.append(n) - # protoprint.py expects that oneof fields are consecutive, so need to sort for - # this. - if len(active_proto.oneof_decl) > 0: - fields = copy.deepcopy(target_proto.field) - fields.sort(key=lambda f: f.oneof_index if f.HasField('oneof_index') else -1) - del target_proto.field[:] - for f in fields: - target_proto.field.append(f) + # Copy existing fields, as we need to nuke them. + existing_fields = copy.deepcopy(target_proto.field) + del target_proto.field[:] + # Rebuild fields, taking into account extra_oneof_fields. protoprint.py + # expects that oneof fields are consecutive, so need to sort for this. + current_oneof_index = None + + def AppendExtraOneofFields(current_oneof_index, last_oneof_field_index): + # Add fields from extra_oneof_fields for current_oneof_index. 
+ for oneof_f in extra_oneof_fields[current_oneof_index]: + target_proto.field.add().MergeFrom(oneof_f) + field_adjustment = len(extra_oneof_fields[current_oneof_index]) + # Fixup the comments in source code info. Note that this is really + # inefficient, O(N^2) in the worst case, but since we have relatively few + # deprecated fields, is the easiest to implement method. + if last_oneof_field_index is not None: + AdjustSourceCodeInfo(type_context, last_oneof_field_index, field_adjustment) + del extra_oneof_fields[current_oneof_index] + return field_adjustment + + field_index = 0 + for f in existing_fields: + if current_oneof_index is not None: + field_oneof_index = f.oneof_index if f.HasField('oneof_index') else None + # Are we exiting the oneof? If so, add the respective extra_one_fields. + if field_oneof_index != current_oneof_index: + field_index += AppendExtraOneofFields(current_oneof_index, field_index) + current_oneof_index = field_oneof_index + elif f.HasField('oneof_index'): + current_oneof_index = f.oneof_index + target_proto.field.add().MergeFrom(f) + field_index += 1 + if current_oneof_index is not None: + # No need to adjust source code info here, since there are no comments for + # trailing deprecated fields, so just set field index to None. + AppendExtraOneofFields(current_oneof_index, None) + # Non-oneof fields are easy to treat, we just append them to the existing + # fields. They don't get any comments, but that's fine in the generated + # shadows. + for f in extra_simple_fields: + target_proto.field.add().MergeFrom(f) + for oneof_index in sorted(extra_oneof_fields.keys()): + for f in extra_oneof_fields[oneof_index]: + target_proto.field.add().MergeFrom(f) + # Same is true for oneofs that are exclusively from the shadow. AdjustReservedRange(target_proto, active_proto.reserved_range, skip_reserved_numbers) # Visit nested message types del target_proto.nested_type[:] shadow_msgs = {msg.name: msg for msg in shadow_proto.nested_type} - for msg in active_proto.nested_type: - MergeActiveShadowMessage(msg, shadow_msgs.get(msg.name), target_proto.nested_type.add()) + for index, msg in enumerate(active_proto.nested_type): + MergeActiveShadowMessage( + type_context.ExtendNestedMessage(index, msg.name, msg.options.deprecated), msg, + shadow_msgs.get(msg.name), target_proto.nested_type.add()) # Visit nested enum types del target_proto.enum_type[:] shadow_enums = {msg.name: msg for msg in shadow_proto.enum_type} @@ -119,11 +185,16 @@ def MergeActiveShadowMessage(active_proto, shadow_proto, target_proto): # Merge active/shadow FileDescriptorProtos, returning a the resulting FileDescriptorProto. 
def MergeActiveShadowFile(active_file_proto, shadow_file_proto): target_file_proto = copy.deepcopy(active_file_proto) + source_code_info = api_type_context.SourceCodeInfo(target_file_proto.name, + target_file_proto.source_code_info) + package_type_context = api_type_context.TypeContext(source_code_info, target_file_proto.package) # Visit message types del target_file_proto.message_type[:] shadow_msgs = {msg.name: msg for msg in shadow_file_proto.message_type} - for msg in active_file_proto.message_type: - MergeActiveShadowMessage(msg, shadow_msgs.get(msg.name), target_file_proto.message_type.add()) + for index, msg in enumerate(active_file_proto.message_type): + MergeActiveShadowMessage( + package_type_context.ExtendMessage(index, msg.name, msg.options.deprecated), msg, + shadow_msgs.get(msg.name), target_file_proto.message_type.add()) # Visit enum types del target_file_proto.enum_type[:] shadow_enums = {msg.name: msg for msg in shadow_file_proto.enum_type} diff --git a/tools/protoxform/merge_active_shadow_test.py b/tools/protoxform/merge_active_shadow_test.py index 2fb4c983945d..7a2961fb1c50 100644 --- a/tools/protoxform/merge_active_shadow_test.py +++ b/tools/protoxform/merge_active_shadow_test.py @@ -2,11 +2,18 @@ import merge_active_shadow +from tools.api_proto_plugin import type_context as api_type_context + from google.protobuf import descriptor_pb2 from google.protobuf import text_format class MergeActiveShadowTest(unittest.TestCase): + # Dummy type context for tests that don't care about this. + def fakeTypeContext(self): + fake_source_code_info = descriptor_pb2.SourceCodeInfo() + source_code_info = api_type_context.SourceCodeInfo('fake', fake_source_code_info) + return api_type_context.TypeContext(source_code_info, 'fake_package') # Poor man's text proto equivalence. Tensorflow has better tools for this, # i.e. assertProto2Equal. 
@@ -118,6 +125,271 @@ def testMergeActiveShadowEnum(self): """ self.assertTextProtoEq(target_pb_text, str(target_proto)) + def testMergeActiveShadowMessageComments(self): + """MergeActiveShadowMessage preserves comment field correspondence.""" + active_pb_text = """ +field { + number: 9 + name: "oneof_1_0" + oneof_index: 0 +} +field { + number: 1 + name: "simple_field_0" +} +field { + number: 0 + name: "oneof_2_0" + oneof_index: 2 +} +field { + number: 8 + name: "oneof_2_1" + oneof_index: 2 +} +field { + number: 3 + name: "oneof_0_0" + oneof_index: 1 +} +field { + number: 4 + name: "newbie" +} +field { + number: 7 + name: "oneof_3_0" + oneof_index: 3 +} +reserved_name: "missing_oneof_field_0" +reserved_name: "missing_oneof_field_1" +reserved_name: "missing_oneof_field_2" +oneof_decl { + name: "oneof_0" +} +oneof_decl { + name: "oneof_1" +} +oneof_decl { + name: "oneof_2" +} +oneof_decl { + name: "oneof_3" +} + """ + active_proto = descriptor_pb2.DescriptorProto() + text_format.Merge(active_pb_text, active_proto) + active_source_code_info_text = """ +location { + path: [4, 1, 2, 4] + leading_comments: "field_4" +} +location { + path: [4, 1, 2, 5] + leading_comments: "field_5" +} +location { + path: [4, 1, 2, 3] + leading_comments: "field_3" +} +location { + path: [4, 1, 2, 0] + leading_comments: "field_0" +} +location { + path: [4, 1, 2, 1] + leading_comments: "field_1" +} +location { + path: [4, 0, 2, 2] + leading_comments: "ignore_0" +} +location { + path: [4, 1, 2, 6] + leading_comments: "field_6" +} +location { + path: [4, 1, 2, 2] + leading_comments: "field_2" +} +location { + path: [3] + leading_comments: "ignore_1" +} +""" + active_source_code_info = descriptor_pb2.SourceCodeInfo() + text_format.Merge(active_source_code_info_text, active_source_code_info) + shadow_pb_text = """ +field { + number: 10 + name: "hidden_envoy_deprecated_missing_oneof_field_0" + oneof_index: 0 +} +field { + number: 11 + name: "hidden_envoy_deprecated_missing_oneof_field_1" + oneof_index: 3 +} +field { + number: 11 + name: "hidden_envoy_deprecated_missing_oneof_field_2" + oneof_index: 2 +} +oneof_decl { + name: "oneof_0" +} +oneof_decl { + name: "oneof_1" +} +oneof_decl { + name: "oneof_2" +} +oneof_decl { + name: "some_removed_oneof" +} +oneof_decl { + name: "oneof_3" +} +""" + shadow_proto = descriptor_pb2.DescriptorProto() + text_format.Merge(shadow_pb_text, shadow_proto) + target_proto = descriptor_pb2.DescriptorProto() + source_code_info = api_type_context.SourceCodeInfo('fake', active_source_code_info) + fake_type_context = api_type_context.TypeContext(source_code_info, 'fake_package') + merge_active_shadow.MergeActiveShadowMessage(fake_type_context.ExtendMessage(1, "foo", False), + active_proto, shadow_proto, target_proto) + target_pb_text = """ +field { + name: "oneof_1_0" + number: 9 + oneof_index: 0 +} +field { + name: "hidden_envoy_deprecated_missing_oneof_field_0" + number: 10 + oneof_index: 0 +} +field { + name: "simple_field_0" + number: 1 +} +field { + name: "oneof_2_0" + number: 0 + oneof_index: 2 +} +field { + name: "oneof_2_1" + number: 8 + oneof_index: 2 +} +field { + name: "hidden_envoy_deprecated_missing_oneof_field_2" + number: 11 + oneof_index: 2 +} +field { + name: "oneof_0_0" + number: 3 + oneof_index: 1 +} +field { + name: "newbie" + number: 4 +} +field { + name: "oneof_3_0" + number: 7 + oneof_index: 3 +} +field { + name: "hidden_envoy_deprecated_missing_oneof_field_1" + number: 11 + oneof_index: 4 +} +oneof_decl { + name: "oneof_0" +} +oneof_decl { + name: "oneof_1" +} 
+oneof_decl { + name: "oneof_2" +} +oneof_decl { + name: "oneof_3" +} +oneof_decl { + name: "some_removed_oneof" +} + """ + target_source_code_info_text = """ +location { + path: 4 + path: 1 + path: 2 + path: 6 + leading_comments: "field_4" +} +location { + path: 4 + path: 1 + path: 2 + path: 7 + leading_comments: "field_5" +} +location { + path: 4 + path: 1 + path: 2 + path: 4 + leading_comments: "field_3" +} +location { + path: 4 + path: 1 + path: 2 + path: 0 + leading_comments: "field_0" +} +location { + path: 4 + path: 1 + path: 2 + path: 2 + leading_comments: "field_1" +} +location { + path: 4 + path: 0 + path: 2 + path: 2 + leading_comments: "ignore_0" +} +location { + path: 4 + path: 1 + path: 2 + path: 8 + leading_comments: "field_6" +} +location { + path: 4 + path: 1 + path: 2 + path: 3 + leading_comments: "field_2" +} +location { + path: 3 + leading_comments: "ignore_1" +} +""" + self.maxDiff = None + self.assertTextProtoEq(target_pb_text, str(target_proto)) + self.assertTextProtoEq(target_source_code_info_text, + str(fake_type_context.source_code_info.proto)) + def testMergeActiveShadowMessage(self): """MergeActiveShadowMessage recovers shadow fields with oneofs.""" active_pb_text = """ @@ -180,20 +452,13 @@ def testMergeActiveShadowMessage(self): shadow_proto = descriptor_pb2.DescriptorProto() text_format.Merge(shadow_pb_text, shadow_proto) target_proto = descriptor_pb2.DescriptorProto() - merge_active_shadow.MergeActiveShadowMessage(active_proto, shadow_proto, target_proto) + merge_active_shadow.MergeActiveShadowMessage(self.fakeTypeContext(), active_proto, shadow_proto, + target_proto) target_pb_text = """ field { name: "foo" number: 1 } -field { - name: "baz" - number: 3 -} -field { - name: "newbie" - number: 4 -} field { name: "bar" number: 0 @@ -204,6 +469,14 @@ def testMergeActiveShadowMessage(self): number: 2 oneof_index: 2 } +field { + name: "baz" + number: 3 +} +field { + name: "newbie" + number: 4 +} oneof_decl { name: "ign" } @@ -222,7 +495,8 @@ def testMergeActiveShadowMessageNoShadowMessage(self): shadow_proto = descriptor_pb2.DescriptorProto() active_proto.nested_type.add().name = 'foo' target_proto = descriptor_pb2.DescriptorProto() - merge_active_shadow.MergeActiveShadowMessage(active_proto, shadow_proto, target_proto) + merge_active_shadow.MergeActiveShadowMessage(self.fakeTypeContext(), active_proto, shadow_proto, + target_proto) self.assertEqual(target_proto.nested_type[0].name, 'foo') def testMergeActiveShadowMessageNoShadowEnum(self): @@ -231,7 +505,8 @@ def testMergeActiveShadowMessageNoShadowEnum(self): shadow_proto = descriptor_pb2.DescriptorProto() active_proto.enum_type.add().name = 'foo' target_proto = descriptor_pb2.DescriptorProto() - merge_active_shadow.MergeActiveShadowMessage(active_proto, shadow_proto, target_proto) + merge_active_shadow.MergeActiveShadowMessage(self.fakeTypeContext(), active_proto, shadow_proto, + target_proto) self.assertEqual(target_proto.enum_type[0].name, 'foo') def testMergeActiveShadowMessageMissing(self): @@ -240,7 +515,8 @@ def testMergeActiveShadowMessageMissing(self): shadow_proto = descriptor_pb2.DescriptorProto() shadow_proto.nested_type.add().name = 'foo' target_proto = descriptor_pb2.DescriptorProto() - merge_active_shadow.MergeActiveShadowMessage(active_proto, shadow_proto, target_proto) + merge_active_shadow.MergeActiveShadowMessage(self.fakeTypeContext(), active_proto, shadow_proto, + target_proto) self.assertEqual(target_proto.nested_type[0].name, 'foo') def testMergeActiveShadowFileMissing(self): From 
b4873a38379d239d0c53fecb94a11b2fcdbab3f9 Mon Sep 17 00:00:00 2001 From: Teju Nareddy Date: Thu, 7 May 2020 09:25:52 -0400 Subject: [PATCH 109/909] Silence all proto logs in fuzz tests (#10939) Description: OSS Fuzz reports a large number for the avg_unwanted_log_lines statistic in each fuzz test. Reduce the number of logs by silencing all libprotobuf non-fatal logging while running fuzz tests. Signed-off-by: Teju Nareddy --- test/fuzz/fuzz_runner.cc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/test/fuzz/fuzz_runner.cc b/test/fuzz/fuzz_runner.cc index 508f1e922c43..c7cbcdfa08b7 100644 --- a/test/fuzz/fuzz_runner.cc +++ b/test/fuzz/fuzz_runner.cc @@ -47,6 +47,12 @@ void Runner::setupEnvironment(int argc, char** argv, spdlog::level::level_enum d static auto* logging_context = new Logger::Context(log_level_, TestEnvironment::getOptions().logFormat(), *lock, false); UNREFERENCED_PARAMETER(logging_context); + + // Suppress all libprotobuf non-fatal logging as long as this object exists. + // For fuzzing, this prevents logging when parsing text-format protos fails, + // deprecated fields are used, etc. + // https://github.com/protocolbuffers/protobuf/blob/204f99488ce1ef74565239cf3963111ae4c774b7/src/google/protobuf/stubs/logging.h#L223 + ABSL_ATTRIBUTE_UNUSED static auto* log_silencer = new Protobuf::LogSilencer(); } } // namespace Fuzz From 10e7c9de4c210fd801aa487d3a35f8fa9ff9be0a Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Thu, 7 May 2020 11:22:34 -0400 Subject: [PATCH 110/909] test: improving test coverage by removing unused functions (#11086) Risk Level: Low (may be used downstream) Testing: tests pass Docs Changes: n/a Release Notes: n/a Signed-off-by: Alyssa Wilk --- source/common/common/hash.cc | 6 +++--- source/common/common/hash.h | 16 +--------------- source/common/common/logger.h | 1 - source/common/common/utility.h | 10 ---------- source/common/config/grpc_mux_impl.cc | 7 +------ source/common/config/grpc_mux_impl.h | 1 - source/common/grpc/google_async_client_impl.h | 7 ------- source/common/http/http1/conn_pool.h | 2 -- source/common/stats/symbol_table_impl.h | 17 ----------------- source/common/upstream/subset_lb.h | 7 ------- source/common/upstream/upstream_impl.h | 4 ---- .../extensions/clusters/redis/redis_cluster.cc | 2 +- .../common/ext_authz/ext_authz_http_impl.h | 5 ----- .../http/aws_lambda/aws_lambda_filter.h | 1 - .../extensions/filters/http/cache/http_cache.h | 18 ++++++------------ .../filters/http/ext_authz/ext_authz.h | 14 ++++---------- .../http/ip_tagging/ip_tagging_filter.h | 1 - .../filters/network/dubbo_proxy/decoder.h | 5 ----- .../filters/network/mongo_proxy/mongo_stats.h | 2 -- .../network/mysql_proxy/mysql_codec_command.h | 2 -- .../mysql_proxy/mysql_codec_switch_resp.h | 1 - .../filters/network/redis_proxy/config.h | 12 ++++-------- .../network/redis_proxy/conn_pool_impl.cc | 4 ++-- .../sni_dynamic_forward_proxy/proxy_filter.cc | 5 ++--- .../sni_dynamic_forward_proxy/proxy_filter.h | 2 -- .../thrift_proxy/filters/ratelimit/ratelimit.h | 1 - .../thrift_proxy/header_transport_impl.h | 9 --------- .../network/thrift_proxy/protocol_converter.h | 3 --- .../network/thrift_proxy/router/router_impl.h | 4 ---- .../filters/udp/dns_filter/dns_filter.h | 10 ---------- .../filters/udp/dns_filter/dns_parser.h | 5 ----- .../extensions/health_checkers/redis/redis.cc | 2 +- .../tracers/datadog/datadog_tracer_impl.cc | 4 ++-- .../tracers/datadog/datadog_tracer_impl.h | 3 --- .../tracers/zipkin/span_context_extractor.h | 1 - 
source/extensions/tracers/zipkin/tracer.h | 15 --------------- source/server/listener_impl.h | 4 ---- 37 files changed, 27 insertions(+), 186 deletions(-) diff --git a/source/common/common/hash.cc b/source/common/common/hash.cc index 76fcf9a1df59..eb1e0765da87 100644 --- a/source/common/common/hash.cc +++ b/source/common/common/hash.cc @@ -19,7 +19,7 @@ uint64_t MurmurHash::murmurHash2_64(absl::string_view key, uint64_t seed) { const char* const end = buf + len_aligned; uint64_t hash = seed ^ (len * mul); for (const char* p = buf; p != end; p += 8) { - const uint64_t data = shift_mix(unaligned_load(p) * mul) * mul; + const uint64_t data = shiftMix(unaligned_load(p) * mul) * mul; hash ^= data; hash *= mul; } @@ -29,8 +29,8 @@ uint64_t MurmurHash::murmurHash2_64(absl::string_view key, uint64_t seed) { hash ^= data; hash *= mul; } - hash = shift_mix(hash) * mul; - hash = shift_mix(hash); + hash = shiftMix(hash) * mul; + hash = shiftMix(hash); return hash; } diff --git a/source/common/common/hash.h b/source/common/common/hash.h index 29c007274208..30d27b9b2022 100644 --- a/source/common/common/hash.h +++ b/source/common/common/hash.h @@ -74,23 +74,9 @@ class MurmurHash { return result; } - static inline uint64_t shift_mix(uint64_t v) { return v ^ (v >> 47); } + static inline uint64_t shiftMix(uint64_t v) { return v ^ (v >> 47); } }; -struct ConstCharStarHash { - size_t operator()(const char* a) const { return HashUtil::xxHash64(a); } -}; - -struct ConstCharStarEqual { - size_t operator()(const char* a, const char* b) const { return strcmp(a, b) == 0; } -}; - -template -using ConstCharStarHashMap = - absl::flat_hash_map; -using ConstCharStarHashSet = - absl::flat_hash_set; - using SharedString = std::shared_ptr; struct HeterogeneousStringHash { diff --git a/source/common/common/logger.h b/source/common/common/logger.h index b7a095eb8c9e..3b2fd61db5bf 100644 --- a/source/common/common/logger.h +++ b/source/common/common/logger.h @@ -124,7 +124,6 @@ class StderrSinkDelegate : public SinkDelegate { bool hasLock() const { return lock_ != nullptr; } void setLock(Thread::BasicLockable& lock) { lock_ = &lock; } void clearLock() { lock_ = nullptr; } - Thread::BasicLockable* lock() { return lock_; } private: Thread::BasicLockable* lock_{}; diff --git a/source/common/common/utility.h b/source/common/common/utility.h index f1214148f3d7..8cab7c8a47c9 100644 --- a/source/common/common/utility.h +++ b/source/common/common/utility.h @@ -700,16 +700,6 @@ class InlineString : public InlineStorage { */ absl::string_view toStringView() const { return {data_, size_}; } - /** - * @return the number of bytes in the string - */ - size_t size() const { return size_; } - - /** - * @return a pointer to the first byte of the string. - */ - const char* data() const { return data_; } - private: // Constructor is declared private so that no one constructs one without the // proper size allocation. to accommodate the variable-size buffer. 
diff --git a/source/common/config/grpc_mux_impl.cc b/source/common/config/grpc_mux_impl.cc index 1e495a2a7f94..6cc17325bd50 100644 --- a/source/common/config/grpc_mux_impl.cc +++ b/source/common/config/grpc_mux_impl.cc @@ -229,11 +229,6 @@ void GrpcMuxImpl::queueDiscoveryRequest(const std::string& queue_item) { drainRequests(); } -void GrpcMuxImpl::clearRequestQueue() { - grpc_stream_.maybeUpdateQueueSizeStat(0); - request_queue_ = {}; -} - void GrpcMuxImpl::drainRequests() { while (!request_queue_.empty() && grpc_stream_.checkRateLimitAllowsDrain()) { // Process the request, if rate limiting is not enabled at all or if it is under rate limit. @@ -244,4 +239,4 @@ void GrpcMuxImpl::drainRequests() { } } // namespace Config -} // namespace Envoy \ No newline at end of file +} // namespace Envoy diff --git a/source/common/config/grpc_mux_impl.h b/source/common/config/grpc_mux_impl.h index d3572bf71f09..cf93c899d284 100644 --- a/source/common/config/grpc_mux_impl.h +++ b/source/common/config/grpc_mux_impl.h @@ -118,7 +118,6 @@ class GrpcMuxImpl : public GrpcMux, // Request queue management logic. void queueDiscoveryRequest(const std::string& queue_item); - void clearRequestQueue(); GrpcStream diff --git a/source/common/grpc/google_async_client_impl.h b/source/common/grpc/google_async_client_impl.h index 3f4c48c3e911..a29d8ebff6f0 100644 --- a/source/common/grpc/google_async_client_impl.h +++ b/source/common/grpc/google_async_client_impl.h @@ -58,13 +58,6 @@ struct GoogleAsyncTag { GoogleAsyncStreamImpl& stream_; const Operation op_; - - // Generate a void* tag for a given Operation. - static void* tag(Operation op) { return reinterpret_cast(op); } - // Extract Operation from void* tag. - static Operation operation(void* tag) { - return static_cast(reinterpret_cast(tag)); - } }; class GoogleAsyncClientThreadLocal : public ThreadLocal::ThreadLocalObject, diff --git a/source/common/http/http1/conn_pool.h b/source/common/http/http1/conn_pool.h index 0d9665f18446..8211664df592 100644 --- a/source/common/http/http1/conn_pool.h +++ b/source/common/http/http1/conn_pool.h @@ -79,8 +79,6 @@ class ConnPoolImpl : public ConnPoolImplBase { void onDownstreamReset(ActiveClient& client); void onResponseComplete(ActiveClient& client); - ActiveClient& firstReady() const { return static_cast(*ready_clients_.front()); } - ActiveClient& firstBusy() const { return static_cast(*busy_clients_.front()); } Event::TimerPtr upstream_ready_timer_; bool upstream_ready_enabled_{false}; diff --git a/source/common/stats/symbol_table_impl.h b/source/common/stats/symbol_table_impl.h index 9121e56739ce..97e16172814b 100644 --- a/source/common/stats/symbol_table_impl.h +++ b/source/common/stats/symbol_table_impl.h @@ -118,11 +118,6 @@ class SymbolTableImpl : public SymbolTable { return data_bytes_required_ + encodingSizeBytes(data_bytes_required_); } - /** - * @return the number of uint8_t entries we collected while adding symbols. - */ - uint64_t dataBytesRequired() const { return data_bytes_required_; } - /** * Moves the contents of the vector into an allocated array. The array * must have been allocated with bytesRequired() bytes. 
@@ -174,8 +169,6 @@ class SymbolTableImpl : public SymbolTable { */ static std::pair decodeNumber(const uint8_t* encoding); - StoragePtr release() { return mem_block_.release(); } - private: uint64_t data_bytes_required_{0}; MemBlockBuilder mem_block_; @@ -464,11 +457,6 @@ class StatName { const uint8_t* dataIncludingSize() const { return size_and_data_; } - /** - * @return A pointer to the buffer, including the size bytes. - */ - const uint8_t* sizeAndData() const { return size_and_data_; } - /** * @return whether this is empty. */ @@ -607,11 +595,6 @@ class StatNameDynamicPool { public: explicit StatNameDynamicPool(SymbolTable& symbol_table) : symbol_table_(symbol_table) {} - /** - * Removes all StatNames from the pool. - */ - void clear() { storage_vector_.clear(); } - /** * @param name the name to add the container. * @return the StatName held in the container for this name. diff --git a/source/common/upstream/subset_lb.h b/source/common/upstream/subset_lb.h index 29d984210fa3..e5880cd16115 100644 --- a/source/common/upstream/subset_lb.h +++ b/source/common/upstream/subset_lb.h @@ -63,9 +63,6 @@ class SubsetLoadBalancer : public LoadBalancer, Logger::Loggable(&getOrCreateHostSet(priority)); - } - void triggerCallbacks() { for (size_t i = 0; i < hostSetsPerPriority().size(); ++i) { runReferenceUpdateCallbacks(i, {}, {}); diff --git a/source/common/upstream/upstream_impl.h b/source/common/upstream/upstream_impl.h index 382605591d9d..f8472ca05dc5 100644 --- a/source/common/upstream/upstream_impl.h +++ b/source/common/upstream/upstream_impl.h @@ -707,7 +707,6 @@ class ClusterImplBase : public Cluster, protected Logger::Loggable partitionHostsPerLocality(const HostsPerLocality& hosts); - Stats::SymbolTable& symbolTable() { return symbol_table_; } Config::ConstMetadataSharedPoolSharedPtr constMetadataSharedPool() { return const_metadata_shared_pool_; } @@ -816,9 +815,6 @@ class PriorityStateManager : protected Logger::Loggable { const absl::optional health_checker_flag, absl::optional overprovisioning_factor = absl::nullopt); - // Returns the size of the current cluster priority state. - size_t size() const { return priority_state_.size(); } - // Returns the saved priority state. 
PriorityState& priorityState() { return priority_state_; } diff --git a/source/extensions/clusters/redis/redis_cluster.cc b/source/extensions/clusters/redis/redis_cluster.cc index 0d07abb071d2..6c0e04bdfbc1 100644 --- a/source/extensions/clusters/redis/redis_cluster.cc +++ b/source/extensions/clusters/redis/redis_cluster.cc @@ -46,7 +46,7 @@ RedisCluster::RedisCluster( local_info_(factory_context.localInfo()), random_(factory_context.random()), redis_discovery_session_(*this, redis_client_factory), lb_factory_(std::move(lb_factory)), auth_password_( - NetworkFilters::RedisProxy::ProtocolOptionsConfigImpl::auth_password(info(), api)), + NetworkFilters::RedisProxy::ProtocolOptionsConfigImpl::authPassword(info(), api)), cluster_name_(cluster.name()), refresh_manager_(Common::Redis::getClusterRefreshManager( factory_context.singletonManager(), factory_context.dispatcher(), diff --git a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.h b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.h index 51956c41d2db..c1b017bb741a 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.h +++ b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.h @@ -98,11 +98,6 @@ class ClientConfig { */ const MatcherSharedPtr& upstreamHeaderMatchers() const { return upstream_header_matchers_; } - /** - * Returns a list of headers that will be add to the authorization request. - */ - const Http::LowerCaseStrPairVector& headersToAdd() const { return authorization_headers_to_add_; } - /** * Returns the name used for tracing. */ diff --git a/source/extensions/filters/http/aws_lambda/aws_lambda_filter.h b/source/extensions/filters/http/aws_lambda/aws_lambda_filter.h index 6611128143d6..82bfdaf85cf2 100644 --- a/source/extensions/filters/http/aws_lambda/aws_lambda_filter.h +++ b/source/extensions/filters/http/aws_lambda/aws_lambda_filter.h @@ -83,7 +83,6 @@ class FilterSettings : public Router::RouteSpecificFilterConfig { : arn_(arn), invocation_mode_(mode), payload_passthrough_(payload_passthrough) {} const Arn& arn() const& { return arn_; } - Arn&& arn() && { return std::move(arn_); } bool payloadPassthrough() const { return payload_passthrough_; } InvocationMode invocationMode() const { return invocation_mode_; } diff --git a/source/extensions/filters/http/cache/http_cache.h b/source/extensions/filters/http/cache/http_cache.h index 05e07a84fd7c..de3dde3120f4 100644 --- a/source/extensions/filters/http/cache/http_cache.h +++ b/source/extensions/filters/http/cache/http_cache.h @@ -165,18 +165,6 @@ class LookupRequest { // Caches may modify the key according to local needs, though care must be // taken to ensure that meaningfully distinct responses have distinct keys. const Key& key() const { return key_; } - Key& key() { return key_; } - - // Returns the subset of this request's headers that are listed in - // envoy::extensions::filters::http::cache::v3alpha::CacheConfig::allowed_vary_headers. If a cache - // storage implementation forwards lookup requests to a remote cache server that supports *vary* - // headers, that server may need to see these headers. For local implementations, it may be - // simpler to instead call makeLookupResult with each potential response. - HeaderVector& vary_headers() { return vary_headers_; } - const HeaderVector& vary_headers() const { return vary_headers_; } - - // Time when this LookupRequest was created (in response to an HTTP request). 
- SystemTime timestamp() const { return timestamp_; } // WARNING: Incomplete--do not use in production (yet). // Returns a LookupResult suitable for sending to the cache filter's @@ -195,7 +183,13 @@ class LookupRequest { Key key_; std::vector request_range_spec_; + // Time when this LookupRequest was created (in response to an HTTP request). SystemTime timestamp_; + // The subset of this request's headers that are listed in + // envoy::extensions::filters::http::cache::v3alpha::CacheConfig::allowed_vary_headers. If a cache + // storage implementation forwards lookup requests to a remote cache server that supports *vary* + // headers, that server may need to see these headers. For local implementations, it may be + // simpler to instead call makeLookupResult with each potential response. HeaderVector vary_headers_; const std::string request_cache_control_; }; diff --git a/source/extensions/filters/http/ext_authz/ext_authz.h b/source/extensions/filters/http/ext_authz/ext_authz.h index 56ce8b5a3074..57388120480f 100644 --- a/source/extensions/filters/http/ext_authz/ext_authz.h +++ b/source/extensions/filters/http/ext_authz/ext_authz.h @@ -58,15 +58,14 @@ struct ExtAuthzFilterStats { class FilterConfig { public: FilterConfig(const envoy::extensions::filters::http::ext_authz::v3::ExtAuthz& config, - const LocalInfo::LocalInfo& local_info, Stats::Scope& scope, - Runtime::Loader& runtime, Http::Context& http_context, - const std::string& stats_prefix) + const LocalInfo::LocalInfo&, Stats::Scope& scope, Runtime::Loader& runtime, + Http::Context& http_context, const std::string& stats_prefix) : allow_partial_message_(config.with_request_body().allow_partial_message()), failure_mode_allow_(config.failure_mode_allow()), clear_route_cache_(config.clear_route_cache()), max_request_bytes_(config.with_request_body().max_request_bytes()), - status_on_error_(toErrorCode(config.status_on_error().code())), local_info_(local_info), - scope_(scope), runtime_(runtime), http_context_(http_context), + status_on_error_(toErrorCode(config.status_on_error().code())), scope_(scope), + runtime_(runtime), http_context_(http_context), filter_enabled_(config.has_filter_enabled() ? absl::optional( Runtime::FractionalPercent(config.filter_enabled(), runtime_)) @@ -90,14 +89,10 @@ class FilterConfig { uint32_t maxRequestBytes() const { return max_request_bytes_; } - const LocalInfo::LocalInfo& localInfo() const { return local_info_; } - Http::Code statusOnError() const { return status_on_error_; } bool filterEnabled() { return filter_enabled_.has_value() ? 
filter_enabled_->enabled() : true; } - Runtime::Loader& runtime() { return runtime_; } - Stats::Scope& scope() { return scope_; } Http::Context& httpContext() { return http_context_; } @@ -133,7 +128,6 @@ class FilterConfig { const bool clear_route_cache_; const uint32_t max_request_bytes_; const Http::Code status_on_error_; - const LocalInfo::LocalInfo& local_info_; Stats::Scope& scope_; Runtime::Loader& runtime_; Http::Context& http_context_; diff --git a/source/extensions/filters/http/ip_tagging/ip_tagging_filter.h b/source/extensions/filters/http/ip_tagging/ip_tagging_filter.h index a4bc92897110..a37c5b9006a8 100644 --- a/source/extensions/filters/http/ip_tagging/ip_tagging_filter.h +++ b/source/extensions/filters/http/ip_tagging/ip_tagging_filter.h @@ -36,7 +36,6 @@ class IpTaggingFilterConfig { Runtime::Loader& runtime); Runtime::Loader& runtime() { return runtime_; } - Stats::Scope& scope() { return scope_; } FilterRequestType requestType() const { return request_type_; } const Network::LcTrie::LcTrie& trie() const { return *trie_; } diff --git a/source/extensions/filters/network/dubbo_proxy/decoder.h b/source/extensions/filters/network/dubbo_proxy/decoder.h index 2723633c79a6..d180ba13e4e0 100644 --- a/source/extensions/filters/network/dubbo_proxy/decoder.h +++ b/source/extensions/filters/network/dubbo_proxy/decoder.h @@ -91,11 +91,6 @@ class DecoderStateMachine : public Logger::Loggable { */ ProtocolState currentState() const { return state_; } - /** - * Set the current state. Used for testing only. - */ - void setCurrentState(ProtocolState state) { state_ = state; } - private: struct DecoderStatus { DecoderStatus() = default; diff --git a/source/extensions/filters/network/mongo_proxy/mongo_stats.h b/source/extensions/filters/network/mongo_proxy/mongo_stats.h index 3571c19bbca2..b19561df6788 100644 --- a/source/extensions/filters/network/mongo_proxy/mongo_stats.h +++ b/source/extensions/filters/network/mongo_proxy/mongo_stats.h @@ -32,8 +32,6 @@ class MongoStats { return stat_name_set_->getBuiltin(str, fallback); } - Stats::SymbolTable& symbolTable() { return scope_.symbolTable(); } - private: Stats::ElementVec addPrefix(const Stats::ElementVec& names); diff --git a/source/extensions/filters/network/mysql_proxy/mysql_codec_command.h b/source/extensions/filters/network/mysql_proxy/mysql_codec_command.h index 776218be0c51..6f4ae5239e1f 100644 --- a/source/extensions/filters/network/mysql_proxy/mysql_codec_command.h +++ b/source/extensions/filters/network/mysql_proxy/mysql_codec_command.h @@ -60,8 +60,6 @@ class CommandResponse : public MySQLCodec { int parseMessage(Buffer::Instance&, uint32_t) override { return MYSQL_SUCCESS; } std::string encode() override { return ""; } - uint16_t getServerStatus() const { return server_status_; } - uint16_t getWarnings() const { return warnings_; } void setServerStatus(uint16_t status); void setWarnings(uint16_t warnings); diff --git a/source/extensions/filters/network/mysql_proxy/mysql_codec_switch_resp.h b/source/extensions/filters/network/mysql_proxy/mysql_codec_switch_resp.h index 5ed3d70c9c35..7b26ad7bcf27 100644 --- a/source/extensions/filters/network/mysql_proxy/mysql_codec_switch_resp.h +++ b/source/extensions/filters/network/mysql_proxy/mysql_codec_switch_resp.h @@ -14,7 +14,6 @@ class ClientSwitchResponse : public MySQLCodec { int parseMessage(Buffer::Instance& buffer, uint32_t len) override; std::string encode() override; - const std::string& getAuthPluginResp() const { return auth_plugin_resp_; } void setAuthPluginResp(std::string& 
auth_swith_resp); private: diff --git a/source/extensions/filters/network/redis_proxy/config.h b/source/extensions/filters/network/redis_proxy/config.h index 521ae76a9d9f..e13d0cda331e 100644 --- a/source/extensions/filters/network/redis_proxy/config.h +++ b/source/extensions/filters/network/redis_proxy/config.h @@ -26,20 +26,16 @@ class ProtocolOptionsConfigImpl : public Upstream::ProtocolOptionsConfig { proto_config) : auth_password_(proto_config.auth_password()) {} - std::string auth_password(Api::Api& api) const { + std::string authPassword(Api::Api& api) const { return Config::DataSource::read(auth_password_, true, api); } - const envoy::config::core::v3::DataSource& auth_password_datasource() const { - return auth_password_; - } - - static const std::string auth_password(const Upstream::ClusterInfoConstSharedPtr info, - Api::Api& api) { + static const std::string authPassword(const Upstream::ClusterInfoConstSharedPtr info, + Api::Api& api) { auto options = info->extensionProtocolOptionsTyped( NetworkFilterNames::get().RedisProxy); if (options) { - return options->auth_password(api); + return options->authPassword(api); } return EMPTY_STRING; } diff --git a/source/extensions/filters/network/redis_proxy/conn_pool_impl.cc b/source/extensions/filters/network/redis_proxy/conn_pool_impl.cc index fdc70b71ec99..9bc15b0f14a6 100644 --- a/source/extensions/filters/network/redis_proxy/conn_pool_impl.cc +++ b/source/extensions/filters/network/redis_proxy/conn_pool_impl.cc @@ -74,7 +74,7 @@ InstanceImpl::ThreadLocalPool::ThreadLocalPool(InstanceImpl& parent, Event::Disp cluster_update_handle_ = parent_.cm_.addThreadLocalClusterUpdateCallbacks(*this); Upstream::ThreadLocalCluster* cluster = parent_.cm_.get(cluster_name_); if (cluster != nullptr) { - auth_password_ = ProtocolOptionsConfigImpl::auth_password(cluster->info(), parent_.api_); + auth_password_ = ProtocolOptionsConfigImpl::authPassword(cluster->info(), parent_.api_); onClusterAddOrUpdateNonVirtual(*cluster); } } @@ -102,7 +102,7 @@ void InstanceImpl::ThreadLocalPool::onClusterAddOrUpdateNonVirtual( if (cluster_ != nullptr) { // Treat an update as a removal followed by an add. 
- onClusterRemoval(cluster_name_); + ThreadLocalPool::onClusterRemoval(cluster_name_); } ASSERT(cluster_ == nullptr); diff --git a/source/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter.cc b/source/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter.cc index d9eeceb12098..115901dd544e 100644 --- a/source/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter.cc +++ b/source/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter.cc @@ -15,11 +15,10 @@ namespace SniDynamicForwardProxy { ProxyFilterConfig::ProxyFilterConfig( const FilterConfig& proto_config, Extensions::Common::DynamicForwardProxy::DnsCacheManagerFactory& cache_manager_factory, - Upstream::ClusterManager& cluster_manager) + Upstream::ClusterManager&) : port_(static_cast(proto_config.port_value())), dns_cache_manager_(cache_manager_factory.get()), - dns_cache_(dns_cache_manager_->getCache(proto_config.dns_cache_config())), - cluster_manager_(cluster_manager) {} + dns_cache_(dns_cache_manager_->getCache(proto_config.dns_cache_config())) {} ProxyFilter::ProxyFilter(ProxyFilterConfigSharedPtr config) : config_(std::move(config)) {} diff --git a/source/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter.h b/source/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter.h index 49f66aba3fb6..e171cb3b0cb5 100644 --- a/source/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter.h +++ b/source/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter.h @@ -24,14 +24,12 @@ class ProxyFilterConfig { Upstream::ClusterManager& cluster_manager); Extensions::Common::DynamicForwardProxy::DnsCache& cache() { return *dns_cache_; } - Upstream::ClusterManager& clusterManager() { return cluster_manager_; } uint32_t port() { return port_; } private: const uint32_t port_; const Extensions::Common::DynamicForwardProxy::DnsCacheManagerSharedPtr dns_cache_manager_; const Extensions::Common::DynamicForwardProxy::DnsCacheSharedPtr dns_cache_; - Upstream::ClusterManager& cluster_manager_; }; using ProxyFilterConfigSharedPtr = std::shared_ptr; diff --git a/source/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit.h b/source/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit.h index 5226c0356a6d..90a244ca78e0 100644 --- a/source/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit.h +++ b/source/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit.h @@ -38,7 +38,6 @@ class Config { const std::string& domain() const { return domain_; } const LocalInfo::LocalInfo& localInfo() const { return local_info_; } uint32_t stage() const { return stage_; } - Stats::Scope& scope() { return scope_; } Runtime::Loader& runtime() { return runtime_; } Upstream::ClusterManager& cm() { return cm_; } bool failureModeAllow() const { return !failure_mode_deny_; }; diff --git a/source/extensions/filters/network/thrift_proxy/header_transport_impl.h b/source/extensions/filters/network/thrift_proxy/header_transport_impl.h index c2d97ead1607..e7eb6d3e18ac 100644 --- a/source/extensions/filters/network/thrift_proxy/header_transport_impl.h +++ b/source/extensions/filters/network/thrift_proxy/header_transport_impl.h @@ -45,15 +45,6 @@ class HeaderTransportImpl : public Transport { const char* desc); static void writeVarString(Buffer::Instance& buffer, const absl::string_view str); - void setException(AppExceptionType type, std::string reason) { - if (exception_.has_value()) { - return; - } - - exception_ = type; - 
exception_reason_ = reason; - } - absl::optional exception_; std::string exception_reason_; }; diff --git a/source/extensions/filters/network/thrift_proxy/protocol_converter.h b/source/extensions/filters/network/thrift_proxy/protocol_converter.h index 47ab48ac8204..2d73f4c9498b 100644 --- a/source/extensions/filters/network/thrift_proxy/protocol_converter.h +++ b/source/extensions/filters/network/thrift_proxy/protocol_converter.h @@ -122,9 +122,6 @@ class ProtocolConverter : public virtual DecoderEventHandler { return FilterStatus::Continue; } -protected: - ProtocolType protocolType() const { return proto_->type(); } - private: Protocol* proto_; Buffer::Instance* buffer_{}; diff --git a/source/extensions/filters/network/thrift_proxy/router/router_impl.h b/source/extensions/filters/network/thrift_proxy/router/router_impl.h index c41793a8066c..26a94c90c753 100644 --- a/source/extensions/filters/network/thrift_proxy/router/router_impl.h +++ b/source/extensions/filters/network/thrift_proxy/router/router_impl.h @@ -127,8 +127,6 @@ class MethodNameRouteEntryImpl : public RouteEntryImplBase { MethodNameRouteEntryImpl( const envoy::extensions::filters::network::thrift_proxy::v3::Route& route); - const std::string& methodName() const { return method_name_; } - // RouteEntryImplBase RouteConstSharedPtr matches(const MessageMetadata& metadata, uint64_t random_value) const override; @@ -143,8 +141,6 @@ class ServiceNameRouteEntryImpl : public RouteEntryImplBase { ServiceNameRouteEntryImpl( const envoy::extensions::filters::network::thrift_proxy::v3::Route& route); - const std::string& serviceName() const { return service_name_; } - // RouteEntryImplBase RouteConstSharedPtr matches(const MessageMetadata& metadata, uint64_t random_value) const override; diff --git a/source/extensions/filters/udp/dns_filter/dns_filter.h b/source/extensions/filters/udp/dns_filter/dns_filter.h index 248af66a0f59..c8d73086c85b 100644 --- a/source/extensions/filters/udp/dns_filter/dns_filter.h +++ b/source/extensions/filters/udp/dns_filter/dns_filter.h @@ -48,16 +48,6 @@ class DnsFilterEnvoyConfig { Server::Configuration::ListenerFactoryContext& context, const envoy::extensions::filters::udp::dns_filter::v3alpha::DnsFilterConfig& config); - DnsFilterStats& stats() const { return stats_; } - const DnsVirtualDomainConfig& domains() const { return virtual_domains_; } - const std::vector& knownSuffixes() const { return known_suffixes_; } - const absl::flat_hash_map& domainTtl() const { - return domain_ttl_; - } - const AddressConstPtrVec& resolvers() const { return resolvers_; } - bool forwardQueries() const { return forward_queries_; } - const std::chrono::milliseconds resolverTimeout() const { return resolver_timeout_; } - private: static DnsFilterStats generateStats(const std::string& stat_prefix, Stats::Scope& scope) { const auto final_prefix = absl::StrCat("dns_filter.", stat_prefix); diff --git a/source/extensions/filters/udp/dns_filter/dns_parser.h b/source/extensions/filters/udp/dns_filter/dns_parser.h index 6b41321561d2..3099fb66694d 100644 --- a/source/extensions/filters/udp/dns_filter/dns_parser.h +++ b/source/extensions/filters/udp/dns_filter/dns_parser.h @@ -149,11 +149,6 @@ class DnsMessageParser : public Logger::Loggable { */ DnsQueryRecordPtr parseDnsQueryRecord(const Buffer::InstancePtr& buffer, uint64_t* offset); - /** - * @return uint16_t the response code flag value from a parsed dns object - */ - uint16_t getQueryResponseCode() { return static_cast(header_.flags.rcode); } - /** * @brief Create a context 
object for handling a DNS Query * diff --git a/source/extensions/health_checkers/redis/redis.cc b/source/extensions/health_checkers/redis/redis.cc index 738092a00a14..d508193445da 100644 --- a/source/extensions/health_checkers/redis/redis.cc +++ b/source/extensions/health_checkers/redis/redis.cc @@ -19,7 +19,7 @@ RedisHealthChecker::RedisHealthChecker( Extensions::NetworkFilters::Common::Redis::Client::ClientFactory& client_factory) : HealthCheckerImplBase(cluster, config, dispatcher, runtime, random, std::move(event_logger)), client_factory_(client_factory), key_(redis_config.key()), - auth_password_(NetworkFilters::RedisProxy::ProtocolOptionsConfigImpl::auth_password( + auth_password_(NetworkFilters::RedisProxy::ProtocolOptionsConfigImpl::authPassword( cluster.info(), api)) { if (!key_.empty()) { type_ = Type::Exists; diff --git a/source/extensions/tracers/datadog/datadog_tracer_impl.cc b/source/extensions/tracers/datadog/datadog_tracer_impl.cc index ef99f50664e7..8974471790f7 100644 --- a/source/extensions/tracers/datadog/datadog_tracer_impl.cc +++ b/source/extensions/tracers/datadog/datadog_tracer_impl.cc @@ -23,11 +23,11 @@ Driver::TlsTracer::TlsTracer(const std::shared_ptr& tracer, Driver::Driver(const envoy::config::trace::v3::DatadogConfig& datadog_config, Upstream::ClusterManager& cluster_manager, Stats::Scope& scope, - ThreadLocal::SlotAllocator& tls, Runtime::Loader& runtime) + ThreadLocal::SlotAllocator& tls, Runtime::Loader&) : OpenTracingDriver{scope}, cm_(cluster_manager), tracer_stats_{DATADOG_TRACER_STATS( POOL_COUNTER_PREFIX(scope, "tracing.datadog."))}, - tls_(tls.allocateSlot()), runtime_(runtime) { + tls_(tls.allocateSlot()) { Config::Utility::checkCluster(TracerNames::get().Datadog, datadog_config.collector_cluster(), cm_, /* allow_added_via_api */ true); diff --git a/source/extensions/tracers/datadog/datadog_tracer_impl.h b/source/extensions/tracers/datadog/datadog_tracer_impl.h index 5cdb482543bc..87c48ffd2eb0 100644 --- a/source/extensions/tracers/datadog/datadog_tracer_impl.h +++ b/source/extensions/tracers/datadog/datadog_tracer_impl.h @@ -52,9 +52,7 @@ class Driver : public Common::Ot::OpenTracingDriver { // Getters to return the DatadogDriver's key members. 
Upstream::ClusterManager& clusterManager() { return cm_; } const std::string& cluster() { return cluster_; } - Runtime::Loader& runtime() { return runtime_; } DatadogTracerStats& tracerStats() { return tracer_stats_; } - const datadog::opentracing::TracerOptions& tracerOptions() { return tracer_options_; } // Tracer::OpenTracingDriver opentracing::Tracer& tracer() override; @@ -80,7 +78,6 @@ class Driver : public Common::Ot::OpenTracingDriver { DatadogTracerStats tracer_stats_; datadog::opentracing::TracerOptions tracer_options_; ThreadLocal::SlotPtr tls_; - Runtime::Loader& runtime_; }; /** diff --git a/source/extensions/tracers/zipkin/span_context_extractor.h b/source/extensions/tracers/zipkin/span_context_extractor.h index e48939b24708..425a0d59973c 100644 --- a/source/extensions/tracers/zipkin/span_context_extractor.h +++ b/source/extensions/tracers/zipkin/span_context_extractor.h @@ -14,7 +14,6 @@ class SpanContext; struct ExtractorException : public EnvoyException { ExtractorException(const std::string& what) : EnvoyException(what) {} - ExtractorException(const ExtractorException& ex) : EnvoyException(ex.what()) {} }; /** diff --git a/source/extensions/tracers/zipkin/tracer.h b/source/extensions/tracers/zipkin/tracer.h index d51e0645844a..74f04bf6c919 100644 --- a/source/extensions/tracers/zipkin/tracer.h +++ b/source/extensions/tracers/zipkin/tracer.h @@ -95,16 +95,6 @@ class Tracer : public TracerInterface { */ void reportSpan(Span&& span) override; - /** - * @return the service-name attribute associated with the Tracer. - */ - const std::string& serviceName() const { return service_name_; } - - /** - * @return the pointer to the address object associated with the Tracer. - */ - const Network::Address::InstanceConstSharedPtr address() const { return address_; } - /** * Associates a Reporter object with this Tracer. * @@ -112,11 +102,6 @@ class Tracer : public TracerInterface { */ void setReporter(ReporterPtr reporter); - /** - * @return the random-number generator associated with the Tracer. - */ - Runtime::RandomGenerator& randomGenerator() { return random_generator_; } - private: const std::string service_name_; Network::Address::InstanceConstSharedPtr address_; diff --git a/source/server/listener_impl.h b/source/server/listener_impl.h index 2ad37379e957..6160c0f4f87c 100644 --- a/source/server/listener_impl.h +++ b/source/server/listener_impl.h @@ -347,10 +347,6 @@ class ListenerImpl final : public Network::ListenerConfig, void buildProxyProtocolListenerFilter(); void buildTlsInspectorListenerFilter(); - void addListenSocketOption(const Network::Socket::OptionConstSharedPtr& option) { - ensureSocketOptions(); - listen_socket_options_->emplace_back(std::move(option)); - } void addListenSocketOptions(const Network::Socket::OptionsSharedPtr& options) { ensureSocketOptions(); Network::Socket::appendOptions(listen_socket_options_, options); From a250a871571f18dffd13732da83cbd4a677b1f03 Mon Sep 17 00:00:00 2001 From: htuch Date: Thu, 7 May 2020 15:36:25 -0400 Subject: [PATCH 111/909] api: drop_overload is not yet implemented in Envoy. (#11104) This was added before we started annotating fields missing implementations. Fixed with a [#not-implemented-hide:] annotations for now, the plan is to move things like this to https://github.com/envoyproxy/envoy/issues/11085 when it lands. 
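For context, once the feature is implemented the hidden field would be driven by an EDS policy along the lines of the sketch below. This is illustrative only: the category names and percentages come from the field's own doc comment, and the drop_percentage field name alongside category is an assumption, since this hunk only shows category.

# A ClusterLoadAssignment.Policy sketch in proto text form; Envoy ignores
# drop_overloads until the implementation lands. The drop_percentage field
# name is assumed and does not appear in the hunk below.
drop_overloads {
  category: "throttle"
  drop_percentage { numerator: 60 denominator: HUNDRED }
}
drop_overloads {
  category: "lb"
  drop_percentage { numerator: 20 denominator: HUNDRED }
}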
Signed-off-by: Harvey Tuch --- api/envoy/api/v2/endpoint.proto | 2 ++ api/envoy/config/endpoint/v3/endpoint.proto | 2 ++ generated_api_shadow/envoy/api/v2/endpoint.proto | 2 ++ generated_api_shadow/envoy/config/endpoint/v3/endpoint.proto | 2 ++ 4 files changed, 8 insertions(+) diff --git a/api/envoy/api/v2/endpoint.proto b/api/envoy/api/v2/endpoint.proto index e233b0e7d34e..92a2b13a8947 100644 --- a/api/envoy/api/v2/endpoint.proto +++ b/api/envoy/api/v2/endpoint.proto @@ -36,6 +36,7 @@ message ClusterLoadAssignment { // Load balancing policy settings. // [#next-free-field: 6] message Policy { + // [#not-implemented-hide:] message DropOverload { // Identifier for the policy specifying the drop. string category = 1 [(validate.rules).string = {min_bytes: 1}]; @@ -65,6 +66,7 @@ message ClusterLoadAssignment { // "throttle"_drop = 60% // "lb"_drop = 20% // 50% of the remaining 'actual' load, which is 40%. // actual_outgoing_load = 20% // remaining after applying all categories. + // [#not-implemented-hide:] repeated DropOverload drop_overloads = 2; // Priority levels and localities are considered overprovisioned with this diff --git a/api/envoy/config/endpoint/v3/endpoint.proto b/api/envoy/config/endpoint/v3/endpoint.proto index 008b4ddc4993..63869fafcb54 100644 --- a/api/envoy/config/endpoint/v3/endpoint.proto +++ b/api/envoy/config/endpoint/v3/endpoint.proto @@ -40,6 +40,7 @@ message ClusterLoadAssignment { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.ClusterLoadAssignment.Policy"; + // [#not-implemented-hide:] message DropOverload { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.ClusterLoadAssignment.Policy.DropOverload"; @@ -74,6 +75,7 @@ message ClusterLoadAssignment { // "throttle"_drop = 60% // "lb"_drop = 20% // 50% of the remaining 'actual' load, which is 40%. // actual_outgoing_load = 20% // remaining after applying all categories. + // [#not-implemented-hide:] repeated DropOverload drop_overloads = 2; // Priority levels and localities are considered overprovisioned with this diff --git a/generated_api_shadow/envoy/api/v2/endpoint.proto b/generated_api_shadow/envoy/api/v2/endpoint.proto index e233b0e7d34e..92a2b13a8947 100644 --- a/generated_api_shadow/envoy/api/v2/endpoint.proto +++ b/generated_api_shadow/envoy/api/v2/endpoint.proto @@ -36,6 +36,7 @@ message ClusterLoadAssignment { // Load balancing policy settings. // [#next-free-field: 6] message Policy { + // [#not-implemented-hide:] message DropOverload { // Identifier for the policy specifying the drop. string category = 1 [(validate.rules).string = {min_bytes: 1}]; @@ -65,6 +66,7 @@ message ClusterLoadAssignment { // "throttle"_drop = 60% // "lb"_drop = 20% // 50% of the remaining 'actual' load, which is 40%. // actual_outgoing_load = 20% // remaining after applying all categories. 
+ // [#not-implemented-hide:] repeated DropOverload drop_overloads = 2; // Priority levels and localities are considered overprovisioned with this diff --git a/generated_api_shadow/envoy/config/endpoint/v3/endpoint.proto b/generated_api_shadow/envoy/config/endpoint/v3/endpoint.proto index a65db5e7d7d8..e34b07619ab0 100644 --- a/generated_api_shadow/envoy/config/endpoint/v3/endpoint.proto +++ b/generated_api_shadow/envoy/config/endpoint/v3/endpoint.proto @@ -40,6 +40,7 @@ message ClusterLoadAssignment { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.ClusterLoadAssignment.Policy"; + // [#not-implemented-hide:] message DropOverload { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.ClusterLoadAssignment.Policy.DropOverload"; @@ -72,6 +73,7 @@ message ClusterLoadAssignment { // "throttle"_drop = 60% // "lb"_drop = 20% // 50% of the remaining 'actual' load, which is 40%. // actual_outgoing_load = 20% // remaining after applying all categories. + // [#not-implemented-hide:] repeated DropOverload drop_overloads = 2; // Priority levels and localities are considered overprovisioned with this From b16981f8d4f4726d5f8d31b55603f893fbba5575 Mon Sep 17 00:00:00 2001 From: htuch Date: Thu, 7 May 2020 16:38:55 -0400 Subject: [PATCH 112/909] api: introduce TypedExtensionConfig. (#11105) A common wrapper for name/Any that should be used for all new extensions throughout the API. I've left a note that we need to revisit existing typed_config at the next major version as well, since that would be a breaking change. Signed-off-by: Harvey Tuch --- api/envoy/config/core/v3/extension.proto | 30 ++++++++++++++++ api/envoy/config/core/v4alpha/extension.proto | 34 +++++++++++++++++++ .../common_messages/common_messages.rst | 1 + .../envoy/config/core/v3/extension.proto | 30 ++++++++++++++++ .../envoy/config/core/v4alpha/extension.proto | 34 +++++++++++++++++++ 5 files changed, 129 insertions(+) create mode 100644 api/envoy/config/core/v3/extension.proto create mode 100644 api/envoy/config/core/v4alpha/extension.proto create mode 100644 generated_api_shadow/envoy/config/core/v3/extension.proto create mode 100644 generated_api_shadow/envoy/config/core/v4alpha/extension.proto diff --git a/api/envoy/config/core/v3/extension.proto b/api/envoy/config/core/v3/extension.proto new file mode 100644 index 000000000000..636398760785 --- /dev/null +++ b/api/envoy/config/core/v3/extension.proto @@ -0,0 +1,30 @@ +syntax = "proto3"; + +package envoy.config.core.v3; + +import "google/protobuf/any.proto"; + +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v3"; +option java_outer_classname = "ExtensionProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Extension configuration] + +// Message type for extension configuration. +// [#next-major-version: revisit all existing typed_config that doesn't use this wrapper.]. +message TypedExtensionConfig { + // The name of an extension. This is not used to select the extension, instead + // it serves the role of an opaque identifier. + string name = 1 [(validate.rules).string = {min_len: 1}]; + + // The typed config for the extension. The type URL will be used to identify + // the extension. In the case that the type URL is *udpa.type.v1.TypedStruct*, + // the inner type URL of *TypedStruct* will be utilized. 
See the + // :ref:`extension configuration overview + // ` for further details. + google.protobuf.Any typed_config = 2 [(validate.rules).any = {required: true}]; +} diff --git a/api/envoy/config/core/v4alpha/extension.proto b/api/envoy/config/core/v4alpha/extension.proto new file mode 100644 index 000000000000..52ae2a143b49 --- /dev/null +++ b/api/envoy/config/core/v4alpha/extension.proto @@ -0,0 +1,34 @@ +syntax = "proto3"; + +package envoy.config.core.v4alpha; + +import "google/protobuf/any.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; +option java_outer_classname = "ExtensionProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Extension configuration] + +// Message type for extension configuration. +// [#next-major-version: revisit all existing typed_config that doesn't use this wrapper.]. +message TypedExtensionConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.TypedExtensionConfig"; + + // The name of an extension. This is not used to select the extension, instead + // it serves the role of an opaque identifier. + string name = 1 [(validate.rules).string = {min_len: 1}]; + + // The typed config for the extension. The type URL will be used to identify + // the extension. In the case that the type URL is *udpa.type.v1.TypedStruct*, + // the inner type URL of *TypedStruct* will be utilized. See the + // :ref:`extension configuration overview + // ` for further details. + google.protobuf.Any typed_config = 2 [(validate.rules).any = {required: true}]; +} diff --git a/docs/root/api-v3/common_messages/common_messages.rst b/docs/root/api-v3/common_messages/common_messages.rst index 6e3a5ed33f88..0d88c7715a6a 100644 --- a/docs/root/api-v3/common_messages/common_messages.rst +++ b/docs/root/api-v3/common_messages/common_messages.rst @@ -6,6 +6,7 @@ Common messages :maxdepth: 2 ../config/core/v3/base.proto + ../config/core/v3/extension.proto ../config/core/v3/address.proto ../config/core/v3/backoff.proto ../config/core/v3/protocol.proto diff --git a/generated_api_shadow/envoy/config/core/v3/extension.proto b/generated_api_shadow/envoy/config/core/v3/extension.proto new file mode 100644 index 000000000000..636398760785 --- /dev/null +++ b/generated_api_shadow/envoy/config/core/v3/extension.proto @@ -0,0 +1,30 @@ +syntax = "proto3"; + +package envoy.config.core.v3; + +import "google/protobuf/any.proto"; + +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v3"; +option java_outer_classname = "ExtensionProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Extension configuration] + +// Message type for extension configuration. +// [#next-major-version: revisit all existing typed_config that doesn't use this wrapper.]. +message TypedExtensionConfig { + // The name of an extension. This is not used to select the extension, instead + // it serves the role of an opaque identifier. + string name = 1 [(validate.rules).string = {min_len: 1}]; + + // The typed config for the extension. The type URL will be used to identify + // the extension. 
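As a rough usage sketch (not part of this patch), a TypedExtensionConfig is written in YAML as a name plus an Any-typed config. The extension name and the inner type URL below are hypothetical placeholders; only the name/typed_config shape and the TypedStruct unwrapping rule come from the message definition above.

# Hypothetical sketch: "name" is an opaque identifier; the type URL of typed_config
# (or, for udpa.type.v1.TypedStruct, its inner type_url) identifies the extension.
name: front.http.example-filter
typed_config:
  "@type": type.googleapis.com/udpa.type.v1.TypedStruct
  type_url: type.googleapis.com/envoy.extensions.filters.http.example.v3.Example  # hypothetical
  value:
    enabled: true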
In the case that the type URL is *udpa.type.v1.TypedStruct*, + // the inner type URL of *TypedStruct* will be utilized. See the + // :ref:`extension configuration overview + // ` for further details. + google.protobuf.Any typed_config = 2 [(validate.rules).any = {required: true}]; +} diff --git a/generated_api_shadow/envoy/config/core/v4alpha/extension.proto b/generated_api_shadow/envoy/config/core/v4alpha/extension.proto new file mode 100644 index 000000000000..52ae2a143b49 --- /dev/null +++ b/generated_api_shadow/envoy/config/core/v4alpha/extension.proto @@ -0,0 +1,34 @@ +syntax = "proto3"; + +package envoy.config.core.v4alpha; + +import "google/protobuf/any.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; +option java_outer_classname = "ExtensionProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Extension configuration] + +// Message type for extension configuration. +// [#next-major-version: revisit all existing typed_config that doesn't use this wrapper.]. +message TypedExtensionConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.TypedExtensionConfig"; + + // The name of an extension. This is not used to select the extension, instead + // it serves the role of an opaque identifier. + string name = 1 [(validate.rules).string = {min_len: 1}]; + + // The typed config for the extension. The type URL will be used to identify + // the extension. In the case that the type URL is *udpa.type.v1.TypedStruct*, + // the inner type URL of *TypedStruct* will be utilized. See the + // :ref:`extension configuration overview + // ` for further details. + google.protobuf.Any typed_config = 2 [(validate.rules).any = {required: true}]; +} From e6e37da3d9854410cfc2b46f193b265563ee5580 Mon Sep 17 00:00:00 2001 From: htuch Date: Thu, 7 May 2020 16:39:47 -0400 Subject: [PATCH 113/909] docs: some additional v3 FAQ entries. (#11103) * Elaborate on what we gained from v3. * Explain how to rollout control plane changes. Signed-off-by: Harvey Tuch --- docs/root/faq/api/control_plane.rst | 5 ++- .../faq/api/control_plane_version_support.rst | 36 +++++++++++++++++++ docs/root/faq/api/why_versioning.rst | 21 +++++++++++ docs/root/faq/overview.rst | 1 + 4 files changed, 62 insertions(+), 1 deletion(-) create mode 100644 docs/root/faq/api/control_plane_version_support.rst diff --git a/docs/root/faq/api/control_plane.rst b/docs/root/faq/api/control_plane.rst index bc717e393913..c489eab5648c 100644 --- a/docs/root/faq/api/control_plane.rst +++ b/docs/root/faq/api/control_plane.rst @@ -1,9 +1,12 @@ +.. _control_plane: + How do I support multiple xDS API major versions in my control plane? ===================================================================== Where possible, it is highly recommended that control planes support a single major version at a given point in time for simplicity. This works in situations where control planes need to only -support a window of Envoy versions which spans less than a year. +support a window of Envoy versions which spans less than a year. Temporary support for multiple +versions during rollout in this scenario is described :ref:`here `. 
For control planes that need to support a wider range of versions, there are a few approaches: diff --git a/docs/root/faq/api/control_plane_version_support.rst b/docs/root/faq/api/control_plane_version_support.rst new file mode 100644 index 000000000000..599ec8d7d8d8 --- /dev/null +++ b/docs/root/faq/api/control_plane_version_support.rst @@ -0,0 +1,36 @@ +.. _control_plane_version_support: + +Which xDS transport and resource versions does my control plane need to support? +================================================================================ + +If a control plane is serving a well known set of clients at a given API major version, it only +needs to support that version (both transport and resource version). However, even in this +relatively basic scenario, if the set of clients straddles a major version drop or the control plane +wishes to move from v2/v3, there are considerations around rollout of client and server binaries. + +One approach to this problem is to add temporary support to the management server for both v2 and v3 +transport versions (see https://github.com/envoyproxy/go-control-plane). For resources, messages +are binary compatible modulo deprecated or new fields between API major versions. If the control +plane no longer emits resources with deprecated fields, this allows for a trivial replacement of +type URL based on the requested resource from the client to serve the same resource for v2 and v3. A +typical rollout sequence might look like: + +1. Clients with a mix of v2 and v3 support are in operation, with a v2 management server. The + client bootstraps will reference v2 API transport endpoints. + +2. A management server with dual v2/v3 API support is rolled out. Both v2 and v3 transport endpoints + are supported, while a trivial type URL replacement in the returned resource is sufficient for + matching the requested v2 or v3 resource type URL with the existing v2 resource in the control + plane. When returning resources with embedded `ConfigSource` messages pointing at xDS resources + for a v3 request, it will be necessary to set the `transport_api_version` and + `resource_api_version` to v3. No deprecated v2 fields or new v3 fields can be used at this point. + +3. Client bootstraps are upgraded to v3 API transport endpoints and v3 API resource versions. + +4. Support for v2 is removed in the management server. The management server moves to v3 exclusively + internally and can support newer fields. + +If you are operating a managed control plane as-a-service, you will likely need to support a wide +range of client versions. In this scenario, you will require long term support for multiple major +API transport and resource versions. Strategies for managing this support are described :ref:`here +`. diff --git a/docs/root/faq/api/why_versioning.rst b/docs/root/faq/api/why_versioning.rst index 5a0b027e8bf6..917a16ae2afe 100644 --- a/docs/root/faq/api/why_versioning.rst +++ b/docs/root/faq/api/why_versioning.rst @@ -12,3 +12,24 @@ We had previously put in place policies around :repo:`breaking changes ` takes this a step further, articulating a guaranteed multi-year support window for APIs that provides control plane authors a predictable clock when considering support for a range of Envoy versions. 
+ +For the v3 xDS APIs, a brief list of the key improvements that were made with a clean break from v2: + +* Packages organization was improved to reflect a more logical grouping of related APIs: + + - The legacy `envoy.api.v2` tree was eliminated, with protos moved to their logical groupings, + e.g. `envoy.config.core.v3`, `envoy.server.listener.v3`. + - All packages are now versioned with a `vN` at the end. This allows for type-level identification + of major version. + - xDS service endpoints/transport and configuration are split between `envoy.service` and + `envoy.config`. + - Extensions now reflect the Envoy source tree layout under `envoy.extensions`. +* `std::regex` regular expressions were dropped from the API, in favor of RE2. The former have dangerous + security implications. +* `google.protobug.Struct` configuration of extensions was dropped from the API, in favor of + typed configuration. This provides for better support for multiple instances of extensions, e.g. + in filter chains, and more flexible naming of extension instances. +* Over 60 deprecated fields were removed from the API. +* Tooling and processes were established for API versioning support. This has now been reflected in + the bootstrap `Node`, providing a long term notion of API support that control planes can depend + upon for client negotiation. diff --git a/docs/root/faq/overview.rst b/docs/root/faq/overview.rst index c3a32d963c52..d2f21b70a333 100644 --- a/docs/root/faq/overview.rst +++ b/docs/root/faq/overview.rst @@ -21,6 +21,7 @@ API api/envoy_v3 api/envoy_upgrade_v3 api/extensions + api/control_plane_version_support api/control_plane api/package_naming api/why_versioning From 10c755e9d9b8acd7cf1702a4f49dbcbdf0696198 Mon Sep 17 00:00:00 2001 From: Craig Radcliffe Date: Thu, 7 May 2020 23:24:21 -0400 Subject: [PATCH 114/909] http: Introduce preserve_upstream_date option (#11077) The preserve_upstream_date option allows the HTTP Connection Manager to be configured to pass through the original date header from the upstream response rather than overwriting it. The default behaviour for the date response header remains the same as before -- the header value will be overwritten by Envoy. Signed-off-by: Craig Radcliffe --- .../v3/http_connection_manager.proto | 7 ++- .../v4alpha/http_connection_manager.proto | 7 ++- .../v3/http_connection_manager.proto | 7 ++- .../v4alpha/http_connection_manager.proto | 7 ++- source/common/http/conn_manager_config.h | 6 +++ source/common/http/conn_manager_impl.cc | 4 +- .../network/http_connection_manager/config.cc | 3 +- .../network/http_connection_manager/config.h | 2 + source/server/http/admin.h | 1 + .../http/conn_manager_impl_fuzz_test.cc | 1 + test/common/http/conn_manager_impl_test.cc | 52 +++++++++++++++++++ test/common/http/conn_manager_utility_test.cc | 1 + .../http_connection_manager/config_test.cc | 33 ++++++++++++ 13 files changed, 125 insertions(+), 6 deletions(-) diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto b/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto index a3dcaa2f815f..92b2d8b9eb7f 100644 --- a/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto +++ b/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto @@ -30,7 +30,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // HTTP connection manager :ref:`configuration overview `. 
// [#extension: envoy.filters.network.http_connection_manager] -// [#next-free-field: 38] +// [#next-free-field: 39] message HttpConnectionManager { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager"; @@ -506,6 +506,11 @@ message HttpConnectionManager { // // 3. Tracing decision (sampled, forced, etc) is set in 14th byte of the UUID. RequestIDExtension request_id_extension = 36; + + // If `preserve_upstream_date` is true, the value of the `date` header sent by the upstream + // host will not be overwritten by the HTTP Connection Manager. The default behaviour is + // to overwrite the `date` header unconditionally. + bool preserve_upstream_date = 38; } message Rds { diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto index 860a951b90f6..8e16ecd6d1ca 100644 --- a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto +++ b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto @@ -30,7 +30,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // HTTP connection manager :ref:`configuration overview `. // [#extension: envoy.filters.network.http_connection_manager] -// [#next-free-field: 38] +// [#next-free-field: 39] message HttpConnectionManager { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager"; @@ -506,6 +506,11 @@ message HttpConnectionManager { // // 3. Tracing decision (sampled, forced, etc) is set in 14th byte of the UUID. RequestIDExtension request_id_extension = 36; + + // If `preserve_upstream_date` is true, the value of the `date` header sent by the upstream + // host will not be overwritten by the HTTP Connection Manager. The default behaviour is + // to overwrite the `date` header unconditionally. + bool preserve_upstream_date = 38; } message Rds { diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto index d3f5fb927ffa..3ca02df7b37c 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto @@ -30,7 +30,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // HTTP connection manager :ref:`configuration overview `. // [#extension: envoy.filters.network.http_connection_manager] -// [#next-free-field: 38] +// [#next-free-field: 39] message HttpConnectionManager { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager"; @@ -509,6 +509,11 @@ message HttpConnectionManager { // 3. Tracing decision (sampled, forced, etc) is set in 14th byte of the UUID. RequestIDExtension request_id_extension = 36; + // If `preserve_upstream_date` is true, the value of the `date` header sent by the upstream + // host will not be overwritten by the HTTP Connection Manager. The default behaviour is + // to overwrite the `date` header unconditionally. 
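For reference, a minimal HCM configuration enabling this option (a sketch only, in the same YAML shape the config tests below use) might look like:

# Sketch: pass the upstream-supplied date response header through unchanged.
stat_prefix: ingress_http
preserve_upstream_date: true
route_config:
  name: local_route
http_filters:
- name: envoy.filters.http.router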
+ bool preserve_upstream_date = 38; + google.protobuf.Duration hidden_envoy_deprecated_idle_timeout = 11 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; } diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto index 860a951b90f6..8e16ecd6d1ca 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto @@ -30,7 +30,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // HTTP connection manager :ref:`configuration overview `. // [#extension: envoy.filters.network.http_connection_manager] -// [#next-free-field: 38] +// [#next-free-field: 39] message HttpConnectionManager { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager"; @@ -506,6 +506,11 @@ message HttpConnectionManager { // // 3. Tracing decision (sampled, forced, etc) is set in 14th byte of the UUID. RequestIDExtension request_id_extension = 36; + + // If `preserve_upstream_date` is true, the value of the `date` header sent by the upstream + // host will not be overwritten by the HTTP Connection Manager. The default behaviour is + // to overwrite the `date` header unconditionally. + bool preserve_upstream_date = 38; } message Rds { diff --git a/source/common/http/conn_manager_config.h b/source/common/http/conn_manager_config.h index 67c4a74ca63f..63b1b9bbb30c 100644 --- a/source/common/http/conn_manager_config.h +++ b/source/common/http/conn_manager_config.h @@ -430,6 +430,12 @@ class ConnectionManagerConfig { */ virtual envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction headersWithUnderscoresAction() const PURE; + + /** + * @return if the HttpConnectionManager should preserve the `date` response header sent by the + * upstream host. + */ + virtual bool shouldPreserveUpstreamDate() const PURE; }; } // namespace Http } // namespace Envoy diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index eed3c641bf82..f5187fdc129d 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -1620,7 +1620,9 @@ void ConnectionManagerImpl::ActiveStream::encodeHeaders(ActiveStreamEncoderFilte void ConnectionManagerImpl::ActiveStream::encodeHeadersInternal(ResponseHeaderMap& headers, bool end_stream) { // Base headers. - connection_manager_.config_.dateProvider().setDateHeader(headers); + if (!connection_manager_.config_.shouldPreserveUpstreamDate() || !headers.Date()) { + connection_manager_.config_.dateProvider().setDateHeader(headers); + } // Following setReference() is safe because serverName() is constant for the life of the listener. 
const auto transformation = connection_manager_.config_.serverHeaderTransformation(); if (transformation == ConnectionManagerConfig::HttpConnectionManagerProto::OVERWRITE || diff --git a/source/extensions/filters/network/http_connection_manager/config.cc b/source/extensions/filters/network/http_connection_manager/config.cc index 7a9f7b2efc58..b2ca593b1cb4 100644 --- a/source/extensions/filters/network/http_connection_manager/config.cc +++ b/source/extensions/filters/network/http_connection_manager/config.cc @@ -217,7 +217,8 @@ HttpConnectionManagerConfig::HttpConnectionManagerConfig( #endif merge_slashes_(config.merge_slashes()), headers_with_underscores_action_( - config.common_http_protocol_options().headers_with_underscores_action()) { + config.common_http_protocol_options().headers_with_underscores_action()), + preserve_upstream_date_(config.preserve_upstream_date()) { // If idle_timeout_ was not configured in common_http_protocol_options, use value in deprecated // idle_timeout field. // TODO(asraa): Remove when idle_timeout is removed. diff --git a/source/extensions/filters/network/http_connection_manager/config.h b/source/extensions/filters/network/http_connection_manager/config.h index 6f3995fc30fd..35a179c32d03 100644 --- a/source/extensions/filters/network/http_connection_manager/config.h +++ b/source/extensions/filters/network/http_connection_manager/config.h @@ -162,6 +162,7 @@ class HttpConnectionManagerConfig : Logger::Loggable, return headers_with_underscores_action_; } std::chrono::milliseconds delayedCloseTimeout() const override { return delayed_close_timeout_; } + bool shouldPreserveUpstreamDate() const override { return preserve_upstream_date_; } private: enum class CodecType { HTTP1, HTTP2, HTTP3, AUTO }; @@ -226,6 +227,7 @@ class HttpConnectionManagerConfig : Logger::Loggable, const bool merge_slashes_; const envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction headers_with_underscores_action_; + const bool preserve_upstream_date_; // Default idle timeout is 5 minutes if nothing is specified in the HCM config. 
static const uint64_t StreamIdleTimeoutMs = 5 * 60 * 1000; diff --git a/source/server/http/admin.h b/source/server/http/admin.h index 15946837b3ff..1f69acdb120f 100644 --- a/source/server/http/admin.h +++ b/source/server/http/admin.h @@ -168,6 +168,7 @@ class AdminImpl : public Admin, headersWithUnderscoresAction() const override { return envoy::config::core::v3::HttpProtocolOptions::ALLOW; } + bool shouldPreserveUpstreamDate() const override { return false; } Http::Code request(absl::string_view path_and_query, absl::string_view method, Http::ResponseHeaderMap& response_headers, std::string& body) override; void closeSocket(); diff --git a/test/common/http/conn_manager_impl_fuzz_test.cc b/test/common/http/conn_manager_impl_fuzz_test.cc index bd72be7f7ff1..8f878f625b30 100644 --- a/test/common/http/conn_manager_impl_fuzz_test.cc +++ b/test/common/http/conn_manager_impl_fuzz_test.cc @@ -160,6 +160,7 @@ class FuzzConfig : public ConnectionManagerConfig { headersWithUnderscoresAction() const override { return envoy::config::core::v3::HttpProtocolOptions::ALLOW; } + bool shouldPreserveUpstreamDate() const override { return false; } const envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager config_; diff --git a/test/common/http/conn_manager_impl_test.cc b/test/common/http/conn_manager_impl_test.cc index 89ba46e56df4..df03a23bb0f4 100644 --- a/test/common/http/conn_manager_impl_test.cc +++ b/test/common/http/conn_manager_impl_test.cc @@ -353,6 +353,7 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan headersWithUnderscoresAction() const override { return headers_with_underscores_action_; } + bool shouldPreserveUpstreamDate() const override { return preserve_upstream_date_; } Envoy::Event::SimulatedTimeSystem test_time_; NiceMock route_config_provider_; @@ -414,6 +415,7 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan NiceMock upstream_conn_; // for websocket tests NiceMock conn_pool_; // for websocket tests RequestIDExtensionSharedPtr request_id_extension_; + bool preserve_upstream_date_ = false; // TODO(mattklein123): Not all tests have been converted over to better setup. Convert the rest. 
MockResponseEncoder response_encoder_; @@ -897,6 +899,56 @@ TEST_F(HttpConnectionManagerImplTest, RouteShouldUseSantizedPath) { conn_manager_->onData(fake_input, false); } +TEST_F(HttpConnectionManagerImplTest, PreserveUpstreamDateDisabledDateNotSet) { + setup(false, ""); + setUpEncoderAndDecoder(false, false); + sendRequestHeadersAndData(); + preserve_upstream_date_ = false; + const auto* modified_headers = sendResponseHeaders( + ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}, {"server", "foo"}}}); + ASSERT_TRUE(modified_headers); + EXPECT_TRUE(modified_headers->Date()); +} + +TEST_F(HttpConnectionManagerImplTest, PreserveUpstreamDateDisabledDateSet) { + setup(false, ""); + setUpEncoderAndDecoder(false, false); + sendRequestHeadersAndData(); + preserve_upstream_date_ = false; + const std::string expected_date{"Tue, 15 Nov 1994 08:12:31 GMT"}; + const auto* modified_headers = + sendResponseHeaders(ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{ + {":status", "200"}, {"server", "foo"}, {"date", expected_date.c_str()}}}); + ASSERT_TRUE(modified_headers); + ASSERT_TRUE(modified_headers->Date()); + EXPECT_NE(expected_date, modified_headers->Date()->value().getStringView()); +} + +TEST_F(HttpConnectionManagerImplTest, PreserveUpstreamDateEnabledDateNotSet) { + setup(false, ""); + setUpEncoderAndDecoder(false, false); + sendRequestHeadersAndData(); + preserve_upstream_date_ = true; + const auto* modified_headers = sendResponseHeaders( + ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}, {"server", "foo"}}}); + ASSERT_TRUE(modified_headers); + EXPECT_TRUE(modified_headers->Date()); +} + +TEST_F(HttpConnectionManagerImplTest, PreserveUpstreamDateEnabledDateSet) { + setup(false, ""); + setUpEncoderAndDecoder(false, false); + sendRequestHeadersAndData(); + preserve_upstream_date_ = true; + const std::string expected_date{"Tue, 15 Nov 1994 08:12:31 GMT"}; + const auto* modified_headers = + sendResponseHeaders(ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{ + {":status", "200"}, {"server", "foo"}, {"date", expected_date.c_str()}}}); + ASSERT_TRUE(modified_headers); + ASSERT_TRUE(modified_headers->Date()); + EXPECT_EQ(expected_date, modified_headers->Date()->value().getStringView()); +} + TEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlow) { setup(false, ""); diff --git a/test/common/http/conn_manager_utility_test.cc b/test/common/http/conn_manager_utility_test.cc index 2b7d445e8bb7..2aaf846bbc12 100644 --- a/test/common/http/conn_manager_utility_test.cc +++ b/test/common/http/conn_manager_utility_test.cc @@ -138,6 +138,7 @@ class MockConnectionManagerConfig : public ConnectionManagerConfig { MOCK_METHOD(bool, shouldMergeSlashes, (), (const)); MOCK_METHOD(envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction, headersWithUnderscoresAction, (), (const)); + MOCK_METHOD(bool, shouldPreserveUpstreamDate, (), (const)); std::unique_ptr internal_address_config_ = std::make_unique(); diff --git a/test/extensions/filters/network/http_connection_manager/config_test.cc b/test/extensions/filters/network/http_connection_manager/config_test.cc index e6fa79671b60..b6bcef8241d1 100644 --- a/test/extensions/filters/network/http_connection_manager/config_test.cc +++ b/test/extensions/filters/network/http_connection_manager/config_test.cc @@ -1080,6 +1080,39 @@ TEST_F(HttpConnectionManagerConfigTest, UnconfiguredRequestTimeout) { EXPECT_EQ(0, config.requestTimeout().count()); } +TEST_F(HttpConnectionManagerConfigTest, 
DisabledPreserveResponseDate) { + const std::string yaml_string = R"EOF( + stat_prefix: ingress_http + request_timeout: 0s + route_config: + name: local_route + http_filters: + - name: envoy.filters.http.router + )EOF"; + + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + date_provider_, route_config_provider_manager_, + scoped_routes_config_provider_manager_, http_tracer_manager_); + EXPECT_FALSE(config.shouldPreserveUpstreamDate()); +} + +TEST_F(HttpConnectionManagerConfigTest, EnabledPreserveResponseDate) { + const std::string yaml_string = R"EOF( + stat_prefix: ingress_http + request_timeout: 0s + route_config: + name: local_route + http_filters: + - name: envoy.filters.http.router + preserve_upstream_date: true + )EOF"; + + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + date_provider_, route_config_provider_manager_, + scoped_routes_config_provider_manager_, http_tracer_manager_); + EXPECT_TRUE(config.shouldPreserveUpstreamDate()); +} + TEST_F(HttpConnectionManagerConfigTest, SingleDateProvider) { const std::string yaml_string = R"EOF( codec_type: http1 From bb125f27e05db64358a2a061e7427de27b3e5ec9 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Fri, 8 May 2020 13:54:42 -0400 Subject: [PATCH 115/909] test: switching to synthetic quit (#11106) Switching from sending /quitquitquit for shutdown to calling quit() directly. Signed-off-by: Alyssa Wilk --- test/integration/integration_test.cc | 5 +++++ test/integration/server.cc | 27 +++++++++++++++--------- test/integration/server.h | 31 +++++++++++++++------------- 3 files changed, 39 insertions(+), 24 deletions(-) diff --git a/test/integration/integration_test.cc b/test/integration/integration_test.cc index ae11e48dafab..5f70de47465d 100644 --- a/test/integration/integration_test.cc +++ b/test/integration/integration_test.cc @@ -1379,4 +1379,9 @@ TEST_P(IntegrationTest, ConnectWithChunkedBody) { ASSERT_TRUE(fake_upstream_connection->waitForDisconnect()); } +TEST_P(IntegrationTest, QuitQuitQuit) { + initialize(); + test_server_->useAdminInterfaceToQuit(true); +} + } // namespace Envoy diff --git a/test/integration/server.cc b/test/integration/server.cc index 8bc2f9d96cd9..05f05a34a16f 100644 --- a/test/integration/server.cc +++ b/test/integration/server.cc @@ -218,18 +218,25 @@ void IntegrationTestServerImpl::createAndRunEnvoyServer( IntegrationTestServerImpl::~IntegrationTestServerImpl() { ENVOY_LOG(info, "stopping integration test server"); - Network::Address::InstanceConstSharedPtr admin_address(admin_address_); - admin_address_ = nullptr; + if (useAdminInterfaceToQuit()) { + Network::Address::InstanceConstSharedPtr admin_address(admin_address_); + if (admin_address != nullptr) { + BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest( + admin_address, "POST", "/quitquitquit", "", Http::CodecClient::Type::HTTP1); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + server_gone_.WaitForNotification(); + } + } else { + if (server_) { + server_->dispatcher().post([this]() { server_->shutdown(); }); + server_gone_.WaitForNotification(); + } + } + server_ = nullptr; + admin_address_ = nullptr; stat_store_ = nullptr; - - if (admin_address != nullptr) { - BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest( - admin_address, "POST", "/quitquitquit", "", Http::CodecClient::Type::HTTP1); - EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", 
response->headers().Status()->value().getStringView()); - server_gone_.WaitForNotification(); - } } } // namespace Envoy diff --git a/test/integration/server.h b/test/integration/server.h index 22f80a09b57b..007d4f5be817 100644 --- a/test/integration/server.h +++ b/test/integration/server.h @@ -300,36 +300,36 @@ class IntegrationTestServer : public Logger::Loggable, uint32_t concurrency); void waitForCounterEq(const std::string& name, uint64_t value) override { - TestUtility::waitForCounterEq(stat_store(), name, value, time_system_); + TestUtility::waitForCounterEq(statStore(), name, value, time_system_); } void waitForCounterGe(const std::string& name, uint64_t value) override { - TestUtility::waitForCounterGe(stat_store(), name, value, time_system_); + TestUtility::waitForCounterGe(statStore(), name, value, time_system_); } void waitForGaugeGe(const std::string& name, uint64_t value) override { - TestUtility::waitForGaugeGe(stat_store(), name, value, time_system_); + TestUtility::waitForGaugeGe(statStore(), name, value, time_system_); } void waitForGaugeEq(const std::string& name, uint64_t value) override { - TestUtility::waitForGaugeEq(stat_store(), name, value, time_system_); + TestUtility::waitForGaugeEq(statStore(), name, value, time_system_); } Stats::CounterSharedPtr counter(const std::string& name) override { // When using the thread local store, only counters() is thread safe. This also allows us // to test if a counter exists at all versus just defaulting to zero. - return TestUtility::findCounter(stat_store(), name); + return TestUtility::findCounter(statStore(), name); } Stats::GaugeSharedPtr gauge(const std::string& name) override { // When using the thread local store, only gauges() is thread safe. This also allows us // to test if a counter exists at all versus just defaulting to zero. - return TestUtility::findGauge(stat_store(), name); + return TestUtility::findGauge(statStore(), name); } - std::vector counters() override { return stat_store().counters(); } + std::vector counters() override { return statStore().counters(); } - std::vector gauges() override { return stat_store().gauges(); } + std::vector gauges() override { return statStore().gauges(); } // ListenerHooks void onWorkerListenerAdded() override; @@ -347,8 +347,10 @@ class IntegrationTestServer : public Logger::Loggable, // Should not be called until createAndRunEnvoyServer() is called. virtual Server::Instance& server() PURE; - virtual Stats::Store& stat_store() PURE; - virtual Network::Address::InstanceConstSharedPtr admin_address() PURE; + virtual Stats::Store& statStore() PURE; + virtual Network::Address::InstanceConstSharedPtr adminAddress() PURE; + void useAdminInterfaceToQuit(bool use) { use_admin_interface_to_quit_ = use; } + bool useAdminInterfaceToQuit() { return use_admin_interface_to_quit_; } protected: IntegrationTestServer(Event::TestTimeSystem& time_system, Api::Api& api, @@ -356,7 +358,7 @@ class IntegrationTestServer : public Logger::Loggable, : time_system_(time_system), api_(api), config_path_(config_path) {} // Create the running envoy server. This function will call serverReady() when the virtual - // functions server(), stat_store(), and admin_address() may be called, but before the server + // functions server(), statStore(), and adminAddress() may be called, but before the server // has been started. // The subclass is also responsible for tearing down this server in its destructor. 
virtual void createAndRunEnvoyServer(OptionsImpl& options, Event::TimeSystem& time_system, @@ -367,7 +369,7 @@ class IntegrationTestServer : public Logger::Loggable, ProcessObjectOptRef process_object) PURE; // Will be called by subclass on server thread when the server is ready to be accessed. The - // server may not have been run yet, but all server access methods (server(), stat_store(), + // server may not have been run yet, but all server access methods (server(), statStore(), // adminAddress()) will be available. void serverReady(); @@ -392,6 +394,7 @@ class IntegrationTestServer : public Logger::Loggable, std::function on_worker_listener_removed_cb_; TcpDumpPtr tcp_dump_; std::function on_server_ready_cb_; + bool use_admin_interface_to_quit_{}; }; // Default implementation of IntegrationTestServer @@ -407,11 +410,11 @@ class IntegrationTestServerImpl : public IntegrationTestServer { RELEASE_ASSERT(server_ != nullptr, ""); return *server_; } - Stats::Store& stat_store() override { + Stats::Store& statStore() override { RELEASE_ASSERT(stat_store_ != nullptr, ""); return *stat_store_; } - Network::Address::InstanceConstSharedPtr admin_address() override { return admin_address_; } + Network::Address::InstanceConstSharedPtr adminAddress() override { return admin_address_; } private: void createAndRunEnvoyServer(OptionsImpl& options, Event::TimeSystem& time_system, From 111684faa1d28f34fb16af43911c575c6815c450 Mon Sep 17 00:00:00 2001 From: "Nikita V. Shirokov" Date: Fri, 8 May 2020 10:56:21 -0700 Subject: [PATCH 116/909] conn_manager: allow to remove port from host header (#10960) add an api option/conn manager feature which would allow to remove port part from Host header (e.g. would transform example:443 to example. this would simplify domain's matching inside virtual host as well as would not require explicit matching on "domain:port" in upstream proxies. Signed-off-by: Nikita V. 
Shirokov --- .../v3/http_connection_manager.proto | 10 ++- .../v4alpha/http_connection_manager.proto | 10 ++- docs/root/version_history/current.rst | 1 + .../v3/http_connection_manager.proto | 10 ++- .../v4alpha/http_connection_manager.proto | 10 ++- source/common/http/conn_manager_config.h | 5 ++ source/common/http/conn_manager_impl.cc | 11 +++ source/common/http/conn_manager_impl.h | 5 +- source/common/http/conn_manager_utility.cc | 9 +++ source/common/http/conn_manager_utility.h | 3 + source/common/http/header_utility.cc | 33 ++++++++ source/common/http/header_utility.h | 5 ++ .../network/http_connection_manager/config.cc | 1 + .../network/http_connection_manager/config.h | 2 + source/server/http/admin.h | 1 + .../http/conn_manager_impl_fuzz_test.cc | 1 + test/common/http/conn_manager_impl_test.cc | 78 ++++++++++++++++++- test/common/http/conn_manager_utility_test.cc | 12 +++ test/common/http/header_utility_test.cc | 50 ++++++++++++ test/common/http/path_utility_test.cc | 4 + .../http_connection_manager/config_test.cc | 50 ++++++++++++ 21 files changed, 305 insertions(+), 6 deletions(-) diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto b/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto index 92b2d8b9eb7f..a236c5c47743 100644 --- a/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto +++ b/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto @@ -30,7 +30,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // HTTP connection manager :ref:`configuration overview `. // [#extension: envoy.filters.network.http_connection_manager] -// [#next-free-field: 39] +// [#next-free-field: 40] message HttpConnectionManager { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager"; @@ -511,6 +511,14 @@ message HttpConnectionManager { // host will not be overwritten by the HTTP Connection Manager. The default behaviour is // to overwrite the `date` header unconditionally. bool preserve_upstream_date = 38; + + // Determines if the port part should be removed from host/authority header before any processing + // of request by HTTP filters or routing. The port would be removed only if it is equal to the :ref:`listener's` + // local port and request method is not CONNECT. This affects the upstream host header as well. + // Without setting this option, incoming requests with host `example:443` will not match against + // route with :ref:`domains` match set to `example`. Defaults to `false`. Note that port removal is not part + // of `HTTP spec ` and is provided for convenience. + bool strip_matching_host_port = 39; } message Rds { diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto index 8e16ecd6d1ca..6e26aa2d1f98 100644 --- a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto +++ b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto @@ -30,7 +30,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // HTTP connection manager :ref:`configuration overview `. 
// [#extension: envoy.filters.network.http_connection_manager] -// [#next-free-field: 39] +// [#next-free-field: 40] message HttpConnectionManager { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager"; @@ -511,6 +511,14 @@ message HttpConnectionManager { // host will not be overwritten by the HTTP Connection Manager. The default behaviour is // to overwrite the `date` header unconditionally. bool preserve_upstream_date = 38; + + // Determines if the port part should be removed from host/authority header before any processing + // of request by HTTP filters or routing. The port would be removed only if it is equal to the :ref:`listener's` + // local port and request method is not CONNECT. This affects the upstream host header as well. + // Without setting this option, incoming requests with host `example:443` will not match against + // route with :ref:`domains` match set to `example`. Defaults to `false`. Note that port removal is not part + // of `HTTP spec ` and is provided for convenience. + bool strip_matching_host_port = 39; } message Rds { diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index a8b6e0f694c3..9eb566ccd65f 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -19,6 +19,7 @@ Changes `google.api.HttpBody `_. * gzip filter: added option to set zlib's next output buffer size. * health checks: allow configuring health check transport sockets by specifying :ref:`transport socket match criteria `. +* http: added :ref:`stripping port from host header ` support. * http: fixed a bug where in some cases slash was moved from path to query string when :ref:`merging of adjacent slashes` is enabled. * http: fixed a bug where the upgrade header was not cleared on responses to non-upgrade requests. Can be reverted temporarily by setting runtime feature `envoy.reloadable_features.fix_upgrade_response` to false. diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto index 3ca02df7b37c..55f4f5fe819d 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto @@ -30,7 +30,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // HTTP connection manager :ref:`configuration overview `. // [#extension: envoy.filters.network.http_connection_manager] -// [#next-free-field: 39] +// [#next-free-field: 40] message HttpConnectionManager { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager"; @@ -514,6 +514,14 @@ message HttpConnectionManager { // to overwrite the `date` header unconditionally. bool preserve_upstream_date = 38; + // Determines if the port part should be removed from host/authority header before any processing + // of request by HTTP filters or routing. The port would be removed only if it is equal to the :ref:`listener's` + // local port and request method is not CONNECT. This affects the upstream host header as well. 
+ // Without setting this option, incoming requests with host `example:443` will not match against + // route with :ref:`domains` match set to `example`. Defaults to `false`. Note that port removal is not part + // of `HTTP spec ` and is provided for convenience. + bool strip_matching_host_port = 39; + google.protobuf.Duration hidden_envoy_deprecated_idle_timeout = 11 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; } diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto index 8e16ecd6d1ca..6e26aa2d1f98 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto @@ -30,7 +30,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // HTTP connection manager :ref:`configuration overview `. // [#extension: envoy.filters.network.http_connection_manager] -// [#next-free-field: 39] +// [#next-free-field: 40] message HttpConnectionManager { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager"; @@ -511,6 +511,14 @@ message HttpConnectionManager { // host will not be overwritten by the HTTP Connection Manager. The default behaviour is // to overwrite the `date` header unconditionally. bool preserve_upstream_date = 38; + + // Determines if the port part should be removed from host/authority header before any processing + // of request by HTTP filters or routing. The port would be removed only if it is equal to the :ref:`listener's` + // local port and request method is not CONNECT. This affects the upstream host header as well. + // Without setting this option, incoming requests with host `example:443` will not match against + // route with :ref:`domains` match set to `example`. Defaults to `false`. Note that port removal is not part + // of `HTTP spec ` and is provided for convenience. + bool strip_matching_host_port = 39; } message Rds { diff --git a/source/common/http/conn_manager_config.h b/source/common/http/conn_manager_config.h index 63b1b9bbb30c..993fa7d55a3e 100644 --- a/source/common/http/conn_manager_config.h +++ b/source/common/http/conn_manager_config.h @@ -424,6 +424,11 @@ class ConnectionManagerConfig { */ virtual bool shouldMergeSlashes() const PURE; + /** + * @return if the HttpConnectionManager should remove the port from host/authority header + */ + virtual bool shouldStripMatchingPort() const PURE; + /** * @return the action HttpConnectionManager should take when receiving client request * headers containing underscore characters. diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index f5187fdc129d..17e6db543c24 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -747,6 +747,14 @@ const Network::Connection* ConnectionManagerImpl::ActiveStream::connection() { return &connection_manager_.read_callbacks_->connection(); } +uint32_t ConnectionManagerImpl::ActiveStream::localPort() { + auto ip = connection()->localAddress()->ip(); + if (ip == nullptr) { + return 0; + } + return ip->port(); +} + // Ordering in this function is complicated, but important. 
// // We want to do minimal work before selecting route and creating a filter @@ -896,6 +904,9 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& he return; } + ConnectionManagerUtility::maybeNormalizeHost(*request_headers_, connection_manager_.config_, + localPort()); + if (protocol == Protocol::Http11 && request_headers_->Connection() && absl::EqualsIgnoreCase(request_headers_->Connection()->value().getStringView(), Http::Headers::get().ConnectionValues.Close)) { diff --git a/source/common/http/conn_manager_impl.h b/source/common/http/conn_manager_impl.h index c33d752d38a4..8cb3cf6789e8 100644 --- a/source/common/http/conn_manager_impl.h +++ b/source/common/http/conn_manager_impl.h @@ -667,12 +667,15 @@ class ConnectionManagerImpl : Logger::Loggable, void onIdleTimeout(); // Reset per-stream idle timer. void resetIdleTimer(); - // Per-stream request timeout callback + // Per-stream request timeout callback. void onRequestTimeout(); // Per-stream alive duration reached. void onStreamMaxDurationReached(); bool hasCachedRoute() { return cached_route_.has_value() && cached_route_.value(); } + // Return local port of the connection. + uint32_t localPort(); + friend std::ostream& operator<<(std::ostream& os, const ActiveStream& s) { s.dumpState(os); return os; diff --git a/source/common/http/conn_manager_utility.cc b/source/common/http/conn_manager_utility.cc index 88596ed4bcdf..2b1321afd5b8 100644 --- a/source/common/http/conn_manager_utility.cc +++ b/source/common/http/conn_manager_utility.cc @@ -9,6 +9,7 @@ #include "common/access_log/access_log_formatter.h" #include "common/common/empty_string.h" #include "common/common/utility.h" +#include "common/http/header_utility.h" #include "common/http/headers.h" #include "common/http/http1/codec_impl.h" #include "common/http/http2/codec_impl.h" @@ -420,5 +421,13 @@ bool ConnectionManagerUtility::maybeNormalizePath(RequestHeaderMap& request_head return is_valid_path; } +void ConnectionManagerUtility::maybeNormalizeHost(RequestHeaderMap& request_headers, + const ConnectionManagerConfig& config, + uint32_t port) { + if (config.shouldStripMatchingPort()) { + HeaderUtility::stripPortFromHost(request_headers, port); + } +} + } // namespace Http } // namespace Envoy diff --git a/source/common/http/conn_manager_utility.h b/source/common/http/conn_manager_utility.h index 9443b55276c3..377143bde4ee 100644 --- a/source/common/http/conn_manager_utility.h +++ b/source/common/http/conn_manager_utility.h @@ -69,6 +69,9 @@ class ConnectionManagerUtility { static bool maybeNormalizePath(RequestHeaderMap& request_headers, const ConnectionManagerConfig& config); + static void maybeNormalizeHost(RequestHeaderMap& request_headers, + const ConnectionManagerConfig& config, uint32_t port); + /** * Mutate request headers if request needs to be traced. */ diff --git a/source/common/http/header_utility.cc b/source/common/http/header_utility.cc index 72560b73e076..303c07ffbf08 100644 --- a/source/common/http/header_utility.cc +++ b/source/common/http/header_utility.cc @@ -180,6 +180,39 @@ bool HeaderUtility::isEnvoyInternalRequest(const RequestHeaderMap& headers) { internal_request_header->value() == Headers::get().EnvoyInternalRequestValues.True; } +void HeaderUtility::stripPortFromHost(RequestHeaderMap& headers, uint32_t listener_port) { + + if (headers.Method() && + headers.Method()->value().getStringView() == Http::Headers::get().MethodValues.Connect) { + // According to RFC 2817 Connect method should have port part in host header. 
+ // In this case we won't strip it even if configured to do so. + return; + } + const auto original_host = headers.Host()->value().getStringView(); + const absl::string_view::size_type port_start = original_host.rfind(':'); + if (port_start == absl::string_view::npos) { + return; + } + // According to RFC3986 v6 address is always enclosed in "[]". section 3.2.2. + const auto v6_end_index = original_host.rfind("]"); + if (v6_end_index == absl::string_view::npos || v6_end_index < port_start) { + if ((port_start + 1) > original_host.size()) { + return; + } + const absl::string_view port_str = original_host.substr(port_start + 1); + uint32_t port = 0; + if (!absl::SimpleAtoi(port_str, &port)) { + return; + } + if (port != listener_port) { + // We would strip ports only if they are the same, as local port of the listener. + return; + } + const absl::string_view host = original_host.substr(0, port_start); + headers.setHost(host); + } +} + absl::optional> HeaderUtility::requestHeadersValid(const RequestHeaderMap& headers) { // Make sure the host is valid. diff --git a/source/common/http/header_utility.h b/source/common/http/header_utility.h index b061a5a31d0e..69a879888bb5 100644 --- a/source/common/http/header_utility.h +++ b/source/common/http/header_utility.h @@ -135,6 +135,11 @@ class HeaderUtility { */ static absl::optional> requestHeadersValid(const RequestHeaderMap& headers); + + /** + * @brief Remove the port part from host/authority header if it is equal to provided port + */ + static void stripPortFromHost(RequestHeaderMap& headers, uint32_t listener_port); }; } // namespace Http } // namespace Envoy diff --git a/source/extensions/filters/network/http_connection_manager/config.cc b/source/extensions/filters/network/http_connection_manager/config.cc index b2ca593b1cb4..d842e84b46f9 100644 --- a/source/extensions/filters/network/http_connection_manager/config.cc +++ b/source/extensions/filters/network/http_connection_manager/config.cc @@ -216,6 +216,7 @@ HttpConnectionManagerConfig::HttpConnectionManagerConfig( 0))), #endif merge_slashes_(config.merge_slashes()), + strip_matching_port_(config.strip_matching_host_port()), headers_with_underscores_action_( config.common_http_protocol_options().headers_with_underscores_action()), preserve_upstream_date_(config.preserve_upstream_date()) { diff --git a/source/extensions/filters/network/http_connection_manager/config.h b/source/extensions/filters/network/http_connection_manager/config.h index 35a179c32d03..4a5d3f87db77 100644 --- a/source/extensions/filters/network/http_connection_manager/config.h +++ b/source/extensions/filters/network/http_connection_manager/config.h @@ -157,6 +157,7 @@ class HttpConnectionManagerConfig : Logger::Loggable, const Http::Http1Settings& http1Settings() const override { return http1_settings_; } bool shouldNormalizePath() const override { return normalize_path_; } bool shouldMergeSlashes() const override { return merge_slashes_; } + bool shouldStripMatchingPort() const override { return strip_matching_port_; } envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction headersWithUnderscoresAction() const override { return headers_with_underscores_action_; @@ -225,6 +226,7 @@ class HttpConnectionManagerConfig : Logger::Loggable, std::chrono::milliseconds delayed_close_timeout_; const bool normalize_path_; const bool merge_slashes_; + const bool strip_matching_port_; const envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction headers_with_underscores_action_; const bool 
preserve_upstream_date_; diff --git a/source/server/http/admin.h b/source/server/http/admin.h index 1f69acdb120f..1abed8984b83 100644 --- a/source/server/http/admin.h +++ b/source/server/http/admin.h @@ -164,6 +164,7 @@ class AdminImpl : public Admin, const Http::Http1Settings& http1Settings() const override { return http1_settings_; } bool shouldNormalizePath() const override { return true; } bool shouldMergeSlashes() const override { return true; } + bool shouldStripMatchingPort() const override { return false; } envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction headersWithUnderscoresAction() const override { return envoy::config::core::v3::HttpProtocolOptions::ALLOW; diff --git a/test/common/http/conn_manager_impl_fuzz_test.cc b/test/common/http/conn_manager_impl_fuzz_test.cc index 8f878f625b30..d6152bdbcd16 100644 --- a/test/common/http/conn_manager_impl_fuzz_test.cc +++ b/test/common/http/conn_manager_impl_fuzz_test.cc @@ -156,6 +156,7 @@ class FuzzConfig : public ConnectionManagerConfig { const Http::Http1Settings& http1Settings() const override { return http1_settings_; } bool shouldNormalizePath() const override { return false; } bool shouldMergeSlashes() const override { return false; } + bool shouldStripMatchingPort() const override { return false; } envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction headersWithUnderscoresAction() const override { return envoy::config::core::v3::HttpProtocolOptions::ALLOW; diff --git a/test/common/http/conn_manager_impl_test.cc b/test/common/http/conn_manager_impl_test.cc index df03a23bb0f4..27793279b03c 100644 --- a/test/common/http/conn_manager_impl_test.cc +++ b/test/common/http/conn_manager_impl_test.cc @@ -126,7 +126,7 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan ON_CALL(filter_callbacks_.connection_, ssl()).WillByDefault(Return(ssl_connection_)); ON_CALL(Const(filter_callbacks_.connection_), ssl()).WillByDefault(Return(ssl_connection_)); filter_callbacks_.connection_.local_address_ = - std::make_shared("127.0.0.1"); + std::make_shared("127.0.0.1", 443); filter_callbacks_.connection_.remote_address_ = std::make_shared("0.0.0.0"); conn_manager_ = std::make_unique( @@ -348,6 +348,7 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan const Http::Http1Settings& http1Settings() const override { return http1_settings_; } bool shouldNormalizePath() const override { return normalize_path_; } bool shouldMergeSlashes() const override { return merge_slashes_; } + bool shouldStripMatchingPort() const override { return strip_matching_port_; } RequestIDExtensionSharedPtr requestIDExtension() override { return request_id_extension_; } envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction headersWithUnderscoresAction() const override { @@ -410,6 +411,7 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan Http::Http1Settings http1_settings_; bool normalize_path_ = false; bool merge_slashes_ = false; + bool strip_matching_port_ = false; envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction headers_with_underscores_action_ = envoy::config::core::v3::HttpProtocolOptions::ALLOW; NiceMock upstream_conn_; // for websocket tests @@ -899,6 +901,80 @@ TEST_F(HttpConnectionManagerImplTest, RouteShouldUseSantizedPath) { conn_manager_->onData(fake_input, false); } +// Filters observe host header w/o port's part when port's removal is configured 
+TEST_F(HttpConnectionManagerImplTest, FilterShouldUseNormalizedHost) { + setup(false, ""); + // Enable port removal + strip_matching_port_ = true; + const std::string original_host = "host:443"; + const std::string normalized_host = "host"; + + auto* filter = new MockStreamFilter(); + + EXPECT_CALL(filter_factory_, createFilterChain(_)) + .WillOnce(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void { + callbacks.addStreamDecoderFilter(StreamDecoderFilterSharedPtr{filter}); + })); + + EXPECT_CALL(*filter, decodeHeaders(_, true)) + .WillRepeatedly(Invoke([&](RequestHeaderMap& header_map, bool) -> FilterHeadersStatus { + EXPECT_EQ(normalized_host, header_map.Host()->value().getStringView()); + return FilterHeadersStatus::StopIteration; + })); + + EXPECT_CALL(*filter, setDecoderFilterCallbacks(_)); + + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); + RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ + {":authority", original_host}, {":path", "/"}, {":method", "GET"}}}; + decoder->decodeHeaders(std::move(headers), true); + return Http::okStatus(); + })); + + // Kick off the incoming data. + Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); +} + +// The router observes host header w/o port, not the original host, when +// remove_port is configured +TEST_F(HttpConnectionManagerImplTest, RouteShouldUseNormalizedHost) { + setup(false, ""); + // Enable port removal + strip_matching_port_ = true; + const std::string original_host = "host:443"; + const std::string normalized_host = "host"; + + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); + RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ + {":authority", original_host}, {":path", "/"}, {":method", "GET"}}}; + decoder->decodeHeaders(std::move(headers), true); + return Http::okStatus(); + })); + + const std::string fake_cluster_name = "fake_cluster"; + + std::shared_ptr fake_cluster = + std::make_shared>(); + std::shared_ptr route = std::make_shared>(); + EXPECT_CALL(route->route_entry_, clusterName()).WillRepeatedly(ReturnRef(fake_cluster_name)); + + EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _)) + .WillOnce(Invoke( + [&](const Http::RequestHeaderMap& header_map, const StreamInfo::StreamInfo&, uint64_t) { + EXPECT_EQ(normalized_host, header_map.Host()->value().getStringView()); + return route; + })); + EXPECT_CALL(filter_factory_, createFilterChain(_)) + .WillOnce(Invoke([&](FilterChainFactoryCallbacks&) -> void {})); + + // Kick off the incoming data. 
+ Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); +} + TEST_F(HttpConnectionManagerImplTest, PreserveUpstreamDateDisabledDateNotSet) { setup(false, ""); setUpEncoderAndDecoder(false, false); diff --git a/test/common/http/conn_manager_utility_test.cc b/test/common/http/conn_manager_utility_test.cc index 2aaf846bbc12..9e39dfe30d79 100644 --- a/test/common/http/conn_manager_utility_test.cc +++ b/test/common/http/conn_manager_utility_test.cc @@ -136,6 +136,7 @@ class MockConnectionManagerConfig : public ConnectionManagerConfig { MOCK_METHOD(const Http::Http1Settings&, http1Settings, (), (const)); MOCK_METHOD(bool, shouldNormalizePath, (), (const)); MOCK_METHOD(bool, shouldMergeSlashes, (), (const)); + MOCK_METHOD(bool, shouldStripMatchingPort, (), (const)); MOCK_METHOD(envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction, headersWithUnderscoresAction, (), (const)); MOCK_METHOD(bool, shouldPreserveUpstreamDate, (), (const)); @@ -1498,6 +1499,17 @@ TEST_F(ConnectionManagerUtilityTest, MergeSlashesWithoutNormalization) { EXPECT_EQ(header_map.Path()->value().getStringView(), "/xyz/../abc"); } +// maybeNormalizeHost() removes port part from host header. +TEST_F(ConnectionManagerUtilityTest, RemovePort) { + ON_CALL(config_, shouldStripMatchingPort()).WillByDefault(Return(true)); + TestRequestHeaderMapImpl original_headers; + original_headers.setHost("host:443"); + + TestRequestHeaderMapImpl header_map(original_headers); + ConnectionManagerUtility::maybeNormalizeHost(header_map, config_, 443); + EXPECT_EQ(header_map.Host()->value().getStringView(), "host"); +} + // test preserve_external_request_id true does not reset the passed requestId if passed TEST_F(ConnectionManagerUtilityTest, PreserveExternalRequestId) { connection_.remote_address_ = std::make_shared("134.2.2.11"); diff --git a/test/common/http/header_utility_test.cc b/test/common/http/header_utility_test.cc index e8383aa08be1..0006ff8ee917 100644 --- a/test/common/http/header_utility_test.cc +++ b/test/common/http/header_utility_test.cc @@ -20,6 +20,56 @@ envoy::config::route::v3::HeaderMatcher parseHeaderMatcherFromYaml(const std::st return header_matcher; } +class HeaderUtilityTest : public testing::Test { +public: + const HeaderEntry& hostHeaderEntry(const std::string& host_value, bool set_connect = false) { + headers_.setHost(host_value); + if (set_connect) { + headers_.setMethod(Http::Headers::get().MethodValues.Connect); + } + return *headers_.Host(); + } + RequestHeaderMapImpl headers_; +}; + +// Port's part from host header get removed +TEST_F(HeaderUtilityTest, RemovePortsFromHost) { + const std::vector> host_headers{ + {"localhost", "localhost"}, // w/o port part + {"localhost:443", "localhost"}, // name w/ port + {"", ""}, // empty + {":443", ""}, // just port + {"192.168.1.1", "192.168.1.1"}, // ipv4 + {"192.168.1.1:443", "192.168.1.1"}, // ipv4 w/ port + {"[fc00::1]:443", "[fc00::1]"}, // ipv6 w/ port + {"[fc00::1]", "[fc00::1]"}, // ipv6 + {":", ":"}, // malformed string #1 + {"]:", "]:"}, // malformed string #2 + {":abc", ":abc"}, // malformed string #3 + {"localhost:80", "localhost:80"}, // port not matching w/ hostname + {"192.168.1.1:80", "192.168.1.1:80"}, // port not matching w/ ipv4 + {"[fc00::1]:80", "[fc00::1]:80"} // port not matching w/ ipv6 + }; + + for (const auto& host_pair : host_headers) { + auto& host_header = hostHeaderEntry(host_pair.first); + HeaderUtility::stripPortFromHost(headers_, 443); + EXPECT_EQ(host_header.value().getStringView(), 
host_pair.second); + } +} + +// Port's part from host header won't be removed if method is "connect" +TEST_F(HeaderUtilityTest, RemovePortsFromHostConnect) { + const std::vector> host_headers{ + {"localhost:443", "localhost:443"}, + }; + for (const auto& host_pair : host_headers) { + auto& host_header = hostHeaderEntry(host_pair.first, true); + HeaderUtility::stripPortFromHost(headers_, 443); + EXPECT_EQ(host_header.value().getStringView(), host_pair.second); + } +} + TEST(HeaderDataConstructorTest, NoSpecifierSet) { const std::string yaml = R"EOF( name: test-header diff --git a/test/common/http/path_utility_test.cc b/test/common/http/path_utility_test.cc index bcf93f76d349..946c8a8131af 100644 --- a/test/common/http/path_utility_test.cc +++ b/test/common/http/path_utility_test.cc @@ -18,6 +18,10 @@ class PathUtilityTest : public testing::Test { headers_.setPath(path_value); return *headers_.Path(); } + const HeaderEntry& hostHeaderEntry(const std::string& host_value) { + headers_.setHost(host_value); + return *headers_.Host(); + } RequestHeaderMapImpl headers_; }; diff --git a/test/extensions/filters/network/http_connection_manager/config_test.cc b/test/extensions/filters/network/http_connection_manager/config_test.cc index b6bcef8241d1..02f3cbea978c 100644 --- a/test/extensions/filters/network/http_connection_manager/config_test.cc +++ b/test/extensions/filters/network/http_connection_manager/config_test.cc @@ -978,6 +978,56 @@ TEST_F(HttpConnectionManagerConfigTest, MergeSlashesFalse) { EXPECT_FALSE(config.shouldMergeSlashes()); } +// Validated that by default we don't remove port. +TEST_F(HttpConnectionManagerConfigTest, RemovePortDefault) { + const std::string yaml_string = R"EOF( + stat_prefix: ingress_http + route_config: + name: local_route + http_filters: + - name: envoy.filters.http.router + )EOF"; + + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + date_provider_, route_config_provider_manager_, + scoped_routes_config_provider_manager_, http_tracer_manager_); + EXPECT_FALSE(config.shouldStripMatchingPort()); +} + +// Validated that when configured, we remove port. +TEST_F(HttpConnectionManagerConfigTest, RemovePortTrue) { + const std::string yaml_string = R"EOF( + stat_prefix: ingress_http + route_config: + name: local_route + strip_matching_host_port: true + http_filters: + - name: envoy.filters.http.router + )EOF"; + + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + date_provider_, route_config_provider_manager_, + scoped_routes_config_provider_manager_, http_tracer_manager_); + EXPECT_TRUE(config.shouldStripMatchingPort()); +} + +// Validated that when explicitly set false, we don't remove port. +TEST_F(HttpConnectionManagerConfigTest, RemovePortFalse) { + const std::string yaml_string = R"EOF( + stat_prefix: ingress_http + route_config: + name: local_route + strip_matching_host_port: false + http_filters: + - name: envoy.filters.http.router + )EOF"; + + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + date_provider_, route_config_provider_manager_, + scoped_routes_config_provider_manager_, http_tracer_manager_); + EXPECT_FALSE(config.shouldStripMatchingPort()); +} + // Validated that by default we allow requests with header names containing underscores. 
TEST_F(HttpConnectionManagerConfigTest, HeadersWithUnderscoresAllowedByDefault) { const std::string yaml_string = R"EOF( From 6a2c28a2383f47966e308536047de7f977d6450e Mon Sep 17 00:00:00 2001 From: Charles Strahan Date: Fri, 8 May 2020 13:02:24 -0500 Subject: [PATCH 117/909] Fix build system error introduced in #8596 (#9988) Resolves the build system error introduced in #8596 Signed-off-by: Charles Strahan --- source/server/BUILD | 86 +++++++++------------------------------------ test/server/BUILD | 9 ++--- 2 files changed, 19 insertions(+), 76 deletions(-) diff --git a/source/server/BUILD b/source/server/BUILD index 3611191f990a..ec35936335c7 100644 --- a/source/server/BUILD +++ b/source/server/BUILD @@ -161,9 +161,8 @@ envoy_cc_library( srcs = envoy_select_hot_restart(["hot_restarting_parent.cc"]), hdrs = envoy_select_hot_restart(["hot_restarting_parent.h"]), deps = [ - ":api_listener_lib", ":hot_restarting_base", - ":listener_lib", + ":listener_manager_lib", "//source/common/memory:stats_lib", "//source/common/stats:stat_merger_lib", "//source/common/stats:symbol_table_lib", @@ -275,77 +274,14 @@ envoy_cc_library( ], ) -envoy_cc_library( - name = "api_listener_lib", - srcs = [ - "api_listener_impl.cc", - ], - hdrs = [ - "api_listener_impl.h", - ], - deps = [ - ":drain_manager_lib", - ":filter_chain_manager_lib", - ":listener_manager_impl", - "//include/envoy/network:connection_interface", - "//include/envoy/server:api_listener_interface", - "//include/envoy/server:filter_config_interface", - "//include/envoy/server:listener_manager_interface", - "//source/common/common:empty_string", - "//source/common/http:conn_manager_lib", - "//source/common/init:manager_lib", - "//source/common/network:resolver_lib", - "//source/common/stream_info:stream_info_lib", - "//source/extensions/filters/network/http_connection_manager:config", - "@envoy_api//envoy/api/v2:pkg_cc_proto", - "@envoy_api//envoy/api/v2/listener:pkg_cc_proto", - "@envoy_api//envoy/config/core/v3:pkg_cc_proto", - "@envoy_api//envoy/config/listener/v3:pkg_cc_proto", - ], -) - -envoy_cc_library( - name = "listener_lib", - srcs = [ - "listener_impl.cc", - ], - hdrs = [ - "listener_impl.h", - ], - deps = [ - ":configuration_lib", - ":drain_manager_lib", - ":filter_chain_manager_lib", - ":listener_manager_impl", - ":transport_socket_config_lib", - ":well_known_names_lib", - "//include/envoy/access_log:access_log_interface", - "//include/envoy/server:active_udp_listener_config_interface", - "//include/envoy/server:filter_config_interface", - "//include/envoy/server:listener_manager_interface", - "//include/envoy/server:transport_socket_config_interface", - "//source/common/access_log:access_log_lib", - "//source/common/config:utility_lib", - "//source/common/init:manager_lib", - "//source/common/init:target_lib", - "//source/common/network:connection_balancer_lib", - "//source/common/network:listen_socket_lib", - "//source/common/network:socket_option_factory_lib", - "//source/common/network:utility_lib", - "//source/common/protobuf:utility_lib", - "//source/extensions/filters/listener:well_known_names", - "//source/extensions/transport_sockets:well_known_names", - "@envoy_api//envoy/config/core/v3:pkg_cc_proto", - "@envoy_api//envoy/config/listener/v3:pkg_cc_proto", - ], -) - # TODO(junr03): actually separate this lib from the listener and api listener lib. # this can be done if the parent_ in the listener and the api listener becomes the ListenerManager interface. 
# the issue right now is that the listener's reach into the listener manager's server_ instance variable. envoy_cc_library( - name = "listener_manager_impl", + name = "listener_manager_lib", srcs = [ + "api_listener_impl.cc", + "listener_impl.cc", "listener_manager_impl.cc", ], hdrs = [ @@ -360,24 +296,35 @@ envoy_cc_library( ":lds_api_lib", ":transport_socket_config_lib", ":well_known_names_lib", + "//include/envoy/access_log:access_log_interface", + "//include/envoy/network:connection_interface", "//include/envoy/server:active_udp_listener_config_interface", + "//include/envoy/server:api_listener_interface", "//include/envoy/server:filter_config_interface", "//include/envoy/server:listener_manager_interface", "//include/envoy/server:transport_socket_config_interface", "//include/envoy/server:worker_interface", + "//source/common/access_log:access_log_lib", + "//source/common/common:empty_string", "//source/common/config:utility_lib", "//source/common/config:version_converter_lib", "//source/common/http:conn_manager_lib", "//source/common/init:manager_lib", + "//source/common/init:target_lib", + "//source/common/network:connection_balancer_lib", "//source/common/network:filter_matcher_lib", "//source/common/network:listen_socket_lib", + "//source/common/network:resolver_lib", "//source/common/network:socket_option_factory_lib", "//source/common/network:utility_lib", "//source/common/protobuf:utility_lib", + "//source/common/stream_info:stream_info_lib", "//source/extensions/filters/listener:well_known_names", "//source/extensions/filters/network/http_connection_manager:config", "//source/extensions/transport_sockets:well_known_names", "@envoy_api//envoy/admin/v3:pkg_cc_proto", + "@envoy_api//envoy/api/v2:pkg_cc_proto", + "@envoy_api//envoy/api/v2/listener:pkg_cc_proto", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/config/listener/v3:pkg_cc_proto", ], @@ -437,12 +384,11 @@ envoy_cc_library( ], deps = [ ":active_raw_udp_listener_config", - ":api_listener_lib", ":configuration_lib", ":connection_handler_lib", ":guarddog_lib", ":listener_hooks_lib", - ":listener_lib", + ":listener_manager_lib", ":ssl_context_manager_lib", ":worker_lib", "//include/envoy/event:dispatcher_interface", diff --git a/test/server/BUILD b/test/server/BUILD index bdab7e6a7e4c..7a6b145abd8f 100644 --- a/test/server/BUILD +++ b/test/server/BUILD @@ -20,8 +20,7 @@ envoy_cc_test( srcs = ["api_listener_test.cc"], deps = [ ":utility_lib", - "//source/server:api_listener_lib", - "//source/server:listener_lib", + "//source/server:listener_manager_lib", "//test/mocks/network:network_mocks", "//test/mocks/server:server_mocks", "//test/test_common:utility_lib", @@ -205,8 +204,7 @@ envoy_cc_test_library( data = ["//test/extensions/transport_sockets/tls/test_data:certs"], deps = [ "//source/common/init:manager_lib", - "//source/server:api_listener_lib", - "//source/server:listener_lib", + "//source/server:listener_manager_lib", "//test/mocks/init:init_mocks", "//test/mocks/network:network_mocks", "//test/mocks/server:server_mocks", @@ -290,9 +288,8 @@ envoy_cc_test( "//source/extensions/transport_sockets/raw_buffer:config", "//source/extensions/transport_sockets/tls:config", "//source/extensions/transport_sockets/tls:ssl_socket_lib", - "//source/server:api_listener_lib", "//source/server:filter_chain_manager_lib", - "//source/server:listener_lib", + "//source/server:listener_manager_lib", "//test/mocks/network:network_mocks", "//test/mocks/server:server_mocks", "//test/test_common:environment_lib", From 
49efb9841a58ebdc43a666f55c445911c8e4181c Mon Sep 17 00:00:00 2001 From: Dmitry Rozhkov Date: Fri, 8 May 2020 21:06:04 +0300 Subject: [PATCH 118/909] compressor: expose generic compressor filter to users (#10553) Currently the generic HTTP compressor filter isn't exposed to users even though it's used internally by `envoy.filters.http.gzip` and can be used by external filter extensions. Expose the compressor's config API to users. For example the filter can be configured as follows: ... filter_chains: filters: - name: envoy.http_connection_manager config: http_filters: - name: envoy.filters.http.compressor config: disable_on_etag_header: true content_length: 100 content_type: - text/html - application/json compressor_library: name: envoy.filters.http.compressor.gzip config: memory_level: 3 window_bits: 10 compression_level: best compression_strategy: rle ... Multiple compressor filters using different compressor libraries, e.g. gzip and brotli, can be stacked in one filter chain. Signed-off-by: Dmitry Rozhkov --- CODEOWNERS | 4 + api/BUILD | 1 + .../compression/gzip/compressor/v3/BUILD | 9 + .../compression/gzip/compressor/v3/gzip.proto | 79 +++++ .../http/compressor/v3/compressor.proto | 13 +- api/versioning/BUILD | 1 + .../api-v3/config/compression/compression.rst | 8 + docs/root/api-v3/config/config.rst | 1 + .../http/http_filters/compressor_filter.rst | 108 ++++++ .../http/http_filters/gzip_filter.rst | 5 + .../http/http_filters/http_filters.rst | 1 + docs/root/version_history/current.rst | 3 + .../compression/gzip/compressor/v3/BUILD | 9 + .../compression/gzip/compressor/v3/gzip.proto | 79 +++++ .../http/compressor/v3/compressor.proto | 13 +- include/envoy/compression/compressor/BUILD | 35 ++ .../{ => compression}/compressor/compressor.h | 4 + include/envoy/compression/compressor/config.h | 22 ++ .../envoy/compression/compressor/factory.h | 22 ++ include/envoy/compressor/BUILD | 17 - source/common/compressor/BUILD | 22 -- .../compression/common/compressor/BUILD | 19 ++ .../common/compressor/factory_base.h | 46 +++ .../compression/gzip/compressor/BUILD | 37 ++ .../compression/gzip/compressor/config.cc | 98 ++++++ .../compression/gzip/compressor/config.h | 71 ++++ .../gzip}/compressor/zlib_compressor_impl.cc | 13 +- .../gzip}/compressor/zlib_compressor_impl.h | 27 +- source/extensions/extensions_build_config.bzl | 7 + .../filters/http/common/compressor/BUILD | 3 +- .../http/common/compressor/compressor.cc | 5 +- .../http/common/compressor/compressor.h | 9 +- .../extensions/filters/http/compressor/BUILD | 39 +++ .../http/compressor/compressor_filter.cc | 26 ++ .../http/compressor/compressor_filter.h | 35 ++ .../filters/http/compressor/config.cc | 52 +++ .../filters/http/compressor/config.h | 33 ++ source/extensions/filters/http/gzip/BUILD | 3 +- source/extensions/filters/http/gzip/config.cc | 19 ++ .../filters/http/gzip/gzip_filter.cc | 32 +- .../filters/http/gzip/gzip_filter.h | 19 +- .../filters/http/well_known_names.h | 2 + .../compressor/zlib_compressor_impl_test.cc | 207 ------------ test/common/decompressor/BUILD | 2 +- .../zlib_decompressor_impl_test.cc | 85 +++-- .../compression/gzip}/BUILD | 14 +- .../compression/gzip/compressor/BUILD | 24 ++ .../compressor/zlib_compressor_impl_test.cc | 268 +++++++++++++++ ...ized-compressor_fuzz_test-5149986500640768 | Bin ...ized-compressor_fuzz_test-5407695477932032 | Bin ...ized-compressor_fuzz_test-5644831560302592 | Bin ...ized-compressor_fuzz_test-6005942746873856 | Bin .../compression/gzip}/compressor_corpus/empty | 0 
.../compression/gzip}/compressor_corpus/noise | Bin .../gzip}/compressor_corpus/simple | 0 .../compression/gzip}/compressor_fuzz_test.cc | 12 +- .../filters/http/common/compressor/BUILD | 4 +- .../compressor_filter_speed_test.cc | 58 ++-- .../compressor/compressor_filter_test.cc | 255 ++++++++++---- test/extensions/filters/http/compressor/BUILD | 42 +++ .../compressor_filter_integration_test.cc | 317 ++++++++++++++++++ .../http/compressor/compressor_filter_test.cc | 34 ++ test/extensions/filters/http/gzip/BUILD | 4 +- .../http/gzip/gzip_filter_integration_test.cc | 22 +- .../filters/http/gzip/gzip_filter_test.cc | 84 +++-- test/mocks/compression/compressor/BUILD | 19 ++ test/mocks/compression/compressor/mocks.cc | 21 ++ test/mocks/compression/compressor/mocks.h | 37 ++ tools/spelling/spelling_dictionary.txt | 1 + 69 files changed, 2102 insertions(+), 459 deletions(-) create mode 100644 api/envoy/extensions/compression/gzip/compressor/v3/BUILD create mode 100644 api/envoy/extensions/compression/gzip/compressor/v3/gzip.proto create mode 100644 docs/root/api-v3/config/compression/compression.rst create mode 100644 docs/root/configuration/http/http_filters/compressor_filter.rst create mode 100644 generated_api_shadow/envoy/extensions/compression/gzip/compressor/v3/BUILD create mode 100644 generated_api_shadow/envoy/extensions/compression/gzip/compressor/v3/gzip.proto create mode 100644 include/envoy/compression/compressor/BUILD rename include/envoy/{ => compression}/compressor/compressor.h (86%) create mode 100644 include/envoy/compression/compressor/config.h create mode 100644 include/envoy/compression/compressor/factory.h delete mode 100644 include/envoy/compressor/BUILD delete mode 100644 source/common/compressor/BUILD create mode 100644 source/extensions/compression/common/compressor/BUILD create mode 100644 source/extensions/compression/common/compressor/factory_base.h create mode 100644 source/extensions/compression/gzip/compressor/BUILD create mode 100644 source/extensions/compression/gzip/compressor/config.cc create mode 100644 source/extensions/compression/gzip/compressor/config.h rename source/{common => extensions/compression/gzip}/compressor/zlib_compressor_impl.cc (85%) rename source/{common => extensions/compression/gzip}/compressor/zlib_compressor_impl.h (76%) create mode 100644 source/extensions/filters/http/compressor/BUILD create mode 100644 source/extensions/filters/http/compressor/compressor_filter.cc create mode 100644 source/extensions/filters/http/compressor/compressor_filter.h create mode 100644 source/extensions/filters/http/compressor/config.cc create mode 100644 source/extensions/filters/http/compressor/config.h delete mode 100644 test/common/compressor/zlib_compressor_impl_test.cc rename test/{common/compressor => extensions/compression/gzip}/BUILD (54%) create mode 100644 test/extensions/compression/gzip/compressor/BUILD create mode 100644 test/extensions/compression/gzip/compressor/zlib_compressor_impl_test.cc rename test/{common/compressor => extensions/compression/gzip}/compressor_corpus/clusterfuzz-testcase-minimized-compressor_fuzz_test-5149986500640768 (100%) rename test/{common/compressor => extensions/compression/gzip}/compressor_corpus/clusterfuzz-testcase-minimized-compressor_fuzz_test-5407695477932032 (100%) rename test/{common/compressor => extensions/compression/gzip}/compressor_corpus/clusterfuzz-testcase-minimized-compressor_fuzz_test-5644831560302592 (100%) rename test/{common/compressor => 
extensions/compression/gzip}/compressor_corpus/clusterfuzz-testcase-minimized-compressor_fuzz_test-6005942746873856 (100%) rename test/{common/compressor => extensions/compression/gzip}/compressor_corpus/empty (100%) rename test/{common/compressor => extensions/compression/gzip}/compressor_corpus/noise (100%) rename test/{common/compressor => extensions/compression/gzip}/compressor_corpus/simple (100%) rename test/{common/compressor => extensions/compression/gzip}/compressor_fuzz_test.cc (88%) create mode 100644 test/extensions/filters/http/compressor/BUILD create mode 100644 test/extensions/filters/http/compressor/compressor_filter_integration_test.cc create mode 100644 test/extensions/filters/http/compressor/compressor_filter_test.cc create mode 100644 test/mocks/compression/compressor/BUILD create mode 100644 test/mocks/compression/compressor/mocks.cc create mode 100644 test/mocks/compression/compressor/mocks.h diff --git a/CODEOWNERS b/CODEOWNERS index 4a7d6aa7cafe..b82652d16143 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -6,6 +6,9 @@ /api/ @envoyproxy/api-shepherds # access loggers /*/extensions/access_loggers/common @auni53 @zuercher +# compression extensions +/*/extensions/compression/common/compressor @rojkov @junr03 +/*/extensions/compression/gzip/compressor @rojkov @junr03 # csrf extension /*/extensions/filters/http/csrf @dschaller @mattklein123 # original_src http filter extension @@ -22,6 +25,7 @@ extensions/filters/common/original_src @snowp @klarose /*/extensions/filters/network/thrift_proxy @zuercher @brian-pane # compressor used by http compression filters /*/extensions/filters/http/common/compressor @gsagula @rojkov @dio +/*/extensions/filters/http/compressor @rojkov @dio # jwt_authn http filter extension /*/extensions/filters/http/jwt_authn @qiwzhang @lizan # grpc_http1_reverse_bridge http filter extension diff --git a/api/BUILD b/api/BUILD index fe373e4533d5..2472352ab2bd 100644 --- a/api/BUILD +++ b/api/BUILD @@ -161,6 +161,7 @@ proto_library( "//envoy/extensions/common/dynamic_forward_proxy/v3:pkg", "//envoy/extensions/common/ratelimit/v3:pkg", "//envoy/extensions/common/tap/v3:pkg", + "//envoy/extensions/compression/gzip/compressor/v3:pkg", "//envoy/extensions/filters/common/fault/v3:pkg", "//envoy/extensions/filters/http/adaptive_concurrency/v3:pkg", "//envoy/extensions/filters/http/aws_lambda/v3:pkg", diff --git a/api/envoy/extensions/compression/gzip/compressor/v3/BUILD b/api/envoy/extensions/compression/gzip/compressor/v3/BUILD new file mode 100644 index 000000000000..ef3541ebcb1d --- /dev/null +++ b/api/envoy/extensions/compression/gzip/compressor/v3/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/api/envoy/extensions/compression/gzip/compressor/v3/gzip.proto b/api/envoy/extensions/compression/gzip/compressor/v3/gzip.proto new file mode 100644 index 000000000000..7508e17df221 --- /dev/null +++ b/api/envoy/extensions/compression/gzip/compressor/v3/gzip.proto @@ -0,0 +1,79 @@ +syntax = "proto3"; + +package envoy.extensions.compression.gzip.compressor.v3; + +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.compression.gzip.compressor.v3"; +option java_outer_classname = "GzipProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Gzip] +// [#extension: envoy.compression.gzip.compressor] + +// [#next-free-field: 6] +message Gzip { + // All the values of this enumeration translate directly to zlib's compression strategies. + // For more information about each strategy, please refer to zlib manual. + enum CompressionStrategy { + DEFAULT_STRATEGY = 0; + FILTERED = 1; + HUFFMAN_ONLY = 2; + RLE = 3; + FIXED = 4; + } + + enum CompressionLevel { + option allow_alias = true; + + DEFAULT_COMPRESSION = 0; + BEST_SPEED = 1; + COMPRESSION_LEVEL_1 = 1; + COMPRESSION_LEVEL_2 = 2; + COMPRESSION_LEVEL_3 = 3; + COMPRESSION_LEVEL_4 = 4; + COMPRESSION_LEVEL_5 = 5; + COMPRESSION_LEVEL_6 = 6; + COMPRESSION_LEVEL_7 = 7; + COMPRESSION_LEVEL_8 = 8; + COMPRESSION_LEVEL_9 = 9; + BEST_COMPRESSION = 9; + } + + // Value from 1 to 9 that controls the amount of internal memory used by zlib. Higher values + // use more memory, but are faster and produce better compression results. The default value is 5. + google.protobuf.UInt32Value memory_level = 1 [(validate.rules).uint32 = {lte: 9 gte: 1}]; + + // A value used for selecting the zlib compression level. This setting will affect speed and + // amount of compression applied to the content. "BEST_COMPRESSION" provides higher compression + // at the cost of higher latency and is equal to "COMPRESSION_LEVEL_9". "BEST_SPEED" provides + // lower compression with minimum impact on response time, the same as "COMPRESSION_LEVEL_1". + // "DEFAULT_COMPRESSION" provides an optimal result between speed and compression. According + // to zlib's manual this level gives the same result as "COMPRESSION_LEVEL_6". + // This field will be set to "DEFAULT_COMPRESSION" if not specified. + CompressionLevel compression_level = 2 [(validate.rules).enum = {defined_only: true}]; + + // A value used for selecting the zlib compression strategy which is directly related to the + // characteristics of the content. Most of the time "DEFAULT_STRATEGY" will be the best choice, + // which is also the default value for the parameter, though there are situations when + // changing this parameter might produce better results. For example, run-length encoding (RLE) + // is typically used when the content is known for having sequences which same data occurs many + // consecutive times. For more information about each strategy, please refer to zlib manual. + CompressionStrategy compression_strategy = 3 [(validate.rules).enum = {defined_only: true}]; + + // Value from 9 to 15 that represents the base two logarithmic of the compressor's window size. 
+ // Larger window results in better compression at the expense of memory usage. The default is 12 + // which will produce a 4096 bytes window. For more details about this parameter, please refer to + // zlib manual > deflateInit2. + google.protobuf.UInt32Value window_bits = 4 [(validate.rules).uint32 = {lte: 15 gte: 9}]; + + // Value for Zlib's next output buffer. If not set, defaults to 4096. + // See https://www.zlib.net/manual.html for more details. Also see + // https://github.com/envoyproxy/envoy/issues/8448 for context on this filter's performance. + google.protobuf.UInt32Value chunk_size = 5 [(validate.rules).uint32 = {lte: 65536 gte: 4096}]; +} diff --git a/api/envoy/extensions/filters/http/compressor/v3/compressor.proto b/api/envoy/extensions/filters/http/compressor/v3/compressor.proto index 0eefe55140d2..1f6cd63e9d52 100644 --- a/api/envoy/extensions/filters/http/compressor/v3/compressor.proto +++ b/api/envoy/extensions/filters/http/compressor/v3/compressor.proto @@ -3,11 +3,14 @@ syntax = "proto3"; package envoy.extensions.filters.http.compressor.v3; import "envoy/config/core/v3/base.proto"; +import "envoy/config/core/v3/extension.proto"; +import "google/protobuf/any.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.compressor.v3"; option java_outer_classname = "CompressorProto"; @@ -15,8 +18,10 @@ option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Compressor] +// Compressor :ref:`configuration overview `. +// [#extension: envoy.filters.http.compressor] -// [#next-free-field: 6] +// [#next-free-field: 7] message Compressor { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.compressor.v2.Compressor"; @@ -46,4 +51,10 @@ message Compressor { // Runtime flag that controls whether the filter is enabled or not. If set to false, the // filter will operate as a pass-through filter. If not specified, defaults to enabled. config.core.v3.RuntimeFeatureFlag runtime_enabled = 5; + + // A compressor library to use for compression. Currently only + // :ref:`envoy.filters.http.compressor.gzip` + // is included in Envoy. + // This field is ignored if used in the context of the gzip http-filter, but is mandatory otherwise. + config.core.v3.TypedExtensionConfig compressor_library = 6; } diff --git a/api/versioning/BUILD b/api/versioning/BUILD index 992e9a33342b..4d768d09a015 100644 --- a/api/versioning/BUILD +++ b/api/versioning/BUILD @@ -44,6 +44,7 @@ proto_library( "//envoy/extensions/common/dynamic_forward_proxy/v3:pkg", "//envoy/extensions/common/ratelimit/v3:pkg", "//envoy/extensions/common/tap/v3:pkg", + "//envoy/extensions/compression/gzip/compressor/v3:pkg", "//envoy/extensions/filters/common/fault/v3:pkg", "//envoy/extensions/filters/http/adaptive_concurrency/v3:pkg", "//envoy/extensions/filters/http/aws_lambda/v3:pkg", diff --git a/docs/root/api-v3/config/compression/compression.rst b/docs/root/api-v3/config/compression/compression.rst new file mode 100644 index 000000000000..80aa0ba927cc --- /dev/null +++ b/docs/root/api-v3/config/compression/compression.rst @@ -0,0 +1,8 @@ +Compression +=========== + +.. 
toctree:: + :glob: + :maxdepth: 2 + + ../../extensions/compression/gzip/*/v3/* diff --git a/docs/root/api-v3/config/config.rst b/docs/root/api-v3/config/config.rst index d7e6e6edd43c..663ac872183d 100644 --- a/docs/root/api-v3/config/config.rst +++ b/docs/root/api-v3/config/config.rst @@ -12,6 +12,7 @@ Extensions transport_socket/transport_socket resource_monitor/resource_monitor common/common + compression/compression cluster/cluster grpc_credential/grpc_credential retry/retry diff --git a/docs/root/configuration/http/http_filters/compressor_filter.rst b/docs/root/configuration/http/http_filters/compressor_filter.rst new file mode 100644 index 000000000000..08e7298e1dc6 --- /dev/null +++ b/docs/root/configuration/http/http_filters/compressor_filter.rst @@ -0,0 +1,108 @@ +.. _config_http_filters_compressor: + +Compressor +========== +Compressor is an HTTP filter which enables Envoy to compress dispatched data +from an upstream service upon client request. Compression is useful in +situations when bandwidth is scarce and large payloads can be effectively compressed +at the expense of higher CPU load or offloading it to a compression accelerator. + +.. note:: + + This filter deprecates the :ref:`HTTP Gzip filter `. + +Configuration +------------- +* :ref:`v3 API reference ` +* This filter should be configured with the name *envoy.filters.http.compressor*. + +How it works +------------ +When compressor filter is enabled, request and response headers are inspected to +determine whether or not the content should be compressed. The content is +compressed and then sent to the client with the appropriate headers, if +response and request allow. + +Currently the filter supports :ref:`gzip compression ` +only. Other compression libraries can be supported as extensions. + +An example configuration of the filter may look like the following: + +.. code-block:: yaml + + http_filters: + - name: compressor + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.compressor.v3.Compressor + disable_on_etag_header: true + content_length: 100 + content_type: + - text/html + - application/json + compressor_library: + name: text_optimized + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.compressor.gzip.v3.Gzip + memory_level: 3 + window_bits: 10 + compression_level: best + compression_strategy: default_strategy + +By *default* compression will be *skipped* when: + +- A request does NOT contain *accept-encoding* header. +- A request includes *accept-encoding* header, but it does not contain "gzip" or "\*". +- A request includes *accept-encoding* with "gzip" or "\*" with the weight "q=0". Note + that the "gzip" will have a higher weight then "\*". For example, if *accept-encoding* + is "gzip;q=0,\*;q=1", the filter will not compress. But if the header is set to + "\*;q=0,gzip;q=1", the filter will compress. +- A request whose *accept-encoding* header includes any encoding type with a higher + weight than "gzip"'s given the corresponding compression filter is present in the chain. +- A response contains a *content-encoding* header. +- A response contains a *cache-control* header whose value includes "no-transform". +- A response contains a *transfer-encoding* header whose value includes "gzip". +- A response does not contain a *content-type* value that matches one of the selected + mime-types, which default to *application/javascript*, *application/json*, + *application/xhtml+xml*, *image/svg+xml*, *text/css*, *text/html*, *text/plain*, + *text/xml*. 
+- Neither *content-length* nor *transfer-encoding* headers are present in + the response. +- Response size is smaller than 30 bytes (only applicable when *transfer-encoding* + is not chunked). + +Please note that in case the filter is configured to use a compression library extension +other than gzip it looks for content encoding in the *accept-encoding* header provided by +the extension. + +When compression is *applied*: + +- The *content-length* is removed from response headers. +- Response headers contain "*transfer-encoding: chunked*" and do not contain + "*content-encoding*" header. +- The "*vary: accept-encoding*" header is inserted on every response. + +.. _compressor-statistics: + +Statistics +---------- + +Every configured Compressor filter has statistics rooted at +.compressor...* +with the following: + +.. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + compressed, Counter, Number of requests compressed. + not_compressed, Counter, Number of requests not compressed. + no_accept_header, Counter, Number of requests with no accept header sent. + header_identity, Counter, Number of requests sent with "identity" set as the *accept-encoding*. + header_compressor_used, Counter, Number of requests sent with "gzip" set as the *accept-encoding*. + header_compressor_overshadowed, Counter, Number of requests skipped by this filter instance because they were handled by another filter in the same filter chain. + header_wildcard, Counter, Number of requests sent with "\*" set as the *accept-encoding*. + header_not_valid, Counter, Number of requests sent with a not valid *accept-encoding* header (aka "q=0" or an unsupported encoding type). + total_uncompressed_bytes, Counter, The total uncompressed bytes of all the requests that were marked for compression. + total_compressed_bytes, Counter, The total compressed bytes of all the requests that were marked for compression. + content_length_too_small, Counter, Number of requests that accepted gzip encoding but did not compress because the payload was too small. + not_compressed_etag, Counter, Number of requests that were not compressed due to the etag header. *disable_on_etag_header* must be turned on for this to happen. diff --git a/docs/root/configuration/http/http_filters/gzip_filter.rst b/docs/root/configuration/http/http_filters/gzip_filter.rst index 9267e3ead6e3..f492b13d102e 100644 --- a/docs/root/configuration/http/http_filters/gzip_filter.rst +++ b/docs/root/configuration/http/http_filters/gzip_filter.rst @@ -1,5 +1,10 @@ .. _config_http_filters_gzip: +.. warning:: + + This filter has been deprecated in favor the + :ref:`HTTP Compressor filter `. + Gzip ==== Gzip is an HTTP filter which enables Envoy to compress dispatched data diff --git a/docs/root/configuration/http/http_filters/http_filters.rst b/docs/root/configuration/http/http_filters/http_filters.rst index aa435d6d5a9f..3d541aed13cf 100644 --- a/docs/root/configuration/http/http_filters/http_filters.rst +++ b/docs/root/configuration/http/http_filters/http_filters.rst @@ -10,6 +10,7 @@ HTTP filters aws_lambda_filter aws_request_signing_filter buffer_filter + compressor_filter cors_filter csrf_filter dynamic_forward_proxy_filter diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 9eb566ccd65f..bc30af368871 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -7,6 +7,7 @@ Changes * access loggers: added GRPC_STATUS operator on logging format. 
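To make the compressor filter's configuration model above concrete, here is a sketch written in the style of the HttpConnectionManagerConfig tests elsewhere in this series. The literal YAML is assumed rather than taken from this patch; the type URLs follow the proto packages introduced in this commit (`envoy.extensions.filters.http.compressor.v3.Compressor` and `envoy.extensions.compression.gzip.compressor.v3.Gzip`), and the library name `gzip_text` is arbitrary.

    #include <string>

    // Sketch only: a connection manager filter chain wiring the generic
    // compressor filter to the gzip compressor library.
    const std::string yaml_string = R"EOF(
      stat_prefix: ingress_http
      route_config:
        name: local_route
      http_filters:
      - name: envoy.filters.http.compressor
        typed_config:
          "@type": type.googleapis.com/envoy.extensions.filters.http.compressor.v3.Compressor
          content_type: [text/html, application/json]
          compressor_library:
            name: gzip_text
            typed_config:
              "@type": type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip
              memory_level: 3
              window_bits: 10
      - name: envoy.filters.http.router
      )EOF";

Stacking a second compressor filter instance with a different compressor library in the same chain follows the same pattern, as the commit message notes; requests already handled by an earlier instance are then counted in the later instance's *header_compressor_overshadowed* stat.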
* access loggers: applied existing buffer limits to the non-google gRPC access logs, as well as :ref:`stats ` for logged / dropped logs. * access loggers: extened specifier for FilterStateFormatter to output :ref:`unstructured log string `. +* compressor: generic :ref:`compressor ` filter exposed to users. * config: added :ref:`version_text ` stat that reflects xDS version. * dynamic forward proxy: added :ref:`SNI based dynamic forward proxy ` support. * fault: added support for controlling the percentage of requests that abort, delay and response rate limits faults @@ -53,6 +54,8 @@ Deprecated * Tracing provider configuration as part of :ref:`bootstrap config ` has been deprecated in favor of configuration as part of :ref:`HTTP connection manager `. +* The :ref:`HTTP Gzip filter ` has been deprecated in favor of + :ref:`Compressor `. * The * :ref:`GoogleRE2.max_program_size` field is now deprecated. Management servers are expected to validate regexp program sizes instead of expecting the client to do it. diff --git a/generated_api_shadow/envoy/extensions/compression/gzip/compressor/v3/BUILD b/generated_api_shadow/envoy/extensions/compression/gzip/compressor/v3/BUILD new file mode 100644 index 000000000000..ef3541ebcb1d --- /dev/null +++ b/generated_api_shadow/envoy/extensions/compression/gzip/compressor/v3/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/generated_api_shadow/envoy/extensions/compression/gzip/compressor/v3/gzip.proto b/generated_api_shadow/envoy/extensions/compression/gzip/compressor/v3/gzip.proto new file mode 100644 index 000000000000..7508e17df221 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/compression/gzip/compressor/v3/gzip.proto @@ -0,0 +1,79 @@ +syntax = "proto3"; + +package envoy.extensions.compression.gzip.compressor.v3; + +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.compression.gzip.compressor.v3"; +option java_outer_classname = "GzipProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Gzip] +// [#extension: envoy.compression.gzip.compressor] + +// [#next-free-field: 6] +message Gzip { + // All the values of this enumeration translate directly to zlib's compression strategies. + // For more information about each strategy, please refer to zlib manual. + enum CompressionStrategy { + DEFAULT_STRATEGY = 0; + FILTERED = 1; + HUFFMAN_ONLY = 2; + RLE = 3; + FIXED = 4; + } + + enum CompressionLevel { + option allow_alias = true; + + DEFAULT_COMPRESSION = 0; + BEST_SPEED = 1; + COMPRESSION_LEVEL_1 = 1; + COMPRESSION_LEVEL_2 = 2; + COMPRESSION_LEVEL_3 = 3; + COMPRESSION_LEVEL_4 = 4; + COMPRESSION_LEVEL_5 = 5; + COMPRESSION_LEVEL_6 = 6; + COMPRESSION_LEVEL_7 = 7; + COMPRESSION_LEVEL_8 = 8; + COMPRESSION_LEVEL_9 = 9; + BEST_COMPRESSION = 9; + } + + // Value from 1 to 9 that controls the amount of internal memory used by zlib. Higher values + // use more memory, but are faster and produce better compression results. The default value is 5. 
+ google.protobuf.UInt32Value memory_level = 1 [(validate.rules).uint32 = {lte: 9 gte: 1}]; + + // A value used for selecting the zlib compression level. This setting will affect speed and + // amount of compression applied to the content. "BEST_COMPRESSION" provides higher compression + // at the cost of higher latency and is equal to "COMPRESSION_LEVEL_9". "BEST_SPEED" provides + // lower compression with minimum impact on response time, the same as "COMPRESSION_LEVEL_1". + // "DEFAULT_COMPRESSION" provides an optimal result between speed and compression. According + // to zlib's manual this level gives the same result as "COMPRESSION_LEVEL_6". + // This field will be set to "DEFAULT_COMPRESSION" if not specified. + CompressionLevel compression_level = 2 [(validate.rules).enum = {defined_only: true}]; + + // A value used for selecting the zlib compression strategy which is directly related to the + // characteristics of the content. Most of the time "DEFAULT_STRATEGY" will be the best choice, + // which is also the default value for the parameter, though there are situations when + // changing this parameter might produce better results. For example, run-length encoding (RLE) + // is typically used when the content is known for having sequences which same data occurs many + // consecutive times. For more information about each strategy, please refer to zlib manual. + CompressionStrategy compression_strategy = 3 [(validate.rules).enum = {defined_only: true}]; + + // Value from 9 to 15 that represents the base two logarithmic of the compressor's window size. + // Larger window results in better compression at the expense of memory usage. The default is 12 + // which will produce a 4096 bytes window. For more details about this parameter, please refer to + // zlib manual > deflateInit2. + google.protobuf.UInt32Value window_bits = 4 [(validate.rules).uint32 = {lte: 15 gte: 9}]; + + // Value for Zlib's next output buffer. If not set, defaults to 4096. + // See https://www.zlib.net/manual.html for more details. Also see + // https://github.com/envoyproxy/envoy/issues/8448 for context on this filter's performance. + google.protobuf.UInt32Value chunk_size = 5 [(validate.rules).uint32 = {lte: 65536 gte: 4096}]; +} diff --git a/generated_api_shadow/envoy/extensions/filters/http/compressor/v3/compressor.proto b/generated_api_shadow/envoy/extensions/filters/http/compressor/v3/compressor.proto index 0eefe55140d2..1f6cd63e9d52 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/compressor/v3/compressor.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/compressor/v3/compressor.proto @@ -3,11 +3,14 @@ syntax = "proto3"; package envoy.extensions.filters.http.compressor.v3; import "envoy/config/core/v3/base.proto"; +import "envoy/config/core/v3/extension.proto"; +import "google/protobuf/any.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.compressor.v3"; option java_outer_classname = "CompressorProto"; @@ -15,8 +18,10 @@ option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Compressor] +// Compressor :ref:`configuration overview `. 
+// [#extension: envoy.filters.http.compressor] -// [#next-free-field: 6] +// [#next-free-field: 7] message Compressor { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.compressor.v2.Compressor"; @@ -46,4 +51,10 @@ message Compressor { // Runtime flag that controls whether the filter is enabled or not. If set to false, the // filter will operate as a pass-through filter. If not specified, defaults to enabled. config.core.v3.RuntimeFeatureFlag runtime_enabled = 5; + + // A compressor library to use for compression. Currently only + // :ref:`envoy.filters.http.compressor.gzip` + // is included in Envoy. + // This field is ignored if used in the context of the gzip http-filter, but is mandatory otherwise. + config.core.v3.TypedExtensionConfig compressor_library = 6; } diff --git a/include/envoy/compression/compressor/BUILD b/include/envoy/compression/compressor/BUILD new file mode 100644 index 000000000000..f9e90c9ec612 --- /dev/null +++ b/include/envoy/compression/compressor/BUILD @@ -0,0 +1,35 @@ +licenses(["notice"]) # Apache 2 + +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) + +envoy_package() + +envoy_cc_library( + name = "compressor_interface", + hdrs = ["compressor.h"], + deps = [ + "//include/envoy/buffer:buffer_interface", + ], +) + +envoy_cc_library( + name = "compressor_factory_interface", + hdrs = ["factory.h"], + deps = [ + ":compressor_interface", + ], +) + +envoy_cc_library( + name = "compressor_config_interface", + hdrs = ["config.h"], + deps = [ + ":compressor_factory_interface", + "//include/envoy/config:typed_config_interface", + "//include/envoy/server:filter_config_interface", + ], +) diff --git a/include/envoy/compressor/compressor.h b/include/envoy/compression/compressor/compressor.h similarity index 86% rename from include/envoy/compressor/compressor.h rename to include/envoy/compression/compressor/compressor.h index d25204a3ead4..f236586d4ddf 100644 --- a/include/envoy/compressor/compressor.h +++ b/include/envoy/compression/compressor/compressor.h @@ -3,6 +3,7 @@ #include "envoy/buffer/buffer.h" namespace Envoy { +namespace Compression { namespace Compressor { /** @@ -26,5 +27,8 @@ class Compressor { virtual void compress(Buffer::Instance& buffer, State state) PURE; }; +using CompressorPtr = std::unique_ptr; + } // namespace Compressor +} // namespace Compression } // namespace Envoy diff --git a/include/envoy/compression/compressor/config.h b/include/envoy/compression/compressor/config.h new file mode 100644 index 000000000000..af8f0b9997fa --- /dev/null +++ b/include/envoy/compression/compressor/config.h @@ -0,0 +1,22 @@ +#pragma once + +#include "envoy/compression/compressor/factory.h" +#include "envoy/config/typed_config.h" +#include "envoy/server/filter_config.h" + +namespace Envoy { +namespace Compression { +namespace Compressor { + +class NamedCompressorLibraryConfigFactory : public Config::TypedFactory { +public: + ~NamedCompressorLibraryConfigFactory() override = default; + + virtual CompressorFactoryPtr + createCompressorFactoryFromProto(const Protobuf::Message& config, + Server::Configuration::FactoryContext& context) PURE; +}; + +} // namespace Compressor +} // namespace Compression +} // namespace Envoy diff --git a/include/envoy/compression/compressor/factory.h b/include/envoy/compression/compressor/factory.h new file mode 100644 index 000000000000..4587e3a297b3 --- /dev/null +++ b/include/envoy/compression/compressor/factory.h @@ -0,0 +1,22 @@ +#pragma once + +#include 
"envoy/compression/compressor/compressor.h" + +namespace Envoy { +namespace Compression { +namespace Compressor { + +class CompressorFactory { +public: + virtual ~CompressorFactory() = default; + + virtual CompressorPtr createCompressor() PURE; + virtual const std::string& statsPrefix() const PURE; + virtual const std::string& contentEncoding() const PURE; +}; + +using CompressorFactoryPtr = std::unique_ptr; + +} // namespace Compressor +} // namespace Compression +} // namespace Envoy diff --git a/include/envoy/compressor/BUILD b/include/envoy/compressor/BUILD deleted file mode 100644 index 9b3b8f43e47d..000000000000 --- a/include/envoy/compressor/BUILD +++ /dev/null @@ -1,17 +0,0 @@ -licenses(["notice"]) # Apache 2 - -load( - "//bazel:envoy_build_system.bzl", - "envoy_cc_library", - "envoy_package", -) - -envoy_package() - -envoy_cc_library( - name = "compressor_interface", - hdrs = ["compressor.h"], - deps = [ - "//include/envoy/buffer:buffer_interface", - ], -) diff --git a/source/common/compressor/BUILD b/source/common/compressor/BUILD deleted file mode 100644 index d452e1c968f7..000000000000 --- a/source/common/compressor/BUILD +++ /dev/null @@ -1,22 +0,0 @@ -licenses(["notice"]) # Apache 2 - -load( - "//bazel:envoy_build_system.bzl", - "envoy_cc_library", - "envoy_package", -) - -envoy_package() - -envoy_cc_library( - name = "compressor_lib", - srcs = ["zlib_compressor_impl.cc"], - hdrs = ["zlib_compressor_impl.h"], - external_deps = ["zlib"], - deps = [ - "//include/envoy/compressor:compressor_interface", - "//source/common/buffer:buffer_lib", - "//source/common/common:assert_lib", - "//source/common/common:zlib_base_lib", - ], -) diff --git a/source/extensions/compression/common/compressor/BUILD b/source/extensions/compression/common/compressor/BUILD new file mode 100644 index 000000000000..eb16810cc8b9 --- /dev/null +++ b/source/extensions/compression/common/compressor/BUILD @@ -0,0 +1,19 @@ +licenses(["notice"]) # Apache 2 + +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) + +envoy_package() + +envoy_cc_library( + name = "compressor_factory_base_lib", + hdrs = ["factory_base.h"], + deps = [ + "//include/envoy/compression/compressor:compressor_config_interface", + "//include/envoy/compression/compressor:compressor_factory_interface", + "//include/envoy/server:filter_config_interface", + ], +) diff --git a/source/extensions/compression/common/compressor/factory_base.h b/source/extensions/compression/common/compressor/factory_base.h new file mode 100644 index 000000000000..fe2ddefb9c32 --- /dev/null +++ b/source/extensions/compression/common/compressor/factory_base.h @@ -0,0 +1,46 @@ +#pragma once + +#include "envoy/compression/compressor/config.h" +#include "envoy/compression/compressor/factory.h" +#include "envoy/server/filter_config.h" + +namespace Envoy { +namespace Extensions { +namespace Compression { +namespace Common { +namespace Compressor { + +template +class CompressorLibraryFactoryBase + : public Envoy::Compression::Compressor::NamedCompressorLibraryConfigFactory { +public: + Envoy::Compression::Compressor::CompressorFactoryPtr + createCompressorFactoryFromProto(const Protobuf::Message& proto_config, + Server::Configuration::FactoryContext& context) override { + return createCompressorFactoryFromProtoTyped( + MessageUtil::downcastAndValidate(proto_config, + context.messageValidationVisitor())); + } + + ProtobufTypes::MessagePtr createEmptyConfigProto() override { + return std::make_unique(); + } + + std::string category() const 
override { return "envoy.compression.compressor"; } + + std::string name() const override { return name_; } + +protected: + CompressorLibraryFactoryBase(const std::string& name) : name_(name) {} + +private: + virtual Envoy::Compression::Compressor::CompressorFactoryPtr + createCompressorFactoryFromProtoTyped(const ConfigProto&) PURE; + const std::string name_; +}; + +} // namespace Compressor +} // namespace Common +} // namespace Compression +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/compression/gzip/compressor/BUILD b/source/extensions/compression/gzip/compressor/BUILD new file mode 100644 index 000000000000..f7c1d5f51b1d --- /dev/null +++ b/source/extensions/compression/gzip/compressor/BUILD @@ -0,0 +1,37 @@ +licenses(["notice"]) # Apache 2 + +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", + "envoy_cc_library", + "envoy_package", +) + +envoy_package() + +envoy_cc_library( + name = "compressor_lib", + srcs = ["zlib_compressor_impl.cc"], + hdrs = ["zlib_compressor_impl.h"], + external_deps = ["zlib"], + deps = [ + "//include/envoy/compression/compressor:compressor_interface", + "//source/common/buffer:buffer_lib", + "//source/common/common:assert_lib", + "//source/common/common:zlib_base_lib", + ], +) + +envoy_cc_extension( + name = "config", + srcs = ["config.cc"], + hdrs = ["config.h"], + security_posture = "robust_to_untrusted_downstream", + deps = [ + ":compressor_lib", + "//source/common/http:headers_lib", + "//source/extensions/compression/common/compressor:compressor_factory_base_lib", + "//source/extensions/filters/http:well_known_names", + "@envoy_api//envoy/extensions/compression/gzip/compressor/v3:pkg_cc_proto", + ], +) diff --git a/source/extensions/compression/gzip/compressor/config.cc b/source/extensions/compression/gzip/compressor/config.cc new file mode 100644 index 000000000000..9d37441547f4 --- /dev/null +++ b/source/extensions/compression/gzip/compressor/config.cc @@ -0,0 +1,98 @@ +#include "extensions/compression/gzip/compressor/config.h" + +namespace Envoy { +namespace Extensions { +namespace Compression { +namespace Gzip { +namespace Compressor { + +namespace { +// Default zlib memory level. +const uint64_t DefaultMemoryLevel = 5; + +// Default and maximum compression window size. +const uint64_t DefaultWindowBits = 12; + +// When summed to window bits, this sets a gzip header and trailer around the compressed data. +const uint64_t GzipHeaderValue = 16; + +// Default zlib chunk size. 
+const uint32_t DefaultChunkSize = 4096; +} // namespace + +GzipCompressorFactory::GzipCompressorFactory( + const envoy::extensions::compression::gzip::compressor::v3::Gzip& gzip) + : compression_level_(compressionLevelEnum(gzip.compression_level())), + compression_strategy_(compressionStrategyEnum(gzip.compression_strategy())), + memory_level_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(gzip, memory_level, DefaultMemoryLevel)), + window_bits_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(gzip, window_bits, DefaultWindowBits) | + GzipHeaderValue), + chunk_size_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(gzip, chunk_size, DefaultChunkSize)) {} + +ZlibCompressorImpl::CompressionLevel GzipCompressorFactory::compressionLevelEnum( + envoy::extensions::compression::gzip::compressor::v3::Gzip::CompressionLevel + compression_level) { + switch (compression_level) { + case envoy::extensions::compression::gzip::compressor::v3::Gzip::BEST_SPEED: + return ZlibCompressorImpl::CompressionLevel::Speed; + case envoy::extensions::compression::gzip::compressor::v3::Gzip::COMPRESSION_LEVEL_2: + return ZlibCompressorImpl::CompressionLevel::Level2; + case envoy::extensions::compression::gzip::compressor::v3::Gzip::COMPRESSION_LEVEL_3: + return ZlibCompressorImpl::CompressionLevel::Level3; + case envoy::extensions::compression::gzip::compressor::v3::Gzip::COMPRESSION_LEVEL_4: + return ZlibCompressorImpl::CompressionLevel::Level4; + case envoy::extensions::compression::gzip::compressor::v3::Gzip::COMPRESSION_LEVEL_5: + return ZlibCompressorImpl::CompressionLevel::Level5; + case envoy::extensions::compression::gzip::compressor::v3::Gzip::COMPRESSION_LEVEL_6: + return ZlibCompressorImpl::CompressionLevel::Level6; + case envoy::extensions::compression::gzip::compressor::v3::Gzip::COMPRESSION_LEVEL_7: + return ZlibCompressorImpl::CompressionLevel::Level7; + case envoy::extensions::compression::gzip::compressor::v3::Gzip::COMPRESSION_LEVEL_8: + return ZlibCompressorImpl::CompressionLevel::Level8; + case envoy::extensions::compression::gzip::compressor::v3::Gzip::BEST_COMPRESSION: + return ZlibCompressorImpl::CompressionLevel::Best; + default: + return ZlibCompressorImpl::CompressionLevel::Standard; + } +} + +ZlibCompressorImpl::CompressionStrategy GzipCompressorFactory::compressionStrategyEnum( + envoy::extensions::compression::gzip::compressor::v3::Gzip::CompressionStrategy + compression_strategy) { + switch (compression_strategy) { + case envoy::extensions::compression::gzip::compressor::v3::Gzip::FILTERED: + return ZlibCompressorImpl::CompressionStrategy::Filtered; + case envoy::extensions::compression::gzip::compressor::v3::Gzip::FIXED: + return ZlibCompressorImpl::CompressionStrategy::Fixed; + case envoy::extensions::compression::gzip::compressor::v3::Gzip::HUFFMAN_ONLY: + return ZlibCompressorImpl::CompressionStrategy::Huffman; + case envoy::extensions::compression::gzip::compressor::v3::Gzip::RLE: + return ZlibCompressorImpl::CompressionStrategy::Rle; + default: + return ZlibCompressorImpl::CompressionStrategy::Standard; + } +} + +Envoy::Compression::Compressor::CompressorPtr GzipCompressorFactory::createCompressor() { + auto compressor = std::make_unique(chunk_size_); + compressor->init(compression_level_, compression_strategy_, window_bits_, memory_level_); + return compressor; +} + +Envoy::Compression::Compressor::CompressorFactoryPtr +GzipCompressorLibraryFactory::createCompressorFactoryFromProtoTyped( + const envoy::extensions::compression::gzip::compressor::v3::Gzip& proto_config) { + return std::make_unique(proto_config); +} + +/** + * 
Static registration for the gzip compressor library. @see NamedCompressorLibraryConfigFactory. + */ +REGISTER_FACTORY(GzipCompressorLibraryFactory, + Envoy::Compression::Compressor::NamedCompressorLibraryConfigFactory); + +} // namespace Compressor +} // namespace Gzip +} // namespace Compression +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/compression/gzip/compressor/config.h b/source/extensions/compression/gzip/compressor/config.h new file mode 100644 index 000000000000..25c96fff8a90 --- /dev/null +++ b/source/extensions/compression/gzip/compressor/config.h @@ -0,0 +1,71 @@ +#pragma once + +#include "envoy/compression/compressor/factory.h" +#include "envoy/extensions/compression/gzip/compressor/v3/gzip.pb.h" +#include "envoy/extensions/compression/gzip/compressor/v3/gzip.pb.validate.h" + +#include "common/http/headers.h" + +#include "extensions/compression/common/compressor/factory_base.h" +#include "extensions/compression/gzip/compressor/zlib_compressor_impl.h" +#include "extensions/filters/http/well_known_names.h" + +namespace Envoy { +namespace Extensions { +namespace Compression { +namespace Gzip { +namespace Compressor { + +namespace { + +const std::string& gzipStatsPrefix() { CONSTRUCT_ON_FIRST_USE(std::string, "gzip."); } +const std::string& gzipExtensionName() { + CONSTRUCT_ON_FIRST_USE(std::string, "envoy.compression.gzip.compressor"); +} + +} // namespace + +class GzipCompressorFactory : public Envoy::Compression::Compressor::CompressorFactory { +public: + GzipCompressorFactory(const envoy::extensions::compression::gzip::compressor::v3::Gzip& gzip); + + // Envoy::Compression::Compressor::CompressorFactory + Envoy::Compression::Compressor::CompressorPtr createCompressor() override; + const std::string& statsPrefix() const override { return gzipStatsPrefix(); } + const std::string& contentEncoding() const override { + return Http::Headers::get().ContentEncodingValues.Gzip; + } + +private: + static ZlibCompressorImpl::CompressionLevel + compressionLevelEnum(envoy::extensions::compression::gzip::compressor::v3::Gzip::CompressionLevel + compression_level); + static ZlibCompressorImpl::CompressionStrategy compressionStrategyEnum( + envoy::extensions::compression::gzip::compressor::v3::Gzip::CompressionStrategy + compression_strategy); + + ZlibCompressorImpl::CompressionLevel compression_level_; + ZlibCompressorImpl::CompressionStrategy compression_strategy_; + const int32_t memory_level_; + const int32_t window_bits_; + const uint32_t chunk_size_; +}; + +class GzipCompressorLibraryFactory + : public Compression::Common::Compressor::CompressorLibraryFactoryBase< + envoy::extensions::compression::gzip::compressor::v3::Gzip> { +public: + GzipCompressorLibraryFactory() : CompressorLibraryFactoryBase(gzipExtensionName()) {} + +private: + Envoy::Compression::Compressor::CompressorFactoryPtr createCompressorFactoryFromProtoTyped( + const envoy::extensions::compression::gzip::compressor::v3::Gzip& config) override; +}; + +DECLARE_FACTORY(GzipCompressorLibraryFactory); + +} // namespace Compressor +} // namespace Gzip +} // namespace Compression +} // namespace Extensions +} // namespace Envoy diff --git a/source/common/compressor/zlib_compressor_impl.cc b/source/extensions/compression/gzip/compressor/zlib_compressor_impl.cc similarity index 85% rename from source/common/compressor/zlib_compressor_impl.cc rename to source/extensions/compression/gzip/compressor/zlib_compressor_impl.cc index 2f44a5da1a90..432c36e97015 100644 --- 
a/source/common/compressor/zlib_compressor_impl.cc +++ b/source/extensions/compression/gzip/compressor/zlib_compressor_impl.cc @@ -1,4 +1,4 @@ -#include "common/compressor/zlib_compressor_impl.h" +#include "extensions/compression/gzip/compressor/zlib_compressor_impl.h" #include @@ -9,6 +9,9 @@ #include "absl/container/fixed_array.h" namespace Envoy { +namespace Extensions { +namespace Compression { +namespace Gzip { namespace Compressor { ZlibCompressorImpl::ZlibCompressorImpl() : ZlibCompressorImpl(4096) {} @@ -34,7 +37,8 @@ void ZlibCompressorImpl::init(CompressionLevel comp_level, CompressionStrategy c initialized_ = true; } -void ZlibCompressorImpl::compress(Buffer::Instance& buffer, State state) { +void ZlibCompressorImpl::compress(Buffer::Instance& buffer, + Envoy::Compression::Compressor::State state) { for (const Buffer::RawSlice& input_slice : buffer.getRawSlices()) { zstream_ptr_->avail_in = input_slice.len_; zstream_ptr_->next_in = static_cast(input_slice.mem_); @@ -46,7 +50,7 @@ void ZlibCompressorImpl::compress(Buffer::Instance& buffer, State state) { buffer.drain(input_slice.len_); } - process(buffer, state == State::Finish ? Z_FINISH : Z_SYNC_FLUSH); + process(buffer, state == Envoy::Compression::Compressor::State::Finish ? Z_FINISH : Z_SYNC_FLUSH); } bool ZlibCompressorImpl::deflateNext(int64_t flush_state) { @@ -81,4 +85,7 @@ void ZlibCompressorImpl::process(Buffer::Instance& output_buffer, int64_t flush_ } } // namespace Compressor +} // namespace Gzip +} // namespace Compression +} // namespace Extensions } // namespace Envoy diff --git a/source/common/compressor/zlib_compressor_impl.h b/source/extensions/compression/gzip/compressor/zlib_compressor_impl.h similarity index 76% rename from source/common/compressor/zlib_compressor_impl.h rename to source/extensions/compression/gzip/compressor/zlib_compressor_impl.h index 396e7ff250aa..4b2956688ef4 100644 --- a/source/common/compressor/zlib_compressor_impl.h +++ b/source/extensions/compression/gzip/compressor/zlib_compressor_impl.h @@ -1,18 +1,21 @@ #pragma once -#include "envoy/compressor/compressor.h" +#include "envoy/compression/compressor/compressor.h" #include "common/common/zlib/base.h" #include "zlib.h" namespace Envoy { +namespace Extensions { +namespace Compression { +namespace Gzip { namespace Compressor { /** * Implementation of compressor's interface. */ -class ZlibCompressorImpl : public Zlib::Base, public Compressor { +class ZlibCompressorImpl : public Zlib::Base, public Envoy::Compression::Compressor::Compressor { public: ZlibCompressorImpl(); @@ -30,11 +33,22 @@ class ZlibCompressorImpl : public Zlib::Base, public Compressor { * Enum values used to set compression level during initialization. * best: gives best compression. * speed: gives best performance. + * levelX: allows adjusting trade-offs more precisely - from level1 (best speed, but very + * low compression ratio) to level9 (best compression, but low speed). * standard: requests a default compromise between speed and compression. (default) @see zlib * manual. */ enum class CompressionLevel : int64_t { Best = Z_BEST_COMPRESSION, + Level1 = 1, + Level2 = 2, + Level3 = 3, + Level4 = 4, + Level5 = 5, + Level6 = 6, + Level7 = 7, + Level8 = 8, + Level9 = 9, Speed = Z_BEST_SPEED, Standard = Z_DEFAULT_COMPRESSION, }; @@ -42,12 +56,14 @@ class ZlibCompressorImpl : public Zlib::Base, public Compressor { /** * Enum values are used for setting the compression algorithm strategy. * filtered: used for data produced by a filter.
(or predictor) @see Z_FILTERED (zlib manual) + * fixed: disable dynamic Huffman codes. @see Z_FIXED (zlib manual) * huffman: used to enforce Huffman encoding. @see RFC 1951 * rle: used to limit match distances to one. (Run-length encoding) * standard: used for normal data. (default) @see Z_DEFAULT_STRATEGY in zlib manual. */ enum class CompressionStrategy : uint64_t { Filtered = Z_FILTERED, + Fixed = Z_FIXED, Huffman = Z_HUFFMAN_ONLY, Rle = Z_RLE, Standard = Z_DEFAULT_STRATEGY, @@ -66,8 +82,8 @@ class ZlibCompressorImpl : public Zlib::Base, public Compressor { void init(CompressionLevel level, CompressionStrategy strategy, int64_t window_bits, uint64_t memory_level); - // Compressor - void compress(Buffer::Instance& buffer, State state) override; + // Compression::Compressor::Compressor + void compress(Buffer::Instance& buffer, Envoy::Compression::Compressor::State state) override; private: bool deflateNext(int64_t flush_state); @@ -75,4 +91,7 @@ class ZlibCompressorImpl : public Zlib::Base, public Compressor { }; } // namespace Compressor +} // namespace Gzip +} // namespace Compression +} // namespace Extensions } // namespace Envoy diff --git a/source/extensions/extensions_build_config.bzl b/source/extensions/extensions_build_config.bzl index 49f603e2697c..d064296d8370 100644 --- a/source/extensions/extensions_build_config.bzl +++ b/source/extensions/extensions_build_config.bzl @@ -16,6 +16,12 @@ EXTENSIONS = { "envoy.clusters.dynamic_forward_proxy": "//source/extensions/clusters/dynamic_forward_proxy:cluster", "envoy.clusters.redis": "//source/extensions/clusters/redis:redis_cluster", + # + # Compression + # + + "envoy.compression.gzip.compressor": "//source/extensions/compression/gzip/compressor:config", + # # gRPC Credentials Plugins # @@ -38,6 +44,7 @@ EXTENSIONS = { "envoy.filters.http.aws_request_signing": "//source/extensions/filters/http/aws_request_signing:config", "envoy.filters.http.buffer": "//source/extensions/filters/http/buffer:config", "envoy.filters.http.cache": "//source/extensions/filters/http/cache:config", + "envoy.filters.http.compressor": "//source/extensions/filters/http/compressor:config", "envoy.filters.http.cors": "//source/extensions/filters/http/cors:config", "envoy.filters.http.csrf": "//source/extensions/filters/http/csrf:config", "envoy.filters.http.dynamic_forward_proxy": "//source/extensions/filters/http/dynamic_forward_proxy:config", diff --git a/source/extensions/filters/http/common/compressor/BUILD b/source/extensions/filters/http/common/compressor/BUILD index 55e6a87aa2c7..7058ab2fc58b 100644 --- a/source/extensions/filters/http/common/compressor/BUILD +++ b/source/extensions/filters/http/common/compressor/BUILD @@ -8,12 +8,13 @@ load( envoy_package() +# TODO(rojkov): move this library to source/extensions/filters/http/compressor/. 
envoy_cc_library( name = "compressor_lib", srcs = ["compressor.cc"], hdrs = ["compressor.h"], deps = [ - "//include/envoy/compressor:compressor_interface", + "//include/envoy/compression/compressor:compressor_interface", "//include/envoy/stats:stats_macros", "//include/envoy/stream_info:filter_state_interface", "//source/common/buffer:buffer_lib", diff --git a/source/extensions/filters/http/common/compressor/compressor.cc b/source/extensions/filters/http/common/compressor/compressor.cc index 7c6ac05893b3..ac2fc769ec0f 100644 --- a/source/extensions/filters/http/common/compressor/compressor.cc +++ b/source/extensions/filters/http/common/compressor/compressor.cc @@ -121,7 +121,8 @@ Http::FilterHeadersStatus CompressorFilter::encodeHeaders(Http::ResponseHeaderMa Http::FilterDataStatus CompressorFilter::encodeData(Buffer::Instance& data, bool end_stream) { if (!skip_compression_) { config_->stats().total_uncompressed_bytes_.add(data.length()); - compressor_->compress(data, end_stream ? Compressor::State::Finish : Compressor::State::Flush); + compressor_->compress(data, end_stream ? Envoy::Compression::Compressor::State::Finish + : Envoy::Compression::Compressor::State::Flush); config_->stats().total_compressed_bytes_.add(data.length()); } return Http::FilterDataStatus::Continue; @@ -130,7 +131,7 @@ Http::FilterDataStatus CompressorFilter::encodeData(Buffer::Instance& data, bool Http::FilterTrailersStatus CompressorFilter::encodeTrailers(Http::ResponseTrailerMap&) { if (!skip_compression_) { Buffer::OwnedImpl empty_buffer; - compressor_->compress(empty_buffer, Compressor::State::Finish); + compressor_->compress(empty_buffer, Envoy::Compression::Compressor::State::Finish); config_->stats().total_compressed_bytes_.add(empty_buffer.length()); encoder_callbacks_->addEncodedData(empty_buffer, true); } diff --git a/source/extensions/filters/http/common/compressor/compressor.h b/source/extensions/filters/http/common/compressor/compressor.h index fc99ab517d0d..844719a33466 100644 --- a/source/extensions/filters/http/common/compressor/compressor.h +++ b/source/extensions/filters/http/common/compressor/compressor.h @@ -1,6 +1,6 @@ #pragma once -#include "envoy/compressor/compressor.h" +#include "envoy/compression/compressor/compressor.h" #include "envoy/extensions/filters/http/compressor/v3/compressor.pb.h" #include "envoy/stats/scope.h" #include "envoy/stats/stats_macros.h" @@ -55,12 +55,14 @@ struct CompressorStats { ALL_COMPRESSOR_STATS(GENERATE_COUNTER_STRUCT) }; +// TODO(rojkov): merge this class with Compressor::CompressorFilterConfig when the filter +// `envoy.filters.http.gzip` is fully deprecated and dropped. 
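As a sketch of the compressor-library extension point introduced by this patch (NamedCompressorLibraryConfigFactory plus CompressorLibraryFactoryBase), a hypothetical third-party library would mirror the gzip factory added above. MyCompressorProto, MyCompressorFactory, and the extension name below are placeholders invented for illustration and are not part of this change.

// Hypothetical compressor library registration; the placeholder proto and factory types
// would have to exist for this to compile.
class MyCompressorLibraryFactory
    : public Extensions::Compression::Common::Compressor::CompressorLibraryFactoryBase<
          MyCompressorProto> {
public:
  MyCompressorLibraryFactory() : CompressorLibraryFactoryBase("envoy.compression.example.compressor") {}

private:
  Envoy::Compression::Compressor::CompressorFactoryPtr
  createCompressorFactoryFromProtoTyped(const MyCompressorProto& config) override {
    return std::make_unique<MyCompressorFactory>(config);
  }
};

REGISTER_FACTORY(MyCompressorLibraryFactory,
                 Envoy::Compression::Compressor::NamedCompressorLibraryConfigFactory);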
class CompressorFilterConfig { public: CompressorFilterConfig() = delete; virtual ~CompressorFilterConfig() = default; - virtual std::unique_ptr makeCompressor() PURE; + virtual Envoy::Compression::Compressor::CompressorPtr makeCompressor() PURE; bool enabled() const { return enabled_.enabled(); } const CompressorStats& stats() { return stats_; } @@ -69,7 +71,6 @@ class CompressorFilterConfig { bool removeAcceptEncodingHeader() const { return remove_accept_encoding_header_; } uint32_t minimumLength() const { return content_length_; } const std::string contentEncoding() const { return content_encoding_; }; - const std::map registeredCompressors() const; protected: CompressorFilterConfig( @@ -148,7 +149,7 @@ class CompressorFilter : public Http::PassThroughFilter { bool shouldCompress(const EncodingDecision& decision) const; bool skip_compression_; - std::unique_ptr compressor_; + Envoy::Compression::Compressor::CompressorPtr compressor_; const CompressorFilterConfigSharedPtr config_; std::unique_ptr accept_encoding_; }; diff --git a/source/extensions/filters/http/compressor/BUILD b/source/extensions/filters/http/compressor/BUILD new file mode 100644 index 000000000000..188ee5e38be0 --- /dev/null +++ b/source/extensions/filters/http/compressor/BUILD @@ -0,0 +1,39 @@ +licenses(["notice"]) # Apache 2 + +# HTTP L7 filter that performs compression with configurable compression libraries +# Public docs: docs/root/configuration/http_filters/compressor_filter.rst + +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", + "envoy_cc_library", + "envoy_package", +) + +envoy_package() + +envoy_cc_library( + name = "compressor_filter_lib", + srcs = ["compressor_filter.cc"], + hdrs = ["compressor_filter.h"], + deps = [ + "//include/envoy/compression/compressor:compressor_factory_interface", + "//source/extensions/filters/http/common/compressor:compressor_lib", + "@envoy_api//envoy/extensions/filters/http/compressor/v3:pkg_cc_proto", + ], +) + +envoy_cc_extension( + name = "config", + srcs = ["config.cc"], + hdrs = ["config.h"], + security_posture = "robust_to_untrusted_downstream", + deps = [ + ":compressor_filter_lib", + "//include/envoy/compression/compressor:compressor_config_interface", + "//source/common/config:utility_lib", + "//source/extensions/filters/http:well_known_names", + "//source/extensions/filters/http/common:factory_base_lib", + "@envoy_api//envoy/extensions/filters/http/compressor/v3:pkg_cc_proto", + ], +) diff --git a/source/extensions/filters/http/compressor/compressor_filter.cc b/source/extensions/filters/http/compressor/compressor_filter.cc new file mode 100644 index 000000000000..24e974e012e5 --- /dev/null +++ b/source/extensions/filters/http/compressor/compressor_filter.cc @@ -0,0 +1,26 @@ +#include "extensions/filters/http/compressor/compressor_filter.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace Compressor { + +CompressorFilterConfig::CompressorFilterConfig( + const envoy::extensions::filters::http::compressor::v3::Compressor& generic_compressor, + const std::string& stats_prefix, Stats::Scope& scope, Runtime::Loader& runtime, + Compression::Compressor::CompressorFactoryPtr compressor_factory) + : Common::Compressors::CompressorFilterConfig( + generic_compressor, + stats_prefix + "compressor." + generic_compressor.compressor_library().name() + "." 
+ + compressor_factory->statsPrefix(), + scope, runtime, compressor_factory->contentEncoding()), + compressor_factory_(std::move(compressor_factory)) {} + +Envoy::Compression::Compressor::CompressorPtr CompressorFilterConfig::makeCompressor() { + return compressor_factory_->createCompressor(); +} + +} // namespace Compressor +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/compressor/compressor_filter.h b/source/extensions/filters/http/compressor/compressor_filter.h new file mode 100644 index 000000000000..8d7347847f79 --- /dev/null +++ b/source/extensions/filters/http/compressor/compressor_filter.h @@ -0,0 +1,35 @@ +#pragma once + +#include "envoy/compression/compressor/factory.h" +#include "envoy/extensions/filters/http/compressor/v3/compressor.pb.h" + +#include "extensions/filters/http/common/compressor/compressor.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace Compressor { + +/** + * Configuration for the compressor filter. + */ +class CompressorFilterConfig : public Common::Compressors::CompressorFilterConfig { + // TODO(rojkov): move functionality of Common::Compressors::CompressorFilterConfig + // to this class when `envoy.filters.http.gzip` is fully deprecated and dropped. +public: + CompressorFilterConfig() = delete; + CompressorFilterConfig( + const envoy::extensions::filters::http::compressor::v3::Compressor& genereic_compressor, + const std::string& stats_prefix, Stats::Scope& scope, Runtime::Loader& runtime, + Envoy::Compression::Compressor::CompressorFactoryPtr compressor_factory); + + Envoy::Compression::Compressor::CompressorPtr makeCompressor() override; + +private: + const Envoy::Compression::Compressor::CompressorFactoryPtr compressor_factory_; +}; + +} // namespace Compressor +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/compressor/config.cc b/source/extensions/filters/http/compressor/config.cc new file mode 100644 index 000000000000..aff3ca5afe4c --- /dev/null +++ b/source/extensions/filters/http/compressor/config.cc @@ -0,0 +1,52 @@ +#include "extensions/filters/http/compressor/config.h" + +#include "envoy/compression/compressor/config.h" + +#include "common/config/utility.h" + +#include "extensions/filters/http/compressor/compressor_filter.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace Compressor { + +Http::FilterFactoryCb CompressorFilterFactory::createFilterFactoryFromProtoTyped( + const envoy::extensions::filters::http::compressor::v3::Compressor& proto_config, + const std::string& stats_prefix, Server::Configuration::FactoryContext& context) { + // TODO(rojkov): instead of throwing an exception make the Compressor.compressor_library field + // required when the Gzip HTTP-filter is fully deprecated and removed. 
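// Illustrative note: `compressor_library` is a core.v3.TypedExtensionConfig, so selecting the
// gzip library added by this patch means setting its typed_config to an
// envoy.extensions.compression.gzip.compressor.v3.Gzip message; the registry lookup below keys
// on that message's type URL rather than on the extension name.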
+ if (!proto_config.has_compressor_library()) { + throw EnvoyException("Compressor filter doesn't have compressor_library defined"); + } + const std::string type{TypeUtil::typeUrlToDescriptorFullName( + proto_config.compressor_library().typed_config().type_url())}; + Compression::Compressor::NamedCompressorLibraryConfigFactory* const config_factory = + Registry::FactoryRegistry< + Compression::Compressor::NamedCompressorLibraryConfigFactory>::getFactoryByType(type); + if (config_factory == nullptr) { + throw EnvoyException( + fmt::format("Didn't find a registered implementation for type: '{}'", type)); + } + ProtobufTypes::MessagePtr message = Config::Utility::translateAnyToFactoryConfig( + proto_config.compressor_library().typed_config(), context.messageValidationVisitor(), + *config_factory); + Compression::Compressor::CompressorFactoryPtr compressor_factory = + config_factory->createCompressorFactoryFromProto(*message, context); + Common::Compressors::CompressorFilterConfigSharedPtr config = + std::make_shared(proto_config, stats_prefix, context.scope(), + context.runtime(), std::move(compressor_factory)); + return [config](Http::FilterChainFactoryCallbacks& callbacks) -> void { + callbacks.addStreamFilter(std::make_shared(config)); + }; +} + +/** + * Static registration for the compressor filter. @see NamedHttpFilterConfigFactory. + */ +REGISTER_FACTORY(CompressorFilterFactory, Server::Configuration::NamedHttpFilterConfigFactory); + +} // namespace Compressor +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/compressor/config.h b/source/extensions/filters/http/compressor/config.h new file mode 100644 index 000000000000..50127769f3b4 --- /dev/null +++ b/source/extensions/filters/http/compressor/config.h @@ -0,0 +1,33 @@ +#pragma once + +#include "envoy/extensions/filters/http/compressor/v3/compressor.pb.h" +#include "envoy/extensions/filters/http/compressor/v3/compressor.pb.validate.h" + +#include "extensions/filters/http/common/factory_base.h" +#include "extensions/filters/http/well_known_names.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace Compressor { + +/** + * Config registration for the compressor filter. @see NamedHttpFilterConfigFactory. 
+ */ +class CompressorFilterFactory + : public Common::FactoryBase { +public: + CompressorFilterFactory() : FactoryBase(HttpFilterNames::get().Compressor) {} + +private: + Http::FilterFactoryCb createFilterFactoryFromProtoTyped( + const envoy::extensions::filters::http::compressor::v3::Compressor& config, + const std::string& stats_prefix, Server::Configuration::FactoryContext& context) override; +}; + +DECLARE_FACTORY(CompressorFilterFactory); + +} // namespace Compressor +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/gzip/BUILD b/source/extensions/filters/http/gzip/BUILD index 5b1f7517b66f..b27fe1fef620 100644 --- a/source/extensions/filters/http/gzip/BUILD +++ b/source/extensions/filters/http/gzip/BUILD @@ -17,10 +17,11 @@ envoy_cc_library( srcs = ["gzip_filter.cc"], hdrs = ["gzip_filter.h"], deps = [ - "//source/common/compressor:compressor_lib", "//source/common/http:headers_lib", "//source/common/protobuf", + "//source/extensions/compression/gzip/compressor:compressor_lib", "//source/extensions/filters/http/common/compressor:compressor_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/http/gzip/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/filters/http/gzip/config.cc b/source/extensions/filters/http/gzip/config.cc index f8c577d67f34..d11d9279d5be 100644 --- a/source/extensions/filters/http/gzip/config.cc +++ b/source/extensions/filters/http/gzip/config.cc @@ -10,6 +10,25 @@ namespace Gzip { Http::FilterFactoryCb GzipFilterFactory::createFilterFactoryFromProtoTyped( const envoy::extensions::filters::http::gzip::v3::Gzip& proto_config, const std::string& stats_prefix, Server::Configuration::FactoryContext& context) { + // This will flip to false eventually. + const bool runtime_feature_default = true; + const char runtime_key[] = "envoy.deprecated_features.allow_deprecated_gzip_http_filter"; + const std::string warn_message = + "Using deprecated extension 'envoy.extensions.filters.http.gzip'. This " + "extension will be removed from Envoy soon. 
Please use " + "'envoy.extensions.filters.http.compressor' instead."; + + if (context.runtime().snapshot().deprecatedFeatureEnabled(runtime_key, runtime_feature_default)) { + ENVOY_LOG_MISC(warn, "{}", warn_message); + } else { + throw EnvoyException( + warn_message + + " If continued use of this extension is absolutely necessary, see " + "https://www.envoyproxy.io/docs/envoy/latest/configuration/operations/runtime" + "#using-runtime-overrides-for-deprecated-features for how to apply a temporary and " + "highly discouraged override."); + } + Common::Compressors::CompressorFilterConfigSharedPtr config = std::make_shared( proto_config, stats_prefix, context.scope(), context.runtime()); return [config](Http::FilterChainFactoryCallbacks& callbacks) -> void { diff --git a/source/extensions/filters/http/gzip/gzip_filter.cc b/source/extensions/filters/http/gzip/gzip_filter.cc index f9c3572a1a35..b0b0ab06aec9 100644 --- a/source/extensions/filters/http/gzip/gzip_filter.cc +++ b/source/extensions/filters/http/gzip/gzip_filter.cc @@ -1,6 +1,9 @@ #include "extensions/filters/http/gzip/gzip_filter.h" +#include "envoy/config/core/v3/base.pb.h" + #include "common/http/headers.h" +#include "common/protobuf/message_validator_impl.h" #include "common/protobuf/protobuf.h" namespace Envoy { @@ -31,35 +34,38 @@ GzipFilterConfig::GzipFilterConfig(const envoy::extensions::filters::http::gzip: window_bits_(windowBitsUint(gzip.window_bits().value())), chunk_size_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(gzip, chunk_size, 4096)) {} -std::unique_ptr GzipFilterConfig::makeCompressor() { - auto compressor = std::make_unique(chunk_size_); +Envoy::Compression::Compressor::CompressorPtr GzipFilterConfig::makeCompressor() { + auto compressor = + std::make_unique(chunk_size_); compressor->init(compressionLevel(), compressionStrategy(), windowBits(), memoryLevel()); return compressor; } -Compressor::ZlibCompressorImpl::CompressionLevel GzipFilterConfig::compressionLevelEnum( +Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel +GzipFilterConfig::compressionLevelEnum( envoy::extensions::filters::http::gzip::v3::Gzip::CompressionLevel::Enum compression_level) { switch (compression_level) { case envoy::extensions::filters::http::gzip::v3::Gzip::CompressionLevel::BEST: - return Compressor::ZlibCompressorImpl::CompressionLevel::Best; + return Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Best; case envoy::extensions::filters::http::gzip::v3::Gzip::CompressionLevel::SPEED: - return Compressor::ZlibCompressorImpl::CompressionLevel::Speed; + return Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Speed; default: - return Compressor::ZlibCompressorImpl::CompressionLevel::Standard; + return Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Standard; } } -Compressor::ZlibCompressorImpl::CompressionStrategy GzipFilterConfig::compressionStrategyEnum( +Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy +GzipFilterConfig::compressionStrategyEnum( envoy::extensions::filters::http::gzip::v3::Gzip::CompressionStrategy compression_strategy) { switch (compression_strategy) { case envoy::extensions::filters::http::gzip::v3::Gzip::RLE: - return Compressor::ZlibCompressorImpl::CompressionStrategy::Rle; + return Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Rle; case envoy::extensions::filters::http::gzip::v3::Gzip::FILTERED: - return Compressor::ZlibCompressorImpl::CompressionStrategy::Filtered; + return 
Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Filtered; case envoy::extensions::filters::http::gzip::v3::Gzip::HUFFMAN: - return Compressor::ZlibCompressorImpl::CompressionStrategy::Huffman; + return Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Huffman; default: - return Compressor::ZlibCompressorImpl::CompressionStrategy::Standard; + return Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard; } } @@ -79,8 +85,12 @@ GzipFilterConfig::compressorConfig(const envoy::extensions::filters::http::gzip: envoy::extensions::filters::http::compressor::v3::Compressor compressor = {}; if (gzip.has_hidden_envoy_deprecated_content_length()) { compressor.set_allocated_content_length( + // According to + // https://developers.google.com/protocol-buffers/docs/reference/cpp-generated#embeddedmessage + // the message Compressor takes ownership of the allocated Protobuf::Uint32Value object. new Protobuf::UInt32Value(gzip.hidden_envoy_deprecated_content_length())); } + // NOLINTNEXTLINE(clang-analyzer-cplusplus.NewDeleteLeaks) for (const std::string& ctype : gzip.hidden_envoy_deprecated_content_type()) { compressor.add_content_type(ctype); } diff --git a/source/extensions/filters/http/gzip/gzip_filter.h b/source/extensions/filters/http/gzip/gzip_filter.h index ec56b28ddf92..be30f081a043 100644 --- a/source/extensions/filters/http/gzip/gzip_filter.h +++ b/source/extensions/filters/http/gzip/gzip_filter.h @@ -2,8 +2,7 @@ #include "envoy/extensions/filters/http/gzip/v3/gzip.pb.h" -#include "common/compressor/zlib_compressor_impl.h" - +#include "extensions/compression/gzip/compressor/zlib_compressor_impl.h" #include "extensions/filters/http/common/compressor/compressor.h" namespace Envoy { @@ -20,12 +19,13 @@ class GzipFilterConfig : public Common::Compressors::CompressorFilterConfig { GzipFilterConfig(const envoy::extensions::filters::http::gzip::v3::Gzip& gzip, const std::string& stats_prefix, Stats::Scope& scope, Runtime::Loader& runtime); - std::unique_ptr makeCompressor() override; + Envoy::Compression::Compressor::CompressorPtr makeCompressor() override; - Compressor::ZlibCompressorImpl::CompressionLevel compressionLevel() const { + Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel compressionLevel() const { return compression_level_; } - Compressor::ZlibCompressorImpl::CompressionStrategy compressionStrategy() const { + Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy + compressionStrategy() const { return compression_strategy_; } @@ -34,9 +34,10 @@ class GzipFilterConfig : public Common::Compressors::CompressorFilterConfig { uint32_t chunkSize() const { return chunk_size_; } private: - static Compressor::ZlibCompressorImpl::CompressionLevel compressionLevelEnum( + static Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel compressionLevelEnum( envoy::extensions::filters::http::gzip::v3::Gzip::CompressionLevel::Enum compression_level); - static Compressor::ZlibCompressorImpl::CompressionStrategy compressionStrategyEnum( + static Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy + compressionStrategyEnum( envoy::extensions::filters::http::gzip::v3::Gzip::CompressionStrategy compression_strategy); static uint64_t memoryLevelUint(Protobuf::uint32 level); @@ -46,8 +47,8 @@ class GzipFilterConfig : public Common::Compressors::CompressorFilterConfig { static const envoy::extensions::filters::http::compressor::v3::Compressor compressorConfig(const 
envoy::extensions::filters::http::gzip::v3::Gzip& gzip); - Compressor::ZlibCompressorImpl::CompressionLevel compression_level_; - Compressor::ZlibCompressorImpl::CompressionStrategy compression_strategy_; + Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel compression_level_; + Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy compression_strategy_; const int32_t memory_level_; const int32_t window_bits_; diff --git a/source/extensions/filters/http/well_known_names.h b/source/extensions/filters/http/well_known_names.h index 68bc5c361be4..afa9981a7510 100644 --- a/source/extensions/filters/http/well_known_names.h +++ b/source/extensions/filters/http/well_known_names.h @@ -16,6 +16,8 @@ class HttpFilterNameValues { const std::string Buffer = "envoy.filters.http.buffer"; // Cache filter const std::string Cache = "envoy.filters.http.cache"; + // Compressor filter + const std::string Compressor = "envoy.filters.http.compressor"; // CORS filter const std::string Cors = "envoy.filters.http.cors"; // CSRF filter diff --git a/test/common/compressor/zlib_compressor_impl_test.cc b/test/common/compressor/zlib_compressor_impl_test.cc deleted file mode 100644 index 3e2db26f4d43..000000000000 --- a/test/common/compressor/zlib_compressor_impl_test.cc +++ /dev/null @@ -1,207 +0,0 @@ -#include "common/buffer/buffer_impl.h" -#include "common/common/hex.h" -#include "common/compressor/zlib_compressor_impl.h" - -#include "test/test_common/utility.h" - -#include "absl/container/fixed_array.h" -#include "gtest/gtest.h" - -namespace Envoy { -namespace Compressor { -namespace { - -class ZlibCompressorImplTest : public testing::Test { -protected: - void expectValidFlushedBuffer(const Buffer::OwnedImpl& output_buffer) { - Buffer::RawSliceVector compressed_slices = output_buffer.getRawSlices(); - const uint64_t num_comp_slices = compressed_slices.size(); - - const std::string header_hex_str = Hex::encode( - reinterpret_cast(compressed_slices[0].mem_), compressed_slices[0].len_); - - // HEADER 0x1f = 31 (window_bits) - EXPECT_EQ("1f8b", header_hex_str.substr(0, 4)); - // CM 0x8 = deflate (compression method) - EXPECT_EQ("08", header_hex_str.substr(4, 2)); - - const std::string footer_hex_str = - Hex::encode(reinterpret_cast(compressed_slices[num_comp_slices - 1].mem_), - compressed_slices[num_comp_slices - 1].len_); - // FOOTER four-byte sequence (sync flush) - EXPECT_EQ("0000ffff", footer_hex_str.substr(footer_hex_str.size() - 8, 10)); - } - - void expectValidFinishedBuffer(const Buffer::OwnedImpl& output_buffer, - const uint32_t input_size) { - Buffer::RawSliceVector compressed_slices = output_buffer.getRawSlices(); - const uint64_t num_comp_slices = compressed_slices.size(); - - const std::string header_hex_str = Hex::encode( - reinterpret_cast(compressed_slices[0].mem_), compressed_slices[0].len_); - // HEADER 0x1f = 31 (window_bits) - EXPECT_EQ("1f8b", header_hex_str.substr(0, 4)); - // CM 0x8 = deflate (compression method) - EXPECT_EQ("08", header_hex_str.substr(4, 2)); - - const std::string footer_bytes_str = - Hex::encode(reinterpret_cast(compressed_slices[num_comp_slices - 1].mem_), - compressed_slices[num_comp_slices - 1].len_); - - // A valid finished compressed buffer should have trailer with input size in it. 
- expectEqualInputSize(footer_bytes_str, input_size); - } - - void expectEqualInputSize(const std::string& footer_bytes, const uint32_t input_size) { - const std::string size_bytes = footer_bytes.substr(footer_bytes.size() - 8, 8); - uint64_t size; - StringUtil::atoull(size_bytes.c_str(), size, 16); - EXPECT_EQ(TestUtility::flipOrder(size), input_size); - } - - void drainBuffer(Buffer::OwnedImpl& buffer) { buffer.drain(buffer.length()); } - - static constexpr int64_t gzip_window_bits{31}; - static constexpr int64_t memory_level{8}; - static constexpr uint64_t default_input_size{796}; -}; - -class ZlibCompressorImplTester : public ZlibCompressorImpl { -public: - ZlibCompressorImplTester() = default; - ZlibCompressorImplTester(uint64_t chunk_size) : ZlibCompressorImpl(chunk_size) {} - void compressThenFlush(Buffer::OwnedImpl& buffer) { compress(buffer, State::Flush); } - void finish(Buffer::OwnedImpl& buffer) { compress(buffer, State::Finish); } -}; - -class ZlibCompressorImplDeathTest : public ZlibCompressorImplTest { -protected: - static void compressorBadInitTestHelper(int64_t window_bits, int64_t mem_level) { - ZlibCompressorImpl compressor; - compressor.init(ZlibCompressorImpl::CompressionLevel::Standard, - ZlibCompressorImpl::CompressionStrategy::Standard, window_bits, mem_level); - } - - static void uninitializedCompressorTestHelper() { - Buffer::OwnedImpl buffer; - ZlibCompressorImplTester compressor; - TestUtility::feedBufferWithRandomCharacters(buffer, 100); - compressor.finish(buffer); - } - - static void uninitializedCompressorFlushTestHelper() { - Buffer::OwnedImpl buffer; - ZlibCompressorImplTester compressor; - compressor.compressThenFlush(buffer); - } - - static void uninitializedCompressorFinishTestHelper() { - Buffer::OwnedImpl buffer; - ZlibCompressorImplTester compressor; - compressor.finish(buffer); - } -}; - -// Exercises death by passing bad initialization params or by calling -// compress before init. -TEST_F(ZlibCompressorImplDeathTest, CompressorDeathTest) { - EXPECT_DEATH_LOG_TO_STDERR(compressorBadInitTestHelper(100, 8), "assert failure: result >= 0"); - EXPECT_DEATH_LOG_TO_STDERR(compressorBadInitTestHelper(31, 10), "assert failure: result >= 0"); - EXPECT_DEATH_LOG_TO_STDERR(uninitializedCompressorTestHelper(), "assert failure: result == Z_OK"); - EXPECT_DEATH_LOG_TO_STDERR(uninitializedCompressorFlushTestHelper(), - "assert failure: result == Z_OK"); - EXPECT_DEATH_LOG_TO_STDERR(uninitializedCompressorFinishTestHelper(), - "assert failure: result == Z_STREAM_END"); -} - -// Exercises compressor's checksum by calling it before init or compress. -TEST_F(ZlibCompressorImplTest, CallingChecksum) { - Buffer::OwnedImpl buffer; - - ZlibCompressorImplTester compressor; - EXPECT_EQ(0, compressor.checksum()); - - compressor.init(Envoy::Compressor::ZlibCompressorImpl::CompressionLevel::Standard, - Envoy::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, - gzip_window_bits, memory_level); - EXPECT_EQ(0, compressor.checksum()); - - TestUtility::feedBufferWithRandomCharacters(buffer, 4096); - compressor.compressThenFlush(buffer); - expectValidFlushedBuffer(buffer); - - drainBuffer(buffer); - EXPECT_TRUE(compressor.checksum() > 0); -} - -// Exercises compressor's checksum by calling it before init or compress. 
-TEST_F(ZlibCompressorImplTest, CallingFinishOnly) { - Buffer::OwnedImpl buffer; - - ZlibCompressorImplTester compressor; - compressor.init(Envoy::Compressor::ZlibCompressorImpl::CompressionLevel::Standard, - Envoy::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, - gzip_window_bits, memory_level); - EXPECT_EQ(0, compressor.checksum()); - - TestUtility::feedBufferWithRandomCharacters(buffer, 4096); - compressor.finish(buffer); - expectValidFinishedBuffer(buffer, 4096); -} - -TEST_F(ZlibCompressorImplTest, CompressWithSmallChunkSize) { - Buffer::OwnedImpl buffer; - Buffer::OwnedImpl accumulation_buffer; - - ZlibCompressorImplTester compressor(8); - compressor.init(ZlibCompressorImpl::CompressionLevel::Standard, - ZlibCompressorImpl::CompressionStrategy::Standard, gzip_window_bits, - memory_level); - - uint64_t input_size = 0; - for (uint64_t i = 0; i < 10; i++) { - TestUtility::feedBufferWithRandomCharacters(buffer, default_input_size * i, i); - ASSERT_EQ(default_input_size * i, buffer.length()); - input_size += buffer.length(); - compressor.compressThenFlush(buffer); - accumulation_buffer.add(buffer); - drainBuffer(buffer); - ASSERT_EQ(0, buffer.length()); - } - expectValidFlushedBuffer(accumulation_buffer); - - compressor.finish(buffer); - accumulation_buffer.add(buffer); - expectValidFinishedBuffer(accumulation_buffer, input_size); -} - -// Exercises compression with other supported zlib initialization params. -TEST_F(ZlibCompressorImplTest, CompressWithNotCommonParams) { - Buffer::OwnedImpl buffer; - Buffer::OwnedImpl accumulation_buffer; - - ZlibCompressorImplTester compressor; - compressor.init(ZlibCompressorImpl::CompressionLevel::Speed, - ZlibCompressorImpl::CompressionStrategy::Rle, gzip_window_bits, 1); - - uint64_t input_size = 0; - for (uint64_t i = 0; i < 10; i++) { - TestUtility::feedBufferWithRandomCharacters(buffer, default_input_size * i, i); - ASSERT_EQ(default_input_size * i, buffer.length()); - input_size += buffer.length(); - compressor.compressThenFlush(buffer); - accumulation_buffer.add(buffer); - drainBuffer(buffer); - ASSERT_EQ(0, buffer.length()); - } - - expectValidFlushedBuffer(accumulation_buffer); - - compressor.finish(buffer); - accumulation_buffer.add(buffer); - expectValidFinishedBuffer(accumulation_buffer, input_size); -} - -} // namespace -} // namespace Compressor -} // namespace Envoy diff --git a/test/common/decompressor/BUILD b/test/common/decompressor/BUILD index d1608797be71..55703c5b25ab 100644 --- a/test/common/decompressor/BUILD +++ b/test/common/decompressor/BUILD @@ -14,8 +14,8 @@ envoy_cc_test( deps = [ "//source/common/common:assert_lib", "//source/common/common:hex_lib", - "//source/common/compressor:compressor_lib", "//source/common/decompressor:decompressor_lib", + "//source/extensions/compression/gzip/compressor:compressor_lib", "//test/test_common:utility_lib", ], ) diff --git a/test/common/decompressor/zlib_decompressor_impl_test.cc b/test/common/decompressor/zlib_decompressor_impl_test.cc index 93ff4e07729d..99e0b43e83f6 100644 --- a/test/common/decompressor/zlib_decompressor_impl_test.cc +++ b/test/common/decompressor/zlib_decompressor_impl_test.cc @@ -1,8 +1,9 @@ #include "common/buffer/buffer_impl.h" #include "common/common/hex.h" -#include "common/compressor/zlib_compressor_impl.h" #include "common/decompressor/zlib_decompressor_impl.h" +#include "extensions/compression/gzip/compressor/zlib_compressor_impl.h" + #include "test/test_common/utility.h" #include "gtest/gtest.h" @@ -16,26 +17,27 @@ class 
ZlibDecompressorImplTest : public testing::Test { void drainBuffer(Buffer::OwnedImpl& buffer) { buffer.drain(buffer.length()); } void testcompressDecompressWithUncommonParams( - Compressor::ZlibCompressorImpl::CompressionLevel comp_level, - Compressor::ZlibCompressorImpl::CompressionStrategy comp_strategy, int64_t window_bits, - uint64_t memory_level) { + Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel comp_level, + Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy + comp_strategy, + int64_t window_bits, uint64_t memory_level) { Buffer::OwnedImpl buffer; Buffer::OwnedImpl accumulation_buffer; - Envoy::Compressor::ZlibCompressorImpl compressor; + Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl compressor; compressor.init(comp_level, comp_strategy, window_bits, memory_level); std::string original_text{}; for (uint64_t i = 0; i < 30; ++i) { TestUtility::feedBufferWithRandomCharacters(buffer, default_input_size * i, i); original_text.append(buffer.toString()); - compressor.compress(buffer, Compressor::State::Flush); + compressor.compress(buffer, Compression::Compressor::State::Flush); accumulation_buffer.add(buffer); drainBuffer(buffer); } ASSERT_EQ(0, buffer.length()); - compressor.compress(buffer, Compressor::State::Finish); + compressor.compress(buffer, Compression::Compressor::State::Finish); accumulation_buffer.add(buffer); drainBuffer(buffer); @@ -87,16 +89,17 @@ TEST_F(ZlibDecompressorImplTest, CallingChecksum) { Buffer::OwnedImpl compressor_buffer; Buffer::OwnedImpl decompressor_output_buffer; - Envoy::Compressor::ZlibCompressorImpl compressor; + Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl compressor; ASSERT_EQ(0, compressor.checksum()); - compressor.init(Envoy::Compressor::ZlibCompressorImpl::CompressionLevel::Standard, - Envoy::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, - gzip_window_bits, memory_level); + compressor.init( + Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Standard, + Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, + gzip_window_bits, memory_level); ASSERT_EQ(0, compressor.checksum()); TestUtility::feedBufferWithRandomCharacters(compressor_buffer, 4096); - compressor.compress(compressor_buffer, Compressor::State::Flush); + compressor.compress(compressor_buffer, Compression::Compressor::State::Flush); ASSERT_TRUE(compressor.checksum() > 0); ZlibDecompressorImpl decompressor; @@ -119,23 +122,24 @@ TEST_F(ZlibDecompressorImplTest, CompressAndDecompress) { Buffer::OwnedImpl accumulation_buffer; Buffer::OwnedImpl empty_buffer; - Envoy::Compressor::ZlibCompressorImpl compressor; - compressor.init(Envoy::Compressor::ZlibCompressorImpl::CompressionLevel::Standard, - Envoy::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, - gzip_window_bits, memory_level); + Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl compressor; + compressor.init( + Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Standard, + Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, + gzip_window_bits, memory_level); std::string original_text{}; for (uint64_t i = 0; i < 20; ++i) { TestUtility::feedBufferWithRandomCharacters(buffer, default_input_size * i, i); original_text.append(buffer.toString()); - compressor.compress(buffer, Compressor::State::Flush); + compressor.compress(buffer, 
Compression::Compressor::State::Flush); accumulation_buffer.add(buffer); drainBuffer(buffer); } ASSERT_EQ(0, buffer.length()); - compressor.compress(buffer, Compressor::State::Finish); + compressor.compress(buffer, Compression::Compressor::State::Finish); ASSERT_GE(10, buffer.length()); accumulation_buffer.add(buffer); @@ -186,23 +190,24 @@ TEST_F(ZlibDecompressorImplTest, DecompressWithSmallOutputBuffer) { Buffer::OwnedImpl buffer; Buffer::OwnedImpl accumulation_buffer; - Envoy::Compressor::ZlibCompressorImpl compressor; - compressor.init(Envoy::Compressor::ZlibCompressorImpl::CompressionLevel::Standard, - Envoy::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, - gzip_window_bits, memory_level); + Envoy::Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl compressor; + compressor.init( + Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Standard, + Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, + gzip_window_bits, memory_level); std::string original_text{}; for (uint64_t i = 0; i < 20; ++i) { TestUtility::feedBufferWithRandomCharacters(buffer, default_input_size * i, i); original_text.append(buffer.toString()); - compressor.compress(buffer, Compressor::State::Flush); + compressor.compress(buffer, Compression::Compressor::State::Flush); accumulation_buffer.add(buffer); drainBuffer(buffer); } ASSERT_EQ(0, buffer.length()); - compressor.compress(buffer, Compressor::State::Finish); + compressor.compress(buffer, Compression::Compressor::State::Finish); ASSERT_GE(10, buffer.length()); accumulation_buffer.add(buffer); @@ -227,20 +232,25 @@ TEST_F(ZlibDecompressorImplTest, CompressDecompressWithUncommonParams) { // Test with different memory levels. for (uint64_t i = 1; i < 10; ++i) { testcompressDecompressWithUncommonParams( - Compressor::ZlibCompressorImpl::CompressionLevel::Best, - Envoy::Compressor::ZlibCompressorImpl::CompressionStrategy::Rle, 15, i); + Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Best, + Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Rle, 15, + i); testcompressDecompressWithUncommonParams( - Compressor::ZlibCompressorImpl::CompressionLevel::Best, - Envoy::Compressor::ZlibCompressorImpl::CompressionStrategy::Rle, 15, i); + Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Best, + Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Rle, 15, + i); testcompressDecompressWithUncommonParams( - Compressor::ZlibCompressorImpl::CompressionLevel::Speed, - Envoy::Compressor::ZlibCompressorImpl::CompressionStrategy::Huffman, 15, i); + Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Speed, + Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Huffman, + 15, i); testcompressDecompressWithUncommonParams( - Compressor::ZlibCompressorImpl::CompressionLevel::Speed, - Envoy::Compressor::ZlibCompressorImpl::CompressionStrategy::Filtered, 15, i); + Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Speed, + Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy:: + Filtered, + 15, i); } } @@ -262,12 +272,13 @@ TEST_F(ZlibDecompressorImplTest, CompressDecompressOfMultipleSlices) { const uint64_t num_slices = buffer.getRawSlices().size(); EXPECT_EQ(num_slices, 20); - Envoy::Compressor::ZlibCompressorImpl compressor; - 
compressor.init(Envoy::Compressor::ZlibCompressorImpl::CompressionLevel::Standard, - Envoy::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, - gzip_window_bits, memory_level); + Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl compressor; + compressor.init( + Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Standard, + Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, + gzip_window_bits, memory_level); - compressor.compress(buffer, Compressor::State::Flush); + compressor.compress(buffer, Compression::Compressor::State::Flush); accumulation_buffer.add(buffer); ZlibDecompressorImpl decompressor; diff --git a/test/common/compressor/BUILD b/test/extensions/compression/gzip/BUILD similarity index 54% rename from test/common/compressor/BUILD rename to test/extensions/compression/gzip/BUILD index e58b835ab69a..98c9de21486e 100644 --- a/test/common/compressor/BUILD +++ b/test/extensions/compression/gzip/BUILD @@ -3,7 +3,6 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", "envoy_cc_fuzz_test", - "envoy_cc_test", "envoy_package", ) @@ -16,18 +15,7 @@ envoy_cc_fuzz_test( deps = [ "//source/common/buffer:buffer_lib", "//source/common/common:assert_lib", - "//source/common/compressor:compressor_lib", "//source/common/decompressor:decompressor_lib", - ], -) - -envoy_cc_test( - name = "compressor_test", - srcs = ["zlib_compressor_impl_test.cc"], - deps = [ - "//source/common/common:assert_lib", - "//source/common/common:hex_lib", - "//source/common/compressor:compressor_lib", - "//test/test_common:utility_lib", + "//source/extensions/compression/gzip/compressor:compressor_lib", ], ) diff --git a/test/extensions/compression/gzip/compressor/BUILD b/test/extensions/compression/gzip/compressor/BUILD new file mode 100644 index 000000000000..0121199e1060 --- /dev/null +++ b/test/extensions/compression/gzip/compressor/BUILD @@ -0,0 +1,24 @@ +licenses(["notice"]) # Apache 2 + +load( + "//bazel:envoy_build_system.bzl", + "envoy_package", +) +load( + "//test/extensions:extensions_build_system.bzl", + "envoy_extension_cc_test", +) + +envoy_package() + +envoy_extension_cc_test( + name = "compressor_test", + srcs = ["zlib_compressor_impl_test.cc"], + extension_name = "envoy.compression.gzip.compressor", + deps = [ + "//source/common/common:assert_lib", + "//source/common/common:hex_lib", + "//source/extensions/compression/gzip/compressor:config", + "//test/test_common:utility_lib", + ], +) diff --git a/test/extensions/compression/gzip/compressor/zlib_compressor_impl_test.cc b/test/extensions/compression/gzip/compressor/zlib_compressor_impl_test.cc new file mode 100644 index 000000000000..ee8b487549d6 --- /dev/null +++ b/test/extensions/compression/gzip/compressor/zlib_compressor_impl_test.cc @@ -0,0 +1,268 @@ +#include "common/buffer/buffer_impl.h" +#include "common/common/hex.h" + +#include "extensions/compression/gzip/compressor/config.h" +#include "extensions/compression/gzip/compressor/zlib_compressor_impl.h" + +#include "test/test_common/utility.h" + +#include "absl/container/fixed_array.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace Compression { +namespace Gzip { +namespace Compressor { +namespace { + +// Test helpers + +void expectValidFlushedBuffer(const Buffer::OwnedImpl& output_buffer) { + Buffer::RawSliceVector compressed_slices = output_buffer.getRawSlices(); + const uint64_t num_comp_slices = compressed_slices.size(); + + const std::string 
header_hex_str = Hex::encode( + reinterpret_cast<const uint8_t*>(compressed_slices[0].mem_), compressed_slices[0].len_); + + // HEADER 0x1f = 31 (window_bits) + EXPECT_EQ("1f8b", header_hex_str.substr(0, 4)); + // CM 0x8 = deflate (compression method) + EXPECT_EQ("08", header_hex_str.substr(4, 2)); + + const std::string footer_hex_str = + Hex::encode(reinterpret_cast<const uint8_t*>(compressed_slices[num_comp_slices - 1].mem_), + compressed_slices[num_comp_slices - 1].len_); + // FOOTER four-byte sequence (sync flush) + EXPECT_EQ("0000ffff", footer_hex_str.substr(footer_hex_str.size() - 8, 10)); +} + +void expectEqualInputSize(const std::string& footer_bytes, const uint32_t input_size) { + const std::string size_bytes = footer_bytes.substr(footer_bytes.size() - 8, 8); + uint64_t size; + StringUtil::atoull(size_bytes.c_str(), size, 16); + EXPECT_EQ(TestUtility::flipOrder<uint32_t>(size), input_size); +} + +void expectValidFinishedBuffer(const Buffer::OwnedImpl& output_buffer, const uint32_t input_size) { + Buffer::RawSliceVector compressed_slices = output_buffer.getRawSlices(); + const uint64_t num_comp_slices = compressed_slices.size(); + + const std::string header_hex_str = Hex::encode( + reinterpret_cast<const uint8_t*>(compressed_slices[0].mem_), compressed_slices[0].len_); + // HEADER 0x1f = 31 (window_bits) + EXPECT_EQ("1f8b", header_hex_str.substr(0, 4)); + // CM 0x8 = deflate (compression method) + EXPECT_EQ("08", header_hex_str.substr(4, 2)); + + const std::string footer_bytes_str = + Hex::encode(reinterpret_cast<const uint8_t*>(compressed_slices[num_comp_slices - 1].mem_), + compressed_slices[num_comp_slices - 1].len_); + + // A valid finished compressed buffer should have trailer with input size in it. + expectEqualInputSize(footer_bytes_str, input_size); +} + +void drainBuffer(Buffer::OwnedImpl& buffer) { buffer.drain(buffer.length()); } + +class ZlibCompressorImplTester : public ZlibCompressorImpl { +public: + ZlibCompressorImplTester() = default; + ZlibCompressorImplTester(uint64_t chunk_size) : ZlibCompressorImpl(chunk_size) {} + void compressThenFlush(Buffer::OwnedImpl& buffer) { + compress(buffer, Envoy::Compression::Compressor::State::Flush); + } + void finish(Buffer::OwnedImpl& buffer) { + compress(buffer, Envoy::Compression::Compressor::State::Finish); + } +}; + +// Fixtures + +class ZlibCompressorImplTest : public testing::Test { +protected: + static constexpr int64_t gzip_window_bits{31}; + static constexpr int64_t memory_level{8}; + static constexpr uint64_t default_input_size{796}; +}; + +class ZlibCompressorImplDeathTest : public ZlibCompressorImplTest { +protected: + static void compressorBadInitTestHelper(int64_t window_bits, int64_t mem_level) { + ZlibCompressorImpl compressor; + compressor.init(ZlibCompressorImpl::CompressionLevel::Standard, + ZlibCompressorImpl::CompressionStrategy::Standard, window_bits, mem_level); + } + + static void uninitializedCompressorTestHelper() { + Buffer::OwnedImpl buffer; + ZlibCompressorImplTester compressor; + TestUtility::feedBufferWithRandomCharacters(buffer, 100); + compressor.finish(buffer); + } + + static void uninitializedCompressorFlushTestHelper() { + Buffer::OwnedImpl buffer; + ZlibCompressorImplTester compressor; + compressor.compressThenFlush(buffer); + } + + static void uninitializedCompressorFinishTestHelper() { + Buffer::OwnedImpl buffer; + ZlibCompressorImplTester compressor; + compressor.finish(buffer); + } +}; + +class ZlibCompressorImplFactoryTest + : public ::testing::TestWithParam<std::tuple<std::string, std::string>> {}; + +INSTANTIATE_TEST_SUITE_P( + CreateCompressorTests, ZlibCompressorImplFactoryTest, 
::testing::Values(std::make_tuple("", ""), std::make_tuple("FILTERED", "BEST_COMPRESSION"), + std::make_tuple("HUFFMAN_ONLY", "BEST_COMPRESSION"), + std::make_tuple("RLE", "BEST_SPEED"), + std::make_tuple("DEFAULT_STRATEGY", "DEFAULT_COMPRESSION"), + std::make_tuple("FIXED", "COMPRESSION_LEVEL_1"), + std::make_tuple("FIXED", "COMPRESSION_LEVEL_2"), + std::make_tuple("FIXED", "COMPRESSION_LEVEL_3"), + std::make_tuple("FIXED", "COMPRESSION_LEVEL_4"), + std::make_tuple("FIXED", "COMPRESSION_LEVEL_5"), + std::make_tuple("FIXED", "COMPRESSION_LEVEL_6"), + std::make_tuple("FIXED", "COMPRESSION_LEVEL_7"), + std::make_tuple("FIXED", "COMPRESSION_LEVEL_8"), + std::make_tuple("FIXED", "COMPRESSION_LEVEL_9"))); + +TEST_P(ZlibCompressorImplFactoryTest, CreateCompressorTest) { + Buffer::OwnedImpl buffer; + envoy::extensions::compression::gzip::compressor::v3::Gzip gzip; + std::string json{"{}"}; + absl::string_view strategy = std::get<0>(GetParam()); + absl::string_view compression_level = std::get<1>(GetParam()); + + if (!strategy.empty()) { + json = fmt::format(R"EOF({{ + "compression_strategy": "{}", + "compression_level": "{}", + "memory_level": 6, + "window_bits": 27, + "chunk_size": 10000 + }})EOF", + strategy, compression_level); + } + TestUtility::loadFromJson(json, gzip); + Envoy::Compression::Compressor::CompressorPtr compressor = + GzipCompressorFactory(gzip).createCompressor(); + // Check the created compressor produces valid output. + TestUtility::feedBufferWithRandomCharacters(buffer, 4096); + compressor->compress(buffer, Envoy::Compression::Compressor::State::Flush); + expectValidFlushedBuffer(buffer); + drainBuffer(buffer); +} + +// Exercises death by passing bad initialization params or by calling +// compress before init. +TEST_F(ZlibCompressorImplDeathTest, CompressorDeathTest) { + EXPECT_DEATH_LOG_TO_STDERR(compressorBadInitTestHelper(100, 8), "assert failure: result >= 0"); + EXPECT_DEATH_LOG_TO_STDERR(compressorBadInitTestHelper(31, 10), "assert failure: result >= 0"); + EXPECT_DEATH_LOG_TO_STDERR(uninitializedCompressorTestHelper(), "assert failure: result == Z_OK"); + EXPECT_DEATH_LOG_TO_STDERR(uninitializedCompressorFlushTestHelper(), + "assert failure: result == Z_OK"); + EXPECT_DEATH_LOG_TO_STDERR(uninitializedCompressorFinishTestHelper(), + "assert failure: result == Z_STREAM_END"); +} + +// Exercises compressor's checksum by calling it before init or compress. +TEST_F(ZlibCompressorImplTest, CallingChecksum) { + Buffer::OwnedImpl buffer; + + ZlibCompressorImplTester compressor; + EXPECT_EQ(0, compressor.checksum()); + + compressor.init(ZlibCompressorImpl::CompressionLevel::Standard, + ZlibCompressorImpl::CompressionStrategy::Standard, gzip_window_bits, + memory_level); + EXPECT_EQ(0, compressor.checksum()); + + TestUtility::feedBufferWithRandomCharacters(buffer, 4096); + compressor.compressThenFlush(buffer); + expectValidFlushedBuffer(buffer); + + drainBuffer(buffer); + EXPECT_TRUE(compressor.checksum() > 0); +} + +// Exercises compressor's checksum by calling it before init or compress. 
+TEST_F(ZlibCompressorImplTest, CallingFinishOnly) { + Buffer::OwnedImpl buffer; + + ZlibCompressorImplTester compressor; + compressor.init(ZlibCompressorImpl::CompressionLevel::Standard, + ZlibCompressorImpl::CompressionStrategy::Standard, gzip_window_bits, + memory_level); + EXPECT_EQ(0, compressor.checksum()); + + TestUtility::feedBufferWithRandomCharacters(buffer, 4096); + compressor.finish(buffer); + expectValidFinishedBuffer(buffer, 4096); +} + +TEST_F(ZlibCompressorImplTest, CompressWithSmallChunkSize) { + Buffer::OwnedImpl buffer; + Buffer::OwnedImpl accumulation_buffer; + + ZlibCompressorImplTester compressor(8); + compressor.init(ZlibCompressorImpl::CompressionLevel::Standard, + ZlibCompressorImpl::CompressionStrategy::Standard, gzip_window_bits, + memory_level); + + uint64_t input_size = 0; + for (uint64_t i = 0; i < 10; i++) { + TestUtility::feedBufferWithRandomCharacters(buffer, default_input_size * i, i); + ASSERT_EQ(default_input_size * i, buffer.length()); + input_size += buffer.length(); + compressor.compressThenFlush(buffer); + accumulation_buffer.add(buffer); + drainBuffer(buffer); + ASSERT_EQ(0, buffer.length()); + } + expectValidFlushedBuffer(accumulation_buffer); + + compressor.finish(buffer); + accumulation_buffer.add(buffer); + expectValidFinishedBuffer(accumulation_buffer, input_size); +} + +// Exercises compression with other supported zlib initialization params. +TEST_F(ZlibCompressorImplTest, CompressWithNotCommonParams) { + Buffer::OwnedImpl buffer; + Buffer::OwnedImpl accumulation_buffer; + + ZlibCompressorImplTester compressor; + compressor.init(ZlibCompressorImpl::CompressionLevel::Speed, + ZlibCompressorImpl::CompressionStrategy::Rle, gzip_window_bits, 1); + + uint64_t input_size = 0; + for (uint64_t i = 0; i < 10; i++) { + TestUtility::feedBufferWithRandomCharacters(buffer, default_input_size * i, i); + ASSERT_EQ(default_input_size * i, buffer.length()); + input_size += buffer.length(); + compressor.compressThenFlush(buffer); + accumulation_buffer.add(buffer); + drainBuffer(buffer); + ASSERT_EQ(0, buffer.length()); + } + + expectValidFlushedBuffer(accumulation_buffer); + + compressor.finish(buffer); + accumulation_buffer.add(buffer); + expectValidFinishedBuffer(accumulation_buffer, input_size); +} + +} // namespace +} // namespace Compressor +} // namespace Gzip +} // namespace Compression +} // namespace Extensions +} // namespace Envoy diff --git a/test/common/compressor/compressor_corpus/clusterfuzz-testcase-minimized-compressor_fuzz_test-5149986500640768 b/test/extensions/compression/gzip/compressor_corpus/clusterfuzz-testcase-minimized-compressor_fuzz_test-5149986500640768 similarity index 100% rename from test/common/compressor/compressor_corpus/clusterfuzz-testcase-minimized-compressor_fuzz_test-5149986500640768 rename to test/extensions/compression/gzip/compressor_corpus/clusterfuzz-testcase-minimized-compressor_fuzz_test-5149986500640768 diff --git a/test/common/compressor/compressor_corpus/clusterfuzz-testcase-minimized-compressor_fuzz_test-5407695477932032 b/test/extensions/compression/gzip/compressor_corpus/clusterfuzz-testcase-minimized-compressor_fuzz_test-5407695477932032 similarity index 100% rename from test/common/compressor/compressor_corpus/clusterfuzz-testcase-minimized-compressor_fuzz_test-5407695477932032 rename to test/extensions/compression/gzip/compressor_corpus/clusterfuzz-testcase-minimized-compressor_fuzz_test-5407695477932032 diff --git 
a/test/common/compressor/compressor_corpus/clusterfuzz-testcase-minimized-compressor_fuzz_test-5644831560302592 b/test/extensions/compression/gzip/compressor_corpus/clusterfuzz-testcase-minimized-compressor_fuzz_test-5644831560302592 similarity index 100% rename from test/common/compressor/compressor_corpus/clusterfuzz-testcase-minimized-compressor_fuzz_test-5644831560302592 rename to test/extensions/compression/gzip/compressor_corpus/clusterfuzz-testcase-minimized-compressor_fuzz_test-5644831560302592 diff --git a/test/common/compressor/compressor_corpus/clusterfuzz-testcase-minimized-compressor_fuzz_test-6005942746873856 b/test/extensions/compression/gzip/compressor_corpus/clusterfuzz-testcase-minimized-compressor_fuzz_test-6005942746873856 similarity index 100% rename from test/common/compressor/compressor_corpus/clusterfuzz-testcase-minimized-compressor_fuzz_test-6005942746873856 rename to test/extensions/compression/gzip/compressor_corpus/clusterfuzz-testcase-minimized-compressor_fuzz_test-6005942746873856 diff --git a/test/common/compressor/compressor_corpus/empty b/test/extensions/compression/gzip/compressor_corpus/empty similarity index 100% rename from test/common/compressor/compressor_corpus/empty rename to test/extensions/compression/gzip/compressor_corpus/empty diff --git a/test/common/compressor/compressor_corpus/noise b/test/extensions/compression/gzip/compressor_corpus/noise similarity index 100% rename from test/common/compressor/compressor_corpus/noise rename to test/extensions/compression/gzip/compressor_corpus/noise diff --git a/test/common/compressor/compressor_corpus/simple b/test/extensions/compression/gzip/compressor_corpus/simple similarity index 100% rename from test/common/compressor/compressor_corpus/simple rename to test/extensions/compression/gzip/compressor_corpus/simple diff --git a/test/common/compressor/compressor_fuzz_test.cc b/test/extensions/compression/gzip/compressor_fuzz_test.cc similarity index 88% rename from test/common/compressor/compressor_fuzz_test.cc rename to test/extensions/compression/gzip/compressor_fuzz_test.cc index 1c28ac5bcc5c..b9a194c0c15c 100644 --- a/test/common/compressor/compressor_fuzz_test.cc +++ b/test/extensions/compression/gzip/compressor_fuzz_test.cc @@ -1,11 +1,15 @@ #include "common/buffer/buffer_impl.h" #include "common/common/assert.h" -#include "common/compressor/zlib_compressor_impl.h" #include "common/decompressor/zlib_decompressor_impl.h" +#include "extensions/compression/gzip/compressor/zlib_compressor_impl.h" + #include "test/fuzz/fuzz_runner.h" namespace Envoy { +namespace Extensions { +namespace Compression { +namespace Gzip { namespace Compressor { namespace Fuzz { @@ -61,7 +65,8 @@ DEFINE_FUZZER(const uint8_t* buf, size_t len) { full_input.add(next_data); Buffer::OwnedImpl buffer{next_data.data(), next_data.size()}; provider_empty = provider.remaining_bytes() == 0; - compressor.compress(buffer, provider_empty ? State::Finish : State::Flush); + compressor.compress(buffer, provider_empty ? 
Envoy::Compression::Compressor::State::Finish + : Envoy::Compression::Compressor::State::Flush); decompressor.decompress(buffer, full_output); } RELEASE_ASSERT(full_input.toString() == full_output.toString(), ""); @@ -70,4 +75,7 @@ DEFINE_FUZZER(const uint8_t* buf, size_t len) { } // namespace Fuzz } // namespace Compressor +} // namespace Gzip +} // namespace Compression +} // namespace Extensions } // namespace Envoy diff --git a/test/extensions/filters/http/common/compressor/BUILD b/test/extensions/filters/http/common/compressor/BUILD index 688f9164d800..a3cb82c215e4 100644 --- a/test/extensions/filters/http/common/compressor/BUILD +++ b/test/extensions/filters/http/common/compressor/BUILD @@ -14,7 +14,9 @@ envoy_cc_test( srcs = ["compressor_filter_test.cc"], deps = [ "//source/common/protobuf:utility_lib", + "//source/extensions/compression/gzip/compressor:config", "//source/extensions/filters/http/common/compressor:compressor_lib", + "//test/mocks/compression/compressor:compressor_mocks", "//test/mocks/http:http_mocks", "//test/mocks/protobuf:protobuf_mocks", "//test/mocks/runtime:runtime_mocks", @@ -31,8 +33,8 @@ envoy_cc_test_binary( "googletest", ], deps = [ - "//source/common/compressor:compressor_lib", "//source/common/protobuf:utility_lib", + "//source/extensions/compression/gzip/compressor:compressor_lib", "//source/extensions/filters/http/common/compressor:compressor_lib", "//test/mocks/http:http_mocks", "//test/mocks/protobuf:protobuf_mocks", diff --git a/test/extensions/filters/http/common/compressor/compressor_filter_speed_test.cc b/test/extensions/filters/http/common/compressor/compressor_filter_speed_test.cc index 54103190e308..cb3c4b26e425 100644 --- a/test/extensions/filters/http/common/compressor/compressor_filter_speed_test.cc +++ b/test/extensions/filters/http/common/compressor/compressor_filter_speed_test.cc @@ -1,7 +1,6 @@ #include "envoy/extensions/filters/http/compressor/v3/compressor.pb.h" -#include "common/compressor/zlib_compressor_impl.h" - +#include "extensions/compression/gzip/compressor/zlib_compressor_impl.h" #include "extensions/filters/http/common/compressor/compressor.h" #include "test/mocks/http/mocks.h" @@ -25,29 +24,30 @@ class MockCompressorFilterConfig : public CompressorFilterConfig { const envoy::extensions::filters::http::compressor::v3::Compressor& compressor, const std::string& stats_prefix, Stats::Scope& scope, Runtime::Loader& runtime, const std::string& compressor_name, - Envoy::Compressor::ZlibCompressorImpl::CompressionLevel level, - Envoy::Compressor::ZlibCompressorImpl::CompressionStrategy strategy, int64_t window_bits, - uint64_t memory_level) + Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel level, + Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy strategy, + int64_t window_bits, uint64_t memory_level) : CompressorFilterConfig(compressor, stats_prefix + compressor_name + ".", scope, runtime, compressor_name), level_(level), strategy_(strategy), window_bits_(window_bits), memory_level_(memory_level) { } - std::unique_ptr makeCompressor() override { - auto compressor = std::make_unique(); + Envoy::Compression::Compressor::CompressorPtr makeCompressor() override { + auto compressor = std::make_unique(); compressor->init(level_, strategy_, window_bits_, memory_level_); return compressor; } - const Envoy::Compressor::ZlibCompressorImpl::CompressionLevel level_; - const Envoy::Compressor::ZlibCompressorImpl::CompressionStrategy strategy_; + const 
Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel level_; + const Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy strategy_; const int64_t window_bits_; const uint64_t memory_level_; }; using CompressionParams = - std::tuple; + std::tuple; static constexpr uint64_t TestDataSize = 122880; @@ -191,40 +191,40 @@ compressChunks8192/5/manual_time 15.9 ms 16.1 ms 45 static std::vector compression_params = { // Speed + Standard + Small Window + Low mem level - {Envoy::Compressor::ZlibCompressorImpl::CompressionLevel::Speed, - Envoy::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, 9, 1}, + {Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Speed, + Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, 9, 1}, // Speed + Standard + Med window + Med mem level - {Envoy::Compressor::ZlibCompressorImpl::CompressionLevel::Speed, - Envoy::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, 12, 5}, + {Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Speed, + Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, 12, 5}, // Speed + Standard + Big window + High mem level - {Envoy::Compressor::ZlibCompressorImpl::CompressionLevel::Speed, - Envoy::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, 15, 9}, + {Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Speed, + Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, 15, 9}, // Standard + Standard + Small window + Low mem level - {Envoy::Compressor::ZlibCompressorImpl::CompressionLevel::Standard, - Envoy::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, 9, 1}, + {Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Standard, + Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, 9, 1}, // Standard + Standard + Med window + Med mem level - {Envoy::Compressor::ZlibCompressorImpl::CompressionLevel::Standard, - Envoy::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, 12, 5}, + {Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Standard, + Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, 12, 5}, // Standard + Standard + High window + High mem level - {Envoy::Compressor::ZlibCompressorImpl::CompressionLevel::Standard, - Envoy::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, 15, 9}, + {Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Standard, + Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, 15, 9}, // Best + Standard + Small window + Low mem level - {Envoy::Compressor::ZlibCompressorImpl::CompressionLevel::Best, - Envoy::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, 9, 1}, + {Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Best, + Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, 9, 1}, // Best + Standard + Med window + Med mem level - {Envoy::Compressor::ZlibCompressorImpl::CompressionLevel::Best, - Envoy::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, 12, 5}, + {Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Best, + Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, 12, 5}, // Best + Standard + High window + High mem level - {Envoy::Compressor::ZlibCompressorImpl::CompressionLevel::Best, - 
Envoy::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, 15, 9}}; + {Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Best, + Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, 15, 9}}; static void compressFull(benchmark::State& state) { NiceMock decoder_callbacks; diff --git a/test/extensions/filters/http/common/compressor/compressor_filter_test.cc b/test/extensions/filters/http/common/compressor/compressor_filter_test.cc index a82c86444d0a..225edcaeaa32 100644 --- a/test/extensions/filters/http/common/compressor/compressor_filter_test.cc +++ b/test/extensions/filters/http/common/compressor/compressor_filter_test.cc @@ -6,6 +6,7 @@ #include "extensions/filters/http/common/compressor/compressor.h" +#include "test/mocks/compression/compressor/mocks.h" #include "test/mocks/http/mocks.h" #include "test/mocks/protobuf/mocks.h" #include "test/mocks/runtime/mocks.h" @@ -14,30 +15,34 @@ #include "gtest/gtest.h" -using testing::Return; - namespace Envoy { namespace Extensions { namespace HttpFilters { namespace Common { namespace Compressors { -class MockCompressor : public Compressor::Compressor { - void compress(Buffer::Instance&, ::Envoy::Compressor::State) override {} -}; +using testing::_; +using testing::Return; -class MockCompressorFilterConfig : public CompressorFilterConfig { +class TestCompressorFilterConfig : public CompressorFilterConfig { public: - MockCompressorFilterConfig( + TestCompressorFilterConfig( const envoy::extensions::filters::http::compressor::v3::Compressor& compressor, const std::string& stats_prefix, Stats::Scope& scope, Runtime::Loader& runtime, const std::string& compressor_name) : CompressorFilterConfig(compressor, stats_prefix + compressor_name + ".", scope, runtime, compressor_name) {} - std::unique_ptr makeCompressor() override { - return std::make_unique(); + Envoy::Compression::Compressor::CompressorPtr makeCompressor() override { + auto compressor = std::make_unique(); + EXPECT_CALL(*compressor, compress(_, _)).Times(expected_compress_calls_); + return compressor; } + + void setExpectedCompressCalls(uint32_t calls) { expected_compress_calls_ = calls; } + +private: + uint32_t expected_compress_calls_{1}; }; class CompressorFilterTest : public testing::Test { @@ -47,7 +52,17 @@ class CompressorFilterTest : public testing::Test { .WillByDefault(Return(true)); } - void SetUp() override { setUpFilter("{}"); } + void SetUp() override { + setUpFilter(R"EOF( +{ + "compressor_library": { + "typed_config": { + "@type": "type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip" + } + } +} +)EOF"); + } // CompressorFilter private member functions void sanitizeEtagHeader(Http::ResponseHeaderMap& headers) { @@ -93,7 +108,7 @@ class CompressorFilterTest : public testing::Test { envoy::extensions::filters::http::compressor::v3::Compressor compressor; TestUtility::loadFromJson(json, compressor); config_ = - std::make_shared(compressor, "test.", stats_, runtime_, "test"); + std::make_shared(compressor, "test.", stats_, runtime_, "test"); filter_ = std::make_unique(config_); filter_->setEncoderFilterCallbacks(encoder_callbacks_); } @@ -117,7 +132,7 @@ class CompressorFilterTest : public testing::Test { EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, end_stream)); } - void doResponseCompression(Http::TestResponseHeaderMapImpl&& headers, bool with_trailers) { + void doResponseCompression(Http::TestResponseHeaderMapImpl& headers, bool with_trailers) { NiceMock 
decoder_callbacks; filter_->setDecoderFilterCallbacks(decoder_callbacks); uint64_t content_length; @@ -158,7 +173,7 @@ class CompressorFilterTest : public testing::Test { EXPECT_EQ(1, stats_.counter("test.test.not_compressed").value()); } - CompressorFilterConfigSharedPtr config_; + std::shared_ptr config_; std::unique_ptr filter_; Buffer::OwnedImpl data_; std::string expected_str_; @@ -174,6 +189,11 @@ TEST_F(CompressorFilterTest, DecodeHeadersWithRuntimeDisabled) { "runtime_enabled": { "default_value": true, "runtime_key": "foo_key" + }, + "compressor_library": { + "typed_config": { + "@type": "type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip" + } } } )EOF"); @@ -199,7 +219,9 @@ TEST_F(CompressorFilterTest, AcceptanceTestEncoding) { EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data, false)); Http::TestRequestTrailerMapImpl trailers; EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(trailers)); - doResponseCompression({{":method", "get"}, {"content-length", "256"}}, false); + + Http::TestResponseHeaderMapImpl headers{{":method", "get"}, {"content-length", "256"}}; + doResponseCompression(headers, false); } TEST_F(CompressorFilterTest, AcceptanceTestEncodingWithTrailers) { @@ -208,7 +230,9 @@ TEST_F(CompressorFilterTest, AcceptanceTestEncodingWithTrailers) { EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data, false)); Http::TestRequestTrailerMapImpl trailers; EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(trailers)); - doResponseCompression({{":method", "get"}, {"content-length", "256"}}, true); + Http::TestResponseHeaderMapImpl headers{{":method", "get"}, {"content-length", "256"}}; + config_->setExpectedCompressCalls(2); + doResponseCompression(headers, true); } // Verifies hasCacheControlNoTransform function. @@ -238,8 +262,9 @@ TEST_F(CompressorFilterTest, HasCacheControlNoTransformNoCompression) { // value. 
TEST_F(CompressorFilterTest, HasCacheControlNoTransformCompression) { doRequest({{":method", "get"}, {"accept-encoding", "test, deflate"}}, true); - doResponseCompression( - {{":method", "get"}, {"content-length", "256"}, {"cache-control", "no-cache"}}, false); + Http::TestResponseHeaderMapImpl headers{ + {":method", "get"}, {"content-length", "256"}, {"cache-control", "no-cache"}}; + doResponseCompression(headers, false); } TEST_F(CompressorFilterTest, NoAcceptEncodingHeader) { @@ -351,10 +376,19 @@ TEST_F(CompressorFilterTest, IsAcceptEncodingAllowed) { Stats::TestUtil::TestStore stats; NiceMock runtime; envoy::extensions::filters::http::compressor::v3::Compressor compressor; - TestUtility::loadFromJson("{}", compressor); + TestUtility::loadFromJson(R"EOF( +{ + "compressor_library": { + "typed_config": { + "@type": "type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip" + } + } +} +)EOF", + compressor); CompressorFilterConfigSharedPtr config2; config2 = - std::make_shared(compressor, "test2.", stats, runtime, "test2"); + std::make_shared(compressor, "test2.", stats, runtime, "test2"); std::unique_ptr filter2 = std::make_unique(config2); NiceMock decoder_callbacks; filter2->setDecoderFilterCallbacks(decoder_callbacks); @@ -375,10 +409,19 @@ TEST_F(CompressorFilterTest, IsAcceptEncodingAllowed) { ; NiceMock runtime; envoy::extensions::filters::http::compressor::v3::Compressor compressor; - TestUtility::loadFromJson("{}", compressor); + TestUtility::loadFromJson(R"EOF( +{ + "compressor_library": { + "typed_config": { + "@type": "type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip" + } + } +} +)EOF", + compressor); CompressorFilterConfigSharedPtr config2; config2 = - std::make_shared(compressor, "test2.", stats, runtime, "gzip"); + std::make_shared(compressor, "test2.", stats, runtime, "gzip"); std::unique_ptr gzip_filter = std::make_unique(config2); NiceMock decoder_callbacks; gzip_filter->setDecoderFilterCallbacks(decoder_callbacks); @@ -395,10 +438,19 @@ TEST_F(CompressorFilterTest, IsAcceptEncodingAllowed) { ; NiceMock runtime; envoy::extensions::filters::http::compressor::v3::Compressor compressor; - TestUtility::loadFromJson("{}", compressor); + TestUtility::loadFromJson(R"EOF( +{ + "compressor_library": { + "typed_config": { + "@type": "type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip" + } + } +} +)EOF", + compressor); CompressorFilterConfigSharedPtr config2; config2 = - std::make_shared(compressor, "test2.", stats, runtime, "test"); + std::make_shared(compressor, "test2.", stats, runtime, "test"); std::unique_ptr filter2 = std::make_unique(config2); NiceMock decoder_callbacks; filter2->setDecoderFilterCallbacks(decoder_callbacks); @@ -415,10 +467,19 @@ TEST_F(CompressorFilterTest, IsAcceptEncodingAllowed) { ; NiceMock runtime; envoy::extensions::filters::http::compressor::v3::Compressor compressor; - TestUtility::loadFromJson("{}", compressor); + TestUtility::loadFromJson(R"EOF( +{ + "compressor_library": { + "typed_config": { + "@type": "type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip" + } + } +} +)EOF", + compressor); CompressorFilterConfigSharedPtr config2; config2 = - std::make_shared(compressor, "test2.", stats, runtime, "test"); + std::make_shared(compressor, "test2.", stats, runtime, "test"); std::unique_ptr filter2 = std::make_unique(config2); NiceMock decoder_callbacks; filter2->setDecoderFilterCallbacks(decoder_callbacks); @@ -435,14 +496,23 @@ TEST_F(CompressorFilterTest, 
IsAcceptEncodingAllowed) { ; NiceMock runtime; envoy::extensions::filters::http::compressor::v3::Compressor compressor; - TestUtility::loadFromJson("{}", compressor); + TestUtility::loadFromJson(R"EOF( +{ + "compressor_library": { + "typed_config": { + "@type": "type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip" + } + } +} +)EOF", + compressor); CompressorFilterConfigSharedPtr config1; config1 = - std::make_shared(compressor, "test1.", stats, runtime, "test1"); + std::make_shared(compressor, "test1.", stats, runtime, "test1"); std::unique_ptr filter1 = std::make_unique(config1); CompressorFilterConfigSharedPtr config2; config2 = - std::make_shared(compressor, "test2.", stats, runtime, "test2"); + std::make_shared(compressor, "test2.", stats, runtime, "test2"); std::unique_ptr filter2 = std::make_unique(config2); NiceMock decoder_callbacks; filter1->setDecoderFilterCallbacks(decoder_callbacks); @@ -465,14 +535,23 @@ TEST_F(CompressorFilterTest, IsAcceptEncodingAllowed) { ; NiceMock runtime; envoy::extensions::filters::http::compressor::v3::Compressor compressor; - TestUtility::loadFromJson("{}", compressor); + TestUtility::loadFromJson(R"EOF( +{ + "compressor_library": { + "typed_config": { + "@type": "type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip" + } + } +} +)EOF", + compressor); CompressorFilterConfigSharedPtr config1; config1 = - std::make_shared(compressor, "test1.", stats, runtime, "test1"); + std::make_shared(compressor, "test1.", stats, runtime, "test1"); std::unique_ptr filter1 = std::make_unique(config1); CompressorFilterConfigSharedPtr config2; config2 = - std::make_shared(compressor, "test2.", stats, runtime, "test2"); + std::make_shared(compressor, "test2.", stats, runtime, "test2"); std::unique_ptr filter2 = std::make_unique(config2); NiceMock decoder_callbacks; filter1->setDecoderFilterCallbacks(decoder_callbacks); @@ -495,7 +574,8 @@ TEST_F(CompressorFilterTest, AcceptEncodingNoCompression) { // Verifies that compression is NOT skipped when accept-encoding header is allowed. TEST_F(CompressorFilterTest, AcceptEncodingCompression) { doRequest({{":method", "get"}, {"accept-encoding", "test, deflate"}}, true); - doResponseCompression({{":method", "get"}, {"content-length", "256"}}, false); + Http::TestResponseHeaderMapImpl headers{{":method", "get"}, {"content-length", "256"}}; + doResponseCompression(headers, false); } // Verifies isMinimumContentLength function. @@ -517,7 +597,16 @@ TEST_F(CompressorFilterTest, IsMinimumContentLength) { EXPECT_TRUE(isMinimumContentLength(headers)); } - setUpFilter(R"EOF({"content_length": 500})EOF"); + setUpFilter(R"EOF( +{ + "content_length": 500, + "compressor_library": { + "typed_config": { + "@type": "type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip" + } + } +} +)EOF"); { Http::TestResponseHeaderMapImpl headers = {{"content-length", "501"}}; EXPECT_TRUE(isMinimumContentLength(headers)); @@ -540,9 +629,19 @@ TEST_F(CompressorFilterTest, ContentLengthNoCompression) { // Verifies that compression is NOT skipped when content-length header is allowed. 
TEST_F(CompressorFilterTest, ContentLengthCompression) { - setUpFilter(R"EOF({"content_length": 500})EOF"); + setUpFilter(R"EOF( +{ + "content_length": 500, + "compressor_library": { + "typed_config": { + "@type": "type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip" + } + } +} +)EOF"); doRequest({{":method", "get"}, {"accept-encoding", "test"}}, true); - doResponseCompression({{":method", "get"}, {"content-length", "1000"}}, false); + Http::TestResponseHeaderMapImpl headers{{":method", "get"}, {"content-length", "1000"}}; + doResponseCompression(headers, false); } // Verifies isContentTypeAllowed function. @@ -603,7 +702,12 @@ TEST_F(CompressorFilterTest, IsContentTypeAllowed) { "text/html", "xyz/svg+xml", "Test/INSENSITIVE" - ] + ], + "compressor_library": { + "typed_config": { + "@type": "type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip" + } + } } )EOF"); @@ -641,7 +745,12 @@ TEST_F(CompressorFilterTest, ContentTypeNoCompression) { "application/json", "font/eot", "image/svg+xml" - ] + ], + "compressor_library": { + "typed_config": { + "@type": "type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip" + } + } } )EOF"); doRequest({{":method", "get"}, {"accept-encoding", "test"}}, true); @@ -653,10 +762,10 @@ TEST_F(CompressorFilterTest, ContentTypeNoCompression) { // Verifies that compression is NOT skipped when content-encoding header is allowed. TEST_F(CompressorFilterTest, ContentTypeCompression) { doRequest({{":method", "get"}, {"accept-encoding", "test"}}, true); - doResponseCompression({{":method", "get"}, - {"content-length", "256"}, - {"content-type", "application/json;charset=utf-8"}}, - false); + Http::TestResponseHeaderMapImpl headers{{":method", "get"}, + {"content-length", "256"}, + {"content-type", "application/json;charset=utf-8"}}; + doResponseCompression(headers, false); } // Verifies sanitizeEtagHeader function. @@ -698,7 +807,16 @@ TEST_F(CompressorFilterTest, IsEtagAllowed) { EXPECT_EQ(0, stats_.counter("test.test.not_compressed_etag").value()); } - setUpFilter(R"EOF({ "disable_on_etag_header": true })EOF"); + setUpFilter(R"EOF( +{ + "disable_on_etag_header": true, + "compressor_library": { + "typed_config": { + "@type": "type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip" + } + } +} +)EOF"); { Http::TestResponseHeaderMapImpl headers = {{"etag", R"EOF(W/"686897696a7c876b7e")EOF"}}; EXPECT_FALSE(isEtagAllowed(headers)); @@ -718,22 +836,28 @@ TEST_F(CompressorFilterTest, IsEtagAllowed) { // Verifies that compression is skipped when etag header is NOT allowed. TEST_F(CompressorFilterTest, EtagNoCompression) { - setUpFilter(R"EOF({ "disable_on_etag_header": true })EOF"); + setUpFilter(R"EOF( +{ + "disable_on_etag_header": true, + "compressor_library": { + "typed_config": { + "@type": "type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip" + } + } +} +)EOF"); doRequest({{":method", "get"}, {"accept-encoding", "test"}}, true); doResponseNoCompression( {{":method", "get"}, {"content-length", "256"}, {"etag", R"EOF(W/"686897696a7c876b7e")EOF"}}); EXPECT_EQ(1, stats_.counter("test.test.not_compressed_etag").value()); } -// Verifies that compression is skipped when etag header is NOT allowed. +// Verifies that compression is not skipped when strong etag header is present. 
TEST_F(CompressorFilterTest, EtagCompression) { doRequest({{":method", "get"}, {"accept-encoding", "test"}}, true); Http::TestResponseHeaderMapImpl headers{ {":method", "get"}, {"content-length", "256"}, {"etag", "686897696a7c876b7e"}}; - feedBuffer(256); - NiceMock decoder_callbacks; - filter_->setDecoderFilterCallbacks(decoder_callbacks); - EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(headers, false)); + doResponseCompression(headers, false); EXPECT_FALSE(headers.has("etag")); EXPECT_EQ("test", headers.get_("content-encoding")); } @@ -777,8 +901,9 @@ TEST_F(CompressorFilterTest, IsTransferEncodingAllowed) { // Tests compression when Transfer-Encoding header exists. TEST_F(CompressorFilterTest, TransferEncodingChunked) { doRequest({{":method", "get"}, {"accept-encoding", "test"}}, true); - doResponseCompression( - {{":method", "get"}, {"content-length", "256"}, {"transfer-encoding", "chunked"}}, false); + Http::TestResponseHeaderMapImpl headers{ + {":method", "get"}, {"content-length", "256"}, {"transfer-encoding", "chunked"}}; + doResponseCompression(headers, false); } // Tests compression when Transfer-Encoding header exists. @@ -848,8 +973,7 @@ TEST_F(CompressorFilterTest, NoVaryHeader) { filter_->setDecoderFilterCallbacks(decoder_callbacks); doRequest({{":method", "get"}, {"accept-encoding", "test"}}, true); Http::TestResponseHeaderMapImpl headers{{":method", "get"}, {"content-length", "256"}}; - feedBuffer(256); - EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(headers, false)); + doResponseCompression(headers, false); EXPECT_TRUE(headers.has("vary")); EXPECT_EQ("Accept-Encoding", headers.get_("vary")); } @@ -861,8 +985,7 @@ TEST_F(CompressorFilterTest, VaryOtherValues) { doRequest({{":method", "get"}, {"accept-encoding", "test"}}, true); Http::TestResponseHeaderMapImpl headers{ {":method", "get"}, {"content-length", "256"}, {"vary", "User-Agent, Cookie"}}; - feedBuffer(256); - EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(headers, false)); + doResponseCompression(headers, false); EXPECT_TRUE(headers.has("vary")); EXPECT_EQ("User-Agent, Cookie, Accept-Encoding", headers.get_("vary")); } @@ -874,8 +997,7 @@ TEST_F(CompressorFilterTest, VaryAlreadyHasAcceptEncoding) { doRequest({{":method", "get"}, {"accept-encoding", "test"}}, true); Http::TestResponseHeaderMapImpl headers{ {":method", "get"}, {"content-length", "256"}, {"vary", "accept-encoding"}}; - feedBuffer(256); - EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(headers, false)); + doResponseCompression(headers, false); EXPECT_TRUE(headers.has("vary")); EXPECT_EQ("accept-encoding, Accept-Encoding", headers.get_("vary")); } @@ -886,13 +1008,30 @@ TEST_F(CompressorFilterTest, RemoveAcceptEncodingHeader) { filter_->setDecoderFilterCallbacks(decoder_callbacks); { Http::TestRequestHeaderMapImpl headers = {{"accept-encoding", "deflate, test, gzip, br"}}; - setUpFilter(R"EOF({"remove_accept_encoding_header": true})EOF"); + setUpFilter(R"EOF( +{ + "remove_accept_encoding_header": true, + "compressor_library": { + "typed_config": { + "@type": "type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip" + } + } +} +)EOF"); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, true)); EXPECT_FALSE(headers.has("accept-encoding")); } { Http::TestRequestHeaderMapImpl headers = {{"accept-encoding", "deflate, test, gzip, br"}}; - setUpFilter("{}"); + setUpFilter(R"EOF( +{ + "compressor_library": { + "typed_config": 
{ + "@type": "type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip" + } + } +} +)EOF"); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, true)); EXPECT_TRUE(headers.has("accept-encoding")); EXPECT_EQ("deflate, test, gzip, br", headers.get_("accept-encoding")); diff --git a/test/extensions/filters/http/compressor/BUILD b/test/extensions/filters/http/compressor/BUILD new file mode 100644 index 000000000000..87a1a23d04f2 --- /dev/null +++ b/test/extensions/filters/http/compressor/BUILD @@ -0,0 +1,42 @@ +licenses(["notice"]) # Apache 2 + +load( + "//bazel:envoy_build_system.bzl", + "envoy_package", +) +load( + "//test/extensions:extensions_build_system.bzl", + "envoy_extension_cc_test", +) + +envoy_package() + +envoy_extension_cc_test( + name = "compressor_filter_test", + srcs = [ + "compressor_filter_test.cc", + ], + extension_name = "envoy.filters.http.compressor", + deps = [ + "//source/extensions/filters/http/compressor:compressor_filter_lib", + "//test/mocks/compression/compressor:compressor_mocks", + "//test/mocks/runtime:runtime_mocks", + "//test/test_common:utility_lib", + ], +) + +envoy_extension_cc_test( + name = "compressor_filter_integration_test", + srcs = [ + "compressor_filter_integration_test.cc", + ], + extension_name = "envoy.filters.http.compressor", + deps = [ + "//source/common/decompressor:decompressor_lib", + "//source/extensions/compression/gzip/compressor:config", + "//source/extensions/filters/http/compressor:config", + "//test/integration:http_integration_lib", + "//test/test_common:simulated_time_system_lib", + "//test/test_common:utility_lib", + ], +) diff --git a/test/extensions/filters/http/compressor/compressor_filter_integration_test.cc b/test/extensions/filters/http/compressor/compressor_filter_integration_test.cc new file mode 100644 index 000000000000..c43869f9bc8e --- /dev/null +++ b/test/extensions/filters/http/compressor/compressor_filter_integration_test.cc @@ -0,0 +1,317 @@ +#include "envoy/event/timer.h" + +#include "common/decompressor/zlib_decompressor_impl.h" + +#include "test/integration/http_integration.h" +#include "test/test_common/simulated_time_system.h" +#include "test/test_common/utility.h" + +#include "gtest/gtest.h" + +namespace Envoy { + +class CompressorIntegrationTest : public testing::TestWithParam, + public Event::SimulatedTimeSystem, + public HttpIntegrationTest { +public: + CompressorIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, GetParam()) {} + + void SetUp() override { decompressor_.init(window_bits); } + void TearDown() override { cleanupUpstreamAndDownstream(); } + + void initializeFilter(const std::string& config) { + config_helper_.addFilter(config); + initialize(); + codec_client_ = makeHttpConnection(makeClientConnection((lookupPort("http")))); + } + + void doRequestAndCompression(Http::TestHeaderMapImpl&& request_headers, + Http::TestHeaderMapImpl&& response_headers) { + uint64_t content_length; + ASSERT_TRUE(absl::SimpleAtoi(response_headers.get_("content-length"), &content_length)); + const Buffer::OwnedImpl expected_response{std::string(content_length, 'a')}; + auto response = + sendRequestAndWaitForResponse(request_headers, 0, response_headers, content_length); + EXPECT_TRUE(upstream_request_->complete()); + EXPECT_EQ(0U, upstream_request_->bodyLength()); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + ASSERT_TRUE(response->headers().ContentEncoding() != nullptr); + 
EXPECT_EQ(Http::Headers::get().ContentEncodingValues.Gzip, + response->headers().ContentEncoding()->value().getStringView()); + ASSERT_TRUE(response->headers().TransferEncoding() != nullptr); + EXPECT_EQ(Http::Headers::get().TransferEncodingValues.Chunked, + response->headers().TransferEncoding()->value().getStringView()); + + Buffer::OwnedImpl decompressed_response{}; + const Buffer::OwnedImpl compressed_response{response->body()}; + decompressor_.decompress(compressed_response, decompressed_response); + ASSERT_EQ(content_length, decompressed_response.length()); + EXPECT_TRUE(TestUtility::buffersEqual(expected_response, decompressed_response)); + } + + void doRequestAndNoCompression(Http::TestHeaderMapImpl&& request_headers, + Http::TestHeaderMapImpl&& response_headers) { + uint64_t content_length; + ASSERT_TRUE(absl::SimpleAtoi(response_headers.get_("content-length"), &content_length)); + auto response = + sendRequestAndWaitForResponse(request_headers, 0, response_headers, content_length); + EXPECT_TRUE(upstream_request_->complete()); + EXPECT_EQ(0U, upstream_request_->bodyLength()); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + ASSERT_TRUE(response->headers().ContentEncoding() == nullptr); + ASSERT_EQ(content_length, response->body().size()); + EXPECT_EQ(response->body(), std::string(content_length, 'a')); + } + + const std::string full_config{R"EOF( + name: compressor + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.compressor.v3.Compressor + disable_on_etag_header: true + content_length: 100 + content_type: + - text/html + - application/json + compressor_library: + name: testlib + typed_config: + "@type": type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip + memory_level: 3 + window_bits: 10 + compression_level: best_compression + compression_strategy: rle + )EOF"}; + + const std::string default_config{R"EOF( + name: envoy.filters.http.compressor + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.compressor.v3.Compressor + compressor_library: + name: testlib + typed_config: + "@type": type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip + )EOF"}; + + const uint64_t window_bits{15 | 16}; + + Decompressor::ZlibDecompressorImpl decompressor_{}; +}; + +INSTANTIATE_TEST_SUITE_P(IpVersions, CompressorIntegrationTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); + +/** + * Exercises gzip compression with default configuration. + */ +TEST_P(CompressorIntegrationTest, AcceptanceDefaultConfigTest) { + initializeFilter(default_config); + doRequestAndCompression(Http::TestRequestHeaderMapImpl{{":method", "GET"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"accept-encoding", "deflate, gzip"}}, + Http::TestResponseHeaderMapImpl{{":status", "200"}, + {"content-length", "4400"}, + {"content-type", "text/xml"}}); +} + +/** + * Exercises gzip compression with full configuration. 
+ */ +TEST_P(CompressorIntegrationTest, AcceptanceFullConfigTest) { + initializeFilter(full_config); + doRequestAndCompression(Http::TestRequestHeaderMapImpl{{":method", "GET"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"accept-encoding", "deflate, gzip"}}, + Http::TestResponseHeaderMapImpl{{":status", "200"}, + {"content-length", "4400"}, + {"content-type", "application/json"}}); +} + +/** + * Exercises filter when client request contains 'identity' type. + */ +TEST_P(CompressorIntegrationTest, IdentityAcceptEncoding) { + initializeFilter(default_config); + doRequestAndNoCompression(Http::TestRequestHeaderMapImpl{{":method", "GET"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"accept-encoding", "identity"}}, + Http::TestResponseHeaderMapImpl{{":status", "200"}, + {"content-length", "128"}, + {"content-type", "text/plain"}}); +} + +/** + * Exercises filter when client request contains unsupported 'accept-encoding' type. + */ +TEST_P(CompressorIntegrationTest, NotSupportedAcceptEncoding) { + initializeFilter(default_config); + doRequestAndNoCompression(Http::TestRequestHeaderMapImpl{{":method", "GET"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"accept-encoding", "deflate, br"}}, + Http::TestResponseHeaderMapImpl{{":status", "200"}, + {"content-length", "128"}, + {"content-type", "text/plain"}}); +} + +/** + * Exercises filter when upstream response is already encoded. + */ +TEST_P(CompressorIntegrationTest, UpstreamResponseAlreadyEncoded) { + initializeFilter(default_config); + Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"accept-encoding", "deflate, gzip"}}; + + Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}, + {"content-encoding", "br"}, + {"content-length", "128"}, + {"content-type", "application/json"}}; + + auto response = sendRequestAndWaitForResponse(request_headers, 0, response_headers, 128); + + EXPECT_TRUE(upstream_request_->complete()); + EXPECT_EQ(0U, upstream_request_->bodyLength()); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + ASSERT_EQ("br", response->headers().ContentEncoding()->value().getStringView()); + EXPECT_EQ(128U, response->body().size()); +} + +/** + * Exercises filter when upstream responds with content length below the default threshold. + */ +TEST_P(CompressorIntegrationTest, NotEnoughContentLength) { + initializeFilter(default_config); + Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"accept-encoding", "deflate, gzip"}}; + + Http::TestResponseHeaderMapImpl response_headers{ + {":status", "200"}, {"content-length", "10"}, {"content-type", "application/json"}}; + + auto response = sendRequestAndWaitForResponse(request_headers, 0, response_headers, 10); + + EXPECT_TRUE(upstream_request_->complete()); + EXPECT_EQ(0U, upstream_request_->bodyLength()); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + ASSERT_TRUE(response->headers().ContentEncoding() == nullptr); + EXPECT_EQ(10U, response->body().size()); +} + +/** + * Exercises filter when response from upstream service is empty. 
+ */ +TEST_P(CompressorIntegrationTest, EmptyResponse) { + initializeFilter(default_config); + Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"accept-encoding", "deflate, gzip"}}; + + Http::TestResponseHeaderMapImpl response_headers{{":status", "204"}, {"content-length", "0"}}; + + auto response = sendRequestAndWaitForResponse(request_headers, 0, response_headers, 0); + + EXPECT_TRUE(upstream_request_->complete()); + EXPECT_EQ(0U, upstream_request_->bodyLength()); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("204", response->headers().Status()->value().getStringView()); + ASSERT_TRUE(response->headers().ContentEncoding() == nullptr); + EXPECT_EQ(0U, response->body().size()); +} + +/** + * Exercises filter when upstream responds with restricted content-type value. + */ +TEST_P(CompressorIntegrationTest, SkipOnContentType) { + initializeFilter(full_config); + doRequestAndNoCompression(Http::TestRequestHeaderMapImpl{{":method", "GET"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"accept-encoding", "deflate, gzip"}}, + Http::TestResponseHeaderMapImpl{{":status", "200"}, + {"content-length", "128"}, + {"content-type", "application/xml"}}); +} + +/** + * Exercises filter when upstream responds with restricted cache-control value. + */ +TEST_P(CompressorIntegrationTest, SkipOnCacheControl) { + initializeFilter(full_config); + doRequestAndNoCompression(Http::TestRequestHeaderMapImpl{{":method", "GET"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"accept-encoding", "deflate, gzip"}}, + Http::TestResponseHeaderMapImpl{{":status", "200"}, + {"content-length", "128"}, + {"cache-control", "no-transform"}, + {"content-type", "application/json"}}); +} + +/** + * Exercises gzip compression when upstream returns a chunked response. + */ +TEST_P(CompressorIntegrationTest, AcceptanceFullConfigChunkedResponse) { + initializeFilter(full_config); + Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"accept-encoding", "deflate, gzip"}}; + + Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}, + {"content-type", "application/json"}}; + + auto response = sendRequestAndWaitForResponse(request_headers, 0, response_headers, 1024); + + EXPECT_TRUE(upstream_request_->complete()); + EXPECT_EQ(0U, upstream_request_->bodyLength()); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + ASSERT_EQ("gzip", response->headers().ContentEncoding()->value().getStringView()); + ASSERT_EQ("chunked", response->headers().TransferEncoding()->value().getStringView()); +} + +/** + * Verify Vary header values are preserved. 
+ */ +TEST_P(CompressorIntegrationTest, AcceptanceFullConfigVeryHeader) { + initializeFilter(default_config); + Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"accept-encoding", "deflate, gzip"}}; + + Http::TestResponseHeaderMapImpl response_headers{ + {":status", "200"}, {"content-type", "application/json"}, {"vary", "Cookie"}}; + + auto response = sendRequestAndWaitForResponse(request_headers, 0, response_headers, 1024); + + EXPECT_TRUE(upstream_request_->complete()); + EXPECT_EQ(0U, upstream_request_->bodyLength()); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + ASSERT_EQ("gzip", response->headers().ContentEncoding()->value().getStringView()); + ASSERT_EQ("Cookie, Accept-Encoding", response->headers().Vary()->value().getStringView()); +} +} // namespace Envoy diff --git a/test/extensions/filters/http/compressor/compressor_filter_test.cc b/test/extensions/filters/http/compressor/compressor_filter_test.cc new file mode 100644 index 000000000000..a8f2571f6266 --- /dev/null +++ b/test/extensions/filters/http/compressor/compressor_filter_test.cc @@ -0,0 +1,34 @@ +#include "extensions/filters/http/compressor/compressor_filter.h" + +#include "test/mocks/compression/compressor/mocks.h" +#include "test/mocks/runtime/mocks.h" +#include "test/mocks/stats/mocks.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace Compressor { +namespace { + +using testing::NiceMock; + +TEST(CompressorFilterConfigTests, MakeCompressorTest) { + const envoy::extensions::filters::http::compressor::v3::Compressor compressor_cfg; + NiceMock runtime; + Stats::TestUtil::TestStore stats; + auto compressor_factory(std::make_unique()); + EXPECT_CALL(*compressor_factory, createCompressor()).Times(1); + EXPECT_CALL(*compressor_factory, statsPrefix()).Times(1); + EXPECT_CALL(*compressor_factory, contentEncoding()).Times(1); + CompressorFilterConfig config(compressor_cfg, "test.compressor.", stats, runtime, + std::move(compressor_factory)); + Envoy::Compression::Compressor::CompressorPtr compressor = config.makeCompressor(); +} + +} // namespace +} // namespace Compressor +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/http/gzip/BUILD b/test/extensions/filters/http/gzip/BUILD index 3c2bb9059338..3fefc1e4034c 100644 --- a/test/extensions/filters/http/gzip/BUILD +++ b/test/extensions/filters/http/gzip/BUILD @@ -16,13 +16,15 @@ envoy_extension_cc_test( srcs = ["gzip_filter_test.cc"], extension_name = "envoy.filters.http.gzip", deps = [ - "//source/common/compressor:compressor_lib", "//source/common/decompressor:decompressor_lib", "//source/common/protobuf:utility_lib", + "//source/extensions/compression/gzip/compressor:compressor_lib", "//source/extensions/filters/http/gzip:config", "//source/extensions/filters/http/gzip:gzip_filter_lib", "//test/mocks/http:http_mocks", "//test/mocks/runtime:runtime_mocks", + "//test/mocks/server:server_mocks", + "//test/test_common:logging_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/extensions/filters/http/gzip/v3:pkg_cc_proto", ], diff --git a/test/extensions/filters/http/gzip/gzip_filter_integration_test.cc b/test/extensions/filters/http/gzip/gzip_filter_integration_test.cc index 83ecbb2c22ad..ad8c658e0502 100644 --- a/test/extensions/filters/http/gzip/gzip_filter_integration_test.cc +++ 
b/test/extensions/filters/http/gzip/gzip_filter_integration_test.cc @@ -110,7 +110,7 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, GzipIntegrationTest, /** * Exercises gzip compression with default configuration. */ -TEST_P(GzipIntegrationTest, AcceptanceDefaultConfigTest) { +TEST_P(GzipIntegrationTest, DEPRECATED_FEATURE_TEST(AcceptanceDefaultConfigTest)) { initializeFilter(default_config); doRequestAndCompression(Http::TestRequestHeaderMapImpl{{":method", "GET"}, {":path", "/test/long/url"}, @@ -140,7 +140,7 @@ TEST_P(GzipIntegrationTest, DEPRECATED_FEATURE_TEST(AcceptanceDeprecatedFullConf /** * Exercises gzip compression with full configuration. */ -TEST_P(GzipIntegrationTest, AcceptanceFullConfigTest) { +TEST_P(GzipIntegrationTest, DEPRECATED_FEATURE_TEST(AcceptanceFullConfigTest)) { initializeFilter(full_config); doRequestAndCompression(Http::TestRequestHeaderMapImpl{{":method", "GET"}, {":path", "/test/long/url"}, @@ -155,7 +155,7 @@ TEST_P(GzipIntegrationTest, AcceptanceFullConfigTest) { /** * Exercises filter when client request contains 'identity' type. */ -TEST_P(GzipIntegrationTest, IdentityAcceptEncoding) { +TEST_P(GzipIntegrationTest, DEPRECATED_FEATURE_TEST(IdentityAcceptEncoding)) { initializeFilter(default_config); doRequestAndNoCompression(Http::TestRequestHeaderMapImpl{{":method", "GET"}, {":path", "/test/long/url"}, @@ -170,7 +170,7 @@ TEST_P(GzipIntegrationTest, IdentityAcceptEncoding) { /** * Exercises filter when client request contains unsupported 'accept-encoding' type. */ -TEST_P(GzipIntegrationTest, NotSupportedAcceptEncoding) { +TEST_P(GzipIntegrationTest, DEPRECATED_FEATURE_TEST(NotSupportedAcceptEncoding)) { initializeFilter(default_config); doRequestAndNoCompression(Http::TestRequestHeaderMapImpl{{":method", "GET"}, {":path", "/test/long/url"}, @@ -185,7 +185,7 @@ TEST_P(GzipIntegrationTest, NotSupportedAcceptEncoding) { /** * Exercises filter when upstream response is already encoded. */ -TEST_P(GzipIntegrationTest, UpstreamResponseAlreadyEncoded) { +TEST_P(GzipIntegrationTest, DEPRECATED_FEATURE_TEST(UpstreamResponseAlreadyEncoded)) { initializeFilter(default_config); Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, {":path", "/test/long/url"}, @@ -211,7 +211,7 @@ TEST_P(GzipIntegrationTest, UpstreamResponseAlreadyEncoded) { /** * Exercises filter when upstream responds with content length below the default threshold. */ -TEST_P(GzipIntegrationTest, NotEnoughContentLength) { +TEST_P(GzipIntegrationTest, DEPRECATED_FEATURE_TEST(NotEnoughContentLength)) { initializeFilter(default_config); Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, {":path", "/test/long/url"}, @@ -235,7 +235,7 @@ TEST_P(GzipIntegrationTest, NotEnoughContentLength) { /** * Exercises filter when response from upstream service is empty. */ -TEST_P(GzipIntegrationTest, EmptyResponse) { +TEST_P(GzipIntegrationTest, DEPRECATED_FEATURE_TEST(EmptyResponse)) { initializeFilter(default_config); Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, {":path", "/test/long/url"}, @@ -258,7 +258,7 @@ TEST_P(GzipIntegrationTest, EmptyResponse) { /** * Exercises filter when upstream responds with restricted content-type value. 
*/ -TEST_P(GzipIntegrationTest, SkipOnContentType) { +TEST_P(GzipIntegrationTest, DEPRECATED_FEATURE_TEST(SkipOnContentType)) { initializeFilter(full_config); doRequestAndNoCompression(Http::TestRequestHeaderMapImpl{{":method", "GET"}, {":path", "/test/long/url"}, @@ -273,7 +273,7 @@ TEST_P(GzipIntegrationTest, SkipOnContentType) { /** * Exercises filter when upstream responds with restricted cache-control value. */ -TEST_P(GzipIntegrationTest, SkipOnCacheControl) { +TEST_P(GzipIntegrationTest, DEPRECATED_FEATURE_TEST(SkipOnCacheControl)) { initializeFilter(full_config); doRequestAndNoCompression(Http::TestRequestHeaderMapImpl{{":method", "GET"}, {":path", "/test/long/url"}, @@ -289,7 +289,7 @@ TEST_P(GzipIntegrationTest, SkipOnCacheControl) { /** * Exercises gzip compression when upstream returns a chunked response. */ -TEST_P(GzipIntegrationTest, AcceptanceFullConfigChunkedResponse) { +TEST_P(GzipIntegrationTest, DEPRECATED_FEATURE_TEST(AcceptanceFullConfigChunkedResponse)) { initializeFilter(full_config); Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, {":path", "/test/long/url"}, @@ -313,7 +313,7 @@ TEST_P(GzipIntegrationTest, AcceptanceFullConfigChunkedResponse) { /** * Verify Vary header values are preserved. */ -TEST_P(GzipIntegrationTest, AcceptanceFullConfigVeryHeader) { +TEST_P(GzipIntegrationTest, DEPRECATED_FEATURE_TEST(AcceptanceFullConfigVeryHeader)) { initializeFilter(default_config); Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, {":path", "/test/long/url"}, diff --git a/test/extensions/filters/http/gzip/gzip_filter_test.cc b/test/extensions/filters/http/gzip/gzip_filter_test.cc index 565b84de2523..74bf5f7a8a60 100644 --- a/test/extensions/filters/http/gzip/gzip_filter_test.cc +++ b/test/extensions/filters/http/gzip/gzip_filter_test.cc @@ -3,15 +3,18 @@ #include "envoy/extensions/filters/http/gzip/v3/gzip.pb.h" #include "common/common/hex.h" -#include "common/compressor/zlib_compressor_impl.h" #include "common/decompressor/zlib_decompressor_impl.h" #include "common/protobuf/utility.h" +#include "extensions/compression/gzip/compressor/zlib_compressor_impl.h" +#include "extensions/filters/http/gzip/config.h" #include "extensions/filters/http/gzip/gzip_filter.h" #include "test/mocks/http/mocks.h" #include "test/mocks/runtime/mocks.h" +#include "test/mocks/server/mocks.h" #include "test/mocks/stats/mocks.h" +#include "test/test_common/logging.h" #include "test/test_common/utility.h" #include "absl/container/fixed_array.h" @@ -120,8 +123,10 @@ class GzipFilterTest : public testing::Test { } void expectValidCompressionStrategyAndLevel( - Compressor::ZlibCompressorImpl::CompressionStrategy strategy, absl::string_view strategy_name, - Compressor::ZlibCompressorImpl::CompressionLevel level, absl::string_view level_name) { + Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy strategy, + absl::string_view strategy_name, + Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel level, + absl::string_view level_name) { setUpFilter(fmt::format(R"EOF({{"compression_strategy": "{}", "compression_level": "{}"}})EOF", strategy_name, level_name)); EXPECT_EQ(strategy, config_->compressionStrategy()); @@ -189,26 +194,26 @@ TEST_F(GzipFilterTest, DefaultConfigValues) { EXPECT_EQ(28, config_->windowBits()); EXPECT_EQ(false, config_->disableOnEtagHeader()); EXPECT_EQ(false, config_->removeAcceptEncodingHeader()); - EXPECT_EQ(Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, + 
EXPECT_EQ(Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, config_->compressionStrategy()); - EXPECT_EQ(Compressor::ZlibCompressorImpl::CompressionLevel::Standard, + EXPECT_EQ(Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Standard, config_->compressionLevel()); EXPECT_EQ(18, config_->contentTypeValues().size()); } TEST_F(GzipFilterTest, AvailableCombinationCompressionStrategyAndLevelConfig) { expectValidCompressionStrategyAndLevel( - Compressor::ZlibCompressorImpl::CompressionStrategy::Filtered, "FILTERED", - Compressor::ZlibCompressorImpl::CompressionLevel::Best, "BEST"); + Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Filtered, "FILTERED", + Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Best, "BEST"); expectValidCompressionStrategyAndLevel( - Compressor::ZlibCompressorImpl::CompressionStrategy::Huffman, "HUFFMAN", - Compressor::ZlibCompressorImpl::CompressionLevel::Best, "BEST"); + Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Huffman, "HUFFMAN", + Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Best, "BEST"); expectValidCompressionStrategyAndLevel( - Compressor::ZlibCompressorImpl::CompressionStrategy::Rle, "RLE", - Compressor::ZlibCompressorImpl::CompressionLevel::Speed, "SPEED"); + Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Rle, "RLE", + Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Speed, "SPEED"); expectValidCompressionStrategyAndLevel( - Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, "DEFAULT", - Compressor::ZlibCompressorImpl::CompressionLevel::Standard, "DEFAULT"); + Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, "DEFAULT", + Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Standard, "DEFAULT"); } // Acceptance Testing with default configuration. @@ -407,16 +412,6 @@ TEST_F(GzipFilterTest, RemoveAcceptEncodingHeader) { } } -// Test that the deprecated extension name still functions. -TEST(GzipFilterConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) { - const std::string deprecated_name = "envoy.gzip"; - - ASSERT_NE( - nullptr, - Registry::FactoryRegistry::getFactory( - deprecated_name)); -} - // Test setting zlib's chunk size. TEST_F(GzipFilterTest, ChunkSize) { // Default @@ -432,6 +427,49 @@ TEST_F(GzipFilterTest, ChunkSize) { EXPECT_EQ(config_->chunkSize(), 8192); } +// Test that the deprecated extension name still functions. +TEST(GzipFilterConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) { + const std::string deprecated_name = "envoy.gzip"; + + ASSERT_NE( + nullptr, + Registry::FactoryRegistry::getFactory( + deprecated_name)); +} + +// Test that the deprecated extension triggers an exception. +TEST(GzipFilterFactoryTest, DEPRECATED_FEATURE_TEST(TestCheckDeprecatedExtensionThrows)) { + NiceMock context; + GzipFilterFactory factory; + envoy::extensions::filters::http::gzip::v3::Gzip config; + + EXPECT_CALL( + context.runtime_loader_.snapshot_, + deprecatedFeatureEnabled("envoy.deprecated_features.allow_deprecated_gzip_http_filter", _)) + .WillRepeatedly(Return(false)); + + EXPECT_THROW_WITH_REGEX(factory.createFilterFactoryFromProto(config, "stats.", context), + EnvoyException, + "Using deprecated extension 'envoy.extensions.filters.http.gzip'.*"); +} + +// Test that the deprecated extension gives a deprecation warning. 
+TEST(GzipFilterFactoryTest, DEPRECATED_FEATURE_TEST(TestCheckDeprecatedExtensionWarns)) { + NiceMock context; + GzipFilterFactory factory; + envoy::extensions::filters::http::gzip::v3::Gzip config; + + EXPECT_CALL( + context.runtime_loader_.snapshot_, + deprecatedFeatureEnabled("envoy.deprecated_features.allow_deprecated_gzip_http_filter", _)) + .WillRepeatedly(Return(true)); + + EXPECT_NO_THROW(factory.createFilterFactoryFromProto(config, "stats.", context)); + + EXPECT_LOG_CONTAINS("warn", "Using deprecated extension 'envoy.extensions.filters.http.gzip'.", + factory.createFilterFactoryFromProto(config, "stats.", context)); +} + } // namespace Gzip } // namespace HttpFilters } // namespace Extensions diff --git a/test/mocks/compression/compressor/BUILD b/test/mocks/compression/compressor/BUILD new file mode 100644 index 000000000000..e598f5cc5cf5 --- /dev/null +++ b/test/mocks/compression/compressor/BUILD @@ -0,0 +1,19 @@ +licenses(["notice"]) # Apache 2 + +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_mock", + "envoy_package", +) + +envoy_package() + +envoy_cc_mock( + name = "compressor_mocks", + srcs = ["mocks.cc"], + hdrs = ["mocks.h"], + deps = [ + "//include/envoy/compression/compressor:compressor_config_interface", + "//include/envoy/compression/compressor:compressor_interface", + ], +) diff --git a/test/mocks/compression/compressor/mocks.cc b/test/mocks/compression/compressor/mocks.cc new file mode 100644 index 000000000000..9d2ff9cac79e --- /dev/null +++ b/test/mocks/compression/compressor/mocks.cc @@ -0,0 +1,21 @@ +#include "test/mocks/compression/compressor/mocks.h" + +using testing::ReturnRef; + +namespace Envoy { +namespace Compression { +namespace Compressor { + +MockCompressor::MockCompressor() = default; +MockCompressor::~MockCompressor() = default; + +MockCompressorFactory::MockCompressorFactory() { + ON_CALL(*this, statsPrefix()).WillByDefault(ReturnRef(stats_prefix_)); + ON_CALL(*this, contentEncoding()).WillByDefault(ReturnRef(content_encoding_)); +} + +MockCompressorFactory::~MockCompressorFactory() = default; + +} // namespace Compressor +} // namespace Compression +} // namespace Envoy diff --git a/test/mocks/compression/compressor/mocks.h b/test/mocks/compression/compressor/mocks.h new file mode 100644 index 000000000000..e5438699484f --- /dev/null +++ b/test/mocks/compression/compressor/mocks.h @@ -0,0 +1,37 @@ +#pragma once + +#include "envoy/compression/compressor/compressor.h" +#include "envoy/compression/compressor/config.h" + +#include "gmock/gmock.h" + +namespace Envoy { +namespace Compression { +namespace Compressor { + +class MockCompressor : public Compressor { +public: + MockCompressor(); + ~MockCompressor() override; + + // Compressor::Compressor + MOCK_METHOD(void, compress, (Buffer::Instance & buffer, State state)); +}; + +class MockCompressorFactory : public CompressorFactory { +public: + MockCompressorFactory(); + ~MockCompressorFactory() override; + + // Compressor::CompressorFactory + MOCK_METHOD(CompressorPtr, createCompressor, ()); + MOCK_METHOD(const std::string&, statsPrefix, (), (const)); + MOCK_METHOD(const std::string&, contentEncoding, (), (const)); + + const std::string stats_prefix_{"mock"}; + const std::string content_encoding_{"mock"}; +}; + +} // namespace Compressor +} // namespace Compression +} // namespace Envoy diff --git a/tools/spelling/spelling_dictionary.txt b/tools/spelling/spelling_dictionary.txt index e5af042bf9cd..636112319b38 100644 --- a/tools/spelling/spelling_dictionary.txt +++ 
b/tools/spelling/spelling_dictionary.txt @@ -477,6 +477,7 @@ coroutines cors cout coverity +cplusplus cpuset creds crypto From 6aaad264c70b7407383e5fd8e39d602a7049d3b5 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Fri, 8 May 2020 16:49:23 -0400 Subject: [PATCH 119/909] http: pathless CONNECT (#11048) Removing the synthetic path added to CONNECT requests theoretically completing Envoy CONNECT support. Signed-off-by: Alyssa Wilk --- docs/root/version_history/current.rst | 1 + source/common/http/conn_manager_impl.cc | 32 +++++++++-------- source/common/http/http2/codec_impl.cc | 9 +++++ source/common/runtime/runtime_features.cc | 1 + test/common/http/BUILD | 1 + test/common/http/conn_manager_impl_test.cc | 34 +++++++++++++++++++ test/config/utility.cc | 20 +++++++++++ test/config/utility.h | 16 +++++---- test/integration/integration_test.cc | 16 +++------ test/integration/protocol_integration_test.cc | 6 +++- .../tcp_tunneling_integration_test.cc | 19 ++--------- tools/spelling/spelling_dictionary.txt | 1 + 12 files changed, 105 insertions(+), 51 deletions(-) diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index bc30af368871..61d3eb38b5b4 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -26,6 +26,7 @@ Changes Can be reverted temporarily by setting runtime feature `envoy.reloadable_features.fix_upgrade_response` to false. * http: remove legacy connection pool code and their runtime features: `envoy.reloadable_features.new_http1_connection_pool_behavior` and `envoy.reloadable_features.new_http2_connection_pool_behavior`. +* http: stopped adding a synthetic path to CONNECT requests, meaning unconfigured CONNECT requests will now return 404 instead of 403. This behavior can be temporarily reverted by setting `envoy.reloadable_features.stop_faking_paths` to false. * listener: added in place filter chain update flow for tcp listener update which doesn't close connections if the corresponding network filter chain is equivalent during the listener update. Can be disabled by setting runtime feature `envoy.reloadable_features.listener_in_place_filterchain_update` to false. Also added additional draining filter chain stat for :ref:`listener manager ` to track the number of draining filter chains and the number of in place update attempts. diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index 17e6db543c24..7d0494ce4606 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -38,6 +38,7 @@ #include "common/http/utility.h" #include "common/network/utility.h" #include "common/router/config_impl.h" +#include "common/runtime/runtime_features.h" #include "common/runtime/runtime_impl.h" #include "common/stats/timespan_impl.h" @@ -774,10 +775,8 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& he connection_manager_.read_callbacks_->connection().dispatcher()); request_headers_ = std::move(headers); - // TODO(alyssawilk) remove this synthetic path in a follow-up PR, including - // auditing of empty path headers. We check for path because HTTP/2 connect requests may have a - // path. 
- if (HeaderUtility::isConnect(*request_headers_) && !request_headers_->Path()) { + if (HeaderUtility::isConnect(*request_headers_) && !request_headers_->Path() && + !Runtime::runtimeFeatureEnabled("envoy.reloadable_features.stop_faking_paths")) { request_headers_->setPath("/"); } @@ -878,20 +877,23 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& he // Verify header sanity checks which should have been performed by the codec. ASSERT(HeaderUtility::requestHeadersValid(*request_headers_).has_value() == false); - // Currently we only support relative paths at the application layer. We expect the codec to have - // broken the path into pieces if applicable. NOTE: Currently the HTTP/1.1 codec only does this - // when the allow_absolute_url flag is enabled on the HCM. - // https://tools.ietf.org/html/rfc7230#section-5.3 We also need to check for the existence of - // :path because CONNECT does not have a path, and we don't support that currently. - if (!request_headers_->Path() || request_headers_->Path()->value().getStringView().empty() || - request_headers_->Path()->value().getStringView()[0] != '/') { - const bool has_path = - request_headers_->Path() && !request_headers_->Path()->value().getStringView().empty(); + // Check for the existence of the :path header for non-CONNECT requests. We expect the codec to + // have broken the path into pieces if applicable. NOTE: Currently the HTTP/1.1 codec only does + // this when the allow_absolute_url flag is enabled on the HCM. + if ((!HeaderUtility::isConnect(*request_headers_) && !request_headers_->Path()) || + (request_headers_->Path() && request_headers_->Path()->value().getStringView().empty())) { + sendLocalReply(Grpc::Common::hasGrpcContentType(*request_headers_), Code::NotFound, "", nullptr, + state_.is_head_request_, absl::nullopt, + StreamInfo::ResponseCodeDetails::get().MissingPath); + return; + } + + // Currently we only support relative paths at the application layer. + if (request_headers_->Path() && request_headers_->Path()->value().getStringView()[0] != '/') { connection_manager_.stats_.named_.downstream_rq_non_relative_path_.inc(); sendLocalReply(Grpc::Common::hasGrpcContentType(*request_headers_), Code::NotFound, "", nullptr, state_.is_head_request_, absl::nullopt, - has_path ? StreamInfo::ResponseCodeDetails::get().AbsolutePath - : StreamInfo::ResponseCodeDetails::get().MissingPath); + StreamInfo::ResponseCodeDetails::get().AbsolutePath); return; } diff --git a/source/common/http/http2/codec_impl.cc b/source/common/http/http2/codec_impl.cc index 13f96036146d..342519dbc669 100644 --- a/source/common/http/http2/codec_impl.cc +++ b/source/common/http/http2/codec_impl.cc @@ -162,8 +162,17 @@ void ConnectionImpl::ClientStreamImpl::encodeHeaders(const RequestHeaderMap& hea Http::Utility::transformUpgradeRequestFromH1toH2(*modified_headers); buildHeaders(final_headers, *modified_headers); } else if (headers.Method() && headers.Method()->value() == "CONNECT") { + // If this is not an upgrade style connect (above branch) it is a bytestream + // connect and should have :path and :protocol set accordingly + // As HTTP/1.1 does not require a path for CONNECT, we may have to add one + // if shifting codecs. For now, default to "/" - this can be made + // configurable if necessary. 
+ // https://tools.ietf.org/html/draft-kinnear-httpbis-http2-transport-02 modified_headers = createHeaderMap(headers); modified_headers->setProtocol(Headers::get().ProtocolValues.Bytestream); + if (!headers.Path()) { + modified_headers->setPath("/"); + } buildHeaders(final_headers, *modified_headers); } else { buildHeaders(final_headers, headers); diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc index 4725229e9b39..a5d84003a3a6 100644 --- a/source/common/runtime/runtime_features.cc +++ b/source/common/runtime/runtime_features.cc @@ -63,6 +63,7 @@ constexpr const char* runtime_features[] = { "envoy.reloadable_features.ext_authz_http_service_enable_case_sensitive_string_matcher", "envoy.reloadable_features.fix_upgrade_response", "envoy.reloadable_features.listener_in_place_filterchain_update", + "envoy.reloadable_features.stop_faking_paths", }; // This is a section for officially sanctioned runtime features which are too diff --git a/test/common/http/BUILD b/test/common/http/BUILD index d51e20b33e23..b5b736ff1fc4 100644 --- a/test/common/http/BUILD +++ b/test/common/http/BUILD @@ -230,6 +230,7 @@ envoy_cc_test( "//test/mocks/tracing:tracing_mocks", "//test/mocks/upstream:upstream_mocks", "//test/test_common:logging_lib", + "//test/test_common:test_runtime_lib", "//test/test_common:test_time_lib", "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", "@envoy_api//envoy/type/tracing/v3:pkg_cc_proto", diff --git a/test/common/http/conn_manager_impl_test.cc b/test/common/http/conn_manager_impl_test.cc index 27793279b03c..9483f32a4928 100644 --- a/test/common/http/conn_manager_impl_test.cc +++ b/test/common/http/conn_manager_impl_test.cc @@ -46,6 +46,7 @@ #include "test/mocks/upstream/mocks.h" #include "test/test_common/logging.h" #include "test/test_common/printers.h" +#include "test/test_common/test_runtime.h" #include "test/test_common/test_time.h" #include "gmock/gmock.h" @@ -2859,6 +2860,39 @@ TEST_F(HttpConnectionManagerImplTest, ConnectAsUpgrade) { conn_manager_->onData(fake_input, false); } +TEST_F(HttpConnectionManagerImplTest, ConnectLegacy) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.stop_faking_paths", "false"}}); + + setup(false, "envoy-custom-server", false); + + NiceMock encoder; + RequestDecoder* decoder = nullptr; + + EXPECT_CALL(filter_factory_, createUpgradeFilterChain("CONNECT", _, _)) + .WillRepeatedly(Return(false)); + + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + decoder = &conn_manager_->newStream(encoder); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":authority", "host"}, {":method", "CONNECT"}}}; + decoder->decodeHeaders(std::move(headers), false); + data.drain(4); + return Http::okStatus(); + })); + + EXPECT_CALL(encoder, encodeHeaders(_, _)) + .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { + EXPECT_EQ("403", headers.Status()->value().getStringView()); + })); + + // Kick off the incoming data. 
+ Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); +} + // Regression test for https://github.com/envoyproxy/envoy/issues/10138 TEST_F(HttpConnectionManagerImplTest, DrainCloseRaceWithClose) { InSequence s; diff --git a/test/config/utility.cc b/test/config/utility.cc index 9f80e901662b..076006ce2681 100644 --- a/test/config/utility.cc +++ b/test/config/utility.cc @@ -433,6 +433,26 @@ void ConfigHelper::addClusterFilterMetadata(absl::string_view metadata_yaml, } } +void ConfigHelper::setConnectConfig( + envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& hcm, + bool terminate_connect) { + auto* route_config = hcm.mutable_route_config(); + ASSERT_EQ(1, route_config->virtual_hosts_size()); + auto* route = route_config->mutable_virtual_hosts(0)->mutable_routes(0); + auto* match = route->mutable_match(); + match->Clear(); + match->mutable_connect_matcher(); + + if (terminate_connect) { + auto* upgrade = route->mutable_route()->add_upgrade_configs(); + upgrade->set_upgrade_type("CONNECT"); + upgrade->mutable_connect_config(); + } + + hcm.add_upgrade_configs()->set_upgrade_type("CONNECT"); + hcm.mutable_http2_protocol_options()->set_allow_connect(true); +} + void ConfigHelper::applyConfigModifiers() { for (const auto& config_modifier : config_modifiers_) { config_modifier(bootstrap_); diff --git a/test/config/utility.h b/test/config/utility.h index 77f0553a7bbf..cb3e1b8cfacd 100644 --- a/test/config/utility.h +++ b/test/config/utility.h @@ -27,6 +27,8 @@ namespace Envoy { class ConfigHelper { public: + using HttpConnectionManager = + envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager; struct ServerSslOptions { ServerSslOptions& setRsaCert(bool rsa_cert) { rsa_cert_ = rsa_cert; @@ -67,8 +69,7 @@ class ConfigHelper { envoy::extensions::transport_sockets::tls::v3::CommonTlsContext& common_context); using ConfigModifierFunction = std::function; - using HttpModifierFunction = std::function; + using HttpModifierFunction = std::function; // A basic configuration (admin port, cluster_0, one listener) with no network filters. static std::string baseConfig(); @@ -196,15 +197,16 @@ class ConfigHelper { void addClusterFilterMetadata(absl::string_view metadata_yaml, absl::string_view cluster_name = "cluster_0"); + // Given an HCM with the default config, set the matcher to be a connect matcher and enable + // CONNECT requests. + static void setConnectConfig(HttpConnectionManager& hcm, bool terminate_connect); + private: // Load the first HCM struct from the first listener into a parsed proto. - bool loadHttpConnectionManager( - envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& hcm); + bool loadHttpConnectionManager(HttpConnectionManager& hcm); // Take the contents of the provided HCM proto and stuff them into the first HCM // struct of the first listener. - void storeHttpConnectionManager( - const envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& - hcm); + void storeHttpConnectionManager(const HttpConnectionManager& hcm); // Finds the filter named 'name' from the first filter chain from the first listener. 
envoy::config::listener::v3::Filter* getFilterFromListener(const std::string& name); diff --git a/test/integration/integration_test.cc b/test/integration/integration_test.cc index 5f70de47465d..6f0a501ac3bc 100644 --- a/test/integration/integration_test.cc +++ b/test/integration/integration_test.cc @@ -771,7 +771,7 @@ TEST_P(IntegrationTest, AbsolutePathWithoutPort) { // Ensure that connect behaves the same with allow_absolute_url enabled and without TEST_P(IntegrationTest, Connect) { - const std::string& request = "CONNECT www.somewhere.com:80 HTTP/1.1\r\nHost: host\r\n\r\n"; + const std::string& request = "CONNECT www.somewhere.com:80 HTTP/1.1\r\n\r\n"; config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { // Clone the whole listener. auto static_resources = bootstrap.mutable_static_resources(); @@ -1301,14 +1301,11 @@ TEST_P(IntegrationTest, TestUpgradeHeaderInResponse) { TEST_P(IntegrationTest, ConnectWithNoBody) { config_helper_.addConfigModifier( [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& - hcm) -> void { - hcm.add_upgrade_configs()->set_upgrade_type("CONNECT"); - hcm.mutable_http2_protocol_options()->set_allow_connect(true); - }); + hcm) -> void { ConfigHelper::setConnectConfig(hcm, false); }); initialize(); IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("http")); - tcp_client->write("CONNECT host.com:80 HTTP/1.1\r\nHost: host\r\n\r\n", false); + tcp_client->write("CONNECT host.com:80 HTTP/1.1\r\n\r\n", false); FakeRawConnectionPtr fake_upstream_connection; ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); @@ -1338,16 +1335,13 @@ TEST_P(IntegrationTest, ConnectWithNoBody) { TEST_P(IntegrationTest, ConnectWithChunkedBody) { config_helper_.addConfigModifier( [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& - hcm) -> void { - hcm.add_upgrade_configs()->set_upgrade_type("CONNECT"); - hcm.mutable_http2_protocol_options()->set_allow_connect(true); - }); + hcm) -> void { ConfigHelper::setConnectConfig(hcm, false); }); initialize(); // Send the payload early so we can regression test that body data does not // get proxied until after the response headers are sent. IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("http")); - tcp_client->write("CONNECT host.com:80 HTTP/1.1\r\nHost: host\r\n\r\npayload", false); + tcp_client->write("CONNECT host.com:80 HTTP/1.1\r\n\r\npayload", false); FakeRawConnectionPtr fake_upstream_connection; ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); diff --git a/test/integration/protocol_integration_test.cc b/test/integration/protocol_integration_test.cc index b65c3a53312b..e09134290e70 100644 --- a/test/integration/protocol_integration_test.cc +++ b/test/integration/protocol_integration_test.cc @@ -1789,8 +1789,12 @@ TEST_P(DownstreamProtocolIntegrationTest, ConnectIsBlocked) { Http::TestRequestHeaderMapImpl{{":method", "CONNECT"}, {":authority", "host.com:80"}}); if (downstreamProtocol() == Http::CodecClient::Type::HTTP1) { + // TODO(alyssawilk) either reinstate prior behavior, or include a release + // note with this PR. + // Because CONNECT requests for HTTP/1 do not include a path, they will fail + // to find a route match and return a 404. 
response->waitForEndStream(); - EXPECT_EQ("403", response->headers().Status()->value().getStringView()); + EXPECT_EQ("404", response->headers().Status()->value().getStringView()); EXPECT_TRUE(response->complete()); } else { response->waitForReset(); diff --git a/test/integration/tcp_tunneling_integration_test.cc b/test/integration/tcp_tunneling_integration_test.cc index 497fc5ee5b46..97ee8abe49da 100644 --- a/test/integration/tcp_tunneling_integration_test.cc +++ b/test/integration/tcp_tunneling_integration_test.cc @@ -25,19 +25,7 @@ class ConnectTerminationIntegrationTest config_helper_.addConfigModifier( [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& hcm) { - auto* route_config = hcm.mutable_route_config(); - ASSERT_EQ(1, route_config->virtual_hosts_size()); - auto* route = route_config->mutable_virtual_hosts(0)->mutable_routes(0); - auto* match = route->mutable_match(); - match->Clear(); - match->mutable_connect_matcher(); - - auto* upgrade = route->mutable_route()->add_upgrade_configs(); - upgrade->set_upgrade_type("CONNECT"); - upgrade->mutable_connect_config(); - - hcm.add_upgrade_configs()->set_upgrade_type("CONNECT"); - hcm.mutable_http2_protocol_options()->set_allow_connect(true); + ConfigHelper::setConnectConfig(hcm, true); if (enable_timeout_) { hcm.mutable_stream_idle_timeout()->set_seconds(0); @@ -201,10 +189,7 @@ class ProxyingConnectIntegrationTest : public HttpProtocolIntegrationTest { void initialize() override { config_helper_.addConfigModifier( [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& - hcm) -> void { - hcm.add_upgrade_configs()->set_upgrade_type("CONNECT"); - hcm.mutable_http2_protocol_options()->set_allow_connect(true); - }); + hcm) -> void { ConfigHelper::setConnectConfig(hcm, false); }); HttpProtocolIntegrationTest::initialize(); } diff --git a/tools/spelling/spelling_dictionary.txt b/tools/spelling/spelling_dictionary.txt index 636112319b38..eac6b9880ae6 100644 --- a/tools/spelling/spelling_dictionary.txt +++ b/tools/spelling/spelling_dictionary.txt @@ -423,6 +423,7 @@ builtins bulkstrings bursty bytecode +bytestream cacheability callee callsite From 9eeaa4688c6d55b8c423aa3d65669388e5082323 Mon Sep 17 00:00:00 2001 From: htuch Date: Fri, 8 May 2020 16:50:42 -0400 Subject: [PATCH 120/909] ci: force /lgtm v2-freeze on any v2 API changes. (#11092) To assist the API shepherds in ensuring that no unintentional v2 freezes creep in, this PR extends our forked ownerscheck.star to force a "/lgtm v2-freeze" to be issued in order for v2 API changes to merge. The changes made to ownerscheck.star are: * Replace path prefix matching with regex matching. * Allow global approvers to be opted out of; we don't want a PR "approve" stamp to allow merges without an explicit v2 related LGTM. * Support custom GitHub status labels for each spec. 
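For illustration, a spec entry combining the new fields might look like the sketch below; the owner team and the `path` regex here are placeholders, not values added by this change:

    use(
        "github.com/envoyproxy/envoy/ci/repokitteh/modules/ownerscheck.star",
        paths=[
            {
                # Team whose sign-off is tracked; the trailing '!' makes the commit status blocking.
                "owner": "envoyproxy/example-shepherds!",
                # Interpreted as a regex over changed file names rather than a path prefix.
                "path": "api/envoy/example/.*",
                # GitHub label applied to any matching PR.
                "label": "example",
                # Require an explicit per-spec approval; a global PR approve no longer unblocks.
                "allow_global_approval": False,
                # Custom text used in the commit status maintained for this spec.
                "github_status_label": "example changes",
            },
        ],
    )

With `allow_global_approval` set to false, only the partial-approval flow (an explicit approval from the owning team scoped to the matched files) clears that spec's commit status.
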
Risk level: Low (CI only) Testing: Manual interactions with RK in #11092 Signed-off-by: Harvey Tuch --- ci/repokitteh/modules/ownerscheck.star | 45 +++++++++++++++++--------- repokitteh.star | 9 ++++++ 2 files changed, 39 insertions(+), 15 deletions(-) diff --git a/ci/repokitteh/modules/ownerscheck.star b/ci/repokitteh/modules/ownerscheck.star index 2eb379a9ccfd..b559fc114336 100644 --- a/ci/repokitteh/modules/ownerscheck.star +++ b/ci/repokitteh/modules/ownerscheck.star @@ -7,11 +7,13 @@ # "owner": "envoyproxy/api-shepherds!", # "path": "api/", # "label": "api", +# "allow_global_approval": True, +# "github_status_label" = "any API change", # }, # ], # ) # -# This module will maintain a commit status per specified path (also aka as spec). +# This module will maintain a commit status per specified path regex (also aka as spec). # # Two types of approvals: # 1. Global approvals, done by approving the PR using Github's review approval feature. @@ -19,7 +21,14 @@ # associated with the path. This does not affect GitHub's PR approve status, only # this module's maintained commit status. This approval is automatically revoked # if any further changes are done to the relevant files in this spec. +# +# By default, 'allow_global_approval' is true and either (1) or (2) above can unblock +# merges. If 'allow_global_approval' is set false, then only (2) will unblock a merge. +# +# 'label' refers to a GitHub label applied to any matching PR. The GitHub check status +# can be customized with `github_status_label`. +load("text", "match") load("github.com/repokitteh/modules/lib/utils.star", "react") def _store_partial_approval(who, files): @@ -44,11 +53,16 @@ def _get_relevant_specs(specs, changed_files): relevant = [] for spec in specs: - prefix = spec["path"] + path_match = spec["path"] - files = [f for f in changed_files if f['filename'].startswith(prefix)] + files = [f for f in changed_files if match(path_match, f['filename'])] + allow_global_approval = spec.get("allow_global_approval", True) + status_label = spec.get("github_status_label", "") if files: - relevant.append(struct(files=files, prefix=prefix, **spec)) + relevant.append(struct(files=files, + path_match=path_match, + allow_global_approval=allow_global_approval, + status_label=status_label)) print("specs: %s" % relevant) @@ -81,7 +95,7 @@ def _is_approved(spec, approvers): print("team %s(%d) = %s" % (team_name, team_id, required)) for r in required: - if any([a for a in approvers if a == r]): + if spec.allow_global_approval and any([a for a in approvers if a == r]): print("global approver: %s" % r) return True @@ -92,11 +106,12 @@ def _is_approved(spec, approvers): return False -def _update_status(owner, prefix, approved): +def _update_status(owner, status_label, path_match, approved): + changes_to = path_match or '/' github.create_status( state=approved and 'success' or 'pending', - context='%s must approve' % owner, - description='changes to %s' % (prefix or '/'), + context='%s must approve for %s' % (owner, status_label), + description='changes to %s' % changes_to, ) def _get_specs(config): @@ -122,7 +137,7 @@ def _reconcile(config, specs=None): results.append((spec, approved)) if spec.owner[-1] == '!': - _update_status(spec.owner[:-1], spec.prefix, approved) + _update_status(spec.owner[:-1], spec.status_label, spec.path_match, approved) if hasattr(spec, 'label'): if approved: @@ -150,13 +165,13 @@ def _comment(config, results, force=False): if mention[-1] == '!': mention = mention[:-1] - prefix = spec.prefix - if prefix: - prefix 
= ' for changes made to `' + prefix + '`' + match_description = spec.path_match + if match_description: + match_description = ' for changes made to `' + match_description + '`' mode = spec.owner[-1] == '!' and 'approval' or 'fyi' - key = "ownerscheck/%s/%s" % (spec.owner, spec.prefix) + key = "ownerscheck/%s/%s" % (spec.owner, spec.path_match) if (not force) and (store_get(key) == mode): mode = 'skip' @@ -164,9 +179,9 @@ def _comment(config, results, force=False): store_put(key, mode) if mode == 'approval': - lines.append('CC %s: Your approval is needed%s.' % (mention, prefix)) + lines.append('CC %s: Your approval is needed%s.' % (mention, match_description)) elif mode == 'fyi': - lines.append('CC %s: FYI only%s.' % (mention, prefix)) + lines.append('CC %s: FYI only%s.' % (mention, match_description)) if lines: github.issue_create_comment('\n'.join(lines)) diff --git a/repokitteh.star b/repokitteh.star index a6c42ef909ec..342a3f9675e3 100644 --- a/repokitteh.star +++ b/repokitteh.star @@ -7,10 +7,19 @@ use("github.com/repokitteh/modules/circleci.star", secret_token=get_secret('circ use( "github.com/envoyproxy/envoy/ci/repokitteh/modules/ownerscheck.star", paths=[ + { + "owner": "envoyproxy/api-shepherds!", + "path": + "(api/envoy[\w/]*/(v1alpha\d?|v1|v2alpha\d?|v2))|(api/envoy/type/(matcher/)?\w+.proto)", + "label": "v2-freeze", + "allow_global_approval": False, + "github_status_label": "v2 freeze violations", + }, { "owner": "envoyproxy/api-shepherds!", "path": "api/", "label": "api", + "github_status_label": "any API change", }, { "owner": "envoyproxy/api-watchers", From 8e52a24362221af1512a037ed7ea667a8330fbf2 Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Fri, 8 May 2020 15:55:32 -0700 Subject: [PATCH 121/909] Revert "http: Introduce preserve_upstream_date option (#11077)" (#11116) This reverts commit 10c755e9d9b8acd7cf1702a4f49dbcbdf0696198. Signed-off-by: Matt Klein --- .../v3/http_connection_manager.proto | 5 -- .../v4alpha/http_connection_manager.proto | 5 -- .../v3/http_connection_manager.proto | 5 -- .../v4alpha/http_connection_manager.proto | 5 -- source/common/http/conn_manager_config.h | 6 --- source/common/http/conn_manager_impl.cc | 4 +- .../network/http_connection_manager/config.cc | 3 +- .../network/http_connection_manager/config.h | 2 - source/server/http/admin.h | 1 - .../http/conn_manager_impl_fuzz_test.cc | 1 - test/common/http/conn_manager_impl_test.cc | 52 ------------------- test/common/http/conn_manager_utility_test.cc | 1 - .../http_connection_manager/config_test.cc | 33 ------------ 13 files changed, 2 insertions(+), 121 deletions(-) diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto b/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto index a236c5c47743..ac8ab2adbfb6 100644 --- a/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto +++ b/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto @@ -507,11 +507,6 @@ message HttpConnectionManager { // 3. Tracing decision (sampled, forced, etc) is set in 14th byte of the UUID. RequestIDExtension request_id_extension = 36; - // If `preserve_upstream_date` is true, the value of the `date` header sent by the upstream - // host will not be overwritten by the HTTP Connection Manager. The default behaviour is - // to overwrite the `date` header unconditionally. 
- bool preserve_upstream_date = 38; - // Determines if the port part should be removed from host/authority header before any processing // of request by HTTP filters or routing. The port would be removed only if it is equal to the :ref:`listener's` // local port and request method is not CONNECT. This affects the upstream host header as well. diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto index 6e26aa2d1f98..8f370b21d8f1 100644 --- a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto +++ b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto @@ -507,11 +507,6 @@ message HttpConnectionManager { // 3. Tracing decision (sampled, forced, etc) is set in 14th byte of the UUID. RequestIDExtension request_id_extension = 36; - // If `preserve_upstream_date` is true, the value of the `date` header sent by the upstream - // host will not be overwritten by the HTTP Connection Manager. The default behaviour is - // to overwrite the `date` header unconditionally. - bool preserve_upstream_date = 38; - // Determines if the port part should be removed from host/authority header before any processing // of request by HTTP filters or routing. The port would be removed only if it is equal to the :ref:`listener's` // local port and request method is not CONNECT. This affects the upstream host header as well. diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto index 55f4f5fe819d..21616dcc386b 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto @@ -509,11 +509,6 @@ message HttpConnectionManager { // 3. Tracing decision (sampled, forced, etc) is set in 14th byte of the UUID. RequestIDExtension request_id_extension = 36; - // If `preserve_upstream_date` is true, the value of the `date` header sent by the upstream - // host will not be overwritten by the HTTP Connection Manager. The default behaviour is - // to overwrite the `date` header unconditionally. - bool preserve_upstream_date = 38; - // Determines if the port part should be removed from host/authority header before any processing // of request by HTTP filters or routing. The port would be removed only if it is equal to the :ref:`listener's` // local port and request method is not CONNECT. This affects the upstream host header as well. diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto index 6e26aa2d1f98..8f370b21d8f1 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto @@ -507,11 +507,6 @@ message HttpConnectionManager { // 3. Tracing decision (sampled, forced, etc) is set in 14th byte of the UUID. 
RequestIDExtension request_id_extension = 36; - // If `preserve_upstream_date` is true, the value of the `date` header sent by the upstream - // host will not be overwritten by the HTTP Connection Manager. The default behaviour is - // to overwrite the `date` header unconditionally. - bool preserve_upstream_date = 38; - // Determines if the port part should be removed from host/authority header before any processing // of request by HTTP filters or routing. The port would be removed only if it is equal to the :ref:`listener's` // local port and request method is not CONNECT. This affects the upstream host header as well. diff --git a/source/common/http/conn_manager_config.h b/source/common/http/conn_manager_config.h index 993fa7d55a3e..bbbc07ce0825 100644 --- a/source/common/http/conn_manager_config.h +++ b/source/common/http/conn_manager_config.h @@ -435,12 +435,6 @@ class ConnectionManagerConfig { */ virtual envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction headersWithUnderscoresAction() const PURE; - - /** - * @return if the HttpConnectionManager should preserve the `date` response header sent by the - * upstream host. - */ - virtual bool shouldPreserveUpstreamDate() const PURE; }; } // namespace Http } // namespace Envoy diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index 7d0494ce4606..2becab3737b1 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -1633,9 +1633,7 @@ void ConnectionManagerImpl::ActiveStream::encodeHeaders(ActiveStreamEncoderFilte void ConnectionManagerImpl::ActiveStream::encodeHeadersInternal(ResponseHeaderMap& headers, bool end_stream) { // Base headers. - if (!connection_manager_.config_.shouldPreserveUpstreamDate() || !headers.Date()) { - connection_manager_.config_.dateProvider().setDateHeader(headers); - } + connection_manager_.config_.dateProvider().setDateHeader(headers); // Following setReference() is safe because serverName() is constant for the life of the listener. const auto transformation = connection_manager_.config_.serverHeaderTransformation(); if (transformation == ConnectionManagerConfig::HttpConnectionManagerProto::OVERWRITE || diff --git a/source/extensions/filters/network/http_connection_manager/config.cc b/source/extensions/filters/network/http_connection_manager/config.cc index d842e84b46f9..18cec14f48b4 100644 --- a/source/extensions/filters/network/http_connection_manager/config.cc +++ b/source/extensions/filters/network/http_connection_manager/config.cc @@ -218,8 +218,7 @@ HttpConnectionManagerConfig::HttpConnectionManagerConfig( merge_slashes_(config.merge_slashes()), strip_matching_port_(config.strip_matching_host_port()), headers_with_underscores_action_( - config.common_http_protocol_options().headers_with_underscores_action()), - preserve_upstream_date_(config.preserve_upstream_date()) { + config.common_http_protocol_options().headers_with_underscores_action()) { // If idle_timeout_ was not configured in common_http_protocol_options, use value in deprecated // idle_timeout field. // TODO(asraa): Remove when idle_timeout is removed. 
diff --git a/source/extensions/filters/network/http_connection_manager/config.h b/source/extensions/filters/network/http_connection_manager/config.h index 4a5d3f87db77..22088c3e472f 100644 --- a/source/extensions/filters/network/http_connection_manager/config.h +++ b/source/extensions/filters/network/http_connection_manager/config.h @@ -163,7 +163,6 @@ class HttpConnectionManagerConfig : Logger::Loggable, return headers_with_underscores_action_; } std::chrono::milliseconds delayedCloseTimeout() const override { return delayed_close_timeout_; } - bool shouldPreserveUpstreamDate() const override { return preserve_upstream_date_; } private: enum class CodecType { HTTP1, HTTP2, HTTP3, AUTO }; @@ -229,7 +228,6 @@ class HttpConnectionManagerConfig : Logger::Loggable, const bool strip_matching_port_; const envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction headers_with_underscores_action_; - const bool preserve_upstream_date_; // Default idle timeout is 5 minutes if nothing is specified in the HCM config. static const uint64_t StreamIdleTimeoutMs = 5 * 60 * 1000; diff --git a/source/server/http/admin.h b/source/server/http/admin.h index 1abed8984b83..bea7cef019c4 100644 --- a/source/server/http/admin.h +++ b/source/server/http/admin.h @@ -169,7 +169,6 @@ class AdminImpl : public Admin, headersWithUnderscoresAction() const override { return envoy::config::core::v3::HttpProtocolOptions::ALLOW; } - bool shouldPreserveUpstreamDate() const override { return false; } Http::Code request(absl::string_view path_and_query, absl::string_view method, Http::ResponseHeaderMap& response_headers, std::string& body) override; void closeSocket(); diff --git a/test/common/http/conn_manager_impl_fuzz_test.cc b/test/common/http/conn_manager_impl_fuzz_test.cc index d6152bdbcd16..de381823f7db 100644 --- a/test/common/http/conn_manager_impl_fuzz_test.cc +++ b/test/common/http/conn_manager_impl_fuzz_test.cc @@ -161,7 +161,6 @@ class FuzzConfig : public ConnectionManagerConfig { headersWithUnderscoresAction() const override { return envoy::config::core::v3::HttpProtocolOptions::ALLOW; } - bool shouldPreserveUpstreamDate() const override { return false; } const envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager config_; diff --git a/test/common/http/conn_manager_impl_test.cc b/test/common/http/conn_manager_impl_test.cc index 9483f32a4928..4ad7d1b41372 100644 --- a/test/common/http/conn_manager_impl_test.cc +++ b/test/common/http/conn_manager_impl_test.cc @@ -355,7 +355,6 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan headersWithUnderscoresAction() const override { return headers_with_underscores_action_; } - bool shouldPreserveUpstreamDate() const override { return preserve_upstream_date_; } Envoy::Event::SimulatedTimeSystem test_time_; NiceMock route_config_provider_; @@ -418,7 +417,6 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan NiceMock upstream_conn_; // for websocket tests NiceMock conn_pool_; // for websocket tests RequestIDExtensionSharedPtr request_id_extension_; - bool preserve_upstream_date_ = false; // TODO(mattklein123): Not all tests have been converted over to better setup. Convert the rest. 
MockResponseEncoder response_encoder_; @@ -976,56 +974,6 @@ TEST_F(HttpConnectionManagerImplTest, RouteShouldUseNormalizedHost) { conn_manager_->onData(fake_input, false); } -TEST_F(HttpConnectionManagerImplTest, PreserveUpstreamDateDisabledDateNotSet) { - setup(false, ""); - setUpEncoderAndDecoder(false, false); - sendRequestHeadersAndData(); - preserve_upstream_date_ = false; - const auto* modified_headers = sendResponseHeaders( - ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}, {"server", "foo"}}}); - ASSERT_TRUE(modified_headers); - EXPECT_TRUE(modified_headers->Date()); -} - -TEST_F(HttpConnectionManagerImplTest, PreserveUpstreamDateDisabledDateSet) { - setup(false, ""); - setUpEncoderAndDecoder(false, false); - sendRequestHeadersAndData(); - preserve_upstream_date_ = false; - const std::string expected_date{"Tue, 15 Nov 1994 08:12:31 GMT"}; - const auto* modified_headers = - sendResponseHeaders(ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{ - {":status", "200"}, {"server", "foo"}, {"date", expected_date.c_str()}}}); - ASSERT_TRUE(modified_headers); - ASSERT_TRUE(modified_headers->Date()); - EXPECT_NE(expected_date, modified_headers->Date()->value().getStringView()); -} - -TEST_F(HttpConnectionManagerImplTest, PreserveUpstreamDateEnabledDateNotSet) { - setup(false, ""); - setUpEncoderAndDecoder(false, false); - sendRequestHeadersAndData(); - preserve_upstream_date_ = true; - const auto* modified_headers = sendResponseHeaders( - ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}, {"server", "foo"}}}); - ASSERT_TRUE(modified_headers); - EXPECT_TRUE(modified_headers->Date()); -} - -TEST_F(HttpConnectionManagerImplTest, PreserveUpstreamDateEnabledDateSet) { - setup(false, ""); - setUpEncoderAndDecoder(false, false); - sendRequestHeadersAndData(); - preserve_upstream_date_ = true; - const std::string expected_date{"Tue, 15 Nov 1994 08:12:31 GMT"}; - const auto* modified_headers = - sendResponseHeaders(ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{ - {":status", "200"}, {"server", "foo"}, {"date", expected_date.c_str()}}}); - ASSERT_TRUE(modified_headers); - ASSERT_TRUE(modified_headers->Date()); - EXPECT_EQ(expected_date, modified_headers->Date()->value().getStringView()); -} - TEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlow) { setup(false, ""); diff --git a/test/common/http/conn_manager_utility_test.cc b/test/common/http/conn_manager_utility_test.cc index 9e39dfe30d79..3ea00c438db9 100644 --- a/test/common/http/conn_manager_utility_test.cc +++ b/test/common/http/conn_manager_utility_test.cc @@ -139,7 +139,6 @@ class MockConnectionManagerConfig : public ConnectionManagerConfig { MOCK_METHOD(bool, shouldStripMatchingPort, (), (const)); MOCK_METHOD(envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction, headersWithUnderscoresAction, (), (const)); - MOCK_METHOD(bool, shouldPreserveUpstreamDate, (), (const)); std::unique_ptr internal_address_config_ = std::make_unique(); diff --git a/test/extensions/filters/network/http_connection_manager/config_test.cc b/test/extensions/filters/network/http_connection_manager/config_test.cc index 02f3cbea978c..9ee7ed609910 100644 --- a/test/extensions/filters/network/http_connection_manager/config_test.cc +++ b/test/extensions/filters/network/http_connection_manager/config_test.cc @@ -1130,39 +1130,6 @@ TEST_F(HttpConnectionManagerConfigTest, UnconfiguredRequestTimeout) { EXPECT_EQ(0, config.requestTimeout().count()); } -TEST_F(HttpConnectionManagerConfigTest, 
DisabledPreserveResponseDate) { - const std::string yaml_string = R"EOF( - stat_prefix: ingress_http - request_timeout: 0s - route_config: - name: local_route - http_filters: - - name: envoy.filters.http.router - )EOF"; - - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, - date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); - EXPECT_FALSE(config.shouldPreserveUpstreamDate()); -} - -TEST_F(HttpConnectionManagerConfigTest, EnabledPreserveResponseDate) { - const std::string yaml_string = R"EOF( - stat_prefix: ingress_http - request_timeout: 0s - route_config: - name: local_route - http_filters: - - name: envoy.filters.http.router - preserve_upstream_date: true - )EOF"; - - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, - date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); - EXPECT_TRUE(config.shouldPreserveUpstreamDate()); -} - TEST_F(HttpConnectionManagerConfigTest, SingleDateProvider) { const std::string yaml_string = R"EOF( codec_type: http1 From 0d81abde2a31407085b793132f7561b548a3b292 Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Fri, 8 May 2020 15:58:08 -0700 Subject: [PATCH 122/909] ci: no upgrade (#11117) Signed-off-by: Lizan Zhou --- .azure-pipelines/cleanup.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.azure-pipelines/cleanup.sh b/.azure-pipelines/cleanup.sh index 72a9bbf9fa18..4b145b7729e0 100755 --- a/.azure-pipelines/cleanup.sh +++ b/.azure-pipelines/cleanup.sh @@ -3,7 +3,7 @@ set -e # Temporary script to remove tools from Azure pipelines agent to create more disk space room. -sudo apt-get -y update -sudo apt-get purge -y 'ghc-*' 'zulu-*-azure-jdk' 'libllvm*' 'mysql-*' 'dotnet-*' 'cpp-*' +sudo apt-get update -y +sudo apt-get purge -y --no-upgrade 'ghc-*' 'zulu-*-azure-jdk' 'libllvm*' 'mysql-*' 'dotnet-*' dpkg-query -Wf '${Installed-Size}\t${Package}\n' | sort -rn From 3337426cc82755f23ac536ac220f83e25a8378ad Mon Sep 17 00:00:00 2001 From: cmiller-sq <64046472+cmiller-sq@users.noreply.github.com> Date: Fri, 8 May 2020 19:03:23 -0400 Subject: [PATCH 123/909] docs: add diagrams and improve service-to-service descr (#11114) Signed-off-by: Chad MILLER --- .../service_to_service_egress_listener.svg | 237 ++++++++++++++ .../service_to_service_ingress_listener.svg | 309 ++++++++++++++++++ .../deployment_types/service_to_service.rst | 18 +- 3 files changed, 558 insertions(+), 6 deletions(-) create mode 100644 docs/root/_static/service_to_service_egress_listener.svg create mode 100644 docs/root/_static/service_to_service_ingress_listener.svg diff --git a/docs/root/_static/service_to_service_egress_listener.svg b/docs/root/_static/service_to_service_egress_listener.svg new file mode 100644 index 000000000000..ef0bbe70b806 --- /dev/null +++ b/docs/root/_static/service_to_service_egress_listener.svg @@ -0,0 +1,237 @@ + + + + + + image/svg+xml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + :9901 + diff --git a/docs/root/_static/service_to_service_ingress_listener.svg b/docs/root/_static/service_to_service_ingress_listener.svg new file mode 100644 index 000000000000..5b9109239ab0 --- /dev/null +++ b/docs/root/_static/service_to_service_ingress_listener.svg @@ -0,0 +1,309 @@ + + + + + + image/svg+xml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + :9221 + diff --git 
a/docs/root/intro/deployment_types/service_to_service.rst b/docs/root/intro/deployment_types/service_to_service.rst index 9f16d8063e1e..a4200a607ab3 100644 --- a/docs/root/intro/deployment_types/service_to_service.rst +++ b/docs/root/intro/deployment_types/service_to_service.rst @@ -8,7 +8,7 @@ Service to service only The above diagram shows the simplest Envoy deployment which uses Envoy as a communication bus for all traffic internal to a service oriented architecture (SOA). In this scenario, Envoy exposes -several listeners that are used for local origin traffic as well as service to service traffic. +several listeners that are used for local origin traffic as well as service-to-service traffic. Service to service egress listener ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -22,13 +22,16 @@ themselves with network topology, whether they are running in development or pro This listener supports both HTTP/1.1 or HTTP/2 depending on the capabilities of the application. +.. image:: /_static/service_to_service_egress_listener.svg + :width: 40% + .. _deployment_type_service_to_service_ingress: Service to service ingress listener ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ This is the port used by remote Envoys when they want to talk to the local Envoy. For example, -*http://localhost:9211*. Incoming requests are routed to the local service on the configured +*http://servicename:9211*. Envoy routes incoming requests to the local service on the configured port(s). Multiple application ports may be involved depending on application or load balancing needs (for example if the service needs both an HTTP port and a gRPC port). The local Envoy performs buffering, circuit breaking, etc. as needed. @@ -37,6 +40,10 @@ Our default configurations use HTTP/2 for all Envoy to Envoy communication, rega the application uses HTTP/1.1 or HTTP/2 when egressing out of a local Envoy. HTTP/2 provides better performance via long lived connections and explicit reset notifications. +.. image:: /_static/service_to_service_ingress_listener.svg + :width: 55% + + Optional external service egress listeners ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -50,13 +57,12 @@ being consistent and using local port routing for all external services. Discovery service integration ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The recommended service to service configuration uses an external discovery service for all cluster +The recommended service-to-service configuration uses an external discovery service for all cluster lookups. This provides Envoy with the most detailed information possible for use when performing load balancing, statistics gathering, etc. Configuration template ^^^^^^^^^^^^^^^^^^^^^^ -The source distribution includes an example service to service configuration that is very similar to -the version that Lyft runs in production. See :ref:`here ` for more -information. +The source distribution includes :ref:`an example service-to-service configuration` +that is very similar to the version that Lyft runs in production. From 888e0e28900a470df448c65d7b99d8065fd60251 Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Fri, 8 May 2020 17:27:55 -0700 Subject: [PATCH 124/909] docs: fix links broken in previous doc cleanup. 
(#11115) The links were broken in 8e8209fa75f87ab53d4c78a466c8f927df930e50 Fixes: #11078 Signed-off-by: Greg Greenway --- DEPRECATED.md | 3 ++- docs/root/intro/deprecated.rst | 6 ++++++ docs/root/intro/intro.rst | 7 +++++++ docs/root/intro/version_history.rst | 6 ++++++ docs/root/version_history/version_history.rst | 2 ++ source/common/common/BUILD | 5 +++++ source/common/common/documentation_url.h | 13 ++++++++++++ source/common/protobuf/BUILD | 1 + source/common/protobuf/utility.cc | 21 ++++++++----------- source/extensions/common/BUILD | 1 + source/extensions/common/utility.h | 14 ++++++------- 11 files changed, 58 insertions(+), 21 deletions(-) create mode 100644 docs/root/intro/deprecated.rst create mode 100644 docs/root/intro/version_history.rst create mode 100644 source/common/common/documentation_url.h diff --git a/DEPRECATED.md b/DEPRECATED.md index 1b2962adcb97..a82576c77ffa 100644 --- a/DEPRECATED.md +++ b/DEPRECATED.md @@ -1,3 +1,4 @@ # DEPRECATED -The [deprecated log](https://www.envoyproxy.io/docs/envoy/latest/intro/deprecated) can be found in the official Envoy developer documentation. +The [deprecated log](https://www.envoyproxy.io/docs/envoy/latest/version_history/version_history) +for each version can be found in the official Envoy developer documentation. diff --git a/docs/root/intro/deprecated.rst b/docs/root/intro/deprecated.rst new file mode 100644 index 000000000000..e630390e94e5 --- /dev/null +++ b/docs/root/intro/deprecated.rst @@ -0,0 +1,6 @@ +Deprecated +========== + +The deprecations for each version have moved :ref:`here `. + +.. This page only exists because previous versions of Envoy link here. diff --git a/docs/root/intro/intro.rst b/docs/root/intro/intro.rst index 055bca727679..cf3404c363ec 100644 --- a/docs/root/intro/intro.rst +++ b/docs/root/intro/intro.rst @@ -10,3 +10,10 @@ Introduction arch_overview/arch_overview deployment_types/deployment_types getting_help + +.. These pages are only here for redirects from log lines from shipping versions of Envoy, so hide them. +.. toctree:: + :hidden: + + version_history + deprecated diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst new file mode 100644 index 000000000000..307b7bb140e8 --- /dev/null +++ b/docs/root/intro/version_history.rst @@ -0,0 +1,6 @@ +Version History +=============== + +The changes for each version have moved :ref:`here `. + +.. This page only exists because previous versions of Envoy link here. diff --git a/docs/root/version_history/version_history.rst b/docs/root/version_history/version_history.rst index b869b08080e0..6451336bffe7 100644 --- a/docs/root/version_history/version_history.rst +++ b/docs/root/version_history/version_history.rst @@ -1,3 +1,5 @@ +.. 
_version_history: + Version history --------------- diff --git a/source/common/common/BUILD b/source/common/common/BUILD index af25042d18c5..9429aa2e24c6 100644 --- a/source/common/common/BUILD +++ b/source/common/common/BUILD @@ -72,6 +72,11 @@ envoy_cc_library( hdrs = ["compiler_requirements.h"], ) +envoy_cc_library( + name = "documentation_url_lib", + hdrs = ["documentation_url.h"], +) + envoy_cc_library( name = "empty_string", hdrs = ["empty_string.h"], diff --git a/source/common/common/documentation_url.h b/source/common/common/documentation_url.h new file mode 100644 index 000000000000..dc3e0e352209 --- /dev/null +++ b/source/common/common/documentation_url.h @@ -0,0 +1,13 @@ +namespace Envoy { + +// TODO(ggreenway): replace 'latest' with the current version, pulled from the VERSION file at +// the root of the repo. +#define ENVOY_DOC_URL_ROOT "https://www.envoyproxy.io/docs/envoy/latest" + +#define ENVOY_DOC_URL_VERSION_HISTORY ENVOY_DOC_URL_ROOT "/version_history/version_history" + +#define ENVOY_DOC_URL_RUNTIME_OVERRIDE_DEPRECATED \ + ENVOY_DOC_URL_ROOT \ + "/configuration/operations/runtime#using-runtime-overrides-for-deprecated-features" + +} // namespace Envoy diff --git a/source/common/protobuf/BUILD b/source/common/protobuf/BUILD index 9a9aa1f30624..de33b5516828 100644 --- a/source/common/protobuf/BUILD +++ b/source/common/protobuf/BUILD @@ -64,6 +64,7 @@ envoy_cc_library( "//include/envoy/protobuf:message_validator_interface", "//include/envoy/runtime:runtime_interface", "//source/common/common:assert_lib", + "//source/common/common:documentation_url_lib", "//source/common/common:hash_lib", "//source/common/common:utility_lib", "//source/common/config:api_type_oracle_lib", diff --git a/source/common/protobuf/utility.cc b/source/common/protobuf/utility.cc index 684c8e2ffab1..6aba34ea841b 100644 --- a/source/common/protobuf/utility.cc +++ b/source/common/protobuf/utility.cc @@ -8,6 +8,7 @@ #include "envoy/type/v3/percent.pb.h" #include "common/common/assert.h" +#include "common/common/documentation_url.h" #include "common/common/fmt.h" #include "common/config/api_type_oracle.h" #include "common/config/version_converter.h" @@ -198,11 +199,9 @@ void deprecatedFieldHelper(Runtime::Loader* runtime, bool proto_annotated_as_dep if (warn_only) { ENVOY_LOG_MISC(warn, "{}", with_overridden); } else { - const char fatal_error[] = - " If continued use of this field is absolutely necessary, see " - "https://www.envoyproxy.io/docs/envoy/latest/configuration/operations/runtime" - "#using-runtime-overrides-for-deprecated-features for how to apply a temporary and " - "highly discouraged override."; + const char fatal_error[] = " If continued use of this field is absolutely necessary, " + "see " ENVOY_DOC_URL_RUNTIME_OVERRIDE_DEPRECATED " for how " + "to apply a temporary and highly discouraged override."; throw ProtoValidationException(with_overridden + fatal_error, message); } } @@ -408,8 +407,7 @@ void checkForDeprecatedNonRepeatedEnumValue(const Protobuf::Message& message, enum_value_descriptor->name(), " for enum '", field->full_name(), "' from file ", filename, ". This enum value will be removed from Envoy soon", (default_value ? " so a non-default value must now be explicitly set" : ""), - ". Please see https://www.envoyproxy.io/docs/envoy/latest/intro/deprecated " - "for details."); + ". 
Please see " ENVOY_DOC_URL_VERSION_HISTORY " for details."); deprecatedFieldHelper( runtime, true /*deprecated*/, enum_value_descriptor->options().GetExtension(envoy::annotations::disallowed_by_default_enum), @@ -440,11 +438,10 @@ class UnexpectedFieldProtoVisitor : public ProtobufMessage::ConstProtoVisitor { // If this field is deprecated, warn or throw an error. if (field.options().deprecated()) { - const std::string warning = absl::StrCat( - "Using {}deprecated option '", field.full_name(), "' from file ", filename, - ". This configuration will be removed from " - "Envoy soon. Please see https://www.envoyproxy.io/docs/envoy/latest/intro/deprecated " - "for details."); + const std::string warning = + absl::StrCat("Using {}deprecated option '", field.full_name(), "' from file ", filename, + ". This configuration will be removed from " + "Envoy soon. Please see " ENVOY_DOC_URL_VERSION_HISTORY " for details."); deprecatedFieldHelper(runtime_, true /*deprecated*/, field.options().GetExtension(envoy::annotations::disallowed_by_default), diff --git a/source/extensions/common/BUILD b/source/extensions/common/BUILD index 11077c08c1e5..035e287aa3c9 100644 --- a/source/extensions/common/BUILD +++ b/source/extensions/common/BUILD @@ -13,6 +13,7 @@ envoy_cc_library( hdrs = ["utility.h"], deps = [ "//include/envoy/runtime:runtime_interface", + "//source/common/common:documentation_url_lib", "//source/common/common:minimal_logger_lib", ], ) diff --git a/source/extensions/common/utility.h b/source/extensions/common/utility.h index c50677c898d9..309dca2e2107 100644 --- a/source/extensions/common/utility.h +++ b/source/extensions/common/utility.h @@ -5,6 +5,7 @@ #include "envoy/common/exception.h" #include "envoy/runtime/runtime.h" +#include "common/common/documentation_url.h" #include "common/common/logger.h" namespace Envoy { @@ -103,9 +104,8 @@ class ExtensionNameUtil { return fmt::format( "Using deprecated {}{}extension name '{}' for '{}'. This name will be removed from Envoy " - "soon. Please see " - "https://www.envoyproxy.io/docs/envoy/latest/intro/deprecated for details.", - extension_type, spacing, deprecated_name, canonical_name); + "soon. Please see {} for details.", + extension_type, spacing, deprecated_name, canonical_name, ENVOY_DOC_URL_VERSION_HISTORY); } static std::string fatalMessage(absl::string_view extension_type, @@ -113,11 +113,9 @@ class ExtensionNameUtil { absl::string_view canonical_name) { std::string err = message(extension_type, deprecated_name, canonical_name); - const char fatal_error[] = - " If continued use of this filter name is absolutely necessary, see " - "https://www.envoyproxy.io/docs/envoy/latest/configuration/operations/runtime" - "#using-runtime-overrides-for-deprecated-features for how to apply a temporary and " - "highly discouraged override."; + const char fatal_error[] = " If continued use of this filter name is absolutely necessary, " + "see " ENVOY_DOC_URL_RUNTIME_OVERRIDE_DEPRECATED " for " + "how to apply a temporary and highly discouraged override."; return err + fatal_error; } From 1210df904254d8144cd7c35106df091aceb5375a Mon Sep 17 00:00:00 2001 From: Rama Chavali Date: Mon, 11 May 2020 00:40:10 +0530 Subject: [PATCH 125/909] move static to construct on first use (#11127) move histogram quantiles and buckets to construct on first use idiom. 
Signed-off-by: Rama Chavali --- source/common/stats/histogram_impl.cc | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/source/common/stats/histogram_impl.cc b/source/common/stats/histogram_impl.cc index f7ab4897596b..b1d041882efe 100644 --- a/source/common/stats/histogram_impl.cc +++ b/source/common/stats/histogram_impl.cc @@ -26,16 +26,14 @@ HistogramStatisticsImpl::HistogramStatisticsImpl(const histogram_t* histogram_pt } const std::vector& HistogramStatisticsImpl::supportedQuantiles() const { - static const std::vector supported_quantiles = {0, 0.25, 0.5, 0.75, 0.90, - 0.95, 0.99, 0.995, 0.999, 1}; - return supported_quantiles; + CONSTRUCT_ON_FIRST_USE(std::vector, + {0, 0.25, 0.5, 0.75, 0.90, 0.95, 0.99, 0.995, 0.999, 1}); } const std::vector& HistogramStatisticsImpl::supportedBuckets() const { - static const std::vector supported_buckets = { - 0.5, 1, 5, 10, 25, 50, 100, 250, 500, 1000, - 2500, 5000, 10000, 30000, 60000, 300000, 600000, 1800000, 3600000}; - return supported_buckets; + CONSTRUCT_ON_FIRST_USE(std::vector, + {0.5, 1, 5, 10, 25, 50, 100, 250, 500, 1000, 2500, 5000, 10000, 30000, + 60000, 300000, 600000, 1800000, 3600000}); } std::string HistogramStatisticsImpl::quantileSummary() const { From 50fcadf7bfa0ae2b59b8940027d4874441234ec6 Mon Sep 17 00:00:00 2001 From: "William A. Rowe Jr" Date: Sun, 10 May 2020 17:10:18 -0500 Subject: [PATCH 126/909] Fix curl expected features test for Windows (#11118) Fix curl expected features test for Windows Co-authored-by: William A Rowe Jr Co-authored-by: Sunjay Bhatia Signed-off-by: William A Rowe Jr Signed-off-by: Sunjay Bhatia --- bazel/foreign_cc/BUILD | 3 +++ test/dependencies/BUILD | 1 - test/dependencies/curl_test.cc | 38 +++++++++++++++++++--------------- 3 files changed, 24 insertions(+), 18 deletions(-) diff --git a/bazel/foreign_cc/BUILD b/bazel/foreign_cc/BUILD index 09d67b2a4d28..5dc37d92df90 100644 --- a/bazel/foreign_cc/BUILD +++ b/bazel/foreign_cc/BUILD @@ -101,6 +101,9 @@ envoy_cmake_external( "CMAKE_USE_GSSAPI": "off", "HTTP_ONLY": "on", "CMAKE_INSTALL_LIBDIR": "lib", + # Explicitly enable Unix sockets and disable crypto for Windows + "USE_UNIX_SOCKETS": "on", + "CURL_DISABLE_CRYPTO_AUTH": "on", # C-Ares. "ENABLE_ARES": "on", "CARES_LIBRARY": "$EXT_BUILD_DEPS/ares", diff --git a/test/dependencies/BUILD b/test/dependencies/BUILD index 3d68e338f2b5..2e6ae296b760 100644 --- a/test/dependencies/BUILD +++ b/test/dependencies/BUILD @@ -14,5 +14,4 @@ envoy_cc_test( external_deps = [ "curl", ], - tags = ["fails_on_windows"], ) diff --git a/test/dependencies/curl_test.cc b/test/dependencies/curl_test.cc index 6e11ee6dc7de..859e68c90d9c 100644 --- a/test/dependencies/curl_test.cc +++ b/test/dependencies/curl_test.cc @@ -9,28 +9,32 @@ TEST(CurlTest, BuiltWithExpectedFeatures) { // https://curl.haxx.se/libcurl/c/curl_version_info.html. curl_version_info_data* info = curl_version_info(CURLVERSION_NOW); - EXPECT_NE(0, info->features & CURL_VERSION_ASYNCHDNS); - EXPECT_NE(0, info->ares_num); - EXPECT_NE(0, info->features & CURL_VERSION_HTTP2); - EXPECT_NE(0, info->features & CURL_VERSION_LIBZ); + // In sequence as declared in curl.h. 
Ignore any toggling of the
+  // developer or OS selections for DEBUG, CURL DEBUG and LARGE FILE.
   EXPECT_NE(0, info->features & CURL_VERSION_IPV6);
-
-#ifndef WIN32
-  EXPECT_NE(0, info->features & CURL_VERSION_UNIX_SOCKETS);
-#else
-  EXPECT_EQ(0, info->features & CURL_VERSION_UNIX_SOCKETS);
-#endif
-
-  EXPECT_EQ(0, info->features & CURL_VERSION_BROTLI);
-  EXPECT_EQ(0, info->features & CURL_VERSION_GSSAPI);
-  EXPECT_EQ(0, info->features & CURL_VERSION_GSSNEGOTIATE);
   EXPECT_EQ(0, info->features & CURL_VERSION_KERBEROS4);
-  EXPECT_EQ(0, info->features & CURL_VERSION_KERBEROS5);
+  EXPECT_EQ(0, info->features & CURL_VERSION_SSL);
+  EXPECT_NE(0, info->features & CURL_VERSION_LIBZ);
   EXPECT_EQ(0, info->features & CURL_VERSION_NTLM);
-  EXPECT_EQ(0, info->features & CURL_VERSION_NTLM_WB);
+  EXPECT_EQ(0, info->features & CURL_VERSION_GSSNEGOTIATE);
+  EXPECT_NE(0, info->features & CURL_VERSION_ASYNCHDNS);
   EXPECT_EQ(0, info->features & CURL_VERSION_SPNEGO);
-  EXPECT_EQ(0, info->features & CURL_VERSION_SSL);
+  EXPECT_EQ(0, info->features & CURL_VERSION_IDN);
   EXPECT_EQ(0, info->features & CURL_VERSION_SSPI);
+  EXPECT_EQ(0, info->features & CURL_VERSION_CONV);
+  EXPECT_EQ(0, info->features & CURL_VERSION_TLSAUTH_SRP);
+  EXPECT_EQ(0, info->features & CURL_VERSION_NTLM_WB);
+  EXPECT_NE(0, info->features & CURL_VERSION_HTTP2);
+  EXPECT_EQ(0, info->features & CURL_VERSION_GSSAPI);
+  EXPECT_EQ(0, info->features & CURL_VERSION_KERBEROS5);
+  EXPECT_NE(0, info->features & CURL_VERSION_UNIX_SOCKETS);
+  EXPECT_EQ(0, info->features & CURL_VERSION_PSL);
+  EXPECT_EQ(0, info->features & CURL_VERSION_HTTPS_PROXY);
+  EXPECT_EQ(0, info->features & CURL_VERSION_MULTI_SSL);
+  EXPECT_EQ(0, info->features & CURL_VERSION_BROTLI);
+  EXPECT_EQ(0, info->features & CURL_VERSION_ALTSVC);
+  EXPECT_EQ(0, info->features & CURL_VERSION_HTTP3);
+  EXPECT_NE(0, info->ares_num);
 }

 } // namespace Dependencies

From b7c6ec6341026a02d591c44973247f6603cc8126 Mon Sep 17 00:00:00 2001
From: htuch
Date: Sun, 10 May 2020 19:55:59 -0400
Subject: [PATCH 127/909] ci: fix missing spec.owner in ownerscheck.star. (#11122)

The **spec was dropped in #11092 when building the struct to pass around.
This lost the owner info, resulting in errors such as
https://prod.repokitteh.app/traces/ui/envoyproxy/envoy/595f3d80-9170-11ea-9312-ad19ced22be2.

Signed-off-by: Harvey Tuch
---
 ci/repokitteh/modules/ownerscheck.star | 1 +
 1 file changed, 1 insertion(+)

diff --git a/ci/repokitteh/modules/ownerscheck.star b/ci/repokitteh/modules/ownerscheck.star
index b559fc114336..43264261349c 100644
--- a/ci/repokitteh/modules/ownerscheck.star
+++ b/ci/repokitteh/modules/ownerscheck.star
@@ -60,6 +60,7 @@ def _get_relevant_specs(specs, changed_files):
       status_label = spec.get("github_status_label", "")

       if files:
         relevant.append(struct(files=files,
+                               owner=spec.owner,
                                path_match=path_match,
                                allow_global_approval=allow_global_approval,
                                status_label=status_label))

From d90464cace696da61248d3999081c3c0d22a725b Mon Sep 17 00:00:00 2001
From: Prakasam Kannan
Date: Sun, 10 May 2020 19:23:40 -0700
Subject: [PATCH 128/909] http: route override (#10659)

Envoy currently chooses the upstream cluster based on the first matching
route; however, it is desirable to continue matching routes until a specific
cluster is chosen, for example when testing new L7 rules/routes in production
or based on a certain condition (the upstream cluster is not healthy, so
continue on to the next matching or default route).
This PR overloads the StreamFilterCallbacks::route method with a callback
that lets the caller continue matching routes.

Signed-off-by: Prakasam Kannan
Signed-off-by: Prakasam Kannan
---
 include/envoy/http/filter.h | 20 +-
 include/envoy/router/router.h | 57 ++++
 source/common/http/async_client_impl.h | 8 +
 source/common/http/conn_manager_impl.cc | 17 +-
 source/common/http/conn_manager_impl.h | 2 +
 source/common/router/config_impl.cc | 44 ++-
 source/common/router/config_impl.h | 17 +-
 test/common/http/conn_manager_impl_test.cc | 240 ++++++++++++++--
 test/common/router/config_impl_test.cc | 319 ++++++++++++++++++++-
 test/mocks/http/mocks.h | 2 +
 test/mocks/router/mocks.cc | 1 +
 test/mocks/router/mocks.h | 5 +
 12 files changed, 686 insertions(+), 46 deletions(-)

diff --git a/include/envoy/http/filter.h b/include/envoy/http/filter.h
index c97589d3cb69..ef1e195b28c6 100644
--- a/include/envoy/http/filter.h
+++ b/include/envoy/http/filter.h
@@ -147,10 +147,28 @@ class StreamFilterCallbacks {
    * caching where applicable to avoid multiple lookups. If a filter has modified the headers in
    * a way that affects routing, clearRouteCache() must be called to clear the cache.
    *
-   * NOTE: In the future we may want to allow the filter to override the route entry.
+   * NOTE: In the future we want to split route() into 2 methods, one that just
+   * returns the current route and another that actually resolves the route.
    */
   virtual Router::RouteConstSharedPtr route() PURE;

+  /**
+   * Invokes the callback with a matched route; the callback can choose to accept this route by
+   * returning Router::RouteMatchStatus::Accept or continue the route match from the last matched
+   * route by returning Router::RouteMatchStatus::Continue, if there are more routes available.
+   *
+   * Returns the route accepted by the callback, or nullptr if no match is found or none of the
+   * routes is accepted by the callback.
+   *
+   * NOTE: clearRouteCache() must be called before invoking this method, otherwise the cached
+   * route will be returned directly to the caller and the callback will not be invoked.
+   *
+   * Currently a route callback's decision is overridden by a clearRouteCache() / route() call in
+   * subsequent filters. We may want to persist callbacks so they always participate in later
+   * route resolution or make it an independent entity like filters that gets called on route
+   * resolution.
+   */
+  virtual Router::RouteConstSharedPtr route(const Router::RouteCallback& cb) PURE;
+
   /**
    * Returns the clusterInfo for the cached route.
    * This method is to avoid multiple look ups in the filter chain, it also provides a consistent
diff --git a/include/envoy/router/router.h b/include/envoy/router/router.h
index 39bffffa50d1..890d9cacef07 100644
--- a/include/envoy/router/router.h
+++ b/include/envoy/router/router.h
@@ -957,6 +957,44 @@ class Route {

 using RouteConstSharedPtr = std::shared_ptr;

+/**
+ * RouteCallback returns one of these enums to the route matcher to indicate
+ * whether the matched route has been accepted or whether it wants the route matching to
+ * continue.
+ */
+enum class RouteMatchStatus {
+  // Continue matching routes
+  Continue,
+  // Accept the matched route
+  Accept
+};
+
+/**
+ * RouteCallback is passed this enum to indicate if more routes are available for evaluation.
+ */
+enum class RouteEvalStatus {
+  // Has more routes that can be evaluated for match.
+  HasMoreRoutes,
+  // All routes have been evaluated for match.
+ NoMoreRoutes +}; + +/** + * RouteCallback can be used to override routing decision made by the Route::Config::route, + * this callback is passed the RouteConstSharedPtr, when a matching route is found, and + * RouteEvalStatus indicating whether there are more routes available for evaluation. + * + * RouteCallback will be called back only when at least one matching route is found, if no matching + * routes are found RouteCallback will not be invoked. RouteCallback can return one of the + * RouteMatchStatus enum to indicate if the match has been accepted or should the route match + * evaluation continue. + * + * Returning RouteMatchStatus::Continue, when no more routes available for evaluation will result in + * no further callbacks and no route is deemed to be accepted and nullptr is returned to the caller + * of Route::Config::route. + */ +using RouteCallback = std::function; + /** * The router configuration. */ @@ -976,6 +1014,25 @@ class Config { const StreamInfo::StreamInfo& stream_info, uint64_t random_value) const PURE; + /** + * Based on the incoming HTTP request headers, determine the target route (containing either a + * route entry or a direct response entry) for the request. + * + * Invokes callback with matched route, callback can choose to accept the route by returning + * RouteStatus::Stop or continue route match from last matched route by returning + * RouteMatchStatus::Continue, when more routes are available. + * + * @param cb supplies callback to be invoked upon route match. + * @param headers supplies the request headers. + * @param random_value supplies the random seed to use if a runtime choice is required. This + * allows stable choices between calls if desired. + * @return the route accepted by the callback or nullptr if no match found or none of route is + * accepted by the callback. + */ + virtual RouteConstSharedPtr route(const RouteCallback& cb, const Http::RequestHeaderMap& headers, + const StreamInfo::StreamInfo& stream_info, + uint64_t random_value) const PURE; + /** * Return a list of headers that will be cleaned from any requests that are not from an internal * (RFC1918) source. 
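To make the callback contract above concrete, here is a rough sketch of how a custom decoder filter might use the new route(RouteCallback) overload. ExampleFilter, its Loggable base (needed for ENVOY_LOG), and the isAcceptable() predicate are hypothetical stand-ins (for example, a check that the route's cluster is currently healthy) and are not part of this change; only the route()/clearRouteCache() calls and the Router enums come from the interfaces above.

Http::FilterHeadersStatus ExampleFilter::decodeHeaders(Http::RequestHeaderMap&, bool) {
  // The callback only runs on a fresh match, so drop any cached route first.
  decoder_callbacks_->clearRouteCache();

  const Router::RouteConstSharedPtr chosen = decoder_callbacks_->route(
      [this](Router::RouteConstSharedPtr route,
             Router::RouteEvalStatus eval_status) -> Router::RouteMatchStatus {
        if (isAcceptable(route) || eval_status == Router::RouteEvalStatus::NoMoreRoutes) {
          // Take this route (or fall back to the last match when nothing better is left);
          // it becomes the cached route seen by the rest of the filter chain.
          return Router::RouteMatchStatus::Accept;
        }
        // Keep evaluating the remaining routes in the virtual host.
        return Router::RouteMatchStatus::Continue;
      });

  if (chosen == nullptr) {
    // Nothing matched at all; the router filter will produce its usual "no route" response.
    ENVOY_LOG(debug, "no route matched for this request");
  }
  return Http::FilterHeadersStatus::Continue;
}

Because the accepted route becomes the cached route for the stream, later filters that call route() without a callback see the same selection unless they clear the route cache again.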
diff --git a/source/common/http/async_client_impl.h b/source/common/http/async_client_impl.h index 77f05f8c3bc6..beaea6c3ef60 100644 --- a/source/common/http/async_client_impl.h +++ b/source/common/http/async_client_impl.h @@ -163,6 +163,11 @@ class AsyncStreamImpl : public AsyncClient::Stream, return nullptr; } + Router::RouteConstSharedPtr route(const Router::RouteCallback&, const Http::RequestHeaderMap&, + const StreamInfo::StreamInfo&, uint64_t) const override { + NOT_IMPLEMENTED_GCOVR_EXCL_LINE; + } + const std::list& internalOnlyHeaders() const override { return internal_only_headers_; } @@ -326,6 +331,9 @@ class AsyncStreamImpl : public AsyncClient::Stream, Event::Dispatcher& dispatcher() override { return parent_.dispatcher_; } void resetStream() override; Router::RouteConstSharedPtr route() override { return route_; } + Router::RouteConstSharedPtr route(const Router::RouteCallback&) override { + NOT_IMPLEMENTED_GCOVR_EXCL_LINE; + } Upstream::ClusterInfoConstSharedPtr clusterInfo() override { return parent_.cluster_; } void clearRouteCache() override {} uint64_t streamId() const override { return stream_id_; } diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index 2becab3737b1..2d1e4808cab3 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -1431,7 +1431,9 @@ void ConnectionManagerImpl::ActiveStream::snapScopedRouteConfig() { } } -void ConnectionManagerImpl::ActiveStream::refreshCachedRoute() { +void ConnectionManagerImpl::ActiveStream::refreshCachedRoute() { refreshCachedRoute(nullptr); } + +void ConnectionManagerImpl::ActiveStream::refreshCachedRoute(const Router::RouteCallback& cb) { Router::RouteConstSharedPtr route; if (request_headers_ != nullptr) { if (connection_manager_.config_.isRoutable() && @@ -1440,7 +1442,7 @@ void ConnectionManagerImpl::ActiveStream::refreshCachedRoute() { snapScopedRouteConfig(); } if (snapped_route_config_ != nullptr) { - route = snapped_route_config_->route(*request_headers_, stream_info_, stream_id_); + route = snapped_route_config_->route(cb, *request_headers_, stream_info_, stream_id_); } } stream_info_.route_entry_ = route ? 
route->routeEntry() : nullptr; @@ -2259,10 +2261,15 @@ Upstream::ClusterInfoConstSharedPtr ConnectionManagerImpl::ActiveStreamFilterBas } Router::RouteConstSharedPtr ConnectionManagerImpl::ActiveStreamFilterBase::route() { - if (!parent_.cached_route_.has_value()) { - parent_.refreshCachedRoute(); - } + return route(nullptr); +} +Router::RouteConstSharedPtr +ConnectionManagerImpl::ActiveStreamFilterBase::route(const Router::RouteCallback& cb) { + if (parent_.cached_route_.has_value()) { + return parent_.cached_route_.value(); + } + parent_.refreshCachedRoute(cb); return parent_.cached_route_.value(); } diff --git a/source/common/http/conn_manager_impl.h b/source/common/http/conn_manager_impl.h index 8cb3cf6789e8..d4c0b1744c9d 100644 --- a/source/common/http/conn_manager_impl.h +++ b/source/common/http/conn_manager_impl.h @@ -154,6 +154,7 @@ class ConnectionManagerImpl : Logger::Loggable, Event::Dispatcher& dispatcher() override; void resetStream() override; Router::RouteConstSharedPtr route() override; + Router::RouteConstSharedPtr route(const Router::RouteCallback& cb) override; Upstream::ClusterInfoConstSharedPtr clusterInfo() override; void clearRouteCache() override; uint64_t streamId() const override; @@ -579,6 +580,7 @@ class ConnectionManagerImpl : Logger::Loggable, void snapScopedRouteConfig(); void refreshCachedRoute(); + void refreshCachedRoute(const Router::RouteCallback& cb); void requestRouteConfigUpdate(Event::Dispatcher& thread_local_dispatcher, Http::RouteConfigUpdatedCallbackSharedPtr route_config_updated_cb); diff --git a/source/common/router/config_impl.cc b/source/common/router/config_impl.cc index 7344c12f45d1..a6c3fd0b34c4 100644 --- a/source/common/router/config_impl.cc +++ b/source/common/router/config_impl.cc @@ -1128,7 +1128,8 @@ RouteMatcher::RouteMatcher(const envoy::config::route::v3::RouteConfiguration& r } } -RouteConstSharedPtr VirtualHostImpl::getRouteFromEntries(const Http::RequestHeaderMap& headers, +RouteConstSharedPtr VirtualHostImpl::getRouteFromEntries(const RouteCallback& cb, + const Http::RequestHeaderMap& headers, const StreamInfo::StreamInfo& stream_info, uint64_t random_value) const { // No x-forwarded-proto header. This normally only happens when ActiveStream::decodeHeaders @@ -1148,14 +1149,32 @@ RouteConstSharedPtr VirtualHostImpl::getRouteFromEntries(const Http::RequestHead } // Check for a route that matches the request. - for (const RouteEntryImplBaseConstSharedPtr& route : routes_) { - if (!headers.Path() && !route->supportsPathlessHeaders()) { + for (auto route = routes_.begin(); route != routes_.end(); ++route) { + if (!headers.Path() && !(*route)->supportsPathlessHeaders()) { continue; } - RouteConstSharedPtr route_entry = route->matches(headers, stream_info, random_value); - if (nullptr != route_entry) { - return route_entry; + + RouteConstSharedPtr route_entry = (*route)->matches(headers, stream_info, random_value); + if (nullptr == route_entry) { + continue; } + + if (cb) { + RouteEvalStatus eval_status = (std::next(route) == routes_.end()) + ? 
RouteEvalStatus::NoMoreRoutes + : RouteEvalStatus::HasMoreRoutes; + RouteMatchStatus match_status = cb(route_entry, eval_status); + if (match_status == RouteMatchStatus::Accept) { + return route_entry; + } + if (match_status == RouteMatchStatus::Continue && + eval_status == RouteEvalStatus::NoMoreRoutes) { + return nullptr; + } + continue; + } + + return route_entry; } return nullptr; @@ -1200,12 +1219,14 @@ const VirtualHostImpl* RouteMatcher::findVirtualHost(const Http::RequestHeaderMa return default_virtual_host_.get(); } -RouteConstSharedPtr RouteMatcher::route(const Http::RequestHeaderMap& headers, +RouteConstSharedPtr RouteMatcher::route(const RouteCallback& cb, + const Http::RequestHeaderMap& headers, const StreamInfo::StreamInfo& stream_info, uint64_t random_value) const { + const VirtualHostImpl* virtual_host = findVirtualHost(headers); if (virtual_host) { - return virtual_host->getRouteFromEntries(headers, stream_info, random_value); + return virtual_host->getRouteFromEntries(cb, headers, stream_info, random_value); } else { return nullptr; } @@ -1251,6 +1272,13 @@ ConfigImpl::ConfigImpl(const envoy::config::route::v3::RouteConfiguration& confi config.response_headers_to_remove()); } +RouteConstSharedPtr ConfigImpl::route(const RouteCallback& cb, + const Http::RequestHeaderMap& headers, + const StreamInfo::StreamInfo& stream_info, + uint64_t random_value) const { + return route_matcher_->route(cb, headers, stream_info, random_value); +} + namespace { RouteSpecificFilterConfigConstSharedPtr diff --git a/source/common/router/config_impl.h b/source/common/router/config_impl.h index 54ed2be8a533..d5bec26b4791 100644 --- a/source/common/router/config_impl.h +++ b/source/common/router/config_impl.h @@ -2,6 +2,7 @@ #include #include +#include #include #include #include @@ -163,7 +164,8 @@ class VirtualHostImpl : public VirtualHost { Server::Configuration::ServerFactoryContext& factory_context, Stats::Scope& scope, ProtobufMessage::ValidationVisitor& validator, bool validate_clusters); - RouteConstSharedPtr getRouteFromEntries(const Http::RequestHeaderMap& headers, + RouteConstSharedPtr getRouteFromEntries(const RouteCallback& cb, + const Http::RequestHeaderMap& headers, const StreamInfo::StreamInfo& stream_info, uint64_t random_value) const; const VirtualCluster* virtualClusterFromEntries(const Http::HeaderMap& headers) const; @@ -866,7 +868,7 @@ class RouteMatcher { Server::Configuration::ServerFactoryContext& factory_context, ProtobufMessage::ValidationVisitor& validator, bool validate_clusters); - RouteConstSharedPtr route(const Http::RequestHeaderMap& headers, + RouteConstSharedPtr route(const RouteCallback& cb, const Http::RequestHeaderMap& headers, const StreamInfo::StreamInfo& stream_info, uint64_t random_value) const; const VirtualHostImpl* findVirtualHost(const Http::RequestHeaderMap& headers) const; @@ -916,9 +918,13 @@ class ConfigImpl : public Config { RouteConstSharedPtr route(const Http::RequestHeaderMap& headers, const StreamInfo::StreamInfo& stream_info, uint64_t random_value) const override { - return route_matcher_->route(headers, stream_info, random_value); + return route(nullptr, headers, stream_info, random_value); } + RouteConstSharedPtr route(const RouteCallback& cb, const Http::RequestHeaderMap& headers, + const StreamInfo::StreamInfo& stream_info, + uint64_t random_value) const override; + const std::list& internalOnlyHeaders() const override { return internal_only_headers_; } @@ -953,6 +959,11 @@ class NullConfigImpl : public Config { return nullptr; } + 
RouteConstSharedPtr route(const RouteCallback&, const Http::RequestHeaderMap&, + const StreamInfo::StreamInfo&, uint64_t) const override { + return nullptr; + } + const std::list& internalOnlyHeaders() const override { return internal_only_headers_; } diff --git a/test/common/http/conn_manager_impl_test.cc b/test/common/http/conn_manager_impl_test.cc index 4ad7d1b41372..535cf78fb929 100644 --- a/test/common/http/conn_manager_impl_test.cc +++ b/test/common/http/conn_manager_impl_test.cc @@ -886,12 +886,12 @@ TEST_F(HttpConnectionManagerImplTest, RouteShouldUseSantizedPath) { std::shared_ptr route = std::make_shared>(); EXPECT_CALL(route->route_entry_, clusterName()).WillRepeatedly(ReturnRef(fake_cluster_name)); - EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _)) - .WillOnce(Invoke( - [&](const Http::RequestHeaderMap& header_map, const StreamInfo::StreamInfo&, uint64_t) { - EXPECT_EQ(normalized_path, header_map.Path()->value().getStringView()); - return route; - })); + EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _, _)) + .WillOnce(Invoke([&](const Router::RouteCallback&, const Http::RequestHeaderMap& header_map, + const StreamInfo::StreamInfo&, uint64_t) { + EXPECT_EQ(normalized_path, header_map.Path()->value().getStringView()); + return route; + })); EXPECT_CALL(filter_factory_, createFilterChain(_)) .WillOnce(Invoke([&](FilterChainFactoryCallbacks&) -> void {})); @@ -900,6 +900,202 @@ TEST_F(HttpConnectionManagerImplTest, RouteShouldUseSantizedPath) { conn_manager_->onData(fake_input, false); } +TEST_F(HttpConnectionManagerImplTest, RouteOverride) { + setup(false, ""); + + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; + decoder->decodeHeaders(std::move(headers), true); + return Http::okStatus(); + })); + + setupFilterChain(2, 0); + const std::string foo_bar_baz_cluster_name = "foo_bar_baz"; + const std::string foo_bar_cluster_name = "foo_bar"; + const std::string foo_cluster_name = "foo"; + const std::string default_cluster_name = "default"; + + std::shared_ptr foo_bar_baz_cluster = + std::make_shared>(); + + std::shared_ptr foo_bar_cluster = + std::make_shared>(); + EXPECT_CALL(cluster_manager_, get(absl::string_view{foo_bar_cluster_name})) + .WillOnce(Return(foo_bar_cluster.get())); + + std::shared_ptr foo_cluster = + std::make_shared>(); + + std::shared_ptr default_cluster = + std::make_shared>(); + EXPECT_CALL(cluster_manager_, get(absl::string_view{default_cluster_name})) + .Times(2) + .WillRepeatedly(Return(default_cluster.get())); + + std::shared_ptr foo_bar_baz_route = + std::make_shared>(); + + std::shared_ptr foo_bar_route = + std::make_shared>(); + EXPECT_CALL(foo_bar_route->route_entry_, clusterName()).WillOnce(ReturnRef(foo_bar_cluster_name)); + + std::shared_ptr foo_route = std::make_shared>(); + + std::shared_ptr default_route = + std::make_shared>(); + EXPECT_CALL(default_route->route_entry_, clusterName()) + .Times(2) + .WillRepeatedly(ReturnRef(default_cluster_name)); + + using ::testing::InSequence; + { + InSequence seq; + EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _, _)) + .WillOnce(Return(default_route)); + + // This filter iterates through all possible route matches and choose the last matched route + EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true)) + 
.WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus { + EXPECT_EQ(default_route, decoder_filters_[0]->callbacks_->route()); + EXPECT_EQ(default_route->routeEntry(), + decoder_filters_[0]->callbacks_->streamInfo().routeEntry()); + EXPECT_EQ(default_cluster->info(), decoder_filters_[0]->callbacks_->clusterInfo()); + + // Not clearing cached route returns cached route and doesn't invoke cb. + Router::RouteConstSharedPtr route = decoder_filters_[0]->callbacks_->route( + [](Router::RouteConstSharedPtr, Router::RouteEvalStatus) -> Router::RouteMatchStatus { + ADD_FAILURE() << "When route cache is not cleared CB should not be invoked"; + return Router::RouteMatchStatus::Accept; + }); + EXPECT_EQ(default_route, route); + + int ctr = 0; + const Router::RouteCallback& cb = + [&](Router::RouteConstSharedPtr route, + Router::RouteEvalStatus route_eval_status) -> Router::RouteMatchStatus { + EXPECT_LE(ctr, 3); + if (ctr == 0) { + ++ctr; + EXPECT_EQ(foo_bar_baz_route, route); + EXPECT_EQ(route_eval_status, Router::RouteEvalStatus::HasMoreRoutes); + return Router::RouteMatchStatus::Continue; + } + + if (ctr == 1) { + ++ctr; + EXPECT_EQ(foo_bar_route, route); + EXPECT_EQ(route_eval_status, Router::RouteEvalStatus::HasMoreRoutes); + return Router::RouteMatchStatus::Continue; + } + + if (ctr == 2) { + ++ctr; + EXPECT_EQ(foo_route, route); + EXPECT_EQ(route_eval_status, Router::RouteEvalStatus::HasMoreRoutes); + return Router::RouteMatchStatus::Continue; + } + + if (ctr == 3) { + ++ctr; + EXPECT_EQ(default_route, route); + EXPECT_EQ(route_eval_status, Router::RouteEvalStatus::NoMoreRoutes); + return Router::RouteMatchStatus::Accept; + } + return Router::RouteMatchStatus::Accept; + }; + + decoder_filters_[0]->callbacks_->clearRouteCache(); + route = decoder_filters_[0]->callbacks_->route(cb); + + EXPECT_EQ(default_route, route); + EXPECT_EQ(default_route, decoder_filters_[0]->callbacks_->route()); + EXPECT_EQ(default_route->routeEntry(), + decoder_filters_[0]->callbacks_->streamInfo().routeEntry()); + EXPECT_EQ(default_cluster->info(), decoder_filters_[0]->callbacks_->clusterInfo()); + + return FilterHeadersStatus::Continue; + })); + + // This route config expected to be invoked for all matching routes + EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _, _)) + .WillOnce(Invoke([&](const Router::RouteCallback& cb, const Http::RequestHeaderMap&, + const Envoy::StreamInfo::StreamInfo&, + uint64_t) -> Router::RouteConstSharedPtr { + EXPECT_EQ(cb(foo_bar_baz_route, Router::RouteEvalStatus::HasMoreRoutes), + Router::RouteMatchStatus::Continue); + EXPECT_EQ(cb(foo_bar_route, Router::RouteEvalStatus::HasMoreRoutes), + Router::RouteMatchStatus::Continue); + EXPECT_EQ(cb(foo_route, Router::RouteEvalStatus::HasMoreRoutes), + Router::RouteMatchStatus::Continue); + EXPECT_EQ(cb(default_route, Router::RouteEvalStatus::NoMoreRoutes), + Router::RouteMatchStatus::Accept); + return default_route; + })); + + EXPECT_CALL(*decoder_filters_[0], decodeComplete()); + + // This filter chooses second route + EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, true)) + .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus { + EXPECT_EQ(default_route, decoder_filters_[1]->callbacks_->route()); + EXPECT_EQ(default_route->routeEntry(), + decoder_filters_[1]->callbacks_->streamInfo().routeEntry()); + EXPECT_EQ(default_cluster->info(), decoder_filters_[1]->callbacks_->clusterInfo()); + + int ctr = 0; + const Router::RouteCallback& cb = + [&](Router::RouteConstSharedPtr route, + Router::RouteEvalStatus 
route_eval_status) -> Router::RouteMatchStatus { + EXPECT_LE(ctr, 1); + if (ctr == 0) { + ++ctr; + EXPECT_EQ(foo_bar_baz_route, route); + EXPECT_EQ(route_eval_status, Router::RouteEvalStatus::HasMoreRoutes); + return Router::RouteMatchStatus::Continue; + } + + if (ctr == 1) { + ++ctr; + EXPECT_EQ(foo_bar_route, route); + EXPECT_EQ(route_eval_status, Router::RouteEvalStatus::HasMoreRoutes); + return Router::RouteMatchStatus::Accept; + } + return Router::RouteMatchStatus::Accept; + }; + + decoder_filters_[0]->callbacks_->clearRouteCache(); + decoder_filters_[1]->callbacks_->route(cb); + + EXPECT_EQ(foo_bar_route, decoder_filters_[1]->callbacks_->route()); + EXPECT_EQ(foo_bar_route->routeEntry(), + decoder_filters_[1]->callbacks_->streamInfo().routeEntry()); + EXPECT_EQ(foo_bar_cluster->info(), decoder_filters_[1]->callbacks_->clusterInfo()); + + return FilterHeadersStatus::Continue; + })); + + // This route config expected to be invoked for first two matching routes + EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _, _)) + .WillOnce(Invoke([&](const Router::RouteCallback& cb, const Http::RequestHeaderMap&, + const Envoy::StreamInfo::StreamInfo&, + uint64_t) -> Router::RouteConstSharedPtr { + EXPECT_EQ(cb(foo_bar_baz_route, Router::RouteEvalStatus::HasMoreRoutes), + Router::RouteMatchStatus::Continue); + EXPECT_EQ(cb(foo_bar_route, Router::RouteEvalStatus::HasMoreRoutes), + Router::RouteMatchStatus::Accept); + return foo_bar_route; + })); + + EXPECT_CALL(*decoder_filters_[1], decodeComplete()); + } + + // Kick off the incoming data. + Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); +} + // Filters observe host header w/o port's part when port's removal is configured TEST_F(HttpConnectionManagerImplTest, FilterShouldUseNormalizedHost) { setup(false, ""); @@ -960,12 +1156,12 @@ TEST_F(HttpConnectionManagerImplTest, RouteShouldUseNormalizedHost) { std::shared_ptr route = std::make_shared>(); EXPECT_CALL(route->route_entry_, clusterName()).WillRepeatedly(ReturnRef(fake_cluster_name)); - EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _)) - .WillOnce(Invoke( - [&](const Http::RequestHeaderMap& header_map, const StreamInfo::StreamInfo&, uint64_t) { - EXPECT_EQ(normalized_host, header_map.Host()->value().getStringView()); - return route; - })); + EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _, _)) + .WillOnce(Invoke([&](const Router::RouteCallback&, const Http::RequestHeaderMap& header_map, + const StreamInfo::StreamInfo&, uint64_t) { + EXPECT_EQ(normalized_host, header_map.Host()->value().getStringView()); + return route; + })); EXPECT_CALL(filter_factory_, createFilterChain(_)) .WillOnce(Invoke([&](FilterChainFactoryCallbacks&) -> void {})); @@ -2026,7 +2222,7 @@ TEST_F(HttpConnectionManagerImplTest, AccessEncoderRouteBeforeHeadersArriveOnIdl })); // This should not be called as we don't have request headers. 
- EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _)).Times(0); + EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _, _)).Times(0); EXPECT_CALL(*filter, encodeHeaders(_, _)) .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus { @@ -3965,7 +4161,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterAddBodyInline) { ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true); } -TEST_F(HttpConnectionManagerImplTest, FilterClearRouteCache) { +TEST_F(HttpConnectionManagerImplTest, Filter) { setup(false, ""); EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { @@ -3991,7 +4187,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterClearRouteCache) { std::shared_ptr route2 = std::make_shared>(); EXPECT_CALL(route2->route_entry_, clusterName()).WillRepeatedly(ReturnRef(fake_cluster2_name)); - EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _)) + EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _, _)) .WillOnce(Return(route1)) .WillOnce(Return(route2)) .WillOnce(Return(nullptr)); @@ -4903,7 +5099,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterDirectDecodeEncodeDataNoTrailers) { return Http::okStatus(); })); - EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _)); + EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _, _)); setupFilterChain(2, 2); EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false)) @@ -4987,7 +5183,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterDirectDecodeEncodeDataTrailers) { return Http::okStatus(); })); - EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _)); + EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _, _)); setupFilterChain(2, 2); EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false)) @@ -5082,7 +5278,7 @@ TEST_F(HttpConnectionManagerImplTest, MultipleFilters) { return Http::okStatus(); })); - EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _)); + EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _, _)); setupFilterChain(3, 2); EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false)) @@ -5485,7 +5681,7 @@ TEST_F(HttpConnectionManagerImplTest, TestSrdsUpdate) { std::shared_ptr fake_cluster1 = std::make_shared>(); EXPECT_CALL(cluster_manager_, get(_)).WillOnce(Return(fake_cluster1.get())); - EXPECT_CALL(*route_config_, route(_, _, _)).WillOnce(Return(route1)); + EXPECT_CALL(*route_config_, route(_, _, _, _)).WillOnce(Return(route1)); // First no-scope-found request will be handled by decoder_filters_[0]. 
setupFilterChain(1, 0); EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true)) @@ -5519,8 +5715,8 @@ TEST_F(HttpConnectionManagerImplTest, TestSrdsCrossScopeReroute) { std::make_shared>(); std::shared_ptr route1 = std::make_shared>(); std::shared_ptr route2 = std::make_shared>(); - EXPECT_CALL(*route_config1, route(_, _, _)).WillRepeatedly(Return(route1)); - EXPECT_CALL(*route_config2, route(_, _, _)).WillRepeatedly(Return(route2)); + EXPECT_CALL(*route_config1, route(_, _, _, _)).WillRepeatedly(Return(route1)); + EXPECT_CALL(*route_config2, route(_, _, _, _)).WillRepeatedly(Return(route2)); EXPECT_CALL(*static_cast( scopedRouteConfigProvider()->config().get()), getRouteConfig(_)) @@ -5586,7 +5782,7 @@ TEST_F(HttpConnectionManagerImplTest, TestSrdsRouteFound) { EXPECT_CALL( *static_cast( scopedRouteConfigProvider()->config()->route_config_.get()), - route(_, _, _)) + route(_, _, _, _)) .WillOnce(Return(route1)); RequestDecoder* decoder = nullptr; EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { diff --git a/test/common/router/config_impl_test.cc b/test/common/router/config_impl_test.cc index 02772af8eb86..dc7f9bc53b4a 100644 --- a/test/common/router/config_impl_test.cc +++ b/test/common/router/config_impl_test.cc @@ -4,6 +4,7 @@ #include #include #include +#include #include "envoy/config/route/v3/route.pb.h" #include "envoy/config/route/v3/route.pb.validate.h" @@ -58,9 +59,7 @@ class TestConfigImpl : public ConfigImpl { validate_clusters_default), config_(config) {} - RouteConstSharedPtr route(const Http::RequestHeaderMap& headers, - const Envoy::StreamInfo::StreamInfo& stream_info, - uint64_t random_value) const override { + void setupRouteConfig(const Http::RequestHeaderMap& headers, uint64_t random_value) const { absl::optional corpus_path = TestEnvironment::getOptionalEnvVar("GENRULE_OUTPUT_DIR"); if (corpus_path) { @@ -77,9 +76,28 @@ class TestConfigImpl : public ConfigImpl { corpus_file << corpus; } } + } + + RouteConstSharedPtr route(const Http::RequestHeaderMap& headers, + const Envoy::StreamInfo::StreamInfo& stream_info, + uint64_t random_value) const override { + + setupRouteConfig(headers, random_value); return ConfigImpl::route(headers, stream_info, random_value); } + RouteConstSharedPtr route(const RouteCallback& cb, const Http::RequestHeaderMap& headers, + const StreamInfo::StreamInfo& stream_info, + uint64_t random_value) const override { + + setupRouteConfig(headers, random_value); + return ConfigImpl::route(cb, headers, stream_info, random_value); + } + + RouteConstSharedPtr route(const RouteCallback& cb, const Http::RequestHeaderMap& headers) const { + return route(cb, headers, NiceMock(), 0); + } + RouteConstSharedPtr route(const Http::RequestHeaderMap& headers, uint64_t random_value) const { return route(headers, NiceMock(), random_value); } @@ -95,12 +113,25 @@ Http::TestRequestHeaderMapImpl genPathlessHeaders(const std::string& host, {"x-forwarded-proto", "http"}}; } +Http::TestRequestHeaderMapImpl genHeaders(const std::string& host, const std::string& path, + const std::string& method, + const std::string& forwarded_proto) { + auto hdrs = Http::TestRequestHeaderMapImpl{ + {":authority", host}, {":path", path}, + {":method", method}, {"x-safe", "safe"}, + {"x-global-nope", "global"}, {"x-vhost-nope", "vhost"}, + {"x-route-nope", "route"}, {"x-forwarded-proto", forwarded_proto}}; + + if (forwarded_proto.empty()) { + hdrs.remove("x-forwarded-proto"); + } + + return hdrs; +} + Http::TestRequestHeaderMapImpl 
genHeaders(const std::string& host, const std::string& path, const std::string& method) { - return Http::TestRequestHeaderMapImpl{{":authority", host}, {":path", path}, - {":method", method}, {"x-safe", "safe"}, - {"x-global-nope", "global"}, {"x-vhost-nope", "vhost"}, - {"x-route-nope", "route"}, {"x-forwarded-proto", "http"}}; + return genHeaders(host, path, method, "http"); } envoy::config::route::v3::RouteConfiguration @@ -7192,6 +7223,280 @@ name: foo checkEach(yaml, 1213, 1213, 1415); } +class RouteMatchOverrideTest : public testing::Test, public ConfigImplTestBase {}; + +TEST_F(RouteMatchOverrideTest, VerifyAllMatchableRoutes) { + const std::string yaml = R"EOF( +name: foo +virtual_hosts: + - name: bar + domains: ["*"] + routes: + - match: { prefix: "/foo/bar/baz" } + route: + cluster: foo_bar_baz + - match: { prefix: "/foo/bar" } + route: + cluster: foo_bar + - match: { prefix: "/foo" } + route: + cluster: foo + - match: { prefix: "/" } + route: + cluster: default +)EOF"; + + TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + std::vector clusters{"default", "foo", "foo_bar", "foo_bar_baz"}; + + RouteConstSharedPtr accepted_route = config.route( + [&clusters](RouteConstSharedPtr route, + RouteEvalStatus route_eval_status) -> RouteMatchStatus { + EXPECT_FALSE(clusters.empty()); + EXPECT_EQ(clusters[clusters.size() - 1], route->routeEntry()->clusterName()); + clusters.pop_back(); + if (clusters.empty()) { + EXPECT_EQ(route_eval_status, RouteEvalStatus::NoMoreRoutes); + return RouteMatchStatus::Accept; + } + EXPECT_EQ(route_eval_status, RouteEvalStatus::HasMoreRoutes); + return RouteMatchStatus::Continue; + }, + genHeaders("bat.com", "/foo/bar/baz", "GET")); + EXPECT_EQ(accepted_route->routeEntry()->clusterName(), "default"); +} + +TEST_F(RouteMatchOverrideTest, VerifyRouteOverrideStops) { + const std::string yaml = R"EOF( +name: foo +virtual_hosts: + - name: bar + domains: ["*"] + routes: + - match: { prefix: "/foo/bar/baz" } + route: + cluster: foo_bar_baz + - match: { prefix: "/foo/bar" } + route: + cluster: foo_bar + - match: { prefix: "/foo" } + route: + cluster: foo + - match: { prefix: "/" } + route: + cluster: default +)EOF"; + + TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + std::vector clusters{"foo", "foo_bar"}; + + RouteConstSharedPtr accepted_route = config.route( + [&clusters](RouteConstSharedPtr route, + RouteEvalStatus route_eval_status) -> RouteMatchStatus { + EXPECT_FALSE(clusters.empty()); + EXPECT_EQ(clusters[clusters.size() - 1], route->routeEntry()->clusterName()); + clusters.pop_back(); + EXPECT_EQ(route_eval_status, RouteEvalStatus::HasMoreRoutes); + + if (clusters.empty()) { + return RouteMatchStatus::Accept; // Do not match default route + } + return RouteMatchStatus::Continue; + }, + genHeaders("bat.com", "/foo/bar", "GET")); + EXPECT_EQ(accepted_route->routeEntry()->clusterName(), "foo"); +} + +TEST_F(RouteMatchOverrideTest, StopWhenNoMoreRoutes) { + const std::string yaml = R"EOF( +name: foo +virtual_hosts: + - name: bar + domains: ["*"] + routes: + - match: { prefix: "/foo/bar/baz" } + route: + cluster: foo_bar_baz + - match: { prefix: "/foo/bar" } + route: + cluster: foo_bar + - match: { prefix: "/foo" } + route: + cluster: foo + - match: { prefix: "/" } + route: + cluster: default +)EOF"; + + TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + std::vector clusters{"default", "foo", "foo_bar", "foo_bar_baz"}; + + RouteConstSharedPtr 
accepted_route = config.route( + [&clusters](RouteConstSharedPtr route, + RouteEvalStatus route_eval_status) -> RouteMatchStatus { + EXPECT_FALSE(clusters.empty()); + EXPECT_EQ(clusters[clusters.size() - 1], route->routeEntry()->clusterName()); + clusters.pop_back(); + + if (clusters.empty()) { + EXPECT_EQ(route_eval_status, RouteEvalStatus::NoMoreRoutes); + } else { + EXPECT_EQ(route_eval_status, RouteEvalStatus::HasMoreRoutes); + } + // Returning continue when no more routes are available will be ignored by ConfigImpl::route + return RouteMatchStatus::Continue; + }, + genHeaders("bat.com", "/foo/bar/baz", "GET")); + EXPECT_EQ(accepted_route, nullptr); +} + +TEST_F(RouteMatchOverrideTest, NullRouteOnNoRouteMatch) { + const std::string yaml = R"EOF( +name: foo +virtual_hosts: + - name: bar + domains: ["*"] + routes: + - match: { prefix: "/foo/bar/baz" } + route: + cluster: foo_bar_baz + - match: { prefix: "/foo/bar" } + route: + cluster: foo_bar + - match: { prefix: "/foo" } + route: + cluster: foo +)EOF"; + + TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + RouteConstSharedPtr accepted_route = config.route( + [](RouteConstSharedPtr, RouteEvalStatus) -> RouteMatchStatus { + ADD_FAILURE() + << "RouteCallback should not be invoked since there are no matching route to override"; + return RouteMatchStatus::Continue; + }, + genHeaders("bat.com", "/", "GET")); + EXPECT_EQ(accepted_route, nullptr); +} + +TEST_F(RouteMatchOverrideTest, NullRouteOnNoHostMatch) { + const std::string yaml = R"EOF( +name: foo +virtual_hosts: + - name: bar + domains: ["www.acme.com"] + routes: + - match: { prefix: "/foo/bar/baz" } + route: + cluster: foo_bar_baz + - match: { prefix: "/foo/bar" } + route: + cluster: foo_bar + - match: { prefix: "/" } + route: + cluster: default +)EOF"; + + TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + RouteConstSharedPtr accepted_route = config.route( + [](RouteConstSharedPtr, RouteEvalStatus) -> RouteMatchStatus { + ADD_FAILURE() + << "RouteCallback should not be invoked since there are no matching route to override"; + return RouteMatchStatus::Continue; + }, + genHeaders("bat.com", "/", "GET")); + EXPECT_EQ(accepted_route, nullptr); +} + +TEST_F(RouteMatchOverrideTest, NullRouteOnNullXForwardedProto) { + const std::string yaml = R"EOF( +name: foo +virtual_hosts: + - name: bar + domains: ["*"] + routes: + - match: { prefix: "/foo/bar/baz" } + route: + cluster: foo_bar_baz + - match: { prefix: "/foo/bar" } + route: + cluster: foo_bar + - match: { prefix: "/" } + route: + cluster: default +)EOF"; + + TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + RouteConstSharedPtr accepted_route = config.route( + [](RouteConstSharedPtr, RouteEvalStatus) -> RouteMatchStatus { + ADD_FAILURE() + << "RouteCallback should not be invoked since there are no matching route to override"; + return RouteMatchStatus::Continue; + }, + genHeaders("bat.com", "/", "GET", "")); + EXPECT_EQ(accepted_route, nullptr); +} + +TEST_F(RouteMatchOverrideTest, NullRouteOnRequireTlsAll) { + const std::string yaml = R"EOF( +name: foo +virtual_hosts: + - name: bar + domains: ["*"] + routes: + - match: { prefix: "/foo/bar/baz" } + route: + cluster: foo_bar_baz + - match: { prefix: "/foo/bar" } + route: + cluster: foo_bar + - match: { prefix: "/" } + route: + cluster: default + require_tls: ALL +)EOF"; + + TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + 
RouteConstSharedPtr accepted_route = config.route( + [](RouteConstSharedPtr, RouteEvalStatus) -> RouteMatchStatus { + ADD_FAILURE() + << "RouteCallback should not be invoked since there are no matching route to override"; + return RouteMatchStatus::Continue; + }, + genHeaders("bat.com", "/", "GET")); + EXPECT_NE(nullptr, dynamic_cast(accepted_route.get())); +} + +TEST_F(RouteMatchOverrideTest, NullRouteOnRequireTlsInternal) { + const std::string yaml = R"EOF( +name: foo +virtual_hosts: + - name: bar + domains: ["*"] + routes: + - match: { prefix: "/foo/bar/baz" } + route: + cluster: foo_bar_baz + - match: { prefix: "/foo/bar" } + route: + cluster: foo_bar + - match: { prefix: "/" } + route: + cluster: default + require_tls: EXTERNAL_ONLY +)EOF"; + + TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + RouteConstSharedPtr accepted_route = config.route( + [](RouteConstSharedPtr, RouteEvalStatus) -> RouteMatchStatus { + ADD_FAILURE() + << "RouteCallback should not be invoked since there are no matching route to override"; + return RouteMatchStatus::Continue; + }, + genHeaders("bat.com", "/", "GET")); + EXPECT_NE(nullptr, dynamic_cast(accepted_route.get())); +} + } // namespace } // namespace Router } // namespace Envoy diff --git a/test/mocks/http/mocks.h b/test/mocks/http/mocks.h index 4bbf40e7d657..4aa3c9b9e839 100644 --- a/test/mocks/http/mocks.h +++ b/test/mocks/http/mocks.h @@ -136,6 +136,7 @@ class MockStreamDecoderFilterCallbacks : public StreamDecoderFilterCallbacks, MOCK_METHOD(void, resetStream, ()); MOCK_METHOD(Upstream::ClusterInfoConstSharedPtr, clusterInfo, ()); MOCK_METHOD(Router::RouteConstSharedPtr, route, ()); + MOCK_METHOD(Router::RouteConstSharedPtr, route, (const Router::RouteCallback&)); MOCK_METHOD(void, requestRouteConfigUpdate, (Http::RouteConfigUpdatedCallbackSharedPtr)); MOCK_METHOD(absl::optional, routeConfig, ()); MOCK_METHOD(void, clearRouteCache, ()); @@ -214,6 +215,7 @@ class MockStreamEncoderFilterCallbacks : public StreamEncoderFilterCallbacks, MOCK_METHOD(void, requestRouteConfigUpdate, (std::function)); MOCK_METHOD(bool, canRequestRouteConfigUpdate, ()); MOCK_METHOD(Router::RouteConstSharedPtr, route, ()); + MOCK_METHOD(Router::RouteConstSharedPtr, route, (const Router::RouteCallback&)); MOCK_METHOD(void, clearRouteCache, ()); MOCK_METHOD(uint64_t, streamId, (), (const)); MOCK_METHOD(StreamInfo::StreamInfo&, streamInfo, ()); diff --git a/test/mocks/router/mocks.cc b/test/mocks/router/mocks.cc index 32e5ce7ba79e..c04efe43e255 100644 --- a/test/mocks/router/mocks.cc +++ b/test/mocks/router/mocks.cc @@ -104,6 +104,7 @@ MockRouteEntry::~MockRouteEntry() = default; MockConfig::MockConfig() : route_(new NiceMock()) { ON_CALL(*this, route(_, _, _)).WillByDefault(Return(route_)); + ON_CALL(*this, route(_, _, _, _)).WillByDefault(Return(route_)); ON_CALL(*this, internalOnlyHeaders()).WillByDefault(ReturnRef(internal_only_headers_)); ON_CALL(*this, name()).WillByDefault(ReturnRef(name_)); ON_CALL(*this, usesVhds()).WillByDefault(Return(false)); diff --git a/test/mocks/router/mocks.h b/test/mocks/router/mocks.h index 3b2ec8ee189a..e17bdc2e1fd3 100644 --- a/test/mocks/router/mocks.h +++ b/test/mocks/router/mocks.h @@ -430,6 +430,11 @@ class MockConfig : public Config { (const Http::RequestHeaderMap&, const Envoy::StreamInfo::StreamInfo&, uint64_t random_value), (const)); + MOCK_METHOD(RouteConstSharedPtr, route, + (const RouteCallback& cb, const Http::RequestHeaderMap&, + const Envoy::StreamInfo::StreamInfo&, uint64_t random_value), 
+ (const)); + MOCK_METHOD(const std::list&, internalOnlyHeaders, (), (const)); MOCK_METHOD(const std::string&, name, (), (const)); MOCK_METHOD(bool, usesVhds, (), (const)); From 57194f4bdc02c249eb5012f640ae5e29242b1232 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Mon, 11 May 2020 08:45:09 -0400 Subject: [PATCH 129/909] http: adding CONNECT example configs and improving docs (#11066) Risk Level: n/a Testing: n/a Docs Changes: connect docs improved Release Notes: n/a Part of #1451 and #1630 Signed-off-by: Alyssa Wilk --- configs/configgen.py | 3 + configs/encapsulate_in_connect.v3.yaml | 38 +++++++++++ configs/terminate_connect.v3.yaml | 64 +++++++++++++++++++ .../intro/arch_overview/http/upgrades.rst | 14 ++++ test/config_test/config_test.cc | 2 + test/config_test/example_configs_test.cc | 4 +- 6 files changed, 123 insertions(+), 2 deletions(-) create mode 100644 configs/encapsulate_in_connect.v3.yaml create mode 100644 configs/terminate_connect.v3.yaml diff --git a/configs/configgen.py b/configs/configgen.py index c255b0d4e2a1..557dd9ed2b1b 100755 --- a/configs/configgen.py +++ b/configs/configgen.py @@ -134,3 +134,6 @@ def generate_config(template_path, template, output_file, **context): for google_ext in ['v2.yaml']: shutil.copy(os.path.join(SCRIPT_DIR, 'google_com_proxy.%s' % google_ext), OUT_DIR) + +shutil.copy(os.path.join(SCRIPT_DIR, 'encapsulate_in_connect.v3.yaml'), OUT_DIR) +shutil.copy(os.path.join(SCRIPT_DIR, 'terminate_connect.v3.yaml'), OUT_DIR) diff --git a/configs/encapsulate_in_connect.v3.yaml b/configs/encapsulate_in_connect.v3.yaml new file mode 100644 index 000000000000..c6470b13ed58 --- /dev/null +++ b/configs/encapsulate_in_connect.v3.yaml @@ -0,0 +1,38 @@ +admin: + access_log_path: /tmp/admin_access.log + address: + socket_address: + protocol: TCP + address: 127.0.0.1 + port_value: 9903 +static_resources: + listeners: + - name: listener_0 + address: + socket_address: + protocol: TCP + address: 127.0.0.1 + port_value: 10000 + filter_chains: + - filters: + - name: tcp + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy + stat_prefix: tcp_stats + cluster: "cluster_0" + tunneling_config: + hostname: host.com + clusters: + - name: cluster_0 + connect_timeout: 5s + http2_protocol_options: + {} + load_assignment: + cluster_name: cluster_0 + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 10001 diff --git a/configs/terminate_connect.v3.yaml b/configs/terminate_connect.v3.yaml new file mode 100644 index 000000000000..419bd80c6b8e --- /dev/null +++ b/configs/terminate_connect.v3.yaml @@ -0,0 +1,64 @@ +admin: + access_log_path: /tmp/admin_access.log + address: + socket_address: + protocol: TCP + address: 127.0.0.1 + port_value: 9902 +static_resources: + listeners: + - name: listener_0 + address: + socket_address: + protocol: TCP + address: 127.0.0.1 + port_value: 10001 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + route_config: + name: local_route + virtual_hosts: + - name: local_service + domains: + - "*" + routes: + - match: + connect_matcher: + {} + route: + cluster: service_google + upgrade_configs: + - upgrade_type: CONNECT + connect_config: + {} + http_filters: + - name: envoy.filters.http.router + http2_protocol_options: + allow_connect: true + upgrade_configs: + - 
upgrade_type: CONNECT + clusters: + - name: service_google + connect_timeout: 0.25s + type: LOGICAL_DNS + # Comment out the following line to test on v6 networks + dns_lookup_family: V4_ONLY + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: service_google + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: www.google.com + port_value: 443 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + sni: www.google.com diff --git a/docs/root/intro/arch_overview/http/upgrades.rst b/docs/root/intro/arch_overview/http/upgrades.rst index 5d0620d44bb6..36af0fc7b809 100644 --- a/docs/root/intro/arch_overview/http/upgrades.rst +++ b/docs/root/intro/arch_overview/http/upgrades.rst @@ -89,3 +89,17 @@ upgrade requests or responses with bodies. .. This mode of CONNECT support can create major security holes if configured correctly, as the upstream .. will be forwarded *unsanitized* headers if they are in the body payload. Please use with caution +.. Tunneling TCP over HTTP/2 +.. ^^^^^^^^^^^^^^^^^^^^^^^^^ +.. Envoy also has support for transforming raw TCP into HTTP/2 CONNECT requests. This can be used to +.. proxy multiplexed TCP over pre-warmed secure connections and amortize the cost of any TLS handshake. +.. An example set up proxying SMTP would look something like this +.. +.. [SMTP Upstream] --- raw SMTP --- [L2 Envoy] --- SMTP tunneled over HTTP/2 --- [L1 Envoy] --- raw SMTP --- [Client] +.. +.. Examples of such a set up can be found in the Envoy example config `directory ` +.. If you run `bazel-bin/source/exe/envoy-static --config-path configs/encapsulate_in_connect.yaml --base-id 1` +.. and `bazel-bin/source/exe/envoy-static --config-path configs/terminate_connect.yaml` +.. you will be running two Envoys, the first listening for TCP traffic on port 10000 and encapsulating it in an HTTP/2 +.. CONNECT request, and the second listening for HTTP/2 on 10001, stripping the CONNECT headers, and forwarding the +.. original TCP upstream, in this case to google.com. diff --git a/test/config_test/config_test.cc b/test/config_test/config_test.cc index 71f30f2eb11a..9fb72818f817 100644 --- a/test/config_test/config_test.cc +++ b/test/config_test/config_test.cc @@ -168,6 +168,8 @@ uint32_t run(const std::string& directory) { ENVOY_LOG_MISC(info, "testing {}.\n", filename); OptionsImpl options( Envoy::Server::createTestOptionsImpl(filename, "", Network::Address::IpVersion::v6)); + // Avoid contention issues with other tests over the hot restart domain socket. + options.setHotRestartDisabled(true); ConfigTest test1(options); envoy::config::bootstrap::v3::Bootstrap bootstrap; Server::InstanceUtil::loadBootstrapConfig(bootstrap, options, diff --git a/test/config_test/example_configs_test.cc b/test/config_test/example_configs_test.cc index e6255dcee21a..1439e8701af5 100644 --- a/test/config_test/example_configs_test.cc +++ b/test/config_test/example_configs_test.cc @@ -21,9 +21,9 @@ TEST(ExampleConfigsTest, All) { #ifdef __APPLE__ // freebind/freebind.yaml is not supported on macOS and disabled via Bazel. 
- EXPECT_EQ(21UL, ConfigTest::run(directory)); + EXPECT_EQ(23UL, ConfigTest::run(directory)); #else - EXPECT_EQ(22UL, ConfigTest::run(directory)); + EXPECT_EQ(24UL, ConfigTest::run(directory)); #endif ConfigTest::testMerge(); From 3b494753db1dbd5cc6ea4c282ac7c9fadf9dd4a6 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Mon, 11 May 2020 13:57:05 -0400 Subject: [PATCH 130/909] test: fixing test docs (#11147) Replacing the deprecated local_resources flag with local_ram_resources and local_cpu_resouces. Signed-off-by: Alyssa Wilk --- test/integration/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/README.md b/test/integration/README.md index ef24fa0e439e..19cf5cf4dc7a 100644 --- a/test/integration/README.md +++ b/test/integration/README.md @@ -160,7 +160,7 @@ The full command might look something like ``` bazel test //test/integration:http2_upstream_integration_test \ --test_arg=--gtest_filter="IpVersions/Http2UpstreamIntegrationTest.RouterRequestAndResponseWithBodyNoBuffer/IPv6" \ ---jobs 60 --local_resources 100000000000,100000000000,10000000 --runs_per_test=1000 --test_arg="-l trace" +--jobs 60 --local_ram_resources=100000000000 --local_cpu_resources=100000000000 --runs_per_test=1000 --test_arg="-l trace" ``` ## Debugging test flakes From 5ee4c45b8b2c6c679dc7468a1971bcc95c7ad617 Mon Sep 17 00:00:00 2001 From: yanavlasov Date: Mon, 11 May 2020 14:03:46 -0400 Subject: [PATCH 131/909] Use node.js CPE for http-parser (#11142) Signed-off-by: Yan Avlasov --- bazel/repository_locations.bzl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 9561aae50d6f..ee1776cd4372 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -288,7 +288,7 @@ DEPENDENCY_REPOSITORIES = dict( strip_prefix = "http-parser-2.9.3", urls = ["https://github.com/nodejs/http-parser/archive/v2.9.3.tar.gz"], use_category = ["dataplane"], - cpe = "N/A", + cpe = "cpe:2.3:a:nodejs:node.js:*", ), com_github_pallets_jinja = dict( sha256 = "db49236731373e4f3118af880eb91bb0aa6978bc0cf8b35760f6a026f1a9ffc4", From e171cf957df1fd58e1877cee847232d52a6a8a3a Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Mon, 11 May 2020 15:40:29 -0400 Subject: [PATCH 132/909] http: fixing connection close behavior (#10957) Fixing "Connection: Close" behavior for HTTP/1.x responses for correctness. Now closing the connection on early HTTP/1.0 responses, or if incoming headers have the "close" token mixed with other Connection or Proxy-Connection tokens. These changes are applied to the HttpConnectionManager, HTTP/1.1 connection pool and HTTP/1.1. health checker. 
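As a standalone illustration only (not part of this patch), the token-aware check described above can be sketched in a few lines of plain C++; the helper names hasToken and shouldClose are invented for the example, while the actual change lives in the new HeaderUtility::shouldCloseConnection() added in the diff below:

    #include <algorithm>
    #include <cctype>
    #include <sstream>
    #include <string>

    // Case-insensitive test for a token inside a comma-separated header value,
    // so "Connection: te,close" is treated the same as "Connection: close".
    bool hasToken(const std::string& header_value, const std::string& token) {
      std::stringstream ss(header_value);
      std::string item;
      while (std::getline(ss, item, ',')) {
        const auto begin = item.find_first_not_of(" \t");
        const auto end = item.find_last_not_of(" \t");
        if (begin == std::string::npos) {
          continue;
        }
        std::string trimmed = item.substr(begin, end - begin + 1);
        std::transform(trimmed.begin(), trimmed.end(), trimmed.begin(),
                       [](unsigned char c) { return std::tolower(c); });
        if (trimmed == token) {
          return true;
        }
      }
      return false;
    }

    // Mirrors the intent of the new helper: HTTP/1.0 closes unless a keep-alive
    // token is present; otherwise a "close" token in Connection or
    // Proxy-Connection forces a close, even when mixed with other tokens.
    bool shouldClose(bool is_http10, const std::string& connection,
                     const std::string& proxy_connection) {
      if (is_http10 && !hasToken(connection, "keep-alive")) {
        return true;
      }
      return hasToken(connection, "close") || hasToken(proxy_connection, "close");
    }

The patch centralizes this logic in HeaderUtility::shouldCloseConnection() and reuses it from the connection manager, the HTTP/1.1 connection pool, and the HTTP health checker, guarded by the runtime flag named below.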
Risk Level: Medium (L7 changes) Testing: New unit tests, fixed integration test Docs Changes: n/a Release Notes: yes Runtime guard: envoy.reloadable_features.fixed_connection_close Signed-off-by: Alyssa Wilk --- docs/root/version_history/current.rst | 1 + source/common/http/conn_manager_impl.cc | 47 +++-- source/common/http/header_utility.cc | 28 +++ source/common/http/header_utility.h | 11 ++ source/common/http/http1/conn_pool.cc | 40 ++-- source/common/runtime/runtime_features.cc | 1 + source/common/upstream/health_checker_impl.cc | 14 +- test/common/http/conn_manager_impl_test.cc | 187 ++++++++++++------ test/common/http/header_utility_test.cc | 21 ++ test/common/http/http1/BUILD | 1 + test/common/http/http1/conn_pool_test.cc | 79 +++++++- test/common/upstream/BUILD | 1 + .../upstream/health_checker_impl_test.cc | 52 +++++ test/integration/header_integration_test.cc | 1 + 14 files changed, 385 insertions(+), 99 deletions(-) diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 61d3eb38b5b4..73ad7c1d31ee 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -22,6 +22,7 @@ Changes * health checks: allow configuring health check transport sockets by specifying :ref:`transport socket match criteria `. * http: added :ref:`stripping port from host header ` support. * http: fixed a bug where in some cases slash was moved from path to query string when :ref:`merging of adjacent slashes` is enabled. +* http: fixed several bugs with applying correct connection close behavior across the http connection manager, health checker, and connection pool. This behavior may be temporarily reverted by setting runtime feature `envoy.reloadable_features.fix_connection_close` to false. * http: fixed a bug where the upgrade header was not cleared on responses to non-upgrade requests. Can be reverted temporarily by setting runtime feature `envoy.reloadable_features.fix_upgrade_response` to false. * http: remove legacy connection pool code and their runtime features: `envoy.reloadable_features.new_http1_connection_pool_behavior` and diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index 2d1e4808cab3..1765fdb60b33 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -766,15 +766,26 @@ uint32_t ConnectionManagerImpl::ActiveStream::localPort() { // can't route select properly without full headers), checking state required to // serve error responses (connection close, head requests, etc), and // modifications which may themselves affect route selection. -// -// TODO(alyssawilk) all the calls here should be audited for order priority, -// e.g. many early returns do not currently handle connection: close properly. void ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& headers, bool end_stream) { ScopeTrackerScopeState scope(this, connection_manager_.read_callbacks_->connection().dispatcher()); request_headers_ = std::move(headers); + // Both saw_connection_close_ and is_head_request_ affect local replies: set + // them as early as possible. 
+ const Protocol protocol = connection_manager_.codec_->protocol(); + const bool fixed_connection_close = + Runtime::runtimeFeatureEnabled("envoy.reloadable_features.fixed_connection_close"); + if (fixed_connection_close) { + state_.saw_connection_close_ = + HeaderUtility::shouldCloseConnection(protocol, *request_headers_); + } + if (request_headers_->Method() && Http::Headers::get().MethodValues.Head == + request_headers_->Method()->value().getStringView()) { + state_.is_head_request_ = true; + } + if (HeaderUtility::isConnect(*request_headers_) && !request_headers_->Path() && !Runtime::runtimeFeatureEnabled("envoy.reloadable_features.stop_faking_paths")) { request_headers_->setPath("/"); @@ -793,10 +804,6 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& he snapped_route_config_ = connection_manager_.config_.routeConfigProvider()->config(); } - if (Http::Headers::get().MethodValues.Head == - request_headers_->Method()->value().getStringView()) { - state_.is_head_request_ = true; - } ENVOY_STREAM_LOG(debug, "request headers complete (end_stream={}):\n{}", *this, end_stream, *request_headers_); @@ -834,7 +841,6 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& he connection_manager_.stats_.scope_); // Make sure we are getting a codec version we support. - Protocol protocol = connection_manager_.codec_->protocol(); if (protocol == Protocol::Http10) { // Assume this is HTTP/1.0. This is fine for HTTP/0.9 but this code will also affect any // requests with non-standard version numbers (0.9, 1.3), basically anything which is not @@ -847,7 +853,7 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& he sendLocalReply(false, Code::UpgradeRequired, "", nullptr, state_.is_head_request_, absl::nullopt, StreamInfo::ResponseCodeDetails::get().LowVersion); return; - } else { + } else if (!fixed_connection_close) { // HTTP/1.0 defaults to single-use connections. Make sure the connection // will be closed unless Keep-Alive is present. state_.saw_connection_close_ = true; @@ -857,23 +863,22 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& he state_.saw_connection_close_ = false; } } - } - - if (!request_headers_->Host()) { - if ((protocol == Protocol::Http10) && + if (!request_headers_->Host() && !connection_manager_.config_.http1Settings().default_host_for_http_10_.empty()) { // Add a default host if configured to do so. request_headers_->setHost( connection_manager_.config_.http1Settings().default_host_for_http_10_); - } else { - // Require host header. For HTTP/1.1 Host has already been translated to :authority. - sendLocalReply(Grpc::Common::hasGrpcContentType(*request_headers_), Code::BadRequest, "", - nullptr, state_.is_head_request_, absl::nullopt, - StreamInfo::ResponseCodeDetails::get().MissingHost); - return; } } + if (!request_headers_->Host()) { + // Require host header. For HTTP/1.1 Host has already been translated to :authority. + sendLocalReply(Grpc::Common::hasGrpcContentType(*request_headers_), Code::BadRequest, "", + nullptr, state_.is_head_request_, absl::nullopt, + StreamInfo::ResponseCodeDetails::get().MissingHost); + return; + } + // Verify header sanity checks which should have been performed by the codec. 
ASSERT(HeaderUtility::requestHeadersValid(*request_headers_).has_value() == false); @@ -909,7 +914,7 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& he ConnectionManagerUtility::maybeNormalizeHost(*request_headers_, connection_manager_.config_, localPort()); - if (protocol == Protocol::Http11 && request_headers_->Connection() && + if (!fixed_connection_close && protocol == Protocol::Http11 && request_headers_->Connection() && absl::EqualsIgnoreCase(request_headers_->Connection()->value().getStringView(), Http::Headers::get().ConnectionValues.Close)) { state_.saw_connection_close_ = true; @@ -917,7 +922,7 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& he // Note: Proxy-Connection is not a standard header, but is supported here // since it is supported by http-parser the underlying parser for http // requests. - if (protocol < Protocol::Http2 && !state_.saw_connection_close_ && + if (!fixed_connection_close && protocol < Protocol::Http2 && !state_.saw_connection_close_ && request_headers_->ProxyConnection() && absl::EqualsIgnoreCase(request_headers_->ProxyConnection()->value().getStringView(), Http::Headers::get().ConnectionValues.Close)) { diff --git a/source/common/http/header_utility.cc b/source/common/http/header_utility.cc index 303c07ffbf08..38e8256e1144 100644 --- a/source/common/http/header_utility.cc +++ b/source/common/http/header_utility.cc @@ -223,5 +223,33 @@ HeaderUtility::requestHeadersValid(const RequestHeaderMap& headers) { return absl::nullopt; } +bool HeaderUtility::shouldCloseConnection(Http::Protocol protocol, + const RequestOrResponseHeaderMap& headers) { + // HTTP/1.0 defaults to single-use connections. Make sure the connection will be closed unless + // Keep-Alive is present. + if (protocol == Protocol::Http10 && + (!headers.Connection() || + !Envoy::StringUtil::caseFindToken(headers.Connection()->value().getStringView(), ",", + Http::Headers::get().ConnectionValues.KeepAlive))) { + return true; + } + + if (protocol == Protocol::Http11 && headers.Connection() && + Envoy::StringUtil::caseFindToken(headers.Connection()->value().getStringView(), ",", + Http::Headers::get().ConnectionValues.Close)) { + return true; + } + + // Note: Proxy-Connection is not a standard header, but is supported here + // since it is supported by http-parser the underlying parser for http + // requests. + if (protocol < Protocol::Http2 && headers.ProxyConnection() && + Envoy::StringUtil::caseFindToken(headers.ProxyConnection()->value().getStringView(), ",", + Http::Headers::get().ConnectionValues.Close)) { + return true; + } + return false; +} + } // namespace Http } // namespace Envoy diff --git a/source/common/http/header_utility.h b/source/common/http/header_utility.h index 69a879888bb5..71d45f8d3763 100644 --- a/source/common/http/header_utility.h +++ b/source/common/http/header_utility.h @@ -5,6 +5,7 @@ #include "envoy/common/regex.h" #include "envoy/config/route/v3/route_components.pb.h" #include "envoy/http/header_map.h" +#include "envoy/http/protocol.h" #include "envoy/json/json_object.h" #include "envoy/type/v3/range.pb.h" @@ -136,6 +137,16 @@ class HeaderUtility { static absl::optional> requestHeadersValid(const RequestHeaderMap& headers); + /** + * Determines if the response should be framed by Connection: Close based on protocol + * and headers. 
+ * @param protocol the protocol of the request + * @param headers the request or response headers + * @return if the response should be framed by Connection: Close + */ + static bool shouldCloseConnection(Http::Protocol protocol, + const RequestOrResponseHeaderMap& headers); + /** * @brief Remove the port part from host/authority header if it is equal to provided port */ diff --git a/source/common/http/http1/conn_pool.cc b/source/common/http/http1/conn_pool.cc index f3d75f586341..d782420ea57c 100644 --- a/source/common/http/http1/conn_pool.cc +++ b/source/common/http/http1/conn_pool.cc @@ -12,6 +12,7 @@ #include "common/http/codec_client.h" #include "common/http/codes.h" +#include "common/http/header_utility.h" #include "common/http/headers.h" #include "common/runtime/runtime_features.h" @@ -81,23 +82,30 @@ ConnPoolImpl::StreamWrapper::~StreamWrapper() { void ConnPoolImpl::StreamWrapper::onEncodeComplete() { encode_complete_ = true; } void ConnPoolImpl::StreamWrapper::decodeHeaders(ResponseHeaderMapPtr&& headers, bool end_stream) { - // If Connection: close OR - // Http/1.0 and not Connection: keep-alive OR - // Proxy-Connection: close - if ((headers->Connection() && - (absl::EqualsIgnoreCase(headers->Connection()->value().getStringView(), - Headers::get().ConnectionValues.Close))) || - (parent_.codec_client_->protocol() == Protocol::Http10 && - (!headers->Connection() || - !absl::EqualsIgnoreCase(headers->Connection()->value().getStringView(), - Headers::get().ConnectionValues.KeepAlive))) || - (headers->ProxyConnection() && - (absl::EqualsIgnoreCase(headers->ProxyConnection()->value().getStringView(), - Headers::get().ConnectionValues.Close)))) { - parent_.parent_.host_->cluster().stats().upstream_cx_close_notify_.inc(); - close_connection_ = true; + if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.fixed_connection_close")) { + close_connection_ = + HeaderUtility::shouldCloseConnection(parent_.codec_client_->protocol(), *headers); + if (close_connection_) { + parent_.parent_.host_->cluster().stats().upstream_cx_close_notify_.inc(); + } + } else { + // If Connection: close OR + // Http/1.0 and not Connection: keep-alive OR + // Proxy-Connection: close + if ((headers->Connection() && + (absl::EqualsIgnoreCase(headers->Connection()->value().getStringView(), + Headers::get().ConnectionValues.Close))) || + (parent_.codec_client_->protocol() == Protocol::Http10 && + (!headers->Connection() || + !absl::EqualsIgnoreCase(headers->Connection()->value().getStringView(), + Headers::get().ConnectionValues.KeepAlive))) || + (headers->ProxyConnection() && + (absl::EqualsIgnoreCase(headers->ProxyConnection()->value().getStringView(), + Headers::get().ConnectionValues.Close)))) { + parent_.parent_.host_->cluster().stats().upstream_cx_close_notify_.inc(); + close_connection_ = true; + } } - ResponseDecoderWrapper::decodeHeaders(std::move(headers), end_stream); } diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc index a5d84003a3a6..0a03fed9bebc 100644 --- a/source/common/runtime/runtime_features.cc +++ b/source/common/runtime/runtime_features.cc @@ -62,6 +62,7 @@ constexpr const char* runtime_features[] = { "envoy.deprecated_features.allow_deprecated_extension_names", "envoy.reloadable_features.ext_authz_http_service_enable_case_sensitive_string_matcher", "envoy.reloadable_features.fix_upgrade_response", + "envoy.reloadable_features.fixed_connection_close", "envoy.reloadable_features.listener_in_place_filterchain_update", 
"envoy.reloadable_features.stop_faking_paths", }; diff --git a/source/common/upstream/health_checker_impl.cc b/source/common/upstream/health_checker_impl.cc index 51928547973f..87be8234663b 100644 --- a/source/common/upstream/health_checker_impl.cc +++ b/source/common/upstream/health_checker_impl.cc @@ -16,8 +16,10 @@ #include "common/config/well_known_names.h" #include "common/grpc/common.h" #include "common/http/header_map_impl.h" +#include "common/http/header_utility.h" #include "common/network/address_impl.h" #include "common/router/router.h" +#include "common/runtime/runtime_features.h" #include "common/runtime/runtime_impl.h" #include "common/upstream/host_utility.h" @@ -344,6 +346,14 @@ bool HttpHealthCheckerImpl::HttpActiveHealthCheckSession::shouldClose() const { return false; } + if (!parent_.reuse_connection_) { + return true; + } + + if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.fixed_connection_close")) { + return Http::HeaderUtility::shouldCloseConnection(client_->protocol(), *response_headers_); + } + if (response_headers_->Connection()) { const bool close = absl::EqualsIgnoreCase(response_headers_->Connection()->value().getStringView(), @@ -362,10 +372,6 @@ bool HttpHealthCheckerImpl::HttpActiveHealthCheckSession::shouldClose() const { } } - if (!parent_.reuse_connection_) { - return true; - } - return false; } diff --git a/test/common/http/conn_manager_impl_test.cc b/test/common/http/conn_manager_impl_test.cc index 535cf78fb929..5db00349eaf4 100644 --- a/test/common/http/conn_manager_impl_test.cc +++ b/test/common/http/conn_manager_impl_test.cc @@ -454,12 +454,11 @@ TEST_F(HttpConnectionManagerImplTest, HeaderOnlyRequestAndResponse) { // When dispatch is called on the codec, we pretend to get a new stream and then fire a headers // only request into it. Then we respond into the filter. - RequestDecoder* decoder = nullptr; NiceMock encoder; EXPECT_CALL(*codec_, dispatch(_)) .Times(2) .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { - decoder = &conn_manager_->newStream(encoder); + RequestDecoder* decoder = &conn_manager_->newStream(encoder); // Test not charging stats on the second call. if (data.length() == 4) { @@ -515,11 +514,10 @@ TEST_F(HttpConnectionManagerImplTest, 100ContinueResponse) { // When dispatch is called on the codec, we pretend to get a new stream and then fire a headers // only request into it. Then we respond into the filter. - RequestDecoder* decoder = nullptr; NiceMock encoder; EXPECT_CALL(*codec_, dispatch(_)) .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { - decoder = &conn_manager_->newStream(encoder); + RequestDecoder* decoder = &conn_manager_->newStream(encoder); // Test not charging stats on the second call. RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ @@ -652,11 +650,10 @@ TEST_F(HttpConnectionManagerImplTest, 100ContinueResponseWithDecoderPause) { EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)); - RequestDecoder* decoder = nullptr; NiceMock encoder; EXPECT_CALL(*codec_, dispatch(_)) .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { - decoder = &conn_manager_->newStream(encoder); + RequestDecoder* decoder = &conn_manager_->newStream(encoder); // Test not charging stats on the second call. 
RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ @@ -1296,11 +1293,10 @@ TEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlow) { use_remote_address_ = false; EXPECT_CALL(random_, uuid()).Times(0); - RequestDecoder* decoder = nullptr; NiceMock encoder; EXPECT_CALL(*codec_, dispatch(_)) .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { - decoder = &conn_manager_->newStream(encoder); + RequestDecoder* decoder = &conn_manager_->newStream(encoder); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":method", "GET"}, @@ -1366,11 +1362,10 @@ TEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlowIngressDecorat use_remote_address_ = false; EXPECT_CALL(random_, uuid()).Times(0); - RequestDecoder* decoder = nullptr; NiceMock encoder; EXPECT_CALL(*codec_, dispatch(_)) .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { - decoder = &conn_manager_->newStream(encoder); + RequestDecoder* decoder = &conn_manager_->newStream(encoder); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":method", "GET"}, @@ -1434,11 +1429,10 @@ TEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlowIngressDecorat use_remote_address_ = false; EXPECT_CALL(random_, uuid()).Times(0); - RequestDecoder* decoder = nullptr; NiceMock encoder; EXPECT_CALL(*codec_, dispatch(_)) .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { - decoder = &conn_manager_->newStream(encoder); + RequestDecoder* decoder = &conn_manager_->newStream(encoder); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":method", "GET"}, @@ -1500,11 +1494,10 @@ TEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlowIngressDecorat use_remote_address_ = false; EXPECT_CALL(random_, uuid()).Times(0); - RequestDecoder* decoder = nullptr; NiceMock encoder; EXPECT_CALL(*codec_, dispatch(_)) .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { - decoder = &conn_manager_->newStream(encoder); + RequestDecoder* decoder = &conn_manager_->newStream(encoder); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":method", "GET"}, @@ -1583,11 +1576,10 @@ TEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlowEgressDecorato use_remote_address_ = false; EXPECT_CALL(random_, uuid()).Times(0); - RequestDecoder* decoder = nullptr; NiceMock encoder; EXPECT_CALL(*codec_, dispatch(_)) .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { - decoder = &conn_manager_->newStream(encoder); + RequestDecoder* decoder = &conn_manager_->newStream(encoder); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":method", "GET"}, @@ -1667,11 +1659,10 @@ TEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlowEgressDecorato use_remote_address_ = false; EXPECT_CALL(random_, uuid()).Times(0); - RequestDecoder* decoder = nullptr; NiceMock encoder; EXPECT_CALL(*codec_, dispatch(_)) .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { - decoder = &conn_manager_->newStream(encoder); + RequestDecoder* decoder = &conn_manager_->newStream(encoder); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":method", "GET"}, @@ -1749,11 +1740,10 @@ TEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlowEgressDecorato use_remote_address_ = false; EXPECT_CALL(random_, uuid()).Times(0); - RequestDecoder* decoder = nullptr; NiceMock encoder; EXPECT_CALL(*codec_, dispatch(_)) .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { - decoder = &conn_manager_->newStream(encoder); + 
RequestDecoder* decoder = &conn_manager_->newStream(encoder); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":method", "GET"}, @@ -1807,11 +1797,10 @@ TEST_F(HttpConnectionManagerImplTest, use_remote_address_ = false; EXPECT_CALL(random_, uuid()).Times(0); - RequestDecoder* decoder = nullptr; NiceMock encoder; EXPECT_CALL(*codec_, dispatch(_)) .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { - decoder = &conn_manager_->newStream(encoder); + RequestDecoder* decoder = &conn_manager_->newStream(encoder); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":method", "GET"}, @@ -1865,11 +1854,10 @@ TEST_F(HttpConnectionManagerImplTest, TestAccessLog) { local_address); })); - RequestDecoder* decoder = nullptr; NiceMock encoder; EXPECT_CALL(*codec_, dispatch(_)) .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { - decoder = &conn_manager_->newStream(encoder); + RequestDecoder* decoder = &conn_manager_->newStream(encoder); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":method", "GET"}, @@ -1911,11 +1899,10 @@ TEST_F(HttpConnectionManagerImplTest, TestDownstreamDisconnectAccessLog) { stream_info.hasResponseFlag(StreamInfo::ResponseFlag::DownstreamConnectionTermination)); })); - RequestDecoder* decoder = nullptr; NiceMock encoder; EXPECT_CALL(*codec_, dispatch(_)) .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { - decoder = &conn_manager_->newStream(encoder); + RequestDecoder* decoder = &conn_manager_->newStream(encoder); RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ {":method", "GET"}, {":authority", "host"}, {":path", "/"}}}; @@ -1952,11 +1939,10 @@ TEST_F(HttpConnectionManagerImplTest, TestAccessLogWithTrailers) { EXPECT_NE(nullptr, stream_info.routeEntry()); })); - RequestDecoder* decoder = nullptr; NiceMock encoder; EXPECT_CALL(*codec_, dispatch(_)) .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { - decoder = &conn_manager_->newStream(encoder); + RequestDecoder* decoder = &conn_manager_->newStream(encoder); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":method", "GET"}, @@ -2003,11 +1989,10 @@ TEST_F(HttpConnectionManagerImplTest, TestAccessLogWithInvalidRequest) { EXPECT_EQ(nullptr, stream_info.routeEntry()); })); - RequestDecoder* decoder = nullptr; NiceMock encoder; EXPECT_CALL(*codec_, dispatch(_)) .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { - decoder = &conn_manager_->newStream(encoder); + RequestDecoder* decoder = &conn_manager_->newStream(encoder); // These request headers are missing the necessary ":host" RequestHeaderMapPtr headers{ @@ -2045,11 +2030,10 @@ TEST_F(HttpConnectionManagerImplTest, TestAccessLogSsl) { EXPECT_NE(nullptr, stream_info.routeEntry()); })); - RequestDecoder* decoder = nullptr; NiceMock encoder; EXPECT_CALL(*codec_, dispatch(_)) .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { - decoder = &conn_manager_->newStream(encoder); + RequestDecoder* decoder = &conn_manager_->newStream(encoder); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":method", "GET"}, @@ -2090,11 +2074,10 @@ TEST_F(HttpConnectionManagerImplTest, DoNotStartSpanIfTracingIsNotEnabled) { callbacks.addStreamDecoderFilter(filter); })); - RequestDecoder* decoder = nullptr; NiceMock encoder; EXPECT_CALL(*codec_, dispatch(_)) .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { - decoder = &conn_manager_->newStream(encoder); + RequestDecoder* decoder = &conn_manager_->newStream(encoder); 
RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":method", "GET"}, @@ -2117,10 +2100,9 @@ TEST_F(HttpConnectionManagerImplTest, DoNotStartSpanIfTracingIsNotEnabled) { TEST_F(HttpConnectionManagerImplTest, NoPath) { setup(false, ""); - RequestDecoder* decoder = nullptr; NiceMock encoder; EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { - decoder = &conn_manager_->newStream(encoder); + RequestDecoder* decoder = &conn_manager_->newStream(encoder); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":method", "NOT_CONNECT"}}}; decoder->decodeHeaders(std::move(headers), true); @@ -2865,6 +2847,107 @@ TEST_F(HttpConnectionManagerImplTest, MaxStreamDurationCallbackResetStream) { EXPECT_EQ(1U, stats_.named_.downstream_rq_rx_reset_.value()); } +TEST_F(HttpConnectionManagerImplTest, Http10Rejected) { + setup(false, ""); + RequestDecoder* decoder = nullptr; + NiceMock encoder; + EXPECT_CALL(*codec_, protocol()).Times(AnyNumber()).WillRepeatedly(Return(Protocol::Http10)); + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { + decoder = &conn_manager_->newStream(encoder); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":authority", "host"}, {":method", "GET"}, {":path", "/"}}}; + decoder->decodeHeaders(std::move(headers), true); + data.drain(4); + return Http::okStatus(); + })); + + EXPECT_CALL(encoder, encodeHeaders(_, true)) + .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { + EXPECT_EQ("426", headers.Status()->value().getStringView()); + EXPECT_EQ("close", headers.Connection()->value().getStringView()); + })); + + Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); +} + +TEST_F(HttpConnectionManagerImplTest, Http10ConnCloseLegacy) { + http1_settings_.accept_http_10_ = true; + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.fixed_connection_close", "false"}}); + setup(false, ""); + RequestDecoder* decoder = nullptr; + NiceMock encoder; + EXPECT_CALL(*codec_, protocol()).Times(AnyNumber()).WillRepeatedly(Return(Protocol::Http10)); + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { + decoder = &conn_manager_->newStream(encoder); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":authority", "host:80"}, {":method", "CONNECT"}}}; + decoder->decodeHeaders(std::move(headers), true); + data.drain(4); + return Http::okStatus(); + })); + + EXPECT_CALL(encoder, encodeHeaders(_, true)) + .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { + EXPECT_EQ("close", headers.Connection()->value().getStringView()); + })); + + Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); +} + +TEST_F(HttpConnectionManagerImplTest, ProxyConnectLegacyClose) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.fixed_connection_close", "false"}}); + setup(false, ""); + RequestDecoder* decoder = nullptr; + NiceMock encoder; + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { + decoder = &conn_manager_->newStream(encoder); + RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ + {":authority", "host:80"}, {":method", "CONNECT"}, {"proxy-connection", "close"}}}; + decoder->decodeHeaders(std::move(headers), true); + data.drain(4); + return 
Http::okStatus(); + })); + + EXPECT_CALL(encoder, encodeHeaders(_, true)) + .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { + EXPECT_EQ("close", headers.Connection()->value().getStringView()); + })); + + Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); +} + +TEST_F(HttpConnectionManagerImplTest, ConnectLegacyClose) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.fixed_connection_close", "false"}}); + setup(false, ""); + RequestDecoder* decoder = nullptr; + NiceMock encoder; + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { + decoder = &conn_manager_->newStream(encoder); + RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ + {":authority", "host"}, {":method", "CONNECT"}, {"connection", "close"}}}; + decoder->decodeHeaders(std::move(headers), true); + data.drain(4); + return Http::okStatus(); + })); + + EXPECT_CALL(encoder, encodeHeaders(_, true)) + .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { + EXPECT_EQ("close", headers.Connection()->value().getStringView()); + })); + + Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); +} + TEST_F(HttpConnectionManagerImplTest, MaxStreamDurationCallbackNotCalledIfResetStreamValidly) { max_stream_duration_ = std::chrono::milliseconds(5000); setup(false, ""); @@ -2888,10 +2971,9 @@ TEST_F(HttpConnectionManagerImplTest, MaxStreamDurationCallbackNotCalledIfResetS TEST_F(HttpConnectionManagerImplTest, RejectWebSocketOnNonWebSocketRoute) { setup(false, ""); - RequestDecoder* decoder = nullptr; NiceMock encoder; EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { - decoder = &conn_manager_->newStream(encoder); + RequestDecoder* decoder = &conn_manager_->newStream(encoder); RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{{":authority", "host"}, {":method", "GET"}, {":path", "/"}, @@ -2954,10 +3036,9 @@ TEST_F(HttpConnectionManagerImplTest, FooUpgradeDrainClose) { // When dispatch is called on the codec, we pretend to get a new stream and then fire a headers // only request into it. Then we respond into the filter. 
- RequestDecoder* decoder = nullptr; EXPECT_CALL(*codec_, dispatch(_)) .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { - decoder = &conn_manager_->newStream(encoder); + RequestDecoder* decoder = &conn_manager_->newStream(encoder); RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{{":authority", "host"}, {":method", "GET"}, @@ -2984,14 +3065,13 @@ TEST_F(HttpConnectionManagerImplTest, ConnectAsUpgrade) { setup(false, "envoy-custom-server", false); NiceMock encoder; - RequestDecoder* decoder = nullptr; EXPECT_CALL(filter_factory_, createUpgradeFilterChain("CONNECT", _, _)) .WillRepeatedly(Return(true)); EXPECT_CALL(*codec_, dispatch(_)) .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { - decoder = &conn_manager_->newStream(encoder); + RequestDecoder* decoder = &conn_manager_->newStream(encoder); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":method", "CONNECT"}}}; decoder->decodeHeaders(std::move(headers), false); @@ -3042,10 +3122,9 @@ TEST_F(HttpConnectionManagerImplTest, DrainCloseRaceWithClose) { InSequence s; setup(false, ""); - RequestDecoder* decoder = nullptr; NiceMock encoder; EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { - decoder = &conn_manager_->newStream(encoder); + RequestDecoder* decoder = &conn_manager_->newStream(encoder); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; decoder->decodeHeaders(std::move(headers), true); @@ -3098,10 +3177,9 @@ TEST_F(HttpConnectionManagerImplTest, DrainClose) { return FilterHeadersStatus::StopIteration; })); - RequestDecoder* decoder = nullptr; NiceMock encoder; EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { - decoder = &conn_manager_->newStream(encoder); + RequestDecoder* decoder = &conn_manager_->newStream(encoder); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; decoder->decodeHeaders(std::move(headers), true); @@ -3214,8 +3292,8 @@ TEST_F(HttpConnectionManagerImplTest, ResponseStartBeforeRequestComplete) { .WillOnce(Return(FilterHeadersStatus::StopIteration)); // Start the request - RequestDecoder* decoder = nullptr; NiceMock encoder; + RequestDecoder* decoder = nullptr; EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { decoder = &conn_manager_->newStream(encoder); RequestHeaderMapPtr headers{ @@ -3311,10 +3389,9 @@ TEST_F(HttpConnectionManagerImplTest, TestDownstreamProtocolErrorAccessLog) { EXPECT_TRUE(stream_info.hasResponseFlag(StreamInfo::ResponseFlag::DownstreamProtocolError)); })); - RequestDecoder* decoder = nullptr; NiceMock encoder; EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance&) -> Http::Status { - decoder = &conn_manager_->newStream(encoder); + conn_manager_->newStream(encoder); return codecProtocolError("protocol error"); })); @@ -3342,10 +3419,9 @@ TEST_F(HttpConnectionManagerImplTest, TestDownstreamProtocolErrorAfterHeadersAcc EXPECT_TRUE(stream_info.hasResponseFlag(StreamInfo::ResponseFlag::DownstreamProtocolError)); })); - RequestDecoder* decoder = nullptr; NiceMock encoder; EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance&) -> Http::Status { - decoder = &conn_manager_->newStream(encoder); + RequestDecoder* decoder = &conn_manager_->newStream(encoder); RequestHeaderMapPtr headers{ new 
TestRequestHeaderMapImpl{{":method", "GET"}, {":authority", "host"}, {":path", "/"}}}; @@ -3414,9 +3490,8 @@ TEST_F(HttpConnectionManagerImplTest, IdleTimeout) { })); NiceMock encoder; - RequestDecoder* decoder = nullptr; EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { - decoder = &conn_manager_->newStream(encoder); + RequestDecoder* decoder = &conn_manager_->newStream(encoder); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; decoder->decodeHeaders(std::move(headers), false); @@ -3484,9 +3559,8 @@ TEST_F(HttpConnectionManagerImplTest, ConnectionDuration) { })); NiceMock encoder; - RequestDecoder* decoder = nullptr; EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { - decoder = &conn_manager_->newStream(encoder); + RequestDecoder* decoder = &conn_manager_->newStream(encoder); RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; decoder->decodeHeaders(std::move(headers), false); @@ -5784,9 +5858,8 @@ TEST_F(HttpConnectionManagerImplTest, TestSrdsRouteFound) { scopedRouteConfigProvider()->config()->route_config_.get()), route(_, _, _, _)) .WillOnce(Return(route1)); - RequestDecoder* decoder = nullptr; EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { - decoder = &conn_manager_->newStream(response_encoder_); + RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ {":authority", "host"}, {":method", "GET"}, {":path", "/foo"}}}; decoder->decodeHeaders(std::move(headers), true); diff --git a/test/common/http/header_utility_test.cc b/test/common/http/header_utility_test.cc index 0006ff8ee917..62ec0c0ff7c6 100644 --- a/test/common/http/header_utility_test.cc +++ b/test/common/http/header_utility_test.cc @@ -2,6 +2,7 @@ #include #include "envoy/config/route/v3/route_components.pb.h" +#include "envoy/http/protocol.h" #include "envoy/json/json_object.h" #include "common/http/header_utility.h" @@ -549,5 +550,25 @@ TEST(HeaderIsValidTest, HeaderNameContainsUnderscore) { EXPECT_TRUE(HeaderUtility::headerNameContainsUnderscore("x_something")); } +TEST(PercentEncoding, ShouldCloseConnection) { + EXPECT_TRUE(HeaderUtility::shouldCloseConnection(Protocol::Http10, + TestRequestHeaderMapImpl{{"foo", "bar"}})); + EXPECT_FALSE(HeaderUtility::shouldCloseConnection( + Protocol::Http10, TestRequestHeaderMapImpl{{"connection", "keep-alive"}})); + EXPECT_FALSE(HeaderUtility::shouldCloseConnection( + Protocol::Http10, TestRequestHeaderMapImpl{{"connection", "foo, keep-alive"}})); + + EXPECT_FALSE(HeaderUtility::shouldCloseConnection(Protocol::Http11, + TestRequestHeaderMapImpl{{"foo", "bar"}})); + EXPECT_TRUE(HeaderUtility::shouldCloseConnection( + Protocol::Http11, TestRequestHeaderMapImpl{{"connection", "close"}})); + EXPECT_TRUE(HeaderUtility::shouldCloseConnection( + Protocol::Http11, TestRequestHeaderMapImpl{{"connection", "te,close"}})); + EXPECT_TRUE(HeaderUtility::shouldCloseConnection( + Protocol::Http11, TestRequestHeaderMapImpl{{"proxy-connection", "close"}})); + EXPECT_TRUE(HeaderUtility::shouldCloseConnection( + Protocol::Http11, TestRequestHeaderMapImpl{{"proxy-connection", "foo,close"}})); +} + } // namespace Http } // namespace Envoy diff --git a/test/common/http/http1/BUILD b/test/common/http/http1/BUILD index 4fa2b974dfca..eac82f9734ec 100644 --- 
a/test/common/http/http1/BUILD +++ b/test/common/http/http1/BUILD @@ -63,6 +63,7 @@ envoy_cc_test( "//test/mocks/runtime:runtime_mocks", "//test/mocks/upstream:upstream_mocks", "//test/test_common:simulated_time_system_lib", + "//test/test_common:test_runtime_lib", "//test/test_common:utility_lib", ], ) diff --git a/test/common/http/http1/conn_pool_test.cc b/test/common/http/http1/conn_pool_test.cc index bc74a1b884a4..c9464299a92c 100644 --- a/test/common/http/http1/conn_pool_test.cc +++ b/test/common/http/http1/conn_pool_test.cc @@ -20,6 +20,7 @@ #include "test/mocks/upstream/mocks.h" #include "test/test_common/printers.h" #include "test/test_common/simulated_time_system.h" +#include "test/test_common/test_runtime.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" @@ -719,7 +720,46 @@ TEST_F(Http1ConnPoolImplTest, ProxyConnectionCloseHeader) { callbacks.outer_encoder_->encodeHeaders( TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - // Response with 'proxy-connection: close' which should cause the connection to go away. + // Response with 'proxy-connection: close' which should cause the connection to go away, even if + // there are other tokens in that header. + EXPECT_CALL(conn_pool_, onClientDestroy()); + ResponseHeaderMapPtr response_headers( + new TestResponseHeaderMapImpl{{":status", "200"}, {"Proxy-Connection", "Close, foo"}}); + inner_decoder->decodeHeaders(std::move(response_headers), true); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(0U, cluster_->stats_.upstream_cx_destroy_with_active_rq_.value()); +} + +/** + * Test legacy behavior when upstream sends us 'proxy-connection: close' + */ +TEST_F(Http1ConnPoolImplTest, ProxyConnectionCloseHeaderLegacy) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.fixed_connection_close", "false"}}); + InSequence s; + + // Request 1 should kick off a new connection. + NiceMock outer_decoder; + ConnPoolCallbacks callbacks; + conn_pool_.expectClientCreate(); + Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); + + EXPECT_NE(nullptr, handle); + + NiceMock request_encoder; + ResponseDecoder* inner_decoder; + EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) + .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); + EXPECT_CALL(callbacks.pool_ready_, ready()); + + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + callbacks.outer_encoder_->encodeHeaders( + TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); + + // Response with 'proxy-connection: close' which should cause the connection to go away, even if + // there are other tokens in that header. EXPECT_CALL(conn_pool_, onClientDestroy()); ResponseHeaderMapPtr response_headers( new TestResponseHeaderMapImpl{{":status", "200"}, {"Proxy-Connection", "Close"}}); @@ -763,6 +803,43 @@ TEST_F(Http1ConnPoolImplTest, Http10NoConnectionKeepAlive) { EXPECT_EQ(0U, cluster_->stats_.upstream_cx_destroy_with_active_rq_.value()); } +/** + * Test legacy behavior when upstream is HTTP/1.0 and does not send 'connection: keep-alive' + */ +TEST_F(Http1ConnPoolImplTest, Http10NoConnectionKeepAliveLegacy) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.fixed_connection_close", "false"}}); + InSequence s; + + // Request 1 should kick off a new connection. 
+ NiceMock outer_decoder; + ConnPoolCallbacks callbacks; + conn_pool_.expectClientCreate(Protocol::Http10); + Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); + + EXPECT_NE(nullptr, handle); + + NiceMock request_encoder; + ResponseDecoder* inner_decoder; + EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) + .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); + EXPECT_CALL(callbacks.pool_ready_, ready()); + + conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + callbacks.outer_encoder_->encodeHeaders( + TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); + + // Response without 'connection: keep-alive' which should cause the connection to go away. + EXPECT_CALL(conn_pool_, onClientDestroy()); + ResponseHeaderMapPtr response_headers( + new TestResponseHeaderMapImpl{{":protocol", "HTTP/1.0"}, {":status", "200"}}); + inner_decoder->decodeHeaders(std::move(response_headers), true); + dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(0U, cluster_->stats_.upstream_cx_destroy_with_active_rq_.value()); +} + /** * Test when we reach max requests per connection. */ diff --git a/test/common/upstream/BUILD b/test/common/upstream/BUILD index 9c87ce42c6ce..27ccee4cd7eb 100644 --- a/test/common/upstream/BUILD +++ b/test/common/upstream/BUILD @@ -118,6 +118,7 @@ envoy_cc_test( "//test/mocks/runtime:runtime_mocks", "//test/mocks/upstream:upstream_mocks", "//test/test_common:simulated_time_system_lib", + "//test/test_common:test_runtime_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto", diff --git a/test/common/upstream/health_checker_impl_test.cc b/test/common/upstream/health_checker_impl_test.cc index 1bcacc7973bb..cb178468647a 100644 --- a/test/common/upstream/health_checker_impl_test.cc +++ b/test/common/upstream/health_checker_impl_test.cc @@ -29,6 +29,7 @@ #include "test/mocks/upstream/mocks.h" #include "test/test_common/printers.h" #include "test/test_common/simulated_time_system.h" +#include "test/test_common/test_runtime.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" @@ -568,6 +569,7 @@ class HttpHealthCheckerImplTest : public testing::Test { void expectClientCreate(size_t index, const HostWithHealthCheckMap& health_check_map) { TestSession& test_session = *test_sessions_[index]; test_session.codec_ = new NiceMock(); + ON_CALL(*test_session.codec_, protocol()).WillByDefault(Return(Http::Protocol::Http11)); test_session.client_connection_ = new NiceMock(); connection_index_.push_back(index); codec_index_.push_back(index); @@ -2070,6 +2072,56 @@ TEST_F(HttpHealthCheckerImplTest, ProxyConnectionClose) { test_sessions_[0]->interval_timer_->invokeCallback(); } +TEST_F(HttpHealthCheckerImplTest, ConnectionCloseLegacy) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.fixed_connection_close", "false"}}); + setupNoServiceValidationHC(); + EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)); + + cluster_->prioritySet().getMockHostSet(0)->hosts_ = { + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + expectSessionCreate(); + expectStreamCreate(0); + EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); + health_checker_->start(); + + EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _)); + EXPECT_CALL(*test_sessions_[0]->timeout_timer_, 
disableTimer()); + respond(0, "200", true); + EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health()); + + expectClientCreate(0); + expectStreamCreate(0); + EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); + test_sessions_[0]->interval_timer_->invokeCallback(); +} + +TEST_F(HttpHealthCheckerImplTest, ProxyConnectionCloseLegacy) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.fixed_connection_close", "false"}}); + setupNoServiceValidationHC(); + EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)); + + cluster_->prioritySet().getMockHostSet(0)->hosts_ = { + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + expectSessionCreate(); + expectStreamCreate(0); + EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); + health_checker_->start(); + + EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _)); + EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); + respond(0, "200", false, true); + EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health()); + + expectClientCreate(0); + expectStreamCreate(0); + EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); + test_sessions_[0]->interval_timer_->invokeCallback(); +} + TEST_F(HttpHealthCheckerImplTest, HealthCheckIntervals) { setupHealthCheckIntervalOverridesHC(); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { diff --git a/test/integration/header_integration_test.cc b/test/integration/header_integration_test.cc index 08ebe7b2a806..c7218ca716ff 100644 --- a/test/integration/header_integration_test.cc +++ b/test/integration/header_integration_test.cc @@ -1114,6 +1114,7 @@ TEST_P(HeaderIntegrationTest, TestTeHeaderPassthrough) { {"server", "envoy"}, {"x-return-foo", "upstream"}, {":status", "200"}, + {"connection", "close"}, }); } From c19ead9dcd279f55eb7b66ab482ce4499b109a3e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Guti=C3=A9rrez=20Segal=C3=A9s?= Date: Mon, 11 May 2020 15:44:40 -0400 Subject: [PATCH 133/909] subset lb: purge empty subsets (#11035) This fixes a leak in an unordered map used by the Subset LB to keep track of subsets. With this fix, every time there's an update we'll now check to see if there are any empty subsets that need to be removed. Note about perf: we could potentially make this smarter by reusing the existing subsets map transversal code, but it'll make that code considerably more complex. Eventually, we could relax the cost of purging empty subsets by doing it less often as opposed to doing it on every update. Signed-off-by: Raul Gutierrez Segales --- source/common/upstream/subset_lb.cc | 72 ++++++++----- source/common/upstream/subset_lb.h | 10 +- test/common/upstream/subset_lb_test.cc | 137 ++++++++++++++++++++++++- 3 files changed, 185 insertions(+), 34 deletions(-) diff --git a/source/common/upstream/subset_lb.cc b/source/common/upstream/subset_lb.cc index 1cf9de7230a2..5c1d745f41a8 100644 --- a/source/common/upstream/subset_lb.cc +++ b/source/common/upstream/subset_lb.cc @@ -83,6 +83,8 @@ SubsetLoadBalancer::SubsetLoadBalancer( // This is a regular update with deltas. update(priority, hosts_added, hosts_removed); } + + purgeEmptySubsets(subsets_); }); } @@ -91,7 +93,7 @@ SubsetLoadBalancer::~SubsetLoadBalancer() { // Ensure gauges reflect correct values. 
forEachSubset(subsets_, [&](LbSubsetEntryPtr entry) { - if (entry->initialized() && entry->active()) { + if (entry->active()) { stats_.lb_subsets_removed_.inc(); stats_.lb_subsets_active_.dec(); } @@ -363,7 +365,7 @@ void SubsetLoadBalancer::updateFallbackSubset(uint32_t priority, const HostVecto void SubsetLoadBalancer::processSubsets( const HostVector& hosts_added, const HostVector& hosts_removed, std::function update_cb, - std::function new_cb) { + std::function new_cb) { std::unordered_set subsets_modified; std::pair steps[] = {{hosts_added, true}, {hosts_removed, false}}; @@ -392,7 +394,9 @@ void SubsetLoadBalancer::processSubsets( HostPredicate predicate = [this, kvs](const Host& host) -> bool { return hostMatches(kvs, host); }; - new_cb(entry, predicate, kvs, adding_hosts); + if (adding_hosts) { + new_cb(entry, predicate, kvs); + } } } } @@ -421,31 +425,18 @@ void SubsetLoadBalancer::update(uint32_t priority, const HostVector& hosts_added processSubsets( hosts_added, hosts_removed, [&](LbSubsetEntryPtr entry) { - const bool active_before = entry->active(); entry->priority_subset_->update(priority, hosts_added, hosts_removed); - - if (active_before && !entry->active()) { - stats_.lb_subsets_active_.dec(); - stats_.lb_subsets_removed_.inc(); - } else if (!active_before && entry->active()) { - stats_.lb_subsets_active_.inc(); - stats_.lb_subsets_created_.inc(); - } }, - [&](LbSubsetEntryPtr entry, HostPredicate predicate, const SubsetMetadata& kvs, - bool adding_host) { - UNREFERENCED_PARAMETER(kvs); - if (adding_host) { - ENVOY_LOG(debug, "subset lb: creating load balancer for {}", describeMetadata(kvs)); - - // Initialize new entry with hosts and update stats. (An uninitialized entry - // with only removed hosts is a degenerate case and we leave the entry - // uninitialized.) - entry->priority_subset_ = std::make_shared( - *this, predicate, locality_weight_aware_, scale_locality_weight_); - stats_.lb_subsets_active_.inc(); - stats_.lb_subsets_created_.inc(); - } + [&](LbSubsetEntryPtr entry, HostPredicate predicate, const SubsetMetadata& kvs) { + ENVOY_LOG(debug, "subset lb: creating load balancer for {}", describeMetadata(kvs)); + + // Initialize new entry with hosts and update stats. (An uninitialized entry + // with only removed hosts is a degenerate case and we leave the entry + // uninitialized.) + entry->priority_subset_ = std::make_shared( + *this, predicate, locality_weight_aware_, scale_locality_weight_); + stats_.lb_subsets_active_.inc(); + stats_.lb_subsets_created_.inc(); }); } @@ -593,6 +584,35 @@ void SubsetLoadBalancer::forEachSubset(LbSubsetMap& subsets, } } +void SubsetLoadBalancer::purgeEmptySubsets(LbSubsetMap& subsets) { + for (auto subset_it = subsets.begin(); subset_it != subsets.end();) { + for (auto it = subset_it->second.begin(); it != subset_it->second.end();) { + LbSubsetEntryPtr entry = it->second; + + purgeEmptySubsets(entry->children_); + + if (entry->active() || entry->hasChildren()) { + it++; + continue; + } + + // If it wasn't initialized, it wasn't accounted for. + if (entry->initialized()) { + stats_.lb_subsets_active_.dec(); + stats_.lb_subsets_removed_.inc(); + } + + it = subset_it->second.erase(it); + } + + if (subset_it->second.empty()) { + subset_it = subsets.erase(subset_it); + } else { + subset_it++; + } + } +} + // Initialize a new HostSubsetImpl and LoadBalancer from the SubsetLoadBalancer, filtering hosts // with the given predicate. 
SubsetLoadBalancer::PrioritySubsetImpl::PrioritySubsetImpl(const SubsetLoadBalancer& subset_lb, diff --git a/source/common/upstream/subset_lb.h b/source/common/upstream/subset_lb.h index e5880cd16115..b46ab624fb67 100644 --- a/source/common/upstream/subset_lb.h +++ b/source/common/upstream/subset_lb.h @@ -181,6 +181,7 @@ class SubsetLoadBalancer : public LoadBalancer, Logger::Loggableempty(); } + bool hasChildren() const { return !children_.empty(); } LbSubsetMap children_; @@ -197,10 +198,10 @@ class SubsetLoadBalancer : public LoadBalancer, Logger::Loggable update_cb, - std::function cb); + void + processSubsets(const HostVector& hosts_added, const HostVector& hosts_removed, + std::function update_cb, + std::function cb); HostConstSharedPtr tryChooseHostFromContext(LoadBalancerContext* context, bool& host_chosen); @@ -215,6 +216,7 @@ class SubsetLoadBalancer : public LoadBalancer, Logger::Loggable cb); + void purgeEmptySubsets(LbSubsetMap& subsets); std::vector extractSubsetMetadata(const std::set& subset_keys, const Host& host); diff --git a/test/common/upstream/subset_lb_test.cc b/test/common/upstream/subset_lb_test.cc index 5c4cf7c5faf5..40ae53bddf19 100644 --- a/test/common/upstream/subset_lb_test.cc +++ b/test/common/upstream/subset_lb_test.cc @@ -296,10 +296,11 @@ class SubsetLoadBalancerTest : public testing::TestWithParam { fallback_keys_subset_mapped); } - SubsetSelectorPtr - makeSelector(const std::set& selector_keys, - envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector:: - LbSubsetSelectorFallbackPolicy fallback_policy) { + SubsetSelectorPtr makeSelector( + const std::set& selector_keys, + envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector:: + LbSubsetSelectorFallbackPolicy fallback_policy = + envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::NOT_DEFINED) { return makeSelector(selector_keys, fallback_policy, {}); } @@ -434,6 +435,25 @@ class SubsetLoadBalancerTest : public testing::TestWithParam { return std::make_shared(metadata); } + MetadataConstSharedPtr buildMetadataWithStage(const std::string& version, + const std::string& stage = "") const { + envoy::config::core::v3::Metadata metadata; + + if (!version.empty()) { + Envoy::Config::Metadata::mutableMetadataValue( + metadata, Config::MetadataFilters::get().ENVOY_LB, "version") + .set_string_value(version); + } + + if (!stage.empty()) { + Envoy::Config::Metadata::mutableMetadataValue( + metadata, Config::MetadataFilters::get().ENVOY_LB, "stage") + .set_string_value(stage); + } + + return std::make_shared(metadata); + } + LoadBalancerType lb_type_{LoadBalancerType::RoundRobin}; NiceMock priority_set_; MockHostSet& host_set_ = *priority_set_.getMockHostSet(0); @@ -936,6 +956,115 @@ TEST_P(SubsetLoadBalancerTest, OnlyMetadataChanged) { EXPECT_EQ(host_set_.hosts_[1], lb_->chooseHost(&context_13)); } +TEST_P(SubsetLoadBalancerTest, EmptySubsetsPurged) { + std::vector subset_selectors = {makeSelector({"version"}), + makeSelector({"version", "stage"})}; + EXPECT_CALL(subset_info_, subsetSelectors()).WillRepeatedly(ReturnRef(subset_selectors)); + + // Simple add and remove. 
+ init({{"tcp://127.0.0.1:8000", {{"version", "1.2"}}}, + {"tcp://127.0.0.1:8001", {{"version", "1.0"}, {"stage", "prod"}}}}); + EXPECT_EQ(3U, stats_.lb_subsets_active_.value()); + EXPECT_EQ(3U, stats_.lb_subsets_created_.value()); + EXPECT_EQ(0U, stats_.lb_subsets_removed_.value()); + + host_set_.hosts_[0]->metadata(buildMetadataWithStage("1.3")); + host_set_.runCallbacks({}, {}); + EXPECT_EQ(3U, stats_.lb_subsets_active_.value()); + EXPECT_EQ(4U, stats_.lb_subsets_created_.value()); + EXPECT_EQ(1U, stats_.lb_subsets_removed_.value()); + + // Move host that was in the version + stage subset into a new version only subset. + host_set_.hosts_[1]->metadata(buildMetadataWithStage("1.4")); + host_set_.runCallbacks({}, {}); + EXPECT_EQ(2U, stats_.lb_subsets_active_.value()); + EXPECT_EQ(5U, stats_.lb_subsets_created_.value()); + EXPECT_EQ(3U, stats_.lb_subsets_removed_.value()); + + // Create a new version + stage subset. + host_set_.hosts_[1]->metadata(buildMetadataWithStage("1.5", "devel")); + host_set_.runCallbacks({}, {}); + EXPECT_EQ(3U, stats_.lb_subsets_active_.value()); + EXPECT_EQ(7U, stats_.lb_subsets_created_.value()); + EXPECT_EQ(4U, stats_.lb_subsets_removed_.value()); + + // Now move it back to its original version + stage subset. + host_set_.hosts_[1]->metadata(buildMetadataWithStage("1.0", "prod")); + host_set_.runCallbacks({}, {}); + EXPECT_EQ(3U, stats_.lb_subsets_active_.value()); + EXPECT_EQ(9U, stats_.lb_subsets_created_.value()); + EXPECT_EQ(6U, stats_.lb_subsets_removed_.value()); + + // Finally, remove the original version + stage subset again. + host_set_.hosts_[1]->metadata(buildMetadataWithStage("1.6")); + host_set_.runCallbacks({}, {}); + EXPECT_EQ(2U, stats_.lb_subsets_active_.value()); + EXPECT_EQ(10U, stats_.lb_subsets_created_.value()); + EXPECT_EQ(8U, stats_.lb_subsets_removed_.value()); +} + +TEST_P(SubsetLoadBalancerTest, EmptySubsetsPurgedCollapsed) { + std::vector subset_selectors = {makeSelector({"version"}), + makeSelector({"version", "stage"})}; + EXPECT_CALL(subset_info_, subsetSelectors()).WillRepeatedly(ReturnRef(subset_selectors)); + + // Init subsets. + init({{"tcp://127.0.0.1:8000", {{"version", "1.2"}}}, + {"tcp://127.0.0.1:8001", {{"version", "1.0"}, {"stage", "prod"}}}}); + EXPECT_EQ(3U, stats_.lb_subsets_active_.value()); + EXPECT_EQ(3U, stats_.lb_subsets_created_.value()); + EXPECT_EQ(0U, stats_.lb_subsets_removed_.value()); + + // Get rid of 1.0. + host_set_.hosts_[1]->metadata(buildMetadataWithStage("1.2", "prod")); + host_set_.runCallbacks({}, {}); + EXPECT_EQ(2U, stats_.lb_subsets_active_.value()); + EXPECT_EQ(4U, stats_.lb_subsets_created_.value()); + EXPECT_EQ(2U, stats_.lb_subsets_removed_.value()); + + // Get rid of stage prod. + host_set_.hosts_[1]->metadata(buildMetadataWithStage("1.2")); + host_set_.runCallbacks({}, {}); + EXPECT_EQ(1U, stats_.lb_subsets_active_.value()); + EXPECT_EQ(4U, stats_.lb_subsets_created_.value()); + EXPECT_EQ(3U, stats_.lb_subsets_removed_.value()); + + // Add stage prod back. 
+ host_set_.hosts_[1]->metadata(buildMetadataWithStage("1.2", "prod")); + host_set_.runCallbacks({}, {}); + EXPECT_EQ(2U, stats_.lb_subsets_active_.value()); + EXPECT_EQ(5U, stats_.lb_subsets_created_.value()); + EXPECT_EQ(3U, stats_.lb_subsets_removed_.value()); +} + +TEST_P(SubsetLoadBalancerTest, EmptySubsetsPurgedVersionChanged) { + std::vector subset_selectors = {makeSelector({"version"}), + makeSelector({"version", "stage"})}; + EXPECT_CALL(subset_info_, subsetSelectors()).WillRepeatedly(ReturnRef(subset_selectors)); + + // Init subsets. + init({{"tcp://127.0.0.1:8000", {{"version", "1.2"}}}, + {"tcp://127.0.0.1:8001", {{"version", "1.0"}, {"stage", "prod"}}}}); + EXPECT_EQ(3U, stats_.lb_subsets_active_.value()); + EXPECT_EQ(3U, stats_.lb_subsets_created_.value()); + EXPECT_EQ(0U, stats_.lb_subsets_removed_.value()); + + // Get rid of 1.0. + host_set_.hosts_[1]->metadata(buildMetadataWithStage("1.2", "prod")); + host_set_.runCallbacks({}, {}); + EXPECT_EQ(2U, stats_.lb_subsets_active_.value()); + EXPECT_EQ(4U, stats_.lb_subsets_created_.value()); + EXPECT_EQ(2U, stats_.lb_subsets_removed_.value()); + + // Change versions. + host_set_.hosts_[0]->metadata(buildMetadataWithStage("1.3")); + host_set_.hosts_[1]->metadata(buildMetadataWithStage("1.4", "prod")); + host_set_.runCallbacks({}, {}); + EXPECT_EQ(3U, stats_.lb_subsets_active_.value()); + EXPECT_EQ(7U, stats_.lb_subsets_created_.value()); + EXPECT_EQ(4U, stats_.lb_subsets_removed_.value()); +} + TEST_P(SubsetLoadBalancerTest, MetadataChangedHostsAddedRemoved) { TestLoadBalancerContext context_10({{"version", "1.0"}}); TestLoadBalancerContext context_12({{"version", "1.2"}}); From 516b9ae533cb8c8f843cbf7e208ab8d088e26e81 Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Mon, 11 May 2020 15:13:51 -0700 Subject: [PATCH 134/909] build: upgrade base image to ubuntu 18.04 (#11037) * bump build image * bazel 3.1 This is high risk as the binary in released image may not work in older environment. Risk Level: High Testing: Local, CI Docs Changes: Added Release Notes: Added Fixes #10944 Signed-off-by: Lizan Zhou --- .azure-pipelines/cleanup.sh | 2 +- .azure-pipelines/pipelines.yml | 8 ++++---- .bazelrc | 2 +- .bazelversion | 2 +- .circleci/config.yml | 4 ++-- bazel/repository_locations.bzl | 16 ++++++++-------- ci/Dockerfile-envoy | 2 +- ci/README.md | 12 ++++++------ ci/run_envoy_docker.sh | 2 +- docs/root/install/building.rst | 8 ++++---- docs/root/version_history/current.rst | 1 + 11 files changed, 30 insertions(+), 29 deletions(-) diff --git a/.azure-pipelines/cleanup.sh b/.azure-pipelines/cleanup.sh index 4b145b7729e0..8fa8c11cfcb3 100755 --- a/.azure-pipelines/cleanup.sh +++ b/.azure-pipelines/cleanup.sh @@ -4,6 +4,6 @@ set -e # Temporary script to remove tools from Azure pipelines agent to create more disk space room. sudo apt-get update -y -sudo apt-get purge -y --no-upgrade 'ghc-*' 'zulu-*-azure-jdk' 'libllvm*' 'mysql-*' 'dotnet-*' +sudo apt-get purge -y --no-upgrade 'ghc-*' 'zulu-*-azure-jdk' 'libllvm*' 'mysql-*' 'dotnet-*' 'libgl1' dpkg-query -Wf '${Installed-Size}\t${Package}\n' | sort -rn diff --git a/.azure-pipelines/pipelines.yml b/.azure-pipelines/pipelines.yml index 9f58e32fac0c..5e1f442abb09 100644 --- a/.azure-pipelines/pipelines.yml +++ b/.azure-pipelines/pipelines.yml @@ -14,7 +14,7 @@ jobs: - job: format dependsOn: [] # this removes the implicit dependency on previous stage and causes this to run in parallel. 
pool: - vmImage: "ubuntu-16.04" + vmImage: "ubuntu-18.04" steps: - task: Cache@2 inputs: @@ -44,7 +44,7 @@ jobs: condition: and(not(canceled()), or(succeeded(), ne(variables['Build.Reason'], 'PullRequest'))) timeoutInMinutes: 360 pool: - vmImage: "ubuntu-16.04" + vmImage: "ubuntu-18.04" steps: - template: bazel.yml parameters: @@ -70,7 +70,7 @@ jobs: CI_TARGET: "bazel.compile_time_options" timeoutInMinutes: 360 pool: - vmImage: "Ubuntu 16.04" + vmImage: "ubuntu-18.04" steps: - template: bazel.yml parameters: @@ -81,7 +81,7 @@ jobs: dependsOn: ["release"] condition: and(succeeded(), eq(variables['PostSubmit'], 'true'), ne(variables['Build.Reason'], 'PullRequest')) pool: - vmImage: "ubuntu-16.04" + vmImage: "ubuntu-18.04" steps: - task: DownloadBuildArtifacts@0 inputs: diff --git a/.bazelrc b/.bazelrc index f39013f27000..bffcef2cba59 100644 --- a/.bazelrc +++ b/.bazelrc @@ -159,7 +159,7 @@ build:remote-msan --config=rbe-toolchain-msan # Docker sandbox # NOTE: Update this from https://github.com/envoyproxy/envoy-build-tools/blob/master/toolchains/rbe_toolchains_config.bzl#L8 -build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu:09a5a914c904faa39dbc641181cb43b68cabf626 +build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu:4a5cbb97e1a068661cab495bf40cccc96bb37ca1 build:docker-sandbox --spawn_strategy=docker build:docker-sandbox --strategy=Javac=docker build:docker-sandbox --strategy=Closure=docker diff --git a/.bazelversion b/.bazelversion index 4a36342fcab7..fd2a01863fdd 100644 --- a/.bazelversion +++ b/.bazelversion @@ -1 +1 @@ -3.0.0 +3.1.0 diff --git a/.circleci/config.yml b/.circleci/config.yml index aa6ba5f7ac45..745cfb29025c 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -4,8 +4,8 @@ executors: ubuntu-build: description: "A regular build executor based on ubuntu image" docker: - # NOTE: Update this from https://github.com/envoyproxy/envoy-build-tools/blob/master/toolchains/rbe_toolchains_config.bzl#L7 - - image: envoyproxy/envoy-build-ubuntu@sha256:3788a87461f2b3dc8048ad0ce5df40438a56e0a8f1a4ab0f61b4ef0d8c11ff1f + # NOTE: Update this from https://github.com/envoyproxy/envoy-build-tools/blob/master/toolchains/rbe_toolchains_config.bzl#L8 + - image: envoyproxy/envoy-build-ubuntu:4a5cbb97e1a068661cab495bf40cccc96bb37ca1 resource_class: xlarge working_directory: /source diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index ee1776cd4372..06f721e45776 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -53,11 +53,11 @@ DEPENDENCY_REPOSITORIES = dict( use_category = ["build"], ), bazel_toolchains = dict( - sha256 = "239a1a673861eabf988e9804f45da3b94da28d1aff05c373b013193c315d9d9e", - strip_prefix = "bazel-toolchains-3.0.1", + sha256 = "144290c4166bd67e76a54f96cd504ed86416ca3ca82030282760f0823c10be48", + strip_prefix = "bazel-toolchains-3.1.1", urls = [ - "https://github.com/bazelbuild/bazel-toolchains/releases/download/3.0.1/bazel-toolchains-3.0.1.tar.gz", - "https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/archive/3.0.1.tar.gz", + "https://github.com/bazelbuild/bazel-toolchains/releases/download/3.1.1/bazel-toolchains-3.1.1.tar.gz", + "https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/archive/3.1.1.tar.gz", ], use_category = ["build"], ), @@ -67,10 +67,10 @@ DEPENDENCY_REPOSITORIES = dict( use_category = ["build"], ), envoy_build_tools = dict( - sha256 = 
"9d348f92ae8fb2495393109aac28aea314ad1fb013cdec1ab7b1224f804be1b7", - strip_prefix = "envoy-build-tools-823c2e9386eee5117f7ef9e3d7c90e784cd0d047", - # 2020-04-07 - urls = ["https://github.com/envoyproxy/envoy-build-tools/archive/823c2e9386eee5117f7ef9e3d7c90e784cd0d047.tar.gz"], + sha256 = "", + strip_prefix = "envoy-build-tools-8d7a0cb9be7a34c726575d79688ae3dea565a424", + # 2020-05-08 + urls = ["https://github.com/envoyproxy/envoy-build-tools/archive/8d7a0cb9be7a34c726575d79688ae3dea565a424.tar.gz"], use_category = ["build"], ), boringssl = dict( diff --git a/ci/Dockerfile-envoy b/ci/Dockerfile-envoy index 8e65dcaf55f5..8e1046a9be35 100644 --- a/ci/Dockerfile-envoy +++ b/ci/Dockerfile-envoy @@ -1,4 +1,4 @@ -FROM ubuntu:16.04 +FROM ubuntu:18.04 RUN apt-get update \ && apt-get upgrade -y \ diff --git a/ci/README.md b/ci/README.md index deecdb41cc1f..7a705a948c0f 100644 --- a/ci/README.md +++ b/ci/README.md @@ -5,10 +5,10 @@ Two flavors of Envoy Docker images, based on Ubuntu and Alpine Linux, are built. ## Ubuntu Envoy image The Ubuntu based Envoy Docker image at [`envoyproxy/envoy-build:`](https://hub.docker.com/r/envoyproxy/envoy-build/) is used for CircleCI checks, where `` is specified in [`envoy_build_sha.sh`](https://github.com/envoyproxy/envoy/blob/master/ci/envoy_build_sha.sh). Developers -may work with `envoyproxy/envoy-build:latest` to provide a self-contained environment for building Envoy binaries and -running tests that reflects the latest built Ubuntu Envoy image. Moreover, the Docker image -at [`envoyproxy/envoy:`](https://hub.docker.com/r/envoyproxy/envoy/) is an image that has an Envoy binary at `/usr/local/bin/envoy`. The `` -corresponds to the master commit at which the binary was compiled. Lastly, `envoyproxy/envoy-dev:latest` contains an Envoy +may work with the latest build image SHA in [envoy-build-tools](https://github.com/envoyproxy/envoy-build-tools/blob/master/toolchains/rbe_toolchains_config.bzl#L8) +repo to provide a self-contained environment for building Envoy binaries and running tests that reflects the latest built Ubuntu Envoy image. +Moreover, the Docker image at [`envoyproxy/envoy-dev:`](https://hub.docker.com/r/envoyproxy/envoy-dev/) is an image that has an Envoy binary at `/usr/local/bin/envoy`. +The `` corresponds to the master commit at which the binary was compiled. Lastly, `envoyproxy/envoy-dev:latest` contains an Envoy binary built from the latest tip of master that passed tests. ## Alpine Envoy image @@ -23,8 +23,8 @@ master commit at which the binary was compiled, and `latest` corresponds to a bi Currently there are three build images: * `envoyproxy/envoy-build` — alias to `envoyproxy/envoy-build-ubuntu`. -* `envoyproxy/envoy-build-ubuntu` — based on Ubuntu 16.04 (Xenial) with GCC 7 and Clang 9 compiler. -* `envoyproxy/envoy-build-centos` — based on CentOS 7 with GCC 7 and Clang 9 compiler, this image is experimental and not well tested. +* `envoyproxy/envoy-build-ubuntu` — based on Ubuntu 18.04 (Bionic) with GCC 9 and Clang 9 compiler. +* `envoyproxy/envoy-build-centos` — based on CentOS 7 with GCC 9 and Clang 9 compiler, this image is experimental and not well tested. The source for these images is located in the [envoyproxy/envoy-build-tools](https://github.com/envoyproxy/envoy-build-tools) repository. 
diff --git a/ci/run_envoy_docker.sh b/ci/run_envoy_docker.sh index 3ac8671d5b2d..2fb46473167d 100755 --- a/ci/run_envoy_docker.sh +++ b/ci/run_envoy_docker.sh @@ -31,4 +31,4 @@ docker run --rm ${DOCKER_TTY_OPTION} -e HTTP_PROXY=${http_proxy} -e HTTPS_PROXY= -e ENVOY_SRCDIR -e ENVOY_BUILD_TARGET -e SYSTEM_PULLREQUEST_TARGETBRANCH \ -v "$PWD":/source --cap-add SYS_PTRACE --cap-add NET_RAW --cap-add NET_ADMIN "${ENVOY_BUILD_IMAGE}" \ /bin/bash -lc "groupadd --gid $(id -g) -f envoygroup && useradd -o --uid $(id -u) --gid $(id -g) --no-create-home \ - --home-dir /source envoybuild && usermod -a -G pcap envoybuild && su envoybuild -c \"cd source && $*\"" + --home-dir /build envoybuild && usermod -a -G pcap envoybuild && sudo -EHs -u envoybuild bash -c \"cd /source && $*\"" diff --git a/docs/root/install/building.rst b/docs/root/install/building.rst index ada695fa53b5..65f86471f389 100644 --- a/docs/root/install/building.rst +++ b/docs/root/install/building.rst @@ -15,8 +15,8 @@ In order to build manually, follow the instructions at :repo:`bazel/README.md`. Requirements ------------ -Envoy was initially developed and deployed on Ubuntu 14 LTS. It should work on any reasonably -recent Linux including Ubuntu 16 LTS. +Envoy was initially developed and deployed on Ubuntu 14.04 LTS. It should work on any reasonably +recent Linux including Ubuntu 18.04 LTS. Building Envoy has the following requirements: @@ -35,7 +35,7 @@ We build and tag Docker images with release versions when we do official release be found in the following repositories: * `envoyproxy/envoy `_: Release binary with - symbols stripped on top of an Ubuntu Xenial base. + symbols stripped on top of an Ubuntu Bionic base. * `envoyproxy/envoy-alpine `_: Release binary with symbols stripped on top of a **glibc** alpine base. * `envoyproxy/envoy-alpine-debug `_: @@ -50,7 +50,7 @@ On every master commit we additionally create a set of development Docker images be found in the following repositories: * `envoyproxy/envoy-dev `_: Release binary with - symbols stripped on top of an Ubuntu Xenial base. + symbols stripped on top of an Ubuntu Bionic base. * `envoyproxy/envoy-alpine-dev `_: Release binary with symbols stripped on top of a **glibc** alpine base. * `envoyproxy/envoy-alpine-debug-dev `_: diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 73ad7c1d31ee..e5346afe2347 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -7,6 +7,7 @@ Changes * access loggers: added GRPC_STATUS operator on logging format. * access loggers: applied existing buffer limits to the non-google gRPC access logs, as well as :ref:`stats ` for logged / dropped logs. * access loggers: extened specifier for FilterStateFormatter to output :ref:`unstructured log string `. +* build: official released binary is now built on Ubuntu 18.04, requires glibc >= 2.27. * compressor: generic :ref:`compressor ` filter exposed to users. * config: added :ref:`version_text ` stat that reflects xDS version. * dynamic forward proxy: added :ref:`SNI based dynamic forward proxy ` support. 
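Because the release note above states the official binary now requires glibc >= 2.27, the following is a small standalone check an operator could compile to confirm a target host meets that requirement. This is an illustrative sketch only; it is not part of the Envoy build or this patch.

    // Prints the running glibc version and whether it satisfies >= 2.27.
    #include <gnu/libc-version.h>
    #include <cstdio>

    int main() {
      // gnu_get_libc_version() returns the runtime glibc version, e.g. "2.27".
      const char* version = gnu_get_libc_version();
      int major = 0, minor = 0;
      std::sscanf(version, "%d.%d", &major, &minor);
      const bool ok = major > 2 || (major == 2 && minor >= 27);
      std::printf("glibc %s: %s\n", version,
                  ok ? "meets >= 2.27 requirement" : "too old for the official binary");
      return ok ? 0 : 1;
    }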
From 8fe32a6bed9c3305f992f93c72b91388012ed52c Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Mon, 11 May 2020 16:35:25 -0700 Subject: [PATCH 135/909] fix add sha256sum (#11152) Forgot to add in #11037 Signed-off-by: Lizan Zhou --- bazel/repository_locations.bzl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 06f721e45776..8ac1bdf57e44 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -67,7 +67,7 @@ DEPENDENCY_REPOSITORIES = dict( use_category = ["build"], ), envoy_build_tools = dict( - sha256 = "", + sha256 = "e2cb99cf66e36412a9f570fe0391ff0c457ff17c2524ccdf73853c2752e8d372", strip_prefix = "envoy-build-tools-8d7a0cb9be7a34c726575d79688ae3dea565a424", # 2020-05-08 urls = ["https://github.com/envoyproxy/envoy-build-tools/archive/8d7a0cb9be7a34c726575d79688ae3dea565a424.tar.gz"], From f9f3834d368d80bb7d1b65407eb55314be6939c8 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Mon, 11 May 2020 19:42:17 -0400 Subject: [PATCH 136/909] http: fixing a bug in streaming retries (#11144) Previously, when response headers were received, the retry logic would reset the stream iff the response was complete: if the response was complete the request was complete so the stream was guaranteed to be complete. With streaming retries, the state of the request needs to be taken into account: if the request was incomplete but the response was complete the stream would not be reset, the retry would begin, and a reset of the original stream would cause the router to deal with a stream it had already removed. Signed-off-by: Alyssa Wilk --- source/common/router/router.cc | 3 +- source/common/router/upstream_request.h | 1 + test/integration/protocol_integration_test.cc | 60 +++++++++++++++++++ 3 files changed, 63 insertions(+), 1 deletion(-) diff --git a/source/common/router/router.cc b/source/common/router/router.cc index fb676ed514e5..677244a7f9e2 100644 --- a/source/common/router/router.cc +++ b/source/common/router/router.cc @@ -1096,6 +1096,7 @@ bool Filter::maybeRetryReset(Http::StreamResetReason reset_reason, if (upstream_request.upstreamHost()) { upstream_request.upstreamHost()->stats().rq_error_.inc(); } + upstream_request.removeFromList(upstream_requests_); return true; } else if (retry_status == RetryStatus::NoOverflow) { @@ -1287,7 +1288,7 @@ void Filter::onUpstreamHeaders(uint64_t response_code, Http::ResponseHeaderMapPt code_stats.chargeBasicResponseStat(cluster_->statsScope(), config_.retry_, static_cast(response_code)); - if (!end_stream) { + if (!end_stream || !upstream_request.encodeComplete()) { upstream_request.resetStream(); } upstream_request.removeFromList(upstream_requests_); diff --git a/source/common/router/upstream_request.h b/source/common/router/upstream_request.h index c215f9d45647..a23ab1b5afd6 100644 --- a/source/common/router/upstream_request.h +++ b/source/common/router/upstream_request.h @@ -140,6 +140,7 @@ class UpstreamRequest : public Logger::Loggable, bool createPerTryTimeoutOnRequestComplete() { return create_per_try_timeout_on_request_complete_; } + bool encodeComplete() const { return encode_complete_; } RouterFilterInterface& parent() { return parent_; } private: diff --git a/test/integration/protocol_integration_test.cc b/test/integration/protocol_integration_test.cc index e09134290e70..cc684148b90c 100644 --- a/test/integration/protocol_integration_test.cc +++ b/test/integration/protocol_integration_test.cc @@ -388,6 +388,66 @@ 
TEST_P(ProtocolIntegrationTest, RetryStreaming) { EXPECT_EQ(512U, response->body().size()); } +// Regression test https://github.com/envoyproxy/envoy/issues/11131 +// Send complete response headers directing a retry and reset the stream to make +// sure that Envoy cleans up stream state correctly when doing a retry with +// complete response but incomplete request. +TEST_P(ProtocolIntegrationTest, RetryStreamingReset) { + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + auto encoder_decoder = + codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{":method", "POST"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"x-forwarded-for", "10.0.0.1"}, + {"x-envoy-retry-on", "5xx"}}); + auto& encoder = encoder_decoder.first; + auto& response = encoder_decoder.second; + + // Send some data, but not the entire body. + std::string data(1024, 'a'); + Buffer::OwnedImpl send1(data); + encoder.encodeData(send1, false); + + ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + + // Send back an upstream failure and end stream. Make sure an immediate reset + // doesn't cause problems. + upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "503"}}, true); + upstream_request_->encodeResetStream(); + + // Make sure the fake stream is reset. + if (fake_upstreams_[0]->httpType() == FakeHttpConnection::Type::HTTP1) { + ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); + ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + } else { + ASSERT_TRUE(upstream_request_->waitForReset()); + } + + // Wait for a retry. Ensure all data, both before and after the retry, is received. + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + + // Finish the request. + std::string data2(512, 'b'); + Buffer::OwnedImpl send2(data2); + encoder.encodeData(send2, true); + std::string combined_request_data = data + data2; + ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, combined_request_data)); + + upstream_request_->encodeHeaders(default_response_headers_, false); + upstream_request_->encodeData(512, true); + + response->waitForEndStream(); + EXPECT_TRUE(upstream_request_->complete()); + EXPECT_EQ(combined_request_data.size(), upstream_request_->bodyLength()); + + EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ(512U, response->body().size()); +} + TEST_P(ProtocolIntegrationTest, RetryStreamingCancelDueToBufferOverflow) { config_helper_.addConfigModifier( [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& From 0b455afbeb70a37f3fd161c097976c52b14aaca7 Mon Sep 17 00:00:00 2001 From: htuch Date: Mon, 11 May 2020 20:12:49 -0400 Subject: [PATCH 137/909] ci: really fix spec.owner in ownerscheck.star. (#11143) Fix snafu in #11122. 
Signed-off-by: Harvey Tuch --- ci/repokitteh/modules/ownerscheck.star | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/ci/repokitteh/modules/ownerscheck.star b/ci/repokitteh/modules/ownerscheck.star index 43264261349c..e93010f89a7f 100644 --- a/ci/repokitteh/modules/ownerscheck.star +++ b/ci/repokitteh/modules/ownerscheck.star @@ -60,7 +60,8 @@ def _get_relevant_specs(specs, changed_files): status_label = spec.get("github_status_label", "") if files: relevant.append(struct(files=files, - owner=spec.owner, + owner=spec["owner"], + label=spec.get("label", None), path_match=path_match, allow_global_approval=allow_global_approval, status_label=status_label)) @@ -140,12 +141,12 @@ def _reconcile(config, specs=None): if spec.owner[-1] == '!': _update_status(spec.owner[:-1], spec.status_label, spec.path_match, approved) - if hasattr(spec, 'label'): + if spec.label: if approved: github.issue_unlabel(spec.label) else: github.issue_label(spec.label) - elif hasattr(spec, 'label'): # fyis + elif spec.label: # fyis github.issue_label(spec.label) return results @@ -218,7 +219,7 @@ def _lgtm_by_comment(config, comment_id, command, sender, sha): label = labels[0] - specs = [s for s in _get_specs(config) if hasattr(s, 'label') and s.label == label] + specs = [s for s in _get_specs(config) if s.label and s.label == label] if len(specs) == 0: react(comment_id, 'no relevant owners for "%s"' % label) From bcc3050f7385a3f67c1b62b8779c44c7cece6b10 Mon Sep 17 00:00:00 2001 From: htuch Date: Tue, 12 May 2020 00:04:52 -0400 Subject: [PATCH 138/909] build: enforce non-empty SHA256 for dependency pinning. (#11154) Signed-off-by: Harvey Tuch --- bazel/repositories.bzl | 3 +++ bazel/repository_locations.bzl | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index 18230f79d954..3e7c58599581 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -24,6 +24,9 @@ BUILD_ALL_CONTENT = """filegroup(name = "all", srcs = glob(["**"]), visibility = def _repository_locations(): locations = dict(DEPENDENCY_REPOSITORIES) for key, location in locations.items(): + if "sha256" not in location or len(location["sha256"]) == 0: + fail("SHA256 missing for external dependency " + str(location["urls"])) + if "use_category" not in location: fail("The 'use_category' attribute must be defined for external dependecy " + str(location["urls"])) diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 8ac1bdf57e44..80bb69492ddc 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -163,7 +163,7 @@ DEPENDENCY_REPOSITORIES = dict( cpe = "N/A", ), com_github_google_libprotobuf_mutator = dict( - sha256 = "", + sha256 = "f6def6cdf63e29a367d46c0ad9e3e31eed89d031e22e0caac126f1e62d8b3fd0", strip_prefix = "libprotobuf-mutator-3521f47a2828da9ace403e4ecc4aece1a84feb36", # 2020-02-04 urls = ["https://github.com/google/libprotobuf-mutator/archive/3521f47a2828da9ace403e4ecc4aece1a84feb36.tar.gz"], From bb74a91ac413d81f4b1874ed6474df93e4ca2782 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Tue, 12 May 2020 09:57:18 -0400 Subject: [PATCH 139/909] caching: fixing date header and response details (#11100) Changing the HCM to not adjust the date header when serving (side-car) cached responses. This includes adding a status flag for cached responses which propagates to access logs etc, as well as some test cleanup to support polling for multiple log entries. 
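For orientation, here is a self-contained sketch of the flag plumbing this change adds. The types are simplified stand-ins for StreamInfo::ResponseFlag and the HCM's date provider (the real edits are in CacheFilter::onHeaders and ConnectionManagerImpl::encodeHeadersInternal in the diffs below); it shows why a response served from the cache filter keeps its original Date header.

    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <string>

    // Value matches the new enum member added to include/envoy/stream_info/stream_info.h.
    enum ResponseFlag : uint64_t { ResponseFromCacheFilter = 0x100000 };

    // Simplified stand-in for Envoy's StreamInfo bitmask of response flags.
    struct StreamInfo {
      uint64_t flags{0};
      void setResponseFlag(ResponseFlag flag) { flags |= flag; }
      bool hasResponseFlag(ResponseFlag flag) const { return (flags & flag) != 0; }
    };

    int main() {
      std::map<std::string, std::string> headers{{"date", "Mon, 11 May 2020 00:00:00 GMT"}};
      StreamInfo stream_info;

      // What CacheFilter::onHeaders() now does before encoding cached headers.
      stream_info.setResponseFlag(ResponseFromCacheFilter);

      // What encodeHeadersInternal() now does: only stamp a fresh Date header
      // when the response did not come from the cache filter.
      if (!stream_info.hasResponseFlag(ResponseFromCacheFilter)) {
        headers["date"] = "<current server time>";
      }
      std::cout << "date: " << headers["date"] << std::endl; // cached Date preserved
      return 0;
    }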
Risk Level: Low (only affects pre-alpha cache filter) Testing: new UT, enhanced IT Docs Changes: n/a Release Notes: n/a Signed-off-by: Alyssa Wilk --- api/envoy/config/accesslog/v3/accesslog.proto | 1 + .../config/accesslog/v4alpha/accesslog.proto | 1 + api/envoy/data/accesslog/v3/accesslog.proto | 5 ++++- .../envoy/config/accesslog/v3/accesslog.proto | 1 + .../config/accesslog/v4alpha/accesslog.proto | 1 + .../envoy/data/accesslog/v3/accesslog.proto | 5 ++++- include/envoy/stream_info/stream_info.h | 4 +++- source/common/http/conn_manager_impl.cc | 4 +++- source/common/stream_info/utility.cc | 9 +++++++- source/common/stream_info/utility.h | 1 + .../grpc/grpc_access_log_utils.cc | 5 ++++- .../filters/http/cache/cache_filter.cc | 10 +++++++++ .../common/access_log/access_log_impl_test.cc | 12 ++++++---- test/common/stream_info/utility_test.cc | 10 +++++---- test/config/utility.cc | 2 +- .../grpc/grpc_access_log_utils_test.cc | 1 + .../cache/cache_filter_integration_test.cc | 8 +++++++ test/integration/integration.cc | 22 ++++++++++++++++--- test/integration/integration.h | 4 ++-- .../proxy_proto_integration_test.cc | 2 +- 20 files changed, 87 insertions(+), 21 deletions(-) diff --git a/api/envoy/config/accesslog/v3/accesslog.proto b/api/envoy/config/accesslog/v3/accesslog.proto index 218ad5bda4b8..9a2f276b34b4 100644 --- a/api/envoy/config/accesslog/v3/accesslog.proto +++ b/api/envoy/config/accesslog/v3/accesslog.proto @@ -241,6 +241,7 @@ message ResponseFlagFilter { in: "IH" in: "DPE" in: "UMSDR" + in: "RFCF" } } }]; diff --git a/api/envoy/config/accesslog/v4alpha/accesslog.proto b/api/envoy/config/accesslog/v4alpha/accesslog.proto index 5900f62f4ffe..c5eb4d2497e7 100644 --- a/api/envoy/config/accesslog/v4alpha/accesslog.proto +++ b/api/envoy/config/accesslog/v4alpha/accesslog.proto @@ -240,6 +240,7 @@ message ResponseFlagFilter { in: "IH" in: "DPE" in: "UMSDR" + in: "RFCF" } } }]; diff --git a/api/envoy/data/accesslog/v3/accesslog.proto b/api/envoy/data/accesslog/v3/accesslog.proto index c97e2f4acef0..347adc2003e6 100644 --- a/api/envoy/data/accesslog/v3/accesslog.proto +++ b/api/envoy/data/accesslog/v3/accesslog.proto @@ -186,7 +186,7 @@ message AccessLogCommon { } // Flags indicating occurrences during request/response processing. -// [#next-free-field: 21] +// [#next-free-field: 22] message ResponseFlags { option (udpa.annotations.versioning).previous_message_type = "envoy.data.accesslog.v2.ResponseFlags"; @@ -266,6 +266,9 @@ message ResponseFlags { // Indicates there was a max stream duration reached on the upstream request. bool upstream_max_stream_duration_reached = 20; + + // Indicates the response was served from a cache filter. + bool response_from_cache_filter = 21; } // Properties of a negotiated TLS connection. 
diff --git a/generated_api_shadow/envoy/config/accesslog/v3/accesslog.proto b/generated_api_shadow/envoy/config/accesslog/v3/accesslog.proto index 1edd34407635..09d691dd3665 100644 --- a/generated_api_shadow/envoy/config/accesslog/v3/accesslog.proto +++ b/generated_api_shadow/envoy/config/accesslog/v3/accesslog.proto @@ -239,6 +239,7 @@ message ResponseFlagFilter { in: "IH" in: "DPE" in: "UMSDR" + in: "RFCF" } } }]; diff --git a/generated_api_shadow/envoy/config/accesslog/v4alpha/accesslog.proto b/generated_api_shadow/envoy/config/accesslog/v4alpha/accesslog.proto index 5900f62f4ffe..c5eb4d2497e7 100644 --- a/generated_api_shadow/envoy/config/accesslog/v4alpha/accesslog.proto +++ b/generated_api_shadow/envoy/config/accesslog/v4alpha/accesslog.proto @@ -240,6 +240,7 @@ message ResponseFlagFilter { in: "IH" in: "DPE" in: "UMSDR" + in: "RFCF" } } }]; diff --git a/generated_api_shadow/envoy/data/accesslog/v3/accesslog.proto b/generated_api_shadow/envoy/data/accesslog/v3/accesslog.proto index c97e2f4acef0..347adc2003e6 100644 --- a/generated_api_shadow/envoy/data/accesslog/v3/accesslog.proto +++ b/generated_api_shadow/envoy/data/accesslog/v3/accesslog.proto @@ -186,7 +186,7 @@ message AccessLogCommon { } // Flags indicating occurrences during request/response processing. -// [#next-free-field: 21] +// [#next-free-field: 22] message ResponseFlags { option (udpa.annotations.versioning).previous_message_type = "envoy.data.accesslog.v2.ResponseFlags"; @@ -266,6 +266,9 @@ message ResponseFlags { // Indicates there was a max stream duration reached on the upstream request. bool upstream_max_stream_duration_reached = 20; + + // Indicates the response was served from a cache filter. + bool response_from_cache_filter = 21; } // Properties of a negotiated TLS connection. diff --git a/include/envoy/stream_info/stream_info.h b/include/envoy/stream_info/stream_info.h index bb4a2e73382d..c6a0318564f9 100644 --- a/include/envoy/stream_info/stream_info.h +++ b/include/envoy/stream_info/stream_info.h @@ -74,8 +74,10 @@ enum ResponseFlag { DownstreamProtocolError = 0x40000, // Upstream request reached to user defined max stream duration. UpstreamMaxStreamDurationReached = 0x80000, + // True if the response was served from an Envoy cache filter. + ResponseFromCacheFilter = 0x100000, // ATTENTION: MAKE SURE THIS REMAINS EQUAL TO THE LAST FLAG. - LastFlag = UpstreamMaxStreamDurationReached + LastFlag = ResponseFromCacheFilter }; /** diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index 1765fdb60b33..fee69c99c96f 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -1640,7 +1640,9 @@ void ConnectionManagerImpl::ActiveStream::encodeHeaders(ActiveStreamEncoderFilte void ConnectionManagerImpl::ActiveStream::encodeHeadersInternal(ResponseHeaderMap& headers, bool end_stream) { // Base headers. - connection_manager_.config_.dateProvider().setDateHeader(headers); + if (!stream_info_.hasResponseFlag(StreamInfo::ResponseFlag::ResponseFromCacheFilter)) { + connection_manager_.config_.dateProvider().setDateHeader(headers); + } // Following setReference() is safe because serverName() is constant for the life of the listener. 
const auto transformation = connection_manager_.config_.serverHeaderTransformation(); if (transformation == ConnectionManagerConfig::HttpConnectionManagerProto::OVERWRITE || diff --git a/source/common/stream_info/utility.cc b/source/common/stream_info/utility.cc index 2a173e9dd504..9a5a690b682f 100644 --- a/source/common/stream_info/utility.cc +++ b/source/common/stream_info/utility.cc @@ -26,6 +26,7 @@ const std::string ResponseFlagUtils::STREAM_IDLE_TIMEOUT = "SI"; const std::string ResponseFlagUtils::INVALID_ENVOY_REQUEST_HEADERS = "IH"; const std::string ResponseFlagUtils::DOWNSTREAM_PROTOCOL_ERROR = "DPE"; const std::string ResponseFlagUtils::UPSTREAM_MAX_STREAM_DURATION_REACHED = "UMSDR"; +const std::string ResponseFlagUtils::RESPONSE_FROM_CACHE_FILTER = "RFCF"; void ResponseFlagUtils::appendString(std::string& result, const std::string& append) { if (result.empty()) { @@ -38,7 +39,7 @@ void ResponseFlagUtils::appendString(std::string& result, const std::string& app const std::string ResponseFlagUtils::toShortString(const StreamInfo& stream_info) { std::string result; - static_assert(ResponseFlag::LastFlag == 0x80000, "A flag has been added. Fix this code."); + static_assert(ResponseFlag::LastFlag == 0x100000, "A flag has been added. Fix this code."); if (stream_info.hasResponseFlag(ResponseFlag::FailedLocalHealthCheck)) { appendString(result, FAILED_LOCAL_HEALTH_CHECK); @@ -118,6 +119,11 @@ const std::string ResponseFlagUtils::toShortString(const StreamInfo& stream_info if (stream_info.hasResponseFlag(ResponseFlag::UpstreamMaxStreamDurationReached)) { appendString(result, UPSTREAM_MAX_STREAM_DURATION_REACHED); } + + if (stream_info.hasResponseFlag(ResponseFlag::ResponseFromCacheFilter)) { + appendString(result, RESPONSE_FROM_CACHE_FILTER); + } + return result.empty() ? NONE : result; } @@ -146,6 +152,7 @@ absl::optional ResponseFlagUtils::toResponseFlag(const std::string {ResponseFlagUtils::DOWNSTREAM_PROTOCOL_ERROR, ResponseFlag::DownstreamProtocolError}, {ResponseFlagUtils::UPSTREAM_MAX_STREAM_DURATION_REACHED, ResponseFlag::UpstreamMaxStreamDurationReached}, + {ResponseFlagUtils::RESPONSE_FROM_CACHE_FILTER, ResponseFlag::ResponseFromCacheFilter}, }; const auto& it = map.find(flag); if (it != map.end()) { diff --git a/source/common/stream_info/utility.h b/source/common/stream_info/utility.h index 85285d1d2f80..2c7b73d751fb 100644 --- a/source/common/stream_info/utility.h +++ b/source/common/stream_info/utility.h @@ -41,6 +41,7 @@ class ResponseFlagUtils { const static std::string INVALID_ENVOY_REQUEST_HEADERS; const static std::string DOWNSTREAM_PROTOCOL_ERROR; const static std::string UPSTREAM_MAX_STREAM_DURATION_REACHED; + const static std::string RESPONSE_FROM_CACHE_FILTER; }; /** diff --git a/source/extensions/access_loggers/grpc/grpc_access_log_utils.cc b/source/extensions/access_loggers/grpc/grpc_access_log_utils.cc index 65ace2eb7ede..0977540b4102 100644 --- a/source/extensions/access_loggers/grpc/grpc_access_log_utils.cc +++ b/source/extensions/access_loggers/grpc/grpc_access_log_utils.cc @@ -37,7 +37,7 @@ void Utility::responseFlagsToAccessLogResponseFlags( envoy::data::accesslog::v3::AccessLogCommon& common_access_log, const StreamInfo::StreamInfo& stream_info) { - static_assert(StreamInfo::ResponseFlag::LastFlag == 0x80000, + static_assert(StreamInfo::ResponseFlag::LastFlag == 0x100000, "A flag has been added. 
Fix this code."); if (stream_info.hasResponseFlag(StreamInfo::ResponseFlag::FailedLocalHealthCheck)) { @@ -119,6 +119,9 @@ void Utility::responseFlagsToAccessLogResponseFlags( if (stream_info.hasResponseFlag(StreamInfo::ResponseFlag::UpstreamMaxStreamDurationReached)) { common_access_log.mutable_response_flags()->set_upstream_max_stream_duration_reached(true); } + if (stream_info.hasResponseFlag(StreamInfo::ResponseFlag::ResponseFromCacheFilter)) { + common_access_log.mutable_response_flags()->set_response_from_cache_filter(true); + } } void Utility::extractCommonAccessLogProperties( diff --git a/source/extensions/filters/http/cache/cache_filter.cc b/source/extensions/filters/http/cache/cache_filter.cc index ad43c577034e..55f477c24cb2 100644 --- a/source/extensions/filters/http/cache/cache_filter.cc +++ b/source/extensions/filters/http/cache/cache_filter.cc @@ -9,6 +9,12 @@ namespace Extensions { namespace HttpFilters { namespace Cache { +struct CacheResponseCodeDetailValues { + const absl::string_view ResponseFromCacheFilter = "cache.response_from_cache_filter"; +}; + +using CacheResponseCodeDetails = ConstSingleton; + bool CacheFilter::isCacheableRequest(Http::RequestHeaderMap& headers) { const Http::HeaderEntry* method = headers.Method(); const Http::HeaderEntry* forwarded_proto = headers.ForwardedProto(); @@ -113,6 +119,10 @@ void CacheFilter::onHeaders(LookupResult&& result) { const bool end_stream = (result.content_length_ == 0 && !response_has_trailers_); // TODO(toddmgreer): Calculate age per https://httpwg.org/specs/rfc7234.html#age.calculations result.headers_->addReferenceKey(Http::Headers::get().Age, 0); + decoder_callbacks_->streamInfo().setResponseFlag( + StreamInfo::ResponseFlag::ResponseFromCacheFilter); + decoder_callbacks_->streamInfo().setResponseCodeDetails( + CacheResponseCodeDetails::get().ResponseFromCacheFilter); decoder_callbacks_->encodeHeaders(std::move(result.headers_), end_stream); if (end_stream) { return; diff --git a/test/common/access_log/access_log_impl_test.cc b/test/common/access_log/access_log_impl_test.cc index 74010eaef8ba..451c44bf52f8 100644 --- a/test/common/access_log/access_log_impl_test.cc +++ b/test/common/access_log/access_log_impl_test.cc @@ -946,12 +946,13 @@ name: accesslog - IH - DPE - UMSDR + - RFCF typed_config: "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog path: /dev/null )EOF"; - static_assert(StreamInfo::ResponseFlag::LastFlag == 0x80000, + static_assert(StreamInfo::ResponseFlag::LastFlag == 0x100000, "A flag has been added. 
Fix this code."); const std::vector all_response_flags = { @@ -974,7 +975,8 @@ name: accesslog StreamInfo::ResponseFlag::StreamIdleTimeout, StreamInfo::ResponseFlag::InvalidEnvoyRequestHeaders, StreamInfo::ResponseFlag::DownstreamProtocolError, - StreamInfo::ResponseFlag::UpstreamMaxStreamDurationReached}; + StreamInfo::ResponseFlag::UpstreamMaxStreamDurationReached, + StreamInfo::ResponseFlag::ResponseFromCacheFilter}; InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); @@ -1006,7 +1008,8 @@ name: accesslog "[\"embedded message failed validation\"] | caused by " "ResponseFlagFilterValidationError.Flags[i]: [\"value must be in list \" [\"LH\" \"UH\" " "\"UT\" \"LR\" \"UR\" \"UF\" \"UC\" \"UO\" \"NR\" \"DI\" \"FI\" \"RL\" \"UAEX\" \"RLSE\" " - "\"DC\" \"URX\" \"SI\" \"IH\" \"DPE\" \"UMSDR\"]]): name: \"accesslog\"\nfilter {\n " + "\"DC\" \"URX\" \"SI\" \"IH\" \"DPE\" \"UMSDR\" \"RFCF\"]]): name: \"accesslog\"\nfilter {\n " + " " "response_flag_filter {\n flags: \"UnsupportedFlag\"\n }\n}\ntyped_config {\n " "[type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog] {\n path: \"/dev/null\"\n " "}\n}\n"); @@ -1032,7 +1035,8 @@ name: accesslog "[\"embedded message failed validation\"] | caused by " "ResponseFlagFilterValidationError.Flags[i]: [\"value must be in list \" [\"LH\" \"UH\" " "\"UT\" \"LR\" \"UR\" \"UF\" \"UC\" \"UO\" \"NR\" \"DI\" \"FI\" \"RL\" \"UAEX\" \"RLSE\" " - "\"DC\" \"URX\" \"SI\" \"IH\" \"DPE\" \"UMSDR\"]]): name: \"accesslog\"\nfilter {\n " + "\"DC\" \"URX\" \"SI\" \"IH\" \"DPE\" \"UMSDR\" \"RFCF\"]]): name: \"accesslog\"\nfilter {\n " + " " "response_flag_filter {\n flags: \"UnsupportedFlag\"\n }\n}\ntyped_config {\n " "[type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog] {\n path: \"/dev/null\"\n " "}\n}\n"); diff --git a/test/common/stream_info/utility_test.cc b/test/common/stream_info/utility_test.cc index 5b1b73760375..6492488efa98 100644 --- a/test/common/stream_info/utility_test.cc +++ b/test/common/stream_info/utility_test.cc @@ -15,7 +15,7 @@ namespace StreamInfo { namespace { TEST(ResponseFlagUtilsTest, toShortStringConversion) { - static_assert(ResponseFlag::LastFlag == 0x80000, "A flag has been added. Fix this code."); + static_assert(ResponseFlag::LastFlag == 0x100000, "A flag has been added. Fix this code."); std::vector> expected = { std::make_pair(ResponseFlag::FailedLocalHealthCheck, "LH"), @@ -37,7 +37,8 @@ TEST(ResponseFlagUtilsTest, toShortStringConversion) { std::make_pair(ResponseFlag::StreamIdleTimeout, "SI"), std::make_pair(ResponseFlag::InvalidEnvoyRequestHeaders, "IH"), std::make_pair(ResponseFlag::DownstreamProtocolError, "DPE"), - std::make_pair(ResponseFlag::UpstreamMaxStreamDurationReached, "UMSDR")}; + std::make_pair(ResponseFlag::UpstreamMaxStreamDurationReached, "UMSDR"), + std::make_pair(ResponseFlag::ResponseFromCacheFilter, "RFCF")}; for (const auto& test_case : expected) { NiceMock stream_info; @@ -65,7 +66,7 @@ TEST(ResponseFlagUtilsTest, toShortStringConversion) { } TEST(ResponseFlagsUtilsTest, toResponseFlagConversion) { - static_assert(ResponseFlag::LastFlag == 0x80000, "A flag has been added. Fix this code."); + static_assert(ResponseFlag::LastFlag == 0x100000, "A flag has been added. 
Fix this code."); std::vector> expected = { std::make_pair("LH", ResponseFlag::FailedLocalHealthCheck), @@ -87,7 +88,8 @@ TEST(ResponseFlagsUtilsTest, toResponseFlagConversion) { std::make_pair("SI", ResponseFlag::StreamIdleTimeout), std::make_pair("IH", ResponseFlag::InvalidEnvoyRequestHeaders), std::make_pair("DPE", ResponseFlag::DownstreamProtocolError), - std::make_pair("UMSDR", ResponseFlag::UpstreamMaxStreamDurationReached)}; + std::make_pair("UMSDR", ResponseFlag::UpstreamMaxStreamDurationReached), + std::make_pair("RFCF", ResponseFlag::ResponseFromCacheFilter)}; EXPECT_FALSE(ResponseFlagUtils::toResponseFlag("NonExistentFlag").has_value()); diff --git a/test/config/utility.cc b/test/config/utility.cc index 076006ce2681..359925892a6d 100644 --- a/test/config/utility.cc +++ b/test/config/utility.cc @@ -766,7 +766,7 @@ bool ConfigHelper::setAccessLog(const std::string& filename, absl::string_view f loadHttpConnectionManager(hcm_config); envoy::extensions::access_loggers::file::v3::FileAccessLog access_log_config; if (!format.empty()) { - access_log_config.set_format(std::string(format)); + access_log_config.set_format(absl::StrCat(format, "\n")); } access_log_config.set_path(filename); hcm_config.mutable_access_log(0)->mutable_typed_config()->PackFrom(access_log_config); diff --git a/test/extensions/access_loggers/grpc/grpc_access_log_utils_test.cc b/test/extensions/access_loggers/grpc/grpc_access_log_utils_test.cc index 1519369ffbdb..b824aeb2a3ad 100644 --- a/test/extensions/access_loggers/grpc/grpc_access_log_utils_test.cc +++ b/test/extensions/access_loggers/grpc/grpc_access_log_utils_test.cc @@ -42,6 +42,7 @@ TEST(UtilityResponseFlagsToAccessLogResponseFlagsTest, All) { common_access_log_expected.mutable_response_flags()->set_downstream_protocol_error(true); common_access_log_expected.mutable_response_flags()->set_upstream_max_stream_duration_reached( true); + common_access_log_expected.mutable_response_flags()->set_response_from_cache_filter(true); EXPECT_EQ(common_access_log_expected.DebugString(), common_access_log.DebugString()); } diff --git a/test/extensions/filters/http/cache/cache_filter_integration_test.cc b/test/extensions/filters/http/cache/cache_filter_integration_test.cc index a64573448646..4ddb6786804f 100644 --- a/test/extensions/filters/http/cache/cache_filter_integration_test.cc +++ b/test/extensions/filters/http/cache/cache_filter_integration_test.cc @@ -40,6 +40,7 @@ INSTANTIATE_TEST_SUITE_P(Protocols, CacheIntegrationTest, HttpProtocolIntegrationTest::protocolTestParamsToString); TEST_P(CacheIntegrationTest, MissInsertHit) { + useAccessLog("%RESPONSE_FLAGS% %RESPONSE_CODE_DETAILS%"); // Set system time to cause Envoy's cached formatted time to match time on this thread. simTime().setSystemTime(std::chrono::hours(1)); initializeFilter(default_config); @@ -68,8 +69,12 @@ TEST_P(CacheIntegrationTest, MissInsertHit) { EXPECT_THAT(request->headers(), IsSupersetOfHeaders(response_headers)); EXPECT_EQ(request->headers().get(Http::Headers::get().Age), nullptr); EXPECT_EQ(request->body(), std::string(42, 'a')); + EXPECT_EQ(waitForAccessLog(access_log_name_), "- via_upstream\n"); } + // Advance time, to verify the original date header is preserved. + simTime().advanceTimeWait(std::chrono::seconds(10)); + // Send second request, and get response from cache. 
IntegrationStreamDecoderPtr request = codec_client_->makeHeaderOnlyRequest(request_headers); request->waitForEndStream(); @@ -77,6 +82,9 @@ TEST_P(CacheIntegrationTest, MissInsertHit) { EXPECT_THAT(request->headers(), IsSupersetOfHeaders(response_headers)); EXPECT_EQ(request->body(), std::string(42, 'a')); EXPECT_NE(request->headers().get(Http::Headers::get().Age), nullptr); + // Advance time to force a log flush. + simTime().advanceTimeWait(std::chrono::seconds(1)); + EXPECT_EQ(waitForAccessLog(access_log_name_, 1), "RFCF cache.response_from_cache_filter\n"); } // Send the same GET request twice with body and trailers twice, then check that the response diff --git a/test/integration/integration.cc b/test/integration/integration.cc index 5f2f57a5e8b3..d56347633edf 100644 --- a/test/integration/integration.cc +++ b/test/integration/integration.cc @@ -545,12 +545,28 @@ void BaseIntegrationTest::useListenerAccessLog(absl::string_view format) { ASSERT_TRUE(config_helper_.setListenerAccessLog(listener_access_log_name_, format)); } -std::string BaseIntegrationTest::waitForAccessLog(const std::string& filename) { +// Assuming logs are newline delineated, return the start index of the nth entry. +// If there are not n entries, it will return file.length() (end of the string +// index) +size_t entryIndex(const std::string& file, uint32_t entry) { + size_t index = 0; + for (uint32_t i = 0; i < entry; ++i) { + index = file.find('\n', index); + if (index == std::string::npos || index == file.length()) { + return file.length(); + } + ++index; + } + return index; +} + +std::string BaseIntegrationTest::waitForAccessLog(const std::string& filename, uint32_t entry) { // Wait a max of 1s for logs to flush to disk. for (int i = 0; i < 1000; ++i) { std::string contents = TestEnvironment::readFileToStringForTest(filename, false); - if (contents.length() > 0) { - return contents; + size_t index = entryIndex(contents, entry); + if (contents.length() > index) { + return contents.substr(index); } absl::SleepFor(absl::Milliseconds(1)); } diff --git a/test/integration/integration.h b/test/integration/integration.h index 83fcccedb169..959a550cb5af 100644 --- a/test/integration/integration.h +++ b/test/integration/integration.h @@ -222,8 +222,8 @@ class BaseIntegrationTest : protected Logger::Loggable { // Enable the listener access log void useListenerAccessLog(absl::string_view format = ""); - // Waits for the first access log entry. - std::string waitForAccessLog(const std::string& filename); + // Waits for the nth access log entry, defaulting to log entry 0. 
+ std::string waitForAccessLog(const std::string& filename, uint32_t entry = 0); std::string listener_access_log_name_; diff --git a/test/integration/proxy_proto_integration_test.cc b/test/integration/proxy_proto_integration_test.cc index ec62a8991a6a..10b1a961040f 100644 --- a/test/integration/proxy_proto_integration_test.cc +++ b/test/integration/proxy_proto_integration_test.cc @@ -116,7 +116,7 @@ TEST_P(ProxyProtoIntegrationTest, AccessLog) { ASSERT_EQ(2, tokens.size()); EXPECT_EQ(tokens[0], Network::Test::getLoopbackAddressString(GetParam())); - EXPECT_EQ(tokens[1], "1.2.3.4:12345"); + EXPECT_EQ(tokens[1], "1.2.3.4:12345\n"); } TEST_P(ProxyProtoIntegrationTest, DEPRECATED_FEATURE_TEST(OriginalDst)) { From 703f2fbdd4d2b11db901a74d15726bc6c017189b Mon Sep 17 00:00:00 2001 From: Phil Genera Date: Tue, 12 May 2020 11:01:03 -0400 Subject: [PATCH 140/909] eds: optionalize counting of unknown fields (#10982) In order to speed up eds, don't necessarily visit every proto field to count its validity as WarningValidationVisitor does. This yields a ~30% speed improvement in processing very large updates in EDS. Risk Level: medium, new feature behind a command line flag. Testing: Unit and bechmark tests. Docs Changes: These are probably wrong, thus the draft-ness. Release Notes: EDS can now ignore unknown dynamic fields, for a ~30% improvement in update processing time. Behind --ignore-unknown-dynamic-fields Co-authored-by: Joshua Marantz --- api/envoy/admin/v3/server_info.proto | 5 +- api/envoy/admin/v4alpha/server_info.proto | 5 +- docs/root/operations/cli.rst | 7 + .../envoy/admin/v3/server_info.proto | 5 +- .../envoy/admin/v4alpha/server_info.proto | 5 +- include/envoy/protobuf/message_validator.h | 6 + include/envoy/server/options.h | 5 + .../common/protobuf/message_validator_impl.h | 16 ++- source/common/protobuf/utility.h | 4 +- source/server/config_validation/server.cc | 3 +- source/server/options_impl.cc | 5 + source/server/options_impl.h | 6 + source/server/server.cc | 3 +- .../protobuf/message_validator_impl_test.cc | 4 + test/common/upstream/BUILD | 31 ++++ test/common/upstream/eds_speed_test.cc | 134 ++++++++++++++++++ test/common/upstream/eds_test.cc | 18 +++ .../dynamic_validation_integration_test.cc | 52 +++++-- test/integration/integration.cc | 31 ++-- test/integration/integration.h | 6 +- test/integration/server.cc | 33 ++--- test/integration/server.h | 32 +++-- test/integration/xds_integration_test.cc | 2 +- test/mocks/protobuf/mocks.h | 7 + test/mocks/server/mocks.cc | 3 + test/mocks/server/mocks.h | 2 + test/server/server_test.cc | 1 + 27 files changed, 356 insertions(+), 75 deletions(-) create mode 100644 test/common/upstream/eds_speed_test.cc diff --git a/api/envoy/admin/v3/server_info.proto b/api/envoy/admin/v3/server_info.proto index 8e7a0ef42005..b89e58749f7e 100644 --- a/api/envoy/admin/v3/server_info.proto +++ b/api/envoy/admin/v3/server_info.proto @@ -54,7 +54,7 @@ message ServerInfo { CommandLineOptions command_line_options = 6; } -// [#next-free-field: 30] +// [#next-free-field: 31] message CommandLineOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.CommandLineOptions"; @@ -97,6 +97,9 @@ message CommandLineOptions { // See :option:`--reject-unknown-dynamic-fields` for details. bool reject_unknown_dynamic_fields = 26; + // See :option:`--ignore-unknown-dynamic-fields` for details. + bool ignore_unknown_dynamic_fields = 30; + // See :option:`--admin-address-path` for details. 
string admin_address_path = 6; diff --git a/api/envoy/admin/v4alpha/server_info.proto b/api/envoy/admin/v4alpha/server_info.proto index f32f71a8093a..b9e8c3043002 100644 --- a/api/envoy/admin/v4alpha/server_info.proto +++ b/api/envoy/admin/v4alpha/server_info.proto @@ -54,7 +54,7 @@ message ServerInfo { CommandLineOptions command_line_options = 6; } -// [#next-free-field: 30] +// [#next-free-field: 31] message CommandLineOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.CommandLineOptions"; @@ -96,6 +96,9 @@ message CommandLineOptions { // See :option:`--reject-unknown-dynamic-fields` for details. bool reject_unknown_dynamic_fields = 26; + // See :option:`--ignore-unknown-dynamic-fields` for details. + bool ignore_unknown_dynamic_fields = 30; + // See :option:`--admin-address-path` for details. string admin_address_path = 6; diff --git a/docs/root/operations/cli.rst b/docs/root/operations/cli.rst index f24e52ce6b5f..ea446c82c1d0 100644 --- a/docs/root/operations/cli.rst +++ b/docs/root/operations/cli.rst @@ -288,6 +288,13 @@ following are the command line options that Envoy supports. and these occurrences are counted in the :ref:`server.dynamic_unknown_fields ` statistic. +.. option:: --ignore-unknown-dynamic-fields + + *(optional)* This flag disables validation of protobuf configuration for unknown fields in dynamic + configuration. Unlike setting --reject-unknown-dynamic-fields to false, it does not log warnings or + count occurrences of unknown fields, in the interest of configuration processing speed. If + --reject-unknown-dynamic-fields is set to true, this flag has no effect. + .. option:: --disable-extensions *(optional)* This flag disabled the provided list of comma-separated extension names. Disabled diff --git a/generated_api_shadow/envoy/admin/v3/server_info.proto b/generated_api_shadow/envoy/admin/v3/server_info.proto index c94a001a3a66..4962a95d631b 100644 --- a/generated_api_shadow/envoy/admin/v3/server_info.proto +++ b/generated_api_shadow/envoy/admin/v3/server_info.proto @@ -54,7 +54,7 @@ message ServerInfo { CommandLineOptions command_line_options = 6; } -// [#next-free-field: 30] +// [#next-free-field: 31] message CommandLineOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.CommandLineOptions"; @@ -95,6 +95,9 @@ message CommandLineOptions { // See :option:`--reject-unknown-dynamic-fields` for details. bool reject_unknown_dynamic_fields = 26; + // See :option:`--ignore-unknown-dynamic-fields` for details. + bool ignore_unknown_dynamic_fields = 30; + // See :option:`--admin-address-path` for details. string admin_address_path = 6; diff --git a/generated_api_shadow/envoy/admin/v4alpha/server_info.proto b/generated_api_shadow/envoy/admin/v4alpha/server_info.proto index f32f71a8093a..b9e8c3043002 100644 --- a/generated_api_shadow/envoy/admin/v4alpha/server_info.proto +++ b/generated_api_shadow/envoy/admin/v4alpha/server_info.proto @@ -54,7 +54,7 @@ message ServerInfo { CommandLineOptions command_line_options = 6; } -// [#next-free-field: 30] +// [#next-free-field: 31] message CommandLineOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.CommandLineOptions"; @@ -96,6 +96,9 @@ message CommandLineOptions { // See :option:`--reject-unknown-dynamic-fields` for details. bool reject_unknown_dynamic_fields = 26; + // See :option:`--ignore-unknown-dynamic-fields` for details. + bool ignore_unknown_dynamic_fields = 30; + // See :option:`--admin-address-path` for details. 
string admin_address_path = 6; diff --git a/include/envoy/protobuf/message_validator.h b/include/envoy/protobuf/message_validator.h index 613b8c964249..ddd6d14d3da2 100644 --- a/include/envoy/protobuf/message_validator.h +++ b/include/envoy/protobuf/message_validator.h @@ -33,6 +33,12 @@ class ValidationVisitor { * @param description human readable description of the field */ virtual void onUnknownField(absl::string_view description) PURE; + + /** + * If true, skip this validation visitor in the interest of speed when + * possible. + **/ + virtual bool skipValidation() PURE; }; class ValidationContext { diff --git a/include/envoy/server/options.h b/include/envoy/server/options.h index 96baa7fbdfef..c83102e472f4 100644 --- a/include/envoy/server/options.h +++ b/include/envoy/server/options.h @@ -101,6 +101,11 @@ class Options { */ virtual bool rejectUnknownDynamicFields() const PURE; + /** + * @return bool ignore unknown fields in the dynamic configuration? + **/ + virtual bool ignoreUnknownDynamicFields() const PURE; + /** * @return const std::string& the admin address output file. */ diff --git a/source/common/protobuf/message_validator_impl.h b/source/common/protobuf/message_validator_impl.h index 32d705fd44bf..0ba98161ec5d 100644 --- a/source/common/protobuf/message_validator_impl.h +++ b/source/common/protobuf/message_validator_impl.h @@ -14,6 +14,9 @@ class NullValidationVisitorImpl : public ValidationVisitor { public: // Envoy::ProtobufMessage::ValidationVisitor void onUnknownField(absl::string_view) override {} + + // Envoy::ProtobufMessage::ValidationVisitor + bool skipValidation() override { return true; } }; ValidationVisitor& getNullValidationVisitor(); @@ -26,6 +29,9 @@ class WarningValidationVisitorImpl : public ValidationVisitor, // Envoy::ProtobufMessage::ValidationVisitor void onUnknownField(absl::string_view description) override; + // Envoy::ProtobufMessage::ValidationVisitor + bool skipValidation() override { return false; } + private: // Track hashes of descriptions we've seen, to avoid log spam. A hash is used here to avoid // wasting memory with unused strings. @@ -40,6 +46,9 @@ class StrictValidationVisitorImpl : public ValidationVisitor { public: // Envoy::ProtobufMessage::ValidationVisitor void onUnknownField(absl::string_view description) override; + + // Envoy::ProtobufMessage::ValidationVisitor + bool skipValidation() override { return false; } }; ValidationVisitor& getStrictValidationVisitor(); @@ -62,11 +71,14 @@ class ValidationContextImpl : public ValidationContext { class ProdValidationContextImpl : public ValidationContextImpl { public: - ProdValidationContextImpl(bool allow_unknown_static_fields, bool allow_unknown_dynamic_fields) + ProdValidationContextImpl(bool allow_unknown_static_fields, bool allow_unknown_dynamic_fields, + bool ignore_unknown_dynamic_fields) : ValidationContextImpl(allow_unknown_static_fields ? static_warning_validation_visitor_ : getStrictValidationVisitor(), allow_unknown_dynamic_fields - ? dynamic_warning_validation_visitor_ + ? (ignore_unknown_dynamic_fields + ? 
ProtobufMessage::getNullValidationVisitor() + : dynamic_warning_validation_visitor_) : ProtobufMessage::getStrictValidationVisitor()) {} ProtobufMessage::WarningValidationVisitorImpl& static_warning_validation_visitor() { diff --git a/source/common/protobuf/utility.h b/source/common/protobuf/utility.h index ffc9b4bab52d..871f6b219cef 100644 --- a/source/common/protobuf/utility.h +++ b/source/common/protobuf/utility.h @@ -250,7 +250,9 @@ class MessageUtil { static void validate(const MessageType& message, ProtobufMessage::ValidationVisitor& validation_visitor) { // Log warnings or throw errors if deprecated fields or unknown fields are in use. - checkForUnexpectedFields(message, validation_visitor); + if (!validation_visitor.skipValidation()) { + checkForUnexpectedFields(message, validation_visitor); + } std::string err; if (!Validate(message, &err)) { diff --git a/source/server/config_validation/server.cc b/source/server/config_validation/server.cc index e7faf1a999cc..b418e5fe867a 100644 --- a/source/server/config_validation/server.cc +++ b/source/server/config_validation/server.cc @@ -42,7 +42,8 @@ ValidationInstance::ValidationInstance( Thread::BasicLockable& access_log_lock, ComponentFactory& component_factory, Thread::ThreadFactory& thread_factory, Filesystem::Instance& file_system) : options_(options), validation_context_(options_.allowUnknownStaticFields(), - !options.rejectUnknownDynamicFields()), + !options.rejectUnknownDynamicFields(), + !options.ignoreUnknownDynamicFields()), stats_store_(store), api_(new Api::ValidationImpl(thread_factory, store, time_system, file_system)), dispatcher_(api_->allocateDispatcher("main_thread")), diff --git a/source/server/options_impl.cc b/source/server/options_impl.cc index f3b7d67db6a6..e8e7cc9fa04d 100644 --- a/source/server/options_impl.cc +++ b/source/server/options_impl.cc @@ -81,6 +81,9 @@ OptionsImpl::OptionsImpl(std::vector args, TCLAP::SwitchArg reject_unknown_dynamic_fields("", "reject-unknown-dynamic-fields", "reject unknown fields in dynamic configuration", cmd, false); + TCLAP::SwitchArg ignore_unknown_dynamic_fields("", "ignore-unknown-dynamic-fields", + "ignore unknown fields in dynamic configuration", + cmd, false); TCLAP::ValueArg admin_address_path("", "admin-address-path", "Admin address path", false, "", "string", cmd); @@ -235,6 +238,7 @@ OptionsImpl::OptionsImpl(std::vector args, allow_unknown_static_fields_ = allow_unknown_static_fields.getValue() || allow_unknown_fields.getValue(); reject_unknown_dynamic_fields_ = reject_unknown_dynamic_fields.getValue(); + ignore_unknown_dynamic_fields_ = ignore_unknown_dynamic_fields.getValue(); admin_address_path_ = admin_address_path.getValue(); log_path_ = log_path.getValue(); restart_epoch_ = restart_epoch.getValue(); @@ -321,6 +325,7 @@ Server::CommandLineOptionsPtr OptionsImpl::toCommandLineOptions() const { command_line_options->set_config_yaml(configYaml()); command_line_options->set_allow_unknown_static_fields(allow_unknown_static_fields_); command_line_options->set_reject_unknown_dynamic_fields(reject_unknown_dynamic_fields_); + command_line_options->set_ignore_unknown_dynamic_fields(ignore_unknown_dynamic_fields_); command_line_options->set_admin_address_path(adminAddressPath()); command_line_options->set_component_log_level(component_log_level_str_); command_line_options->set_log_level(spdlog::level::to_string_view(logLevel()).data(), diff --git a/source/server/options_impl.h b/source/server/options_impl.h index fb6bd08dfdb9..d82b28ca9fb1 100644 --- 
a/source/server/options_impl.h +++ b/source/server/options_impl.h @@ -92,6 +92,10 @@ class OptionsImpl : public Server::Options, protected Logger::Loggable process_context) : init_manager_(init_manager), workers_started_(false), live_(false), shutdown_(false), options_(options), validation_context_(options_.allowUnknownStaticFields(), - !options.rejectUnknownDynamicFields()), + !options.rejectUnknownDynamicFields(), + options.ignoreUnknownDynamicFields()), time_source_(time_system), restarter_(restarter), start_time_(time(nullptr)), original_start_time_(start_time_), stats_store_(store), thread_local_(tls), api_(new Api::Impl(thread_factory, store, time_system, file_system, diff --git a/test/common/protobuf/message_validator_impl_test.cc b/test/common/protobuf/message_validator_impl_test.cc index fd5433704584..7110a1432537 100644 --- a/test/common/protobuf/message_validator_impl_test.cc +++ b/test/common/protobuf/message_validator_impl_test.cc @@ -16,6 +16,7 @@ namespace { // The null validation visitor doesn't do anything on unknown fields. TEST(NullValidationVisitorImpl, UnknownField) { NullValidationVisitorImpl null_validation_visitor; + EXPECT_TRUE(null_validation_visitor.skipValidation()); EXPECT_NO_THROW(null_validation_visitor.onUnknownField("foo")); } @@ -24,6 +25,8 @@ TEST(WarningValidationVisitorImpl, UnknownField) { Stats::TestUtil::TestStore stats; Stats::Counter& counter = stats.counter("counter"); WarningValidationVisitorImpl warning_validation_visitor; + // we want to be executed. + EXPECT_FALSE(warning_validation_visitor.skipValidation()); // First time around we should log. EXPECT_LOG_CONTAINS("warn", "Unknown field: foo", warning_validation_visitor.onUnknownField("foo")); @@ -46,6 +49,7 @@ TEST(WarningValidationVisitorImpl, UnknownField) { // The strict validation visitor throws on unknown fields. 
TEST(StrictValidationVisitorImpl, UnknownField) { StrictValidationVisitorImpl strict_validation_visitor; + EXPECT_FALSE(strict_validation_visitor.skipValidation()); EXPECT_THROW_WITH_MESSAGE(strict_validation_visitor.onUnknownField("foo"), UnknownProtoFieldException, "Protobuf message (foo) has unknown fields"); diff --git a/test/common/upstream/BUILD b/test/common/upstream/BUILD index 27ccee4cd7eb..19f713f672a2 100644 --- a/test/common/upstream/BUILD +++ b/test/common/upstream/BUILD @@ -96,6 +96,37 @@ envoy_cc_test( ], ) +envoy_cc_benchmark_binary( + name = "eds_speed_test", + srcs = ["eds_speed_test.cc"], + external_deps = [ + "benchmark", + ], + deps = [ + ":utility_lib", + "//source/common/config:utility_lib", + "//source/common/upstream:eds_lib", + "//source/extensions/transport_sockets/raw_buffer:config", + "//source/server:transport_socket_config_lib", + "//test/mocks/local_info:local_info_mocks", + "//test/mocks/protobuf:protobuf_mocks", + "//test/mocks/runtime:runtime_mocks", + "//test/mocks/server:server_mocks", + "//test/mocks/ssl:ssl_mocks", + "//test/mocks/upstream:upstream_mocks", + "//test/test_common:utility_lib", + "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + "@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto", + "@envoy_api//envoy/service/discovery/v3:pkg_cc_proto", + ], +) + +envoy_benchmark_test( + name = "eds_speed_test_benchmark_test", + benchmark_binary = "eds_speed_test", +) + envoy_cc_test( name = "health_checker_impl_test", srcs = ["health_checker_impl_test.cc"], diff --git a/test/common/upstream/eds_speed_test.cc b/test/common/upstream/eds_speed_test.cc new file mode 100644 index 000000000000..e02f16a086f8 --- /dev/null +++ b/test/common/upstream/eds_speed_test.cc @@ -0,0 +1,134 @@ +// Note: this should be run with --compilation_mode=opt, and would benefit from a +// quiescent system with disabled cstate power management. + +#include "envoy/config/cluster/v3/cluster.pb.h" +#include "envoy/config/core/v3/health_check.pb.h" +#include "envoy/config/endpoint/v3/endpoint.pb.h" +#include "envoy/config/endpoint/v3/endpoint_components.pb.h" +#include "envoy/service/discovery/v3/discovery.pb.h" +#include "envoy/stats/scope.h" + +#include "common/config/utility.h" +#include "common/singleton/manager_impl.h" +#include "common/upstream/eds.h" + +#include "server/transport_socket_config_impl.h" + +#include "test/common/upstream/utility.h" +#include "test/mocks/local_info/mocks.h" +#include "test/mocks/protobuf/mocks.h" +#include "test/mocks/runtime/mocks.h" +#include "test/mocks/server/mocks.h" +#include "test/mocks/ssl/mocks.h" +#include "test/mocks/upstream/mocks.h" +#include "test/test_common/utility.h" + +#include "benchmark/benchmark.h" + +namespace Envoy { +namespace Upstream { + +class EdsSpeedTest { +public: + EdsSpeedTest() : api_(Api::createApiForTest(stats_)) {} + + void resetCluster(const std::string& yaml_config, Cluster::InitializePhase initialize_phase) { + local_info_.node_.mutable_locality()->set_zone("us-east-1a"); + eds_cluster_ = parseClusterFromV2Yaml(yaml_config); + Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( + "cluster.{}.", + eds_cluster_.alt_stat_name().empty() ? 
eds_cluster_.name() : eds_cluster_.alt_stat_name())); + Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( + admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, random_, stats_, + singleton_manager_, tls_, validation_visitor_, *api_); + cluster_ = std::make_shared(eds_cluster_, runtime_, factory_context, + std::move(scope), false); + EXPECT_EQ(initialize_phase, cluster_->initializePhase()); + eds_callbacks_ = cm_.subscription_factory_.callbacks_; + } + + void initialize() { + EXPECT_CALL(*cm_.subscription_factory_.subscription_, start(_)); + cluster_->initialize([this] { initialized_ = true; }); + } + + // Set up an EDS config with multiple priorities, localities, weights and make sure + // they are loaded and reloaded as expected. + void priorityAndLocalityWeightedHelper(bool ignore_unknown_dynamic_fields, int num_hosts) { + envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment; + cluster_load_assignment.set_cluster_name("fare"); + resetCluster(R"EOF( + name: name + connect_timeout: 0.25s + type: EDS + eds_cluster_config: + service_name: fare + eds_config: + api_config_source: + cluster_names: + - eds + refresh_delay: 1s + )EOF", + Envoy::Upstream::Cluster::InitializePhase::Secondary); + + // Add a whole bunch of hosts in a single place: + auto* endpoints = cluster_load_assignment.add_endpoints(); + endpoints->set_priority(1); + auto* locality = endpoints->mutable_locality(); + locality->set_region("region"); + locality->set_zone("zone"); + locality->set_sub_zone("sub_zone"); + endpoints->mutable_load_balancing_weight()->set_value(1); + + uint32_t port = 1000; + for (int i = 0; i < num_hosts; ++i) { + auto* socket_address = endpoints->add_lb_endpoints() + ->mutable_endpoint() + ->mutable_address() + ->mutable_socket_address(); + socket_address->set_address("10.0.1." 
+ std::to_string(i / 60000)); + socket_address->set_port_value((port + i) % 60000); + } + + // this is what we're actually testing: + validation_visitor_.setSkipValidation(ignore_unknown_dynamic_fields); + + initialize(); + Protobuf::RepeatedPtrField resources; + resources.Add()->PackFrom(cluster_load_assignment); + eds_callbacks_->onConfigUpdate(resources, ""); + ASSERT(initialized_); + } + + bool initialized_{}; + Stats::IsolatedStoreImpl stats_; + Ssl::MockContextManager ssl_context_manager_; + envoy::config::cluster::v3::Cluster eds_cluster_; + NiceMock cm_; + NiceMock dispatcher_; + std::shared_ptr cluster_; + Config::SubscriptionCallbacks* eds_callbacks_{}; + NiceMock random_; + NiceMock runtime_; + NiceMock local_info_; + NiceMock admin_; + Singleton::ManagerImpl singleton_manager_{Thread::threadFactoryForTest()}; + NiceMock tls_; + ProtobufMessage::MockValidationVisitor validation_visitor_; + Api::ApiPtr api_; +}; + +} // namespace Upstream +} // namespace Envoy + +static void priorityAndLocalityWeighted(benchmark::State& state) { + Envoy::Thread::MutexBasicLockable lock; + Envoy::Logger::Context logging_state(spdlog::level::warn, + Envoy::Logger::Logger::DEFAULT_LOG_FORMAT, lock, false); + for (auto _ : state) { + Envoy::Upstream::EdsSpeedTest speed_test; + speed_test.priorityAndLocalityWeightedHelper(state.range(0), state.range(1)); + } +} + +BENCHMARK(priorityAndLocalityWeighted)->Ranges({{false, true}, {2000, 100000}}); diff --git a/test/common/upstream/eds_test.cc b/test/common/upstream/eds_test.cc index 23600bbfe0e5..b40aa3f40f7f 100644 --- a/test/common/upstream/eds_test.cc +++ b/test/common/upstream/eds_test.cc @@ -228,6 +228,24 @@ TEST_F(EdsTest, ValidateFail) { EXPECT_FALSE(initialized_); } +// Validate that onConfigUpdate() can ignore unknown fields. +// this doesn't test the actual functionality, as the ValidationVisitor is mocked out, +// however it is functionally tested in dynamic_validation_integration_test. +TEST_F(EdsTest, ValidateIgnored) { + validation_visitor_.setSkipValidation(true); + initialize(); + envoy::config::endpoint::v3::ClusterLoadAssignment resource; + resource.set_cluster_name("fare"); + auto* unknown = resource.GetReflection()->MutableUnknownFields(&resource); + // add a field that doesn't exist in the proto definition: + unknown->AddFixed32(1000, 1); + + Protobuf::RepeatedPtrField resources; + resources.Add()->PackFrom(resource); + doOnConfigUpdateVerifyNoThrow(resource); + EXPECT_TRUE(initialized_); +} + // Validate that onConfigUpdate() with unexpected cluster names rejects config. TEST_F(EdsTest, OnConfigUpdateWrongName) { envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment; diff --git a/test/integration/dynamic_validation_integration_test.cc b/test/integration/dynamic_validation_integration_test.cc index 3363b5341f4f..31a80bffae0d 100644 --- a/test/integration/dynamic_validation_integration_test.cc +++ b/test/integration/dynamic_validation_integration_test.cc @@ -46,33 +46,38 @@ class TestDynamicValidationNetworkFilterConfigFactory // Pretty-printing of parameterized test names. std::string dynamicValidationTestParamsToString( - const ::testing::TestParamInfo>& params) { + const ::testing::TestParamInfo>& params) { return fmt::format( - "{}_{}", + "{}_{}_{}", TestUtility::ipTestParamsToString( ::testing::TestParamInfo(std::get<0>(params.param), 0)), - std::get<1>(params.param) ? "with_reject_unknown_fields" : "without_reject_unknown_fields"); + std::get<1>(params.param) ? 
"with_reject_unknown_fields" : "without_reject_unknown_fields", + std::get<2>(params.param) ? "with_ignore_unknown_fields" : "without_ignore_unknown_fields"); } // Validate unknown field handling in dynamic configuration. class DynamicValidationIntegrationTest - : public testing::TestWithParam>, + : public testing::TestWithParam>, public HttpIntegrationTest { public: DynamicValidationIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, std::get<0>(GetParam())), - reject_unknown_dynamic_fields_(std::get<1>(GetParam())) { + reject_unknown_dynamic_fields_(std::get<1>(GetParam())), + ignore_unknown_dynamic_fields_(std::get<2>(GetParam())) { setUpstreamProtocol(FakeHttpConnection::Type::HTTP2); } void createEnvoy() override { registerPort("upstream_0", fake_upstreams_.back()->localAddress()->ip()->port()); - createApiTestServer(api_filesystem_config_, {"http"}, reject_unknown_dynamic_fields_, - reject_unknown_dynamic_fields_, allow_lds_rejection_); + createApiTestServer(api_filesystem_config_, {"http"}, + {reject_unknown_dynamic_fields_, reject_unknown_dynamic_fields_, + ignore_unknown_dynamic_fields_}, + allow_lds_rejection_); } ApiFilesystemConfig api_filesystem_config_; const bool reject_unknown_dynamic_fields_; + const bool ignore_unknown_dynamic_fields_; bool allow_lds_rejection_{}; private: @@ -83,7 +88,8 @@ class DynamicValidationIntegrationTest INSTANTIATE_TEST_SUITE_P( IpVersions, DynamicValidationIntegrationTest, - testing::Combine(testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), testing::Bool()), + testing::Combine(testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), testing::Bool(), + testing::Bool()), dynamicValidationTestParamsToString); // Protocol options in CDS with unknown fields are rejected if and only if strict. 
@@ -103,7 +109,11 @@ TEST_P(DynamicValidationIntegrationTest, CdsProtocolOptionsRejected) { EXPECT_EQ(0, test_server_->counter("server.dynamic_unknown_fields")->value()); } else { EXPECT_EQ(1, test_server_->counter("cluster_manager.cds.update_success")->value()); - EXPECT_EQ(1, test_server_->counter("server.dynamic_unknown_fields")->value()); + if (ignore_unknown_dynamic_fields_) { + EXPECT_EQ(0, test_server_->counter("server.dynamic_unknown_fields")->value()); + } else { + EXPECT_EQ(1, test_server_->counter("server.dynamic_unknown_fields")->value()); + } } } @@ -127,7 +137,11 @@ TEST_P(DynamicValidationIntegrationTest, LdsFilterRejected) { } else { EXPECT_EQ(1, test_server_->counter("listener_manager.lds.update_success")->value()); EXPECT_EQ(1, test_server_->counter("http.router.rds.route_config_0.update_success")->value()); - EXPECT_EQ(1, test_server_->counter("server.dynamic_unknown_fields")->value()); + if (ignore_unknown_dynamic_fields_) { + EXPECT_EQ(0, test_server_->counter("server.dynamic_unknown_fields")->value()); + } else { + EXPECT_EQ(1, test_server_->counter("server.dynamic_unknown_fields")->value()); + } } EXPECT_EQ(1, test_server_->counter("cluster_manager.cds.update_success")->value()); EXPECT_EQ(1, test_server_->counter("cluster.cluster_1.update_success")->value()); @@ -154,7 +168,11 @@ TEST_P(DynamicValidationIntegrationTest, LdsFilterRejectedTypedStruct) { } else { EXPECT_EQ(1, test_server_->counter("listener_manager.lds.update_success")->value()); EXPECT_EQ(1, test_server_->counter("http.router.rds.route_config_0.update_success")->value()); - EXPECT_EQ(1, test_server_->counter("server.dynamic_unknown_fields")->value()); + if (ignore_unknown_dynamic_fields_) { + EXPECT_EQ(0, test_server_->counter("server.dynamic_unknown_fields")->value()); + } else { + EXPECT_EQ(1, test_server_->counter("server.dynamic_unknown_fields")->value()); + } } EXPECT_EQ(1, test_server_->counter("cluster_manager.cds.update_success")->value()); EXPECT_EQ(1, test_server_->counter("cluster.cluster_1.update_success")->value()); @@ -178,7 +196,11 @@ TEST_P(DynamicValidationIntegrationTest, RdsFailedBySubscription) { EXPECT_EQ(0, test_server_->counter("server.dynamic_unknown_fields")->value()); } else { EXPECT_EQ(1, test_server_->counter("http.router.rds.route_config_0.update_success")->value()); - EXPECT_EQ(1, test_server_->counter("server.dynamic_unknown_fields")->value()); + if (ignore_unknown_dynamic_fields_) { + EXPECT_EQ(0, test_server_->counter("server.dynamic_unknown_fields")->value()); + } else { + EXPECT_EQ(1, test_server_->counter("server.dynamic_unknown_fields")->value()); + } } EXPECT_EQ(1, test_server_->counter("cluster_manager.cds.update_success")->value()); EXPECT_EQ(1, test_server_->counter("cluster.cluster_1.update_success")->value()); @@ -204,7 +226,11 @@ TEST_P(DynamicValidationIntegrationTest, EdsFailedBySubscription) { EXPECT_EQ(0, test_server_->counter("server.dynamic_unknown_fields")->value()); } else { EXPECT_EQ(1, test_server_->counter("cluster.cluster_1.update_success")->value()); - EXPECT_EQ(1, test_server_->counter("server.dynamic_unknown_fields")->value()); + if (ignore_unknown_dynamic_fields_) { + EXPECT_EQ(0, test_server_->counter("server.dynamic_unknown_fields")->value()); + } else { + EXPECT_EQ(1, test_server_->counter("server.dynamic_unknown_fields")->value()); + } } } diff --git a/test/integration/integration.cc b/test/integration/integration.cc index d56347633edf..1f4b9ffcc5b3 100644 --- a/test/integration/integration.cc +++ b/test/integration/integration.cc @@ 
-371,7 +371,7 @@ void BaseIntegrationTest::createEnvoy() { for (int i = 0; i < static_resources.listeners_size(); ++i) { named_ports.push_back(static_resources.listeners(i).name()); } - createGeneratedApiTestServer(bootstrap_path, named_ports, false, true, false); + createGeneratedApiTestServer(bootstrap_path, named_ports, {false, true, false}, false); } void BaseIntegrationTest::setUpstreamProtocol(FakeHttpConnection::Type protocol) { @@ -450,15 +450,13 @@ std::string getListenerDetails(Envoy::Server::Instance& server) { return MessageUtil::getYamlStringFromMessage(listener_info.dynamic_listeners(0).error_state()); } -void BaseIntegrationTest::createGeneratedApiTestServer(const std::string& bootstrap_path, - const std::vector& port_names, - bool allow_unknown_static_fields, - bool reject_unknown_dynamic_fields, - bool allow_lds_rejection) { - test_server_ = IntegrationTestServer::create( - bootstrap_path, version_, on_server_ready_function_, on_server_init_function_, deterministic_, - timeSystem(), *api_, defer_listener_finalization_, process_object_, - allow_unknown_static_fields, reject_unknown_dynamic_fields, concurrency_); +void BaseIntegrationTest::createGeneratedApiTestServer( + const std::string& bootstrap_path, const std::vector& port_names, + Server::FieldValidationConfig validator_config, bool allow_lds_rejection) { + test_server_ = IntegrationTestServer::create(bootstrap_path, version_, on_server_ready_function_, + on_server_init_function_, deterministic_, + timeSystem(), *api_, defer_listener_finalization_, + process_object_, validator_config, concurrency_); if (config_helper_.bootstrap().static_resources().listeners_size() > 0 && !defer_listener_finalization_) { @@ -489,8 +487,7 @@ void BaseIntegrationTest::createGeneratedApiTestServer(const std::string& bootst void BaseIntegrationTest::createApiTestServer(const ApiFilesystemConfig& api_filesystem_config, const std::vector& port_names, - bool allow_unknown_static_fields, - bool reject_unknown_dynamic_fields, + Server::FieldValidationConfig validator_config, bool allow_lds_rejection) { const std::string eds_path = TestEnvironment::temporaryFileSubstitute( api_filesystem_config.eds_path_, port_map_, version_); @@ -500,11 +497,11 @@ void BaseIntegrationTest::createApiTestServer(const ApiFilesystemConfig& api_fil api_filesystem_config.rds_path_, port_map_, version_); const std::string lds_path = TestEnvironment::temporaryFileSubstitute( api_filesystem_config.lds_path_, {{"rds_json_path", rds_path}}, port_map_, version_); - createGeneratedApiTestServer( - TestEnvironment::temporaryFileSubstitute( - api_filesystem_config.bootstrap_path_, - {{"cds_json_path", cds_path}, {"lds_json_path", lds_path}}, port_map_, version_), - port_names, allow_unknown_static_fields, reject_unknown_dynamic_fields, allow_lds_rejection); + createGeneratedApiTestServer(TestEnvironment::temporaryFileSubstitute( + api_filesystem_config.bootstrap_path_, + {{"cds_json_path", cds_path}, {"lds_json_path", lds_path}}, + port_map_, version_), + port_names, validator_config, allow_lds_rejection); } void BaseIntegrationTest::createTestServer(const std::string& json_path, diff --git a/test/integration/integration.h b/test/integration/integration.h index 959a550cb5af..6047c577dd52 100644 --- a/test/integration/integration.h +++ b/test/integration/integration.h @@ -206,11 +206,11 @@ class BaseIntegrationTest : protected Logger::Loggable { void createTestServer(const std::string& json_path, const std::vector& port_names); void createGeneratedApiTestServer(const 
std::string& bootstrap_path, const std::vector& port_names, - bool allow_unknown_static_fields, - bool reject_unknown_dynamic_fields, bool allow_lds_rejection); + Server::FieldValidationConfig validator_config, + bool allow_lds_rejection); void createApiTestServer(const ApiFilesystemConfig& api_filesystem_config, const std::vector& port_names, - bool allow_unknown_static_fields, bool reject_unknown_dynamic_fields, + Server::FieldValidationConfig validator_config, bool allow_lds_rejection); Event::TestTimeSystem& timeSystem() { return time_system_; } diff --git a/test/integration/server.cc b/test/integration/server.cc index 05f05a34a16f..07c8e2ccc401 100644 --- a/test/integration/server.cc +++ b/test/integration/server.cc @@ -31,8 +31,7 @@ namespace Server { OptionsImpl createTestOptionsImpl(const std::string& config_path, const std::string& config_yaml, Network::Address::IpVersion ip_version, - bool allow_unknown_static_fields, - bool reject_unknown_dynamic_fields, uint32_t concurrency) { + FieldValidationConfig validation_config, uint32_t concurrency) { OptionsImpl test_options("cluster_name", "node_name", "zone_name", spdlog::level::info); test_options.setConfigPath(config_path); @@ -41,8 +40,9 @@ OptionsImpl createTestOptionsImpl(const std::string& config_path, const std::str test_options.setFileFlushIntervalMsec(std::chrono::milliseconds(50)); test_options.setDrainTime(std::chrono::seconds(1)); test_options.setParentShutdownTime(std::chrono::seconds(2)); - test_options.setAllowUnkownFields(allow_unknown_static_fields); - test_options.setRejectUnknownFieldsDynamic(reject_unknown_dynamic_fields); + test_options.setAllowUnkownFields(validation_config.allow_unknown_static_fields); + test_options.setRejectUnknownFieldsDynamic(validation_config.reject_unknown_dynamic_fields); + test_options.setIgnoreUnknownFieldsDynamic(validation_config.ignore_unknown_dynamic_fields); test_options.setConcurrency(concurrency); return test_options; @@ -55,16 +55,15 @@ IntegrationTestServerPtr IntegrationTestServer::create( std::function server_ready_function, std::function on_server_init_function, bool deterministic, Event::TestTimeSystem& time_system, Api::Api& api, bool defer_listener_finalization, - ProcessObjectOptRef process_object, bool allow_unknown_static_fields, - bool reject_unknown_dynamic_fields, uint32_t concurrency) { + ProcessObjectOptRef process_object, Server::FieldValidationConfig validation_config, + uint32_t concurrency) { IntegrationTestServerPtr server{ std::make_unique(time_system, api, config_path)}; if (server_ready_function != nullptr) { server->setOnServerReadyCb(server_ready_function); } server->start(version, on_server_init_function, deterministic, defer_listener_finalization, - process_object, allow_unknown_static_fields, reject_unknown_dynamic_fields, - concurrency); + process_object, validation_config, concurrency); return server; } @@ -82,15 +81,13 @@ void IntegrationTestServer::start(const Network::Address::IpVersion version, std::function on_server_init_function, bool deterministic, bool defer_listener_finalization, ProcessObjectOptRef process_object, - bool allow_unknown_static_fields, - bool reject_unknown_dynamic_fields, uint32_t concurrency) { + Server::FieldValidationConfig validator_config, + uint32_t concurrency) { ENVOY_LOG(info, "starting integration test server"); ASSERT(!thread_); thread_ = api_.threadFactory().createThread( - [version, deterministic, process_object, allow_unknown_static_fields, - reject_unknown_dynamic_fields, concurrency, this]() -> void { - 
threadRoutine(version, deterministic, process_object, allow_unknown_static_fields, - reject_unknown_dynamic_fields, concurrency); + [version, deterministic, process_object, validator_config, concurrency, this]() -> void { + threadRoutine(version, deterministic, process_object, validator_config, concurrency); }); // If any steps need to be done prior to workers starting, do them now. E.g., xDS pre-init. @@ -165,12 +162,10 @@ void IntegrationTestServer::serverReady() { void IntegrationTestServer::threadRoutine(const Network::Address::IpVersion version, bool deterministic, ProcessObjectOptRef process_object, - bool allow_unknown_static_fields, - bool reject_unknown_dynamic_fields, + Server::FieldValidationConfig validation_config, uint32_t concurrency) { - OptionsImpl options(Server::createTestOptionsImpl(config_path_, "", version, - allow_unknown_static_fields, - reject_unknown_dynamic_fields, concurrency)); + OptionsImpl options( + Server::createTestOptionsImpl(config_path_, "", version, validation_config, concurrency)); Thread::MutexBasicLockable lock; Runtime::RandomGeneratorPtr random_generator; diff --git a/test/integration/server.h b/test/integration/server.h index 007d4f5be817..c951ff62145a 100644 --- a/test/integration/server.h +++ b/test/integration/server.h @@ -30,11 +30,16 @@ namespace Envoy { namespace Server { +struct FieldValidationConfig { + bool allow_unknown_static_fields = false; + bool reject_unknown_dynamic_fields = false; + bool ignore_unknown_dynamic_fields = false; +}; + // Create OptionsImpl structures suitable for tests. OptionsImpl createTestOptionsImpl(const std::string& config_path, const std::string& config_yaml, Network::Address::IpVersion ip_version, - bool allow_unknown_static_fields = false, - bool reject_unknown_dynamic_fields = false, + FieldValidationConfig validation_config = FieldValidationConfig(), uint32_t concurrency = 1); class TestDrainManager : public DrainManager { @@ -268,13 +273,15 @@ class IntegrationTestServer : public Logger::Loggable, public IntegrationTestServerStats, public Server::ComponentFactory { public: - static IntegrationTestServerPtr create( - const std::string& config_path, const Network::Address::IpVersion version, - std::function on_server_ready_function, - std::function on_server_init_function, bool deterministic, - Event::TestTimeSystem& time_system, Api::Api& api, bool defer_listener_finalization = false, - ProcessObjectOptRef process_object = absl::nullopt, bool allow_unknown_static_fields = false, - bool reject_unknown_dynamic_fields = false, uint32_t concurrency = 1); + static IntegrationTestServerPtr + create(const std::string& config_path, const Network::Address::IpVersion version, + std::function on_server_ready_function, + std::function on_server_init_function, bool deterministic, + Event::TestTimeSystem& time_system, Api::Api& api, + bool defer_listener_finalization = false, + ProcessObjectOptRef process_object = absl::nullopt, + Server::FieldValidationConfig validation_config = Server::FieldValidationConfig(), + uint32_t concurrency = 1); // Note that the derived class is responsible for tearing down the server in its // destructor. 
~IntegrationTestServer() override; @@ -296,8 +303,7 @@ class IntegrationTestServer : public Logger::Loggable, void start(const Network::Address::IpVersion version, std::function on_server_init_function, bool deterministic, bool defer_listener_finalization, ProcessObjectOptRef process_object, - bool allow_unknown_static_fields, bool reject_unknown_dynamic_fields, - uint32_t concurrency); + Server::FieldValidationConfig validation_config, uint32_t concurrency); void waitForCounterEq(const std::string& name, uint64_t value) override { TestUtility::waitForCounterEq(statStore(), name, value, time_system_); @@ -378,8 +384,8 @@ class IntegrationTestServer : public Logger::Loggable, * Runs the real server on a thread. */ void threadRoutine(const Network::Address::IpVersion version, bool deterministic, - ProcessObjectOptRef process_object, bool allow_unknown_static_fields, - bool reject_unknown_dynamic_fields, uint32_t concurrency); + ProcessObjectOptRef process_object, + Server::FieldValidationConfig validation_config, uint32_t concurrency); Event::TestTimeSystem& time_system_; Api::Api& api_; diff --git a/test/integration/xds_integration_test.cc b/test/integration/xds_integration_test.cc index 5bf1380a2951..5ed39d311eca 100644 --- a/test/integration/xds_integration_test.cc +++ b/test/integration/xds_integration_test.cc @@ -37,7 +37,7 @@ class XdsIntegrationTest : public testing::TestWithParamlocalAddress()->ip()->port()); - createApiTestServer(api_filesystem_config, {"http"}, false, false, false); + createApiTestServer(api_filesystem_config, {"http"}, {false, false, false}, false); EXPECT_EQ(1, test_server_->counter("listener_manager.lds.update_success")->value()); EXPECT_EQ(1, test_server_->counter("http.router.rds.route_config_0.update_success")->value()); EXPECT_EQ(1, test_server_->counter("cluster_manager.cds.update_success")->value()); diff --git a/test/mocks/protobuf/mocks.h b/test/mocks/protobuf/mocks.h index 3f60902c1fc6..5170f1ba1228 100644 --- a/test/mocks/protobuf/mocks.h +++ b/test/mocks/protobuf/mocks.h @@ -13,6 +13,13 @@ class MockValidationVisitor : public ValidationVisitor { ~MockValidationVisitor() override; MOCK_METHOD(void, onUnknownField, (absl::string_view)); + + bool skipValidation() override { return skip_validation_; } + + void setSkipValidation(bool s) { skip_validation_ = s; } + +private: + bool skip_validation_ = false; }; class MockValidationContext : public ValidationContext { diff --git a/test/mocks/server/mocks.cc b/test/mocks/server/mocks.cc index 3054af81c799..b85e9924ecfa 100644 --- a/test/mocks/server/mocks.cc +++ b/test/mocks/server/mocks.cc @@ -32,6 +32,9 @@ MockOptions::MockOptions(const std::string& config_path) : config_path_(config_p ON_CALL(*this, rejectUnknownDynamicFields()).WillByDefault(Invoke([this] { return reject_unknown_dynamic_fields_; })); + ON_CALL(*this, ignoreUnknownDynamicFields()).WillByDefault(Invoke([this] { + return ignore_unknown_dynamic_fields_; + })); ON_CALL(*this, adminAddressPath()).WillByDefault(ReturnRef(admin_address_path_)); ON_CALL(*this, serviceClusterName()).WillByDefault(ReturnRef(service_cluster_name_)); ON_CALL(*this, serviceNodeName()).WillByDefault(ReturnRef(service_node_name_)); diff --git a/test/mocks/server/mocks.h b/test/mocks/server/mocks.h index 0992ff6520ea..70cb86598c90 100644 --- a/test/mocks/server/mocks.h +++ b/test/mocks/server/mocks.h @@ -77,6 +77,7 @@ class MockOptions : public Options { MOCK_METHOD(const absl::optional&, bootstrapVersion, (), (const)); MOCK_METHOD(bool, allowUnknownStaticFields, (), 
(const)); MOCK_METHOD(bool, rejectUnknownDynamicFields, (), (const)); + MOCK_METHOD(bool, ignoreUnknownDynamicFields, (), (const)); MOCK_METHOD(const std::string&, adminAddressPath, (), (const)); MOCK_METHOD(Network::Address::IpVersion, localAddressIpVersion, (), (const)); MOCK_METHOD(std::chrono::seconds, drainTime, (), (const)); @@ -107,6 +108,7 @@ class MockOptions : public Options { absl::optional bootstrap_version_; bool allow_unknown_static_fields_{}; bool reject_unknown_dynamic_fields_{}; + bool ignore_unknown_dynamic_fields_{}; std::string admin_address_path_; std::string service_cluster_name_; std::string service_node_name_; diff --git a/test/server/server_test.cc b/test/server/server_test.cc index 4a12a62dc3a2..e12eca40fad7 100644 --- a/test/server/server_test.cc +++ b/test/server/server_test.cc @@ -582,6 +582,7 @@ TEST_P(ServerInstanceImplTest, ValidationRejectDynamic) { options_.service_cluster_name_ = "some_cluster_name"; options_.service_node_name_ = "some_node_name"; options_.reject_unknown_dynamic_fields_ = true; + options_.ignore_unknown_dynamic_fields_ = true; // reject takes precedence over ignore EXPECT_NO_THROW(initialize("test/server/test_data/server/empty_bootstrap.yaml")); EXPECT_THAT_THROWS_MESSAGE( server_->messageValidationContext().staticValidationVisitor().onUnknownField("foo"), From abdbbde827e3a76d014feb9a94ec4f803b1950c3 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Tue, 12 May 2020 12:58:43 -0400 Subject: [PATCH 141/909] grpc: adding limits to outbound buffered data for google-gRPC access logs (#11072) Risk Level: Medium (changes by default) Testing: new unit tests, new guard also tested Docs Changes: n/a Release Notes: yes Optional Runtime guard: envoy.reloadable_features.allow_unbounded_access_logs Fixes #10764 Signed-off-by: Alyssa Wilk --- api/envoy/config/core/v3/grpc_service.proto | 7 ++- .../config/core/v4alpha/grpc_service.proto | 7 ++- docs/root/version_history/current.rst | 2 +- .../envoy/config/core/v3/grpc_service.proto | 7 ++- .../config/core/v4alpha/grpc_service.proto | 7 ++- .../common/grpc/google_async_client_impl.cc | 9 +++- source/common/grpc/google_async_client_impl.h | 11 ++++- source/common/runtime/runtime_features.cc | 2 + source/extensions/access_loggers/grpc/BUILD | 1 + .../grpc/grpc_access_log_impl.cc | 9 +++- .../grpc/google_async_client_impl_test.cc | 47 +++++++++++++++++-- test/extensions/access_loggers/grpc/BUILD | 1 + .../grpc/grpc_access_log_impl_test.cc | 43 +++++++++++++++++ 13 files changed, 140 insertions(+), 13 deletions(-) diff --git a/api/envoy/config/core/v3/grpc_service.proto b/api/envoy/config/core/v3/grpc_service.proto index 8719652a6bbe..3acd3c1c9b9c 100644 --- a/api/envoy/config/core/v3/grpc_service.proto +++ b/api/envoy/config/core/v3/grpc_service.proto @@ -8,6 +8,7 @@ import "google/protobuf/any.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/empty.proto"; import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; import "udpa/annotations/sensitive.proto"; import "udpa/annotations/status.proto"; @@ -37,7 +38,7 @@ message GrpcService { string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}]; } - // [#next-free-field: 7] + // [#next-free-field: 8] message GoogleGrpc { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.GrpcService.GoogleGrpc"; @@ -232,6 +233,10 @@ message GrpcService { // Additional configuration for site-specific customizations of the Google // gRPC library. 
google.protobuf.Struct config = 6; + + // How many bytes each stream can buffer internally. + // If not set an implementation defined default is applied (1MiB). + google.protobuf.UInt32Value per_stream_buffer_limit_bytes = 7; } reserved 4; diff --git a/api/envoy/config/core/v4alpha/grpc_service.proto b/api/envoy/config/core/v4alpha/grpc_service.proto index 64bbc6b5f077..b547cfb7deec 100644 --- a/api/envoy/config/core/v4alpha/grpc_service.proto +++ b/api/envoy/config/core/v4alpha/grpc_service.proto @@ -8,6 +8,7 @@ import "google/protobuf/any.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/empty.proto"; import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; import "udpa/annotations/sensitive.proto"; import "udpa/annotations/status.proto"; @@ -37,7 +38,7 @@ message GrpcService { string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}]; } - // [#next-free-field: 7] + // [#next-free-field: 8] message GoogleGrpc { option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.GrpcService.GoogleGrpc"; @@ -232,6 +233,10 @@ message GrpcService { // Additional configuration for site-specific customizations of the Google // gRPC library. google.protobuf.Struct config = 6; + + // How many bytes each stream can buffer internally. + // If not set an implementation defined default is applied (1MiB). + google.protobuf.UInt32Value per_stream_buffer_limit_bytes = 7; } reserved 4; diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index e5346afe2347..d294302b0ffb 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -5,7 +5,7 @@ Changes ------- * access loggers: added GRPC_STATUS operator on logging format. -* access loggers: applied existing buffer limits to the non-google gRPC access logs, as well as :ref:`stats ` for logged / dropped logs. +* access loggers: applied existing buffer limits to access logs, as well as :ref:`stats ` for logged / dropped logs. This can be reverted temporarily by setting runtime feature `envoy.reloadable_features.disallow_unbounded_access_logs` to false. * access loggers: extened specifier for FilterStateFormatter to output :ref:`unstructured log string `. * build: official released binary is now built on Ubuntu 18.04, requires glibc >= 2.27. * compressor: generic :ref:`compressor ` filter exposed to users. diff --git a/generated_api_shadow/envoy/config/core/v3/grpc_service.proto b/generated_api_shadow/envoy/config/core/v3/grpc_service.proto index 5cd8562f5783..89ce3132ef05 100644 --- a/generated_api_shadow/envoy/config/core/v3/grpc_service.proto +++ b/generated_api_shadow/envoy/config/core/v3/grpc_service.proto @@ -8,6 +8,7 @@ import "google/protobuf/any.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/empty.proto"; import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; import "udpa/annotations/sensitive.proto"; import "udpa/annotations/status.proto"; @@ -37,7 +38,7 @@ message GrpcService { string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}]; } - // [#next-free-field: 7] + // [#next-free-field: 8] message GoogleGrpc { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.GrpcService.GoogleGrpc"; @@ -230,6 +231,10 @@ message GrpcService { // Additional configuration for site-specific customizations of the Google // gRPC library. google.protobuf.Struct config = 6; + + // How many bytes each stream can buffer internally. 
+ // If not set an implementation defined default is applied (1MiB). + google.protobuf.UInt32Value per_stream_buffer_limit_bytes = 7; } reserved 4; diff --git a/generated_api_shadow/envoy/config/core/v4alpha/grpc_service.proto b/generated_api_shadow/envoy/config/core/v4alpha/grpc_service.proto index 64bbc6b5f077..b547cfb7deec 100644 --- a/generated_api_shadow/envoy/config/core/v4alpha/grpc_service.proto +++ b/generated_api_shadow/envoy/config/core/v4alpha/grpc_service.proto @@ -8,6 +8,7 @@ import "google/protobuf/any.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/empty.proto"; import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; import "udpa/annotations/sensitive.proto"; import "udpa/annotations/status.proto"; @@ -37,7 +38,7 @@ message GrpcService { string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}]; } - // [#next-free-field: 7] + // [#next-free-field: 8] message GoogleGrpc { option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.GrpcService.GoogleGrpc"; @@ -232,6 +233,10 @@ message GrpcService { // Additional configuration for site-specific customizations of the Google // gRPC library. google.protobuf.Struct config = 6; + + // How many bytes each stream can buffer internally. + // If not set an implementation defined default is applied (1MiB). + google.protobuf.UInt32Value per_stream_buffer_limit_bytes = 7; } reserved 4; diff --git a/source/common/grpc/google_async_client_impl.cc b/source/common/grpc/google_async_client_impl.cc index 9b23529c114c..18c4936e5e25 100644 --- a/source/common/grpc/google_async_client_impl.cc +++ b/source/common/grpc/google_async_client_impl.cc @@ -16,6 +16,9 @@ namespace Envoy { namespace Grpc { +namespace { +static constexpr int DefaultBufferLimitBytes = 1024 * 1024; +} GoogleAsyncClientThreadLocal::GoogleAsyncClientThreadLocal(Api::Api& api) : completion_thread_(api.threadFactory().createThread([this] { completionThread(); })) {} @@ -75,7 +78,9 @@ GoogleAsyncClientImpl::GoogleAsyncClientImpl(Event::Dispatcher& dispatcher, const envoy::config::core::v3::GrpcService& config, Api::Api& api, const StatNames& stat_names) : dispatcher_(dispatcher), tls_(tls), stat_prefix_(config.google_grpc().stat_prefix()), - initial_metadata_(config.initial_metadata()), scope_(scope) { + initial_metadata_(config.initial_metadata()), scope_(scope), + per_stream_buffer_limit_bytes_(PROTOBUF_GET_WRAPPED_OR_DEFAULT( + config.google_grpc(), per_stream_buffer_limit_bytes, DefaultBufferLimitBytes)) { // We rebuild the channel each time we construct the channel. It appears that the gRPC library is // smart enough to do connection pooling and reuse with identical channel args, so this should // have comparable overhead to what we are doing in Grpc::AsyncClientImpl, i.e. 
no expensive @@ -211,6 +216,7 @@ void GoogleAsyncStreamImpl::sendMessageRaw(Buffer::InstancePtr&& request, bool e write_pending_queue_.emplace(std::move(request), end_stream); ENVOY_LOG(trace, "Queued message to write ({} bytes)", write_pending_queue_.back().buf_.value().Length()); + bytes_in_write_pending_queue_ += write_pending_queue_.back().buf_.value().Length(); writeQueued(); } @@ -313,6 +319,7 @@ void GoogleAsyncStreamImpl::handleOpCompletion(GoogleAsyncTag::Operation op, boo case GoogleAsyncTag::Operation::Write: { ASSERT(ok); write_pending_ = false; + bytes_in_write_pending_queue_ -= write_pending_queue_.front().buf_.value().Length(); write_pending_queue_.pop(); writeQueued(); break; diff --git a/source/common/grpc/google_async_client_impl.h b/source/common/grpc/google_async_client_impl.h index a29d8ebff6f0..a23a3791f6f2 100644 --- a/source/common/grpc/google_async_client_impl.h +++ b/source/common/grpc/google_async_client_impl.h @@ -177,6 +177,7 @@ class GoogleAsyncClientImpl final : public RawAsyncClient, Logger::Loggable initial_metadata_; Stats::ScopeSharedPtr scope_; GoogleAsyncClientStats stats_; + uint64_t per_stream_buffer_limit_bytes_; friend class GoogleAsyncClientThreadLocal; friend class GoogleAsyncRequestImpl; @@ -212,8 +214,12 @@ class GoogleAsyncStreamImpl : public RawAsyncStream, void sendMessageRaw(Buffer::InstancePtr&& request, bool end_stream) override; void closeStream() override; void resetStream() override; - // The GoogleAsyncClientImpl doesn't do Envoy watermark based flow control. - bool isAboveWriteBufferHighWatermark() const override { return false; } + // While the Google-gRPC code doesn't use Envoy watermark buffers, the logical + // analog is to make sure that the aren't too many bytes in the pending write + // queue. + bool isAboveWriteBufferHighWatermark() const override { + return bytes_in_write_pending_queue_ > parent_.perStreamBufferLimitBytes(); + } protected: bool callFailed() const { return call_failed_; } @@ -274,6 +280,7 @@ class GoogleAsyncStreamImpl : public RawAsyncStream, grpc::ClientContext ctxt_; std::unique_ptr rw_; std::queue write_pending_queue_; + uint64_t bytes_in_write_pending_queue_{}; grpc::ByteBuffer read_buf_; grpc::Status status_; // Has Operation::Init completed? diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc index 0a03fed9bebc..b7b03c4f1e06 100644 --- a/source/common/runtime/runtime_features.cc +++ b/source/common/runtime/runtime_features.cc @@ -59,7 +59,9 @@ constexpr const char* runtime_features[] = { "envoy.reloadable_features.connection_header_sanitization", "envoy.reloadable_features.strict_authority_validation", "envoy.reloadable_features.reject_unsupported_transfer_encodings", + // Begin alphabetically sorted section. 
"envoy.deprecated_features.allow_deprecated_extension_names", + "envoy.reloadable_features.disallow_unbounded_access_logs", "envoy.reloadable_features.ext_authz_http_service_enable_case_sensitive_string_matcher", "envoy.reloadable_features.fix_upgrade_response", "envoy.reloadable_features.fixed_connection_close", diff --git a/source/extensions/access_loggers/grpc/BUILD b/source/extensions/access_loggers/grpc/BUILD index f487c0d3688a..3cf198c4d0fc 100644 --- a/source/extensions/access_loggers/grpc/BUILD +++ b/source/extensions/access_loggers/grpc/BUILD @@ -36,6 +36,7 @@ envoy_cc_library( "//include/envoy/upstream:upstream_interface", "//source/common/grpc:async_client_lib", "//source/common/grpc:typed_async_client_lib", + "//source/common/runtime:runtime_features_lib", "//source/extensions/access_loggers/common:access_log_base", "@envoy_api//envoy/data/accesslog/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/access_loggers/grpc/v3:pkg_cc_proto", diff --git a/source/extensions/access_loggers/grpc/grpc_access_log_impl.cc b/source/extensions/access_loggers/grpc/grpc_access_log_impl.cc index 6b980a358677..d9295a17f29c 100644 --- a/source/extensions/access_loggers/grpc/grpc_access_log_impl.cc +++ b/source/extensions/access_loggers/grpc/grpc_access_log_impl.cc @@ -6,6 +6,7 @@ #include "common/common/assert.h" #include "common/network/utility.h" +#include "common/runtime/runtime_features.h" #include "common/stream_info/utility.h" namespace Envoy { @@ -51,8 +52,12 @@ bool GrpcAccessLoggerImpl::canLogMore() { stats_.logs_written_.inc(); return true; } - stats_.logs_dropped_.inc(); - return false; + if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.disallow_unbounded_access_logs")) { + stats_.logs_dropped_.inc(); + return false; + } + stats_.logs_written_.inc(); + return true; } void GrpcAccessLoggerImpl::log(envoy::data::accesslog::v3::HTTPAccessLogEntry&& entry) { diff --git a/test/common/grpc/google_async_client_impl_test.cc b/test/common/grpc/google_async_client_impl_test.cc index cd9749cb63c1..86066f8bc66a 100644 --- a/test/common/grpc/google_async_client_impl_test.cc +++ b/test/common/grpc/google_async_client_impl_test.cc @@ -17,6 +17,7 @@ using testing::_; using testing::Eq; +using testing::NiceMock; using testing::Return; namespace Envoy { @@ -54,15 +55,18 @@ class EnvoyGoogleAsyncClientImplTest : public testing::Test { method_descriptor_(helloworld::Greeter::descriptor()->FindMethodByName("SayHello")), stat_names_(scope_->symbolTable()) { - envoy::config::core::v3::GrpcService config; - auto* google_grpc = config.mutable_google_grpc(); + auto* google_grpc = config_.mutable_google_grpc(); google_grpc->set_target_uri("fake_address"); google_grpc->set_stat_prefix("test_cluster"); tls_ = std::make_unique(*api_); + } + + virtual void initialize() { grpc_client_ = std::make_unique(*dispatcher_, *tls_, stub_factory_, - scope_, config, *api_, stat_names_); + scope_, config_, *api_, stat_names_); } + envoy::config::core::v3::GrpcService config_; DangerousDeprecatedTestTime test_time_; Stats::IsolatedStoreImpl* stats_store_; // Ownership transferred to scope_. Api::ApiPtr api_; @@ -78,6 +82,8 @@ class EnvoyGoogleAsyncClientImplTest : public testing::Test { // Validate that a failure in gRPC stub call creation returns immediately with // status UNAVAILABLE. 
TEST_F(EnvoyGoogleAsyncClientImplTest, StreamHttpStartFail) { + initialize(); + EXPECT_CALL(*stub_factory_.stub_, PrepareCall_(_, _, _)).WillOnce(Return(nullptr)); MockAsyncStreamCallbacks grpc_callbacks; EXPECT_CALL(grpc_callbacks, onCreateInitialMetadata(_)); @@ -91,6 +97,8 @@ TEST_F(EnvoyGoogleAsyncClientImplTest, StreamHttpStartFail) { // Validate that a failure in gRPC stub call creation returns immediately with // status UNAVAILABLE. TEST_F(EnvoyGoogleAsyncClientImplTest, RequestHttpStartFail) { + initialize(); + EXPECT_CALL(*stub_factory_.stub_, PrepareCall_(_, _, _)).WillOnce(Return(nullptr)); MockAsyncRequestCallbacks grpc_callbacks; EXPECT_CALL(grpc_callbacks, onCreateInitialMetadata(_)); @@ -114,6 +122,39 @@ TEST_F(EnvoyGoogleAsyncClientImplTest, RequestHttpStartFail) { EXPECT_TRUE(grpc_request == nullptr); } +class EnvoyGoogleLessMockedAsyncClientImplTest : public EnvoyGoogleAsyncClientImplTest { +public: + void initialize() override { + grpc_client_ = std::make_unique(*dispatcher_, *tls_, real_stub_factory_, + scope_, config_, *api_, stat_names_); + } + + GoogleGenericStubFactory real_stub_factory_; +}; + +TEST_F(EnvoyGoogleLessMockedAsyncClientImplTest, TestOverflow) { + // Set an (unreasonably) low byte limit. + auto* google_grpc = config_.mutable_google_grpc(); + google_grpc->mutable_per_stream_buffer_limit_bytes()->set_value(1); + initialize(); + + NiceMock> grpc_callbacks; + AsyncStream grpc_stream = + grpc_client_->start(*method_descriptor_, grpc_callbacks, Http::AsyncClient::RequestOptions()); + EXPECT_FALSE(grpc_stream == nullptr); + EXPECT_FALSE(grpc_stream->isAboveWriteBufferHighWatermark()); + + // With no data in the message, it won't back up. + helloworld::HelloRequest request_msg; + grpc_stream->sendMessage(request_msg, false); + EXPECT_FALSE(grpc_stream->isAboveWriteBufferHighWatermark()); + + // With actual data we pass the very small byte limit. 
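  // Descriptive note on the accounting behind these checks, based on the changes
  // earlier in this patch: sendMessageRaw() adds each queued buffer's length to
  // bytes_in_write_pending_queue_, the Write completion subtracts it again, and
  // isAboveWriteBufferHighWatermark() compares that running total against
  // per_stream_buffer_limit_bytes (1 MiB by default when the new proto field is
  // unset). An empty HelloRequest serializes to zero bytes, which is why the first
  // send stays below the 1-byte limit configured above.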
+ request_msg.set_name("bob"); + grpc_stream->sendMessage(request_msg, false); + EXPECT_TRUE(grpc_stream->isAboveWriteBufferHighWatermark()); +} + } // namespace } // namespace Grpc } // namespace Envoy diff --git a/test/extensions/access_loggers/grpc/BUILD b/test/extensions/access_loggers/grpc/BUILD index 934921e13a1e..570f723c6c2f 100644 --- a/test/extensions/access_loggers/grpc/BUILD +++ b/test/extensions/access_loggers/grpc/BUILD @@ -23,6 +23,7 @@ envoy_extension_cc_test( "//test/mocks/ssl:ssl_mocks", "//test/mocks/stream_info:stream_info_mocks", "//test/mocks/thread_local:thread_local_mocks", + "//test/test_common:test_runtime_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/data/accesslog/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/access_loggers/grpc/v3:pkg_cc_proto", diff --git a/test/extensions/access_loggers/grpc/grpc_access_log_impl_test.cc b/test/extensions/access_loggers/grpc/grpc_access_log_impl_test.cc index 7dfcbad143ad..5747ed71b3ed 100644 --- a/test/extensions/access_loggers/grpc/grpc_access_log_impl_test.cc +++ b/test/extensions/access_loggers/grpc/grpc_access_log_impl_test.cc @@ -16,8 +16,10 @@ #include "test/mocks/ssl/mocks.h" #include "test/mocks/stream_info/mocks.h" #include "test/mocks/thread_local/mocks.h" +#include "test/test_common/test_runtime.h" using testing::_; +using testing::AnyNumber; using testing::InSequence; using testing::Invoke; using testing::NiceMock; @@ -198,6 +200,47 @@ TEST_F(GrpcAccessLoggerImplTest, WatermarksOverrun) { TestUtility::findCounter(stats_store_, "access_logs.grpc_access_log.logs_dropped")->value()); } +// Test legacy behavior of unbounded access logs. +TEST_F(GrpcAccessLoggerImplTest, WatermarksLegacy) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.disallow_unbounded_access_logs", "false"}}); + + InSequence s; + initLogger(FlushInterval, 1); + + // Start a stream for the first log. + MockAccessLogStream stream; + AccessLogCallbacks* callbacks; + expectStreamStart(stream, &callbacks); + EXPECT_CALL(local_info_, node()); + + EXPECT_CALL(stream, isAboveWriteBufferHighWatermark()) + .Times(AnyNumber()) + .WillRepeatedly(Return(true)); + + // Fail to flush, so the log stays buffered up. + envoy::data::accesslog::v3::HTTPAccessLogEntry entry; + entry.mutable_request()->set_path("/test/path1"); + EXPECT_CALL(stream, sendMessageRaw_(_, false)).Times(0); + logger_->log(envoy::data::accesslog::v3::HTTPAccessLogEntry(entry)); + EXPECT_EQ( + 1, + TestUtility::findCounter(stats_store_, "access_logs.grpc_access_log.logs_written")->value()); + EXPECT_EQ( + 0, + TestUtility::findCounter(stats_store_, "access_logs.grpc_access_log.logs_dropped")->value()); + + // As with the above test, try to log more. The log will not be dropped. + EXPECT_CALL(stream, sendMessageRaw_(_, _)).Times(0); + logger_->log(envoy::data::accesslog::v3::HTTPAccessLogEntry(entry)); + EXPECT_EQ( + 2, + TestUtility::findCounter(stats_store_, "access_logs.grpc_access_log.logs_written")->value()); + EXPECT_EQ( + 0, + TestUtility::findCounter(stats_store_, "access_logs.grpc_access_log.logs_dropped")->value()); +} // Test that stream failure is handled correctly. TEST_F(GrpcAccessLoggerImplTest, StreamFailure) { InSequence s; From fcc8791d982917d1f0a1fd1bbaff82395e898ca1 Mon Sep 17 00:00:00 2001 From: htuch Date: Tue, 12 May 2020 13:15:42 -0400 Subject: [PATCH 142/909] tools: remove V3_RESTRICTED_PATHS from validate_structure.py. (#11145) Legacy artifact from v3alpha -> v3. 
Signed-off-by: Harvey Tuch --- tools/api/validate_structure.py | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/tools/api/validate_structure.py b/tools/api/validate_structure.py index 3b59af202136..05cfe0cff502 100755 --- a/tools/api/validate_structure.py +++ b/tools/api/validate_structure.py @@ -17,12 +17,6 @@ 'config/common/tap', ] -# These are trees that allow v3+ protos, but only a strict whitelist. -V3_RESTRICTED_PATHS = { - 'config/accesslog/v3': ['accesslog.proto'], - 'service/discovery/v3': ['ads.proto', 'discovery.proto'], -} - # These are the only legacy trees that we permit not to terminate with a versioned suffix. VERSIONLESS_PATHS = [ 'annotations', @@ -71,13 +65,6 @@ def ValidateProtoPath(proto_path): if str(proto_path).startswith(p): raise ValidationError('v3+ protos are not allowed in %s' % p) - # Validate v3 restricted paths. - for p in V3_RESTRICTED_PATHS: - if str(proto_path).startswith(p): - allowed_files = V3_RESTRICTED_PATHS[p] - if proto_path.name not in allowed_files: - raise ValidationError('Only %s allowed in %s' % (allowed_files, p)) - # Validate a list of proto paths. def ValidateProtoPaths(proto_paths): From 520e0c7050564ac7886129a87290e9e433470efd Mon Sep 17 00:00:00 2001 From: Jose Ulises Nino Rivera Date: Tue, 12 May 2020 12:03:23 -0700 Subject: [PATCH 143/909] compression: create a decompressor extensibility point and move gzip decompressor (#10744) creates decompressors as an extension point and moves the zlib based gzip decompressor. Signed-off-by: Jose Nino --- CODEOWNERS | 3 + api/BUILD | 1 + .../compression/gzip/compressor/v3/gzip.proto | 2 +- .../compression/gzip/decompressor/v3/BUILD | 9 +++ .../gzip/decompressor/v3/gzip.proto | 30 ++++++++++ api/versioning/BUILD | 1 + .../compression/gzip/compressor/v3/gzip.proto | 2 +- .../compression/gzip/decompressor/v3/BUILD | 9 +++ .../gzip/decompressor/v3/gzip.proto | 30 ++++++++++ include/envoy/compression/compressor/config.h | 2 + include/envoy/compression/decompressor/BUILD | 35 +++++++++++ .../envoy/compression/decompressor/config.h | 24 ++++++++ .../decompressor/decompressor.h | 4 ++ .../envoy/compression/decompressor/factory.h | 25 ++++++++ source/common/common/zlib/base.h | 1 + source/common/decompressor/BUILD | 23 -------- .../common/compressor/factory_base.h | 3 +- .../compression/common}/decompressor/BUILD | 6 +- .../common/decompressor/factory_base.h | 43 ++++++++++++++ .../compression/gzip/decompressor/BUILD | 37 ++++++++++++ .../compression/gzip/decompressor/config.cc | 40 +++++++++++++ .../compression/gzip/decompressor/config.h | 59 +++++++++++++++++++ .../decompressor/zlib_decompressor_impl.cc | 8 ++- .../decompressor/zlib_decompressor_impl.h | 12 +++- source/extensions/extensions_build_config.bzl | 1 + test/common/stats/BUILD | 1 - test/extensions/compression/gzip/BUILD | 2 +- .../compression/gzip/compressor_fuzz_test.cc | 2 +- .../compression/gzip}/decompressor/BUILD | 4 +- .../zlib_decompressor_impl_test.cc | 24 +++++--- test/extensions/filters/http/compressor/BUILD | 2 +- .../compressor_filter_integration_test.cc | 4 +- test/extensions/filters/http/gzip/BUILD | 4 +- .../http/gzip/gzip_filter_integration_test.cc | 4 +- .../filters/http/gzip/gzip_filter_test.cc | 4 +- tools/spelling/spelling_dictionary.txt | 1 + 36 files changed, 405 insertions(+), 57 deletions(-) create mode 100644 api/envoy/extensions/compression/gzip/decompressor/v3/BUILD create mode 100644 api/envoy/extensions/compression/gzip/decompressor/v3/gzip.proto create mode 100644 
generated_api_shadow/envoy/extensions/compression/gzip/decompressor/v3/BUILD create mode 100644 generated_api_shadow/envoy/extensions/compression/gzip/decompressor/v3/gzip.proto create mode 100644 include/envoy/compression/decompressor/BUILD create mode 100644 include/envoy/compression/decompressor/config.h rename include/envoy/{ => compression}/decompressor/decompressor.h (84%) create mode 100644 include/envoy/compression/decompressor/factory.h delete mode 100644 source/common/decompressor/BUILD rename {include/envoy => source/extensions/compression/common}/decompressor/BUILD (53%) create mode 100644 source/extensions/compression/common/decompressor/factory_base.h create mode 100644 source/extensions/compression/gzip/decompressor/BUILD create mode 100644 source/extensions/compression/gzip/decompressor/config.cc create mode 100644 source/extensions/compression/gzip/decompressor/config.h rename source/{common => extensions/compression/gzip}/decompressor/zlib_decompressor_impl.cc (91%) rename source/{common => extensions/compression/gzip}/decompressor/zlib_decompressor_impl.h (84%) rename test/{common => extensions/compression/gzip}/decompressor/BUILD (76%) rename test/{common => extensions/compression/gzip}/decompressor/zlib_decompressor_impl_test.cc (92%) diff --git a/CODEOWNERS b/CODEOWNERS index b82652d16143..9a7d0f06b94a 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -114,3 +114,6 @@ extensions/filters/common/original_src @snowp @klarose /*/extensions/filters/network/local_ratelimit @mattklein123 @junr03 /*/extensions/filters/http/aws_request_signing @rgs1 @derekargueta @mattklein123 @marcomagdy /*/extensions/filters/http/aws_lambda @mattklein123 @marcomagdy @lavignes +# Compression +/*/extensions/compression/common @junr03 @rojkov +/*/extensions/compression/gzip @junr03 @rojkov diff --git a/api/BUILD b/api/BUILD index 2472352ab2bd..f5bd8c1a8d0b 100644 --- a/api/BUILD +++ b/api/BUILD @@ -162,6 +162,7 @@ proto_library( "//envoy/extensions/common/ratelimit/v3:pkg", "//envoy/extensions/common/tap/v3:pkg", "//envoy/extensions/compression/gzip/compressor/v3:pkg", + "//envoy/extensions/compression/gzip/decompressor/v3:pkg", "//envoy/extensions/filters/common/fault/v3:pkg", "//envoy/extensions/filters/http/adaptive_concurrency/v3:pkg", "//envoy/extensions/filters/http/aws_lambda/v3:pkg", diff --git a/api/envoy/extensions/compression/gzip/compressor/v3/gzip.proto b/api/envoy/extensions/compression/gzip/compressor/v3/gzip.proto index 7508e17df221..d4d60eaa43ee 100644 --- a/api/envoy/extensions/compression/gzip/compressor/v3/gzip.proto +++ b/api/envoy/extensions/compression/gzip/compressor/v3/gzip.proto @@ -13,7 +13,7 @@ option java_outer_classname = "GzipProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; -// [#protodoc-title: Gzip] +// [#protodoc-title: Gzip Compressor] // [#extension: envoy.compression.gzip.compressor] // [#next-free-field: 6] diff --git a/api/envoy/extensions/compression/gzip/decompressor/v3/BUILD b/api/envoy/extensions/compression/gzip/decompressor/v3/BUILD new file mode 100644 index 000000000000..ef3541ebcb1d --- /dev/null +++ b/api/envoy/extensions/compression/gzip/decompressor/v3/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/api/envoy/extensions/compression/gzip/decompressor/v3/gzip.proto b/api/envoy/extensions/compression/gzip/decompressor/v3/gzip.proto new file mode 100644 index 000000000000..097531ab1e9f --- /dev/null +++ b/api/envoy/extensions/compression/gzip/decompressor/v3/gzip.proto @@ -0,0 +1,30 @@ +syntax = "proto3"; + +package envoy.extensions.compression.gzip.decompressor.v3; + +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.compression.gzip.decompressor.v3"; +option java_outer_classname = "GzipProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Gzip Decompressor] +// [#extension: envoy.compression.gzip.decompressor] + +message Gzip { + // Value from 9 to 15 that represents the base two logarithmic of the decompressor's window size. + // The decompression window size needs to be equal or larger than the compression window size. + // The default is 15 per zlib's manual. For more details about this parameter, please refer to + // zlib manual > inflateInit2. + google.protobuf.UInt32Value window_bits = 1 [(validate.rules).uint32 = {lte: 15 gte: 9}]; + + // Value for zlib's decompressor output buffer. If not set, defaults to 4096. + // See https://www.zlib.net/manual.html for more details. + google.protobuf.UInt32Value chunk_size = 2 [(validate.rules).uint32 = {lte: 65536 gte: 4096}]; +} diff --git a/api/versioning/BUILD b/api/versioning/BUILD index 4d768d09a015..e23f851b221e 100644 --- a/api/versioning/BUILD +++ b/api/versioning/BUILD @@ -45,6 +45,7 @@ proto_library( "//envoy/extensions/common/ratelimit/v3:pkg", "//envoy/extensions/common/tap/v3:pkg", "//envoy/extensions/compression/gzip/compressor/v3:pkg", + "//envoy/extensions/compression/gzip/decompressor/v3:pkg", "//envoy/extensions/filters/common/fault/v3:pkg", "//envoy/extensions/filters/http/adaptive_concurrency/v3:pkg", "//envoy/extensions/filters/http/aws_lambda/v3:pkg", diff --git a/generated_api_shadow/envoy/extensions/compression/gzip/compressor/v3/gzip.proto b/generated_api_shadow/envoy/extensions/compression/gzip/compressor/v3/gzip.proto index 7508e17df221..d4d60eaa43ee 100644 --- a/generated_api_shadow/envoy/extensions/compression/gzip/compressor/v3/gzip.proto +++ b/generated_api_shadow/envoy/extensions/compression/gzip/compressor/v3/gzip.proto @@ -13,7 +13,7 @@ option java_outer_classname = "GzipProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; -// [#protodoc-title: Gzip] +// [#protodoc-title: Gzip Compressor] // [#extension: envoy.compression.gzip.compressor] // [#next-free-field: 6] diff --git a/generated_api_shadow/envoy/extensions/compression/gzip/decompressor/v3/BUILD b/generated_api_shadow/envoy/extensions/compression/gzip/decompressor/v3/BUILD new file mode 100644 index 000000000000..ef3541ebcb1d --- /dev/null +++ b/generated_api_shadow/envoy/extensions/compression/gzip/decompressor/v3/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/generated_api_shadow/envoy/extensions/compression/gzip/decompressor/v3/gzip.proto b/generated_api_shadow/envoy/extensions/compression/gzip/decompressor/v3/gzip.proto new file mode 100644 index 000000000000..097531ab1e9f --- /dev/null +++ b/generated_api_shadow/envoy/extensions/compression/gzip/decompressor/v3/gzip.proto @@ -0,0 +1,30 @@ +syntax = "proto3"; + +package envoy.extensions.compression.gzip.decompressor.v3; + +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.compression.gzip.decompressor.v3"; +option java_outer_classname = "GzipProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Gzip Decompressor] +// [#extension: envoy.compression.gzip.decompressor] + +message Gzip { + // Value from 9 to 15 that represents the base two logarithmic of the decompressor's window size. + // The decompression window size needs to be equal or larger than the compression window size. + // The default is 15 per zlib's manual. For more details about this parameter, please refer to + // zlib manual > inflateInit2. + google.protobuf.UInt32Value window_bits = 1 [(validate.rules).uint32 = {lte: 15 gte: 9}]; + + // Value for zlib's decompressor output buffer. If not set, defaults to 4096. + // See https://www.zlib.net/manual.html for more details. + google.protobuf.UInt32Value chunk_size = 2 [(validate.rules).uint32 = {lte: 65536 gte: 4096}]; +} diff --git a/include/envoy/compression/compressor/config.h b/include/envoy/compression/compressor/config.h index af8f0b9997fa..3ef89c9f0d55 100644 --- a/include/envoy/compression/compressor/config.h +++ b/include/envoy/compression/compressor/config.h @@ -15,6 +15,8 @@ class NamedCompressorLibraryConfigFactory : public Config::TypedFactory { virtual CompressorFactoryPtr createCompressorFactoryFromProto(const Protobuf::Message& config, Server::Configuration::FactoryContext& context) PURE; + + std::string category() const override { return "envoy.compression.compressor"; } }; } // namespace Compressor diff --git a/include/envoy/compression/decompressor/BUILD b/include/envoy/compression/decompressor/BUILD new file mode 100644 index 000000000000..60a8e9cb7eeb --- /dev/null +++ b/include/envoy/compression/decompressor/BUILD @@ -0,0 +1,35 @@ +licenses(["notice"]) # Apache 2 + +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) + +envoy_package() + +envoy_cc_library( + name = "decompressor_config_interface", + hdrs = ["config.h"], + deps = [ + ":decompressor_factory_interface", + "//include/envoy/config:typed_config_interface", + "//include/envoy/server:filter_config_interface", + ], +) + +envoy_cc_library( + name = "decompressor_factory_interface", + hdrs = ["factory.h"], + deps = [ + ":decompressor_interface", + ], +) + +envoy_cc_library( + name = "decompressor_interface", + hdrs = ["decompressor.h"], + deps = [ + "//include/envoy/buffer:buffer_interface", + ], +) diff --git a/include/envoy/compression/decompressor/config.h b/include/envoy/compression/decompressor/config.h new file mode 100644 index 000000000000..15ecd1255d6a --- /dev/null +++ 
b/include/envoy/compression/decompressor/config.h @@ -0,0 +1,24 @@ +#pragma once + +#include "envoy/compression/decompressor/factory.h" +#include "envoy/config/typed_config.h" +#include "envoy/server/filter_config.h" + +namespace Envoy { +namespace Compression { +namespace Decompressor { + +class NamedDecompressorLibraryConfigFactory : public Config::TypedFactory { +public: + ~NamedDecompressorLibraryConfigFactory() override = default; + + virtual DecompressorFactoryPtr + createDecompressorFactoryFromProto(const Protobuf::Message& config, + Server::Configuration::FactoryContext& context) PURE; + + std::string category() const override { return "envoy.compression.decompressor"; } +}; + +} // namespace Decompressor +} // namespace Compression +} // namespace Envoy \ No newline at end of file diff --git a/include/envoy/decompressor/decompressor.h b/include/envoy/compression/decompressor/decompressor.h similarity index 84% rename from include/envoy/decompressor/decompressor.h rename to include/envoy/compression/decompressor/decompressor.h index d694aa50ca1c..c0518a5789b3 100644 --- a/include/envoy/decompressor/decompressor.h +++ b/include/envoy/compression/decompressor/decompressor.h @@ -3,6 +3,7 @@ #include "envoy/buffer/buffer.h" namespace Envoy { +namespace Compression { namespace Decompressor { /** @@ -21,5 +22,8 @@ class Decompressor { Buffer::Instance& output_buffer) PURE; }; +using DecompressorPtr = std::unique_ptr; + } // namespace Decompressor +} // namespace Compression } // namespace Envoy diff --git a/include/envoy/compression/decompressor/factory.h b/include/envoy/compression/decompressor/factory.h new file mode 100644 index 000000000000..e0a38713b42f --- /dev/null +++ b/include/envoy/compression/decompressor/factory.h @@ -0,0 +1,25 @@ +#pragma once + +#include "envoy/compression/decompressor/decompressor.h" + +namespace Envoy { +namespace Compression { +namespace Decompressor { + +class DecompressorFactory { +public: + virtual ~DecompressorFactory() = default; + + virtual DecompressorPtr createDecompressor() PURE; + virtual const std::string& statsPrefix() const PURE; + // TODO(junr03): this method assumes that decompressors are used on http messages. + // A more generic method might be `hint()` which gives the user of the decompressor a hint about + // the type of decompression that it can perform. + virtual const std::string& contentEncoding() const PURE; +}; + +using DecompressorFactoryPtr = std::unique_ptr; + +} // namespace Decompressor +} // namespace Compression +} // namespace Envoy \ No newline at end of file diff --git a/source/common/common/zlib/base.h b/source/common/common/zlib/base.h index 4f427fb90985..f8b89cb25335 100644 --- a/source/common/common/zlib/base.h +++ b/source/common/common/zlib/base.h @@ -12,6 +12,7 @@ namespace Zlib { /** * Shared code between the compressor and the decompressor. */ +// TODO(junr03): move to extensions tree once the compressor side is moved to extensions. 
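Putting the new decompressor interfaces above together, a minimal usage sketch (illustrative only, not part of the patch; factory_context, compressed, and output are assumed placeholders, and the gzip library name and proto come from the config.h/config.cc added later in this diff):

    // Look up the registered gzip decompressor library and build a decompressor from its proto.
    auto* library_factory = Registry::FactoryRegistry<
        Envoy::Compression::Decompressor::NamedDecompressorLibraryConfigFactory>::
        getFactory("envoy.compression.gzip.decompressor");
    envoy::extensions::compression::gzip::decompressor::v3::Gzip gzip_proto;
    gzip_proto.mutable_window_bits()->set_value(15);
    Envoy::Compression::Decompressor::DecompressorFactoryPtr decompressor_factory =
        library_factory->createDecompressorFactoryFromProto(gzip_proto, factory_context);
    Envoy::Compression::Decompressor::DecompressorPtr decompressor =
        decompressor_factory->createDecompressor();
    decompressor->decompress(compressed, output);  // Buffer::Instance in, Buffer::Instance out.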
class Base { public: Base(uint64_t chunk_size, std::function zstream_deleter); diff --git a/source/common/decompressor/BUILD b/source/common/decompressor/BUILD deleted file mode 100644 index dfdf8f9b90ed..000000000000 --- a/source/common/decompressor/BUILD +++ /dev/null @@ -1,23 +0,0 @@ -licenses(["notice"]) # Apache 2 - -load( - "//bazel:envoy_build_system.bzl", - "envoy_cc_library", - "envoy_package", -) - -envoy_package() - -envoy_cc_library( - name = "decompressor_lib", - srcs = ["zlib_decompressor_impl.cc"], - hdrs = ["zlib_decompressor_impl.h"], - external_deps = ["zlib"], - deps = [ - "//include/envoy/decompressor:decompressor_interface", - "//source/common/buffer:buffer_lib", - "//source/common/common:assert_lib", - "//source/common/common:minimal_logger_lib", - "//source/common/common:zlib_base_lib", - ], -) diff --git a/source/extensions/compression/common/compressor/factory_base.h b/source/extensions/compression/common/compressor/factory_base.h index fe2ddefb9c32..472d754cb9cd 100644 --- a/source/extensions/compression/common/compressor/factory_base.h +++ b/source/extensions/compression/common/compressor/factory_base.h @@ -26,8 +26,6 @@ class CompressorLibraryFactoryBase return std::make_unique(); } - std::string category() const override { return "envoy.compression.compressor"; } - std::string name() const override { return name_; } protected: @@ -36,6 +34,7 @@ class CompressorLibraryFactoryBase private: virtual Envoy::Compression::Compressor::CompressorFactoryPtr createCompressorFactoryFromProtoTyped(const ConfigProto&) PURE; + const std::string name_; }; diff --git a/include/envoy/decompressor/BUILD b/source/extensions/compression/common/decompressor/BUILD similarity index 53% rename from include/envoy/decompressor/BUILD rename to source/extensions/compression/common/decompressor/BUILD index 4dbcfec52980..3b3e96b3e980 100644 --- a/include/envoy/decompressor/BUILD +++ b/source/extensions/compression/common/decompressor/BUILD @@ -9,9 +9,9 @@ load( envoy_package() envoy_cc_library( - name = "decompressor_interface", - hdrs = ["decompressor.h"], + name = "decompressor_factory_base_lib", + hdrs = ["factory_base.h"], deps = [ - "//include/envoy/buffer:buffer_interface", + "//include/envoy/compression/decompressor:decompressor_config_interface", ], ) diff --git a/source/extensions/compression/common/decompressor/factory_base.h b/source/extensions/compression/common/decompressor/factory_base.h new file mode 100644 index 000000000000..98144e02e1b6 --- /dev/null +++ b/source/extensions/compression/common/decompressor/factory_base.h @@ -0,0 +1,43 @@ +#pragma once + +#include "envoy/compression/decompressor/config.h" + +namespace Envoy { +namespace Extensions { +namespace Compression { +namespace Common { +namespace Decompressor { + +template +class DecompressorLibraryFactoryBase + : public Envoy::Compression::Decompressor::NamedDecompressorLibraryConfigFactory { +public: + Envoy::Compression::Decompressor::DecompressorFactoryPtr + createDecompressorFactoryFromProto(const Protobuf::Message& proto_config, + Server::Configuration::FactoryContext& context) override { + return createDecompressorFactoryFromProtoTyped( + MessageUtil::downcastAndValidate(proto_config, + context.messageValidationVisitor())); + } + + ProtobufTypes::MessagePtr createEmptyConfigProto() override { + return std::make_unique(); + } + + std::string name() const override { return name_; } + +protected: + DecompressorLibraryFactoryBase(const std::string& name) : name_(name) {} + +private: + virtual 
Envoy::Compression::Decompressor::DecompressorFactoryPtr + createDecompressorFactoryFromProtoTyped(const ConfigProto&) PURE; + + const std::string name_; +}; + +} // namespace Decompressor +} // namespace Common +} // namespace Compression +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/compression/gzip/decompressor/BUILD b/source/extensions/compression/gzip/decompressor/BUILD new file mode 100644 index 000000000000..bfb693b8ac64 --- /dev/null +++ b/source/extensions/compression/gzip/decompressor/BUILD @@ -0,0 +1,37 @@ +licenses(["notice"]) # Apache 2 + +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", + "envoy_cc_library", + "envoy_package", +) + +envoy_package() + +envoy_cc_library( + name = "zlib_decompressor_impl_lib", + srcs = ["zlib_decompressor_impl.cc"], + hdrs = ["zlib_decompressor_impl.h"], + external_deps = ["zlib"], + deps = [ + "//include/envoy/compression/decompressor:decompressor_interface", + "//source/common/buffer:buffer_lib", + "//source/common/common:assert_lib", + "//source/common/common:minimal_logger_lib", + "//source/common/common:zlib_base_lib", + ], +) + +envoy_cc_extension( + name = "config", + srcs = ["config.cc"], + hdrs = ["config.h"], + security_posture = "robust_to_untrusted_downstream", + deps = [ + ":zlib_decompressor_impl_lib", + "//source/common/http:headers_lib", + "//source/extensions/compression/common/decompressor:decompressor_factory_base_lib", + "@envoy_api//envoy/extensions/compression/gzip/decompressor/v3:pkg_cc_proto", + ], +) diff --git a/source/extensions/compression/gzip/decompressor/config.cc b/source/extensions/compression/gzip/decompressor/config.cc new file mode 100644 index 000000000000..8dc898be7060 --- /dev/null +++ b/source/extensions/compression/gzip/decompressor/config.cc @@ -0,0 +1,40 @@ +#include "extensions/compression/gzip/decompressor/config.h" + +namespace Envoy { +namespace Extensions { +namespace Compression { +namespace Gzip { +namespace Decompressor { + +namespace { +const uint32_t DefaultWindowBits = 12; +const uint32_t DefaultChunkSize = 4096; +} // namespace + +GzipDecompressorFactory::GzipDecompressorFactory( + const envoy::extensions::compression::gzip::decompressor::v3::Gzip& gzip) + : window_bits_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(gzip, window_bits, DefaultWindowBits)), + chunk_size_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(gzip, chunk_size, DefaultChunkSize)) {} + +Envoy::Compression::Decompressor::DecompressorPtr GzipDecompressorFactory::createDecompressor() { + auto decompressor = std::make_unique(chunk_size_); + decompressor->init(window_bits_); + return decompressor; +} + +Envoy::Compression::Decompressor::DecompressorFactoryPtr +GzipDecompressorLibraryFactory::createDecompressorFactoryFromProtoTyped( + const envoy::extensions::compression::gzip::decompressor::v3::Gzip& proto_config) { + return std::make_unique(proto_config); +} + +/** + * Static registration for the gzip decompressor. @see NamedDecompressorLibraryConfigFactory. 
+ */ +REGISTER_FACTORY(GzipDecompressorLibraryFactory, + Envoy::Compression::Decompressor::NamedDecompressorLibraryConfigFactory); +} // namespace Decompressor +} // namespace Gzip +} // namespace Compression +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/compression/gzip/decompressor/config.h b/source/extensions/compression/gzip/decompressor/config.h new file mode 100644 index 000000000000..9a99398b23a4 --- /dev/null +++ b/source/extensions/compression/gzip/decompressor/config.h @@ -0,0 +1,59 @@ +#pragma once + +#include "envoy/compression/decompressor/config.h" +#include "envoy/extensions/compression/gzip/decompressor/v3/gzip.pb.h" +#include "envoy/extensions/compression/gzip/decompressor/v3/gzip.pb.validate.h" + +#include "common/http/headers.h" + +#include "extensions/compression/common/decompressor/factory_base.h" +#include "extensions/compression/gzip/decompressor/zlib_decompressor_impl.h" + +namespace Envoy { +namespace Extensions { +namespace Compression { +namespace Gzip { +namespace Decompressor { + +namespace { +const std::string& gzipStatsPrefix() { CONSTRUCT_ON_FIRST_USE(std::string, "gzip."); } +const std::string& gzipExtensionName() { + CONSTRUCT_ON_FIRST_USE(std::string, "envoy.compression.gzip.decompressor"); +} + +} // namespace + +class GzipDecompressorFactory : public Envoy::Compression::Decompressor::DecompressorFactory { +public: + GzipDecompressorFactory(const envoy::extensions::compression::gzip::decompressor::v3::Gzip& gzip); + + // Envoy::Compression::Decompressor::DecompressorFactory + Envoy::Compression::Decompressor::DecompressorPtr createDecompressor() override; + const std::string& statsPrefix() const override { return gzipStatsPrefix(); } + const std::string& contentEncoding() const override { + return Http::Headers::get().ContentEncodingValues.Gzip; + } + +private: + const int32_t window_bits_; + const uint32_t chunk_size_; +}; + +class GzipDecompressorLibraryFactory + : public Common::Decompressor::DecompressorLibraryFactoryBase< + envoy::extensions::compression::gzip::decompressor::v3::Gzip> { +public: + GzipDecompressorLibraryFactory() : DecompressorLibraryFactoryBase(gzipExtensionName()) {} + +private: + Envoy::Compression::Decompressor::DecompressorFactoryPtr createDecompressorFactoryFromProtoTyped( + const envoy::extensions::compression::gzip::decompressor::v3::Gzip& config) override; +}; + +DECLARE_FACTORY(GzipDecompressorLibraryFactory); + +} // namespace Decompressor +} // namespace Gzip +} // namespace Compression +} // namespace Extensions +} // namespace Envoy diff --git a/source/common/decompressor/zlib_decompressor_impl.cc b/source/extensions/compression/gzip/decompressor/zlib_decompressor_impl.cc similarity index 91% rename from source/common/decompressor/zlib_decompressor_impl.cc rename to source/extensions/compression/gzip/decompressor/zlib_decompressor_impl.cc index 55dffc6d3609..4a1ca6251098 100644 --- a/source/common/decompressor/zlib_decompressor_impl.cc +++ b/source/extensions/compression/gzip/decompressor/zlib_decompressor_impl.cc @@ -1,4 +1,4 @@ -#include "common/decompressor/zlib_decompressor_impl.h" +#include "extensions/compression/gzip/decompressor/zlib_decompressor_impl.h" #include @@ -9,6 +9,9 @@ #include "absl/container/fixed_array.h" namespace Envoy { +namespace Extensions { +namespace Compression { +namespace Gzip { namespace Decompressor { ZlibDecompressorImpl::ZlibDecompressorImpl() : ZlibDecompressorImpl(4096) {} @@ -75,4 +78,7 @@ bool ZlibDecompressorImpl::inflateNext() { } } // namespace 
Decompressor +} // namespace Gzip +} // namespace Compression +} // namespace Extensions } // namespace Envoy diff --git a/source/common/decompressor/zlib_decompressor_impl.h b/source/extensions/compression/gzip/decompressor/zlib_decompressor_impl.h similarity index 84% rename from source/common/decompressor/zlib_decompressor_impl.h rename to source/extensions/compression/gzip/decompressor/zlib_decompressor_impl.h index 8d5627fc6c31..77c49e83d56d 100644 --- a/source/common/decompressor/zlib_decompressor_impl.h +++ b/source/extensions/compression/gzip/decompressor/zlib_decompressor_impl.h @@ -1,6 +1,6 @@ #pragma once -#include "envoy/decompressor/decompressor.h" +#include "envoy/compression/decompressor/decompressor.h" #include "common/common/logger.h" #include "common/common/zlib/base.h" @@ -8,13 +8,16 @@ #include "zlib.h" namespace Envoy { +namespace Extensions { +namespace Compression { +namespace Gzip { namespace Decompressor { /** * Implementation of decompressor's interface. */ class ZlibDecompressorImpl : public Zlib::Base, - public Decompressor, + public Envoy::Compression::Decompressor::Decompressor, public Logger::Loggable { public: ZlibDecompressorImpl(); @@ -37,7 +40,7 @@ class ZlibDecompressorImpl : public Zlib::Base, */ void init(int64_t window_bits); - // Decompressor + // Compression::Decompressor::Decompressor void decompress(const Buffer::Instance& input_buffer, Buffer::Instance& output_buffer) override; // Flag to track whether error occurred during decompression. @@ -49,4 +52,7 @@ class ZlibDecompressorImpl : public Zlib::Base, }; } // namespace Decompressor +} // namespace Gzip +} // namespace Compression +} // namespace Extensions } // namespace Envoy diff --git a/source/extensions/extensions_build_config.bzl b/source/extensions/extensions_build_config.bzl index d064296d8370..434a74d949ce 100644 --- a/source/extensions/extensions_build_config.bzl +++ b/source/extensions/extensions_build_config.bzl @@ -21,6 +21,7 @@ EXTENSIONS = { # "envoy.compression.gzip.compressor": "//source/extensions/compression/gzip/compressor:config", + "envoy.compression.gzip.decompressor": "//source/extensions/compression/gzip/decompressor:config", # # gRPC Credentials Plugins diff --git a/test/common/stats/BUILD b/test/common/stats/BUILD index 72e04a9c0815..909e4a4c25f4 100644 --- a/test/common/stats/BUILD +++ b/test/common/stats/BUILD @@ -157,7 +157,6 @@ envoy_cc_fuzz_test( ":stat_test_utility_lib", "//source/common/buffer:buffer_lib", "//source/common/common:assert_lib", - "//source/common/decompressor:decompressor_lib", "//source/common/stats:symbol_table_lib", "//test/fuzz:utility_lib", ], diff --git a/test/extensions/compression/gzip/BUILD b/test/extensions/compression/gzip/BUILD index 98c9de21486e..772bc17016ad 100644 --- a/test/extensions/compression/gzip/BUILD +++ b/test/extensions/compression/gzip/BUILD @@ -15,7 +15,7 @@ envoy_cc_fuzz_test( deps = [ "//source/common/buffer:buffer_lib", "//source/common/common:assert_lib", - "//source/common/decompressor:decompressor_lib", "//source/extensions/compression/gzip/compressor:compressor_lib", + "//source/extensions/compression/gzip/decompressor:zlib_decompressor_impl_lib", ], ) diff --git a/test/extensions/compression/gzip/compressor_fuzz_test.cc b/test/extensions/compression/gzip/compressor_fuzz_test.cc index b9a194c0c15c..da76007c8989 100644 --- a/test/extensions/compression/gzip/compressor_fuzz_test.cc +++ b/test/extensions/compression/gzip/compressor_fuzz_test.cc @@ -1,8 +1,8 @@ #include "common/buffer/buffer_impl.h" #include 
"common/common/assert.h" -#include "common/decompressor/zlib_decompressor_impl.h" #include "extensions/compression/gzip/compressor/zlib_compressor_impl.h" +#include "extensions/compression/gzip/decompressor/zlib_decompressor_impl.h" #include "test/fuzz/fuzz_runner.h" diff --git a/test/common/decompressor/BUILD b/test/extensions/compression/gzip/decompressor/BUILD similarity index 76% rename from test/common/decompressor/BUILD rename to test/extensions/compression/gzip/decompressor/BUILD index 55703c5b25ab..e40b2953db28 100644 --- a/test/common/decompressor/BUILD +++ b/test/extensions/compression/gzip/decompressor/BUILD @@ -9,13 +9,13 @@ load( envoy_package() envoy_cc_test( - name = "decompressor_test", + name = "zlib_decompressor_impl_test", srcs = ["zlib_decompressor_impl_test.cc"], deps = [ "//source/common/common:assert_lib", "//source/common/common:hex_lib", - "//source/common/decompressor:decompressor_lib", "//source/extensions/compression/gzip/compressor:compressor_lib", + "//source/extensions/compression/gzip/decompressor:zlib_decompressor_impl_lib", "//test/test_common:utility_lib", ], ) diff --git a/test/common/decompressor/zlib_decompressor_impl_test.cc b/test/extensions/compression/gzip/decompressor/zlib_decompressor_impl_test.cc similarity index 92% rename from test/common/decompressor/zlib_decompressor_impl_test.cc rename to test/extensions/compression/gzip/decompressor/zlib_decompressor_impl_test.cc index 99e0b43e83f6..87cd0ecf5b1a 100644 --- a/test/common/decompressor/zlib_decompressor_impl_test.cc +++ b/test/extensions/compression/gzip/decompressor/zlib_decompressor_impl_test.cc @@ -1,14 +1,17 @@ #include "common/buffer/buffer_impl.h" #include "common/common/hex.h" -#include "common/decompressor/zlib_decompressor_impl.h" #include "extensions/compression/gzip/compressor/zlib_compressor_impl.h" +#include "extensions/compression/gzip/decompressor/zlib_decompressor_impl.h" #include "test/test_common/utility.h" #include "gtest/gtest.h" namespace Envoy { +namespace Extensions { +namespace Compression { +namespace Gzip { namespace Decompressor { namespace { @@ -31,13 +34,13 @@ class ZlibDecompressorImplTest : public testing::Test { for (uint64_t i = 0; i < 30; ++i) { TestUtility::feedBufferWithRandomCharacters(buffer, default_input_size * i, i); original_text.append(buffer.toString()); - compressor.compress(buffer, Compression::Compressor::State::Flush); + compressor.compress(buffer, Envoy::Compression::Compressor::State::Flush); accumulation_buffer.add(buffer); drainBuffer(buffer); } ASSERT_EQ(0, buffer.length()); - compressor.compress(buffer, Compression::Compressor::State::Finish); + compressor.compress(buffer, Envoy::Compression::Compressor::State::Finish); accumulation_buffer.add(buffer); drainBuffer(buffer); @@ -99,7 +102,7 @@ TEST_F(ZlibDecompressorImplTest, CallingChecksum) { ASSERT_EQ(0, compressor.checksum()); TestUtility::feedBufferWithRandomCharacters(compressor_buffer, 4096); - compressor.compress(compressor_buffer, Compression::Compressor::State::Flush); + compressor.compress(compressor_buffer, Envoy::Compression::Compressor::State::Flush); ASSERT_TRUE(compressor.checksum() > 0); ZlibDecompressorImpl decompressor; @@ -132,14 +135,14 @@ TEST_F(ZlibDecompressorImplTest, CompressAndDecompress) { for (uint64_t i = 0; i < 20; ++i) { TestUtility::feedBufferWithRandomCharacters(buffer, default_input_size * i, i); original_text.append(buffer.toString()); - compressor.compress(buffer, Compression::Compressor::State::Flush); + compressor.compress(buffer, 
Envoy::Compression::Compressor::State::Flush); accumulation_buffer.add(buffer); drainBuffer(buffer); } ASSERT_EQ(0, buffer.length()); - compressor.compress(buffer, Compression::Compressor::State::Finish); + compressor.compress(buffer, Envoy::Compression::Compressor::State::Finish); ASSERT_GE(10, buffer.length()); accumulation_buffer.add(buffer); @@ -200,14 +203,14 @@ TEST_F(ZlibDecompressorImplTest, DecompressWithSmallOutputBuffer) { for (uint64_t i = 0; i < 20; ++i) { TestUtility::feedBufferWithRandomCharacters(buffer, default_input_size * i, i); original_text.append(buffer.toString()); - compressor.compress(buffer, Compression::Compressor::State::Flush); + compressor.compress(buffer, Envoy::Compression::Compressor::State::Flush); accumulation_buffer.add(buffer); drainBuffer(buffer); } ASSERT_EQ(0, buffer.length()); - compressor.compress(buffer, Compression::Compressor::State::Finish); + compressor.compress(buffer, Envoy::Compression::Compressor::State::Finish); ASSERT_GE(10, buffer.length()); accumulation_buffer.add(buffer); @@ -278,7 +281,7 @@ TEST_F(ZlibDecompressorImplTest, CompressDecompressOfMultipleSlices) { Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, gzip_window_bits, memory_level); - compressor.compress(buffer, Compression::Compressor::State::Flush); + compressor.compress(buffer, Envoy::Compression::Compressor::State::Flush); accumulation_buffer.add(buffer); ZlibDecompressorImpl decompressor; @@ -297,4 +300,7 @@ TEST_F(ZlibDecompressorImplTest, CompressDecompressOfMultipleSlices) { } // namespace } // namespace Decompressor +} // namespace Gzip +} // namespace Compression +} // namespace Extensions } // namespace Envoy diff --git a/test/extensions/filters/http/compressor/BUILD b/test/extensions/filters/http/compressor/BUILD index 87a1a23d04f2..1a608f282f0a 100644 --- a/test/extensions/filters/http/compressor/BUILD +++ b/test/extensions/filters/http/compressor/BUILD @@ -32,8 +32,8 @@ envoy_extension_cc_test( ], extension_name = "envoy.filters.http.compressor", deps = [ - "//source/common/decompressor:decompressor_lib", "//source/extensions/compression/gzip/compressor:config", + "//source/extensions/compression/gzip/decompressor:config", "//source/extensions/filters/http/compressor:config", "//test/integration:http_integration_lib", "//test/test_common:simulated_time_system_lib", diff --git a/test/extensions/filters/http/compressor/compressor_filter_integration_test.cc b/test/extensions/filters/http/compressor/compressor_filter_integration_test.cc index c43869f9bc8e..82e7c1cd31f7 100644 --- a/test/extensions/filters/http/compressor/compressor_filter_integration_test.cc +++ b/test/extensions/filters/http/compressor/compressor_filter_integration_test.cc @@ -1,6 +1,6 @@ #include "envoy/event/timer.h" -#include "common/decompressor/zlib_decompressor_impl.h" +#include "extensions/compression/gzip/decompressor/zlib_decompressor_impl.h" #include "test/integration/http_integration.h" #include "test/test_common/simulated_time_system.h" @@ -96,7 +96,7 @@ class CompressorIntegrationTest : public testing::TestWithParam config_; std::unique_ptr filter_; Buffer::OwnedImpl data_; - Decompressor::ZlibDecompressorImpl decompressor_; + Compression::Gzip::Decompressor::ZlibDecompressorImpl decompressor_; Buffer::OwnedImpl decompressed_data_; std::string expected_str_; Stats::TestUtil::TestStore stats_; diff --git a/tools/spelling/spelling_dictionary.txt b/tools/spelling/spelling_dictionary.txt index eac6b9880ae6..497e035e2c02 100644 --- 
a/tools/spelling/spelling_dictionary.txt +++ b/tools/spelling/spelling_dictionary.txt @@ -504,6 +504,7 @@ dechunked decl decls decompressor +decompressors decrement decrypt dedup From 8e6de64fcb1d8b00aa89a0dc292ac330db7a62a7 Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Tue, 12 May 2020 12:25:42 -0700 Subject: [PATCH 144/909] aggregate cluster: make route `retry_priority` predicates work with this cluster type. (#11102) This includes `previous_priorities` (the only in-tree predicate currently defined). Fixes: #11021 Signed-off-by: Greg Greenway --- docs/root/version_history/current.rst | 1 + include/envoy/router/router.h | 8 +- include/envoy/upstream/load_balancer.h | 7 +- include/envoy/upstream/retry.h | 21 +++- source/common/router/retry_state_impl.h | 10 +- source/common/router/router.h | 10 +- source/common/upstream/load_balancer_impl.cc | 3 +- source/common/upstream/load_balancer_impl.h | 4 +- source/common/upstream/subset_lb.h | 9 +- source/common/upstream/upstream_impl.h | 2 +- .../extensions/clusters/aggregate/cluster.cc | 63 +++++++++--- .../extensions/clusters/aggregate/cluster.h | 34 +++++-- .../clusters/aggregate/lb_context.h | 8 +- .../previous_priorities.cc | 16 ++-- .../previous_priorities/previous_priorities.h | 9 +- .../upstream/load_balancer_impl_test.cc | 13 ++- test/common/upstream/utility.h | 6 +- test/extensions/clusters/aggregate/BUILD | 1 + .../aggregate/cluster_integration_test.cc | 43 +++++++++ .../clusters/aggregate/cluster_test.cc | 95 ++++++++++++++++--- .../previous_priorities/config_test.cc | 60 +++++++++++- test/mocks/router/mocks.h | 3 +- test/mocks/upstream/load_balancer_context.cc | 2 +- test/mocks/upstream/load_balancer_context.h | 3 +- test/mocks/upstream/mocks.h | 3 +- 25 files changed, 346 insertions(+), 88 deletions(-) diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index d294302b0ffb..8e19981456af 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -7,6 +7,7 @@ Changes * access loggers: added GRPC_STATUS operator on logging format. * access loggers: applied existing buffer limits to access logs, as well as :ref:`stats ` for logged / dropped logs. This can be reverted temporarily by setting runtime feature `envoy.reloadable_features.disallow_unbounded_access_logs` to false. * access loggers: extened specifier for FilterStateFormatter to output :ref:`unstructured log string `. +* aggregate cluster: make route :ref:`retry_priority ` predicates work with :ref:`this cluster type `. * build: official released binary is now built on Ubuntu 18.04, requires glibc >= 2.27. * compressor: generic :ref:`compressor ` filter exposed to users. * config: added :ref:`version_text ` stat that reflects xDS version. diff --git a/include/envoy/router/router.h b/include/envoy/router/router.h index 890d9cacef07..185ba349d185 100644 --- a/include/envoy/router/router.h +++ b/include/envoy/router/router.h @@ -323,11 +323,13 @@ class RetryState { * Returns a reference to the PriorityLoad that should be used for the next retry. * @param priority_set current priority set. * @param original_priority_load original priority load. + * @param priority_mapping_func see @Upstream::RetryPriority::PriorityMappingFunc. * @return HealthyAndDegradedLoad that should be used to select a priority for the next retry. 
*/ - virtual const Upstream::HealthyAndDegradedLoad& - priorityLoadForRetry(const Upstream::PrioritySet& priority_set, - const Upstream::HealthyAndDegradedLoad& original_priority_load) PURE; + virtual const Upstream::HealthyAndDegradedLoad& priorityLoadForRetry( + const Upstream::PrioritySet& priority_set, + const Upstream::HealthyAndDegradedLoad& original_priority_load, + const Upstream::RetryPriority::PriorityMappingFunc& priority_mapping_func) PURE; /** * return how many times host selection should be reattempted during host selection. */ diff --git a/include/envoy/upstream/load_balancer.h b/include/envoy/upstream/load_balancer.h index cfddc2b61e0a..031daffc8ad2 100644 --- a/include/envoy/upstream/load_balancer.h +++ b/include/envoy/upstream/load_balancer.h @@ -53,12 +53,13 @@ class LoadBalancerContext { * * @param priority_state current priority state of the cluster being being load balanced. * @param original_priority_load the cached priority load for the cluster being load balanced. + * @param priority_mapping_func see @Upstream::RetryPriority::PriorityMappingFunc. * @return a reference to the priority load data that should be used to select a priority. * */ - virtual const HealthyAndDegradedLoad& - determinePriorityLoad(const PrioritySet& priority_set, - const HealthyAndDegradedLoad& original_priority_load) PURE; + virtual const HealthyAndDegradedLoad& determinePriorityLoad( + const PrioritySet& priority_set, const HealthyAndDegradedLoad& original_priority_load, + const Upstream::RetryPriority::PriorityMappingFunc& priority_mapping_func) PURE; /** * Called to determine whether we should reperform host selection. The load balancer diff --git a/include/envoy/upstream/retry.h b/include/envoy/upstream/retry.h index 4a7af89201bd..f772d5402917 100644 --- a/include/envoy/upstream/retry.h +++ b/include/envoy/upstream/retry.h @@ -18,18 +18,37 @@ class RetryPriority { public: virtual ~RetryPriority() = default; + /** + * Function that maps a HostDescription to it's effective priority level in a cluster. + * For most cluster types, the mapping is simply `return host.priority()`, but some + * cluster types require more complex mapping. + * @return either the effective priority, or absl::nullopt if the mapping cannot be determined, + * which can happen if the host has been removed from the configurations since it was + * used. + */ + using PriorityMappingFunc = + std::function(const Upstream::HostDescription&)>; + + static absl::optional defaultPriorityMapping(const Upstream::HostDescription& host) { + return host.priority(); + } + /** * Determines what PriorityLoad to use. * * @param priority_set current priority set of cluster. * @param original_priority_load the unmodified HealthAndDegradedLoad. + * @param priority_mapping_func a callback to get the priority of a host that has + * been attempted. This function may only be called on hosts that were + * passed to calls to `onHostAttempted()` on this object. * @return HealthAndDegradedLoad load that should be used for the next retry. Return * original_priority_load if the original load should be used. a pointer to original_priority, * original_degraded_priority if no changes should be made. 
*/ virtual const HealthyAndDegradedLoad& determinePriorityLoad(const PrioritySet& priority_set, - const HealthyAndDegradedLoad& original_priority_load) PURE; + const HealthyAndDegradedLoad& original_priority_load, + const PriorityMappingFunc& priority_mapping_func) PURE; /** * Called after a host has been attempted but before host selection for the next attempt has diff --git a/source/common/router/retry_state_impl.h b/source/common/router/retry_state_impl.h index 79d355cd6499..6fc4f7125095 100644 --- a/source/common/router/retry_state_impl.h +++ b/source/common/router/retry_state_impl.h @@ -74,13 +74,15 @@ class RetryStateImpl : public RetryState { [&host](auto predicate) { return predicate->shouldSelectAnotherHost(host); }); } - const Upstream::HealthyAndDegradedLoad& - priorityLoadForRetry(const Upstream::PrioritySet& priority_set, - const Upstream::HealthyAndDegradedLoad& original_priority_load) override { + const Upstream::HealthyAndDegradedLoad& priorityLoadForRetry( + const Upstream::PrioritySet& priority_set, + const Upstream::HealthyAndDegradedLoad& original_priority_load, + const Upstream::RetryPriority::PriorityMappingFunc& priority_mapping_func) override { if (!retry_priority_) { return original_priority_load; } - return retry_priority_->determinePriorityLoad(priority_set, original_priority_load); + return retry_priority_->determinePriorityLoad(priority_set, original_priority_load, + priority_mapping_func); } uint32_t hostSelectionMaxAttempts() const override { return host_selection_max_attempts_; } diff --git a/source/common/router/router.h b/source/common/router/router.h index 058e82bdc540..83ba6dee85d3 100644 --- a/source/common/router/router.h +++ b/source/common/router/router.h @@ -368,15 +368,17 @@ class Filter : Logger::Loggable, return retry_state_->shouldSelectAnotherHost(host); } - const Upstream::HealthyAndDegradedLoad& - determinePriorityLoad(const Upstream::PrioritySet& priority_set, - const Upstream::HealthyAndDegradedLoad& original_priority_load) override { + const Upstream::HealthyAndDegradedLoad& determinePriorityLoad( + const Upstream::PrioritySet& priority_set, + const Upstream::HealthyAndDegradedLoad& original_priority_load, + const Upstream::RetryPriority::PriorityMappingFunc& priority_mapping_func) override { // We only modify the priority load on retries. 
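The plumbing above amounts to callers passing a priority-mapping callback into determinePriorityLoad(). Two call shapes that appear elsewhere in this patch, shown side by side as a sketch (context, priority_set, and per_priority_load are assumed placeholders):

    // Default mapping: a host's effective priority is simply host.priority()
    // (as in LoadBalancerBase::chooseHostSet()).
    context->determinePriorityLoad(priority_set, per_priority_load,
                                   Upstream::RetryPriority::defaultPriorityMapping);

    // Custom mapping: the aggregate cluster translates (cluster, priority) pairs into its
    // linearized priority space (AggregateClusterLoadBalancer::LoadBalancerImpl::chooseHost()).
    context->determinePriorityLoad(
        priority_set, per_priority_load,
        [this](const Upstream::HostDescription& host) { return hostToLinearizedPriority(host); });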
if (!is_retry_) { return original_priority_load; } - return retry_state_->priorityLoadForRetry(priority_set, original_priority_load); + return retry_state_->priorityLoadForRetry(priority_set, original_priority_load, + priority_mapping_func); } uint32_t hostSelectionRetryCount() const override { diff --git a/source/common/upstream/load_balancer_impl.cc b/source/common/upstream/load_balancer_impl.cc index 2de5a21d870a..d5ec4fffa8bb 100644 --- a/source/common/upstream/load_balancer_impl.cc +++ b/source/common/upstream/load_balancer_impl.cc @@ -323,7 +323,8 @@ void LoadBalancerBase::recalculateLoadInTotalPanic() { std::pair LoadBalancerBase::chooseHostSet(LoadBalancerContext* context) { if (context) { - const auto priority_loads = context->determinePriorityLoad(priority_set_, per_priority_load_); + const auto priority_loads = context->determinePriorityLoad( + priority_set_, per_priority_load_, Upstream::RetryPriority::defaultPriorityMapping); const auto priority_and_source = choosePriority(random_.random(), priority_loads.healthy_priority_load_, diff --git a/source/common/upstream/load_balancer_impl.h b/source/common/upstream/load_balancer_impl.h index 4ef1fa0b3dd0..9ef6f9fd8fac 100644 --- a/source/common/upstream/load_balancer_impl.h +++ b/source/common/upstream/load_balancer_impl.h @@ -147,8 +147,8 @@ class LoadBalancerContextBase : public LoadBalancerContext { const Http::RequestHeaderMap* downstreamHeaders() const override { return nullptr; } const HealthyAndDegradedLoad& - determinePriorityLoad(const PrioritySet&, - const HealthyAndDegradedLoad& original_priority_load) override { + determinePriorityLoad(const PrioritySet&, const HealthyAndDegradedLoad& original_priority_load, + const Upstream::RetryPriority::PriorityMappingFunc&) override { return original_priority_load; } diff --git a/source/common/upstream/subset_lb.h b/source/common/upstream/subset_lb.h index b46ab624fb67..9d90671d1f94 100644 --- a/source/common/upstream/subset_lb.h +++ b/source/common/upstream/subset_lb.h @@ -140,10 +140,11 @@ class SubsetLoadBalancer : public LoadBalancer, Logger::LoggabledownstreamHeaders(); } - const HealthyAndDegradedLoad& - determinePriorityLoad(const PrioritySet& priority_set, - const HealthyAndDegradedLoad& original_priority_load) override { - return wrapped_->determinePriorityLoad(priority_set, original_priority_load); + const HealthyAndDegradedLoad& determinePriorityLoad( + const PrioritySet& priority_set, const HealthyAndDegradedLoad& original_priority_load, + const Upstream::RetryPriority::PriorityMappingFunc& priority_mapping_func) override { + return wrapped_->determinePriorityLoad(priority_set, original_priority_load, + priority_mapping_func); } bool shouldSelectAnotherHost(const Host& host) override { return wrapped_->shouldSelectAnotherHost(host); diff --git a/source/common/upstream/upstream_impl.h b/source/common/upstream/upstream_impl.h index f8472ca05dc5..24783b33dd6c 100644 --- a/source/common/upstream/upstream_impl.h +++ b/source/common/upstream/upstream_impl.h @@ -170,7 +170,7 @@ class HostImpl : public HostDescriptionImpl, priority), used_(true) { setEdsHealthFlag(health_status); - weight(initial_weight); + HostImpl::weight(initial_weight); } // Upstream::Host diff --git a/source/extensions/clusters/aggregate/cluster.cc b/source/extensions/clusters/aggregate/cluster.cc index 5088c6a24658..965d06a5b9c5 100644 --- a/source/extensions/clusters/aggregate/cluster.cc +++ b/source/extensions/clusters/aggregate/cluster.cc @@ -4,6 +4,8 @@ #include 
"envoy/extensions/clusters/aggregate/v3/cluster.pb.h" #include "envoy/extensions/clusters/aggregate/v3/cluster.pb.validate.h" +#include "common/common/assert.h" + namespace Envoy { namespace Extensions { namespace Clusters { @@ -20,10 +22,9 @@ Cluster::Cluster(const envoy::config::cluster::v3::Cluster& cluster, cluster_manager_(cluster_manager), runtime_(runtime), random_(random), tls_(tls.allocateSlot()), clusters_(config.clusters().begin(), config.clusters().end()) {} -PriorityContext +PriorityContextPtr Cluster::linearizePrioritySet(const std::function& skip_predicate) { - Upstream::PrioritySetImpl priority_set; - std::vector> priority_to_cluster; + PriorityContextPtr priority_context = std::make_unique(); uint32_t next_priority_after_linearizing = 0; // Linearize the priority set. e.g. for clusters [C_0, C_1, C_2] referred in aggregate cluster @@ -47,16 +48,21 @@ Cluster::linearizePrioritySet(const std::function& ski uint32_t priority_in_current_cluster = 0; for (const auto& host_set : tlc->prioritySet().hostSetsPerPriority()) { if (!host_set->hosts().empty()) { - priority_set.updateHosts( - next_priority_after_linearizing++, Upstream::HostSetImpl::updateHostsParams(*host_set), + priority_context->priority_set_.updateHosts( + next_priority_after_linearizing, Upstream::HostSetImpl::updateHostsParams(*host_set), host_set->localityWeights(), host_set->hosts(), {}, host_set->overprovisioningFactor()); - priority_to_cluster.emplace_back(std::make_pair(priority_in_current_cluster, tlc)); + priority_context->priority_to_cluster_.emplace_back( + std::make_pair(priority_in_current_cluster, tlc)); + + priority_context->cluster_and_priority_to_linearized_priority_[std::make_pair( + cluster, priority_in_current_cluster)] = next_priority_after_linearizing; + next_priority_after_linearizing++; } priority_in_current_cluster++; } } - return std::make_pair(std::move(priority_set), std::move(priority_to_cluster)); + return priority_context; } void Cluster::startPreInit() { @@ -85,10 +91,11 @@ void Cluster::startPreInit() { void Cluster::refresh(const std::function& skip_predicate) { // Post the priority set to worker threads. tls_->runOnAllThreads([this, skip_predicate, cluster_name = this->info()->name()]() { - PriorityContext priority_set = linearizePrioritySet(skip_predicate); + PriorityContextPtr priority_context = linearizePrioritySet(skip_predicate); Upstream::ThreadLocalCluster* cluster = cluster_manager_.get(cluster_name); ASSERT(cluster != nullptr); - dynamic_cast(cluster->loadBalancer()).refresh(priority_set); + dynamic_cast(cluster->loadBalancer()) + .refresh(std::move(priority_context)); }); } @@ -113,15 +120,41 @@ void Cluster::onClusterRemoval(const std::string& cluster_name) { } } +absl::optional AggregateClusterLoadBalancer::LoadBalancerImpl::hostToLinearizedPriority( + const Upstream::HostDescription& host) const { + auto it = priority_context_.cluster_and_priority_to_linearized_priority_.find( + std::make_pair(host.cluster().name(), host.priority())); + + if (it != priority_context_.cluster_and_priority_to_linearized_priority_.end()) { + return it->second; + } else { + // The HostSet can change due to CDS/EDS updates between retries. 
+ return absl::nullopt; + } +} + Upstream::HostConstSharedPtr AggregateClusterLoadBalancer::LoadBalancerImpl::chooseHost(Upstream::LoadBalancerContext* context) { + const Upstream::HealthyAndDegradedLoad* priority_loads = nullptr; + if (context != nullptr) { + priority_loads = &context->determinePriorityLoad( + priority_set_, per_priority_load_, + [this](const auto& host) { return hostToLinearizedPriority(host); }); + } else { + priority_loads = &per_priority_load_; + } + const auto priority_pair = - choosePriority(random_.random(), per_priority_load_.healthy_priority_load_, - per_priority_load_.degraded_priority_load_); - AggregateLoadBalancerContext aggregate_context(context, priority_pair.second, - priority_to_cluster_[priority_pair.first].first); - return priority_to_cluster_[priority_pair.first].second->loadBalancer().chooseHost( - &aggregate_context); + choosePriority(random_.random(), priority_loads->healthy_priority_load_, + priority_loads->degraded_priority_load_); + + AggregateLoadBalancerContext aggregate_context( + context, priority_pair.second, + priority_context_.priority_to_cluster_[priority_pair.first].first); + + Upstream::ThreadLocalCluster* cluster = + priority_context_.priority_to_cluster_[priority_pair.first].second; + return cluster->loadBalancer().chooseHost(&aggregate_context); } Upstream::HostConstSharedPtr diff --git a/source/extensions/clusters/aggregate/cluster.h b/source/extensions/clusters/aggregate/cluster.h index 550d8919346c..296a1bf9d6cd 100644 --- a/source/extensions/clusters/aggregate/cluster.h +++ b/source/extensions/clusters/aggregate/cluster.h @@ -14,8 +14,19 @@ namespace Extensions { namespace Clusters { namespace Aggregate { -using PriorityContext = std::pair>>; +using PriorityToClusterVector = std::vector>; + +// Maps pair(host_cluster_name, host_priority) to the linearized priority of the Aggregate cluster. 
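To make the map described in the comment above (and declared just below) concrete, a small worked example; the cluster names are assumptions and every listed priority level is assumed to have hosts. An aggregate cluster that refers to clusters primary (priorities 0 and 1) and secondary (priority 0) linearizes as:

    ("primary", 0)   -> linearized priority 0
    ("primary", 1)   -> linearized priority 1
    ("secondary", 0) -> linearized priority 2

hostToLinearizedPriority() consults this map so that retry_priority predicates such as previous_priorities see one consistent priority space across the underlying clusters, and it returns absl::nullopt when a CDS/EDS update has removed the entry between retries.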
+using ClusterAndPriorityToLinearizedPriorityMap = + absl::flat_hash_map, uint32_t>; + +struct PriorityContext { + Upstream::PrioritySetImpl priority_set_; + PriorityToClusterVector priority_to_cluster_; + ClusterAndPriorityToLinearizedPriorityMap cluster_and_priority_to_linearized_priority_; +}; + +using PriorityContextPtr = std::unique_ptr; class Cluster : public Upstream::ClusterImplBase, Upstream::ClusterUpdateCallbacks { public: @@ -51,7 +62,7 @@ class Cluster : public Upstream::ClusterImplBase, Upstream::ClusterUpdateCallbac void startPreInit() override; void refresh(const std::function& skip_predicate); - PriorityContext + PriorityContextPtr linearizePrioritySet(const std::function& skip_predicate); }; @@ -75,8 +86,9 @@ class AggregateClusterLoadBalancer : public Upstream::LoadBalancer { LoadBalancerImpl(const PriorityContext& priority_context, Upstream::ClusterStats& stats, Runtime::Loader& runtime, Runtime::RandomGenerator& random, const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config) - : Upstream::LoadBalancerBase(priority_context.first, stats, runtime, random, common_config), - priority_to_cluster_(priority_context.second) {} + : Upstream::LoadBalancerBase(priority_context.priority_set_, stats, runtime, random, + common_config), + priority_context_(priority_context) {} // Upstream::LoadBalancer Upstream::HostConstSharedPtr chooseHost(Upstream::LoadBalancerContext* context) override; @@ -86,8 +98,10 @@ class AggregateClusterLoadBalancer : public Upstream::LoadBalancer { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } + absl::optional hostToLinearizedPriority(const Upstream::HostDescription& host) const; + private: - std::vector> priority_to_cluster_; + const PriorityContext& priority_context_; }; using LoadBalancerImplPtr = std::unique_ptr; @@ -97,15 +111,17 @@ class AggregateClusterLoadBalancer : public Upstream::LoadBalancer { Runtime::Loader& runtime_; Runtime::RandomGenerator& random_; const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config_; + PriorityContextPtr priority_context_; public: - void refresh(const PriorityContext& priority_context) { - if (!priority_context.first.hostSetsPerPriority().empty()) { - load_balancer_ = std::make_unique(priority_context, stats_, runtime_, + void refresh(PriorityContextPtr priority_context) { + if (!priority_context->priority_set_.hostSetsPerPriority().empty()) { + load_balancer_ = std::make_unique(*priority_context, stats_, runtime_, random_, common_config_); } else { load_balancer_ = nullptr; } + priority_context_ = std::move(priority_context); } }; diff --git a/source/extensions/clusters/aggregate/lb_context.h b/source/extensions/clusters/aggregate/lb_context.h index 83c2325863f0..08aae97f51e7 100644 --- a/source/extensions/clusters/aggregate/lb_context.h +++ b/source/extensions/clusters/aggregate/lb_context.h @@ -37,11 +37,13 @@ class AggregateLoadBalancerContext : public Upstream::LoadBalancerContext { } const Upstream::HealthyAndDegradedLoad& determinePriorityLoad(const Upstream::PrioritySet&, - const Upstream::HealthyAndDegradedLoad& original_priority_load) override { + const Upstream::HealthyAndDegradedLoad& original_priority_load, + const Upstream::RetryPriority::PriorityMappingFunc&) override { // Re-assign load. Set all traffic to the priority and availability selected in aggregate // cluster. 
- // TODO(yxue): allow determinePriorityLoad to affect the load of top level cluster and verify it - // works with current retry plugin + // + // Note: context_->determinePriorityLoad() was already called and its result handled in + // AggregateClusterLoadBalancer::LoadBalancerImpl::chooseHost(). const size_t priorities = original_priority_load.healthy_priority_load_.get().size(); priority_load_.healthy_priority_load_.get().assign(priorities, 0); priority_load_.degraded_priority_load_.get().assign(priorities, 0); diff --git a/source/extensions/retry/priority/previous_priorities/previous_priorities.cc b/source/extensions/retry/priority/previous_priorities/previous_priorities.cc index 7a1ec35d5263..96dc7c540b25 100644 --- a/source/extensions/retry/priority/previous_priorities/previous_priorities.cc +++ b/source/extensions/retry/priority/previous_priorities/previous_priorities.cc @@ -7,20 +7,24 @@ namespace Priority { const Upstream::HealthyAndDegradedLoad& PreviousPrioritiesRetryPriority::determinePriorityLoad( const Upstream::PrioritySet& priority_set, - const Upstream::HealthyAndDegradedLoad& original_priority_load) { + const Upstream::HealthyAndDegradedLoad& original_priority_load, + const PriorityMappingFunc& priority_mapping_func) { // If we've not seen enough retries to modify the priority load, just // return the original. // If this retry should trigger an update, recalculate the priority load by excluding attempted // priorities. - if (attempted_priorities_.size() < update_frequency_) { + if (attempted_hosts_.size() < update_frequency_) { return original_priority_load; - } else if (attempted_priorities_.size() % update_frequency_ == 0) { + } else if (attempted_hosts_.size() % update_frequency_ == 0) { if (excluded_priorities_.size() < priority_set.hostSetsPerPriority().size()) { excluded_priorities_.resize(priority_set.hostSetsPerPriority().size()); } - for (const auto priority : attempted_priorities_) { - excluded_priorities_[priority] = true; + for (const auto& host : attempted_hosts_) { + absl::optional mapped_host_priority = priority_mapping_func(*host); + if (mapped_host_priority.has_value()) { + excluded_priorities_[mapped_host_priority.value()] = true; + } } if (!adjustForAttemptedPriorities(priority_set)) { @@ -50,7 +54,7 @@ bool PreviousPrioritiesRetryPriority::adjustForAttemptedPriorities( for (auto&& excluded_priority : excluded_priorities_) { excluded_priority = false; } - attempted_priorities_.clear(); + attempted_hosts_.clear(); total_availability = adjustedAvailability(adjusted_per_priority_health, adjusted_per_priority_degraded); } diff --git a/source/extensions/retry/priority/previous_priorities/previous_priorities.h b/source/extensions/retry/priority/previous_priorities/previous_priorities.h index 1723fc49b7c9..05e4f3db37a2 100644 --- a/source/extensions/retry/priority/previous_priorities/previous_priorities.h +++ b/source/extensions/retry/priority/previous_priorities/previous_priorities.h @@ -13,15 +13,16 @@ class PreviousPrioritiesRetryPriority : public Upstream::RetryPriority { public: PreviousPrioritiesRetryPriority(uint32_t update_frequency, uint32_t max_retries) : update_frequency_(update_frequency) { - attempted_priorities_.reserve(max_retries); + attempted_hosts_.reserve(max_retries); } const Upstream::HealthyAndDegradedLoad& determinePriorityLoad(const Upstream::PrioritySet& priority_set, - const Upstream::HealthyAndDegradedLoad& original_priority_load) override; + const Upstream::HealthyAndDegradedLoad& original_priority_load, + const PriorityMappingFunc& 
priority_mapping_func) override; void onHostAttempted(Upstream::HostDescriptionConstSharedPtr attempted_host) override { - attempted_priorities_.emplace_back(attempted_host->priority()); + attempted_hosts_.emplace_back(attempted_host); } private: @@ -41,7 +42,7 @@ class PreviousPrioritiesRetryPriority : public Upstream::RetryPriority { bool adjustForAttemptedPriorities(const Upstream::PrioritySet& priority_set); const uint32_t update_frequency_; - std::vector attempted_priorities_; + std::vector attempted_hosts_; std::vector excluded_priorities_; Upstream::HealthyAndDegradedLoad per_priority_load_; Upstream::HealthyAvailability per_priority_health_; diff --git a/test/common/upstream/load_balancer_impl_test.cc b/test/common/upstream/load_balancer_impl_test.cc index 3ad095de9bc4..ced9ec06ae29 100644 --- a/test/common/upstream/load_balancer_impl_test.cc +++ b/test/common/upstream/load_balancer_impl_test.cc @@ -128,7 +128,7 @@ TEST_P(LoadBalancerBaseTest, PrioritySelection) { HealthyAndDegradedLoad priority_load{Upstream::HealthyLoad({100, 0, 0}), Upstream::DegradedLoad({0, 0, 0})}; - EXPECT_CALL(context, determinePriorityLoad(_, _)).WillRepeatedly(ReturnRef(priority_load)); + EXPECT_CALL(context, determinePriorityLoad(_, _, _)).WillRepeatedly(ReturnRef(priority_load)); // Primary and failover are in panic mode. Load distribution is based // on the number of hosts regardless of their health. EXPECT_EQ(50, lb_.percentageLoad(0)); @@ -205,11 +205,10 @@ TEST_P(LoadBalancerBaseTest, PrioritySelectionFuzz) { updateHostSet(failover_host_set_, failover_set_hosts, unhealthy_hosts, degraded_hosts); } - EXPECT_CALL(context, determinePriorityLoad(_, _)) + EXPECT_CALL(context, determinePriorityLoad(_, _, _)) .WillRepeatedly( - Invoke([](const auto&, const auto& original_load) -> const HealthyAndDegradedLoad& { - return original_load; - })); + Invoke([](const auto&, const auto& original_load, + const auto&) -> const HealthyAndDegradedLoad& { return original_load; })); for (uint64_t i = 0; i < total_hosts; ++i) { const auto hs = lb_.chooseHostSet(&context); @@ -234,7 +233,7 @@ TEST_P(LoadBalancerBaseTest, PrioritySelectionWithFilter) { HealthyAndDegradedLoad priority_load{Upstream::HealthyLoad({0u, 100u}), Upstream::DegradedLoad({0, 0})}; // return a filter that excludes priority 0 - EXPECT_CALL(context, determinePriorityLoad(_, _)).WillRepeatedly(ReturnRef(priority_load)); + EXPECT_CALL(context, determinePriorityLoad(_, _, _)).WillRepeatedly(ReturnRef(priority_load)); updateHostSet(host_set_, 1 /* num_hosts */, 1 /* num_healthy_hosts */); updateHostSet(failover_host_set_, 1, 1); @@ -1009,7 +1008,7 @@ TEST_P(RoundRobinLoadBalancerTest, HostSelectionWithFilter) { } else { priority_load.healthy_priority_load_ = HealthyLoad({0u, 100u}); } - EXPECT_CALL(context, determinePriorityLoad(_, _)).WillRepeatedly(ReturnRef(priority_load)); + EXPECT_CALL(context, determinePriorityLoad(_, _, _)).WillRepeatedly(ReturnRef(priority_load)); EXPECT_CALL(context, hostSelectionRetryCount()).WillRepeatedly(Return(2)); // Calling chooseHost multiple times always returns host one, since the filter will reject diff --git a/test/common/upstream/utility.h b/test/common/upstream/utility.h index a181985b1c07..da5d7df44a56 100644 --- a/test/common/upstream/utility.h +++ b/test/common/upstream/utility.h @@ -76,12 +76,12 @@ inline HostSharedPtr makeTestHost(ClusterInfoConstSharedPtr cluster, const std:: } inline HostSharedPtr makeTestHost(ClusterInfoConstSharedPtr cluster, const std::string& url, - uint32_t weight = 1) { + uint32_t weight 
= 1, uint32_t priority = 0) { return HostSharedPtr{ new HostImpl(cluster, "", Network::Utility::resolveUrl(url), nullptr, weight, envoy::config::core::v3::Locality(), - envoy::config::endpoint::v3::Endpoint::HealthCheckConfig::default_instance(), 0, - envoy::config::core::v3::UNKNOWN)}; + envoy::config::endpoint::v3::Endpoint::HealthCheckConfig::default_instance(), + priority, envoy::config::core::v3::UNKNOWN)}; } inline HostSharedPtr makeTestHost(ClusterInfoConstSharedPtr cluster, const std::string& url, diff --git a/test/extensions/clusters/aggregate/BUILD b/test/extensions/clusters/aggregate/BUILD index 815a755de3ef..b5d47f29614f 100644 --- a/test/extensions/clusters/aggregate/BUILD +++ b/test/extensions/clusters/aggregate/BUILD @@ -59,6 +59,7 @@ envoy_extension_cc_test( "//source/common/protobuf:utility_lib", "//source/extensions/clusters/aggregate:cluster", "//source/extensions/filters/network/tcp_proxy:config", + "//source/extensions/retry/priority/previous_priorities:config", "//test/common/grpc:grpc_client_integration_lib", "//test/integration:http_integration_lib", "//test/integration:integration_lib", diff --git a/test/extensions/clusters/aggregate/cluster_integration_test.cc b/test/extensions/clusters/aggregate/cluster_integration_test.cc index 4f6f8b6edb3d..bb1b62683ea2 100644 --- a/test/extensions/clusters/aggregate/cluster_integration_test.cc +++ b/test/extensions/clusters/aggregate/cluster_integration_test.cc @@ -99,6 +99,12 @@ const std::string& config() { prefix: "/cluster2" - route: cluster: aggregate_cluster + retry_policy: + retry_priority: + name: envoy.retry_priorities.previous_priorities + typed_config: + "@type": type.googleapis.com/envoy.config.retry.previous_priorities.PreviousPrioritiesConfig + update_frequency: 1 match: prefix: "/aggregatecluster" domains: "*" @@ -249,5 +255,42 @@ TEST_P(AggregateIntegrationTest, TwoClusters) { cleanupUpstreamAndDownstream(); } +// Test that the PreviousPriorities retry predicate works as expected. It is configured +// in this test to exclude a priority after a single failure, so the first failure +// on cluster_1 results in the retry going to cluster_2. +TEST_P(AggregateIntegrationTest, PreviousPrioritiesRetryPredicate) { + initialize(); + + // Tell Envoy that cluster_2 is here. + sendDiscoveryResponse( + Config::TypeUrl::get().Cluster, {cluster1_, cluster2_}, {cluster2_}, {}, "42"); + // The '4' includes the fake CDS server and aggregate cluster. 
+ test_server_->waitForGaugeGe("cluster_manager.active_clusters", 4); + + codec_client_ = makeHttpConnection(lookupPort("http")); + auto response = codec_client_->makeRequestWithBody( + Http::TestRequestHeaderMapImpl{{":method", "GET"}, + {":path", "/aggregatecluster"}, + {":scheme", "http"}, + {":authority", "host"}, + {"x-forwarded-for", "10.0.0.1"}, + {"x-envoy-retry-on", "5xx"}}, + 1024); + waitForNextUpstreamRequest(FirstUpstreamIndex); + upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "503"}}, false); + + ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); + fake_upstream_connection_.reset(); + waitForNextUpstreamRequest(SecondUpstreamIndex); + upstream_request_->encodeHeaders(default_response_headers_, true); + + response->waitForEndStream(); + EXPECT_TRUE(upstream_request_->complete()); + + EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + cleanupUpstreamAndDownstream(); +} + } // namespace } // namespace Envoy diff --git a/test/extensions/clusters/aggregate/cluster_test.cc b/test/extensions/clusters/aggregate/cluster_test.cc index 1c173b6ac0cc..916fe3df8b10 100644 --- a/test/extensions/clusters/aggregate/cluster_test.cc +++ b/test/extensions/clusters/aggregate/cluster_test.cc @@ -21,24 +21,35 @@ namespace Extensions { namespace Clusters { namespace Aggregate { +namespace { +const std::string primary_name("primary"); +const std::string secondary_name("secondary"); +} // namespace + class AggregateClusterTest : public testing::Test { public: - AggregateClusterTest() : stats_(Upstream::ClusterInfoImpl::generateStats(stats_store_)) {} + AggregateClusterTest() : stats_(Upstream::ClusterInfoImpl::generateStats(stats_store_)) { + ON_CALL(*primary_info_, name()).WillByDefault(ReturnRef(primary_name)); + ON_CALL(*secondary_info_, name()).WillByDefault(ReturnRef(secondary_name)); + } - Upstream::HostVector setupHostSet(int healthy_hosts, int degraded_hosts, int unhealthy_hosts) { + Upstream::HostVector setupHostSet(Upstream::ClusterInfoConstSharedPtr cluster, int healthy_hosts, + int degraded_hosts, int unhealthy_hosts, uint32_t priority) { Upstream::HostVector hosts; for (int i = 0; i < healthy_hosts; ++i) { - hosts.emplace_back(Upstream::makeTestHost(info_, "tcp://127.0.0.1:80")); + hosts.emplace_back(Upstream::makeTestHost(cluster, "tcp://127.0.0.1:80", 1, priority)); } for (int i = 0; i < degraded_hosts; ++i) { - Upstream::HostSharedPtr host = Upstream::makeTestHost(info_, "tcp://127.0.0.2:80"); + Upstream::HostSharedPtr host = + Upstream::makeTestHost(cluster, "tcp://127.0.0.2:80", 1, priority); host->healthFlagSet(Upstream::HostImpl::HealthFlag::DEGRADED_ACTIVE_HC); hosts.emplace_back(host); } for (int i = 0; i < unhealthy_hosts; ++i) { - Upstream::HostSharedPtr host = Upstream::makeTestHost(info_, "tcp://127.0.0.3:80"); + Upstream::HostSharedPtr host = + Upstream::makeTestHost(cluster, "tcp://127.0.0.3:80", 1, priority); host->healthFlagSet(Upstream::HostImpl::HealthFlag::FAILED_ACTIVE_HC); hosts.emplace_back(host); } @@ -47,7 +58,8 @@ class AggregateClusterTest : public testing::Test { } void setupPrimary(int priority, int healthy_hosts, int degraded_hosts, int unhealthy_hosts) { - auto hosts = setupHostSet(healthy_hosts, degraded_hosts, unhealthy_hosts); + auto hosts = + setupHostSet(primary_info_, healthy_hosts, degraded_hosts, unhealthy_hosts, priority); primary_ps_.updateHosts( priority, Upstream::HostSetImpl::partitionHosts(std::make_shared(hosts), @@ -57,7 +69,8 @@ class 
AggregateClusterTest : public testing::Test { } void setupSecondary(int priority, int healthy_hosts, int degraded_hosts, int unhealthy_hosts) { - auto hosts = setupHostSet(healthy_hosts, degraded_hosts, unhealthy_hosts); + auto hosts = + setupHostSet(secondary_info_, healthy_hosts, degraded_hosts, unhealthy_hosts, priority); secondary_ps_.updateHosts( priority, Upstream::HostSetImpl::partitionHosts(std::make_shared(hosts), @@ -123,7 +136,10 @@ class AggregateClusterTest : public testing::Test { Upstream::LoadBalancerFactorySharedPtr lb_factory_; Upstream::LoadBalancerPtr lb_; Upstream::ClusterStats stats_; - std::shared_ptr info_{new NiceMock()}; + std::shared_ptr primary_info_{ + new NiceMock()}; + std::shared_ptr secondary_info_{ + new NiceMock()}; NiceMock aggregate_cluster_, primary_, secondary_; Upstream::PrioritySetImpl primary_ps_, secondary_ps_; NiceMock primary_load_balancer_, secondary_load_balancer_; @@ -151,7 +167,7 @@ TEST_F(AggregateClusterTest, LoadBalancerTest) { // Cluster 2: // Priority 0: 33.3% // Priority 1: 33.3% - Upstream::HostSharedPtr host = Upstream::makeTestHost(info_, "tcp://127.0.0.1:80"); + Upstream::HostSharedPtr host = Upstream::makeTestHost(primary_info_, "tcp://127.0.0.1:80"); EXPECT_CALL(primary_load_balancer_, chooseHost(_)).WillRepeatedly(Return(host)); EXPECT_CALL(secondary_load_balancer_, chooseHost(_)).WillRepeatedly(Return(nullptr)); @@ -199,7 +215,7 @@ TEST_F(AggregateClusterTest, LoadBalancerTest) { TEST_F(AggregateClusterTest, AllHostAreUnhealthyTest) { initialize(default_yaml_config_); - Upstream::HostSharedPtr host = Upstream::makeTestHost(info_, "tcp://127.0.0.1:80"); + Upstream::HostSharedPtr host = Upstream::makeTestHost(primary_info_, "tcp://127.0.0.1:80"); // Set up the HostSet with 0 healthy, 0 degraded and 2 unhealthy. setupPrimary(0, 0, 0, 2); setupPrimary(1, 0, 0, 2); @@ -237,7 +253,7 @@ TEST_F(AggregateClusterTest, AllHostAreUnhealthyTest) { TEST_F(AggregateClusterTest, ClusterInPanicTest) { initialize(default_yaml_config_); - Upstream::HostSharedPtr host = Upstream::makeTestHost(info_, "tcp://127.0.0.1:80"); + Upstream::HostSharedPtr host = Upstream::makeTestHost(primary_info_, "tcp://127.0.0.1:80"); setupPrimary(0, 1, 0, 4); setupPrimary(1, 1, 0, 4); setupSecondary(0, 1, 0, 4); @@ -310,6 +326,63 @@ TEST_F(AggregateClusterTest, LBContextTest) { EXPECT_EQ(context.upstreamTransportSocketOptions(), nullptr); } +TEST_F(AggregateClusterTest, ContextDeterminePriorityLoad) { + Upstream::MockLoadBalancerContext lb_context; + initialize(default_yaml_config_); + setupPrimary(0, 1, 0, 0); + setupPrimary(1, 1, 0, 0); + setupSecondary(0, 1, 0, 0); + setupSecondary(1, 1, 0, 0); + + const uint32_t invalid_priority = 42; + Upstream::HostSharedPtr host = + Upstream::makeTestHost(primary_info_, "tcp://127.0.0.1:80", 1, invalid_priority); + + // The linearized priorities are [P0, P1, S0, S1]. + Upstream::HealthyAndDegradedLoad secondary_priority_1{Upstream::HealthyLoad({0, 0, 0, 100}), + Upstream::DegradedLoad()}; + + // Validate that lb_context->determinePriorityLoad() is called and that the mapping function + // passed in works correctly. + EXPECT_CALL(lb_context, determinePriorityLoad(_, _, _)) + .WillOnce(Invoke([&](const Upstream::PrioritySet&, const Upstream::HealthyAndDegradedLoad&, + const Upstream::RetryPriority::PriorityMappingFunc& mapping_func) + -> const Upstream::HealthyAndDegradedLoad& { + // This one isn't part of the mapping due to an invalid priority. 
+ EXPECT_FALSE(mapping_func(*host).has_value()); + + // Helper to get a host from the given set and priority + auto host_from_priority = [](Upstream::PrioritySetImpl& ps, + uint32_t priority) -> const Upstream::HostDescription& { + return *(ps.hostSetsPerPriority()[priority]->hosts()[0]); + }; + + EXPECT_EQ(mapping_func(host_from_priority(primary_ps_, 0)), absl::optional(0)); + EXPECT_EQ(mapping_func(host_from_priority(primary_ps_, 1)), absl::optional(1)); + EXPECT_EQ(mapping_func(host_from_priority(secondary_ps_, 0)), absl::optional(2)); + EXPECT_EQ(mapping_func(host_from_priority(secondary_ps_, 1)), absl::optional(3)); + + return secondary_priority_1; + })); + + // Validate that the AggregateLoadBalancerContext is initialized with the weights from + // lb_context->determinePriorityLoad(). + EXPECT_CALL(secondary_load_balancer_, chooseHost(_)) + .WillOnce(Invoke([this, &host]( + Upstream::LoadBalancerContext* context) -> Upstream::HostConstSharedPtr { + const Upstream::HealthyAndDegradedLoad& adjusted_load = context->determinePriorityLoad( + secondary_ps_, {Upstream::HealthyLoad({100, 0}), Upstream::DegradedLoad()}, nullptr); + + EXPECT_EQ(adjusted_load.healthy_priority_load_.get().size(), 2); + EXPECT_EQ(adjusted_load.healthy_priority_load_.get().at(0), 0); + EXPECT_EQ(adjusted_load.healthy_priority_load_.get().at(1), 100); + + return host; + })); + + lb_->chooseHost(&lb_context); +} + } // namespace Aggregate } // namespace Clusters } // namespace Extensions diff --git a/test/extensions/retry/priority/previous_priorities/config_test.cc b/test/extensions/retry/priority/previous_priorities/config_test.cc index d715b3c12653..89b00310acfa 100644 --- a/test/extensions/retry/priority/previous_priorities/config_test.cc +++ b/test/extensions/retry/priority/previous_priorities/config_test.cc @@ -49,9 +49,12 @@ class RetryPriorityTest : public testing::Test { } void verifyPriorityLoads(const Upstream::HealthyLoad& expected_healthy_priority_load, - const Upstream::DegradedLoad& expected_degraded_priority_load) { - const auto& priority_loads = - retry_priority_->determinePriorityLoad(priority_set_, original_priority_load_); + const Upstream::DegradedLoad& expected_degraded_priority_load, + absl::optional + priority_mapping_func = absl::nullopt) { + const auto& priority_loads = retry_priority_->determinePriorityLoad( + priority_set_, original_priority_load_, + priority_mapping_func.value_or(Upstream::RetryPriority::defaultPriorityMapping)); // Unwrapping gives a nicer gtest error. 
ASSERT_EQ(priority_loads.healthy_priority_load_.get(), expected_healthy_priority_load.get()); ASSERT_EQ(priority_loads.degraded_priority_load_.get(), expected_degraded_priority_load.get()); @@ -94,6 +97,57 @@ TEST_F(RetryPriorityTest, DefaultFrequency) { verifyPriorityLoads(original_priority_load, original_degraded_priority_load); } +TEST_F(RetryPriorityTest, PriorityMappingCallback) { + const Upstream::HealthyLoad original_priority_load({100, 0}); + const Upstream::DegradedLoad original_degraded_priority_load({0, 0}); + + initialize(original_priority_load, original_degraded_priority_load); + addHosts(0, 2, 2); + addHosts(1, 2, 2); + + auto host1 = std::make_shared>(); + EXPECT_CALL(*host1, priority()).Times(0); + + auto host2 = std::make_shared>(); + EXPECT_CALL(*host2, priority()).Times(0); + + Upstream::RetryPriority::PriorityMappingFunc priority_mapping_func = + [&](const Upstream::HostDescription& host) -> absl::optional { + if (&host == host1.get()) { + return 0; + } + ASSERT(&host == host2.get()); + return 1; + }; + + const Upstream::HealthyLoad expected_priority_load({0, 100}); + const Upstream::DegradedLoad expected_degraded_priority_load({0, 0}); + + // After attempting a host in P0, P1 should receive all the load. + retry_priority_->onHostAttempted(host1); + verifyPriorityLoads(expected_priority_load, expected_degraded_priority_load, + priority_mapping_func); + + // With a mapping function that doesn't recognize host2, results will remain the same as after + // only trying host1. + retry_priority_->onHostAttempted(host2); + Upstream::RetryPriority::PriorityMappingFunc priority_mapping_func_no_host2 = + [&](const Upstream::HostDescription& host) -> absl::optional { + if (&host == host1.get()) { + return 0; + } + ASSERT(&host == host2.get()); + return absl::nullopt; + }; + verifyPriorityLoads(expected_priority_load, expected_degraded_priority_load, + priority_mapping_func_no_host2); + + // After we've tried host2, we've attempted all priorities and should reset back to the original + // priority load. + verifyPriorityLoads(original_priority_load, original_degraded_priority_load, + priority_mapping_func); +} + // Tests that we handle all hosts being unhealthy in the original priority set. TEST_F(RetryPriorityTest, NoHealthyUpstreams) { const Upstream::HealthyLoad original_priority_load({0, 0, 0}); diff --git a/test/mocks/router/mocks.h b/test/mocks/router/mocks.h index e17bdc2e1fd3..f7c719442e27 100644 --- a/test/mocks/router/mocks.h +++ b/test/mocks/router/mocks.h @@ -150,7 +150,8 @@ class MockRetryState : public RetryState { MOCK_METHOD(void, onHostAttempted, (Upstream::HostDescriptionConstSharedPtr)); MOCK_METHOD(bool, shouldSelectAnotherHost, (const Upstream::Host& host)); MOCK_METHOD(const Upstream::HealthyAndDegradedLoad&, priorityLoadForRetry, - (const Upstream::PrioritySet&, const Upstream::HealthyAndDegradedLoad&)); + (const Upstream::PrioritySet&, const Upstream::HealthyAndDegradedLoad&, + const Upstream::RetryPriority::PriorityMappingFunc&)); MOCK_METHOD(uint32_t, hostSelectionMaxAttempts, (), (const)); DoRetryCallback callback_; diff --git a/test/mocks/upstream/load_balancer_context.cc b/test/mocks/upstream/load_balancer_context.cc index 5a424b07b867..21e16847e8c3 100644 --- a/test/mocks/upstream/load_balancer_context.cc +++ b/test/mocks/upstream/load_balancer_context.cc @@ -10,7 +10,7 @@ MockLoadBalancerContext::MockLoadBalancerContext() { // By default, set loads which treat everything as healthy in the first priority. 
priority_load_.healthy_priority_load_ = HealthyLoad({100}); priority_load_.degraded_priority_load_ = DegradedLoad({0}); - ON_CALL(*this, determinePriorityLoad(_, _)).WillByDefault(ReturnRef(priority_load_)); + ON_CALL(*this, determinePriorityLoad(_, _, _)).WillByDefault(ReturnRef(priority_load_)); } MockLoadBalancerContext::~MockLoadBalancerContext() = default; diff --git a/test/mocks/upstream/load_balancer_context.h b/test/mocks/upstream/load_balancer_context.h index c716543a4086..ef3d46486777 100644 --- a/test/mocks/upstream/load_balancer_context.h +++ b/test/mocks/upstream/load_balancer_context.h @@ -15,7 +15,8 @@ class MockLoadBalancerContext : public LoadBalancerContext { MOCK_METHOD(const Network::Connection*, downstreamConnection, (), (const)); MOCK_METHOD(const Http::RequestHeaderMap*, downstreamHeaders, (), (const)); MOCK_METHOD(const HealthyAndDegradedLoad&, determinePriorityLoad, - (const PrioritySet&, const HealthyAndDegradedLoad&)); + (const PrioritySet&, const HealthyAndDegradedLoad&, + const Upstream::RetryPriority::PriorityMappingFunc&)); MOCK_METHOD(bool, shouldSelectAnotherHost, (const Host&)); MOCK_METHOD(uint32_t, hostSelectionRetryCount, (), (const)); MOCK_METHOD(Network::Socket::OptionsSharedPtr, upstreamSocketOptions, (), (const)); diff --git a/test/mocks/upstream/mocks.h b/test/mocks/upstream/mocks.h index 9ee59ff15d20..4551aaa42e26 100644 --- a/test/mocks/upstream/mocks.h +++ b/test/mocks/upstream/mocks.h @@ -133,7 +133,8 @@ class MockRetryPriority : public RetryPriority { ~MockRetryPriority() override; const HealthyAndDegradedLoad& determinePriorityLoad(const PrioritySet&, - const HealthyAndDegradedLoad&) override { + const HealthyAndDegradedLoad&, + const PriorityMappingFunc&) override { return priority_load_; } From 45726b79a3caee978a727a987723fbe1913b58df Mon Sep 17 00:00:00 2001 From: htuch Date: Tue, 12 May 2020 15:50:47 -0400 Subject: [PATCH 145/909] docs: FAQ on benchmarking best practices. (#11140) Includes a bunch of tips from @jmarantz, @oschaaf, @mattklein123. Signed-off-by: Harvey Tuch --- .../disable_circuit_breaking.rst | 2 + docs/root/faq/overview.rst | 1 + .../faq/performance/how_fast_is_envoy.rst | 2 + .../performance/how_to_benchmark_envoy.rst | 83 +++++++++++++++++++ 4 files changed, 88 insertions(+) create mode 100644 docs/root/faq/performance/how_to_benchmark_envoy.rst diff --git a/docs/root/faq/load_balancing/disable_circuit_breaking.rst b/docs/root/faq/load_balancing/disable_circuit_breaking.rst index 00182f8d8407..dfc6180628c9 100644 --- a/docs/root/faq/load_balancing/disable_circuit_breaking.rst +++ b/docs/root/faq/load_balancing/disable_circuit_breaking.rst @@ -1,3 +1,5 @@ +.. _faq_disable_circuit_breaking: + Is there a way to disable circuit breaking? =========================================== diff --git a/docs/root/faq/overview.rst b/docs/root/faq/overview.rst index d2f21b70a333..3769b53f4766 100644 --- a/docs/root/faq/overview.rst +++ b/docs/root/faq/overview.rst @@ -34,6 +34,7 @@ Performance :maxdepth: 2 performance/how_fast_is_envoy + performance/how_to_benchmark_envoy Configuration ------------- diff --git a/docs/root/faq/performance/how_fast_is_envoy.rst b/docs/root/faq/performance/how_fast_is_envoy.rst index 78b1dd4d20bc..f2d7ceadaa91 100644 --- a/docs/root/faq/performance/how_fast_is_envoy.rst +++ b/docs/root/faq/performance/how_fast_is_envoy.rst @@ -1,3 +1,5 @@ +.. _faq_how_fast_is_envoy: + How fast is Envoy? 
==================
diff --git a/docs/root/faq/performance/how_to_benchmark_envoy.rst b/docs/root/faq/performance/how_to_benchmark_envoy.rst
new file mode 100644
index 000000000000..4152cf6d2fa3
--- /dev/null
+++ b/docs/root/faq/performance/how_to_benchmark_envoy.rst
@@ -0,0 +1,83 @@
+What are best practices for benchmarking Envoy?
+===============================================
+
+There is :ref:`no single QPS, latency or throughput overhead <faq_how_fast_is_envoy>` that can
+characterize a network proxy such as Envoy. Instead, any measurements need to be contextually aware,
+ensuring an apples-to-apples comparison with other systems by configuring and load testing Envoy
+appropriately. As a result, we can't provide a canonical benchmark configuration, but instead offer
+the following guidance:
+
+* A release Envoy binary should be used. If building, please ensure that `-c opt`
+  is used on the Bazel command line. When consuming Envoy point releases, make
+  sure you are using the latest point release; given the pace of Envoy development
+  it's not reasonable to pick older versions when making a statement about Envoy
+  performance. Similarly, if working on a master build, please perform due diligence
+  and ensure no regressions or performance improvements have landed proximal to your
+  benchmark work and that you are close to HEAD.
+
+* The :option:`--concurrency` Envoy CLI flag should be unset (providing one worker thread per
+  logical core on your machine) or set to match the number of cores/threads made available to other
+  network proxies in your comparison.
+
+* Disable :ref:`circuit breaking <faq_disable_circuit_breaking>`. A common issue during benchmarking
+  is that Envoy's default circuit breaker limits are low, leading to connection and request queuing.
+
+* Disable :ref:`generate_request_id
+  `.
+
+* Disable :ref:`dynamic_stats
+  `. If you are measuring
+  the overhead vs. a direct connection, you might want to consider disabling all stats via
+  :ref:`reject_all `.
+
+* Ensure that the networking and HTTP filter chains are reflective of comparable features
+  in the systems that Envoy is being compared with.
+
+* Ensure that TLS settings (if any) are realistic and that consistent ciphers are used in
+  any comparison. Session reuse may have a significant impact on results and should be tracked via
+  :ref:`listener SSL stats `.
+
+* Ensure that :ref:`HTTP/2 settings `, in
+  particular those that affect flow control and stream concurrency, are consistent in any
+  comparison. Ideally, take into account BDP and network link latencies when optimizing any
+  HTTP/2 settings.
+
+* Verify in the listener and cluster stats that the number of streams, connections and errors
+  matches what is expected in any given experiment.
+
+* Make sure you are aware of how connections created by your load generator are
+  distributed across Envoy worker threads. This is especially important for
+  benchmarks that use low connection counts and perfect keep-alive. You should be aware that
+  Envoy will allocate all streams for a given connection to a single worker thread. This means,
+  for example, that if you have 72 logical cores and worker threads, but only a single HTTP/2
+  connection from your load generator, then only 1 worker thread will be active.
+
+* Make sure request-release timing expectations line up with what is intended.
+  Some load generators produce naturally jittery and/or batchy timings. This
+  might end up being an unintended dominant factor in certain tests.
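As a rough illustration of several of the points above, one possible benchmarking-oriented sketch (not a canonical setup; the listener, cluster and endpoint names and the limit values below are placeholders) is to pair a ``bazel build -c opt`` binary and an unset :option:`--concurrency` flag with bootstrap overrides along these lines::

  # Sketch only: reject all stats when measuring pure proxy overhead.
  stats_config:
    stats_matcher:
      reject_all: true
  static_resources:
    listeners:
    - name: benchmark_listener
      address:
        socket_address: { address: 0.0.0.0, port_value: 10000 }
      filter_chains:
      - filters:
        - name: envoy.filters.network.http_connection_manager
          typed_config:
            "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
            stat_prefix: ingress_http
            # Avoid per-request UUID generation during the benchmark.
            generate_request_id: false
            route_config:
              virtual_hosts:
              - name: backend
                domains: ["*"]
                routes:
                - match: { prefix: "/" }
                  route: { cluster: benchmark_backend }
            http_filters:
            - name: envoy.filters.http.router
    clusters:
    - name: benchmark_backend
      connect_timeout: 5s
      type: STRICT_DNS
      # Raise circuit breaker limits so they do not queue connections or requests.
      circuit_breakers:
        thresholds:
        - max_connections: 1000000
          max_pending_requests: 1000000
          max_requests: 1000000
      load_assignment:
        cluster_name: benchmark_backend
        endpoints:
        - lb_endpoints:
          - endpoint:
              address:
                socket_address: { address: backend.internal.example, port_value: 8080 }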
+ +* The specifics of how your load generator reuses connections is an important factor (e.g. MRU, + random, LRU, etc.) as this impacts work distribution. + +* If you're trying to measure small (say < 1ms) latencies, make sure the measurement tool and + environment have the required sensitivity and the noise floor is sufficiently low. + +* Be critical of your bootstrap or xDS configuration. Ideally every line has a motivation and is + necessary for the benchmark under consideration. + +* Consider using `Nighthawk `_ as your + load generator and measurement tool. We are committed to building out + benchmarking and latency measurement best practices in this tool. + +* Examine `perf` profiles of Envoy during the benchmark run, e.g. with `flame graphs + `_. Verify that Envoy is spending its time + doing the expected essential work under test, rather than some unrelated or tangential + work. + +* Familiarize yourself with `latency measurement best practices + `_. In particular, never measure latency at + max load, this is not generally meaningful or reflecting of real system performance; aim + to measure below the knee of the QPS-latency curve. Prefer open vs. closed loop load + generators. + +* Avoid `benchmarking crimes `_. From 568e13953774d9d827954fc9103c3d1778adfb11 Mon Sep 17 00:00:00 2001 From: htuch Date: Tue, 12 May 2020 17:23:09 -0400 Subject: [PATCH 146/909] build: add support for pip3_import from rules_python. (#11108) Provide an Envoy build system styled way of loading arbitrary pip3 packages as py_{library,binary} dependencies. Part of https://github.com/envoyproxy/envoy/issues/11085 (to fetch PyYAML). Risk level: Low (build system) Testing: Manual validation that the test utility loads PyYAML at the correct version (different to my system version). Signed-off-by: Harvey Tuch --- WORKSPACE | 4 +++ bazel/EXTERNAL_DEPS.md | 27 ++++++++++++++++--- bazel/dependency_imports.bzl | 3 +++ bazel/repositories.bzl | 4 +-- bazel/repositories_extra.bzl | 16 +++++++++++ bazel/repository_locations.bzl | 7 +++-- ci/WORKSPACE.filter.example | 4 +++ ci/run_envoy_docker_windows.sh | 2 +- tools/config_validation/BUILD | 10 +++++++ tools/config_validation/requirements.txt | 1 + .../validate_yaml_fragment.py | 3 +++ 11 files changed, 73 insertions(+), 8 deletions(-) create mode 100644 bazel/repositories_extra.bzl create mode 100644 tools/config_validation/BUILD create mode 100644 tools/config_validation/requirements.txt create mode 100644 tools/config_validation/validate_yaml_fragment.py diff --git a/WORKSPACE b/WORKSPACE index ef120bc53d4f..a96cba501302 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -12,6 +12,10 @@ load("//bazel:repositories.bzl", "envoy_dependencies") envoy_dependencies() +load("//bazel:repositories_extra.bzl", "envoy_dependencies_extra") + +envoy_dependencies_extra() + load("//bazel:dependency_imports.bzl", "envoy_dependency_imports") envoy_dependency_imports() diff --git a/bazel/EXTERNAL_DEPS.md b/bazel/EXTERNAL_DEPS.md index 7793129376aa..7eebe1c3ec2b 100644 --- a/bazel/EXTERNAL_DEPS.md +++ b/bazel/EXTERNAL_DEPS.md @@ -6,7 +6,9 @@ values can change when Github change their tar/gzip libraries breaking builds. Maintainer provided tarballs are more stable and the maintainer can provide the SHA256. -# Adding external dependencies to Envoy (native Bazel) +# Adding external dependencies to Envoy (C++) + +## Native Bazel This is the preferred style of adding dependencies that use Bazel for their build process. @@ -17,7 +19,7 @@ build process. `external_deps` attribute. 3. 
`bazel test //test/...` -# Adding external dependencies to Envoy (external CMake) +## External CMake (preferred) This is the preferred style of adding dependencies that use CMake for their build system. @@ -29,7 +31,8 @@ This is the preferred style of adding dependencies that use CMake for their buil `external_deps` attribute. 4. `bazel test //test/...` -# Adding external dependencies to Envoy (genrule repository) + +## genrule repository This is the newer style of adding dependencies with no upstream Bazel configs. It wraps the dependency's native build tooling in a Bazel-aware shell script, @@ -54,6 +57,24 @@ Dependencies between external libraries can use the standard Bazel dependency resolution logic, using the `$(location)` shell extension to resolve paths to binaries, libraries, headers, etc. +# Adding external dependencies to Envoy (Python) + +Python dependencies should be added via `pip3` and `rules_python`. The process +is: + +1. Define a `pip3_import()` pointing at your target `requirements.txt` in + [`bazel/repositories_extra.bzl`](repositories_extra.bzl) + +2. Add a `pip_install()` invocation in + [`bazel/dependency_imports.bzl`](dependency_imports.bzl). + +3. Add a `requirements(" Date: Tue, 12 May 2020 18:17:37 -0400 Subject: [PATCH 147/909] util: refactor mechanism used for atomic creation of http code stat-names to a general utility (#11128) Commit Message: Adds a utility class to facilitate making lazy-initialized shared structures across threads, where the lazy-initialization doesn't require a lock except when creation is required. Additional Description: Risk Level: low -- this is a refactor of an existing mechanism. Testing: //test/... Docs Changes: n/a Release Notes: n/a Signed-off-by: Joshua Marantz --- source/common/common/BUILD | 4 +- source/common/common/thread.h | 93 ++++++++++++ source/common/http/codes.cc | 20 +-- source/common/http/codes.h | 8 +- test/common/common/BUILD | 10 ++ test/common/common/lock_guard_test.cc | 14 +- test/common/common/thread_test.cc | 195 ++++++++++++++++++++++++++ 7 files changed, 318 insertions(+), 26 deletions(-) create mode 100644 test/common/common/thread_test.cc diff --git a/source/common/common/BUILD b/source/common/common/BUILD index 9429aa2e24c6..d0b4346d6aa9 100644 --- a/source/common/common/BUILD +++ b/source/common/common/BUILD @@ -279,7 +279,9 @@ envoy_cc_library( name = "thread_lib", hdrs = ["thread.h"], external_deps = ["abseil_synchronization"], - deps = envoy_cc_platform_dep("thread_impl_lib"), + deps = envoy_cc_platform_dep("thread_impl_lib") + [ + ":non_copyable", + ], ) envoy_cc_posix_library( diff --git a/source/common/common/thread.h b/source/common/common/thread.h index cfd40c8a4083..bbad9fee6913 100644 --- a/source/common/common/thread.h +++ b/source/common/common/thread.h @@ -1,10 +1,14 @@ #pragma once +#include +#include #include #include #include "envoy/thread/thread.h" +#include "common/common/non_copyable.h" + #include "absl/synchronization/mutex.h" namespace Envoy { @@ -75,5 +79,94 @@ class CondVar { absl::CondVar condvar_; }; +enum class AtomicPtrAllocMode { DoNotDelete, DeleteOnDestruct }; + +// Manages an array of atomic pointers to T, providing a relatively +// contention-free mechanism to lazily get a T* at an index, where the caller +// provides a mechanism to instantiate a T* under lock, if one has not already +// been stored at that index. +// +// alloc_mode controls whether allocated T* entries should be deleted on +// destruction of the array. 
This should be set to AtomicPtrAllocMode::DoNotDelete +// if the T* returned from MakeObject are managed by the caller. +template +class AtomicPtrArray : NonCopyable { +public: + AtomicPtrArray() { + for (std::atomic& atomic_ref : data_) { + atomic_ref = nullptr; + } + } + + ~AtomicPtrArray() { + if (alloc_mode == AtomicPtrAllocMode::DeleteOnDestruct) { + for (std::atomic& atomic_ref : data_) { + T* ptr = atomic_ref.load(); + if (ptr != nullptr) { + delete ptr; + } + } + } + } + + // User-defined function for allocating an object. This will be called + // under a lock controlled by this class, so MakeObject will not race + // against itself. MakeObject is allowed to return nullptr, in which + // case the next call to get() will call MakeObject again. + using MakeObject = std::function; + + /* + * Returns an already existing T* at index, or calls make_object to + * instantiate and save the T* under lock. + * + * @param index the Index to look up. + * @param make_object function to call under lock to make a T*. + * @return The new or already-existing T*, possibly nullptr if make_object returns nullptr. + */ + T* get(uint32_t index, const MakeObject& make_object) { + std::atomic& atomic_ref = data_[index]; + + // First, use an atomic load to see if the object has already been allocated. + if (atomic_ref.load() == nullptr) { + absl::MutexLock lock(&mutex_); + + // If that fails, check again under lock as two threads might have raced + // to create the object. + if (atomic_ref.load() == nullptr) { + atomic_ref = make_object(); + } + } + return atomic_ref.load(); + } + +private: + std::atomic data_[size]; + absl::Mutex mutex_; +}; + +// Manages a pointer to T, providing a relatively contention-free mechanism to +// lazily create a T*, where the caller provides a mechanism to instantiate a +// T* under lock, if one has not already been stored. +// +// alloc_mode controls whether allocated T* objects should be deleted on +// destruction of the AtomicObject. This should be set to +// AtomicPtrAllocMode::DoNotDelete if the T* returned from MakeObject are managed +// by the caller. +template +class AtomicPtr : private AtomicPtrArray { +public: + using BaseClass = AtomicPtrArray; + using typename BaseClass::MakeObject; + + /* + * Returns an already existing T*, or calls make_object to instantiate and + * save the T* under lock. + * + * @param make_object function to call under lock to make a T*. + * @return The new or already-existing T*, possibly nullptr if make_object returns nullptr. + */ + T* get(const MakeObject& make_object) { return BaseClass::get(0, make_object); } +}; + } // namespace Thread } // namespace Envoy diff --git a/source/common/http/codes.cc b/source/common/http/codes.cc index cf291af32dfb..37273856a1c6 100644 --- a/source/common/http/codes.cc +++ b/source/common/http/codes.cc @@ -34,10 +34,6 @@ CodeStatsImpl::CodeStatsImpl(Stats::SymbolTable& symbol_table) vcluster_(stat_name_pool_.add("vcluster")), vhost_(stat_name_pool_.add("vhost")), zone_(stat_name_pool_.add("zone")) { - for (auto& rc_stat_name : rc_stat_names_) { - rc_stat_name = nullptr; - } - // Pre-allocate response codes 200, 404, and 503, as those seem quite likely. 
// We don't pre-allocate all the HTTP codes because the first 127 allocations // are likely to be encoded in one byte, and we would rather spend those on @@ -180,18 +176,10 @@ Stats::StatName CodeStatsImpl::upstreamRqStatName(Code response_code) const { if (rc_index >= NumHttpCodes) { return upstream_rq_unknown_; } - std::atomic& atomic_ref = rc_stat_names_[rc_index]; - if (atomic_ref.load() == nullptr) { - absl::MutexLock lock(&mutex_); - - // Check again under lock as two threads might have raced to add a StatName - // for the same code. - if (atomic_ref.load() == nullptr) { - atomic_ref = stat_name_pool_.addReturningStorage( - absl::StrCat("upstream_rq_", enumToInt(response_code))); - } - } - return Stats::StatName(atomic_ref.load()); + return Stats::StatName(rc_stat_names_.get(rc_index, [this, response_code]() -> const uint8_t* { + return stat_name_pool_.addReturningStorage( + absl::StrCat("upstream_rq_", enumToInt(response_code))); + })); } std::string CodeUtility::groupStringForResponseCode(Code response_code) { diff --git a/source/common/http/codes.h b/source/common/http/codes.h index dcfa4e37df50..3957377aa2c2 100644 --- a/source/common/http/codes.h +++ b/source/common/http/codes.h @@ -8,6 +8,7 @@ #include "envoy/http/header_map.h" #include "envoy/stats/scope.h" +#include "common/common/thread.h" #include "common/stats/symbol_table_impl.h" namespace Envoy { @@ -62,8 +63,7 @@ class CodeStatsImpl : public CodeStats { Stats::StatName upstreamRqGroup(Code response_code) const; Stats::StatName upstreamRqStatName(Code response_code) const; - mutable Stats::StatNamePool stat_name_pool_ ABSL_GUARDED_BY(mutex_); - mutable absl::Mutex mutex_; + mutable Stats::StatNamePool stat_name_pool_; Stats::SymbolTable& symbol_table_; const Stats::StatName canary_; @@ -108,7 +108,9 @@ class CodeStatsImpl : public CodeStats { static constexpr uint32_t NumHttpCodes = 500; static constexpr uint32_t HttpCodeOffset = 100; // code 100 is at index 0. 
- mutable std::atomic rc_stat_names_[NumHttpCodes]; + mutable Thread::AtomicPtrArray + rc_stat_names_; }; /** diff --git a/test/common/common/BUILD b/test/common/common/BUILD index 288afab64682..a32421843112 100644 --- a/test/common/common/BUILD +++ b/test/common/common/BUILD @@ -247,6 +247,16 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "thread_test", + srcs = ["thread_test.cc"], + deps = [ + "//source/common/common:thread_lib", + "//source/common/common:thread_synchronizer_lib", + "//test/test_common:thread_factory_for_test_lib", + ], +) + envoy_cc_test( name = "stl_helpers_test", srcs = ["stl_helpers_test.cc"], diff --git a/test/common/common/lock_guard_test.cc b/test/common/common/lock_guard_test.cc index 8cc8eb8b8355..01b677c8dc48 100644 --- a/test/common/common/lock_guard_test.cc +++ b/test/common/common/lock_guard_test.cc @@ -5,32 +5,33 @@ namespace Envoy { namespace Thread { +namespace { -class ThreadTest : public testing::Test { +class LockGuardTest : public testing::Test { protected: - ThreadTest() = default; + LockGuardTest() = default; int a_ ABSL_GUARDED_BY(a_mutex_){0}; MutexBasicLockable a_mutex_; int b_{0}; }; -TEST_F(ThreadTest, TestLockGuard) { +TEST_F(LockGuardTest, TestLockGuard) { LockGuard lock(a_mutex_); EXPECT_EQ(1, ++a_); } -TEST_F(ThreadTest, TestOptionalLockGuard) { +TEST_F(LockGuardTest, TestOptionalLockGuard) { OptionalLockGuard lock(nullptr); EXPECT_EQ(1, ++b_); } -TEST_F(ThreadTest, TestReleasableLockGuard) { +TEST_F(LockGuardTest, TestReleasableLockGuard) { ReleasableLockGuard lock(a_mutex_); EXPECT_EQ(1, ++a_); lock.release(); } -TEST_F(ThreadTest, TestTryLockGuard) { +TEST_F(LockGuardTest, TestTryLockGuard) { TryLockGuard lock(a_mutex_); if (lock.tryLock()) { @@ -44,5 +45,6 @@ TEST_F(ThreadTest, TestTryLockGuard) { } } +} // namespace } // namespace Thread } // namespace Envoy diff --git a/test/common/common/thread_test.cc b/test/common/common/thread_test.cc new file mode 100644 index 000000000000..431d4be38f32 --- /dev/null +++ b/test/common/common/thread_test.cc @@ -0,0 +1,195 @@ +#include + +#include "common/common/thread.h" +#include "common/common/thread_synchronizer.h" + +#include "test/test_common/thread_factory_for_test.h" + +#include "absl/synchronization/notification.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Thread { +namespace { + +class ThreadAsyncPtrTest : public testing::Test { +protected: + ThreadFactory& thread_factory_{threadFactoryForTest()}; +}; + +// Tests that two threads racing to create an object have well-defined +// behavior. +TEST_F(ThreadAsyncPtrTest, DeleteOnDestruct) { + AtomicPtr str; + ThreadSynchronizer sync; + sync.enable(); + sync.waitOn("creator"); + + // On thread1, we will lazily instantiate the string as "thread1". However + // in the creation function we will block on a sync-point. + auto thread1 = thread_factory_.createThread([&str, &sync]() { + str.get([&sync]() -> std::string* { + sync.syncPoint("creator"); + return new std::string("thread1"); + }); + }); + + sync.barrierOn("creator"); + + // Now spawn a separate thread that will attempt to lazy-initialize the + // string as "thread2", but that allocator will never run because + // the allocator on thread1 has already locked the AtomicPtr's mutex. + auto thread2 = thread_factory_.createThread( + [&str]() { str.get([]() -> std::string* { return new std::string("thread2"); }); }); + + // Now let thread1's initializer finish. 
+ sync.signal("creator"); + thread1->join(); + thread2->join(); + + // Now ensure the "thread1" value sticks past the thread lifetimes. + bool called = false; + EXPECT_EQ("thread1", *str.get([&called]() -> std::string* { + called = true; + return nullptr; + })); + EXPECT_FALSE(called); +} + +// Same test as AtomicPtrDeleteOnDestruct, except the allocator callbacks return +// pointers to locals, rather than allocating the strings on the heap. +TEST_F(ThreadAsyncPtrTest, DoNotDelete) { + const std::string thread1_str("thread1"); + const std::string thread2_str("thread2"); + AtomicPtr str; + ThreadSynchronizer sync; + sync.enable(); + sync.waitOn("creator"); + + // On thread1, we will lazily instantiate the string as "thread1". However + // in the creation function we will block on a sync-point. + auto thread1 = thread_factory_.createThread([&str, &sync, &thread1_str]() { + str.get([&sync, &thread1_str]() -> const std::string* { + sync.syncPoint("creator"); + return &thread1_str; + }); + }); + + sync.barrierOn("creator"); + + // Now spawn a separate thread that will attempt to lazy-initialize the + // string as "thread2", but that allocator will never run because + // the allocator on thread1 has already locked the AtomicPtr's mutex. + auto thread2 = thread_factory_.createThread([&str, &thread2_str]() { + str.get([&thread2_str]() -> const std::string* { return &thread2_str; }); + }); + + // Now let thread1's initializer finish. + sync.signal("creator"); + thread1->join(); + thread2->join(); + + // Now ensure the "thread1" value sticks past the thread lifetimes. + bool called = false; + EXPECT_EQ("thread1", *str.get([&called]() -> std::string* { + called = true; + return nullptr; + })); + EXPECT_FALSE(called); +} + +TEST_F(ThreadAsyncPtrTest, ThreadSpammer) { + AtomicPtr str; + absl::Notification go; + constexpr uint32_t num_threads = 100; + AtomicPtr answer; + uint32_t calls = 0; + auto thread_fn = [&go, &answer, &calls]() { + go.WaitForNotification(); + answer.get([&calls]() { + ++calls; + return new uint32_t(42); + }); + }; + std::vector threads; + for (uint32_t i = 0; i < num_threads; ++i) { + threads.emplace_back(thread_factory_.createThread(thread_fn)); + } + EXPECT_EQ(0, calls); + go.Notify(); + for (auto& thread : threads) { + thread->join(); + } + EXPECT_EQ(1, calls); + EXPECT_EQ(42, *answer.get([&calls]() { + ++calls; + return nullptr; + })); + EXPECT_EQ(1, calls); +} + +// Tests that null can be allocated, but the allocator will be re-called each +// time until a non-null result is returned. +TEST_F(ThreadAsyncPtrTest, Null) { + AtomicPtr str; + uint32_t calls = 0; + EXPECT_EQ(nullptr, str.get([&calls]() -> std::string* { + ++calls; + return nullptr; + })); + EXPECT_EQ(nullptr, str.get([&calls]() -> std::string* { + ++calls; + return nullptr; + })); + EXPECT_EQ(2, calls); + EXPECT_EQ("x", *str.get([&calls]() -> std::string* { + ++calls; + return new std::string("x"); + })); + EXPECT_EQ(3, calls); + EXPECT_EQ("x", *str.get([&calls]() -> std::string* { + ++calls; + return nullptr; + })); + EXPECT_EQ(3, calls); // allocator was not called this last time. +} + +// Tests array semantics. Note that AtomicPtr is implemented a 1-element +// AtomicPtrArray, so there's no need to repeat the complex thread-race test +// from AtomicPtr. 
+TEST_F(ThreadAsyncPtrTest, Array) { + const uint32_t size = 5; + AtomicPtrArray strs; + for (uint32_t i = 0; i < size; ++i) { + std::string val = absl::StrCat("x", i); + EXPECT_EQ(val, *strs.get(i, [&val]() -> std::string* { return new std::string(val); })); + } + for (uint32_t i = 0; i < size; ++i) { + std::string val = absl::StrCat("x", i); + // Second time through the array, the allocator will not be called, but + // we'll have all the expected values returned from get. + bool called = false; + EXPECT_EQ(val, *strs.get(i, [&called]() -> std::string* { + called = true; + return nullptr; + })); + EXPECT_FALSE(called); + } +} + +TEST_F(ThreadAsyncPtrTest, ManagedAlloc) { + const uint32_t size = 5; + std::vector> pool; + AtomicPtrArray strs; + for (uint32_t i = 0; i < size; ++i) { + std::string val = absl::StrCat("x", i); + EXPECT_EQ(val, *strs.get(i, [&pool, &val]() -> std::string* { + pool.emplace_back(std::make_unique(val)); + return pool.back().get(); + })); + } +} + +} // namespace +} // namespace Thread +} // namespace Envoy From 593d32bf56ccdb0017cbf4d8c024e7fb3f7ed4e1 Mon Sep 17 00:00:00 2001 From: "William A. Rowe Jr" Date: Tue, 12 May 2020 17:22:08 -0500 Subject: [PATCH 148/909] Fix dbg compilation mode crash on Windows (#11084) - bump abseil dependency that resolves failing assertion on startup in dbg builds - disable lua extension as luajit/moonjit build always links dynamic runtime - disable tests that rely on extension "envoy.filters.http.lua" Fixes https://github.com/envoyproxy/envoy/issues/10877 Signed-off-by: William A Rowe Jr Co-authored-by: Sunjay Bhatia --- bazel/foreign_cc/BUILD | 2 ++ bazel/repositories.bzl | 1 + bazel/repository_locations.bzl | 8 ++++---- test/extensions/filters/common/lua/BUILD | 4 +++- test/extensions/filters/http/lua/BUILD | 7 ++++--- 5 files changed, 14 insertions(+), 8 deletions(-) diff --git a/bazel/foreign_cc/BUILD b/bazel/foreign_cc/BUILD index 5dc37d92df90..06d8ed05df6b 100644 --- a/bazel/foreign_cc/BUILD +++ b/bazel/foreign_cc/BUILD @@ -53,6 +53,7 @@ configure_make( "//bazel:windows_x86_64": ["lua51.lib"], "//conditions:default": ["libluajit-5.1.a"], }), + tags = ["skip_on_windows"], ) configure_make( @@ -72,6 +73,7 @@ configure_make( "//bazel:windows_x86_64": ["lua51.lib"], "//conditions:default": ["libluajit-5.1.a"], }), + tags = ["skip_on_windows"], ) envoy_cmake_external( diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index 1539332ec81e..45b596629395 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -8,6 +8,7 @@ load("@com_google_googleapis//:repository_rules.bzl", "switched_rules_by_languag PPC_SKIP_TARGETS = ["envoy.filters.http.lua"] WINDOWS_SKIP_TARGETS = [ + "envoy.filters.http.lua", "envoy.tracers.dynamic_ot", "envoy.tracers.lightstep", "envoy.tracers.datadog", diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index a08c2c63c580..812290a35b35 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -94,10 +94,10 @@ DEPENDENCY_REPOSITORIES = dict( cpe = "N/A", ), com_google_absl = dict( - sha256 = "14ee08e2089c2a9b6bf27e1d10abc5629c69c4d0bab4b78ec5b65a29ea1c2af7", - strip_prefix = "abseil-cpp-cf3a1998e9d41709d4141e2f13375993cba1130e", - # 2020-03-05 - urls = ["https://github.com/abseil/abseil-cpp/archive/cf3a1998e9d41709d4141e2f13375993cba1130e.tar.gz"], + sha256 = "cd477bfd0d19f803f85d118c7943b7908930310d261752730afa981118fee230", + strip_prefix = "abseil-cpp-ca9856cabc23d771bcce634677650eb6fc4363ae", + # 2020-04-30 + urls = 
["https://github.com/abseil/abseil-cpp/archive/ca9856cabc23d771bcce634677650eb6fc4363ae.tar.gz"], use_category = ["dataplane", "controlplane"], cpe = "N/A", ), diff --git a/test/extensions/filters/common/lua/BUILD b/test/extensions/filters/common/lua/BUILD index 17adba2c3145..cbe0ef71b7b7 100644 --- a/test/extensions/filters/common/lua/BUILD +++ b/test/extensions/filters/common/lua/BUILD @@ -12,6 +12,7 @@ envoy_package() envoy_cc_test( name = "lua_test", srcs = ["lua_test.cc"], + tags = ["skip_on_windows"], deps = [ "//source/extensions/filters/common/lua:lua_lib", "//test/mocks:common_lib", @@ -23,7 +24,7 @@ envoy_cc_test( envoy_cc_test( name = "wrappers_test", srcs = ["wrappers_test.cc"], - tags = ["fails_on_windows"], + tags = ["skip_on_windows"], deps = [ ":lua_wrappers_lib", "//source/common/buffer:buffer_lib", @@ -38,6 +39,7 @@ envoy_cc_test( envoy_cc_test_library( name = "lua_wrappers_lib", hdrs = ["lua_wrappers.h"], + tags = ["skip_on_windows"], deps = [ "//source/extensions/filters/common/lua:lua_lib", "//test/mocks/thread_local:thread_local_mocks", diff --git a/test/extensions/filters/http/lua/BUILD b/test/extensions/filters/http/lua/BUILD index eba9a4ad2e09..93b555322a18 100644 --- a/test/extensions/filters/http/lua/BUILD +++ b/test/extensions/filters/http/lua/BUILD @@ -15,7 +15,7 @@ envoy_extension_cc_test( name = "lua_filter_test", srcs = ["lua_filter_test.cc"], extension_name = "envoy.filters.http.lua", - tags = ["fails_on_windows"], + tags = ["skip_on_windows"], deps = [ "//source/common/stream_info:stream_info_lib", "//source/extensions/filters/http/lua:lua_filter_lib", @@ -34,7 +34,7 @@ envoy_extension_cc_test( name = "wrappers_test", srcs = ["wrappers_test.cc"], extension_name = "envoy.filters.http.lua", - tags = ["fails_on_windows"], + tags = ["skip_on_windows"], deps = [ "//source/common/stream_info:stream_info_lib", "//source/extensions/filters/http/lua:wrappers_lib", @@ -49,7 +49,7 @@ envoy_extension_cc_test( name = "lua_integration_test", srcs = ["lua_integration_test.cc"], extension_name = "envoy.filters.http.lua", - tags = ["fails_on_windows"], + tags = ["skip_on_windows"], deps = [ "//source/extensions/filters/http/lua:config", "//test/integration:http_integration_lib", @@ -63,6 +63,7 @@ envoy_extension_cc_test( name = "config_test", srcs = ["config_test.cc"], extension_name = "envoy.filters.http.lua", + tags = ["skip_on_windows"], deps = [ "//source/extensions/filters/http/lua:config", "//test/mocks/server:server_mocks", From c07e5c84437124921df24b001c93802f6bc39c1c Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Wed, 13 May 2020 08:30:25 -0400 Subject: [PATCH 149/909] connect: docs and cleanup (#11146) Un-hiding CONNECT docs and config, now that it is implemented. 
Risk Level: low (docs only) Testing: in prior PRs Docs Changes: yes Release Notes: yes Fixes #1630 and #1451 Signed-off-by: Alyssa Wilk --- .../v2/http_connection_manager.proto | 2 +- .../config/route/v3/route_components.proto | 8 +- .../route/v4alpha/route_components.proto | 8 +- .../v3/http_connection_manager.proto | 2 +- .../v4alpha/http_connection_manager.proto | 2 +- .../intro/arch_overview/http/upgrades.rst | 83 +++++++++---------- docs/root/version_history/current.rst | 1 + docs/root/version_history/v1.4.0.rst | 2 +- .../v2/http_connection_manager.proto | 2 +- .../config/route/v3/route_components.proto | 8 +- .../route/v4alpha/route_components.proto | 8 +- .../v3/http_connection_manager.proto | 2 +- .../v4alpha/http_connection_manager.proto | 2 +- test/integration/protocol_integration_test.cc | 2 - 14 files changed, 65 insertions(+), 67 deletions(-) diff --git a/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto b/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto index 56a05eed758b..742e5584befe 100644 --- a/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto +++ b/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto @@ -232,7 +232,7 @@ message HttpConnectionManager { // Determines if upgrades are enabled or disabled by default. Defaults to true. // This can be overridden on a per-route basis with :ref:`cluster // ` as documented in the - // :ref:`upgrade documentation `. + // :ref:`upgrade documentation `. google.protobuf.BoolValue enabled = 3; } diff --git a/api/envoy/config/route/v3/route_components.proto b/api/envoy/config/route/v3/route_components.proto index 3f82fbd80fb0..782bacfb95ad 100644 --- a/api/envoy/config/route/v3/route_components.proto +++ b/api/envoy/config/route/v3/route_components.proto @@ -393,7 +393,6 @@ message RouteMatch { google.protobuf.BoolValue validated = 2; } - // [#not-implemented-hide:] // An extensible message for matching CONNECT requests. message ConnectMatcher { } @@ -427,7 +426,6 @@ message RouteMatch { // stripping. This needs more thought.] type.matcher.v3.RegexMatcher safe_regex = 10 [(validate.rules).message = {required: true}]; - // [#not-implemented-hide:] // If this is used as the matcher, the matcher will only match CONNECT requests. // Note that this will not match HTTP/2 upgrade-style CONNECT requests // (WebSocket and the like) as they are normalized in Envoy as HTTP/1.1 style @@ -435,6 +433,8 @@ message RouteMatch { // This is the only way to match CONNECT requests for HTTP/1.1. For HTTP/2, // where CONNECT requests may have a path, the path matchers will work if // there is a path present. + // Note that CONNECT support is currently considered alpha in Envoy. + // [#comment:TODO(htuch): Replace the above comment with an alpha tag. ConnectMatcher connect_matcher = 12; } @@ -721,7 +721,6 @@ message RouteAction { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RouteAction.UpgradeConfig"; - // [#not-implemented-hide:] // Configuration for sending data upstream as a raw data payload. This is used for // CONNECT requests, when forwarding CONNECT payload as raw TCP. message ConnectConfig { @@ -738,9 +737,10 @@ message RouteAction { // Determines if upgrades are available on this route. Defaults to true. google.protobuf.BoolValue enabled = 2; - // [#not-implemented-hide:] // Configuration for sending data upstream as a raw data payload. 
This is used for // CONNECT requests, when forwarding CONNECT payload as raw TCP. + // Note that CONNECT support is currently considered alpha in Envoy. + // [#comment:TODO(htuch): Replace the above comment with an alpha tag. ConnectConfig connect_config = 3; } diff --git a/api/envoy/config/route/v4alpha/route_components.proto b/api/envoy/config/route/v4alpha/route_components.proto index 6e1b1f9f5a0a..8dfa58177bdb 100644 --- a/api/envoy/config/route/v4alpha/route_components.proto +++ b/api/envoy/config/route/v4alpha/route_components.proto @@ -394,7 +394,6 @@ message RouteMatch { google.protobuf.BoolValue validated = 2; } - // [#not-implemented-hide:] // An extensible message for matching CONNECT requests. message ConnectMatcher { option (udpa.annotations.versioning).previous_message_type = @@ -430,7 +429,6 @@ message RouteMatch { // stripping. This needs more thought.] type.matcher.v4alpha.RegexMatcher safe_regex = 10 [(validate.rules).message = {required: true}]; - // [#not-implemented-hide:] // If this is used as the matcher, the matcher will only match CONNECT requests. // Note that this will not match HTTP/2 upgrade-style CONNECT requests // (WebSocket and the like) as they are normalized in Envoy as HTTP/1.1 style @@ -438,6 +436,8 @@ message RouteMatch { // This is the only way to match CONNECT requests for HTTP/1.1. For HTTP/2, // where CONNECT requests may have a path, the path matchers will work if // there is a path present. + // Note that CONNECT support is currently considered alpha in Envoy. + // [#comment:TODO(htuch): Replace the above comment with an alpha tag. ConnectMatcher connect_matcher = 12; } @@ -724,7 +724,6 @@ message RouteAction { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RouteAction.UpgradeConfig"; - // [#not-implemented-hide:] // Configuration for sending data upstream as a raw data payload. This is used for // CONNECT requests, when forwarding CONNECT payload as raw TCP. message ConnectConfig { @@ -744,9 +743,10 @@ message RouteAction { // Determines if upgrades are available on this route. Defaults to true. google.protobuf.BoolValue enabled = 2; - // [#not-implemented-hide:] // Configuration for sending data upstream as a raw data payload. This is used for // CONNECT requests, when forwarding CONNECT payload as raw TCP. + // Note that CONNECT support is currently considered alpha in Envoy. + // [#comment:TODO(htuch): Replace the above comment with an alpha tag. ConnectConfig connect_config = 3; } diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto b/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto index ac8ab2adbfb6..ff083e29228a 100644 --- a/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto +++ b/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto @@ -231,7 +231,7 @@ message HttpConnectionManager { // Determines if upgrades are enabled or disabled by default. Defaults to true. // This can be overridden on a per-route basis with :ref:`cluster // ` as documented in the - // :ref:`upgrade documentation `. + // :ref:`upgrade documentation `. 
google.protobuf.BoolValue enabled = 3; } diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto index 8f370b21d8f1..41284b7e1095 100644 --- a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto +++ b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto @@ -231,7 +231,7 @@ message HttpConnectionManager { // Determines if upgrades are enabled or disabled by default. Defaults to true. // This can be overridden on a per-route basis with :ref:`cluster // ` as documented in the - // :ref:`upgrade documentation `. + // :ref:`upgrade documentation `. google.protobuf.BoolValue enabled = 3; } diff --git a/docs/root/intro/arch_overview/http/upgrades.rst b/docs/root/intro/arch_overview/http/upgrades.rst index 36af0fc7b809..a19ed35c695e 100644 --- a/docs/root/intro/arch_overview/http/upgrades.rst +++ b/docs/root/intro/arch_overview/http/upgrades.rst @@ -1,4 +1,4 @@ -.. _arch_overview_websocket: +.. _arch_overview_upgrades: HTTP upgrades =========================== @@ -62,44 +62,43 @@ a GET method on the final Envoy-Upstream hop. Note that the HTTP/2 upgrade path has very strict HTTP/1.1 compliance, so will not proxy WebSocket upgrade requests or responses with bodies. -.. TODO(alyssawilk) unhide this when unhiding config -.. CONNECT support -.. ^^^^^^^^^^^^^^^ - -.. Envoy CONNECT support is off by default (Envoy will send an internally generated 403 in response to -.. CONNECT requests). CONNECT support can be enabled via the upgrade options described above, setting -.. the upgrade value to the special keyword "CONNECT". - -.. While for HTTP/2, CONNECT request may have a path, in general and for HTTP/1.1 CONNECT requests do -.. not have a path, and can only be matched using a -.. :ref:`connect_matcher ` -.. -.. Envoy can handle CONNECT in one of two ways, either proxying the CONNECT headers through as if they -.. were any other request, and letting the upstream terminate the CONNECT request, or by terminating the -.. CONNECT request, and forwarding the payload as raw TCP data. When CONNECT upgrade configuration is -.. set up, the default behavior is to proxy the CONNECT request, treating it like any other request using -.. the upgrade path. -.. If termination is desired, this can be accomplished by setting -.. :ref:`connect_config ` -.. If it that message is present for CONNECT requests, the router filter will strip the request headers, -.. and forward the HTTP payload upstream. On receipt of initial TCP data from upstream, the router -.. will synthesize 200 response headers, and then forward the TCP data as the HTTP response body. - -.. .. warning:: -.. This mode of CONNECT support can create major security holes if configured correctly, as the upstream -.. will be forwarded *unsanitized* headers if they are in the body payload. Please use with caution - -.. Tunneling TCP over HTTP/2 -.. ^^^^^^^^^^^^^^^^^^^^^^^^^ -.. Envoy also has support for transforming raw TCP into HTTP/2 CONNECT requests. This can be used to -.. proxy multiplexed TCP over pre-warmed secure connections and amortize the cost of any TLS handshake. -.. An example set up proxying SMTP would look something like this -.. -.. [SMTP Upstream] --- raw SMTP --- [L2 Envoy] --- SMTP tunneled over HTTP/2 --- [L1 Envoy] --- raw SMTP --- [Client] -.. -.. 
Examples of such a set up can be found in the Envoy example config `directory ` -.. If you run `bazel-bin/source/exe/envoy-static --config-path configs/encapsulate_in_connect.yaml --base-id 1` -.. and `bazel-bin/source/exe/envoy-static --config-path configs/terminate_connect.yaml` -.. you will be running two Envoys, the first listening for TCP traffic on port 10000 and encapsulating it in an HTTP/2 -.. CONNECT request, and the second listening for HTTP/2 on 10001, stripping the CONNECT headers, and forwarding the -.. original TCP upstream, in this case to google.com. +CONNECT support +^^^^^^^^^^^^^^^ + +Envoy CONNECT support is off by default (Envoy will send an internally generated 403 in response to +CONNECT requests). CONNECT support can be enabled via the upgrade options described above, setting +the upgrade value to the special keyword "CONNECT". + +While HTTP/2 CONNECT requests may have a path, in general (and always for HTTP/1.1) CONNECT requests do +not have a path, and can only be matched using a +:ref:`connect_matcher `. + +Envoy can handle CONNECT in one of two ways: either proxying the CONNECT headers through as if they +were any other request, and letting the upstream terminate the CONNECT request, or by terminating the +CONNECT request, and forwarding the payload as raw TCP data. When CONNECT upgrade configuration is +set up, the default behavior is to proxy the CONNECT request, treating it like any other request using +the upgrade path. +If termination is desired, this can be accomplished by setting +:ref:`connect_config `. +If that message is present for CONNECT requests, the router filter will strip the request headers, +and forward the HTTP payload upstream. On receipt of initial TCP data from upstream, the router +will synthesize 200 response headers, and then forward the TCP data as the HTTP response body. + +.. warning:: + This mode of CONNECT support can create major security holes if configured incorrectly, as the upstream + will be forwarded *unsanitized* headers if they are in the body payload. Please use with caution. + +Tunneling TCP over HTTP/2 +^^^^^^^^^^^^^^^^^^^^^^^^^ +Envoy also has support for transforming raw TCP into HTTP/2 CONNECT requests. This can be used to +proxy multiplexed TCP over pre-warmed secure connections and amortize the cost of any TLS handshake. +An example set up proxying SMTP would look something like this + +[SMTP Upstream] --- raw SMTP --- [L2 Envoy] --- SMTP tunneled over HTTP/2 --- [L1 Envoy] --- raw SMTP --- [Client] + +Examples of such a set up can be found in the Envoy example config :repo:`directory ` +If you run `bazel-bin/source/exe/envoy-static --config-path configs/encapsulate_in_connect.yaml --base-id 1` +and `bazel-bin/source/exe/envoy-static --config-path configs/terminate_connect.yaml` +you will be running two Envoys, the first listening for TCP traffic on port 10000 and encapsulating it in an HTTP/2 +CONNECT request, and the second listening for HTTP/2 on 10001, stripping the CONNECT headers, and forwarding the +original TCP upstream, in this case to google.com. diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 8e19981456af..ab8820c35736 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -23,6 +23,7 @@ Changes * gzip filter: added option to set zlib's next output buffer size. * health checks: allow configuring health check transport sockets by specifying :ref:`transport socket match criteria `.
* http: added :ref:`stripping port from host header ` support. +* http: added support for proxying CONNECT requests, terminating CONNECT requests, and converting raw TCP streams into HTTP/2 CONNECT requests. See :ref:`upgrade documentation` for details. * http: fixed a bug where in some cases slash was moved from path to query string when :ref:`merging of adjacent slashes` is enabled. * http: fixed several bugs with applying correct connection close behavior across the http connection manager, health checker, and connection pool. This behavior may be temporarily reverted by setting runtime feature `envoy.reloadable_features.fix_connection_close` to false. * http: fixed a bug where the upgrade header was not cleared on responses to non-upgrade requests. diff --git a/docs/root/version_history/v1.4.0.rst b/docs/root/version_history/v1.4.0.rst index 1acf5011305e..f940deb1b5a6 100644 --- a/docs/root/version_history/v1.4.0.rst +++ b/docs/root/version_history/v1.4.0.rst @@ -14,7 +14,7 @@ Changes * Hot restart :repo:`compile time flag ` added. * Original destination :ref:`cluster ` and :ref:`load balancer ` added. -* :ref:`WebSocket ` is now supported. +* :ref:`WebSocket ` is now supported. * Virtual cluster priorities have been hard removed without deprecation as we are reasonably sure no one is using this feature. * Route `validate_clusters` option added. diff --git a/generated_api_shadow/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto b/generated_api_shadow/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto index 56a05eed758b..742e5584befe 100644 --- a/generated_api_shadow/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto +++ b/generated_api_shadow/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto @@ -232,7 +232,7 @@ message HttpConnectionManager { // Determines if upgrades are enabled or disabled by default. Defaults to true. // This can be overridden on a per-route basis with :ref:`cluster // ` as documented in the - // :ref:`upgrade documentation `. + // :ref:`upgrade documentation `. google.protobuf.BoolValue enabled = 3; } diff --git a/generated_api_shadow/envoy/config/route/v3/route_components.proto b/generated_api_shadow/envoy/config/route/v3/route_components.proto index 00d4f5e628a7..e99f136343e0 100644 --- a/generated_api_shadow/envoy/config/route/v3/route_components.proto +++ b/generated_api_shadow/envoy/config/route/v3/route_components.proto @@ -396,7 +396,6 @@ message RouteMatch { google.protobuf.BoolValue validated = 2; } - // [#not-implemented-hide:] // An extensible message for matching CONNECT requests. message ConnectMatcher { } @@ -428,7 +427,6 @@ message RouteMatch { // stripping. This needs more thought.] type.matcher.v3.RegexMatcher safe_regex = 10 [(validate.rules).message = {required: true}]; - // [#not-implemented-hide:] // If this is used as the matcher, the matcher will only match CONNECT requests. // Note that this will not match HTTP/2 upgrade-style CONNECT requests // (WebSocket and the like) as they are normalized in Envoy as HTTP/1.1 style @@ -436,6 +434,8 @@ message RouteMatch { // This is the only way to match CONNECT requests for HTTP/1.1. For HTTP/2, // where CONNECT requests may have a path, the path matchers will work if // there is a path present. + // Note that CONNECT support is currently considered alpha in Envoy. + // [#comment:TODO(htuch): Replace the above comment with an alpha tag. 
ConnectMatcher connect_matcher = 12; string hidden_envoy_deprecated_regex = 3 [ @@ -732,7 +732,6 @@ message RouteAction { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RouteAction.UpgradeConfig"; - // [#not-implemented-hide:] // Configuration for sending data upstream as a raw data payload. This is used for // CONNECT requests, when forwarding CONNECT payload as raw TCP. message ConnectConfig { @@ -749,9 +748,10 @@ message RouteAction { // Determines if upgrades are available on this route. Defaults to true. google.protobuf.BoolValue enabled = 2; - // [#not-implemented-hide:] // Configuration for sending data upstream as a raw data payload. This is used for // CONNECT requests, when forwarding CONNECT payload as raw TCP. + // Note that CONNECT support is currently considered alpha in Envoy. + // [#comment:TODO(htuch): Replace the above comment with an alpha tag. ConnectConfig connect_config = 3; } diff --git a/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto b/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto index 6e1b1f9f5a0a..8dfa58177bdb 100644 --- a/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto +++ b/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto @@ -394,7 +394,6 @@ message RouteMatch { google.protobuf.BoolValue validated = 2; } - // [#not-implemented-hide:] // An extensible message for matching CONNECT requests. message ConnectMatcher { option (udpa.annotations.versioning).previous_message_type = @@ -430,7 +429,6 @@ message RouteMatch { // stripping. This needs more thought.] type.matcher.v4alpha.RegexMatcher safe_regex = 10 [(validate.rules).message = {required: true}]; - // [#not-implemented-hide:] // If this is used as the matcher, the matcher will only match CONNECT requests. // Note that this will not match HTTP/2 upgrade-style CONNECT requests // (WebSocket and the like) as they are normalized in Envoy as HTTP/1.1 style @@ -438,6 +436,8 @@ message RouteMatch { // This is the only way to match CONNECT requests for HTTP/1.1. For HTTP/2, // where CONNECT requests may have a path, the path matchers will work if // there is a path present. + // Note that CONNECT support is currently considered alpha in Envoy. + // [#comment:TODO(htuch): Replace the above comment with an alpha tag. ConnectMatcher connect_matcher = 12; } @@ -724,7 +724,6 @@ message RouteAction { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RouteAction.UpgradeConfig"; - // [#not-implemented-hide:] // Configuration for sending data upstream as a raw data payload. This is used for // CONNECT requests, when forwarding CONNECT payload as raw TCP. message ConnectConfig { @@ -744,9 +743,10 @@ message RouteAction { // Determines if upgrades are available on this route. Defaults to true. google.protobuf.BoolValue enabled = 2; - // [#not-implemented-hide:] // Configuration for sending data upstream as a raw data payload. This is used for // CONNECT requests, when forwarding CONNECT payload as raw TCP. + // Note that CONNECT support is currently considered alpha in Envoy. + // [#comment:TODO(htuch): Replace the above comment with an alpha tag. 
ConnectConfig connect_config = 3; } diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto index 21616dcc386b..346df090a770 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto @@ -235,7 +235,7 @@ message HttpConnectionManager { // Determines if upgrades are enabled or disabled by default. Defaults to true. // This can be overridden on a per-route basis with :ref:`cluster // ` as documented in the - // :ref:`upgrade documentation `. + // :ref:`upgrade documentation `. google.protobuf.BoolValue enabled = 3; } diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto index 8f370b21d8f1..41284b7e1095 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto @@ -231,7 +231,7 @@ message HttpConnectionManager { // Determines if upgrades are enabled or disabled by default. Defaults to true. // This can be overridden on a per-route basis with :ref:`cluster // ` as documented in the - // :ref:`upgrade documentation `. + // :ref:`upgrade documentation `. google.protobuf.BoolValue enabled = 3; } diff --git a/test/integration/protocol_integration_test.cc b/test/integration/protocol_integration_test.cc index cc684148b90c..4af2ea219c90 100644 --- a/test/integration/protocol_integration_test.cc +++ b/test/integration/protocol_integration_test.cc @@ -1849,8 +1849,6 @@ TEST_P(DownstreamProtocolIntegrationTest, ConnectIsBlocked) { Http::TestRequestHeaderMapImpl{{":method", "CONNECT"}, {":authority", "host.com:80"}}); if (downstreamProtocol() == Http::CodecClient::Type::HTTP1) { - // TODO(alyssawilk) either reinstate prior behavior, or include a release - // note with this PR. // Because CONNECT requests for HTTP/1 do not include a path, they will fail // to find a route match and return a 404. response->waitForEndStream(); From afaedbb2cb81810be055dde445aa6755a7c7853c Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Wed, 13 May 2020 10:26:06 -0400 Subject: [PATCH 150/909] docs: update to security process for low severity bugfixes (#11148) Risk Level: n/a Testing: n/a Docs Changes: yes Release Notes: no Signed-off-by: Alyssa Wilk --- SECURITY.md | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/SECURITY.md b/SECURITY.md index 98bf6bffb5c2..40ebecff3bce 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -72,18 +72,18 @@ severity. If a vulnerability does not affect any point release but only master, additional caveats apply: -* If the issue is detected and a fix is available within 5 days of the introduction of the - vulnerability, the fix will be publicly reviewed and landed on master. A courtesy e-mail will be - sent to envoy-users@googlegroups.com, envoy-dev@googlegroups.com, - envoy-security-announce@googlegroups.com and cncf-envoy-distributors-announce@lists.cncf.io if - the severity is medium or greater. 
-* If the vulnerability has been in existence for more than 5 days, we will activate the security - release process for any medium or higher vulnerabilities. Low severity vulnerabilities will still - be merged onto master as soon as a fix is available. - -We advise distributors and operators working from the master branch to allow at least 3 days soak +* If the issue is detected and a fix is available within 7 days of the introduction of the + vulnerability, or the issue is deemed a low severity vulnerability by the Envoy maintainer and + security teams, the fix will be publicly reviewed and landed on master. If the severity is at least + medium or at maintainer discretion a courtesy e-mail will be sent to envoy-users@googlegroups.com, + envoy-dev@googlegroups.com, envoy-security-announce@googlegroups.com and + cncf-envoy-distributors-announce@lists.cncf.io. +* If the vulnerability has been in existence for more than 7 days and is medium or higher, we will + activate the security release process. + +We advise distributors and operators working from the master branch to allow at least 5 days soak time after cutting a binary release before distribution or rollout, to allow time for our fuzzers to -detect issues during their execution on ClusterFuzz. A soak period of 5 days provides an even stronger +detect issues during their execution on ClusterFuzz. A soak period of 7 days provides an even stronger guarantee, since we will invoke the security release process for medium or higher severity issues for these older bugs. From 57d369a2781a5259b7b5089b299fc7bdf0ce94a1 Mon Sep 17 00:00:00 2001 From: rulex123 <29862113+rulex123@users.noreply.github.com> Date: Wed, 13 May 2020 17:05:57 +0200 Subject: [PATCH 151/909] [admin] extract logs and profiler handlers to separate classes (#11087) Description: extract logging and profiling-related handlers from admin.h|cc and into separate classes (part of #5505 ) Risk Level: low Testing: pre-existing tests Docs Changes: n/a Release Notes: n/a Signed-off-by: Erica Manno --- source/server/http/BUILD | 35 ++++- source/server/http/admin.cc | 167 +-------------------- source/server/http/admin.h | 17 +-- source/server/http/logs_handler.cc | 95 ++++++++++++ source/server/http/logs_handler.h | 39 +++++ source/server/http/profiling_handler.cc | 84 +++++++++++ source/server/http/profiling_handler.h | 31 ++++ test/server/http/BUILD | 18 ++- test/server/http/admin_test.cc | 79 ---------- test/server/http/logs_handler_test.cc | 21 +++ test/server/http/profiling_handler_test.cc | 82 ++++++++++ 11 files changed, 415 insertions(+), 253 deletions(-) create mode 100644 source/server/http/logs_handler.cc create mode 100644 source/server/http/logs_handler.h create mode 100644 source/server/http/profiling_handler.cc create mode 100644 source/server/http/profiling_handler.h create mode 100644 test/server/http/logs_handler_test.cc create mode 100644 test/server/http/profiling_handler_test.cc diff --git a/source/server/http/BUILD b/source/server/http/BUILD index 2e4d47c4c694..149ad12b408f 100644 --- a/source/server/http/BUILD +++ b/source/server/http/BUILD @@ -16,6 +16,8 @@ envoy_cc_library( ":admin_filter_lib", ":config_tracker_lib", ":listeners_handler_lib", + ":logs_handler_lib", + ":profiling_handler_lib", ":runtime_handler_lib", ":stats_handler_lib", ":utils_lib", @@ -57,7 +59,6 @@ envoy_cc_library( "//source/common/network:listen_socket_lib", "//source/common/network:raw_buffer_socket_lib", "//source/common/network:utility_lib", - "//source/common/profiler:profiler_lib", 
"//source/common/router:config_lib", "//source/common/router:scoped_config_lib", "//source/common/stats:isolated_store_lib", @@ -157,6 +158,38 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "logs_handler_lib", + srcs = ["logs_handler.cc"], + hdrs = ["logs_handler.h"], + deps = [ + ":handler_ctx_lib", + ":utils_lib", + "//include/envoy/http:codes_interface", + "//include/envoy/server:admin_interface", + "//include/envoy/server:instance_interface", + "//source/common/buffer:buffer_lib", + "//source/common/common:minimal_logger_lib", + "//source/common/http:codes_lib", + "//source/common/http:header_map_lib", + ], +) + +envoy_cc_library( + name = "profiling_handler_lib", + srcs = ["profiling_handler.cc"], + hdrs = ["profiling_handler.h"], + deps = [ + ":utils_lib", + "//include/envoy/http:codes_interface", + "//include/envoy/server:admin_interface", + "//source/common/buffer:buffer_lib", + "//source/common/http:codes_lib", + "//source/common/http:header_map_lib", + "//source/common/profiler:profiler_lib", + ], +) + envoy_cc_library( name = "utils_lib", srcs = ["utils.cc"], diff --git a/source/server/http/admin.cc b/source/server/http/admin.cc index d2a0bd87c317..cfdcbdff6eb3 100644 --- a/source/server/http/admin.cc +++ b/source/server/http/admin.cc @@ -41,7 +41,6 @@ #include "common/memory/utils.h" #include "common/network/listen_socket_impl.h" #include "common/network/utility.h" -#include "common/profiler/profiler.h" #include "common/protobuf/protobuf.h" #include "common/protobuf/utility.h" #include "common/router/config_impl.h" @@ -259,53 +258,6 @@ void trimResourceMessage(const Protobuf::FieldMask& field_mask, Protobuf::Messag } // namespace -bool AdminImpl::changeLogLevel(const Http::Utility::QueryParams& params) { - if (params.size() != 1) { - return false; - } - - std::string name = params.begin()->first; - std::string level = params.begin()->second; - - // First see if the level is valid. - size_t level_to_use = std::numeric_limits::max(); - for (size_t i = 0; i < ARRAY_SIZE(spdlog::level::level_string_views); i++) { - if (level == spdlog::level::level_string_views[i]) { - level_to_use = i; - break; - } - } - - if (level_to_use == std::numeric_limits::max()) { - return false; - } - - // Now either change all levels or a single level. 
- if (name == "level") { - ENVOY_LOG(debug, "change all log levels: level='{}'", level); - for (Logger::Logger& logger : Logger::Registry::loggers()) { - logger.setLevel(static_cast(level_to_use)); - } - } else { - ENVOY_LOG(debug, "change log level: name='{}' level='{}'", name, level); - Logger::Logger* logger_to_change = nullptr; - for (Logger::Logger& logger : Logger::Registry::loggers()) { - if (logger.name() == name) { - logger_to_change = &logger; - break; - } - } - - if (!logger_to_change) { - return false; - } - - logger_to_change->setLevel(static_cast(level_to_use)); - } - - return true; -} - void AdminImpl::addOutlierInfo(const std::string& cluster_name, const Upstream::Outlier::Detector* outlier_detector, Buffer::Instance& response) { @@ -603,77 +555,6 @@ Http::Code AdminImpl::handlerContention(absl::string_view, return Http::Code::OK; } -Http::Code AdminImpl::handlerCpuProfiler(absl::string_view url, Http::ResponseHeaderMap&, - Buffer::Instance& response, AdminStream&) { - Http::Utility::QueryParams query_params = Http::Utility::parseQueryString(url); - if (query_params.size() != 1 || query_params.begin()->first != "enable" || - (query_params.begin()->second != "y" && query_params.begin()->second != "n")) { - response.add("?enable=\n"); - return Http::Code::BadRequest; - } - - bool enable = query_params.begin()->second == "y"; - if (enable && !Profiler::Cpu::profilerEnabled()) { - if (!Profiler::Cpu::startProfiler(profile_path_)) { - response.add("failure to start the profiler"); - return Http::Code::InternalServerError; - } - - } else if (!enable && Profiler::Cpu::profilerEnabled()) { - Profiler::Cpu::stopProfiler(); - } - - response.add("OK\n"); - return Http::Code::OK; -} - -Http::Code AdminImpl::handlerHeapProfiler(absl::string_view url, Http::ResponseHeaderMap&, - Buffer::Instance& response, AdminStream&) { - if (!Profiler::Heap::profilerEnabled()) { - response.add("The current build does not support heap profiler"); - return Http::Code::NotImplemented; - } - - Http::Utility::QueryParams query_params = Http::Utility::parseQueryString(url); - if (query_params.size() != 1 || query_params.begin()->first != "enable" || - (query_params.begin()->second != "y" && query_params.begin()->second != "n")) { - response.add("?enable=\n"); - return Http::Code::BadRequest; - } - - Http::Code res = Http::Code::OK; - bool enable = query_params.begin()->second == "y"; - if (enable) { - if (Profiler::Heap::isProfilerStarted()) { - response.add("Fail to start heap profiler: already started"); - res = Http::Code::BadRequest; - } else if (!Profiler::Heap::startProfiler(profile_path_)) { - // GCOVR_EXCL_START - // TODO(silentdai) remove the GCOVR when startProfiler is better implemented - response.add("Fail to start the heap profiler"); - res = Http::Code::InternalServerError; - // GCOVR_EXCL_STOP - } else { - response.add("Starting heap profiler"); - res = Http::Code::OK; - } - } else { - // !enable - if (!Profiler::Heap::isProfilerStarted()) { - response.add("Fail to stop heap profiler: not started"); - res = Http::Code::BadRequest; - } else { - Profiler::Heap::stopProfiler(); - response.add( - fmt::format("Heap profiler stopped and data written to {}. 
See " - "http://goog-perftools.sourceforge.net/doc/heap_profiler.html for details.", - profile_path_)); - res = Http::Code::OK; - } - } - return res; -} - Http::Code AdminImpl::handlerHealthcheckFail(absl::string_view, Http::ResponseHeaderMap&, Buffer::Instance& response, AdminStream&) { server_.failHealthcheck(true); @@ -694,32 +575,6 @@ Http::Code AdminImpl::handlerHotRestartVersion(absl::string_view, Http::Response return Http::Code::OK; } -Http::Code AdminImpl::handlerLogging(absl::string_view url, Http::ResponseHeaderMap&, - Buffer::Instance& response, AdminStream&) { - Http::Utility::QueryParams query_params = Http::Utility::parseQueryString(url); - - Http::Code rc = Http::Code::OK; - if (!query_params.empty() && !changeLogLevel(query_params)) { - response.add("usage: /logging?= (change single level)\n"); - response.add("usage: /logging?level= (change all levels)\n"); - response.add("levels: "); - for (auto level_string_view : spdlog::level::level_string_views) { - response.add(fmt::format("{} ", level_string_view)); - } - - response.add("\n"); - rc = Http::Code::NotFound; - } - - response.add("active loggers:\n"); - for (const Logger::Logger& logger : Logger::Registry::loggers()) { - response.add(fmt::format(" {}: {}\n", logger.name(), logger.levelString())); - } - - response.add("\n"); - return rc; -} - // TODO(ambuc): Add more tcmalloc stats, export proto details based on allocator. Http::Code AdminImpl::handlerMemory(absl::string_view, Http::ResponseHeaderMap& response_headers, Buffer::Instance& response, AdminStream&) { @@ -800,13 +655,6 @@ Http::Code AdminImpl::handlerCerts(absl::string_view, Http::ResponseHeaderMap& r return Http::Code::OK; } -Http::Code AdminImpl::handlerReopenLogs(absl::string_view, Http::ResponseHeaderMap&, - Buffer::Instance& response, AdminStream&) { - server_.accessLogManager().reopen(); - response.add("OK\n"); - return Http::Code::OK; -} - ConfigTracker& AdminImpl::getConfigTracker() { return config_tracker_; } AdminImpl::NullRouteConfigProvider::NullRouteConfigProvider(TimeSource& time_source) @@ -845,7 +693,8 @@ AdminImpl::AdminImpl(const std::string& profile_path, Server::Instance& server) Http::ConnectionManagerImpl::generateTracingStats("http.admin.", no_op_store_)), route_config_provider_(server.timeSource()), scoped_route_config_provider_(server.timeSource()), stats_handler_(server), - runtime_handler_(server), listeners_handler_(server), + logs_handler_(server), profiling_handler_(profile_path), runtime_handler_(server), + listeners_handler_(server), // TODO(jsedgwick) add /runtime_reset endpoint that removes all admin-set values handlers_{ {"/", "Admin home page", MAKE_ADMIN_HANDLER(handlerAdminHome), false, false}, @@ -857,9 +706,9 @@ AdminImpl::AdminImpl(const std::string& profile_path, Server::Instance& server) {"/contention", "dump current Envoy mutex contention stats (if enabled)", MAKE_ADMIN_HANDLER(handlerContention), false, false}, {"/cpuprofiler", "enable/disable the CPU profiler", - MAKE_ADMIN_HANDLER(handlerCpuProfiler), false, true}, + MAKE_ADMIN_HANDLER(profiling_handler_.handlerCpuProfiler), false, true}, {"/heapprofiler", "enable/disable the heap profiler", - MAKE_ADMIN_HANDLER(handlerHeapProfiler), false, true}, + MAKE_ADMIN_HANDLER(profiling_handler_.handlerHeapProfiler), false, true}, {"/healthcheck/fail", "cause the server to fail health checks", MAKE_ADMIN_HANDLER(handlerHealthcheckFail), false, true}, {"/healthcheck/ok", "cause the server to pass health checks", @@ -868,8 +717,8 @@ AdminImpl::AdminImpl(const std::string& 
profile_path, Server::Instance& server) false}, {"/hot_restart_version", "print the hot restart compatibility version", MAKE_ADMIN_HANDLER(handlerHotRestartVersion), false, false}, - {"/logging", "query/change logging levels", MAKE_ADMIN_HANDLER(handlerLogging), false, - true}, + {"/logging", "query/change logging levels", + MAKE_ADMIN_HANDLER(logs_handler_.handlerLogging), false, true}, {"/memory", "print current allocation/heap usage", MAKE_ADMIN_HANDLER(handlerMemory), false, false}, {"/quitquitquit", "exit the server", MAKE_ADMIN_HANDLER(handlerQuitQuitQuit), false, @@ -900,8 +749,8 @@ AdminImpl::AdminImpl(const std::string& profile_path, Server::Instance& server) false, false}, {"/runtime_modify", "modify runtime values", MAKE_ADMIN_HANDLER(runtime_handler_.handlerRuntimeModify), false, true}, - {"/reopen_logs", "reopen access logs", MAKE_ADMIN_HANDLER(handlerReopenLogs), false, - true}, + {"/reopen_logs", "reopen access logs", + MAKE_ADMIN_HANDLER(logs_handler_.handlerReopenLogs), false, true}, }, date_provider_(server.dispatcher().timeSource()), admin_filter_chain_(std::make_shared()) {} diff --git a/source/server/http/admin.h b/source/server/http/admin.h index bea7cef019c4..8d772d59b87d 100644 --- a/source/server/http/admin.h +++ b/source/server/http/admin.h @@ -39,6 +39,8 @@ #include "server/http/admin_filter.h" #include "server/http/config_tracker_impl.h" #include "server/http/listeners_handler.h" +#include "server/http/logs_handler.h" +#include "server/http/profiling_handler.h" #include "server/http/runtime_handler.h" #include "server/http/stats_handler.h" @@ -237,13 +239,6 @@ class AdminImpl : public Admin, TimeSource& time_source_; }; - /** - * Attempt to change the log level of a logger or all loggers - * @param params supplies the incoming endpoint query params. - * @return TRUE if level change succeeded, FALSE otherwise. - */ - bool changeLogLevel(const Http::Utility::QueryParams& params); - /** * Helper methods for the /clusters url handler. 
*/ @@ -307,9 +302,6 @@ class AdminImpl : public Admin, Http::Code handlerHotRestartVersion(absl::string_view path_and_query, Http::ResponseHeaderMap& response_headers, Buffer::Instance& response, AdminStream&); - Http::Code handlerLogging(absl::string_view path_and_query, - Http::ResponseHeaderMap& response_headers, Buffer::Instance& response, - AdminStream&); Http::Code handlerMemory(absl::string_view path_and_query, Http::ResponseHeaderMap& response_headers, Buffer::Instance& response, AdminStream&); @@ -323,9 +315,6 @@ class AdminImpl : public Admin, Http::Code handlerReady(absl::string_view path_and_query, Http::ResponseHeaderMap& response_headers, Buffer::Instance& response, AdminStream&); - Http::Code handlerReopenLogs(absl::string_view path_and_query, - Http::ResponseHeaderMap& response_headers, - Buffer::Instance& response, AdminStream&); class AdminListenSocketFactory : public Network::ListenSocketFactory { public: @@ -426,6 +415,8 @@ class AdminImpl : public Admin, NullRouteConfigProvider route_config_provider_; NullScopedRouteConfigProvider scoped_route_config_provider_; Server::StatsHandler stats_handler_; + Server::LogsHandler logs_handler_; + Server::ProfilingHandler profiling_handler_; Server::RuntimeHandler runtime_handler_; Server::ListenersHandler listeners_handler_; std::list handlers_; diff --git a/source/server/http/logs_handler.cc b/source/server/http/logs_handler.cc new file mode 100644 index 000000000000..e5de4302e082 --- /dev/null +++ b/source/server/http/logs_handler.cc @@ -0,0 +1,95 @@ +#include "server/http/logs_handler.h" + +#include + +#include "common/common/logger.h" + +#include "server/http/utils.h" + +namespace Envoy { +namespace Server { + +LogsHandler::LogsHandler(Server::Instance& server) : HandlerContextBase(server) {} + +Http::Code LogsHandler::handlerLogging(absl::string_view url, Http::ResponseHeaderMap&, + Buffer::Instance& response, AdminStream&) { + Http::Utility::QueryParams query_params = Http::Utility::parseQueryString(url); + + Http::Code rc = Http::Code::OK; + if (!query_params.empty() && !changeLogLevel(query_params)) { + response.add("usage: /logging?= (change single level)\n"); + response.add("usage: /logging?level= (change all levels)\n"); + response.add("levels: "); + for (auto level_string_view : spdlog::level::level_string_views) { + response.add(fmt::format("{} ", level_string_view)); + } + + response.add("\n"); + rc = Http::Code::NotFound; + } + + response.add("active loggers:\n"); + for (const Logger::Logger& logger : Logger::Registry::loggers()) { + response.add(fmt::format(" {}: {}\n", logger.name(), logger.levelString())); + } + + response.add("\n"); + return rc; +} + +Http::Code LogsHandler::handlerReopenLogs(absl::string_view, Http::ResponseHeaderMap&, + Buffer::Instance& response, AdminStream&) { + server_.accessLogManager().reopen(); + response.add("OK\n"); + return Http::Code::OK; +} + +bool LogsHandler::changeLogLevel(const Http::Utility::QueryParams& params) { + if (params.size() != 1) { + return false; + } + + std::string name = params.begin()->first; + std::string level = params.begin()->second; + + // First see if the level is valid. + size_t level_to_use = std::numeric_limits::max(); + for (size_t i = 0; i < ARRAY_SIZE(spdlog::level::level_string_views); i++) { + if (level == spdlog::level::level_string_views[i]) { + level_to_use = i; + break; + } + } + + if (level_to_use == std::numeric_limits::max()) { + return false; + } + + // Now either change all levels or a single level. 
+ if (name == "level") { + ENVOY_LOG(debug, "change all log levels: level='{}'", level); + for (Logger::Logger& logger : Logger::Registry::loggers()) { + logger.setLevel(static_cast(level_to_use)); + } + } else { + ENVOY_LOG(debug, "change log level: name='{}' level='{}'", name, level); + Logger::Logger* logger_to_change = nullptr; + for (Logger::Logger& logger : Logger::Registry::loggers()) { + if (logger.name() == name) { + logger_to_change = &logger; + break; + } + } + + if (!logger_to_change) { + return false; + } + + logger_to_change->setLevel(static_cast(level_to_use)); + } + + return true; +} + +} // namespace Server +} // namespace Envoy diff --git a/source/server/http/logs_handler.h b/source/server/http/logs_handler.h new file mode 100644 index 000000000000..60002e484993 --- /dev/null +++ b/source/server/http/logs_handler.h @@ -0,0 +1,39 @@ +#pragma once + +#include "envoy/buffer/buffer.h" +#include "envoy/http/codes.h" +#include "envoy/http/header_map.h" +#include "envoy/server/admin.h" +#include "envoy/server/instance.h" + +#include "server/http/handler_ctx.h" + +#include "absl/strings/string_view.h" + +namespace Envoy { +namespace Server { + +class LogsHandler : public HandlerContextBase, Logger::Loggable { + +public: + LogsHandler(Server::Instance& server); + + Http::Code handlerLogging(absl::string_view path_and_query, + Http::ResponseHeaderMap& response_headers, Buffer::Instance& response, + AdminStream&); + + Http::Code handlerReopenLogs(absl::string_view path_and_query, + Http::ResponseHeaderMap& response_headers, + Buffer::Instance& response, AdminStream&); + +private: + /** + * Attempt to change the log level of a logger or all loggers + * @param params supplies the incoming endpoint query params. + * @return TRUE if level change succeeded, FALSE otherwise. 
+ */ + bool changeLogLevel(const Http::Utility::QueryParams& params); +}; + +} // namespace Server +} // namespace Envoy diff --git a/source/server/http/profiling_handler.cc b/source/server/http/profiling_handler.cc new file mode 100644 index 000000000000..76c31a2764e4 --- /dev/null +++ b/source/server/http/profiling_handler.cc @@ -0,0 +1,84 @@ +#include "server/http/profiling_handler.h" + +#include "common/profiler/profiler.h" + +#include "server/http/utils.h" + +namespace Envoy { +namespace Server { + +ProfilingHandler::ProfilingHandler(const std::string& profile_path) : profile_path_(profile_path) {} + +Http::Code ProfilingHandler::handlerCpuProfiler(absl::string_view url, Http::ResponseHeaderMap&, + Buffer::Instance& response, AdminStream&) { + Http::Utility::QueryParams query_params = Http::Utility::parseQueryString(url); + if (query_params.size() != 1 || query_params.begin()->first != "enable" || + (query_params.begin()->second != "y" && query_params.begin()->second != "n")) { + response.add("?enable=\n"); + return Http::Code::BadRequest; + } + + bool enable = query_params.begin()->second == "y"; + if (enable && !Profiler::Cpu::profilerEnabled()) { + if (!Profiler::Cpu::startProfiler(profile_path_)) { + response.add("failure to start the profiler"); + return Http::Code::InternalServerError; + } + + } else if (!enable && Profiler::Cpu::profilerEnabled()) { + Profiler::Cpu::stopProfiler(); + } + + response.add("OK\n"); + return Http::Code::OK; +} + +Http::Code ProfilingHandler::handlerHeapProfiler(absl::string_view url, Http::ResponseHeaderMap&, + Buffer::Instance& response, AdminStream&) { + if (!Profiler::Heap::profilerEnabled()) { + response.add("The current build does not support heap profiler"); + return Http::Code::NotImplemented; + } + + Http::Utility::QueryParams query_params = Http::Utility::parseQueryString(url); + if (query_params.size() != 1 || query_params.begin()->first != "enable" || + (query_params.begin()->second != "y" && query_params.begin()->second != "n")) { + response.add("?enable=\n"); + return Http::Code::BadRequest; + } + + Http::Code res = Http::Code::OK; + bool enable = query_params.begin()->second == "y"; + if (enable) { + if (Profiler::Heap::isProfilerStarted()) { + response.add("Fail to start heap profiler: already started"); + res = Http::Code::BadRequest; + } else if (!Profiler::Heap::startProfiler(profile_path_)) { + // GCOVR_EXCL_START + // TODO(silentdai) remove the GCOVR when startProfiler is better implemented + response.add("Fail to start the heap profiler"); + res = Http::Code::InternalServerError; + // GCOVR_EXCL_STOP + } else { + response.add("Starting heap profiler"); + res = Http::Code::OK; + } + } else { + // !enable + if (!Profiler::Heap::isProfilerStarted()) { + response.add("Fail to stop heap profiler: not started"); + res = Http::Code::BadRequest; + } else { + Profiler::Heap::stopProfiler(); + response.add( + fmt::format("Heap profiler stopped and data written to {}. 
See " + "http://goog-perftools.sourceforge.net/doc/heap_profiler.html for details.", + profile_path_)); + res = Http::Code::OK; + } + } + return res; +} + +} // namespace Server +} // namespace Envoy diff --git a/source/server/http/profiling_handler.h b/source/server/http/profiling_handler.h new file mode 100644 index 000000000000..2ec81e24cae5 --- /dev/null +++ b/source/server/http/profiling_handler.h @@ -0,0 +1,31 @@ +#pragma once + +#include "envoy/buffer/buffer.h" +#include "envoy/http/codes.h" +#include "envoy/http/header_map.h" +#include "envoy/server/admin.h" + +#include "absl/strings/string_view.h" + +namespace Envoy { +namespace Server { + +class ProfilingHandler { + +public: + ProfilingHandler(const std::string& profile_path); + + Http::Code handlerCpuProfiler(absl::string_view path_and_query, + Http::ResponseHeaderMap& response_headers, + Buffer::Instance& response, AdminStream&); + + Http::Code handlerHeapProfiler(absl::string_view path_and_query, + Http::ResponseHeaderMap& response_headers, + Buffer::Instance& response, AdminStream&); + +private: + const std::string profile_path_; +}; + +} // namespace Server +} // namespace Envoy diff --git a/test/server/http/BUILD b/test/server/http/BUILD index d2f1d5c03018..d5dd5f83bda0 100644 --- a/test/server/http/BUILD +++ b/test/server/http/BUILD @@ -31,7 +31,6 @@ envoy_cc_test( "//include/envoy/runtime:runtime_interface", "//source/common/http:message_lib", "//source/common/json:json_loader_lib", - "//source/common/profiler:profiler_lib", "//source/common/protobuf", "//source/common/protobuf:utility_lib", "//source/common/stats:symbol_table_creator_lib", @@ -88,6 +87,23 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "logs_handler_test", + srcs = ["logs_handler_test.cc"], + deps = [ + ":admin_instance_lib", + ], +) + +envoy_cc_test( + name = "profiling_handler_test", + srcs = ["profiling_handler_test.cc"], + deps = [ + ":admin_instance_lib", + "//test/test_common:logging_lib", + ], +) + envoy_cc_test( name = "config_tracker_impl_test", srcs = ["config_tracker_impl_test.cc"], diff --git a/test/server/http/admin_test.cc b/test/server/http/admin_test.cc index 3368e6d200bf..5be06a9cb682 100644 --- a/test/server/http/admin_test.cc +++ b/test/server/http/admin_test.cc @@ -14,7 +14,6 @@ #include "common/http/message_impl.h" #include "common/json/json_loader.h" -#include "common/profiler/profiler.h" #include "common/protobuf/protobuf.h" #include "common/protobuf/utility.h" @@ -47,60 +46,6 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, AdminInstanceTest, testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), TestUtility::ipTestParamsToString); -TEST_P(AdminInstanceTest, AdminCpuProfiler) { - Buffer::OwnedImpl data; - Http::ResponseHeaderMapImpl header_map; - - // Can only get code coverage of AdminImpl::handlerCpuProfiler stopProfiler with - // a real profiler linked in (successful call to startProfiler). 
-#ifdef PROFILER_AVAILABLE - EXPECT_EQ(Http::Code::OK, postCallback("/cpuprofiler?enable=y", header_map, data)); - EXPECT_TRUE(Profiler::Cpu::profilerEnabled()); -#else - EXPECT_EQ(Http::Code::InternalServerError, - postCallback("/cpuprofiler?enable=y", header_map, data)); - EXPECT_FALSE(Profiler::Cpu::profilerEnabled()); -#endif - - EXPECT_EQ(Http::Code::OK, postCallback("/cpuprofiler?enable=n", header_map, data)); - EXPECT_FALSE(Profiler::Cpu::profilerEnabled()); -} - -TEST_P(AdminInstanceTest, AdminHeapProfilerOnRepeatedRequest) { - Buffer::OwnedImpl data; - Http::ResponseHeaderMapImpl header_map; - auto repeatResultCode = Http::Code::BadRequest; -#ifndef PROFILER_AVAILABLE - repeatResultCode = Http::Code::NotImplemented; -#endif - - postCallback("/heapprofiler?enable=y", header_map, data); - EXPECT_EQ(repeatResultCode, postCallback("/heapprofiler?enable=y", header_map, data)); - - postCallback("/heapprofiler?enable=n", header_map, data); - EXPECT_EQ(repeatResultCode, postCallback("/heapprofiler?enable=n", header_map, data)); -} - -TEST_P(AdminInstanceTest, AdminHeapProfiler) { - Buffer::OwnedImpl data; - Http::ResponseHeaderMapImpl header_map; - - // The below flow need to begin with the profiler not running - Profiler::Heap::stopProfiler(); - -#ifdef PROFILER_AVAILABLE - EXPECT_EQ(Http::Code::OK, postCallback("/heapprofiler?enable=y", header_map, data)); - EXPECT_TRUE(Profiler::Heap::isProfilerStarted()); - EXPECT_EQ(Http::Code::OK, postCallback("/heapprofiler?enable=n", header_map, data)); -#else - EXPECT_EQ(Http::Code::NotImplemented, postCallback("/heapprofiler?enable=y", header_map, data)); - EXPECT_FALSE(Profiler::Heap::isProfilerStarted()); - EXPECT_EQ(Http::Code::NotImplemented, postCallback("/heapprofiler?enable=n", header_map, data)); -#endif - - EXPECT_FALSE(Profiler::Heap::isProfilerStarted()); -} - TEST_P(AdminInstanceTest, MutatesErrorWithGet) { Buffer::OwnedImpl data; Http::ResponseHeaderMapImpl header_map; @@ -112,20 +57,6 @@ TEST_P(AdminInstanceTest, MutatesErrorWithGet) { EXPECT_EQ(Http::Code::MethodNotAllowed, getCallback(path, header_map, data))); } -TEST_P(AdminInstanceTest, AdminBadProfiler) { - Buffer::OwnedImpl data; - AdminImpl admin_bad_profile_path(TestEnvironment::temporaryPath("some/unlikely/bad/path.prof"), - server_); - Http::ResponseHeaderMapImpl header_map; - const absl::string_view post = Http::Headers::get().MethodValues.Post; - request_headers_.setMethod(post); - admin_filter_.decodeHeaders(request_headers_, false); - EXPECT_NO_LOGS(EXPECT_EQ(Http::Code::InternalServerError, - admin_bad_profile_path.runCallback("/cpuprofiler?enable=y", header_map, - data, admin_filter_))); - EXPECT_FALSE(Profiler::Cpu::profilerEnabled()); -} - TEST_P(AdminInstanceTest, WriteAddressToFile) { std::ifstream address_file(address_out_path_); std::string address_from_file; @@ -497,16 +428,6 @@ TEST_P(AdminInstanceTest, ContextThatReturnsNullCertDetails) { EXPECT_EQ(expected_empty_json, response.toString()); } -TEST_P(AdminInstanceTest, ReopenLogs) { - Http::ResponseHeaderMapImpl header_map; - Buffer::OwnedImpl response; - testing::NiceMock access_log_manager_; - - EXPECT_CALL(server_, accessLogManager()).WillRepeatedly(ReturnRef(access_log_manager_)); - EXPECT_CALL(access_log_manager_, reopen()); - EXPECT_EQ(Http::Code::OK, postCallback("/reopen_logs", header_map, response)); -} - TEST_P(AdminInstanceTest, ClustersJson) { Upstream::ClusterManager::ClusterInfoMap cluster_map; ON_CALL(server_.cluster_manager_, clusters()).WillByDefault(ReturnPointee(&cluster_map)); diff 
--git a/test/server/http/logs_handler_test.cc b/test/server/http/logs_handler_test.cc new file mode 100644 index 000000000000..99be88296e22 --- /dev/null +++ b/test/server/http/logs_handler_test.cc @@ -0,0 +1,21 @@ +#include "test/server/http/admin_instance.h" + +namespace Envoy { +namespace Server { + +INSTANTIATE_TEST_SUITE_P(IpVersions, AdminInstanceTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); + +TEST_P(AdminInstanceTest, ReopenLogs) { + Http::ResponseHeaderMapImpl header_map; + Buffer::OwnedImpl response; + testing::NiceMock access_log_manager_; + + EXPECT_CALL(server_, accessLogManager()).WillRepeatedly(ReturnRef(access_log_manager_)); + EXPECT_CALL(access_log_manager_, reopen()); + EXPECT_EQ(Http::Code::OK, postCallback("/reopen_logs", header_map, response)); +} + +} // namespace Server +} // namespace Envoy diff --git a/test/server/http/profiling_handler_test.cc b/test/server/http/profiling_handler_test.cc new file mode 100644 index 000000000000..949f1c2c9368 --- /dev/null +++ b/test/server/http/profiling_handler_test.cc @@ -0,0 +1,82 @@ +#include "common/profiler/profiler.h" + +#include "test/server/http/admin_instance.h" +#include "test/test_common/logging.h" + +namespace Envoy { +namespace Server { + +INSTANTIATE_TEST_SUITE_P(IpVersions, AdminInstanceTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); + +TEST_P(AdminInstanceTest, AdminCpuProfiler) { + Buffer::OwnedImpl data; + Http::ResponseHeaderMapImpl header_map; + + // Can only get code coverage of AdminImpl::handlerCpuProfiler stopProfiler with + // a real profiler linked in (successful call to startProfiler). +#ifdef PROFILER_AVAILABLE + EXPECT_EQ(Http::Code::OK, postCallback("/cpuprofiler?enable=y", header_map, data)); + EXPECT_TRUE(Profiler::Cpu::profilerEnabled()); +#else + EXPECT_EQ(Http::Code::InternalServerError, + postCallback("/cpuprofiler?enable=y", header_map, data)); + EXPECT_FALSE(Profiler::Cpu::profilerEnabled()); +#endif + + EXPECT_EQ(Http::Code::OK, postCallback("/cpuprofiler?enable=n", header_map, data)); + EXPECT_FALSE(Profiler::Cpu::profilerEnabled()); +} + +TEST_P(AdminInstanceTest, AdminHeapProfilerOnRepeatedRequest) { + Buffer::OwnedImpl data; + Http::ResponseHeaderMapImpl header_map; + auto repeatResultCode = Http::Code::BadRequest; +#ifndef PROFILER_AVAILABLE + repeatResultCode = Http::Code::NotImplemented; +#endif + + postCallback("/heapprofiler?enable=y", header_map, data); + EXPECT_EQ(repeatResultCode, postCallback("/heapprofiler?enable=y", header_map, data)); + + postCallback("/heapprofiler?enable=n", header_map, data); + EXPECT_EQ(repeatResultCode, postCallback("/heapprofiler?enable=n", header_map, data)); +} + +TEST_P(AdminInstanceTest, AdminHeapProfiler) { + Buffer::OwnedImpl data; + Http::ResponseHeaderMapImpl header_map; + + // The below flow need to begin with the profiler not running + Profiler::Heap::stopProfiler(); + +#ifdef PROFILER_AVAILABLE + EXPECT_EQ(Http::Code::OK, postCallback("/heapprofiler?enable=y", header_map, data)); + EXPECT_TRUE(Profiler::Heap::isProfilerStarted()); + EXPECT_EQ(Http::Code::OK, postCallback("/heapprofiler?enable=n", header_map, data)); +#else + EXPECT_EQ(Http::Code::NotImplemented, postCallback("/heapprofiler?enable=y", header_map, data)); + EXPECT_FALSE(Profiler::Heap::isProfilerStarted()); + EXPECT_EQ(Http::Code::NotImplemented, postCallback("/heapprofiler?enable=n", header_map, data)); +#endif + + 
EXPECT_FALSE(Profiler::Heap::isProfilerStarted()); +} + +TEST_P(AdminInstanceTest, AdminBadProfiler) { + Buffer::OwnedImpl data; + AdminImpl admin_bad_profile_path(TestEnvironment::temporaryPath("some/unlikely/bad/path.prof"), + server_); + Http::ResponseHeaderMapImpl header_map; + const absl::string_view post = Http::Headers::get().MethodValues.Post; + request_headers_.setMethod(post); + admin_filter_.decodeHeaders(request_headers_, false); + EXPECT_NO_LOGS(EXPECT_EQ(Http::Code::InternalServerError, + admin_bad_profile_path.runCallback("/cpuprofiler?enable=y", header_map, + data, admin_filter_))); + EXPECT_FALSE(Profiler::Cpu::profilerEnabled()); +} + +} // namespace Server +} // namespace Envoy From dfaee4bc46e8309c25c1f80c95ca456d00b39e99 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Wed, 13 May 2020 14:18:07 -0400 Subject: [PATCH 152/909] docs: fix file path (#11175) Signed-off-by: Alyssa Wilk --- docs/root/intro/arch_overview/http/upgrades.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/root/intro/arch_overview/http/upgrades.rst b/docs/root/intro/arch_overview/http/upgrades.rst index a19ed35c695e..a00b43d15d0d 100644 --- a/docs/root/intro/arch_overview/http/upgrades.rst +++ b/docs/root/intro/arch_overview/http/upgrades.rst @@ -97,8 +97,8 @@ An example set up proxying SMTP would look something like this [SMTP Upstream] --- raw SMTP --- [L2 Envoy] --- SMTP tunneled over HTTP/2 --- [L1 Envoy] --- raw SMTP --- [Client] Examples of such a set up can be found in the Envoy example config :repo:`directory ` -If you run `bazel-bin/source/exe/envoy-static --config-path configs/encapsulate_in_connect.yaml --base-id 1` -and `bazel-bin/source/exe/envoy-static --config-path configs/terminate_connect.yaml` +If you run `bazel-bin/source/exe/envoy-static --config-path configs/encapsulate_in_connect.v3.yaml --base-id 1` +and `bazel-bin/source/exe/envoy-static --config-path configs/terminate_connect.v3.yaml` you will be running two Envoys, the first listening for TCP traffic on port 10000 and encapsulating it in an HTTP/2 CONNECT request, and the second listening for HTTP/2 on 10001, stripping the CONNECT headers, and forwarding the original TCP upstream, in this case to google.com. 
From 2e02a43b3f73793c9399c68db1598d19d3d48ddc Mon Sep 17 00:00:00 2001 From: Dmitry Rozhkov Date: Wed, 13 May 2020 22:29:26 +0300 Subject: [PATCH 153/909] docs: fix typos in config sample for compressor http-filter (#11162) Signed-off-by: Dmitry Rozhkov --- .../configuration/http/http_filters/compressor_filter.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/root/configuration/http/http_filters/compressor_filter.rst b/docs/root/configuration/http/http_filters/compressor_filter.rst index 08e7298e1dc6..9b59b15a8d15 100644 --- a/docs/root/configuration/http/http_filters/compressor_filter.rst +++ b/docs/root/configuration/http/http_filters/compressor_filter.rst @@ -42,10 +42,10 @@ An example configuration of the filter may look like the following: compressor_library: name: text_optimized typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.compressor.gzip.v3.Gzip + "@type": type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip memory_level: 3 window_bits: 10 - compression_level: best + compression_level: best_compression compression_strategy: default_strategy By *default* compression will be *skipped* when: From c3e0ad3c549f06fb0dcf99f4e39b7858ff07aff5 Mon Sep 17 00:00:00 2001 From: "Adi (Suissa) Peleg" Date: Wed, 13 May 2020 15:30:01 -0400 Subject: [PATCH 154/909] Fixing order of variables definition in codec_impl fuzz test (#11168) This happens when Envoy::Http::Http1::StreamEncoderImpl::~StreamEncoderImpl() is called, and readDisable() is called on connection_ which is already destroyed. (The connection is passed as a reference to StreamEncoderImpl.) Signed-off-by: Adi Suissa-Peleg --- .../http/codec_impl_corpus/read_disable | 25 +++++++++++++++++++ test/common/http/codec_impl_fuzz_test.cc | 5 ++-- 2 files changed, 28 insertions(+), 2 deletions(-) create mode 100644 test/common/http/codec_impl_corpus/read_disable diff --git a/test/common/http/codec_impl_corpus/read_disable b/test/common/http/codec_impl_corpus/read_disable new file mode 100644 index 000000000000..d0525e6285ad --- /dev/null +++ b/test/common/http/codec_impl_corpus/read_disable @@ -0,0 +1,25 @@ +actions { + new_stream { + request_headers { + headers { + key: ":method" + value: "GET" + } + headers { + key: ":path" + value: "/" + } + } + } +} +actions { + client_drain { + } +} +actions { + stream_action { + response { + read_disable: true + } + } +} diff --git a/test/common/http/codec_impl_fuzz_test.cc b/test/common/http/codec_impl_fuzz_test.cc index 2beb87908c6a..7163b53c0dd8 100644 --- a/test/common/http/codec_impl_fuzz_test.cc +++ b/test/common/http/codec_impl_fuzz_test.cc @@ -418,11 +418,14 @@ void codecFuzz(const test::common::http::CodecImplFuzzTestCase& input, HttpVersi fromHttp2Settings(input.h2_settings().client())}; const Http1Settings client_http1settings; NiceMock client_callbacks; + NiceMock server_connection; + NiceMock server_callbacks; uint32_t max_request_headers_kb = Http::DEFAULT_MAX_REQUEST_HEADERS_KB; uint32_t max_request_headers_count = Http::DEFAULT_MAX_HEADERS_COUNT; uint32_t max_response_headers_count = Http::DEFAULT_MAX_HEADERS_COUNT; const envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction headers_with_underscores_action = envoy::config::core::v3::HttpProtocolOptions::ALLOW; + ClientConnectionPtr client; ServerConnectionPtr server; const bool http2 = http_version == HttpVersion::Http2; @@ -438,8 +441,6 @@ void codecFuzz(const test::common::http::CodecImplFuzzTestCase& input, HttpVersi 
max_response_headers_count); } - NiceMock server_connection; - NiceMock server_callbacks; if (http2) { const envoy::config::core::v3::Http2ProtocolOptions server_http2_options{ fromHttp2Settings(input.h2_settings().server())}; From 5ef1007d69fa5f590cec29d93258087c59be3cce Mon Sep 17 00:00:00 2001 From: Teju Nareddy Date: Wed, 13 May 2020 15:32:55 -0400 Subject: [PATCH 155/909] Add support to fuzz proto data in uber filter fuzzer (#10796) The uber filter fuzzer is not very efficient in fuzzing decodeData with serialized proto bodies. Add some specialized logic that allows libprotobufmutator to generate google.protobuf.Any messages. Uber filter fuzzer then uses the serialized value as the data for decodeData. This should allow better fuzz coverage in the gRPC Transcoding filter. Signed-off-by: Teju Nareddy --- .../http/common/fuzz/filter_corpus/buffer1 | 4 ++- ...h-3014465358f0947e73ac12ccb40b299d5b0646b3 | 6 ++-- ...h-bb74d7280823776808e881b20c0a9c87f7a2163b | 4 ++- .../http/common/fuzz/filter_corpus/grpc_json | 23 ------------- .../filter_corpus/grpc_transcoding_http_data | 24 ++++++++++++++ .../filter_corpus/grpc_transcoding_proto_data | 32 +++++++++++++++++++ .../http/common/fuzz/filter_fuzz_test.cc | 7 ++++ .../filters/http/common/fuzz/uber_filter.cc | 30 +++++++++++++---- .../filters/http/common/fuzz/uber_filter.h | 7 ++++ .../http/common/fuzz/uber_per_filter.cc | 26 +++++++++++++++ test/fuzz/common.proto | 23 ++++++++++++- tools/code_format/check_format.py | 1 + 12 files changed, 153 insertions(+), 34 deletions(-) delete mode 100644 test/extensions/filters/http/common/fuzz/filter_corpus/grpc_json create mode 100644 test/extensions/filters/http/common/fuzz/filter_corpus/grpc_transcoding_http_data create mode 100644 test/extensions/filters/http/common/fuzz/filter_corpus/grpc_transcoding_proto_data diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/buffer1 b/test/extensions/filters/http/common/fuzz/filter_corpus/buffer1 index a1bf00f67a61..9b8bf63c7ea9 100644 --- a/test/extensions/filters/http/common/fuzz/filter_corpus/buffer1 +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/buffer1 @@ -15,5 +15,7 @@ data { "a" value : "b" } } - data: "hello" + http_body { + data: "hello" + } } diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/crash-3014465358f0947e73ac12ccb40b299d5b0646b3 b/test/extensions/filters/http/common/fuzz/filter_corpus/crash-3014465358f0947e73ac12ccb40b299d5b0646b3 index fb63866ea5a5..72bcfa0b0bae 100644 --- a/test/extensions/filters/http/common/fuzz/filter_corpus/crash-3014465358f0947e73ac12ccb40b299d5b0646b3 +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/crash-3014465358f0947e73ac12ccb40b299d5b0646b3 @@ -7,8 +7,10 @@ data { value: "\360\240\240\240\360\240\240\240\360\240\240\240\360\240\240\240\360\240\240\240\360\240\240\240\360\240\240\240\360\240\240\240\360\240\240\240\360\240\240\240\360\240\240\240\360\240\240\240\360\240\240\240\360\240\240\240\360\240\240\240\360\240\240\240\360\240\240\240\360\240\240\240\360\240\240\240\360\240\240\240\360\240\240\240\360\240\240\240\360\240\240\240\314\255" } } - data: "\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177" - data: 
"\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177" + http_body { + data: "\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177" + data: "\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177" + } trailers { headers { key: "6" diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/crash-bb74d7280823776808e881b20c0a9c87f7a2163b b/test/extensions/filters/http/common/fuzz/filter_corpus/crash-bb74d7280823776808e881b20c0a9c87f7a2163b index 3eea853ad21e..7c2bbfbae7f7 100644 --- a/test/extensions/filters/http/common/fuzz/filter_corpus/crash-bb74d7280823776808e881b20c0a9c87f7a2163b +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/crash-bb74d7280823776808e881b20c0a9c87f7a2163b @@ -6,7 +6,9 @@ config { } } data { - data: "\001\000\000\t" + http_body { + data: "\001\000\000\t" + } trailers { headers { key: "0" diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/grpc_json b/test/extensions/filters/http/common/fuzz/filter_corpus/grpc_json deleted file mode 100644 index 3846826fa9d8..000000000000 --- a/test/extensions/filters/http/common/fuzz/filter_corpus/grpc_json +++ /dev/null @@ -1,23 +0,0 @@ -config { -name: "envoy.filters.http.grpc_json_transcoder" -typed_config: { -} -} - -data { -headers { -headers { - key: "content-type" - value: "application/json" -} -headers { - key: ":method" - value: "POST" -} -headers { - key: ":path" - value: "/bookstore.Bookstore/CreateShelfWithPackageServiceAndMethod" -} -} -data: "{\"theme\": \"Children\"}" -} \ No newline at end of file diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/grpc_transcoding_http_data b/test/extensions/filters/http/common/fuzz/filter_corpus/grpc_transcoding_http_data new file mode 100644 index 000000000000..cf0e8282a083 --- /dev/null +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/grpc_transcoding_http_data @@ -0,0 +1,24 @@ +config { + name: "envoy.filters.http.grpc_json_transcoder" + typed_config: {} +} + +data { + headers { + headers { + key: "content-type" + value: "application/json" + } + headers { + key: ":method" + value: "POST" + } + headers { + key: ":path" + value: "/bookstore.Bookstore/CreateShelfWithPackageServiceAndMethod" + } + } + http_body { + data: "{\"theme\": \"Children\"}" + } +} \ No newline at end of file diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/grpc_transcoding_proto_data b/test/extensions/filters/http/common/fuzz/filter_corpus/grpc_transcoding_proto_data new file mode 100644 index 000000000000..711ea9f66ec5 --- 
/dev/null +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/grpc_transcoding_proto_data @@ -0,0 +1,32 @@ +config { + name: "envoy.filters.http.grpc_json_transcoder" + typed_config: {} +} + +data { + headers { + headers { + key: "content-type" + value: "application/json" + } + headers { + key: ":method" + value: "POST" + } + headers { + key: ":path" + value: "/bookstore.Bookstore/CreateShelf" + } + } + proto_body { + message { + [type.googleapis.com/bookstore.CreateShelfRequest] { + shelf: { + id: 32 + theme: "Children" + } + } + } + chunk_size: 3 + } +} \ No newline at end of file diff --git a/test/extensions/filters/http/common/fuzz/filter_fuzz_test.cc b/test/extensions/filters/http/common/fuzz/filter_fuzz_test.cc index 8a20604d7a03..edfa89f917c7 100644 --- a/test/extensions/filters/http/common/fuzz/filter_fuzz_test.cc +++ b/test/extensions/filters/http/common/fuzz/filter_fuzz_test.cc @@ -36,6 +36,13 @@ DEFINE_PROTO_FUZZER(const test::extensions::filters::http::FilterFuzzTestCase& i input->mutable_config()->mutable_typed_config()->set_type_url( absl::StrCat("type.googleapis.com/", factory->createEmptyConfigProto()->GetDescriptor()->full_name())); + + // For fuzzing proto data, guide the mutator to useful 'Any' types half + // the time. The other half the time, let the fuzzing engine choose + // any message to serialize. + if (seed % 2 == 0 && input->data().has_proto_body()) { + UberFilterFuzzer::guideAnyProtoType(input->mutable_data(), seed / 2); + } }}; try { diff --git a/test/extensions/filters/http/common/fuzz/uber_filter.cc b/test/extensions/filters/http/common/fuzz/uber_filter.cc index 7ec9b022b99a..a88cc585a72e 100644 --- a/test/extensions/filters/http/common/fuzz/uber_filter.cc +++ b/test/extensions/filters/http/common/fuzz/uber_filter.cc @@ -28,6 +28,22 @@ UberFilterFuzzer::UberFilterFuzzer() { perFilterSetup(); } +std::vector UberFilterFuzzer::parseHttpData(const test::fuzz::HttpData& data) { + std::vector data_chunks; + + if (data.has_http_body()) { + data_chunks.reserve(data.http_body().data_size()); + for (const std::string& http_data : data.http_body().data()) { + data_chunks.push_back(http_data); + } + } else if (data.has_proto_body()) { + const std::string serialized = data.proto_body().message().value(); + data_chunks = absl::StrSplit(serialized, absl::ByLength(data.proto_body().chunk_size())); + } + + return data_chunks; +} + void UberFilterFuzzer::decode(Http::StreamDecoderFilter* filter, const test::fuzz::HttpData& data) { bool end_stream = false; @@ -42,22 +58,24 @@ void UberFilterFuzzer::decode(Http::StreamDecoderFilter* filter, const test::fuz headers.setHost("foo.com"); } - if (data.data().empty() && !data.has_trailers()) { + if (data.body_case() == test::fuzz::HttpData::BODY_NOT_SET && !data.has_trailers()) { end_stream = true; } - ENVOY_LOG_MISC(debug, "Decoding headers: {} ", data.headers().DebugString()); + ENVOY_LOG_MISC(debug, "Decoding headers (end_stream={}): {} ", end_stream, + data.headers().DebugString()); const auto& headersStatus = filter->decodeHeaders(headers, end_stream); if (headersStatus != Http::FilterHeadersStatus::Continue && headersStatus != Http::FilterHeadersStatus::StopIteration) { return; } - for (int i = 0; i < data.data().size(); i++) { - if (i == data.data().size() - 1 && !data.has_trailers()) { + const std::vector data_chunks = parseHttpData(data); + for (size_t i = 0; i < data_chunks.size(); i++) { + if (!data.has_trailers() && i == data_chunks.size() - 1) { end_stream = true; } - Buffer::OwnedImpl 
buffer(data.data().Get(i)); - ENVOY_LOG_MISC(debug, "Decoding data: {} ", buffer.toString()); + Buffer::OwnedImpl buffer(data_chunks[i]); + ENVOY_LOG_MISC(debug, "Decoding data (end_stream={}): {} ", end_stream, buffer.toString()); if (filter->decodeData(buffer, end_stream) != Http::FilterDataStatus::Continue) { return; } diff --git a/test/extensions/filters/http/common/fuzz/uber_filter.h b/test/extensions/filters/http/common/fuzz/uber_filter.h index a18d1ae8057b..511c587a6e62 100644 --- a/test/extensions/filters/http/common/fuzz/uber_filter.h +++ b/test/extensions/filters/http/common/fuzz/uber_filter.h @@ -16,14 +16,21 @@ class UberFilterFuzzer { proto_config, const test::fuzz::HttpData& data); + // For fuzzing proto data, guide the mutator to useful 'Any' types. + static void guideAnyProtoType(test::fuzz::HttpData* mutable_data, uint choice); + protected: // Set-up filter specific mock expectations in constructor. void perFilterSetup(); // Filter specific input cleanup. void cleanFuzzedConfig(absl::string_view filter_name, Protobuf::Message* message); + // Parses http or proto body into chunks. + std::vector parseHttpData(const test::fuzz::HttpData& data); + // This executes the decode methods to be fuzzed. void decode(Http::StreamDecoderFilter* filter, const test::fuzz::HttpData& data); + void reset(); private: diff --git a/test/extensions/filters/http/common/fuzz/uber_per_filter.cc b/test/extensions/filters/http/common/fuzz/uber_per_filter.cc index 55ba9d253eae..353eea56f0be 100644 --- a/test/extensions/filters/http/common/fuzz/uber_per_filter.cc +++ b/test/extensions/filters/http/common/fuzz/uber_per_filter.cc @@ -44,6 +44,32 @@ void addBookstoreProtoDescriptor(Protobuf::Message* message) { } } // namespace +void UberFilterFuzzer::guideAnyProtoType(test::fuzz::HttpData* mutable_data, uint choice) { + // These types are request/response from the test Bookstore service + // for the gRPC Transcoding filter. + static const std::vector expected_types = { + "type.googleapis.com/bookstore.ListShelvesResponse", + "type.googleapis.com/bookstore.CreateShelfRequest", + "type.googleapis.com/bookstore.GetShelfRequest", + "type.googleapis.com/bookstore.DeleteShelfRequest", + "type.googleapis.com/bookstore.ListBooksRequest", + "type.googleapis.com/bookstore.CreateBookRequest", + "type.googleapis.com/bookstore.GetBookRequest", + "type.googleapis.com/bookstore.UpdateBookRequest", + "type.googleapis.com/bookstore.DeleteBookRequest", + "type.googleapis.com/bookstore.GetAuthorRequest", + "type.googleapis.com/bookstore.EchoBodyRequest", + "type.googleapis.com/bookstore.EchoStructReqResp", + "type.googleapis.com/bookstore.Shelf", + "type.googleapis.com/bookstore.Book", + "type.googleapis.com/google.protobuf.Empty", + "type.googleapis.com/google.api.HttpBody", + }; + ProtobufWkt::Any* mutable_any = mutable_data->mutable_proto_body()->mutable_message(); + const std::string& type_url = expected_types[choice % expected_types.size()]; + mutable_any->set_type_url(type_url); +} + void UberFilterFuzzer::cleanFuzzedConfig(absl::string_view filter_name, Protobuf::Message* message) { // Map filter name to clean-up function. 
diff --git a/test/fuzz/common.proto b/test/fuzz/common.proto index b32db65c98e0..92df9a1b4021 100644 --- a/test/fuzz/common.proto +++ b/test/fuzz/common.proto @@ -5,6 +5,7 @@ package test.fuzz; import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/address.proto"; +import "google/protobuf/any.proto"; import "google/protobuf/wrappers.proto"; import "validate/validate.proto"; @@ -15,9 +16,29 @@ message Headers { repeated envoy.config.core.v3.HeaderValue headers = 1; } +message HttpBody { + // The bytes that will be used as the request body. + repeated string data = 1 [(validate.rules).repeated .min_items = 1]; +} + +// HttpBody cannot efficiently create serialized protos. +// Use ProtoBody instead to test grpc data. +message ProtoBody { + // The proto message that will be serialized and used as the request body. + google.protobuf.Any message = 1 [(validate.rules).any.required = true]; + + // The size (in bytes) of each buffer when forming the requests. + uint64 chunk_size = 2 [(validate.rules).uint64.gt = 0]; +} + message HttpData { Headers headers = 1; - repeated string data = 2; + + oneof body { + HttpBody http_body = 2; + ProtoBody proto_body = 4; + } + Headers trailers = 3; } diff --git a/tools/code_format/check_format.py b/tools/code_format/check_format.py index 27bc32dbe926..d54d2a36abc4 100755 --- a/tools/code_format/check_format.py +++ b/tools/code_format/check_format.py @@ -58,6 +58,7 @@ "./test/common/config/version_converter_test.cc", "./test/common/grpc/codec_test.cc", "./test/common/grpc/codec_fuzz_test.cc", + "./test/extensions/filters/http/common/fuzz/uber_filter.h", ) # Files in these paths can use Protobuf::util::JsonStringToMessage From 97f0492333d2b7e48fdd2e450491a5c337e48f57 Mon Sep 17 00:00:00 2001 From: Auni Ahsan Date: Wed, 13 May 2020 15:35:41 -0400 Subject: [PATCH 156/909] integration test: Remove TestDrainManager (#11176) I don't think this class needs to exist, and any test-specific drain functionality can be accomplished by a mock rather than a test class. In any case, I need a real drain manager in the integration tests to support the drain enhancement work. 
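As a rough illustration of the "mock rather than a test class" approach mentioned above, here is a minimal sketch using gmock. The `DrainDecision` interface, `MockDrainDecision`, and `responseConnectionHeader` below are hypothetical names invented for this example (they are not the Envoy types touched by this patch); the point is only that per-test drain behaviour can be scripted with `EXPECT_CALL` instead of flipping a flag on a bespoke test subclass.

```cpp
// Hypothetical sketch (not Envoy code): a minimal drain-manager-style
// interface plus a gmock mock, showing how a test can script drain
// behaviour rather than relying on a dedicated Test* class.
#include <string>

#include "gmock/gmock.h"
#include "gtest/gtest.h"

class DrainDecision {
public:
  virtual ~DrainDecision() = default;
  // Returns true once the server has decided to drain connections.
  virtual bool drainClose() const = 0;
};

class MockDrainDecision : public DrainDecision {
public:
  MOCK_METHOD(bool, drainClose, (), (const, override));
};

// Toy object under test: advertises "close" once draining has started.
std::string responseConnectionHeader(const DrainDecision& drain) {
  return drain.drainClose() ? "close" : "keep-alive";
}

TEST(DrainMockExample, ClosesConnectionWhenDraining) {
  testing::NiceMock<MockDrainDecision> drain;
  EXPECT_CALL(drain, drainClose()).WillOnce(testing::Return(true));
  EXPECT_EQ("close", responseConnectionHeader(drain));
}
```

The same pattern extends to whatever drain hooks an individual test needs, while the integration test server itself can hold a real drain manager.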
Signed-off-by: Auni Ahsan --- test/integration/BUILD | 2 ++ test/integration/protocol_integration_test.cc | 10 ++++--- test/integration/server.h | 26 +++++++------------ 3 files changed, 19 insertions(+), 19 deletions(-) diff --git a/test/integration/BUILD b/test/integration/BUILD index d18b58487f89..e1def6423fe3 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -567,6 +567,7 @@ envoy_cc_test_library( "//source/extensions/transport_sockets/tap:config", "//source/extensions/transport_sockets/tls:config", "//source/server:connection_handler_lib", + "//source/server:drain_manager_lib", "//source/server:hot_restart_nop_lib", "//source/server:listener_hooks_lib", "//source/server:process_context_lib", @@ -591,6 +592,7 @@ envoy_cc_test_library( "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto", + "@envoy_api//envoy/config/listener/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto", ], ) diff --git a/test/integration/protocol_integration_test.cc b/test/integration/protocol_integration_test.cc index 4af2ea219c90..731da8c44b80 100644 --- a/test/integration/protocol_integration_test.cc +++ b/test/integration/protocol_integration_test.cc @@ -232,7 +232,13 @@ TEST_P(ProtocolIntegrationTest, DrainClose) { config_helper_.addFilter(ConfigHelper::defaultHealthCheckFilter()); initialize(); - test_server_->drainManager().draining_ = true; + absl::Notification drain_sequence_started; + test_server_->server().dispatcher().post([this, &drain_sequence_started]() { + test_server_->drainManager().startDrainSequence(nullptr); + drain_sequence_started.Notify(); + }); + drain_sequence_started.WaitForNotification(); + codec_client_ = makeHttpConnection(lookupPort("http")); auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); response->waitForEndStream(); @@ -243,8 +249,6 @@ TEST_P(ProtocolIntegrationTest, DrainClose) { if (downstream_protocol_ == Http::CodecClient::Type::HTTP2) { EXPECT_TRUE(codec_client_->sawGoAway()); } - - test_server_->drainManager().draining_ = false; } // Regression test for https://github.com/envoyproxy/envoy/issues/9873 diff --git a/test/integration/server.h b/test/integration/server.h index c951ff62145a..5f2274487bd9 100644 --- a/test/integration/server.h +++ b/test/integration/server.h @@ -6,6 +6,7 @@ #include #include +#include "envoy/config/listener/v3/listener.pb.h" #include "envoy/server/options.h" #include "envoy/server/process_context.h" #include "envoy/stats/stats.h" @@ -15,6 +16,7 @@ #include "common/common/logger.h" #include "common/common/thread.h" +#include "server/drain_manager_impl.h" #include "server/listener_hooks.h" #include "server/options_impl.h" #include "server/server.h" @@ -42,20 +44,11 @@ OptionsImpl createTestOptionsImpl(const std::string& config_path, const std::str FieldValidationConfig validation_config = FieldValidationConfig(), uint32_t concurrency = 1); -class TestDrainManager : public DrainManager { -public: - // Server::DrainManager - bool drainClose() const override { return draining_; } - void startDrainSequence(std::function) override {} - void startParentShutdownSequence() override {} - - bool draining_{}; -}; - class TestComponentFactory : public ComponentFactory { public: - Server::DrainManagerPtr createDrainManager(Server::Instance&) override { - return Server::DrainManagerPtr{new Server::TestDrainManager()}; + Server::DrainManagerPtr 
createDrainManager(Server::Instance& server) override { + return Server::DrainManagerPtr{ + new Server::DrainManagerImpl(server, envoy::config::listener::v3::Listener::MODIFY_ONLY)}; } Runtime::LoaderPtr createRuntime(Server::Instance& server, Server::Configuration::Initial& config) override { @@ -288,7 +281,7 @@ class IntegrationTestServer : public Logger::Loggable, void waitUntilListenersReady(); - Server::TestDrainManager& drainManager() { return *drain_manager_; } + Server::DrainManagerImpl& drainManager() { return *drain_manager_; } void setOnWorkerListenerAddedCb(std::function on_worker_listener_added) { on_worker_listener_added_cb_ = std::move(on_worker_listener_added); } @@ -342,8 +335,9 @@ class IntegrationTestServer : public Logger::Loggable, void onWorkerListenerRemoved() override; // Server::ComponentFactory - Server::DrainManagerPtr createDrainManager(Server::Instance&) override { - drain_manager_ = new Server::TestDrainManager(); + Server::DrainManagerPtr createDrainManager(Server::Instance& server) override { + drain_manager_ = + new Server::DrainManagerImpl(server, envoy::config::listener::v3::Listener::MODIFY_ONLY); return Server::DrainManagerPtr{drain_manager_}; } Runtime::LoaderPtr createRuntime(Server::Instance& server, @@ -395,7 +389,7 @@ class IntegrationTestServer : public Logger::Loggable, Thread::MutexBasicLockable listeners_mutex_; uint64_t pending_listeners_; ConditionalInitializer server_set_; - Server::TestDrainManager* drain_manager_{}; + Server::DrainManagerImpl* drain_manager_{}; std::function on_worker_listener_added_cb_; std::function on_worker_listener_removed_cb_; TcpDumpPtr tcp_dump_; From 11eecae656d868be92540e0276e60fc2e80efe88 Mon Sep 17 00:00:00 2001 From: Sunjay Bhatia Date: Wed, 13 May 2020 15:37:56 -0400 Subject: [PATCH 157/909] Windows dev-env doc updates (#11178) Co-authored-by: William A Rowe Jr Co-authored-by: Sunjay Bhatia Signed-off-by: William A Rowe Jr Signed-off-by: Sunjay Bhatia --- bazel/README.md | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/bazel/README.md b/bazel/README.md index f7318376b9d0..8188228d634e 100644 --- a/bazel/README.md +++ b/bazel/README.md @@ -108,7 +108,9 @@ for how to update or override dependencies. `Git` is required. The version installable via MSYS2 is sufficient. Install the Windows-native [python3](https://www.python.org/downloads/), the POSIX flavor - available via MSYS2 will not work. + available via MSYS2 will not work. You need to add a symlink for `python3.exe` pointing to + the installed `python.exe` for Bazel rules which follow POSIX conventions. Be sure to add + `pip.exe` to the PATH and install the `wheel` package. For building with MSVC (the `msvc-cl` config option), you must install at least the VC++ workload from the @@ -126,7 +128,11 @@ for how to update or override dependencies. In addition, because of the behavior of the `rules_foreign_cc` component of Bazel, set the `TMPDIR` environment variable to a path usable as a temporary directory (e.g. `C:\Windows\TEMP`). This variable is used frequently by `mktemp` from MSYS2 in the Envoy Bazel - build and can cause problems if not set to a value outside the MSYS2 filesystem. + build and can cause problems if not set to a value outside the MSYS2 filesystem. Note that + using the `ci/windows_ci_steps.sh` script (to build and run tests) will create a directory + symlink linking `C:\c` to `C:\` in order to enable build scripts run via MSYS2 to access + dependencies in the temporary directory specified above. 
If you are not using that script, you + will need to create that symlink manually. 1. Install Golang on your machine. This is required as part of building [BoringSSL](https://boringssl.googlesource.com/boringssl/+/HEAD/BUILDING.md) and also for [Buildifer](https://github.com/bazelbuild/buildtools) which is used for formatting bazel BUILD files. From 302edab55f534abd3f83a297f994b31cf3fb8f92 Mon Sep 17 00:00:00 2001 From: Joshua Marantz Date: Wed, 13 May 2020 15:43:10 -0400 Subject: [PATCH 158/909] added more required valgrind suppressions which are needed due to re2 issues in protoc-gen-validate (#11139) Commit Message: I'm not sure if this is something under Envoy's control, but there's a static RE2 instance generated by protoc-gen-validate: ``` const re2::RE2 _uuidPattern("^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$"); ``` Processing this generates a lot of valgrind warnings, and this PR gets rid of them. I don't see the string `uuidPattern` in the Envoy source so I think this might be part of the proto infrastructure. So this PR makes it so we can continue to use valgrind despite this issue. Additional Description: Risk Level: low Testing: ran valgrind on a single test. Docs Changes: n/a Release Notes: n/a Signed-off-by: Joshua Marantz --- tools/debugging/valgrind-suppressions.txt | 24 +++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/tools/debugging/valgrind-suppressions.txt b/tools/debugging/valgrind-suppressions.txt index d985fca478a1..bfb08d95b387 100644 --- a/tools/debugging/valgrind-suppressions.txt +++ b/tools/debugging/valgrind-suppressions.txt @@ -4,3 +4,27 @@ fun:free ... } +{ + re2 cond-jump failure + Memcheck:Cond + fun:_ZNK3re210SparseSetTIvE8containsEi + ... +} +{ + re2 uninit-value + Memcheck:Value8 + fun:_ZNK3re210SparseSetTIvE8containsEi + ... +} +{ + re2 cond-jump failure + Memcheck:Cond + fun:_ZNK3re211SparseArrayIiE9has_indexEi + ... +} +{ + re2 uninit-value + Memcheck:Value8 + fun:_ZNK3re211SparseArrayIiE9has_indexEi + ... +} From de7ca30bdbcaa9d0f00846858a24806b9212ff9b Mon Sep 17 00:00:00 2001 From: Keith Smiley Date: Wed, 13 May 2020 14:57:21 -0700 Subject: [PATCH 159/909] ci: Fix macOS bazelisk symlink order issue (#11181) In the case that the macOS image has the `bazel` formula symlinked to `/usr/local/bin/bazel` and bazelisk symlinked to `/usr/local/bin/bazelisk`, when you attempt to do the `brew link --overwrite` homebrew thinks bazelisk is already linked, even though the `bazel` link is wrong. By unlinking and relinking it, we force homebrew to actually overwrite `/usr/local/bin/bazel`. This appears to have started causing issues with a GitHub actions update this week. Signed-off-by: Keith Smiley --- ci/mac_ci_setup.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/ci/mac_ci_setup.sh b/ci/mac_ci_setup.sh index 378b36787588..f3991ac407ea 100755 --- a/ci/mac_ci_setup.sh +++ b/ci/mac_ci_setup.sh @@ -41,6 +41,7 @@ fi # to unlink/overwrite them to install bazelisk echo "Installing bazelbuild/tap/bazelisk" brew install --force bazelbuild/tap/bazelisk +brew unlink bazelbuild/tap/bazelisk || true if ! brew link --overwrite bazelbuild/tap/bazelisk; then echo "Failed to install and link bazelbuild/tap/bazelisk" exit 1 From e0890029be02daa7142c3147cbb70c349fba27b1 Mon Sep 17 00:00:00 2001 From: antonio Date: Wed, 13 May 2020 18:28:24 -0400 Subject: [PATCH 160/909] Prefer vector::data() over &v[0] as a way to get a pointer to vector contents. 
Use of &v[0] on an empty vector triggers undefined-behavior warnings under ASAN. (#11163) Prefer vector::data() over &v[0] as a way to get a pointer to vector contents Use of &v[0] on an empty vector triggers undefined-behavior warnings under ASAN. Signed-off-by: Antonio Vicente --- source/common/common/hex.h | 2 +- source/common/http/http2/codec_impl.cc | 10 ++-- source/common/network/utility.cc | 3 +- source/common/upstream/health_checker_impl.cc | 4 +- .../transport_sockets/tls/context_impl.cc | 4 +- ...ized-codec_impl_fuzz_test-5698895985508352 | 49 +++++++++++++++++++ test/common/http/http2/http2_frame.cc | 2 +- 7 files changed, 61 insertions(+), 13 deletions(-) create mode 100644 test/common/http/codec_impl_corpus/clusterfuzz-testcase-minimized-codec_impl_fuzz_test-5698895985508352 diff --git a/source/common/common/hex.h b/source/common/common/hex.h index d72fa77a7c95..e77ac57d50d3 100644 --- a/source/common/common/hex.h +++ b/source/common/common/hex.h @@ -16,7 +16,7 @@ class Hex final { * @return the hex encoded string representing data */ static std::string encode(const std::vector& data) { - return encode(&data[0], data.size()); + return encode(data.data(), data.size()); } /** diff --git a/source/common/http/http2/codec_impl.cc b/source/common/http/http2/codec_impl.cc index 342519dbc669..4b4292537175 100644 --- a/source/common/http/http2/codec_impl.cc +++ b/source/common/http/http2/codec_impl.cc @@ -317,8 +317,8 @@ void ConnectionImpl::StreamImpl::saveHeader(HeaderString&& name, HeaderString&& void ConnectionImpl::StreamImpl::submitTrailers(const HeaderMap& trailers) { std::vector final_headers; buildHeaders(final_headers, trailers); - int rc = - nghttp2_submit_trailer(parent_.session_, stream_id_, &final_headers[0], final_headers.size()); + int rc = nghttp2_submit_trailer(parent_.session_, stream_id_, final_headers.data(), + final_headers.size()); ASSERT(rc == 0); } @@ -373,7 +373,7 @@ int ConnectionImpl::StreamImpl::onDataSourceSend(const uint8_t* framehd, size_t void ConnectionImpl::ClientStreamImpl::submitHeaders(const std::vector& final_headers, nghttp2_data_provider* provider) { ASSERT(stream_id_ == -1); - stream_id_ = nghttp2_submit_request(parent_.session_, nullptr, &final_headers.data()[0], + stream_id_ = nghttp2_submit_request(parent_.session_, nullptr, final_headers.data(), final_headers.size(), provider, base()); ASSERT(stream_id_ > 0); } @@ -381,7 +381,7 @@ void ConnectionImpl::ClientStreamImpl::submitHeaders(const std::vector& final_headers, nghttp2_data_provider* provider) { ASSERT(stream_id_ != -1); - int rc = nghttp2_submit_response(parent_.session_, stream_id_, &final_headers.data()[0], + int rc = nghttp2_submit_response(parent_.session_, stream_id_, final_headers.data(), final_headers.size(), provider); ASSERT(rc == 0); } @@ -996,7 +996,7 @@ void ConnectionImpl::sendSettings( {NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS, http2_options.max_concurrent_streams().value()}, {NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE, http2_options.initial_stream_window_size().value()}}); if (!settings.empty()) { - int rc = nghttp2_submit_settings(session_, NGHTTP2_FLAG_NONE, &settings[0], settings.size()); + int rc = nghttp2_submit_settings(session_, NGHTTP2_FLAG_NONE, settings.data(), settings.size()); ASSERT(rc == 0); } else { // nghttp2_submit_settings need to be called at least once diff --git a/source/common/network/utility.cc b/source/common/network/utility.cc index 891e6f995ef7..2d138e1961be 100644 --- a/source/common/network/utility.cc +++ b/source/common/network/utility.cc @@ -506,8 
+506,7 @@ Api::IoCallUint64Result Utility::writeToSocket(IoHandle& handle, const Buffer::I const Address::Ip* local_ip, const Address::Instance& peer_address) { Buffer::RawSliceVector slices = buffer.getRawSlices(); - return writeToSocket(handle, !slices.empty() ? &slices[0] : nullptr, slices.size(), local_ip, - peer_address); + return writeToSocket(handle, slices.data(), slices.size(), local_ip, peer_address); } Api::IoCallUint64Result Utility::writeToSocket(IoHandle& handle, Buffer::RawSlice* slices, diff --git a/source/common/upstream/health_checker_impl.cc b/source/common/upstream/health_checker_impl.cc index 87be8234663b..17a1f1fbdab7 100644 --- a/source/common/upstream/health_checker_impl.cc +++ b/source/common/upstream/health_checker_impl.cc @@ -426,7 +426,7 @@ TcpHealthCheckMatcher::MatchSegments TcpHealthCheckMatcher::loadProtoBytes( bool TcpHealthCheckMatcher::match(const MatchSegments& expected, const Buffer::Instance& buffer) { uint64_t start_index = 0; for (const std::vector& segment : expected) { - ssize_t search_result = buffer.search(&segment[0], segment.size(), start_index); + ssize_t search_result = buffer.search(segment.data(), segment.size(), start_index); if (search_result == -1) { return false; } @@ -528,7 +528,7 @@ void TcpHealthCheckerImpl::TcpActiveHealthCheckSession::onInterval() { if (!parent_.send_bytes_.empty()) { Buffer::OwnedImpl data; for (const std::vector& segment : parent_.send_bytes_) { - data.add(&segment[0], segment.size()); + data.add(segment.data(), segment.size()); } client_->write(data, false); diff --git a/source/extensions/transport_sockets/tls/context_impl.cc b/source/extensions/transport_sockets/tls/context_impl.cc index 2aaec7f14b88..11853e04dbec 100644 --- a/source/extensions/transport_sockets/tls/context_impl.cc +++ b/source/extensions/transport_sockets/tls/context_impl.cc @@ -451,7 +451,7 @@ ContextImpl::ContextImpl(Stats::Scope& scope, const Envoy::Ssl::ContextConfig& c int ServerContextImpl::alpnSelectCallback(const unsigned char** out, unsigned char* outlen, const unsigned char* in, unsigned int inlen) { // Currently this uses the standard selection algorithm in priority order. 
- const uint8_t* alpn_data = &parsed_alpn_protocols_[0]; + const uint8_t* alpn_data = parsed_alpn_protocols_.data(); size_t alpn_data_size = parsed_alpn_protocols_.size(); if (SSL_select_next_proto(const_cast(out), outlen, alpn_data, alpn_data_size, in, @@ -821,7 +821,7 @@ ClientContextImpl::ClientContextImpl(Stats::Scope& scope, ASSERT(tls_contexts_.size() == 1); if (!parsed_alpn_protocols_.empty()) { for (auto& ctx : tls_contexts_) { - const int rc = SSL_CTX_set_alpn_protos(ctx.ssl_ctx_.get(), &parsed_alpn_protocols_[0], + const int rc = SSL_CTX_set_alpn_protos(ctx.ssl_ctx_.get(), parsed_alpn_protocols_.data(), parsed_alpn_protocols_.size()); RELEASE_ASSERT(rc == 0, Utility::getLastCryptoError().value_or("")); } diff --git a/test/common/http/codec_impl_corpus/clusterfuzz-testcase-minimized-codec_impl_fuzz_test-5698895985508352 b/test/common/http/codec_impl_corpus/clusterfuzz-testcase-minimized-codec_impl_fuzz_test-5698895985508352 new file mode 100644 index 000000000000..19f5125b23de --- /dev/null +++ b/test/common/http/codec_impl_corpus/clusterfuzz-testcase-minimized-codec_impl_fuzz_test-5698895985508352 @@ -0,0 +1,49 @@ +actions { + new_stream { + request_headers { + headers { + key: ":method" + value: "5" + } + headers { + key: ":path" + value: "/" + } + headers { + key: ":scheme" + value: "r" + } + headers { + key: ":authority" + value: "5" + } + } + } +} +actions { + client_drain { + } +} +actions { + stream_action { + response { + headers { + } + } + } +} +actions { + stream_action { + request { + data: 1 + } + } +} +actions { + stream_action { + request { + trailers { + } + } + } +} diff --git a/test/common/http/http2/http2_frame.cc b/test/common/http/http2/http2_frame.cc index a0b5f0a91268..a044e644cb45 100644 --- a/test/common/http/http2/http2_frame.cc +++ b/test/common/http/http2/http2_frame.cc @@ -33,7 +33,7 @@ const char Http2Frame::Preamble[25] = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"; void Http2Frame::setHeader(absl::string_view header) { ASSERT(header.size() >= HeaderSize); data_.assign(HeaderSize, 0); - memcpy(&data_[0], header.data(), HeaderSize); + memcpy(data_.data(), header.data(), HeaderSize); data_.resize(HeaderSize + payloadSize()); } From 6c4aae5c293f26a09d260345e31253a79e8ee5e4 Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Wed, 13 May 2020 15:36:28 -0700 Subject: [PATCH 161/909] ci: enable arm64 binary only build in postsubmit (#11157) Experimentally enable binary only arm64 build in postsubmit. #1861 Additional Description: Risk Level: Low Testing: CI Docs Changes: N/A Release Notes: N/A (not releasing anything really) Signed-off-by: Lizan Zhou --- .azure-pipelines/bazel.yml | 27 +++++++++++++++++++++++---- .azure-pipelines/pipelines.yml | 13 +++++++++++++ 2 files changed, 36 insertions(+), 4 deletions(-) diff --git a/.azure-pipelines/bazel.yml b/.azure-pipelines/bazel.yml index 40ebbba15e2d..0d0ed6302664 100644 --- a/.azure-pipelines/bazel.yml +++ b/.azure-pipelines/bazel.yml @@ -3,6 +3,18 @@ parameters: displayName: "CI target" type: string default: bazel.release + - name: rbe + displayName: "Enable RBE" + type: string + default: "true" + - name: bazelBuildExtraOptions + type: string + # Use https://docs.bazel.build/versions/master/command-line-reference.html#flag--experimental_repository_cache_hardlinks + # to save disk space. 
+ default: "--config=remote-ci --jobs=$(RbeJobs) --curses=no --experimental_repository_cache_hardlinks" + - name: managedAgent + type: boolean + default: true steps: - task: Cache@2 @@ -13,6 +25,7 @@ steps: - bash: .azure-pipelines/cleanup.sh displayName: "Removing tools from agent" + condition: ${{ parameters.managedAgent }} - bash: | echo "disk space at beginning of build:" @@ -27,15 +40,14 @@ steps: }' | sudo tee /etc/docker/daemon.json sudo service docker restart displayName: "Enable IPv6" + condition: ${{ parameters.managedAgent }} - script: ci/run_envoy_docker.sh 'ci/do_ci.sh ${{ parameters.ciTarget }}' workingDirectory: $(Build.SourcesDirectory) env: ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory) - ENVOY_RBE: "true" - # Use https://docs.bazel.build/versions/master/command-line-reference.html#flag--experimental_repository_cache_hardlinks - # to save disk space. - BAZEL_BUILD_EXTRA_OPTIONS: "--config=remote-ci --jobs=$(RbeJobs) --curses=no --experimental_repository_cache_hardlinks" + ENVOY_RBE: "${{ parameters.rbe }}" + BAZEL_BUILD_EXTRA_OPTIONS: "${{ parameters.bazelBuildExtraOptions }}" BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) @@ -59,3 +71,10 @@ steps: pathtoPublish: "$(Build.StagingDirectory)/envoy" artifactName: ${{ parameters.ciTarget }} condition: always() + + # TODO(lizan): This is a workaround for self hosted azure agent can't clean up bazel local cache due to + # permission. Remove this once it is resolved. + - bash: | + chmod -R u+w $(Build.StagingDirectory) + displayName: "Self hosted agent clean up" + condition: eq(false, ${{ parameters.managedAgent }}) diff --git a/.azure-pipelines/pipelines.yml b/.azure-pipelines/pipelines.yml index 5e1f442abb09..5ae719be6732 100644 --- a/.azure-pipelines/pipelines.yml +++ b/.azure-pipelines/pipelines.yml @@ -50,6 +50,19 @@ jobs: parameters: ciTarget: bazel.release + - job: release_arm64 + displayName: "Linux-arm64 release.server_only" + dependsOn: ["format"] + condition: ne(variables['Build.Reason'], 'PullRequest') + pool: "arm-large" + steps: + - template: bazel.yml + parameters: + managedAgent: false + ciTarget: bazel.release.server_only + rbe: "" + bazelBuildExtraOptions: "--curses=no" + - job: bazel displayName: "Linux-x64" dependsOn: ["release"] From 8771d31f8595531bcd040bf6f42f7a8316c5fc9d Mon Sep 17 00:00:00 2001 From: Joshua Marantz Date: Wed, 13 May 2020 19:01:30 -0400 Subject: [PATCH 162/909] stat: Speed up StatNameTest.TestDynamic100k by covering only sizes near powers of 2 (#11180) It's interesting to cover encoding-tests for strings of size of powers of 2, and a few size increments above & below. But the previous test was testing every single size possibility and most of them are boring. This resulted in excessive tsan test times. Signed-off-by: Joshua Marantz --- test/common/stats/symbol_table_impl_test.cc | 40 ++++++++++++++------- 1 file changed, 27 insertions(+), 13 deletions(-) diff --git a/test/common/stats/symbol_table_impl_test.cc b/test/common/stats/symbol_table_impl_test.cc index b7def5f19039..5913b47b4be6 100644 --- a/test/common/stats/symbol_table_impl_test.cc +++ b/test/common/stats/symbol_table_impl_test.cc @@ -142,21 +142,35 @@ TEST_P(StatNameTest, TestEmpty) { } TEST_P(StatNameTest, TestDynamic100k) { - // Tests 100k different sizes of dynamic stat, covering all kinds of - // corner cases of spilling over into multi-byte lengths. 
+ // Tests a variety different sizes of dynamic stat ranging to 500k, covering + // potential corner cases of spilling over into multi-byte lengths. + std::string stat_str("dyn.x"); + char ch = '\001'; + StatName ab = makeStat("a.b"); + StatName cd = makeStat("c.d"); + auto test_at_size = [this, &stat_str, &ch, ab, cd](uint32_t size) { + if (size > stat_str.size()) { + // Add rotating characters to stat_str until we hit size. + for (uint32_t i = stat_str.size(); i < size; ++i, ++ch) { + stat_str += (ch == '.') ? 'x' : ch; + } + StatNameDynamicStorage storage(stat_str, *table_); + StatName dynamic = storage.statName(); + EXPECT_EQ(stat_str, table_->toString(dynamic)); + SymbolTable::StoragePtr joined = table_->join({ab, dynamic, cd}); + EXPECT_EQ(absl::StrCat("a.b.", stat_str, ".c.d"), table_->toString(StatName(joined.get()))); + } + }; - std::string stat_str("dynamic_stat.x"); - for (int i = 0; i < 100 * 1000; ++i) { - char ch = i % 256; - if (ch == '.') { - ch = 'x'; + // The outer-loop hits powers of 2 from 8 to 512k. + for (uint32_t i = 3; i < 20; ++i) { + int32_t pow_2 = 1 << i; + + // The inner-loop covers every offset from the power of 2, between offsets of + // -10 and +10. + for (int32_t j = std::max(0, pow_2 - 10); j < pow_2 + 10; ++j) { + test_at_size(j); } - stat_str += ch; - StatNameDynamicStorage storage(stat_str, *table_); - StatName dynamic = storage.statName(); - EXPECT_EQ(stat_str, table_->toString(dynamic)); - SymbolTable::StoragePtr joined = table_->join({makeStat("a.b"), dynamic, makeStat("c.d")}); - EXPECT_EQ(absl::StrCat("a.b.", stat_str, ".c.d"), table_->toString(StatName(joined.get()))); } } From ed6a1b8b60a23e6bae7f08f990d1680b26b26cde Mon Sep 17 00:00:00 2001 From: "Adi (Suissa) Peleg" Date: Wed, 13 May 2020 21:27:09 -0400 Subject: [PATCH 163/909] Upgrading libevent to latest commit (#11137) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Upgrading libevent to solve phantom event bug - an event that was triggered after closing connections. Risk Level: Low-medium - basic functionality hasn’t changed, but there might be a few corner cases and performance that will be affected. 
Testing: None added Signed-off-by: Adi Suissa-Peleg --- bazel/envoy_binary.bzl | 1 + bazel/envoy_test.bzl | 1 + bazel/foreign_cc/libevent_msvc.patch | 10 ---------- bazel/repositories.bzl | 2 -- bazel/repository_locations.bzl | 12 +++++++----- 5 files changed, 9 insertions(+), 17 deletions(-) delete mode 100644 bazel/foreign_cc/libevent_msvc.patch diff --git a/bazel/envoy_binary.bzl b/bazel/envoy_binary.bzl index ac15656af1d8..e53e42d8e284 100644 --- a/bazel/envoy_binary.bzl +++ b/bazel/envoy_binary.bzl @@ -61,6 +61,7 @@ def _envoy_linkopts(): "@envoy//bazel:windows_x86_64": [ "-DEFAULTLIB:advapi32.lib", "-DEFAULTLIB:ws2_32.lib", + "-DEFAULTLIB:iphlpapi.lib", "-WX", ], "//conditions:default": [ diff --git a/bazel/envoy_test.bzl b/bazel/envoy_test.bzl index 222cb99e60e5..dff388c5d07e 100644 --- a/bazel/envoy_test.bzl +++ b/bazel/envoy_test.bzl @@ -58,6 +58,7 @@ def _envoy_test_linkopts(): "@envoy//bazel:windows_x86_64": [ "-DEFAULTLIB:advapi32.lib", "-DEFAULTLIB:ws2_32.lib", + "-DEFAULTLIB:iphlpapi.lib", "-WX", ], diff --git a/bazel/foreign_cc/libevent_msvc.patch b/bazel/foreign_cc/libevent_msvc.patch deleted file mode 100644 index ebbd053c7651..000000000000 --- a/bazel/foreign_cc/libevent_msvc.patch +++ /dev/null @@ -1,10 +0,0 @@ ---- CMakeLists.txt 2019-10-02 20:20:58.449181400 -0400 -+++ CMakeLists.txt 2019-10-02 20:21:19.390279100 -0400 -@@ -236,7 +236,6 @@ - - if (EVENT__MSVC_STATIC_RUNTIME) - foreach (flag_var -- CMAKE_C_FLAGS - CMAKE_C_FLAGS_DEBUG - CMAKE_C_FLAGS_RELEASE - CMAKE_C_FLAGS_MINSIZEREL diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index 45b596629395..b67009e22424 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -344,8 +344,6 @@ def _com_github_libevent_libevent(): http_archive( name = "com_github_libevent_libevent", build_file_content = BUILD_ALL_CONTENT, - patch_args = ["-p0"], - patches = ["@envoy//bazel/foreign_cc:libevent_msvc.patch"], **location ) native.bind( diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 812290a35b35..d6c384964477 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -237,16 +237,18 @@ DEPENDENCY_REPOSITORIES = dict( use_category = ["test"], ), com_github_libevent_libevent = dict( - sha256 = "549d34065eb2485dfad6c8de638caaa6616ed130eec36dd978f73b6bdd5af113", + sha256 = "c64156c24602ab7a5c66937d774cc55868911d5bbbf1650792f5877744b1c2d9", # This SHA includes the new "prepare" and "check" watchers, used for event loop performance # stats (see https://github.com/libevent/libevent/pull/793) and the fix for a race condition # in the watchers (see https://github.com/libevent/libevent/pull/802). # This also includes the fixes for https://github.com/libevent/libevent/issues/806 # and https://github.com/lyft/envoy-mobile/issues/215. - # TODO(mergeconflict): Update to v2.2 when it is released. - strip_prefix = "libevent-0d7d85c2083f7a4c9efe01c061486f332b576d28", - # 2019-07-02 - urls = ["https://github.com/libevent/libevent/archive/0d7d85c2083f7a4c9efe01c061486f332b576d28.tar.gz"], + # This also include the fixes for Phantom events with EV_ET (see + # https://github.com/libevent/libevent/issues/984). + # TODO(adip): Update to v2.2 when it is released. 
+ strip_prefix = "libevent-06a11929511bebaaf40c52aaf91de397b1782ba2", + # 2020-05-08 + urls = ["https://github.com/libevent/libevent/archive/06a11929511bebaaf40c52aaf91de397b1782ba2.tar.gz"], use_category = ["dataplane"], cpe = "cpe:2.3:a:libevent_project:libevent:*", ), From 3550a7a7b0b858f4c2ac505efa6c71c2610f2666 Mon Sep 17 00:00:00 2001 From: asraa Date: Thu, 14 May 2020 08:24:37 -0400 Subject: [PATCH 164/909] [http] Remove exceptions from HTTP/1 codec callbacks (#11101) Commit Message: Remove exceptions from HTTP/1 codec callbacks. Replaces with http_parser exit codes that indicate failure. codec_status_ propagates the error. Additional Description: I know the diff is slightly messy but the principles I abided by were: Replace throw with setting codec_status_, immediately return and propagate return up to callback with an error exit code, always ASSERT(dispatching_) in the body of the method that throws, always ASSERT(codec_status_.ok()) before setting the codec status. The remaining exception is in encodeHeaders, which I will need to replace with ENVOY_BUG I audited for throws in the includes for this file and did not find anything used in the codec_impl, but I will need to do another pass. This is just part 1 of my HTTP/1 PRs. Part 2 is exception to error handling for encodeHeaders and any other utility functions. This is just a PR to stage. Testing: Tests pass, codec_impl_fuzz_test has been running for a few minutes. Risk level: Medium, this should do nothing but is a codec behavior change. Issues: #10878 Signed-off-by: Asra Ali asraa@google.com --- source/common/http/http1/BUILD | 1 + source/common/http/http1/codec_impl.cc | 197 +++++++++++++++++-------- source/common/http/http1/codec_impl.h | 66 ++++++--- 3 files changed, 187 insertions(+), 77 deletions(-) diff --git a/source/common/http/http1/BUILD b/source/common/http/http1/BUILD index d9b00a317157..e57b43f0cd77 100644 --- a/source/common/http/http1/BUILD +++ b/source/common/http/http1/BUILD @@ -29,6 +29,7 @@ envoy_cc_library( "//source/common/buffer:buffer_lib", "//source/common/buffer:watermark_buffer_lib", "//source/common/common:assert_lib", + "//source/common/common:cleanup_lib", "//source/common/common:statusor_lib", "//source/common/common:utility_lib", "//source/common/http:codec_helper_lib", diff --git a/source/common/http/http1/codec_impl.cc b/source/common/http/http1/codec_impl.cc index 658596cc6057..523506201bbe 100644 --- a/source/common/http/http1/codec_impl.cc +++ b/source/common/http/http1/codec_impl.cc @@ -9,6 +9,7 @@ #include "envoy/http/header_map.h" #include "envoy/network/connection.h" +#include "common/common/cleanup.h" #include "common/common/enum_to_int.h" #include "common/common/utility.h" #include "common/http/exception.h" @@ -266,9 +267,10 @@ void ServerConnectionImpl::maybeAddSentinelBufferFragment(Buffer::WatermarkBuffe outbound_responses_++; } -void ServerConnectionImpl::doFloodProtectionChecks() const { +ConnectionImpl::HttpParserCode ServerConnectionImpl::doFloodProtectionChecks() { + ASSERT(dispatching_); if (!flood_protection_) { - return; + return HttpParserCode::Success; } // Before processing another request, make sure that we are below the response flood protection // threshold. 
@@ -276,8 +278,11 @@ void ServerConnectionImpl::doFloodProtectionChecks() const { ENVOY_CONN_LOG(trace, "error accepting request: too many pending responses queued", connection_); stats_.response_flood_.inc(); - throw FrameFloodException("Too many responses queued."); + ASSERT(codec_status_.ok()); + codec_status_ = Http::bufferFloodError("Too many responses queued."); + return HttpParserCode::Error; } + return HttpParserCode::Success; } void ConnectionImpl::flushOutput(bool end_encode) { @@ -401,8 +406,7 @@ void RequestEncoderImpl::encodeHeaders(const RequestHeaderMap& headers, bool end http_parser_settings ConnectionImpl::settings_{ [](http_parser* parser) -> int { - static_cast(parser->data)->onMessageBeginBase(); - return 0; + return enumToInt(static_cast(parser->data)->onMessageBeginBase()); }, [](http_parser* parser, const char* at, size_t length) -> int { static_cast(parser->data)->onUrl(at, length); @@ -410,23 +414,20 @@ http_parser_settings ConnectionImpl::settings_{ }, nullptr, // on_status [](http_parser* parser, const char* at, size_t length) -> int { - static_cast(parser->data)->onHeaderField(at, length); - return 0; + return enumToInt(static_cast(parser->data)->onHeaderField(at, length)); }, [](http_parser* parser, const char* at, size_t length) -> int { - static_cast(parser->data)->onHeaderValue(at, length); - return 0; + return enumToInt(static_cast(parser->data)->onHeaderValue(at, length)); }, [](http_parser* parser) -> int { - return static_cast(parser->data)->onHeadersCompleteBase(); + return enumToInt(static_cast(parser->data)->onHeadersCompleteBase()); }, [](http_parser* parser, const char* at, size_t length) -> int { static_cast(parser->data)->bufferBody(at, length); return 0; }, [](http_parser* parser) -> int { - static_cast(parser->data)->onMessageCompleteBase(); - return 0; + return enumToInt(static_cast(parser->data)->onMessageCompleteBase()); }, [](http_parser* parser) -> int { // A 0-byte chunk header is used to signal the end of the chunked body. @@ -454,19 +455,23 @@ ConnectionImpl::ConnectionImpl(Network::Connection& connection, Stats::Scope& st enable_trailers_(enable_trailers), reject_unsupported_transfer_encodings_(Runtime::runtimeFeatureEnabled( "envoy.reloadable_features.reject_unsupported_transfer_encodings")), - output_buffer_([&]() -> void { this->onBelowLowWatermark(); }, - [&]() -> void { this->onAboveHighWatermark(); }), + dispatching_(false), output_buffer_([&]() -> void { this->onBelowLowWatermark(); }, + [&]() -> void { this->onAboveHighWatermark(); }), max_headers_kb_(max_headers_kb), max_headers_count_(max_headers_count) { output_buffer_.setWatermarks(connection.bufferLimit()); http_parser_init(&parser_, type); parser_.data = this; } -void ConnectionImpl::completeLastHeader() { +ConnectionImpl::HttpParserCode ConnectionImpl::completeLastHeader() { + ASSERT(dispatching_); ENVOY_CONN_LOG(trace, "completed header: key={} value={}", connection_, current_header_field_.getStringView(), current_header_value_.getStringView()); - checkHeaderNameForUnderscores(); + if (!checkHeaderNameForUnderscores()) { + // This indicates that the request should be rejected due to header name with underscores. 
+ return HttpParserCode::Error; + } auto& headers_or_trailers = headersOrTrailers(); if (!current_header_field_.empty()) { current_header_field_.inlineTransform([](char c) { return absl::ascii_tolower(c); }); @@ -484,12 +489,15 @@ void ConnectionImpl::completeLastHeader() { sendProtocolError(Http1ResponseCodeDetails::get().TooManyHeaders); const absl::string_view header_type = processing_trailers_ ? Http1HeaderTypes::get().Trailers : Http1HeaderTypes::get().Headers; - throw CodecProtocolException(absl::StrCat(header_type, " size exceeds limit")); + ASSERT(codec_status_.ok()); + codec_status_ = codecProtocolError(absl::StrCat(header_type, " size exceeds limit")); + return HttpParserCode::Error; } header_parsing_state_ = HeaderParsingState::Field; ASSERT(current_header_field_.empty()); ASSERT(current_header_value_.empty()); + return HttpParserCode::Success; } bool ConnectionImpl::maybeDirectDispatch(Buffer::Instance& data) { @@ -514,8 +522,13 @@ Http::Status ConnectionImpl::dispatch(Buffer::Instance& data) { Http::Status ConnectionImpl::innerDispatch(Buffer::Instance& data) { ENVOY_CONN_LOG(trace, "parsing {} bytes", connection_, data.length()); + // Make sure that dispatching_ is set to false after dispatching, even when + // ConnectionImpl::dispatch throws an exception. + Cleanup cleanup([this]() { dispatching_ = false; }); + ASSERT(!dispatching_); ASSERT(buffered_body_.length() == 0); + dispatching_ = true; if (maybeDirectDispatch(data)) { return Http::okStatus(); } @@ -526,7 +539,11 @@ Http::Status ConnectionImpl::innerDispatch(Buffer::Instance& data) { ssize_t total_parsed = 0; if (data.length() > 0) { for (const Buffer::RawSlice& slice : data.getRawSlices()) { - total_parsed += dispatchSlice(static_cast(slice.mem_), slice.len_); + auto statusor_parsed = dispatchSlice(static_cast(slice.mem_), slice.len_); + if (!statusor_parsed.ok()) { + return statusor_parsed.status(); + } + total_parsed += statusor_parsed.value(); if (HTTP_PARSER_ERRNO(&parser_) != HPE_OK) { // Parse errors trigger an exception in dispatchSlice so we are guaranteed to be paused at // this point. @@ -536,7 +553,10 @@ Http::Status ConnectionImpl::innerDispatch(Buffer::Instance& data) { } dispatchBufferedBody(); } else { - dispatchSlice(nullptr, 0); + auto result = dispatchSlice(nullptr, 0); + if (!result.ok()) { + return result.status(); + } } ASSERT(buffered_body_.length() == 0); @@ -549,39 +569,54 @@ Http::Status ConnectionImpl::innerDispatch(Buffer::Instance& data) { return Http::okStatus(); } -size_t ConnectionImpl::dispatchSlice(const char* slice, size_t len) { +Envoy::StatusOr ConnectionImpl::dispatchSlice(const char* slice, size_t len) { + ASSERT(codec_status_.ok() && dispatching_); ssize_t rc = http_parser_execute(&parser_, &settings_, slice, len); + if (!codec_status_.ok()) { + return codec_status_; + } if (HTTP_PARSER_ERRNO(&parser_) != HPE_OK && HTTP_PARSER_ERRNO(&parser_) != HPE_PAUSED) { sendProtocolError(Http1ResponseCodeDetails::get().HttpCodecError); - throw CodecProtocolException("http/1.1 protocol error: " + - std::string(http_errno_name(HTTP_PARSER_ERRNO(&parser_)))); + // Avoid overwriting the codec_status_ set in the callbacks. 
+ ASSERT(codec_status_.ok()); + codec_status_ = codecProtocolError( + absl::StrCat("http/1.1 protocol error: ", http_errno_name(HTTP_PARSER_ERRNO(&parser_)))); + return codec_status_; } return rc; } -void ConnectionImpl::onHeaderField(const char* data, size_t length) { +ConnectionImpl::HttpParserCode ConnectionImpl::onHeaderField(const char* data, size_t length) { + ASSERT(dispatching_); // We previously already finished up the headers, these headers are // now trailers. if (header_parsing_state_ == HeaderParsingState::Done) { if (!enable_trailers_) { // Ignore trailers. - return; + return HttpParserCode::Success; } processing_trailers_ = true; header_parsing_state_ = HeaderParsingState::Field; } if (header_parsing_state_ == HeaderParsingState::Value) { - completeLastHeader(); + HttpParserCode exit_code = completeLastHeader(); + if (exit_code == HttpParserCode::Error) { + // If an error exit code is returned, there must be an error in the codec status. + ASSERT(!codec_status_.ok()); + return HttpParserCode::Error; + } } current_header_field_.append(data, length); + return HttpParserCode::Success; } -void ConnectionImpl::onHeaderValue(const char* data, size_t length) { +ConnectionImpl::HttpParserCode ConnectionImpl::onHeaderValue(const char* data, size_t length) { + ASSERT(dispatching_); if (header_parsing_state_ == HeaderParsingState::Done && !enable_trailers_) { // Ignore trailers. - return; + return HttpParserCode::Success; } if (processing_trailers_) { @@ -595,7 +630,10 @@ void ConnectionImpl::onHeaderValue(const char* data, size_t length) { ENVOY_CONN_LOG(debug, "invalid header value: {}", connection_, header_value); error_code_ = Http::Code::BadRequest; sendProtocolError(Http1ResponseCodeDetails::get().InvalidCharacters); - throw CodecProtocolException("http/1.1 protocol error: header value contains invalid chars"); + ASSERT(codec_status_.ok()); + codec_status_ = + codecProtocolError("http/1.1 protocol error: header value contains invalid chars"); + return HttpParserCode::Error; } } @@ -616,14 +654,23 @@ void ConnectionImpl::onHeaderValue(const char* data, size_t length) { processing_trailers_ ? Http1HeaderTypes::get().Trailers : Http1HeaderTypes::get().Headers; error_code_ = Http::Code::RequestHeaderFieldsTooLarge; sendProtocolError(Http1ResponseCodeDetails::get().HeadersTooLarge); - throw CodecProtocolException(absl::StrCat(header_type, " size exceeds limit")); + ASSERT(codec_status_.ok()); + codec_status_ = codecProtocolError(absl::StrCat(header_type, " size exceeds limit")); + return HttpParserCode::Error; } + return HttpParserCode::Success; } -int ConnectionImpl::onHeadersCompleteBase() { +ConnectionImpl::HttpParserCode ConnectionImpl::onHeadersCompleteBase() { ASSERT(!processing_trailers_); + ASSERT(dispatching_); ENVOY_CONN_LOG(trace, "onHeadersCompleteBase", connection_); - completeLastHeader(); + HttpParserCode exit_code = completeLastHeader(); + if (exit_code == HttpParserCode::Error) { + // If an error exit code is returned, there must be an error in the codec status. 
+ ASSERT(!codec_status_.ok()); + return exit_code; + } if (!(parser_.http_major == 1 && parser_.http_minor == 1)) { // This is not necessarily true, but it's good enough since higher layers only care if this is @@ -670,15 +717,20 @@ int ConnectionImpl::onHeadersCompleteBase() { !absl::EqualsIgnoreCase(encoding, Headers::get().TransferEncodingValues.Chunked)) { error_code_ = Http::Code::NotImplemented; sendProtocolError(Http1ResponseCodeDetails::get().InvalidTransferEncoding); - throw CodecProtocolException("http/1.1 protocol error: unsupported transfer encoding"); + ASSERT(codec_status_.ok()); + codec_status_ = codecProtocolError("http/1.1 protocol error: unsupported transfer encoding"); + return HttpParserCode::Error; } } - int rc = onHeadersComplete(); + HttpParserCode rc = onHeadersComplete(); + if (rc == HttpParserCode::Error) { + return rc; + } header_parsing_state_ = HeaderParsingState::Done; // Returning 2 informs http_parser to not expect a body or further data on this connection. - return handling_upgrade_ ? 2 : rc; + return handling_upgrade_ ? HttpParserCode::NoBodyData : rc; } void ConnectionImpl::bufferBody(const char* data, size_t length) { @@ -687,6 +739,7 @@ void ConnectionImpl::bufferBody(const char* data, size_t length) { void ConnectionImpl::dispatchBufferedBody() { ASSERT(HTTP_PARSER_ERRNO(&parser_) == HPE_OK || HTTP_PARSER_ERRNO(&parser_) == HPE_PAUSED); + ASSERT(codec_status_.ok()); if (buffered_body_.length() > 0) { onBody(buffered_body_); buffered_body_.drain(buffered_body_.length()); @@ -701,7 +754,7 @@ void ConnectionImpl::onChunkHeader(bool is_final_chunk) { } } -void ConnectionImpl::onMessageCompleteBase() { +ConnectionImpl::HttpParserCode ConnectionImpl::onMessageCompleteBase() { ENVOY_CONN_LOG(trace, "message complete", connection_); dispatchBufferedBody(); @@ -712,19 +765,25 @@ void ConnectionImpl::onMessageCompleteBase() { ASSERT(!deferred_end_stream_headers_); ENVOY_CONN_LOG(trace, "Pausing parser due to upgrade.", connection_); http_parser_pause(&parser_, 1); - return; + return HttpParserCode::Success; } // If true, this indicates we were processing trailers and must // move the last header into current_header_map_ if (header_parsing_state_ == HeaderParsingState::Value) { - completeLastHeader(); + HttpParserCode exit_code = completeLastHeader(); + if (exit_code == HttpParserCode::Error) { + // If an error exit code is returned, there must be an error in the codec status. + ASSERT(!codec_status_.ok()); + return exit_code; + } } onMessageComplete(); + return HttpParserCode::Success; } -void ConnectionImpl::onMessageBeginBase() { +ConnectionImpl::HttpParserCode ConnectionImpl::onMessageBeginBase() { ENVOY_CONN_LOG(trace, "message begin", connection_); // Make sure that if HTTP/1.0 and HTTP/1.1 requests share a connection Envoy correctly sets // protocol for each request. 
Envoy defaults to 1.1 but sets the protocol to 1.0 where applicable @@ -733,7 +792,7 @@ void ConnectionImpl::onMessageBeginBase() { processing_trailers_ = false; header_parsing_state_ = HeaderParsingState::Field; allocHeaders(); - onMessageBegin(); + return onMessageBegin(); } void ConnectionImpl::onResetStreamBase(StreamResetReason reason) { @@ -773,7 +832,7 @@ void ServerConnectionImpl::onEncodeComplete() { } } -void ServerConnectionImpl::handlePath(RequestHeaderMap& headers, unsigned int method) { +bool ServerConnectionImpl::handlePath(RequestHeaderMap& headers, unsigned int method) { HeaderString path(Headers::get().Path); bool is_connect = (method == HTTP_CONNECT); @@ -784,7 +843,7 @@ void ServerConnectionImpl::handlePath(RequestHeaderMap& headers, unsigned int me (active_request.request_url_.getStringView()[0] == '/' || ((method == HTTP_OPTIONS) && active_request.request_url_.getStringView()[0] == '*'))) { headers.addViaMove(std::move(path), std::move(active_request.request_url_)); - return; + return true; } // If absolute_urls and/or connect are not going be handled, copy the url and return. @@ -793,13 +852,15 @@ void ServerConnectionImpl::handlePath(RequestHeaderMap& headers, unsigned int me // Absolute URLS in CONNECT requests will be rejected below by the URL class validation. if (!codec_settings_.allow_absolute_url_ && !is_connect) { headers.addViaMove(std::move(path), std::move(active_request.request_url_)); - return; + return true; } Utility::Url absolute_url; if (!absolute_url.initialize(active_request.request_url_.getStringView(), is_connect)) { sendProtocolError(Http1ResponseCodeDetails::get().InvalidUrl); - throw CodecProtocolException("http/1.1 protocol error: invalid url in request line"); + ASSERT(codec_status_.ok()); + codec_status_ = codecProtocolError("http/1.1 protocol error: invalid url in request line"); + return false; } // RFC7230#5.7 // When a proxy receives a request with an absolute-form of @@ -814,9 +875,10 @@ void ServerConnectionImpl::handlePath(RequestHeaderMap& headers, unsigned int me headers.setPath(absolute_url.pathAndQueryParams()); } active_request.request_url_.clear(); + return true; } -int ServerConnectionImpl::onHeadersComplete() { +ConnectionImpl::HttpParserCode ServerConnectionImpl::onHeadersComplete() { // Handle the case where response happens prior to request complete. It's up to upper layer code // to disconnect the connection but we shouldn't fire any more events since it doesn't make // sense. @@ -834,7 +896,9 @@ int ServerConnectionImpl::onHeadersComplete() { header_value); error_code_ = Http::Code::BadRequest; sendProtocolError(Http1ResponseCodeDetails::get().ConnectionHeaderSanitization); - throw CodecProtocolException("Invalid nominated headers in Connection."); + ASSERT(codec_status_.ok()); + codec_status_ = codecProtocolError("Invalid nominated headers in Connection."); + return HttpParserCode::Error; } } @@ -843,7 +907,11 @@ int ServerConnectionImpl::onHeadersComplete() { active_request.response_encoder_.setIsResponseToHeadRequest(parser_.method == HTTP_HEAD); active_request.response_encoder_.setIsResponseToConnectRequest(parser_.method == HTTP_CONNECT); - handlePath(*headers, parser_.method); + if (!handlePath(*headers, parser_.method)) { + // Reached a failure. 
+ ASSERT(!codec_status_.ok()); + return HttpParserCode::Error; + } ASSERT(active_request.request_url_.empty()); headers->setMethod(method_string); @@ -852,8 +920,10 @@ int ServerConnectionImpl::onHeadersComplete() { auto details = HeaderUtility::requestHeadersValid(*headers); if (details.has_value()) { sendProtocolError(details.value().get()); - throw CodecProtocolException( + ASSERT(codec_status_.ok()); + codec_status_ = codecProtocolError( "http/1.1 protocol error: request headers failed spec compliance checks"); + return HttpParserCode::Error; } // Determine here whether we have a body or not. This uses the new RFC semantics where the @@ -876,20 +946,27 @@ int ServerConnectionImpl::onHeadersComplete() { } } - return 0; + return HttpParserCode::Success; } -void ServerConnectionImpl::onMessageBegin() { +ConnectionImpl::HttpParserCode ServerConnectionImpl::onMessageBegin() { if (!resetStreamCalled()) { ASSERT(!active_request_.has_value()); active_request_.emplace(*this, header_key_formatter_.get()); auto& active_request = active_request_.value(); + if (resetStreamCalled()) { + ASSERT(codec_status_.ok()); + codec_status_ = codecClientError("cannot create new streams after calling reset"); + return HttpParserCode::Error; + } active_request.request_decoder_ = &callbacks_.newStream(active_request.response_encoder_); // Check for pipelined request flood as we prepare to accept a new request. // Parse errors that happen prior to onMessageBegin result in stream termination, it is not // possible to overflow output buffers with early parse errors. - doFloodProtectionChecks(); + return doFloodProtectionChecks(); + } else { + return HttpParserCode::Success; } } @@ -979,7 +1056,7 @@ void ServerConnectionImpl::releaseOutboundResponse( delete fragment; } -void ServerConnectionImpl::checkHeaderNameForUnderscores() { +bool ServerConnectionImpl::checkHeaderNameForUnderscores() { if (headers_with_underscores_action_ != envoy::config::core::v3::HttpProtocolOptions::ALLOW && Http::HeaderUtility::headerNameContainsUnderscore(current_header_field_.getStringView())) { if (headers_with_underscores_action_ == @@ -995,9 +1072,13 @@ void ServerConnectionImpl::checkHeaderNameForUnderscores() { error_code_ = Http::Code::BadRequest; sendProtocolError(Http1ResponseCodeDetails::get().InvalidCharacters); stats_.requests_rejected_with_underscores_in_headers_.inc(); - throw CodecProtocolException("http/1.1 protocol error: header name contains underscores"); + ASSERT(codec_status_.ok()); + codec_status_ = + codecProtocolError("http/1.1 protocol error: header name contains underscores"); + return false; } } + return true; } ClientConnectionImpl::ClientConnectionImpl(Network::Connection& connection, Stats::Scope& stats, @@ -1019,10 +1100,6 @@ bool ClientConnectionImpl::cannotHaveBody() { } RequestEncoder& ClientConnectionImpl::newStream(ResponseDecoder& response_decoder) { - if (resetStreamCalled()) { - throw CodecClientException("cannot create new streams after calling reset"); - } - // If reads were disabled due to flow control, we expect reads to always be enabled again before // reusing this connection. This is done when the response is received. 
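// Sketch (hypothetical helper, not this patch's code): the decision that the
// refactored checkHeaderNameForUnderscores() now expresses through a bool
// return instead of a throw. UnderscoreAction mirrors the proto enum
// envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction.
#include <string>

enum class UnderscoreAction { Allow, RejectRequest, DropHeader };

// Returns false when the whole request should be rejected; when the header is
// merely dropped, the name is cleared and parsing continues.
inline bool checkUnderscores(std::string& header_name, UnderscoreAction action) {
  if (action == UnderscoreAction::Allow || header_name.find('_') == std::string::npos) {
    return true;
  }
  if (action == UnderscoreAction::DropHeader) {
    header_name.clear();
    return true;
  }
  // RejectRequest: the caller records a codec error status and stops parsing.
  return false;
}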
ASSERT(connection_.readEnabled()); @@ -1034,12 +1111,14 @@ RequestEncoder& ClientConnectionImpl::newStream(ResponseDecoder& response_decode return pending_response_.value().encoder_; } -int ClientConnectionImpl::onHeadersComplete() { +ConnectionImpl::HttpParserCode ClientConnectionImpl::onHeadersComplete() { // Handle the case where the client is closing a kept alive connection (by sending a 408 // with a 'Connection: close' header). In this case we just let response flush out followed // by the remote close. if (!pending_response_.has_value() && !resetStreamCalled()) { - throw PrematureResponseException(static_cast(parser_.status_code)); + ASSERT(codec_status_.ok()); + codec_status_ = prematureResponseError("", static_cast(parser_.status_code)); + return HttpParserCode::Error; } else if (pending_response_.has_value()) { ASSERT(!pending_response_done_); auto& headers = absl::get(headers_or_trailers_); @@ -1070,7 +1149,7 @@ int ClientConnectionImpl::onHeadersComplete() { // Here we deal with cases where the response cannot have a body, but http_parser does not deal // with it for us. - return cannotHaveBody() ? 1 : 0; + return cannotHaveBody() ? HttpParserCode::NoBody : HttpParserCode::Success; } bool ClientConnectionImpl::upgradeAllowed() const { diff --git a/source/common/http/http1/codec_impl.h b/source/common/http/http1/codec_impl.h index 8b53d4c374f5..52d5ed6b1ed6 100644 --- a/source/common/http/http1/codec_impl.h +++ b/source/common/http/http1/codec_impl.h @@ -228,6 +228,10 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable dispatchSlice(const char* slice, size_t len); /** * Called by the http_parser when body data is received. @@ -310,9 +334,10 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable active_request_; @@ -538,9 +568,9 @@ class ClientConnectionImpl : public ClientConnection, public ConnectionImpl { // ConnectionImpl void onEncodeComplete() override {} - void onMessageBegin() override {} + HttpParserCode onMessageBegin() override { return HttpParserCode::Success; } void onUrl(const char*, size_t) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } - int onHeadersComplete() override; + HttpParserCode onHeadersComplete() override; bool upgradeAllowed() const override; void onBody(Buffer::Instance& data) override; void onMessageComplete() override; From 1ce010929d4d283fce977bc65558da71ffe6bf7c Mon Sep 17 00:00:00 2001 From: Peng Gao Date: Thu, 14 May 2020 09:46:54 -0400 Subject: [PATCH 165/909] router: Create InternalRedirectPolicy in side RouteAction and extend it with pluggable predicates (#10908) Description: router: Create InternalRedirectPolicy to capture all internal redirect related options and extend it with pluggable predicates similar to retry plugins. The previous_routes and whitelisted_routes predicate allow creating a DAG of routes for internal redirects. Each node in the DAG is a route. whitelisted_routes defines the edges of each node. previous_routes serves as visited status keeper for each of the edge. This prevents infinite loop, while allowing loop to exist in the DAG. Risk Level: Medium Testing: Unit tests. Integration tests. Docs Changes: Updated HCM architecture overview page. Added toctree for the predicates. Release Notes: Updated version history. 
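As a rough illustration of the DAG described above (a sketch, not code from this patch; the generated-header include paths are approximate), the new policy with both predicates can be populated programmatically. Here a hypothetical route "route_a" may only redirect to "route_b", and never follow the same edge twice:

#include "envoy/config/route/v3/route_components.pb.h"
#include "envoy/extensions/internal_redirect/allow_listed_routes/v3/allow_listed_routes_config.pb.h"
#include "envoy/extensions/internal_redirect/previous_routes/v3/previous_routes_config.pb.h"

void configureRedirectPolicy(envoy::config::route::v3::RouteAction& route_action) {
  auto* policy = route_action.mutable_internal_redirect_policy();
  policy->mutable_max_internal_redirects()->set_value(3);
  policy->add_redirect_response_codes(302);
  policy->add_redirect_response_codes(307);

  // Edge definition: this route may only redirect to "route_b".
  auto* allow_listed = policy->add_predicates();
  allow_listed->set_name("envoy.internal_redirect_predicates.allow_listed_routes");
  envoy::extensions::internal_redirect::allow_listed_routes::v3::AllowListedRoutesConfig allowed;
  allowed.add_allowed_route_names("route_b");
  allow_listed->mutable_typed_config()->PackFrom(allowed);

  // Visited-state tracking: do not follow the same edge twice.
  auto* previous_routes = policy->add_predicates();
  previous_routes->set_name("envoy.internal_redirect_predicates.previous_routes");
  previous_routes->mutable_typed_config()->PackFrom(
      envoy::extensions::internal_redirect::previous_routes::v3::PreviousRoutesConfig());
}

In this sketch the allow_listed_routes predicate supplies the edges and previous_routes supplies the visited state, matching the description above.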
Signed-off-by: pengg --- CODEOWNERS | 1 + api/BUILD | 3 + .../config/route/v3/route_components.proto | 41 +++- .../route/v4alpha/route_components.proto | 60 ++++-- .../allow_listed_routes/v3/BUILD | 9 + .../v3/allow_listed_routes_config.proto | 24 +++ .../previous_routes/v3/BUILD | 9 + .../v3/previous_routes_config.proto | 19 ++ .../safe_cross_scheme/v3/BUILD | 9 + .../v3/safe_cross_scheme_config.proto | 24 +++ api/versioning/BUILD | 3 + docs/root/api-v3/config/config.rst | 1 + .../internal_redirect/internal_redirect.rst | 8 + .../http/http_connection_management.rst | 53 +++-- docs/root/version_history/current.rst | 9 + .../config/route/v3/route_components.proto | 41 +++- .../route/v4alpha/route_components.proto | 45 ++++- .../allow_listed_routes/v3/BUILD | 9 + .../v3/allow_listed_routes_config.proto | 24 +++ .../previous_routes/v3/BUILD | 9 + .../v3/previous_routes_config.proto | 19 ++ .../safe_cross_scheme/v3/BUILD | 9 + .../v3/safe_cross_scheme_config.proto | 24 +++ include/envoy/router/BUILD | 11 + include/envoy/router/internal_redirect.h | 66 ++++++ include/envoy/router/router.h | 56 ++++-- source/common/http/async_client_impl.cc | 1 + source/common/http/async_client_impl.h | 8 +- source/common/router/BUILD | 1 + source/common/router/config_impl.cc | 87 ++++++-- source/common/router/config_impl.h | 62 +++++- source/common/router/router.cc | 155 ++++++++------ source/common/router/router.h | 7 + source/extensions/extensions_build_config.bzl | 7 + source/extensions/internal_redirect/BUILD | 17 ++ .../allow_listed_routes/BUILD | 35 ++++ .../allow_listed_routes/allow_listed_routes.h | 37 ++++ .../allow_listed_routes/config.cc | 14 ++ .../allow_listed_routes/config.h | 40 ++++ .../internal_redirect/previous_routes/BUILD | 35 ++++ .../previous_routes/config.cc | 14 ++ .../previous_routes/config.h | 34 ++++ .../previous_routes/previous_routes.cc | 52 +++++ .../previous_routes/previous_routes.h | 32 +++ .../internal_redirect/safe_cross_scheme/BUILD | 34 ++++ .../safe_cross_scheme/config.cc | 14 ++ .../safe_cross_scheme/config.h | 32 +++ .../safe_cross_scheme/safe_cross_scheme.h | 28 +++ .../internal_redirect/well_known_names.h | 27 +++ test/common/http/async_client_impl_test.cc | 3 +- test/common/router/config_impl_test.cc | 127 +++++++++++- test/common/router/router_test.cc | 113 +++++++++-- .../internal_redirect/previous_routes/BUILD | 24 +++ .../previous_routes/config_test.cc | 83 ++++++++ test/integration/BUILD | 6 + test/integration/redirect_integration_test.cc | 189 +++++++++++++++++- test/integration/stats_integration_test.cc | 6 +- test/mocks/router/mocks.cc | 5 + test/mocks/router/mocks.h | 20 +- tools/spelling/spelling_dictionary.txt | 3 + 60 files changed, 1761 insertions(+), 177 deletions(-) create mode 100644 api/envoy/extensions/internal_redirect/allow_listed_routes/v3/BUILD create mode 100644 api/envoy/extensions/internal_redirect/allow_listed_routes/v3/allow_listed_routes_config.proto create mode 100644 api/envoy/extensions/internal_redirect/previous_routes/v3/BUILD create mode 100644 api/envoy/extensions/internal_redirect/previous_routes/v3/previous_routes_config.proto create mode 100644 api/envoy/extensions/internal_redirect/safe_cross_scheme/v3/BUILD create mode 100644 api/envoy/extensions/internal_redirect/safe_cross_scheme/v3/safe_cross_scheme_config.proto create mode 100644 docs/root/api-v3/config/internal_redirect/internal_redirect.rst create mode 100644 generated_api_shadow/envoy/extensions/internal_redirect/allow_listed_routes/v3/BUILD create mode 100644 
generated_api_shadow/envoy/extensions/internal_redirect/allow_listed_routes/v3/allow_listed_routes_config.proto create mode 100644 generated_api_shadow/envoy/extensions/internal_redirect/previous_routes/v3/BUILD create mode 100644 generated_api_shadow/envoy/extensions/internal_redirect/previous_routes/v3/previous_routes_config.proto create mode 100644 generated_api_shadow/envoy/extensions/internal_redirect/safe_cross_scheme/v3/BUILD create mode 100644 generated_api_shadow/envoy/extensions/internal_redirect/safe_cross_scheme/v3/safe_cross_scheme_config.proto create mode 100644 include/envoy/router/internal_redirect.h create mode 100644 source/extensions/internal_redirect/BUILD create mode 100644 source/extensions/internal_redirect/allow_listed_routes/BUILD create mode 100644 source/extensions/internal_redirect/allow_listed_routes/allow_listed_routes.h create mode 100644 source/extensions/internal_redirect/allow_listed_routes/config.cc create mode 100644 source/extensions/internal_redirect/allow_listed_routes/config.h create mode 100644 source/extensions/internal_redirect/previous_routes/BUILD create mode 100644 source/extensions/internal_redirect/previous_routes/config.cc create mode 100644 source/extensions/internal_redirect/previous_routes/config.h create mode 100644 source/extensions/internal_redirect/previous_routes/previous_routes.cc create mode 100644 source/extensions/internal_redirect/previous_routes/previous_routes.h create mode 100644 source/extensions/internal_redirect/safe_cross_scheme/BUILD create mode 100644 source/extensions/internal_redirect/safe_cross_scheme/config.cc create mode 100644 source/extensions/internal_redirect/safe_cross_scheme/config.h create mode 100644 source/extensions/internal_redirect/safe_cross_scheme/safe_cross_scheme.h create mode 100644 source/extensions/internal_redirect/well_known_names.h create mode 100644 test/extensions/internal_redirect/previous_routes/BUILD create mode 100644 test/extensions/internal_redirect/previous_routes/config_test.cc diff --git a/CODEOWNERS b/CODEOWNERS index 9a7d0f06b94a..601f9c69755d 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -92,6 +92,7 @@ extensions/filters/common/original_src @snowp @klarose /*/extensions/filters/listener/tls_inspector @piotrsikora @htuch /*/extensions/grpc_credentials/example @wozz @htuch /*/extensions/grpc_credentials/file_based_metadata @wozz @htuch +/*/extensions/internal_redirect @alyssawilk @penguingao /*/extensions/stat_sinks/dog_statsd @taiki45 @jmarantz /*/extensions/stat_sinks/hystrix @trabetti @jmarantz /*/extensions/stat_sinks/metrics_service @ramaraochavali @jmarantz diff --git a/api/BUILD b/api/BUILD index f5bd8c1a8d0b..c701bdcf4833 100644 --- a/api/BUILD +++ b/api/BUILD @@ -222,6 +222,9 @@ proto_library( "//envoy/extensions/filters/network/thrift_proxy/v3:pkg", "//envoy/extensions/filters/network/zookeeper_proxy/v3:pkg", "//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg", + "//envoy/extensions/internal_redirect/allow_listed_routes/v3:pkg", + "//envoy/extensions/internal_redirect/previous_routes/v3:pkg", + "//envoy/extensions/internal_redirect/safe_cross_scheme/v3:pkg", "//envoy/extensions/retry/host/omit_host_metadata/v3:pkg", "//envoy/extensions/retry/priority/previous_priorities/v3:pkg", "//envoy/extensions/transport_sockets/alts/v3:pkg", diff --git a/api/envoy/config/route/v3/route_components.proto b/api/envoy/config/route/v3/route_components.proto index 782bacfb95ad..eb6aab7dcd7e 100644 --- a/api/envoy/config/route/v3/route_components.proto +++ 
b/api/envoy/config/route/v3/route_components.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package envoy.config.route.v3; import "envoy/config/core/v3/base.proto"; +import "envoy/config/core/v3/extension.proto"; import "envoy/config/core/v3/proxy_protocol.proto"; import "envoy/type/matcher/v3/regex.proto"; import "envoy/type/matcher/v3/string.proto"; @@ -536,7 +537,7 @@ message CorsPolicy { core.v3.RuntimeFractionalPercent shadow_enabled = 10; } -// [#next-free-field: 34] +// [#next-free-field: 35] message RouteAction { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RouteAction"; @@ -549,6 +550,7 @@ message RouteAction { } // Configures :ref:`internal redirect ` behavior. + // [#next-major-version: remove this definition - it's defined in the InternalRedirectPolicy message.] enum InternalRedirectAction { PASS_THROUGH_INTERNAL_REDIRECT = 0; HANDLE_INTERNAL_REDIRECT = 1; @@ -986,7 +988,13 @@ message RouteAction { repeated UpgradeConfig upgrade_configs = 25; - InternalRedirectAction internal_redirect_action = 26; + // If present, Envoy will try to follow an upstream redirect response instead of proxying the + // response back to the downstream. An upstream redirect response is defined + // by :ref:`redirect_response_codes + // `. + InternalRedirectPolicy internal_redirect_policy = 34; + + InternalRedirectAction internal_redirect_action = 26 [deprecated = true]; // An internal redirect is handled, iff the number of previous internal redirects that a // downstream request has encountered is lower than this value, and @@ -1002,7 +1010,7 @@ message RouteAction { // will pass the redirect back to downstream. // // If not specified, at most one redirect will be followed. - google.protobuf.UInt32Value max_internal_redirects = 31; + google.protobuf.UInt32Value max_internal_redirects = 31 [deprecated = true]; // Indicates that the route has a hedge policy. Note that if this is set, // it'll take precedence over the virtual host level hedge policy entirely @@ -1593,3 +1601,30 @@ message QueryParameterMatcher { bool present_match = 6; } } + +// HTTP Internal Redirect :ref:`architecture overview `. +message InternalRedirectPolicy { + // An internal redirect is not handled, unless the number of previous internal redirects that a + // downstream request has encountered is lower than this value. + // In the case where a downstream request is bounced among multiple routes by internal redirect, + // the first route that hits this threshold, or does not set :ref:`internal_redirect_policy + // ` + // will pass the redirect back to downstream. + // + // If not specified, at most one redirect will be followed. + google.protobuf.UInt32Value max_internal_redirects = 1; + + // Defines what upstream response codes are allowed to trigger internal redirect. If unspecified, + // only 302 will be treated as internal redirect. + // Only 301, 302, 303, 307 and 308 are valid values. Any other codes will be ignored. + repeated uint32 redirect_response_codes = 2 [(validate.rules).repeated = {max_items: 5}]; + + // Specifies a list of predicates that are queried when an upstream response is deemed + // to trigger an internal redirect by all other criteria. Any predicate in the list can reject + // the redirect, causing the response to be proxied to downstream. + repeated core.v3.TypedExtensionConfig predicates = 3; + + // Allow internal redirect to follow a target URI with a different scheme than the value of + // x-forwarded-proto. The default is false. 
+ bool allow_cross_scheme_redirect = 4; +} diff --git a/api/envoy/config/route/v4alpha/route_components.proto b/api/envoy/config/route/v4alpha/route_components.proto index 8dfa58177bdb..815895db80e4 100644 --- a/api/envoy/config/route/v4alpha/route_components.proto +++ b/api/envoy/config/route/v4alpha/route_components.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package envoy.config.route.v4alpha; import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/core/v4alpha/extension.proto"; import "envoy/config/core/v4alpha/proxy_protocol.proto"; import "envoy/type/matcher/v4alpha/regex.proto"; import "envoy/type/matcher/v4alpha/string.proto"; @@ -539,7 +540,7 @@ message CorsPolicy { core.v4alpha.RuntimeFractionalPercent shadow_enabled = 10; } -// [#next-free-field: 34] +// [#next-free-field: 35] message RouteAction { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RouteAction"; @@ -552,6 +553,7 @@ message RouteAction { } // Configures :ref:`internal redirect ` behavior. + // [#next-major-version: remove this definition - it's defined in the InternalRedirectPolicy message.] enum InternalRedirectAction { PASS_THROUGH_INTERNAL_REDIRECT = 0; HANDLE_INTERNAL_REDIRECT = 1; @@ -750,9 +752,9 @@ message RouteAction { ConnectConfig connect_config = 3; } - reserved 12, 18, 19, 16, 22, 21, 10; + reserved 12, 18, 19, 16, 22, 21, 10, 26, 31; - reserved "request_mirror_policy"; + reserved "request_mirror_policy", "internal_redirect_action", "max_internal_redirects"; oneof cluster_specifier { option (validate.required) = true; @@ -992,23 +994,11 @@ message RouteAction { repeated UpgradeConfig upgrade_configs = 25; - InternalRedirectAction internal_redirect_action = 26; - - // An internal redirect is handled, iff the number of previous internal redirects that a - // downstream request has encountered is lower than this value, and - // :ref:`internal_redirect_action ` - // is set to :ref:`HANDLE_INTERNAL_REDIRECT - // ` - // In the case where a downstream request is bounced among multiple routes by internal redirect, - // the first route that hits this threshold, or has - // :ref:`internal_redirect_action ` - // set to - // :ref:`PASS_THROUGH_INTERNAL_REDIRECT - // ` - // will pass the redirect back to downstream. - // - // If not specified, at most one redirect will be followed. - google.protobuf.UInt32Value max_internal_redirects = 31; + // If present, Envoy will try to follow an upstream redirect response instead of proxying the + // response back to the downstream. An upstream redirect response is defined + // by :ref:`redirect_response_codes + // `. + InternalRedirectPolicy internal_redirect_policy = 34; // Indicates that the route has a hedge policy. Note that if this is set, // it'll take precedence over the virtual host level hedge policy entirely @@ -1603,3 +1593,33 @@ message QueryParameterMatcher { bool present_match = 6; } } + +// HTTP Internal Redirect :ref:`architecture overview `. +message InternalRedirectPolicy { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.InternalRedirectPolicy"; + + // An internal redirect is not handled, unless the number of previous internal redirects that a + // downstream request has encountered is lower than this value. + // In the case where a downstream request is bounced among multiple routes by internal redirect, + // the first route that hits this threshold, or does not set :ref:`internal_redirect_policy + // ` + // will pass the redirect back to downstream. 
+ // + // If not specified, at most one redirect will be followed. + google.protobuf.UInt32Value max_internal_redirects = 1; + + // Defines what upstream response codes are allowed to trigger internal redirect. If unspecified, + // only 302 will be treated as internal redirect. + // Only 301, 302, 303, 307 and 308 are valid values. Any other codes will be ignored. + repeated uint32 redirect_response_codes = 2 [(validate.rules).repeated = {max_items: 5}]; + + // Specifies a list of predicates that are queried when an upstream response is deemed + // to trigger an internal redirect by all other criteria. Any predicate in the list can reject + // the redirect, causing the response to be proxied to downstream. + repeated core.v4alpha.TypedExtensionConfig predicates = 3; + + // Allow internal redirect to follow a target URI with a different scheme than the value of + // x-forwarded-proto. The default is false. + bool allow_cross_scheme_redirect = 4; +} diff --git a/api/envoy/extensions/internal_redirect/allow_listed_routes/v3/BUILD b/api/envoy/extensions/internal_redirect/allow_listed_routes/v3/BUILD new file mode 100644 index 000000000000..ef3541ebcb1d --- /dev/null +++ b/api/envoy/extensions/internal_redirect/allow_listed_routes/v3/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/api/envoy/extensions/internal_redirect/allow_listed_routes/v3/allow_listed_routes_config.proto b/api/envoy/extensions/internal_redirect/allow_listed_routes/v3/allow_listed_routes_config.proto new file mode 100644 index 000000000000..a6da5b0f5d9b --- /dev/null +++ b/api/envoy/extensions/internal_redirect/allow_listed_routes/v3/allow_listed_routes_config.proto @@ -0,0 +1,24 @@ +syntax = "proto3"; + +package envoy.extensions.internal_redirect.allow_listed_routes.v3; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.internal_redirect.allow_listed_routes.v3"; +option java_outer_classname = "AllowListedRoutesConfigProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Allow listed routes internal redirect predicate] + +// An internal redirect predicate that accepts only explicitly allowed target routes. +// [#extension: envoy.internal_redirect_predicates.allow_listed_routes] +message AllowListedRoutesConfig { + // The list of routes that's allowed as redirect target by this predicate, + // identified by the route's :ref:`name `. + // Empty route names are not allowed. + repeated string allowed_route_names = 1 + [(validate.rules).repeated = {items {string {min_len: 1}}}]; +} diff --git a/api/envoy/extensions/internal_redirect/previous_routes/v3/BUILD b/api/envoy/extensions/internal_redirect/previous_routes/v3/BUILD new file mode 100644 index 000000000000..ef3541ebcb1d --- /dev/null +++ b/api/envoy/extensions/internal_redirect/previous_routes/v3/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/api/envoy/extensions/internal_redirect/previous_routes/v3/previous_routes_config.proto b/api/envoy/extensions/internal_redirect/previous_routes/v3/previous_routes_config.proto new file mode 100644 index 000000000000..6cc5fba871ea --- /dev/null +++ b/api/envoy/extensions/internal_redirect/previous_routes/v3/previous_routes_config.proto @@ -0,0 +1,19 @@ +syntax = "proto3"; + +package envoy.extensions.internal_redirect.previous_routes.v3; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.internal_redirect.previous_routes.v3"; +option java_outer_classname = "PreviousRoutesConfigProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Previous routes internal redirect predicate] + +// An internal redirect predicate that rejects redirect targets that are pointing +// to a route that has been followed by a previous redirect from the current route. +// [#extension: envoy.internal_redirect_predicates.previous_routes] +message PreviousRoutesConfig { +} diff --git a/api/envoy/extensions/internal_redirect/safe_cross_scheme/v3/BUILD b/api/envoy/extensions/internal_redirect/safe_cross_scheme/v3/BUILD new file mode 100644 index 000000000000..ef3541ebcb1d --- /dev/null +++ b/api/envoy/extensions/internal_redirect/safe_cross_scheme/v3/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/api/envoy/extensions/internal_redirect/safe_cross_scheme/v3/safe_cross_scheme_config.proto b/api/envoy/extensions/internal_redirect/safe_cross_scheme/v3/safe_cross_scheme_config.proto new file mode 100644 index 000000000000..54cec2f09bbb --- /dev/null +++ b/api/envoy/extensions/internal_redirect/safe_cross_scheme/v3/safe_cross_scheme_config.proto @@ -0,0 +1,24 @@ +syntax = "proto3"; + +package envoy.extensions.internal_redirect.safe_cross_scheme.v3; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.internal_redirect.safe_cross_scheme.v3"; +option java_outer_classname = "SafeCrossSchemeConfigProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: SafeCrossScheme internal redirect predicate] + +// An internal redirect predicate that checks the scheme between the +// downstream url and the redirect target url and allows a) same scheme +// redirect and b) safe cross scheme redirect, which means if the downstream +// scheme is HTTPS, both HTTPS and HTTP redirect targets are allowed, but if the +// downstream scheme is HTTP, only HTTP redirect targets are allowed. 
+// [#extension: +// envoy.internal_redirect_predicates.safe_cross_scheme] +message SafeCrossSchemeConfig { +} diff --git a/api/versioning/BUILD b/api/versioning/BUILD index e23f851b221e..d7771fbbd29e 100644 --- a/api/versioning/BUILD +++ b/api/versioning/BUILD @@ -105,6 +105,9 @@ proto_library( "//envoy/extensions/filters/network/thrift_proxy/v3:pkg", "//envoy/extensions/filters/network/zookeeper_proxy/v3:pkg", "//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg", + "//envoy/extensions/internal_redirect/allow_listed_routes/v3:pkg", + "//envoy/extensions/internal_redirect/previous_routes/v3:pkg", + "//envoy/extensions/internal_redirect/safe_cross_scheme/v3:pkg", "//envoy/extensions/retry/host/omit_host_metadata/v3:pkg", "//envoy/extensions/retry/priority/previous_priorities/v3:pkg", "//envoy/extensions/transport_sockets/alts/v3:pkg", diff --git a/docs/root/api-v3/config/config.rst b/docs/root/api-v3/config/config.rst index 663ac872183d..ba7ca7e70f76 100644 --- a/docs/root/api-v3/config/config.rst +++ b/docs/root/api-v3/config/config.rst @@ -17,3 +17,4 @@ Extensions grpc_credential/grpc_credential retry/retry trace/trace + internal_redirect/internal_redirect diff --git a/docs/root/api-v3/config/internal_redirect/internal_redirect.rst b/docs/root/api-v3/config/internal_redirect/internal_redirect.rst new file mode 100644 index 000000000000..5452e8accee7 --- /dev/null +++ b/docs/root/api-v3/config/internal_redirect/internal_redirect.rst @@ -0,0 +1,8 @@ +Internal Redirect Predicates +============================ + +.. toctree:: + :glob: + :maxdepth: 2 + + ../../extensions/internal_redirect/** diff --git a/docs/root/intro/arch_overview/http/http_connection_management.rst b/docs/root/intro/arch_overview/http/http_connection_management.rst index b69c587b17d4..74e8d90b99e8 100644 --- a/docs/root/intro/arch_overview/http/http_connection_management.rst +++ b/docs/root/intro/arch_overview/http/http_connection_management.rst @@ -151,37 +151,60 @@ previously attempted priorities. Internal redirects -------------------------- -Envoy supports handling 302 redirects internally, that is capturing a 302 redirect response, -synthesizing a new request, sending it to the upstream specified by the new route match, and -returning the redirected response as the response to the original request. +Envoy supports handling 3xx redirects internally, that is capturing a configurable 3xx redirect +response, synthesizing a new request, sending it to the upstream specified by the new route match, +and returning the redirected response as the response to the original request. -Internal redirects are configured via the ref:`internal redirect action -` field and -`max internal redirects ` field in -route configuration. When redirect handling is on, any 302 response from upstream is -subject to the redirect being handled by Envoy. +Internal redirects are configured via the :ref:`internal redirect policy +` field in route configuration. +When redirect handling is on, any 3xx response from upstream, that matches +:ref:`redirect_response_codes +` +is subject to the redirect being handled by Envoy. For a redirect to be handled successfully it must pass the following checks: -1. Be a 302 response. -2. Have a *location* header with a valid, fully qualified URL matching the scheme of the original request. +1. Have a response code matching one of :ref:`redirect_response_codes + `, which is + either 302 (by default), or a set of 3xx codes (301, 302, 303, 307, 308). +2. Have a *location* header with a valid, fully qualified URL. 3. 
The request must have been fully processed by Envoy. 4. The request must not have a body. -5. The number of previously handled internal redirect within a given downstream request does not exceed - `max internal redirects ` of the route - that the request or redirected request is hitting. +5. :ref:`allow_cross_scheme_redirect + ` is true (default to false), + or the scheme of the downstream request and the *location* header are the same. +6. The number of previously handled internal redirect within a given downstream request does not + exceed :ref:`max internal redirects + ` + of the route that the request or redirected request is hitting. +7. All :ref:`predicates ` accept + the target route. Any failure will result in redirect being passed downstream instead. Since a redirected request may be bounced between different routes, any route in the chain of redirects that 1. does not have internal redirect enabled -2. or has a `max internal redirects - ` +2. or has a :ref:`max internal redirects + ` smaller or equal to the redirect chain length when the redirect chain hits it +3. or is disallowed by any of the :ref:`predicates + ` will cause the redirect to be passed downstream. +Two predicates can be used to create a DAG that defines the redirect chain, the :ref:`previous routes +` predicate, and +the :ref:`allow_listed_routes +`. +Specifically, the *allow listed routes* predicate defines edges of individual node in the DAG +and the *previous routes* predicate defines "visited" state of the edges, so that loop can be avoided +if so desired. + +A third predicate :ref:`safe_cross_scheme +` +can be used to prevent HTTP -> HTTPS redirect. + Once the redirect has passed these checks, the request headers which were shipped to the original upstream will be modified by: diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index ab8820c35736..3d9249db4556 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -47,6 +47,8 @@ Changes tracing is not forced. * router: allow retries of streaming or incomplete requests. This removes stat `rq_retry_skipped_request_not_complete`. * router: allow retries by default when upstream responds with :ref:`x-envoy-overloaded `. +* router: more fine grained internal redirect configs are added to the :ref`internal_redirect_policy + ` field. * runtime: add new gauge :ref:`deprecated_feature_seen_since_process_start ` that gets reset across hot restarts. * stats: added the option to :ref:`report counters as deltas ` to the metrics service stats sink. * tracing: tracing configuration has been made fully dynamic and every HTTP connection manager @@ -64,3 +66,10 @@ Deprecated * The * :ref:`GoogleRE2.max_program_size` field is now deprecated. Management servers are expected to validate regexp program sizes instead of expecting the client to do it. +* The :ref:`internal_redirect_action ` + field and :ref:`max_internal_redirects ` field + are now deprecated. This changes the implemented default cross scheme redirect behavior. + All cross scheme redirect are disallowed by default. To restore + the previous behavior, set allow_cross_scheme_redirect=true and use + :ref:`safe_cross_scheme`, + in :ref:`predicates `. 
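As a simplified sketch of how these checks compose (written against the interfaces this PR adds under include/envoy/router; the free function and the num_previous_redirects parameter are hypothetical, and this is not the router's actual implementation):

#include "envoy/http/codes.h"
#include "envoy/router/internal_redirect.h"
#include "envoy/router/router.h"
#include "envoy/stream_info/filter_state.h"

namespace Envoy {

// Returns true when every check above passes and the redirect may be followed.
// num_previous_redirects is whatever counter the caller keeps per downstream request.
bool redirectShouldBeFollowed(const Router::InternalRedirectPolicy& policy,
                              StreamInfo::FilterState& filter_state, Http::Code response_code,
                              absl::string_view target_route_name, bool downstream_is_https,
                              bool target_is_https, uint32_t num_previous_redirects) {
  if (!policy.enabled() || !policy.shouldRedirectForResponseCode(response_code)) {
    return false;
  }
  if (num_previous_redirects >= policy.maxInternalRedirects()) {
    return false;
  }
  if (downstream_is_https != target_is_https && !policy.isCrossSchemeRedirectAllowed()) {
    return false;
  }
  // Any predicate can veto the redirect, in which case the 3xx response is
  // proxied to the downstream unchanged.
  for (const auto& predicate : policy.predicates()) {
    if (!predicate->acceptTargetRoute(filter_state, target_route_name, downstream_is_https,
                                      target_is_https)) {
      return false;
    }
  }
  return true;
}

} // namespace Envoy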
diff --git a/generated_api_shadow/envoy/config/route/v3/route_components.proto b/generated_api_shadow/envoy/config/route/v3/route_components.proto index e99f136343e0..cfd3f44291ee 100644 --- a/generated_api_shadow/envoy/config/route/v3/route_components.proto +++ b/generated_api_shadow/envoy/config/route/v3/route_components.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package envoy.config.route.v3; import "envoy/config/core/v3/base.proto"; +import "envoy/config/core/v3/extension.proto"; import "envoy/config/core/v3/proxy_protocol.proto"; import "envoy/type/matcher/v3/regex.proto"; import "envoy/type/matcher/v3/string.proto"; @@ -548,7 +549,7 @@ message CorsPolicy { [deprecated = true, (validate.rules).repeated = {items {string {max_bytes: 1024}}}]; } -// [#next-free-field: 34] +// [#next-free-field: 35] message RouteAction { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RouteAction"; @@ -561,6 +562,7 @@ message RouteAction { } // Configures :ref:`internal redirect ` behavior. + // [#next-major-version: remove this definition - it's defined in the InternalRedirectPolicy message.] enum InternalRedirectAction { PASS_THROUGH_INTERNAL_REDIRECT = 0; HANDLE_INTERNAL_REDIRECT = 1; @@ -995,7 +997,13 @@ message RouteAction { repeated UpgradeConfig upgrade_configs = 25; - InternalRedirectAction internal_redirect_action = 26; + // If present, Envoy will try to follow an upstream redirect response instead of proxying the + // response back to the downstream. An upstream redirect response is defined + // by :ref:`redirect_response_codes + // `. + InternalRedirectPolicy internal_redirect_policy = 34; + + InternalRedirectAction internal_redirect_action = 26 [deprecated = true]; // An internal redirect is handled, iff the number of previous internal redirects that a // downstream request has encountered is lower than this value, and @@ -1011,7 +1019,7 @@ message RouteAction { // will pass the redirect back to downstream. // // If not specified, at most one redirect will be followed. - google.protobuf.UInt32Value max_internal_redirects = 31; + google.protobuf.UInt32Value max_internal_redirects = 31 [deprecated = true]; // Indicates that the route has a hedge policy. Note that if this is set, // it'll take precedence over the virtual host level hedge policy entirely @@ -1611,3 +1619,30 @@ message QueryParameterMatcher { google.protobuf.BoolValue hidden_envoy_deprecated_regex = 4 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; } + +// HTTP Internal Redirect :ref:`architecture overview `. +message InternalRedirectPolicy { + // An internal redirect is not handled, unless the number of previous internal redirects that a + // downstream request has encountered is lower than this value. + // In the case where a downstream request is bounced among multiple routes by internal redirect, + // the first route that hits this threshold, or does not set :ref:`internal_redirect_policy + // ` + // will pass the redirect back to downstream. + // + // If not specified, at most one redirect will be followed. + google.protobuf.UInt32Value max_internal_redirects = 1; + + // Defines what upstream response codes are allowed to trigger internal redirect. If unspecified, + // only 302 will be treated as internal redirect. + // Only 301, 302, 303, 307 and 308 are valid values. Any other codes will be ignored. 
+ repeated uint32 redirect_response_codes = 2 [(validate.rules).repeated = {max_items: 5}]; + + // Specifies a list of predicates that are queried when an upstream response is deemed + // to trigger an internal redirect by all other criteria. Any predicate in the list can reject + // the redirect, causing the response to be proxied to downstream. + repeated core.v3.TypedExtensionConfig predicates = 3; + + // Allow internal redirect to follow a target URI with a different scheme than the value of + // x-forwarded-proto. The default is false. + bool allow_cross_scheme_redirect = 4; +} diff --git a/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto b/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto index 8dfa58177bdb..dcfd095b1fe5 100644 --- a/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto +++ b/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package envoy.config.route.v4alpha; import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/core/v4alpha/extension.proto"; import "envoy/config/core/v4alpha/proxy_protocol.proto"; import "envoy/type/matcher/v4alpha/regex.proto"; import "envoy/type/matcher/v4alpha/string.proto"; @@ -539,7 +540,7 @@ message CorsPolicy { core.v4alpha.RuntimeFractionalPercent shadow_enabled = 10; } -// [#next-free-field: 34] +// [#next-free-field: 35] message RouteAction { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RouteAction"; @@ -552,6 +553,7 @@ message RouteAction { } // Configures :ref:`internal redirect ` behavior. + // [#next-major-version: remove this definition - it's defined in the InternalRedirectPolicy message.] enum InternalRedirectAction { PASS_THROUGH_INTERNAL_REDIRECT = 0; HANDLE_INTERNAL_REDIRECT = 1; @@ -992,7 +994,13 @@ message RouteAction { repeated UpgradeConfig upgrade_configs = 25; - InternalRedirectAction internal_redirect_action = 26; + // If present, Envoy will try to follow an upstream redirect response instead of proxying the + // response back to the downstream. An upstream redirect response is defined + // by :ref:`redirect_response_codes + // `. + InternalRedirectPolicy internal_redirect_policy = 34; + + InternalRedirectAction hidden_envoy_deprecated_internal_redirect_action = 26 [deprecated = true]; // An internal redirect is handled, iff the number of previous internal redirects that a // downstream request has encountered is lower than this value, and @@ -1008,7 +1016,8 @@ message RouteAction { // will pass the redirect back to downstream. // // If not specified, at most one redirect will be followed. - google.protobuf.UInt32Value max_internal_redirects = 31; + google.protobuf.UInt32Value hidden_envoy_deprecated_max_internal_redirects = 31 + [deprecated = true]; // Indicates that the route has a hedge policy. Note that if this is set, // it'll take precedence over the virtual host level hedge policy entirely @@ -1603,3 +1612,33 @@ message QueryParameterMatcher { bool present_match = 6; } } + +// HTTP Internal Redirect :ref:`architecture overview `. +message InternalRedirectPolicy { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.InternalRedirectPolicy"; + + // An internal redirect is not handled, unless the number of previous internal redirects that a + // downstream request has encountered is lower than this value. 
+ // In the case where a downstream request is bounced among multiple routes by internal redirect, + // the first route that hits this threshold, or does not set :ref:`internal_redirect_policy + // ` + // will pass the redirect back to downstream. + // + // If not specified, at most one redirect will be followed. + google.protobuf.UInt32Value max_internal_redirects = 1; + + // Defines what upstream response codes are allowed to trigger internal redirect. If unspecified, + // only 302 will be treated as internal redirect. + // Only 301, 302, 303, 307 and 308 are valid values. Any other codes will be ignored. + repeated uint32 redirect_response_codes = 2 [(validate.rules).repeated = {max_items: 5}]; + + // Specifies a list of predicates that are queried when an upstream response is deemed + // to trigger an internal redirect by all other criteria. Any predicate in the list can reject + // the redirect, causing the response to be proxied to downstream. + repeated core.v4alpha.TypedExtensionConfig predicates = 3; + + // Allow internal redirect to follow a target URI with a different scheme than the value of + // x-forwarded-proto. The default is false. + bool allow_cross_scheme_redirect = 4; +} diff --git a/generated_api_shadow/envoy/extensions/internal_redirect/allow_listed_routes/v3/BUILD b/generated_api_shadow/envoy/extensions/internal_redirect/allow_listed_routes/v3/BUILD new file mode 100644 index 000000000000..ef3541ebcb1d --- /dev/null +++ b/generated_api_shadow/envoy/extensions/internal_redirect/allow_listed_routes/v3/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/generated_api_shadow/envoy/extensions/internal_redirect/allow_listed_routes/v3/allow_listed_routes_config.proto b/generated_api_shadow/envoy/extensions/internal_redirect/allow_listed_routes/v3/allow_listed_routes_config.proto new file mode 100644 index 000000000000..a6da5b0f5d9b --- /dev/null +++ b/generated_api_shadow/envoy/extensions/internal_redirect/allow_listed_routes/v3/allow_listed_routes_config.proto @@ -0,0 +1,24 @@ +syntax = "proto3"; + +package envoy.extensions.internal_redirect.allow_listed_routes.v3; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.internal_redirect.allow_listed_routes.v3"; +option java_outer_classname = "AllowListedRoutesConfigProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Allow listed routes internal redirect predicate] + +// An internal redirect predicate that accepts only explicitly allowed target routes. +// [#extension: envoy.internal_redirect_predicates.allow_listed_routes] +message AllowListedRoutesConfig { + // The list of routes that's allowed as redirect target by this predicate, + // identified by the route's :ref:`name `. + // Empty route names are not allowed. 
+ repeated string allowed_route_names = 1 + [(validate.rules).repeated = {items {string {min_len: 1}}}]; +} diff --git a/generated_api_shadow/envoy/extensions/internal_redirect/previous_routes/v3/BUILD b/generated_api_shadow/envoy/extensions/internal_redirect/previous_routes/v3/BUILD new file mode 100644 index 000000000000..ef3541ebcb1d --- /dev/null +++ b/generated_api_shadow/envoy/extensions/internal_redirect/previous_routes/v3/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/generated_api_shadow/envoy/extensions/internal_redirect/previous_routes/v3/previous_routes_config.proto b/generated_api_shadow/envoy/extensions/internal_redirect/previous_routes/v3/previous_routes_config.proto new file mode 100644 index 000000000000..6cc5fba871ea --- /dev/null +++ b/generated_api_shadow/envoy/extensions/internal_redirect/previous_routes/v3/previous_routes_config.proto @@ -0,0 +1,19 @@ +syntax = "proto3"; + +package envoy.extensions.internal_redirect.previous_routes.v3; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.internal_redirect.previous_routes.v3"; +option java_outer_classname = "PreviousRoutesConfigProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Previous routes internal redirect predicate] + +// An internal redirect predicate that rejects redirect targets that are pointing +// to a route that has been followed by a previous redirect from the current route. +// [#extension: envoy.internal_redirect_predicates.previous_routes] +message PreviousRoutesConfig { +} diff --git a/generated_api_shadow/envoy/extensions/internal_redirect/safe_cross_scheme/v3/BUILD b/generated_api_shadow/envoy/extensions/internal_redirect/safe_cross_scheme/v3/BUILD new file mode 100644 index 000000000000..ef3541ebcb1d --- /dev/null +++ b/generated_api_shadow/envoy/extensions/internal_redirect/safe_cross_scheme/v3/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/generated_api_shadow/envoy/extensions/internal_redirect/safe_cross_scheme/v3/safe_cross_scheme_config.proto b/generated_api_shadow/envoy/extensions/internal_redirect/safe_cross_scheme/v3/safe_cross_scheme_config.proto new file mode 100644 index 000000000000..54cec2f09bbb --- /dev/null +++ b/generated_api_shadow/envoy/extensions/internal_redirect/safe_cross_scheme/v3/safe_cross_scheme_config.proto @@ -0,0 +1,24 @@ +syntax = "proto3"; + +package envoy.extensions.internal_redirect.safe_cross_scheme.v3; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.internal_redirect.safe_cross_scheme.v3"; +option java_outer_classname = "SafeCrossSchemeConfigProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: SafeCrossScheme internal redirect predicate] + +// An internal redirect predicate that checks the scheme between the +// downstream url and the redirect target url and allows a) same scheme +// redirect and b) safe cross scheme redirect, which means if the downstream +// scheme is HTTPS, both HTTPS and HTTP redirect targets are allowed, but if the +// downstream scheme is HTTP, only HTTP redirect targets are allowed. +// [#extension: +// envoy.internal_redirect_predicates.safe_cross_scheme] +message SafeCrossSchemeConfig { +} diff --git a/include/envoy/router/BUILD b/include/envoy/router/BUILD index b829997d24aa..44aee699e338 100644 --- a/include/envoy/router/BUILD +++ b/include/envoy/router/BUILD @@ -53,6 +53,7 @@ envoy_cc_library( hdrs = ["router.h"], external_deps = ["abseil_optional"], deps = [ + ":internal_redirect_interface", "//include/envoy/access_log:access_log_interface", "//include/envoy/common:matchers_interface", "//include/envoy/config:typed_metadata_interface", @@ -108,3 +109,13 @@ envoy_cc_library( "//include/envoy/stream_info:filter_state_interface", ], ) + +envoy_cc_library( + name = "internal_redirect_interface", + hdrs = ["internal_redirect.h"], + deps = [ + "//include/envoy/config:typed_config_interface", + "//include/envoy/stream_info:filter_state_interface", + "//source/common/common:minimal_logger_lib", + ], +) diff --git a/include/envoy/router/internal_redirect.h b/include/envoy/router/internal_redirect.h new file mode 100644 index 000000000000..95f624255ace --- /dev/null +++ b/include/envoy/router/internal_redirect.h @@ -0,0 +1,66 @@ +#pragma once + +#include "envoy/config/typed_config.h" +#include "envoy/stream_info/filter_state.h" + +#include "common/common/logger.h" + +#include "absl/strings/string_view.h" + +namespace Envoy { +namespace Router { + +/** + * Used to decide if an internal redirect is allowed to be followed based on the target route. + * Subclassing Logger::Loggable so that implementations can log details. + */ +class InternalRedirectPredicate : Logger::Loggable { +public: + virtual ~InternalRedirectPredicate() = default; + + /** + * A FilterState is provided so that predicate implementation can use it to preserve state across + * internal redirects. + * @param filter_state supplies the filter state associated with the current request so that the + * predicates can use it to persist states across filter chains. 
+ * @param target_route_name indicates the route that an internal redirect is targeting. + * @param downstream_is_https indicates the downstream request is using https. + * @param target_is_https indicates the internal redirect target url has https in the url. + * @return whether the route specified by target_route_name is allowed to be followed. Any + * predicate returning false will prevent the redirect from being followed, causing the + * response to be proxied downstream. + */ + virtual bool acceptTargetRoute(StreamInfo::FilterState& filter_state, + absl::string_view target_route_name, bool downstream_is_https, + bool target_is_https) PURE; + + /** + * @return the name of the current predicate. + */ + virtual absl::string_view name() const PURE; +}; + +using InternalRedirectPredicateSharedPtr = std::shared_ptr; + +/** + * Factory for InternalRedirectPredicate. + */ +class InternalRedirectPredicateFactory : public Config::TypedFactory { +public: + ~InternalRedirectPredicateFactory() override = default; + + /** + * @param config contains the proto stored in TypedExtensionConfig.typed_config for the predicate. + * @param current_route_name stores the route name of the route where the predicate is installed. + * @return an InternalRedirectPredicate. The given current_route_name is useful for predicates + * that need to create per-route FilterState. + */ + virtual InternalRedirectPredicateSharedPtr + createInternalRedirectPredicate(const Protobuf::Message& config, + absl::string_view current_route_name) PURE; + + std::string category() const override { return "envoy.internal_redirect_predicates"; } +}; + +} // namespace Router +} // namespace Envoy diff --git a/include/envoy/router/router.h b/include/envoy/router/router.h index 185ba349d185..39be039e7cfd 100644 --- a/include/envoy/router/router.h +++ b/include/envoy/router/router.h @@ -17,6 +17,7 @@ #include "envoy/http/codes.h" #include "envoy/http/hash_policy.h" #include "envoy/http/header_map.h" +#include "envoy/router/internal_redirect.h" #include "envoy/tracing/http_tracer.h" #include "envoy/type/v3/percent.pb.h" #include "envoy/upstream/resource_manager.h" @@ -237,9 +238,42 @@ class RetryPolicy { enum class RetryStatus { No, NoOverflow, NoRetryLimitExceeded, Yes }; /** - * InternalRedirectAction from the route configuration. + * InternalRedirectPolicy from the route configuration. */ -enum class InternalRedirectAction { PassThrough, Handle }; +class InternalRedirectPolicy { +public: + virtual ~InternalRedirectPolicy() = default; + + /** + * @return whether internal redirect is enabled on this route. + */ + virtual bool enabled() const PURE; + + /** + * @param response_code the response code from the upstream. + * @return whether the given response_code should trigger an internal redirect on this route. + */ + virtual bool shouldRedirectForResponseCode(const Http::Code& response_code) const PURE; + + /** + * Creates the target route predicates. This should really be called only once for each upstream + * redirect response. Creating the predicates lazily to avoid wasting CPU cycles on non-redirect + * responses, which should be the most common case. + * @return a vector of newly constructed InternalRedirectPredicate instances. + */ + virtual std::vector predicates() const PURE; + + /** + * @return the maximum number of allowed internal redirects on this route. 
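// Sketch of a user-defined predicate written against the interface above
// (a hypothetical example, not one of the bundled extensions): accept only
// redirect targets whose route name carries a given prefix.
#include <string>
#include <utility>

#include "envoy/router/internal_redirect.h"
#include "envoy/stream_info/filter_state.h"

#include "absl/strings/match.h"
#include "absl/strings/string_view.h"

namespace Example {

class PrefixedRoutesPredicate : public Envoy::Router::InternalRedirectPredicate {
public:
  explicit PrefixedRoutesPredicate(std::string prefix) : prefix_(std::move(prefix)) {}

  bool acceptTargetRoute(Envoy::StreamInfo::FilterState&, absl::string_view target_route_name,
                         bool /*downstream_is_https*/, bool /*target_is_https*/) override {
    // FilterState is unused here; stateful predicates such as previous_routes
    // use it to remember which routes were already visited across redirects.
    return absl::StartsWith(target_route_name, prefix_);
  }

  absl::string_view name() const override { return "example.prefixed_routes"; }

private:
  const std::string prefix_;
};

} // namespace Example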
+ */ + virtual uint32_t maxInternalRedirects() const PURE; + + /** + * @return if it is allowed to follow the redirect with a different scheme in + * the target URI than the downstream request. + */ + virtual bool isCrossSchemeRedirectAllowed() const PURE; +}; /** * Wraps retry state for an active routed request. @@ -686,6 +720,13 @@ class RouteEntry : public ResponseEntry { */ virtual const RetryPolicy& retryPolicy() const PURE; + /** + * @return const InternalRedirectPolicy& the internal redirect policy for the route. All routes + * have a internal redirect policy even if it is not enabled, which means redirects are + * simply proxied as normal responses. + */ + virtual const InternalRedirectPolicy& internalRedirectPolicy() const PURE; + /** * @return uint32_t any route cap on bytes which should be buffered for shadowing or retries. * This is an upper bound so does not necessarily reflect the bytes which will be buffered @@ -834,17 +875,6 @@ class RouteEntry : public ResponseEntry { */ virtual const absl::optional& connectConfig() const PURE; - /** - * @returns the internal redirect action which should be taken on this route. - */ - virtual InternalRedirectAction internalRedirectAction() const PURE; - - /** - * @returns the threshold of number of previously handled internal redirects, for this route to - * stop handle internal redirects. - */ - virtual uint32_t maxInternalRedirects() const PURE; - /** * @return std::string& the name of the route. */ diff --git a/source/common/http/async_client_impl.cc b/source/common/http/async_client_impl.cc index 3ef9de94a806..088d93fe43bf 100644 --- a/source/common/http/async_client_impl.cc +++ b/source/common/http/async_client_impl.cc @@ -20,6 +20,7 @@ const std::vector> const AsyncStreamImpl::NullHedgePolicy AsyncStreamImpl::RouteEntryImpl::hedge_policy_; const AsyncStreamImpl::NullRateLimitPolicy AsyncStreamImpl::RouteEntryImpl::rate_limit_policy_; const AsyncStreamImpl::NullRetryPolicy AsyncStreamImpl::RouteEntryImpl::retry_policy_; +const Router::InternalRedirectPolicyImpl AsyncStreamImpl::RouteEntryImpl::internal_redirect_policy_; const std::vector AsyncStreamImpl::RouteEntryImpl::shadow_policies_; const AsyncStreamImpl::NullVirtualHost AsyncStreamImpl::RouteEntryImpl::virtual_host_; const AsyncStreamImpl::NullRateLimitPolicy AsyncStreamImpl::NullVirtualHost::rate_limit_policy_; diff --git a/source/common/http/async_client_impl.h b/source/common/http/async_client_impl.h index beaea6c3ef60..241c7135348c 100644 --- a/source/common/http/async_client_impl.h +++ b/source/common/http/async_client_impl.h @@ -231,6 +231,9 @@ class AsyncStreamImpl : public AsyncClient::Stream, } const Router::RateLimitPolicy& rateLimitPolicy() const override { return rate_limit_policy_; } const Router::RetryPolicy& retryPolicy() const override { return retry_policy_; } + const Router::InternalRedirectPolicy& internalRedirectPolicy() const override { + return internal_redirect_policy_; + } uint32_t retryShadowBufferLimit() const override { return std::numeric_limits::max(); } @@ -279,15 +282,12 @@ class AsyncStreamImpl : public AsyncClient::Stream, bool includeAttemptCountInRequest() const override { return false; } bool includeAttemptCountInResponse() const override { return false; } const Router::RouteEntry::UpgradeMap& upgradeMap() const override { return upgrade_map_; } - Router::InternalRedirectAction internalRedirectAction() const override { - return Router::InternalRedirectAction::PassThrough; - } - uint32_t maxInternalRedirects() const override { return 1; } const 
std::string& routeName() const override { return route_name_; } std::unique_ptr hash_policy_; static const NullHedgePolicy hedge_policy_; static const NullRateLimitPolicy rate_limit_policy_; static const NullRetryPolicy retry_policy_; + static const Router::InternalRedirectPolicyImpl internal_redirect_policy_; static const std::vector shadow_policies_; static const NullVirtualHost virtual_host_; static const std::multimap opaque_config_; diff --git a/source/common/router/BUILD b/source/common/router/BUILD index 3fb3a37fb915..b194d6e239a6 100644 --- a/source/common/router/BUILD +++ b/source/common/router/BUILD @@ -289,6 +289,7 @@ envoy_cc_library( "//source/common/access_log:access_log_lib", "//source/common/buffer:watermark_buffer_lib", "//source/common/common:assert_lib", + "//source/common/common:cleanup_lib", "//source/common/common:empty_string", "//source/common/common:enum_to_int", "//source/common/common:hash_lib", diff --git a/source/common/router/config_impl.cc b/source/common/router/config_impl.cc index a6c3fd0b34c4..fb8ec0d8449d 100644 --- a/source/common/router/config_impl.cc +++ b/source/common/router/config_impl.cc @@ -45,18 +45,6 @@ namespace Envoy { namespace Router { namespace { -InternalRedirectAction -convertInternalRedirectAction(const envoy::config::route::v3::RouteAction& route) { - switch (route.internal_redirect_action()) { - case envoy::config::route::v3::RouteAction::PASS_THROUGH_INTERNAL_REDIRECT: - return InternalRedirectAction::PassThrough; - case envoy::config::route::v3::RouteAction::HANDLE_INTERNAL_REDIRECT: - return InternalRedirectAction::Handle; - default: - return InternalRedirectAction::PassThrough; - } -} - const std::string DEPRECATED_ROUTER_NAME = "envoy.router"; const absl::string_view getPath(const Http::RequestHeaderMap& headers) { @@ -156,6 +144,52 @@ Upstream::RetryPrioritySharedPtr RetryPolicyImpl::retryPriority() const { *validation_visitor_, num_retries_); } +InternalRedirectPolicyImpl::InternalRedirectPolicyImpl( + const envoy::config::route::v3::InternalRedirectPolicy& policy_config, + ProtobufMessage::ValidationVisitor& validator, absl::string_view current_route_name) + : current_route_name_(current_route_name), + redirect_response_codes_(buildRedirectResponseCodes(policy_config)), + max_internal_redirects_( + PROTOBUF_GET_WRAPPED_OR_DEFAULT(policy_config, max_internal_redirects, 1)), + enabled_(true), allow_cross_scheme_redirect_(policy_config.allow_cross_scheme_redirect()) { + for (const auto& predicate : policy_config.predicates()) { + const std::string type{ + TypeUtil::typeUrlToDescriptorFullName(predicate.typed_config().type_url())}; + auto* factory = + Registry::FactoryRegistry::getFactoryByType(type); + + auto config = factory->createEmptyConfigProto(); + Envoy::Config::Utility::translateOpaqueConfig(predicate.typed_config(), {}, validator, *config); + predicate_factories_.emplace_back(factory, std::move(config)); + } +} + +std::vector InternalRedirectPolicyImpl::predicates() const { + std::vector predicates; + for (const auto& predicate_factory : predicate_factories_) { + predicates.emplace_back(predicate_factory.first->createInternalRedirectPredicate( + *predicate_factory.second, current_route_name_)); + } + return predicates; +} + +absl::flat_hash_set InternalRedirectPolicyImpl::buildRedirectResponseCodes( + const envoy::config::route::v3::InternalRedirectPolicy& policy_config) const { + if (policy_config.redirect_response_codes_size() == 0) { + return absl::flat_hash_set{Http::Code::Found}; + } + absl::flat_hash_set ret; + 
std::for_each(policy_config.redirect_response_codes().begin(), + policy_config.redirect_response_codes().end(), [&ret](uint32_t response_code) { + const absl::flat_hash_set valid_redirect_response_code = {301, 302, 303, + 307, 308}; + if (valid_redirect_response_code.contains(response_code)) { + ret.insert(static_cast(response_code)); + } + }); + return ret; +} + CorsPolicyImpl::CorsPolicyImpl(const envoy::config::route::v3::CorsPolicy& config, Runtime::Loader& loader) : config_(config), loader_(loader), allow_methods_(config.allow_methods()), @@ -278,6 +312,8 @@ RouteEntryImplBase::RouteEntryImplBase(const VirtualHostImpl& vhost, strip_query_(route.redirect().strip_query()), hedge_policy_(buildHedgePolicy(vhost.hedgePolicy(), route.route())), retry_policy_(buildRetryPolicy(vhost.retryPolicy(), route.route(), validator)), + internal_redirect_policy_( + buildInternalRedirectPolicy(route.route(), validator, route.name())), rate_limit_policy_(route.route().rate_limits()), priority_(ConfigUtility::parsePriority(route.route().priority())), config_headers_(Http::HeaderUtility::buildHeaderDataVector(route.match().headers())), @@ -297,10 +333,7 @@ RouteEntryImplBase::RouteEntryImplBase(const VirtualHostImpl& vhost, per_filter_configs_(route.typed_per_filter_config(), route.hidden_envoy_deprecated_per_filter_config(), factory_context, validator), - route_name_(route.name()), time_source_(factory_context.dispatcher().timeSource()), - internal_redirect_action_(convertInternalRedirectAction(route.route())), - max_internal_redirects_( - PROTOBUF_GET_WRAPPED_OR_DEFAULT(route.route(), max_internal_redirects, 1)) { + route_name_(route.name()), time_source_(factory_context.dispatcher().timeSource()) { if (route.route().has_metadata_match()) { const auto filter_it = route.route().metadata_match().filter_metadata().find( Envoy::Config::MetadataFilters::get().ENVOY_LB); @@ -709,6 +742,28 @@ RetryPolicyImpl RouteEntryImplBase::buildRetryPolicy( return RetryPolicyImpl(); } +InternalRedirectPolicyImpl RouteEntryImplBase::buildInternalRedirectPolicy( + const envoy::config::route::v3::RouteAction& route_config, + ProtobufMessage::ValidationVisitor& validator, absl::string_view current_route_name) const { + if (route_config.has_internal_redirect_policy()) { + return InternalRedirectPolicyImpl(route_config.internal_redirect_policy(), validator, + current_route_name); + } + envoy::config::route::v3::InternalRedirectPolicy policy_config; + switch (route_config.internal_redirect_action()) { + case envoy::config::route::v3::RouteAction::HANDLE_INTERNAL_REDIRECT: + break; + case envoy::config::route::v3::RouteAction::PASS_THROUGH_INTERNAL_REDIRECT: + FALLTHRU; + default: + return InternalRedirectPolicyImpl(); + } + if (route_config.has_max_internal_redirects()) { + *policy_config.mutable_max_internal_redirects() = route_config.max_internal_redirects(); + } + return InternalRedirectPolicyImpl(policy_config, validator, current_route_name); +} + DecoratorConstPtr RouteEntryImplBase::parseDecorator(const envoy::config::route::v3::Route& route) { DecoratorConstPtr ret; if (route.has_decorator()) { diff --git a/source/common/router/config_impl.h b/source/common/router/config_impl.h index d5bec26b4791..31a81abf7e84 100644 --- a/source/common/router/config_impl.h +++ b/source/common/router/config_impl.h @@ -386,6 +386,46 @@ class RouteTracingImpl : public RouteTracing { Tracing::CustomTagMap custom_tags_; }; +/** + * Implementation of InternalRedirectPolicy that reads from the proto + * InternalRedirectPolicy of the RouteAction. 
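Because buildInternalRedirectPolicy() above folds the deprecated internal_redirect_action and max_internal_redirects fields into the new message, the two RouteAction configurations in this sketch behave the same: an enabled policy, 302 (Found) as the only accepted redirect code, and at most five internal redirects per downstream request. The wrapper function name is illustrative; the proto accessors are the ones this patch itself exercises.

#include "envoy/config/route/v3/route_components.pb.h"

// Illustrative only: the legacy fields versus the new internal_redirect_policy
// message that buildInternalRedirectPolicy() derives from them.
void equivalentInternalRedirectConfigs() {
  envoy::config::route::v3::RouteAction legacy;
  legacy.set_internal_redirect_action(
      envoy::config::route::v3::RouteAction::HANDLE_INTERNAL_REDIRECT);
  legacy.mutable_max_internal_redirects()->set_value(5);

  envoy::config::route::v3::RouteAction current;
  auto* policy = current.mutable_internal_redirect_policy();
  policy->mutable_max_internal_redirects()->set_value(5);
  // Leaving redirect_response_codes empty keeps the default of {302}, matching
  // the legacy behavior; predicates and allow_cross_scheme_redirect stay off.
}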
+ */ +class InternalRedirectPolicyImpl : public InternalRedirectPolicy { +public: + // Constructor that enables internal redirect with policy_config controlling the configurable + // behaviors. + explicit InternalRedirectPolicyImpl( + const envoy::config::route::v3::InternalRedirectPolicy& policy_config, + ProtobufMessage::ValidationVisitor& validator, absl::string_view current_route_name); + // Default constructor that disables internal redirect. + InternalRedirectPolicyImpl() = default; + + bool enabled() const override { return enabled_; } + + bool shouldRedirectForResponseCode(const Http::Code& response_code) const override { + return redirect_response_codes_.contains(response_code); + } + + std::vector predicates() const override; + + uint32_t maxInternalRedirects() const override { return max_internal_redirects_; } + + bool isCrossSchemeRedirectAllowed() const override { return allow_cross_scheme_redirect_; } + +private: + absl::flat_hash_set buildRedirectResponseCodes( + const envoy::config::route::v3::InternalRedirectPolicy& policy_config) const; + + const std::string current_route_name_; + const absl::flat_hash_set redirect_response_codes_; + const uint32_t max_internal_redirects_{1}; + const bool enabled_{false}; + const bool allow_cross_scheme_redirect_{false}; + + std::vector> + predicate_factories_; +}; + /** * Base implementation for all route entries. */ @@ -442,6 +482,9 @@ class RouteEntryImplBase : public RouteEntry, Upstream::ResourcePriority priority() const override { return priority_; } const RateLimitPolicy& rateLimitPolicy() const override { return rate_limit_policy_; } const RetryPolicy& retryPolicy() const override { return retry_policy_; } + const InternalRedirectPolicy& internalRedirectPolicy() const override { + return internal_redirect_policy_; + } uint32_t retryShadowBufferLimit() const override { return retry_shadow_buffer_limit_; } const std::vector& shadowPolicies() const override { return shadow_policies_; } const VirtualCluster* virtualCluster(const Http::HeaderMap& headers) const override { @@ -472,10 +515,6 @@ class RouteEntryImplBase : public RouteEntry, } const absl::optional& connectConfig() const override { return connect_config_; } const UpgradeMap& upgradeMap() const override { return upgrade_map_; } - InternalRedirectAction internalRedirectAction() const override { - return internal_redirect_action_; - } - uint32_t maxInternalRedirects() const override { return max_internal_redirects_; } // Router::DirectResponseEntry std::string newPath(const Http::RequestHeaderMap& headers) const override; @@ -548,6 +587,9 @@ class RouteEntryImplBase : public RouteEntry, Upstream::ResourcePriority priority() const override { return parent_->priority(); } const RateLimitPolicy& rateLimitPolicy() const override { return parent_->rateLimitPolicy(); } const RetryPolicy& retryPolicy() const override { return parent_->retryPolicy(); } + const InternalRedirectPolicy& internalRedirectPolicy() const override { + return parent_->internalRedirectPolicy(); + } uint32_t retryShadowBufferLimit() const override { return parent_->retryShadowBufferLimit(); } const std::vector& shadowPolicies() const override { return parent_->shadowPolicies(); @@ -602,10 +644,6 @@ class RouteEntryImplBase : public RouteEntry, return parent_->connectConfig(); } const UpgradeMap& upgradeMap() const override { return parent_->upgradeMap(); } - InternalRedirectAction internalRedirectAction() const override { - return parent_->internalRedirectAction(); - } - uint32_t maxInternalRedirects() const 
override { return parent_->maxInternalRedirects(); } // Router::Route const DirectResponseEntry* directResponseEntry() const override { return nullptr; } @@ -694,6 +732,11 @@ class RouteEntryImplBase : public RouteEntry, const envoy::config::route::v3::RouteAction& route_config, ProtobufMessage::ValidationVisitor& validation_visitor) const; + InternalRedirectPolicyImpl + buildInternalRedirectPolicy(const envoy::config::route::v3::RouteAction& route_config, + ProtobufMessage::ValidationVisitor& validator, + absl::string_view current_route_name) const; + // Default timeout is 15s if nothing is specified in the route config. static const uint64_t DEFAULT_ROUTE_TIMEOUT_MS = 15000; @@ -720,6 +763,7 @@ class RouteEntryImplBase : public RouteEntry, const bool strip_query_; const HedgePolicyImpl hedge_policy_; const RetryPolicyImpl retry_policy_; + const InternalRedirectPolicyImpl internal_redirect_policy_; const RateLimitPolicyImpl rate_limit_policy_; std::vector shadow_policies_; const Upstream::ResourcePriority priority_; @@ -749,8 +793,6 @@ class RouteEntryImplBase : public RouteEntry, PerFilterConfigs per_filter_configs_; const std::string route_name_; TimeSource& time_source_; - InternalRedirectAction internal_redirect_action_; - uint32_t max_internal_redirects_{1}; }; /** diff --git a/source/common/router/router.cc b/source/common/router/router.cc index 677244a7f9e2..516f5b403c1e 100644 --- a/source/common/router/router.cc +++ b/source/common/router/router.cc @@ -15,6 +15,7 @@ #include "envoy/upstream/upstream.h" #include "common/common/assert.h" +#include "common/common/cleanup.h" #include "common/common/empty_string.h" #include "common/common/enum_to_int.h" #include "common/common/scope_tracker.h" @@ -59,60 +60,6 @@ bool schemeIsHttp(const Http::RequestHeaderMap& downstream_headers, return false; } -bool convertRequestHeadersForInternalRedirect(Http::RequestHeaderMap& downstream_headers, - StreamInfo::FilterState& filter_state, - uint32_t max_internal_redirects, - const Http::HeaderEntry& internal_redirect, - const Network::Connection& connection) { - // Make sure the redirect response contains a URL to redirect to. - if (internal_redirect.value().getStringView().length() == 0) { - return false; - } - if (!downstream_headers.Path()) { - return false; - } - - Http::Utility::Url absolute_url; - if (!absolute_url.initialize(internal_redirect.value().getStringView(), false)) { - return false; - } - - // Don't allow serving TLS responses over plaintext. - bool scheme_is_http = schemeIsHttp(downstream_headers, connection); - if (scheme_is_http && absolute_url.scheme() == Http::Headers::get().SchemeValues.Https) { - return false; - } - - // Make sure that performing the redirect won't result in exceeding the configured number of - // redirects allowed for this route. - if (!filter_state.hasData(NumInternalRedirectsFilterStateName)) { - filter_state.setData( - NumInternalRedirectsFilterStateName, std::make_shared(0), - StreamInfo::FilterState::StateType::Mutable, StreamInfo::FilterState::LifeSpan::Request); - } - StreamInfo::UInt32Accessor& num_internal_redirect = - filter_state.getDataMutable(NumInternalRedirectsFilterStateName); - - if (num_internal_redirect.value() >= max_internal_redirects) { - return false; - } - num_internal_redirect.increment(); - - // Preserve the original request URL for the second pass. - downstream_headers.setEnvoyOriginalUrl( - absl::StrCat(scheme_is_http ? 
Http::Headers::get().SchemeValues.Http - : Http::Headers::get().SchemeValues.Https, - "://", downstream_headers.Host()->value().getStringView(), - downstream_headers.Path()->value().getStringView())); - - // Replace the original host, scheme and path. - downstream_headers.setScheme(absolute_url.scheme()); - downstream_headers.setHost(absolute_url.hostAndPort()); - downstream_headers.setPath(absolute_url.pathAndQueryParams()); - - return true; -} - constexpr uint64_t TimeoutPrecisionFactor = 100; Http::ConnectionPool::Instance* @@ -1305,8 +1252,9 @@ void Filter::onUpstreamHeaders(uint64_t response_code, Http::ResponseHeaderMapPt } } - if (static_cast(response_code) == Http::Code::Found && - route_entry_->internalRedirectAction() == InternalRedirectAction::Handle && + if (route_entry_->internalRedirectPolicy().enabled() && + route_entry_->internalRedirectPolicy().shouldRedirectForResponseCode( + static_cast(response_code)) && setupRedirect(*headers, upstream_request)) { return; // If the redirect could not be handled, fail open and let it pass to the @@ -1489,15 +1437,11 @@ bool Filter::setupRedirect(const Http::ResponseHeaderMap& headers, attempting_internal_redirect_with_complete_stream_ = upstream_request.upstreamTiming().last_upstream_rx_byte_received_ && downstream_end_stream_; - const StreamInfo::FilterStateSharedPtr& filter_state = callbacks_->streamInfo().filterState(); - // Redirects are not supported for streaming requests yet. if (downstream_end_stream_ && !callbacks_->decodingBuffer() && // Redirects with body not yet supported. location != nullptr && - convertRequestHeadersForInternalRedirect(*downstream_headers_, *filter_state, - route_entry_->maxInternalRedirects(), *location, - *callbacks_->connection()) && + convertRequestHeadersForInternalRedirect(*downstream_headers_, *location) && callbacks_->recreateStream()) { cluster_->stats().upstream_internal_redirect_succeeded_total_.inc(); return true; @@ -1510,6 +1454,95 @@ bool Filter::setupRedirect(const Http::ResponseHeaderMap& headers, return false; } +bool Filter::convertRequestHeadersForInternalRedirect(Http::RequestHeaderMap& downstream_headers, + const Http::HeaderEntry& internal_redirect) { + if (!downstream_headers.Path()) { + ENVOY_STREAM_LOG(trace, "no path in downstream_headers", *callbacks_); + return false; + } + + // Make sure the redirect response contains a URL to redirect to. + if (internal_redirect.value().getStringView().length() == 0) { + config_.stats_.passthrough_internal_redirect_bad_location_.inc(); + return false; + } + Http::Utility::Url absolute_url; + if (!absolute_url.initialize(internal_redirect.value().getStringView(), false)) { + config_.stats_.passthrough_internal_redirect_bad_location_.inc(); + return false; + } + + const auto& policy = route_entry_->internalRedirectPolicy(); + // Don't allow serving TLS responses over plaintext unless allowed by policy. + const bool scheme_is_http = schemeIsHttp(downstream_headers, *callbacks_->connection()); + const bool target_is_http = absolute_url.scheme() == Http::Headers::get().SchemeValues.Http; + if (!policy.isCrossSchemeRedirectAllowed() && scheme_is_http != target_is_http) { + config_.stats_.passthrough_internal_redirect_unsafe_scheme_.inc(); + return false; + } + + const StreamInfo::FilterStateSharedPtr& filter_state = callbacks_->streamInfo().filterState(); + // Make sure that performing the redirect won't result in exceeding the configured number of + // redirects allowed for this route. 
+ if (!filter_state->hasData(NumInternalRedirectsFilterStateName)) { + filter_state->setData( + NumInternalRedirectsFilterStateName, std::make_shared(0), + StreamInfo::FilterState::StateType::Mutable, StreamInfo::FilterState::LifeSpan::Request); + } + StreamInfo::UInt32Accessor& num_internal_redirect = + filter_state->getDataMutable(NumInternalRedirectsFilterStateName); + + if (num_internal_redirect.value() >= policy.maxInternalRedirects()) { + config_.stats_.passthrough_internal_redirect_too_many_redirects_.inc(); + return false; + } + std::string original_host(downstream_headers.Host()->value().getStringView()); + std::string original_path(downstream_headers.Path()->value().getStringView()); + const bool scheme_is_set = (downstream_headers.Scheme() != nullptr); + Cleanup restore_original_headers( + [&downstream_headers, original_host, original_path, scheme_is_set, scheme_is_http]() { + downstream_headers.setHost(original_host); + downstream_headers.setPath(original_path); + if (scheme_is_set) { + downstream_headers.setScheme(scheme_is_http ? Http::Headers::get().SchemeValues.Http + : Http::Headers::get().SchemeValues.Https); + } + }); + + // Replace the original host, scheme and path. + downstream_headers.setScheme(absolute_url.scheme()); + downstream_headers.setHost(absolute_url.hostAndPort()); + downstream_headers.setPath(absolute_url.pathAndQueryParams()); + + callbacks_->clearRouteCache(); + const auto route = callbacks_->route(); + // Don't allow a redirect to a non existing route. + if (!route) { + config_.stats_.passthrough_internal_redirect_no_route_.inc(); + return false; + } + + const auto& route_name = route->routeEntry()->routeName(); + for (const auto& predicate : policy.predicates()) { + if (!predicate->acceptTargetRoute(*filter_state, route_name, !scheme_is_http, + !target_is_http)) { + config_.stats_.passthrough_internal_redirect_predicate_.inc(); + ENVOY_STREAM_LOG(trace, "rejecting redirect targeting {}, by {} predicate", *callbacks_, + route_name, predicate->name()); + return false; + } + } + + num_internal_redirect.increment(); + restore_original_headers.cancel(); + // Preserve the original request URL for the second pass. + downstream_headers.setEnvoyOriginalUrl(absl::StrCat(scheme_is_http + ? 
Http::Headers::get().SchemeValues.Http + : Http::Headers::get().SchemeValues.Https, + "://", original_host, original_path)); + return true; +} + void Filter::doRetry() { ENVOY_STREAM_LOG(debug, "performing retry", *callbacks_); diff --git a/source/common/router/router.h b/source/common/router/router.h index 83ba6dee85d3..682aaed92ca6 100644 --- a/source/common/router/router.h +++ b/source/common/router/router.h @@ -41,6 +41,11 @@ namespace Router { */ // clang-format off #define ALL_ROUTER_STATS(COUNTER) \ + COUNTER(passthrough_internal_redirect_bad_location) \ + COUNTER(passthrough_internal_redirect_unsafe_scheme) \ + COUNTER(passthrough_internal_redirect_too_many_redirects) \ + COUNTER(passthrough_internal_redirect_no_route) \ + COUNTER(passthrough_internal_redirect_predicate) \ COUNTER(no_route) \ COUNTER(no_cluster) \ COUNTER(rq_redirect) \ @@ -502,6 +507,8 @@ class Filter : Logger::Loggable, void resetOtherUpstreams(UpstreamRequest& upstream_request); void sendNoHealthyUpstreamResponse(); bool setupRedirect(const Http::ResponseHeaderMap& headers, UpstreamRequest& upstream_request); + bool convertRequestHeadersForInternalRedirect(Http::RequestHeaderMap& downstream_headers, + const Http::HeaderEntry& internal_redirect); void updateOutlierDetection(Upstream::Outlier::Result result, UpstreamRequest& upstream_request, absl::optional code); void doRetry(); diff --git a/source/extensions/extensions_build_config.bzl b/source/extensions/extensions_build_config.bzl index 434a74d949ce..9bd2531dcdb7 100644 --- a/source/extensions/extensions_build_config.bzl +++ b/source/extensions/extensions_build_config.bzl @@ -180,4 +180,11 @@ EXTENSIONS = { # "envoy.filters.http.cache.simple_http_cache": "//source/extensions/filters/http/cache/simple_http_cache:simple_http_cache_lib", + + # + # Internal redirect predicates + # + "envoy.internal_redirect_predicates.allow_listed_routes": "//source/extensions/internal_redirect/allow_listed_routes:config", + "envoy.internal_redirect_predicates.previous_routes": "//source/extensions/internal_redirect/previous_routes:config", + "envoy.internal_redirect_predicates.safe_cross_scheme": "//source/extensions/internal_redirect/safe_cross_scheme:config", } diff --git a/source/extensions/internal_redirect/BUILD b/source/extensions/internal_redirect/BUILD new file mode 100644 index 000000000000..6156949edef6 --- /dev/null +++ b/source/extensions/internal_redirect/BUILD @@ -0,0 +1,17 @@ +licenses(["notice"]) # Apache 2 + +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) + +envoy_package() + +envoy_cc_library( + name = "well_known_names", + hdrs = ["well_known_names.h"], + deps = [ + "//source/common/singleton:const_singleton", + ], +) diff --git a/source/extensions/internal_redirect/allow_listed_routes/BUILD b/source/extensions/internal_redirect/allow_listed_routes/BUILD new file mode 100644 index 000000000000..02cf2789dc79 --- /dev/null +++ b/source/extensions/internal_redirect/allow_listed_routes/BUILD @@ -0,0 +1,35 @@ +licenses(["notice"]) # Apache 2 + +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", + "envoy_cc_library", + "envoy_package", +) + +envoy_package() + +envoy_cc_library( + name = "allow_listed_routes_lib", + hdrs = ["allow_listed_routes.h"], + deps = [ + "//include/envoy/router:internal_redirect_interface", + "//include/envoy/stream_info:filter_state_interface", + "//source/extensions/internal_redirect:well_known_names", + "@envoy_api//envoy/extensions/internal_redirect/allow_listed_routes/v3:pkg_cc_proto", 
+ ], +) + +envoy_cc_extension( + name = "config", + srcs = ["config.cc"], + hdrs = ["config.h"], + security_posture = "robust_to_untrusted_downstream_and_upstream", + deps = [ + ":allow_listed_routes_lib", + "//include/envoy/registry", + "//include/envoy/router:internal_redirect_interface", + "//source/extensions/internal_redirect:well_known_names", + "@envoy_api//envoy/extensions/internal_redirect/allow_listed_routes/v3:pkg_cc_proto", + ], +) diff --git a/source/extensions/internal_redirect/allow_listed_routes/allow_listed_routes.h b/source/extensions/internal_redirect/allow_listed_routes/allow_listed_routes.h new file mode 100644 index 000000000000..72d8d605db0f --- /dev/null +++ b/source/extensions/internal_redirect/allow_listed_routes/allow_listed_routes.h @@ -0,0 +1,37 @@ +#pragma once + +#include "envoy/extensions/internal_redirect/allow_listed_routes/v3/allow_listed_routes_config.pb.h" +#include "envoy/router/internal_redirect.h" +#include "envoy/stream_info/filter_state.h" + +#include "extensions/internal_redirect/well_known_names.h" + +#include "absl/container/flat_hash_set.h" +#include "absl/strings/string_view.h" + +namespace Envoy { +namespace Extensions { +namespace InternalRedirect { + +class AllowListedRoutesPredicate : public Router::InternalRedirectPredicate { +public: + explicit AllowListedRoutesPredicate( + const envoy::extensions::internal_redirect::allow_listed_routes::v3::AllowListedRoutesConfig& + config) + : allowed_routes_(config.allowed_route_names().begin(), config.allowed_route_names().end()) {} + + bool acceptTargetRoute(StreamInfo::FilterState&, absl::string_view route_name, bool, + bool) override { + return allowed_routes_.contains(route_name); + } + + absl::string_view name() const override { + return InternalRedirectPredicateValues::get().AllowListedRoutesPredicate; + } + + const absl::flat_hash_set allowed_routes_; +}; + +} // namespace InternalRedirect +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/internal_redirect/allow_listed_routes/config.cc b/source/extensions/internal_redirect/allow_listed_routes/config.cc new file mode 100644 index 000000000000..55c2d5af81ce --- /dev/null +++ b/source/extensions/internal_redirect/allow_listed_routes/config.cc @@ -0,0 +1,14 @@ +#include "extensions/internal_redirect/allow_listed_routes/config.h" + +#include "envoy/registry/registry.h" +#include "envoy/router/internal_redirect.h" + +namespace Envoy { +namespace Extensions { +namespace InternalRedirect { + +REGISTER_FACTORY(AllowListedRoutesPredicateFactory, Router::InternalRedirectPredicateFactory); + +} // namespace InternalRedirect +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/internal_redirect/allow_listed_routes/config.h b/source/extensions/internal_redirect/allow_listed_routes/config.h new file mode 100644 index 000000000000..1a122f4f31b6 --- /dev/null +++ b/source/extensions/internal_redirect/allow_listed_routes/config.h @@ -0,0 +1,40 @@ +#pragma once + +#include "envoy/extensions/internal_redirect/allow_listed_routes/v3/allow_listed_routes_config.pb.h" +#include "envoy/extensions/internal_redirect/allow_listed_routes/v3/allow_listed_routes_config.pb.validate.h" +#include "envoy/router/internal_redirect.h" + +#include "common/protobuf/message_validator_impl.h" +#include "common/protobuf/utility.h" + +#include "extensions/internal_redirect/allow_listed_routes/allow_listed_routes.h" +#include "extensions/internal_redirect/well_known_names.h" + +namespace Envoy { +namespace Extensions { +namespace 
InternalRedirect { + +class AllowListedRoutesPredicateFactory : public Router::InternalRedirectPredicateFactory { +public: + Router::InternalRedirectPredicateSharedPtr + createInternalRedirectPredicate(const Protobuf::Message& config, absl::string_view) override { + auto allow_listed_routes_config = + MessageUtil::downcastAndValidate( + config, ProtobufMessage::getStrictValidationVisitor()); + return std::make_shared(allow_listed_routes_config); + } + + std::string name() const override { + return InternalRedirectPredicateValues::get().AllowListedRoutesPredicate; + } + + ProtobufTypes::MessagePtr createEmptyConfigProto() override { + return std::make_unique< + envoy::extensions::internal_redirect::allow_listed_routes::v3::AllowListedRoutesConfig>(); + } +}; + +} // namespace InternalRedirect +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/internal_redirect/previous_routes/BUILD b/source/extensions/internal_redirect/previous_routes/BUILD new file mode 100644 index 000000000000..d022a4c6719c --- /dev/null +++ b/source/extensions/internal_redirect/previous_routes/BUILD @@ -0,0 +1,35 @@ +licenses(["notice"]) # Apache 2 + +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", + "envoy_cc_library", + "envoy_package", +) + +envoy_package() + +envoy_cc_library( + name = "previous_routes_lib", + srcs = ["previous_routes.cc"], + hdrs = ["previous_routes.h"], + deps = [ + "//include/envoy/router:internal_redirect_interface", + "//include/envoy/stream_info:filter_state_interface", + "//source/extensions/internal_redirect:well_known_names", + ], +) + +envoy_cc_extension( + name = "config", + srcs = ["config.cc"], + hdrs = ["config.h"], + security_posture = "robust_to_untrusted_downstream_and_upstream", + deps = [ + ":previous_routes_lib", + "//include/envoy/registry", + "//include/envoy/router:internal_redirect_interface", + "//source/extensions/internal_redirect:well_known_names", + "@envoy_api//envoy/extensions/internal_redirect/previous_routes/v3:pkg_cc_proto", + ], +) diff --git a/source/extensions/internal_redirect/previous_routes/config.cc b/source/extensions/internal_redirect/previous_routes/config.cc new file mode 100644 index 000000000000..d5d4b67c491e --- /dev/null +++ b/source/extensions/internal_redirect/previous_routes/config.cc @@ -0,0 +1,14 @@ +#include "extensions/internal_redirect/previous_routes/config.h" + +#include "envoy/registry/registry.h" +#include "envoy/router/internal_redirect.h" + +namespace Envoy { +namespace Extensions { +namespace InternalRedirect { + +REGISTER_FACTORY(PreviousRoutesPredicateFactory, Router::InternalRedirectPredicateFactory); + +} // namespace InternalRedirect +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/internal_redirect/previous_routes/config.h b/source/extensions/internal_redirect/previous_routes/config.h new file mode 100644 index 000000000000..21ccb3c1646b --- /dev/null +++ b/source/extensions/internal_redirect/previous_routes/config.h @@ -0,0 +1,34 @@ +#pragma once + +#include "envoy/extensions/internal_redirect/previous_routes/v3/previous_routes_config.pb.h" +#include "envoy/extensions/internal_redirect/previous_routes/v3/previous_routes_config.pb.validate.h" +#include "envoy/router/internal_redirect.h" + +#include "extensions/internal_redirect/previous_routes/previous_routes.h" +#include "extensions/internal_redirect/well_known_names.h" + +namespace Envoy { +namespace Extensions { +namespace InternalRedirect { + +class PreviousRoutesPredicateFactory : public 
Router::InternalRedirectPredicateFactory { +public: + Router::InternalRedirectPredicateSharedPtr + createInternalRedirectPredicate(const Protobuf::Message&, + absl::string_view current_route_name) override { + return std::make_shared(current_route_name); + } + + std::string name() const override { + return InternalRedirectPredicateValues::get().PreviousRoutesPredicate; + } + + ProtobufTypes::MessagePtr createEmptyConfigProto() override { + return std::make_unique< + envoy::extensions::internal_redirect::previous_routes::v3::PreviousRoutesConfig>(); + } +}; + +} // namespace InternalRedirect +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/internal_redirect/previous_routes/previous_routes.cc b/source/extensions/internal_redirect/previous_routes/previous_routes.cc new file mode 100644 index 000000000000..a29187e29d43 --- /dev/null +++ b/source/extensions/internal_redirect/previous_routes/previous_routes.cc @@ -0,0 +1,52 @@ +#include "extensions/internal_redirect/previous_routes/previous_routes.h" + +#include "envoy/router/internal_redirect.h" +#include "envoy/stream_info/filter_state.h" + +#include "absl/container/flat_hash_set.h" + +namespace Envoy { +namespace Extensions { +namespace InternalRedirect { + +namespace { + +constexpr absl::string_view PreviousRoutesPredicateStateNamePrefix = + "envoy.internal_redirect.previous_routes_predicate_state"; + +class PreviousRoutesPredicateState : public StreamInfo::FilterState::Object { +public: + PreviousRoutesPredicateState() = default; + // Disallow copy so that we don't accidentally take a copy of the state + // through FilterState::getDataMutable, which will cause confusing bug that + // states are not updated in the original copy. + PreviousRoutesPredicateState(const PreviousRoutesPredicateState&) = delete; + PreviousRoutesPredicateState& operator=(const PreviousRoutesPredicateState&) = delete; + + bool insertRouteIfNotPresent(absl::string_view route) { + return previous_routes_.insert(std::string(route)).second; + } + +private: + absl::flat_hash_set previous_routes_; +}; + +} // namespace + +bool PreviousRoutesPredicate::acceptTargetRoute(StreamInfo::FilterState& filter_state, + absl::string_view route_name, bool, bool) { + auto filter_state_name = + absl::StrCat(PreviousRoutesPredicateStateNamePrefix, ".", current_route_name_); + if (!filter_state.hasData(filter_state_name)) { + filter_state.setData(filter_state_name, std::make_unique(), + StreamInfo::FilterState::StateType::Mutable, + StreamInfo::FilterState::LifeSpan::Request); + } + auto& predicate_state = + filter_state.getDataMutable(filter_state_name); + return predicate_state.insertRouteIfNotPresent(route_name); +} + +} // namespace InternalRedirect +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/internal_redirect/previous_routes/previous_routes.h b/source/extensions/internal_redirect/previous_routes/previous_routes.h new file mode 100644 index 000000000000..b79f4f8b1754 --- /dev/null +++ b/source/extensions/internal_redirect/previous_routes/previous_routes.h @@ -0,0 +1,32 @@ +#pragma once + +#include "envoy/router/internal_redirect.h" +#include "envoy/stream_info/filter_state.h" + +#include "extensions/internal_redirect/well_known_names.h" + +#include "absl/strings/string_view.h" + +namespace Envoy { +namespace Extensions { +namespace InternalRedirect { + +class PreviousRoutesPredicate : public Router::InternalRedirectPredicate { +public: + explicit PreviousRoutesPredicate(absl::string_view current_route_name) + : 
current_route_name_(current_route_name) {} + + bool acceptTargetRoute(StreamInfo::FilterState& filter_state, absl::string_view route_name, bool, + bool) override; + + absl::string_view name() const override { + return InternalRedirectPredicateValues::get().PreviousRoutesPredicate; + } + +private: + const std::string current_route_name_; +}; + +} // namespace InternalRedirect +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/internal_redirect/safe_cross_scheme/BUILD b/source/extensions/internal_redirect/safe_cross_scheme/BUILD new file mode 100644 index 000000000000..94293850b53b --- /dev/null +++ b/source/extensions/internal_redirect/safe_cross_scheme/BUILD @@ -0,0 +1,34 @@ +licenses(["notice"]) # Apache 2 + +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", + "envoy_cc_library", + "envoy_package", +) + +envoy_package() + +envoy_cc_library( + name = "safe_cross_scheme_lib", + hdrs = ["safe_cross_scheme.h"], + deps = [ + "//include/envoy/router:internal_redirect_interface", + "//include/envoy/stream_info:filter_state_interface", + "//source/extensions/internal_redirect:well_known_names", + ], +) + +envoy_cc_extension( + name = "config", + srcs = ["config.cc"], + hdrs = ["config.h"], + security_posture = "robust_to_untrusted_downstream_and_upstream", + deps = [ + ":safe_cross_scheme_lib", + "//include/envoy/registry", + "//include/envoy/router:internal_redirect_interface", + "//source/extensions/internal_redirect:well_known_names", + "@envoy_api//envoy/extensions/internal_redirect/safe_cross_scheme/v3:pkg_cc_proto", + ], +) diff --git a/source/extensions/internal_redirect/safe_cross_scheme/config.cc b/source/extensions/internal_redirect/safe_cross_scheme/config.cc new file mode 100644 index 000000000000..43b7664fd7ff --- /dev/null +++ b/source/extensions/internal_redirect/safe_cross_scheme/config.cc @@ -0,0 +1,14 @@ +#include "extensions/internal_redirect/safe_cross_scheme/config.h" + +#include "envoy/registry/registry.h" +#include "envoy/router/internal_redirect.h" + +namespace Envoy { +namespace Extensions { +namespace InternalRedirect { + +REGISTER_FACTORY(SafeCrossSchemePredicateFactory, Router::InternalRedirectPredicateFactory); + +} // namespace InternalRedirect +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/internal_redirect/safe_cross_scheme/config.h b/source/extensions/internal_redirect/safe_cross_scheme/config.h new file mode 100644 index 000000000000..49a8fdfa8b69 --- /dev/null +++ b/source/extensions/internal_redirect/safe_cross_scheme/config.h @@ -0,0 +1,32 @@ +#pragma once + +#include "envoy/extensions/internal_redirect/safe_cross_scheme/v3/safe_cross_scheme_config.pb.h" +#include "envoy/router/internal_redirect.h" + +#include "extensions/internal_redirect/safe_cross_scheme/safe_cross_scheme.h" +#include "extensions/internal_redirect/well_known_names.h" + +namespace Envoy { +namespace Extensions { +namespace InternalRedirect { + +class SafeCrossSchemePredicateFactory : public Router::InternalRedirectPredicateFactory { +public: + Router::InternalRedirectPredicateSharedPtr + createInternalRedirectPredicate(const Protobuf::Message&, absl::string_view) override { + return std::make_shared(); + } + + std::string name() const override { + return InternalRedirectPredicateValues::get().SafeCrossSchemePredicate; + } + + ProtobufTypes::MessagePtr createEmptyConfigProto() override { + return std::make_unique< + envoy::extensions::internal_redirect::safe_cross_scheme::v3::SafeCrossSchemeConfig>(); + } +}; + +} 
// namespace InternalRedirect +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/internal_redirect/safe_cross_scheme/safe_cross_scheme.h b/source/extensions/internal_redirect/safe_cross_scheme/safe_cross_scheme.h new file mode 100644 index 000000000000..fb33e58b6fdd --- /dev/null +++ b/source/extensions/internal_redirect/safe_cross_scheme/safe_cross_scheme.h @@ -0,0 +1,28 @@ +#pragma once + +#include "envoy/router/internal_redirect.h" +#include "envoy/stream_info/filter_state.h" + +#include "extensions/internal_redirect/well_known_names.h" + +#include "absl/strings/string_view.h" + +namespace Envoy { +namespace Extensions { +namespace InternalRedirect { + +class SafeCrossSchemePredicate : public Router::InternalRedirectPredicate { +public: + bool acceptTargetRoute(StreamInfo::FilterState&, absl::string_view, bool downstream_is_https, + bool target_is_https) override { + return downstream_is_https || !target_is_https; + } + + absl::string_view name() const override { + return InternalRedirectPredicateValues::get().SafeCrossSchemePredicate; + } +}; + +} // namespace InternalRedirect +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/internal_redirect/well_known_names.h b/source/extensions/internal_redirect/well_known_names.h new file mode 100644 index 000000000000..003e270329d6 --- /dev/null +++ b/source/extensions/internal_redirect/well_known_names.h @@ -0,0 +1,27 @@ +#pragma once + +#include + +#include "common/singleton/const_singleton.h" + +namespace Envoy { +namespace Extensions { +namespace InternalRedirect { + +/** + * Well-known internal redirect predicate names. + */ +class InternalRedirectPredicatesNameValues { +public: + const std::string AllowListedRoutesPredicate = + "envoy.internal_redirect_predicates.allow_listed_routes"; + const std::string PreviousRoutesPredicate = "envoy.internal_redirect_predicates.previous_routes"; + const std::string SafeCrossSchemePredicate = + "envoy.internal_redirect_predicates.safe_cross_scheme"; +}; + +using InternalRedirectPredicateValues = ConstSingleton; + +} // namespace InternalRedirect +} // namespace Extensions +} // namespace Envoy diff --git a/test/common/http/async_client_impl_test.cc b/test/common/http/async_client_impl_test.cc index 66f8e3f3e94d..e21c496e89a5 100644 --- a/test/common/http/async_client_impl_test.cc +++ b/test/common/http/async_client_impl_test.cc @@ -1400,8 +1400,7 @@ TEST_F(AsyncClientImplUnitTest, RouteImplInitTest) { route_impl_.routeEntry()->typedMetadata().get("bar")); EXPECT_EQ(nullptr, route_impl_.routeEntry()->perFilterConfig("bar")); EXPECT_TRUE(route_impl_.routeEntry()->upgradeMap().empty()); - EXPECT_EQ(Router::InternalRedirectAction::PassThrough, - route_impl_.routeEntry()->internalRedirectAction()); + EXPECT_EQ(false, route_impl_.routeEntry()->internalRedirectPolicy().enabled()); EXPECT_TRUE(route_impl_.routeEntry()->shadowPolicies().empty()); EXPECT_TRUE(route_impl_.routeEntry()->virtualHost().rateLimitPolicy().empty()); EXPECT_EQ(nullptr, route_impl_.routeEntry()->virtualHost().corsPolicy()); diff --git a/test/common/router/config_impl_test.cc b/test/common/router/config_impl_test.cc index dc7f9bc53b4a..a48a9174f9e8 100644 --- a/test/common/router/config_impl_test.cc +++ b/test/common/router/config_impl_test.cc @@ -2739,8 +2739,7 @@ TEST_F(RouteMatcherTest, ClusterHeader) { route->routeEntry()->maxGrpcTimeout(); route->routeEntry()->grpcTimeoutOffset(); route->routeEntry()->upgradeMap(); - route->routeEntry()->internalRedirectAction(); - 
route->routeEntry()->maxInternalRedirects(); + route->routeEntry()->internalRedirectPolicy(); } } @@ -6886,6 +6885,130 @@ name: RetriableStatusCodes EXPECT_NE(predicates1, predicates2); } +TEST_F(RouteConfigurationV2, InternalRedirctIsDisabledWhenNotSpecifiedInRouteAction) { + const std::string InternalRedirectEnabled = R"EOF( +name: InternalRedirectEnabled +virtual_hosts: + - name: regex + domains: [idle.lyft.com] + routes: + - match: + safe_regex: + google_re2: {} + regex: "/regex" + route: + cluster: some-cluster + )EOF"; + + TestConfigImpl config(parseRouteConfigurationFromV2Yaml(InternalRedirectEnabled), + factory_context_, true); + Http::TestRequestHeaderMapImpl headers = + genRedirectHeaders("idle.lyft.com", "/regex", true, false); + const auto& internal_redirect_policy = + config.route(headers, 0)->routeEntry()->internalRedirectPolicy(); + EXPECT_FALSE(internal_redirect_policy.enabled()); +} + +TEST_F(RouteConfigurationV2, DefaultInternalRedirctPolicyIsSensible) { + const std::string InternalRedirectEnabled = R"EOF( +name: InternalRedirectEnabled +virtual_hosts: + - name: regex + domains: [idle.lyft.com] + routes: + - match: + safe_regex: + google_re2: {} + regex: "/regex" + route: + cluster: some-cluster + internal_redirect_policy: {} + )EOF"; + + TestConfigImpl config(parseRouteConfigurationFromV2Yaml(InternalRedirectEnabled), + factory_context_, true); + Http::TestRequestHeaderMapImpl headers = + genRedirectHeaders("idle.lyft.com", "/regex", true, false); + const auto& internal_redirect_policy = + config.route(headers, 0)->routeEntry()->internalRedirectPolicy(); + EXPECT_TRUE(internal_redirect_policy.enabled()); + EXPECT_TRUE(internal_redirect_policy.shouldRedirectForResponseCode(static_cast(302))); + EXPECT_FALSE( + internal_redirect_policy.shouldRedirectForResponseCode(static_cast(200))); + EXPECT_EQ(1, internal_redirect_policy.maxInternalRedirects()); + EXPECT_TRUE(internal_redirect_policy.predicates().empty()); + EXPECT_FALSE(internal_redirect_policy.isCrossSchemeRedirectAllowed()); +} + +TEST_F(RouteConfigurationV2, InternalRedirctPolicyDropsInvalidRedirectCode) { + const std::string InternalRedirectEnabled = R"EOF( +name: InternalRedirectEnabled +virtual_hosts: + - name: regex + domains: [idle.lyft.com] + routes: + - match: + safe_regex: + google_re2: {} + regex: "/regex" + route: + cluster: some-cluster + internal_redirect_policy: + redirect_response_codes: [301, 302, 303, 304] + )EOF"; + + TestConfigImpl config(parseRouteConfigurationFromV2Yaml(InternalRedirectEnabled), + factory_context_, true); + Http::TestRequestHeaderMapImpl headers = + genRedirectHeaders("idle.lyft.com", "/regex", true, false); + const auto& internal_redirect_policy = + config.route(headers, 0)->routeEntry()->internalRedirectPolicy(); + EXPECT_TRUE(internal_redirect_policy.enabled()); + EXPECT_TRUE(internal_redirect_policy.shouldRedirectForResponseCode(static_cast(301))); + EXPECT_TRUE(internal_redirect_policy.shouldRedirectForResponseCode(static_cast(302))); + EXPECT_TRUE(internal_redirect_policy.shouldRedirectForResponseCode(static_cast(303))); + EXPECT_FALSE( + internal_redirect_policy.shouldRedirectForResponseCode(static_cast(304))); + EXPECT_FALSE( + internal_redirect_policy.shouldRedirectForResponseCode(static_cast(305))); + EXPECT_FALSE( + internal_redirect_policy.shouldRedirectForResponseCode(static_cast(306))); + EXPECT_FALSE( + internal_redirect_policy.shouldRedirectForResponseCode(static_cast(307))); +} + +TEST_F(RouteConfigurationV2, 
InternalRedirctPolicyDropsInvalidRedirectCodeCauseEmptySet) { + const std::string InternalRedirectEnabled = R"EOF( +name: InternalRedirectEnabled +virtual_hosts: + - name: regex + domains: [idle.lyft.com] + routes: + - match: + safe_regex: + google_re2: {} + regex: "/regex" + route: + cluster: some-cluster + internal_redirect_policy: + redirect_response_codes: [200, 304] + )EOF"; + + TestConfigImpl config(parseRouteConfigurationFromV2Yaml(InternalRedirectEnabled), + factory_context_, true); + Http::TestRequestHeaderMapImpl headers = + genRedirectHeaders("idle.lyft.com", "/regex", true, false); + const auto& internal_redirect_policy = + config.route(headers, 0)->routeEntry()->internalRedirectPolicy(); + EXPECT_TRUE(internal_redirect_policy.enabled()); + EXPECT_FALSE( + internal_redirect_policy.shouldRedirectForResponseCode(static_cast(302))); + EXPECT_FALSE( + internal_redirect_policy.shouldRedirectForResponseCode(static_cast(304))); + EXPECT_FALSE( + internal_redirect_policy.shouldRedirectForResponseCode(static_cast(200))); +} + class PerFilterConfigsTest : public testing::Test, public ConfigImplTestBase { public: PerFilterConfigsTest() diff --git a/test/common/router/router_test.cc b/test/common/router/router_test.cc index 058f7a580d00..ff40ce748fe4 100644 --- a/test/common/router/router_test.cc +++ b/test/common/router/router_test.cc @@ -294,16 +294,18 @@ class RouterTestBase : public testing::Test { router_.decodeHeaders(default_request_headers_, end_stream); } - void enableRedirects() { - ON_CALL(callbacks_.route_->route_entry_, internalRedirectAction()) - .WillByDefault(Return(InternalRedirectAction::Handle)); - ON_CALL(callbacks_, connection()).WillByDefault(Return(&connection_)); - setMaxInternalRedirects(1); - } - - void setMaxInternalRedirects(uint32_t max_internal_redirects) { - ON_CALL(callbacks_.route_->route_entry_, maxInternalRedirects()) + void enableRedirects(uint32_t max_internal_redirects = 1) { + ON_CALL(callbacks_.route_->route_entry_.internal_redirect_policy_, enabled()) + .WillByDefault(Return(true)); + ON_CALL(callbacks_.route_->route_entry_.internal_redirect_policy_, + shouldRedirectForResponseCode(_)) + .WillByDefault(Return(true)); + ON_CALL(callbacks_.route_->route_entry_.internal_redirect_policy_, maxInternalRedirects()) .WillByDefault(Return(max_internal_redirects)); + ON_CALL(callbacks_.route_->route_entry_.internal_redirect_policy_, + isCrossSchemeRedirectAllowed()) + .WillByDefault(Return(false)); + ON_CALL(callbacks_, connection()).WillByDefault(Return(&connection_)); } void setNumPreviousRedirect(uint32_t num_previous_redirects) { @@ -4433,11 +4435,12 @@ TEST_F(RouterTest, RetryRespectsRetryHostPredicate) { } TEST_F(RouterTest, InternalRedirectRejectedWhenReachingMaxInternalRedirect) { - enableRedirects(); - setMaxInternalRedirects(3); + enableRedirects(3); setNumPreviousRedirect(3); sendRequest(); + EXPECT_CALL(callbacks_, recreateStream()).Times(0); + response_decoder_->decodeHeaders(std::move(redirect_headers_), false); Buffer::OwnedImpl data("1234567890"); @@ -4445,6 +4448,8 @@ TEST_F(RouterTest, InternalRedirectRejectedWhenReachingMaxInternalRedirect) { EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_ .counter("upstream_internal_redirect_failed_total") .value()); + EXPECT_EQ(1UL, + stats_store_.counter("test.passthrough_internal_redirect_too_many_redirects").value()); } TEST_F(RouterTest, InternalRedirectRejectedWithEmptyLocation) { @@ -4452,6 +4457,9 @@ TEST_F(RouterTest, InternalRedirectRejectedWithEmptyLocation) { sendRequest(); 
redirect_headers_->setLocation(""); + + EXPECT_CALL(callbacks_, recreateStream()).Times(0); + response_decoder_->decodeHeaders(std::move(redirect_headers_), false); Buffer::OwnedImpl data("1234567890"); @@ -4459,6 +4467,7 @@ TEST_F(RouterTest, InternalRedirectRejectedWithEmptyLocation) { EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_ .counter("upstream_internal_redirect_failed_total") .value()); + EXPECT_EQ(1UL, stats_store_.counter("test.passthrough_internal_redirect_bad_location").value()); } TEST_F(RouterTest, InternalRedirectRejectedWithInvalidLocation) { @@ -4466,6 +4475,9 @@ TEST_F(RouterTest, InternalRedirectRejectedWithInvalidLocation) { sendRequest(); redirect_headers_->setLocation("h"); + + EXPECT_CALL(callbacks_, recreateStream()).Times(0); + response_decoder_->decodeHeaders(std::move(redirect_headers_), false); Buffer::OwnedImpl data("1234567890"); @@ -4473,6 +4485,7 @@ TEST_F(RouterTest, InternalRedirectRejectedWithInvalidLocation) { EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_ .counter("upstream_internal_redirect_failed_total") .value()); + EXPECT_EQ(1UL, stats_store_.counter("test.passthrough_internal_redirect_bad_location").value()); } TEST_F(RouterTest, InternalRedirectRejectedWithoutCompleteRequest) { @@ -4480,6 +4493,8 @@ TEST_F(RouterTest, InternalRedirectRejectedWithoutCompleteRequest) { sendRequest(false); + EXPECT_CALL(callbacks_, recreateStream()).Times(0); + response_decoder_->decodeHeaders(std::move(redirect_headers_), false); Buffer::OwnedImpl data("1234567890"); @@ -4495,6 +4510,9 @@ TEST_F(RouterTest, InternalRedirectRejectedWithoutLocation) { sendRequest(); redirect_headers_->removeLocation(); + + EXPECT_CALL(callbacks_, recreateStream()).Times(0); + response_decoder_->decodeHeaders(std::move(redirect_headers_), false); Buffer::OwnedImpl data("1234567890"); response_decoder_->decodeData(data, true); @@ -4508,7 +4526,10 @@ TEST_F(RouterTest, InternalRedirectRejectedWithBody) { sendRequest(); - EXPECT_CALL(callbacks_, decodingBuffer()).Times(1); + Buffer::InstancePtr body_data(new Buffer::OwnedImpl("random_fake_data")); + EXPECT_CALL(callbacks_, decodingBuffer()).WillOnce(Return(body_data.get())); + EXPECT_CALL(callbacks_, recreateStream()).Times(0); + response_decoder_->decodeHeaders(std::move(redirect_headers_), false); Buffer::OwnedImpl data("1234567890"); response_decoder_->decodeData(data, true); @@ -4517,26 +4538,61 @@ TEST_F(RouterTest, InternalRedirectRejectedWithBody) { .value()); } -TEST_F(RouterTest, InternalRedirectRejectedWithCrossSchemeRedirect) { +TEST_F(RouterTest, CrossSchemeRedirectRejectedByPolicy) { enableRedirects(); sendRequest(); redirect_headers_->setLocation("https://www.foo.com"); + + EXPECT_CALL(callbacks_, decodingBuffer()).Times(1); + EXPECT_CALL(callbacks_, recreateStream()).Times(0); + response_decoder_->decodeHeaders(std::move(redirect_headers_), true); EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_ .counter("upstream_internal_redirect_failed_total") .value()); + EXPECT_EQ(1UL, stats_store_.counter("test.passthrough_internal_redirect_unsafe_scheme").value()); } -TEST_F(RouterTest, HttpInternalRedirectSucceeded) { +TEST_F(RouterTest, InternalRedirectRejectedByPredicate) { enableRedirects(); - setMaxInternalRedirects(3); + + sendRequest(); + + redirect_headers_->setLocation("http://www.foo.com/some/path"); + + auto mock_predicate = std::make_shared>(); + + EXPECT_CALL(callbacks_, decodingBuffer()).Times(1); + EXPECT_CALL(callbacks_, clearRouteCache()).Times(1); + 
EXPECT_CALL(callbacks_.route_->route_entry_.internal_redirect_policy_, predicates()) + .WillOnce(Return(std::vector({mock_predicate}))); + EXPECT_CALL(*mock_predicate, acceptTargetRoute(_, _, _, _)).WillOnce(Return(false)); + ON_CALL(*mock_predicate, name()).WillByDefault(Return("mock_predicate")); + EXPECT_CALL(callbacks_, recreateStream()).Times(0); + + response_decoder_->decodeHeaders(std::move(redirect_headers_), true); + EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_ + .counter("upstream_internal_redirect_failed_total") + .value()); + EXPECT_EQ(1UL, stats_store_.counter("test.passthrough_internal_redirect_predicate").value()); + + // Make sure the original host/path is preserved. + EXPECT_EQ("host", default_request_headers_.Host()->value().getStringView()); + EXPECT_EQ("/", default_request_headers_.Path()->value().getStringView()); + // Make sure x-envoy-original-url is not set for unsuccessful redirect. + EXPECT_EQ(nullptr, default_request_headers_.EnvoyOriginalUrl()); +} + +TEST_F(RouterTest, HttpInternalRedirectSucceeded) { + enableRedirects(3); setNumPreviousRedirect(2); default_request_headers_.setForwardedProto("http"); sendRequest(); EXPECT_CALL(callbacks_, decodingBuffer()).Times(1); + EXPECT_CALL(callbacks_, clearRouteCache()).Times(1); EXPECT_CALL(callbacks_, recreateStream()).Times(1).WillOnce(Return(true)); response_decoder_->decodeHeaders(std::move(redirect_headers_), false); EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_ @@ -4553,8 +4609,7 @@ TEST_F(RouterTest, HttpInternalRedirectSucceeded) { TEST_F(RouterTest, HttpsInternalRedirectSucceeded) { auto ssl_connection = std::make_shared(); - enableRedirects(); - setMaxInternalRedirects(3); + enableRedirects(3); setNumPreviousRedirect(1); sendRequest(); @@ -4562,6 +4617,30 @@ TEST_F(RouterTest, HttpsInternalRedirectSucceeded) { redirect_headers_->setLocation("https://www.foo.com"); EXPECT_CALL(connection_, ssl()).Times(1).WillOnce(Return(ssl_connection)); EXPECT_CALL(callbacks_, decodingBuffer()).Times(1); + EXPECT_CALL(callbacks_, clearRouteCache()).Times(1); + EXPECT_CALL(callbacks_, recreateStream()).Times(1).WillOnce(Return(true)); + response_decoder_->decodeHeaders(std::move(redirect_headers_), false); + EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_ + .counter("upstream_internal_redirect_succeeded_total") + .value()); + + // In production, the HCM recreateStream would have called this. 
+ router_.onDestroy(); +} + +TEST_F(RouterTest, CrossSchemeRedirectAllowedByPolicy) { + auto ssl_connection = std::make_shared(); + enableRedirects(); + + sendRequest(); + + redirect_headers_->setLocation("http://www.foo.com"); + EXPECT_CALL(connection_, ssl()).Times(1).WillOnce(Return(ssl_connection)); + EXPECT_CALL(callbacks_, decodingBuffer()).Times(1); + EXPECT_CALL(callbacks_.route_->route_entry_.internal_redirect_policy_, + isCrossSchemeRedirectAllowed()) + .WillOnce(Return(true)); + EXPECT_CALL(callbacks_, clearRouteCache()).Times(1); EXPECT_CALL(callbacks_, recreateStream()).Times(1).WillOnce(Return(true)); response_decoder_->decodeHeaders(std::move(redirect_headers_), false); EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_ diff --git a/test/extensions/internal_redirect/previous_routes/BUILD b/test/extensions/internal_redirect/previous_routes/BUILD new file mode 100644 index 000000000000..8425dec9126c --- /dev/null +++ b/test/extensions/internal_redirect/previous_routes/BUILD @@ -0,0 +1,24 @@ +licenses(["notice"]) # Apache 2 + +load( + "//bazel:envoy_build_system.bzl", + "envoy_package", +) +load( + "//test/extensions:extensions_build_system.bzl", + "envoy_extension_cc_test", +) + +envoy_package() + +envoy_extension_cc_test( + name = "config_test", + srcs = ["config_test.cc"], + extension_name = "envoy.internal_redirect_predicates.previous_routes", + deps = [ + "//source/common/stream_info:filter_state_lib", + "//source/extensions/internal_redirect:well_known_names", + "//source/extensions/internal_redirect/previous_routes:config", + "@envoy_api//envoy/extensions/internal_redirect/previous_routes/v3:pkg_cc_proto", + ], +) diff --git a/test/extensions/internal_redirect/previous_routes/config_test.cc b/test/extensions/internal_redirect/previous_routes/config_test.cc new file mode 100644 index 000000000000..1d69320fc2ed --- /dev/null +++ b/test/extensions/internal_redirect/previous_routes/config_test.cc @@ -0,0 +1,83 @@ +#include "envoy/extensions/internal_redirect/previous_routes/v3/previous_routes_config.pb.h" +#include "envoy/registry/registry.h" +#include "envoy/router/internal_redirect.h" + +#include "common/stream_info/filter_state_impl.h" + +#include "extensions/internal_redirect/previous_routes/config.h" +#include "extensions/internal_redirect/well_known_names.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using namespace testing; + +namespace Envoy { +namespace Extensions { +namespace InternalRedirect { +namespace { + +class PreviousRoutesTest : public testing::Test { +protected: + PreviousRoutesTest() : filter_state_(StreamInfo::FilterState::LifeSpan::FilterChain) { + factory_ = Registry::FactoryRegistry::getFactory( + InternalRedirectPredicateValues::get().PreviousRoutesPredicate); + config_ = factory_->createEmptyConfigProto(); + } + + StreamInfo::FilterStateImpl filter_state_; + Router::InternalRedirectPredicateFactory* factory_; + ProtobufTypes::MessagePtr config_; +}; + +TEST_F(PreviousRoutesTest, TargetIsOnlyTakenOnce) { + std::string current_route_name = "fake_current_route"; + // Create the predicate for the first time. It should remember nothing in the + // filter state, so it allows the redirect. + { + auto predicate = factory_->createInternalRedirectPredicate(*config_, current_route_name); + ASSERT(predicate); + + EXPECT_TRUE(predicate->acceptTargetRoute(filter_state_, "route_1", false, false)); + // New filter state data is created with route name. 
+ EXPECT_TRUE(filter_state_.hasDataWithName( + "envoy.internal_redirect.previous_routes_predicate_state.fake_current_route")); + } + + // The second predicate should see the previously taken route. + { + auto predicate = factory_->createInternalRedirectPredicate(*config_, current_route_name); + ASSERT(predicate); + + EXPECT_FALSE(predicate->acceptTargetRoute(filter_state_, "route_1", false, false)); + } +} + +TEST_F(PreviousRoutesTest, RoutesAreIndependent) { + // Create the predicate on route_0. + { + auto predicate = factory_->createInternalRedirectPredicate(*config_, "route_0"); + ASSERT(predicate); + + EXPECT_TRUE(predicate->acceptTargetRoute(filter_state_, "route_2", false, false)); + // New filter state data is created with route name. + EXPECT_TRUE(filter_state_.hasDataWithName( + "envoy.internal_redirect.previous_routes_predicate_state.route_0")); + } + + // The predicate created on route_1 should also allow a redirect to route_2 + { + auto predicate = factory_->createInternalRedirectPredicate(*config_, "route_1"); + ASSERT(predicate); + + EXPECT_TRUE(predicate->acceptTargetRoute(filter_state_, "route_2", false, false)); + // New filter state data is created with route name. + EXPECT_TRUE(filter_state_.hasDataWithName( + "envoy.internal_redirect.previous_routes_predicate_state.route_1")); + } +} + +} // namespace +} // namespace InternalRedirect +} // namespace Extensions +} // namespace Envoy diff --git a/test/integration/BUILD b/test/integration/BUILD index e1def6423fe3..cce09495678d 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -634,9 +634,15 @@ envoy_cc_test( deps = [ ":http_protocol_integration_lib", "//source/common/http:header_map_lib", + "//source/extensions/internal_redirect/allow_listed_routes:config", + "//source/extensions/internal_redirect/previous_routes:config", + "//source/extensions/internal_redirect/safe_cross_scheme:config", "//test/test_common:utility_lib", "@envoy_api//envoy/config/route/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/internal_redirect/allow_listed_routes/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/internal_redirect/previous_routes/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/internal_redirect/safe_cross_scheme/v3:pkg_cc_proto", ], ) diff --git a/test/integration/redirect_integration_test.cc b/test/integration/redirect_integration_test.cc index 14c4fa5da7e2..02861c34b6c8 100644 --- a/test/integration/redirect_integration_test.cc +++ b/test/integration/redirect_integration_test.cc @@ -1,5 +1,8 @@ #include "envoy/config/route/v3/route_components.pb.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" +#include "envoy/extensions/internal_redirect/allow_listed_routes/v3/allow_listed_routes_config.pb.h" +#include "envoy/extensions/internal_redirect/previous_routes/v3/previous_routes_config.pb.h" +#include "envoy/extensions/internal_redirect/safe_cross_scheme/v3/safe_cross_scheme_config.pb.h" #include "test/integration/http_protocol_integration.h" @@ -19,16 +22,17 @@ class RedirectIntegrationTest : public HttpProtocolIntegrationTest { config_helper_.addVirtualHost(pass_through); auto handle = config_helper_.createVirtualHost("handle.internal.redirect"); - handle.mutable_routes(0)->mutable_route()->set_internal_redirect_action( - envoy::config::route::v3::RouteAction::HANDLE_INTERNAL_REDIRECT); + handle.mutable_routes(0)->set_name("redirect"); + 
handle.mutable_routes(0)->mutable_route()->mutable_internal_redirect_policy(); config_helper_.addVirtualHost(handle); auto handle_max_3_hop = config_helper_.createVirtualHost("handle.internal.redirect.max.three.hop"); - handle_max_3_hop.mutable_routes(0)->mutable_route()->set_internal_redirect_action( - envoy::config::route::v3::RouteAction::HANDLE_INTERNAL_REDIRECT); + handle_max_3_hop.mutable_routes(0)->set_name("max_three_hop"); + handle_max_3_hop.mutable_routes(0)->mutable_route()->mutable_internal_redirect_policy(); handle_max_3_hop.mutable_routes(0) ->mutable_route() + ->mutable_internal_redirect_policy() ->mutable_max_internal_redirects() ->set_value(3); config_helper_.addVirtualHost(handle_max_3_hop); @@ -176,6 +180,9 @@ TEST_P(RedirectIntegrationTest, InternalRedirectWithThreeHopLimit) { EXPECT_EQ( 1, test_server_->counter("cluster.cluster_0.upstream_internal_redirect_failed_total")->value()); + EXPECT_EQ( + 1, test_server_->counter("http.config_test.passthrough_internal_redirect_too_many_redirects") + ->value()); } TEST_P(RedirectIntegrationTest, InternalRedirectToDestinationWithBody) { @@ -220,6 +227,180 @@ TEST_P(RedirectIntegrationTest, InternalRedirectToDestinationWithBody) { ->value()); } +TEST_P(RedirectIntegrationTest, InternalRedirectPreventedByPreviousRoutesPredicate) { + auto handle_prevent_repeated_target = + config_helper_.createVirtualHost("handle.internal.redirect.no.repeated.target"); + auto* internal_redirect_policy = handle_prevent_repeated_target.mutable_routes(0) + ->mutable_route() + ->mutable_internal_redirect_policy(); + internal_redirect_policy->mutable_max_internal_redirects()->set_value(10); + envoy::extensions::internal_redirect::previous_routes::v3::PreviousRoutesConfig + previous_routes_config; + auto* predicate = internal_redirect_policy->add_predicates(); + predicate->set_name("previous_routes"); + predicate->mutable_typed_config()->PackFrom(previous_routes_config); + config_helper_.addVirtualHost(handle_prevent_repeated_target); + + // Validate that header sanitization is only called once. + config_helper_.addConfigModifier( + [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) { hcm.set_via("via_value"); }); + initialize(); + fake_upstreams_[0]->set_allow_unexpected_disconnects(true); + + codec_client_ = makeHttpConnection(lookupPort("http")); + + default_request_headers_.setHost("handle.internal.redirect.no.repeated.target"); + IntegrationStreamDecoderPtr response = + codec_client_->makeHeaderOnlyRequest(default_request_headers_); + + auto first_request = waitForNextStream(); + // Redirect to another route + redirect_response_.setLocation("http://handle.internal.redirect.max.three.hop/random/path"); + first_request->encodeHeaders(redirect_response_, true); + + auto second_request = waitForNextStream(); + // Redirect back to the original route. + redirect_response_.setLocation("http://handle.internal.redirect.no.repeated.target/another/path"); + second_request->encodeHeaders(redirect_response_, true); + + auto third_request = waitForNextStream(); + // Redirect to the same route as the first redirect. This should fail. 
+ redirect_response_.setLocation("http://handle.internal.redirect.max.three.hop/yet/another/path"); + third_request->encodeHeaders(redirect_response_, true); + + response->waitForEndStream(); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("302", response->headers().Status()->value().getStringView()); + EXPECT_EQ("http://handle.internal.redirect.max.three.hop/yet/another/path", + response->headers().Location()->value().getStringView()); + EXPECT_EQ(2, test_server_->counter("cluster.cluster_0.upstream_internal_redirect_succeeded_total") + ->value()); + EXPECT_EQ( + 1, + test_server_->counter("http.config_test.passthrough_internal_redirect_predicate")->value()); +} + +TEST_P(RedirectIntegrationTest, InternalRedirectPreventedByAllowListedRoutesPredicate) { + auto handle_allow_listed_redirect_route = + config_helper_.createVirtualHost("handle.internal.redirect.only.allow.listed.target"); + auto* internal_redirect_policy = handle_allow_listed_redirect_route.mutable_routes(0) + ->mutable_route() + ->mutable_internal_redirect_policy(); + + auto* allow_listed_routes_predicate = internal_redirect_policy->add_predicates(); + allow_listed_routes_predicate->set_name("allow_listed_routes"); + envoy::extensions::internal_redirect::allow_listed_routes::v3::AllowListedRoutesConfig + allow_listed_routes_config; + *allow_listed_routes_config.add_allowed_route_names() = "max_three_hop"; + allow_listed_routes_predicate->mutable_typed_config()->PackFrom(allow_listed_routes_config); + + internal_redirect_policy->mutable_max_internal_redirects()->set_value(10); + + config_helper_.addVirtualHost(handle_allow_listed_redirect_route); + + // Validate that header sanitization is only called once. + config_helper_.addConfigModifier( + [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) { hcm.set_via("via_value"); }); + initialize(); + fake_upstreams_[0]->set_allow_unexpected_disconnects(true); + + codec_client_ = makeHttpConnection(lookupPort("http")); + + default_request_headers_.setHost("handle.internal.redirect.only.allow.listed.target"); + IntegrationStreamDecoderPtr response = + codec_client_->makeHeaderOnlyRequest(default_request_headers_); + + auto first_request = waitForNextStream(); + // Redirect to another route + redirect_response_.setLocation("http://handle.internal.redirect.max.three.hop/random/path"); + first_request->encodeHeaders(redirect_response_, true); + + auto second_request = waitForNextStream(); + // Redirect back to the original route. + redirect_response_.setLocation( + "http://handle.internal.redirect.only.allow.listed.target/another/path"); + second_request->encodeHeaders(redirect_response_, true); + + auto third_request = waitForNextStream(); + // Redirect to the non-allow-listed route. This should fail. 
+ redirect_response_.setLocation("http://handle.internal.redirect/yet/another/path"); + third_request->encodeHeaders(redirect_response_, true); + + response->waitForEndStream(); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("302", response->headers().Status()->value().getStringView()); + EXPECT_EQ("http://handle.internal.redirect/yet/another/path", + response->headers().Location()->value().getStringView()); + EXPECT_EQ(2, test_server_->counter("cluster.cluster_0.upstream_internal_redirect_succeeded_total") + ->value()); + EXPECT_EQ( + 1, + test_server_->counter("http.config_test.passthrough_internal_redirect_predicate")->value()); +} + +TEST_P(RedirectIntegrationTest, InternalRedirectPreventedBySafeCrossSchemePredicate) { + auto handle_safe_cross_scheme_route = config_helper_.createVirtualHost( + "handle.internal.redirect.only.allow.safe.cross.scheme.redirect"); + auto* internal_redirect_policy = handle_safe_cross_scheme_route.mutable_routes(0) + ->mutable_route() + ->mutable_internal_redirect_policy(); + + internal_redirect_policy->set_allow_cross_scheme_redirect(true); + + auto* predicate = internal_redirect_policy->add_predicates(); + predicate->set_name("safe_cross_scheme_predicate"); + envoy::extensions::internal_redirect::safe_cross_scheme::v3::SafeCrossSchemeConfig + predicate_config; + predicate->mutable_typed_config()->PackFrom(predicate_config); + + internal_redirect_policy->mutable_max_internal_redirects()->set_value(10); + + config_helper_.addVirtualHost(handle_safe_cross_scheme_route); + + // Validate that header sanitization is only called once. + config_helper_.addConfigModifier( + [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) { hcm.set_via("via_value"); }); + initialize(); + fake_upstreams_[0]->set_allow_unexpected_disconnects(true); + + codec_client_ = makeHttpConnection(lookupPort("http")); + + default_request_headers_.setHost( + "handle.internal.redirect.only.allow.safe.cross.scheme.redirect"); + IntegrationStreamDecoderPtr response = + codec_client_->makeHeaderOnlyRequest(default_request_headers_); + + auto first_request = waitForNextStream(); + // Redirect to another route + redirect_response_.setLocation("http://handle.internal.redirect.max.three.hop/random/path"); + first_request->encodeHeaders(redirect_response_, true); + + auto second_request = waitForNextStream(); + // Redirect back to the original route. + redirect_response_.setLocation( + "http://handle.internal.redirect.only.allow.safe.cross.scheme.redirect/another/path"); + second_request->encodeHeaders(redirect_response_, true); + + auto third_request = waitForNextStream(); + // Redirect to https target. This should fail. 
+ redirect_response_.setLocation("https://handle.internal.redirect/yet/another/path"); + third_request->encodeHeaders(redirect_response_, true); + + response->waitForEndStream(); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("302", response->headers().Status()->value().getStringView()); + EXPECT_EQ("https://handle.internal.redirect/yet/another/path", + response->headers().Location()->value().getStringView()); + EXPECT_EQ(2, test_server_->counter("cluster.cluster_0.upstream_internal_redirect_succeeded_total") + ->value()); + EXPECT_EQ( + 1, + test_server_->counter("http.config_test.passthrough_internal_redirect_predicate")->value()); +} + TEST_P(RedirectIntegrationTest, InvalidRedirect) { initialize(); diff --git a/test/integration/stats_integration_test.cc b/test/integration/stats_integration_test.cc index ea7fa20fd23b..9221d3d38381 100644 --- a/test/integration/stats_integration_test.cc +++ b/test/integration/stats_integration_test.cc @@ -272,6 +272,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithFakeSymbolTable) { // 2020/04/02 10624 43356 44000 Use 100 clusters rather than 1000 to avoid timeouts // 2020/04/07 10661 43349 44000 fix clang tidy on master // 2020/04/23 10531 44169 44600 http: max stream duration upstream support. + // 2020/05/05 10908 44233 44600 router: add InternalRedirectPolicy and predicate // Note: when adjusting this value: EXPECT_MEMORY_EQ is active only in CI // 'release' builds, where we control the platform and tool-chain. So you @@ -285,7 +286,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithFakeSymbolTable) { // If you encounter a failure here, please see // https://github.com/envoyproxy/envoy/blob/master/source/docs/stats.md#stats-memory-tests // for details on how to fix. - EXPECT_MEMORY_EQ(m_per_cluster, 44169); + EXPECT_MEMORY_EQ(m_per_cluster, 44233); EXPECT_MEMORY_LE(m_per_cluster, 44600); } @@ -331,6 +332,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithRealSymbolTable) { // 2020/04/02 10624 35564 36000 Use 100 clusters rather than 1000 to avoid timeouts // 2020/04/07 10661 35557 36000 fix clang tidy on master // 2020/04/23 10531 36281 36800 http: max stream duration upstream support. + // 2020/05/05 10908 36345 36800 router: add InternalRedirectPolicy and predicate // Note: when adjusting this value: EXPECT_MEMORY_EQ is active only in CI // 'release' builds, where we control the platform and tool-chain. So you @@ -344,7 +346,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithRealSymbolTable) { // If you encounter a failure here, please see // https://github.com/envoyproxy/envoy/blob/master/source/docs/stats.md#stats-memory-tests // for details on how to fix. 
- EXPECT_MEMORY_EQ(m_per_cluster, 36281); + EXPECT_MEMORY_EQ(m_per_cluster, 36345); EXPECT_MEMORY_LE(m_per_cluster, 36800); } diff --git a/test/mocks/router/mocks.cc b/test/mocks/router/mocks.cc index c04efe43e255..2d0a995bf1ca 100644 --- a/test/mocks/router/mocks.cc +++ b/test/mocks/router/mocks.cc @@ -22,6 +22,10 @@ TestRetryPolicy::TestRetryPolicy() { num_retries_ = 1; } TestRetryPolicy::~TestRetryPolicy() = default; +MockInternalRedirectPolicy::MockInternalRedirectPolicy() { + ON_CALL(*this, enabled()).WillByDefault(Return(false)); +} + MockRetryState::MockRetryState() = default; void MockRetryState::expectHeadersRetry() { @@ -85,6 +89,7 @@ MockRouteEntry::MockRouteEntry() { ON_CALL(*this, opaqueConfig()).WillByDefault(ReturnRef(opaque_config_)); ON_CALL(*this, rateLimitPolicy()).WillByDefault(ReturnRef(rate_limit_policy_)); ON_CALL(*this, retryPolicy()).WillByDefault(ReturnRef(retry_policy_)); + ON_CALL(*this, internalRedirectPolicy()).WillByDefault(ReturnRef(internal_redirect_policy_)); ON_CALL(*this, retryShadowBufferLimit()) .WillByDefault(Return(std::numeric_limits::max())); ON_CALL(*this, shadowPolicies()).WillByDefault(ReturnRef(shadow_policies_)); diff --git a/test/mocks/router/mocks.h b/test/mocks/router/mocks.h index f7c719442e27..d221d81b3439 100644 --- a/test/mocks/router/mocks.h +++ b/test/mocks/router/mocks.h @@ -131,6 +131,22 @@ class TestRetryPolicy : public RetryPolicy { absl::optional max_interval_{}; }; +class MockInternalRedirectPolicy : public InternalRedirectPolicy { +public: + MockInternalRedirectPolicy(); + MOCK_METHOD(bool, enabled, (), (const)); + MOCK_METHOD(bool, shouldRedirectForResponseCode, (const Http::Code& response_code), (const)); + MOCK_METHOD(std::vector, predicates, (), (const)); + MOCK_METHOD(uint32_t, maxInternalRedirects, (), (const)); + MOCK_METHOD(bool, isCrossSchemeRedirectAllowed, (), (const)); +}; + +class MockInternalRedirectPredicate : public InternalRedirectPredicate { +public: + MOCK_METHOD(bool, acceptTargetRoute, (StreamInfo::FilterState&, absl::string_view, bool, bool)); + MOCK_METHOD(absl::string_view, name, (), (const)); +}; + class MockRetryState : public RetryState { public: MockRetryState(); @@ -335,6 +351,7 @@ class MockRouteEntry : public RouteEntry { MOCK_METHOD(Upstream::ResourcePriority, priority, (), (const)); MOCK_METHOD(const RateLimitPolicy&, rateLimitPolicy, (), (const)); MOCK_METHOD(const RetryPolicy&, retryPolicy, (), (const)); + MOCK_METHOD(const InternalRedirectPolicy&, internalRedirectPolicy, (), (const)); MOCK_METHOD(uint32_t, retryShadowBufferLimit, (), (const)); MOCK_METHOD(const std::vector&, shadowPolicies, (), (const)); MOCK_METHOD(std::chrono::milliseconds, timeout, (), (const)); @@ -356,8 +373,6 @@ class MockRouteEntry : public RouteEntry { MOCK_METHOD(bool, includeAttemptCountInResponse, (), (const)); MOCK_METHOD(const absl::optional&, connectConfig, (), (const)); MOCK_METHOD(const UpgradeMap&, upgradeMap, (), (const)); - MOCK_METHOD(InternalRedirectAction, internalRedirectAction, (), (const)); - MOCK_METHOD(uint32_t, maxInternalRedirects, (), (const)); MOCK_METHOD(const std::string&, routeName, (), (const)); std::string cluster_name_{"fake_cluster"}; @@ -365,6 +380,7 @@ class MockRouteEntry : public RouteEntry { std::multimap opaque_config_; TestVirtualCluster virtual_cluster_; TestRetryPolicy retry_policy_; + testing::NiceMock internal_redirect_policy_; TestHedgePolicy hedge_policy_; testing::NiceMock rate_limit_policy_; std::vector shadow_policies_; diff --git a/tools/spelling/spelling_dictionary.txt 
b/tools/spelling/spelling_dictionary.txt index 497e035e2c02..00bbda734aa3 100644 --- a/tools/spelling/spelling_dictionary.txt +++ b/tools/spelling/spelling_dictionary.txt @@ -17,6 +17,7 @@ ASCII ASSERTs AST AWS +Allowlisted BACKTRACE BSON BPF @@ -170,6 +171,7 @@ LHS LLVM LPT LRS +Loggable MB MD MERCHANTABILITY @@ -362,6 +364,7 @@ alloc alloca allocator allowlist +allowlisted alls alphanumerics amongst From 510c10ca1611f2ed40d7352fa94827bbf0b6d6c8 Mon Sep 17 00:00:00 2001 From: Craig Radcliffe Date: Thu, 14 May 2020 13:48:58 +0000 Subject: [PATCH 166/909] http conn man: default preserve_upstream_date to true, add runtime guard (#11132) Commit Message: http conn man: always preserve upstream date response header Additional Description: Reintroduces the change to preserve the upstream date response header (introduced in #11077, reverted in #11116 ) but removes the configuration and adds a runtime guard instead (see #11110 ) Risk Level: Low Testing: Unit testing Docs Changes: N/A Release Notes: yes Runtime guard: http_connection_manager.preserve_upstream_date Signed-off-by: Craig Radcliffe --- docs/root/version_history/current.rst | 1 + source/common/http/conn_manager_impl.cc | 10 ++- source/common/runtime/runtime_features.cc | 1 + test/common/http/conn_manager_impl_test.cc | 76 ++++++++++++++++++++++ 4 files changed, 87 insertions(+), 1 deletion(-) diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 3d9249db4556..7f55307b1da0 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -31,6 +31,7 @@ Changes * http: remove legacy connection pool code and their runtime features: `envoy.reloadable_features.new_http1_connection_pool_behavior` and `envoy.reloadable_features.new_http2_connection_pool_behavior`. * http: stopped adding a synthetic path to CONNECT requests, meaning unconfigured CONNECT requests will now return 404 instead of 403. This behavior can be temporarily reverted by setting `envoy.reloadable_features.stop_faking_paths` to false. +* http: stopped overwriting `date` response headers. Responses without a `date` header will still have the header properly set. This behavior can be temporarily reverted by setting `envoy.reloadable_features.preserve_upstream_date` to false. * listener: added in place filter chain update flow for tcp listener update which doesn't close connections if the corresponding network filter chain is equivalent during the listener update. Can be disabled by setting runtime feature `envoy.reloadable_features.listener_in_place_filterchain_update` to false. Also added additional draining filter chain stat for :ref:`listener manager ` to track the number of draining filter chains and the number of in place update attempts. diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index fee69c99c96f..c8eba98605e7 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -1640,9 +1640,17 @@ void ConnectionManagerImpl::ActiveStream::encodeHeaders(ActiveStreamEncoderFilte void ConnectionManagerImpl::ActiveStream::encodeHeadersInternal(ResponseHeaderMap& headers, bool end_stream) { // Base headers. - if (!stream_info_.hasResponseFlag(StreamInfo::ResponseFlag::ResponseFromCacheFilter)) { + + // By default, always preserve the upstream date response header if present. 
If we choose to + // overwrite the upstream date unconditionally (a previous behavior), only do so if the response + // is not from cache + const bool should_preserve_upstream_date = + Runtime::runtimeFeatureEnabled("envoy.reloadable_features.preserve_upstream_date") || + stream_info_.hasResponseFlag(StreamInfo::ResponseFlag::ResponseFromCacheFilter); + if (!should_preserve_upstream_date || !headers.Date()) { connection_manager_.config_.dateProvider().setDateHeader(headers); } + // Following setReference() is safe because serverName() is constant for the life of the listener. const auto transformation = connection_manager_.config_.serverHeaderTransformation(); if (transformation == ConnectionManagerConfig::HttpConnectionManagerProto::OVERWRITE || diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc index b7b03c4f1e06..661e58d920ac 100644 --- a/source/common/runtime/runtime_features.cc +++ b/source/common/runtime/runtime_features.cc @@ -67,6 +67,7 @@ constexpr const char* runtime_features[] = { "envoy.reloadable_features.fixed_connection_close", "envoy.reloadable_features.listener_in_place_filterchain_update", "envoy.reloadable_features.stop_faking_paths", + "envoy.reloadable_features.preserve_upstream_date", }; // This is a section for officially sanctioned runtime features which are too diff --git a/test/common/http/conn_manager_impl_test.cc b/test/common/http/conn_manager_impl_test.cc index 5db00349eaf4..7b2f3c92b825 100644 --- a/test/common/http/conn_manager_impl_test.cc +++ b/test/common/http/conn_manager_impl_test.cc @@ -1167,6 +1167,82 @@ TEST_F(HttpConnectionManagerImplTest, RouteShouldUseNormalizedHost) { conn_manager_->onData(fake_input, false); } +TEST_F(HttpConnectionManagerImplTest, PreserveUpstreamDateDisabledDateNotSet) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.preserve_upstream_date", "false"}}); + setup(false, ""); + setUpEncoderAndDecoder(false, false); + sendRequestHeadersAndData(); + const auto* modified_headers = sendResponseHeaders( + ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}, {"server", "foo"}}}); + ASSERT_TRUE(modified_headers); + EXPECT_TRUE(modified_headers->Date()); +} + +TEST_F(HttpConnectionManagerImplTest, PreserveUpstreamDateEnabledDateNotSet) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.preserve_upstream_date", "true"}}); + setup(false, ""); + setUpEncoderAndDecoder(false, false); + sendRequestHeadersAndData(); + const auto* modified_headers = sendResponseHeaders( + ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}, {"server", "foo"}}}); + ASSERT_TRUE(modified_headers); + EXPECT_TRUE(modified_headers->Date()); +} + +TEST_F(HttpConnectionManagerImplTest, PreserveUpstreamDateDisabledDateSet) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.preserve_upstream_date", "false"}}); + setup(false, ""); + setUpEncoderAndDecoder(false, false); + sendRequestHeadersAndData(); + const std::string expected_date{"Tue, 15 Nov 1994 08:12:31 GMT"}; + const auto* modified_headers = + sendResponseHeaders(ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{ + {":status", "200"}, {"server", "foo"}, {"date", expected_date.c_str()}}}); + ASSERT_TRUE(modified_headers); + ASSERT_TRUE(modified_headers->Date()); + EXPECT_NE(expected_date, 
modified_headers->Date()->value().getStringView()); +} + +TEST_F(HttpConnectionManagerImplTest, PreserveUpstreamDateEnabledDateSet) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.preserve_upstream_date", "true"}}); + setup(false, ""); + setUpEncoderAndDecoder(false, false); + sendRequestHeadersAndData(); + const std::string expected_date{"Tue, 15 Nov 1994 08:12:31 GMT"}; + const auto* modified_headers = + sendResponseHeaders(ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{ + {":status", "200"}, {"server", "foo"}, {"date", expected_date.c_str()}}}); + ASSERT_TRUE(modified_headers); + ASSERT_TRUE(modified_headers->Date()); + EXPECT_EQ(expected_date, modified_headers->Date()->value().getStringView()); +} + +TEST_F(HttpConnectionManagerImplTest, PreserveUpstreamDateDisabledDateFromCache) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.preserve_upstream_date", "false"}}); + setup(false, ""); + setUpEncoderAndDecoder(false, false); + sendRequestHeadersAndData(); + encoder_filters_[0]->callbacks_->streamInfo().setResponseFlag( + StreamInfo::ResponseFlag::ResponseFromCacheFilter); + const std::string expected_date{"Tue, 15 Nov 1994 08:12:31 GMT"}; + const auto* modified_headers = + sendResponseHeaders(ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{ + {":status", "200"}, {"server", "foo"}, {"date", expected_date.c_str()}}}); + ASSERT_TRUE(modified_headers); + ASSERT_TRUE(modified_headers->Date()); + EXPECT_EQ(expected_date, modified_headers->Date()->value().getStringView()); +} + TEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlow) { setup(false, ""); From e71d1622d622ab19dd962538908f1baa6e0483b1 Mon Sep 17 00:00:00 2001 From: Rei Shimizu Date: Fri, 15 May 2020 00:38:44 +0900 Subject: [PATCH 167/909] header: HeaderListView implementation (#10733) This is an abstraction over the header list. It is required so that all headers can be returned from a HeaderMap, which will be used to implement the List/Map expression processors for google/cel (not yet implemented).
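A minimal usage sketch (illustrative only, not part of this change; it assumes an already-populated `Http::HeaderMap` named `headers`):

  const Http::HeaderListView view(headers);
  for (size_t i = 0; i < view.keys().size(); ++i) {
    absl::string_view key = view.keys()[i].get().getStringView();
    absl::string_view value = view.values()[i].get().getStringView();
    // The parallel keys()/values() entries can then be used to build, e.g., a CEL list/map value.
  }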
Signed-off-by: shikugawa --- source/common/http/BUILD | 9 +++++++++ source/common/http/header_list_view.cc | 19 +++++++++++++++++++ source/common/http/header_list_view.h | 24 ++++++++++++++++++++++++ test/common/http/BUILD | 1 + test/common/http/header_map_impl_test.cc | 21 +++++++++++++++++++++ 5 files changed, 74 insertions(+) create mode 100644 source/common/http/header_list_view.cc create mode 100644 source/common/http/header_list_view.h diff --git a/source/common/http/BUILD b/source/common/http/BUILD index 8fd46d8d58dc..5e78b0ed8def 100644 --- a/source/common/http/BUILD +++ b/source/common/http/BUILD @@ -253,6 +253,15 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "header_list_view_lib", + srcs = ["header_list_view.cc"], + hdrs = ["header_list_view.h"], + deps = [ + "//include/envoy/http:header_map_interface", + ], +) + envoy_cc_library( name = "header_map_lib", srcs = ["header_map_impl.cc"], diff --git a/source/common/http/header_list_view.cc b/source/common/http/header_list_view.cc new file mode 100644 index 000000000000..a29bc84bf86f --- /dev/null +++ b/source/common/http/header_list_view.cc @@ -0,0 +1,19 @@ +#include "common/http/header_list_view.h" + +namespace Envoy { +namespace Http { + +HeaderListView::HeaderListView(const HeaderMap& header_map) { + header_map.iterate( + [](const Http::HeaderEntry& header, void* context) -> HeaderMap::Iterate { + auto* context_ptr = static_cast(context); + context_ptr->keys_.emplace_back(std::reference_wrapper(header.key())); + context_ptr->values_.emplace_back( + std::reference_wrapper(header.value())); + return HeaderMap::Iterate::Continue; + }, + this); +} + +} // namespace Http +} // namespace Envoy \ No newline at end of file diff --git a/source/common/http/header_list_view.h b/source/common/http/header_list_view.h new file mode 100644 index 000000000000..552af6f89d5c --- /dev/null +++ b/source/common/http/header_list_view.h @@ -0,0 +1,24 @@ +#pragma once + +#include + +#include "envoy/http/header_map.h" + +namespace Envoy { +namespace Http { + +class HeaderListView { +public: + using HeaderStringRefs = std::vector>; + + HeaderListView(const HeaderMap& header_map); + const HeaderStringRefs& keys() const { return keys_; } + const HeaderStringRefs& values() const { return values_; } + +private: + HeaderStringRefs keys_; + HeaderStringRefs values_; +}; + +} // namespace Http +} // namespace Envoy diff --git a/test/common/http/BUILD b/test/common/http/BUILD index b5b736ff1fc4..3f9e7fcb3715 100644 --- a/test/common/http/BUILD +++ b/test/common/http/BUILD @@ -278,6 +278,7 @@ envoy_cc_test( name = "header_map_impl_test", srcs = ["header_map_impl_test.cc"], deps = [ + "//source/common/http:header_list_view_lib", "//source/common/http:header_map_lib", "//source/common/http:header_utility_lib", "//test/test_common:utility_lib", diff --git a/test/common/http/header_map_impl_test.cc b/test/common/http/header_map_impl_test.cc index b28c7a016283..5bb93d8300e3 100644 --- a/test/common/http/header_map_impl_test.cc +++ b/test/common/http/header_map_impl_test.cc @@ -1,6 +1,8 @@ +#include #include #include +#include "common/http/header_list_view.h" #include "common/http/header_map_impl.h" #include "common/http/header_utility.h" @@ -9,6 +11,7 @@ #include "gtest/gtest.h" +using ::testing::ElementsAre; using ::testing::InSequence; namespace Envoy { @@ -920,6 +923,24 @@ TEST(HeaderMapImplTest, Get) { } } +TEST(HeaderMapImplTest, TestHeaderList) { + std::array keys{Headers::get().Path, LowerCaseString("hello")}; + std::array values{"/", "world"}; + + auto 
headers = createHeaderMap({{keys[0], values[0]}, {keys[1], values[1]}}); + HeaderListView header_list(headers->header_map_); + auto to_string_views = + [](const HeaderListView::HeaderStringRefs& strs) -> std::vector { + std::vector str_views(strs.size()); + std::transform(strs.begin(), strs.end(), str_views.begin(), + [](auto value) -> absl::string_view { return value.get().getStringView(); }); + return str_views; + }; + + EXPECT_THAT(to_string_views(header_list.keys()), ElementsAre(":path", "hello")); + EXPECT_THAT(to_string_views(header_list.values()), ElementsAre("/", "world")); +} + TEST(HeaderMapImplTest, TestAppendHeader) { // Test appending to a string with a value. { From e83240169291761d942d06cec45da17623dab27e Mon Sep 17 00:00:00 2001 From: asraa Date: Thu, 14 May 2020 18:36:41 -0400 Subject: [PATCH 168/909] [fuzz] fix stats merger fuzz test (#11195) Fixes stat merger fuzz test case. Handles empty spans. Risk level: Low Fixes OSS-Fuzz issue https://oss-fuzz.com/testcase-detail/4800677542100992 Signed-off-by: Asra Ali --- ...se-minimized-stat_merger_fuzz_test-4800677542100992.fuzz | 1 + test/common/stats/stat_merger_fuzz_test.cc | 6 ++++-- test/common/stats/stat_merger_test.cc | 1 + 3 files changed, 6 insertions(+), 2 deletions(-) create mode 100644 test/common/stats/stat_merger_corpus/clusterfuzz-testcase-minimized-stat_merger_fuzz_test-4800677542100992.fuzz diff --git a/test/common/stats/stat_merger_corpus/clusterfuzz-testcase-minimized-stat_merger_fuzz_test-4800677542100992.fuzz b/test/common/stats/stat_merger_corpus/clusterfuzz-testcase-minimized-stat_merger_fuzz_test-4800677542100992.fuzz new file mode 100644 index 000000000000..77b5c6fdaf93 --- /dev/null +++ b/test/common/stats/stat_merger_corpus/clusterfuzz-testcase-minimized-stat_merger_fuzz_test-4800677542100992.fuzz @@ -0,0 +1 @@ +aVa.b \ No newline at end of file diff --git a/test/common/stats/stat_merger_fuzz_test.cc b/test/common/stats/stat_merger_fuzz_test.cc index de2ebf6a9a4b..70579f378676 100644 --- a/test/common/stats/stat_merger_fuzz_test.cc +++ b/test/common/stats/stat_merger_fuzz_test.cc @@ -58,8 +58,10 @@ void testDynamicEncoding(absl::string_view data, SymbolTable& symbol_table) { StatMerger::DynamicContext dynamic_context(symbol_table); std::string name = symbol_table.toString(stat_name); StatMerger::DynamicsMap dynamic_map; - dynamic_map[name] = symbol_table.getDynamicSpans(stat_name); - + DynamicSpans spans = symbol_table.getDynamicSpans(stat_name); + if (!spans.empty()) { + dynamic_map[name] = spans; + } StatName decoded = dynamic_context.makeDynamicStatName(name, dynamic_map); FUZZ_ASSERT(name == symbol_table.toString(decoded)); FUZZ_ASSERT(stat_name == decoded); diff --git a/test/common/stats/stat_merger_test.cc b/test/common/stats/stat_merger_test.cc index b348e1f97389..1f37f0774f41 100644 --- a/test/common/stats/stat_merger_test.cc +++ b/test/common/stats/stat_merger_test.cc @@ -328,6 +328,7 @@ TEST_F(StatMergerDynamicTest, DynamicsWithFakeSymbolTable) { EXPECT_EQ(0, dynamicEncodeDecodeTest("hello..D:world")); EXPECT_EQ(0, dynamicEncodeDecodeTest("D:hello..D:world")); EXPECT_EQ(0, dynamicEncodeDecodeTest("D:hello.D:.D:world")); + EXPECT_EQ(0, dynamicEncodeDecodeTest("aV.D:,b")); // TODO(#10008): these tests fail because fake/real symbol tables // deal with empty components differently. 
From c98ea6f42cc00fcd60307e979630f8cf2f5ed9d3 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Thu, 14 May 2020 18:44:19 -0400 Subject: [PATCH 169/909] http: eagerly sending 200 headers for CONNECT termination (#11197) Changing CONNECT termination to eagerly send synthesized 200 headers as soon as a TCP connection to the upstream is established. Signed-off-by: Alyssa Wilk --- source/common/router/upstream_request.cc | 12 ++++++------ source/common/router/upstream_request.h | 1 - test/common/router/upstream_request_test.cc | 7 +++---- test/integration/tcp_tunneling_integration_test.cc | 3 ++- 4 files changed, 11 insertions(+), 12 deletions(-) diff --git a/source/common/router/upstream_request.cc b/source/common/router/upstream_request.cc index be15ec7a8e67..2cf636fd483a 100644 --- a/source/common/router/upstream_request.cc +++ b/source/common/router/upstream_request.cc @@ -590,6 +590,12 @@ void TcpUpstream::encodeHeaders(const Http::RequestHeaderMap&, bool end_stream) if (data.length() != 0 || end_stream) { upstream_conn_data_->connection().write(data, end_stream); } + + // TcpUpstream::encodeHeaders is called after the UpstreamRequest is fully initialized. Also use + // this time to synthesize the 200 response headers downstream to complete the CONNECT handshake. + Http::ResponseHeaderMapPtr headers{ + Http::createHeaderMap({{Http::Headers::get().Status, "200"}})}; + upstream_request_->decodeHeaders(std::move(headers), false); } void TcpUpstream::encodeTrailers(const Http::RequestTrailerMap&) { @@ -610,12 +616,6 @@ void TcpUpstream::resetStream() { } void TcpUpstream::onUpstreamData(Buffer::Instance& data, bool end_stream) { - if (!sent_headers_) { - Http::ResponseHeaderMapPtr headers{ - Http::createHeaderMap({{Http::Headers::get().Status, "200"}})}; - upstream_request_->decodeHeaders(std::move(headers), false); - sent_headers_ = true; - } upstream_request_->decodeData(data, end_stream); } diff --git a/source/common/router/upstream_request.h b/source/common/router/upstream_request.h index a23ab1b5afd6..a2fe46067101 100644 --- a/source/common/router/upstream_request.h +++ b/source/common/router/upstream_request.h @@ -333,7 +333,6 @@ class TcpUpstream : public GenericUpstream, public Tcp::ConnectionPool::Upstream private: UpstreamRequest* upstream_request_; Tcp::ConnectionPool::ConnectionDataPtr upstream_conn_data_; - bool sent_headers_{}; }; } // namespace Router diff --git a/test/common/router/upstream_request_test.cc b/test/common/router/upstream_request_test.cc index e228e6057ded..1f36264c15e5 100644 --- a/test/common/router/upstream_request_test.cc +++ b/test/common/router/upstream_request_test.cc @@ -180,8 +180,9 @@ class TcpUpstreamTest : public ::testing::Test { }; TEST_F(TcpUpstreamTest, Basic) { - // Swallow the headers. + // Swallow the request headers and generate response headers. EXPECT_CALL(connection_, write(_, false)).Times(0); + EXPECT_CALL(mock_router_filter_, onUpstreamHeaders(200, _, _, false)); tcp_upstream_->encodeHeaders(request_, false); // Proxy the data. @@ -193,13 +194,11 @@ TEST_F(TcpUpstreamTest, Basic) { Http::MetadataMapVector metadata_map_vector; tcp_upstream_->encodeMetadata(metadata_map_vector); - // On initial data payload, fake response headers, and forward data. + // Forward data. 
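+  // The synthesized 200 response headers were already decoded when encodeHeaders() ran above, so from here on only data is proxied.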
Buffer::OwnedImpl response1("bar"); - EXPECT_CALL(mock_router_filter_, onUpstreamHeaders(200, _, _, false)); EXPECT_CALL(mock_router_filter_, onUpstreamData(BufferStringEqual("bar"), _, false)); tcp_upstream_->onUpstreamData(response1, false); - // On the next batch of payload there won't be additional headers. Buffer::OwnedImpl response2("eep"); EXPECT_CALL(mock_router_filter_, onUpstreamHeaders(_, _, _, _)).Times(0); EXPECT_CALL(mock_router_filter_, onUpstreamData(BufferStringEqual("eep"), _, false)); diff --git a/test/integration/tcp_tunneling_integration_test.cc b/test/integration/tcp_tunneling_integration_test.cc index 97ee8abe49da..591af915e938 100644 --- a/test/integration/tcp_tunneling_integration_test.cc +++ b/test/integration/tcp_tunneling_integration_test.cc @@ -41,6 +41,7 @@ class ConnectTerminationIntegrationTest request_encoder_ = &encoder_decoder.first; response_ = std::move(encoder_decoder.second); ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_raw_upstream_connection_)); + response_->waitForHeaders(); } void sendBidirectionalData(const char* downstream_send_data = "hello", @@ -126,7 +127,7 @@ TEST_P(ConnectTerminationIntegrationTest, TestTimeout) { setUpConnection(); // Wait for the timeout to close the connection. - response_->waitForEndStream(); + response_->waitForReset(); ASSERT_TRUE(fake_raw_upstream_connection_->waitForHalfClose()); } From 3cd4a0f2588e4481e59647cede39a90b5f1ea79e Mon Sep 17 00:00:00 2001 From: htuch Date: Thu, 14 May 2020 21:33:22 -0400 Subject: [PATCH 170/909] build: update c-ares to 1.16.1. (#11149) This picks up ares_getaddrinfo() and also some security fixes. See https://c-ares.haxx.se/changelog.html#1_16_1. Risk level: Low Testing: bazel test //test/... Signed-off-by: Harvey Tuch --- bazel/foreign_cc/BUILD | 8 ++++++++ bazel/foreign_cc/cares-win32-nameser.patch | 12 ------------ bazel/repositories.bzl | 1 - bazel/repository_locations.bzl | 10 +++------- 4 files changed, 11 insertions(+), 20 deletions(-) delete mode 100644 bazel/foreign_cc/cares-win32-nameser.patch diff --git a/bazel/foreign_cc/BUILD b/bazel/foreign_cc/BUILD index 06d8ed05df6b..46b111981c55 100644 --- a/bazel/foreign_cc/BUILD +++ b/bazel/foreign_cc/BUILD @@ -85,6 +85,14 @@ envoy_cmake_external( }, defines = ["CARES_STATICLIB"], lib_source = "@com_github_c_ares_c_ares//:all", + linkopts = select({ + "//bazel:apple": ["-lresolv"], + "//conditions:default": [], + }), + postfix_script = select({ + "//bazel:windows_x86_64": "cp -L $EXT_BUILD_ROOT/external/com_github_c_ares_c_ares/nameser.h $INSTALLDIR/include/nameser.h", + "//conditions:default": "", + }), static_libraries = select({ "//bazel:windows_x86_64": ["cares.lib"], "//conditions:default": ["libcares.a"], diff --git a/bazel/foreign_cc/cares-win32-nameser.patch b/bazel/foreign_cc/cares-win32-nameser.patch deleted file mode 100644 index 756c3933edcf..000000000000 --- a/bazel/foreign_cc/cares-win32-nameser.patch +++ /dev/null @@ -1,12 +0,0 @@ ---- CMakeLists.txt.orig 2020-02-19 14:42:47.978299400 -0500 -+++ CMakeLists.txt 2020-02-19 14:45:18.925903400 -0500 -@@ -652,6 +652,9 @@ - # Headers installation target - IF (CARES_INSTALL) - SET (CARES_HEADERS ares.h ares_version.h ares_dns.h "${PROJECT_BINARY_DIR}/ares_build.h" ares_rules.h) -+ IF (WIN32) -+ SET (CARES_HEADERS ${CARES_HEADERS} nameser.h) -+ ENDIF() - INSTALL (FILES ${CARES_HEADERS} DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}) - ENDIF () - diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index b67009e22424..39bc408ea748 100644 --- 
a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -245,7 +245,6 @@ def _com_github_c_ares_c_ares(): location = _get_location("com_github_c_ares_c_ares") http_archive( name = "com_github_c_ares_c_ares", - patches = ["@envoy//bazel/foreign_cc:cares-win32-nameser.patch"], build_file_content = BUILD_ALL_CONTENT, **location ) diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index d6c384964477..7ea4a944b453 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -109,13 +109,9 @@ DEPENDENCY_REPOSITORIES = dict( cpe = "cpe:2.3:a:apache:thrift:*", ), com_github_c_ares_c_ares = dict( - sha256 = "bbaab13d6ad399a278d476f533e4d88a7ec7d729507348bb9c2e3b207ba4c606", - strip_prefix = "c-ares-d7e070e7283f822b1d2787903cce3615536c5610", - # 2019-06-19 - # 27 new commits from release-1.15.0. Upgrade for commit 7d3591ee8a1a63e7748e68e6d880bd1763a32885 "getaddrinfo enhancements" and follow up fixes. - # Use getaddrinfo to query DNS record and TTL. - # TODO(crazyxy): Update to release-1.16.0 when it is released. - urls = ["https://github.com/c-ares/c-ares/archive/d7e070e7283f822b1d2787903cce3615536c5610.tar.gz"], + sha256 = "d08312d0ecc3bd48eee0a4cc0d2137c9f194e0a28de2028928c0f6cae85f86ce", + strip_prefix = "c-ares-1.16.1", + urls = ["https://github.com/c-ares/c-ares/releases/download/cares-1_16_1/c-ares-1.16.1.tar.gz"], use_category = ["dataplane"], cpe = "cpe:2.3:a:c-ares_project:c-ares:*", ), From dbb67aa4968e7ecffaff25c1723675de569b5c59 Mon Sep 17 00:00:00 2001 From: Peng Gao Date: Thu, 14 May 2020 21:34:54 -0400 Subject: [PATCH 171/909] protoxform: Add enum deprecation support. (#11199) Support deprecating an enum stanza as a whole by "option deprecated = true;" Risk Level: low Testing: Added test case; manually verified the generated config removes the deprecated enum Docs Changes: N/A Release Notes: N/A Signed-off-by: pengg --- api/envoy/config/route/v3/route_components.proto | 2 ++ api/envoy/config/route/v4alpha/route_components.proto | 7 ------- .../envoy/config/route/v3/route_components.proto | 2 ++ .../envoy/config/route/v4alpha/route_components.proto | 2 ++ tools/protoxform/migrate.py | 2 ++ tools/protoxform/protoprint.py | 2 ++ tools/testdata/protoxform/envoy/v2/sample.proto | 5 +++++ .../protoxform/envoy/v2/sample.proto.active_or_frozen.gold | 7 +++++++ ....proto.next_major_version_candidate.envoy_internal.gold | 7 +++++++ 9 files changed, 29 insertions(+), 7 deletions(-) diff --git a/api/envoy/config/route/v3/route_components.proto b/api/envoy/config/route/v3/route_components.proto index eb6aab7dcd7e..9be58f9681c5 100644 --- a/api/envoy/config/route/v3/route_components.proto +++ b/api/envoy/config/route/v3/route_components.proto @@ -552,6 +552,8 @@ message RouteAction { // Configures :ref:`internal redirect ` behavior. // [#next-major-version: remove this definition - it's defined in the InternalRedirectPolicy message.] enum InternalRedirectAction { + option deprecated = true; + PASS_THROUGH_INTERNAL_REDIRECT = 0; HANDLE_INTERNAL_REDIRECT = 1; } diff --git a/api/envoy/config/route/v4alpha/route_components.proto b/api/envoy/config/route/v4alpha/route_components.proto index 815895db80e4..7b49aca53803 100644 --- a/api/envoy/config/route/v4alpha/route_components.proto +++ b/api/envoy/config/route/v4alpha/route_components.proto @@ -552,13 +552,6 @@ message RouteAction { NOT_FOUND = 1; } - // Configures :ref:`internal redirect ` behavior. 
- // [#next-major-version: remove this definition - it's defined in the InternalRedirectPolicy message.] - enum InternalRedirectAction { - PASS_THROUGH_INTERNAL_REDIRECT = 0; - HANDLE_INTERNAL_REDIRECT = 1; - } - // The router is capable of shadowing traffic from one cluster to another. The current // implementation is "fire and forget," meaning Envoy will not wait for the shadow cluster to // respond before returning the response from the primary cluster. All normal statistics are diff --git a/generated_api_shadow/envoy/config/route/v3/route_components.proto b/generated_api_shadow/envoy/config/route/v3/route_components.proto index cfd3f44291ee..f94f2c2bb3e5 100644 --- a/generated_api_shadow/envoy/config/route/v3/route_components.proto +++ b/generated_api_shadow/envoy/config/route/v3/route_components.proto @@ -564,6 +564,8 @@ message RouteAction { // Configures :ref:`internal redirect ` behavior. // [#next-major-version: remove this definition - it's defined in the InternalRedirectPolicy message.] enum InternalRedirectAction { + option deprecated = true; + PASS_THROUGH_INTERNAL_REDIRECT = 0; HANDLE_INTERNAL_REDIRECT = 1; } diff --git a/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto b/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto index dcfd095b1fe5..55718de65a9c 100644 --- a/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto +++ b/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto @@ -555,6 +555,8 @@ message RouteAction { // Configures :ref:`internal redirect ` behavior. // [#next-major-version: remove this definition - it's defined in the InternalRedirectPolicy message.] enum InternalRedirectAction { + option deprecated = true; + PASS_THROUGH_INTERNAL_REDIRECT = 0; HANDLE_INTERNAL_REDIRECT = 1; } diff --git a/tools/protoxform/migrate.py b/tools/protoxform/migrate.py index 1be44af91acb..06e2743c845f 100644 --- a/tools/protoxform/migrate.py +++ b/tools/protoxform/migrate.py @@ -191,6 +191,8 @@ def VisitMessage(self, msg_proto, type_context, nested_msgs, nested_enums): def VisitEnum(self, enum_proto, type_context): upgraded_proto = copy.deepcopy(enum_proto) + if upgraded_proto.options.deprecated and not self._envoy_internal_shadow: + options.AddHideOption(upgraded_proto.options) for v in upgraded_proto.value: if v.options.deprecated: # We need special handling for the zero field, as proto3 needs some value diff --git a/tools/protoxform/protoprint.py b/tools/protoxform/protoprint.py index 804b93b86600..1b0e8f5f418c 100755 --- a/tools/protoxform/protoprint.py +++ b/tools/protoxform/protoprint.py @@ -566,6 +566,8 @@ def VisitService(self, service_proto, type_context): trailing_comment, methods) def VisitEnum(self, enum_proto, type_context): + if protoxform_options.HasHideOption(enum_proto.options): + return '' leading_comment, trailing_comment = FormatTypeContextComments(type_context) formatted_options = FormatOptions(enum_proto.options) reserved_fields = FormatReserved(enum_proto) diff --git a/tools/testdata/protoxform/envoy/v2/sample.proto b/tools/testdata/protoxform/envoy/v2/sample.proto index 73649fb32db0..be4b61a9230b 100644 --- a/tools/testdata/protoxform/envoy/v2/sample.proto +++ b/tools/testdata/protoxform/envoy/v2/sample.proto @@ -19,6 +19,11 @@ message Sample { string key = 1; string value = 2; } + enum DeprecateEnum { + option deprecated = true; + FIRST = 0; + SECOND = 1; + } repeated Entry entries = 1; string will_deprecated = 2 [deprecated = true]; string will_rename_compoent = 3 
[(udpa.annotations.field_migrate).rename = "renamed_component"]; diff --git a/tools/testdata/protoxform/envoy/v2/sample.proto.active_or_frozen.gold b/tools/testdata/protoxform/envoy/v2/sample.proto.active_or_frozen.gold index 577b8ddcc1f2..5c5fe19f4997 100644 --- a/tools/testdata/protoxform/envoy/v2/sample.proto.active_or_frozen.gold +++ b/tools/testdata/protoxform/envoy/v2/sample.proto.active_or_frozen.gold @@ -18,6 +18,13 @@ enum SomeEnum { } message Sample { + enum DeprecateEnum { + option deprecated = true; + + FIRST = 0; + SECOND = 1; + } + message Entry { string key = 1; diff --git a/tools/testdata/protoxform/envoy/v2/sample.proto.next_major_version_candidate.envoy_internal.gold b/tools/testdata/protoxform/envoy/v2/sample.proto.next_major_version_candidate.envoy_internal.gold index 3f10d5e043c4..c9dbe1062f91 100644 --- a/tools/testdata/protoxform/envoy/v2/sample.proto.next_major_version_candidate.envoy_internal.gold +++ b/tools/testdata/protoxform/envoy/v2/sample.proto.next_major_version_candidate.envoy_internal.gold @@ -20,6 +20,13 @@ enum SomeEnum { message Sample { option (udpa.annotations.versioning).previous_message_type = "envoy.v2.Sample"; + enum DeprecateEnum { + option deprecated = true; + + FIRST = 0; + SECOND = 1; + } + message Entry { option (udpa.annotations.versioning).previous_message_type = "envoy.v2.Sample.Entry"; From 7a28514e08dcce9b4d7e780a791de87006946f43 Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Thu, 14 May 2020 19:37:29 -0700 Subject: [PATCH 172/909] coverage: use lcov to merge multi-binary coverage report (#10909) Description: Use bazel lcov merger to merge coverage generated by `llvm-cov export` from multiple binary run. This improves coverage test stability by run them in separate sandbox. Risk Level: Low (test only) Testing: CI Docs Changes: N/A Release Notes: N/A Signed-off-by: Lizan Zhou --- .bazelrc | 24 ++- .circleci/config.yml | 2 +- bazel/coverage/BUILD | 9 + bazel/coverage/collect_cc_coverage.sh | 175 ++++++++++++++++++ bazel/coverage/fuzz_coverage_wrapper.sh | 14 ++ bazel/envoy_test.bzl | 63 +++---- bazel/repository_locations.bzl | 8 +- test/build_and_run_fuzz_targets.sh | 48 ----- test/coverage/gen_build.sh | 87 --------- .../quiche/platform/quic_platform_test.cc | 4 + .../dynamic_opentracing_driver_impl_test.cc | 12 +- test/integration/BUILD | 4 +- .../header_prefix_integration_test.cc | 3 +- test/run_envoy_bazel_coverage.sh | 74 ++------ test/tools/router_check/test/BUILD | 2 +- 15 files changed, 286 insertions(+), 243 deletions(-) create mode 100644 bazel/coverage/BUILD create mode 100755 bazel/coverage/collect_cc_coverage.sh create mode 100755 bazel/coverage/fuzz_coverage_wrapper.sh delete mode 100755 test/build_and_run_fuzz_targets.sh delete mode 100755 test/coverage/gen_build.sh diff --git a/.bazelrc b/.bazelrc index bffcef2cba59..5478e9a9364e 100644 --- a/.bazelrc +++ b/.bazelrc @@ -110,6 +110,28 @@ build:sizeopt -c opt --copt -Os # Test options build --test_env=HEAPCHECK=normal --test_env=PPROF_PATH +# Coverage options +coverage --config=coverage +build:coverage --action_env=BAZEL_USE_LLVM_NATIVE_COVERAGE=1 +build:coverage --action_env=GCOV=llvm-profdata +build:coverage --copt=-DNDEBUG +build:coverage --test_timeout=900 +build:coverage --define=ENVOY_CONFIG_COVERAGE=1 +build:coverage --cxxopt="-DENVOY_CONFIG_COVERAGE=1" +build:coverage --coverage_support=@envoy//bazel/coverage:coverage_support +build:coverage --test_env=CC_CODE_COVERAGE_SCRIPT=external/envoy/bazel/coverage/collect_cc_coverage.sh +build:coverage --test_env=HEAPCHECK= 
+build:coverage --combined_report=lcov +build:coverage --strategy=TestRunner=sandboxed,local +build:coverage --strategy=CoverageReport=sandboxed,local +build:coverage --experimental_use_llvm_covmap +build:coverage --collect_code_coverage +build:coverage --instrumentation_filter="//source(?!/common/chromium_url|/extensions/quic_listeners/quiche/platform)[/:],//include[/:]" +coverage:test-coverage --test_arg="--log-path /dev/null" +coverage:test-coverage --test_arg="-l trace" +coverage:fuzz-coverage --config=asan-fuzzer +coverage:fuzz-coverage --run_under=@envoy//bazel/coverage:fuzz_coverage_wrapper.sh + # Remote execution: https://docs.bazel.build/versions/master/remote-execution.html build:rbe-toolchain --host_platform=@envoy_build_tools//toolchains:rbe_ubuntu_clang_platform build:rbe-toolchain --platforms=@envoy_build_tools//toolchains:rbe_ubuntu_clang_platform @@ -159,7 +181,7 @@ build:remote-msan --config=rbe-toolchain-msan # Docker sandbox # NOTE: Update this from https://github.com/envoyproxy/envoy-build-tools/blob/master/toolchains/rbe_toolchains_config.bzl#L8 -build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu:4a5cbb97e1a068661cab495bf40cccc96bb37ca1 +build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu:04f06115b6ee7cfea74930353fb47a41149cbec3 build:docker-sandbox --spawn_strategy=docker build:docker-sandbox --strategy=Javac=docker build:docker-sandbox --strategy=Closure=docker diff --git a/.circleci/config.yml b/.circleci/config.yml index 745cfb29025c..4201a956dc28 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -5,7 +5,7 @@ executors: description: "A regular build executor based on ubuntu image" docker: # NOTE: Update this from https://github.com/envoyproxy/envoy-build-tools/blob/master/toolchains/rbe_toolchains_config.bzl#L8 - - image: envoyproxy/envoy-build-ubuntu:4a5cbb97e1a068661cab495bf40cccc96bb37ca1 + - image: envoyproxy/envoy-build-ubuntu:04f06115b6ee7cfea74930353fb47a41149cbec3 resource_class: xlarge working_directory: /source diff --git a/bazel/coverage/BUILD b/bazel/coverage/BUILD new file mode 100644 index 000000000000..9aa87d086968 --- /dev/null +++ b/bazel/coverage/BUILD @@ -0,0 +1,9 @@ +licenses(["notice"]) # Apache 2 + +# TODO(lizan): Add test for this and upstream to upstream Bazel. +filegroup( + name = "coverage_support", + srcs = ["collect_cc_coverage.sh"], +) + +exports_files(["fuzz_coverage_wrapper.sh"]) diff --git a/bazel/coverage/collect_cc_coverage.sh b/bazel/coverage/collect_cc_coverage.sh new file mode 100755 index 000000000000..53926e5cb6af --- /dev/null +++ b/bazel/coverage/collect_cc_coverage.sh @@ -0,0 +1,175 @@ +#!/bin/bash -x +# +# This is a fork of https://github.com/bazelbuild/bazel/blob/3.1.0/tools/test/collect_cc_coverage.sh +# to cover most of use cases in Envoy. +# TODO(lizan): Move this to upstream Bazel +# +# Copyright 2016 The Bazel Authors. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# This script collects code coverage data for C++ sources, after the tests +# were executed. +# +# Bazel C++ code coverage collection support is poor and limited. There is +# an ongoing effort to improve this (tracking issue #1118). +# +# Bazel uses the lcov tool for gathering coverage data. There is also +# an experimental support for clang llvm coverage, which uses the .profraw +# data files to compute the coverage report. +# +# This script assumes the following environment variables are set: +# - COVERAGE_DIR Directory containing metadata files needed for +# coverage collection (e.g. gcda files, profraw). +# - COVERAGE_MANIFEST Location of the instrumented file manifest. +# - COVERAGE_GCOV_PATH Location of gcov. This is set by the TestRunner. +# - COVERAGE_GCOV_OPTIONS Additional options to pass to gcov. +# - ROOT Location from where the code coverage collection +# was invoked. +# +# The script looks in $COVERAGE_DIR for the C++ metadata coverage files (either +# gcda or profraw) and uses either lcov or gcov to get the coverage data. +# The coverage data is placed in $COVERAGE_OUTPUT_FILE. + +# Checks if clang llvm coverage should be used instead of lcov. +function uses_llvm() { + if stat "${COVERAGE_DIR}"/*.profraw >/dev/null 2>&1; then + return 0 + fi + return 1 +} + +# Returns 0 if gcov must be used, 1 otherwise. +function uses_gcov() { + [[ "$GCOV_COVERAGE" -eq "1" ]] && return 0 + return 1 +} + +function init_gcov() { + # Symlink the gcov tool such with a link called gcov. Clang comes with a tool + # called llvm-cov, which behaves like gcov if symlinked in this way (otherwise + # we would need to invoke it with "llvm-cov gcov"). + # For more details see https://llvm.org/docs/CommandGuide/llvm-cov.html. + GCOV="${COVERAGE_DIR}/gcov" + ln -s "${COVERAGE_GCOV_PATH}" "${GCOV}" +} + +# Computes code coverage data using the clang generated metadata found under +# $COVERAGE_DIR. +# Writes the collected coverage into the given output file. +function llvm_coverage() { + local output_file="${1}"; shift + export LLVM_PROFILE_FILE="${COVERAGE_DIR}/%h-%p-%m.profraw" + "${COVERAGE_GCOV_PATH}" merge -output "${output_file}.data" \ + "${COVERAGE_DIR}"/*.profraw + + + local object_files="$(find -L "${RUNFILES_DIR}" -type f -exec file -L {} \; \ + | grep ELF | grep -v "LSB core" | sed 's,:.*,,')" + + local object_param="" + for object_file in ${object_files}; do + object_param+=" -object ${object_file}" + done + + llvm-cov export -instr-profile "${output_file}.data" -format=lcov \ + -ignore-filename-regex='.*external/.+' \ + -ignore-filename-regex='/tmp/.+' \ + ${object_param} | sed 's#/proc/self/cwd/##' > "${output_file}" +} + +# Generates a code coverage report in gcov intermediate text format by invoking +# gcov and using the profile data (.gcda) and notes (.gcno) files. +# +# The profile data files are expected to be found under $COVERAGE_DIR. +# The notes file are expected to be found under $ROOT. +# +# - output_file The location of the file where the generated code coverage +# report is written. +function gcov_coverage() { + local output_file="${1}"; shift + + # We'll save the standard output of each the gcov command in this log. + local gcov_log="$output_file.gcov.log" + + # Copy .gcno files next to their corresponding .gcda files in $COVERAGE_DIR + # because gcov expects them to be in the same directory. 
+ while read -r line; do + if [[ ${line: -4} == "gcno" ]]; then + gcno_path=${line} + local gcda="${COVERAGE_DIR}/$(dirname ${gcno_path})/$(basename ${gcno_path} .gcno).gcda" + # If the gcda file was not found we skip generating coverage from the gcno + # file. + if [[ -f "$gcda" ]]; then + # gcov expects both gcno and gcda files to be in the same directory. + # We overcome this by copying the gcno to $COVERAGE_DIR where the gcda + # files are expected to be. + if [ ! -f "${COVERAGE_DIR}/${gcno_path}" ]; then + mkdir -p "${COVERAGE_DIR}/$(dirname ${gcno_path})" + cp "$ROOT/${gcno_path}" "${COVERAGE_DIR}/${gcno_path}" + fi + # Invoke gcov to generate a code coverage report with the flags: + # -i Output gcov file in an intermediate text format. + # The output is a single .gcov file per .gcda file. + # No source code is required. + # -o directory The directory containing the .gcno and + # .gcda data files. + # "${gcda"} The input file name. gcov is looking for data files + # named after the input filename without its extension. + # gcov produces files called .gcov in the current + # directory. These contain the coverage information of the source file + # they correspond to. One .gcov file is produced for each source + # (or header) file containing code which was compiled to produce the + # .gcda files. + # Don't generate branch coverage (-b) because of a gcov issue that + # segfaults when both -i and -b are used (see + # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=84879). + "${GCOV}" -i $COVERAGE_GCOV_OPTIONS -o "$(dirname ${gcda})" "${gcda}" + + # Append all .gcov files in the current directory to the output file. + cat *.gcov >> "$output_file" + # Delete the .gcov files. + rm *.gcov + fi + fi + done < "${COVERAGE_MANIFEST}" +} + +function main() { + init_gcov + + # If llvm code coverage is used, we output the raw code coverage report in + # the $COVERAGE_OUTPUT_FILE. This report will not be converted to any other + # format by LcovMerger. + # TODO(#5881): Convert profdata reports to lcov. + if uses_llvm; then + BAZEL_CC_COVERAGE_TOOL="PROFDATA" + fi + + # When using either gcov or lcov, have an output file specific to the test + # and format used. For lcov we generate a ".dat" output file and for gcov + # a ".gcov" output file. It is important that these files are generated under + # COVERAGE_DIR. + # When this script is invoked by tools/test/collect_coverage.sh either of + # these two coverage reports will be picked up by LcovMerger and their + # content will be converted and/or merged with other reports to an lcov + # format, generating the final code coverage report. 
+ case "$BAZEL_CC_COVERAGE_TOOL" in + ("GCOV") gcov_coverage "$COVERAGE_DIR/_cc_coverage.gcov" ;; + ("PROFDATA") llvm_coverage "$COVERAGE_DIR/_cc_coverage.dat" ;; + (*) echo "Coverage tool $BAZEL_CC_COVERAGE_TOOL not supported" \ + && exit 1 + esac +} + +main diff --git a/bazel/coverage/fuzz_coverage_wrapper.sh b/bazel/coverage/fuzz_coverage_wrapper.sh new file mode 100755 index 000000000000..14c94bd545ae --- /dev/null +++ b/bazel/coverage/fuzz_coverage_wrapper.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +set -ex + +TEST_BINARY=$1 +shift + +# Clear existing corpus if previous run wasn't in sandbox +rm -rf fuzz_corpus + +mkdir -p fuzz_corpus/seed_corpus +cp -r $@ fuzz_corpus/seed_corpus + +${TEST_BINARY} fuzz_corpus -seed=${FUZZ_CORPUS_SEED:-1} -max_total_time=${FUZZ_CORPUS_TIME:-60} diff --git a/bazel/envoy_test.bzl b/bazel/envoy_test.bzl index dff388c5d07e..fb68b46f7e64 100644 --- a/bazel/envoy_test.bzl +++ b/bazel/envoy_test.bzl @@ -144,7 +144,10 @@ def envoy_cc_fuzz_test( copts = fuzz_copts + envoy_copts("@envoy", test = True), linkopts = ["-fsanitize=fuzzer"] + _envoy_test_linkopts(), linkstatic = 1, - testonly = 1, + args = select({ + "@envoy//bazel:coverage_build": ["$(locations %s)" % corpus_name], + "//conditions:default": [], + }), data = [corpus_name], deps = [":" + test_lib_name], tags = ["manual", "fuzzer"] + tags, @@ -167,32 +170,17 @@ def envoy_cc_test( local = False, size = "medium", flaky = False): - if coverage: - coverage_tags = tags + ["coverage_test_lib"] - else: - coverage_tags = tags - _envoy_cc_test_infrastructure_library( - name = name + "_lib_internal_only", - srcs = srcs, - data = data, - external_deps = external_deps, - deps = deps + [repository + "//test/test_common:printers_includes"], - repository = repository, - tags = coverage_tags, - copts = copts, - # Allow public visibility so these can be consumed in coverage tests in external projects. 
- visibility = ["//visibility:public"], - ) - if coverage: - coverage_tags = tags + ["coverage_test"] + coverage_tags = tags + ([] if coverage else ["nocoverage"]) + native.cc_test( name = name, + srcs = srcs, + data = data, copts = envoy_copts(repository, test = True) + copts, linkopts = _envoy_test_linkopts(), linkstatic = envoy_linkstatic(), malloc = tcmalloc_external_dep(repository), - deps = envoy_stdlib_deps() + [ - ":" + name + "_lib_internal_only", + deps = envoy_stdlib_deps() + deps + [envoy_external_dep_path(dep) for dep in external_deps + ["googletest"]] + [ repository + "//test:main", ], # from https://github.com/google/googletest/blob/6e1970e2376c14bf658eb88f655a054030353f9f/googlemock/src/gmock.cc#L51 @@ -272,12 +260,14 @@ def envoy_benchmark_test( name, benchmark_binary, data = [], + tags = [], **kargs): native.sh_test( name = name, srcs = ["//bazel:test_for_benchmark_wrapper.sh"], data = [":" + benchmark_binary] + data, args = ["%s/%s" % (native.package_name(), benchmark_binary)], + tags = tags + ["nocoverage"], **kargs ) @@ -303,9 +293,12 @@ def envoy_sh_test( srcs = [], data = [], coverage = True, + cc_binary = [], tags = [], **kargs): if coverage: + if cc_binary == []: + fail("cc_binary is required for coverage-enabled test.") test_runner_cc = name + "_test_runner.cc" native.genrule( name = name + "_gen_test_runner", @@ -314,18 +307,20 @@ def envoy_sh_test( cmd = "$(location //bazel:gen_sh_test_runner.sh) $(SRCS) >> $@", tools = ["//bazel:gen_sh_test_runner.sh"], ) - envoy_cc_test_library( - name = name + "_lib", + envoy_cc_test( + name = name, srcs = [test_runner_cc], - data = srcs + data, - tags = tags + ["coverage_test_lib"], - deps = ["//test/test_common:environment_lib"], + data = srcs + data + cc_binary, + tags = tags, + deps = ["//test/test_common:environment_lib"] + cc_binary, + ) + + else: + native.sh_test( + name = name, + srcs = ["//bazel:sh_test_wrapper.sh"], + data = srcs + data + cc_binary, + args = srcs, + tags = tags + ["nocoverage"], + **kargs ) - native.sh_test( - name = name, - srcs = ["//bazel:sh_test_wrapper.sh"], - data = srcs + data, - args = srcs, - tags = tags, - **kargs - ) diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 7ea4a944b453..47ad51b78127 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -67,10 +67,10 @@ DEPENDENCY_REPOSITORIES = dict( use_category = ["build"], ), envoy_build_tools = dict( - sha256 = "e2cb99cf66e36412a9f570fe0391ff0c457ff17c2524ccdf73853c2752e8d372", - strip_prefix = "envoy-build-tools-8d7a0cb9be7a34c726575d79688ae3dea565a424", - # 2020-05-08 - urls = ["https://github.com/envoyproxy/envoy-build-tools/archive/8d7a0cb9be7a34c726575d79688ae3dea565a424.tar.gz"], + sha256 = "328648f158e7167f881d984433ff6bfe203bf0b815a99d98d22fb01a0fc95f70", + strip_prefix = "envoy-build-tools-f41e5ef5a023e50da088035449c6cdee0ae30d71", + # 2020-05-11 + urls = ["https://github.com/envoyproxy/envoy-build-tools/archive/f41e5ef5a023e50da088035449c6cdee0ae30d71.tar.gz"], use_category = ["build"], ), boringssl = dict( diff --git a/test/build_and_run_fuzz_targets.sh b/test/build_and_run_fuzz_targets.sh deleted file mode 100755 index 516b85fe1a6a..000000000000 --- a/test/build_and_run_fuzz_targets.sh +++ /dev/null @@ -1,48 +0,0 @@ -#!/bin/bash - -if [[ $# -gt 0 ]]; then - FUZZ_TARGETS=$* -else - echo "This script should be called from tools/run_envoy_bazel_coverage.sh" -fi - -LIBFUZZER_TARGETS="" -# Build all fuzz targets to run instrumented with libfuzzer in 
sequence. -for t in ${FUZZ_TARGETS} -do - LIBFUZZER_TARGETS+="${t}_with_libfuzzer " -done - -bazel build ${BAZEL_BUILD_OPTIONS} ${LIBFUZZER_TARGETS} --config asan-fuzzer -c opt - -# Now run each fuzz target in parallel for 60 seconds. -PIDS="" -TMPDIR="${FUZZ_TEMPDIR}" - -for t in ${FUZZ_TARGETS} -do - # Make a temporary corpus for this fuzz target. - TARGET_BINARY="${t/://}" - TEMP_CORPUS_PATH="${TARGET_BINARY:2}" - CORPUS_DIR="${TMPDIR}/${TEMP_CORPUS_PATH////_}_corpus" - mkdir -v "${CORPUS_DIR}" - # Get the original corpus for the fuzz target - CORPUS_LOCATION="$(bazel query "labels(data, ${t})" | head -1)" - ORIGINAL_CORPUS="$(bazel query "labels(srcs, ${CORPUS_LOCATION})" | head -1)" - ORIGINAL_CORPUS="${ORIGINAL_CORPUS/://}" - ORIGINAL_CORPUS="$(dirname ${ORIGINAL_CORPUS})" - # Copy entries in original corpus into temp. - cp -r "$(pwd)${ORIGINAL_CORPUS:1}" "${CORPUS_DIR}" - # Run fuzzing process. - bazel-bin/"${TARGET_BINARY:2}"_with_libfuzzer -max_total_time=60 "${CORPUS_DIR}" & - # Add pid to pids list - PIDS="${PIDS} $!" -done - -# Wait for background process to run. -for pid in ${PIDS}; do - wait $pid - if [ $? -ne 0 ]; then - echo "${pid} FAILED" - fi -done diff --git a/test/coverage/gen_build.sh b/test/coverage/gen_build.sh deleted file mode 100755 index e262c4e99bdc..000000000000 --- a/test/coverage/gen_build.sh +++ /dev/null @@ -1,87 +0,0 @@ -#!/bin/bash - -# Generate test/coverage/BUILD, which contains a single envoy_cc_test target -# that contains all C++ based tests suitable for performing the coverage run. A -# single binary (as opposed to multiple test targets) is require to work around -# the crazy in https://github.com/bazelbuild/bazel/issues/1118. This is used by -# the coverage runner script. - -set -e - -[ -z "${BAZEL_BIN}" ] && BAZEL_BIN=bazel -[ -z "${BUILDIFIER_BIN}" ] && BUILDIFIER_BIN=buildifier - -# Path to the generated BUILD file for the coverage target. -[ -z "${BUILD_PATH}" ] && BUILD_PATH="$(dirname "$0")"/BUILD - -# Extra repository information to include when generating coverage targets. This is useful for -# consuming projects. E.g., "@envoy". -[ -z "${REPOSITORY}" ] && REPOSITORY="" - -# This is an extra bazel path to query for additional targets. This is useful for consuming projects -# that want to run coverage over the public envoy code as well as private extensions. -# E.g., "//envoy-lyft/test/..." -[ -z "${EXTRA_QUERY_PATHS}" ] && EXTRA_QUERY_PATHS="" - -rm -f "${BUILD_PATH}" - -if [[ $# -gt 0 ]]; then - COVERAGE_TARGETS=$* -else - COVERAGE_TARGETS=//test/... -fi - -# This setting allows consuming projects to only run coverage over private extensions. -if [[ -z "${ONLY_EXTRA_QUERY_PATHS}" ]]; then - for target in ${COVERAGE_TARGETS}; do - TARGETS="$TARGETS $("${BAZEL_BIN}" query ${BAZEL_QUERY_OPTIONS} "attr('tags', 'coverage_test_lib', ${REPOSITORY}${target})" | grep "^//")" - done - - # Run the QUICHE platform api tests for coverage. - if [[ "${COVERAGE_TARGETS}" == "//test/..." ]]; then - TARGETS="$TARGETS $("${BAZEL_BIN}" query ${BAZEL_QUERY_OPTIONS} "attr('tags', 'coverage_test_lib', '@com_googlesource_quiche//:all')" | grep "^@com_googlesource_quiche")" - fi -fi - -if [ -n "${EXTRA_QUERY_PATHS}" ]; then - TARGETS="$TARGETS $("${BAZEL_BIN}" query ${BAZEL_QUERY_OPTIONS} "attr('tags', 'coverage_test_lib', ${EXTRA_QUERY_PATHS})" | grep "^//")" -fi - -( - cat << EOF -# This file is generated by test/coverage/gen_build.sh automatically prior to -# coverage runs. It is under .gitignore. DO NOT EDIT, DO NOT CHECK IN. 
-load( - "${REPOSITORY}//bazel:envoy_build_system.bzl", - "envoy_cc_test", - "envoy_package", -) - -envoy_package() - -envoy_cc_test( - name = "coverage_tests", - repository = "${REPOSITORY}", - deps = [ -EOF - for t in ${TARGETS} - do - echo " \"$t\"," - done - cat << EOF - ], - # no-remote due to https://github.com/bazelbuild/bazel/issues/4685 - tags = ["manual", "no-remote"], - coverage = False, - # Due to the nature of coverage_tests, the shard of coverage_tests are very uneven, some of - # shard can take 100s and some takes only 10s, so we use the maximum sharding to here to let - # Bazel scheduling them across CPU cores. - # Sharding can be disabled by --test_sharding_strategy=disabled. - shard_count = 50, -) -EOF - -) > "${BUILD_PATH}" - -echo "Generated coverage BUILD file at: ${BUILD_PATH}" -"${BUILDIFIER_BIN}" "${BUILD_PATH}" diff --git a/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc b/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc index d15743831a8a..56fa77e411cb 100644 --- a/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc +++ b/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc @@ -239,7 +239,11 @@ TEST_F(QuicPlatformTest, QuicServerStats) { } TEST_F(QuicPlatformTest, QuicStackTraceTest) { +#ifndef ENVOY_CONFIG_COVERAGE + // This doesn't work in coverage build because part of the stacktrace will be overwritten by + // __llvm_coverage_mapping EXPECT_THAT(QuicStackTrace(), HasSubstr("QuicStackTraceTest")); +#endif } TEST_F(QuicPlatformTest, QuicSleep) { QuicSleep(QuicTime::Delta::FromMilliseconds(20)); } diff --git a/test/extensions/tracers/dynamic_ot/dynamic_opentracing_driver_impl_test.cc b/test/extensions/tracers/dynamic_ot/dynamic_opentracing_driver_impl_test.cc index 422ffed9728d..19a995d6e537 100644 --- a/test/extensions/tracers/dynamic_ot/dynamic_opentracing_driver_impl_test.cc +++ b/test/extensions/tracers/dynamic_ot/dynamic_opentracing_driver_impl_test.cc @@ -73,10 +73,14 @@ TEST_F(DynamicOpenTracingDriverTest, InitializeDriver) { TEST_F(DynamicOpenTracingDriverTest, FlushSpans) { setupValidDriver(); - Tracing::SpanPtr first_span = driver_->startSpan(config_, request_headers_, operation_name_, - start_time_, {Tracing::Reason::Sampling, true}); - first_span->finishSpan(); - driver_->tracer().Close(); + { + Tracing::SpanPtr first_span = driver_->startSpan( + config_, request_headers_, operation_name_, start_time_, {Tracing::Reason::Sampling, true}); + first_span->finishSpan(); + driver_->tracer().Close(); + } + + driver_ = nullptr; const Json::ObjectSharedPtr spans_json = TestEnvironment::jsonLoadFromString(TestEnvironment::readFileToStringForTest(spans_file_)); diff --git a/test/integration/BUILD b/test/integration/BUILD index cce09495678d..c9e97596c8bb 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -232,9 +232,9 @@ envoy_sh_test( srcs = envoy_select_hot_restart([ "hotrestart_test.sh", ]), + cc_binary = ["//source/exe:envoy-static"], data = [ "test_utility.sh", - "//source/exe:envoy-static", "//test/config/integration:server_config_files", "//tools:socket_passing", ], @@ -245,9 +245,9 @@ envoy_sh_test( envoy_sh_test( name = "run_envoy_test", srcs = ["run_envoy_test.sh"], + cc_binary = ["//source/exe:envoy-static"], data = [ "test_utility.sh", - "//source/exe:envoy-static", "//test/config/integration:server_config_files", ], # TODO: This script invocation does not work on Windows, see: https://github.com/bazelbuild/bazel/issues/10959 diff --git 
a/test/integration/header_prefix_integration_test.cc b/test/integration/header_prefix_integration_test.cc index 0400d2f25a3c..a3a88f08f1dc 100644 --- a/test/integration/header_prefix_integration_test.cc +++ b/test/integration/header_prefix_integration_test.cc @@ -50,7 +50,8 @@ TEST_P(HeaderPrefixIntegrationTest, FailedCustomHeaderPrefix) { config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { bootstrap.set_header_prefix("x-custom-but-not-set"); }); - EXPECT_DEATH(initialize(), "Attempting to change the header prefix after it has been used!"); + EXPECT_DEATH_LOG_TO_STDERR(initialize(), + "Attempting to change the header prefix after it has been used!"); } INSTANTIATE_TEST_SUITE_P(Protocols, HeaderPrefixIntegrationTest, diff --git a/test/run_envoy_bazel_coverage.sh b/test/run_envoy_bazel_coverage.sh index 6eaf8c469a5a..c84a3efd206e 100755 --- a/test/run_envoy_bazel_coverage.sh +++ b/test/run_envoy_bazel_coverage.sh @@ -10,7 +10,6 @@ echo "Starting run_envoy_bazel_coverage.sh..." echo " PWD=$(pwd)" echo " SRCDIR=${SRCDIR}" echo " VALIDATE_COVERAGE=${VALIDATE_COVERAGE}" -echo " FUZZ_COVERAGE=${FUZZ_COVERAGE}" # This is the target that will be run to generate coverage data. It can be overridden by consumer # projects that want to run coverage on a different/combined target. @@ -22,80 +21,35 @@ elif [[ -n "${COVERAGE_TARGET}" ]]; then else # For fuzz builds, this overrides to just fuzz targets. COVERAGE_TARGETS=//test/... && [[ ${FUZZ_COVERAGE} == "true" ]] && - COVERAGE_TARGETS="$(bazel query 'attr("tags", "fuzz_target", //test/...)')" + COVERAGE_TARGETS="$(bazel query 'attr("tags", "fuzzer", //test/...)')" fi -SCRIPT_DIR="$(realpath "$(dirname "$0")")" -TEMP_CORPORA="" -if [ "$FUZZ_COVERAGE" == "true" ] -then - # Build and run libfuzzer linked target, grab collect temp directories. - FUZZ_TEMPDIR="$(mktemp -d)" - FUZZ_TEMPDIR=${FUZZ_TEMPDIR} "${SCRIPT_DIR}"/build_and_run_fuzz_targets.sh ${COVERAGE_TARGETS} +if [[ "${FUZZ_COVERAGE}" == "true" ]]; then + BAZEL_BUILD_OPTIONS+=" --config=fuzz-coverage --test_tag_filters=-nocoverage" else - # Make sure //test/coverage:coverage_tests is up-to-date. - "${SCRIPT_DIR}"/coverage/gen_build.sh ${COVERAGE_TARGETS} + BAZEL_BUILD_OPTIONS+=" --config=test-coverage --test_tag_filters=-nocoverage,-fuzz_target" fi -# Set the bazel targets to run. -BAZEL_TARGET=//test/coverage:coverage_tests && [[ ${FUZZ_COVERAGE} == "true" ]] && BAZEL_TARGET=${COVERAGE_TARGETS} - -# Add binaries to OBJECTS to pass in to llvm-cov -OBJECTS="" -# For nornaml builds, BAZEL_TARGET only contains //test/coverage:coverage_tests -for t in ${BAZEL_TARGET} -do - # Set test args. If normal coverage run, this is --log-path /dev/null - if [ "$FUZZ_COVERAGE" == "true" ] - then - # If this is a fuzz target, set args to be the temp corpus. - TARGET_BINARY="${t/://}" - CORPUS_LOCATION="${TARGET_BINARY:2}" - TEST_ARGS=(--test_arg="${FUZZ_TEMPDIR}/${CORPUS_LOCATION////_}_corpus" --test_arg="-runs=0") - if [[ -z "${OBJECTS}" ]]; then - # The first object needs to be passed without -object= flag. 
- OBJECTS="bazel-bin/${TARGET_BINARY:2}_with_libfuzzer" - else - OBJECTS="$OBJECTS -object=bazel-bin/${TARGET_BINARY:2}_with_libfuzzer" - fi - TARGET="${t}_with_libfuzzer" - else - TEST_ARGS=(--test_arg="--log-path /dev/null" --test_arg="-l trace") - OBJECTS="bazel-bin/test/coverage/coverage_tests" - TARGET="${t}" - fi - - BAZEL_USE_LLVM_NATIVE_COVERAGE=1 GCOV=llvm-profdata bazel coverage ${BAZEL_BUILD_OPTIONS} \ - -c fastbuild --copt=-DNDEBUG --instrumentation_filter=//source/...,//include/... \ - --test_timeout=2000 --cxxopt="-DENVOY_CONFIG_COVERAGE=1" --test_output=errors \ - "${TEST_ARGS[@]}" --test_env=HEAPCHECK= ${TARGET} -done +bazel coverage ${BAZEL_BUILD_OPTIONS} --test_output=all ${COVERAGE_TARGETS} COVERAGE_DIR="${SRCDIR}"/generated/coverage mkdir -p "${COVERAGE_DIR}" -COVERAGE_IGNORE_REGEX="(/external/|pb\.(validate\.)?(h|cc)|/chromium_url/|/test/|/tmp|/tools/|/third_party/|/source/extensions/quic_listeners/quiche/)" -COVERAGE_BINARY="bazel-bin/test/coverage/coverage_tests" COVERAGE_DATA="${COVERAGE_DIR}/coverage.dat" -echo "Merging coverage data..." -BAZEL_OUT=test/coverage/coverage_tests/ && [[ ${FUZZ_COVERAGE} ]] && BAZEL_OUT=test/ -llvm-profdata merge -sparse -o ${COVERAGE_DATA} $(find -L bazel-out/k8-fastbuild/testlogs/${BAZEL_OUT} -name coverage.dat) +cp bazel-out/_coverage/_coverage_report.dat "${COVERAGE_DATA}" -echo "Generating report..." -llvm-cov show -instr-profile="${COVERAGE_DATA}" ${OBJECTS} -Xdemangler=c++filt \ - -ignore-filename-regex="${COVERAGE_IGNORE_REGEX}" -output-dir=${COVERAGE_DIR} -format=html -sed -i -e 's|>proc/self/cwd/|>|g' "${COVERAGE_DIR}/index.html" -sed -i -e 's|>bazel-out/[^/]*/bin/\([^/]*\)/[^<]*/_virtual_includes/[^/]*|>\1|g' "${COVERAGE_DIR}/index.html" +COVERAGE_VALUE=$(genhtml --prefix ${PWD} --output "${COVERAGE_DIR}" "${COVERAGE_DATA}" | tee /dev/stderr | grep lines... 
| cut -d ' ' -f 4) +COVERAGE_VALUE=${COVERAGE_VALUE%?} [[ -z "${ENVOY_COVERAGE_DIR}" ]] || rsync -av "${COVERAGE_DIR}"/ "${ENVOY_COVERAGE_DIR}" -if [ "$VALIDATE_COVERAGE" == "true" ] -then - COVERAGE_VALUE=$(llvm-cov export "${OBJECTS}" -instr-profile="${COVERAGE_DATA}" \ - -ignore-filename-regex="${COVERAGE_IGNORE_REGEX}" -summary-only | \ - python3 -c "import sys, json; print(json.load(sys.stdin)['data'][0]['totals']['lines']['percent'])") - COVERAGE_THRESHOLD=97.0 +if [[ "$VALIDATE_COVERAGE" == "true" ]]; then + if [[ "${FUZZ_COVERAGE}" == "true" ]]; then + COVERAGE_THRESHOLD=27.0 + else + COVERAGE_THRESHOLD=97.0 + fi COVERAGE_FAILED=$(echo "${COVERAGE_VALUE}<${COVERAGE_THRESHOLD}" | bc) if test ${COVERAGE_FAILED} -eq 1; then echo Code coverage ${COVERAGE_VALUE} is lower than limit of ${COVERAGE_THRESHOLD} diff --git a/test/tools/router_check/test/BUILD b/test/tools/router_check/test/BUILD index 4e8e7f8885b8..42960b2da0e1 100644 --- a/test/tools/router_check/test/BUILD +++ b/test/tools/router_check/test/BUILD @@ -11,9 +11,9 @@ envoy_package() envoy_sh_test( name = "router_tool_test", srcs = ["route_tests.sh"], + cc_binary = ["//test/tools/router_check:router_check_tool"], data = [ ":configs", - "//test/tools/router_check:router_check_tool", ], # TODO: This script invocation does not work on Windows, see: https://github.com/bazelbuild/bazel/issues/10959 tags = ["fails_on_windows"], From 63e6829659226b5f79ad1b3998ccb1eb9234f8a1 Mon Sep 17 00:00:00 2001 From: asraa Date: Thu, 14 May 2020 23:32:02 -0400 Subject: [PATCH 173/909] [fuzz] fix fuzz crashes in fmt format (#10935) Fixes fuzz crashes in fmt::format (https://github.com/fmtlib/fmt/blob/0463665ef136d685fe07a564d93c782456456d3d/include/fmt/format.h#L703) on certain invalid protobuf inputs. fmt is patched with PR (fmtlib/fmt#1650), which replaces the in-house fuzzing resource management with an fmt-specific fuzzing macro. Additional Description: The regression test added shows that the proto in question is not unreasonably huge for Envoy. This is causing a high unexplained crash percentage for many fuzz tests on OSS-Fuzz. Also bump fmt. Testing: Added regression test in server fuzz test; it failed bazel test test/server:server_fuzz_test --copt=-DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION before this fix. Related: #9623 Risk level: Low Signed-off-by: Asra Ali --- .bazelrc | 1 + bazel/repository_locations.bzl | 9 +++++--- ...h-ac725507195d840cdb90bed3079b877e6e9419e3 | 22 +++++++++++++++++++ 3 files changed, 29 insertions(+), 3 deletions(-) create mode 100644 test/server/server_corpus/crash-ac725507195d840cdb90bed3079b877e6e9419e3 diff --git a/.bazelrc b/.bazelrc index 5478e9a9364e..264eb09dc3dc 100644 --- a/.bazelrc +++ b/.bazelrc @@ -212,6 +212,7 @@ build:asan-fuzzer --config=clang-asan build:asan-fuzzer --define=FUZZING_ENGINE=libfuzzer build:asan-fuzzer --copt=-fsanitize=fuzzer-no-link build:asan-fuzzer --copt=-fno-omit-frame-pointer +build:asan-fuzzer --copt=-DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION # Remove UBSAN halt_on_error to avoid crashing on protobuf errors. build:asan-fuzzer --test_env=UBSAN_OPTIONS=print_stacktrace=1 diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 47ad51b78127..fff26a38c7b3 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -82,6 +82,7 @@ DEPENDENCY_REPOSITORIES = dict( # 3. Find a commit in BoringSSL's "master-with-bazel" branch that merges .
# # chromium-81.0.4044.69 + # 2020-01-22 urls = ["https://github.com/google/boringssl/archive/1c2769383f027befac5b75b6cedd25daf3bf4dcf.tar.gz"], use_category = ["dataplane"], cpe = "N/A", @@ -145,9 +146,10 @@ DEPENDENCY_REPOSITORIES = dict( use_category = ["other"], ), com_github_fmtlib_fmt = dict( - sha256 = "f1907a58d5e86e6c382e51441d92ad9e23aea63827ba47fd647eacc0d3a16c78", - strip_prefix = "fmt-6.0.0", - urls = ["https://github.com/fmtlib/fmt/archive/6.0.0.tar.gz"], + sha256 = "5014aacf55285bf79654539791de0d6925063fddf4dfdd597ef76b53eb994f86", + strip_prefix = "fmt-e2ff910675c7800e5c4e28e1509ca6a50bdceafa", + # 2020-04-29 + urls = ["https://github.com/fmtlib/fmt/archive/e2ff910675c7800e5c4e28e1509ca6a50bdceafa.tar.gz"], use_category = ["observability"], cpe = "N/A", ), @@ -179,6 +181,7 @@ DEPENDENCY_REPOSITORIES = dict( # This sha on grpc:v1.25.x branch is specifically chosen to fix gRPC STS call credential options. sha256 = "bbc8f020f4e85ec029b047fab939b8c81f3d67254b5c724e1003a2bc49ddd123", strip_prefix = "grpc-d8f4928fa779f6005a7fe55a176bdb373b0f910f", + # 2020-02-11 urls = ["https://github.com/grpc/grpc/archive/d8f4928fa779f6005a7fe55a176bdb373b0f910f.tar.gz"], use_category = ["dataplane", "controlplane"], cpe = "cpe:2.3:a:grpc:grpc:*", diff --git a/test/server/server_corpus/crash-ac725507195d840cdb90bed3079b877e6e9419e3 b/test/server/server_corpus/crash-ac725507195d840cdb90bed3079b877e6e9419e3 new file mode 100644 index 000000000000..11fd78365277 --- /dev/null +++ b/test/server/server_corpus/crash-ac725507195d840cdb90bed3079b877e6e9419e3 @@ -0,0 +1,22 @@ +dynamic_resources { +} +cluster_manager { + local_cluster_name: "\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000" +} +hidden_envoy_deprecated_runtime { +} +admin { +} +stats_config { + use_all_default_tags { + value: true + } +} +layered_runtime { + layers { + disk_layer { + append_service_cluster: true + } + } +} +use_tcp_for_dns_lookups: true From 6e14acb2aae2a2f1826dc3e15470edc43a17a85d Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Thu, 14 May 2020 20:37:15 -0700 Subject: [PATCH 174/909] build: fix compile errors on clang-10 (#11186) -Wdeprecated-copy are enabled by default w/-Wextra while quiche and wee8 is not compatible with that. Fixing dangling pointer in tests. Disable a couple more warnings on wee8 Risk Level: Low Testing: local with clang-10, CI Docs Changes: N/A Release Notes: N/A Signed-off-by: Lizan Zhou --- bazel/external/quiche.BUILD | 2 ++ bazel/external/wee8.genrule_cmd | 2 ++ test/extensions/filters/http/buffer/config_test.cc | 14 ++++++-------- test/extensions/filters/http/rbac/config_test.cc | 10 ++++------ .../extensions/filters/network/rbac/config_test.cc | 5 ++--- 5 files changed, 16 insertions(+), 17 deletions(-) diff --git a/bazel/external/quiche.BUILD b/bazel/external/quiche.BUILD index 770eb720b9a3..d19e7e80ae53 100644 --- a/bazel/external/quiche.BUILD +++ b/bazel/external/quiche.BUILD @@ -59,6 +59,8 @@ quiche_copts = select({ # Remove these after upstream fix. "-Wno-unused-parameter", "-Wno-unused-function", + "-Wno-unknown-warning-option", + "-Wno-deprecated-copy", # quic_inlined_frame.h uses offsetof() to optimize memory usage in frames. 
"-Wno-invalid-offsetof", ], diff --git a/bazel/external/wee8.genrule_cmd b/bazel/external/wee8.genrule_cmd index f62997af1f0e..8886462edbe9 100644 --- a/bazel/external/wee8.genrule_cmd +++ b/bazel/external/wee8.genrule_cmd @@ -19,10 +19,12 @@ pushd $$ROOT/wee8 rm -rf out/wee8 # Export compiler configuration. +export CXXFLAGS="$${CXXFLAGS-} -Wno-deprecated-copy -Wno-unknown-warning-option" if [[ ( `uname` == "Darwin" && $${CXX-} == "" ) || $${CXX-} == *"clang"* ]]; then export IS_CLANG=true export CC=$${CC:-clang} export CXX=$${CXX:-clang++} + export CXXFLAGS="$${CXXFLAGS} -Wno-implicit-int-float-conversion -Wno-builtin-assume-aligned-alignment -Wno-final-dtor-non-final-class" else export IS_CLANG=false export CC=$${CC:-gcc} diff --git a/test/extensions/filters/http/buffer/config_test.cc b/test/extensions/filters/http/buffer/config_test.cc index d2c8e0c85633..a3b7b9e8142d 100644 --- a/test/extensions/filters/http/buffer/config_test.cc +++ b/test/extensions/filters/http/buffer/config_test.cc @@ -47,9 +47,9 @@ TEST(BufferFilterFactoryTest, BufferFilterCorrectProto) { TEST(BufferFilterFactoryTest, BufferFilterEmptyProto) { BufferFilterFactory factory; + auto empty_proto = factory.createEmptyConfigProto(); envoy::extensions::filters::http::buffer::v3::Buffer config = - *dynamic_cast( - factory.createEmptyConfigProto().get()); + *dynamic_cast(empty_proto.get()); config.mutable_max_request_bytes()->set_value(1028); @@ -62,9 +62,9 @@ TEST(BufferFilterFactoryTest, BufferFilterEmptyProto) { TEST(BufferFilterFactoryTest, BufferFilterNoMaxRequestBytes) { BufferFilterFactory factory; + auto empty_proto = factory.createEmptyConfigProto(); envoy::extensions::filters::http::buffer::v3::Buffer config = - *dynamic_cast( - factory.createEmptyConfigProto().get()); + *dynamic_cast(empty_proto.get()); NiceMock context; EXPECT_THROW_WITH_REGEX(factory.createFilterFactoryFromProto(config, "stats", context), @@ -74,10 +74,8 @@ TEST(BufferFilterFactoryTest, BufferFilterNoMaxRequestBytes) { TEST(BufferFilterFactoryTest, BufferFilterEmptyRouteProto) { BufferFilterFactory factory; EXPECT_NO_THROW({ - envoy::extensions::filters::http::buffer::v3::BufferPerRoute* config = - dynamic_cast( - factory.createEmptyRouteConfigProto().get()); - EXPECT_NE(nullptr, config); + EXPECT_NE(nullptr, dynamic_cast( + factory.createEmptyRouteConfigProto().get())); }); } diff --git a/test/extensions/filters/http/rbac/config_test.cc b/test/extensions/filters/http/rbac/config_test.cc index 7617a99c6136..7c296bc5f718 100644 --- a/test/extensions/filters/http/rbac/config_test.cc +++ b/test/extensions/filters/http/rbac/config_test.cc @@ -36,16 +36,14 @@ TEST(RoleBasedAccessControlFilterConfigFactoryTest, ValidProto) { TEST(RoleBasedAccessControlFilterConfigFactoryTest, EmptyProto) { RoleBasedAccessControlFilterConfigFactory factory; - auto* config = dynamic_cast( - factory.createEmptyConfigProto().get()); - EXPECT_NE(nullptr, config); + EXPECT_NE(nullptr, dynamic_cast( + factory.createEmptyConfigProto().get())); } TEST(RoleBasedAccessControlFilterConfigFactoryTest, EmptyRouteProto) { RoleBasedAccessControlFilterConfigFactory factory; - auto* config = dynamic_cast( - factory.createEmptyRouteConfigProto().get()); - EXPECT_NE(nullptr, config); + EXPECT_NE(nullptr, dynamic_cast( + factory.createEmptyRouteConfigProto().get())); } TEST(RoleBasedAccessControlFilterConfigFactoryTest, RouteSpecificConfig) { diff --git a/test/extensions/filters/network/rbac/config_test.cc b/test/extensions/filters/network/rbac/config_test.cc index 
06eb5b30a182..7da1addf69a2 100644 --- a/test/extensions/filters/network/rbac/config_test.cc +++ b/test/extensions/filters/network/rbac/config_test.cc @@ -68,9 +68,8 @@ TEST_F(RoleBasedAccessControlNetworkFilterConfigFactoryTest, ValidProto) { TEST_F(RoleBasedAccessControlNetworkFilterConfigFactoryTest, EmptyProto) { RoleBasedAccessControlNetworkFilterConfigFactory factory; - auto* config = dynamic_cast( - factory.createEmptyConfigProto().get()); - EXPECT_NE(nullptr, config); + EXPECT_NE(nullptr, dynamic_cast( + factory.createEmptyConfigProto().get())); } TEST_F(RoleBasedAccessControlNetworkFilterConfigFactoryTest, InvalidPermission) { From 5083a9a4bf84edf172ca76f531acd27e58522314 Mon Sep 17 00:00:00 2001 From: asraa Date: Fri, 15 May 2020 08:33:14 -0400 Subject: [PATCH 175/909] Revert "[http] Remove exceptions from HTTP/1 codec callbacks (#11101)" (#11194) This reverts commit 3550a7a7b0b858f4c2ac505efa6c71c2610f2666. Signed-off-by: Asra Ali --- source/common/http/http1/BUILD | 1 - source/common/http/http1/codec_impl.cc | 197 ++++++++----------------- source/common/http/http1/codec_impl.h | 66 +++------ 3 files changed, 77 insertions(+), 187 deletions(-) diff --git a/source/common/http/http1/BUILD b/source/common/http/http1/BUILD index e57b43f0cd77..d9b00a317157 100644 --- a/source/common/http/http1/BUILD +++ b/source/common/http/http1/BUILD @@ -29,7 +29,6 @@ envoy_cc_library( "//source/common/buffer:buffer_lib", "//source/common/buffer:watermark_buffer_lib", "//source/common/common:assert_lib", - "//source/common/common:cleanup_lib", "//source/common/common:statusor_lib", "//source/common/common:utility_lib", "//source/common/http:codec_helper_lib", diff --git a/source/common/http/http1/codec_impl.cc b/source/common/http/http1/codec_impl.cc index 523506201bbe..658596cc6057 100644 --- a/source/common/http/http1/codec_impl.cc +++ b/source/common/http/http1/codec_impl.cc @@ -9,7 +9,6 @@ #include "envoy/http/header_map.h" #include "envoy/network/connection.h" -#include "common/common/cleanup.h" #include "common/common/enum_to_int.h" #include "common/common/utility.h" #include "common/http/exception.h" @@ -267,10 +266,9 @@ void ServerConnectionImpl::maybeAddSentinelBufferFragment(Buffer::WatermarkBuffe outbound_responses_++; } -ConnectionImpl::HttpParserCode ServerConnectionImpl::doFloodProtectionChecks() { - ASSERT(dispatching_); +void ServerConnectionImpl::doFloodProtectionChecks() const { if (!flood_protection_) { - return HttpParserCode::Success; + return; } // Before processing another request, make sure that we are below the response flood protection // threshold. 
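
The codec_impl.cc hunks around this point show the overall shape of the revert: error signalling in the HTTP/1 callbacks goes back to throwing exceptions (FrameFloodException, CodecProtocolException and friends) instead of recording a failure in codec_status_ and returning an error code through http_parser. The sketch below is not Envoy code; it uses stand-in names purely to contrast the two error-propagation styles this patch switches between.

```cpp
// Stand-in types, illustrative only; these are not Envoy's actual classes.
#include <iostream>
#include <stdexcept>
#include <string>

// Style restored by this revert: the callback throws, and a caller higher up
// the dispatch path catches and translates the exception.
struct CodecProtocolError : public std::runtime_error {
  using std::runtime_error::runtime_error;
};

void onHeaderValueThrowing(const std::string& value) {
  if (value.find('\r') != std::string::npos) {
    throw CodecProtocolError("header value contains invalid chars");
  }
}

// Style removed by this revert: the callback records a status and returns an
// error code, so no exception crosses the C callback boundary (http_parser).
enum class ParserStatus { Success, Error };

struct Callbacks {
  std::string codec_status; // empty means OK
  ParserStatus onHeaderValue(const std::string& value) {
    if (value.find('\r') != std::string::npos) {
      codec_status = "header value contains invalid chars";
      return ParserStatus::Error;
    }
    return ParserStatus::Success;
  }
};

int main() {
  try {
    onHeaderValueThrowing("bad\rvalue");
  } catch (const CodecProtocolError& e) {
    std::cout << "caught: " << e.what() << "\n";
  }
  Callbacks callbacks;
  if (callbacks.onHeaderValue("bad\rvalue") == ParserStatus::Error) {
    std::cout << "status: " << callbacks.codec_status << "\n";
  }
  return 0;
}
```

The remainder of the diff repeats this pattern: each codec_status_ assignment plus error return collapses back into a single throw, and the dispatching_/Cleanup bookkeeping that guarded the stored status is removed.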
@@ -278,11 +276,8 @@ ConnectionImpl::HttpParserCode ServerConnectionImpl::doFloodProtectionChecks() { ENVOY_CONN_LOG(trace, "error accepting request: too many pending responses queued", connection_); stats_.response_flood_.inc(); - ASSERT(codec_status_.ok()); - codec_status_ = Http::bufferFloodError("Too many responses queued."); - return HttpParserCode::Error; + throw FrameFloodException("Too many responses queued."); } - return HttpParserCode::Success; } void ConnectionImpl::flushOutput(bool end_encode) { @@ -406,7 +401,8 @@ void RequestEncoderImpl::encodeHeaders(const RequestHeaderMap& headers, bool end http_parser_settings ConnectionImpl::settings_{ [](http_parser* parser) -> int { - return enumToInt(static_cast(parser->data)->onMessageBeginBase()); + static_cast(parser->data)->onMessageBeginBase(); + return 0; }, [](http_parser* parser, const char* at, size_t length) -> int { static_cast(parser->data)->onUrl(at, length); @@ -414,20 +410,23 @@ http_parser_settings ConnectionImpl::settings_{ }, nullptr, // on_status [](http_parser* parser, const char* at, size_t length) -> int { - return enumToInt(static_cast(parser->data)->onHeaderField(at, length)); + static_cast(parser->data)->onHeaderField(at, length); + return 0; }, [](http_parser* parser, const char* at, size_t length) -> int { - return enumToInt(static_cast(parser->data)->onHeaderValue(at, length)); + static_cast(parser->data)->onHeaderValue(at, length); + return 0; }, [](http_parser* parser) -> int { - return enumToInt(static_cast(parser->data)->onHeadersCompleteBase()); + return static_cast(parser->data)->onHeadersCompleteBase(); }, [](http_parser* parser, const char* at, size_t length) -> int { static_cast(parser->data)->bufferBody(at, length); return 0; }, [](http_parser* parser) -> int { - return enumToInt(static_cast(parser->data)->onMessageCompleteBase()); + static_cast(parser->data)->onMessageCompleteBase(); + return 0; }, [](http_parser* parser) -> int { // A 0-byte chunk header is used to signal the end of the chunked body. @@ -455,23 +454,19 @@ ConnectionImpl::ConnectionImpl(Network::Connection& connection, Stats::Scope& st enable_trailers_(enable_trailers), reject_unsupported_transfer_encodings_(Runtime::runtimeFeatureEnabled( "envoy.reloadable_features.reject_unsupported_transfer_encodings")), - dispatching_(false), output_buffer_([&]() -> void { this->onBelowLowWatermark(); }, - [&]() -> void { this->onAboveHighWatermark(); }), + output_buffer_([&]() -> void { this->onBelowLowWatermark(); }, + [&]() -> void { this->onAboveHighWatermark(); }), max_headers_kb_(max_headers_kb), max_headers_count_(max_headers_count) { output_buffer_.setWatermarks(connection.bufferLimit()); http_parser_init(&parser_, type); parser_.data = this; } -ConnectionImpl::HttpParserCode ConnectionImpl::completeLastHeader() { - ASSERT(dispatching_); +void ConnectionImpl::completeLastHeader() { ENVOY_CONN_LOG(trace, "completed header: key={} value={}", connection_, current_header_field_.getStringView(), current_header_value_.getStringView()); - if (!checkHeaderNameForUnderscores()) { - // This indicates that the request should be rejected due to header name with underscores. 
- return HttpParserCode::Error; - } + checkHeaderNameForUnderscores(); auto& headers_or_trailers = headersOrTrailers(); if (!current_header_field_.empty()) { current_header_field_.inlineTransform([](char c) { return absl::ascii_tolower(c); }); @@ -489,15 +484,12 @@ ConnectionImpl::HttpParserCode ConnectionImpl::completeLastHeader() { sendProtocolError(Http1ResponseCodeDetails::get().TooManyHeaders); const absl::string_view header_type = processing_trailers_ ? Http1HeaderTypes::get().Trailers : Http1HeaderTypes::get().Headers; - ASSERT(codec_status_.ok()); - codec_status_ = codecProtocolError(absl::StrCat(header_type, " size exceeds limit")); - return HttpParserCode::Error; + throw CodecProtocolException(absl::StrCat(header_type, " size exceeds limit")); } header_parsing_state_ = HeaderParsingState::Field; ASSERT(current_header_field_.empty()); ASSERT(current_header_value_.empty()); - return HttpParserCode::Success; } bool ConnectionImpl::maybeDirectDispatch(Buffer::Instance& data) { @@ -522,13 +514,8 @@ Http::Status ConnectionImpl::dispatch(Buffer::Instance& data) { Http::Status ConnectionImpl::innerDispatch(Buffer::Instance& data) { ENVOY_CONN_LOG(trace, "parsing {} bytes", connection_, data.length()); - // Make sure that dispatching_ is set to false after dispatching, even when - // ConnectionImpl::dispatch throws an exception. - Cleanup cleanup([this]() { dispatching_ = false; }); - ASSERT(!dispatching_); ASSERT(buffered_body_.length() == 0); - dispatching_ = true; if (maybeDirectDispatch(data)) { return Http::okStatus(); } @@ -539,11 +526,7 @@ Http::Status ConnectionImpl::innerDispatch(Buffer::Instance& data) { ssize_t total_parsed = 0; if (data.length() > 0) { for (const Buffer::RawSlice& slice : data.getRawSlices()) { - auto statusor_parsed = dispatchSlice(static_cast(slice.mem_), slice.len_); - if (!statusor_parsed.ok()) { - return statusor_parsed.status(); - } - total_parsed += statusor_parsed.value(); + total_parsed += dispatchSlice(static_cast(slice.mem_), slice.len_); if (HTTP_PARSER_ERRNO(&parser_) != HPE_OK) { // Parse errors trigger an exception in dispatchSlice so we are guaranteed to be paused at // this point. @@ -553,10 +536,7 @@ Http::Status ConnectionImpl::innerDispatch(Buffer::Instance& data) { } dispatchBufferedBody(); } else { - auto result = dispatchSlice(nullptr, 0); - if (!result.ok()) { - return result.status(); - } + dispatchSlice(nullptr, 0); } ASSERT(buffered_body_.length() == 0); @@ -569,54 +549,39 @@ Http::Status ConnectionImpl::innerDispatch(Buffer::Instance& data) { return Http::okStatus(); } -Envoy::StatusOr ConnectionImpl::dispatchSlice(const char* slice, size_t len) { - ASSERT(codec_status_.ok() && dispatching_); +size_t ConnectionImpl::dispatchSlice(const char* slice, size_t len) { ssize_t rc = http_parser_execute(&parser_, &settings_, slice, len); - if (!codec_status_.ok()) { - return codec_status_; - } if (HTTP_PARSER_ERRNO(&parser_) != HPE_OK && HTTP_PARSER_ERRNO(&parser_) != HPE_PAUSED) { sendProtocolError(Http1ResponseCodeDetails::get().HttpCodecError); - // Avoid overwriting the codec_status_ set in the callbacks. 
- ASSERT(codec_status_.ok()); - codec_status_ = codecProtocolError( - absl::StrCat("http/1.1 protocol error: ", http_errno_name(HTTP_PARSER_ERRNO(&parser_)))); - return codec_status_; + throw CodecProtocolException("http/1.1 protocol error: " + + std::string(http_errno_name(HTTP_PARSER_ERRNO(&parser_)))); } return rc; } -ConnectionImpl::HttpParserCode ConnectionImpl::onHeaderField(const char* data, size_t length) { - ASSERT(dispatching_); +void ConnectionImpl::onHeaderField(const char* data, size_t length) { // We previously already finished up the headers, these headers are // now trailers. if (header_parsing_state_ == HeaderParsingState::Done) { if (!enable_trailers_) { // Ignore trailers. - return HttpParserCode::Success; + return; } processing_trailers_ = true; header_parsing_state_ = HeaderParsingState::Field; } if (header_parsing_state_ == HeaderParsingState::Value) { - HttpParserCode exit_code = completeLastHeader(); - if (exit_code == HttpParserCode::Error) { - // If an error exit code is returned, there must be an error in the codec status. - ASSERT(!codec_status_.ok()); - return HttpParserCode::Error; - } + completeLastHeader(); } current_header_field_.append(data, length); - return HttpParserCode::Success; } -ConnectionImpl::HttpParserCode ConnectionImpl::onHeaderValue(const char* data, size_t length) { - ASSERT(dispatching_); +void ConnectionImpl::onHeaderValue(const char* data, size_t length) { if (header_parsing_state_ == HeaderParsingState::Done && !enable_trailers_) { // Ignore trailers. - return HttpParserCode::Success; + return; } if (processing_trailers_) { @@ -630,10 +595,7 @@ ConnectionImpl::HttpParserCode ConnectionImpl::onHeaderValue(const char* data, s ENVOY_CONN_LOG(debug, "invalid header value: {}", connection_, header_value); error_code_ = Http::Code::BadRequest; sendProtocolError(Http1ResponseCodeDetails::get().InvalidCharacters); - ASSERT(codec_status_.ok()); - codec_status_ = - codecProtocolError("http/1.1 protocol error: header value contains invalid chars"); - return HttpParserCode::Error; + throw CodecProtocolException("http/1.1 protocol error: header value contains invalid chars"); } } @@ -654,23 +616,14 @@ ConnectionImpl::HttpParserCode ConnectionImpl::onHeaderValue(const char* data, s processing_trailers_ ? Http1HeaderTypes::get().Trailers : Http1HeaderTypes::get().Headers; error_code_ = Http::Code::RequestHeaderFieldsTooLarge; sendProtocolError(Http1ResponseCodeDetails::get().HeadersTooLarge); - ASSERT(codec_status_.ok()); - codec_status_ = codecProtocolError(absl::StrCat(header_type, " size exceeds limit")); - return HttpParserCode::Error; + throw CodecProtocolException(absl::StrCat(header_type, " size exceeds limit")); } - return HttpParserCode::Success; } -ConnectionImpl::HttpParserCode ConnectionImpl::onHeadersCompleteBase() { +int ConnectionImpl::onHeadersCompleteBase() { ASSERT(!processing_trailers_); - ASSERT(dispatching_); ENVOY_CONN_LOG(trace, "onHeadersCompleteBase", connection_); - HttpParserCode exit_code = completeLastHeader(); - if (exit_code == HttpParserCode::Error) { - // If an error exit code is returned, there must be an error in the codec status. 
- ASSERT(!codec_status_.ok()); - return exit_code; - } + completeLastHeader(); if (!(parser_.http_major == 1 && parser_.http_minor == 1)) { // This is not necessarily true, but it's good enough since higher layers only care if this is @@ -717,20 +670,15 @@ ConnectionImpl::HttpParserCode ConnectionImpl::onHeadersCompleteBase() { !absl::EqualsIgnoreCase(encoding, Headers::get().TransferEncodingValues.Chunked)) { error_code_ = Http::Code::NotImplemented; sendProtocolError(Http1ResponseCodeDetails::get().InvalidTransferEncoding); - ASSERT(codec_status_.ok()); - codec_status_ = codecProtocolError("http/1.1 protocol error: unsupported transfer encoding"); - return HttpParserCode::Error; + throw CodecProtocolException("http/1.1 protocol error: unsupported transfer encoding"); } } - HttpParserCode rc = onHeadersComplete(); - if (rc == HttpParserCode::Error) { - return rc; - } + int rc = onHeadersComplete(); header_parsing_state_ = HeaderParsingState::Done; // Returning 2 informs http_parser to not expect a body or further data on this connection. - return handling_upgrade_ ? HttpParserCode::NoBodyData : rc; + return handling_upgrade_ ? 2 : rc; } void ConnectionImpl::bufferBody(const char* data, size_t length) { @@ -739,7 +687,6 @@ void ConnectionImpl::bufferBody(const char* data, size_t length) { void ConnectionImpl::dispatchBufferedBody() { ASSERT(HTTP_PARSER_ERRNO(&parser_) == HPE_OK || HTTP_PARSER_ERRNO(&parser_) == HPE_PAUSED); - ASSERT(codec_status_.ok()); if (buffered_body_.length() > 0) { onBody(buffered_body_); buffered_body_.drain(buffered_body_.length()); @@ -754,7 +701,7 @@ void ConnectionImpl::onChunkHeader(bool is_final_chunk) { } } -ConnectionImpl::HttpParserCode ConnectionImpl::onMessageCompleteBase() { +void ConnectionImpl::onMessageCompleteBase() { ENVOY_CONN_LOG(trace, "message complete", connection_); dispatchBufferedBody(); @@ -765,25 +712,19 @@ ConnectionImpl::HttpParserCode ConnectionImpl::onMessageCompleteBase() { ASSERT(!deferred_end_stream_headers_); ENVOY_CONN_LOG(trace, "Pausing parser due to upgrade.", connection_); http_parser_pause(&parser_, 1); - return HttpParserCode::Success; + return; } // If true, this indicates we were processing trailers and must // move the last header into current_header_map_ if (header_parsing_state_ == HeaderParsingState::Value) { - HttpParserCode exit_code = completeLastHeader(); - if (exit_code == HttpParserCode::Error) { - // If an error exit code is returned, there must be an error in the codec status. - ASSERT(!codec_status_.ok()); - return exit_code; - } + completeLastHeader(); } onMessageComplete(); - return HttpParserCode::Success; } -ConnectionImpl::HttpParserCode ConnectionImpl::onMessageBeginBase() { +void ConnectionImpl::onMessageBeginBase() { ENVOY_CONN_LOG(trace, "message begin", connection_); // Make sure that if HTTP/1.0 and HTTP/1.1 requests share a connection Envoy correctly sets // protocol for each request. 
Envoy defaults to 1.1 but sets the protocol to 1.0 where applicable @@ -792,7 +733,7 @@ ConnectionImpl::HttpParserCode ConnectionImpl::onMessageBeginBase() { processing_trailers_ = false; header_parsing_state_ = HeaderParsingState::Field; allocHeaders(); - return onMessageBegin(); + onMessageBegin(); } void ConnectionImpl::onResetStreamBase(StreamResetReason reason) { @@ -832,7 +773,7 @@ void ServerConnectionImpl::onEncodeComplete() { } } -bool ServerConnectionImpl::handlePath(RequestHeaderMap& headers, unsigned int method) { +void ServerConnectionImpl::handlePath(RequestHeaderMap& headers, unsigned int method) { HeaderString path(Headers::get().Path); bool is_connect = (method == HTTP_CONNECT); @@ -843,7 +784,7 @@ bool ServerConnectionImpl::handlePath(RequestHeaderMap& headers, unsigned int me (active_request.request_url_.getStringView()[0] == '/' || ((method == HTTP_OPTIONS) && active_request.request_url_.getStringView()[0] == '*'))) { headers.addViaMove(std::move(path), std::move(active_request.request_url_)); - return true; + return; } // If absolute_urls and/or connect are not going be handled, copy the url and return. @@ -852,15 +793,13 @@ bool ServerConnectionImpl::handlePath(RequestHeaderMap& headers, unsigned int me // Absolute URLS in CONNECT requests will be rejected below by the URL class validation. if (!codec_settings_.allow_absolute_url_ && !is_connect) { headers.addViaMove(std::move(path), std::move(active_request.request_url_)); - return true; + return; } Utility::Url absolute_url; if (!absolute_url.initialize(active_request.request_url_.getStringView(), is_connect)) { sendProtocolError(Http1ResponseCodeDetails::get().InvalidUrl); - ASSERT(codec_status_.ok()); - codec_status_ = codecProtocolError("http/1.1 protocol error: invalid url in request line"); - return false; + throw CodecProtocolException("http/1.1 protocol error: invalid url in request line"); } // RFC7230#5.7 // When a proxy receives a request with an absolute-form of @@ -875,10 +814,9 @@ bool ServerConnectionImpl::handlePath(RequestHeaderMap& headers, unsigned int me headers.setPath(absolute_url.pathAndQueryParams()); } active_request.request_url_.clear(); - return true; } -ConnectionImpl::HttpParserCode ServerConnectionImpl::onHeadersComplete() { +int ServerConnectionImpl::onHeadersComplete() { // Handle the case where response happens prior to request complete. It's up to upper layer code // to disconnect the connection but we shouldn't fire any more events since it doesn't make // sense. @@ -896,9 +834,7 @@ ConnectionImpl::HttpParserCode ServerConnectionImpl::onHeadersComplete() { header_value); error_code_ = Http::Code::BadRequest; sendProtocolError(Http1ResponseCodeDetails::get().ConnectionHeaderSanitization); - ASSERT(codec_status_.ok()); - codec_status_ = codecProtocolError("Invalid nominated headers in Connection."); - return HttpParserCode::Error; + throw CodecProtocolException("Invalid nominated headers in Connection."); } } @@ -907,11 +843,7 @@ ConnectionImpl::HttpParserCode ServerConnectionImpl::onHeadersComplete() { active_request.response_encoder_.setIsResponseToHeadRequest(parser_.method == HTTP_HEAD); active_request.response_encoder_.setIsResponseToConnectRequest(parser_.method == HTTP_CONNECT); - if (!handlePath(*headers, parser_.method)) { - // Reached a failure. 
- ASSERT(!codec_status_.ok()); - return HttpParserCode::Error; - } + handlePath(*headers, parser_.method); ASSERT(active_request.request_url_.empty()); headers->setMethod(method_string); @@ -920,10 +852,8 @@ ConnectionImpl::HttpParserCode ServerConnectionImpl::onHeadersComplete() { auto details = HeaderUtility::requestHeadersValid(*headers); if (details.has_value()) { sendProtocolError(details.value().get()); - ASSERT(codec_status_.ok()); - codec_status_ = codecProtocolError( + throw CodecProtocolException( "http/1.1 protocol error: request headers failed spec compliance checks"); - return HttpParserCode::Error; } // Determine here whether we have a body or not. This uses the new RFC semantics where the @@ -946,27 +876,20 @@ ConnectionImpl::HttpParserCode ServerConnectionImpl::onHeadersComplete() { } } - return HttpParserCode::Success; + return 0; } -ConnectionImpl::HttpParserCode ServerConnectionImpl::onMessageBegin() { +void ServerConnectionImpl::onMessageBegin() { if (!resetStreamCalled()) { ASSERT(!active_request_.has_value()); active_request_.emplace(*this, header_key_formatter_.get()); auto& active_request = active_request_.value(); - if (resetStreamCalled()) { - ASSERT(codec_status_.ok()); - codec_status_ = codecClientError("cannot create new streams after calling reset"); - return HttpParserCode::Error; - } active_request.request_decoder_ = &callbacks_.newStream(active_request.response_encoder_); // Check for pipelined request flood as we prepare to accept a new request. // Parse errors that happen prior to onMessageBegin result in stream termination, it is not // possible to overflow output buffers with early parse errors. - return doFloodProtectionChecks(); - } else { - return HttpParserCode::Success; + doFloodProtectionChecks(); } } @@ -1056,7 +979,7 @@ void ServerConnectionImpl::releaseOutboundResponse( delete fragment; } -bool ServerConnectionImpl::checkHeaderNameForUnderscores() { +void ServerConnectionImpl::checkHeaderNameForUnderscores() { if (headers_with_underscores_action_ != envoy::config::core::v3::HttpProtocolOptions::ALLOW && Http::HeaderUtility::headerNameContainsUnderscore(current_header_field_.getStringView())) { if (headers_with_underscores_action_ == @@ -1072,13 +995,9 @@ bool ServerConnectionImpl::checkHeaderNameForUnderscores() { error_code_ = Http::Code::BadRequest; sendProtocolError(Http1ResponseCodeDetails::get().InvalidCharacters); stats_.requests_rejected_with_underscores_in_headers_.inc(); - ASSERT(codec_status_.ok()); - codec_status_ = - codecProtocolError("http/1.1 protocol error: header name contains underscores"); - return false; + throw CodecProtocolException("http/1.1 protocol error: header name contains underscores"); } } - return true; } ClientConnectionImpl::ClientConnectionImpl(Network::Connection& connection, Stats::Scope& stats, @@ -1100,6 +1019,10 @@ bool ClientConnectionImpl::cannotHaveBody() { } RequestEncoder& ClientConnectionImpl::newStream(ResponseDecoder& response_decoder) { + if (resetStreamCalled()) { + throw CodecClientException("cannot create new streams after calling reset"); + } + // If reads were disabled due to flow control, we expect reads to always be enabled again before // reusing this connection. This is done when the response is received. 
ASSERT(connection_.readEnabled()); @@ -1111,14 +1034,12 @@ RequestEncoder& ClientConnectionImpl::newStream(ResponseDecoder& response_decode return pending_response_.value().encoder_; } -ConnectionImpl::HttpParserCode ClientConnectionImpl::onHeadersComplete() { +int ClientConnectionImpl::onHeadersComplete() { // Handle the case where the client is closing a kept alive connection (by sending a 408 // with a 'Connection: close' header). In this case we just let response flush out followed // by the remote close. if (!pending_response_.has_value() && !resetStreamCalled()) { - ASSERT(codec_status_.ok()); - codec_status_ = prematureResponseError("", static_cast(parser_.status_code)); - return HttpParserCode::Error; + throw PrematureResponseException(static_cast(parser_.status_code)); } else if (pending_response_.has_value()) { ASSERT(!pending_response_done_); auto& headers = absl::get(headers_or_trailers_); @@ -1149,7 +1070,7 @@ ConnectionImpl::HttpParserCode ClientConnectionImpl::onHeadersComplete() { // Here we deal with cases where the response cannot have a body, but http_parser does not deal // with it for us. - return cannotHaveBody() ? HttpParserCode::NoBody : HttpParserCode::Success; + return cannotHaveBody() ? 1 : 0; } bool ClientConnectionImpl::upgradeAllowed() const { diff --git a/source/common/http/http1/codec_impl.h b/source/common/http/http1/codec_impl.h index 52d5ed6b1ed6..8b53d4c374f5 100644 --- a/source/common/http/http1/codec_impl.h +++ b/source/common/http/http1/codec_impl.h @@ -228,10 +228,6 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable dispatchSlice(const char* slice, size_t len); + size_t dispatchSlice(const char* slice, size_t len); /** * Called by the http_parser when body data is received. @@ -334,10 +310,9 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable active_request_; @@ -568,9 +538,9 @@ class ClientConnectionImpl : public ClientConnection, public ConnectionImpl { // ConnectionImpl void onEncodeComplete() override {} - HttpParserCode onMessageBegin() override { return HttpParserCode::Success; } + void onMessageBegin() override {} void onUrl(const char*, size_t) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } - HttpParserCode onHeadersComplete() override; + int onHeadersComplete() override; bool upgradeAllowed() const override; void onBody(Buffer::Instance& data) override; void onMessageComplete() override; From 9d5046b18dae94bc1b1fda5693cfb30b5cb4bd22 Mon Sep 17 00:00:00 2001 From: Sunjay Bhatia Date: Fri, 15 May 2020 12:01:11 -0400 Subject: [PATCH 176/909] Windows: Fix access log tests (#11177) formatter test: - Anchor the command regex at the beginning of the search space to replace the existing invalid use of the match position logic (match should not be used at all when the regex search fails). 
- add test case that demonstrates anchor is needed, removal of anchor causes new test case to fail benchmark test: - benchmark main now matches test/main.cc to initialize the socket stack on Windows and to disable the CRT invalid parameter abort behavior Co-authored-by: William A Rowe Jr Co-authored-by: Sunjay Bhatia Signed-off-by: William A Rowe Jr Signed-off-by: Sunjay Bhatia --- .../common/access_log/access_log_formatter.cc | 4 +-- test/BUILD | 8 +---- test/benchmark/BUILD | 8 ++--- test/benchmark/main.cc | 15 ++------- test/common/access_log/BUILD | 2 -- .../access_log/access_log_formatter_test.cc | 1 + test/fuzz/main.cc | 9 ++---- test/main.cc | 32 +------------------ test/test_common/BUILD | 6 +++- test/test_common/environment.cc | 30 +++++++++++++++++ test/test_common/environment.h | 7 ++++ 11 files changed, 54 insertions(+), 68 deletions(-) diff --git a/source/common/access_log/access_log_formatter.cc b/source/common/access_log/access_log_formatter.cc index 6ea62a7dd1a8..d1bfc981338c 100644 --- a/source/common/access_log/access_log_formatter.cc +++ b/source/common/access_log/access_log_formatter.cc @@ -231,7 +231,7 @@ std::vector AccessLogFormatParser::parse(const std::string std::vector formatters; static constexpr absl::string_view DYNAMIC_META_TOKEN{"DYNAMIC_METADATA("}; static constexpr absl::string_view FILTER_STATE_TOKEN{"FILTER_STATE("}; - const std::regex command_w_args_regex(R"EOF(%([A-Z]|_)+(\([^\)]*\))?(:[0-9]+)?(%))EOF"); + const std::regex command_w_args_regex(R"EOF(^%([A-Z]|_)+(\([^\)]*\))?(:[0-9]+)?(%))EOF"); static constexpr absl::string_view PLAIN_SERIALIZATION{"PLAIN"}; static constexpr absl::string_view TYPED_SERIALIZATION{"TYPED"}; @@ -245,7 +245,7 @@ std::vector AccessLogFormatParser::parse(const std::string std::smatch m; const std::string search_space = format.substr(pos); - if (!(std::regex_search(search_space, m, command_w_args_regex) || m.position() == 0)) { + if (!std::regex_search(search_space, m, command_w_args_regex)) { throw EnvoyException( fmt::format("Incorrect configuration: {}. Couldn't find valid command at position {}", format, pos)); diff --git a/test/BUILD b/test/BUILD index 41cf8c14bcb3..a3cd553d30e8 100644 --- a/test/BUILD +++ b/test/BUILD @@ -23,9 +23,6 @@ envoy_cc_test_library( "test_runner.h", ], hdrs = ["test_listener.h"], - external_deps = [ - "abseil_symbolize", - ], deps = [ "//source/common/common:logger_lib", "//source/common/common:thread_lib", @@ -37,8 +34,5 @@ envoy_cc_test_library( "//test/test_common:environment_lib", "//test/test_common:global_lib", "//test/test_common:printers_lib", - ] + select({ - "//bazel:disable_signal_trace": [], - "//conditions:default": ["//source/common/signal:sigaction_lib"], - }), + ], ) diff --git a/test/benchmark/BUILD b/test/benchmark/BUILD index 7bfc766727b5..96d08112c091 100644 --- a/test/benchmark/BUILD +++ b/test/benchmark/BUILD @@ -12,11 +12,9 @@ envoy_cc_test_library( name = "main", srcs = ["main.cc"], external_deps = [ - "abseil_symbolize", "benchmark", ], - deps = select({ - "//bazel:disable_signal_trace": [], - "//conditions:default": ["//source/common/signal:sigaction_lib"], - }), + deps = [ + "//test/test_common:environment_lib", + ], ) diff --git a/test/benchmark/main.cc b/test/benchmark/main.cc index ae39333d72a8..7afdf85e6558 100644 --- a/test/benchmark/main.cc +++ b/test/benchmark/main.cc @@ -1,23 +1,12 @@ // NOLINT(namespace-envoy) // This is an Envoy driver for benchmarks. 
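(Aside, not part of the patch: a minimal standalone sketch of the anchoring behavior fixed in access_log_formatter.cc above. The pattern below is a simplified, illustrative form of the command regex and only <regex>, <string> and <cassert> are assumed. Without the leading '^', std::regex_search will happily match a well-formed command that appears later in the search space, and inspecting m.position() after a failed search is not meaningful, which is why the old "|| m.position() == 0" check was invalid.)

#include <cassert>
#include <regex>
#include <string>

int main() {
  // Simplified form of the formatter command regex, with and without the anchor.
  const std::regex anchored(R"(^%([A-Z]|_)+(\([^\)]*\))?(:[0-9]+)?%)");
  const std::regex unanchored(R"(%([A-Z]|_)+(\([^\)]*\))?(:[0-9]+)?%)");

  // The parser examines the format string from the current position onward.
  // "%bad%" is not a valid command, but "%PROTOCOL%" later in the string is.
  const std::string search_space = "%bad% and %PROTOCOL%";
  std::smatch m;

  assert(!std::regex_search(search_space, m, anchored));  // rejected, as intended
  assert(std::regex_search(search_space, m, unanchored)); // spurious match at offset 10
  return 0;
}

Anchoring the pattern lets the search itself answer the only question the parser cares about: whether a valid command starts at the current position.
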
+#include "test/test_common/environment.h" #include "benchmark/benchmark.h" -#ifdef ENVOY_HANDLE_SIGNALS -#include "common/signal/signal_action.h" -#endif - -#include "absl/debugging/symbolize.h" - // Boilerplate main(), which discovers benchmarks and runs them. int main(int argc, char** argv) { -#ifndef __APPLE__ - absl::InitializeSymbolizer(argv[0]); -#endif -#ifdef ENVOY_HANDLE_SIGNALS - // Enabled by default. Control with "bazel --define=signal_trace=disabled" - Envoy::SignalAction handle_sigs; -#endif + Envoy::TestEnvironment::initializeTestMain(argv[0]); benchmark::Initialize(&argc, argv); if (benchmark::ReportUnrecognizedArguments(argc, argv)) { diff --git a/test/common/access_log/BUILD b/test/common/access_log/BUILD index 06da33256f87..836975e9ae34 100644 --- a/test/common/access_log/BUILD +++ b/test/common/access_log/BUILD @@ -36,7 +36,6 @@ envoy_cc_fuzz_test( envoy_cc_test( name = "access_log_formatter_test", srcs = ["access_log_formatter_test.cc"], - tags = ["fails_on_windows"], deps = [ "//source/common/access_log:access_log_formatter_lib", "//source/common/common:utility_lib", @@ -109,5 +108,4 @@ envoy_cc_benchmark_binary( envoy_benchmark_test( name = "access_log_formatter_speed_test_benchmark_test", benchmark_binary = "access_log_formatter_speed_test", - tags = ["fails_on_windows"], ) diff --git a/test/common/access_log/access_log_formatter_test.cc b/test/common/access_log/access_log_formatter_test.cc index 0de62f0887da..fe852ad2a22a 100644 --- a/test/common/access_log/access_log_formatter_test.cc +++ b/test/common/access_log/access_log_formatter_test.cc @@ -1988,6 +1988,7 @@ TEST(AccessLogFormatterTest, ParserFailures) { "%REQ(valid)% %NOT_VALID%", "%REQ(FIRST?SECOND%", "%%", + "%%HOSTNAME%PROTOCOL%", "%protocol%", "%REQ(TEST):%", "%REQ(TEST):3q4%", diff --git a/test/fuzz/main.cc b/test/fuzz/main.cc index 98e30e63cbb8..d1c98eb6eed9 100644 --- a/test/fuzz/main.cc +++ b/test/fuzz/main.cc @@ -54,13 +54,8 @@ INSTANTIATE_TEST_SUITE_P(CorpusExamples, FuzzerCorpusTest, testing::ValuesIn(tes } // namespace Envoy int main(int argc, char** argv) { -#ifndef __APPLE__ - absl::InitializeSymbolizer(argv[0]); -#endif -#ifdef ENVOY_HANDLE_SIGNALS - // Enabled by default. Control with "bazel --define=signal_trace=disabled" - Envoy::SignalAction handle_sigs; -#endif + Envoy::TestEnvironment::initializeTestMain(argv[0]); + // Expected usage: [other gtest flags] RELEASE_ASSERT(argc >= 2, ""); // Consider any file after the test path which doesn't have a - prefix to be a corpus entry. diff --git a/test/main.cc b/test/main.cc index 42bc71b05ed3..eae6c3fc4f68 100644 --- a/test/main.cc +++ b/test/main.cc @@ -5,43 +5,13 @@ #include "test/test_common/utility.h" #include "test/test_runner.h" -#include "absl/debugging/symbolize.h" - -#ifdef ENVOY_HANDLE_SIGNALS -#include "common/signal/signal_action.h" -#endif - #include "tools/cpp/runfiles/runfiles.h" -#if defined(WIN32) -static void NoopInvalidParameterHandler(const wchar_t* expression, const wchar_t* function, - const wchar_t* file, unsigned int line, - uintptr_t pReserved) { - return; -} -#endif - using bazel::tools::cpp::runfiles::Runfiles; // The main entry point (and the rest of this file) should have no logic in it, // this allows overriding by site specific versions of main.cc. 
int main(int argc, char** argv) { -#if defined(WIN32) - _set_abort_behavior(0, _WRITE_ABORT_MSG | _CALL_REPORTFAULT); - - _set_invalid_parameter_handler(NoopInvalidParameterHandler); - - WSADATA wsa_data; - const WORD version_requested = MAKEWORD(2, 2); - RELEASE_ASSERT(WSAStartup(version_requested, &wsa_data) == 0, ""); -#endif - -#ifndef __APPLE__ - absl::InitializeSymbolizer(argv[0]); -#endif -#ifdef ENVOY_HANDLE_SIGNALS - // Enabled by default. Control with "bazel --define=signal_trace=disabled" - Envoy::SignalAction handle_sigs; -#endif + Envoy::TestEnvironment::initializeTestMain(argv[0]); // Create a Runfiles object for runfiles lookup. // https://github.com/bazelbuild/bazel/blob/master/tools/cpp/runfiles/runfiles_src.h#L32 diff --git a/test/test_common/BUILD b/test/test_common/BUILD index b735d286130b..57e72caf50c4 100644 --- a/test/test_common/BUILD +++ b/test/test_common/BUILD @@ -25,6 +25,7 @@ envoy_cc_test_library( hdrs = ["environment.h"], external_deps = [ "abseil_optional", + "abseil_symbolize", "bazel_runfiles", ], deps = [ @@ -39,7 +40,10 @@ envoy_cc_test_library( "//source/common/network:utility_lib", "//source/server:options_lib", "//test/common/runtime:utility_lib", - ], + ] + select({ + "//bazel:disable_signal_trace": [], + "//conditions:default": ["//source/common/signal:sigaction_lib"], + }), ) envoy_cc_test_library( diff --git a/test/test_common/environment.cc b/test/test_common/environment.cc index 87377b7022c7..99b88671f36e 100644 --- a/test/test_common/environment.cc +++ b/test/test_common/environment.cc @@ -17,11 +17,16 @@ #include "common/common/utility.h" #include "common/filesystem/directory.h" +#ifdef ENVOY_HANDLE_SIGNALS +#include "common/signal/signal_action.h" +#endif + #include "server/options_impl.h" #include "test/test_common/file_system_for_test.h" #include "test/test_common/network_utility.h" +#include "absl/debugging/symbolize.h" #include "absl/strings/match.h" #include "gtest/gtest.h" #include "spdlog/spdlog.h" @@ -188,6 +193,31 @@ std::string TestEnvironment::getCheckedEnvVar(const std::string& var) { return optional.value(); } +void TestEnvironment::initializeTestMain(char* program_name) { +#ifdef WIN32 + _set_abort_behavior(0, _WRITE_ABORT_MSG | _CALL_REPORTFAULT); + + _set_invalid_parameter_handler([](const wchar_t* expression, const wchar_t* function, + const wchar_t* file, unsigned int line, + uintptr_t pReserved) {}); + + WSADATA wsa_data; + const WORD version_requested = MAKEWORD(2, 2); + RELEASE_ASSERT(WSAStartup(version_requested, &wsa_data) == 0, ""); +#endif + +#ifdef __APPLE__ + UNREFERENCED_PARAMETER(program_name); +#else + absl::InitializeSymbolizer(program_name); +#endif + +#ifdef ENVOY_HANDLE_SIGNALS + // Enabled by default. Control with "bazel --define=signal_trace=disabled" + static Envoy::SignalAction handle_sigs; +#endif +} + void TestEnvironment::initializeOptions(int argc, char** argv) { argc_ = argc; argv_ = argv; diff --git a/test/test_common/environment.h b/test/test_common/environment.h index ed9886ef63af..50c097a9ad27 100644 --- a/test/test_common/environment.h +++ b/test/test_common/environment.h @@ -22,6 +22,13 @@ class TestEnvironment { using ParamMap = std::unordered_map; + /** + * Perform common initialization steps needed to run a test binary. This + * method should be called first in all test main functions. 
+ * @param program_name argv[0] test program is invoked with + */ + static void initializeTestMain(char* program_name); + /** * Initialize command-line options for later access by tests in getOptions(). * @param argc number of command-line args. From 47c54f2726c2e3218103ef05e3de64e856dca2d8 Mon Sep 17 00:00:00 2001 From: rulex123 <29862113+rulex123@users.noreply.github.com> Date: Fri, 15 May 2020 18:15:12 +0200 Subject: [PATCH 177/909] [admin]: rename source/server/http to source/server/admin (#11212) As part of the refactoring work for #5505, we decided to rename the source/server/http dir to source/server/admin. Signed-off-by: Erica Manno --- include/envoy/server/admin.h | 2 +- source/server/BUILD | 2 +- source/server/{http => admin}/BUILD | 0 source/server/{http => admin}/admin.cc | 4 ++-- source/server/{http => admin}/admin.h | 14 +++++++------- source/server/{http => admin}/admin_filter.cc | 4 ++-- source/server/{http => admin}/admin_filter.h | 0 .../server/{http => admin}/config_tracker_impl.cc | 2 +- .../server/{http => admin}/config_tracker_impl.h | 0 source/server/{http => admin}/handler_ctx.h | 0 source/server/{http => admin}/listeners_handler.cc | 4 ++-- source/server/{http => admin}/listeners_handler.h | 2 +- source/server/{http => admin}/logs_handler.cc | 4 ++-- source/server/{http => admin}/logs_handler.h | 2 +- source/server/{http => admin}/profiling_handler.cc | 4 ++-- source/server/{http => admin}/profiling_handler.h | 0 source/server/{http => admin}/prometheus_stats.cc | 2 +- source/server/{http => admin}/prometheus_stats.h | 0 source/server/{http => admin}/runtime_handler.cc | 4 ++-- source/server/{http => admin}/runtime_handler.h | 2 +- source/server/{http => admin}/stats_handler.cc | 6 +++--- source/server/{http => admin}/stats_handler.h | 2 +- source/server/{http => admin}/utils.cc | 2 +- source/server/{http => admin}/utils.h | 0 source/server/config_validation/BUILD | 4 ++-- source/server/config_validation/admin.h | 2 +- source/server/config_validation/server.h | 2 +- source/server/server.cc | 2 +- source/server/server.h | 2 +- test/common/router/BUILD | 6 +++--- test/common/router/rds_impl_test.cc | 2 +- test/common/router/vhds_test.cc | 2 +- test/server/{http => admin}/BUILD | 12 ++++++------ test/server/{http => admin}/admin_filter_test.cc | 2 +- test/server/{http => admin}/admin_instance.cc | 2 +- test/server/{http => admin}/admin_instance.h | 2 +- test/server/{http => admin}/admin_test.cc | 2 +- .../{http => admin}/config_tracker_impl_test.cc | 2 +- test/server/{http => admin}/logs_handler_test.cc | 2 +- .../{http => admin}/profiling_handler_test.cc | 2 +- .../{http => admin}/prometheus_stats_test.cc | 2 +- .../server/{http => admin}/runtime_handler_test.cc | 2 +- test/server/{http => admin}/stats_handler_test.cc | 4 ++-- tools/code_format/check_format.py | 8 ++++---- 44 files changed, 63 insertions(+), 63 deletions(-) rename source/server/{http => admin}/BUILD (100%) rename source/server/{http => admin}/admin.cc (99%) rename source/server/{http => admin}/admin.h (98%) rename source/server/{http => admin}/admin_filter.cc (97%) rename source/server/{http => admin}/admin_filter.h (100%) rename source/server/{http => admin}/config_tracker_impl.cc (94%) rename source/server/{http => admin}/config_tracker_impl.h (100%) rename source/server/{http => admin}/handler_ctx.h (100%) rename source/server/{http => admin}/listeners_handler.cc (97%) rename source/server/{http => admin}/listeners_handler.h (96%) rename source/server/{http => admin}/logs_handler.cc (97%) rename 
source/server/{http => admin}/logs_handler.h (96%) rename source/server/{http => admin}/profiling_handler.cc (97%) rename source/server/{http => admin}/profiling_handler.h (100%) rename source/server/{http => admin}/prometheus_stats.cc (99%) rename source/server/{http => admin}/prometheus_stats.h (100%) rename source/server/{http => admin}/runtime_handler.cc (98%) rename source/server/{http => admin}/runtime_handler.h (95%) rename source/server/{http => admin}/stats_handler.cc (98%) rename source/server/{http => admin}/stats_handler.h (98%) rename source/server/{http => admin}/utils.cc (98%) rename source/server/{http => admin}/utils.h (100%) rename test/server/{http => admin}/BUILD (90%) rename test/server/{http => admin}/admin_filter_test.cc (98%) rename test/server/{http => admin}/admin_instance.cc (98%) rename test/server/{http => admin}/admin_instance.h (97%) rename test/server/{http => admin}/admin_test.cc (99%) rename test/server/{http => admin}/config_tracker_impl_test.cc (98%) rename test/server/{http => admin}/logs_handler_test.cc (93%) rename test/server/{http => admin}/profiling_handler_test.cc (98%) rename test/server/{http => admin}/prometheus_stats_test.cc (99%) rename test/server/{http => admin}/runtime_handler_test.cc (98%) rename test/server/{http => admin}/stats_handler_test.cc (99%) diff --git a/include/envoy/server/admin.h b/include/envoy/server/admin.h index 62b2604fda78..b99b76a1e0ad 100644 --- a/include/envoy/server/admin.h +++ b/include/envoy/server/admin.h @@ -60,7 +60,7 @@ class AdminStream { /** * This macro is used to add handlers to the Admin HTTP Endpoint. It builds * a callback that executes X when the specified admin handler is hit. This macro can be - * used to add static handlers as in source/server/http/admin.cc and also dynamic handlers as + * used to add static handlers as in source/server/admin/admin.cc and also dynamic handlers as * done in the RouteConfigProviderManagerImpl constructor in source/common/router/rds_impl.cc. 
*/ #define MAKE_ADMIN_HANDLER(X) \ diff --git a/source/server/BUILD b/source/server/BUILD index ec35936335c7..42ef0a1b546b 100644 --- a/source/server/BUILD +++ b/source/server/BUILD @@ -429,7 +429,7 @@ envoy_cc_library( "//source/common/upstream:cluster_manager_lib", "//source/common/upstream:health_discovery_service_lib", "//source/server:overload_manager_lib", - "//source/server/http:admin_lib", + "//source/server/admin:admin_lib", "@envoy_api//envoy/admin/v3:pkg_cc_proto", "@envoy_api//envoy/config/bootstrap/v2:pkg_cc_proto", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", diff --git a/source/server/http/BUILD b/source/server/admin/BUILD similarity index 100% rename from source/server/http/BUILD rename to source/server/admin/BUILD diff --git a/source/server/http/admin.cc b/source/server/admin/admin.cc similarity index 99% rename from source/server/http/admin.cc rename to source/server/admin/admin.cc index cfdcbdff6eb3..2d67d02a5dfb 100644 --- a/source/server/http/admin.cc +++ b/source/server/admin/admin.cc @@ -1,4 +1,4 @@ -#include "server/http/admin.h" +#include "server/admin/admin.h" #include #include @@ -46,7 +46,7 @@ #include "common/router/config_impl.h" #include "common/upstream/host_utility.h" -#include "server/http/utils.h" +#include "server/admin/utils.h" #include "extensions/access_loggers/file/file_access_log_impl.h" diff --git a/source/server/http/admin.h b/source/server/admin/admin.h similarity index 98% rename from source/server/http/admin.h rename to source/server/admin/admin.h index 8d772d59b87d..d4629211a244 100644 --- a/source/server/http/admin.h +++ b/source/server/admin/admin.h @@ -36,13 +36,13 @@ #include "common/router/scoped_config_impl.h" #include "common/stats/isolated_store_impl.h" -#include "server/http/admin_filter.h" -#include "server/http/config_tracker_impl.h" -#include "server/http/listeners_handler.h" -#include "server/http/logs_handler.h" -#include "server/http/profiling_handler.h" -#include "server/http/runtime_handler.h" -#include "server/http/stats_handler.h" +#include "server/admin/admin_filter.h" +#include "server/admin/config_tracker_impl.h" +#include "server/admin/listeners_handler.h" +#include "server/admin/logs_handler.h" +#include "server/admin/profiling_handler.h" +#include "server/admin/runtime_handler.h" +#include "server/admin/stats_handler.h" #include "extensions/filters/http/common/pass_through_filter.h" diff --git a/source/server/http/admin_filter.cc b/source/server/admin/admin_filter.cc similarity index 97% rename from source/server/http/admin_filter.cc rename to source/server/admin/admin_filter.cc index b0565f673c5e..154698fa79bf 100644 --- a/source/server/http/admin_filter.cc +++ b/source/server/admin/admin_filter.cc @@ -1,6 +1,6 @@ -#include "server/http/admin_filter.h" +#include "server/admin/admin_filter.h" -#include "server/http/utils.h" +#include "server/admin/utils.h" namespace Envoy { namespace Server { diff --git a/source/server/http/admin_filter.h b/source/server/admin/admin_filter.h similarity index 100% rename from source/server/http/admin_filter.h rename to source/server/admin/admin_filter.h diff --git a/source/server/http/config_tracker_impl.cc b/source/server/admin/config_tracker_impl.cc similarity index 94% rename from source/server/http/config_tracker_impl.cc rename to source/server/admin/config_tracker_impl.cc index 252bd24a0f55..da1bc875a6ee 100644 --- a/source/server/http/config_tracker_impl.cc +++ b/source/server/admin/config_tracker_impl.cc @@ -1,4 +1,4 @@ -#include "server/http/config_tracker_impl.h" 
+#include "server/admin/config_tracker_impl.h" namespace Envoy { namespace Server { diff --git a/source/server/http/config_tracker_impl.h b/source/server/admin/config_tracker_impl.h similarity index 100% rename from source/server/http/config_tracker_impl.h rename to source/server/admin/config_tracker_impl.h diff --git a/source/server/http/handler_ctx.h b/source/server/admin/handler_ctx.h similarity index 100% rename from source/server/http/handler_ctx.h rename to source/server/admin/handler_ctx.h diff --git a/source/server/http/listeners_handler.cc b/source/server/admin/listeners_handler.cc similarity index 97% rename from source/server/http/listeners_handler.cc rename to source/server/admin/listeners_handler.cc index 7751aa78cf82..3d813ad4b4c8 100644 --- a/source/server/http/listeners_handler.cc +++ b/source/server/admin/listeners_handler.cc @@ -1,4 +1,4 @@ -#include "server/http/listeners_handler.h" +#include "server/admin/listeners_handler.h" #include "envoy/admin/v3/listeners.pb.h" @@ -6,7 +6,7 @@ #include "common/http/utility.h" #include "common/network/utility.h" -#include "server/http/utils.h" +#include "server/admin/utils.h" namespace Envoy { namespace Server { diff --git a/source/server/http/listeners_handler.h b/source/server/admin/listeners_handler.h similarity index 96% rename from source/server/http/listeners_handler.h rename to source/server/admin/listeners_handler.h index b16dada0eae7..bf48f86419e5 100644 --- a/source/server/http/listeners_handler.h +++ b/source/server/admin/listeners_handler.h @@ -6,7 +6,7 @@ #include "envoy/server/admin.h" #include "envoy/server/instance.h" -#include "server/http/handler_ctx.h" +#include "server/admin/handler_ctx.h" #include "absl/strings/string_view.h" diff --git a/source/server/http/logs_handler.cc b/source/server/admin/logs_handler.cc similarity index 97% rename from source/server/http/logs_handler.cc rename to source/server/admin/logs_handler.cc index e5de4302e082..57b0fbdfca2f 100644 --- a/source/server/http/logs_handler.cc +++ b/source/server/admin/logs_handler.cc @@ -1,10 +1,10 @@ -#include "server/http/logs_handler.h" +#include "server/admin/logs_handler.h" #include #include "common/common/logger.h" -#include "server/http/utils.h" +#include "server/admin/utils.h" namespace Envoy { namespace Server { diff --git a/source/server/http/logs_handler.h b/source/server/admin/logs_handler.h similarity index 96% rename from source/server/http/logs_handler.h rename to source/server/admin/logs_handler.h index 60002e484993..1eea995d88ba 100644 --- a/source/server/http/logs_handler.h +++ b/source/server/admin/logs_handler.h @@ -6,7 +6,7 @@ #include "envoy/server/admin.h" #include "envoy/server/instance.h" -#include "server/http/handler_ctx.h" +#include "server/admin/handler_ctx.h" #include "absl/strings/string_view.h" diff --git a/source/server/http/profiling_handler.cc b/source/server/admin/profiling_handler.cc similarity index 97% rename from source/server/http/profiling_handler.cc rename to source/server/admin/profiling_handler.cc index 76c31a2764e4..121daeb9976b 100644 --- a/source/server/http/profiling_handler.cc +++ b/source/server/admin/profiling_handler.cc @@ -1,8 +1,8 @@ -#include "server/http/profiling_handler.h" +#include "server/admin/profiling_handler.h" #include "common/profiler/profiler.h" -#include "server/http/utils.h" +#include "server/admin/utils.h" namespace Envoy { namespace Server { diff --git a/source/server/http/profiling_handler.h b/source/server/admin/profiling_handler.h similarity index 100% rename from 
source/server/http/profiling_handler.h rename to source/server/admin/profiling_handler.h diff --git a/source/server/http/prometheus_stats.cc b/source/server/admin/prometheus_stats.cc similarity index 99% rename from source/server/http/prometheus_stats.cc rename to source/server/admin/prometheus_stats.cc index e04edeccf9cf..8ca8fcd0fc14 100644 --- a/source/server/http/prometheus_stats.cc +++ b/source/server/admin/prometheus_stats.cc @@ -1,4 +1,4 @@ -#include "server/http/prometheus_stats.h" +#include "server/admin/prometheus_stats.h" #include "common/common/empty_string.h" #include "common/stats/histogram_impl.h" diff --git a/source/server/http/prometheus_stats.h b/source/server/admin/prometheus_stats.h similarity index 100% rename from source/server/http/prometheus_stats.h rename to source/server/admin/prometheus_stats.h diff --git a/source/server/http/runtime_handler.cc b/source/server/admin/runtime_handler.cc similarity index 98% rename from source/server/http/runtime_handler.cc rename to source/server/admin/runtime_handler.cc index 7f7f58f20d05..1b6f9051673c 100644 --- a/source/server/http/runtime_handler.cc +++ b/source/server/admin/runtime_handler.cc @@ -1,4 +1,4 @@ -#include "server/http/runtime_handler.h" +#include "server/admin/runtime_handler.h" #include #include @@ -8,7 +8,7 @@ #include "common/http/headers.h" #include "common/http/utility.h" -#include "server/http/utils.h" +#include "server/admin/utils.h" namespace Envoy { namespace Server { diff --git a/source/server/http/runtime_handler.h b/source/server/admin/runtime_handler.h similarity index 95% rename from source/server/http/runtime_handler.h rename to source/server/admin/runtime_handler.h index 0afc3250a2fe..d0b25d7a8297 100644 --- a/source/server/http/runtime_handler.h +++ b/source/server/admin/runtime_handler.h @@ -6,7 +6,7 @@ #include "envoy/server/admin.h" #include "envoy/server/instance.h" -#include "server/http/handler_ctx.h" +#include "server/admin/handler_ctx.h" #include "absl/strings/string_view.h" diff --git a/source/server/http/stats_handler.cc b/source/server/admin/stats_handler.cc similarity index 98% rename from source/server/http/stats_handler.cc rename to source/server/admin/stats_handler.cc index c792c61cfd63..08fc8a965f8c 100644 --- a/source/server/http/stats_handler.cc +++ b/source/server/admin/stats_handler.cc @@ -1,12 +1,12 @@ -#include "server/http/stats_handler.h" +#include "server/admin/stats_handler.h" #include "common/common/empty_string.h" #include "common/html/utility.h" #include "common/http/headers.h" #include "common/http/utility.h" -#include "server/http/prometheus_stats.h" -#include "server/http/utils.h" +#include "server/admin/prometheus_stats.h" +#include "server/admin/utils.h" namespace Envoy { namespace Server { diff --git a/source/server/http/stats_handler.h b/source/server/admin/stats_handler.h similarity index 98% rename from source/server/http/stats_handler.h rename to source/server/admin/stats_handler.h index 104a06fc6b9e..ff166f80bfb1 100644 --- a/source/server/http/stats_handler.h +++ b/source/server/admin/stats_handler.h @@ -11,7 +11,7 @@ #include "common/stats/histogram_impl.h" -#include "server/http/handler_ctx.h" +#include "server/admin/handler_ctx.h" #include "absl/strings/string_view.h" diff --git a/source/server/http/utils.cc b/source/server/admin/utils.cc similarity index 98% rename from source/server/http/utils.cc rename to source/server/admin/utils.cc index cd564784a566..3c2442bb2894 100644 --- a/source/server/http/utils.cc +++ b/source/server/admin/utils.cc @@ -1,4 
+1,4 @@ -#include "server/http/utils.h" +#include "server/admin/utils.h" #include "common/common/enum_to_int.h" #include "common/http/headers.h" diff --git a/source/server/http/utils.h b/source/server/admin/utils.h similarity index 100% rename from source/server/http/utils.h rename to source/server/admin/utils.h diff --git a/source/server/config_validation/BUILD b/source/server/config_validation/BUILD index 9a1151993329..f3295a3f5f60 100644 --- a/source/server/config_validation/BUILD +++ b/source/server/config_validation/BUILD @@ -11,7 +11,7 @@ envoy_cc_library( deps = [ "//include/envoy/server:admin_interface", "//source/common/common:assert_lib", - "//source/server/http:config_tracker_lib", + "//source/server/admin:config_tracker_lib", ], ) @@ -108,7 +108,7 @@ envoy_cc_library( "//source/common/thread_local:thread_local_lib", "//source/server:configuration_lib", "//source/server:server_lib", - "//source/server/http:admin_lib", + "//source/server/admin:admin_lib", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/config/listener/v3:pkg_cc_proto", diff --git a/source/server/config_validation/admin.h b/source/server/config_validation/admin.h index 521aaf838b94..9eabd7f40b9f 100644 --- a/source/server/config_validation/admin.h +++ b/source/server/config_validation/admin.h @@ -4,7 +4,7 @@ #include "common/common/assert.h" -#include "server/http/config_tracker_impl.h" +#include "server/admin/config_tracker_impl.h" namespace Envoy { namespace Server { diff --git a/source/server/config_validation/server.h b/source/server/config_validation/server.h index 2cfb3d673dd1..70bb29bf180b 100644 --- a/source/server/config_validation/server.h +++ b/source/server/config_validation/server.h @@ -20,11 +20,11 @@ #include "common/secret/secret_manager_impl.h" #include "common/thread_local/thread_local_impl.h" +#include "server/admin/admin.h" #include "server/config_validation/admin.h" #include "server/config_validation/api.h" #include "server/config_validation/cluster_manager.h" #include "server/config_validation/dns.h" -#include "server/http/admin.h" #include "server/listener_manager_impl.h" #include "server/server.h" diff --git a/source/server/server.cc b/source/server/server.cc index 3c32e180a5eb..d01cc5387956 100644 --- a/source/server/server.cc +++ b/source/server/server.cc @@ -41,10 +41,10 @@ #include "common/stats/timespan_impl.h" #include "common/upstream/cluster_manager_impl.h" +#include "server/admin/utils.h" #include "server/configuration_impl.h" #include "server/connection_handler_impl.h" #include "server/guarddog_impl.h" -#include "server/http/utils.h" #include "server/listener_hooks.h" #include "server/ssl_context_manager.h" diff --git a/source/server/server.h b/source/server/server.h index f10db7332af3..56e0eb188616 100644 --- a/source/server/server.h +++ b/source/server/server.h @@ -35,8 +35,8 @@ #include "common/secret/secret_manager_impl.h" #include "common/upstream/health_discovery_service.h" +#include "server/admin/admin.h" #include "server/configuration_impl.h" -#include "server/http/admin.h" #include "server/listener_hooks.h" #include "server/listener_manager_impl.h" #include "server/overload_manager_impl.h" diff --git a/test/common/router/BUILD b/test/common/router/BUILD index 500ec54046a1..3d1264b07fec 100644 --- a/test/common/router/BUILD +++ b/test/common/router/BUILD @@ -69,7 +69,7 @@ envoy_cc_test( "//source/common/config:utility_lib", "//source/common/json:json_loader_lib", "//source/common/router:rds_lib", - 
"//source/server/http:admin_lib", + "//source/server/admin:admin_lib", "//test/mocks/local_info:local_info_mocks", "//test/mocks/protobuf:protobuf_mocks", "//test/mocks/server:server_mocks", @@ -113,7 +113,7 @@ envoy_cc_test( "//source/common/http:message_lib", "//source/common/json:json_loader_lib", "//source/common/router:scoped_rds_lib", - "//source/server/http:admin_lib", + "//source/server/admin:admin_lib", "//test/mocks/config:config_mocks", "//test/mocks/init:init_mocks", "//test/mocks/protobuf:protobuf_mocks", @@ -138,7 +138,7 @@ envoy_cc_test( "//source/common/protobuf", "//source/common/router:rds_lib", "//source/common/router:vhds_lib", - "//source/server/http:admin_lib", + "//source/server/admin:admin_lib", "//test/mocks/config:config_mocks", "//test/mocks/local_info:local_info_mocks", "//test/mocks/server:server_mocks", diff --git a/test/common/router/rds_impl_test.cc b/test/common/router/rds_impl_test.cc index ad211cedc787..07825cf9636c 100644 --- a/test/common/router/rds_impl_test.cc +++ b/test/common/router/rds_impl_test.cc @@ -13,7 +13,7 @@ #include "common/json/json_loader.h" #include "common/router/rds_impl.h" -#include "server/http/admin.h" +#include "server/admin/admin.h" #include "test/mocks/init/mocks.h" #include "test/mocks/local_info/mocks.h" diff --git a/test/common/router/vhds_test.cc b/test/common/router/vhds_test.cc index 97cf20b50bf9..4048a86be306 100644 --- a/test/common/router/vhds_test.cc +++ b/test/common/router/vhds_test.cc @@ -11,7 +11,7 @@ #include "common/protobuf/protobuf.h" #include "common/router/rds_impl.h" -#include "server/http/admin.h" +#include "server/admin/admin.h" #include "test/mocks/config/mocks.h" #include "test/mocks/init/mocks.h" diff --git a/test/server/http/BUILD b/test/server/admin/BUILD similarity index 90% rename from test/server/http/BUILD rename to test/server/admin/BUILD index d5dd5f83bda0..2d56fae7cd01 100644 --- a/test/server/http/BUILD +++ b/test/server/admin/BUILD @@ -14,7 +14,7 @@ envoy_cc_test_library( srcs = ["admin_instance.cc"], hdrs = ["admin_instance.h"], deps = [ - "//source/server/http:admin_lib", + "//source/server/admin:admin_lib", "//test/mocks/runtime:runtime_mocks", "//test/mocks/server:server_mocks", "//test/test_common:environment_lib", @@ -36,7 +36,7 @@ envoy_cc_test( "//source/common/stats:symbol_table_creator_lib", "//source/common/stats:thread_local_store_lib", "//source/extensions/transport_sockets/tls:context_config_lib", - "//source/server/http:admin_lib", + "//source/server/admin:admin_lib", "//test/mocks/runtime:runtime_mocks", "//test/mocks/server:server_mocks", "//test/test_common:environment_lib", @@ -54,7 +54,7 @@ envoy_cc_test( name = "admin_filter_test", srcs = ["admin_filter_test.cc"], deps = [ - "//source/server/http:admin_filter_lib", + "//source/server/admin:admin_filter_lib", "//test/mocks/server:server_mocks", "//test/test_common:environment_lib", ], @@ -66,7 +66,7 @@ envoy_cc_test( deps = [ ":admin_instance_lib", "//source/common/stats:thread_local_store_lib", - "//source/server/http:stats_handler_lib", + "//source/server/admin:stats_handler_lib", "//test/test_common:logging_lib", "//test/test_common:utility_lib", ], @@ -82,7 +82,7 @@ envoy_cc_test( name = "prometheus_stats_test", srcs = ["prometheus_stats_test.cc"], deps = [ - "//source/server/http:prometheus_stats_lib", + "//source/server/admin:prometheus_stats_lib", "//test/test_common:utility_lib", ], ) @@ -108,7 +108,7 @@ envoy_cc_test( name = "config_tracker_impl_test", srcs = ["config_tracker_impl_test.cc"], deps = [ - 
"//source/server/http:config_tracker_lib", + "//source/server/admin:config_tracker_lib", "//test/mocks:common_lib", ], ) diff --git a/test/server/http/admin_filter_test.cc b/test/server/admin/admin_filter_test.cc similarity index 98% rename from test/server/http/admin_filter_test.cc rename to test/server/admin/admin_filter_test.cc index 7dad3e63d3f4..07b5f9b6c98c 100644 --- a/test/server/http/admin_filter_test.cc +++ b/test/server/admin/admin_filter_test.cc @@ -1,4 +1,4 @@ -#include "server/http/admin_filter.h" +#include "server/admin/admin_filter.h" #include "test/mocks/server/mocks.h" #include "test/test_common/environment.h" diff --git a/test/server/http/admin_instance.cc b/test/server/admin/admin_instance.cc similarity index 98% rename from test/server/http/admin_instance.cc rename to test/server/admin/admin_instance.cc index 279e5b1d590c..a9126957c7c2 100644 --- a/test/server/http/admin_instance.cc +++ b/test/server/admin/admin_instance.cc @@ -1,4 +1,4 @@ -#include "test/server/http/admin_instance.h" +#include "test/server/admin/admin_instance.h" namespace Envoy { namespace Server { diff --git a/test/server/http/admin_instance.h b/test/server/admin/admin_instance.h similarity index 97% rename from test/server/http/admin_instance.h rename to test/server/admin/admin_instance.h index aaec8f7a98b8..b6231ee856a9 100644 --- a/test/server/http/admin_instance.h +++ b/test/server/admin/admin_instance.h @@ -1,6 +1,6 @@ #pragma once -#include "server/http/admin.h" +#include "server/admin/admin.h" #include "test/mocks/http/mocks.h" #include "test/mocks/runtime/mocks.h" diff --git a/test/server/http/admin_test.cc b/test/server/admin/admin_test.cc similarity index 99% rename from test/server/http/admin_test.cc rename to test/server/admin/admin_test.cc index 5be06a9cb682..4d36bb6fe6de 100644 --- a/test/server/http/admin_test.cc +++ b/test/server/admin/admin_test.cc @@ -19,7 +19,7 @@ #include "extensions/transport_sockets/tls/context_config_impl.h" -#include "test/server/http/admin_instance.h" +#include "test/server/admin/admin_instance.h" #include "test/test_common/logging.h" #include "test/test_common/printers.h" #include "test/test_common/utility.h" diff --git a/test/server/http/config_tracker_impl_test.cc b/test/server/admin/config_tracker_impl_test.cc similarity index 98% rename from test/server/http/config_tracker_impl_test.cc rename to test/server/admin/config_tracker_impl_test.cc index 2fcd777fca55..9388c2e2ef11 100644 --- a/test/server/http/config_tracker_impl_test.cc +++ b/test/server/admin/config_tracker_impl_test.cc @@ -1,4 +1,4 @@ -#include "server/http/config_tracker_impl.h" +#include "server/admin/config_tracker_impl.h" #include "test/mocks/common.h" diff --git a/test/server/http/logs_handler_test.cc b/test/server/admin/logs_handler_test.cc similarity index 93% rename from test/server/http/logs_handler_test.cc rename to test/server/admin/logs_handler_test.cc index 99be88296e22..5f6a8aa3724c 100644 --- a/test/server/http/logs_handler_test.cc +++ b/test/server/admin/logs_handler_test.cc @@ -1,4 +1,4 @@ -#include "test/server/http/admin_instance.h" +#include "test/server/admin/admin_instance.h" namespace Envoy { namespace Server { diff --git a/test/server/http/profiling_handler_test.cc b/test/server/admin/profiling_handler_test.cc similarity index 98% rename from test/server/http/profiling_handler_test.cc rename to test/server/admin/profiling_handler_test.cc index 949f1c2c9368..721fd5dc1e68 100644 --- a/test/server/http/profiling_handler_test.cc +++ 
b/test/server/admin/profiling_handler_test.cc @@ -1,6 +1,6 @@ #include "common/profiler/profiler.h" -#include "test/server/http/admin_instance.h" +#include "test/server/admin/admin_instance.h" #include "test/test_common/logging.h" namespace Envoy { diff --git a/test/server/http/prometheus_stats_test.cc b/test/server/admin/prometheus_stats_test.cc similarity index 99% rename from test/server/http/prometheus_stats_test.cc rename to test/server/admin/prometheus_stats_test.cc index fb6f16b958a9..7994da560249 100644 --- a/test/server/http/prometheus_stats_test.cc +++ b/test/server/admin/prometheus_stats_test.cc @@ -1,6 +1,6 @@ #include -#include "server/http/prometheus_stats.h" +#include "server/admin/prometheus_stats.h" #include "test/mocks/stats/mocks.h" #include "test/test_common/utility.h" diff --git a/test/server/http/runtime_handler_test.cc b/test/server/admin/runtime_handler_test.cc similarity index 98% rename from test/server/http/runtime_handler_test.cc rename to test/server/admin/runtime_handler_test.cc index c9a7e7b2937e..6ac7a40b4a87 100644 --- a/test/server/http/runtime_handler_test.cc +++ b/test/server/admin/runtime_handler_test.cc @@ -1,4 +1,4 @@ -#include "test/server/http/admin_instance.h" +#include "test/server/admin/admin_instance.h" namespace Envoy { namespace Server { diff --git a/test/server/http/stats_handler_test.cc b/test/server/admin/stats_handler_test.cc similarity index 99% rename from test/server/http/stats_handler_test.cc rename to test/server/admin/stats_handler_test.cc index 6ce1b0cfae48..8b282e6557bf 100644 --- a/test/server/http/stats_handler_test.cc +++ b/test/server/admin/stats_handler_test.cc @@ -2,9 +2,9 @@ #include "common/stats/thread_local_store.h" -#include "server/http/stats_handler.h" +#include "server/admin/stats_handler.h" -#include "test/server/http/admin_instance.h" +#include "test/server/admin/admin_instance.h" #include "test/test_common/logging.h" #include "test/test_common/utility.h" diff --git a/tools/code_format/check_format.py b/tools/code_format/check_format.py index d54d2a36abc4..af5e092643bd 100755 --- a/tools/code_format/check_format.py +++ b/tools/code_format/check_format.py @@ -78,10 +78,10 @@ "./source/common/stats/tag_extractor_impl.cc", "./source/common/access_log/access_log_formatter.cc", "./source/extensions/filters/http/squash/squash_filter.h", - "./source/extensions/filters/http/squash/squash_filter.cc", "./source/server/http/utils.h", - "./source/server/http/utils.cc", "./source/server/http/stats_handler.h", - "./source/server/http/stats_handler.cc", "./source/server/http/prometheus_stats.h", - "./source/server/http/prometheus_stats.cc", "./tools/clang_tools/api_booster/main.cc", + "./source/extensions/filters/http/squash/squash_filter.cc", "./source/server/admin/utils.h", + "./source/server/admin/utils.cc", "./source/server/admin/stats_handler.h", + "./source/server/admin/stats_handler.cc", "./source/server/admin/prometheus_stats.h", + "./source/server/admin/prometheus_stats.cc", "./tools/clang_tools/api_booster/main.cc", "./tools/clang_tools/api_booster/proto_cxx_utils.cc", "./source/common/common/version.cc") # Only one C++ file should instantiate grpc_init From 3b097b43bd294930716271e9859eeaff0e2c72f0 Mon Sep 17 00:00:00 2001 From: Jose Ulises Nino Rivera Date: Fri, 15 May 2020 09:15:45 -0700 Subject: [PATCH 178/909] compression: move zlib base to extensions tree (#11171) move zlib base to extensions tree Signed-off-by: Jose Nino --- source/common/common/BUILD | 10 ---------- .../extensions/compression/gzip/common/BUILD | 
19 +++++++++++++++++++ .../compression/gzip/common}/base.cc | 2 +- .../compression/gzip/common}/base.h | 0 .../compression/gzip/compressor/BUILD | 2 +- .../gzip/compressor/zlib_compressor_impl.h | 2 +- .../compression/gzip/decompressor/BUILD | 2 +- .../decompressor/zlib_decompressor_impl.h | 3 ++- 8 files changed, 25 insertions(+), 15 deletions(-) create mode 100644 source/extensions/compression/gzip/common/BUILD rename source/{common/common/zlib => extensions/compression/gzip/common}/base.cc (92%) rename source/{common/common/zlib => extensions/compression/gzip/common}/base.h (100%) diff --git a/source/common/common/BUILD b/source/common/common/BUILD index d0b4346d6aa9..f9ee77a45a7b 100644 --- a/source/common/common/BUILD +++ b/source/common/common/BUILD @@ -427,16 +427,6 @@ envoy_cc_library( ], ) -envoy_cc_library( - name = "zlib_base_lib", - srcs = ["zlib/base.cc"], - hdrs = ["zlib/base.h"], - external_deps = ["zlib"], - deps = [ - "//source/common/buffer:buffer_lib", - ], -) - envoy_cc_library( name = "statusor_lib", hdrs = ["statusor.h"], diff --git a/source/extensions/compression/gzip/common/BUILD b/source/extensions/compression/gzip/common/BUILD new file mode 100644 index 000000000000..b2393354384f --- /dev/null +++ b/source/extensions/compression/gzip/common/BUILD @@ -0,0 +1,19 @@ +licenses(["notice"]) # Apache 2 + +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) + +envoy_package() + +envoy_cc_library( + name = "zlib_base_lib", + srcs = ["base.cc"], + hdrs = ["base.h"], + external_deps = ["zlib"], + deps = [ + "//source/common/buffer:buffer_lib", + ], +) diff --git a/source/common/common/zlib/base.cc b/source/extensions/compression/gzip/common/base.cc similarity index 92% rename from source/common/common/zlib/base.cc rename to source/extensions/compression/gzip/common/base.cc index 5336f35f8735..b3843c1aec1f 100644 --- a/source/common/common/zlib/base.cc +++ b/source/extensions/compression/gzip/common/base.cc @@ -1,4 +1,4 @@ -#include "common/common/zlib/base.h" +#include "extensions/compression/gzip/common/base.h" namespace Envoy { namespace Zlib { diff --git a/source/common/common/zlib/base.h b/source/extensions/compression/gzip/common/base.h similarity index 100% rename from source/common/common/zlib/base.h rename to source/extensions/compression/gzip/common/base.h diff --git a/source/extensions/compression/gzip/compressor/BUILD b/source/extensions/compression/gzip/compressor/BUILD index f7c1d5f51b1d..20df9161513d 100644 --- a/source/extensions/compression/gzip/compressor/BUILD +++ b/source/extensions/compression/gzip/compressor/BUILD @@ -18,7 +18,7 @@ envoy_cc_library( "//include/envoy/compression/compressor:compressor_interface", "//source/common/buffer:buffer_lib", "//source/common/common:assert_lib", - "//source/common/common:zlib_base_lib", + "//source/extensions/compression/gzip/common:zlib_base_lib", ], ) diff --git a/source/extensions/compression/gzip/compressor/zlib_compressor_impl.h b/source/extensions/compression/gzip/compressor/zlib_compressor_impl.h index 4b2956688ef4..deddf3aac37a 100644 --- a/source/extensions/compression/gzip/compressor/zlib_compressor_impl.h +++ b/source/extensions/compression/gzip/compressor/zlib_compressor_impl.h @@ -2,7 +2,7 @@ #include "envoy/compression/compressor/compressor.h" -#include "common/common/zlib/base.h" +#include "extensions/compression/gzip/common/base.h" #include "zlib.h" diff --git a/source/extensions/compression/gzip/decompressor/BUILD 
b/source/extensions/compression/gzip/decompressor/BUILD index bfb693b8ac64..220c40f5c5cc 100644 --- a/source/extensions/compression/gzip/decompressor/BUILD +++ b/source/extensions/compression/gzip/decompressor/BUILD @@ -19,7 +19,7 @@ envoy_cc_library( "//source/common/buffer:buffer_lib", "//source/common/common:assert_lib", "//source/common/common:minimal_logger_lib", - "//source/common/common:zlib_base_lib", + "//source/extensions/compression/gzip/common:zlib_base_lib", ], ) diff --git a/source/extensions/compression/gzip/decompressor/zlib_decompressor_impl.h b/source/extensions/compression/gzip/decompressor/zlib_decompressor_impl.h index 77c49e83d56d..a4f27adb5658 100644 --- a/source/extensions/compression/gzip/decompressor/zlib_decompressor_impl.h +++ b/source/extensions/compression/gzip/decompressor/zlib_decompressor_impl.h @@ -3,7 +3,8 @@ #include "envoy/compression/decompressor/decompressor.h" #include "common/common/logger.h" -#include "common/common/zlib/base.h" + +#include "extensions/compression/gzip/common/base.h" #include "zlib.h" From 386e00bab075f60c19cefd716a02745a84e07c68 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Fri, 15 May 2020 12:19:01 -0400 Subject: [PATCH 179/909] =?UTF-8?q?http:=20fixing=20a=20content=20length?= =?UTF-8?q?=20bug=20in=20the=20grpc=5Fhttp1=5Freverse=5Fbridge=20fi?= =?UTF-8?q?=E2=80=A6=20(#11166)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Alyssa Wilk --- docs/root/version_history/current.rst | 1 + .../http/grpc_http1_reverse_bridge/filter.cc | 4 +- .../reverse_bridge_test.cc | 48 +++++++++++++++++++ 3 files changed, 52 insertions(+), 1 deletion(-) diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 7f55307b1da0..5fe2d231c030 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -24,6 +24,7 @@ Changes * health checks: allow configuring health check transport sockets by specifying :ref:`transport socket match criteria `. * http: added :ref:`stripping port from host header ` support. * http: added support for proxying CONNECT requests, terminating CONNECT requests, and converting raw TCP streams into HTTP/2 CONNECT requests. See :ref:`upgrade documentation` for details. +* http: fixed a bug in the grpc_http1_reverse_bridge filter where header-only requests were forwarded with a non-zero content length. * http: fixed a bug where in some cases slash was moved from path to query string when :ref:`merging of adjacent slashes` is enabled. * http: fixed several bugs with applying correct connection close behavior across the http connection manager, health checker, and connection pool. This behavior may be temporarily reverted by setting runtime feature `envoy.reloadable_features.fix_connection_close` to false. * http: fixed a bug where the upgrade header was not cleared on responses to non-upgrade requests. 
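(Aside, not part of the patch: a standalone sketch of why the zero-length guard added to adjustContentLength in the diff below matters. The helper name and layout here are illustrative, not the filter's actual code; the 5-byte constant is the standard gRPC frame header, one flags byte plus four length bytes. A header-only message declares content-length: 0, and applying the frame-header adjustment to it either yields a spurious non-zero value or, in the subtracting direction on an unsigned length, wraps around.)

#include <cstdint>
#include <iostream>

// Sketch: skip the gRPC frame-header adjustment entirely when the declared
// length is zero, i.e. for header-only requests/responses.
uint64_t adjustContentLengthSketch(uint64_t length, bool add_frame_header) {
  constexpr uint64_t kGrpcFrameHeaderSize = 5; // 1 flags byte + 4 length bytes
  if (length == 0) {
    return 0; // header-only message: leave the length untouched
  }
  return add_frame_header ? length + kGrpcFrameHeaderSize
                          : length - kGrpcFrameHeaderSize;
}

int main() {
  std::cout << adjustContentLengthSketch(25, false) << "\n"; // 20, matching the unit test below
  std::cout << adjustContentLengthSketch(0, false) << "\n";  // 0 (unguarded, this would wrap to 2^64 - 5)
  return 0;
}
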
diff --git a/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.cc b/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.cc index d585f84ea0c2..b6d0926b6fbf 100644 --- a/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.cc +++ b/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.cc @@ -57,7 +57,9 @@ void adjustContentLength(Http::RequestOrResponseHeaderMap& headers, if (length_header != nullptr) { uint64_t length; if (absl::SimpleAtoi(length_header->value().getStringView(), &length)) { - headers.setContentLength(adjustment(length)); + if (length != 0) { + headers.setContentLength(adjustment(length)); + } } } } diff --git a/test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_test.cc b/test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_test.cc index d264d37ff06d..f78a81d6a3a5 100644 --- a/test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_test.cc +++ b/test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_test.cc @@ -379,6 +379,54 @@ TEST_F(ReverseBridgeTest, GrpcRequestNoContentLength) { EXPECT_EQ(12, frames[0].length_); } } + +// Regression tests that header-only responses do not get the content-length +// adjusted (https://github.com/envoyproxy/envoy/issues/11099) +TEST_F(ReverseBridgeTest, GrpcRequestHeaderOnlyResponse) { + initialize(); + decoder_callbacks_.is_grpc_request_ = true; + + { + EXPECT_CALL(decoder_callbacks_, route()).WillRepeatedly(testing::Return(nullptr)); + EXPECT_CALL(decoder_callbacks_, clearRouteCache()); + Http::TestRequestHeaderMapImpl headers({{"content-type", "application/grpc"}, + {"content-length", "25"}, + {":path", "/testing.ExampleService/SendData"}}); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false)); + + EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentType, "application/x-protobuf")); + EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentLength, "20")); + EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().Accept, "application/x-protobuf")); + } + + { + // We should remove the first five bytes. + Envoy::Buffer::OwnedImpl buffer; + buffer.add("abcdefgh", 8); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(buffer, false)); + EXPECT_EQ("fgh", buffer.toString()); + } + + { + // Subsequent calls to decodeData should do nothing. + Envoy::Buffer::OwnedImpl buffer; + buffer.add("abcdefgh", 8); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(buffer, false)); + EXPECT_EQ("abcdefgh", buffer.toString()); + } + + { + Http::TestRequestTrailerMapImpl trailers; + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(trailers)); + } + + Http::TestResponseHeaderMapImpl headers( + {{":status", "200"}, {"content-length", "0"}, {"content-type", "application/x-protobuf"}}); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(headers, true)); + EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentType, "application/grpc")); + EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentLength, "0")); +} + // Tests that a gRPC is downgraded to application/x-protobuf and upgraded back // to gRPC, and that the upstream 400 is converted into an internal (13) // grpc-status. From ca2b0c79a311de255e8ff9bf8e6cc3976175ef57 Mon Sep 17 00:00:00 2001 From: "William A. 
Rowe Jr" Date: Fri, 15 May 2020 11:26:21 -0500 Subject: [PATCH 180/909] Windows fixes for tests/common/runtime (#11201) - identify a known invalid path prefix - correctly create symlinks on windows (bash can't) Co-authored-by: William A Rowe Jr Co-authored-by: Sunjay Bhatia Signed-off-by: William A Rowe Jr Signed-off-by: Sunjay Bhatia --- test/common/runtime/BUILD | 5 ----- test/common/runtime/filesystem_setup.sh | 7 +++++-- test/common/runtime/runtime_impl_test.cc | 7 ++++--- 3 files changed, 9 insertions(+), 10 deletions(-) diff --git a/test/common/runtime/BUILD b/test/common/runtime/BUILD index 1df1476d4149..c2cd9d5be4be 100644 --- a/test/common/runtime/BUILD +++ b/test/common/runtime/BUILD @@ -29,8 +29,6 @@ envoy_cc_test_library( envoy_cc_test( name = "runtime_protos_test", srcs = ["runtime_protos_test.cc"], - # Pass for the time being, test times out on windows - tags = ["fails_on_windows"], deps = [ "//source/common/runtime:runtime_lib", "//test/mocks/runtime:runtime_mocks", @@ -44,9 +42,6 @@ envoy_cc_test( name = "runtime_impl_test", srcs = ["runtime_impl_test.cc"], data = glob(["test_data/**"]) + ["filesystem_setup.sh"], - # Inexplicable failure promoting arguments to mock, see - # https://envoyproxy.slack.com/archives/CNAK09BSB/p1571946165007300 - tags = ["fails_on_windows"], deps = [ "//source/common/config:runtime_utility_lib", "//source/common/runtime:runtime_lib", diff --git a/test/common/runtime/filesystem_setup.sh b/test/common/runtime/filesystem_setup.sh index 39684619067a..b66941acdc70 100755 --- a/test/common/runtime/filesystem_setup.sh +++ b/test/common/runtime/filesystem_setup.sh @@ -10,8 +10,6 @@ rm -rf "${TEST_TMPDIR}/${TEST_DATA}" mkdir -p "${TEST_TMPDIR}/${TEST_DATA}" cp -RfL "${TEST_DATA}"/* "${TEST_TMPDIR}/${TEST_DATA}" chmod -R u+rwX "${TEST_TMPDIR}/${TEST_DATA}" -ln -sf "${TEST_TMPDIR}/${TEST_DATA}/root" "${TEST_TMPDIR}/${TEST_DATA}/current" -ln -sf "${TEST_TMPDIR}/${TEST_DATA}/root/envoy/subdir" "${TEST_TMPDIR}/${TEST_DATA}/root/envoy/badlink" # Deliberate symlink of doom. 
LOOP_PATH="${TEST_TMPDIR}/${TEST_DATA}/loop" @@ -20,8 +18,13 @@ mkdir -p "${LOOP_PATH}" # the ln in MSYS2 doesn't handle recursive symlinks correctly, # so use the cmd built in mklink instead on Windows if [[ -z "${WINDIR}" ]]; then + ln -sf "${TEST_TMPDIR}/${TEST_DATA}/root" "${TEST_TMPDIR}/${TEST_DATA}/current" + ln -sf "${TEST_TMPDIR}/${TEST_DATA}/root/envoy/subdir" "${TEST_TMPDIR}/${TEST_DATA}/root/envoy/badlink" ln -sf "${LOOP_PATH}" "${LOOP_PATH}"/loop else + win_test_root="$(echo $TEST_TMPDIR/$TEST_DATA | tr '/' '\\')" + cmd.exe /C "mklink /D ${win_test_root}\\current ${win_test_root}\\root" + cmd.exe /C "mklink /D ${win_test_root}\\root\\envoy\\badlink ${win_test_root}\\root\\envoy\\subdir" win_loop_path="$(echo $LOOP_PATH | tr '/' '\\')" cmd.exe /C "mklink /D ${win_loop_path}\\loop ${win_loop_path}" fi diff --git a/test/common/runtime/runtime_impl_test.cc b/test/common/runtime/runtime_impl_test.cc index 4701f5412834..8092dde805df 100644 --- a/test/common/runtime/runtime_impl_test.cc +++ b/test/common/runtime/runtime_impl_test.cc @@ -809,10 +809,11 @@ class DiskLayerTest : public testing::Test { TEST_F(DiskLayerTest, IllegalPath) { #ifdef WIN32 - // no illegal paths on Windows at the moment - return; -#endif + EXPECT_THROW_WITH_MESSAGE(DiskLayer("test", R"EOF(\\.\)EOF", *api_), EnvoyException, + R"EOF(Invalid path: \\.\)EOF"); +#else EXPECT_THROW_WITH_MESSAGE(DiskLayer("test", "/dev", *api_), EnvoyException, "Invalid path: /dev"); +#endif } // Validate that we catch recursion that goes too deep in the runtime filesystem From 1e1bdcc693086f89bfcfe79173e98bc9ae97dde3 Mon Sep 17 00:00:00 2001 From: asraa Date: Fri, 15 May 2020 12:29:02 -0400 Subject: [PATCH 181/909] [fuzz] misc HCM fuzz bugs (#11169) Fixes two minor HCM fuzz bugs Signed-off-by: Asra Ali --- .../clusterfuzz-testcase-failed-dispatch | 294 ++++++++++++++++++ .../clusterfuzz-testcase-invalidhost | 108 +++++++ test/common/http/conn_manager_impl_fuzz.proto | 4 +- .../http/conn_manager_impl_fuzz_test.cc | 6 +- test/fuzz/utility.h | 6 +- 5 files changed, 412 insertions(+), 6 deletions(-) create mode 100644 test/common/http/conn_manager_impl_corpus/clusterfuzz-testcase-failed-dispatch create mode 100644 test/common/http/conn_manager_impl_corpus/clusterfuzz-testcase-invalidhost diff --git a/test/common/http/conn_manager_impl_corpus/clusterfuzz-testcase-failed-dispatch b/test/common/http/conn_manager_impl_corpus/clusterfuzz-testcase-failed-dispatch new file mode 100644 index 000000000000..485481def130 --- /dev/null +++ b/test/common/http/conn_manager_impl_corpus/clusterfuzz-testcase-failed-dispatch @@ -0,0 +1,294 @@ +actions { +} +actions { + new_stream { + request_headers { + headers { + key: ":scheme" + value: "t" + } + headers { + key: ":method" + value: "GET" + } + headers { + key: "blah" + value: "nosniff" + } + headers { + key: "blah" + value: "nosniff" + } + headers { + key: "\'" + } + headers { + key: ":path" + value: "/" + } + headers { + key: ":authority" + value: "foo.com" + } + } + } +} +actions { + new_stream { + } +} +actions { + stream_action { + stream_id: 1634017305 + request { + trailers { + headers { + headers { + key: "&" + } + } + } + } + } +} +actions { + new_stream { + } +} +actions { + stream_action { + stream_id: 1073741824 + } +} +actions { + new_stream { + } +} +actions { + new_stream { + end_stream: true + } +} +actions { + new_stream { + } +} +actions { + new_stream { + } +} +actions { + new_stream { + } +} +actions { + stream_action { + stream_id: 1073741824 + } +} +actions { + new_stream { + } +} 
+actions { +} +actions { + new_stream { + end_stream: true + } +} +actions { + new_stream { + request_headers { + headers { + key: ":scheme" + value: "\'" + } + headers { + key: ":method" + value: "GOT" + } + headers { + key: ":path" + value: "/" + } + headers { + key: ":authority" + value: "foo.com" + } + } + end_stream: true + } +} +actions { +} +actions { +} +actions { + new_stream { + request_headers { + headers { + key: ":scheme" + value: "t" + } + headers { + key: ":method" + value: "GET" + } + headers { + key: "blah" + value: "nosniff" + } + headers { + key: "blah" + value: "nosniff" + } + headers { + key: "\'" + } + headers { + key: ":path" + value: "/" + } + headers { + key: "/" + value: "foo.com" + } + } + } +} +actions { + new_stream { + end_stream: true + } +} +actions { + new_stream { + } +} +actions { + stream_action { + request { + throw_decoder_exception { + } + } + } +} +actions { + stream_action { + request { + data { + status: DATA_STOP_ITERATION_NO_BUFFER + } + } + } +} +actions { + new_stream { + request_headers { + headers { + key: "\'" + } + } + } +} +actions { + stream_action { + stream_id: 1073741824 + } +} +actions { + new_stream { + } +} +actions { +} +actions { + new_stream { + end_stream: true + } +} +actions { + new_stream { + } +} +actions { + new_stream { + request_headers { + headers { + key: "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" + } + } + } +} +actions { + stream_action { + request { + data { + decoder_filter_callback_action { + add_decoded_data { + size: 262144 + } + } + } + } + } +} +actions { + new_stream { + } +} +actions { +} +actions { + new_stream { + request_headers { + headers { + key: ":scheme" + value: "t" + } + headers { + key: ":method" + value: "GET" + } + headers { + key: "blah" + value: "nosniff" + } + headers { + key: "blah" + value: "nosniff" + } + headers { + key: "\'" + } + headers { + key: ":path" + value: "/" + } + headers { + key: ":authority" + value: "foo.com" + } + } + } +} +actions { + new_stream { + request_headers { + headers { + key: "&" + } + } + } +} +actions { + stream_action { + stream_id: 4294967295 + } +} +actions { + stream_action { + stream_id: 4 + } +} \ No newline at end of file diff --git a/test/common/http/conn_manager_impl_corpus/clusterfuzz-testcase-invalidhost b/test/common/http/conn_manager_impl_corpus/clusterfuzz-testcase-invalidhost new file mode 100644 index 000000000000..c6ab3140f0f2 --- /dev/null +++ b/test/common/http/conn_manager_impl_corpus/clusterfuzz-testcase-invalidhost @@ -0,0 +1,108 @@ +actions { + stream_action { + response { + data: 2683 + } + } +} +actions { + new_stream { + request_headers { + headers { + key: ":scheme" + value: "t" + } + headers { + key: ":method" + value: "GET" + } + headers { + key: "blah" + value: "nosniff" + } + headers { + key: "blah" + value: "nosniff" + } + headers { + key: "\'" + } + headers { + key: ":path" + value: "/" + } + headers { + key: ":authority" + value: 
"foKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKjKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKAKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKEKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKZKKKKKKKKKKKKKdKKKKKKKK
KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK>KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK2KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK]KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK
KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK957191215689797641957=bar1" + } + } + end_stream: true + } +} +actions { +} +actions { + new_stream { + end_stream: true + status: HEADER_STOP_ALL_ITERATION_AND_WATERMARK + } +} +actions { + new_stream { + end_stream: true + } +} +actions { + stream_action { + stream_id: 721420288 + } +} +actions { + stream_action { + stream_id: 1024 + } +} +actions { + stream_action { + request { + trailers { + status: TRAILER_STOP_ITERATION + } + } + } +} +actions { + new_stream { + end_stream: true + } +} +actions { + new_stream { + } +} +actions { +} +actions { + new_stream { + } +} +actions { + new_stream { + } +} +actions { + new_stream { + } +} +actions { +} +actions { +} +actions { + new_stream { + } +} +actions { + new_stream { + } +} \ No newline at end of file diff --git a/test/common/http/conn_manager_impl_fuzz.proto b/test/common/http/conn_manager_impl_fuzz.proto index 5cc690eb838a..58a7d8ba0d53 100644 --- a/test/common/http/conn_manager_impl_fuzz.proto +++ b/test/common/http/conn_manager_impl_fuzz.proto @@ -61,7 +61,9 @@ message RequestAction { DataAction data = 1; TrailerAction trailers = 2; google.protobuf.Empty continue_decoding = 3; - google.protobuf.Empty throw_decoder_exception = 4; + // Dispatch no longer throws, but rather returns an error status. + google.protobuf.Empty throw_decoder_exception = 4 [deprecated = true]; + google.protobuf.Empty return_decoder_error = 5; // TODO(htuch): Model and fuzz watermark events. } } diff --git a/test/common/http/conn_manager_impl_fuzz_test.cc b/test/common/http/conn_manager_impl_fuzz_test.cc index de381823f7db..7e503275e15d 100644 --- a/test/common/http/conn_manager_impl_fuzz_test.cc +++ b/test/common/http/conn_manager_impl_fuzz_test.cc @@ -383,10 +383,12 @@ class FuzzStream { } break; } - case test::common::http::RequestAction::kThrowDecoderException: { + case test::common::http::RequestAction::kThrowDecoderException: + // Dispatch no longer throws, execute subsequent kReturnDecoderError case. 
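Context for the hunk continuing below: with dispatch returning a status, callers inspect the result instead of catching CodecProtocolException. A hedged caller-side sketch; handleCodecError is a placeholder name, while Http::Status, Connection::dispatch, and codecProtocolError come from the codec surface this patch relies on, and include paths are abbreviated.

#include "envoy/buffer/buffer.h"
#include "envoy/http/codec.h"

void handleCodecError(const std::string& message);  // placeholder, not an Envoy API

void dispatchAndCheck(Http::Connection& codec, Buffer::Instance& data) {
  const Http::Status status = codec.dispatch(data);
  if (!status.ok()) {
    // Protocol errors now surface as a status (for example one built via
    // codecProtocolError) rather than as a thrown CodecProtocolException.
    handleCodecError(std::string(status.message()));
  }
}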
+ case test::common::http::RequestAction::kReturnDecoderError: { if (state == StreamState::PendingDataOrTrailers) { EXPECT_CALL(*config_.codec_, dispatch(_)) - .WillOnce(testing::Throw(CodecProtocolException("blah"))); + .WillOnce(testing::Return(codecProtocolError("blah"))); fakeOnData(); FUZZ_ASSERT(testing::Mock::VerifyAndClearExpectations(config_.codec_)); state = StreamState::Closed; diff --git a/test/fuzz/utility.h b/test/fuzz/utility.h index bb9f6020a0e1..b0a16930fbeb 100644 --- a/test/fuzz/utility.h +++ b/test/fuzz/utility.h @@ -51,9 +51,9 @@ inline std::string replaceInvalidCharacters(absl::string_view string) { inline std::string replaceInvalidHostCharacters(absl::string_view string) { std::string filtered; filtered.reserve(string.length()); - for (const uint8_t* c = reinterpret_cast(string.data()); *c; ++c) { - if (nghttp2_check_authority(c, 1)) { - filtered.push_back(*c); + for (const char& c : string) { + if (nghttp2_check_authority(reinterpret_cast(&c), 1)) { + filtered.push_back(c); } else { filtered.push_back('0'); } From 853ecaa2f1e7364c1db49dad6a91bfcb3bcc4d61 Mon Sep 17 00:00:00 2001 From: Kateryna Nezdolii Date: Fri, 15 May 2020 18:29:54 +0200 Subject: [PATCH 182/909] Config switch to turn on/off Quic processing (#9679) Extending quic listener configuration with runtime feature flag for enabling/disabling quic processing. Signed-off-by: Kateryna Nezdolii --- .../config/listener/v3/quic_config.proto | 8 +- .../config/listener/v4alpha/quic_config.proto | 8 +- .../config/listener/v3/quic_config.proto | 8 +- .../config/listener/v4alpha/quic_config.proto | 8 +- source/extensions/quic_listeners/quiche/BUILD | 1 + .../quiche/active_quic_listener.cc | 20 ++- .../quiche/active_quic_listener.h | 10 +- test/extensions/quic_listeners/quiche/BUILD | 3 + .../active_quic_listener_config_test.cc | 39 +++++ .../quiche/active_quic_listener_test.cc | 150 +++++++++++++++--- 10 files changed, 224 insertions(+), 31 deletions(-) diff --git a/api/envoy/config/listener/v3/quic_config.proto b/api/envoy/config/listener/v3/quic_config.proto index 9949da2e0d70..c024be95bace 100644 --- a/api/envoy/config/listener/v3/quic_config.proto +++ b/api/envoy/config/listener/v3/quic_config.proto @@ -2,6 +2,8 @@ syntax = "proto3"; package envoy.config.listener.v3; +import "envoy/config/core/v3/base.proto"; + import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; @@ -16,7 +18,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: QUIC listener Config] // Configuration specific to the QUIC protocol. -// Next id: 4 +// Next id: 5 message QuicProtocolOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.listener.QuicProtocolOptions"; @@ -32,4 +34,8 @@ message QuicProtocolOptions { // Connection timeout in milliseconds before the crypto handshake is finished. // 20000ms if not specified. google.protobuf.Duration crypto_handshake_timeout = 3; + + // Runtime flag that controls whether the listener is enabled or not. If not specified, defaults + // to enabled. 
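The new field itself follows in the next hunk lines. For orientation, a condensed sketch of how a RuntimeFeatureFlag is consumed on the C++ side; it mirrors what this patch does in ActiveQuicListener, with illustrative class and method names.

#include "envoy/config/core/v3/base.pb.h"

#include "common/runtime/runtime_protos.h"

// Condensed sketch: wrap the proto flag once, then consult it on each read event.
// When the runtime key is unset, enabled() falls back to the flag's default_value.
class ExampleQuicListener {
public:
  explicit ExampleQuicListener(const envoy::config::core::v3::RuntimeFeatureFlag& enabled)
      : enabled_(enabled, Runtime::LoaderSingleton::get()) {}

  void onReadReady() {
    if (!enabled_.enabled()) {
      return;  // QUIC processing disabled via runtime; skip CHLO processing.
    }
    // ... process buffered CHLOs ...
  }

private:
  Runtime::FeatureFlag enabled_;
};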
+ core.v3.RuntimeFeatureFlag enabled = 4; } diff --git a/api/envoy/config/listener/v4alpha/quic_config.proto b/api/envoy/config/listener/v4alpha/quic_config.proto index 97866e4b6ed8..b2b1df1e374f 100644 --- a/api/envoy/config/listener/v4alpha/quic_config.proto +++ b/api/envoy/config/listener/v4alpha/quic_config.proto @@ -2,6 +2,8 @@ syntax = "proto3"; package envoy.config.listener.v4alpha; +import "envoy/config/core/v4alpha/base.proto"; + import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; @@ -16,7 +18,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // [#protodoc-title: QUIC listener Config] // Configuration specific to the QUIC protocol. -// Next id: 4 +// Next id: 5 message QuicProtocolOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.config.listener.v3.QuicProtocolOptions"; @@ -32,4 +34,8 @@ message QuicProtocolOptions { // Connection timeout in milliseconds before the crypto handshake is finished. // 20000ms if not specified. google.protobuf.Duration crypto_handshake_timeout = 3; + + // Runtime flag that controls whether the listener is enabled or not. If not specified, defaults + // to enabled. + core.v4alpha.RuntimeFeatureFlag enabled = 4; } diff --git a/generated_api_shadow/envoy/config/listener/v3/quic_config.proto b/generated_api_shadow/envoy/config/listener/v3/quic_config.proto index 9949da2e0d70..c024be95bace 100644 --- a/generated_api_shadow/envoy/config/listener/v3/quic_config.proto +++ b/generated_api_shadow/envoy/config/listener/v3/quic_config.proto @@ -2,6 +2,8 @@ syntax = "proto3"; package envoy.config.listener.v3; +import "envoy/config/core/v3/base.proto"; + import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; @@ -16,7 +18,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: QUIC listener Config] // Configuration specific to the QUIC protocol. -// Next id: 4 +// Next id: 5 message QuicProtocolOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.listener.QuicProtocolOptions"; @@ -32,4 +34,8 @@ message QuicProtocolOptions { // Connection timeout in milliseconds before the crypto handshake is finished. // 20000ms if not specified. google.protobuf.Duration crypto_handshake_timeout = 3; + + // Runtime flag that controls whether the listener is enabled or not. If not specified, defaults + // to enabled. + core.v3.RuntimeFeatureFlag enabled = 4; } diff --git a/generated_api_shadow/envoy/config/listener/v4alpha/quic_config.proto b/generated_api_shadow/envoy/config/listener/v4alpha/quic_config.proto index 97866e4b6ed8..b2b1df1e374f 100644 --- a/generated_api_shadow/envoy/config/listener/v4alpha/quic_config.proto +++ b/generated_api_shadow/envoy/config/listener/v4alpha/quic_config.proto @@ -2,6 +2,8 @@ syntax = "proto3"; package envoy.config.listener.v4alpha; +import "envoy/config/core/v4alpha/base.proto"; + import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; @@ -16,7 +18,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // [#protodoc-title: QUIC listener Config] // Configuration specific to the QUIC protocol. -// Next id: 4 +// Next id: 5 message QuicProtocolOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.config.listener.v3.QuicProtocolOptions"; @@ -32,4 +34,8 @@ message QuicProtocolOptions { // Connection timeout in milliseconds before the crypto handshake is finished. 
// 20000ms if not specified. google.protobuf.Duration crypto_handshake_timeout = 3; + + // Runtime flag that controls whether the listener is enabled or not. If not specified, defaults + // to enabled. + core.v4alpha.RuntimeFeatureFlag enabled = 4; } diff --git a/source/extensions/quic_listeners/quiche/BUILD b/source/extensions/quic_listeners/quiche/BUILD index dde1927d81ab..aa0be8877546 100644 --- a/source/extensions/quic_listeners/quiche/BUILD +++ b/source/extensions/quic_listeners/quiche/BUILD @@ -274,6 +274,7 @@ envoy_cc_library( "//include/envoy/network:listener_interface", "//source/common/network:listener_lib", "//source/common/protobuf:utility_lib", + "//source/common/runtime:runtime_lib", "//source/server:connection_handler_lib", "@envoy_api//envoy/config/listener/v3:pkg_cc_proto", ], diff --git a/source/extensions/quic_listeners/quiche/active_quic_listener.cc b/source/extensions/quic_listeners/quiche/active_quic_listener.cc index 8ab780021d62..30c65d443e8a 100644 --- a/source/extensions/quic_listeners/quiche/active_quic_listener.cc +++ b/source/extensions/quic_listeners/quiche/active_quic_listener.cc @@ -20,20 +20,22 @@ ActiveQuicListener::ActiveQuicListener(Event::Dispatcher& dispatcher, Network::ConnectionHandler& parent, Network::ListenerConfig& listener_config, const quic::QuicConfig& quic_config, - Network::Socket::OptionsSharedPtr options) + Network::Socket::OptionsSharedPtr options, + const envoy::config::core::v3::RuntimeFeatureFlag& enabled) : ActiveQuicListener(dispatcher, parent, listener_config.listenSocketFactory().getListenSocket(), listener_config, - quic_config, std::move(options)) {} + quic_config, std::move(options), enabled) {} ActiveQuicListener::ActiveQuicListener(Event::Dispatcher& dispatcher, Network::ConnectionHandler& parent, Network::SocketSharedPtr listen_socket, Network::ListenerConfig& listener_config, const quic::QuicConfig& quic_config, - Network::Socket::OptionsSharedPtr options) + Network::Socket::OptionsSharedPtr options, + const envoy::config::core::v3::RuntimeFeatureFlag& enabled) : Server::ConnectionHandlerImpl::ActiveListenerImplBase(parent, &listener_config), dispatcher_(dispatcher), version_manager_(quic::CurrentSupportedVersions()), - listen_socket_(*listen_socket) { + listen_socket_(*listen_socket), enabled_(enabled, Runtime::LoaderSingleton::get()) { if (options != nullptr) { const bool ok = Network::Socket::applyOptions( options, listen_socket_, envoy::config::core::v3::SocketOption::STATE_BOUND); @@ -44,7 +46,6 @@ ActiveQuicListener::ActiveQuicListener(Event::Dispatcher& dispatcher, } listen_socket_.addOptions(options); } - udp_listener_ = dispatcher_.createUdpListener(std::move(listen_socket), *this); quic::QuicRandom* const random = quic::QuicRandom::GetInstance(); random->RandBytes(random_seed_, sizeof(random_seed_)); @@ -93,6 +94,10 @@ void ActiveQuicListener::onData(Network::UdpRecvData& data) { } void ActiveQuicListener::onReadReady() { + if (!enabled_.enabled()) { + ENVOY_LOG(trace, "Quic listener {}: runtime disabled", config_->name()); + return; + } quic_dispatcher_->ProcessBufferedChlos(kNumSessionsToCreatePerLoop); } @@ -112,7 +117,7 @@ void ActiveQuicListener::shutdownListener() { ActiveQuicListenerFactory::ActiveQuicListenerFactory( const envoy::config::listener::v3::QuicProtocolOptions& config, uint32_t concurrency) - : concurrency_(concurrency) { + : concurrency_(concurrency), enabled_(config.enabled()) { uint64_t idle_network_timeout_ms = config.has_idle_timeout() ? 
DurationUtil::durationToMilliseconds(config.idle_timeout()) : 300000; @@ -191,8 +196,9 @@ ActiveQuicListenerFactory::createActiveUdpListener(Network::ConnectionHandler& p #endif } #endif + return std::make_unique(disptacher, parent, config, quic_config_, - std::move(options)); + std::move(options), enabled_); } } // namespace Quic diff --git a/source/extensions/quic_listeners/quiche/active_quic_listener.h b/source/extensions/quic_listeners/quiche/active_quic_listener.h index 6536731c199f..8d0d5c9dd46e 100644 --- a/source/extensions/quic_listeners/quiche/active_quic_listener.h +++ b/source/extensions/quic_listeners/quiche/active_quic_listener.h @@ -3,9 +3,11 @@ #include "envoy/config/listener/v3/quic_config.pb.h" #include "envoy/network/connection_handler.h" #include "envoy/network/listener.h" +#include "envoy/runtime/runtime.h" #include "common/network/socket_option_impl.h" #include "common/protobuf/utility.h" +#include "common/runtime/runtime_protos.h" #include "server/connection_handler_impl.h" @@ -25,12 +27,14 @@ class ActiveQuicListener : public Network::UdpListenerCallbacks, ActiveQuicListener(Event::Dispatcher& dispatcher, Network::ConnectionHandler& parent, Network::ListenerConfig& listener_config, const quic::QuicConfig& quic_config, - Network::Socket::OptionsSharedPtr options); + Network::Socket::OptionsSharedPtr options, + const envoy::config::core::v3::RuntimeFeatureFlag& enabled); ActiveQuicListener(Event::Dispatcher& dispatcher, Network::ConnectionHandler& parent, Network::SocketSharedPtr listen_socket, Network::ListenerConfig& listener_config, const quic::QuicConfig& quic_config, - Network::Socket::OptionsSharedPtr options); + Network::Socket::OptionsSharedPtr options, + const envoy::config::core::v3::RuntimeFeatureFlag& enabled); ~ActiveQuicListener() override; @@ -60,6 +64,7 @@ class ActiveQuicListener : public Network::UdpListenerCallbacks, quic::QuicVersionManager version_manager_; std::unique_ptr quic_dispatcher_; Network::Socket& listen_socket_; + Runtime::FeatureFlag enabled_; }; using ActiveQuicListenerPtr = std::unique_ptr; @@ -83,6 +88,7 @@ class ActiveQuicListenerFactory : public Network::ActiveUdpListenerFactory, quic::QuicConfig quic_config_; const uint32_t concurrency_; absl::once_flag install_bpf_once_; + envoy::config::core::v3::RuntimeFeatureFlag enabled_; }; } // namespace Quic diff --git a/test/extensions/quic_listeners/quiche/BUILD b/test/extensions/quic_listeners/quiche/BUILD index 7e24810fe50b..43347b72524d 100644 --- a/test/extensions/quic_listeners/quiche/BUILD +++ b/test/extensions/quic_listeners/quiche/BUILD @@ -167,13 +167,16 @@ envoy_cc_test( ], deps = [ ":quic_test_utils_for_envoy_lib", + "//source/extensions/quic_listeners/quiche:active_quic_listener_config_lib", "//source/extensions/quic_listeners/quiche:active_quic_listener_lib", "//source/extensions/quic_listeners/quiche:envoy_quic_utils_lib", "//source/server:configuration_lib", "//test/mocks/network:network_mocks", + "//test/mocks/server:server_mocks", "//test/test_common:environment_lib", "//test/test_common:network_utility_lib", "//test/test_common:simulated_time_system_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/quic_listeners/quiche/active_quic_listener_config_test.cc b/test/extensions/quic_listeners/quiche/active_quic_listener_config_test.cc index b93afa375ea3..d116f816b6ca 100644 --- a/test/extensions/quic_listeners/quiche/active_quic_listener_config_test.cc +++ b/test/extensions/quic_listeners/quiche/active_quic_listener_config_test.cc @@ 
-15,6 +15,10 @@ class ActiveQuicListenerFactoryPeer { static quic::QuicConfig& quicConfig(ActiveQuicListenerFactory& factory) { return factory.quic_config_; } + static envoy::config::core::v3::RuntimeFeatureFlag& + runtimeEnabled(ActiveQuicListenerFactory& factory) { + return factory.enabled_; + } }; TEST(ActiveQuicListenerConfigTest, CreateActiveQuicListenerFactory) { @@ -29,6 +33,9 @@ TEST(ActiveQuicListenerConfigTest, CreateActiveQuicListenerFactory) { idle_timeout: { seconds: 2 } + enabled: + default_value: true + runtime_key: foo_key )EOF"; TestUtility::loadFromYaml(yaml, *config); Network::ActiveUdpListenerFactoryPtr listener_factory = @@ -41,6 +48,38 @@ TEST(ActiveQuicListenerConfigTest, CreateActiveQuicListenerFactory) { EXPECT_EQ(2000u, quic_config.IdleNetworkTimeout().ToMilliseconds()); // Default value if not present in config. EXPECT_EQ(20000u, quic_config.max_time_before_crypto_handshake().ToMilliseconds()); + envoy::config::core::v3::RuntimeFeatureFlag& runtime_enabled = + ActiveQuicListenerFactoryPeer::runtimeEnabled( + dynamic_cast(*listener_factory)); + EXPECT_EQ(true, runtime_enabled.default_value().value()); + EXPECT_EQ("foo_key", runtime_enabled.runtime_key()); +} + +TEST(ActiveQuicListenerConfigTest, QuicListenerFlagNotConfigured) { + std::string listener_name = QuicListenerName; + auto& config_factory = + Config::Utility::getAndCheckFactoryByName( + listener_name); + ProtobufTypes::MessagePtr config = config_factory.createEmptyConfigProto(); + + std::string yaml = R"EOF( + max_concurrent_streams: 10 + idle_timeout: { + seconds: 2 + } + )EOF"; + TestUtility::loadFromYaml(yaml, *config); + Network::ActiveUdpListenerFactoryPtr listener_factory = + config_factory.createActiveUdpListenerFactory(*config, /*concurrency=*/1); + EXPECT_NE(nullptr, listener_factory); + envoy::config::core::v3::RuntimeFeatureFlag& runtime_enabled = + ActiveQuicListenerFactoryPeer::runtimeEnabled( + dynamic_cast(*listener_factory)); + auto& quic_config = + dynamic_cast(*config); + EXPECT_FALSE(runtime_enabled.has_default_value()); + EXPECT_FALSE(quic_config.has_enabled()); + EXPECT_EQ("", runtime_enabled.runtime_key()); } } // namespace Quic diff --git a/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc b/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc index a9850565fa0f..c3836ac38163 100644 --- a/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc +++ b/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc @@ -8,6 +8,11 @@ #include +#include "common/runtime/runtime_impl.h" + +#include "envoy/config/core/v3/base.pb.h" +#include "envoy/config/core/v3/base.pb.validate.h" + #include "quiche/quic/core/crypto/crypto_protocol.h" #include "quiche/quic/test_tools/crypto_test_utils.h" #include "quiche/quic/test_tools/quic_dispatcher_peer.h" @@ -23,11 +28,14 @@ #include "test/test_common/simulated_time_system.h" #include "test/test_common/environment.h" #include "test/mocks/network/mocks.h" +#include "test/mocks/server/mocks.h" +#include "test/mocks/runtime/mocks.h" #include "test/test_common/utility.h" #include "test/test_common/network_utility.h" #include "absl/time/time.h" #include "gtest/gtest.h" #include "gmock/gmock.h" +#include "extensions/quic_listeners/quiche/active_quic_listener_config.h" #include "extensions/quic_listeners/quiche/platform/envoy_quic_clock.h" #include "extensions/quic_listeners/quiche/envoy_quic_utils.h" @@ -39,13 +47,23 @@ namespace Quic { class ActiveQuicListenerPeer { public: - static EnvoyQuicDispatcher* 
quic_dispatcher(ActiveQuicListener& listener) { + static EnvoyQuicDispatcher* quicDispatcher(ActiveQuicListener& listener) { return listener.quic_dispatcher_.get(); } - static quic::QuicCryptoServerConfig& crypto_config(ActiveQuicListener& listener) { + static quic::QuicCryptoServerConfig& cryptoConfig(ActiveQuicListener& listener) { return *listener.crypto_config_; } + + static bool enabled(ActiveQuicListener& listener) { return listener.enabled_.enabled(); } +}; + +class ActiveQuicListenerFactoryPeer { +public: + static envoy::config::core::v3::RuntimeFeatureFlag& + runtimeEnabled(ActiveQuicListenerFactory* factory) { + return factory->enabled_; + } }; class ActiveQuicListenerTest : public testing::TestWithParam { @@ -59,17 +77,44 @@ class ActiveQuicListenerTest : public testing::TestWithParam + std::unique_ptr staticUniquePointerCast(std::unique_ptr&& source) { + return std::unique_ptr{static_cast(source.release())}; + } + void SetUp() override { + envoy::config::bootstrap::v3::LayeredRuntime config; + config.add_layers()->mutable_admin_layer(); + loader_ = std::make_unique( + Runtime::LoaderPtr{new Runtime::LoaderImpl(*dispatcher_, tls_, config, local_info_, store_, + generator_, validation_visitor_, *api_)}); + listen_socket_ = std::make_shared(local_address_, nullptr, /*bind*/ true); listen_socket_->addOptions(Network::SocketOptionFactory::buildIpPacketInfoOptions()); listen_socket_->addOptions(Network::SocketOptionFactory::buildRxQueueOverFlowOptions()); - quic_listener_ = std::make_unique( - *dispatcher_, connection_handler_, listen_socket_, listener_config_, quic_config_, nullptr); + ON_CALL(listener_config_, listenSocketFactory()).WillByDefault(ReturnRef(socket_factory_)); + ON_CALL(socket_factory_, getListenSocket()).WillByDefault(Return(listen_socket_)); + + listener_factory_ = createQuicListenerFactory(yamlForQuicConfig()); + quic_listener_ = + staticUniquePointerCast(listener_factory_->createActiveUdpListener( + connection_handler_, *dispatcher_, listener_config_)); + quic_dispatcher_ = ActiveQuicListenerPeer::quicDispatcher(*quic_listener_); simulated_time_system_.advanceTimeWait(std::chrono::milliseconds(100)); } + Network::ActiveUdpListenerFactoryPtr createQuicListenerFactory(const std::string& yaml) { + std::string listener_name = QuicListenerName; + auto& config_factory = + Config::Utility::getAndCheckFactoryByName( + listener_name); + ProtobufTypes::MessagePtr config_proto = config_factory.createEmptyConfigProto(); + TestUtility::loadFromYaml(yaml, *config_proto); + return config_factory.createActiveUdpListenerFactory(*config_proto, /*concurrency=*/1); + } + void configureMocks(int connection_count) { EXPECT_CALL(listener_config_, filterChainManager()) .Times(connection_count) @@ -116,11 +161,11 @@ class ActiveQuicListenerTest : public testing::TestWithParam(local_address_, nullptr, /*bind*/ false)); quic::CryptoHandshakeMessage chlo = quic::test::crypto_test_utils::GenerateDefaultInchoateCHLO( &clock_, quic::AllSupportedVersions()[0].transport_version, - &ActiveQuicListenerPeer::crypto_config(*quic_listener_)); + &ActiveQuicListenerPeer::cryptoConfig(*quic_listener_)); chlo.SetVector(quic::kCOPT, quic::QuicTagVector{quic::kREJ}); quic::CryptoHandshakeMessage full_chlo; quic::QuicReferenceCountedPointer signed_config( @@ -128,7 +173,7 @@ class ActiveQuicListenerTest : public testing::TestWithParamonListenerShutdown(); // Trigger alarm to fire before listener destruction. 
dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + Runtime::LoaderSingleton::clear(); + } + +protected: + virtual std::string yamlForQuicConfig() { + return R"EOF( + enabled: + default_value: true + runtime_key: quic.enabled +)EOF"; } Network::Address::IpVersion version_; @@ -201,6 +256,18 @@ class ActiveQuicListenerTest : public testing::TestWithParam quic_listener_; + Network::ActiveUdpListenerFactoryPtr listener_factory_; + NiceMock socket_factory_; + EnvoyQuicDispatcher* quic_dispatcher_; + std::unique_ptr loader_; + + NiceMock tls_; + Stats::TestUtil::TestStore store_; + Runtime::MockRandomGenerator generator_; + Runtime::MockRandomGenerator rand_; + NiceMock local_info_; + Init::MockManager init_manager_; + NiceMock validation_visitor_; std::list> client_sockets_; std::list> read_filters_; @@ -221,30 +288,34 @@ TEST_P(ActiveQuicListenerTest, FailSocketOptionUponCreation) { .WillOnce(Return(false)); auto options = std::make_shared>(); options->emplace_back(std::move(option)); - EXPECT_THROW_WITH_REGEX(std::make_unique(*dispatcher_, connection_handler_, - listen_socket_, listener_config_, - quic_config_, options), - EnvoyException, "Failed to apply socket options."); + EXPECT_THROW_WITH_REGEX( + std::make_unique( + *dispatcher_, connection_handler_, listen_socket_, listener_config_, quic_config_, + options, + ActiveQuicListenerFactoryPeer::runtimeEnabled( + static_cast(listener_factory_.get()))), + EnvoyException, "Failed to apply socket options."); } TEST_P(ActiveQuicListenerTest, ReceiveFullQuicCHLO) { + quic::QuicBufferedPacketStore* const buffered_packets = + quic::test::QuicDispatcherPeer::GetBufferedPackets(quic_dispatcher_); configureMocks(/* connection_count = */ 1); - SendFullCHLO(quic::test::TestConnectionId(1)); + sendFullCHLO(quic::test::TestConnectionId(1)); dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + EXPECT_FALSE(buffered_packets->HasChlosBuffered()); + EXPECT_FALSE(quic_dispatcher_->session_map().empty()); ReadFromClientSockets(); } TEST_P(ActiveQuicListenerTest, ProcessBufferedChlos) { - EnvoyQuicDispatcher* const envoy_quic_dispatcher = - ActiveQuicListenerPeer::quic_dispatcher(*quic_listener_); quic::QuicBufferedPacketStore* const buffered_packets = - quic::test::QuicDispatcherPeer::GetBufferedPackets(envoy_quic_dispatcher); - + quic::test::QuicDispatcherPeer::GetBufferedPackets(quic_dispatcher_); configureMocks(ActiveQuicListener::kNumSessionsToCreatePerLoop + 2); // Generate one more CHLO than can be processed immediately. for (size_t i = 1; i <= ActiveQuicListener::kNumSessionsToCreatePerLoop + 1; ++i) { - SendFullCHLO(quic::test::TestConnectionId(i)); + sendFullCHLO(quic::test::TestConnectionId(i)); } dispatcher_->run(Event::Dispatcher::RunType::NonBlock); @@ -256,9 +327,10 @@ TEST_P(ActiveQuicListenerTest, ProcessBufferedChlos) { EXPECT_TRUE(buffered_packets->HasBufferedPackets( quic::test::TestConnectionId(ActiveQuicListener::kNumSessionsToCreatePerLoop + 1))); EXPECT_TRUE(buffered_packets->HasChlosBuffered()); + EXPECT_FALSE(quic_dispatcher_->session_map().empty()); // Generate more data to trigger a socket read during the next event loop. - SendFullCHLO(quic::test::TestConnectionId(ActiveQuicListener::kNumSessionsToCreatePerLoop + 2)); + sendFullCHLO(quic::test::TestConnectionId(ActiveQuicListener::kNumSessionsToCreatePerLoop + 2)); dispatcher_->run(Event::Dispatcher::RunType::NonBlock); // The socket read results in processing all CHLOs. 
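The next hunks add tests that flip the flag at runtime and that omit it entirely. A hedged summary of the semantics they exercise; the FeatureFlag fallback behavior is paraphrased here, not quoted from this patch.

// shouldProcess() is an illustrative wrapper, not code from the patch.
bool shouldProcess(const envoy::config::core::v3::RuntimeFeatureFlag& proto_flag) {
  Runtime::FeatureFlag flag(proto_flag, Runtime::LoaderSingleton::get());
  // 1. "quic.enabled" present in the admin runtime layer: that value wins, and the
  //    test below flips it with mergeValues({{"quic.enabled", "false"}}).
  // 2. Runtime key unset: the proto flag's default_value applies.
  // 3. Flag omitted from the listener config: processing stays enabled by default.
  return flag.enabled();
}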
@@ -270,5 +342,47 @@ TEST_P(ActiveQuicListenerTest, ProcessBufferedChlos) { ReadFromClientSockets(); } +TEST_P(ActiveQuicListenerTest, QuicProcessingDisabledAndEnabled) { + EXPECT_TRUE(ActiveQuicListenerPeer::enabled(*quic_listener_)); + Runtime::LoaderSingleton::getExisting()->mergeValues({{"quic.enabled", " false"}}); + sendFullCHLO(quic::test::TestConnectionId(1)); + dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + // If listener was enabled, there should have been session created for active connection. + EXPECT_TRUE(quic_dispatcher_->session_map().empty()); + EXPECT_FALSE(ActiveQuicListenerPeer::enabled(*quic_listener_)); + Runtime::LoaderSingleton::getExisting()->mergeValues({{"quic.enabled", " true"}}); + configureMocks(/* connection_count = */ 1); + sendFullCHLO(quic::test::TestConnectionId(1)); + dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + EXPECT_FALSE(quic_dispatcher_->session_map().empty()); + EXPECT_TRUE(ActiveQuicListenerPeer::enabled(*quic_listener_)); +} + +class ActiveQuicListenerEmptyFlagConfigTest : public ActiveQuicListenerTest { +protected: + std::string yamlForQuicConfig() override { + return R"EOF( + max_concurrent_streams: 10 + )EOF"; + } +}; + +INSTANTIATE_TEST_SUITE_P(IpVersions, ActiveQuicListenerEmptyFlagConfigTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); + +// Quic listener should be enabled by default, if not enabled explicitly in config. +TEST_P(ActiveQuicListenerEmptyFlagConfigTest, ReceiveFullQuicCHLO) { + quic::QuicBufferedPacketStore* const buffered_packets = + quic::test::QuicDispatcherPeer::GetBufferedPackets(quic_dispatcher_); + configureMocks(/* connection_count = */ 1); + sendFullCHLO(quic::test::TestConnectionId(1)); + dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + EXPECT_FALSE(buffered_packets->HasChlosBuffered()); + EXPECT_FALSE(quic_dispatcher_->session_map().empty()); + EXPECT_TRUE(ActiveQuicListenerPeer::enabled(*quic_listener_)); + ReadFromClientSockets(); +} + } // namespace Quic } // namespace Envoy From 00238028d51dd7e2a866e721a678dbc5cdbec996 Mon Sep 17 00:00:00 2001 From: Wayne Zhang Date: Fri, 15 May 2020 13:18:26 -0700 Subject: [PATCH 183/909] Move access_log_format out of FileAccessLogger to be shared (#11125) Signed-off-by: Wayne Zhang --- .../config/accesslog/v4alpha/accesslog.proto | 2 +- .../core/v3/substitution_format_string.proto | 61 ++++++++++++ .../v4alpha/substitution_format_string.proto | 65 +++++++++++++ .../extensions/access_loggers/file/v3/BUILD | 1 + .../access_loggers/file/v3/file.proto | 20 +++- .../access_loggers/file/v4alpha/BUILD | 13 +++ .../access_loggers/file/v4alpha/file.proto | 42 +++++++++ .../common_messages/common_messages.rst | 1 + .../observability/access_log/usage.rst | 2 + docs/root/version_history/current.rst | 2 + .../config/accesslog/v4alpha/accesslog.proto | 2 +- .../core/v3/substitution_format_string.proto | 61 ++++++++++++ .../v4alpha/substitution_format_string.proto | 65 +++++++++++++ .../extensions/access_loggers/file/v3/BUILD | 1 + .../access_loggers/file/v3/file.proto | 20 +++- .../access_loggers/file/v4alpha/BUILD | 13 +++ .../access_loggers/file/v4alpha/file.proto | 59 ++++++++++++ .../common/access_log/access_log_formatter.cc | 4 +- .../common/access_log/access_log_formatter.h | 3 +- source/common/common/BUILD | 12 +++ .../common/substitution_format_string.cc | 43 +++++++++ .../common/substitution_format_string.h | 31 ++++++ source/extensions/access_loggers/file/BUILD | 3 +- 
.../extensions/access_loggers/file/config.cc | 58 +++++------- .../extensions/access_loggers/file/config.h | 3 - .../access_log_formatter_speed_test.cc | 15 +-- .../access_log/access_log_formatter_test.cc | 28 +++--- test/common/common/BUILD | 12 +++ .../common/substitution_format_string_test.cc | 94 +++++++++++++++++++ test/common/tcp_proxy/tcp_proxy_test.cc | 4 +- test/config/utility.cc | 4 +- .../access_loggers/file/config_test.cc | 88 ++++++++++------- .../integration/tcp_proxy_integration_test.cc | 2 +- 33 files changed, 723 insertions(+), 111 deletions(-) create mode 100644 api/envoy/config/core/v3/substitution_format_string.proto create mode 100644 api/envoy/config/core/v4alpha/substitution_format_string.proto create mode 100644 api/envoy/extensions/access_loggers/file/v4alpha/BUILD create mode 100644 api/envoy/extensions/access_loggers/file/v4alpha/file.proto create mode 100644 generated_api_shadow/envoy/config/core/v3/substitution_format_string.proto create mode 100644 generated_api_shadow/envoy/config/core/v4alpha/substitution_format_string.proto create mode 100644 generated_api_shadow/envoy/extensions/access_loggers/file/v4alpha/BUILD create mode 100644 generated_api_shadow/envoy/extensions/access_loggers/file/v4alpha/file.proto create mode 100644 source/common/common/substitution_format_string.cc create mode 100644 source/common/common/substitution_format_string.h create mode 100644 test/common/common/substitution_format_string_test.cc diff --git a/api/envoy/config/accesslog/v4alpha/accesslog.proto b/api/envoy/config/accesslog/v4alpha/accesslog.proto index c5eb4d2497e7..939d4df95889 100644 --- a/api/envoy/config/accesslog/v4alpha/accesslog.proto +++ b/api/envoy/config/accesslog/v4alpha/accesslog.proto @@ -43,7 +43,7 @@ message AccessLog { // configurations include: // // #. "envoy.access_loggers.file": :ref:`FileAccessLog - // ` + // ` // #. "envoy.access_loggers.http_grpc": :ref:`HttpGrpcAccessLogConfig // ` // #. "envoy.access_loggers.tcp_grpc": :ref:`TcpGrpcAccessLogConfig diff --git a/api/envoy/config/core/v3/substitution_format_string.proto b/api/envoy/config/core/v3/substitution_format_string.proto new file mode 100644 index 000000000000..5fe6c08753df --- /dev/null +++ b/api/envoy/config/core/v3/substitution_format_string.proto @@ -0,0 +1,61 @@ +syntax = "proto3"; + +package envoy.config.core.v3; + +import "google/protobuf/struct.proto"; + +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v3"; +option java_outer_classname = "SubstitutionFormatStringProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Substitution format string] + +// Configuration to use multiple :ref:`command operators ` +// to generate a new string in either plain text or JSON format. +message SubstitutionFormatString { + oneof format { + option (validate.required) = true; + + // Specify a format with command operators to form a text string. + // Its details is described in :ref:`format string`. + // + // .. code-block:: + // + // text_format: %RESP_BODY%:%RESPONSE_CODE%:path=$REQ(:path)% + // + // The following plain text will be created: + // + // .. code-block:: + // + // upstream connect error:204:path=/foo + // + string text_format = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Specify a format with command operators to form a JSON string. + // Its details is described in :ref:`format dictionary`. 
+ // Values are rendered as strings, numbers, or boolean values as appropriate. + // Nested JSON objects may be produced by some command operators (e.g. FILTER_STATE or DYNAMIC_METADATA). + // See the documentation for a specific command operator for details. + // + // .. code-block:: + // + // typed_json_format: + // status: %RESPONSE_CODE% + // message: %RESP_BODY% + // + // The following JSON object would be created: + // + // .. code-block:: json + // + // { + // "status": 500, + // "message": "My error message" + // } + // + google.protobuf.Struct json_format = 2 [(validate.rules).message = {required: true}]; + } +} diff --git a/api/envoy/config/core/v4alpha/substitution_format_string.proto b/api/envoy/config/core/v4alpha/substitution_format_string.proto new file mode 100644 index 000000000000..d998ca1fe835 --- /dev/null +++ b/api/envoy/config/core/v4alpha/substitution_format_string.proto @@ -0,0 +1,65 @@ +syntax = "proto3"; + +package envoy.config.core.v4alpha; + +import "google/protobuf/struct.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; +option java_outer_classname = "SubstitutionFormatStringProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Substitution format string] + +// Configuration to use multiple :ref:`command operators ` +// to generate a new string in either plain text or JSON format. +message SubstitutionFormatString { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.SubstitutionFormatString"; + + oneof format { + option (validate.required) = true; + + // Specify a format with command operators to form a text string. + // Its details is described in :ref:`format string`. + // + // .. code-block:: + // + // text_format: %RESP_BODY%:%RESPONSE_CODE%:path=$REQ(:path)% + // + // The following plain text will be created: + // + // .. code-block:: + // + // upstream connect error:204:path=/foo + // + string text_format = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Specify a format with command operators to form a JSON string. + // Its details is described in :ref:`format dictionary`. + // Values are rendered as strings, numbers, or boolean values as appropriate. + // Nested JSON objects may be produced by some command operators (e.g. FILTER_STATE or DYNAMIC_METADATA). + // See the documentation for a specific command operator for details. + // + // .. code-block:: + // + // typed_json_format: + // status: %RESPONSE_CODE% + // message: %RESP_BODY% + // + // The following JSON object would be created: + // + // .. 
code-block:: json + // + // { + // "status": 500, + // "message": "My error message" + // } + // + google.protobuf.Struct json_format = 2 [(validate.rules).message = {required: true}]; + } +} diff --git a/api/envoy/extensions/access_loggers/file/v3/BUILD b/api/envoy/extensions/access_loggers/file/v3/BUILD index db752e857c62..3edacd3aafea 100644 --- a/api/envoy/extensions/access_loggers/file/v3/BUILD +++ b/api/envoy/extensions/access_loggers/file/v3/BUILD @@ -7,6 +7,7 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/accesslog/v2:pkg", + "//envoy/config/core/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/api/envoy/extensions/access_loggers/file/v3/file.proto b/api/envoy/extensions/access_loggers/file/v3/file.proto index f3c9c0a11612..de33623c207f 100644 --- a/api/envoy/extensions/access_loggers/file/v3/file.proto +++ b/api/envoy/extensions/access_loggers/file/v3/file.proto @@ -2,6 +2,8 @@ syntax = "proto3"; package envoy.extensions.access_loggers.file.v3; +import "envoy/config/core/v3/substitution_format_string.proto"; + import "google/protobuf/struct.proto"; import "udpa/annotations/status.proto"; @@ -19,6 +21,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Custom configuration for an :ref:`AccessLog ` // that writes log entries directly to a file. Configures the built-in *envoy.access_loggers.file* // AccessLog. +// [#next-free-field: 6] message FileAccessLog { option (udpa.annotations.versioning).previous_message_type = "envoy.config.accesslog.v2.FileAccessLog"; @@ -30,16 +33,27 @@ message FileAccessLog { // Access log :ref:`format string`. // Envoy supports :ref:`custom access log formats ` as well as a // :ref:`default format `. - string format = 2; + // This field is deprecated. + // Please use :ref:`log_format `. + string format = 2 [deprecated = true]; // Access log :ref:`format dictionary`. All values // are rendered as strings. - google.protobuf.Struct json_format = 3; + // This field is deprecated. + // Please use :ref:`log_format `. + google.protobuf.Struct json_format = 3 [deprecated = true]; // Access log :ref:`format dictionary`. Values are // rendered as strings, numbers, or boolean values as appropriate. Nested JSON objects may // be produced by some command operators (e.g.FILTER_STATE or DYNAMIC_METADATA). See the // documentation for a specific command operator for details. - google.protobuf.Struct typed_json_format = 4; + // This field is deprecated. + // Please use :ref:`log_format `. + google.protobuf.Struct typed_json_format = 4 [deprecated = true]; + + // Configuration to form access log data and format. + // If not specified, use :ref:`default format `. + config.core.v3.SubstitutionFormatString log_format = 5 + [(validate.rules).message = {required: true}]; } } diff --git a/api/envoy/extensions/access_loggers/file/v4alpha/BUILD b/api/envoy/extensions/access_loggers/file/v4alpha/BUILD new file mode 100644 index 000000000000..ba8c3042328b --- /dev/null +++ b/api/envoy/extensions/access_loggers/file/v4alpha/BUILD @@ -0,0 +1,13 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
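The remaining v4alpha proto and shadow files continue below. As background on how the new log_format field is consumed, a hedged sketch of formatter selection over the SubstitutionFormatString oneof; the patch's shared helper (SubstitutionFormatStringUtils, added later in this diff) is the authoritative version, and details such as the preserve_types value are assumptions here.

#include <memory>

#include "envoy/config/core/v3/substitution_format_string.pb.h"

#include "common/access_log/access_log_formatter.h"
#include "common/common/substitution_format_string.h"

// Hedged sketch: pick a formatter from whichever oneof member is set, so file access
// loggers and other consumers share a single code path.
AccessLog::FormatterPtr makeFormatter(
    const envoy::config::core::v3::SubstitutionFormatString& config) {
  if (config.format_case() ==
      envoy::config::core::v3::SubstitutionFormatString::kTextFormat) {
    return std::make_unique<AccessLog::FormatterImpl>(config.text_format());
  }
  // json_format branch: values rendered with native JSON types preserved (assumption).
  return SubstitutionFormatStringUtils::createJsonFormatter(config.json_format(),
                                                            /*preserve_types=*/true);
}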
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v4alpha:pkg", + "//envoy/extensions/access_loggers/file/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/access_loggers/file/v4alpha/file.proto b/api/envoy/extensions/access_loggers/file/v4alpha/file.proto new file mode 100644 index 000000000000..fa2ec9a50495 --- /dev/null +++ b/api/envoy/extensions/access_loggers/file/v4alpha/file.proto @@ -0,0 +1,42 @@ +syntax = "proto3"; + +package envoy.extensions.access_loggers.file.v4alpha; + +import "envoy/config/core/v4alpha/substitution_format_string.proto"; + +import "google/protobuf/struct.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.access_loggers.file.v4alpha"; +option java_outer_classname = "FileProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: File access log] +// [#extension: envoy.access_loggers.file] + +// Custom configuration for an :ref:`AccessLog ` +// that writes log entries directly to a file. Configures the built-in *envoy.access_loggers.file* +// AccessLog. +// [#next-free-field: 6] +message FileAccessLog { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.access_loggers.file.v3.FileAccessLog"; + + reserved 2, 3, 4; + + reserved "format", "json_format", "typed_json_format"; + + // A path to a local file to which to write the access log entries. + string path = 1 [(validate.rules).string = {min_bytes: 1}]; + + oneof access_log_format { + // Configuration to form access log data and format. + // If not specified, use :ref:`default format `. + config.core.v4alpha.SubstitutionFormatString log_format = 5 + [(validate.rules).message = {required: true}]; + } +} diff --git a/docs/root/api-v3/common_messages/common_messages.rst b/docs/root/api-v3/common_messages/common_messages.rst index 0d88c7715a6a..59e40f63b7fb 100644 --- a/docs/root/api-v3/common_messages/common_messages.rst +++ b/docs/root/api-v3/common_messages/common_messages.rst @@ -17,5 +17,6 @@ Common messages ../config/core/v3/grpc_method_list.proto ../config/core/v3/http_uri.proto ../config/core/v3/socket_option.proto + ../config/core/v3/substitution_format_string.proto ../extensions/common/ratelimit/v3/ratelimit.proto ../extensions/filters/common/fault/v3/fault.proto diff --git a/docs/root/configuration/observability/access_log/usage.rst b/docs/root/configuration/observability/access_log/usage.rst index 920e7619efff..11b5cf63e1c9 100644 --- a/docs/root/configuration/observability/access_log/usage.rst +++ b/docs/root/configuration/observability/access_log/usage.rst @@ -104,6 +104,8 @@ Format dictionaries have the following restrictions: When using the ``typed_json_format``, integer values that exceed :math:`2^{53}` will be represented with reduced precision as they must be converted to floating point numbers. +.. 
_config_access_log_command_operators: + Command Operators ----------------- diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 5fe2d231c030..220ddade58a2 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -7,6 +7,7 @@ Changes * access loggers: added GRPC_STATUS operator on logging format. * access loggers: applied existing buffer limits to access logs, as well as :ref:`stats ` for logged / dropped logs. This can be reverted temporarily by setting runtime feature `envoy.reloadable_features.disallow_unbounded_access_logs` to false. * access loggers: extened specifier for FilterStateFormatter to output :ref:`unstructured log string `. +* access loggers: file access logger config added :ref:`log_format `. * aggregate cluster: make route :ref:`retry_priority ` predicates work with :ref:`this cluster type `. * build: official released binary is now built on Ubuntu 18.04, requires glibc >= 2.27. * compressor: generic :ref:`compressor ` filter exposed to users. @@ -75,3 +76,4 @@ Deprecated the previous behavior, set allow_cross_scheme_redirect=true and use :ref:`safe_cross_scheme`, in :ref:`predicates `. +* File access logger fields :ref:`format `, :ref:`json_format ` and :ref:`typed_json_format ` are deprecated in favor of :ref:`log_format `. diff --git a/generated_api_shadow/envoy/config/accesslog/v4alpha/accesslog.proto b/generated_api_shadow/envoy/config/accesslog/v4alpha/accesslog.proto index c5eb4d2497e7..939d4df95889 100644 --- a/generated_api_shadow/envoy/config/accesslog/v4alpha/accesslog.proto +++ b/generated_api_shadow/envoy/config/accesslog/v4alpha/accesslog.proto @@ -43,7 +43,7 @@ message AccessLog { // configurations include: // // #. "envoy.access_loggers.file": :ref:`FileAccessLog - // ` + // ` // #. "envoy.access_loggers.http_grpc": :ref:`HttpGrpcAccessLogConfig // ` // #. "envoy.access_loggers.tcp_grpc": :ref:`TcpGrpcAccessLogConfig diff --git a/generated_api_shadow/envoy/config/core/v3/substitution_format_string.proto b/generated_api_shadow/envoy/config/core/v3/substitution_format_string.proto new file mode 100644 index 000000000000..5fe6c08753df --- /dev/null +++ b/generated_api_shadow/envoy/config/core/v3/substitution_format_string.proto @@ -0,0 +1,61 @@ +syntax = "proto3"; + +package envoy.config.core.v3; + +import "google/protobuf/struct.proto"; + +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v3"; +option java_outer_classname = "SubstitutionFormatStringProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Substitution format string] + +// Configuration to use multiple :ref:`command operators ` +// to generate a new string in either plain text or JSON format. +message SubstitutionFormatString { + oneof format { + option (validate.required) = true; + + // Specify a format with command operators to form a text string. + // Its details is described in :ref:`format string`. + // + // .. code-block:: + // + // text_format: %RESP_BODY%:%RESPONSE_CODE%:path=$REQ(:path)% + // + // The following plain text will be created: + // + // .. code-block:: + // + // upstream connect error:204:path=/foo + // + string text_format = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Specify a format with command operators to form a JSON string. + // Its details is described in :ref:`format dictionary`. 
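Related to the typed_json_format precision note added to the docs above, a small standalone illustration of why integers beyond 2^53 lose precision once rendered as JSON numbers.

#include <cstdint>
#include <iostream>

int main() {
  const uint64_t exact = (1ULL << 53) + 1;                    // 9007199254740993
  const double as_json_number = static_cast<double>(exact);   // rounds to 9007199254740992
  std::cout << exact << " -> " << static_cast<uint64_t>(as_json_number) << "\n";
  return 0;
}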
+ // Values are rendered as strings, numbers, or boolean values as appropriate. + // Nested JSON objects may be produced by some command operators (e.g. FILTER_STATE or DYNAMIC_METADATA). + // See the documentation for a specific command operator for details. + // + // .. code-block:: + // + // typed_json_format: + // status: %RESPONSE_CODE% + // message: %RESP_BODY% + // + // The following JSON object would be created: + // + // .. code-block:: json + // + // { + // "status": 500, + // "message": "My error message" + // } + // + google.protobuf.Struct json_format = 2 [(validate.rules).message = {required: true}]; + } +} diff --git a/generated_api_shadow/envoy/config/core/v4alpha/substitution_format_string.proto b/generated_api_shadow/envoy/config/core/v4alpha/substitution_format_string.proto new file mode 100644 index 000000000000..d998ca1fe835 --- /dev/null +++ b/generated_api_shadow/envoy/config/core/v4alpha/substitution_format_string.proto @@ -0,0 +1,65 @@ +syntax = "proto3"; + +package envoy.config.core.v4alpha; + +import "google/protobuf/struct.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; +option java_outer_classname = "SubstitutionFormatStringProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Substitution format string] + +// Configuration to use multiple :ref:`command operators ` +// to generate a new string in either plain text or JSON format. +message SubstitutionFormatString { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.SubstitutionFormatString"; + + oneof format { + option (validate.required) = true; + + // Specify a format with command operators to form a text string. + // Its details is described in :ref:`format string`. + // + // .. code-block:: + // + // text_format: %RESP_BODY%:%RESPONSE_CODE%:path=$REQ(:path)% + // + // The following plain text will be created: + // + // .. code-block:: + // + // upstream connect error:204:path=/foo + // + string text_format = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Specify a format with command operators to form a JSON string. + // Its details is described in :ref:`format dictionary`. + // Values are rendered as strings, numbers, or boolean values as appropriate. + // Nested JSON objects may be produced by some command operators (e.g. FILTER_STATE or DYNAMIC_METADATA). + // See the documentation for a specific command operator for details. + // + // .. code-block:: + // + // typed_json_format: + // status: %RESPONSE_CODE% + // message: %RESP_BODY% + // + // The following JSON object would be created: + // + // .. 
code-block:: json + // + // { + // "status": 500, + // "message": "My error message" + // } + // + google.protobuf.Struct json_format = 2 [(validate.rules).message = {required: true}]; + } +} diff --git a/generated_api_shadow/envoy/extensions/access_loggers/file/v3/BUILD b/generated_api_shadow/envoy/extensions/access_loggers/file/v3/BUILD index db752e857c62..3edacd3aafea 100644 --- a/generated_api_shadow/envoy/extensions/access_loggers/file/v3/BUILD +++ b/generated_api_shadow/envoy/extensions/access_loggers/file/v3/BUILD @@ -7,6 +7,7 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/accesslog/v2:pkg", + "//envoy/config/core/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/generated_api_shadow/envoy/extensions/access_loggers/file/v3/file.proto b/generated_api_shadow/envoy/extensions/access_loggers/file/v3/file.proto index f3c9c0a11612..de33623c207f 100644 --- a/generated_api_shadow/envoy/extensions/access_loggers/file/v3/file.proto +++ b/generated_api_shadow/envoy/extensions/access_loggers/file/v3/file.proto @@ -2,6 +2,8 @@ syntax = "proto3"; package envoy.extensions.access_loggers.file.v3; +import "envoy/config/core/v3/substitution_format_string.proto"; + import "google/protobuf/struct.proto"; import "udpa/annotations/status.proto"; @@ -19,6 +21,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Custom configuration for an :ref:`AccessLog ` // that writes log entries directly to a file. Configures the built-in *envoy.access_loggers.file* // AccessLog. +// [#next-free-field: 6] message FileAccessLog { option (udpa.annotations.versioning).previous_message_type = "envoy.config.accesslog.v2.FileAccessLog"; @@ -30,16 +33,27 @@ message FileAccessLog { // Access log :ref:`format string`. // Envoy supports :ref:`custom access log formats ` as well as a // :ref:`default format `. - string format = 2; + // This field is deprecated. + // Please use :ref:`log_format `. + string format = 2 [deprecated = true]; // Access log :ref:`format dictionary`. All values // are rendered as strings. - google.protobuf.Struct json_format = 3; + // This field is deprecated. + // Please use :ref:`log_format `. + google.protobuf.Struct json_format = 3 [deprecated = true]; // Access log :ref:`format dictionary`. Values are // rendered as strings, numbers, or boolean values as appropriate. Nested JSON objects may // be produced by some command operators (e.g.FILTER_STATE or DYNAMIC_METADATA). See the // documentation for a specific command operator for details. - google.protobuf.Struct typed_json_format = 4; + // This field is deprecated. + // Please use :ref:`log_format `. + google.protobuf.Struct typed_json_format = 4 [deprecated = true]; + + // Configuration to form access log data and format. + // If not specified, use :ref:`default format `. + config.core.v3.SubstitutionFormatString log_format = 5 + [(validate.rules).message = {required: true}]; } } diff --git a/generated_api_shadow/envoy/extensions/access_loggers/file/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/access_loggers/file/v4alpha/BUILD new file mode 100644 index 000000000000..ba8c3042328b --- /dev/null +++ b/generated_api_shadow/envoy/extensions/access_loggers/file/v4alpha/BUILD @@ -0,0 +1,13 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v4alpha:pkg", + "//envoy/extensions/access_loggers/file/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/access_loggers/file/v4alpha/file.proto b/generated_api_shadow/envoy/extensions/access_loggers/file/v4alpha/file.proto new file mode 100644 index 000000000000..c2a2c753f5bb --- /dev/null +++ b/generated_api_shadow/envoy/extensions/access_loggers/file/v4alpha/file.proto @@ -0,0 +1,59 @@ +syntax = "proto3"; + +package envoy.extensions.access_loggers.file.v4alpha; + +import "envoy/config/core/v4alpha/substitution_format_string.proto"; + +import "google/protobuf/struct.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.access_loggers.file.v4alpha"; +option java_outer_classname = "FileProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: File access log] +// [#extension: envoy.access_loggers.file] + +// Custom configuration for an :ref:`AccessLog ` +// that writes log entries directly to a file. Configures the built-in *envoy.access_loggers.file* +// AccessLog. +// [#next-free-field: 6] +message FileAccessLog { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.access_loggers.file.v3.FileAccessLog"; + + // A path to a local file to which to write the access log entries. + string path = 1 [(validate.rules).string = {min_bytes: 1}]; + + oneof access_log_format { + // Access log :ref:`format string`. + // Envoy supports :ref:`custom access log formats ` as well as a + // :ref:`default format `. + // This field is deprecated. + // Please use :ref:`log_format `. + string hidden_envoy_deprecated_format = 2 [deprecated = true]; + + // Access log :ref:`format dictionary`. All values + // are rendered as strings. + // This field is deprecated. + // Please use :ref:`log_format `. + google.protobuf.Struct hidden_envoy_deprecated_json_format = 3 [deprecated = true]; + + // Access log :ref:`format dictionary`. Values are + // rendered as strings, numbers, or boolean values as appropriate. Nested JSON objects may + // be produced by some command operators (e.g.FILTER_STATE or DYNAMIC_METADATA). See the + // documentation for a specific command operator for details. + // This field is deprecated. + // Please use :ref:`log_format `. + google.protobuf.Struct hidden_envoy_deprecated_typed_json_format = 4 [deprecated = true]; + + // Configuration to form access log data and format. + // If not specified, use :ref:`default format `. 
+ config.core.v4alpha.SubstitutionFormatString log_format = 5 + [(validate.rules).message = {required: true}]; + } +} diff --git a/source/common/access_log/access_log_formatter.cc b/source/common/access_log/access_log_formatter.cc index d1bfc981338c..4b6bd1891bf8 100644 --- a/source/common/access_log/access_log_formatter.cc +++ b/source/common/access_log/access_log_formatter.cc @@ -107,8 +107,8 @@ std::string FormatterImpl::format(const Http::RequestHeaderMap& request_headers, return log_line; } -JsonFormatterImpl::JsonFormatterImpl(std::unordered_map& format_mapping, - bool preserve_types) +JsonFormatterImpl::JsonFormatterImpl( + const absl::flat_hash_map& format_mapping, bool preserve_types) : preserve_types_(preserve_types) { for (const auto& pair : format_mapping) { json_output_format_.emplace(pair.first, AccessLogFormatParser::parse(pair.second)); diff --git a/source/common/access_log/access_log_formatter.h b/source/common/access_log/access_log_formatter.h index 408eb49b3eab..dd3654a2eb7e 100644 --- a/source/common/access_log/access_log_formatter.h +++ b/source/common/access_log/access_log_formatter.h @@ -12,6 +12,7 @@ #include "common/common/utility.h" +#include "absl/container/flat_hash_map.h" #include "absl/types/optional.h" namespace Envoy { @@ -101,7 +102,7 @@ class FormatterImpl : public Formatter { class JsonFormatterImpl : public Formatter { public: - JsonFormatterImpl(std::unordered_map& format_mapping, + JsonFormatterImpl(const absl::flat_hash_map& format_mapping, bool preserve_types); // Formatter::format diff --git a/source/common/common/BUILD b/source/common/common/BUILD index f9ee77a45a7b..087b4dec7cb2 100644 --- a/source/common/common/BUILD +++ b/source/common/common/BUILD @@ -67,6 +67,18 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "substitution_format_string_lib", + srcs = ["substitution_format_string.cc"], + hdrs = ["substitution_format_string.h"], + deps = [ + "//include/envoy/access_log:access_log_interface", + "//source/common/access_log:access_log_formatter_lib", + "//source/common/protobuf", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + ], +) + envoy_cc_library( name = "compiler_requirements_lib", hdrs = ["compiler_requirements.h"], diff --git a/source/common/common/substitution_format_string.cc b/source/common/common/substitution_format_string.cc new file mode 100644 index 000000000000..19001acc18f8 --- /dev/null +++ b/source/common/common/substitution_format_string.cc @@ -0,0 +1,43 @@ +#include "common/common/substitution_format_string.h" + +#include "common/access_log/access_log_formatter.h" + +namespace Envoy { +namespace { + +absl::flat_hash_map +convertJsonFormatToMap(const ProtobufWkt::Struct& json_format) { + absl::flat_hash_map output; + for (const auto& pair : json_format.fields()) { + if (pair.second.kind_case() != ProtobufWkt::Value::kStringValue) { + throw EnvoyException("Only string values are supported in the JSON access log format."); + } + output.emplace(pair.first, pair.second.string_value()); + } + return output; +} + +} // namespace + +AccessLog::FormatterPtr +SubstitutionFormatStringUtils::createJsonFormatter(const ProtobufWkt::Struct& struct_format, + bool preserve_types) { + auto json_format_map = convertJsonFormatToMap(struct_format); + return std::make_unique(json_format_map, preserve_types); +} + +AccessLog::FormatterPtr SubstitutionFormatStringUtils::fromProtoConfig( + const envoy::config::core::v3::SubstitutionFormatString& config) { + switch (config.format_case()) { + case 
envoy::config::core::v3::SubstitutionFormatString::FormatCase::kTextFormat: + return std::make_unique(config.text_format()); + case envoy::config::core::v3::SubstitutionFormatString::FormatCase::kJsonFormat: { + return createJsonFormatter(config.json_format(), true); + } + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } + return nullptr; +} + +} // namespace Envoy diff --git a/source/common/common/substitution_format_string.h b/source/common/common/substitution_format_string.h new file mode 100644 index 000000000000..77b0b3f1f091 --- /dev/null +++ b/source/common/common/substitution_format_string.h @@ -0,0 +1,31 @@ +#pragma once + +#include +#include + +#include "envoy/access_log/access_log.h" +#include "envoy/config/core/v3/substitution_format_string.pb.h" + +#include "common/protobuf/protobuf.h" + +namespace Envoy { + +/** + * Utilities for using envoy::config::core::v3::SubstitutionFormatString + */ +class SubstitutionFormatStringUtils { +public: + /** + * Generate a formatter object from config SubstitutionFormatString. + */ + static AccessLog::FormatterPtr + fromProtoConfig(const envoy::config::core::v3::SubstitutionFormatString& config); + + /** + * Generate a Json formatter object from proto::Struct config + */ + static AccessLog::FormatterPtr createJsonFormatter(const ProtobufWkt::Struct& struct_format, + bool preserve_types); +}; + +} // namespace Envoy diff --git a/source/extensions/access_loggers/file/BUILD b/source/extensions/access_loggers/file/BUILD index 30d0af49e2fe..5d19c5f8e4db 100644 --- a/source/extensions/access_loggers/file/BUILD +++ b/source/extensions/access_loggers/file/BUILD @@ -29,8 +29,7 @@ envoy_cc_extension( deps = [ ":file_access_log_lib", "//include/envoy/registry", - "//include/envoy/server:access_log_config_interface", - "//source/common/access_log:access_log_formatter_lib", + "//source/common/common:substitution_format_string_lib", "//source/common/protobuf", "//source/extensions/access_loggers:well_known_names", "@envoy_api//envoy/extensions/access_loggers/file/v3:pkg_cc_proto", diff --git a/source/extensions/access_loggers/file/config.cc b/source/extensions/access_loggers/file/config.cc index dca426886840..dd430c0c298e 100644 --- a/source/extensions/access_loggers/file/config.cc +++ b/source/extensions/access_loggers/file/config.cc @@ -10,6 +10,7 @@ #include "common/access_log/access_log_formatter.h" #include "common/common/logger.h" +#include "common/common/substitution_format_string.h" #include "common/protobuf/protobuf.h" #include "extensions/access_loggers/file/file_access_log_impl.h" @@ -29,30 +30,29 @@ FileAccessLogFactory::createAccessLogInstance(const Protobuf::Message& config, config, context.messageValidationVisitor()); AccessLog::FormatterPtr formatter; - if (fal_config.access_log_format_case() == envoy::extensions::access_loggers::file::v3:: - FileAccessLog::AccessLogFormatCase::kFormat || - fal_config.access_log_format_case() == - envoy::extensions::access_loggers::file::v3::FileAccessLog::AccessLogFormatCase:: - ACCESS_LOG_FORMAT_NOT_SET) { - if (fal_config.format().empty()) { - formatter = AccessLog::AccessLogFormatUtils::defaultAccessLogFormatter(); - } else { - formatter = std::make_unique(fal_config.format()); - } - } else if (fal_config.access_log_format_case() == - envoy::extensions::access_loggers::file::v3::FileAccessLog::AccessLogFormatCase:: - kJsonFormat) { - auto json_format_map = this->convertJsonFormatToMap(fal_config.json_format()); - formatter = std::make_unique(json_format_map, false); - } else if 
(fal_config.access_log_format_case() == + if (fal_config.has_log_format()) { + formatter = SubstitutionFormatStringUtils::fromProtoConfig(fal_config.log_format()); + } else if (fal_config.has_json_format()) { + formatter = SubstitutionFormatStringUtils::createJsonFormatter(fal_config.json_format(), false); + } else if (fal_config.access_log_format_case() != envoy::extensions::access_loggers::file::v3::FileAccessLog::AccessLogFormatCase:: - kTypedJsonFormat) { - auto json_format_map = this->convertJsonFormatToMap(fal_config.typed_json_format()); - formatter = std::make_unique(json_format_map, true); - } else { - throw EnvoyException( - "Invalid access_log format provided. Only 'format', 'json_format', or 'typed_json_format' " - "are supported."); + ACCESS_LOG_FORMAT_NOT_SET) { + envoy::config::core::v3::SubstitutionFormatString sff_config; + switch (fal_config.access_log_format_case()) { + case envoy::extensions::access_loggers::file::v3::FileAccessLog::AccessLogFormatCase::kFormat: + sff_config.set_text_format(fal_config.format()); + break; + case envoy::extensions::access_loggers::file::v3::FileAccessLog::AccessLogFormatCase:: + kTypedJsonFormat: + *sff_config.mutable_json_format() = fal_config.typed_json_format(); + break; + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } + formatter = SubstitutionFormatStringUtils::fromProtoConfig(sff_config); + } + if (!formatter) { + formatter = AccessLog::AccessLogFormatUtils::defaultAccessLogFormatter(); } return std::make_shared(fal_config.path(), std::move(filter), std::move(formatter), @@ -66,18 +66,6 @@ ProtobufTypes::MessagePtr FileAccessLogFactory::createEmptyConfigProto() { std::string FileAccessLogFactory::name() const { return AccessLogNames::get().File; } -std::unordered_map -FileAccessLogFactory::convertJsonFormatToMap(ProtobufWkt::Struct json_format) { - std::unordered_map output; - for (const auto& pair : json_format.fields()) { - if (pair.second.kind_case() != ProtobufWkt::Value::kStringValue) { - throw EnvoyException("Only string values are supported in the JSON access log format."); - } - output.emplace(pair.first, pair.second.string_value()); - } - return output; -} - /** * Static registration for the file access log. @see RegisterFactory. 
*/ diff --git a/source/extensions/access_loggers/file/config.h b/source/extensions/access_loggers/file/config.h index 7f3976adfc8b..d3ebf58c352f 100644 --- a/source/extensions/access_loggers/file/config.h +++ b/source/extensions/access_loggers/file/config.h @@ -19,9 +19,6 @@ class FileAccessLogFactory : public Server::Configuration::AccessLogInstanceFact ProtobufTypes::MessagePtr createEmptyConfigProto() override; std::string name() const override; - -private: - std::unordered_map convertJsonFormatToMap(ProtobufWkt::Struct config); }; } // namespace File diff --git a/test/common/access_log/access_log_formatter_speed_test.cc b/test/common/access_log/access_log_formatter_speed_test.cc index c946cfab8ed1..4cb64f3d6052 100644 --- a/test/common/access_log/access_log_formatter_speed_test.cc +++ b/test/common/access_log/access_log_formatter_speed_test.cc @@ -6,10 +6,12 @@ #include "benchmark/benchmark.h" +namespace Envoy { + namespace { -std::unique_ptr MakeJsonFormatter(bool typed) { - std::unordered_map JsonLogFormat = { +std::unique_ptr makeJsonFormatter(bool typed) { + absl::flat_hash_map JsonLogFormat = { {"remote_address", "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%"}, {"start_time", "%START_TIME(%Y/%m/%dT%H:%M:%S%z %s)%"}, {"method", "%REQ(:METHOD)%"}, @@ -33,8 +35,7 @@ std::unique_ptr makeStreamInfo() { } // namespace -namespace Envoy { - +// NOLINTNEXTLINE(readability-identifier-naming) static void BM_AccessLogFormatter(benchmark::State& state) { std::unique_ptr stream_info = makeStreamInfo(); static const char* LogFormat = @@ -59,9 +60,10 @@ static void BM_AccessLogFormatter(benchmark::State& state) { } BENCHMARK(BM_AccessLogFormatter); +// NOLINTNEXTLINE(readability-identifier-naming) static void BM_JsonAccessLogFormatter(benchmark::State& state) { std::unique_ptr stream_info = makeStreamInfo(); - std::unique_ptr json_formatter = MakeJsonFormatter(false); + std::unique_ptr json_formatter = makeJsonFormatter(false); size_t output_bytes = 0; Http::TestRequestHeaderMapImpl request_headers; @@ -76,10 +78,11 @@ static void BM_JsonAccessLogFormatter(benchmark::State& state) { } BENCHMARK(BM_JsonAccessLogFormatter); +// NOLINTNEXTLINE(readability-identifier-naming) static void BM_TypedJsonAccessLogFormatter(benchmark::State& state) { std::unique_ptr stream_info = makeStreamInfo(); std::unique_ptr typed_json_formatter = - MakeJsonFormatter(true); + makeJsonFormatter(true); size_t output_bytes = 0; Http::TestRequestHeaderMapImpl request_headers; diff --git a/test/common/access_log/access_log_formatter_test.cc b/test/common/access_log/access_log_formatter_test.cc index fe852ad2a22a..12b2a77399e0 100644 --- a/test/common/access_log/access_log_formatter_test.cc +++ b/test/common/access_log/access_log_formatter_test.cc @@ -1473,7 +1473,7 @@ TEST(AccessLogFormatterTest, JsonFormatterPlainStringTest) { std::unordered_map expected_json_map = { {"plain_string", "plain_string_value"}}; - std::unordered_map key_mapping = { + absl::flat_hash_map key_mapping = { {"plain_string", "plain_string_value"}}; JsonFormatterImpl formatter(key_mapping, false); @@ -1494,7 +1494,7 @@ TEST(AccessLogFormatterTest, JsonFormatterSingleOperatorTest) { std::unordered_map expected_json_map = {{"protocol", "HTTP/1.1"}}; - std::unordered_map key_mapping = {{"protocol", "%PROTOCOL%"}}; + absl::flat_hash_map key_mapping = {{"protocol", "%PROTOCOL%"}}; JsonFormatterImpl formatter(key_mapping, false); verifyJsonOutput(formatter.format(request_header, response_header, response_trailer, stream_info), @@ -1513,7 +1513,7 @@ 
TEST(AccessLogFormatterTest, JsonFormatterNonExistentHeaderTest) { {"nonexistent_response_header", "-"}, {"some_response_header", "SOME_RESPONSE_HEADER"}}; - std::unordered_map key_mapping = { + absl::flat_hash_map key_mapping = { {"protocol", "%PROTOCOL%"}, {"some_request_header", "%REQ(some_request_header)%"}, {"nonexistent_response_header", "%RESP(nonexistent_response_header)%"}, @@ -1541,7 +1541,7 @@ TEST(AccessLogFormatterTest, JsonFormatterAlternateHeaderTest) { {"response_absent_header_or_response_absent_header", "RESPONSE_PRESENT_HEADER"}, {"response_present_header_or_response_absent_header", "RESPONSE_PRESENT_HEADER"}}; - std::unordered_map key_mapping = { + absl::flat_hash_map key_mapping = { {"request_present_header_or_request_absent_header", "%REQ(request_present_header?request_absent_header)%"}, {"request_absent_header_or_request_present_header", @@ -1575,7 +1575,7 @@ TEST(AccessLogFormatterTest, JsonFormatterDynamicMetadataTest) { {"test_obj", "{\"inner_key\":\"inner_value\"}"}, {"test_obj.inner_key", "\"inner_value\""}}; - std::unordered_map key_mapping = { + absl::flat_hash_map key_mapping = { {"test_key", "%DYNAMIC_METADATA(com.test:test_key)%"}, {"test_obj", "%DYNAMIC_METADATA(com.test:test_obj)%"}, {"test_obj.inner_key", "%DYNAMIC_METADATA(com.test:test_obj:inner_key)%"}}; @@ -1597,7 +1597,7 @@ TEST(AccessLogFormatterTest, JsonFormatterTypedDynamicMetadataTest) { EXPECT_CALL(stream_info, dynamicMetadata()).WillRepeatedly(ReturnRef(metadata)); EXPECT_CALL(Const(stream_info), dynamicMetadata()).WillRepeatedly(ReturnRef(metadata)); - std::unordered_map key_mapping = { + absl::flat_hash_map key_mapping = { {"test_key", "%DYNAMIC_METADATA(com.test:test_key)%"}, {"test_obj", "%DYNAMIC_METADATA(com.test:test_obj)%"}, {"test_obj.inner_key", "%DYNAMIC_METADATA(com.test:test_obj:inner_key)%"}}; @@ -1632,7 +1632,7 @@ TEST(AccessLogFormatterTest, JsonFormatterFilterStateTest) { std::unordered_map expected_json_map = { {"test_key", "\"test_value\""}, {"test_obj", "{\"inner_key\":\"inner_value\"}"}}; - std::unordered_map key_mapping = { + absl::flat_hash_map key_mapping = { {"test_key", "%FILTER_STATE(test_key)%"}, {"test_obj", "%FILTER_STATE(test_obj)%"}}; JsonFormatterImpl formatter(key_mapping, false); @@ -1655,7 +1655,7 @@ TEST(AccessLogFormatterTest, JsonFormatterTypedFilterStateTest) { StreamInfo::FilterState::StateType::ReadOnly); EXPECT_CALL(Const(stream_info), filterState()).Times(testing::AtLeast(1)); - std::unordered_map key_mapping = { + absl::flat_hash_map key_mapping = { {"test_key", "%FILTER_STATE(test_key)%"}, {"test_obj", "%FILTER_STATE(test_obj)%"}}; JsonFormatterImpl formatter(key_mapping, true); @@ -1688,7 +1688,7 @@ TEST(AccessLogFormatterTest, FilterStateSpeciferTest) { {"test_key_typed", "\"test_value By TYPED\""}, }; - std::unordered_map key_mapping = { + absl::flat_hash_map key_mapping = { {"test_key_plain", "%FILTER_STATE(test_key:PLAIN)%"}, {"test_key_typed", "%FILTER_STATE(test_key:TYPED)%"}}; @@ -1711,7 +1711,7 @@ TEST(AccessLogFormatterTest, TypedFilterStateSpeciferTest) { StreamInfo::FilterState::StateType::ReadOnly); EXPECT_CALL(Const(stream_info), filterState()).Times(testing::AtLeast(1)); - std::unordered_map key_mapping = { + absl::flat_hash_map key_mapping = { {"test_key_plain", "%FILTER_STATE(test_key:PLAIN)%"}, {"test_key_typed", "%FILTER_STATE(test_key:TYPED)%"}}; @@ -1739,7 +1739,7 @@ TEST(AccessLogFormatterTest, FilterStateErrorSpeciferTest) { StreamInfo::FilterState::StateType::ReadOnly); // 'ABCDE' is error specifier. 
- std::unordered_map key_mapping = { + absl::flat_hash_map key_mapping = { {"test_key_plain", "%FILTER_STATE(test_key:ABCDE)%"}, {"test_key_typed", "%FILTER_STATE(test_key:TYPED)%"}}; @@ -1764,7 +1764,7 @@ TEST(AccessLogFormatterTest, JsonFormatterStartTimeTest) { {"default", "2018-03-28T23:35:58.000Z"}, {"all_zeroes", "000000000.0.00.000"}}; - std::unordered_map key_mapping = { + absl::flat_hash_map key_mapping = { {"simple_date", "%START_TIME(%Y/%m/%d)%"}, {"test_time", "%START_TIME(%s)%"}, {"bad_format", "%START_TIME(bad_format)%"}, @@ -1787,7 +1787,7 @@ TEST(AccessLogFormatterTest, JsonFormatterMultiTokenTest) { std::unordered_map expected_json_map = { {"multi_token_field", "HTTP/1.1 plainstring SOME_REQUEST_HEADER SOME_RESPONSE_HEADER"}}; - std::unordered_map key_mapping = { + absl::flat_hash_map key_mapping = { {"multi_token_field", "%PROTOCOL% plainstring %REQ(some_request_header)% %RESP(some_response_header)%"}}; @@ -1827,7 +1827,7 @@ TEST(AccessLogFormatterTest, JsonFormatterTypedTest) { StreamInfo::FilterState::StateType::ReadOnly); EXPECT_CALL(Const(stream_info), filterState()).Times(testing::AtLeast(1)); - std::unordered_map key_mapping = { + absl::flat_hash_map key_mapping = { {"request_duration", "%REQUEST_DURATION%"}, {"request_duration_multi", "%REQUEST_DURATION%ms"}, {"filter_state", "%FILTER_STATE(test_obj)%"}, diff --git a/test/common/common/BUILD b/test/common/common/BUILD index a32421843112..6db03926358b 100644 --- a/test/common/common/BUILD +++ b/test/common/common/BUILD @@ -38,6 +38,18 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "substitution_format_string_test", + srcs = ["substitution_format_string_test.cc"], + deps = [ + "//source/common/common:substitution_format_string_lib", + "//test/mocks/http:http_mocks", + "//test/mocks/stream_info:stream_info_mocks", + "//test/test_common:utility_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + ], +) + envoy_cc_fuzz_test( name = "base64_fuzz_test", srcs = ["base64_fuzz_test.cc"], diff --git a/test/common/common/substitution_format_string_test.cc b/test/common/common/substitution_format_string_test.cc new file mode 100644 index 000000000000..745d2fb760fc --- /dev/null +++ b/test/common/common/substitution_format_string_test.cc @@ -0,0 +1,94 @@ +#include "envoy/config/core/v3/substitution_format_string.pb.validate.h" + +#include "common/common/substitution_format_string.h" + +#include "test/mocks/http/mocks.h" +#include "test/mocks/stream_info/mocks.h" +#include "test/test_common/utility.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::Return; + +namespace Envoy { + +class SubstitutionFormatStringUtilsTest : public ::testing::Test { +public: + SubstitutionFormatStringUtilsTest() { + absl::optional response_code{200}; + EXPECT_CALL(stream_info_, responseCode()).WillRepeatedly(Return(response_code)); + } + + Http::TestRequestHeaderMapImpl request_headers_{{":method", "GET"}, {":path", "/bar/foo"}}; + Http::TestResponseHeaderMapImpl response_headers_; + Http::TestResponseTrailerMapImpl response_trailers_; + StreamInfo::MockStreamInfo stream_info_; + + envoy::config::core::v3::SubstitutionFormatString config_; +}; + +TEST_F(SubstitutionFormatStringUtilsTest, TestEmptyIsInvalid) { + envoy::config::core::v3::SubstitutionFormatString empty_config; + std::string err; + EXPECT_FALSE(Validate(empty_config, &err)); +} + +TEST_F(SubstitutionFormatStringUtilsTest, TestFromProtoConfigText) { + const std::string yaml = R"EOF( + text_format: "plain text, path=%REQ(:path)%, code=%RESPONSE_CODE%" +)EOF"; 
+ TestUtility::loadFromYaml(yaml, config_); + + auto formatter = SubstitutionFormatStringUtils::fromProtoConfig(config_); + EXPECT_EQ( + "plain text, path=/bar/foo, code=200", + formatter->format(request_headers_, response_headers_, response_trailers_, stream_info_)); +} + +TEST_F(SubstitutionFormatStringUtilsTest, TestFromProtoConfigJson) { + const std::string yaml = R"EOF( + json_format: + text: "plain text" + path: "%REQ(:path)%" + code: "%RESPONSE_CODE%" +)EOF"; + TestUtility::loadFromYaml(yaml, config_); + + auto formatter = SubstitutionFormatStringUtils::fromProtoConfig(config_); + const auto out_json = + formatter->format(request_headers_, response_headers_, response_trailers_, stream_info_); + + const std::string expected = R"EOF({ + "text": "plain text", + "path": "/bar/foo", + "code": 200 +})EOF"; + EXPECT_TRUE(TestUtility::jsonStringEqual(out_json, expected)); +} + +TEST_F(SubstitutionFormatStringUtilsTest, TestInvalidConfigs) { + const std::vector invalid_configs = { + R"( + json_format: + field: true +)", + R"( + json_format: + field: 200 +)", + R"( + json_format: + field: + nest_field: "value" +)", + }; + for (const auto& yaml : invalid_configs) { + TestUtility::loadFromYaml(yaml, config_); + EXPECT_THROW_WITH_MESSAGE(SubstitutionFormatStringUtils::fromProtoConfig(config_), + EnvoyException, + "Only string values are supported in the JSON access log format."); + } +} + +} // namespace Envoy diff --git a/test/common/tcp_proxy/tcp_proxy_test.cc b/test/common/tcp_proxy/tcp_proxy_test.cc index ee399514b752..5e4c0b70bf70 100644 --- a/test/common/tcp_proxy/tcp_proxy_test.cc +++ b/test/common/tcp_proxy/tcp_proxy_test.cc @@ -802,7 +802,7 @@ TEST(ConfigTest, AccessLogConfig) { { envoy::extensions::access_loggers::file::v3::FileAccessLog file_access_log; file_access_log.set_path("some_path"); - file_access_log.set_format("the format specifier"); + file_access_log.mutable_log_format()->set_text_format("the format specifier"); log->mutable_typed_config()->PackFrom(file_access_log); } @@ -859,7 +859,7 @@ class TcpProxyTest : public testing::Test { access_log->set_name(Extensions::AccessLoggers::AccessLogNames::get().File); envoy::extensions::access_loggers::file::v3::FileAccessLog file_access_log; file_access_log.set_path("unused"); - file_access_log.set_format(access_log_format); + file_access_log.mutable_log_format()->set_text_format(access_log_format); access_log->mutable_typed_config()->PackFrom(file_access_log); return config; } diff --git a/test/config/utility.cc b/test/config/utility.cc index 359925892a6d..14a1bbce9386 100644 --- a/test/config/utility.cc +++ b/test/config/utility.cc @@ -766,7 +766,7 @@ bool ConfigHelper::setAccessLog(const std::string& filename, absl::string_view f loadHttpConnectionManager(hcm_config); envoy::extensions::access_loggers::file::v3::FileAccessLog access_log_config; if (!format.empty()) { - access_log_config.set_format(absl::StrCat(format, "\n")); + access_log_config.mutable_log_format()->set_text_format(absl::StrCat(format, "\n")); } access_log_config.set_path(filename); hcm_config.mutable_access_log(0)->mutable_typed_config()->PackFrom(access_log_config); @@ -781,7 +781,7 @@ bool ConfigHelper::setListenerAccessLog(const std::string& filename, absl::strin } envoy::extensions::access_loggers::file::v3::FileAccessLog access_log_config; if (!format.empty()) { - access_log_config.set_format(std::string(format)); + access_log_config.mutable_log_format()->set_text_format(std::string(format)); } access_log_config.set_path(filename); 
bootstrap_.mutable_static_resources() diff --git a/test/extensions/access_loggers/file/config_test.cc b/test/extensions/access_loggers/file/config_test.cc index 19343c80bdbe..d6225b29ff2a 100644 --- a/test/extensions/access_loggers/file/config_test.cc +++ b/test/extensions/access_loggers/file/config_test.cc @@ -53,7 +53,7 @@ TEST(FileAccessLogConfigTest, ConfigureFromProto) { EXPECT_NE(nullptr, dynamic_cast(log.get())); } -TEST(FileAccessLogConfigTest, FileAccessLogTest) { +TEST(FileAccessLogConfigTest, DEPRECATED_FEATURE_TEST(FileAccessLogTest)) { auto factory = Registry::FactoryRegistry::getFactory( AccessLogNames::get().File); @@ -76,7 +76,7 @@ TEST(FileAccessLogConfigTest, FileAccessLogTest) { EXPECT_NE(nullptr, dynamic_cast(instance.get())); } -TEST(FileAccessLogConfigTest, FileAccessLogJsonTest) { +TEST(FileAccessLogConfigTest, DEPRECATED_FEATURE_TEST(FileAccessLogJsonTest)) { envoy::config::accesslog::v3::AccessLog config; NiceMock context; @@ -110,7 +110,7 @@ TEST(FileAccessLogConfigTest, FileAccessLogJsonTest) { EXPECT_NE(nullptr, dynamic_cast(log.get())); } -TEST(FileAccessLogConfigTest, FileAccessLogTypedJsonTest) { +TEST(FileAccessLogConfigTest, DEPRECATED_FEATURE_TEST(FileAccessLogTypedJsonTest)) { envoy::config::accesslog::v3::AccessLog config; envoy::extensions::access_loggers::file::v3::FileAccessLog fal_config; @@ -136,51 +136,69 @@ TEST(FileAccessLogConfigTest, FileAccessLogTypedJsonTest) { EXPECT_NE(nullptr, dynamic_cast(log.get())); } -TEST(FileAccessLogConfigTest, FileAccessLogJsonWithBoolValueTest) { - { - // Make sure we fail if you set a bool value in the format dictionary - envoy::config::accesslog::v3::AccessLog config; - config.set_name(AccessLogNames::get().File); +TEST(FileAccessLogConfigTest, DEPRECATED_FEATURE_TEST(FileAccessLogDeprecatedFormat)) { + const std::vector configs{ + R"( + path: "/foo" + format: "plain_text" +)", + R"( + path: "/foo" + json_format: + text: "plain_text" +)", + R"( + path: "/foo" + typed_json_format: + text: "plain_text" +)", + }; + + for (const auto& yaml : configs) { envoy::extensions::access_loggers::file::v3::FileAccessLog fal_config; - fal_config.set_path("/dev/null"); - - ProtobufWkt::Value bool_value; - bool_value.set_bool_value(false); - auto json_format = fal_config.mutable_json_format(); - (*json_format->mutable_fields())["protocol"] = bool_value; + TestUtility::loadFromYaml(yaml, fal_config); + envoy::config::accesslog::v3::AccessLog config; config.mutable_typed_config()->PackFrom(fal_config); + NiceMock context; + AccessLog::InstanceSharedPtr log = AccessLog::AccessLogFactory::fromProto(config, context); - EXPECT_THROW_WITH_MESSAGE(AccessLog::AccessLogFactory::fromProto(config, context), - EnvoyException, - "Only string values are supported in the JSON access log format."); + EXPECT_NE(nullptr, log); + EXPECT_NE(nullptr, dynamic_cast(log.get())); } } -TEST(FileAccessLogConfigTest, FileAccessLogJsonWithNestedKeyTest) { - { - // Make sure we fail if you set a nested Struct value in the format dictionary - envoy::config::accesslog::v3::AccessLog config; - config.set_name(AccessLogNames::get().File); +TEST(FileAccessLogConfigTest, FileAccessLogCheckLogFormat) { + const std::vector configs{ + // log_format: text_format + R"( + path: "/foo" + log_format: + text_format: "plain_text" +)", + + // log_format: json_format + R"( + path: "/foo" + log_format: + json_format: + text: "plain_text" +)", + }; + + for (const auto& yaml : configs) { envoy::extensions::access_loggers::file::v3::FileAccessLog fal_config; - 
fal_config.set_path("/dev/null"); - - ProtobufWkt::Value string_value; - string_value.set_string_value("some_nested_value"); - - ProtobufWkt::Value struct_value; - (*struct_value.mutable_struct_value()->mutable_fields())["some_nested_key"] = string_value; - - auto json_format = fal_config.mutable_json_format(); - (*json_format->mutable_fields())["top_level_key"] = struct_value; + TestUtility::loadFromYaml(yaml, fal_config); + envoy::config::accesslog::v3::AccessLog config; config.mutable_typed_config()->PackFrom(fal_config); + NiceMock context; + AccessLog::InstanceSharedPtr log = AccessLog::AccessLogFactory::fromProto(config, context); - EXPECT_THROW_WITH_MESSAGE(AccessLog::AccessLogFactory::fromProto(config, context), - EnvoyException, - "Only string values are supported in the JSON access log format."); + EXPECT_NE(nullptr, log); + EXPECT_NE(nullptr, dynamic_cast(log.get())); } } diff --git a/test/integration/tcp_proxy_integration_test.cc b/test/integration/tcp_proxy_integration_test.cc index 9742d24fc3c8..7d5f5db0c4e0 100644 --- a/test/integration/tcp_proxy_integration_test.cc +++ b/test/integration/tcp_proxy_integration_test.cc @@ -250,7 +250,7 @@ TEST_P(TcpProxyIntegrationTest, AccessLog) { access_log->set_name("accesslog"); envoy::extensions::access_loggers::file::v3::FileAccessLog access_log_config; access_log_config.set_path(access_log_path); - access_log_config.set_format( + access_log_config.mutable_log_format()->set_text_format( "upstreamlocal=%UPSTREAM_LOCAL_ADDRESS% " "upstreamhost=%UPSTREAM_HOST% downstream=%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT% " "sent=%BYTES_SENT% received=%BYTES_RECEIVED%\n"); From ae7ef2f09886b905f87aae5dbf85856525b0ee9c Mon Sep 17 00:00:00 2001 From: asraa Date: Fri, 15 May 2020 16:53:47 -0400 Subject: [PATCH 184/909] [fuzz] add metadata and dispatching actions to codec fuzzer (#11173) This adds an encodeMetadata action in the codec_impl_fuzz_test since we don't have any coverage over the metadata code yet. We can specify sending metadata in the form of a map test.fuzz.Metadata in requests or responses. * This also adds dispatching_actions that can be specified in a request stream action. These will apply a response directional action while request headers, data, or trailers are dispatching. A bug was reproduced where metadata crashed while dispatching * Also adds a case where the stream ID in the metadata frame is corrupted to produce an invalid stream ID. Previously, this crashed. 
TODO: Add metadata frames to an e2e H/2 fuzzer Risk level: Low Testing: added corpus entries that repro'd two qod: #10732 is reproduced by the entry metadata_corrupt #10034 is reproduced by the entry metadata_dispatching Signed-off-by: Asra Ali --- test/common/http/codec_impl_corpus/example | 26 ++++ test/common/http/codec_impl_corpus/metadata | 114 ++++++++++++++++++ .../http/codec_impl_corpus/metadata_corrupt | 52 ++++++++ .../http/codec_impl_corpus/metadata_dispatch | 61 ++++++++++ .../http/codec_impl_corpus/simple_stream | 10 ++ test/common/http/codec_impl_fuzz.proto | 10 ++ test/common/http/codec_impl_fuzz_test.cc | 36 ++++++ test/fuzz/common.proto | 4 + test/fuzz/utility.h | 14 +++ 9 files changed, 327 insertions(+) create mode 100644 test/common/http/codec_impl_corpus/metadata create mode 100644 test/common/http/codec_impl_corpus/metadata_corrupt create mode 100644 test/common/http/codec_impl_corpus/metadata_dispatch diff --git a/test/common/http/codec_impl_corpus/example b/test/common/http/codec_impl_corpus/example index 897c48be43d6..4efce4512644 100644 --- a/test/common/http/codec_impl_corpus/example +++ b/test/common/http/codec_impl_corpus/example @@ -89,6 +89,32 @@ actions { } } } +actions { + stream_action { + stream_id: 1 + response { + metadata { + metadata { + key: "a" + value: "a" + } + } + } + } +} +actions { + stream_action { + stream_id: 1 + response { + metadata { + metadata { + key: "a" + value: "a" + } + } + } + } +} actions { stream_action { stream_id: 1 diff --git a/test/common/http/codec_impl_corpus/metadata b/test/common/http/codec_impl_corpus/metadata new file mode 100644 index 000000000000..cc07a24428f3 --- /dev/null +++ b/test/common/http/codec_impl_corpus/metadata @@ -0,0 +1,114 @@ +actions { + new_stream { + request_headers { + headers { + key: ":method" + value: "GET" + } + headers { + key: ":path" + value: "/" + } + headers { + key: ":scheme" + value: "http" + } + headers { + key: ":authority" + value: "foo.com" + } + headers { + key: "blah" + value: "nosniff" + } + headers { + key: "cookie" + value: "foo=bar" + } + headers { + key: "cookie" + value: "foo2=bar2" + } + } + } +} +actions { quiesce_drain {} } + +actions { + stream_action { + stream_id: 1 + request { + data: 128000 + } + } +} +actions { quiesce_drain {} } +actions { + stream_action { + stream_id: 1 + response { + metadata { + metadata { + key: "aaaaaaaaaaaaaaaaaaaaaaaa" + value: "bbbbbbbbbbbbbbbbbbbbbbb" + } + metadata { + key: "aaaaaaaaaaaaaaaaaaaaaaaa" + value: "bbbbbbbbbbbbbbbbbbbbbbb" + } + metadata { + key: "aaaaaaaaaaaaaaaaaaaaaaaa" + value: "bbbbbbbbbbbbbbbbbbbbbbb" + } + } + } + } +} +actions { + stream_action { + stream_id: 2 + response { + metadata { + metadata { + key: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + value: "bbbbbbbbbbbbbbbbbbbbbbb" + } + } + } + } +} +actions { + new_stream { + request_headers { + headers { + key: ":method" + value: "GET" + } + headers { + key: ":path" + value: "/" + } + headers { + key: ":scheme" + value: "http" + } + headers { + key: ":authority" + value: "foo.com" + } + headers { + key: "blah" + value: "nosniff" + } + headers { + key: "cookie" + value: "foo=bar" + } + headers { + key: "cookie" + value: "foo2=bar2" + } + } + } +} +actions { quiesce_drain {} } diff --git a/test/common/http/codec_impl_corpus/metadata_corrupt b/test/common/http/codec_impl_corpus/metadata_corrupt new file mode 100644 index 000000000000..04176a675294 --- /dev/null +++ b/test/common/http/codec_impl_corpus/metadata_corrupt @@ -0,0 +1,52 @@ +actions { + new_stream { + 
request_headers { + headers { + key: ":method" + value: "GET" + } + headers { + key: ":path" + value: "/" + } + headers { + key: ":scheme" + value: "http" + } + headers { + key: ":authority" + value: "foo.com" + } + } + } +} +actions { quiesce_drain {} } +actions { + stream_action { + stream_id: 1 + request { + metadata { + metadata { + key: "header_key1" + value: "header_value1" + } + metadata { + key: "header_key2" + value: "header_value2" + } + metadata { + key: "header_key3" + value: "header_value3" + } + } + } + } +} +actions { + mutate { + buffer: 0 + offset: 8 + value: 0 + } +} +actions { quiesce_drain {} } \ No newline at end of file diff --git a/test/common/http/codec_impl_corpus/metadata_dispatch b/test/common/http/codec_impl_corpus/metadata_dispatch new file mode 100644 index 000000000000..cbacc6a4126a --- /dev/null +++ b/test/common/http/codec_impl_corpus/metadata_dispatch @@ -0,0 +1,61 @@ +actions { + new_stream { + request_headers { + headers { + key: ":method" + value: "GET" + } + headers { + key: ":path" + value: "/" + } + headers { + key: ":scheme" + value: "http" + } + headers { + key: ":authority" + value: "foo.com" + } + headers { + key: "blah" + value: "nosniff" + } + headers { + key: "cookie" + value: "foo=bar" + } + headers { + key: "cookie" + value: "foo2=bar2" + } + } + } +} +actions { quiesce_drain {} } +actions { + stream_action { + stream_id: 1 + request { + data: 128000 + } + dispatching_action { + metadata { + metadata { + key: "aaaaaaaaaaaaaaaaaaaaaaaa" + value: "bbbbbbbbbbbbbbbbbbbbbbb" + } + metadata { + key: "aaaaaaaaaaaaaaaaaaaaaaaa" + value: "bbbbbbbbbbbbbbbbbbbbbbb" + } + metadata { + key: "aaaaaaaaaaaaaaaaaaaaaaaa" + value: "bbbbbbbbbbbbbbbbbbbbbbb" + } + } + } + + } +} +actions { quiesce_drain {} } \ No newline at end of file diff --git a/test/common/http/codec_impl_corpus/simple_stream b/test/common/http/codec_impl_corpus/simple_stream index dc23bacfcd36..294b8df5a349 100644 --- a/test/common/http/codec_impl_corpus/simple_stream +++ b/test/common/http/codec_impl_corpus/simple_stream @@ -1,5 +1,15 @@ actions { new_stream { + metadata { + metadata { + key: "" + value: "" + } + metadata { + key: "new_key" + value: "new_value" + } + } request_headers { headers { key: ":method" diff --git a/test/common/http/codec_impl_fuzz.proto b/test/common/http/codec_impl_fuzz.proto index f5d39f9ded2f..99f9f9592b05 100644 --- a/test/common/http/codec_impl_fuzz.proto +++ b/test/common/http/codec_impl_fuzz.proto @@ -10,6 +10,9 @@ import "test/fuzz/common.proto"; // Structured input for H2 codec_impl_fuzz_test. message NewStream { + // Optional metadata before request headers. + // Metadata sent after request headers can be send via a directional action. + test.fuzz.Metadata metadata = 3; test.fuzz.Headers request_headers = 1 [(validate.rules).message.required = true]; bool end_stream = 2; } @@ -22,6 +25,7 @@ message DirectionalAction { uint32 data = 3; string data_value = 8; test.fuzz.Headers trailers = 4; + test.fuzz.Metadata metadata = 9; uint32 reset_stream = 5; bool read_disable = 6; } @@ -36,6 +40,12 @@ message StreamAction { DirectionalAction request = 2; DirectionalAction response = 3; } + // Optionally set a dispatching action. This is a directional action that will + // be called while the stream action is sending headers, data, or trailers. + // This will only apply to request stream actions (so that the dispatching + // action occurs in the response direction). This may happen as a result of a + // filter sending a direct response. 
+ DirectionalAction dispatching_action = 4; } message MutateAction { diff --git a/test/common/http/codec_impl_fuzz_test.cc b/test/common/http/codec_impl_fuzz_test.cc index 7163b53c0dd8..aad44b0f5609 100644 --- a/test/common/http/codec_impl_fuzz_test.cc +++ b/test/common/http/codec_impl_fuzz_test.cc @@ -88,6 +88,7 @@ fromHttp2Settings(const test::common::http::Http2Settings& settings) { settings.initial_connection_window_size() % (1 + Http2Utility::OptionsLimits::MAX_INITIAL_CONNECTION_WINDOW_SIZE - Http2Utility::OptionsLimits::MIN_INITIAL_CONNECTION_WINDOW_SIZE)); + options.set_allow_metadata(true); return options; } @@ -180,6 +181,7 @@ class HttpStream : public LinkedObject { if (!end_stream) { request_.request_encoder_->getStream().addCallbacks(request_.stream_callbacks_); } + request_.request_encoder_->encodeHeaders(request_headers, end_stream); request_.stream_state_ = end_stream ? StreamState::Closed : StreamState::PendingDataOrTrailers; response_.stream_state_ = StreamState::PendingHeaders; @@ -271,6 +273,17 @@ class HttpStream : public LinkedObject { } break; } + case test::common::http::DirectionalAction::kMetadata: { + if (state.isLocalOpen() && state.stream_state_ != StreamState::Closed) { + if (response) { + state.response_encoder_->encodeMetadata( + Fuzz::fromMetadata(directional_action.metadata())); + } else { + state.request_encoder_->encodeMetadata(Fuzz::fromMetadata(directional_action.metadata())); + } + } + break; + } case test::common::http::DirectionalAction::kResetStream: { if (state.stream_state_ != StreamState::Closed) { StreamEncoder* encoder; @@ -318,6 +331,29 @@ class HttpStream : public LinkedObject { ENVOY_LOG_MISC(debug, "Request stream action on {} in state {} {}", stream_index_, static_cast(request_.stream_state_), static_cast(response_.stream_state_)); + if (stream_action.has_dispatching_action()) { + // Simulate some response action while dispatching request headers, data, or trailers. This + // may happen as a result of a filter sending a direct response. + ENVOY_LOG_MISC(debug, "Setting dispatching action on {} in state {} {}", stream_index_, + static_cast(request_.stream_state_), + static_cast(response_.stream_state_)); + auto request_action = stream_action.request().directional_action_selector_case(); + if (request_action == test::common::http::DirectionalAction::kHeaders) { + EXPECT_CALL(request_.request_decoder_, decodeHeaders_(_, _)) + .WillOnce(InvokeWithoutArgs( + [&] { directionalAction(response_, stream_action.dispatching_action()); })); + } else if (request_action == test::common::http::DirectionalAction::kData) { + EXPECT_CALL(request_.request_decoder_, decodeData(_, _)) + .Times(testing::AtLeast(1)) + .WillRepeatedly(InvokeWithoutArgs( + [&] { directionalAction(response_, stream_action.dispatching_action()); })); + } else if (request_action == test::common::http::DirectionalAction::kTrailers) { + EXPECT_CALL(request_.request_decoder_, decodeTrailers_(_)) + .WillOnce(InvokeWithoutArgs( + [&] { directionalAction(response_, stream_action.dispatching_action()); })); + } + } + // Perform the stream action. directionalAction(request_, stream_action.request()); break; } diff --git a/test/fuzz/common.proto b/test/fuzz/common.proto index 92df9a1b4021..7bc55e131457 100644 --- a/test/fuzz/common.proto +++ b/test/fuzz/common.proto @@ -16,6 +16,10 @@ message Headers { repeated envoy.config.core.v3.HeaderValue headers = 1; } +message Metadata { + map metadata = 1; +} + message HttpBody { // The bytes that will be used as the request body. 
repeated string data = 1 [(validate.rules).repeated .min_items = 1]; diff --git a/test/fuzz/utility.h b/test/fuzz/utility.h index b0a16930fbeb..8347e09f2e77 100644 --- a/test/fuzz/utility.h +++ b/test/fuzz/utility.h @@ -99,6 +99,20 @@ inline T fromHeaders( return header_map; } +// Convert from test proto Metadata to MetadataMap +inline Http::MetadataMapVector fromMetadata(const test::fuzz::Metadata& metadata) { + Http::MetadataMapVector metadata_map_vector; + if (!metadata.metadata().empty()) { + Http::MetadataMap metadata_map; + Http::MetadataMapPtr metadata_map_ptr = std::make_unique(metadata_map); + for (const auto& pair : metadata.metadata()) { + metadata_map_ptr->insert(pair); + } + metadata_map_vector.push_back(std::move(metadata_map_ptr)); + } + return metadata_map_vector; +} + // Convert from HeaderMap to test proto Headers. inline test::fuzz::Headers toHeaders(const Http::HeaderMap& headers) { test::fuzz::Headers fuzz_headers; From 245363004b2be750d9081a04a592630e59502afc Mon Sep 17 00:00:00 2001 From: Tony Allen Date: Fri, 15 May 2020 14:58:23 -0700 Subject: [PATCH 185/909] Refactor resource manager (#11182) This patch separates the Resource class from the resource manager implementation and allows for resource limit tracking in other parts of the code base. Signed-off-by: Tony Allen --- include/envoy/common/BUILD | 5 ++ include/envoy/common/resource.h | 47 ++++++++++++ include/envoy/network/BUILD | 1 + include/envoy/upstream/BUILD | 1 + include/envoy/upstream/resource_manager.h | 63 ++++------------ source/common/common/BUILD | 9 +++ source/common/common/basic_resource_impl.h | 60 +++++++++++++++ source/common/upstream/BUILD | 1 + source/common/upstream/conn_pool_map_impl.h | 2 +- .../common/upstream/resource_manager_impl.h | 60 +++++++-------- source/server/BUILD | 1 + source/server/admin/BUILD | 1 + test/common/common/BUILD | 9 +++ .../common/common/basic_resource_impl_test.cc | 73 +++++++++++++++++++ test/integration/stats_integration_test.cc | 6 +- 15 files changed, 252 insertions(+), 87 deletions(-) create mode 100644 include/envoy/common/resource.h create mode 100644 source/common/common/basic_resource_impl.h create mode 100644 test/common/common/basic_resource_impl_test.cc diff --git a/include/envoy/common/BUILD b/include/envoy/common/BUILD index 47dd8e1549ef..b950bcbd7fbe 100644 --- a/include/envoy/common/BUILD +++ b/include/envoy/common/BUILD @@ -29,6 +29,11 @@ envoy_cc_library( hdrs = ["mutex_tracer.h"], ) +envoy_cc_library( + name = "resource_interface", + hdrs = ["resource.h"], +) + envoy_cc_library( name = "time_interface", hdrs = ["time.h"], diff --git a/include/envoy/common/resource.h b/include/envoy/common/resource.h new file mode 100644 index 000000000000..6b04afcfdf4b --- /dev/null +++ b/include/envoy/common/resource.h @@ -0,0 +1,47 @@ +#include + +#include "envoy/common/pure.h" + +#pragma once + +namespace Envoy { + +/** + * A handle for use by any resource managers. + */ +class ResourceLimit { +public: + virtual ~ResourceLimit() = default; + + /** + * @return true if the resource can be created. + */ + virtual bool canCreate() PURE; + + /** + * Increment the resource count. + */ + virtual void inc() PURE; + + /** + * Decrement the resource count. + */ + virtual void dec() PURE; + + /** + * Decrement the resource count by a specific amount. + */ + virtual void decBy(uint64_t amount) PURE; + + /** + * @return the current maximum allowed number of this resource. + */ + virtual uint64_t max() PURE; + + /** + * @return the current resource count. 
+ */ + virtual uint64_t count() const PURE; +}; + +} // namespace Envoy diff --git a/include/envoy/network/BUILD b/include/envoy/network/BUILD index 229ad3019523..16a0945a2770 100644 --- a/include/envoy/network/BUILD +++ b/include/envoy/network/BUILD @@ -118,6 +118,7 @@ envoy_cc_library( ":connection_interface", ":listen_socket_interface", "//include/envoy/access_log:access_log_interface", + "//include/envoy/common:resource_interface", "//include/envoy/stats:stats_interface", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], diff --git a/include/envoy/upstream/BUILD b/include/envoy/upstream/BUILD index 2ecf8e601b7c..c6fe37d45beb 100644 --- a/include/envoy/upstream/BUILD +++ b/include/envoy/upstream/BUILD @@ -114,6 +114,7 @@ envoy_cc_library( envoy_cc_library( name = "resource_manager_interface", hdrs = ["resource_manager.h"], + deps = ["//include/envoy/common:resource_interface"], ) envoy_cc_library( diff --git a/include/envoy/upstream/resource_manager.h b/include/envoy/upstream/resource_manager.h index c10ff89c033f..5cac59a1a0ad 100644 --- a/include/envoy/upstream/resource_manager.h +++ b/include/envoy/upstream/resource_manager.h @@ -5,6 +5,7 @@ #include #include "envoy/common/pure.h" +#include "envoy/common/resource.h" namespace Envoy { namespace Upstream { @@ -16,54 +17,16 @@ namespace Upstream { enum class ResourcePriority { Default, High }; const size_t NumResourcePriorities = 2; -/** - * An individual resource tracked by the resource manager. - */ -class Resource { -public: - virtual ~Resource() = default; - - /** - * @return true if the resource can be created. - */ - virtual bool canCreate() PURE; - - /** - * Increment the resource count. - */ - virtual void inc() PURE; - - /** - * Decrement the resource count. - */ - virtual void dec() PURE; - - /** - * Decrement the resource count by a specific amount. - */ - virtual void decBy(uint64_t amount) PURE; - - /** - * @return the current maximum allowed number of this resource. - */ - virtual uint64_t max() PURE; - - /** - * @return the current resource count. - */ - virtual uint64_t count() const PURE; -}; - /** * RAII wrapper that increments a resource on construction and decrements it on destruction. */ class ResourceAutoIncDec { public: - ResourceAutoIncDec(Resource& resource) : resource_(resource) { resource_.inc(); } + ResourceAutoIncDec(ResourceLimit& resource) : resource_(resource) { resource_.inc(); } ~ResourceAutoIncDec() { resource_.dec(); } private: - Resource& resource_; + ResourceLimit& resource_; }; using ResourceAutoIncDecPtr = std::unique_ptr; @@ -78,31 +41,31 @@ class ResourceManager { virtual ~ResourceManager() = default; /** - * @return Resource& active TCP connections and UDP sessions. + * @return ResourceLimit& active TCP connections and UDP sessions. */ - virtual Resource& connections() PURE; + virtual ResourceLimit& connections() PURE; /** - * @return Resource& active pending requests (requests that have not yet been attached to a + * @return ResourceLimit& active pending requests (requests that have not yet been attached to a * connection pool connection). */ - virtual Resource& pendingRequests() PURE; + virtual ResourceLimit& pendingRequests() PURE; /** - * @return Resource& active requests (requests that are currently bound to a connection pool + * @return ResourceLimit& active requests (requests that are currently bound to a connection pool * connection and are awaiting response). */ - virtual Resource& requests() PURE; + virtual ResourceLimit& requests() PURE; /** - * @return Resource& active retries. 
+ * @return ResourceLimit& active retries. */ - virtual Resource& retries() PURE; + virtual ResourceLimit& retries() PURE; /** - * @return Resource& active connection pools. + * @return ResourceLimit& active connection pools. */ - virtual Resource& connectionPools() PURE; + virtual ResourceLimit& connectionPools() PURE; }; } // namespace Upstream diff --git a/source/common/common/BUILD b/source/common/common/BUILD index 087b4dec7cb2..5670b3c73629 100644 --- a/source/common/common/BUILD +++ b/source/common/common/BUILD @@ -212,6 +212,15 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "basic_resource_lib", + hdrs = ["basic_resource_impl.h"], + deps = [ + "//include/envoy/common:resource_interface", + "//include/envoy/runtime:runtime_interface", + ], +) + envoy_cc_library( name = "macros", hdrs = ["macros.h"], diff --git a/source/common/common/basic_resource_impl.h b/source/common/common/basic_resource_impl.h new file mode 100644 index 000000000000..8fe93aaabcb9 --- /dev/null +++ b/source/common/common/basic_resource_impl.h @@ -0,0 +1,60 @@ +#pragma once + +#include + +#include "envoy/common/resource.h" +#include "envoy/runtime/runtime.h" + +#include "common/common/assert.h" + +#include "absl/types/optional.h" + +namespace Envoy { + +/** + * A handle to track some limited resource. + * + * NOTE: + * This implementation makes some assumptions which favor simplicity over correctness. Though + * atomics are used, it is possible for resources to temporarily go above the supplied maximums. + * This should not effect overall behavior. + */ +class BasicResourceLimitImpl : public ResourceLimit { +public: + BasicResourceLimitImpl(uint64_t max, Runtime::Loader& runtime, const std::string& runtime_key) + : max_(max), runtime_(&runtime), runtime_key_(runtime_key) {} + BasicResourceLimitImpl(uint64_t max) : max_(max), runtime_(nullptr) {} + BasicResourceLimitImpl() : max_(std::numeric_limits::max()), runtime_(nullptr) {} + + bool canCreate() override { return current_.load() < max(); } + + void inc() override { ++current_; } + + void dec() override { decBy(1); } + + void decBy(uint64_t amount) override { + ASSERT(current_ >= amount); + current_ -= amount; + } + + uint64_t max() override { + return (runtime_ != nullptr && runtime_key_.has_value()) + ? 
runtime_->snapshot().getInteger(runtime_key_.value(), max_) + : max_; + } + + uint64_t count() const override { return current_.load(); } + + void setMax(uint64_t new_max) { max_ = new_max; } + void resetMax() { max_ = std::numeric_limits::max(); } + +protected: + std::atomic current_{}; + +private: + uint64_t max_; + Runtime::Loader* runtime_{nullptr}; + const absl::optional runtime_key_; +}; + +} // namespace Envoy diff --git a/source/common/upstream/BUILD b/source/common/upstream/BUILD index 608c24bbfb3f..2dd83d85143b 100644 --- a/source/common/upstream/BUILD +++ b/source/common/upstream/BUILD @@ -310,6 +310,7 @@ envoy_cc_library( "//include/envoy/upstream:resource_manager_interface", "//include/envoy/upstream:upstream_interface", "//source/common/common:assert_lib", + "//source/common/common:basic_resource_lib", ], ) diff --git a/source/common/upstream/conn_pool_map_impl.h b/source/common/upstream/conn_pool_map_impl.h index 943e0b469c46..c0daca8ef37a 100644 --- a/source/common/upstream/conn_pool_map_impl.h +++ b/source/common/upstream/conn_pool_map_impl.h @@ -30,7 +30,7 @@ ConnPoolMap::getPool(KEY_TYPE key, const PoolFactory& facto if (pool_iter != active_pools_.end()) { return std::ref(*(pool_iter->second)); } - Resource& connPoolResource = host_->cluster().resourceManager(priority_).connectionPools(); + ResourceLimit& connPoolResource = host_->cluster().resourceManager(priority_).connectionPools(); // We need a new pool. Check if we have room. if (!connPoolResource.canCreate()) { // We're full. Try to free up a pool. If we can't, bail out. diff --git a/source/common/upstream/resource_manager_impl.h b/source/common/upstream/resource_manager_impl.h index e360c90206b7..12d0d498fc72 100644 --- a/source/common/upstream/resource_manager_impl.h +++ b/source/common/upstream/resource_manager_impl.h @@ -5,11 +5,13 @@ #include #include +#include "envoy/common/resource.h" #include "envoy/runtime/runtime.h" #include "envoy/upstream/resource_manager.h" #include "envoy/upstream/upstream.h" #include "common/common/assert.h" +#include "common/common/basic_resource_impl.h" namespace Envoy { namespace Upstream { @@ -44,38 +46,33 @@ class ResourceManagerImpl : public ResourceManager { pending_requests_) {} // Upstream::ResourceManager - Resource& connections() override { return connections_; } - Resource& pendingRequests() override { return pending_requests_; } - Resource& requests() override { return requests_; } - Resource& retries() override { return retries_; } - Resource& connectionPools() override { return connection_pools_; } + ResourceLimit& connections() override { return connections_; } + ResourceLimit& pendingRequests() override { return pending_requests_; } + ResourceLimit& requests() override { return requests_; } + ResourceLimit& retries() override { return retries_; } + ResourceLimit& connectionPools() override { return connection_pools_; } private: - struct ResourceImpl : public Resource { - ResourceImpl(uint64_t max, Runtime::Loader& runtime, const std::string& runtime_key, - Stats::Gauge& open_gauge, Stats::Gauge& remaining) - : max_(max), runtime_(runtime), runtime_key_(runtime_key), open_gauge_(open_gauge), + struct ManagedResourceImpl : public BasicResourceLimitImpl { + ManagedResourceImpl(uint64_t max, Runtime::Loader& runtime, const std::string& runtime_key, + Stats::Gauge& open_gauge, Stats::Gauge& remaining) + : BasicResourceLimitImpl(max, runtime, runtime_key), open_gauge_(open_gauge), remaining_(remaining) { remaining_.set(max); } - ~ResourceImpl() override { ASSERT(current_ 
== 0); } // Upstream::Resource bool canCreate() override { return current_ < max(); } void inc() override { - current_++; + BasicResourceLimitImpl::inc(); updateRemaining(); - open_gauge_.set(canCreate() ? 0 : 1); + open_gauge_.set(BasicResourceLimitImpl::canCreate() ? 0 : 1); } - void dec() override { decBy(1); } void decBy(uint64_t amount) override { - ASSERT(current_ >= amount); - current_ -= amount; + BasicResourceLimitImpl::decBy(amount); updateRemaining(); - open_gauge_.set(canCreate() ? 0 : 1); + open_gauge_.set(BasicResourceLimitImpl::canCreate() ? 0 : 1); } - uint64_t max() override { return runtime_.snapshot().getInteger(runtime_key_, max_); } - uint64_t count() const override { return current_.load(); } /** * We set the gauge instead of incrementing and decrementing because, @@ -91,11 +88,6 @@ class ResourceManagerImpl : public ResourceManager { remaining_.set(max() > current_copy ? max() - current_copy : 0); } - const uint64_t max_; - std::atomic current_{}; - Runtime::Loader& runtime_; - const std::string runtime_key_; - /** * A gauge to notify the live circuit breaker state. The gauge is set to 0 * to notify that the circuit breaker is not yet triggered. @@ -108,14 +100,14 @@ class ResourceManagerImpl : public ResourceManager { Stats::Gauge& remaining_; }; - class RetryBudgetImpl : public Resource { + class RetryBudgetImpl : public ResourceLimit { public: RetryBudgetImpl(absl::optional budget_percent, absl::optional min_retry_concurrency, uint64_t max_retries, Runtime::Loader& runtime, const std::string& retry_budget_runtime_key, const std::string& max_retries_runtime_key, Stats::Gauge& open_gauge, - Stats::Gauge& remaining, const Resource& requests, - const Resource& pending_requests) + Stats::Gauge& remaining, const ResourceLimit& requests, + const ResourceLimit& pending_requests) : runtime_(runtime), max_retry_resource_(max_retries, runtime, max_retries_runtime_key, open_gauge, remaining), budget_percent_(budget_percent), min_retry_concurrency_(min_retry_concurrency), @@ -123,7 +115,7 @@ class ResourceManagerImpl : public ResourceManager { min_retry_concurrency_key_(retry_budget_runtime_key + "min_retry_concurrency"), requests_(requests), pending_requests_(pending_requests), remaining_(remaining) {} - // Upstream::Resource + // Envoy::ResourceLimit bool canCreate() override { if (!useRetryBudget()) { return max_retry_resource_.canCreate(); @@ -182,20 +174,20 @@ class ResourceManagerImpl : public ResourceManager { Runtime::Loader& runtime_; // The max_retry resource is nested within the budget to maintain state if the retry budget is // toggled. 
- ResourceImpl max_retry_resource_; + ManagedResourceImpl max_retry_resource_; const absl::optional budget_percent_; const absl::optional min_retry_concurrency_; const std::string budget_percent_key_; const std::string min_retry_concurrency_key_; - const Resource& requests_; - const Resource& pending_requests_; + const ResourceLimit& requests_; + const ResourceLimit& pending_requests_; Stats::Gauge& remaining_; }; - ResourceImpl connections_; - ResourceImpl pending_requests_; - ResourceImpl requests_; - ResourceImpl connection_pools_; + ManagedResourceImpl connections_; + ManagedResourceImpl pending_requests_; + ManagedResourceImpl requests_; + ManagedResourceImpl connection_pools_; RetryBudgetImpl retries_; }; diff --git a/source/server/BUILD b/source/server/BUILD index 42ef0a1b546b..7ffee07345b9 100644 --- a/source/server/BUILD +++ b/source/server/BUILD @@ -305,6 +305,7 @@ envoy_cc_library( "//include/envoy/server:transport_socket_config_interface", "//include/envoy/server:worker_interface", "//source/common/access_log:access_log_lib", + "//source/common/common:basic_resource_lib", "//source/common/common:empty_string", "//source/common/config:utility_lib", "//source/common/config:version_converter_lib", diff --git a/source/server/admin/BUILD b/source/server/admin/BUILD index 149ad12b408f..b97b3066f3b1 100644 --- a/source/server/admin/BUILD +++ b/source/server/admin/BUILD @@ -38,6 +38,7 @@ envoy_cc_library( "//source/common/access_log:access_log_lib", "//source/common/buffer:buffer_lib", "//source/common/common:assert_lib", + "//source/common/common:basic_resource_lib", "//source/common/common:empty_string", "//source/common/common:macros", "//source/common/common:minimal_logger_lib", diff --git a/test/common/common/BUILD b/test/common/common/BUILD index 6db03926358b..32ae464b0fe9 100644 --- a/test/common/common/BUILD +++ b/test/common/common/BUILD @@ -206,6 +206,15 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "basic_resource_impl_test", + srcs = ["basic_resource_impl_test.cc"], + deps = [ + "//source/common/common:basic_resource_lib", + "//test/mocks/runtime:runtime_mocks", + ], +) + envoy_cc_test( name = "token_bucket_impl_test", srcs = ["token_bucket_impl_test.cc"], diff --git a/test/common/common/basic_resource_impl_test.cc b/test/common/common/basic_resource_impl_test.cc new file mode 100644 index 000000000000..60481535d06c --- /dev/null +++ b/test/common/common/basic_resource_impl_test.cc @@ -0,0 +1,73 @@ +#include + +#include "common/common/basic_resource_impl.h" + +#include "test/mocks/runtime/mocks.h" + +#include "gtest/gtest.h" + +using testing::NiceMock; +using testing::Return; + +namespace Envoy { + +class BasicResourceLimitImplTest : public testing::Test { +protected: + NiceMock runtime_; +}; + +TEST_F(BasicResourceLimitImplTest, NoArgsConstructorVerifyMax) { + BasicResourceLimitImpl br; + + EXPECT_EQ(br.max(), std::numeric_limits::max()); +} + +TEST_F(BasicResourceLimitImplTest, VerifySetClearMax) { + BasicResourceLimitImpl br(123); + + EXPECT_EQ(br.max(), 123); + br.setMax(321); + EXPECT_EQ(br.max(), 321); + br.resetMax(); + EXPECT_EQ(br.max(), std::numeric_limits::max()); +} + +TEST_F(BasicResourceLimitImplTest, IncDecCount) { + BasicResourceLimitImpl br; + + EXPECT_EQ(br.count(), 0); + br.inc(); + EXPECT_EQ(br.count(), 1); + br.inc(); + br.inc(); + EXPECT_EQ(br.count(), 3); + br.dec(); + EXPECT_EQ(br.count(), 2); + br.decBy(2); + EXPECT_EQ(br.count(), 0); +} + +TEST_F(BasicResourceLimitImplTest, CanCreate) { + BasicResourceLimitImpl br(2); + + 
EXPECT_TRUE(br.canCreate()); + br.inc(); + EXPECT_TRUE(br.canCreate()); + br.inc(); + EXPECT_FALSE(br.canCreate()); + br.dec(); + EXPECT_TRUE(br.canCreate()); + br.dec(); +} + +TEST_F(BasicResourceLimitImplTest, RuntimeMods) { + BasicResourceLimitImpl br(1337, runtime_, "trololo"); + + EXPECT_CALL(runtime_.snapshot_, getInteger("trololo", 1337)).WillOnce(Return(555)); + EXPECT_EQ(br.max(), 555); + + EXPECT_CALL(runtime_.snapshot_, getInteger("trololo", 1337)).WillOnce(Return(1337)); + EXPECT_EQ(br.max(), 1337); +} + +} // namespace Envoy diff --git a/test/integration/stats_integration_test.cc b/test/integration/stats_integration_test.cc index 9221d3d38381..757eebef0eee 100644 --- a/test/integration/stats_integration_test.cc +++ b/test/integration/stats_integration_test.cc @@ -273,6 +273,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithFakeSymbolTable) { // 2020/04/07 10661 43349 44000 fix clang tidy on master // 2020/04/23 10531 44169 44600 http: max stream duration upstream support. // 2020/05/05 10908 44233 44600 router: add InternalRedirectPolicy and predicate + // 2020/05/13 10531 44425 44600 Refactor resource manager // Note: when adjusting this value: EXPECT_MEMORY_EQ is active only in CI // 'release' builds, where we control the platform and tool-chain. So you @@ -286,7 +287,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithFakeSymbolTable) { // If you encounter a failure here, please see // https://github.com/envoyproxy/envoy/blob/master/source/docs/stats.md#stats-memory-tests // for details on how to fix. - EXPECT_MEMORY_EQ(m_per_cluster, 44233); + EXPECT_MEMORY_EQ(m_per_cluster, 44425); EXPECT_MEMORY_LE(m_per_cluster, 44600); } @@ -333,6 +334,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithRealSymbolTable) { // 2020/04/07 10661 35557 36000 fix clang tidy on master // 2020/04/23 10531 36281 36800 http: max stream duration upstream support. // 2020/05/05 10908 36345 36800 router: add InternalRedirectPolicy and predicate + // 2020/05/13 10531 36537 44600 Refactor resource manager // Note: when adjusting this value: EXPECT_MEMORY_EQ is active only in CI // 'release' builds, where we control the platform and tool-chain. So you @@ -346,7 +348,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithRealSymbolTable) { // If you encounter a failure here, please see // https://github.com/envoyproxy/envoy/blob/master/source/docs/stats.md#stats-memory-tests // for details on how to fix. - EXPECT_MEMORY_EQ(m_per_cluster, 36345); + EXPECT_MEMORY_EQ(m_per_cluster, 36537); EXPECT_MEMORY_LE(m_per_cluster, 36800); } From 466bd5444db721a79edb86b4f4ec3b7319e69069 Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Fri, 15 May 2020 15:37:37 -0700 Subject: [PATCH 186/909] bazelci: add coverage (#11218) Signed-off-by: Lizan Zhou --- .bazelci/presubmit.yml | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/.bazelci/presubmit.yml b/.bazelci/presubmit.yml index 6aa45662598c..3cdb1ad2c30a 100644 --- a/.bazelci/presubmit.yml +++ b/.bazelci/presubmit.yml @@ -1,10 +1,12 @@ --- tasks: gcc: + name: "GCC" platform: ubuntu1804 build_targets: - "//source/exe:envoy-static" rbe: + name: "RBE" platform: ubuntu1804 test_targets: - "//test/..." @@ -12,3 +14,14 @@ tasks: - "--config=remote-clang" - "--config=remote-ci" - "--jobs=75" + coverage: + name: "Coverage" + platform: ubuntu1804 + test_targets: + - "//test/integration/..." + - "//test/exe/..." 
+ test_flags: + - "--config=remote-clang" + - "--config=remote-ci" + - "--config=coverage" + - "--jobs=75" From f64007db8b1cc68901d51d121f13e1860b391595 Mon Sep 17 00:00:00 2001 From: Nicolas Flacco <47160394+FAYiEKcbD0XFqF2QK2E4viAHg8rMm2VbjYKdjTg@users.noreply.github.com> Date: Fri, 15 May 2020 15:55:08 -0700 Subject: [PATCH 187/909] redis: update docs to reflect redis upstream latency is in micros only (#11073) Update docs to reflect redis upstream latency is in micros only. Signed-off-by: FAYiEKcbD0XFqF2QK2E4viAHg8rMm2VbjYKdjTg --- .../filters/network/redis_proxy/v3/redis_proxy.proto | 4 ++-- docs/root/intro/arch_overview/other_protocols/redis.rst | 2 +- .../filters/network/redis_proxy/v3/redis_proxy.proto | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/api/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto b/api/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto index a3341b5ac606..143bd4da65e1 100644 --- a/api/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto +++ b/api/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto @@ -113,7 +113,7 @@ message RedisProxy { google.protobuf.UInt32Value max_upstream_unknown_connections = 6; // Enable per-command statistics per upstream cluster, in addition to the filter level aggregate - // count. + // count. These commands are measured in microseconds. bool enable_command_stats = 8; // Read policy. The default is to read from the master. @@ -193,7 +193,7 @@ message RedisProxy { ConnPoolSettings settings = 3 [(validate.rules).message = {required: true}]; // Indicates that latency stat should be computed in microseconds. By default it is computed in - // milliseconds. + // milliseconds. This does not apply to upstream command stats currently. bool latency_in_micros = 4; // List of **unique** prefixes used to separate keys from different workloads to different diff --git a/docs/root/intro/arch_overview/other_protocols/redis.rst b/docs/root/intro/arch_overview/other_protocols/redis.rst index 10c85d05a287..deebe8ce7f47 100644 --- a/docs/root/intro/arch_overview/other_protocols/redis.rst +++ b/docs/root/intro/arch_overview/other_protocols/redis.rst @@ -96,7 +96,7 @@ Every Redis cluster has its own extra statistics tree rooted at *cluster.. .. _arch_overview_redis_cluster_command_stats: -Per-cluster command statistics can be enabled via the setting :ref:`enable_command_stats `: +Per-cluster command statistics can be enabled via the setting :ref:`enable_command_stats `.: .. csv-table:: :header: Name, Type, Description diff --git a/generated_api_shadow/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto index 60ab28cfcf1f..b9ca387f4ca5 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto @@ -113,7 +113,7 @@ message RedisProxy { google.protobuf.UInt32Value max_upstream_unknown_connections = 6; // Enable per-command statistics per upstream cluster, in addition to the filter level aggregate - // count. + // count. These commands are measured in microseconds. bool enable_command_stats = 8; // Read policy. The default is to read from the master. @@ -188,7 +188,7 @@ message RedisProxy { ConnPoolSettings settings = 3 [(validate.rules).message = {required: true}]; // Indicates that latency stat should be computed in microseconds. 
By default it is computed in - // milliseconds. + // milliseconds. This does not apply to upstream command stats currently. bool latency_in_micros = 4; // List of **unique** prefixes used to separate keys from different workloads to different From 11c9223aac10834b7d5d2e399b0b4b59e0a3b3a1 Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Fri, 15 May 2020 21:05:11 -0700 Subject: [PATCH 188/909] build: update envoy-build-tools (#11224) Signed-off-by: Lizan Zhou --- bazel/repository_locations.bzl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index fff26a38c7b3..58e6c1027175 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -67,10 +67,10 @@ DEPENDENCY_REPOSITORIES = dict( use_category = ["build"], ), envoy_build_tools = dict( - sha256 = "328648f158e7167f881d984433ff6bfe203bf0b815a99d98d22fb01a0fc95f70", - strip_prefix = "envoy-build-tools-f41e5ef5a023e50da088035449c6cdee0ae30d71", - # 2020-05-11 - urls = ["https://github.com/envoyproxy/envoy-build-tools/archive/f41e5ef5a023e50da088035449c6cdee0ae30d71.tar.gz"], + sha256 = "78e794ae1c1197f59b7ecbf8bd62c053ecb1625daaccdbe287581ee6f12ec0fb", + strip_prefix = "envoy-build-tools-b47394aa94c45e15c479d18eab18ffd43ec62d89", + # 2020-05-14 + urls = ["https://github.com/envoyproxy/envoy-build-tools/archive/b47394aa94c45e15c479d18eab18ffd43ec62d89.tar.gz"], use_category = ["build"], ), boringssl = dict( From 31128e7dc22355876020188bc8feb99304663041 Mon Sep 17 00:00:00 2001 From: htuch Date: Sun, 17 May 2020 15:31:59 -0400 Subject: [PATCH 189/909] api: manifest based edge default documentation. (#11151) This PR replaces #11058, taking a slightly different approach. We utilize field options to annotate fields that should be set for untrusted environments with [configure_for_untrusted_downstream, configure_for_untrusted_downstream]. Defaults are provided out-of-band, in a manifest files in docs/edge_defaults_manifest.yaml. Protodoc glues the manifest and options together when generating field documentation, providing an additional notice for sensitive fields. This PR depends on #11108 first merging to provide the pip3 build infrastructure. Risk level: Low (docs only). Testing: Inspection of generated docs. 
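For reference, the mechanism pairs a field option with a manifest entry. For example, the listener buffer limit below is annotated with (udpa.annotations.security).configure_for_untrusted_downstream = true, and docs/edge_defaults_manifest.yaml carries the value shown in the generated docs:

    envoy.config.listener.v3.Listener.per_connection_buffer_limit_bytes: 32768 # 32 KiB

protodoc looks the fully qualified field name up in the manifest, validates the fragment against the field's message type via tools/config_validation, and renders an RST attention block containing the example; an annotated field with no manifest entry raises a ProtodocError.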
Signed-off-by: Harvey Tuch --- api/bazel/repository_locations.bzl | 4 +- api/envoy/config/bootstrap/v3/bootstrap.proto | 6 +- .../config/bootstrap/v4alpha/bootstrap.proto | 6 +- api/envoy/config/listener/v3/listener.proto | 4 +- .../config/listener/v4alpha/listener.proto | 4 +- bazel/dependency_imports.bzl | 4 +- bazel/repositories_extra.bzl | 6 +- docs/BUILD | 3 + docs/edge_defaults_manifest.yaml | 21 ++++++ .../bazel/repository_locations.bzl | 4 +- .../envoy/config/bootstrap/v3/bootstrap.proto | 6 +- .../config/bootstrap/v4alpha/bootstrap.proto | 6 +- .../envoy/config/listener/v3/listener.proto | 4 +- .../config/listener/v4alpha/listener.proto | 4 +- tools/code_format/check_format.py | 3 +- tools/config_validation/BUILD | 13 ++-- tools/config_validation/validate_fragment.py | 56 ++++++++++++++++ .../validate_yaml_fragment.py | 3 - tools/protodoc/BUILD | 5 ++ tools/protodoc/protodoc.py | 65 +++++++++++++++++-- tools/protodoc/requirements.txt | 1 + tools/protoxform/merge_active_shadow.py | 1 + tools/protoxform/migrate.py | 4 +- tools/protoxform/protoprint.py | 1 + tools/protoxform/protoxform.py | 5 +- tools/type_whisperer/BUILD | 6 ++ .../file_descriptor_set_text.bzl | 6 +- 27 files changed, 219 insertions(+), 32 deletions(-) create mode 100644 docs/BUILD create mode 100644 docs/edge_defaults_manifest.yaml create mode 100644 tools/config_validation/validate_fragment.py delete mode 100644 tools/config_validation/validate_yaml_fragment.py create mode 100644 tools/protodoc/requirements.txt diff --git a/api/bazel/repository_locations.bzl b/api/bazel/repository_locations.bzl index c275a8c65835..77539ee9b109 100644 --- a/api/bazel/repository_locations.bzl +++ b/api/bazel/repository_locations.bzl @@ -13,8 +13,8 @@ GOOGLEAPIS_SHA = "a45019af4d3290f02eaeb1ce10990166978c807cb33a9692141a076ba46d14 PROMETHEUS_GIT_SHA = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c" # Nov 17, 2017 PROMETHEUS_SHA = "783bdaf8ee0464b35ec0c8704871e1e72afa0005c3f3587f65d9d6694bf3911b" -UDPA_GIT_SHA = "e8cd3a4bb307e2c810cffff99f93e96e6d7fee85" # Mar 27, 2020 -UDPA_SHA256 = "1fd7857cb61daee7726fca8f4d55e4923774a8d00a53007a4093830dc0482685" +UDPA_GIT_SHA = "9f54a527e3bf4d1f4a6527f93d329fb1cc4516ac" # May 8, 2020 +UDPA_SHA256 = "7edae88586a84360203e5a4c724080c740b7b6002d5d56f5e806f27c912895cd" ZIPKINAPI_RELEASE = "0.2.2" # Aug 23, 2019 ZIPKINAPI_SHA256 = "688c4fe170821dd589f36ec45aaadc03a618a40283bc1f97da8fa11686fc816b" diff --git a/api/envoy/config/bootstrap/v3/bootstrap.proto b/api/envoy/config/bootstrap/v3/bootstrap.proto index c20109884d90..8eba15a5ba72 100644 --- a/api/envoy/config/bootstrap/v3/bootstrap.proto +++ b/api/envoy/config/bootstrap/v3/bootstrap.proto @@ -19,6 +19,7 @@ import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -145,7 +146,10 @@ message Bootstrap { Admin admin = 12; // Optional overload manager configuration. - overload.v3.OverloadManager overload_manager = 15; + overload.v3.OverloadManager overload_manager = 15 [ + (udpa.annotations.security).configure_for_untrusted_downstream = true, + (udpa.annotations.security).configure_for_untrusted_upstream = true + ]; // Enable :ref:`stats for event dispatcher `, defaults to false. // Note that this records a value for each iteration of the event loop on every thread. 
This diff --git a/api/envoy/config/bootstrap/v4alpha/bootstrap.proto b/api/envoy/config/bootstrap/v4alpha/bootstrap.proto index ce6aa147fba2..bd4169356a4e 100644 --- a/api/envoy/config/bootstrap/v4alpha/bootstrap.proto +++ b/api/envoy/config/bootstrap/v4alpha/bootstrap.proto @@ -18,6 +18,7 @@ import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -137,7 +138,10 @@ message Bootstrap { Admin admin = 12; // Optional overload manager configuration. - overload.v3.OverloadManager overload_manager = 15; + overload.v3.OverloadManager overload_manager = 15 [ + (udpa.annotations.security).configure_for_untrusted_downstream = true, + (udpa.annotations.security).configure_for_untrusted_upstream = true + ]; // Enable :ref:`stats for event dispatcher `, defaults to false. // Note that this records a value for each iteration of the event loop on every thread. This diff --git a/api/envoy/config/listener/v3/listener.proto b/api/envoy/config/listener/v3/listener.proto index 473a5eb2b42b..03214150e773 100644 --- a/api/envoy/config/listener/v3/listener.proto +++ b/api/envoy/config/listener/v3/listener.proto @@ -14,6 +14,7 @@ import "google/api/annotations.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -108,7 +109,8 @@ message Listener { // Soft limit on size of the listener’s new connection read and write buffers. // If unspecified, an implementation defined default is applied (1MiB). - google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5; + google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5 + [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // Listener metadata. core.v3.Metadata metadata = 6; diff --git a/api/envoy/config/listener/v4alpha/listener.proto b/api/envoy/config/listener/v4alpha/listener.proto index 4438bd2974d4..b7f32a821443 100644 --- a/api/envoy/config/listener/v4alpha/listener.proto +++ b/api/envoy/config/listener/v4alpha/listener.proto @@ -14,6 +14,7 @@ import "google/api/annotations.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -108,7 +109,8 @@ message Listener { // Soft limit on size of the listener’s new connection read and write buffers. // If unspecified, an implementation defined default is applied (1MiB). - google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5; + google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5 + [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // Listener metadata. 
core.v4alpha.Metadata metadata = 6; diff --git a/bazel/dependency_imports.bzl b/bazel/dependency_imports.bzl index 2385a98ff25b..cc2ff635ede3 100644 --- a/bazel/dependency_imports.bzl +++ b/bazel/dependency_imports.bzl @@ -4,7 +4,8 @@ load("@envoy_build_tools//toolchains:rbe_toolchains_config.bzl", "rbe_toolchains load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies", "go_repository") load("@build_bazel_rules_apple//apple:repositories.bzl", "apple_rules_dependencies") load("@upb//bazel:repository_defs.bzl", upb_bazel_version_repository = "bazel_version_repository") -load("@config_validation//:requirements.bzl", config_validation_pip_install = "pip_install") +load("@config_validation_pip3//:requirements.bzl", config_validation_pip_install = "pip_install") +load("@protodoc_pip3//:requirements.bzl", protodoc_pip_install = "pip_install") # go version for rules_go GO_VERSION = "1.13.5" @@ -41,3 +42,4 @@ def envoy_dependency_imports(go_version = GO_VERSION): ) config_validation_pip_install() + protodoc_pip_install() diff --git a/bazel/repositories_extra.bzl b/bazel/repositories_extra.bzl index fe0e9adb6a29..aef6b8c69b24 100644 --- a/bazel/repositories_extra.bzl +++ b/bazel/repositories_extra.bzl @@ -7,9 +7,13 @@ def _python_deps(): pip_repositories() pip3_import( - name = "config_validation", + name = "config_validation_pip3", requirements = "@envoy//tools/config_validation:requirements.txt", ) + pip3_import( + name = "protodoc_pip3", + requirements = "@envoy//tools/protodoc:requirements.txt", + ) # Envoy deps that rely on a first stage of dependency loading in envoy_dependencies(). def envoy_dependencies_extra(): diff --git a/docs/BUILD b/docs/BUILD new file mode 100644 index 000000000000..d190c0a59a0a --- /dev/null +++ b/docs/BUILD @@ -0,0 +1,3 @@ +licenses(["notice"]) # Apache 2 + +exports_files(["edge_defaults_manifest.yaml"]) diff --git a/docs/edge_defaults_manifest.yaml b/docs/edge_defaults_manifest.yaml new file mode 100644 index 000000000000..b5072c26a32b --- /dev/null +++ b/docs/edge_defaults_manifest.yaml @@ -0,0 +1,21 @@ +envoy.config.bootstrap.v3.Bootstrap.overload_manager: + refresh_interval: 0.25s + resource_monitors: + - name: "envoy.resource_monitors.fixed_heap" + typed_config: + "@type": type.googleapis.com/envoy.config.resource_monitor.fixed_heap.v2alpha.FixedHeapConfig + # TODO: Tune for your system. 
+ max_heap_size_bytes: 2147483648 # 2 GiB + actions: + - name: "envoy.overload_actions.shrink_heap" + triggers: + - name: "envoy.resource_monitors.fixed_heap" + threshold: + value: 0.95 + - name: "envoy.overload_actions.stop_accepting_requests" + triggers: + - name: "envoy.resource_monitors.fixed_heap" + threshold: + value: 0.98 + +envoy.config.listener.v3.Listener.per_connection_buffer_limit_bytes: 32768 # 32 KiB diff --git a/generated_api_shadow/bazel/repository_locations.bzl b/generated_api_shadow/bazel/repository_locations.bzl index c275a8c65835..77539ee9b109 100644 --- a/generated_api_shadow/bazel/repository_locations.bzl +++ b/generated_api_shadow/bazel/repository_locations.bzl @@ -13,8 +13,8 @@ GOOGLEAPIS_SHA = "a45019af4d3290f02eaeb1ce10990166978c807cb33a9692141a076ba46d14 PROMETHEUS_GIT_SHA = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c" # Nov 17, 2017 PROMETHEUS_SHA = "783bdaf8ee0464b35ec0c8704871e1e72afa0005c3f3587f65d9d6694bf3911b" -UDPA_GIT_SHA = "e8cd3a4bb307e2c810cffff99f93e96e6d7fee85" # Mar 27, 2020 -UDPA_SHA256 = "1fd7857cb61daee7726fca8f4d55e4923774a8d00a53007a4093830dc0482685" +UDPA_GIT_SHA = "9f54a527e3bf4d1f4a6527f93d329fb1cc4516ac" # May 8, 2020 +UDPA_SHA256 = "7edae88586a84360203e5a4c724080c740b7b6002d5d56f5e806f27c912895cd" ZIPKINAPI_RELEASE = "0.2.2" # Aug 23, 2019 ZIPKINAPI_SHA256 = "688c4fe170821dd589f36ec45aaadc03a618a40283bc1f97da8fa11686fc816b" diff --git a/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto b/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto index 994af34c7ac2..de0bc8ffa443 100644 --- a/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto +++ b/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto @@ -19,6 +19,7 @@ import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -143,7 +144,10 @@ message Bootstrap { Admin admin = 12; // Optional overload manager configuration. - overload.v3.OverloadManager overload_manager = 15; + overload.v3.OverloadManager overload_manager = 15 [ + (udpa.annotations.security).configure_for_untrusted_downstream = true, + (udpa.annotations.security).configure_for_untrusted_upstream = true + ]; // Enable :ref:`stats for event dispatcher `, defaults to false. // Note that this records a value for each iteration of the event loop on every thread. This diff --git a/generated_api_shadow/envoy/config/bootstrap/v4alpha/bootstrap.proto b/generated_api_shadow/envoy/config/bootstrap/v4alpha/bootstrap.proto index cd05d6f4e46d..ec0a4b3d6a89 100644 --- a/generated_api_shadow/envoy/config/bootstrap/v4alpha/bootstrap.proto +++ b/generated_api_shadow/envoy/config/bootstrap/v4alpha/bootstrap.proto @@ -19,6 +19,7 @@ import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -145,7 +146,10 @@ message Bootstrap { Admin admin = 12; // Optional overload manager configuration. 
- overload.v3.OverloadManager overload_manager = 15; + overload.v3.OverloadManager overload_manager = 15 [ + (udpa.annotations.security).configure_for_untrusted_downstream = true, + (udpa.annotations.security).configure_for_untrusted_upstream = true + ]; // Enable :ref:`stats for event dispatcher `, defaults to false. // Note that this records a value for each iteration of the event loop on every thread. This diff --git a/generated_api_shadow/envoy/config/listener/v3/listener.proto b/generated_api_shadow/envoy/config/listener/v3/listener.proto index 2b4ecb826d86..b2892906a484 100644 --- a/generated_api_shadow/envoy/config/listener/v3/listener.proto +++ b/generated_api_shadow/envoy/config/listener/v3/listener.proto @@ -14,6 +14,7 @@ import "google/api/annotations.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -106,7 +107,8 @@ message Listener { // Soft limit on size of the listener’s new connection read and write buffers. // If unspecified, an implementation defined default is applied (1MiB). - google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5; + google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5 + [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // Listener metadata. core.v3.Metadata metadata = 6; diff --git a/generated_api_shadow/envoy/config/listener/v4alpha/listener.proto b/generated_api_shadow/envoy/config/listener/v4alpha/listener.proto index 4438bd2974d4..b7f32a821443 100644 --- a/generated_api_shadow/envoy/config/listener/v4alpha/listener.proto +++ b/generated_api_shadow/envoy/config/listener/v4alpha/listener.proto @@ -14,6 +14,7 @@ import "google/api/annotations.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -108,7 +109,8 @@ message Listener { // Soft limit on size of the listener’s new connection read and write buffers. // If unspecified, an implementation defined default is applied (1MiB). - google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5; + google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5 + [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // Listener metadata. 
core.v4alpha.Metadata metadata = 6; diff --git a/tools/code_format/check_format.py b/tools/code_format/check_format.py index af5e092643bd..000c4de01299 100755 --- a/tools/code_format/check_format.py +++ b/tools/code_format/check_format.py @@ -697,7 +697,8 @@ def checkSourceLine(line, file_path, reportError): def checkBuildLine(line, file_path, reportError): - if "@bazel_tools" in line and not (isSkylarkFile(file_path) or file_path.startswith("./bazel/")): + if "@bazel_tools" in line and not (isSkylarkFile(file_path) or file_path.startswith("./bazel/") or + "python/runfiles" in line): reportError("unexpected @bazel_tools reference, please indirect via a definition in //bazel") if not whitelistedForProtobufDeps(file_path) and '"protobuf"' in line: reportError("unexpected direct external dependency on protobuf, use " diff --git a/tools/config_validation/BUILD b/tools/config_validation/BUILD index a6e8b6db72b2..99d15311d6f0 100644 --- a/tools/config_validation/BUILD +++ b/tools/config_validation/BUILD @@ -1,10 +1,15 @@ licenses(["notice"]) # Apache 2 -load("@config_validation//:requirements.bzl", "requirement") +load("@config_validation_pip3//:requirements.bzl", "requirement") py_binary( - name = "validate_yaml_fragment", - srcs = ["validate_yaml_fragment.py"], + name = "validate_fragment", + srcs = ["validate_fragment.py"], + data = ["//tools/type_whisperer:all_protos_with_ext_pb_text.pb_text"], visibility = ["//visibility:public"], - deps = [requirement("PyYAML")], + deps = [ + requirement("PyYAML"), + "@bazel_tools//tools/python/runfiles", + "@com_google_protobuf//:protobuf_python", + ], ) diff --git a/tools/config_validation/validate_fragment.py b/tools/config_validation/validate_fragment.py new file mode 100644 index 000000000000..403b5540418f --- /dev/null +++ b/tools/config_validation/validate_fragment.py @@ -0,0 +1,56 @@ +# Validate a YAML fragment against an Envoy API proto3 type. +# +# Example usage: +# +# bazel run //tools/config_validation:validate_fragment -- \ +# envoy.config.bootstrap.v3.Bootstrap $PWD/configs/google_com_proxy.v2.yaml + +import json +import pathlib +import sys + +import yaml + +from google.protobuf import descriptor_pb2 +from google.protobuf import descriptor_pool +from google.protobuf import json_format +from google.protobuf import message_factory +from google.protobuf import text_format + +from bazel_tools.tools.python.runfiles import runfiles + + +def ValidateFragment(type_name, fragment): + """Validate a dictionary representing a JSON/YAML fragment against an Envoy API proto3 type. + + Throws Protobuf errors on parsing exceptions, successful validations produce + no result. + + Args: + type_name: a string providing the type name, e.g. + envoy.config.bootstrap.v3.Bootstrap. + fragment: a dictionary representing the parsed JSON/YAML configuration + fragment. 
+ """ + json_fragment = json.dumps(fragment) + + r = runfiles.Create() + all_protos_pb_text_path = r.Rlocation( + 'envoy/tools/type_whisperer/all_protos_with_ext_pb_text.pb_text') + file_desc_set = descriptor_pb2.FileDescriptorSet() + text_format.Parse(pathlib.Path(all_protos_pb_text_path).read_text(), + file_desc_set, + allow_unknown_extension=True) + + pool = descriptor_pool.DescriptorPool() + for f in file_desc_set.file: + pool.Add(f) + desc = pool.FindMessageTypeByName(type_name) + msg = message_factory.MessageFactory(pool=pool).GetPrototype(desc)() + json_format.Parse(json_fragment, msg, descriptor_pool=pool) + + +if __name__ == '__main__': + type_name, yaml_path = sys.argv[1:] + ValidateFragment(type_name, yaml.load(pathlib.Path(yaml_path).read_text(), + Loader=yaml.FullLoader)) diff --git a/tools/config_validation/validate_yaml_fragment.py b/tools/config_validation/validate_yaml_fragment.py deleted file mode 100644 index 0cfac273b237..000000000000 --- a/tools/config_validation/validate_yaml_fragment.py +++ /dev/null @@ -1,3 +0,0 @@ -import yaml - -print('YAML version is %s' % yaml.__version__) diff --git a/tools/protodoc/BUILD b/tools/protodoc/BUILD index 812ceac0c66b..45480b086306 100644 --- a/tools/protodoc/BUILD +++ b/tools/protodoc/BUILD @@ -1,5 +1,7 @@ licenses(["notice"]) # Apache 2 +load("@protodoc_pip3//:requirements.bzl", "requirement") + py_binary( name = "generate_empty", srcs = ["generate_empty.py"], @@ -10,11 +12,14 @@ py_binary( py_binary( name = "protodoc", srcs = ["protodoc.py"], + data = ["//docs:edge_defaults_manifest.yaml"], visibility = ["//visibility:public"], deps = [ "//tools/api_proto_plugin", + "//tools/config_validation:validate_fragment", "@com_envoyproxy_protoc_gen_validate//validate:validate_py", "@com_github_cncf_udpa//udpa/annotations:pkg_py_proto", "@com_google_protobuf//:protobuf_python", + requirement("PyYAML"), ], ) diff --git a/tools/protodoc/protodoc.py b/tools/protodoc/protodoc.py index f360741db233..750ca3cd78d1 100755 --- a/tools/protodoc/protodoc.py +++ b/tools/protodoc/protodoc.py @@ -10,11 +10,24 @@ import pathlib import re import string +import sys + +from bazel_tools.tools.python.runfiles import runfiles +import yaml + +# We have to do some evil things to sys.path due to the way that Python module +# resolution works; we have both tools/ trees in bazel_tools and envoy. By +# default, Bazel leaves us with a sys.path in which the @bazel_tools repository +# takes precedence. Now that we're done with importing runfiles above, we can +# just remove it from the sys.path. +sys.path = [p for p in sys.path if not p.endswith('bazel_tools')] from tools.api_proto_plugin import annotations from tools.api_proto_plugin import plugin from tools.api_proto_plugin import visitor +from tools.config_validation import validate_fragment +from udpa.annotations import security_pb2 from udpa.annotations import status_pb2 from validate import validate_pb2 @@ -388,7 +401,30 @@ def FormatAnchor(label): return '.. 
_%s:\n\n' % label -def FormatFieldAsDefinitionListItem(outer_type_context, type_context, field): +def FormatSecurityOptions(security_option, field, type_context, edge_default_yaml): + sections = [] + + if security_option.configure_for_untrusted_downstream: + sections.append( + Indent(4, 'This field should be configured in the presence of untrusted *downstreams*.')) + if security_option.configure_for_untrusted_upstream: + sections.append( + Indent(4, 'This field should be configured in the presence of untrusted *upstreams*.')) + + validate_fragment.ValidateFragment(field.type_name[1:], edge_default_yaml) + field_name = type_context.name.split('.')[-1] + example = {field_name: edge_default_yaml} + sections.append( + Indent(4, 'Example configuration for untrusted environments:\n\n') + + Indent(4, '.. code-block:: yaml\n\n') + + '\n'.join(IndentLines(6, + yaml.dump(example).split('\n')))) + + return '.. attention::\n' + '\n\n'.join(sections) + + +def FormatFieldAsDefinitionListItem(outer_type_context, type_context, field, + edge_defaults_manifest): """Format a FieldDescriptorProto as RST definition list item. Args: @@ -441,13 +477,23 @@ def FormatFieldAsDefinitionListItem(outer_type_context, type_context, field): else: formatted_oneof_comment = '' + # If there is a udpa.annotations.security option, include it after the comment. + if field.options.HasExtension(security_pb2.security): + edge_default_yaml = edge_defaults_manifest.get(type_context.name) + if not edge_default_yaml: + raise ProtodocError('Missing edge default YAML example for %s' % type_context.name) + formatted_security_options = FormatSecurityOptions( + field.options.Extensions[security_pb2.security], field, type_context, edge_default_yaml) + else: + formatted_security_options = '' + comment = '(%s) ' % ', '.join([FormatFieldType(type_context, field)] + field_annotations) + formatted_leading_comment - return anchor + field.name + '\n' + MapLines(functools.partial(Indent, 2), - comment + formatted_oneof_comment) + return anchor + field.name + '\n' + MapLines(functools.partial( + Indent, 2), comment + formatted_oneof_comment) + formatted_security_options -def FormatMessageAsDefinitionList(type_context, msg): +def FormatMessageAsDefinitionList(type_context, msg, edge_defaults_manifest): """Format a DescriptorProto as RST definition list. Args: @@ -472,7 +518,8 @@ def FormatMessageAsDefinitionList(type_context, msg): type_context.oneof_names[index] = oneof_decl.name return '\n'.join( FormatFieldAsDefinitionListItem(type_context, type_context.ExtendField(index, field.name), - field) for index, field in enumerate(msg.field)) + '\n' + field, edge_defaults_manifest) + for index, field in enumerate(msg.field)) + '\n' def FormatEnumValueAsDefinitionListItem(type_context, enum_value): @@ -525,6 +572,11 @@ class RstFormatVisitor(visitor.Visitor): See visitor.Visitor for visitor method docs comments. 
""" + def __init__(self): + r = runfiles.Create() + with open(r.Rlocation('envoy/docs/edge_defaults_manifest.yaml'), 'r') as f: + self.edge_defaults_manifest = yaml.load(f.read()) + def VisitEnum(self, enum_proto, type_context): normal_enum_type = NormalizeTypeContextName(type_context.name) anchor = FormatAnchor(EnumCrossRefLabel(normal_enum_type)) @@ -553,7 +605,8 @@ def VisitMessage(self, msg_proto, type_context, nested_msgs, nested_enums): return '' return anchor + header + proto_link + formatted_leading_comment + FormatMessageAsJson( type_context, msg_proto) + FormatMessageAsDefinitionList( - type_context, msg_proto) + '\n'.join(nested_msgs) + '\n' + '\n'.join(nested_enums) + type_context, msg_proto, + self.edge_defaults_manifest) + '\n'.join(nested_msgs) + '\n' + '\n'.join(nested_enums) def VisitFile(self, file_proto, type_context, services, msgs, enums): has_messages = True diff --git a/tools/protodoc/requirements.txt b/tools/protodoc/requirements.txt new file mode 100644 index 000000000000..7a997b5e44bd --- /dev/null +++ b/tools/protodoc/requirements.txt @@ -0,0 +1 @@ +PyYAML==5.3.1 diff --git a/tools/protoxform/merge_active_shadow.py b/tools/protoxform/merge_active_shadow.py index 5d2cd029526c..cac6dbfe58e5 100644 --- a/tools/protoxform/merge_active_shadow.py +++ b/tools/protoxform/merge_active_shadow.py @@ -20,6 +20,7 @@ from envoy.annotations import deprecation_pb2 as _ from envoy.annotations import resource_pb2 as _ from udpa.annotations import migrate_pb2 as _ +from udpa.annotations import security_pb2 as _ from udpa.annotations import sensitive_pb2 as _ from udpa.annotations import status_pb2 as _ from udpa.annotations import versioning_pb2 as _ diff --git a/tools/protoxform/migrate.py b/tools/protoxform/migrate.py index 06e2743c845f..e7481b0ccbbb 100644 --- a/tools/protoxform/migrate.py +++ b/tools/protoxform/migrate.py @@ -8,7 +8,7 @@ from tools.protoxform import options from tools.protoxform import utils -from envoy.annotations import resource_pb2 +from envoy_api_canonical.envoy.annotations import resource_pb2 from udpa.annotations import migrate_pb2 from udpa.annotations import status_pb2 from google.api import annotations_pb2 @@ -251,6 +251,8 @@ def VersionUpgradeXform(n, envoy_internal_shadow, file_proto, params): v(N+1) FileDescriptorProto message. """ # Load type database. + if params['type_db_path']: + utils.LoadTypeDb(params['type_db_path']) typedb = utils.GetTypeDb() # If this isn't a proto in an upgraded package, return None. if file_proto.name not in typedb.next_version_protos or not typedb.next_version_protos[ diff --git a/tools/protoxform/protoprint.py b/tools/protoxform/protoprint.py index 1b0e8f5f418c..57a305afa4f6 100755 --- a/tools/protoxform/protoprint.py +++ b/tools/protoxform/protoprint.py @@ -36,6 +36,7 @@ from envoy.annotations import deprecation_pb2 as _ from envoy.annotations import resource_pb2 from udpa.annotations import migrate_pb2 +from udpa.annotations import security_pb2 as _ from udpa.annotations import sensitive_pb2 as _ from udpa.annotations import status_pb2 diff --git a/tools/protoxform/protoxform.py b/tools/protoxform/protoxform.py index 9331877aa17f..4bc9b55a2365 100755 --- a/tools/protoxform/protoxform.py +++ b/tools/protoxform/protoxform.py @@ -16,9 +16,10 @@ # during FileDescriptorProto printing. 
from google.api import annotations_pb2 as _ from validate import validate_pb2 as _ -from envoy.annotations import deprecation_pb2 as _ -from envoy.annotations import resource_pb2 +from envoy_api_canonical.envoy.annotations import deprecation_pb2 as _ +from envoy_api_canonical.envoy.annotations import resource_pb2 from udpa.annotations import migrate_pb2 +from udpa.annotations import security_pb2 as _ from udpa.annotations import sensitive_pb2 as _ from udpa.annotations import status_pb2 diff --git a/tools/type_whisperer/BUILD b/tools/type_whisperer/BUILD index 3acb95c8adbe..191e2b90d1ba 100644 --- a/tools/type_whisperer/BUILD +++ b/tools/type_whisperer/BUILD @@ -70,6 +70,12 @@ file_descriptor_set_text( deps = ["@envoy_api_canonical//:all_protos"], ) +file_descriptor_set_text( + name = "all_protos_with_ext_pb_text", + with_external_deps = True, + deps = ["@envoy_api_canonical//:all_protos"], +) + proto_cc_source( name = "embedded_all_protos", constant = "AllProtosPbText", diff --git a/tools/type_whisperer/file_descriptor_set_text.bzl b/tools/type_whisperer/file_descriptor_set_text.bzl index 2ed8c7c315fc..5146e1f82331 100644 --- a/tools/type_whisperer/file_descriptor_set_text.bzl +++ b/tools/type_whisperer/file_descriptor_set_text.bzl @@ -9,7 +9,7 @@ def _file_descriptor_set_text(ctx): args = [ctx.outputs.pb_text.path] for dep in file_descriptor_sets.to_list(): ws_name = dep.owner.workspace_name - if (not ws_name) or ws_name in ctx.attr.proto_repositories: + if (not ws_name) or ws_name in ctx.attr.proto_repositories or ctx.attr.with_external_deps: args.append(dep.path) ctx.actions.run( @@ -30,6 +30,10 @@ file_descriptor_set_text = rule( default = ["envoy_api_canonical"], allow_empty = False, ), + "with_external_deps": attr.bool( + doc = "Include file descriptors for external dependencies.", + default = False, + ), "_file_descriptor_set_text_gen": attr.label( default = Label("//tools/type_whisperer:file_descriptor_set_text_gen"), executable = True, From c6627752ac7d62a014c75a4880da219ab0f8e22e Mon Sep 17 00:00:00 2001 From: Joshua Marantz Date: Sun, 17 May 2020 18:34:02 -0400 Subject: [PATCH 190/909] stats: Pre-allocate codec stats for http1 and http2 (#11135) Commit Message: Lazy-init codec stats as part of ClusterInfo and pass them into the codecs, rather than recreating the stats on every connection. Additional Description: Risk Level: medium Testing: //test/... 
Docs Changes: n/a Release Notes: n/a Fixes: #11069, #8324 Signed-off-by: Joshua Marantz --- include/envoy/http/codec.h | 8 +++++ include/envoy/upstream/upstream.h | 10 ++++++ source/common/http/codec_client.cc | 5 +-- source/common/http/conn_manager_utility.cc | 10 ++++-- source/common/http/conn_manager_utility.h | 4 +++ source/common/http/http1/BUILD | 1 + source/common/http/http1/codec_impl.cc | 8 ++--- source/common/http/http1/codec_impl.h | 18 ++++++++--- source/common/http/http2/BUILD | 1 + source/common/http/http2/codec_impl.cc | 15 ++++----- source/common/http/http2/codec_impl.h | 18 ++++++++--- source/common/upstream/BUILD | 5 +++ .../upstream/health_discovery_service.cc | 1 + source/common/upstream/upstream_impl.cc | 10 ++++++ source/common/upstream/upstream_impl.h | 8 +++++ .../network/http_connection_manager/config.cc | 20 ++++++++---- .../network/http_connection_manager/config.h | 4 +++ source/server/admin/admin.cc | 3 +- source/server/admin/admin.h | 4 +++ test/common/http/codec_impl_fuzz_test.cc | 12 ++++--- test/common/http/http1/codec_impl_test.cc | 27 ++++++++++------ test/common/http/http2/codec_impl_test_util.h | 32 +++++++++++++++---- test/integration/autonomous_upstream.cc | 10 +++--- test/integration/autonomous_upstream.h | 5 +-- test/integration/fake_upstream.cc | 16 ++++++---- test/integration/fake_upstream.h | 18 +++++++++-- test/integration/integration_admin_test.cc | 26 ++------------- test/integration/protocol_integration_test.cc | 16 ++++++++++ test/mocks/upstream/BUILD | 3 ++ test/mocks/upstream/cluster_info.cc | 8 +++++ test/mocks/upstream/cluster_info.h | 8 +++++ 31 files changed, 240 insertions(+), 94 deletions(-) diff --git a/include/envoy/http/codec.h b/include/envoy/http/codec.h index 769c042d6b9b..bb19ce83bcab 100644 --- a/include/envoy/http/codec.h +++ b/include/envoy/http/codec.h @@ -16,6 +16,14 @@ namespace Envoy { namespace Http { +namespace Http1 { +struct CodecStats; +} + +namespace Http2 { +struct CodecStats; +} + // Legacy default value of 60K is safely under both codec default limits. static const uint32_t DEFAULT_MAX_REQUEST_HEADERS_KB = 60; // Default maximum number of headers. diff --git a/include/envoy/upstream/upstream.h b/include/envoy/upstream/upstream.h index 5e8aa41e2ff5..dbb89c88be67 100644 --- a/include/envoy/upstream/upstream.h +++ b/include/envoy/upstream/upstream.h @@ -911,6 +911,16 @@ class ClusterInfo { virtual const absl::optional& upstreamHttpProtocolOptions() const PURE; + /** + * @return the Http1 Codec Stats. + */ + virtual Http::Http1::CodecStats& http1CodecStats() const PURE; + + /** + * @return the Http2 Codec Stats. + */ + virtual Http::Http2::CodecStats& http2CodecStats() const PURE; + protected: /** * Invoked by extensionProtocolOptionsTyped. 
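The http1CodecStats()/http2CodecStats() accessors added to ClusterInfo above are backed by the create-once CodecStats::atomicGet() helpers added to the HTTP/1 and HTTP/2 codec headers later in this patch, so the counters are built once and reused across connections instead of being re-created per connection. A rough standard-library analog of that create-once behavior is sketched here; ExampleCodecStats and ExampleLazyStats are illustrative names only, not Envoy types (the real code uses Thread::AtomicPtr).

#include <atomic>
#include <memory>
#include <mutex>
#include <string>
#include <utility>

// Illustrative stand-in for a codec stats struct; the real CodecStats holds
// Stats::Counter references built from a Stats::Scope, which is the work
// worth doing only once.
struct ExampleCodecStats {
  explicit ExampleCodecStats(std::string prefix) : prefix_(std::move(prefix)) {}
  std::string prefix_;
};

// Create-once holder: the first caller constructs the stats under a lock,
// every later call (from any connection or thread) returns the same instance.
class ExampleLazyStats {
public:
  ExampleCodecStats& get(const std::string& prefix) {
    ExampleCodecStats* stats = ptr_.load(std::memory_order_acquire);
    if (stats == nullptr) {
      std::lock_guard<std::mutex> lock(init_lock_);
      stats = ptr_.load(std::memory_order_relaxed);
      if (stats == nullptr) {
        owned_ = std::make_unique<ExampleCodecStats>(prefix);
        stats = owned_.get();
        ptr_.store(stats, std::memory_order_release);
      }
    }
    return *stats;
  }

private:
  std::mutex init_lock_;
  std::unique_ptr<ExampleCodecStats> owned_;
  std::atomic<ExampleCodecStats*> ptr_{nullptr};
};

// Usage sketch: a per-cluster member such as `ExampleLazyStats http1_stats_;`
// lets every new codec call `http1_stats_.get("http1.")` without re-creating
// the counters.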
diff --git a/source/common/http/codec_client.cc b/source/common/http/codec_client.cc index 60725a27b3fd..935fb6476e33 100644 --- a/source/common/http/codec_client.cc +++ b/source/common/http/codec_client.cc @@ -147,16 +147,17 @@ CodecClientProd::CodecClientProd(Type type, Network::ClientConnectionPtr&& conne Upstream::HostDescriptionConstSharedPtr host, Event::Dispatcher& dispatcher) : CodecClient(type, std::move(connection), host, dispatcher) { + switch (type) { case Type::HTTP1: { codec_ = std::make_unique( - *connection_, host->cluster().statsScope(), *this, host->cluster().http1Settings(), + *connection_, host->cluster().http1CodecStats(), *this, host->cluster().http1Settings(), host->cluster().maxResponseHeadersCount()); break; } case Type::HTTP2: { codec_ = std::make_unique( - *connection_, *this, host->cluster().statsScope(), host->cluster().http2Options(), + *connection_, *this, host->cluster().http2CodecStats(), host->cluster().http2Options(), Http::DEFAULT_MAX_REQUEST_HEADERS_KB, host->cluster().maxResponseHeadersCount(), Http2::ProdNghttp2SessionFactory::get()); break; diff --git a/source/common/http/conn_manager_utility.cc b/source/common/http/conn_manager_utility.cc index 2b1321afd5b8..68846e8a0040 100644 --- a/source/common/http/conn_manager_utility.cc +++ b/source/common/http/conn_manager_utility.cc @@ -43,18 +43,22 @@ std::string ConnectionManagerUtility::determineNextProtocol(Network::Connection& ServerConnectionPtr ConnectionManagerUtility::autoCreateCodec( Network::Connection& connection, const Buffer::Instance& data, - ServerConnectionCallbacks& callbacks, Stats::Scope& scope, const Http1Settings& http1_settings, + ServerConnectionCallbacks& callbacks, Stats::Scope& scope, + Http1::CodecStats::AtomicPtr& http1_codec_stats, + Http2::CodecStats::AtomicPtr& http2_codec_stats, const Http1Settings& http1_settings, const envoy::config::core::v3::Http2ProtocolOptions& http2_options, uint32_t max_request_headers_kb, uint32_t max_request_headers_count, envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction headers_with_underscores_action) { if (determineNextProtocol(connection, data) == Http2::ALPN_STRING) { + Http2::CodecStats& stats = Http2::CodecStats::atomicGet(http2_codec_stats, scope); return std::make_unique( - connection, callbacks, scope, http2_options, max_request_headers_kb, + connection, callbacks, stats, http2_options, max_request_headers_kb, max_request_headers_count, headers_with_underscores_action); } else { + Http1::CodecStats& stats = Http1::CodecStats::atomicGet(http1_codec_stats, scope); return std::make_unique( - connection, scope, callbacks, http1_settings, max_request_headers_kb, + connection, stats, callbacks, http1_settings, max_request_headers_kb, max_request_headers_count, headers_with_underscores_action); } } diff --git a/source/common/http/conn_manager_utility.h b/source/common/http/conn_manager_utility.h index 377143bde4ee..fd5f2098be7c 100644 --- a/source/common/http/conn_manager_utility.h +++ b/source/common/http/conn_manager_utility.h @@ -7,6 +7,8 @@ #include "envoy/network/connection.h" #include "common/http/conn_manager_impl.h" +#include "common/http/http1/codec_impl.h" +#include "common/http/http2/codec_impl.h" namespace Envoy { namespace Http { @@ -36,6 +38,8 @@ class ConnectionManagerUtility { static ServerConnectionPtr autoCreateCodec(Network::Connection& connection, const Buffer::Instance& data, ServerConnectionCallbacks& callbacks, Stats::Scope& scope, + Http1::CodecStats::AtomicPtr& http1_codec_stats, + 
Http2::CodecStats::AtomicPtr& http2_codec_stats, const Http1Settings& http1_settings, const envoy::config::core::v3::Http2ProtocolOptions& http2_options, uint32_t max_request_headers_kb, uint32_t max_request_headers_count, diff --git a/source/common/http/http1/BUILD b/source/common/http/http1/BUILD index d9b00a317157..be5088d41966 100644 --- a/source/common/http/http1/BUILD +++ b/source/common/http/http1/BUILD @@ -30,6 +30,7 @@ envoy_cc_library( "//source/common/buffer:watermark_buffer_lib", "//source/common/common:assert_lib", "//source/common/common:statusor_lib", + "//source/common/common:thread_lib", "//source/common/common:utility_lib", "//source/common/http:codec_helper_lib", "//source/common/http:codes_lib", diff --git a/source/common/http/http1/codec_impl.cc b/source/common/http/http1/codec_impl.cc index 658596cc6057..0b7dab12d1fa 100644 --- a/source/common/http/http1/codec_impl.cc +++ b/source/common/http/http1/codec_impl.cc @@ -440,11 +440,11 @@ http_parser_settings ConnectionImpl::settings_{ nullptr // on_chunk_complete }; -ConnectionImpl::ConnectionImpl(Network::Connection& connection, Stats::Scope& stats, +ConnectionImpl::ConnectionImpl(Network::Connection& connection, CodecStats& stats, http_parser_type type, uint32_t max_headers_kb, const uint32_t max_headers_count, HeaderKeyFormatterPtr&& header_key_formatter, bool enable_trailers) - : connection_(connection), stats_{ALL_HTTP1_CODEC_STATS(POOL_COUNTER_PREFIX(stats, "http1."))}, + : connection_(connection), stats_(stats), header_key_formatter_(std::move(header_key_formatter)), processing_trailers_(false), handling_upgrade_(false), reset_stream_called_(false), deferred_end_stream_headers_(false), strict_header_validation_( @@ -743,7 +743,7 @@ void ConnectionImpl::onResetStreamBase(StreamResetReason reason) { } ServerConnectionImpl::ServerConnectionImpl( - Network::Connection& connection, Stats::Scope& stats, ServerConnectionCallbacks& callbacks, + Network::Connection& connection, CodecStats& stats, ServerConnectionCallbacks& callbacks, const Http1Settings& settings, uint32_t max_request_headers_kb, const uint32_t max_request_headers_count, envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction @@ -1000,7 +1000,7 @@ void ServerConnectionImpl::checkHeaderNameForUnderscores() { } } -ClientConnectionImpl::ClientConnectionImpl(Network::Connection& connection, Stats::Scope& stats, +ClientConnectionImpl::ClientConnectionImpl(Network::Connection& connection, CodecStats& stats, ConnectionCallbacks&, const Http1Settings& settings, const uint32_t max_response_headers_count) : ConnectionImpl(connection, stats, HTTP_RESPONSE, MAX_RESPONSE_HEADERS_KB, diff --git a/source/common/http/http1/codec_impl.h b/source/common/http/http1/codec_impl.h index 8b53d4c374f5..dd731982c6c3 100644 --- a/source/common/http/http1/codec_impl.h +++ b/source/common/http/http1/codec_impl.h @@ -11,11 +11,11 @@ #include "envoy/config/core/v3/protocol.pb.h" #include "envoy/http/codec.h" #include "envoy/network/connection.h" -#include "envoy/stats/scope.h" #include "common/buffer/watermark_buffer.h" #include "common/common/assert.h" #include "common/common/statusor.h" +#include "common/common/thread.h" #include "common/http/codec_helper.h" #include "common/http/codes.h" #include "common/http/header_map_impl.h" @@ -39,6 +39,14 @@ namespace Http1 { * Wrapper struct for the HTTP/1 codec stats. 
@see stats_macros.h */ struct CodecStats { + using AtomicPtr = Thread::AtomicPtr; + + static CodecStats& atomicGet(AtomicPtr& ptr, Stats::Scope& scope) { + return *ptr.get([&scope]() -> CodecStats* { + return new CodecStats{ALL_HTTP1_CODEC_STATS(POOL_COUNTER_PREFIX(scope, "http1."))}; + }); + } + ALL_HTTP1_CODEC_STATS(GENERATE_COUNTER_STRUCT) }; @@ -229,14 +237,14 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable; + + static CodecStats& atomicGet(AtomicPtr& ptr, Stats::Scope& scope) { + return *ptr.get([&scope]() -> CodecStats* { + return new CodecStats{ALL_HTTP2_CODEC_STATS(POOL_COUNTER_PREFIX(scope, "http2."))}; + }); + } + ALL_HTTP2_CODEC_STATS(GENERATE_COUNTER_STRUCT) }; @@ -114,7 +122,7 @@ class ProdNghttp2SessionFactory : public Nghttp2SessionFactory { */ class ConnectionImpl : public virtual Connection, protected Logger::Loggable { public: - ConnectionImpl(Network::Connection& connection, Stats::Scope& stats, + ConnectionImpl(Network::Connection& connection, CodecStats& stats, const envoy::config::core::v3::Http2ProtocolOptions& http2_options, const uint32_t max_headers_kb, const uint32_t max_headers_count); @@ -408,7 +416,7 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable active_streams_; nghttp2_session* session_{}; - CodecStats stats_; + CodecStats& stats_; Network::Connection& connection_; const uint32_t max_headers_kb_; const uint32_t max_headers_count_; @@ -517,7 +525,7 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable downstream_protocol) const override; + Http::Http1::CodecStats& http1CodecStats() const override; + Http::Http2::CodecStats& http2CodecStats() const override; + private: struct ResourceManagers { ResourceManagers(const envoy::config::cluster::v3::Cluster& config, Runtime::Loader& runtime, @@ -653,6 +659,8 @@ class ClusterInfoImpl : public ClusterInfo, protected Logger::Loggable cluster_type_; const std::unique_ptr factory_context_; std::vector filter_factories_; + mutable Http::Http1::CodecStats::AtomicPtr http1_codec_stats_; + mutable Http::Http2::CodecStats::AtomicPtr http2_codec_stats_; }; /** diff --git a/source/extensions/filters/network/http_connection_manager/config.cc b/source/extensions/filters/network/http_connection_manager/config.cc index 18cec14f48b4..85d78cb75623 100644 --- a/source/extensions/filters/network/http_connection_manager/config.cc +++ b/source/extensions/filters/network/http_connection_manager/config.cc @@ -474,14 +474,20 @@ HttpConnectionManagerConfig::createCodec(Network::Connection& connection, const Buffer::Instance& data, Http::ServerConnectionCallbacks& callbacks) { switch (codec_type_) { - case CodecType::HTTP1: + case CodecType::HTTP1: { + Http::Http1::CodecStats& stats = + Http::Http1::CodecStats::atomicGet(http1_codec_stats_, context_.scope()); return std::make_unique( - connection, context_.scope(), callbacks, http1_settings_, maxRequestHeadersKb(), + connection, stats, callbacks, http1_settings_, maxRequestHeadersKb(), maxRequestHeadersCount(), headersWithUnderscoresAction()); - case CodecType::HTTP2: + } + case CodecType::HTTP2: { + Http::Http2::CodecStats& stats = + Http::Http2::CodecStats::atomicGet(http2_codec_stats_, context_.scope()); return std::make_unique( - connection, callbacks, context_.scope(), http2_options_, maxRequestHeadersKb(), + connection, callbacks, stats, http2_options_, maxRequestHeadersKb(), maxRequestHeadersCount(), headersWithUnderscoresAction()); + } case CodecType::HTTP3: // Hard code Quiche factory name 
here to instantiate a QUIC codec implemented. // TODO(danzh) Add support to get the factory name from config, possibly @@ -493,10 +499,10 @@ HttpConnectionManagerConfig::createCodec(Network::Connection& connection, .createQuicServerConnection(connection, callbacks)); case CodecType::AUTO: return Http::ConnectionManagerUtility::autoCreateCodec( - connection, data, callbacks, context_.scope(), http1_settings_, http2_options_, - maxRequestHeadersKb(), maxRequestHeadersCount(), headersWithUnderscoresAction()); + connection, data, callbacks, context_.scope(), http1_codec_stats_, http2_codec_stats_, + http1_settings_, http2_options_, maxRequestHeadersKb(), maxRequestHeadersCount(), + headersWithUnderscoresAction()); } - NOT_REACHED_GCOVR_EXCL_LINE; } diff --git a/source/extensions/filters/network/http_connection_manager/config.h b/source/extensions/filters/network/http_connection_manager/config.h index 22088c3e472f..2aa7e2952ab3 100644 --- a/source/extensions/filters/network/http_connection_manager/config.h +++ b/source/extensions/filters/network/http_connection_manager/config.h @@ -18,6 +18,8 @@ #include "common/common/logger.h" #include "common/http/conn_manager_impl.h" #include "common/http/date_provider_impl.h" +#include "common/http/http1/codec_impl.h" +#include "common/http/http2/codec_impl.h" #include "common/json/json_loader.h" #include "common/router/rds_impl.h" #include "common/router/scoped_rds.h" @@ -187,6 +189,8 @@ class HttpConnectionManagerConfig : Logger::Loggable, std::list access_logs_; const std::string stats_prefix_; Http::ConnectionManagerStats stats_; + mutable Http::Http1::CodecStats::AtomicPtr http1_codec_stats_; + mutable Http::Http2::CodecStats::AtomicPtr http2_codec_stats_; Http::ConnectionManagerTracingStats tracing_stats_; const bool use_remote_address_{}; const std::unique_ptr internal_address_config_; diff --git a/source/server/admin/admin.cc b/source/server/admin/admin.cc index 2d67d02a5dfb..fb2b52fc92e1 100644 --- a/source/server/admin/admin.cc +++ b/source/server/admin/admin.cc @@ -759,7 +759,8 @@ Http::ServerConnectionPtr AdminImpl::createCodec(Network::Connection& connection const Buffer::Instance& data, Http::ServerConnectionCallbacks& callbacks) { return Http::ConnectionManagerUtility::autoCreateCodec( - connection, data, callbacks, server_.stats(), Http::Http1Settings(), + connection, data, callbacks, server_.stats(), http1_codec_stats_, http2_codec_stats_, + Http::Http1Settings(), ::Envoy::Http2::Utility::initializeAndValidateOptions( envoy::config::core::v3::Http2ProtocolOptions()), maxRequestHeadersKb(), maxRequestHeadersCount(), headersWithUnderscoresAction()); diff --git a/source/server/admin/admin.h b/source/server/admin/admin.h index d4629211a244..c5e86534dc7c 100644 --- a/source/server/admin/admin.h +++ b/source/server/admin/admin.h @@ -29,6 +29,8 @@ #include "common/http/conn_manager_impl.h" #include "common/http/date_provider_impl.h" #include "common/http/default_server_string.h" +#include "common/http/http1/codec_impl.h" +#include "common/http/http2/codec_impl.h" #include "common/http/request_id_extension_impl.h" #include "common/http/utility.h" #include "common/network/connection_balancer_impl.h" @@ -429,6 +431,8 @@ class AdminImpl : public Admin, Http::SlowDateProviderImpl date_provider_; std::vector set_current_client_cert_details_; Http::Http1Settings http1_settings_; + Http::Http1::CodecStats::AtomicPtr http1_codec_stats_; + Http::Http2::CodecStats::AtomicPtr http2_codec_stats_; ConfigTrackerImpl config_tracker_; const 
Network::FilterChainSharedPtr admin_filter_chain_; Network::SocketSharedPtr socket_; diff --git a/test/common/http/codec_impl_fuzz_test.cc b/test/common/http/codec_impl_fuzz_test.cc index aad44b0f5609..16e4eee2c960 100644 --- a/test/common/http/codec_impl_fuzz_test.cc +++ b/test/common/http/codec_impl_fuzz_test.cc @@ -465,6 +465,7 @@ void codecFuzz(const test::common::http::CodecImplFuzzTestCase& input, HttpVersi ClientConnectionPtr client; ServerConnectionPtr server; const bool http2 = http_version == HttpVersion::Http2; + Http1::CodecStats::AtomicPtr stats; if (http2) { client = std::make_unique( @@ -472,9 +473,9 @@ void codecFuzz(const test::common::http::CodecImplFuzzTestCase& input, HttpVersi max_request_headers_kb, max_response_headers_count, Http2::ProdNghttp2SessionFactory::get()); } else { - client = std::make_unique(client_connection, stats_store, - client_callbacks, client_http1settings, - max_response_headers_count); + client = std::make_unique( + client_connection, Http1::CodecStats::atomicGet(stats, stats_store), client_callbacks, + client_http1settings, max_response_headers_count); } if (http2) { @@ -486,8 +487,9 @@ void codecFuzz(const test::common::http::CodecImplFuzzTestCase& input, HttpVersi } else { const Http1Settings server_http1settings{fromHttp1Settings(input.h1_settings().server())}; server = std::make_unique( - server_connection, stats_store, server_callbacks, server_http1settings, - max_request_headers_kb, max_request_headers_count, headers_with_underscores_action); + server_connection, Http1::CodecStats::atomicGet(stats, stats_store), server_callbacks, + server_http1settings, max_request_headers_kb, max_request_headers_count, + headers_with_underscores_action); } ReorderBuffer client_write_buf{*server}; diff --git a/test/common/http/http1/codec_impl_test.cc b/test/common/http/http1/codec_impl_test.cc index ca755e94dc5e..f2dea33d03b0 100644 --- a/test/common/http/http1/codec_impl_test.cc +++ b/test/common/http/http1/codec_impl_test.cc @@ -55,11 +55,21 @@ Buffer::OwnedImpl createBufferWithNByteSlices(absl::string_view input, size_t ma } } // namespace -class Http1ServerConnectionImplTest : public testing::Test { +class Http1CodecTestBase : public testing::Test { +protected: + Http::Http1::CodecStats& http1CodecStats() { + return Http::Http1::CodecStats::atomicGet(http1_codec_stats_, store_); + } + + Stats::TestUtil::TestStore store_; + Http::Http1::CodecStats::AtomicPtr http1_codec_stats_; +}; + +class Http1ServerConnectionImplTest : public Http1CodecTestBase { public: void initialize() { codec_ = std::make_unique( - connection_, store_, callbacks_, codec_settings_, max_request_headers_kb_, + connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_); } @@ -107,7 +117,6 @@ class Http1ServerConnectionImplTest : public testing::Test { uint32_t max_request_headers_count_{Http::DEFAULT_MAX_HEADERS_COUNT}; envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction headers_with_underscores_action_{envoy::config::core::v3::HttpProtocolOptions::ALLOW}; - Stats::TestUtil::TestStore store_; }; void Http1ServerConnectionImplTest::expect400(Protocol p, bool allow_absolute_url, @@ -121,7 +130,7 @@ void Http1ServerConnectionImplTest::expect400(Protocol p, bool allow_absolute_ur if (allow_absolute_url) { codec_settings_.allow_absolute_url_ = allow_absolute_url; codec_ = std::make_unique( - connection_, store_, callbacks_, codec_settings_, max_request_headers_kb_, + 
connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); } @@ -151,7 +160,7 @@ void Http1ServerConnectionImplTest::expectHeadersTest(Protocol p, bool allow_abs if (allow_absolute_url) { codec_settings_.allow_absolute_url_ = allow_absolute_url; codec_ = std::make_unique( - connection_, store_, callbacks_, codec_settings_, max_request_headers_kb_, + connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); } @@ -172,7 +181,7 @@ void Http1ServerConnectionImplTest::expectTrailersTest(bool enable_trailers) { if (enable_trailers) { codec_settings_.enable_trailers_ = enable_trailers; codec_ = std::make_unique( - connection_, store_, callbacks_, codec_settings_, max_request_headers_kb_, + connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); } @@ -208,7 +217,7 @@ void Http1ServerConnectionImplTest::testTrailersExceedLimit(std::string trailer_ // Make a new 'codec' with the right settings codec_settings_.enable_trailers_ = enable_trailers; codec_ = std::make_unique( - connection_, store_, callbacks_, codec_settings_, max_request_headers_kb_, + connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); std::string exception_reason; NiceMock decoder; @@ -1766,10 +1775,10 @@ TEST_F(Http1ServerConnectionImplTest, WatermarkTest) { ->onUnderlyingConnectionBelowWriteBufferLowWatermark(); } -class Http1ClientConnectionImplTest : public testing::Test { +class Http1ClientConnectionImplTest : public Http1CodecTestBase { public: void initialize() { - codec_ = std::make_unique(connection_, store_, callbacks_, + codec_ = std::make_unique(connection_, http1CodecStats(), callbacks_, codec_settings_, max_response_headers_count_); } diff --git a/test/common/http/http2/codec_impl_test_util.h b/test/common/http/http2/codec_impl_test_util.h index 4eb42ac1c282..1eb8bd581a9e 100644 --- a/test/common/http/http2/codec_impl_test_util.h +++ b/test/common/http/http2/codec_impl_test_util.h @@ -9,6 +9,18 @@ namespace Envoy { namespace Http { namespace Http2 { +class TestCodecStatsProvider { +public: + TestCodecStatsProvider(Stats::Scope& scope) : scope_(scope) {} + + Http::Http2::CodecStats& http2CodecStats() { + return Http::Http2::CodecStats::atomicGet(http2_codec_stats_, scope_); + } + + Stats::Scope& scope_; + Http::Http2::CodecStats::AtomicPtr http2_codec_stats_; +}; + class TestCodecSettingsProvider { public: // Returns the value of the SETTINGS parameter keyed by |identifier| sent by the remote endpoint. 
@@ -45,7 +57,9 @@ class TestCodecSettingsProvider { std::unordered_map settings_; }; -class TestServerConnectionImpl : public ServerConnectionImpl, public TestCodecSettingsProvider { +class TestServerConnectionImpl : public TestCodecStatsProvider, + public ServerConnectionImpl, + public TestCodecSettingsProvider { public: TestServerConnectionImpl( Network::Connection& connection, ServerConnectionCallbacks& callbacks, Stats::Scope& scope, @@ -53,8 +67,10 @@ class TestServerConnectionImpl : public ServerConnectionImpl, public TestCodecSe uint32_t max_request_headers_kb, uint32_t max_request_headers_count, envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction headers_with_underscores_action) - : ServerConnectionImpl(connection, callbacks, scope, http2_options, max_request_headers_kb, - max_request_headers_count, headers_with_underscores_action) {} + : TestCodecStatsProvider(scope), + ServerConnectionImpl(connection, callbacks, http2CodecStats(), http2_options, + max_request_headers_kb, max_request_headers_count, + headers_with_underscores_action) {} nghttp2_session* session() { return session_; } using ServerConnectionImpl::getStream; @@ -63,15 +79,19 @@ class TestServerConnectionImpl : public ServerConnectionImpl, public TestCodecSe void onSettingsForTest(const nghttp2_settings& settings) override { onSettingsFrame(settings); } }; -class TestClientConnectionImpl : public ClientConnectionImpl, public TestCodecSettingsProvider { +class TestClientConnectionImpl : public TestCodecStatsProvider, + public ClientConnectionImpl, + public TestCodecSettingsProvider { public: TestClientConnectionImpl(Network::Connection& connection, Http::ConnectionCallbacks& callbacks, Stats::Scope& scope, const envoy::config::core::v3::Http2ProtocolOptions& http2_options, uint32_t max_request_headers_kb, uint32_t max_request_headers_count, Nghttp2SessionFactory& http2_session_factory) - : ClientConnectionImpl(connection, callbacks, scope, http2_options, max_request_headers_kb, - max_request_headers_count, http2_session_factory) {} + : TestCodecStatsProvider(scope), + ClientConnectionImpl(connection, callbacks, http2CodecStats(), http2_options, + max_request_headers_kb, max_request_headers_count, + http2_session_factory) {} nghttp2_session* session() { return session_; } diff --git a/test/integration/autonomous_upstream.cc b/test/integration/autonomous_upstream.cc index 70c14b75de7d..649dbe243029 100644 --- a/test/integration/autonomous_upstream.cc +++ b/test/integration/autonomous_upstream.cc @@ -62,10 +62,10 @@ void AutonomousStream::sendResponse() { encodeData(response_body_length, true); } -AutonomousHttpConnection::AutonomousHttpConnection(SharedConnectionWrapper& shared_connection, - Stats::Store& store, Type type, - AutonomousUpstream& upstream) - : FakeHttpConnection(shared_connection, store, type, upstream.timeSystem(), +AutonomousHttpConnection::AutonomousHttpConnection(AutonomousUpstream& autonomous_upstream, + SharedConnectionWrapper& shared_connection, + Type type, AutonomousUpstream& upstream) + : FakeHttpConnection(autonomous_upstream, shared_connection, type, upstream.timeSystem(), Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, envoy::config::core::v3::HttpProtocolOptions::ALLOW), upstream_(upstream) {} @@ -88,7 +88,7 @@ bool AutonomousUpstream::createNetworkFilterChain(Network::Connection& connectio const std::vector&) { shared_connections_.emplace_back(new SharedConnectionWrapper(connection, true)); AutonomousHttpConnectionPtr http_connection( - new 
AutonomousHttpConnection(*shared_connections_.back(), stats_store_, http_type_, *this)); + new AutonomousHttpConnection(*this, *shared_connections_.back(), http_type_, *this)); testing::AssertionResult result = http_connection->initialize(); RELEASE_ASSERT(result, result.message()); http_connections_.push_back(std::move(http_connection)); diff --git a/test/integration/autonomous_upstream.h b/test/integration/autonomous_upstream.h index 6c51abb00217..5abb7bc186be 100644 --- a/test/integration/autonomous_upstream.h +++ b/test/integration/autonomous_upstream.h @@ -35,8 +35,9 @@ class AutonomousStream : public FakeStream { // An upstream which creates AutonomousStreams for new incoming streams. class AutonomousHttpConnection : public FakeHttpConnection { public: - AutonomousHttpConnection(SharedConnectionWrapper& shared_connection, Stats::Store& store, - Type type, AutonomousUpstream& upstream); + AutonomousHttpConnection(AutonomousUpstream& autonomous_upstream, + SharedConnectionWrapper& shared_connection, Type type, + AutonomousUpstream& upstream); Http::RequestDecoder& newStream(Http::ResponseEncoder& response_encoder, bool) override; diff --git a/test/integration/fake_upstream.cc b/test/integration/fake_upstream.cc index 8b232ed351de..67217d6cb32f 100644 --- a/test/integration/fake_upstream.cc +++ b/test/integration/fake_upstream.cc @@ -246,7 +246,7 @@ class TestHttp1ServerConnectionImpl : public Http::Http1::ServerConnectionImpl { }; FakeHttpConnection::FakeHttpConnection( - SharedConnectionWrapper& shared_connection, Stats::Store& store, Type type, + FakeUpstream& fake_upstream, SharedConnectionWrapper& shared_connection, Type type, Event::TestTimeSystem& time_system, uint32_t max_request_headers_kb, uint32_t max_request_headers_count, envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction @@ -256,8 +256,9 @@ FakeHttpConnection::FakeHttpConnection( Http::Http1Settings http1_settings; // For the purpose of testing, we always have the upstream encode the trailers if any http1_settings.enable_trailers_ = true; + Http::Http1::CodecStats& stats = fake_upstream.http1CodecStats(); codec_ = std::make_unique( - shared_connection_.connection(), store, *this, http1_settings, max_request_headers_kb, + shared_connection_.connection(), stats, *this, http1_settings, max_request_headers_kb, max_request_headers_count, headers_with_underscores_action); } else { envoy::config::core::v3::Http2ProtocolOptions http2_options = @@ -265,8 +266,9 @@ FakeHttpConnection::FakeHttpConnection( envoy::config::core::v3::Http2ProtocolOptions()); http2_options.set_allow_connect(true); http2_options.set_allow_metadata(true); + Http::Http2::CodecStats& stats = fake_upstream.http2CodecStats(); codec_ = std::make_unique( - shared_connection_.connection(), *this, store, http2_options, max_request_headers_kb, + shared_connection_.connection(), *this, stats, http2_options, max_request_headers_kb, max_request_headers_count, headers_with_underscores_action); ASSERT(type == Type::HTTP2); } @@ -522,7 +524,7 @@ AssertionResult FakeUpstream::waitForHttpConnection( return AssertionFailure() << "Got a new connection event, but didn't create a connection."; } connection = std::make_unique( - consumeConnection(), stats_store_, http_type_, time_system, max_request_headers_kb, + *this, consumeConnection(), http_type_, time_system, max_request_headers_kb, max_request_headers_count, headers_with_underscores_action); } VERIFY_ASSERTION(connection->initialize()); @@ -554,9 +556,9 @@ 
FakeUpstream::waitForHttpConnection(Event::Dispatcher& client_dispatcher, client_dispatcher.run(Event::Dispatcher::RunType::NonBlock); } else { connection = std::make_unique( - upstream.consumeConnection(), upstream.stats_store_, upstream.http_type_, - upstream.timeSystem(), Http::DEFAULT_MAX_REQUEST_HEADERS_KB, - Http::DEFAULT_MAX_HEADERS_COUNT, envoy::config::core::v3::HttpProtocolOptions::ALLOW); + upstream, upstream.consumeConnection(), upstream.http_type_, upstream.timeSystem(), + Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, + envoy::config::core::v3::HttpProtocolOptions::ALLOW); lock.release(); VERIFY_ASSERTION(connection->initialize()); VERIFY_ASSERTION(connection->readDisable(false)); diff --git a/test/integration/fake_upstream.h b/test/integration/fake_upstream.h index 8a2e1ee20335..ec2bb436b291 100644 --- a/test/integration/fake_upstream.h +++ b/test/integration/fake_upstream.h @@ -26,6 +26,8 @@ #include "common/grpc/codec.h" #include "common/grpc/common.h" #include "common/http/exception.h" +#include "common/http/http1/codec_impl.h" +#include "common/http/http2/codec_impl.h" #include "common/network/connection_balancer_impl.h" #include "common/network/filter_impl.h" #include "common/network/listen_socket_impl.h" @@ -38,7 +40,9 @@ #include "test/test_common/utility.h" namespace Envoy { + class FakeHttpConnection; +class FakeUpstream; /** * Provides a fake HTTP stream for integration testing. @@ -420,8 +424,8 @@ class FakeHttpConnection : public Http::ServerConnectionCallbacks, public FakeCo public: enum class Type { HTTP1, HTTP2 }; - FakeHttpConnection(SharedConnectionWrapper& shared_connection, Stats::Store& store, Type type, - Event::TestTimeSystem& time_system, uint32_t max_request_headers_kb, + FakeHttpConnection(FakeUpstream& fake_upstream, SharedConnectionWrapper& shared_connection, + Type type, Event::TestTimeSystem& time_system, uint32_t max_request_headers_kb, uint32_t max_request_headers_count, envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction headers_with_underscores_action); @@ -617,6 +621,14 @@ class FakeUpstream : Logger::Loggable, // Stops the dispatcher loop and joins the listening thread. 
void cleanUp(); + Http::Http1::CodecStats& http1CodecStats() { + return Http::Http1::CodecStats::atomicGet(http1_codec_stats_, stats_store_); + } + + Http::Http2::CodecStats& http2CodecStats() { + return Http::Http2::CodecStats::atomicGet(http2_codec_stats_, stats_store_); + } + protected: Stats::IsolatedStoreImpl stats_store_; const FakeHttpConnection::Type http_type_; @@ -723,6 +735,8 @@ class FakeUpstream : Logger::Loggable, FakeListener listener_; const Network::FilterChainSharedPtr filter_chain_; std::list received_datagrams_ ABSL_GUARDED_BY(lock_); + Http::Http1::CodecStats::AtomicPtr http1_codec_stats_; + Http::Http2::CodecStats::AtomicPtr http2_codec_stats_; }; using FakeUpstreamPtr = std::unique_ptr; diff --git a/test/integration/integration_admin_test.cc b/test/integration/integration_admin_test.cc index a5ecdcf30b56..39bfb6c5b1a7 100644 --- a/test/integration/integration_admin_test.cc +++ b/test/integration/integration_admin_test.cc @@ -156,7 +156,7 @@ TEST_P(IntegrationAdminTest, Admin) { EXPECT_EQ("200", request("admin", "GET", "/stats/recentlookups", response)); EXPECT_EQ("text/plain; charset=UTF-8", ContentType(response)); EXPECT_TRUE(absl::StartsWith(response->body(), " Count Lookup\n")) << response->body(); - EXPECT_LT(30, response->body().size()); + EXPECT_LT(28, response->body().size()); // Now disable recent-lookups tracking and check that we get the error again. EXPECT_EQ("200", request("admin", "POST", "/stats/recentlookups/disable", response)); @@ -262,37 +262,17 @@ TEST_P(IntegrationAdminTest, Admin) { EXPECT_EQ("200", request("admin", "GET", "/stats/recentlookups", response)); EXPECT_EQ("text/plain; charset=UTF-8", ContentType(response)); - // TODO(#8324): "http1.metadata_not_supported_error" should not still be in - // the 'recent lookups' output after reset_counters. 
switch (GetParam().downstream_protocol) { case Http::CodecClient::Type::HTTP1: EXPECT_EQ(" Count Lookup\n" - " 1 http1.dropped_headers_with_underscores\n" - " 1 http1.metadata_not_supported_error\n" - " 1 http1.requests_rejected_with_underscores_in_headers\n" - " 1 http1.response_flood\n" "\n" - "total: 4\n", + "total: 0\n", response->body()); break; case Http::CodecClient::Type::HTTP2: EXPECT_EQ(" Count Lookup\n" - " 1 http2.dropped_headers_with_underscores\n" - " 1 http2.header_overflow\n" - " 1 http2.headers_cb_no_stream\n" - " 1 http2.inbound_empty_frames_flood\n" - " 1 http2.inbound_priority_frames_flood\n" - " 1 http2.inbound_window_update_frames_flood\n" - " 1 http2.outbound_control_flood\n" - " 1 http2.outbound_flood\n" - " 1 http2.requests_rejected_with_underscores_in_headers\n" - " 1 http2.rx_messaging_error\n" - " 1 http2.rx_reset\n" - " 1 http2.too_many_header_frames\n" - " 1 http2.trailers\n" - " 1 http2.tx_reset\n" "\n" - "total: 14\n", + "total: 0\n", response->body()); break; case Http::CodecClient::Type::HTTP3: diff --git a/test/integration/protocol_integration_test.cc b/test/integration/protocol_integration_test.cc index 731da8c44b80..cc914f9d8bc3 100644 --- a/test/integration/protocol_integration_test.cc +++ b/test/integration/protocol_integration_test.cc @@ -337,6 +337,17 @@ TEST_P(ProtocolIntegrationTest, Retry) { EXPECT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().Status()->value().getStringView()); EXPECT_EQ(512U, response->body().size()); + Stats::Store& stats = test_server_->server().stats(); + if (upstreamProtocol() == FakeHttpConnection::Type::HTTP2) { + Stats::CounterSharedPtr counter = + TestUtility::findCounter(stats, "cluster.cluster_0.http2.tx_reset"); + ASSERT_NE(nullptr, counter); + EXPECT_EQ(1L, counter->value()); + } else { + Stats::CounterSharedPtr counter = + TestUtility::findCounter(stats, "cluster.cluster_0.http1.dropped_headers_with_underscores"); + EXPECT_NE(nullptr, counter); + } } TEST_P(ProtocolIntegrationTest, RetryStreaming) { @@ -882,6 +893,11 @@ TEST_P(ProtocolIntegrationTest, HeadersWithUnderscoresDropped) { EXPECT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().Status()->value().getStringView()); EXPECT_THAT(response->headers(), HeaderHasValueRef("bar_baz", "fooz")); + Stats::Store& stats = test_server_->server().stats(); + std::string stat_name = (downstreamProtocol() == Http::CodecClient::Type::HTTP1) + ? 
"http1.dropped_headers_with_underscores" + : "http2.dropped_headers_with_underscores"; + EXPECT_EQ(1L, TestUtility::findCounter(stats, stat_name)->value()); } // Verify that by default headers with underscores in their names remain in both requests and diff --git a/test/mocks/upstream/BUILD b/test/mocks/upstream/BUILD index 7319dbfb9ad5..8c4ca0d28c68 100644 --- a/test/mocks/upstream/BUILD +++ b/test/mocks/upstream/BUILD @@ -16,8 +16,11 @@ envoy_cc_mock( ":transport_socket_match_mocks", "//include/envoy/upstream:cluster_manager_interface", "//include/envoy/upstream:upstream_interface", + "//source/common/common:thread_lib", "//source/common/config:metadata_lib", "//source/common/http:utility_lib", + "//source/common/http/http1:codec_lib", + "//source/common/http/http2:codec_lib", "//source/common/network:raw_buffer_socket_lib", "//source/common/upstream:upstream_includes", "//source/common/upstream:upstream_lib", diff --git a/test/mocks/upstream/cluster_info.cc b/test/mocks/upstream/cluster_info.cc index 215368b58c47..dd428aa124c2 100644 --- a/test/mocks/upstream/cluster_info.cc +++ b/test/mocks/upstream/cluster_info.cc @@ -101,5 +101,13 @@ MockClusterInfo::MockClusterInfo() MockClusterInfo::~MockClusterInfo() = default; +Http::Http1::CodecStats& MockClusterInfo::http1CodecStats() const { + return Http::Http1::CodecStats::atomicGet(http1_codec_stats_, statsScope()); +} + +Http::Http2::CodecStats& MockClusterInfo::http2CodecStats() const { + return Http::Http2::CodecStats::atomicGet(http2_codec_stats_, statsScope()); +} + } // namespace Upstream } // namespace Envoy diff --git a/test/mocks/upstream/cluster_info.h b/test/mocks/upstream/cluster_info.h index 9ef5fcc14618..2e99eea091ef 100644 --- a/test/mocks/upstream/cluster_info.h +++ b/test/mocks/upstream/cluster_info.h @@ -13,6 +13,9 @@ #include "envoy/upstream/cluster_manager.h" #include "envoy/upstream/upstream.h" +#include "common/common/thread.h" +#include "common/http/http1/codec_impl.h" +#include "common/http/http2/codec_impl.h" #include "common/upstream/upstream_impl.h" #include "test/mocks/runtime/mocks.h" @@ -129,6 +132,9 @@ class MockClusterInfo : public ClusterInfo { MOCK_METHOD(void, createNetworkFilterChain, (Network::Connection&), (const)); MOCK_METHOD(Http::Protocol, upstreamHttpProtocol, (absl::optional), (const)); + Http::Http1::CodecStats& http1CodecStats() const override; + Http::Http2::CodecStats& http2CodecStats() const override; + std::string name_{"fake_cluster"}; absl::optional eds_service_name_; Http::Http1Settings http1_settings_; @@ -162,6 +168,8 @@ class MockClusterInfo : public ClusterInfo { envoy::config::core::v3::Metadata metadata_; std::unique_ptr typed_metadata_; absl::optional max_stream_duration_; + mutable Http::Http1::CodecStats::AtomicPtr http1_codec_stats_; + mutable Http::Http2::CodecStats::AtomicPtr http2_codec_stats_; }; class MockIdleTimeEnabledClusterInfo : public MockClusterInfo { From cf2d4df8981bcff50d280a040cfa38a60b48f27f Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Sun, 17 May 2020 17:27:16 -0700 Subject: [PATCH 191/909] build: fix cares build (#11225) #11149 broke CentOS/RHEL build, force lib install dir to lib rather than lib64 in those distro, so rules_foreign_cc can find built static libraries. 
Signed-off-by: Lizan Zhou --- bazel/foreign_cc/BUILD | 1 + 1 file changed, 1 insertion(+) diff --git a/bazel/foreign_cc/BUILD b/bazel/foreign_cc/BUILD index 46b111981c55..9d05d6d0dd68 100644 --- a/bazel/foreign_cc/BUILD +++ b/bazel/foreign_cc/BUILD @@ -82,6 +82,7 @@ envoy_cmake_external( "CARES_SHARED": "no", "CARES_STATIC": "on", "CMAKE_CXX_COMPILER_FORCED": "on", + "CMAKE_INSTALL_LIBDIR": "lib", }, defines = ["CARES_STATICLIB"], lib_source = "@com_github_c_ares_c_ares//:all", From cf2d4df8981bcff50d280a040cfa38a60b48f27f Mon Sep 17 00:00:00 2001 From: Daniel Hochman Date: Mon, 18 May 2020 09:37:43 -0500 Subject: [PATCH 192/909] ci: save api revision in go-control-plane (#11220) When committing generated code from protos into envoyproxy/go-control-plane, also save a file 'COMMIT' with the commit SHA in the root of the generated code directory. Additional Description: I have a use case where I want to know the commit used for go-control-plane generated code and I also don't have git history available. This will allow me to read it from a file instead of relying on git history. Risk Level: Low Testing: N/A Signed-off-by: Daniel Hochman --- tools/api/generate_go_protobuf.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/tools/api/generate_go_protobuf.py b/tools/api/generate_go_protobuf.py index ba6ca16ead1e..3f60158257e5 100755 --- a/tools/api/generate_go_protobuf.py +++ b/tools/api/generate_go_protobuf.py @@ -83,6 +83,13 @@ def updatedSinceSHA(repo, last_sha): return git(None, 'rev-list', '%s..HEAD' % last_sha, 'api/envoy').split() +def writeRevisionInfo(repo, sha): + # Put a file in the generated code root containing the latest mirrored SHA + dst = os.path.join(repo, 'envoy', 'COMMIT') + with open(dst, 'w') as fh: + fh.write(sha) + + def syncGoProtobufs(output, repo): # Sync generated content against repo and return true if there is a commit necessary dst = os.path.join(repo, 'envoy') @@ -109,7 +116,9 @@ def publishGoProtobufs(repo, sha): cloneGoProtobufs(repo) last_sha = findLastSyncSHA(repo) changes = updatedSinceSHA(repo, last_sha) + new_sha = changes[0] if changes: print('Changes detected: %s' % changes) syncGoProtobufs(output, repo) - publishGoProtobufs(repo, changes[0]) + writeRevisionInfo(repo, new_sha) + publishGoProtobufs(repo, new_sha) From b867a4dfae32e600ea0a4087dc7925ded5e2ab2a Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Mon, 18 May 2020 07:40:13 -0700 Subject: [PATCH 193/909] bazelci: always exclude nocoverage tag in coverage config (#11226) #11218 broke Bazel CI: https://buildkite.com/bazel/envoy/builds/1145 Signed-off-by: Lizan Zhou --- .bazelrc | 1 + 1 file changed, 1 insertion(+) diff --git a/.bazelrc b/.bazelrc index 264eb09dc3dc..a77ca3f3d92a 100644 --- a/.bazelrc +++ b/.bazelrc @@ -126,6 +126,7 @@ build:coverage --strategy=TestRunner=sandboxed,local build:coverage --strategy=CoverageReport=sandboxed,local build:coverage --experimental_use_llvm_covmap build:coverage --collect_code_coverage +build:coverage --test_tag_filters=-nocoverage build:coverage --instrumentation_filter="//source(?!/common/chromium_url|/extensions/quic_listeners/quiche/platform)[/:],//include[/:]" coverage:test-coverage --test_arg="--log-path /dev/null" coverage:test-coverage --test_arg="-l trace" From fff1da1abffb31d20d3ea7c5e23d57139bb615cf Mon Sep 17 00:00:00 2001 From: Piotr Sikora Date: Mon, 18 May 2020 09:27:29 -0700 Subject: [PATCH 194/909] tls: update BoringSSL to 107c03cf (4103).
(#11232) Signed-off-by: Piotr Sikora --- bazel/repository_locations.bzl | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 58e6c1027175..5a4f48a8b4db 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -74,16 +74,16 @@ DEPENDENCY_REPOSITORIES = dict( use_category = ["build"], ), boringssl = dict( - sha256 = "a3d4de4f03cb321ef943678d72a045c9a19d26b23d6f4e313f97600c65201a27", - strip_prefix = "boringssl-1c2769383f027befac5b75b6cedd25daf3bf4dcf", + sha256 = "8ae14b52b7889cf92f3b107610b12afb5011506c77f90c7b3d4a36ed7283905a", + strip_prefix = "boringssl-107c03cf6d364939469194396bf7a6b2572d0f9c", # To update BoringSSL, which tracks Chromium releases: # 1. Open https://omahaproxy.appspot.com/ and note of linux/stable release. # 2. Open https://chromium.googlesource.com/chromium/src/+/refs/tags//DEPS and note . # 3. Find a commit in BoringSSL's "master-with-bazel" branch that merges . # - # chromium-81.0.4044.69 - # 2020-01-22 - urls = ["https://github.com/google/boringssl/archive/1c2769383f027befac5b75b6cedd25daf3bf4dcf.tar.gz"], + # chromium-83.0.4103.62 + # 2020-03-16 + urls = ["https://github.com/google/boringssl/archive/107c03cf6d364939469194396bf7a6b2572d0f9c.tar.gz"], use_category = ["dataplane"], cpe = "N/A", ), From d494467ccad43be35f9d5b75b276449483abfccd Mon Sep 17 00:00:00 2001 From: Piotr Sikora Date: Mon, 18 May 2020 09:29:37 -0700 Subject: [PATCH 195/909] wasm: update V8 to v8.3.110.9. (#11233) Signed-off-by: Piotr Sikora --- bazel/external/wee8.patch | 16 ++++++++-------- bazel/repository_locations.bzl | 4 ++-- .../common/wasm/test_data/test_rust.wasm | Bin 5197 -> 4592 bytes 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/bazel/external/wee8.patch b/bazel/external/wee8.patch index 3f95bc83926a..ad1c20b6c00b 100644 --- a/bazel/external/wee8.patch +++ b/bazel/external/wee8.patch @@ -1,9 +1,9 @@ # 1. Fix linking with unbundled toolchain on macOS. -# 2. Increase VSZ limit to 4TiB (allows us to start up to 370 VMs). +# 2. Increase VSZ limit to 4TiB (allows us to start up to 409 VMs). # 3. Fix MSAN linking. --- wee8/build/toolchain/gcc_toolchain.gni +++ wee8/build/toolchain/gcc_toolchain.gni -@@ -355,6 +355,8 @@ template("gcc_toolchain") { +@@ -329,6 +329,8 @@ template("gcc_toolchain") { # AIX does not support either -D (deterministic output) or response # files. command = "$ar -X64 {{arflags}} -r -c -s {{output}} {{inputs}}" @@ -12,7 +12,7 @@ } else { rspfile = "{{output}}.rsp" rspfile_content = "{{inputs}}" -@@ -546,7 +548,7 @@ template("gcc_toolchain") { +@@ -507,7 +509,7 @@ template("gcc_toolchain") { start_group_flag = "" end_group_flag = "" @@ -21,9 +21,9 @@ # the "--start-group .. --end-group" feature isn't available on the aix ld. start_group_flag = "-Wl,--start-group" end_group_flag = "-Wl,--end-group " ---- wee8/src/wasm/wasm-memory.cc -+++ wee8/src/wasm/wasm-memory.cc -@@ -142,7 +142,7 @@ void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap, +--- wee8/src/objects/backing-store.cc ++++ wee8/src/objects/backing-store.cc +@@ -34,7 +34,7 @@ constexpr bool kUseGuardRegions = false; // address space limits needs to be smaller. 
constexpr size_t kAddressSpaceLimit = 0x8000000000L; // 512 GiB #elif V8_TARGET_ARCH_64_BIT @@ -34,7 +34,7 @@ #endif --- wee8/build/config/sanitizers/sanitizers.gni +++ wee8/build/config/sanitizers/sanitizers.gni -@@ -145,7 +145,7 @@ if (current_toolchain != default_toolchain) { +@@ -147,7 +147,7 @@ if (!is_a_target_toolchain) { # standard system libraries. We have instrumented system libraries for msan, # which requires them to prevent false positives. # TODO(thakis): Maybe remove this variable. @@ -43,7 +43,7 @@ # Whether we are doing a fuzzer build. Normally this should be checked instead # of checking "use_libfuzzer || use_afl" because often developers forget to -@@ -185,8 +185,7 @@ assert(!using_sanitizer || is_clang, +@@ -195,8 +195,7 @@ assert(!using_sanitizer || is_clang, assert(!is_cfi || is_clang, "is_cfi requires setting is_clang = true in 'gn args'") diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 5a4f48a8b4db..773e9154a85a 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -396,8 +396,8 @@ DEPENDENCY_REPOSITORIES = dict( com_googlesource_chromium_v8 = dict( # This archive was created using https://storage.googleapis.com/envoyproxy-wee8/wee8-archive.sh # and contains complete checkout of V8 with all dependencies necessary to build wee8. - sha256 = "03ff00e41cf259db473dfade9548493e4a2372c0b701a66cd7ff76215bd55a64", - urls = ["https://storage.googleapis.com/envoyproxy-wee8/wee8-8.1.307.28.tar.gz"], + sha256 = "cc6f5357cd10922bfcf667bd882624ad313e21b009b919ce00f322f390012476", + urls = ["https://storage.googleapis.com/envoyproxy-wee8/wee8-8.3.110.9.tar.gz"], use_category = ["dataplane"], cpe = "N/A", ), diff --git a/test/extensions/common/wasm/test_data/test_rust.wasm b/test/extensions/common/wasm/test_data/test_rust.wasm index 2396b5badfaf827c2118d0f972ea421f4f8992db..68c30b0da4fa80b0e2d84fc61bd37af51aa2395c 100755 GIT binary patch delta 931 zcmb7?O=uHA7>0LtYqH6v`K!j5t{4!L*feqT)BIpti<;n}Ifzm!qDgIg2u2&KEfGBg zL_{nSI`$~!Qalu_Lcv2V6pCOEO2LZr(83rD%uDIu2T3uKBSaAFfGLL=SnEU10(H@4(?^(}m~VH#7Z=3j zyqJ3WwxVCb!gsth(5BKsK@Gd z5^pojRHs=s_iU%)z0Vg1>RltG(b;~#C!du-Ub$A1AQQ?6qG delta 1507 zcmeyMd{$$^UB*2d@0m05@-jGGKXdxmoE?XIC)Q6^WID-MJ^3}$YIh?&0|kvFi{#Xl zWCH^e1tS9^16>1ST?0!^hM9`$1x2aJ`MCv|IjJe}WfnlBXQ*dvV6JCm5ucNpS6UHY zVPO_;X7Y%QfIVf2CCv8@Y?&K!>pwdK!^Z~^R|FUs7?^yquH!*_9ZDCvOzA^w;FjW)_zTV1y_=3DvO$O2hc*G|a>YP<22D zg8>hc4IGn=IXZ#1?B(zceapCq*>yX!>e}hP#VTZr`e$L3{ZVsP`(h{ z#wT3UfHqchXGi~Le8Xt?oYA&{iCY^CrTL(`I-$Bspfrq+PP0Pg7eHy4f4Shc>GAPQ zw&QUH+StP5Yo0D6C}V7_?!#?g3)LY3r9tTh1Ymp=da^yQ{NxMVJd+!^SiosOlvhF* zW`Y7(A3`ULk3yrFz?BgGEa?0}Ug?P*-A`i=PnZBQ1LXGwAO;YaLMR3WC=KJI)3C(k z096MKKL#df0B}s+!E5EM&+!~8UkarY(D*QM7!3>92BL+6Rpf6Nl00Ho#oYf)aJ#_|$;5$nv}Sh6{*tKY^N=38h2O_%Lx8jcx}s zR6VjC9Fte_n*;55$Zu_!$f3&&i&hz^S`jD>8pcPb zVOeGYR2?i4bHYPnjX(s@g{*>ZLAe#q#pXSX+|kFOiecp!HOe*K$+HDTfffPFHa1Av F1_0&V(@y{Z From 5347c66ad826357db1e66786b43b71565e33999d Mon Sep 17 00:00:00 2001 From: Josh Chorlton Date: Mon, 18 May 2020 09:30:14 -0700 Subject: [PATCH 196/909] docs: host_rewrite -> host_rewrite_literal (#11229) Signed-off-by: Josh Chorlton --- docs/root/start/start.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/root/start/start.rst b/docs/root/start/start.rst index b07ccd95c8f9..586896366be5 100644 --- a/docs/root/start/start.rst +++ b/docs/root/start/start.rst @@ -80,7 +80,7 @@ The specification of the :ref:`listeners Date: Mon, 18 May 2020 17:16:22 -0500 Subject: [PATCH 197/909] fix go mirror when no changes (#11249) 
Signed-off-by: Daniel Hochman --- tools/api/generate_go_protobuf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/api/generate_go_protobuf.py b/tools/api/generate_go_protobuf.py index 3f60158257e5..746008c82c85 100755 --- a/tools/api/generate_go_protobuf.py +++ b/tools/api/generate_go_protobuf.py @@ -116,9 +116,9 @@ def publishGoProtobufs(repo, sha): cloneGoProtobufs(repo) last_sha = findLastSyncSHA(repo) changes = updatedSinceSHA(repo, last_sha) - new_sha = changes[0] if changes: print('Changes detected: %s' % changes) + new_sha = changes[0] syncGoProtobufs(output, repo) writeRevisionInfo(repo, new_sha) publishGoProtobufs(repo, new_sha) From 301003162e0f607671b0d1772ab8dbb628df9c83 Mon Sep 17 00:00:00 2001 From: antonio Date: Tue, 19 May 2020 09:23:29 -0400 Subject: [PATCH 198/909] test: Fix missing instantiation of parameterized tests. (#11247) Commit Message: test: Fix missing instantiation of parameterized tests. Additional Description: gtest silently skips parameterized that have no INSTANTIATE_TEST_SUITE_P associated with them as described at google/googletest#2683 . Thanks @yurykats for pointing me at these failing tests. Also, filed #11246 for redis tests that are not running for this same reason. Risk Level: n/a Testing: test-only changes Docs Changes: n/a Release Notes: n/a Signed-off-by: Antonio Vicente --- test/exe/main_common_test.cc | 21 +++++++------ .../filters/http/ext_authz/ext_authz_test.cc | 30 +++++++++---------- test/extensions/filters/http/grpc_web/BUILD | 1 + .../grpc_web_filter_integration_test.cc | 3 ++ 4 files changed, 31 insertions(+), 24 deletions(-) diff --git a/test/exe/main_common_test.cc b/test/exe/main_common_test.cc index 78fe3aeebfcf..2632e657280a 100644 --- a/test/exe/main_common_test.cc +++ b/test/exe/main_common_test.cc @@ -89,6 +89,9 @@ class MainCommonTest : public testing::TestWithParam argv_; }; +INSTANTIATE_TEST_SUITE_P(IpVersions, MainCommonTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); // Exercise the codepath to instantiate MainCommon and destruct it, with hot restart. TEST_P(MainCommonTest, ConstructDestructHotRestartEnabled) { @@ -119,6 +122,9 @@ TEST_P(MainCommonTest, ConstructDestructHotRestartDisabledNoInit) { // of 0x10000000000 (thread T0) class MainCommonDeathTest : public MainCommonTest {}; +INSTANTIATE_TEST_SUITE_P(IpVersions, MainCommonDeathTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); TEST_P(MainCommonDeathTest, OutOfMemoryHandler) { #if defined(__has_feature) && (__has_feature(thread_sanitizer) || __has_feature(address_sanitizer)) @@ -140,17 +146,15 @@ TEST_P(MainCommonDeathTest, OutOfMemoryHandler) { for (uint64_t size = initial; size >= initial; // Disallow wraparound to avoid infinite loops on failure. size *= 1000) { - new int[size]; + int* p = new int[size]; + // Use the pointer to prevent clang from optimizing the allocation away in opt mode. 
+ ENVOY_LOG_MISC(debug, "p={}", reinterpret_cast(p)); } }(), ".*panic: out of memory.*"); #endif } -INSTANTIATE_TEST_SUITE_P(IpVersions, MainCommonTest, - testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), - TestUtility::ipTestParamsToString); - class AdminRequestTest : public MainCommonTest { protected: AdminRequestTest() { addArg("--disable-hot-restart"); } @@ -242,6 +246,9 @@ class AdminRequestTest : public MainCommonTest { bool pause_before_run_{false}; bool pause_after_run_{false}; }; +INSTANTIATE_TEST_SUITE_P(IpVersions, AdminRequestTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); TEST_P(AdminRequestTest, AdminRequestGetStatsAndQuit) { startEnvoy(); @@ -405,8 +412,4 @@ TEST_P(MainCommonTest, ConstructDestructLogger) { Logger::Registry::getSink()->log(log_msg); } -INSTANTIATE_TEST_SUITE_P(IpVersions, AdminRequestTest, - testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), - TestUtility::ipTestParamsToString); - } // namespace Envoy diff --git a/test/extensions/filters/http/ext_authz/ext_authz_test.cc b/test/extensions/filters/http/ext_authz/ext_authz_test.cc index 957ed37467e7..c72148f0f2cf 100644 --- a/test/extensions/filters/http/ext_authz/ext_authz_test.cc +++ b/test/extensions/filters/http/ext_authz/ext_authz_test.cc @@ -915,7 +915,7 @@ TEST_F(HttpFilterTest, FilterEnabled) { // ------------------- // Test that context extensions make it into the check request. -TEST_F(HttpFilterTestParam, ContextExtensions) { +TEST_P(HttpFilterTestParam, ContextExtensions) { // Place something in the context extensions on the virtualhost. envoy::extensions::filters::http::ext_authz::v3::ExtAuthzPerRoute settingsvhost; (*settingsvhost.mutable_check_settings()->mutable_context_extensions())["key_vhost"] = @@ -957,7 +957,7 @@ TEST_F(HttpFilterTestParam, ContextExtensions) { } // Test that filter can be disabled with route config. -TEST_F(HttpFilterTestParam, DisabledOnRoute) { +TEST_P(HttpFilterTestParam, DisabledOnRoute) { envoy::extensions::filters::http::ext_authz::v3::ExtAuthzPerRoute settings; FilterConfigPerRoute auth_per_route(settings); @@ -990,7 +990,7 @@ TEST_F(HttpFilterTestParam, DisabledOnRoute) { } // Test that filter can be disabled with route config. -TEST_F(HttpFilterTestParam, DisabledOnRouteWithRequestBody) { +TEST_P(HttpFilterTestParam, DisabledOnRouteWithRequestBody) { envoy::extensions::filters::http::ext_authz::v3::ExtAuthzPerRoute settings; FilterConfigPerRoute auth_per_route(settings); @@ -1036,7 +1036,7 @@ TEST_F(HttpFilterTestParam, DisabledOnRouteWithRequestBody) { } // Test that the request continues when the filter_callbacks has no route. -TEST_F(HttpFilterTestParam, NoRoute) { +TEST_P(HttpFilterTestParam, NoRoute) { EXPECT_CALL(*filter_callbacks_.route_, routeEntry()).WillOnce(Return(nullptr)); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false)); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false)); @@ -1044,7 +1044,7 @@ TEST_F(HttpFilterTestParam, NoRoute) { } // Test that the request is stopped till there is an OK response back after which it continues on. -TEST_F(HttpFilterTestParam, OkResponse) { +TEST_P(HttpFilterTestParam, OkResponse) { InSequence s; prepareCheck(); @@ -1075,7 +1075,7 @@ TEST_F(HttpFilterTestParam, OkResponse) { // Test that an synchronous OK response from the authorization service, on the call stack, results // in request continuing on. 
-TEST_F(HttpFilterTestParam, ImmediateOkResponse) { +TEST_P(HttpFilterTestParam, ImmediateOkResponse) { InSequence s; prepareCheck(); @@ -1099,7 +1099,7 @@ TEST_F(HttpFilterTestParam, ImmediateOkResponse) { // Test that an synchronous denied response from the authorization service passing additional HTTP // attributes to the downstream. -TEST_F(HttpFilterTestParam, ImmediateDeniedResponseWithHttpAttributes) { +TEST_P(HttpFilterTestParam, ImmediateDeniedResponseWithHttpAttributes) { InSequence s; prepareCheck(); @@ -1130,7 +1130,7 @@ TEST_F(HttpFilterTestParam, ImmediateDeniedResponseWithHttpAttributes) { // Test that an synchronous ok response from the authorization service passing additional HTTP // attributes to the upstream. -TEST_F(HttpFilterTestParam, ImmediateOkResponseWithHttpAttributes) { +TEST_P(HttpFilterTestParam, ImmediateOkResponseWithHttpAttributes) { InSequence s; // `bar` will be appended to this header. @@ -1170,7 +1170,7 @@ TEST_F(HttpFilterTestParam, ImmediateOkResponseWithHttpAttributes) { // Test that an synchronous denied response from the authorization service, on the call stack, // results in request not continuing. -TEST_F(HttpFilterTestParam, ImmediateDeniedResponse) { +TEST_P(HttpFilterTestParam, ImmediateDeniedResponse) { InSequence s; prepareCheck(); @@ -1194,7 +1194,7 @@ TEST_F(HttpFilterTestParam, ImmediateDeniedResponse) { } // Test that a denied response results in the connection closing with a 401 response to the client. -TEST_F(HttpFilterTestParam, DeniedResponseWith401) { +TEST_P(HttpFilterTestParam, DeniedResponseWith401) { InSequence s; prepareCheck(); @@ -1226,7 +1226,7 @@ TEST_F(HttpFilterTestParam, DeniedResponseWith401) { } // Test that a denied response results in the connection closing with a 403 response to the client. -TEST_F(HttpFilterTestParam, DeniedResponseWith403) { +TEST_P(HttpFilterTestParam, DeniedResponseWith403) { InSequence s; prepareCheck(); @@ -1261,7 +1261,7 @@ TEST_F(HttpFilterTestParam, DeniedResponseWith403) { } // Verify that authz response memory is not used after free. -TEST_F(HttpFilterTestParam, DestroyResponseBeforeSendLocalReply) { +TEST_P(HttpFilterTestParam, DestroyResponseBeforeSendLocalReply) { InSequence s; Filters::Common::ExtAuthz::Response response{}; @@ -1315,7 +1315,7 @@ TEST_F(HttpFilterTestParam, DestroyResponseBeforeSendLocalReply) { // Verify that authz denied response headers overrides the existing encoding headers, // and that it adds repeated header names using the standard method of comma concatenation of values // for predefined inline headers while repeating other headers -TEST_F(HttpFilterTestParam, OverrideEncodingHeaders) { +TEST_P(HttpFilterTestParam, OverrideEncodingHeaders) { InSequence s; Filters::Common::ExtAuthz::Response response{}; @@ -1385,7 +1385,7 @@ TEST_F(HttpFilterTestParam, OverrideEncodingHeaders) { // Test that when a connection awaiting a authorization response is canceled then the // authorization call is closed. -TEST_F(HttpFilterTestParam, ResetDuringCall) { +TEST_P(HttpFilterTestParam, ResetDuringCall) { InSequence s; prepareCheck(); @@ -1403,7 +1403,7 @@ TEST_F(HttpFilterTestParam, ResetDuringCall) { // Regression test for https://github.com/envoyproxy/envoy/pull/8436. // Test that ext_authz filter is not in noop mode when cluster is not specified per route // (this could be the case when route is configured with redirect or direct response action). 
-TEST_F(HttpFilterTestParam, NoCluster) { +TEST_P(HttpFilterTestParam, NoCluster) { ON_CALL(filter_callbacks_, clusterInfo()).WillByDefault(Return(nullptr)); diff --git a/test/extensions/filters/http/grpc_web/BUILD b/test/extensions/filters/http/grpc_web/BUILD index 5c35c5c6c357..ef16548a00dc 100644 --- a/test/extensions/filters/http/grpc_web/BUILD +++ b/test/extensions/filters/http/grpc_web/BUILD @@ -42,6 +42,7 @@ envoy_extension_cc_test( deps = [ "//source/common/buffer:buffer_lib", "//source/common/http:header_map_lib", + "//source/extensions/filters/http/grpc_web:config", "//source/extensions/filters/http/grpc_web:grpc_web_filter_lib", "//test/integration:http_integration_lib", "//test/mocks/upstream:upstream_mocks", diff --git a/test/extensions/filters/http/grpc_web/grpc_web_filter_integration_test.cc b/test/extensions/filters/http/grpc_web/grpc_web_filter_integration_test.cc index 773a5f035433..e34e8ac047e9 100644 --- a/test/extensions/filters/http/grpc_web/grpc_web_filter_integration_test.cc +++ b/test/extensions/filters/http/grpc_web/grpc_web_filter_integration_test.cc @@ -20,6 +20,9 @@ class GrpcWebFilterIntegrationTest : public ::testing::TestWithParam Date: Wed, 20 May 2020 01:27:41 +1200 Subject: [PATCH 199/909] deps: update datadog tracer to v1.1.5 (#11253) Commit Message: deps: update datadog tracer to v1.1.5 Additional Description: The updated tracer contains a different sampling implementation, and some additional settings can be overridden by environment variables. Also included fixes for errors reported by envoy's CI in #11094 Risk Level: Low Testing: end-to-end tests Docs Changes: N/A Release Notes: N/A Signed-off-by: Caleb Gilmour --- bazel/repository_locations.bzl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 773e9154a85a..6af4a65a0d1a 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -223,9 +223,9 @@ DEPENDENCY_REPOSITORIES = dict( cpe = "N/A", ), com_github_datadog_dd_opentracing_cpp = dict( - sha256 = "6dc1088ab7f788b6c849fbaa6300517c8fdf88991a70b778be79c284c36857bf", - strip_prefix = "dd-opentracing-cpp-1.1.3", - urls = ["https://github.com/DataDog/dd-opentracing-cpp/archive/v1.1.3.tar.gz"], + sha256 = "b84fd2fb0bb0578af4901db31d1c0ae909b532a1016fe6534cbe31a6c3ad6924", + strip_prefix = "dd-opentracing-cpp-1.1.5", + urls = ["https://github.com/DataDog/dd-opentracing-cpp/archive/v1.1.5.tar.gz"], use_category = ["observability"], cpe = "N/A", ), From b86e0a729720b9690a53ef6bfda303010bc0c589 Mon Sep 17 00:00:00 2001 From: Timofei Bredov Date: Tue, 19 May 2020 19:55:15 +0300 Subject: [PATCH 200/909] docs: update upstream network filters description (#11231) Signed-off-by: Tsimafei Bredau --- .../intro/arch_overview/upstream/upstream_filters.rst | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/docs/root/intro/arch_overview/upstream/upstream_filters.rst b/docs/root/intro/arch_overview/upstream/upstream_filters.rst index 1fe902dcf919..4a2b4da0d3b3 100644 --- a/docs/root/intro/arch_overview/upstream/upstream_filters.rst +++ b/docs/root/intro/arch_overview/upstream/upstream_filters.rst @@ -4,8 +4,10 @@ Upstream network filters ======================== Upstream clusters provide an ability to inject network level (L3/L4) -:ref:`filters `. The filters apply to the -connection to the upstream hosts, using the same API presented by listeners for -the downstream connections. 
The write callbacks are invoked for any chunk of -data sent to the upstream host, and the read callbacks are invoked for data +filters. It should be noted that a network filter needs to +be registered in code as an upstream filter before usage. Currently, +there are no upstream filters available in Envoy out of the box. +The filters apply to the connection to the upstream hosts, using the same API presented by listeners for +the downstream connections. The write-callbacks are invoked for any chunk of +data sent to the upstream host, and the read-callbacks are invoked for data received from the upstream host. From 3c6a95ec28515372aa1638e40159c0880ee53447 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Tue, 19 May 2020 14:58:57 -0400 Subject: [PATCH 201/909] http: fixing CONNECT to not advertise chunk encoding. (#11245) Risk Level: low (connect only) Testing: UT, IT Docs Changes: n/a Release Notes: n/a Fixes #11227 Part of #1451 Signed-off-by: Alyssa Wilk --- source/common/http/BUILD | 1 + source/common/http/conn_manager_impl.cc | 5 ++-- source/common/http/header_utility.cc | 8 ++++++ source/common/http/header_utility.h | 6 +++++ source/common/http/http1/codec_impl.cc | 17 +++++++++++-- test/common/http/header_utility_test.cc | 12 +++++++++ test/integration/integration_test.cc | 33 +++++++++---------------- 7 files changed, 56 insertions(+), 26 deletions(-) diff --git a/source/common/http/BUILD b/source/common/http/BUILD index 5e78b0ed8def..c5b14f4a4b91 100644 --- a/source/common/http/BUILD +++ b/source/common/http/BUILD @@ -371,6 +371,7 @@ envoy_cc_library( ], deps = [ ":header_map_lib", + ":utility_lib", "//include/envoy/common:regex_interface", "//include/envoy/http:header_map_interface", "//include/envoy/json:json_object_interface", diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index c8eba98605e7..ac3bd1157891 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -1714,9 +1714,10 @@ void ConnectionManagerImpl::ActiveStream::encodeHeadersInternal(ResponseHeaderMa if (connection_manager_.drain_state_ != DrainState::NotDraining && connection_manager_.codec_->protocol() < Protocol::Http2) { // If the connection manager is draining send "Connection: Close" on HTTP/1.1 connections. 
- // Do not do this for H2 (which drains via GOAWAY) or Upgrade (as the upgrade + // Do not do this for H2 (which drains via GOAWAY) or Upgrade or CONNECT (as the // payload is no longer HTTP/1.1) - if (!Utility::isUpgrade(headers)) { + if (!Utility::isUpgrade(headers) && + !HeaderUtility::isConnectResponse(request_headers_, *response_headers_)) { headers.setReferenceConnection(Headers::get().ConnectionValues.Close); } } diff --git a/source/common/http/header_utility.cc b/source/common/http/header_utility.cc index 38e8256e1144..00f089cd10c1 100644 --- a/source/common/http/header_utility.cc +++ b/source/common/http/header_utility.cc @@ -5,6 +5,7 @@ #include "common/common/regex.h" #include "common/common/utility.h" #include "common/http/header_map_impl.h" +#include "common/http/utility.h" #include "common/protobuf/utility.h" #include "common/runtime/runtime_features.h" @@ -161,6 +162,13 @@ bool HeaderUtility::isConnect(const RequestHeaderMap& headers) { return headers.Method() && headers.Method()->value() == Http::Headers::get().MethodValues.Connect; } +bool HeaderUtility::isConnectResponse(const RequestHeaderMapPtr& request_headers, + const ResponseHeaderMap& response_headers) { + return request_headers.get() && isConnect(*request_headers) && + static_cast(Http::Utility::getResponseStatus(response_headers)) == + Http::Code::OK; +} + void HeaderUtility::addHeaders(HeaderMap& headers, const HeaderMap& headers_to_add) { headers_to_add.iterate( [](const HeaderEntry& header, void* context) -> HeaderMap::Iterate { diff --git a/source/common/http/header_utility.h b/source/common/http/header_utility.h index 71d45f8d3763..b357563b4c9a 100644 --- a/source/common/http/header_utility.h +++ b/source/common/http/header_utility.h @@ -117,6 +117,12 @@ class HeaderUtility { */ static bool isConnect(const RequestHeaderMap& headers); + /** + * @brief a helper function to determine if the headers represent an accepted CONNECT response. + */ + static bool isConnectResponse(const RequestHeaderMapPtr& request_headers, + const ResponseHeaderMap& response_headers); + /** * Add headers from one HeaderMap to another * @param headers target where headers will be added diff --git a/source/common/http/http1/codec_impl.cc b/source/common/http/http1/codec_impl.cc index 0b7dab12d1fa..2ab8d5be3a2d 100644 --- a/source/common/http/http1/codec_impl.cc +++ b/source/common/http/http1/codec_impl.cc @@ -165,8 +165,12 @@ void StreamEncoderImpl::encodeHeadersBase(const RequestOrResponseHeaderMap& head } else if (connection_.protocol() == Protocol::Http10) { chunk_encoding_ = false; } else { - encodeFormattedHeader(Headers::get().TransferEncoding.get(), - Headers::get().TransferEncodingValues.Chunked); + // For responses to connect requests, do not send the chunked encoding header: + // https://tools.ietf.org/html/rfc7231#section-4.3.6 + if (!is_response_to_connect_request_) { + encodeFormattedHeader(Headers::get().TransferEncoding.get(), + Headers::get().TransferEncodingValues.Chunked); + } // We do not apply chunk encoding for HTTP upgrades, including CONNECT style upgrades. 
// If there is a body in a response on the upgrade path, the chunks will be // passed through via maybeDirectDispatch so we need to avoid appending @@ -1050,6 +1054,15 @@ int ClientConnectionImpl::onHeadersComplete() { pending_response_.value().encoder_.connectRequest()) { ENVOY_CONN_LOG(trace, "codec entering upgrade mode for CONNECT response.", connection_); handling_upgrade_ = true; + + // For responses to connect requests, do not accept the chunked + // encoding header: https://tools.ietf.org/html/rfc7231#section-4.3.6 + if (headers->TransferEncoding() && + absl::EqualsIgnoreCase(headers->TransferEncoding()->value().getStringView(), + Headers::get().TransferEncodingValues.Chunked)) { + sendProtocolError(Http1ResponseCodeDetails::get().InvalidTransferEncoding); + throw CodecProtocolException("http/1.1 protocol error: unsupported transfer encoding"); + } } if (parser_.status_code == 100) { diff --git a/test/common/http/header_utility_test.cc b/test/common/http/header_utility_test.cc index 62ec0c0ff7c6..229b4e172bce 100644 --- a/test/common/http/header_utility_test.cc +++ b/test/common/http/header_utility_test.cc @@ -526,6 +526,18 @@ TEST(HeaderIsValidTest, IsConnect) { EXPECT_FALSE(HeaderUtility::isConnect(Http::TestRequestHeaderMapImpl{})); } +TEST(HeaderIsValidTest, IsConnectResponse) { + RequestHeaderMapPtr connect_request{new TestRequestHeaderMapImpl{{":method", "CONNECT"}}}; + RequestHeaderMapPtr get_request{new TestRequestHeaderMapImpl{{":method", "GET"}}}; + TestResponseHeaderMapImpl success_response{{":status", "200"}}; + TestResponseHeaderMapImpl failure_response{{":status", "500"}}; + + EXPECT_TRUE(HeaderUtility::isConnectResponse(connect_request, success_response)); + EXPECT_FALSE(HeaderUtility::isConnectResponse(connect_request, failure_response)); + EXPECT_FALSE(HeaderUtility::isConnectResponse(nullptr, success_response)); + EXPECT_FALSE(HeaderUtility::isConnectResponse(get_request, success_response)); +} + TEST(HeaderAddTest, HeaderAdd) { TestHeaderMapImpl headers{{"myheader1", "123value"}}; TestHeaderMapImpl headers_to_add{{"myheader2", "456value"}}; diff --git a/test/integration/integration_test.cc b/test/integration/integration_test.cc index 6f0a501ac3bc..9a238319fff4 100644 --- a/test/integration/integration_test.cc +++ b/test/integration/integration_test.cc @@ -1304,8 +1304,10 @@ TEST_P(IntegrationTest, ConnectWithNoBody) { hcm) -> void { ConfigHelper::setConnectConfig(hcm, false); }); initialize(); + // Send the payload early so we can regression test that body data does not + // get proxied until after the response headers are sent. IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("http")); - tcp_client->write("CONNECT host.com:80 HTTP/1.1\r\n\r\n", false); + tcp_client->write("CONNECT host.com:80 HTTP/1.1\r\n\r\npayload", false); FakeRawConnectionPtr fake_upstream_connection; ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); @@ -1313,20 +1315,22 @@ TEST_P(IntegrationTest, ConnectWithNoBody) { ASSERT_TRUE(fake_upstream_connection->waitForData( FakeRawConnection::waitForInexactMatch("\r\n\r\n"), &data)); EXPECT_TRUE(absl::StartsWith(data, "CONNECT host.com:80 HTTP/1.1")); + // The payload should not be present as the response headers have not been sent. 
+ EXPECT_FALSE(absl::StrContains(data, "payload")) << data; // No transfer-encoding: chunked or connection: close EXPECT_FALSE(absl::StrContains(data, "hunked")) << data; EXPECT_FALSE(absl::StrContains(data, "onnection")) << data; - ASSERT_TRUE(fake_upstream_connection->write("HTTP/1.1 200 OK\r\nContent-length: 0\r\n\r\n")); + ASSERT_TRUE(fake_upstream_connection->write("HTTP/1.1 200 OK\r\n\r\n")); tcp_client->waitForData("\r\n\r\n", false); EXPECT_TRUE(absl::StartsWith(tcp_client->data(), "HTTP/1.1 200 OK\r\n")) << tcp_client->data(); // Make sure the following payload is proxied without chunks or any other modifications. - tcp_client->write("payload"); ASSERT_TRUE(fake_upstream_connection->waitForData( FakeRawConnection::waitForInexactMatch("\r\n\r\npayload"), &data)); ASSERT_TRUE(fake_upstream_connection->write("return-payload")); tcp_client->waitForData("\r\n\r\nreturn-payload", false); + EXPECT_FALSE(absl::StrContains(tcp_client->data(), "hunked")); tcp_client->close(); ASSERT_TRUE(fake_upstream_connection->waitForDisconnect()); @@ -1338,8 +1342,6 @@ TEST_P(IntegrationTest, ConnectWithChunkedBody) { hcm) -> void { ConfigHelper::setConnectConfig(hcm, false); }); initialize(); - // Send the payload early so we can regression test that body data does not - // get proxied until after the response headers are sent. IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("http")); tcp_client->write("CONNECT host.com:80 HTTP/1.1\r\n\r\npayload", false); @@ -1351,25 +1353,12 @@ TEST_P(IntegrationTest, ConnectWithChunkedBody) { // No transfer-encoding: chunked or connection: close EXPECT_FALSE(absl::StrContains(data, "hunked")) << data; EXPECT_FALSE(absl::StrContains(data, "onnection")) << data; - // The payload should not be present as the response headers have not been sent. - EXPECT_FALSE(absl::StrContains(data, "payload")) << data; - ASSERT_TRUE(fake_upstream_connection->write( "HTTP/1.1 200 OK\r\ntransfer-encoding: chunked\r\n\r\nb\r\nHello World\r\n0\r\n\r\n")); - tcp_client->waitForData("0\r\n\r\n", false); - EXPECT_TRUE(absl::StartsWith(tcp_client->data(), "HTTP/1.1 200 OK\r\n")); - EXPECT_TRUE(absl::StrContains(tcp_client->data(), "hunked")) << tcp_client->data(); - EXPECT_TRUE(absl::StrContains(tcp_client->data(), "\r\n\r\nb\r\nHello World\r\n0\r\n\r\n")) - << tcp_client->data(); - - // Make sure the early payload is proxied without chunks or any other modifications. - ASSERT_TRUE(fake_upstream_connection->waitForData( - FakeRawConnection::waitForInexactMatch("\r\n\r\npayload"))); - - ASSERT_TRUE(fake_upstream_connection->write("return-payload")); - tcp_client->waitForData("\r\n\r\nreturn-payload", false); - - tcp_client->close(); + // The response will be rejected because chunked headers are not allowed with CONNECT upgrades. + // Envoy will send a local reply due to the invalid upstream response. + tcp_client->waitForDisconnect(false); + EXPECT_TRUE(absl::StartsWith(tcp_client->data(), "HTTP/1.1 503 Service Unavailable\r\n")); ASSERT_TRUE(fake_upstream_connection->waitForDisconnect()); } From a691438153d70c5af357923f686ff590384f3cb2 Mon Sep 17 00:00:00 2001 From: htuch Date: Tue, 19 May 2020 17:27:19 -0400 Subject: [PATCH 202/909] security: update policy for fix/disclosure SLOs. (#11243) The idea is to prepare for the Envoy bug bounty, help burn down the envoy-setec backlog and set expectations to disclosers. The 90 days limit comes from the fuzz bug disclosure deadline and https://www.google.com/about/appsecurity/. 
Signed-off-by: Harvey Tuch --- SECURITY.md | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/SECURITY.md b/SECURITY.md index 40ebecff3bce..8b5a8504bc58 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -118,12 +118,36 @@ score](https://www.first.org/cvss/specification-document#i5)) the Fix Team can d release process down in the face of holidays, developer bandwidth, etc. These decisions must be discussed on the envoy-security mailing list. -A two week window will be provided to members of the private distributor list from candidate patch +A three week window will be provided to members of the private distributor list from candidate patch availability until the security release date. It is expected that distributors will normally be able to perform a release within this time window. If there are exceptional circumstances, the Envoy security team will raise this window to four weeks. The release window will be reduced if the security issue is public or embargo is broken. +### Fix and disclosure SLOs + +* All reports to envoy-security@googlegroups.com will be triaged and have an + initial response within 1 business day. + +* Privately disclosed issues will be fixed or publicly disclosed within 90 days + by the Envoy security team. In exceptional circumstances we reserve the right + to work with the discloser to coordinate on an extension, but this will be + rarely used. + +* Any issue discovered by the Envoy security team and raised in our private bug + tracker will be converted to a public issue within 90 days. We will regularly + audit these issues to ensure that no major vulnerability (from the perspective + of the threat model) is accidentally leaked. + +* Fuzz bugs are subject to a 90 day disclosure deadline. + +* Three weeks notice will be provided to private distributors from patch + availability until the embargo deadline. + +* Public zero days will be fixed ASAP, but there is no SLO for this, since this + will depend on the severity and impact to the organizations backing the Envoy + security team. 
+ ### Fix Disclosure Process With the fix development underway, the Fix Lead needs to come up with an overall communication plan From 01cc47a337d6bb2e46500f7e653057d04382e6b1 Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Wed, 20 May 2020 01:07:53 -0700 Subject: [PATCH 203/909] devex: initial commit of devcontainer setup (#11207) Risk Level: Low (not affecting any output) Testing: local Docs Changes: `.devcontainer/README.md` Release Notes: N/A Signed-off-by: Lizan Zhou --- .devcontainer/.gitignore | 1 + .devcontainer/Dockerfile | 21 +++++++++++++++++++ .devcontainer/README.md | 35 +++++++++++++++++++++++++++++++ .devcontainer/devcontainer.json | 33 +++++++++++++++++++++++++++++ .devcontainer/setup.sh | 15 +++++++++++++ .vscode/.gitignore | 2 ++ .vscode/tasks.json | 31 +++++++++++++++++++++++++++ bazel/setup_clang.sh | 3 +-- tools/gen_compilation_database.py | 12 ++++++++++- tools/vscode/refresh_compdb.sh | 9 ++++++++ 10 files changed, 159 insertions(+), 3 deletions(-) create mode 100644 .devcontainer/.gitignore create mode 100644 .devcontainer/Dockerfile create mode 100644 .devcontainer/README.md create mode 100644 .devcontainer/devcontainer.json create mode 100755 .devcontainer/setup.sh create mode 100644 .vscode/.gitignore create mode 100644 .vscode/tasks.json create mode 100755 tools/vscode/refresh_compdb.sh diff --git a/.devcontainer/.gitignore b/.devcontainer/.gitignore new file mode 100644 index 000000000000..55abd6a0566c --- /dev/null +++ b/.devcontainer/.gitignore @@ -0,0 +1 @@ +devcontainer.env diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile new file mode 100644 index 000000000000..d17a43c3f431 --- /dev/null +++ b/.devcontainer/Dockerfile @@ -0,0 +1,21 @@ +FROM gcr.io/envoy-ci/envoy-build:04f06115b6ee7cfea74930353fb47a41149cbec3 + +ARG USERNAME=vscode +ARG USER_UID=501 +ARG USER_GID=$USER_UID + +ENV BUILD_DIR=/build +ENV ENVOY_STDLIB=libstdc++ + +ENV DEBIAN_FRONTEND=noninteractive +RUN apt-get -y update \ + && apt-get -y install --no-install-recommends libpython2.7 net-tools psmisc vim 2>&1 \ + # + # Create a non-root user to use if preferred - see https://aka.ms/vscode-remote/containers/non-root-user. + && groupadd --gid $USER_GID $USERNAME \ + && useradd -s /bin/bash --uid $USER_UID --gid $USER_GID -m $USERNAME \ + # [Optional] Add sudo support for non-root user + && echo $USERNAME ALL=\(root\) NOPASSWD:ALL > /etc/sudoers.d/$USERNAME \ + && chmod 0440 /etc/sudoers.d/$USERNAME + +ENV DEBIAN_FRONTEND= diff --git a/.devcontainer/README.md b/.devcontainer/README.md new file mode 100644 index 000000000000..1cd314d2e4e0 --- /dev/null +++ b/.devcontainer/README.md @@ -0,0 +1,35 @@ +# Envoy Dev Container (experimental) + +This directory contains some experimental tools for Envoy Development in [VSCode Remote - Containers](https://code.visualstudio.com/docs/remote/containers). + +## How to use + +Open with VSCode with the Container extension installed. Follow the [official guide](https://code.visualstudio.com/docs/remote/containers) to open this +repository directly from GitHub or from checked-out source tree. + +After opening, run the `Refresh Compilation Database` task to generate compilation database to navigate in source code. +This will run partial build of Envoy and may take a while depends on the machine performance. 
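For reference, the same refresh can also be run by hand from a terminal inside the container. A minimal sketch, mirroring what the `tools/vscode/refresh_compdb.sh` script introduced later in this change does (exact flags may differ):

```shell
# Regenerate compile_commands.json after BUILD or API proto changes; the
# --platform_suffix keeps compdb outputs from clobbering a regular build.
BAZEL_BUILD_OPTIONS=--platform_suffix=-compdb tools/gen_compilation_database.py --run_bazel_build -k
```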
+This task needs to be run every time after:
+- Changing a BUILD file that adds/removes files from a target or changes dependencies
+- Changing API proto files
+
+## Advanced Usage
+
+### Using Remote Build Execution
+
+Write the following content to `devcontainer.env` and rebuild the container. The key will be persisted in the container's `~/.bazelrc`.
+
+```
+GCP_SERVICE_ACCOUNT_KEY=
+BAZEL_REMOTE_INSTANCE=
+BAZEL_REMOTE_CACHE=grpcs://remotebuildexecution.googleapis.com
+BAZEL_BUILD_EXTRA_OPTIONS=--config=remote-ci --config=remote --jobs=
+```
+
+By default, `--config=remote` implies [`--remote_download_toplevel`](https://docs.bazel.build/versions/master/command-line-reference.html#flag--remote_download_toplevel);
+change this to `minimal` or `all`, depending on where you're running the container, by adding the flag to `BAZEL_BUILD_EXTRA_OPTIONS`.
+
+### Disk performance
+
+Docker for Mac/Windows is known to have disk performance issues, which makes formatting all files in the container very slow.
+[Updating the mount consistency to 'delegated'](https://code.visualstudio.com/docs/remote/containers-advanced#_update-the-mount-consistency-to-delegated-for-macos) is recommended.
diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
new file mode 100644
index 000000000000..b4c56432cec5
--- /dev/null
+++ b/.devcontainer/devcontainer.json
@@ -0,0 +1,33 @@
+{
+  "name": "Envoy Dev",
+  "dockerFile": "Dockerfile",
+  "runArgs": [
+    "--user=vscode",
+    "--cap-add=SYS_PTRACE",
+    "--security-opt=seccomp=unconfined",
+    "--volume=${env:HOME}:${env:HOME}",
+    "--volume=envoy-build:/build",
+    // Uncomment next line if you have devcontainer.env
+    // "--env-file=.devcontainer/devcontainer.env"
+  ],
+  "settings": {
+    "terminal.integrated.shell.linux": "/bin/bash",
+    "bazel.buildifierFixOnFormat": true,
+    "clangd.path": "/opt/llvm/bin/clangd",
+    "python.pythonPath": "/usr/bin/python3",
+    "files.exclude": {
+      "**/.clangd/**": true,
+      "**/bazel-*/**": true
+    }
+  },
+  "remoteUser": "vscode",
+  "containerUser": "vscode",
+  "postCreateCommand": ".devcontainer/setup.sh",
+  "extensions": [
+    "github.vscode-pull-request-github",
+    "zxh404.vscode-proto3",
+    "bazelbuild.vscode-bazel",
+    "llvm-vs-code-extensions.vscode-clangd",
+    "webfreak.debug"
+  ]
+}
\ No newline at end of file
diff --git a/.devcontainer/setup.sh b/.devcontainer/setup.sh
new file mode 100755
index 000000000000..4dd2ddbff92b
--- /dev/null
+++ b/.devcontainer/setup.sh
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+
+. ci/setup_cache.sh
+trap - EXIT # Don't remove the key file written into a temporary file
+
+BAZELRC_FILE=~/.bazelrc bazel/setup_clang.sh /opt/llvm
+
+# Use generated toolchain config because we know the base container is the one we're using in RBE.
+# Not using libc++ here because clangd will raise some tidy issue in libc++ header as of version 9.
+echo "build --config=rbe-toolchain-clang" >> ~/.bazelrc
+echo "build --symlink_prefix=/" >> ~/.bazelrc
+echo "build ${BAZEL_BUILD_EXTRA_OPTIONS}" | tee -a ~/.bazelrc
+echo "startup --output_base=/build/tmp"
+
+[[ !
-z "${BUILD_DIR}" ]] && sudo chown -R "$(id -u):$(id -g)" ${BUILD_DIR} \ No newline at end of file diff --git a/.vscode/.gitignore b/.vscode/.gitignore new file mode 100644 index 000000000000..c2393f450708 --- /dev/null +++ b/.vscode/.gitignore @@ -0,0 +1,2 @@ +settings.json +launch.json diff --git a/.vscode/tasks.json b/.vscode/tasks.json new file mode 100644 index 000000000000..fe0a5963698e --- /dev/null +++ b/.vscode/tasks.json @@ -0,0 +1,31 @@ +{ + // See https://go.microsoft.com/fwlink/?LinkId=733558 + // for the documentation about the tasks.json format + "version": "2.0.0", + "tasks": [ + { + "label": "Build All Tests", + "type": "shell", + "command": "bazel build //test/...", + "group": { + "kind": "build", + "isDefault": true + } + }, + { + "label": "Run All Tests", + "type": "shell", + "command": "bazel test //test/...", + "group": { + "kind": "test", + "isDefault": true + } + }, + { + "label": "Refresh Compilation Database", + "type": "shell", + "command": "tools/vscode/refresh_compdb.sh", + "problemMatcher": [] + } + ] +} diff --git a/bazel/setup_clang.sh b/bazel/setup_clang.sh index 4fd8a2bf8a5d..6b79aaed2484 100755 --- a/bazel/setup_clang.sh +++ b/bazel/setup_clang.sh @@ -1,6 +1,6 @@ #!/bin/bash -BAZELRC_FILE="$(bazel info workspace)/clang.bazelrc" +BAZELRC_FILE="${BAZELRC_FILE:-$(bazel info workspace)/clang.bazelrc}" LLVM_PREFIX=$1 @@ -28,6 +28,5 @@ build:clang-asan --linkopt=-fsanitize=vptr,function build:clang-asan --linkopt=-L${RT_LIBRARY_PATH} build:clang-asan --linkopt=-l:libclang_rt.ubsan_standalone-x86_64.a build:clang-asan --linkopt=-l:libclang_rt.ubsan_standalone_cxx-x86_64.a - " > ${BAZELRC_FILE} diff --git a/tools/gen_compilation_database.py b/tools/gen_compilation_database.py index b5b3c5a4a1be..1a3cf2ff4025 100755 --- a/tools/gen_compilation_database.py +++ b/tools/gen_compilation_database.py @@ -3,6 +3,7 @@ import argparse import glob import json +import logging import os import shlex import subprocess @@ -30,8 +31,16 @@ def generateCompilationDatabase(args): "--config=compdb", "--remote_download_outputs=all", ] + if args.keep_going: + bazel_options.append("-k") if args.run_bazel_build: - runBazelBuildForCompilationDatabase(bazel_options, args.bazel_targets) + try: + runBazelBuildForCompilationDatabase(bazel_options, args.bazel_targets) + except subprocess.CalledProcessError as e: + if not args.keep_going: + raise + else: + logging.warning("bazel build failed {}: {}".format(e.returncode, e.cmd)) subprocess.check_call(["bazel", "build"] + bazel_options + [ "--aspects=@bazel_compdb//:aspects.bzl%compilation_database_aspect", @@ -102,6 +111,7 @@ def fixCompilationDatabase(args, db): if __name__ == "__main__": parser = argparse.ArgumentParser(description='Generate JSON compilation database') parser.add_argument('--run_bazel_build', action='store_true') + parser.add_argument('-k', '--keep_going', action='store_true') parser.add_argument('--include_external', action='store_true') parser.add_argument('--include_genfiles', action='store_true') parser.add_argument('--include_headers', action='store_true') diff --git a/tools/vscode/refresh_compdb.sh b/tools/vscode/refresh_compdb.sh new file mode 100755 index 000000000000..c40074be87e3 --- /dev/null +++ b/tools/vscode/refresh_compdb.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +tools/proto_format/proto_format.sh fix + +# Setting platform suffix here so the compdb headers won't be overwritten by another bazel run +BAZEL_BUILD_OPTIONS=--platform_suffix=-compdb tools/gen_compilation_database.py --run_bazel_build -k + +# Kill 
clangd to reload the compilation database
+killall -v /opt/llvm/bin/clangd

From ab9ae746077cdf100947d7f3ac1616d814727425 Mon Sep 17 00:00:00 2001
From: ankatare
Date: Wed, 20 May 2020 18:14:50 +0530
Subject: [PATCH 204/909] fixing typos and breaking link issues (#11270)

Signed-off-by: Abhay Narayan Katare
Commit Message: Fixing breaking link issues and typos
Additional Description:
Risk Level: LOW
Testing: format and integration
Docs Changes: Yes
Release Notes:
[Optional Runtime guard:]
[Optional Fixes #11215
[Optional Deprecated:]
---
 PULL_REQUESTS.md                              | 2 +-
 docs/root/start/sandboxes/fault_injection.rst | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/PULL_REQUESTS.md b/PULL_REQUESTS.md
index 91211e3ff415..0293b144b40b 100644
--- a/PULL_REQUESTS.md
+++ b/PULL_REQUESTS.md
@@ -70,7 +70,7 @@ to relevant parts of the documentation. Thank you! Please write in N/A if there
 ### Runtime guard

 If this PR has a user-visible behavioral change, or otherwise falls under the
-guidelines for runtime guarding in the [contributing doc](CONTRIBUTING.md.md)
+guidelines for runtime guarding in the [contributing doc](CONTRIBUTING.md)
 it should have a runtime guard, which should be documented both in the release
 notes and here in the PR description.

diff --git a/docs/root/start/sandboxes/fault_injection.rst b/docs/root/start/sandboxes/fault_injection.rst
index 237c52c97286..a091c2ada258 100644
--- a/docs/root/start/sandboxes/fault_injection.rst
+++ b/docs/root/start/sandboxes/fault_injection.rst
@@ -48,7 +48,7 @@ Terminal 2
   $ docker-compose exec envoy bash
   $ bash send_request.sh

-The script above (``send_request.sh``) sends a continuous stream of HTTP requests to Envoy, which in turn forwards the requests to the backend container. Fauilt injection is configured in Evoy but turned off (i.e. affects 0% of requests). Consequently, you should see a continuous sequence of HTTP 200 response codes.
+The script above (``send_request.sh``) sends a continuous stream of HTTP requests to Envoy, which in turn forwards the requests to the backend container. Fault injection is configured in Envoy but turned off (i.e. affects 0% of requests). Consequently, you should see a continuous sequence of HTTP 200 response codes.
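For illustration only, a request loop like ``send_request.sh`` could be as simple as the sketch below; the script actually shipped in the sandbox may differ, and the listener port and path here are assumptions:

```bash
#!/usr/bin/env bash
# Hypothetical sketch: hit Envoy's listener in a loop and print only the status
# code, so injected aborts show up as a break in the steady stream of 200s.
while true; do
  curl -s -o /dev/null -w "%{http_code}\n" http://localhost:8000/
  sleep 1
done
```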
**Step 4: Test Envoy's abort fault injection** From 6a4b014a5cd110721d43492c668c7b3797858c89 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Wed, 20 May 2020 09:00:26 -0400 Subject: [PATCH 205/909] http: testing 304-with-body behavior (#11261) Risk Level: n/a (test only) Testing: new integration test Docs Changes: n/a Release Notes: n/a Fixes #9274 as much as we can Signed-off-by: Alyssa Wilk --- test/integration/protocol_integration_test.cc | 47 +++++++++++++++++++ 1 file changed, 47 insertions(+) diff --git a/test/integration/protocol_integration_test.cc b/test/integration/protocol_integration_test.cc index cc914f9d8bc3..207e41b02722 100644 --- a/test/integration/protocol_integration_test.cc +++ b/test/integration/protocol_integration_test.cc @@ -248,6 +248,8 @@ TEST_P(ProtocolIntegrationTest, DrainClose) { EXPECT_EQ("200", response->headers().Status()->value().getStringView()); if (downstream_protocol_ == Http::CodecClient::Type::HTTP2) { EXPECT_TRUE(codec_client_->sawGoAway()); + } else { + EXPECT_EQ("close", response->headers().Connection()->value().getStringView()); } } @@ -967,6 +969,51 @@ TEST_P(DownstreamProtocolIntegrationTest, ValidZeroLengthContent) { EXPECT_EQ("200", response->headers().Status()->value().getStringView()); } +// Test we're following https://tools.ietf.org/html/rfc7230#section-3.3.2 +// as best we can. +TEST_P(ProtocolIntegrationTest, 304WithBody) { + initialize(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + + auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); + + waitForNextUpstreamRequest(); + Http::TestResponseHeaderMapImpl response_headers{{":status", "304"}, {"content-length", "2"}}; + ASSERT(upstream_request_ != nullptr); + upstream_request_->encodeHeaders(response_headers, false); + response->waitForHeaders(); + EXPECT_EQ("304", response->headers().Status()->value().getStringView()); + + // For HTTP/1.1 http_parser is explicitly told that 304s are header-only + // requests. + if (downstream_protocol_ == Http::CodecClient::Type::HTTP1 || + upstreamProtocol() == FakeHttpConnection::Type::HTTP1) { + ASSERT_TRUE(response->complete()); + } else { + ASSERT_FALSE(response->complete()); + } + + upstream_request_->encodeData(2, true); + if (upstreamProtocol() == FakeHttpConnection::Type::HTTP1) { + // Any body sent after the request is considered complete will not be handled as part of the + // active request, but will be flagged as a protocol error for the no-longer-associated + // connection. + // Ideally if we got the body with the headers we would instead reset the + // stream, but it turns out that's complicated so instead we consistently + // forward the headers and error out after. + test_server_->waitForCounterGe("cluster.cluster_0.upstream_cx_protocol_error", 1); + } + + // Only for HTTP/2, where streams are ended with an explicit end-stream so we + // can differentiate between 304-with-advertised-but-absent-body and + // 304-with-body, is there a protocol error on the active stream. + if (downstream_protocol_ == Http::CodecClient::Type::HTTP2 && + upstreamProtocol() == FakeHttpConnection::Type::HTTP2) { + response->waitForReset(); + } +} + // Validate that lots of tiny cookies doesn't cause a DoS (single cookie header). 
TEST_P(DownstreamProtocolIntegrationTest, LargeCookieParsingConcatenated) { initialize(); From 570f7ac553e64206193d64952bf8d4a72b76e49c Mon Sep 17 00:00:00 2001 From: Kathan <5263542+kathan24@users.noreply.github.com> Date: Wed, 20 May 2020 08:25:55 -0700 Subject: [PATCH 206/909] Load reporting service documentation (#10962) Signed-off-by: kathan24 --- api/envoy/config/endpoint/v3/load_report.proto | 9 +++------ api/envoy/service/load_stats/v3/lrs.proto | 13 ++++++++++--- docs/root/api-v3/config/config.rst | 1 + docs/root/api-v3/config/endpoint/endpoint.rst | 8 ++++++++ docs/root/api-v3/service/service.rst | 1 + .../upstream/load_reporting_service.rst | 15 +++++++++++++++ .../intro/arch_overview/upstream/upstream.rst | 1 + .../envoy/config/endpoint/v3/load_report.proto | 9 +++------ .../envoy/service/load_stats/v3/lrs.proto | 13 ++++++++++--- 9 files changed, 52 insertions(+), 18 deletions(-) create mode 100644 docs/root/api-v3/config/endpoint/endpoint.rst create mode 100644 docs/root/intro/arch_overview/upstream/load_reporting_service.rst diff --git a/api/envoy/config/endpoint/v3/load_report.proto b/api/envoy/config/endpoint/v3/load_report.proto index 01eb7b12cf1a..3f067737ec25 100644 --- a/api/envoy/config/endpoint/v3/load_report.proto +++ b/api/envoy/config/endpoint/v3/load_report.proto @@ -17,11 +17,11 @@ option java_outer_classname = "LoadReportProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; -// These are stats Envoy reports to GLB every so often. Report frequency is -// defined by +// [#protodoc-title: Load Report] + +// These are stats Envoy reports to the management server at a frequency defined by // :ref:`LoadStatsResponse.load_reporting_interval`. // Stats per upstream region/zone and optionally per subzone. -// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. // [#next-free-field: 9] message UpstreamLocalityStats { option (udpa.annotations.versioning).previous_message_type = @@ -60,7 +60,6 @@ message UpstreamLocalityStats { uint32 priority = 6; } -// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. // [#next-free-field: 8] message UpstreamEndpointStats { option (udpa.annotations.versioning).previous_message_type = @@ -103,7 +102,6 @@ message UpstreamEndpointStats { repeated EndpointLoadMetricStats load_metric_stats = 5; } -// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. message EndpointLoadMetricStats { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.endpoint.EndpointLoadMetricStats"; @@ -121,7 +119,6 @@ message EndpointLoadMetricStats { // Per cluster load stats. Envoy reports these stats a management server in a // :ref:`LoadStatsRequest` -// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. // Next ID: 7 // [#next-free-field: 7] message ClusterStats { diff --git a/api/envoy/service/load_stats/v3/lrs.proto b/api/envoy/service/load_stats/v3/lrs.proto index d76356884a7a..76705ba77771 100644 --- a/api/envoy/service/load_stats/v3/lrs.proto +++ b/api/envoy/service/load_stats/v3/lrs.proto @@ -17,7 +17,15 @@ option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; -// [#protodoc-title: Load reporting service] +// [#protodoc-title: Load Reporting service (LRS)] + +// Load Reporting Service is an Envoy API to emit load reports. Envoy will initiate a bi-directional +// stream with a management server. 
Upon connecting, the management server can send a +// :ref:`LoadStatsResponse ` to a node it is +// interested in getting the load reports for. Envoy in this node will start sending +// :ref:`LoadStatsRequest `. This is done periodically +// based on the :ref:`load reporting interval ` +// For details, take a look at the :ref:`Load Reporting Service sandbox example `. service LoadReportingService { // Advanced API to allow for multi-dimensional load balancing by remote @@ -53,7 +61,6 @@ service LoadReportingService { } // A load report Envoy sends to the management server. -// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. message LoadStatsRequest { option (udpa.annotations.versioning).previous_message_type = "envoy.service.load_stats.v2.LoadStatsRequest"; @@ -67,7 +74,6 @@ message LoadStatsRequest { // The management server sends envoy a LoadStatsResponse with all clusters it // is interested in learning load stats about. -// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. message LoadStatsResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.service.load_stats.v2.LoadStatsResponse"; @@ -82,6 +88,7 @@ message LoadStatsResponse { bool send_all_clusters = 4; // The minimum interval of time to collect stats over. This is only a minimum for two reasons: + // // 1. There may be some delay from when the timer fires until stats sampling occurs. // 2. For clusters that were already feature in the previous *LoadStatsResponse*, any traffic // that is observed in between the corresponding previous *LoadStatsRequest* and this diff --git a/docs/root/api-v3/config/config.rst b/docs/root/api-v3/config/config.rst index ba7ca7e70f76..e1ccac77719d 100644 --- a/docs/root/api-v3/config/config.rst +++ b/docs/root/api-v3/config/config.rst @@ -18,3 +18,4 @@ Extensions retry/retry trace/trace internal_redirect/internal_redirect + endpoint/endpoint diff --git a/docs/root/api-v3/config/endpoint/endpoint.rst b/docs/root/api-v3/config/endpoint/endpoint.rst new file mode 100644 index 000000000000..c1b64b1e4651 --- /dev/null +++ b/docs/root/api-v3/config/endpoint/endpoint.rst @@ -0,0 +1,8 @@ +Endpoint +======== + +.. toctree:: + :glob: + :maxdepth: 2 + + v3/* \ No newline at end of file diff --git a/docs/root/api-v3/service/service.rst b/docs/root/api-v3/service/service.rst index 6ad5674d4bde..de8110cf5fbd 100644 --- a/docs/root/api-v3/service/service.rst +++ b/docs/root/api-v3/service/service.rst @@ -6,6 +6,7 @@ Services :maxdepth: 2 accesslog/v3/* + load_stats/v3/* auth/v3/* health/v3/* metrics/v3/* diff --git a/docs/root/intro/arch_overview/upstream/load_reporting_service.rst b/docs/root/intro/arch_overview/upstream/load_reporting_service.rst new file mode 100644 index 000000000000..669bce160592 --- /dev/null +++ b/docs/root/intro/arch_overview/upstream/load_reporting_service.rst @@ -0,0 +1,15 @@ +.. _arch_overview_load_reporting_service: + +Load Reporting Service (LRS) +============================ + +The Load Reporting Service provides a mechanism by which Envoy can emit Load Reports to a management +server at a regular cadence. + +This will initiate a bi-directional stream with a management server. Upon connecting, the management +server can send a :ref:`LoadStatsResponse ` +to a node it is interested in getting the load reports for. Envoy in this node will start sending +:ref:`LoadStatsRequest `. 
This is done periodically +based on the :ref:`load reporting interval ` + +Envoy config with LRS can be found at :repo:`/examples/load-reporting-service/service-envoy-w-lrs.yaml`. diff --git a/docs/root/intro/arch_overview/upstream/upstream.rst b/docs/root/intro/arch_overview/upstream/upstream.rst index 112dc7885446..3c976f0212c3 100644 --- a/docs/root/intro/arch_overview/upstream/upstream.rst +++ b/docs/root/intro/arch_overview/upstream/upstream.rst @@ -13,3 +13,4 @@ Upstream clusters outlier circuit_breaking upstream_filters + load_reporting_service diff --git a/generated_api_shadow/envoy/config/endpoint/v3/load_report.proto b/generated_api_shadow/envoy/config/endpoint/v3/load_report.proto index 01eb7b12cf1a..3f067737ec25 100644 --- a/generated_api_shadow/envoy/config/endpoint/v3/load_report.proto +++ b/generated_api_shadow/envoy/config/endpoint/v3/load_report.proto @@ -17,11 +17,11 @@ option java_outer_classname = "LoadReportProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; -// These are stats Envoy reports to GLB every so often. Report frequency is -// defined by +// [#protodoc-title: Load Report] + +// These are stats Envoy reports to the management server at a frequency defined by // :ref:`LoadStatsResponse.load_reporting_interval`. // Stats per upstream region/zone and optionally per subzone. -// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. // [#next-free-field: 9] message UpstreamLocalityStats { option (udpa.annotations.versioning).previous_message_type = @@ -60,7 +60,6 @@ message UpstreamLocalityStats { uint32 priority = 6; } -// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. // [#next-free-field: 8] message UpstreamEndpointStats { option (udpa.annotations.versioning).previous_message_type = @@ -103,7 +102,6 @@ message UpstreamEndpointStats { repeated EndpointLoadMetricStats load_metric_stats = 5; } -// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. message EndpointLoadMetricStats { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.endpoint.EndpointLoadMetricStats"; @@ -121,7 +119,6 @@ message EndpointLoadMetricStats { // Per cluster load stats. Envoy reports these stats a management server in a // :ref:`LoadStatsRequest` -// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. // Next ID: 7 // [#next-free-field: 7] message ClusterStats { diff --git a/generated_api_shadow/envoy/service/load_stats/v3/lrs.proto b/generated_api_shadow/envoy/service/load_stats/v3/lrs.proto index d76356884a7a..76705ba77771 100644 --- a/generated_api_shadow/envoy/service/load_stats/v3/lrs.proto +++ b/generated_api_shadow/envoy/service/load_stats/v3/lrs.proto @@ -17,7 +17,15 @@ option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; -// [#protodoc-title: Load reporting service] +// [#protodoc-title: Load Reporting service (LRS)] + +// Load Reporting Service is an Envoy API to emit load reports. Envoy will initiate a bi-directional +// stream with a management server. Upon connecting, the management server can send a +// :ref:`LoadStatsResponse ` to a node it is +// interested in getting the load reports for. Envoy in this node will start sending +// :ref:`LoadStatsRequest `. This is done periodically +// based on the :ref:`load reporting interval ` +// For details, take a look at the :ref:`Load Reporting Service sandbox example `. 
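To make the flow described above concrete, load reporting is pointed at a management server through `cluster_manager.load_stats_config` in the bootstrap. A minimal sketch using the v3 bootstrap fields (the cluster name is a placeholder; the full worked example lives at the sandbox path referenced above):

```yaml
cluster_manager:
  load_stats_config:
    api_config_source:
      api_type: GRPC
      grpc_services:
        - envoy_grpc:
            # "lrs_server" is a hypothetical static cluster that points at the LRS management server.
            cluster_name: lrs_server
```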
service LoadReportingService { // Advanced API to allow for multi-dimensional load balancing by remote @@ -53,7 +61,6 @@ service LoadReportingService { } // A load report Envoy sends to the management server. -// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. message LoadStatsRequest { option (udpa.annotations.versioning).previous_message_type = "envoy.service.load_stats.v2.LoadStatsRequest"; @@ -67,7 +74,6 @@ message LoadStatsRequest { // The management server sends envoy a LoadStatsResponse with all clusters it // is interested in learning load stats about. -// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. message LoadStatsResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.service.load_stats.v2.LoadStatsResponse"; @@ -82,6 +88,7 @@ message LoadStatsResponse { bool send_all_clusters = 4; // The minimum interval of time to collect stats over. This is only a minimum for two reasons: + // // 1. There may be some delay from when the timer fires until stats sampling occurs. // 2. For clusters that were already feature in the previous *LoadStatsResponse*, any traffic // that is observed in between the corresponding previous *LoadStatsRequest* and this From a5f2f12bcac30b1c2662b722565ea2d9cfb57786 Mon Sep 17 00:00:00 2001 From: rulex123 <29862113+rulex123@users.noreply.github.com> Date: Wed, 20 May 2020 17:29:54 +0200 Subject: [PATCH 207/909] admin: extract more handlers to separate classes (#11258) More refactoring for #5505. In this specific PR: extract server command handlers to newly created ServerCmdHandler class extract server info handlers to newly created ServerInfoHandler class extract mutex stats handler to StatsHandler class Signed-off-by: Erica Manno --- source/server/admin/BUILD | 40 +++- source/server/admin/admin.cc | 149 ++------------- source/server/admin/admin.h | 38 +--- source/server/admin/server_cmd_handler.cc | 30 +++ source/server/admin/server_cmd_handler.h | 35 ++++ source/server/admin/server_info_handler.cc | 97 ++++++++++ source/server/admin/server_info_handler.h | 43 +++++ source/server/admin/stats_handler.cc | 22 +++ source/server/admin/stats_handler.h | 3 + test/server/admin/BUILD | 12 +- test/server/admin/admin_test.cc | 164 ---------------- test/server/admin/server_info_handler_test.cc | 178 ++++++++++++++++++ 12 files changed, 474 insertions(+), 337 deletions(-) create mode 100644 source/server/admin/server_cmd_handler.cc create mode 100644 source/server/admin/server_cmd_handler.h create mode 100644 source/server/admin/server_info_handler.cc create mode 100644 source/server/admin/server_info_handler.h create mode 100644 test/server/admin/server_info_handler_test.cc diff --git a/source/server/admin/BUILD b/source/server/admin/BUILD index b97b3066f3b1..84b331a927f1 100644 --- a/source/server/admin/BUILD +++ b/source/server/admin/BUILD @@ -19,6 +19,8 @@ envoy_cc_library( ":logs_handler_lib", ":profiling_handler_lib", ":runtime_handler_lib", + ":server_cmd_handler_lib", + ":server_info_handler_lib", ":stats_handler_lib", ":utils_lib", "//include/envoy/filesystem:filesystem_interface", @@ -44,7 +46,6 @@ envoy_cc_library( "//source/common/common:minimal_logger_lib", "//source/common/common:mutex_tracer_lib", "//source/common/common:utility_lib", - "//source/common/common:version_includes", "//source/common/html:utility_lib", "//source/common/http:codes_lib", "//source/common/http:conn_manager_lib", @@ -54,7 +55,6 @@ envoy_cc_library( "//source/common/http:headers_lib", 
"//source/common/http:request_id_extension_lib", "//source/common/http:utility_lib", - "//source/common/memory:stats_lib", "//source/common/memory:utils_lib", "//source/common/network:connection_balancer_lib", "//source/common/network:listen_socket_lib", @@ -112,6 +112,7 @@ envoy_cc_library( "//source/common/http:codes_lib", "//source/common/http:header_map_lib", "//source/common/stats:histogram_lib", + "@envoy_api//envoy/admin/v3:pkg_cc_proto", ], ) @@ -191,6 +192,41 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "server_cmd_handler_lib", + srcs = ["server_cmd_handler.cc"], + hdrs = ["server_cmd_handler.h"], + deps = [ + ":handler_ctx_lib", + ":utils_lib", + "//include/envoy/http:codes_interface", + "//include/envoy/server:admin_interface", + "//include/envoy/server:instance_interface", + "//source/common/buffer:buffer_lib", + "//source/common/http:codes_lib", + "//source/common/http:header_map_lib", + ], +) + +envoy_cc_library( + name = "server_info_handler_lib", + srcs = ["server_info_handler.cc"], + hdrs = ["server_info_handler.h"], + deps = [ + ":handler_ctx_lib", + ":utils_lib", + "//include/envoy/http:codes_interface", + "//include/envoy/server:admin_interface", + "//include/envoy/server:instance_interface", + "//source/common/buffer:buffer_lib", + "//source/common/common:version_includes", + "//source/common/http:codes_lib", + "//source/common/http:header_map_lib", + "//source/common/memory:stats_lib", + "@envoy_api//envoy/admin/v3:pkg_cc_proto", + ], +) + envoy_cc_library( name = "utils_lib", srcs = ["utils.cc"], diff --git a/source/server/admin/admin.cc b/source/server/admin/admin.cc index fb2b52fc92e1..0484488b4977 100644 --- a/source/server/admin/admin.cc +++ b/source/server/admin/admin.cc @@ -11,9 +11,7 @@ #include "envoy/admin/v3/certs.pb.h" #include "envoy/admin/v3/clusters.pb.h" #include "envoy/admin/v3/config_dump.pb.h" -#include "envoy/admin/v3/memory.pb.h" #include "envoy/admin/v3/metrics.pb.h" -#include "envoy/admin/v3/mutex_stats.pb.h" #include "envoy/admin/v3/server_info.pb.h" #include "envoy/config/core/v3/health_check.pb.h" #include "envoy/filesystem/filesystem.h" @@ -31,13 +29,11 @@ #include "common/common/fmt.h" #include "common/common/mutex_tracer_impl.h" #include "common/common/utility.h" -#include "common/common/version.h" #include "common/html/utility.h" #include "common/http/codes.h" #include "common/http/conn_manager_utility.h" #include "common/http/header_map_impl.h" #include "common/http/headers.h" -#include "common/memory/stats.h" #include "common/memory/utils.h" #include "common/network/listen_socket_impl.h" #include "common/network/utility.h" @@ -535,126 +531,6 @@ Http::Code AdminImpl::handlerConfigDump(absl::string_view url, return Http::Code::OK; } -// TODO(ambuc) Export this as a server (?) stat for monitoring. 
-Http::Code AdminImpl::handlerContention(absl::string_view, - Http::ResponseHeaderMap& response_headers, - Buffer::Instance& response, AdminStream&) { - - if (server_.options().mutexTracingEnabled() && server_.mutexTracer() != nullptr) { - response_headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Json); - - envoy::admin::v3::MutexStats mutex_stats; - mutex_stats.set_num_contentions(server_.mutexTracer()->numContentions()); - mutex_stats.set_current_wait_cycles(server_.mutexTracer()->currentWaitCycles()); - mutex_stats.set_lifetime_wait_cycles(server_.mutexTracer()->lifetimeWaitCycles()); - response.add(MessageUtil::getJsonStringFromMessage(mutex_stats, true, true)); - } else { - response.add("Mutex contention tracing is not enabled. To enable, run Envoy with flag " - "--enable-mutex-tracing."); - } - return Http::Code::OK; -} - -Http::Code AdminImpl::handlerHealthcheckFail(absl::string_view, Http::ResponseHeaderMap&, - Buffer::Instance& response, AdminStream&) { - server_.failHealthcheck(true); - response.add("OK\n"); - return Http::Code::OK; -} - -Http::Code AdminImpl::handlerHealthcheckOk(absl::string_view, Http::ResponseHeaderMap&, - Buffer::Instance& response, AdminStream&) { - server_.failHealthcheck(false); - response.add("OK\n"); - return Http::Code::OK; -} - -Http::Code AdminImpl::handlerHotRestartVersion(absl::string_view, Http::ResponseHeaderMap&, - Buffer::Instance& response, AdminStream&) { - response.add(server_.hotRestart().version()); - return Http::Code::OK; -} - -// TODO(ambuc): Add more tcmalloc stats, export proto details based on allocator. -Http::Code AdminImpl::handlerMemory(absl::string_view, Http::ResponseHeaderMap& response_headers, - Buffer::Instance& response, AdminStream&) { - response_headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Json); - envoy::admin::v3::Memory memory; - memory.set_allocated(Memory::Stats::totalCurrentlyAllocated()); - memory.set_heap_size(Memory::Stats::totalCurrentlyReserved()); - memory.set_total_thread_cache(Memory::Stats::totalThreadCacheBytes()); - memory.set_pageheap_unmapped(Memory::Stats::totalPageHeapUnmapped()); - memory.set_pageheap_free(Memory::Stats::totalPageHeapFree()); - memory.set_total_physical_bytes(Memory::Stats::totalPhysicalBytes()); - response.add(MessageUtil::getJsonStringFromMessage(memory, true, true)); // pretty-print - return Http::Code::OK; -} - -Http::Code AdminImpl::handlerServerInfo(absl::string_view, Http::ResponseHeaderMap& headers, - Buffer::Instance& response, AdminStream&) { - const std::time_t current_time = - std::chrono::system_clock::to_time_t(server_.timeSource().systemTime()); - const std::time_t uptime_current_epoch = current_time - server_.startTimeCurrentEpoch(); - const std::time_t uptime_all_epochs = current_time - server_.startTimeFirstEpoch(); - - ASSERT(uptime_current_epoch >= 0); - ASSERT(uptime_all_epochs >= 0); - - envoy::admin::v3::ServerInfo server_info; - server_info.set_version(VersionInfo::version()); - server_info.set_hot_restart_version(server_.hotRestart().version()); - server_info.set_state( - Utility::serverState(server_.initManager().state(), server_.healthCheckFailed())); - - server_info.mutable_uptime_current_epoch()->set_seconds(uptime_current_epoch); - server_info.mutable_uptime_all_epochs()->set_seconds(uptime_all_epochs); - envoy::admin::v3::CommandLineOptions* command_line_options = - server_info.mutable_command_line_options(); - *command_line_options = *server_.options().toCommandLineOptions(); - 
response.add(MessageUtil::getJsonStringFromMessage(server_info, true, true)); - headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Json); - return Http::Code::OK; -} - -Http::Code AdminImpl::handlerReady(absl::string_view, Http::ResponseHeaderMap&, - Buffer::Instance& response, AdminStream&) { - const envoy::admin::v3::ServerInfo::State state = - Utility::serverState(server_.initManager().state(), server_.healthCheckFailed()); - - response.add(envoy::admin::v3::ServerInfo::State_Name(state) + "\n"); - Http::Code code = - state == envoy::admin::v3::ServerInfo::LIVE ? Http::Code::OK : Http::Code::ServiceUnavailable; - return code; -} - -Http::Code AdminImpl::handlerQuitQuitQuit(absl::string_view, Http::ResponseHeaderMap&, - Buffer::Instance& response, AdminStream&) { - server_.shutdown(); - response.add("OK\n"); - return Http::Code::OK; -} - -Http::Code AdminImpl::handlerCerts(absl::string_view, Http::ResponseHeaderMap& response_headers, - Buffer::Instance& response, AdminStream&) { - // This set is used to track distinct certificates. We may have multiple listeners, upstreams, etc - // using the same cert. - response_headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Json); - envoy::admin::v3::Certificates certificates; - server_.sslContextManager().iterateContexts([&](const Ssl::Context& context) -> void { - envoy::admin::v3::Certificate& certificate = *certificates.add_certificates(); - if (context.getCaCertInformation() != nullptr) { - envoy::admin::v3::CertificateDetails* ca_certificate = certificate.add_ca_cert(); - *ca_certificate = *context.getCaCertInformation(); - } - for (const auto& cert_details : context.getCertChainInformation()) { - envoy::admin::v3::CertificateDetails* cert_chain = certificate.add_cert_chain(); - *cert_chain = *cert_details; - } - }); - response.add(MessageUtil::getJsonStringFromMessage(certificates, true, true)); - return Http::Code::OK; -} - ConfigTracker& AdminImpl::getConfigTracker() { return config_tracker_; } AdminImpl::NullRouteConfigProvider::NullRouteConfigProvider(TimeSource& time_source) @@ -694,43 +570,44 @@ AdminImpl::AdminImpl(const std::string& profile_path, Server::Instance& server) route_config_provider_(server.timeSource()), scoped_route_config_provider_(server.timeSource()), stats_handler_(server), logs_handler_(server), profiling_handler_(profile_path), runtime_handler_(server), - listeners_handler_(server), + listeners_handler_(server), server_cmd_handler_(server), server_info_handler_(server), // TODO(jsedgwick) add /runtime_reset endpoint that removes all admin-set values handlers_{ {"/", "Admin home page", MAKE_ADMIN_HANDLER(handlerAdminHome), false, false}, - {"/certs", "print certs on machine", MAKE_ADMIN_HANDLER(handlerCerts), false, false}, + {"/certs", "print certs on machine", + MAKE_ADMIN_HANDLER(server_info_handler_.handlerCerts), false, false}, {"/clusters", "upstream cluster status", MAKE_ADMIN_HANDLER(handlerClusters), false, false}, {"/config_dump", "dump current Envoy configs (experimental)", MAKE_ADMIN_HANDLER(handlerConfigDump), false, false}, {"/contention", "dump current Envoy mutex contention stats (if enabled)", - MAKE_ADMIN_HANDLER(handlerContention), false, false}, + MAKE_ADMIN_HANDLER(stats_handler_.handlerContention), false, false}, {"/cpuprofiler", "enable/disable the CPU profiler", MAKE_ADMIN_HANDLER(profiling_handler_.handlerCpuProfiler), false, true}, {"/heapprofiler", "enable/disable the heap profiler", MAKE_ADMIN_HANDLER(profiling_handler_.handlerHeapProfiler), false, 
true}, {"/healthcheck/fail", "cause the server to fail health checks", - MAKE_ADMIN_HANDLER(handlerHealthcheckFail), false, true}, + MAKE_ADMIN_HANDLER(server_cmd_handler_.handlerHealthcheckFail), false, true}, {"/healthcheck/ok", "cause the server to pass health checks", - MAKE_ADMIN_HANDLER(handlerHealthcheckOk), false, true}, + MAKE_ADMIN_HANDLER(server_cmd_handler_.handlerHealthcheckOk), false, true}, {"/help", "print out list of admin commands", MAKE_ADMIN_HANDLER(handlerHelp), false, false}, {"/hot_restart_version", "print the hot restart compatibility version", - MAKE_ADMIN_HANDLER(handlerHotRestartVersion), false, false}, + MAKE_ADMIN_HANDLER(server_info_handler_.handlerHotRestartVersion), false, false}, {"/logging", "query/change logging levels", MAKE_ADMIN_HANDLER(logs_handler_.handlerLogging), false, true}, - {"/memory", "print current allocation/heap usage", MAKE_ADMIN_HANDLER(handlerMemory), - false, false}, - {"/quitquitquit", "exit the server", MAKE_ADMIN_HANDLER(handlerQuitQuitQuit), false, - true}, + {"/memory", "print current allocation/heap usage", + MAKE_ADMIN_HANDLER(server_info_handler_.handlerMemory), false, false}, + {"/quitquitquit", "exit the server", + MAKE_ADMIN_HANDLER(server_cmd_handler_.handlerQuitQuitQuit), false, true}, {"/reset_counters", "reset all counters to zero", MAKE_ADMIN_HANDLER(stats_handler_.handlerResetCounters), false, true}, {"/drain_listeners", "drain listeners", MAKE_ADMIN_HANDLER(listeners_handler_.handlerDrainListeners), false, true}, {"/server_info", "print server version/status information", - MAKE_ADMIN_HANDLER(handlerServerInfo), false, false}, + MAKE_ADMIN_HANDLER(server_info_handler_.handlerServerInfo), false, false}, {"/ready", "print server state, return 200 if LIVE, otherwise return 503", - MAKE_ADMIN_HANDLER(handlerReady), false, false}, + MAKE_ADMIN_HANDLER(server_info_handler_.handlerReady), false, false}, {"/stats", "print server stats", MAKE_ADMIN_HANDLER(stats_handler_.handlerStats), false, false}, {"/stats/prometheus", "print server stats in prometheus format", diff --git a/source/server/admin/admin.h b/source/server/admin/admin.h index c5e86534dc7c..f59db77c2292 100644 --- a/source/server/admin/admin.h +++ b/source/server/admin/admin.h @@ -44,6 +44,8 @@ #include "server/admin/logs_handler.h" #include "server/admin/profiling_handler.h" #include "server/admin/runtime_handler.h" +#include "server/admin/server_cmd_handler.h" +#include "server/admin/server_info_handler.h" #include "server/admin/stats_handler.h" #include "extensions/filters/http/common/pass_through_filter.h" @@ -274,49 +276,15 @@ class AdminImpl : public Admin, Http::Code handlerAdminHome(absl::string_view path_and_query, Http::ResponseHeaderMap& response_headers, Buffer::Instance& response, AdminStream&); - Http::Code handlerCerts(absl::string_view path_and_query, - Http::ResponseHeaderMap& response_headers, Buffer::Instance& response, - AdminStream&); Http::Code handlerClusters(absl::string_view path_and_query, Http::ResponseHeaderMap& response_headers, Buffer::Instance& response, AdminStream&); Http::Code handlerConfigDump(absl::string_view path_and_query, Http::ResponseHeaderMap& response_headers, Buffer::Instance& response, AdminStream&) const; - Http::Code handlerContention(absl::string_view path_and_query, - Http::ResponseHeaderMap& response_headers, - Buffer::Instance& response, AdminStream&); - Http::Code handlerCpuProfiler(absl::string_view path_and_query, - Http::ResponseHeaderMap& response_headers, - Buffer::Instance& response, AdminStream&); - 
Http::Code handlerHeapProfiler(absl::string_view path_and_query, - Http::ResponseHeaderMap& response_headers, - Buffer::Instance& response, AdminStream&); - Http::Code handlerHealthcheckFail(absl::string_view path_and_query, - Http::ResponseHeaderMap& response_headers, - Buffer::Instance& response, AdminStream&); - Http::Code handlerHealthcheckOk(absl::string_view path_and_query, - Http::ResponseHeaderMap& response_headers, - Buffer::Instance& response, AdminStream&); Http::Code handlerHelp(absl::string_view path_and_query, Http::ResponseHeaderMap& response_headers, Buffer::Instance& response, AdminStream&); - Http::Code handlerHotRestartVersion(absl::string_view path_and_query, - Http::ResponseHeaderMap& response_headers, - Buffer::Instance& response, AdminStream&); - Http::Code handlerMemory(absl::string_view path_and_query, - Http::ResponseHeaderMap& response_headers, Buffer::Instance& response, - AdminStream&); - Http::Code handlerMain(const std::string& path, Buffer::Instance& response, AdminStream&); - Http::Code handlerQuitQuitQuit(absl::string_view path_and_query, - Http::ResponseHeaderMap& response_headers, - Buffer::Instance& response, AdminStream&); - Http::Code handlerServerInfo(absl::string_view path_and_query, - Http::ResponseHeaderMap& response_headers, - Buffer::Instance& response, AdminStream&); - Http::Code handlerReady(absl::string_view path_and_query, - Http::ResponseHeaderMap& response_headers, Buffer::Instance& response, - AdminStream&); class AdminListenSocketFactory : public Network::ListenSocketFactory { public: @@ -421,6 +389,8 @@ class AdminImpl : public Admin, Server::ProfilingHandler profiling_handler_; Server::RuntimeHandler runtime_handler_; Server::ListenersHandler listeners_handler_; + Server::ServerCmdHandler server_cmd_handler_; + Server::ServerInfoHandler server_info_handler_; std::list handlers_; const uint32_t max_request_headers_kb_{Http::DEFAULT_MAX_REQUEST_HEADERS_KB}; const uint32_t max_request_headers_count_{Http::DEFAULT_MAX_HEADERS_COUNT}; diff --git a/source/server/admin/server_cmd_handler.cc b/source/server/admin/server_cmd_handler.cc new file mode 100644 index 000000000000..dfa66a41ee84 --- /dev/null +++ b/source/server/admin/server_cmd_handler.cc @@ -0,0 +1,30 @@ +#include "server/admin/server_cmd_handler.h" + +namespace Envoy { +namespace Server { + +ServerCmdHandler::ServerCmdHandler(Server::Instance& server) : HandlerContextBase(server) {} + +Http::Code ServerCmdHandler::handlerHealthcheckFail(absl::string_view, Http::ResponseHeaderMap&, + Buffer::Instance& response, AdminStream&) { + server_.failHealthcheck(true); + response.add("OK\n"); + return Http::Code::OK; +} + +Http::Code ServerCmdHandler::handlerHealthcheckOk(absl::string_view, Http::ResponseHeaderMap&, + Buffer::Instance& response, AdminStream&) { + server_.failHealthcheck(false); + response.add("OK\n"); + return Http::Code::OK; +} + +Http::Code ServerCmdHandler::handlerQuitQuitQuit(absl::string_view, Http::ResponseHeaderMap&, + Buffer::Instance& response, AdminStream&) { + server_.shutdown(); + response.add("OK\n"); + return Http::Code::OK; +} + +} // namespace Server +} // namespace Envoy diff --git a/source/server/admin/server_cmd_handler.h b/source/server/admin/server_cmd_handler.h new file mode 100644 index 000000000000..cddfb94b3917 --- /dev/null +++ b/source/server/admin/server_cmd_handler.h @@ -0,0 +1,35 @@ +#pragma once + +#include "envoy/buffer/buffer.h" +#include "envoy/http/codes.h" +#include "envoy/http/header_map.h" +#include "envoy/server/admin.h" +#include 
"envoy/server/instance.h" + +#include "server/admin/handler_ctx.h" + +#include "absl/strings/string_view.h" + +namespace Envoy { +namespace Server { + +class ServerCmdHandler : public HandlerContextBase { + +public: + ServerCmdHandler(Server::Instance& server); + + Http::Code handlerQuitQuitQuit(absl::string_view path_and_query, + Http::ResponseHeaderMap& response_headers, + Buffer::Instance& response, AdminStream&); + + Http::Code handlerHealthcheckFail(absl::string_view path_and_query, + Http::ResponseHeaderMap& response_headers, + Buffer::Instance& response, AdminStream&); + + Http::Code handlerHealthcheckOk(absl::string_view path_and_query, + Http::ResponseHeaderMap& response_headers, + Buffer::Instance& response, AdminStream&); +}; + +} // namespace Server +} // namespace Envoy diff --git a/source/server/admin/server_info_handler.cc b/source/server/admin/server_info_handler.cc new file mode 100644 index 000000000000..4c4c8322b3df --- /dev/null +++ b/source/server/admin/server_info_handler.cc @@ -0,0 +1,97 @@ +#include "server/admin/server_info_handler.h" + +#include "envoy/admin/v3/memory.pb.h" + +#include "common/common/version.h" +#include "common/memory/stats.h" + +#include "server/admin/utils.h" + +namespace Envoy { +namespace Server { + +ServerInfoHandler::ServerInfoHandler(Server::Instance& server) : HandlerContextBase(server) {} + +Http::Code ServerInfoHandler::handlerCerts(absl::string_view, + Http::ResponseHeaderMap& response_headers, + Buffer::Instance& response, AdminStream&) { + // This set is used to track distinct certificates. We may have multiple listeners, upstreams, etc + // using the same cert. + response_headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Json); + envoy::admin::v3::Certificates certificates; + server_.sslContextManager().iterateContexts([&](const Ssl::Context& context) -> void { + envoy::admin::v3::Certificate& certificate = *certificates.add_certificates(); + if (context.getCaCertInformation() != nullptr) { + envoy::admin::v3::CertificateDetails* ca_certificate = certificate.add_ca_cert(); + *ca_certificate = *context.getCaCertInformation(); + } + for (const auto& cert_details : context.getCertChainInformation()) { + envoy::admin::v3::CertificateDetails* cert_chain = certificate.add_cert_chain(); + *cert_chain = *cert_details; + } + }); + response.add(MessageUtil::getJsonStringFromMessage(certificates, true, true)); + return Http::Code::OK; +} + +Http::Code ServerInfoHandler::handlerHotRestartVersion(absl::string_view, Http::ResponseHeaderMap&, + Buffer::Instance& response, AdminStream&) { + response.add(server_.hotRestart().version()); + return Http::Code::OK; +} + +// TODO(ambuc): Add more tcmalloc stats, export proto details based on allocator. 
+Http::Code ServerInfoHandler::handlerMemory(absl::string_view, + Http::ResponseHeaderMap& response_headers, + Buffer::Instance& response, AdminStream&) { + response_headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Json); + envoy::admin::v3::Memory memory; + memory.set_allocated(Memory::Stats::totalCurrentlyAllocated()); + memory.set_heap_size(Memory::Stats::totalCurrentlyReserved()); + memory.set_total_thread_cache(Memory::Stats::totalThreadCacheBytes()); + memory.set_pageheap_unmapped(Memory::Stats::totalPageHeapUnmapped()); + memory.set_pageheap_free(Memory::Stats::totalPageHeapFree()); + memory.set_total_physical_bytes(Memory::Stats::totalPhysicalBytes()); + response.add(MessageUtil::getJsonStringFromMessage(memory, true, true)); // pretty-print + return Http::Code::OK; +} + +Http::Code ServerInfoHandler::handlerReady(absl::string_view, Http::ResponseHeaderMap&, + Buffer::Instance& response, AdminStream&) { + const envoy::admin::v3::ServerInfo::State state = + Utility::serverState(server_.initManager().state(), server_.healthCheckFailed()); + + response.add(envoy::admin::v3::ServerInfo::State_Name(state) + "\n"); + Http::Code code = + state == envoy::admin::v3::ServerInfo::LIVE ? Http::Code::OK : Http::Code::ServiceUnavailable; + return code; +} + +Http::Code ServerInfoHandler::handlerServerInfo(absl::string_view, Http::ResponseHeaderMap& headers, + Buffer::Instance& response, AdminStream&) { + const std::time_t current_time = + std::chrono::system_clock::to_time_t(server_.timeSource().systemTime()); + const std::time_t uptime_current_epoch = current_time - server_.startTimeCurrentEpoch(); + const std::time_t uptime_all_epochs = current_time - server_.startTimeFirstEpoch(); + + ASSERT(uptime_current_epoch >= 0); + ASSERT(uptime_all_epochs >= 0); + + envoy::admin::v3::ServerInfo server_info; + server_info.set_version(VersionInfo::version()); + server_info.set_hot_restart_version(server_.hotRestart().version()); + server_info.set_state( + Utility::serverState(server_.initManager().state(), server_.healthCheckFailed())); + + server_info.mutable_uptime_current_epoch()->set_seconds(uptime_current_epoch); + server_info.mutable_uptime_all_epochs()->set_seconds(uptime_all_epochs); + envoy::admin::v3::CommandLineOptions* command_line_options = + server_info.mutable_command_line_options(); + *command_line_options = *server_.options().toCommandLineOptions(); + response.add(MessageUtil::getJsonStringFromMessage(server_info, true, true)); + headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Json); + return Http::Code::OK; +} + +} // namespace Server +} // namespace Envoy diff --git a/source/server/admin/server_info_handler.h b/source/server/admin/server_info_handler.h new file mode 100644 index 000000000000..6a2a29abf3ac --- /dev/null +++ b/source/server/admin/server_info_handler.h @@ -0,0 +1,43 @@ +#pragma once + +#include "envoy/buffer/buffer.h" +#include "envoy/http/codes.h" +#include "envoy/http/header_map.h" +#include "envoy/server/admin.h" +#include "envoy/server/instance.h" + +#include "server/admin/handler_ctx.h" + +#include "absl/strings/string_view.h" + +namespace Envoy { +namespace Server { + +class ServerInfoHandler : public HandlerContextBase { + +public: + ServerInfoHandler(Server::Instance& server); + + Http::Code handlerCerts(absl::string_view path_and_query, + Http::ResponseHeaderMap& response_headers, Buffer::Instance& response, + AdminStream&); + + Http::Code handlerServerInfo(absl::string_view path_and_query, + Http::ResponseHeaderMap& 
response_headers, + Buffer::Instance& response, AdminStream&); + + Http::Code handlerReady(absl::string_view path_and_query, + Http::ResponseHeaderMap& response_headers, Buffer::Instance& response, + AdminStream&); + + Http::Code handlerHotRestartVersion(absl::string_view path_and_query, + Http::ResponseHeaderMap& response_headers, + Buffer::Instance& response, AdminStream&); + + Http::Code handlerMemory(absl::string_view path_and_query, + Http::ResponseHeaderMap& response_headers, Buffer::Instance& response, + AdminStream&); +}; + +} // namespace Server +} // namespace Envoy diff --git a/source/server/admin/stats_handler.cc b/source/server/admin/stats_handler.cc index 08fc8a965f8c..3ec4702b2c5a 100644 --- a/source/server/admin/stats_handler.cc +++ b/source/server/admin/stats_handler.cc @@ -1,5 +1,7 @@ #include "server/admin/stats_handler.h" +#include "envoy/admin/v3/mutex_stats.pb.h" + #include "common/common/empty_string.h" #include "common/html/utility.h" #include "common/http/headers.h" @@ -150,6 +152,26 @@ Http::Code StatsHandler::handlerPrometheusStats(absl::string_view path_and_query return Http::Code::OK; } +// TODO(ambuc) Export this as a server (?) stat for monitoring. +Http::Code StatsHandler::handlerContention(absl::string_view, + Http::ResponseHeaderMap& response_headers, + Buffer::Instance& response, AdminStream&) { + + if (server_.options().mutexTracingEnabled() && server_.mutexTracer() != nullptr) { + response_headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Json); + + envoy::admin::v3::MutexStats mutex_stats; + mutex_stats.set_num_contentions(server_.mutexTracer()->numContentions()); + mutex_stats.set_current_wait_cycles(server_.mutexTracer()->currentWaitCycles()); + mutex_stats.set_lifetime_wait_cycles(server_.mutexTracer()->lifetimeWaitCycles()); + response.add(MessageUtil::getJsonStringFromMessage(mutex_stats, true, true)); + } else { + response.add("Mutex contention tracing is not enabled. 
To enable, run Envoy with flag " + "--enable-mutex-tracing."); + } + return Http::Code::OK; +} + std::string StatsHandler::statsAsJson(const std::map& all_stats, const std::map& text_readouts, diff --git a/source/server/admin/stats_handler.h b/source/server/admin/stats_handler.h index ff166f80bfb1..abdb656ed2c9 100644 --- a/source/server/admin/stats_handler.h +++ b/source/server/admin/stats_handler.h @@ -44,6 +44,9 @@ class StatsHandler : public HandlerContextBase { Http::Code handlerPrometheusStats(absl::string_view path_and_query, Http::ResponseHeaderMap& response_headers, Buffer::Instance& response, AdminStream&); + Http::Code handlerContention(absl::string_view path_and_query, + Http::ResponseHeaderMap& response_headers, + Buffer::Instance& response, AdminStream&); private: template diff --git a/test/server/admin/BUILD b/test/server/admin/BUILD index 2d56fae7cd01..07045a5b3868 100644 --- a/test/server/admin/BUILD +++ b/test/server/admin/BUILD @@ -35,7 +35,6 @@ envoy_cc_test( "//source/common/protobuf:utility_lib", "//source/common/stats:symbol_table_creator_lib", "//source/common/stats:thread_local_store_lib", - "//source/extensions/transport_sockets/tls:context_config_lib", "//source/server/admin:admin_lib", "//test/mocks/runtime:runtime_mocks", "//test/mocks/server:server_mocks", @@ -104,6 +103,17 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "server_info_handler_test", + srcs = ["server_info_handler_test.cc"], + deps = [ + ":admin_instance_lib", + "//source/extensions/transport_sockets/tls:context_config_lib", + "//test/test_common:logging_lib", + "@envoy_api//envoy/admin/v3:pkg_cc_proto", + ], +) + envoy_cc_test( name = "config_tracker_impl_test", srcs = ["config_tracker_impl_test.cc"], diff --git a/test/server/admin/admin_test.cc b/test/server/admin/admin_test.cc index 4d36bb6fe6de..435a6d02bb26 100644 --- a/test/server/admin/admin_test.cc +++ b/test/server/admin/admin_test.cc @@ -5,7 +5,6 @@ #include "envoy/admin/v3/clusters.pb.h" #include "envoy/admin/v3/config_dump.pb.h" -#include "envoy/admin/v3/memory.pb.h" #include "envoy/admin/v3/server_info.pb.h" #include "envoy/config/core/v3/base.pb.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" @@ -17,8 +16,6 @@ #include "common/protobuf/protobuf.h" #include "common/protobuf/utility.h" -#include "extensions/transport_sockets/tls/context_config_impl.h" - #include "test/server/admin/admin_instance.h" #include "test/test_common/logging.h" #include "test/test_common/printers.h" @@ -28,13 +25,9 @@ #include "gmock/gmock.h" #include "gtest/gtest.h" -using testing::AllOf; -using testing::Ge; using testing::HasSubstr; using testing::Invoke; using testing::NiceMock; -using testing::Property; -using testing::Ref; using testing::Return; using testing::ReturnPointee; using testing::ReturnRef; @@ -385,49 +378,6 @@ TEST_P(AdminInstanceTest, ConfigDumpResourceNotRepeated) { getCallback("/config_dump?resource=version_info", header_map, response)); } -TEST_P(AdminInstanceTest, Memory) { - Http::ResponseHeaderMapImpl header_map; - Buffer::OwnedImpl response; - EXPECT_EQ(Http::Code::OK, getCallback("/memory", header_map, response)); - const std::string output_json = response.toString(); - envoy::admin::v3::Memory output_proto; - TestUtility::loadFromJson(output_json, output_proto); - EXPECT_THAT(output_proto, AllOf(Property(&envoy::admin::v3::Memory::allocated, Ge(0)), - Property(&envoy::admin::v3::Memory::heap_size, Ge(0)), - Property(&envoy::admin::v3::Memory::pageheap_unmapped, Ge(0)), - 
Property(&envoy::admin::v3::Memory::pageheap_free, Ge(0)), - Property(&envoy::admin::v3::Memory::total_thread_cache, Ge(0)))); -} - -TEST_P(AdminInstanceTest, ContextThatReturnsNullCertDetails) { - Http::ResponseHeaderMapImpl header_map; - Buffer::OwnedImpl response; - - // Setup a context that returns null cert details. - testing::NiceMock factory_context; - envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext config; - Extensions::TransportSockets::Tls::ClientContextConfigImpl cfg(config, factory_context); - Stats::IsolatedStoreImpl store; - Envoy::Ssl::ClientContextSharedPtr client_ctx( - server_.sslContextManager().createSslClientContext(store, cfg)); - - const std::string expected_empty_json = R"EOF({ - "certificates": [ - { - "ca_cert": [], - "cert_chain": [] - } - ] -} -)EOF"; - - // Validate that cert details are null and /certs handles it correctly. - EXPECT_EQ(nullptr, client_ctx->getCaCertInformation()); - EXPECT_TRUE(client_ctx->getCertChainInformation().empty()); - EXPECT_EQ(Http::Code::OK, getCallback("/certs", header_map, response)); - EXPECT_EQ(expected_empty_json, response.toString()); -} - TEST_P(AdminInstanceTest, ClustersJson) { Upstream::ClusterManager::ClusterInfoMap cluster_map; ON_CALL(server_.cluster_manager_, clusters()).WillByDefault(ReturnPointee(&cluster_map)); @@ -633,119 +583,5 @@ fake_cluster::1.2.3.4:80::local_origin_success_rate::93.2 EXPECT_EQ(expected_text, response2.toString()); } -TEST_P(AdminInstanceTest, GetRequest) { - EXPECT_CALL(server_.options_, toCommandLineOptions()).WillRepeatedly(Invoke([] { - Server::CommandLineOptionsPtr command_line_options = - std::make_unique(); - command_line_options->set_restart_epoch(2); - command_line_options->set_service_cluster("cluster"); - return command_line_options; - })); - NiceMock initManager; - ON_CALL(server_, initManager()).WillByDefault(ReturnRef(initManager)); - ON_CALL(server_.hot_restart_, version()).WillByDefault(Return("foo_version")); - - { - Http::ResponseHeaderMapImpl response_headers; - std::string body; - - ON_CALL(initManager, state()).WillByDefault(Return(Init::Manager::State::Initialized)); - EXPECT_EQ(Http::Code::OK, admin_.request("/server_info", "GET", response_headers, body)); - envoy::admin::v3::ServerInfo server_info_proto; - EXPECT_THAT(std::string(response_headers.ContentType()->value().getStringView()), - HasSubstr("application/json")); - - // We only test that it parses as the proto and that some fields are correct, since - // values such as timestamps + Envoy version are tricky to test for. - TestUtility::loadFromJson(body, server_info_proto); - EXPECT_EQ(server_info_proto.state(), envoy::admin::v3::ServerInfo::LIVE); - EXPECT_EQ(server_info_proto.hot_restart_version(), "foo_version"); - EXPECT_EQ(server_info_proto.command_line_options().restart_epoch(), 2); - EXPECT_EQ(server_info_proto.command_line_options().service_cluster(), "cluster"); - } - - { - Http::ResponseHeaderMapImpl response_headers; - std::string body; - - ON_CALL(initManager, state()).WillByDefault(Return(Init::Manager::State::Uninitialized)); - EXPECT_EQ(Http::Code::OK, admin_.request("/server_info", "GET", response_headers, body)); - envoy::admin::v3::ServerInfo server_info_proto; - EXPECT_THAT(std::string(response_headers.ContentType()->value().getStringView()), - HasSubstr("application/json")); - - // We only test that it parses as the proto and that some fields are correct, since - // values such as timestamps + Envoy version are tricky to test for. 
- TestUtility::loadFromJson(body, server_info_proto); - EXPECT_EQ(server_info_proto.state(), envoy::admin::v3::ServerInfo::PRE_INITIALIZING); - EXPECT_EQ(server_info_proto.command_line_options().restart_epoch(), 2); - EXPECT_EQ(server_info_proto.command_line_options().service_cluster(), "cluster"); - } - - Http::ResponseHeaderMapImpl response_headers; - std::string body; - - ON_CALL(initManager, state()).WillByDefault(Return(Init::Manager::State::Initializing)); - EXPECT_EQ(Http::Code::OK, admin_.request("/server_info", "GET", response_headers, body)); - envoy::admin::v3::ServerInfo server_info_proto; - EXPECT_THAT(std::string(response_headers.ContentType()->value().getStringView()), - HasSubstr("application/json")); - - // We only test that it parses as the proto and that some fields are correct, since - // values such as timestamps + Envoy version are tricky to test for. - TestUtility::loadFromJson(body, server_info_proto); - EXPECT_EQ(server_info_proto.state(), envoy::admin::v3::ServerInfo::INITIALIZING); - EXPECT_EQ(server_info_proto.command_line_options().restart_epoch(), 2); - EXPECT_EQ(server_info_proto.command_line_options().service_cluster(), "cluster"); -} - -TEST_P(AdminInstanceTest, GetReadyRequest) { - NiceMock initManager; - ON_CALL(server_, initManager()).WillByDefault(ReturnRef(initManager)); - - { - Http::ResponseHeaderMapImpl response_headers; - std::string body; - - ON_CALL(initManager, state()).WillByDefault(Return(Init::Manager::State::Initialized)); - EXPECT_EQ(Http::Code::OK, admin_.request("/ready", "GET", response_headers, body)); - EXPECT_EQ(body, "LIVE\n"); - EXPECT_THAT(std::string(response_headers.ContentType()->value().getStringView()), - HasSubstr("text/plain")); - } - - { - Http::ResponseHeaderMapImpl response_headers; - std::string body; - - ON_CALL(initManager, state()).WillByDefault(Return(Init::Manager::State::Uninitialized)); - EXPECT_EQ(Http::Code::ServiceUnavailable, - admin_.request("/ready", "GET", response_headers, body)); - EXPECT_EQ(body, "PRE_INITIALIZING\n"); - EXPECT_THAT(std::string(response_headers.ContentType()->value().getStringView()), - HasSubstr("text/plain")); - } - - Http::ResponseHeaderMapImpl response_headers; - std::string body; - - ON_CALL(initManager, state()).WillByDefault(Return(Init::Manager::State::Initializing)); - EXPECT_EQ(Http::Code::ServiceUnavailable, - admin_.request("/ready", "GET", response_headers, body)); - EXPECT_EQ(body, "INITIALIZING\n"); - EXPECT_THAT(std::string(response_headers.ContentType()->value().getStringView()), - HasSubstr("text/plain")); -} - -TEST_P(AdminInstanceTest, PostRequest) { - Http::ResponseHeaderMapImpl response_headers; - std::string body; - EXPECT_NO_LOGS(EXPECT_EQ(Http::Code::OK, - admin_.request("/healthcheck/fail", "POST", response_headers, body))); - EXPECT_EQ(body, "OK\n"); - EXPECT_THAT(std::string(response_headers.ContentType()->value().getStringView()), - HasSubstr("text/plain")); -} - } // namespace Server } // namespace Envoy diff --git a/test/server/admin/server_info_handler_test.cc b/test/server/admin/server_info_handler_test.cc new file mode 100644 index 000000000000..86e8223cc74d --- /dev/null +++ b/test/server/admin/server_info_handler_test.cc @@ -0,0 +1,178 @@ +#include "envoy/admin/v3/memory.pb.h" + +#include "extensions/transport_sockets/tls/context_config_impl.h" + +#include "test/server/admin/admin_instance.h" +#include "test/test_common/logging.h" + +using testing::Ge; +using testing::HasSubstr; +using testing::Property; +using testing::Return; + +namespace Envoy { 
+namespace Server { + +INSTANTIATE_TEST_SUITE_P(IpVersions, AdminInstanceTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); + +TEST_P(AdminInstanceTest, ContextThatReturnsNullCertDetails) { + Http::ResponseHeaderMapImpl header_map; + Buffer::OwnedImpl response; + + // Setup a context that returns null cert details. + testing::NiceMock factory_context; + envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext config; + Extensions::TransportSockets::Tls::ClientContextConfigImpl cfg(config, factory_context); + Stats::IsolatedStoreImpl store; + Envoy::Ssl::ClientContextSharedPtr client_ctx( + server_.sslContextManager().createSslClientContext(store, cfg)); + + const std::string expected_empty_json = R"EOF({ + "certificates": [ + { + "ca_cert": [], + "cert_chain": [] + } + ] +} +)EOF"; + + // Validate that cert details are null and /certs handles it correctly. + EXPECT_EQ(nullptr, client_ctx->getCaCertInformation()); + EXPECT_TRUE(client_ctx->getCertChainInformation().empty()); + EXPECT_EQ(Http::Code::OK, getCallback("/certs", header_map, response)); + EXPECT_EQ(expected_empty_json, response.toString()); +} + +TEST_P(AdminInstanceTest, Memory) { + Http::ResponseHeaderMapImpl header_map; + Buffer::OwnedImpl response; + EXPECT_EQ(Http::Code::OK, getCallback("/memory", header_map, response)); + const std::string output_json = response.toString(); + envoy::admin::v3::Memory output_proto; + TestUtility::loadFromJson(output_json, output_proto); + EXPECT_THAT(output_proto, AllOf(Property(&envoy::admin::v3::Memory::allocated, Ge(0)), + Property(&envoy::admin::v3::Memory::heap_size, Ge(0)), + Property(&envoy::admin::v3::Memory::pageheap_unmapped, Ge(0)), + Property(&envoy::admin::v3::Memory::pageheap_free, Ge(0)), + Property(&envoy::admin::v3::Memory::total_thread_cache, Ge(0)))); +} + +TEST_P(AdminInstanceTest, GetReadyRequest) { + NiceMock initManager; + ON_CALL(server_, initManager()).WillByDefault(ReturnRef(initManager)); + + { + Http::ResponseHeaderMapImpl response_headers; + std::string body; + + ON_CALL(initManager, state()).WillByDefault(Return(Init::Manager::State::Initialized)); + EXPECT_EQ(Http::Code::OK, admin_.request("/ready", "GET", response_headers, body)); + EXPECT_EQ(body, "LIVE\n"); + EXPECT_THAT(std::string(response_headers.ContentType()->value().getStringView()), + HasSubstr("text/plain")); + } + + { + Http::ResponseHeaderMapImpl response_headers; + std::string body; + + ON_CALL(initManager, state()).WillByDefault(Return(Init::Manager::State::Uninitialized)); + EXPECT_EQ(Http::Code::ServiceUnavailable, + admin_.request("/ready", "GET", response_headers, body)); + EXPECT_EQ(body, "PRE_INITIALIZING\n"); + EXPECT_THAT(std::string(response_headers.ContentType()->value().getStringView()), + HasSubstr("text/plain")); + } + + Http::ResponseHeaderMapImpl response_headers; + std::string body; + + ON_CALL(initManager, state()).WillByDefault(Return(Init::Manager::State::Initializing)); + EXPECT_EQ(Http::Code::ServiceUnavailable, + admin_.request("/ready", "GET", response_headers, body)); + EXPECT_EQ(body, "INITIALIZING\n"); + EXPECT_THAT(std::string(response_headers.ContentType()->value().getStringView()), + HasSubstr("text/plain")); +} + +TEST_P(AdminInstanceTest, GetRequest) { + EXPECT_CALL(server_.options_, toCommandLineOptions()).WillRepeatedly(Invoke([] { + Server::CommandLineOptionsPtr command_line_options = + std::make_unique(); + command_line_options->set_restart_epoch(2); + 
command_line_options->set_service_cluster("cluster"); + return command_line_options; + })); + NiceMock initManager; + ON_CALL(server_, initManager()).WillByDefault(ReturnRef(initManager)); + ON_CALL(server_.hot_restart_, version()).WillByDefault(Return("foo_version")); + + { + Http::ResponseHeaderMapImpl response_headers; + std::string body; + + ON_CALL(initManager, state()).WillByDefault(Return(Init::Manager::State::Initialized)); + EXPECT_EQ(Http::Code::OK, admin_.request("/server_info", "GET", response_headers, body)); + envoy::admin::v3::ServerInfo server_info_proto; + EXPECT_THAT(std::string(response_headers.ContentType()->value().getStringView()), + HasSubstr("application/json")); + + // We only test that it parses as the proto and that some fields are correct, since + // values such as timestamps + Envoy version are tricky to test for. + TestUtility::loadFromJson(body, server_info_proto); + EXPECT_EQ(server_info_proto.state(), envoy::admin::v3::ServerInfo::LIVE); + EXPECT_EQ(server_info_proto.hot_restart_version(), "foo_version"); + EXPECT_EQ(server_info_proto.command_line_options().restart_epoch(), 2); + EXPECT_EQ(server_info_proto.command_line_options().service_cluster(), "cluster"); + } + + { + Http::ResponseHeaderMapImpl response_headers; + std::string body; + + ON_CALL(initManager, state()).WillByDefault(Return(Init::Manager::State::Uninitialized)); + EXPECT_EQ(Http::Code::OK, admin_.request("/server_info", "GET", response_headers, body)); + envoy::admin::v3::ServerInfo server_info_proto; + EXPECT_THAT(std::string(response_headers.ContentType()->value().getStringView()), + HasSubstr("application/json")); + + // We only test that it parses as the proto and that some fields are correct, since + // values such as timestamps + Envoy version are tricky to test for. + TestUtility::loadFromJson(body, server_info_proto); + EXPECT_EQ(server_info_proto.state(), envoy::admin::v3::ServerInfo::PRE_INITIALIZING); + EXPECT_EQ(server_info_proto.command_line_options().restart_epoch(), 2); + EXPECT_EQ(server_info_proto.command_line_options().service_cluster(), "cluster"); + } + + Http::ResponseHeaderMapImpl response_headers; + std::string body; + + ON_CALL(initManager, state()).WillByDefault(Return(Init::Manager::State::Initializing)); + EXPECT_EQ(Http::Code::OK, admin_.request("/server_info", "GET", response_headers, body)); + envoy::admin::v3::ServerInfo server_info_proto; + EXPECT_THAT(std::string(response_headers.ContentType()->value().getStringView()), + HasSubstr("application/json")); + + // We only test that it parses as the proto and that some fields are correct, since + // values such as timestamps + Envoy version are tricky to test for. 
+ TestUtility::loadFromJson(body, server_info_proto); + EXPECT_EQ(server_info_proto.state(), envoy::admin::v3::ServerInfo::INITIALIZING); + EXPECT_EQ(server_info_proto.command_line_options().restart_epoch(), 2); + EXPECT_EQ(server_info_proto.command_line_options().service_cluster(), "cluster"); +} + +TEST_P(AdminInstanceTest, PostRequest) { + Http::ResponseHeaderMapImpl response_headers; + std::string body; + EXPECT_NO_LOGS(EXPECT_EQ(Http::Code::OK, + admin_.request("/healthcheck/fail", "POST", response_headers, body))); + EXPECT_EQ(body, "OK\n"); + EXPECT_THAT(std::string(response_headers.ContentType()->value().getStringView()), + HasSubstr("text/plain")); +} + +} // namespace Server +} // namespace Envoy From e3f1506a0c6bbe7afa0980cca58611e105f3e350 Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Wed, 20 May 2020 09:29:30 -0700 Subject: [PATCH 208/909] docs: break release notes into categories (#11217) This attempts to make it easier when upgrading to understand the changes in the release and how they will affect a given deployment. Related: #11211 Signed-off-by: Greg Greenway --- CONTRIBUTING.md | 5 +- docs/root/version_history/current.rst | 56 +++++++++++++------ tools/code_format/check_format.py | 19 ++++--- .../check_format/version_history/current.rst | 16 +++++- 4 files changed, 67 insertions(+), 29 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index b89f748a9f4c..c383a6fe2f43 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -89,7 +89,10 @@ versioning guidelines: open it. * Any PR that changes user-facing behavior **must** have associated documentation in [docs](docs) as well as [release notes](docs/root/version_history/current.rst). API changes should be documented - inline with protos as per the [API contribution guidelines](api/CONTRIBUTING.md). + inline with protos as per the [API contribution guidelines](api/CONTRIBUTING.md). If a change applies + to multiple sections of the release notes, it should be noted in the first (most important) section + that applies. For instance, a bug fix that introduces incompatible behavior should be noted in + `Incompatible Behavior Changes` but not in `Bug Fixes`. * All code comments and documentation are expected to have proper English grammar and punctuation. If you are not a fluent English speaker (or a bad writer ;-)) please let us know and we will try to find some help but there are no guarantees. diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 220ddade58a2..6c1b461f2c5e 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -1,15 +1,50 @@ 1.15.0 (Pending) ================ -Changes -------- -* access loggers: added GRPC_STATUS operator on logging format. +Incompatible Behavior Changes +----------------------------- +*Changes that are expected to cause an incompatibility if applicable; deployment changes are likely required* + +* build: official released binary is now built on Ubuntu 18.04, requires glibc >= 2.27. + +Minor Behavior Changes +---------------------- +*Changes that may cause incompatibilities for some users, but should not for most* + * access loggers: applied existing buffer limits to access logs, as well as :ref:`stats ` for logged / dropped logs. This can be reverted temporarily by setting runtime feature `envoy.reloadable_features.disallow_unbounded_access_logs` to false. 
+* http: fixed several bugs with applying correct connection close behavior across the http connection manager, health checker, and connection pool. This behavior may be temporarily reverted by setting runtime feature `envoy.reloadable_features.fix_connection_close` to false. +* http: fixed a bug where the upgrade header was not cleared on responses to non-upgrade requests. + Can be reverted temporarily by setting runtime feature `envoy.reloadable_features.fix_upgrade_response` to false. +* http: stopped overwriting `date` response headers. Responses without a `date` header will still have the header properly set. This behavior can be temporarily reverted by setting `envoy.reloadable_features.preserve_upstream_date` to false. +* http: stopped adding a synthetic path to CONNECT requests, meaning unconfigured CONNECT requests will now return 404 instead of 403. This behavior can be temporarily reverted by setting `envoy.reloadable_features.stop_faking_paths` to false. +* router: allow retries of streaming or incomplete requests. This removes stat `rq_retry_skipped_request_not_complete`. +* router: allow retries by default when upstream responds with :ref:`x-envoy-overloaded `. + +Bug Fixes +--------- +*Changes expected to improve the state of the world and are unlikely to have negative effects* + +* http: fixed a bug in the grpc_http1_reverse_bridge filter where header-only requests were forwarded with a non-zero content length. +* http: fixed a bug where in some cases slash was moved from path to query string when :ref:`merging of adjacent slashes` is enabled. +* http: fixed several bugs with applying correct connection close behavior across the http connection manager, health checker, and connection pool. This behavior may be temporarily reverted by setting runtime feature `envoy.reloadable_features.fix_connection_close` to false. +* prometheus stats: fix the sort order of output lines to comply with the standard. +* upstream: fixed a bug where Envoy would panic when receiving a GRPC SERVICE_UNKNOWN status on the health check. + +Removed Config or Runtime +------------------------- +*Normally occurs at the end of the* :ref:`deprecation period ` + +* http: remove legacy connection pool code and their runtime features: `envoy.reloadable_features.new_http1_connection_pool_behavior` and + `envoy.reloadable_features.new_http2_connection_pool_behavior`. + +New Features +------------ + +* access loggers: added GRPC_STATUS operator on logging format. * access loggers: extened specifier for FilterStateFormatter to output :ref:`unstructured log string `. * access loggers: file access logger config added :ref:`log_format `. * aggregate cluster: make route :ref:`retry_priority ` predicates work with :ref:`this cluster type `. -* build: official released binary is now built on Ubuntu 18.04, requires glibc >= 2.27. * compressor: generic :ref:`compressor ` filter exposed to users. * config: added :ref:`version_text ` stat that reflects xDS version. * dynamic forward proxy: added :ref:`SNI based dynamic forward proxy ` support. @@ -25,15 +60,6 @@ Changes * health checks: allow configuring health check transport sockets by specifying :ref:`transport socket match criteria `. * http: added :ref:`stripping port from host header ` support. * http: added support for proxying CONNECT requests, terminating CONNECT requests, and converting raw TCP streams into HTTP/2 CONNECT requests. See :ref:`upgrade documentation` for details. 
-* http: fixed a bug in the grpc_http1_reverse_bridge filter where header-only requests were forwarded with a non-zero content length. -* http: fixed a bug where in some cases slash was moved from path to query string when :ref:`merging of adjacent slashes` is enabled. -* http: fixed several bugs with applying correct connection close behavior across the http connection manager, health checker, and connection pool. This behavior may be temporarily reverted by setting runtime feature `envoy.reloadable_features.fix_connection_close` to false. -* http: fixed a bug where the upgrade header was not cleared on responses to non-upgrade requests. - Can be reverted temporarily by setting runtime feature `envoy.reloadable_features.fix_upgrade_response` to false. -* http: remove legacy connection pool code and their runtime features: `envoy.reloadable_features.new_http1_connection_pool_behavior` and - `envoy.reloadable_features.new_http2_connection_pool_behavior`. -* http: stopped adding a synthetic path to CONNECT requests, meaning unconfigured CONNECT requests will now return 404 instead of 403. This behavior can be temporarily reverted by setting `envoy.reloadable_features.stop_faking_paths` to false. -* http: stopped overwriting `date` response headers. Responses without a `date` header will still have the header properly set. This behavior can be temporarily reverted by setting `envoy.reloadable_features.preserve_upstream_date` to false. * listener: added in place filter chain update flow for tcp listener update which doesn't close connections if the corresponding network filter chain is equivalent during the listener update. Can be disabled by setting runtime feature `envoy.reloadable_features.listener_in_place_filterchain_update` to false. Also added additional draining filter chain stat for :ref:`listener manager ` to track the number of draining filter chains and the number of in place update attempts. @@ -44,19 +70,15 @@ Changes in :ref:`client_features` field. * network filters: added a :ref:`postgres proxy filter `. * network filters: added a :ref:`rocketmq proxy filter `. -* prometheus stats: fix the sort order of output lines to comply with the standard. * request_id: added to :ref:`always_set_request_id_in_response setting ` to set :ref:`x-request-id ` header in response even if tracing is not forced. -* router: allow retries of streaming or incomplete requests. This removes stat `rq_retry_skipped_request_not_complete`. -* router: allow retries by default when upstream responds with :ref:`x-envoy-overloaded `. * router: more fine grained internal redirect configs are added to the :ref`internal_redirect_policy ` field. * runtime: add new gauge :ref:`deprecated_feature_seen_since_process_start ` that gets reset across hot restarts. * stats: added the option to :ref:`report counters as deltas ` to the metrics service stats sink. * tracing: tracing configuration has been made fully dynamic and every HTTP connection manager can now have a separate :ref:`tracing provider `. -* upstream: fixed a bug where Envoy would panic when receiving a GRPC SERVICE_UNKNOWN status on the health check. 
Deprecated ---------- diff --git a/tools/code_format/check_format.py b/tools/code_format/check_format.py index 000c4de01299..60ab0a9d0216 100755 --- a/tools/code_format/check_format.py +++ b/tools/code_format/check_format.py @@ -411,7 +411,7 @@ def hasInvalidAngleBracketDirectory(line): VERSION_HISTORY_NEW_LINE_REGEX = re.compile("\* ([a-z \-_]+): ([a-z:`]+)") -VERSION_HISTORY_NEW_SECTION_REGEX = re.compile("^-----[-]+$") +VERSION_HISTORY_SECTION_NAME = re.compile("^[A-Z][A-Za-z ]*$") RELOADABLE_FLAG_REGEX = re.compile(".*(.)(envoy.reloadable_features.[^ ]*)\s.*") # Check for punctuation in a terminal ref clause, e.g. # :ref:`panic mode. ` @@ -419,8 +419,6 @@ def hasInvalidAngleBracketDirectory(line): def checkCurrentReleaseNotes(file_path, error_messages): - in_changes_section = False - first_word_of_prior_line = '' next_word_to_check = '' # first word after : prior_line = '' @@ -439,12 +437,15 @@ def endsWithPeriod(prior_line): def reportError(message): error_messages.append("%s:%d: %s" % (file_path, line_number + 1, message)) - if VERSION_HISTORY_NEW_SECTION_REGEX.match(line): - # The second section is deprecations, which are not sorted. - if in_changes_section: + if VERSION_HISTORY_SECTION_NAME.match(line): + if line == "Deprecated": + # The deprecations section is last, and does not have enforced formatting. break - # If we see a section marker we are now in the changes section. - in_changes_section = True + + # Reset all parsing at the start of a section. + first_word_of_prior_line = '' + next_word_to_check = '' # first word after : + prior_line = '' # make sure flags are surrounded by ``s flag_match = RELOADABLE_FLAG_REGEX.match(line) @@ -452,7 +453,7 @@ def reportError(message): if not flag_match.groups()[0].startswith('`'): reportError("Flag `%s` should be enclosed in back ticks" % flag_match.groups()[1]) - if line.startswith("*"): + if line.startswith("* "): if not endsWithPeriod(prior_line): reportError("The following release note does not end with a '.'\n %s" % prior_line) diff --git a/tools/testdata/check_format/version_history/current.rst b/tools/testdata/check_format/version_history/current.rst index ef39a4202101..c2ecaddad3ed 100644 --- a/tools/testdata/check_format/version_history/current.rst +++ b/tools/testdata/check_format/version_history/current.rst @@ -1,13 +1,25 @@ 1.10.0 (pending) ================ -Changes -------- +Section One +----------------------------- +*Some doc text* * zzzzz: this should be alphabatized after a. * aaaaa: this should be alphabatized before z. * aaaaa: aaaa is before 'this'. * access log: Added should be added not Added. +Another Section +--------------- +*Doc string here* + +* server: changed server code. +* upstream: made a change. + Deprecated ---------- + +* no +* enforcement +* here From 629313832e8e8076ae47d595e3ea85ad6e77c785 Mon Sep 17 00:00:00 2001 From: Florin Coras Date: Wed, 20 May 2020 13:51:03 -0700 Subject: [PATCH 209/909] network: refactor generic socket code (#11269) Move generic socket code into separate files. Refactor that prepares move of socket apis from addresses to the socket class. 
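For readers skimming this patch, a minimal sketch (not part of the change; the helper function name is invented for illustration) of how call sites consume the relocated SocketOptionName helper from the new envoy/network/socket.h, including the has_value() to hasValue() rename that accounts for most of the test churn below:

#include "envoy/network/socket.h"

namespace Envoy {
namespace Network {

// Hypothetical call-site pattern after the refactor.
bool sendBufferOptionSupported() {
  const SocketOptionName optname = ENVOY_MAKE_SOCKET_OPTION_NAME(SOL_SOCKET, SO_SNDBUF);
  if (!optname.hasValue()) { // renamed from has_value() in this patch
    return false;            // an empty value means the option is unsupported on this platform
  }
  // level()/option() are the raw setsockopt(2) arguments; name() is a
  // human-readable label, e.g. "SOL_SOCKET/SO_SNDBUF".
  return optname.level() == SOL_SOCKET && optname.option() == SO_SNDBUF;
}

} // namespace Network
} // namespace Envoy
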
Signed-off-by: Florin Coras --- include/envoy/network/BUILD | 12 +- include/envoy/network/listen_socket.h | 160 +--------------- include/envoy/network/socket.h | 179 ++++++++++++++++++ source/common/network/BUILD | 13 +- source/common/network/listen_socket_impl.h | 41 +--- source/common/network/socket_impl.h | 51 +++++ source/common/network/socket_option_impl.cc | 6 +- .../network/socket_option_factory_test.cc | 2 +- .../common/network/socket_option_impl_test.cc | 4 +- test/common/network/socket_option_test.h | 2 +- .../upstream/cluster_manager_impl_test.cc | 4 +- .../http/original_src/original_src_test.cc | 6 +- .../original_src/original_src_test.cc | 6 +- test/server/listener_manager_impl_test.cc | 2 +- 14 files changed, 271 insertions(+), 217 deletions(-) create mode 100644 include/envoy/network/socket.h create mode 100644 source/common/network/socket_impl.h diff --git a/include/envoy/network/BUILD b/include/envoy/network/BUILD index 16a0945a2770..233a5a55566b 100644 --- a/include/envoy/network/BUILD +++ b/include/envoy/network/BUILD @@ -82,12 +82,22 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "socket_interface", + hdrs = ["socket.h"], + deps = [ + ":address_interface", + ":io_handle_interface", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + ], +) + envoy_cc_library( name = "listen_socket_interface", hdrs = ["listen_socket.h"], deps = [ - ":address_interface", ":io_handle_interface", + ":socket_interface", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], ) diff --git a/include/envoy/network/listen_socket.h b/include/envoy/network/listen_socket.h index 89b8cdbebb23..c654cecdaf8a 100644 --- a/include/envoy/network/listen_socket.h +++ b/include/envoy/network/listen_socket.h @@ -9,6 +9,7 @@ #include "envoy/config/core/v3/base.pb.h" #include "envoy/network/address.h" #include "envoy/network/io_handle.h" +#include "envoy/network/socket.h" #include "absl/strings/string_view.h" #include "absl/types/optional.h" @@ -16,165 +17,6 @@ namespace Envoy { namespace Network { -// SocketOptionName is an optional value that captures the setsockopt(2) -// arguments. The idea here is that if a socket option is not supported -// on a platform, we can make this the empty value, which allows us to -// avoid #ifdef proliferation. -struct SocketOptionName { - SocketOptionName() = default; - SocketOptionName(const SocketOptionName&) = default; - SocketOptionName(int level, int option, const std::string& name) - : value_(std::make_tuple(level, option, name)) {} - - int level() const { return std::get<0>(value_.value()); } - int option() const { return std::get<1>(value_.value()); } - const std::string& name() const { return std::get<2>(value_.value()); } - - bool has_value() const { return value_.has_value(); } - bool operator==(const SocketOptionName& rhs) const { return value_ == rhs.value_; } - -private: - absl::optional> value_; -}; - -// ENVOY_MAKE_SOCKET_OPTION_NAME is a helper macro to generate a -// SocketOptionName with a descriptive string name. -#define ENVOY_MAKE_SOCKET_OPTION_NAME(level, option) \ - Network::SocketOptionName(level, option, #level "/" #option) - -/** - * Base class for Sockets - */ -class Socket { -public: - virtual ~Socket() = default; - - /** - * @return the local address of the socket. - */ - virtual const Address::InstanceConstSharedPtr& localAddress() const PURE; - - /** - * Set the local address of the socket. 
On accepted sockets the local address defaults to the - * one at which the connection was received at, which is the same as the listener's address, if - * the listener is bound to a specific address. - * - * @param local_address the new local address. - */ - virtual void setLocalAddress(const Address::InstanceConstSharedPtr& local_address) PURE; - - /** - * @return IoHandle for the underlying connection - */ - virtual IoHandle& ioHandle() PURE; - - /** - * @return const IoHandle for the underlying connection - */ - virtual const IoHandle& ioHandle() const PURE; - - /** - * @return the type (stream or datagram) of the socket. - */ - virtual Address::SocketType socketType() const PURE; - - /** - * Close the underlying socket. - */ - virtual void close() PURE; - - /** - * Return true if close() hasn't been called. - */ - virtual bool isOpen() const PURE; - - /** - * Visitor class for setting socket options. - */ - class Option { - public: - virtual ~Option() = default; - - /** - * @param socket the socket on which to apply options. - * @param state the current state of the socket. Significant for options that can only be - * set for some particular state of the socket. - * @return true if succeeded, false otherwise. - */ - virtual bool setOption(Socket& socket, - envoy::config::core::v3::SocketOption::SocketState state) const PURE; - - /** - * @param vector of bytes to which the option should append hash key data that will be used - * to separate connections based on the option. Any data already in the key vector must - * not be modified. - */ - virtual void hashKey(std::vector& key) const PURE; - - /** - * Contains details about what this option applies to a socket. - */ - struct Details { - SocketOptionName name_; - std::string value_; ///< Binary string representation of an option's value. - - bool operator==(const Details& other) const { - return name_ == other.name_ && value_ == other.value_; - } - }; - - /** - * @param socket The socket for which we want to know the options that would be applied. - * @param state The state at which we would apply the options. - * @return What we would apply to the socket at the provided state. Empty if we'd apply nothing. - */ - virtual absl::optional
- getOptionDetails(const Socket& socket, - envoy::config::core::v3::SocketOption::SocketState state) const PURE; - }; - - using OptionConstSharedPtr = std::shared_ptr; - using Options = std::vector; - using OptionsSharedPtr = std::shared_ptr; - - static OptionsSharedPtr& appendOptions(OptionsSharedPtr& to, const OptionsSharedPtr& from) { - to->insert(to->end(), from->begin(), from->end()); - return to; - } - - static bool applyOptions(const OptionsSharedPtr& options, Socket& socket, - envoy::config::core::v3::SocketOption::SocketState state) { - if (options == nullptr) { - return true; - } - for (const auto& option : *options) { - if (!option->setOption(socket, state)) { - return false; - } - } - return true; - } - - /** - * Add a socket option visitor for later retrieval with options(). - */ - virtual void addOption(const OptionConstSharedPtr&) PURE; - - /** - * Add socket option visitors for later retrieval with options(). - */ - virtual void addOptions(const OptionsSharedPtr&) PURE; - - /** - * @return the socket options stored earlier with addOption() and addOptions() calls, if any. - */ - virtual const OptionsSharedPtr& options() const PURE; -}; - -using SocketPtr = std::unique_ptr; -using SocketSharedPtr = std::shared_ptr; -using SocketOptRef = absl::optional>; - /** * A socket passed to a connection. For server connections this represents the accepted socket, and * for client connections this represents the socket being connected to a remote address. diff --git a/include/envoy/network/socket.h b/include/envoy/network/socket.h new file mode 100644 index 000000000000..6d911f4f111c --- /dev/null +++ b/include/envoy/network/socket.h @@ -0,0 +1,179 @@ +#pragma once + +#include +#include +#include + +#include "envoy/common/platform.h" +#include "envoy/common/pure.h" +#include "envoy/config/core/v3/base.pb.h" +#include "envoy/network/address.h" +#include "envoy/network/io_handle.h" + +#include "absl/strings/string_view.h" +#include "absl/types/optional.h" + +namespace Envoy { +namespace Network { + +// SocketOptionName is an optional value that captures the setsockopt(2) +// arguments. The idea here is that if a socket option is not supported +// on a platform, we can make this the empty value, which allows us to +// avoid #ifdef proliferation. +struct SocketOptionName { + SocketOptionName() = default; + SocketOptionName(const SocketOptionName&) = default; + SocketOptionName(int level, int option, const std::string& name) + : value_(std::make_tuple(level, option, name)) {} + + int level() const { return std::get<0>(value_.value()); } + int option() const { return std::get<1>(value_.value()); } + const std::string& name() const { return std::get<2>(value_.value()); } + + bool hasValue() const { return value_.has_value(); } + bool operator==(const SocketOptionName& rhs) const { return value_ == rhs.value_; } + +private: + absl::optional> value_; +}; + +// ENVOY_MAKE_SOCKET_OPTION_NAME is a helper macro to generate a +// SocketOptionName with a descriptive string name. +#define ENVOY_MAKE_SOCKET_OPTION_NAME(level, option) \ + Network::SocketOptionName(level, option, #level "/" #option) + +/** + * Base class for Sockets + */ +class Socket { +public: + virtual ~Socket() = default; + + /** + * @return the local address of the socket. + */ + virtual const Address::InstanceConstSharedPtr& localAddress() const PURE; + + /** + * Set the local address of the socket. 
On accepted sockets the local address defaults to the + * one at which the connection was received at, which is the same as the listener's address, if + * the listener is bound to a specific address. + * + * @param local_address the new local address. + */ + virtual void setLocalAddress(const Address::InstanceConstSharedPtr& local_address) PURE; + + /** + * @return IoHandle for the underlying connection + */ + virtual IoHandle& ioHandle() PURE; + + /** + * @return const IoHandle for the underlying connection + */ + virtual const IoHandle& ioHandle() const PURE; + + /** + * @return the type (stream or datagram) of the socket. + */ + virtual Address::SocketType socketType() const PURE; + + /** + * Close the underlying socket. + */ + virtual void close() PURE; + + /** + * Return true if close() hasn't been called. + */ + virtual bool isOpen() const PURE; + + /** + * Visitor class for setting socket options. + */ + class Option { + public: + virtual ~Option() = default; + + /** + * @param socket the socket on which to apply options. + * @param state the current state of the socket. Significant for options that can only be + * set for some particular state of the socket. + * @return true if succeeded, false otherwise. + */ + virtual bool setOption(Socket& socket, + envoy::config::core::v3::SocketOption::SocketState state) const PURE; + + /** + * @param vector of bytes to which the option should append hash key data that will be used + * to separate connections based on the option. Any data already in the key vector must + * not be modified. + */ + virtual void hashKey(std::vector& key) const PURE; + + /** + * Contains details about what this option applies to a socket. + */ + struct Details { + SocketOptionName name_; + std::string value_; ///< Binary string representation of an option's value. + + bool operator==(const Details& other) const { + return name_ == other.name_ && value_ == other.value_; + } + }; + + /** + * @param socket The socket for which we want to know the options that would be applied. + * @param state The state at which we would apply the options. + * @return What we would apply to the socket at the provided state. Empty if we'd apply nothing. + */ + virtual absl::optional
+ getOptionDetails(const Socket& socket, + envoy::config::core::v3::SocketOption::SocketState state) const PURE; + }; + + using OptionConstSharedPtr = std::shared_ptr; + using Options = std::vector; + using OptionsSharedPtr = std::shared_ptr; + + static OptionsSharedPtr& appendOptions(OptionsSharedPtr& to, const OptionsSharedPtr& from) { + to->insert(to->end(), from->begin(), from->end()); + return to; + } + + static bool applyOptions(const OptionsSharedPtr& options, Socket& socket, + envoy::config::core::v3::SocketOption::SocketState state) { + if (options == nullptr) { + return true; + } + for (const auto& option : *options) { + if (!option->setOption(socket, state)) { + return false; + } + } + return true; + } + + /** + * Add a socket option visitor for later retrieval with options(). + */ + virtual void addOption(const OptionConstSharedPtr&) PURE; + + /** + * Add socket option visitors for later retrieval with options(). + */ + virtual void addOptions(const OptionsSharedPtr&) PURE; + + /** + * @return the socket options stored earlier with addOption() and addOptions() calls, if any. + */ + virtual const OptionsSharedPtr& options() const PURE; +}; + +using SocketPtr = std::unique_ptr; +using SocketSharedPtr = std::shared_ptr; +using SocketOptRef = absl::optional>; + +} // namespace Network +} // namespace Envoy \ No newline at end of file diff --git a/source/common/network/BUILD b/source/common/network/BUILD index cecff719b046..5f56af1948e0 100644 --- a/source/common/network/BUILD +++ b/source/common/network/BUILD @@ -169,12 +169,22 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "socket_lib", + hdrs = ["socket_impl.h"], + deps = [ + ":address_lib", + "//include/envoy/network:socket_interface", + "//source/common/common:assert_lib", + ], +) + envoy_cc_library( name = "listen_socket_lib", srcs = ["listen_socket_impl.cc"], hdrs = ["listen_socket_impl.h"], deps = [ - ":address_lib", + ":socket_lib", ":utility_lib", "//include/envoy/network:listen_socket_interface", "//source/common/common:assert_lib", @@ -296,6 +306,7 @@ envoy_cc_library( hdrs = ["utility.h"], deps = [ ":address_lib", + ":socket_lib", "//include/envoy/network:connection_interface", "//include/envoy/network:listener_interface", "//include/envoy/stats:stats_interface", diff --git a/source/common/network/listen_socket_impl.h b/source/common/network/listen_socket_impl.h index 0dba0680b1c8..3a5ed3366293 100644 --- a/source/common/network/listen_socket_impl.h +++ b/source/common/network/listen_socket_impl.h @@ -9,50 +9,11 @@ #include "envoy/network/listen_socket.h" #include "common/common/assert.h" +#include "common/network/socket_impl.h" namespace Envoy { namespace Network { -class SocketImpl : public virtual Socket { -public: - // Network::Socket - const Address::InstanceConstSharedPtr& localAddress() const override { return local_address_; } - void setLocalAddress(const Address::InstanceConstSharedPtr& local_address) override { - local_address_ = local_address; - } - - IoHandle& ioHandle() override { return *io_handle_; } - const IoHandle& ioHandle() const override { return *io_handle_; } - void close() override { - if (io_handle_->isOpen()) { - io_handle_->close(); - } - } - bool isOpen() const override { return io_handle_->isOpen(); } - void ensureOptions() { - if (!options_) { - options_ = std::make_shared>(); - } - } - void addOption(const OptionConstSharedPtr& option) override { - ensureOptions(); - options_->emplace_back(std::move(option)); - } - void addOptions(const OptionsSharedPtr& options) override { - 
ensureOptions(); - Network::Socket::appendOptions(options_, options); - } - const OptionsSharedPtr& options() const override { return options_; } - -protected: - SocketImpl(IoHandlePtr&& io_handle, const Address::InstanceConstSharedPtr& local_address) - : io_handle_(std::move(io_handle)), local_address_(local_address) {} - - const IoHandlePtr io_handle_; - Address::InstanceConstSharedPtr local_address_; - OptionsSharedPtr options_; -}; - class ListenSocketImpl : public SocketImpl { protected: ListenSocketImpl(IoHandlePtr&& io_handle, const Address::InstanceConstSharedPtr& local_address) diff --git a/source/common/network/socket_impl.h b/source/common/network/socket_impl.h new file mode 100644 index 000000000000..f41e9d74f3f8 --- /dev/null +++ b/source/common/network/socket_impl.h @@ -0,0 +1,51 @@ +#pragma once + +#include "envoy/network/socket.h" + +#include "common/common/assert.h" + +namespace Envoy { +namespace Network { + +class SocketImpl : public virtual Socket { +public: + // Network::Socket + const Address::InstanceConstSharedPtr& localAddress() const override { return local_address_; } + void setLocalAddress(const Address::InstanceConstSharedPtr& local_address) override { + local_address_ = local_address; + } + + IoHandle& ioHandle() override { return *io_handle_; } + const IoHandle& ioHandle() const override { return *io_handle_; } + void close() override { + if (io_handle_->isOpen()) { + io_handle_->close(); + } + } + bool isOpen() const override { return io_handle_->isOpen(); } + void ensureOptions() { + if (!options_) { + options_ = std::make_shared>(); + } + } + void addOption(const OptionConstSharedPtr& option) override { + ensureOptions(); + options_->emplace_back(std::move(option)); + } + void addOptions(const OptionsSharedPtr& options) override { + ensureOptions(); + Network::Socket::appendOptions(options_, options); + } + const OptionsSharedPtr& options() const override { return options_; } + +protected: + SocketImpl(IoHandlePtr&& io_handle, const Address::InstanceConstSharedPtr& local_address) + : io_handle_(std::move(io_handle)), local_address_(local_address) {} + + const IoHandlePtr io_handle_; + Address::InstanceConstSharedPtr local_address_; + OptionsSharedPtr options_; +}; + +} // namespace Network +} // namespace Envoy \ No newline at end of file diff --git a/source/common/network/socket_option_impl.cc b/source/common/network/socket_option_impl.cc index 016e97613f9d..2253fa8a19c9 100644 --- a/source/common/network/socket_option_impl.cc +++ b/source/common/network/socket_option_impl.cc @@ -14,7 +14,7 @@ namespace Network { bool SocketOptionImpl::setOption(Socket& socket, envoy::config::core::v3::SocketOption::SocketState state) const { if (in_state_ == state) { - if (!optname_.has_value()) { + if (!optname_.hasValue()) { ENVOY_LOG(warn, "Failed to set unsupported option on socket"); return false; } @@ -44,12 +44,12 @@ SocketOptionImpl::getOptionDetails(const Socket&, return absl::make_optional(std::move(info)); } -bool SocketOptionImpl::isSupported() const { return optname_.has_value(); } +bool SocketOptionImpl::isSupported() const { return optname_.hasValue(); } Api::SysCallIntResult SocketOptionImpl::setSocketOption(Socket& socket, const Network::SocketOptionName& optname, const void* value, size_t size) { - if (!optname.has_value()) { + if (!optname.hasValue()) { return {-1, ENOTSUP}; } diff --git a/test/common/network/socket_option_factory_test.cc b/test/common/network/socket_option_factory_test.cc index 4ec848f57c9c..3ff0a214021a 100644 --- 
a/test/common/network/socket_option_factory_test.cc +++ b/test/common/network/socket_option_factory_test.cc @@ -43,7 +43,7 @@ class SocketOptionFactoryTest : public testing::Test { }; #define CHECK_OPTION_SUPPORTED(option) \ - if (!option.has_value()) { \ + if (!option.hasValue()) { \ return; \ } diff --git a/test/common/network/socket_option_impl_test.cc b/test/common/network/socket_option_impl_test.cc index e0a6be83f703..d6e0e3932e1b 100644 --- a/test/common/network/socket_option_impl_test.cc +++ b/test/common/network/socket_option_impl_test.cc @@ -20,14 +20,14 @@ TEST_F(SocketOptionImplTest, HasName) { auto optname = ENVOY_MAKE_SOCKET_OPTION_NAME(SOL_SOCKET, SO_SNDBUF); // Verify that the constructor macro sets all the fields correctly. - EXPECT_TRUE(optname.has_value()); + EXPECT_TRUE(optname.hasValue()); EXPECT_EQ(SOL_SOCKET, optname.level()); EXPECT_EQ(SO_SNDBUF, optname.option()); EXPECT_EQ("SOL_SOCKET/SO_SNDBUF", optname.name()); // The default constructor should not have a value, i.e. should // be unsupported. - EXPECT_FALSE(SocketOptionName().has_value()); + EXPECT_FALSE(SocketOptionName().hasValue()); // If we fail to set an option, verify that the log message // contains the option name so the operator can debug. diff --git a/test/common/network/socket_option_test.h b/test/common/network/socket_option_test.h index aab277f97858..edc9a4e6a49a 100644 --- a/test/common/network/socket_option_test.h +++ b/test/common/network/socket_option_test.h @@ -74,7 +74,7 @@ class SocketOptionTest : public testing::Test { Socket::Option& socket_option, Network::SocketOptionName option_name, int option_val, const std::set& when) { for (auto state : when) { - if (option_name.has_value()) { + if (option_name.hasValue()) { EXPECT_CALL(os_sys_calls_, setsockopt_(_, option_name.level(), option_name.option(), _, sizeof(int))) .WillOnce(Invoke([option_val](os_fd_t, int, int, const void* optval, socklen_t) -> int { diff --git a/test/common/upstream/cluster_manager_impl_test.cc b/test/common/upstream/cluster_manager_impl_test.cc index 82b158706c8d..be526c1a1f51 100644 --- a/test/common/upstream/cluster_manager_impl_test.cc +++ b/test/common/upstream/cluster_manager_impl_test.cc @@ -3032,7 +3032,7 @@ class SockoptsTest : public ClusterManagerImplTest { TestThreadsafeSingletonInjector os_calls(&os_sys_calls); bool expect_success = true; for (const auto& name_val : names_vals) { - if (!name_val.first.has_value()) { + if (!name_val.first.hasValue()) { expect_success = false; continue; } @@ -3283,7 +3283,7 @@ class TcpKeepaliveTest : public ClusterManagerImplTest { void expectSetsockoptSoKeepalive(absl::optional keepalive_probes, absl::optional keepalive_time, absl::optional keepalive_interval) { - if (!ENVOY_SOCKET_SO_KEEPALIVE.has_value()) { + if (!ENVOY_SOCKET_SO_KEEPALIVE.hasValue()) { EXPECT_CALL(factory_.tls_.dispatcher_, createClientConnection_(_, _, _, _)) .WillOnce( Invoke([this](Network::Address::InstanceConstSharedPtr, diff --git a/test/extensions/filters/http/original_src/original_src_test.cc b/test/extensions/filters/http/original_src/original_src_test.cc index 33b3ba002f11..def891c4094c 100644 --- a/test/extensions/filters/http/original_src/original_src_test.cc +++ b/test/extensions/filters/http/original_src/original_src_test.cc @@ -134,7 +134,7 @@ TEST_F(OriginalSrcHttpTest, DecodeHeadersIpv4AddressBleachesPort) { } TEST_F(OriginalSrcHttpTest, FilterAddsTransparentOption) { - if (!ENVOY_SOCKET_IP_TRANSPARENT.has_value()) { + if (!ENVOY_SOCKET_IP_TRANSPARENT.hasValue()) { // The option isn't 
supported on this platform. Just skip the test. return; } @@ -153,7 +153,7 @@ TEST_F(OriginalSrcHttpTest, FilterAddsTransparentOption) { } TEST_F(OriginalSrcHttpTest, FilterAddsMarkOption) { - if (!ENVOY_SOCKET_SO_MARK.has_value()) { + if (!ENVOY_SOCKET_SO_MARK.hasValue()) { // The option isn't supported on this platform. Just skip the test. return; } @@ -175,7 +175,7 @@ TEST_F(OriginalSrcHttpTest, FilterAddsMarkOption) { } TEST_F(OriginalSrcHttpTest, Mark0NotAdded) { - if (!ENVOY_SOCKET_SO_MARK.has_value()) { + if (!ENVOY_SOCKET_SO_MARK.hasValue()) { // The option isn't supported on this platform. Just skip the test. return; } diff --git a/test/extensions/filters/listener/original_src/original_src_test.cc b/test/extensions/filters/listener/original_src/original_src_test.cc index b206bb722b66..0e9180012fc2 100644 --- a/test/extensions/filters/listener/original_src/original_src_test.cc +++ b/test/extensions/filters/listener/original_src/original_src_test.cc @@ -119,7 +119,7 @@ TEST_F(OriginalSrcTest, OnNewConnectionIpv4AddressBleachesPort) { } TEST_F(OriginalSrcTest, FilterAddsTransparentOption) { - if (!ENVOY_SOCKET_IP_TRANSPARENT.has_value()) { + if (!ENVOY_SOCKET_IP_TRANSPARENT.hasValue()) { // The option isn't supported on this platform. Just skip the test. return; } @@ -138,7 +138,7 @@ TEST_F(OriginalSrcTest, FilterAddsTransparentOption) { } TEST_F(OriginalSrcTest, FilterAddsMarkOption) { - if (!ENVOY_SOCKET_SO_MARK.has_value()) { + if (!ENVOY_SOCKET_SO_MARK.hasValue()) { // The option isn't supported on this platform. Just skip the test. return; } @@ -160,7 +160,7 @@ TEST_F(OriginalSrcTest, FilterAddsMarkOption) { } TEST_F(OriginalSrcTest, Mark0NotAdded) { - if (!ENVOY_SOCKET_SO_MARK.has_value()) { + if (!ENVOY_SOCKET_SO_MARK.hasValue()) { // The option isn't supported on this platform. Just skip the test. return; } diff --git a/test/server/listener_manager_impl_test.cc b/test/server/listener_manager_impl_test.cc index 40b9f45eac5d..3325a221684e 100644 --- a/test/server/listener_manager_impl_test.cc +++ b/test/server/listener_manager_impl_test.cc @@ -75,7 +75,7 @@ class ListenerManagerImplWithRealFiltersTest : public ListenerManagerImplTest { const Network::SocketOptionName& expected_option, int expected_value, uint32_t expected_num_options = 1, ListenSocketCreationParams expected_creation_params = {true, true}) { - if (expected_option.has_value()) { + if (expected_option.hasValue()) { expectCreateListenSocket(expected_state, expected_num_options, expected_creation_params); expectSetsockopt(os_sys_calls_, expected_option.level(), expected_option.option(), expected_value, expected_num_options); From 95c3b41ed811062e9cbd60539db6e50de15b7b93 Mon Sep 17 00:00:00 2001 From: Yuval Kohavi Date: Wed, 20 May 2020 16:51:30 -0400 Subject: [PATCH 210/909] hcm: bugfix - request hangs where 1st filter adds body, and 2nd filter waits for it (#11248) Fix a bug where a request without a body hangs where a filter adds a body, and a subsequent filter stops the iteration until decodeData. 
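To make the failure mode concrete, here is a minimal sketch (illustrative only; it essentially mirrors the add-body-filter test filter added later in this patch, and the class name is invented) of the first filter in the failing chain. Before this fix, if the next filter returned StopIteration from decodeHeaders() while waiting for data, the body injected below was never replayed to it and the header-only request hung.

#include "envoy/http/filter.h"

#include "common/buffer/buffer_impl.h"

#include "extensions/filters/http/common/pass_through_filter.h"

namespace Envoy {

// Hypothetical first filter: turns a header-only request into one with a body.
class AddBodyOnHeadersOnlyFilter : public Http::PassThroughFilter {
public:
  Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers,
                                          bool end_stream) override {
    if (end_stream) {
      Buffer::OwnedImpl body("body");
      headers.setContentLength(body.length());
      // Injects data into the header-only request; a later filter that returns
      // StopIteration from decodeHeaders() must still be handed this data.
      decoder_callbacks_->addDecodedData(body, false);
    }
    return Http::FilterHeadersStatus::Continue;
  }
};

} // namespace Envoy
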
Signed-off-by: Yuval Kohavi --- source/common/http/conn_manager_impl.cc | 50 ++++++++++++------ source/common/http/conn_manager_impl.h | 6 +++ test/common/http/conn_manager_impl_test.cc | 48 +++++++++++++++++ test/integration/BUILD | 2 + test/integration/filters/BUILD | 30 +++++++++++ test/integration/filters/add_body_filter.cc | 49 +++++++++++++++++ .../wait_for_whole_request_and_response.cc | 52 +++++++++++++++++++ test/integration/protocol_integration_test.cc | 47 +++++++++++++++++ 8 files changed, 267 insertions(+), 17 deletions(-) create mode 100644 test/integration/filters/add_body_filter.cc create mode 100644 test/integration/filters/wait_for_whole_request_and_response.cc diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index ac3bd1157891..8d5dd8895ae0 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -1057,6 +1057,19 @@ void ConnectionManagerImpl::ActiveStream::traceRequest() { } } +void ConnectionManagerImpl::ActiveStream::maybeContinueDecoding( + const std::list::iterator& continue_data_entry) { + if (continue_data_entry != decoder_filters_.end()) { + // We use the continueDecoding() code since it will correctly handle not calling + // decodeHeaders() again. Fake setting StopSingleIteration since the continueDecoding() code + // expects it. + ASSERT(buffered_request_data_); + (*continue_data_entry)->iteration_state_ = + ActiveStreamFilterBase::IterationState::StopSingleIteration; + (*continue_data_entry)->continueDecoding(); + } +} + void ConnectionManagerImpl::ActiveStream::decodeHeaders(ActiveStreamDecoderFilter* filter, RequestHeaderMap& headers, bool end_stream) { @@ -1096,6 +1109,7 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(ActiveStreamDecoderFilte // Stop iteration IFF this is not the last filter. If it is the last filter, continue with // processing since we need to handle the case where a terminal filter wants to buffer, but // a previous filter has added body. + maybeContinueDecoding(continue_data_entry); return; } @@ -1106,15 +1120,7 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(ActiveStreamDecoderFilte } } - if (continue_data_entry != decoder_filters_.end()) { - // We use the continueDecoding() code since it will correctly handle not calling - // decodeHeaders() again. Fake setting StopSingleIteration since the continueDecoding() code - // expects it. - ASSERT(buffered_request_data_); - (*continue_data_entry)->iteration_state_ = - ActiveStreamFilterBase::IterationState::StopSingleIteration; - (*continue_data_entry)->continueDecoding(); - } + maybeContinueDecoding(continue_data_entry); if (end_stream) { disarmRequestTimeout(); @@ -1577,6 +1583,19 @@ void ConnectionManagerImpl::ActiveStream::encode100ContinueHeaders( response_encoder_->encode100ContinueHeaders(headers); } +void ConnectionManagerImpl::ActiveStream::maybeContinueEncoding( + const std::list::iterator& continue_data_entry) { + if (continue_data_entry != encoder_filters_.end()) { + // We use the continueEncoding() code since it will correctly handle not calling + // encodeHeaders() again. Fake setting StopSingleIteration since the continueEncoding() code + // expects it. 
+ ASSERT(buffered_response_data_); + (*continue_data_entry)->iteration_state_ = + ActiveStreamFilterBase::IterationState::StopSingleIteration; + (*continue_data_entry)->continueEncoding(); + } +} + void ConnectionManagerImpl::ActiveStream::encodeHeaders(ActiveStreamEncoderFilter* filter, ResponseHeaderMap& headers, bool end_stream) { @@ -1612,6 +1631,9 @@ void ConnectionManagerImpl::ActiveStream::encodeHeaders(ActiveStreamEncoderFilte } if (!continue_iteration) { + if (!(*entry)->end_stream_) { + maybeContinueEncoding(continue_data_entry); + } return; } @@ -1626,14 +1648,8 @@ void ConnectionManagerImpl::ActiveStream::encodeHeaders(ActiveStreamEncoderFilte (end_stream && continue_data_entry == encoder_filters_.end()); encodeHeadersInternal(headers, modified_end_stream); - if (continue_data_entry != encoder_filters_.end() && !modified_end_stream) { - // We use the continueEncoding() code since it will correctly handle not calling - // encodeHeaders() again. Fake setting StopSingleIteration since the continueEncoding() code - // expects it. - ASSERT(buffered_response_data_); - (*continue_data_entry)->iteration_state_ = - ActiveStreamFilterBase::IterationState::StopSingleIteration; - (*continue_data_entry)->continueEncoding(); + if (!modified_end_stream) { + maybeContinueEncoding(continue_data_entry); } } diff --git a/source/common/http/conn_manager_impl.h b/source/common/http/conn_manager_impl.h index d4c0b1744c9d..bf8f9d153053 100644 --- a/source/common/http/conn_manager_impl.h +++ b/source/common/http/conn_manager_impl.h @@ -475,6 +475,10 @@ class ConnectionManagerImpl : Logger::Loggable, void addDecodedData(ActiveStreamDecoderFilter& filter, Buffer::Instance& data, bool streaming); RequestTrailerMap& addDecodedTrailers(); MetadataMapVector& addDecodedMetadata(); + // Helper function for the case where we have a header only request, but a filter adds a body + // to it. + void maybeContinueDecoding( + const std::list::iterator& maybe_continue_data_entry); void decodeHeaders(ActiveStreamDecoderFilter* filter, RequestHeaderMap& headers, bool end_stream); // Sends data through decoding filter chains. filter_iteration_start_state indicates which @@ -496,6 +500,8 @@ class ConnectionManagerImpl : Logger::Loggable, // As with most of the encode functions, this runs encodeHeaders on various // filters before calling encodeHeadersInternal which does final header munging and passes the // headers to the encoder. + void maybeContinueEncoding( + const std::list::iterator& maybe_continue_data_entry); void encodeHeaders(ActiveStreamEncoderFilter* filter, ResponseHeaderMap& headers, bool end_stream); // Sends data through encoding filter chains. 
filter_iteration_start_state indicates which diff --git a/test/common/http/conn_manager_impl_test.cc b/test/common/http/conn_manager_impl_test.cc index 7b2f3c92b825..278aba456b50 100644 --- a/test/common/http/conn_manager_impl_test.cc +++ b/test/common/http/conn_manager_impl_test.cc @@ -3237,6 +3237,54 @@ TEST_F(HttpConnectionManagerImplTest, DrainCloseRaceWithClose) { filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::LocalClose); } +TEST_F(HttpConnectionManagerImplTest, + FilterThatWaitsForBodyCanBeCalledAfterFilterThatAddsBodyEvenIfItIsNotLast) { + InSequence s; + setup(false, ""); + + NiceMock encoder; + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(encoder); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; + decoder->decodeHeaders(std::move(headers), true); + return Http::okStatus(); + })); + + // 3 filters: + // 1st filter adds a body + // 2nd filter waits for the body + // 3rd filter simulates router filter. + setupFilterChain(3, 0); + + EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true)) + .WillOnce(Invoke([&](RequestHeaderMap&, bool) -> FilterHeadersStatus { + Buffer::OwnedImpl body("body"); + decoder_filters_[0]->callbacks_->addDecodedData(body, false); + return FilterHeadersStatus::Continue; + })); + EXPECT_CALL(*decoder_filters_[0], decodeComplete()); + EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, false)) + .WillOnce(Invoke([](RequestHeaderMap&, bool) -> FilterHeadersStatus { + return FilterHeadersStatus::StopIteration; + })); + EXPECT_CALL(*decoder_filters_[1], decodeData(_, true)) + .WillOnce(Invoke( + [](Buffer::Instance&, bool) -> FilterDataStatus { return FilterDataStatus::Continue; })); + EXPECT_CALL(*decoder_filters_[1], decodeComplete()); + EXPECT_CALL(*decoder_filters_[2], decodeHeaders(_, false)) + .WillOnce(Invoke([](RequestHeaderMap&, bool) -> FilterHeadersStatus { + return FilterHeadersStatus::Continue; + })); + EXPECT_CALL(*decoder_filters_[2], decodeData(_, true)) + .WillOnce(Invoke( + [](Buffer::Instance&, bool) -> FilterDataStatus { return FilterDataStatus::Continue; })); + EXPECT_CALL(*decoder_filters_[2], decodeComplete()); + + Buffer::OwnedImpl fake_input; + conn_manager_->onData(fake_input, false); +} + TEST_F(HttpConnectionManagerImplTest, DrainClose) { setup(true, ""); diff --git a/test/integration/BUILD b/test/integration/BUILD index c9e97596c8bb..6d25cdc3b080 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -458,6 +458,7 @@ envoy_cc_test_library( "//source/extensions/filters/http/router:config", "//source/extensions/filters/network/http_connection_manager:config", "//test/common/upstream:utility_lib", + "//test/integration/filters:add_body_filter_config_lib", "//test/integration/filters:add_trailers_filter_config_lib", "//test/integration/filters:call_decodedata_once_filter_config_lib", "//test/integration/filters:decode_headers_return_stop_all_filter_config_lib", @@ -466,6 +467,7 @@ envoy_cc_test_library( "//test/integration/filters:modify_buffer_filter_config_lib", "//test/integration/filters:passthrough_filter_config_lib", "//test/integration/filters:pause_filter_lib", + "//test/integration/filters:wait_for_whole_request_and_response_config_lib", "//test/test_common:registry_lib", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", diff --git 
a/test/integration/filters/BUILD b/test/integration/filters/BUILD index b26d5ffb1835..1f320a04a61c 100644 --- a/test/integration/filters/BUILD +++ b/test/integration/filters/BUILD @@ -8,6 +8,36 @@ load( envoy_package() +envoy_cc_test_library( + name = "add_body_filter_config_lib", + srcs = [ + "add_body_filter.cc", + ], + deps = [ + ":common_lib", + "//include/envoy/http:filter_interface", + "//include/envoy/registry", + "//include/envoy/server:filter_config_interface", + "//source/extensions/filters/http/common:pass_through_filter_lib", + "//test/extensions/filters/http/common:empty_http_filter_config_lib", + ], +) + +envoy_cc_test_library( + name = "wait_for_whole_request_and_response_config_lib", + srcs = [ + "wait_for_whole_request_and_response.cc", + ], + deps = [ + ":common_lib", + "//include/envoy/http:filter_interface", + "//include/envoy/registry", + "//include/envoy/server:filter_config_interface", + "//source/extensions/filters/http/common:pass_through_filter_lib", + "//test/extensions/filters/http/common:empty_http_filter_config_lib", + ], +) + envoy_cc_test_library( name = "add_trailers_filter_config_lib", srcs = [ diff --git a/test/integration/filters/add_body_filter.cc b/test/integration/filters/add_body_filter.cc new file mode 100644 index 000000000000..c319f0f5f729 --- /dev/null +++ b/test/integration/filters/add_body_filter.cc @@ -0,0 +1,49 @@ +#include + +#include "envoy/http/filter.h" +#include "envoy/registry/registry.h" +#include "envoy/server/filter_config.h" + +#include "common/buffer/buffer_impl.h" + +#include "extensions/filters/http/common/pass_through_filter.h" + +#include "test/extensions/filters/http/common/empty_http_filter_config.h" +#include "test/integration/filters/common.h" + +namespace Envoy { + +// A test filter that inserts body to a header only request/response. +class AddBodyStreamFilter : public Http::PassThroughFilter { +public: + constexpr static char name[] = "add-body-filter"; + + Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers, + bool end_stream) override { + if (end_stream) { + Buffer::OwnedImpl body("body"); + headers.setContentLength(body.length()); + decoder_callbacks_->addDecodedData(body, false); + } + + return Http::FilterHeadersStatus::Continue; + } + + Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap& headers, + bool end_stream) override { + if (end_stream) { + Buffer::OwnedImpl body("body"); + headers.setContentLength(body.length()); + encoder_callbacks_->addEncodedData(body, false); + } + + return Http::FilterHeadersStatus::Continue; + } +}; + +constexpr char AddBodyStreamFilter::name[]; + +static Registry::RegisterFactory, + Server::Configuration::NamedHttpFilterConfigFactory> + encoder_register_; +} // namespace Envoy diff --git a/test/integration/filters/wait_for_whole_request_and_response.cc b/test/integration/filters/wait_for_whole_request_and_response.cc new file mode 100644 index 000000000000..c9fd34607325 --- /dev/null +++ b/test/integration/filters/wait_for_whole_request_and_response.cc @@ -0,0 +1,52 @@ +#include + +#include "envoy/http/filter.h" +#include "envoy/registry/registry.h" +#include "envoy/server/filter_config.h" + +#include "extensions/filters/http/common/pass_through_filter.h" + +#include "test/extensions/filters/http/common/empty_http_filter_config.h" +#include "test/integration/filters/common.h" + +namespace Envoy { + +// A test filter that waits for the request/response to finish before continuing. 
+class WaitForWholeRequestAndResponseStreamFilter : public Http::PassThroughFilter { +public: + constexpr static char name[] = "wait-for-whole-request-and-response-filter"; + + Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap&, bool end_stream) override { + if (end_stream) { + return Http::FilterHeadersStatus::Continue; + } + return Http::FilterHeadersStatus::StopIteration; + } + Http::FilterDataStatus decodeData(Buffer::Instance&, bool end_stream) override { + if (end_stream) { + return Http::FilterDataStatus::Continue; + } + return Http::FilterDataStatus::StopIterationAndBuffer; + } + + Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap&, bool end_stream) override { + if (end_stream) { + return Http::FilterHeadersStatus::Continue; + } + return Http::FilterHeadersStatus::StopIteration; + } + + Http::FilterDataStatus encodeData(Buffer::Instance&, bool end_stream) override { + if (end_stream) { + return Http::FilterDataStatus::Continue; + } + return Http::FilterDataStatus::StopIterationAndBuffer; + } +}; + +constexpr char WaitForWholeRequestAndResponseStreamFilter::name[]; + +static Registry::RegisterFactory, + Server::Configuration::NamedHttpFilterConfigFactory> + encoder_register_; +} // namespace Envoy diff --git a/test/integration/protocol_integration_test.cc b/test/integration/protocol_integration_test.cc index 207e41b02722..14bc8b0e9142 100644 --- a/test/integration/protocol_integration_test.cc +++ b/test/integration/protocol_integration_test.cc @@ -199,6 +199,53 @@ name: health_check EXPECT_EQ("503", response->headers().Status()->value().getStringView()); } +// Verifies behavior for https://github.com/envoyproxy/envoy/pull/11248 +TEST_P(ProtocolIntegrationTest, AddBodyToRequestAndWaitForIt) { + // filters are prepended, so add them in reverse order + config_helper_.addFilter(R"EOF( + name: wait-for-whole-request-and-response-filter + )EOF"); + config_helper_.addFilter(R"EOF( + name: add-body-filter + )EOF"); + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + + auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); + waitForNextUpstreamRequest(); + EXPECT_EQ("body", upstream_request_->body().toString()); + upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, false); + // encode data, as we have a separate test for the transforming header only response. 
+ upstream_request_->encodeData(128, true); + response->waitForEndStream(); + + EXPECT_TRUE(upstream_request_->complete()); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); +} + +TEST_P(ProtocolIntegrationTest, AddBodyToResponseAndWaitForIt) { + // filters are prepended, so add them in reverse order + config_helper_.addFilter(R"EOF( + name: add-body-filter + )EOF"); + config_helper_.addFilter(R"EOF( + name: wait-for-whole-request-and-response-filter + )EOF"); + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + + auto response = codec_client_->makeRequestWithBody(default_request_headers_, 128); + waitForNextUpstreamRequest(); + upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, true); + response->waitForEndStream(); + + EXPECT_TRUE(upstream_request_->complete()); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("body", response->body()); +} + TEST_P(ProtocolIntegrationTest, AddEncodedTrailers) { config_helper_.addFilter(R"EOF( name: add-trailers-filter From 72b930b3a148abbb2f5efa1e6ad04d7ea210830c Mon Sep 17 00:00:00 2001 From: Spencer Lewis Date: Wed, 20 May 2020 16:52:29 -0400 Subject: [PATCH 211/909] router: add two header formatter operators (#11242) This commit updates the router's header formatter to support the RESPONSE_FLAGS and RESPONSE_CODE_DETAILS operators. Signed-off-by: Spencer Lewis --- .../root/configuration/http/http_conn_man/headers.rst | 8 ++++++++ docs/root/version_history/current.rst | 2 ++ source/common/router/header_formatter.cc | 11 +++++++++++ test/common/router/header_formatter_test.cc | 8 ++++++++ 4 files changed, 29 insertions(+) diff --git a/docs/root/configuration/http/http_conn_man/headers.rst b/docs/root/configuration/http/http_conn_man/headers.rst index 48f681d802c2..345657d0ee93 100644 --- a/docs/root/configuration/http/http_conn_man/headers.rst +++ b/docs/root/configuration/http/http_conn_man/headers.rst @@ -644,3 +644,11 @@ Supported variable names are: key: "x-request-start" value: "%START_TIME(%s.%3f)%" append: true + +%RESPONSE_FLAGS% + Additional details about the response or connection, if any. Possible values and their meanings + are listed in the access log formatter :ref:`documentation`. + +%RESPONSE_CODE_DETAILS% + Response code details provides additional information about the HTTP response code, such as + who set it (the upstream or envoy) and why. \ No newline at end of file diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 6c1b461f2c5e..6c29d63ce8b2 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -73,6 +73,8 @@ New Features * request_id: added to :ref:`always_set_request_id_in_response setting ` to set :ref:`x-request-id ` header in response even if tracing is not forced. +* router: add support for RESPONSE_FLAGS and RESPONSE_CODE_DETAILS :ref:`header formatters + `. * router: more fine grained internal redirect configs are added to the :ref`internal_redirect_policy ` field. * runtime: add new gauge :ref:`deprecated_feature_seen_since_process_start ` that gets reset across hot restarts. 
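For reference, the two new operators plug into the same custom-header machinery as the %START_TIME% example in headers.rst above, so a route-level configuration could use them along these lines (a hedged sketch; the x-response-flags and x-response-code-details header names are illustrative and not part of this patch):

  response_headers_to_add:
  - header:
      key: "x-response-flags"
      value: "%RESPONSE_FLAGS%"
  - header:
      key: "x-response-code-details"
      value: "%RESPONSE_CODE_DETAILS%"

Per the extractor added in header_formatter.cc below, %RESPONSE_CODE_DETAILS% expands to an empty string when the stream has no response code details recorded.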
diff --git a/source/common/router/header_formatter.cc b/source/common/router/header_formatter.cc index 88ac5741b648..cb74a7aaee90 100644 --- a/source/common/router/header_formatter.cc +++ b/source/common/router/header_formatter.cc @@ -344,6 +344,17 @@ StreamInfoHeaderFormatter::StreamInfoHeaderFormatter(absl::string_view field_nam } else if (field_name == "HOSTNAME") { std::string hostname = Envoy::AccessLog::AccessLogFormatUtils::getHostname(); field_extractor_ = [hostname](const StreamInfo::StreamInfo&) { return hostname; }; + } else if (field_name == "RESPONSE_FLAGS") { + field_extractor_ = [](const StreamInfo::StreamInfo& stream_info) { + return StreamInfo::ResponseFlagUtils::toShortString(stream_info); + }; + } else if (field_name == "RESPONSE_CODE_DETAILS") { + field_extractor_ = [](const StreamInfo::StreamInfo& stream_info) -> std::string { + if (stream_info.responseCodeDetails().has_value()) { + return stream_info.responseCodeDetails().value(); + } + return ""; + }; } else { throw EnvoyException(fmt::format("field '{}' not supported as custom header", field_name)); } diff --git a/test/common/router/header_formatter_test.cc b/test/common/router/header_formatter_test.cc index 81044fddb558..88586fcbbe9d 100644 --- a/test/common/router/header_formatter_test.cc +++ b/test/common/router/header_formatter_test.cc @@ -750,6 +750,8 @@ TEST(HeaderParserTest, TestParseInternal) { {"%PER_REQUEST_STATE(testing)%", {"test_value"}, {}}, {"%REQ(x-request-id)%", {"123"}, {}}, {"%START_TIME%", {"2018-04-03T23:06:09.123Z"}, {}}, + {"%RESPONSE_FLAGS%", {"LR"}, {}}, + {"%RESPONSE_CODE_DETAILS%", {"via_upstream"}, {}}, // Unescaped % {"%", {}, {"Invalid header configuration. Un-escaped % at position 0"}}, @@ -875,6 +877,12 @@ TEST(HeaderParserTest, TestParseInternal) { ON_CALL(stream_info, filterState()).WillByDefault(ReturnRef(filter_state)); ON_CALL(Const(stream_info), filterState()).WillByDefault(ReturnRef(*filter_state)); + ON_CALL(stream_info, hasResponseFlag(StreamInfo::ResponseFlag::LocalReset)) + .WillByDefault(Return(true)); + + absl::optional rc_details{"via_upstream"}; + ON_CALL(stream_info, responseCodeDetails()).WillByDefault(ReturnRef(rc_details)); + for (const auto& test_case : test_cases) { Protobuf::RepeatedPtrField to_add; envoy::config::core::v3::HeaderValueOption* header = to_add.Add(); From 77cca6b40775fbce1b93eb3ae6189b6e9dede208 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Wed, 20 May 2020 16:57:23 -0400 Subject: [PATCH 212/909] connection: adding watermarks to the read buffer. (#11170) Fixing an issue where every time a connection was readDisabled/readEnabled it would read from the socket, even if the buffer already contained sufficient data it should have triggered push back. 
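As background for the fix described above, a minimal sketch of the Buffer::WatermarkBuffer behavior the connection's read buffer now relies on; the two-callback constructor and the highWatermarkTriggered() accessor appear in the watermark_buffer.h and connection_impl.cc hunks that follow, while the sizes used here are illustrative only:

  #include <string>

  #include "common/buffer/watermark_buffer.h"
  #include "common/common/assert.h"

  void watermarkBufferSketch() {
    // The callbacks mirror what ConnectionImpl wires to readDisable(false) and
    // readDisable(true) for its read buffer.
    Envoy::Buffer::WatermarkBuffer buffer(
        /*below_low_watermark=*/[] { /* resume reading from the socket */ },
        /*above_high_watermark=*/[] { /* stop reading from the socket */ });
    buffer.setWatermarks(1024);          // low watermark becomes 512, high 1024
    buffer.add(std::string(2048, 'a'));  // crosses the high watermark; callback fires
    ASSERT(buffer.highWatermarkTriggered());
    buffer.drain(1600);                  // back below the low watermark; callback fires
    ASSERT(!buffer.highWatermarkTriggered());
  }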
Signed-off-by: Alyssa Wilk --- source/common/buffer/watermark_buffer.h | 3 + source/common/network/connection_impl.cc | 77 ++++++++--- source/common/network/connection_impl.h | 19 ++- test/common/network/connection_impl_test.cc | 134 +++++++++++++++++++- 4 files changed, 206 insertions(+), 27 deletions(-) diff --git a/source/common/buffer/watermark_buffer.h b/source/common/buffer/watermark_buffer.h index 827d1a51bccf..5bc111a4e1e3 100644 --- a/source/common/buffer/watermark_buffer.h +++ b/source/common/buffer/watermark_buffer.h @@ -38,6 +38,9 @@ class WatermarkBuffer : public OwnedImpl { void setWatermarks(uint32_t watermark) { setWatermarks(watermark / 2, watermark); } void setWatermarks(uint32_t low_watermark, uint32_t high_watermark); uint32_t highWatermark() const { return high_watermark_; } + // Returns true if the high watermark callbacks have been called more recently + // than the low watermark callbacks. + bool highWatermarkTriggered() const { return above_high_watermark_called_; } private: void checkHighWatermark(); diff --git a/source/common/network/connection_impl.cc b/source/common/network/connection_impl.cc index f44aee154ead..12961773a7ee 100644 --- a/source/common/network/connection_impl.cc +++ b/source/common/network/connection_impl.cc @@ -48,6 +48,8 @@ ConnectionImpl::ConnectionImpl(Event::Dispatcher& dispatcher, ConnectionSocketPt : ConnectionImplBase(dispatcher, next_global_id_++), transport_socket_(std::move(transport_socket)), socket_(std::move(socket)), stream_info_(stream_info), filter_manager_(*this), + read_buffer_([this]() -> void { this->onReadBufferLowWatermark(); }, + [this]() -> void { this->onReadBufferHighWatermark(); }), write_buffer_(dispatcher.getWatermarkFactory().create( [this]() -> void { this->onWriteBufferLowWatermark(); }, [this]() -> void { this->onWriteBufferHighWatermark(); })), @@ -186,8 +188,13 @@ Connection::State ConnectionImpl::state() const { void ConnectionImpl::closeConnectionImmediately() { closeSocket(ConnectionEvent::LocalClose); } +bool ConnectionImpl::consumerWantsToRead() { + return read_disable_count_ == 0 || + (read_disable_count_ == 1 && read_buffer_.highWatermarkTriggered()); +} + void ConnectionImpl::closeSocket(ConnectionEvent close_type) { - if (!ioHandle().isOpen()) { + if (!ConnectionImpl::ioHandle().isOpen()) { return; } @@ -216,7 +223,8 @@ void ConnectionImpl::closeSocket(ConnectionEvent close_type) { socket_->close(); - raiseEvent(close_type); + // Call the base class directly as close() is called in the destructor. + ConnectionImpl::raiseEvent(close_type); } void ConnectionImpl::noDelay(bool enable) { @@ -268,7 +276,7 @@ void ConnectionImpl::noDelay(bool enable) { } void ConnectionImpl::onRead(uint64_t read_buffer_size) { - if (read_disable_count_ != 0 || inDelayedClose()) { + if (inDelayedClose() || !consumerWantsToRead()) { return; } ASSERT(ioHandle().isOpen()); @@ -311,8 +319,8 @@ void ConnectionImpl::readDisable(bool disable) { ASSERT(state() == State::Open); ASSERT(file_event_ != nullptr); - ENVOY_CONN_LOG(trace, "readDisable: enabled={} disable_count={} state={}", *this, - read_disable_count_, disable, static_cast(state())); + ENVOY_CONN_LOG(trace, "readDisable: disable={} disable_count={} state={} buffer_length={}", *this, + disable, read_disable_count_, static_cast(state()), read_buffer_.length()); // When we disable reads, we still allow for early close notifications (the equivalent of // EPOLLRDHUP for an epoll backend). 
For backends that support it, this allows us to apply @@ -341,25 +349,26 @@ void ConnectionImpl::readDisable(bool disable) { file_event_->setEnabled(Event::FileReadyType::Write); } } else { + ASSERT(read_disable_count_ != 0); --read_disable_count_; - if (read_disable_count_ != 0) { - // The socket should stay disabled. - return; - } if (state() != State::Open || file_event_ == nullptr) { // If readDisable is called on a closed connection, do not crash. return; } - // We never ask for both early close and read at the same time. If we are reading, we want to - // consume all available data. - file_event_->setEnabled(Event::FileReadyType::Read | Event::FileReadyType::Write); - // If the connection has data buffered there's no guarantee there's also data in the kernel - // which will kick off the filter chain. Instead fake an event to make sure the buffered data - // gets processed regardless and ensure that we dispatch it via onRead. - if (read_buffer_.length() > 0) { + if (read_disable_count_ == 0) { + // We never ask for both early close and read at the same time. If we are reading, we want to + // consume all available data. + file_event_->setEnabled(Event::FileReadyType::Read | Event::FileReadyType::Write); + } + + if (consumerWantsToRead() && read_buffer_.length() > 0) { + // If the connection has data buffered there's no guarantee there's also data in the kernel + // which will kick off the filter chain. Alternately if the read buffer has data the fd could + // be read disabled. To handle these cases, fake an event to make sure the buffered data gets + // processed regardless and ensure that we dispatch it via onRead. dispatch_buffered_data_ = true; - file_event_->activate(Event::FileReadyType::Read); + setReadBufferReady(); } } } @@ -465,6 +474,21 @@ void ConnectionImpl::setBufferLimits(uint32_t limit) { // would result in respecting the exact buffer limit. if (limit > 0) { static_cast(write_buffer_.get())->setWatermarks(limit + 1); + read_buffer_.setWatermarks(limit + 1); + } +} + +void ConnectionImpl::onReadBufferLowWatermark() { + ENVOY_CONN_LOG(debug, "onBelowReadBufferLowWatermark", *this); + if (state() == State::Open) { + readDisable(false); + } +} + +void ConnectionImpl::onReadBufferHighWatermark() { + ENVOY_CONN_LOG(debug, "onAboveReadBufferHighWatermark", *this); + if (state() == State::Open) { + readDisable(true); } } @@ -525,10 +549,24 @@ void ConnectionImpl::onFileEvent(uint32_t events) { } void ConnectionImpl::onReadReady() { - ENVOY_CONN_LOG(trace, "read ready", *this); + ENVOY_CONN_LOG(trace, "read ready. dispatch_buffered_data={}", *this, dispatch_buffered_data_); + const bool latched_dispatch_buffered_data = dispatch_buffered_data_; + dispatch_buffered_data_ = false; ASSERT(!connecting_); + // We get here while read disabled in two ways. + // 1) There was a call to setReadBufferReady(), for example if a raw buffer socket ceded due to + // shouldDrainReadBuffer(). In this case we defer the event until the socket is read enabled. + // 2) The consumer of connection data called readDisable(true), and instead of reading from the + // socket we simply need to dispatch already read data. 
+ if (read_disable_count_ != 0) { + if (latched_dispatch_buffered_data && consumerWantsToRead()) { + onRead(read_buffer_.length()); + } + return; + } + IoResult result = transport_socket_->doRead(read_buffer_); uint64_t new_buffer_size = read_buffer_.length(); updateReadBufferStats(result.bytes_processed_, new_buffer_size); @@ -542,13 +580,12 @@ void ConnectionImpl::onReadReady() { read_end_stream_ |= result.end_stream_read_; if (result.bytes_processed_ != 0 || result.end_stream_read_ || - (dispatch_buffered_data_ && read_buffer_.length() > 0)) { + (latched_dispatch_buffered_data && read_buffer_.length() > 0)) { // Skip onRead if no bytes were processed unless we explicitly want to force onRead for // buffered data. For instance, skip onRead if the connection was closed without producing // more data. onRead(new_buffer_size); } - dispatch_buffered_data_ = false; // The read callback may have already closed the connection. if (result.action_ == PostIoAction::Close || bothSidesHalfClosed()) { diff --git a/source/common/network/connection_impl.h b/source/common/network/connection_impl.h index 6e8c1eb65518..b464e2af96d1 100644 --- a/source/common/network/connection_impl.h +++ b/source/common/network/connection_impl.h @@ -102,10 +102,10 @@ class ConnectionImpl : public ConnectionImplBase, public TransportSocketCallback } // Network::TransportSocketCallbacks - IoHandle& ioHandle() override { return socket_->ioHandle(); } + IoHandle& ioHandle() final { return socket_->ioHandle(); } const IoHandle& ioHandle() const override { return socket_->ioHandle(); } Connection& connection() override { return *this; } - void raiseEvent(ConnectionEvent event) override; + void raiseEvent(ConnectionEvent event) final; // Should the read buffer be drained? bool shouldDrainReadBuffer() override { return read_buffer_limit_ > 0 && read_buffer_.length() >= read_buffer_limit_; @@ -122,11 +122,22 @@ class ConnectionImpl : public ConnectionImplBase, public TransportSocketCallback static uint64_t nextGlobalIdForTest() { return next_global_id_; } protected: + // A convenience function which returns true if + // 1) The read disable count is zero or + // 2) The read disable count is one due to the read buffer being overrun. + // In either case the consumer of the data would like to read from the buffer. + // If the read count is greater than one, or equal to one when the buffer is + // not overrun, then the consumer of the data has called readDisable, and does + // not want to read. + bool consumerWantsToRead(); + // Network::ConnectionImplBase void closeConnectionImmediately() override; void closeSocket(ConnectionEvent close_type); + void onReadBufferLowWatermark(); + void onReadBufferHighWatermark(); void onWriteBufferLowWatermark(); void onWriteBufferHighWatermark(); @@ -135,7 +146,9 @@ class ConnectionImpl : public ConnectionImplBase, public TransportSocketCallback StreamInfo::StreamInfo& stream_info_; FilterManagerImpl filter_manager_; - Buffer::OwnedImpl read_buffer_; + // Ensure that if the consumer of the data from this connection isn't + // consuming, that the connection eventually stops reading from the wire. + Buffer::WatermarkBuffer read_buffer_; // This must be a WatermarkBuffer, but as it is created by a factory the ConnectionImpl only has // a generic pointer. 
// It MUST be defined after the filter_manager_ as some filters may have callbacks that diff --git a/test/common/network/connection_impl_test.cc b/test/common/network/connection_impl_test.cc index fbeb06519b09..6ef055b9fab3 100644 --- a/test/common/network/connection_impl_test.cc +++ b/test/common/network/connection_impl_test.cc @@ -93,6 +93,12 @@ TEST_P(ConnectionImplDeathTest, BadFd) { ".*assert failure: SOCKET_VALID\\(ConnectionImpl::ioHandle\\(\\)\\.fd\\(\\)\\).*"); } +class TestClientConnectionImpl : public Network::ClientConnectionImpl { +public: + using ClientConnectionImpl::ClientConnectionImpl; + Buffer::WatermarkBuffer& readBuffer() { return read_buffer_; } +}; + class ConnectionImplTest : public testing::TestWithParam { protected: ConnectionImplTest() : api_(Api::createApiForTest(time_system_)), stream_info_(time_system_) {} @@ -104,9 +110,9 @@ class ConnectionImplTest : public testing::TestWithParam { socket_ = std::make_shared(Network::Test::getAnyAddress(GetParam()), nullptr, true); listener_ = dispatcher_->createListener(socket_, listener_callbacks_, true); - client_connection_ = dispatcher_->createClientConnection( - socket_->localAddress(), source_address_, Network::Test::createRawBufferSocket(), - socket_options_); + client_connection_ = std::make_unique( + *dispatcher_, socket_->localAddress(), source_address_, + Network::Test::createRawBufferSocket(), socket_options_); client_connection_->addConnectionCallbacks(client_callbacks_); EXPECT_EQ(nullptr, client_connection_->ssl()); const Network::ClientConnection& const_connection = *client_connection_; @@ -215,6 +221,9 @@ class ConnectionImplTest : public testing::TestWithParam { return ConnectionMocks{std::move(dispatcher), timer, std::move(transport_socket), file_event, &file_ready_cb_}; } + Network::TestClientConnectionImpl* testClientConnection() { + return dynamic_cast(client_connection_.get()); + } Event::FileReadyCb file_ready_cb_; Event::SimulatedTimeSystem time_system_; @@ -742,7 +751,7 @@ TEST_P(ConnectionImplTest, HalfCloseNoEarlyCloseDetection) { } // Test that as watermark levels are changed, the appropriate callbacks are triggered. -TEST_P(ConnectionImplTest, Watermarks) { +TEST_P(ConnectionImplTest, WriteWatermarks) { useMockBuffer(); setUpBasicConnection(); @@ -791,6 +800,123 @@ TEST_P(ConnectionImplTest, Watermarks) { disconnect(false); } +// Test that as watermark levels are changed, the appropriate callbacks are triggered. +TEST_P(ConnectionImplTest, ReadWatermarks) { + + setUpBasicConnection(); + client_connection_->setBufferLimits(2); + std::shared_ptr client_read_filter(new NiceMock()); + client_connection_->addReadFilter(client_read_filter); + connect(); + + EXPECT_FALSE(testClientConnection()->readBuffer().highWatermarkTriggered()); + EXPECT_TRUE(client_connection_->readEnabled()); + // Add 4 bytes to the buffer and verify the connection becomes read disabled. + { + Buffer::OwnedImpl buffer("data"); + server_connection_->write(buffer, false); + EXPECT_CALL(*client_read_filter, onData(_, false)) + .WillOnce(Invoke([&](Buffer::Instance&, bool) -> FilterStatus { + dispatcher_->exit(); + return FilterStatus::StopIteration; + })); + dispatcher_->run(Event::Dispatcher::RunType::Block); + + EXPECT_TRUE(testClientConnection()->readBuffer().highWatermarkTriggered()); + EXPECT_FALSE(client_connection_->readEnabled()); + } + + // Drain 3 bytes from the buffer. This bring sit below the low watermark, and + // read enables, as well as triggering a kick for the remaining byte. 
+ { + testClientConnection()->readBuffer().drain(3); + EXPECT_FALSE(testClientConnection()->readBuffer().highWatermarkTriggered()); + EXPECT_TRUE(client_connection_->readEnabled()); + + EXPECT_CALL(*client_read_filter, onData(_, false)); + dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + } + + // Add 3 bytes to the buffer and verify the connection becomes read disabled + // again. + { + Buffer::OwnedImpl buffer("bye"); + server_connection_->write(buffer, false); + EXPECT_CALL(*client_read_filter, onData(_, false)) + .WillOnce(Invoke([&](Buffer::Instance&, bool) -> FilterStatus { + dispatcher_->exit(); + return FilterStatus::StopIteration; + })); + dispatcher_->run(Event::Dispatcher::RunType::Block); + + EXPECT_TRUE(testClientConnection()->readBuffer().highWatermarkTriggered()); + EXPECT_FALSE(client_connection_->readEnabled()); + } + + // Now have the consumer read disable. + // This time when the buffer is drained, there will be no kick as the consumer + // does not want to read. + { + client_connection_->readDisable(true); + testClientConnection()->readBuffer().drain(3); + EXPECT_FALSE(testClientConnection()->readBuffer().highWatermarkTriggered()); + EXPECT_FALSE(client_connection_->readEnabled()); + + EXPECT_CALL(*client_read_filter, onData(_, false)).Times(0); + dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + } + + // Now read enable again. + // Inside the onData call, readDisable and readEnable. This should trigger + // another kick on the next dispatcher loop, so onData gets called twice. + { + client_connection_->readDisable(false); + EXPECT_CALL(*client_read_filter, onData(_, false)) + .Times(2) + .WillOnce(Invoke([&](Buffer::Instance&, bool) -> FilterStatus { + client_connection_->readDisable(true); + client_connection_->readDisable(false); + return FilterStatus::StopIteration; + })) + .WillRepeatedly(Return(FilterStatus::StopIteration)); + dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + } + + // Test the same logic for dispatched_buffered_data from the + // onReadReady() (read_disable_count_ != 0) path. + { + // Fill the buffer and verify the socket is read disabled. + Buffer::OwnedImpl buffer("bye"); + server_connection_->write(buffer, false); + EXPECT_CALL(*client_read_filter, onData(_, false)) + .WillOnce(Invoke([&](Buffer::Instance&, bool) -> FilterStatus { + dispatcher_->exit(); + return FilterStatus::StopIteration; + })); + dispatcher_->run(Event::Dispatcher::RunType::Block); + EXPECT_TRUE(testClientConnection()->readBuffer().highWatermarkTriggered()); + EXPECT_FALSE(client_connection_->readEnabled()); + + // Read disable and read enable, to set dispatch_buffered_data_ true. + client_connection_->readDisable(true); + client_connection_->readDisable(false); + // Now event loop. This hits the early on-Read path. As above, read + // disable and read enable from inside the stack of onData, to ensure that + // dispatch_buffered_data_ works correctly. + EXPECT_CALL(*client_read_filter, onData(_, false)) + .Times(2) + .WillOnce(Invoke([&](Buffer::Instance&, bool) -> FilterStatus { + client_connection_->readDisable(true); + client_connection_->readDisable(false); + return FilterStatus::StopIteration; + })) + .WillRepeatedly(Return(FilterStatus::StopIteration)); + dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + } + + disconnect(true); +} + // Write some data to the connection. It will automatically attempt to flush // it to the upstream file descriptor via a write() call to buffer_, which is // configured to succeed and accept all bytes read. 
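One note on the ReadWatermarks test above: readDisable() is reference counted, and the new consumerWantsToRead() helper only treats a count of exactly one as readable when the read buffer's own high watermark forced the disable. A hypothetical call sequence, not taken from the test, makes the counting explicit:

  conn.readDisable(true);   // read_disable_count_ == 1, socket reads stop
  conn.readDisable(true);   // read_disable_count_ == 2
  conn.readDisable(false);  // read_disable_count_ == 1, still disabled
  conn.readDisable(false);  // read_disable_count_ == 0, reads resume; data already
                            // buffered is redispatched via a fake read event

When the high watermark itself triggers readDisable(true), the count is one but highWatermarkTriggered() is true, so consumerWantsToRead() still returns true and already-read data keeps flowing to filters without any further socket reads.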
From d64fc0d0fa40f4953fceb50c17465a0c4d8bef1d Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Wed, 20 May 2020 16:57:47 -0400 Subject: [PATCH 213/909] http: adding a new accessor (#11265) Adding an accessor headers.getHeaderNameValue() to replace headers.HeaderName().value().getStringView() in the common case where we really just want to look at the header value. Signed-off-by: Alyssa Wilk --- include/envoy/http/header_map.h | 8 +++- source/common/http/async_client_impl.cc | 2 +- source/common/http/conn_manager_impl.cc | 29 +++++++------- source/common/http/header_utility.cc | 5 +-- source/common/http/http1/codec_impl.cc | 11 ++---- source/common/http/http1/conn_pool.cc | 15 +++---- source/common/http/http2/codec_impl.cc | 2 +- source/common/http/path_utility.cc | 4 +- .../http/request_id_extension_uuid_impl.cc | 8 ++-- source/common/http/user_agent.cc | 8 ++-- source/common/http/utility.cc | 26 +++++-------- test/common/http/conn_manager_impl_test.cc | 39 +++++++++++++++---- 12 files changed, 85 insertions(+), 72 deletions(-) diff --git a/include/envoy/http/header_map.h b/include/envoy/http/header_map.h index 72257770d09e..789ad6121d5b 100644 --- a/include/envoy/http/header_map.h +++ b/include/envoy/http/header_map.h @@ -385,7 +385,13 @@ class HeaderEntry { virtual void setReference##name(absl::string_view value) PURE; \ virtual void set##name(absl::string_view value) PURE; \ virtual void set##name(uint64_t value) PURE; \ - virtual size_t remove##name() PURE; + virtual size_t remove##name() PURE; \ + absl::string_view get##name##Value() const { \ + if (name() != nullptr) { \ + return name()->value().getStringView(); \ + } \ + return ""; \ + } /** * Wraps a set of HTTP headers. diff --git a/source/common/http/async_client_impl.cc b/source/common/http/async_client_impl.cc index 088d93fe43bf..6a55567bc8db 100644 --- a/source/common/http/async_client_impl.cc +++ b/source/common/http/async_client_impl.cc @@ -132,7 +132,7 @@ void AsyncStreamImpl::encodeTrailers(ResponseTrailerMapPtr&& trailers) { } void AsyncStreamImpl::sendHeaders(RequestHeaderMap& headers, bool end_stream) { - if (Http::Headers::get().MethodValues.Head == headers.Method()->value().getStringView()) { + if (Http::Headers::get().MethodValues.Head == headers.getMethodValue()) { is_head_request_ = true; } diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index 8d5dd8895ae0..c20b2690cd74 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -781,8 +781,7 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& he state_.saw_connection_close_ = HeaderUtility::shouldCloseConnection(protocol, *request_headers_); } - if (request_headers_->Method() && Http::Headers::get().MethodValues.Head == - request_headers_->Method()->value().getStringView()) { + if (Http::Headers::get().MethodValues.Head == request_headers_->getMethodValue()) { state_.is_head_request_ = true; } @@ -857,8 +856,7 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& he // HTTP/1.0 defaults to single-use connections. Make sure the connection // will be closed unless Keep-Alive is present. 
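The conversions in this hunk and the ones that follow rely on the get<Name>Value() accessor defined above returning an empty string_view when the header is absent, which is what allows the explicit null checks to be dropped. A small before/after sketch with an arbitrary header and value:

  // Before: a null check guards the dereference.
  const bool close_before =
      headers.Connection() &&
      absl::EqualsIgnoreCase(headers.Connection()->value().getStringView(), "close");

  // After: getConnectionValue() returns "" for a missing header, so the
  // comparison simply evaluates to false in that case.
  const bool close_after = absl::EqualsIgnoreCase(headers.getConnectionValue(), "close");

A header that is present but empty and one that is absent are indistinguishable through the accessor, which is why call sites that care about presence keep the pointer-returning form.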
state_.saw_connection_close_ = true; - if (request_headers_->Connection() && - absl::EqualsIgnoreCase(request_headers_->Connection()->value().getStringView(), + if (absl::EqualsIgnoreCase(request_headers_->getConnectionValue(), Http::Headers::get().ConnectionValues.KeepAlive)) { state_.saw_connection_close_ = false; } @@ -882,11 +880,12 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& he // Verify header sanity checks which should have been performed by the codec. ASSERT(HeaderUtility::requestHeadersValid(*request_headers_).has_value() == false); - // Check for the existence of the :path header for non-CONNECT requests. We expect the codec to - // have broken the path into pieces if applicable. NOTE: Currently the HTTP/1.1 codec only does - // this when the allow_absolute_url flag is enabled on the HCM. - if ((!HeaderUtility::isConnect(*request_headers_) && !request_headers_->Path()) || - (request_headers_->Path() && request_headers_->Path()->value().getStringView().empty())) { + // Check for the existence of the :path header for non-CONNECT requests, or present-but-empty + // :path header for CONNECT requests. We expect the codec to have broken the path into pieces if + // applicable. NOTE: Currently the HTTP/1.1 codec only does this when the allow_absolute_url flag + // is enabled on the HCM. + if ((!HeaderUtility::isConnect(*request_headers_) || request_headers_->Path()) && + request_headers_->getPathValue().empty()) { sendLocalReply(Grpc::Common::hasGrpcContentType(*request_headers_), Code::NotFound, "", nullptr, state_.is_head_request_, absl::nullopt, StreamInfo::ResponseCodeDetails::get().MissingPath); @@ -894,7 +893,7 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& he } // Currently we only support relative paths at the application layer. - if (request_headers_->Path() && request_headers_->Path()->value().getStringView()[0] != '/') { + if (!request_headers_->getPathValue().empty() && request_headers_->getPathValue()[0] != '/') { connection_manager_.stats_.named_.downstream_rq_non_relative_path_.inc(); sendLocalReply(Grpc::Common::hasGrpcContentType(*request_headers_), Code::NotFound, "", nullptr, state_.is_head_request_, absl::nullopt, @@ -914,8 +913,8 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& he ConnectionManagerUtility::maybeNormalizeHost(*request_headers_, connection_manager_.config_, localPort()); - if (!fixed_connection_close && protocol == Protocol::Http11 && request_headers_->Connection() && - absl::EqualsIgnoreCase(request_headers_->Connection()->value().getStringView(), + if (!fixed_connection_close && protocol == Protocol::Http11 && + absl::EqualsIgnoreCase(request_headers_->getConnectionValue(), Http::Headers::get().ConnectionValues.Close)) { state_.saw_connection_close_ = true; } @@ -923,8 +922,7 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& he // since it is supported by http-parser the underlying parser for http // requests. 
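To make the rewritten :path check near the top of this hunk concrete, the condition (!HeaderUtility::isConnect(*request_headers_) || request_headers_->Path()) && request_headers_->getPathValue().empty() sends the 404 / MissingPath local reply in exactly these cases (worked out here for clarity, not spelled out in the patch):

  - non-CONNECT request, :path absent            -> rejected
  - non-CONNECT request, :path present but empty -> rejected
  - non-CONNECT request, :path non-empty         -> accepted
  - CONNECT request, :path absent                -> accepted (path-less CONNECT)
  - CONNECT request, :path present but empty     -> rejected
  - CONNECT request, :path present and non-empty -> accepted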
if (!fixed_connection_close && protocol < Protocol::Http2 && !state_.saw_connection_close_ && - request_headers_->ProxyConnection() && - absl::EqualsIgnoreCase(request_headers_->ProxyConnection()->value().getStringView(), + absl::EqualsIgnoreCase(request_headers_->getProxyConnectionValue(), Http::Headers::get().ConnectionValues.Close)) { state_.saw_connection_close_ = true; } @@ -1498,8 +1496,7 @@ void ConnectionManagerImpl::ActiveStream::requestRouteConfigUpdate( Event::Dispatcher& thread_local_dispatcher, Http::RouteConfigUpdatedCallbackSharedPtr route_config_updated_cb) { ASSERT(!request_headers_->Host()->value().empty()); - const auto& host_header = - absl::AsciiStrToLower(request_headers_->Host()->value().getStringView()); + const auto& host_header = absl::AsciiStrToLower(request_headers_->getHostValue()); route_config_update_requester_->requestRouteConfigUpdate(host_header, thread_local_dispatcher, std::move(route_config_updated_cb)); } diff --git a/source/common/http/header_utility.cc b/source/common/http/header_utility.cc index 00f089cd10c1..b180ad3ead3b 100644 --- a/source/common/http/header_utility.cc +++ b/source/common/http/header_utility.cc @@ -190,13 +190,12 @@ bool HeaderUtility::isEnvoyInternalRequest(const RequestHeaderMap& headers) { void HeaderUtility::stripPortFromHost(RequestHeaderMap& headers, uint32_t listener_port) { - if (headers.Method() && - headers.Method()->value().getStringView() == Http::Headers::get().MethodValues.Connect) { + if (headers.getMethodValue() == Http::Headers::get().MethodValues.Connect) { // According to RFC 2817 Connect method should have port part in host header. // In this case we won't strip it even if configured to do so. return; } - const auto original_host = headers.Host()->value().getStringView(); + const absl::string_view original_host = headers.getHostValue(); const absl::string_view::size_type port_start = original_host.rfind(':'); if (port_start == absl::string_view::npos) { return; diff --git a/source/common/http/http1/codec_impl.cc b/source/common/http/http1/codec_impl.cc index 2ab8d5be3a2d..70f40af8d742 100644 --- a/source/common/http/http1/codec_impl.cc +++ b/source/common/http/http1/codec_impl.cc @@ -638,16 +638,14 @@ int ConnectionImpl::onHeadersCompleteBase() { if (Utility::isUpgrade(request_or_response_headers) && upgradeAllowed()) { // Ignore h2c upgrade requests until we support them. // See https://github.com/envoyproxy/envoy/issues/7161 for details. - if (request_or_response_headers.Upgrade() && - absl::EqualsIgnoreCase(request_or_response_headers.Upgrade()->value().getStringView(), + if (absl::EqualsIgnoreCase(request_or_response_headers.getUpgradeValue(), Http::Headers::get().UpgradeValues.H2c)) { ENVOY_CONN_LOG(trace, "removing unsupported h2c upgrade headers.", connection_); request_or_response_headers.removeUpgrade(); if (request_or_response_headers.Connection()) { const auto& tokens_to_remove = caseUnorderdSetContainingUpgradeAndHttp2Settings(); std::string new_value = StringUtil::removeTokens( - request_or_response_headers.Connection()->value().getStringView(), ",", - tokens_to_remove, ","); + request_or_response_headers.getConnectionValue(), ",", tokens_to_remove, ","); if (new_value.empty()) { request_or_response_headers.removeConnection(); } else { @@ -668,8 +666,7 @@ int ConnectionImpl::onHeadersCompleteBase() { // Per https://tools.ietf.org/html/rfc7230#section-3.3.1 Envoy should reject // transfer-codings it does not understand. 
if (request_or_response_headers.TransferEncoding()) { - const absl::string_view encoding = - request_or_response_headers.TransferEncoding()->value().getStringView(); + const absl::string_view encoding = request_or_response_headers.getTransferEncodingValue(); if (reject_unsupported_transfer_encodings_ && !absl::EqualsIgnoreCase(encoding, Headers::get().TransferEncodingValues.Chunked)) { error_code_ = Http::Code::NotImplemented; @@ -833,7 +830,7 @@ int ServerConnectionImpl::onHeadersComplete() { if (!handling_upgrade_ && connection_header_sanitization_ && headers->Connection()) { // If we fail to sanitize the request, return a 400 to the client if (!Utility::sanitizeConnectionHeader(*headers)) { - absl::string_view header_value = headers->Connection()->value().getStringView(); + absl::string_view header_value = headers->getConnectionValue(); ENVOY_CONN_LOG(debug, "Invalid nominated headers in Connection: {}", connection_, header_value); error_code_ = Http::Code::BadRequest; diff --git a/source/common/http/http1/conn_pool.cc b/source/common/http/http1/conn_pool.cc index d782420ea57c..df4aec11f3e8 100644 --- a/source/common/http/http1/conn_pool.cc +++ b/source/common/http/http1/conn_pool.cc @@ -92,16 +92,13 @@ void ConnPoolImpl::StreamWrapper::decodeHeaders(ResponseHeaderMapPtr&& headers, // If Connection: close OR // Http/1.0 and not Connection: keep-alive OR // Proxy-Connection: close - if ((headers->Connection() && - (absl::EqualsIgnoreCase(headers->Connection()->value().getStringView(), - Headers::get().ConnectionValues.Close))) || + if ((absl::EqualsIgnoreCase(headers->getConnectionValue(), + Headers::get().ConnectionValues.Close)) || (parent_.codec_client_->protocol() == Protocol::Http10 && - (!headers->Connection() || - !absl::EqualsIgnoreCase(headers->Connection()->value().getStringView(), - Headers::get().ConnectionValues.KeepAlive))) || - (headers->ProxyConnection() && - (absl::EqualsIgnoreCase(headers->ProxyConnection()->value().getStringView(), - Headers::get().ConnectionValues.Close)))) { + !absl::EqualsIgnoreCase(headers->getConnectionValue(), + Headers::get().ConnectionValues.KeepAlive)) || + (absl::EqualsIgnoreCase(headers->getProxyConnectionValue(), + Headers::get().ConnectionValues.Close))) { parent_.parent_.host_->cluster().stats().upstream_cx_close_notify_.inc(); close_connection_ = true; } diff --git a/source/common/http/http2/codec_impl.cc b/source/common/http/http2/codec_impl.cc index dc9ea9de0276..b2c2e6e8ded6 100644 --- a/source/common/http/http2/codec_impl.cc +++ b/source/common/http/http2/codec_impl.cc @@ -157,7 +157,7 @@ void ConnectionImpl::ClientStreamImpl::encodeHeaders(const RequestHeaderMap& hea Http::RequestHeaderMapPtr modified_headers; if (Http::Utility::isUpgrade(headers)) { modified_headers = createHeaderMap(headers); - upgrade_type_ = std::string(headers.Upgrade()->value().getStringView()); + upgrade_type_ = std::string(headers.getUpgradeValue()); Http::Utility::transformUpgradeRequestFromH1toH2(*modified_headers); buildHeaders(final_headers, *modified_headers); } else if (headers.Method() && headers.Method()->value() == "CONNECT") { diff --git a/source/common/http/path_utility.cc b/source/common/http/path_utility.cc index b1e5b60986b5..f12790b41103 100644 --- a/source/common/http/path_utility.cc +++ b/source/common/http/path_utility.cc @@ -30,7 +30,7 @@ absl::optional canonicalizePath(absl::string_view original_path) { /* static */ bool PathUtil::canonicalPath(RequestHeaderMap& headers) { ASSERT(headers.Path()); - const auto original_path = 
headers.Path()->value().getStringView(); + const auto original_path = headers.getPathValue(); // canonicalPath is supposed to apply on path component in URL instead of :path header const auto query_pos = original_path.find('?'); auto normalized_path_opt = canonicalizePath( @@ -56,7 +56,7 @@ bool PathUtil::canonicalPath(RequestHeaderMap& headers) { void PathUtil::mergeSlashes(RequestHeaderMap& headers) { ASSERT(headers.Path()); - const auto original_path = headers.Path()->value().getStringView(); + const auto original_path = headers.getPathValue(); // Only operate on path component in URL. const absl::string_view::size_type query_start = original_path.find('?'); const absl::string_view path = original_path.substr(0, query_start); diff --git a/source/common/http/request_id_extension_uuid_impl.cc b/source/common/http/request_id_extension_uuid_impl.cc index dc95b46f81c7..a7ec65fb6612 100644 --- a/source/common/http/request_id_extension_uuid_impl.cc +++ b/source/common/http/request_id_extension_uuid_impl.cc @@ -27,7 +27,7 @@ void UUIDRequestIDExtension::set(RequestHeaderMap& request_headers, bool force) void UUIDRequestIDExtension::setInResponse(ResponseHeaderMap& response_headers, const RequestHeaderMap& request_headers) { if (request_headers.RequestId()) { - response_headers.setRequestId(request_headers.RequestId()->value().getStringView()); + response_headers.setRequestId(request_headers.getRequestIdValue()); } } @@ -36,7 +36,7 @@ bool UUIDRequestIDExtension::modBy(const RequestHeaderMap& request_headers, uint if (request_headers.RequestId() == nullptr) { return false; } - const std::string uuid(request_headers.RequestId()->value().getStringView()); + const std::string uuid(request_headers.getRequestIdValue()); if (uuid.length() < 8) { return false; } @@ -54,7 +54,7 @@ TraceStatus UUIDRequestIDExtension::getTraceStatus(const RequestHeaderMap& reque if (request_headers.RequestId() == nullptr) { return TraceStatus::NoTrace; } - absl::string_view uuid = request_headers.RequestId()->value().getStringView(); + absl::string_view uuid = request_headers.getRequestIdValue(); if (uuid.length() != Runtime::RandomGeneratorImpl::UUID_LENGTH) { return TraceStatus::NoTrace; } @@ -75,7 +75,7 @@ void UUIDRequestIDExtension::setTraceStatus(RequestHeaderMap& request_headers, T if (request_headers.RequestId() == nullptr) { return; } - absl::string_view uuid_view = request_headers.RequestId()->value().getStringView(); + absl::string_view uuid_view = request_headers.getRequestIdValue(); if (uuid_view.length() != Runtime::RandomGeneratorImpl::UUID_LENGTH) { return; } diff --git a/source/common/http/user_agent.cc b/source/common/http/user_agent.cc index d6804f245ec5..65c243aeae24 100644 --- a/source/common/http/user_agent.cc +++ b/source/common/http/user_agent.cc @@ -50,11 +50,11 @@ void UserAgent::initializeFromHeaders(const RequestHeaderMap& headers, Stats::St if (stats_ == nullptr && !initialized_) { initialized_ = true; - const HeaderEntry* user_agent = headers.UserAgent(); - if (user_agent != nullptr) { - if (user_agent->value().getStringView().find("iOS") != absl::string_view::npos) { + const absl::string_view user_agent = headers.getUserAgentValue(); + if (!user_agent.empty()) { + if (user_agent.find("iOS") != absl::string_view::npos) { stats_ = std::make_unique(prefix, context_.ios_, scope, context_); - } else if (user_agent->value().getStringView().find("android") != absl::string_view::npos) { + } else if (user_agent.find("android") != absl::string_view::npos) { stats_ = std::make_unique(prefix, 
context_.android_, scope, context_); } } diff --git a/source/common/http/utility.cc b/source/common/http/utility.cc index 9f01dcb72278..dfc1f64dc10d 100644 --- a/source/common/http/utility.cc +++ b/source/common/http/utility.cc @@ -261,8 +261,7 @@ void Utility::appendVia(RequestOrResponseHeaderMap& headers, const std::string& std::string Utility::createSslRedirectPath(const RequestHeaderMap& headers) { ASSERT(headers.Host()); ASSERT(headers.Path()); - return fmt::format("https://{}{}", headers.Host()->value().getStringView(), - headers.Path()->value().getStringView()); + return fmt::format("https://{}{}", headers.getHostValue(), headers.getPathValue()); } Utility::QueryParams Utility::parseQueryString(absl::string_view url) { @@ -385,7 +384,7 @@ std::string Utility::makeSetCookieValue(const std::string& key, const std::strin uint64_t Utility::getResponseStatus(const ResponseHeaderMap& headers) { const HeaderEntry* header = headers.Status(); uint64_t response_code; - if (!header || !absl::SimpleAtoi(headers.Status()->value().getStringView(), &response_code)) { + if (!header || !absl::SimpleAtoi(headers.getStatusValue(), &response_code)) { throw CodecClientException(":status must be specified and a valid unsigned long"); } return response_code; @@ -394,21 +393,20 @@ uint64_t Utility::getResponseStatus(const ResponseHeaderMap& headers) { bool Utility::isUpgrade(const RequestOrResponseHeaderMap& headers) { // In firefox the "Connection" request header value is "keep-alive, Upgrade", // we should check if it contains the "Upgrade" token. - return (headers.Connection() && headers.Upgrade() && - Envoy::StringUtil::caseFindToken(headers.Connection()->value().getStringView(), ",", + return (headers.Upgrade() && + Envoy::StringUtil::caseFindToken(headers.getConnectionValue(), ",", Http::Headers::get().ConnectionValues.Upgrade.c_str())); } bool Utility::isH2UpgradeRequest(const RequestHeaderMap& headers) { - return headers.Method() && - headers.Method()->value().getStringView() == Http::Headers::get().MethodValues.Connect && + return headers.getMethodValue() == Http::Headers::get().MethodValues.Connect && headers.Protocol() && !headers.Protocol()->value().empty() && headers.Protocol()->value() != Headers::get().ProtocolValues.Bytestream; } bool Utility::isWebSocketUpgradeRequest(const RequestHeaderMap& headers) { return (isUpgrade(headers) && - absl::EqualsIgnoreCase(headers.Upgrade()->value().getStringView(), + absl::EqualsIgnoreCase(headers.getUpgradeValue(), Http::Headers::get().UpgradeValues.WebSocket)); } @@ -744,15 +742,13 @@ const std::string Utility::resetReasonToString(const Http::StreamResetReason res void Utility::transformUpgradeRequestFromH1toH2(RequestHeaderMap& headers) { ASSERT(Utility::isUpgrade(headers)); - const HeaderString& upgrade = headers.Upgrade()->value(); headers.setReferenceMethod(Http::Headers::get().MethodValues.Connect); - headers.setProtocol(upgrade.getStringView()); + headers.setProtocol(headers.getUpgradeValue()); headers.removeUpgrade(); headers.removeConnection(); // nghttp2 rejects upgrade requests/responses with content length, so strip // any unnecessary content length header. 
- if (headers.ContentLength() != nullptr && - headers.ContentLength()->value().getStringView() == "0") { + if (headers.getContentLengthValue() == "0") { headers.removeContentLength(); } } @@ -763,8 +759,7 @@ void Utility::transformUpgradeResponseFromH1toH2(ResponseHeaderMap& headers) { } headers.removeUpgrade(); headers.removeConnection(); - if (headers.ContentLength() != nullptr && - headers.ContentLength()->value().getStringView() == "0") { + if (headers.getContentLengthValue() == "0") { headers.removeContentLength(); } } @@ -772,9 +767,8 @@ void Utility::transformUpgradeResponseFromH1toH2(ResponseHeaderMap& headers) { void Utility::transformUpgradeRequestFromH2toH1(RequestHeaderMap& headers) { ASSERT(Utility::isH2UpgradeRequest(headers)); - const HeaderString& protocol = headers.Protocol()->value(); headers.setReferenceMethod(Http::Headers::get().MethodValues.Get); - headers.setUpgrade(protocol.getStringView()); + headers.setUpgrade(headers.getProtocolValue()); headers.setReferenceConnection(Http::Headers::get().ConnectionValues.Upgrade); headers.removeProtocol(); } diff --git a/test/common/http/conn_manager_impl_test.cc b/test/common/http/conn_manager_impl_test.cc index 278aba456b50..86770f07a790 100644 --- a/test/common/http/conn_manager_impl_test.cc +++ b/test/common/http/conn_manager_impl_test.cc @@ -2940,7 +2940,7 @@ TEST_F(HttpConnectionManagerImplTest, Http10Rejected) { EXPECT_CALL(encoder, encodeHeaders(_, true)) .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { EXPECT_EQ("426", headers.Status()->value().getStringView()); - EXPECT_EQ("close", headers.Connection()->value().getStringView()); + EXPECT_EQ("close", headers.getConnectionValue()); })); Buffer::OwnedImpl fake_input("1234"); @@ -2967,7 +2967,7 @@ TEST_F(HttpConnectionManagerImplTest, Http10ConnCloseLegacy) { EXPECT_CALL(encoder, encodeHeaders(_, true)) .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { - EXPECT_EQ("close", headers.Connection()->value().getStringView()); + EXPECT_EQ("close", headers.getConnectionValue()); })); Buffer::OwnedImpl fake_input("1234"); @@ -2992,7 +2992,7 @@ TEST_F(HttpConnectionManagerImplTest, ProxyConnectLegacyClose) { EXPECT_CALL(encoder, encodeHeaders(_, true)) .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { - EXPECT_EQ("close", headers.Connection()->value().getStringView()); + EXPECT_EQ("close", headers.getConnectionValue()); })); Buffer::OwnedImpl fake_input("1234"); @@ -3017,7 +3017,7 @@ TEST_F(HttpConnectionManagerImplTest, ConnectLegacyClose) { EXPECT_CALL(encoder, encodeHeaders(_, true)) .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { - EXPECT_EQ("close", headers.Connection()->value().getStringView()); + EXPECT_EQ("close", headers.getConnectionValue()); })); Buffer::OwnedImpl fake_input("1234"); @@ -3097,7 +3097,7 @@ TEST_F(HttpConnectionManagerImplTest, FooUpgradeDrainClose) { EXPECT_CALL(encoder, encodeHeaders(_, false)) .WillOnce(Invoke([&](const ResponseHeaderMap& headers, bool) -> void { EXPECT_NE(nullptr, headers.Connection()); - EXPECT_EQ("upgrade", headers.Connection()->value().getStringView()); + EXPECT_EQ("upgrade", headers.getConnectionValue()); })); EXPECT_CALL(*filter, setDecoderFilterCallbacks(_)); @@ -3160,6 +3160,29 @@ TEST_F(HttpConnectionManagerImplTest, ConnectAsUpgrade) { conn_manager_->onData(fake_input, false); } +TEST_F(HttpConnectionManagerImplTest, ConnectWithEmptyPath) { + setup(false, "envoy-custom-server", false); + + NiceMock encoder; + + EXPECT_CALL(filter_factory_, 
createUpgradeFilterChain("CONNECT", _, _)) + .WillRepeatedly(Return(true)); + + EXPECT_CALL(*codec_, dispatch(_)) + .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(encoder); + RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ + {":authority", "host"}, {":path", ""}, {":method", "CONNECT"}}}; + decoder->decodeHeaders(std::move(headers), false); + data.drain(4); + return Http::okStatus(); + })); + + // Kick off the incoming data. Use extra data which should cause a redispatch. + Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); +} + TEST_F(HttpConnectionManagerImplTest, ConnectLegacy) { TestScopedRuntime scoped_runtime; Runtime::LoaderSingleton::getExisting()->mergeValues( @@ -3390,7 +3413,7 @@ TEST_F(HttpConnectionManagerImplTest, DisconnectOnProxyConnectionDisconnect) { EXPECT_CALL(response_encoder_, encodeHeaders(_, true)) .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { EXPECT_NE(nullptr, headers.Connection()); - EXPECT_EQ("close", headers.Connection()->value().getStringView()); + EXPECT_EQ("close", headers.getConnectionValue()); EXPECT_EQ(nullptr, headers.ProxyConnection()); })); EXPECT_CALL(*decoder_filters_[0], onDestroy()); @@ -5631,7 +5654,7 @@ TEST_F(HttpConnectionManagerImplTest, DisableKeepAliveWhenOverloaded) { EXPECT_CALL(response_encoder_, encodeHeaders(_, true)) .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { - EXPECT_EQ("close", headers.Connection()->value().getStringView()); + EXPECT_EQ("close", headers.getConnectionValue()); })); Buffer::OwnedImpl fake_input("1234"); @@ -5754,7 +5777,7 @@ TEST_F(HttpConnectionManagerImplTest, DisableKeepAliveWhenDraining) { EXPECT_CALL(response_encoder_, encodeHeaders(_, true)) .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { - EXPECT_EQ("close", headers.Connection()->value().getStringView()); + EXPECT_EQ("close", headers.getConnectionValue()); })); Buffer::OwnedImpl fake_input; From 2b0633a09ce2ebf7ff4ba19ac470c013b1f2d35b Mon Sep 17 00:00:00 2001 From: Teju Nareddy Date: Wed, 20 May 2020 17:01:39 -0400 Subject: [PATCH 214/909] Support fuzzing encoder callbacks in UberFilterFuzzer (#11209) Support fuzzing encoder callbacks in UberFilterFuzzer Signed-off-by: Teju Nareddy --- .../extensions/filters/http/common/fuzz/BUILD | 1 + .../http/common/fuzz/filter_corpus/grpc_stats | 47 ++++++ .../grpc_transcoding_decode_encode | 50 ++++++ .../filter_corpus/grpc_transcoding_proto_data | 14 +- .../http/common/fuzz/filter_fuzz.proto | 4 + .../http/common/fuzz/filter_fuzz_test.cc | 2 +- .../filters/http/common/fuzz/uber_filter.cc | 148 ++++++++++++++---- .../filters/http/common/fuzz/uber_filter.h | 42 ++++- .../http/common/fuzz/uber_per_filter.cc | 12 +- test/fuzz/common.proto | 2 +- test/fuzz/fuzz_runner.cc | 4 +- 11 files changed, 276 insertions(+), 50 deletions(-) create mode 100644 test/extensions/filters/http/common/fuzz/filter_corpus/grpc_stats create mode 100644 test/extensions/filters/http/common/fuzz/filter_corpus/grpc_transcoding_decode_encode diff --git a/test/extensions/filters/http/common/fuzz/BUILD b/test/extensions/filters/http/common/fuzz/BUILD index 929cb14dfec6..9c42dc89bb6d 100644 --- a/test/extensions/filters/http/common/fuzz/BUILD +++ b/test/extensions/filters/http/common/fuzz/BUILD @@ -33,6 +33,7 @@ envoy_cc_test_library( deps = [ ":filter_fuzz_proto_cc_proto", "//source/common/config:utility_lib", + "//source/common/http:utility_lib", 
"//source/common/protobuf:utility_lib", "//source/extensions/filters/http:well_known_names", "//test/fuzz:utility_lib", diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/grpc_stats b/test/extensions/filters/http/common/fuzz/filter_corpus/grpc_stats new file mode 100644 index 000000000000..10704daac17b --- /dev/null +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/grpc_stats @@ -0,0 +1,47 @@ +config { + name: "envoy.filters.http.grpc_stats" + typed_config: {} +} +data { + headers { + headers { + key: ":method" + value: "POST" + } + headers { + key: ":path" + value: "/bookstore.Bookstore/CreateShelfWithPackageServiceAndMethod" + } + headers { + key: "content-type" + value: "application/grpc" + } + } +} +upstream_data { + headers { + headers { + key: ":status" + value: "200" + } + headers { + key: "content-type" + value: "application/grpc" + } + } + proto_body { + message { + [type.googleapis.com/bookstore.Book] { + id: 16 + title: "Hardy Boys" + } + } + chunk_size: 4 + } + trailers { + headers { + key: "grpc-status" + value: "0" + } + } +} \ No newline at end of file diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/grpc_transcoding_decode_encode b/test/extensions/filters/http/common/fuzz/filter_corpus/grpc_transcoding_decode_encode new file mode 100644 index 000000000000..d1a907e186fc --- /dev/null +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/grpc_transcoding_decode_encode @@ -0,0 +1,50 @@ +config { + name: "envoy.filters.http.grpc_json_transcoder" + typed_config: {} +} +data { + headers { + headers { + key: "content-type" + value: "application/json" + } + headers { + key: ":method" + value: "POST" + } + headers { + key: ":path" + value: "/bookstore.Bookstore/CreateShelfWithPackageServiceAndMethod" + } + } + http_body { + data: "{\"theme\": \"Children\"}" + } +} +upstream_data { + headers { + headers { + key: ":status" + value: "200" + } + headers { + key: "content-type" + value: "application/grpc" + } + } + proto_body { + message { + [type.googleapis.com/bookstore.Book] { + id: 16 + title: "Hardy Boys" + } + } + chunk_size: 100 + } + trailers { + headers { + key: "grpc-status" + value: "0" + } + } +} \ No newline at end of file diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/grpc_transcoding_proto_data b/test/extensions/filters/http/common/fuzz/filter_corpus/grpc_transcoding_proto_data index 711ea9f66ec5..3adc75ba874e 100644 --- a/test/extensions/filters/http/common/fuzz/filter_corpus/grpc_transcoding_proto_data +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/grpc_transcoding_proto_data @@ -5,10 +5,6 @@ config { data { headers { - headers { - key: "content-type" - value: "application/json" - } headers { key: ":method" value: "POST" @@ -17,6 +13,10 @@ data { key: ":path" value: "/bookstore.Bookstore/CreateShelf" } + headers { + key: "content-type" + value: "application/grpc" + } } proto_body { message { @@ -29,4 +29,10 @@ data { } chunk_size: 3 } + trailers { + headers { + key: "grpc-status" + value: "0" + } + } } \ No newline at end of file diff --git a/test/extensions/filters/http/common/fuzz/filter_fuzz.proto b/test/extensions/filters/http/common/fuzz/filter_fuzz.proto index a97d9dcfd2bb..20f036684161 100644 --- a/test/extensions/filters/http/common/fuzz/filter_fuzz.proto +++ b/test/extensions/filters/http/common/fuzz/filter_fuzz.proto @@ -8,5 +8,9 @@ import "envoy/extensions/filters/network/http_connection_manager/v3/http_connect message FilterFuzzTestCase { 
envoy.extensions.filters.network.http_connection_manager.v3.HttpFilter config = 1; + // Downstream data (named for backwards compatibility). test.fuzz.HttpData data = 2; + + // Upstream data. + test.fuzz.HttpData upstream_data = 3; } diff --git a/test/extensions/filters/http/common/fuzz/filter_fuzz_test.cc b/test/extensions/filters/http/common/fuzz/filter_fuzz_test.cc index edfa89f917c7..7e773b4f1311 100644 --- a/test/extensions/filters/http/common/fuzz/filter_fuzz_test.cc +++ b/test/extensions/filters/http/common/fuzz/filter_fuzz_test.cc @@ -50,7 +50,7 @@ DEFINE_PROTO_FUZZER(const test::extensions::filters::http::FilterFuzzTestCase& i TestUtility::validate(input); // Fuzz filter. static UberFilterFuzzer fuzzer; - fuzzer.fuzz(input.config(), input.data()); + fuzzer.fuzz(input.config(), input.data(), input.upstream_data()); } catch (const ProtoValidationException& e) { ENVOY_LOG_MISC(debug, "ProtoValidationException: {}", e.what()); } diff --git a/test/extensions/filters/http/common/fuzz/uber_filter.cc b/test/extensions/filters/http/common/fuzz/uber_filter.cc index a88cc585a72e..ae7fbc2d9f92 100644 --- a/test/extensions/filters/http/common/fuzz/uber_filter.cc +++ b/test/extensions/filters/http/common/fuzz/uber_filter.cc @@ -3,6 +3,7 @@ #include "common/config/utility.h" #include "common/config/version_converter.h" #include "common/http/message_impl.h" +#include "common/http/utility.h" #include "common/protobuf/protobuf.h" #include "common/protobuf/utility.h" @@ -13,16 +14,25 @@ namespace Extensions { namespace HttpFilters { UberFilterFuzzer::UberFilterFuzzer() { - // Need to set for both a decoder filter and an encoder/decoder filter. + // This is a decoder filter. ON_CALL(filter_callback_, addStreamDecoderFilter(_)) - .WillByDefault(Invoke([&](std::shared_ptr filter) -> void { - filter_ = filter; - filter_->setDecoderFilterCallbacks(callbacks_); + .WillByDefault(Invoke([&](Http::StreamDecoderFilterSharedPtr filter) -> void { + decoder_filter_ = filter; + decoder_filter_->setDecoderFilterCallbacks(decoder_callbacks_); })); + // This is an encoded filter. + ON_CALL(filter_callback_, addStreamEncoderFilter(_)) + .WillByDefault(Invoke([&](Http::StreamEncoderFilterSharedPtr filter) -> void { + encoder_filter_ = filter; + encoder_filter_->setEncoderFilterCallbacks(encoder_callbacks_); + })); + // This is a decoder and encoder filter. ON_CALL(filter_callback_, addStreamFilter(_)) - .WillByDefault(Invoke([&](std::shared_ptr filter) -> void { - filter_ = filter; - filter_->setDecoderFilterCallbacks(callbacks_); + .WillByDefault(Invoke([&](Http::StreamFilterSharedPtr filter) -> void { + decoder_filter_ = filter; + decoder_filter_->setDecoderFilterCallbacks(decoder_callbacks_); + encoder_filter_ = filter; + encoder_filter_->setEncoderFilterCallbacks(encoder_callbacks_); })); // Set expectations for particular filters that may get fuzzed. 
perFilterSetup(); @@ -44,28 +54,16 @@ std::vector UberFilterFuzzer::parseHttpData(const test::fuzz::HttpD return data_chunks; } -void UberFilterFuzzer::decode(Http::StreamDecoderFilter* filter, const test::fuzz::HttpData& data) { +template +void UberFilterFuzzer::runData(FilterType* filter, const test::fuzz::HttpData& data) { bool end_stream = false; - - auto headers = Fuzz::fromHeaders(data.headers()); - if (headers.Path() == nullptr) { - headers.setPath("/foo"); - } - if (headers.Method() == nullptr) { - headers.setMethod("GET"); - } - if (headers.Host() == nullptr) { - headers.setHost("foo.com"); - } - if (data.body_case() == test::fuzz::HttpData::BODY_NOT_SET && !data.has_trailers()) { end_stream = true; } - ENVOY_LOG_MISC(debug, "Decoding headers (end_stream={}): {} ", end_stream, - data.headers().DebugString()); - const auto& headersStatus = filter->decodeHeaders(headers, end_stream); + const auto& headersStatus = sendHeaders(filter, data, end_stream); if (headersStatus != Http::FilterHeadersStatus::Continue && headersStatus != Http::FilterHeadersStatus::StopIteration) { + ENVOY_LOG_MISC(debug, "Finished with FilterHeadersStatus: {}", headersStatus); return; } @@ -75,23 +73,90 @@ void UberFilterFuzzer::decode(Http::StreamDecoderFilter* filter, const test::fuz end_stream = true; } Buffer::OwnedImpl buffer(data_chunks[i]); - ENVOY_LOG_MISC(debug, "Decoding data (end_stream={}): {} ", end_stream, buffer.toString()); - if (filter->decodeData(buffer, end_stream) != Http::FilterDataStatus::Continue) { + const auto& dataStatus = sendData(filter, buffer, end_stream); + if (dataStatus != Http::FilterDataStatus::Continue) { + ENVOY_LOG_MISC(debug, "Finished with FilterDataStatus: {}", dataStatus); return; } } if (data.has_trailers()) { - ENVOY_LOG_MISC(debug, "Decoding trailers: {} ", data.trailers().DebugString()); - auto trailers = Fuzz::fromHeaders(data.trailers()); - filter->decodeTrailers(trailers); + sendTrailers(filter, data); + } +} + +template <> +Http::FilterHeadersStatus UberFilterFuzzer::sendHeaders(Http::StreamDecoderFilter* filter, + const test::fuzz::HttpData& data, + bool end_stream) { + request_headers_ = Fuzz::fromHeaders(data.headers()); + if (request_headers_.Path() == nullptr) { + request_headers_.setPath("/foo"); + } + if (request_headers_.Method() == nullptr) { + request_headers_.setMethod("GET"); + } + if (request_headers_.Host() == nullptr) { + request_headers_.setHost("foo.com"); + } + + ENVOY_LOG_MISC(debug, "Decoding headers (end_stream={}):\n{} ", end_stream, request_headers_); + return filter->decodeHeaders(request_headers_, end_stream); +} + +template <> +Http::FilterHeadersStatus UberFilterFuzzer::sendHeaders(Http::StreamEncoderFilter* filter, + const test::fuzz::HttpData& data, + bool end_stream) { + response_headers_ = Fuzz::fromHeaders(data.headers()); + + // Status must be a valid unsigned long. If not set, the utility function below will throw + // an exception on the data path of some filters. This should never happen in production, so catch + // the exception and set to a default value. 
+ try { + (void)Http::Utility::getResponseStatus(response_headers_); + } catch (const Http::CodecClientException& e) { + response_headers_.setStatus(200); } + + ENVOY_LOG_MISC(debug, "Encoding headers (end_stream={}):\n{} ", end_stream, response_headers_); + return filter->encodeHeaders(response_headers_, end_stream); +} + +template <> +Http::FilterDataStatus UberFilterFuzzer::sendData(Http::StreamDecoderFilter* filter, + Buffer::Instance& buffer, bool end_stream) { + ENVOY_LOG_MISC(debug, "Decoding data (end_stream={}): {} ", end_stream, buffer.toString()); + return filter->decodeData(buffer, end_stream); +} + +template <> +Http::FilterDataStatus UberFilterFuzzer::sendData(Http::StreamEncoderFilter* filter, + Buffer::Instance& buffer, bool end_stream) { + ENVOY_LOG_MISC(debug, "Encoding data (end_stream={}): {} ", end_stream, buffer.toString()); + return filter->encodeData(buffer, end_stream); +} + +template <> +void UberFilterFuzzer::sendTrailers(Http::StreamDecoderFilter* filter, + const test::fuzz::HttpData& data) { + request_trailers_ = Fuzz::fromHeaders(data.trailers()); + ENVOY_LOG_MISC(debug, "Decoding trailers:\n{} ", request_trailers_); + filter->decodeTrailers(request_trailers_); +} + +template <> +void UberFilterFuzzer::sendTrailers(Http::StreamEncoderFilter* filter, + const test::fuzz::HttpData& data) { + response_trailers_ = Fuzz::fromHeaders(data.trailers()); + ENVOY_LOG_MISC(debug, "Encoding trailers:\n{} ", response_trailers_); + filter->encodeTrailers(response_trailers_); } void UberFilterFuzzer::fuzz( const envoy::extensions::filters::network::http_connection_manager::v3::HttpFilter& proto_config, - const test::fuzz::HttpData& data) { + const test::fuzz::HttpData& downstream_data, const test::fuzz::HttpData& upstream_data) { try { // Try to create the filter. Exit early if the config is invalid or violates PGV constraints. ENVOY_LOG_MISC(info, "filter name {}", proto_config.name()); @@ -108,15 +173,32 @@ void UberFilterFuzzer::fuzz( return; } - decode(filter_.get(), data); + // Data path should not throw exceptions. + if (decoder_filter_ != nullptr) { + runData(decoder_filter_.get(), downstream_data); + } + if (encoder_filter_ != nullptr) { + runData(encoder_filter_.get(), upstream_data); + } + reset(); } void UberFilterFuzzer::reset() { - if (filter_ != nullptr) { - filter_->onDestroy(); + if (decoder_filter_ != nullptr) { + decoder_filter_->onDestroy(); + } + decoder_filter_.reset(); + + if (encoder_filter_ != nullptr) { + encoder_filter_->onDestroy(); } - filter_.reset(); + encoder_filter_.reset(); + + request_headers_.clear(); + response_headers_.clear(); + request_trailers_.clear(); + response_trailers_.clear(); } } // namespace HttpFilters diff --git a/test/extensions/filters/http/common/fuzz/uber_filter.h b/test/extensions/filters/http/common/fuzz/uber_filter.h index 511c587a6e62..af6b060f6a80 100644 --- a/test/extensions/filters/http/common/fuzz/uber_filter.h +++ b/test/extensions/filters/http/common/fuzz/uber_filter.h @@ -11,10 +11,13 @@ class UberFilterFuzzer { public: UberFilterFuzzer(); - // This creates the filter config and runs the decode methods. + // This creates the filter config and runs the fuzzed data against the filter. void fuzz(const envoy::extensions::filters::network::http_connection_manager::v3::HttpFilter& proto_config, - const test::fuzz::HttpData& data); + const test::fuzz::HttpData& downstream_data, const test::fuzz::HttpData& upstream_data); + + // This executes the filter decoders/encoders with the fuzzed data. 
+ template void runData(FilterType* filter, const test::fuzz::HttpData& data); // For fuzzing proto data, guide the mutator to useful 'Any' types. static void guideAnyProtoType(test::fuzz::HttpData* mutable_data, uint choice); @@ -26,22 +29,47 @@ class UberFilterFuzzer { void cleanFuzzedConfig(absl::string_view filter_name, Protobuf::Message* message); // Parses http or proto body into chunks. - std::vector parseHttpData(const test::fuzz::HttpData& data); + static std::vector parseHttpData(const test::fuzz::HttpData& data); + + // Templated functions to validate and send headers/data/trailers for decoders/encoders. + // General functions are deleted, but templated specializations for encoders/decoders are defined + // in the cc file. + template + Http::FilterHeadersStatus sendHeaders(FilterType* filter, const test::fuzz::HttpData& data, + bool end_stream) = delete; + + template + Http::FilterDataStatus sendData(FilterType* filter, Buffer::Instance& buffer, + bool end_stream) = delete; - // This executes the decode methods to be fuzzed. - void decode(Http::StreamDecoderFilter* filter, const test::fuzz::HttpData& data); + template + void sendTrailers(FilterType* filter, const test::fuzz::HttpData& data) = delete; void reset(); private: NiceMock factory_context_; - NiceMock callbacks_; NiceMock filter_callback_; std::shared_ptr resolver_{std::make_shared()}; - std::shared_ptr filter_; Http::FilterFactoryCb cb_; NiceMock connection_; Network::Address::InstanceConstSharedPtr addr_; + + // Mocked callbacks. + NiceMock decoder_callbacks_; + NiceMock encoder_callbacks_; + + // Filter constructed from the config. + Http::StreamDecoderFilterSharedPtr decoder_filter_; + Http::StreamEncoderFilterSharedPtr encoder_filter_; + + // Headers/trailers need to be saved for the lifetime of the the filter, + // so save them as member variables. + // TODO(nareddyt): Use for access logging in a followup PR. + Http::TestRequestHeaderMapImpl request_headers_; + Http::TestResponseHeaderMapImpl response_headers_; + Http::TestRequestTrailerMapImpl request_trailers_; + Http::TestResponseTrailerMapImpl response_trailers_; }; } // namespace HttpFilters diff --git a/test/extensions/filters/http/common/fuzz/uber_per_filter.cc b/test/extensions/filters/http/common/fuzz/uber_per_filter.cc index 353eea56f0be..ad6913bda5f0 100644 --- a/test/extensions/filters/http/common/fuzz/uber_per_filter.cc +++ b/test/extensions/filters/http/common/fuzz/uber_per_filter.cc @@ -83,10 +83,16 @@ void UberFilterFuzzer::perFilterSetup() { addr_ = std::make_shared("1.2.3.4", 1111); ON_CALL(connection_, remoteAddress()).WillByDefault(testing::ReturnRef(addr_)); ON_CALL(connection_, localAddress()).WillByDefault(testing::ReturnRef(addr_)); - ON_CALL(callbacks_, connection()).WillByDefault(testing::Return(&connection_)); - ON_CALL(callbacks_, activeSpan()) + + ON_CALL(decoder_callbacks_, connection()).WillByDefault(testing::Return(&connection_)); + ON_CALL(decoder_callbacks_, activeSpan()) + .WillByDefault(testing::ReturnRef(Tracing::NullSpan::instance())); + decoder_callbacks_.stream_info_.protocol_ = Envoy::Http::Protocol::Http2; + + ON_CALL(encoder_callbacks_, connection()).WillByDefault(testing::Return(&connection_)); + ON_CALL(encoder_callbacks_, activeSpan()) .WillByDefault(testing::ReturnRef(Tracing::NullSpan::instance())); - callbacks_.stream_info_.protocol_ = Envoy::Http::Protocol::Http2; + encoder_callbacks_.stream_info_.protocol_ = Envoy::Http::Protocol::Http2; // Prepare expectations for dynamic forward proxy. 
ON_CALL(factory_context_.dispatcher_, createDnsResolver(_, _)) diff --git a/test/fuzz/common.proto b/test/fuzz/common.proto index 7bc55e131457..7b8bc1f83c61 100644 --- a/test/fuzz/common.proto +++ b/test/fuzz/common.proto @@ -32,7 +32,7 @@ message ProtoBody { google.protobuf.Any message = 1 [(validate.rules).any.required = true]; // The size (in bytes) of each buffer when forming the requests. - uint64 chunk_size = 2 [(validate.rules).uint64.gt = 0]; + uint64 chunk_size = 2 [(validate.rules).uint64 = {gt: 0, lt: 8192}]; } message HttpData { diff --git a/test/fuzz/fuzz_runner.cc b/test/fuzz/fuzz_runner.cc index c7cbcdfa08b7..bda9446b39e9 100644 --- a/test/fuzz/fuzz_runner.cc +++ b/test/fuzz/fuzz_runner.cc @@ -52,7 +52,9 @@ void Runner::setupEnvironment(int argc, char** argv, spdlog::level::level_enum d // For fuzzing, this prevents logging when parsing text-format protos fails, // deprecated fields are used, etc. // https://github.com/protocolbuffers/protobuf/blob/204f99488ce1ef74565239cf3963111ae4c774b7/src/google/protobuf/stubs/logging.h#L223 - ABSL_ATTRIBUTE_UNUSED static auto* log_silencer = new Protobuf::LogSilencer(); + if (log_level_ > spdlog::level::debug) { + ABSL_ATTRIBUTE_UNUSED static auto* log_silencer = new Protobuf::LogSilencer(); + } } } // namespace Fuzz From 5fa0d0eaf8b67a11adf13726f4d1f7cd006df3af Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Wed, 20 May 2020 18:59:31 -0700 Subject: [PATCH 215/909] grpc-reverse: do nothing in encodeTrailers in disabled path (#11271) Fixed a bug that in route disabled grpc_http1_reverse_bridge case, encodeTrailers may add an empty gRPC frame at the end of response body, which causes failure of gRPC client. Risk Level: Low Testing: integration test Docs Changes: N/A Release Notes: N/A Fixes #9922 Signed-off-by: Lizan Zhou --- .../http/grpc_http1_reverse_bridge/filter.cc | 4 + .../http/grpc_http1_reverse_bridge/BUILD | 1 + .../reverse_bridge_integration_test.cc | 88 +++++++++++++++---- 3 files changed, 78 insertions(+), 15 deletions(-) diff --git a/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.cc b/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.cc index b6d0926b6fbf..33e88bcb8425 100644 --- a/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.cc +++ b/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.cc @@ -193,6 +193,10 @@ Http::FilterDataStatus Filter::encodeData(Buffer::Instance& buffer, bool end_str } Http::FilterTrailersStatus Filter::encodeTrailers(Http::ResponseTrailerMap& trailers) { + if (!enabled_) { + return Http::FilterTrailersStatus::Continue; + } + trailers.setGrpcStatus(grpc_status_); if (withhold_grpc_frames_) { diff --git a/test/extensions/filters/http/grpc_http1_reverse_bridge/BUILD b/test/extensions/filters/http/grpc_http1_reverse_bridge/BUILD index b84c405bee88..c3ce59f00bc7 100644 --- a/test/extensions/filters/http/grpc_http1_reverse_bridge/BUILD +++ b/test/extensions/filters/http/grpc_http1_reverse_bridge/BUILD @@ -37,6 +37,7 @@ envoy_extension_cc_test( "//test/integration:http_integration_lib", "//test/mocks/upstream:upstream_mocks", "//test/test_common:utility_lib", + "@envoy_api//envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_integration_test.cc b/test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_integration_test.cc index f774044bdf8a..5b6983569a4b 100644 --- 
a/test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_integration_test.cc +++ b/test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_integration_test.cc @@ -1,5 +1,7 @@ #include +#include "envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/config.pb.h" + #include "common/http/message_impl.h" #include "extensions/filters/http/well_known_names.h" @@ -12,6 +14,7 @@ #include "gtest/gtest.h" using Envoy::Http::HeaderValueOf; +using std::string_literals::operator""s; namespace Envoy { namespace { @@ -24,19 +27,28 @@ class ReverseBridgeIntegrationTest : public testing::TestWithParammutable_typed_per_filter_config())["envoy.filters.http.grpc_http1_reverse_bridge"] + .PackFrom(route_config); + config_helper_.addVirtualHost(vhost); + HttpIntegrationTest::initialize(); } @@ -45,6 +57,9 @@ name: grpc_http1_reverse_bridge fake_upstream_connection_.reset(); fake_upstreams_.clear(); } + +protected: + FakeHttpConnection::Type upstream_protocol_; }; INSTANTIATE_TEST_SUITE_P(IpVersions, ReverseBridgeIntegrationTest, @@ -53,7 +68,60 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, ReverseBridgeIntegrationTest, // Verifies that we don't do anything with the request when it's hitting a route that // doesn't enable the bridge. +// Regression test of https://github.com/envoyproxy/envoy/issues/9922 +TEST_P(ReverseBridgeIntegrationTest, DisabledRoute) { + upstream_protocol_ = FakeHttpConnection::Type::HTTP2; + initialize(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + + Http::TestRequestHeaderMapImpl request_headers({{":scheme", "http"}, + {":method", "POST"}, + {":authority", "disabled"}, + {":path", "/testing.ExampleService/Print"}, + {"content-type", "application/grpc"}}); + auto response = codec_client_->makeRequestWithBody(request_headers, "abcdef"); + + // Wait for upstream to finish the request. + ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); + + // Ensure that we don't do anything + EXPECT_EQ("abcdef", upstream_request_->body().toString()); + EXPECT_THAT(upstream_request_->headers(), + HeaderValueOf(Http::Headers::get().ContentType, "application/grpc")); + + // Respond to the request. 
+ Http::TestResponseHeaderMapImpl response_headers; + response_headers.setStatus(200); + response_headers.setContentType("application/grpc"); + upstream_request_->encodeHeaders(response_headers, false); + + Buffer::OwnedImpl response_data{"defgh"}; + upstream_request_->encodeData(response_data, false); + + Http::TestResponseTrailerMapImpl response_trailers; + response_trailers.setGrpcStatus(std::string("0")); + upstream_request_->encodeTrailers(response_trailers); + + response->waitForEndStream(); + EXPECT_TRUE(response->complete()); + + EXPECT_EQ(response->body(), response_data.toString()); + EXPECT_THAT(response->headers(), + HeaderValueOf(Http::Headers::get().ContentType, "application/grpc")); + EXPECT_THAT(*response->trailers(), HeaderValueOf(Http::Headers::get().GrpcStatus, "0")); + + codec_client_->close(); + ASSERT_TRUE(fake_upstream_connection_->close()); + ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); +} + TEST_P(ReverseBridgeIntegrationTest, EnabledRoute) { + upstream_protocol_ = FakeHttpConnection::Type::HTTP1; + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); Http::TestRequestHeaderMapImpl request_headers({{":scheme", "http"}, @@ -61,12 +129,8 @@ TEST_P(ReverseBridgeIntegrationTest, EnabledRoute) { {":authority", "foo"}, {":path", "/testing.ExampleService/Print"}, {"content-type", "application/grpc"}}); - auto encoder_decoder = codec_client_->startRequest(request_headers); - request_encoder_ = &encoder_decoder.first; - IntegrationStreamDecoderPtr response = std::move(encoder_decoder.second); - Buffer::OwnedImpl request_data{"abcdef"}; - codec_client_->sendData(*request_encoder_, request_data, true); + auto response = codec_client_->makeRequestWithBody(request_headers, "abcdef"); // Wait for upstream to finish the request. ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); @@ -88,11 +152,7 @@ TEST_P(ReverseBridgeIntegrationTest, EnabledRoute) { upstream_request_->encodeHeaders(response_headers, false); Buffer::OwnedImpl response_data{"defgh"}; - upstream_request_->encodeData(response_data, false); - - Http::TestResponseTrailerMapImpl response_trailers; - response_trailers.setGrpcStatus(std::string("0")); - upstream_request_->encodeTrailers(response_trailers); + upstream_request_->encodeData(response_data, true); response->waitForEndStream(); EXPECT_TRUE(response->complete()); @@ -103,8 +163,6 @@ TEST_P(ReverseBridgeIntegrationTest, EnabledRoute) { // Comparing strings embedded zero literals is hard. Use string literal and std::equal to avoid // truncating the string when it's converted to const char *. 
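// A minimal, self-contained sketch (separate from the test above, compilable on its own) of
// the pitfall that comment refers to: constructing a std::string from a plain const char*
// stops at the first embedded NUL byte, so a gRPC frame prefix such as "\0\0\0\0\4" collapses
// to an empty string, while the "s" string literal keeps all five bytes.
#include <cassert>
#include <string>

int main() {
  using namespace std::string_literals;
  const auto keeps_nuls = "\0\0\0\0\4"s;      // literal operator ""s: size() == 5
  const std::string truncated("\0\0\0\0\4");  // const char* constructor: empty string
  assert(keeps_nuls.size() == 5);
  assert(truncated.empty());
  return 0;
}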
- using namespace std::string_literals; - const auto expected_prefix = "\0\0\0\0\4"s; EXPECT_TRUE( std::equal(response->body().begin(), response->body().begin() + 4, expected_prefix.begin())); From 54dedadaa8aeac2963c7d47d46199ba3323527e5 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Thu, 21 May 2020 09:56:42 -0400 Subject: [PATCH 216/909] test: shorten delay close time from 1s to 100ns (#11260) Signed-off-by: Alyssa Wilk --- test/config/utility.cc | 2 ++ .../http/tap/tap_filter_integration_test.cc | 1 + test/integration/http2_integration_test.cc | 19 +++---------------- test/integration/integration_test.cc | 6 ++++++ 4 files changed, 12 insertions(+), 16 deletions(-) diff --git a/test/config/utility.cc b/test/config/utility.cc index 14a1bbce9386..f53924e1e3f7 100644 --- a/test/config/utility.cc +++ b/test/config/utility.cc @@ -128,6 +128,8 @@ std::string ConfigHelper::httpProxyConfig() { typed_config: "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager stat_prefix: config_test + delayed_close_timeout: + nanos: 100 http_filters: name: envoy.filters.http.router codec_type: HTTP1 diff --git a/test/extensions/filters/http/tap/tap_filter_integration_test.cc b/test/extensions/filters/http/tap/tap_filter_integration_test.cc index 048b369a7d28..61d01e68899f 100644 --- a/test/extensions/filters/http/tap/tap_filter_integration_test.cc +++ b/test/extensions/filters/http/tap/tap_filter_integration_test.cc @@ -308,6 +308,7 @@ config_id: test_config_id admin_client_->close(); EXPECT_EQ(3UL, test_server_->counter("http.config_test.tap.rq_tapped")->value()); + test_server_->waitForGaugeEq("http.admin.downstream_rq_active", 0); } // Verify both request and response trailer matching works. diff --git a/test/integration/http2_integration_test.cc b/test/integration/http2_integration_test.cc index 12a1d93711a2..48a3b54738df 100644 --- a/test/integration/http2_integration_test.cc +++ b/test/integration/http2_integration_test.cc @@ -1117,6 +1117,9 @@ TEST_P(Http2IntegrationTest, SimultaneousRequestWithBufferLimits) { // Test downstream connection delayed close processing. TEST_P(Http2IntegrationTest, DelayedCloseAfterBadFrame) { + config_helper_.addConfigModifier( + [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) { hcm.mutable_delayed_close_timeout()->set_nanos(1000 * 1000); }); initialize(); std::string response; @@ -1560,8 +1563,6 @@ void Http2FloodMitigationTest::floodServer(const Http2Frame& frame, const std::s EXPECT_LE(total_bytes_sent, TransmitThreshold) << "Flood mitigation is broken."; EXPECT_EQ(1, test_server_->counter(flood_stat)->value()); - EXPECT_EQ(1, - test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value()); } // Verify that the server detects the flood using specified request parameters. 
@@ -1585,8 +1586,6 @@ void Http2FloodMitigationTest::floodServer(absl::string_view host, absl::string_ if (!flood_stat.empty()) { EXPECT_EQ(1, test_server_->counter(flood_stat)->value()); } - EXPECT_EQ(1, - test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value()); } INSTANTIATE_TEST_SUITE_P(IpVersions, Http2FloodMitigationTest, @@ -1654,8 +1653,6 @@ TEST_P(Http2FloodMitigationTest, RST_STREAM) { } EXPECT_LE(total_bytes_sent, TransmitThreshold) << "Flood mitigation is broken."; EXPECT_EQ(1, test_server_->counter("http2.outbound_control_flood")->value()); - EXPECT_EQ(1, - test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value()); } // Verify that the server stop reading downstream connection on protocol error. @@ -1691,8 +1688,6 @@ TEST_P(Http2FloodMitigationTest, EmptyHeaders) { tcp_client_->waitForDisconnect(); EXPECT_EQ(1, test_server_->counter("http2.inbound_empty_frames_flood")->value()); - EXPECT_EQ(1, - test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value()); } TEST_P(Http2FloodMitigationTest, EmptyHeadersContinuation) { @@ -1710,8 +1705,6 @@ TEST_P(Http2FloodMitigationTest, EmptyHeadersContinuation) { tcp_client_->waitForDisconnect(); EXPECT_EQ(1, test_server_->counter("http2.inbound_empty_frames_flood")->value()); - EXPECT_EQ(1, - test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value()); } TEST_P(Http2FloodMitigationTest, EmptyData) { @@ -1730,8 +1723,6 @@ TEST_P(Http2FloodMitigationTest, EmptyData) { tcp_client_->waitForDisconnect(); EXPECT_EQ(1, test_server_->counter("http2.inbound_empty_frames_flood")->value()); - EXPECT_EQ(1, - test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value()); } TEST_P(Http2FloodMitigationTest, PriorityIdleStream) { @@ -1796,8 +1787,6 @@ TEST_P(Http2FloodMitigationTest, ZerolenHeader) { tcp_client_->waitForDisconnect(); EXPECT_EQ(1, test_server_->counter("http2.rx_messaging_error")->value()); - EXPECT_EQ(1, - test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value()); EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("http2.invalid.header.field")); // expect a downstream protocol error. EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("DPE")); @@ -1834,8 +1823,6 @@ TEST_P(Http2FloodMitigationTest, ZerolenHeaderAllowed) { tcp_client_->close(); EXPECT_EQ(1, test_server_->counter("http2.rx_messaging_error")->value()); - EXPECT_EQ(0, - test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value()); EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("http2.invalid.header.field")); // expect Downstream Protocol Error EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("DPE")); diff --git a/test/integration/integration_test.cc b/test/integration/integration_test.cc index 9a238319fff4..c782136f104b 100644 --- a/test/integration/integration_test.cc +++ b/test/integration/integration_test.cc @@ -970,6 +970,9 @@ TEST_P(IntegrationTest, ViaAppendWith100Continue) { // sent by Envoy, it will wait for response acknowledgment (via FIN/RST) from the client before // closing the socket (with a timeout for ensuring cleanup). 
TEST_P(IntegrationTest, TestDelayedConnectionTeardownOnGracefulClose) { + config_helper_.addConfigModifier( + [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) { hcm.mutable_delayed_close_timeout()->set_seconds(1); }); // This test will trigger an early 413 Payload Too Large response due to buffer limits being // exceeded. The following filter is needed since the router filter will never trigger a 413. config_helper_.addFilter("{ name: encoder-decoder-buffer-filter, typed_config: { \"@type\": " @@ -1213,6 +1216,9 @@ TEST_P(IntegrationTest, TestFlood) { } TEST_P(IntegrationTest, TestFloodUpstreamErrors) { + config_helper_.addConfigModifier( + [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) { hcm.mutable_delayed_close_timeout()->set_seconds(1); }); autonomous_upstream_ = true; initialize(); From ae1f15c966dff7a9c5e8b426fb712dedb72d8cd4 Mon Sep 17 00:00:00 2001 From: Caleb Gilmour Date: Fri, 22 May 2020 03:14:07 +1200 Subject: [PATCH 217/909] tracers: report envoy version in datadog tracer (#11280) Signed-off-by: Caleb Gilmour --- source/extensions/tracers/datadog/BUILD | 1 + source/extensions/tracers/datadog/datadog_tracer_impl.cc | 2 ++ 2 files changed, 3 insertions(+) diff --git a/source/extensions/tracers/datadog/BUILD b/source/extensions/tracers/datadog/BUILD index ea38e6dfc778..95cc7d74a212 100644 --- a/source/extensions/tracers/datadog/BUILD +++ b/source/extensions/tracers/datadog/BUILD @@ -21,6 +21,7 @@ envoy_cc_library( ], external_deps = ["dd_opentracing_cpp"], deps = [ + "//source/common/common:version_lib", "//source/common/config:utility_lib", "//source/common/http:async_client_utility_lib", "//source/common/tracing:http_tracer_lib", diff --git a/source/extensions/tracers/datadog/datadog_tracer_impl.cc b/source/extensions/tracers/datadog/datadog_tracer_impl.cc index 8974471790f7..1f56b1718528 100644 --- a/source/extensions/tracers/datadog/datadog_tracer_impl.cc +++ b/source/extensions/tracers/datadog/datadog_tracer_impl.cc @@ -5,6 +5,7 @@ #include "common/common/enum_to_int.h" #include "common/common/fmt.h" #include "common/common/utility.h" +#include "common/common/version.h" #include "common/config/utility.h" #include "common/http/message_impl.h" #include "common/http/utility.h" @@ -34,6 +35,7 @@ Driver::Driver(const envoy::config::trace::v3::DatadogConfig& datadog_config, cluster_ = datadog_config.collector_cluster(); // Default tracer options. + tracer_options_.version = absl::StrCat("envoy ", Envoy::VersionInfo::version()); tracer_options_.operation_name_override = "envoy.proxy"; tracer_options_.service = "envoy"; tracer_options_.inject = std::set{ From 77e436f9eb39863a4a425bbca9026c86740b36cd Mon Sep 17 00:00:00 2001 From: Rohan Seth Date: Thu, 21 May 2020 08:17:26 -0700 Subject: [PATCH 218/909] ratelimit: allow header descriptors to be skipped (#11153) Resolves #10124 indirectly by adding an extra config flag to RequestHeaders through which it is possible for descriptors to be sent on a partial match. 
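As a rough, self-contained sketch of the rule this change introduces (simplified, hypothetical
stand-ins; not the actual Envoy Router::RequestHeadersAction class or its types), descriptor
population now only abandons the descriptor for a missing request header when skip_if_absent
is false:

#include <map>
#include <string>
#include <vector>

// Hypothetical substitutes for Envoy's header map and descriptor entry types.
using Headers = std::map<std::string, std::string>;
struct DescriptorEntry {
  std::string key;
  std::string value;
};

// Returns false when the whole descriptor should be dropped (so the rate limit
// service is not called with it); returns true otherwise.
bool populateFromRequestHeader(const Headers& headers, const std::string& header_name,
                               const std::string& descriptor_key, bool skip_if_absent,
                               std::vector<DescriptorEntry>& entries) {
  const auto it = headers.find(header_name);
  if (it == headers.end()) {
    // Header absent: the descriptor survives (without this entry) only when
    // skip_if_absent is true; the default (false) drops it, preserving the old behavior.
    return skip_if_absent;
  }
  entries.push_back({descriptor_key, it->second});
  return true;
}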
Signed-off-by: Rohan Seth --- .../config/route/v3/route_components.proto | 5 ++ .../route/v4alpha/route_components.proto | 5 ++ docs/root/version_history/current.rst | 1 + .../config/route/v3/route_components.proto | 5 ++ .../route/v4alpha/route_components.proto | 5 ++ source/common/router/router_ratelimit.cc | 7 ++- source/common/router/router_ratelimit.h | 4 +- test/common/router/router_ratelimit_test.cc | 47 +++++++++++++++++++ 8 files changed, 76 insertions(+), 3 deletions(-) diff --git a/api/envoy/config/route/v3/route_components.proto b/api/envoy/config/route/v3/route_components.proto index 9be58f9681c5..f927f582bd1c 100644 --- a/api/envoy/config/route/v3/route_components.proto +++ b/api/envoy/config/route/v3/route_components.proto @@ -1395,6 +1395,11 @@ message RateLimit { // The key to use in the descriptor entry. string descriptor_key = 2 [(validate.rules).string = {min_bytes: 1}]; + + // If set to true, Envoy skips the descriptor while calling rate limiting service + // when header is not present in the request. By default it skips calling the + // rate limiting service if this header is not present in the request. + bool skip_if_absent = 3; } // The following descriptor entry is appended to the descriptor and is populated using the diff --git a/api/envoy/config/route/v4alpha/route_components.proto b/api/envoy/config/route/v4alpha/route_components.proto index 7b49aca53803..b23efe34e77b 100644 --- a/api/envoy/config/route/v4alpha/route_components.proto +++ b/api/envoy/config/route/v4alpha/route_components.proto @@ -1376,6 +1376,11 @@ message RateLimit { // The key to use in the descriptor entry. string descriptor_key = 2 [(validate.rules).string = {min_bytes: 1}]; + + // If set to true, Envoy skips the descriptor while calling rate limiting service + // when header is not present in the request. By default it skips calling the + // rate limiting service if this header is not present in the request. + bool skip_if_absent = 3; } // The following descriptor entry is appended to the descriptor and is populated using the diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 6c29d63ce8b2..befbc5ef6dd1 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -75,6 +75,7 @@ New Features tracing is not forced. * router: add support for RESPONSE_FLAGS and RESPONSE_CODE_DETAILS :ref:`header formatters `. +* router: allow Rate Limiting Service to be called in case of missing request header for a descriptor if the :ref:`skip_if_absent ` field is set to true. * router: more fine grained internal redirect configs are added to the :ref`internal_redirect_policy ` field. * runtime: add new gauge :ref:`deprecated_feature_seen_since_process_start ` that gets reset across hot restarts. diff --git a/generated_api_shadow/envoy/config/route/v3/route_components.proto b/generated_api_shadow/envoy/config/route/v3/route_components.proto index f94f2c2bb3e5..76c75d647c56 100644 --- a/generated_api_shadow/envoy/config/route/v3/route_components.proto +++ b/generated_api_shadow/envoy/config/route/v3/route_components.proto @@ -1407,6 +1407,11 @@ message RateLimit { // The key to use in the descriptor entry. string descriptor_key = 2 [(validate.rules).string = {min_bytes: 1}]; + + // If set to true, Envoy skips the descriptor while calling rate limiting service + // when header is not present in the request. By default it skips calling the + // rate limiting service if this header is not present in the request. 
+ bool skip_if_absent = 3; } // The following descriptor entry is appended to the descriptor and is populated using the diff --git a/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto b/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto index 55718de65a9c..15a7ee7050b7 100644 --- a/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto +++ b/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto @@ -1404,6 +1404,11 @@ message RateLimit { // The key to use in the descriptor entry. string descriptor_key = 2 [(validate.rules).string = {min_bytes: 1}]; + + // If set to true, Envoy skips the descriptor while calling rate limiting service + // when header is not present in the request. By default it skips calling the + // rate limiting service if this header is not present in the request. + bool skip_if_absent = 3; } // The following descriptor entry is appended to the descriptor and is populated using the diff --git a/source/common/router/router_ratelimit.cc b/source/common/router/router_ratelimit.cc index 259834270e77..c883ce894210 100644 --- a/source/common/router/router_ratelimit.cc +++ b/source/common/router/router_ratelimit.cc @@ -38,10 +38,13 @@ bool RequestHeadersAction::populateDescriptor(const Router::RouteEntry&, const Http::HeaderMap& headers, const Network::Address::Instance&) const { const Http::HeaderEntry* header_value = headers.get(header_name_); + + // If header is not present in the request and if skip_if_absent is true skip this descriptor, + // while calling rate limiting service. If skip_if_absent is false, do not call rate limiting + // service. if (!header_value) { - return false; + return skip_if_absent_; } - descriptor.entries_.push_back( {descriptor_key_, std::string(header_value->value().getStringView())}); return true; diff --git a/source/common/router/router_ratelimit.h b/source/common/router/router_ratelimit.h index 0d7826f67be9..df42e898952b 100644 --- a/source/common/router/router_ratelimit.h +++ b/source/common/router/router_ratelimit.h @@ -42,7 +42,8 @@ class DestinationClusterAction : public RateLimitAction { class RequestHeadersAction : public RateLimitAction { public: RequestHeadersAction(const envoy::config::route::v3::RateLimit::Action::RequestHeaders& action) - : header_name_(action.header_name()), descriptor_key_(action.descriptor_key()) {} + : header_name_(action.header_name()), descriptor_key_(action.descriptor_key()), + skip_if_absent_(action.skip_if_absent()) {} // Router::RateLimitAction bool populateDescriptor(const Router::RouteEntry& route, RateLimit::Descriptor& descriptor, @@ -52,6 +53,7 @@ class RequestHeadersAction : public RateLimitAction { private: const Http::LowerCaseString header_name_; const std::string descriptor_key_; + const bool skip_if_absent_; }; /** diff --git a/test/common/router/router_ratelimit_test.cc b/test/common/router/router_ratelimit_test.cc index 1205fe0707f5..20954ec61ffb 100644 --- a/test/common/router/router_ratelimit_test.cc +++ b/test/common/router/router_ratelimit_test.cc @@ -366,6 +366,53 @@ TEST_F(RateLimitPolicyEntryTest, RequestHeaders) { testing::ContainerEq(descriptors_)); } +// Validate that a descriptor is added if the missing request header +// has skip_if_absent set to true +TEST_F(RateLimitPolicyEntryTest, RequestHeadersWithSkipIfAbsent) { + const std::string yaml = R"EOF( +actions: +- request_headers: + header_name: x-header-name + descriptor_key: my_header_name + skip_if_absent: false +- request_headers: + header_name: x-header + 
descriptor_key: my_header + skip_if_absent: true + )EOF"; + + setupTest(yaml); + Http::TestHeaderMapImpl header{{"x-header-name", "test_value"}}; + + rate_limit_entry_->populateDescriptors(route_, descriptors_, "service_cluster", header, + default_remote_address_); + EXPECT_THAT(std::vector({{{{"my_header_name", "test_value"}}}}), + testing::ContainerEq(descriptors_)); +} + +// Tests if the descriptors are added if one of the headers is missing +// and skip_if_absent is set to default value which is false +TEST_F(RateLimitPolicyEntryTest, RequestHeadersWithDefaultSkipIfAbsent) { + const std::string yaml = R"EOF( +actions: +- request_headers: + header_name: x-header-name + descriptor_key: my_header_name + skip_if_absent: false +- request_headers: + header_name: x-header + descriptor_key: my_header + skip_if_absent: false + )EOF"; + + setupTest(yaml); + Http::TestHeaderMapImpl header{{"x-header-test", "test_value"}}; + + rate_limit_entry_->populateDescriptors(route_, descriptors_, "service_cluster", header, + default_remote_address_); + EXPECT_TRUE(descriptors_.empty()); +} + TEST_F(RateLimitPolicyEntryTest, RequestHeadersNoMatch) { const std::string yaml = R"EOF( actions: From 639c2268460e5468572d3074c4cd34456fe60a9d Mon Sep 17 00:00:00 2001 From: sanjaypujare Date: Thu, 21 May 2020 10:15:46 -0700 Subject: [PATCH 219/909] api: enhance v3 CommonTlsContext for agentless support (#11061) Signed-off-by: Sanjay Pujare --- .../transport_sockets/tls/v3/tls.proto | 55 ++++++++++++++++-- .../transport_sockets/tls/v4alpha/tls.proto | 56 +++++++++++++++++-- .../transport_sockets/tls/v3/tls.proto | 55 ++++++++++++++++-- .../transport_sockets/tls/v4alpha/tls.proto | 56 +++++++++++++++++-- 4 files changed, 198 insertions(+), 24 deletions(-) diff --git a/api/envoy/extensions/transport_sockets/tls/v3/tls.proto b/api/envoy/extensions/transport_sockets/tls/v3/tls.proto index a6fc2d62b97c..1806a44666e5 100644 --- a/api/envoy/extensions/transport_sockets/tls/v3/tls.proto +++ b/api/envoy/extensions/transport_sockets/tls/v3/tls.proto @@ -2,12 +2,15 @@ syntax = "proto3"; package envoy.extensions.transport_sockets.tls.v3; +import "envoy/config/core/v3/extension.proto"; import "envoy/extensions/transport_sockets/tls/v3/common.proto"; import "envoy/extensions/transport_sockets/tls/v3/secret.proto"; +import "google/protobuf/any.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -96,10 +99,30 @@ message DownstreamTlsContext { } // TLS context shared by both client and server TLS contexts. -// [#next-free-field: 9] +// [#next-free-field: 11] message CommonTlsContext { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.CommonTlsContext"; + // Config for Certificate provider to get certificates. This provider should allow certificates to be + // fetched/refreshed over the network asynchronously with respect to the TLS handshake. + message CertificateProvider { + // opaque name used to specify certificate instances or types. For example, "ROOTCA" to specify + // a root-certificate (validation context) or "TLS" to specify a new tls-certificate. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Provider specific config. + // Note: an implementation is expected to dedup multiple instances of the same config + // to maintain a single certificate-provider instance. 
The sharing can happen, for + // example, among multiple clusters or between the tls_certificate and validation_context + // certificate providers of a cluster. + // This config could be supplied inline or (in future) a named xDS resource. + oneof config { + option (validate.required) = true; + + config.core.v3.TypedExtensionConfig typed_config = 2; + } + } + message CombinedCertificateValidationContext { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.CommonTlsContext.CombinedCertificateValidationContext"; @@ -108,9 +131,19 @@ message CommonTlsContext { CertificateValidationContext default_validation_context = 1 [(validate.rules).message = {required: true}]; - // Config for fetching validation context via SDS API. - SdsSecretConfig validation_context_sds_secret_config = 2 - [(validate.rules).message = {required: true}]; + // Config for fetching validation context via SDS API. Note SDS API allows certificates to be + // fetched/refreshed over the network asynchronously with respect to the TLS handshake. + // Only to be used when validation_context_certificate_provider is not used. + SdsSecretConfig validation_context_sds_secret_config = 2 [ + (validate.rules).message = {required: true}, + (udpa.annotations.field_migrate).oneof_promotion = "dynamic_validation_context" + ]; + + // Certificate provider for fetching validation context - only to be used when + // validation_context_sds_secret_config is not used. + // [#not-implemented-hide:] + CertificateProvider validation_context_certificate_provider = 3 + [(udpa.annotations.field_migrate).oneof_promotion = "dynamic_validation_context"]; } reserved 5; @@ -126,15 +159,21 @@ message CommonTlsContext { // used for clients that support ECDSA. repeated TlsCertificate tls_certificates = 2; - // Configs for fetching TLS certificates via SDS API. + // Configs for fetching TLS certificates via SDS API. Note SDS API allows certificates to be + // fetched/refreshed over the network asynchronously with respect to the TLS handshake. repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6 [(validate.rules).repeated = {max_items: 1}]; + // Certificate provider for fetching TLS certificates. + // [#not-implemented-hide:] + CertificateProvider tls_certificate_certificate_provider = 9; + oneof validation_context_type { // How to validate peer certificates. CertificateValidationContext validation_context = 3; - // Config for fetching validation context via SDS API. + // Config for fetching validation context via SDS API. Note SDS API allows certificates to be + // fetched/refreshed over the network asynchronously with respect to the TLS handshake. SdsSecretConfig validation_context_sds_secret_config = 7; // Combined certificate validation context holds a default CertificateValidationContext @@ -145,6 +184,10 @@ message CommonTlsContext { // CertificateValidationContext, and concatenates repeated fields to default // CertificateValidationContext, and logical OR is applied to boolean fields. CombinedCertificateValidationContext combined_validation_context = 8; + + // Certificate provider for fetching validation context. + // [#not-implemented-hide:] + CertificateProvider validation_context_certificate_provider = 10; } // Supplies the list of ALPN protocols that the listener should expose. 
In diff --git a/api/envoy/extensions/transport_sockets/tls/v4alpha/tls.proto b/api/envoy/extensions/transport_sockets/tls/v4alpha/tls.proto index 8797f36db18f..d8cf226afbcd 100644 --- a/api/envoy/extensions/transport_sockets/tls/v4alpha/tls.proto +++ b/api/envoy/extensions/transport_sockets/tls/v4alpha/tls.proto @@ -2,9 +2,11 @@ syntax = "proto3"; package envoy.extensions.transport_sockets.tls.v4alpha; +import "envoy/config/core/v4alpha/extension.proto"; import "envoy/extensions/transport_sockets/tls/v4alpha/common.proto"; import "envoy/extensions/transport_sockets/tls/v4alpha/secret.proto"; +import "google/protobuf/any.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; @@ -96,11 +98,34 @@ message DownstreamTlsContext { } // TLS context shared by both client and server TLS contexts. -// [#next-free-field: 9] +// [#next-free-field: 11] message CommonTlsContext { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.transport_sockets.tls.v3.CommonTlsContext"; + // Config for Certificate provider to get certificates. This provider should allow certificates to be + // fetched/refreshed over the network asynchronously with respect to the TLS handshake. + message CertificateProvider { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProvider"; + + // opaque name used to specify certificate instances or types. For example, "ROOTCA" to specify + // a root-certificate (validation context) or "TLS" to specify a new tls-certificate. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Provider specific config. + // Note: an implementation is expected to dedup multiple instances of the same config + // to maintain a single certificate-provider instance. The sharing can happen, for + // example, among multiple clusters or between the tls_certificate and validation_context + // certificate providers of a cluster. + // This config could be supplied inline or (in future) a named xDS resource. + oneof config { + option (validate.required) = true; + + config.core.v4alpha.TypedExtensionConfig typed_config = 2; + } + } + message CombinedCertificateValidationContext { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.transport_sockets.tls.v3.CommonTlsContext." @@ -110,9 +135,18 @@ message CommonTlsContext { CertificateValidationContext default_validation_context = 1 [(validate.rules).message = {required: true}]; - // Config for fetching validation context via SDS API. - SdsSecretConfig validation_context_sds_secret_config = 2 - [(validate.rules).message = {required: true}]; + oneof dynamic_validation_context { + // Config for fetching validation context via SDS API. Note SDS API allows certificates to be + // fetched/refreshed over the network asynchronously with respect to the TLS handshake. + // Only to be used when validation_context_certificate_provider is not used. + SdsSecretConfig validation_context_sds_secret_config = 2 + [(validate.rules).message = {required: true}]; + + // Certificate provider for fetching validation context - only to be used when + // validation_context_sds_secret_config is not used. + // [#not-implemented-hide:] + CertificateProvider validation_context_certificate_provider = 3; + } } reserved 5; @@ -128,15 +162,21 @@ message CommonTlsContext { // used for clients that support ECDSA. repeated TlsCertificate tls_certificates = 2; - // Configs for fetching TLS certificates via SDS API. 
+ // Configs for fetching TLS certificates via SDS API. Note SDS API allows certificates to be + // fetched/refreshed over the network asynchronously with respect to the TLS handshake. repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6 [(validate.rules).repeated = {max_items: 1}]; + // Certificate provider for fetching TLS certificates. + // [#not-implemented-hide:] + CertificateProvider tls_certificate_certificate_provider = 9; + oneof validation_context_type { // How to validate peer certificates. CertificateValidationContext validation_context = 3; - // Config for fetching validation context via SDS API. + // Config for fetching validation context via SDS API. Note SDS API allows certificates to be + // fetched/refreshed over the network asynchronously with respect to the TLS handshake. SdsSecretConfig validation_context_sds_secret_config = 7; // Combined certificate validation context holds a default CertificateValidationContext @@ -147,6 +187,10 @@ message CommonTlsContext { // CertificateValidationContext, and concatenates repeated fields to default // CertificateValidationContext, and logical OR is applied to boolean fields. CombinedCertificateValidationContext combined_validation_context = 8; + + // Certificate provider for fetching validation context. + // [#not-implemented-hide:] + CertificateProvider validation_context_certificate_provider = 10; } // Supplies the list of ALPN protocols that the listener should expose. In diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/tls.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/tls.proto index a6fc2d62b97c..1806a44666e5 100644 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/tls.proto +++ b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/tls.proto @@ -2,12 +2,15 @@ syntax = "proto3"; package envoy.extensions.transport_sockets.tls.v3; +import "envoy/config/core/v3/extension.proto"; import "envoy/extensions/transport_sockets/tls/v3/common.proto"; import "envoy/extensions/transport_sockets/tls/v3/secret.proto"; +import "google/protobuf/any.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -96,10 +99,30 @@ message DownstreamTlsContext { } // TLS context shared by both client and server TLS contexts. -// [#next-free-field: 9] +// [#next-free-field: 11] message CommonTlsContext { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.CommonTlsContext"; + // Config for Certificate provider to get certificates. This provider should allow certificates to be + // fetched/refreshed over the network asynchronously with respect to the TLS handshake. + message CertificateProvider { + // opaque name used to specify certificate instances or types. For example, "ROOTCA" to specify + // a root-certificate (validation context) or "TLS" to specify a new tls-certificate. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Provider specific config. + // Note: an implementation is expected to dedup multiple instances of the same config + // to maintain a single certificate-provider instance. The sharing can happen, for + // example, among multiple clusters or between the tls_certificate and validation_context + // certificate providers of a cluster. 
+ // This config could be supplied inline or (in future) a named xDS resource. + oneof config { + option (validate.required) = true; + + config.core.v3.TypedExtensionConfig typed_config = 2; + } + } + message CombinedCertificateValidationContext { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.CommonTlsContext.CombinedCertificateValidationContext"; @@ -108,9 +131,19 @@ message CommonTlsContext { CertificateValidationContext default_validation_context = 1 [(validate.rules).message = {required: true}]; - // Config for fetching validation context via SDS API. - SdsSecretConfig validation_context_sds_secret_config = 2 - [(validate.rules).message = {required: true}]; + // Config for fetching validation context via SDS API. Note SDS API allows certificates to be + // fetched/refreshed over the network asynchronously with respect to the TLS handshake. + // Only to be used when validation_context_certificate_provider is not used. + SdsSecretConfig validation_context_sds_secret_config = 2 [ + (validate.rules).message = {required: true}, + (udpa.annotations.field_migrate).oneof_promotion = "dynamic_validation_context" + ]; + + // Certificate provider for fetching validation context - only to be used when + // validation_context_sds_secret_config is not used. + // [#not-implemented-hide:] + CertificateProvider validation_context_certificate_provider = 3 + [(udpa.annotations.field_migrate).oneof_promotion = "dynamic_validation_context"]; } reserved 5; @@ -126,15 +159,21 @@ message CommonTlsContext { // used for clients that support ECDSA. repeated TlsCertificate tls_certificates = 2; - // Configs for fetching TLS certificates via SDS API. + // Configs for fetching TLS certificates via SDS API. Note SDS API allows certificates to be + // fetched/refreshed over the network asynchronously with respect to the TLS handshake. repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6 [(validate.rules).repeated = {max_items: 1}]; + // Certificate provider for fetching TLS certificates. + // [#not-implemented-hide:] + CertificateProvider tls_certificate_certificate_provider = 9; + oneof validation_context_type { // How to validate peer certificates. CertificateValidationContext validation_context = 3; - // Config for fetching validation context via SDS API. + // Config for fetching validation context via SDS API. Note SDS API allows certificates to be + // fetched/refreshed over the network asynchronously with respect to the TLS handshake. SdsSecretConfig validation_context_sds_secret_config = 7; // Combined certificate validation context holds a default CertificateValidationContext @@ -145,6 +184,10 @@ message CommonTlsContext { // CertificateValidationContext, and concatenates repeated fields to default // CertificateValidationContext, and logical OR is applied to boolean fields. CombinedCertificateValidationContext combined_validation_context = 8; + + // Certificate provider for fetching validation context. + // [#not-implemented-hide:] + CertificateProvider validation_context_certificate_provider = 10; } // Supplies the list of ALPN protocols that the listener should expose. 
In diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/tls.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/tls.proto index 8797f36db18f..d8cf226afbcd 100644 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/tls.proto +++ b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/tls.proto @@ -2,9 +2,11 @@ syntax = "proto3"; package envoy.extensions.transport_sockets.tls.v4alpha; +import "envoy/config/core/v4alpha/extension.proto"; import "envoy/extensions/transport_sockets/tls/v4alpha/common.proto"; import "envoy/extensions/transport_sockets/tls/v4alpha/secret.proto"; +import "google/protobuf/any.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; @@ -96,11 +98,34 @@ message DownstreamTlsContext { } // TLS context shared by both client and server TLS contexts. -// [#next-free-field: 9] +// [#next-free-field: 11] message CommonTlsContext { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.transport_sockets.tls.v3.CommonTlsContext"; + // Config for Certificate provider to get certificates. This provider should allow certificates to be + // fetched/refreshed over the network asynchronously with respect to the TLS handshake. + message CertificateProvider { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProvider"; + + // opaque name used to specify certificate instances or types. For example, "ROOTCA" to specify + // a root-certificate (validation context) or "TLS" to specify a new tls-certificate. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Provider specific config. + // Note: an implementation is expected to dedup multiple instances of the same config + // to maintain a single certificate-provider instance. The sharing can happen, for + // example, among multiple clusters or between the tls_certificate and validation_context + // certificate providers of a cluster. + // This config could be supplied inline or (in future) a named xDS resource. + oneof config { + option (validate.required) = true; + + config.core.v4alpha.TypedExtensionConfig typed_config = 2; + } + } + message CombinedCertificateValidationContext { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.transport_sockets.tls.v3.CommonTlsContext." @@ -110,9 +135,18 @@ message CommonTlsContext { CertificateValidationContext default_validation_context = 1 [(validate.rules).message = {required: true}]; - // Config for fetching validation context via SDS API. - SdsSecretConfig validation_context_sds_secret_config = 2 - [(validate.rules).message = {required: true}]; + oneof dynamic_validation_context { + // Config for fetching validation context via SDS API. Note SDS API allows certificates to be + // fetched/refreshed over the network asynchronously with respect to the TLS handshake. + // Only to be used when validation_context_certificate_provider is not used. + SdsSecretConfig validation_context_sds_secret_config = 2 + [(validate.rules).message = {required: true}]; + + // Certificate provider for fetching validation context - only to be used when + // validation_context_sds_secret_config is not used. + // [#not-implemented-hide:] + CertificateProvider validation_context_certificate_provider = 3; + } } reserved 5; @@ -128,15 +162,21 @@ message CommonTlsContext { // used for clients that support ECDSA. 
repeated TlsCertificate tls_certificates = 2; - // Configs for fetching TLS certificates via SDS API. + // Configs for fetching TLS certificates via SDS API. Note SDS API allows certificates to be + // fetched/refreshed over the network asynchronously with respect to the TLS handshake. repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6 [(validate.rules).repeated = {max_items: 1}]; + // Certificate provider for fetching TLS certificates. + // [#not-implemented-hide:] + CertificateProvider tls_certificate_certificate_provider = 9; + oneof validation_context_type { // How to validate peer certificates. CertificateValidationContext validation_context = 3; - // Config for fetching validation context via SDS API. + // Config for fetching validation context via SDS API. Note SDS API allows certificates to be + // fetched/refreshed over the network asynchronously with respect to the TLS handshake. SdsSecretConfig validation_context_sds_secret_config = 7; // Combined certificate validation context holds a default CertificateValidationContext @@ -147,6 +187,10 @@ message CommonTlsContext { // CertificateValidationContext, and concatenates repeated fields to default // CertificateValidationContext, and logical OR is applied to boolean fields. CombinedCertificateValidationContext combined_validation_context = 8; + + // Certificate provider for fetching validation context. + // [#not-implemented-hide:] + CertificateProvider validation_context_certificate_provider = 10; } // Supplies the list of ALPN protocols that the listener should expose. In From a18611c9a81a87b49470846eff1f62cc8507043b Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Thu, 21 May 2020 13:21:40 -0700 Subject: [PATCH 220/909] bazelci: no rbe for coverage (#11272) Signed-off-by: Lizan Zhou --- .bazelci/presubmit.yml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/.bazelci/presubmit.yml b/.bazelci/presubmit.yml index 3cdb1ad2c30a..196fca8c5b4e 100644 --- a/.bazelci/presubmit.yml +++ b/.bazelci/presubmit.yml @@ -21,7 +21,6 @@ tasks: - "//test/integration/..." - "//test/exe/..." test_flags: - - "--config=remote-clang" - - "--config=remote-ci" + - "--action_env=CC=clang" + - "--action_env=CXX=clang++" - "--config=coverage" - - "--jobs=75" From 440899714143b6a143917cbd8e3f0ccba0847cd4 Mon Sep 17 00:00:00 2001 From: Dmitri Dolguikh Date: Thu, 21 May 2020 16:25:32 -0700 Subject: [PATCH 221/909] xds: warning is logged when v2 xDS is used (#10964) Signed-off-by: Dmitri Dolguikh --- docs/root/version_history/current.rst | 1 + include/envoy/runtime/runtime.h | 5 +++ source/common/config/BUILD | 1 + .../config/subscription_factory_impl.cc | 13 +++++-- .../common/config/subscription_factory_impl.h | 8 +++-- source/common/runtime/runtime_features.cc | 1 + source/common/runtime/runtime_impl.cc | 11 +++--- source/common/runtime/runtime_impl.h | 1 + .../common/upstream/cluster_manager_impl.cc | 2 +- test/common/config/BUILD | 1 + .../config/subscription_factory_impl_test.cc | 36 ++++++++++++++++--- test/mocks/runtime/mocks.h | 1 + 12 files changed, 67 insertions(+), 14 deletions(-) diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index befbc5ef6dd1..e3b0f62b80e1 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -102,3 +102,4 @@ Deprecated :ref:`safe_cross_scheme`, in :ref:`predicates `. * File access logger fields :ref:`format `, :ref:`json_format ` and :ref:`typed_json_format ` are deprecated in favor of :ref:`log_format `. 
+* A warning is now logged when v2 xDS api is used. This behavior can be temporarily disabled by setting `envoy.reloadable_features.enable_deprecated_v2_api_warning` to `false`. diff --git a/include/envoy/runtime/runtime.h b/include/envoy/runtime/runtime.h index 52abc0e50616..f68b67a0ae56 100644 --- a/include/envoy/runtime/runtime.h +++ b/include/envoy/runtime/runtime.h @@ -104,6 +104,11 @@ class Snapshot { using OverrideLayerConstPtr = std::unique_ptr; + /** + * Updates deprecated feature use stats. + */ + virtual void countDeprecatedFeatureUse() const PURE; + /** * Returns true if a deprecated feature is allowed. * diff --git a/source/common/config/BUILD b/source/common/config/BUILD index 611a8cd791cc..12042dee180a 100644 --- a/source/common/config/BUILD +++ b/source/common/config/BUILD @@ -282,6 +282,7 @@ envoy_cc_library( "//include/envoy/config:subscription_factory_interface", "//include/envoy/config:subscription_interface", "//include/envoy/upstream:cluster_manager_interface", + "//source/common/common:minimal_logger_lib", "//source/common/protobuf", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], diff --git a/source/common/config/subscription_factory_impl.cc b/source/common/config/subscription_factory_impl.cc index 96ce4e07a81d..1851b8a486f6 100644 --- a/source/common/config/subscription_factory_impl.cc +++ b/source/common/config/subscription_factory_impl.cc @@ -17,9 +17,9 @@ namespace Config { SubscriptionFactoryImpl::SubscriptionFactoryImpl( const LocalInfo::LocalInfo& local_info, Event::Dispatcher& dispatcher, Upstream::ClusterManager& cm, Runtime::RandomGenerator& random, - ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api) + ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api, Runtime::Loader& runtime) : local_info_(local_info), dispatcher_(dispatcher), cm_(cm), random_(random), - validation_visitor_(validation_visitor), api_(api) {} + validation_visitor_(validation_visitor), api_(api), runtime_(runtime) {} SubscriptionPtr SubscriptionFactoryImpl::subscriptionFromConfigSource( const envoy::config::core::v3::ConfigSource& config, absl::string_view type_url, @@ -28,6 +28,15 @@ SubscriptionPtr SubscriptionFactoryImpl::subscriptionFromConfigSource( std::unique_ptr result; SubscriptionStats stats = Utility::generateStats(scope); + const auto transport_api_version = config.api_config_source().transport_api_version(); + if (transport_api_version == envoy::config::core::v3::ApiVersion::V2 && + runtime_.snapshot().runtimeFeatureEnabled( + "envoy.reloadable_features.enable_deprecated_v2_api_warning")) { + runtime_.snapshot().countDeprecatedFeatureUse(); + ENVOY_LOG(warn, + "xDS of version v2 has been deprecated and will be removed in subsequent versions"); + } + switch (config.config_source_specifier_case()) { case envoy::config::core::v3::ConfigSource::ConfigSourceSpecifierCase::kPath: { Utility::checkFilesystemSubscriptionBackingPath(config.path(), api_); diff --git a/source/common/config/subscription_factory_impl.h b/source/common/config/subscription_factory_impl.h index 8d31b8682aa8..28e459ad20e2 100644 --- a/source/common/config/subscription_factory_impl.h +++ b/source/common/config/subscription_factory_impl.h @@ -7,14 +7,17 @@ #include "envoy/stats/scope.h" #include "envoy/upstream/cluster_manager.h" +#include "common/common/logger.h" + namespace Envoy { namespace Config { -class SubscriptionFactoryImpl : public SubscriptionFactory { +class SubscriptionFactoryImpl : public SubscriptionFactory, Logger::Loggable { public: 
SubscriptionFactoryImpl(const LocalInfo::LocalInfo& local_info, Event::Dispatcher& dispatcher, Upstream::ClusterManager& cm, Runtime::RandomGenerator& random, - ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api); + ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api, + Runtime::Loader& runtime); // Config::SubscriptionFactory SubscriptionPtr subscriptionFromConfigSource(const envoy::config::core::v3::ConfigSource& config, @@ -28,6 +31,7 @@ class SubscriptionFactoryImpl : public SubscriptionFactory { Runtime::RandomGenerator& random_; ProtobufMessage::ValidationVisitor& validation_visitor_; Api::Api& api_; + Runtime::Loader& runtime_; }; } // namespace Config diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc index 661e58d920ac..48579aed53b0 100644 --- a/source/common/runtime/runtime_features.cc +++ b/source/common/runtime/runtime_features.cc @@ -62,6 +62,7 @@ constexpr const char* runtime_features[] = { // Begin alphabetically sorted section. "envoy.deprecated_features.allow_deprecated_extension_names", "envoy.reloadable_features.disallow_unbounded_access_logs", + "envoy.reloadable_features.enable_deprecated_v2_api_warning", "envoy.reloadable_features.ext_authz_http_service_enable_case_sensitive_string_matcher", "envoy.reloadable_features.fix_upgrade_response", "envoy.reloadable_features.fixed_connection_close", diff --git a/source/common/runtime/runtime_impl.cc b/source/common/runtime/runtime_impl.cc index 17efeeab1d53..a9f81c4f1a41 100644 --- a/source/common/runtime/runtime_impl.cc +++ b/source/common/runtime/runtime_impl.cc @@ -157,6 +157,12 @@ std::string RandomGeneratorImpl::uuid() { return std::string(uuid, UUID_LENGTH); } +void SnapshotImpl::countDeprecatedFeatureUse() const { + stats_.deprecated_feature_use_.inc(); + // Similar to the above, but a gauge that isn't imported during a hot restart. + stats_.deprecated_feature_seen_since_process_start_.inc(); +} + bool SnapshotImpl::deprecatedFeatureEnabled(absl::string_view key, bool default_value) const { // If the value is not explicitly set as a runtime boolean, trust the proto annotations passed as // default_value. @@ -167,10 +173,7 @@ bool SnapshotImpl::deprecatedFeatureEnabled(absl::string_view key, bool default_ // The feature is allowed. It is assumed this check is called when the feature // is about to be used, so increment the feature use stat. - stats_.deprecated_feature_use_.inc(); - - // Similar to the above, but a gauge that isn't imported during a hot restart. 
- stats_.deprecated_feature_seen_since_process_start_.inc(); + countDeprecatedFeatureUse(); #ifdef ENVOY_DISABLE_DEPRECATED_FEATURES return false; diff --git a/source/common/runtime/runtime_impl.h b/source/common/runtime/runtime_impl.h index c3047d55099c..480a345f3f72 100644 --- a/source/common/runtime/runtime_impl.h +++ b/source/common/runtime/runtime_impl.h @@ -80,6 +80,7 @@ class SnapshotImpl : public Snapshot, std::vector&& layers); // Runtime::Snapshot + void countDeprecatedFeatureUse() const override; bool deprecatedFeatureEnabled(absl::string_view key, bool default_value) const override; bool runtimeFeatureEnabled(absl::string_view key) const override; bool featureEnabled(absl::string_view key, uint64_t default_value, uint64_t random_value, diff --git a/source/common/upstream/cluster_manager_impl.cc b/source/common/upstream/cluster_manager_impl.cc index 9be913e4e7aa..0fcd22cfb20d 100644 --- a/source/common/upstream/cluster_manager_impl.cc +++ b/source/common/upstream/cluster_manager_impl.cc @@ -247,7 +247,7 @@ ClusterManagerImpl::ClusterManagerImpl( time_source_(main_thread_dispatcher.timeSource()), dispatcher_(main_thread_dispatcher), http_context_(http_context), subscription_factory_(local_info, main_thread_dispatcher, *this, random, - validation_context.dynamicValidationVisitor(), api) { + validation_context.dynamicValidationVisitor(), api, runtime_) { async_client_manager_ = std::make_unique( *this, tls, time_source_, api, grpc_context.statNames()); const auto& cm_config = bootstrap.cluster_manager(); diff --git a/test/common/config/BUILD b/test/common/config/BUILD index 360f8b623ede..2310f7a7330c 100644 --- a/test/common/config/BUILD +++ b/test/common/config/BUILD @@ -262,6 +262,7 @@ envoy_cc_test( "//test/mocks/stats:stats_mocks", "//test/mocks/upstream:upstream_mocks", "//test/test_common:environment_lib", + "//test/test_common:logging_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", diff --git a/test/common/config/subscription_factory_impl_test.cc b/test/common/config/subscription_factory_impl_test.cc index f52407bcc0c4..7402654068f9 100644 --- a/test/common/config/subscription_factory_impl_test.cc +++ b/test/common/config/subscription_factory_impl_test.cc @@ -19,6 +19,7 @@ #include "test/mocks/stats/mocks.h" #include "test/mocks/upstream/mocks.h" #include "test/test_common/environment.h" +#include "test/test_common/logging.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" @@ -35,14 +36,14 @@ namespace { class SubscriptionFactoryTest : public testing::Test { public: SubscriptionFactoryTest() - : http_request_(&cm_.async_client_), api_(Api::createApiForTest(stats_store_)) {} + : http_request_(&cm_.async_client_), api_(Api::createApiForTest(stats_store_)), + subscription_factory_(local_info_, dispatcher_, cm_, random_, validation_visitor_, *api_, + runtime_) {} std::unique_ptr subscriptionFromConfigSource(const envoy::config::core::v3::ConfigSource& config) { - return SubscriptionFactoryImpl(local_info_, dispatcher_, cm_, random_, validation_visitor_, - *api_) - .subscriptionFromConfigSource(config, Config::TypeUrl::get().ClusterLoadAssignment, - stats_store_, callbacks_); + return subscription_factory_.subscriptionFromConfigSource( + config, Config::TypeUrl::get().ClusterLoadAssignment, stats_store_, callbacks_); } Upstream::MockClusterManager cm_; @@ -54,6 +55,8 @@ class SubscriptionFactoryTest : public testing::Test { NiceMock local_info_; NiceMock 
validation_visitor_; Api::ApiPtr api_; + NiceMock runtime_; + SubscriptionFactoryImpl subscription_factory_; }; class SubscriptionFactoryTestApiConfigSource @@ -315,6 +318,29 @@ TEST_F(SubscriptionFactoryTest, GrpcSubscription) { subscriptionFromConfigSource(config)->start({"static_cluster"}); } +TEST_F(SubscriptionFactoryTest, LogWarningOnDeprecatedApi) { + envoy::config::core::v3::ConfigSource config; + + config.mutable_api_config_source()->set_api_type(envoy::config::core::v3::ApiConfigSource::GRPC); + config.mutable_api_config_source()->set_transport_api_version( + envoy::config::core::v3::ApiVersion::V2); + NiceMock snapshot; + EXPECT_CALL(runtime_, snapshot()).WillRepeatedly(ReturnRef(snapshot)); + EXPECT_CALL(snapshot, runtimeFeatureEnabled(_)).WillOnce(Return(true)); + EXPECT_CALL(snapshot, countDeprecatedFeatureUse()); + + Upstream::ClusterManager::ClusterInfoMap cluster_map; + NiceMock cluster; + cluster_map.emplace("static_cluster", cluster); + EXPECT_CALL(cm_, clusters()).WillOnce(Return(cluster_map)); + + EXPECT_LOG_CONTAINS( + "warn", "xDS of version v2 has been deprecated", try { + subscription_factory_.subscriptionFromConfigSource( + config, Config::TypeUrl::get().ClusterLoadAssignment, stats_store_, callbacks_); + } catch (EnvoyException&){/* expected, we pass an empty configuration */}); +} + INSTANTIATE_TEST_SUITE_P(SubscriptionFactoryTestApiConfigSource, SubscriptionFactoryTestApiConfigSource, ::testing::Values(envoy::config::core::v3::ApiConfigSource::REST, diff --git a/test/mocks/runtime/mocks.h b/test/mocks/runtime/mocks.h index d73bb3eb5317..1f4d5d0589fa 100644 --- a/test/mocks/runtime/mocks.h +++ b/test/mocks/runtime/mocks.h @@ -41,6 +41,7 @@ class MockSnapshot : public Snapshot { } } + MOCK_METHOD(void, countDeprecatedFeatureUse, (), (const)); MOCK_METHOD(bool, deprecatedFeatureEnabled, (absl::string_view key, bool default_enabled), (const)); MOCK_METHOD(bool, runtimeFeatureEnabled, (absl::string_view key), (const)); From 89150d1e0c55be6383ba2920561477f2ad81f48c Mon Sep 17 00:00:00 2001 From: Florin Coras Date: Fri, 22 May 2020 07:07:05 -0700 Subject: [PATCH 222/909] network: add socket interface (#11278) Move socket calls that involve raw fds from address to socket. 
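For context (not part of this patch): a minimal sketch of how call sites use the new free functions in Network::SocketInterface that replace Address::Instance::socket() and Address::addressFromFd(). The include paths mirror the patch; the helper name and the loopback literal are illustrative assumptions.

    #include "common/common/assert.h"
    #include "common/network/socket_impl.h"
    #include "common/network/utility.h"

    namespace Envoy {
    // Open a non-blocking datagram socket for an address, bind it, and read back the
    // locally bound address (including the OS-assigned port) from the fd.
    Network::Address::InstanceConstSharedPtr bindEphemeralUdpSocket() {
      // Port 0 asks the OS to pick a free port; the loopback literal is illustrative.
      Network::Address::InstanceConstSharedPtr addr =
          Network::Utility::parseInternetAddressAndPort("127.0.0.1:0");
      // Replaces the removed addr->socket(Address::SocketType::Datagram).
      Network::IoHandlePtr io_handle =
          Network::SocketInterface::socket(Network::Address::SocketType::Datagram, addr);
      RELEASE_ASSERT(addr->bind(io_handle->fd()).rc_ == 0, "bind failed");
      // Replaces the removed Address::addressFromFd(). The io_handle closes the fd when it
      // goes out of scope here; a real caller would keep it alive alongside the address.
      return Network::SocketInterface::addressFromFd(io_handle->fd());
    }
    } // namespace Envoy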
Signed-off-by: Florin Coras --- include/envoy/network/address.h | 13 +- source/common/network/BUILD | 1 + .../addr_family_aware_socket_option_impl.cc | 3 +- source/common/network/address_impl.cc | 142 ++---------------- source/common/network/address_impl.h | 34 +---- source/common/network/base_listener_impl.cc | 3 +- source/common/network/connection_impl.cc | 2 +- source/common/network/listen_socket_impl.cc | 4 +- source/common/network/listen_socket_impl.h | 7 +- source/common/network/listener_impl.cc | 2 +- source/common/network/socket_impl.cc | 138 +++++++++++++++++ source/common/network/socket_impl.h | 43 ++++++ source/common/network/utility.h | 2 + .../filters/udp/udp_proxy/udp_proxy_filter.h | 3 +- .../quic_listeners/quiche/envoy_quic_utils.cc | 5 +- .../stat_sinks/common/statsd/statsd.cc | 4 +- .../extensions/tracers/xray/daemon_broker.cc | 3 +- source/server/filter_chain_manager_impl.cc | 4 +- ...dr_family_aware_socket_option_impl_test.cc | 27 ++-- test/common/network/address_impl_test.cc | 13 +- test/common/network/dns_impl_test.cc | 1 - test/common/network/listener_impl_test.cc | 4 +- .../quiche/platform/quic_platform_test.cc | 4 +- .../common/statsd/udp_statsd_test.cc | 3 +- test/test_common/network_utility.cc | 10 +- 25 files changed, 268 insertions(+), 207 deletions(-) create mode 100644 source/common/network/socket_impl.cc diff --git a/include/envoy/network/address.h b/include/envoy/network/address.h index fd5a3b4563cc..440e86c6e667 100644 --- a/include/envoy/network/address.h +++ b/include/envoy/network/address.h @@ -43,6 +43,11 @@ class Ipv6 { * @return the absl::uint128 IPv6 address in network byte order. */ virtual absl::uint128 address() const PURE; + + /** + * @return true if address is Ipv6 and Ipv4 compatibility is disabled, false otherwise + */ + virtual bool v6only() const PURE; }; enum class IpVersion { v4, v6 }; // NOLINT(readability-identifier-naming) @@ -155,14 +160,6 @@ class Instance { */ virtual const Ip* ip() const PURE; - /** - * Create a socket for this address. - * @param type supplies the socket type to create. - * @return the IoHandlePtr naming the socket. In case of a failure, the program would be - * aborted. - */ - virtual IoHandlePtr socket(SocketType type) const PURE; - /** * @return the type of address. 
*/ diff --git a/source/common/network/BUILD b/source/common/network/BUILD index 5f56af1948e0..66db07d3680f 100644 --- a/source/common/network/BUILD +++ b/source/common/network/BUILD @@ -171,6 +171,7 @@ envoy_cc_library( envoy_cc_library( name = "socket_lib", + srcs = ["socket_impl.cc"], hdrs = ["socket_impl.h"], deps = [ ":address_lib", diff --git a/source/common/network/addr_family_aware_socket_option_impl.cc b/source/common/network/addr_family_aware_socket_option_impl.cc index 60d33382c91a..700a20733556 100644 --- a/source/common/network/addr_family_aware_socket_option_impl.cc +++ b/source/common/network/addr_family_aware_socket_option_impl.cc @@ -7,6 +7,7 @@ #include "common/api/os_sys_calls_impl.h" #include "common/common/assert.h" #include "common/network/address_impl.h" +#include "common/network/socket_impl.h" #include "common/network/socket_option_impl.h" namespace Envoy { @@ -29,7 +30,7 @@ absl::optional getVersionFromSocket(const Socket& socket) { if (socket.localAddress()) { return {getVersionFromAddress(socket.localAddress())}; } else { - return {getVersionFromAddress(Address::addressFromFd(socket.ioHandle().fd()))}; + return {getVersionFromAddress(SocketInterface::addressFromFd(socket.ioHandle().fd()))}; } } catch (const EnvoyException&) { // Ignore, we get here because we failed in getsockname(). diff --git a/source/common/network/address_impl.cc b/source/common/network/address_impl.cc index 6b1d46c85a3b..41630d8a1b3e 100644 --- a/source/common/network/address_impl.cc +++ b/source/common/network/address_impl.cc @@ -19,6 +19,17 @@ namespace Address { namespace { +// Check if an IP family is supported on this machine. +bool ipFamilySupported(int domain) { + Api::OsSysCalls& os_sys_calls = Api::OsSysCallsSingleton::get(); + const Api::SysCallSocketResult result = os_sys_calls.socket(domain, SOCK_STREAM, 0); + if (SOCKET_VALID(result.rc_)) { + RELEASE_ASSERT(os_sys_calls.close(result.rc_).rc_ == 0, + absl::StrCat("Fail to close fd: response code ", result.rc_)); + } + return SOCKET_VALID(result.rc_); +} + // Validate that IPv4 is supported on this platform, raise an exception for the // given address if not. void validateIpv4Supported(const std::string& address) { @@ -48,17 +59,6 @@ std::string friendlyNameFromAbstractPath(absl::string_view path) { } // namespace -// Check if an IP family is supported on this machine. 
-bool ipFamilySupported(int domain) { - Api::OsSysCalls& os_sys_calls = Api::OsSysCallsSingleton::get(); - const Api::SysCallSocketResult result = os_sys_calls.socket(domain, SOCK_STREAM, 0); - if (SOCKET_VALID(result.rc_)) { - RELEASE_ASSERT(os_sys_calls.close(result.rc_).rc_ == 0, - absl::StrCat("Fail to close fd: response code ", result.rc_)); - } - return SOCKET_VALID(result.rc_); -} - Address::InstanceConstSharedPtr addressFromSockAddr(const sockaddr_storage& ss, socklen_t ss_len, bool v6only) { RELEASE_ASSERT(ss_len == 0 || static_cast(ss_len) >= sizeof(sa_family_t), ""); @@ -103,107 +103,6 @@ Address::InstanceConstSharedPtr addressFromSockAddr(const sockaddr_storage& ss, NOT_REACHED_GCOVR_EXCL_LINE; } -InstanceConstSharedPtr addressFromFd(os_fd_t fd) { - sockaddr_storage ss; - socklen_t ss_len = sizeof ss; - auto& os_sys_calls = Api::OsSysCallsSingleton::get(); - Api::SysCallIntResult result = - os_sys_calls.getsockname(fd, reinterpret_cast(&ss), &ss_len); - if (result.rc_ != 0) { - throw EnvoyException(fmt::format("getsockname failed for '{}': ({}) {}", fd, result.errno_, - strerror(result.errno_))); - } - int socket_v6only = 0; - if (ss.ss_family == AF_INET6) { - socklen_t size_int = sizeof(socket_v6only); - result = os_sys_calls.getsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &socket_v6only, &size_int); -#ifdef WIN32 - // On Windows, it is possible for this getsockopt() call to fail. - // This can happen if the address we are trying to connect to has nothing - // listening. So we can't use RELEASE_ASSERT and instead must throw an - // exception - if (SOCKET_FAILURE(result.rc_)) { - throw EnvoyException(fmt::format("getsockopt failed for '{}': ({}) {}", fd, result.errno_, - strerror(result.errno_))); - } -#else - RELEASE_ASSERT(result.rc_ == 0, ""); -#endif - } - return addressFromSockAddr(ss, ss_len, socket_v6only); -} - -InstanceConstSharedPtr peerAddressFromFd(os_fd_t fd) { - sockaddr_storage ss; - socklen_t ss_len = sizeof ss; - auto& os_sys_calls = Api::OsSysCallsSingleton::get(); - Api::SysCallIntResult result = - os_sys_calls.getpeername(fd, reinterpret_cast(&ss), &ss_len); - if (result.rc_ != 0) { - throw EnvoyException( - fmt::format("getpeername failed for '{}': {}", fd, strerror(result.errno_))); - } -#ifdef __APPLE__ - if (ss_len == sizeof(sockaddr) && ss.ss_family == AF_UNIX) -#else - if (ss_len == sizeof(sa_family_t) && ss.ss_family == AF_UNIX) -#endif - { - // For Unix domain sockets, can't find out the peer name, but it should match our own - // name for the socket (i.e. the path should match, barring any namespace or other - // mechanisms to hide things, of which there are many). 
- ss_len = sizeof ss; - result = os_sys_calls.getsockname(fd, reinterpret_cast(&ss), &ss_len); - if (result.rc_ != 0) { - throw EnvoyException( - fmt::format("getsockname failed for '{}': {}", fd, strerror(result.errno_))); - } - } - return addressFromSockAddr(ss, ss_len); -} - -IoHandlePtr InstanceBase::socketFromSocketType(SocketType socket_type) const { -#if defined(__APPLE__) || defined(WIN32) - int flags = 0; -#else - int flags = SOCK_NONBLOCK; -#endif - - if (socket_type == SocketType::Stream) { - flags |= SOCK_STREAM; - } else { - flags |= SOCK_DGRAM; - } - - int domain; - if (type() == Type::Ip) { - IpVersion version = ip()->version(); - if (version == IpVersion::v6) { - domain = AF_INET6; - } else { - ASSERT(version == IpVersion::v4); - domain = AF_INET; - } - } else { - ASSERT(type() == Type::Pipe); - domain = AF_UNIX; - } - - auto os_sys_calls = Api::OsSysCallsSingleton::get(); - const Api::SysCallSocketResult result = os_sys_calls.socket(domain, flags, 0); - RELEASE_ASSERT(SOCKET_VALID(result.rc_), - fmt::format("socket(2) failed, got error: {}", strerror(result.errno_))); - IoHandlePtr io_handle = std::make_unique(result.rc_); - -#if defined(__APPLE__) || defined(WIN32) - // Cannot set SOCK_NONBLOCK as a ::socket flag. - const int rc = os_sys_calls.setsocketblocking(io_handle->fd(), false).rc_; - RELEASE_ASSERT(!SOCKET_FAILURE(rc), ""); -#endif - - return io_handle; -} - Ipv4Instance::Ipv4Instance(const sockaddr_in* address) : InstanceBase(Type::Ip) { ip_.ipv4_.address_ = *address; ip_.friendly_address_ = sockaddrToString(*address); @@ -258,8 +157,6 @@ Api::SysCallIntResult Ipv4Instance::connect(os_fd_t fd) const { return Api::OsSysCallsSingleton::get().connect(fd, sockAddr(), sockAddrLen()); } -IoHandlePtr Ipv4Instance::socket(SocketType type) const { return socketFromSocketType(type); } - std::string Ipv4Instance::sockaddrToString(const sockaddr_in& addr) { static constexpr size_t BufferSize = 16; // enough space to hold an IPv4 address in string form char str[BufferSize]; @@ -296,6 +193,8 @@ absl::uint128 Ipv6Instance::Ipv6Helper::address() const { uint32_t Ipv6Instance::Ipv6Helper::port() const { return ntohs(address_.sin6_port); } +bool Ipv6Instance::Ipv6Helper::v6only() const { return v6only_; }; + std::string Ipv6Instance::Ipv6Helper::makeFriendlyAddress() const { char str[INET6_ADDRSTRLEN]; const char* ptr = inet_ntop(AF_INET6, &address_.sin6_addr, str, INET6_ADDRSTRLEN); @@ -306,7 +205,7 @@ std::string Ipv6Instance::Ipv6Helper::makeFriendlyAddress() const { Ipv6Instance::Ipv6Instance(const sockaddr_in6& address, bool v6only) : InstanceBase(Type::Ip) { ip_.ipv6_.address_ = address; ip_.friendly_address_ = ip_.ipv6_.makeFriendlyAddress(); - ip_.v6only_ = v6only; + ip_.ipv6_.v6only_ = v6only; friendly_name_ = fmt::format("[{}]:{}", ip_.friendly_address_, ip_.port()); validateIpv6Supported(friendly_name_); } @@ -346,17 +245,6 @@ Api::SysCallIntResult Ipv6Instance::connect(os_fd_t fd) const { return Api::OsSysCallsSingleton::get().connect(fd, sockAddr(), sockAddrLen()); } -IoHandlePtr Ipv6Instance::socket(SocketType type) const { - IoHandlePtr io_handle = socketFromSocketType(type); - // Setting IPV6_V6ONLY restricts the IPv6 socket to IPv6 connections only. 
- const int v6only = ip_.v6only_; - const Api::SysCallIntResult result = Api::OsSysCallsSingleton::get().setsockopt( - io_handle->fd(), IPPROTO_IPV6, IPV6_V6ONLY, reinterpret_cast(&v6only), - sizeof(v6only)); - RELEASE_ASSERT(!SOCKET_FAILURE(result.rc_), ""); - return io_handle; -} - PipeInstance::PipeInstance(const sockaddr_un* address, socklen_t ss_len, mode_t mode) : InstanceBase(Type::Pipe) { if (address->sun_path[0] == '\0') { @@ -444,8 +332,6 @@ Api::SysCallIntResult PipeInstance::connect(os_fd_t fd) const { return Api::OsSysCallsSingleton::get().connect(fd, sockAddr(), sockAddrLen()); } -IoHandlePtr PipeInstance::socket(SocketType type) const { return socketFromSocketType(type); } - } // namespace Address } // namespace Network } // namespace Envoy diff --git a/source/common/network/address_impl.h b/source/common/network/address_impl.h index 8a8916ce8c69..d59f151a087d 100644 --- a/source/common/network/address_impl.h +++ b/source/common/network/address_impl.h @@ -14,12 +14,6 @@ namespace Envoy { namespace Network { namespace Address { -/** - * Returns true if the given family is supported on this machine. - * @param domain the IP family. - */ -bool ipFamilySupported(int domain); - /** * Convert an address in the form of the socket address struct defined by Posix, Linux, etc. into * a Network::Address::Instance and return a pointer to it. Raises an EnvoyException on failure. @@ -32,21 +26,6 @@ bool ipFamilySupported(int domain); InstanceConstSharedPtr addressFromSockAddr(const sockaddr_storage& ss, socklen_t len, bool v6only = true); -/** - * Obtain an address from a bound file descriptor. Raises an EnvoyException on failure. - * @param fd socket file descriptor - * @return InstanceConstSharedPtr for bound address. - */ -InstanceConstSharedPtr addressFromFd(os_fd_t fd); - -/** - * Obtain the address of the peer of the socket with the specified file descriptor. - * Raises an EnvoyException on failure. - * @param fd socket file descriptor - * @return InstanceConstSharedPtr for peer address. - */ -InstanceConstSharedPtr peerAddressFromFd(os_fd_t fd); - /** * Base class for all address types. */ @@ -64,7 +43,6 @@ class InstanceBase : public Instance { protected: InstanceBase(Type type) : type_(type) {} - IoHandlePtr socketFromSocketType(SocketType type) const; std::string friendly_name_; @@ -103,7 +81,6 @@ class Ipv4Instance : public InstanceBase { Api::SysCallIntResult bind(os_fd_t fd) const override; Api::SysCallIntResult connect(os_fd_t fd) const override; const Ip* ip() const override { return &ip_; } - IoHandlePtr socket(SocketType type) const override; // Network::Address::InstanceBase const sockaddr* sockAddr() const override { @@ -177,7 +154,6 @@ class Ipv6Instance : public InstanceBase { Api::SysCallIntResult bind(os_fd_t fd) const override; Api::SysCallIntResult connect(os_fd_t fd) const override; const Ip* ip() const override { return &ip_; } - IoHandlePtr socket(SocketType type) const override; // Network::Address::InstanceBase const sockaddr* sockAddr() const override { @@ -189,11 +165,16 @@ class Ipv6Instance : public InstanceBase { struct Ipv6Helper : public Ipv6 { Ipv6Helper() { memset(&address_, 0, sizeof(address_)); } absl::uint128 address() const override; + bool v6only() const override; uint32_t port() const; std::string makeFriendlyAddress() const; sockaddr_in6 address_; + // Is IPv4 compatibility (https://tools.ietf.org/html/rfc3493#page-11) disabled? 
+ // Default initialized to true to preserve extant Envoy behavior where we don't explicitly set + // this in the constructor. + bool v6only_{true}; }; struct IpHelper : public Ip { @@ -211,10 +192,6 @@ class Ipv6Instance : public InstanceBase { Ipv6Helper ipv6_; std::string friendly_address_; - // Is IPv4 compatibility (https://tools.ietf.org/html/rfc3493#page-11) disabled? - // Default initialized to true to preserve extant Envoy behavior where we don't explicitly set - // this in the constructor. - bool v6only_{true}; }; IpHelper ip_; @@ -240,7 +217,6 @@ class PipeInstance : public InstanceBase { Api::SysCallIntResult bind(os_fd_t fd) const override; Api::SysCallIntResult connect(os_fd_t fd) const override; const Ip* ip() const override { return nullptr; } - IoHandlePtr socket(SocketType type) const override; // Network::Address::InstanceBase const sockaddr* sockAddr() const override { return reinterpret_cast(&address_); } diff --git a/source/common/network/base_listener_impl.cc b/source/common/network/base_listener_impl.cc index c096bfcfb522..16e1f7ef8bd7 100644 --- a/source/common/network/base_listener_impl.cc +++ b/source/common/network/base_listener_impl.cc @@ -8,6 +8,7 @@ #include "common/event/dispatcher_impl.h" #include "common/event/file_event_impl.h" #include "common/network/address_impl.h" +#include "common/network/socket_impl.h" #include "event2/listener.h" @@ -15,7 +16,7 @@ namespace Envoy { namespace Network { Address::InstanceConstSharedPtr BaseListenerImpl::getLocalAddress(os_fd_t fd) { - return Address::addressFromFd(fd); + return SocketInterface::addressFromFd(fd); } BaseListenerImpl::BaseListenerImpl(Event::DispatcherImpl& dispatcher, SocketSharedPtr socket) diff --git a/source/common/network/connection_impl.cc b/source/common/network/connection_impl.cc index 12961773a7ee..3ac80fca58c1 100644 --- a/source/common/network/connection_impl.cc +++ b/source/common/network/connection_impl.cc @@ -781,7 +781,7 @@ void ClientConnectionImpl::connect() { // The local address can only be retrieved for IP connections. Other // types, such as UDS, don't have a notion of a local address. if (socket_->remoteAddress()->type() == Address::Type::Ip) { - socket_->setLocalAddress(Address::addressFromFd(ioHandle().fd())); + socket_->setLocalAddress(SocketInterface::addressFromFd(ioHandle().fd())); } } } // namespace Network diff --git a/source/common/network/listen_socket_impl.cc b/source/common/network/listen_socket_impl.cc index 9c7da8ce4326..e689dfd25405 100644 --- a/source/common/network/listen_socket_impl.cc +++ b/source/common/network/listen_socket_impl.cc @@ -28,7 +28,7 @@ void ListenSocketImpl::doBind() { if (local_address_->type() == Address::Type::Ip && local_address_->ip()->port() == 0) { // If the port we bind is zero, then the OS will pick a free port for us (assuming there are // any), and we need to find out the port number that the OS picked. 
- local_address_ = Address::addressFromFd(io_handle_->fd()); + local_address_ = SocketInterface::addressFromFd(io_handle_->fd()); } } @@ -64,7 +64,7 @@ void NetworkListenSocket< NetworkSocketTrait>::setPrebindSocketOptions() {} UdsListenSocket::UdsListenSocket(const Address::InstanceConstSharedPtr& address) - : ListenSocketImpl(address->socket(Address::SocketType::Stream), address) { + : ListenSocketImpl(SocketInterface::socket(Address::SocketType::Stream, address), address) { RELEASE_ASSERT(io_handle_->fd() != -1, ""); doBind(); } diff --git a/source/common/network/listen_socket_impl.h b/source/common/network/listen_socket_impl.h index 3a5ed3366293..82d71c15223f 100644 --- a/source/common/network/listen_socket_impl.h +++ b/source/common/network/listen_socket_impl.h @@ -41,7 +41,7 @@ template class NetworkListenSocket : public ListenSocketImpl { public: NetworkListenSocket(const Address::InstanceConstSharedPtr& address, const Network::Socket::OptionsSharedPtr& options, bool bind_to_port) - : ListenSocketImpl(address->socket(T::type), address) { + : ListenSocketImpl(Network::SocketInterface::socket(T::type, address), address) { RELEASE_ASSERT(SOCKET_VALID(io_handle_->fd()), ""); setPrebindSocketOptions(); @@ -141,8 +141,9 @@ class ClientSocketImpl : public ConnectionSocketImpl { public: ClientSocketImpl(const Address::InstanceConstSharedPtr& remote_address, const OptionsSharedPtr& options) - : ConnectionSocketImpl(remote_address->socket(Address::SocketType::Stream), nullptr, - remote_address) { + : ConnectionSocketImpl( + Network::SocketInterface::socket(Address::SocketType::Stream, remote_address), nullptr, + remote_address) { if (options) { addOptions(options); } diff --git a/source/common/network/listener_impl.cc b/source/common/network/listener_impl.cc index a8e6d5809f87..6f4e777752be 100644 --- a/source/common/network/listener_impl.cc +++ b/source/common/network/listener_impl.cc @@ -39,7 +39,7 @@ void ListenerImpl::listenCallback(evconnlistener*, evutil_socket_t fd, sockaddr* // IPv4 local_address was created from an IPv6 mapped IPv4 address. const Address::InstanceConstSharedPtr& remote_address = (remote_addr->sa_family == AF_UNIX) - ? Address::peerAddressFromFd(io_handle->fd()) + ? 
SocketInterface::peerAddressFromFd(io_handle->fd()) : Address::addressFromSockAddr(*reinterpret_cast(remote_addr), remote_addr_len, local_address->ip()->version() == Address::IpVersion::v6); diff --git a/source/common/network/socket_impl.cc b/source/common/network/socket_impl.cc new file mode 100644 index 000000000000..0ecf36730aa6 --- /dev/null +++ b/source/common/network/socket_impl.cc @@ -0,0 +1,138 @@ +#include "common/network/socket_impl.h" + +#include "envoy/common/exception.h" + +#include "common/api/os_sys_calls_impl.h" +#include "common/network/address_impl.h" +#include "common/network/io_socket_handle_impl.h" + +namespace Envoy { +namespace Network { + +IoHandlePtr SocketInterface::socket(Address::SocketType socket_type, Address::Type addr_type, + Address::IpVersion version) { +#if defined(__APPLE__) || defined(WIN32) + int flags = 0; +#else + int flags = SOCK_NONBLOCK; +#endif + + if (socket_type == Address::SocketType::Stream) { + flags |= SOCK_STREAM; + } else { + flags |= SOCK_DGRAM; + } + + int domain; + if (addr_type == Address::Type::Ip) { + if (version == Address::IpVersion::v6) { + domain = AF_INET6; + } else { + ASSERT(version == Address::IpVersion::v4); + domain = AF_INET; + } + } else { + ASSERT(addr_type == Address::Type::Pipe); + domain = AF_UNIX; + } + + const Api::SysCallSocketResult result = Api::OsSysCallsSingleton::get().socket(domain, flags, 0); + RELEASE_ASSERT(SOCKET_VALID(result.rc_), + fmt::format("socket(2) failed, got error: {}", strerror(result.errno_))); + IoHandlePtr io_handle = std::make_unique(result.rc_); + +#if defined(__APPLE__) || defined(WIN32) + // Cannot set SOCK_NONBLOCK as a ::socket flag. + const int rc = Api::OsSysCallsSingleton::get().setsocketblocking(io_handle->fd(), false).rc_; + RELEASE_ASSERT(!SOCKET_FAILURE(rc), ""); +#endif + + return io_handle; +} + +IoHandlePtr SocketInterface::socket(Address::SocketType socket_type, + const Address::InstanceConstSharedPtr addr) { + Address::IpVersion ip_version = addr->ip() ? addr->ip()->version() : Address::IpVersion::v4; + IoHandlePtr io_handle = SocketInterface::socket(socket_type, addr->type(), ip_version); + if (addr->type() == Address::Type::Ip && addr->ip()->version() == Address::IpVersion::v6) { + // Setting IPV6_V6ONLY restricts the IPv6 socket to IPv6 connections only. 
+ const int v6only = addr->ip()->ipv6()->v6only(); + const Api::SysCallIntResult result = Api::OsSysCallsSingleton::get().setsockopt( + io_handle->fd(), IPPROTO_IPV6, IPV6_V6ONLY, reinterpret_cast(&v6only), + sizeof(v6only)); + RELEASE_ASSERT(!SOCKET_FAILURE(result.rc_), ""); + } + return io_handle; +} + +bool SocketInterface::ipFamilySupported(int domain) { + Api::OsSysCalls& os_sys_calls = Api::OsSysCallsSingleton::get(); + const Api::SysCallSocketResult result = os_sys_calls.socket(domain, SOCK_STREAM, 0); + if (SOCKET_VALID(result.rc_)) { + RELEASE_ASSERT(os_sys_calls.close(result.rc_).rc_ == 0, + fmt::format("Fail to close fd: response code {}", strerror(result.rc_))); + } + return SOCKET_VALID(result.rc_); +} + +Address::InstanceConstSharedPtr SocketInterface::addressFromFd(os_fd_t fd) { + sockaddr_storage ss; + socklen_t ss_len = sizeof ss; + auto& os_sys_calls = Api::OsSysCallsSingleton::get(); + Api::SysCallIntResult result = + os_sys_calls.getsockname(fd, reinterpret_cast(&ss), &ss_len); + if (result.rc_ != 0) { + throw EnvoyException(fmt::format("getsockname failed for '{}': ({}) {}", fd, result.errno_, + strerror(result.errno_))); + } + int socket_v6only = 0; + if (ss.ss_family == AF_INET6) { + socklen_t size_int = sizeof(socket_v6only); + result = os_sys_calls.getsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &socket_v6only, &size_int); +#ifdef WIN32 + // On Windows, it is possible for this getsockopt() call to fail. + // This can happen if the address we are trying to connect to has nothing + // listening. So we can't use RELEASE_ASSERT and instead must throw an + // exception + if (SOCKET_FAILURE(result.rc_)) { + throw EnvoyException(fmt::format("getsockopt failed for '{}': ({}) {}", fd, result.errno_, + strerror(result.errno_))); + } +#else + RELEASE_ASSERT(result.rc_ == 0, ""); +#endif + } + return Address::addressFromSockAddr(ss, ss_len, socket_v6only); +} + +Address::InstanceConstSharedPtr SocketInterface::peerAddressFromFd(os_fd_t fd) { + sockaddr_storage ss; + socklen_t ss_len = sizeof ss; + auto& os_sys_calls = Api::OsSysCallsSingleton::get(); + Api::SysCallIntResult result = + os_sys_calls.getpeername(fd, reinterpret_cast(&ss), &ss_len); + if (result.rc_ != 0) { + throw EnvoyException( + fmt::format("getpeername failed for '{}': {}", fd, strerror(result.errno_))); + } +#ifdef __APPLE__ + if (ss_len == sizeof(sockaddr) && ss.ss_family == AF_UNIX) +#else + if (ss_len == sizeof(sa_family_t) && ss.ss_family == AF_UNIX) +#endif + { + // For Unix domain sockets, can't find out the peer name, but it should match our own + // name for the socket (i.e. the path should match, barring any namespace or other + // mechanisms to hide things, of which there are many). + ss_len = sizeof ss; + result = os_sys_calls.getsockname(fd, reinterpret_cast(&ss), &ss_len); + if (result.rc_ != 0) { + throw EnvoyException( + fmt::format("getsockname failed for '{}': {}", fd, strerror(result.errno_))); + } + } + return Address::addressFromSockAddr(ss, ss_len); +} + +} // namespace Network +} // namespace Envoy \ No newline at end of file diff --git a/source/common/network/socket_impl.h b/source/common/network/socket_impl.h index f41e9d74f3f8..b380ca1506ca 100644 --- a/source/common/network/socket_impl.h +++ b/source/common/network/socket_impl.h @@ -7,6 +7,49 @@ namespace Envoy { namespace Network { +namespace SocketInterface { + +/** + * Low level api to create a socket in the underlying host stack. Does not create an + * Envoy socket. 
+ * @param type type of socket requested + * @param addr_type type of address used with the socket + * @param version IP version if address type is IP + * @return Socket file descriptor + */ +IoHandlePtr socket(Address::SocketType type, Address::Type addr_type, Address::IpVersion version); + +/** + * Low level api to create a socket in the underlying host stack. Does not create an + * Envoy socket. + * @param socket_type type of socket requested + * @param addr address that is gleaned for address type and version if needed (@see createSocket) + */ +IoHandlePtr socket(Address::SocketType socket_type, const Address::InstanceConstSharedPtr addr); + +/** + * Returns true if the given family is supported on this machine. + * @param domain the IP family. + */ +bool ipFamilySupported(int domain); + +/** + * Obtain an address from a bound file descriptor. Raises an EnvoyException on failure. + * @param fd socket file descriptor + * @return InstanceConstSharedPtr for bound address. + */ +Address::InstanceConstSharedPtr addressFromFd(os_fd_t fd); + +/** + * Obtain the address of the peer of the socket with the specified file descriptor. + * Raises an EnvoyException on failure. + * @param fd socket file descriptor + * @return InstanceConstSharedPtr for peer address. + */ +Address::InstanceConstSharedPtr peerAddressFromFd(os_fd_t fd); + +} // namespace SocketInterface + class SocketImpl : public virtual Socket { public: // Network::Socket diff --git a/source/common/network/utility.h b/source/common/network/utility.h index 152b2ccc471d..c0d3501d7854 100644 --- a/source/common/network/utility.h +++ b/source/common/network/utility.h @@ -9,6 +9,8 @@ #include "envoy/network/connection.h" #include "envoy/network/listener.h" +#include "common/network/socket_impl.h" + #include "absl/strings/string_view.h" namespace Envoy { diff --git a/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.h b/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.h index d96eda529995..d4de8ad98dfd 100644 --- a/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.h +++ b/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.h @@ -221,7 +221,8 @@ class UdpProxyFilter : public Network::UdpListenerReadFilter, virtual Network::IoHandlePtr createIoHandle(const Upstream::HostConstSharedPtr& host) { // Virtual so this can be overridden in unit tests. 
- return host->address()->socket(Network::Address::SocketType::Datagram); + return Network::SocketInterface::socket(Network::Address::SocketType::Datagram, + host->address()); } // Upstream::ClusterUpdateCallbacks diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_utils.cc b/source/extensions/quic_listeners/quiche/envoy_quic_utils.cc index de1cf601c3bc..921f473312f8 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_utils.cc +++ b/source/extensions/quic_listeners/quiche/envoy_quic_utils.cc @@ -94,7 +94,8 @@ Network::ConnectionSocketPtr createConnectionSocket(Network::Address::InstanceConstSharedPtr& peer_addr, Network::Address::InstanceConstSharedPtr& local_addr, const Network::ConnectionSocket::OptionsSharedPtr& options) { - Network::IoHandlePtr io_handle = peer_addr->socket(Network::Address::SocketType::Datagram); + Network::IoHandlePtr io_handle = + Network::SocketInterface::socket(Network::Address::SocketType::Datagram, peer_addr); auto connection_socket = std::make_unique(std::move(io_handle), local_addr, peer_addr); connection_socket->addOptions(Network::SocketOptionFactory::buildIpPacketInfoOptions()); @@ -112,7 +113,7 @@ createConnectionSocket(Network::Address::InstanceConstSharedPtr& peer_addr, ASSERT(local_addr->ip()); if (local_addr->ip()->port() == 0) { // Get ephemeral port number. - local_addr = Network::Address::addressFromFd(connection_socket->ioHandle().fd()); + local_addr = Network::SocketInterface::addressFromFd(connection_socket->ioHandle().fd()); } if (!Network::Socket::applyOptions(connection_socket->options(), *connection_socket, envoy::config::core::v3::SocketOption::STATE_BOUND)) { diff --git a/source/extensions/stat_sinks/common/statsd/statsd.cc b/source/extensions/stat_sinks/common/statsd/statsd.cc index b4676d290155..320c4b85d922 100644 --- a/source/extensions/stat_sinks/common/statsd/statsd.cc +++ b/source/extensions/stat_sinks/common/statsd/statsd.cc @@ -27,8 +27,8 @@ namespace Common { namespace Statsd { UdpStatsdSink::WriterImpl::WriterImpl(UdpStatsdSink& parent) - : parent_(parent), - io_handle_(parent_.server_address_->socket(Network::Address::SocketType::Datagram)) {} + : parent_(parent), io_handle_(Network::SocketInterface::socket( + Network::Address::SocketType::Datagram, parent_.server_address_)) {} void UdpStatsdSink::WriterImpl::write(const std::string& message) { // TODO(mattklein123): We can avoid this const_cast pattern by having a constant variant of diff --git a/source/extensions/tracers/xray/daemon_broker.cc b/source/extensions/tracers/xray/daemon_broker.cc index 39d7de50ef99..dd80bcdfc121 100644 --- a/source/extensions/tracers/xray/daemon_broker.cc +++ b/source/extensions/tracers/xray/daemon_broker.cc @@ -29,7 +29,8 @@ std::string createHeader(const std::string& format, uint32_t version) { DaemonBrokerImpl::DaemonBrokerImpl(const std::string& daemon_endpoint) : address_(Network::Utility::parseInternetAddressAndPort(daemon_endpoint, false /*v6only*/)), - io_handle_(address_->socket(Network::Address::SocketType::Datagram)) {} + io_handle_( + Network::SocketInterface::socket(Network::Address::SocketType::Datagram, address_)) {} void DaemonBrokerImpl::send(const std::string& data) const { auto& logger = Logger::Registry::getLog(Logger::Id::tracing); diff --git a/source/server/filter_chain_manager_impl.cc b/source/server/filter_chain_manager_impl.cc index 8b8345f64cc4..369a2873abf6 100644 --- a/source/server/filter_chain_manager_impl.cc +++ b/source/server/filter_chain_manager_impl.cc @@ -360,11 +360,11 @@ std::pair> 
makeCidrListEntry(const s const T& data) { std::vector subnets; if (cidr == EMPTY_STRING) { - if (Network::Address::ipFamilySupported(AF_INET)) { + if (Network::SocketInterface::ipFamilySupported(AF_INET)) { subnets.push_back( Network::Address::CidrRange::create(Network::Utility::getIpv4CidrCatchAllAddress())); } - if (Network::Address::ipFamilySupported(AF_INET6)) { + if (Network::SocketInterface::ipFamilySupported(AF_INET6)) { subnets.push_back( Network::Address::CidrRange::create(Network::Utility::getIpv6CidrCatchAllAddress())); } diff --git a/test/common/network/addr_family_aware_socket_option_impl_test.cc b/test/common/network/addr_family_aware_socket_option_impl_test.cc index ce315917b80b..d65fbe5e5e73 100644 --- a/test/common/network/addr_family_aware_socket_option_impl_test.cc +++ b/test/common/network/addr_family_aware_socket_option_impl_test.cc @@ -46,7 +46,8 @@ TEST_F(AddrFamilyAwareSocketOptionImplTest, SetOptionFailure) { // If a platform supports IPv4 socket option variant for an IPv4 address, it works TEST_F(AddrFamilyAwareSocketOptionImplTest, SetOptionSuccess) { Address::Ipv4Instance address("1.2.3.4", 5678); - IoHandlePtr io_handle = address.socket(Address::SocketType::Stream); + IoHandlePtr io_handle = Network::SocketInterface::socket( + Address::SocketType::Stream, std::make_shared(address)); EXPECT_CALL(testing::Const(socket_), ioHandle()).WillRepeatedly(testing::ReturnRef(*io_handle)); AddrFamilyAwareSocketOptionImpl socket_option{ @@ -61,7 +62,8 @@ TEST_F(AddrFamilyAwareSocketOptionImplTest, SetOptionSuccess) { // If a platform doesn't support IPv4 socket option variant for an IPv4 address we fail TEST_F(AddrFamilyAwareSocketOptionImplTest, V4EmptyOptionNames) { Address::Ipv4Instance address("1.2.3.4", 5678); - IoHandlePtr io_handle = address.socket(Address::SocketType::Stream); + IoHandlePtr io_handle = Network::SocketInterface::socket( + Address::SocketType::Stream, std::make_shared(address)); EXPECT_CALL(testing::Const(socket_), ioHandle()).WillRepeatedly(testing::ReturnRef(*io_handle)); AddrFamilyAwareSocketOptionImpl socket_option{ envoy::config::core::v3::SocketOption::STATE_PREBIND, {}, {}, 1}; @@ -74,7 +76,8 @@ TEST_F(AddrFamilyAwareSocketOptionImplTest, V4EmptyOptionNames) { // If a platform doesn't support IPv4 and IPv6 socket option variants for an IPv4 address, we fail TEST_F(AddrFamilyAwareSocketOptionImplTest, V6EmptyOptionNames) { Address::Ipv6Instance address("::1:2:3:4", 5678); - IoHandlePtr io_handle = address.socket(Address::SocketType::Stream); + IoHandlePtr io_handle = Network::SocketInterface::socket( + Address::SocketType::Stream, std::make_shared(address)); EXPECT_CALL(testing::Const(socket_), ioHandle()).WillRepeatedly(testing::ReturnRef(*io_handle)); AddrFamilyAwareSocketOptionImpl socket_option{ envoy::config::core::v3::SocketOption::STATE_PREBIND, {}, {}, 1}; @@ -88,7 +91,8 @@ TEST_F(AddrFamilyAwareSocketOptionImplTest, V6EmptyOptionNames) { // IPv4 variant TEST_F(AddrFamilyAwareSocketOptionImplTest, V4IgnoreV6) { Address::Ipv4Instance address("1.2.3.4", 5678); - IoHandlePtr io_handle = address.socket(Address::SocketType::Stream); + IoHandlePtr io_handle = Network::SocketInterface::socket( + Address::SocketType::Stream, std::make_shared(address)); EXPECT_CALL(testing::Const(socket_), ioHandle()).WillRepeatedly(testing::ReturnRef(*io_handle)); AddrFamilyAwareSocketOptionImpl socket_option{ @@ -101,7 +105,8 @@ TEST_F(AddrFamilyAwareSocketOptionImplTest, V4IgnoreV6) { // If a platform supports IPv6 socket option variant for an IPv6 address it 
works TEST_F(AddrFamilyAwareSocketOptionImplTest, V6Only) { Address::Ipv6Instance address("::1:2:3:4", 5678); - IoHandlePtr io_handle = address.socket(Address::SocketType::Stream); + IoHandlePtr io_handle = Network::SocketInterface::socket( + Address::SocketType::Stream, std::make_shared(address)); EXPECT_CALL(testing::Const(socket_), ioHandle()).WillRepeatedly(testing::ReturnRef(*io_handle)); AddrFamilyAwareSocketOptionImpl socket_option{ @@ -117,7 +122,8 @@ TEST_F(AddrFamilyAwareSocketOptionImplTest, V6Only) { // we apply the IPv4 variant. TEST_F(AddrFamilyAwareSocketOptionImplTest, V6OnlyV4Fallback) { Address::Ipv6Instance address("::1:2:3:4", 5678); - IoHandlePtr io_handle = address.socket(Address::SocketType::Stream); + IoHandlePtr io_handle = Network::SocketInterface::socket( + Address::SocketType::Stream, std::make_shared(address)); EXPECT_CALL(testing::Const(socket_), ioHandle()).WillRepeatedly(testing::ReturnRef(*io_handle)); AddrFamilyAwareSocketOptionImpl socket_option{ @@ -133,7 +139,8 @@ TEST_F(AddrFamilyAwareSocketOptionImplTest, V6OnlyV4Fallback) { // AddrFamilyAwareSocketOptionImpl::setIpSocketOption() works with the IPv6 variant. TEST_F(AddrFamilyAwareSocketOptionImplTest, V6Precedence) { Address::Ipv6Instance address("::1:2:3:4", 5678); - IoHandlePtr io_handle = address.socket(Address::SocketType::Stream); + IoHandlePtr io_handle = Network::SocketInterface::socket( + Address::SocketType::Stream, std::make_shared(address)); EXPECT_CALL(testing::Const(socket_), ioHandle()).WillRepeatedly(testing::ReturnRef(*io_handle)); AddrFamilyAwareSocketOptionImpl socket_option{ @@ -146,7 +153,8 @@ TEST_F(AddrFamilyAwareSocketOptionImplTest, V6Precedence) { // GetSocketOptionName returns the v4 information for a v4 address TEST_F(AddrFamilyAwareSocketOptionImplTest, V4GetSocketOptionName) { Address::Ipv4Instance address("1.2.3.4", 5678); - IoHandlePtr io_handle = address.socket(Address::SocketType::Stream); + IoHandlePtr io_handle = Network::SocketInterface::socket( + Address::SocketType::Stream, std::make_shared(address)); EXPECT_CALL(testing::Const(socket_), ioHandle()).WillRepeatedly(testing::ReturnRef(*io_handle)); AddrFamilyAwareSocketOptionImpl socket_option{ @@ -161,7 +169,8 @@ TEST_F(AddrFamilyAwareSocketOptionImplTest, V4GetSocketOptionName) { // GetSocketOptionName returns the v4 information for a v6 address TEST_F(AddrFamilyAwareSocketOptionImplTest, V6GetSocketOptionName) { Address::Ipv6Instance address("2::1", 5678); - IoHandlePtr io_handle = address.socket(Address::SocketType::Stream); + IoHandlePtr io_handle = Network::SocketInterface::socket( + Address::SocketType::Stream, std::make_shared(address)); EXPECT_CALL(testing::Const(socket_), ioHandle()).WillRepeatedly(testing::ReturnRef(*io_handle)); AddrFamilyAwareSocketOptionImpl socket_option{ diff --git a/test/common/network/address_impl_test.cc b/test/common/network/address_impl_test.cc index 33997fc4bb0f..2950907d1812 100644 --- a/test/common/network/address_impl_test.cc +++ b/test/common/network/address_impl_test.cc @@ -48,7 +48,7 @@ void testSocketBindAndConnect(Network::Address::IpVersion ip_version, bool v6onl ASSERT_NE(addr_port->ip(), nullptr); // Create a socket on which we'll listen for connections from clients. 
- IoHandlePtr io_handle = addr_port->socket(SocketType::Stream); + IoHandlePtr io_handle = SocketInterface::socket(SocketType::Stream, addr_port); ASSERT_GE(io_handle->fd(), 0) << addr_port->asString(); auto& os_sys_calls = Api::OsSysCallsSingleton::get(); @@ -74,7 +74,7 @@ void testSocketBindAndConnect(Network::Address::IpVersion ip_version, bool v6onl auto client_connect = [&os_sys_calls](Address::InstanceConstSharedPtr addr_port) { // Create a client socket and connect to the server. - IoHandlePtr client_handle = addr_port->socket(SocketType::Stream); + IoHandlePtr client_handle = SocketInterface::socket(SocketType::Stream, addr_port); ASSERT_GE(client_handle->fd(), 0) << addr_port->asString(); // Instance::socket creates a non-blocking socket, which that extends all the way to the @@ -327,7 +327,8 @@ TEST(PipeInstanceTest, BasicPermission) { const mode_t mode = 0777; PipeInstance address(path, mode); - IoHandlePtr io_handle = address.socket(SocketType::Stream); + IoHandlePtr io_handle = + SocketInterface::socket(SocketType::Stream, Address::Type::Pipe, Address::IpVersion::v4); ASSERT_GE(io_handle->fd(), 0) << address.asString(); Api::SysCallIntResult result = address.bind(io_handle->fd()); @@ -352,7 +353,8 @@ TEST(PipeInstanceTest, PermissionFail) { const mode_t mode = 0777; PipeInstance address(path, mode); - IoHandlePtr io_handle = address.socket(SocketType::Stream); + IoHandlePtr io_handle = + SocketInterface::socket(SocketType::Stream, Address::Type::Pipe, Address::IpVersion::v4); ASSERT_GE(io_handle->fd(), 0) << address.asString(); EXPECT_CALL(os_sys_calls, bind(_, _, _)).WillOnce(Return(Api::SysCallIntResult{0, 0})); EXPECT_CALL(os_sys_calls, chmod(_, _)).WillOnce(Return(Api::SysCallIntResult{-1, 0})); @@ -422,7 +424,8 @@ TEST(PipeInstanceTest, EmbeddedNullPathError) { TEST(PipeInstanceTest, UnlinksExistingFile) { const auto bind_uds_socket = [](const std::string& path) { PipeInstance address(path); - IoHandlePtr io_handle = address.socket(SocketType::Stream); + IoHandlePtr io_handle = + SocketInterface::socket(SocketType::Stream, Address::Type::Pipe, Address::IpVersion::v4); ASSERT_GE(io_handle->fd(), 0) << address.asString(); const Api::SysCallIntResult result = address.bind(io_handle->fd()); diff --git a/test/common/network/dns_impl_test.cc b/test/common/network/dns_impl_test.cc index 38abf99b904a..9163a4545d01 100644 --- a/test/common/network/dns_impl_test.cc +++ b/test/common/network/dns_impl_test.cc @@ -389,7 +389,6 @@ class CustomInstance : public Address::Instance { Api::SysCallIntResult bind(os_fd_t fd) const override { return instance_.bind(fd); } Api::SysCallIntResult connect(os_fd_t fd) const override { return instance_.connect(fd); } const Address::Ip* ip() const override { return instance_.ip(); } - IoHandlePtr socket(Address::SocketType type) const override { return instance_.socket(type); } Address::Type type() const override { return instance_.type(); } private: diff --git a/test/common/network/listener_impl_test.cc b/test/common/network/listener_impl_test.cc index a19fa577163e..6471641ef764 100644 --- a/test/common/network/listener_impl_test.cc +++ b/test/common/network/listener_impl_test.cc @@ -201,7 +201,7 @@ TEST_P(ListenerImplTest, WildcardListenerIpv4Compat) { EXPECT_CALL(listener, getLocalAddress(_)) .WillOnce(Invoke([](os_fd_t fd) -> Address::InstanceConstSharedPtr { - return Address::addressFromFd(fd); + return SocketInterface::addressFromFd(fd); })); StreamInfo::StreamInfoImpl stream_info(dispatcher_->timeSource()); @@ -251,7 +251,7 @@ 
TEST_P(ListenerImplTest, DisableAndEnableListener) { EXPECT_CALL(listener, getLocalAddress(_)) .WillOnce(Invoke([](os_fd_t fd) -> Address::InstanceConstSharedPtr { - return Address::addressFromFd(fd); + return SocketInterface::addressFromFd(fd); })); EXPECT_CALL(listener_callbacks, onAccept_(_)).WillOnce(Invoke([&](ConnectionSocketPtr&) -> void { client_connection->close(ConnectionCloseType::NoFlush); diff --git a/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc b/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc index 56fa77e411cb..a9f9b8cf762b 100644 --- a/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc +++ b/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc @@ -669,8 +669,8 @@ TEST_F(QuicPlatformTest, PickUnsedPort) { Envoy::Network::Test::getCanonicalLoopbackAddress(ip_version); Envoy::Network::Address::InstanceConstSharedPtr addr_with_port = Envoy::Network::Utility::getAddressWithPort(*addr, port); - Envoy::Network::IoHandlePtr io_handle = - addr_with_port->socket(Envoy::Network::Address::SocketType::Datagram); + Envoy::Network::IoHandlePtr io_handle = Envoy::Network::SocketInterface::socket( + Envoy::Network::Address::SocketType::Datagram, addr_with_port); // binding of given port should success. EXPECT_EQ(0, addr_with_port->bind(io_handle->fd()).rc_); } diff --git a/test/extensions/stats_sinks/common/statsd/udp_statsd_test.cc b/test/extensions/stats_sinks/common/statsd/udp_statsd_test.cc index c2b3ea48542c..2bc30294c6c8 100644 --- a/test/extensions/stats_sinks/common/statsd/udp_statsd_test.cc +++ b/test/extensions/stats_sinks/common/statsd/udp_statsd_test.cc @@ -55,7 +55,8 @@ TEST(UdpOverUdsStatsdSinkTest, InitWithPipeAddress) { // modification back to the abstraction layer so it will work for multiple platforms. Additionally // this uses low level networking calls because our abstractions in this area only work for IP // sockets. Revisit this also. - auto io_handle = uds_address->socket(Network::Address::SocketType::Datagram); + auto io_handle = + Network::SocketInterface::socket(Network::Address::SocketType::Datagram, uds_address); RELEASE_ASSERT( Api::OsSysCallsSingleton::get().setsocketblocking(io_handle->fd(), false).rc_ != -1, ""); uds_address->bind(io_handle->fd()); diff --git a/test/test_common/network_utility.cc b/test/test_common/network_utility.cc index 1f40191733f1..494382d8c7cc 100644 --- a/test/test_common/network_utility.cc +++ b/test/test_common/network_utility.cc @@ -27,7 +27,7 @@ Address::InstanceConstSharedPtr findOrCheckFreePort(Address::InstanceConstShared << (addr_port == nullptr ? "nullptr" : addr_port->asString()); return nullptr; } - IoHandlePtr io_handle = addr_port->socket(type); + IoHandlePtr io_handle = SocketInterface::socket(type, addr_port); // Not setting REUSEADDR, therefore if the address has been recently used we won't reuse it here. // However, because we're going to use the address while checking if it is available, we'll need // to set REUSEADDR on listener sockets created by tests using an address validated by this means. @@ -58,7 +58,7 @@ Address::InstanceConstSharedPtr findOrCheckFreePort(Address::InstanceConstShared // If the port we bind is zero, then the OS will pick a free port for us (assuming there are // any), and we need to find out the port number that the OS picked so we can return it. 
if (addr_port->ip()->port() == 0) { - return Address::addressFromFd(io_handle->fd()); + return SocketInterface::addressFromFd(io_handle->fd()); } return addr_port; } @@ -149,7 +149,7 @@ Address::InstanceConstSharedPtr getAnyAddress(const Address::IpVersion version, bool supportsIpVersion(const Address::IpVersion version) { Address::InstanceConstSharedPtr addr = getCanonicalLoopbackAddress(version); - IoHandlePtr io_handle = addr->socket(Address::SocketType::Stream); + IoHandlePtr io_handle = SocketInterface::socket(Address::SocketType::Stream, addr); if (0 != addr->bind(io_handle->fd()).rc_) { // Socket bind failed. RELEASE_ASSERT(io_handle->close().err_ == nullptr, ""); @@ -174,7 +174,7 @@ std::string ipVersionToDnsFamily(Network::Address::IpVersion version) { std::pair bindFreeLoopbackPort(Address::IpVersion version, Address::SocketType type) { Address::InstanceConstSharedPtr addr = getCanonicalLoopbackAddress(version); - IoHandlePtr io_handle = addr->socket(type); + IoHandlePtr io_handle = SocketInterface::socket(type, addr); Api::SysCallIntResult result = addr->bind(io_handle->fd()); if (0 != result.rc_) { io_handle->close(); @@ -183,7 +183,7 @@ bindFreeLoopbackPort(Address::IpVersion version, Address::SocketType type) { ADD_FAILURE() << msg; throw EnvoyException(msg); } - return std::make_pair(Address::addressFromFd(io_handle->fd()), std::move(io_handle)); + return std::make_pair(SocketInterface::addressFromFd(io_handle->fd()), std::move(io_handle)); } TransportSocketPtr createRawBufferSocket() { return std::make_unique(); } From 74290ef76a76fbbf50f072dc33438791f93f68c7 Mon Sep 17 00:00:00 2001 From: Wayne Zhang Date: Sat, 23 May 2020 16:40:08 -0700 Subject: [PATCH 223/909] http: local reply mapper (#11007) Allows creating custom mappers for response codes based on access_log filters. Allows mapping an error response to a custom body in text or JSON format.
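As an illustration of the new config surface (field names follow the LocalReplyConfig and ResponseMapper protos added below; the status codes, runtime key, and body text are made-up example values), an HTTP connection manager using this feature might carry something like:

  local_reply_config:              # new HCM field added by this patch
    mappers:
    - filter:                      # mapper applies only when this access log filter matches
        status_code_filter:
          comparison:
            op: EQ
            value:
              default_value: 503               # illustrative value
              runtime_key: local_reply_mapper  # illustrative runtime key
      status_code: 504             # rewrite the local reply status code
      body:
        inline_string: "upstream is unavailable"   # rewrite the local reply body
    body_format:                   # formats the final body for local replies
      text_format: "%LOCAL_REPLY_BODY% %RESPONSE_CODE%"

In this sketch a locally generated 503 is rewritten to a 504 with the new body, while other local replies keep their original status code; in both cases the top-level body_format renders the final body text.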
Signed-off-by: Wayne Zhang --- .../core/v3/substitution_format_string.proto | 6 +- .../v4alpha/substitution_format_string.proto | 6 +- .../v3/http_connection_manager.proto | 69 ++ .../v4alpha/http_connection_manager.proto | 75 ++ .../http/http_conn_man/http_conn_man.rst | 1 + .../http/http_conn_man/local_reply.rst | 73 ++ .../observability/access_log/usage.rst | 3 + docs/root/version_history/current.rst | 1 + .../core/v3/substitution_format_string.proto | 6 +- .../v4alpha/substitution_format_string.proto | 6 +- .../v3/http_connection_manager.proto | 69 ++ .../v4alpha/http_connection_manager.proto | 75 ++ include/envoy/access_log/access_log.h | 12 +- .../common/access_log/access_log_formatter.cc | 123 +-- .../common/access_log/access_log_formatter.h | 87 ++- source/common/http/BUILD | 1 + source/common/http/async_client_impl.h | 22 +- source/common/http/conn_manager_config.h | 6 + source/common/http/conn_manager_impl.cc | 68 +- source/common/http/utility.cc | 79 +- source/common/http/utility.h | 63 +- source/common/local_reply/BUILD | 29 + source/common/local_reply/local_reply.cc | 175 +++++ source/common/local_reply/local_reply.h | 54 ++ source/common/router/header_formatter.cc | 3 +- .../file/file_access_log_impl.cc | 4 +- .../network/http_connection_manager/BUILD | 1 + .../network/http_connection_manager/config.cc | 4 +- .../network/http_connection_manager/config.h | 3 + source/server/admin/admin.cc | 3 +- source/server/admin/admin.h | 2 + .../access_log_formatter_fuzz_test.cc | 3 +- .../access_log_formatter_speed_test.cc | 15 +- .../access_log/access_log_formatter_test.cc | 733 +++++++++--------- .../common/substitution_format_string_test.cc | 11 +- .../http/conn_manager_impl_fuzz_test.cc | 5 +- test/common/http/conn_manager_impl_test.cc | 5 +- test/common/http/conn_manager_utility_test.cc | 7 +- test/common/http/utility_test.cc | 40 +- test/common/local_reply/BUILD | 22 + test/common/local_reply/local_reply_test.cc | 295 +++++++ test/config/utility.cc | 10 + test/config/utility.h | 4 + test/integration/BUILD | 12 + .../local_reply_integration_test.cc | 345 +++++++++ test/mocks/http/mocks.cc | 22 +- tools/spelling/spelling_dictionary.txt | 1 + 47 files changed, 2080 insertions(+), 579 deletions(-) create mode 100644 docs/root/configuration/http/http_conn_man/local_reply.rst create mode 100644 source/common/local_reply/BUILD create mode 100644 source/common/local_reply/local_reply.cc create mode 100644 source/common/local_reply/local_reply.h create mode 100644 test/common/local_reply/BUILD create mode 100644 test/common/local_reply/local_reply_test.cc create mode 100644 test/integration/local_reply_integration_test.cc diff --git a/api/envoy/config/core/v3/substitution_format_string.proto b/api/envoy/config/core/v3/substitution_format_string.proto index 5fe6c08753df..7537a1178b64 100644 --- a/api/envoy/config/core/v3/substitution_format_string.proto +++ b/api/envoy/config/core/v3/substitution_format_string.proto @@ -25,7 +25,7 @@ message SubstitutionFormatString { // // .. code-block:: // - // text_format: %RESP_BODY%:%RESPONSE_CODE%:path=$REQ(:path)% + // text_format: %LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=$REQ(:path)% // // The following plain text will be created: // @@ -43,9 +43,9 @@ message SubstitutionFormatString { // // .. 
code-block:: // - // typed_json_format: + // json_format: // status: %RESPONSE_CODE% - // message: %RESP_BODY% + // message: %LOCAL_REPLY_BODY% // // The following JSON object would be created: // diff --git a/api/envoy/config/core/v4alpha/substitution_format_string.proto b/api/envoy/config/core/v4alpha/substitution_format_string.proto index d998ca1fe835..2d3e0a21b790 100644 --- a/api/envoy/config/core/v4alpha/substitution_format_string.proto +++ b/api/envoy/config/core/v4alpha/substitution_format_string.proto @@ -29,7 +29,7 @@ message SubstitutionFormatString { // // .. code-block:: // - // text_format: %RESP_BODY%:%RESPONSE_CODE%:path=$REQ(:path)% + // text_format: %LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=$REQ(:path)% // // The following plain text will be created: // @@ -47,9 +47,9 @@ message SubstitutionFormatString { // // .. code-block:: // - // typed_json_format: + // json_format: // status: %RESPONSE_CODE% - // message: %RESP_BODY% + // message: %LOCAL_REPLY_BODY% // // The following JSON object would be created: // diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto b/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto index ff083e29228a..b46e63076d7a 100644 --- a/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto +++ b/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto @@ -3,8 +3,10 @@ syntax = "proto3"; package envoy.extensions.filters.network.http_connection_manager.v3; import "envoy/config/accesslog/v3/accesslog.proto"; +import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/config_source.proto"; import "envoy/config/core/v3/protocol.proto"; +import "envoy/config/core/v3/substitution_format_string.proto"; import "envoy/config/route/v3/route.proto"; import "envoy/config/route/v3/scoped_route.proto"; import "envoy/config/trace/v3/http_tracer.proto"; @@ -507,6 +509,11 @@ message HttpConnectionManager { // 3. Tracing decision (sampled, forced, etc) is set in 14th byte of the UUID. RequestIDExtension request_id_extension = 36; + // The configuration to customize local reply returned by Envoy. It can customize status code, + // body text and response content type. If not specified, status code and text body are hard + // coded in Envoy, the response content type is plain text. + LocalReplyConfig local_reply_config = 38; + // Determines if the port part should be removed from host/authority header before any processing // of request by HTTP filters or routing. The port would be removed only if it is equal to the :ref:`listener's` // local port and request method is not CONNECT. This affects the upstream host header as well. @@ -516,6 +523,68 @@ message HttpConnectionManager { bool strip_matching_host_port = 39; } +// The configuration to customize local reply returned by Envoy. +message LocalReplyConfig { + // Configuration of list of mappers which allows to filter and change local response. + // The mappers will be checked by the specified order until one is matched. + repeated ResponseMapper mappers = 1; + + // The configuration to form response body from the :ref:`command operators ` + // and to specify response content type as one of: plain/text or application/json. + // + // Example one: plain/text body_format. + // + // .. 
code-block:: + // + // text_format: %LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=$REQ(:path)% + // + // The following response body in `plain/text` format will be generated for a request with + // local reply body of "upstream connection error", response_code=503 and path=/foo. + // + // .. code-block:: + // + // upstream connect error:503:path=/foo + // + // Example two: application/json body_format. + // + // .. code-block:: + // + // json_format: + // status: %RESPONSE_CODE% + // message: %LOCAL_REPLY_BODY% + // path: $REQ(:path)% + // + // The following response body in "application/json" format would be generated for a request with + // local reply body of "upstream connection error", response_code=503 and path=/foo. + // + // .. code-block:: json + // + // { + // "status": 503, + // "message": "upstream connection error", + // "path": "/foo" + // } + // + config.core.v3.SubstitutionFormatString body_format = 2; +} + +// The configuration to filter and change local response. +message ResponseMapper { + // Filter to determine if this mapper should apply. + config.accesslog.v3.AccessLogFilter filter = 1 [(validate.rules).message = {required: true}]; + + // The new response status code if specified. + google.protobuf.UInt32Value status_code = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}]; + + // The new local reply body text if specified. It will be used in the `%LOCAL_REPLY_BODY%` + // command operator in the `body_foramt`. + config.core.v3.DataSource body = 3; + + // A per mapper `body_format` to override the :ref:`body_format `. + // It will be used when this mapper is matched. + config.core.v3.SubstitutionFormatString body_format_override = 4; +} + message Rds { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.http_connection_manager.v2.Rds"; diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto index 41284b7e1095..482bb5ed95e9 100644 --- a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto +++ b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto @@ -3,8 +3,10 @@ syntax = "proto3"; package envoy.extensions.filters.network.http_connection_manager.v4alpha; import "envoy/config/accesslog/v4alpha/accesslog.proto"; +import "envoy/config/core/v4alpha/base.proto"; import "envoy/config/core/v4alpha/config_source.proto"; import "envoy/config/core/v4alpha/protocol.proto"; +import "envoy/config/core/v4alpha/substitution_format_string.proto"; import "envoy/config/route/v4alpha/route.proto"; import "envoy/config/route/v4alpha/scoped_route.proto"; import "envoy/config/trace/v4alpha/http_tracer.proto"; @@ -507,6 +509,11 @@ message HttpConnectionManager { // 3. Tracing decision (sampled, forced, etc) is set in 14th byte of the UUID. RequestIDExtension request_id_extension = 36; + // The configuration to customize local reply returned by Envoy. It can customize status code, + // body text and response content type. If not specified, status code and text body are hard + // coded in Envoy, the response content type is plain text. + LocalReplyConfig local_reply_config = 38; + // Determines if the port part should be removed from host/authority header before any processing // of request by HTTP filters or routing. 
The port would be removed only if it is equal to the :ref:`listener's` // local port and request method is not CONNECT. This affects the upstream host header as well. @@ -516,6 +523,74 @@ message HttpConnectionManager { bool strip_matching_host_port = 39; } +// The configuration to customize local reply returned by Envoy. +message LocalReplyConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.http_connection_manager.v3.LocalReplyConfig"; + + // Configuration of list of mappers which allows to filter and change local response. + // The mappers will be checked by the specified order until one is matched. + repeated ResponseMapper mappers = 1; + + // The configuration to form response body from the :ref:`command operators ` + // and to specify response content type as one of: plain/text or application/json. + // + // Example one: plain/text body_format. + // + // .. code-block:: + // + // text_format: %LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=$REQ(:path)% + // + // The following response body in `plain/text` format will be generated for a request with + // local reply body of "upstream connection error", response_code=503 and path=/foo. + // + // .. code-block:: + // + // upstream connect error:503:path=/foo + // + // Example two: application/json body_format. + // + // .. code-block:: + // + // json_format: + // status: %RESPONSE_CODE% + // message: %LOCAL_REPLY_BODY% + // path: $REQ(:path)% + // + // The following response body in "application/json" format would be generated for a request with + // local reply body of "upstream connection error", response_code=503 and path=/foo. + // + // .. code-block:: json + // + // { + // "status": 503, + // "message": "upstream connection error", + // "path": "/foo" + // } + // + config.core.v4alpha.SubstitutionFormatString body_format = 2; +} + +// The configuration to filter and change local response. +message ResponseMapper { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.http_connection_manager.v3.ResponseMapper"; + + // Filter to determine if this mapper should apply. + config.accesslog.v4alpha.AccessLogFilter filter = 1 [(validate.rules).message = {required: true}]; + + // The new response status code if specified. + google.protobuf.UInt32Value status_code = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}]; + + // The new local reply body text if specified. It will be used in the `%LOCAL_REPLY_BODY%` + // command operator in the `body_foramt`. + config.core.v4alpha.DataSource body = 3; + + // A per mapper `body_format` to override the :ref:`body_format `. + // It will be used when this mapper is matched. 
+ config.core.v4alpha.SubstitutionFormatString body_format_override = 4; +} + message Rds { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.network.http_connection_manager.v3.Rds"; diff --git a/docs/root/configuration/http/http_conn_man/http_conn_man.rst b/docs/root/configuration/http/http_conn_man/http_conn_man.rst index f75ebacae09d..a726c3983a7a 100644 --- a/docs/root/configuration/http/http_conn_man/http_conn_man.rst +++ b/docs/root/configuration/http/http_conn_man/http_conn_man.rst @@ -12,6 +12,7 @@ HTTP connection manager header_casing headers header_sanitizing + local_reply stats runtime rds diff --git a/docs/root/configuration/http/http_conn_man/local_reply.rst b/docs/root/configuration/http/http_conn_man/local_reply.rst new file mode 100644 index 000000000000..c2f9d59e18d6 --- /dev/null +++ b/docs/root/configuration/http/http_conn_man/local_reply.rst @@ -0,0 +1,73 @@ +.. _config_http_conn_man_local_reply: + +Local reply modification +======================== + +The :ref:`HTTP connection manager ` supports modification of the local reply, which is a response returned by Envoy itself. + +Features: + +* :ref:`Local reply content modification`. +* :ref:`Local reply format modification`. + +.. _config_http_conn_man_local_reply_modification: + +Local reply content modification +-------------------------------- + +The local response content returned by Envoy can be customized. A list of :ref:`mappers ` can be specified. Each mapper must have a :ref:`filter `. It may have the following rewrite rules: a :ref:`status_code ` rule to rewrite the response code, a :ref:`body ` rule to rewrite the local reply body, and a :ref:`body_format_override ` rule to specify the response body format. Envoy checks each `mapper` in the specified order until the first one is matched. If a `mapper` is matched, all its rewrite rules will apply. + +Example of a LocalReplyConfig: + +.. code-block:: + + mappers: + - filter: + status_code_filter: + comparison: + op: EQ + value: + default_value: 400 + runtime_key: key_b + status_code: 401 + body: + inline_string: "not allowed" + +In the above example, if the status code is 400, it will be rewritten to 401 and the response body will be rewritten to "not allowed". + +.. _config_http_conn_man_local_reply_format: + +Local reply format modification +------------------------------- + +The response body content type can be customized. If not specified, the content type is plain/text. There are two `body_format` fields: one is the :ref:`body_format ` field in the :ref:`LocalReplyConfig ` message, and the other is the :ref:`body_format_override ` field in the `mapper`. The latter is only used when its mapper is matched. The former is used if no mapper is matched, or if the matched mapper doesn't have its own `body_format_override` specified. + +The local reply format can be specified as a :ref:`SubstitutionFormatString `. It supports :ref:`text_format ` and :ref:`json_format `. + +Example of a LocalReplyConfig with a `body_format` field: + +.. 
code-block:: + + mappers: + - filter: + status_code_filter: + comparison: + op: EQ + value: + default_value: 400 + runtime_key: key_b + status_code: 401 + body_format_override: + text_format: "%LOCAL_REPLY_BODY% %REQ(:path)%" + - filter: + status_code_filter: + comparison: + op: EQ + value: + default_value: 500 + runtime_key: key_b + status_code: 501 + body_format: + text_format: "%LOCAL_REPLY_BODY% %RESPONSE_CODE%" + +In the above example, there is a `body_format_override` inside the first `mapper` with a filter matching `status_code == 400`. It generates the response body in plain text format by concatenating %LOCAL_REPLY_BODY% with the `:path` request header. It is only used when the first mapper is matched. There is a `body_format` at the bottom of the config and at the same level as the `mappers` field. It is used when none of the mappers is matched or the matched mapper doesn't have its own `body_format_override` specified. diff --git a/docs/root/configuration/observability/access_log/usage.rst b/docs/root/configuration/observability/access_log/usage.rst index 11b5cf63e1c9..86da614014d1 100644 --- a/docs/root/configuration/observability/access_log/usage.rst +++ b/docs/root/configuration/observability/access_log/usage.rst @@ -521,3 +521,6 @@ The following command operators are supported: %HOSTNAME% The system hostname. + +%LOCAL_REPLY_BODY% + The body text for requests rejected by Envoy. diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index e3b0f62b80e1..d16f5d1c8459 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -58,6 +58,7 @@ New Features `google.api.HttpBody `_. * gzip filter: added option to set zlib's next output buffer size. * health checks: allow configuring health check transport sockets by specifying :ref:`transport socket match criteria `. +* http: added :ref:`local_reply config ` to http_connection_manager to customize :ref:`local reply `. * http: added :ref:`stripping port from host header ` support. * http: added support for proxying CONNECT requests, terminating CONNECT requests, and converting raw TCP streams into HTTP/2 CONNECT requests. See :ref:`upgrade documentation` for details. * listener: added in place filter chain update flow for tcp listener update which doesn't close connections if the corresponding network filter chain is equivalent during the listener update. diff --git a/generated_api_shadow/envoy/config/core/v3/substitution_format_string.proto b/generated_api_shadow/envoy/config/core/v3/substitution_format_string.proto index 5fe6c08753df..7537a1178b64 100644 --- a/generated_api_shadow/envoy/config/core/v3/substitution_format_string.proto +++ b/generated_api_shadow/envoy/config/core/v3/substitution_format_string.proto @@ -25,7 +25,7 @@ message SubstitutionFormatString { // // .. code-block:: // - // text_format: %RESP_BODY%:%RESPONSE_CODE%:path=$REQ(:path)% + // text_format: %LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=$REQ(:path)% // // The following plain text will be created: // @@ -43,9 +43,9 @@ message SubstitutionFormatString { // // .. 
code-block:: // - // typed_json_format: + // json_format: // status: %RESPONSE_CODE% - // message: %RESP_BODY% + // message: %LOCAL_REPLY_BODY% // // The following JSON object would be created: // diff --git a/generated_api_shadow/envoy/config/core/v4alpha/substitution_format_string.proto b/generated_api_shadow/envoy/config/core/v4alpha/substitution_format_string.proto index d998ca1fe835..2d3e0a21b790 100644 --- a/generated_api_shadow/envoy/config/core/v4alpha/substitution_format_string.proto +++ b/generated_api_shadow/envoy/config/core/v4alpha/substitution_format_string.proto @@ -29,7 +29,7 @@ message SubstitutionFormatString { // // .. code-block:: // - // text_format: %RESP_BODY%:%RESPONSE_CODE%:path=$REQ(:path)% + // text_format: %LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=$REQ(:path)% // // The following plain text will be created: // @@ -47,9 +47,9 @@ message SubstitutionFormatString { // // .. code-block:: // - // typed_json_format: + // json_format: // status: %RESPONSE_CODE% - // message: %RESP_BODY% + // message: %LOCAL_REPLY_BODY% // // The following JSON object would be created: // diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto index 346df090a770..8efa78bf0eb9 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto @@ -3,8 +3,10 @@ syntax = "proto3"; package envoy.extensions.filters.network.http_connection_manager.v3; import "envoy/config/accesslog/v3/accesslog.proto"; +import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/config_source.proto"; import "envoy/config/core/v3/protocol.proto"; +import "envoy/config/core/v3/substitution_format_string.proto"; import "envoy/config/route/v3/route.proto"; import "envoy/config/route/v3/scoped_route.proto"; import "envoy/config/trace/v3/http_tracer.proto"; @@ -509,6 +511,11 @@ message HttpConnectionManager { // 3. Tracing decision (sampled, forced, etc) is set in 14th byte of the UUID. RequestIDExtension request_id_extension = 36; + // The configuration to customize local reply returned by Envoy. It can customize status code, + // body text and response content type. If not specified, status code and text body are hard + // coded in Envoy, the response content type is plain text. + LocalReplyConfig local_reply_config = 38; + // Determines if the port part should be removed from host/authority header before any processing // of request by HTTP filters or routing. The port would be removed only if it is equal to the :ref:`listener's` // local port and request method is not CONNECT. This affects the upstream host header as well. @@ -521,6 +528,68 @@ message HttpConnectionManager { [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; } +// The configuration to customize local reply returned by Envoy. +message LocalReplyConfig { + // Configuration of list of mappers which allows to filter and change local response. + // The mappers will be checked by the specified order until one is matched. + repeated ResponseMapper mappers = 1; + + // The configuration to form response body from the :ref:`command operators ` + // and to specify response content type as one of: plain/text or application/json. + // + // Example one: plain/text body_format. 
+ // + // .. code-block:: + // + // text_format: %LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=$REQ(:path)% + // + // The following response body in `plain/text` format will be generated for a request with + // local reply body of "upstream connection error", response_code=503 and path=/foo. + // + // .. code-block:: + // + // upstream connect error:503:path=/foo + // + // Example two: application/json body_format. + // + // .. code-block:: + // + // json_format: + // status: %RESPONSE_CODE% + // message: %LOCAL_REPLY_BODY% + // path: $REQ(:path)% + // + // The following response body in "application/json" format would be generated for a request with + // local reply body of "upstream connection error", response_code=503 and path=/foo. + // + // .. code-block:: json + // + // { + // "status": 503, + // "message": "upstream connection error", + // "path": "/foo" + // } + // + config.core.v3.SubstitutionFormatString body_format = 2; +} + +// The configuration to filter and change local response. +message ResponseMapper { + // Filter to determine if this mapper should apply. + config.accesslog.v3.AccessLogFilter filter = 1 [(validate.rules).message = {required: true}]; + + // The new response status code if specified. + google.protobuf.UInt32Value status_code = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}]; + + // The new local reply body text if specified. It will be used in the `%LOCAL_REPLY_BODY%` + // command operator in the `body_foramt`. + config.core.v3.DataSource body = 3; + + // A per mapper `body_format` to override the :ref:`body_format `. + // It will be used when this mapper is matched. + config.core.v3.SubstitutionFormatString body_format_override = 4; +} + message Rds { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.http_connection_manager.v2.Rds"; diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto index 41284b7e1095..482bb5ed95e9 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto @@ -3,8 +3,10 @@ syntax = "proto3"; package envoy.extensions.filters.network.http_connection_manager.v4alpha; import "envoy/config/accesslog/v4alpha/accesslog.proto"; +import "envoy/config/core/v4alpha/base.proto"; import "envoy/config/core/v4alpha/config_source.proto"; import "envoy/config/core/v4alpha/protocol.proto"; +import "envoy/config/core/v4alpha/substitution_format_string.proto"; import "envoy/config/route/v4alpha/route.proto"; import "envoy/config/route/v4alpha/scoped_route.proto"; import "envoy/config/trace/v4alpha/http_tracer.proto"; @@ -507,6 +509,11 @@ message HttpConnectionManager { // 3. Tracing decision (sampled, forced, etc) is set in 14th byte of the UUID. RequestIDExtension request_id_extension = 36; + // The configuration to customize local reply returned by Envoy. It can customize status code, + // body text and response content type. If not specified, status code and text body are hard + // coded in Envoy, the response content type is plain text. + LocalReplyConfig local_reply_config = 38; + // Determines if the port part should be removed from host/authority header before any processing // of request by HTTP filters or routing. 
The port would be removed only if it is equal to the :ref:`listener's` // local port and request method is not CONNECT. This affects the upstream host header as well. @@ -516,6 +523,74 @@ message HttpConnectionManager { bool strip_matching_host_port = 39; } +// The configuration to customize local reply returned by Envoy. +message LocalReplyConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.http_connection_manager.v3.LocalReplyConfig"; + + // Configuration of list of mappers which allows to filter and change local response. + // The mappers will be checked by the specified order until one is matched. + repeated ResponseMapper mappers = 1; + + // The configuration to form response body from the :ref:`command operators ` + // and to specify response content type as one of: plain/text or application/json. + // + // Example one: plain/text body_format. + // + // .. code-block:: + // + // text_format: %LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=$REQ(:path)% + // + // The following response body in `plain/text` format will be generated for a request with + // local reply body of "upstream connection error", response_code=503 and path=/foo. + // + // .. code-block:: + // + // upstream connect error:503:path=/foo + // + // Example two: application/json body_format. + // + // .. code-block:: + // + // json_format: + // status: %RESPONSE_CODE% + // message: %LOCAL_REPLY_BODY% + // path: $REQ(:path)% + // + // The following response body in "application/json" format would be generated for a request with + // local reply body of "upstream connection error", response_code=503 and path=/foo. + // + // .. code-block:: json + // + // { + // "status": 503, + // "message": "upstream connection error", + // "path": "/foo" + // } + // + config.core.v4alpha.SubstitutionFormatString body_format = 2; +} + +// The configuration to filter and change local response. +message ResponseMapper { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.http_connection_manager.v3.ResponseMapper"; + + // Filter to determine if this mapper should apply. + config.accesslog.v4alpha.AccessLogFilter filter = 1 [(validate.rules).message = {required: true}]; + + // The new response status code if specified. + google.protobuf.UInt32Value status_code = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}]; + + // The new local reply body text if specified. It will be used in the `%LOCAL_REPLY_BODY%` + // command operator in the `body_foramt`. + config.core.v4alpha.DataSource body = 3; + + // A per mapper `body_format` to override the :ref:`body_format `. + // It will be used when this mapper is matched. + config.core.v4alpha.SubstitutionFormatString body_format_override = 4; +} + message Rds { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.network.http_connection_manager.v3.Rds"; diff --git a/include/envoy/access_log/access_log.h b/include/envoy/access_log/access_log.h index 4f1d4ee0fc0e..15486d050a91 100644 --- a/include/envoy/access_log/access_log.h +++ b/include/envoy/access_log/access_log.h @@ -109,12 +109,14 @@ class Formatter { * @param response_headers supplies the response headers. * @param response_trailers supplies the response trailers. * @param stream_info supplies the stream info. + * @param local_reply_body supplies the local reply body. * @return std::string string containing the complete formatted access log line. 
*/ virtual std::string format(const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap& response_headers, const Http::ResponseTrailerMap& response_trailers, - const StreamInfo::StreamInfo& stream_info) const PURE; + const StreamInfo::StreamInfo& stream_info, + absl::string_view local_reply_body) const PURE; }; using FormatterPtr = std::unique_ptr; @@ -133,25 +135,29 @@ class FormatterProvider { * @param response_headers supplies the response headers. * @param response_trailers supplies the response trailers. * @param stream_info supplies the stream info. + * @param local_reply_body supplies the local reply body. * @return std::string containing a single value extracted from the given headers/trailers/stream. */ virtual std::string format(const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap& response_headers, const Http::ResponseTrailerMap& response_trailers, - const StreamInfo::StreamInfo& stream_info) const PURE; + const StreamInfo::StreamInfo& stream_info, + absl::string_view local_reply_body) const PURE; /** * Extract a value from the provided headers/trailers/stream, preserving the value's type. * @param request_headers supplies the request headers. * @param response_headers supplies the response headers. * @param response_trailers supplies the response trailers. * @param stream_info supplies the stream info. + * @param local_reply_body supplies the local reply body. * @return ProtobufWkt::Value containing a single value extracted from the given * headers/trailers/stream. */ virtual ProtobufWkt::Value formatValue(const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap& response_headers, const Http::ResponseTrailerMap& response_trailers, - const StreamInfo::StreamInfo& stream_info) const PURE; + const StreamInfo::StreamInfo& stream_info, + absl::string_view local_reply_body) const PURE; }; using FormatterProviderPtr = std::unique_ptr; diff --git a/source/common/access_log/access_log_formatter.cc b/source/common/access_log/access_log_formatter.cc index 4b6bd1891bf8..8fce54b47dbf 100644 --- a/source/common/access_log/access_log_formatter.cc +++ b/source/common/access_log/access_log_formatter.cc @@ -96,12 +96,14 @@ FormatterImpl::FormatterImpl(const std::string& format) { std::string FormatterImpl::format(const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap& response_headers, const Http::ResponseTrailerMap& response_trailers, - const StreamInfo::StreamInfo& stream_info) const { + const StreamInfo::StreamInfo& stream_info, + absl::string_view local_reply_body) const { std::string log_line; log_line.reserve(256); for (const FormatterProviderPtr& provider : providers_) { - log_line += provider->format(request_headers, response_headers, response_trailers, stream_info); + log_line += provider->format(request_headers, response_headers, response_trailers, stream_info, + local_reply_body); } return log_line; @@ -118,9 +120,10 @@ JsonFormatterImpl::JsonFormatterImpl( std::string JsonFormatterImpl::format(const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap& response_headers, const Http::ResponseTrailerMap& response_trailers, - const StreamInfo::StreamInfo& stream_info) const { + const StreamInfo::StreamInfo& stream_info, + absl::string_view local_reply_body) const { const auto output_struct = - toStruct(request_headers, response_headers, response_trailers, stream_info); + toStruct(request_headers, response_headers, response_trailers, stream_info, local_reply_body); const std::string 
log_line = MessageUtil::getJsonStringFromMessage(output_struct, false, true); return absl::StrCat(log_line, "\n"); @@ -129,7 +132,8 @@ std::string JsonFormatterImpl::format(const Http::RequestHeaderMap& request_head ProtobufWkt::Struct JsonFormatterImpl::toStruct(const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap& response_headers, const Http::ResponseTrailerMap& response_trailers, - const StreamInfo::StreamInfo& stream_info) const { + const StreamInfo::StreamInfo& stream_info, + absl::string_view local_reply_body) const { ProtobufWkt::Struct output; auto* fields = output.mutable_fields(); for (const auto& pair : json_output_format_) { @@ -140,16 +144,17 @@ ProtobufWkt::Struct JsonFormatterImpl::toStruct(const Http::RequestHeaderMap& re const auto& provider = providers.front(); const auto val = preserve_types_ ? provider->formatValue(request_headers, response_headers, - response_trailers, stream_info) - : ValueUtil::stringValue(provider->format( - request_headers, response_headers, response_trailers, stream_info)); - + response_trailers, stream_info, local_reply_body) + : ValueUtil::stringValue( + provider->format(request_headers, response_headers, + response_trailers, stream_info, local_reply_body)); (*fields)[pair.first] = val; } else { // Multiple providers forces string output. std::string str; for (const auto& provider : providers) { - str += provider->format(request_headers, response_headers, response_trailers, stream_info); + str += provider->format(request_headers, response_headers, response_trailers, stream_info, + local_reply_body); } (*fields)[pair.first] = ValueUtil::stringValue(str); } @@ -280,6 +285,8 @@ std::vector AccessLogFormatParser::parse(const std::string formatters.emplace_back(FormatterProviderPtr{ new ResponseTrailerFormatter(main_header, alternative_header, max_length)}); + } else if (absl::StartsWith(token, "LOCAL_REPLY_BODY")) { + formatters.emplace_back(std::make_unique()); } else if (absl::StartsWith(token, DYNAMIC_META_TOKEN)) { std::string filter_namespace; absl::optional max_length; @@ -763,14 +770,16 @@ StreamInfoFormatter::StreamInfoFormatter(const std::string& field_name) { std::string StreamInfoFormatter::format(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, const Http::ResponseTrailerMap&, - const StreamInfo::StreamInfo& stream_info) const { + const StreamInfo::StreamInfo& stream_info, + absl::string_view) const { return field_extractor_->extract(stream_info); } -ProtobufWkt::Value -StreamInfoFormatter::formatValue(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, - const Http::ResponseTrailerMap&, - const StreamInfo::StreamInfo& stream_info) const { +ProtobufWkt::Value StreamInfoFormatter::formatValue(const Http::RequestHeaderMap&, + const Http::ResponseHeaderMap&, + const Http::ResponseTrailerMap&, + const StreamInfo::StreamInfo& stream_info, + absl::string_view) const { return field_extractor_->extractValue(stream_info); } @@ -779,17 +788,34 @@ PlainStringFormatter::PlainStringFormatter(const std::string& str) { str_.set_st std::string PlainStringFormatter::format(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, const Http::ResponseTrailerMap&, - const StreamInfo::StreamInfo&) const { + const StreamInfo::StreamInfo&, absl::string_view) const { return str_.string_value(); } ProtobufWkt::Value PlainStringFormatter::formatValue(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, const Http::ResponseTrailerMap&, - const StreamInfo::StreamInfo&) const { + const 
StreamInfo::StreamInfo&, + absl::string_view) const { return str_; } +std::string LocalReplyBodyFormatter::format(const Http::RequestHeaderMap&, + const Http::ResponseHeaderMap&, + const Http::ResponseTrailerMap&, + const StreamInfo::StreamInfo&, + absl::string_view local_reply_body) const { + return std::string(local_reply_body); +} + +ProtobufWkt::Value LocalReplyBodyFormatter::formatValue(const Http::RequestHeaderMap&, + const Http::ResponseHeaderMap&, + const Http::ResponseTrailerMap&, + const StreamInfo::StreamInfo&, + absl::string_view local_reply_body) const { + return ValueUtil::stringValue(std::string(local_reply_body)); +} + HeaderFormatter::HeaderFormatter(const std::string& main_header, const std::string& alternative_header, absl::optional max_length) @@ -835,13 +861,14 @@ ResponseHeaderFormatter::ResponseHeaderFormatter(const std::string& main_header, std::string ResponseHeaderFormatter::format(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap& response_headers, const Http::ResponseTrailerMap&, - const StreamInfo::StreamInfo&) const { + const StreamInfo::StreamInfo&, + absl::string_view) const { return HeaderFormatter::format(response_headers); } ProtobufWkt::Value ResponseHeaderFormatter::formatValue( const Http::RequestHeaderMap&, const Http::ResponseHeaderMap& response_headers, - const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&) const { + const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&, absl::string_view) const { return HeaderFormatter::formatValue(response_headers); } @@ -853,14 +880,14 @@ RequestHeaderFormatter::RequestHeaderFormatter(const std::string& main_header, std::string RequestHeaderFormatter::format(const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap&, const Http::ResponseTrailerMap&, - const StreamInfo::StreamInfo&) const { + const StreamInfo::StreamInfo&, absl::string_view) const { return HeaderFormatter::format(request_headers); } ProtobufWkt::Value RequestHeaderFormatter::formatValue(const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap&, const Http::ResponseTrailerMap&, - const StreamInfo::StreamInfo&) const { + const StreamInfo::StreamInfo&, absl::string_view) const { return HeaderFormatter::formatValue(request_headers); } @@ -872,14 +899,15 @@ ResponseTrailerFormatter::ResponseTrailerFormatter(const std::string& main_heade std::string ResponseTrailerFormatter::format(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, const Http::ResponseTrailerMap& response_trailers, - const StreamInfo::StreamInfo&) const { + const StreamInfo::StreamInfo&, + absl::string_view) const { return HeaderFormatter::format(response_trailers); } ProtobufWkt::Value ResponseTrailerFormatter::formatValue(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, const Http::ResponseTrailerMap& response_trailers, - const StreamInfo::StreamInfo&) const { + const StreamInfo::StreamInfo&, absl::string_view) const { return HeaderFormatter::formatValue(response_trailers); } @@ -891,7 +919,8 @@ GrpcStatusFormatter::GrpcStatusFormatter(const std::string& main_header, std::string GrpcStatusFormatter::format(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap& response_headers, const Http::ResponseTrailerMap& response_trailers, - const StreamInfo::StreamInfo& info) const { + const StreamInfo::StreamInfo& info, + absl::string_view) const { const auto grpc_status = Grpc::Common::getGrpcStatus(response_trailers, response_headers, info, true); if (!grpc_status.has_value()) { @@ -904,9 
+933,11 @@ std::string GrpcStatusFormatter::format(const Http::RequestHeaderMap&, return grpc_status_message; } -ProtobufWkt::Value GrpcStatusFormatter::formatValue( - const Http::RequestHeaderMap&, const Http::ResponseHeaderMap& response_headers, - const Http::ResponseTrailerMap& response_trailers, const StreamInfo::StreamInfo& info) const { +ProtobufWkt::Value +GrpcStatusFormatter::formatValue(const Http::RequestHeaderMap&, + const Http::ResponseHeaderMap& response_headers, + const Http::ResponseTrailerMap& response_trailers, + const StreamInfo::StreamInfo& info, absl::string_view) const { const auto grpc_status = Grpc::Common::getGrpcStatus(response_trailers, response_headers, info, true); if (!grpc_status.has_value()) { @@ -966,14 +997,16 @@ DynamicMetadataFormatter::DynamicMetadataFormatter(const std::string& filter_nam std::string DynamicMetadataFormatter::format(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, const Http::ResponseTrailerMap&, - const StreamInfo::StreamInfo& stream_info) const { + const StreamInfo::StreamInfo& stream_info, + absl::string_view) const { return MetadataFormatter::formatMetadata(stream_info.dynamicMetadata()); } -ProtobufWkt::Value -DynamicMetadataFormatter::formatValue(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, - const Http::ResponseTrailerMap&, - const StreamInfo::StreamInfo& stream_info) const { +ProtobufWkt::Value DynamicMetadataFormatter::formatValue(const Http::RequestHeaderMap&, + const Http::ResponseHeaderMap&, + const Http::ResponseTrailerMap&, + const StreamInfo::StreamInfo& stream_info, + absl::string_view) const { return MetadataFormatter::formatMetadataValue(stream_info.dynamicMetadata()); } @@ -994,7 +1027,8 @@ FilterStateFormatter::filterState(const StreamInfo::StreamInfo& stream_info) con std::string FilterStateFormatter::format(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, const Http::ResponseTrailerMap&, - const StreamInfo::StreamInfo& stream_info) const { + const StreamInfo::StreamInfo& stream_info, + absl::string_view) const { const Envoy::StreamInfo::FilterState::Object* state = filterState(stream_info); if (!state) { return UnspecifiedValueString; @@ -1026,10 +1060,11 @@ std::string FilterStateFormatter::format(const Http::RequestHeaderMap&, return value; } -ProtobufWkt::Value -FilterStateFormatter::formatValue(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, - const Http::ResponseTrailerMap&, - const StreamInfo::StreamInfo& stream_info) const { +ProtobufWkt::Value FilterStateFormatter::formatValue(const Http::RequestHeaderMap&, + const Http::ResponseHeaderMap&, + const Http::ResponseTrailerMap&, + const StreamInfo::StreamInfo& stream_info, + absl::string_view) const { const Envoy::StreamInfo::FilterState::Object* state = filterState(stream_info); if (!state) { return unspecifiedValue(); @@ -1063,7 +1098,8 @@ StartTimeFormatter::StartTimeFormatter(const std::string& format) : date_formatt std::string StartTimeFormatter::format(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, const Http::ResponseTrailerMap&, - const StreamInfo::StreamInfo& stream_info) const { + const StreamInfo::StreamInfo& stream_info, + absl::string_view) const { if (date_formatter_.formatString().empty()) { return AccessLogDateTimeFormatter::fromTime(stream_info.startTime()); } else { @@ -1071,13 +1107,12 @@ std::string StartTimeFormatter::format(const Http::RequestHeaderMap&, } } -ProtobufWkt::Value -StartTimeFormatter::formatValue(const Http::RequestHeaderMap& request_headers, - 
const Http::ResponseHeaderMap& response_headers, - const Http::ResponseTrailerMap& response_trailers, - const StreamInfo::StreamInfo& stream_info) const { +ProtobufWkt::Value StartTimeFormatter::formatValue( + const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap& response_headers, + const Http::ResponseTrailerMap& response_trailers, const StreamInfo::StreamInfo& stream_info, + absl::string_view local_reply_body) const { return ValueUtil::stringValue( - format(request_headers, response_headers, response_trailers, stream_info)); + format(request_headers, response_headers, response_trailers, stream_info, local_reply_body)); } } // namespace AccessLog diff --git a/source/common/access_log/access_log_formatter.h b/source/common/access_log/access_log_formatter.h index dd3654a2eb7e..fb525cd29925 100644 --- a/source/common/access_log/access_log_formatter.h +++ b/source/common/access_log/access_log_formatter.h @@ -18,6 +18,8 @@ namespace Envoy { namespace AccessLog { +// TODO(qiwzhang): move this to source/common/common to be shared + /** * Access log format parser. */ @@ -94,7 +96,8 @@ class FormatterImpl : public Formatter { std::string format(const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap& response_headers, const Http::ResponseTrailerMap& response_trailers, - const StreamInfo::StreamInfo& stream_info) const override; + const StreamInfo::StreamInfo& stream_info, + absl::string_view local_reply_body) const override; private: std::vector providers_; @@ -109,7 +112,8 @@ class JsonFormatterImpl : public Formatter { std::string format(const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap& response_headers, const Http::ResponseTrailerMap& response_trailers, - const StreamInfo::StreamInfo& stream_info) const override; + const StreamInfo::StreamInfo& stream_info, + absl::string_view local_reply_body) const override; private: const bool preserve_types_; @@ -118,7 +122,8 @@ class JsonFormatterImpl : public Formatter { ProtobufWkt::Struct toStruct(const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap& response_headers, const Http::ResponseTrailerMap& response_trailers, - const StreamInfo::StreamInfo& stream_info) const; + const StreamInfo::StreamInfo& stream_info, + absl::string_view local_reply_body) const; }; /** @@ -131,18 +136,32 @@ class PlainStringFormatter : public FormatterProvider { // FormatterProvider std::string format(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, - const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&) const override; + const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&, + absl::string_view) const override; ProtobufWkt::Value formatValue(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, - const Http::ResponseTrailerMap&, - const StreamInfo::StreamInfo&) const override; + const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&, + absl::string_view) const override; private: ProtobufWkt::Value str_; }; /** - * Base formatter for headers. + * FormatterProvider for local_reply_body. It returns the string from `local_reply_body` argument. 
*/ +class LocalReplyBodyFormatter : public FormatterProvider { +public: + LocalReplyBodyFormatter() = default; + + // Formatter::format + std::string format(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, + const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&, + absl::string_view local_reply_body) const override; + ProtobufWkt::Value formatValue(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, + const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&, + absl::string_view local_reply_body) const override; +}; + class HeaderFormatter { public: HeaderFormatter(const std::string& main_header, const std::string& alternative_header, @@ -170,10 +189,11 @@ class RequestHeaderFormatter : public FormatterProvider, HeaderFormatter { // FormatterProvider std::string format(const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap&, - const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&) const override; + const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&, + absl::string_view) const override; ProtobufWkt::Value formatValue(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, - const Http::ResponseTrailerMap&, - const StreamInfo::StreamInfo&) const override; + const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&, + absl::string_view) const override; }; /** @@ -186,10 +206,11 @@ class ResponseHeaderFormatter : public FormatterProvider, HeaderFormatter { // FormatterProvider std::string format(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap& response_headers, - const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&) const override; + const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&, + absl::string_view) const override; ProtobufWkt::Value formatValue(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, - const Http::ResponseTrailerMap&, - const StreamInfo::StreamInfo&) const override; + const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&, + absl::string_view) const override; }; /** @@ -203,10 +224,10 @@ class ResponseTrailerFormatter : public FormatterProvider, HeaderFormatter { // FormatterProvider std::string format(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, const Http::ResponseTrailerMap& response_trailers, - const StreamInfo::StreamInfo&) const override; + const StreamInfo::StreamInfo&, absl::string_view) const override; ProtobufWkt::Value formatValue(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, - const Http::ResponseTrailerMap&, - const StreamInfo::StreamInfo&) const override; + const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&, + absl::string_view) const override; }; /** @@ -220,10 +241,10 @@ class GrpcStatusFormatter : public FormatterProvider, HeaderFormatter { // FormatterProvider std::string format(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap& response_headers, const Http::ResponseTrailerMap& response_trailers, - const StreamInfo::StreamInfo&) const override; + const StreamInfo::StreamInfo&, absl::string_view) const override; ProtobufWkt::Value formatValue(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, - const Http::ResponseTrailerMap&, - const StreamInfo::StreamInfo&) const override; + const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&, + absl::string_view) const override; }; /** @@ -235,10 +256,11 @@ class StreamInfoFormatter : public FormatterProvider { // FormatterProvider std::string format(const Http::RequestHeaderMap&, const 
Http::ResponseHeaderMap&, - const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&) const override; + const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&, + absl::string_view) const override; ProtobufWkt::Value formatValue(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, - const Http::ResponseTrailerMap&, - const StreamInfo::StreamInfo&) const override; + const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&, + absl::string_view) const override; class FieldExtractor { public: @@ -283,10 +305,11 @@ class DynamicMetadataFormatter : public FormatterProvider, MetadataFormatter { // FormatterProvider std::string format(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, - const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&) const override; + const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&, + absl::string_view) const override; ProtobufWkt::Value formatValue(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, - const Http::ResponseTrailerMap&, - const StreamInfo::StreamInfo&) const override; + const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&, + absl::string_view) const override; }; /** @@ -299,10 +322,11 @@ class FilterStateFormatter : public FormatterProvider { // FormatterProvider std::string format(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, - const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&) const override; + const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&, + absl::string_view) const override; ProtobufWkt::Value formatValue(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, - const Http::ResponseTrailerMap&, - const StreamInfo::StreamInfo&) const override; + const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&, + absl::string_view) const override; private: const Envoy::StreamInfo::FilterState::Object* @@ -323,10 +347,11 @@ class StartTimeFormatter : public FormatterProvider { // FormatterProvider std::string format(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, - const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&) const override; + const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&, + absl::string_view) const override; ProtobufWkt::Value formatValue(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, - const Http::ResponseTrailerMap&, - const StreamInfo::StreamInfo&) const override; + const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&, + absl::string_view) const override; private: const Envoy::DateFormatter date_formatter_; diff --git a/source/common/http/BUILD b/source/common/http/BUILD index c5b14f4a4b91..8bacf5d1395a 100644 --- a/source/common/http/BUILD +++ b/source/common/http/BUILD @@ -143,6 +143,7 @@ envoy_cc_library( "//include/envoy/http:filter_interface", "//include/envoy/http:request_id_extension_interface", "//include/envoy/router:rds_interface", + "//source/common/local_reply:local_reply_lib", "//source/common/network:utility_lib", "//source/common/stats:symbol_table_lib", "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", diff --git a/source/common/http/async_client_impl.h b/source/common/http/async_client_impl.h index 241c7135348c..90acf1aadda0 100644 --- a/source/common/http/async_client_impl.h +++ b/source/common/http/async_client_impl.h @@ -361,15 +361,19 @@ class AsyncStreamImpl : public AsyncClient::Stream, absl::string_view details) override { stream_info_.setResponseCodeDetails(details); 
Utility::sendLocalReply( - is_grpc_request_, - [this, modify_headers](ResponseHeaderMapPtr&& headers, bool end_stream) -> void { - if (modify_headers != nullptr) { - modify_headers(*headers); - } - encodeHeaders(std::move(headers), end_stream); - }, - [this](Buffer::Instance& data, bool end_stream) -> void { encodeData(data, end_stream); }, - remote_closed_, code, body, grpc_status, is_head_request_); + remote_closed_, + Utility::EncodeFunctions{ + nullptr, + [this, modify_headers](ResponseHeaderMapPtr&& headers, bool end_stream) -> void { + if (modify_headers != nullptr) { + modify_headers(*headers); + } + encodeHeaders(std::move(headers), end_stream); + }, + [this](Buffer::Instance& data, bool end_stream) -> void { + encodeData(data, end_stream); + }}, + Utility::LocalReplyData{is_grpc_request_, code, body, grpc_status, is_head_request_}); } // The async client won't pause if sending an Expect: 100-Continue so simply // swallows any incoming encode100Continue. diff --git a/source/common/http/conn_manager_config.h b/source/common/http/conn_manager_config.h index bbbc07ce0825..faf691b6fc20 100644 --- a/source/common/http/conn_manager_config.h +++ b/source/common/http/conn_manager_config.h @@ -10,6 +10,7 @@ #include "envoy/type/v3/percent.pb.h" #include "common/http/date_provider.h" +#include "common/local_reply/local_reply.h" #include "common/network/utility.h" #include "common/stats/symbol_table_impl.h" @@ -435,6 +436,11 @@ class ConnectionManagerConfig { */ virtual envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction headersWithUnderscoresAction() const PURE; + + /** + * @return LocalReply configuration which supplies mapping for local reply generated by Envoy. + */ + virtual const LocalReply::LocalReply& localReply() const PURE; }; } // namespace Http } // namespace Envoy diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index c20b2690cd74..94575ac9ff79 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -1522,22 +1522,28 @@ void ConnectionManagerImpl::ActiveStream::sendLocalReply( stream_info_.setResponseCodeDetails(details); Utility::sendLocalReply( - is_grpc_request, - [this, modify_headers](ResponseHeaderMapPtr&& headers, bool end_stream) -> void { - if (modify_headers != nullptr) { - modify_headers(*headers); - } - response_headers_ = std::move(headers); - // TODO: Start encoding from the last decoder filter that saw the - // request instead. - encodeHeaders(nullptr, *response_headers_, end_stream); - }, - [this](Buffer::Instance& data, bool end_stream) -> void { - // TODO: Start encoding from the last decoder filter that saw the - // request instead. - encodeData(nullptr, data, end_stream, FilterIterationStartState::CanStartFromCurrent); - }, - state_.destroyed_, code, body, grpc_status, is_head_request); + state_.destroyed_, + Utility::EncodeFunctions{ + [this](ResponseHeaderMap& response_headers, Code& code, std::string& body, + absl::string_view& content_type) -> void { + connection_manager_.config_.localReply().rewrite( + request_headers_.get(), response_headers, stream_info_, code, body, content_type); + }, + [this, modify_headers](ResponseHeaderMapPtr&& headers, bool end_stream) -> void { + if (modify_headers != nullptr) { + modify_headers(*headers); + } + response_headers_ = std::move(headers); + // TODO: Start encoding from the last decoder filter that saw the + // request instead. 
+ encodeHeaders(nullptr, *response_headers_, end_stream); + }, + [this](Buffer::Instance& data, bool end_stream) -> void { + // TODO: Start encoding from the last decoder filter that saw the + // request instead. + encodeData(nullptr, data, end_stream, FilterIterationStartState::CanStartFromCurrent); + }}, + Utility::LocalReplyData{is_grpc_request, code, body, grpc_status, is_head_request}); } void ConnectionManagerImpl::ActiveStream::encode100ContinueHeaders( @@ -2552,17 +2558,25 @@ void ConnectionManagerImpl::ActiveStreamEncoderFilter::responseDataTooLarge() { // Instead, call the encodeHeadersInternal / encodeDataInternal helpers // directly, which maximizes shared code with the normal response path. Http::Utility::sendLocalReply( - Grpc::Common::hasGrpcContentType(*parent_.request_headers_), - [&](ResponseHeaderMapPtr&& response_headers, bool end_stream) -> void { - parent_.response_headers_ = std::move(response_headers); - parent_.encodeHeadersInternal(*parent_.response_headers_, end_stream); - }, - [&](Buffer::Instance& data, bool end_stream) -> void { - parent_.encodeDataInternal(data, end_stream); - }, - parent_.state_.destroyed_, Http::Code::InternalServerError, - CodeUtility::toString(Http::Code::InternalServerError), absl::nullopt, - parent_.state_.is_head_request_); + parent_.state_.destroyed_, + Utility::EncodeFunctions{ + [&](ResponseHeaderMap& response_headers, Code& code, std::string& body, + absl::string_view& content_type) -> void { + parent_.connection_manager_.config_.localReply().rewrite( + parent_.request_headers_.get(), response_headers, parent_.stream_info_, code, + body, content_type); + }, + [&](ResponseHeaderMapPtr&& response_headers, bool end_stream) -> void { + parent_.response_headers_ = std::move(response_headers); + parent_.encodeHeadersInternal(*parent_.response_headers_, end_stream); + }, + [&](Buffer::Instance& data, bool end_stream) -> void { + parent_.encodeDataInternal(data, end_stream); + }}, + Utility::LocalReplyData{Grpc::Common::hasGrpcContentType(*parent_.request_headers_), + Http::Code::InternalServerError, + CodeUtility::toString(Http::Code::InternalServerError), + absl::nullopt, parent_.state_.is_head_request_}); parent_.maybeEndEncode(parent_.state_.local_complete_); } else { ENVOY_STREAM_LOG( diff --git a/source/common/http/utility.cc b/source/common/http/utility.cc index dfc1f64dc10d..079544db9aa2 100644 --- a/source/common/http/utility.cc +++ b/source/common/http/utility.cc @@ -427,64 +427,69 @@ Utility::parseHttp1Settings(const envoy::config::core::v3::Http1ProtocolOptions& return ret; } -void Utility::sendLocalReply(bool is_grpc, StreamDecoderFilterCallbacks& callbacks, - const bool& is_reset, Code response_code, absl::string_view body_text, - const absl::optional grpc_status, - bool is_head_request) { +void Utility::sendLocalReply(const bool& is_reset, StreamDecoderFilterCallbacks& callbacks, + const LocalReplyData& local_reply_data) { sendLocalReply( - is_grpc, - [&](ResponseHeaderMapPtr&& headers, bool end_stream) -> void { - callbacks.encodeHeaders(std::move(headers), end_stream); - }, - [&](Buffer::Instance& data, bool end_stream) -> void { - callbacks.encodeData(data, end_stream); - }, - is_reset, response_code, body_text, grpc_status, is_head_request); -} - -void Utility::sendLocalReply( - bool is_grpc, - std::function encode_headers, - std::function encode_data, const bool& is_reset, - Code response_code, absl::string_view body_text, - const absl::optional grpc_status, bool is_head_request) { + is_reset, + 
Utility::EncodeFunctions{nullptr, + [&](ResponseHeaderMapPtr&& headers, bool end_stream) -> void { + callbacks.encodeHeaders(std::move(headers), end_stream); + }, + [&](Buffer::Instance& data, bool end_stream) -> void { + callbacks.encodeData(data, end_stream); + }}, + local_reply_data); +} + +void Utility::sendLocalReply(const bool& is_reset, const EncodeFunctions& encode_functions, + const LocalReplyData& local_reply_data) { // encode_headers() may reset the stream, so the stream must not be reset before calling it. ASSERT(!is_reset); + + // rewrite_response will rewrite response code and body text. + Code response_code = local_reply_data.response_code_; + std::string body_text(local_reply_data.body_text_); + absl::string_view content_type(Headers::get().ContentTypeValues.Text); + + ResponseHeaderMapPtr response_headers{createHeaderMap( + {{Headers::get().Status, std::to_string(enumToInt(response_code))}})}; + + if (encode_functions.rewrite_) { + encode_functions.rewrite_(*response_headers, response_code, body_text, content_type); + } + // Respond with a gRPC trailers-only response if the request is gRPC - if (is_grpc) { - ResponseHeaderMapPtr response_headers{createHeaderMap( - {{Headers::get().Status, std::to_string(enumToInt(Code::OK))}, - {Headers::get().ContentType, Headers::get().ContentTypeValues.Grpc}, - {Headers::get().GrpcStatus, - std::to_string(enumToInt( - grpc_status ? grpc_status.value() - : Grpc::Utility::httpToGrpcStatus(enumToInt(response_code))))}})}; - if (!body_text.empty() && !is_head_request) { + if (local_reply_data.is_grpc_) { + response_headers->setStatus(std::to_string(enumToInt(Code::OK))); + response_headers->setReferenceContentType(Headers::get().ContentTypeValues.Grpc); + response_headers->setGrpcStatus( + std::to_string(enumToInt(local_reply_data.grpc_status_ + ? local_reply_data.grpc_status_.value() + : Grpc::Utility::httpToGrpcStatus(enumToInt(response_code))))); + if (!body_text.empty() && !local_reply_data.is_head_request_) { // TODO(dio): Probably it is worth to consider caching the encoded message based on gRPC // status. 
response_headers->setGrpcMessage(PercentEncoding::encode(body_text)); } - encode_headers(std::move(response_headers), true); // Trailers only response + encode_functions.encode_headers_(std::move(response_headers), true); // Trailers only response return; } - ResponseHeaderMapPtr response_headers{createHeaderMap<ResponseHeaderMapImpl>( - {{Headers::get().Status, std::to_string(enumToInt(response_code))}})}; if (!body_text.empty()) { response_headers->setContentLength(body_text.size()); - response_headers->setReferenceContentType(Headers::get().ContentTypeValues.Text); + response_headers->setReferenceContentType(content_type); } - if (is_head_request) { - encode_headers(std::move(response_headers), true); + if (local_reply_data.is_head_request_) { + encode_functions.encode_headers_(std::move(response_headers), true); return; } - encode_headers(std::move(response_headers), body_text.empty()); + encode_functions.encode_headers_(std::move(response_headers), body_text.empty()); // encode_headers()) may have changed the referenced is_reset so we need to test it if (!body_text.empty() && !is_reset) { Buffer::OwnedImpl buffer(body_text); - encode_data(buffer, true); + encode_functions.encode_data_(buffer, true); } } diff --git a/source/common/http/utility.h b/source/common/http/utility.h index cc6a2a3e4b7c..476eda1c6c7d 100644 --- a/source/common/http/utility.h +++ b/source/common/http/utility.h @@ -255,43 +255,52 @@ bool isWebSocketUpgradeRequest(const RequestHeaderMap& headers); */ Http1Settings parseHttp1Settings(const envoy::config::core::v3::Http1ProtocolOptions& config); +struct EncodeFunctions { + // Function to rewrite locally generated response. + std::function<void(ResponseHeaderMap& response_headers, Code& code, std::string& body, absl::string_view& content_type)> + rewrite_; + // Function to encode response headers. + std::function<void(ResponseHeaderMapPtr&& headers, bool end_stream)> encode_headers_; + // Function to encode the response body. + std::function<void(Buffer::Instance& data, bool end_stream)> encode_data_; +}; + +struct LocalReplyData { + // Tells if this is a response to a gRPC request. + bool is_grpc_; + // Supplies the HTTP response code. + Code response_code_; + // Supplies the optional body text which is returned. + absl::string_view body_text_; + // gRPC status code to override the httpToGrpcStatus mapping with. + const absl::optional<Grpc::Status::GrpcStatus> grpc_status_; + // Tells if this is a response to a HEAD request. + bool is_head_request_ = false; +}; + /** * Create a locally generated response using filter callbacks. - * @param is_grpc tells if this is a response to a gRPC request. - * @param callbacks supplies the filter callbacks to use. * @param is_reset boolean reference that indicates whether a stream has been reset. It is the - * responsibility of the caller to ensure that this is set to false if onDestroy() - * is invoked in the context of sendLocalReply(). - * @param response_code supplies the HTTP response code. - * @param body_text supplies the optional body text which is sent using the text/plain content - * type. - * @param grpc_status the gRPC status code to override the httpToGrpcStatus mapping with. - * @param is_head_request tells if this is a response to a HEAD request + * responsibility of the caller to ensure that this is set to false if onDestroy() + * is invoked in the context of sendLocalReply(). + * @param callbacks supplies the filter callbacks to use. + * @param local_reply_data struct containing the data needed to generate the reply.
*/ -void sendLocalReply(bool is_grpc, StreamDecoderFilterCallbacks& callbacks, const bool& is_reset, - Code response_code, absl::string_view body_text, - const absl::optional<Grpc::Status::GrpcStatus> grpc_status, - bool is_head_request); +void sendLocalReply(const bool& is_reset, StreamDecoderFilterCallbacks& callbacks, + const LocalReplyData& local_reply_data); /** * Create a locally generated response using the provided lambdas. - * @param is_grpc tells if this is a response to a gRPC request. - * @param encode_headers supplies the function to encode response headers. - * @param encode_data supplies the function to encode the response body. + * @param is_reset boolean reference that indicates whether a stream has been reset. It is the * responsibility of the caller to ensure that this is set to false if onDestroy() * is invoked in the context of sendLocalReply(). - * @param response_code supplies the HTTP response code. - * @param body_text supplies the optional body text which is sent using the text/plain content - * type. - * @param grpc_status the gRPC status code to override the httpToGrpcStatus mapping with. - */ -void sendLocalReply( - bool is_grpc, - std::function<void(ResponseHeaderMapPtr&& headers, bool end_stream)> encode_headers, - std::function<void(Buffer::Instance& data, bool end_stream)> encode_data, const bool& is_reset, - Code response_code, absl::string_view body_text, - const absl::optional<Grpc::Status::GrpcStatus> grpc_status, bool is_head_request = false); + * @param encode_functions supplies the functions used to encode the response headers and body. + * @param local_reply_data struct containing the data needed to generate the reply. + */ +void sendLocalReply(const bool& is_reset, const EncodeFunctions& encode_functions, + const LocalReplyData& local_reply_data); struct GetLastAddressFromXffInfo { // Last valid address pulled from the XFF header. diff --git a/source/common/local_reply/BUILD b/source/common/local_reply/BUILD new file mode 100644 index 000000000000..750a3e59fa5a --- /dev/null +++ b/source/common/local_reply/BUILD @@ -0,0 +1,29 @@ +licenses(["notice"]) # Apache 2 + +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) + +envoy_package() + +envoy_cc_library( + name = "local_reply_lib", + srcs = ["local_reply.cc"], + hdrs = ["local_reply.h"], + deps = [ + "//include/envoy/http:codes_interface", + "//include/envoy/http:header_map_interface", + "//include/envoy/server:filter_config_interface", + "//include/envoy/stream_info:stream_info_interface", + "//source/common/access_log:access_log_formatter_lib", + "//source/common/access_log:access_log_lib", + "//source/common/common:enum_to_int", + "//source/common/common:substitution_format_string_lib", + "//source/common/config:datasource_lib", + "//source/common/http:header_map_lib", + "//source/common/stream_info:stream_info_lib", + "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", + ], +) diff --git a/source/common/local_reply/local_reply.cc b/source/common/local_reply/local_reply.cc new file mode 100644 index 000000000000..31b7ce468992 --- /dev/null +++ b/source/common/local_reply/local_reply.cc @@ -0,0 +1,175 @@ +#include "common/local_reply/local_reply.h" + +#include <string> +#include <vector> + +#include "common/access_log/access_log_formatter.h" +#include "common/access_log/access_log_impl.h" +#include "common/common/enum_to_int.h" +#include "common/common/substitution_format_string.h" +#include "common/config/datasource.h" +#include "common/http/header_map_impl.h" + +namespace Envoy { +namespace LocalReply { +namespace { + +struct EmptyHeaders { + Http::RequestHeaderMapImpl request_headers; +
Http::ResponseTrailerMapImpl response_trailers; +}; + +using StaticEmptyHeaders = ConstSingleton<EmptyHeaders>; + +} // namespace + +class BodyFormatter { +public: + BodyFormatter() + : formatter_(std::make_unique<AccessLog::FormatterImpl>("%LOCAL_REPLY_BODY%")), + content_type_(Http::Headers::get().ContentTypeValues.Text) {} + + BodyFormatter(const envoy::config::core::v3::SubstitutionFormatString& config) + : formatter_(SubstitutionFormatStringUtils::fromProtoConfig(config)), + content_type_( + config.format_case() == + envoy::config::core::v3::SubstitutionFormatString::FormatCase::kJsonFormat + ? Http::Headers::get().ContentTypeValues.Json + : Http::Headers::get().ContentTypeValues.Text) {} + + void format(const Http::RequestHeaderMap& request_headers, + const Http::ResponseHeaderMap& response_headers, + const Http::ResponseTrailerMap& response_trailers, + const StreamInfo::StreamInfo& stream_info, std::string& body, + absl::string_view& content_type) const { + body = + formatter_->format(request_headers, response_headers, response_trailers, stream_info, body); + content_type = content_type_; + } + +private: + const AccessLog::FormatterPtr formatter_; + const absl::string_view content_type_; +}; + +using BodyFormatterPtr = std::unique_ptr<BodyFormatter>; + +class ResponseMapper { +public: + ResponseMapper( + const envoy::extensions::filters::network::http_connection_manager::v3::ResponseMapper& + config, + Server::Configuration::FactoryContext& context) + : filter_(AccessLog::FilterFactory::fromProto(config.filter(), context.runtime(), + context.random(), + context.messageValidationVisitor())) { + if (config.has_status_code()) { + status_code_ = static_cast<Http::Code>(config.status_code().value()); + } + if (config.has_body()) { + body_ = Config::DataSource::read(config.body(), true, context.api()); + } + + if (config.has_body_format_override()) { + body_formatter_ = std::make_unique<BodyFormatter>(config.body_format_override()); + } + } + + bool matchAndRewrite(const Http::RequestHeaderMap& request_headers, + Http::ResponseHeaderMap& response_headers, + const Http::ResponseTrailerMap& response_trailers, + StreamInfo::StreamInfoImpl& stream_info, Http::Code& code, std::string& body, + BodyFormatter*& final_formatter) const { + // If not matched, just bail out. + if (!filter_->evaluate(stream_info, request_headers, response_headers, response_trailers)) { + return false; + } + + if (body_.has_value()) { + body = body_.value(); + } + + if (status_code_.has_value() && code != status_code_.value()) { + code = status_code_.value(); + response_headers.setStatus(std::to_string(enumToInt(code))); + stream_info.response_code_ = static_cast<uint32_t>(code); + } + + if (body_formatter_) { + final_formatter = body_formatter_.get(); + } + return true; + } + +private: + const AccessLog::FilterPtr filter_; + absl::optional<Http::Code> status_code_; + absl::optional<std::string> body_; + BodyFormatterPtr body_formatter_; +}; + +using ResponseMapperPtr = std::unique_ptr<ResponseMapper>; + +class LocalReplyImpl : public LocalReply { +public: + LocalReplyImpl() : body_formatter_(std::make_unique<BodyFormatter>()) {} + + LocalReplyImpl( + const envoy::extensions::filters::network::http_connection_manager::v3::LocalReplyConfig& + config, + Server::Configuration::FactoryContext& context) + : body_formatter_(config.has_body_format() + ? 
std::make_unique<BodyFormatter>(config.body_format()) + : std::make_unique<BodyFormatter>()) { + for (const auto& mapper : config.mappers()) { + mappers_.emplace_back(std::make_unique<ResponseMapper>(mapper, context)); + } + } + + void rewrite(const Http::RequestHeaderMap* request_headers, + Http::ResponseHeaderMap& response_headers, StreamInfo::StreamInfoImpl& stream_info, + Http::Code& code, std::string& body, + absl::string_view& content_type) const override { + // Set response code to stream_info and response_headers due to: + // 1) StatusCode filter is using response_code from stream_info, + // 2) %RESP(:status)% is from Status() in response_headers. + response_headers.setStatus(std::to_string(enumToInt(code))); + stream_info.response_code_ = static_cast<uint32_t>(code); + + if (request_headers == nullptr) { + request_headers = &StaticEmptyHeaders::get().request_headers; + } + + BodyFormatter* final_formatter{}; + for (const auto& mapper : mappers_) { + if (mapper->matchAndRewrite(*request_headers, response_headers, + StaticEmptyHeaders::get().response_trailers, stream_info, code, + body, final_formatter)) { + break; + } + } + + if (!final_formatter) { + final_formatter = body_formatter_.get(); + } + return final_formatter->format(*request_headers, response_headers, + StaticEmptyHeaders::get().response_trailers, stream_info, body, + content_type); + } + +private: + std::list<ResponseMapperPtr> mappers_; + const BodyFormatterPtr body_formatter_; +}; + +LocalReplyPtr Factory::createDefault() { return std::make_unique<LocalReplyImpl>(); } + +LocalReplyPtr Factory::create( + const envoy::extensions::filters::network::http_connection_manager::v3::LocalReplyConfig& + config, + Server::Configuration::FactoryContext& context) { + return std::make_unique<LocalReplyImpl>(config, context); +} + +} // namespace LocalReply +} // namespace Envoy diff --git a/source/common/local_reply/local_reply.h b/source/common/local_reply/local_reply.h new file mode 100644 index 000000000000..cafcaf33d307 --- /dev/null +++ b/source/common/local_reply/local_reply.h @@ -0,0 +1,54 @@ +#pragma once + +#include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" +#include "envoy/http/codes.h" +#include "envoy/http/header_map.h" +#include "envoy/server/filter_config.h" + +#include "common/stream_info/stream_info_impl.h" + +namespace Envoy { +namespace LocalReply { + +class LocalReply { +public: + virtual ~LocalReply() = default; + + /** + * Rewrite the response status code, body and content_type. + * @param request_headers supplies the information about request headers required by filters. + * @param stream_info supplies the information about streams required by filters. + * @param code status code. + * @param body response body. + * @param content_type response content_type. + */ + virtual void rewrite(const Http::RequestHeaderMap* request_headers, + Http::ResponseHeaderMap& response_headers, + StreamInfo::StreamInfoImpl& stream_info, Http::Code& code, std::string& body, + absl::string_view& content_type) const PURE; +}; + +using LocalReplyPtr = std::unique_ptr<LocalReply>; + +/** + * Factory for creating LocalReply objects from proto config. + */ +class Factory { +public: + /** + * Create a LocalReply object from proto config. + */ + static LocalReplyPtr + create(const envoy::extensions::filters::network::http_connection_manager::v3::LocalReplyConfig& + config, + Server::Configuration::FactoryContext& context); + + /** + * Create a default LocalReply object with empty config. + * It is used in places where a Server::Configuration::FactoryContext is not available. 
+ */ + static LocalReplyPtr createDefault(); +}; + +} // namespace LocalReply +} // namespace Envoy diff --git a/source/common/router/header_formatter.cc b/source/common/router/header_formatter.cc index cb74a7aaee90..e846240aa1f3 100644 --- a/source/common/router/header_formatter.cc +++ b/source/common/router/header_formatter.cc @@ -329,7 +329,8 @@ StreamInfoHeaderFormatter::StreamInfoHeaderFormatter(absl::string_view field_nam std::string formatted; for (const auto& formatter : formatters) { absl::StrAppend(&formatted, formatter->format(empty_request_headers, empty_response_headers, - empty_response_trailers, stream_info)); + empty_response_trailers, stream_info, + absl::string_view())); } return formatted; }; diff --git a/source/extensions/access_loggers/file/file_access_log_impl.cc b/source/extensions/access_loggers/file/file_access_log_impl.cc index 9ccbf36a7ee0..323160da2a17 100644 --- a/source/extensions/access_loggers/file/file_access_log_impl.cc +++ b/source/extensions/access_loggers/file/file_access_log_impl.cc @@ -16,8 +16,8 @@ void FileAccessLog::emitLog(const Http::RequestHeaderMap& request_headers, const Http::ResponseHeaderMap& response_headers, const Http::ResponseTrailerMap& response_trailers, const StreamInfo::StreamInfo& stream_info) { - log_file_->write( - formatter_->format(request_headers, response_headers, response_trailers, stream_info)); + log_file_->write(formatter_->format(request_headers, response_headers, response_trailers, + stream_info, absl::string_view())); } } // namespace File diff --git a/source/extensions/filters/network/http_connection_manager/BUILD b/source/extensions/filters/network/http_connection_manager/BUILD index b7ebe990a80d..21a10ddadef3 100644 --- a/source/extensions/filters/network/http_connection_manager/BUILD +++ b/source/extensions/filters/network/http_connection_manager/BUILD @@ -38,6 +38,7 @@ envoy_cc_extension( "//source/common/http/http1:codec_lib", "//source/common/http/http2:codec_lib", "//source/common/json:json_loader_lib", + "//source/common/local_reply:local_reply_lib", "//source/common/protobuf:utility_lib", "//source/common/router:rds_lib", "//source/common/router:scoped_rds_lib", diff --git a/source/extensions/filters/network/http_connection_manager/config.cc b/source/extensions/filters/network/http_connection_manager/config.cc index 85d78cb75623..1c0958ad6f1d 100644 --- a/source/extensions/filters/network/http_connection_manager/config.cc +++ b/source/extensions/filters/network/http_connection_manager/config.cc @@ -25,6 +25,7 @@ #include "common/http/http3/well_known_names.h" #include "common/http/request_id_extension_impl.h" #include "common/http/utility.h" +#include "common/local_reply/local_reply.h" #include "common/protobuf/utility.h" #include "common/router/rds_impl.h" #include "common/router/scoped_rds.h" @@ -218,7 +219,8 @@ HttpConnectionManagerConfig::HttpConnectionManagerConfig( merge_slashes_(config.merge_slashes()), strip_matching_port_(config.strip_matching_host_port()), headers_with_underscores_action_( - config.common_http_protocol_options().headers_with_underscores_action()) { + config.common_http_protocol_options().headers_with_underscores_action()), + local_reply_(LocalReply::Factory::create(config.local_reply_config(), context)) { // If idle_timeout_ was not configured in common_http_protocol_options, use value in deprecated // idle_timeout field. // TODO(asraa): Remove when idle_timeout is removed. 
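To make the new Utility::sendLocalReply() call shape concrete, the following is a minimal, self-contained sketch of the pattern introduced above: an EncodeFunctions bundle whose optional rewrite_ hook runs before anything is encoded, plus a LocalReplyData bundle describing the reply. It intentionally uses simplified stand-in types (a std::map for headers, int status codes) rather than Envoy's ResponseHeaderMap/Buffer::Instance, so it compiles on its own; only the ordering of the steps mirrors the patch.

#include <functional>
#include <iostream>
#include <map>
#include <optional>
#include <string>

// Simplified stand-ins for Envoy types (illustration only).
using HeaderMap = std::map<std::string, std::string>;

struct EncodeFunctions {
  // Optional hook that may rewrite status code, body and content type
  // before anything is encoded (mirrors LocalReply::rewrite()).
  std::function<void(HeaderMap&, int& code, std::string& body, std::string& content_type)> rewrite_;
  std::function<void(HeaderMap&& headers, bool end_stream)> encode_headers_;
  std::function<void(const std::string& data, bool end_stream)> encode_data_;
};

struct LocalReplyData {
  bool is_grpc_;
  int response_code_;
  std::string body_text_;
  std::optional<int> grpc_status_;
  bool is_head_request_ = false;
};

// Minimal model of the new two-struct sendLocalReply(): rewrite first,
// then either a gRPC trailers-only response or headers followed by body.
void sendLocalReply(const EncodeFunctions& encode, const LocalReplyData& reply) {
  int code = reply.response_code_;
  std::string body = reply.body_text_;
  std::string content_type = "text/plain";

  HeaderMap headers{{":status", std::to_string(code)}};
  if (encode.rewrite_) {
    encode.rewrite_(headers, code, body, content_type);
  }

  if (reply.is_grpc_) {
    headers[":status"] = "200";
    headers["content-type"] = "application/grpc";
    // 13 == INTERNAL; the real code maps the HTTP code via httpToGrpcStatus().
    headers["grpc-status"] = std::to_string(reply.grpc_status_.value_or(13));
    if (!body.empty() && !reply.is_head_request_) {
      headers["grpc-message"] = body;
    }
    encode.encode_headers_(std::move(headers), /*end_stream=*/true);
    return;
  }

  if (!body.empty()) {
    headers["content-length"] = std::to_string(body.size());
    headers["content-type"] = content_type;
  }
  if (reply.is_head_request_ || body.empty()) {
    encode.encode_headers_(std::move(headers), /*end_stream=*/true);
    return;
  }
  encode.encode_headers_(std::move(headers), /*end_stream=*/false);
  encode.encode_data_(body, /*end_stream=*/true);
}

int main() {
  EncodeFunctions encode{
      nullptr, // callers without a LocalReply config pass no rewrite hook
      [](HeaderMap&& h, bool) { std::cout << "headers: " << h[":status"] << "\n"; },
      [](const std::string& d, bool) { std::cout << "body: " << d << "\n"; }};
  sendLocalReply(encode, LocalReplyData{false, 503, "upstream connect error", std::nullopt, false});
  return 0;
}

The design point mirrored here is that rewrite_ may change the status code, body and content type before the gRPC/plain branching happens, which is what lets the HCM's configured LocalReply hook into every locally generated response while callers without such config (the admin endpoint, the async client) simply pass a null rewrite hook.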
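The mapper selection in the new LocalReply rewrite path can likewise be modeled in a few lines: the first matching mapper wins, it may override the status code and body, and it may carry its own body-format override, with the default body formatter used otherwise. The sketch below uses invented stand-in types (Reply, Mapper, LocalReplyModel) purely to illustrate the control flow of ResponseMapper::matchAndRewrite() and the rewrite() loop added in this patch; it is not Envoy's actual API.

#include <functional>
#include <iostream>
#include <optional>
#include <string>
#include <vector>

// Simplified stand-ins (illustration only); the real code works on
// StreamInfo, header maps and access-log filters/formatters.
struct Reply {
  int code;
  std::string body;
};

struct Mapper {
  std::function<bool(const Reply&)> filter;       // like AccessLog::Filter::evaluate()
  std::optional<int> status_code;                 // like ResponseMapper::status_code_
  std::optional<std::string> body;                // like ResponseMapper::body_
  std::function<std::string(const Reply&)> body_format_override; // optional formatter
};

class LocalReplyModel {
public:
  LocalReplyModel(std::vector<Mapper> mappers,
                  std::function<std::string(const Reply&)> default_fmt)
      : mappers_(std::move(mappers)), default_format_(std::move(default_fmt)) {}

  // Mirrors the rewrite() loop: the first matching mapper may override
  // status/body and select its own formatter; otherwise the default applies.
  void rewrite(Reply& reply) const {
    const std::function<std::string(const Reply&)>* format = &default_format_;
    for (const auto& m : mappers_) {
      if (!m.filter(reply)) {
        continue;
      }
      if (m.body) {
        reply.body = *m.body;
      }
      if (m.status_code && reply.code != *m.status_code) {
        reply.code = *m.status_code;
      }
      if (m.body_format_override) {
        format = &m.body_format_override;
      }
      break; // first match wins
    }
    reply.body = (*format)(reply);
  }

private:
  const std::vector<Mapper> mappers_;
  const std::function<std::string(const Reply&)> default_format_;
};

int main() {
  // One mapper: turn any 5xx into a 503 with a JSON-ish body format.
  LocalReplyModel model(
      {Mapper{[](const Reply& r) { return r.code >= 500; },
              503,
              std::string("upstream unavailable"),
              [](const Reply& r) {
                return "{\"code\":" + std::to_string(r.code) + ",\"message\":\"" + r.body + "\"}";
              }}},
      [](const Reply& r) { return r.body; }); // default: body passes through unchanged

  Reply reply{500, "original local reply body"};
  model.rewrite(reply);
  std::cout << reply.code << " " << reply.body << "\n"; // 503 {"code":503,...}
  return 0;
}

Note how, as in the patch, a mapper that matches but defines no body-format override still falls back to the default formatter, and a mapper that matches but sets no status code leaves the original code untouched.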
diff --git a/source/extensions/filters/network/http_connection_manager/config.h b/source/extensions/filters/network/http_connection_manager/config.h index 2aa7e2952ab3..ca76b80a593a 100644 --- a/source/extensions/filters/network/http_connection_manager/config.h +++ b/source/extensions/filters/network/http_connection_manager/config.h @@ -21,6 +21,7 @@ #include "common/http/http1/codec_impl.h" #include "common/http/http2/codec_impl.h" #include "common/json/json_loader.h" +#include "common/local_reply/local_reply.h" #include "common/router/rds_impl.h" #include "common/router/scoped_rds.h" #include "common/tracing/http_tracer_impl.h" @@ -165,6 +166,7 @@ class HttpConnectionManagerConfig : Logger::Loggable, return headers_with_underscores_action_; } std::chrono::milliseconds delayedCloseTimeout() const override { return delayed_close_timeout_; } + const LocalReply::LocalReply& localReply() const override { return *local_reply_; } private: enum class CodecType { HTTP1, HTTP2, HTTP3, AUTO }; @@ -232,6 +234,7 @@ class HttpConnectionManagerConfig : Logger::Loggable, const bool strip_matching_port_; const envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction headers_with_underscores_action_; + const LocalReply::LocalReplyPtr local_reply_; // Default idle timeout is 5 minutes if nothing is specified in the HCM config. static const uint64_t StreamIdleTimeoutMs = 5 * 60 * 1000; diff --git a/source/server/admin/admin.cc b/source/server/admin/admin.cc index 0484488b4977..4c4fe24018c9 100644 --- a/source/server/admin/admin.cc +++ b/source/server/admin/admin.cc @@ -630,7 +630,8 @@ AdminImpl::AdminImpl(const std::string& profile_path, Server::Instance& server) MAKE_ADMIN_HANDLER(logs_handler_.handlerReopenLogs), false, true}, }, date_provider_(server.dispatcher().timeSource()), - admin_filter_chain_(std::make_shared()) {} + admin_filter_chain_(std::make_shared()), + local_reply_(LocalReply::Factory::createDefault()) {} Http::ServerConnectionPtr AdminImpl::createCodec(Network::Connection& connection, const Buffer::Instance& data, diff --git a/source/server/admin/admin.h b/source/server/admin/admin.h index f59db77c2292..74449114bae9 100644 --- a/source/server/admin/admin.h +++ b/source/server/admin/admin.h @@ -175,6 +175,7 @@ class AdminImpl : public Admin, headersWithUnderscoresAction() const override { return envoy::config::core::v3::HttpProtocolOptions::ALLOW; } + const LocalReply::LocalReply& localReply() const override { return *local_reply_; } Http::Code request(absl::string_view path_and_query, absl::string_view method, Http::ResponseHeaderMap& response_headers, std::string& body) override; void closeSocket(); @@ -409,6 +410,7 @@ class AdminImpl : public Admin, Network::ListenSocketFactorySharedPtr socket_factory_; AdminListenerPtr listener_; const AdminInternalAddressConfig internal_address_config_; + const LocalReply::LocalReplyPtr local_reply_; }; } // namespace Server diff --git a/test/common/access_log/access_log_formatter_fuzz_test.cc b/test/common/access_log/access_log_formatter_fuzz_test.cc index bf1e2eeb4984..7d96a275c094 100644 --- a/test/common/access_log/access_log_formatter_fuzz_test.cc +++ b/test/common/access_log/access_log_formatter_fuzz_test.cc @@ -21,7 +21,8 @@ DEFINE_PROTO_FUZZER(const test::common::access_log::TestCase& input) { Fuzz::fromHeaders(input.response_trailers()); const auto& stream_info = Fuzz::fromStreamInfo(input.stream_info()); for (const auto& it : formatters) { - it->format(request_headers, response_headers, response_trailers, stream_info); + 
it->format(request_headers, response_headers, response_trailers, stream_info, + absl::string_view()); } ENVOY_LOG_MISC(trace, "Success"); } catch (const EnvoyException& e) { diff --git a/test/common/access_log/access_log_formatter_speed_test.cc b/test/common/access_log/access_log_formatter_speed_test.cc index 4cb64f3d6052..1fd9f360a750 100644 --- a/test/common/access_log/access_log_formatter_speed_test.cc +++ b/test/common/access_log/access_log_formatter_speed_test.cc @@ -51,9 +51,10 @@ static void BM_AccessLogFormatter(benchmark::State& state) { Http::TestRequestHeaderMapImpl request_headers; Http::TestResponseHeaderMapImpl response_headers; Http::TestResponseTrailerMapImpl response_trailers; + std::string body; for (auto _ : state) { output_bytes += - formatter->format(request_headers, response_headers, response_trailers, *stream_info) + formatter->format(request_headers, response_headers, response_trailers, *stream_info, body) .length(); } benchmark::DoNotOptimize(output_bytes); @@ -69,9 +70,11 @@ static void BM_JsonAccessLogFormatter(benchmark::State& state) { Http::TestRequestHeaderMapImpl request_headers; Http::TestResponseHeaderMapImpl response_headers; Http::TestResponseTrailerMapImpl response_trailers; + std::string body; for (auto _ : state) { output_bytes += - json_formatter->format(request_headers, response_headers, response_trailers, *stream_info) + json_formatter + ->format(request_headers, response_headers, response_trailers, *stream_info, body) .length(); } benchmark::DoNotOptimize(output_bytes); @@ -88,10 +91,12 @@ static void BM_TypedJsonAccessLogFormatter(benchmark::State& state) { Http::TestRequestHeaderMapImpl request_headers; Http::TestResponseHeaderMapImpl response_headers; Http::TestResponseTrailerMapImpl response_trailers; + std::string body; for (auto _ : state) { - output_bytes += typed_json_formatter - ->format(request_headers, response_headers, response_trailers, *stream_info) - .length(); + output_bytes += + typed_json_formatter + ->format(request_headers, response_headers, response_trailers, *stream_info, body) + .length(); } benchmark::DoNotOptimize(output_bytes); } diff --git a/test/common/access_log/access_log_formatter_test.cc b/test/common/access_log/access_log_formatter_test.cc index 12b2a77399e0..c182f49406ac 100644 --- a/test/common/access_log/access_log_formatter_test.cc +++ b/test/common/access_log/access_log_formatter_test.cc @@ -105,12 +105,13 @@ TEST(AccessLogFormatterTest, plainStringFormatter) { Http::TestResponseHeaderMapImpl response_headers; Http::TestResponseTrailerMapImpl response_trailers; StreamInfo::MockStreamInfo stream_info; + std::string body; - EXPECT_EQ("plain", - formatter.format(request_headers, response_headers, response_trailers, stream_info)); - EXPECT_THAT( - formatter.formatValue(request_headers, response_headers, response_trailers, stream_info), - ProtoEq(ValueUtil::stringValue("plain"))); + EXPECT_EQ("plain", formatter.format(request_headers, response_headers, response_trailers, + stream_info, body)); + EXPECT_THAT(formatter.formatValue(request_headers, response_headers, response_trailers, + stream_info, body), + ProtoEq(ValueUtil::stringValue("plain"))); } TEST(AccessLogFormatterTest, streamInfoFormatter) { @@ -120,15 +121,16 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, {":path", "/"}}; Http::TestResponseHeaderMapImpl response_headers; Http::TestResponseTrailerMapImpl response_trailers; + std::string body; { StreamInfoFormatter 
request_duration_format("REQUEST_DURATION"); absl::optional dur = std::chrono::nanoseconds(5000000); EXPECT_CALL(stream_info, lastDownstreamRxByteReceived()).WillRepeatedly(Return(dur)); EXPECT_EQ("5", request_duration_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); EXPECT_THAT(request_duration_format.formatValue(request_headers, response_headers, - response_trailers, stream_info), + response_trailers, stream_info, body), ProtoEq(ValueUtil::numberValue(5.0))); } @@ -137,9 +139,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { absl::optional dur; EXPECT_CALL(stream_info, lastDownstreamRxByteReceived()).WillRepeatedly(Return(dur)); EXPECT_EQ("-", request_duration_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); EXPECT_THAT(request_duration_format.formatValue(request_headers, response_headers, - response_trailers, stream_info), + response_trailers, stream_info, body), ProtoEq(ValueUtil::nullValue())); } @@ -148,9 +150,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { absl::optional dur = std::chrono::nanoseconds(10000000); EXPECT_CALL(stream_info, firstUpstreamRxByteReceived()).WillRepeatedly(Return(dur)); EXPECT_EQ("10", response_duration_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); EXPECT_THAT(response_duration_format.formatValue(request_headers, response_headers, - response_trailers, stream_info), + response_trailers, stream_info, body), ProtoEq(ValueUtil::numberValue(10.0))); } @@ -159,9 +161,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { absl::optional dur; EXPECT_CALL(stream_info, firstUpstreamRxByteReceived()).WillRepeatedly(Return(dur)); EXPECT_EQ("-", response_duration_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); EXPECT_THAT(response_duration_format.formatValue(request_headers, response_headers, - response_trailers, stream_info), + response_trailers, stream_info, body), ProtoEq(ValueUtil::nullValue())); } @@ -174,9 +176,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { EXPECT_CALL(stream_info, lastDownstreamTxByteSent()).WillRepeatedly(Return(dur_downstream)); EXPECT_EQ("15", ttlb_duration_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); EXPECT_THAT(ttlb_duration_format.formatValue(request_headers, response_headers, - response_trailers, stream_info), + response_trailers, stream_info, body), ProtoEq(ValueUtil::numberValue(15.0))); } @@ -189,9 +191,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { EXPECT_CALL(stream_info, lastDownstreamTxByteSent()).WillRepeatedly(Return(dur_downstream)); EXPECT_EQ("-", ttlb_duration_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(ttlb_duration_format.formatValue(request_headers, response_headers, - response_trailers, stream_info), + response_trailers, stream_info, body), ProtoEq(ValueUtil::nullValue())); } @@ -199,9 +201,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { StreamInfoFormatter bytes_received_format("BYTES_RECEIVED"); EXPECT_CALL(stream_info, bytesReceived()).WillRepeatedly(Return(1)); EXPECT_EQ("1", bytes_received_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, 
body)); EXPECT_THAT(bytes_received_format.formatValue(request_headers, response_headers, - response_trailers, stream_info), + response_trailers, stream_info, body), ProtoEq(ValueUtil::numberValue(1.0))); } @@ -210,9 +212,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { absl::optional protocol = Http::Protocol::Http11; EXPECT_CALL(stream_info, protocol()).WillRepeatedly(Return(protocol)); EXPECT_EQ("HTTP/1.1", protocol_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); EXPECT_THAT(protocol_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::stringValue("HTTP/1.1"))); } @@ -221,9 +223,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { absl::optional response_code{200}; EXPECT_CALL(stream_info, responseCode()).WillRepeatedly(Return(response_code)); EXPECT_EQ("200", response_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(response_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::numberValue(200.0))); } @@ -232,9 +234,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { absl::optional response_code; EXPECT_CALL(stream_info, responseCode()).WillRepeatedly(Return(response_code)); EXPECT_EQ("0", response_code_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(response_code_format.formatValue(request_headers, response_headers, - response_trailers, stream_info), + response_trailers, stream_info, body), ProtoEq(ValueUtil::numberValue(0.0))); } @@ -243,9 +245,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { absl::optional rc_details; EXPECT_CALL(stream_info, responseCodeDetails()).WillRepeatedly(ReturnRef(rc_details)); EXPECT_EQ("-", response_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(response_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } @@ -254,9 +256,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { absl::optional rc_details{"via_upstream"}; EXPECT_CALL(stream_info, responseCodeDetails()).WillRepeatedly(ReturnRef(rc_details)); EXPECT_EQ("via_upstream", response_code_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); EXPECT_THAT(response_code_format.formatValue(request_headers, response_headers, - response_trailers, stream_info), + response_trailers, stream_info, body), ProtoEq(ValueUtil::stringValue("via_upstream"))); } @@ -264,9 +266,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { StreamInfoFormatter bytes_sent_format("BYTES_SENT"); EXPECT_CALL(stream_info, bytesSent()).WillRepeatedly(Return(1)); EXPECT_EQ("1", bytes_sent_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(bytes_sent_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::numberValue(1.0))); } @@ -275,9 +277,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { absl::optional dur = std::chrono::nanoseconds(15000000); EXPECT_CALL(stream_info, requestComplete()).WillRepeatedly(Return(dur)); EXPECT_EQ("15", 
duration_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(duration_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::numberValue(15.0))); } @@ -286,18 +288,18 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { ON_CALL(stream_info, hasResponseFlag(StreamInfo::ResponseFlag::LocalReset)) .WillByDefault(Return(true)); EXPECT_EQ("LR", response_flags_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); EXPECT_THAT(response_flags_format.formatValue(request_headers, response_headers, - response_trailers, stream_info), + response_trailers, stream_info, body), ProtoEq(ValueUtil::stringValue("LR"))); } { StreamInfoFormatter upstream_format("UPSTREAM_HOST"); EXPECT_EQ("10.0.0.1:443", upstream_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::stringValue("10.0.0.1:443"))); } @@ -307,9 +309,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { EXPECT_CALL(stream_info.host_->cluster_, name()) .WillRepeatedly(ReturnRef(upstream_cluster_name)); EXPECT_EQ("cluster_name", upstream_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::stringValue("cluster_name"))); } @@ -317,9 +319,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { StreamInfoFormatter upstream_format("UPSTREAM_HOST"); EXPECT_CALL(stream_info, upstreamHost()).WillRepeatedly(Return(nullptr)); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } @@ -333,9 +335,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { StreamInfoFormatter upstream_format("HOSTNAME"); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::stringValue("-"))); } @@ -350,9 +352,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { StreamInfoFormatter upstream_format("HOSTNAME"); EXPECT_EQ("myhostname", upstream_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::stringValue("myhostname"))); } @@ -360,27 +362,27 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { StreamInfoFormatter upstream_format("UPSTREAM_CLUSTER"); EXPECT_CALL(stream_info, upstreamHost()).WillRepeatedly(Return(nullptr)); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, 
response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { StreamInfoFormatter upstream_format("DOWNSTREAM_LOCAL_ADDRESS"); EXPECT_EQ("127.0.0.2:0", upstream_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::stringValue("127.0.0.2:0"))); } { StreamInfoFormatter upstream_format("DOWNSTREAM_LOCAL_ADDRESS_WITHOUT_PORT"); EXPECT_EQ("127.0.0.2", upstream_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::stringValue("127.0.0.2"))); } @@ -392,9 +394,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { new Network::Address::Ipv4Instance("127.1.2.3", 8443)}; EXPECT_CALL(stream_info, downstreamLocalAddress()).WillRepeatedly(ReturnRef(address)); EXPECT_EQ("8443", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::stringValue("8443"))); // Validate for IPv6 address @@ -402,54 +404,54 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { Network::Address::InstanceConstSharedPtr{new Network::Address::Ipv6Instance("::1", 9443)}; EXPECT_CALL(stream_info, downstreamLocalAddress()).WillRepeatedly(ReturnRef(address)); EXPECT_EQ("9443", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::stringValue("9443"))); // Validate for Pipe address = Network::Address::InstanceConstSharedPtr{new Network::Address::PipeInstance("/foo")}; EXPECT_CALL(stream_info, downstreamLocalAddress()).WillRepeatedly(ReturnRef(address)); EXPECT_EQ("", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::stringValue(""))); } { StreamInfoFormatter upstream_format("DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT"); EXPECT_EQ("127.0.0.1", upstream_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::stringValue("127.0.0.1"))); } { StreamInfoFormatter upstream_format("DOWNSTREAM_REMOTE_ADDRESS"); EXPECT_EQ("127.0.0.1:0", upstream_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::stringValue("127.0.0.1:0"))); } { StreamInfoFormatter upstream_format("DOWNSTREAM_DIRECT_REMOTE_ADDRESS_WITHOUT_PORT"); EXPECT_EQ("127.0.0.1", upstream_format.format(request_headers, response_headers, - 
response_trailers, stream_info)); + response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::stringValue("127.0.0.1"))); } { StreamInfoFormatter upstream_format("DOWNSTREAM_DIRECT_REMOTE_ADDRESS"); EXPECT_EQ("127.0.0.1:0", upstream_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::stringValue("127.0.0.1:0"))); } @@ -459,9 +461,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { EXPECT_CALL(stream_info, requestedServerName()) .WillRepeatedly(ReturnRef(requested_server_name)); EXPECT_EQ("stub_server", upstream_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::stringValue("stub_server"))); } @@ -471,9 +473,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { EXPECT_CALL(stream_info, requestedServerName()) .WillRepeatedly(ReturnRef(requested_server_name)); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { @@ -483,9 +485,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { EXPECT_CALL(*connection_info, uriSanPeerCertificate()).WillRepeatedly(Return(sans)); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); EXPECT_EQ("san", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::stringValue("san"))); } @@ -496,7 +498,7 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { EXPECT_CALL(*connection_info, uriSanPeerCertificate()).WillRepeatedly(Return(sans)); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); EXPECT_EQ("san1,san2", upstream_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); } { StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_URI_SAN"); @@ -505,18 +507,18 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { .WillRepeatedly(Return(std::vector())); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_URI_SAN"); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, 
response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { @@ -526,9 +528,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { EXPECT_CALL(*connection_info, uriSanLocalCertificate()).WillRepeatedly(Return(sans)); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); EXPECT_EQ("san", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::stringValue("san"))); } { @@ -538,7 +540,7 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { EXPECT_CALL(*connection_info, uriSanLocalCertificate()).WillRepeatedly(Return(sans)); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); EXPECT_EQ("san1,san2", upstream_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); } { StreamInfoFormatter upstream_format("DOWNSTREAM_LOCAL_URI_SAN"); @@ -547,18 +549,18 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { .WillRepeatedly(Return(std::vector())); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); StreamInfoFormatter upstream_format("DOWNSTREAM_LOCAL_URI_SAN"); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { @@ -569,9 +571,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { .WillRepeatedly(ReturnRef(subject_local)); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); EXPECT_EQ("subject", upstream_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::stringValue("subject"))); } { @@ -581,18 +583,18 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { .WillRepeatedly(ReturnRef(EMPTY_STRING)); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); StreamInfoFormatter upstream_format("DOWNSTREAM_LOCAL_SUBJECT"); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - 
stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { @@ -602,9 +604,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { EXPECT_CALL(*connection_info, subjectPeerCertificate()).WillRepeatedly(ReturnRef(subject_peer)); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); EXPECT_EQ("subject", upstream_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::stringValue("subject"))); } { @@ -613,18 +615,18 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { EXPECT_CALL(*connection_info, subjectPeerCertificate()).WillRepeatedly(ReturnRef(EMPTY_STRING)); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_SUBJECT"); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { @@ -634,9 +636,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { EXPECT_CALL(*connection_info, sessionId()).WillRepeatedly(ReturnRef(session_id)); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); EXPECT_EQ("deadbeef", upstream_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::stringValue("deadbeef"))); } { @@ -645,18 +647,18 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { EXPECT_CALL(*connection_info, sessionId()).WillRepeatedly(ReturnRef(EMPTY_STRING)); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); StreamInfoFormatter upstream_format("DOWNSTREAM_TLS_SESSION_ID"); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { @@ -665,9 +667,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { EXPECT_CALL(*connection_info, ciphersuiteString()) .WillRepeatedly(Return("TLS_DHE_RSA_WITH_AES_256_GCM_SHA384")); EXPECT_CALL(stream_info, 
downstreamSslConnection()).WillRepeatedly(Return(connection_info)); - EXPECT_EQ( - "TLS_DHE_RSA_WITH_AES_256_GCM_SHA384", - upstream_format.format(request_headers, response_headers, response_trailers, stream_info)); + EXPECT_EQ("TLS_DHE_RSA_WITH_AES_256_GCM_SHA384", + upstream_format.format(request_headers, response_headers, response_trailers, + stream_info, body)); } { StreamInfoFormatter upstream_format("DOWNSTREAM_TLS_CIPHER"); @@ -675,18 +677,18 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { EXPECT_CALL(*connection_info, ciphersuiteString()).WillRepeatedly(Return("")); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); StreamInfoFormatter upstream_format("DOWNSTREAM_TLS_CIPHER"); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { @@ -696,9 +698,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { EXPECT_CALL(*connection_info, tlsVersion()).WillRepeatedly(ReturnRef(tlsVersion)); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); EXPECT_EQ("TLSv1.2", upstream_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::stringValue("TLSv1.2"))); } { @@ -707,18 +709,18 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { EXPECT_CALL(*connection_info, tlsVersion()).WillRepeatedly(ReturnRef(EMPTY_STRING)); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); StreamInfoFormatter upstream_format("DOWNSTREAM_TLS_VERSION"); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { @@ -729,9 +731,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { .WillRepeatedly(ReturnRef(expected_sha)); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); EXPECT_EQ(expected_sha, upstream_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), 
ProtoEq(ValueUtil::stringValue(expected_sha))); } { @@ -742,18 +744,18 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { .WillRepeatedly(ReturnRef(expected_sha)); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_FINGERPRINT_256"); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { @@ -764,9 +766,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { .WillRepeatedly(ReturnRef(serial_number)); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); EXPECT_EQ("b8b5ecc898f2124a", upstream_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::stringValue("b8b5ecc898f2124a"))); } { @@ -776,18 +778,18 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { .WillRepeatedly(ReturnRef(EMPTY_STRING)); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_SERIAL"); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { @@ -797,9 +799,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { "CN=Test CA,OU=Lyft Engineering,O=Lyft,L=San Francisco,ST=California,C=US"; EXPECT_CALL(*connection_info, issuerPeerCertificate()).WillRepeatedly(ReturnRef(issuer_peer)); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); - EXPECT_EQ( - "CN=Test CA,OU=Lyft Engineering,O=Lyft,L=San Francisco,ST=California,C=US", - upstream_format.format(request_headers, response_headers, response_trailers, stream_info)); + EXPECT_EQ("CN=Test CA,OU=Lyft Engineering,O=Lyft,L=San Francisco,ST=California,C=US", + upstream_format.format(request_headers, response_headers, response_trailers, + stream_info, body)); } { StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_ISSUER"); @@ -807,18 +809,18 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { EXPECT_CALL(*connection_info, issuerPeerCertificate()).WillRepeatedly(ReturnRef(EMPTY_STRING)); EXPECT_CALL(stream_info, 
downstreamSslConnection()).WillRepeatedly(Return(connection_info)); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_ISSUER"); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { @@ -828,9 +830,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { "CN=Test Server,OU=Lyft Engineering,O=Lyft,L=San Francisco,ST=California,C=US"; EXPECT_CALL(*connection_info, subjectPeerCertificate()).WillRepeatedly(ReturnRef(subject_peer)); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); - EXPECT_EQ( - "CN=Test Server,OU=Lyft Engineering,O=Lyft,L=San Francisco,ST=California,C=US", - upstream_format.format(request_headers, response_headers, response_trailers, stream_info)); + EXPECT_EQ("CN=Test Server,OU=Lyft Engineering,O=Lyft,L=San Francisco,ST=California,C=US", + upstream_format.format(request_headers, response_headers, response_trailers, + stream_info, body)); } { StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_SUBJECT"); @@ -838,18 +840,18 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { EXPECT_CALL(*connection_info, subjectPeerCertificate()).WillRepeatedly(ReturnRef(EMPTY_STRING)); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_SUBJECT"); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { @@ -860,9 +862,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { .WillRepeatedly(ReturnRef(expected_cert)); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); EXPECT_EQ(expected_cert, upstream_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::stringValue(expected_cert))); } { @@ -873,18 +875,18 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { .WillRepeatedly(ReturnRef(expected_cert)); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); 
EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_CERT"); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { @@ -895,8 +897,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { SystemTime startTime = absl::ToChronoTime(abslStartTime); EXPECT_CALL(*connection_info, validFromPeerCertificate()).WillRepeatedly(Return(startTime)); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); - EXPECT_EQ("2018-12-18T01:50:34.000Z", upstream_format.format(request_headers, response_headers, - response_trailers, stream_info)); + EXPECT_EQ("2018-12-18T01:50:34.000Z", + upstream_format.format(request_headers, response_headers, response_trailers, + stream_info, body)); } { StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_CERT_V_START"); @@ -904,18 +907,18 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { EXPECT_CALL(*connection_info, validFromPeerCertificate()).WillRepeatedly(Return(absl::nullopt)); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_CERT_V_START"); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { @@ -926,8 +929,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { SystemTime endTime = absl::ToChronoTime(abslEndTime); EXPECT_CALL(*connection_info, expirationPeerCertificate()).WillRepeatedly(Return(endTime)); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); - EXPECT_EQ("2020-12-17T01:50:34.000Z", upstream_format.format(request_headers, response_headers, - response_trailers, stream_info)); + EXPECT_EQ("2020-12-17T01:50:34.000Z", + upstream_format.format(request_headers, response_headers, response_trailers, + stream_info, body)); } { StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_CERT_V_END"); @@ -936,18 +940,18 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { .WillRepeatedly(Return(absl::nullopt)); EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { EXPECT_CALL(stream_info, 
downstreamSslConnection()).WillRepeatedly(Return(nullptr)); StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_CERT_V_END"); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } { @@ -956,9 +960,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { EXPECT_CALL(stream_info, upstreamTransportFailureReason()) .WillRepeatedly(ReturnRef(upstream_transport_failure_reason)); EXPECT_EQ("SSL error", upstream_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::stringValue("SSL error"))); } @@ -968,9 +972,9 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { EXPECT_CALL(stream_info, upstreamTransportFailureReason()) .WillRepeatedly(ReturnRef(upstream_transport_failure_reason)); EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::nullValue())); } } @@ -980,49 +984,50 @@ TEST(AccessLogFormatterTest, requestHeaderFormatter) { Http::TestRequestHeaderMapImpl request_header{{":method", "GET"}, {":path", "/"}}; Http::TestResponseHeaderMapImpl response_header{{":method", "PUT"}}; Http::TestResponseTrailerMapImpl response_trailer{{":method", "POST"}, {"test-2", "test-2"}}; + std::string body; { RequestHeaderFormatter formatter(":Method", "", absl::optional()); - EXPECT_EQ("GET", - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ("GET", formatter.format(request_header, response_header, response_trailer, + stream_info, body)); EXPECT_THAT( - formatter.formatValue(request_header, response_header, response_trailer, stream_info), + formatter.formatValue(request_header, response_header, response_trailer, stream_info, body), ProtoEq(ValueUtil::stringValue("GET"))); } { RequestHeaderFormatter formatter(":path", ":method", absl::optional()); - EXPECT_EQ("/", - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ("/", formatter.format(request_header, response_header, response_trailer, stream_info, + body)); EXPECT_THAT( - formatter.formatValue(request_header, response_header, response_trailer, stream_info), + formatter.formatValue(request_header, response_header, response_trailer, stream_info, body), ProtoEq(ValueUtil::stringValue("/"))); } { RequestHeaderFormatter formatter(":TEST", ":METHOD", absl::optional()); - EXPECT_EQ("GET", - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ("GET", formatter.format(request_header, response_header, response_trailer, + stream_info, body)); EXPECT_THAT( - formatter.formatValue(request_header, response_header, response_trailer, stream_info), + formatter.formatValue(request_header, response_header, response_trailer, stream_info, body), ProtoEq(ValueUtil::stringValue("GET"))); } { RequestHeaderFormatter formatter("does_not_exist", "", absl::optional()); - EXPECT_EQ("-", - formatter.format(request_header, response_header, response_trailer, stream_info)); + 
EXPECT_EQ("-", formatter.format(request_header, response_header, response_trailer, stream_info, + body)); EXPECT_THAT( - formatter.formatValue(request_header, response_header, response_trailer, stream_info), + formatter.formatValue(request_header, response_header, response_trailer, stream_info, body), ProtoEq(ValueUtil::nullValue())); } { RequestHeaderFormatter formatter(":Method", "", absl::optional(2)); - EXPECT_EQ("GE", - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ("GE", formatter.format(request_header, response_header, response_trailer, stream_info, + body)); EXPECT_THAT( - formatter.formatValue(request_header, response_header, response_trailer, stream_info), + formatter.formatValue(request_header, response_header, response_trailer, stream_info, body), ProtoEq(ValueUtil::stringValue("GE"))); } } @@ -1032,49 +1037,50 @@ TEST(AccessLogFormatterTest, responseHeaderFormatter) { Http::TestRequestHeaderMapImpl request_header{{":method", "GET"}, {":path", "/"}}; Http::TestResponseHeaderMapImpl response_header{{":method", "PUT"}, {"test", "test"}}; Http::TestResponseTrailerMapImpl response_trailer{{":method", "POST"}, {"test-2", "test-2"}}; + std::string body; { ResponseHeaderFormatter formatter(":method", "", absl::optional()); - EXPECT_EQ("PUT", - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ("PUT", formatter.format(request_header, response_header, response_trailer, + stream_info, body)); EXPECT_THAT( - formatter.formatValue(request_header, response_header, response_trailer, stream_info), + formatter.formatValue(request_header, response_header, response_trailer, stream_info, body), ProtoEq(ValueUtil::stringValue("PUT"))); } { ResponseHeaderFormatter formatter("test", ":method", absl::optional()); - EXPECT_EQ("test", - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ("test", formatter.format(request_header, response_header, response_trailer, + stream_info, body)); EXPECT_THAT( - formatter.formatValue(request_header, response_header, response_trailer, stream_info), + formatter.formatValue(request_header, response_header, response_trailer, stream_info, body), ProtoEq(ValueUtil::stringValue("test"))); } { ResponseHeaderFormatter formatter(":path", ":method", absl::optional()); - EXPECT_EQ("PUT", - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ("PUT", formatter.format(request_header, response_header, response_trailer, + stream_info, body)); EXPECT_THAT( - formatter.formatValue(request_header, response_header, response_trailer, stream_info), + formatter.formatValue(request_header, response_header, response_trailer, stream_info, body), ProtoEq(ValueUtil::stringValue("PUT"))); } { ResponseHeaderFormatter formatter("does_not_exist", "", absl::optional()); - EXPECT_EQ("-", - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ("-", formatter.format(request_header, response_header, response_trailer, stream_info, + body)); EXPECT_THAT( - formatter.formatValue(request_header, response_header, response_trailer, stream_info), + formatter.formatValue(request_header, response_header, response_trailer, stream_info, body), ProtoEq(ValueUtil::nullValue())); } { ResponseHeaderFormatter formatter(":method", "", absl::optional(2)); - EXPECT_EQ("PU", - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ("PU", formatter.format(request_header, 
response_header, response_trailer, stream_info, + body)); EXPECT_THAT( - formatter.formatValue(request_header, response_header, response_trailer, stream_info), + formatter.formatValue(request_header, response_header, response_trailer, stream_info, body), ProtoEq(ValueUtil::stringValue("PU"))); } } @@ -1084,49 +1090,50 @@ TEST(AccessLogFormatterTest, responseTrailerFormatter) { Http::TestRequestHeaderMapImpl request_header{{":method", "GET"}, {":path", "/"}}; Http::TestResponseHeaderMapImpl response_header{{":method", "PUT"}, {"test", "test"}}; Http::TestResponseTrailerMapImpl response_trailer{{":method", "POST"}, {"test-2", "test-2"}}; + std::string body; { ResponseTrailerFormatter formatter(":method", "", absl::optional()); - EXPECT_EQ("POST", - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ("POST", formatter.format(request_header, response_header, response_trailer, + stream_info, body)); EXPECT_THAT( - formatter.formatValue(request_header, response_header, response_trailer, stream_info), + formatter.formatValue(request_header, response_header, response_trailer, stream_info, body), ProtoEq(ValueUtil::stringValue("POST"))); } { ResponseTrailerFormatter formatter("test-2", ":method", absl::optional()); - EXPECT_EQ("test-2", - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ("test-2", formatter.format(request_header, response_header, response_trailer, + stream_info, body)); EXPECT_THAT( - formatter.formatValue(request_header, response_header, response_trailer, stream_info), + formatter.formatValue(request_header, response_header, response_trailer, stream_info, body), ProtoEq(ValueUtil::stringValue("test-2"))); } { ResponseTrailerFormatter formatter(":path", ":method", absl::optional()); - EXPECT_EQ("POST", - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ("POST", formatter.format(request_header, response_header, response_trailer, + stream_info, body)); EXPECT_THAT( - formatter.formatValue(request_header, response_header, response_trailer, stream_info), + formatter.formatValue(request_header, response_header, response_trailer, stream_info, body), ProtoEq(ValueUtil::stringValue("POST"))); } { ResponseTrailerFormatter formatter("does_not_exist", "", absl::optional()); - EXPECT_EQ("-", - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ("-", formatter.format(request_header, response_header, response_trailer, stream_info, + body)); EXPECT_THAT( - formatter.formatValue(request_header, response_header, response_trailer, stream_info), + formatter.formatValue(request_header, response_header, response_trailer, stream_info, body), ProtoEq(ValueUtil::nullValue())); } { ResponseTrailerFormatter formatter(":method", "", absl::optional(2)); - EXPECT_EQ("PO", - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ("PO", formatter.format(request_header, response_header, response_trailer, stream_info, + body)); EXPECT_THAT( - formatter.formatValue(request_header, response_header, response_trailer, stream_info), + formatter.formatValue(request_header, response_header, response_trailer, stream_info, body), ProtoEq(ValueUtil::stringValue("PO"))); } } @@ -1156,87 +1163,89 @@ TEST(AccessLogFormatterTest, DynamicMetadataFormatter) { Http::TestRequestHeaderMapImpl request_headers; Http::TestResponseHeaderMapImpl response_headers; Http::TestResponseTrailerMapImpl response_trailers; + 
std::string body; { DynamicMetadataFormatter formatter("com.test", {}, absl::optional()); std::string val = - formatter.format(request_headers, response_headers, response_trailers, stream_info); + formatter.format(request_headers, response_headers, response_trailers, stream_info, body); EXPECT_TRUE(val.find("\"test_key\":\"test_value\"") != std::string::npos); EXPECT_TRUE(val.find("\"test_obj\":{\"inner_key\":\"inner_value\"}") != std::string::npos); ProtobufWkt::Value expected_val; expected_val.mutable_struct_value()->CopyFrom(metadata.filter_metadata().at("com.test")); - EXPECT_THAT( - formatter.formatValue(request_headers, response_headers, response_trailers, stream_info), - ProtoEq(expected_val)); + EXPECT_THAT(formatter.formatValue(request_headers, response_headers, response_trailers, + stream_info, body), + ProtoEq(expected_val)); } { DynamicMetadataFormatter formatter("com.test", {"test_key"}, absl::optional()); - EXPECT_EQ("\"test_value\"", - formatter.format(request_headers, response_headers, response_trailers, stream_info)); - EXPECT_THAT( - formatter.formatValue(request_headers, response_headers, response_trailers, stream_info), - ProtoEq(ValueUtil::stringValue("test_value"))); + EXPECT_EQ("\"test_value\"", formatter.format(request_headers, response_headers, + response_trailers, stream_info, body)); + EXPECT_THAT(formatter.formatValue(request_headers, response_headers, response_trailers, + stream_info, body), + ProtoEq(ValueUtil::stringValue("test_value"))); } { DynamicMetadataFormatter formatter("com.test", {"test_obj"}, absl::optional()); - EXPECT_EQ("{\"inner_key\":\"inner_value\"}", - formatter.format(request_headers, response_headers, response_trailers, stream_info)); + EXPECT_EQ( + "{\"inner_key\":\"inner_value\"}", + formatter.format(request_headers, response_headers, response_trailers, stream_info, body)); ProtobufWkt::Value expected_val; (*expected_val.mutable_struct_value()->mutable_fields())["inner_key"] = ValueUtil::stringValue("inner_value"); - EXPECT_THAT( - formatter.formatValue(request_headers, response_headers, response_trailers, stream_info), - ProtoEq(expected_val)); + EXPECT_THAT(formatter.formatValue(request_headers, response_headers, response_trailers, + stream_info, body), + ProtoEq(expected_val)); } { DynamicMetadataFormatter formatter("com.test", {"test_obj", "inner_key"}, absl::optional()); - EXPECT_EQ("\"inner_value\"", - formatter.format(request_headers, response_headers, response_trailers, stream_info)); - EXPECT_THAT( - formatter.formatValue(request_headers, response_headers, response_trailers, stream_info), - ProtoEq(ValueUtil::stringValue("inner_value"))); + EXPECT_EQ("\"inner_value\"", formatter.format(request_headers, response_headers, + response_trailers, stream_info, body)); + EXPECT_THAT(formatter.formatValue(request_headers, response_headers, response_trailers, + stream_info, body), + ProtoEq(ValueUtil::stringValue("inner_value"))); } // not found cases { DynamicMetadataFormatter formatter("com.notfound", {}, absl::optional()); - EXPECT_EQ("-", - formatter.format(request_headers, response_headers, response_trailers, stream_info)); - EXPECT_THAT( - formatter.formatValue(request_headers, response_headers, response_trailers, stream_info), - ProtoEq(ValueUtil::nullValue())); + EXPECT_EQ("-", formatter.format(request_headers, response_headers, response_trailers, + stream_info, body)); + EXPECT_THAT(formatter.formatValue(request_headers, response_headers, response_trailers, + stream_info, body), + ProtoEq(ValueUtil::nullValue())); } { 
DynamicMetadataFormatter formatter("com.test", {"notfound"}, absl::optional()); - EXPECT_EQ("-", - formatter.format(request_headers, response_headers, response_trailers, stream_info)); - EXPECT_THAT( - formatter.formatValue(request_headers, response_headers, response_trailers, stream_info), - ProtoEq(ValueUtil::nullValue())); + EXPECT_EQ("-", formatter.format(request_headers, response_headers, response_trailers, + stream_info, body)); + EXPECT_THAT(formatter.formatValue(request_headers, response_headers, response_trailers, + stream_info, body), + ProtoEq(ValueUtil::nullValue())); } { DynamicMetadataFormatter formatter("com.test", {"test_obj", "notfound"}, absl::optional()); - EXPECT_EQ("-", - formatter.format(request_headers, response_headers, response_trailers, stream_info)); - EXPECT_THAT( - formatter.formatValue(request_headers, response_headers, response_trailers, stream_info), - ProtoEq(ValueUtil::nullValue())); + EXPECT_EQ("-", formatter.format(request_headers, response_headers, response_trailers, + stream_info, body)); + EXPECT_THAT(formatter.formatValue(request_headers, response_headers, response_trailers, + stream_info, body), + ProtoEq(ValueUtil::nullValue())); } // size limit { DynamicMetadataFormatter formatter("com.test", {"test_key"}, absl::optional(5)); - EXPECT_EQ("\"test", - formatter.format(request_headers, response_headers, response_trailers, stream_info)); + EXPECT_EQ("\"test", formatter.format(request_headers, response_headers, response_trailers, + stream_info, body)); // N.B. Does not truncate. - EXPECT_THAT( - formatter.formatValue(request_headers, response_headers, response_trailers, stream_info), - ProtoEq(ValueUtil::stringValue("test_value"))); + EXPECT_THAT(formatter.formatValue(request_headers, response_headers, response_trailers, + stream_info, body), + ProtoEq(ValueUtil::stringValue("test_value"))); } } @@ -1245,6 +1254,8 @@ TEST(AccessLogFormatterTest, FilterStateFormatter) { Http::TestResponseHeaderMapImpl response_headers; Http::TestResponseTrailerMapImpl response_trailers; StreamInfo::MockStreamInfo stream_info; + std::string body; + stream_info.filter_state_->setData("key", std::make_unique("test_value"), StreamInfo::FilterState::StateType::ReadOnly); @@ -1266,98 +1277,99 @@ TEST(AccessLogFormatterTest, FilterStateFormatter) { { FilterStateFormatter formatter("key", absl::optional(), false); - EXPECT_EQ("\"test_value\"", - formatter.format(request_headers, response_headers, response_trailers, stream_info)); - EXPECT_THAT( - formatter.formatValue(request_headers, response_headers, response_trailers, stream_info), - ProtoEq(ValueUtil::stringValue("test_value"))); + EXPECT_EQ("\"test_value\"", formatter.format(request_headers, response_headers, + response_trailers, stream_info, body)); + EXPECT_THAT(formatter.formatValue(request_headers, response_headers, response_trailers, + stream_info, body), + ProtoEq(ValueUtil::stringValue("test_value"))); } { FilterStateFormatter formatter("key-struct", absl::optional(), false); - EXPECT_EQ("{\"inner_key\":\"inner_value\"}", - formatter.format(request_headers, response_headers, response_trailers, stream_info)); + EXPECT_EQ( + "{\"inner_key\":\"inner_value\"}", + formatter.format(request_headers, response_headers, response_trailers, stream_info, body)); ProtobufWkt::Value expected; (*expected.mutable_struct_value()->mutable_fields())["inner_key"] = ValueUtil::stringValue("inner_value"); - EXPECT_THAT( - formatter.formatValue(request_headers, response_headers, response_trailers, stream_info), - ProtoEq(expected)); + 
EXPECT_THAT(formatter.formatValue(request_headers, response_headers, response_trailers, + stream_info, body), + ProtoEq(expected)); } // not found case { FilterStateFormatter formatter("key-not-found", absl::optional(), false); - EXPECT_EQ("-", - formatter.format(request_headers, response_headers, response_trailers, stream_info)); - EXPECT_THAT( - formatter.formatValue(request_headers, response_headers, response_trailers, stream_info), - ProtoEq(ValueUtil::nullValue())); + EXPECT_EQ("-", formatter.format(request_headers, response_headers, response_trailers, + stream_info, body)); + EXPECT_THAT(formatter.formatValue(request_headers, response_headers, response_trailers, + stream_info, body), + ProtoEq(ValueUtil::nullValue())); } // no serialization case { FilterStateFormatter formatter("key-no-serialization", absl::optional(), false); - EXPECT_EQ("-", - formatter.format(request_headers, response_headers, response_trailers, stream_info)); - EXPECT_THAT( - formatter.formatValue(request_headers, response_headers, response_trailers, stream_info), - ProtoEq(ValueUtil::nullValue())); + EXPECT_EQ("-", formatter.format(request_headers, response_headers, response_trailers, + stream_info, body)); + EXPECT_THAT(formatter.formatValue(request_headers, response_headers, response_trailers, + stream_info, body), + ProtoEq(ValueUtil::nullValue())); } // serialization error case { FilterStateFormatter formatter("key-serialization-error", absl::optional(), false); - EXPECT_EQ("-", - formatter.format(request_headers, response_headers, response_trailers, stream_info)); - EXPECT_THAT( - formatter.formatValue(request_headers, response_headers, response_trailers, stream_info), - ProtoEq(ValueUtil::nullValue())); + EXPECT_EQ("-", formatter.format(request_headers, response_headers, response_trailers, + stream_info, body)); + EXPECT_THAT(formatter.formatValue(request_headers, response_headers, response_trailers, + stream_info, body), + ProtoEq(ValueUtil::nullValue())); } // size limit { FilterStateFormatter formatter("key", absl::optional(5), false); - EXPECT_EQ("\"test", - formatter.format(request_headers, response_headers, response_trailers, stream_info)); + EXPECT_EQ("\"test", formatter.format(request_headers, response_headers, response_trailers, + stream_info, body)); // N.B. Does not truncate. 
- EXPECT_THAT( - formatter.formatValue(request_headers, response_headers, response_trailers, stream_info), - ProtoEq(ValueUtil::stringValue("test_value"))); + EXPECT_THAT(formatter.formatValue(request_headers, response_headers, response_trailers, + stream_info, body), + ProtoEq(ValueUtil::stringValue("test_value"))); } // serializeAsString case { FilterStateFormatter formatter("test_key", absl::optional(), true); - EXPECT_EQ("test_value By PLAIN", - formatter.format(request_headers, response_headers, response_trailers, stream_info)); + EXPECT_EQ("test_value By PLAIN", formatter.format(request_headers, response_headers, + response_trailers, stream_info, body)); } // size limit for serializeAsString { FilterStateFormatter formatter("test_key", absl::optional(10), true); - EXPECT_EQ("test_value", - formatter.format(request_headers, response_headers, response_trailers, stream_info)); + EXPECT_EQ("test_value", formatter.format(request_headers, response_headers, response_trailers, + stream_info, body)); } // no serialization case for serializeAsString { FilterStateFormatter formatter("key-no-serialization", absl::optional(), true); - EXPECT_EQ("-", - formatter.format(request_headers, response_headers, response_trailers, stream_info)); - EXPECT_THAT( - formatter.formatValue(request_headers, response_headers, response_trailers, stream_info), - ProtoEq(ValueUtil::nullValue())); + EXPECT_EQ("-", formatter.format(request_headers, response_headers, response_trailers, + stream_info, body)); + EXPECT_THAT(formatter.formatValue(request_headers, response_headers, response_trailers, + stream_info, body), + ProtoEq(ValueUtil::nullValue())); } } @@ -1366,6 +1378,7 @@ TEST(AccessLogFormatterTest, StartTimeFormatter) { Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, {":path", "/"}}; Http::TestResponseHeaderMapImpl response_headers; Http::TestResponseTrailerMapImpl response_trailers; + std::string body; { StartTimeFormatter start_time_format("%Y/%m/%d"); @@ -1373,9 +1386,9 @@ TEST(AccessLogFormatterTest, StartTimeFormatter) { SystemTime time = std::chrono::system_clock::from_time_t(test_epoch); EXPECT_CALL(stream_info, startTime()).WillRepeatedly(Return(time)); EXPECT_EQ("2018/03/28", start_time_format.format(request_headers, response_headers, - response_trailers, stream_info)); + response_trailers, stream_info, body)); EXPECT_THAT(start_time_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::stringValue("2018/03/28"))); } @@ -1385,9 +1398,9 @@ TEST(AccessLogFormatterTest, StartTimeFormatter) { EXPECT_CALL(stream_info, startTime()).WillRepeatedly(Return(time)); EXPECT_EQ(AccessLogDateTimeFormatter::fromTime(time), start_time_format.format(request_headers, response_headers, response_trailers, - stream_info)); + stream_info, body)); EXPECT_THAT(start_time_format.formatValue(request_headers, response_headers, response_trailers, - stream_info), + stream_info, body), ProtoEq(ValueUtil::stringValue(AccessLogDateTimeFormatter::fromTime(time)))); } } @@ -1398,6 +1411,7 @@ TEST(AccessLogFormatterTest, GrpcStatusFormatterTest) { Http::TestRequestHeaderMapImpl request_header; Http::TestResponseHeaderMapImpl response_header; Http::TestResponseTrailerMapImpl response_trailer; + std::string body; std::array grpc_statuses{ "OK", "Canceled", "Unknown", "InvalidArgument", "DeadlineExceeded", @@ -1406,39 +1420,39 @@ TEST(AccessLogFormatterTest, GrpcStatusFormatterTest) { "DataLoss", "Unauthenticated"}; for (size_t i = 0; i < 
grpc_statuses.size(); ++i) { response_trailer = Http::TestResponseTrailerMapImpl{{"grpc-status", std::to_string(i)}}; - EXPECT_EQ(grpc_statuses[i], - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ(grpc_statuses[i], formatter.format(request_header, response_header, response_trailer, + stream_info, body)); EXPECT_THAT( - formatter.formatValue(request_header, response_header, response_trailer, stream_info), + formatter.formatValue(request_header, response_header, response_trailer, stream_info, body), ProtoEq(ValueUtil::stringValue(grpc_statuses[i]))); } { response_trailer = Http::TestResponseTrailerMapImpl{{"grpc-status", "-1"}}; - EXPECT_EQ("-1", - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ("-1", formatter.format(request_header, response_header, response_trailer, stream_info, + body)); EXPECT_THAT( - formatter.formatValue(request_header, response_header, response_trailer, stream_info), + formatter.formatValue(request_header, response_header, response_trailer, stream_info, body), ProtoEq(ValueUtil::stringValue("-1"))); response_trailer = Http::TestResponseTrailerMapImpl{{"grpc-status", "42738"}}; - EXPECT_EQ("42738", - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ("42738", formatter.format(request_header, response_header, response_trailer, + stream_info, body)); EXPECT_THAT( - formatter.formatValue(request_header, response_header, response_trailer, stream_info), + formatter.formatValue(request_header, response_header, response_trailer, stream_info, body), ProtoEq(ValueUtil::stringValue("42738"))); response_trailer.clear(); } { response_header = Http::TestResponseHeaderMapImpl{{"grpc-status", "-1"}}; - EXPECT_EQ("-1", - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ("-1", formatter.format(request_header, response_header, response_trailer, stream_info, + body)); EXPECT_THAT( - formatter.formatValue(request_header, response_header, response_trailer, stream_info), + formatter.formatValue(request_header, response_header, response_trailer, stream_info, body), ProtoEq(ValueUtil::stringValue("-1"))); response_header = Http::TestResponseHeaderMapImpl{{"grpc-status", "42738"}}; - EXPECT_EQ("42738", - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ("42738", formatter.format(request_header, response_header, response_trailer, + stream_info, body)); EXPECT_THAT( - formatter.formatValue(request_header, response_header, response_trailer, stream_info), + formatter.formatValue(request_header, response_header, response_trailer, stream_info, body), ProtoEq(ValueUtil::stringValue("42738"))); response_header.clear(); } @@ -1464,6 +1478,7 @@ TEST(AccessLogFormatterTest, JsonFormatterPlainStringTest) { Http::TestRequestHeaderMapImpl request_header; Http::TestResponseHeaderMapImpl response_header; Http::TestResponseTrailerMapImpl response_trailer; + std::string body; envoy::config::core::v3::Metadata metadata; populateMetadataTestData(metadata); @@ -1477,8 +1492,9 @@ TEST(AccessLogFormatterTest, JsonFormatterPlainStringTest) { {"plain_string", "plain_string_value"}}; JsonFormatterImpl formatter(key_mapping, false); - verifyJsonOutput(formatter.format(request_header, response_header, response_trailer, stream_info), - expected_json_map); + verifyJsonOutput( + formatter.format(request_header, response_header, response_trailer, stream_info, body), + expected_json_map); } 
TEST(AccessLogFormatterTest, JsonFormatterSingleOperatorTest) { @@ -1486,6 +1502,7 @@ TEST(AccessLogFormatterTest, JsonFormatterSingleOperatorTest) { Http::TestRequestHeaderMapImpl request_header; Http::TestResponseHeaderMapImpl response_header; Http::TestResponseTrailerMapImpl response_trailer; + std::string body; envoy::config::core::v3::Metadata metadata; populateMetadataTestData(metadata); @@ -1497,8 +1514,9 @@ TEST(AccessLogFormatterTest, JsonFormatterSingleOperatorTest) { absl::flat_hash_map key_mapping = {{"protocol", "%PROTOCOL%"}}; JsonFormatterImpl formatter(key_mapping, false); - verifyJsonOutput(formatter.format(request_header, response_header, response_trailer, stream_info), - expected_json_map); + verifyJsonOutput( + formatter.format(request_header, response_header, response_trailer, stream_info, body), + expected_json_map); } TEST(AccessLogFormatterTest, JsonFormatterNonExistentHeaderTest) { @@ -1506,6 +1524,7 @@ TEST(AccessLogFormatterTest, JsonFormatterNonExistentHeaderTest) { Http::TestRequestHeaderMapImpl request_header{{"some_request_header", "SOME_REQUEST_HEADER"}}; Http::TestResponseHeaderMapImpl response_header{{"some_response_header", "SOME_RESPONSE_HEADER"}}; Http::TestResponseTrailerMapImpl response_trailer; + std::string body; std::unordered_map expected_json_map = { {"protocol", "HTTP/1.1"}, @@ -1523,8 +1542,9 @@ TEST(AccessLogFormatterTest, JsonFormatterNonExistentHeaderTest) { absl::optional protocol = Http::Protocol::Http11; EXPECT_CALL(stream_info, protocol()).WillRepeatedly(Return(protocol)); - verifyJsonOutput(formatter.format(request_header, response_header, response_trailer, stream_info), - expected_json_map); + verifyJsonOutput( + formatter.format(request_header, response_header, response_trailer, stream_info, body), + expected_json_map); } TEST(AccessLogFormatterTest, JsonFormatterAlternateHeaderTest) { @@ -1534,6 +1554,7 @@ TEST(AccessLogFormatterTest, JsonFormatterAlternateHeaderTest) { Http::TestResponseHeaderMapImpl response_header{ {"response_present_header", "RESPONSE_PRESENT_HEADER"}}; Http::TestResponseTrailerMapImpl response_trailer; + std::string body; std::unordered_map expected_json_map = { {"request_present_header_or_request_absent_header", "REQUEST_PRESENT_HEADER"}, @@ -1555,8 +1576,9 @@ TEST(AccessLogFormatterTest, JsonFormatterAlternateHeaderTest) { absl::optional protocol = Http::Protocol::Http11; EXPECT_CALL(stream_info, protocol()).WillRepeatedly(Return(protocol)); - verifyJsonOutput(formatter.format(request_header, response_header, response_trailer, stream_info), - expected_json_map); + verifyJsonOutput( + formatter.format(request_header, response_header, response_trailer, stream_info, body), + expected_json_map); } TEST(AccessLogFormatterTest, JsonFormatterDynamicMetadataTest) { @@ -1564,6 +1586,7 @@ TEST(AccessLogFormatterTest, JsonFormatterDynamicMetadataTest) { Http::TestRequestHeaderMapImpl request_header{{"first", "GET"}, {":path", "/"}}; Http::TestResponseHeaderMapImpl response_header{{"second", "PUT"}, {"test", "test"}}; Http::TestResponseTrailerMapImpl response_trailer{{"third", "POST"}, {"test-2", "test-2"}}; + std::string body; envoy::config::core::v3::Metadata metadata; populateMetadataTestData(metadata); @@ -1582,8 +1605,9 @@ TEST(AccessLogFormatterTest, JsonFormatterDynamicMetadataTest) { JsonFormatterImpl formatter(key_mapping, false); - verifyJsonOutput(formatter.format(request_header, response_header, response_trailer, stream_info), - expected_json_map); + verifyJsonOutput( + formatter.format(request_header, 
response_header, response_trailer, stream_info, body), + expected_json_map); } TEST(AccessLogFormatterTest, JsonFormatterTypedDynamicMetadataTest) { @@ -1591,6 +1615,7 @@ TEST(AccessLogFormatterTest, JsonFormatterTypedDynamicMetadataTest) { Http::TestRequestHeaderMapImpl request_header{{"first", "GET"}, {":path", "/"}}; Http::TestResponseHeaderMapImpl response_header{{"second", "PUT"}, {"test", "test"}}; Http::TestResponseTrailerMapImpl response_trailer{{"third", "POST"}, {"test-2", "test-2"}}; + std::string body; envoy::config::core::v3::Metadata metadata; populateMetadataTestData(metadata); @@ -1605,7 +1630,7 @@ TEST(AccessLogFormatterTest, JsonFormatterTypedDynamicMetadataTest) { JsonFormatterImpl formatter(key_mapping, true); const std::string json = - formatter.format(request_header, response_header, response_trailer, stream_info); + formatter.format(request_header, response_header, response_trailer, stream_info, body); ProtobufWkt::Struct output; MessageUtil::loadFromJson(json, output); @@ -1621,6 +1646,7 @@ TEST(AccessLogFormatterTest, JsonFormatterFilterStateTest) { Http::TestResponseHeaderMapImpl response_headers; Http::TestResponseTrailerMapImpl response_trailers; StreamInfo::MockStreamInfo stream_info; + std::string body; stream_info.filter_state_->setData("test_key", std::make_unique("test_value"), StreamInfo::FilterState::StateType::ReadOnly); @@ -1638,7 +1664,7 @@ TEST(AccessLogFormatterTest, JsonFormatterFilterStateTest) { JsonFormatterImpl formatter(key_mapping, false); verifyJsonOutput( - formatter.format(request_headers, response_headers, response_trailers, stream_info), + formatter.format(request_headers, response_headers, response_trailers, stream_info, body), expected_json_map); } @@ -1647,6 +1673,7 @@ TEST(AccessLogFormatterTest, JsonFormatterTypedFilterStateTest) { Http::TestResponseHeaderMapImpl response_headers; Http::TestResponseTrailerMapImpl response_trailers; StreamInfo::MockStreamInfo stream_info; + std::string body; stream_info.filter_state_->setData("test_key", std::make_unique("test_value"), StreamInfo::FilterState::StateType::ReadOnly); @@ -1661,7 +1688,7 @@ TEST(AccessLogFormatterTest, JsonFormatterTypedFilterStateTest) { JsonFormatterImpl formatter(key_mapping, true); std::string json = - formatter.format(request_headers, response_headers, response_trailers, stream_info); + formatter.format(request_headers, response_headers, response_trailers, stream_info, body); ProtobufWkt::Struct output; MessageUtil::loadFromJson(json, output); @@ -1678,6 +1705,7 @@ TEST(AccessLogFormatterTest, FilterStateSpeciferTest) { Http::TestResponseHeaderMapImpl response_headers; Http::TestResponseTrailerMapImpl response_trailers; StreamInfo::MockStreamInfo stream_info; + std::string body; stream_info.filter_state_->setData( "test_key", std::make_unique("test_value"), StreamInfo::FilterState::StateType::ReadOnly); @@ -1695,7 +1723,7 @@ TEST(AccessLogFormatterTest, FilterStateSpeciferTest) { JsonFormatterImpl formatter(key_mapping, false); verifyJsonOutput( - formatter.format(request_headers, response_headers, response_trailers, stream_info), + formatter.format(request_headers, response_headers, response_trailers, stream_info, body), expected_json_map); } @@ -1706,6 +1734,7 @@ TEST(AccessLogFormatterTest, TypedFilterStateSpeciferTest) { Http::TestResponseHeaderMapImpl response_headers; Http::TestResponseTrailerMapImpl response_trailers; StreamInfo::MockStreamInfo stream_info; + std::string body; stream_info.filter_state_->setData( "test_key", std::make_unique("test_value"), 
StreamInfo::FilterState::StateType::ReadOnly); @@ -1718,7 +1747,7 @@ TEST(AccessLogFormatterTest, TypedFilterStateSpeciferTest) { JsonFormatterImpl formatter(key_mapping, true); std::string json = - formatter.format(request_headers, response_headers, response_trailers, stream_info); + formatter.format(request_headers, response_headers, response_trailers, stream_info, body); ProtobufWkt::Struct output; MessageUtil::loadFromJson(json, output); @@ -1734,6 +1763,7 @@ TEST(AccessLogFormatterTest, FilterStateErrorSpeciferTest) { Http::TestResponseHeaderMapImpl response_headers; Http::TestResponseTrailerMapImpl response_trailers; StreamInfo::MockStreamInfo stream_info; + std::string body; stream_info.filter_state_->setData( "test_key", std::make_unique("test_value"), StreamInfo::FilterState::StateType::ReadOnly); @@ -1752,6 +1782,7 @@ TEST(AccessLogFormatterTest, JsonFormatterStartTimeTest) { Http::TestRequestHeaderMapImpl request_header; Http::TestResponseHeaderMapImpl response_header; Http::TestResponseTrailerMapImpl response_trailer; + std::string body; time_t expected_time_in_epoch = 1522280158; SystemTime time = std::chrono::system_clock::from_time_t(expected_time_in_epoch); @@ -1772,8 +1803,9 @@ TEST(AccessLogFormatterTest, JsonFormatterStartTimeTest) { {"all_zeroes", "%START_TIME(%f.%1f.%2f.%3f)%"}}; JsonFormatterImpl formatter(key_mapping, false); - verifyJsonOutput(formatter.format(request_header, response_header, response_trailer, stream_info), - expected_json_map); + verifyJsonOutput( + formatter.format(request_header, response_header, response_trailer, stream_info, body), + expected_json_map); } TEST(AccessLogFormatterTest, JsonFormatterMultiTokenTest) { @@ -1783,6 +1815,7 @@ TEST(AccessLogFormatterTest, JsonFormatterMultiTokenTest) { Http::TestResponseHeaderMapImpl response_header{ {"some_response_header", "SOME_RESPONSE_HEADER"}}; Http::TestResponseTrailerMapImpl response_trailer; + std::string body; std::unordered_map expected_json_map = { {"multi_token_field", "HTTP/1.1 plainstring SOME_REQUEST_HEADER SOME_RESPONSE_HEADER"}}; @@ -1798,7 +1831,7 @@ TEST(AccessLogFormatterTest, JsonFormatterMultiTokenTest) { EXPECT_CALL(stream_info, protocol()).WillRepeatedly(Return(protocol)); const auto parsed = Json::Factory::loadFromString( - formatter.format(request_header, response_header, response_trailer, stream_info)); + formatter.format(request_header, response_header, response_trailer, stream_info, body)); for (const auto& pair : expected_json_map) { EXPECT_EQ(parsed->getString(pair.first), pair.second); } @@ -1811,6 +1844,7 @@ TEST(AccessLogFormatterTest, JsonFormatterTypedTest) { Http::TestResponseHeaderMapImpl response_headers; Http::TestResponseTrailerMapImpl response_trailers; StreamInfo::MockStreamInfo stream_info; + std::string body; EXPECT_CALL(Const(stream_info), lastDownstreamRxByteReceived()) .WillRepeatedly(Return(std::chrono::nanoseconds(5000000))); @@ -1836,7 +1870,7 @@ TEST(AccessLogFormatterTest, JsonFormatterTypedTest) { JsonFormatterImpl formatter(key_mapping, true); const auto json = - formatter.format(request_headers, response_headers, response_trailers, stream_info); + formatter.format(request_headers, response_headers, response_trailers, stream_info, body); ProtobufWkt::Struct output; MessageUtil::loadFromJson(json, output); @@ -1853,6 +1887,7 @@ TEST(AccessLogFormatterTest, CompositeFormatterSuccess) { Http::TestRequestHeaderMapImpl request_header{{"first", "GET"}, {":path", "/"}}; Http::TestResponseHeaderMapImpl response_header{{"second", "PUT"}, {"test", "test"}}; 
Http::TestResponseTrailerMapImpl response_trailer{{"third", "POST"}, {"test-2", "test-2"}}; + std::string body; { const std::string format = "{{%PROTOCOL%}} %RESP(not exist)%++%RESP(test)% " @@ -1863,16 +1898,17 @@ TEST(AccessLogFormatterTest, CompositeFormatterSuccess) { absl::optional protocol = Http::Protocol::Http11; EXPECT_CALL(stream_info, protocol()).WillRepeatedly(Return(protocol)); - EXPECT_EQ("{{HTTP/1.1}} -++test GET PUT\t@POST@\ttest-2[]", - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ( + "{{HTTP/1.1}} -++test GET PUT\t@POST@\ttest-2[]", + formatter.format(request_header, response_header, response_trailer, stream_info, body)); } { const std::string format = "{}*JUST PLAIN string]"; FormatterImpl formatter(format); - EXPECT_EQ(format, - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ(format, formatter.format(request_header, response_header, response_trailer, + stream_info, body)); } { @@ -1881,8 +1917,8 @@ TEST(AccessLogFormatterTest, CompositeFormatterSuccess) { FormatterImpl formatter(format); - EXPECT_EQ("GET|G|PU|GET|POS", - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ("GET|G|PU|GET|POS", formatter.format(request_header, response_header, + response_trailer, stream_info, body)); } { @@ -1894,8 +1930,9 @@ TEST(AccessLogFormatterTest, CompositeFormatterSuccess) { "test_obj)%|%DYNAMIC_METADATA(com.test:test_obj:inner_key)%"; FormatterImpl formatter(format); - EXPECT_EQ("\"test_value\"|{\"inner_key\":\"inner_value\"}|\"inner_value\"", - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ( + "\"test_value\"|{\"inner_key\":\"inner_value\"}|\"inner_value\"", + formatter.format(request_header, response_header, response_trailer, stream_info, body)); } { @@ -1912,8 +1949,9 @@ TEST(AccessLogFormatterTest, CompositeFormatterSuccess) { "%FILTER_STATE(testing):8%|%FILTER_STATE(nonexisting)%"; FormatterImpl formatter(format); - EXPECT_EQ("\"test_value\"|-|\"test_va|-", - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ( + "\"test_value\"|-|\"test_va|-", + formatter.format(request_header, response_header, response_trailer, stream_info, body)); } { @@ -1925,9 +1963,10 @@ TEST(AccessLogFormatterTest, CompositeFormatterSuccess) { EXPECT_CALL(stream_info, startTime()).WillRepeatedly(Return(time)); FormatterImpl formatter(format); - EXPECT_EQ(fmt::format("2018/03/28|{}|bad_format|2018-03-28T23:35:58.000Z|000000000.0.00.000", - expected_time_in_epoch), - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ( + fmt::format("2018/03/28|{}|bad_format|2018-03-28T23:35:58.000Z|000000000.0.00.000", + expected_time_in_epoch), + formatter.format(request_header, response_header, response_trailer, stream_info, body)); } { @@ -1940,8 +1979,9 @@ TEST(AccessLogFormatterTest, CompositeFormatterSuccess) { EXPECT_CALL(stream_info, startTime()).WillRepeatedly(Return(time)); FormatterImpl formatter(format); - EXPECT_EQ("1970/01/01|0|bad_format|1970-01-01T00:00:00.000Z|000000000.0.00.000", - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ( + "1970/01/01|0|bad_format|1970-01-01T00:00:00.000Z|000000000.0.00.000", + formatter.format(request_header, response_header, response_trailer, stream_info, body)); } { @@ -1951,8 +1991,9 @@ TEST(AccessLogFormatterTest, CompositeFormatterSuccess) { const 
SystemTime start_time(std::chrono::microseconds(1522796769123456)); EXPECT_CALL(stream_info, startTime()).WillRepeatedly(Return(start_time)); FormatterImpl formatter(format); - EXPECT_EQ("1522796769.123|1522796769.1234|1522796769.12345|1522796769.123456", - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ( + "1522796769.123|1522796769.1234|1522796769.12345|1522796769.123456", + formatter.format(request_header, response_header, response_trailer, stream_info, body)); } { @@ -1961,9 +2002,10 @@ TEST(AccessLogFormatterTest, CompositeFormatterSuccess) { const SystemTime start_time(std::chrono::microseconds(1522796769123456)); EXPECT_CALL(stream_info, startTime()).WillRepeatedly(Return(start_time)); FormatterImpl formatter(format); - EXPECT_EQ("segment1:1522796769.123|segment2:1522796769.1234|seg3:1522796769.123456|1522796769-" - "123-asdf-123456000|.1234560:segm5:2018", - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ( + "segment1:1522796769.123|segment2:1522796769.1234|seg3:1522796769.123456|1522796769-" + "123-asdf-123456000|.1234560:segm5:2018", + formatter.format(request_header, response_header, response_trailer, stream_info, body)); } { @@ -1973,8 +2015,9 @@ TEST(AccessLogFormatterTest, CompositeFormatterSuccess) { const SystemTime start_time(std::chrono::microseconds(1522796769123456)); EXPECT_CALL(stream_info, startTime()).WillOnce(Return(start_time)); FormatterImpl formatter(format); - EXPECT_EQ("%%|%%123456000|1522796769%%123|1%%1522796769", - formatter.format(request_header, response_header, response_trailer, stream_info)); + EXPECT_EQ( + "%%|%%123456000|1522796769%%123|1%%1522796769", + formatter.format(request_header, response_header, response_trailer, stream_info, body)); } } diff --git a/test/common/common/substitution_format_string_test.cc b/test/common/common/substitution_format_string_test.cc index 745d2fb760fc..01ba892bdbb4 100644 --- a/test/common/common/substitution_format_string_test.cc +++ b/test/common/common/substitution_format_string_test.cc @@ -24,6 +24,7 @@ class SubstitutionFormatStringUtilsTest : public ::testing::Test { Http::TestResponseHeaderMapImpl response_headers_; Http::TestResponseTrailerMapImpl response_trailers_; StreamInfo::MockStreamInfo stream_info_; + std::string body_; envoy::config::core::v3::SubstitutionFormatString config_; }; @@ -41,9 +42,9 @@ TEST_F(SubstitutionFormatStringUtilsTest, TestFromProtoConfigText) { TestUtility::loadFromYaml(yaml, config_); auto formatter = SubstitutionFormatStringUtils::fromProtoConfig(config_); - EXPECT_EQ( - "plain text, path=/bar/foo, code=200", - formatter->format(request_headers_, response_headers_, response_trailers_, stream_info_)); + EXPECT_EQ("plain text, path=/bar/foo, code=200", + formatter->format(request_headers_, response_headers_, response_trailers_, stream_info_, + body_)); } TEST_F(SubstitutionFormatStringUtilsTest, TestFromProtoConfigJson) { @@ -56,8 +57,8 @@ TEST_F(SubstitutionFormatStringUtilsTest, TestFromProtoConfigJson) { TestUtility::loadFromYaml(yaml, config_); auto formatter = SubstitutionFormatStringUtils::fromProtoConfig(config_); - const auto out_json = - formatter->format(request_headers_, response_headers_, response_trailers_, stream_info_); + const auto out_json = formatter->format(request_headers_, response_headers_, response_trailers_, + stream_info_, body_); const std::string expected = R"EOF({ "text": "plain text", diff --git a/test/common/http/conn_manager_impl_fuzz_test.cc 
b/test/common/http/conn_manager_impl_fuzz_test.cc index 7e503275e15d..377770669ebb 100644 --- a/test/common/http/conn_manager_impl_fuzz_test.cc +++ b/test/common/http/conn_manager_impl_fuzz_test.cc @@ -67,7 +67,8 @@ class FuzzConfig : public ConnectionManagerConfig { POOL_HISTOGRAM(fake_stats_))}, "", fake_stats_), tracing_stats_{CONN_MAN_TRACING_STATS(POOL_COUNTER(fake_stats_))}, - listener_stats_{CONN_MAN_LISTENER_STATS(POOL_COUNTER(fake_stats_))} { + listener_stats_{CONN_MAN_LISTENER_STATS(POOL_COUNTER(fake_stats_))}, + local_reply_(LocalReply::Factory::createDefault()) { ON_CALL(route_config_provider_, lastUpdated()).WillByDefault(Return(time_system_.systemTime())); ON_CALL(scoped_route_config_provider_, lastUpdated()) .WillByDefault(Return(time_system_.systemTime())); @@ -161,6 +162,7 @@ class FuzzConfig : public ConnectionManagerConfig { headersWithUnderscoresAction() const override { return envoy::config::core::v3::HttpProtocolOptions::ALLOW; } + const LocalReply::LocalReply& localReply() const override { return *local_reply_; } const envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager config_; @@ -203,6 +205,7 @@ class FuzzConfig : public ConnectionManagerConfig { Http::Http1Settings http1_settings_; Http::DefaultInternalAddressConfig internal_address_config_; bool normalize_path_{true}; + LocalReply::LocalReplyPtr local_reply_; }; // Internal representation of stream state. Encapsulates the stream state, mocks diff --git a/test/common/http/conn_manager_impl_test.cc b/test/common/http/conn_manager_impl_test.cc index 86770f07a790..a763da3c0af9 100644 --- a/test/common/http/conn_manager_impl_test.cc +++ b/test/common/http/conn_manager_impl_test.cc @@ -96,7 +96,8 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan "", fake_stats_), tracing_stats_{CONN_MAN_TRACING_STATS(POOL_COUNTER(fake_stats_))}, listener_stats_{CONN_MAN_LISTENER_STATS(POOL_COUNTER(fake_listener_stats_))}, - request_id_extension_(RequestIDExtensionFactory::defaultInstance(random_)) { + request_id_extension_(RequestIDExtensionFactory::defaultInstance(random_)), + local_reply_(LocalReply::Factory::createDefault()) { ON_CALL(route_config_provider_, lastUpdated()) .WillByDefault(Return(test_time_.timeSystem().systemTime())); @@ -355,6 +356,7 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan headersWithUnderscoresAction() const override { return headers_with_underscores_action_; } + const LocalReply::LocalReply& localReply() const override { return *local_reply_; } Envoy::Event::SimulatedTimeSystem test_time_; NiceMock route_config_provider_; @@ -417,6 +419,7 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan NiceMock upstream_conn_; // for websocket tests NiceMock conn_pool_; // for websocket tests RequestIDExtensionSharedPtr request_id_extension_; + const LocalReply::LocalReplyPtr local_reply_; // TODO(mattklein123): Not all tests have been converted over to better setup. Convert the rest. 
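The formatter assertions earlier in this patch all thread a new trailing body string through format(). A minimal call-shape sketch of the widened interface, assuming a formatter and the usual header/trailer maps and stream info are already in scope (names mirror the tests above):

```cpp
// The extra string argument is new in this patch; it is what body-related operators such as
// %LOCAL_REPLY_BODY% can draw from, and it may simply be empty for ordinary access logging.
const std::string body = "";
const std::string log_line =
    formatter.format(request_headers, response_headers, response_trailers, stream_info, body);
```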
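The connection-manager test fixtures above all gain the same two-line pattern for the new localReply() accessor. A config implementation can satisfy it by owning a default-constructed LocalReply; a sketch (class and member names other than LocalReply::Factory::createDefault() and localReply() are illustrative):

```cpp
#include "common/local_reply/local_reply.h"

class MyConnectionManagerConfig /* : public Http::ConnectionManagerConfig */ {
public:
  MyConnectionManagerConfig() : local_reply_(LocalReply::Factory::createDefault()) {}

  // Returns the LocalReply used to rewrite locally generated responses.
  const LocalReply::LocalReply& localReply() const /* override */ { return *local_reply_; }

private:
  const LocalReply::LocalReplyPtr local_reply_;
};
```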
MockResponseEncoder response_encoder_; diff --git a/test/common/http/conn_manager_utility_test.cc b/test/common/http/conn_manager_utility_test.cc index 3ea00c438db9..12b01c207c2c 100644 --- a/test/common/http/conn_manager_utility_test.cc +++ b/test/common/http/conn_manager_utility_test.cc @@ -120,6 +120,7 @@ class MockConnectionManagerConfig : public ConnectionManagerConfig { const Http::InternalAddressConfig& internalAddressConfig() const override { return *internal_address_config_; } + MOCK_METHOD(bool, unixSocketInternal, ()); MOCK_METHOD(uint32_t, xffNumTrustedHops, (), (const)); MOCK_METHOD(bool, skipXffAppend, (), (const)); @@ -139,6 +140,7 @@ class MockConnectionManagerConfig : public ConnectionManagerConfig { MOCK_METHOD(bool, shouldStripMatchingPort, (), (const)); MOCK_METHOD(envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction, headersWithUnderscoresAction, (), (const)); + MOCK_METHOD(const LocalReply::LocalReply&, localReply, (), (const)); std::unique_ptr internal_address_config_ = std::make_unique(); @@ -152,7 +154,8 @@ const Http::LowerCaseString& traceStatusHeader() { class ConnectionManagerUtilityTest : public testing::Test { public: ConnectionManagerUtilityTest() - : request_id_extension_(std::make_shared>(random_)) { + : request_id_extension_(std::make_shared>(random_)), + local_reply_(LocalReply::Factory::createDefault()) { ON_CALL(config_, userAgent()).WillByDefault(ReturnRef(user_agent_)); envoy::type::v3::FractionalPercent percent1; @@ -163,6 +166,7 @@ class ConnectionManagerUtilityTest : public testing::Test { tracing_config_ = { Tracing::OperationName::Ingress, {}, percent1, percent2, percent1, false, 256}; ON_CALL(config_, tracingConfig()).WillByDefault(Return(&tracing_config_)); + ON_CALL(config_, localReply()).WillByDefault(ReturnRef(*local_reply_)); ON_CALL(config_, via()).WillByDefault(ReturnRef(via_)); ON_CALL(config_, requestIDExtension()).WillByDefault(Return(request_id_extension_)); @@ -200,6 +204,7 @@ class ConnectionManagerUtilityTest : public testing::Test { NiceMock runtime_; Http::TracingConnectionManagerConfig tracing_config_; NiceMock local_info_; + LocalReply::LocalReplyPtr local_reply_; std::string canary_node_{"canary"}; std::string empty_node_; std::string via_; diff --git a/test/common/http/utility_test.cc b/test/common/http/utility_test.cc index 20d7e97305a5..4a3f118e1260 100644 --- a/test/common/http/utility_test.cc +++ b/test/common/http/utility_test.cc @@ -460,8 +460,9 @@ TEST(HttpUtility, SendLocalReply) { EXPECT_CALL(callbacks, encodeHeaders_(_, false)); EXPECT_CALL(callbacks, encodeData(_, true)); - Utility::sendLocalReply(false, callbacks, is_reset, Http::Code::PayloadTooLarge, "large", - absl::nullopt, false); + Utility::sendLocalReply( + is_reset, callbacks, + Utility::LocalReplyData{false, Http::Code::PayloadTooLarge, "large", absl::nullopt, false}); } TEST(HttpUtility, SendLocalGrpcReply) { @@ -477,8 +478,9 @@ TEST(HttpUtility, SendLocalGrpcReply) { EXPECT_NE(headers.GrpcMessage(), nullptr); EXPECT_EQ(headers.GrpcMessage()->value().getStringView(), "large"); })); - Utility::sendLocalReply(true, callbacks, is_reset, Http::Code::PayloadTooLarge, "large", - absl::nullopt, false); + Utility::sendLocalReply( + is_reset, callbacks, + Utility::LocalReplyData{true, Http::Code::PayloadTooLarge, "large", absl::nullopt, false}); } TEST(HttpUtility, SendLocalGrpcReplyWithUpstreamJsonPayload) { @@ -504,8 +506,9 @@ TEST(HttpUtility, SendLocalGrpcReplyWithUpstreamJsonPayload) { const auto& encoded = 
Utility::PercentEncoding::encode(json); EXPECT_EQ(headers.GrpcMessage()->value().getStringView(), encoded); })); - Utility::sendLocalReply(true, callbacks, is_reset, Http::Code::Unauthorized, json, absl::nullopt, - false); + Utility::sendLocalReply( + is_reset, callbacks, + Utility::LocalReplyData{true, Http::Code::Unauthorized, json, absl::nullopt, false}); } TEST(HttpUtility, RateLimitedGrpcStatus) { @@ -517,8 +520,9 @@ TEST(HttpUtility, RateLimitedGrpcStatus) { EXPECT_EQ(headers.GrpcStatus()->value().getStringView(), std::to_string(enumToInt(Grpc::Status::WellKnownGrpcStatus::Unavailable))); })); - Utility::sendLocalReply(true, callbacks, false, Http::Code::TooManyRequests, "", absl::nullopt, - false); + Utility::sendLocalReply( + false, callbacks, + Utility::LocalReplyData{true, Http::Code::TooManyRequests, "", absl::nullopt, false}); EXPECT_CALL(callbacks, encodeHeaders_(_, true)) .WillOnce(Invoke([&](const ResponseHeaderMap& headers, bool) -> void { @@ -526,10 +530,12 @@ TEST(HttpUtility, RateLimitedGrpcStatus) { EXPECT_EQ(headers.GrpcStatus()->value().getStringView(), std::to_string(enumToInt(Grpc::Status::WellKnownGrpcStatus::ResourceExhausted))); })); - Utility::sendLocalReply(true, callbacks, false, Http::Code::TooManyRequests, "", - absl::make_optional( - Grpc::Status::WellKnownGrpcStatus::ResourceExhausted), - false); + Utility::sendLocalReply( + false, callbacks, + Utility::LocalReplyData{true, Http::Code::TooManyRequests, "", + absl::make_optional( + Grpc::Status::WellKnownGrpcStatus::ResourceExhausted), + false}); } TEST(HttpUtility, SendLocalReplyDestroyedEarly) { @@ -540,8 +546,9 @@ TEST(HttpUtility, SendLocalReplyDestroyedEarly) { is_reset = true; })); EXPECT_CALL(callbacks, encodeData(_, true)).Times(0); - Utility::sendLocalReply(false, callbacks, is_reset, Http::Code::PayloadTooLarge, "large", - absl::nullopt, false); + Utility::sendLocalReply( + is_reset, callbacks, + Utility::LocalReplyData{false, Http::Code::PayloadTooLarge, "large", absl::nullopt, false}); } TEST(HttpUtility, SendLocalReplyHeadRequest) { @@ -552,8 +559,9 @@ TEST(HttpUtility, SendLocalReplyHeadRequest) { EXPECT_EQ(headers.ContentLength()->value().getStringView(), fmt::format("{}", strlen("large"))); })); - Utility::sendLocalReply(false, callbacks, is_reset, Http::Code::PayloadTooLarge, "large", - absl::nullopt, true); + Utility::sendLocalReply( + is_reset, callbacks, + Utility::LocalReplyData{false, Http::Code::PayloadTooLarge, "large", absl::nullopt, true}); } TEST(HttpUtility, TestExtractHostPathFromUri) { diff --git a/test/common/local_reply/BUILD b/test/common/local_reply/BUILD new file mode 100644 index 000000000000..fcd56c500a53 --- /dev/null +++ b/test/common/local_reply/BUILD @@ -0,0 +1,22 @@ +licenses(["notice"]) # Apache 2 + +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_test", + "envoy_package", +) + +envoy_package() + +envoy_cc_test( + name = "local_reply_test", + srcs = ["local_reply_test.cc"], + deps = [ + "//source/common/local_reply:local_reply_lib", + "//test/mocks/http:http_mocks", + "//test/mocks/server:server_mocks", + "//test/mocks/stream_info:stream_info_mocks", + "//test/test_common:utility_lib", + "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", + ], +) diff --git a/test/common/local_reply/local_reply_test.cc b/test/common/local_reply/local_reply_test.cc new file mode 100644 index 000000000000..2bf9149d0a94 --- /dev/null +++ b/test/common/local_reply/local_reply_test.cc @@ -0,0 +1,295 @@ +#include 
"envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.validate.h" +#include "envoy/http/codes.h" + +#include "common/local_reply/local_reply.h" + +#include "test/mocks/http/mocks.h" +#include "test/mocks/server/mocks.h" +#include "test/test_common/simulated_time_system.h" +#include "test/test_common/utility.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace LocalReply { +namespace { + +const Http::Code TestInitCode = Http::Code::OK; +const std::string TestInitBody = "Init body text"; +const absl::string_view TestInitContentType = "content-type"; +} // namespace + +class LocalReplyTest : public testing::Test { +public: + LocalReplyTest() : stream_info_(time_system_.timeSystem()) { resetData(TestInitCode); } + + void resetData(Http::Code code) { + code_ = code; + body_ = TestInitBody; + content_type_ = TestInitContentType; + } + void resetData(uint32_t code) { resetData(static_cast(code)); } + + Http::Code code_; + std::string body_; + absl::string_view content_type_; + + Http::TestRequestHeaderMapImpl request_headers_{{":method", "GET"}, {":path", "/bar/foo"}}; + Http::TestResponseHeaderMapImpl response_headers_; + Event::SimulatedTimeSystem time_system_; + StreamInfo::StreamInfoImpl stream_info_; + + envoy::extensions::filters::network::http_connection_manager::v3::LocalReplyConfig config_; + NiceMock context_; +}; + +TEST_F(LocalReplyTest, TestEmptyConfig) { + // Empty LocalReply config. + auto local = Factory::create(config_, context_); + + local->rewrite(nullptr, response_headers_, stream_info_, code_, body_, content_type_); + EXPECT_EQ(code_, TestInitCode); + EXPECT_EQ(stream_info_.response_code_, static_cast(TestInitCode)); + EXPECT_EQ(response_headers_.Status()->value().getStringView(), + std::to_string(enumToInt(TestInitCode))); + EXPECT_EQ(body_, TestInitBody); + EXPECT_EQ(content_type_, "text/plain"); +} + +TEST_F(LocalReplyTest, TestDefaultLocalReply) { + // Default LocalReply should be the same as empty config. 
+ auto local = Factory::createDefault(); + + local->rewrite(nullptr, response_headers_, stream_info_, code_, body_, content_type_); + EXPECT_EQ(code_, TestInitCode); + EXPECT_EQ(stream_info_.response_code_, static_cast(TestInitCode)); + EXPECT_EQ(response_headers_.Status()->value().getStringView(), + std::to_string(enumToInt(TestInitCode))); + EXPECT_EQ(body_, TestInitBody); + EXPECT_EQ(content_type_, "text/plain"); +} + +TEST_F(LocalReplyTest, TestInvalidConfigEmptyFilter) { + // Invalid config: a mapper should have a valid filter + const std::string yaml = R"( + mappers: + - status_code: 401 +)"; + TestUtility::loadFromYaml(yaml, config_); + + std::string err; + EXPECT_FALSE(Validate(config_, &err)); +} + +TEST_F(LocalReplyTest, TestInvalidConfigStatusCode) { + // Invalid config: status_code should be at range [200, 600) + const std::string yaml = R"( + mappers: + - filter: + status_code_filter: + comparison: + op: EQ + value: + default_value: 400 + runtime_key: key_b + status_code: 100 +)"; + TestUtility::loadFromYaml(yaml, config_); + + std::string err; + EXPECT_FALSE(Validate(config_, &err)); +} + +TEST_F(LocalReplyTest, TestDefaultTextFormatter) { + // Default text formatter without any mappers + const std::string yaml = R"( + body_format: + text_format: "%LOCAL_REPLY_BODY% %RESPONSE_CODE%" +)"; + TestUtility::loadFromYaml(yaml, config_); + auto local = Factory::create(config_, context_); + + local->rewrite(nullptr, response_headers_, stream_info_, code_, body_, content_type_); + EXPECT_EQ(code_, TestInitCode); + EXPECT_EQ(stream_info_.response_code_, static_cast(TestInitCode)); + EXPECT_EQ(response_headers_.Status()->value().getStringView(), + std::to_string(enumToInt(TestInitCode))); + EXPECT_EQ(body_, "Init body text 200"); + EXPECT_EQ(content_type_, "text/plain"); +} + +TEST_F(LocalReplyTest, TestDefaultJsonFormatter) { + // Default json formatter without any mappers + const std::string yaml = R"( + body_format: + json_format: + text: "plain text" + path: "%REQ(:path)%" + code: "%RESPONSE_CODE%" + body: "%LOCAL_REPLY_BODY%" +)"; + TestUtility::loadFromYaml(yaml, config_); + auto local = Factory::create(config_, context_); + + local->rewrite(&request_headers_, response_headers_, stream_info_, code_, body_, content_type_); + EXPECT_EQ(code_, TestInitCode); + EXPECT_EQ(stream_info_.response_code_, static_cast(TestInitCode)); + EXPECT_EQ(response_headers_.Status()->value().getStringView(), + std::to_string(enumToInt(TestInitCode))); + EXPECT_EQ(content_type_, "application/json"); + + const std::string expected = R"({ + "text": "plain text", + "path": "/bar/foo", + "code": 200, + "body": "Init body text" +})"; + EXPECT_TRUE(TestUtility::jsonStringEqual(body_, expected)); +} + +TEST_F(LocalReplyTest, TestMapperRewrite) { + // Match with response_code, and rewrite the code and body. 
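The unit tests above pin down the rewrite() contract that the rest of this patch relies on; as a sketch, with the fixture names assumed from above:

```cpp
Http::Code code = Http::Code::OK;
std::string body = "Init body text";
absl::string_view content_type;

// rewrite() may update the response code, body, and content type in place and stamps the
// resulting :status onto the response headers; with an empty or default config it leaves the
// code and body alone and only fills in the default content type of "text/plain".
local->rewrite(/*request_headers=*/nullptr, response_headers_, stream_info_, code, body,
               content_type);
```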
+ const std::string yaml = R"( + mappers: + - filter: + status_code_filter: + comparison: + op: EQ + value: + default_value: 400 + runtime_key: key_b + status_code: 401 + body: + inline_string: "400 body text" + - filter: + status_code_filter: + comparison: + op: EQ + value: + default_value: 410 + runtime_key: key_b + body: + inline_string: "410 body text" + - filter: + status_code_filter: + comparison: + op: EQ + value: + default_value: 420 + runtime_key: key_b + status_code: 421 + - filter: + status_code_filter: + comparison: + op: EQ + value: + default_value: 430 + runtime_key: key_b +)"; + TestUtility::loadFromYaml(yaml, config_); + auto local = Factory::create(config_, context_); + + // code=400 matches the first filter; rewrite code and body + resetData(400); + local->rewrite(&request_headers_, response_headers_, stream_info_, code_, body_, content_type_); + EXPECT_EQ(code_, static_cast(401)); + EXPECT_EQ(stream_info_.response_code_, 401U); + EXPECT_EQ(response_headers_.Status()->value().getStringView(), "401"); + EXPECT_EQ(body_, "400 body text"); + EXPECT_EQ(content_type_, "text/plain"); + + // code=410 matches the second filter; rewrite body only + resetData(410); + local->rewrite(&request_headers_, response_headers_, stream_info_, code_, body_, content_type_); + EXPECT_EQ(code_, static_cast(410)); + EXPECT_EQ(stream_info_.response_code_, 410U); + EXPECT_EQ(response_headers_.Status()->value().getStringView(), "410"); + EXPECT_EQ(body_, "410 body text"); + EXPECT_EQ(content_type_, "text/plain"); + + // code=420 matches the third filter; rewrite code only + resetData(420); + local->rewrite(&request_headers_, response_headers_, stream_info_, code_, body_, content_type_); + EXPECT_EQ(code_, static_cast(421)); + EXPECT_EQ(stream_info_.response_code_, 421U); + EXPECT_EQ(response_headers_.Status()->value().getStringView(), "421"); + EXPECT_EQ(body_, TestInitBody); + EXPECT_EQ(content_type_, "text/plain"); + + // code=430 matches the fourth filter; rewrite nothing + resetData(430); + local->rewrite(&request_headers_, response_headers_, stream_info_, code_, body_, content_type_); + EXPECT_EQ(code_, static_cast(430)); + EXPECT_EQ(stream_info_.response_code_, 430U); + EXPECT_EQ(response_headers_.Status()->value().getStringView(), "430"); + EXPECT_EQ(body_, TestInitBody); + EXPECT_EQ(content_type_, "text/plain"); +} + +TEST_F(LocalReplyTest, TestMapperFormat) { + // Match with response_code, and rewrite the code and body. 
+ const std::string yaml = R"( + mappers: + - filter: + status_code_filter: + comparison: + op: EQ + value: + default_value: 400 + runtime_key: key_b + status_code: 401 + body: + inline_string: "401 body text" + body_format_override: + json_format: + text: "401 filter formatter" + path: "%REQ(:path)%" + code: "%RESPONSE_CODE%" + body: "%LOCAL_REPLY_BODY%" + - filter: + status_code_filter: + comparison: + op: EQ + value: + default_value: 410 + runtime_key: key_b + status_code: 411 + body: + inline_string: "411 body text" + body_format: + text_format: "%LOCAL_REPLY_BODY% %RESPONSE_CODE% default formatter" +)"; + TestUtility::loadFromYaml(yaml, config_); + auto local = Factory::create(config_, context_); + + // code=400 matches the first filter; rewrite code and body + // has its own formatter + resetData(400); + local->rewrite(&request_headers_, response_headers_, stream_info_, code_, body_, content_type_); + EXPECT_EQ(code_, static_cast(401)); + EXPECT_EQ(stream_info_.response_code_, 401U); + EXPECT_EQ(response_headers_.Status()->value().getStringView(), "401"); + EXPECT_EQ(content_type_, "application/json"); + + const std::string expected = R"({ + "text": "401 filter formatter", + "path": "/bar/foo", + "code": 401, + "body": "401 body text" +})"; + EXPECT_TRUE(TestUtility::jsonStringEqual(body_, expected)); + + // code=410 matches the second filter; rewrite code and body + // but using default formatter + resetData(410); + local->rewrite(&request_headers_, response_headers_, stream_info_, code_, body_, content_type_); + EXPECT_EQ(code_, static_cast(411)); + EXPECT_EQ(stream_info_.response_code_, 411U); + EXPECT_EQ(response_headers_.Status()->value().getStringView(), "411"); + EXPECT_EQ(body_, "411 body text 411 default formatter"); + EXPECT_EQ(content_type_, "text/plain"); +} + +} // namespace LocalReply +} // namespace Envoy diff --git a/test/config/utility.cc b/test/config/utility.cc index f53924e1e3f7..e6d196e2ae08 100644 --- a/test/config/utility.cc +++ b/test/config/utility.cc @@ -947,6 +947,16 @@ void ConfigHelper::setOutboundFramesLimits(uint32_t max_all_frames, uint32_t max } } +void ConfigHelper::setLocalReply( + const envoy::extensions::filters::network::http_connection_manager::v3::LocalReplyConfig& + config) { + envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager + hcm_config; + loadHttpConnectionManager(hcm_config); + hcm_config.mutable_local_reply_config()->MergeFrom(config); + storeHttpConnectionManager(hcm_config); +} + CdsHelper::CdsHelper() : cds_path_(TestEnvironment::writeStringToFileForTest("cds.pb_text", "")) {} void CdsHelper::setCds(const std::vector& clusters) { diff --git a/test/config/utility.h b/test/config/utility.h index cb3e1b8cfacd..dc49abaa01e4 100644 --- a/test/config/utility.h +++ b/test/config/utility.h @@ -201,6 +201,10 @@ class ConfigHelper { // CONNECT requests. static void setConnectConfig(HttpConnectionManager& hcm, bool terminate_connect); + void setLocalReply( + const envoy::extensions::filters::network::http_connection_manager::v3::LocalReplyConfig& + config); + private: // Load the first HCM struct from the first listener into a parsed proto. 
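ConfigHelper::setLocalReply above is a thin wrapper; the essential step is merging a LocalReplyConfig into the HCM's local_reply_config field. A sketch, where `hcm` is assumed to be the HttpConnectionManager proto obtained the way the helper's loadHttpConnectionManager() (declared just below) obtains it:

```cpp
envoy::extensions::filters::network::http_connection_manager::v3::LocalReplyConfig local_reply;
TestUtility::loadFromYaml(R"EOF(
body_format:
  text_format: "%RESPONSE_CODE% - %LOCAL_REPLY_BODY%"
)EOF",
                          local_reply);
// Merge the local reply settings into the connection manager configuration.
hcm.mutable_local_reply_config()->MergeFrom(local_reply);
```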
bool loadHttpConnectionManager(HttpConnectionManager& hcm); diff --git a/test/integration/BUILD b/test/integration/BUILD index 6d25cdc3b080..f4d8ff9263d3 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -1189,3 +1189,15 @@ envoy_cc_test( "@envoy_api//envoy/extensions/access_loggers/file/v3:pkg_cc_proto", ], ) + +envoy_cc_test( + name = "local_reply_integration_test", + srcs = [ + "local_reply_integration_test.cc", + ], + deps = [ + ":http_integration_lib", + ":http_protocol_integration_lib", + "//test/test_common:utility_lib", + ], +) diff --git a/test/integration/local_reply_integration_test.cc b/test/integration/local_reply_integration_test.cc new file mode 100644 index 000000000000..3568eb979536 --- /dev/null +++ b/test/integration/local_reply_integration_test.cc @@ -0,0 +1,345 @@ +#include "test/integration/http_protocol_integration.h" +#include "test/test_common/utility.h" + +namespace Envoy { + +class LocalReplyIntegrationTest : public HttpProtocolIntegrationTest { +public: + void initialize() override { HttpProtocolIntegrationTest::initialize(); } + + void setLocalReplyConfig(const std::string& yaml) { + envoy::extensions::filters::network::http_connection_manager::v3::LocalReplyConfig + local_reply_config; + TestUtility::loadFromYaml(yaml, local_reply_config); + config_helper_.setLocalReply(local_reply_config); + } +}; + +INSTANTIATE_TEST_SUITE_P(Protocols, LocalReplyIntegrationTest, + testing::ValuesIn(HttpProtocolIntegrationTest::getProtocolTestParams()), + HttpProtocolIntegrationTest::protocolTestParamsToString); + +TEST_P(LocalReplyIntegrationTest, MapStatusCodeAndFormatToJson) { + const std::string yaml = R"EOF( +mappers: + - filter: + header_filter: + header: + name: test-header + exact_match: exact-match-value + status_code: 550 +body_format: + json_format: + level: TRACE + user_agent: "%REQ(USER-AGENT)%" + response_body: "%LOCAL_REPLY_BODY%" + )EOF"; + setLocalReplyConfig(yaml); + initialize(); + + const std::string expected_body = R"({ + "level": "TRACE", + "user_agent": null, + "response_body": "upstream connect error or disconnect/reset before headers. 
reset reason: connection termination" +})"; + + codec_client_ = makeHttpConnection(lookupPort("http")); + + auto encoder_decoder = codec_client_->startRequest( + Http::TestRequestHeaderMapImpl{{":method", "POST"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"test-header", "exact-match-value"}}); + auto response = std::move(encoder_decoder.second); + + ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); + ASSERT_TRUE(fake_upstream_connection_->close()); + ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); + response->waitForEndStream(); + + if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { + codec_client_->waitForDisconnect(); + } else { + codec_client_->close(); + } + + EXPECT_FALSE(upstream_request_->complete()); + EXPECT_EQ(0U, upstream_request_->bodyLength()); + + EXPECT_TRUE(response->complete()); + EXPECT_EQ("application/json", response->headers().ContentType()->value().getStringView()); + EXPECT_EQ("150", response->headers().ContentLength()->value().getStringView()); + EXPECT_EQ("550", response->headers().Status()->value().getStringView()); + // Check if returned json is same as expected + EXPECT_TRUE(TestUtility::jsonStringEqual(response->body(), expected_body)); +} + +// Matched second filter has code and body rewrite and its format +TEST_P(LocalReplyIntegrationTest, MapStatusCodeAndFormatToJsonForFirstMatchingFilter) { + const std::string yaml = R"EOF( +mappers: + - filter: + header_filter: + header: + name: test-header + exact_match: exact-match-value-1 + status_code: 550 + - filter: + header_filter: + header: + name: test-header + exact_match: exact-match-value + status_code: 551 + body: + inline_string: "customized body text" + body_format_override: + text_format: "%LOCAL_REPLY_BODY% %RESPONSE_CODE%" + - filter: + header_filter: + header: + name: test-header + exact_match: exact-match-value + status_code: 552 +body_format: + json_format: + level: TRACE + response_flags: "%RESPONSE_FLAGS%" + response_body: "%LOCAL_REPLY_BODY%" + )EOF"; + setLocalReplyConfig(yaml); + initialize(); + + const std::string expected_body = "customized body text 551"; + + codec_client_ = makeHttpConnection(lookupPort("http")); + + auto encoder_decoder = codec_client_->startRequest( + Http::TestRequestHeaderMapImpl{{":method", "POST"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"test-header", "exact-match-value"}}); + auto response = std::move(encoder_decoder.second); + + ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); + ASSERT_TRUE(fake_upstream_connection_->close()); + ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); + response->waitForEndStream(); + + if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { + codec_client_->waitForDisconnect(); + } else { + codec_client_->close(); + } + + EXPECT_FALSE(upstream_request_->complete()); + EXPECT_EQ(0U, upstream_request_->bodyLength()); + + EXPECT_TRUE(response->complete()); + EXPECT_EQ("text/plain", response->headers().ContentType()->value().getStringView()); + EXPECT_EQ("24", response->headers().ContentLength()->value().getStringView()); + 
EXPECT_EQ("551", response->headers().Status()->value().getStringView()); + // Check if returned json is same as expected + EXPECT_EQ(response->body(), expected_body); +} + +// Not matching any filters. +TEST_P(LocalReplyIntegrationTest, ShouldNotMatchAnyFilter) { + const std::string yaml = R"EOF( +mappers: + - filter: + header_filter: + header: + name: test-header + exact_match: exact-match-value-1 + status_code: 550 + - filter: + header_filter: + header: + name: test-header + exact_match: exact-match-value-2 + status_code: 551 + - filter: + header_filter: + header: + name: test-header + exact_match: exact-match-value-3 + status_code: 552 +body_format: + json_format: + level: TRACE + response_flags: "%RESPONSE_FLAGS%" + response_body: "%LOCAL_REPLY_BODY%" + )EOF"; + setLocalReplyConfig(yaml); + initialize(); + + const std::string expected_body = R"({ + "level": "TRACE", + "response_flags": "UC", + "response_body": "upstream connect error or disconnect/reset before headers. reset reason: connection termination" +})"; + + codec_client_ = makeHttpConnection(lookupPort("http")); + + auto encoder_decoder = codec_client_->startRequest( + Http::TestRequestHeaderMapImpl{{":method", "POST"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"test-header", "exact-match-value"}}); + auto response = std::move(encoder_decoder.second); + + ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); + ASSERT_TRUE(fake_upstream_connection_->close()); + ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); + response->waitForEndStream(); + + if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { + codec_client_->waitForDisconnect(); + } else { + codec_client_->close(); + } + + EXPECT_FALSE(upstream_request_->complete()); + EXPECT_EQ(0U, upstream_request_->bodyLength()); + + EXPECT_TRUE(response->complete()); + EXPECT_EQ("application/json", response->headers().ContentType()->value().getStringView()); + EXPECT_EQ("154", response->headers().ContentLength()->value().getStringView()); + EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + // Check if returned json is same as expected + EXPECT_TRUE(TestUtility::jsonStringEqual(response->body(), expected_body)); +} + +// Use default formatter. 
+TEST_P(LocalReplyIntegrationTest, ShouldMapResponseCodeAndMapToDefaultTextResponse) { + const std::string yaml = R"EOF( +mappers: + - filter: + header_filter: + header: + name: test-header + exact_match: exact-match-value-1 + status_code: 550 + - filter: + header_filter: + header: + name: test-header + exact_match: exact-match-value-2 + status_code: 551 + - filter: + header_filter: + header: + name: test-header + exact_match: exact-match-value-3 + status_code: 552 + )EOF"; + setLocalReplyConfig(yaml); + initialize(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + + auto encoder_decoder = codec_client_->startRequest( + Http::TestRequestHeaderMapImpl{{":method", "POST"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"test-header", "exact-match-value-2"}}); + auto response = std::move(encoder_decoder.second); + + ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); + ASSERT_TRUE(fake_upstream_connection_->close()); + ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); + response->waitForEndStream(); + + if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { + codec_client_->waitForDisconnect(); + } else { + codec_client_->close(); + } + + EXPECT_FALSE(upstream_request_->complete()); + EXPECT_EQ(0U, upstream_request_->bodyLength()); + + EXPECT_TRUE(response->complete()); + EXPECT_EQ("text/plain", response->headers().ContentType()->value().getStringView()); + EXPECT_EQ("95", response->headers().ContentLength()->value().getStringView()); + + EXPECT_EQ("551", response->headers().Status()->value().getStringView()); + + EXPECT_EQ(response->body(), "upstream connect error or disconnect/reset before headers. reset " + "reason: connection termination"); +} + +// Should return formatted text/plain response. 
+TEST_P(LocalReplyIntegrationTest, ShouldFormatResponseToCustomString) { + const std::string yaml = R"EOF( +mappers: +- filter: + status_code_filter: + comparison: + op: EQ + value: + default_value: 503 + runtime_key: key_b + status_code: 513 + body: + inline_string: "customized body text" +body_format: + text_format: "%RESPONSE_CODE% - %LOCAL_REPLY_BODY%" +)EOF"; + setLocalReplyConfig(yaml); + initialize(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + + auto encoder_decoder = codec_client_->startRequest( + Http::TestRequestHeaderMapImpl{{":method", "POST"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"test-header", "exact-match-value-2"}}); + auto response = std::move(encoder_decoder.second); + + ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); + ASSERT_TRUE(fake_upstream_connection_->close()); + ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); + response->waitForEndStream(); + + if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { + codec_client_->waitForDisconnect(); + } else { + codec_client_->close(); + } + + EXPECT_FALSE(upstream_request_->complete()); + EXPECT_EQ(0U, upstream_request_->bodyLength()); + + EXPECT_TRUE(response->complete()); + + EXPECT_EQ("text/plain", response->headers().ContentType()->value().getStringView()); + EXPECT_EQ("26", response->headers().ContentLength()->value().getStringView()); + + EXPECT_EQ("513", response->headers().Status()->value().getStringView()); + + EXPECT_EQ(response->body(), "513 - customized body text"); +} + +} // namespace Envoy diff --git a/test/mocks/http/mocks.cc b/test/mocks/http/mocks.cc index 6b041b740ec3..6d95a4150cdb 100644 --- a/test/mocks/http/mocks.cc +++ b/test/mocks/http/mocks.cc @@ -80,15 +80,19 @@ void MockStreamDecoderFilterCallbacks::sendLocalReply_( const absl::optional grpc_status, absl::string_view details) { details_ = std::string(details); Utility::sendLocalReply( - is_grpc_request_, - [this, modify_headers](ResponseHeaderMapPtr&& headers, bool end_stream) -> void { - if (modify_headers != nullptr) { - modify_headers(*headers); - } - encodeHeaders(std::move(headers), end_stream); - }, - [this](Buffer::Instance& data, bool end_stream) -> void { encodeData(data, end_stream); }, - stream_destroyed_, code, body, grpc_status, is_head_request_); + stream_destroyed_, + Utility::EncodeFunctions{ + nullptr, + [this, modify_headers](ResponseHeaderMapPtr&& headers, bool end_stream) -> void { + if (modify_headers != nullptr) { + modify_headers(*headers); + } + encodeHeaders(std::move(headers), end_stream); + }, + [this](Buffer::Instance& data, bool end_stream) -> void { + encodeData(data, end_stream); + }}, + Utility::LocalReplyData{is_grpc_request_, code, body, grpc_status, is_head_request_}); } MockStreamEncoderFilterCallbacks::MockStreamEncoderFilterCallbacks() { diff --git a/tools/spelling/spelling_dictionary.txt b/tools/spelling/spelling_dictionary.txt index 00bbda734aa3..c59db7754d40 100644 --- a/tools/spelling/spelling_dictionary.txt +++ b/tools/spelling/spelling_dictionary.txt @@ -946,6 +946,7 @@ retriable retriggers rmdir rocketmq +rewriter rollout roundtrip rpcs From c8c7d1a3116728b1d2eee9f19cd5de7dec736cdf Mon Sep 17 00:00:00 2001 From: Alvin Baptiste <11775386+abaptiste@users.noreply.github.com> Date: Tue, 26 May 2020 09:36:48 -0700 Subject: 
[PATCH 224/909] dns_filter: Generate responses to queries (#11074) Signed-off-by: Alvin Baptiste --- api/envoy/data/dns/v3/dns_table.proto | 45 +- api/envoy/data/dns/v4alpha/dns_table.proto | 45 +- .../listeners/udp_filters/dns_filter.rst | 71 +- .../arch_overview/listeners/dns_filter.rst | 6 + .../envoy/data/dns/v3/dns_table.proto | 45 +- .../envoy/data/dns/v4alpha/dns_table.proto | 45 +- source/extensions/extensions_build_config.bzl | 2 +- .../extensions/filters/udp/dns_filter/BUILD | 5 +- .../filters/udp/dns_filter/dns_filter.cc | 275 +++++++- .../filters/udp/dns_filter/dns_filter.h | 70 +- .../filters/udp/dns_filter/dns_parser.cc | 419 +++++++++++- .../filters/udp/dns_filter/dns_parser.h | 131 +++- test/extensions/filters/udp/dns_filter/BUILD | 26 + .../016fac1e4a40199b26b08df73179f9249e6a680b | Bin 0 -> 45 bytes .../110be4738f0cc29218ba95bd16a1442b57b3caaf | Bin 0 -> 52 bytes .../497a3f29c3a53a65853a9e0ab3dd315fb92ac025 | Bin 0 -> 47 bytes .../7c09f450b6667337fd111fad0049bf4601c1aece | Bin 0 -> 1024 bytes .../fb9282f0af3341cfc98d56f10fffffd5529d8802 | Bin 0 -> 44 bytes .../udp/dns_filter/dns_filter_fuzz_test.cc | 65 ++ .../dns_filter/dns_filter_integration_test.cc | 171 +++++ .../filters/udp/dns_filter/dns_filter_test.cc | 632 +++++++++++++++++- .../udp/dns_filter/dns_filter_test_utils.cc | 41 +- .../udp/dns_filter/dns_filter_test_utils.h | 3 + 23 files changed, 1850 insertions(+), 247 deletions(-) create mode 100644 test/extensions/filters/udp/dns_filter/dns_filter_corpus/016fac1e4a40199b26b08df73179f9249e6a680b create mode 100644 test/extensions/filters/udp/dns_filter/dns_filter_corpus/110be4738f0cc29218ba95bd16a1442b57b3caaf create mode 100644 test/extensions/filters/udp/dns_filter/dns_filter_corpus/497a3f29c3a53a65853a9e0ab3dd315fb92ac025 create mode 100644 test/extensions/filters/udp/dns_filter/dns_filter_corpus/7c09f450b6667337fd111fad0049bf4601c1aece create mode 100644 test/extensions/filters/udp/dns_filter/dns_filter_corpus/fb9282f0af3341cfc98d56f10fffffd5529d8802 create mode 100644 test/extensions/filters/udp/dns_filter/dns_filter_fuzz_test.cc create mode 100644 test/extensions/filters/udp/dns_filter/dns_filter_integration_test.cc diff --git a/api/envoy/data/dns/v3/dns_table.proto b/api/envoy/data/dns/v3/dns_table.proto index a6457e118672..5615c96e2891 100644 --- a/api/envoy/data/dns/v3/dns_table.proto +++ b/api/envoy/data/dns/v3/dns_table.proto @@ -28,22 +28,19 @@ message DnsTable { option (udpa.annotations.versioning).previous_message_type = "envoy.data.dns.v2alpha.DnsTable.AddressList"; - // This field contains a well formed IP address that is returned - // in the answer for a name query. The address field can be an - // IPv4 or IPv6 address. Address family detection is done automatically - // when Envoy parses the string. Since this field is repeated, - // Envoy will return one randomly chosen entry from this list in the - // DNS response. The random index will vary per query so that we prevent - // clients pinning on a single address for a configured domain + // This field contains a well formed IP address that is returned in the answer for a + // name query. The address field can be an IPv4 or IPv6 address. Address family + // detection is done automatically when Envoy parses the string. 
Since this field is + // repeated, Envoy will return as many entries from this list in the DNS response while + // keeping the response under 512 bytes repeated string address = 1 [(validate.rules).repeated = { min_items: 1 items {string {min_len: 3}} }]; } - // This message type is extensible and can contain a list of addresses - // or dictate some other method for resolving the addresses for an - // endpoint + // This message type is extensible and can contain a list of addresses, clusters or + // dictate a different method for resolving the addresses for an endpoint message DnsEndpoint { option (udpa.annotations.versioning).previous_message_type = "envoy.data.dns.v2alpha.DnsTable.DnsEndpoint"; @@ -52,6 +49,8 @@ message DnsTable { option (validate.required) = true; AddressList address_list = 1; + + string cluster_name = 2; } } @@ -59,27 +58,25 @@ message DnsTable { option (udpa.annotations.versioning).previous_message_type = "envoy.data.dns.v2alpha.DnsTable.DnsVirtualDomain"; - // The domain name for which Envoy will respond to query requests + // A domain name for which Envoy will respond to query requests string name = 1 [(validate.rules).string = {min_len: 2 well_known_regex: HTTP_HEADER_NAME}]; - // The configuration containing the method to determine the address - // of this endpoint + // The configuration containing the method to determine the address of this endpoint DnsEndpoint endpoint = 2; - // Sets the TTL in dns answers from Envoy returned to the client - google.protobuf.Duration answer_ttl = 3 [(validate.rules).duration = {gt {}}]; + // Sets the TTL in DNS answers from Envoy returned to the client. The default TTL is 300s + google.protobuf.Duration answer_ttl = 3 [(validate.rules).duration = {gte {seconds: 60}}]; } - // Control how many times envoy makes an attempt to forward a query to - // an external server - uint32 external_retry_count = 1; + // Control how many times Envoy makes an attempt to forward a query to an external DNS server + uint32 external_retry_count = 1 [(validate.rules).uint32 = {lte: 3}]; - // Fully qualified domain names for which Envoy will respond to queries - repeated DnsVirtualDomain virtual_domains = 2 [(validate.rules).repeated = {min_items: 1}]; + // Fully qualified domain names for which Envoy will respond to DNS queries. By leaving this + // list empty, Envoy will forward all queries to external resolvers + repeated DnsVirtualDomain virtual_domains = 2; - // This field serves to help Envoy determine whether it can authoritatively - // answer a query for a name matching a suffix in this list. If the query - // name does not match a suffix in this list, Envoy will forward - // the query to an upstream DNS server + // This field serves to help Envoy determine whether it can authoritatively answer a query + // for a name matching a suffix in this list. If the query name does not match a suffix in + // this list, Envoy will forward the query to an upstream DNS server repeated type.matcher.v3.StringMatcher known_suffixes = 3; } diff --git a/api/envoy/data/dns/v4alpha/dns_table.proto b/api/envoy/data/dns/v4alpha/dns_table.proto index 83edc20088de..f7050bedc1c1 100644 --- a/api/envoy/data/dns/v4alpha/dns_table.proto +++ b/api/envoy/data/dns/v4alpha/dns_table.proto @@ -28,22 +28,19 @@ message DnsTable { option (udpa.annotations.versioning).previous_message_type = "envoy.data.dns.v3.DnsTable.AddressList"; - // This field contains a well formed IP address that is returned - // in the answer for a name query. 
The address field can be an - // IPv4 or IPv6 address. Address family detection is done automatically - // when Envoy parses the string. Since this field is repeated, - // Envoy will return one randomly chosen entry from this list in the - // DNS response. The random index will vary per query so that we prevent - // clients pinning on a single address for a configured domain + // This field contains a well formed IP address that is returned in the answer for a + // name query. The address field can be an IPv4 or IPv6 address. Address family + // detection is done automatically when Envoy parses the string. Since this field is + // repeated, Envoy will return as many entries from this list in the DNS response while + // keeping the response under 512 bytes repeated string address = 1 [(validate.rules).repeated = { min_items: 1 items {string {min_len: 3}} }]; } - // This message type is extensible and can contain a list of addresses - // or dictate some other method for resolving the addresses for an - // endpoint + // This message type is extensible and can contain a list of addresses, clusters or + // dictate a different method for resolving the addresses for an endpoint message DnsEndpoint { option (udpa.annotations.versioning).previous_message_type = "envoy.data.dns.v3.DnsTable.DnsEndpoint"; @@ -52,6 +49,8 @@ message DnsTable { option (validate.required) = true; AddressList address_list = 1; + + string cluster_name = 2; } } @@ -59,27 +58,25 @@ message DnsTable { option (udpa.annotations.versioning).previous_message_type = "envoy.data.dns.v3.DnsTable.DnsVirtualDomain"; - // The domain name for which Envoy will respond to query requests + // A domain name for which Envoy will respond to query requests string name = 1 [(validate.rules).string = {min_len: 2 well_known_regex: HTTP_HEADER_NAME}]; - // The configuration containing the method to determine the address - // of this endpoint + // The configuration containing the method to determine the address of this endpoint DnsEndpoint endpoint = 2; - // Sets the TTL in dns answers from Envoy returned to the client - google.protobuf.Duration answer_ttl = 3 [(validate.rules).duration = {gt {}}]; + // Sets the TTL in DNS answers from Envoy returned to the client. The default TTL is 300s + google.protobuf.Duration answer_ttl = 3 [(validate.rules).duration = {gte {seconds: 60}}]; } - // Control how many times envoy makes an attempt to forward a query to - // an external server - uint32 external_retry_count = 1; + // Control how many times Envoy makes an attempt to forward a query to an external DNS server + uint32 external_retry_count = 1 [(validate.rules).uint32 = {lte: 3}]; - // Fully qualified domain names for which Envoy will respond to queries - repeated DnsVirtualDomain virtual_domains = 2 [(validate.rules).repeated = {min_items: 1}]; + // Fully qualified domain names for which Envoy will respond to DNS queries. By leaving this + // list empty, Envoy will forward all queries to external resolvers + repeated DnsVirtualDomain virtual_domains = 2; - // This field serves to help Envoy determine whether it can authoritatively - // answer a query for a name matching a suffix in this list. If the query - // name does not match a suffix in this list, Envoy will forward - // the query to an upstream DNS server + // This field serves to help Envoy determine whether it can authoritatively answer a query + // for a name matching a suffix in this list. 
If the query name does not match a suffix in + // this list, Envoy will forward the query to an upstream DNS server repeated type.matcher.v4alpha.StringMatcher known_suffixes = 3; } diff --git a/docs/root/configuration/listeners/udp_filters/dns_filter.rst b/docs/root/configuration/listeners/udp_filters/dns_filter.rst index 90a768c908f6..0b6874aafa26 100644 --- a/docs/root/configuration/listeners/udp_filters/dns_filter.rst +++ b/docs/root/configuration/listeners/udp_filters/dns_filter.rst @@ -13,14 +13,15 @@ DNS Filter Overview -------- -The DNS filter allows Envoy to resolve forward DNS queries as an authoritative server for all +The DNS filter allows Envoy to resolve forward DNS queries as an authoritative server for any configured domains. The filter's configuration specifies the names and addresses for which Envoy will answer as well as the configuration needed to send queries externally for unknown domains. The filter supports local and external DNS resolution. If a lookup for a name does not match a statically configured domain, or a provisioned cluster name, Envoy can refer the query to an external resolver for an answer. Users have the option of specifying the DNS servers that Envoy -will use for external resolution. +will use for external resolution. Users can disable external DNS resolution by omitting the +client configuration object. The filter supports :ref:`per-filter configuration `. @@ -64,25 +65,25 @@ Example Configuration address_list: address: - 10.0.3.1 + - name: "www.domain4.com" + endpoint: + cluster_name: cluster_0 -In this example, Envoy is configured to respond to client queries for three domains. For any +In this example, Envoy is configured to respond to client queries for four domains. For any other query, it will forward upstream to external resolvers. The filter will return an address matching the input query type. If the query is for type A records and no A records are configured, Envoy will return no addresses and set the response code appropriately. Conversely, if there are matching records for the query type, each configured address is returned. This is also true for AAAA records. Only A and AAAA records are supported. If the filter parses other queries for other -record types, the filter immediately responds indicating that the query is not supported. - -To disable external resolution, one can omit the `client_config` section of the config. Envoy interprets -this configuration to mean that name resolution should be done only from the data appearing in the -`server_config` section. A query for a name not appearing in the DNS table will receive a "No Answer" -DNS response. +record types, the filter immediately responds indicating that the query is not supported. The +filter can also redirect a query for a DNS name to the endpoints of a cluster. The last domain +in the configuration demonstrates this. Along with an address list, a cluster name is a valid +endpoint for a DNS name. -The filter can also consume its configuration from an external dns table. The same configuration -that appears in the static configuration can be stored in a Proto3-conformant JSON file and -referenced in the configuration using the :ref:`external_dns_table DataSource ` -directive: +The filter can also consume its domain configuration from an external DNS table.
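The cluster endpoint described above can also be expressed programmatically; a minimal sketch building the equivalent DnsTable entry in C++ (the domain and cluster names are the ones from the example configuration, and the generated header path is assumed):

```cpp
#include "envoy/data/dns/v3/dns_table.pb.h"

envoy::data::dns::v3::DnsTable table;
auto* domain = table.add_virtual_domains();
domain->set_name("www.domain4.com");
// Instead of an address_list, point the endpoint at a cluster; the filter then answers the
// query with addresses discovered from that cluster's endpoints.
domain->mutable_endpoint()->set_cluster_name("cluster_0");
```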
The same entities +appearing in the static configuration can be stored as JSON or YAML in a separate file and referenced +using the :ref:`external_dns_table DataSource ` directive: Example External DnsTable Configuration --------------------------------------- In the file, the table can be defined as follows: DnsTable JSON Configuration --------------------------- -.. code-block:: text - - known_suffixes: [ - { suffix: "suffix1.com" }, - { suffix: "suffix2.com" } - ], - virtual_domains: [ - { - name: "www.suffix1.com", - endpoint: { - address_list: { - address: [ "10.0.0.1", "10.0.0.2" ] +.. code-block:: json + + { + "known_suffixes": [ + { "suffix": "suffix1.com" }, + { "suffix": "suffix2.com" } + ], + "virtual_domains": [ + { + "name": "www.suffix1.com", + "endpoint": { + "address_list": { + "address": [ "10.0.0.1", "10.0.0.2" ] + } } - } - }, - { - name: "www.suffix2.com", - endpoint: { - address_list: { - address: [ "2001:8a:c1::2800:7" ] + }, + { + "name": "www.suffix2.com", + "endpoint": { + "address_list": { + "address": [ "2001:8a:c1::2800:7" ] + } } } - } - ] + ] + } By utilizing this configuration, the DNS responses can be configured separately from the Envoy diff --git a/docs/root/intro/arch_overview/listeners/dns_filter.rst b/docs/root/intro/arch_overview/listeners/dns_filter.rst index 219b3106cab0..87295a8d0950 100644 --- a/docs/root/intro/arch_overview/listeners/dns_filter.rst +++ b/docs/root/intro/arch_overview/listeners/dns_filter.rst @@ -3,3 +3,9 @@ DNS Filter Envoy supports responding to DNS requests by configuring a :ref:`UDP listener DNS Filter `. + +The DNS filter supports responding to forward queries for A and AAAA records. The answers are +discovered from statically configured resources, clusters, or external DNS servers. The filter +will return DNS responses up to 512 bytes. If domains are configured with multiple addresses, +or clusters with multiple endpoints, Envoy will return each discovered address up to the +aforementioned size limit. diff --git a/generated_api_shadow/envoy/data/dns/v3/dns_table.proto b/generated_api_shadow/envoy/data/dns/v3/dns_table.proto index a6457e118672..5615c96e2891 100644 --- a/generated_api_shadow/envoy/data/dns/v3/dns_table.proto +++ b/generated_api_shadow/envoy/data/dns/v3/dns_table.proto @@ -28,22 +28,19 @@ message DnsTable { option (udpa.annotations.versioning).previous_message_type = "envoy.data.dns.v2alpha.DnsTable.AddressList"; - // This field contains a well formed IP address that is returned - // in the answer for a name query. The address field can be an - IPv4 or IPv6 address. Address family detection is done automatically - when Envoy parses the string. Since this field is repeated, - Envoy will return one randomly chosen entry from this list in the - DNS response. The random index will vary per query so that we prevent - clients pinning on a single address for a configured domain + // This field contains a well formed IP address that is returned in the answer for a + // name query. The address field can be an IPv4 or IPv6 address. Address family + // detection is done automatically when Envoy parses the string.
Since this field is + // repeated, Envoy will return as many entries from this list in the DNS response while + // keeping the response under 512 bytes repeated string address = 1 [(validate.rules).repeated = { min_items: 1 items {string {min_len: 3}} }]; } - // This message type is extensible and can contain a list of addresses - // or dictate some other method for resolving the addresses for an - // endpoint + // This message type is extensible and can contain a list of addresses, clusters or + // dictate a different method for resolving the addresses for an endpoint message DnsEndpoint { option (udpa.annotations.versioning).previous_message_type = "envoy.data.dns.v2alpha.DnsTable.DnsEndpoint"; @@ -52,6 +49,8 @@ message DnsTable { option (validate.required) = true; AddressList address_list = 1; + + string cluster_name = 2; } } @@ -59,27 +58,25 @@ message DnsTable { option (udpa.annotations.versioning).previous_message_type = "envoy.data.dns.v2alpha.DnsTable.DnsVirtualDomain"; - // The domain name for which Envoy will respond to query requests + // A domain name for which Envoy will respond to query requests string name = 1 [(validate.rules).string = {min_len: 2 well_known_regex: HTTP_HEADER_NAME}]; - // The configuration containing the method to determine the address - // of this endpoint + // The configuration containing the method to determine the address of this endpoint DnsEndpoint endpoint = 2; - // Sets the TTL in dns answers from Envoy returned to the client - google.protobuf.Duration answer_ttl = 3 [(validate.rules).duration = {gt {}}]; + // Sets the TTL in DNS answers from Envoy returned to the client. The default TTL is 300s + google.protobuf.Duration answer_ttl = 3 [(validate.rules).duration = {gte {seconds: 60}}]; } - // Control how many times envoy makes an attempt to forward a query to - // an external server - uint32 external_retry_count = 1; + // Control how many times Envoy makes an attempt to forward a query to an external DNS server + uint32 external_retry_count = 1 [(validate.rules).uint32 = {lte: 3}]; - // Fully qualified domain names for which Envoy will respond to queries - repeated DnsVirtualDomain virtual_domains = 2 [(validate.rules).repeated = {min_items: 1}]; + // Fully qualified domain names for which Envoy will respond to DNS queries. By leaving this + // list empty, Envoy will forward all queries to external resolvers + repeated DnsVirtualDomain virtual_domains = 2; - // This field serves to help Envoy determine whether it can authoritatively - // answer a query for a name matching a suffix in this list. If the query - // name does not match a suffix in this list, Envoy will forward - // the query to an upstream DNS server + // This field serves to help Envoy determine whether it can authoritatively answer a query + // for a name matching a suffix in this list. 
If the query name does not match a suffix in + // this list, Envoy will forward the query to an upstream DNS server repeated type.matcher.v3.StringMatcher known_suffixes = 3; } diff --git a/generated_api_shadow/envoy/data/dns/v4alpha/dns_table.proto b/generated_api_shadow/envoy/data/dns/v4alpha/dns_table.proto index 83edc20088de..f7050bedc1c1 100644 --- a/generated_api_shadow/envoy/data/dns/v4alpha/dns_table.proto +++ b/generated_api_shadow/envoy/data/dns/v4alpha/dns_table.proto @@ -28,22 +28,19 @@ message DnsTable { option (udpa.annotations.versioning).previous_message_type = "envoy.data.dns.v3.DnsTable.AddressList"; - // This field contains a well formed IP address that is returned - // in the answer for a name query. The address field can be an - // IPv4 or IPv6 address. Address family detection is done automatically - // when Envoy parses the string. Since this field is repeated, - // Envoy will return one randomly chosen entry from this list in the - // DNS response. The random index will vary per query so that we prevent - // clients pinning on a single address for a configured domain + // This field contains a well formed IP address that is returned in the answer for a + // name query. The address field can be an IPv4 or IPv6 address. Address family + // detection is done automatically when Envoy parses the string. Since this field is + // repeated, Envoy will return as many entries from this list in the DNS response while + // keeping the response under 512 bytes repeated string address = 1 [(validate.rules).repeated = { min_items: 1 items {string {min_len: 3}} }]; } - // This message type is extensible and can contain a list of addresses - // or dictate some other method for resolving the addresses for an - // endpoint + // This message type is extensible and can contain a list of addresses, clusters or + // dictate a different method for resolving the addresses for an endpoint message DnsEndpoint { option (udpa.annotations.versioning).previous_message_type = "envoy.data.dns.v3.DnsTable.DnsEndpoint"; @@ -52,6 +49,8 @@ message DnsTable { option (validate.required) = true; AddressList address_list = 1; + + string cluster_name = 2; } } @@ -59,27 +58,25 @@ message DnsTable { option (udpa.annotations.versioning).previous_message_type = "envoy.data.dns.v3.DnsTable.DnsVirtualDomain"; - // The domain name for which Envoy will respond to query requests + // A domain name for which Envoy will respond to query requests string name = 1 [(validate.rules).string = {min_len: 2 well_known_regex: HTTP_HEADER_NAME}]; - // The configuration containing the method to determine the address - // of this endpoint + // The configuration containing the method to determine the address of this endpoint DnsEndpoint endpoint = 2; - // Sets the TTL in dns answers from Envoy returned to the client - google.protobuf.Duration answer_ttl = 3 [(validate.rules).duration = {gt {}}]; + // Sets the TTL in DNS answers from Envoy returned to the client. 
The default TTL is 300s + google.protobuf.Duration answer_ttl = 3 [(validate.rules).duration = {gte {seconds: 60}}]; } - // Control how many times envoy makes an attempt to forward a query to - // an external server - uint32 external_retry_count = 1; + // Control how many times Envoy makes an attempt to forward a query to an external DNS server + uint32 external_retry_count = 1 [(validate.rules).uint32 = {lte: 3}]; - // Fully qualified domain names for which Envoy will respond to queries - repeated DnsVirtualDomain virtual_domains = 2 [(validate.rules).repeated = {min_items: 1}]; + // Fully qualified domain names for which Envoy will respond to DNS queries. By leaving this + // list empty, Envoy will forward all queries to external resolvers + repeated DnsVirtualDomain virtual_domains = 2; - // This field serves to help Envoy determine whether it can authoritatively - // answer a query for a name matching a suffix in this list. If the query - // name does not match a suffix in this list, Envoy will forward - // the query to an upstream DNS server + // This field serves to help Envoy determine whether it can authoritatively answer a query + // for a name matching a suffix in this list. If the query name does not match a suffix in + // this list, Envoy will forward the query to an upstream DNS server repeated type.matcher.v4alpha.StringMatcher known_suffixes = 3; } diff --git a/source/extensions/extensions_build_config.bzl b/source/extensions/extensions_build_config.bzl index 9bd2531dcdb7..8baae0811e28 100644 --- a/source/extensions/extensions_build_config.bzl +++ b/source/extensions/extensions_build_config.bzl @@ -115,7 +115,7 @@ EXTENSIONS = { # UDP filters # - "envoy.filters.udp_listener.dns_filter": "//source/extensions/filters/udp/dns_filter:config", + "envoy.filters.udp_listener.dns_filter": "//source/extensions/filters/udp/dns_filter:config", "envoy.filters.udp_listener.udp_proxy": "//source/extensions/filters/udp/udp_proxy:config", # diff --git a/source/extensions/filters/udp/dns_filter/BUILD b/source/extensions/filters/udp/dns_filter/BUILD index 8886bc279d35..b538d52110a4 100644 --- a/source/extensions/filters/udp/dns_filter/BUILD +++ b/source/extensions/filters/udp/dns_filter/BUILD @@ -22,7 +22,6 @@ envoy_cc_library( external_deps = ["ares"], deps = [ "//include/envoy/buffer:buffer_interface", - "//include/envoy/event:file_event_interface", "//include/envoy/network:address_interface", "//include/envoy/network:filter_interface", "//include/envoy/network:listener_interface", @@ -30,8 +29,12 @@ envoy_cc_library( "//source/common/common:empty_string", "//source/common/common:matchers_lib", "//source/common/config:config_provider_lib", + "//source/common/config:datasource_lib", "//source/common/network:address_lib", "//source/common/network:utility_lib", + "//source/common/protobuf:message_validator_lib", + "//source/common/runtime:runtime_lib", + "//source/common/upstream:cluster_manager_lib", "@envoy_api//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg_cc_proto", "@envoy_api//envoy/type/matcher/v3:pkg_cc_proto", ], diff --git a/source/extensions/filters/udp/dns_filter/dns_filter.cc b/source/extensions/filters/udp/dns_filter/dns_filter.cc index c55955d986c3..909843977ad8 100644 --- a/source/extensions/filters/udp/dns_filter/dns_filter.cc +++ b/source/extensions/filters/udp/dns_filter/dns_filter.cc @@ -3,55 +3,74 @@ #include "envoy/network/listener.h" #include "envoy/type/matcher/v3/string.pb.h" +#include "common/config/datasource.h" #include "common/network/address_impl.h" 
+#include "common/protobuf/message_validator_impl.h" namespace Envoy { namespace Extensions { namespace UdpFilters { namespace DnsFilter { +static constexpr std::chrono::milliseconds DEFAULT_RESOLVER_TIMEOUT{500}; +static constexpr std::chrono::seconds DEFAULT_RESOLVER_TTL{300}; + DnsFilterEnvoyConfig::DnsFilterEnvoyConfig( Server::Configuration::ListenerFactoryContext& context, const envoy::extensions::filters::udp::dns_filter::v3alpha::DnsFilterConfig& config) - : root_scope_(context.scope()), stats_(generateStats(config.stat_prefix(), root_scope_)) { + : root_scope_(context.scope()), cluster_manager_(context.clusterManager()), api_(context.api()), + stats_(generateStats(config.stat_prefix(), root_scope_)), random_(context.random()) { using envoy::extensions::filters::udp::dns_filter::v3alpha::DnsFilterConfig; - static constexpr std::chrono::milliseconds DEFAULT_RESOLVER_TIMEOUT{500}; - static constexpr std::chrono::seconds DEFAULT_RESOLVER_TTL{300}; - const auto& server_config = config.server_config(); - // TODO(abaptiste): Read the external DataSource - if (server_config.has_inline_dns_table()) { - const auto& dns_table = server_config.inline_dns_table(); - const size_t entries = dns_table.virtual_domains().size(); - - virtual_domains_.reserve(entries); - for (const auto& virtual_domain : dns_table.virtual_domains()) { - AddressConstPtrVec addrs{}; - if (virtual_domain.endpoint().has_address_list()) { - const auto& address_list = virtual_domain.endpoint().address_list().address(); - addrs.reserve(address_list.size()); - // This will throw an exception if the configured_address string is malformed - for (const auto& configured_address : address_list) { - const auto ipaddr = - Network::Utility::parseInternetAddress(configured_address, 0 /* port */); - addrs.push_back(ipaddr); - } + envoy::data::dns::v3::DnsTable dns_table; + bool result = loadServerConfig(server_config, dns_table); + ENVOY_LOG(debug, "Loading DNS table from external file: {}", result ? "Success" : "Failure"); + + retry_count_ = dns_table.external_retry_count(); + + const size_t entries = dns_table.virtual_domains().size(); + virtual_domains_.reserve(entries); + for (const auto& virtual_domain : dns_table.virtual_domains()) { + AddressConstPtrVec addrs{}; + absl::string_view cluster_name; + if (virtual_domain.endpoint().has_address_list()) { + const auto& address_list = virtual_domain.endpoint().address_list().address(); + addrs.reserve(address_list.size()); + + // Shuffle the configured addresses. We store the addresses starting at a random + // list index so that we do not always return answers in the same order as the IPs + // are configured. + size_t i = random_.random(); + + // Creating the IP address will throw an exception if the address string is malformed + for (auto index = 0; index < address_list.size(); index++) { + const auto address_iter = std::next(address_list.begin(), (i++ % address_list.size())); + auto ipaddr = Network::Utility::parseInternetAddress(*address_iter, 0 /* port */); + addrs.push_back(std::move(ipaddr)); } - virtual_domains_.emplace(virtual_domain.name(), std::move(addrs)); - std::chrono::seconds ttl = virtual_domain.has_answer_ttl() - ? 
std::chrono::seconds(virtual_domain.answer_ttl().seconds()) - : DEFAULT_RESOLVER_TTL; - domain_ttl_.emplace(virtual_domain.name(), ttl); + } else { + cluster_name = virtual_domain.endpoint().cluster_name(); } - // Add known domains - known_suffixes_.reserve(dns_table.known_suffixes().size()); - for (const auto& suffix : dns_table.known_suffixes()) { - auto matcher_ptr = std::make_unique(suffix); - known_suffixes_.push_back(std::move(matcher_ptr)); - } + DnsEndpointConfig endpoint_config; + endpoint_config.address_list = absl::make_optional(std::move(addrs)); + endpoint_config.cluster_name = absl::make_optional(cluster_name); + + virtual_domains_.emplace(virtual_domain.name(), endpoint_config); + + std::chrono::seconds ttl = virtual_domain.has_answer_ttl() + ? std::chrono::seconds(virtual_domain.answer_ttl().seconds()) + : DEFAULT_RESOLVER_TTL; + domain_ttl_.emplace(virtual_domain.name(), ttl); + } + + // Add known domain suffixes + known_suffixes_.reserve(dns_table.known_suffixes().size()); + for (const auto& suffix : dns_table.known_suffixes()) { + auto matcher_ptr = std::make_unique(suffix); + known_suffixes_.push_back(std::move(matcher_ptr)); } forward_queries_ = config.has_client_config(); @@ -68,6 +87,35 @@ DnsFilterEnvoyConfig::DnsFilterEnvoyConfig( } } +bool DnsFilterEnvoyConfig::loadServerConfig( + const envoy::extensions::filters::udp::dns_filter::v3alpha::DnsFilterConfig:: + ServerContextConfig& config, + envoy::data::dns::v3::DnsTable& table) { + using envoy::data::dns::v3::DnsTable; + + if (config.has_inline_dns_table()) { + table = config.inline_dns_table(); + return true; + } + + const auto& datasource = config.external_dns_table(); + bool data_source_loaded = false; + try { + // Data structure is deduced from the file extension. If the data is not read an exception + // is thrown. If no table can be read, the filter will refer all queries to an external + // DNS server, if configured, otherwise all queries will be responded to with Name Error. + MessageUtil::loadFromFile(datasource.filename(), table, + ProtobufMessage::getNullValidationVisitor(), api_, + false /* do_boosting */); + data_source_loaded = true; + } catch (const ProtobufMessage::UnknownProtoFieldException& e) { + ENVOY_LOG(warn, "Invalid field in DNS Filter datasource configuration: {}", e.what()); + } catch (const EnvoyException& e) { + ENVOY_LOG(warn, "Filesystem DNS Filter config update failure: {}", e.what()); + } + return data_source_loaded; +} + void DnsFilter::onData(Network::UdpRecvData& client_request) { // Parse the query, if it fails return an response to the client DnsQueryContextPtr query_context = message_parser_.createQueryContext(client_request); @@ -76,21 +124,178 @@ void DnsFilter::onData(Network::UdpRecvData& client_request) { return; } - // TODO(abaptiste): Resolve the requested name + // Resolve the requested name + const auto response = getResponseForQuery(query_context); - // Send an answer to the client + // We were not able to satisfy the request locally. Return an empty response to the client + if (response == DnsLookupResponseCode::Failure) { + sendDnsResponse(std::move(query_context)); + return; + } + + // TODO(abaptiste): external resolution + + // We have an answer. Send it to the client sendDnsResponse(std::move(query_context)); } void DnsFilter::sendDnsResponse(DnsQueryContextPtr query_context) { Buffer::OwnedImpl response; - // TODO(abaptiste): serialize and return a response to the client + // Serializes the generated response to the parsed query from the client. 
If there is a + // parsing error or the incoming query is invalid, we will still generate a valid DNS response + message_parser_.buildResponseBuffer(query_context, response); Network::UdpSendData response_data{query_context->local_->ip(), *(query_context->peer_), response}; listener_.send(response_data); } +DnsLookupResponseCode DnsFilter::getResponseForQuery(DnsQueryContextPtr& context) { + /* It appears to be a rare case where we would have more than one query in a single request. + * It is allowed by the protocol but not widely supported: + * + * See: https://www.ietf.org/rfc/rfc1035.txt + * + * The question section is used to carry the "question" in most queries, + * i.e., the parameters that define what is being asked. The section + * contains QDCOUNT (usually 1) entries. + */ + for (const auto& query : context->queries_) { + // Try to resolve the query locally. If forwarding the query externally is disabled we will + // always attempt to resolve with the configured domains + if (isKnownDomain(query->name_) || !config_->forwardQueries()) { + // Determine whether the name is a cluster. Move on to the next query if successful + if (resolveViaClusters(context, *query)) { + continue; + } + + // Determine whether we an answer this query with the static configuration + if (resolveViaConfiguredHosts(context, *query)) { + continue; + } + } + // TODO(abaptiste): resolve the query externally + } + + if (context->answers_.empty()) { + return DnsLookupResponseCode::Failure; + } + return DnsLookupResponseCode::Success; +} + +std::chrono::seconds DnsFilter::getDomainTTL(const absl::string_view domain) { + const auto& domain_ttl_config = config_->domainTtl(); + const auto& iter = domain_ttl_config.find(domain); + + if (iter == domain_ttl_config.end()) { + return DEFAULT_RESOLVER_TTL; + } + return iter->second; +} + +bool DnsFilter::isKnownDomain(const absl::string_view domain_name) { + const auto& known_suffixes = config_->knownSuffixes(); + + // If we don't have a list of whitelisted domain suffixes, we will resolve the name with an + // external DNS server + if (known_suffixes.empty()) { + ENVOY_LOG(debug, "Known domains list is empty"); + return false; + } + + // TODO(abaptiste): Use a trie to find a match instead of iterating through the list + for (auto& suffix : known_suffixes) { + if (suffix->match(domain_name)) { + return true; + } + } + return false; +} + +const DnsEndpointConfig* DnsFilter::getEndpointConfigForDomain(const absl::string_view domain) { + const auto& domains = config_->domains(); + const auto iter = domains.find(domain); + if (iter == domains.end()) { + ENVOY_LOG(debug, "No endpoint configuration exists for [{}]", domain); + return nullptr; + } + return &(iter->second); +} + +const AddressConstPtrVec* DnsFilter::getAddressListForDomain(const absl::string_view domain) { + const DnsEndpointConfig* endpoint_config = getEndpointConfigForDomain(domain); + if (endpoint_config != nullptr && endpoint_config->address_list.has_value()) { + return &(endpoint_config->address_list.value()); + } + return nullptr; +} + +const absl::string_view DnsFilter::getClusterNameForDomain(const absl::string_view domain) { + const DnsEndpointConfig* endpoint_config = getEndpointConfigForDomain(domain); + if (endpoint_config != nullptr && endpoint_config->cluster_name.has_value()) { + return endpoint_config->cluster_name.value(); + } + return {}; +} + +bool DnsFilter::resolveViaClusters(DnsQueryContextPtr& context, const DnsQueryRecord& query) { + // Determine if the domain name is being redirected to a 
cluster + const auto cluster_name = getClusterNameForDomain(query.name_); + absl::string_view lookup_name; + if (!cluster_name.empty()) { + lookup_name = cluster_name; + } else { + lookup_name = query.name_; + } + + Upstream::ThreadLocalCluster* cluster = cluster_manager_.get(lookup_name); + if (cluster == nullptr) { + ENVOY_LOG(debug, "Did not find a cluster for name [{}]", lookup_name); + return false; + } + + // TODO(abaptiste): consider using host weights when returning answer addresses + + // Return the address for all discovered endpoints + size_t discovered_endpoints = 0; + const std::chrono::seconds ttl = getDomainTTL(query.name_); + for (const auto& hostsets : cluster->prioritySet().hostSetsPerPriority()) { + for (const auto& host : hostsets->hosts()) { + ++discovered_endpoints; + ENVOY_LOG(debug, "using cluster host address {} for domain [{}]", + host->address()->ip()->addressAsString(), lookup_name); + message_parser_.buildDnsAnswerRecord(context, query, ttl, host->address()); + } + } + return (discovered_endpoints != 0); +} + +bool DnsFilter::resolveViaConfiguredHosts(DnsQueryContextPtr& context, + const DnsQueryRecord& query) { + const auto* configured_address_list = getAddressListForDomain(query.name_); + if (configured_address_list == nullptr) { + ENVOY_LOG(debug, "Domain [{}] address list was not found", query.name_); + return false; + } + + if (configured_address_list->empty()) { + ENVOY_LOG(debug, "Domain [{}] address list is empty", query.name_); + return false; + } + + // Build an answer record from each configured IP address + uint64_t hosts_found = 0; + for (const auto& configured_address : *configured_address_list) { + ASSERT(configured_address != nullptr); + ENVOY_LOG(debug, "using local address {} for domain [{}]", + configured_address->ip()->addressAsString(), query.name_); + ++hosts_found; + const std::chrono::seconds ttl = getDomainTTL(query.name_); + message_parser_.buildDnsAnswerRecord(context, query, ttl, configured_address); + } + return (hosts_found != 0); +} + void DnsFilter::onReceiveError(Api::IoError::IoErrorCode error_code) { UNREFERENCED_PARAMETER(error_code); } diff --git a/source/extensions/filters/udp/dns_filter/dns_filter.h b/source/extensions/filters/udp/dns_filter/dns_filter.h index c8d73086c85b..3e295f1c79a9 100644 --- a/source/extensions/filters/udp/dns_filter/dns_filter.h +++ b/source/extensions/filters/udp/dns_filter/dns_filter.h @@ -37,31 +37,58 @@ struct DnsFilterStats { ALL_DNS_FILTER_STATS(GENERATE_COUNTER_STRUCT) }; -using DnsVirtualDomainConfig = absl::flat_hash_map; +struct DnsEndpointConfig { + absl::optional address_list; + absl::optional cluster_name; +}; + +using DnsVirtualDomainConfig = absl::flat_hash_map; /** * DnsFilter configuration class abstracting access to data necessary for the filter's operation */ -class DnsFilterEnvoyConfig { +class DnsFilterEnvoyConfig : public Logger::Loggable { public: DnsFilterEnvoyConfig( Server::Configuration::ListenerFactoryContext& context, const envoy::extensions::filters::udp::dns_filter::v3alpha::DnsFilterConfig& config); + DnsFilterStats& stats() const { return stats_; } + const DnsVirtualDomainConfig& domains() const { return virtual_domains_; } + const std::vector& knownSuffixes() const { return known_suffixes_; } + const absl::flat_hash_map& domainTtl() const { + return domain_ttl_; + } + const AddressConstPtrVec& resolvers() const { return resolvers_; } + bool forwardQueries() const { return forward_queries_; } + const std::chrono::milliseconds resolverTimeout() const { return 
resolver_timeout_; } + Upstream::ClusterManager& clusterManager() const { return cluster_manager_; } + uint64_t retryCount() const { return retry_count_; } + Runtime::RandomGenerator& random() const { return random_; } + private: static DnsFilterStats generateStats(const std::string& stat_prefix, Stats::Scope& scope) { const auto final_prefix = absl::StrCat("dns_filter.", stat_prefix); return {ALL_DNS_FILTER_STATS(POOL_COUNTER_PREFIX(scope, final_prefix))}; } + bool loadServerConfig(const envoy::extensions::filters::udp::dns_filter::v3alpha:: + DnsFilterConfig::ServerContextConfig& config, + envoy::data::dns::v3::DnsTable& table); + Stats::Scope& root_scope_; + Upstream::ClusterManager& cluster_manager_; + Api::Api& api_; + mutable DnsFilterStats stats_; DnsVirtualDomainConfig virtual_domains_; std::vector known_suffixes_; absl::flat_hash_map domain_ttl_; bool forward_queries_; + uint64_t retry_count_; AddressConstPtrVec resolvers_; std::chrono::milliseconds resolver_timeout_; + Runtime::RandomGenerator& random_; }; using DnsFilterEnvoyConfigSharedPtr = std::shared_ptr; @@ -76,7 +103,9 @@ enum class DnsLookupResponseCode { Success, Failure, External }; class DnsFilter : public Network::UdpListenerReadFilter, Logger::Loggable { public: DnsFilter(Network::UdpReadFilterCallbacks& callbacks, const DnsFilterEnvoyConfigSharedPtr& config) - : UdpListenerReadFilter(callbacks), config_(config), listener_(callbacks.udpListener()) {} + : UdpListenerReadFilter(callbacks), config_(config), listener_(callbacks.udpListener()), + cluster_manager_(config_->clusterManager()), + message_parser_(config->forwardQueries(), config->retryCount(), config->random()) {} // Network::UdpListenerReadFilter callbacks void onData(Network::UdpRecvData& client_request) override; @@ -101,23 +130,50 @@ class DnsFilter : public Network::UdpListenerReadFilter, Logger::Loggable -#include - #include "envoy/network/address.h" #include "common/common/empty_string.h" #include "common/network/address_impl.h" #include "common/network/utility.h" -// TODO(abaptiste): add fuzzing tests for DNS message parsing #include "ares.h" namespace Envoy { @@ -17,16 +13,27 @@ namespace Extensions { namespace UdpFilters { namespace DnsFilter { -void BaseDnsRecord::serializeName(Buffer::OwnedImpl& output) { +bool BaseDnsRecord::serializeName(Buffer::OwnedImpl& output) { // Iterate over a name e.g. 
"www.domain.com" once and produce a buffer containing each name // segment prefixed by its length - static constexpr char SEPARATOR('.'); + static constexpr char SEPARATOR = '.'; + static constexpr size_t MAX_LABEL_LENGTH = 63; + static constexpr size_t MAX_NAME_LENGTH = 255; + + // Names are restricted to 255 bytes per RFC + if (name_.size() > MAX_NAME_LENGTH) { + return false; + } size_t last = 0; size_t count = name_.find_first_of(SEPARATOR); auto iter = name_.begin(); while (count != std::string::npos) { + if ((count - last) > MAX_LABEL_LENGTH) { + return false; + } + count -= last; output.writeBEInt(count); for (size_t i = 0; i < count; i++) { @@ -56,13 +63,43 @@ void BaseDnsRecord::serializeName(Buffer::OwnedImpl& output) { // Terminate the name record with a null byte output.writeByte(0x00); + return true; } // Serialize a DNS Query Record -void DnsQueryRecord::serialize(Buffer::OwnedImpl& output) { - serializeName(output); - output.writeBEInt(type_); - output.writeBEInt(class_); +bool DnsQueryRecord::serialize(Buffer::OwnedImpl& output) { + if (serializeName(output)) { + output.writeBEInt(type_); + output.writeBEInt(class_); + return true; + } + return false; +} + +// Serialize a DNS Answer Record +bool DnsAnswerRecord::serialize(Buffer::OwnedImpl& output) { + if (serializeName(output)) { + output.writeBEInt(type_); + output.writeBEInt(class_); + output.writeBEInt(static_cast(ttl_.count())); + + ASSERT(ip_addr_ != nullptr); + const auto ip_address = ip_addr_->ip(); + + ASSERT(ip_address != nullptr); + if (ip_address->ipv6() != nullptr) { + // Store the 128bit address with 2 64 bit writes + const absl::uint128 addr6 = ip_address->ipv6()->address(); + output.writeBEInt(sizeof(addr6)); + output.writeLEInt(absl::Uint128Low64(addr6)); + output.writeLEInt(absl::Uint128High64(addr6)); + } else if (ip_address->ipv4() != nullptr) { + output.writeBEInt(4); + output.writeLEInt(ip_address->ipv4()->address()); + } + return true; + } + return false; } DnsQueryContextPtr DnsMessageParser::createQueryContext(Network::UdpRecvData& client_request) { @@ -71,10 +108,10 @@ DnsQueryContextPtr DnsMessageParser::createQueryContext(Network::UdpRecvData& cl query_context->parse_status_ = parseDnsObject(query_context, client_request.buffer_); if (!query_context->parse_status_) { - ENVOY_LOG(debug, "Unable to parse query buffer from '{}' into a DNS object.", + query_context->response_code_ = DNS_RESPONSE_CODE_FORMAT_ERROR; + ENVOY_LOG(debug, "Unable to parse query buffer from '{}' into a DNS object", client_request.addresses_.peer_->ip()->addressAsString()); } - return query_context; } @@ -151,6 +188,15 @@ bool DnsMessageParser::parseDnsObject(DnsQueryContextPtr& context, } context->id_ = static_cast(header_.id); + if (context->id_ == 0) { + ENVOY_LOG(debug, "No ID in DNS query"); + return false; + } + + if (header_.questions == 0) { + ENVOY_LOG(debug, "No questions in DNS request"); + return false; + } // Almost always, we will have only one query here. Per the RFC, QDCOUNT is usually 1 context->queries_.reserve(header_.questions); @@ -164,6 +210,19 @@ bool DnsMessageParser::parseDnsObject(DnsQueryContextPtr& context, context->queries_.push_back(std::move(rec)); } + // Parse all answer records and store them. This is exercised primarily in tests to + // verify the responses returned from the filter. 
+ for (auto index = 0; index < header_.answers; index++) { + ENVOY_LOG(trace, "Parsing [{}/{}] answers", index, header_.answers); + auto rec = parseDnsAnswerRecord(buffer, &offset); + if (rec == nullptr) { + ENVOY_LOG(debug, "Couldn't parse answer record from buffer"); + return false; + } + const std::string name = rec->name_; + context->answers_.emplace(name, std::move(rec)); + } + return true; } @@ -189,19 +248,142 @@ const std::string DnsMessageParser::parseDnsNameRecord(const Buffer::InstancePtr return name; } +DnsAnswerRecordPtr DnsMessageParser::parseDnsAnswerRecord(const Buffer::InstancePtr& buffer, + uint64_t* offset) { + uint64_t data_offset = *offset; + + if (data_offset > buffer->length()) { + ENVOY_LOG(debug, "Invalid offset for parsing answer record"); + return nullptr; + } + + uint64_t available_bytes = buffer->length() - data_offset; + + if (available_bytes == 0) { + ENVOY_LOG(debug, "No data left in buffer for reading answer record"); + return nullptr; + } + + const std::string record_name = parseDnsNameRecord(buffer, &available_bytes, &data_offset); + if (record_name.empty()) { + ENVOY_LOG(debug, "Unable to parse name record from buffer"); + return nullptr; + } + + if (available_bytes < (sizeof(uint32_t) + 3 * sizeof(uint16_t))) { + ENVOY_LOG(debug, + "Insufficient data in buffer to read answer record data." + "Available bytes: {}", + available_bytes); + return nullptr; + } + + // Parse the record type + uint16_t record_type; + record_type = buffer->peekBEInt(data_offset); + data_offset += sizeof(uint16_t); + available_bytes -= sizeof(uint16_t); + + // We support only A and AAAA record types + if (record_type != DNS_RECORD_TYPE_A && record_type != DNS_RECORD_TYPE_AAAA) { + ENVOY_LOG(debug, "Unsupported record type [{}] found in answer", record_type); + return nullptr; + } + + // Parse the record class + uint16_t record_class; + record_class = buffer->peekBEInt(data_offset); + data_offset += sizeof(uint16_t); + available_bytes -= sizeof(uint16_t); + + // We support only IN record classes + if (record_class != DNS_RECORD_CLASS_IN) { + ENVOY_LOG(debug, "Unsupported record class [{}] found in answer", record_class); + return nullptr; + } + + // Read the record's TTL + uint32_t ttl; + ttl = buffer->peekBEInt(data_offset); + data_offset += sizeof(uint32_t); + available_bytes -= sizeof(uint32_t); + + // Parse the Data Length and address data record + uint16_t data_length; + data_length = buffer->peekBEInt(data_offset); + data_offset += sizeof(uint16_t); + available_bytes -= sizeof(uint16_t); + + if (data_length == 0) { + ENVOY_LOG(debug, "Read zero for data length when reading address from answer record"); + return nullptr; + } + + // Build an address pointer from the string data. + // We don't support anything other than A or AAAA records. 
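For reference, the RDATA payload of the two supported types is simply the raw address in wire (network) order: 4 bytes for an A record and 16 for AAAA. A standalone sketch of decoding such a payload into a printable string, using POSIX inet_ntop rather than the Envoy address classes used below, and assuming the standard numeric type codes:

#include <arpa/inet.h>
#include <cstdint>
#include <string>
#include <vector>

// Turns raw RDATA bytes (wire order) into a printable address. Anything other
// than a well sized A or AAAA payload is rejected, mirroring the type check above.
std::string rdataToString(uint16_t record_type, const std::vector<uint8_t>& rdata) {
  char buf[INET6_ADDRSTRLEN];
  if (record_type == 1 /* A */ && rdata.size() == 4) {
    return inet_ntop(AF_INET, rdata.data(), buf, sizeof(buf)) ? buf : "";
  }
  if (record_type == 28 /* AAAA */ && rdata.size() == 16) {
    return inet_ntop(AF_INET6, rdata.data(), buf, sizeof(buf)) ? buf : "";
  }
  return "";
}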
If we add support for other record + // types, we must account for them here + Network::Address::InstanceConstSharedPtr ip_addr = nullptr; + + switch (record_type) { + case DNS_RECORD_TYPE_A: + if (available_bytes >= sizeof(uint32_t)) { + sockaddr_in sa4; + sa4.sin_addr.s_addr = buffer->peekLEInt(data_offset); + ip_addr = std::make_shared(&sa4); + data_offset += data_length; + } + break; + case DNS_RECORD_TYPE_AAAA: + if (available_bytes >= sizeof(absl::uint128)) { + sockaddr_in6 sa6; + uint8_t* address6_bytes = reinterpret_cast(&sa6.sin6_addr.s6_addr); + static constexpr size_t count = sizeof(absl::uint128) / sizeof(uint8_t); + for (size_t index = 0; index < count; index++) { + *address6_bytes++ = buffer->peekLEInt(data_offset++); + } + ip_addr = std::make_shared(sa6, true); + } + break; + default: + ENVOY_LOG(debug, "Unsupported record type [{}] found in answer", record_type); + break; + } + + if (ip_addr == nullptr) { + ENVOY_LOG(debug, "Unable to parse IP address from data in answer record"); + return nullptr; + } + + ENVOY_LOG(trace, "Parsed address [{}] from record type [{}]: offset {}", + ip_addr->ip()->addressAsString(), record_type, data_offset); + + *offset = data_offset; + + return std::make_unique(record_name, record_type, record_class, + std::chrono::seconds{ttl}, std::move(ip_addr)); +} + DnsQueryRecordPtr DnsMessageParser::parseDnsQueryRecord(const Buffer::InstancePtr& buffer, uint64_t* offset) { uint64_t name_offset = *offset; uint64_t available_bytes = buffer->length() - name_offset; + if (available_bytes == 0) { + ENVOY_LOG(debug, "No available data in buffer to parse a query record"); + return nullptr; + } + const std::string record_name = parseDnsNameRecord(buffer, &available_bytes, &name_offset); if (record_name.empty()) { - ENVOY_LOG(debug, "Unable to parse name record from buffer"); + ENVOY_LOG(debug, "Unable to parse name record from buffer [length {}]", buffer->length()); return nullptr; } if (available_bytes < 2 * sizeof(uint16_t)) { - ENVOY_LOG(debug, "Insufficient data in buffer to read query record type and class. "); + ENVOY_LOG(debug, + "Insufficient data in buffer to read query record type and class. " + "Available bytes: {}", + available_bytes); return nullptr; } @@ -210,20 +392,219 @@ DnsQueryRecordPtr DnsMessageParser::parseDnsQueryRecord(const Buffer::InstancePt record_type = buffer->peekBEInt(name_offset); name_offset += sizeof(record_type); - // Read the record class. This value is almost always 1 for internet address records + // Read the record class. This value is always 1 for internet address records uint16_t record_class; record_class = buffer->peekBEInt(name_offset); name_offset += sizeof(record_class); - auto rec = std::make_unique(record_name, record_type, record_class); + if (record_class != DNS_RECORD_CLASS_IN) { + ENVOY_LOG(debug, "Unsupported record class '{}' in address record", record_class); + return nullptr; + } // stop reading he buffer here since we aren't parsing additional records - ENVOY_LOG(trace, "Extracted query record. Name: {} type: {} class: {}", rec->name_, rec->type_, - rec->class_); + ENVOY_LOG(trace, "Extracted query record. 
Name: {} type: {} class: {}", record_name, record_type, + record_class); *offset = name_offset; + return std::make_unique(record_name, record_type, record_class); +} + +void DnsMessageParser::setDnsResponseFlags(DnsQueryContextPtr& query_context, + const uint16_t questions, const uint16_t answers) { + // Copy the transaction ID + response_header_.id = header_.id; + + // Signify that this is a response to a query + response_header_.flags.qr = 1; + + response_header_.flags.opcode = header_.flags.opcode; + response_header_.flags.aa = 0; + response_header_.flags.tc = 0; + + // Copy Recursion flags + response_header_.flags.rd = header_.flags.rd; + + // Set the recursion flag based on whether Envoy is configured to forward queries + response_header_.flags.ra = recursion_available_; + + // reserved flag is not set + response_header_.flags.z = 0; + + // Set the authenticated flags to zero + response_header_.flags.ad = 0; + + response_header_.flags.cd = 0; + response_header_.answers = answers; + response_header_.flags.rcode = query_context->response_code_; + + // Set the number of questions from the incoming query + response_header_.questions = questions; + + // We will not include any additional records + response_header_.authority_rrs = 0; + response_header_.additional_rrs = 0; +} + +void DnsMessageParser::buildDnsAnswerRecord(DnsQueryContextPtr& context, + const DnsQueryRecord& query_rec, + const std::chrono::seconds ttl, + Network::Address::InstanceConstSharedPtr ipaddr) { + // Verify that we have an address matching the query record type + switch (query_rec.type_) { + case DNS_RECORD_TYPE_AAAA: + if (ipaddr->ip()->ipv6() == nullptr) { + ENVOY_LOG(debug, "Unable to return IPV6 address for query"); + return; + } + break; + + case DNS_RECORD_TYPE_A: + if (ipaddr->ip()->ipv4() == nullptr) { + ENVOY_LOG(debug, "Unable to return IPV4 address for query"); + return; + } + break; + + // TODO(abbaptis): Support additional records (e.g. SRV) + default: + ENVOY_LOG(debug, "record type [{}] is not supported", query_rec.type_); + return; + } + + auto answer_record = std::make_unique(query_rec.name_, query_rec.type_, + query_rec.class_, ttl, std::move(ipaddr)); + context->answers_.emplace(query_rec.name_, std::move(answer_record)); +} + +void DnsMessageParser::setResponseCode(DnsQueryContextPtr& context, + const uint16_t serialized_queries, + const uint16_t serialized_answers) { + // If the question is malformed, don't change the response + if (context->response_code_ == DNS_RESPONSE_CODE_FORMAT_ERROR) { + return; + } + // Check for unsupported request types + for (const auto& query : context->queries_) { + if (query->type_ != DNS_RECORD_TYPE_A && query->type_ != DNS_RECORD_TYPE_AAAA) { + context->response_code_ = DNS_RESPONSE_CODE_NOT_IMPLEMENTED; + return; + } + } + // Output validation + if (serialized_queries == 0) { + context->response_code_ = DNS_RESPONSE_CODE_FORMAT_ERROR; + return; + } + if (serialized_answers == 0) { + context->response_code_ = DNS_RESPONSE_CODE_NAME_ERROR; + return; + } + context->response_code_ = DNS_RESPONSE_CODE_NO_ERROR; +} + +void DnsMessageParser::buildResponseBuffer(DnsQueryContextPtr& query_context, + Buffer::OwnedImpl& buffer) { + // Ensure that responses stay below the 512 byte byte limit. If we are to exceed this we must add + // DNS extension fields + // + // Note: There is Network::MAX_UDP_PACKET_SIZE, which is defined as 1500 bytes. If we support + // DNS extensions, which support up to 4096 bytes, we will have to keep this 1500 byte limit in + // mind. 
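As a rough cross-check of that limit, independent of the accounting done below and assuming a plain 12 byte header, no EDNS0 and no name compression, the number of A/AAAA answers that fit into a single-question 512 byte response can be estimated as:

#include <cstddef>
#include <string>

size_t estimateMaxAnswers(const std::string& name, bool ipv6) {
  constexpr size_t kMaxResponse = 512; // classic DNS-over-UDP ceiling without EDNS0
  constexpr size_t kHeader = 12;       // ID, flags and the four section counts
  const size_t encoded_name = name.size() + 2;     // length bytes replace the dots, plus the root label
  const size_t question = encoded_name + 4;        // QTYPE + QCLASS
  const size_t rdata = ipv6 ? 16 : 4;              // AAAA vs A payload
  const size_t answer = encoded_name + 10 + rdata; // TYPE, CLASS, TTL, RDLENGTH plus RDATA
  return (kMaxResponse - kHeader - question) / answer;
}

For the roughly 90 byte AAAA test domain used later in MaxQueryAndResponseSizeTest this estimate yields three answers, which lines up with the three-answer expectation in that test.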
+ static constexpr uint64_t MAX_DNS_RESPONSE_SIZE = 512; + static constexpr uint64_t MAX_DNS_NAME_SIZE = 255; + + // Amazon Route53 will return up to 8 records in an answer + // https://aws.amazon.com/route53/faqs/#associate_multiple_ip_with_single_record + static constexpr size_t MAX_RETURNED_RECORDS = 8; + + // Each response must have DNS flags, which spans 4 bytes. Account for them immediately so that we + // can adjust the number of returned answers to remain under the limit + uint64_t total_buffer_size = sizeof(DnsHeaderFlags); + uint16_t serialized_answers = 0; + uint16_t serialized_queries = 0; + + Buffer::OwnedImpl query_buffer{}; + Buffer::OwnedImpl answer_buffer{}; + + ENVOY_LOG(trace, "Building response for query ID [{}]", query_context->id_); + + for (const auto& query : query_context->queries_) { + if (!query->serialize(query_buffer)) { + ENVOY_LOG(debug, "Unable to serialize query record for {}", query->name_); + continue; + } + + // Serialize and account for each query's size. That said, there should be only one query. + ++serialized_queries; + total_buffer_size += query_buffer.length(); + + const auto& answers = query_context->answers_; + if (answers.empty()) { + continue; + } + const size_t num_answers = answers.size(); + + // Randomize the starting index if we have more than 8 records + size_t index = num_answers > MAX_RETURNED_RECORDS ? rng_.random() % num_answers : 0; + + while (serialized_answers < num_answers) { + const auto answer = std::next(answers.begin(), (index++ % num_answers)); + // Query names are limited to 255 characters. Since we are using ares to decode the encoded + // names, we should not end up with a non-conforming name here. + // + // See Section 2.3.4 of https://tools.ietf.org/html/rfc1035 + + // TODO(abaptiste): add stats for record overflow + if (query->name_.size() > MAX_DNS_NAME_SIZE) { + ENVOY_LOG( + debug, + "Query name '{}' is longer than the maximum permitted length. 
Skipping serialization", + query->name_); + continue; + } + if (answer->first != query->name_) { + continue; + } + + Buffer::OwnedImpl serialized_answer; + if (!answer->second->serialize(serialized_answer)) { + ENVOY_LOG(debug, "Unable to serialize answer record for {}", query->name_); + continue; + } + const uint64_t serialized_answer_length = serialized_answer.length(); + if ((total_buffer_size + serialized_answer_length) > MAX_DNS_RESPONSE_SIZE) { + break; + } + + ++serialized_answers; + total_buffer_size += serialized_answer_length; + answer_buffer.add(serialized_answer); + + if (serialized_answers == MAX_RETURNED_RECORDS) { + break; + } + } + } + + setResponseCode(query_context, serialized_queries, serialized_answers); + setDnsResponseFlags(query_context, serialized_queries, serialized_answers); + + // Build the response buffer for transmission to the client + buffer.writeBEInt(response_header_.id); + + uint16_t flags; + ::memcpy(&flags, static_cast(&response_header_.flags), sizeof(uint16_t)); + buffer.writeBEInt(flags); + + buffer.writeBEInt(response_header_.questions); + buffer.writeBEInt(response_header_.answers); + buffer.writeBEInt(response_header_.authority_rrs); + buffer.writeBEInt(response_header_.additional_rrs); - return rec; + // write the queries and answers + buffer.move(query_buffer); + buffer.move(answer_buffer); } } // namespace DnsFilter diff --git a/source/extensions/filters/udp/dns_filter/dns_parser.h b/source/extensions/filters/udp/dns_filter/dns_parser.h index 3099fb66694d..b64962019b49 100644 --- a/source/extensions/filters/udp/dns_filter/dns_parser.h +++ b/source/extensions/filters/udp/dns_filter/dns_parser.h @@ -6,6 +6,7 @@ #include "envoy/network/listener.h" #include "common/buffer/buffer_impl.h" +#include "common/runtime/runtime_impl.h" namespace Envoy { namespace Extensions { @@ -28,11 +29,10 @@ constexpr uint16_t DNS_RESPONSE_CODE_NOT_IMPLEMENTED = 4; class BaseDnsRecord { public: BaseDnsRecord(const std::string& rec_name, const uint16_t rec_type, const uint16_t rec_class) - : name_(rec_name), type_(rec_type), class_(rec_class){}; - + : name_(rec_name), type_(rec_type), class_(rec_class) {} virtual ~BaseDnsRecord() = default; - void serializeName(Buffer::OwnedImpl& output); - virtual void serialize(Buffer::OwnedImpl& output) PURE; + bool serializeName(Buffer::OwnedImpl& output); + virtual bool serialize(Buffer::OwnedImpl& output) PURE; const std::string name_; const uint16_t type_; @@ -40,19 +40,18 @@ class BaseDnsRecord { }; /** - * DnsQueryRecord represents a query record parsed from a DNS request from a client. Each record - * contains the ID, domain requested and the flags dictating the type of record that is sought. + * DnsQueryRecord represents a query record parsed from a DNS request from a client. Each query + * record contains the domain requested and the flags dictating the type of record that is sought. */ class DnsQueryRecord : public BaseDnsRecord { public: DnsQueryRecord(const std::string& rec_name, const uint16_t rec_type, const uint16_t rec_class) : BaseDnsRecord(rec_name, rec_type, rec_class) {} - void serialize(Buffer::OwnedImpl& output) override; + bool serialize(Buffer::OwnedImpl& output) override; }; using DnsQueryRecordPtr = std::unique_ptr; using DnsQueryPtrVec = std::vector; - using AddressConstPtrVec = std::vector; using AnswerCallback = std::function; @@ -64,11 +63,11 @@ using AnswerCallback = std::function; using DnsAnswerMap = std::unordered_multimap; /** - * DnsQueryContext contains all the data associated with a query. 
The filter uses this object to - * generate a response and determine where it should be transmitted. + * DnsQueryContext contains all the data necessary for responding to a query from a given client. */ class DnsQueryContext { public: DnsQueryContext(Network::Address::InstanceConstSharedPtr local, Network::Address::InstanceConstSharedPtr peer) - : local_(std::move(local)), peer_(std::move(peer)), parse_status_(false), id_() {} + : local_(std::move(local)), peer_(std::move(peer)), parse_status_(false), + response_code_(DNS_RESPONSE_CODE_NO_ERROR) {} const Network::Address::InstanceConstSharedPtr local_; const Network::Address::InstanceConstSharedPtr peer_; bool parse_status_; + uint16_t response_code_; uint16_t id_; DnsQueryPtrVec queries_; DnsAnswerMap answers_; @@ -101,7 +101,7 @@ using DnsQueryContextPtr = std::unique_ptr; class DnsMessageParser : public Logger::Loggable { public: enum class DnsQueryParseState { - Init = 0, + Init, Flags, // 2 bytes Questions, // 2 bytes Answers, // 2 bytes @@ -110,8 +110,9 @@ class DnsMessageParser : public Logger::Loggable { Finish }; - // These flags have been verified with dig. The flag order does not match the RFC, but takes byte - // ordering into account so that serialization does not need bitwise operations + // The flags have been verified with dig and this structure should not be modified. The flag order + // here does not match the RFC, but takes byte ordering into account so that serialization does + // not bitwise operations. PACKED_STRUCT(struct DnsHeaderFlags { unsigned rcode : 4; // return code unsigned cd : 1; // checking disabled @@ -138,6 +139,21 @@ class DnsMessageParser : public Logger::Loggable { uint16_t additional_rrs; }); + DnsMessageParser(bool recurse, uint64_t retry_count, Runtime::RandomGenerator& random) + : recursion_available_(recurse), retry_count_(retry_count), rng_(random) {} + + /** + * @brief Builds an Answer record for the active query. The active query transaction ID is at the + * top of a queue. This ID is sufficient enough to determine the answer records associated with + * the query + */ + DnsAnswerRecordPtr getResponseForQuery(); + + /** + * @param buffer the buffer containing the constructed DNS response to be sent to a client + */ + void buildResponseBuffer(DnsQueryContextPtr& query_context, Buffer::OwnedImpl& buffer); + /** * @brief parse a single query record from a client request * @@ -150,28 +166,95 @@ class DnsMessageParser : public Logger::Loggable { DnsQueryRecordPtr parseDnsQueryRecord(const Buffer::InstancePtr& buffer, uint64_t* offset); /** - * @brief Create a context object for handling a DNS Query + * @brief parse a single answer record from a client request + * + * @param buffer a reference to a buffer containing a DNS response + * @param offset the buffer offset at which parsing is to begin. This parameter is updated when + * one record is parsed from the buffer and returned to the caller. + * @return DnsQueryRecordPtr a pointer to a DnsAnswerRecord object containing all answer data + * parsed from the buffer + */ + DnsAnswerRecordPtr parseDnsAnswerRecord(const Buffer::InstancePtr& buffer, uint64_t* offset); + + /** + * @brief Constructs a DNS Answer record for a given IP Address and stores the object in a map + * where the response is associated with query name * - * @param client_request the context containing the client addressing and the buffer with the DNS - * query contents + * @param query_record to which the answer is matched. 
+ * @param ttl the TTL specifying how long the returned answer is cached + * @param ipaddr the address that is returned in the answer record */ - DnsQueryContextPtr createQueryContext(Network::UdpRecvData& client_request); + void buildDnsAnswerRecord(DnsQueryContextPtr& context, const DnsQueryRecord& query_rec, + const std::chrono::seconds ttl, + Network::Address::InstanceConstSharedPtr ipaddr); -private: + /** + * @return uint16_t the response code flag value from a parsed dns object + */ + uint16_t getQueryResponseCode() { return static_cast(header_.flags.rcode); } + + /** + * @return uint16_t the number of answer records in the parsed dns object + */ + uint16_t getAnswers() { return header_.answers; } + + /** + * @return uint16_t the response code flag value from a generated dns object + */ + uint16_t getAnswerResponseCode() { return static_cast(response_header_.flags.rcode); } + + /** + * @brief Parse the incoming query and create a context object for the filter + * + * @param client_request a structure containing addressing information and the buffer received + * from a client + */ + DnsQueryContextPtr createQueryContext(Network::UdpRecvData& client_request); /** * @param buffer a reference to the incoming request object received by the listener * @return bool true if all DNS records and flags were successfully parsed from the buffer */ bool parseDnsObject(DnsQueryContextPtr& context, const Buffer::InstancePtr& buffer); +private: + /** + * @brief sets the response code returned to the client + * + * @param context the query context for which we are generating a response + * @param queries specify the number of query records contained in the response + * @param answers specify the number of answer records contained in the response + */ + void setResponseCode(DnsQueryContextPtr& context, const uint16_t serialized_queries, + const uint16_t serialized_answers); + + /** + * @brief sets the flags in the DNS header of the response sent to a client + * + * @param context the query context for which we are generating a response + * @param queries specify the number of query records contained in the response + * @param answers specify the number of answer records contained in the response + */ + void setDnsResponseFlags(DnsQueryContextPtr& context, const uint16_t questions, + const uint16_t answers); + + /** + * @brief Extracts a DNS query name from a buffer + * + * @param buffer the buffer from which the name is extracted + * @param available_bytes the size of the remaining bytes in the buffer on which we can operate + * @param name_offset the offset from which parsing begins and ends. 
The updated value is returned + * to the caller + */ const std::string parseDnsNameRecord(const Buffer::InstancePtr& buffer, uint64_t* available_bytes, uint64_t* name_offset); + bool recursion_available_; + uint64_t retry_count_; DnsHeader header_; + DnsHeader response_header_; + Runtime::RandomGenerator& rng_; }; -using DnsMessageParserPtr = std::unique_ptr; - } // namespace DnsFilter } // namespace UdpFilters } // namespace Extensions diff --git a/test/extensions/filters/udp/dns_filter/BUILD b/test/extensions/filters/udp/dns_filter/BUILD index f19a863d0dd9..e4a66a2fcf10 100644 --- a/test/extensions/filters/udp/dns_filter/BUILD +++ b/test/extensions/filters/udp/dns_filter/BUILD @@ -2,6 +2,7 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", + "envoy_cc_fuzz_test", "envoy_package", ) load( @@ -36,3 +37,28 @@ envoy_extension_cc_test( "@envoy_api//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg_cc_proto", ], ) + +envoy_extension_cc_test( + name = "dns_filter_integration_test", + srcs = ["dns_filter_integration_test.cc"], + extension_name = "envoy.filters.udp_listener.dns_filter", + deps = [ + ":dns_filter_test_lib", + "//source/extensions/filters/udp/dns_filter:config", + "//source/extensions/filters/udp/dns_filter:dns_filter_lib", + "//test/integration:integration_lib", + "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", + ], +) + +envoy_cc_fuzz_test( + name = "dns_filter_fuzz_test", + srcs = ["dns_filter_fuzz_test.cc"], + corpus = "dns_filter_corpus", + deps = [ + "//source/extensions/filters/udp/dns_filter:dns_filter_lib", + "//test/fuzz:utility_lib", + "//test/mocks/server:server_mocks", + "//test/test_common:environment_lib", + ], +) diff --git a/test/extensions/filters/udp/dns_filter/dns_filter_corpus/016fac1e4a40199b26b08df73179f9249e6a680b b/test/extensions/filters/udp/dns_filter/dns_filter_corpus/016fac1e4a40199b26b08df73179f9249e6a680b new file mode 100644 index 0000000000000000000000000000000000000000..a78515c64c5fc91760a4de3114014b811e8712a9 GIT binary patch literal 45 ncmXqGG-Y65RA2xCMwZNiGBc)(ROYjEcWB>t1mdt`OGp3AG=DgGrAQvR8DFBvY;9=lm;8?xZ<~nt3 literal 0 HcmV?d00001 diff --git a/test/extensions/filters/udp/dns_filter/dns_filter_corpus/7c09f450b6667337fd111fad0049bf4601c1aece b/test/extensions/filters/udp/dns_filter/dns_filter_corpus/7c09f450b6667337fd111fad0049bf4601c1aece new file mode 100644 index 0000000000000000000000000000000000000000..a7644df43a1ccd2bc5e04ecc9249d7b13a83166d GIT binary patch literal 1024 zcmV+b1poW6dyrD{vO~?Ea3y#9;9N>JuvK3%r&zj)OC3=5OvaL}**z15hFY|Kj6u>P zfP3W>YQ(z@cI?Axq8sP7k+`&|kcJfmh5UI+`Kg)rV_H+8K92)=E@wGG$t)b&{m_6k zH0>h_GbM?S?9OPm6Oek_0Lw zO)3*5$5T7>3YOv^eTMzpC&gz-X0(zNA|!Pj*d~aP^lq&a*bx5!3s6GTh&v#c>t)3$ zYB8Jt$p*P~b9^=hvCAuX0#u&tTC{T^bBvawfVX!U4e|6nh9p=mrL>rG1_nRb6N#*J zHd2Bt5Eine8Pj~W`NC--r03`@Ye5G~YD|H&f%C+4Pl7Ar&Y%*^LQ>RFJA;^8H7eP< zF#SQ?Xcn^1$D*V9r29wUz50MxLu!WdD`;fDQo+tIavP~`-(Em}Q&TC+96mu?HMV{B!Pgsk9~jdEI66FEYUarw=$Nj2N^{V85U&6k3y29M;!T8xfeA+c0jFnvE z5J_(Ih@Si1R$X}a5?>IQ8yB^1jDmu*0zwG3k&MA;pr1fRi|pjv1|nbF3Zo`&q~7}l zb*EUcOVE~LQV0SJ_7CbxX{--wDe1hGN?ihvrCI~xl7K|6s1e0Q+O0R-c?bKL^yei_{hLaj*QoS6xNT8SrXqES%ZuLf%!kS zmL~JQzwb8r9|xJ9 z#w8pMBdbGw2QVCh{U8ju0%-6x!2CPc8R?O`oMH(zqO#_ZY_cYd`tFBwch*;; literal 0 HcmV?d00001 diff --git a/test/extensions/filters/udp/dns_filter/dns_filter_corpus/fb9282f0af3341cfc98d56f10fffffd5529d8802 b/test/extensions/filters/udp/dns_filter/dns_filter_corpus/fb9282f0af3341cfc98d56f10fffffd5529d8802 new file mode 100644 index 
0000000000000000000000000000000000000000..2e9c5cade09f057f125e3adc575e457321af0ccf GIT binary patch literal 44 pcmXqGG-Y65RA2xBMg<_jz{rwWkXpj92?`jPGE$it7#Xq|7yzJ02{Zrz literal 0 HcmV?d00001 diff --git a/test/extensions/filters/udp/dns_filter/dns_filter_fuzz_test.cc b/test/extensions/filters/udp/dns_filter/dns_filter_fuzz_test.cc new file mode 100644 index 000000000000..964bb0d0eea9 --- /dev/null +++ b/test/extensions/filters/udp/dns_filter/dns_filter_fuzz_test.cc @@ -0,0 +1,65 @@ +#include "common/common/logger.h" + +#include "extensions/filters/udp/dns_filter/dns_filter.h" + +#include "test/fuzz/fuzz_runner.h" +#include "test/fuzz/utility.h" +#include "test/mocks/event/mocks.h" +#include "test/mocks/server/mocks.h" +#include "test/test_common/environment.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace UdpFilters { +namespace DnsFilter { +namespace { + +const std::string generateQuery(FuzzedDataProvider* data_provider) { + size_t query_size = data_provider->ConsumeIntegralInRange(0, 512); + return data_provider->ConsumeRandomLengthString(query_size); +} + +DEFINE_FUZZER(const uint8_t* buf, size_t len) { + FuzzedDataProvider data_provider(buf, len); + const bool recurse = data_provider.ConsumeBool(); + const uint16_t retry_count = data_provider.ConsumeIntegralInRange(0, 3); + + static NiceMock random; + DnsMessageParser message_parser(recurse, retry_count, random); + + const auto local = Network::Utility::parseInternetAddressAndPort("127.0.2.1:5353"); + const auto peer = Network::Utility::parseInternetAddressAndPort("127.0.2.1:55088"); + + Buffer::InstancePtr query_buffer = std::make_unique(); + const std::string query = generateQuery(&data_provider); + query_buffer->add(query.data(), query.size()); + + const uint8_t fuzz_function = data_provider.ConsumeIntegralInRange(0, 2); + switch (fuzz_function) { + case 0: { + DnsQueryContextPtr query_context = std::make_unique(local, peer); + bool result = message_parser.parseDnsObject(query_context, query_buffer); + UNREFERENCED_PARAMETER(result); + } break; + + case 1: { + uint64_t offset = data_provider.ConsumeIntegralInRange(0, query_buffer->length()); + DnsQueryRecordPtr ptr = message_parser.parseDnsQueryRecord(query_buffer, &offset); + UNREFERENCED_PARAMETER(ptr); + } break; + + case 2: { + uint64_t offset = data_provider.ConsumeIntegralInRange(0, query_buffer->length()); + DnsAnswerRecordPtr ptr = message_parser.parseDnsAnswerRecord(query_buffer, &offset); + UNREFERENCED_PARAMETER(ptr); + } break; + } // end case +} +} // namespace +} // namespace DnsFilter +} // namespace UdpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/udp/dns_filter/dns_filter_integration_test.cc b/test/extensions/filters/udp/dns_filter/dns_filter_integration_test.cc new file mode 100644 index 000000000000..8811fbd39ee2 --- /dev/null +++ b/test/extensions/filters/udp/dns_filter/dns_filter_integration_test.cc @@ -0,0 +1,171 @@ +#include "envoy/config/bootstrap/v3/bootstrap.pb.h" + +#include "extensions/filters/udp/dns_filter/dns_filter.h" + +#include "test/integration/integration.h" +#include "test/test_common/network_utility.h" + +#include "dns_filter_test_utils.h" + +namespace Envoy { +namespace Extensions { +namespace UdpFilters { +namespace DnsFilter { +namespace { + +class DnsFilterIntegrationTest : public testing::TestWithParam, + public BaseIntegrationTest { +public: + DnsFilterIntegrationTest() : BaseIntegrationTest(GetParam(), configToUse()) { + 
setupResponseParser(); + } + + void setupResponseParser() { + response_parser_ = std::make_unique(true /*recursive queries */, + 0 /* retry_count */, random_); + } + + static std::string configToUse() { + return absl::StrCat(ConfigHelper::baseUdpListenerConfig(), R"EOF( + listener_filters: + name: "envoy.filters.udp.dns_filter" + typed_config: + '@type': 'type.googleapis.com/envoy.extensions.filters.udp.dns_filter.v3alpha.DnsFilterConfig' + stat_prefix: "my_prefix" + server_config: + inline_dns_table: + external_retry_count: 3 + known_suffixes: + - suffix: "foo1.com" + - suffix: "cluster_0" + virtual_domains: + - name: "www.foo1.com" + endpoint: + address_list: + address: + - 10.0.0.1 + - 10.0.0.2 + - 10.0.0.3 + - 10.0.0.4 + - name: "cluster.foo1.com" + endpoint: + cluster_name: "cluster_0" + )EOF"); + } + + void setup(uint32_t upstream_count) { + udp_fake_upstream_ = true; + if (upstream_count > 1) { + setDeterministic(); + setUpstreamCount(upstream_count); + config_helper_.addConfigModifier( + [upstream_count](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + for (uint32_t i = 1; i < upstream_count; i++) { + bootstrap.mutable_static_resources() + ->mutable_clusters(0) + ->mutable_load_assignment() + ->mutable_endpoints(0) + ->add_lb_endpoints() + ->mutable_endpoint() + ->MergeFrom(ConfigHelper::buildEndpoint( + Network::Test::getLoopbackAddressString(GetParam()))); + } + }); + } + BaseIntegrationTest::initialize(); + } + + void TearDown() override { + test_server_.reset(); + fake_upstreams_.clear(); + } + + void requestResponseWithListenerAddress(const Network::Address::Instance& listener_address, + const std::string& data_to_send, + Network::UdpRecvData& response_datagram) { + Network::Test::UdpSyncPeer client(version_); + client.write(data_to_send, listener_address); + client.recv(response_datagram); + } + + NiceMock random_; + std::unique_ptr response_parser_; + DnsQueryContextPtr query_ctx_; +}; + +INSTANTIATE_TEST_SUITE_P(IpVersions, DnsFilterIntegrationTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); + +TEST_P(DnsFilterIntegrationTest, LocalLookupTest) { + setup(0); + const uint32_t port = lookupPort("listener_0"); + const auto listener_address = Network::Utility::resolveUrl( + fmt::format("tcp://{}:{}", Network::Test::getLoopbackAddressUrlString(version_), port)); + + Network::UdpRecvData response; + std::string query = + Utils::buildQueryForDomain("www.foo1.com", DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN); + requestResponseWithListenerAddress(*listener_address, query, response); + + query_ctx_ = response_parser_->createQueryContext(response); + EXPECT_TRUE(query_ctx_->parse_status_); + + EXPECT_EQ(4, query_ctx_->answers_.size()); + EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode()); +} + +TEST_P(DnsFilterIntegrationTest, ClusterLookupTest) { + setup(2); + const uint32_t port = lookupPort("listener_0"); + const auto listener_address = Network::Utility::resolveUrl( + fmt::format("tcp://{}:{}", Network::Test::getLoopbackAddressUrlString(version_), port)); + + uint16_t record_type; + if (listener_address->ip()->ipv6()) { + record_type = DNS_RECORD_TYPE_AAAA; + } else { + record_type = DNS_RECORD_TYPE_A; + } + + Network::UdpRecvData response; + std::string query = Utils::buildQueryForDomain("cluster_0", record_type, DNS_RECORD_CLASS_IN); + requestResponseWithListenerAddress(*listener_address, query, response); + + query_ctx_ = response_parser_->createQueryContext(response); + 
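+  // setup(2) leaves cluster_0 with two endpoints (the default one plus the one added by the
+  // config modifier), and resolveViaClusters() builds one answer per endpoint, so the
+  // expectations below look for two answers and a NO_ERROR response code.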
EXPECT_TRUE(query_ctx_->parse_status_); + + EXPECT_EQ(2, query_ctx_->answers_.size()); + EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode()); +} + +TEST_P(DnsFilterIntegrationTest, ClusterEndpointLookupTest) { + setup(2); + const uint32_t port = lookupPort("listener_0"); + const auto listener_address = Network::Utility::resolveUrl( + fmt::format("tcp://{}:{}", Network::Test::getLoopbackAddressUrlString(version_), port)); + + uint16_t record_type; + if (listener_address->ip()->ipv6()) { + record_type = DNS_RECORD_TYPE_AAAA; + } else { + record_type = DNS_RECORD_TYPE_A; + } + + Network::UdpRecvData response; + std::string query = + Utils::buildQueryForDomain("cluster.foo1.com", record_type, DNS_RECORD_CLASS_IN); + requestResponseWithListenerAddress(*listener_address, query, response); + + query_ctx_ = response_parser_->createQueryContext(response); + EXPECT_TRUE(query_ctx_->parse_status_); + + EXPECT_EQ(2, query_ctx_->answers_.size()); + EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode()); +} + +} // namespace +} // namespace DnsFilter +} // namespace UdpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/udp/dns_filter/dns_filter_test.cc b/test/extensions/filters/udp/dns_filter/dns_filter_test.cc index 9b852cfb1873..942a6d88fea3 100644 --- a/test/extensions/filters/udp/dns_filter/dns_filter_test.cc +++ b/test/extensions/filters/udp/dns_filter/dns_filter_test.cc @@ -6,6 +6,7 @@ #include "test/mocks/event/mocks.h" #include "test/mocks/server/mocks.h" #include "test/test_common/environment.h" +#include "test/test_common/simulated_time_system.h" #include "dns_filter_test_utils.h" #include "gmock/gmock.h" @@ -13,6 +14,7 @@ using testing::AtLeast; using testing::InSequence; +using testing::Return; using testing::ReturnRef; namespace Envoy { @@ -30,19 +32,21 @@ Api::IoCallUint64Result makeNoError(uint64_t rc) { class DnsFilterTest : public testing::Test { public: DnsFilterTest() - : listener_address_(Network::Utility::parseInternetAddressAndPort("127.0.2.1:5353")) { - response_parser_ = std::make_unique(); + : listener_address_(Network::Utility::parseInternetAddressAndPort("127.0.2.1:5353")), + api_(Api::createApiForTest()) { - client_request_.addresses_.local_ = listener_address_; - client_request_.addresses_.peer_ = listener_address_; - client_request_.buffer_ = std::make_unique(); + response_parser_ = + std::make_unique(true /* recursive queries */, 0 /* retries */, random_); + udp_response_.addresses_.local_ = listener_address_; + udp_response_.addresses_.peer_ = listener_address_; + udp_response_.buffer_ = std::make_unique(); EXPECT_CALL(callbacks_, udpListener()).Times(AtLeast(0)); EXPECT_CALL(callbacks_.udp_listener_, send(_)) .WillRepeatedly( Invoke([this](const Network::UdpSendData& send_data) -> Api::IoCallUint64Result { - client_request_.buffer_->move(send_data.buffer_); - return makeNoError(client_request_.buffer_->length()); + udp_response_.buffer_->move(send_data.buffer_); + return makeNoError(udp_response_.buffer_->length()); })); EXPECT_CALL(callbacks_.udp_listener_, dispatcher()).WillRepeatedly(ReturnRef(dispatcher_)); } @@ -56,6 +60,9 @@ class DnsFilterTest : public testing::Test { EXPECT_CALL(listener_factory_, scope()).WillOnce(ReturnRef(*store)); EXPECT_CALL(listener_factory_, dispatcher()).Times(AtLeast(0)); EXPECT_CALL(listener_factory_, clusterManager()).Times(AtLeast(0)); + EXPECT_CALL(listener_factory_, api()).WillOnce(ReturnRef(*api_)); + ON_CALL(random_, 
random()).WillByDefault(Return(3)); + EXPECT_CALL(listener_factory_, random()).WillOnce(ReturnRef(random_)); config_ = std::make_shared(listener_factory_, config); filter_ = std::make_unique(callbacks_, config_); @@ -71,18 +78,19 @@ class DnsFilterTest : public testing::Test { } const Network::Address::InstanceConstSharedPtr listener_address_; - Server::Configuration::MockListenerFactoryContext listener_factory_; + Api::ApiPtr api_; DnsFilterEnvoyConfigSharedPtr config_; - - std::unique_ptr filter_; + DnsQueryContextPtr query_ctx_; + Event::MockDispatcher dispatcher_; Network::MockUdpReadFilterCallbacks callbacks_; + Network::UdpRecvData udp_response_; + NiceMock file_system_; + NiceMock histogram_; + NiceMock random_; + Server::Configuration::MockListenerFactoryContext listener_factory_; Stats::IsolatedStoreImpl stats_store_; - Network::UdpRecvData client_request_; - + std::unique_ptr filter_; std::unique_ptr response_parser_; - Event::MockDispatcher dispatcher_; - - DnsQueryContextPtr query_ctx_; const std::string forward_query_off_config = R"EOF( stat_prefix: "my_prefix" @@ -92,6 +100,9 @@ stat_prefix: "my_prefix" known_suffixes: - suffix: foo1.com - suffix: foo2.com + - suffix: foo3.com + - suffix: foo16.com + - suffix: thisismydomainforafivehundredandtwelvebytetest.com virtual_domains: - name: "www.foo1.com" endpoint: @@ -111,7 +122,116 @@ stat_prefix: "my_prefix" address_list: address: - "10.0.3.1" - )EOF"; + - name: "www.foo16.com" + endpoint: + address_list: + address: + - "10.0.16.1" + - "10.0.16.2" + - "10.0.16.3" + - "10.0.16.4" + - "10.0.16.5" + - "10.0.16.6" + - "10.0.16.7" + - "10.0.16.8" + - "10.0.16.9" + - "10.0.16.10" + - "10.0.16.11" + - "10.0.16.12" + - "10.0.16.13" + - "10.0.16.14" + - "10.0.16.15" + - "10.0.16.16" + - name: www.supercalifragilisticexpialidocious.thisismydomainforafivehundredandtwelvebytetest.com + endpoint: + address_list: + address: + - "2001:8a:c1::2801:0001" + - "2001:8a:c1::2801:0002" + - "2001:8a:c1::2801:0003" + - "2001:8a:c1::2801:0004" + - "2001:8a:c1::2801:0005" + - "2001:8a:c1::2801:0006" + - "2001:8a:c1::2801:0007" + - "2001:8a:c1::2801:0008" +)EOF"; + + const std::string forward_query_on_config = R"EOF( +stat_prefix: "my_prefix" +client_config: + resolver_timeout: 5s + upstream_resolvers: + - "1.1.1.1" + - "8.8.8.8" + - "8.8.4.4" +server_config: + inline_dns_table: + external_retry_count: 3 + known_suffixes: + - suffix: foo1.com + - suffix: foo2.com + virtual_domains: + - name: "www.foo1.com" + endpoint: + address_list: + address: + - "10.0.0.1" +)EOF"; + + const std::string external_dns_table_config = R"EOF( +stat_prefix: "my_prefix" +client_config: + resolver_timeout: 5s + upstream_resolvers: + - "1.1.1.1" +server_config: + external_dns_table: + filename: {} +)EOF"; + + const std::string external_dns_table_json = R"EOF( +{ + "external_retry_count": 3, + "known_suffixes": [ { "suffix": "com" } ], + "virtual_domains": [ + { + "name": "www.external_foo1.com", + "endpoint": { "address_list": { "address": [ "10.0.0.1", "10.0.0.2" ] } } + }, + { + "name": "www.external_foo2.com", + "endpoint": { "address_list": { "address": [ "2001:8a:c1::2800:7" ] } } + }, + { + "name": "www.external_foo3.com", + "endpoint": { "address_list": { "address": [ "10.0.3.1" ] } } + } + ] +} +)EOF"; + + const std::string external_dns_table_yaml = R"EOF( +external_retry_count: 3 +known_suffixes: + - suffix: "com" +virtual_domains: + - name: "www.external_foo1.com" + endpoint: + address_list: + address: + - "10.0.0.1" + - "10.0.0.2" + - name: "www.external_foo2.com" + 
endpoint: + address_list: + address: + - "2001:8a:c1::2800:7" + - name: "www.external_foo3.com" + endpoint: + address_list: + address: + - "10.0.3.1" +)EOF"; }; TEST_F(DnsFilterTest, InvalidQuery) { @@ -119,8 +239,72 @@ TEST_F(DnsFilterTest, InvalidQuery) { setup(forward_query_off_config); sendQueryFromClient("10.0.0.1:1000", "hello"); - query_ctx_ = response_parser_->createQueryContext(client_request_); - ASSERT_FALSE(query_ctx_->parse_status_); + query_ctx_ = response_parser_->createQueryContext(udp_response_); + EXPECT_FALSE(query_ctx_->parse_status_); + + EXPECT_EQ(DNS_RESPONSE_CODE_FORMAT_ERROR, response_parser_->getQueryResponseCode()); + EXPECT_EQ(0, query_ctx_->answers_.size()); +} + +TEST_F(DnsFilterTest, MaxQueryAndResponseSizeTest) { + InSequence s; + + setup(forward_query_off_config); + std::string domain( + "www.supercalifragilisticexpialidocious.thisismydomainforafivehundredandtwelvebytetest.com"); + const std::string query = + Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_AAAA, DNS_RECORD_CLASS_IN); + ASSERT_FALSE(query.empty()); + + sendQueryFromClient("10.0.0.1:1000", query); + EXPECT_LT(udp_response_.buffer_->length(), Utils::MAX_UDP_DNS_SIZE); + + query_ctx_ = response_parser_->createQueryContext(udp_response_); + EXPECT_TRUE(query_ctx_->parse_status_); + + EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode()); + // There are 8 addresses, however, since the domain is part of the answer record, each + // serialized answer is over 100 bytes in size, there is room for 3 before the next + // serialized answer puts the buffer over the 512 byte limit. The query itself is also + // around 100 bytes. + EXPECT_EQ(3, query_ctx_->answers_.size()); +} + +TEST_F(DnsFilterTest, InvalidQueryNameTooLongTest) { + InSequence s; + + setup(forward_query_off_config); + std::string domain = "www." 
+ std::string(256, 'a') + ".com"; + const std::string query = + Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN); + ASSERT_FALSE(query.empty()); + + sendQueryFromClient("10.0.0.1:1000", query); + + query_ctx_ = response_parser_->createQueryContext(udp_response_); + EXPECT_FALSE(query_ctx_->parse_status_); + + EXPECT_EQ(DNS_RESPONSE_CODE_FORMAT_ERROR, response_parser_->getQueryResponseCode()); + EXPECT_EQ(0, query_ctx_->answers_.size()); +} + +TEST_F(DnsFilterTest, InvalidLabelNameTooLongTest) { + InSequence s; + + setup(forward_query_off_config); + std::string domain(64, 'a'); + domain += ".com"; + const std::string query = + Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN); + ASSERT_FALSE(query.empty()); + + sendQueryFromClient("10.0.0.1:1000", query); + + query_ctx_ = response_parser_->createQueryContext(udp_response_); + EXPECT_FALSE(query_ctx_->parse_status_); + + EXPECT_EQ(DNS_RESPONSE_CODE_FORMAT_ERROR, response_parser_->getQueryResponseCode()); + EXPECT_EQ(0, query_ctx_->answers_.size()); } TEST_F(DnsFilterTest, SingleTypeAQuery) { @@ -131,13 +315,419 @@ TEST_F(DnsFilterTest, SingleTypeAQuery) { const std::string query = Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN); ASSERT_FALSE(query.empty()); + + sendQueryFromClient("10.0.0.1:1000", query); + + query_ctx_ = response_parser_->createQueryContext(udp_response_); + EXPECT_TRUE(query_ctx_->parse_status_); + + EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode()); + EXPECT_EQ(1, query_ctx_->answers_.size()); + + // Verify that we have an answer record for the queried domain + + const DnsAnswerRecordPtr& answer = query_ctx_->answers_.find(domain)->second; + + // Verify the address returned + const std::list expected{"10.0.3.1"}; + Utils::verifyAddress(expected, answer); +} + +TEST_F(DnsFilterTest, RepeatedTypeAQuerySuccess) { + InSequence s; + + setup(forward_query_off_config); + constexpr size_t loopCount = 5; + const std::string domain("www.foo3.com"); + size_t total_query_bytes = 0; + + for (size_t i = 0; i < loopCount; i++) { + const std::string query = + Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN); + total_query_bytes += query.size(); + ASSERT_FALSE(query.empty()); + sendQueryFromClient("10.0.0.1:1000", query); + + query_ctx_ = response_parser_->createQueryContext(udp_response_); + EXPECT_TRUE(query_ctx_->parse_status_); + + EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode()); + EXPECT_EQ(1, query_ctx_->answers_.size()); + + // Verify that we have an answer record for the queried domain + const DnsAnswerRecordPtr& answer = query_ctx_->answers_.find(domain)->second; + + // Verify the address returned + std::list expected{"10.0.3.1"}; + Utils::verifyAddress(expected, answer); + } +} + +TEST_F(DnsFilterTest, LocalTypeAQueryFail) { + InSequence s; + + setup(forward_query_off_config); + const std::string query = + Utils::buildQueryForDomain("www.foo2.com", DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN); + ASSERT_FALSE(query.empty()); + + sendQueryFromClient("10.0.0.1:1000", query); + query_ctx_ = response_parser_->createQueryContext(udp_response_); + EXPECT_TRUE(query_ctx_->parse_status_); + + EXPECT_EQ(3, response_parser_->getQueryResponseCode()); + EXPECT_EQ(0, query_ctx_->answers_.size()); +} + +TEST_F(DnsFilterTest, LocalTypeAAAAQuerySuccess) { + InSequence s; + + setup(forward_query_off_config); + std::list expected{"2001:8a:c1::2800:7", "2001:8a:c1::2800:8", 
"2001:8a:c1::2800:9"}; + const std::string domain("www.foo2.com"); + const std::string query = + Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_AAAA, DNS_RECORD_CLASS_IN); + ASSERT_FALSE(query.empty()); + + sendQueryFromClient("10.0.0.1:1000", query); + query_ctx_ = response_parser_->createQueryContext(udp_response_); + EXPECT_TRUE(query_ctx_->parse_status_); + + EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode()); + EXPECT_EQ(expected.size(), query_ctx_->answers_.size()); + + // Verify the address returned + for (const auto& answer : query_ctx_->answers_) { + EXPECT_EQ(answer.first, domain); + Utils::verifyAddress(expected, answer.second); + } +} + +TEST_F(DnsFilterTest, ConsumeExternalJsonTableTest) { + InSequence s; + + std::string temp_path = + TestEnvironment::writeStringToFileForTest("dns_table.json", external_dns_table_json); + std::string config_to_use = fmt::format(external_dns_table_config, temp_path); + setup(config_to_use); + + const std::string domain("www.external_foo1.com"); + const std::string query = + Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN); + + ASSERT_FALSE(query.empty()); + sendQueryFromClient("10.0.0.1:1000", query); + + query_ctx_ = response_parser_->createQueryContext(udp_response_); + EXPECT_TRUE(query_ctx_->parse_status_); + EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode()); + EXPECT_EQ(2, query_ctx_->answers_.size()); + + // Verify the address returned + const std::list expected{"10.0.0.1", "10.0.0.2"}; + for (const auto& answer : query_ctx_->answers_) { + EXPECT_EQ(answer.first, domain); + Utils::verifyAddress(expected, answer.second); + } +} + +TEST_F(DnsFilterTest, ConsumeExternalYamlTableTest) { + InSequence s; + + std::string temp_path = + TestEnvironment::writeStringToFileForTest("dns_table.yaml", external_dns_table_yaml); + std::string config_to_use = fmt::format(external_dns_table_config, temp_path); + setup(config_to_use); + + const std::string domain("www.external_foo1.com"); + const std::string query = + Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN); + + ASSERT_FALSE(query.empty()); + sendQueryFromClient("10.0.0.1:1000", query); + + query_ctx_ = response_parser_->createQueryContext(udp_response_); + EXPECT_TRUE(query_ctx_->parse_status_); + EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode()); + EXPECT_EQ(2, query_ctx_->answers_.size()); + + // Verify the address returned + const std::list expected{"10.0.0.1", "10.0.0.2"}; + for (const auto& answer : query_ctx_->answers_) { + EXPECT_EQ(answer.first, domain); + Utils::verifyAddress(expected, answer.second); + } +} + +TEST_F(DnsFilterTest, RawBufferTest) { + InSequence s; + + setup(forward_query_off_config); + const std::string domain("www.foo3.com"); + + constexpr char dns_request[] = { + 0x36, 0x6b, // Transaction ID + 0x01, 0x20, // Flags + 0x00, 0x01, // Questions + 0x00, 0x00, // Answers + 0x00, 0x00, // Authority RRs + 0x00, 0x00, // Additional RRs + 0x03, 0x77, 0x77, 0x77, 0x04, 0x66, 0x6f, // Query record for + 0x6f, 0x33, 0x03, 0x63, 0x6f, 0x6d, 0x00, // www.foo3.com + 0x00, 0x01, // Query Type - A + 0x00, 0x01, // Query Class - IN + }; + + constexpr size_t count = sizeof(dns_request) / sizeof(dns_request[0]); + const std::string query = Utils::buildQueryFromBytes(dns_request, count); + + sendQueryFromClient("10.0.0.1:1000", query); + + query_ctx_ = response_parser_->createQueryContext(udp_response_); + EXPECT_TRUE(query_ctx_->parse_status_); + 
EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode()); + EXPECT_EQ(1, query_ctx_->answers_.size()); + + // Verify that we have an answer record for the queried domain + const DnsAnswerRecordPtr& answer = query_ctx_->answers_.find(domain)->second; + + // Verify the address returned + const std::list expected{"10.0.3.1"}; + Utils::verifyAddress(expected, answer); +} + +TEST_F(DnsFilterTest, InvalidQueryNameTest) { + InSequence s; + + setup(forward_query_off_config); + // In this buffer the name segment sizes are incorrect. The filter will indicate that the parsing + // failed + constexpr char dns_request[] = { + 0x36, 0x6c, // Transaction ID + 0x01, 0x20, // Flags + 0x00, 0x01, // Questions + 0x00, 0x00, // Answers + 0x00, 0x00, // Authority RRs + 0x00, 0x00, // Additional RRs + 0x02, 0x77, 0x77, 0x77, 0x03, 0x66, 0x6f, // Query record for + 0x6f, 0x33, 0x01, 0x63, 0x6f, 0x6d, 0x00, // www.foo3.com + 0x00, 0x01, // Query Type - A + 0x00, 0x01, // Query Class - IN + }; + + constexpr size_t count = sizeof(dns_request) / sizeof(dns_request[0]); + const std::string query = Utils::buildQueryFromBytes(dns_request, count); + + sendQueryFromClient("10.0.0.1:1000", query); + + query_ctx_ = response_parser_->createQueryContext(udp_response_); + EXPECT_FALSE(query_ctx_->parse_status_); + EXPECT_EQ(DNS_RESPONSE_CODE_FORMAT_ERROR, response_parser_->getQueryResponseCode()); +} + +TEST_F(DnsFilterTest, InvalidQueryNameTest2) { + InSequence s; + + setup(forward_query_off_config); + // In this buffer the name segment sizes are incorrect. The first segment points + // past the end of the buffer. The filter will indicate that the parsing failed. + constexpr char dns_request[] = { + 0x36, 0x6c, // Transaction ID + 0x01, 0x20, // Flags + 0x00, 0x01, // Questions + 0x00, 0x00, // Answers + 0x00, 0x00, // Authority RRs + 0x00, 0x00, // Additional RRs + 0x4c, 0x77, 0x77, 0x77, 0x03, 0x66, 0x6f, // Query record for + 0x6f, 0x33, 0x01, 0x63, 0x6f, 0x6d, 0x00, // www.foo3.com + 0x00, 0x01, // Query Type - A + 0x00, 0x01, // Query Class - IN + }; + + constexpr size_t count = sizeof(dns_request) / sizeof(dns_request[0]); + const std::string query = Utils::buildQueryFromBytes(dns_request, count); + sendQueryFromClient("10.0.0.1:1000", query); - query_ctx_ = response_parser_->createQueryContext(client_request_); - // This will fail since the response generation is not being done yet - ASSERT_FALSE(query_ctx_->parse_status_); + query_ctx_ = response_parser_->createQueryContext(udp_response_); + EXPECT_FALSE(query_ctx_->parse_status_); + EXPECT_EQ(DNS_RESPONSE_CODE_FORMAT_ERROR, response_parser_->getQueryResponseCode()); } +TEST_F(DnsFilterTest, MultipleQueryCountTest) { + InSequence s; + + setup(forward_query_off_config); + // In this buffer we have 2 queries for two different domains. This is a rare case + // and serves to validate that we handle the protocol correctly. 
+ constexpr char dns_request[] = { + 0x36, 0x6d, // Transaction ID + 0x01, 0x20, // Flags + 0x00, 0x02, // Questions + 0x00, 0x00, // Answers + 0x00, 0x00, // Authority RRs + 0x00, 0x00, // Additional RRs + 0x03, 0x77, 0x77, 0x77, 0x04, 0x66, 0x6f, // begin query record for + 0x6f, 0x33, 0x03, 0x63, 0x6f, 0x6d, 0x00, // www.foo3.com + 0x00, 0x01, // Query Type - A + 0x00, 0x01, // Query Class - IN + 0x03, 0x77, 0x77, 0x77, 0x04, 0x66, 0x6f, // Query record for + 0x6f, 0x31, 0x03, 0x63, 0x6f, 0x6d, 0x00, // www.foo1.com + 0x00, 0x01, // Query Type - A + 0x00, 0x01, // Query Class - IN + }; + + constexpr size_t count = sizeof(dns_request) / sizeof(dns_request[0]); + const std::string query = Utils::buildQueryFromBytes(dns_request, count); + + sendQueryFromClient("10.0.0.1:1000", query); + + query_ctx_ = response_parser_->createQueryContext(udp_response_); + EXPECT_TRUE(query_ctx_->parse_status_); + EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode()); + EXPECT_EQ(3, query_ctx_->answers_.size()); + + // Verify that the answers contain an entry for each domain + for (const auto& answer : query_ctx_->answers_) { + if (answer.first == "www.foo1.com") { + Utils::verifyAddress({"10.0.0.1", "10.0.0.2"}, answer.second); + } else if (answer.first == "www.foo3.com") { + Utils::verifyAddress({"10.0.3.1"}, answer.second); + } else { + FAIL() << "Unexpected domain in DNS response: " << answer.first; + } + } +} + +TEST_F(DnsFilterTest, InvalidQueryCountTest) { + InSequence s; + + setup(forward_query_off_config); + // In this buffer the Questions count is incorrect. We will abort parsing and return a response + // to the client. + constexpr char dns_request[] = { + 0x36, 0x6e, // Transaction ID + 0x01, 0x20, // Flags + 0x00, 0x0a, // Questions + 0x00, 0x00, // Answers + 0x00, 0x00, // Authority RRs + 0x00, 0x00, // Additional RRs + 0x03, 0x77, 0x77, 0x77, 0x04, 0x66, 0x6f, // Query record for + 0x6f, 0x33, 0x03, 0x63, 0x6f, 0x6d, 0x00, // www.foo3.com + 0x00, 0x01, // Query Type - A + 0x00, 0x01, // Query Class - IN + }; + + constexpr size_t count = sizeof(dns_request) / sizeof(dns_request[0]); + const std::string query = Utils::buildQueryFromBytes(dns_request, count); + + sendQueryFromClient("10.0.0.1:1000", query); + + query_ctx_ = response_parser_->createQueryContext(udp_response_); + EXPECT_TRUE(query_ctx_->parse_status_); + EXPECT_EQ(DNS_RESPONSE_CODE_FORMAT_ERROR, response_parser_->getQueryResponseCode()); +} + +TEST_F(DnsFilterTest, InvalidQueryCountTest2) { + InSequence s; + + setup(forward_query_off_config); + // In this buffer the Questions count is zero. This is an invalid query and is handled as such. 
+ constexpr char dns_request[] = { + 0x36, 0x6f, // Transaction ID + 0x01, 0x20, // Flags + 0x00, 0x00, // Questions + 0x00, 0x00, // Answers + 0x00, 0x00, // Authority RRs + 0x00, 0x00, // Additional RRs + 0x03, 0x77, 0x77, 0x77, 0x04, 0x66, 0x6f, // Query record for + 0x6f, 0x33, 0x03, 0x63, 0x6f, 0x6d, 0x00, // www.foo3.com + 0x00, 0x01, // Query Type - A + 0x00, 0x01, // Query Class - IN + }; + + constexpr size_t count = sizeof(dns_request) / sizeof(dns_request[0]); + const std::string query = Utils::buildQueryFromBytes(dns_request, count); + + sendQueryFromClient("10.0.0.1:1000", query); + + query_ctx_ = response_parser_->createQueryContext(udp_response_); + EXPECT_FALSE(query_ctx_->parse_status_); + EXPECT_EQ(DNS_RESPONSE_CODE_FORMAT_ERROR, response_parser_->getQueryResponseCode()); +} + +TEST_F(DnsFilterTest, NotImplementedQueryTest) { + InSequence s; + + setup(forward_query_off_config); + // In this buffer the Questions count is zero. This is an invalid query and is handled as such. + constexpr char dns_request[] = { + 0x36, 0x70, // Transaction ID + 0x01, 0x20, // Flags + 0x00, 0x01, // Questions + 0x00, 0x00, // Answers + 0x00, 0x00, // Authority RRs + 0x00, 0x00, // Additional RRs + 0x03, 0x77, 0x77, 0x77, 0x04, 0x66, 0x6f, // Query record for + 0x6f, 0x33, 0x03, 0x63, 0x6f, 0x6d, 0x00, // www.foo3.com + 0x00, 0x05, // Query Type - CNAME + 0x00, 0x01, // Query Class - IN + }; + + constexpr size_t count = sizeof(dns_request) / sizeof(dns_request[0]); + const std::string query = Utils::buildQueryFromBytes(dns_request, count); + + sendQueryFromClient("10.0.0.1:1000", query); + + query_ctx_ = response_parser_->createQueryContext(udp_response_); + EXPECT_TRUE(query_ctx_->parse_status_); + EXPECT_EQ(DNS_RESPONSE_CODE_NOT_IMPLEMENTED, response_parser_->getQueryResponseCode()); +} + +TEST_F(DnsFilterTest, InvalidShortBufferTest) { + InSequence s; + + setup(forward_query_off_config); + // This is an invalid query. Envoy should handle the packet and indicate a parsing failure + constexpr char dns_request[] = {0x1c}; + const std::string query = Utils::buildQueryFromBytes(dns_request, 1); + sendQueryFromClient("10.0.0.1:1000", query); + + query_ctx_ = response_parser_->createQueryContext(udp_response_); + EXPECT_FALSE(query_ctx_->parse_status_); + EXPECT_EQ(DNS_RESPONSE_CODE_FORMAT_ERROR, response_parser_->getQueryResponseCode()); +} + +TEST_F(DnsFilterTest, RandomizeFirstAnswerTest) { + InSequence s; + + setup(forward_query_off_config); + const std::string domain("www.foo16.com"); + + const std::string query = + Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN); + ASSERT_FALSE(query.empty()); + sendQueryFromClient("10.0.0.1:1000", query); + + query_ctx_ = response_parser_->createQueryContext(udp_response_); + EXPECT_TRUE(query_ctx_->parse_status_); + EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode()); + + // Although 16 addresses are defined, only 8 are returned + EXPECT_EQ(8, query_ctx_->answers_.size()); + + // We shuffle the list of addresses when we read the config, and in the case of more than + // 8 defined addresses, we randomize the initial starting index. We should not end up with + // the first answer being the first defined address, or the answers appearing in the same + // order as they are defined. 
+ const std::list defined_order{"10.0.16.1", "10.0.16.2", "10.0.16.3", "10.0.16.4", + "10.0.16.5", "10.0.16.6", "10.0.16.7", "10.0.16.8"}; + auto defined_answer_iter = defined_order.begin(); + for (const auto& answer : query_ctx_->answers_) { + const auto resolved_address = answer.second->ip_addr_->ip()->addressAsString(); + EXPECT_NE(0L, resolved_address.compare(*defined_answer_iter++)); + } +} } // namespace } // namespace DnsFilter } // namespace UdpFilters diff --git a/test/extensions/filters/udp/dns_filter/dns_filter_test_utils.cc b/test/extensions/filters/udp/dns_filter/dns_filter_test_utils.cc index 3efbeeefdbb3..ca184dbf601b 100644 --- a/test/extensions/filters/udp/dns_filter/dns_filter_test_utils.cc +++ b/test/extensions/filters/udp/dns_filter/dns_filter_test_utils.cc @@ -10,6 +10,14 @@ namespace UdpFilters { namespace DnsFilter { namespace Utils { +std::string buildQueryFromBytes(const char* bytes, const size_t count) { + std::string query; + for (size_t i = 0; i < count; i++) { + query.append(static_cast(&bytes[i]), 1); + } + return query; +} + std::string buildQueryForDomain(const std::string& name, uint16_t rec_type, uint16_t rec_class) { Runtime::RandomGeneratorImpl random_; struct DnsMessageParser::DnsHeader query {}; @@ -43,21 +51,36 @@ std::string buildQueryForDomain(const std::string& name, uint16_t rec_type, uint query.authority_rrs = 0; query.additional_rrs = 0; - Buffer::OwnedImpl buffer_; - buffer_.writeBEInt(query.id); + Buffer::OwnedImpl buffer; + buffer.writeBEInt(query.id); uint16_t flags; ::memcpy(&flags, static_cast(&query.flags), sizeof(uint16_t)); - buffer_.writeBEInt(flags); + buffer.writeBEInt(flags); - buffer_.writeBEInt(query.questions); - buffer_.writeBEInt(query.answers); - buffer_.writeBEInt(query.authority_rrs); - buffer_.writeBEInt(query.additional_rrs); + buffer.writeBEInt(query.questions); + buffer.writeBEInt(query.answers); + buffer.writeBEInt(query.authority_rrs); + buffer.writeBEInt(query.additional_rrs); DnsQueryRecord query_rec(name, rec_type, rec_class); - query_rec.serialize(buffer_); - return buffer_.toString(); + query_rec.serialize(buffer); + return buffer.toString(); +} + +void verifyAddress(const std::list& addresses, const DnsAnswerRecordPtr& answer) { + ASSERT_TRUE(answer != nullptr); + ASSERT_TRUE(answer->ip_addr_ != nullptr); + + const auto resolved_address = answer->ip_addr_->ip()->addressAsString(); + if (addresses.size() == 1) { + const auto expected = addresses.begin(); + ASSERT_EQ(*expected, resolved_address); + return; + } + + const auto iter = std::find(addresses.begin(), addresses.end(), resolved_address); + ASSERT_TRUE(iter != addresses.end()); } } // namespace Utils diff --git a/test/extensions/filters/udp/dns_filter/dns_filter_test_utils.h b/test/extensions/filters/udp/dns_filter/dns_filter_test_utils.h index d27f5e000438..f3bced0ff262 100644 --- a/test/extensions/filters/udp/dns_filter/dns_filter_test_utils.h +++ b/test/extensions/filters/udp/dns_filter/dns_filter_test_utils.h @@ -10,7 +10,10 @@ namespace Utils { static constexpr uint64_t MAX_UDP_DNS_SIZE{512}; +std::string buildQueryFromBytes(const char* bytes, const size_t count); std::string buildQueryForDomain(const std::string& name, uint16_t rec_type, uint16_t rec_class); +void verifyAddress(const std::list& addresses, const DnsAnswerRecordPtr& answer); +size_t getResponseQueryCount(DnsMessageParser& parser); } // namespace Utils } // namespace DnsFilter From f61b096f6a2dd3a9c74b9a9369a6ea398dbe1f0f Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Tue, 26 May 2020 
12:37:06 -0400 Subject: [PATCH 225/909] test: restoring prior behavior for http2 flood tests (#11319) Signed-off-by: Alyssa Wilk --- test/integration/http2_integration_test.cc | 16 ++++++++++++++++ test/integration/http2_integration_test.h | 6 +++++- 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/test/integration/http2_integration_test.cc b/test/integration/http2_integration_test.cc index 48a3b54738df..2665a7a90050 100644 --- a/test/integration/http2_integration_test.cc +++ b/test/integration/http2_integration_test.cc @@ -1563,6 +1563,8 @@ void Http2FloodMitigationTest::floodServer(const Http2Frame& frame, const std::s EXPECT_LE(total_bytes_sent, TransmitThreshold) << "Flood mitigation is broken."; EXPECT_EQ(1, test_server_->counter(flood_stat)->value()); + EXPECT_EQ(1, + test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value()); } // Verify that the server detects the flood using specified request parameters. @@ -1586,6 +1588,8 @@ void Http2FloodMitigationTest::floodServer(absl::string_view host, absl::string_ if (!flood_stat.empty()) { EXPECT_EQ(1, test_server_->counter(flood_stat)->value()); } + EXPECT_EQ(1, + test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value()); } INSTANTIATE_TEST_SUITE_P(IpVersions, Http2FloodMitigationTest, @@ -1653,6 +1657,8 @@ TEST_P(Http2FloodMitigationTest, RST_STREAM) { } EXPECT_LE(total_bytes_sent, TransmitThreshold) << "Flood mitigation is broken."; EXPECT_EQ(1, test_server_->counter("http2.outbound_control_flood")->value()); + EXPECT_EQ(1, + test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value()); } // Verify that the server stop reading downstream connection on protocol error. @@ -1688,6 +1694,8 @@ TEST_P(Http2FloodMitigationTest, EmptyHeaders) { tcp_client_->waitForDisconnect(); EXPECT_EQ(1, test_server_->counter("http2.inbound_empty_frames_flood")->value()); + EXPECT_EQ(1, + test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value()); } TEST_P(Http2FloodMitigationTest, EmptyHeadersContinuation) { @@ -1705,6 +1713,8 @@ TEST_P(Http2FloodMitigationTest, EmptyHeadersContinuation) { tcp_client_->waitForDisconnect(); EXPECT_EQ(1, test_server_->counter("http2.inbound_empty_frames_flood")->value()); + EXPECT_EQ(1, + test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value()); } TEST_P(Http2FloodMitigationTest, EmptyData) { @@ -1723,6 +1733,8 @@ TEST_P(Http2FloodMitigationTest, EmptyData) { tcp_client_->waitForDisconnect(); EXPECT_EQ(1, test_server_->counter("http2.inbound_empty_frames_flood")->value()); + EXPECT_EQ(1, + test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value()); } TEST_P(Http2FloodMitigationTest, PriorityIdleStream) { @@ -1787,6 +1799,8 @@ TEST_P(Http2FloodMitigationTest, ZerolenHeader) { tcp_client_->waitForDisconnect(); EXPECT_EQ(1, test_server_->counter("http2.rx_messaging_error")->value()); + EXPECT_EQ(1, + test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value()); EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("http2.invalid.header.field")); // expect a downstream protocol error. 
EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("DPE")); @@ -1823,6 +1837,8 @@ TEST_P(Http2FloodMitigationTest, ZerolenHeaderAllowed) { tcp_client_->close(); EXPECT_EQ(1, test_server_->counter("http2.rx_messaging_error")->value()); + EXPECT_EQ(0, + test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value()); EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("http2.invalid.header.field")); // expect Downstream Protocol Error EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("DPE")); diff --git a/test/integration/http2_integration_test.h b/test/integration/http2_integration_test.h index d19d2d6436b3..88b019b57f1b 100644 --- a/test/integration/http2_integration_test.h +++ b/test/integration/http2_integration_test.h @@ -70,7 +70,11 @@ class Http2MetadataIntegrationTest : public Http2IntegrationTest { class Http2FloodMitigationTest : public testing::TestWithParam, public HttpIntegrationTest { public: - Http2FloodMitigationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, GetParam()) {} + Http2FloodMitigationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, GetParam()) { + config_helper_.addConfigModifier( + [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) { hcm.mutable_delayed_close_timeout()->set_seconds(1); }); + } protected: void startHttp2Session(); From edbb85a3eb0a3f0e5a23be41d4acb4cce4beb9f5 Mon Sep 17 00:00:00 2001 From: Rama Chavali Date: Tue, 26 May 2020 22:07:29 +0530 Subject: [PATCH 226/909] docs: clarify upstream_rq_pending_failure_eject stat (#11315) Signed-off-by: Rama Chavali --- .../configuration/upstream/cluster_manager/cluster_stats.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/root/configuration/upstream/cluster_manager/cluster_stats.rst b/docs/root/configuration/upstream/cluster_manager/cluster_stats.rst index e318a9778cce..ffe1516bccb5 100644 --- a/docs/root/configuration/upstream/cluster_manager/cluster_stats.rst +++ b/docs/root/configuration/upstream/cluster_manager/cluster_stats.rst @@ -63,8 +63,8 @@ Every cluster has a statistics tree rooted at *cluster..* with the followi upstream_rq_total, Counter, Total requests upstream_rq_active, Gauge, Total active requests upstream_rq_pending_total, Counter, Total requests pending a connection pool connection - upstream_rq_pending_overflow, Counter, Total requests that overflowed connection pool circuit breaking and were failed - upstream_rq_pending_failure_eject, Counter, Total requests that were failed due to a connection pool connection failure + upstream_rq_pending_overflow, Counter, Total requests that overflowed connection pool or requests (mainly for HTTP/2) circuit breaking and were failed + upstream_rq_pending_failure_eject, Counter, Total requests that were failed due to a connection pool connection failure or remote connection termination upstream_rq_pending_active, Gauge, Total active requests pending a connection pool connection upstream_rq_cancelled, Counter, Total requests cancelled before obtaining a connection pool connection upstream_rq_maintenance_mode, Counter, Total requests that resulted in an immediate 503 due to :ref:`maintenance mode` From b6f72358bf892c66d7d4603a9773b6debdb3a99d Mon Sep 17 00:00:00 2001 From: Snow Pettersen Date: Tue, 26 May 2020 10:00:58 -0700 Subject: [PATCH 227/909] docs: split consumed headers list based on request/response consumption (#11200) Signed-off-by: Snow Pettersen --- .../http/http_filters/router_filter.rst | 96 
++++++++++--------- 1 file changed, 53 insertions(+), 43 deletions(-) diff --git a/docs/root/configuration/http/http_filters/router_filter.rst b/docs/root/configuration/http/http_filters/router_filter.rst index 5bf42b4ac7da..14638a2c3d14 100644 --- a/docs/root/configuration/http/http_filters/router_filter.rst +++ b/docs/root/configuration/http/http_filters/router_filter.rst @@ -13,8 +13,8 @@ redirection, the filter also handles retry, statistics, etc. .. _config_http_filters_router_headers_consumed: -HTTP headers (consumed) ------------------------ +HTTP headers (consumed from downstreams) +---------------------------------------- The router consumes and sets various HTTP headers both on the egress/request path as well as on the ingress/response path. They are documented in this section. @@ -59,7 +59,7 @@ A few notes on how Envoy does retries: x-envoy-retry-on ^^^^^^^^^^^^^^^^ -Setting this header on egress requests will cause Envoy to attempt to retry failed requests (number +Setting this header will cause Envoy to attempt to retry failed requests (number of retries defaults to 1 and can be controlled by :ref:`x-envoy-max-retries ` header or the :ref:`route config retry policy ` or the :ref:`virtual host retry policy `). @@ -132,9 +132,8 @@ By default, Envoy will *not* perform retries unless you've configured them per a x-envoy-retry-grpc-on ^^^^^^^^^^^^^^^^^^^^^ -Setting this header on egress requests will cause Envoy to attempt to retry failed requests (number of -retries defaults to 1, and can be controlled by -:ref:`x-envoy-max-retries ` +Setting this header will cause Envoy to attempt to retry failed requests (number of retries defaults +to 1, and can be controlled by :ref:`x-envoy-max-retries ` header or the :ref:`route config retry policy `) or the :ref:`virtual host retry policy `. gRPC retries are currently only supported for gRPC status codes in response headers. gRPC status codes in @@ -203,35 +202,29 @@ This header will only be honored for requests from internal clients. x-envoy-upstream-alt-stat-name ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Setting this header on egress requests will cause Envoy to emit upstream response code/timing -statistics to a dual stat tree. This can be useful for application level categories that Envoy -doesn't know about. The output tree is documented :ref:`here `. +Setting this header will cause Envoy to emit upstream response code/timing statistics to a dual stat tree. +This can be useful for application level categories that Envoy doesn't know about. The output tree +is documented :ref:`here `. This should not be confused with :ref:`alt_stat_name ` which is specified while defining the cluster and when provided specifies an alternative name for the cluster at the root of the statistic tree. -x-envoy-upstream-canary -^^^^^^^^^^^^^^^^^^^^^^^ - -If an upstream host sets this header, the router will use it to generate canary specific statistics. -The output tree is documented :ref:`here `. - .. _config_http_filters_router_x-envoy-upstream-rq-timeout-alt-response: x-envoy-upstream-rq-timeout-alt-response ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Setting this header on egress requests will cause Envoy to set a 204 response code (instead of 504) -in the event of a request timeout. The actual value of the header is ignored; only its presence -is considered. See also :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`. +Setting this header will cause Envoy to set a 204 response code (instead of 504) in the event of a request timeout. 
+The actual value of the header is ignored; only its presence is considered. See also +:ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`. .. _config_http_filters_router_x-envoy-upstream-rq-timeout-ms: x-envoy-upstream-rq-timeout-ms ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Setting this header on egress requests will cause Envoy to override the :ref:`route configuration timeout +Setting this header will cause Envoy to override the :ref:`route configuration timeout ` or gRPC client timeout set via `grpc-timeout header `_ by specifying :ref:`max_grpc_timeout `. The timeout must be specified in millisecond @@ -242,8 +235,8 @@ units. See also :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-tim x-envoy-upstream-rq-per-try-timeout-ms ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Setting this header on egress requests will cause Envoy to set a *per try* timeout on routed -requests. If a global route timeout is configured, this timeout must be less than the global route +Setting this header will cause Envoy to set a *per try* timeout on routed requests. +If a global route timeout is configured, this timeout must be less than the global route timeout (see :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`) or it is ignored. This allows a caller to set a tight per try timeout to allow for retries while maintaining a reasonable overall timeout. This timeout only applies before any part of the response is sent to @@ -252,15 +245,37 @@ the downstream, which normally happens after the upstream has sent response head x-envoy-hedge-on-per-try-timeout ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Setting this header on egress requests will cause Envoy to use a request -hedging strategy in the case of a per try timeout. This overrides the value set -in the :ref:`route configuration +Setting this header will cause Envoy to use a request hedging strategy in the case of a per try timeout. +This overrides the value set in the :ref:`route configuration `. This means that a retry will be issued without resetting the original request, leaving multiple upstream requests in flight. The value of the header should be "true" or "false", and is ignored if invalid. +.. _config_http_filters_router_x-envoy-decorator-operation: + +x-envoy-decorator-operation +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The value of this header will override any locally defined operation (span) name on the +server span generated by the tracing mechanism. + +HTTP response headers consumed from upstream +-------------------------------------------- + +x-envoy-decorator-operation +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The value of this header will override any locally defined operation (span) name on the +client span generated by the tracing mechanism. + +x-envoy-upstream-canary +^^^^^^^^^^^^^^^^^^^^^^^ + +If an upstream host sets this header, the router will use it to generate canary specific statistics. +The output tree is documented :ref:`here `. + .. _config_http_filters_router_x-envoy-immediate-health-check-fail: x-envoy-immediate-health-check-fail @@ -283,20 +298,10 @@ If this header is set by upstream, Envoy will not retry. Currently the value of looked at, only its presence. This header is set by :ref:`rate limit filter` when the request is rate limited. -.. _config_http_filters_router_x-envoy-decorator-operation: - -x-envoy-decorator-operation -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -If this header is present on ingress requests, its value will override any locally defined -operation (span) name on the server span generated by the tracing mechanism. 
Similarly, if -this header is present on an egress response, its value will override any locally defined -operation (span) name on the client span. - .. _config_http_filters_router_headers_set: -HTTP headers (set) ------------------- +HTTP request headers set on upstream calls +------------------------------------------ The router sets various HTTP headers both on the egress/request path as well as on the ingress/response path. They are documented in this section. @@ -331,13 +336,6 @@ timeout, e.g., early exit. This is set on internal requests and is either taken :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms` header or the :ref:`route timeout `, in that order. -x-envoy-upstream-service-time -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Contains the time in milliseconds spent by the upstream host processing the request. This is useful -if the client wants to determine service time compared to network latency. This header is set on -responses. - .. _config_http_filters_router_x-envoy-original-path: x-envoy-original-path @@ -348,6 +346,18 @@ or :ref:`regex_rewrite Date: Tue, 26 May 2020 14:24:48 -0400 Subject: [PATCH 228/909] http: moving tests over to new accessors (#11287) Risk Level: Low (test only) Testing: tests pass Docs Changes: n/a Release Notes: n/a Signed-off-by: Alyssa Wilk --- source/common/stats/histogram_impl.cc | 5 +- source/common/stats/histogram_impl.h | 2 +- .../config/http_subscription_test_harness.h | 11 +- .../config/subscription_factory_impl_test.cc | 8 +- test/common/grpc/common_test.cc | 82 ++++++------- test/common/http/async_client_impl_test.cc | 2 +- .../http/conn_manager_impl_fuzz_test.cc | 5 +- test/common/http/conn_manager_impl_test.cc | 72 ++++++------ test/common/http/conn_manager_utility_test.cc | 16 +-- test/common/http/header_map_impl_test.cc | 59 +++++----- .../request_id_extension_uuid_impl_test.cc | 2 +- test/common/http/utility_test.cc | 23 ++-- test/common/router/router_test.cc | 21 ++-- test/common/router/shadow_writer_impl_test.cc | 2 +- .../upstream/health_checker_impl_test.cc | 71 +++++------ .../http_grpc_access_log_integration_test.cc | 11 +- .../tcp_grpc_access_log_integration_test.cc | 7 +- .../aggregate/cluster_integration_test.cc | 4 +- .../extensions/common/aws/signer_impl_test.cc | 16 +-- .../ext_authz/ext_authz_http_impl_test.cc | 22 +--- ...tive_concurrency_filter_integration_test.h | 4 +- .../http/aws_lambda/aws_lambda_filter_test.cc | 13 +-- .../aws_request_signing_filter_test.cc | 2 +- .../buffer/buffer_filter_integration_test.cc | 10 +- .../filters/http/buffer/buffer_filter_test.cc | 9 +- .../compressor_filter_integration_test.cc | 30 +++-- .../http/csrf/csrf_filter_integration_test.cc | 22 ++-- .../proxy_filter_integration_test.cc | 4 +- .../ext_authz/ext_authz_integration_test.cc | 9 +- .../filters/http/ext_authz/ext_authz_test.cc | 5 +- .../json_transcoder_filter_test.cc | 2 +- .../grpc_web_filter_integration_test.cc | 2 +- .../http/grpc_web/grpc_web_filter_test.cc | 21 ++-- .../http/gzip/gzip_filter_integration_test.cc | 28 ++--- .../http/health_check/health_check_test.cc | 23 ++-- .../http/jwt_authn/filter_integration_test.cc | 22 ++-- .../filters/http/lua/lua_integration_test.cc | 10 +- .../ratelimit/ratelimit_integration_test.cc | 12 +- .../http/rbac/rbac_filter_integration_test.cc | 18 +-- .../squash/squash_filter_integration_test.cc | 21 ++-- .../http/tap/tap_filter_integration_test.cc | 2 +- .../quiche/envoy_quic_client_session_test.cc | 2 +- .../quiche/envoy_quic_client_stream_test.cc | 4 +- 
.../quiche/envoy_quic_server_session_test.cc | 42 +++---- .../quiche/envoy_quic_server_stream_test.cc | 28 ++--- .../quiche/envoy_quic_utils_test.cc | 10 +- .../integration/quic_http_integration_test.cc | 4 +- .../stats_sinks/hystrix/hystrix_test.cc | 2 +- .../metrics_service_integration_test.cc | 7 +- .../datadog/datadog_tracer_impl_test.cc | 5 +- .../lightstep/lightstep_tracer_impl_test.cc | 40 +++---- .../tracers/zipkin/zipkin_tracer_impl_test.cc | 13 +-- .../tls/integration/ssl_integration_test.cc | 6 +- .../api_version_integration_test.cc | 2 +- test/integration/cds_integration_test.cc | 2 +- test/integration/eds_integration_test.cc | 4 +- .../filter_manager_integration_test.cc | 2 +- test/integration/hds_integration_test.cc | 12 +- test/integration/http2_integration_test.cc | 35 +++--- .../http2_upstream_integration_test.cc | 14 +-- test/integration/http_integration.cc | 68 +++++------ .../http_timeout_integration_test.cc | 18 +-- .../idle_timeout_integration_test.cc | 20 ++-- test/integration/integration_admin_test.cc | 10 +- test/integration/integration_admin_test.h | 2 +- test/integration/integration_test.cc | 16 +-- .../load_stats_integration_test.cc | 12 +- test/integration/overload_integration_test.cc | 14 +-- test/integration/protocol_integration_test.cc | 110 ++++++++---------- test/integration/redirect_integration_test.cc | 47 ++++---- test/integration/rtds_integration_test.cc | 2 +- .../sds_dynamic_integration_test.cc | 2 +- test/integration/server.cc | 2 +- .../tcp_tunneling_integration_test.cc | 2 +- ...transport_socket_match_integration_test.cc | 8 +- test/integration/vhds_integration_test.cc | 10 +- .../integration/websocket_integration_test.cc | 12 +- test/integration/xds_integration_test.cc | 4 +- test/integration/xfcc_integration_test.cc | 3 +- test/server/admin/server_info_handler_test.cc | 21 ++-- test/server/admin/stats_handler_test.cc | 6 +- 81 files changed, 615 insertions(+), 718 deletions(-) diff --git a/source/common/stats/histogram_impl.cc b/source/common/stats/histogram_impl.cc index b1d041882efe..3755f17fefc9 100644 --- a/source/common/stats/histogram_impl.cc +++ b/source/common/stats/histogram_impl.cc @@ -11,8 +11,9 @@ namespace Envoy { namespace Stats { HistogramStatisticsImpl::HistogramStatisticsImpl(const histogram_t* histogram_ptr) - : computed_quantiles_(supportedQuantiles().size(), 0.0) { - hist_approx_quantile(histogram_ptr, supportedQuantiles().data(), supportedQuantiles().size(), + : computed_quantiles_(HistogramStatisticsImpl::supportedQuantiles().size(), 0.0) { + hist_approx_quantile(histogram_ptr, supportedQuantiles().data(), + HistogramStatisticsImpl::supportedQuantiles().size(), computed_quantiles_.data()); sample_count_ = hist_sample_count(histogram_ptr); diff --git a/source/common/stats/histogram_impl.h b/source/common/stats/histogram_impl.h index 332fca0e2b07..657bbbdf357c 100644 --- a/source/common/stats/histogram_impl.h +++ b/source/common/stats/histogram_impl.h @@ -33,7 +33,7 @@ class HistogramStatisticsImpl : public HistogramStatistics, NonCopyable { // HistogramStatistics std::string quantileSummary() const override; std::string bucketSummary() const override; - const std::vector& supportedQuantiles() const override; + const std::vector& supportedQuantiles() const final; const std::vector& computedQuantiles() const override { return computed_quantiles_; } const std::vector& supportedBuckets() const override; const std::vector& computedBuckets() const override { return computed_buckets_; } diff --git 
a/test/common/config/http_subscription_test_harness.h b/test/common/config/http_subscription_test_harness.h index af798a4efac8..0165c06edabd 100644 --- a/test/common/config/http_subscription_test_harness.h +++ b/test/common/config/http_subscription_test_harness.h @@ -70,12 +70,11 @@ class HttpSubscriptionTestHarness : public SubscriptionTestHarness { Http::AsyncClient::Callbacks& callbacks, const Http::AsyncClient::RequestOptions&) { http_callbacks_ = &callbacks; - EXPECT_EQ("POST", std::string(request->headers().Method()->value().getStringView())); + EXPECT_EQ("POST", request->headers().getMethodValue()); EXPECT_EQ(Http::Headers::get().ContentTypeValues.Json, - std::string(request->headers().ContentType()->value().getStringView())); - EXPECT_EQ("eds_cluster", std::string(request->headers().Host()->value().getStringView())); - EXPECT_EQ("/v2/discovery:endpoints", - std::string(request->headers().Path()->value().getStringView())); + request->headers().getContentTypeValue()); + EXPECT_EQ("eds_cluster", request->headers().getHostValue()); + EXPECT_EQ("/v2/discovery:endpoints", request->headers().getPathValue()); std::string expected_request = "{"; if (!version_.empty()) { expected_request += "\"version_info\":\"" + version + "\","; @@ -98,7 +97,7 @@ class HttpSubscriptionTestHarness : public SubscriptionTestHarness { expected_request += "}"; EXPECT_EQ(expected_request, request->bodyAsString()); EXPECT_EQ(fmt::format_int(expected_request.size()).str(), - std::string(request->headers().ContentLength()->value().getStringView())); + request->headers().getContentLengthValue()); request_in_progress_ = true; return &http_request_; })); diff --git a/test/common/config/subscription_factory_impl_test.cc b/test/common/config/subscription_factory_impl_test.cc index 7402654068f9..7cef3c2ee06d 100644 --- a/test/common/config/subscription_factory_impl_test.cc +++ b/test/common/config/subscription_factory_impl_test.cc @@ -262,11 +262,9 @@ TEST_F(SubscriptionFactoryTest, HttpSubscription) { EXPECT_CALL(cm_.async_client_, send_(_, _, _)) .WillOnce(Invoke([this](Http::RequestMessagePtr& request, Http::AsyncClient::Callbacks&, const Http::AsyncClient::RequestOptions&) { - EXPECT_EQ("POST", std::string(request->headers().Method()->value().getStringView())); - EXPECT_EQ("static_cluster", - std::string(request->headers().Host()->value().getStringView())); - EXPECT_EQ("/v2/discovery:endpoints", - std::string(request->headers().Path()->value().getStringView())); + EXPECT_EQ("POST", request->headers().getMethodValue()); + EXPECT_EQ("static_cluster", request->headers().getHostValue()); + EXPECT_EQ("/v2/discovery:endpoints", request->headers().getPathValue()); return &http_request_; })); EXPECT_CALL(http_request_, cancel()); diff --git a/test/common/grpc/common_test.cc b/test/common/grpc/common_test.cc index d96d07b8eacc..055c3358dd08 100644 --- a/test/common/grpc/common_test.cc +++ b/test/common/grpc/common_test.cc @@ -129,25 +129,25 @@ TEST(GrpcContextTest, ToGrpcTimeout) { Http::TestRequestHeaderMapImpl headers; Common::toGrpcTimeout(std::chrono::milliseconds(0UL), headers); - EXPECT_EQ("0m", headers.GrpcTimeout()->value().getStringView()); + EXPECT_EQ("0m", headers.getGrpcTimeoutValue()); Common::toGrpcTimeout(std::chrono::milliseconds(1UL), headers); - EXPECT_EQ("1m", headers.GrpcTimeout()->value().getStringView()); + EXPECT_EQ("1m", headers.getGrpcTimeoutValue()); Common::toGrpcTimeout(std::chrono::milliseconds(100000000UL), headers); - EXPECT_EQ("100000S", headers.GrpcTimeout()->value().getStringView()); + 
EXPECT_EQ("100000S", headers.getGrpcTimeoutValue()); Common::toGrpcTimeout(std::chrono::milliseconds(100000000000UL), headers); - EXPECT_EQ("1666666M", headers.GrpcTimeout()->value().getStringView()); + EXPECT_EQ("1666666M", headers.getGrpcTimeoutValue()); Common::toGrpcTimeout(std::chrono::milliseconds(9000000000000UL), headers); - EXPECT_EQ("2500000H", headers.GrpcTimeout()->value().getStringView()); + EXPECT_EQ("2500000H", headers.getGrpcTimeoutValue()); Common::toGrpcTimeout(std::chrono::milliseconds(360000000000000UL), headers); - EXPECT_EQ("99999999H", headers.GrpcTimeout()->value().getStringView()); + EXPECT_EQ("99999999H", headers.getGrpcTimeoutValue()); Common::toGrpcTimeout(std::chrono::milliseconds(UINT64_MAX), headers); - EXPECT_EQ("99999999H", headers.GrpcTimeout()->value().getStringView()); + EXPECT_EQ("99999999H", headers.getGrpcTimeoutValue()); } TEST(GrpcContextTest, PrepareHeaders) { @@ -155,71 +155,71 @@ TEST(GrpcContextTest, PrepareHeaders) { Http::RequestMessagePtr message = Common::prepareHeaders("cluster", "service_name", "method_name", absl::nullopt); - EXPECT_EQ("POST", message->headers().Method()->value().getStringView()); - EXPECT_EQ("/service_name/method_name", message->headers().Path()->value().getStringView()); - EXPECT_EQ("cluster", message->headers().Host()->value().getStringView()); - EXPECT_EQ("application/grpc", message->headers().ContentType()->value().getStringView()); + EXPECT_EQ("POST", message->headers().getMethodValue()); + EXPECT_EQ("/service_name/method_name", message->headers().getPathValue()); + EXPECT_EQ("cluster", message->headers().getHostValue()); + EXPECT_EQ("application/grpc", message->headers().getContentTypeValue()); } { Http::RequestMessagePtr message = Common::prepareHeaders( "cluster", "service_name", "method_name", absl::optional(1)); - EXPECT_EQ("POST", message->headers().Method()->value().getStringView()); - EXPECT_EQ("/service_name/method_name", message->headers().Path()->value().getStringView()); - EXPECT_EQ("cluster", message->headers().Host()->value().getStringView()); - EXPECT_EQ("application/grpc", message->headers().ContentType()->value().getStringView()); - EXPECT_EQ("1m", message->headers().GrpcTimeout()->value().getStringView()); + EXPECT_EQ("POST", message->headers().getMethodValue()); + EXPECT_EQ("/service_name/method_name", message->headers().getPathValue()); + EXPECT_EQ("cluster", message->headers().getHostValue()); + EXPECT_EQ("application/grpc", message->headers().getContentTypeValue()); + EXPECT_EQ("1m", message->headers().getGrpcTimeoutValue()); } { Http::RequestMessagePtr message = Common::prepareHeaders( "cluster", "service_name", "method_name", absl::optional(1)); - EXPECT_EQ("POST", message->headers().Method()->value().getStringView()); - EXPECT_EQ("/service_name/method_name", message->headers().Path()->value().getStringView()); - EXPECT_EQ("cluster", message->headers().Host()->value().getStringView()); - EXPECT_EQ("application/grpc", message->headers().ContentType()->value().getStringView()); - EXPECT_EQ("1000m", message->headers().GrpcTimeout()->value().getStringView()); + EXPECT_EQ("POST", message->headers().getMethodValue()); + EXPECT_EQ("/service_name/method_name", message->headers().getPathValue()); + EXPECT_EQ("cluster", message->headers().getHostValue()); + EXPECT_EQ("application/grpc", message->headers().getContentTypeValue()); + EXPECT_EQ("1000m", message->headers().getGrpcTimeoutValue()); } { Http::RequestMessagePtr message = Common::prepareHeaders( "cluster", "service_name", "method_name", 
absl::optional(1)); - EXPECT_EQ("POST", message->headers().Method()->value().getStringView()); - EXPECT_EQ("/service_name/method_name", message->headers().Path()->value().getStringView()); - EXPECT_EQ("cluster", message->headers().Host()->value().getStringView()); - EXPECT_EQ("application/grpc", message->headers().ContentType()->value().getStringView()); - EXPECT_EQ("60000m", message->headers().GrpcTimeout()->value().getStringView()); + EXPECT_EQ("POST", message->headers().getMethodValue()); + EXPECT_EQ("/service_name/method_name", message->headers().getPathValue()); + EXPECT_EQ("cluster", message->headers().getHostValue()); + EXPECT_EQ("application/grpc", message->headers().getContentTypeValue()); + EXPECT_EQ("60000m", message->headers().getGrpcTimeoutValue()); } { Http::RequestMessagePtr message = Common::prepareHeaders( "cluster", "service_name", "method_name", absl::optional(1)); - EXPECT_EQ("POST", message->headers().Method()->value().getStringView()); - EXPECT_EQ("/service_name/method_name", message->headers().Path()->value().getStringView()); - EXPECT_EQ("cluster", message->headers().Host()->value().getStringView()); - EXPECT_EQ("application/grpc", message->headers().ContentType()->value().getStringView()); - EXPECT_EQ("3600000m", message->headers().GrpcTimeout()->value().getStringView()); + EXPECT_EQ("POST", message->headers().getMethodValue()); + EXPECT_EQ("/service_name/method_name", message->headers().getPathValue()); + EXPECT_EQ("cluster", message->headers().getHostValue()); + EXPECT_EQ("application/grpc", message->headers().getContentTypeValue()); + EXPECT_EQ("3600000m", message->headers().getGrpcTimeoutValue()); } { Http::RequestMessagePtr message = Common::prepareHeaders( "cluster", "service_name", "method_name", absl::optional(100000000)); - EXPECT_EQ("POST", message->headers().Method()->value().getStringView()); - EXPECT_EQ("/service_name/method_name", message->headers().Path()->value().getStringView()); - EXPECT_EQ("cluster", message->headers().Host()->value().getStringView()); - EXPECT_EQ("application/grpc", message->headers().ContentType()->value().getStringView()); - EXPECT_EQ("99999999H", message->headers().GrpcTimeout()->value().getStringView()); + EXPECT_EQ("POST", message->headers().getMethodValue()); + EXPECT_EQ("/service_name/method_name", message->headers().getPathValue()); + EXPECT_EQ("cluster", message->headers().getHostValue()); + EXPECT_EQ("application/grpc", message->headers().getContentTypeValue()); + EXPECT_EQ("99999999H", message->headers().getGrpcTimeoutValue()); } { Http::RequestMessagePtr message = Common::prepareHeaders("cluster", "service_name", "method_name", absl::optional(100000000000)); - EXPECT_EQ("POST", message->headers().Method()->value().getStringView()); - EXPECT_EQ("/service_name/method_name", message->headers().Path()->value().getStringView()); - EXPECT_EQ("cluster", message->headers().Host()->value().getStringView()); - EXPECT_EQ("application/grpc", message->headers().ContentType()->value().getStringView()); - EXPECT_EQ("1666666M", message->headers().GrpcTimeout()->value().getStringView()); + EXPECT_EQ("POST", message->headers().getMethodValue()); + EXPECT_EQ("/service_name/method_name", message->headers().getPathValue()); + EXPECT_EQ("cluster", message->headers().getHostValue()); + EXPECT_EQ("application/grpc", message->headers().getContentTypeValue()); + EXPECT_EQ("1666666M", message->headers().getGrpcTimeoutValue()); } } diff --git a/test/common/http/async_client_impl_test.cc b/test/common/http/async_client_impl_test.cc index 
e21c496e89a5..35a93450c732 100644 --- a/test/common/http/async_client_impl_test.cc +++ b/test/common/http/async_client_impl_test.cc @@ -67,7 +67,7 @@ class AsyncClientImplTest : public testing::Test { bool end_stream) { EXPECT_CALL(callbacks, onHeaders_(_, end_stream)) .WillOnce(Invoke([code](ResponseHeaderMap& headers, bool) -> void { - EXPECT_EQ(std::to_string(code), headers.Status()->value().getStringView()); + EXPECT_EQ(std::to_string(code), headers.getStatusValue()); })); } diff --git a/test/common/http/conn_manager_impl_fuzz_test.cc b/test/common/http/conn_manager_impl_fuzz_test.cc index 377770669ebb..61e496f85c54 100644 --- a/test/common/http/conn_manager_impl_fuzz_test.cc +++ b/test/common/http/conn_manager_impl_fuzz_test.cc @@ -237,11 +237,10 @@ class FuzzStream { headers->setReferenceKey(Headers::get().Method, "GET"); } if (headers->Host() != nullptr && - !HeaderUtility::authorityIsValid(headers->Host()->value().getStringView())) { + !HeaderUtility::authorityIsValid(headers->getHostValue())) { // Sanitize host header so we don't fail at ASSERTs that verify header sanity checks // which should have been performed by the codec. - headers->setHost( - Fuzz::replaceInvalidHostCharacters(headers->Host()->value().getStringView())); + headers->setHost(Fuzz::replaceInvalidHostCharacters(headers->getHostValue())); } // If sendLocalReply is called: ON_CALL(encoder_, encodeHeaders(_, true)) diff --git a/test/common/http/conn_manager_impl_test.cc b/test/common/http/conn_manager_impl_test.cc index a763da3c0af9..a95be8010898 100644 --- a/test/common/http/conn_manager_impl_test.cc +++ b/test/common/http/conn_manager_impl_test.cc @@ -437,7 +437,7 @@ TEST_F(HttpConnectionManagerImplTest, HeaderOnlyRequestAndResponse) { .Times(2) .WillRepeatedly(Invoke([&](RequestHeaderMap& headers, bool) -> FilterHeadersStatus { EXPECT_NE(nullptr, headers.ForwardedFor()); - EXPECT_EQ("http", headers.ForwardedProto()->value().getStringView()); + EXPECT_EQ("http", headers.getForwardedProtoValue()); if (headers.Path()->value() == "/healthcheck") { filter->callbacks_->streamInfo().healthCheck(true); } @@ -502,7 +502,7 @@ TEST_F(HttpConnectionManagerImplTest, 100ContinueResponse) { EXPECT_CALL(*filter, decodeHeaders(_, true)) .WillRepeatedly(Invoke([&](RequestHeaderMap& headers, bool) -> FilterHeadersStatus { EXPECT_NE(nullptr, headers.ForwardedFor()); - EXPECT_EQ("http", headers.ForwardedProto()->value().getStringView()); + EXPECT_EQ("http", headers.getForwardedProtoValue()); return FilterHeadersStatus::StopIteration; })); @@ -699,7 +699,7 @@ TEST_F(HttpConnectionManagerImplTest, ServerHeaderOverwritten) { sendRequestHeadersAndData(); const ResponseHeaderMap* altered_headers = sendResponseHeaders( ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}, {"server", "foo"}}}); - EXPECT_EQ("custom-value", altered_headers->Server()->value().getStringView()); + EXPECT_EQ("custom-value", altered_headers->getServerValue()); } // When configured APPEND_IF_ABSENT if the server header is present it will be retained. @@ -711,7 +711,7 @@ TEST_F(HttpConnectionManagerImplTest, ServerHeaderAppendPresent) { sendRequestHeadersAndData(); const ResponseHeaderMap* altered_headers = sendResponseHeaders( ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}, {"server", "foo"}}}); - EXPECT_EQ("foo", altered_headers->Server()->value().getStringView()); + EXPECT_EQ("foo", altered_headers->getServerValue()); } // When configured APPEND_IF_ABSENT if the server header is absent the server name will be set. 
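The mechanical rewrite this patch applies is the same in every hunk: a chain of `Header()->value().getStringView()` calls is collapsed into the corresponding `get*Value()` convenience accessor, which also avoids dereferencing a header entry that may be null. A minimal sketch of the before/after shape, assuming Envoy's `Http::TestRequestHeaderMapImpl` test helper and GoogleTest are available; the test name, header values, and include path below are illustrative and are not part of the patch itself.

#include "test/test_common/utility.h" // Http::TestRequestHeaderMapImpl (assumed path)

#include "gtest/gtest.h"

namespace Envoy {

// Illustrative only: contrasts the old and new accessor styles touched by this patch.
TEST(HeaderAccessorMigrationSketch, OldVersusNew) {
  Http::TestRequestHeaderMapImpl headers{{":method", "GET"}, {":path", "/ping"}};

  // Old style: look up the header entry, then take its value as a string view.
  // The intermediate pointer must be non-null, so absent headers need a guard.
  ASSERT_NE(nullptr, headers.Method());
  EXPECT_EQ("GET", headers.Method()->value().getStringView());

  // New style: a single accessor that yields the value (an empty view when the
  // header is absent), so no null check is required at the call site.
  EXPECT_EQ("GET", headers.getMethodValue());
  EXPECT_EQ("/ping", headers.getPathValue());
}

} // namespace Envoy

The same shape applies to the response-header and gRPC cases seen in the surrounding hunks, e.g. `getStatusValue()`, `getServerValue()`, and `getGrpcTimeoutValue()` replacing their `->value().getStringView()` chains.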
@@ -723,7 +723,7 @@ TEST_F(HttpConnectionManagerImplTest, ServerHeaderAppendAbsent) { sendRequestHeadersAndData(); const ResponseHeaderMap* altered_headers = sendResponseHeaders(ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}); - EXPECT_EQ("custom-value", altered_headers->Server()->value().getStringView()); + EXPECT_EQ("custom-value", altered_headers->getServerValue()); } // When configured PASS_THROUGH, the server name will pass through. @@ -735,7 +735,7 @@ TEST_F(HttpConnectionManagerImplTest, ServerHeaderPassthroughPresent) { sendRequestHeadersAndData(); const ResponseHeaderMap* altered_headers = sendResponseHeaders( ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}, {"server", "foo"}}}); - EXPECT_EQ("foo", altered_headers->Server()->value().getStringView()); + EXPECT_EQ("foo", altered_headers->getServerValue()); } // When configured PASS_THROUGH, the server header will not be added if absent. @@ -775,7 +775,7 @@ TEST_F(HttpConnectionManagerImplTest, InvalidPathWithDualFilter) { EXPECT_CALL(*filter, encodeHeaders(_, true)); EXPECT_CALL(response_encoder_, encodeHeaders(_, true)) .WillOnce(Invoke([&](const ResponseHeaderMap& headers, bool) -> void { - EXPECT_EQ("404", headers.Status()->value().getStringView()); + EXPECT_EQ("404", headers.getStatusValue()); EXPECT_EQ("absolute_path_rejected", filter->decoder_callbacks_->streamInfo().responseCodeDetails().value()); })); @@ -815,7 +815,7 @@ TEST_F(HttpConnectionManagerImplTest, PathFailedtoSanitize) { EXPECT_CALL(*filter, encodeHeaders(_, true)); EXPECT_CALL(response_encoder_, encodeHeaders(_, true)) .WillOnce(Invoke([&](const ResponseHeaderMap& headers, bool) -> void { - EXPECT_EQ("400", headers.Status()->value().getStringView()); + EXPECT_EQ("400", headers.getStatusValue()); EXPECT_EQ("path_normalization_failed", filter->decoder_callbacks_->streamInfo().responseCodeDetails().value()); })); @@ -843,7 +843,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterShouldUseSantizedPath) { EXPECT_CALL(*filter, decodeHeaders(_, true)) .WillRepeatedly(Invoke([&](RequestHeaderMap& header_map, bool) -> FilterHeadersStatus { - EXPECT_EQ(normalized_path, header_map.Path()->value().getStringView()); + EXPECT_EQ(normalized_path, header_map.getPathValue()); return FilterHeadersStatus::StopIteration; })); @@ -889,7 +889,7 @@ TEST_F(HttpConnectionManagerImplTest, RouteShouldUseSantizedPath) { EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _, _)) .WillOnce(Invoke([&](const Router::RouteCallback&, const Http::RequestHeaderMap& header_map, const StreamInfo::StreamInfo&, uint64_t) { - EXPECT_EQ(normalized_path, header_map.Path()->value().getStringView()); + EXPECT_EQ(normalized_path, header_map.getPathValue()); return route; })); EXPECT_CALL(filter_factory_, createFilterChain(_)) @@ -1113,7 +1113,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterShouldUseNormalizedHost) { EXPECT_CALL(*filter, decodeHeaders(_, true)) .WillRepeatedly(Invoke([&](RequestHeaderMap& header_map, bool) -> FilterHeadersStatus { - EXPECT_EQ(normalized_host, header_map.Host()->value().getStringView()); + EXPECT_EQ(normalized_host, header_map.getHostValue()); return FilterHeadersStatus::StopIteration; })); @@ -1159,7 +1159,7 @@ TEST_F(HttpConnectionManagerImplTest, RouteShouldUseNormalizedHost) { EXPECT_CALL(*route_config_provider_.route_config_, route(_, _, _, _)) .WillOnce(Invoke([&](const Router::RouteCallback&, const Http::RequestHeaderMap& header_map, const StreamInfo::StreamInfo&, uint64_t) { - EXPECT_EQ(normalized_host, 
header_map.Host()->value().getStringView()); + EXPECT_EQ(normalized_host, header_map.getHostValue()); return route; })); EXPECT_CALL(filter_factory_, createFilterChain(_)) @@ -1209,7 +1209,7 @@ TEST_F(HttpConnectionManagerImplTest, PreserveUpstreamDateDisabledDateSet) { {":status", "200"}, {"server", "foo"}, {"date", expected_date.c_str()}}}); ASSERT_TRUE(modified_headers); ASSERT_TRUE(modified_headers->Date()); - EXPECT_NE(expected_date, modified_headers->Date()->value().getStringView()); + EXPECT_NE(expected_date, modified_headers->getDateValue()); } TEST_F(HttpConnectionManagerImplTest, PreserveUpstreamDateEnabledDateSet) { @@ -1225,7 +1225,7 @@ TEST_F(HttpConnectionManagerImplTest, PreserveUpstreamDateEnabledDateSet) { {":status", "200"}, {"server", "foo"}, {"date", expected_date.c_str()}}}); ASSERT_TRUE(modified_headers); ASSERT_TRUE(modified_headers->Date()); - EXPECT_EQ(expected_date, modified_headers->Date()->value().getStringView()); + EXPECT_EQ(expected_date, modified_headers->getDateValue()); } TEST_F(HttpConnectionManagerImplTest, PreserveUpstreamDateDisabledDateFromCache) { @@ -1243,7 +1243,7 @@ TEST_F(HttpConnectionManagerImplTest, PreserveUpstreamDateDisabledDateFromCache) {":status", "200"}, {"server", "foo"}, {"date", expected_date.c_str()}}}); ASSERT_TRUE(modified_headers); ASSERT_TRUE(modified_headers->Date()); - EXPECT_EQ(expected_date, modified_headers->Date()->value().getStringView()); + EXPECT_EQ(expected_date, modified_headers->getDateValue()); } TEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlow) { @@ -1463,7 +1463,7 @@ TEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlowIngressDecorat // Verify decorator operation response header has been defined. EXPECT_CALL(encoder, encodeHeaders(_, true)) .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { - EXPECT_EQ("testOp", headers.EnvoyDecoratorOperation()->value().getStringView()); + EXPECT_EQ("testOp", headers.getEnvoyDecoratorOperationValue()); })); Buffer::OwnedImpl fake_input("1234"); @@ -1679,7 +1679,7 @@ TEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlowEgressDecorato .WillOnce(Invoke([](RequestHeaderMap& headers, bool) -> FilterHeadersStatus { EXPECT_NE(nullptr, headers.EnvoyDecoratorOperation()); // Verify that decorator operation has been set as request header. - EXPECT_EQ("testOp", headers.EnvoyDecoratorOperation()->value().getStringView()); + EXPECT_EQ("testOp", headers.getEnvoyDecoratorOperationValue()); return FilterHeadersStatus::StopIteration; })); @@ -2191,7 +2191,7 @@ TEST_F(HttpConnectionManagerImplTest, NoPath) { EXPECT_CALL(encoder, encodeHeaders(_, true)) .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { - EXPECT_EQ("404", headers.Status()->value().getStringView()); + EXPECT_EQ("404", headers.getStatusValue()); })); Buffer::OwnedImpl fake_input("1234"); @@ -2245,7 +2245,7 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutGlobal) { // 408 direct response after timeout. EXPECT_CALL(response_encoder_, encodeHeaders(_, false)) .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { - EXPECT_EQ("408", headers.Status()->value().getStringView()); + EXPECT_EQ("408", headers.getStatusValue()); })); std::string response_body; EXPECT_CALL(response_encoder_, encodeData(_, true)).WillOnce(AddBufferToString(&response_body)); @@ -2329,7 +2329,7 @@ TEST_F(HttpConnectionManagerImplTest, TestStreamIdleAccessLog) { // 408 direct response after timeout. 
EXPECT_CALL(response_encoder_, encodeHeaders(_, false)) .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { - EXPECT_EQ("408", headers.Status()->value().getStringView()); + EXPECT_EQ("408", headers.getStatusValue()); })); std::string response_body; @@ -2441,7 +2441,7 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutAfterDownstreamHeaders // 408 direct response after timeout. EXPECT_CALL(response_encoder_, encodeHeaders(_, false)) .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { - EXPECT_EQ("408", headers.Status()->value().getStringView()); + EXPECT_EQ("408", headers.getStatusValue()); })); std::string response_body; EXPECT_CALL(response_encoder_, encodeData(_, true)).WillOnce(AddBufferToString(&response_body)); @@ -2515,7 +2515,7 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutAfterDownstreamHeaders // 408 direct response after timeout. EXPECT_CALL(response_encoder_, encodeHeaders(_, false)) .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { - EXPECT_EQ("408", headers.Status()->value().getStringView()); + EXPECT_EQ("408", headers.getStatusValue()); })); std::string response_body; EXPECT_CALL(response_encoder_, encodeData(_, true)).WillOnce(AddBufferToString(&response_body)); @@ -2567,7 +2567,7 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutAfterUpstreamHeaders) // 200 upstream response. EXPECT_CALL(response_encoder_, encodeHeaders(_, false)) .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { - EXPECT_EQ("200", headers.Status()->value().getStringView()); + EXPECT_EQ("200", headers.getStatusValue()); })); Buffer::OwnedImpl fake_input("1234"); @@ -2636,7 +2636,7 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutAfterBidiData) { // 200 upstream response. 
EXPECT_CALL(response_encoder_, encodeHeaders(_, false)) .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { - EXPECT_EQ("200", headers.Status()->value().getStringView()); + EXPECT_EQ("200", headers.getStatusValue()); })); std::string response_body; @@ -2704,7 +2704,7 @@ TEST_F(HttpConnectionManagerImplTest, RequestTimeoutCallbackDisarmsAndReturns408 EXPECT_CALL(response_encoder_, encodeHeaders(_, false)) .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { - EXPECT_EQ("408", headers.Status()->value().getStringView()); + EXPECT_EQ("408", headers.getStatusValue()); })); EXPECT_CALL(response_encoder_, encodeData(_, true)).WillOnce(AddBufferToString(&response_body)); @@ -2942,7 +2942,7 @@ TEST_F(HttpConnectionManagerImplTest, Http10Rejected) { EXPECT_CALL(encoder, encodeHeaders(_, true)) .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { - EXPECT_EQ("426", headers.Status()->value().getStringView()); + EXPECT_EQ("426", headers.getStatusValue()); EXPECT_EQ("close", headers.getConnectionValue()); })); @@ -3070,7 +3070,7 @@ TEST_F(HttpConnectionManagerImplTest, RejectWebSocketOnNonWebSocketRoute) { EXPECT_CALL(encoder, encodeHeaders(_, true)) .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { - EXPECT_EQ("403", headers.Status()->value().getStringView()); + EXPECT_EQ("403", headers.getStatusValue()); })); Buffer::OwnedImpl fake_input("1234"); @@ -3211,7 +3211,7 @@ TEST_F(HttpConnectionManagerImplTest, ConnectLegacy) { EXPECT_CALL(encoder, encodeHeaders(_, _)) .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { - EXPECT_EQ("403", headers.Status()->value().getStringView()); + EXPECT_EQ("403", headers.getStatusValue()); })); // Kick off the incoming data. @@ -3323,7 +3323,7 @@ TEST_F(HttpConnectionManagerImplTest, DrainClose) { EXPECT_CALL(*filter, decodeHeaders(_, true)) .WillOnce(Invoke([](RequestHeaderMap& headers, bool) -> FilterHeadersStatus { EXPECT_NE(nullptr, headers.ForwardedFor()); - EXPECT_EQ("https", headers.ForwardedProto()->value().getStringView()); + EXPECT_EQ("https", headers.getForwardedProtoValue()); return FilterHeadersStatus::StopIteration; })); @@ -3383,7 +3383,7 @@ TEST_F(HttpConnectionManagerImplTest, ResponseBeforeRequestComplete) { EXPECT_CALL(response_encoder_, encodeHeaders(_, true)) .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { EXPECT_NE(nullptr, headers.Server()); - EXPECT_EQ("envoy-server-test", headers.Server()->value().getStringView()); + EXPECT_EQ("envoy-server-test", headers.getServerValue()); })); EXPECT_CALL(*decoder_filters_[0], onDestroy()); EXPECT_CALL(filter_callbacks_.connection_, @@ -3460,7 +3460,7 @@ TEST_F(HttpConnectionManagerImplTest, ResponseStartBeforeRequestComplete) { EXPECT_CALL(encoder, encodeHeaders(_, false)) .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { EXPECT_NE(nullptr, headers.Server()); - EXPECT_EQ("", headers.Server()->value().getStringView()); + EXPECT_EQ("", headers.getServerValue()); })); filter->callbacks_->encodeHeaders(std::move(response_headers), false); @@ -4781,7 +4781,7 @@ TEST_F(HttpConnectionManagerImplTest, HitResponseBufferLimitsBeforeHeaders) { EXPECT_CALL(response_encoder_, encodeHeaders(_, false)) .WillOnce(Invoke([&](const ResponseHeaderMap& headers, bool) -> FilterHeadersStatus { // Make sure this is a 500 - EXPECT_EQ("500", headers.Status()->value().getStringView()); + EXPECT_EQ("500", headers.getStatusValue()); // Make sure Envoy standard sanitization has been applied. 
EXPECT_TRUE(headers.Date() != nullptr); return FilterHeadersStatus::Continue; @@ -4847,7 +4847,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterHeadReply) { EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, true)) .WillOnce(Invoke([&](ResponseHeaderMap& headers, bool) -> FilterHeadersStatus { - EXPECT_EQ("11", headers.ContentLength()->value().getStringView()); + EXPECT_EQ("11", headers.getContentLengthValue()); return FilterHeadersStatus::Continue; })); EXPECT_CALL(*encoder_filters_[0], encodeComplete()); @@ -4888,7 +4888,7 @@ TEST_F(HttpConnectionManagerImplTest, ResetWithStoppedFilter) { EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false)) .WillOnce(Invoke([&](ResponseHeaderMap& headers, bool) -> FilterHeadersStatus { - EXPECT_EQ("11", headers.ContentLength()->value().getStringView()); + EXPECT_EQ("11", headers.getContentLengthValue()); return FilterHeadersStatus::Continue; })); EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); @@ -5615,7 +5615,7 @@ TEST_F(HttpConnectionManagerImplTest, NoNewStreamWhenOverloaded) { // 503 direct response when overloaded. EXPECT_CALL(response_encoder_, encodeHeaders(_, false)) .WillOnce(Invoke([](const ResponseHeaderMap& headers, bool) -> void { - EXPECT_EQ("503", headers.Status()->value().getStringView()); + EXPECT_EQ("503", headers.getStatusValue()); })); std::string response_body; EXPECT_CALL(response_encoder_, encodeData(_, true)).WillOnce(AddBufferToString(&response_body)); @@ -6060,7 +6060,7 @@ TEST_F(HttpConnectionManagerImplTest, HeaderOnlyRequestAndResponseUsingHttp3) { EXPECT_CALL(*filter, decodeHeaders(_, true)) .WillOnce(Invoke([&](RequestHeaderMap& headers, bool) -> FilterHeadersStatus { EXPECT_NE(nullptr, headers.ForwardedFor()); - EXPECT_EQ("http", headers.ForwardedProto()->value().getStringView()); + EXPECT_EQ("http", headers.getForwardedProtoValue()); return FilterHeadersStatus::StopIteration; })); diff --git a/test/common/http/conn_manager_utility_test.cc b/test/common/http/conn_manager_utility_test.cc index 12b01c207c2c..eca659e2989b 100644 --- a/test/common/http/conn_manager_utility_test.cc +++ b/test/common/http/conn_manager_utility_test.cc @@ -298,7 +298,7 @@ TEST_F(ConnectionManagerUtilityTest, SkipXffAppendPassThruUseRemoteAddress) { EXPECT_EQ((MutateRequestRet{"12.12.12.12:0", false}), callMutateRequestHeaders(headers, Protocol::Http2)); - EXPECT_EQ("198.51.100.1", headers.ForwardedFor()->value().getStringView()); + EXPECT_EQ("198.51.100.1", headers.getForwardedForValue()); } TEST_F(ConnectionManagerUtilityTest, PreserveForwardedProtoWhenInternal) { @@ -312,7 +312,7 @@ TEST_F(ConnectionManagerUtilityTest, PreserveForwardedProtoWhenInternal) { TestRequestHeaderMapImpl headers{{"x-forwarded-proto", "https"}}; callMutateRequestHeaders(headers, Protocol::Http2); - EXPECT_EQ("https", headers.ForwardedProto()->value().getStringView()); + EXPECT_EQ("https", headers.getForwardedProtoValue()); } TEST_F(ConnectionManagerUtilityTest, OverwriteForwardedProtoWhenExternal) { @@ -324,7 +324,7 @@ TEST_F(ConnectionManagerUtilityTest, OverwriteForwardedProtoWhenExternal) { ON_CALL(config_, localAddress()).WillByDefault(ReturnRef(local_address)); callMutateRequestHeaders(headers, Protocol::Http2); - EXPECT_EQ("http", headers.ForwardedProto()->value().getStringView()); + EXPECT_EQ("http", headers.getForwardedProtoValue()); } // Verify internal request and XFF is set when we are using remote address and the address is @@ -1464,7 +1464,7 @@ TEST_F(ConnectionManagerUtilityTest, SanitizePathRelativePAth) { TestRequestHeaderMapImpl 
header_map(original_headers); ConnectionManagerUtility::maybeNormalizePath(header_map, config_); - EXPECT_EQ(header_map.Path()->value().getStringView(), "/abc"); + EXPECT_EQ(header_map.getPathValue(), "/abc"); } // maybeNormalizePath() does not touch adjacent slashes by default. @@ -1476,7 +1476,7 @@ TEST_F(ConnectionManagerUtilityTest, MergeSlashesDefaultOff) { TestRequestHeaderMapImpl header_map(original_headers); ConnectionManagerUtility::maybeNormalizePath(header_map, config_); - EXPECT_EQ(header_map.Path()->value().getStringView(), "/xyz///abc"); + EXPECT_EQ(header_map.getPathValue(), "/xyz///abc"); } // maybeNormalizePath() merges adjacent slashes. @@ -1488,7 +1488,7 @@ TEST_F(ConnectionManagerUtilityTest, MergeSlashes) { TestRequestHeaderMapImpl header_map(original_headers); ConnectionManagerUtility::maybeNormalizePath(header_map, config_); - EXPECT_EQ(header_map.Path()->value().getStringView(), "/xyz/abc"); + EXPECT_EQ(header_map.getPathValue(), "/xyz/abc"); } // maybeNormalizePath() merges adjacent slashes if normalization if off. @@ -1500,7 +1500,7 @@ TEST_F(ConnectionManagerUtilityTest, MergeSlashesWithoutNormalization) { TestRequestHeaderMapImpl header_map(original_headers); ConnectionManagerUtility::maybeNormalizePath(header_map, config_); - EXPECT_EQ(header_map.Path()->value().getStringView(), "/xyz/../abc"); + EXPECT_EQ(header_map.getPathValue(), "/xyz/../abc"); } // maybeNormalizeHost() removes port part from host header. @@ -1511,7 +1511,7 @@ TEST_F(ConnectionManagerUtilityTest, RemovePort) { TestRequestHeaderMapImpl header_map(original_headers); ConnectionManagerUtility::maybeNormalizeHost(header_map, config_, 443); - EXPECT_EQ(header_map.Host()->value().getStringView(), "host"); + EXPECT_EQ(header_map.getHostValue(), "host"); } // test preserve_external_request_id true does not reset the passed requestId if passed diff --git a/test/common/http/header_map_impl_test.cc b/test/common/http/header_map_impl_test.cc index 5bb93d8300e3..a3a695941cd5 100644 --- a/test/common/http/header_map_impl_test.cc +++ b/test/common/http/header_map_impl_test.cc @@ -392,7 +392,7 @@ TEST(HeaderMapImplTest, InlineInsert) { EXPECT_FALSE(headers.empty()); EXPECT_EQ(1, headers.size()); EXPECT_EQ(":authority", headers.Host()->key().getStringView()); - EXPECT_EQ("hello", headers.Host()->value().getStringView()); + EXPECT_EQ("hello", headers.getHostValue()); EXPECT_EQ("hello", headers.get(Headers::get().Host)->value().getStringView()); } @@ -402,43 +402,43 @@ TEST(HeaderMapImplTest, InlineAppend) { // Create via header and append. headers.setVia(""); headers.appendVia("1.0 fred", ","); - EXPECT_EQ(headers.Via()->value().getStringView(), "1.0 fred"); + EXPECT_EQ(headers.getViaValue(), "1.0 fred"); headers.appendVia("1.1 nowhere.com", ","); - EXPECT_EQ(headers.Via()->value().getStringView(), "1.0 fred,1.1 nowhere.com"); + EXPECT_EQ(headers.getViaValue(), "1.0 fred,1.1 nowhere.com"); } { // Append to via header without explicitly creating first. TestRequestHeaderMapImpl headers; headers.appendVia("1.0 fred", ","); - EXPECT_EQ(headers.Via()->value().getStringView(), "1.0 fred"); + EXPECT_EQ(headers.getViaValue(), "1.0 fred"); headers.appendVia("1.1 nowhere.com", ","); - EXPECT_EQ(headers.Via()->value().getStringView(), "1.0 fred,1.1 nowhere.com"); + EXPECT_EQ(headers.getViaValue(), "1.0 fred,1.1 nowhere.com"); } { // Custom delimiter. 
TestRequestHeaderMapImpl headers; headers.setVia(""); headers.appendVia("1.0 fred", ", "); - EXPECT_EQ(headers.Via()->value().getStringView(), "1.0 fred"); + EXPECT_EQ(headers.getViaValue(), "1.0 fred"); headers.appendVia("1.1 nowhere.com", ", "); - EXPECT_EQ(headers.Via()->value().getStringView(), "1.0 fred, 1.1 nowhere.com"); + EXPECT_EQ(headers.getViaValue(), "1.0 fred, 1.1 nowhere.com"); } { // Append and then later set. TestRequestHeaderMapImpl headers; headers.appendVia("1.0 fred", ","); headers.appendVia("1.1 nowhere.com", ","); - EXPECT_EQ(headers.Via()->value().getStringView(), "1.0 fred,1.1 nowhere.com"); + EXPECT_EQ(headers.getViaValue(), "1.0 fred,1.1 nowhere.com"); headers.setVia("2.0 override"); - EXPECT_EQ(headers.Via()->value().getStringView(), "2.0 override"); + EXPECT_EQ(headers.getViaValue(), "2.0 override"); } { // Set and then append. This mimics how GrpcTimeout is set. TestRequestHeaderMapImpl headers; headers.setGrpcTimeout(42); - EXPECT_EQ(headers.GrpcTimeout()->value().getStringView(), "42"); + EXPECT_EQ(headers.getGrpcTimeoutValue(), "42"); headers.appendGrpcTimeout("s", ""); - EXPECT_EQ(headers.GrpcTimeout()->value().getStringView(), "42s"); + EXPECT_EQ(headers.getGrpcTimeoutValue(), "42s"); } } @@ -450,7 +450,7 @@ TEST(HeaderMapImplTest, MoveIntoInline) { value.setCopy("hello"); headers.addViaMove(std::move(key), std::move(value)); EXPECT_EQ("cache-control", headers.CacheControl()->key().getStringView()); - EXPECT_EQ("hello", headers.CacheControl()->value().getStringView()); + EXPECT_EQ("hello", headers.getCacheControlValue()); HeaderString key2; key2.setCopy(Headers::get().CacheControl.get()); @@ -458,7 +458,7 @@ TEST(HeaderMapImplTest, MoveIntoInline) { value2.setCopy("there"); headers.addViaMove(std::move(key2), std::move(value2)); EXPECT_EQ("cache-control", headers.CacheControl()->key().getStringView()); - EXPECT_EQ("hello,there", headers.CacheControl()->value().getStringView()); + EXPECT_EQ("hello,there", headers.getCacheControlValue()); } TEST(HeaderMapImplTest, Remove) { @@ -480,7 +480,7 @@ TEST(HeaderMapImplTest, Remove) { // Add and remove by inline. EXPECT_EQ(0UL, headers.removeContentLength()); headers.setContentLength(5); - EXPECT_EQ("5", headers.ContentLength()->value().getStringView()); + EXPECT_EQ("5", headers.getContentLengthValue()); EXPECT_EQ(1UL, headers.size()); EXPECT_FALSE(headers.empty()); EXPECT_EQ(1UL, headers.removeContentLength()); @@ -490,7 +490,7 @@ TEST(HeaderMapImplTest, Remove) { // Add inline and remove by name. 
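The Via, cache-control, and grpc-timeout assertions above, and the DoubleInlineAdd and AddCopy expectations in the hunks that follow, all rely on repeated adds to an inline header coalescing into one delimited value rather than producing duplicate entries. A rough standalone model of that append rule (illustrative only, not the HeaderMapImpl code):

    #include <string>

    // Append a value to an existing delimited header value; an empty current
    // value is simply replaced. This is why two adds of "hello" and "there"
    // to cache-control read back as "hello,there", and why appendVia with a
    // ", " delimiter yields "1.0 fred, 1.1 nowhere.com".
    void appendDelimited(std::string& current, const std::string& value,
                         const std::string& delimiter = ",") {
      if (!current.empty()) {
        current += delimiter;
      }
      current += value;
    }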
headers.setContentLength(5); - EXPECT_EQ("5", headers.ContentLength()->value().getStringView()); + EXPECT_EQ("5", headers.getContentLengthValue()); EXPECT_EQ(1UL, headers.size()); EXPECT_FALSE(headers.empty()); EXPECT_EQ(1UL, headers.remove(Headers::get().ContentLength)); @@ -537,7 +537,7 @@ TEST(HeaderMapImplTest, RemoveRegex) { // Add inline and remove by regex headers.setContentLength(5); - EXPECT_EQ("5", headers.ContentLength()->value().getStringView()); + EXPECT_EQ("5", headers.getContentLengthValue()); EXPECT_EQ(1UL, headers.size()); EXPECT_FALSE(headers.empty()); EXPECT_EQ(1UL, headers.removePrefix(LowerCaseString("content"))); @@ -606,21 +606,21 @@ TEST(HeaderMapImplTest, DoubleInlineAdd) { const std::string bar("bar"); headers.addReference(Headers::get().ContentLength, foo); headers.addReference(Headers::get().ContentLength, bar); - EXPECT_EQ("foo,bar", headers.ContentLength()->value().getStringView()); + EXPECT_EQ("foo,bar", headers.getContentLengthValue()); EXPECT_EQ(1UL, headers.size()); } { TestRequestHeaderMapImpl headers; headers.addReferenceKey(Headers::get().ContentLength, "foo"); headers.addReferenceKey(Headers::get().ContentLength, "bar"); - EXPECT_EQ("foo,bar", headers.ContentLength()->value().getStringView()); + EXPECT_EQ("foo,bar", headers.getContentLengthValue()); EXPECT_EQ(1UL, headers.size()); } { TestRequestHeaderMapImpl headers; headers.addReferenceKey(Headers::get().ContentLength, 5); headers.addReferenceKey(Headers::get().ContentLength, 6); - EXPECT_EQ("5,6", headers.ContentLength()->value().getStringView()); + EXPECT_EQ("5,6", headers.getContentLengthValue()); EXPECT_EQ(1UL, headers.size()); } { @@ -628,7 +628,7 @@ TEST(HeaderMapImplTest, DoubleInlineAdd) { const std::string foo("foo"); headers.addReference(Headers::get().ContentLength, foo); headers.addReferenceKey(Headers::get().ContentLength, 6); - EXPECT_EQ("foo,6", headers.ContentLength()->value().getStringView()); + EXPECT_EQ("foo,6", headers.getContentLengthValue()); EXPECT_EQ(1UL, headers.size()); } } @@ -655,7 +655,7 @@ TEST(HeaderMapImplTest, DoubleInlineSet) { TestRequestHeaderMapImpl headers; headers.setReferenceKey(Headers::get().ContentType, "blah"); headers.setReferenceKey(Headers::get().ContentType, "text/html"); - EXPECT_EQ("text/html", headers.ContentType()->value().getStringView()); + EXPECT_EQ("text/html", headers.getContentTypeValue()); EXPECT_EQ(1UL, headers.size()); } @@ -725,10 +725,10 @@ TEST(HeaderMapImplTest, SetCopy) { EXPECT_EQ(headers.size(), 0); headers.setCopy(Headers::get().Path, "/"); EXPECT_EQ(headers.size(), 1); - EXPECT_EQ(headers.Path()->value().getStringView(), "/"); + EXPECT_EQ(headers.getPathValue(), "/"); headers.setPath("/foo"); EXPECT_EQ(headers.size(), 1); - EXPECT_EQ(headers.Path()->value().getStringView(), "/foo"); + EXPECT_EQ(headers.getPathValue(), "/foo"); } TEST(HeaderMapImplTest, AddCopy) { @@ -790,7 +790,7 @@ TEST(HeaderMapImplTest, AddCopy) { LowerCaseString cache_control("cache-control"); headers.addCopy(cache_control, "max-age=1345"); EXPECT_EQ("max-age=1345", headers.get(cache_control)->value().getStringView()); - EXPECT_EQ("max-age=1345", headers.CacheControl()->value().getStringView()); + EXPECT_EQ("max-age=1345", headers.getCacheControlValue()); headers.addCopy(cache_control, "public"); EXPECT_EQ("max-age=1345,public", headers.get(cache_control)->value().getStringView()); headers.addCopy(cache_control, ""); @@ -976,19 +976,18 @@ TEST(HeaderMapImplTest, TestAppendHeader) { // Append with default delimiter. 
headers.appendPath(" ", ","); headers.setPath(0); - EXPECT_EQ("0", headers.Path()->value().getStringView()); + EXPECT_EQ("0", headers.getPathValue()); EXPECT_EQ(1U, headers.Path()->value().size()); } // Test append for inline headers using this method and append##name. { TestRequestHeaderMapImpl headers; headers.addCopy(Headers::get().Via, "1.0 fred"); - EXPECT_EQ(headers.Via()->value().getStringView(), "1.0 fred"); + EXPECT_EQ(headers.getViaValue(), "1.0 fred"); headers.appendCopy(Headers::get().Via, "1.1 p.example.net"); - EXPECT_EQ(headers.Via()->value().getStringView(), "1.0 fred,1.1 p.example.net"); + EXPECT_EQ(headers.getViaValue(), "1.0 fred,1.1 p.example.net"); headers.appendVia("1.1 new.example.net", ","); - EXPECT_EQ(headers.Via()->value().getStringView(), - "1.0 fred,1.1 p.example.net,1.1 new.example.net"); + EXPECT_EQ(headers.getViaValue(), "1.0 fred,1.1 p.example.net,1.1 new.example.net"); } } @@ -1273,7 +1272,7 @@ TEST(HeaderMapImplTest, ClearHeaderMap) { // Add inline and clear. headers.setContentLength(5); - EXPECT_EQ("5", headers.ContentLength()->value().getStringView()); + EXPECT_EQ("5", headers.getContentLengthValue()); EXPECT_EQ(1UL, headers.size()); EXPECT_FALSE(headers.empty()); headers.clear(); diff --git a/test/common/http/request_id_extension_uuid_impl_test.cc b/test/common/http/request_id_extension_uuid_impl_test.cc index fb3da43f9786..7efd54948e31 100644 --- a/test/common/http/request_id_extension_uuid_impl_test.cc +++ b/test/common/http/request_id_extension_uuid_impl_test.cc @@ -175,7 +175,7 @@ TEST(UUIDRequestIDExtensionTest, SetTraceStatus) { // Invalid request ID. request_headers.setRequestId(""); uuid_utils.setTraceStatus(request_headers, TraceStatus::Forced); - EXPECT_EQ(request_headers.RequestId()->value().getStringView(), ""); + EXPECT_EQ(request_headers.getRequestIdValue(), ""); } } // namespace Http diff --git a/test/common/http/utility_test.cc b/test/common/http/utility_test.cc index 4a3f118e1260..469ced999962 100644 --- a/test/common/http/utility_test.cc +++ b/test/common/http/utility_test.cc @@ -471,12 +471,12 @@ TEST(HttpUtility, SendLocalGrpcReply) { EXPECT_CALL(callbacks, encodeHeaders_(_, true)) .WillOnce(Invoke([&](const ResponseHeaderMap& headers, bool) -> void { - EXPECT_EQ(headers.Status()->value().getStringView(), "200"); + EXPECT_EQ(headers.getStatusValue(), "200"); EXPECT_NE(headers.GrpcStatus(), nullptr); - EXPECT_EQ(headers.GrpcStatus()->value().getStringView(), + EXPECT_EQ(headers.getGrpcStatusValue(), std::to_string(enumToInt(Grpc::Status::WellKnownGrpcStatus::Unknown))); EXPECT_NE(headers.GrpcMessage(), nullptr); - EXPECT_EQ(headers.GrpcMessage()->value().getStringView(), "large"); + EXPECT_EQ(headers.getGrpcMessageValue(), "large"); })); Utility::sendLocalReply( is_reset, callbacks, @@ -498,13 +498,13 @@ TEST(HttpUtility, SendLocalGrpcReplyWithUpstreamJsonPayload) { EXPECT_CALL(callbacks, encodeHeaders_(_, true)) .WillOnce(Invoke([&](const ResponseHeaderMap& headers, bool) -> void { - EXPECT_EQ(headers.Status()->value().getStringView(), "200"); + EXPECT_EQ(headers.getStatusValue(), "200"); EXPECT_NE(headers.GrpcStatus(), nullptr); - EXPECT_EQ(headers.GrpcStatus()->value().getStringView(), + EXPECT_EQ(headers.getGrpcStatusValue(), std::to_string(enumToInt(Grpc::Status::WellKnownGrpcStatus::Unauthenticated))); EXPECT_NE(headers.GrpcMessage(), nullptr); const auto& encoded = Utility::PercentEncoding::encode(json); - EXPECT_EQ(headers.GrpcMessage()->value().getStringView(), encoded); + EXPECT_EQ(headers.getGrpcMessageValue(), encoded); 
})); Utility::sendLocalReply( is_reset, callbacks, @@ -517,7 +517,7 @@ TEST(HttpUtility, RateLimitedGrpcStatus) { EXPECT_CALL(callbacks, encodeHeaders_(_, true)) .WillOnce(Invoke([&](const ResponseHeaderMap& headers, bool) -> void { EXPECT_NE(headers.GrpcStatus(), nullptr); - EXPECT_EQ(headers.GrpcStatus()->value().getStringView(), + EXPECT_EQ(headers.getGrpcStatusValue(), std::to_string(enumToInt(Grpc::Status::WellKnownGrpcStatus::Unavailable))); })); Utility::sendLocalReply( @@ -527,7 +527,7 @@ TEST(HttpUtility, RateLimitedGrpcStatus) { EXPECT_CALL(callbacks, encodeHeaders_(_, true)) .WillOnce(Invoke([&](const ResponseHeaderMap& headers, bool) -> void { EXPECT_NE(headers.GrpcStatus(), nullptr); - EXPECT_EQ(headers.GrpcStatus()->value().getStringView(), + EXPECT_EQ(headers.getGrpcStatusValue(), std::to_string(enumToInt(Grpc::Status::WellKnownGrpcStatus::ResourceExhausted))); })); Utility::sendLocalReply( @@ -556,8 +556,7 @@ TEST(HttpUtility, SendLocalReplyHeadRequest) { bool is_reset = false; EXPECT_CALL(callbacks, encodeHeaders_(_, true)) .WillOnce(Invoke([&](const ResponseHeaderMap& headers, bool) -> void { - EXPECT_EQ(headers.ContentLength()->value().getStringView(), - fmt::format("{}", strlen("large"))); + EXPECT_EQ(headers.getContentLengthValue(), fmt::format("{}", strlen("large"))); })); Utility::sendLocalReply( is_reset, callbacks, @@ -607,8 +606,8 @@ TEST(HttpUtility, TestPrepareHeaders) { Http::RequestMessagePtr message = Utility::prepareHeaders(http_uri); - EXPECT_EQ("/x/y/z", message->headers().Path()->value().getStringView()); - EXPECT_EQ("dns.name", message->headers().Host()->value().getStringView()); + EXPECT_EQ("/x/y/z", message->headers().getPathValue()); + EXPECT_EQ("dns.name", message->headers().getHostValue()); } TEST(HttpUtility, QueryParamsToString) { diff --git a/test/common/router/router_test.cc b/test/common/router/router_test.cc index ff40ce748fe4..36b8e0d43a48 100644 --- a/test/common/router/router_test.cc +++ b/test/common/router/router_test.cc @@ -225,8 +225,7 @@ class RouterTestBase : public testing::Test { } router_.decodeHeaders(headers, true); - EXPECT_EQ(expected_count, - atoi(std::string(headers.EnvoyAttemptCount()->value().getStringView()).c_str())); + EXPECT_EQ(expected_count, atoi(std::string(headers.getEnvoyAttemptCountValue()).c_str())); // When the router filter gets reset we should cancel the pool request. 
EXPECT_CALL(cancellable_, cancel()); @@ -267,9 +266,7 @@ class RouterTestBase : public testing::Test { EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(200)); EXPECT_CALL(callbacks_, encodeHeaders_(_, true)) .WillOnce(Invoke([expected_count](Http::ResponseHeaderMap& headers, bool) { - EXPECT_EQ( - expected_count, - atoi(std::string(headers.EnvoyAttemptCount()->value().getStringView()).c_str())); + EXPECT_EQ(expected_count, atoi(std::string(headers.getEnvoyAttemptCountValue()).c_str())); })); response_decoder->decodeHeaders(std::move(response_headers), true); EXPECT_TRUE(verifyHostUpstreamStats(1, 0)); @@ -723,7 +720,8 @@ TEST_F(RouterTest, AddCookieNoDuplicate) { EXPECT_CALL(callbacks_, encodeHeaders_(_, _)) .WillOnce(Invoke([&](const Http::HeaderMap& headers, const bool) -> void { - EXPECT_EQ(headers.get(Http::Headers::get().SetCookie)->value().getStringView(), "foo=baz"); + EXPECT_EQ(std::string{headers.get(Http::Headers::get().SetCookie)->value().getStringView()}, + "foo=baz"); })); expectResponseTimerCreate(); @@ -1037,7 +1035,7 @@ TEST_F(RouterTest, EnvoyAttemptCountInRequestUpdatedInRetries) { callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); // Initial request has 1 attempt. - EXPECT_EQ(1, atoi(std::string(headers.EnvoyAttemptCount()->value().getStringView()).c_str())); + EXPECT_EQ(1, atoi(std::string(headers.getEnvoyAttemptCountValue()).c_str())); // 5xx response. router_.retry_state_->expectHeadersRetry(); @@ -1063,7 +1061,7 @@ TEST_F(RouterTest, EnvoyAttemptCountInRequestUpdatedInRetries) { callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); // The retry should cause the header to increase to 2. - EXPECT_EQ(2, atoi(std::string(headers.EnvoyAttemptCount()->value().getStringView()).c_str())); + EXPECT_EQ(2, atoi(std::string(headers.getEnvoyAttemptCountValue()).c_str())); // Normal response. EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _)).WillOnce(Return(RetryStatus::No)); @@ -1209,8 +1207,7 @@ TEST_F(RouterTest, EnvoyAttemptCountInResponseWithRetries) { EXPECT_CALL(callbacks_, encodeHeaders_(_, true)) .WillOnce(Invoke([](Http::ResponseHeaderMap& headers, bool) { // Because a retry happened the number of attempts in the response headers should be 2. - EXPECT_EQ(2, - atoi(std::string(headers.EnvoyAttemptCount()->value().getStringView()).c_str())); + EXPECT_EQ(2, atoi(std::string(headers.getEnvoyAttemptCountValue()).c_str())); })); response_decoder->decodeHeaders(std::move(response_headers2), true); EXPECT_TRUE(verifyHostUpstreamStats(1, 1)); @@ -4579,8 +4576,8 @@ TEST_F(RouterTest, InternalRedirectRejectedByPredicate) { EXPECT_EQ(1UL, stats_store_.counter("test.passthrough_internal_redirect_predicate").value()); // Make sure the original host/path is preserved. - EXPECT_EQ("host", default_request_headers_.Host()->value().getStringView()); - EXPECT_EQ("/", default_request_headers_.Path()->value().getStringView()); + EXPECT_EQ("host", default_request_headers_.getHostValue()); + EXPECT_EQ("/", default_request_headers_.getPathValue()); // Make sure x-envoy-original-url is not set for unsuccessful redirect. 
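Several of the router_test conversions above keep the atoi(std::string(headers.getEnvoyAttemptCountValue()).c_str()) dance. The std::string copy is there because the accessor presumably returns a string view, and atoi needs a NUL-terminated buffer that a view does not guarantee. A hypothetical helper that centralizes the conversion:

    #include <cstdlib>
    #include <string>
    #include <string_view>

    // Hypothetical helper: parse a numeric header value such as
    // x-envoy-attempt-count. The view is copied into a std::string so that
    // atoi gets a NUL-terminated buffer; a non-numeric value parses as 0,
    // matching atoi's behavior in the assertions above.
    int parseAttemptCount(std::string_view value) {
      return std::atoi(std::string(value).c_str());
    }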
EXPECT_EQ(nullptr, default_request_headers_.EnvoyOriginalUrl()); } diff --git a/test/common/router/shadow_writer_impl_test.cc b/test/common/router/shadow_writer_impl_test.cc index d95ae08565b4..7121a9abe027 100644 --- a/test/common/router/shadow_writer_impl_test.cc +++ b/test/common/router/shadow_writer_impl_test.cc @@ -33,7 +33,7 @@ class ShadowWriterImplTest : public testing::Test { [&](Http::RequestMessagePtr& inner_message, Http::AsyncClient::Callbacks& callbacks, const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { EXPECT_EQ(message, inner_message); - EXPECT_EQ(shadowed_host, message->headers().Host()->value().getStringView()); + EXPECT_EQ(shadowed_host, message->headers().getHostValue()); callback_ = &callbacks; return &request_; })); diff --git a/test/common/upstream/health_checker_impl_test.cc b/test/common/upstream/health_checker_impl_test.cc index cb178468647a..9ce644323bfc 100644 --- a/test/common/upstream/health_checker_impl_test.cc +++ b/test/common/upstream/health_checker_impl_test.cc @@ -1049,10 +1049,9 @@ TEST_F(HttpHealthCheckerImplTest, ZeroRetryInterval) { EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); EXPECT_CALL(test_sessions_[0]->request_encoder_, encodeHeaders(_, true)) .WillOnce(Invoke([&](const Http::RequestHeaderMap& headers, bool) { - EXPECT_EQ(headers.Host()->value().getStringView(), host); - EXPECT_EQ(headers.Path()->value().getStringView(), path); - EXPECT_EQ(headers.Scheme()->value().getStringView(), - Http::Headers::get().SchemeValues.Http); + EXPECT_EQ(headers.getHostValue(), host); + EXPECT_EQ(headers.getPathValue(), path); + EXPECT_EQ(headers.getSchemeValue(), Http::Headers::get().SchemeValues.Http); })); health_checker_->start(); @@ -1128,10 +1127,9 @@ TEST_F(HttpHealthCheckerImplTest, SuccessServiceCheck) { EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); EXPECT_CALL(test_sessions_[0]->request_encoder_, encodeHeaders(_, true)) .WillOnce(Invoke([&](const Http::RequestHeaderMap& headers, bool) { - EXPECT_EQ(headers.Host()->value().getStringView(), host); - EXPECT_EQ(headers.Path()->value().getStringView(), path); - EXPECT_EQ(headers.Scheme()->value().getStringView(), - Http::Headers::get().SchemeValues.Http); + EXPECT_EQ(headers.getHostValue(), host); + EXPECT_EQ(headers.getPathValue(), path); + EXPECT_EQ(headers.getSchemeValue(), Http::Headers::get().SchemeValues.Http); })); health_checker_->start(); @@ -1163,10 +1161,9 @@ TEST_F(HttpHealthCheckerImplTest, SuccessServicePrefixPatternCheck) { EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); EXPECT_CALL(test_sessions_[0]->request_encoder_, encodeHeaders(_, true)) .WillOnce(Invoke([&](const Http::RequestHeaderMap& headers, bool) { - EXPECT_EQ(headers.Host()->value().getStringView(), host); - EXPECT_EQ(headers.Path()->value().getStringView(), path); - EXPECT_EQ(headers.Scheme()->value().getStringView(), - Http::Headers::get().SchemeValues.Http); + EXPECT_EQ(headers.getHostValue(), host); + EXPECT_EQ(headers.getPathValue(), path); + EXPECT_EQ(headers.getSchemeValue(), Http::Headers::get().SchemeValues.Http); })); health_checker_->start(); @@ -1198,10 +1195,9 @@ TEST_F(HttpHealthCheckerImplTest, SuccessServiceExactPatternCheck) { EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); EXPECT_CALL(test_sessions_[0]->request_encoder_, encodeHeaders(_, true)) .WillOnce(Invoke([&](const Http::RequestHeaderMap& headers, bool) { - EXPECT_EQ(headers.Host()->value().getStringView(), host); - 
EXPECT_EQ(headers.Path()->value().getStringView(), path); - EXPECT_EQ(headers.Scheme()->value().getStringView(), - Http::Headers::get().SchemeValues.Http); + EXPECT_EQ(headers.getHostValue(), host); + EXPECT_EQ(headers.getPathValue(), path); + EXPECT_EQ(headers.getSchemeValue(), Http::Headers::get().SchemeValues.Http); })); health_checker_->start(); @@ -1233,10 +1229,9 @@ TEST_F(HttpHealthCheckerImplTest, SuccessServiceRegexPatternCheck) { EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); EXPECT_CALL(test_sessions_[0]->request_encoder_, encodeHeaders(_, true)) .WillOnce(Invoke([&](const Http::RequestHeaderMap& headers, bool) { - EXPECT_EQ(headers.Host()->value().getStringView(), host); - EXPECT_EQ(headers.Path()->value().getStringView(), path); - EXPECT_EQ(headers.Scheme()->value().getStringView(), - Http::Headers::get().SchemeValues.Http); + EXPECT_EQ(headers.getHostValue(), host); + EXPECT_EQ(headers.getPathValue(), path); + EXPECT_EQ(headers.getSchemeValue(), Http::Headers::get().SchemeValues.Http); })); health_checker_->start(); @@ -1276,8 +1271,8 @@ TEST_F(HttpHealthCheckerImplTest, SuccessServiceCheckWithCustomHostValueOnTheHos EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); EXPECT_CALL(test_sessions_[0]->request_encoder_, encodeHeaders(_, true)) .WillOnce(Invoke([&](const Http::RequestHeaderMap& headers, bool) { - EXPECT_EQ(headers.Host()->value().getStringView(), host); - EXPECT_EQ(headers.Path()->value().getStringView(), path); + EXPECT_EQ(headers.getHostValue(), host); + EXPECT_EQ(headers.getPathValue(), path); })); health_checker_->start(); @@ -1320,8 +1315,8 @@ TEST_F(HttpHealthCheckerImplTest, EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); EXPECT_CALL(test_sessions_[0]->request_encoder_, encodeHeaders(_, true)) .WillOnce(Invoke([&](const Http::RequestHeaderMap& headers, bool) { - EXPECT_EQ(headers.Host()->value().getStringView(), host); - EXPECT_EQ(headers.Path()->value().getStringView(), path); + EXPECT_EQ(headers.getHostValue(), host); + EXPECT_EQ(headers.getPathValue(), path); })); health_checker_->start(); @@ -1354,8 +1349,8 @@ TEST_F(HttpHealthCheckerImplTest, SuccessServiceCheckWithCustomHostValue) { EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); EXPECT_CALL(test_sessions_[0]->request_encoder_, encodeHeaders(_, true)) .WillOnce(Invoke([&](const Http::RequestHeaderMap& headers, bool) { - EXPECT_EQ(headers.Host()->value().getStringView(), host); - EXPECT_EQ(headers.Path()->value().getStringView(), path); + EXPECT_EQ(headers.getHostValue(), host); + EXPECT_EQ(headers.getPathValue(), path); })); health_checker_->start(); @@ -1421,7 +1416,7 @@ TEST_F(HttpHealthCheckerImplTest, SuccessServiceCheckWithAdditionalHeaders) { EXPECT_EQ(headers.get(header_cool)->value().getStringView(), value_cool); EXPECT_EQ(headers.get(header_awesome)->value().getStringView(), value_awesome); - EXPECT_EQ(headers.UserAgent()->value().getStringView(), value_user_agent); + EXPECT_EQ(headers.getUserAgentValue(), value_user_agent); EXPECT_EQ(headers.get(upstream_metadata)->value().getStringView(), value_upstream_metadata); EXPECT_EQ(headers.get(protocol)->value().getStringView(), value_protocol); @@ -2463,8 +2458,8 @@ TEST_F(HttpHealthCheckerImplTest, SuccessServiceCheckWithAltPort) { EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); EXPECT_CALL(test_sessions_[0]->request_encoder_, encodeHeaders(_, true)) .WillOnce(Invoke([&](const Http::RequestHeaderMap& headers, bool) { - 
EXPECT_EQ(headers.Host()->value().getStringView(), host); - EXPECT_EQ(headers.Path()->value().getStringView(), path); + EXPECT_EQ(headers.getHostValue(), host); + EXPECT_EQ(headers.getPathValue(), path); })); health_checker_->start(); @@ -2775,10 +2770,9 @@ TEST_F(HttpHealthCheckerImplTest, DEPRECATED_FEATURE_TEST(ServiceNameMatch)) { EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); EXPECT_CALL(test_sessions_[0]->request_encoder_, encodeHeaders(_, true)) .WillOnce(Invoke([&](const Http::RequestHeaderMap& headers, bool) { - EXPECT_EQ(headers.Host()->value().getStringView(), host); - EXPECT_EQ(headers.Path()->value().getStringView(), path); - EXPECT_EQ(headers.Scheme()->value().getStringView(), - Http::Headers::get().SchemeValues.Http); + EXPECT_EQ(headers.getHostValue(), host); + EXPECT_EQ(headers.getPathValue(), path); + EXPECT_EQ(headers.getSchemeValue(), Http::Headers::get().SchemeValues.Http); })); health_checker_->start(); @@ -3932,14 +3926,11 @@ class GrpcHealthCheckerImplTestBase { EXPECT_CALL(test_sessions_[0]->request_encoder_, encodeHeaders(_, false)) .WillOnce(Invoke([&](const Http::RequestHeaderMap& headers, bool) { - EXPECT_EQ(Http::Headers::get().ContentTypeValues.Grpc, - headers.ContentType()->value().getStringView()); - EXPECT_EQ(std::string("/grpc.health.v1.Health/Check"), - headers.Path()->value().getStringView()); - EXPECT_EQ(Http::Headers::get().SchemeValues.Http, - headers.Scheme()->value().getStringView()); + EXPECT_EQ(Http::Headers::get().ContentTypeValues.Grpc, headers.getContentTypeValue()); + EXPECT_EQ(std::string("/grpc.health.v1.Health/Check"), headers.getPathValue()); + EXPECT_EQ(Http::Headers::get().SchemeValues.Http, headers.getSchemeValue()); EXPECT_NE(nullptr, headers.Method()); - EXPECT_EQ(expected_host, headers.Host()->value().getStringView()); + EXPECT_EQ(expected_host, headers.getHostValue()); EXPECT_EQ(std::chrono::milliseconds(1000).count(), Envoy::Grpc::Common::getGrpcTimeout(headers).count()); })); diff --git a/test/extensions/access_loggers/grpc/http_grpc_access_log_integration_test.cc b/test/extensions/access_loggers/grpc/http_grpc_access_log_integration_test.cc index e66655b92c02..9eff52031ef2 100644 --- a/test/extensions/access_loggers/grpc/http_grpc_access_log_integration_test.cc +++ b/test/extensions/access_loggers/grpc/http_grpc_access_log_integration_test.cc @@ -70,11 +70,10 @@ class AccessLogIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, AssertionResult waitForAccessLogRequest(const std::string& expected_request_msg_yaml) { envoy::service::accesslog::v3::StreamAccessLogsMessage request_msg; VERIFY_ASSERTION(access_log_request_->waitForGrpcMessage(*dispatcher_, request_msg)); - EXPECT_EQ("POST", access_log_request_->headers().Method()->value().getStringView()); + EXPECT_EQ("POST", access_log_request_->headers().getMethodValue()); EXPECT_EQ("/envoy.service.accesslog.v2.AccessLogService/StreamAccessLogs", - access_log_request_->headers().Path()->value().getStringView()); - EXPECT_EQ("application/grpc", - access_log_request_->headers().ContentType()->value().getStringView()); + access_log_request_->headers().getPathValue()); + EXPECT_EQ("application/grpc", access_log_request_->headers().getContentTypeValue()); envoy::service::accesslog::v3::StreamAccessLogsMessage expected_request_msg; TestUtility::loadFromYaml(expected_request_msg_yaml, expected_request_msg); @@ -152,7 +151,7 @@ TEST_P(AccessLogIntegrationTest, BasicAccessLogFlow) { BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest( 
lookupPort("http"), "GET", "/notfound", "", downstream_protocol_, version_); EXPECT_TRUE(response->complete()); - EXPECT_EQ("404", response->headers().Status()->value().getStringView()); + EXPECT_EQ("404", response->headers().getStatusValue()); ASSERT_TRUE(waitForAccessLogRequest(R"EOF( http_logs: log_entry: @@ -191,7 +190,7 @@ TEST_P(AccessLogIntegrationTest, BasicAccessLogFlow) { response = IntegrationUtil::makeSingleRequest(lookupPort("http"), "GET", "/notfound", "", downstream_protocol_, version_); EXPECT_TRUE(response->complete()); - EXPECT_EQ("404", response->headers().Status()->value().getStringView()); + EXPECT_EQ("404", response->headers().getStatusValue()); ASSERT_TRUE(waitForAccessLogStream()); ASSERT_TRUE(waitForAccessLogRequest(fmt::format(R"EOF( identifier: diff --git a/test/extensions/access_loggers/grpc/tcp_grpc_access_log_integration_test.cc b/test/extensions/access_loggers/grpc/tcp_grpc_access_log_integration_test.cc index a83f27484641..9225508c5dd6 100644 --- a/test/extensions/access_loggers/grpc/tcp_grpc_access_log_integration_test.cc +++ b/test/extensions/access_loggers/grpc/tcp_grpc_access_log_integration_test.cc @@ -80,11 +80,10 @@ class TcpGrpcAccessLogIntegrationTest : public Grpc::GrpcClientIntegrationParamT AssertionResult waitForAccessLogRequest(const std::string& expected_request_msg_yaml) { envoy::service::accesslog::v3::StreamAccessLogsMessage request_msg; VERIFY_ASSERTION(access_log_request_->waitForGrpcMessage(*dispatcher_, request_msg)); - EXPECT_EQ("POST", access_log_request_->headers().Method()->value().getStringView()); + EXPECT_EQ("POST", access_log_request_->headers().getMethodValue()); EXPECT_EQ("/envoy.service.accesslog.v2.AccessLogService/StreamAccessLogs", - access_log_request_->headers().Path()->value().getStringView()); - EXPECT_EQ("application/grpc", - access_log_request_->headers().ContentType()->value().getStringView()); + access_log_request_->headers().getPathValue()); + EXPECT_EQ("application/grpc", access_log_request_->headers().getContentTypeValue()); envoy::service::accesslog::v3::StreamAccessLogsMessage expected_request_msg; TestUtility::loadFromYaml(expected_request_msg_yaml, expected_request_msg); diff --git a/test/extensions/clusters/aggregate/cluster_integration_test.cc b/test/extensions/clusters/aggregate/cluster_integration_test.cc index bb1b62683ea2..9303792a4db8 100644 --- a/test/extensions/clusters/aggregate/cluster_integration_test.cc +++ b/test/extensions/clusters/aggregate/cluster_integration_test.cc @@ -196,7 +196,7 @@ TEST_P(AggregateIntegrationTest, ClusterUpDownUp) { IntegrationUtil::makeSingleRequest(lookupPort("http"), "GET", "/aggregatecluster", "", downstream_protocol_, version_, "foo.com"); ASSERT_TRUE(response->complete()); - EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response->headers().getStatusValue()); cleanupUpstreamAndDownstream(); codec_client_->waitForDisconnect(); @@ -288,7 +288,7 @@ TEST_P(AggregateIntegrationTest, PreviousPrioritiesRetryPredicate) { EXPECT_TRUE(upstream_request_->complete()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); cleanupUpstreamAndDownstream(); } diff --git a/test/extensions/common/aws/signer_impl_test.cc b/test/extensions/common/aws/signer_impl_test.cc index 31ed9f7cbd9d..2b4681ca15c9 100644 --- a/test/extensions/common/aws/signer_impl_test.cc +++ b/test/extensions/common/aws/signer_impl_test.cc @@ -86,7 +86,7 
@@ TEST_F(SignerImplTest, SignDateHeader) { EXPECT_EQ("AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " "SignedHeaders=x-amz-content-sha256;x-amz-date, " "Signature=4ee6aa9355259c18133f150b139ea9aeb7969c9408ad361b2151f50a516afe42", - message_->headers().Authorization()->value().getStringView()); + message_->headers().getAuthorizationValue()); } // Verify we sign the security token header if the token is present in the credentials @@ -101,7 +101,7 @@ TEST_F(SignerImplTest, SignSecurityTokenHeader) { EXPECT_EQ("AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " "SignedHeaders=x-amz-content-sha256;x-amz-date;x-amz-security-token, " "Signature=1d42526aabf7d8b6d7d33d9db43b03537300cc7e6bb2817e349749e0a08f5b5e", - message_->headers().Authorization()->value().getStringView()); + message_->headers().getAuthorizationValue()); } // Verify we sign the content header as the hashed empty string if the body is empty @@ -116,7 +116,7 @@ TEST_F(SignerImplTest, SignEmptyContentHeader) { EXPECT_EQ("AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " "SignedHeaders=x-amz-content-sha256;x-amz-date, " "Signature=4ee6aa9355259c18133f150b139ea9aeb7969c9408ad361b2151f50a516afe42", - message_->headers().Authorization()->value().getStringView()); + message_->headers().getAuthorizationValue()); } // Verify we sign the content header correctly when we have a body @@ -132,7 +132,7 @@ TEST_F(SignerImplTest, SignContentHeader) { EXPECT_EQ("AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " "SignedHeaders=x-amz-content-sha256;x-amz-date, " "Signature=4eab89c36f45f2032d6010ba1adab93f8510ddd6afe540821f3a05bb0253e27b", - message_->headers().Authorization()->value().getStringView()); + message_->headers().getAuthorizationValue()); } // Verify we sign some extra headers @@ -147,7 +147,7 @@ TEST_F(SignerImplTest, SignExtraHeaders) { EXPECT_EQ("AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " "SignedHeaders=a;b;c;x-amz-content-sha256;x-amz-date, " "Signature=0940025fcecfef5d7ee30e0a26a0957e116560e374878cd86ef4316c53ae9e81", - message_->headers().Authorization()->value().getStringView()); + message_->headers().getAuthorizationValue()); } // Verify signing a host header @@ -160,7 +160,7 @@ TEST_F(SignerImplTest, SignHostHeader) { EXPECT_EQ("AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " "SignedHeaders=host;x-amz-content-sha256;x-amz-date, " "Signature=d9fd9be575a254c924d843964b063d770181d938ae818f5b603ef0575a5ce2cd", - message_->headers().Authorization()->value().getStringView()); + message_->headers().getAuthorizationValue()); } // Verify signing headers for S3 @@ -179,7 +179,7 @@ TEST_F(SignerImplTest, SignHeadersS3) { EXPECT_EQ("AWS4-HMAC-SHA256 Credential=akid/20180102/region/s3/aws4_request, " "SignedHeaders=host;x-amz-content-sha256;x-amz-date, " "Signature=d97cae067345792b78d2bad746f25c729b9eb4701127e13a7c80398f8216a167", - headers.Authorization()->value().getStringView()); + headers.getAuthorizationValue()); EXPECT_EQ(SignatureConstants::get().UnsignedPayload, headers.get(SignatureHeaders::get().ContentSha256)->value().getStringView()); } @@ -200,7 +200,7 @@ TEST_F(SignerImplTest, SignHeadersNonS3) { EXPECT_EQ("AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " "SignedHeaders=host;x-amz-content-sha256;x-amz-date, " "Signature=d9fd9be575a254c924d843964b063d770181d938ae818f5b603ef0575a5ce2cd", - headers.Authorization()->value().getStringView()); + 
headers.getAuthorizationValue()); EXPECT_EQ(SignatureConstants::get().HashedEmptyString, headers.get(SignatureHeaders::get().ContentSha256)->value().getStringView()); } diff --git a/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc b/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc index 9b075d3bbf29..fbb6f7a7bcfe 100644 --- a/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc +++ b/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc @@ -201,9 +201,7 @@ TEST_F(ExtAuthzHttpClientTest, TestDefaultAllowedHeaders) { TEST_F(ExtAuthzHttpClientTest, AuthorizationOkWithPathRewrite) { Http::RequestMessagePtr message_ptr = sendRequest({{":path", "/foo"}, {"foo", "bar"}}); - const auto* path = message_ptr->headers().get(Http::Headers::get().Path); - ASSERT_NE(path, nullptr); - EXPECT_EQ(path->value().getStringView(), "/bar/foo"); + EXPECT_EQ(message_ptr->headers().getPathValue(), "/bar/foo"); } // Test the client when a request contains Content-Length greater than 0. @@ -212,13 +210,8 @@ TEST_F(ExtAuthzHttpClientTest, ContentLengthEqualZero) { sendRequest({{Http::Headers::get().ContentLength.get(), std::string{"47"}}, {Http::Headers::get().Method.get(), std::string{"POST"}}}); - const auto* content_length = message_ptr->headers().get(Http::Headers::get().ContentLength); - ASSERT_NE(content_length, nullptr); - EXPECT_EQ(content_length->value().getStringView(), "0"); - - const auto* method = message_ptr->headers().get(Http::Headers::get().Method); - ASSERT_NE(method, nullptr); - EXPECT_EQ(method->value().getStringView(), "POST"); + EXPECT_EQ(message_ptr->headers().getContentLengthValue(), "0"); + EXPECT_EQ(message_ptr->headers().getMethodValue(), "POST"); } // Test the client when a request contains Content-Length greater than 0. @@ -244,13 +237,8 @@ TEST_F(ExtAuthzHttpClientTest, ContentLengthEqualZeroWithAllowedHeaders) { sendRequest({{Http::Headers::get().ContentLength.get(), std::string{"47"}}, {Http::Headers::get().Method.get(), std::string{"POST"}}}); - const auto* content_length = message_ptr->headers().get(Http::Headers::get().ContentLength); - ASSERT_NE(content_length, nullptr); - EXPECT_EQ(content_length->value().getStringView(), "0"); - - const auto* method = message_ptr->headers().get(Http::Headers::get().Method); - ASSERT_NE(method, nullptr); - EXPECT_EQ(method->value().getStringView(), "POST"); + EXPECT_EQ(message_ptr->headers().getContentLengthValue(), "0"); + EXPECT_EQ(message_ptr->headers().getMethodValue(), "POST"); } // Test the client when a request contains headers in the prefix matchers. 
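The ext_authz hunks above also drop the ASSERT_NE(..., nullptr) guards that used to precede each value comparison. With the accessor form that guard is unnecessary: a missing header fails the comparison against an empty string with a readable message instead of dereferencing a null pointer. A small self-contained illustration, using a hypothetical stand-in type and gtest only for the assertion macro:

    #include "gtest/gtest.h"
    #include <string>

    // Hypothetical stand-in: an optional inline header slot plus the accessor
    // shape the converted assertions rely on.
    struct CheckRequestHeaders {
      const std::string* content_length = nullptr;
      std::string getContentLengthValue() const {
        return content_length != nullptr ? *content_length : std::string();
      }
    };

    TEST(AccessorAssertions, AbsentHeaderFailsWithoutCrashing) {
      CheckRequestHeaders headers;  // content-length never set
      // Old: ASSERT_NE(headers.content_length, nullptr);
      //      EXPECT_EQ(*headers.content_length, "0");
      // New: one comparison; when the header is missing this reports
      //      expected "0", actual "", rather than crashing the test binary.
      EXPECT_EQ(headers.getContentLengthValue(), "");
    }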
diff --git a/test/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter_integration_test.h b/test/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter_integration_test.h index c367b3326319..a4f6d35b3dd2 100644 --- a/test/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter_integration_test.h +++ b/test/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter_integration_test.h @@ -67,11 +67,11 @@ class AdaptiveConcurrencyIntegrationTest void respondToRequest(bool expect_forwarded); void verifyResponseForwarded(IntegrationStreamDecoderPtr response) { - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } void verifyResponseBlocked(IntegrationStreamDecoderPtr response) { - EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response->headers().getStatusValue()); } std::deque responses_; diff --git a/test/extensions/filters/http/aws_lambda/aws_lambda_filter_test.cc b/test/extensions/filters/http/aws_lambda/aws_lambda_filter_test.cc index 3d2a61a50d43..0144f67269c4 100644 --- a/test/extensions/filters/http/aws_lambda/aws_lambda_filter_test.cc +++ b/test/extensions/filters/http/aws_lambda/aws_lambda_filter_test.cc @@ -235,11 +235,11 @@ TEST_F(AwsLambdaFilterTest, DecodeHeadersOnlyRequestWithJsonOn) { ASSERT_GT(json_buf.length(), 0); ASSERT_NE(headers.ContentType(), nullptr); - EXPECT_EQ("application/json", headers.ContentType()->value().getStringView()); + EXPECT_EQ("application/json", headers.getContentTypeValue()); // Assert the true (post-transformation) content-length sent to the Lambda endpoint. ASSERT_NE(headers.ContentLength(), nullptr); - EXPECT_EQ(fmt::format("{}", json_buf.length()), headers.ContentLength()->value().getStringView()); + EXPECT_EQ(fmt::format("{}", json_buf.length()), headers.getContentLengthValue()); // The best way to verify the generated JSON is to deserialize it and inspect it. Request req; @@ -298,12 +298,11 @@ TEST_F(AwsLambdaFilterTest, DecodeDataWithTextualBodyWithJsonOn) { ASSERT_GT(decoded_buf.length(), 0); ASSERT_NE(headers.ContentType(), nullptr); - EXPECT_EQ("application/json", headers.ContentType()->value().getStringView()); + EXPECT_EQ("application/json", headers.getContentTypeValue()); // Assert the true (post-transformation) content-length sent to the Lambda endpoint. ASSERT_NE(headers.ContentLength(), nullptr); - EXPECT_EQ(fmt::format("{}", decoded_buf.length()), - headers.ContentLength()->value().getStringView()); + EXPECT_EQ(fmt::format("{}", decoded_buf.length()), headers.getContentLengthValue()); // The best way to verify the generated JSON is to deserialize it and inspect it. 
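The aws_lambda assertions above check that the filter rewrites content-length to the length of the transformed JSON body rather than the original payload; the tests build the expected value with fmt::format("{}", json_buf.length()). A hypothetical helper sketching that recomputation with standard-library formatting (same decimal output as the fmt call used in the tests):

    #include <string>

    // After the filter transforms the request body, content-length must
    // describe the new payload, so it is recomputed from the transformed
    // buffer's size rather than copied from the client request.
    std::string contentLengthFor(const std::string& transformed_body) {
      return std::to_string(transformed_body.size());
    }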
Request req; @@ -519,7 +518,7 @@ TEST_F(AwsLambdaFilterTest, EncodeDataJsonModeTransformToHttp) { EXPECT_EQ(Http::FilterDataStatus::Continue, result); ASSERT_NE(nullptr, headers.Status()); - EXPECT_EQ("201", headers.Status()->value().getStringView()); + EXPECT_EQ("201", headers.getStatusValue()); EXPECT_EQ(nullptr, headers.get(Http::LowerCaseString(":other"))); @@ -621,7 +620,7 @@ TEST_F(AwsLambdaFilterTest, EncodeDataJsonModeInvalidJson) { EXPECT_EQ(0, encoded_buf.length()); ASSERT_NE(nullptr, headers.Status()); - EXPECT_EQ("500", headers.Status()->value().getStringView()); + EXPECT_EQ("500", headers.getStatusValue()); EXPECT_EQ(1ul, filter_->stats().server_error_.value()); } diff --git a/test/extensions/filters/http/aws_request_signing/aws_request_signing_filter_test.cc b/test/extensions/filters/http/aws_request_signing/aws_request_signing_filter_test.cc index f6225ec8ee41..e53ec917318a 100644 --- a/test/extensions/filters/http/aws_request_signing/aws_request_signing_filter_test.cc +++ b/test/extensions/filters/http/aws_request_signing/aws_request_signing_filter_test.cc @@ -56,7 +56,7 @@ TEST_F(AwsRequestSigningFilterTest, SignWithHostRewrite) { Http::TestRequestHeaderMapImpl headers; EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false)); - EXPECT_EQ("foo", headers.Host()->value().getStringView()); + EXPECT_EQ("foo", headers.getHostValue()); EXPECT_EQ(1UL, filter_config_->stats_.signing_added_.value()); } diff --git a/test/extensions/filters/http/buffer/buffer_filter_integration_test.cc b/test/extensions/filters/http/buffer/buffer_filter_integration_test.cc index dfde16d77699..e899205bd6b9 100644 --- a/test/extensions/filters/http/buffer/buffer_filter_integration_test.cc +++ b/test/extensions/filters/http/buffer/buffer_filter_integration_test.cc @@ -63,7 +63,7 @@ TEST_P(BufferIntegrationTest, RouterRequestPopulateContentLength) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } TEST_P(BufferIntegrationTest, RouterRequestPopulateContentLengthOnTrailers) { @@ -92,7 +92,7 @@ TEST_P(BufferIntegrationTest, RouterRequestPopulateContentLengthOnTrailers) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } TEST_P(BufferIntegrationTest, RouterRequestBufferLimitExceeded) { @@ -112,7 +112,7 @@ TEST_P(BufferIntegrationTest, RouterRequestBufferLimitExceeded) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("413", response->headers().Status()->value().getStringView()); + EXPECT_EQ("413", response->headers().getStatusValue()); } ConfigHelper::HttpModifierFunction overrideConfig(const std::string& json_config) { @@ -154,7 +154,7 @@ TEST_P(BufferIntegrationTest, RouteDisabled) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } TEST_P(BufferIntegrationTest, RouteOverride) { @@ -180,7 +180,7 @@ TEST_P(BufferIntegrationTest, RouteOverride) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } } // namespace diff --git 
a/test/extensions/filters/http/buffer/buffer_filter_test.cc b/test/extensions/filters/http/buffer/buffer_filter_test.cc index 11f85e138f76..34ce1e2211b6 100644 --- a/test/extensions/filters/http/buffer/buffer_filter_test.cc +++ b/test/extensions/filters/http/buffer/buffer_filter_test.cc @@ -106,8 +106,7 @@ TEST_F(BufferFilterTest, ContentLengthPopulation) { Buffer::OwnedImpl data2(" world"); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data2, true)); - ASSERT_NE(headers.ContentLength(), nullptr); - EXPECT_EQ(headers.ContentLength()->value().getStringView(), "11"); + EXPECT_EQ(headers.getContentLengthValue(), "11"); } TEST_F(BufferFilterTest, ContentLengthPopulationInTrailers) { @@ -122,8 +121,7 @@ TEST_F(BufferFilterTest, ContentLengthPopulationInTrailers) { Http::TestRequestTrailerMapImpl trailers; EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(trailers)); - ASSERT_NE(headers.ContentLength(), nullptr); - EXPECT_EQ(headers.ContentLength()->value().getStringView(), "5"); + EXPECT_EQ(headers.getContentLengthValue(), "5"); } TEST_F(BufferFilterTest, ContentLengthPopulationAlreadyPresent) { @@ -134,8 +132,7 @@ TEST_F(BufferFilterTest, ContentLengthPopulationAlreadyPresent) { Buffer::OwnedImpl data("foo"); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data, true)); - ASSERT_NE(headers.ContentLength(), nullptr); - EXPECT_EQ(headers.ContentLength()->value().getStringView(), "3"); + EXPECT_EQ(headers.getContentLengthValue(), "3"); } TEST_F(BufferFilterTest, RouteConfigOverride) { diff --git a/test/extensions/filters/http/compressor/compressor_filter_integration_test.cc b/test/extensions/filters/http/compressor/compressor_filter_integration_test.cc index 82e7c1cd31f7..33b976857c55 100644 --- a/test/extensions/filters/http/compressor/compressor_filter_integration_test.cc +++ b/test/extensions/filters/http/compressor/compressor_filter_integration_test.cc @@ -35,13 +35,11 @@ class CompressorIntegrationTest : public testing::TestWithParamcomplete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); - ASSERT_TRUE(response->headers().ContentEncoding() != nullptr); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ(Http::Headers::get().ContentEncodingValues.Gzip, - response->headers().ContentEncoding()->value().getStringView()); - ASSERT_TRUE(response->headers().TransferEncoding() != nullptr); + response->headers().getContentEncodingValue()); EXPECT_EQ(Http::Headers::get().TransferEncodingValues.Chunked, - response->headers().TransferEncoding()->value().getStringView()); + response->headers().getTransferEncodingValue()); Buffer::OwnedImpl decompressed_response{}; const Buffer::OwnedImpl compressed_response{response->body()}; @@ -59,7 +57,7 @@ class CompressorIntegrationTest : public testing::TestWithParamcomplete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); ASSERT_TRUE(response->headers().ContentEncoding() == nullptr); ASSERT_EQ(content_length, response->body().size()); EXPECT_EQ(response->body(), std::string(content_length, 'a')); @@ -184,8 +182,8 @@ TEST_P(CompressorIntegrationTest, UpstreamResponseAlreadyEncoded) { EXPECT_TRUE(upstream_request_->complete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - 
EXPECT_EQ("200", response->headers().Status()->value().getStringView()); - ASSERT_EQ("br", response->headers().ContentEncoding()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); + ASSERT_EQ("br", response->headers().getContentEncodingValue()); EXPECT_EQ(128U, response->body().size()); } @@ -208,7 +206,7 @@ TEST_P(CompressorIntegrationTest, NotEnoughContentLength) { EXPECT_TRUE(upstream_request_->complete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); ASSERT_TRUE(response->headers().ContentEncoding() == nullptr); EXPECT_EQ(10U, response->body().size()); } @@ -231,7 +229,7 @@ TEST_P(CompressorIntegrationTest, EmptyResponse) { EXPECT_TRUE(upstream_request_->complete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("204", response->headers().Status()->value().getStringView()); + EXPECT_EQ("204", response->headers().getStatusValue()); ASSERT_TRUE(response->headers().ContentEncoding() == nullptr); EXPECT_EQ(0U, response->body().size()); } @@ -286,9 +284,9 @@ TEST_P(CompressorIntegrationTest, AcceptanceFullConfigChunkedResponse) { EXPECT_TRUE(upstream_request_->complete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); - ASSERT_EQ("gzip", response->headers().ContentEncoding()->value().getStringView()); - ASSERT_EQ("chunked", response->headers().TransferEncoding()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); + ASSERT_EQ("gzip", response->headers().getContentEncodingValue()); + ASSERT_EQ("chunked", response->headers().getTransferEncodingValue()); } /** @@ -310,8 +308,8 @@ TEST_P(CompressorIntegrationTest, AcceptanceFullConfigVeryHeader) { EXPECT_TRUE(upstream_request_->complete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); - ASSERT_EQ("gzip", response->headers().ContentEncoding()->value().getStringView()); - ASSERT_EQ("Cookie, Accept-Encoding", response->headers().Vary()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); + ASSERT_EQ("gzip", response->headers().getContentEncodingValue()); + ASSERT_EQ("Cookie, Accept-Encoding", response->headers().getVaryValue()); } } // namespace Envoy diff --git a/test/extensions/filters/http/csrf/csrf_filter_integration_test.cc b/test/extensions/filters/http/csrf/csrf_filter_integration_test.cc index 0b4b81ec2fa5..91b43cad095c 100644 --- a/test/extensions/filters/http/csrf/csrf_filter_integration_test.cc +++ b/test/extensions/filters/http/csrf/csrf_filter_integration_test.cc @@ -89,7 +89,7 @@ TEST_P(CsrfFilterIntegrationTest, TestCsrfSuccess) { }}; const auto& response = sendRequestAndWaitForResponse(headers); EXPECT_TRUE(response->complete()); - EXPECT_EQ(response->headers().Status()->value().getStringView(), "200"); + EXPECT_EQ(response->headers().getStatusValue(), "200"); } TEST_P(CsrfFilterIntegrationTest, TestCsrfDisabled) { @@ -103,7 +103,7 @@ TEST_P(CsrfFilterIntegrationTest, TestCsrfDisabled) { }}; const auto& response = sendRequestAndWaitForResponse(headers); EXPECT_TRUE(response->complete()); - EXPECT_EQ(response->headers().Status()->value().getStringView(), "200"); + 
EXPECT_EQ(response->headers().getStatusValue(), "200"); } TEST_P(CsrfFilterIntegrationTest, TestNonMutationMethod) { @@ -117,7 +117,7 @@ TEST_P(CsrfFilterIntegrationTest, TestNonMutationMethod) { }}; const auto& response = sendRequestAndWaitForResponse(headers); EXPECT_TRUE(response->complete()); - EXPECT_EQ(response->headers().Status()->value().getStringView(), "200"); + EXPECT_EQ(response->headers().getStatusValue(), "200"); } TEST_P(CsrfFilterIntegrationTest, TestOriginMismatch) { @@ -131,7 +131,7 @@ TEST_P(CsrfFilterIntegrationTest, TestOriginMismatch) { }}; const auto& response = sendRequest(headers); EXPECT_TRUE(response->complete()); - EXPECT_EQ(response->headers().Status()->value().getStringView(), "403"); + EXPECT_EQ(response->headers().getStatusValue(), "403"); } TEST_P(CsrfFilterIntegrationTest, TestEnforcesPost) { @@ -145,7 +145,7 @@ TEST_P(CsrfFilterIntegrationTest, TestEnforcesPost) { }}; const auto& response = sendRequest(headers); EXPECT_TRUE(response->complete()); - EXPECT_EQ(response->headers().Status()->value().getStringView(), "403"); + EXPECT_EQ(response->headers().getStatusValue(), "403"); } TEST_P(CsrfFilterIntegrationTest, TestEnforcesDelete) { @@ -159,7 +159,7 @@ TEST_P(CsrfFilterIntegrationTest, TestEnforcesDelete) { }}; const auto& response = sendRequest(headers); EXPECT_TRUE(response->complete()); - EXPECT_EQ(response->headers().Status()->value().getStringView(), "403"); + EXPECT_EQ(response->headers().getStatusValue(), "403"); } TEST_P(CsrfFilterIntegrationTest, TestEnforcesPatch) { @@ -173,7 +173,7 @@ TEST_P(CsrfFilterIntegrationTest, TestEnforcesPatch) { }}; const auto& response = sendRequest(headers); EXPECT_TRUE(response->complete()); - EXPECT_EQ(response->headers().Status()->value().getStringView(), "403"); + EXPECT_EQ(response->headers().getStatusValue(), "403"); } TEST_P(CsrfFilterIntegrationTest, TestRefererFallback) { @@ -185,7 +185,7 @@ TEST_P(CsrfFilterIntegrationTest, TestRefererFallback) { {"host", "test-origin"}}; const auto& response = sendRequestAndWaitForResponse(headers); EXPECT_TRUE(response->complete()); - EXPECT_EQ(response->headers().Status()->value().getStringView(), "200"); + EXPECT_EQ(response->headers().getStatusValue(), "200"); } TEST_P(CsrfFilterIntegrationTest, TestMissingOrigin) { @@ -194,7 +194,7 @@ TEST_P(CsrfFilterIntegrationTest, TestMissingOrigin) { {{":method", "DELETE"}, {":path", "/"}, {":scheme", "http"}, {"host", "test-origin"}}}; const auto& response = sendRequest(headers); EXPECT_TRUE(response->complete()); - EXPECT_EQ(response->headers().Status()->value().getStringView(), "403"); + EXPECT_EQ(response->headers().getStatusValue(), "403"); } TEST_P(CsrfFilterIntegrationTest, TestShadowOnlyMode) { @@ -208,7 +208,7 @@ TEST_P(CsrfFilterIntegrationTest, TestShadowOnlyMode) { }}; const auto& response = sendRequestAndWaitForResponse(headers); EXPECT_TRUE(response->complete()); - EXPECT_EQ(response->headers().Status()->value().getStringView(), "200"); + EXPECT_EQ(response->headers().getStatusValue(), "200"); } TEST_P(CsrfFilterIntegrationTest, TestFilterAndShadowEnabled) { @@ -222,7 +222,7 @@ TEST_P(CsrfFilterIntegrationTest, TestFilterAndShadowEnabled) { }}; const auto& response = sendRequest(headers); EXPECT_TRUE(response->complete()); - EXPECT_EQ(response->headers().Status()->value().getStringView(), "403"); + EXPECT_EQ(response->headers().getStatusValue(), "403"); } } // namespace } // namespace Envoy diff --git a/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_integration_test.cc 
b/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_integration_test.cc index dc607359ca09..bbe4a2a95fb8 100644 --- a/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_integration_test.cc +++ b/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_integration_test.cc @@ -214,7 +214,7 @@ TEST_P(ProxyFilterIntegrationTest, DNSCacheHostOverflow) { {":authority", fmt::format("localhost2", fake_upstreams_[0]->localAddress()->ip()->port())}}; response = codec_client_->makeHeaderOnlyRequest(request_headers2); response->waitForEndStream(); - EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response->headers().getStatusValue()); EXPECT_EQ(1, test_server_->counter("dns_cache.foo.host_overflow")->value()); } @@ -289,7 +289,7 @@ TEST_P(ProxyFilterIntegrationTest, UpstreamTlsInvalidSAN) { auto response = codec_client_->makeHeaderOnlyRequest(request_headers); response->waitForEndStream(); - EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response->headers().getStatusValue()); EXPECT_EQ(1, test_server_->counter("cluster.cluster_0.ssl.fail_verify_san")->value()); } diff --git a/test/extensions/filters/http/ext_authz/ext_authz_integration_test.cc b/test/extensions/filters/http/ext_authz/ext_authz_integration_test.cc index e24056c15957..a6e00760db52 100644 --- a/test/extensions/filters/http/ext_authz/ext_authz_integration_test.cc +++ b/test/extensions/filters/http/ext_authz/ext_authz_integration_test.cc @@ -71,11 +71,10 @@ class ExtAuthzGrpcIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, result = ext_authz_request_->waitForGrpcMessage(*dispatcher_, check_request); RELEASE_ASSERT(result, result.message()); - EXPECT_EQ("POST", ext_authz_request_->headers().Method()->value().getStringView()); + EXPECT_EQ("POST", ext_authz_request_->headers().getMethodValue()); EXPECT_EQ("/envoy.service.auth.v2.Authorization/Check", - ext_authz_request_->headers().Path()->value().getStringView()); - EXPECT_EQ("application/grpc", - ext_authz_request_->headers().ContentType()->value().getStringView()); + ext_authz_request_->headers().getPathValue()); + EXPECT_EQ("application/grpc", ext_authz_request_->headers().getContentTypeValue()); envoy::service::auth::v3::CheckRequest expected_check_request; TestUtility::loadFromYaml(expected_check_request_yaml, expected_check_request); @@ -117,7 +116,7 @@ class ExtAuthzGrpcIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, EXPECT_EQ(request_body_.length(), upstream_request_->bodyLength()); EXPECT_TRUE(response_->complete()); - EXPECT_EQ("200", response_->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response_->headers().getStatusValue()); EXPECT_EQ(response_size_, response_->body().size()); } diff --git a/test/extensions/filters/http/ext_authz/ext_authz_test.cc b/test/extensions/filters/http/ext_authz/ext_authz_test.cc index c72148f0f2cf..2d9eb7a87a12 100644 --- a/test/extensions/filters/http/ext_authz/ext_authz_test.cc +++ b/test/extensions/filters/http/ext_authz/ext_authz_test.cc @@ -189,8 +189,7 @@ TEST_F(HttpFilterTest, ErrorFailClose) { EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0); EXPECT_CALL(filter_callbacks_, encodeHeaders_(_, true)) .WillOnce(Invoke([&](const Http::ResponseHeaderMap& headers, bool) -> void { - EXPECT_EQ(headers.Status()->value().getStringView(), - std::to_string(enumToInt(Http::Code::Forbidden))); + EXPECT_EQ(headers.getStatusValue(), 
std::to_string(enumToInt(Http::Code::Forbidden))); })); Filters::Common::ExtAuthz::Response response{}; @@ -228,7 +227,7 @@ TEST_F(HttpFilterTest, ErrorCustomStatusCode) { EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0); EXPECT_CALL(filter_callbacks_, encodeHeaders_(_, true)) .WillOnce(Invoke([&](const Http::ResponseHeaderMap& headers, bool) -> void { - EXPECT_EQ(headers.Status()->value().getStringView(), + EXPECT_EQ(headers.getStatusValue(), std::to_string(enumToInt(Http::Code::ServiceUnavailable))); })); diff --git a/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc b/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc index 20acafd6f472..e820cb04f273 100644 --- a/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc +++ b/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc @@ -618,7 +618,7 @@ TEST_F(GrpcJsonTranscoderFilterTest, TranscodingUnaryError) { EXPECT_CALL(decoder_callbacks_, encodeHeaders_(_, false)) .WillOnce(Invoke([](Http::ResponseHeaderMap& headers, bool end_stream) { - EXPECT_EQ("400", headers.Status()->value().getStringView()); + EXPECT_EQ("400", headers.getStatusValue()); EXPECT_FALSE(end_stream); })); EXPECT_CALL(decoder_callbacks_, encodeData(_, true)); diff --git a/test/extensions/filters/http/grpc_web/grpc_web_filter_integration_test.cc b/test/extensions/filters/http/grpc_web/grpc_web_filter_integration_test.cc index e34e8ac047e9..608954031f71 100644 --- a/test/extensions/filters/http/grpc_web/grpc_web_filter_integration_test.cc +++ b/test/extensions/filters/http/grpc_web/grpc_web_filter_integration_test.cc @@ -56,7 +56,7 @@ TEST_P(GrpcWebFilterIntegrationTest, GRPCWebTrailersNotDuplicated) { EXPECT_THAT(*upstream_request_->trailers(), HeaderMapEqualRef(&request_trailers)); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_TRUE(absl::StrContains(response->body(), "response1:trailer1")); EXPECT_TRUE(absl::StrContains(response->body(), "response2:trailer2")); // Expect that the trailers be in the response-body instead diff --git a/test/extensions/filters/http/grpc_web/grpc_web_filter_test.cc b/test/extensions/filters/http/grpc_web/grpc_web_filter_test.cc index 817d84811061..65c60882f2d8 100644 --- a/test/extensions/filters/http/grpc_web/grpc_web_filter_test.cc +++ b/test/extensions/filters/http/grpc_web/grpc_web_filter_test.cc @@ -87,7 +87,7 @@ class GrpcWebFilterTest : public testing::TestWithParamvalue().getStringView(), &code)); + ASSERT_TRUE(absl::SimpleAtoi(headers.getStatusValue(), &code)); EXPECT_EQ(static_cast(expected_code), code); })); EXPECT_CALL(decoder_callbacks_, encodeData(_, _)) @@ -96,14 +96,12 @@ class GrpcWebFilterTest : public testing::TestWithParamvalue().getStringView()); + EXPECT_EQ(Http::Headers::get().ContentTypeValues.Grpc, request_headers.getContentTypeValue()); // Ensure we never send content-length upstream EXPECT_EQ(nullptr, request_headers.ContentLength()); - EXPECT_EQ(Http::Headers::get().TEValues.Trailers, - request_headers.TE()->value().getStringView()); + EXPECT_EQ(Http::Headers::get().TEValues.Trailers, request_headers.getTEValue()); EXPECT_EQ(Http::Headers::get().GrpcAcceptEncodingValues.Default, - request_headers.GrpcAcceptEncoding()->value().getStringView()); + request_headers.getGrpcAcceptEncodingValue()); } Stats::TestSymbolTable symbol_table_; @@ -129,8 +127,7 @@ 
TEST_F(GrpcWebFilterTest, SupportedContentTypes) { EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false)); Http::MetadataMap metadata_map{{"metadata", "metadata"}}; EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_.decodeMetadata(metadata_map)); - EXPECT_EQ(Http::Headers::get().ContentTypeValues.Grpc, - request_headers.ContentType()->value().getStringView()); + EXPECT_EQ(Http::Headers::get().ContentTypeValues.Grpc, request_headers.getContentTypeValue()); } } @@ -318,10 +315,10 @@ TEST_P(GrpcWebFilterTest, Unary) { EXPECT_EQ("200", response_headers.get_(Http::Headers::get().Status.get())); if (accept_binary_response()) { EXPECT_EQ(Http::Headers::get().ContentTypeValues.GrpcWebProto, - response_headers.ContentType()->value().getStringView()); + response_headers.getContentTypeValue()); } else if (accept_text_response()) { EXPECT_EQ(Http::Headers::get().ContentTypeValues.GrpcWebTextProto, - response_headers.ContentType()->value().getStringView()); + response_headers.getContentTypeValue()); } else { FAIL() << "Unsupported gRPC-Web request accept: " << request_accept(); } @@ -352,7 +349,7 @@ TEST_P(GrpcWebFilterTest, Unary) { EXPECT_EQ(std::string(B64_MESSAGE, B64_MESSAGE_SIZE), encoded_buffer.toString()); } else { FAIL() << "Unsupported gRPC-Web response content-type: " - << response_headers.ContentType()->value().getStringView(); + << response_headers.getContentTypeValue(); } // Tests response trailers. @@ -369,7 +366,7 @@ TEST_P(GrpcWebFilterTest, Unary) { EXPECT_EQ(std::string(TRAILERS, TRAILERS_SIZE), Base64::decode(trailers_buffer.toString())); } else { FAIL() << "Unsupported gRPC-Web response content-type: " - << response_headers.ContentType()->value().getStringView(); + << response_headers.getContentTypeValue(); } EXPECT_EQ(0, response_trailers.size()); } diff --git a/test/extensions/filters/http/gzip/gzip_filter_integration_test.cc b/test/extensions/filters/http/gzip/gzip_filter_integration_test.cc index eb3c7782925f..6e41050e3477 100644 --- a/test/extensions/filters/http/gzip/gzip_filter_integration_test.cc +++ b/test/extensions/filters/http/gzip/gzip_filter_integration_test.cc @@ -35,13 +35,13 @@ class GzipIntegrationTest : public testing::TestWithParamcomplete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); ASSERT_TRUE(response->headers().ContentEncoding() != nullptr); EXPECT_EQ(Http::Headers::get().ContentEncodingValues.Gzip, - response->headers().ContentEncoding()->value().getStringView()); + response->headers().getContentEncodingValue()); ASSERT_TRUE(response->headers().TransferEncoding() != nullptr); EXPECT_EQ(Http::Headers::get().TransferEncodingValues.Chunked, - response->headers().TransferEncoding()->value().getStringView()); + response->headers().getTransferEncodingValue()); Buffer::OwnedImpl decompressed_response{}; const Buffer::OwnedImpl compressed_response{response->body()}; @@ -59,7 +59,7 @@ class GzipIntegrationTest : public testing::TestWithParamcomplete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); ASSERT_TRUE(response->headers().ContentEncoding() == nullptr); ASSERT_EQ(content_length, response->body().size()); EXPECT_EQ(response->body(), std::string(content_length, 'a')); @@ -203,8 
+203,8 @@ TEST_P(GzipIntegrationTest, DEPRECATED_FEATURE_TEST(UpstreamResponseAlreadyEncod EXPECT_TRUE(upstream_request_->complete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); - ASSERT_EQ("br", response->headers().ContentEncoding()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); + ASSERT_EQ("br", response->headers().getContentEncodingValue()); EXPECT_EQ(128U, response->body().size()); } @@ -227,7 +227,7 @@ TEST_P(GzipIntegrationTest, DEPRECATED_FEATURE_TEST(NotEnoughContentLength)) { EXPECT_TRUE(upstream_request_->complete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); ASSERT_TRUE(response->headers().ContentEncoding() == nullptr); EXPECT_EQ(10U, response->body().size()); } @@ -250,7 +250,7 @@ TEST_P(GzipIntegrationTest, DEPRECATED_FEATURE_TEST(EmptyResponse)) { EXPECT_TRUE(upstream_request_->complete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("204", response->headers().Status()->value().getStringView()); + EXPECT_EQ("204", response->headers().getStatusValue()); ASSERT_TRUE(response->headers().ContentEncoding() == nullptr); EXPECT_EQ(0U, response->body().size()); } @@ -305,9 +305,9 @@ TEST_P(GzipIntegrationTest, DEPRECATED_FEATURE_TEST(AcceptanceFullConfigChunkedR EXPECT_TRUE(upstream_request_->complete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); - ASSERT_EQ("gzip", response->headers().ContentEncoding()->value().getStringView()); - ASSERT_EQ("chunked", response->headers().TransferEncoding()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); + ASSERT_EQ("gzip", response->headers().getContentEncodingValue()); + ASSERT_EQ("chunked", response->headers().getTransferEncodingValue()); } /** @@ -329,8 +329,8 @@ TEST_P(GzipIntegrationTest, DEPRECATED_FEATURE_TEST(AcceptanceFullConfigVeryHead EXPECT_TRUE(upstream_request_->complete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); - ASSERT_EQ("gzip", response->headers().ContentEncoding()->value().getStringView()); - ASSERT_EQ("Cookie, Accept-Encoding", response->headers().Vary()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); + ASSERT_EQ("gzip", response->headers().getContentEncodingValue()); + ASSERT_EQ("Cookie, Accept-Encoding", response->headers().getVaryValue()); } } // namespace Envoy diff --git a/test/extensions/filters/http/health_check/health_check_test.cc b/test/extensions/filters/http/health_check/health_check_test.cc index 54f5306cdb47..6b1063bf5c82 100644 --- a/test/extensions/filters/http/health_check/health_check_test.cc +++ b/test/extensions/filters/http/health_check/health_check_test.cc @@ -115,7 +115,7 @@ TEST_F(HealthCheckFilterNoPassThroughTest, NotHcRequest) { EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(body, false)); Http::TestResponseTrailerMapImpl response_trailers; EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers)); - EXPECT_EQ("true", 
service_response.EnvoyImmediateHealthCheckFail()->value().getStringView()); + EXPECT_EQ("true", service_response.getEnvoyImmediateHealthCheckFailValue()); } TEST_F(HealthCheckFilterNoPassThroughTest, ComputedHealth) { @@ -227,8 +227,7 @@ TEST_F(HealthCheckFilterNoPassThroughTest, HealthCheckFailedCallbackCalled) { .Times(1) .WillRepeatedly(Invoke([&](Http::ResponseHeaderMap& headers, bool end_stream) { filter_->encodeHeaders(headers, end_stream); - EXPECT_EQ("cluster_name", - headers.EnvoyUpstreamHealthCheckedCluster()->value().getStringView()); + EXPECT_EQ("cluster_name", headers.getEnvoyUpstreamHealthCheckedClusterValue()); EXPECT_EQ(nullptr, headers.EnvoyImmediateHealthCheckFail()); })); @@ -252,8 +251,7 @@ TEST_F(HealthCheckFilterPassThroughTest, Ok) { Http::TestResponseHeaderMapImpl service_hc_respnose{{":status", "200"}}; EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(service_hc_respnose, true)); - EXPECT_EQ("cluster_name", - service_hc_respnose.EnvoyUpstreamHealthCheckedCluster()->value().getStringView()); + EXPECT_EQ("cluster_name", service_hc_respnose.getEnvoyUpstreamHealthCheckedClusterValue()); } TEST_F(HealthCheckFilterPassThroughTest, OkWithContinue) { @@ -272,8 +270,7 @@ TEST_F(HealthCheckFilterPassThroughTest, OkWithContinue) { EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_->encodeMetadata(metadata_map)); Http::TestResponseHeaderMapImpl service_hc_respnose{{":status", "200"}}; EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(service_hc_respnose, true)); - EXPECT_EQ("cluster_name", - service_hc_respnose.EnvoyUpstreamHealthCheckedCluster()->value().getStringView()); + EXPECT_EQ("cluster_name", service_hc_respnose.getEnvoyUpstreamHealthCheckedClusterValue()); } TEST_F(HealthCheckFilterPassThroughTest, Failed) { @@ -302,8 +299,7 @@ TEST_F(HealthCheckFilterCachingTest, CachedServiceUnavailableCallbackCalled) { .Times(1) .WillRepeatedly(Invoke([&](Http::ResponseHeaderMap& headers, bool end_stream) { filter_->encodeHeaders(headers, end_stream); - EXPECT_EQ("cluster_name", - headers.EnvoyUpstreamHealthCheckedCluster()->value().getStringView()); + EXPECT_EQ("cluster_name", headers.getEnvoyUpstreamHealthCheckedClusterValue()); })); EXPECT_CALL(callbacks_.stream_info_, @@ -324,8 +320,7 @@ TEST_F(HealthCheckFilterCachingTest, CachedOkCallbackNotCalled) { .Times(1) .WillRepeatedly(Invoke([&](Http::ResponseHeaderMap& headers, bool end_stream) { filter_->encodeHeaders(headers, end_stream); - EXPECT_EQ("cluster_name", - headers.EnvoyUpstreamHealthCheckedCluster()->value().getStringView()); + EXPECT_EQ("cluster_name", headers.getEnvoyUpstreamHealthCheckedClusterValue()); })); EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, @@ -353,8 +348,7 @@ TEST_F(HealthCheckFilterCachingTest, All) { .Times(1) .WillRepeatedly(Invoke([&](Http::ResponseHeaderMap& headers, bool end_stream) { filter_->encodeHeaders(headers, end_stream); - EXPECT_EQ("cluster_name", - headers.EnvoyUpstreamHealthCheckedCluster()->value().getStringView()); + EXPECT_EQ("cluster_name", headers.getEnvoyUpstreamHealthCheckedClusterValue()); })); EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_->decodeHeaders(request_headers_, true)); @@ -388,8 +382,7 @@ TEST_F(HealthCheckFilterCachingTest, DegradedHeader) { .Times(1) .WillRepeatedly(Invoke([&](Http::ResponseHeaderMap& headers, bool end_stream) { filter_->encodeHeaders(headers, end_stream); - EXPECT_EQ("cluster_name", - headers.EnvoyUpstreamHealthCheckedCluster()->value().getStringView()); + EXPECT_EQ("cluster_name", 
headers.getEnvoyUpstreamHealthCheckedClusterValue()); })); EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_->decodeHeaders(request_headers_, true)); diff --git a/test/extensions/filters/http/jwt_authn/filter_integration_test.cc b/test/extensions/filters/http/jwt_authn/filter_integration_test.cc index bb40a649bc88..b4292d937ec2 100644 --- a/test/extensions/filters/http/jwt_authn/filter_integration_test.cc +++ b/test/extensions/filters/http/jwt_authn/filter_integration_test.cc @@ -118,7 +118,7 @@ TEST_P(LocalJwksIntegrationTest, WithGoodToken) { upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, true); response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } // With local Jwks, this test verifies a request is rejected with an expired Jwt token. @@ -138,7 +138,7 @@ TEST_P(LocalJwksIntegrationTest, ExpiredToken) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("401", response->headers().Status()->value().getStringView()); + EXPECT_EQ("401", response->headers().getStatusValue()); } TEST_P(LocalJwksIntegrationTest, MissingToken) { @@ -156,7 +156,7 @@ TEST_P(LocalJwksIntegrationTest, MissingToken) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("401", response->headers().Status()->value().getStringView()); + EXPECT_EQ("401", response->headers().getStatusValue()); } TEST_P(LocalJwksIntegrationTest, ExpiredTokenHeadReply) { @@ -175,8 +175,8 @@ TEST_P(LocalJwksIntegrationTest, ExpiredTokenHeadReply) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("401", response->headers().Status()->value().getStringView()); - EXPECT_NE("0", response->headers().ContentLength()->value().getStringView()); + EXPECT_EQ("401", response->headers().getStatusValue()); + EXPECT_NE("0", response->headers().getContentLengthValue()); EXPECT_THAT(response->body(), ::testing::IsEmpty()); } @@ -199,7 +199,7 @@ TEST_P(LocalJwksIntegrationTest, NoRequiresPath) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } // This test verifies a CORS preflight request without JWT token is allowed. 
@@ -222,7 +222,7 @@ TEST_P(LocalJwksIntegrationTest, CorsPreflight) { upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, true); response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } // This test verifies JwtRequirement specified from filer state rules @@ -299,7 +299,7 @@ TEST_P(LocalJwksIntegrationTest, FilterStateRequirement) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ(test.expected_status, response->headers().Status()->value().getStringView()); + EXPECT_EQ(test.expected_status, response->headers().getStatusValue()); } } @@ -398,7 +398,7 @@ TEST_P(RemoteJwksIntegrationTest, WithGoodToken) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); cleanup(); } @@ -423,7 +423,7 @@ TEST_P(RemoteJwksIntegrationTest, FetchFailedJwks) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("401", response->headers().Status()->value().getStringView()); + EXPECT_EQ("401", response->headers().getStatusValue()); cleanup(); } @@ -443,7 +443,7 @@ TEST_P(RemoteJwksIntegrationTest, FetchFailedMissingCluster) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("401", response->headers().Status()->value().getStringView()); + EXPECT_EQ("401", response->headers().getStatusValue()); cleanup(); } diff --git a/test/extensions/filters/http/lua/lua_integration_test.cc b/test/extensions/filters/http/lua/lua_integration_test.cc index 082f6217e571..cfc4b59ec4a2 100644 --- a/test/extensions/filters/http/lua/lua_integration_test.cc +++ b/test/extensions/filters/http/lua/lua_integration_test.cc @@ -353,7 +353,7 @@ name: lua cleanup(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("403", response->headers().Status()->value().getStringView()); + EXPECT_EQ("403", response->headers().getStatusValue()); EXPECT_EQ("nope", response->body()); } @@ -404,7 +404,7 @@ name: envoy.filters.http.lua cleanup(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } // Filter alters headers and changes route. @@ -437,7 +437,7 @@ name: lua cleanup(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } // Should survive from 30 calls when calling streamInfo():dynamicMetadata(). 
This is a regression @@ -471,7 +471,7 @@ name: lua response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } cleanup(); @@ -571,7 +571,7 @@ name: lua response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); cleanup(); } diff --git a/test/extensions/filters/http/ratelimit/ratelimit_integration_test.cc b/test/extensions/filters/http/ratelimit/ratelimit_integration_test.cc index 76ff3a39819b..38989dcbc7d7 100644 --- a/test/extensions/filters/http/ratelimit/ratelimit_integration_test.cc +++ b/test/extensions/filters/http/ratelimit/ratelimit_integration_test.cc @@ -85,11 +85,10 @@ class RatelimitIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, RELEASE_ASSERT(result, result.message()); result = ratelimit_request_->waitForEndStream(*dispatcher_); RELEASE_ASSERT(result, result.message()); - EXPECT_EQ("POST", ratelimit_request_->headers().Method()->value().getStringView()); + EXPECT_EQ("POST", ratelimit_request_->headers().getMethodValue()); EXPECT_EQ("/envoy.service.ratelimit.v2.RateLimitService/ShouldRateLimit", - ratelimit_request_->headers().Path()->value().getStringView()); - EXPECT_EQ("application/grpc", - ratelimit_request_->headers().ContentType()->value().getStringView()); + ratelimit_request_->headers().getPathValue()); + EXPECT_EQ("application/grpc", ratelimit_request_->headers().getContentTypeValue()); envoy::service::ratelimit::v3::RateLimitRequest expected_request_msg; expected_request_msg.set_domain("some_domain"); @@ -116,15 +115,14 @@ class RatelimitIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, EXPECT_EQ(request_size_, upstream_request_->bodyLength()); EXPECT_TRUE(response_->complete()); - EXPECT_EQ("200", response_->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response_->headers().getStatusValue()); EXPECT_EQ(response_size_, response_->body().size()); } void waitForFailedUpstreamResponse(uint32_t response_code) { response_->waitForEndStream(); EXPECT_TRUE(response_->complete()); - EXPECT_EQ(std::to_string(response_code), - response_->headers().Status()->value().getStringView()); + EXPECT_EQ(std::to_string(response_code), response_->headers().getStatusValue()); } void sendRateLimitResponse(envoy::service::ratelimit::v3::RateLimitResponse::Code code, diff --git a/test/extensions/filters/http/rbac/rbac_filter_integration_test.cc b/test/extensions/filters/http/rbac/rbac_filter_integration_test.cc index a7b8f8915159..28e4420db014 100644 --- a/test/extensions/filters/http/rbac/rbac_filter_integration_test.cc +++ b/test/extensions/filters/http/rbac/rbac_filter_integration_test.cc @@ -90,7 +90,7 @@ TEST_P(RBACIntegrationTest, Allowed) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } TEST_P(RBACIntegrationTest, Denied) { @@ -110,7 +110,7 @@ TEST_P(RBACIntegrationTest, Denied) { 1024); response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("403", response->headers().Status()->value().getStringView()); + EXPECT_EQ("403", response->headers().getStatusValue()); } TEST_P(RBACIntegrationTest, DeniedWithPrefixRule) { @@ -136,7 +136,7 @@ TEST_P(RBACIntegrationTest, DeniedWithPrefixRule) { 
response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } TEST_P(RBACIntegrationTest, RbacPrefixRuleUseNormalizePath) { @@ -160,7 +160,7 @@ TEST_P(RBACIntegrationTest, RbacPrefixRuleUseNormalizePath) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("403", response->headers().Status()->value().getStringView()); + EXPECT_EQ("403", response->headers().getStatusValue()); } TEST_P(RBACIntegrationTest, DeniedHeadReply) { @@ -180,9 +180,9 @@ TEST_P(RBACIntegrationTest, DeniedHeadReply) { 1024); response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("403", response->headers().Status()->value().getStringView()); + EXPECT_EQ("403", response->headers().getStatusValue()); ASSERT_TRUE(response->headers().ContentLength()); - EXPECT_NE("0", response->headers().ContentLength()->value().getStringView()); + EXPECT_NE("0", response->headers().getContentLengthValue()); EXPECT_THAT(response->body(), ::testing::IsEmpty()); } @@ -220,7 +220,7 @@ TEST_P(RBACIntegrationTest, RouteOverride) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } TEST_P(RBACIntegrationTest, PathWithQueryAndFragment) { @@ -246,7 +246,7 @@ TEST_P(RBACIntegrationTest, PathWithQueryAndFragment) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } } @@ -273,7 +273,7 @@ TEST_P(RBACIntegrationTest, PathIgnoreCase) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } } diff --git a/test/extensions/filters/http/squash/squash_filter_integration_test.cc b/test/extensions/filters/http/squash/squash_filter_integration_test.cc index 79cf9a91c23c..fa8dd4ec1a75 100644 --- a/test/extensions/filters/http/squash/squash_filter_integration_test.cc +++ b/test/extensions/filters/http/squash/squash_filter_integration_test.cc @@ -133,8 +133,8 @@ TEST_P(SquashFilterIntegrationTest, TestHappyPath) { response->waitForEndStream(); - EXPECT_EQ("POST", create_stream->headers().Method()->value().getStringView()); - EXPECT_EQ("/api/v2/debugattachment/", create_stream->headers().Path()->value().getStringView()); + EXPECT_EQ("POST", create_stream->headers().getMethodValue()); + EXPECT_EQ("/api/v2/debugattachment/", create_stream->headers().getPathValue()); // Make sure the env var was replaced ProtobufWkt::Struct actualbody; TestUtility::loadFromJson(create_stream->body().toString(), actualbody); @@ -146,11 +146,10 @@ TEST_P(SquashFilterIntegrationTest, TestHappyPath) { EXPECT_TRUE(MessageDifferencer::Equals(expectedbody, actualbody)); // The second request should be for the created object - EXPECT_EQ("GET", get_stream->headers().Method()->value().getStringView()); - EXPECT_EQ("/api/v2/debugattachment/oF8iVdiJs5", - get_stream->headers().Path()->value().getStringView()); + EXPECT_EQ("GET", get_stream->headers().getMethodValue()); + EXPECT_EQ("/api/v2/debugattachment/oF8iVdiJs5", get_stream->headers().getPathValue()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + 
EXPECT_EQ("200", response->headers().getStatusValue()); } TEST_P(SquashFilterIntegrationTest, ErrorAttaching) { @@ -164,7 +163,7 @@ TEST_P(SquashFilterIntegrationTest, ErrorAttaching) { response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } TEST_P(SquashFilterIntegrationTest, TimeoutAttaching) { @@ -180,7 +179,7 @@ TEST_P(SquashFilterIntegrationTest, TimeoutAttaching) { response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } TEST_P(SquashFilterIntegrationTest, ErrorNoSquashServer) { @@ -191,7 +190,7 @@ TEST_P(SquashFilterIntegrationTest, ErrorNoSquashServer) { response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } TEST_P(SquashFilterIntegrationTest, BadCreateResponse) { @@ -203,7 +202,7 @@ TEST_P(SquashFilterIntegrationTest, BadCreateResponse) { response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } TEST_P(SquashFilterIntegrationTest, BadGetResponse) { @@ -217,7 +216,7 @@ TEST_P(SquashFilterIntegrationTest, BadGetResponse) { response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } } // namespace Envoy diff --git a/test/extensions/filters/http/tap/tap_filter_integration_test.cc b/test/extensions/filters/http/tap/tap_filter_integration_test.cc index 61d01e68899f..ff4f315a3236 100644 --- a/test/extensions/filters/http/tap/tap_filter_integration_test.cc +++ b/test/extensions/filters/http/tap/tap_filter_integration_test.cc @@ -87,7 +87,7 @@ class TapIntegrationTest : public testing::TestWithParammakeRequestWithBody(admin_request_headers, admin_request_yaml); admin_response_->waitForHeaders(); - EXPECT_EQ("200", admin_response_->headers().Status()->value().getStringView()); + EXPECT_EQ("200", admin_response_->headers().getStatusValue()); EXPECT_FALSE(admin_response_->complete()); } diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_client_session_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_client_session_test.cc index 39cf43207efb..f489a983b43e 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_client_session_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_client_session_test.cc @@ -188,7 +188,7 @@ TEST_P(EnvoyQuicClientSessionTest, NewStream) { // Response headers should be propagated to decoder. 
EXPECT_CALL(response_decoder, decodeHeaders_(_, /*end_stream=*/true)) .WillOnce(Invoke([](const Http::ResponseHeaderMapPtr& decoded_headers, bool) { - EXPECT_EQ("200", decoded_headers->Status()->value().getStringView()); + EXPECT_EQ("200", decoded_headers->getStatusValue()); })); stream.OnStreamHeaderList(/*fin=*/true, headers.uncompressed_header_bytes(), headers); } diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_client_stream_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_client_stream_test.cc index 5aa201c4a1f8..99a2931d933d 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_client_stream_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_client_stream_test.cc @@ -116,7 +116,7 @@ TEST_P(EnvoyQuicClientStreamTest, PostRequestAndResponse) { EXPECT_CALL(stream_decoder_, decodeHeaders_(_, /*end_stream=*/false)) .WillOnce(Invoke([](const Http::ResponseHeaderMapPtr& headers, bool) { - EXPECT_EQ("200", headers->Status()->value().getStringView()); + EXPECT_EQ("200", headers->getStatusValue()); })); quic_stream_->OnStreamHeaderList(/*fin=*/false, response_headers_.uncompressed_header_bytes(), response_headers_); @@ -163,7 +163,7 @@ TEST_P(EnvoyQuicClientStreamTest, OutOfOrderTrailers) { quic_stream_->encodeHeaders(request_headers_, true); EXPECT_CALL(stream_decoder_, decodeHeaders_(_, /*end_stream=*/false)) .WillOnce(Invoke([](const Http::ResponseHeaderMapPtr& headers, bool) { - EXPECT_EQ("200", headers->Status()->value().getStringView()); + EXPECT_EQ("200", headers->getStatusValue()); })); quic_stream_->OnStreamHeaderList(/*fin=*/false, response_headers_.uncompressed_header_bytes(), response_headers_); diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_server_session_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_server_session_test.cc index 52727dc8dcbb..188603a527e3 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_server_session_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_server_session_test.cc @@ -237,10 +237,9 @@ TEST_P(EnvoyQuicServerSessionTest, NewStream) { // Request headers should be propagated to decoder. EXPECT_CALL(request_decoder, decodeHeaders_(_, /*end_stream=*/true)) .WillOnce(Invoke([&host](const Http::RequestHeaderMapPtr& decoded_headers, bool) { - EXPECT_EQ(host, decoded_headers->Host()->value().getStringView()); - EXPECT_EQ("/", decoded_headers->Path()->value().getStringView()); - EXPECT_EQ(Http::Headers::get().MethodValues.Get, - decoded_headers->Method()->value().getStringView()); + EXPECT_EQ(host, decoded_headers->getHostValue()); + EXPECT_EQ("/", decoded_headers->getPathValue()); + EXPECT_EQ(Http::Headers::get().MethodValues.Get, decoded_headers->getMethodValue()); })); stream->OnStreamHeaderList(/*fin=*/true, headers.uncompressed_header_bytes(), headers); } @@ -427,10 +426,9 @@ TEST_P(EnvoyQuicServerSessionTest, WriteUpdatesDelayCloseTimer) { // Request headers should be propagated to decoder. 
EXPECT_CALL(request_decoder, decodeHeaders_(_, /*end_stream=*/true)) .WillOnce(Invoke([&host](const Http::RequestHeaderMapPtr& decoded_headers, bool) { - EXPECT_EQ(host, decoded_headers->Host()->value().getStringView()); - EXPECT_EQ("/", decoded_headers->Path()->value().getStringView()); - EXPECT_EQ(Http::Headers::get().MethodValues.Get, - decoded_headers->Method()->value().getStringView()); + EXPECT_EQ(host, decoded_headers->getHostValue()); + EXPECT_EQ("/", decoded_headers->getPathValue()); + EXPECT_EQ(Http::Headers::get().MethodValues.Get, decoded_headers->getMethodValue()); })); stream->OnStreamHeaderList(/*fin=*/true, request_headers.uncompressed_header_bytes(), request_headers); @@ -522,10 +520,9 @@ TEST_P(EnvoyQuicServerSessionTest, FlushCloseNoTimeout) { // Request headers should be propagated to decoder. EXPECT_CALL(request_decoder, decodeHeaders_(_, /*end_stream=*/true)) .WillOnce(Invoke([&host](const Http::RequestHeaderMapPtr& decoded_headers, bool) { - EXPECT_EQ(host, decoded_headers->Host()->value().getStringView()); - EXPECT_EQ("/", decoded_headers->Path()->value().getStringView()); - EXPECT_EQ(Http::Headers::get().MethodValues.Get, - decoded_headers->Method()->value().getStringView()); + EXPECT_EQ(host, decoded_headers->getHostValue()); + EXPECT_EQ("/", decoded_headers->getPathValue()); + EXPECT_EQ(Http::Headers::get().MethodValues.Get, decoded_headers->getMethodValue()); })); stream->OnStreamHeaderList(/*fin=*/true, request_headers.uncompressed_header_bytes(), request_headers); @@ -817,10 +814,9 @@ TEST_P(EnvoyQuicServerSessionTest, SendBufferWatermark) { // Request headers should be propagated to decoder. EXPECT_CALL(request_decoder, decodeHeaders_(_, /*end_stream=*/true)) .WillOnce(Invoke([&host](const Http::RequestHeaderMapPtr& decoded_headers, bool) { - EXPECT_EQ(host, decoded_headers->Host()->value().getStringView()); - EXPECT_EQ("/", decoded_headers->Path()->value().getStringView()); - EXPECT_EQ(Http::Headers::get().MethodValues.Get, - decoded_headers->Method()->value().getStringView()); + EXPECT_EQ(host, decoded_headers->getHostValue()); + EXPECT_EQ("/", decoded_headers->getPathValue()); + EXPECT_EQ(Http::Headers::get().MethodValues.Get, decoded_headers->getMethodValue()); })); stream1->OnStreamHeaderList(/*fin=*/true, request_headers.uncompressed_header_bytes(), request_headers); @@ -850,10 +846,9 @@ TEST_P(EnvoyQuicServerSessionTest, SendBufferWatermark) { dynamic_cast(envoy_quic_session_.GetOrCreateStream(stream_id + 4)); EXPECT_CALL(request_decoder2, decodeHeaders_(_, /*end_stream=*/true)) .WillOnce(Invoke([&host](const Http::RequestHeaderMapPtr& decoded_headers, bool) { - EXPECT_EQ(host, decoded_headers->Host()->value().getStringView()); - EXPECT_EQ("/", decoded_headers->Path()->value().getStringView()); - EXPECT_EQ(Http::Headers::get().MethodValues.Get, - decoded_headers->Method()->value().getStringView()); + EXPECT_EQ(host, decoded_headers->getHostValue()); + EXPECT_EQ("/", decoded_headers->getPathValue()); + EXPECT_EQ(Http::Headers::get().MethodValues.Get, decoded_headers->getMethodValue()); })); stream2->OnStreamHeaderList(/*fin=*/true, request_headers.uncompressed_header_bytes(), request_headers); @@ -882,10 +877,9 @@ TEST_P(EnvoyQuicServerSessionTest, SendBufferWatermark) { dynamic_cast(envoy_quic_session_.GetOrCreateStream(stream_id + 8)); EXPECT_CALL(request_decoder3, decodeHeaders_(_, /*end_stream=*/true)) .WillOnce(Invoke([&host](const Http::RequestHeaderMapPtr& decoded_headers, bool) { - EXPECT_EQ(host, 
decoded_headers->Host()->value().getStringView()); - EXPECT_EQ("/", decoded_headers->Path()->value().getStringView()); - EXPECT_EQ(Http::Headers::get().MethodValues.Get, - decoded_headers->Method()->value().getStringView()); + EXPECT_EQ(host, decoded_headers->getHostValue()); + EXPECT_EQ("/", decoded_headers->getPathValue()); + EXPECT_EQ(Http::Headers::get().MethodValues.Get, decoded_headers->getMethodValue()); })); stream3->OnStreamHeaderList(/*fin=*/true, request_headers.uncompressed_header_bytes(), request_headers); diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_server_stream_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_server_stream_test.cc index 735f2690d031..ab792b1e7024 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_server_stream_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_server_stream_test.cc @@ -101,10 +101,9 @@ class EnvoyQuicServerStreamTest : public testing::TestWithParam { size_t sendRequest(const std::string& payload, bool fin, size_t decoder_buffer_high_watermark) { EXPECT_CALL(stream_decoder_, decodeHeaders_(_, /*end_stream=*/false)) .WillOnce(Invoke([this](const Http::RequestHeaderMapPtr& headers, bool) { - EXPECT_EQ(host_, headers->Host()->value().getStringView()); - EXPECT_EQ("/", headers->Path()->value().getStringView()); - EXPECT_EQ(Http::Headers::get().MethodValues.Post, - headers->Method()->value().getStringView()); + EXPECT_EQ(host_, headers->getHostValue()); + EXPECT_EQ("/", headers->getPathValue()); + EXPECT_EQ(Http::Headers::get().MethodValues.Post, headers->getMethodValue()); })); quic_stream_->OnStreamHeaderList(/*fin=*/false, request_headers_.uncompressed_header_bytes(), request_headers_); @@ -161,10 +160,9 @@ TEST_P(EnvoyQuicServerStreamTest, GetRequestAndResponse) { EXPECT_CALL(stream_decoder_, decodeHeaders_(_, /*end_stream=*/true)) .WillOnce(Invoke([this](const Http::RequestHeaderMapPtr& headers, bool) { - EXPECT_EQ(host_, headers->Host()->value().getStringView()); - EXPECT_EQ("/", headers->Path()->value().getStringView()); - EXPECT_EQ(Http::Headers::get().MethodValues.Get, - headers->Method()->value().getStringView()); + EXPECT_EQ(host_, headers->getHostValue()); + EXPECT_EQ("/", headers->getPathValue()); + EXPECT_EQ(Http::Headers::get().MethodValues.Get, headers->getMethodValue()); })); quic_stream_->OnStreamHeaderList(/*fin=*/true, request_headers.uncompressed_header_bytes(), request_headers); @@ -198,10 +196,9 @@ TEST_P(EnvoyQuicServerStreamTest, OutOfOrderTrailers) { } EXPECT_CALL(stream_decoder_, decodeHeaders_(_, /*end_stream=*/false)) .WillOnce(Invoke([this](const Http::RequestHeaderMapPtr& headers, bool) { - EXPECT_EQ(host_, headers->Host()->value().getStringView()); - EXPECT_EQ("/", headers->Path()->value().getStringView()); - EXPECT_EQ(Http::Headers::get().MethodValues.Post, - headers->Method()->value().getStringView()); + EXPECT_EQ(host_, headers->getHostValue()); + EXPECT_EQ("/", headers->getPathValue()); + EXPECT_EQ(Http::Headers::get().MethodValues.Post, headers->getMethodValue()); })); quic_stream_->OnStreamHeaderList(/*fin=*/false, request_headers_.uncompressed_header_bytes(), request_headers_); @@ -271,10 +268,9 @@ TEST_P(EnvoyQuicServerStreamTest, ReadDisableUponLargePost) { TEST_P(EnvoyQuicServerStreamTest, ReadDisableAndReEnableImmediately) { EXPECT_CALL(stream_decoder_, decodeHeaders_(_, /*end_stream=*/false)) .WillOnce(Invoke([this](const Http::RequestHeaderMapPtr& headers, bool) { - EXPECT_EQ(host_, headers->Host()->value().getStringView()); - EXPECT_EQ("/", 
headers->Path()->value().getStringView()); - EXPECT_EQ(Http::Headers::get().MethodValues.Post, - headers->Method()->value().getStringView()); + EXPECT_EQ(host_, headers->getHostValue()); + EXPECT_EQ("/", headers->getPathValue()); + EXPECT_EQ(Http::Headers::get().MethodValues.Post, headers->getMethodValue()); })); quic_stream_->OnStreamHeaderList(/*fin=*/false, request_headers_.uncompressed_header_bytes(), request_headers_); diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_utils_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_utils_test.cc index de9883880c2e..cd5004c39c2d 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_utils_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_utils_test.cc @@ -45,16 +45,14 @@ TEST(EnvoyQuicUtilsTest, ConversionBetweenQuicAddressAndEnvoyAddress) { TEST(EnvoyQuicUtilsTest, HeadersConversion) { spdy::SpdyHeaderBlock headers_block; - headers_block[":host"] = "www.google.com"; + headers_block[":authority"] = "www.google.com"; headers_block[":path"] = "/index.hml"; headers_block[":scheme"] = "https"; auto envoy_headers = spdyHeaderBlockToEnvoyHeaders(headers_block); EXPECT_EQ(headers_block.size(), envoy_headers->size()); - EXPECT_EQ("www.google.com", - envoy_headers->get(Http::LowerCaseString(":host"))->value().getStringView()); - EXPECT_EQ("/index.hml", - envoy_headers->get(Http::LowerCaseString(":path"))->value().getStringView()); - EXPECT_EQ("https", envoy_headers->get(Http::LowerCaseString(":scheme"))->value().getStringView()); + EXPECT_EQ("www.google.com", envoy_headers->getHostValue()); + EXPECT_EQ("/index.hml", envoy_headers->getPathValue()); + EXPECT_EQ("https", envoy_headers->getSchemeValue()); quic::QuicHeaderList quic_headers = quic::test::AsHeaderList(headers_block); auto envoy_headers2 = quicHeadersToEnvoyHeaders(quic_headers); diff --git a/test/extensions/quic_listeners/quiche/integration/quic_http_integration_test.cc b/test/extensions/quic_listeners/quiche/integration/quic_http_integration_test.cc index a14e7cc9650e..7c701372ff21 100644 --- a/test/extensions/quic_listeners/quiche/integration/quic_http_integration_test.cc +++ b/test/extensions/quic_listeners/quiche/integration/quic_http_integration_test.cc @@ -432,12 +432,12 @@ TEST_P(QuicHttpIntegrationTest, StopAcceptingConnectionsWhenOverloaded) { upstream_request_->encodeData(10, true); response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); // New request should be rejected. 
auto response2 = codec_client_->makeHeaderOnlyRequest(default_request_headers_); response2->waitForEndStream(); - EXPECT_EQ("503", response2->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response2->headers().getStatusValue()); EXPECT_EQ("envoy overloaded", response2->body()); codec_client_->close(); diff --git a/test/extensions/stats_sinks/hystrix/hystrix_test.cc b/test/extensions/stats_sinks/hystrix/hystrix_test.cc index f8527b34e099..b691c61bc39e 100644 --- a/test/extensions/stats_sinks/hystrix/hystrix_test.cc +++ b/test/extensions/stats_sinks/hystrix/hystrix_test.cc @@ -532,7 +532,7 @@ TEST_F(HystrixSinkTest, HystrixEventStreamHandler) { EXPECT_EQ(response_headers.AccessControlAllowOrigin()->value(), "*"); std::string access_control_allow_headers = - std::string(response_headers.AccessControlAllowHeaders()->value().getStringView()); + std::string(response_headers.getAccessControlAllowHeadersValue()); EXPECT_THAT(access_control_allow_headers, HasSubstr("Accept")); } diff --git a/test/extensions/stats_sinks/metrics_service/metrics_service_integration_test.cc b/test/extensions/stats_sinks/metrics_service/metrics_service_integration_test.cc index 5539ece5fdf8..55e8baafcfae 100644 --- a/test/extensions/stats_sinks/metrics_service/metrics_service_integration_test.cc +++ b/test/extensions/stats_sinks/metrics_service/metrics_service_integration_test.cc @@ -79,11 +79,10 @@ class MetricsServiceIntegrationTest : public Grpc::GrpcClientIntegrationParamTes while (!(known_counter_exists && known_gauge_exists && known_histogram_exists)) { envoy::service::metrics::v3::StreamMetricsMessage request_msg; VERIFY_ASSERTION(metrics_service_request_->waitForGrpcMessage(*dispatcher_, request_msg)); - EXPECT_EQ("POST", metrics_service_request_->headers().Method()->value().getStringView()); + EXPECT_EQ("POST", metrics_service_request_->headers().getMethodValue()); EXPECT_EQ("/envoy.service.metrics.v2.MetricsService/StreamMetrics", - metrics_service_request_->headers().Path()->value().getStringView()); - EXPECT_EQ("application/grpc", - metrics_service_request_->headers().ContentType()->value().getStringView()); + metrics_service_request_->headers().getPathValue()); + EXPECT_EQ("application/grpc", metrics_service_request_->headers().getContentTypeValue()); EXPECT_TRUE(request_msg.envoy_metrics_size() > 0); const Protobuf::RepeatedPtrField<::io::prometheus::client::MetricFamily>& envoy_metrics = request_msg.envoy_metrics(); diff --git a/test/extensions/tracers/datadog/datadog_tracer_impl_test.cc b/test/extensions/tracers/datadog/datadog_tracer_impl_test.cc index d8fec3cbadbb..5043808dd78b 100644 --- a/test/extensions/tracers/datadog/datadog_tracer_impl_test.cc +++ b/test/extensions/tracers/datadog/datadog_tracer_impl_test.cc @@ -154,9 +154,8 @@ TEST_F(DatadogDriverTest, FlushSpansTimer) { const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { callback = &callbacks; - EXPECT_EQ("fake_cluster", message->headers().Host()->value().getStringView()); - EXPECT_EQ("application/msgpack", - message->headers().ContentType()->value().getStringView()); + EXPECT_EQ("fake_cluster", message->headers().getHostValue()); + EXPECT_EQ("application/msgpack", message->headers().getContentTypeValue()); return &request; })); diff --git a/test/extensions/tracers/lightstep/lightstep_tracer_impl_test.cc b/test/extensions/tracers/lightstep/lightstep_tracer_impl_test.cc index 48c50dd51bbf..8b72f2cffaec 100644 --- a/test/extensions/tracers/lightstep/lightstep_tracer_impl_test.cc +++ 
b/test/extensions/tracers/lightstep/lightstep_tracer_impl_test.cc @@ -245,10 +245,9 @@ TEST_F(LightStepDriverTest, FlushSeveralSpans) { callback = &callbacks; EXPECT_EQ("/lightstep.collector.CollectorService/Report", - message->headers().Path()->value().getStringView()); - EXPECT_EQ("fake_cluster", message->headers().Host()->value().getStringView()); - EXPECT_EQ("application/grpc", - message->headers().ContentType()->value().getStringView()); + message->headers().getPathValue()); + EXPECT_EQ("fake_cluster", message->headers().getHostValue()); + EXPECT_EQ("application/grpc", message->headers().getContentTypeValue()); return &request; })); @@ -416,10 +415,9 @@ TEST_F(LightStepDriverTest, FlushOneFailure) { callback = &callbacks; EXPECT_EQ("/lightstep.collector.CollectorService/Report", - message->headers().Path()->value().getStringView()); - EXPECT_EQ("fake_cluster", message->headers().Host()->value().getStringView()); - EXPECT_EQ("application/grpc", - message->headers().ContentType()->value().getStringView()); + message->headers().getPathValue()); + EXPECT_EQ("fake_cluster", message->headers().getHostValue()); + EXPECT_EQ("application/grpc", message->headers().getContentTypeValue()); return &request; })); @@ -464,10 +462,9 @@ TEST_F(LightStepDriverTest, FlushWithActiveReport) { callback = &callbacks; EXPECT_EQ("/lightstep.collector.CollectorService/Report", - message->headers().Path()->value().getStringView()); - EXPECT_EQ("fake_cluster", message->headers().Host()->value().getStringView()); - EXPECT_EQ("application/grpc", - message->headers().ContentType()->value().getStringView()); + message->headers().getPathValue()); + EXPECT_EQ("fake_cluster", message->headers().getHostValue()); + EXPECT_EQ("application/grpc", message->headers().getContentTypeValue()); return &request; })); @@ -510,10 +507,9 @@ TEST_F(LightStepDriverTest, OnFullWithActiveReport) { callback = &callbacks; EXPECT_EQ("/lightstep.collector.CollectorService/Report", - message->headers().Path()->value().getStringView()); - EXPECT_EQ("fake_cluster", message->headers().Host()->value().getStringView()); - EXPECT_EQ("application/grpc", - message->headers().ContentType()->value().getStringView()); + message->headers().getPathValue()); + EXPECT_EQ("fake_cluster", message->headers().getHostValue()); + EXPECT_EQ("application/grpc", message->headers().getContentTypeValue()); return &request; })); @@ -628,7 +624,7 @@ TEST_F(LightStepDriverTest, SerializeAndDeserializeContext) { {Tracing::Reason::Sampling, true}); EXPECT_EQ(1U, stats_.counter("tracing.opentracing.span_context_extraction_error").value()); - std::string injected_ctx(request_headers_.OtSpanContext()->value().getStringView()); + std::string injected_ctx(request_headers_.getOtSpanContextValue()); EXPECT_FALSE(injected_ctx.empty()); // Supply empty context. @@ -639,7 +635,7 @@ TEST_F(LightStepDriverTest, SerializeAndDeserializeContext) { EXPECT_EQ(nullptr, request_headers_.OtSpanContext()); span->injectContext(request_headers_); - injected_ctx = std::string(request_headers_.OtSpanContext()->value().getStringView()); + injected_ctx = std::string(request_headers_.getOtSpanContextValue()); EXPECT_FALSE(injected_ctx.empty()); // Context can be parsed fine. 
@@ -653,7 +649,7 @@ TEST_F(LightStepDriverTest, SerializeAndDeserializeContext) { config_, request_headers_, operation_name_, start_time_, {Tracing::Reason::Sampling, true}); request_headers_.removeOtSpanContext(); span_with_parent->injectContext(request_headers_); - injected_ctx = std::string(request_headers_.OtSpanContext()->value().getStringView()); + injected_ctx = std::string(request_headers_.getOtSpanContextValue()); EXPECT_FALSE(injected_ctx.empty()); } } @@ -713,10 +709,8 @@ TEST_F(LightStepDriverTest, SpawnChild) { childViaHeaders->injectContext(base1); childViaSpawn->injectContext(base2); - std::string base1_context = - Base64::decode(std::string(base1.OtSpanContext()->value().getStringView())); - std::string base2_context = - Base64::decode(std::string(base2.OtSpanContext()->value().getStringView())); + std::string base1_context = Base64::decode(std::string(base1.getOtSpanContextValue())); + std::string base2_context = Base64::decode(std::string(base2.getOtSpanContextValue())); EXPECT_FALSE(base1_context.empty()); EXPECT_FALSE(base2_context.empty()); diff --git a/test/extensions/tracers/zipkin/zipkin_tracer_impl_test.cc b/test/extensions/tracers/zipkin/zipkin_tracer_impl_test.cc index 96078b70c898..7950d95198aa 100644 --- a/test/extensions/tracers/zipkin/zipkin_tracer_impl_test.cc +++ b/test/extensions/tracers/zipkin/zipkin_tracer_impl_test.cc @@ -90,9 +90,9 @@ class ZipkinDriverTest : public testing::Test { const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { callback = &callbacks; - EXPECT_EQ("/api/v1/spans", message->headers().Path()->value().getStringView()); - EXPECT_EQ("fake_cluster", message->headers().Host()->value().getStringView()); - EXPECT_EQ(content_type, message->headers().ContentType()->value().getStringView()); + EXPECT_EQ("/api/v1/spans", message->headers().getPathValue()); + EXPECT_EQ("fake_cluster", message->headers().getHostValue()); + EXPECT_EQ(content_type, message->headers().getContentTypeValue()); return &request; })); @@ -235,10 +235,9 @@ TEST_F(ZipkinDriverTest, FlushOneSpanReportFailure) { const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { callback = &callbacks; - EXPECT_EQ("/api/v1/spans", message->headers().Path()->value().getStringView()); - EXPECT_EQ("fake_cluster", message->headers().Host()->value().getStringView()); - EXPECT_EQ("application/json", - message->headers().ContentType()->value().getStringView()); + EXPECT_EQ("/api/v1/spans", message->headers().getPathValue()); + EXPECT_EQ("fake_cluster", message->headers().getHostValue()); + EXPECT_EQ("application/json", message->headers().getContentTypeValue()); return &request; })); diff --git a/test/extensions/transport_sockets/tls/integration/ssl_integration_test.cc b/test/extensions/transport_sockets/tls/integration/ssl_integration_test.cc index bd755736ef45..aebffa4b2c35 100644 --- a/test/extensions/transport_sockets/tls/integration/ssl_integration_test.cc +++ b/test/extensions/transport_sockets/tls/integration/ssl_integration_test.cc @@ -174,7 +174,7 @@ TEST_P(SslIntegrationTest, AdminCertEndpoint) { BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest( lookupPort("admin"), "GET", "/certs", "", downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } // Validate certificate selection across different certificate types and client TLS versions. 
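The conversions in these tracer and SSL test hunks all follow one pattern: the inline-header lookups (Status(), Path(), Host(), ContentType()) return a pointer that is null when the header is absent, so chaining ->value().getStringView() is only safe after a presence check, whereas the get<Header>Value() helpers fold that check in and yield an empty view for a missing header. A minimal sketch of that behavior, assuming an absl::string_view return type and the usual Envoy include paths; the standalone helper below is hypothetical and not part of this patch:

#include "absl/strings/string_view.h"
#include "envoy/http/header_map.h"  // assumed location of ResponseHeaderMap / HeaderEntry

absl::string_view statusValueOrEmpty(const Http::ResponseHeaderMap& headers) {
  // Null-safe equivalent of headers.Status()->value().getStringView(): an
  // absent :status header yields an empty view instead of a null dereference.
  const Http::HeaderEntry* status = headers.Status();
  return status != nullptr ? status->value().getStringView() : absl::string_view{};
}

Note that where a test still wants to assert presence or absence of a header (for example EXPECT_NE(response->headers().GrpcStatus(), nullptr) later in this patch), the raw pointer form is kept, since an empty value and a missing header are indistinguishable through the value accessor alone.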
@@ -444,7 +444,7 @@ TEST_P(SslTapIntegrationTest, TwoRequestsWithBinaryProto) { EXPECT_TRUE(upstream_request_->complete()); EXPECT_EQ(128, upstream_request_->bodyLength()); ASSERT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ(256, response->body().size()); checkStats(); envoy::config::core::v3::Address expected_local_address; @@ -482,7 +482,7 @@ TEST_P(SslTapIntegrationTest, TwoRequestsWithBinaryProto) { EXPECT_TRUE(upstream_request_->complete()); EXPECT_EQ(128, upstream_request_->bodyLength()); ASSERT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ(256, response->body().size()); checkStats(); codec_client_->close(); diff --git a/test/integration/api_version_integration_test.cc b/test/integration/api_version_integration_test.cc index d7681cfd4e98..7dcfffc6cf11 100644 --- a/test/integration/api_version_integration_test.cc +++ b/test/integration/api_version_integration_test.cc @@ -75,7 +75,7 @@ class ApiVersionIntegrationTest : public testing::TestWithParam, RELEASE_ASSERT(result, result.message()); result = xds_stream_->waitForHeadersComplete(); RELEASE_ASSERT(result, result.message()); - endpoint_ = std::string(xds_stream_->headers().Path()->value().getStringView()); + endpoint_ = std::string(xds_stream_->headers().getPathValue()); ENVOY_LOG_MISC(debug, "xDS endpoint {}", endpoint_); } } diff --git a/test/integration/cds_integration_test.cc b/test/integration/cds_integration_test.cc index 851a6ed9f3a3..b9e60886086c 100644 --- a/test/integration/cds_integration_test.cc +++ b/test/integration/cds_integration_test.cc @@ -160,7 +160,7 @@ TEST_P(CdsIntegrationTest, CdsClusterUpDownUp) { BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest( lookupPort("http"), "GET", "/cluster1", "", downstream_protocol_, version_, "foo.com"); ASSERT_TRUE(response->complete()); - EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response->headers().getStatusValue()); cleanupUpstreamAndDownstream(); codec_client_->waitForDisconnect(); diff --git a/test/integration/eds_integration_test.cc b/test/integration/eds_integration_test.cc index b9f51205fc7d..16a694eadafc 100644 --- a/test/integration/eds_integration_test.cc +++ b/test/integration/eds_integration_test.cc @@ -390,7 +390,7 @@ TEST_P(EdsIntegrationTest, StatsReadyFilter) { BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest( lookupPort("http"), "GET", "/cluster1", "", downstream_protocol_, version_, "foo.com"); ASSERT_TRUE(response->complete()); - EXPECT_EQ("500", response->headers().Status()->value().getStringView()); + EXPECT_EQ("500", response->headers().getStatusValue()); EXPECT_EQ("EDS not ready", response->body()); cleanupUpstreamAndDownstream(); @@ -401,7 +401,7 @@ TEST_P(EdsIntegrationTest, StatsReadyFilter) { response = IntegrationUtil::makeSingleRequest(lookupPort("http"), "GET", "/cluster1", "", downstream_protocol_, version_, "foo.com"); ASSERT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ("EDS is ready", response->body()); cleanupUpstreamAndDownstream(); diff --git a/test/integration/filter_manager_integration_test.cc b/test/integration/filter_manager_integration_test.cc index 
5a9dbbc52366..fe6069e40095 100644 --- a/test/integration/filter_manager_integration_test.cc +++ b/test/integration/filter_manager_integration_test.cc @@ -603,7 +603,7 @@ TEST_P(InjectDataWithHttpConnectionManagerIntegrationTest, response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ("greetings", response->body()); } diff --git a/test/integration/hds_integration_test.cc b/test/integration/hds_integration_test.cc index dfdc804beaee..91bc7143110b 100644 --- a/test/integration/hds_integration_test.cc +++ b/test/integration/hds_integration_test.cc @@ -80,9 +80,9 @@ class HdsIntegrationTest : public testing::TestWithParamwaitForEndStream(*dispatcher_)); host_upstream_->set_allow_unexpected_disconnects(true); - EXPECT_EQ(host_stream_->headers().Path()->value().getStringView(), "/healthcheck"); - EXPECT_EQ(host_stream_->headers().Method()->value().getStringView(), "GET"); - EXPECT_EQ(host_stream_->headers().Host()->value().getStringView(), "anna"); + EXPECT_EQ(host_stream_->headers().getPathValue(), "/healthcheck"); + EXPECT_EQ(host_stream_->headers().getMethodValue(), "GET"); + EXPECT_EQ(host_stream_->headers().getHostValue(), "anna"); if (!cluster2.empty()) { ASSERT_TRUE(host2_upstream_->waitForHttpConnection(*dispatcher_, host2_fake_connection_)); @@ -90,9 +90,9 @@ class HdsIntegrationTest : public testing::TestWithParamwaitForEndStream(*dispatcher_)); host2_upstream_->set_allow_unexpected_disconnects(true); - EXPECT_EQ(host2_stream_->headers().Path()->value().getStringView(), "/healthcheck"); - EXPECT_EQ(host2_stream_->headers().Method()->value().getStringView(), "GET"); - EXPECT_EQ(host2_stream_->headers().Host()->value().getStringView(), cluster2); + EXPECT_EQ(host2_stream_->headers().getPathValue(), "/healthcheck"); + EXPECT_EQ(host2_stream_->headers().getMethodValue(), "GET"); + EXPECT_EQ(host2_stream_->headers().getHostValue(), cluster2); } } diff --git a/test/integration/http2_integration_test.cc b/test/integration/http2_integration_test.cc index 2665a7a90050..9235ea005a2c 100644 --- a/test/integration/http2_integration_test.cc +++ b/test/integration/http2_integration_test.cc @@ -796,10 +796,9 @@ TEST_P(Http2IntegrationTest, GrpcRouterNotFound) { lookupPort("http"), "POST", "/service/notfound", "", downstream_protocol_, version_, "host", Http::Headers::get().ContentTypeValues.Grpc); ASSERT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); - EXPECT_EQ(Http::Headers::get().ContentTypeValues.Grpc, - response->headers().ContentType()->value().getStringView()); - EXPECT_EQ("12", response->headers().GrpcStatus()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); + EXPECT_EQ(Http::Headers::get().ContentTypeValues.Grpc, response->headers().getContentTypeValue()); + EXPECT_EQ("12", response->headers().getGrpcStatusValue()); } TEST_P(Http2IntegrationTest, GrpcRetry) { testGrpcRetry(); } @@ -862,7 +861,7 @@ TEST_P(Http2IntegrationTest, GoAway) { codec_client_->close(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } TEST_P(Http2IntegrationTest, Trailers) { testTrailers(1024, 2048, false, false); } @@ -898,9 +897,9 @@ TEST_P(Http2IntegrationTest, GrpcRequestTimeout) { {"content-type", "application/grpc"}}); response->waitForEndStream(); 
EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_NE(response->headers().GrpcStatus(), nullptr); - EXPECT_EQ("14", response->headers().GrpcStatus()->value().getStringView()); // Service Unavailable + EXPECT_EQ("14", response->headers().getGrpcStatusValue()); // Service Unavailable EXPECT_LT(0, test_server_->counter("cluster.cluster_0.upstream_rq_timeout")->value()); } @@ -967,7 +966,7 @@ TEST_P(Http2IntegrationTest, IdleTimeoutWithSimultaneousRequests) { EXPECT_TRUE(upstream_request2->complete()); EXPECT_EQ(request2_bytes, upstream_request2->bodyLength()); EXPECT_TRUE(response2->complete()); - EXPECT_EQ("200", response2->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response2->headers().getStatusValue()); EXPECT_EQ(request2_bytes, response2->body().size()); // Validate that idle time is not kicked in. @@ -981,7 +980,7 @@ TEST_P(Http2IntegrationTest, IdleTimeoutWithSimultaneousRequests) { EXPECT_TRUE(upstream_request1->complete()); EXPECT_EQ(request1_bytes, upstream_request1->bodyLength()); EXPECT_TRUE(response1->complete()); - EXPECT_EQ("200", response1->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response1->headers().getStatusValue()); EXPECT_EQ(request1_bytes, response1->body().size()); // Do not send any requests and validate idle timeout kicks in after both the requests are done. @@ -1026,12 +1025,12 @@ TEST_P(Http2IntegrationTest, RequestMirrorWithBody) { // Make sure both requests have a body. Also check the shadow for the shadow headers. EXPECT_EQ("hello", upstream_request_->body().toString()); EXPECT_EQ("hello", upstream_request2->body().toString()); - EXPECT_EQ("host-shadow", upstream_request2->headers().Host()->value().getStringView()); + EXPECT_EQ("host-shadow", upstream_request2->headers().getHostValue()); upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, true); upstream_request2->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, true); request->waitForEndStream(); - EXPECT_EQ("200", request->headers().Status()->value().getStringView()); + EXPECT_EQ("200", request->headers().getStatusValue()); // Cleanup. 
ASSERT_TRUE(fake_upstream_connection2->close()); @@ -1087,7 +1086,7 @@ void Http2IntegrationTest::simultaneousRequest(int32_t request1_bytes, int32_t r EXPECT_TRUE(upstream_request2->complete()); EXPECT_EQ(request2_bytes, upstream_request2->bodyLength()); EXPECT_TRUE(response2->complete()); - EXPECT_EQ("200", response2->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response2->headers().getStatusValue()); EXPECT_EQ(request2_bytes, response2->body().size()); // Respond to request 1 @@ -1097,7 +1096,7 @@ void Http2IntegrationTest::simultaneousRequest(int32_t request1_bytes, int32_t r EXPECT_TRUE(upstream_request1->complete()); EXPECT_EQ(request1_bytes, upstream_request1->bodyLength()); EXPECT_TRUE(response1->complete()); - EXPECT_EQ("200", response1->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response1->headers().getStatusValue()); EXPECT_EQ(request2_bytes, response1->body().size()); // Cleanup both downstream and upstream @@ -1340,7 +1339,7 @@ TEST_P(Http2RingHashIntegrationTest, CookieRoutingNoCookieNoTtl) { {":scheme", "http"}, {":authority", "host"}}, [&](IntegrationStreamDecoder& response) { - EXPECT_EQ("200", response.headers().Status()->value().getStringView()); + EXPECT_EQ("200", response.headers().getStatusValue()); EXPECT_TRUE(response.headers().get(Http::Headers::get().SetCookie) == nullptr); served_by.insert(std::string( response.headers().get(Http::LowerCaseString("x-served-by"))->value().getStringView())); @@ -1370,7 +1369,7 @@ TEST_P(Http2RingHashIntegrationTest, CookieRoutingNoCookieWithNonzeroTtlSet) { {":scheme", "http"}, {":authority", "host"}}, [&](IntegrationStreamDecoder& response) { - EXPECT_EQ("200", response.headers().Status()->value().getStringView()); + EXPECT_EQ("200", response.headers().getStatusValue()); std::string value( response.headers().get(Http::Headers::get().SetCookie)->value().getStringView()); set_cookies.insert(value); @@ -1401,7 +1400,7 @@ TEST_P(Http2RingHashIntegrationTest, CookieRoutingNoCookieWithZeroTtlSet) { {":scheme", "http"}, {":authority", "host"}}, [&](IntegrationStreamDecoder& response) { - EXPECT_EQ("200", response.headers().Status()->value().getStringView()); + EXPECT_EQ("200", response.headers().getStatusValue()); std::string value( response.headers().get(Http::Headers::get().SetCookie)->value().getStringView()); set_cookies.insert(value); @@ -1432,7 +1431,7 @@ TEST_P(Http2RingHashIntegrationTest, CookieRoutingWithCookieNoTtl) { {":scheme", "http"}, {":authority", "host"}}, [&](IntegrationStreamDecoder& response) { - EXPECT_EQ("200", response.headers().Status()->value().getStringView()); + EXPECT_EQ("200", response.headers().getStatusValue()); EXPECT_TRUE(response.headers().get(Http::Headers::get().SetCookie) == nullptr); served_by.insert(std::string( response.headers().get(Http::LowerCaseString("x-served-by"))->value().getStringView())); @@ -1463,7 +1462,7 @@ TEST_P(Http2RingHashIntegrationTest, CookieRoutingWithCookieWithTtlSet) { {":scheme", "http"}, {":authority", "host"}}, [&](IntegrationStreamDecoder& response) { - EXPECT_EQ("200", response.headers().Status()->value().getStringView()); + EXPECT_EQ("200", response.headers().getStatusValue()); EXPECT_TRUE(response.headers().get(Http::Headers::get().SetCookie) == nullptr); served_by.insert(std::string( response.headers().get(Http::LowerCaseString("x-served-by"))->value().getStringView())); diff --git a/test/integration/http2_upstream_integration_test.cc b/test/integration/http2_upstream_integration_test.cc index 37cda22965f2..b06547333cf9 
100644 --- a/test/integration/http2_upstream_integration_test.cc +++ b/test/integration/http2_upstream_integration_test.cc @@ -180,7 +180,7 @@ void Http2UpstreamIntegrationTest::simultaneousRequest(uint32_t request1_bytes, EXPECT_TRUE(upstream_request2->complete()); EXPECT_EQ(request2_bytes, upstream_request2->bodyLength()); EXPECT_TRUE(response2->complete()); - EXPECT_EQ("200", response2->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response2->headers().getStatusValue()); EXPECT_EQ(response2_bytes, response2->body().size()); // Respond to request 1 @@ -190,7 +190,7 @@ void Http2UpstreamIntegrationTest::simultaneousRequest(uint32_t request1_bytes, EXPECT_TRUE(upstream_request1->complete()); EXPECT_EQ(request1_bytes, upstream_request1->bodyLength()); EXPECT_TRUE(response1->complete()); - EXPECT_EQ("200", response1->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response1->headers().getStatusValue()); EXPECT_EQ(response1_bytes, response1->body().size()); } @@ -235,11 +235,11 @@ void Http2UpstreamIntegrationTest::manySimultaneousRequests(uint32_t request_byt responses[i]->waitForEndStream(); if (i % 2 != 0) { EXPECT_TRUE(responses[i]->complete()); - EXPECT_EQ("200", responses[i]->headers().Status()->value().getStringView()); + EXPECT_EQ("200", responses[i]->headers().getStatusValue()); EXPECT_EQ(response_bytes[i], responses[i]->body().length()); } else { // Upstream stream reset. - EXPECT_EQ("503", responses[i]->headers().Status()->value().getStringView()); + EXPECT_EQ("503", responses[i]->headers().getStatusValue()); } } } @@ -396,7 +396,7 @@ TEST_P(Http2UpstreamIntegrationTest, TestManyResponseHeadersRejected) { upstream_request_->encodeHeaders(many_headers, true); response->waitForEndStream(); // Upstream stream reset triggered. - EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response->headers().getStatusValue()); } // Tests bootstrap configuration of max response headers. @@ -439,7 +439,7 @@ TEST_P(Http2UpstreamIntegrationTest, LargeResponseHeadersRejected) { upstream_request_->encodeHeaders(large_headers, true); response->waitForEndStream(); // Upstream stream reset. - EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response->headers().getStatusValue()); } // Regression test to make sure that configuring upstream logs over gRPC will not crash Envoy. @@ -483,7 +483,7 @@ name: router // Send the response headers. 
upstream_request_->encodeHeaders(default_response_headers_, true); response->waitForEndStream(); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } } // namespace Envoy diff --git a/test/integration/http_integration.cc b/test/integration/http_integration.cc index 034a143266e1..fdc7b4ad2bf6 100644 --- a/test/integration/http_integration.cc +++ b/test/integration/http_integration.cc @@ -336,7 +336,7 @@ void HttpIntegrationTest::verifyResponse(IntegrationStreamDecoderPtr response, const Http::TestResponseHeaderMapImpl& expected_headers, const std::string& expected_body) { EXPECT_TRUE(response->complete()); - EXPECT_EQ(response_code, response->headers().Status()->value().getStringView()); + EXPECT_EQ(response_code, response->headers().getStatusValue()); expected_headers.iterate( [](const Http::HeaderEntry& header, void* context) -> Http::HeaderMap::Iterate { auto response_headers = static_cast(context); @@ -401,7 +401,7 @@ void HttpIntegrationTest::checkSimpleRequestSuccess(uint64_t expected_request_si EXPECT_EQ(expected_request_size, upstream_request_->bodyLength()); ASSERT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ(expected_response_size, response->body().size()); } @@ -462,7 +462,7 @@ void HttpIntegrationTest::testRouterNotFound() { BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest( lookupPort("http"), "GET", "/notfound", "", downstream_protocol_, version_); ASSERT_TRUE(response->complete()); - EXPECT_EQ("404", response->headers().Status()->value().getStringView()); + EXPECT_EQ("404", response->headers().getStatusValue()); } // Change the default route to be restrictive, and send a POST to an alternate route. @@ -473,7 +473,7 @@ void HttpIntegrationTest::testRouterNotFoundWithBody() { BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest( lookupPort("http"), "POST", "/notfound", "foo", downstream_protocol_, version_); ASSERT_TRUE(response->complete()); - EXPECT_EQ("404", response->headers().Status()->value().getStringView()); + EXPECT_EQ("404", response->headers().getStatusValue()); } // Make sure virtual cluster stats are charged to the appropriate virtual cluster. @@ -546,7 +546,7 @@ void HttpIntegrationTest::testRouterUpstreamDisconnectBeforeRequestComplete() { EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response->headers().getStatusValue()); EXPECT_EQ("upstream connect error or disconnect/reset before headers. 
reset reason: connection " "termination", response->body()); @@ -574,7 +574,7 @@ void HttpIntegrationTest::testRouterUpstreamDisconnectBeforeResponseComplete( EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_FALSE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ(0U, response->body().size()); } @@ -636,7 +636,7 @@ void HttpIntegrationTest::testRouterDownstreamDisconnectBeforeResponseComplete( EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_FALSE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ(512U, response->body().size()); } @@ -670,7 +670,7 @@ void HttpIntegrationTest::testRouterUpstreamResponseBeforeRequestComplete() { EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ(512U, response->body().size()); } @@ -703,7 +703,7 @@ void HttpIntegrationTest::testRetry() { EXPECT_EQ(1024U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ(512U, response->body().size()); } @@ -728,10 +728,7 @@ void HttpIntegrationTest::testRetryAttemptCountHeader() { waitForNextUpstreamRequest(); upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "503"}}, false); - EXPECT_EQ( - atoi(std::string(upstream_request_->headers().EnvoyAttemptCount()->value().getStringView()) - .c_str()), - 1); + EXPECT_EQ(atoi(std::string(upstream_request_->headers().getEnvoyAttemptCountValue()).c_str()), 1); if (fake_upstreams_[0]->httpType() == FakeHttpConnection::Type::HTTP1) { ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); @@ -740,10 +737,7 @@ void HttpIntegrationTest::testRetryAttemptCountHeader() { ASSERT_TRUE(upstream_request_->waitForReset()); } waitForNextUpstreamRequest(); - EXPECT_EQ( - atoi(std::string(upstream_request_->headers().EnvoyAttemptCount()->value().getStringView()) - .c_str()), - 2); + EXPECT_EQ(atoi(std::string(upstream_request_->headers().getEnvoyAttemptCountValue()).c_str()), 2); upstream_request_->encodeHeaders(default_response_headers_, false); upstream_request_->encodeData(512, true); @@ -752,11 +746,9 @@ void HttpIntegrationTest::testRetryAttemptCountHeader() { EXPECT_EQ(1024U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ(512U, response->body().size()); - EXPECT_EQ( - 2, - atoi(std::string(response->headers().EnvoyAttemptCount()->value().getStringView()).c_str())); + EXPECT_EQ(2, atoi(std::string(response->headers().getEnvoyAttemptCountValue()).c_str())); } void HttpIntegrationTest::testGrpcRetry() { @@ -797,7 +789,7 @@ void HttpIntegrationTest::testGrpcRetry() { EXPECT_EQ(1024U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ(512U, response->body().size()); if (fake_upstreams_[0]->httpType() == FakeHttpConnection::Type::HTTP2) { EXPECT_THAT(*response->trailers(), 
HeaderMapEqualRef(&response_trailers)); @@ -846,13 +838,13 @@ void HttpIntegrationTest::testEnvoyHandling100Continue(bool additional_continue_ response->waitForEndStream(); ASSERT_TRUE(response->complete()); ASSERT(response->continue_headers() != nullptr); - EXPECT_EQ("100", response->continue_headers()->Status()->value().getStringView()); + EXPECT_EQ("100", response->continue_headers()->getStatusValue()); EXPECT_EQ(nullptr, response->continue_headers()->Via()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); if (via.empty()) { EXPECT_EQ(nullptr, response->headers().Via()); } else { - EXPECT_EQ(via.c_str(), response->headers().Via()->value().getStringView()); + EXPECT_EQ(via.c_str(), response->headers().getViaValue()); } } @@ -915,9 +907,9 @@ void HttpIntegrationTest::testEnvoyProxying100Continue(bool continue_before_upst response->waitForEndStream(); EXPECT_TRUE(response->complete()); ASSERT(response->continue_headers() != nullptr); - EXPECT_EQ("100", response->continue_headers()->Status()->value().getStringView()); + EXPECT_EQ("100", response->continue_headers()->getStatusValue()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } void HttpIntegrationTest::testTwoRequests(bool network_backup) { @@ -950,7 +942,7 @@ void HttpIntegrationTest::testTwoRequests(bool network_backup) { EXPECT_TRUE(upstream_request_->complete()); EXPECT_EQ(1024U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ(512U, response->body().size()); // Request 2. @@ -963,7 +955,7 @@ void HttpIntegrationTest::testTwoRequests(bool network_backup) { EXPECT_TRUE(upstream_request_->complete()); EXPECT_EQ(512U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ(1024U, response->body().size()); } @@ -1002,7 +994,7 @@ void HttpIntegrationTest::testLargeRequestHeaders(uint32_t size, uint32_t count, if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { codec_client_->waitForDisconnect(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("431", response->headers().Status()->value().getStringView()); + EXPECT_EQ("431", response->headers().getStatusValue()); } else { response->waitForReset(); codec_client_->close(); @@ -1010,7 +1002,7 @@ void HttpIntegrationTest::testLargeRequestHeaders(uint32_t size, uint32_t count, } else { auto response = sendRequestAndWaitForResponse(big_headers, 0, default_response_headers_, 0); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } } @@ -1041,7 +1033,7 @@ void HttpIntegrationTest::testLargeRequestTrailers(uint32_t size, uint32_t max_s if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { codec_client_->waitForDisconnect(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("431", response->headers().Status()->value().getStringView()); + EXPECT_EQ("431", response->headers().getStatusValue()); } else { // Expect a stream reset when the size of the trailers is larger than the maximum // limit. 
@@ -1088,7 +1080,7 @@ void HttpIntegrationTest::testManyRequestHeaders(std::chrono::milliseconds time) sendRequestAndWaitForResponse(*big_headers, 0, default_response_headers_, 0, 0, time); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } void HttpIntegrationTest::testDownstreamResetBeforeResponseComplete() { @@ -1129,7 +1121,7 @@ void HttpIntegrationTest::testDownstreamResetBeforeResponseComplete() { EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_FALSE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ(512U, response->body().size()); } @@ -1166,7 +1158,7 @@ void HttpIntegrationTest::testTrailers(uint64_t request_size, uint64_t response_ } EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ(response_size, response->body().size()); if (check_response) { EXPECT_THAT(*response->trailers(), HeaderMapEqualRef(&response_trailers)); @@ -1194,7 +1186,7 @@ void HttpIntegrationTest::testAdminDrain(Http::CodecClient::Type admin_request_t BufferingStreamDecoderPtr admin_response = IntegrationUtil::makeSingleRequest( lookupPort("admin"), "POST", "/drain_listeners", "", admin_request_type, version_); EXPECT_TRUE(admin_response->complete()); - EXPECT_EQ("200", admin_response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", admin_response->headers().getStatusValue()); EXPECT_EQ("OK\n", admin_response->body()); upstream_request_->encodeData(512, true); @@ -1295,7 +1287,7 @@ void HttpIntegrationTest::testMaxStreamDurationWithRetry(bool invoke_retry_upstr codec_client_->close(); } - EXPECT_EQ("408", response->headers().Status()->value().getStringView()); + EXPECT_EQ("408", response->headers().getStatusValue()); } else { Http::TestHeaderMapImpl response_headers{{":status", "200"}}; upstream_request_->encodeHeaders(response_headers, true); @@ -1304,7 +1296,7 @@ void HttpIntegrationTest::testMaxStreamDurationWithRetry(bool invoke_retry_upstr codec_client_->close(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } } diff --git a/test/integration/http_timeout_integration_test.cc b/test/integration/http_timeout_integration_test.cc index ca226745de95..33dea2c83128 100644 --- a/test/integration/http_timeout_integration_test.cc +++ b/test/integration/http_timeout_integration_test.cc @@ -44,7 +44,7 @@ TEST_P(HttpTimeoutIntegrationTest, GlobalTimeout) { EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("504", response->headers().Status()->value().getStringView()); + EXPECT_EQ("504", response->headers().getStatusValue()); } // Testing that `x-envoy-expected-timeout-ms` header, set by egress envoy, is respected by ingress @@ -85,7 +85,7 @@ TEST_P(HttpTimeoutIntegrationTest, UseTimeoutSetByEgressEnvoy) { EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("504", response->headers().Status()->value().getStringView()); + EXPECT_EQ("504", response->headers().getStatusValue()); } // Testing that ingress envoy derives new timeout value and sets `x-envoy-expected-timeout-ms` @@ -126,7 +126,7 @@ TEST_P(HttpTimeoutIntegrationTest, 
DeriveTimeoutInIngressEnvoy) { EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("504", response->headers().Status()->value().getStringView()); + EXPECT_EQ("504", response->headers().getStatusValue()); } // Testing that `x-envoy-expected-timeout-ms` header, set by egress envoy, is ignored by ingress @@ -168,7 +168,7 @@ TEST_P(HttpTimeoutIntegrationTest, IgnoreTimeoutSetByEgressEnvoy) { EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("504", response->headers().Status()->value().getStringView()); + EXPECT_EQ("504", response->headers().getStatusValue()); } // Regression test for https://github.com/envoyproxy/envoy/issues/7154 in which @@ -202,7 +202,7 @@ TEST_P(HttpTimeoutIntegrationTest, GlobalTimeoutAfterHeadersBeforeBodyResetsUpst upstream_request_->encodeHeaders(response_headers, false); response->waitForHeaders(); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); // Trigger global timeout. timeSystem().advanceTimeWait(std::chrono::milliseconds(200)); @@ -261,7 +261,7 @@ TEST_P(HttpTimeoutIntegrationTest, PerTryTimeout) { EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("504", response->headers().Status()->value().getStringView()); + EXPECT_EQ("504", response->headers().getStatusValue()); } // Sends a request with a per try timeout specified but no global timeout. @@ -309,7 +309,7 @@ TEST_P(HttpTimeoutIntegrationTest, PerTryTimeoutWithoutGlobalTimeout) { EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } // With hedge_on_per_try_timeout enabled via config, sends a request with a @@ -368,7 +368,7 @@ TEST_P(HttpTimeoutIntegrationTest, HedgedPerTryTimeout) { EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } TEST_P(HttpTimeoutIntegrationTest, HedgedPerTryTimeoutWithBodyNoBufferFirstRequestWins) { @@ -490,7 +490,7 @@ void HttpTimeoutIntegrationTest::testRouterRequestAndResponseWithHedgedPerTryTim } EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } } // namespace Envoy diff --git a/test/integration/idle_timeout_integration_test.cc b/test/integration/idle_timeout_integration_test.cc index e40e683cb74b..0591d125be04 100644 --- a/test/integration/idle_timeout_integration_test.cc +++ b/test/integration/idle_timeout_integration_test.cc @@ -178,7 +178,7 @@ TEST_P(IdleTimeoutIntegrationTest, PerStreamIdleTimeoutAfterDownstreamHeaders) { EXPECT_FALSE(upstream_request_->complete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("408", response->headers().Status()->value().getStringView()); + EXPECT_EQ("408", response->headers().getStatusValue()); EXPECT_EQ("stream timeout", response->body()); EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("stream_idle_timeout")); @@ -193,9 +193,9 @@ TEST_P(IdleTimeoutIntegrationTest, PerStreamIdleTimeoutHeadRequestAfterDownstrea EXPECT_FALSE(upstream_request_->complete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); 
EXPECT_TRUE(response->complete()); - EXPECT_EQ("408", response->headers().Status()->value().getStringView()); + EXPECT_EQ("408", response->headers().getStatusValue()); EXPECT_EQ(fmt::format("{}", strlen("stream timeout")), - response->headers().ContentLength()->value().getStringView()); + response->headers().getContentLengthValue()); EXPECT_EQ("", response->body()); } @@ -210,7 +210,7 @@ TEST_P(IdleTimeoutIntegrationTest, GlobalPerStreamIdleTimeoutAfterDownstreamHead EXPECT_FALSE(upstream_request_->complete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("408", response->headers().Status()->value().getStringView()); + EXPECT_EQ("408", response->headers().getStatusValue()); EXPECT_EQ("stream timeout", response->body()); } @@ -227,7 +227,7 @@ TEST_P(IdleTimeoutIntegrationTest, PerStreamIdleTimeoutAfterDownstreamHeadersAnd EXPECT_FALSE(upstream_request_->complete()); EXPECT_EQ(1U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("408", response->headers().Status()->value().getStringView()); + EXPECT_EQ("408", response->headers().getStatusValue()); EXPECT_EQ("stream timeout", response->body()); } @@ -243,7 +243,7 @@ TEST_P(IdleTimeoutIntegrationTest, PerStreamIdleTimeoutAfterUpstreamHeaders) { EXPECT_FALSE(upstream_request_->complete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_FALSE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ("", response->body()); } @@ -277,7 +277,7 @@ TEST_P(IdleTimeoutIntegrationTest, PerStreamIdleTimeoutAfterBidiData) { EXPECT_TRUE(upstream_request_->complete()); EXPECT_EQ(1U, upstream_request_->bodyLength()); EXPECT_FALSE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ("aa", response->body()); } @@ -307,7 +307,7 @@ TEST_P(IdleTimeoutIntegrationTest, RequestTimeoutTriggersOnBodilessPost) { EXPECT_FALSE(upstream_request_->complete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("408", response->headers().Status()->value().getStringView()); + EXPECT_EQ("408", response->headers().getStatusValue()); EXPECT_EQ("request timeout", response->body()); } @@ -323,7 +323,7 @@ TEST_P(IdleTimeoutIntegrationTest, RequestTimeoutUnconfiguredDoesNotTriggerOnBod EXPECT_FALSE(upstream_request_->complete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("408", response->headers().Status()->value().getStringView()); + EXPECT_EQ("408", response->headers().getStatusValue()); EXPECT_NE("request timeout", response->body()); } @@ -381,7 +381,7 @@ TEST_P(IdleTimeoutIntegrationTest, RequestTimeoutIsNotDisarmedByEncode100Continu EXPECT_FALSE(upstream_request_->complete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("408", response->headers().Status()->value().getStringView()); + EXPECT_EQ("408", response->headers().getStatusValue()); EXPECT_EQ("request timeout", response->body()); } diff --git a/test/integration/integration_admin_test.cc b/test/integration/integration_admin_test.cc index 39bfb6c5b1a7..17df926b2b3c 100644 --- a/test/integration/integration_admin_test.cc +++ b/test/integration/integration_admin_test.cc @@ -363,7 +363,7 @@ TEST_P(IntegrationAdminTest, Admin) { "/drain_listeners?inboundonly", "", 
downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ("text/plain; charset=UTF-8", ContentType(response)); EXPECT_EQ("OK\n", response->body()); @@ -375,7 +375,7 @@ TEST_P(IntegrationAdminTest, Admin) { response = IntegrationUtil::makeSingleRequest(lookupPort("admin"), "POST", "/drain_listeners", "", downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ("text/plain; charset=UTF-8", ContentType(response)); EXPECT_EQ("OK\n", response->body()); @@ -395,7 +395,7 @@ TEST_P(IntegrationAdminTest, AdminDrainInboundOnly) { lookupPort("admin"), "POST", "/drain_listeners?inboundonly", "", downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ("text/plain; charset=UTF-8", ContentType(response)); EXPECT_EQ("OK\n", response->body()); @@ -473,7 +473,7 @@ TEST_F(IntegrationAdminIpv4Ipv6Test, Ipv4Ipv6Listen) { BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest( lookupPort("admin"), "GET", "/server_info", "", downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } } @@ -503,7 +503,7 @@ class StatsMatcherIntegrationTest response_ = IntegrationUtil::makeSingleRequest(lookupPort("admin"), "GET", "/stats", "", downstreamProtocol(), version_); ASSERT_TRUE(response_->complete()); - EXPECT_EQ("200", response_->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response_->headers().getStatusValue()); } BufferingStreamDecoderPtr response_; diff --git a/test/integration/integration_admin_test.h b/test/integration/integration_admin_test.h index 97426d52a651..e5ab002fcd7a 100644 --- a/test/integration/integration_admin_test.h +++ b/test/integration/integration_admin_test.h @@ -32,7 +32,7 @@ class IntegrationAdminTest : public HttpProtocolIntegrationTest { response = IntegrationUtil::makeSingleRequest(lookupPort(port_key), method, endpoint, "", downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); - return response->headers().Status()->value().getStringView(); + return response->headers().getStatusValue(); } /** diff --git a/test/integration/integration_test.cc b/test/integration/integration_test.cc index c782136f104b..d9ef3a84a650 100644 --- a/test/integration/integration_test.cc +++ b/test/integration/integration_test.cc @@ -131,12 +131,12 @@ TEST_P(IntegrationTest, RouterDirectResponse) { BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest( lookupPort("http"), "GET", "/", "", downstream_protocol_, version_, "direct.example.com"); ASSERT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ("example-value", response->headers() .get(Envoy::Http::LowerCaseString("x-additional-header")) ->value() .getStringView()); - EXPECT_EQ("text/html", response->headers().ContentType()->value().getStringView()); + EXPECT_EQ("text/html", response->headers().getContentTypeValue()); EXPECT_EQ(body, response->body()); } @@ -302,7 +302,7 @@ 
TEST_P(IntegrationTest, UpstreamDisconnectWithTwoRequests) { EXPECT_TRUE(upstream_request_->complete()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); test_server_->waitForCounterGe("cluster.cluster_0.upstream_cx_total", 1); test_server_->waitForCounterGe("cluster.cluster_0.upstream_rq_200", 1); @@ -317,7 +317,7 @@ TEST_P(IntegrationTest, UpstreamDisconnectWithTwoRequests) { EXPECT_TRUE(upstream_request_->complete()); EXPECT_TRUE(response2->complete()); - EXPECT_EQ("200", response2->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response2->headers().getStatusValue()); test_server_->waitForCounterGe("cluster.cluster_0.upstream_cx_total", 2); test_server_->waitForCounterGe("cluster.cluster_0.upstream_rq_200", 2); } @@ -711,7 +711,7 @@ TEST_P(IntegrationTest, NoHost) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("400", response->headers().Status()->value().getStringView()); + EXPECT_EQ("400", response->headers().getStatusValue()); } TEST_P(IntegrationTest, BadPath) { @@ -812,7 +812,7 @@ TEST_P(IntegrationTest, UpstreamProtocolError) { codec_client_->waitForDisconnect(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response->headers().getStatusValue()); } TEST_P(IntegrationTest, TestHead) { @@ -996,7 +996,7 @@ TEST_P(IntegrationTest, TestDelayedConnectionTeardownOnGracefulClose) { response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("413", response->headers().Status()->value().getStringView()); + EXPECT_EQ("413", response->headers().getStatusValue()); // With no delayed close processing, Envoy will close the connection immediately after flushing // and this should instead return true. 
EXPECT_FALSE(codec_client_->waitForDisconnect(std::chrono::milliseconds(500))); @@ -1113,7 +1113,7 @@ TEST_P(IntegrationTest, NoConnectionPoolsFree) { response->waitForEndStream(); - EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response->headers().getStatusValue()); test_server_->waitForCounterGe("cluster.cluster_0.upstream_rq_503", 1); EXPECT_EQ(test_server_->counter("cluster.cluster_0.upstream_cx_pool_overflow")->value(), 1); diff --git a/test/integration/load_stats_integration_test.cc b/test/integration/load_stats_integration_test.cc index 8e79f61d684d..e1401161be3b 100644 --- a/test/integration/load_stats_integration_test.cc +++ b/test/integration/load_stats_integration_test.cc @@ -281,11 +281,10 @@ class LoadStatsIntegrationTest : public testing::TestWithParamheaders().Method()->value().getStringView()); + EXPECT_EQ("POST", loadstats_stream_->headers().getMethodValue()); EXPECT_EQ("/envoy.service.load_stats.v2.LoadReportingService/StreamLoadStats", - loadstats_stream_->headers().Path()->value().getStringView()); - EXPECT_EQ("application/grpc", - loadstats_stream_->headers().ContentType()->value().getStringView()); + loadstats_stream_->headers().getPathValue()); + EXPECT_EQ("application/grpc", loadstats_stream_->headers().getContentTypeValue()); } while (!TestUtility::assertRepeatedPtrFieldEqual(expected_cluster_stats, loadstats_request.cluster_stats(), true)); } @@ -308,8 +307,7 @@ class LoadStatsIntegrationTest : public testing::TestWithParambodyLength()); ASSERT_TRUE(response_->complete()); - EXPECT_EQ(std::to_string(response_code), - response_->headers().Status()->value().getStringView()); + EXPECT_EQ(std::to_string(response_code), response_->headers().getStatusValue()); EXPECT_EQ(response_size_, response_->body().size()); } @@ -627,7 +625,7 @@ TEST_P(LoadStatsIntegrationTest, Dropped) { initiateClientConnection(); response_->waitForEndStream(); ASSERT_TRUE(response_->complete()); - EXPECT_EQ("503", response_->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response_->headers().getStatusValue()); cleanupUpstreamAndDownstream(); waitForLoadStatsRequest({}, 1); diff --git a/test/integration/overload_integration_test.cc b/test/integration/overload_integration_test.cc index b8406f587c5e..749f947e553d 100644 --- a/test/integration/overload_integration_test.cc +++ b/test/integration/overload_integration_test.cc @@ -75,7 +75,7 @@ TEST_P(OverloadIntegrationTest, CloseStreamsWhenOverloaded) { response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response->headers().getStatusValue()); EXPECT_EQ("envoy overloaded", response->body()); codec_client_->close(); @@ -84,7 +84,7 @@ TEST_P(OverloadIntegrationTest, CloseStreamsWhenOverloaded) { response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response->headers().getStatusValue()); EXPECT_EQ("envoy overloaded", response->body()); codec_client_->close(); @@ -98,7 +98,7 @@ TEST_P(OverloadIntegrationTest, CloseStreamsWhenOverloaded) { EXPECT_TRUE(upstream_request_->complete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ(0U, response->body().size()); } @@ -121,8 +121,8 @@ TEST_P(OverloadIntegrationTest, 
DisableKeepaliveWhenOverloaded) { codec_client_->waitForDisconnect(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); - EXPECT_EQ("close", response->headers().Connection()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); + EXPECT_EQ("close", response->headers().getConnectionValue()); // Deactivate overload state and check that keepalive is not disabled updateResource(0.7); @@ -132,7 +132,7 @@ TEST_P(OverloadIntegrationTest, DisableKeepaliveWhenOverloaded) { response = sendRequestAndWaitForResponse(request_headers, 1, default_response_headers_, 1); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ(nullptr, response->headers().Connection()); } @@ -159,7 +159,7 @@ TEST_P(OverloadIntegrationTest, StopAcceptingConnectionsWhenOverloaded) { response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response->headers().getStatusValue()); EXPECT_EQ("envoy overloaded", response->body()); codec_client_->close(); } diff --git a/test/integration/protocol_integration_test.cc b/test/integration/protocol_integration_test.cc index 14bc8b0e9142..5f8fdda34197 100644 --- a/test/integration/protocol_integration_test.cc +++ b/test/integration/protocol_integration_test.cc @@ -124,7 +124,7 @@ TEST_P(DownstreamProtocolIntegrationTest, RouterClusterNotFound404) { BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest( lookupPort("http"), "GET", "/unknown", "", downstream_protocol_, version_, "foo.com"); ASSERT_TRUE(response->complete()); - EXPECT_EQ("404", response->headers().Status()->value().getStringView()); + EXPECT_EQ("404", response->headers().getStatusValue()); } // Add a route that uses unknown cluster (expect 503 Service Unavailable). @@ -139,7 +139,7 @@ TEST_P(DownstreamProtocolIntegrationTest, RouterClusterNotFound503) { BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest( lookupPort("http"), "GET", "/unknown", "", downstream_protocol_, version_, "foo.com"); ASSERT_TRUE(response->complete()); - EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response->headers().getStatusValue()); } // Add a route which redirects HTTP to HTTPS, and verify Envoy sends a 301 @@ -152,7 +152,7 @@ TEST_P(ProtocolIntegrationTest, RouterRedirect) { BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest( lookupPort("http"), "GET", "/foo", "", downstream_protocol_, version_, "www.redirect.com"); ASSERT_TRUE(response->complete()); - EXPECT_EQ("301", response->headers().Status()->value().getStringView()); + EXPECT_EQ("301", response->headers().getStatusValue()); EXPECT_EQ("https://www.redirect.com/foo", response->headers().get(Http::Headers::get().Location)->value().getStringView()); } @@ -175,7 +175,7 @@ name: health_check response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response->headers().getStatusValue()); } // Add a health check filter and verify correct computation of health based on upstream status. 
@@ -196,7 +196,7 @@ name: health_check response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response->headers().getStatusValue()); } // Verifies behavior for https://github.com/envoyproxy/envoy/pull/11248 @@ -221,7 +221,7 @@ TEST_P(ProtocolIntegrationTest, AddBodyToRequestAndWaitForIt) { EXPECT_TRUE(upstream_request_->complete()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } TEST_P(ProtocolIntegrationTest, AddBodyToResponseAndWaitForIt) { @@ -242,7 +242,7 @@ TEST_P(ProtocolIntegrationTest, AddBodyToResponseAndWaitForIt) { EXPECT_TRUE(upstream_request_->complete()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ("body", response->body()); } @@ -268,9 +268,9 @@ name: add-trailers-filter .getStringView()); } EXPECT_TRUE(response->complete()); - EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response->headers().getStatusValue()); if (downstream_protocol_ == Http::CodecClient::Type::HTTP2) { - EXPECT_EQ("encode", response->trailers()->GrpcMessage()->value().getStringView()); + EXPECT_EQ("encode", response->trailers()->getGrpcMessageValue()); } } @@ -292,11 +292,11 @@ TEST_P(ProtocolIntegrationTest, DrainClose) { codec_client_->waitForDisconnect(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); if (downstream_protocol_ == Http::CodecClient::Type::HTTP2) { EXPECT_TRUE(codec_client_->sawGoAway()); } else { - EXPECT_EQ("close", response->headers().Connection()->value().getStringView()); + EXPECT_EQ("close", response->headers().getConnectionValue()); } } @@ -314,7 +314,7 @@ TEST_P(ProtocolIntegrationTest, ResponseWithHostHeader) { Http::TestResponseHeaderMapImpl{{":status", "200"}, {"host", "host"}}, true); response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ("host", response->headers().get(Http::LowerCaseString("host"))->value().getStringView()); } @@ -347,7 +347,7 @@ TEST_P(ProtocolIntegrationTest, LongHeaderValueWithSpaces) { true); response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ("host", response->headers().get(Http::LowerCaseString("host"))->value().getStringView()); EXPECT_EQ( @@ -384,7 +384,7 @@ TEST_P(ProtocolIntegrationTest, Retry) { EXPECT_EQ(1024U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ(512U, response->body().size()); Stats::Store& stats = test_server_->server().stats(); if (upstreamProtocol() == FakeHttpConnection::Type::HTTP2) { @@ -448,7 +448,7 @@ TEST_P(ProtocolIntegrationTest, RetryStreaming) { EXPECT_EQ(combined_request_data.size(), upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + 
EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ(512U, response->body().size()); } @@ -508,7 +508,7 @@ TEST_P(ProtocolIntegrationTest, RetryStreamingReset) { EXPECT_EQ(combined_request_data.size(), upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ(512U, response->body().size()); } @@ -566,7 +566,7 @@ TEST_P(ProtocolIntegrationTest, RetryStreamingCancelDueToBufferOverflow) { response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("507", response->headers().Status()->value().getStringView()); + EXPECT_EQ("507", response->headers().getStatusValue()); test_server_->waitForCounterEq("cluster.cluster_0.retry_or_shadow_abandoned", 1); } @@ -590,10 +590,7 @@ TEST_P(DownstreamProtocolIntegrationTest, RetryAttemptCountHeader) { waitForNextUpstreamRequest(); upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "503"}}, false); - EXPECT_EQ( - atoi(std::string(upstream_request_->headers().EnvoyAttemptCount()->value().getStringView()) - .c_str()), - 1); + EXPECT_EQ(atoi(std::string(upstream_request_->headers().getEnvoyAttemptCountValue()).c_str()), 1); if (fake_upstreams_[0]->httpType() == FakeHttpConnection::Type::HTTP1) { ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); @@ -602,10 +599,7 @@ TEST_P(DownstreamProtocolIntegrationTest, RetryAttemptCountHeader) { ASSERT_TRUE(upstream_request_->waitForReset()); } waitForNextUpstreamRequest(); - EXPECT_EQ( - atoi(std::string(upstream_request_->headers().EnvoyAttemptCount()->value().getStringView()) - .c_str()), - 2); + EXPECT_EQ(atoi(std::string(upstream_request_->headers().getEnvoyAttemptCountValue()).c_str()), 2); upstream_request_->encodeHeaders(default_response_headers_, false); upstream_request_->encodeData(512, true); @@ -614,11 +608,9 @@ TEST_P(DownstreamProtocolIntegrationTest, RetryAttemptCountHeader) { EXPECT_EQ(1024U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ(512U, response->body().size()); - EXPECT_EQ( - 2, - atoi(std::string(response->headers().EnvoyAttemptCount()->value().getStringView()).c_str())); + EXPECT_EQ(2, atoi(std::string(response->headers().getEnvoyAttemptCountValue()).c_str())); } // Verifies that a retry priority can be configured and affect the host selected during retries. 
@@ -687,7 +679,7 @@ TEST_P(DownstreamProtocolIntegrationTest, RetryPriority) { EXPECT_EQ(1024U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ(512U, response->body().size()); } @@ -751,7 +743,7 @@ TEST_P(DownstreamProtocolIntegrationTest, RetryHostPredicateFilter) { EXPECT_EQ(1024U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ(512U, response->body().size()); } @@ -779,7 +771,7 @@ TEST_P(ProtocolIntegrationTest, RetryHittingBufferLimit) { EXPECT_EQ(66560U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response->headers().getStatusValue()); } // Very similar set-up to RetryHittingBufferLimits but using the route specific cap. @@ -807,7 +799,7 @@ TEST_P(ProtocolIntegrationTest, RetryHittingRouteLimits) { EXPECT_EQ(1U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response->headers().getStatusValue()); } // Test hitting the decoder buffer filter with too many request bytes to buffer. Ensure the @@ -841,7 +833,7 @@ TEST_P(DownstreamProtocolIntegrationTest, HittingDecoderFilterLimit) { ASSERT_TRUE(response->complete()); } if (response->complete()) { - EXPECT_EQ("413", response->headers().Status()->value().getStringView()); + EXPECT_EQ("413", response->headers().getStatusValue()); } } @@ -880,7 +872,7 @@ TEST_P(ProtocolIntegrationTest, HittingEncoderFilterLimit) { response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("500", response->headers().Status()->value().getStringView()); + EXPECT_EQ("500", response->headers().getStatusValue()); // Regression test https://github.com/envoyproxy/envoy/issues/9881 by making // sure this path does standard HCM header transformations. 
EXPECT_TRUE(response->headers().Date() != nullptr); @@ -940,7 +932,7 @@ TEST_P(ProtocolIntegrationTest, HeadersWithUnderscoresDropped) { Http::TestResponseHeaderMapImpl{{":status", "200"}, {"bar_baz", "fooz"}}, true); response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_THAT(response->headers(), HeaderHasValueRef("bar_baz", "fooz")); Stats::Store& stats = test_server_->server().stats(); std::string stat_name = (downstreamProtocol() == Http::CodecClient::Type::HTTP1) @@ -967,7 +959,7 @@ TEST_P(ProtocolIntegrationTest, HeadersWithUnderscoresRemainByDefault) { Http::TestResponseHeaderMapImpl{{":status", "200"}, {"bar_baz", "fooz"}}, true); response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_THAT(response->headers(), HeaderHasValueRef("bar_baz", "fooz")); } @@ -991,7 +983,7 @@ TEST_P(ProtocolIntegrationTest, HeadersWithUnderscoresCauseRequestRejectedByDefa if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { codec_client_->waitForDisconnect(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("400", response->headers().Status()->value().getStringView()); + EXPECT_EQ("400", response->headers().getStatusValue()); } else { response->waitForReset(); codec_client_->close(); @@ -1013,7 +1005,7 @@ TEST_P(DownstreamProtocolIntegrationTest, ValidZeroLengthContent) { auto response = sendRequestAndWaitForResponse(request_headers, 0, default_response_headers_, 0); ASSERT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } // Test we're following https://tools.ietf.org/html/rfc7230#section-3.3.2 @@ -1030,7 +1022,7 @@ TEST_P(ProtocolIntegrationTest, 304WithBody) { ASSERT(upstream_request_ != nullptr); upstream_request_->encodeHeaders(response_headers, false); response->waitForHeaders(); - EXPECT_EQ("304", response->headers().Status()->value().getStringView()); + EXPECT_EQ("304", response->headers().getStatusValue()); // For HTTP/1.1 http_parser is explicitly told that 304s are header-only // requests. @@ -1080,7 +1072,7 @@ TEST_P(DownstreamProtocolIntegrationTest, LargeCookieParsingConcatenated) { auto response = sendRequestAndWaitForResponse(request_headers, 0, default_response_headers_, 0); ASSERT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } // Validate that lots of tiny cookies doesn't cause a DoS (many cookie headers). 
@@ -1109,7 +1101,7 @@ TEST_P(DownstreamProtocolIntegrationTest, LargeCookieParsingMany) { auto response = sendRequestAndWaitForResponse(request_headers, 0, default_response_headers_, 0); ASSERT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } TEST_P(DownstreamProtocolIntegrationTest, InvalidContentLength) { @@ -1128,7 +1120,7 @@ TEST_P(DownstreamProtocolIntegrationTest, InvalidContentLength) { if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { ASSERT_TRUE(response->complete()); - EXPECT_EQ("400", response->headers().Status()->value().getStringView()); + EXPECT_EQ("400", response->headers().getStatusValue()); } else { ASSERT_TRUE(response->reset()); EXPECT_EQ(Http::StreamResetReason::ConnectionTermination, response->reset_reason()); @@ -1163,7 +1155,7 @@ TEST_P(DownstreamProtocolIntegrationTest, InvalidContentLengthAllowed) { if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { ASSERT_TRUE(response->complete()); - EXPECT_EQ("400", response->headers().Status()->value().getStringView()); + EXPECT_EQ("400", response->headers().getStatusValue()); } else { ASSERT_TRUE(response->reset()); EXPECT_EQ(Http::StreamResetReason::RemoteReset, response->reset_reason()); @@ -1184,7 +1176,7 @@ TEST_P(DownstreamProtocolIntegrationTest, MultipleContentLengths) { if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { ASSERT_TRUE(response->complete()); - EXPECT_EQ("400", response->headers().Status()->value().getStringView()); + EXPECT_EQ("400", response->headers().getStatusValue()); } else { ASSERT_TRUE(response->reset()); EXPECT_EQ(Http::StreamResetReason::ConnectionTermination, response->reset_reason()); @@ -1217,7 +1209,7 @@ TEST_P(DownstreamProtocolIntegrationTest, MultipleContentLengthsAllowed) { if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { ASSERT_TRUE(response->complete()); - EXPECT_EQ("400", response->headers().Status()->value().getStringView()); + EXPECT_EQ("400", response->headers().getStatusValue()); } else { ASSERT_TRUE(response->reset()); EXPECT_EQ(Http::StreamResetReason::RemoteReset, response->reset_reason()); @@ -1250,7 +1242,7 @@ name: encode-headers-only } EXPECT_TRUE(response->complete()); - EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response->headers().getStatusValue()); EXPECT_EQ(0, response->body().size()); } @@ -1273,7 +1265,7 @@ name: decode-headers-only response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response->headers().getStatusValue()); EXPECT_EQ(128, response->body().size()); } @@ -1308,7 +1300,7 @@ name: passthrough-filter } EXPECT_TRUE(response->complete()); - EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response->headers().getStatusValue()); EXPECT_EQ(0, response->body().size()); } @@ -1337,7 +1329,7 @@ name: passthrough-filter response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response->headers().getStatusValue()); EXPECT_EQ(128, response->body().size()); } @@ -1374,7 +1366,7 @@ name: decode-headers-only response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response->headers().getStatusValue()); EXPECT_EQ(0, 
upstream_request_->body().length()); } @@ -1419,7 +1411,7 @@ TEST_P(DownstreamProtocolIntegrationTest, ManyRequestTrailersRejected) { if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { codec_client_->waitForDisconnect(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("431", response->headers().Status()->value().getStringView()); + EXPECT_EQ("431", response->headers().getStatusValue()); } else { response->waitForReset(); codec_client_->close(); @@ -1453,7 +1445,7 @@ TEST_P(DownstreamProtocolIntegrationTest, ManyRequestTrailersAccepted) { upstream_request_->encodeHeaders(default_response_headers_, true); response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } // This test uses an Http::HeaderMapImpl instead of an Http::TestHeaderMapImpl to avoid @@ -1509,7 +1501,7 @@ TEST_P(DownstreamProtocolIntegrationTest, ManyTrailerHeaders) { EXPECT_TRUE(upstream_request_->complete()); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } // Regression tests for CVE-2019-18801. We only validate the behavior of large @@ -1542,7 +1534,7 @@ TEST_P(ProtocolIntegrationTest, LargeRequestMethod) { auto response = std::move(encoder_decoder.second); codec_client_->waitForDisconnect(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("400", response->headers().Status()->value().getStringView()); + EXPECT_EQ("400", response->headers().getStatusValue()); } else { ASSERT(downstreamProtocol() == Http::CodecClient::Type::HTTP2); if (upstreamProtocol() == FakeHttpConnection::Type::HTTP1) { @@ -1551,7 +1543,7 @@ TEST_P(ProtocolIntegrationTest, LargeRequestMethod) { fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("400", response->headers().Status()->value().getStringView()); + EXPECT_EQ("400", response->headers().getStatusValue()); } else { ASSERT(upstreamProtocol() == FakeHttpConnection::Type::HTTP2); auto response = @@ -1804,7 +1796,7 @@ TEST_P(ProtocolIntegrationTest, MultipleSetCookies) { auto response = sendRequestAndWaitForResponse(default_request_headers_, 0, response_headers, 0); ASSERT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); std::vector out; Http::HeaderUtility::getAllOfHeader(response->headers(), "set-cookie", out); @@ -1947,7 +1939,7 @@ TEST_P(DownstreamProtocolIntegrationTest, InvalidAuthority) { // For HTTP/1 this is handled by the HCM, which sends a full 400 response. response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("400", response->headers().Status()->value().getStringView()); + EXPECT_EQ("400", response->headers().getStatusValue()); } else { // For HTTP/2 this is handled by nghttp2 which resets the connection without // sending an HTTP response. @@ -1966,7 +1958,7 @@ TEST_P(DownstreamProtocolIntegrationTest, ConnectIsBlocked) { // Because CONNECT requests for HTTP/1 do not include a path, they will fail // to find a route match and return a 404. 
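The comment above captures the invariant the surrounding CONNECT work depends on: an HTTP/1 CONNECT request has a method and an authority but no `:path`, so route matching cannot succeed and the HCM responds 404. A small illustrative fragment of that header shape (assuming the test header-map utilities these files already include):

// Illustrative only: what a decoded HTTP/1 CONNECT request looks like to the router.
// The authority value is made up for the example; the point is that Path() is null.
Http::TestRequestHeaderMapImpl connect_headers{{":method", "CONNECT"},
                                               {":authority", "upstream.example.com:443"}};
EXPECT_EQ(nullptr, connect_headers.Path());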
response->waitForEndStream(); - EXPECT_EQ("404", response->headers().Status()->value().getStringView()); + EXPECT_EQ("404", response->headers().getStatusValue()); EXPECT_TRUE(response->complete()); } else { response->waitForReset(); diff --git a/test/integration/redirect_integration_test.cc b/test/integration/redirect_integration_test.cc index 02861c34b6c8..90be3c730b1d 100644 --- a/test/integration/redirect_integration_test.cc +++ b/test/integration/redirect_integration_test.cc @@ -93,7 +93,7 @@ TEST_P(RedirectIntegrationTest, RedirectNotConfigured) { codec_client_ = makeHttpConnection(lookupPort("http")); auto response = sendRequestAndWaitForResponse(default_request_headers_, 0, redirect_response_, 0); EXPECT_TRUE(response->complete()); - EXPECT_EQ("302", response->headers().Status()->value().getStringView()); + EXPECT_EQ("302", response->headers().getStatusValue()); } // Now test a route with redirects configured on in pass-through mode. @@ -103,7 +103,7 @@ TEST_P(RedirectIntegrationTest, InternalRedirectPassedThrough) { codec_client_ = makeHttpConnection(lookupPort("http")); default_request_headers_.setHost("pass.through.internal.redirect"); auto response = sendRequestAndWaitForResponse(default_request_headers_, 0, redirect_response_, 0); - EXPECT_EQ("302", response->headers().Status()->value().getStringView()); + EXPECT_EQ("302", response->headers().getStatusValue()); EXPECT_EQ( 0, test_server_->counter("cluster.cluster_0.upstream_internal_redirect_failed_total")->value()); @@ -129,16 +129,16 @@ TEST_P(RedirectIntegrationTest, BasicInternalRedirect) { waitForNextUpstreamRequest(); ASSERT(upstream_request_->headers().EnvoyOriginalUrl() != nullptr); EXPECT_EQ("http://handle.internal.redirect/test/long/url", - upstream_request_->headers().EnvoyOriginalUrl()->value().getStringView()); - EXPECT_EQ("/new/url", upstream_request_->headers().Path()->value().getStringView()); - EXPECT_EQ("authority2", upstream_request_->headers().Host()->value().getStringView()); - EXPECT_EQ("via_value", upstream_request_->headers().Via()->value().getStringView()); + upstream_request_->headers().getEnvoyOriginalUrlValue()); + EXPECT_EQ("/new/url", upstream_request_->headers().getPathValue()); + EXPECT_EQ("authority2", upstream_request_->headers().getHostValue()); + EXPECT_EQ("via_value", upstream_request_->headers().getViaValue()); upstream_request_->encodeHeaders(default_response_headers_, true); response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ(1, test_server_->counter("cluster.cluster_0.upstream_internal_redirect_succeeded_total") ->value()); } @@ -163,11 +163,10 @@ TEST_P(RedirectIntegrationTest, InternalRedirectWithThreeHopLimit) { for (int i = 0; i < 4; i++) { upstream_requests.push_back(waitForNextStream()); - EXPECT_EQ(fmt::format("/path{}", i), - upstream_requests.back()->headers().Path()->value().getStringView()); + EXPECT_EQ(fmt::format("/path{}", i), upstream_requests.back()->headers().getPathValue()); EXPECT_EQ("handle.internal.redirect.max.three.hop", - upstream_requests.back()->headers().Host()->value().getStringView()); - EXPECT_EQ("via_value", upstream_requests.back()->headers().Via()->value().getStringView()); + upstream_requests.back()->headers().getHostValue()); + EXPECT_EQ("via_value", upstream_requests.back()->headers().getViaValue()); auto next_location = fmt::format(HandleThreeHopLocationFormat, i + 1); 
redirect_response_.setLocation(next_location); @@ -176,7 +175,7 @@ TEST_P(RedirectIntegrationTest, InternalRedirectWithThreeHopLimit) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("302", response->headers().Status()->value().getStringView()); + EXPECT_EQ("302", response->headers().getStatusValue()); EXPECT_EQ( 1, test_server_->counter("cluster.cluster_0.upstream_internal_redirect_failed_total")->value()); @@ -210,10 +209,10 @@ TEST_P(RedirectIntegrationTest, InternalRedirectToDestinationWithBody) { waitForNextUpstreamRequest(); ASSERT(upstream_request_->headers().EnvoyOriginalUrl() != nullptr); EXPECT_EQ("http://handle.internal.redirect/test/long/url", - upstream_request_->headers().EnvoyOriginalUrl()->value().getStringView()); - EXPECT_EQ("/new/url", upstream_request_->headers().Path()->value().getStringView()); - EXPECT_EQ("authority2", upstream_request_->headers().Host()->value().getStringView()); - EXPECT_EQ("via_value", upstream_request_->headers().Via()->value().getStringView()); + upstream_request_->headers().getEnvoyOriginalUrlValue()); + EXPECT_EQ("/new/url", upstream_request_->headers().getPathValue()); + EXPECT_EQ("authority2", upstream_request_->headers().getHostValue()); + EXPECT_EQ("via_value", upstream_request_->headers().getViaValue()); Http::TestHeaderMapImpl response_with_big_body( {{":status", "200"}, {"content-length", "2000000"}}); @@ -222,7 +221,7 @@ TEST_P(RedirectIntegrationTest, InternalRedirectToDestinationWithBody) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ(1, test_server_->counter("cluster.cluster_0.upstream_internal_redirect_succeeded_total") ->value()); } @@ -271,9 +270,9 @@ TEST_P(RedirectIntegrationTest, InternalRedirectPreventedByPreviousRoutesPredica response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("302", response->headers().Status()->value().getStringView()); + EXPECT_EQ("302", response->headers().getStatusValue()); EXPECT_EQ("http://handle.internal.redirect.max.three.hop/yet/another/path", - response->headers().Location()->value().getStringView()); + response->headers().getLocationValue()); EXPECT_EQ(2, test_server_->counter("cluster.cluster_0.upstream_internal_redirect_succeeded_total") ->value()); EXPECT_EQ( @@ -330,9 +329,9 @@ TEST_P(RedirectIntegrationTest, InternalRedirectPreventedByAllowListedRoutesPred response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("302", response->headers().Status()->value().getStringView()); + EXPECT_EQ("302", response->headers().getStatusValue()); EXPECT_EQ("http://handle.internal.redirect/yet/another/path", - response->headers().Location()->value().getStringView()); + response->headers().getLocationValue()); EXPECT_EQ(2, test_server_->counter("cluster.cluster_0.upstream_internal_redirect_succeeded_total") ->value()); EXPECT_EQ( @@ -391,9 +390,9 @@ TEST_P(RedirectIntegrationTest, InternalRedirectPreventedBySafeCrossSchemePredic response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ("302", response->headers().Status()->value().getStringView()); + EXPECT_EQ("302", response->headers().getStatusValue()); EXPECT_EQ("https://handle.internal.redirect/yet/another/path", - response->headers().Location()->value().getStringView()); + response->headers().getLocationValue()); EXPECT_EQ(2, 
test_server_->counter("cluster.cluster_0.upstream_internal_redirect_succeeded_total") ->value()); EXPECT_EQ( @@ -411,7 +410,7 @@ TEST_P(RedirectIntegrationTest, InvalidRedirect) { codec_client_ = makeHttpConnection(lookupPort("http")); default_request_headers_.setHost("handle.internal.redirect"); auto response = sendRequestAndWaitForResponse(default_request_headers_, 0, redirect_response_, 0); - EXPECT_EQ("302", response->headers().Status()->value().getStringView()); + EXPECT_EQ("302", response->headers().getStatusValue()); EXPECT_EQ( 1, test_server_->counter("cluster.cluster_0.upstream_internal_redirect_failed_total")->value()); diff --git a/test/integration/rtds_integration_test.cc b/test/integration/rtds_integration_test.cc index 3456a3ad9dd4..37f4b50a834d 100644 --- a/test/integration/rtds_integration_test.cc +++ b/test/integration/rtds_integration_test.cc @@ -106,7 +106,7 @@ class RtdsIntegrationTest : public Grpc::DeltaSotwIntegrationParamTest, public H auto response = IntegrationUtil::makeSingleRequest( lookupPort("admin"), "GET", "/runtime?format=json", "", downstreamProtocol(), version_); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); Json::ObjectSharedPtr loader = TestEnvironment::jsonLoadFromString(response->body()); auto entries = loader->getObject("entries"); if (entries->hasObject(key)) { diff --git a/test/integration/sds_dynamic_integration_test.cc b/test/integration/sds_dynamic_integration_test.cc index 9a384a6e98ec..0cc56c3742a7 100644 --- a/test/integration/sds_dynamic_integration_test.cc +++ b/test/integration/sds_dynamic_integration_test.cc @@ -529,7 +529,7 @@ TEST_P(SdsDynamicUpstreamIntegrationTest, WrongSecretFirst) { BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest( lookupPort("http"), "GET", "/test/long/url", "", downstream_protocol_, version_); ASSERT_TRUE(response->complete()); - EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response->headers().getStatusValue()); // To flush out the reset connection from the first request in upstream. FakeRawConnectionPtr fake_upstream_connection; diff --git a/test/integration/server.cc b/test/integration/server.cc index 07c8e2ccc401..7ce22bfbe004 100644 --- a/test/integration/server.cc +++ b/test/integration/server.cc @@ -219,7 +219,7 @@ IntegrationTestServerImpl::~IntegrationTestServerImpl() { BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest( admin_address, "POST", "/quitquitquit", "", Http::CodecClient::Type::HTTP1); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); server_gone_.WaitForNotification(); } } else { diff --git a/test/integration/tcp_tunneling_integration_test.cc b/test/integration/tcp_tunneling_integration_test.cc index 591af915e938..00954ccde7aa 100644 --- a/test/integration/tcp_tunneling_integration_test.cc +++ b/test/integration/tcp_tunneling_integration_test.cc @@ -235,7 +235,7 @@ TEST_P(ProxyingConnectIntegrationTest, ProxyConnect) { // Wait for them to arrive downstream. response_->waitForHeaders(); - EXPECT_EQ("200", response_->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response_->headers().getStatusValue()); // Make sure that even once the response has started, that data can continue to go upstream. 
codec_client_->sendData(*request_encoder_, "hello", false); diff --git a/test/integration/transport_socket_match_integration_test.cc b/test/integration/transport_socket_match_integration_test.cc index e074af1a85d0..771e98df76f7 100644 --- a/test/integration/transport_socket_match_integration_test.cc +++ b/test/integration/transport_socket_match_integration_test.cc @@ -188,10 +188,10 @@ TEST_F(TransportSockeMatchIntegrationTest, TlsAndPlaintextSucceed) { IntegrationStreamDecoderPtr response = codec_client_->makeHeaderOnlyRequest(type_a_request_headers_); response->waitForEndStream(); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); response = codec_client_->makeHeaderOnlyRequest(type_b_request_headers_); response->waitForEndStream(); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } } @@ -203,10 +203,10 @@ TEST_F(TransportSockeMatchIntegrationTest, TlsAndPlaintextFailsWithoutSocketMatc IntegrationStreamDecoderPtr response = codec_client_->makeHeaderOnlyRequest(type_a_request_headers_); response->waitForEndStream(); - EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + EXPECT_EQ("503", response->headers().getStatusValue()); response = codec_client_->makeHeaderOnlyRequest(type_b_request_headers_); response->waitForEndStream(); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); } } } // namespace Envoy diff --git a/test/integration/vhds_integration_test.cc b/test/integration/vhds_integration_test.cc index dc8dd64244f5..d9851f4dd09d 100644 --- a/test/integration/vhds_integration_test.cc +++ b/test/integration/vhds_integration_test.cc @@ -457,7 +457,7 @@ TEST_P(VhdsIntegrationTest, VhdsVirtualHostAddUpdateRemove) { upstream_request_->encodeHeaders(default_response_headers_, true); response->waitForHeaders(); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); cleanupUpstreamAndDownstream(); } @@ -524,7 +524,7 @@ TEST_P(VhdsIntegrationTest, RdsWithVirtualHostsVhdsVirtualHostAddUpdateRemove) { upstream_request_->encodeHeaders(default_response_headers_, true); response->waitForHeaders(); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); cleanupUpstreamAndDownstream(); } @@ -565,7 +565,7 @@ TEST_P(VhdsIntegrationTest, VhdsOnDemandUpdateWithResourceNameAsAlias) { upstream_request_->encodeHeaders(default_response_headers_, true); response->waitForHeaders(); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); cleanupUpstreamAndDownstream(); } @@ -606,7 +606,7 @@ TEST_P(VhdsIntegrationTest, VhdsOnDemandUpdateFailToResolveTheAlias) { notifyAboutAliasResolutionFailure("4", vhds_stream_, {"my_route/vhost.third"}); response->waitForHeaders(); - EXPECT_EQ("404", response->headers().Status()->value().getStringView()); + EXPECT_EQ("404", response->headers().getStatusValue()); cleanupUpstreamAndDownstream(); } @@ -648,7 +648,7 @@ TEST_P(VhdsIntegrationTest, VhdsOnDemandUpdateFailToResolveOneAliasOutOfSeveral) {"vhost.first"}, {"my_route/vhost.third"}); response->waitForHeaders(); - EXPECT_EQ("404", response->headers().Status()->value().getStringView()); + EXPECT_EQ("404", 
response->headers().getStatusValue()); cleanupUpstreamAndDownstream(); } diff --git a/test/integration/websocket_integration_test.cc b/test/integration/websocket_integration_test.cc index f66eddbf02de..195eb2ce0bdb 100644 --- a/test/integration/websocket_integration_test.cc +++ b/test/integration/websocket_integration_test.cc @@ -40,7 +40,7 @@ void commonValidate(ProxiedHeaders& proxied_headers, const OriginalHeaders& orig // If no content length is specified, the HTTP1 codec will add a chunked encoding header. if (original_headers.ContentLength() == nullptr && proxied_headers.TransferEncoding() != nullptr) { - ASSERT_EQ(proxied_headers.TransferEncoding()->value().getStringView(), "chunked"); + ASSERT_EQ(proxied_headers.getTransferEncodingValue(), "chunked"); proxied_headers.removeTransferEncoding(); } if (proxied_headers.Connection() != nullptr && @@ -60,7 +60,7 @@ void WebsocketIntegrationTest::validateUpgradeRequestHeaders( const Http::RequestHeaderMap& original_request_headers) { Http::TestRequestHeaderMapImpl proxied_request_headers(original_proxied_request_headers); if (proxied_request_headers.ForwardedProto()) { - ASSERT_EQ(proxied_request_headers.ForwardedProto()->value().getStringView(), "http"); + ASSERT_EQ(proxied_request_headers.getForwardedProtoValue(), "http"); proxied_request_headers.removeForwardedProto(); } @@ -70,7 +70,7 @@ void WebsocketIntegrationTest::validateUpgradeRequestHeaders( proxied_request_headers.removeEnvoyExpectedRequestTimeoutMs(); if (proxied_request_headers.Scheme()) { - ASSERT_EQ(proxied_request_headers.Scheme()->value().getStringView(), "http"); + ASSERT_EQ(proxied_request_headers.getSchemeValue(), "http"); } else { proxied_request_headers.setScheme("http"); } @@ -96,7 +96,7 @@ void WebsocketIntegrationTest::validateUpgradeResponseHeaders( // Check for and remove headers added by default for HTTP responses. 
ASSERT_TRUE(proxied_response_headers.Date() != nullptr); ASSERT_TRUE(proxied_response_headers.Server() != nullptr); - ASSERT_EQ(proxied_response_headers.Server()->value().getStringView(), "envoy"); + ASSERT_EQ(proxied_response_headers.getServerValue(), "envoy"); proxied_response_headers.removeDate(); proxied_response_headers.removeServer(); @@ -364,7 +364,7 @@ TEST_P(WebsocketIntegrationTest, WebsocketCustomFilterChain) { response_ = std::move(encoder_decoder.second); codec_client_->sendData(encoder_decoder.first, large_req_str, false); response_->waitForEndStream(); - EXPECT_EQ("413", response_->headers().Status()->value().getStringView()); + EXPECT_EQ("413", response_->headers().getStatusValue()); waitForClientDisconnectOrReset(); codec_client_->close(); } @@ -381,7 +381,7 @@ TEST_P(WebsocketIntegrationTest, WebsocketCustomFilterChain) { response_ = std::move(encoder_decoder.second); codec_client_->sendData(encoder_decoder.first, large_req_str, false); response_->waitForEndStream(); - EXPECT_EQ("413", response_->headers().Status()->value().getStringView()); + EXPECT_EQ("413", response_->headers().getStatusValue()); waitForClientDisconnectOrReset(); codec_client_->close(); } diff --git a/test/integration/xds_integration_test.cc b/test/integration/xds_integration_test.cc index 5ed39d311eca..833e2d73f65c 100644 --- a/test/integration/xds_integration_test.cc +++ b/test/integration/xds_integration_test.cc @@ -321,9 +321,9 @@ class LdsInplaceUpdateHttpIntegrationTest response->waitForEndStream(); EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + EXPECT_EQ("200", response->headers().getStatusValue()); if (expect_close) { - EXPECT_EQ("close", response->headers().Connection()->value().getStringView()); + EXPECT_EQ("close", response->headers().getConnectionValue()); } else { EXPECT_EQ(nullptr, response->headers().Connection()); diff --git a/test/integration/xfcc_integration_test.cc b/test/integration/xfcc_integration_test.cc index 25176afe941f..2b87808c372c 100644 --- a/test/integration/xfcc_integration_test.cc +++ b/test/integration/xfcc_integration_test.cc @@ -178,8 +178,7 @@ void XfccIntegrationTest::testRequestAndResponseWithXfccHeader(std::string previ if (expected_xfcc.empty()) { EXPECT_EQ(nullptr, upstream_request_->headers().ForwardedClientCert()); } else { - EXPECT_EQ(expected_xfcc, - upstream_request_->headers().ForwardedClientCert()->value().getStringView()); + EXPECT_EQ(expected_xfcc, upstream_request_->headers().getForwardedClientCertValue()); } upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, true); response->waitForEndStream(); diff --git a/test/server/admin/server_info_handler_test.cc b/test/server/admin/server_info_handler_test.cc index 86e8223cc74d..fe3276b53485 100644 --- a/test/server/admin/server_info_handler_test.cc +++ b/test/server/admin/server_info_handler_test.cc @@ -71,8 +71,7 @@ TEST_P(AdminInstanceTest, GetReadyRequest) { ON_CALL(initManager, state()).WillByDefault(Return(Init::Manager::State::Initialized)); EXPECT_EQ(Http::Code::OK, admin_.request("/ready", "GET", response_headers, body)); EXPECT_EQ(body, "LIVE\n"); - EXPECT_THAT(std::string(response_headers.ContentType()->value().getStringView()), - HasSubstr("text/plain")); + EXPECT_THAT(std::string(response_headers.getContentTypeValue()), HasSubstr("text/plain")); } { @@ -83,8 +82,7 @@ TEST_P(AdminInstanceTest, GetReadyRequest) { EXPECT_EQ(Http::Code::ServiceUnavailable, admin_.request("/ready", "GET", 
response_headers, body)); EXPECT_EQ(body, "PRE_INITIALIZING\n"); - EXPECT_THAT(std::string(response_headers.ContentType()->value().getStringView()), - HasSubstr("text/plain")); + EXPECT_THAT(std::string(response_headers.getContentTypeValue()), HasSubstr("text/plain")); } Http::ResponseHeaderMapImpl response_headers; @@ -94,8 +92,7 @@ TEST_P(AdminInstanceTest, GetReadyRequest) { EXPECT_EQ(Http::Code::ServiceUnavailable, admin_.request("/ready", "GET", response_headers, body)); EXPECT_EQ(body, "INITIALIZING\n"); - EXPECT_THAT(std::string(response_headers.ContentType()->value().getStringView()), - HasSubstr("text/plain")); + EXPECT_THAT(std::string(response_headers.getContentTypeValue()), HasSubstr("text/plain")); } TEST_P(AdminInstanceTest, GetRequest) { @@ -117,8 +114,7 @@ TEST_P(AdminInstanceTest, GetRequest) { ON_CALL(initManager, state()).WillByDefault(Return(Init::Manager::State::Initialized)); EXPECT_EQ(Http::Code::OK, admin_.request("/server_info", "GET", response_headers, body)); envoy::admin::v3::ServerInfo server_info_proto; - EXPECT_THAT(std::string(response_headers.ContentType()->value().getStringView()), - HasSubstr("application/json")); + EXPECT_THAT(std::string(response_headers.getContentTypeValue()), HasSubstr("application/json")); // We only test that it parses as the proto and that some fields are correct, since // values such as timestamps + Envoy version are tricky to test for. @@ -136,8 +132,7 @@ TEST_P(AdminInstanceTest, GetRequest) { ON_CALL(initManager, state()).WillByDefault(Return(Init::Manager::State::Uninitialized)); EXPECT_EQ(Http::Code::OK, admin_.request("/server_info", "GET", response_headers, body)); envoy::admin::v3::ServerInfo server_info_proto; - EXPECT_THAT(std::string(response_headers.ContentType()->value().getStringView()), - HasSubstr("application/json")); + EXPECT_THAT(std::string(response_headers.getContentTypeValue()), HasSubstr("application/json")); // We only test that it parses as the proto and that some fields are correct, since // values such as timestamps + Envoy version are tricky to test for. @@ -153,8 +148,7 @@ TEST_P(AdminInstanceTest, GetRequest) { ON_CALL(initManager, state()).WillByDefault(Return(Init::Manager::State::Initializing)); EXPECT_EQ(Http::Code::OK, admin_.request("/server_info", "GET", response_headers, body)); envoy::admin::v3::ServerInfo server_info_proto; - EXPECT_THAT(std::string(response_headers.ContentType()->value().getStringView()), - HasSubstr("application/json")); + EXPECT_THAT(std::string(response_headers.getContentTypeValue()), HasSubstr("application/json")); // We only test that it parses as the proto and that some fields are correct, since // values such as timestamps + Envoy version are tricky to test for. 
@@ -170,8 +164,7 @@ TEST_P(AdminInstanceTest, PostRequest) { EXPECT_NO_LOGS(EXPECT_EQ(Http::Code::OK, admin_.request("/healthcheck/fail", "POST", response_headers, body))); EXPECT_EQ(body, "OK\n"); - EXPECT_THAT(std::string(response_headers.ContentType()->value().getStringView()), - HasSubstr("text/plain")); + EXPECT_THAT(std::string(response_headers.getContentTypeValue()), HasSubstr("text/plain")); } } // namespace Server diff --git a/test/server/admin/stats_handler_test.cc b/test/server/admin/stats_handler_test.cc index 8b282e6557bf..f6c326f909e5 100644 --- a/test/server/admin/stats_handler_test.cc +++ b/test/server/admin/stats_handler_test.cc @@ -553,8 +553,7 @@ TEST_P(AdminInstanceTest, GetRequestJson) { std::string body; EXPECT_EQ(Http::Code::OK, admin_.request("/stats?format=json", "GET", response_headers, body)); EXPECT_THAT(body, HasSubstr("{\"stats\":[")); - EXPECT_THAT(std::string(response_headers.ContentType()->value().getStringView()), - HasSubstr("application/json")); + EXPECT_THAT(std::string(response_headers.getContentTypeValue()), HasSubstr("application/json")); } TEST_P(AdminInstanceTest, RecentLookups) { @@ -564,8 +563,7 @@ TEST_P(AdminInstanceTest, RecentLookups) { // Recent lookup tracking is disabled by default. EXPECT_EQ(Http::Code::OK, admin_.request("/stats/recentlookups", "GET", response_headers, body)); EXPECT_THAT(body, HasSubstr("Lookup tracking is not enabled")); - EXPECT_THAT(std::string(response_headers.ContentType()->value().getStringView()), - HasSubstr("text/plain")); + EXPECT_THAT(std::string(response_headers.getContentTypeValue()), HasSubstr("text/plain")); // We can't test RecentLookups in admin unit tests as it doesn't work with a // fake symbol table. However we cover this solidly in integration tests. From 48a5b21d9483e7eddac79aeff7daac178d7b7462 Mon Sep 17 00:00:00 2001 From: Jose Ulises Nino Rivera Date: Tue, 26 May 2020 13:41:54 -0700 Subject: [PATCH 229/909] compression: add generic decompressor filter (#11172) Commit Message: add generic decompressor filter Risk Level: low - low as it is an extension, med - for users as this is a brand new filter. 
Testing: unit tests, integration tests Docs Changes: added docs Release Notes: added release notes Signed-off-by: Jose Nino --- CODEOWNERS | 1 + STYLE.md | 2 +- api/BUILD | 1 + .../http/compressor/v3/compressor.proto | 2 +- .../filters/http/decompressor/v3/BUILD | 12 + .../http/decompressor/v3/decompressor.proto | 57 +++ api/versioning/BUILD | 1 + .../http/http_filters/decompressor_filter.rst | 112 ++++++ .../http/http_filters/http_filters.rst | 1 + docs/root/version_history/current.rst | 1 + .../http/compressor/v3/compressor.proto | 2 +- .../filters/http/decompressor/v3/BUILD | 12 + .../http/decompressor/v3/decompressor.proto | 57 +++ include/envoy/http/header_map.h | 2 +- .../compression/gzip/compressor/config.cc | 3 +- .../compression/gzip/decompressor/config.cc | 6 +- source/extensions/extensions_build_config.bzl | 1 + .../filters/http/decompressor/BUILD | 45 +++ .../filters/http/decompressor/config.cc | 48 +++ .../filters/http/decompressor/config.h | 33 ++ .../http/decompressor/decompressor_filter.cc | 175 +++++++++ .../http/decompressor/decompressor_filter.h | 154 ++++++++ .../filters/http/gzip/gzip_filter.cc | 3 +- .../filters/http/well_known_names.h | 2 + .../compression/gzip/decompressor/BUILD | 8 +- .../filters/http/decompressor/BUILD | 48 +++ .../decompressor_filter_integration_test.cc | 156 ++++++++ .../decompressor/decompressor_filter_test.cc | 363 ++++++++++++++++++ test/mocks/compression/decompressor/BUILD | 19 + test/mocks/compression/decompressor/mocks.cc | 21 + test/mocks/compression/decompressor/mocks.h | 38 ++ 31 files changed, 1377 insertions(+), 9 deletions(-) create mode 100644 api/envoy/extensions/filters/http/decompressor/v3/BUILD create mode 100644 api/envoy/extensions/filters/http/decompressor/v3/decompressor.proto create mode 100644 docs/root/configuration/http/http_filters/decompressor_filter.rst create mode 100644 generated_api_shadow/envoy/extensions/filters/http/decompressor/v3/BUILD create mode 100644 generated_api_shadow/envoy/extensions/filters/http/decompressor/v3/decompressor.proto create mode 100644 source/extensions/filters/http/decompressor/BUILD create mode 100644 source/extensions/filters/http/decompressor/config.cc create mode 100644 source/extensions/filters/http/decompressor/config.h create mode 100644 source/extensions/filters/http/decompressor/decompressor_filter.cc create mode 100644 source/extensions/filters/http/decompressor/decompressor_filter.h create mode 100644 test/extensions/filters/http/decompressor/BUILD create mode 100644 test/extensions/filters/http/decompressor/decompressor_filter_integration_test.cc create mode 100644 test/extensions/filters/http/decompressor/decompressor_filter_test.cc create mode 100644 test/mocks/compression/decompressor/BUILD create mode 100644 test/mocks/compression/decompressor/mocks.cc create mode 100644 test/mocks/compression/decompressor/mocks.h diff --git a/CODEOWNERS b/CODEOWNERS index 601f9c69755d..200d8fbf9e30 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -118,3 +118,4 @@ extensions/filters/common/original_src @snowp @klarose # Compression /*/extensions/compression/common @junr03 @rojkov /*/extensions/compression/gzip @junr03 @rojkov +/*/extensions/filters/http/decompressor @rojkov @dio diff --git a/STYLE.md b/STYLE.md index 54e513ef3435..2c5b6d2e785f 100644 --- a/STYLE.md +++ b/STYLE.md @@ -46,7 +46,7 @@ * Regular pointers (e.g. `int* foo`) should not be type aliased. 
* `absl::optional> is type aliased: * `using FooOptRef = absl::optional>;` - * `using FooOptConstRef = absl::optional>;` + * `using FooOptConstRef = absl::optional>;` * If move semantics are intended, prefer specifying function arguments with `&&`. E.g., `void onHeaders(Http::HeaderMapPtr&& headers, ...)`. The rationale for this is that it forces the caller to specify `std::move(...)` or pass a temporary and makes the intention at diff --git a/api/BUILD b/api/BUILD index c701bdcf4833..cdc59c8a143b 100644 --- a/api/BUILD +++ b/api/BUILD @@ -172,6 +172,7 @@ proto_library( "//envoy/extensions/filters/http/compressor/v3:pkg", "//envoy/extensions/filters/http/cors/v3:pkg", "//envoy/extensions/filters/http/csrf/v3:pkg", + "//envoy/extensions/filters/http/decompressor/v3:pkg", "//envoy/extensions/filters/http/dynamic_forward_proxy/v3:pkg", "//envoy/extensions/filters/http/dynamo/v3:pkg", "//envoy/extensions/filters/http/ext_authz/v3:pkg", diff --git a/api/envoy/extensions/filters/http/compressor/v3/compressor.proto b/api/envoy/extensions/filters/http/compressor/v3/compressor.proto index 1f6cd63e9d52..0bfa5c1860d4 100644 --- a/api/envoy/extensions/filters/http/compressor/v3/compressor.proto +++ b/api/envoy/extensions/filters/http/compressor/v3/compressor.proto @@ -53,7 +53,7 @@ message Compressor { config.core.v3.RuntimeFeatureFlag runtime_enabled = 5; // A compressor library to use for compression. Currently only - // :ref:`envoy.filters.http.compressor.gzip` + // :ref:`envoy.compression.gzip.compressor` // is included in Envoy. // This field is ignored if used in the context of the gzip http-filter, but is mandatory otherwise. config.core.v3.TypedExtensionConfig compressor_library = 6; diff --git a/api/envoy/extensions/filters/http/decompressor/v3/BUILD b/api/envoy/extensions/filters/http/decompressor/v3/BUILD new file mode 100644 index 000000000000..2c3dad6453b6 --- /dev/null +++ b/api/envoy/extensions/filters/http/decompressor/v3/BUILD @@ -0,0 +1,12 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/filters/http/decompressor/v3/decompressor.proto b/api/envoy/extensions/filters/http/decompressor/v3/decompressor.proto new file mode 100644 index 000000000000..1e3d72766d05 --- /dev/null +++ b/api/envoy/extensions/filters/http/decompressor/v3/decompressor.proto @@ -0,0 +1,57 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.decompressor.v3; + +import "envoy/config/core/v3/base.proto"; +import "envoy/config/core/v3/extension.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.decompressor.v3"; +option java_outer_classname = "DecompressorProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Decompressor] +// [#extension: envoy.filters.http.decompressor] + +message Decompressor { + // Common configuration for filter behavior on both the request and response direction. 
+ message CommonDirectionConfig { + // Runtime flag that controls whether the filter is enabled for decompression or not. If set to false, the + // filter will operate as a pass-through filter. If the message is unspecified, the filter will be enabled. + config.core.v3.RuntimeFeatureFlag enabled = 1; + } + + // Configuration for filter behavior on the request direction. + message RequestDirectionConfig { + CommonDirectionConfig common_config = 1; + + // If set to true, and response decompression is enabled, the filter modifies the Accept-Encoding + // request header by appending the decompressor_library's encoding. Defaults to true. + google.protobuf.BoolValue advertise_accept_encoding = 2; + } + + // Configuration for filter behavior on the response direction. + message ResponseDirectionConfig { + CommonDirectionConfig common_config = 1; + } + + // A decompressor library to use for both request and response decompression. Currently only + // :ref:`envoy.compression.gzip.compressor` + // is included in Envoy. + config.core.v3.TypedExtensionConfig decompressor_library = 1 + [(validate.rules).message = {required: true}]; + + // Configuration for request decompression. Decompression is enabled by default if left empty. + RequestDirectionConfig request_direction_config = 2; + + // Configuration for response decompression. Decompression is enabled by default if left empty. + ResponseDirectionConfig response_direction_config = 3; +} diff --git a/api/versioning/BUILD b/api/versioning/BUILD index d7771fbbd29e..fecf08a9f701 100644 --- a/api/versioning/BUILD +++ b/api/versioning/BUILD @@ -55,6 +55,7 @@ proto_library( "//envoy/extensions/filters/http/compressor/v3:pkg", "//envoy/extensions/filters/http/cors/v3:pkg", "//envoy/extensions/filters/http/csrf/v3:pkg", + "//envoy/extensions/filters/http/decompressor/v3:pkg", "//envoy/extensions/filters/http/dynamic_forward_proxy/v3:pkg", "//envoy/extensions/filters/http/dynamo/v3:pkg", "//envoy/extensions/filters/http/ext_authz/v3:pkg", diff --git a/docs/root/configuration/http/http_filters/decompressor_filter.rst b/docs/root/configuration/http/http_filters/decompressor_filter.rst new file mode 100644 index 000000000000..898f9d1f0fd6 --- /dev/null +++ b/docs/root/configuration/http/http_filters/decompressor_filter.rst @@ -0,0 +1,112 @@ +.. _config_http_filters_decompressor: + +Decompressor +============ +Decompressor is an HTTP filter which enables Envoy to bidirectionally decompress data. + + +Configuration +------------- +* :ref:`v3 API reference ` + +How it works +------------ +When the decompressor filter is enabled, headers are inspected to +determine whether or not the content should be decompressed. The content is +decompressed and passed on to the rest of the filter chain. Note that decompression happens +independently for request and responses based on the rules described below. + +Currently the filter supports :ref:`gzip compression ` +only. Other compression libraries can be supported as extensions. + +An example configuration of the filter may look like the following: + +.. code-block:: yaml + + http_filters: + - name: decompressor + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.decompressor.v3.Decompressor + decompressor_library: + name: basic + typed_config: + "@type": type.googleapis.com/envoy.extensions.compression.gzip.decompressor.v3.Gzip + window_bits: 10 + +By *default* decompression will be *skipped* when: + +- A request/response does NOT contain *content-encoding* header. 
+- A request/response includes *content-encoding* header, but it does not contain the configured + decompressor's content-encoding. +- A request/response contains a *cache-control* header whose value includes "no-transform". + +When decompression is *applied*: + +- The *content-length* is removed from headers. + + .. note:: + + If an updated *content-length* header is desired, the buffer filter can be installed as part + of the filter chain to buffer decompressed frames, and ultimately update the header. Due to + :ref:`filter ordering ` a buffer filter needs to be + installed after the decompressor for requests and prior to the decompressor for responses. + +- The *content-encoding* header is modified to remove the decompression that was applied. + +.. _decompressor-statistics: + +Using different decompressors for requests and responses +-------------------------------------------------------- + +If different compression libraries are desired for requests and responses, it is possible to install +multiple decompressor filters enabled only for requests or responses. For instance: + +.. code-block:: yaml + + http_filters: + # This filter is only enabled for requests. + - name: envoy.filters.http.decompressor + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.decompressor.v3.Decompressor + decompressor_library: + name: small + typed_config: + "@type": "type.googleapis.com/envoy.extensions.compression.gzip.decompressor.v3.Gzip" + window_bits: 9 + chunk_size: 8192 + response_direction_config: + common_config: + enabled: + default_value: false + runtime_key: response_decompressor_enabled + # This filter is only enabled for responses. + - name: envoy.filters.http.decompressor + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.decompressor.v3.Decompressor + decompressor_library: + name: large + typed_config: + "@type": "type.googleapis.com/envoy.extensions.compression.gzip.decompressor.v3.Gzip" + window_bits: 12 + chunk_size: 16384 + request_direction_config: + common_config: + enabled: + default_value: false + runtime_key: request_decompressor_enabled + +Statistics +---------- + +Every configured Deompressor filter has statistics rooted at +.decompressor...* +with the following: + +.. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + decompressed, Counter, Number of request/responses compressed. + not_decompressed, Counter, Number of request/responses not compressed. + total_uncompressed_bytes, Counter, The total uncompressed bytes of all the request/responses that were marked for decompression. + total_compressed_bytes, Counter, The total compressed bytes of all the request/responses that were marked for decompression. diff --git a/docs/root/configuration/http/http_filters/http_filters.rst b/docs/root/configuration/http/http_filters/http_filters.rst index 3d541aed13cf..911034fe13c6 100644 --- a/docs/root/configuration/http/http_filters/http_filters.rst +++ b/docs/root/configuration/http/http_filters/http_filters.rst @@ -13,6 +13,7 @@ HTTP filters compressor_filter cors_filter csrf_filter + decompressor_filter dynamic_forward_proxy_filter dynamodb_filter ext_authz_filter diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index d16f5d1c8459..7d4fcd9a3f5d 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -47,6 +47,7 @@ New Features * aggregate cluster: make route :ref:`retry_priority ` predicates work with :ref:`this cluster type `. 
* compressor: generic :ref:`compressor ` filter exposed to users. * config: added :ref:`version_text ` stat that reflects xDS version. +* decompressor: generic :ref:`decompressor ` filter exposed to users. * dynamic forward proxy: added :ref:`SNI based dynamic forward proxy ` support. * fault: added support for controlling the percentage of requests that abort, delay and response rate limits faults are applied to using :ref:`HTTP headers ` to the HTTP fault filter. diff --git a/generated_api_shadow/envoy/extensions/filters/http/compressor/v3/compressor.proto b/generated_api_shadow/envoy/extensions/filters/http/compressor/v3/compressor.proto index 1f6cd63e9d52..0bfa5c1860d4 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/compressor/v3/compressor.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/compressor/v3/compressor.proto @@ -53,7 +53,7 @@ message Compressor { config.core.v3.RuntimeFeatureFlag runtime_enabled = 5; // A compressor library to use for compression. Currently only - // :ref:`envoy.filters.http.compressor.gzip` + // :ref:`envoy.compression.gzip.compressor` // is included in Envoy. // This field is ignored if used in the context of the gzip http-filter, but is mandatory otherwise. config.core.v3.TypedExtensionConfig compressor_library = 6; diff --git a/generated_api_shadow/envoy/extensions/filters/http/decompressor/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/decompressor/v3/BUILD new file mode 100644 index 000000000000..2c3dad6453b6 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/http/decompressor/v3/BUILD @@ -0,0 +1,12 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/filters/http/decompressor/v3/decompressor.proto b/generated_api_shadow/envoy/extensions/filters/http/decompressor/v3/decompressor.proto new file mode 100644 index 000000000000..1e3d72766d05 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/http/decompressor/v3/decompressor.proto @@ -0,0 +1,57 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.decompressor.v3; + +import "envoy/config/core/v3/base.proto"; +import "envoy/config/core/v3/extension.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.decompressor.v3"; +option java_outer_classname = "DecompressorProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Decompressor] +// [#extension: envoy.filters.http.decompressor] + +message Decompressor { + // Common configuration for filter behavior on both the request and response direction. + message CommonDirectionConfig { + // Runtime flag that controls whether the filter is enabled for decompression or not. If set to false, the + // filter will operate as a pass-through filter. If the message is unspecified, the filter will be enabled. + config.core.v3.RuntimeFeatureFlag enabled = 1; + } + + // Configuration for filter behavior on the request direction. 
+ message RequestDirectionConfig { + CommonDirectionConfig common_config = 1; + + // If set to true, and response decompression is enabled, the filter modifies the Accept-Encoding + // request header by appending the decompressor_library's encoding. Defaults to true. + google.protobuf.BoolValue advertise_accept_encoding = 2; + } + + // Configuration for filter behavior on the response direction. + message ResponseDirectionConfig { + CommonDirectionConfig common_config = 1; + } + + // A decompressor library to use for both request and response decompression. Currently only + // :ref:`envoy.compression.gzip.compressor` + // is included in Envoy. + config.core.v3.TypedExtensionConfig decompressor_library = 1 + [(validate.rules).message = {required: true}]; + + // Configuration for request decompression. Decompression is enabled by default if left empty. + RequestDirectionConfig request_direction_config = 2; + + // Configuration for response decompression. Decompression is enabled by default if left empty. + ResponseDirectionConfig response_direction_config = 3; +} diff --git a/include/envoy/http/header_map.h b/include/envoy/http/header_map.h index 789ad6121d5b..96af5483d5cd 100644 --- a/include/envoy/http/header_map.h +++ b/include/envoy/http/header_map.h @@ -326,7 +326,6 @@ class HeaderEntry { HEADER_FUNC(AccessControlAllowOrigin) \ HEADER_FUNC(AccessControlExposeHeaders) \ HEADER_FUNC(AccessControlMaxAge) \ - HEADER_FUNC(ContentEncoding) \ HEADER_FUNC(Date) \ HEADER_FUNC(Etag) \ HEADER_FUNC(EnvoyDegraded) \ @@ -346,6 +345,7 @@ class HeaderEntry { #define INLINE_REQ_RESP_HEADERS(HEADER_FUNC) \ HEADER_FUNC(CacheControl) \ HEADER_FUNC(Connection) \ + HEADER_FUNC(ContentEncoding) \ HEADER_FUNC(ContentLength) \ HEADER_FUNC(ContentType) \ HEADER_FUNC(EnvoyAttemptCount) \ diff --git a/source/extensions/compression/gzip/compressor/config.cc b/source/extensions/compression/gzip/compressor/config.cc index 9d37441547f4..0971a9a90586 100644 --- a/source/extensions/compression/gzip/compressor/config.cc +++ b/source/extensions/compression/gzip/compressor/config.cc @@ -13,7 +13,8 @@ const uint64_t DefaultMemoryLevel = 5; // Default and maximum compression window size. const uint64_t DefaultWindowBits = 12; -// When summed to window bits, this sets a gzip header and trailer around the compressed data. +// When logical OR'ed to window bits, this sets a gzip header and trailer around the compressed +// data. const uint64_t GzipHeaderValue = 16; // Default zlib chunk size. 
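The compressor comment above and the decompressor change below describe the same zlib detail: the gzip header/trailer is selected by OR'ing 16 into the `window_bits` value handed to `deflateInit2()`/`inflateInit2()` (per the zlib manual cited in the new comment). With the default of 12 used by both factories, OR and the older "summed" wording happen to produce the same number, which is worth spelling out:

#include <cstdint>

// Defaults taken from these config.cc files: window bits 12, gzip wrapper flag 16.
// Bit 4 (value 16) is not set in 12, so OR and addition coincide; both yield 28,
// the value ultimately passed to zlib.
constexpr uint64_t kDefaultWindowBits = 12;
constexpr uint64_t kGzipHeaderValue = 16;
static_assert((kDefaultWindowBits | kGzipHeaderValue) == 28, "OR sets the gzip wrapper bit");
static_assert(kDefaultWindowBits + kGzipHeaderValue == 28, "addition happens to match here");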
diff --git a/source/extensions/compression/gzip/decompressor/config.cc b/source/extensions/compression/gzip/decompressor/config.cc index 8dc898be7060..bf73e3eba697 100644 --- a/source/extensions/compression/gzip/decompressor/config.cc +++ b/source/extensions/compression/gzip/decompressor/config.cc @@ -9,11 +9,15 @@ namespace Decompressor { namespace { const uint32_t DefaultWindowBits = 12; const uint32_t DefaultChunkSize = 4096; +// When logical OR'ed to window bits, this tells zlib library to decompress gzip data per: +// inflateInit2 in https://www.zlib.net/manual.html +const uint32_t GzipHeaderValue = 16; } // namespace GzipDecompressorFactory::GzipDecompressorFactory( const envoy::extensions::compression::gzip::decompressor::v3::Gzip& gzip) - : window_bits_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(gzip, window_bits, DefaultWindowBits)), + : window_bits_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(gzip, window_bits, DefaultWindowBits) | + GzipHeaderValue), chunk_size_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(gzip, chunk_size, DefaultChunkSize)) {} Envoy::Compression::Decompressor::DecompressorPtr GzipDecompressorFactory::createDecompressor() { diff --git a/source/extensions/extensions_build_config.bzl b/source/extensions/extensions_build_config.bzl index 8baae0811e28..163efb8ffd70 100644 --- a/source/extensions/extensions_build_config.bzl +++ b/source/extensions/extensions_build_config.bzl @@ -48,6 +48,7 @@ EXTENSIONS = { "envoy.filters.http.compressor": "//source/extensions/filters/http/compressor:config", "envoy.filters.http.cors": "//source/extensions/filters/http/cors:config", "envoy.filters.http.csrf": "//source/extensions/filters/http/csrf:config", + "envoy.filters.http.decompressor": "//source/extensions/filters/http/decompressor:config", "envoy.filters.http.dynamic_forward_proxy": "//source/extensions/filters/http/dynamic_forward_proxy:config", "envoy.filters.http.dynamo": "//source/extensions/filters/http/dynamo:config", "envoy.filters.http.ext_authz": "//source/extensions/filters/http/ext_authz:config", diff --git a/source/extensions/filters/http/decompressor/BUILD b/source/extensions/filters/http/decompressor/BUILD new file mode 100644 index 000000000000..3e8faf194250 --- /dev/null +++ b/source/extensions/filters/http/decompressor/BUILD @@ -0,0 +1,45 @@ +licenses(["notice"]) # Apache 2 + +# HTTP L7 filter that performs decompression with configurable decompression libraries +# Public docs: docs/root/configuration/http_filters/decompressor_filter.rst + +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", + "envoy_cc_library", + "envoy_package", +) + +envoy_package() + +envoy_cc_library( + name = "decompressor_filter_lib", + srcs = ["decompressor_filter.cc"], + hdrs = ["decompressor_filter.h"], + deps = [ + "//include/envoy/compression/decompressor:decompressor_config_interface", + "//include/envoy/compression/decompressor:decompressor_interface", + "//include/envoy/http:filter_interface", + "//source/common/buffer:buffer_lib", + "//source/common/common:macros", + "//source/common/http:headers_lib", + "//source/common/runtime:runtime_lib", + "//source/extensions/filters/http/common:pass_through_filter_lib", + "@envoy_api//envoy/extensions/filters/http/decompressor/v3:pkg_cc_proto", + ], +) + +envoy_cc_extension( + name = "config", + srcs = ["config.cc"], + hdrs = ["config.h"], + security_posture = "robust_to_untrusted_downstream_and_upstream", + deps = [ + ":decompressor_filter_lib", + "//include/envoy/compression/decompressor:decompressor_config_interface", + 
"//source/common/config:utility_lib", + "//source/extensions/filters/http:well_known_names", + "//source/extensions/filters/http/common:factory_base_lib", + "@envoy_api//envoy/extensions/filters/http/decompressor/v3:pkg_cc_proto", + ], +) diff --git a/source/extensions/filters/http/decompressor/config.cc b/source/extensions/filters/http/decompressor/config.cc new file mode 100644 index 000000000000..fb52ae85c216 --- /dev/null +++ b/source/extensions/filters/http/decompressor/config.cc @@ -0,0 +1,48 @@ +#include "extensions/filters/http/decompressor/config.h" + +#include "envoy/compression/decompressor/config.h" + +#include "common/config/utility.h" + +#include "extensions/filters/http/decompressor/decompressor_filter.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace Decompressor { + +Http::FilterFactoryCb DecompressorFilterFactory::createFilterFactoryFromProtoTyped( + const envoy::extensions::filters::http::decompressor::v3::Decompressor& proto_config, + const std::string& stats_prefix, Server::Configuration::FactoryContext& context) { + const std::string decompressor_library_type{TypeUtil::typeUrlToDescriptorFullName( + proto_config.decompressor_library().typed_config().type_url())}; + Compression::Decompressor::NamedDecompressorLibraryConfigFactory* const + decompressor_library_factory = Registry::FactoryRegistry< + Compression::Decompressor::NamedDecompressorLibraryConfigFactory>:: + getFactoryByType(decompressor_library_type); + if (decompressor_library_factory == nullptr) { + throw EnvoyException(fmt::format("Didn't find a registered implementation for type: '{}'", + decompressor_library_type)); + } + ProtobufTypes::MessagePtr message = Config::Utility::translateAnyToFactoryConfig( + proto_config.decompressor_library().typed_config(), context.messageValidationVisitor(), + *decompressor_library_factory); + Compression::Decompressor::DecompressorFactoryPtr decompressor_factory = + decompressor_library_factory->createDecompressorFactoryFromProto(*message, context); + DecompressorFilterConfigSharedPtr filter_config = std::make_shared( + proto_config, stats_prefix, context.scope(), context.runtime(), + std::move(decompressor_factory)); + return [filter_config](Http::FilterChainFactoryCallbacks& callbacks) -> void { + callbacks.addStreamFilter(std::make_shared(filter_config)); + }; +} + +/** + * Static registration for the decompressor filter. @see NamedHttpFilterConfigFactory. + */ +REGISTER_FACTORY(DecompressorFilterFactory, Server::Configuration::NamedHttpFilterConfigFactory); + +} // namespace Decompressor +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/filters/http/decompressor/config.h b/source/extensions/filters/http/decompressor/config.h new file mode 100644 index 000000000000..4e04abe3c6df --- /dev/null +++ b/source/extensions/filters/http/decompressor/config.h @@ -0,0 +1,33 @@ +#pragma once + +#include "envoy/extensions/filters/http/decompressor/v3/decompressor.pb.h" +#include "envoy/extensions/filters/http/decompressor/v3/decompressor.pb.validate.h" + +#include "extensions/filters/http/common/factory_base.h" +#include "extensions/filters/http/well_known_names.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace Decompressor { + +/** + * Config registration for the decompressor filter. @see NamedHttpFilterConfigFactory. 
+ */ +class DecompressorFilterFactory + : public Common::FactoryBase { +public: + DecompressorFilterFactory() : FactoryBase(HttpFilterNames::get().Decompressor) {} + +private: + Http::FilterFactoryCb createFilterFactoryFromProtoTyped( + const envoy::extensions::filters::http::decompressor::v3::Decompressor& config, + const std::string& stats_prefix, Server::Configuration::FactoryContext& context) override; +}; + +DECLARE_FACTORY(DecompressorFilterFactory); + +} // namespace Decompressor +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/filters/http/decompressor/decompressor_filter.cc b/source/extensions/filters/http/decompressor/decompressor_filter.cc new file mode 100644 index 000000000000..f8938489a6a3 --- /dev/null +++ b/source/extensions/filters/http/decompressor/decompressor_filter.cc @@ -0,0 +1,175 @@ +#include "extensions/filters/http/decompressor/decompressor_filter.h" + +#include "common/buffer/buffer_impl.h" +#include "common/common/empty_string.h" +#include "common/common/macros.h" +#include "common/http/headers.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace Decompressor { + +DecompressorFilterConfig::DecompressorFilterConfig( + const envoy::extensions::filters::http::decompressor::v3::Decompressor& proto_config, + const std::string& stats_prefix, Stats::Scope& scope, Runtime::Loader& runtime, + Compression::Decompressor::DecompressorFactoryPtr decompressor_factory) + : stats_prefix_(fmt::format("{}decompressor.{}.{}", stats_prefix, + proto_config.decompressor_library().name(), + decompressor_factory->statsPrefix())), + decompressor_factory_(std::move(decompressor_factory)), + request_direction_config_(proto_config.request_direction_config(), stats_prefix_, scope, + runtime), + response_direction_config_(proto_config.response_direction_config(), stats_prefix_, scope, + runtime) {} + +DecompressorFilterConfig::DirectionConfig::DirectionConfig( + const envoy::extensions::filters::http::decompressor::v3::Decompressor::CommonDirectionConfig& + proto_config, + const std::string& stats_prefix, Stats::Scope& scope, Runtime::Loader& runtime) + : stats_(generateStats(stats_prefix, scope)), + decompression_enabled_(proto_config.enabled(), runtime) {} + +DecompressorFilterConfig::RequestDirectionConfig::RequestDirectionConfig( + const envoy::extensions::filters::http::decompressor::v3::Decompressor::RequestDirectionConfig& + proto_config, + const std::string& stats_prefix, Stats::Scope& scope, Runtime::Loader& runtime) + : DirectionConfig(proto_config.common_config(), stats_prefix + "request.", scope, runtime), + advertise_accept_encoding_( + PROTOBUF_GET_WRAPPED_OR_DEFAULT(proto_config, advertise_accept_encoding, true)) {} + +DecompressorFilterConfig::ResponseDirectionConfig::ResponseDirectionConfig( + const envoy::extensions::filters::http::decompressor::v3::Decompressor::ResponseDirectionConfig& + proto_config, + const std::string& stats_prefix, Stats::Scope& scope, Runtime::Loader& runtime) + : DirectionConfig(proto_config.common_config(), stats_prefix + "response.", scope, runtime) {} + +DecompressorFilter::DecompressorFilter(DecompressorFilterConfigSharedPtr config) + : config_(std::move(config)) {} + +Http::FilterHeadersStatus DecompressorFilter::decodeHeaders(Http::RequestHeaderMap& headers, + bool end_stream) { + // Headers only request, continue. 
+ if (end_stream) { + return Http::FilterHeadersStatus::Continue; + } + ENVOY_STREAM_LOG(debug, "DecompressorFilter::decodeHeaders: {}", *decoder_callbacks_, headers); + + // Two responsibilities on the request side: + // 1. If response decompression is enabled (and advertisement is enabled), then advertise to + // the upstream that this hop is able to decompress responses via the Accept-Encoding header. + if (config_->responseDirectionConfig().decompressionEnabled() && + config_->requestDirectionConfig().advertiseAcceptEncoding()) { + headers.appendAcceptEncoding(config_->contentEncoding(), ","); + ENVOY_STREAM_LOG(debug, + "DecompressorFilter::decodeHeaders advertise Accept-Encoding with value '{}'", + *decoder_callbacks_, headers.AcceptEncoding()->value().getStringView()); + } + + // 2. If request decompression is enabled, then decompress the request. + return maybeInitDecompress(config_->requestDirectionConfig(), request_decompressor_, + *decoder_callbacks_, headers); +}; + +Http::FilterDataStatus DecompressorFilter::decodeData(Buffer::Instance& data, bool) { + return maybeDecompress(config_->requestDirectionConfig(), request_decompressor_, + *decoder_callbacks_, data); +} + +Http::FilterHeadersStatus DecompressorFilter::encodeHeaders(Http::ResponseHeaderMap& headers, + bool end_stream) { + // Headers only response, continue. + if (end_stream) { + return Http::FilterHeadersStatus::Continue; + } + ENVOY_STREAM_LOG(debug, "DecompressorFilter::encodeHeaders: {}", *encoder_callbacks_, headers); + + return maybeInitDecompress(config_->responseDirectionConfig(), response_decompressor_, + *encoder_callbacks_, headers); +} + +Http::FilterDataStatus DecompressorFilter::encodeData(Buffer::Instance& data, bool) { + return maybeDecompress(config_->responseDirectionConfig(), response_decompressor_, + *encoder_callbacks_, data); +} + +Http::FilterHeadersStatus DecompressorFilter::maybeInitDecompress( + const DecompressorFilterConfig::DirectionConfig& direction_config, + Compression::Decompressor::DecompressorPtr& decompressor, + Http::StreamFilterCallbacks& callbacks, Http::RequestOrResponseHeaderMap& headers) { + if (direction_config.decompressionEnabled() && !hasCacheControlNoTransform(headers) && + contentEncodingMatches(headers)) { + direction_config.stats().decompressed_.inc(); + decompressor = config_->makeDecompressor(); + + // Update headers. + headers.removeContentLength(); + modifyContentEncoding(headers); + + ENVOY_STREAM_LOG(debug, "do decompress (without buffering) {}: {}", callbacks, + direction_config.logString(), headers); + } else { + direction_config.stats().not_decompressed_.inc(); + ENVOY_STREAM_LOG(debug, "do not decompress {}: {}", callbacks, direction_config.logString(), + headers); + } + + return Http::FilterHeadersStatus::Continue; +} + +Http::FilterDataStatus DecompressorFilter::maybeDecompress( + const DecompressorFilterConfig::DirectionConfig& direction_config, + const Compression::Decompressor::DecompressorPtr& decompressor, + Http::StreamFilterCallbacks& callbacks, Buffer::Instance& input_buffer) const { + if (decompressor) { + Buffer::OwnedImpl output_buffer; + decompressor->decompress(input_buffer, output_buffer); + + // Report decompression via stats and logging before modifying the input buffer. 
+ direction_config.stats().total_compressed_bytes_.add(input_buffer.length()); + direction_config.stats().total_uncompressed_bytes_.add(output_buffer.length()); + ENVOY_STREAM_LOG(debug, "{} data decompressed from {} bytes to {} bytes", callbacks, + direction_config.logString(), input_buffer.length(), output_buffer.length()); + + input_buffer.drain(input_buffer.length()); + input_buffer.add(output_buffer); + } + return Http::FilterDataStatus::Continue; +} + +bool DecompressorFilter::hasCacheControlNoTransform( + Http::RequestOrResponseHeaderMap& headers) const { + return headers.CacheControl() + ? StringUtil::caseFindToken(headers.CacheControl()->value().getStringView(), ",", + Http::Headers::get().CacheControlValues.NoTransform) + : false; +} + +/** + * Content-Encoding matches if the configured encoding is the first value in the comma-delimited + * Content-Encoding header, regardless of spacing and casing. + */ +bool DecompressorFilter::contentEncodingMatches(Http::RequestOrResponseHeaderMap& headers) const { + if (headers.ContentEncoding()) { + absl::string_view coding = StringUtil::trim( + StringUtil::cropRight(headers.ContentEncoding()->value().getStringView(), ",")); + return StringUtil::CaseInsensitiveCompare()(config_->contentEncoding(), coding); + } + return false; +} + +void DecompressorFilter::modifyContentEncoding(Http::RequestOrResponseHeaderMap& headers) const { + const auto all_codings = StringUtil::trim(headers.ContentEncoding()->value().getStringView()); + const auto remaining_codings = StringUtil::trim(StringUtil::cropLeft(all_codings, ",")); + + if (remaining_codings != all_codings) { + headers.setContentEncoding(remaining_codings); + } else { + headers.removeContentEncoding(); + } +} + +} // namespace Decompressor +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/filters/http/decompressor/decompressor_filter.h b/source/extensions/filters/http/decompressor/decompressor_filter.h new file mode 100644 index 000000000000..d7017e385771 --- /dev/null +++ b/source/extensions/filters/http/decompressor/decompressor_filter.h @@ -0,0 +1,154 @@ +#pragma once + +#include "envoy/compression/decompressor/config.h" +#include "envoy/compression/decompressor/decompressor.h" +#include "envoy/extensions/filters/http/decompressor/v3/decompressor.pb.h" +#include "envoy/http/filter.h" + +#include "common/common/macros.h" +#include "common/runtime/runtime_protos.h" + +#include "extensions/filters/http/common/pass_through_filter.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace Decompressor { + +/** + * All decompressor filter stats. @see stats_macros.h + */ +#define ALL_DECOMPRESSOR_STATS(COUNTER) \ + COUNTER(decompressed) \ + COUNTER(not_decompressed) \ + COUNTER(total_uncompressed_bytes) \ + COUNTER(total_compressed_bytes) + +/** + * Struct definition for decompressor stats. @see stats_macros.h + */ +struct DecompressorStats { + ALL_DECOMPRESSOR_STATS(GENERATE_COUNTER_STRUCT) +}; + +/** + * Configuration for the decompressor filter. 
+ */ +class DecompressorFilterConfig { +public: + class DirectionConfig { + public: + DirectionConfig(const envoy::extensions::filters::http::decompressor::v3::Decompressor:: + CommonDirectionConfig& proto_config, + const std::string& stats_prefix, Stats::Scope& scope, Runtime::Loader& runtime); + + virtual ~DirectionConfig() = default; + + virtual const std::string& logString() const PURE; + const DecompressorStats& stats() const { return stats_; } + bool decompressionEnabled() const { return decompression_enabled_.enabled(); } + + private: + static DecompressorStats generateStats(const std::string& prefix, Stats::Scope& scope) { + return DecompressorStats{ALL_DECOMPRESSOR_STATS(POOL_COUNTER_PREFIX(scope, prefix))}; + } + + const DecompressorStats stats_; + const Runtime::FeatureFlag decompression_enabled_; + }; + + class RequestDirectionConfig : public DirectionConfig { + public: + RequestDirectionConfig(const envoy::extensions::filters::http::decompressor::v3::Decompressor:: + RequestDirectionConfig& proto_config, + const std::string& stats_prefix, Stats::Scope& scope, + Runtime::Loader& runtime); + + // DirectionConfig + const std::string& logString() const override { + CONSTRUCT_ON_FIRST_USE(std::string, "request"); + } + + bool advertiseAcceptEncoding() const { return advertise_accept_encoding_; } + + private: + const bool advertise_accept_encoding_; + }; + + class ResponseDirectionConfig : public DirectionConfig { + public: + ResponseDirectionConfig(const envoy::extensions::filters::http::decompressor::v3::Decompressor:: + ResponseDirectionConfig& proto_config, + const std::string& stats_prefix, Stats::Scope& scope, + Runtime::Loader& runtime); + + // DirectionConfig + const std::string& logString() const override { + CONSTRUCT_ON_FIRST_USE(std::string, "response"); + } + }; + + DecompressorFilterConfig( + const envoy::extensions::filters::http::decompressor::v3::Decompressor& proto_config, + const std::string& stats_prefix, Stats::Scope& scope, Runtime::Loader& runtime, + Compression::Decompressor::DecompressorFactoryPtr decompressor_factory); + + Compression::Decompressor::DecompressorPtr makeDecompressor() { + return decompressor_factory_->createDecompressor(); + } + const std::string& contentEncoding() { return decompressor_factory_->contentEncoding(); } + const RequestDirectionConfig& requestDirectionConfig() { return request_direction_config_; } + const ResponseDirectionConfig& responseDirectionConfig() { return response_direction_config_; } + +private: + const std::string stats_prefix_; + const Compression::Decompressor::DecompressorFactoryPtr decompressor_factory_; + const RequestDirectionConfig request_direction_config_; + const ResponseDirectionConfig response_direction_config_; +}; + +using DecompressorFilterConfigSharedPtr = std::shared_ptr; + +/** + * A filter that decompresses data bidirectionally. 
+ */ +class DecompressorFilter : public Http::PassThroughFilter, + public Logger::Loggable { +public: + DecompressorFilter(DecompressorFilterConfigSharedPtr config); + + // Http::StreamDecoderFilter + Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap&, bool) override; + Http::FilterDataStatus decodeData(Buffer::Instance&, bool) override; + + // Http::StreamEncoderFilter + Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap&, bool) override; + Http::FilterDataStatus encodeData(Buffer::Instance&, bool) override; + +private: + Http::FilterHeadersStatus + maybeInitDecompress(const DecompressorFilterConfig::DirectionConfig& direction_config, + Compression::Decompressor::DecompressorPtr& decompressor, + Http::StreamFilterCallbacks& callbacks, + Http::RequestOrResponseHeaderMap& headers); + + Http::FilterDataStatus + maybeDecompress(const DecompressorFilterConfig::DirectionConfig& direction_config, + const Compression::Decompressor::DecompressorPtr& decompressor, + Http::StreamFilterCallbacks& callbacks, Buffer::Instance& input_buffer) const; + + // TODO(junr03): these do not need to be member functions. They can all be part of a static + // utility class. Moreover, they can be shared between compressor and decompressor. + bool hasCacheControlNoTransform(Http::RequestOrResponseHeaderMap& headers) const; + bool contentEncodingMatches(Http::RequestOrResponseHeaderMap& headers) const; + void modifyContentEncoding(Http::RequestOrResponseHeaderMap& headers) const; + + DecompressorFilterConfigSharedPtr config_; + Compression::Decompressor::DecompressorPtr request_decompressor_{}; + Compression::Decompressor::DecompressorPtr response_decompressor_{}; +}; + +} // namespace Decompressor +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/filters/http/gzip/gzip_filter.cc b/source/extensions/filters/http/gzip/gzip_filter.cc index b0b0ab06aec9..d233e6f20bc3 100644 --- a/source/extensions/filters/http/gzip/gzip_filter.cc +++ b/source/extensions/filters/http/gzip/gzip_filter.cc @@ -18,7 +18,8 @@ const uint64_t DefaultMemoryLevel = 5; // Default and maximum compression window size. const uint64_t DefaultWindowBits = 12; -// When summed to window bits, this sets a gzip header and trailer around the compressed data. +// When logical OR'ed to window bits, this sets a gzip header and trailer around the compressed +// data. 
const uint64_t GzipHeaderValue = 16; } // namespace diff --git a/source/extensions/filters/http/well_known_names.h b/source/extensions/filters/http/well_known_names.h index afa9981a7510..c3971fa30678 100644 --- a/source/extensions/filters/http/well_known_names.h +++ b/source/extensions/filters/http/well_known_names.h @@ -22,6 +22,8 @@ class HttpFilterNameValues { const std::string Cors = "envoy.filters.http.cors"; // CSRF filter const std::string Csrf = "envoy.filters.http.csrf"; + // Decompressor filter + const std::string Decompressor = "envoy.filters.http.decompressor"; // Dynamo filter const std::string Dynamo = "envoy.filters.http.dynamo"; // Fault filter diff --git a/test/extensions/compression/gzip/decompressor/BUILD b/test/extensions/compression/gzip/decompressor/BUILD index e40b2953db28..1816ef8da235 100644 --- a/test/extensions/compression/gzip/decompressor/BUILD +++ b/test/extensions/compression/gzip/decompressor/BUILD @@ -2,15 +2,19 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", - "envoy_cc_test", "envoy_package", ) +load( + "//test/extensions:extensions_build_system.bzl", + "envoy_extension_cc_test", +) envoy_package() -envoy_cc_test( +envoy_extension_cc_test( name = "zlib_decompressor_impl_test", srcs = ["zlib_decompressor_impl_test.cc"], + extension_name = "envoy.compression.gzip.decompressor", deps = [ "//source/common/common:assert_lib", "//source/common/common:hex_lib", diff --git a/test/extensions/filters/http/decompressor/BUILD b/test/extensions/filters/http/decompressor/BUILD new file mode 100644 index 000000000000..2da39cd00c40 --- /dev/null +++ b/test/extensions/filters/http/decompressor/BUILD @@ -0,0 +1,48 @@ +licenses(["notice"]) # Apache 2 + +load( + "//bazel:envoy_build_system.bzl", + "envoy_package", +) +load( + "//test/extensions:extensions_build_system.bzl", + "envoy_extension_cc_test", +) + +envoy_package() + +envoy_extension_cc_test( + name = "decompressor_filter_test", + srcs = ["decompressor_filter_test.cc"], + extension_name = "envoy.filters.http.decompressor", + deps = [ + "//source/common/http:headers_lib", + "//source/common/protobuf:utility_lib", + "//source/extensions/compression/gzip/decompressor:config", + "//source/extensions/filters/http:well_known_names", + "//source/extensions/filters/http/decompressor:config", + "//test/mocks/compression/decompressor:decompressor_mocks", + "//test/mocks/http:http_mocks", + "//test/mocks/protobuf:protobuf_mocks", + "//test/mocks/runtime:runtime_mocks", + "//test/test_common:utility_lib", + "@envoy_api//envoy/extensions/filters/http/decompressor/v3:pkg_cc_proto", + ], +) + +envoy_extension_cc_test( + name = "decompressor_filter_integration_test", + srcs = [ + "decompressor_filter_integration_test.cc", + ], + extension_name = "envoy.filters.http.decompressor", + deps = [ + "//source/extensions/compression/gzip/compressor:config", + "//source/extensions/compression/gzip/decompressor:config", + "//source/extensions/filters/http/decompressor:config", + "//test/integration:http_integration_lib", + "//test/mocks/server:server_mocks", + "//test/test_common:simulated_time_system_lib", + "//test/test_common:utility_lib", + ], +) diff --git a/test/extensions/filters/http/decompressor/decompressor_filter_integration_test.cc b/test/extensions/filters/http/decompressor/decompressor_filter_integration_test.cc new file mode 100644 index 000000000000..efb888569543 --- /dev/null +++ b/test/extensions/filters/http/decompressor/decompressor_filter_integration_test.cc @@ -0,0 +1,156 @@ +#include 
"envoy/event/timer.h" + +#include "extensions/compression/gzip/compressor/config.h" + +#include "test/integration/http_integration.h" +#include "test/mocks/server/mocks.h" +#include "test/test_common/simulated_time_system.h" +#include "test/test_common/utility.h" + +#include "gtest/gtest.h" + +namespace Envoy { + +class DecompressorIntegrationTest : public testing::TestWithParam, + public HttpIntegrationTest { +public: + DecompressorIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, GetParam()) { + Extensions::Compression::Gzip::Compressor::GzipCompressorLibraryFactory + compressor_library_factory; + envoy::extensions::compression::gzip::compressor::v3::Gzip factory_config; + testing::NiceMock context; + + auto compressor_factory = + compressor_library_factory.createCompressorFactoryFromProto(factory_config, context); + request_compressor_ = compressor_factory->createCompressor(); + response_compressor_ = compressor_factory->createCompressor(); + } + + void TearDown() override { cleanupUpstreamAndDownstream(); } + + void initializeFilter(const std::string& config) { + config_helper_.addFilter(config); + HttpIntegrationTest::initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + } + + const std::string default_config{R"EOF( + name: default_decompressor + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.decompressor.v3.Decompressor + decompressor_library: + name: testlib + typed_config: + "@type": type.googleapis.com/envoy.extensions.compression.gzip.decompressor.v3.Gzip + )EOF"}; + + Envoy::Compression::Compressor::CompressorPtr request_compressor_{}; + Envoy::Compression::Compressor::CompressorPtr response_compressor_{}; +}; + +INSTANTIATE_TEST_SUITE_P(IpVersions, DecompressorIntegrationTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); + +/** + * Exercises gzip decompression bidirectionally with default configuration. + */ +TEST_P(DecompressorIntegrationTest, BidirectionalDecompression) { + // Use gzip for decompression. + initializeFilter(default_config); + + // Enable request decompression by setting the Content-Encoding header to gzip. + auto encoder_decoder = + codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{":method", "POST"}, + {":scheme", "http"}, + {":path", "/test/long/url"}, + {":authority", "host"}, + {"content-encoding", "gzip"}}); + auto request_encoder = &encoder_decoder.first; + auto response = std::move(encoder_decoder.second); + + // Send first data chunk upstream. + Buffer::OwnedImpl request_data1(std::string(8192, 'a')); + auto uncompressed_request_length = request_data1.length(); + request_compressor_->compress(request_data1, Envoy::Compression::Compressor::State::Flush); + auto compressed_request_length = request_data1.length(); + codec_client_->sendData(*request_encoder, request_data1, false); + + // Send second data chunk upstream and finish the request stream. + Buffer::OwnedImpl request_data2(std::string(16384, 'a')); + uncompressed_request_length += request_data2.length(); + request_compressor_->compress(request_data2, Envoy::Compression::Compressor::State::Finish); + compressed_request_length += request_data2.length(); + codec_client_->sendData(*request_encoder, request_data2, true); + + // Wait for frames to arrive upstream. 
+ ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); + + // Assert that the total bytes received upstream equal the sum of the uncompressed byte buffers + // sent. + EXPECT_TRUE(upstream_request_->complete()); + TestUtility::headerMapEqualIgnoreOrder( + Http::TestRequestHeaderMapImpl{{":method", "POST"}, + {":scheme", "http"}, + {":path", "/test/long/url"}, + {":authority", "host"}, + {"accept-encoding", "wroong"}}, + upstream_request_->headers()); + EXPECT_EQ(uncompressed_request_length, upstream_request_->bodyLength()); + + // Verify stats + test_server_->waitForCounterEq("http.config_test.decompressor.testlib.gzip.request.decompressed", + 1); + test_server_->waitForCounterEq( + "http.config_test.decompressor.testlib.gzip.request.not_decompressed", 0); + test_server_->waitForCounterEq( + "http.config_test.decompressor.testlib.gzip.request.total_compressed_bytes", + compressed_request_length); + test_server_->waitForCounterEq( + "http.config_test.decompressor.testlib.gzip.request.total_uncompressed_bytes", + uncompressed_request_length); + + // Enable response decompression by setting the Content-Encoding header to gzip. + upstream_request_->encodeHeaders( + Http::TestResponseHeaderMapImpl{{":status", "200"}, {"content-encoding", "gzip"}}, false); + + // Send first data chunk downstream. + Buffer::OwnedImpl response_data1(std::string(4096, 'a')); + auto uncompressed_response_length = response_data1.length(); + response_compressor_->compress(response_data1, Envoy::Compression::Compressor::State::Flush); + auto compressed_response_length = response_data1.length(); + upstream_request_->encodeData(response_data1, false); + + // Send second data chunk downstream and finish the response stream. + Buffer::OwnedImpl response_data2(std::string(8192, 'a')); + uncompressed_response_length += response_data2.length(); + response_compressor_->compress(response_data2, Envoy::Compression::Compressor::State::Flush); + compressed_response_length += response_data2.length(); + upstream_request_->encodeData(response_data2, true); + + // Wait for frames to arrive downstream. + response->waitForEndStream(); + + // Assert that the total bytes received downstream equal the sum of the uncompressed byte buffers + // sent. 
+ EXPECT_TRUE(response->complete());
+ TestUtility::headerMapEqualIgnoreOrder(Http::TestRequestHeaderMapImpl{{":status", "200"}},
+ response->headers());
+ EXPECT_EQ(uncompressed_response_length, response->body().length());
+
+ // Verify stats
+ test_server_->waitForCounterEq("http.config_test.decompressor.testlib.gzip.response.decompressed",
+ 1);
+ test_server_->waitForCounterEq(
+ "http.config_test.decompressor.testlib.gzip.response.not_decompressed", 0);
+ test_server_->waitForCounterEq(
+ "http.config_test.decompressor.testlib.gzip.response.total_compressed_bytes",
+ compressed_response_length);
+ test_server_->waitForCounterEq(
+ "http.config_test.decompressor.testlib.gzip.response.total_uncompressed_bytes",
+ uncompressed_response_length);
+}
+
+} // namespace Envoy
diff --git a/test/extensions/filters/http/decompressor/decompressor_filter_test.cc b/test/extensions/filters/http/decompressor/decompressor_filter_test.cc
new file mode 100644
index 000000000000..216b86bbe0fd
--- /dev/null
+++ b/test/extensions/filters/http/decompressor/decompressor_filter_test.cc
@@ -0,0 +1,363 @@
+#include "envoy/extensions/filters/http/decompressor/v3/decompressor.pb.h"
+
+#include "common/http/headers.h"
+#include "common/protobuf/utility.h"
+
+#include "extensions/filters/http/decompressor/decompressor_filter.h"
+
+#include "test/mocks/buffer/mocks.h"
+#include "test/mocks/compression/decompressor/mocks.h"
+#include "test/mocks/http/mocks.h"
+#include "test/mocks/protobuf/mocks.h"
+#include "test/mocks/runtime/mocks.h"
+#include "test/mocks/stats/mocks.h"
+#include "test/test_common/utility.h"
+
+#include "gtest/gtest.h"
+
+using testing::ByMove;
+using testing::Return;
+
+namespace Envoy {
+namespace Extensions {
+namespace HttpFilters {
+namespace Decompressor {
+namespace {
+
+class DecompressorFilterTest : public testing::TestWithParam<bool> {
+public:
+ void SetUp() override {
+ setUpFilter(R"EOF(
+decompressor_library:
+ typed_config:
+ "@type": "type.googleapis.com/envoy.extensions.compression.gzip.decompressor.v3.Gzip"
+)EOF");
+ }
+
+ void setUpFilter(std::string&& yaml) {
+ envoy::extensions::filters::http::decompressor::v3::Decompressor decompressor;
+ TestUtility::loadFromYaml(yaml, decompressor);
+ auto decompressor_factory =
+ std::make_unique<NiceMock<Compression::Decompressor::MockDecompressorFactory>>();
+ decompressor_factory_ = decompressor_factory.get();
+ config_ = std::make_shared<DecompressorFilterConfig>(decompressor, "test.", stats_, runtime_,
+ std::move(decompressor_factory));
+ filter_ = std::make_unique<DecompressorFilter>(config_);
+ filter_->setDecoderFilterCallbacks(decoder_callbacks_);
+ filter_->setEncoderFilterCallbacks(encoder_callbacks_);
+ }
+
+ bool isRequestDirection() { return GetParam(); }
+
+ std::unique_ptr<Http::RequestOrResponseHeaderMap> doHeaders(const Http::HeaderMap& headers,
+ const bool end_stream) {
+ if (isRequestDirection()) {
+ auto request_headers = Http::createHeaderMap<Http::TestRequestHeaderMapImpl>(headers);
+ EXPECT_EQ(Http::FilterHeadersStatus::Continue,
+ filter_->decodeHeaders(*request_headers, end_stream));
+ return std::move(request_headers);
+ } else {
+ auto response_headers = Http::createHeaderMap<Http::TestResponseHeaderMapImpl>(headers);
+ EXPECT_EQ(Http::FilterHeadersStatus::Continue,
+ filter_->encodeHeaders(*response_headers, end_stream));
+ return std::move(response_headers);
+ }
+ }
+
+ void doData(Buffer::Instance& buffer, const bool end_stream) {
+ if (isRequestDirection()) {
+ EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(buffer, end_stream));
+ } else {
+ EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(buffer, end_stream));
+ }
+ }
+
+ void expectDecompression(Compression::Decompressor::MockDecompressor* decompressor_ptr) {
+ EXPECT_CALL(*decompressor_ptr, decompress(_, _))
+ .Times(2)
+ .WillRepeatedly(
+ Invoke([&](const Buffer::Instance& input_buffer, Buffer::Instance& output_buffer) {
+ TestUtility::feedBufferWithRandomCharacters(output_buffer, 2 * input_buffer.length());
+ }));
+ Buffer::OwnedImpl buffer;
+ TestUtility::feedBufferWithRandomCharacters(buffer, 10);
+ EXPECT_EQ(10, buffer.length());
+ doData(buffer, false /* end_stream */);
+ EXPECT_EQ(20, buffer.length());
+ doData(buffer, true /* end_stream */);
+ EXPECT_EQ(40, buffer.length());
+ }
+
+ void expectNoDecompression() {
+ Buffer::OwnedImpl buffer;
+ TestUtility::feedBufferWithRandomCharacters(buffer, 10);
+ EXPECT_EQ(10, buffer.length());
+ doData(buffer, true /* end_stream */);
+ EXPECT_EQ(10, buffer.length());
+ }
+
+ void decompressionActive(const Http::HeaderMap& headers_before_filter,
+ const absl::optional<std::string> expected_content_encoding,
+ const absl::optional<std::string> expected_accept_encoding = "mock") {
+ // Keep the decompressor to set expectations about it
+ auto decompressor = std::make_unique<Compression::Decompressor::MockDecompressor>();
+ auto* decompressor_ptr = decompressor.get();
+ EXPECT_CALL(*decompressor_factory_, createDecompressor())
+ .WillOnce(Return(ByMove(std::move(decompressor))));
+
+ std::unique_ptr<Http::RequestOrResponseHeaderMap> headers_after_filter =
+ doHeaders(headers_before_filter, false /* end_stream */);
+
+ // The filter removes Content-Length
+ EXPECT_EQ(nullptr, headers_after_filter->ContentLength());
+
+ // The filter removes the decompressor's content encoding from the Content-Encoding header.
+ if (expected_content_encoding.has_value()) {
+ EXPECT_EQ(expected_content_encoding.value(),
+ headers_after_filter->ContentEncoding()->value().getStringView());
+ } else {
+ EXPECT_EQ(nullptr, headers_after_filter->ContentEncoding());
+ }
+
+ // The filter adds the decompressor's content encoding to the Accept-Encoding header on the
+ // request direction.
+ const auto* accept_encoding =
+ headers_after_filter->get(Http::LowerCaseString{"accept-encoding"});
+ if (isRequestDirection() && expected_accept_encoding.has_value()) {
+ EXPECT_EQ(expected_accept_encoding.value(), accept_encoding->value().getStringView());
+ } else {
+ EXPECT_EQ(nullptr, accept_encoding);
+ }
+
+ expectDecompression(decompressor_ptr);
+ }
+
+ Compression::Decompressor::MockDecompressorFactory* decompressor_factory_{};
+ DecompressorFilterConfigSharedPtr config_;
+ std::unique_ptr<DecompressorFilter> filter_;
+ Stats::TestUtil::TestStore stats_;
+ NiceMock<Runtime::MockLoader> runtime_;
+ NiceMock<Http::MockStreamDecoderFilterCallbacks> decoder_callbacks_;
+ NiceMock<Http::MockStreamEncoderFilterCallbacks> encoder_callbacks_;
+};
+
+INSTANTIATE_TEST_SUITE_P(IsRequestDirection, DecompressorFilterTest,
+ ::testing::Values(true, false));
+
+TEST_P(DecompressorFilterTest, DecompressionActive) {
+ Http::TestHeaderMapImpl headers_before_filter{{"content-encoding", "mock"},
+ {"content-length", "256"}};
+ decompressionActive(headers_before_filter, absl::nullopt /* expected_content_encoding */);
+}
+
+TEST_P(DecompressorFilterTest, DecompressionActiveContentEncodingSpacing) {
+ // Additional spacing should still match.
+ Http::TestHeaderMapImpl headers_before_filter{{"content-encoding", " mock "},
+ {"content-length", "256"}};
+ decompressionActive(headers_before_filter, absl::nullopt /* expected_content_encoding */);
+}
+
+TEST_P(DecompressorFilterTest, DecompressionActiveContentEncodingCasing) {
+ // Different casing should still match.
+ Http::TestHeaderMapImpl headers_before_filter{{"content-encoding", "MOCK"}, + {"content-length", "256"}}; + decompressionActive(headers_before_filter, absl::nullopt /* expected_content_encoding */); +} + +TEST_P(DecompressorFilterTest, DecompressionActiveMultipleEncodings) { + // If the first encoding in the Content-Encoding header is the configured value, the filter should + // still be active. + Http::TestHeaderMapImpl headers_before_filter{{"content-encoding", "mock, br"}, + {"content-length", "256"}}; + decompressionActive(headers_before_filter, "br"); +} + +TEST_P(DecompressorFilterTest, DecompressionActiveMultipleEncodings2) { + // If the first encoding in the Content-Encoding header is the configured value, the filter should + // still be active. + Http::TestHeaderMapImpl headers_before_filter{{"content-encoding", "mock, br , gzip "}, + {"content-length", "256"}}; + decompressionActive(headers_before_filter, "br , gzip"); +} + +TEST_P(DecompressorFilterTest, DisableAdvertiseAcceptEncoding) { + setUpFilter(R"EOF( +decompressor_library: + typed_config: + "@type": "type.googleapis.com/envoy.extensions.compression.gzip.decompressor.v3.Gzip" +request_direction_config: + advertise_accept_encoding: false +)EOF"); + + Http::TestHeaderMapImpl headers_before_filter{{"content-encoding", "mock"}, + {"content-length", "256"}}; + decompressionActive(headers_before_filter, absl::nullopt /* expected_content_encoding*/, + absl::nullopt /* expected_accept_encoding */); +} + +TEST_P(DecompressorFilterTest, ExplicitlyEnableAdvertiseAcceptEncoding) { + setUpFilter(R"EOF( +decompressor_library: + typed_config: + "@type": "type.googleapis.com/envoy.extensions.compression.gzip.decompressor.v3.Gzip" +request_direction_config: + advertise_accept_encoding: true +)EOF"); + + Http::TestHeaderMapImpl headers_before_filter{{"content-encoding", "mock"}, + {"content-length", "256"}}; + if (isRequestDirection()) { + // Also test that the filter appends to an already existing header. 
+ headers_before_filter.addCopy("accept-encoding", "br"); + } + decompressionActive(headers_before_filter, absl::nullopt /* expected_content_encoding*/, + "br,mock" /* expected_accept_encoding */); +} + +TEST_P(DecompressorFilterTest, DecompressionDisabled) { + setUpFilter(R"EOF( +decompressor_library: + typed_config: + "@type": "type.googleapis.com/envoy.extensions.compression.gzip.decompressor.v3.Gzip" +request_direction_config: + common_config: + enabled: + default_value: false + runtime_key: does_not_exist +response_direction_config: + common_config: + enabled: + default_value: false + runtime_key: does_not_exist +)EOF"); + + EXPECT_CALL(*decompressor_factory_, createDecompressor()).Times(0); + Http::TestHeaderMapImpl headers_before_filter{{"content-encoding", "mock"}, + {"content-length", "256"}}; + std::unique_ptr headers_after_filter = + doHeaders(headers_before_filter, false /* end_stream */); + TestUtility::headerMapEqualIgnoreOrder(headers_before_filter, *headers_after_filter); + + expectNoDecompression(); +} + +TEST_P(DecompressorFilterTest, RequestDecompressionDisabled) { + setUpFilter(R"EOF( +decompressor_library: + typed_config: + "@type": "type.googleapis.com/envoy.extensions.compression.gzip.decompressor.v3.Gzip" +request_direction_config: + common_config: + enabled: + default_value: false + runtime_key: does_not_exist +)EOF"); + + Http::TestHeaderMapImpl headers_before_filter{{"content-encoding", "mock"}, + {"content-length", "256"}}; + + if (isRequestDirection()) { + EXPECT_CALL(*decompressor_factory_, createDecompressor()).Times(0); + std::unique_ptr headers_after_filter = + doHeaders(headers_before_filter, false /* end_stream */); + TestUtility::headerMapEqualIgnoreOrder(headers_before_filter, *headers_after_filter); + expectNoDecompression(); + } else { + decompressionActive(headers_before_filter, absl::nullopt /* expected_content_encoding*/, + "mock" /* expected_accept_encoding */); + } +} + +TEST_P(DecompressorFilterTest, ResponseDecompressionDisabled) { + setUpFilter(R"EOF( +decompressor_library: + typed_config: + "@type": "type.googleapis.com/envoy.extensions.compression.gzip.decompressor.v3.Gzip" +response_direction_config: + common_config: + enabled: + default_value: false + runtime_key: does_not_exist +)EOF"); + + Http::TestHeaderMapImpl headers_before_filter{{"content-encoding", "mock"}, + {"content-length", "256"}}; + + if (isRequestDirection()) { + // Accept-Encoding is not advertised in the request headers when response decompression is + // disabled. 
+ decompressionActive(headers_before_filter, absl::nullopt /* expected_content_encoding*/, + absl::nullopt /* expected_accept_encoding */); + } else { + EXPECT_CALL(*decompressor_factory_, createDecompressor()).Times(0); + std::unique_ptr headers_after_filter = + doHeaders(headers_before_filter, false /* end_stream */); + TestUtility::headerMapEqualIgnoreOrder(headers_before_filter, *headers_after_filter); + expectNoDecompression(); + } +} + +TEST_P(DecompressorFilterTest, NoDecompressionHeadersOnly) { + EXPECT_CALL(*decompressor_factory_, createDecompressor()).Times(0); + Http::TestHeaderMapImpl headers_before_filter; + std::unique_ptr headers_after_filter = + doHeaders(headers_before_filter, true /* end_stream */); + TestUtility::headerMapEqualIgnoreOrder(headers_before_filter, *headers_after_filter); +} + +TEST_P(DecompressorFilterTest, NoDecompressionContentEncodingDoesNotMatch) { + EXPECT_CALL(*decompressor_factory_, createDecompressor()).Times(0); + Http::TestHeaderMapImpl headers_before_filter{{"content-encoding", "not-matching"}, + {"content-length", "256"}}; + std::unique_ptr headers_after_filter = + doHeaders(headers_before_filter, false /* end_stream */); + TestUtility::headerMapEqualIgnoreOrder(headers_before_filter, *headers_after_filter); + + expectNoDecompression(); +} + +TEST_P(DecompressorFilterTest, NoDecompressionContentEncodingNotCurrent) { + EXPECT_CALL(*decompressor_factory_, createDecompressor()).Times(0); + // The decompressor's content scheme is not the first value in the comma-delimited list in the + // Content-Encoding header. Therefore, compression will not occur. + Http::TestHeaderMapImpl headers_before_filter{{"content-encoding", "gzip,mock"}, + {"content-length", "256"}}; + std::unique_ptr headers_after_filter = + doHeaders(headers_before_filter, false /* end_stream */); + TestUtility::headerMapEqualIgnoreOrder(headers_before_filter, *headers_after_filter); + + expectNoDecompression(); +} + +TEST_P(DecompressorFilterTest, NoResponseDecompressionNoTransformPresent) { + EXPECT_CALL(*decompressor_factory_, createDecompressor()).Times(0); + Http::TestHeaderMapImpl headers_before_filter{ + {"cache-control", Http::Headers::get().CacheControlValues.NoTransform}, + {"content-encoding", "mock"}, + {"content-length", "256"}}; + std::unique_ptr headers_after_filter = + doHeaders(headers_before_filter, false /* end_stream */); + TestUtility::headerMapEqualIgnoreOrder(headers_before_filter, *headers_after_filter); + + expectNoDecompression(); +} + +TEST_P(DecompressorFilterTest, NoResponseDecompressionNoTransformPresentInList) { + EXPECT_CALL(*decompressor_factory_, createDecompressor()).Times(0); + Http::TestHeaderMapImpl headers_before_filter{ + {"cache-control", fmt::format("{}, {}", Http::Headers::get().CacheControlValues.NoCache, + Http::Headers::get().CacheControlValues.NoTransform)}, + {"content-encoding", "mock"}, + {"content-length", "256"}}; + std::unique_ptr headers_after_filter = + doHeaders(headers_before_filter, false /* end_stream */); + TestUtility::headerMapEqualIgnoreOrder(headers_before_filter, *headers_after_filter); + + expectNoDecompression(); +} + +} // namespace +} // namespace Decompressor +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/mocks/compression/decompressor/BUILD b/test/mocks/compression/decompressor/BUILD new file mode 100644 index 000000000000..3e4e605cef52 --- /dev/null +++ b/test/mocks/compression/decompressor/BUILD @@ -0,0 +1,19 @@ +licenses(["notice"]) # Apache 2 + +load( + 
"//bazel:envoy_build_system.bzl", + "envoy_cc_mock", + "envoy_package", +) + +envoy_package() + +envoy_cc_mock( + name = "decompressor_mocks", + srcs = ["mocks.cc"], + hdrs = ["mocks.h"], + deps = [ + "//include/envoy/compression/decompressor:decompressor_config_interface", + "//include/envoy/compression/decompressor:decompressor_interface", + ], +) diff --git a/test/mocks/compression/decompressor/mocks.cc b/test/mocks/compression/decompressor/mocks.cc new file mode 100644 index 000000000000..48017c6f3f6f --- /dev/null +++ b/test/mocks/compression/decompressor/mocks.cc @@ -0,0 +1,21 @@ +#include "test/mocks/compression/decompressor/mocks.h" + +using testing::ReturnRef; + +namespace Envoy { +namespace Compression { +namespace Decompressor { + +MockDecompressor::MockDecompressor() = default; +MockDecompressor::~MockDecompressor() = default; + +MockDecompressorFactory::MockDecompressorFactory() { + ON_CALL(*this, statsPrefix()).WillByDefault(ReturnRef(stats_prefix_)); + ON_CALL(*this, contentEncoding()).WillByDefault(ReturnRef(content_encoding_)); +} + +MockDecompressorFactory::~MockDecompressorFactory() = default; + +} // namespace Decompressor +} // namespace Compression +} // namespace Envoy diff --git a/test/mocks/compression/decompressor/mocks.h b/test/mocks/compression/decompressor/mocks.h new file mode 100644 index 000000000000..07ce0f6fa701 --- /dev/null +++ b/test/mocks/compression/decompressor/mocks.h @@ -0,0 +1,38 @@ +#pragma once + +#include "envoy/compression/decompressor/config.h" +#include "envoy/compression/decompressor/decompressor.h" + +#include "gmock/gmock.h" + +namespace Envoy { +namespace Compression { +namespace Decompressor { + +class MockDecompressor : public Decompressor { +public: + MockDecompressor(); + ~MockDecompressor() override; + + // Decompressor::Decompressor + MOCK_METHOD(void, decompress, + (const Buffer::Instance& input_buffer, Buffer::Instance& output_buffer)); +}; + +class MockDecompressorFactory : public DecompressorFactory { +public: + MockDecompressorFactory(); + ~MockDecompressorFactory() override; + + // Decompressor::DecompressorFactory + MOCK_METHOD(DecompressorPtr, createDecompressor, ()); + MOCK_METHOD(const std::string&, statsPrefix, (), (const)); + MOCK_METHOD(const std::string&, contentEncoding, (), (const)); + + const std::string stats_prefix_{"mock"}; + const std::string content_encoding_{"mock"}; +}; + +} // namespace Decompressor +} // namespace Compression +} // namespace Envoy From 29ee3208ede7a3f43b459fc93bc1a3b6a72f0f50 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=A5=81=E6=97=A0=E5=BF=A7?= Date: Wed, 27 May 2020 07:59:36 +0800 Subject: [PATCH 230/909] Correct error method name in DynamicRouteEntry (#11282) Signed-off-by: wbpcode --- source/common/router/config_impl.h | 2 +- test/common/router/config_impl_test.cc | 44 ++++++++++++++++++++++++++ 2 files changed, 45 insertions(+), 1 deletion(-) diff --git a/source/common/router/config_impl.h b/source/common/router/config_impl.h index 31a81abf7e84..df1ab51e6e5f 100644 --- a/source/common/router/config_impl.h +++ b/source/common/router/config_impl.h @@ -602,7 +602,7 @@ class RouteEntryImplBase : public RouteEntry, return parent_->maxGrpcTimeout(); } absl::optional grpcTimeoutOffset() const override { - return parent_->maxGrpcTimeout(); + return parent_->grpcTimeoutOffset(); } const MetadataMatchCriteria* metadataMatchCriteria() const override { return parent_->metadataMatchCriteria(); diff --git a/test/common/router/config_impl_test.cc b/test/common/router/config_impl_test.cc index 
a48a9174f9e8..741df2ede163 100644
--- a/test/common/router/config_impl_test.cc
+++ b/test/common/router/config_impl_test.cc
@@ -2813,6 +2813,50 @@ TEST_F(RouteMatcherTest, GrpcTimeoutOffset) {
 ->grpcTimeoutOffset());
 }
+TEST_F(RouteMatcherTest, GrpcTimeoutOffsetOfDynamicRoute) {
+ // A DynamicRouteEntry will be created when 'cluster_header' is set.
+ const std::string yaml = R"EOF(
+virtual_hosts:
+- name: local_service
+ domains:
+ - "*"
+ routes:
+ - match:
+ prefix: "/foo"
+ route:
+ cluster: local_service_grpc
+ max_grpc_timeout: 0.1s
+ grpc_timeout_offset: 0.01s
+ - match:
+ prefix: "/"
+ route:
+ max_grpc_timeout: 0.2s
+ grpc_timeout_offset: 0.02s
+ cluster_header: request_to
+ )EOF";
+
+ TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true);
+
+ {
+ Http::TestRequestHeaderMapImpl request_headers = genHeaders("www.lyft.com", "/", "GET");
+ request_headers.addCopy(Http::LowerCaseString("request_to"), "dynamic_grpc_service");
+ EXPECT_EQ(absl::make_optional(std::chrono::milliseconds(20)),
+ config.route(request_headers, 0)->routeEntry()->grpcTimeoutOffset());
+ EXPECT_EQ(absl::make_optional(std::chrono::milliseconds(200)),
+ config.route(request_headers, 0)->routeEntry()->maxGrpcTimeout());
+ }
+ {
+
+ EXPECT_EQ(absl::make_optional(std::chrono::milliseconds(10)),
+ config.route(genHeaders("www.lyft.com", "/foo", "GET"), 0)
+ ->routeEntry()
+ ->grpcTimeoutOffset());
+ EXPECT_EQ(
+ absl::make_optional(std::chrono::milliseconds(100)),
+ config.route(genHeaders("www.lyft.com", "/foo", "GET"), 0)->routeEntry()->maxGrpcTimeout());
+ }
+}
+
 TEST_F(RouteMatcherTest, FractionalRuntime) {
 const std::string yaml = R"EOF(
virtual_hosts:
From c8f330c1f99ee5d89048d52ac98faab2539d5a52 Mon Sep 17 00:00:00 2001
From: Wayne Zhang
Date: Tue, 26 May 2020 17:32:22 -0700
Subject: [PATCH 231/909] jwt_authn: update jwt_verify_lib to 2020-05-21 (#11309)

jwt_verify_lib added more error messages for jwt parsing errors.

Risk Level: None
Testing: Unit-test

Signed-off-by: Wayne Zhang
---
 bazel/repository_locations.bzl | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl
index 6af4a65a0d1a..a206815b962a 100644
--- a/bazel/repository_locations.bzl
+++ b/bazel/repository_locations.bzl
@@ -277,10 +277,10 @@ DEPENDENCY_REPOSITORIES = dict(
 cpe = "N/A",
 ),
 com_github_google_jwt_verify = dict(
- sha256 = "d422a6eadd4bcdd0f9b122cd843a4015f8b18aebea6e1deb004bd4d401a8ef92",
- strip_prefix = "jwt_verify_lib-40e2cc938f4bcd059a97dc6c73f59ecfa5a71bac",
- # 2020-02-11
- urls = ["https://github.com/google/jwt_verify_lib/archive/40e2cc938f4bcd059a97dc6c73f59ecfa5a71bac.tar.gz"],
+ sha256 = "118f955620509f1634cbd918c63234d2048dce56b1815caf348d78e3c3dc899c",
+ strip_prefix = "jwt_verify_lib-44291b2ee4c19631e5a0a0bf4f965436a9364ca7",
+ # 2020-05-21
+ urls = ["https://github.com/google/jwt_verify_lib/archive/44291b2ee4c19631e5a0a0bf4f965436a9364ca7.tar.gz"],
 use_category = ["dataplane"],
 cpe = "N/A",
 ),
From 8f4d759cb53493159bcba921d6109eace48cb6da Mon Sep 17 00:00:00 2001
From: htuch
Date: Wed, 27 May 2020 10:14:55 -0400
Subject: [PATCH 232/909] sds: don't throw exceptions in initialize() (#11223)

Similar root cause to #4377 - we can't throw exceptions in initialize().

Fixes #10976

Risk level: Low
Testing: Unit/integration test added.
Signed-off-by: Harvey Tuch --- include/envoy/upstream/cluster_manager.h | 9 ++ .../config/subscription_factory_impl.cc | 3 +- source/common/config/utility.cc | 14 +- source/common/config/utility.h | 8 +- source/common/network/dns_impl.cc | 4 +- source/common/secret/sds_api.cc | 9 +- .../common/upstream/cluster_manager_impl.cc | 20 ++- source/common/upstream/cluster_manager_impl.h | 2 + .../config/subscription_factory_impl_test.cc | 147 +++++------------- test/common/config/utility_test.cc | 70 ++------- test/common/secret/sds_api_test.cc | 36 ++++- .../upstream/cluster_manager_impl_test.cc | 48 ++++++ test/integration/stats_integration_test.cc | 8 +- test/mocks/upstream/mocks.h | 1 + test/server/server_test.cc | 8 + .../server/bad_sds_config_source.yaml | 32 ++++ 16 files changed, 219 insertions(+), 200 deletions(-) create mode 100644 test/server/test_data/server/bad_sds_config_source.yaml diff --git a/include/envoy/upstream/cluster_manager.h b/include/envoy/upstream/cluster_manager.h index e049faf57abd..e3eea850f000 100644 --- a/include/envoy/upstream/cluster_manager.h +++ b/include/envoy/upstream/cluster_manager.h @@ -130,6 +130,15 @@ class ClusterManager { */ virtual ClusterInfoMap clusters() PURE; + using ClusterSet = std::unordered_set; + + /** + * @return const ClusterSet& providing the cluster names that are eligible as + * xDS API config sources. These must be static (i.e. in the + * bootstrap) and non-EDS. + */ + virtual const ClusterSet& primaryClusters() PURE; + /** * @return ThreadLocalCluster* the thread local cluster with the given name or nullptr if it * does not exist. This is thread safe. diff --git a/source/common/config/subscription_factory_impl.cc b/source/common/config/subscription_factory_impl.cc index 1851b8a486f6..342830ebc3d8 100644 --- a/source/common/config/subscription_factory_impl.cc +++ b/source/common/config/subscription_factory_impl.cc @@ -45,7 +45,8 @@ SubscriptionPtr SubscriptionFactoryImpl::subscriptionFromConfigSource( } case envoy::config::core::v3::ConfigSource::ConfigSourceSpecifierCase::kApiConfigSource: { const envoy::config::core::v3::ApiConfigSource& api_config_source = config.api_config_source(); - Utility::checkApiConfigSourceSubscriptionBackingCluster(cm_.clusters(), api_config_source); + Utility::checkApiConfigSourceSubscriptionBackingCluster(cm_.primaryClusters(), + api_config_source); switch (api_config_source.api_type()) { case envoy::config::core::v3::ApiConfigSource::hidden_envoy_deprecated_UNSUPPORTED_REST_LEGACY: diff --git a/source/common/config/utility.cc b/source/common/config/utility.cc index 1d4a55a52fe1..65ce74a5150b 100644 --- a/source/common/config/utility.cc +++ b/source/common/config/utility.cc @@ -136,13 +136,11 @@ void Utility::checkApiConfigSourceNames( } } -void Utility::validateClusterName(const Upstream::ClusterManager::ClusterInfoMap& clusters, +void Utility::validateClusterName(const Upstream::ClusterManager::ClusterSet& primary_clusters, const std::string& cluster_name, const std::string& config_source) { - const auto& it = clusters.find(cluster_name); - - if (it == clusters.end() || it->second.get().info()->addedViaApi() || - it->second.get().info()->type() == envoy::config::cluster::v3::Cluster::EDS) { + const auto& it = primary_clusters.find(cluster_name); + if (it == primary_clusters.end()) { throw EnvoyException(fmt::format("{} must have a statically defined non-EDS cluster: '{}' does " "not exist, was added via api, or is an EDS cluster", config_source, cluster_name)); @@ -150,7 +148,7 @@ void 
Utility::validateClusterName(const Upstream::ClusterManager::ClusterInfoMap } void Utility::checkApiConfigSourceSubscriptionBackingCluster( - const Upstream::ClusterManager::ClusterInfoMap& clusters, + const Upstream::ClusterManager::ClusterSet& primary_clusters, const envoy::config::core::v3::ApiConfigSource& api_config_source) { Utility::checkApiConfigSourceNames(api_config_source); @@ -161,14 +159,14 @@ void Utility::checkApiConfigSourceSubscriptionBackingCluster( // All API configs of type REST and UNSUPPORTED_REST_LEGACY should have cluster names. // Additionally, some gRPC API configs might have a cluster name set instead // of an envoy gRPC. - Utility::validateClusterName(clusters, api_config_source.cluster_names()[0], + Utility::validateClusterName(primary_clusters, api_config_source.cluster_names()[0], api_config_source.GetTypeName()); } else if (is_grpc) { // Some ApiConfigSources of type GRPC won't have a cluster name, such as if // they've been configured with google_grpc. if (api_config_source.grpc_services()[0].has_envoy_grpc()) { // If an Envoy gRPC exists, we take its cluster name. - Utility::validateClusterName(clusters, + Utility::validateClusterName(primary_clusters, api_config_source.grpc_services()[0].envoy_grpc().cluster_name(), api_config_source.GetTypeName()); } diff --git a/source/common/config/utility.h b/source/common/config/utility.h index 3c44f9d98eaf..78e98e93f219 100644 --- a/source/common/config/utility.h +++ b/source/common/config/utility.h @@ -157,23 +157,23 @@ class Utility { /** * Check the validity of a cluster backing an api config source. Throws on error. - * @param clusters the clusters currently loaded in the cluster manager. + * @param primary_clusters the API config source eligible clusters. * @param cluster_name the cluster name to validate. * @param config_source the config source typed name. * @throws EnvoyException when an API config doesn't have a statically defined non-EDS cluster. */ - static void validateClusterName(const Upstream::ClusterManager::ClusterInfoMap& clusters, + static void validateClusterName(const Upstream::ClusterManager::ClusterSet& primary_clusters, const std::string& cluster_name, const std::string& config_source); /** * Potentially calls Utility::validateClusterName, if a cluster name can be found. - * @param clusters the clusters currently loaded in the cluster manager. + * @param primary_clusters the API config source eligible clusters. * @param api_config_source the config source to validate. * @throws EnvoyException when an API config doesn't have a statically defined non-EDS cluster. 
*/ static void checkApiConfigSourceSubscriptionBackingCluster( - const Upstream::ClusterManager::ClusterInfoMap& clusters, + const Upstream::ClusterManager::ClusterSet& primary_clusters, const envoy::config::core::v3::ApiConfigSource& api_config_source); /** diff --git a/source/common/network/dns_impl.cc b/source/common/network/dns_impl.cc index 17d608ce2ceb..d44d53c70f39 100644 --- a/source/common/network/dns_impl.cc +++ b/source/common/network/dns_impl.cc @@ -161,10 +161,10 @@ void DnsResolverImpl::PendingResolution::onAresGetAddrInfoCallback(int status, i try { callback_(resolution_status, std::move(address_list)); } catch (const EnvoyException& e) { - ENVOY_LOG(critical, "EnvoyException in c-ares callback"); + ENVOY_LOG(critical, "EnvoyException in c-ares callback: {}", e.what()); dispatcher_.post([s = std::string(e.what())] { throw EnvoyException(s); }); } catch (const std::exception& e) { - ENVOY_LOG(critical, "std::exception in c-ares callback"); + ENVOY_LOG(critical, "std::exception in c-ares callback: {}", e.what()); dispatcher_.post([s = std::string(e.what())] { throw EnvoyException(s); }); } catch (...) { ENVOY_LOG(critical, "Unknown exception in c-ares callback"); diff --git a/source/common/secret/sds_api.cc b/source/common/secret/sds_api.cc index 655eb3726d52..351928f78ef4 100644 --- a/source/common/secret/sds_api.cc +++ b/source/common/secret/sds_api.cc @@ -29,6 +29,10 @@ SdsApi::SdsApi(envoy::config::core::v3::ConfigSource sds_config, absl::string_vi time_source_(time_source), secret_data_{sds_config_name_, "uninitialized", time_source_.systemTime()}, dispatcher_(dispatcher), api_(api) { + const auto resource_name = getResourceName(); + // This has to happen here (rather than in initialize()) as it can throw exceptions. + subscription_ = subscription_factory_.subscriptionFromConfigSource( + sds_config_, Grpc::Common::typeUrl(resource_name), stats_, *this); // TODO(JimmyCYJ): Implement chained_init_manager, so that multiple init_manager // can be chained together to behave as one init_manager. In that way, we let // two listeners which share same SdsApi to register at separate init managers, and @@ -111,9 +115,8 @@ void SdsApi::validateUpdateSize(int num_resources) { } void SdsApi::initialize() { - const auto resource_name = getResourceName(); - subscription_ = subscription_factory_.subscriptionFromConfigSource( - sds_config_, Grpc::Common::typeUrl(resource_name), stats_, *this); + // Don't put any code here that can throw exceptions, this has been the cause of multiple + // hard-to-diagnose regressions. subscription_->start({sds_config_name_}); } diff --git a/source/common/upstream/cluster_manager_impl.cc b/source/common/upstream/cluster_manager_impl.cc index 0fcd22cfb20d..8da7fa17e6ca 100644 --- a/source/common/upstream/cluster_manager_impl.cc +++ b/source/common/upstream/cluster_manager_impl.cc @@ -274,12 +274,22 @@ ClusterManagerImpl::ClusterManagerImpl( // loading is done because in v2 configuration each EDS cluster individually sets up a // subscription. When this subscription is an API source the cluster will depend on a non-EDS // cluster, so the non-EDS clusters must be loaded first. 
+ auto is_primary_cluster = [](const envoy::config::cluster::v3::Cluster& cluster) -> bool { + return cluster.type() != envoy::config::cluster::v3::Cluster::EDS || + (cluster.type() == envoy::config::cluster::v3::Cluster::EDS && + cluster.eds_cluster_config().eds_config().config_source_specifier_case() == + envoy::config::core::v3::ConfigSource::ConfigSourceSpecifierCase::kPath); + }; + // Build book-keeping for which clusters are primary. This is useful when we + // invoke loadCluster() below and it needs the complete set of primaries. for (const auto& cluster : bootstrap.static_resources().clusters()) { - // First load all the primary clusters. - if (cluster.type() != envoy::config::cluster::v3::Cluster::EDS || - (cluster.type() == envoy::config::cluster::v3::Cluster::EDS && - cluster.eds_cluster_config().eds_config().config_source_specifier_case() == - envoy::config::core::v3::ConfigSource::ConfigSourceSpecifierCase::kPath)) { + if (is_primary_cluster(cluster)) { + primary_clusters_.insert(cluster.name()); + } + } + // Load all the primary clusters. + for (const auto& cluster : bootstrap.static_resources().clusters()) { + if (is_primary_cluster(cluster)) { loadCluster(cluster, "", false, active_clusters_); } } diff --git a/source/common/upstream/cluster_manager_impl.h b/source/common/upstream/cluster_manager_impl.h index 2e5800ecb952..a373eb20aaf2 100644 --- a/source/common/upstream/cluster_manager_impl.h +++ b/source/common/upstream/cluster_manager_impl.h @@ -221,6 +221,7 @@ class ClusterManagerImpl : public ClusterManager, Logger::Loggableset_api_type(envoy::config::core::v3::ApiConfigSource::REST); - EXPECT_CALL(cm_, clusters()).WillOnce(Return(cluster_map)); + EXPECT_CALL(cm_, primaryClusters()).WillOnce(ReturnRef(primary_clusters)); EXPECT_THROW_WITH_REGEX(subscriptionFromConfigSource(config), EnvoyException, "API configs must have either a gRPC service or a cluster name defined:"); } TEST_F(SubscriptionFactoryTest, GrpcClusterEmpty) { envoy::config::core::v3::ConfigSource config; - Upstream::ClusterManager::ClusterInfoMap cluster_map; + Upstream::ClusterManager::ClusterSet primary_clusters; config.mutable_api_config_source()->set_api_type(envoy::config::core::v3::ApiConfigSource::GRPC); - EXPECT_CALL(cm_, clusters()).WillOnce(Return(cluster_map)); + EXPECT_CALL(cm_, primaryClusters()).WillOnce(ReturnRef(primary_clusters)); EXPECT_THROW_WITH_REGEX(subscriptionFromConfigSource(config), EnvoyException, "API configs must have either a gRPC service or a cluster name defined:"); } TEST_F(SubscriptionFactoryTest, RestClusterSingleton) { envoy::config::core::v3::ConfigSource config; - Upstream::ClusterManager::ClusterInfoMap cluster_map; - NiceMock cluster; + Upstream::ClusterManager::ClusterSet primary_clusters; config.mutable_api_config_source()->set_api_type(envoy::config::core::v3::ApiConfigSource::REST); config.mutable_api_config_source()->mutable_refresh_delay()->set_seconds(1); config.mutable_api_config_source()->add_cluster_names("static_cluster"); - cluster_map.emplace("static_cluster", cluster); + primary_clusters.insert("static_cluster"); EXPECT_CALL(dispatcher_, createTimer_(_)); - EXPECT_CALL(cm_, clusters()).WillOnce(Return(cluster_map)); - EXPECT_CALL(*cluster.info_, addedViaApi()).WillOnce(Return(false)); - EXPECT_CALL(*cluster.info_, type()).WillOnce(Return(envoy::config::cluster::v3::Cluster::STATIC)); + EXPECT_CALL(cm_, primaryClusters()).WillOnce(ReturnRef(primary_clusters)); subscriptionFromConfigSource(config); } TEST_F(SubscriptionFactoryTest, GrpcClusterSingleton) 
{ envoy::config::core::v3::ConfigSource config; - Upstream::ClusterManager::ClusterInfoMap cluster_map; - NiceMock cluster; + Upstream::ClusterManager::ClusterSet primary_clusters; config.mutable_api_config_source()->set_api_type(envoy::config::core::v3::ApiConfigSource::GRPC); config.mutable_api_config_source()->mutable_refresh_delay()->set_seconds(1); config.mutable_api_config_source()->add_grpc_services()->mutable_envoy_grpc()->set_cluster_name( "static_cluster"); - cluster_map.emplace("static_cluster", cluster); + primary_clusters.insert("static_cluster"); envoy::config::core::v3::GrpcService expected_grpc_service; expected_grpc_service.mutable_envoy_grpc()->set_cluster_name("static_cluster"); - EXPECT_CALL(cm_, clusters()).WillOnce(Return(cluster_map)); + EXPECT_CALL(cm_, primaryClusters()).WillOnce(ReturnRef(primary_clusters)); EXPECT_CALL(cm_, grpcAsyncClientManager()).WillOnce(ReturnRef(cm_.async_client_manager_)); EXPECT_CALL(cm_.async_client_manager_, factoryForGrpcService(ProtoEq(expected_grpc_service), _, _)) @@ -134,8 +131,6 @@ TEST_F(SubscriptionFactoryTest, GrpcClusterSingleton) { })); return async_client_factory; })); - EXPECT_CALL(*cluster.info_, addedViaApi()).WillOnce(Return(false)); - EXPECT_CALL(*cluster.info_, type()).WillOnce(Return(envoy::config::cluster::v3::Cluster::STATIC)); EXPECT_CALL(dispatcher_, createTimer_(_)); subscriptionFromConfigSource(config); @@ -143,21 +138,17 @@ TEST_F(SubscriptionFactoryTest, GrpcClusterSingleton) { TEST_F(SubscriptionFactoryTest, RestClusterMultiton) { envoy::config::core::v3::ConfigSource config; - Upstream::ClusterManager::ClusterInfoMap cluster_map; - NiceMock cluster; + Upstream::ClusterManager::ClusterSet primary_clusters; config.mutable_api_config_source()->set_api_type(envoy::config::core::v3::ApiConfigSource::REST); config.mutable_api_config_source()->add_cluster_names("static_cluster_foo"); - cluster_map.emplace("static_cluster_foo", cluster); + primary_clusters.insert("static_cluster_foo"); config.mutable_api_config_source()->add_cluster_names("static_cluster_bar"); - cluster_map.emplace("static_cluster_bar", cluster); + primary_clusters.insert("static_cluster_bar"); - EXPECT_CALL(cm_, clusters()).WillRepeatedly(Return(cluster_map)); - EXPECT_CALL(*cluster.info_, addedViaApi()).WillRepeatedly(Return(false)); - EXPECT_CALL(*cluster.info_, type()) - .WillRepeatedly(Return(envoy::config::cluster::v3::Cluster::STATIC)); + EXPECT_CALL(cm_, primaryClusters()).WillOnce(ReturnRef(primary_clusters)); EXPECT_THROW_WITH_REGEX(subscriptionFromConfigSource(config), EnvoyException, fmt::format("{} must have a singleton cluster name specified:", config.mutable_api_config_source()->GetTypeName())); @@ -165,23 +156,19 @@ TEST_F(SubscriptionFactoryTest, RestClusterMultiton) { TEST_F(SubscriptionFactoryTest, GrpcClusterMultiton) { envoy::config::core::v3::ConfigSource config; - Upstream::ClusterManager::ClusterInfoMap cluster_map; - NiceMock cluster; + Upstream::ClusterManager::ClusterSet primary_clusters; config.mutable_api_config_source()->set_api_type(envoy::config::core::v3::ApiConfigSource::GRPC); config.mutable_api_config_source()->add_grpc_services()->mutable_envoy_grpc()->set_cluster_name( "static_cluster_foo"); - cluster_map.emplace("static_cluster_foo", cluster); + primary_clusters.insert("static_cluster_foo"); config.mutable_api_config_source()->add_grpc_services()->mutable_envoy_grpc()->set_cluster_name( "static_cluster_bar"); - cluster_map.emplace("static_cluster_bar", cluster); + primary_clusters.insert("static_cluster_bar"); 
- EXPECT_CALL(cm_, clusters()).WillRepeatedly(Return(cluster_map)); EXPECT_CALL(cm_, grpcAsyncClientManager()).WillRepeatedly(ReturnRef(cm_.async_client_manager_)); - EXPECT_CALL(*cluster.info_, addedViaApi()).WillRepeatedly(Return(false)); - EXPECT_CALL(*cluster.info_, type()) - .WillRepeatedly(Return(envoy::config::cluster::v3::Cluster::STATIC)); + EXPECT_CALL(cm_, primaryClusters()).WillOnce(ReturnRef(primary_clusters)); EXPECT_THROW_WITH_REGEX(subscriptionFromConfigSource(config), EnvoyException, fmt::format("{}::.DELTA_.GRPC must have a " @@ -214,12 +201,9 @@ TEST_F(SubscriptionFactoryTest, LegacySubscription) { api_config_source->set_api_type( envoy::config::core::v3::ApiConfigSource::hidden_envoy_deprecated_UNSUPPORTED_REST_LEGACY); api_config_source->add_cluster_names("static_cluster"); - Upstream::ClusterManager::ClusterInfoMap cluster_map; - Upstream::MockClusterMockPrioritySet cluster; - cluster_map.emplace("static_cluster", cluster); - EXPECT_CALL(cm_, clusters()).WillOnce(Return(cluster_map)); - EXPECT_CALL(cluster, info()).Times(2); - EXPECT_CALL(*cluster.info_, addedViaApi()); + Upstream::ClusterManager::ClusterSet primary_clusters; + primary_clusters.insert("static_cluster"); + EXPECT_CALL(cm_, primaryClusters()).WillOnce(ReturnRef(primary_clusters)); EXPECT_THROW_WITH_REGEX(subscriptionFromConfigSource(config)->start({"static_cluster"}), EnvoyException, "REST_LEGACY no longer a supported ApiConfigSource.*"); } @@ -231,12 +215,9 @@ TEST_F(SubscriptionFactoryTest, HttpSubscriptionCustomRequestTimeout) { api_config_source->add_cluster_names("static_cluster"); api_config_source->mutable_refresh_delay()->set_seconds(1); api_config_source->mutable_request_timeout()->set_seconds(5); - Upstream::ClusterManager::ClusterInfoMap cluster_map; - Upstream::MockClusterMockPrioritySet cluster; - cluster_map.emplace("static_cluster", cluster); - EXPECT_CALL(cm_, clusters()).WillOnce(Return(cluster_map)); - EXPECT_CALL(cluster, info()).Times(2); - EXPECT_CALL(*cluster.info_, addedViaApi()); + Upstream::ClusterManager::ClusterSet primary_clusters; + primary_clusters.insert("static_cluster"); + EXPECT_CALL(cm_, primaryClusters()).WillOnce(ReturnRef(primary_clusters)); EXPECT_CALL(dispatcher_, createTimer_(_)).Times(2); EXPECT_CALL(cm_, httpAsyncClientForCluster("static_cluster")); EXPECT_CALL( @@ -251,12 +232,9 @@ TEST_F(SubscriptionFactoryTest, HttpSubscription) { api_config_source->set_api_type(envoy::config::core::v3::ApiConfigSource::REST); api_config_source->add_cluster_names("static_cluster"); api_config_source->mutable_refresh_delay()->set_seconds(1); - Upstream::ClusterManager::ClusterInfoMap cluster_map; - Upstream::MockClusterMockPrioritySet cluster; - cluster_map.emplace("static_cluster", cluster); - EXPECT_CALL(cm_, clusters()).WillOnce(Return(cluster_map)); - EXPECT_CALL(cluster, info()).Times(2); - EXPECT_CALL(*cluster.info_, addedViaApi()); + Upstream::ClusterManager::ClusterSet primary_clusters; + primary_clusters.insert("static_cluster"); + EXPECT_CALL(cm_, primaryClusters()).WillOnce(ReturnRef(primary_clusters)); EXPECT_CALL(dispatcher_, createTimer_(_)).Times(2); EXPECT_CALL(cm_, httpAsyncClientForCluster("static_cluster")); EXPECT_CALL(cm_.async_client_, send_(_, _, _)) @@ -277,12 +255,9 @@ TEST_F(SubscriptionFactoryTest, HttpSubscriptionNoRefreshDelay) { auto* api_config_source = config.mutable_api_config_source(); api_config_source->set_api_type(envoy::config::core::v3::ApiConfigSource::REST); api_config_source->add_cluster_names("static_cluster"); - 
Upstream::ClusterManager::ClusterInfoMap cluster_map; - Upstream::MockClusterMockPrioritySet cluster; - cluster_map.emplace("static_cluster", cluster); - EXPECT_CALL(cm_, clusters()).WillOnce(Return(cluster_map)); - EXPECT_CALL(cluster, info()).Times(2); - EXPECT_CALL(*cluster.info_, addedViaApi()); + Upstream::ClusterManager::ClusterSet primary_clusters; + primary_clusters.insert("static_cluster"); + EXPECT_CALL(cm_, primaryClusters()).WillOnce(ReturnRef(primary_clusters)); EXPECT_THROW_WITH_MESSAGE(subscriptionFromConfigSource(config)->start({"static_cluster"}), EnvoyException, "refresh_delay is required for REST API configuration sources"); @@ -295,10 +270,9 @@ TEST_F(SubscriptionFactoryTest, GrpcSubscription) { api_config_source->add_grpc_services()->mutable_envoy_grpc()->set_cluster_name("static_cluster"); envoy::config::core::v3::GrpcService expected_grpc_service; expected_grpc_service.mutable_envoy_grpc()->set_cluster_name("static_cluster"); - Upstream::ClusterManager::ClusterInfoMap cluster_map; - NiceMock cluster; - cluster_map.emplace("static_cluster", cluster); - EXPECT_CALL(cm_, clusters()).WillOnce(Return(cluster_map)); + Upstream::ClusterManager::ClusterSet primary_clusters; + primary_clusters.insert("static_cluster"); + EXPECT_CALL(cm_, primaryClusters()).WillOnce(ReturnRef(primary_clusters)); EXPECT_CALL(cm_, grpcAsyncClientManager()).WillOnce(ReturnRef(cm_.async_client_manager_)); EXPECT_CALL(cm_.async_client_manager_, factoryForGrpcService(ProtoEq(expected_grpc_service), _, _)) @@ -354,57 +328,8 @@ TEST_P(SubscriptionFactoryTestApiConfigSource, NonExistentCluster) { } else { api_config_source->add_cluster_names("static_cluster"); } - Upstream::ClusterManager::ClusterInfoMap cluster_map; - EXPECT_CALL(cm_, clusters()).WillOnce(Return(cluster_map)); - EXPECT_THROW_WITH_MESSAGE(subscriptionFromConfigSource(config)->start({"static_cluster"}), - EnvoyException, - fmt::format("{} must have a statically defined " - "non-EDS cluster: 'static_cluster' does not exist, was " - "added via api, or is an EDS cluster", - api_config_source->GetTypeName())); -} - -TEST_P(SubscriptionFactoryTestApiConfigSource, DynamicCluster) { - envoy::config::core::v3::ConfigSource config; - auto* api_config_source = config.mutable_api_config_source(); - api_config_source->set_api_type(GetParam()); - if (api_config_source->api_type() == envoy::config::core::v3::ApiConfigSource::GRPC) { - api_config_source->add_grpc_services()->mutable_envoy_grpc()->set_cluster_name( - "static_cluster"); - } else { - api_config_source->add_cluster_names("static_cluster"); - } - Upstream::ClusterManager::ClusterInfoMap cluster_map; - Upstream::MockClusterMockPrioritySet cluster; - cluster_map.emplace("static_cluster", cluster); - EXPECT_CALL(cm_, clusters()).WillOnce(Return(cluster_map)); - EXPECT_CALL(cluster, info()); - EXPECT_CALL(*cluster.info_, addedViaApi()).WillOnce(Return(true)); - EXPECT_THROW_WITH_MESSAGE(subscriptionFromConfigSource(config)->start({"static_cluster"}), - EnvoyException, - fmt::format("{} must have a statically defined " - "non-EDS cluster: 'static_cluster' does not exist, was " - "added via api, or is an EDS cluster", - api_config_source->GetTypeName())); -} - -TEST_P(SubscriptionFactoryTestApiConfigSource, EDSClusterBackingEDSCluster) { - envoy::config::core::v3::ConfigSource config; - auto* api_config_source = config.mutable_api_config_source(); - api_config_source->set_api_type(GetParam()); - if (api_config_source->api_type() == envoy::config::core::v3::ApiConfigSource::GRPC) { - 
api_config_source->add_grpc_services()->mutable_envoy_grpc()->set_cluster_name( - "static_cluster"); - } else { - api_config_source->add_cluster_names("static_cluster"); - } - Upstream::ClusterManager::ClusterInfoMap cluster_map; - Upstream::MockClusterMockPrioritySet cluster; - cluster_map.emplace("static_cluster", cluster); - EXPECT_CALL(cm_, clusters()).WillOnce(Return(cluster_map)); - EXPECT_CALL(cluster, info()).Times(2); - EXPECT_CALL(*cluster.info_, addedViaApi()); - EXPECT_CALL(*cluster.info_, type()).WillOnce(Return(envoy::config::cluster::v3::Cluster::EDS)); + Upstream::ClusterManager::ClusterSet primary_clusters; + EXPECT_CALL(cm_, primaryClusters()).WillOnce(ReturnRef(primary_clusters)); EXPECT_THROW_WITH_MESSAGE(subscriptionFromConfigSource(config)->start({"static_cluster"}), EnvoyException, fmt::format("{} must have a statically defined " diff --git a/test/common/config/utility_test.cc b/test/common/config/utility_test.cc index 503866e34ce8..00f23e5b6557 100644 --- a/test/common/config/utility_test.cc +++ b/test/common/config/utility_test.cc @@ -524,58 +524,33 @@ TEST(UtilityTest, EmptyToEmptyConfig) { TEST(CheckApiConfigSourceSubscriptionBackingClusterTest, GrpcClusterTestAcrossTypes) { envoy::config::core::v3::ConfigSource config; auto* api_config_source = config.mutable_api_config_source(); - Upstream::ClusterManager::ClusterInfoMap cluster_map; + Upstream::ClusterManager::ClusterSet primary_clusters; // API of type GRPC api_config_source->set_api_type(envoy::config::core::v3::ApiConfigSource::GRPC); // GRPC cluster without GRPC services. EXPECT_THROW_WITH_REGEX( - Utility::checkApiConfigSourceSubscriptionBackingCluster(cluster_map, *api_config_source), + Utility::checkApiConfigSourceSubscriptionBackingCluster(primary_clusters, *api_config_source), EnvoyException, "API configs must have either a gRPC service or a cluster name defined:"); // Non-existent cluster. api_config_source->add_grpc_services()->mutable_envoy_grpc()->set_cluster_name("foo_cluster"); EXPECT_THROW_WITH_MESSAGE( - Utility::checkApiConfigSourceSubscriptionBackingCluster(cluster_map, *api_config_source), - EnvoyException, - fmt::format("{} must have a statically defined non-EDS cluster: " - "'foo_cluster' does not exist, was added via api, or is an EDS cluster", - api_config_source->GetTypeName())); - - // Dynamic Cluster. - Upstream::MockClusterMockPrioritySet cluster; - cluster_map.emplace("foo_cluster", cluster); - EXPECT_CALL(cluster, info()); - EXPECT_CALL(*cluster.info_, addedViaApi()).WillOnce(Return(true)); - EXPECT_THROW_WITH_MESSAGE( - Utility::checkApiConfigSourceSubscriptionBackingCluster(cluster_map, *api_config_source), - EnvoyException, - fmt ::format("{} must have a statically defined non-EDS cluster: " - "'foo_cluster' does not exist, was added via api, or is an EDS cluster", - api_config_source->GetTypeName())); - - // EDS Cluster backing EDS Cluster. - EXPECT_CALL(cluster, info()).Times(2); - EXPECT_CALL(*cluster.info_, addedViaApi()); - EXPECT_CALL(*cluster.info_, type()).WillOnce(Return(envoy::config::cluster::v3::Cluster::EDS)); - EXPECT_THROW_WITH_MESSAGE( - Utility::checkApiConfigSourceSubscriptionBackingCluster(cluster_map, *api_config_source), + Utility::checkApiConfigSourceSubscriptionBackingCluster(primary_clusters, *api_config_source), EnvoyException, fmt::format("{} must have a statically defined non-EDS cluster: " "'foo_cluster' does not exist, was added via api, or is an EDS cluster", api_config_source->GetTypeName())); // All ok. 
- EXPECT_CALL(cluster, info()).Times(2); - EXPECT_CALL(*cluster.info_, addedViaApi()); - EXPECT_CALL(*cluster.info_, type()); - Utility::checkApiConfigSourceSubscriptionBackingCluster(cluster_map, *api_config_source); + primary_clusters.insert("foo_cluster"); + Utility::checkApiConfigSourceSubscriptionBackingCluster(primary_clusters, *api_config_source); // API with cluster_names set should be rejected. api_config_source->add_cluster_names("foo_cluster"); EXPECT_THROW_WITH_REGEX( - Utility::checkApiConfigSourceSubscriptionBackingCluster(cluster_map, *api_config_source), + Utility::checkApiConfigSourceSubscriptionBackingCluster(primary_clusters, *api_config_source), EnvoyException, fmt::format("{}::.DELTA_.GRPC must not have a cluster name " "specified:", @@ -585,46 +560,21 @@ TEST(CheckApiConfigSourceSubscriptionBackingClusterTest, GrpcClusterTestAcrossTy TEST(CheckApiConfigSourceSubscriptionBackingClusterTest, RestClusterTestAcrossTypes) { envoy::config::core::v3::ConfigSource config; auto* api_config_source = config.mutable_api_config_source(); - Upstream::ClusterManager::ClusterInfoMap cluster_map; + Upstream::ClusterManager::ClusterSet primary_clusters; api_config_source->set_api_type(envoy::config::core::v3::ApiConfigSource::REST); // Non-existent cluster. api_config_source->add_cluster_names("foo_cluster"); EXPECT_THROW_WITH_MESSAGE( - Utility::checkApiConfigSourceSubscriptionBackingCluster(cluster_map, *api_config_source), - EnvoyException, - fmt::format("{} must have a statically defined non-EDS cluster: " - "'foo_cluster' does not exist, was added via api, or is an EDS cluster", - api_config_source->GetTypeName())); - - // Dynamic Cluster. - Upstream::MockClusterMockPrioritySet cluster; - cluster_map.emplace("foo_cluster", cluster); - EXPECT_CALL(cluster, info()); - EXPECT_CALL(*cluster.info_, addedViaApi()).WillOnce(Return(true)); - EXPECT_THROW_WITH_MESSAGE( - Utility::checkApiConfigSourceSubscriptionBackingCluster(cluster_map, *api_config_source), - EnvoyException, - fmt::format("{} must have a statically defined non-EDS cluster: " - "'foo_cluster' does not exist, was added via api, or is an EDS cluster", - api_config_source->GetTypeName())); - - // EDS Cluster backing EDS Cluster. - EXPECT_CALL(cluster, info()).Times(2); - EXPECT_CALL(*cluster.info_, addedViaApi()); - EXPECT_CALL(*cluster.info_, type()).WillOnce(Return(envoy::config::cluster::v3::Cluster::EDS)); - EXPECT_THROW_WITH_MESSAGE( - Utility::checkApiConfigSourceSubscriptionBackingCluster(cluster_map, *api_config_source), + Utility::checkApiConfigSourceSubscriptionBackingCluster(primary_clusters, *api_config_source), EnvoyException, fmt::format("{} must have a statically defined non-EDS cluster: " "'foo_cluster' does not exist, was added via api, or is an EDS cluster", api_config_source->GetTypeName())); // All ok. - EXPECT_CALL(cluster, info()).Times(2); - EXPECT_CALL(*cluster.info_, addedViaApi()); - EXPECT_CALL(*cluster.info_, type()); - Utility::checkApiConfigSourceSubscriptionBackingCluster(cluster_map, *api_config_source); + primary_clusters.insert("foo_cluster"); + Utility::checkApiConfigSourceSubscriptionBackingCluster(primary_clusters, *api_config_source); } // Validates CheckCluster functionality. 
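Taken together, the changes above reduce config-source validation to a set-membership check: the cluster manager records the names of statically defined, non-EDS ("primary") clusters at bootstrap time, and an ApiConfigSource must reference one of them or an exception is thrown. A rough, self-contained sketch of that check, with hypothetical names rather than the Envoy types:

#include <iostream>
#include <set>
#include <stdexcept>
#include <string>

// Hypothetical stand-in for the validateClusterName() logic operating on a primary-cluster name set.
void validateClusterName(const std::set<std::string>& primary_clusters, const std::string& cluster_name) {
  if (primary_clusters.find(cluster_name) == primary_clusters.end()) {
    throw std::runtime_error("config must have a statically defined non-EDS cluster: '" + cluster_name +
                             "' does not exist, was added via api, or is an EDS cluster");
  }
}

int main() {
  const std::set<std::string> primary_clusters{"static_cluster", "logical_dns_cluster"};
  validateClusterName(primary_clusters, "static_cluster"); // OK: statically defined in the bootstrap.
  try {
    validateClusterName(primary_clusters, "foo_cluster"); // Throws: not a primary cluster.
  } catch (const std::runtime_error& e) {
    std::cerr << e.what() << "\n";
  }
  return 0;
}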
diff --git a/test/common/secret/sds_api_test.cc b/test/common/secret/sds_api_test.cc index ec1c6ee5acc8..8834dc896c98 100644 --- a/test/common/secret/sds_api_test.cc +++ b/test/common/secret/sds_api_test.cc @@ -24,6 +24,7 @@ using ::testing::_; using ::testing::Invoke; +using ::testing::InvokeWithoutArgs; namespace Envoy { namespace Secret { @@ -32,14 +33,15 @@ namespace { class SdsApiTest : public testing::Test { protected: SdsApiTest() - : api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher("test_thread")) { + : api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher("test_thread")) {} + + void initialize() { init_target_handle_->initialize(init_watcher_); } + void setupMocks() { EXPECT_CALL(init_manager_, add(_)).WillOnce(Invoke([this](const Init::Target& target) { init_target_handle_ = target.createHandle("test"); })); } - void initialize() { init_target_handle_->initialize(init_watcher_); } - Api::ApiPtr api_; NiceMock validation_visitor_; NiceMock subscription_factory_; @@ -57,17 +59,37 @@ TEST_F(SdsApiTest, BasicTest) { NiceMock server; envoy::config::core::v3::ConfigSource config_source; + setupMocks(); TlsCertificateSdsApi sds_api( config_source, "abc.com", subscription_factory_, time_system_, validation_visitor_, server.stats(), init_manager_, []() {}, *dispatcher_, *api_); initialize(); } +// Validate that bad ConfigSources are caught at construction time. This is a regression test for +// https://github.com/envoyproxy/envoy/issues/10976. +TEST_F(SdsApiTest, BadConfigSource) { + ::testing::InSequence s; + NiceMock server; + envoy::config::core::v3::ConfigSource config_source; + EXPECT_CALL(subscription_factory_, subscriptionFromConfigSource(_, _, _, _)) + .WillOnce(InvokeWithoutArgs([]() -> Config::SubscriptionPtr { + throw EnvoyException("bad config"); + return nullptr; + })); + EXPECT_THROW_WITH_MESSAGE(TlsCertificateSdsApi( + config_source, "abc.com", subscription_factory_, time_system_, + validation_visitor_, server.stats(), init_manager_, []() {}, + *dispatcher_, *api_), + EnvoyException, "bad config"); +} + // Validate that TlsCertificateSdsApi updates secrets successfully if a good secret // is passed to onConfigUpdate(). 
TEST_F(SdsApiTest, DynamicTlsCertificateUpdateSuccess) { NiceMock server; envoy::config::core::v3::ConfigSource config_source; + setupMocks(); TlsCertificateSdsApi sds_api( config_source, "abc.com", subscription_factory_, time_system_, validation_visitor_, server.stats(), init_manager_, []() {}, *dispatcher_, *api_); @@ -149,6 +171,7 @@ TEST_F(SdsApiTest, Delta) { NiceMock server; envoy::config::core::v3::ConfigSource config_source; Event::GlobalTimeSystem time_system; + setupMocks(); PartialMockSds sds(server, init_manager_, config_source, subscription_factory_, time_system, *dispatcher_, *api_); initialize(); @@ -168,6 +191,7 @@ TEST_F(SdsApiTest, Delta) { TEST_F(SdsApiTest, DeltaUpdateSuccess) { NiceMock server; envoy::config::core::v3::ConfigSource config_source; + setupMocks(); TlsCertificateSdsApi sds_api( config_source, "abc.com", subscription_factory_, time_system_, validation_visitor_, server.stats(), init_manager_, []() {}, *dispatcher_, *api_); @@ -213,6 +237,7 @@ TEST_F(SdsApiTest, DeltaUpdateSuccess) { TEST_F(SdsApiTest, DynamicCertificateValidationContextUpdateSuccess) { NiceMock server; envoy::config::core::v3::ConfigSource config_source; + setupMocks(); CertificateValidationContextSdsApi sds_api( config_source, "abc.com", subscription_factory_, time_system_, validation_visitor_, server.stats(), init_manager_, []() {}, *dispatcher_, *api_); @@ -267,6 +292,7 @@ class MockCvcValidationCallback : public CvcValidationCallback { TEST_F(SdsApiTest, DefaultCertificateValidationContextTest) { NiceMock server; envoy::config::core::v3::ConfigSource config_source; + setupMocks(); CertificateValidationContextSdsApi sds_api( config_source, "abc.com", subscription_factory_, time_system_, validation_visitor_, server.stats(), init_manager_, []() {}, *dispatcher_, *api_); @@ -355,6 +381,7 @@ class MockGenericSecretValidationCallback : public GenericSecretValidationCallba TEST_F(SdsApiTest, GenericSecretSdsApiTest) { NiceMock server; envoy::config::core::v3::ConfigSource config_source; + setupMocks(); GenericSecretSdsApi sds_api( config_source, "encryption_key", subscription_factory_, time_system_, validation_visitor_, server.stats(), init_manager_, []() {}, *dispatcher_, *api_); @@ -400,6 +427,7 @@ name: "encryption_key" TEST_F(SdsApiTest, EmptyResource) { NiceMock server; envoy::config::core::v3::ConfigSource config_source; + setupMocks(); TlsCertificateSdsApi sds_api( config_source, "abc.com", subscription_factory_, time_system_, validation_visitor_, server.stats(), init_manager_, []() {}, *dispatcher_, *api_); @@ -416,6 +444,7 @@ TEST_F(SdsApiTest, EmptyResource) { TEST_F(SdsApiTest, SecretUpdateWrongSize) { NiceMock server; envoy::config::core::v3::ConfigSource config_source; + setupMocks(); TlsCertificateSdsApi sds_api( config_source, "abc.com", subscription_factory_, time_system_, validation_visitor_, server.stats(), init_manager_, []() {}, *dispatcher_, *api_); @@ -446,6 +475,7 @@ TEST_F(SdsApiTest, SecretUpdateWrongSize) { TEST_F(SdsApiTest, SecretUpdateWrongSecretName) { NiceMock server; envoy::config::core::v3::ConfigSource config_source; + setupMocks(); TlsCertificateSdsApi sds_api( config_source, "abc.com", subscription_factory_, time_system_, validation_visitor_, server.stats(), init_manager_, []() {}, *dispatcher_, *api_); diff --git a/test/common/upstream/cluster_manager_impl_test.cc b/test/common/upstream/cluster_manager_impl_test.cc index be526c1a1f51..7e1255aef2a6 100644 --- a/test/common/upstream/cluster_manager_impl_test.cc +++ 
b/test/common/upstream/cluster_manager_impl_test.cc @@ -352,6 +352,54 @@ TEST_F(ClusterManagerImplTest, ValidClusterName) { EXPECT_EQ(1UL, factory_.stats_.counter("cluster.cluster_name.foo").value()); } +// Validate that the primary clusters are derived from the bootstrap and don't +// include EDS. +TEST_F(ClusterManagerImplTest, PrimaryClusters) { + const std::string yaml = R"EOF( +static_resources: + clusters: + - name: static_cluster + connect_timeout: 0.250s + type: static + - name: logical_dns_cluster + connect_timeout: 0.250s + type: logical_dns + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.com + port_value: 11001 + - name: strict_dns_cluster + connect_timeout: 0.250s + type: strict_dns + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.com + port_value: 11001 + - name: rest_eds_cluster + connect_timeout: 0.250s + type: eds + eds_cluster_config: + eds_config: + api_config_source: + api_type: GRPC + grpc_services: + envoy_grpc: + cluster_name: static_cluster + )EOF"; + create(parseBootstrapFromV2Yaml(yaml)); + const auto& primary_clusters = cluster_manager_->primaryClusters(); + EXPECT_THAT(primary_clusters, testing::UnorderedElementsAre( + "static_cluster", "strict_dns_cluster", "logical_dns_cluster")); +} + TEST_F(ClusterManagerImplTest, OriginalDstLbRestriction) { const std::string yaml = R"EOF( static_resources: diff --git a/test/integration/stats_integration_test.cc b/test/integration/stats_integration_test.cc index 757eebef0eee..914657507162 100644 --- a/test/integration/stats_integration_test.cc +++ b/test/integration/stats_integration_test.cc @@ -274,6 +274,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithFakeSymbolTable) { // 2020/04/23 10531 44169 44600 http: max stream duration upstream support. // 2020/05/05 10908 44233 44600 router: add InternalRedirectPolicy and predicate // 2020/05/13 10531 44425 44600 Refactor resource manager + // 2020/05/20 11223 44491 44600 Add primary clusters tracking to cluster manager. // Note: when adjusting this value: EXPECT_MEMORY_EQ is active only in CI // 'release' builds, where we control the platform and tool-chain. So you @@ -287,7 +288,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithFakeSymbolTable) { // If you encounter a failure here, please see // https://github.com/envoyproxy/envoy/blob/master/source/docs/stats.md#stats-memory-tests // for details on how to fix. - EXPECT_MEMORY_EQ(m_per_cluster, 44425); + EXPECT_MEMORY_EQ(m_per_cluster, 44491); EXPECT_MEMORY_LE(m_per_cluster, 44600); } @@ -334,7 +335,8 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithRealSymbolTable) { // 2020/04/07 10661 35557 36000 fix clang tidy on master // 2020/04/23 10531 36281 36800 http: max stream duration upstream support. // 2020/05/05 10908 36345 36800 router: add InternalRedirectPolicy and predicate - // 2020/05/13 10531 36537 44600 Refactor resource manager + // 2020/05/13 10531 36537 36800 Refactor resource manager + // 2020/05/20 11223 36603 36800 Add primary clusters tracking to cluster manager. // Note: when adjusting this value: EXPECT_MEMORY_EQ is active only in CI // 'release' builds, where we control the platform and tool-chain. 
So you @@ -348,7 +350,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithRealSymbolTable) { // If you encounter a failure here, please see // https://github.com/envoyproxy/envoy/blob/master/source/docs/stats.md#stats-memory-tests // for details on how to fix. - EXPECT_MEMORY_EQ(m_per_cluster, 36537); + EXPECT_MEMORY_EQ(m_per_cluster, 36603); EXPECT_MEMORY_LE(m_per_cluster, 36800); } diff --git a/test/mocks/upstream/mocks.h b/test/mocks/upstream/mocks.h index 4551aaa42e26..6222f636cb06 100644 --- a/test/mocks/upstream/mocks.h +++ b/test/mocks/upstream/mocks.h @@ -314,6 +314,7 @@ class MockClusterManager : public ClusterManager { MOCK_METHOD(void, initializeSecondaryClusters, (const envoy::config::bootstrap::v3::Bootstrap& bootstrap)); MOCK_METHOD(ClusterInfoMap, clusters, ()); + MOCK_METHOD(const ClusterSet&, primaryClusters, ()); MOCK_METHOD(ThreadLocalCluster*, get, (absl::string_view cluster)); MOCK_METHOD(Http::ConnectionPool::Instance*, httpConnPoolForCluster, (const std::string& cluster, ResourcePriority priority, Http::Protocol protocol, diff --git a/test/server/server_test.cc b/test/server/server_test.cc index e12eca40fad7..6975f2ceecfb 100644 --- a/test/server/server_test.cc +++ b/test/server/server_test.cc @@ -805,6 +805,14 @@ TEST_P(ServerInstanceImplTest, BootstrapClusterManagerInitializationFail) { EnvoyException, "cluster manager: duplicate cluster 'service_google'"); } +// Regression tests for SdsApi throwing exceptions in initialize(). +TEST_P(ServerInstanceImplTest, BadSdsConfigSource) { + EXPECT_THROW_WITH_MESSAGE( + initialize("test/server/test_data/server/bad_sds_config_source.yaml"), EnvoyException, + "envoy.config.core.v3.ApiConfigSource must have a statically defined non-EDS cluster: " + "'sds-grpc' does not exist, was added via api, or is an EDS cluster"); +} + // Test for protoc-gen-validate constraint on invalid timeout entry of a health check config entry. 
TEST_P(ServerInstanceImplTest, BootstrapClusterHealthCheckInvalidTimeout) { EXPECT_THROW_WITH_REGEX( diff --git a/test/server/test_data/server/bad_sds_config_source.yaml b/test/server/test_data/server/bad_sds_config_source.yaml new file mode 100644 index 000000000000..f5dab6740008 --- /dev/null +++ b/test/server/test_data/server/bad_sds_config_source.yaml @@ -0,0 +1,32 @@ +node: + id: bootstrap_id + cluster: bootstrap_cluster +static_resources: + clusters: + - name: xds-grpc + connect_timeout: 0.25s + type: STRICT_DNS + connect_timeout: 1s + load_assignment: + cluster_name: xds-grpc + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: localhost + port_value: 12345 + transport_socket: + name: "envoy.transport_sockets.tls" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext" + common_tls_context: + tls_certificate_sds_secret_configs: + - name: default + sds_config: + api_config_source: + api_type: GRPC + grpc_services: + envoy_grpc: + cluster_name: "sds-grpc" + validation_context: {} From 104f01398e73727c35b5fef8fcefe8195b34d909 Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Wed, 27 May 2020 12:15:28 -0700 Subject: [PATCH 233/909] ci: use azp for coverage (#11251) Signed-off-by: Lizan Zhou --- .azure-pipelines/pipelines.yml | 21 ++++++++++++++++++++ .circleci/config.yml | 30 ----------------------------- ci/coverage_publish.sh | 33 -------------------------------- ci/run_envoy_docker.sh | 12 ++++++------ ci/upload_gcs_artifact.sh | 28 +++++++++++++++++++++++++++ test/run_envoy_bazel_coverage.sh | 3 ++- 6 files changed, 57 insertions(+), 70 deletions(-) delete mode 100755 ci/coverage_publish.sh create mode 100755 ci/upload_gcs_artifact.sh diff --git a/.azure-pipelines/pipelines.yml b/.azure-pipelines/pipelines.yml index 5ae719be6732..8ca800f172e5 100644 --- a/.azure-pipelines/pipelines.yml +++ b/.azure-pipelines/pipelines.yml @@ -89,6 +89,27 @@ jobs: parameters: ciTarget: $(CI_TARGET) + - job: coverage + displayName: "Linux-x64 coverage" + dependsOn: ["format"] + timeoutInMinutes: 360 + pool: "x64-large" + steps: + - template: bazel.yml + parameters: + managedAgent: false + ciTarget: bazel.coverage + rbe: "" + bazelBuildExtraOptions: "--test_env=ENVOY_IP_TEST_VERSIONS=v4only --curses=no" + + - script: ci/run_envoy_docker.sh 'ci/upload_gcs_artifact.sh /build/envoy/generated/coverage coverage' + displayName: "Upload Report to GCS" + env: + ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory) + GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) + GCS_ARTIFACT_BUCKET: $(GcsArtifactBucket) + condition: always() + - job: docker displayName: "Linux-x64 docker" dependsOn: ["release"] diff --git a/.circleci/config.yml b/.circleci/config.yml index 4201a956dc28..ab33aac57881 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -45,33 +45,6 @@ jobs: - "f6:f9:df:90:9c:4b:5f:9c:f4:69:fd:42:94:ff:88:24" - run: ci/filter_example_mirror.sh - coverage: - executor: ubuntu-build - steps: - - run: rm -rf /home/circleci/project/.git # CircleCI git caching is likely broken - - checkout - - run: - command: - ci/do_circle_ci.sh bazel.coverage - no_output_timeout: 60m - - persist_to_workspace: - root: /build/envoy/generated - paths: - - coverage - - store_artifacts: - path: /build/envoy/generated - destination: / - - coverage_publish: - docker: - - image: google/cloud-sdk - steps: - - run: rm -rf /home/circleci/project/.git # CircleCI git caching is likely broken - - checkout - - attach_workspace: - at: 
/build/envoy/generated - - run: ci/coverage_publish.sh - docs: executor: ubuntu-build steps: @@ -92,9 +65,6 @@ workflows: - api - go_control_plane_mirror - filter_example_mirror - - coverage - - coverage_publish: - requires: [coverage] - docs: filters: tags: diff --git a/ci/coverage_publish.sh b/ci/coverage_publish.sh deleted file mode 100755 index c04eafff0323..000000000000 --- a/ci/coverage_publish.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash - -# Do not ever set -x here, it is a security hazard as it will place the credentials below in the -# CircleCI logs. -set -e - -if [ "${CIRCLECI}" != "true" ]; then - exit 0 -fi - -[[ -z "${ENVOY_BUILD_DIR}" ]] && ENVOY_BUILD_DIR=/build -COVERAGE_FILE="${ENVOY_BUILD_DIR}/envoy/generated/coverage/index.html" - -if [ ! -f "${COVERAGE_FILE}" ]; then - echo "ERROR: Coverage file not found." - exit 1 -fi - -# available for master builds -if [ -z "$CIRCLE_PR_NUMBER" ] -then - echo "Uploading coverage report..." - - BRANCH_NAME="${CIRCLE_BRANCH}" - COVERAGE_DIR="$(dirname "${COVERAGE_FILE}")" - GCS_LOCATION="envoy-coverage/report-${BRANCH_NAME}" - - echo ${GCP_SERVICE_ACCOUNT_KEY} | base64 --decode | gcloud auth activate-service-account --key-file=- - gsutil -m rsync -dr ${COVERAGE_DIR} gs://${GCS_LOCATION} - echo "Coverage report for branch '${BRANCH_NAME}': https://storage.googleapis.com/${GCS_LOCATION}/index.html" -else - echo "Coverage report will not be uploaded for this build." -fi diff --git a/ci/run_envoy_docker.sh b/ci/run_envoy_docker.sh index 2fb46473167d..59c8c4a1a0df 100755 --- a/ci/run_envoy_docker.sh +++ b/ci/run_envoy_docker.sh @@ -16,19 +16,19 @@ USER_GROUP=root [[ -z "${IMAGE_ID}" ]] && IMAGE_ID="${ENVOY_BUILD_SHA}" [[ -z "${ENVOY_DOCKER_BUILD_DIR}" ]] && ENVOY_DOCKER_BUILD_DIR=/tmp/envoy-docker-build -[[ -f .git ]] && [[ ! -d .git ]] && GIT_VOLUME_OPTION="-v $(git rev-parse --git-common-dir):$(git rev-parse --git-common-dir)" - -[[ -t 1 ]] && DOCKER_TTY_OPTION=-it +[[ -t 1 ]] && ENVOY_DOCKER_OPTIONS+=" -it" +[[ -f .git ]] && [[ ! -d .git ]] && ENVOY_DOCKER_OPTIONS+=" -v \"$(git rev-parse --git-common-dir):$(git rev-parse --git-common-dir)\"" export ENVOY_BUILD_IMAGE="${IMAGE_NAME}:${IMAGE_ID}" mkdir -p "${ENVOY_DOCKER_BUILD_DIR}" # Since we specify an explicit hash, docker-run will pull from the remote repo if missing. 
-docker run --rm ${DOCKER_TTY_OPTION} -e HTTP_PROXY=${http_proxy} -e HTTPS_PROXY=${https_proxy} \ - -u "${USER}":"${USER_GROUP}" -v "${ENVOY_DOCKER_BUILD_DIR}":/build -v /var/run/docker.sock:/var/run/docker.sock ${GIT_VOLUME_OPTION} \ +docker run --rm ${ENVOY_DOCKER_OPTIONS} -e HTTP_PROXY=${http_proxy} -e HTTPS_PROXY=${https_proxy} \ + -u "${USER}":"${USER_GROUP}" -v "${ENVOY_DOCKER_BUILD_DIR}":/build -v /var/run/docker.sock:/var/run/docker.sock \ -e BAZEL_BUILD_EXTRA_OPTIONS -e BAZEL_EXTRA_TEST_OPTIONS -e BAZEL_REMOTE_CACHE -e ENVOY_STDLIB -e BUILD_REASON \ -e BAZEL_REMOTE_INSTANCE -e GCP_SERVICE_ACCOUNT_KEY -e NUM_CPUS -e ENVOY_RBE -e FUZZIT_API_KEY -e ENVOY_BUILD_IMAGE \ - -e ENVOY_SRCDIR -e ENVOY_BUILD_TARGET -e SYSTEM_PULLREQUEST_TARGETBRANCH \ + -e ENVOY_SRCDIR -e ENVOY_BUILD_TARGET -e SYSTEM_PULLREQUEST_TARGETBRANCH -e SYSTEM_PULLREQUEST_PULLREQUESTNUMBER \ + -e GCS_ARTIFACT_BUCKET -e BUILD_SOURCEBRANCHNAME \ -v "$PWD":/source --cap-add SYS_PTRACE --cap-add NET_RAW --cap-add NET_ADMIN "${ENVOY_BUILD_IMAGE}" \ /bin/bash -lc "groupadd --gid $(id -g) -f envoygroup && useradd -o --uid $(id -u) --gid $(id -g) --no-create-home \ --home-dir /build envoybuild && usermod -a -G pcap envoybuild && sudo -EHs -u envoybuild bash -c \"cd /source && $*\"" diff --git a/ci/upload_gcs_artifact.sh b/ci/upload_gcs_artifact.sh new file mode 100755 index 000000000000..3ec06f3d1761 --- /dev/null +++ b/ci/upload_gcs_artifact.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +# Do not ever set -x here, it is a security hazard as it will place the credentials below in the +# CI logs. +set -e -o pipefail + +if [[ -z "${GCS_ARTIFACT_BUCKET}" ]]; then + echo "Artifact bucket is not set, not uploading artifacts." + exit 0 +fi + +# Fail when service account key is not specified +echo ${GCP_SERVICE_ACCOUNT_KEY} | base64 --decode | gcloud auth activate-service-account --key-file=- + +SOURCE_DIRECTORY="$1" +TARGET_SUFFIX="$2" + +if [ ! -d "${SOURCE_DIRECTORY}" ]; then + echo "ERROR: ${SOURCE_DIRECTORY} is not found." + exit 1 +fi + +BRANCH=${SYSTEM_PULLREQUEST_PULLREQUESTNUMBER:-${BUILD_SOURCEBRANCHNAME}} +GCS_LOCATION="${GCS_ARTIFACT_BUCKET}/${BRANCH}/${TARGET_SUFFIX}" + +echo "Uploading to gs://${GCS_LOCATION} ..." +gsutil -mq rsync -dr ${SOURCE_DIRECTORY} gs://${GCS_LOCATION} +echo "Artifacts uploaded to: https://storage.googleapis.com/${GCS_LOCATION}/index.html" diff --git a/test/run_envoy_bazel_coverage.sh b/test/run_envoy_bazel_coverage.sh index c84a3efd206e..0754ab01a163 100755 --- a/test/run_envoy_bazel_coverage.sh +++ b/test/run_envoy_bazel_coverage.sh @@ -33,10 +33,11 @@ fi bazel coverage ${BAZEL_BUILD_OPTIONS} --test_output=all ${COVERAGE_TARGETS} COVERAGE_DIR="${SRCDIR}"/generated/coverage + +rm -rf "${COVERAGE_DIR}" mkdir -p "${COVERAGE_DIR}" COVERAGE_DATA="${COVERAGE_DIR}/coverage.dat" - cp bazel-out/_coverage/_coverage_report.dat "${COVERAGE_DATA}" COVERAGE_VALUE=$(genhtml --prefix ${PWD} --output "${COVERAGE_DIR}" "${COVERAGE_DATA}" | tee /dev/stderr | grep lines... | cut -d ' ' -f 4) From f13ea5b3ee906c4510d68f286a0f65b8b961d7b5 Mon Sep 17 00:00:00 2001 From: htuch Date: Wed, 27 May 2020 15:54:53 -0400 Subject: [PATCH 234/909] test: fix merge race in #11223. 
(#11333) Signed-off-by: Harvey Tuch --- test/common/config/subscription_factory_impl_test.cc | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/test/common/config/subscription_factory_impl_test.cc b/test/common/config/subscription_factory_impl_test.cc index f193518e65d0..bc772b6cf073 100644 --- a/test/common/config/subscription_factory_impl_test.cc +++ b/test/common/config/subscription_factory_impl_test.cc @@ -301,10 +301,9 @@ TEST_F(SubscriptionFactoryTest, LogWarningOnDeprecatedApi) { EXPECT_CALL(snapshot, runtimeFeatureEnabled(_)).WillOnce(Return(true)); EXPECT_CALL(snapshot, countDeprecatedFeatureUse()); - Upstream::ClusterManager::ClusterInfoMap cluster_map; - NiceMock cluster; - cluster_map.emplace("static_cluster", cluster); - EXPECT_CALL(cm_, clusters()).WillOnce(Return(cluster_map)); + Upstream::ClusterManager::ClusterSet primary_clusters; + primary_clusters.insert("static_cluster"); + EXPECT_CALL(cm_, primaryClusters()).WillOnce(ReturnRef(primary_clusters)); EXPECT_LOG_CONTAINS( "warn", "xDS of version v2 has been deprecated", try { From 1ef01c01115e427425a94dc376cfe5d025f2bec6 Mon Sep 17 00:00:00 2001 From: htuch Date: Wed, 27 May 2020 16:38:46 -0400 Subject: [PATCH 235/909] security: Google Vulnerability Reward Program (VRP) (#11005) This PR provides the documentation and an accompanying set of Docker images for Envoy's participation in the Google Vulnerability Reward Program (VRP), see https://www.google.com/about/appsecurity/reward-program/. The starting point is a fairly conservative Envoy use case; over time we will grow this. Signed-off-by: Harvey Tuch --- ci/Dockerfile-envoy-google-vrp | 22 +++ ci/docker_ci.sh | 7 +- ci/docker_rebuild_google-vrp.sh | 53 +++++ configs/BUILD | 2 + configs/google-vrp/envoy-edge.yaml | 92 +++++++++ configs/google-vrp/envoy-origin.yaml | 64 ++++++ configs/google-vrp/launch_envoy.sh | 4 + configs/google-vrp/supervisor.conf | 16 ++ docs/build.sh | 13 +- .../configuration/best_practices/edge.rst | 97 +-------- .../arch_overview/security/google_vrp.rst | 184 ++++++++++++++++++ .../intro/arch_overview/security/security.rst | 1 + test/config_test/example_configs_test.cc | 4 +- 13 files changed, 458 insertions(+), 101 deletions(-) create mode 100644 ci/Dockerfile-envoy-google-vrp create mode 100755 ci/docker_rebuild_google-vrp.sh create mode 100644 configs/google-vrp/envoy-edge.yaml create mode 100644 configs/google-vrp/envoy-origin.yaml create mode 100755 configs/google-vrp/launch_envoy.sh create mode 100644 configs/google-vrp/supervisor.conf create mode 100644 docs/root/intro/arch_overview/security/google_vrp.rst diff --git a/ci/Dockerfile-envoy-google-vrp b/ci/Dockerfile-envoy-google-vrp new file mode 100644 index 000000000000..868b6a5840df --- /dev/null +++ b/ci/Dockerfile-envoy-google-vrp @@ -0,0 +1,22 @@ +FROM envoyproxy/envoy:local + +RUN apt-get update \ + && apt-get upgrade -y \ + && apt-get install -y libc++1 supervisor gdb strace tshark \ + && apt-get autoremove -y \ + && apt-get clean \ + && rm -rf /tmp/* /var/tmp/* \ + && rm -rf /var/lib/apt/lists/* + +ADD configs/google-vrp/envoy-edge.yaml /etc/envoy/envoy-edge.yaml +ADD configs/google-vrp/envoy-origin.yaml /etc/envoy/envoy-origin.yaml +ADD configs/google-vrp/launch_envoy.sh /usr/local/bin/launch_envoy.sh +ADD configs/google-vrp/supervisor.conf /etc/supervisor.conf +ADD test/config/integration/certs/serverkey.pem /etc/envoy/certs/serverkey.pem +ADD test/config/integration/certs/servercert.pem /etc/envoy/certs/servercert.pem +# ADD %local envoy bin%
/usr/local/bin/envoy + +EXPOSE 10000 +EXPOSE 10001 + +CMD ["supervisord", "-c", "/etc/supervisor.conf"] diff --git a/ci/docker_ci.sh b/ci/docker_ci.sh index d4594df2ffca..53fd19464738 100755 --- a/ci/docker_ci.sh +++ b/ci/docker_ci.sh @@ -7,9 +7,12 @@ set -e # This prefix is altered for the private security images on setec builds. DOCKER_IMAGE_PREFIX="${DOCKER_IMAGE_PREFIX:-envoyproxy/envoy}" +# "-google-vrp" must come after "" to ensure we rebuild the local base image dependency. +BUILD_TYPES=("" "-alpine" "-alpine-debug" "-google-vrp") + # Test the docker build in all cases, but use a local tag that we will overwrite before push in the # cases where we do push. -for BUILD_TYPE in "" "-alpine" "-alpine-debug"; do +for BUILD_TYPE in "${BUILD_TYPES[@]}"; do docker build -f ci/Dockerfile-envoy"${BUILD_TYPE}" -t "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}:local" . done @@ -38,7 +41,7 @@ fi docker login -u "$DOCKERHUB_USERNAME" -p "$DOCKERHUB_PASSWORD" -for BUILD_TYPE in "" "-alpine" "-alpine-debug"; do +for BUILD_TYPE in "${BUILD_TYPES[@]}"; do docker tag "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}:local" "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}${IMAGE_POSTFIX}:${IMAGE_NAME}" docker push "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}${IMAGE_POSTFIX}:${IMAGE_NAME}" diff --git a/ci/docker_rebuild_google-vrp.sh b/ci/docker_rebuild_google-vrp.sh new file mode 100755 index 000000000000..7a6656378d94 --- /dev/null +++ b/ci/docker_rebuild_google-vrp.sh @@ -0,0 +1,53 @@ +#!/bin/bash + +# Script to rebuild Dockerfile-envoy-google-vrp locally (i.e. not in CI) for development purposes. +# This makes use of the latest envoy-dev base image on Docker Hub as the base and takes an +# optional local path for an Envoy binary. When a custom local Envoy binary is used, the script +# switches to using ${BASE_DOCKER_IMAGE} for the build, which should be configured to provide +# compatibility with your local build environment (specifically glibc). +# +# Usage: +# +# Basic rebuild of Docker image (tagged envoy-google-vrp:local): +# +# ./ci/docker_rebuild_google-vrp.sh +# +# Basic rebuild of Docker image (tagged envoy-google-vrp:local) with some local Envoy binary: +# +# bazel build //source/exe:envoy-static --config=libc++ -copt +# ./ci/docker_rebuild_google-vrp.sh bazel-bin/source/exe/envoy-static + +set -e + +# This should match your local machine if you are building custom Envoy binaries outside of Docker. +BASE_DOCKER_IMAGE="ubuntu:20.04" + +declare -r BUILD_DIR="$(mktemp -d)" +cp ci/Dockerfile-envoy-google-vrp "${BUILD_DIR}" +declare -r DOCKER_BUILD_FILE="${BUILD_DIR}"/Dockerfile-envoy-google-vrp + +# If we have a local Envoy binary, use a variant of the build environment that supports it. +if [[ -n "$1" ]] +then + # Switch to a base image similar to the local build environment. This provides compatibility of + # locally built Envoy and glibc in the Docker env. + sed -i -e "s#envoyproxy/envoy:local#${BASE_DOCKER_IMAGE}#" "${DOCKER_BUILD_FILE}" + # Copy the binary to deal with symlinks in Bazel cache and Docker daemon confusion. + declare -r LOCAL_ENVOY="envoy-binary" + cp -f "$1" "${PWD}/${LOCAL_ENVOY}" + sed -i -e "s@# ADD %local envoy bin%@ADD ${LOCAL_ENVOY}@" "${DOCKER_BUILD_FILE}" +else + # Don't use the local envoy-dev, but pull from Docker Hub instead, this avoids having to rebuild + # this local dep which is fairly stable. + sed -i -e "s#envoyproxy/envoy:local#envoyproxy/envoy-dev:latest#" "${DOCKER_BUILD_FILE}" +fi + +cat "${DOCKER_BUILD_FILE}" + +docker build -t "envoy-google-vrp:local" -f "${DOCKER_BUILD_FILE}" .
+ +if [[ -n "$1" ]] +then + rm -f "${LOCAL_ENVOY}" +fi +rm -r "${BUILD_DIR}" diff --git a/configs/BUILD b/configs/BUILD index 9846609607e9..451946f1805d 100644 --- a/configs/BUILD +++ b/configs/BUILD @@ -22,6 +22,8 @@ envoy_py_test_binary( filegroup( name = "configs", srcs = [ + "google-vrp/envoy-edge.yaml", + "google-vrp/envoy-origin.yaml", "original-dst-cluster/proxy_config.yaml", ] + select({ "//bazel:apple": [], diff --git a/configs/google-vrp/envoy-edge.yaml b/configs/google-vrp/envoy-edge.yaml new file mode 100644 index 000000000000..803b01116ad1 --- /dev/null +++ b/configs/google-vrp/envoy-edge.yaml @@ -0,0 +1,92 @@ +overload_manager: + refresh_interval: 0.25s + resource_monitors: + - name: "envoy.resource_monitors.fixed_heap" + typed_config: + "@type": type.googleapis.com/envoy.config.resource_monitor.fixed_heap.v2alpha.FixedHeapConfig + # TODO: Tune for your system. + max_heap_size_bytes: 1073741824 # 1 GiB + actions: + - name: "envoy.overload_actions.shrink_heap" + triggers: + - name: "envoy.resource_monitors.fixed_heap" + threshold: + value: 0.90 + - name: "envoy.overload_actions.stop_accepting_requests" + triggers: + - name: "envoy.resource_monitors.fixed_heap" + threshold: + value: 0.95 + +static_resources: + listeners: + - name: listener_https + address: + socket_address: + protocol: TCP + address: 0.0.0.0 + port_value: 10000 + per_connection_buffer_limit_bytes: 32768 # 32 KiB + filter_chains: + - transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: { filename: "certs/servercert.pem" } + private_key: { filename: "certs/serverkey.pem" } + # Uncomment if Envoy is behind a load balancer that exposes client IP address using the PROXY protocol. + # use_proxy_proto: true + filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + use_remote_address: true + common_http_protocol_options: + idle_timeout: 3600s # 1 hour + headers_with_underscores_action: REJECT_REQUEST + http2_protocol_options: + max_concurrent_streams: 100 + initial_stream_window_size: 65536 # 64 KiB + initial_connection_window_size: 1048576 # 1 MiB + stream_idle_timeout: 300s # 5 mins, must be disabled for long-lived and streaming requests + request_timeout: 300s # 5 mins, must be disabled for long-lived and streaming requests + route_config: + name: local_route + virtual_hosts: + - name: local_service + domains: ["*"] + # The exact route table is not super important in this example (this is the model + # for the Google VRP scenario). 
+ routes: + - match: + prefix: "/content" + route: + cluster: service_foo + idle_timeout: 15s # must be disabled for long-lived and streaming requests + - match: + prefix: "/" + direct_response: + status: 403 + body: + inline_string: "denied\n" + http_filters: + - name: envoy.filters.http.router + clusters: + name: service_foo + connect_timeout: 5s + per_connection_buffer_limit_bytes: 32768 # 32 KiB + load_assignment: + cluster_name: service_foo + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 10002 + http2_protocol_options: + initial_stream_window_size: 65536 # 64 KiB + initial_connection_window_size: 1048576 # 1 MiB diff --git a/configs/google-vrp/envoy-origin.yaml b/configs/google-vrp/envoy-origin.yaml new file mode 100644 index 000000000000..283d347e5a27 --- /dev/null +++ b/configs/google-vrp/envoy-origin.yaml @@ -0,0 +1,64 @@ +overload_manager: + refresh_interval: 0.25s + resource_monitors: + - name: "envoy.resource_monitors.fixed_heap" + typed_config: + "@type": type.googleapis.com/envoy.config.resource_monitor.fixed_heap.v2alpha.FixedHeapConfig + max_heap_size_bytes: 1073741824 # 1 GiB + actions: + - name: "envoy.overload_actions.shrink_heap" + triggers: + - name: "envoy.resource_monitors.fixed_heap" + threshold: + value: 0.95 + - name: "envoy.overload_actions.stop_accepting_requests" + triggers: + - name: "envoy.resource_monitors.fixed_heap" + threshold: + value: 0.98 + +static_resources: + listeners: + - name: listener_0 + address: + socket_address: + protocol: TCP + address: 0.0.0.0 + port_value: 10002 + per_connection_buffer_limit_bytes: 32768 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + use_remote_address: true + common_http_protocol_options: + idle_timeout: 3600s # 1 hour + headers_with_underscores_action: REJECT_REQUEST + http2_protocol_options: + max_concurrent_streams: 100 + initial_stream_window_size: 65536 # 64 KiB + initial_connection_window_size: 1048576 # 1 MiB + stream_idle_timeout: 300s # 5 mins, must be disabled for long-lived and streaming requests + request_timeout: 300s # 5 mins, must be disabled for long-lived and streaming requests + route_config: + name: local_route + virtual_hosts: + - name: local_service + domains: ["*"] + routes: + - match: + path: "/blockedz" + direct_response: + status: 200 + body: + inline_string: "hidden treasure\n" + - match: + prefix: "/" + direct_response: + status: 200 + body: + inline_string: "normal\n" + http_filters: + - name: envoy.filters.http.router diff --git a/configs/google-vrp/launch_envoy.sh b/configs/google-vrp/launch_envoy.sh new file mode 100755 index 000000000000..1d402df0cbef --- /dev/null +++ b/configs/google-vrp/launch_envoy.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +cd /etc/envoy +envoy "$@" diff --git a/configs/google-vrp/supervisor.conf b/configs/google-vrp/supervisor.conf new file mode 100644 index 000000000000..e019581d079c --- /dev/null +++ b/configs/google-vrp/supervisor.conf @@ -0,0 +1,16 @@ +[supervisord] +nodaemon=true + +[program:envoy-edge] +command=launch_envoy.sh -c /etc/envoy/envoy-edge.yaml %(ENV_ENVOY_EDGE_EXTRA_ARGS)s + --log-format "(edge)[%%Y-%%m-%%d %%T.%%e][%%t][%%l][%%n] %%v" --base-id 0 +redirect_stderr=true +stdout_logfile_maxbytes=0 +stdout_logfile=/dev/stdout + +[program:envoy-origin] +command=launch_envoy.sh -c /etc/envoy/envoy-origin.yaml 
%(ENV_ENVOY_ORIGIN_EXTRA_ARGS)s + --log-format "(origin)[%%Y-%%m-%%d %%T.%%e][%%t][%%l][%%n] %%v" --base-id 1 +redirect_stderr=true +stdout_logfile_maxbytes=0 +stdout_logfile=/dev/stdout diff --git a/docs/build.sh b/docs/build.sh index 0ebb4d085497..bc0c302414a1 100755 --- a/docs/build.sh +++ b/docs/build.sh @@ -31,8 +31,10 @@ else export ENVOY_BLOB_SHA="$BUILD_SHA" fi -SCRIPT_DIR=$(dirname "$0") -API_DIR=$(dirname "$dir")/api +SCRIPT_DIR="$(dirname "$0")" +SRC_DIR="$(dirname "$dir")" +API_DIR="${SRC_DIR}"/api +CONFIGS_DIR="${SRC_DIR}"/configs BUILD_DIR=build_docs [[ -z "${DOCS_OUTPUT_DIR}" ]] && DOCS_OUTPUT_DIR=generated/docs [[ -z "${GENERATED_RST_DIR}" ]] && GENERATED_RST_DIR=generated/rst @@ -115,9 +117,12 @@ generate_api_rst v3 find "${GENERATED_RST_DIR}"/api-v3 -name "*.rst" -print0 | xargs -0 sed -i -e "s#envoy_api_#envoy_v3_api_#g" find "${GENERATED_RST_DIR}"/api-v3 -name "*.rst" -print0 | xargs -0 sed -i -e "s#config_resource_monitors#v3_config_resource_monitors#g" +# xDS protocol spec. mkdir -p ${GENERATED_RST_DIR}/api-docs - -cp -f $API_DIR/xds_protocol.rst "${GENERATED_RST_DIR}/api-docs/xds_protocol.rst" +cp -f "${API_DIR}"/xds_protocol.rst "${GENERATED_RST_DIR}/api-docs/xds_protocol.rst" +# Edge hardening example YAML. +mkdir -p "${GENERATED_RST_DIR}"/configuration/best_practices +cp -f "${CONFIGS_DIR}"/google-vrp/envoy-edge.yaml "${GENERATED_RST_DIR}"/configuration/best_practices rsync -rav $API_DIR/diagrams "${GENERATED_RST_DIR}/api-docs" diff --git a/docs/root/configuration/best_practices/edge.rst b/docs/root/configuration/best_practices/edge.rst index 0a3efe8307bd..e6bc0cbdcc0a 100644 --- a/docs/root/configuration/best_practices/edge.rst +++ b/docs/root/configuration/best_practices/edge.rst @@ -25,97 +25,8 @@ HTTP proxies should additionally configure: * :ref:`HTTP/2 initial connection window size limit ` to 1 MiB. * :ref:`headers_with_underscores_action setting ` to REJECT_REQUEST, to protect upstream services that treat '_' and '-' as interchangeable. -The following is a YAML example of the above recommendation. +The following is a YAML example of the above recommendation (taken from the :ref:`Google VRP +` edge server configuration): -.. code-block:: yaml - - overload_manager: - refresh_interval: 0.25s - resource_monitors: - - name: "envoy.resource_monitors.fixed_heap" - typed_config: - "@type": type.googleapis.com/envoy.config.resource_monitor.fixed_heap.v2alpha.FixedHeapConfig - # TODO: Tune for your system. 
- max_heap_size_bytes: 2147483648 # 2 GiB - actions: - - name: "envoy.overload_actions.shrink_heap" - triggers: - - name: "envoy.resource_monitors.fixed_heap" - threshold: - value: 0.95 - - name: "envoy.overload_actions.stop_accepting_requests" - triggers: - - name: "envoy.resource_monitors.fixed_heap" - threshold: - value: 0.98 - - admin: - access_log_path: "/var/log/envoy_admin.log" - address: - socket_address: - address: 127.0.0.1 - port_value: 9090 - - static_resources: - listeners: - - address: - socket_address: - address: 0.0.0.0 - port_value: 443 - listener_filters: - - name: "envoy.filters.listener.tls_inspector" - typed_config: {} - per_connection_buffer_limit_bytes: 32768 # 32 KiB - filter_chains: - - filter_chain_match: - server_names: ["example.com", "www.example.com"] - transport_socket: - name: envoy.transport_sockets.tls - typed_config: - "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext - common_tls_context: - tls_certificates: - - certificate_chain: { filename: "example_com_cert.pem" } - private_key: { filename: "example_com_key.pem" } - # Uncomment if Envoy is behind a load balancer that exposes client IP address using the PROXY protocol. - # use_proxy_proto: true - filters: - - name: envoy.filters.network.http_connection_manager - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager - stat_prefix: ingress_http - use_remote_address: true - common_http_protocol_options: - idle_timeout: 3600s # 1 hour - headers_with_underscores_action: REJECT_REQUEST - http2_protocol_options: - max_concurrent_streams: 100 - initial_stream_window_size: 65536 # 64 KiB - initial_connection_window_size: 1048576 # 1 MiB - stream_idle_timeout: 300s # 5 mins, must be disabled for long-lived and streaming requests - request_timeout: 300s # 5 mins, must be disabled for long-lived and streaming requests - route_config: - virtual_hosts: - - name: default - domains: "*" - routes: - - match: { prefix: "/" } - route: - cluster: service_foo - idle_timeout: 15s # must be disabled for long-lived and streaming requests - clusters: - name: service_foo - connect_timeout: 15s - per_connection_buffer_limit_bytes: 32768 # 32 KiB - load_assignment: - cluster_name: some_service - endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: - address: 127.0.0.1 - port_value: 8080 - http2_protocol_options: - initial_stream_window_size: 65536 # 64 KiB - initial_connection_window_size: 1048576 # 1 MiB +.. literalinclude:: envoy-edge.yaml + :language: yaml diff --git a/docs/root/intro/arch_overview/security/google_vrp.rst b/docs/root/intro/arch_overview/security/google_vrp.rst new file mode 100644 index 000000000000..5d94cf11e8f8 --- /dev/null +++ b/docs/root/intro/arch_overview/security/google_vrp.rst @@ -0,0 +1,184 @@ +.. _arch_overview_google_vrp: + +Google Vulnerability Reward Program (VRP) +========================================= + +Envoy is a participant in `Google's Vulnerability Reward Program (VRP) +`_. This is open to all security +researchers and will provide rewards for vulnerabilities discovered and reported according to the +rules below. + +.. _arch_overview_google_vrp_rules: + +Rules +----- + +The goal of the VRP is to provide a formal process to honor contributions from external +security researchers to Envoy's security. Vulnerabilities should meet the following conditions +to be eligible for the program: + +1. 
Vulnerabilities must meet one of the below :ref:`objectives
+   `, demonstrated with the supplied Docker-based
+   :ref:`execution environment ` and be consistent with the
+   program's :ref:`threat model `.
+
+2. Vulnerabilities must be reported to envoy-security@googlegroups.com and be kept under embargo
+   while triage and potential security releases occur. Please follow the :repo:`disclosure guidance
+   ` when submitting reports. Disclosure SLOs are documented :repo:`here
+   `. In general, security disclosures are subject to the
+   `Linux Foundation's privacy policy `_ with the added
+   proviso that VRP reports (including reporter e-mail address and name) may be freely shared with
+   Google for VRP purposes.
+
+3. Vulnerabilities must not be previously known in a public forum, e.g. GitHub issue trackers,
+   CVE databases (when previously associated with Envoy), etc. Existing CVEs that have not been
+   previously associated with an Envoy vulnerability are fair game.
+
+4. Vulnerabilities must not also be submitted to a parallel reward program run by Google or
+   `Lyft `_.
+
+Rewards are at the discretion of the Envoy OSS security team and Google. They will be conditioned on
+the above criteria. If multiple instances of the same vulnerability are reported at the same time by
+independent researchers or the vulnerability is already tracked under embargo by the OSS Envoy
+security team, we will aim to fairly divide the reward amongst reporters.
+
+.. _arch_overview_google_vrp_threat_model:
+
+Threat model
+------------
+
+The base threat model matches that of Envoy's :ref:`OSS security posture
+`. We add a number of temporary restrictions to provide a constrained
+attack surface for the initial stages of this program. We exclude any threat from:
+
+* Untrusted control planes.
+* Runtime services such as access logging, external authorization, etc.
+* Untrusted upstreams.
+* DoS attacks except as stipulated below.
+* Any filters apart from the HTTP connection manager network filter and HTTP router filter.
+* Admin console; this is disabled in the execution environment.
+
+We also explicitly exclude any local attacks (e.g. via local processes, shells, etc.) against
+the Envoy process. All attacks must occur via the network data plane on port 10000. Similarly,
+kernel and Docker vulnerabilities are outside the threat model.
+
+In the future we may relax some of these restrictions as we increase the sophistication of the
+program's execution environment.
+
+.. _arch_overview_google_vrp_ee:
+
+Execution environment
+---------------------
+
+We supply Docker images that act as the reference environment for this program:
+
+* `envoyproxy/envoy-google-vrp `_ images
+  are based on Envoy point releases. Only the latest point release at the time of vulnerability
+  submission is eligible for the program.
+
+* `envoyproxy/envoy-google-vrp-dev `_
+  images are based on Envoy master builds. Only builds within the last 5 days at the time of
+  vulnerability submission are eligible for the program. They must not be subject to any
+  publicly disclosed vulnerability at that point in time.
+
+Two Envoy processes are available when these images are launched via `docker run`:
+
+* The *edge* Envoy is listening on port 10000 (HTTPS). It has a :repo:`static configuration
+  ` that is configured according to Envoy's :ref:`edge hardening
+  principles `. It has sinkhole, direct response and request forwarding routing rules (in
+  order):
+
+  1. `/content/*`: route to the origin Envoy server.
+  2. `/*`: return 403 (denied).
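+
+  For illustration, assuming the image is running with port 10000 published locally (as in the
+  `docker run` examples below) and that certificate verification is skipped for the bundled
+  certificate, these two rules could be exercised roughly as follows:
+
+  .. code-block:: bash
+
+     # Matches the /content prefix and is forwarded to the origin Envoy.
+     curl -k https://localhost:10000/content/index.html
+     # Any other path is answered directly by the edge Envoy with a 403.
+     curl -k https://localhost:10000/not-content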
+ + +* The *origin* Envoy is an upstream of the edge Envoy. It has a :repo:`static configuration + ` that features only direct responses, effectively acting + as an HTTP origin server. There are two route rules (in order): + + 1. `/blockedz`: return 200 `hidden treasure`. It should never be possible to have + traffic on the Envoy edge server's 10000 port receive this response unless a + qualifying vulnerability is present. + 2. `/*`: return 200 `normal`. + +When running the Docker images, the following command line options should be supplied: + +* `-m 3g` to ensure that memory is bounded to 3GB. At least this much memory should be available + to the execution environment. Each Envoy process has an overload manager configured to limit + at 1GB. + +* `-e ENVOY_EDGE_EXTRA_ARGS="<...>"` supplies additional CLI args for the edge Envoy. This + needs to be set but can be empty. + +* `-e ENVOY_ORIGIN_EXTRA_ARGS="<...>"` supplies additional CLI args for the origin Envoy. This + needs to be set but can be empty. + +.. _arch_overview_google_vrp_objectives: + +Objectives +---------- + +Vulnerabilities will be evidenced by requests on 10000 that trigger a failure mode +that falls into one of these categories: + +* Query-of-death: requests that cause the Envoy process to segfault or abort + in some immediate way. +* OOM: requests that cause the edge Envoy process to OOM. There should be no more than + 100 connections and streams in total involved to cause this to happen (i.e. brute force + connection/stream DoS is excluded). +* Routing rule bypass: requests that are able to access `hidden treasure`. +* TLS certificate exfiltration: requests that are able to obtain the edge Envoy's + `serverkey.pem`. +* Remote code exploits: any root shell obtained via the network data plane. +* At the discretion of the OSS Envoy security team, sufficiently interesting vulnerabilities that + don't fit the above categories but are likely to fall into the category of high or critical + vulnerabilities. + +Working with the Docker images +------------------------------ + +A basic invocation of the execution environment that will bring up the edge Envoy on local +port 10000 looks like: + +.. code-block:: bash + + docker run -m 3g -p 10000:10000 --name envoy-google-vrp \ + -e ENVOY_EDGE_EXTRA_ARGS="" \ + -e ENVOY_ORIGIN_EXTRA_ARGS="" \ + envoyproxy/envoy-google-vrp-dev:latest + +When debugging, additional args may prove useful, e.g. in order to obtain trace logs, make +use of `wireshark` and `gdb`: + +.. code-block:: bash + + docker run -m 3g -p 10000:10000 --name envoy-google-vrp \ + -e ENVOY_EDGE_EXTRA_ARGS="-l trace" \ + -e ENVOY_ORIGIN_EXTRA_ARGS="-l trace" \ + --cap-add SYS_PTRACE --cap-add NET_RAW --cap-add NET_ADMIN \ + envoyproxy/envoy-google-vrp-dev:latest + +You can obtain a shell in the Docker container with: + +.. code-block:: bash + + docker exec -it envoy-google-vrp /bin/bash + +The Docker images include `gdb`, `strace`, `tshark` (feel free to contribute other +suggestions via PRs updating the :repo:`Docker build file `). + +Rebuilding the Docker image +--------------------------- + +It's helpful to be able to regenerate your own Docker base image for research purposes. +To do this without relying on CI, follow the instructions at the top of +:repo:`ci/docker_rebuild_google-vrp.sh`. An example of this flow looks like: + +.. 
code-block:: bash + + bazel build //source/exe:envoy-static + ./ci/docker_rebuild_google-vrp.sh bazel-bin/source/exe/envoy-static + docker run -m 3g -p 10000:10000 --name envoy-google-vrp \ + -e ENVOY_EDGE_EXTRA_ARGS="" \ + -e ENVOY_ORIGIN_EXTRA_ARGS="" \ + envoy-google-vrp:local diff --git a/docs/root/intro/arch_overview/security/security.rst b/docs/root/intro/arch_overview/security/security.rst index 16409d759de1..4c19cdf54a28 100644 --- a/docs/root/intro/arch_overview/security/security.rst +++ b/docs/root/intro/arch_overview/security/security.rst @@ -5,6 +5,7 @@ Security :maxdepth: 2 threat_model + google_vrp ssl jwt_authn_filter ext_authz_filter diff --git a/test/config_test/example_configs_test.cc b/test/config_test/example_configs_test.cc index 1439e8701af5..951d70a8b5dc 100644 --- a/test/config_test/example_configs_test.cc +++ b/test/config_test/example_configs_test.cc @@ -21,9 +21,9 @@ TEST(ExampleConfigsTest, All) { #ifdef __APPLE__ // freebind/freebind.yaml is not supported on macOS and disabled via Bazel. - EXPECT_EQ(23UL, ConfigTest::run(directory)); + EXPECT_EQ(25UL, ConfigTest::run(directory)); #else - EXPECT_EQ(24UL, ConfigTest::run(directory)); + EXPECT_EQ(26UL, ConfigTest::run(directory)); #endif ConfigTest::testMerge(); From 5f64fe00a90d6c0e8e9be3902014f6b111e0d75c Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Wed, 27 May 2020 20:18:24 -0400 Subject: [PATCH 236/909] http: fixing a duplicate response detail (#11331) Signed-off-by: Alyssa Wilk --- include/envoy/stream_info/stream_info.h | 5 +++-- source/common/http/conn_manager_impl.cc | 3 +-- test/common/http/conn_manager_impl_test.cc | 2 ++ 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/include/envoy/stream_info/stream_info.h b/include/envoy/stream_info/stream_info.h index c6a0318564f9..fbec2554d380 100644 --- a/include/envoy/stream_info/stream_info.h +++ b/include/envoy/stream_info/stream_info.h @@ -95,6 +95,9 @@ struct ResponseCodeDetailValues { // Envoy is doing non-streaming proxying, and the request payload exceeded // configured limits. const std::string RequestPayloadTooLarge = "request_payload_too_large"; + // Envoy is doing non-streaming proxying, and the response payload exceeded + // configured limits. + const std::string ResponsePayloadTooLarge = "response_payload_too_large"; // Envoy is doing streaming proxying, but too much data arrived while waiting // to attempt a retry. const std::string RequestPayloadExceededRetryBufferLimit = @@ -112,8 +115,6 @@ struct ResponseCodeDetailValues { const std::string LowVersion = "low_version"; // The request was rejected due to the Host: or :authority field missing const std::string MissingHost = "missing_host_header"; - // The request was rejected due to the request headers being larger than the configured limit. - const std::string RequestHeadersTooLarge = "request_headers_too_large"; // The request was rejected due to x-envoy-* headers failing strict header validation. const std::string InvalidEnvoyRequestHeaders = "request_headers_failed_strict_check"; // The request was rejected due to the Path or :path header field missing. diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index 94575ac9ff79..f10d3052c567 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -2550,9 +2550,8 @@ void ConnectionManagerImpl::ActiveStreamEncoderFilter::responseDataTooLarge() { // Make sure we won't end up with nested watermark calls from the body buffer. 
parent_.state_.encoder_filters_streaming_ = true; allowIteration(); - parent_.stream_info_.setResponseCodeDetails( - StreamInfo::ResponseCodeDetails::get().RequestHeadersTooLarge); + StreamInfo::ResponseCodeDetails::get().ResponsePayloadTooLarge); // This does not call the standard sendLocalReply because if there is already response data // we do not want to pass a second set of response headers through the filter chain. // Instead, call the encodeHeadersInternal / encodeDataInternal helpers diff --git a/test/common/http/conn_manager_impl_test.cc b/test/common/http/conn_manager_impl_test.cc index a95be8010898..58a22f40874b 100644 --- a/test/common/http/conn_manager_impl_test.cc +++ b/test/common/http/conn_manager_impl_test.cc @@ -4784,6 +4784,8 @@ TEST_F(HttpConnectionManagerImplTest, HitResponseBufferLimitsBeforeHeaders) { EXPECT_EQ("500", headers.getStatusValue()); // Make sure Envoy standard sanitization has been applied. EXPECT_TRUE(headers.Date() != nullptr); + EXPECT_EQ("response_payload_too_large", + decoder_filters_[0]->callbacks_->streamInfo().responseCodeDetails().value()); return FilterHeadersStatus::Continue; })); EXPECT_CALL(response_encoder_, encodeData(_, true)).WillOnce(AddBufferToString(&response_body)); From 8ae7046a1e16ba43420cf66d43dd0d69b500a6ca Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Wed, 27 May 2020 20:28:41 -0400 Subject: [PATCH 237/909] http: replacing more header accessors with the new Value helper (#11285) Signed-off-by: Alyssa Wilk --- source/common/grpc/common.cc | 17 ++++----- source/common/router/config_impl.cc | 32 ++++++++--------- source/common/router/retry_state_impl.cc | 11 +++--- source/common/router/router.cc | 36 ++++++++----------- source/common/router/shadow_writer_impl.cc | 11 +++--- source/common/router/upstream_request.cc | 4 +-- source/common/tracing/http_tracer_impl.cc | 27 +++++++------- source/common/upstream/health_checker_impl.cc | 3 +- .../grpc/http_grpc_access_log_impl.cc | 25 ++++++------- .../clusters/dynamic_forward_proxy/cluster.cc | 2 +- source/extensions/common/aws/utility.cc | 9 +++-- .../extensions/filters/common/expr/context.cc | 4 +-- .../common/ext_authz/ext_authz_http_impl.cc | 2 +- .../filters/common/rbac/matchers.cc | 2 +- .../http/aws_lambda/aws_lambda_filter.cc | 10 +++--- .../filters/http/cache/cache_filter.cc | 9 ++--- .../filters/http/cache/http_cache.cc | 9 ++--- .../http/common/compressor/compressor.cc | 6 ++-- .../filters/http/cors/cors_filter.cc | 5 ++- .../filters/http/csrf/csrf_filter.cc | 19 +++++----- .../filters/http/fault/fault_filter.cc | 6 ++-- .../http/grpc_http1_reverse_bridge/filter.cc | 17 +++++---- .../json_transcoder_filter.cc | 8 ++--- .../filters/http/grpc_web/grpc_web_filter.cc | 16 ++++----- .../filters/http/jwt_authn/extractor.cc | 2 +- .../filters/http/jwt_authn/filter.cc | 3 +- .../filters/http/jwt_authn/matcher.cc | 8 ++--- .../extensions/filters/http/lua/lua_filter.cc | 4 +-- .../filters/http/squash/squash_filter.cc | 2 +- .../common/ot/opentracing_driver_impl.cc | 2 +- .../tracers/xray/xray_tracer_impl.cc | 8 ++--- .../tracers/zipkin/zipkin_tracer_impl.cc | 9 +++-- source/server/admin/admin.cc | 3 +- source/server/admin/admin_filter.cc | 3 +- source/server/admin/runtime_handler.cc | 12 ++----- source/server/admin/runtime_handler.h | 3 -- 36 files changed, 144 insertions(+), 205 deletions(-) diff --git a/source/common/grpc/common.cc b/source/common/grpc/common.cc index 3b74a5223b96..5c4c9234c7bd 100644 --- a/source/common/grpc/common.cc +++ b/source/common/grpc/common.cc @@ -27,15 
+27,12 @@ namespace Envoy { namespace Grpc { bool Common::hasGrpcContentType(const Http::RequestOrResponseHeaderMap& headers) { - const Http::HeaderEntry* content_type = headers.ContentType(); + const absl::string_view content_type = headers.getContentTypeValue(); // Content type is gRPC if it is exactly "application/grpc" or starts with // "application/grpc+". Specifically, something like application/grpc-web is not gRPC. - return content_type != nullptr && - absl::StartsWith(content_type->value().getStringView(), - Http::Headers::get().ContentTypeValues.Grpc) && - (content_type->value().size() == Http::Headers::get().ContentTypeValues.Grpc.size() || - content_type->value() - .getStringView()[Http::Headers::get().ContentTypeValues.Grpc.size()] == '+'); + return absl::StartsWith(content_type, Http::Headers::get().ContentTypeValues.Grpc) && + (content_type.size() == Http::Headers::get().ContentTypeValues.Grpc.size() || + content_type[Http::Headers::get().ContentTypeValues.Grpc.size()] == '+'); } bool Common::isGrpcRequestHeaders(const Http::RequestHeaderMap& headers) { @@ -58,13 +55,13 @@ bool Common::isGrpcResponseHeaders(const Http::ResponseHeaderMap& headers, bool absl::optional Common::getGrpcStatus(const Http::ResponseHeaderOrTrailerMap& trailers, bool allow_user_defined) { - const Http::HeaderEntry* grpc_status_header = trailers.GrpcStatus(); + const absl::string_view grpc_status_header = trailers.getGrpcStatusValue(); uint64_t grpc_status_code; - if (!grpc_status_header || grpc_status_header->value().empty()) { + if (grpc_status_header.empty()) { return absl::nullopt; } - if (!absl::SimpleAtoi(grpc_status_header->value().getStringView(), &grpc_status_code) || + if (!absl::SimpleAtoi(grpc_status_header, &grpc_status_code) || (grpc_status_code > Status::WellKnownGrpcStatus::MaximumKnown && !allow_user_defined)) { return {Status::WellKnownGrpcStatus::InvalidCode}; } diff --git a/source/common/router/config_impl.cc b/source/common/router/config_impl.cc index fb8ec0d8449d..128351c99dd8 100644 --- a/source/common/router/config_impl.cc +++ b/source/common/router/config_impl.cc @@ -47,10 +47,6 @@ namespace { const std::string DEPRECATED_ROUTER_NAME = "envoy.router"; -const absl::string_view getPath(const Http::RequestHeaderMap& headers) { - return headers.Path() ? headers.Path()->value().getStringView() : ""; -} - } // namespace std::string SslRedirector::newPath(const Http::RequestHeaderMap& headers) const { @@ -485,7 +481,8 @@ bool RouteEntryImplBase::matchRoute(const Http::RequestHeaderMap& headers, matches &= Http::HeaderUtility::matchHeaders(headers, config_headers_); if (!config_query_parameters_.empty()) { - Http::Utility::QueryParams query_parameters = Http::Utility::parseQueryString(getPath(headers)); + Http::Utility::QueryParams query_parameters = + Http::Utility::parseQueryString(headers.getPathValue()); matches &= ConfigUtility::matchQueryParams(query_parameters, config_query_parameters_); } @@ -576,7 +573,8 @@ void RouteEntryImplBase::finalizePathHeader(Http::RequestHeaderMap& headers, return; } - std::string path(getPath(headers)); + // TODO(perf): can we avoid the string copy for the common case? 
+ std::string path(headers.getPathValue()); if (insert_envoy_original_path) { headers.setEnvoyOriginalPath(path); } @@ -601,7 +599,7 @@ absl::string_view RouteEntryImplBase::processRequestHost(const Http::RequestHead absl::string_view new_scheme, absl::string_view new_port) const { - absl::string_view request_host = headers.Host()->value().getStringView(); + absl::string_view request_host = headers.getHostValue(); size_t host_end; if (request_host.empty()) { return request_host; @@ -618,7 +616,7 @@ absl::string_view RouteEntryImplBase::processRequestHost(const Http::RequestHead if (host_end != absl::string_view::npos) { absl::string_view request_port = request_host.substr(host_end); - absl::string_view request_protocol = headers.ForwardedProto()->value().getStringView(); + absl::string_view request_protocol = headers.getForwardedProtoValue(); bool remove_port = !new_port.empty(); if (new_scheme != request_protocol) { @@ -650,7 +648,7 @@ std::string RouteEntryImplBase::newPath(const Http::RequestHeaderMap& headers) c final_scheme = Http::Headers::get().SchemeValues.Https; } else { ASSERT(headers.ForwardedProto()); - final_scheme = headers.ForwardedProto()->value().getStringView(); + final_scheme = headers.getForwardedProtoValue(); } if (!port_redirect_.empty()) { @@ -669,7 +667,7 @@ std::string RouteEntryImplBase::newPath(const Http::RequestHeaderMap& headers) c if (!path_redirect_.empty()) { final_path = path_redirect_.c_str(); } else { - final_path = getPath(headers); + final_path = headers.getPathValue(); if (strip_query_) { size_t path_end = final_path.find("?"); if (path_end != absl::string_view::npos) { @@ -909,7 +907,7 @@ RouteConstSharedPtr PrefixRouteEntryImpl::matches(const Http::RequestHeaderMap& const StreamInfo::StreamInfo& stream_info, uint64_t random_value) const { if (RouteEntryImplBase::matchRoute(headers, stream_info, random_value) && - path_matcher_->match(getPath(headers))) { + path_matcher_->match(headers.getPathValue())) { return clusterEntry(headers, random_value); } return nullptr; @@ -931,7 +929,7 @@ RouteConstSharedPtr PathRouteEntryImpl::matches(const Http::RequestHeaderMap& he const StreamInfo::StreamInfo& stream_info, uint64_t random_value) const { if (RouteEntryImplBase::matchRoute(headers, stream_info, random_value) && - path_matcher_->match(getPath(headers))) { + path_matcher_->match(headers.getPathValue())) { return clusterEntry(headers, random_value); } @@ -959,7 +957,7 @@ RegexRouteEntryImpl::RegexRouteEntryImpl( void RegexRouteEntryImpl::rewritePathHeader(Http::RequestHeaderMap& headers, bool insert_envoy_original_path) const { - const absl::string_view path = Http::PathUtil::removeQueryAndFragment(getPath(headers)); + const absl::string_view path = Http::PathUtil::removeQueryAndFragment(headers.getPathValue()); // TODO(yuval-k): This ASSERT can happen if the path was changed by a filter without clearing the // route cache. We should consider if ASSERT-ing is the desired behavior in this case. 
ASSERT(regex_->match(path)); @@ -970,7 +968,7 @@ RouteConstSharedPtr RegexRouteEntryImpl::matches(const Http::RequestHeaderMap& h const StreamInfo::StreamInfo& stream_info, uint64_t random_value) const { if (RouteEntryImplBase::matchRoute(headers, stream_info, random_value)) { - const absl::string_view path = Http::PathUtil::removeQueryAndFragment(getPath(headers)); + const absl::string_view path = Http::PathUtil::removeQueryAndFragment(headers.getPathValue()); if (regex_->match(path)) { return clusterEntry(headers, random_value); } @@ -986,7 +984,7 @@ ConnectRouteEntryImpl::ConnectRouteEntryImpl( void ConnectRouteEntryImpl::rewritePathHeader(Http::RequestHeaderMap& headers, bool insert_envoy_original_path) const { - const absl::string_view path = Http::PathUtil::removeQueryAndFragment(getPath(headers)); + const absl::string_view path = Http::PathUtil::removeQueryAndFragment(headers.getPathValue()); finalizePathHeader(headers, path, insert_envoy_original_path); } @@ -1249,8 +1247,8 @@ const VirtualHostImpl* RouteMatcher::findVirtualHost(const Http::RequestHeaderMa // TODO (@rshriram) Match Origin header in WebSocket // request with VHost, using wildcard match - const std::string host = - Http::LowerCaseString(std::string(headers.Host()->value().getStringView())).get(); + // Lower-case the value of the host header, as hostnames are case insensitive. + const std::string host = absl::AsciiStrToLower(headers.getHostValue()); const auto& iter = virtual_hosts_.find(host); if (iter != virtual_hosts_.end()) { return iter->second.get(); diff --git a/source/common/router/retry_state_impl.cc b/source/common/router/retry_state_impl.cc index 7d29a00332df..09d3b1016804 100644 --- a/source/common/router/retry_state_impl.cc +++ b/source/common/router/retry_state_impl.cc @@ -83,11 +83,10 @@ RetryStateImpl::RetryStateImpl(const RetryPolicy& route_policy, // Merge in the headers. if (request_headers.EnvoyRetryOn()) { - retry_on_ |= parseRetryOn(request_headers.EnvoyRetryOn()->value().getStringView()).first; + retry_on_ |= parseRetryOn(request_headers.getEnvoyRetryOnValue()).first; } if (request_headers.EnvoyRetryGrpcOn()) { - retry_on_ |= - parseRetryGrpcOn(request_headers.EnvoyRetryGrpcOn()->value().getStringView()).first; + retry_on_ |= parseRetryGrpcOn(request_headers.getEnvoyRetryGrpcOnValue()).first; } const auto& retriable_request_headers = route_policy.retriableRequestHeaders(); @@ -107,15 +106,15 @@ RetryStateImpl::RetryStateImpl(const RetryPolicy& route_policy, } if (retry_on_ != 0 && request_headers.EnvoyMaxRetries()) { uint64_t temp; - if (absl::SimpleAtoi(request_headers.EnvoyMaxRetries()->value().getStringView(), &temp)) { + if (absl::SimpleAtoi(request_headers.getEnvoyMaxRetriesValue(), &temp)) { // The max retries header takes precedence if set. retries_remaining_ = temp; } } if (request_headers.EnvoyRetriableStatusCodes()) { - for (const auto code : StringUtil::splitToken( - request_headers.EnvoyRetriableStatusCodes()->value().getStringView(), ",")) { + for (const auto code : + StringUtil::splitToken(request_headers.getEnvoyRetriableStatusCodesValue(), ",")) { unsigned int out; if (absl::SimpleAtoi(code, &out)) { retriable_status_codes_.emplace_back(out); diff --git a/source/common/router/router.cc b/source/common/router/router.cc index 516f5b403c1e..eb0c5650a86c 100644 --- a/source/common/router/router.cc +++ b/source/common/router/router.cc @@ -49,9 +49,7 @@ uint32_t getLength(const Buffer::Instance* instance) { return instance ? 
instanc bool schemeIsHttp(const Http::RequestHeaderMap& downstream_headers, const Network::Connection& connection) { - if (downstream_headers.ForwardedProto() && - downstream_headers.ForwardedProto()->value().getStringView() == - Http::Headers::get().SchemeValues.Http) { + if (downstream_headers.getForwardedProtoValue() == Http::Headers::get().SchemeValues.Http) { return true; } if (!connection.ssl()) { @@ -72,10 +70,6 @@ tcpPool(absl::variant(pool); } -const absl::string_view getPath(const Http::RequestHeaderMap& headers) { - return headers.Path() ? headers.Path()->value().getStringView() : ""; -} - } // namespace // Express percentage as [0, TimeoutPrecisionFactor] because stats do not accept floating point @@ -179,10 +173,10 @@ FilterUtility::finalTimeout(const RouteEntry& route, Http::RequestHeaderMap& req } // See if there is a per try/retry timeout. If it's >= global we just ignore it. - const Http::HeaderEntry* per_try_timeout_entry = - request_headers.EnvoyUpstreamRequestPerTryTimeoutMs(); - if (per_try_timeout_entry) { - if (absl::SimpleAtoi(per_try_timeout_entry->value().getStringView(), &header_timeout)) { + const absl::string_view per_try_timeout_entry = + request_headers.getEnvoyUpstreamRequestPerTryTimeoutMsValue(); + if (!per_try_timeout_entry.empty()) { + if (absl::SimpleAtoi(per_try_timeout_entry, &header_timeout)) { timeout.per_try_timeout_ = std::chrono::milliseconds(header_timeout); } request_headers.removeEnvoyUpstreamRequestPerTryTimeoutMs(); @@ -373,7 +367,7 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, route_ = callbacks_->route(); if (!route_) { config_.stats_.no_route_.inc(); - ENVOY_STREAM_LOG(debug, "no cluster match for URL '{}'", *callbacks_, getPath(headers)); + ENVOY_STREAM_LOG(debug, "no cluster match for URL '{}'", *callbacks_, headers.getPathValue()); callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::NoRouteFound); callbacks_->sendLocalReply(Http::Code::NotFound, "", modify_headers, absl::nullopt, @@ -438,7 +432,7 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, // Set up stat prefixes, etc. 
request_vcluster_ = route_entry_->virtualCluster(headers); ENVOY_STREAM_LOG(debug, "cluster '{}' match for URL '{}'", *callbacks_, - route_entry_->clusterName(), getPath(headers)); + route_entry_->clusterName(), headers.getPathValue()); if (config_.strict_check_headers_ != nullptr) { for (const auto& header : *config_.strict_check_headers_) { @@ -488,8 +482,7 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, const auto& upstream_http_protocol_options = cluster_->upstreamHttpProtocolOptions(); if (upstream_http_protocol_options.has_value()) { - const auto parsed_authority = - Http::Utility::parseAuthority(headers.Host()->value().getStringView()); + const auto parsed_authority = Http::Utility::parseAuthority(headers.getHostValue()); if (!parsed_authority.is_ip_address_ && upstream_http_protocol_options.value().auto_sni()) { callbacks_->streamInfo().filterState()->setData( Network::UpstreamServerName::key(), @@ -611,9 +604,9 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, Filter::HttpOrTcpPool Filter::createConnPool(Upstream::HostDescriptionConstSharedPtr& host) { Filter::HttpOrTcpPool conn_pool; - const bool should_tcp_proxy = route_entry_->connectConfig().has_value() && - downstream_headers_->Method()->value().getStringView() == - Http::Headers::get().MethodValues.Connect; + const bool should_tcp_proxy = + route_entry_->connectConfig().has_value() && + downstream_headers_->getMethodValue() == Http::Headers::get().MethodValues.Connect; if (!should_tcp_proxy) { conn_pool = getHttpConnPool(); @@ -1462,7 +1455,7 @@ bool Filter::convertRequestHeadersForInternalRedirect(Http::RequestHeaderMap& do } // Make sure the redirect response contains a URL to redirect to. - if (internal_redirect.value().getStringView().length() == 0) { + if (internal_redirect.value().getStringView().empty()) { config_.stats_.passthrough_internal_redirect_bad_location_.inc(); return false; } @@ -1496,8 +1489,9 @@ bool Filter::convertRequestHeadersForInternalRedirect(Http::RequestHeaderMap& do config_.stats_.passthrough_internal_redirect_too_many_redirects_.inc(); return false; } - std::string original_host(downstream_headers.Host()->value().getStringView()); - std::string original_path(downstream_headers.Path()->value().getStringView()); + // Copy the old values, so they can be restored if the redirect fails. + const std::string original_host(downstream_headers.getHostValue()); + const std::string original_path(downstream_headers.getPathValue()); const bool scheme_is_set = (downstream_headers.Scheme() != nullptr); Cleanup restore_original_headers( [&downstream_headers, original_host, original_path, scheme_is_set, scheme_is_http]() { diff --git a/source/common/router/shadow_writer_impl.cc b/source/common/router/shadow_writer_impl.cc index 41b0736f60fa..504877c4e643 100644 --- a/source/common/router/shadow_writer_impl.cc +++ b/source/common/router/shadow_writer_impl.cc @@ -21,14 +21,13 @@ void ShadowWriterImpl::shadow(const std::string& cluster, Http::RequestMessagePt return; } - ASSERT(!request->headers().Host()->value().empty()); + ASSERT(!request->headers().getHostValue().empty()); // Switch authority to add a shadow postfix. This allows upstream logging to make more sense. - auto parts = StringUtil::splitToken(request->headers().Host()->value().getStringView(), ":"); + auto parts = StringUtil::splitToken(request->headers().getHostValue(), ":"); ASSERT(!parts.empty() && parts.size() <= 2); - request->headers().setHost( - parts.size() == 2 - ? 
absl::StrJoin(parts, "-shadow:") - : absl::StrCat(request->headers().Host()->value().getStringView(), "-shadow")); + request->headers().setHost(parts.size() == 2 + ? absl::StrJoin(parts, "-shadow:") + : absl::StrCat(request->headers().getHostValue(), "-shadow")); // This is basically fire and forget. We don't handle cancelling. cm_.httpAsyncClientForCluster(cluster).send(std::move(request), *this, options); } diff --git a/source/common/router/upstream_request.cc b/source/common/router/upstream_request.cc index 2cf636fd483a..7e53c292802c 100644 --- a/source/common/router/upstream_request.cc +++ b/source/common/router/upstream_request.cc @@ -380,8 +380,8 @@ void UpstreamRequest::onPoolReady( // Make sure that when we are forwarding CONNECT payload we do not do so until // the upstream has accepted the CONNECT request. - if (conn_pool_->protocol().has_value() && headers->Method() && - headers->Method()->value().getStringView() == Http::Headers::get().MethodValues.Connect) { + if (conn_pool_->protocol().has_value() && + headers->getMethodValue() == Http::Headers::get().MethodValues.Connect) { paused_for_connect_ = true; } diff --git a/source/common/tracing/http_tracer_impl.cc b/source/common/tracing/http_tracer_impl.cc index 3933b100ad23..2548b7ed6581 100644 --- a/source/common/tracing/http_tracer_impl.cc +++ b/source/common/tracing/http_tracer_impl.cc @@ -25,13 +25,14 @@ namespace Envoy { namespace Tracing { -// TODO(mattklein123) PERF: Avoid string creations/copies in this entire file. +// TODO(perf): Avoid string creations/copies in this entire file. static std::string buildResponseCode(const StreamInfo::StreamInfo& info) { return info.responseCode() ? std::to_string(info.responseCode().value()) : "0"; } -static std::string valueOrDefault(const Http::HeaderEntry* header, const char* default_value) { - return header ? std::string(header->value().getStringView()) : default_value; +static absl::string_view valueOrDefault(const Http::HeaderEntry* header, + const char* default_value) { + return header ? header->value().getStringView() : default_value; } static std::string buildUrl(const Http::RequestHeaderMap& request_headers, @@ -39,16 +40,16 @@ static std::string buildUrl(const Http::RequestHeaderMap& request_headers, if (!request_headers.Path()) { return ""; } - std::string path(request_headers.EnvoyOriginalPath() - ? request_headers.EnvoyOriginalPath()->value().getStringView() - : request_headers.Path()->value().getStringView()); + absl::string_view path(request_headers.EnvoyOriginalPath() + ? request_headers.getEnvoyOriginalPathValue() + : request_headers.getPathValue()); if (path.length() > max_path_length) { path = path.substr(0, max_path_length); } - return absl::StrCat(valueOrDefault(request_headers.ForwardedProto(), ""), "://", - valueOrDefault(request_headers.Host(), ""), path); + return absl::StrCat(request_headers.getForwardedProtoValue(), "://", + request_headers.getHostValue(), path); } const std::string HttpTracerUtility::IngressOperation = "ingress"; @@ -160,13 +161,11 @@ void HttpTracerUtility::finalizeDownstreamSpan(Span& span, // Pre response data. 
if (request_headers) { if (request_headers->RequestId()) { - span.setTag(Tracing::Tags::get().GuidXRequestId, - std::string(request_headers->RequestId()->value().getStringView())); + span.setTag(Tracing::Tags::get().GuidXRequestId, request_headers->getRequestIdValue()); } span.setTag(Tracing::Tags::get().HttpUrl, buildUrl(*request_headers, tracing_config.maxPathTagLength())); - span.setTag(Tracing::Tags::get().HttpMethod, - std::string(request_headers->Method()->value().getStringView())); + span.setTag(Tracing::Tags::get().HttpMethod, request_headers->getMethodValue()); span.setTag(Tracing::Tags::get().DownstreamCluster, valueOrDefault(request_headers->EnvoyDownstreamServiceCluster(), "-")); span.setTag(Tracing::Tags::get().UserAgent, valueOrDefault(request_headers->UserAgent(), "-")); @@ -184,7 +183,7 @@ void HttpTracerUtility::finalizeDownstreamSpan(Span& span, if (request_headers->ClientTraceId()) { span.setTag(Tracing::Tags::get().GuidXClientTraceId, - std::string(request_headers->ClientTraceId()->value().getStringView())); + request_headers->getClientTraceIdValue()); } if (Grpc::Common::isGrpcRequestHeaders(*request_headers)) { @@ -283,7 +282,7 @@ SpanPtr HttpTracerImpl::startSpan(const Config& config, Http::RequestHeaderMap& if (config.operationName() == OperationName::Egress) { span_name.append(" "); - span_name.append(std::string(request_headers.Host()->value().getStringView())); + span_name.append(std::string(request_headers.getHostValue())); } SpanPtr active_span = driver_->startSpan(config, request_headers, span_name, diff --git a/source/common/upstream/health_checker_impl.cc b/source/common/upstream/health_checker_impl.cc index 17a1f1fbdab7..62b2397f9329 100644 --- a/source/common/upstream/health_checker_impl.cc +++ b/source/common/upstream/health_checker_impl.cc @@ -305,8 +305,7 @@ HttpHealthCheckerImpl::HttpActiveHealthCheckSession::healthCheckResult() { parent_.stats_.verify_cluster_.inc(); std::string service_cluster_healthchecked = response_headers_->EnvoyUpstreamHealthCheckedCluster() - ? std::string( - response_headers_->EnvoyUpstreamHealthCheckedCluster()->value().getStringView()) + ? std::string(response_headers_->getEnvoyUpstreamHealthCheckedClusterValue()) : EMPTY_STRING; if (parent_.service_name_matcher_->match(service_cluster_healthchecked)) { return degraded ? HealthCheckResult::Degraded : HealthCheckResult::Succeeded; diff --git a/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.cc b/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.cc index c08606f30c30..fc73d1b649fb 100644 --- a/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.cc +++ b/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.cc @@ -75,40 +75,35 @@ void HttpGrpcAccessLog::emitLog(const Http::RequestHeaderMap& request_headers, // TODO(mattklein123): Populate port field. 
auto* request_properties = log_entry.mutable_request(); if (request_headers.Scheme() != nullptr) { - request_properties->set_scheme(std::string(request_headers.Scheme()->value().getStringView())); + request_properties->set_scheme(std::string(request_headers.getSchemeValue())); } if (request_headers.Host() != nullptr) { - request_properties->set_authority(std::string(request_headers.Host()->value().getStringView())); + request_properties->set_authority(std::string(request_headers.getHostValue())); } if (request_headers.Path() != nullptr) { - request_properties->set_path(std::string(request_headers.Path()->value().getStringView())); + request_properties->set_path(std::string(request_headers.getPathValue())); } if (request_headers.UserAgent() != nullptr) { - request_properties->set_user_agent( - std::string(request_headers.UserAgent()->value().getStringView())); + request_properties->set_user_agent(std::string(request_headers.getUserAgentValue())); } if (request_headers.Referer() != nullptr) { - request_properties->set_referer( - std::string(request_headers.Referer()->value().getStringView())); + request_properties->set_referer(std::string(request_headers.getRefererValue())); } if (request_headers.ForwardedFor() != nullptr) { - request_properties->set_forwarded_for( - std::string(request_headers.ForwardedFor()->value().getStringView())); + request_properties->set_forwarded_for(std::string(request_headers.getForwardedForValue())); } if (request_headers.RequestId() != nullptr) { - request_properties->set_request_id( - std::string(request_headers.RequestId()->value().getStringView())); + request_properties->set_request_id(std::string(request_headers.getRequestIdValue())); } if (request_headers.EnvoyOriginalPath() != nullptr) { - request_properties->set_original_path( - std::string(request_headers.EnvoyOriginalPath()->value().getStringView())); + request_properties->set_original_path(std::string(request_headers.getEnvoyOriginalPathValue())); } request_properties->set_request_headers_bytes(request_headers.byteSize()); request_properties->set_request_body_bytes(stream_info.bytesReceived()); if (request_headers.Method() != nullptr) { envoy::config::core::v3::RequestMethod method = envoy::config::core::v3::METHOD_UNSPECIFIED; - envoy::config::core::v3::RequestMethod_Parse( - std::string(request_headers.Method()->value().getStringView()), &method); + envoy::config::core::v3::RequestMethod_Parse(std::string(request_headers.getMethodValue()), + &method); request_properties->set_request_method(method); } if (!request_headers_to_log_.empty()) { diff --git a/source/extensions/clusters/dynamic_forward_proxy/cluster.cc b/source/extensions/clusters/dynamic_forward_proxy/cluster.cc index c671aeb3c795..e79e6e019756 100644 --- a/source/extensions/clusters/dynamic_forward_proxy/cluster.cc +++ b/source/extensions/clusters/dynamic_forward_proxy/cluster.cc @@ -171,7 +171,7 @@ Cluster::LoadBalancer::chooseHost(Upstream::LoadBalancerContext* context) { absl::string_view host; if (context->downstreamHeaders()) { - host = context->downstreamHeaders()->Host()->value().getStringView(); + host = context->downstreamHeaders()->getHostValue(); } else if (context->downstreamConnection()) { host = context->downstreamConnection()->requestedServerName(); } diff --git a/source/extensions/common/aws/utility.cc b/source/extensions/common/aws/utility.cc index fb9c00918f06..f13012860e70 100644 --- a/source/extensions/common/aws/utility.cc +++ b/source/extensions/common/aws/utility.cc @@ -48,15 +48,14 @@ 
Utility::canonicalizeHeaders(const Http::RequestHeaderMap& headers) { // The AWS SDK has a quirk where it removes "default ports" (80, 443) from the host headers // Additionally, we canonicalize the :authority header as "host" // TODO(lavignes): This may need to be tweaked to canonicalize :authority for HTTP/2 requests - const auto* authority_header = headers.Host(); - if (authority_header != nullptr && !authority_header->value().empty()) { - const auto& value = authority_header->value().getStringView(); - const auto parts = StringUtil::splitToken(value, ":"); + const absl::string_view authority_header = headers.getHostValue(); + if (!authority_header.empty()) { + const auto parts = StringUtil::splitToken(authority_header, ":"); if (parts.size() > 1 && (parts[1] == "80" || parts[1] == "443")) { // Has default port, so use only the host part out.emplace(Http::Headers::get().HostLegacy.get(), std::string(parts[0])); } else { - out.emplace(Http::Headers::get().HostLegacy.get(), std::string(value)); + out.emplace(Http::Headers::get().HostLegacy.get(), std::string(authority_header)); } } return out; diff --git a/source/extensions/filters/common/expr/context.cc b/source/extensions/filters/common/expr/context.cc index 6df67169d09e..44929440b70d 100644 --- a/source/extensions/filters/common/expr/context.cc +++ b/source/extensions/filters/common/expr/context.cc @@ -67,7 +67,7 @@ absl::optional RequestWrapper::operator[](CelValue key) const { // (which is not available at the time of the request headers) if (headers_.value_ != nullptr && headers_.value_->ContentLength() != nullptr) { int64_t length; - if (absl::SimpleAtoi(headers_.value_->ContentLength()->value().getStringView(), &length)) { + if (absl::SimpleAtoi(headers_.value_->getContentLengthValue(), &length)) { return CelValue::CreateInt64(length); } } else { @@ -93,7 +93,7 @@ absl::optional RequestWrapper::operator[](CelValue key) const { if (value == Path) { return convertHeaderEntry(headers_.value_->Path()); } else if (value == UrlPath) { - absl::string_view path = headers_.value_->Path()->value().getStringView(); + absl::string_view path = headers_.value_->getPathValue(); size_t query_offset = path.find('?'); if (query_offset == absl::string_view::npos) { return CelValue::CreateStringView(path); diff --git a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc index e096d679af05..42e3864943a0 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc +++ b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc @@ -297,7 +297,7 @@ ResponsePtr RawHttpClientImpl::toResponse(Http::ResponseMessagePtr message) { // Set an error status if parsing status code fails. A Forbidden response is sent to the client // if the filter has not been configured with failure_mode_allow. 
uint64_t status_code{}; - if (!absl::SimpleAtoi(message->headers().Status()->value().getStringView(), &status_code)) { + if (!absl::SimpleAtoi(message->headers().getStatusValue(), &status_code)) { ENVOY_LOG(warn, "ext_authz HTTP client failed to parse the HTTP status code."); span_->setTag(Tracing::Tags::get().Error, Tracing::Tags::get().True); return std::make_unique(errorResponse()); diff --git a/source/extensions/filters/common/rbac/matchers.cc b/source/extensions/filters/common/rbac/matchers.cc index 123f394d6523..81b5be5885bf 100644 --- a/source/extensions/filters/common/rbac/matchers.cc +++ b/source/extensions/filters/common/rbac/matchers.cc @@ -211,7 +211,7 @@ bool PathMatcher::matches(const Network::Connection&, const Envoy::Http::Request if (headers.Path() == nullptr) { return false; } - return path_matcher_.match(headers.Path()->value().getStringView()); + return path_matcher_.match(headers.getPathValue()); } } // namespace RBAC diff --git a/source/extensions/filters/http/aws_lambda/aws_lambda_filter.cc b/source/extensions/filters/http/aws_lambda/aws_lambda_filter.cc index 46dbb33a2624..cd702fcba3c9 100644 --- a/source/extensions/filters/http/aws_lambda/aws_lambda_filter.cc +++ b/source/extensions/filters/http/aws_lambda/aws_lambda_filter.cc @@ -93,8 +93,7 @@ bool isContentTypeTextual(const Http::RequestOrResponseHeaderMap& headers) { return false; } - const Http::LowerCaseString content_type_value{ - std::string(headers.ContentType()->value().getStringView())}; + const Http::LowerCaseString content_type_value{std::string(headers.getContentTypeValue())}; if (content_type_value.get() == Http::Headers::get().ContentTypeValues.Json) { return true; } @@ -268,11 +267,11 @@ void Filter::jsonizeRequest(Http::RequestHeaderMap const& headers, const Buffer: using source::extensions::filters::http::aws_lambda::Request; Request json_req; if (headers.Path()) { - json_req.set_raw_path(std::string(headers.Path()->value().getStringView())); + json_req.set_raw_path(std::string(headers.getPathValue())); } if (headers.Method()) { - json_req.set_method(std::string(headers.Method()->value().getStringView())); + json_req.set_method(std::string(headers.getMethodValue())); } // Wrap the headers @@ -297,8 +296,7 @@ void Filter::jsonizeRequest(Http::RequestHeaderMap const& headers, const Buffer: // Wrap the Query String if (headers.Path()) { - for (auto&& kv_pair : - Http::Utility::parseQueryString(headers.Path()->value().getStringView())) { + for (auto&& kv_pair : Http::Utility::parseQueryString(headers.getPathValue())) { json_req.mutable_query_string_parameters()->insert({kv_pair.first, kv_pair.second}); } } diff --git a/source/extensions/filters/http/cache/cache_filter.cc b/source/extensions/filters/http/cache/cache_filter.cc index 55f477c24cb2..53bdc5cd5344 100644 --- a/source/extensions/filters/http/cache/cache_filter.cc +++ b/source/extensions/filters/http/cache/cache_filter.cc @@ -28,14 +28,11 @@ bool CacheFilter::isCacheableRequest(Http::RequestHeaderMap& headers) { } bool CacheFilter::isCacheableResponse(Http::ResponseHeaderMap& headers) { - const Http::HeaderEntry* cache_control = headers.CacheControl(); + const absl::string_view cache_control = headers.getCacheControlValue(); // TODO(toddmgreer): fully check for cacheability. See for example // https://github.com/apache/incubator-pagespeed-mod/blob/master/pagespeed/kernel/http/caching_headers.h. 
- if (cache_control) { - return !StringUtil::caseFindToken(cache_control->value().getStringView(), ",", - Http::Headers::get().CacheControlValues.Private); - } - return false; + return !StringUtil::caseFindToken(cache_control, ",", + Http::Headers::get().CacheControlValues.Private); } CacheFilter::CacheFilter(const envoy::extensions::filters::http::cache::v3alpha::CacheConfig&, diff --git a/source/extensions/filters/http/cache/http_cache.cc b/source/extensions/filters/http/cache/http_cache.cc index 7ae556d4891b..213ae1d7b8ca 100644 --- a/source/extensions/filters/http/cache/http_cache.cc +++ b/source/extensions/filters/http/cache/http_cache.cc @@ -38,10 +38,7 @@ std::ostream& operator<<(std::ostream& os, const AdjustedByteRange& range) { } LookupRequest::LookupRequest(const Http::RequestHeaderMap& request_headers, SystemTime timestamp) - : timestamp_(timestamp), - request_cache_control_(request_headers.CacheControl() == nullptr - ? "" - : request_headers.CacheControl()->value().getStringView()) { + : timestamp_(timestamp), request_cache_control_(request_headers.getCacheControlValue()) { // These ASSERTs check prerequisites. A request without these headers can't be looked up in cache; // CacheFilter doesn't create LookupRequests for such requests. ASSERT(request_headers.Path(), "Can't form cache lookup key for malformed Http::RequestHeaderMap " @@ -60,8 +57,8 @@ LookupRequest::LookupRequest(const Http::RequestHeaderMap& request_headers, Syst // TODO(toddmgreer): Parse Range header into request_range_spec_, and handle the resultant // vector in CacheFilter::onOkHeaders. key_.set_cluster_name("cluster_name_goes_here"); - key_.set_host(std::string(request_headers.Host()->value().getStringView())); - key_.set_path(std::string(request_headers.Path()->value().getStringView())); + key_.set_host(std::string(request_headers.getHostValue())); + key_.set_path(std::string(request_headers.getPathValue())); key_.set_clear_http(forwarded_proto == scheme_values.Http); } diff --git a/source/extensions/filters/http/common/compressor/compressor.cc b/source/extensions/filters/http/common/compressor/compressor.cc index ac2fc769ec0f..1b961017fcbc 100644 --- a/source/extensions/filters/http/common/compressor/compressor.cc +++ b/source/extensions/filters/http/common/compressor/compressor.cc @@ -371,10 +371,8 @@ bool CompressorFilter::isMinimumContentLength(Http::ResponseHeaderMap& headers) return is_minimum_content_length; } - const Http::HeaderEntry* transfer_encoding = headers.TransferEncoding(); - return (transfer_encoding && - StringUtil::caseFindToken(transfer_encoding->value().getStringView(), ",", - Http::Headers::get().TransferEncodingValues.Chunked)); + return StringUtil::caseFindToken(headers.getTransferEncodingValue(), ",", + Http::Headers::get().TransferEncodingValues.Chunked); } bool CompressorFilter::isTransferEncodingAllowed(Http::ResponseHeaderMap& headers) const { diff --git a/source/extensions/filters/http/cors/cors_filter.cc b/source/extensions/filters/http/cors/cors_filter.cc index 34f2576aabfd..e482ca565b33 100644 --- a/source/extensions/filters/http/cors/cors_filter.cc +++ b/source/extensions/filters/http/cors/cors_filter.cc @@ -53,9 +53,8 @@ Http::FilterHeadersStatus CorsFilter::decodeHeaders(Http::RequestHeaderMap& head is_cors_request_ = true; - const auto method = headers.Method(); - if (method == nullptr || - method->value().getStringView() != Http::Headers::get().MethodValues.Options) { + const absl::string_view method = headers.getMethodValue(); + if (method != 
Http::Headers::get().MethodValues.Options) { return Http::FilterHeadersStatus::Continue; } diff --git a/source/extensions/filters/http/csrf/csrf_filter.cc b/source/extensions/filters/http/csrf/csrf_filter.cc index 396dc056c87d..dbd0f5f02c0e 100644 --- a/source/extensions/filters/http/csrf/csrf_filter.cc +++ b/source/extensions/filters/http/csrf/csrf_filter.cc @@ -22,37 +22,36 @@ using RcDetails = ConstSingleton; namespace { bool isModifyMethod(const Http::RequestHeaderMap& headers) { - const Envoy::Http::HeaderEntry* method = headers.Method(); - if (method == nullptr) { + const absl::string_view method_type = headers.getMethodValue(); + if (method_type.empty()) { return false; } - const absl::string_view method_type = method->value().getStringView(); const auto& method_values = Http::Headers::get().MethodValues; return (method_type == method_values.Post || method_type == method_values.Put || method_type == method_values.Delete || method_type == method_values.Patch); } -absl::string_view hostAndPort(const Http::HeaderEntry* header) { +absl::string_view hostAndPort(const absl::string_view header) { Http::Utility::Url absolute_url; - if (header != nullptr && !header->value().empty()) { - if (absolute_url.initialize(header->value().getStringView(), false)) { + if (!header.empty()) { + if (absolute_url.initialize(header, false)) { return absolute_url.hostAndPort(); } - return header->value().getStringView(); + return header; } return EMPTY_STRING; } absl::string_view sourceOriginValue(const Http::RequestHeaderMap& headers) { - const absl::string_view origin = hostAndPort(headers.Origin()); + const absl::string_view origin = hostAndPort(headers.getOriginValue()); if (origin != EMPTY_STRING) { return origin; } - return hostAndPort(headers.Referer()); + return hostAndPort(headers.getRefererValue()); } absl::string_view targetOriginValue(const Http::RequestHeaderMap& headers) { - return hostAndPort(headers.Host()); + return hostAndPort(headers.getHostValue()); } static CsrfStats generateStats(const std::string& prefix, Stats::Scope& scope) { diff --git a/source/extensions/filters/http/fault/fault_filter.cc b/source/extensions/filters/http/fault/fault_filter.cc index 67d1f9ce8f17..217471da0801 100644 --- a/source/extensions/filters/http/fault/fault_filter.cc +++ b/source/extensions/filters/http/fault/fault_filter.cc @@ -136,8 +136,7 @@ Http::FilterHeadersStatus FaultFilter::decodeHeaders(Http::RequestHeaderMap& hea } if (headers.EnvoyDownstreamServiceCluster()) { - downstream_cluster_ = - std::string(headers.EnvoyDownstreamServiceCluster()->value().getStringView()); + downstream_cluster_ = std::string(headers.getEnvoyDownstreamServiceClusterValue()); if (!downstream_cluster_.empty()) { downstream_cluster_storage_ = std::make_unique( downstream_cluster_, config_->scope().symbolTable()); @@ -458,8 +457,7 @@ bool FaultFilter::matchesDownstreamNodes(const Http::RequestHeaderMap& headers) return false; } - const absl::string_view downstream_node = - headers.EnvoyDownstreamServiceNode()->value().getStringView(); + const absl::string_view downstream_node = headers.getEnvoyDownstreamServiceNodeValue(); return fault_settings_->downstreamNodes().find(downstream_node) != fault_settings_->downstreamNodes().end(); } diff --git a/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.cc b/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.cc index 33e88bcb8425..955bf6416829 100644 --- a/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.cc +++ 
b/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.cc @@ -43,20 +43,20 @@ std::string badContentTypeMessage(const Http::ResponseHeaderMap& headers) { if (headers.ContentType() != nullptr) { return fmt::format( "envoy reverse bridge: upstream responded with unsupported content-type {}, status code {}", - headers.ContentType()->value().getStringView(), headers.Status()->value().getStringView()); + headers.getContentTypeValue(), headers.getStatusValue()); } else { return fmt::format( "envoy reverse bridge: upstream responded with no content-type header, status code {}", - headers.Status()->value().getStringView()); + headers.getStatusValue()); } } void adjustContentLength(Http::RequestOrResponseHeaderMap& headers, const std::function& adjustment) { - auto length_header = headers.ContentLength(); - if (length_header != nullptr) { + auto length_header = headers.getContentLengthValue(); + if (!length_header.empty()) { uint64_t length; - if (absl::SimpleAtoi(length_header->value().getStringView(), &length)) { + if (absl::SimpleAtoi(length_header, &length)) { if (length != 0) { headers.setContentLength(adjustment(length)); } @@ -91,7 +91,7 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, // We keep track of the original content-type to ensure that we handle // gRPC content type variations such as application/grpc+proto. - content_type_ = std::string(headers.ContentType()->value().getStringView()); + content_type_ = std::string(headers.getContentTypeValue()); headers.setContentType(upstream_content_type_); headers.setAccept(upstream_content_type_); @@ -128,12 +128,11 @@ Http::FilterDataStatus Filter::decodeData(Buffer::Instance& buffer, bool) { Http::FilterHeadersStatus Filter::encodeHeaders(Http::ResponseHeaderMap& headers, bool) { if (enabled_) { - auto content_type = headers.ContentType(); + absl::string_view content_type = headers.getContentTypeValue(); // If the response from upstream does not have the correct content-type, // perform an early return with a useful error message in grpc-message. 
- if (content_type == nullptr || - content_type->value().getStringView() != upstream_content_type_) { + if (content_type != upstream_content_type_) { headers.setGrpcMessage(badContentTypeMessage(headers)); headers.setGrpcStatus(Envoy::Grpc::Status::WellKnownGrpcStatus::Unknown); headers.setStatus(enumToInt(Http::Code::OK)); diff --git a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc index 980a614a5efb..526bf1ac8abe 100644 --- a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc +++ b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc @@ -263,8 +263,8 @@ ProtobufUtil::Status JsonTranscoderConfig::createTranscoder( return ProtobufUtil::Status(Code::INVALID_ARGUMENT, "Request headers has application/grpc content-type"); } - const std::string method(headers.Method()->value().getStringView()); - std::string path(headers.Path()->value().getStringView()); + const std::string method(headers.getMethodValue()); + std::string path(headers.getPathValue()); std::string args; const size_t pos = path.find('?'); @@ -364,7 +364,7 @@ Http::FilterHeadersStatus JsonTranscoderFilter::decodeHeaders(Http::RequestHeade if (method_->request_type_is_http_body_) { if (headers.ContentType() != nullptr) { - absl::string_view content_type = headers.ContentType()->value().getStringView(); + absl::string_view content_type = headers.getContentTypeValue(); content_type_.assign(content_type.begin(), content_type.end()); } @@ -386,7 +386,7 @@ Http::FilterHeadersStatus JsonTranscoderFilter::decodeHeaders(Http::RequestHeade headers.removeContentLength(); headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Grpc); - headers.setEnvoyOriginalPath(headers.Path()->value().getStringView()); + headers.setEnvoyOriginalPath(headers.getPathValue()); headers.setPath("/" + method_->descriptor_->service()->full_name() + "/" + method_->descriptor_->name()); headers.setReferenceMethod(Http::Headers::get().MethodValues.Post); diff --git a/source/extensions/filters/http/grpc_web/grpc_web_filter.cc b/source/extensions/filters/http/grpc_web/grpc_web_filter.cc index b85e05c32962..727d7110e8f2 100644 --- a/source/extensions/filters/http/grpc_web/grpc_web_filter.cc +++ b/source/extensions/filters/http/grpc_web/grpc_web_filter.cc @@ -52,7 +52,6 @@ bool GrpcWebFilter::isGrpcWebRequest(const Http::RequestHeaderMap& headers) { // Implements StreamDecoderFilter. // TODO(fengli): Implements the subtypes of gRPC-Web content-type other than proto, like +json, etc. Http::FilterHeadersStatus GrpcWebFilter::decodeHeaders(Http::RequestHeaderMap& headers, bool) { - const Http::HeaderEntry* content_type = headers.ContentType(); if (!isGrpcWebRequest(headers)) { return Http::FilterHeadersStatus::Continue; } @@ -64,20 +63,17 @@ Http::FilterHeadersStatus GrpcWebFilter::decodeHeaders(Http::RequestHeaderMap& h headers.removeContentLength(); setupStatTracking(headers); - if (content_type != nullptr && (Http::Headers::get().ContentTypeValues.GrpcWebText == - content_type->value().getStringView() || - Http::Headers::get().ContentTypeValues.GrpcWebTextProto == - content_type->value().getStringView())) { + const absl::string_view content_type = headers.getContentTypeValue(); + if (content_type == Http::Headers::get().ContentTypeValues.GrpcWebText || + content_type == Http::Headers::get().ContentTypeValues.GrpcWebTextProto) { // Checks whether gRPC-Web client is sending base64 encoded request. 
is_text_request_ = true; } headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Grpc); - const Http::HeaderEntry* accept = headers.Accept(); - if (accept != nullptr && - (Http::Headers::get().ContentTypeValues.GrpcWebText == accept->value().getStringView() || - Http::Headers::get().ContentTypeValues.GrpcWebTextProto == - accept->value().getStringView())) { + const absl::string_view accept = headers.getAcceptValue(); + if (accept == Http::Headers::get().ContentTypeValues.GrpcWebText || + accept == Http::Headers::get().ContentTypeValues.GrpcWebTextProto) { // Checks whether gRPC-Web client is asking for base64 encoded response. is_text_response_ = true; } diff --git a/source/extensions/filters/http/jwt_authn/extractor.cc b/source/extensions/filters/http/jwt_authn/extractor.cc index fb1ebb21a091..fda2c37c91ce 100644 --- a/source/extensions/filters/http/jwt_authn/extractor.cc +++ b/source/extensions/filters/http/jwt_authn/extractor.cc @@ -208,7 +208,7 @@ ExtractorImpl::extract(const Http::RequestHeaderMap& headers) const { } // Check query parameter locations. - const auto& params = Http::Utility::parseQueryString(headers.Path()->value().getStringView()); + const auto& params = Http::Utility::parseQueryString(headers.getPathValue()); for (const auto& location_it : param_locations_) { const auto& param_key = location_it.first; const auto& location_spec = location_it.second; diff --git a/source/extensions/filters/http/jwt_authn/filter.cc b/source/extensions/filters/http/jwt_authn/filter.cc index 2035727a65d2..8cc7d85e56ac 100644 --- a/source/extensions/filters/http/jwt_authn/filter.cc +++ b/source/extensions/filters/http/jwt_authn/filter.cc @@ -17,8 +17,7 @@ namespace JwtAuthn { namespace { bool isCorsPreflightRequest(const Http::RequestHeaderMap& headers) { - return headers.Method() && - headers.Method()->value().getStringView() == Http::Headers::get().MethodValues.Options && + return headers.getMethodValue() == Http::Headers::get().MethodValues.Options && headers.Origin() && !headers.Origin()->value().empty() && headers.AccessControlRequestMethod() && !headers.AccessControlRequestMethod()->value().empty(); diff --git a/source/extensions/filters/http/jwt_authn/matcher.cc b/source/extensions/filters/http/jwt_authn/matcher.cc index 526387c7eab0..622d73f022ae 100644 --- a/source/extensions/filters/http/jwt_authn/matcher.cc +++ b/source/extensions/filters/http/jwt_authn/matcher.cc @@ -42,7 +42,7 @@ class BaseMatcherImpl : public Matcher, public Logger::Loggable matches &= Http::HeaderUtility::matchHeaders(headers, config_headers_); if (!config_query_parameters_.empty()) { Http::Utility::QueryParams query_parameters = - Http::Utility::parseQueryString(headers.Path()->value().getStringView()); + Http::Utility::parseQueryString(headers.getPathValue()); matches &= ConfigUtility::matchQueryParams(query_parameters, config_query_parameters_); } return matches; @@ -66,8 +66,7 @@ class PrefixMatcherImpl : public BaseMatcherImpl { path_matcher_(Matchers::PathMatcher::createPrefix(prefix_, !case_sensitive_)) {} bool matches(const Http::RequestHeaderMap& headers) const override { - if (BaseMatcherImpl::matchRoute(headers) && - path_matcher_->match(headers.Path()->value().getStringView())) { + if (BaseMatcherImpl::matchRoute(headers) && path_matcher_->match(headers.getPathValue())) { ENVOY_LOG(debug, "Prefix requirement '{}' matched.", prefix_); return true; } @@ -90,8 +89,7 @@ class PathMatcherImpl : public BaseMatcherImpl { path_matcher_(Matchers::PathMatcher::createExact(path_, 
!case_sensitive_)) {} bool matches(const Http::RequestHeaderMap& headers) const override { - if (BaseMatcherImpl::matchRoute(headers) && - path_matcher_->match(headers.Path()->value().getStringView())) { + if (BaseMatcherImpl::matchRoute(headers) && path_matcher_->match(headers.getPathValue())) { ENVOY_LOG(debug, "Path requirement '{}' matched.", path_); return true; } diff --git a/source/extensions/filters/http/lua/lua_filter.cc b/source/extensions/filters/http/lua/lua_filter.cc index fc935fa8c33b..3429627624d4 100644 --- a/source/extensions/filters/http/lua/lua_filter.cc +++ b/source/extensions/filters/http/lua/lua_filter.cc @@ -244,9 +244,7 @@ int StreamHandleWrapper::luaRespond(lua_State* state) { buildHeadersFromTable(*headers, state, 2); uint64_t status; - if (headers->Status() == nullptr || - !absl::SimpleAtoi(headers->Status()->value().getStringView(), &status) || status < 200 || - status >= 600) { + if (!absl::SimpleAtoi(headers->getStatusValue(), &status) || status < 200 || status >= 600) { luaL_error(state, ":status must be between 200-599"); } diff --git a/source/extensions/filters/http/squash/squash_filter.cc b/source/extensions/filters/http/squash/squash_filter.cc index ca0c8205f558..0c5d42fe1f6a 100644 --- a/source/extensions/filters/http/squash/squash_filter.cc +++ b/source/extensions/filters/http/squash/squash_filter.cc @@ -198,7 +198,7 @@ void SquashFilter::onCreateAttachmentSuccess(Http::ResponseMessagePtr&& m) { // Get the config object that was created if (Http::Utility::getResponseStatus(m->headers()) != enumToInt(Http::Code::Created)) { ENVOY_LOG(debug, "Squash: can't create attachment object. status {} - not squashing", - m->headers().Status()->value().getStringView()); + m->headers().getStatusValue()); doneSquashing(); } else { std::string debug_attachment_id; diff --git a/source/extensions/tracers/common/ot/opentracing_driver_impl.cc b/source/extensions/tracers/common/ot/opentracing_driver_impl.cc index cf2f798d9b14..1e1ee2e67e54 100644 --- a/source/extensions/tracers/common/ot/opentracing_driver_impl.cc +++ b/source/extensions/tracers/common/ot/opentracing_driver_impl.cc @@ -158,7 +158,7 @@ Tracing::SpanPtr OpenTracingDriver::startSpan(const Tracing::Config& config, if (propagation_mode == PropagationMode::SingleHeader && request_headers.OtSpanContext()) { opentracing::expected> parent_span_ctx_maybe; std::string parent_context = - Base64::decode(std::string(request_headers.OtSpanContext()->value().getStringView())); + Base64::decode(std::string(request_headers.getOtSpanContextValue())); if (!parent_context.empty()) { InputConstMemoryStream istream{parent_context.data(), parent_context.size()}; diff --git a/source/extensions/tracers/xray/xray_tracer_impl.cc b/source/extensions/tracers/xray/xray_tracer_impl.cc index 50637152ead9..cc50ebfeb996 100644 --- a/source/extensions/tracers/xray/xray_tracer_impl.cc +++ b/source/extensions/tracers/xray/xray_tracer_impl.cc @@ -95,11 +95,9 @@ Tracing::SpanPtr Driver::startSpan(const Tracing::Config& config, } if (!should_trace.has_value()) { - const absl::string_view path = - request_headers.Path() ? 
request_headers.Path()->value().getStringView() : ""; - const SamplingRequest request{std::string{request_headers.Host()->value().getStringView()}, - std::string{request_headers.Method()->value().getStringView()}, - std::string{path}}; + const SamplingRequest request{std::string{request_headers.getHostValue()}, + std::string{request_headers.getMethodValue()}, + std::string{request_headers.getPathValue()}}; should_trace = sampling_strategy_->shouldTrace(request); } diff --git a/source/extensions/tracers/zipkin/zipkin_tracer_impl.cc b/source/extensions/tracers/zipkin/zipkin_tracer_impl.cc index 2cb4338c0ad3..25d1e60ee799 100644 --- a/source/extensions/tracers/zipkin/zipkin_tracer_impl.cc +++ b/source/extensions/tracers/zipkin/zipkin_tracer_impl.cc @@ -113,13 +113,12 @@ Tracing::SpanPtr Driver::startSpan(const Tracing::Config& config, auto ret_span_context = extractor.extractSpanContext(sampled); if (!ret_span_context.second) { // Create a root Zipkin span. No context was found in the headers. - new_zipkin_span = tracer.startSpan( - config, std::string(request_headers.Host()->value().getStringView()), start_time); + new_zipkin_span = + tracer.startSpan(config, std::string(request_headers.getHostValue()), start_time); new_zipkin_span->setSampled(sampled); } else { - new_zipkin_span = - tracer.startSpan(config, std::string(request_headers.Host()->value().getStringView()), - start_time, ret_span_context.first); + new_zipkin_span = tracer.startSpan(config, std::string(request_headers.getHostValue()), + start_time, ret_span_context.first); } } catch (const ExtractorException& e) { diff --git a/source/server/admin/admin.cc b/source/server/admin/admin.cc index 4c4fe24018c9..71aca6a62a51 100644 --- a/source/server/admin/admin.cc +++ b/source/server/admin/admin.cc @@ -674,8 +674,7 @@ Http::Code AdminImpl::runCallback(absl::string_view path_and_query, if (path_and_query.compare(0, query_index, handler.prefix_) == 0) { found_handler = true; if (handler.mutates_server_state_) { - const absl::string_view method = - admin_stream.getRequestHeaders().Method()->value().getStringView(); + const absl::string_view method = admin_stream.getRequestHeaders().getMethodValue(); if (method != Http::Headers::get().MethodValues.Post) { ENVOY_LOG(error, "admin path \"{}\" mutates state, method={} rather than POST", handler.prefix_, method); diff --git a/source/server/admin/admin_filter.cc b/source/server/admin/admin_filter.cc index 154698fa79bf..92ac92ad289b 100644 --- a/source/server/admin/admin_filter.cc +++ b/source/server/admin/admin_filter.cc @@ -62,8 +62,7 @@ const Http::RequestHeaderMap& AdminFilter::getRequestHeaders() const { } void AdminFilter::onComplete() { - const absl::string_view path = - request_headers_->Path() ? request_headers_->Path()->value().getStringView() : ""; + const absl::string_view path = request_headers_->getPathValue(); ENVOY_STREAM_LOG(debug, "request complete: path: {}", *decoder_callbacks_, path); Buffer::OwnedImpl response; diff --git a/source/server/admin/runtime_handler.cc b/source/server/admin/runtime_handler.cc index 1b6f9051673c..d2ee6dd84813 100644 --- a/source/server/admin/runtime_handler.cc +++ b/source/server/admin/runtime_handler.cc @@ -84,7 +84,8 @@ Http::Code RuntimeHandler::handlerRuntimeModify(absl::string_view url, Http::Res if (params.empty()) { // Check if the params are in the request's body. 
if (admin_stream.getRequestBody() != nullptr && - isFormUrlEncoded(admin_stream.getRequestHeaders().ContentType())) { + admin_stream.getRequestHeaders().getContentTypeValue() == + Http::Headers::get().ContentTypeValues.FormUrlEncoded) { params = Http::Utility::parseFromBody(admin_stream.getRequestBody()->toString()); } @@ -107,14 +108,5 @@ Http::Code RuntimeHandler::handlerRuntimeModify(absl::string_view url, Http::Res return Http::Code::OK; } -bool RuntimeHandler::isFormUrlEncoded(const Http::HeaderEntry* content_type) { - if (content_type == nullptr) { - return false; - } - - return content_type->value().getStringView() == - Http::Headers::get().ContentTypeValues.FormUrlEncoded; -} - } // namespace Server } // namespace Envoy diff --git a/source/server/admin/runtime_handler.h b/source/server/admin/runtime_handler.h index d0b25d7a8297..306a356574b3 100644 --- a/source/server/admin/runtime_handler.h +++ b/source/server/admin/runtime_handler.h @@ -24,9 +24,6 @@ class RuntimeHandler : public HandlerContextBase { Http::Code handlerRuntimeModify(absl::string_view path_and_query, Http::ResponseHeaderMap& response_headers, Buffer::Instance& response, AdminStream&); - -private: - bool isFormUrlEncoded(const Http::HeaderEntry* content_type); }; } // namespace Server From 2fb39216b00317a33a20395eb1607b88b536c4d6 Mon Sep 17 00:00:00 2001 From: Cynthia Coan Date: Thu, 28 May 2020 00:01:18 -0600 Subject: [PATCH 238/909] set instance-protection on non-managed agents (#11338) set instance-protection on non-managed agents Additional Description: Prevent automatic scaling in of non-managed agents that are currently working jobs. Risk Level: Minimum Testing: Validated setting instance protection script works manually: ``` azure-pipelines@i-0ddaddbed4d4ec867:~$ /usr/local/bin/set-instance-protection.sh on Fetched Cached Credentials, Expire At: [2020-05-28T02:20:57Z] azure-pipelines@i-0ddaddbed4d4ec867:~$ /usr/local/bin/set-instance-protection.sh off Fetched Cached Credentials, Expire At: [2020-05-28T02:20:57Z] azure-pipelines@i-0ddaddbed4d4ec867:~$ /usr/local/bin/set-instance-protection.sh on Fetched Cached Credentials, Expire At: [2020-05-28T02:20:57Z] ``` Docs Changes: N/A Release Notes: N/A Signed-off-by: Cynthia --- .azure-pipelines/bazel.yml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/.azure-pipelines/bazel.yml b/.azure-pipelines/bazel.yml index 0d0ed6302664..2683b2f1a53a 100644 --- a/.azure-pipelines/bazel.yml +++ b/.azure-pipelines/bazel.yml @@ -17,6 +17,11 @@ parameters: default: true steps: + - bash: | + /usr/local/bin/set-instance-protection.sh on + displayName: "Set Instance Protection on Agent to prevent scale in" + condition: eq(false, ${{ parameters.managedAgent }}) + - task: Cache@2 inputs: key: '"${{ parameters.ciTarget }}" | ./WORKSPACE | **/*.bzl' @@ -78,3 +83,8 @@ steps: chmod -R u+w $(Build.StagingDirectory) displayName: "Self hosted agent clean up" condition: eq(false, ${{ parameters.managedAgent }}) + + - bash: | + /usr/local/bin/set-instance-protection.sh off + displayName: "Set Instance Protection on Agent to prevent scale in" + condition: eq(false, ${{ parameters.managedAgent }}) \ No newline at end of file From 8713125fd275f623196552cebff3c0f45f7c2127 Mon Sep 17 00:00:00 2001 From: Florin Coras Date: Thu, 28 May 2020 05:22:20 -0700 Subject: [PATCH 239/909] network: remove remaining socket apis from address (#11295) Move remaining network stack/socket layer interactions from addresses to sockets. 
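The shape of the change, in brief: `bind()`, `listen()`, and `connect()` move off `Address::Instance` and onto `Socket`, leaving addresses as plain data exposed through `sockAddr()`/`sockAddrLen()`. A minimal sketch of the new call-site pattern, pieced together from the hunks below (the free function and the literal loopback address are illustrative only, not part of this patch):

```cpp
#include "common/network/address_impl.h"
#include "common/network/socket_impl.h"

namespace Envoy {
namespace Network {

// Sketch: the socket now owns the fd and performs the syscalls; the address
// object only supplies the sockaddr data.
void bindAndListenSketch() {
  Address::InstanceConstSharedPtr addr =
      std::make_shared<Address::Ipv4Instance>("127.0.0.1", 0); // port 0: OS picks
  SocketImpl sock(Address::SocketType::Stream, addr);
  const Api::SysCallIntResult result = sock.bind(addr); // was addr->bind(fd)
  if (result.rc_ == 0) {
    sock.listen(128); // was os_sys_calls.listen(fd, 128)
  }
}

} // namespace Network
} // namespace Envoy
```

The same pattern recurs throughout the tests touched below: construct a `SocketImpl` (or `ConnectionSocketImpl`) from an address, then call `bind`/`listen`/`connect` on the socket rather than on the address.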
Signed-off-by: Florin Coras --- include/envoy/network/address.h | 42 ++++++---- include/envoy/network/socket.h | 25 ++++++ source/common/network/address_impl.cc | 83 +++++-------------- source/common/network/address_impl.h | 47 +++++------ source/common/network/connection_impl.cc | 15 ++-- source/common/network/listen_socket_impl.cc | 13 ++- source/common/network/listen_socket_impl.h | 11 ++- source/common/network/socket_impl.cc | 45 ++++++++++ source/common/network/socket_impl.h | 10 +++ source/common/network/socket_option_factory.h | 2 +- .../filters/udp/udp_proxy/udp_proxy_filter.h | 1 + .../quic_listeners/quiche/envoy_quic_utils.cc | 9 +- test/common/network/address_impl_test.cc | 65 ++++++++------- test/common/network/dns_impl_test.cc | 5 +- .../common/network/listen_socket_impl_test.cc | 11 +-- .../quiche/platform/quic_platform_test.cc | 5 +- .../common/statsd/udp_statsd_test.cc | 9 +- test/mocks/network/mocks.h | 16 ++++ test/server/filter_chain_benchmark_test.cc | 5 ++ test/test_common/BUILD | 9 ++ test/test_common/network_utility.cc | 28 ++++--- test/test_common/network_utility.h | 2 +- test/test_common/network_utility_test.cc | 4 +- 23 files changed, 284 insertions(+), 178 deletions(-) diff --git a/include/envoy/network/address.h b/include/envoy/network/address.h index 440e86c6e667..243c877045d6 100644 --- a/include/envoy/network/address.h +++ b/include/envoy/network/address.h @@ -98,6 +98,23 @@ class Ip { virtual IpVersion version() const PURE; }; +/** + * Interface for a generic Pipe address + */ +class Pipe { +public: + virtual ~Pipe() = default; + /** + * @return abstract namespace flag + */ + virtual bool abstractNamespace() const PURE; + + /** + * @return pipe mode + */ + virtual mode_t mode() const PURE; +}; + enum class Type { Ip, Pipe }; enum class SocketType { Stream, Datagram }; @@ -138,27 +155,24 @@ class Instance { virtual const std::string& logicalName() const PURE; /** - * Bind a socket to this address. The socket should have been created with a call to socket() on - * an Instance of the same address family. - * @param fd supplies the platform socket handle. - * @return a Api::SysCallIntResult with rc_ = 0 for success and rc_ = -1 for failure. If the call - * is successful, errno_ shouldn't be used. + * @return the IP address information IFF type() == Type::Ip, otherwise nullptr. */ - virtual Api::SysCallIntResult bind(os_fd_t fd) const PURE; + virtual const Ip* ip() const PURE; /** - * Connect a socket to this address. The socket should have been created with a call to socket() - * on this object. - * @param fd supplies the platform socket handle. - * @return a Api::SysCallIntResult with rc_ = 0 for success and rc_ = -1 for failure. If the call - * is successful, errno_ shouldn't be used. + * @return the pipe address information IFF type() == Type::Pipe, otherwise nullptr. */ - virtual Api::SysCallIntResult connect(os_fd_t fd) const PURE; + virtual const Pipe* pipe() const PURE; /** - * @return the IP address information IFF type() == Type::Ip, otherwise nullptr. + * @return the underlying structure wherein the address is stored */ - virtual const Ip* ip() const PURE; + virtual const sockaddr* sockAddr() const PURE; + + /** + * @return length of the address container + */ + virtual socklen_t sockAddrLen() const PURE; /** * @return the type of address. 
diff --git a/include/envoy/network/socket.h b/include/envoy/network/socket.h index 6d911f4f111c..14aed46083d8 100644 --- a/include/envoy/network/socket.h +++ b/include/envoy/network/socket.h @@ -88,6 +88,31 @@ class Socket { */ virtual bool isOpen() const PURE; + /** + * Bind a socket to this address. The socket should have been created with a call to socket() + * @param address address to bind the socket to. + * @return a Api::SysCallIntResult with rc_ = 0 for success and rc_ = -1 for failure. If the call + * is successful, errno_ shouldn't be used. + */ + virtual Api::SysCallIntResult bind(const Address::InstanceConstSharedPtr address) PURE; + + /** + * Listen on bound socket. + * @param backlog maximum number of pending connections for listener + * @return a Api::SysCallIntResult with rc_ = 0 for success and rc_ = -1 for failure. If the call + * is successful, errno_ shouldn't be used. + */ + virtual Api::SysCallIntResult listen(int backlog) PURE; + + /** + * Connect a socket to this address. The socket should have been created with a call to socket() + * on this object. + * @param address remote address to connect to. + * @return a Api::SysCallIntResult with rc_ = 0 for success and rc_ = -1 for failure. If the call + * is successful, errno_ shouldn't be used. + */ + virtual Api::SysCallIntResult connect(const Address::InstanceConstSharedPtr address) PURE; + /** * Visitor class for setting socket options. */ diff --git a/source/common/network/address_impl.cc b/source/common/network/address_impl.cc index 41630d8a1b3e..dbd7eee1e380 100644 --- a/source/common/network/address_impl.cc +++ b/source/common/network/address_impl.cc @@ -148,15 +148,6 @@ bool Ipv4Instance::operator==(const Instance& rhs) const { (ip_.port() == rhs_casted->ip_.port())); } -Api::SysCallIntResult Ipv4Instance::bind(os_fd_t fd) const { - return Api::OsSysCallsSingleton::get().bind( - fd, reinterpret_cast(&ip_.ipv4_.address_), sizeof(ip_.ipv4_.address_)); -} - -Api::SysCallIntResult Ipv4Instance::connect(os_fd_t fd) const { - return Api::OsSysCallsSingleton::get().connect(fd, sockAddr(), sockAddrLen()); -} - std::string Ipv4Instance::sockaddrToString(const sockaddr_in& addr) { static constexpr size_t BufferSize = 16; // enough space to hold an IPv4 address in string form char str[BufferSize]; @@ -236,15 +227,6 @@ bool Ipv6Instance::operator==(const Instance& rhs) const { (ip_.port() == rhs_casted->ip_.port())); } -Api::SysCallIntResult Ipv6Instance::bind(os_fd_t fd) const { - return Api::OsSysCallsSingleton::get().bind( - fd, reinterpret_cast(&ip_.ipv6_.address_), sizeof(ip_.ipv6_.address_)); -} - -Api::SysCallIntResult Ipv6Instance::connect(os_fd_t fd) const { - return Api::OsSysCallsSingleton::get().connect(fd, sockAddr(), sockAddrLen()); -} - PipeInstance::PipeInstance(const sockaddr_un* address, socklen_t ss_len, mode_t mode) : InstanceBase(Type::Pipe) { if (address->sun_path[0] == '\0') { @@ -253,31 +235,31 @@ PipeInstance::PipeInstance(const sockaddr_un* address, socklen_t ss_len, mode_t #endif RELEASE_ASSERT(static_cast(ss_len) >= offsetof(struct sockaddr_un, sun_path) + 1, ""); - abstract_namespace_ = true; - address_length_ = ss_len - offsetof(struct sockaddr_un, sun_path); + pipe_.abstract_namespace_ = true; + pipe_.address_length_ = ss_len - offsetof(struct sockaddr_un, sun_path); } - address_ = *address; - if (abstract_namespace_) { + pipe_.address_ = *address; + if (pipe_.abstract_namespace_) { if (mode != 0) { throw EnvoyException("Cannot set mode for Abstract AF_UNIX sockets"); } // Replace all null 
characters with '@' in friendly_name_. - friendly_name_ = - friendlyNameFromAbstractPath(absl::string_view(address_.sun_path, address_length_)); + friendly_name_ = friendlyNameFromAbstractPath( + absl::string_view(pipe_.address_.sun_path, pipe_.address_length_)); } else { friendly_name_ = address->sun_path; } - this->mode = mode; + pipe_.mode_ = mode; } PipeInstance::PipeInstance(const std::string& pipe_path, mode_t mode) : InstanceBase(Type::Pipe) { - if (pipe_path.size() >= sizeof(address_.sun_path)) { + if (pipe_path.size() >= sizeof(pipe_.address_.sun_path)) { throw EnvoyException( fmt::format("Path \"{}\" exceeds maximum UNIX domain socket path size of {}.", pipe_path, - sizeof(address_.sun_path))); + sizeof(pipe_.address_.sun_path))); } - memset(&address_, 0, sizeof(address_)); - address_.sun_family = AF_UNIX; + memset(&pipe_.address_, 0, sizeof(pipe_.address_)); + pipe_.address_.sun_family = AF_UNIX; if (pipe_path[0] == '@') { // This indicates an abstract namespace. // In this case, null bytes in the name have no special significance, and so we copy all @@ -290,48 +272,27 @@ PipeInstance::PipeInstance(const std::string& pipe_path, mode_t mode) : Instance if (mode != 0) { throw EnvoyException("Cannot set mode for Abstract AF_UNIX sockets"); } - abstract_namespace_ = true; - address_length_ = pipe_path.size(); - memcpy(&address_.sun_path[0], pipe_path.data(), pipe_path.size()); - address_.sun_path[0] = '\0'; - address_.sun_path[pipe_path.size()] = '\0'; - friendly_name_ = - friendlyNameFromAbstractPath(absl::string_view(address_.sun_path, address_length_)); + pipe_.abstract_namespace_ = true; + pipe_.address_length_ = pipe_path.size(); + memcpy(&pipe_.address_.sun_path[0], pipe_path.data(), pipe_path.size()); + pipe_.address_.sun_path[0] = '\0'; + pipe_.address_.sun_path[pipe_path.size()] = '\0'; + friendly_name_ = friendlyNameFromAbstractPath( + absl::string_view(pipe_.address_.sun_path, pipe_.address_length_)); } else { // Throw an error if the pipe path has an embedded null character. if (pipe_path.size() != strlen(pipe_path.c_str())) { throw EnvoyException("UNIX domain socket pathname contains embedded null characters"); } - StringUtil::strlcpy(&address_.sun_path[0], pipe_path.c_str(), sizeof(address_.sun_path)); - friendly_name_ = address_.sun_path; + StringUtil::strlcpy(&pipe_.address_.sun_path[0], pipe_path.c_str(), + sizeof(pipe_.address_.sun_path)); + friendly_name_ = pipe_.address_.sun_path; } - this->mode = mode; + pipe_.mode_ = mode; } bool PipeInstance::operator==(const Instance& rhs) const { return asString() == rhs.asString(); } -Api::SysCallIntResult PipeInstance::bind(os_fd_t fd) const { - if (!abstract_namespace_) { - // Try to unlink an existing filesystem object at the requested path. Ignore - // errors -- it's fine if the path doesn't exist, and if it exists but can't - // be unlinked then `::bind()` will generate a reasonable errno. 
- unlink(address_.sun_path); - } - auto& os_syscalls = Api::OsSysCallsSingleton::get(); - auto bind_result = os_syscalls.bind(fd, sockAddr(), sockAddrLen()); - if (mode != 0 && !abstract_namespace_ && bind_result.rc_ == 0) { - auto set_permissions = os_syscalls.chmod(address_.sun_path, mode); - if (set_permissions.rc_ != 0) { - throw EnvoyException(absl::StrCat("Failed to create socket with mode ", mode)); - } - } - return bind_result; -} - -Api::SysCallIntResult PipeInstance::connect(os_fd_t fd) const { - return Api::OsSysCallsSingleton::get().connect(fd, sockAddr(), sockAddrLen()); -} - } // namespace Address } // namespace Network } // namespace Envoy diff --git a/source/common/network/address_impl.h b/source/common/network/address_impl.h index d59f151a087d..c9bd3e64408e 100644 --- a/source/common/network/address_impl.h +++ b/source/common/network/address_impl.h @@ -8,7 +8,6 @@ #include "envoy/common/platform.h" #include "envoy/network/address.h" -#include "envoy/network/io_handle.h" namespace Envoy { namespace Network { @@ -38,9 +37,6 @@ class InstanceBase : public Instance { const std::string& logicalName() const override { return asString(); } Type type() const override { return type_; } - virtual const sockaddr* sockAddr() const PURE; - virtual socklen_t sockAddrLen() const PURE; - protected: InstanceBase(Type type) : type_(type) {} @@ -78,11 +74,8 @@ class Ipv4Instance : public InstanceBase { // Network::Address::Instance bool operator==(const Instance& rhs) const override; - Api::SysCallIntResult bind(os_fd_t fd) const override; - Api::SysCallIntResult connect(os_fd_t fd) const override; const Ip* ip() const override { return &ip_; } - - // Network::Address::InstanceBase + const Pipe* pipe() const override { return nullptr; } const sockaddr* sockAddr() const override { return reinterpret_cast(&ip_.ipv4_.address_); } @@ -151,11 +144,8 @@ class Ipv6Instance : public InstanceBase { // Network::Address::Instance bool operator==(const Instance& rhs) const override; - Api::SysCallIntResult bind(os_fd_t fd) const override; - Api::SysCallIntResult connect(os_fd_t fd) const override; const Ip* ip() const override { return &ip_; } - - // Network::Address::InstanceBase + const Pipe* pipe() const override { return nullptr; } const sockaddr* sockAddr() const override { return reinterpret_cast(&ip_.ipv6_.address_); } @@ -214,25 +204,32 @@ class PipeInstance : public InstanceBase { // Network::Address::Instance bool operator==(const Instance& rhs) const override; - Api::SysCallIntResult bind(os_fd_t fd) const override; - Api::SysCallIntResult connect(os_fd_t fd) const override; const Ip* ip() const override { return nullptr; } - - // Network::Address::InstanceBase - const sockaddr* sockAddr() const override { return reinterpret_cast(&address_); } + const Pipe* pipe() const override { return &pipe_; } + const sockaddr* sockAddr() const override { + return reinterpret_cast(&pipe_.address_); + } socklen_t sockAddrLen() const override { - if (abstract_namespace_) { - return offsetof(struct sockaddr_un, sun_path) + address_length_; + if (pipe_.abstract_namespace_) { + return offsetof(struct sockaddr_un, sun_path) + pipe_.address_length_; } - return sizeof(address_); + return sizeof(pipe_.address_); } private: - sockaddr_un address_; - // For abstract namespaces. 
- bool abstract_namespace_{false}; - uint32_t address_length_{0}; - mode_t mode{0}; + struct PipeHelper : public Pipe { + + bool abstractNamespace() const override { return abstract_namespace_; } + mode_t mode() const override { return mode_; } + + sockaddr_un address_; + // For abstract namespaces. + bool abstract_namespace_{false}; + uint32_t address_length_{0}; + mode_t mode_{0}; + }; + + PipeHelper pipe_; }; } // namespace Address diff --git a/source/common/network/connection_impl.cc b/source/common/network/connection_impl.cc index 3ac80fca58c1..6378678322f4 100644 --- a/source/common/network/connection_impl.cc +++ b/source/common/network/connection_impl.cc @@ -734,16 +734,18 @@ ClientConnectionImpl::ClientConnectionImpl( file_event_->activate(Event::FileReadyType::Write); return; } - const Network::Address::Instance* source_to_use = source_address.get(); + + const Network::Address::InstanceConstSharedPtr* source = &source_address; + if (socket_->localAddress()) { - source_to_use = socket_->localAddress().get(); + source = &socket_->localAddress(); } - if (source_to_use != nullptr) { - const Api::SysCallIntResult result = source_to_use->bind(ioHandle().fd()); + if (*source != nullptr) { + Api::SysCallIntResult result = socket_->bind(*source); if (result.rc_ < 0) { // TODO(lizan): consider add this error into transportFailureReason. - ENVOY_LOG_MISC(debug, "Bind failure. Failed to bind to {}: {}", source_to_use->asString(), + ENVOY_LOG_MISC(debug, "Bind failure. Failed to bind to {}: {}", source->get()->asString(), strerror(result.errno_)); bind_error_ = true; // Set a special error state to ensure asynchronous close to give the owner of the @@ -759,7 +761,7 @@ ClientConnectionImpl::ClientConnectionImpl( void ClientConnectionImpl::connect() { ENVOY_CONN_LOG(debug, "connecting to {}", *this, socket_->remoteAddress()->asString()); - const Api::SysCallIntResult result = socket_->remoteAddress()->connect(ioHandle().fd()); + const Api::SysCallIntResult result = socket_->connect(socket_->remoteAddress()); if (result.rc_ == 0) { // write will become ready. ASSERT(connecting_); @@ -780,6 +782,7 @@ void ClientConnectionImpl::connect() { // The local address can only be retrieved for IP connections. Other // types, such as UDS, don't have a notion of a local address. + // TODO(fcoras) move to SocketImpl? if (socket_->remoteAddress()->type() == Address::Type::Ip) { socket_->setLocalAddress(SocketInterface::addressFromFd(ioHandle().fd())); } diff --git a/source/common/network/listen_socket_impl.cc b/source/common/network/listen_socket_impl.cc index e689dfd25405..7d7a61913d0f 100644 --- a/source/common/network/listen_socket_impl.cc +++ b/source/common/network/listen_socket_impl.cc @@ -17,19 +17,24 @@ namespace Envoy { namespace Network { -void ListenSocketImpl::doBind() { - const Api::SysCallIntResult result = local_address_->bind(io_handle_->fd()); +Api::SysCallIntResult ListenSocketImpl::bind(Network::Address::InstanceConstSharedPtr address) { + local_address_ = address; + + const Api::SysCallIntResult result = SocketImpl::bind(local_address_); if (SOCKET_FAILURE(result.rc_)) { close(); throw SocketBindException( fmt::format("cannot bind '{}': {}", local_address_->asString(), strerror(result.errno_)), result.errno_); } + // TODO(fcoras): should this be moved to SocketImpl::bind()? 
if (local_address_->type() == Address::Type::Ip && local_address_->ip()->port() == 0) { // If the port we bind is zero, then the OS will pick a free port for us (assuming there are // any), and we need to find out the port number that the OS picked. local_address_ = SocketInterface::addressFromFd(io_handle_->fd()); } + + return {0, 0}; } void ListenSocketImpl::setListenSocketOptions(const Network::Socket::OptionsSharedPtr& options) { @@ -44,7 +49,7 @@ void ListenSocketImpl::setupSocket(const Network::Socket::OptionsSharedPtr& opti setListenSocketOptions(options); if (bind_to_port) { - doBind(); + bind(local_address_); } } @@ -66,7 +71,7 @@ void NetworkListenSocket< UdsListenSocket::UdsListenSocket(const Address::InstanceConstSharedPtr& address) : ListenSocketImpl(SocketInterface::socket(Address::SocketType::Stream, address), address) { RELEASE_ASSERT(io_handle_->fd() != -1, ""); - doBind(); + bind(local_address_); } UdsListenSocket::UdsListenSocket(IoHandlePtr&& io_handle, diff --git a/source/common/network/listen_socket_impl.h b/source/common/network/listen_socket_impl.h index 82d71c15223f..7b5f5af7ad7c 100644 --- a/source/common/network/listen_socket_impl.h +++ b/source/common/network/listen_socket_impl.h @@ -7,6 +7,7 @@ #include "envoy/common/platform.h" #include "envoy/network/connection.h" #include "envoy/network/listen_socket.h" +#include "envoy/network/socket.h" #include "common/common/assert.h" #include "common/network/socket_impl.h" @@ -20,8 +21,8 @@ class ListenSocketImpl : public SocketImpl { : SocketImpl(std::move(io_handle), local_address) {} void setupSocket(const Network::Socket::OptionsSharedPtr& options, bool bind_to_port); - void doBind(); void setListenSocketOptions(const Network::Socket::OptionsSharedPtr& options); + Api::SysCallIntResult bind(Network::Address::InstanceConstSharedPtr address) override; }; /** @@ -82,6 +83,14 @@ class ConnectionSocketImpl : public SocketImpl, public ConnectionSocket { : SocketImpl(std::move(io_handle), local_address), remote_address_(remote_address), direct_remote_address_(remote_address) {} + ConnectionSocketImpl(Address::SocketType type, + const Address::InstanceConstSharedPtr& local_address, + const Address::InstanceConstSharedPtr& remote_address) + : SocketImpl(type, local_address), remote_address_(remote_address), + direct_remote_address_(remote_address) { + setLocalAddress(local_address); + } + // Network::Socket Address::SocketType socketType() const override { return Address::SocketType::Stream; } diff --git a/source/common/network/socket_impl.cc b/source/common/network/socket_impl.cc index 0ecf36730aa6..64dc99594b8b 100644 --- a/source/common/network/socket_impl.cc +++ b/source/common/network/socket_impl.cc @@ -134,5 +134,50 @@ Address::InstanceConstSharedPtr SocketInterface::peerAddressFromFd(os_fd_t fd) { return Address::addressFromSockAddr(ss, ss_len); } +SocketImpl::SocketImpl(Address::SocketType type, Address::Type addr_type, + Address::IpVersion version) + : io_handle_(SocketInterface::socket(type, addr_type, version)) {} + +SocketImpl::SocketImpl(Address::SocketType socket_type, const Address::InstanceConstSharedPtr addr) + : io_handle_(SocketInterface::socket(socket_type, addr)) {} + +Api::SysCallIntResult SocketImpl::bind(Network::Address::InstanceConstSharedPtr address) { + if (address->type() == Address::Type::Pipe) { + const Address::Pipe* pipe = address->pipe(); + const auto* pipe_sa = reinterpret_cast(address->sockAddr()); + bool abstract_namespace = address->pipe()->abstractNamespace(); + if (!abstract_namespace) 
{ + // Try to unlink an existing filesystem object at the requested path. Ignore + // errors -- it's fine if the path doesn't exist, and if it exists but can't + // be unlinked then `::bind()` will generate a reasonable errno. + unlink(pipe_sa->sun_path); + } + // Not storing a reference to syscalls singleton because of unit test mocks + auto bind_result = Api::OsSysCallsSingleton::get().bind(io_handle_->fd(), address->sockAddr(), + address->sockAddrLen()); + if (pipe->mode() != 0 && !abstract_namespace && bind_result.rc_ == 0) { + auto set_permissions = Api::OsSysCallsSingleton::get().chmod(pipe_sa->sun_path, pipe->mode()); + if (set_permissions.rc_ != 0) { + throw EnvoyException(fmt::format("Failed to create socket with mode {}: {}", + std::to_string(pipe->mode()), + strerror(set_permissions.errno_))); + } + } + return bind_result; + } + + return Api::OsSysCallsSingleton::get().bind(io_handle_->fd(), address->sockAddr(), + address->sockAddrLen()); +} + +Api::SysCallIntResult SocketImpl::listen(int backlog) { + return Api::OsSysCallsSingleton::get().listen(io_handle_->fd(), backlog); +} + +Api::SysCallIntResult SocketImpl::connect(const Network::Address::InstanceConstSharedPtr address) { + return Api::OsSysCallsSingleton::get().connect(io_handle_->fd(), address->sockAddr(), + address->sockAddrLen()); +} + } // namespace Network } // namespace Envoy \ No newline at end of file diff --git a/source/common/network/socket_impl.h b/source/common/network/socket_impl.h index b380ca1506ca..99766d23d158 100644 --- a/source/common/network/socket_impl.h +++ b/source/common/network/socket_impl.h @@ -52,6 +52,9 @@ Address::InstanceConstSharedPtr peerAddressFromFd(os_fd_t fd); class SocketImpl : public virtual Socket { public: + SocketImpl(Address::SocketType type, Address::Type addr_type, Address::IpVersion version); + SocketImpl(Address::SocketType socket_type, const Address::InstanceConstSharedPtr addr); + // Network::Socket const Address::InstanceConstSharedPtr& localAddress() const override { return local_address_; } void setLocalAddress(const Address::InstanceConstSharedPtr& local_address) override { @@ -79,7 +82,13 @@ class SocketImpl : public virtual Socket { ensureOptions(); Network::Socket::appendOptions(options_, options); } + + Api::SysCallIntResult bind(Network::Address::InstanceConstSharedPtr address) override; + Api::SysCallIntResult listen(int backlog) override; + Api::SysCallIntResult connect(const Address::InstanceConstSharedPtr addr) override; + const OptionsSharedPtr& options() const override { return options_; } + Address::SocketType socketType() const override { return sock_type_; } protected: SocketImpl(IoHandlePtr&& io_handle, const Address::InstanceConstSharedPtr& local_address) @@ -88,6 +97,7 @@ class SocketImpl : public virtual Socket { const IoHandlePtr io_handle_; Address::InstanceConstSharedPtr local_address_; OptionsSharedPtr options_; + Address::SocketType sock_type_; }; } // namespace Network diff --git a/source/common/network/socket_option_factory.h b/source/common/network/socket_option_factory.h index 51a40b1bd8c3..e93885e844b9 100644 --- a/source/common/network/socket_option_factory.h +++ b/source/common/network/socket_option_factory.h @@ -2,7 +2,7 @@ #include "envoy/common/platform.h" #include "envoy/config/core/v3/base.pb.h" -#include "envoy/network/listen_socket.h" +#include "envoy/network/socket.h" #include "common/common/logger.h" #include "common/protobuf/protobuf.h" diff --git a/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.h 
b/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.h index d4de8ad98dfd..78a906fbd2f0 100644 --- a/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.h +++ b/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.h @@ -6,6 +6,7 @@ #include "envoy/network/filter.h" #include "envoy/upstream/cluster_manager.h" +#include "common/network/socket_impl.h" #include "common/network/utility.h" #include "absl/container/flat_hash_set.h" diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_utils.cc b/source/extensions/quic_listeners/quiche/envoy_quic_utils.cc index 921f473312f8..8659103d1b74 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_utils.cc +++ b/source/extensions/quic_listeners/quiche/envoy_quic_utils.cc @@ -94,10 +94,8 @@ Network::ConnectionSocketPtr createConnectionSocket(Network::Address::InstanceConstSharedPtr& peer_addr, Network::Address::InstanceConstSharedPtr& local_addr, const Network::ConnectionSocket::OptionsSharedPtr& options) { - Network::IoHandlePtr io_handle = - Network::SocketInterface::socket(Network::Address::SocketType::Datagram, peer_addr); - auto connection_socket = - std::make_unique(std::move(io_handle), local_addr, peer_addr); + auto connection_socket = std::make_unique( + Network::Address::SocketType::Datagram, local_addr, peer_addr); connection_socket->addOptions(Network::SocketOptionFactory::buildIpPacketInfoOptions()); connection_socket->addOptions(Network::SocketOptionFactory::buildRxQueueOverFlowOptions()); if (options != nullptr) { @@ -109,8 +107,9 @@ createConnectionSocket(Network::Address::InstanceConstSharedPtr& peer_addr, ENVOY_LOG_MISC(error, "Fail to apply pre-bind options"); return connection_socket; } - local_addr->bind(connection_socket->ioHandle().fd()); + connection_socket->bind(local_addr); ASSERT(local_addr->ip()); + // TODO(fcoras) maybe move to SocketImpl? if (local_addr->ip()->port() == 0) { // Get ephemeral port number. local_addr = Network::SocketInterface::addressFromFd(connection_socket->ioHandle().fd()); diff --git a/test/common/network/address_impl_test.cc b/test/common/network/address_impl_test.cc index 2950907d1812..3b1dd622ff35 100644 --- a/test/common/network/address_impl_test.cc +++ b/test/common/network/address_impl_test.cc @@ -9,6 +9,7 @@ #include "common/common/fmt.h" #include "common/common/utility.h" #include "common/network/address_impl.h" +#include "common/network/listen_socket_impl.h" #include "common/network/utility.h" #include "test/mocks/api/mocks.h" @@ -48,43 +49,45 @@ void testSocketBindAndConnect(Network::Address::IpVersion ip_version, bool v6onl ASSERT_NE(addr_port->ip(), nullptr); // Create a socket on which we'll listen for connections from clients. - IoHandlePtr io_handle = SocketInterface::socket(SocketType::Stream, addr_port); - ASSERT_GE(io_handle->fd(), 0) << addr_port->asString(); + SocketImpl sock(SocketType::Stream, addr_port); + ASSERT_GE(sock.ioHandle().fd(), 0) << addr_port->asString(); auto& os_sys_calls = Api::OsSysCallsSingleton::get(); // Check that IPv6 sockets accept IPv6 connections only. if (addr_port->ip()->version() == IpVersion::v6) { int socket_v6only = 0; socklen_t size_int = sizeof(socket_v6only); - ASSERT_GE(os_sys_calls - .getsockopt(io_handle->fd(), IPPROTO_IPV6, IPV6_V6ONLY, &socket_v6only, &size_int) - .rc_, - 0); + ASSERT_GE( + os_sys_calls + .getsockopt(sock.ioHandle().fd(), IPPROTO_IPV6, IPV6_V6ONLY, &socket_v6only, &size_int) + .rc_, + 0); EXPECT_EQ(v6only, socket_v6only != 0); } // Bind the socket to the desired address and port. 
- const Api::SysCallIntResult result = addr_port->bind(io_handle->fd()); + const Api::SysCallIntResult result = sock.bind(addr_port); ASSERT_EQ(result.rc_, 0) << addr_port->asString() << "\nerror: " << strerror(result.errno_) << "\nerrno: " << result.errno_; // Do a bare listen syscall. Not bothering to accept connections as that would // require another thread. - ASSERT_EQ(os_sys_calls.listen(io_handle->fd(), 128).rc_, 0); + ASSERT_EQ(sock.listen(128).rc_, 0); auto client_connect = [&os_sys_calls](Address::InstanceConstSharedPtr addr_port) { // Create a client socket and connect to the server. - IoHandlePtr client_handle = SocketInterface::socket(SocketType::Stream, addr_port); - ASSERT_GE(client_handle->fd(), 0) << addr_port->asString(); + SocketImpl client_sock(SocketType::Stream, addr_port); + + ASSERT_GE(client_sock.ioHandle().fd(), 0) << addr_port->asString(); // Instance::socket creates a non-blocking socket, which that extends all the way to the // operation of ::connect(), so connect returns with errno==EWOULDBLOCK before the tcp // handshake can complete. For testing convenience, re-enable blocking on the socket // so that connect will wait for the handshake to complete. - ASSERT_EQ(os_sys_calls.setsocketblocking(client_handle->fd(), true).rc_, 0); + ASSERT_EQ(os_sys_calls.setsocketblocking(client_sock.ioHandle().fd(), true).rc_, 0); // Connect to the server. - const Api::SysCallIntResult result = addr_port->connect(client_handle->fd()); + const Api::SysCallIntResult result = client_sock.connect(addr_port); ASSERT_EQ(result.rc_, 0) << addr_port->asString() << "\nerror: " << strerror(result.errno_) << "\nerrno: " << result.errno_; }; @@ -325,14 +328,14 @@ TEST(PipeInstanceTest, BasicPermission) { std::string path = TestEnvironment::unixDomainSocketPath("foo.sock"); const mode_t mode = 0777; - PipeInstance address(path, mode); + PipeInstance pipe(path, mode); + InstanceConstSharedPtr address = std::make_shared(pipe); + SocketImpl sock(SocketType::Stream, address); - IoHandlePtr io_handle = - SocketInterface::socket(SocketType::Stream, Address::Type::Pipe, Address::IpVersion::v4); - ASSERT_GE(io_handle->fd(), 0) << address.asString(); + ASSERT_GE(sock.ioHandle().fd(), 0) << pipe.asString(); - Api::SysCallIntResult result = address.bind(io_handle->fd()); - ASSERT_EQ(result.rc_, 0) << address.asString() << "\nerror: " << strerror(result.errno_) + Api::SysCallIntResult result = sock.bind(address); + ASSERT_EQ(result.rc_, 0) << pipe.asString() << "\nerror: " << strerror(result.errno_) << "\terrno: " << result.errno_; Api::OsSysCalls& os_sys_calls = Api::OsSysCallsSingleton::get(); @@ -351,15 +354,15 @@ TEST(PipeInstanceTest, PermissionFail) { std::string path = TestEnvironment::unixDomainSocketPath("foo.sock"); const mode_t mode = 0777; - PipeInstance address(path, mode); + PipeInstance pipe(path, mode); + InstanceConstSharedPtr address = std::make_shared(pipe); + SocketImpl sock(SocketType::Stream, address); + + ASSERT_GE(sock.ioHandle().fd(), 0) << pipe.asString(); - IoHandlePtr io_handle = - SocketInterface::socket(SocketType::Stream, Address::Type::Pipe, Address::IpVersion::v4); - ASSERT_GE(io_handle->fd(), 0) << address.asString(); EXPECT_CALL(os_sys_calls, bind(_, _, _)).WillOnce(Return(Api::SysCallIntResult{0, 0})); EXPECT_CALL(os_sys_calls, chmod(_, _)).WillOnce(Return(Api::SysCallIntResult{-1, 0})); - EXPECT_THROW_WITH_REGEX(address.bind(io_handle->fd()), EnvoyException, - "Failed to create socket with mode"); + EXPECT_THROW_WITH_REGEX(sock.bind(address), EnvoyException, 
"Failed to create socket with mode"); } TEST(PipeInstanceTest, AbstractNamespacePermission) { @@ -423,13 +426,15 @@ TEST(PipeInstanceTest, EmbeddedNullPathError) { TEST(PipeInstanceTest, UnlinksExistingFile) { const auto bind_uds_socket = [](const std::string& path) { - PipeInstance address(path); - IoHandlePtr io_handle = - SocketInterface::socket(SocketType::Stream, Address::Type::Pipe, Address::IpVersion::v4); - ASSERT_GE(io_handle->fd(), 0) << address.asString(); + PipeInstance pipe(path); + InstanceConstSharedPtr address = std::make_shared(pipe); + SocketImpl sock(SocketType::Stream, address); + + ASSERT_GE(sock.ioHandle().fd(), 0) << pipe.asString(); + + const Api::SysCallIntResult result = sock.bind(address); - const Api::SysCallIntResult result = address.bind(io_handle->fd()); - ASSERT_EQ(result.rc_, 0) << address.asString() << "\nerror: " << strerror(result.errno_) + ASSERT_EQ(result.rc_, 0) << pipe.asString() << "\nerror: " << strerror(result.errno_) << "\nerrno: " << result.errno_; }; diff --git a/test/common/network/dns_impl_test.cc b/test/common/network/dns_impl_test.cc index 9163a4545d01..d7b21618cdc2 100644 --- a/test/common/network/dns_impl_test.cc +++ b/test/common/network/dns_impl_test.cc @@ -386,9 +386,10 @@ class CustomInstance : public Address::Instance { const std::string& asString() const override { return antagonistic_name_; } absl::string_view asStringView() const override { return antagonistic_name_; } const std::string& logicalName() const override { return antagonistic_name_; } - Api::SysCallIntResult bind(os_fd_t fd) const override { return instance_.bind(fd); } - Api::SysCallIntResult connect(os_fd_t fd) const override { return instance_.connect(fd); } const Address::Ip* ip() const override { return instance_.ip(); } + const Address::Pipe* pipe() const override { return instance_.pipe(); } + const sockaddr* sockAddr() const override { return instance_.sockAddr(); } + socklen_t sockAddrLen() const override { return instance_.sockAddrLen(); } Address::Type type() const override { return instance_.type(); } private: diff --git a/test/common/network/listen_socket_impl_test.cc b/test/common/network/listen_socket_impl_test.cc index f705f9163f91..4b2d98442d75 100644 --- a/test/common/network/listen_socket_impl_test.cc +++ b/test/common/network/listen_socket_impl_test.cc @@ -46,8 +46,8 @@ class ListenSocketImplTest : public testing::TestWithParam { auto addr_fd = Network::Test::bindFreeLoopbackPort(version_, Address::SocketType::Stream); auto addr = addr_fd.first; - Network::IoHandlePtr& io_handle = addr_fd.second; - EXPECT_TRUE(SOCKET_VALID(io_handle->fd())); + SocketPtr& sock = addr_fd.second; + EXPECT_TRUE(SOCKET_VALID(sock->ioHandle().fd())); // Confirm that we got a reasonable address and port. ASSERT_EQ(Address::Type::Ip, addr->type()); @@ -55,7 +55,8 @@ class ListenSocketImplTest : public testing::TestWithParam { ASSERT_LT(0U, addr->ip()->port()); // Release the socket and re-bind it. - EXPECT_EQ(nullptr, io_handle->close().err_); + EXPECT_TRUE(sock->isOpen()); + sock->close(); auto option = std::make_unique(); auto options = std::make_shared>(); @@ -82,7 +83,7 @@ class ListenSocketImplTest : public testing::TestWithParam { // instead of if block. 
auto os_sys_calls = Api::OsSysCallsSingleton::get(); if (NetworkSocketTrait::type == Address::SocketType::Stream) { - EXPECT_EQ(0, os_sys_calls.listen(socket1->ioHandle().fd(), 0).rc_); + EXPECT_EQ(0, socket1->listen(0).rc_); } EXPECT_EQ(addr->ip()->port(), socket1->localAddress()->ip()->port()); @@ -104,7 +105,7 @@ class ListenSocketImplTest : public testing::TestWithParam { int domain = version_ == Address::IpVersion::v4 ? AF_INET : AF_INET6; auto socket_result = os_sys_calls.socket(domain, SOCK_STREAM, 0); EXPECT_TRUE(SOCKET_VALID(socket_result.rc_)); - io_handle = std::make_unique(socket_result.rc_); + Network::IoHandlePtr io_handle = std::make_unique(socket_result.rc_); auto socket3 = createListenSocketPtr(std::move(io_handle), addr, nullptr); EXPECT_EQ(socket3->localAddress()->asString(), addr->asString()); diff --git a/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc b/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc index a9f9b8cf762b..0568f3664337 100644 --- a/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc +++ b/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc @@ -669,10 +669,9 @@ TEST_F(QuicPlatformTest, PickUnsedPort) { Envoy::Network::Test::getCanonicalLoopbackAddress(ip_version); Envoy::Network::Address::InstanceConstSharedPtr addr_with_port = Envoy::Network::Utility::getAddressWithPort(*addr, port); - Envoy::Network::IoHandlePtr io_handle = Envoy::Network::SocketInterface::socket( - Envoy::Network::Address::SocketType::Datagram, addr_with_port); + Envoy::Network::SocketImpl sock(Envoy::Network::Address::SocketType::Datagram, addr_with_port); // binding of given port should success. - EXPECT_EQ(0, addr_with_port->bind(io_handle->fd()).rc_); + EXPECT_EQ(0, sock.bind(addr_with_port).rc_); } } diff --git a/test/extensions/stats_sinks/common/statsd/udp_statsd_test.cc b/test/extensions/stats_sinks/common/statsd/udp_statsd_test.cc index 2bc30294c6c8..7ca2fad17b68 100644 --- a/test/extensions/stats_sinks/common/statsd/udp_statsd_test.cc +++ b/test/extensions/stats_sinks/common/statsd/udp_statsd_test.cc @@ -55,16 +55,15 @@ TEST(UdpOverUdsStatsdSinkTest, InitWithPipeAddress) { // modification back to the abstraction layer so it will work for multiple platforms. Additionally // this uses low level networking calls because our abstractions in this area only work for IP // sockets. Revisit this also. - auto io_handle = - Network::SocketInterface::socket(Network::Address::SocketType::Datagram, uds_address); + Network::SocketImpl sock(Network::Address::SocketType::Datagram, uds_address); RELEASE_ASSERT( - Api::OsSysCallsSingleton::get().setsocketblocking(io_handle->fd(), false).rc_ != -1, ""); - uds_address->bind(io_handle->fd()); + Api::OsSysCallsSingleton::get().setsocketblocking(sock.ioHandle().fd(), false).rc_ != -1, ""); + sock.bind(uds_address); // Do the flush which should have somewhere to write now. 
sink.flush(snapshot); Buffer::OwnedImpl receive_buffer; - receive_buffer.read(*io_handle, 32); + receive_buffer.read(sock.ioHandle(), 32); EXPECT_EQ("envoy.test_counter:1|c", receive_buffer.toString()); } diff --git a/test/mocks/network/mocks.h b/test/mocks/network/mocks.h index b7537a4b9f74..73384f8850b7 100644 --- a/test/mocks/network/mocks.h +++ b/test/mocks/network/mocks.h @@ -225,6 +225,13 @@ class MockListenSocket : public Socket { MOCK_METHOD(void, addOption_, (const Socket::OptionConstSharedPtr& option)); MOCK_METHOD(void, addOptions_, (const Socket::OptionsSharedPtr& options)); MOCK_METHOD(const OptionsSharedPtr&, options, (), (const)); + MOCK_METHOD(IoHandlePtr, socket, (Address::SocketType, Address::Type, Address::IpVersion), + (const)); + MOCK_METHOD(IoHandlePtr, socketForAddrPtr, + (Address::SocketType, const Address::InstanceConstSharedPtr), (const)); + MOCK_METHOD(Api::SysCallIntResult, bind, (const Address::InstanceConstSharedPtr)); + MOCK_METHOD(Api::SysCallIntResult, connect, (const Address::InstanceConstSharedPtr)); + MOCK_METHOD(Api::SysCallIntResult, listen, (int)); IoHandlePtr io_handle_; Address::InstanceConstSharedPtr local_address_; @@ -273,6 +280,13 @@ class MockConnectionSocket : public ConnectionSocket { MOCK_METHOD(Address::SocketType, socketType, (), (const)); MOCK_METHOD(void, close, ()); MOCK_METHOD(bool, isOpen, (), (const)); + MOCK_METHOD(IoHandlePtr, socket, (Address::SocketType, Address::Type, Address::IpVersion), + (const)); + MOCK_METHOD(IoHandlePtr, socketForAddrPtr, + (Address::SocketType, const Address::InstanceConstSharedPtr), (const)); + MOCK_METHOD(Api::SysCallIntResult, bind, (const Address::InstanceConstSharedPtr)); + MOCK_METHOD(Api::SysCallIntResult, connect, (const Address::InstanceConstSharedPtr)); + MOCK_METHOD(Api::SysCallIntResult, listen, (int)); IoHandlePtr io_handle_; Address::InstanceConstSharedPtr local_address_; @@ -380,6 +394,7 @@ class MockIp : public Address::Ip { MOCK_METHOD(Address::Ipv6*, ipv6, (), (const)); MOCK_METHOD(uint32_t, port, (), (const)); MOCK_METHOD(Address::IpVersion, version, (), (const)); + MOCK_METHOD(bool, v6only, (), (const)); }; class MockResolvedAddress : public Address::Instance { @@ -395,6 +410,7 @@ class MockResolvedAddress : public Address::Instance { MOCK_METHOD(Api::SysCallIntResult, bind, (os_fd_t), (const)); MOCK_METHOD(Api::SysCallIntResult, connect, (os_fd_t), (const)); MOCK_METHOD(Address::Ip*, ip, (), (const)); + MOCK_METHOD(Address::Pipe*, pipe, (), (const)); MOCK_METHOD(IoHandlePtr, socket, (Address::SocketType), (const)); MOCK_METHOD(Address::Type, type, (), (const)); MOCK_METHOD(sockaddr*, sockAddr, (), (const)); diff --git a/test/server/filter_chain_benchmark_test.cc b/test/server/filter_chain_benchmark_test.cc index 322a3de92cc2..54935927a5a3 100644 --- a/test/server/filter_chain_benchmark_test.cc +++ b/test/server/filter_chain_benchmark_test.cc @@ -102,6 +102,11 @@ class MockConnectionSocket : public Network::ConnectionSocket { void addOptions(const OptionsSharedPtr&) override {} const OptionsSharedPtr& options() const override { return options_; } void setRequestedServerName(absl::string_view) override {} + Api::SysCallIntResult bind(Network::Address::InstanceConstSharedPtr) override { return {0, 0}; } + Api::SysCallIntResult listen(int) override { return {0, 0}; } + Api::SysCallIntResult connect(const Network::Address::InstanceConstSharedPtr) override { + return {0, 0}; + } private: Network::IoHandlePtr io_handle_; diff --git a/test/test_common/BUILD b/test/test_common/BUILD index 
57e72caf50c4..33f404d67e06 100644 --- a/test/test_common/BUILD +++ b/test/test_common/BUILD @@ -61,6 +61,15 @@ envoy_cc_test_library( ], ) +envoy_cc_test( + name = "network_utility_test", + srcs = ["network_utility_test.cc"], + deps = [ + ":environment_lib", + ":network_utility_lib", + ], +) + envoy_cc_test_library( name = "contention_lib", srcs = ["contention.cc"], diff --git a/test/test_common/network_utility.cc b/test/test_common/network_utility.cc index 494382d8c7cc..3a03ecb4c947 100644 --- a/test/test_common/network_utility.cc +++ b/test/test_common/network_utility.cc @@ -27,17 +27,17 @@ Address::InstanceConstSharedPtr findOrCheckFreePort(Address::InstanceConstShared << (addr_port == nullptr ? "nullptr" : addr_port->asString()); return nullptr; } - IoHandlePtr io_handle = SocketInterface::socket(type, addr_port); + SocketImpl sock(type, addr_port); // Not setting REUSEADDR, therefore if the address has been recently used we won't reuse it here. // However, because we're going to use the address while checking if it is available, we'll need // to set REUSEADDR on listener sockets created by tests using an address validated by this means. - Api::SysCallIntResult result = addr_port->bind(io_handle->fd()); + Api::SysCallIntResult result = sock.bind(addr_port); const char* failing_fn = nullptr; if (result.rc_ != 0) { failing_fn = "bind"; } else if (type == Address::SocketType::Stream) { // Try listening on the port also, if the type is TCP. - result = Api::OsSysCallsSingleton::get().listen(io_handle->fd(), 1); + result = sock.listen(1); if (result.rc_ != 0) { failing_fn = "listen"; } @@ -57,8 +57,9 @@ Address::InstanceConstSharedPtr findOrCheckFreePort(Address::InstanceConstShared } // If the port we bind is zero, then the OS will pick a free port for us (assuming there are // any), and we need to find out the port number that the OS picked so we can return it. + // TODO(fcoras) maybe move to SocketImpl if (addr_port->ip()->port() == 0) { - return SocketInterface::addressFromFd(io_handle->fd()); + return SocketInterface::addressFromFd(sock.ioHandle().fd()); } return addr_port; } @@ -149,13 +150,13 @@ Address::InstanceConstSharedPtr getAnyAddress(const Address::IpVersion version, bool supportsIpVersion(const Address::IpVersion version) { Address::InstanceConstSharedPtr addr = getCanonicalLoopbackAddress(version); - IoHandlePtr io_handle = SocketInterface::socket(Address::SocketType::Stream, addr); - if (0 != addr->bind(io_handle->fd()).rc_) { + SocketImpl sock(Address::SocketType::Stream, addr); + if (0 != sock.bind(addr).rc_) { // Socket bind failed. 
- RELEASE_ASSERT(io_handle->close().err_ == nullptr, ""); + RELEASE_ASSERT(sock.ioHandle().close().err_ == nullptr, ""); return false; } - RELEASE_ASSERT(io_handle->close().err_ == nullptr, ""); + RELEASE_ASSERT(sock.ioHandle().close().err_ == nullptr, ""); return true; } @@ -171,19 +172,20 @@ std::string ipVersionToDnsFamily(Network::Address::IpVersion version) { NOT_REACHED_GCOVR_EXCL_LINE; } -std::pair +std::pair bindFreeLoopbackPort(Address::IpVersion version, Address::SocketType type) { Address::InstanceConstSharedPtr addr = getCanonicalLoopbackAddress(version); - IoHandlePtr io_handle = SocketInterface::socket(type, addr); - Api::SysCallIntResult result = addr->bind(io_handle->fd()); + SocketPtr sock = std::make_unique(type, addr); + Api::SysCallIntResult result = sock->bind(addr); if (0 != result.rc_) { - io_handle->close(); + sock->close(); std::string msg = fmt::format("bind failed for address {} with error: {} ({})", addr->asString(), strerror(result.errno_), result.errno_); ADD_FAILURE() << msg; throw EnvoyException(msg); } - return std::make_pair(SocketInterface::addressFromFd(io_handle->fd()), std::move(io_handle)); + + return std::make_pair(SocketInterface::addressFromFd(sock->ioHandle().fd()), std::move(sock)); } TransportSocketPtr createRawBufferSocket() { return std::make_unique(); } diff --git a/test/test_common/network_utility.h b/test/test_common/network_utility.h index 965ec8b6dfad..76c01a87cc62 100644 --- a/test/test_common/network_utility.h +++ b/test/test_common/network_utility.h @@ -114,7 +114,7 @@ std::string ipVersionToDnsFamily(Network::Address::IpVersion version); * @param type the type of socket to be bound. * @returns the address and the fd of the socket bound to that address. */ -std::pair +std::pair bindFreeLoopbackPort(Address::IpVersion version, Address::SocketType type); /** diff --git a/test/test_common/network_utility_test.cc b/test/test_common/network_utility_test.cc index 195fc0991b11..b95e29a94617 100644 --- a/test/test_common/network_utility_test.cc +++ b/test/test_common/network_utility_test.cc @@ -18,7 +18,7 @@ class NetworkUtilityTest : public testing::TestWithParam { }; INSTANTIATE_TEST_SUITE_P(IpVersions, NetworkUtilityTest, - testing::ValuesIn(TestEnvironment::getIpTestParameters())); + testing::ValuesIn(TestEnvironment::getIpVersionsForTest())); // This validates Network::Test::bindFreeLoopbackPort behaves as desired, i.e. that we don't have // a significant risk of flakes due to re-use of a port over short time intervals. We can't drive @@ -36,7 +36,7 @@ TEST_P(NetworkUtilityTest, DISABLED_ValidateBindFreeLoopbackPort) { const size_t kLimit = 50; for (size_t n = 0; n < kLimit; ++n) { auto addr_fd = Network::Test::bindFreeLoopbackPort(version_, Address::SocketType::Stream); - Api::OsSysCallsSingleton::get().close(addr_fd.second); + addr_fd.second->close(); auto addr = addr_fd.first->asString(); auto search = seen.find(addr); if (search != seen.end()) { From a78a6d1edd50be17f8fa0790ae27502580434977 Mon Sep 17 00:00:00 2001 From: Stephan Zuercher Date: Thu, 28 May 2020 09:00:58 -0700 Subject: [PATCH 240/909] test: use non-default base-id in tests (#11016) Prevents coverage failures with an error related to using the default base-id 0. Likely because two processes started Envoy with the same base-id. Use the shell's PID or bazel's TEST_RANDOM_SEED to avoid this situation. Disables hot-restart in a death test where the fork used for the test causes base id collisions. 
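In essence, each test group gets its own disjoint base-id range and the Bazel-provided seed or shard index picks an offset inside it. A minimal standalone sketch of that derivation (the patch adds the real helper as TestEnvironment::chooseBaseId below; this sketch only assumes the standard TEST_RANDOM_SEED / TEST_SHARD_INDEX environment variables):

#include <cstdint>
#include <cstdlib>
#include <string>

// Sketch only: derive a --base-id value that is unique per test group and per concurrent run.
std::string chooseTestBaseId(uint64_t test_base_id) {
  test_base_id *= 1000000; // reserve a disjoint range per test group
  const char* seed = std::getenv("TEST_RANDOM_SEED");  // set when using --runs_per_test
  const char* shard = std::getenv("TEST_SHARD_INDEX"); // set for sharded (e.g. coverage) runs
  const char* mutator = seed != nullptr ? seed : shard;
  if (mutator != nullptr) {
    test_base_id += std::strtoull(mutator, nullptr, 10);
  }
  return std::to_string(test_base_id);
}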
Risk Level: low Testing: fixes test Docs Changes: n/a Release Notes: n/a Signed-off-by: Stephan Zuercher --- source/server/hot_restarting_base.cc | 13 +++++++-- source/server/hot_restarting_base.h | 1 + test/config_test/config_test.cc | 2 -- test/exe/main_common_test.cc | 36 +++++------------------- test/integration/hotrestart_test.sh | 10 +++---- test/integration/run_envoy_test.sh | 9 ++++-- test/integration/server.cc | 1 + test/integration/server.h | 2 +- test/server/hot_restart_impl_test.cc | 3 ++ test/test_common/environment.cc | 25 +++++++++++++++++ test/test_common/environment.h | 41 ++++++++++++++++++++++++++++ 11 files changed, 101 insertions(+), 42 deletions(-) diff --git a/source/server/hot_restarting_base.cc b/source/server/hot_restarting_base.cc index 07cfbd94bfd9..2d96db059b74 100644 --- a/source/server/hot_restarting_base.cc +++ b/source/server/hot_restarting_base.cc @@ -10,6 +10,14 @@ using HotRestartMessage = envoy::HotRestartMessage; static constexpr uint64_t MaxSendmsgSize = 4096; +HotRestartingBase::~HotRestartingBase() { + if (my_domain_socket_ != -1) { + Api::OsSysCalls& os_sys_calls = Api::OsSysCallsSingleton::get(); + Api::SysCallIntResult result = os_sys_calls.close(my_domain_socket_); + ASSERT(result.rc_ == 0); + } +} + void HotRestartingBase::initDomainSocketAddress(sockaddr_un* address) { memset(address, 0, sizeof(*address)); address->sun_family = AF_UNIX; @@ -40,8 +48,9 @@ void HotRestartingBase::bindDomainSocket(uint64_t id, const std::string& role) { Api::SysCallIntResult result = os_sys_calls.bind(my_domain_socket_, reinterpret_cast(&address), sizeof(address)); if (result.rc_ != 0) { - throw EnvoyException( - fmt::format("unable to bind domain socket with id={} (see --base-id option)", id)); + throw EnvoyException(fmt::format( + "unable to bind domain socket with base_id={}, id={}, errno={} (see --base-id option)", + base_id_, id, result.errno_)); } } diff --git a/source/server/hot_restarting_base.h b/source/server/hot_restarting_base.h index 933c41adf6a7..1ef303d983e8 100644 --- a/source/server/hot_restarting_base.h +++ b/source/server/hot_restarting_base.h @@ -24,6 +24,7 @@ namespace Server { class HotRestartingBase { protected: HotRestartingBase(uint64_t base_id) : base_id_(base_id) {} + ~HotRestartingBase(); void initDomainSocketAddress(sockaddr_un* address); sockaddr_un createDomainSocketAddress(uint64_t id, const std::string& role); diff --git a/test/config_test/config_test.cc b/test/config_test/config_test.cc index 9fb72818f817..71f30f2eb11a 100644 --- a/test/config_test/config_test.cc +++ b/test/config_test/config_test.cc @@ -168,8 +168,6 @@ uint32_t run(const std::string& directory) { ENVOY_LOG_MISC(info, "testing {}.\n", filename); OptionsImpl options( Envoy::Server::createTestOptionsImpl(filename, "", Network::Address::IpVersion::v6)); - // Avoid contention issues with other tests over the hot restart domain socket. 
- options.setHotRestartDisabled(true); ConfigTest test1(options); envoy::config::bootstrap::v3::Bootstrap bootstrap; Server::InstanceUtil::loadBootstrapConfig(bootstrap, options, diff --git a/test/exe/main_common_test.cc b/test/exe/main_common_test.cc index 2632e657280a..1287225f1829 100644 --- a/test/exe/main_common_test.cc +++ b/test/exe/main_common_test.cc @@ -38,34 +38,9 @@ class MainCommonTest : public testing::TestWithParam argv_; }; INSTANTIATE_TEST_SUITE_P(IpVersions, MainCommonTest, @@ -131,6 +106,9 @@ TEST_P(MainCommonDeathTest, OutOfMemoryHandler) { ENVOY_LOG_MISC(critical, "MainCommonTest::OutOfMemoryHandler not supported by this compiler configuration"); #else + // Death test forks and restarts the test with special arguments. Since we're meant to choose + // the same base-id on the second attempt we can't succeed with hot restart enabled. + addArg("--disable-hot-restart"); MainCommon main_common(argc(), argv()); #if !defined(WIN32) // Resolving symbols for a backtrace takes longer than the timeout in coverage builds, diff --git a/test/integration/hotrestart_test.sh b/test/integration/hotrestart_test.sh index 13a5a59731d5..665ea44a264a 100755 --- a/test/integration/hotrestart_test.sh +++ b/test/integration/hotrestart_test.sh @@ -64,12 +64,10 @@ cat "${TEST_SRCDIR}/envoy"/test/config/integration/server.yaml | cat > "${HOT_RESTART_JSON_REUSE_PORT}" JSON_TEST_ARRAY+=("${HOT_RESTART_JSON_REUSE_PORT}") -# Enable this test to work with --runs_per_test -if [[ -z "${TEST_RANDOM_SEED}" ]]; then - BASE_ID=1 -else - BASE_ID="${TEST_RANDOM_SEED}" -fi +# Use TEST_RANDOM_SEED or TEST_SHARD_INDEX to choose a base id. This +# replicates the logic of TestEnvironment::chooseBaseId(1). See that method +# for details. +let BASE_ID=1000000+${TEST_RANDOM_SEED:-${TEST_SHARD_INDEX:-0}} echo "Hot restart test using --base-id ${BASE_ID}" diff --git a/test/integration/run_envoy_test.sh b/test/integration/run_envoy_test.sh index a84ec07be4d0..2dda6f3494f9 100755 --- a/test/integration/run_envoy_test.sh +++ b/test/integration/run_envoy_test.sh @@ -2,13 +2,18 @@ source "${TEST_SRCDIR}/envoy/test/integration/test_utility.sh" +# Use TEST_RANDOM_SEED or TEST_SHARD_INDEX to choose a base id. This +# replicates the logic of TestEnvironment::chooseBaseId(2). See that method +# for details. +let BASE_ID=2000000+${TEST_RANDOM_SEED:-${TEST_SHARD_INDEX:-0}} + function expect_fail_with_error() { log="${TEST_TMPDIR}/envoy.log" rm -f "$log" expected_error="$1" shift - echo ${ENVOY_BIN} "$@" ">&" "$log" - ${ENVOY_BIN} "$@" >& "$log" + echo ${ENVOY_BIN} --base-id "${BASE_ID}" "$@" ">&" "$log" + ${ENVOY_BIN} --base-id "${BASE_ID}" "$@" >& "$log" EXIT_CODE=$? 
cat "$log" check [ $EXIT_CODE -eq 1 ] diff --git a/test/integration/server.cc b/test/integration/server.cc index 7ce22bfbe004..755ca829d4e7 100644 --- a/test/integration/server.cc +++ b/test/integration/server.cc @@ -44,6 +44,7 @@ OptionsImpl createTestOptionsImpl(const std::string& config_path, const std::str test_options.setRejectUnknownFieldsDynamic(validation_config.reject_unknown_dynamic_fields); test_options.setIgnoreUnknownFieldsDynamic(validation_config.ignore_unknown_dynamic_fields); test_options.setConcurrency(concurrency); + test_options.setHotRestartDisabled(true); return test_options; } diff --git a/test/integration/server.h b/test/integration/server.h index 5f2274487bd9..9a4367263adb 100644 --- a/test/integration/server.h +++ b/test/integration/server.h @@ -38,7 +38,7 @@ struct FieldValidationConfig { bool ignore_unknown_dynamic_fields = false; }; -// Create OptionsImpl structures suitable for tests. +// Create OptionsImpl structures suitable for tests. Disables hot restart. OptionsImpl createTestOptionsImpl(const std::string& config_path, const std::string& config_yaml, Network::Address::IpVersion ip_version, FieldValidationConfig validation_config = FieldValidationConfig(), diff --git a/test/server/hot_restart_impl_test.cc b/test/server/hot_restart_impl_test.cc index 1b82f08ab6af..d9eb46b24c51 100644 --- a/test/server/hot_restart_impl_test.cc +++ b/test/server/hot_restart_impl_test.cc @@ -44,6 +44,9 @@ class HotRestartImplTest : public testing::Test { // Test we match the correct stat with empty-slots before, after, or both. hot_restart_ = std::make_unique(options_); hot_restart_->drainParentListeners(); + + // We close both sockets. + EXPECT_CALL(os_sys_calls_, close(_)).Times(2); } Api::MockOsSysCalls os_sys_calls_; diff --git a/test/test_common/environment.cc b/test/test_common/environment.cc index 99b88671f36e..b4a73c5248dd 100644 --- a/test/test_common/environment.cc +++ b/test/test_common/environment.cc @@ -28,6 +28,7 @@ #include "absl/debugging/symbolize.h" #include "absl/strings/match.h" +#include "absl/strings/str_format.h" #include "gtest/gtest.h" #include "spdlog/spdlog.h" @@ -193,6 +194,30 @@ std::string TestEnvironment::getCheckedEnvVar(const std::string& var) { return optional.value(); } +std::string TestEnvironment::chooseBaseId(uint64_t test_base_id) { + ASSERT(test_base_id >= 1); + ASSERT(test_base_id <= 1L << 44); // Leave room to multiple by 1000000. + + test_base_id *= 1000000; + + auto test_random_seed = TestEnvironment::getOptionalEnvVar("TEST_RANDOM_SEED"); + auto test_shard_index = TestEnvironment::getOptionalEnvVar("TEST_SHARD_INDEX"); + + if (test_random_seed) { + int mutator = 0; + if (absl::SimpleAtoi(test_random_seed.value(), &mutator)) { + test_base_id += mutator; + } + } else if (test_shard_index) { + int mutator = 0; + if (absl::SimpleAtoi(test_shard_index.value(), &mutator)) { + test_base_id += mutator; + } + } + + return absl::StrFormat("%d", test_base_id); +} + void TestEnvironment::initializeTestMain(char* program_name) { #ifdef WIN32 _set_abort_behavior(0, _WRITE_ABORT_MSG | _CALL_REPORTFAULT); diff --git a/test/test_common/environment.h b/test/test_common/environment.h index 50c097a9ad27..bc0a30b23ce7 100644 --- a/test/test_common/environment.h +++ b/test/test_common/environment.h @@ -72,6 +72,47 @@ class TestEnvironment { */ static std::string getCheckedEnvVar(const std::string& var); + /** + * Generates an appropriate base-id for use as the base id option to an Envoy + * server. 
Each test that requires a unique test base id should invoke this + * method with a unique value to get a string value appropriate for use as the + * value of the --base-id command line argument. In general, tests that + * create an Envoy::Server::HotRestartImpl without mocks should use this + * function. If all test cases within a grouping, say a single test source + * file, are executed consecutively, they may share a test base id. Tests in + * separate groupings cannot share a test base id for reasons described + * below. Special care must be taken with death tests -- the gtest framework + * will fork and invoke a new copy of the same test binary with additional + * command line flags. This can result in the re-use of a given base-id from + * another process (which causes a failure). + * + * We require a unique test base id because random seeds are reused across + * multiple test targets. For example, running: + * bazel --runs_per_test=3 //test:foo_test //test:bar_test + * results in 3 unique seeds. Each test target is run once with each seed. + * Similarly, in coverage tests, the same test may be run concurrently in + * multiple coverage shards. + * + * This method uses the given test base id and one of the following + * environment variables: + * - TEST_RANDOM_SEED is used to handle concurrent runs via the Bazel + * --runs_per_test flag + * - TEST_SHARD_INDEX is used to handle concurrent runs when tests are run in + * shards, such as in coverage testing + * + * This algorithm is re-implemented in the following test scripts: + * - test/integration/hotrestart_test.sh + * - test/integration/run_envoy_test.sh + * + * Currently the in-use test base ids are: + * 1: test/integration/hotrestart_test.sh + * 2: test/integration/run_envoy_test.sh + * 3: test/exe/main_common_test.cc + * + * @param test_base_id a uint64_t used to uniquely identify a group of tests + */ + static std::string chooseBaseId(uint64_t test_base_id); + /** * Obtain a private writable temporary directory. * @return const std::string& with the path to the temporary directory. From ece1b6aa21c940477d3587848bad0c153cf05987 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Thu, 28 May 2020 15:19:24 -0400 Subject: [PATCH 241/909] http: fixing a stream teardown issue with watermarks (#11332) Fixing a bug where the router filter was attempting to update codec stream watermark state after the request was complete.
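In essence, the low-watermark callback is now guarded so that a stream which has already been destroyed never touches the codec again. A simplified sketch of the guarded callback against Envoy's codec interface (member names here are illustrative; the real change is in the conn_manager_impl.cc hunk below):

#include "envoy/http/codec.h"

// Sketch only: skip re-enabling reads once the stream has been torn down.
struct WatermarkCallbackSketch {
  bool stream_destroyed_{false};                    // set once the stream is torn down
  Envoy::Http::ResponseEncoder* encoder_{nullptr};  // valid only while the stream is alive
  void onDecoderFilterBelowWriteBufferLowWatermark() {
    if (stream_destroyed_) {
      return; // the codec unwinds any remaining readDisable() calls on teardown
    }
    encoder_->getStream().readDisable(false);
  }
};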
Risk Level: Medium (HCM changes) Testing: new integration test Docs Changes: n/a Release Notes: n/a Fixes envoyproxy/envoy-setec#152 Signed-off-by: Alyssa Wilk --- source/common/http/conn_manager_impl.cc | 6 ++- test/integration/BUILD | 1 + test/integration/filters/BUILD | 14 +++++++ .../filters/backpressure_filter.cc | 42 +++++++++++++++++++ .../idle_timeout_integration_test.cc | 21 ++++++++++ 5 files changed, 83 insertions(+), 1 deletion(-) create mode 100644 test/integration/filters/backpressure_filter.cc diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index f10d3052c567..2d19400b6271 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -2419,7 +2419,11 @@ void ConnectionManagerImpl::ActiveStreamDecoderFilter::requestDataDrained() { void ConnectionManagerImpl::ActiveStreamDecoderFilter:: onDecoderFilterBelowWriteBufferLowWatermark() { ENVOY_STREAM_LOG(debug, "Read-enabling downstream stream due to filter callbacks.", parent_); - parent_.response_encoder_->getStream().readDisable(false); + // If the state is destroyed, the codec's stream is already torn down. On + // teardown the codec will unwind any remaining read disable calls. + if (!parent_.state_.destroyed_) { + parent_.response_encoder_->getStream().readDisable(false); + } parent_.connection_manager_.stats_.named_.downstream_flow_control_resumed_reading_total_.inc(); } diff --git a/test/integration/BUILD b/test/integration/BUILD index f4d8ff9263d3..10a3d147de0f 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -497,6 +497,7 @@ envoy_cc_test( tags = ["fails_on_windows"], deps = [ ":http_protocol_integration_lib", + "//test/integration/filters:backpressure_filter_config_lib", "//test/test_common:test_time_lib", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", ], ) diff --git a/test/integration/filters/BUILD b/test/integration/filters/BUILD index 1f320a04a61c..950370da9e70 100644 --- a/test/integration/filters/BUILD +++ b/test/integration/filters/BUILD @@ -52,6 +52,20 @@ envoy_cc_test_library( ], ) +envoy_cc_test_library( + name = "backpressure_filter_config_lib", + srcs = [ + "backpressure_filter.cc", + ], + deps = [ + "//include/envoy/http:filter_interface", + "//include/envoy/registry", + "//include/envoy/server:filter_config_interface", + "//source/extensions/filters/http/common:pass_through_filter_lib", + "//test/extensions/filters/http/common:empty_http_filter_config_lib", + ], +) + envoy_cc_test_library( name = "clear_route_cache_filter_lib", srcs = [ diff --git a/test/integration/filters/backpressure_filter.cc b/test/integration/filters/backpressure_filter.cc new file mode 100644 index 000000000000..1d6f8ce92be5 --- /dev/null +++ b/test/integration/filters/backpressure_filter.cc @@ -0,0 +1,42 @@ +#include + +#include "envoy/http/filter.h" +#include "envoy/registry/registry.h" +#include "envoy/server/filter_config.h" + +#include "extensions/filters/http/common/pass_through_filter.h" + +#include "test/extensions/filters/http/common/empty_http_filter_config.h" + +namespace Envoy { + +// A filter that reports being above the high watermark as soon as request headers +// arrive and only reports dropping below the low watermark when the stream is destroyed.
+class BackpressureFilter : public Http::PassThroughFilter { +public: + void onDestroy() override { decoder_callbacks_->onDecoderFilterBelowWriteBufferLowWatermark(); } + + Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap&, bool) override { + decoder_callbacks_->onDecoderFilterAboveWriteBufferHighWatermark(); + return Http::FilterHeadersStatus::Continue; + } +}; + +class BackpressureConfig : public Extensions::HttpFilters::Common::EmptyHttpFilterConfig { +public: + BackpressureConfig() : EmptyHttpFilterConfig("backpressure-filter") {} + + Http::FilterFactoryCb createFilter(const std::string&, + Server::Configuration::FactoryContext&) override { + return [](Http::FilterChainFactoryCallbacks& callbacks) -> void { + callbacks.addStreamFilter(std::make_shared<::Envoy::BackpressureFilter>()); + }; + } +}; + +// perform static registration +static Registry::RegisterFactory + register_; + +} // namespace Envoy diff --git a/test/integration/idle_timeout_integration_test.cc b/test/integration/idle_timeout_integration_test.cc index 0591d125be04..8c906aa51ada 100644 --- a/test/integration/idle_timeout_integration_test.cc +++ b/test/integration/idle_timeout_integration_test.cc @@ -184,6 +184,27 @@ TEST_P(IdleTimeoutIntegrationTest, PerStreamIdleTimeoutAfterDownstreamHeaders) { EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("stream_idle_timeout")); } +// Per-stream idle timeout with reads disabled. +TEST_P(IdleTimeoutIntegrationTest, PerStreamIdleTimeoutWithLargeBuffer) { + config_helper_.addFilter(R"EOF( + name: backpressure-filter + )EOF"); + enable_per_stream_idle_timeout_ = true; + initialize(); + + fake_upstreams_[0]->set_allow_unexpected_disconnects(true); + codec_client_ = makeHttpConnection(makeClientConnection((lookupPort("http")))); + auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); + response->waitForEndStream(); + EXPECT_TRUE(response->complete()); + + // Make sure that for HTTP/1.1 reads are enabled even though the first request + // ended in the "backed up" state. + auto response2 = codec_client_->makeHeaderOnlyRequest(default_request_headers_); + response2->waitForEndStream(); + EXPECT_TRUE(response2->complete()); +} + // Per-stream idle timeout after having sent downstream head request. TEST_P(IdleTimeoutIntegrationTest, PerStreamIdleTimeoutHeadRequestAfterDownstreamHeadRequest) { enable_per_stream_idle_timeout_ = true; From 4ed772642d836689494b8555e529ead28df3c7a6 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Thu, 28 May 2020 17:35:28 -0400 Subject: [PATCH 242/909] docs: adding some debug FAQs (#11347) Signed-off-by: Alyssa Wilk --- docs/root/faq/configuration/flow_control.rst | 2 ++ .../debugging/why_is_envoy_sending_413s.rst | 6 ++++ ...hy_is_envoy_sending_internal_responses.rst | 9 +++++ .../debugging/why_is_my_route_not_found.rst | 33 +++++++++++++++++++ docs/root/faq/overview.rst | 9 +++++ 5 files changed, 59 insertions(+) create mode 100644 docs/root/faq/debugging/why_is_envoy_sending_413s.rst create mode 100644 docs/root/faq/debugging/why_is_envoy_sending_internal_responses.rst create mode 100644 docs/root/faq/debugging/why_is_my_route_not_found.rst diff --git a/docs/root/faq/configuration/flow_control.rst b/docs/root/faq/configuration/flow_control.rst index 6f7b90163d11..9bbce146a95e 100644 --- a/docs/root/faq/configuration/flow_control.rst +++ b/docs/root/faq/configuration/flow_control.rst @@ -1,3 +1,5 @@ +.. _faq_flow_control: + How do I configure flow control? 
================================ diff --git a/docs/root/faq/debugging/why_is_envoy_sending_413s.rst b/docs/root/faq/debugging/why_is_envoy_sending_413s.rst new file mode 100644 index 000000000000..39769282ea0a --- /dev/null +++ b/docs/root/faq/debugging/why_is_envoy_sending_413s.rst @@ -0,0 +1,6 @@ +.. _faq_why_is_envoy_sending_413: + +Why is Envoy sending 413s? +========================== + +Envoy by default imposes limits on how much it will buffer for a given request. Generally, Envoy filters are designed to be streaming, and will pass data from downstream to upstream, or will simply pause processing while waiting for an external event (e.g. doing auth checks). Some filters, for example the buffer filter, require buffering the full request or response. If a request body is too large to buffer, but buffering is required by the filter, Envoy will send a 413. The buffer limits can be increased at the risk of making OOMs more likely. Please see the :ref:`flow control docs <faq_flow_control>` for details. diff --git a/docs/root/faq/debugging/why_is_envoy_sending_internal_responses.rst b/docs/root/faq/debugging/why_is_envoy_sending_internal_responses.rst new file mode 100644 index 000000000000..eaaeca31c290 --- /dev/null +++ b/docs/root/faq/debugging/why_is_envoy_sending_internal_responses.rst @@ -0,0 +1,9 @@ +.. _why_is_envoy_sending_internal_responses: + +Why is Envoy sending internal responses? +======================================== + +One of the easiest ways to get an understanding of why Envoy sends a given local response is to turn on trace logging. If you can run your instance with “-l trace” you will slow Envoy down significantly, but get detailed information on various events in the lifetime of each stream and connection. Any time Envoy sends an internally generated response it will log at the *debug* level “Sending local reply with details [unique reason]” which gives you information about why the local response was sent. Each individual response detail is used at one point in the code base, be it a codec validation check or a failed route match. + +If turning on debug logging is not practical, the response details can be added to the access logs using ``%RESPONSE_CODE_DETAILS%``, and again it will let you pinpoint the exact reason a given response was generated. + diff --git a/docs/root/faq/debugging/why_is_my_route_not_found.rst b/docs/root/faq/debugging/why_is_my_route_not_found.rst new file mode 100644 index 000000000000..17b6447c2f7e --- /dev/null +++ b/docs/root/faq/debugging/why_is_my_route_not_found.rst @@ -0,0 +1,33 @@ +.. _why_is_my_route_not_found: + +Why is my route not found? +========================== + +Once you've drilled down into Envoy responses and discovered Envoy generating local responses with the message +"Sending local reply with details route_not_found" the next question is *why*? + +Often you can look at your route configuration and the headers sent, and see what is missing. +One often overlooked problem is host:port matching. If your route configuration matches the domain +www.host.com but the client is sending requests to www.host.com:443, it will not match. + +If this is the problem you are encountering you can solve it in one of two ways. First, by changing your +configuration to match host:port pairs, going from + +.. code-block:: yaml + + domains: + - "www.host.com" + +to + +.. code-block:: yaml + + domains: + - "www.host.com" + - "www.host.com:80" + - "www.host.com:443" + +The other is to strip ports entirely using :ref:`stripping port from host header `.
Note that this will only strip port 80 from insecure requests and 443 from secure requests. It does +not just strip ports when routes are matched, but changes +the host sent downstream to also not include the port. + diff --git a/docs/root/faq/overview.rst b/docs/root/faq/overview.rst index 3769b53f4766..48267fb00684 100644 --- a/docs/root/faq/overview.rst +++ b/docs/root/faq/overview.rst @@ -27,6 +27,15 @@ API api/why_versioning api/incremental +Debugging +--------- +.. toctree:: + :maxdepth: 2 + + debugging/why_is_envoy_sending_internal_responses + debugging/why_is_envoy_sending_413s + debugging/why_is_my_route_not_found + Performance ----------- From c7846f3846c2640a6eebf0ff68efbf927fa6a393 Mon Sep 17 00:00:00 2001 From: Wayne Zhang Date: Thu, 28 May 2020 14:53:20 -0700 Subject: [PATCH 243/909] Moved access_log_formatter to a new folder formatter (#11330) access_log_formatter is used by access_log and local_reply. So it is renamed as substitution_formatter. Signed-off-by: Wayne Zhang --- include/envoy/access_log/access_log.h | 67 -------------- include/envoy/formatter/BUILD | 19 ++++ .../envoy/formatter/substitution_formatter.h | 81 +++++++++++++++++ source/common/access_log/BUILD | 19 ---- source/common/access_log/access_log_impl.cc | 1 - source/common/common/BUILD | 12 --- source/common/formatter/BUILD | 39 +++++++++ .../substitution_format_string.cc | 14 +-- .../substitution_format_string.h | 10 ++- .../substitution_formatter.cc} | 42 ++++----- .../substitution_formatter.h} | 16 ++-- source/common/http/BUILD | 1 - source/common/http/conn_manager_utility.cc | 1 - source/common/local_reply/BUILD | 4 +- source/common/local_reply/local_reply.cc | 10 +-- source/common/router/BUILD | 2 +- source/common/router/header_formatter.cc | 8 +- source/common/router/header_formatter.h | 4 +- source/common/tracing/BUILD | 2 +- source/common/tracing/http_tracer_impl.cc | 6 +- source/extensions/access_loggers/file/BUILD | 2 +- .../extensions/access_loggers/file/config.cc | 15 ++-- .../file/file_access_log_impl.cc | 2 +- .../file/file_access_log_impl.h | 6 +- source/server/admin/BUILD | 2 +- source/server/admin/admin.cc | 4 +- test/common/access_log/BUILD | 66 -------------- test/common/common/BUILD | 12 --- test/common/formatter/BUILD | 87 +++++++++++++++++++ .../substitution_format_string_test.cc | 4 +- ...s_log_formatter_fuzz_test-4673648219652096 | 0 ...s_log_formatter_fuzz_test-5630958620901376 | 0 ...s_log_formatter_fuzz_test-5633770020929536 | 0 ...s_log_formatter_fuzz_test-5645869313687552 | 0 ...s_log_formatter_fuzz_test-5701824317751296 | 0 ...s_log_formatter_fuzz_test-5758486359572480 | 0 ...der_parser_fuzz_test-5633924724424704.fuzz | 0 .../dynamic_metadata | 0 .../substitution_formatter_corpus}/empty | 0 .../substitution_formatter_corpus}/headers | 0 .../substitution_formatter_corpus}/invalid_0 | 0 .../substitution_formatter_corpus}/invalid_1 | 0 .../substitution_formatter_corpus}/invalid_10 | 0 .../substitution_formatter_corpus}/invalid_11 | 0 .../substitution_formatter_corpus}/invalid_12 | 0 .../substitution_formatter_corpus}/invalid_13 | 0 .../substitution_formatter_corpus}/invalid_14 | 0 .../substitution_formatter_corpus}/invalid_15 | 0 .../substitution_formatter_corpus}/invalid_16 | 0 .../substitution_formatter_corpus}/invalid_17 | 0 .../substitution_formatter_corpus}/invalid_18 | 0 .../substitution_formatter_corpus}/invalid_19 | 0 .../substitution_formatter_corpus}/invalid_2 | 0 .../substitution_formatter_corpus}/invalid_3 | 0 .../substitution_formatter_corpus}/invalid_4 | 0
.../substitution_formatter_corpus}/invalid_5 | 0 .../substitution_formatter_corpus}/invalid_6 | 0 .../substitution_formatter_corpus}/invalid_7 | 0 .../substitution_formatter_corpus}/invalid_8 | 0 .../substitution_formatter_corpus}/invalid_9 | 0 .../plain_string | 0 .../response_code | 0 .../start_time_0 | 0 .../start_time_1 | 0 .../start_time_2 | 0 .../start_time_3 | 0 .../upstream_local_address | 0 .../substitution_formatter_fuzz.proto} | 4 +- .../substitution_formatter_fuzz_test.cc} | 10 +-- .../substitution_formatter_fuzz_test.dict} | 0 .../substitution_formatter_speed_test.cc} | 14 +-- .../substitution_formatter_test.cc} | 68 +++++++-------- test/common/http/BUILD | 2 +- test/common/http/conn_manager_impl_test.cc | 6 +- tools/code_format/check_format.py | 2 +- 75 files changed, 359 insertions(+), 305 deletions(-) create mode 100644 include/envoy/formatter/BUILD create mode 100644 include/envoy/formatter/substitution_formatter.h create mode 100644 source/common/formatter/BUILD rename source/common/{common => formatter}/substitution_format_string.cc (75%) rename source/common/{common => formatter}/substitution_format_string.h (68%) rename source/common/{access_log/access_log_formatter.cc => formatter/substitution_formatter.cc} (96%) rename source/common/{access_log/access_log_formatter.h => formatter/substitution_formatter.h} (97%) create mode 100644 test/common/formatter/BUILD rename test/common/{common => formatter}/substitution_format_string_test.cc (96%) rename test/common/{access_log/access_log_formatter_corpus => formatter/substitution_formatter_corpus}/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-4673648219652096 (100%) rename test/common/{access_log/access_log_formatter_corpus => formatter/substitution_formatter_corpus}/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5630958620901376 (100%) rename test/common/{access_log/access_log_formatter_corpus => formatter/substitution_formatter_corpus}/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5633770020929536 (100%) rename test/common/{access_log/access_log_formatter_corpus => formatter/substitution_formatter_corpus}/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5645869313687552 (100%) rename test/common/{access_log/access_log_formatter_corpus => formatter/substitution_formatter_corpus}/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5701824317751296 (100%) rename test/common/{access_log/access_log_formatter_corpus => formatter/substitution_formatter_corpus}/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5758486359572480 (100%) rename test/common/{access_log/access_log_formatter_corpus => formatter/substitution_formatter_corpus}/clusterfuzz-testcase-minimized-header_parser_fuzz_test-5633924724424704.fuzz (100%) rename test/common/{access_log/access_log_formatter_corpus => formatter/substitution_formatter_corpus}/dynamic_metadata (100%) rename test/common/{access_log/access_log_formatter_corpus => formatter/substitution_formatter_corpus}/empty (100%) rename test/common/{access_log/access_log_formatter_corpus => formatter/substitution_formatter_corpus}/headers (100%) rename test/common/{access_log/access_log_formatter_corpus => formatter/substitution_formatter_corpus}/invalid_0 (100%) rename test/common/{access_log/access_log_formatter_corpus => formatter/substitution_formatter_corpus}/invalid_1 (100%) rename test/common/{access_log/access_log_formatter_corpus => formatter/substitution_formatter_corpus}/invalid_10 (100%) rename 
test/common/{access_log/access_log_formatter_corpus => formatter/substitution_formatter_corpus}/invalid_11 (100%) rename test/common/{access_log/access_log_formatter_corpus => formatter/substitution_formatter_corpus}/invalid_12 (100%) rename test/common/{access_log/access_log_formatter_corpus => formatter/substitution_formatter_corpus}/invalid_13 (100%) rename test/common/{access_log/access_log_formatter_corpus => formatter/substitution_formatter_corpus}/invalid_14 (100%) rename test/common/{access_log/access_log_formatter_corpus => formatter/substitution_formatter_corpus}/invalid_15 (100%) rename test/common/{access_log/access_log_formatter_corpus => formatter/substitution_formatter_corpus}/invalid_16 (100%) rename test/common/{access_log/access_log_formatter_corpus => formatter/substitution_formatter_corpus}/invalid_17 (100%) rename test/common/{access_log/access_log_formatter_corpus => formatter/substitution_formatter_corpus}/invalid_18 (100%) rename test/common/{access_log/access_log_formatter_corpus => formatter/substitution_formatter_corpus}/invalid_19 (100%) rename test/common/{access_log/access_log_formatter_corpus => formatter/substitution_formatter_corpus}/invalid_2 (100%) rename test/common/{access_log/access_log_formatter_corpus => formatter/substitution_formatter_corpus}/invalid_3 (100%) rename test/common/{access_log/access_log_formatter_corpus => formatter/substitution_formatter_corpus}/invalid_4 (100%) rename test/common/{access_log/access_log_formatter_corpus => formatter/substitution_formatter_corpus}/invalid_5 (100%) rename test/common/{access_log/access_log_formatter_corpus => formatter/substitution_formatter_corpus}/invalid_6 (100%) rename test/common/{access_log/access_log_formatter_corpus => formatter/substitution_formatter_corpus}/invalid_7 (100%) rename test/common/{access_log/access_log_formatter_corpus => formatter/substitution_formatter_corpus}/invalid_8 (100%) rename test/common/{access_log/access_log_formatter_corpus => formatter/substitution_formatter_corpus}/invalid_9 (100%) rename test/common/{access_log/access_log_formatter_corpus => formatter/substitution_formatter_corpus}/plain_string (100%) rename test/common/{access_log/access_log_formatter_corpus => formatter/substitution_formatter_corpus}/response_code (100%) rename test/common/{access_log/access_log_formatter_corpus => formatter/substitution_formatter_corpus}/start_time_0 (100%) rename test/common/{access_log/access_log_formatter_corpus => formatter/substitution_formatter_corpus}/start_time_1 (100%) rename test/common/{access_log/access_log_formatter_corpus => formatter/substitution_formatter_corpus}/start_time_2 (100%) rename test/common/{access_log/access_log_formatter_corpus => formatter/substitution_formatter_corpus}/start_time_3 (100%) rename test/common/{access_log/access_log_formatter_corpus => formatter/substitution_formatter_corpus}/upstream_local_address (100%) rename test/common/{access_log/access_log_formatter_fuzz.proto => formatter/substitution_formatter_fuzz.proto} (84%) rename test/common/{access_log/access_log_formatter_fuzz_test.cc => formatter/substitution_formatter_fuzz_test.cc} (74%) rename test/common/{access_log/access_log_formatter_fuzz_test.dict => formatter/substitution_formatter_fuzz_test.dict} (100%) rename test/common/{access_log/access_log_formatter_speed_test.cc => formatter/substitution_formatter_speed_test.cc} (88%) rename test/common/{access_log/access_log_formatter_test.cc => formatter/substitution_formatter_test.cc} (98%) diff --git 
a/include/envoy/access_log/access_log.h b/include/envoy/access_log/access_log.h index 15486d050a91..eb84ff64cebc 100644 --- a/include/envoy/access_log/access_log.h +++ b/include/envoy/access_log/access_log.h @@ -95,72 +95,5 @@ class Instance { using InstanceSharedPtr = std::shared_ptr; -/** - * Interface for access log formatter. - * Formatters provide a complete access log output line for the given headers/trailers/stream. - */ -class Formatter { -public: - virtual ~Formatter() = default; - - /** - * Return a formatted access log line. - * @param request_headers supplies the request headers. - * @param response_headers supplies the response headers. - * @param response_trailers supplies the response trailers. - * @param stream_info supplies the stream info. - * @param local_reply_body supplies the local reply body. - * @return std::string string containing the complete formatted access log line. - */ - virtual std::string format(const Http::RequestHeaderMap& request_headers, - const Http::ResponseHeaderMap& response_headers, - const Http::ResponseTrailerMap& response_trailers, - const StreamInfo::StreamInfo& stream_info, - absl::string_view local_reply_body) const PURE; -}; - -using FormatterPtr = std::unique_ptr; - -/** - * Interface for access log provider. - * FormatterProviders extract information from the given headers/trailers/stream. - */ -class FormatterProvider { -public: - virtual ~FormatterProvider() = default; - - /** - * Extract a value from the provided headers/trailers/stream. - * @param request_headers supplies the request headers. - * @param response_headers supplies the response headers. - * @param response_trailers supplies the response trailers. - * @param stream_info supplies the stream info. - * @param local_reply_body supplies the local reply body. - * @return std::string containing a single value extracted from the given headers/trailers/stream. - */ - virtual std::string format(const Http::RequestHeaderMap& request_headers, - const Http::ResponseHeaderMap& response_headers, - const Http::ResponseTrailerMap& response_trailers, - const StreamInfo::StreamInfo& stream_info, - absl::string_view local_reply_body) const PURE; - /** - * Extract a value from the provided headers/trailers/stream, preserving the value's type. - * @param request_headers supplies the request headers. - * @param response_headers supplies the response headers. - * @param response_trailers supplies the response trailers. - * @param stream_info supplies the stream info. - * @param local_reply_body supplies the local reply body. - * @return ProtobufWkt::Value containing a single value extracted from the given - * headers/trailers/stream. 
- */ - virtual ProtobufWkt::Value formatValue(const Http::RequestHeaderMap& request_headers, - const Http::ResponseHeaderMap& response_headers, - const Http::ResponseTrailerMap& response_trailers, - const StreamInfo::StreamInfo& stream_info, - absl::string_view local_reply_body) const PURE; -}; - -using FormatterProviderPtr = std::unique_ptr; - } // namespace AccessLog } // namespace Envoy diff --git a/include/envoy/formatter/BUILD b/include/envoy/formatter/BUILD new file mode 100644 index 000000000000..a39fd0a2d63f --- /dev/null +++ b/include/envoy/formatter/BUILD @@ -0,0 +1,19 @@ +licenses(["notice"]) # Apache 2 + +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) + +envoy_package() + +envoy_cc_library( + name = "substitution_formatter_interface", + hdrs = ["substitution_formatter.h"], + deps = [ + "//include/envoy/config:typed_config_interface", + "//include/envoy/http:header_map_interface", + "//include/envoy/stream_info:stream_info_interface", + ], +) diff --git a/include/envoy/formatter/substitution_formatter.h b/include/envoy/formatter/substitution_formatter.h new file mode 100644 index 000000000000..ec17e692f73c --- /dev/null +++ b/include/envoy/formatter/substitution_formatter.h @@ -0,0 +1,81 @@ +#pragma once + +#include +#include + +#include "envoy/common/pure.h" +#include "envoy/http/header_map.h" +#include "envoy/stream_info/stream_info.h" + +namespace Envoy { +namespace Formatter { + +/** + * Interface for substitution formatter. + * Formatters provide a complete substitution output line for the given headers/trailers/stream. + */ +class Formatter { +public: + virtual ~Formatter() = default; + + /** + * Return a formatted substitution line. + * @param request_headers supplies the request headers. + * @param response_headers supplies the response headers. + * @param response_trailers supplies the response trailers. + * @param stream_info supplies the stream info. + * @param local_reply_body supplies the local reply body. + * @return std::string string containing the complete formatted substitution line. + */ + virtual std::string format(const Http::RequestHeaderMap& request_headers, + const Http::ResponseHeaderMap& response_headers, + const Http::ResponseTrailerMap& response_trailers, + const StreamInfo::StreamInfo& stream_info, + absl::string_view local_reply_body) const PURE; +}; + +using FormatterPtr = std::unique_ptr; + +/** + * Interface for substitution provider. + * FormatterProviders extract information from the given headers/trailers/stream. + */ +class FormatterProvider { +public: + virtual ~FormatterProvider() = default; + + /** + * Extract a value from the provided headers/trailers/stream. + * @param request_headers supplies the request headers. + * @param response_headers supplies the response headers. + * @param response_trailers supplies the response trailers. + * @param stream_info supplies the stream info. + * @param local_reply_body supplies the local reply body. + * @return std::string containing a single value extracted from the given headers/trailers/stream. + */ + virtual std::string format(const Http::RequestHeaderMap& request_headers, + const Http::ResponseHeaderMap& response_headers, + const Http::ResponseTrailerMap& response_trailers, + const StreamInfo::StreamInfo& stream_info, + absl::string_view local_reply_body) const PURE; + /** + * Extract a value from the provided headers/trailers/stream, preserving the value's type. + * @param request_headers supplies the request headers. 
+ * @param response_headers supplies the response headers. + * @param response_trailers supplies the response trailers. + * @param stream_info supplies the stream info. + * @param local_reply_body supplies the local reply body. + * @return ProtobufWkt::Value containing a single value extracted from the given + * headers/trailers/stream. + */ + virtual ProtobufWkt::Value formatValue(const Http::RequestHeaderMap& request_headers, + const Http::ResponseHeaderMap& response_headers, + const Http::ResponseTrailerMap& response_trailers, + const StreamInfo::StreamInfo& stream_info, + absl::string_view local_reply_body) const PURE; +}; + +using FormatterProviderPtr = std::unique_ptr; + +} // namespace Formatter +} // namespace Envoy diff --git a/source/common/access_log/BUILD b/source/common/access_log/BUILD index 9dd85e03aa61..faf3c2420a00 100644 --- a/source/common/access_log/BUILD +++ b/source/common/access_log/BUILD @@ -36,25 +36,6 @@ envoy_cc_library( ], ) -envoy_cc_library( - name = "access_log_formatter_lib", - srcs = ["access_log_formatter.cc"], - hdrs = ["access_log_formatter.h"], - external_deps = ["abseil_str_format"], - deps = [ - "//include/envoy/access_log:access_log_interface", - "//include/envoy/stream_info:stream_info_interface", - "//source/common/common:assert_lib", - "//source/common/common:utility_lib", - "//source/common/config:metadata_lib", - "//source/common/grpc:common_lib", - "//source/common/http:utility_lib", - "//source/common/protobuf:message_validator_lib", - "//source/common/stream_info:utility_lib", - "@envoy_api//envoy/config/core/v3:pkg_cc_proto", - ], -) - envoy_cc_library( name = "access_log_manager_lib", srcs = ["access_log_manager_impl.cc"], diff --git a/source/common/access_log/access_log_impl.cc b/source/common/access_log/access_log_impl.cc index 42f5ee2d84bc..9aec83a15fc5 100644 --- a/source/common/access_log/access_log_impl.cc +++ b/source/common/access_log/access_log_impl.cc @@ -11,7 +11,6 @@ #include "envoy/runtime/runtime.h" #include "envoy/upstream/upstream.h" -#include "common/access_log/access_log_formatter.h" #include "common/common/assert.h" #include "common/common/utility.h" #include "common/config/utility.h" diff --git a/source/common/common/BUILD b/source/common/common/BUILD index 5670b3c73629..248471b5ea90 100644 --- a/source/common/common/BUILD +++ b/source/common/common/BUILD @@ -67,18 +67,6 @@ envoy_cc_library( ], ) -envoy_cc_library( - name = "substitution_format_string_lib", - srcs = ["substitution_format_string.cc"], - hdrs = ["substitution_format_string.h"], - deps = [ - "//include/envoy/access_log:access_log_interface", - "//source/common/access_log:access_log_formatter_lib", - "//source/common/protobuf", - "@envoy_api//envoy/config/core/v3:pkg_cc_proto", - ], -) - envoy_cc_library( name = "compiler_requirements_lib", hdrs = ["compiler_requirements.h"], diff --git a/source/common/formatter/BUILD b/source/common/formatter/BUILD new file mode 100644 index 000000000000..0c0df68ecd7a --- /dev/null +++ b/source/common/formatter/BUILD @@ -0,0 +1,39 @@ +licenses(["notice"]) # Apache 2 + +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) + +envoy_package() + +envoy_cc_library( + name = "substitution_formatter_lib", + srcs = ["substitution_formatter.cc"], + hdrs = ["substitution_formatter.h"], + external_deps = ["abseil_str_format"], + deps = [ + "//include/envoy/formatter:substitution_formatter_interface", + "//include/envoy/stream_info:stream_info_interface", + "//source/common/common:assert_lib", + 
"//source/common/common:utility_lib", + "//source/common/config:metadata_lib", + "//source/common/grpc:common_lib", + "//source/common/http:utility_lib", + "//source/common/protobuf:message_validator_lib", + "//source/common/stream_info:utility_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + ], +) + +envoy_cc_library( + name = "substitution_format_string_lib", + srcs = ["substitution_format_string.cc"], + hdrs = ["substitution_format_string.h"], + deps = [ + ":substitution_formatter_lib", + "//source/common/protobuf", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + ], +) diff --git a/source/common/common/substitution_format_string.cc b/source/common/formatter/substitution_format_string.cc similarity index 75% rename from source/common/common/substitution_format_string.cc rename to source/common/formatter/substitution_format_string.cc index 19001acc18f8..ec9e2db96865 100644 --- a/source/common/common/substitution_format_string.cc +++ b/source/common/formatter/substitution_format_string.cc @@ -1,8 +1,9 @@ -#include "common/common/substitution_format_string.h" +#include "common/formatter/substitution_format_string.h" -#include "common/access_log/access_log_formatter.h" +#include "common/formatter/substitution_formatter.h" namespace Envoy { +namespace Formatter { namespace { absl::flat_hash_map @@ -19,18 +20,18 @@ convertJsonFormatToMap(const ProtobufWkt::Struct& json_format) { } // namespace -AccessLog::FormatterPtr +FormatterPtr SubstitutionFormatStringUtils::createJsonFormatter(const ProtobufWkt::Struct& struct_format, bool preserve_types) { auto json_format_map = convertJsonFormatToMap(struct_format); - return std::make_unique(json_format_map, preserve_types); + return std::make_unique(json_format_map, preserve_types); } -AccessLog::FormatterPtr SubstitutionFormatStringUtils::fromProtoConfig( +FormatterPtr SubstitutionFormatStringUtils::fromProtoConfig( const envoy::config::core::v3::SubstitutionFormatString& config) { switch (config.format_case()) { case envoy::config::core::v3::SubstitutionFormatString::FormatCase::kTextFormat: - return std::make_unique(config.text_format()); + return std::make_unique(config.text_format()); case envoy::config::core::v3::SubstitutionFormatString::FormatCase::kJsonFormat: { return createJsonFormatter(config.json_format(), true); } @@ -40,4 +41,5 @@ AccessLog::FormatterPtr SubstitutionFormatStringUtils::fromProtoConfig( return nullptr; } +} // namespace Formatter } // namespace Envoy diff --git a/source/common/common/substitution_format_string.h b/source/common/formatter/substitution_format_string.h similarity index 68% rename from source/common/common/substitution_format_string.h rename to source/common/formatter/substitution_format_string.h index 77b0b3f1f091..97d3bd1e8a17 100644 --- a/source/common/common/substitution_format_string.h +++ b/source/common/formatter/substitution_format_string.h @@ -3,12 +3,13 @@ #include #include -#include "envoy/access_log/access_log.h" #include "envoy/config/core/v3/substitution_format_string.pb.h" +#include "envoy/formatter/substitution_formatter.h" #include "common/protobuf/protobuf.h" namespace Envoy { +namespace Formatter { /** * Utilities for using envoy::config::core::v3::SubstitutionFormatString @@ -18,14 +19,15 @@ class SubstitutionFormatStringUtils { /** * Generate a formatter object from config SubstitutionFormatString. 
*/ - static AccessLog::FormatterPtr + static FormatterPtr fromProtoConfig(const envoy::config::core::v3::SubstitutionFormatString& config); /** * Generate a Json formatter object from proto::Struct config */ - static AccessLog::FormatterPtr createJsonFormatter(const ProtobufWkt::Struct& struct_format, - bool preserve_types); + static FormatterPtr createJsonFormatter(const ProtobufWkt::Struct& struct_format, + bool preserve_types); }; +} // namespace Formatter } // namespace Envoy diff --git a/source/common/access_log/access_log_formatter.cc b/source/common/formatter/substitution_formatter.cc similarity index 96% rename from source/common/access_log/access_log_formatter.cc rename to source/common/formatter/substitution_formatter.cc index 8fce54b47dbf..4400ca3f6510 100644 --- a/source/common/access_log/access_log_formatter.cc +++ b/source/common/formatter/substitution_formatter.cc @@ -1,4 +1,4 @@ -#include "common/access_log/access_log_formatter.h" +#include "common/formatter/substitution_formatter.h" #include #include @@ -27,7 +27,7 @@ using Envoy::Config::Metadata; namespace Envoy { -namespace AccessLog { +namespace Formatter { static const std::string UnspecifiedValueString = "-"; @@ -51,26 +51,26 @@ const std::regex& getNewlinePattern() { CONSTRUCT_ON_FIRST_USE(std::regex, "\n") } // namespace -const std::string AccessLogFormatUtils::DEFAULT_FORMAT = +const std::string SubstitutionFormatUtils::DEFAULT_FORMAT = "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" " "%RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% " "%RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% " "\"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" " "\"%REQ(:AUTHORITY)%\" \"%UPSTREAM_HOST%\"\n"; -FormatterPtr AccessLogFormatUtils::defaultAccessLogFormatter() { +FormatterPtr SubstitutionFormatUtils::defaultSubstitutionFormatter() { return FormatterPtr{new FormatterImpl(DEFAULT_FORMAT)}; } const std::string& -AccessLogFormatUtils::protocolToString(const absl::optional& protocol) { +SubstitutionFormatUtils::protocolToString(const absl::optional& protocol) { if (protocol) { return Http::Utility::getProtocolString(protocol.value()); } return UnspecifiedValueString; } -const std::string AccessLogFormatUtils::getHostname() { +const std::string SubstitutionFormatUtils::getHostname() { #ifdef HOST_NAME_MAX const size_t len = HOST_NAME_MAX; #else @@ -90,7 +90,7 @@ const std::string AccessLogFormatUtils::getHostname() { } FormatterImpl::FormatterImpl(const std::string& format) { - providers_ = AccessLogFormatParser::parse(format); + providers_ = SubstitutionFormatParser::parse(format); } std::string FormatterImpl::format(const Http::RequestHeaderMap& request_headers, @@ -113,7 +113,7 @@ JsonFormatterImpl::JsonFormatterImpl( const absl::flat_hash_map& format_mapping, bool preserve_types) : preserve_types_(preserve_types) { for (const auto& pair : format_mapping) { - json_output_format_.emplace(pair.first, AccessLogFormatParser::parse(pair.second)); + json_output_format_.emplace(pair.first, SubstitutionFormatParser::parse(pair.second)); } } @@ -162,10 +162,10 @@ ProtobufWkt::Struct JsonFormatterImpl::toStruct(const Http::RequestHeaderMap& re return output; } -void AccessLogFormatParser::parseCommandHeader(const std::string& token, const size_t start, - std::string& main_header, - std::string& alternative_header, - absl::optional& max_length) { +void SubstitutionFormatParser::parseCommandHeader(const std::string& token, const size_t start, + std::string& main_header, + 
std::string& alternative_header, + absl::optional& max_length) { std::vector subs; parseCommand(token, start, "?", main_header, subs, max_length); if (subs.size() > 1) { @@ -186,10 +186,10 @@ void AccessLogFormatParser::parseCommandHeader(const std::string& token, const s } } -void AccessLogFormatParser::parseCommand(const std::string& token, const size_t start, - const std::string& separator, std::string& main, - std::vector& sub_items, - absl::optional& max_length) { +void SubstitutionFormatParser::parseCommand(const std::string& token, const size_t start, + const std::string& separator, std::string& main, + std::vector& sub_items, + absl::optional& max_length) { // TODO(dnoe): Convert this to use string_view throughout. const size_t end_request = token.find(')', start); sub_items.clear(); @@ -230,8 +230,8 @@ void AccessLogFormatParser::parseCommand(const std::string& token, const size_t } } -// TODO(derekargueta): #2967 - Rewrite AccessLogFormatter with parser library & formal grammar -std::vector AccessLogFormatParser::parse(const std::string& format) { +// TODO(derekargueta): #2967 - Rewrite SubstitutionFormatter with parser library & formal grammar +std::vector SubstitutionFormatParser::parse(const std::string& format) { std::string current_token; std::vector formatters; static constexpr absl::string_view DYNAMIC_META_TOKEN{"DYNAMIC_METADATA("}; @@ -581,7 +581,7 @@ StreamInfoFormatter::StreamInfoFormatter(const std::string& field_name) { } else if (field_name == "PROTOCOL") { field_extractor_ = std::make_unique( [](const StreamInfo::StreamInfo& stream_info) { - return AccessLogFormatUtils::protocolToString(stream_info.protocol()); + return SubstitutionFormatUtils::protocolToString(stream_info.protocol()); }); } else if (field_name == "RESPONSE_CODE") { field_extractor_ = std::make_unique( @@ -759,7 +759,7 @@ StreamInfoFormatter::StreamInfoFormatter(const std::string& field_name) { return result; }); } else if (field_name == "HOSTNAME") { - std::string hostname = AccessLogFormatUtils::getHostname(); + std::string hostname = SubstitutionFormatUtils::getHostname(); field_extractor_ = std::make_unique( [hostname](const StreamInfo::StreamInfo&) { return hostname; }); } else { @@ -1115,5 +1115,5 @@ ProtobufWkt::Value StartTimeFormatter::formatValue( format(request_headers, response_headers, response_trailers, stream_info, local_reply_body)); } -} // namespace AccessLog +} // namespace Formatter } // namespace Envoy diff --git a/source/common/access_log/access_log_formatter.h b/source/common/formatter/substitution_formatter.h similarity index 97% rename from source/common/access_log/access_log_formatter.h rename to source/common/formatter/substitution_formatter.h index fb525cd29925..00b4be31ac0d 100644 --- a/source/common/access_log/access_log_formatter.h +++ b/source/common/formatter/substitution_formatter.h @@ -5,9 +5,9 @@ #include #include -#include "envoy/access_log/access_log.h" #include "envoy/common/time.h" #include "envoy/config/core/v3/base.pb.h" +#include "envoy/formatter/substitution_formatter.h" #include "envoy/stream_info/stream_info.h" #include "common/common/utility.h" @@ -16,14 +16,12 @@ #include "absl/types/optional.h" namespace Envoy { -namespace AccessLog { - -// TODO(qiwzhang): move this to source/common/common to be shared +namespace Formatter { /** * Access log format parser. 
*/ -class AccessLogFormatParser { +class SubstitutionFormatParser { public: static std::vector parse(const std::string& format); @@ -73,14 +71,14 @@ class AccessLogFormatParser { /** * Util class for access log format. */ -class AccessLogFormatUtils { +class SubstitutionFormatUtils { public: - static FormatterPtr defaultAccessLogFormatter(); + static FormatterPtr defaultSubstitutionFormatter(); static const std::string& protocolToString(const absl::optional& protocol); static const std::string getHostname(); private: - AccessLogFormatUtils(); + SubstitutionFormatUtils(); static const std::string DEFAULT_FORMAT; }; @@ -357,5 +355,5 @@ class StartTimeFormatter : public FormatterProvider { const Envoy::DateFormatter date_formatter_; }; -} // namespace AccessLog +} // namespace Formatter } // namespace Envoy diff --git a/source/common/http/BUILD b/source/common/http/BUILD index 8bacf5d1395a..7b7b7e536adf 100644 --- a/source/common/http/BUILD +++ b/source/common/http/BUILD @@ -196,7 +196,6 @@ envoy_cc_library( "//include/envoy/stats:stats_macros", "//include/envoy/stats:timespan_interface", "//include/envoy/upstream:upstream_interface", - "//source/common/access_log:access_log_formatter_lib", "//source/common/buffer:buffer_lib", "//source/common/common:assert_lib", "//source/common/common:dump_state_utils", diff --git a/source/common/http/conn_manager_utility.cc b/source/common/http/conn_manager_utility.cc index 68846e8a0040..355949e4900d 100644 --- a/source/common/http/conn_manager_utility.cc +++ b/source/common/http/conn_manager_utility.cc @@ -6,7 +6,6 @@ #include "envoy/type/v3/percent.pb.h" -#include "common/access_log/access_log_formatter.h" #include "common/common/empty_string.h" #include "common/common/utility.h" #include "common/http/header_utility.h" diff --git a/source/common/local_reply/BUILD b/source/common/local_reply/BUILD index 750a3e59fa5a..87a2a93dd9d1 100644 --- a/source/common/local_reply/BUILD +++ b/source/common/local_reply/BUILD @@ -17,11 +17,11 @@ envoy_cc_library( "//include/envoy/http:header_map_interface", "//include/envoy/server:filter_config_interface", "//include/envoy/stream_info:stream_info_interface", - "//source/common/access_log:access_log_formatter_lib", "//source/common/access_log:access_log_lib", "//source/common/common:enum_to_int", - "//source/common/common:substitution_format_string_lib", "//source/common/config:datasource_lib", + "//source/common/formatter:substitution_format_string_lib", + "//source/common/formatter:substitution_formatter_lib", "//source/common/http:header_map_lib", "//source/common/stream_info:stream_info_lib", "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", diff --git a/source/common/local_reply/local_reply.cc b/source/common/local_reply/local_reply.cc index 31b7ce468992..2c18db1ff4b5 100644 --- a/source/common/local_reply/local_reply.cc +++ b/source/common/local_reply/local_reply.cc @@ -3,11 +3,11 @@ #include #include -#include "common/access_log/access_log_formatter.h" #include "common/access_log/access_log_impl.h" #include "common/common/enum_to_int.h" -#include "common/common/substitution_format_string.h" #include "common/config/datasource.h" +#include "common/formatter/substitution_format_string.h" +#include "common/formatter/substitution_formatter.h" #include "common/http/header_map_impl.h" namespace Envoy { @@ -26,11 +26,11 @@ using StaticEmptyHeaders = ConstSingleton; class BodyFormatter { public: BodyFormatter() - : formatter_(std::make_unique("%LOCAL_REPLY_BODY%")), + : 
formatter_(std::make_unique("%LOCAL_REPLY_BODY%")), content_type_(Http::Headers::get().ContentTypeValues.Text) {} BodyFormatter(const envoy::config::core::v3::SubstitutionFormatString& config) - : formatter_(SubstitutionFormatStringUtils::fromProtoConfig(config)), + : formatter_(Formatter::SubstitutionFormatStringUtils::fromProtoConfig(config)), content_type_( config.format_case() == envoy::config::core::v3::SubstitutionFormatString::FormatCase::kJsonFormat @@ -48,7 +48,7 @@ class BodyFormatter { } private: - const AccessLog::FormatterPtr formatter_; + const Formatter::FormatterPtr formatter_; const absl::string_view content_type_; }; diff --git a/source/common/router/BUILD b/source/common/router/BUILD index b194d6e239a6..d69cad792900 100644 --- a/source/common/router/BUILD +++ b/source/common/router/BUILD @@ -365,10 +365,10 @@ envoy_cc_library( "//include/envoy/router:string_accessor_interface", "//include/envoy/stream_info:filter_state_interface", "//include/envoy/stream_info:stream_info_interface", - "//source/common/access_log:access_log_formatter_lib", "//source/common/common:minimal_logger_lib", "//source/common/common:utility_lib", "//source/common/config:metadata_lib", + "//source/common/formatter:substitution_formatter_lib", "//source/common/http:header_map_lib", "//source/common/json:json_loader_lib", ], diff --git a/source/common/router/header_formatter.cc b/source/common/router/header_formatter.cc index e846240aa1f3..5793dcfec99e 100644 --- a/source/common/router/header_formatter.cc +++ b/source/common/router/header_formatter.cc @@ -4,11 +4,11 @@ #include "envoy/router/string_accessor.h" -#include "common/access_log/access_log_formatter.h" #include "common/common/fmt.h" #include "common/common/logger.h" #include "common/common/utility.h" #include "common/config/metadata.h" +#include "common/formatter/substitution_formatter.h" #include "common/http/header_map_impl.h" #include "common/json/json_loader.h" #include "common/stream_info/utility.h" @@ -222,7 +222,7 @@ StreamInfoHeaderFormatter::StreamInfoHeaderFormatter(absl::string_view field_nam : append_(append) { if (field_name == "PROTOCOL") { field_extractor_ = [](const Envoy::StreamInfo::StreamInfo& stream_info) { - return Envoy::AccessLog::AccessLogFormatUtils::protocolToString(stream_info.protocol()); + return Envoy::Formatter::SubstitutionFormatUtils::protocolToString(stream_info.protocol()); }; } else if (field_name == "DOWNSTREAM_REMOTE_ADDRESS") { field_extractor_ = [](const StreamInfo::StreamInfo& stream_info) { @@ -319,7 +319,7 @@ StreamInfoHeaderFormatter::StreamInfoHeaderFormatter(absl::string_view field_nam const std::string pattern = fmt::format("%{}%", field_name); if (start_time_formatters_.find(pattern) == start_time_formatters_.end()) { start_time_formatters_.emplace( - std::make_pair(pattern, AccessLog::AccessLogFormatParser::parse(pattern))); + std::make_pair(pattern, Formatter::SubstitutionFormatParser::parse(pattern))); } field_extractor_ = [this, pattern](const Envoy::StreamInfo::StreamInfo& stream_info) { const auto& formatters = start_time_formatters_.at(pattern); @@ -343,7 +343,7 @@ StreamInfoHeaderFormatter::StreamInfoHeaderFormatter(absl::string_view field_nam } else if (absl::StartsWith(field_name, "REQ")) { field_extractor_ = parseRequestHeader(field_name.substr(STATIC_STRLEN("REQ"))); } else if (field_name == "HOSTNAME") { - std::string hostname = Envoy::AccessLog::AccessLogFormatUtils::getHostname(); + std::string hostname = Envoy::Formatter::SubstitutionFormatUtils::getHostname(); field_extractor_ 
= [hostname](const StreamInfo::StreamInfo&) { return hostname; }; } else if (field_name == "RESPONSE_FLAGS") { field_extractor_ = [](const StreamInfo::StreamInfo& stream_info) { diff --git a/source/common/router/header_formatter.h b/source/common/router/header_formatter.h index eb9f8766f548..55d1206a1112 100644 --- a/source/common/router/header_formatter.h +++ b/source/common/router/header_formatter.h @@ -4,7 +4,7 @@ #include #include -#include "envoy/access_log/access_log.h" +#include "envoy/formatter/substitution_formatter.h" #include "absl/strings/string_view.h" @@ -45,7 +45,7 @@ class StreamInfoHeaderFormatter : public HeaderFormatter { private: FieldExtractor field_extractor_; const bool append_; - std::unordered_map> + std::unordered_map> start_time_formatters_; }; diff --git a/source/common/tracing/BUILD b/source/common/tracing/BUILD index d516876313b5..a99899fc312e 100644 --- a/source/common/tracing/BUILD +++ b/source/common/tracing/BUILD @@ -23,12 +23,12 @@ envoy_cc_library( "//include/envoy/thread_local:thread_local_interface", "//include/envoy/tracing:http_tracer_interface", "//include/envoy/upstream:cluster_manager_interface", - "//source/common/access_log:access_log_formatter_lib", "//source/common/buffer:zero_copy_input_stream_lib", "//source/common/common:base64_lib", "//source/common/common:macros", "//source/common/common:utility_lib", "//source/common/config:metadata_lib", + "//source/common/formatter:substitution_formatter_lib", "//source/common/grpc:common_lib", "//source/common/http:codes_lib", "//source/common/http:header_map_lib", diff --git a/source/common/tracing/http_tracer_impl.cc b/source/common/tracing/http_tracer_impl.cc index 2548b7ed6581..d6010aa083be 100644 --- a/source/common/tracing/http_tracer_impl.cc +++ b/source/common/tracing/http_tracer_impl.cc @@ -7,11 +7,11 @@ #include "envoy/type/metadata/v3/metadata.pb.h" #include "envoy/type/tracing/v3/custom_tag.pb.h" -#include "common/access_log/access_log_formatter.h" #include "common/common/assert.h" #include "common/common/fmt.h" #include "common/common/macros.h" #include "common/common/utility.h" +#include "common/formatter/substitution_formatter.h" #include "common/grpc/common.h" #include "common/http/codes.h" #include "common/http/header_map_impl.h" @@ -170,7 +170,7 @@ void HttpTracerUtility::finalizeDownstreamSpan(Span& span, valueOrDefault(request_headers->EnvoyDownstreamServiceCluster(), "-")); span.setTag(Tracing::Tags::get().UserAgent, valueOrDefault(request_headers->UserAgent(), "-")); span.setTag(Tracing::Tags::get().HttpProtocol, - AccessLog::AccessLogFormatUtils::protocolToString(stream_info.protocol())); + Formatter::SubstitutionFormatUtils::protocolToString(stream_info.protocol())); const auto& remote_address = stream_info.downstreamDirectRemoteAddress(); @@ -212,7 +212,7 @@ void HttpTracerUtility::finalizeUpstreamSpan(Span& span, const StreamInfo::StreamInfo& stream_info, const Config& tracing_config) { span.setTag(Tracing::Tags::get().HttpProtocol, - AccessLog::AccessLogFormatUtils::protocolToString(stream_info.protocol())); + Formatter::SubstitutionFormatUtils::protocolToString(stream_info.protocol())); if (stream_info.upstreamHost()) { span.setTag(Tracing::Tags::get().UpstreamAddress, diff --git a/source/extensions/access_loggers/file/BUILD b/source/extensions/access_loggers/file/BUILD index 5d19c5f8e4db..015920030af9 100644 --- a/source/extensions/access_loggers/file/BUILD +++ b/source/extensions/access_loggers/file/BUILD @@ -29,7 +29,7 @@ envoy_cc_extension( deps = [ 
":file_access_log_lib", "//include/envoy/registry", - "//source/common/common:substitution_format_string_lib", + "//source/common/formatter:substitution_format_string_lib", "//source/common/protobuf", "//source/extensions/access_loggers:well_known_names", "@envoy_api//envoy/extensions/access_loggers/file/v3:pkg_cc_proto", diff --git a/source/extensions/access_loggers/file/config.cc b/source/extensions/access_loggers/file/config.cc index dd430c0c298e..2b5e6877cf2d 100644 --- a/source/extensions/access_loggers/file/config.cc +++ b/source/extensions/access_loggers/file/config.cc @@ -8,9 +8,9 @@ #include "envoy/registry/registry.h" #include "envoy/server/filter_config.h" -#include "common/access_log/access_log_formatter.h" #include "common/common/logger.h" -#include "common/common/substitution_format_string.h" +#include "common/formatter/substitution_format_string.h" +#include "common/formatter/substitution_formatter.h" #include "common/protobuf/protobuf.h" #include "extensions/access_loggers/file/file_access_log_impl.h" @@ -28,12 +28,13 @@ FileAccessLogFactory::createAccessLogInstance(const Protobuf::Message& config, const auto& fal_config = MessageUtil::downcastAndValidate< const envoy::extensions::access_loggers::file::v3::FileAccessLog&>( config, context.messageValidationVisitor()); - AccessLog::FormatterPtr formatter; + Formatter::FormatterPtr formatter; if (fal_config.has_log_format()) { - formatter = SubstitutionFormatStringUtils::fromProtoConfig(fal_config.log_format()); + formatter = Formatter::SubstitutionFormatStringUtils::fromProtoConfig(fal_config.log_format()); } else if (fal_config.has_json_format()) { - formatter = SubstitutionFormatStringUtils::createJsonFormatter(fal_config.json_format(), false); + formatter = Formatter::SubstitutionFormatStringUtils::createJsonFormatter( + fal_config.json_format(), false); } else if (fal_config.access_log_format_case() != envoy::extensions::access_loggers::file::v3::FileAccessLog::AccessLogFormatCase:: ACCESS_LOG_FORMAT_NOT_SET) { @@ -49,10 +50,10 @@ FileAccessLogFactory::createAccessLogInstance(const Protobuf::Message& config, default: NOT_REACHED_GCOVR_EXCL_LINE; } - formatter = SubstitutionFormatStringUtils::fromProtoConfig(sff_config); + formatter = Formatter::SubstitutionFormatStringUtils::fromProtoConfig(sff_config); } if (!formatter) { - formatter = AccessLog::AccessLogFormatUtils::defaultAccessLogFormatter(); + formatter = Formatter::SubstitutionFormatUtils::defaultSubstitutionFormatter(); } return std::make_shared(fal_config.path(), std::move(filter), std::move(formatter), diff --git a/source/extensions/access_loggers/file/file_access_log_impl.cc b/source/extensions/access_loggers/file/file_access_log_impl.cc index 323160da2a17..4d571251b859 100644 --- a/source/extensions/access_loggers/file/file_access_log_impl.cc +++ b/source/extensions/access_loggers/file/file_access_log_impl.cc @@ -6,7 +6,7 @@ namespace AccessLoggers { namespace File { FileAccessLog::FileAccessLog(const std::string& access_log_path, AccessLog::FilterPtr&& filter, - AccessLog::FormatterPtr&& formatter, + Formatter::FormatterPtr&& formatter, AccessLog::AccessLogManager& log_manager) : ImplBase(std::move(filter)), formatter_(std::move(formatter)) { log_file_ = log_manager.createAccessLog(access_log_path); diff --git a/source/extensions/access_loggers/file/file_access_log_impl.h b/source/extensions/access_loggers/file/file_access_log_impl.h index ce278ed776af..3cd195c44d1a 100644 --- a/source/extensions/access_loggers/file/file_access_log_impl.h +++ 
b/source/extensions/access_loggers/file/file_access_log_impl.h @@ -1,5 +1,7 @@ #pragma once +#include "common/formatter/substitution_formatter.h" + #include "extensions/access_loggers/common/access_log_base.h" namespace Envoy { @@ -13,7 +15,7 @@ namespace File { class FileAccessLog : public Common::ImplBase { public: FileAccessLog(const std::string& access_log_path, AccessLog::FilterPtr&& filter, - AccessLog::FormatterPtr&& formatter, AccessLog::AccessLogManager& log_manager); + Formatter::FormatterPtr&& formatter, AccessLog::AccessLogManager& log_manager); private: // Common::ImplBase @@ -23,7 +25,7 @@ class FileAccessLog : public Common::ImplBase { const StreamInfo::StreamInfo& stream_info) override; AccessLog::AccessLogFileSharedPtr log_file_; - AccessLog::FormatterPtr formatter_; + Formatter::FormatterPtr formatter_; }; } // namespace File diff --git a/source/server/admin/BUILD b/source/server/admin/BUILD index 84b331a927f1..491bdefcd940 100644 --- a/source/server/admin/BUILD +++ b/source/server/admin/BUILD @@ -36,7 +36,6 @@ envoy_cc_library( "//include/envoy/upstream:cluster_manager_interface", "//include/envoy/upstream:resource_manager_interface", "//include/envoy/upstream:upstream_interface", - "//source/common/access_log:access_log_formatter_lib", "//source/common/access_log:access_log_lib", "//source/common/buffer:buffer_lib", "//source/common/common:assert_lib", @@ -46,6 +45,7 @@ envoy_cc_library( "//source/common/common:minimal_logger_lib", "//source/common/common:mutex_tracer_lib", "//source/common/common:utility_lib", + "//source/common/formatter:substitution_formatter_lib", "//source/common/html:utility_lib", "//source/common/http:codes_lib", "//source/common/http:conn_manager_lib", diff --git a/source/server/admin/admin.cc b/source/server/admin/admin.cc index 71aca6a62a51..5be7a9665359 100644 --- a/source/server/admin/admin.cc +++ b/source/server/admin/admin.cc @@ -21,7 +21,6 @@ #include "envoy/upstream/cluster_manager.h" #include "envoy/upstream/upstream.h" -#include "common/access_log/access_log_formatter.h" #include "common/access_log/access_log_impl.h" #include "common/buffer/buffer_impl.h" #include "common/common/assert.h" @@ -29,6 +28,7 @@ #include "common/common/fmt.h" #include "common/common/mutex_tracer_impl.h" #include "common/common/utility.h" +#include "common/formatter/substitution_formatter.h" #include "common/html/utility.h" #include "common/http/codes.h" #include "common/http/conn_manager_utility.h" @@ -544,7 +544,7 @@ void AdminImpl::startHttpListener(const std::string& access_log_path, // TODO(mattklein123): Allow admin to use normal access logger extension loading and avoid the // hard dependency here. 
access_logs_.emplace_back(new Extensions::AccessLoggers::File::FileAccessLog( - access_log_path, {}, AccessLog::AccessLogFormatUtils::defaultAccessLogFormatter(), + access_log_path, {}, Formatter::SubstitutionFormatUtils::defaultSubstitutionFormatter(), server_.accessLogManager())); socket_ = std::make_shared(address, socket_options, true); socket_factory_ = std::make_shared(socket_); diff --git a/test/common/access_log/BUILD b/test/common/access_log/BUILD index 836975e9ae34..3a71099e7276 100644 --- a/test/common/access_log/BUILD +++ b/test/common/access_log/BUILD @@ -2,56 +2,12 @@ licenses(["notice"]) # Apache 2 load( "//bazel:envoy_build_system.bzl", - "envoy_benchmark_test", - "envoy_cc_benchmark_binary", - "envoy_cc_fuzz_test", "envoy_cc_test", "envoy_package", - "envoy_proto_library", ) envoy_package() -envoy_proto_library( - name = "access_log_formatter_fuzz_proto", - srcs = ["access_log_formatter_fuzz.proto"], - deps = ["//test/fuzz:common_proto"], -) - -envoy_cc_fuzz_test( - name = "access_log_formatter_fuzz_test", - srcs = ["access_log_formatter_fuzz_test.cc"], - corpus = "access_log_formatter_corpus", - dictionaries = [ - "access_log_formatter_fuzz_test.dict", - "//test/fuzz:headers.dict", - ], - deps = [ - ":access_log_formatter_fuzz_proto_cc_proto", - "//source/common/access_log:access_log_formatter_lib", - "//test/fuzz:utility_lib", - ], -) - -envoy_cc_test( - name = "access_log_formatter_test", - srcs = ["access_log_formatter_test.cc"], - deps = [ - "//source/common/access_log:access_log_formatter_lib", - "//source/common/common:utility_lib", - "//source/common/http:header_map_lib", - "//source/common/router:string_accessor_lib", - "//test/mocks/api:api_mocks", - "//test/mocks/http:http_mocks", - "//test/mocks/ssl:ssl_mocks", - "//test/mocks/stream_info:stream_info_mocks", - "//test/mocks/upstream:upstream_mocks", - "//test/test_common:threadsafe_singleton_injector_lib", - "//test/test_common:utility_lib", - "@envoy_api//envoy/config/core/v3:pkg_cc_proto", - ], -) - envoy_cc_test( name = "access_log_impl_test", srcs = ["access_log_impl_test.cc"], @@ -87,25 +43,3 @@ envoy_cc_test( "//test/mocks/filesystem:filesystem_mocks", ], ) - -envoy_cc_benchmark_binary( - name = "access_log_formatter_speed_test", - srcs = ["access_log_formatter_speed_test.cc"], - external_deps = [ - "benchmark", - ], - deps = [ - "//source/common/access_log:access_log_formatter_lib", - "//source/common/http:header_map_lib", - "//source/common/network:address_lib", - "//test/common/stream_info:test_util", - "//test/mocks/http:http_mocks", - "//test/mocks/stream_info:stream_info_mocks", - "//test/test_common:printers_lib", - ], -) - -envoy_benchmark_test( - name = "access_log_formatter_speed_test_benchmark_test", - benchmark_binary = "access_log_formatter_speed_test", -) diff --git a/test/common/common/BUILD b/test/common/common/BUILD index 32ae464b0fe9..fefbe4c3183c 100644 --- a/test/common/common/BUILD +++ b/test/common/common/BUILD @@ -38,18 +38,6 @@ envoy_cc_test( ], ) -envoy_cc_test( - name = "substitution_format_string_test", - srcs = ["substitution_format_string_test.cc"], - deps = [ - "//source/common/common:substitution_format_string_lib", - "//test/mocks/http:http_mocks", - "//test/mocks/stream_info:stream_info_mocks", - "//test/test_common:utility_lib", - "@envoy_api//envoy/config/core/v3:pkg_cc_proto", - ], -) - envoy_cc_fuzz_test( name = "base64_fuzz_test", srcs = ["base64_fuzz_test.cc"], diff --git a/test/common/formatter/BUILD b/test/common/formatter/BUILD new file mode 100644 index 
000000000000..77b26bc4a05f --- /dev/null +++ b/test/common/formatter/BUILD @@ -0,0 +1,87 @@ +licenses(["notice"]) # Apache 2 + +load( + "//bazel:envoy_build_system.bzl", + "envoy_benchmark_test", + "envoy_cc_benchmark_binary", + "envoy_cc_fuzz_test", + "envoy_cc_test", + "envoy_package", + "envoy_proto_library", +) + +envoy_package() + +envoy_proto_library( + name = "substitution_formatter_fuzz_proto", + srcs = ["substitution_formatter_fuzz.proto"], + deps = ["//test/fuzz:common_proto"], +) + +envoy_cc_fuzz_test( + name = "substitution_formatter_fuzz_test", + srcs = ["substitution_formatter_fuzz_test.cc"], + corpus = "substitution_formatter_corpus", + dictionaries = [ + "substitution_formatter_fuzz_test.dict", + "//test/fuzz:headers.dict", + ], + deps = [ + ":substitution_formatter_fuzz_proto_cc_proto", + "//source/common/formatter:substitution_formatter_lib", + "//test/fuzz:utility_lib", + ], +) + +envoy_cc_test( + name = "substitution_formatter_test", + srcs = ["substitution_formatter_test.cc"], + deps = [ + "//source/common/common:utility_lib", + "//source/common/formatter:substitution_formatter_lib", + "//source/common/http:header_map_lib", + "//source/common/router:string_accessor_lib", + "//test/mocks/api:api_mocks", + "//test/mocks/http:http_mocks", + "//test/mocks/ssl:ssl_mocks", + "//test/mocks/stream_info:stream_info_mocks", + "//test/mocks/upstream:upstream_mocks", + "//test/test_common:threadsafe_singleton_injector_lib", + "//test/test_common:utility_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + ], +) + +envoy_cc_test( + name = "substitution_format_string_test", + srcs = ["substitution_format_string_test.cc"], + deps = [ + "//source/common/formatter:substitution_format_string_lib", + "//test/mocks/http:http_mocks", + "//test/mocks/stream_info:stream_info_mocks", + "//test/test_common:utility_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + ], +) + +envoy_cc_benchmark_binary( + name = "substitution_formatter_speed_test", + srcs = ["substitution_formatter_speed_test.cc"], + external_deps = [ + "benchmark", + ], + deps = [ + "//source/common/formatter:substitution_formatter_lib", + "//source/common/http:header_map_lib", + "//source/common/network:address_lib", + "//test/common/stream_info:test_util", + "//test/mocks/http:http_mocks", + "//test/mocks/stream_info:stream_info_mocks", + "//test/test_common:printers_lib", + ], +) + +envoy_benchmark_test( + name = "substitution_formatter_speed_test_benchmark_test", + benchmark_binary = "substitution_formatter_speed_test", +) diff --git a/test/common/common/substitution_format_string_test.cc b/test/common/formatter/substitution_format_string_test.cc similarity index 96% rename from test/common/common/substitution_format_string_test.cc rename to test/common/formatter/substitution_format_string_test.cc index 01ba892bdbb4..22e4a030d430 100644 --- a/test/common/common/substitution_format_string_test.cc +++ b/test/common/formatter/substitution_format_string_test.cc @@ -1,6 +1,6 @@ #include "envoy/config/core/v3/substitution_format_string.pb.validate.h" -#include "common/common/substitution_format_string.h" +#include "common/formatter/substitution_format_string.h" #include "test/mocks/http/mocks.h" #include "test/mocks/stream_info/mocks.h" @@ -12,6 +12,7 @@ using testing::Return; namespace Envoy { +namespace Formatter { class SubstitutionFormatStringUtilsTest : public ::testing::Test { public: @@ -92,4 +93,5 @@ TEST_F(SubstitutionFormatStringUtilsTest, TestInvalidConfigs) { } } +} // namespace Formatter } // namespace 
Envoy diff --git a/test/common/access_log/access_log_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-4673648219652096 b/test/common/formatter/substitution_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-4673648219652096 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-4673648219652096 rename to test/common/formatter/substitution_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-4673648219652096 diff --git a/test/common/access_log/access_log_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5630958620901376 b/test/common/formatter/substitution_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5630958620901376 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5630958620901376 rename to test/common/formatter/substitution_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5630958620901376 diff --git a/test/common/access_log/access_log_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5633770020929536 b/test/common/formatter/substitution_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5633770020929536 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5633770020929536 rename to test/common/formatter/substitution_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5633770020929536 diff --git a/test/common/access_log/access_log_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5645869313687552 b/test/common/formatter/substitution_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5645869313687552 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5645869313687552 rename to test/common/formatter/substitution_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5645869313687552 diff --git a/test/common/access_log/access_log_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5701824317751296 b/test/common/formatter/substitution_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5701824317751296 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5701824317751296 rename to test/common/formatter/substitution_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5701824317751296 diff --git a/test/common/access_log/access_log_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5758486359572480 b/test/common/formatter/substitution_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5758486359572480 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5758486359572480 rename to test/common/formatter/substitution_formatter_corpus/clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5758486359572480 diff --git 
a/test/common/access_log/access_log_formatter_corpus/clusterfuzz-testcase-minimized-header_parser_fuzz_test-5633924724424704.fuzz b/test/common/formatter/substitution_formatter_corpus/clusterfuzz-testcase-minimized-header_parser_fuzz_test-5633924724424704.fuzz similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/clusterfuzz-testcase-minimized-header_parser_fuzz_test-5633924724424704.fuzz rename to test/common/formatter/substitution_formatter_corpus/clusterfuzz-testcase-minimized-header_parser_fuzz_test-5633924724424704.fuzz diff --git a/test/common/access_log/access_log_formatter_corpus/dynamic_metadata b/test/common/formatter/substitution_formatter_corpus/dynamic_metadata similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/dynamic_metadata rename to test/common/formatter/substitution_formatter_corpus/dynamic_metadata diff --git a/test/common/access_log/access_log_formatter_corpus/empty b/test/common/formatter/substitution_formatter_corpus/empty similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/empty rename to test/common/formatter/substitution_formatter_corpus/empty diff --git a/test/common/access_log/access_log_formatter_corpus/headers b/test/common/formatter/substitution_formatter_corpus/headers similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/headers rename to test/common/formatter/substitution_formatter_corpus/headers diff --git a/test/common/access_log/access_log_formatter_corpus/invalid_0 b/test/common/formatter/substitution_formatter_corpus/invalid_0 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/invalid_0 rename to test/common/formatter/substitution_formatter_corpus/invalid_0 diff --git a/test/common/access_log/access_log_formatter_corpus/invalid_1 b/test/common/formatter/substitution_formatter_corpus/invalid_1 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/invalid_1 rename to test/common/formatter/substitution_formatter_corpus/invalid_1 diff --git a/test/common/access_log/access_log_formatter_corpus/invalid_10 b/test/common/formatter/substitution_formatter_corpus/invalid_10 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/invalid_10 rename to test/common/formatter/substitution_formatter_corpus/invalid_10 diff --git a/test/common/access_log/access_log_formatter_corpus/invalid_11 b/test/common/formatter/substitution_formatter_corpus/invalid_11 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/invalid_11 rename to test/common/formatter/substitution_formatter_corpus/invalid_11 diff --git a/test/common/access_log/access_log_formatter_corpus/invalid_12 b/test/common/formatter/substitution_formatter_corpus/invalid_12 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/invalid_12 rename to test/common/formatter/substitution_formatter_corpus/invalid_12 diff --git a/test/common/access_log/access_log_formatter_corpus/invalid_13 b/test/common/formatter/substitution_formatter_corpus/invalid_13 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/invalid_13 rename to test/common/formatter/substitution_formatter_corpus/invalid_13 diff --git a/test/common/access_log/access_log_formatter_corpus/invalid_14 b/test/common/formatter/substitution_formatter_corpus/invalid_14 similarity index 100% rename from 
test/common/access_log/access_log_formatter_corpus/invalid_14 rename to test/common/formatter/substitution_formatter_corpus/invalid_14 diff --git a/test/common/access_log/access_log_formatter_corpus/invalid_15 b/test/common/formatter/substitution_formatter_corpus/invalid_15 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/invalid_15 rename to test/common/formatter/substitution_formatter_corpus/invalid_15 diff --git a/test/common/access_log/access_log_formatter_corpus/invalid_16 b/test/common/formatter/substitution_formatter_corpus/invalid_16 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/invalid_16 rename to test/common/formatter/substitution_formatter_corpus/invalid_16 diff --git a/test/common/access_log/access_log_formatter_corpus/invalid_17 b/test/common/formatter/substitution_formatter_corpus/invalid_17 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/invalid_17 rename to test/common/formatter/substitution_formatter_corpus/invalid_17 diff --git a/test/common/access_log/access_log_formatter_corpus/invalid_18 b/test/common/formatter/substitution_formatter_corpus/invalid_18 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/invalid_18 rename to test/common/formatter/substitution_formatter_corpus/invalid_18 diff --git a/test/common/access_log/access_log_formatter_corpus/invalid_19 b/test/common/formatter/substitution_formatter_corpus/invalid_19 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/invalid_19 rename to test/common/formatter/substitution_formatter_corpus/invalid_19 diff --git a/test/common/access_log/access_log_formatter_corpus/invalid_2 b/test/common/formatter/substitution_formatter_corpus/invalid_2 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/invalid_2 rename to test/common/formatter/substitution_formatter_corpus/invalid_2 diff --git a/test/common/access_log/access_log_formatter_corpus/invalid_3 b/test/common/formatter/substitution_formatter_corpus/invalid_3 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/invalid_3 rename to test/common/formatter/substitution_formatter_corpus/invalid_3 diff --git a/test/common/access_log/access_log_formatter_corpus/invalid_4 b/test/common/formatter/substitution_formatter_corpus/invalid_4 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/invalid_4 rename to test/common/formatter/substitution_formatter_corpus/invalid_4 diff --git a/test/common/access_log/access_log_formatter_corpus/invalid_5 b/test/common/formatter/substitution_formatter_corpus/invalid_5 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/invalid_5 rename to test/common/formatter/substitution_formatter_corpus/invalid_5 diff --git a/test/common/access_log/access_log_formatter_corpus/invalid_6 b/test/common/formatter/substitution_formatter_corpus/invalid_6 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/invalid_6 rename to test/common/formatter/substitution_formatter_corpus/invalid_6 diff --git a/test/common/access_log/access_log_formatter_corpus/invalid_7 b/test/common/formatter/substitution_formatter_corpus/invalid_7 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/invalid_7 rename to test/common/formatter/substitution_formatter_corpus/invalid_7 diff --git 
a/test/common/access_log/access_log_formatter_corpus/invalid_8 b/test/common/formatter/substitution_formatter_corpus/invalid_8 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/invalid_8 rename to test/common/formatter/substitution_formatter_corpus/invalid_8 diff --git a/test/common/access_log/access_log_formatter_corpus/invalid_9 b/test/common/formatter/substitution_formatter_corpus/invalid_9 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/invalid_9 rename to test/common/formatter/substitution_formatter_corpus/invalid_9 diff --git a/test/common/access_log/access_log_formatter_corpus/plain_string b/test/common/formatter/substitution_formatter_corpus/plain_string similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/plain_string rename to test/common/formatter/substitution_formatter_corpus/plain_string diff --git a/test/common/access_log/access_log_formatter_corpus/response_code b/test/common/formatter/substitution_formatter_corpus/response_code similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/response_code rename to test/common/formatter/substitution_formatter_corpus/response_code diff --git a/test/common/access_log/access_log_formatter_corpus/start_time_0 b/test/common/formatter/substitution_formatter_corpus/start_time_0 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/start_time_0 rename to test/common/formatter/substitution_formatter_corpus/start_time_0 diff --git a/test/common/access_log/access_log_formatter_corpus/start_time_1 b/test/common/formatter/substitution_formatter_corpus/start_time_1 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/start_time_1 rename to test/common/formatter/substitution_formatter_corpus/start_time_1 diff --git a/test/common/access_log/access_log_formatter_corpus/start_time_2 b/test/common/formatter/substitution_formatter_corpus/start_time_2 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/start_time_2 rename to test/common/formatter/substitution_formatter_corpus/start_time_2 diff --git a/test/common/access_log/access_log_formatter_corpus/start_time_3 b/test/common/formatter/substitution_formatter_corpus/start_time_3 similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/start_time_3 rename to test/common/formatter/substitution_formatter_corpus/start_time_3 diff --git a/test/common/access_log/access_log_formatter_corpus/upstream_local_address b/test/common/formatter/substitution_formatter_corpus/upstream_local_address similarity index 100% rename from test/common/access_log/access_log_formatter_corpus/upstream_local_address rename to test/common/formatter/substitution_formatter_corpus/upstream_local_address diff --git a/test/common/access_log/access_log_formatter_fuzz.proto b/test/common/formatter/substitution_formatter_fuzz.proto similarity index 84% rename from test/common/access_log/access_log_formatter_fuzz.proto rename to test/common/formatter/substitution_formatter_fuzz.proto index 8a58841849e4..6cd0a2f116ea 100644 --- a/test/common/access_log/access_log_formatter_fuzz.proto +++ b/test/common/formatter/substitution_formatter_fuzz.proto @@ -1,12 +1,12 @@ syntax = "proto3"; -package test.common.access_log; +package test.common.substitution; import "test/fuzz/common.proto"; import "validate/validate.proto"; -// Structured input for access_log_formatter_fuzz_test. 
+// Structured input for substitution_formatter_fuzz_test. message TestCase { // Do not allow invalid header characters in %REQ(...)% and %RESP(...)%. diff --git a/test/common/access_log/access_log_formatter_fuzz_test.cc b/test/common/formatter/substitution_formatter_fuzz_test.cc similarity index 74% rename from test/common/access_log/access_log_formatter_fuzz_test.cc rename to test/common/formatter/substitution_formatter_fuzz_test.cc index 7d96a275c094..38356f182a21 100644 --- a/test/common/access_log/access_log_formatter_fuzz_test.cc +++ b/test/common/formatter/substitution_formatter_fuzz_test.cc @@ -1,6 +1,6 @@ -#include "common/access_log/access_log_formatter.h" +#include "common/formatter/substitution_formatter.h" -#include "test/common/access_log/access_log_formatter_fuzz.pb.validate.h" +#include "test/common/formatter/substitution_formatter_fuzz.pb.validate.h" #include "test/fuzz/fuzz_runner.h" #include "test/fuzz/utility.h" @@ -8,11 +8,11 @@ namespace Envoy { namespace Fuzz { namespace { -DEFINE_PROTO_FUZZER(const test::common::access_log::TestCase& input) { +DEFINE_PROTO_FUZZER(const test::common::substitution::TestCase& input) { try { TestUtility::validate(input); - std::vector formatters = - AccessLog::AccessLogFormatParser::parse(input.format()); + std::vector formatters = + Formatter::SubstitutionFormatParser::parse(input.format()); const auto& request_headers = Fuzz::fromHeaders(input.request_headers()); const auto& response_headers = diff --git a/test/common/access_log/access_log_formatter_fuzz_test.dict b/test/common/formatter/substitution_formatter_fuzz_test.dict similarity index 100% rename from test/common/access_log/access_log_formatter_fuzz_test.dict rename to test/common/formatter/substitution_formatter_fuzz_test.dict diff --git a/test/common/access_log/access_log_formatter_speed_test.cc b/test/common/formatter/substitution_formatter_speed_test.cc similarity index 88% rename from test/common/access_log/access_log_formatter_speed_test.cc rename to test/common/formatter/substitution_formatter_speed_test.cc index 1fd9f360a750..fd2b6c7fe7a9 100644 --- a/test/common/access_log/access_log_formatter_speed_test.cc +++ b/test/common/formatter/substitution_formatter_speed_test.cc @@ -1,4 +1,4 @@ -#include "common/access_log/access_log_formatter.h" +#include "common/formatter/substitution_formatter.h" #include "common/network/address_impl.h" #include "test/common/stream_info/test_util.h" @@ -10,7 +10,7 @@ namespace Envoy { namespace { -std::unique_ptr makeJsonFormatter(bool typed) { +std::unique_ptr makeJsonFormatter(bool typed) { absl::flat_hash_map JsonLogFormat = { {"remote_address", "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%"}, {"start_time", "%START_TIME(%Y/%m/%dT%H:%M:%S%z %s)%"}, @@ -23,7 +23,7 @@ std::unique_ptr makeJsonFormatter(bool type {"referer", "%REQ(REFERER)%"}, {"user-agent", "%REQ(USER-AGENT)%"}}; - return std::make_unique(JsonLogFormat, typed); + return std::make_unique(JsonLogFormat, typed); } std::unique_ptr makeStreamInfo() { @@ -44,8 +44,8 @@ static void BM_AccessLogFormatter(benchmark::State& state) { "%REQ(X-FORWARDED-PROTO)%://%REQ(:AUTHORITY)%%REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL% " "s%RESPONSE_CODE% %BYTES_SENT% %DURATION% %REQ(REFERER)% \"%REQ(USER-AGENT)%\" - - -\n"; - std::unique_ptr formatter = - std::make_unique(LogFormat); + std::unique_ptr formatter = + std::make_unique(LogFormat); size_t output_bytes = 0; Http::TestRequestHeaderMapImpl request_headers; @@ -64,7 +64,7 @@ BENCHMARK(BM_AccessLogFormatter); // 
NOLINTNEXTLINE(readability-identifier-naming) static void BM_JsonAccessLogFormatter(benchmark::State& state) { std::unique_ptr stream_info = makeStreamInfo(); - std::unique_ptr json_formatter = makeJsonFormatter(false); + std::unique_ptr json_formatter = makeJsonFormatter(false); size_t output_bytes = 0; Http::TestRequestHeaderMapImpl request_headers; @@ -84,7 +84,7 @@ BENCHMARK(BM_JsonAccessLogFormatter); // NOLINTNEXTLINE(readability-identifier-naming) static void BM_TypedJsonAccessLogFormatter(benchmark::State& state) { std::unique_ptr stream_info = makeStreamInfo(); - std::unique_ptr typed_json_formatter = + std::unique_ptr typed_json_formatter = makeJsonFormatter(true); size_t output_bytes = 0; diff --git a/test/common/access_log/access_log_formatter_test.cc b/test/common/formatter/substitution_formatter_test.cc similarity index 98% rename from test/common/access_log/access_log_formatter_test.cc rename to test/common/formatter/substitution_formatter_test.cc index c182f49406ac..b5caec7619ef 100644 --- a/test/common/access_log/access_log_formatter_test.cc +++ b/test/common/formatter/substitution_formatter_test.cc @@ -5,8 +5,8 @@ #include "envoy/config/core/v3/base.pb.h" -#include "common/access_log/access_log_formatter.h" #include "common/common/utility.h" +#include "common/formatter/substitution_formatter.h" #include "common/http/header_map_impl.h" #include "common/protobuf/utility.h" #include "common/router/string_accessor_impl.h" @@ -30,7 +30,7 @@ using testing::Return; using testing::ReturnRef; namespace Envoy { -namespace AccessLog { +namespace Formatter { namespace { class TestSerializedUnknownFilterState : public StreamInfo::FilterState::Object { @@ -92,14 +92,14 @@ class TestSerializedStringFilterState : public StreamInfo::FilterState::Object { std::string raw_string_; }; -TEST(AccessLogFormatUtilsTest, protocolToString) { - EXPECT_EQ("HTTP/1.0", AccessLogFormatUtils::protocolToString(Http::Protocol::Http10)); - EXPECT_EQ("HTTP/1.1", AccessLogFormatUtils::protocolToString(Http::Protocol::Http11)); - EXPECT_EQ("HTTP/2", AccessLogFormatUtils::protocolToString(Http::Protocol::Http2)); - EXPECT_EQ("-", AccessLogFormatUtils::protocolToString({})); +TEST(SubstitutionFormatUtilsTest, protocolToString) { + EXPECT_EQ("HTTP/1.0", SubstitutionFormatUtils::protocolToString(Http::Protocol::Http10)); + EXPECT_EQ("HTTP/1.1", SubstitutionFormatUtils::protocolToString(Http::Protocol::Http11)); + EXPECT_EQ("HTTP/2", SubstitutionFormatUtils::protocolToString(Http::Protocol::Http2)); + EXPECT_EQ("-", SubstitutionFormatUtils::protocolToString({})); } -TEST(AccessLogFormatterTest, plainStringFormatter) { +TEST(SubstitutionFormatterTest, plainStringFormatter) { PlainStringFormatter formatter("plain"); Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, {":path", "/"}}; Http::TestResponseHeaderMapImpl response_headers; @@ -114,7 +114,7 @@ TEST(AccessLogFormatterTest, plainStringFormatter) { ProtoEq(ValueUtil::stringValue("plain"))); } -TEST(AccessLogFormatterTest, streamInfoFormatter) { +TEST(SubstitutionFormatterTest, streamInfoFormatter) { EXPECT_THROW(StreamInfoFormatter formatter("unknown_field"), EnvoyException); NiceMock stream_info; @@ -979,7 +979,7 @@ TEST(AccessLogFormatterTest, streamInfoFormatter) { } } -TEST(AccessLogFormatterTest, requestHeaderFormatter) { +TEST(SubstitutionFormatterTest, requestHeaderFormatter) { StreamInfo::MockStreamInfo stream_info; Http::TestRequestHeaderMapImpl request_header{{":method", "GET"}, {":path", "/"}}; Http::TestResponseHeaderMapImpl 
response_header{{":method", "PUT"}}; @@ -1032,7 +1032,7 @@ TEST(AccessLogFormatterTest, requestHeaderFormatter) { } } -TEST(AccessLogFormatterTest, responseHeaderFormatter) { +TEST(SubstitutionFormatterTest, responseHeaderFormatter) { StreamInfo::MockStreamInfo stream_info; Http::TestRequestHeaderMapImpl request_header{{":method", "GET"}, {":path", "/"}}; Http::TestResponseHeaderMapImpl response_header{{":method", "PUT"}, {"test", "test"}}; @@ -1085,7 +1085,7 @@ TEST(AccessLogFormatterTest, responseHeaderFormatter) { } } -TEST(AccessLogFormatterTest, responseTrailerFormatter) { +TEST(SubstitutionFormatterTest, responseTrailerFormatter) { StreamInfo::MockStreamInfo stream_info; Http::TestRequestHeaderMapImpl request_header{{":method", "GET"}, {":path", "/"}}; Http::TestResponseHeaderMapImpl response_header{{":method", "PUT"}, {"test", "test"}}; @@ -1154,7 +1154,7 @@ void populateMetadataTestData(envoy::config::core::v3::Metadata& metadata) { (*metadata.mutable_filter_metadata())["com.test"] = struct_obj; } -TEST(AccessLogFormatterTest, DynamicMetadataFormatter) { +TEST(SubstitutionFormatterTest, DynamicMetadataFormatter) { envoy::config::core::v3::Metadata metadata; populateMetadataTestData(metadata); NiceMock stream_info; @@ -1249,7 +1249,7 @@ TEST(AccessLogFormatterTest, DynamicMetadataFormatter) { } } -TEST(AccessLogFormatterTest, FilterStateFormatter) { +TEST(SubstitutionFormatterTest, FilterStateFormatter) { Http::TestRequestHeaderMapImpl request_headers; Http::TestResponseHeaderMapImpl response_headers; Http::TestResponseTrailerMapImpl response_trailers; @@ -1373,7 +1373,7 @@ TEST(AccessLogFormatterTest, FilterStateFormatter) { } } -TEST(AccessLogFormatterTest, StartTimeFormatter) { +TEST(SubstitutionFormatterTest, StartTimeFormatter) { NiceMock stream_info; Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, {":path", "/"}}; Http::TestResponseHeaderMapImpl response_headers; @@ -1405,7 +1405,7 @@ TEST(AccessLogFormatterTest, StartTimeFormatter) { } } -TEST(AccessLogFormatterTest, GrpcStatusFormatterTest) { +TEST(SubstitutionFormatterTest, GrpcStatusFormatterTest) { GrpcStatusFormatter formatter("grpc-status", "", absl::optional()); NiceMock stream_info; Http::TestRequestHeaderMapImpl request_header; @@ -1473,7 +1473,7 @@ void verifyJsonOutput(std::string json_string, } } -TEST(AccessLogFormatterTest, JsonFormatterPlainStringTest) { +TEST(SubstitutionFormatterTest, JsonFormatterPlainStringTest) { StreamInfo::MockStreamInfo stream_info; Http::TestRequestHeaderMapImpl request_header; Http::TestResponseHeaderMapImpl response_header; @@ -1497,7 +1497,7 @@ TEST(AccessLogFormatterTest, JsonFormatterPlainStringTest) { expected_json_map); } -TEST(AccessLogFormatterTest, JsonFormatterSingleOperatorTest) { +TEST(SubstitutionFormatterTest, JsonFormatterSingleOperatorTest) { StreamInfo::MockStreamInfo stream_info; Http::TestRequestHeaderMapImpl request_header; Http::TestResponseHeaderMapImpl response_header; @@ -1519,7 +1519,7 @@ TEST(AccessLogFormatterTest, JsonFormatterSingleOperatorTest) { expected_json_map); } -TEST(AccessLogFormatterTest, JsonFormatterNonExistentHeaderTest) { +TEST(SubstitutionFormatterTest, JsonFormatterNonExistentHeaderTest) { StreamInfo::MockStreamInfo stream_info; Http::TestRequestHeaderMapImpl request_header{{"some_request_header", "SOME_REQUEST_HEADER"}}; Http::TestResponseHeaderMapImpl response_header{{"some_response_header", "SOME_RESPONSE_HEADER"}}; @@ -1547,7 +1547,7 @@ TEST(AccessLogFormatterTest, JsonFormatterNonExistentHeaderTest) { 
expected_json_map); } -TEST(AccessLogFormatterTest, JsonFormatterAlternateHeaderTest) { +TEST(SubstitutionFormatterTest, JsonFormatterAlternateHeaderTest) { StreamInfo::MockStreamInfo stream_info; Http::TestRequestHeaderMapImpl request_header{ {"request_present_header", "REQUEST_PRESENT_HEADER"}}; @@ -1581,7 +1581,7 @@ TEST(AccessLogFormatterTest, JsonFormatterAlternateHeaderTest) { expected_json_map); } -TEST(AccessLogFormatterTest, JsonFormatterDynamicMetadataTest) { +TEST(SubstitutionFormatterTest, JsonFormatterDynamicMetadataTest) { StreamInfo::MockStreamInfo stream_info; Http::TestRequestHeaderMapImpl request_header{{"first", "GET"}, {":path", "/"}}; Http::TestResponseHeaderMapImpl response_header{{"second", "PUT"}, {"test", "test"}}; @@ -1610,7 +1610,7 @@ TEST(AccessLogFormatterTest, JsonFormatterDynamicMetadataTest) { expected_json_map); } -TEST(AccessLogFormatterTest, JsonFormatterTypedDynamicMetadataTest) { +TEST(SubstitutionFormatterTest, JsonFormatterTypedDynamicMetadataTest) { StreamInfo::MockStreamInfo stream_info; Http::TestRequestHeaderMapImpl request_header{{"first", "GET"}, {":path", "/"}}; Http::TestResponseHeaderMapImpl response_header{{"second", "PUT"}, {"test", "test"}}; @@ -1641,7 +1641,7 @@ TEST(AccessLogFormatterTest, JsonFormatterTypedDynamicMetadataTest) { fields.at("test_obj").struct_value().fields().at("inner_key").string_value()); } -TEST(AccessLogFormatterTest, JsonFormatterFilterStateTest) { +TEST(SubstitutionFormatterTest, JsonFormatterFilterStateTest) { Http::TestRequestHeaderMapImpl request_headers; Http::TestResponseHeaderMapImpl response_headers; Http::TestResponseTrailerMapImpl response_trailers; @@ -1668,7 +1668,7 @@ TEST(AccessLogFormatterTest, JsonFormatterFilterStateTest) { expected_json_map); } -TEST(AccessLogFormatterTest, JsonFormatterTypedFilterStateTest) { +TEST(SubstitutionFormatterTest, JsonFormatterTypedFilterStateTest) { Http::TestRequestHeaderMapImpl request_headers; Http::TestResponseHeaderMapImpl response_headers; Http::TestResponseTrailerMapImpl response_trailers; @@ -1700,7 +1700,7 @@ TEST(AccessLogFormatterTest, JsonFormatterTypedFilterStateTest) { // Test new specifier (PLAIN/TYPED) of FilterState. Ensure that after adding additional specifier, // the FilterState can call the serializeAsProto or serializeAsString methods correctly. -TEST(AccessLogFormatterTest, FilterStateSpeciferTest) { +TEST(SubstitutionFormatterTest, FilterStateSpeciferTest) { Http::TestRequestHeaderMapImpl request_headers; Http::TestResponseHeaderMapImpl response_headers; Http::TestResponseTrailerMapImpl response_trailers; @@ -1729,7 +1729,7 @@ TEST(AccessLogFormatterTest, FilterStateSpeciferTest) { // Test new specifier (PLAIN/TYPED) of FilterState and convert the output log string to proto // and then verify the result. -TEST(AccessLogFormatterTest, TypedFilterStateSpeciferTest) { +TEST(SubstitutionFormatterTest, TypedFilterStateSpeciferTest) { Http::TestRequestHeaderMapImpl request_headers; Http::TestResponseHeaderMapImpl response_headers; Http::TestResponseTrailerMapImpl response_trailers; @@ -1758,7 +1758,7 @@ TEST(AccessLogFormatterTest, TypedFilterStateSpeciferTest) { } // Error specifier will cause an exception to be thrown. 
-TEST(AccessLogFormatterTest, FilterStateErrorSpeciferTest) { +TEST(SubstitutionFormatterTest, FilterStateErrorSpeciferTest) { Http::TestRequestHeaderMapImpl request_headers; Http::TestResponseHeaderMapImpl response_headers; Http::TestResponseTrailerMapImpl response_trailers; @@ -1777,7 +1777,7 @@ TEST(AccessLogFormatterTest, FilterStateErrorSpeciferTest) { "Invalid filter state serialize type, only support PLAIN/TYPED."); } -TEST(AccessLogFormatterTest, JsonFormatterStartTimeTest) { +TEST(SubstitutionFormatterTest, JsonFormatterStartTimeTest) { StreamInfo::MockStreamInfo stream_info; Http::TestRequestHeaderMapImpl request_header; Http::TestResponseHeaderMapImpl response_header; @@ -1808,7 +1808,7 @@ TEST(AccessLogFormatterTest, JsonFormatterStartTimeTest) { expected_json_map); } -TEST(AccessLogFormatterTest, JsonFormatterMultiTokenTest) { +TEST(SubstitutionFormatterTest, JsonFormatterMultiTokenTest) { { StreamInfo::MockStreamInfo stream_info; Http::TestRequestHeaderMapImpl request_header{{"some_request_header", "SOME_REQUEST_HEADER"}}; @@ -1839,7 +1839,7 @@ TEST(AccessLogFormatterTest, JsonFormatterMultiTokenTest) { } } -TEST(AccessLogFormatterTest, JsonFormatterTypedTest) { +TEST(SubstitutionFormatterTest, JsonFormatterTypedTest) { Http::TestRequestHeaderMapImpl request_headers; Http::TestResponseHeaderMapImpl response_headers; Http::TestResponseTrailerMapImpl response_trailers; @@ -1882,7 +1882,7 @@ TEST(AccessLogFormatterTest, JsonFormatterTypedTest) { EXPECT_THAT(output.fields().at("filter_state"), ProtoEq(expected)); } -TEST(AccessLogFormatterTest, CompositeFormatterSuccess) { +TEST(SubstitutionFormatterTest, CompositeFormatterSuccess) { StreamInfo::MockStreamInfo stream_info; Http::TestRequestHeaderMapImpl request_header{{"first", "GET"}, {":path", "/"}}; Http::TestResponseHeaderMapImpl response_header{{"second", "PUT"}, {"test", "test"}}; @@ -2021,8 +2021,8 @@ TEST(AccessLogFormatterTest, CompositeFormatterSuccess) { } } -TEST(AccessLogFormatterTest, ParserFailures) { - AccessLogFormatParser parser; +TEST(SubstitutionFormatterTest, ParserFailures) { + SubstitutionFormatParser parser; std::vector test_cases = { "{{%PROTOCOL%}} ++ %REQ(FIRST?SECOND)% %RESP(FIRST?SECOND)", @@ -2061,5 +2061,5 @@ TEST(AccessLogFormatterTest, ParserFailures) { } } // namespace -} // namespace AccessLog +} // namespace Formatter } // namespace Envoy diff --git a/test/common/http/BUILD b/test/common/http/BUILD index 3f9e7fcb3715..105f9e510006 100644 --- a/test/common/http/BUILD +++ b/test/common/http/BUILD @@ -199,11 +199,11 @@ envoy_cc_test( "//include/envoy/event:dispatcher_interface", "//include/envoy/http:request_id_extension_interface", "//include/envoy/tracing:http_tracer_interface", - "//source/common/access_log:access_log_formatter_lib", "//source/common/access_log:access_log_lib", "//source/common/buffer:buffer_lib", "//source/common/common:macros", "//source/common/event:dispatcher_lib", + "//source/common/formatter:substitution_formatter_lib", "//source/common/http:conn_manager_lib", "//source/common/http:context_lib", "//source/common/http:date_provider_lib", diff --git a/test/common/http/conn_manager_impl_test.cc b/test/common/http/conn_manager_impl_test.cc index 58a22f40874b..7083b4f92d9d 100644 --- a/test/common/http/conn_manager_impl_test.cc +++ b/test/common/http/conn_manager_impl_test.cc @@ -13,11 +13,11 @@ #include "envoy/type/tracing/v3/custom_tag.pb.h" #include "envoy/type/v3/percent.pb.h" -#include "common/access_log/access_log_formatter.h" #include 
"common/access_log/access_log_impl.h" #include "common/buffer/buffer_impl.h" #include "common/common/empty_string.h" #include "common/common/macros.h" +#include "common/formatter/substitution_formatter.h" #include "common/http/conn_manager_impl.h" #include "common/http/context_impl.h" #include "common/http/date_provider_impl.h" @@ -88,8 +88,8 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan : http_context_(fake_stats_.symbolTable()), access_log_path_("dummy_path"), access_logs_{ AccessLog::InstanceSharedPtr{new Extensions::AccessLoggers::File::FileAccessLog( - access_log_path_, {}, AccessLog::AccessLogFormatUtils::defaultAccessLogFormatter(), - log_manager_)}}, + access_log_path_, {}, + Formatter::SubstitutionFormatUtils::defaultSubstitutionFormatter(), log_manager_)}}, codec_(new NiceMock()), stats_({ALL_HTTP_CONN_MAN_STATS(POOL_COUNTER(fake_stats_), POOL_GAUGE(fake_stats_), POOL_HISTOGRAM(fake_stats_))}, diff --git a/tools/code_format/check_format.py b/tools/code_format/check_format.py index 60ab0a9d0216..2a82d7f93ffc 100755 --- a/tools/code_format/check_format.py +++ b/tools/code_format/check_format.py @@ -76,7 +76,7 @@ "./source/common/common/utility.cc", "./source/common/common/regex.h", "./source/common/common/regex.cc", "./source/common/stats/tag_extractor_impl.h", "./source/common/stats/tag_extractor_impl.cc", - "./source/common/access_log/access_log_formatter.cc", + "./source/common/formatter/substitution_formatter.cc", "./source/extensions/filters/http/squash/squash_filter.h", "./source/extensions/filters/http/squash/squash_filter.cc", "./source/server/admin/utils.h", "./source/server/admin/utils.cc", "./source/server/admin/stats_handler.h", From 86caf439d6cae2c8173b19fd4fdc95361565a72d Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Thu, 28 May 2020 20:35:56 -0700 Subject: [PATCH 244/909] udp_proxy: upgrade to not alpha (#11339) This extension is used in production and we should treat it as such. 
Signed-off-by: Matt Klein --- api/BUILD | 2 +- .../udp/udp_proxy/v2alpha/udp_proxy.proto | 5 ++- .../extensions/filters/udp/udp_proxy/v3/BUILD | 12 ++++++ .../filters/udp/udp_proxy/v3/udp_proxy.proto | 38 +++++++++++++++++++ api/versioning/BUILD | 3 +- docs/root/api-v3/config/filter/udp/udp.rst | 2 +- .../listeners/udp_filters/udp_proxy.rst | 10 ++--- docs/root/version_history/current.rst | 5 ++- .../udp/udp_proxy/v2alpha/udp_proxy.proto | 5 ++- .../extensions/filters/udp/udp_proxy/v3/BUILD | 12 ++++++ .../filters/udp/udp_proxy/v3/udp_proxy.proto | 38 +++++++++++++++++++ source/extensions/filters/udp/udp_proxy/BUILD | 5 +-- .../extensions/filters/udp/udp_proxy/config.h | 8 ++-- .../filters/udp/udp_proxy/udp_proxy_filter.h | 4 +- test/extensions/filters/udp/udp_proxy/BUILD | 2 +- .../udp/udp_proxy/udp_proxy_filter_test.cc | 6 +-- .../udp_proxy/udp_proxy_integration_test.cc | 2 +- 17 files changed, 131 insertions(+), 28 deletions(-) create mode 100644 api/envoy/extensions/filters/udp/udp_proxy/v3/BUILD create mode 100644 api/envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.proto create mode 100644 generated_api_shadow/envoy/extensions/filters/udp/udp_proxy/v3/BUILD create mode 100644 generated_api_shadow/envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.proto diff --git a/api/BUILD b/api/BUILD index cdc59c8a143b..73912d9fbbac 100644 --- a/api/BUILD +++ b/api/BUILD @@ -133,7 +133,6 @@ proto_library( "//envoy/config/core/v3:pkg", "//envoy/config/endpoint/v3:pkg", "//envoy/config/filter/thrift/router/v2alpha1:pkg", - "//envoy/config/filter/udp/udp_proxy/v2alpha:pkg", "//envoy/config/grpc_credential/v3:pkg", "//envoy/config/health_checker/redis/v2:pkg", "//envoy/config/listener/v3:pkg", @@ -223,6 +222,7 @@ proto_library( "//envoy/extensions/filters/network/thrift_proxy/v3:pkg", "//envoy/extensions/filters/network/zookeeper_proxy/v3:pkg", "//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg", + "//envoy/extensions/filters/udp/udp_proxy/v3:pkg", "//envoy/extensions/internal_redirect/allow_listed_routes/v3:pkg", "//envoy/extensions/internal_redirect/previous_routes/v3:pkg", "//envoy/extensions/internal_redirect/safe_cross_scheme/v3:pkg", diff --git a/api/envoy/config/filter/udp/udp_proxy/v2alpha/udp_proxy.proto b/api/envoy/config/filter/udp/udp_proxy/v2alpha/udp_proxy.proto index 5079c1f0df48..06dc150d5c70 100644 --- a/api/envoy/config/filter/udp/udp_proxy/v2alpha/udp_proxy.proto +++ b/api/envoy/config/filter/udp/udp_proxy/v2alpha/udp_proxy.proto @@ -4,13 +4,16 @@ package envoy.config.filter.udp.udp_proxy.v2alpha; import "google/protobuf/duration.proto"; +import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.udp.udp_proxy.v2alpha"; option java_outer_classname = "UdpProxyProto"; option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; +option (udpa.annotations.file_migrate).move_to_package = + "envoy.extensions.filters.udp.udp_proxy.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: UDP proxy] // UDP proxy :ref:`configuration overview `. diff --git a/api/envoy/extensions/filters/udp/udp_proxy/v3/BUILD b/api/envoy/extensions/filters/udp/udp_proxy/v3/BUILD new file mode 100644 index 000000000000..c9a0d3106039 --- /dev/null +++ b/api/envoy/extensions/filters/udp/udp_proxy/v3/BUILD @@ -0,0 +1,12 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/filter/udp/udp_proxy/v2alpha:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.proto b/api/envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.proto new file mode 100644 index 000000000000..43d2c56c0673 --- /dev/null +++ b/api/envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.proto @@ -0,0 +1,38 @@ +syntax = "proto3"; + +package envoy.extensions.filters.udp.udp_proxy.v3; + +import "google/protobuf/duration.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.udp.udp_proxy.v3"; +option java_outer_classname = "UdpProxyProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: UDP proxy] +// UDP proxy :ref:`configuration overview `. +// [#extension: envoy.filters.udp_listener.udp_proxy] + +// Configuration for the UDP proxy filter. +message UdpProxyConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.filter.udp.udp_proxy.v2alpha.UdpProxyConfig"; + + // The stat prefix used when emitting UDP proxy filter stats. + string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + + oneof route_specifier { + option (validate.required) = true; + + // The upstream cluster to connect to. + string cluster = 2 [(validate.rules).string = {min_bytes: 1}]; + } + + // The idle timeout for sessions. Idle is defined as no datagrams between received or sent by + // the session. The default if not specified is 1 minute. 
+ google.protobuf.Duration idle_timeout = 3; +} diff --git a/api/versioning/BUILD b/api/versioning/BUILD index fecf08a9f701..1bc4b51231e2 100644 --- a/api/versioning/BUILD +++ b/api/versioning/BUILD @@ -16,7 +16,6 @@ proto_library( "//envoy/config/core/v3:pkg", "//envoy/config/endpoint/v3:pkg", "//envoy/config/filter/thrift/router/v2alpha1:pkg", - "//envoy/config/filter/udp/udp_proxy/v2alpha:pkg", "//envoy/config/grpc_credential/v3:pkg", "//envoy/config/health_checker/redis/v2:pkg", "//envoy/config/listener/v3:pkg", @@ -106,6 +105,7 @@ proto_library( "//envoy/extensions/filters/network/thrift_proxy/v3:pkg", "//envoy/extensions/filters/network/zookeeper_proxy/v3:pkg", "//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg", + "//envoy/extensions/filters/udp/udp_proxy/v3:pkg", "//envoy/extensions/internal_redirect/allow_listed_routes/v3:pkg", "//envoy/extensions/internal_redirect/previous_routes/v3:pkg", "//envoy/extensions/internal_redirect/safe_cross_scheme/v3:pkg", @@ -217,6 +217,7 @@ proto_library( "//envoy/config/filter/network/thrift_proxy/v2alpha1:pkg", "//envoy/config/filter/network/zookeeper_proxy/v1alpha1:pkg", "//envoy/config/filter/thrift/rate_limit/v2alpha1:pkg", + "//envoy/config/filter/udp/udp_proxy/v2alpha:pkg", "//envoy/config/grpc_credential/v2alpha:pkg", "//envoy/config/listener/v2:pkg", "//envoy/config/metrics/v2:pkg", diff --git a/docs/root/api-v3/config/filter/udp/udp.rst b/docs/root/api-v3/config/filter/udp/udp.rst index 45a9d0a2b97a..c430280ca06a 100644 --- a/docs/root/api-v3/config/filter/udp/udp.rst +++ b/docs/root/api-v3/config/filter/udp/udp.rst @@ -5,5 +5,5 @@ UDP listener filters :glob: :maxdepth: 2 - */v2alpha/* + ../../../extensions/filters/udp/*/v3/* ../../../extensions/filters/udp/*/v3alpha/* diff --git a/docs/root/configuration/listeners/udp_filters/udp_proxy.rst b/docs/root/configuration/listeners/udp_filters/udp_proxy.rst index 1929fc3c2a7e..1ea17b0e830a 100644 --- a/docs/root/configuration/listeners/udp_filters/udp_proxy.rst +++ b/docs/root/configuration/listeners/udp_filters/udp_proxy.rst @@ -3,11 +3,7 @@ UDP proxy ========= -.. attention:: - - UDP proxy support should be considered alpha and not production ready. - -* :ref:`v3 API reference ` +* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.udp_listener.udp_proxy* Overview @@ -22,7 +18,7 @@ Because UDP is not a connection oriented protocol, Envoy must keep track of a cl such that the response datagrams from an upstream server can be routed back to the correct client. Each session is index by the 4-tuple consisting of source IP/port and local IP/port that the datagram is received on. Sessions last until the :ref:`idle timeout -` is reached. +` is reached. Load balancing and unhealthy host handling ------------------------------------------ @@ -69,7 +65,7 @@ server listening on port 1235. listener_filters: name: envoy.filters.udp_listener.udp_proxy typed_config: - '@type': type.googleapis.com/envoy.config.filter.udp.udp_proxy.v2alpha.UdpProxyConfig + '@type': type.googleapis.com/envoy.extensions.filters.udp.udp_proxy.v3.UdpProxyConfig stat_prefix: service cluster: service_udp clusters: diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 7d4fcd9a3f5d..13b7dae1217d 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -78,12 +78,13 @@ New Features * router: add support for RESPONSE_FLAGS and RESPONSE_CODE_DETAILS :ref:`header formatters `. 
* router: allow Rate Limiting Service to be called in case of missing request header for a descriptor if the :ref:`skip_if_absent ` field is set to true. -* router: more fine grained internal redirect configs are added to the :ref`internal_redirect_policy - ` field. +* router: more fine grained internal redirect configs are added to the :ref:`internal_redirect_policy + ` field. * runtime: add new gauge :ref:`deprecated_feature_seen_since_process_start ` that gets reset across hot restarts. * stats: added the option to :ref:`report counters as deltas ` to the metrics service stats sink. * tracing: tracing configuration has been made fully dynamic and every HTTP connection manager can now have a separate :ref:`tracing provider `. +* udp: :ref:`udp_proxy ` filter has been upgraded to v3 and is no longer considered alpha. Deprecated ---------- diff --git a/generated_api_shadow/envoy/config/filter/udp/udp_proxy/v2alpha/udp_proxy.proto b/generated_api_shadow/envoy/config/filter/udp/udp_proxy/v2alpha/udp_proxy.proto index 5079c1f0df48..06dc150d5c70 100644 --- a/generated_api_shadow/envoy/config/filter/udp/udp_proxy/v2alpha/udp_proxy.proto +++ b/generated_api_shadow/envoy/config/filter/udp/udp_proxy/v2alpha/udp_proxy.proto @@ -4,13 +4,16 @@ package envoy.config.filter.udp.udp_proxy.v2alpha; import "google/protobuf/duration.proto"; +import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.udp.udp_proxy.v2alpha"; option java_outer_classname = "UdpProxyProto"; option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; +option (udpa.annotations.file_migrate).move_to_package = + "envoy.extensions.filters.udp.udp_proxy.v3"; +option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: UDP proxy] // UDP proxy :ref:`configuration overview `. diff --git a/generated_api_shadow/envoy/extensions/filters/udp/udp_proxy/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/udp/udp_proxy/v3/BUILD new file mode 100644 index 000000000000..c9a0d3106039 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/udp/udp_proxy/v3/BUILD @@ -0,0 +1,12 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/filter/udp/udp_proxy/v2alpha:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.proto b/generated_api_shadow/envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.proto new file mode 100644 index 000000000000..43d2c56c0673 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.proto @@ -0,0 +1,38 @@ +syntax = "proto3"; + +package envoy.extensions.filters.udp.udp_proxy.v3; + +import "google/protobuf/duration.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.udp.udp_proxy.v3"; +option java_outer_classname = "UdpProxyProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: UDP proxy] +// UDP proxy :ref:`configuration overview `. 
+// [#extension: envoy.filters.udp_listener.udp_proxy] + +// Configuration for the UDP proxy filter. +message UdpProxyConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.filter.udp.udp_proxy.v2alpha.UdpProxyConfig"; + + // The stat prefix used when emitting UDP proxy filter stats. + string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + + oneof route_specifier { + option (validate.required) = true; + + // The upstream cluster to connect to. + string cluster = 2 [(validate.rules).string = {min_bytes: 1}]; + } + + // The idle timeout for sessions. Idle is defined as no datagrams between received or sent by + // the session. The default if not specified is 1 minute. + google.protobuf.Duration idle_timeout = 3; +} diff --git a/source/extensions/filters/udp/udp_proxy/BUILD b/source/extensions/filters/udp/udp_proxy/BUILD index 0704d744ad64..c111de929f66 100644 --- a/source/extensions/filters/udp/udp_proxy/BUILD +++ b/source/extensions/filters/udp/udp_proxy/BUILD @@ -20,7 +20,7 @@ envoy_cc_library( "//include/envoy/network:listener_interface", "//include/envoy/upstream:cluster_manager_interface", "//source/common/network:utility_lib", - "@envoy_api//envoy/config/filter/udp/udp_proxy/v2alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/udp/udp_proxy/v3:pkg_cc_proto", ], ) @@ -29,11 +29,10 @@ envoy_cc_extension( srcs = ["config.cc"], hdrs = ["config.h"], security_posture = "robust_to_untrusted_downstream", - status = "alpha", deps = [ ":udp_proxy_filter_lib", "//include/envoy/registry", "//include/envoy/server:filter_config_interface", - "@envoy_api//envoy/config/filter/udp/udp_proxy/v2alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/udp/udp_proxy/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/filters/udp/udp_proxy/config.h b/source/extensions/filters/udp/udp_proxy/config.h index a82991f5dd57..36dc5e2b7550 100644 --- a/source/extensions/filters/udp/udp_proxy/config.h +++ b/source/extensions/filters/udp/udp_proxy/config.h @@ -1,7 +1,7 @@ #pragma once -#include "envoy/config/filter/udp/udp_proxy/v2alpha/udp_proxy.pb.h" -#include "envoy/config/filter/udp/udp_proxy/v2alpha/udp_proxy.pb.validate.h" +#include "envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.pb.h" +#include "envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.pb.validate.h" #include "envoy/server/filter_config.h" #include "extensions/filters/udp/udp_proxy/udp_proxy_filter.h" @@ -24,7 +24,7 @@ class UdpProxyFilterConfigFactory auto shared_config = std::make_shared( context.clusterManager(), context.timeSource(), context.scope(), MessageUtil::downcastAndValidate< - const envoy::config::filter::udp::udp_proxy::v2alpha::UdpProxyConfig&>( + const envoy::extensions::filters::udp::udp_proxy::v3::UdpProxyConfig&>( config, context.messageValidationVisitor())); return [shared_config](Network::UdpListenerFilterManager& filter_manager, Network::UdpReadFilterCallbacks& callbacks) -> void { @@ -33,7 +33,7 @@ class UdpProxyFilterConfigFactory } ProtobufTypes::MessagePtr createEmptyConfigProto() override { - return std::make_unique(); + return std::make_unique(); } std::string name() const override { return "envoy.filters.udp_listener.udp_proxy"; } diff --git a/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.h b/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.h index 78a906fbd2f0..9b883646c9fa 100644 --- a/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.h +++ b/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.h @@ -1,8 +1,8 @@ #pragma 
once -#include "envoy/config/filter/udp/udp_proxy/v2alpha/udp_proxy.pb.h" #include "envoy/event/file_event.h" #include "envoy/event/timer.h" +#include "envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.pb.h" #include "envoy/network/filter.h" #include "envoy/upstream/cluster_manager.h" @@ -60,7 +60,7 @@ class UdpProxyFilterConfig { public: UdpProxyFilterConfig(Upstream::ClusterManager& cluster_manager, TimeSource& time_source, Stats::Scope& root_scope, - const envoy::config::filter::udp::udp_proxy::v2alpha::UdpProxyConfig& config) + const envoy::extensions::filters::udp::udp_proxy::v3::UdpProxyConfig& config) : cluster_manager_(cluster_manager), time_source_(time_source), cluster_(config.cluster()), session_timeout_(PROTOBUF_GET_MS_OR_DEFAULT(config, idle_timeout, 60 * 1000)), stats_(generateStats(config.stat_prefix(), root_scope)) {} diff --git a/test/extensions/filters/udp/udp_proxy/BUILD b/test/extensions/filters/udp/udp_proxy/BUILD index c5dfaa36f194..480131efc548 100644 --- a/test/extensions/filters/udp/udp_proxy/BUILD +++ b/test/extensions/filters/udp/udp_proxy/BUILD @@ -19,7 +19,7 @@ envoy_extension_cc_test( "//source/extensions/filters/udp/udp_proxy:udp_proxy_filter_lib", "//test/mocks/network:io_handle_mocks", "//test/mocks/upstream:upstream_mocks", - "@envoy_api//envoy/config/filter/udp/udp_proxy/v2alpha:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/udp/udp_proxy/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/filters/udp/udp_proxy/udp_proxy_filter_test.cc b/test/extensions/filters/udp/udp_proxy/udp_proxy_filter_test.cc index 3e773189bcc9..c918c0aad783 100644 --- a/test/extensions/filters/udp/udp_proxy/udp_proxy_filter_test.cc +++ b/test/extensions/filters/udp/udp_proxy/udp_proxy_filter_test.cc @@ -1,5 +1,5 @@ -#include "envoy/config/filter/udp/udp_proxy/v2alpha/udp_proxy.pb.h" -#include "envoy/config/filter/udp/udp_proxy/v2alpha/udp_proxy.pb.validate.h" +#include "envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.pb.h" +#include "envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.pb.validate.h" #include "extensions/filters/udp/udp_proxy/udp_proxy_filter.h" @@ -128,7 +128,7 @@ class UdpProxyFilterTest : public testing::Test { ~UdpProxyFilterTest() override { EXPECT_CALL(callbacks_.udp_listener_, onDestroy()); } void setup(const std::string& yaml, bool has_cluster = true) { - envoy::config::filter::udp::udp_proxy::v2alpha::UdpProxyConfig config; + envoy::extensions::filters::udp::udp_proxy::v3::UdpProxyConfig config; TestUtility::loadFromYamlAndValidate(yaml, config); config_ = std::make_shared(cluster_manager_, time_system_, stats_store_, config); diff --git a/test/extensions/filters/udp/udp_proxy/udp_proxy_integration_test.cc b/test/extensions/filters/udp/udp_proxy/udp_proxy_integration_test.cc index 88b51986362b..8711f45633a0 100644 --- a/test/extensions/filters/udp/udp_proxy/udp_proxy_integration_test.cc +++ b/test/extensions/filters/udp/udp_proxy/udp_proxy_integration_test.cc @@ -18,7 +18,7 @@ class UdpProxyIntegrationTest : public testing::TestWithParam Date: Fri, 29 May 2020 03:26:38 -0700 Subject: [PATCH 245/909] proto: hash() on proto message ignores unknown field (#11329) The hash() in MessageUtil should ignore unknown fields. Most importantly, the "original type" unknown field is introduced by VersionConverter. Ignoring unknown fields is already the implicit behavior of the std::equal_to implemented in MessageUtil, so hash() should align with this behavior.
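To make the intended behavior concrete, a minimal sketch (not part of this patch; the helper name and the unknown field number are illustrative) of two messages that differ only in unknown fields now hashing and comparing equal:

    #include "common/common/assert.h"
    #include "common/protobuf/utility.h"

    void unknownFieldsDoNotAffectHash() {
      ProtobufWkt::Struct a;
      (*a.mutable_fields())["key"].set_string_value("value");
      ProtobufWkt::Struct b = a;
      // Attach an arbitrary unknown field to one copy only.
      b.GetReflection()->MutableUnknownFields(&b)->AddFixed32(999999, 1);
      // hash() now ignores the unknown field, matching MessageUtil's equality.
      ASSERT(MessageUtil::hash(a) == MessageUtil::hash(b));
      ASSERT(MessageUtil()(a, b));
    }
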
See https://developers.google.com/protocol-buffers/docs/reference/cpp/google.protobuf.util.message_differencer#MessageDifferencer.Equivalent.details Additional Description: Risk Level: Low Testing: unit test Docs Changes: Release Notes: Fixes #11312 Signed-off-by: Yuchen Dai --- source/common/protobuf/utility.cc | 1 + test/common/protobuf/BUILD | 2 ++ test/common/protobuf/utility_test.cc | 30 ++++++++++++++++++++++++++++ 3 files changed, 33 insertions(+) diff --git a/source/common/protobuf/utility.cc b/source/common/protobuf/utility.cc index 6aba34ea841b..048da4098382 100644 --- a/source/common/protobuf/utility.cc +++ b/source/common/protobuf/utility.cc @@ -263,6 +263,7 @@ size_t MessageUtil::hash(const Protobuf::Message& message) { printer.SetExpandAny(true); printer.SetUseFieldNumber(true); printer.SetSingleLineMode(true); + printer.SetHideUnknownFields(true); printer.PrintToString(message, &text_format); } diff --git a/test/common/protobuf/BUILD b/test/common/protobuf/BUILD index c6709f80e46f..389e4e235d4d 100644 --- a/test/common/protobuf/BUILD +++ b/test/common/protobuf/BUILD @@ -37,9 +37,11 @@ envoy_cc_test( "//test/test_common:logging_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/api/v2:pkg_cc_proto", + "@envoy_api//envoy/api/v2/core:pkg_cc_proto", "@envoy_api//envoy/config/bootstrap/v2:pkg_cc_proto", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/type/v3:pkg_cc_proto", ], ) diff --git a/test/common/protobuf/utility_test.cc b/test/common/protobuf/utility_test.cc index 35b634d1edaa..ae832053dcab 100644 --- a/test/common/protobuf/utility_test.cc +++ b/test/common/protobuf/utility_test.cc @@ -1,6 +1,7 @@ #include #include "envoy/api/v2/cluster.pb.h" +#include "envoy/api/v2/core/base.pb.h" #include "envoy/config/bootstrap/v2/bootstrap.pb.h" #include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/config/bootstrap/v3/bootstrap.pb.validate.h" @@ -8,6 +9,7 @@ #include "envoy/config/cluster/v3/cluster.pb.validate.h" #include "envoy/config/cluster/v3/filter.pb.h" #include "envoy/config/cluster/v3/filter.pb.validate.h" +#include "envoy/config/core/v3/base.pb.h" #include "envoy/type/v3/percent.pb.h" #include "common/common/base64.h" @@ -154,6 +156,34 @@ TEST_F(ProtobufUtilityTest, MessageUtilHash) { EXPECT_NE(MessageUtil::hash(s), MessageUtil::hash(a1)); } +TEST_F(ProtobufUtilityTest, MessageUtilHashAndEqualToIgnoreOriginalTypeField) { + ProtobufWkt::Struct s; + (*s.mutable_fields())["ab"].set_string_value("fgh"); + EXPECT_EQ(1, s.fields_size()); + envoy::api::v2::core::Metadata mv2; + mv2.mutable_filter_metadata()->insert({"xyz", s}); + EXPECT_EQ(1, mv2.filter_metadata_size()); + + // Add the OriginalTypeFieldNumber as unknown field. + envoy::config::core::v3::Metadata mv3; + Config::VersionConverter::upgrade(mv2, mv3); + + // Add another unknown field. + { + const Protobuf::Reflection* reflection = mv3.GetReflection(); + auto* unknown_field_set = reflection->MutableUnknownFields(&mv3); + auto set_size = unknown_field_set->field_count(); + // 183412668 is the magic number OriginalTypeFieldNumber. The successor number should not be + // occupied. 
+ unknown_field_set->AddFixed32(183412668 + 1, 1); + EXPECT_EQ(set_size + 1, unknown_field_set->field_count()) << "Fail to add an unknown field"; + } + + envoy::config::core::v3::Metadata mv3dup = mv3; + ASSERT_EQ(MessageUtil::hash(mv3), MessageUtil::hash(mv3dup)); + ASSERT(MessageUtil()(mv3, mv3dup)); +} + TEST_F(ProtobufUtilityTest, RepeatedPtrUtilDebugString) { Protobuf::RepeatedPtrField repeated; EXPECT_EQ("[]", RepeatedPtrUtil::debugString(repeated)); From f8efe36d8176893d7d09fa88b80dcb4ec619bf20 Mon Sep 17 00:00:00 2001 From: htuch Date: Fri, 29 May 2020 06:33:46 -0400 Subject: [PATCH 246/909] event: remove unneeded ASSERT. (#11358) This should now be fixed in https://github.com/libevent/libevent/issues/984. Risk level: Low Testing: CI (previously verified in the libevent work that the fix worked). Signed-off-by: Harvey Tuch --- source/common/event/file_event_impl.cc | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/source/common/event/file_event_impl.cc b/source/common/event/file_event_impl.cc index 7607551fc99a..dd306b0d0e82 100644 --- a/source/common/event/file_event_impl.cc +++ b/source/common/event/file_event_impl.cc @@ -62,12 +62,8 @@ void FileEventImpl::assignEvents(uint32_t events, event_base* base) { events |= FileReadyType::Closed; } - // TODO(htuch): this should be ASSERT(events), but - // https://github.com/libevent/libevent/issues/984 seems to be producing unexpected - // behavior. The ASSERT should be restored once this issue is resolved. - if (events) { - event->cb_(events); - } + ASSERT(events != 0); + event->cb_(events); }, this); } From eca79b57b8429d67cc444025e406e3a7f3b43cbd Mon Sep 17 00:00:00 2001 From: Manish Date: Fri, 29 May 2020 17:51:42 +0530 Subject: [PATCH 247/909] health: Refactored health_checker tests. 
(#11133) * Refactored health_checker Signed-off-by: Manish Kumar --- .../upstream/health_checker_impl_test.cc | 486 +++++++----------- 1 file changed, 188 insertions(+), 298 deletions(-) diff --git a/test/common/upstream/health_checker_impl_test.cc b/test/common/upstream/health_checker_impl_test.cc index 9ce644323bfc..562edf43d67a 100644 --- a/test/common/upstream/health_checker_impl_test.cc +++ b/test/common/upstream/health_checker_impl_test.cc @@ -96,6 +96,18 @@ TEST(HealthCheckerFactoryTest, CreateGrpc) { .get())); } +class HealthCheckerTestBase { +public: + std::shared_ptr cluster_{ + std::make_shared>()}; + NiceMock dispatcher_; + std::unique_ptr event_logger_storage_{ + std::make_unique()}; + MockHealthCheckEventLogger& event_logger_{*event_logger_storage_}; + NiceMock random_; + NiceMock runtime_; +}; + class TestHttpHealthCheckerImpl : public HttpHealthCheckerImpl { public: using HttpHealthCheckerImpl::HttpHealthCheckerImpl; @@ -110,7 +122,7 @@ class TestHttpHealthCheckerImpl : public HttpHealthCheckerImpl { Http::CodecClient::Type codecClientType() { return codec_client_type_; } }; -class HttpHealthCheckerImplTest : public testing::Test { +class HttpHealthCheckerImplTest : public testing::Test, public HealthCheckerTestBase { public: struct TestSession { Event::MockTimer* interval_timer_{}; @@ -127,9 +139,18 @@ class HttpHealthCheckerImplTest : public testing::Test { std::unordered_map; - HttpHealthCheckerImplTest() - : cluster_(new NiceMock()), - event_logger_(new MockHealthCheckEventLogger()) {} + void allocHealthChecker(const std::string& yaml) { + health_checker_ = std::make_shared( + *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, + HealthCheckEventLoggerPtr(event_logger_storage_.release())); + } + + void addCompletionCallback() { + health_checker_->addHostCheckCompleteCb( + [this](HostSharedPtr host, HealthTransition changed_state) -> void { + onHostStatus(host, changed_state); + }); + } void setupNoServiceValidationHCWithHttp2() { const std::string yaml = R"EOF( @@ -146,13 +167,8 @@ class HttpHealthCheckerImplTest : public testing::Test { codec_client_type: Http2 )EOF"; - health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); - health_checker_->addHostCheckCompleteCb( - [this](HostSharedPtr host, HealthTransition changed_state) -> void { - onHostStatus(host, changed_state); - }); + allocHealthChecker(yaml); + addCompletionCallback(); } void setupInitialJitter() { @@ -170,13 +186,8 @@ class HttpHealthCheckerImplTest : public testing::Test { path: /healthcheck )EOF"; - health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); - health_checker_->addHostCheckCompleteCb( - [this](HostSharedPtr host, HealthTransition changed_state) -> void { - onHostStatus(host, changed_state); - }); + allocHealthChecker(yaml); + addCompletionCallback(); } void setupIntervalJitterPercent() { @@ -193,13 +204,8 @@ class HttpHealthCheckerImplTest : public testing::Test { path: /healthcheck )EOF"; - health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); - health_checker_->addHostCheckCompleteCb( - [this](HostSharedPtr host, HealthTransition changed_state) -> void { - onHostStatus(host, changed_state); - }); + allocHealthChecker(yaml); + addCompletionCallback(); } 
void setupNoServiceValidationHC() { @@ -216,13 +222,8 @@ class HttpHealthCheckerImplTest : public testing::Test { path: /healthcheck )EOF"; - health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); - health_checker_->addHostCheckCompleteCb( - [this](HostSharedPtr host, HealthTransition changed_state) -> void { - onHostStatus(host, changed_state); - }); + allocHealthChecker(yaml); + addCompletionCallback(); } void setupNoServiceValidationHCOneUnhealthy() { @@ -239,13 +240,8 @@ class HttpHealthCheckerImplTest : public testing::Test { path: /healthcheck )EOF"; - health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); - health_checker_->addHostCheckCompleteCb( - [this](HostSharedPtr host, HealthTransition changed_state) -> void { - onHostStatus(host, changed_state); - }); + allocHealthChecker(yaml); + addCompletionCallback(); } void setupNoServiceValidationHCAlwaysLogFailure() { @@ -263,13 +259,8 @@ class HttpHealthCheckerImplTest : public testing::Test { always_log_health_check_failures: true )EOF"; - health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); - health_checker_->addHostCheckCompleteCb( - [this](HostSharedPtr host, HealthTransition changed_state) -> void { - onHostStatus(host, changed_state); - }); + allocHealthChecker(yaml); + addCompletionCallback(); } void setupNoServiceValidationNoReuseConnectionHC() { @@ -284,13 +275,8 @@ class HttpHealthCheckerImplTest : public testing::Test { path: /healthcheck )EOF"; - health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); - health_checker_->addHostCheckCompleteCb( - [this](HostSharedPtr host, HealthTransition changed_state) -> void { - onHostStatus(host, changed_state); - }); + allocHealthChecker(yaml); + addCompletionCallback(); } void setupHealthCheckIntervalOverridesHC() { @@ -310,13 +296,8 @@ class HttpHealthCheckerImplTest : public testing::Test { path: /healthcheck )EOF"; - health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); - health_checker_->addHostCheckCompleteCb( - [this](HostSharedPtr host, HealthTransition changed_state) -> void { - onHostStatus(host, changed_state); - }); + allocHealthChecker(yaml); + addCompletionCallback(); } void setupServiceValidationHC() { @@ -332,13 +313,8 @@ class HttpHealthCheckerImplTest : public testing::Test { path: /healthcheck )EOF"; - health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); - health_checker_->addHostCheckCompleteCb( - [this](HostSharedPtr host, HealthTransition changed_state) -> void { - onHostStatus(host, changed_state); - }); + allocHealthChecker(yaml); + addCompletionCallback(); } void setupDeprecatedServiceNameValidationHC(const std::string& prefix) { @@ -355,13 +331,8 @@ class HttpHealthCheckerImplTest : public testing::Test { )EOF", prefix); - health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); - health_checker_->addHostCheckCompleteCb( - 
[this](HostSharedPtr host, HealthTransition changed_state) -> void { - onHostStatus(host, changed_state); - }); + allocHealthChecker(yaml); + addCompletionCallback(); } void setupServicePrefixPatternValidationHC() { @@ -377,13 +348,8 @@ class HttpHealthCheckerImplTest : public testing::Test { path: /healthcheck )EOF"; - health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); - health_checker_->addHostCheckCompleteCb( - [this](HostSharedPtr host, HealthTransition changed_state) -> void { - onHostStatus(host, changed_state); - }); + allocHealthChecker(yaml); + addCompletionCallback(); } void setupServiceExactPatternValidationHC() { @@ -399,13 +365,8 @@ class HttpHealthCheckerImplTest : public testing::Test { path: /healthcheck )EOF"; - health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); - health_checker_->addHostCheckCompleteCb( - [this](HostSharedPtr host, HealthTransition changed_state) -> void { - onHostStatus(host, changed_state); - }); + allocHealthChecker(yaml); + addCompletionCallback(); } void setupServiceRegexPatternValidationHC() { @@ -423,13 +384,8 @@ class HttpHealthCheckerImplTest : public testing::Test { path: /healthcheck )EOF"; - health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); - health_checker_->addHostCheckCompleteCb( - [this](HostSharedPtr host, HealthTransition changed_state) -> void { - onHostStatus(host, changed_state); - }); + allocHealthChecker(yaml); + addCompletionCallback(); } void setupServiceValidationWithCustomHostValueHC(const std::string& host) { @@ -447,13 +403,8 @@ class HttpHealthCheckerImplTest : public testing::Test { )EOF", host); - health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); - health_checker_->addHostCheckCompleteCb( - [this](HostSharedPtr host, HealthTransition changed_state) -> void { - onHostStatus(host, changed_state); - }); + allocHealthChecker(yaml); + addCompletionCallback(); } const envoy::config::endpoint::v3::Endpoint::HealthCheckConfig @@ -522,13 +473,8 @@ class HttpHealthCheckerImplTest : public testing::Test { value: "%START_TIME(%s.%9f)%" )EOF"; - health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); - health_checker_->addHostCheckCompleteCb( - [this](HostSharedPtr host, HealthTransition changed_state) -> void { - onHostStatus(host, changed_state); - }); + allocHealthChecker(yaml); + addCompletionCallback(); } void setupServiceValidationWithoutUserAgent() { @@ -547,13 +493,8 @@ class HttpHealthCheckerImplTest : public testing::Test { request_headers_to_remove: ["user-agent"] )EOF"; - health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); - health_checker_->addHostCheckCompleteCb( - [this](HostSharedPtr host, HealthTransition changed_state) -> void { - onHostStatus(host, changed_state); - }); + allocHealthChecker(yaml); + addCompletionCallback(); } void expectSessionCreate(const HostWithHealthCheckMap& health_check_map) { @@ -664,7 +605,7 @@ class HttpHealthCheckerImplTest : public testing::Test { 
EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)); EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _)); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); respond(0, "503", false, false, false, false, health_checked_cluster); EXPECT_TRUE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet( Host::HealthFlag::FAILED_ACTIVE_HC)); @@ -689,7 +630,7 @@ class HttpHealthCheckerImplTest : public testing::Test { test_sessions_[0]->interval_timer_->invokeCallback(); EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); - EXPECT_CALL(*event_logger_, logAddHealthy(_, _, false)); + EXPECT_CALL(event_logger_, logAddHealthy(_, _, false)); EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _)); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); respond(0, "200", false, false, false, false, health_checked_cluster); @@ -699,13 +640,8 @@ class HttpHealthCheckerImplTest : public testing::Test { MOCK_METHOD(void, onHostStatus, (HostSharedPtr host, HealthTransition changed_state)); - std::shared_ptr cluster_; - NiceMock dispatcher_; std::vector test_sessions_; std::shared_ptr health_checker_; - NiceMock runtime_; - NiceMock random_; - MockHealthCheckEventLogger* event_logger_{}; std::list connection_index_{}; std::list codec_index_{}; const HostWithHealthCheckMap health_checker_map_{}; @@ -752,7 +688,7 @@ TEST_F(HttpHealthCheckerImplTest, Degraded) { // We start off as healthy, and should go degraded after receiving the degraded health response. EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _)); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); - EXPECT_CALL(*event_logger_, logDegraded(_, _)); + EXPECT_CALL(event_logger_, logDegraded(_, _)); respond(0, "200", false, false, true, false, {}, true); EXPECT_EQ(Host::Health::Degraded, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health()); @@ -763,7 +699,7 @@ TEST_F(HttpHealthCheckerImplTest, Degraded) { EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); test_sessions_[0]->interval_timer_->invokeCallback(); EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _)); - EXPECT_CALL(*event_logger_, logNoLongerDegraded(_, _)); + EXPECT_CALL(event_logger_, logNoLongerDegraded(_, _)); respond(0, "200", false, false, true, false, {}, false); EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health()); } @@ -1028,13 +964,8 @@ TEST_F(HttpHealthCheckerImplTest, ZeroRetryInterval) { path: /healthcheck )EOF"; - health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); - health_checker_->addHostCheckCompleteCb( - [this](HostSharedPtr host, HealthTransition changed_state) -> void { - onHostStatus(host, changed_state); - }); + allocHealthChecker(yaml); + addCompletionCallback(); EXPECT_CALL(runtime_.snapshot_, featureEnabled("health_check.verify_cluster", 100)) .WillOnce(Return(true)); @@ -1097,10 +1028,7 @@ TEST_F(HttpHealthCheckerImplTest, TlsOptions) { EXPECT_CALL(*socket_factory, createTransportSocket(ApplicationProtocolListEq("http1"))); - health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); - + allocHealthChecker(yaml); 
cluster_->prioritySet().getMockHostSet(0)->hosts_ = { makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; cluster_->info_->stats().upstream_cx_total_.inc(); @@ -1495,12 +1423,12 @@ TEST_F(HttpHealthCheckerImplTest, SuccessServiceCheckWithoutUserAgent) { TEST_F(HttpHealthCheckerImplTest, ServiceDoesNotMatchFail) { setupServiceValidationHC(); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); EXPECT_CALL(runtime_.snapshot_, featureEnabled("health_check.verify_cluster", 100)) .WillOnce(Return(true)); EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)).Times(1); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; @@ -1526,12 +1454,12 @@ TEST_F(HttpHealthCheckerImplTest, ServiceDoesNotMatchFail) { TEST_F(HttpHealthCheckerImplTest, ServicePatternDoesNotMatchFail) { setupServiceRegexPatternValidationHC(); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); EXPECT_CALL(runtime_.snapshot_, featureEnabled("health_check.verify_cluster", 100)) .WillOnce(Return(true)); EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)).Times(1); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; @@ -1557,12 +1485,12 @@ TEST_F(HttpHealthCheckerImplTest, ServicePatternDoesNotMatchFail) { TEST_F(HttpHealthCheckerImplTest, ServiceNotPresentInResponseFail) { setupServiceValidationHC(); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); EXPECT_CALL(runtime_.snapshot_, featureEnabled("health_check.verify_cluster", 100)) .WillOnce(Return(true)); EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)).Times(1); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; @@ -1675,7 +1603,7 @@ TEST_F(HttpHealthCheckerImplTest, SuccessStartFailedSuccessFirst) { // Test fast success immediately moves us to healthy. 
EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)).Times(1); - EXPECT_CALL(*event_logger_, logAddHealthy(_, _, true)); + EXPECT_CALL(event_logger_, logAddHealthy(_, _, true)); EXPECT_CALL(runtime_.snapshot_, getInteger("health_check.max_interval", _)).WillOnce(Return(500)); EXPECT_CALL(runtime_.snapshot_, getInteger("health_check.min_interval", _)); EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(500), _)); @@ -1709,10 +1637,10 @@ TEST_F(HttpHealthCheckerImplTest, HttpFailRemoveHostInCallbackNoClose) { cluster_->prioritySet().getMockHostSet(0)->hosts_ = {}; cluster_->prioritySet().runUpdateCallbacks(0, {}, {host}); })); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _)).Times(0); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()).Times(0); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); respond(0, "503", false); } @@ -1731,10 +1659,10 @@ TEST_F(HttpHealthCheckerImplTest, HttpFailRemoveHostInCallbackClose) { cluster_->prioritySet().getMockHostSet(0)->hosts_ = {}; cluster_->prioritySet().runUpdateCallbacks(0, {}, {host}); })); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _)).Times(0); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()).Times(0); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); respond(0, "503", true); } @@ -1748,10 +1676,10 @@ TEST_F(HttpHealthCheckerImplTest, HttpFail) { health_checker_->start(); EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _)); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); respond(0, "503", false); EXPECT_TRUE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet( Host::HealthFlag::FAILED_ACTIVE_HC)); @@ -1778,7 +1706,7 @@ TEST_F(HttpHealthCheckerImplTest, HttpFail) { test_sessions_[0]->interval_timer_->invokeCallback(); EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); - EXPECT_CALL(*event_logger_, logAddHealthy(_, _, false)); + EXPECT_CALL(event_logger_, logAddHealthy(_, _, false)); EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _)); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); respond(0, "200", false); @@ -1795,10 +1723,10 @@ TEST_F(HttpHealthCheckerImplTest, HttpFailLogError) { health_checker_->start(); EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _)); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); respond(0, "503", false); EXPECT_TRUE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet( Host::HealthFlag::FAILED_ACTIVE_HC)); @@ 
-1815,7 +1743,7 @@ TEST_F(HttpHealthCheckerImplTest, HttpFailLogError) { EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)); EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _)); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, false)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, false)); respond(0, "503", false); EXPECT_TRUE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet( Host::HealthFlag::FAILED_ACTIVE_HC)); @@ -1842,7 +1770,7 @@ TEST_F(HttpHealthCheckerImplTest, HttpFailLogError) { test_sessions_[0]->interval_timer_->invokeCallback(); EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); - EXPECT_CALL(*event_logger_, logAddHealthy(_, _, false)); + EXPECT_CALL(event_logger_, logAddHealthy(_, _, false)); EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _)); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); respond(0, "200", false); @@ -1851,7 +1779,7 @@ TEST_F(HttpHealthCheckerImplTest, HttpFailLogError) { TEST_F(HttpHealthCheckerImplTest, Disconnect) { setupNoServiceValidationHC(); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); EXPECT_CALL(*this, onHostStatus(_, HealthTransition::ChangePending)).Times(1); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { @@ -1873,7 +1801,7 @@ TEST_F(HttpHealthCheckerImplTest, Disconnect) { EXPECT_CALL(*this, onHostStatus(cluster_->prioritySet().getMockHostSet(0)->hosts_[0], HealthTransition::Changed)); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _)); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); test_sessions_[0]->client_connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); @@ -1896,8 +1824,8 @@ TEST_F(HttpHealthCheckerImplTest, Timeout) { EXPECT_CALL(*test_sessions_[0]->client_connection_, close(_)); EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _)); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); test_sessions_[0]->timeout_timer_->invokeCallback(); EXPECT_EQ(Host::Health::Unhealthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health()); @@ -1923,7 +1851,7 @@ TEST_F(HttpHealthCheckerImplTest, TimeoutThenSuccess) { test_sessions_[0]->stream_response_callbacks_->decodeHeaders(std::move(response_headers), false); EXPECT_CALL(*this, onHostStatus(_, HealthTransition::ChangePending)); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); EXPECT_CALL(*test_sessions_[0]->client_connection_, close(_)); EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _)); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); @@ -1944,7 +1872,7 @@ TEST_F(HttpHealthCheckerImplTest, TimeoutThenSuccess) { TEST_F(HttpHealthCheckerImplTest, TimeoutThenRemoteClose) { setupNoServiceValidationHC(); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; 
expectSessionCreate(); @@ -1965,7 +1893,7 @@ TEST_F(HttpHealthCheckerImplTest, TimeoutThenRemoteClose) { test_sessions_[0]->interval_timer_->invokeCallback(); EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _)); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); test_sessions_[0]->client_connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); @@ -1984,7 +1912,7 @@ TEST_F(HttpHealthCheckerImplTest, TimeoutAfterDisconnect) { makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; expectSessionCreate(); expectStreamCreate(0); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)).Times(2); health_checker_->start(); @@ -1996,7 +1924,7 @@ TEST_F(HttpHealthCheckerImplTest, TimeoutAfterDisconnect) { } EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)).Times(1); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); test_sessions_[0]->timeout_timer_->enableTimer(std::chrono::seconds(10), nullptr); @@ -2164,7 +2092,7 @@ TEST_F(HttpHealthCheckerImplTest, HealthCheckIntervals) { // ignored and health state changes immediately. Since the threshold is ignored, next health // check respects "unhealthy_interval". EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(2000), _)); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); respond(0, "503", false); @@ -2221,7 +2149,7 @@ TEST_F(HttpHealthCheckerImplTest, HealthCheckIntervals) { // After the healthy threshold is reached, health state should change while checks should respect // the default interval. EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); - EXPECT_CALL(*event_logger_, logAddHealthy(_, _, false)); + EXPECT_CALL(event_logger_, logAddHealthy(_, _, false)); EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(1000), _)); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); respond(0, "200", false); @@ -2272,7 +2200,7 @@ TEST_F(HttpHealthCheckerImplTest, HealthCheckIntervals) { // Subsequent failing checks should respect unhealthy_interval. As the unhealthy threshold is // reached, health state should also change. EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(2000), _)); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); test_sessions_[0]->timeout_timer_->invokeCallback(); @@ -2321,7 +2249,7 @@ TEST_F(HttpHealthCheckerImplTest, HealthCheckIntervals) { // After the healthy threshold is reached, health state should change while checks should respect // the default interval. 
EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); - EXPECT_CALL(*event_logger_, logAddHealthy(_, _, false)); + EXPECT_CALL(event_logger_, logAddHealthy(_, _, false)); EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(1000), _)); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); respond(0, "200", false); @@ -2574,9 +2502,7 @@ TEST_F(HttpHealthCheckerImplTest, TransportSocketMatchCriteria) { cluster_->info_->transport_socket_matcher_ = std::move(transport_socket_match); - health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); + allocHealthChecker(yaml); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; @@ -2617,9 +2543,7 @@ TEST_F(HttpHealthCheckerImplTest, NoTransportSocketMatchCriteria) { cluster_->info_->transport_socket_matcher_ = std::move(transport_socket_match); - health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); + allocHealthChecker(yaml); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; @@ -2643,8 +2567,21 @@ class TestProdHttpHealthChecker : public ProdHttpHealthCheckerImpl { } }; -class ProdHttpHealthCheckerTest : public HttpHealthCheckerImplTest { +class ProdHttpHealthCheckerTest : public testing::Test, public HealthCheckerTestBase { public: + void allocHealthChecker(const std::string& yaml) { + health_checker_ = std::make_shared( + *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, + HealthCheckEventLoggerPtr(event_logger_storage_.release())); + } + + void addCompletionCallback() { + health_checker_->addHostCheckCompleteCb( + [this](HostSharedPtr host, HealthTransition changed_state) -> void { + onHostStatus(host, changed_state); + }); + } + void setupNoServiceValidationHCWithHttp2() { const std::string yaml = R"EOF( timeout: 1s @@ -2660,13 +2597,8 @@ class ProdHttpHealthCheckerTest : public HttpHealthCheckerImplTest { codec_client_type: Http2 )EOF"; - health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); - health_checker_->addHostCheckCompleteCb( - [this](HostSharedPtr host, HealthTransition changed_state) -> void { - onHostStatus(host, changed_state); - }); + allocHealthChecker(yaml); + addCompletionCallback(); } void setupNoServiceValidationHC() { @@ -2683,15 +2615,11 @@ class ProdHttpHealthCheckerTest : public HttpHealthCheckerImplTest { path: /healthcheck )EOF"; - health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); - health_checker_->addHostCheckCompleteCb( - [this](HostSharedPtr host, HealthTransition changed_state) -> void { - onHostStatus(host, changed_state); - }); + allocHealthChecker(yaml); + addCompletionCallback(); } + MOCK_METHOD(void, onHostStatus, (HostSharedPtr host, HealthTransition changed_state)); std::unique_ptr connection_ = std::make_unique>(); std::shared_ptr health_checker_; @@ -2718,13 +2646,8 @@ TEST_F(HttpHealthCheckerImplTest, DEPRECATED_FEATURE_TEST(Http1CodecClient)) { use_http2: false )EOF"; - health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, - 
HealthCheckEventLoggerPtr(event_logger_)); - health_checker_->addHostCheckCompleteCb( - [this](HostSharedPtr host, HealthTransition changed_state) -> void { - onHostStatus(host, changed_state); - }); + allocHealthChecker(yaml); + addCompletionCallback(); EXPECT_EQ(Http::CodecClient::Type::HTTP1, health_checker_->codecClientType()); } @@ -2743,13 +2666,8 @@ TEST_F(HttpHealthCheckerImplTest, DEPRECATED_FEATURE_TEST(Http2CodecClient)) { use_http2: true )EOF"; - health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); - health_checker_->addHostCheckCompleteCb( - [this](HostSharedPtr host, HealthTransition changed_state) -> void { - onHostStatus(host, changed_state); - }); + allocHealthChecker(yaml); + addCompletionCallback(); EXPECT_EQ(Http::CodecClient::Type::HTTP2, health_checker_->codecClientType()); } @@ -2789,12 +2707,12 @@ TEST_F(HttpHealthCheckerImplTest, DEPRECATED_FEATURE_TEST(ServiceNameMatch)) { TEST_F(HttpHealthCheckerImplTest, DEPRECATED_FEATURE_TEST(ServiceNameMismatch)) { setupDeprecatedServiceNameValidationHC("locations"); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); EXPECT_CALL(runtime_.snapshot_, featureEnabled("health_check.verify_cluster", 100)) .WillOnce(Return(true)); EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)).Times(1); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; @@ -3068,11 +2986,13 @@ TEST(TcpHealthCheckMatcher, match) { EXPECT_TRUE(TcpHealthCheckMatcher::match(segments, buffer)); } -class TcpHealthCheckerImplTest : public testing::Test { +class TcpHealthCheckerImplTest : public testing::Test, public HealthCheckerTestBase { public: - TcpHealthCheckerImplTest() - : cluster_(new NiceMock()), - event_logger_(new MockHealthCheckEventLogger()) {} + void allocHealthChecker(const std::string& yaml) { + health_checker_ = std::make_shared( + *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, + HealthCheckEventLoggerPtr(event_logger_storage_.release())); + } void setupData(unsigned int unhealthy_threshold = 2) { std::ostringstream yaml; @@ -3089,9 +3009,7 @@ class TcpHealthCheckerImplTest : public testing::Test { - text: "02" )EOF"; - health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml.str()), dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); + allocHealthChecker(yaml.str()); } void setupNoData() { @@ -3103,9 +3021,7 @@ class TcpHealthCheckerImplTest : public testing::Test { tcp_health_check: {} )EOF"; - health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); + allocHealthChecker(yaml); } void setupDataDontReuseConnection() { @@ -3122,9 +3038,7 @@ class TcpHealthCheckerImplTest : public testing::Test { - text: "02" )EOF"; - health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); + allocHealthChecker(yaml); } void expectSessionCreate() { @@ -3138,16 +3052,11 @@ class TcpHealthCheckerImplTest : public testing::Test { EXPECT_CALL(*connection_, addReadFilter(_)).WillOnce(SaveArg<0>(&read_filter_)); } - std::shared_ptr cluster_; 
- NiceMock dispatcher_; std::shared_ptr health_checker_; - MockHealthCheckEventLogger* event_logger_{}; Network::MockClientConnection* connection_{}; Event::MockTimer* timeout_timer_{}; Event::MockTimer* interval_timer_{}; Network::ReadFilterSharedPtr read_filter_; - NiceMock runtime_; - NiceMock random_; }; TEST_F(TcpHealthCheckerImplTest, Success) { @@ -3253,7 +3162,7 @@ TEST_F(TcpHealthCheckerImplTest, TimeoutThenRemoteClose) { read_filter_->onData(response, false); EXPECT_CALL(*connection_, close(_)); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); EXPECT_CALL(*timeout_timer_, disableTimer()); EXPECT_CALL(*interval_timer_, enableTimer(_, _)); timeout_timer_->invokeCallback(); @@ -3268,7 +3177,7 @@ TEST_F(TcpHealthCheckerImplTest, TimeoutThenRemoteClose) { connection_->raiseEvent(Network::ConnectionEvent::Connected); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); EXPECT_CALL(*timeout_timer_, disableTimer()); EXPECT_CALL(*interval_timer_, enableTimer(_, _)); connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); @@ -3313,8 +3222,8 @@ TEST_F(TcpHealthCheckerImplTest, Timeout) { read_filter_->onData(response, false); EXPECT_CALL(*connection_, close(_)); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); EXPECT_CALL(*timeout_timer_, disableTimer()); EXPECT_CALL(*interval_timer_, enableTimer(_, _)); timeout_timer_->invokeCallback(); @@ -3347,7 +3256,7 @@ TEST_F(TcpHealthCheckerImplTest, DoubleTimeout) { read_filter_->onData(response, false); EXPECT_CALL(*connection_, close(_)); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); EXPECT_CALL(*timeout_timer_, disableTimer()); EXPECT_CALL(*interval_timer_, enableTimer(_, _)); timeout_timer_->invokeCallback(); @@ -3363,7 +3272,7 @@ TEST_F(TcpHealthCheckerImplTest, DoubleTimeout) { connection_->raiseEvent(Network::ConnectionEvent::Connected); EXPECT_CALL(*connection_, close(_)); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); EXPECT_CALL(*timeout_timer_, disableTimer()); EXPECT_CALL(*interval_timer_, enableTimer(_, _)); timeout_timer_->invokeCallback(); @@ -3442,7 +3351,7 @@ TEST_F(TcpHealthCheckerImplTest, TimeoutWithoutReusingConnection) { connection_->raiseEvent(Network::ConnectionEvent::Connected); // Expected flow when a healthcheck times out. - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); EXPECT_CALL(*timeout_timer_, disableTimer()); EXPECT_CALL(*interval_timer_, enableTimer(_, _)); connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); @@ -3489,8 +3398,8 @@ TEST_F(TcpHealthCheckerImplTest, PassiveFailure) { expectClientCreate(); EXPECT_CALL(*connection_, write(_, _)).Times(0); EXPECT_CALL(*timeout_timer_, enableTimer(_, _)); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); health_checker_->start(); // Do multiple passive failures. This will not reset the active HC timers. 
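The hunks above fold the duplicated mock members (cluster, dispatcher, runtime, random, event logger) into a shared HealthCheckerTestBase and switch the expectations from a raw event_logger_ pointer to a reference. A minimal sketch of what such a base could look like, inferred only from the calls visible in this diff (event_logger_storage_.release(), EXPECT_CALL(event_logger_, ...)); the exact member types are assumptions, not the real Envoy declarations:

// Sketch only: shared mocks owned once by the test base. The event logger is held both
// as a unique_ptr (released into each health checker on allocation) and as a reference
// that stays valid for EXPECT_CALL(event_logger_, ...) in the tests, since the checker
// keeps the logger alive.
class HealthCheckerTestBase {
public:
  std::shared_ptr<NiceMock<MockClusterMockPrioritySet>> cluster_{
      std::make_shared<NiceMock<MockClusterMockPrioritySet>>()};
  NiceMock<Event::MockDispatcher> dispatcher_;
  NiceMock<Runtime::MockLoader> runtime_;
  NiceMock<Runtime::MockRandomGenerator> random_;
  std::unique_ptr<MockHealthCheckEventLogger> event_logger_storage_{
      std::make_unique<MockHealthCheckEventLogger>()};
  MockHealthCheckEventLogger& event_logger_{*event_logger_storage_};
};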
@@ -3587,7 +3496,7 @@ TEST_F(TcpHealthCheckerImplTest, ConnectionLocalFailure) { health_checker_->start(); // Expect the LocalClose to be handled as a health check failure - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); EXPECT_CALL(*timeout_timer_, disableTimer()); EXPECT_CALL(*interval_timer_, enableTimer(_, _)); @@ -3614,7 +3523,7 @@ class TestGrpcHealthCheckerImpl : public GrpcHealthCheckerImpl { MOCK_METHOD(Http::CodecClient*, createCodecClient_, (Upstream::Host::CreateConnectionData&)); }; -class GrpcHealthCheckerImplTestBase { +class GrpcHealthCheckerImplTestBase : public HealthCheckerTestBase { public: struct TestSession { TestSession() = default; @@ -3685,34 +3594,35 @@ class GrpcHealthCheckerImplTestBase { std::vector> trailers; }; - GrpcHealthCheckerImplTestBase() - : cluster_(new NiceMock()), - event_logger_(new MockHealthCheckEventLogger()) { + GrpcHealthCheckerImplTestBase() { EXPECT_CALL(*cluster_->info_, features()) .WillRepeatedly(Return(Upstream::ClusterInfo::Features::HTTP2)); } - void setupHC() { - const auto config = createGrpcHealthCheckConfig(); + void allocHealthChecker(const envoy::config::core::v3::HealthCheck& config) { health_checker_ = std::make_shared( *cluster_, config, dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); + HealthCheckEventLoggerPtr(event_logger_storage_.release())); + } + + void addCompletionCallback() { health_checker_->addHostCheckCompleteCb( [this](HostSharedPtr host, HealthTransition changed_state) -> void { onHostStatus(host, changed_state); }); } + void setupHC() { + const auto config = createGrpcHealthCheckConfig(); + allocHealthChecker(config); + addCompletionCallback(); + } + void setupHCWithUnhealthyThreshold(int value) { auto config = createGrpcHealthCheckConfig(); config.mutable_unhealthy_threshold()->set_value(value); - health_checker_ = std::make_shared( - *cluster_, config, dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); - health_checker_->addHostCheckCompleteCb( - [this](HostSharedPtr host, HealthTransition changed_state) -> void { - onHostStatus(host, changed_state); - }); + allocHealthChecker(config); + addCompletionCallback(); } void setupServiceNameHC(const absl::optional& authority) { @@ -3721,25 +3631,15 @@ class GrpcHealthCheckerImplTestBase { if (authority.has_value()) { config.mutable_grpc_health_check()->set_authority(authority.value()); } - health_checker_ = std::make_shared( - *cluster_, config, dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); - health_checker_->addHostCheckCompleteCb( - [this](HostSharedPtr host, HealthTransition changed_state) -> void { - onHostStatus(host, changed_state); - }); + allocHealthChecker(config); + addCompletionCallback(); } void setupNoReuseConnectionHC() { auto config = createGrpcHealthCheckConfig(); config.mutable_reuse_connection()->set_value(false); - health_checker_ = std::make_shared( - *cluster_, config, dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); - health_checker_->addHostCheckCompleteCb( - [this](HostSharedPtr host, HealthTransition changed_state) -> void { - onHostStatus(host, changed_state); - }); + allocHealthChecker(config); + addCompletionCallback(); } void setupHealthCheckIntervalOverridesHC() { @@ -3752,13 +3652,8 @@ class GrpcHealthCheckerImplTestBase { config.mutable_interval_jitter()->set_seconds(0); config.mutable_unhealthy_threshold()->set_value(3); 
config.mutable_healthy_threshold()->set_value(3); - health_checker_ = std::make_shared( - *cluster_, config, dispatcher_, runtime_, random_, - HealthCheckEventLoggerPtr(event_logger_)); - health_checker_->addHostCheckCompleteCb( - [this](HostSharedPtr host, HealthTransition changed_state) -> void { - onHostStatus(host, changed_state); - }); + allocHealthChecker(config); + addCompletionCallback(); } void expectSessionCreate() { @@ -3962,13 +3857,8 @@ class GrpcHealthCheckerImplTestBase { MOCK_METHOD(void, onHostStatus, (HostSharedPtr host, HealthTransition changed_state)); - std::shared_ptr cluster_; - NiceMock dispatcher_; std::vector test_sessions_; std::shared_ptr health_checker_; - NiceMock runtime_; - NiceMock random_; - MockHealthCheckEventLogger* event_logger_{}; std::list connection_index_{}; std::list codec_index_{}; }; @@ -4139,7 +4029,7 @@ TEST_F(GrpcHealthCheckerImplTest, SuccessStartFailedSuccessFirst) { expectHealthcheckStop(0, 500); // Fast success immediately moves us to healthy. EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); - EXPECT_CALL(*event_logger_, logAddHealthy(_, _, true)); + EXPECT_CALL(event_logger_, logAddHealthy(_, _, true)); respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING); expectHostHealthy(true); EXPECT_FALSE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet( @@ -4165,7 +4055,7 @@ TEST_F(GrpcHealthCheckerImplTest, SuccessStartFailedFailFirst) { // Host was unhealthy from the start, but we expect a state change due to the pending active hc // flag changing. EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::NOT_SERVING); expectHostHealthy(false); EXPECT_FALSE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet( @@ -4188,7 +4078,7 @@ TEST_F(GrpcHealthCheckerImplTest, SuccessStartFailedFailFirst) { expectHealthcheckStop(0); EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); - EXPECT_CALL(*event_logger_, logAddHealthy(_, _, false)); + EXPECT_CALL(event_logger_, logAddHealthy(_, _, false)); respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING); expectHostHealthy(true); } @@ -4201,13 +4091,13 @@ TEST_F(GrpcHealthCheckerImplTest, GrpcHealthFail) { expectSessionCreate(); expectHealthcheckStart(0); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); health_checker_->start(); // Explicit healthcheck failure immediately renders host unhealthy. expectHealthcheckStop(0); EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::NOT_SERVING); expectHostHealthy(false); @@ -4227,7 +4117,7 @@ TEST_F(GrpcHealthCheckerImplTest, GrpcHealthFail) { expectHealthcheckStop(0); // Host should has become healthy. 
EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); - EXPECT_CALL(*event_logger_, logAddHealthy(_, _, false)); + EXPECT_CALL(event_logger_, logAddHealthy(_, _, false)); respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING); expectHostHealthy(true); } @@ -4240,7 +4130,7 @@ TEST_F(GrpcHealthCheckerImplTest, Disconnect) { expectSessionCreate(); expectHealthcheckStart(0); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); health_checker_->start(); expectHealthcheckStop(0); @@ -4256,7 +4146,7 @@ TEST_F(GrpcHealthCheckerImplTest, Disconnect) { expectHealthcheckStop(0); // Now, host should be unhealthy. EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); test_sessions_[0]->client_connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); expectHostHealthy(false); } @@ -4268,13 +4158,13 @@ TEST_F(GrpcHealthCheckerImplTest, Timeout) { expectSessionCreate(); expectHealthcheckStart(0); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); health_checker_->start(); expectHealthcheckStop(0); // Unhealthy threshold is 1 so first timeout causes unhealthy EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); test_sessions_[0]->timeout_timer_->invokeCallback(); expectHostHealthy(false); } @@ -4287,7 +4177,7 @@ TEST_F(GrpcHealthCheckerImplTest, DoubleTimeout) { expectSessionCreate(); expectHealthcheckStart(0); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); health_checker_->start(); expectHealthcheckStop(0); @@ -4301,7 +4191,7 @@ TEST_F(GrpcHealthCheckerImplTest, DoubleTimeout) { expectHealthcheckStop(0); EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); // Close connection. Timeouts and connection closes counts together. test_sessions_[0]->client_connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); expectHostHealthy(false); @@ -4373,7 +4263,7 @@ TEST_F(GrpcHealthCheckerImplTest, HealthCheckIntervals) { // ignored and health state changes immediately. Since the threshold is ignored, next health // check respects "unhealthy_interval". EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(2000), _)); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::NOT_SERVING); @@ -4430,7 +4320,7 @@ TEST_F(GrpcHealthCheckerImplTest, HealthCheckIntervals) { // After the healthy threshold is reached, health state should change while checks should respect // the default interval. 
EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); - EXPECT_CALL(*event_logger_, logAddHealthy(_, _, false)); + EXPECT_CALL(event_logger_, logAddHealthy(_, _, false)); EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(1000), _)); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING); @@ -4477,7 +4367,7 @@ TEST_F(GrpcHealthCheckerImplTest, HealthCheckIntervals) { // Subsequent failing checks should respect unhealthy_interval. As the unhealthy threshold is // reached, health state should also change. EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(2000), _)); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); test_sessions_[0]->timeout_timer_->invokeCallback(); @@ -4522,7 +4412,7 @@ TEST_F(GrpcHealthCheckerImplTest, HealthCheckIntervals) { // After the healthy threshold is reached, health state should change while checks should respect // the default interval. EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); - EXPECT_CALL(*event_logger_, logAddHealthy(_, _, false)); + EXPECT_CALL(event_logger_, logAddHealthy(_, _, false)); EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(std::chrono::milliseconds(1000), _)); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, disableTimer()); respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING); @@ -4601,8 +4491,8 @@ TEST_F(GrpcHealthCheckerImplTest, DontReuseConnectionBetweenChecks) { TEST_F(GrpcHealthCheckerImplTest, GrpcFailUnknown) { setupHC(); expectSingleHealthcheck(HealthTransition::Changed); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::UNKNOWN); EXPECT_TRUE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet( @@ -4615,8 +4505,8 @@ TEST_F(GrpcHealthCheckerImplTest, GrpcFailUnknown) { TEST_F(GrpcHealthCheckerImplTest, GrpcFailServiceUnknown) { setupHC(); expectSingleHealthcheck(HealthTransition::Changed); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVICE_UNKNOWN); EXPECT_TRUE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet( @@ -4629,8 +4519,8 @@ TEST_F(GrpcHealthCheckerImplTest, GrpcFailServiceUnknown) { TEST_F(GrpcHealthCheckerImplTest, GrpcFailUnknownHealthStatus) { setupHC(); expectSingleHealthcheck(HealthTransition::Changed); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); respondServiceStatus(0, static_cast(999)); EXPECT_TRUE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet( @@ -4645,8 +4535,8 @@ TEST_F(GrpcHealthCheckerImplTest, GoAwayProbeInProgress) { // is reached. 
setupHCWithUnhealthyThreshold(1); expectSingleHealthcheck(HealthTransition::Changed); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); test_sessions_[0]->codec_client_->raiseGoAway(); @@ -4784,8 +4674,8 @@ INSTANTIATE_TEST_SUITE_P( TEST_P(BadResponseGrpcHealthCheckerImplTest, GrpcBadResponse) { setupHC(); expectSingleHealthcheck(HealthTransition::Changed); - EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, true)); - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); ResponseSpec spec = GetParam(); respondResponseSpec(0, std::move(spec)); From 436732f1ddb05362b8b33460269ff180d7faa7a5 Mon Sep 17 00:00:00 2001 From: "Adi (Suissa) Peleg" Date: Fri, 29 May 2020 10:49:45 -0400 Subject: [PATCH 248/909] Buffer overflow watermark implementation - base (#11179) Basic mechanism for buffer overflow watermark. First part of #10920 - implementing the basic callback and runtime configuration setting needed for overflow watermark. Signed-off-by: Adi Suissa-Peleg --- include/envoy/buffer/buffer.h | 3 +- source/common/buffer/BUILD | 1 + source/common/buffer/watermark_buffer.cc | 52 +++-- source/common/buffer/watermark_buffer.h | 20 +- source/common/http/conn_manager_impl.cc | 13 +- source/common/http/http1/codec_impl.cc | 3 +- source/common/http/http2/codec_impl.h | 6 +- source/common/network/connection_impl.cc | 6 +- source/common/router/upstream_request.cc | 3 +- .../filters/http/fault/fault_filter.cc | 3 +- test/common/buffer/BUILD | 1 + test/common/buffer/watermark_buffer_test.cc | 197 +++++++++++++++++- test/common/network/connection_impl_test.cc | 38 ++-- .../transport_sockets/tls/ssl_socket_test.cc | 14 +- test/integration/integration.cc | 17 +- .../integration/tcp_proxy_integration_test.cc | 9 +- test/mocks/buffer/mocks.cc | 10 +- test/mocks/buffer/mocks.h | 23 +- 18 files changed, 331 insertions(+), 88 deletions(-) diff --git a/include/envoy/buffer/buffer.h b/include/envoy/buffer/buffer.h index 61d803c2d204..bb2d81259bc8 100644 --- a/include/envoy/buffer/buffer.h +++ b/include/envoy/buffer/buffer.h @@ -376,7 +376,8 @@ class WatermarkFactory { * @return a newly created InstancePtr. 
*/ virtual InstancePtr create(std::function below_low_watermark, - std::function above_high_watermark) PURE; + std::function above_high_watermark, + std::function above_overflow_watermark) PURE; }; using WatermarkFactoryPtr = std::unique_ptr; diff --git a/source/common/buffer/BUILD b/source/common/buffer/BUILD index 0b9acc3732ab..02371e3c63d6 100644 --- a/source/common/buffer/BUILD +++ b/source/common/buffer/BUILD @@ -15,6 +15,7 @@ envoy_cc_library( deps = [ "//source/common/buffer:buffer_lib", "//source/common/common:assert_lib", + "//source/common/runtime:runtime_features_lib", ], ) diff --git a/source/common/buffer/watermark_buffer.cc b/source/common/buffer/watermark_buffer.cc index 4f891d7c8029..e3537ffe7943 100644 --- a/source/common/buffer/watermark_buffer.cc +++ b/source/common/buffer/watermark_buffer.cc @@ -1,38 +1,39 @@ #include "common/buffer/watermark_buffer.h" #include "common/common/assert.h" +#include "common/runtime/runtime_features.h" namespace Envoy { namespace Buffer { void WatermarkBuffer::add(const void* data, uint64_t size) { OwnedImpl::add(data, size); - checkHighWatermark(); + checkHighAndOverflowWatermarks(); } void WatermarkBuffer::add(absl::string_view data) { OwnedImpl::add(data); - checkHighWatermark(); + checkHighAndOverflowWatermarks(); } void WatermarkBuffer::add(const Instance& data) { OwnedImpl::add(data); - checkHighWatermark(); + checkHighAndOverflowWatermarks(); } void WatermarkBuffer::prepend(absl::string_view data) { OwnedImpl::prepend(data); - checkHighWatermark(); + checkHighAndOverflowWatermarks(); } void WatermarkBuffer::prepend(Instance& data) { OwnedImpl::prepend(data); - checkHighWatermark(); + checkHighAndOverflowWatermarks(); } void WatermarkBuffer::commit(RawSlice* iovecs, uint64_t num_iovecs) { OwnedImpl::commit(iovecs, num_iovecs); - checkHighWatermark(); + checkHighAndOverflowWatermarks(); } void WatermarkBuffer::drain(uint64_t size) { @@ -42,23 +43,23 @@ void WatermarkBuffer::drain(uint64_t size) { void WatermarkBuffer::move(Instance& rhs) { OwnedImpl::move(rhs); - checkHighWatermark(); + checkHighAndOverflowWatermarks(); } void WatermarkBuffer::move(Instance& rhs, uint64_t length) { OwnedImpl::move(rhs, length); - checkHighWatermark(); + checkHighAndOverflowWatermarks(); } Api::IoCallUint64Result WatermarkBuffer::read(Network::IoHandle& io_handle, uint64_t max_length) { Api::IoCallUint64Result result = OwnedImpl::read(io_handle, max_length); - checkHighWatermark(); + checkHighAndOverflowWatermarks(); return result; } uint64_t WatermarkBuffer::reserve(uint64_t length, RawSlice* iovecs, uint64_t num_iovecs) { uint64_t bytes_reserved = OwnedImpl::reserve(length, iovecs, num_iovecs); - checkHighWatermark(); + checkHighAndOverflowWatermarks(); return bytes_reserved; } @@ -70,9 +71,19 @@ Api::IoCallUint64Result WatermarkBuffer::write(Network::IoHandle& io_handle) { void WatermarkBuffer::setWatermarks(uint32_t low_watermark, uint32_t high_watermark) { ASSERT(low_watermark < high_watermark || (high_watermark == 0 && low_watermark == 0)); + uint32_t overflow_watermark_multiplier = + Runtime::getInteger("envoy.buffer.overflow_multiplier", 0); + if (overflow_watermark_multiplier > 0 && + (static_cast(overflow_watermark_multiplier) * high_watermark) > + std::numeric_limits::max()) { + ENVOY_LOG_MISC(debug, "Error setting overflow threshold: envoy.buffer.overflow_multiplier * " + "high_watermark is overflowing. 
Disabling overflow watermark."); + overflow_watermark_multiplier = 0; + } low_watermark_ = low_watermark; high_watermark_ = high_watermark; - checkHighWatermark(); + overflow_watermark_ = overflow_watermark_multiplier * high_watermark; + checkHighAndOverflowWatermarks(); checkLowWatermark(); } @@ -86,14 +97,23 @@ void WatermarkBuffer::checkLowWatermark() { below_low_watermark_(); } -void WatermarkBuffer::checkHighWatermark() { - if (above_high_watermark_called_ || high_watermark_ == 0 || - OwnedImpl::length() <= high_watermark_) { +void WatermarkBuffer::checkHighAndOverflowWatermarks() { + if (high_watermark_ == 0 || OwnedImpl::length() <= high_watermark_) { return; } - above_high_watermark_called_ = true; - above_high_watermark_(); + if (!above_high_watermark_called_) { + above_high_watermark_called_ = true; + above_high_watermark_(); + } + + // Check if overflow watermark is enabled, wasn't previously triggered, + // and the buffer size is above the threshold + if (overflow_watermark_ != 0 && !above_overflow_watermark_called_ && + OwnedImpl::length() > overflow_watermark_) { + above_overflow_watermark_called_ = true; + above_overflow_watermark_(); + } } } // namespace Buffer diff --git a/source/common/buffer/watermark_buffer.h b/source/common/buffer/watermark_buffer.h index 5bc111a4e1e3..127069307902 100644 --- a/source/common/buffer/watermark_buffer.h +++ b/source/common/buffer/watermark_buffer.h @@ -13,11 +13,15 @@ namespace Buffer { // buffer size transitions from under the low watermark to above the high watermark, the // above_high_watermark function is called one time. It will not be called again until the buffer // is drained below the low watermark, at which point the below_low_watermark function is called. +// If the buffer size is above the overflow watermark, above_overflow_watermark is called. +// It is only called on the first time the buffer overflows. class WatermarkBuffer : public OwnedImpl { public: WatermarkBuffer(std::function below_low_watermark, - std::function above_high_watermark) - : below_low_watermark_(below_low_watermark), above_high_watermark_(above_high_watermark) {} + std::function above_high_watermark, + std::function above_overflow_watermark) + : below_low_watermark_(below_low_watermark), above_high_watermark_(above_high_watermark), + above_overflow_watermark_(above_overflow_watermark) {} // Override all functions from Instance which can result in changing the size // of the underlying buffer. @@ -43,20 +47,24 @@ class WatermarkBuffer : public OwnedImpl { bool highWatermarkTriggered() const { return above_high_watermark_called_; } private: - void checkHighWatermark(); + void checkHighAndOverflowWatermarks(); void checkLowWatermark(); std::function below_low_watermark_; std::function above_high_watermark_; + std::function above_overflow_watermark_; // Used for enforcing buffer limits (off by default). If these are set to non-zero by a call to // setWatermarks() the watermark callbacks will be called as described above. uint32_t high_watermark_{0}; uint32_t low_watermark_{0}; + uint32_t overflow_watermark_{0}; // Tracks the latest state of watermark callbacks. // True between the time above_high_watermark_ has been called until above_high_watermark_ has // been called. bool above_high_watermark_called_{false}; + // Set to true when above_overflow_watermark_ is called (and isn't cleared). 
+ bool above_overflow_watermark_called_{false}; }; using WatermarkBufferPtr = std::unique_ptr; @@ -65,8 +73,10 @@ class WatermarkBufferFactory : public WatermarkFactory { public: // Buffer::WatermarkFactory InstancePtr create(std::function below_low_watermark, - std::function above_high_watermark) override { - return InstancePtr{new WatermarkBuffer(below_low_watermark, above_high_watermark)}; + std::function above_high_watermark, + std::function above_overflow_watermark) override { + return std::make_unique(below_low_watermark, above_high_watermark, + above_overflow_watermark); } }; diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index 2d19400b6271..22e40d6b5f8d 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -2317,9 +2317,10 @@ void ConnectionManagerImpl::ActiveStreamFilterBase::clearRouteCache() { } Buffer::WatermarkBufferPtr ConnectionManagerImpl::ActiveStreamDecoderFilter::createBuffer() { - auto buffer = - std::make_unique([this]() -> void { this->requestDataDrained(); }, - [this]() -> void { this->requestDataTooLarge(); }); + auto buffer = std::make_unique( + [this]() -> void { this->requestDataDrained(); }, + [this]() -> void { this->requestDataTooLarge(); }, + []() -> void { /* TODO(adisuissa): Handle overflow watermark */ }); buffer->setWatermarks(parent_.buffer_limit_); return buffer; } @@ -2490,8 +2491,10 @@ ConnectionManagerImpl::ActiveStreamDecoderFilter::routeConfig() { } Buffer::WatermarkBufferPtr ConnectionManagerImpl::ActiveStreamEncoderFilter::createBuffer() { - auto buffer = new Buffer::WatermarkBuffer([this]() -> void { this->responseDataDrained(); }, - [this]() -> void { this->responseDataTooLarge(); }); + auto buffer = new Buffer::WatermarkBuffer( + [this]() -> void { this->responseDataDrained(); }, + [this]() -> void { this->responseDataTooLarge(); }, + []() -> void { /* TODO(adisuissa): Handle overflow watermark */ }); buffer->setWatermarks(parent_.buffer_limit_); return Buffer::WatermarkBufferPtr{buffer}; } diff --git a/source/common/http/http1/codec_impl.cc b/source/common/http/http1/codec_impl.cc index 70f40af8d742..2ae945658f61 100644 --- a/source/common/http/http1/codec_impl.cc +++ b/source/common/http/http1/codec_impl.cc @@ -459,7 +459,8 @@ ConnectionImpl::ConnectionImpl(Network::Connection& connection, CodecStats& stat reject_unsupported_transfer_encodings_(Runtime::runtimeFeatureEnabled( "envoy.reloadable_features.reject_unsupported_transfer_encodings")), output_buffer_([&]() -> void { this->onBelowLowWatermark(); }, - [&]() -> void { this->onAboveHighWatermark(); }), + [&]() -> void { this->onAboveHighWatermark(); }, + []() -> void { /* TODO(adisuissa): Handle overflow watermark */ }), max_headers_kb_(max_headers_kb), max_headers_count_(max_headers_count) { output_buffer_.setWatermarks(connection.bufferLimit()); http_parser_init(&parser_, type); diff --git a/source/common/http/http2/codec_impl.h b/source/common/http/http2/codec_impl.h index 2e5f29095811..371252374f23 100644 --- a/source/common/http/http2/codec_impl.h +++ b/source/common/http/http2/codec_impl.h @@ -281,10 +281,12 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable void { this->pendingRecvBufferLowWatermark(); }, - [this]() -> void { this->pendingRecvBufferHighWatermark(); }}; + [this]() -> void { this->pendingRecvBufferHighWatermark(); }, + []() -> void { /* TODO(adisuissa): Handle overflow watermark */ }}; Buffer::WatermarkBuffer pending_send_data_{ [this]() -> 
void { this->pendingSendBufferLowWatermark(); }, - [this]() -> void { this->pendingSendBufferHighWatermark(); }}; + [this]() -> void { this->pendingSendBufferHighWatermark(); }, + []() -> void { /* TODO(adisuissa): Handle overflow watermark */ }}; HeaderMapPtr pending_trailers_to_encode_; std::unique_ptr metadata_decoder_; std::unique_ptr metadata_encoder_; diff --git a/source/common/network/connection_impl.cc b/source/common/network/connection_impl.cc index 6378678322f4..140f786a1956 100644 --- a/source/common/network/connection_impl.cc +++ b/source/common/network/connection_impl.cc @@ -49,10 +49,12 @@ ConnectionImpl::ConnectionImpl(Event::Dispatcher& dispatcher, ConnectionSocketPt transport_socket_(std::move(transport_socket)), socket_(std::move(socket)), stream_info_(stream_info), filter_manager_(*this), read_buffer_([this]() -> void { this->onReadBufferLowWatermark(); }, - [this]() -> void { this->onReadBufferHighWatermark(); }), + [this]() -> void { this->onReadBufferHighWatermark(); }, + []() -> void { /* TODO(adisuissa): Handle overflow watermark */ }), write_buffer_(dispatcher.getWatermarkFactory().create( [this]() -> void { this->onWriteBufferLowWatermark(); }, - [this]() -> void { this->onWriteBufferHighWatermark(); })), + [this]() -> void { this->onWriteBufferHighWatermark(); }, + []() -> void { /* TODO(adisuissa): Handle overflow watermark */ })), write_buffer_above_high_watermark_(false), detect_early_close_(true), enable_half_close_(false), read_end_stream_raised_(false), read_end_stream_(false), write_end_stream_(false), current_write_end_stream_(false), dispatch_buffered_data_(false) { diff --git a/source/common/router/upstream_request.cc b/source/common/router/upstream_request.cc index 7e53c292802c..7c18a77687a3 100644 --- a/source/common/router/upstream_request.cc +++ b/source/common/router/upstream_request.cc @@ -192,7 +192,8 @@ void UpstreamRequest::encodeData(Buffer::Instance& data, bool end_stream) { if (!buffered_request_body_) { buffered_request_body_ = std::make_unique( [this]() -> void { this->enableDataFromDownstreamForFlowControl(); }, - [this]() -> void { this->disableDataFromDownstreamForFlowControl(); }); + [this]() -> void { this->disableDataFromDownstreamForFlowControl(); }, + []() -> void { /* TODO(adisuissa): Handle overflow watermark */ }); buffered_request_body_->setWatermarks(parent_.callbacks()->decoderBufferLimit()); } diff --git a/source/extensions/filters/http/fault/fault_filter.cc b/source/extensions/filters/http/fault/fault_filter.cc index 217471da0801..ad40112619e2 100644 --- a/source/extensions/filters/http/fault/fault_filter.cc +++ b/source/extensions/filters/http/fault/fault_filter.cc @@ -500,7 +500,8 @@ StreamRateLimiter::StreamRateLimiter(uint64_t max_kbps, uint64_t max_buffered_da // ~63ms intervals. 
token_bucket_(SecondDivisor, time_source, SecondDivisor), token_timer_(dispatcher.createTimer([this] { onTokenTimer(); })), - buffer_(resume_data_cb, pause_data_cb) { + buffer_(resume_data_cb, pause_data_cb, + []() -> void { /* TODO(adisuissa): Handle overflow watermark */ }) { ASSERT(bytes_per_time_slice_ > 0); ASSERT(max_buffered_data > 0); buffer_.setWatermarks(max_buffered_data); diff --git a/test/common/buffer/BUILD b/test/common/buffer/BUILD index d91cc0b57354..c0968d43d80d 100644 --- a/test/common/buffer/BUILD +++ b/test/common/buffer/BUILD @@ -79,6 +79,7 @@ envoy_cc_test( "//source/common/buffer:buffer_lib", "//source/common/buffer:watermark_buffer_lib", "//source/common/network:address_lib", + "//test/test_common:test_runtime_lib", ], ) diff --git a/test/common/buffer/watermark_buffer_test.cc b/test/common/buffer/watermark_buffer_test.cc index f5c13fa7e177..776bd64a8217 100644 --- a/test/common/buffer/watermark_buffer_test.cc +++ b/test/common/buffer/watermark_buffer_test.cc @@ -6,6 +6,7 @@ #include "common/network/io_socket_handle_impl.h" #include "test/common/buffer/utility.h" +#include "test/test_common/test_runtime.h" #include "gtest/gtest.h" @@ -20,9 +21,11 @@ class WatermarkBufferTest : public testing::Test { WatermarkBufferTest() { buffer_.setWatermarks(5, 10); } Buffer::WatermarkBuffer buffer_{[&]() -> void { ++times_low_watermark_called_; }, - [&]() -> void { ++times_high_watermark_called_; }}; + [&]() -> void { ++times_high_watermark_called_; }, + [&]() -> void { ++times_overflow_watermark_called_; }}; uint32_t times_low_watermark_called_{0}; uint32_t times_high_watermark_called_{0}; + uint32_t times_overflow_watermark_called_{0}; }; TEST_F(WatermarkBufferTest, TestWatermark) { ASSERT_EQ(10, buffer_.highWatermark()); } @@ -97,8 +100,10 @@ TEST_F(WatermarkBufferTest, PrependBuffer) { uint32_t prefix_buffer_low_watermark_hits{0}; uint32_t prefix_buffer_high_watermark_hits{0}; + uint32_t prefix_buffer_overflow_watermark_hits{0}; WatermarkBuffer prefixBuffer{[&]() -> void { ++prefix_buffer_low_watermark_hits; }, - [&]() -> void { ++prefix_buffer_high_watermark_hits; }}; + [&]() -> void { ++prefix_buffer_high_watermark_hits; }, + [&]() -> void { ++prefix_buffer_overflow_watermark_hits; }}; prefixBuffer.setWatermarks(5, 10); prefixBuffer.add(prefix); prefixBuffer.add(suffix); @@ -252,22 +257,27 @@ TEST_F(WatermarkBufferTest, MoveWatermarks) { EXPECT_EQ(1, times_low_watermark_called_); buffer_.setWatermarks(9, 20); EXPECT_EQ(1, times_low_watermark_called_); + EXPECT_EQ(0, times_overflow_watermark_called_); EXPECT_EQ(1, times_high_watermark_called_); buffer_.setWatermarks(2); EXPECT_EQ(2, times_high_watermark_called_); EXPECT_EQ(1, times_low_watermark_called_); + EXPECT_EQ(0, times_overflow_watermark_called_); buffer_.setWatermarks(0); EXPECT_EQ(2, times_high_watermark_called_); EXPECT_EQ(2, times_low_watermark_called_); + EXPECT_EQ(0, times_overflow_watermark_called_); buffer_.setWatermarks(1); EXPECT_EQ(3, times_high_watermark_called_); EXPECT_EQ(2, times_low_watermark_called_); + EXPECT_EQ(0, times_overflow_watermark_called_); // Fully drain the buffer. 
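For reference, a small worked example of the thresholds these tests exercise, combining setWatermarks(5, 10) with the envoy.buffer.overflow_multiplier runtime key introduced in this patch. The values are illustrative only, assuming a multiplier of 2 as in the OverflowWatermark test below:

Buffer::WatermarkBuffer buf([] { /* below low */ }, [] { /* above high */ },
                            [] { /* above overflow */ });
buf.setWatermarks(5, 10);
// low = 5, high = 10, overflow = 2 * 10 = 20
// length grows past 10 -> above_high_watermark fires once
// length grows past 20 -> above_overflow_watermark fires once and is never re-armed
// length drains to <= 5 -> below_low_watermark fires and re-arms the high watermark
// A multiplier of 0, or multiplier * high_watermark exceeding uint32_t, disables the
// overflow callback entirely, matching the guard in WatermarkBuffer::setWatermarks() above.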
buffer_.drain(9); EXPECT_EQ(3, times_low_watermark_called_); EXPECT_EQ(0, buffer_.length()); + EXPECT_EQ(0, times_overflow_watermark_called_); } TEST_F(WatermarkBufferTest, GetRawSlices) { @@ -301,8 +311,10 @@ TEST_F(WatermarkBufferTest, StartsWith) { TEST_F(WatermarkBufferTest, MoveBackWithWatermarks) { int high_watermark_buffer1 = 0; int low_watermark_buffer1 = 0; + int overflow_watermark_buffer1 = 0; Buffer::WatermarkBuffer buffer1{[&]() -> void { ++low_watermark_buffer1; }, - [&]() -> void { ++high_watermark_buffer1; }}; + [&]() -> void { ++high_watermark_buffer1; }, + [&]() -> void { ++overflow_watermark_buffer1; }}; buffer1.setWatermarks(5, 10); // Stick 20 bytes in buffer_ and expect the high watermark is hit. @@ -314,16 +326,195 @@ TEST_F(WatermarkBufferTest, MoveBackWithWatermarks) { buffer1.move(buffer_, 10); EXPECT_EQ(0, times_low_watermark_called_); EXPECT_EQ(0, high_watermark_buffer1); + EXPECT_EQ(0, overflow_watermark_buffer1); // Move 10 more bytes to the new buffer. Both buffers should hit watermark callbacks. buffer1.move(buffer_, 10); EXPECT_EQ(1, times_low_watermark_called_); EXPECT_EQ(1, high_watermark_buffer1); + EXPECT_EQ(0, times_overflow_watermark_called_); + EXPECT_EQ(0, overflow_watermark_buffer1); // Now move all the data back to the original buffer. Watermarks should trigger immediately. buffer_.move(buffer1); EXPECT_EQ(2, times_high_watermark_called_); EXPECT_EQ(1, low_watermark_buffer1); + EXPECT_EQ(0, times_overflow_watermark_called_); + EXPECT_EQ(0, overflow_watermark_buffer1); +} + +TEST_F(WatermarkBufferTest, OverflowWatermark) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues({{"envoy.buffer.overflow_multiplier", "2"}}); + + int high_watermark_buffer1 = 0; + int low_watermark_buffer1 = 0; + int overflow_watermark_buffer1 = 0; + Buffer::WatermarkBuffer buffer1{[&]() -> void { ++low_watermark_buffer1; }, + [&]() -> void { ++high_watermark_buffer1; }, + [&]() -> void { ++overflow_watermark_buffer1; }}; + buffer1.setWatermarks(5, 10); + + buffer1.add(TEN_BYTES, 10); + EXPECT_EQ(0, high_watermark_buffer1); + EXPECT_EQ(0, overflow_watermark_buffer1); + buffer1.add("a", 1); + EXPECT_EQ(1, high_watermark_buffer1); + EXPECT_EQ(0, overflow_watermark_buffer1); + buffer1.add(TEN_BYTES, 9); + EXPECT_EQ(1, high_watermark_buffer1); + EXPECT_EQ(0, overflow_watermark_buffer1); + buffer1.add("a", 1); + EXPECT_EQ(1, high_watermark_buffer1); + EXPECT_EQ(1, overflow_watermark_buffer1); + EXPECT_EQ(21, buffer1.length()); + buffer1.add("a", 1); + EXPECT_EQ(1, high_watermark_buffer1); + EXPECT_EQ(1, overflow_watermark_buffer1); + EXPECT_EQ(22, buffer1.length()); + + // Overflow is only triggered once + buffer1.drain(18); + EXPECT_EQ(4, buffer1.length()); + EXPECT_EQ(1, high_watermark_buffer1); + EXPECT_EQ(1, low_watermark_buffer1); + EXPECT_EQ(1, overflow_watermark_buffer1); + buffer1.add(TEN_BYTES, 10); + EXPECT_EQ(2, high_watermark_buffer1); + EXPECT_EQ(1, overflow_watermark_buffer1); + EXPECT_EQ(14, buffer1.length()); + buffer1.add(TEN_BYTES, 6); + EXPECT_EQ(2, high_watermark_buffer1); + EXPECT_EQ(1, overflow_watermark_buffer1); + EXPECT_EQ(20, buffer1.length()); +} + +TEST_F(WatermarkBufferTest, OverflowWatermarkDisabled) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues({{"envoy.buffer.overflow_multiplier", "0"}}); + + int high_watermark_buffer1 = 0; + int low_watermark_buffer1 = 0; + int overflow_watermark_buffer1 = 0; + Buffer::WatermarkBuffer buffer1{[&]() -> void { 
++low_watermark_buffer1; }, + [&]() -> void { ++high_watermark_buffer1; }, + [&]() -> void { ++overflow_watermark_buffer1; }}; + buffer1.setWatermarks(5, 10); + + buffer1.add(TEN_BYTES, 10); + EXPECT_EQ(0, high_watermark_buffer1); + EXPECT_EQ(0, overflow_watermark_buffer1); + buffer1.add("a", 1); + EXPECT_EQ(1, high_watermark_buffer1); + EXPECT_EQ(0, overflow_watermark_buffer1); + buffer1.add(TEN_BYTES, 10); + EXPECT_EQ(1, high_watermark_buffer1); + EXPECT_EQ(0, overflow_watermark_buffer1); + EXPECT_EQ(21, buffer1.length()); +} + +TEST_F(WatermarkBufferTest, OverflowWatermarkDisabledOnVeryHighValue) { +// Disabling execution with TSAN as it causes the test to use too much memory +// and time, making the test fail in some settings (such as CI) +#if defined(__has_feature) && __has_feature(thread_sanitizer) + ENVOY_LOG_MISC(critical, "WatermarkBufferTest::OverflowWatermarkDisabledOnVeryHighValue not " + "supported by this compiler configuration"); +#else + // Verifies that the overflow watermark is disabled when its value is higher + // than uint32_t max value + TestScopedRuntime scoped_runtime; + + int high_watermark_buffer1 = 0; + int overflow_watermark_buffer1 = 0; + Buffer::WatermarkBuffer buffer1{[&]() -> void {}, [&]() -> void { ++high_watermark_buffer1; }, + [&]() -> void { ++overflow_watermark_buffer1; }}; + + // Make sure the overflow threshold will be above std::numeric_limits::max() + Runtime::LoaderSingleton::getExisting()->mergeValues({{"envoy.buffer.overflow_multiplier", "3"}}); + buffer1.setWatermarks((std::numeric_limits::max() / 3) + 4); + + // Add 2 halves instead of full uint32_t::max to get around std::bad_alloc exception + const uint32_t half_max = std::numeric_limits::max() / 2; + const std::string half_max_str = std::string(half_max, 'a'); + buffer1.add(half_max_str.data(), half_max); + buffer1.add(half_max_str.data(), half_max); + EXPECT_EQ(1, high_watermark_buffer1); + EXPECT_EQ(0, overflow_watermark_buffer1); + buffer1.add(TEN_BYTES, 10); + EXPECT_EQ(1, high_watermark_buffer1); + EXPECT_EQ(0, overflow_watermark_buffer1); + EXPECT_EQ(2 * half_max + static_cast(10), buffer1.length()); +#endif +} + +TEST_F(WatermarkBufferTest, OverflowWatermarkEqualHighWatermark) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues({{"envoy.buffer.overflow_multiplier", "1"}}); + + int high_watermark_buffer1 = 0; + int low_watermark_buffer1 = 0; + int overflow_watermark_buffer1 = 0; + Buffer::WatermarkBuffer buffer1{[&]() -> void { ++low_watermark_buffer1; }, + [&]() -> void { ++high_watermark_buffer1; }, + [&]() -> void { ++overflow_watermark_buffer1; }}; + buffer1.setWatermarks(5, 10); + + buffer1.add(TEN_BYTES, 10); + EXPECT_EQ(0, high_watermark_buffer1); + EXPECT_EQ(0, overflow_watermark_buffer1); + buffer1.add("a", 1); + EXPECT_EQ(0, low_watermark_buffer1); + EXPECT_EQ(1, high_watermark_buffer1); + EXPECT_EQ(1, overflow_watermark_buffer1); + + buffer1.drain(6); + EXPECT_EQ(1, low_watermark_buffer1); + EXPECT_EQ(1, high_watermark_buffer1); + EXPECT_EQ(1, overflow_watermark_buffer1); + buffer1.add(TEN_BYTES, 10); + EXPECT_EQ(15, buffer1.length()); + EXPECT_EQ(2, high_watermark_buffer1); + EXPECT_EQ(1, overflow_watermark_buffer1); +} + +TEST_F(WatermarkBufferTest, MoveWatermarksOverflow) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues({{"envoy.buffer.overflow_multiplier", "2"}}); + + int high_watermark_buffer1 = 0; + int low_watermark_buffer1 = 0; + int overflow_watermark_buffer1 = 0; + 
Buffer::WatermarkBuffer buffer1{[&]() -> void { ++low_watermark_buffer1; }, + [&]() -> void { ++high_watermark_buffer1; }, + [&]() -> void { ++overflow_watermark_buffer1; }}; + buffer1.setWatermarks(5, 10); + buffer1.add(TEN_BYTES, 9); + EXPECT_EQ(0, high_watermark_buffer1); + EXPECT_EQ(0, overflow_watermark_buffer1); + buffer1.setWatermarks(1, 9); + EXPECT_EQ(0, high_watermark_buffer1); + EXPECT_EQ(0, overflow_watermark_buffer1); + buffer1.setWatermarks(1, 8); + EXPECT_EQ(1, high_watermark_buffer1); + EXPECT_EQ(0, overflow_watermark_buffer1); + buffer1.setWatermarks(1, 5); + EXPECT_EQ(1, high_watermark_buffer1); + EXPECT_EQ(0, overflow_watermark_buffer1); + buffer1.setWatermarks(1, 4); + EXPECT_EQ(1, high_watermark_buffer1); + EXPECT_EQ(1, overflow_watermark_buffer1); + + // Overflow is only triggered once + buffer1.setWatermarks(3, 6); + EXPECT_EQ(0, low_watermark_buffer1); + EXPECT_EQ(1, high_watermark_buffer1); + EXPECT_EQ(1, overflow_watermark_buffer1); + buffer1.drain(7); + buffer1.add(TEN_BYTES, 9); + EXPECT_EQ(11, buffer1.length()); + EXPECT_EQ(1, low_watermark_buffer1); + EXPECT_EQ(2, high_watermark_buffer1); + EXPECT_EQ(1, overflow_watermark_buffer1); } } // namespace diff --git a/test/common/network/connection_impl_test.cc b/test/common/network/connection_impl_test.cc index 6ef055b9fab3..72168e2a666d 100644 --- a/test/common/network/connection_impl_test.cc +++ b/test/common/network/connection_impl_test.cc @@ -172,16 +172,16 @@ class ConnectionImplTest : public testing::TestWithParam { dispatcher_ = api_->allocateDispatcher("test_thread", Buffer::WatermarkFactoryPtr{factory}); // The first call to create a client session will get a MockBuffer. // Other calls for server sessions will by default get a normal OwnedImpl. - EXPECT_CALL(*factory, create_(_, _)) + EXPECT_CALL(*factory, create_(_, _, _)) .Times(AnyNumber()) - .WillOnce(Invoke([&](std::function below_low, - std::function above_high) -> Buffer::Instance* { - client_write_buffer_ = new MockWatermarkBuffer(below_low, above_high); + .WillOnce(Invoke([&](std::function below_low, std::function above_high, + std::function above_overflow) -> Buffer::Instance* { + client_write_buffer_ = new MockWatermarkBuffer(below_low, above_high, above_overflow); return client_write_buffer_; })) - .WillRepeatedly(Invoke([](std::function below_low, - std::function above_high) -> Buffer::Instance* { - return new Buffer::WatermarkBuffer(below_low, above_high); + .WillRepeatedly(Invoke([](std::function below_low, std::function above_high, + std::function above_overflow) -> Buffer::Instance* { + return new Buffer::WatermarkBuffer(below_low, above_high, above_overflow); })); } @@ -196,12 +196,12 @@ class ConnectionImplTest : public testing::TestWithParam { ConnectionMocks createConnectionMocks(bool create_timer = true) { auto dispatcher = std::make_unique>(); - EXPECT_CALL(dispatcher->buffer_factory_, create_(_, _)) - .WillRepeatedly(Invoke([](std::function below_low, - std::function above_high) -> Buffer::Instance* { + EXPECT_CALL(dispatcher->buffer_factory_, create_(_, _, _)) + .WillRepeatedly(Invoke([](std::function below_low, std::function above_high, + std::function above_overflow) -> Buffer::Instance* { // ConnectionImpl calls Envoy::MockBufferFactory::create(), which calls create_() and // wraps the returned raw pointer below with a unique_ptr. 
- return new Buffer::WatermarkBuffer(below_low, above_high); + return new Buffer::WatermarkBuffer(below_low, above_high, above_overflow); })); Event::MockTimer* timer = nullptr; @@ -1413,10 +1413,10 @@ TEST_P(ConnectionImplTest, FlushWriteAndDelayConfigDisabledTest) { NiceMock callbacks; NiceMock dispatcher; - EXPECT_CALL(dispatcher.buffer_factory_, create_(_, _)) - .WillRepeatedly(Invoke([](std::function below_low, - std::function above_high) -> Buffer::Instance* { - return new Buffer::WatermarkBuffer(below_low, above_high); + EXPECT_CALL(dispatcher.buffer_factory_, create_(_, _, _)) + .WillRepeatedly(Invoke([](std::function below_low, std::function above_high, + std::function above_overflow) -> Buffer::Instance* { + return new Buffer::WatermarkBuffer(below_low, above_high, above_overflow); })); IoHandlePtr io_handle = std::make_unique(0); std::unique_ptr server_connection(new Network::ConnectionImpl( @@ -1601,10 +1601,10 @@ class FakeReadFilter : public Network::ReadFilter { class MockTransportConnectionImplTest : public testing::Test { public: MockTransportConnectionImplTest() : stream_info_(dispatcher_.timeSource()) { - EXPECT_CALL(dispatcher_.buffer_factory_, create_(_, _)) - .WillRepeatedly(Invoke([](std::function below_low, - std::function above_high) -> Buffer::Instance* { - return new Buffer::WatermarkBuffer(below_low, above_high); + EXPECT_CALL(dispatcher_.buffer_factory_, create_(_, _, _)) + .WillRepeatedly(Invoke([](std::function below_low, std::function above_high, + std::function above_overflow) -> Buffer::Instance* { + return new Buffer::WatermarkBuffer(below_low, above_high, above_overflow); })); file_event_ = new Event::MockFileEvent; diff --git a/test/extensions/transport_sockets/tls/ssl_socket_test.cc b/test/extensions/transport_sockets/tls/ssl_socket_test.cc index ebaf803d8de2..49b98320ff9d 100644 --- a/test/extensions/transport_sockets/tls/ssl_socket_test.cc +++ b/test/extensions/transport_sockets/tls/ssl_socket_test.cc @@ -4368,16 +4368,16 @@ class SslReadBufferLimitTest : public SslSocketTest { dispatcher_ = api_->allocateDispatcher("test_thread", Buffer::WatermarkFactoryPtr{factory}); // By default, expect 4 buffers to be created - the client and server read and write buffers. 
- EXPECT_CALL(*factory, create_(_, _)) + EXPECT_CALL(*factory, create_(_, _, _)) .Times(2) - .WillOnce(Invoke([&](std::function below_low, - std::function above_high) -> Buffer::Instance* { - client_write_buffer = new MockWatermarkBuffer(below_low, above_high); + .WillOnce(Invoke([&](std::function below_low, std::function above_high, + std::function above_overflow) -> Buffer::Instance* { + client_write_buffer = new MockWatermarkBuffer(below_low, above_high, above_overflow); return client_write_buffer; })) - .WillRepeatedly(Invoke([](std::function below_low, - std::function above_high) -> Buffer::Instance* { - return new Buffer::WatermarkBuffer(below_low, above_high); + .WillRepeatedly(Invoke([](std::function below_low, std::function above_high, + std::function above_overflow) -> Buffer::Instance* { + return new Buffer::WatermarkBuffer(below_low, above_high, above_overflow); })); initialize(); diff --git a/test/integration/integration.cc b/test/integration/integration.cc index 1f4b9ffcc5b3..7efe8b299404 100644 --- a/test/integration/integration.cc +++ b/test/integration/integration.cc @@ -154,10 +154,11 @@ IntegrationTcpClient::IntegrationTcpClient(Event::Dispatcher& dispatcher, bool enable_half_close) : payload_reader_(new WaitForPayloadReader(dispatcher)), callbacks_(new ConnectionCallbacks(*this)) { - EXPECT_CALL(factory, create_(_, _)) - .WillOnce(Invoke([&](std::function below_low, - std::function above_high) -> Buffer::Instance* { - client_write_buffer_ = new NiceMock(below_low, above_high); + EXPECT_CALL(factory, create_(_, _, _)) + .WillOnce(Invoke([&](std::function below_low, std::function above_high, + std::function above_overflow) -> Buffer::Instance* { + client_write_buffer_ = + new NiceMock(below_low, above_high, above_overflow); return client_write_buffer_; })); @@ -264,10 +265,10 @@ BaseIntegrationTest::BaseIntegrationTest(const InstanceConstSharedPtrFn& upstrea // complex test hooks to the server and/or spin waiting on stats, neither of which I think are // necessary right now. timeSystem().advanceTimeWait(std::chrono::milliseconds(10)); - ON_CALL(*mock_buffer_factory_, create_(_, _)) - .WillByDefault(Invoke([](std::function below_low, - std::function above_high) -> Buffer::Instance* { - return new Buffer::WatermarkBuffer(below_low, above_high); + ON_CALL(*mock_buffer_factory_, create_(_, _, _)) + .WillByDefault(Invoke([](std::function below_low, std::function above_high, + std::function above_overflow) -> Buffer::Instance* { + return new Buffer::WatermarkBuffer(below_low, above_high, above_overflow); })); ON_CALL(factory_context_, api()).WillByDefault(ReturnRef(*api_)); } diff --git a/test/integration/tcp_proxy_integration_test.cc b/test/integration/tcp_proxy_integration_test.cc index 7d5f5db0c4e0..54893f0fda6e 100644 --- a/test/integration/tcp_proxy_integration_test.cc +++ b/test/integration/tcp_proxy_integration_test.cc @@ -692,11 +692,12 @@ void TcpProxySslIntegrationTest::setupConnections() { // Set up the mock buffer factory so the newly created SSL client will have a mock write // buffer. This allows us to track the bytes actually written to the socket. 
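All of these test expectations track the same interface change: WatermarkFactory::create (and the mock create_) now takes a third, overflow callback. A minimal sketch of the post-patch call shape; the no-op overflow lambda mirrors the production call sites earlier in this diff, where handling is still a TODO:

Buffer::InstancePtr buffer = dispatcher.getWatermarkFactory().create(
    [] { /* below low watermark */ },
    [] { /* above high watermark */ },
    [] { /* above overflow watermark: not yet handled in this patch */ });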
- EXPECT_CALL(*mock_buffer_factory_, create_(_, _)) + EXPECT_CALL(*mock_buffer_factory_, create_(_, _, _)) .Times(1) - .WillOnce(Invoke([&](std::function below_low, - std::function above_high) -> Buffer::Instance* { - client_write_buffer_ = new NiceMock(below_low, above_high); + .WillOnce(Invoke([&](std::function below_low, std::function above_high, + std::function above_overflow) -> Buffer::Instance* { + client_write_buffer_ = + new NiceMock(below_low, above_high, above_overflow); ON_CALL(*client_write_buffer_, move(_)) .WillByDefault(Invoke(client_write_buffer_, &MockWatermarkBuffer::baseMove)); ON_CALL(*client_write_buffer_, drain(_)) diff --git a/test/mocks/buffer/mocks.cc b/test/mocks/buffer/mocks.cc index 64459da03703..8328f870b063 100644 --- a/test/mocks/buffer/mocks.cc +++ b/test/mocks/buffer/mocks.cc @@ -6,16 +6,18 @@ namespace Envoy { template <> MockBufferBase::MockBufferBase(std::function below_low, - std::function above_high) - : Buffer::WatermarkBuffer(below_low, above_high) {} + std::function above_high, + std::function above_overflow) + : Buffer::WatermarkBuffer(below_low, above_high, above_overflow) {} template <> MockBufferBase::MockBufferBase() - : Buffer::WatermarkBuffer([&]() -> void {}, [&]() -> void {}) { + : Buffer::WatermarkBuffer([&]() -> void {}, [&]() -> void {}, [&]() -> void {}) { ASSERT(0); // This constructor is not supported for WatermarkBuffer. } template <> -MockBufferBase::MockBufferBase(std::function, std::function) +MockBufferBase::MockBufferBase(std::function, std::function, + std::function) : Buffer::OwnedImpl() { ASSERT(0); // This constructor is not supported for OwnedImpl. } diff --git a/test/mocks/buffer/mocks.h b/test/mocks/buffer/mocks.h index a37d6cc3a297..76246f0dc4c3 100644 --- a/test/mocks/buffer/mocks.h +++ b/test/mocks/buffer/mocks.h @@ -17,7 +17,8 @@ namespace Envoy { template class MockBufferBase : public BaseClass { public: MockBufferBase(); - MockBufferBase(std::function below_low, std::function above_high); + MockBufferBase(std::function below_low, std::function above_high, + std::function above_overflow); MOCK_METHOD(Api::IoCallUint64Result, write, (Network::IoHandle & io_handle)); MOCK_METHOD(void, move, (Buffer::Instance & rhs)); @@ -57,12 +58,14 @@ template class MockBufferBase : public BaseClass { template <> MockBufferBase::MockBufferBase(std::function below_low, - std::function above_high); + std::function above_high, + std::function above_overflow); template <> MockBufferBase::MockBufferBase(); template <> MockBufferBase::MockBufferBase(std::function below_low, - std::function above_high); + std::function above_high, + std::function above_overflow); template <> MockBufferBase::MockBufferBase(); class MockBuffer : public MockBufferBase { @@ -78,8 +81,9 @@ class MockWatermarkBuffer : public MockBufferBase { public: using BaseClass = MockBufferBase; - MockWatermarkBuffer(std::function below_low, std::function above_high) - : BaseClass(below_low, above_high) { + MockWatermarkBuffer(std::function below_low, std::function above_high, + std::function above_overflow) + : BaseClass(below_low, above_high, above_overflow) { ON_CALL(*this, write(testing::_)) .WillByDefault(testing::Invoke(this, &MockWatermarkBuffer::trackWrites)); ON_CALL(*this, move(testing::_)) @@ -92,13 +96,14 @@ class MockBufferFactory : public Buffer::WatermarkFactory { MockBufferFactory(); ~MockBufferFactory() override; - Buffer::InstancePtr create(std::function below_low, - std::function above_high) override { - return Buffer::InstancePtr{create_(below_low, 
above_high)}; + Buffer::InstancePtr create(std::function below_low, std::function above_high, + std::function above_overflow) override { + return Buffer::InstancePtr{create_(below_low, above_high, above_overflow)}; } MOCK_METHOD(Buffer::Instance*, create_, - (std::function below_low, std::function above_high)); + (std::function below_low, std::function above_high, + std::function above_overflow)); }; MATCHER_P(BufferEqual, rhs, testing::PrintToString(*rhs)) { From bbcc8c6a6d6c11ea950d1496e0370f5cf7f354a8 Mon Sep 17 00:00:00 2001 From: Florin Coras Date: Fri, 29 May 2020 08:30:23 -0700 Subject: [PATCH 249/909] network: move remaining socket syscalls to socket (#11348) * network: move remaining socket syscalls to socket - add functions to set/get socket options directly on socket. - add function to update socket blocking flag - add SocketInterface function to retrieve hostname Signed-off-by: Florin Coras --- include/envoy/network/socket.h | 22 +++++ source/common/network/address_impl.cc | 1 - source/common/network/connection_impl.cc | 22 ++--- source/common/network/socket_impl.cc | 57 ++++++++++++- source/common/network/socket_impl.h | 10 ++- source/common/network/socket_option_impl.cc | 4 +- source/common/network/utility.cc | 23 ++--- source/common/network/utility.h | 6 +- .../listener/original_dst/original_dst.cc | 10 +-- .../listener/original_dst/original_dst.h | 2 +- test/common/network/address_impl_test.cc | 11 +-- .../network/socket_option_factory_test.cc | 24 +++--- .../common/network/socket_option_impl_test.cc | 12 +-- test/common/network/socket_option_test.h | 13 +-- test/common/network/udp_listener_impl_test.cc | 5 +- test/common/network/utility_test.cc | 5 +- .../upstream/cluster_manager_impl_test.cc | 85 ++++++++++--------- .../common/statsd/udp_statsd_test.cc | 7 +- test/mocks/network/mocks.h | 8 ++ test/server/filter_chain_benchmark_test.cc | 6 ++ .../listener_manager_impl_quic_only_test.cc | 9 +- test/server/listener_manager_impl_test.cc | 40 +++++---- test/server/listener_manager_impl_test.h | 16 ++-- test/test_common/network_utility.cc | 4 +- tools/spelling/spelling_dictionary.txt | 2 + 25 files changed, 233 insertions(+), 171 deletions(-) diff --git a/include/envoy/network/socket.h b/include/envoy/network/socket.h index 14aed46083d8..ac8152ecc86a 100644 --- a/include/envoy/network/socket.h +++ b/include/envoy/network/socket.h @@ -78,6 +78,11 @@ class Socket { */ virtual Address::SocketType socketType() const PURE; + /** + * @return the type (IP or pipe) of addresses used by the socket (subset of socket domain) + */ + virtual Address::Type addressType() const PURE; + /** * Close the underlying socket. */ @@ -113,6 +118,23 @@ class Socket { */ virtual Api::SysCallIntResult connect(const Address::InstanceConstSharedPtr address) PURE; + /** + * Propagates option to underlying socket (@see man 2 setsockopt) + */ + virtual Api::SysCallIntResult setSocketOption(int level, int optname, const void* optval, + socklen_t optlen) PURE; + + /** + * Retrieves option from underlying socket (@see man 2 getsockopt) + */ + virtual Api::SysCallIntResult getSocketOption(int level, int optname, void* optval, + socklen_t* optlen) PURE; + + /** + * Toggle socket blocking state + */ + virtual Api::SysCallIntResult setBlockingForTest(bool blocking) PURE; + /** * Visitor class for setting socket options. 
*/ diff --git a/source/common/network/address_impl.cc b/source/common/network/address_impl.cc index dbd7eee1e380..01ce620dbd4d 100644 --- a/source/common/network/address_impl.cc +++ b/source/common/network/address_impl.cc @@ -11,7 +11,6 @@ #include "common/common/assert.h" #include "common/common/fmt.h" #include "common/common/utility.h" -#include "common/network/io_socket_handle_impl.h" namespace Envoy { namespace Network { diff --git a/source/common/network/connection_impl.cc b/source/common/network/connection_impl.cc index 140f786a1956..6d2c75662a32 100644 --- a/source/common/network/connection_impl.cc +++ b/source/common/network/connection_impl.cc @@ -10,7 +10,6 @@ #include "envoy/event/timer.h" #include "envoy/network/filter.h" -#include "common/api/os_sys_calls_impl.h" #include "common/common/assert.h" #include "common/common/empty_string.h" #include "common/common/enum_to_int.h" @@ -242,23 +241,14 @@ void ConnectionImpl::noDelay(bool enable) { } // Don't set NODELAY for unix domain sockets - sockaddr_storage addr; - socklen_t len = sizeof(addr); - - auto os_sys_calls = Api::OsSysCallsSingleton::get(); - Api::SysCallIntResult result = - os_sys_calls.getsockname(ioHandle().fd(), reinterpret_cast(&addr), &len); - - RELEASE_ASSERT(result.rc_ == 0, ""); - - if (addr.ss_family == AF_UNIX) { + if (socket_->addressType() == Address::Type::Pipe) { return; } // Set NODELAY int new_value = enable; - result = os_sys_calls.setsockopt(ioHandle().fd(), IPPROTO_TCP, TCP_NODELAY, &new_value, - sizeof(new_value)); + Api::SysCallIntResult result = + socket_->setSocketOption(IPPROTO_TCP, TCP_NODELAY, &new_value, sizeof(new_value)); #if defined(__APPLE__) if (SOCKET_FAILURE(result.rc_) && result.errno_ == EINVAL) { // Sometimes occurs when the connection is not yet fully formed. 
Empirically, TCP_NODELAY is @@ -604,7 +594,7 @@ ConnectionImpl::unixSocketPeerCredentials() const { #else struct ucred ucred; socklen_t ucred_size = sizeof(ucred); - int rc = getsockopt(ioHandle().fd(), SOL_SOCKET, SO_PEERCRED, &ucred, &ucred_size); + int rc = socket_->getSocketOption(SOL_SOCKET, SO_PEERCRED, &ucred, &ucred_size).rc_; if (rc == -1) { return absl::nullopt; } @@ -619,9 +609,7 @@ void ConnectionImpl::onWriteReady() { if (connecting_) { int error; socklen_t error_size = sizeof(error); - RELEASE_ASSERT(Api::OsSysCallsSingleton::get() - .getsockopt(ioHandle().fd(), SOL_SOCKET, SO_ERROR, &error, &error_size) - .rc_ == 0, + RELEASE_ASSERT(socket_->getSocketOption(SOL_SOCKET, SO_ERROR, &error, &error_size).rc_ == 0, ""); if (error == 0) { diff --git a/source/common/network/socket_impl.cc b/source/common/network/socket_impl.cc index 64dc99594b8b..26beacf07375 100644 --- a/source/common/network/socket_impl.cc +++ b/source/common/network/socket_impl.cc @@ -136,10 +136,45 @@ Address::InstanceConstSharedPtr SocketInterface::peerAddressFromFd(os_fd_t fd) { SocketImpl::SocketImpl(Address::SocketType type, Address::Type addr_type, Address::IpVersion version) - : io_handle_(SocketInterface::socket(type, addr_type, version)) {} + : io_handle_(SocketInterface::socket(type, addr_type, version)), sock_type_(type), + addr_type_(addr_type) {} -SocketImpl::SocketImpl(Address::SocketType socket_type, const Address::InstanceConstSharedPtr addr) - : io_handle_(SocketInterface::socket(socket_type, addr)) {} +SocketImpl::SocketImpl(Address::SocketType sock_type, const Address::InstanceConstSharedPtr addr) + : io_handle_(SocketInterface::socket(sock_type, addr)), sock_type_(sock_type), + addr_type_(addr->type()) {} + +SocketImpl::SocketImpl(IoHandlePtr&& io_handle, + const Address::InstanceConstSharedPtr& local_address) + : io_handle_(std::move(io_handle)), local_address_(local_address) { + + // Should not happen but some tests inject -1 fds + if (SOCKET_INVALID(io_handle_->fd())) { + if (local_address != nullptr) { + addr_type_ = local_address->type(); + } else { + addr_type_ = Address::Type::Ip; + } + return; + } + + sockaddr_storage addr; + socklen_t len = sizeof(addr); + Api::SysCallIntResult result; + + result = Api::OsSysCallsSingleton::get().getsockname( + io_handle_->fd(), reinterpret_cast(&addr), &len); + + // This should never happen in practice but too many tests inject fake fds ... 
+ if (result.rc_ < 0) { + return; + } + + if (addr.ss_family == AF_UNIX) { + addr_type_ = Address::Type::Pipe; + } else { + addr_type_ = Address::Type::Ip; + } +} Api::SysCallIntResult SocketImpl::bind(Network::Address::InstanceConstSharedPtr address) { if (address->type() == Address::Type::Pipe) { @@ -179,5 +214,21 @@ Api::SysCallIntResult SocketImpl::connect(const Network::Address::InstanceConstS address->sockAddrLen()); } +Api::SysCallIntResult SocketImpl::setSocketOption(int level, int optname, const void* optval, + socklen_t optlen) { + return Api::OsSysCallsSingleton::get().setsockopt(io_handle_->fd(), level, optname, optval, + optlen); +} + +Api::SysCallIntResult SocketImpl::getSocketOption(int level, int optname, void* optval, + socklen_t* optlen) { + return Api::OsSysCallsSingleton::get().getsockopt(io_handle_->fd(), level, optname, optval, + optlen); +} + +Api::SysCallIntResult SocketImpl::setBlockingForTest(bool blocking) { + return Api::OsSysCallsSingleton::get().setsocketblocking(io_handle_->fd(), blocking); +} + } // namespace Network } // namespace Envoy \ No newline at end of file diff --git a/source/common/network/socket_impl.h b/source/common/network/socket_impl.h index 99766d23d158..050455a487ec 100644 --- a/source/common/network/socket_impl.h +++ b/source/common/network/socket_impl.h @@ -86,18 +86,24 @@ class SocketImpl : public virtual Socket { Api::SysCallIntResult bind(Network::Address::InstanceConstSharedPtr address) override; Api::SysCallIntResult listen(int backlog) override; Api::SysCallIntResult connect(const Address::InstanceConstSharedPtr addr) override; + Api::SysCallIntResult setSocketOption(int level, int optname, const void* optval, + socklen_t optlen) override; + Api::SysCallIntResult getSocketOption(int level, int optname, void* optval, + socklen_t* optlen) override; + Api::SysCallIntResult setBlockingForTest(bool blocking) override; const OptionsSharedPtr& options() const override { return options_; } Address::SocketType socketType() const override { return sock_type_; } + Address::Type addressType() const override { return addr_type_; } protected: - SocketImpl(IoHandlePtr&& io_handle, const Address::InstanceConstSharedPtr& local_address) - : io_handle_(std::move(io_handle)), local_address_(local_address) {} + SocketImpl(IoHandlePtr&& io_handle, const Address::InstanceConstSharedPtr& local_address); const IoHandlePtr io_handle_; Address::InstanceConstSharedPtr local_address_; OptionsSharedPtr options_; Address::SocketType sock_type_; + Address::Type addr_type_; }; } // namespace Network diff --git a/source/common/network/socket_option_impl.cc b/source/common/network/socket_option_impl.cc index 2253fa8a19c9..b78cb0530649 100644 --- a/source/common/network/socket_option_impl.cc +++ b/source/common/network/socket_option_impl.cc @@ -53,9 +53,7 @@ Api::SysCallIntResult SocketOptionImpl::setSocketOption(Socket& socket, return {-1, ENOTSUP}; } - auto& os_syscalls = Api::OsSysCallsSingleton::get(); - return os_syscalls.setsockopt(socket.ioHandle().fd(), optname.level(), optname.option(), value, - size); + return socket.setSocketOption(optname.level(), optname.option(), value, size); } } // namespace Network diff --git a/source/common/network/utility.cc b/source/common/network/utility.cc index 2d138e1961be..5c334879bdb5 100644 --- a/source/common/network/utility.cc +++ b/source/common/network/utility.cc @@ -343,15 +343,15 @@ Address::InstanceConstSharedPtr Utility::getAddressWithPort(const Address::Insta NOT_REACHED_GCOVR_EXCL_LINE; } 
-Address::InstanceConstSharedPtr Utility::getOriginalDst(os_fd_t fd) { +Address::InstanceConstSharedPtr Utility::getOriginalDst(Socket& sock) { #ifdef SOL_IP sockaddr_storage orig_addr; socklen_t addr_len = sizeof(sockaddr_storage); int socket_domain; socklen_t domain_len = sizeof(socket_domain); - auto& os_syscalls = Api::OsSysCallsSingleton::get(); + // TODO(fcoras): improve once we store ip version in socket const Api::SysCallIntResult result = - os_syscalls.getsockopt(fd, SOL_SOCKET, SO_DOMAIN, &socket_domain, &domain_len); + sock.getSocketOption(SOL_SOCKET, SO_DOMAIN, &socket_domain, &domain_len); int status = result.rc_; if (status != 0) { @@ -359,9 +359,9 @@ Address::InstanceConstSharedPtr Utility::getOriginalDst(os_fd_t fd) { } if (socket_domain == AF_INET) { - status = os_syscalls.getsockopt(fd, SOL_IP, SO_ORIGINAL_DST, &orig_addr, &addr_len).rc_; + status = sock.getSocketOption(SOL_IP, SO_ORIGINAL_DST, &orig_addr, &addr_len).rc_; } else if (socket_domain == AF_INET6) { - status = os_syscalls.getsockopt(fd, SOL_IPV6, IP6T_SO_ORIGINAL_DST, &orig_addr, &addr_len).rc_; + status = sock.getSocketOption(SOL_IPV6, IP6T_SO_ORIGINAL_DST, &orig_addr, &addr_len).rc_; } else { return nullptr; } @@ -370,20 +370,11 @@ Address::InstanceConstSharedPtr Utility::getOriginalDst(os_fd_t fd) { return nullptr; } - switch (orig_addr.ss_family) { - case AF_INET: - return Address::InstanceConstSharedPtr{ - new Address::Ipv4Instance(reinterpret_cast(&orig_addr))}; - case AF_INET6: - return Address::InstanceConstSharedPtr{ - new Address::Ipv6Instance(reinterpret_cast(orig_addr))}; - default: - return nullptr; - } + return Address::addressFromSockAddr(orig_addr, 0, true /* default for v6 constructor */); #else // TODO(zuercher): determine if connection redirection is possible under macOS (c.f. pfctl and // divert), and whether it's possible to find the learn destination address. - UNREFERENCED_PARAMETER(fd); + UNREFERENCED_PARAMETER(sock); return nullptr; #endif } diff --git a/source/common/network/utility.h b/source/common/network/utility.h index c0d3501d7854..2f7145ed928e 100644 --- a/source/common/network/utility.h +++ b/source/common/network/utility.h @@ -231,13 +231,13 @@ class Utility { uint32_t port); /** - * Retrieve the original destination address from an accepted fd. + * Retrieve the original destination address from an accepted socket. * The address (IP and port) may be not local and the port may differ from * the listener port if the packets were redirected using iptables - * @param fd is the descriptor returned by accept() + * @param sock is accepted socket * @return the original destination or nullptr if not available. 
*/ - static Address::InstanceConstSharedPtr getOriginalDst(os_fd_t fd); + static Address::InstanceConstSharedPtr getOriginalDst(Socket& sock); /** * Parses a string containing a comma-separated list of port numbers and/or diff --git a/source/extensions/filters/listener/original_dst/original_dst.cc b/source/extensions/filters/listener/original_dst/original_dst.cc index d6e49bc3b7ba..cea15d4664bb 100644 --- a/source/extensions/filters/listener/original_dst/original_dst.cc +++ b/source/extensions/filters/listener/original_dst/original_dst.cc @@ -10,18 +10,16 @@ namespace Extensions { namespace ListenerFilters { namespace OriginalDst { -Network::Address::InstanceConstSharedPtr OriginalDstFilter::getOriginalDst(os_fd_t fd) { - return Network::Utility::getOriginalDst(fd); +Network::Address::InstanceConstSharedPtr OriginalDstFilter::getOriginalDst(Network::Socket& sock) { + return Network::Utility::getOriginalDst(sock); } Network::FilterStatus OriginalDstFilter::onAccept(Network::ListenerFilterCallbacks& cb) { ENVOY_LOG(debug, "original_dst: New connection accepted"); Network::ConnectionSocket& socket = cb.socket(); - const Network::Address::Instance& local_address = *socket.localAddress(); - if (local_address.type() == Network::Address::Type::Ip) { - Network::Address::InstanceConstSharedPtr original_local_address = - getOriginalDst(socket.ioHandle().fd()); + if (socket.addressType() == Network::Address::Type::Ip) { + Network::Address::InstanceConstSharedPtr original_local_address = getOriginalDst(socket); // A listener that has the use_original_dst flag set to true can still receive // connections that are NOT redirected using iptables. If a connection was not redirected, diff --git a/source/extensions/filters/listener/original_dst/original_dst.h b/source/extensions/filters/listener/original_dst/original_dst.h index 59c5cc0ee886..836834a4d658 100644 --- a/source/extensions/filters/listener/original_dst/original_dst.h +++ b/source/extensions/filters/listener/original_dst/original_dst.h @@ -14,7 +14,7 @@ namespace OriginalDst { */ class OriginalDstFilter : public Network::ListenerFilter, Logger::Loggable { public: - virtual Network::Address::InstanceConstSharedPtr getOriginalDst(os_fd_t fd); + virtual Network::Address::InstanceConstSharedPtr getOriginalDst(Network::Socket& sock); // Network::ListenerFilter Network::FilterStatus onAccept(Network::ListenerFilterCallbacks& cb) override; diff --git a/test/common/network/address_impl_test.cc b/test/common/network/address_impl_test.cc index 3b1dd622ff35..9142756dcc37 100644 --- a/test/common/network/address_impl_test.cc +++ b/test/common/network/address_impl_test.cc @@ -51,17 +51,12 @@ void testSocketBindAndConnect(Network::Address::IpVersion ip_version, bool v6onl // Create a socket on which we'll listen for connections from clients. SocketImpl sock(SocketType::Stream, addr_port); ASSERT_GE(sock.ioHandle().fd(), 0) << addr_port->asString(); - auto& os_sys_calls = Api::OsSysCallsSingleton::get(); // Check that IPv6 sockets accept IPv6 connections only. 
if (addr_port->ip()->version() == IpVersion::v6) { int socket_v6only = 0; socklen_t size_int = sizeof(socket_v6only); - ASSERT_GE( - os_sys_calls - .getsockopt(sock.ioHandle().fd(), IPPROTO_IPV6, IPV6_V6ONLY, &socket_v6only, &size_int) - .rc_, - 0); + ASSERT_GE(sock.getSocketOption(IPPROTO_IPV6, IPV6_V6ONLY, &socket_v6only, &size_int).rc_, 0); EXPECT_EQ(v6only, socket_v6only != 0); } @@ -74,7 +69,7 @@ void testSocketBindAndConnect(Network::Address::IpVersion ip_version, bool v6onl // require another thread. ASSERT_EQ(sock.listen(128).rc_, 0); - auto client_connect = [&os_sys_calls](Address::InstanceConstSharedPtr addr_port) { + auto client_connect = [](Address::InstanceConstSharedPtr addr_port) { // Create a client socket and connect to the server. SocketImpl client_sock(SocketType::Stream, addr_port); @@ -84,7 +79,7 @@ void testSocketBindAndConnect(Network::Address::IpVersion ip_version, bool v6onl // operation of ::connect(), so connect returns with errno==EWOULDBLOCK before the tcp // handshake can complete. For testing convenience, re-enable blocking on the socket // so that connect will wait for the handshake to complete. - ASSERT_EQ(os_sys_calls.setsocketblocking(client_sock.ioHandle().fd(), true).rc_, 0); + ASSERT_EQ(client_sock.setBlockingForTest(true).rc_, 0); // Connect to the server. const Api::SysCallIntResult result = client_sock.connect(addr_port); diff --git a/test/common/network/socket_option_factory_test.cc b/test/common/network/socket_option_factory_test.cc index 3ff0a214021a..280e522b667f 100644 --- a/test/common/network/socket_option_factory_test.cc +++ b/test/common/network/socket_option_factory_test.cc @@ -59,13 +59,13 @@ TEST_F(SocketOptionFactoryTest, TestBuildSocketMarkOptions) { const int type = expected_option.level(); const int option = expected_option.option(); - EXPECT_CALL(os_sys_calls_mock_, setsockopt_(_, _, _, _, sizeof(int))) - .WillOnce(Invoke([type, option](os_fd_t, int input_type, int input_option, const void* optval, - socklen_t) -> int { + EXPECT_CALL(socket_mock_, setSocketOption(_, _, _, sizeof(int))) + .WillOnce(Invoke([type, option](int input_type, int input_option, const void* optval, + socklen_t) -> Api::SysCallIntResult { EXPECT_EQ(100, *static_cast(optval)); EXPECT_EQ(type, input_type); EXPECT_EQ(option, input_option); - return 0; + return {0, 0}; })); EXPECT_TRUE(Network::Socket::applyOptions(options, socket_mock_, @@ -83,14 +83,14 @@ TEST_F(SocketOptionFactoryTest, TestBuildIpv4TransparentOptions) { const int type = expected_option.level(); const int option = expected_option.option(); - EXPECT_CALL(os_sys_calls_mock_, setsockopt_(_, _, _, _, sizeof(int))) + EXPECT_CALL(socket_mock_, setSocketOption(_, _, _, sizeof(int))) .Times(2) - .WillRepeatedly(Invoke([type, option](os_fd_t, int input_type, int input_option, - const void* optval, socklen_t) -> int { + .WillRepeatedly(Invoke([type, option](int input_type, int input_option, const void* optval, + socklen_t) -> Api::SysCallIntResult { EXPECT_EQ(type, input_type); EXPECT_EQ(option, input_option); EXPECT_EQ(1, *static_cast(optval)); - return 0; + return {0, 0}; })); EXPECT_TRUE(Network::Socket::applyOptions(options, socket_mock_, @@ -110,14 +110,14 @@ TEST_F(SocketOptionFactoryTest, TestBuildIpv6TransparentOptions) { const int type = expected_option.level(); const int option = expected_option.option(); - EXPECT_CALL(os_sys_calls_mock_, setsockopt_(_, _, _, _, sizeof(int))) + EXPECT_CALL(socket_mock_, setSocketOption(_, _, _, sizeof(int))) .Times(2) - .WillRepeatedly(Invoke([type, 
option](os_fd_t, int input_type, int input_option, - const void* optval, socklen_t) -> int { + .WillRepeatedly(Invoke([type, option](int input_type, int input_option, const void* optval, + socklen_t) -> Api::SysCallIntResult { EXPECT_EQ(type, input_type); EXPECT_EQ(option, input_option); EXPECT_EQ(1, *static_cast(optval)); - return 0; + return {0, 0}; })); EXPECT_TRUE(Network::Socket::applyOptions(options, socket_mock_, diff --git a/test/common/network/socket_option_impl_test.cc b/test/common/network/socket_option_impl_test.cc index d6e0e3932e1b..6ecf656244dd 100644 --- a/test/common/network/socket_option_impl_test.cc +++ b/test/common/network/socket_option_impl_test.cc @@ -32,10 +32,10 @@ TEST_F(SocketOptionImplTest, HasName) { // If we fail to set an option, verify that the log message // contains the option name so the operator can debug. SocketOptionImpl socket_option{envoy::config::core::v3::SocketOption::STATE_PREBIND, optname, 1}; - EXPECT_CALL(os_sys_calls_, setsockopt_(_, _, _, _, _)) - .WillOnce(Invoke([](os_fd_t, int, int, const void* optval, socklen_t) -> int { + EXPECT_CALL(socket_, setSocketOption(_, _, _, _)) + .WillOnce(Invoke([](int, int, const void* optval, socklen_t) -> Api::SysCallIntResult { EXPECT_EQ(1, *static_cast(optval)); - return -1; + return {-1, 0}; })); EXPECT_LOG_CONTAINS( @@ -46,10 +46,10 @@ TEST_F(SocketOptionImplTest, HasName) { TEST_F(SocketOptionImplTest, SetOptionSuccessTrue) { SocketOptionImpl socket_option{envoy::config::core::v3::SocketOption::STATE_PREBIND, ENVOY_MAKE_SOCKET_OPTION_NAME(5, 10), 1}; - EXPECT_CALL(os_sys_calls_, setsockopt_(_, 5, 10, _, sizeof(int))) - .WillOnce(Invoke([](os_fd_t, int, int, const void* optval, socklen_t) -> int { + EXPECT_CALL(socket_, setSocketOption(5, 10, _, sizeof(int))) + .WillOnce(Invoke([](int, int, const void* optval, socklen_t) -> Api::SysCallIntResult { EXPECT_EQ(1, *static_cast(optval)); - return 0; + return {0, 0}; })); EXPECT_TRUE( socket_option.setOption(socket_, envoy::config::core::v3::SocketOption::STATE_PREBIND)); diff --git a/test/common/network/socket_option_test.h b/test/common/network/socket_option_test.h index edc9a4e6a49a..af50690af6cb 100644 --- a/test/common/network/socket_option_test.h +++ b/test/common/network/socket_option_test.h @@ -75,12 +75,13 @@ class SocketOptionTest : public testing::Test { const std::set& when) { for (auto state : when) { if (option_name.hasValue()) { - EXPECT_CALL(os_sys_calls_, - setsockopt_(_, option_name.level(), option_name.option(), _, sizeof(int))) - .WillOnce(Invoke([option_val](os_fd_t, int, int, const void* optval, socklen_t) -> int { - EXPECT_EQ(option_val, *static_cast(optval)); - return 0; - })); + EXPECT_CALL(socket_, + setSocketOption(option_name.level(), option_name.option(), _, sizeof(int))) + .WillOnce(Invoke( + [option_val](int, int, const void* optval, socklen_t) -> Api::SysCallIntResult { + EXPECT_EQ(option_val, *static_cast(optval)); + return {0, 0}; + })); EXPECT_TRUE(socket_option.setOption(socket_, state)); } else { EXPECT_FALSE(socket_option.setOption(socket_, state)); diff --git a/test/common/network/udp_listener_impl_test.cc b/test/common/network/udp_listener_impl_test.cc index c42bbafa30fa..7ba8311f0a67 100644 --- a/test/common/network/udp_listener_impl_test.cc +++ b/test/common/network/udp_listener_impl_test.cc @@ -123,10 +123,9 @@ TEST_P(UdpListenerImplTest, UdpSetListeningSocketOptionsSuccess) { #ifdef SO_RXQ_OVFL // Verify that overflow detection is enabled. 
int get_overflow = 0; - auto& os_syscalls = Api::OsSysCallsSingleton::get(); socklen_t int_size = static_cast(sizeof(get_overflow)); - const Api::SysCallIntResult result = os_syscalls.getsockopt( - server_socket_->ioHandle().fd(), SOL_SOCKET, SO_RXQ_OVFL, &get_overflow, &int_size); + const Api::SysCallIntResult result = + server_socket_->getSocketOption(SOL_SOCKET, SO_RXQ_OVFL, &get_overflow, &int_size); EXPECT_EQ(0, result.rc_); EXPECT_EQ(1, get_overflow); #endif diff --git a/test/common/network/utility_test.cc b/test/common/network/utility_test.cc index 120f13615c82..49e99f755fa2 100644 --- a/test/common/network/utility_test.cc +++ b/test/common/network/utility_test.cc @@ -169,7 +169,10 @@ TEST_P(NetworkUtilityGetLocalAddress, GetLocalAddress) { EXPECT_NE(nullptr, Utility::getLocalAddress(GetParam())); } -TEST(NetworkUtility, GetOriginalDst) { EXPECT_EQ(nullptr, Utility::getOriginalDst(-1)); } +TEST(NetworkUtility, GetOriginalDst) { + testing::NiceMock socket; + EXPECT_EQ(nullptr, Utility::getOriginalDst(socket)); +} TEST(NetworkUtility, LocalConnection) { Network::Address::InstanceConstSharedPtr local_addr; diff --git a/test/common/upstream/cluster_manager_impl_test.cc b/test/common/upstream/cluster_manager_impl_test.cc index 7e1255aef2a6..9a45af5f71c0 100644 --- a/test/common/upstream/cluster_manager_impl_test.cc +++ b/test/common/upstream/cluster_manager_impl_test.cc @@ -3078,21 +3078,23 @@ class SockoptsTest : public ClusterManagerImplTest { NiceMock os_sys_calls; TestThreadsafeSingletonInjector os_calls(&os_sys_calls); + NiceMock socket; bool expect_success = true; for (const auto& name_val : names_vals) { if (!name_val.first.hasValue()) { expect_success = false; continue; } - EXPECT_CALL(os_sys_calls, - setsockopt_(_, name_val.first.level(), name_val.first.option(), _, sizeof(int))) - .WillOnce(Invoke([&name_val](os_fd_t, int, int, const void* optval, socklen_t) -> int { - EXPECT_EQ(name_val.second, *static_cast(optval)); - return 0; - })); + EXPECT_CALL(socket, + setSocketOption(name_val.first.level(), name_val.first.option(), _, sizeof(int))) + .WillOnce( + Invoke([&name_val](int, int, const void* optval, socklen_t) -> Api::SysCallIntResult { + EXPECT_EQ(name_val.second, *static_cast(optval)); + return {0, 0}; + })); } EXPECT_CALL(factory_.tls_.dispatcher_, createClientConnection_(_, _, _, _)) - .WillOnce(Invoke([this, &names_vals, expect_success]( + .WillOnce(Invoke([this, &names_vals, expect_success, &socket]( Network::Address::InstanceConstSharedPtr, Network::Address::InstanceConstSharedPtr, Network::TransportSocketPtr&, const Network::ConnectionSocket::OptionsSharedPtr& options) @@ -3101,7 +3103,6 @@ class SockoptsTest : public ClusterManagerImplTest { if (options.get() != nullptr) { // Don't crash the entire test. 
EXPECT_EQ(names_vals.size(), options->size()); } - NiceMock socket; if (expect_success) { EXPECT_TRUE((Network::Socket::applyOptions( options, socket, envoy::config::core::v3::SocketOption::STATE_PREBIND))); @@ -3350,50 +3351,50 @@ class TcpKeepaliveTest : public ClusterManagerImplTest { } NiceMock os_sys_calls; TestThreadsafeSingletonInjector os_calls(&os_sys_calls); + NiceMock socket; EXPECT_CALL(factory_.tls_.dispatcher_, createClientConnection_(_, _, _, _)) - .WillOnce( - Invoke([this](Network::Address::InstanceConstSharedPtr, - Network::Address::InstanceConstSharedPtr, Network::TransportSocketPtr&, - const Network::ConnectionSocket::OptionsSharedPtr& options) - -> Network::ClientConnection* { - EXPECT_NE(nullptr, options.get()); - NiceMock socket; - EXPECT_TRUE((Network::Socket::applyOptions( - options, socket, envoy::config::core::v3::SocketOption::STATE_PREBIND))); - return connection_; - })); - EXPECT_CALL(os_sys_calls, setsockopt_(_, ENVOY_SOCKET_SO_KEEPALIVE.level(), - ENVOY_SOCKET_SO_KEEPALIVE.option(), _, sizeof(int))) - .WillOnce(Invoke([](os_fd_t, int, int, const void* optval, socklen_t) -> int { + .WillOnce(Invoke([this, &socket](Network::Address::InstanceConstSharedPtr, + Network::Address::InstanceConstSharedPtr, + Network::TransportSocketPtr&, + const Network::ConnectionSocket::OptionsSharedPtr& options) + -> Network::ClientConnection* { + EXPECT_NE(nullptr, options.get()); + EXPECT_TRUE((Network::Socket::applyOptions( + options, socket, envoy::config::core::v3::SocketOption::STATE_PREBIND))); + return connection_; + })); + EXPECT_CALL(socket, setSocketOption(ENVOY_SOCKET_SO_KEEPALIVE.level(), + ENVOY_SOCKET_SO_KEEPALIVE.option(), _, sizeof(int))) + .WillOnce(Invoke([](int, int, const void* optval, socklen_t) -> Api::SysCallIntResult { EXPECT_EQ(1, *static_cast(optval)); - return 0; + return {0, 0}; })); if (keepalive_probes.has_value()) { - EXPECT_CALL(os_sys_calls, setsockopt_(_, ENVOY_SOCKET_TCP_KEEPCNT.level(), - ENVOY_SOCKET_TCP_KEEPCNT.option(), _, sizeof(int))) - .WillOnce( - Invoke([&keepalive_probes](os_fd_t, int, int, const void* optval, socklen_t) -> int { - EXPECT_EQ(keepalive_probes.value(), *static_cast(optval)); - return 0; - })); + EXPECT_CALL(socket, setSocketOption(ENVOY_SOCKET_TCP_KEEPCNT.level(), + ENVOY_SOCKET_TCP_KEEPCNT.option(), _, sizeof(int))) + .WillOnce(Invoke([&keepalive_probes](int, int, const void* optval, + socklen_t) -> Api::SysCallIntResult { + EXPECT_EQ(keepalive_probes.value(), *static_cast(optval)); + return {0, 0}; + })); } if (keepalive_time.has_value()) { - EXPECT_CALL(os_sys_calls, setsockopt_(_, ENVOY_SOCKET_TCP_KEEPIDLE.level(), - ENVOY_SOCKET_TCP_KEEPIDLE.option(), _, sizeof(int))) - .WillOnce( - Invoke([&keepalive_time](os_fd_t, int, int, const void* optval, socklen_t) -> int { + EXPECT_CALL(socket, setSocketOption(ENVOY_SOCKET_TCP_KEEPIDLE.level(), + ENVOY_SOCKET_TCP_KEEPIDLE.option(), _, sizeof(int))) + .WillOnce(Invoke( + [&keepalive_time](int, int, const void* optval, socklen_t) -> Api::SysCallIntResult { EXPECT_EQ(keepalive_time.value(), *static_cast(optval)); - return 0; + return {0, 0}; })); } if (keepalive_interval.has_value()) { - EXPECT_CALL(os_sys_calls, setsockopt_(_, ENVOY_SOCKET_TCP_KEEPINTVL.level(), - ENVOY_SOCKET_TCP_KEEPINTVL.option(), _, sizeof(int))) - .WillOnce(Invoke( - [&keepalive_interval](os_fd_t, int, int, const void* optval, socklen_t) -> int { - EXPECT_EQ(keepalive_interval.value(), *static_cast(optval)); - return 0; - })); + EXPECT_CALL(socket, setSocketOption(ENVOY_SOCKET_TCP_KEEPINTVL.level(), + 
ENVOY_SOCKET_TCP_KEEPINTVL.option(), _, sizeof(int))) + .WillOnce(Invoke([&keepalive_interval](int, int, const void* optval, + socklen_t) -> Api::SysCallIntResult { + EXPECT_EQ(keepalive_interval.value(), *static_cast(optval)); + return {0, 0}; + })); } auto conn_data = cluster_manager_->tcpConnForCluster("TcpKeepaliveCluster", nullptr); EXPECT_EQ(connection_, conn_data.connection_.get()); diff --git a/test/extensions/stats_sinks/common/statsd/udp_statsd_test.cc b/test/extensions/stats_sinks/common/statsd/udp_statsd_test.cc index 7ca2fad17b68..aa9e6f132808 100644 --- a/test/extensions/stats_sinks/common/statsd/udp_statsd_test.cc +++ b/test/extensions/stats_sinks/common/statsd/udp_statsd_test.cc @@ -51,13 +51,8 @@ TEST(UdpOverUdsStatsdSinkTest, InitWithPipeAddress) { sink.flush(snapshot); // Start the server. - // TODO(mattklein123): Right now all sockets are non-blocking. Move this non-blocking - // modification back to the abstraction layer so it will work for multiple platforms. Additionally - // this uses low level networking calls because our abstractions in this area only work for IP - // sockets. Revisit this also. Network::SocketImpl sock(Network::Address::SocketType::Datagram, uds_address); - RELEASE_ASSERT( - Api::OsSysCallsSingleton::get().setsocketblocking(sock.ioHandle().fd(), false).rc_ != -1, ""); + RELEASE_ASSERT(sock.setBlockingForTest(false).rc_ != -1, ""); sock.bind(uds_address); // Do the flush which should have somewhere to write now. diff --git a/test/mocks/network/mocks.h b/test/mocks/network/mocks.h index 73384f8850b7..c967b2b18b5b 100644 --- a/test/mocks/network/mocks.h +++ b/test/mocks/network/mocks.h @@ -220,6 +220,7 @@ class MockListenSocket : public Socket { MOCK_METHOD(IoHandle&, ioHandle, ()); MOCK_METHOD(const IoHandle&, ioHandle, (), (const)); MOCK_METHOD(Address::SocketType, socketType, (), (const)); + MOCK_METHOD(Address::Type, addressType, (), (const)); MOCK_METHOD(void, close, ()); MOCK_METHOD(bool, isOpen, (), (const)); MOCK_METHOD(void, addOption_, (const Socket::OptionConstSharedPtr& option)); @@ -232,6 +233,9 @@ class MockListenSocket : public Socket { MOCK_METHOD(Api::SysCallIntResult, bind, (const Address::InstanceConstSharedPtr)); MOCK_METHOD(Api::SysCallIntResult, connect, (const Address::InstanceConstSharedPtr)); MOCK_METHOD(Api::SysCallIntResult, listen, (int)); + MOCK_METHOD(Api::SysCallIntResult, setSocketOption, (int, int, const void*, socklen_t)); + MOCK_METHOD(Api::SysCallIntResult, getSocketOption, (int, int, void*, socklen_t*)); + MOCK_METHOD(Api::SysCallIntResult, setBlockingForTest, (bool)); IoHandlePtr io_handle_; Address::InstanceConstSharedPtr local_address_; @@ -278,6 +282,7 @@ class MockConnectionSocket : public ConnectionSocket { MOCK_METHOD(IoHandle&, ioHandle, ()); MOCK_METHOD(const IoHandle&, ioHandle, (), (const)); MOCK_METHOD(Address::SocketType, socketType, (), (const)); + MOCK_METHOD(Address::Type, addressType, (), (const)); MOCK_METHOD(void, close, ()); MOCK_METHOD(bool, isOpen, (), (const)); MOCK_METHOD(IoHandlePtr, socket, (Address::SocketType, Address::Type, Address::IpVersion), @@ -287,6 +292,9 @@ class MockConnectionSocket : public ConnectionSocket { MOCK_METHOD(Api::SysCallIntResult, bind, (const Address::InstanceConstSharedPtr)); MOCK_METHOD(Api::SysCallIntResult, connect, (const Address::InstanceConstSharedPtr)); MOCK_METHOD(Api::SysCallIntResult, listen, (int)); + MOCK_METHOD(Api::SysCallIntResult, setSocketOption, (int, int, const void*, socklen_t)); + MOCK_METHOD(Api::SysCallIntResult, getSocketOption, (int, 
int, void*, socklen_t*)); + MOCK_METHOD(Api::SysCallIntResult, setBlockingForTest, (bool)); IoHandlePtr io_handle_; Address::InstanceConstSharedPtr local_address_; diff --git a/test/server/filter_chain_benchmark_test.cc b/test/server/filter_chain_benchmark_test.cc index 54935927a5a3..600855709f2d 100644 --- a/test/server/filter_chain_benchmark_test.cc +++ b/test/server/filter_chain_benchmark_test.cc @@ -92,6 +92,7 @@ class MockConnectionSocket : public Network::ConnectionSocket { Network::Address::SocketType socketType() const override { return Network::Address::SocketType::Stream; } + Network::Address::Type addressType() const override { return local_address_->type(); } void setLocalAddress(const Network::Address::InstanceConstSharedPtr&) override {} void restoreLocalAddress(const Network::Address::InstanceConstSharedPtr&) override {} void setRemoteAddress(const Network::Address::InstanceConstSharedPtr&) override {} @@ -107,6 +108,11 @@ class MockConnectionSocket : public Network::ConnectionSocket { Api::SysCallIntResult connect(const Network::Address::InstanceConstSharedPtr) override { return {0, 0}; } + Api::SysCallIntResult setSocketOption(int, int, const void*, socklen_t) override { + return {0, 0}; + } + Api::SysCallIntResult getSocketOption(int, int, void*, socklen_t*) override { return {0, 0}; } + Api::SysCallIntResult setBlockingForTest(bool) override { return {0, 0}; } private: Network::IoHandlePtr io_handle_; diff --git a/test/server/listener_manager_impl_quic_only_test.cc b/test/server/listener_manager_impl_quic_only_test.cc index 8106aac6adfc..296ba96b4107 100644 --- a/test/server/listener_manager_impl_quic_only_test.cc +++ b/test/server/listener_manager_impl_quic_only_test.cc @@ -55,21 +55,18 @@ reuse_port: true #endif /* expected_creation_params */ {true, false}); - expectSetsockopt(os_sys_calls_, - /* expected_sockopt_level */ IPPROTO_IP, + expectSetsockopt(/* expected_sockopt_level */ IPPROTO_IP, /* expected_sockopt_name */ ENVOY_IP_PKTINFO, /* expected_value */ 1, /* expected_num_calls */ 1); #ifdef SO_RXQ_OVFL - expectSetsockopt(os_sys_calls_, - /* expected_sockopt_level */ SOL_SOCKET, + expectSetsockopt(/* expected_sockopt_level */ SOL_SOCKET, /* expected_sockopt_name */ SO_RXQ_OVFL, /* expected_value */ 1, /* expected_num_calls */ 1); #endif - expectSetsockopt(os_sys_calls_, - /* expected_sockopt_level */ SOL_SOCKET, + expectSetsockopt(/* expected_sockopt_level */ SOL_SOCKET, /* expected_sockopt_name */ SO_REUSEPORT, /* expected_value */ 1, /* expected_num_calls */ 1); diff --git a/test/server/listener_manager_impl_test.cc b/test/server/listener_manager_impl_test.cc index 3325a221684e..6d3f7d887ac3 100644 --- a/test/server/listener_manager_impl_test.cc +++ b/test/server/listener_manager_impl_test.cc @@ -77,8 +77,8 @@ class ListenerManagerImplWithRealFiltersTest : public ListenerManagerImplTest { ListenSocketCreationParams expected_creation_params = {true, true}) { if (expected_option.hasValue()) { expectCreateListenSocket(expected_state, expected_num_options, expected_creation_params); - expectSetsockopt(os_sys_calls_, expected_option.level(), expected_option.option(), - expected_value, expected_num_options); + expectSetsockopt(expected_option.level(), expected_option.option(), expected_value, + expected_num_options); manager_->addOrUpdateListener(listener, "", true); EXPECT_EQ(1U, manager_->listeners().size()); } else { @@ -336,8 +336,14 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, UdpAddress) { EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(*worker_, 
addListener(_, _, _)); EXPECT_CALL(listener_factory_, - createListenSocket(_, Network::Address::SocketType::Datagram, _, {{true, false}})); - EXPECT_CALL(os_sys_calls_, setsockopt_(_, _, _, _, _)).Times(testing::AtLeast(1)); + createListenSocket(_, Network::Address::SocketType::Datagram, _, {{true, false}})) + .WillOnce( + Invoke([this](const Network::Address::InstanceConstSharedPtr&, + Network::Address::SocketType, const Network::Socket::OptionsSharedPtr&, + const ListenSocketCreationParams&) -> Network::SocketSharedPtr { + return listener_factory_.socket_; + })); + EXPECT_CALL(*listener_factory_.socket_, setSocketOption(_, _, _, _)).Times(testing::AtLeast(1)); EXPECT_CALL(os_sys_calls_, close(_)).WillRepeatedly(Return(Api::SysCallIntResult{0, errno})); manager_->addOrUpdateListener(listener_proto, "", true); EXPECT_EQ(1u, manager_->listeners().size()); @@ -3481,7 +3487,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, OriginalDstFilter) { } class OriginalDstTestFilter : public Extensions::ListenerFilters::OriginalDst::OriginalDstFilter { - Network::Address::InstanceConstSharedPtr getOriginalDst(os_fd_t) override { + Network::Address::InstanceConstSharedPtr getOriginalDst(Network::Socket&) override { return Network::Address::InstanceConstSharedPtr{ new Network::Address::Ipv4Instance("127.0.0.2", 2345)}; } @@ -3555,7 +3561,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, OriginalDstTestFilter) { class OriginalDstTestFilterIPv6 : public Extensions::ListenerFilters::OriginalDst::OriginalDstFilter { - Network::Address::InstanceConstSharedPtr getOriginalDst(os_fd_t) override { + Network::Address::InstanceConstSharedPtr getOriginalDst(Network::Socket&) override { return Network::Address::InstanceConstSharedPtr{ new Network::Address::Ipv6Instance("1::2", 2345)}; } @@ -3728,14 +3734,12 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, ReusePortListenerDisabled) { #endif /* expected_creation_params */ {true, false}); - expectSetsockopt(os_sys_calls_, - /* expected_sockopt_level */ IPPROTO_IP, + expectSetsockopt(/* expected_sockopt_level */ IPPROTO_IP, /* expected_sockopt_name */ ENVOY_IP_PKTINFO, /* expected_value */ 1, /* expected_num_calls */ 1); #ifdef SO_RXQ_OVFL - expectSetsockopt(os_sys_calls_, - /* expected_sockopt_level */ SOL_SOCKET, + expectSetsockopt(/* expected_sockopt_level */ SOL_SOCKET, /* expected_sockopt_name */ SO_RXQ_OVFL, /* expected_value */ 1, /* expected_num_calls */ 1); @@ -3764,14 +3768,14 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, LiteralSockoptListenerEnabled) { expectCreateListenSocket(envoy::config::core::v3::SocketOption::STATE_PREBIND, /* expected_num_options */ 3); - expectSetsockopt(os_sys_calls_, - /* expected_sockopt_level */ 1, - /* expected_sockopt_name */ 2, - /* expected_value */ 3); - expectSetsockopt(os_sys_calls_, - /* expected_sockopt_level */ 4, - /* expected_sockopt_name */ 5, - /* expected_value */ 6); + expectSetsockopt( + /* expected_sockopt_level */ 1, + /* expected_sockopt_name */ 2, + /* expected_value */ 3); + expectSetsockopt( + /* expected_sockopt_level */ 4, + /* expected_sockopt_name */ 5, + /* expected_value */ 6); manager_->addOrUpdateListener(listener, "", true); EXPECT_EQ(1U, manager_->listeners().size()); } diff --git a/test/server/listener_manager_impl_test.h b/test/server/listener_manager_impl_test.h index 5bf7491a4b9d..0790b8faa4f7 100644 --- a/test/server/listener_manager_impl_test.h +++ b/test/server/listener_manager_impl_test.h @@ -212,18 +212,18 @@ class ListenerManagerImplTest : public testing::Test { } /** - * 
Validate that setsockopt() is called the expected number of times with the expected options. + * Validate that setSocketOption() is called the expected number of times with the expected + * options. */ - void expectSetsockopt(NiceMock& os_sys_calls, int expected_sockopt_level, - int expected_sockopt_name, int expected_value, + void expectSetsockopt(int expected_sockopt_level, int expected_sockopt_name, int expected_value, uint32_t expected_num_calls = 1) { - EXPECT_CALL(os_sys_calls, - setsockopt_(_, expected_sockopt_level, expected_sockopt_name, _, sizeof(int))) + EXPECT_CALL(*listener_factory_.socket_, + setSocketOption(expected_sockopt_level, expected_sockopt_name, _, sizeof(int))) .Times(expected_num_calls) - .WillRepeatedly( - Invoke([expected_value](os_fd_t, int, int, const void* optval, socklen_t) -> int { + .WillRepeatedly(Invoke( + [expected_value](int, int, const void* optval, socklen_t) -> Api::SysCallIntResult { EXPECT_EQ(expected_value, *static_cast(optval)); - return 0; + return {0, 0}; })); } diff --git a/test/test_common/network_utility.cc b/test/test_common/network_utility.cc index 3a03ecb4c947..ec6f3772491b 100644 --- a/test/test_common/network_utility.cc +++ b/test/test_common/network_utility.cc @@ -230,9 +230,7 @@ Api::IoCallUint64Result readFromSocket(IoHandle& handle, const Address::Instance UdpSyncPeer::UdpSyncPeer(Network::Address::IpVersion version) : socket_( std::make_unique(getCanonicalLoopbackAddress(version), nullptr, true)) { - RELEASE_ASSERT( - Api::OsSysCallsSingleton::get().setsocketblocking(socket_->ioHandle().fd(), true).rc_ != -1, - ""); + RELEASE_ASSERT(socket_->setBlockingForTest(true).rc_ != -1, ""); } void UdpSyncPeer::write(const std::string& buffer, const Network::Address::Instance& peer) { diff --git a/tools/spelling/spelling_dictionary.txt b/tools/spelling/spelling_dictionary.txt index c59db7754d40..69069ac79220 100644 --- a/tools/spelling/spelling_dictionary.txt +++ b/tools/spelling/spelling_dictionary.txt @@ -801,6 +801,7 @@ openssl opentracing optimizations optname +optval ostream outlier outliers @@ -987,6 +988,7 @@ snapshotted sockaddr socketpair sockfd +socklen sockopt sockopts somestring From d9358de1e621f84052c6c5a61c8ecd70e449c008 Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Fri, 29 May 2020 08:41:03 -0700 Subject: [PATCH 250/909] tech-debt: delete unused files (#11361) Signed-off-by: Lizan Zhou --- .azure-pipelines/linux.yml | 1 - bazel/README.md | 2 +- clang-tidy-fixes.yaml | 0 tools/bazel.rc | 4 ---- 4 files changed, 1 insertion(+), 6 deletions(-) delete mode 120000 .azure-pipelines/linux.yml delete mode 100644 clang-tidy-fixes.yaml delete mode 100644 tools/bazel.rc diff --git a/.azure-pipelines/linux.yml b/.azure-pipelines/linux.yml deleted file mode 120000 index ea3cc67f3da8..000000000000 --- a/.azure-pipelines/linux.yml +++ /dev/null @@ -1 +0,0 @@ -pipelines.yml \ No newline at end of file diff --git a/bazel/README.md b/bazel/README.md index 8188228d634e..f2a8683dc0df 100644 --- a/bazel/README.md +++ b/bazel/README.md @@ -577,7 +577,7 @@ have seen some issues with seeing the artifacts tab. If you can't see it, log ou then log back in and it should start working. The latest coverage report for master is available -[here](https://storage.googleapis.com/envoy-coverage/report-master/index.html). +[here](https://storage.googleapis.com/envoy-postsubmit/master/coverage/index.html). It's also possible to specialize the coverage build to a specified test or test dir. 
This is useful when doing things like exploring the coverage of a fuzzer over its corpus. This can be done by diff --git a/clang-tidy-fixes.yaml b/clang-tidy-fixes.yaml deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/tools/bazel.rc b/tools/bazel.rc deleted file mode 100644 index 77a70a875a02..000000000000 --- a/tools/bazel.rc +++ /dev/null @@ -1,4 +0,0 @@ -# This is intended to fail build process when this tools/bazel.rc is processed. -# Bazel will print what is processed and raise an error since --dummy_unknown_option is not recognized. - -common --dummy_unknown_option="ERROR: tools/bazel.rc is being processed, either due to old version of bazel or wrongly symlinked from .bazelrc. Update your bazel and symlink to top level .bazelrc instead." From 02721026be6fdaba948a03f3250eeb37e269e075 Mon Sep 17 00:00:00 2001 From: htuch Date: Fri, 29 May 2020 13:02:24 -0400 Subject: [PATCH 251/909] ci: fix regression in docker push. (#11356) I think this snafu came from moving to an array in #11005. Signed-off-by: Harvey Tuch --- ci/docker_ci.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ci/docker_ci.sh b/ci/docker_ci.sh index 53fd19464738..5a497631586c 100755 --- a/ci/docker_ci.sh +++ b/ci/docker_ci.sh @@ -41,7 +41,7 @@ fi docker login -u "$DOCKERHUB_USERNAME" -p "$DOCKERHUB_PASSWORD" -for BUILD_TYPE in ${BUILD_TYPES}; do +for BUILD_TYPE in "${BUILD_TYPES[@]}"; do docker tag "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}:local" "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}${IMAGE_POSTFIX}:${IMAGE_NAME}" docker push "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}${IMAGE_POSTFIX}:${IMAGE_NAME}" From 2e5a36522f48c7f3ffe2b5b3f163a89194839e63 Mon Sep 17 00:00:00 2001 From: htuch Date: Fri, 29 May 2020 17:24:55 -0400 Subject: [PATCH 252/909] grpc: add support for custom channel args. (#11277) This is useful to allow plumbing of args such as grpc.keepalive_time_ms, e.g. as a fix for #5173. Risk level: Low Testing: Unit and integration tests added. 
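For illustration, a minimal sketch (not part of the patch) of how custom channel args might be supplied on a Google gRPC service config and converted with the helper introduced here; the target URI, stat prefix, and the specific keys/values below are placeholders chosen for the example, mirroring the unit test added in this change:

```
// Sketch only: include paths and values are illustrative, based on the test added in this change.
#include "common/grpc/google_grpc_utils.h"

#include "test/test_common/utility.h"

// Channel args are a map of GRPC_ARG_* keys to either string or int values.
const auto config = Envoy::TestUtility::parseYaml<envoy::config::core::v3::GrpcService>(R"EOF(
google_grpc:
  target_uri: localhost:10000
  stat_prefix: example
  channel_args:
    args:
      grpc.keepalive_time_ms: { int_value: 10000 }
      grpc.default_authority: { string_value: example.com }
)EOF");

// The new helper converts the proto map into grpc::ChannelArguments ...
const grpc::ChannelArguments args =
    Envoy::Grpc::GoogleGrpcUtils::channelArgsFromConfig(config);
// ... which createChannel() now forwards to grpc::CreateCustomChannel() instead of CreateChannel().
```

Because createChannel() now builds the channel via grpc::CreateCustomChannel(), any argument understood by gRPC core (see the GRPC_ARG #defines in grpc_types.h) can be plumbed through the config this way.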
Signed-off-by: Harvey Tuch --- .clang-tidy | 3 +- api/envoy/config/core/v3/grpc_service.proto | 21 +++++++++++- .../config/core/v4alpha/grpc_service.proto | 27 ++++++++++++++- docs/root/version_history/current.rst | 1 + .../envoy/config/core/v3/grpc_service.proto | 21 +++++++++++- .../config/core/v4alpha/grpc_service.proto | 27 ++++++++++++++- source/common/grpc/google_grpc_utils.cc | 23 ++++++++++++- source/common/grpc/google_grpc_utils.h | 10 +++++- test/common/grpc/google_grpc_utils_test.cc | 34 +++++++++++++++++++ .../grpc/grpc_client_integration_test.cc | 12 +++++++ .../grpc_client_integration_test_harness.h | 20 +++++++---- 11 files changed, 185 insertions(+), 14 deletions(-) diff --git a/.clang-tidy b/.clang-tidy index 93d48258a9ae..693858657d47 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -1,4 +1,5 @@ -Checks: '-clang-analyzer-optin.cplusplus.UninitializedObject, +Checks: '-clang-analyzer-core.NonNullParamChecker, + -clang-analyzer-optin.cplusplus.UninitializedObject, abseil-duration-*, abseil-faster-strsplit-delimiter, abseil-no-namespace, diff --git a/api/envoy/config/core/v3/grpc_service.proto b/api/envoy/config/core/v3/grpc_service.proto index 3acd3c1c9b9c..cf7663b3487f 100644 --- a/api/envoy/config/core/v3/grpc_service.proto +++ b/api/envoy/config/core/v3/grpc_service.proto @@ -38,7 +38,7 @@ message GrpcService { string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}]; } - // [#next-free-field: 8] + // [#next-free-field: 9] message GoogleGrpc { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.GrpcService.GoogleGrpc"; @@ -203,6 +203,22 @@ message GrpcService { } } + // Channel arguments. + message ChannelArgs { + message Value { + // Pointer values are not supported, since they don't make any sense when + // delivered via the API. + oneof value_specifier { + string string_value = 1; + + int64 int_value = 2; + } + } + + // See grpc_types.h GRPC_ARG #defines for keys that work here. + map args = 1; + } + // The target URI when using the `Google C++ gRPC client // `_. SSL credentials will be supplied in // :ref:`channel_credentials `. @@ -237,6 +253,9 @@ message GrpcService { // How many bytes each stream can buffer internally. // If not set an implementation defined default is applied (1MiB). google.protobuf.UInt32Value per_stream_buffer_limit_bytes = 7; + + // Custom channels args. + ChannelArgs channel_args = 8; } reserved 4; diff --git a/api/envoy/config/core/v4alpha/grpc_service.proto b/api/envoy/config/core/v4alpha/grpc_service.proto index b547cfb7deec..3abff88ea4fc 100644 --- a/api/envoy/config/core/v4alpha/grpc_service.proto +++ b/api/envoy/config/core/v4alpha/grpc_service.proto @@ -38,7 +38,7 @@ message GrpcService { string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}]; } - // [#next-free-field: 8] + // [#next-free-field: 9] message GoogleGrpc { option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.GrpcService.GoogleGrpc"; @@ -203,6 +203,28 @@ message GrpcService { } } + // Channel arguments. + message ChannelArgs { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs"; + + message Value { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.Value"; + + // Pointer values are not supported, since they don't make any sense when + // delivered via the API. 
+ oneof value_specifier { + string string_value = 1; + + int64 int_value = 2; + } + } + + // See grpc_types.h GRPC_ARG #defines for keys that work here. + map args = 1; + } + // The target URI when using the `Google C++ gRPC client // `_. SSL credentials will be supplied in // :ref:`channel_credentials `. @@ -237,6 +259,9 @@ message GrpcService { // How many bytes each stream can buffer internally. // If not set an implementation defined default is applied (1MiB). google.protobuf.UInt32Value per_stream_buffer_limit_bytes = 7; + + // Custom channels args. + ChannelArgs channel_args = 8; } reserved 4; diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 13b7dae1217d..136472d24fb3 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -55,6 +55,7 @@ New Features :ref:`HTTP header ` or abort fault configuration in HTTP fault filter. * filter: add `upstram_rq_time` stats to the GPRC stats filter. Disabled by default and can be enabled via :ref:`enable_upstream_stats `. +* grpc: added support for Google gRPC :ref:`custom channel arguments `. * grpc-json: added support for streaming response using `google.api.HttpBody `_. * gzip filter: added option to set zlib's next output buffer size. diff --git a/generated_api_shadow/envoy/config/core/v3/grpc_service.proto b/generated_api_shadow/envoy/config/core/v3/grpc_service.proto index 89ce3132ef05..04d14566934e 100644 --- a/generated_api_shadow/envoy/config/core/v3/grpc_service.proto +++ b/generated_api_shadow/envoy/config/core/v3/grpc_service.proto @@ -38,7 +38,7 @@ message GrpcService { string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}]; } - // [#next-free-field: 8] + // [#next-free-field: 9] message GoogleGrpc { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.GrpcService.GoogleGrpc"; @@ -201,6 +201,22 @@ message GrpcService { } } + // Channel arguments. + message ChannelArgs { + message Value { + // Pointer values are not supported, since they don't make any sense when + // delivered via the API. + oneof value_specifier { + string string_value = 1; + + int64 int_value = 2; + } + } + + // See grpc_types.h GRPC_ARG #defines for keys that work here. + map args = 1; + } + // The target URI when using the `Google C++ gRPC client // `_. SSL credentials will be supplied in // :ref:`channel_credentials `. @@ -235,6 +251,9 @@ message GrpcService { // How many bytes each stream can buffer internally. // If not set an implementation defined default is applied (1MiB). google.protobuf.UInt32Value per_stream_buffer_limit_bytes = 7; + + // Custom channels args. + ChannelArgs channel_args = 8; } reserved 4; diff --git a/generated_api_shadow/envoy/config/core/v4alpha/grpc_service.proto b/generated_api_shadow/envoy/config/core/v4alpha/grpc_service.proto index b547cfb7deec..3abff88ea4fc 100644 --- a/generated_api_shadow/envoy/config/core/v4alpha/grpc_service.proto +++ b/generated_api_shadow/envoy/config/core/v4alpha/grpc_service.proto @@ -38,7 +38,7 @@ message GrpcService { string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}]; } - // [#next-free-field: 8] + // [#next-free-field: 9] message GoogleGrpc { option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.GrpcService.GoogleGrpc"; @@ -203,6 +203,28 @@ message GrpcService { } } + // Channel arguments. 
+ message ChannelArgs { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs"; + + message Value { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.Value"; + + // Pointer values are not supported, since they don't make any sense when + // delivered via the API. + oneof value_specifier { + string string_value = 1; + + int64 int_value = 2; + } + } + + // See grpc_types.h GRPC_ARG #defines for keys that work here. + map args = 1; + } + // The target URI when using the `Google C++ gRPC client // `_. SSL credentials will be supplied in // :ref:`channel_credentials `. @@ -237,6 +259,9 @@ message GrpcService { // How many bytes each stream can buffer internally. // If not set an implementation defined default is applied (1MiB). google.protobuf.UInt32Value per_stream_buffer_limit_bytes = 7; + + // Custom channels args. + ChannelArgs channel_args = 8; } reserved 4; diff --git a/source/common/grpc/google_grpc_utils.cc b/source/common/grpc/google_grpc_utils.cc index 395ad33151f2..b3fe1e20a320 100644 --- a/source/common/grpc/google_grpc_utils.cc +++ b/source/common/grpc/google_grpc_utils.cc @@ -113,10 +113,31 @@ Buffer::InstancePtr GoogleGrpcUtils::makeBufferInstance(const grpc::ByteBuffer& return buffer; } +grpc::ChannelArguments +GoogleGrpcUtils::channelArgsFromConfig(const envoy::config::core::v3::GrpcService& config) { + grpc::ChannelArguments args; + for (const auto& channel_arg : config.google_grpc().channel_args().args()) { + switch (channel_arg.second.value_specifier_case()) { + case envoy::config::core::v3::GrpcService::GoogleGrpc::ChannelArgs::Value::kStringValue: { + args.SetString(channel_arg.first, channel_arg.second.string_value()); + break; + } + case envoy::config::core::v3::GrpcService::GoogleGrpc::ChannelArgs::Value::kIntValue: { + args.SetInt(channel_arg.first, channel_arg.second.int_value()); + break; + } + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } + } + return args; +} + std::shared_ptr GoogleGrpcUtils::createChannel(const envoy::config::core::v3::GrpcService& config, Api::Api& api) { std::shared_ptr creds = getGoogleGrpcChannelCredentials(config, api); - return CreateChannel(config.google_grpc().target_uri(), creds); + const grpc::ChannelArguments args = channelArgsFromConfig(config); + return CreateCustomChannel(config.google_grpc().target_uri(), creds, args); } } // namespace Grpc diff --git a/source/common/grpc/google_grpc_utils.h b/source/common/grpc/google_grpc_utils.h index 03e7c6f618cb..859a61ccfff9 100644 --- a/source/common/grpc/google_grpc_utils.h +++ b/source/common/grpc/google_grpc_utils.h @@ -31,9 +31,17 @@ class GoogleGrpcUtils { */ static Buffer::InstancePtr makeBufferInstance(const grpc::ByteBuffer& buffer); + /** + * Build grpc::ChannelArguments from gRPC service config. + * @param config Google gRPC config. + * @return grpc::ChannelArguments corresponding to config. + */ + static grpc::ChannelArguments + channelArgsFromConfig(const envoy::config::core::v3::GrpcService& config); + /** * Build gRPC channel based on the given GrpcService configuration. - * @param config Google gRPC config. + * @param config Google gRPC config. * @param api reference to the Api object * @return static std::shared_ptr a gRPC channel. 
*/ diff --git a/test/common/grpc/google_grpc_utils_test.cc b/test/common/grpc/google_grpc_utils_test.cc index f115d1ab3015..2b422af3f4eb 100644 --- a/test/common/grpc/google_grpc_utils_test.cc +++ b/test/common/grpc/google_grpc_utils_test.cc @@ -8,6 +8,9 @@ #include "gtest/gtest.h" +using testing::Pair; +using testing::UnorderedElementsAre; + namespace Envoy { namespace Grpc { namespace { @@ -83,6 +86,37 @@ TEST(GoogleGrpcUtilsTest, ByteBufferInstanceRoundTrip) { EXPECT_EQ(buffer_instance2->toString(), "test this"); } +// Validate that we build the grpc::ChannelArguments as expected. +TEST(GoogleGrpcUtilsTest, ChannelArgsFromConfig) { + const auto config = TestUtility::parseYaml(R"EOF( + google_grpc: + channel_args: + args: + grpc.http2.max_pings_without_data: { int_value: 3 } + grpc.default_authority: { string_value: foo } + grpc.http2.max_ping_strikes: { int_value: 5 } + grpc.ssl_target_name_override: { string_value: bar } + )EOF"); + const grpc::ChannelArguments channel_args = GoogleGrpcUtils::channelArgsFromConfig(config); + grpc_channel_args effective_args = channel_args.c_channel_args(); + std::unordered_map string_args; + std::unordered_map int_args; + for (uint32_t n = 0; n < effective_args.num_args; ++n) { + const grpc_arg arg = effective_args.args[n]; + ASSERT_TRUE(arg.type == GRPC_ARG_STRING || arg.type == GRPC_ARG_INTEGER); + if (arg.type == GRPC_ARG_STRING) { + string_args[arg.key] = arg.value.string; + } else if (arg.type == GRPC_ARG_INTEGER) { + int_args[arg.key] = arg.value.integer; + } + } + EXPECT_THAT(string_args, UnorderedElementsAre(Pair("grpc.ssl_target_name_override", "bar"), + Pair("grpc.primary_user_agent", "grpc-c++/1.25.0"), + Pair("grpc.default_authority", "foo"))); + EXPECT_THAT(int_args, UnorderedElementsAre(Pair("grpc.http2.max_ping_strikes", 5), + Pair("grpc.http2.max_pings_without_data", 3))); +} + } // namespace } // namespace Grpc } // namespace Envoy diff --git a/test/common/grpc/grpc_client_integration_test.cc b/test/common/grpc/grpc_client_integration_test.cc index 15d8567a77a3..51bcf4ad319d 100644 --- a/test/common/grpc/grpc_client_integration_test.cc +++ b/test/common/grpc/grpc_client_integration_test.cc @@ -133,6 +133,18 @@ TEST_P(GrpcClientIntegrationTest, BadReplyGrpcFraming) { dispatcher_helper_.runDispatcher(); } +// Validate that custom channel args can be set on the Google gRPC client. +// +TEST_P(GrpcClientIntegrationTest, CustomChannelArgs) { + SKIP_IF_GRPC_CLIENT(ClientType::EnvoyGrpc); + channel_args_.emplace_back("grpc.primary_user_agent", "test_agent"); + initialize(); + auto request = createRequest(empty_metadata_); + request->sendReply(); + dispatcher_helper_.runDispatcher(); + EXPECT_THAT(stream_headers_->get_("user-agent"), testing::HasSubstr("test_agent")); +} + // Validate that a reply with bad protobuf is handled as an INTERNAL gRPC error. 
TEST_P(GrpcClientIntegrationTest, BadReplyProtobuf) { initialize(); diff --git a/test/common/grpc/grpc_client_integration_test_harness.h b/test/common/grpc/grpc_client_integration_test_harness.h index 903deb0080e0..d466b5c886f8 100644 --- a/test/common/grpc/grpc_client_integration_test_harness.h +++ b/test/common/grpc/grpc_client_integration_test_harness.h @@ -307,6 +307,10 @@ class GrpcClientIntegrationTest : public GrpcClientIntegrationParamTest { auto* google_grpc = config.mutable_google_grpc(); google_grpc->set_target_uri(fake_upstream_->localAddress()->asString()); google_grpc->set_stat_prefix("fake_cluster"); + for (const auto& config_arg : channel_args_) { + (*google_grpc->mutable_channel_args()->mutable_args())[config_arg.first].set_string_value( + config_arg.second); + } fillServiceWideInitialMetadata(config); return config; } @@ -326,16 +330,16 @@ class GrpcClientIntegrationTest : public GrpcClientIntegrationParamTest { void expectInitialHeaders(FakeStream& fake_stream, const TestMetadata& initial_metadata) { AssertionResult result = fake_stream.waitForHeadersComplete(); RELEASE_ASSERT(result, result.message()); - Http::TestHeaderMapImpl stream_headers(fake_stream.headers()); - EXPECT_EQ("POST", stream_headers.get_(":method")); - EXPECT_EQ("/helloworld.Greeter/SayHello", stream_headers.get_(":path")); - EXPECT_EQ("application/grpc", stream_headers.get_("content-type")); - EXPECT_EQ("trailers", stream_headers.get_("te")); + stream_headers_ = std::make_unique(fake_stream.headers()); + EXPECT_EQ("POST", stream_headers_->get_(":method")); + EXPECT_EQ("/helloworld.Greeter/SayHello", stream_headers_->get_(":path")); + EXPECT_EQ("application/grpc", stream_headers_->get_("content-type")); + EXPECT_EQ("trailers", stream_headers_->get_("te")); for (const auto& value : initial_metadata) { - EXPECT_EQ(value.second, stream_headers.get_(value.first)); + EXPECT_EQ(value.second, stream_headers_->get_(value.first)); } for (const auto& value : service_wide_initial_metadata_) { - EXPECT_EQ(value.second, stream_headers.get_(value.first)); + EXPECT_EQ(value.second, stream_headers_->get_(value.first)); } } @@ -430,6 +434,8 @@ class GrpcClientIntegrationTest : public GrpcClientIntegrationParamTest { Stats::ScopeSharedPtr stats_scope_{stats_store_}; Grpc::StatNames google_grpc_stat_names_{stats_store_->symbolTable()}; TestMetadata service_wide_initial_metadata_; + std::unique_ptr stream_headers_; + std::vector> channel_args_; #ifdef ENVOY_GOOGLE_GRPC std::unique_ptr google_tls_; #endif From 582eb0ed1df4681063d551cb8966990599a2a5ba Mon Sep 17 00:00:00 2001 From: nigriMSFT Date: Fri, 29 May 2020 14:29:30 -0700 Subject: [PATCH 253/909] docs: improve Windows dev setup documentation (#11359) Commit Message: docs: improve Windows dev setup documentation Additional Description: - Add H3 for each platform in step 1 to improve readability - Improved Windows dev env in step 1 - Formatting of install dependencies - Added cmd line examples for the additional config steps - Shuffled around install dependencies to decrease chances of PATH ordering issues (VSBT before MSYS2) Risk Level: Low Testing: N/A Docs Changes: See description Release Notes: N/A Signed-off-by: Nick Grifka --- bazel/README.md | 82 ++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 64 insertions(+), 18 deletions(-) diff --git a/bazel/README.md b/bazel/README.md index f2a8683dc0df..237fb7bfc3eb 100644 --- a/bazel/README.md +++ b/bazel/README.md @@ -3,6 +3,7 @@ ## Installing Bazelisk as Bazel It is recommended to use 
[Bazelisk](https://github.com/bazelbuild/bazelisk) installed as `bazel`, to avoid Bazel compatibility issues. + On Linux, run the following commands: ``` @@ -15,8 +16,12 @@ On macOS, run the following command: brew install bazelbuild/tap/bazelisk ``` -On Windows, download [bazelisk-windows-amd64.exe](https://github.com/bazelbuild/bazelisk/releases/latest/download/bazelisk-windows-amd64.exe) -and save this binary in a directory on the PATH as `bazel.exe`. +On Windows, run the following commands: +``` +mkdir %USERPROFILE%\bazel +powershell Invoke-WebRequest https://github.com/bazelbuild/bazelisk/releases/latest/download/bazelisk-windows-amd64.exe -OutFile %USERPROFILE%\bazel\bazel.exe +set PATH=%PATH%;%USERPROFILE%\bazel +``` If you're building from a revision of Envoy prior to August 2019, which doesn't contain a `.bazelversion` file, run `ci/run_envoy_docker.sh "bazel version"` to find the right version of Bazel and set the version in the `USE_BAZEL_VERSION` environment variable to build. @@ -41,8 +46,9 @@ up-to-date with the latest security patches. See [this doc](https://github.com/envoyproxy/envoy/blob/master/bazel/EXTERNAL_DEPS.md#updating-an-external-dependency-version) for how to update or override dependencies. -1. Install external dependencies libtool, cmake, ninja, realpath and curl libraries separately. - On Ubuntu, run the following command: +1. Install external dependencies. + ### Ubuntu + On Ubuntu, run the following: ``` sudo apt-get install \ libtool \ @@ -56,11 +62,13 @@ for how to update or override dependencies. virtualenv ``` + ### Fedora On Fedora (maybe also other red hat distros), run the following: ``` dnf install cmake libtool libstdc++ libstdc++-static libatomic ninja-build lld patch aspell-en ``` + ### Linux On Linux, we recommend using the prebuilt Clang+LLVM package from [LLVM official site](http://releases.llvm.org/download.html). Extract the tar.xz and run the following: ``` @@ -72,6 +80,7 @@ for how to update or override dependencies. echo "build --config=clang" >> user.bazelrc ``` + ### macOS On macOS, you'll need to install several dependencies. This can be accomplished via [Homebrew](https://brew.sh/): ``` brew install coreutils wget cmake libtool go bazel automake ninja clang-format autoconf aspell @@ -97,34 +106,49 @@ for how to update or override dependencies. version of `ar` on the PATH, so if you run into issues building third party code like luajit consider uninstalling binutils. - On Windows, additional dependencies are required: - - Install the [MSYS2 shell](https://msys2.github.io/) and install the `diffutils`, `patch`, - `unzip`, and `zip` packages using `pacman`. Set the `BAZEL_SH` environment variable to the path - of the installed MSYS2 `bash.exe` executable. Setting the `MSYS2_ARG_CONV_EXCL` environment - variable to a value of `*` is often advisable to ensure argument parsing in the MSYS2 shell - behaves as expected. - - `Git` is required. The version installable via MSYS2 is sufficient. + ### Windows + On Windows, you'll need to install several dependencies manually. - Install the Windows-native [python3](https://www.python.org/downloads/), the POSIX flavor - available via MSYS2 will not work. You need to add a symlink for `python3.exe` pointing to + [python3](https://www.python.org/downloads/): Specifically, the Windows-native flavor. The POSIX flavor + available via MSYS2 will not work, nor will the Windows Store flavor.
You need to add a symlink for `python3.exe` pointing to the installed `python.exe` for Bazel rules which follow POSIX conventions. Be sure to add `pip.exe` to the PATH and install the `wheel` package. + ``` + mklink %USERPROFILE%\Python38\python3.exe %USERPROFILE%\Python38\python.exe + set PATH=%PATH%;%USERPROFILE%\Python38 + set PATH=%PATH%;%USERPROFILE%\Python38\Scripts + pip install wheel + ``` - For building with MSVC (the `msvc-cl` config option), you must install at least the VC++ - workload from the - [Build Tools for Visual Studio 2019](https://visualstudio.microsoft.com/downloads/#build-tools-for-visual-studio-2019). + [Build Tools for Visual Studio 2019](https://visualstudio.microsoft.com/downloads/#build-tools-for-visual-studio-2019): + For building with MSVC (the `msvc-cl` config option), you must install at least the VC++ workload. You may also download Visual Studio 2019 and use the Build Tools packaged with that installation. Earlier versions of VC++ Build Tools/Visual Studio are not recommended at this time. If installed in a non-standard filesystem location, be sure to set the `BAZEL_VC` environment variable to the path of the VC++ package to allow Bazel to find your installation of VC++. Use caution to ensure the `link.exe` that resolves on your PATH is from VC++ Build Tools and not MSYS2. + ``` + set BAZEL_VC=%USERPROFILE%\VSBT2019\VC + set PATH=%PATH%;%USERPROFILE%\VSBT2019\VC\Tools\MSVC\14.26.28801\bin\Hostx64\x64 + ``` Ensure `CMake` and `ninja` binaries are on the PATH. The versions packaged with VC++ Build Tools are sufficient. + ``` + set PATH=%PATH%;%USERPROFILE%\VSBT2019\Common7\IDE\CommonExtensions\Microsoft\CMake\CMake\bin + set PATH=%PATH%;%USERPROFILE%\VSBT2019\Common7\IDE\CommonExtensions\Microsoft\CMake\Ninja + ``` + [MSYS2 shell](https://msys2.github.io/): Set the `BAZEL_SH` environment variable to the path + of the installed MSYS2 `bash.exe` executable. Additionally, setting the `MSYS2_ARG_CONV_EXCL` environment + variable to a value of `*` is often advisable to ensure argument parsing in the MSYS2 shell + behaves as expected. + ``` + set PATH=%PATH%;%USERPROFILE%\msys64\usr\bin + set BAZEL_SH=%USERPROFILE%\msys64\usr\bin\bash.exe + set MSYS2_ARG_CONV_EXCL=* + ``` In addition, because of the behavior of the `rules_foreign_cc` component of Bazel, set the `TMPDIR` environment variable to a path usable as a temporary directory (e.g. `C:\Windows\TEMP`). This variable is used frequently by `mktemp` from MSYS2 in the Envoy Bazel @@ -133,6 +157,28 @@ for how to update or override dependencies. symlink linking `C:\c` to `C:\` in order to enable build scripts run via MSYS2 to access dependencies in the temporary directory specified above. If you are not using that script, you will need to create that symlink manually. + ``` + set TMPDIR=C:\Windows\TEMP + mklink /d C:\c C:\ + ``` + In the MSYS2 shell, install additional packages via pacman: + ``` + pacman -S diffutils patch unzip zip + ``` + + [Git](https://git-scm.com/downloads): The version installable via MSYS2 is also sufficient. + ``` + set PATH=%PATH%;%USERPROFILE%\Git\bin + ``` + + Lastly, persist environment variable changes. + ``` + setx PATH "%PATH%" + setx BAZEL_SH "%BAZEL_SH%" + setx MSYS2_ARG_CONV_EXCL "%MSYS2_ARG_CONV_EXCL%" + setx BAZEL_VC "%BAZEL_VC%" + setx TMPDIR "%TMPDIR%" + ``` 1. Install Golang on your machine. 
This is required as part of building [BoringSSL](https://boringssl.googlesource.com/boringssl/+/HEAD/BUILDING.md) and also for [Buildifier](https://github.com/bazelbuild/buildtools) which is used for formatting bazel BUILD files. From 0b6a6d0b15b104865f3aca123857bc5f08253bb4 Mon Sep 17 00:00:00 2001 From: yugantrana Date: Fri, 29 May 2020 23:23:48 +0000 Subject: [PATCH 254/909] udp: renaming ActiveUdpListener to ActiveRawUdpListener (#11352) Renaming the ActiveUdpListener to ActiveRawUdpListener under source/server/connection_handler_impl.h. The original name is not informative enough to distinguish it from ActiveQuicListener, which is also a UDP listener callback. Additional Description: Submitting as a part of intern ramp up. Risk Level: Low Testing: bazel build passes successfully after the renaming. Ran all tests. All 631 tests passed successfully. Signed-off-by: Yugant --- .../server/active_raw_udp_listener_config.cc | 2 +- source/server/connection_handler_impl.cc | 25 ++++++++++--------- source/server/connection_handler_impl.h | 17 ++++++------- 3 files changed, 22 insertions(+), 22 deletions(-) diff --git a/source/server/active_raw_udp_listener_config.cc b/source/server/active_raw_udp_listener_config.cc index eb242510219c..f34abe2fcb0e 100644 --- a/source/server/active_raw_udp_listener_config.cc +++ b/source/server/active_raw_udp_listener_config.cc @@ -15,7 +15,7 @@ Network::ConnectionHandler::ActiveListenerPtr ActiveRawUdpListenerFactory::createActiveUdpListener(Network::ConnectionHandler& parent, Event::Dispatcher& dispatcher, Network::ListenerConfig& config) { - return std::make_unique<ActiveUdpListener>(parent, dispatcher, config); + return std::make_unique<ActiveRawUdpListener>(parent, dispatcher, config); } ProtobufTypes::MessagePtr ActiveRawUdpListenerConfigFactory::createEmptyConfigProto() { diff --git a/source/server/connection_handler_impl.cc b/source/server/connection_handler_impl.cc index 060e940ec1f7..1b2d03794eb9 100644 --- a/source/server/connection_handler_impl.cc +++ b/source/server/connection_handler_impl.cc @@ -521,16 +521,17 @@ ConnectionHandlerImpl::ActiveTcpConnection::~ActiveTcpConnection() { active_connections_.listener_.parent_.decNumConnections(); } -ActiveUdpListener::ActiveUdpListener(Network::ConnectionHandler& parent, - Event::Dispatcher& dispatcher, Network::ListenerConfig& config) - : ActiveUdpListener( +ActiveRawUdpListener::ActiveRawUdpListener(Network::ConnectionHandler& parent, + Event::Dispatcher& dispatcher, + Network::ListenerConfig& config) + : ActiveRawUdpListener( parent, dispatcher.createUdpListener(config.listenSocketFactory().getListenSocket(), *this), config) {} -ActiveUdpListener::ActiveUdpListener(Network::ConnectionHandler& parent, - Network::UdpListenerPtr&& listener, - Network::ListenerConfig& config) +ActiveRawUdpListener::ActiveRawUdpListener(Network::ConnectionHandler& parent, + Network::UdpListenerPtr&& listener, + Network::ListenerConfig& config) : ConnectionHandlerImpl::ActiveListenerImplBase(parent, &config), udp_listener_(std::move(listener)), read_filter_(nullptr) { // Create the filter chain on creating a new udp listener @@ -544,26 +545,26 @@ ActiveUdpListener::ActiveUdpListener(Network::ConnectionHandler& parent, } } -void ActiveUdpListener::onData(Network::UdpRecvData& data) { read_filter_->onData(data); } +void ActiveRawUdpListener::onData(Network::UdpRecvData& data) { read_filter_->onData(data); } -void ActiveUdpListener::onReadReady() {} +void ActiveRawUdpListener::onReadReady() {} -void ActiveUdpListener::onWriteReady(const Network::Socket&) { +void
ActiveRawUdpListener::onWriteReady(const Network::Socket&) { // TODO(sumukhs): This is not used now. When write filters are implemented, this is a // trigger to invoke the on write ready API on the filters which is when they can write // data } -void ActiveUdpListener::onReceiveError(Api::IoError::IoErrorCode error_code) { +void ActiveRawUdpListener::onReceiveError(Api::IoError::IoErrorCode error_code) { read_filter_->onReceiveError(error_code); } -void ActiveUdpListener::addReadFilter(Network::UdpListenerReadFilterPtr&& filter) { +void ActiveRawUdpListener::addReadFilter(Network::UdpListenerReadFilterPtr&& filter) { ASSERT(read_filter_ == nullptr, "Cannot add a 2nd UDP read filter"); read_filter_ = std::move(filter); } -Network::UdpListener& ActiveUdpListener::udpListener() { return *udp_listener_; } +Network::UdpListener& ActiveRawUdpListener::udpListener() { return *udp_listener_; } } // namespace Server } // namespace Envoy diff --git a/source/server/connection_handler_impl.h b/source/server/connection_handler_impl.h index 821e628995c9..bc6f00ad657f 100644 --- a/source/server/connection_handler_impl.h +++ b/source/server/connection_handler_impl.h @@ -323,17 +323,16 @@ class ConnectionHandlerImpl : public Network::ConnectionHandler, /** * Wrapper for an active udp listener owned by this handler. - * TODO(danzh): rename to ActiveRawUdpListener. */ -class ActiveUdpListener : public Network::UdpListenerCallbacks, - public ConnectionHandlerImpl::ActiveListenerImplBase, - public Network::UdpListenerFilterManager, - public Network::UdpReadFilterCallbacks { +class ActiveRawUdpListener : public Network::UdpListenerCallbacks, + public ConnectionHandlerImpl::ActiveListenerImplBase, + public Network::UdpListenerFilterManager, + public Network::UdpReadFilterCallbacks { public: - ActiveUdpListener(Network::ConnectionHandler& parent, Event::Dispatcher& dispatcher, - Network::ListenerConfig& config); - ActiveUdpListener(Network::ConnectionHandler& parent, Network::UdpListenerPtr&& listener, - Network::ListenerConfig& config); + ActiveRawUdpListener(Network::ConnectionHandler& parent, Event::Dispatcher& dispatcher, + Network::ListenerConfig& config); + ActiveRawUdpListener(Network::ConnectionHandler& parent, Network::UdpListenerPtr&& listener, + Network::ListenerConfig& config); // Network::UdpListenerCallbacks void onData(Network::UdpRecvData& data) override; From 3c36973832ce20c4345bc5213eaec9ae589dac63 Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Sat, 30 May 2020 14:19:31 -0700 Subject: [PATCH 255/909] devex: several fix for devcontainer (#11342) Signed-off-by: Lizan Zhou --- .devcontainer/Dockerfile | 6 ++++-- .devcontainer/devcontainer.json | 6 +++++- .devcontainer/setup.sh | 9 ++++++--- tools/vscode/refresh_compdb.sh | 6 +++--- 4 files changed, 18 insertions(+), 9 deletions(-) diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index d17a43c3f431..35439f303f24 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -10,12 +10,14 @@ ENV ENVOY_STDLIB=libstdc++ ENV DEBIAN_FRONTEND=noninteractive RUN apt-get -y update \ && apt-get -y install --no-install-recommends libpython2.7 net-tools psmisc vim 2>&1 \ - # + # Change pcap gid to some larger number which doesn't conflict with common gid (1000) + && groupmod -g 65515 pcap && chgrp pcap /usr/sbin/tcpdump \ # Create a non-root user to use if preferred - see https://aka.ms/vscode-remote/containers/non-root-user. 
&& groupadd --gid $USER_GID $USERNAME \ - && useradd -s /bin/bash --uid $USER_UID --gid $USER_GID -m $USERNAME \ + && useradd -s /bin/bash --uid $USER_UID --gid $USER_GID -m $USERNAME -d /build \ # [Optional] Add sudo support for non-root user && echo $USERNAME ALL=\(root\) NOPASSWD:ALL > /etc/sudoers.d/$USERNAME \ && chmod 0440 /etc/sudoers.d/$USERNAME ENV DEBIAN_FRONTEND= +ENV PATH=/opt/llvm/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index b4c56432cec5..58eda81be329 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -18,6 +18,10 @@ "files.exclude": { "**/.clangd/**": true, "**/bazel-*/**": true + }, + "files.watcherExclude": { + "**/.clangd/**": true, + "**/bazel-*/**": true } }, "remoteUser": "vscode", @@ -30,4 +34,4 @@ "llvm-vs-code-extensions.vscode-clangd", "webfreak.debug" ] -} \ No newline at end of file +} diff --git a/.devcontainer/setup.sh b/.devcontainer/setup.sh index 4dd2ddbff92b..9a8e4ab5ac97 100755 --- a/.devcontainer/setup.sh +++ b/.devcontainer/setup.sh @@ -8,8 +8,11 @@ BAZELRC_FILE=~/.bazelrc bazel/setup_clang.sh /opt/llvm # Use generated toolchain config because we know the base container is the one we're using in RBE. # Not using libc++ here because clangd will raise some tidy issue in libc++ header as of version 9. echo "build --config=rbe-toolchain-clang" >> ~/.bazelrc -echo "build --symlink_prefix=/" >> ~/.bazelrc echo "build ${BAZEL_BUILD_EXTRA_OPTIONS}" | tee -a ~/.bazelrc -echo "startup --output_base=/build/tmp" -[[ ! -z "${BUILD_DIR}" ]] && sudo chown -R "$(id -u):$(id -g)" ${BUILD_DIR} \ No newline at end of file +# Ideally we want this line so bazel doesn't pollute things outside of the devcontainer, but some of +# API tooling (proto_sync) depends on symlink like bazel-bin. +# TODO(lizan): Fix API tooling and enable this again +#echo "build --symlink_prefix=/" >> ~/.bazelrc + +[[ ! 
-z "${BUILD_DIR}" ]] && sudo chown -R "$(id -u):$(id -g)" ${BUILD_DIR} diff --git a/tools/vscode/refresh_compdb.sh b/tools/vscode/refresh_compdb.sh index c40074be87e3..1f6a279256eb 100755 --- a/tools/vscode/refresh_compdb.sh +++ b/tools/vscode/refresh_compdb.sh @@ -1,9 +1,9 @@ #!/usr/bin/env bash -tools/proto_format/proto_format.sh fix +[[ -z "${SKIP_PROTO_FORMAT}" ]] && tools/proto_format/proto_format.sh fix -# Setting platform suffix here so the compdb headers won't be overwritten by another bazel run -BAZEL_BUILD_OPTIONS=--platform_suffix=-compdb tools/gen_compilation_database.py --run_bazel_build -k +# Setting TEST_TMPDIR here so the compdb headers won't be overwritten by another bazel run +TEST_TMPDIR=${BUILD_DIR:-/tmp}/envoy-compdb tools/gen_compilation_database.py --run_bazel_build -k # Kill clangd to reload the compilation database killall -v /opt/llvm/bin/clangd From e08a7a65c02bbd411c7c5bb9b4eea03026b728d4 Mon Sep 17 00:00:00 2001 From: Sunjay Bhatia Date: Sat, 30 May 2020 17:24:39 -0400 Subject: [PATCH 256/909] Windows: Remove hard coded /dev/null path from test configs (#11328) Replace with platform specific null device and add helper method to get it in TestEnvironment Signed-off-by: Sunjay Bhatia Co-authored-by: William A Rowe Jr --- .../access_log/access_log_manager_impl.cc | 23 +++------- .../google_com_proxy_port_0.v2.yaml | 2 +- test/config/integration/server.yaml | 4 +- .../integration/server_unix_listener.yaml | 2 +- .../integration/server_xds.bootstrap.yaml | 2 +- test/config/utility.cc | 44 ++++++++++--------- test/config/utility.h | 2 +- test/integration/hotrestart_test.sh | 4 ++ test/server/server_test.cc | 7 +-- .../callbacks_stats_sink_bootstrap.yaml | 2 +- .../server/cluster_dupe_bootstrap.yaml | 2 +- .../cluster_health_check_bootstrap.yaml | 4 +- .../test_data/server/node_bootstrap.pb_text | 2 +- .../test_data/server/node_bootstrap.yaml | 2 +- .../server/node_bootstrap_no_admin_port.yaml | 2 +- ...e_bootstrap_with_admin_socket_options.yaml | 2 +- .../server/proxy_version_bootstrap.yaml | 2 +- .../server/stats_sink_bootstrap.yaml | 2 +- test/test_common/environment.cc | 12 +++++ test/test_common/environment.h | 6 +++ 20 files changed, 72 insertions(+), 56 deletions(-) diff --git a/source/common/access_log/access_log_manager_impl.cc b/source/common/access_log/access_log_manager_impl.cc index 534b4be0b547..f173904d2dff 100644 --- a/source/common/access_log/access_log_manager_impl.cc +++ b/source/common/access_log/access_log_manager_impl.cc @@ -25,26 +25,15 @@ void AccessLogManagerImpl::reopen() { } } -AccessLogFileSharedPtr AccessLogManagerImpl::createAccessLog(const std::string& file_name_arg) { - const std::string* file_name = &file_name_arg; -#ifdef WIN32 - // Preserve the expected behavior of specifying path: /dev/null on Windows - static const std::string windows_dev_null("NUL"); - if (file_name_arg.compare("/dev/null") == 0) { - file_name = static_cast(&windows_dev_null); +AccessLogFileSharedPtr AccessLogManagerImpl::createAccessLog(const std::string& file_name) { + if (access_logs_.count(file_name)) { + return access_logs_[file_name]; } -#endif - std::unordered_map::const_iterator access_log = - access_logs_.find(*file_name); - if (access_log != access_logs_.end()) { - return access_log->second; - } - - access_logs_[*file_name] = std::make_shared( - api_.fileSystem().createFile(*file_name), dispatcher_, lock_, file_stats_, + access_logs_[file_name] = std::make_shared( + api_.fileSystem().createFile(file_name), dispatcher_, lock_, file_stats_, 
file_flush_interval_msec_, api_.threadFactory()); - return access_logs_[*file_name]; + return access_logs_[file_name]; } AccessLogFileImpl::AccessLogFileImpl(Filesystem::FilePtr&& file, Event::Dispatcher& dispatcher, diff --git a/test/config/integration/google_com_proxy_port_0.v2.yaml b/test/config/integration/google_com_proxy_port_0.v2.yaml index 47b1cfd0f6de..c67b6845960d 100644 --- a/test/config/integration/google_com_proxy_port_0.v2.yaml +++ b/test/config/integration/google_com_proxy_port_0.v2.yaml @@ -1,5 +1,5 @@ admin: - access_log_path: /dev/null + access_log_path: "{{ null_device_path }}" address: socket_address: address: "{{ ip_any_address }}" diff --git a/test/config/integration/server.yaml b/test/config/integration/server.yaml index 10e1175c17d9..c3a97cb48705 100644 --- a/test/config/integration/server.yaml +++ b/test/config/integration/server.yaml @@ -53,7 +53,7 @@ static_resources: - name: accesslog typed_config: "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog - path: /dev/null + path: {{ null_device_path }} filter: or_filter: filters: @@ -181,7 +181,7 @@ layered_runtime: - name: admin admin_layer: {} admin: - access_log_path: "/dev/null" + access_log_path: "{{ null_device_path }}" profile_path: "{{ test_tmpdir }}/envoy.prof" address: socket_address: diff --git a/test/config/integration/server_unix_listener.yaml b/test/config/integration/server_unix_listener.yaml index b4f3d15becf0..2c3328cd1026 100644 --- a/test/config/integration/server_unix_listener.yaml +++ b/test/config/integration/server_unix_listener.yaml @@ -39,7 +39,7 @@ static_resources: cluster_manager: {} watchdog: {} admin: - access_log_path: "/dev/null" + access_log_path: "{{ null_device_path }}" address: socket_address: address: "{{ ip_loopback_address }}" diff --git a/test/config/integration/server_xds.bootstrap.yaml b/test/config/integration/server_xds.bootstrap.yaml index 26eafc3e79d6..70c4302e3725 100644 --- a/test/config/integration/server_xds.bootstrap.yaml +++ b/test/config/integration/server_xds.bootstrap.yaml @@ -4,7 +4,7 @@ dynamic_resources: cds_config: path: {{ cds_json_path }} admin: - access_log_path: /dev/null + access_log_path: {{ null_device_path }} address: socket_address: address: {{ ntop_ip_loopback_address }} diff --git a/test/config/utility.cc b/test/config/utility.cc index e6d196e2ae08..afd05d26bd50 100644 --- a/test/config/utility.cc +++ b/test/config/utility.cc @@ -31,16 +31,16 @@ namespace Envoy { std::string ConfigHelper::baseConfig() { - return R"EOF( + return fmt::format(R"EOF( admin: - access_log_path: /dev/null + access_log_path: {} address: socket_address: address: 127.0.0.1 port_value: 0 dynamic_resources: lds_config: - path: /dev/null + path: {} static_resources: secrets: - name: "secret_static_0" @@ -68,13 +68,14 @@ std::string ConfigHelper::baseConfig() { socket_address: address: 127.0.0.1 port_value: 0 -)EOF"; +)EOF", + TestEnvironment::nullDevicePath(), TestEnvironment::nullDevicePath()); } std::string ConfigHelper::baseUdpListenerConfig() { - return R"EOF( + return fmt::format(R"EOF( admin: - access_log_path: /dev/null + access_log_path: {} address: socket_address: address: 127.0.0.1 @@ -98,7 +99,8 @@ std::string ConfigHelper::baseUdpListenerConfig() { address: 0.0.0.0 port_value: 0 protocol: udp -)EOF"; +)EOF", + TestEnvironment::nullDevicePath()); } std::string ConfigHelper::tcpProxyConfig() { @@ -121,7 +123,7 @@ name: "envoy.filters.listener.tls_inspector" } std::string ConfigHelper::httpProxyConfig() { - return absl::StrCat(baseConfig(), R"EOF( + 
return absl::StrCat(baseConfig(), fmt::format(R"EOF( filter_chains: filters: name: http @@ -136,10 +138,10 @@ std::string ConfigHelper::httpProxyConfig() { access_log: name: accesslog filter: - not_health_check_filter: {} + not_health_check_filter: {{}} typed_config: "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog - path: /dev/null + path: {} route_config: virtual_hosts: name: integration @@ -150,14 +152,15 @@ std::string ConfigHelper::httpProxyConfig() { prefix: "/" domains: "*" name: route_config_0 -)EOF"); +)EOF", + TestEnvironment::nullDevicePath())); } // TODO(danzh): For better compatibility with HTTP integration test framework, // it's better to combine with HTTP_PROXY_CONFIG, and use config modifiers to // specify quic specific things. std::string ConfigHelper::quicHttpProxyConfig() { - return absl::StrCat(baseUdpListenerConfig(), R"EOF( + return absl::StrCat(baseUdpListenerConfig(), fmt::format(R"EOF( filter_chains: transport_socket: name: envoy.transport_sockets.quic @@ -172,10 +175,10 @@ std::string ConfigHelper::quicHttpProxyConfig() { access_log: name: file_access_log filter: - not_health_check_filter: {} + not_health_check_filter: {{}} typed_config: "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog - path: /dev/null + path: {} route_config: virtual_hosts: name: integration @@ -188,7 +191,8 @@ std::string ConfigHelper::quicHttpProxyConfig() { name: route_config_0 udp_listener_config: udp_listener_name: "quiche_quic_listener" -)EOF"); +)EOF", + TestEnvironment::nullDevicePath())); } std::string ConfigHelper::defaultBufferFilter() { @@ -248,7 +252,7 @@ std::string ConfigHelper::discoveredClustersBootstrap(const std::string& api_typ return fmt::format( R"EOF( admin: - access_log_path: /dev/null + access_log_path: {} address: socket_address: address: 127.0.0.1 @@ -305,7 +309,7 @@ std::string ConfigHelper::discoveredClustersBootstrap(const std::string& api_typ prefix: "/cluster2" domains: "*" )EOF", - api_type); + TestEnvironment::nullDevicePath(), api_type); } // TODO(#6327) cleaner approach to testing with static config. @@ -337,13 +341,13 @@ std::string ConfigHelper::adsBootstrap(const std::string& api_type) { lb_policy: ROUND_ROBIN http2_protocol_options: {{}} admin: - access_log_path: /dev/null + access_log_path: {} address: socket_address: address: 127.0.0.1 port_value: 0 )EOF", - api_type); + TestEnvironment::nullDevicePath(), api_type); } envoy::config::cluster::v3::Cluster ConfigHelper::buildCluster(const std::string& name, int port, @@ -762,7 +766,7 @@ bool ConfigHelper::setAccessLog(const std::string& filename, absl::string_view f if (getFilterFromListener("http") == nullptr) { return false; } - // Replace /dev/null with a real path for the file access log. + // Replace null device with a real path for the file access log. envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager hcm_config; loadHttpConnectionManager(hcm_config); diff --git a/test/config/utility.h b/test/config/utility.h index dc49abaa01e4..0983132be998 100644 --- a/test/config/utility.h +++ b/test/config/utility.h @@ -156,7 +156,7 @@ class ConfigHelper { void addSslConfig() { addSslConfig({}); } // Set the HTTP access log for the first HCM (if present) to a given file. The default is - // /dev/null. + // the platform's null device. bool setAccessLog(const std::string& filename, absl::string_view format = ""); // Set the listener access log for the first listener to a given file. 
diff --git a/test/integration/hotrestart_test.sh b/test/integration/hotrestart_test.sh index 665ea44a264a..1efca96ccf9e 100755 --- a/test/integration/hotrestart_test.sh +++ b/test/integration/hotrestart_test.sh @@ -23,6 +23,7 @@ if [[ -z "${ENVOY_IP_TEST_VERSIONS}" ]] || [[ "${ENVOY_IP_TEST_VERSIONS}" == "al sed -e "s#{{ ip_loopback_address }}#127.0.0.1#" | \ sed -e "s#{{ reuse_port }}#false#" | \ sed -e "s#{{ dns_lookup_family }}#V4_ONLY#" | \ + sed -e "s#{{ null_device_path }}#/dev/null#" | \ cat > "${HOT_RESTART_JSON_V4}" JSON_TEST_ARRAY+=("${HOT_RESTART_JSON_V4}") fi @@ -37,6 +38,7 @@ if [[ -z "${ENVOY_IP_TEST_VERSIONS}" ]] || [[ "${ENVOY_IP_TEST_VERSIONS}" == "al sed -e "s#{{ ip_loopback_address }}#::1#" | \ sed -e "s#{{ reuse_port }}#false#" | \ sed -e "s#{{ dns_lookup_family }}#v6_only#" | \ + sed -e "s#{{ null_device_path }}#/dev/null#" | \ cat > "${HOT_RESTART_JSON_V6}" JSON_TEST_ARRAY+=("${HOT_RESTART_JSON_V6}") fi @@ -48,6 +50,7 @@ SOCKET_DIR="$(mktemp -d /tmp/envoy_test_hotrestart.XXXXXX)" cat "${TEST_SRCDIR}/envoy"/test/config/integration/server_unix_listener.yaml | sed -e "s#{{ socket_dir }}#${SOCKET_DIR}#" | \ sed -e "s#{{ ip_loopback_address }}#127.0.0.1#" | \ + sed -e "s#{{ null_device_path }}#/dev/null#" | \ cat > "${HOT_RESTART_JSON_UDS}" JSON_TEST_ARRAY+=("${HOT_RESTART_JSON_UDS}") @@ -61,6 +64,7 @@ cat "${TEST_SRCDIR}/envoy"/test/config/integration/server.yaml | sed -e "s#{{ ip_loopback_address }}#127.0.0.1#" | \ sed -e "s#{{ reuse_port }}#true#" | \ sed -e "s#{{ dns_lookup_family }}#V4_ONLY#" | \ + sed -e "s#{{ null_device_path }}#/dev/null#" | \ cat > "${HOT_RESTART_JSON_REUSE_PORT}" JSON_TEST_ARRAY+=("${HOT_RESTART_JSON_REUSE_PORT}") diff --git a/test/server/server_test.cc b/test/server/server_test.cc index 6975f2ceecfb..b897be2418a1 100644 --- a/test/server/server_test.cc +++ b/test/server/server_test.cc @@ -186,7 +186,7 @@ class ServerInstanceImplTestBase { std::make_unique>(), *thread_local_, Thread::threadFactoryForTest(), Filesystem::fileSystemForTest(), std::move(process_context_)); - EXPECT_TRUE(server_->api().fileSystem().fileExists("/dev/null")); + EXPECT_TRUE(server_->api().fileSystem().fileExists(TestEnvironment::nullDevicePath())); } void initializeWithHealthCheckParams(const std::string& bootstrap_path, const double timeout, @@ -205,7 +205,7 @@ class ServerInstanceImplTestBase { std::make_unique>(), *thread_local_, Thread::threadFactoryForTest(), Filesystem::fileSystemForTest(), nullptr); - EXPECT_TRUE(server_->api().fileSystem().fileExists("/dev/null")); + EXPECT_TRUE(server_->api().fileSystem().fileExists(TestEnvironment::nullDevicePath())); } Thread::ThreadPtr startTestServer(const std::string& bootstrap_path, @@ -868,9 +868,10 @@ namespace { void bindAndListenTcpSocket(const Network::Address::InstanceConstSharedPtr& address, const Network::Socket::OptionsSharedPtr& options) { auto socket = std::make_unique(address, options, true); + auto& os_sys_calls = Api::OsSysCallsSingleton::get(); // Some kernels erroneously allow `bind` without SO_REUSEPORT for addresses // with some other socket already listening on it, see #7636. - if (::listen(socket->ioHandle().fd(), 1) != 0) { + if (SOCKET_FAILURE(os_sys_calls.listen(socket->ioHandle().fd(), 1).rc_)) { // Mimic bind exception for the test simplicity. 
throw Network::SocketBindException(fmt::format("cannot listen: {}", strerror(errno)), errno); } diff --git a/test/server/test_data/server/callbacks_stats_sink_bootstrap.yaml b/test/server/test_data/server/callbacks_stats_sink_bootstrap.yaml index cf8108dd2d7a..3a065d86580d 100644 --- a/test/server/test_data/server/callbacks_stats_sink_bootstrap.yaml +++ b/test/server/test_data/server/callbacks_stats_sink_bootstrap.yaml @@ -5,7 +5,7 @@ node: zone: bootstrap_zone sub_zone: bootstrap_sub_zone admin: - access_log_path: /dev/null + access_log_path: {{ null_device_path }} address: socket_address: address: {{ ntop_ip_loopback_address }} diff --git a/test/server/test_data/server/cluster_dupe_bootstrap.yaml b/test/server/test_data/server/cluster_dupe_bootstrap.yaml index b4c5422108c9..0bff93617c65 100644 --- a/test/server/test_data/server/cluster_dupe_bootstrap.yaml +++ b/test/server/test_data/server/cluster_dupe_bootstrap.yaml @@ -1,5 +1,5 @@ admin: - access_log_path: /dev/null + access_log_path: {{ null_device_path }} address: socket_address: address: {{ ntop_ip_loopback_address }} diff --git a/test/server/test_data/server/cluster_health_check_bootstrap.yaml b/test/server/test_data/server/cluster_health_check_bootstrap.yaml index 7d928f9ca433..0d285796e4b1 100644 --- a/test/server/test_data/server/cluster_health_check_bootstrap.yaml +++ b/test/server/test_data/server/cluster_health_check_bootstrap.yaml @@ -1,5 +1,5 @@ admin: - access_log_path: /dev/null + access_log_path: {{ null_device_path }} address: socket_address: address: {{ ntop_ip_loopback_address }} @@ -14,4 +14,4 @@ static_resources: unhealthy_threshold: 1 healthy_threshold: 1 http_health_check: - path: "/" \ No newline at end of file + path: "/" diff --git a/test/server/test_data/server/node_bootstrap.pb_text b/test/server/test_data/server/node_bootstrap.pb_text index 5f184a784d13..f47df39a8351 100644 --- a/test/server/test_data/server/node_bootstrap.pb_text +++ b/test/server/test_data/server/node_bootstrap.pb_text @@ -7,7 +7,7 @@ node { } } admin { - access_log_path: "/dev/null" + access_log_path: "{{ null_device_path }}" address { socket_address { address: "{{ ntop_ip_loopback_address }}" diff --git a/test/server/test_data/server/node_bootstrap.yaml b/test/server/test_data/server/node_bootstrap.yaml index 2b9f69e6df7a..bce1b610dc7c 100644 --- a/test/server/test_data/server/node_bootstrap.yaml +++ b/test/server/test_data/server/node_bootstrap.yaml @@ -5,7 +5,7 @@ node: zone: bootstrap_zone sub_zone: bootstrap_sub_zone admin: - access_log_path: /dev/null + access_log_path: {{ null_device_path }} address: socket_address: address: {{ ntop_ip_loopback_address }} diff --git a/test/server/test_data/server/node_bootstrap_no_admin_port.yaml b/test/server/test_data/server/node_bootstrap_no_admin_port.yaml index 54fbe9a01e88..a7a602ccbdb8 100644 --- a/test/server/test_data/server/node_bootstrap_no_admin_port.yaml +++ b/test/server/test_data/server/node_bootstrap_no_admin_port.yaml @@ -5,4 +5,4 @@ node: zone: bootstrap_zone sub_zone: bootstrap_sub_zone admin: - access_log_path: /dev/null + access_log_path: {{ null_device_path }} diff --git a/test/server/test_data/server/node_bootstrap_with_admin_socket_options.yaml b/test/server/test_data/server/node_bootstrap_with_admin_socket_options.yaml index c13b6d783838..77ba41f9985d 100644 --- a/test/server/test_data/server/node_bootstrap_with_admin_socket_options.yaml +++ b/test/server/test_data/server/node_bootstrap_with_admin_socket_options.yaml @@ -5,7 +5,7 @@ node: zone: bootstrap_zone sub_zone: 
bootstrap_sub_zone admin: - access_log_path: /dev/null + access_log_path: {{ null_device_path }} address: socket_address: address: {{ ntop_ip_loopback_address }} diff --git a/test/server/test_data/server/proxy_version_bootstrap.yaml b/test/server/test_data/server/proxy_version_bootstrap.yaml index 5af740e874cc..253a7a7e3f11 100644 --- a/test/server/test_data/server/proxy_version_bootstrap.yaml +++ b/test/server/test_data/server/proxy_version_bootstrap.yaml @@ -5,7 +5,7 @@ node: zone: bootstrap_zone sub_zone: bootstrap_sub_zone admin: - access_log_path: /dev/null + access_log_path: {{ null_device_path }} address: socket_address: address: {{ ntop_ip_loopback_address }} diff --git a/test/server/test_data/server/stats_sink_bootstrap.yaml b/test/server/test_data/server/stats_sink_bootstrap.yaml index cb5b85e2a5a7..ebd3c531d507 100644 --- a/test/server/test_data/server/stats_sink_bootstrap.yaml +++ b/test/server/test_data/server/stats_sink_bootstrap.yaml @@ -5,7 +5,7 @@ node: zone: bootstrap_zone sub_zone: bootstrap_sub_zone admin: - access_log_path: /dev/null + access_log_path: {{ null_device_path }} address: socket_address: address: {{ ntop_ip_loopback_address }} diff --git a/test/test_common/environment.cc b/test/test_common/environment.cc index b4a73c5248dd..7aa41d13d55c 100644 --- a/test/test_common/environment.cc +++ b/test/test_common/environment.cc @@ -287,6 +287,14 @@ const std::string& TestEnvironment::temporaryDirectory() { CONSTRUCT_ON_FIRST_USE(std::string, getTemporaryDirectory()); } +const std::string& TestEnvironment::nullDevicePath() { +#ifdef WIN32 + CONSTRUCT_ON_FIRST_USE(std::string, "NUL"); +#else + CONSTRUCT_ON_FIRST_USE(std::string, "/dev/null"); +#endif +} + std::string TestEnvironment::runfilesDirectory(const std::string& workspace) { RELEASE_ASSERT(runfiles_ != nullptr, ""); return runfiles_->Rlocation(workspace); @@ -315,6 +323,10 @@ std::string TestEnvironment::substitute(const std::string& str, out_json_string = std::regex_replace(out_json_string, port_regex, it.second); } + // Substitute platform specific null device. + const std::regex null_device_regex(R"(\{\{ null_device_path \}\})"); + out_json_string = std::regex_replace(out_json_string, null_device_regex, nullDevicePath()); + // Substitute IP loopback addresses. const std::regex loopback_address_regex(R"(\{\{ ip_loopback_address \}\})"); out_json_string = std::regex_replace(out_json_string, loopback_address_regex, diff --git a/test/test_common/environment.h b/test/test_common/environment.h index bc0a30b23ce7..e0e73efe002a 100644 --- a/test/test_common/environment.h +++ b/test/test_common/environment.h @@ -128,6 +128,12 @@ class TestEnvironment { return absl::StrCat(temporaryDirectory(), "/", path); } + /** + * Obtain platform specific null device path + * @return const std::string& null device path + */ + static const std::string& nullDevicePath(); + /** * Obtain read-only test input data directory. * @param workspace the name of the Bazel workspace where the input data is. From c6a5f78d226d9cc846d9b9869c6bfb5a9c056990 Mon Sep 17 00:00:00 2001 From: Joshua Marantz Date: Sat, 30 May 2020 17:27:18 -0400 Subject: [PATCH 257/909] test: close a few gaps in coverage that looked easy. 
(#11344) Signed-off-by: Joshua Marantz --- include/envoy/http/header_map.h | 12 ------------ source/common/stats/thread_local_store.cc | 6 +----- test/common/http/header_map_impl_test.cc | 6 ++++++ test/common/stats/thread_local_store_test.cc | 16 ++++++++++++++++ 4 files changed, 23 insertions(+), 17 deletions(-) diff --git a/include/envoy/http/header_map.h b/include/envoy/http/header_map.h index 96af5483d5cd..a518395a3320 100644 --- a/include/envoy/http/header_map.h +++ b/include/envoy/http/header_map.h @@ -70,18 +70,6 @@ class LowerCaseString { std::string string_; }; -/** - * Lower case string hasher. - */ -struct LowerCaseStringHash { - size_t operator()(const LowerCaseString& value) const { return HashUtil::xxHash64(value.get()); } -}; - -/** - * Convenient type for unordered set of lower case string. - */ -using LowerCaseStrUnorderedSet = std::unordered_set; - /** * Convenient type for a vector of lower case string and string pair. */ diff --git a/source/common/stats/thread_local_store.cc b/source/common/stats/thread_local_store.cc index e526537d453a..e2eeae81f2f4 100644 --- a/source/common/stats/thread_local_store.cc +++ b/source/common/stats/thread_local_store.cc @@ -72,11 +72,7 @@ void ThreadLocalStoreImpl::removeRejectedStats(StatMapClass& map, StatListClass& } bool ThreadLocalStoreImpl::rejects(StatName stat_name) const { - // Don't both elaborating the StatName there are no pattern-based - // exclusions;/inclusions. - if (stats_matcher_->acceptsAll()) { - return false; - } + ASSERT(!stats_matcher_->acceptsAll()); // TODO(ambuc): If stats_matcher_ depends on regexes, this operation (on the // hot path) could become prohibitively expensive. Revisit this usage in the diff --git a/test/common/http/header_map_impl_test.cc b/test/common/http/header_map_impl_test.cc index a3a695941cd5..9ef0a7a7af15 100644 --- a/test/common/http/header_map_impl_test.cc +++ b/test/common/http/header_map_impl_test.cc @@ -1349,5 +1349,11 @@ TEST(HeaderMapImplTest, InlineHeaderByteSize) { } } +TEST(HeaderMapImplTest, ValidHeaderString) { + EXPECT_TRUE(validHeaderString("abc")); + EXPECT_FALSE(validHeaderString(absl::string_view("a\000bc", 4))); + EXPECT_FALSE(validHeaderString("abc\n")); +} + } // namespace Http } // namespace Envoy diff --git a/test/common/stats/thread_local_store_test.cc b/test/common/stats/thread_local_store_test.cc index 3dc88b03c9b3..8bb02d1004d0 100644 --- a/test/common/stats/thread_local_store_test.cc +++ b/test/common/stats/thread_local_store_test.cc @@ -643,6 +643,7 @@ TEST_F(LookupWithStatNameTest, NotFound) { EXPECT_FALSE(store_->findCounter(not_found)); EXPECT_FALSE(store_->findGauge(not_found)); EXPECT_FALSE(store_->findHistogram(not_found)); + EXPECT_FALSE(store_->findTextReadout(not_found)); } class StatsMatcherTLSTest : public StatsThreadLocalStoreTest { @@ -716,6 +717,7 @@ TEST_F(StatsMatcherTLSTest, TestNoOpStatImpls) { store_->histogramFromString("noop_histogram", Stats::Histogram::Unit::Unspecified); EXPECT_EQ(noop_histogram.name(), ""); EXPECT_FALSE(noop_histogram.used()); + EXPECT_EQ(Stats::Histogram::Unit::Null, noop_histogram.unit()); Histogram& noop_histogram_2 = store_->histogramFromString("noop_histogram_2", Stats::Histogram::Unit::Unspecified); EXPECT_EQ(&noop_histogram, &noop_histogram_2); @@ -938,6 +940,12 @@ class RememberStatsMatcherTest : public testing::TestWithParam { }; } + LookupStatFn lookupTextReadoutFn() { + return [this](const std::string& stat_name) -> std::string { + return scope_->textReadoutFromString(stat_name).name(); + }; + } + 
Stats::SymbolTablePtr symbol_table_; NiceMock main_thread_dispatcher_; NiceMock tls_; @@ -979,6 +987,14 @@ TEST_P(RememberStatsMatcherTest, HistogramRejectsAll) { testRejectsAll(lookupHis TEST_P(RememberStatsMatcherTest, HistogramAcceptsAll) { testAcceptsAll(lookupHistogramFn()); } +TEST_P(RememberStatsMatcherTest, TextReadoutRejectOne) { + testRememberMatcher(lookupTextReadoutFn()); +} + +TEST_P(RememberStatsMatcherTest, TextReadoutRejectsAll) { testRejectsAll(lookupTextReadoutFn()); } + +TEST_P(RememberStatsMatcherTest, TextReadoutAcceptsAll) { testAcceptsAll(lookupTextReadoutFn()); } + TEST_F(StatsThreadLocalStoreTest, RemoveRejectedStats) { store_->initializeThreading(main_thread_dispatcher_, tls_); Counter& counter = store_->counterFromString("c1"); From ca7bea2dba6be517a21f3c3e28330449329a3f87 Mon Sep 17 00:00:00 2001 From: htuch Date: Sun, 31 May 2020 22:34:07 -0400 Subject: [PATCH 258/909] docs: clarify VRP starts at 1.15.0. (#11375) Signed-off-by: Harvey Tuch --- docs/root/intro/arch_overview/security/google_vrp.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/root/intro/arch_overview/security/google_vrp.rst b/docs/root/intro/arch_overview/security/google_vrp.rst index 5d94cf11e8f8..b05adc3aaf32 100644 --- a/docs/root/intro/arch_overview/security/google_vrp.rst +++ b/docs/root/intro/arch_overview/security/google_vrp.rst @@ -74,7 +74,8 @@ We supply Docker images that act as the reference environment for this program: * `envoyproxy/envoy-google-vrp `_ images are based on Envoy point releases. Only the latest point release at the time of vulnerability - submission is eligible for the program. + submission is eligible for the program. The first point release available for VRP will be the + 1.15.0 Envoy release. * `envoyproxy/envoy-google-vrp-dev `_ images are based on Envoy master builds. Only builds within the last 5 days at the time of From eeaebc1bea6d5b52368df18378797b1dcc163141 Mon Sep 17 00:00:00 2001 From: Teju Nareddy Date: Mon, 1 Jun 2020 08:12:53 -0400 Subject: [PATCH 259/909] fuzz: Support access logging in uber filter fuzz test (#11288) * Support access logging in uber filter fuzz test Description: Some filters (Tap, ESPv2 ServiceControl) implement custom access loggers. For completeness, fuzz these with the same upstream/downstream headers used for decoders/encoders. Signed-off-by: Teju Nareddy --- .../filters/http/common/fuzz/uber_filter.cc | 14 ++++++++++++++ .../filters/http/common/fuzz/uber_filter.h | 6 ++++++ 2 files changed, 20 insertions(+) diff --git a/test/extensions/filters/http/common/fuzz/uber_filter.cc b/test/extensions/filters/http/common/fuzz/uber_filter.cc index ae7fbc2d9f92..d5f8d346fdb4 100644 --- a/test/extensions/filters/http/common/fuzz/uber_filter.cc +++ b/test/extensions/filters/http/common/fuzz/uber_filter.cc @@ -34,6 +34,10 @@ UberFilterFuzzer::UberFilterFuzzer() { encoder_filter_ = filter; encoder_filter_->setEncoderFilterCallbacks(encoder_callbacks_); })); + // This filter supports access logging. + ON_CALL(filter_callback_, addAccessLogHandler(_)) + .WillByDefault( + Invoke([&](AccessLog::InstanceSharedPtr handler) -> void { access_logger_ = handler; })); // Set expectations for particular filters that may get fuzzed. 
perFilterSetup(); } @@ -153,6 +157,12 @@ void UberFilterFuzzer::sendTrailers(Http::StreamEncoderFilter* filter, filter->encodeTrailers(response_trailers_); } +void UberFilterFuzzer::accessLog(AccessLog::Instance* access_logger, + const StreamInfo::StreamInfo& stream_info) { + ENVOY_LOG_MISC(debug, "Access logging"); + access_logger->log(&request_headers_, &response_headers_, &response_trailers_, stream_info); +} + void UberFilterFuzzer::fuzz( const envoy::extensions::filters::network::http_connection_manager::v3::HttpFilter& proto_config, @@ -180,6 +190,9 @@ void UberFilterFuzzer::fuzz( if (encoder_filter_ != nullptr) { runData(encoder_filter_.get(), upstream_data); } + if (access_logger_ != nullptr) { + accessLog(access_logger_.get(), stream_info_); + } reset(); } @@ -195,6 +208,7 @@ void UberFilterFuzzer::reset() { } encoder_filter_.reset(); + access_logger_.reset(); request_headers_.clear(); response_headers_.clear(); request_trailers_.clear(); diff --git a/test/extensions/filters/http/common/fuzz/uber_filter.h b/test/extensions/filters/http/common/fuzz/uber_filter.h index af6b060f6a80..10582b101248 100644 --- a/test/extensions/filters/http/common/fuzz/uber_filter.h +++ b/test/extensions/filters/http/common/fuzz/uber_filter.h @@ -2,6 +2,7 @@ #include "test/mocks/buffer/mocks.h" #include "test/mocks/http/mocks.h" #include "test/mocks/server/mocks.h" +#include "test/mocks/stream_info/mocks.h" namespace Envoy { namespace Extensions { @@ -19,6 +20,9 @@ class UberFilterFuzzer { // This executes the filter decoders/encoders with the fuzzed data. template void runData(FilterType* filter, const test::fuzz::HttpData& data); + // This executes the access logger with the fuzzed headers/trailers. + void accessLog(AccessLog::Instance* access_logger, const StreamInfo::StreamInfo& stream_info); + // For fuzzing proto data, guide the mutator to useful 'Any' types. static void guideAnyProtoType(test::fuzz::HttpData* mutable_data, uint choice); @@ -54,6 +58,7 @@ class UberFilterFuzzer { Http::FilterFactoryCb cb_; NiceMock connection_; Network::Address::InstanceConstSharedPtr addr_; + NiceMock stream_info_; // Mocked callbacks. NiceMock decoder_callbacks_; @@ -62,6 +67,7 @@ class UberFilterFuzzer { // Filter constructed from the config. Http::StreamDecoderFilterSharedPtr decoder_filter_; Http::StreamEncoderFilterSharedPtr encoder_filter_; + AccessLog::InstanceSharedPtr access_logger_; // Headers/trailers need to be saved for the lifetime of the the filter, // so save them as member variables. From c6642bac6f81cfe391129108e264a071c151a8a2 Mon Sep 17 00:00:00 2001 From: Auni Ahsan Date: Mon, 1 Jun 2020 11:18:59 -0400 Subject: [PATCH 260/909] drain manager: Refactor DrainManagerImpl with simulated time (#11138) Signed-off-by: Auni Ahsan --- include/envoy/server/drain_manager.h | 6 +- source/server/BUILD | 1 + source/server/drain_manager_impl.cc | 38 +++++----- source/server/drain_manager_impl.h | 18 ++--- source/server/server.cc | 2 +- test/integration/protocol_integration_test.cc | 2 +- test/server/drain_manager_impl_test.cc | 71 +++++++++++++++---- 7 files changed, 90 insertions(+), 48 deletions(-) diff --git a/include/envoy/server/drain_manager.h b/include/envoy/server/drain_manager.h index 214ed65c0f93..0f29b0cd3eed 100644 --- a/include/envoy/server/drain_manager.h +++ b/include/envoy/server/drain_manager.h @@ -16,10 +16,10 @@ class DrainManager : public Network::DrainDecision { public: /** * Invoked to begin the drain procedure. (Making drain close operations more likely). 
- * @param completion supplies the completion that will be called when the drain sequence is - * finished. The parameter is optional and can be an unassigned function. + * @param drain_complete_cb will be invoked once the drain sequence is finished. The parameter is + * optional and can be an unassigned function. */ - virtual void startDrainSequence(std::function completion) PURE; + virtual void startDrainSequence(std::function drain_complete_cb) PURE; /** * Invoked in the newly launched primary process to begin the parent shutdown sequence. At the end diff --git a/source/server/BUILD b/source/server/BUILD index 7ffee07345b9..18042399a07e 100644 --- a/source/server/BUILD +++ b/source/server/BUILD @@ -108,6 +108,7 @@ envoy_cc_library( deps = [ ":watchdog_lib", "//include/envoy/api:api_interface", + "//include/envoy/common:time_interface", "//include/envoy/event:timer_interface", "//include/envoy/server:configuration_interface", "//include/envoy/server:guarddog_interface", diff --git a/source/server/drain_manager_impl.cc b/source/server/drain_manager_impl.cc index d9b98e5eaa84..a89912774b50 100644 --- a/source/server/drain_manager_impl.cc +++ b/source/server/drain_manager_impl.cc @@ -5,10 +5,7 @@ #include #include "envoy/config/listener/v3/listener.pb.h" -#include "envoy/event/dispatcher.h" #include "envoy/event/timer.h" -#include "envoy/runtime/runtime.h" -#include "envoy/server/instance.h" #include "common/common/assert.h" @@ -35,30 +32,29 @@ bool DrainManagerImpl::drainClose() const { return false; } - // We use the tick time as in increasing chance that we shutdown connections. - return static_cast(drain_time_completed_.load()) > - (server_.random().random() % server_.options().drainTime().count()); -} - -void DrainManagerImpl::drainSequenceTick() { - ENVOY_LOG(trace, "drain tick #{}", drain_time_completed_.load()); - ASSERT(drain_time_completed_.load() < server_.options().drainTime().count()); - ++drain_time_completed_; - - if (drain_time_completed_.load() < server_.options().drainTime().count()) { - drain_tick_timer_->enableTimer(std::chrono::milliseconds(1000)); - } else if (drain_sequence_completion_) { - drain_sequence_completion_(); + const MonotonicTime current_time = server_.dispatcher().timeSource().monotonicTime(); + if (current_time >= drain_deadline_) { + return true; } + + // P(return true) = elapsed time / drain timeout + const auto remaining_time = + std::chrono::duration_cast(drain_deadline_ - current_time); + ASSERT(server_.options().drainTime() >= remaining_time); + const auto elapsed_time = server_.options().drainTime() - remaining_time; + return static_cast(elapsed_time.count()) > + (server_.random().random() % server_.options().drainTime().count()); } -void DrainManagerImpl::startDrainSequence(std::function completion) { - drain_sequence_completion_ = completion; +void DrainManagerImpl::startDrainSequence(std::function drain_complete_cb) { + ASSERT(drain_complete_cb); ASSERT(!draining_); ASSERT(!drain_tick_timer_); draining_ = true; - drain_tick_timer_ = server_.dispatcher().createTimer([this]() -> void { drainSequenceTick(); }); - drainSequenceTick(); + drain_tick_timer_ = server_.dispatcher().createTimer(drain_complete_cb); + const std::chrono::seconds drain_delay(server_.options().drainTime()); + drain_tick_timer_->enableTimer(drain_delay); + drain_deadline_ = server_.dispatcher().timeSource().monotonicTime() + drain_delay; } void DrainManagerImpl::startParentShutdownSequence() { diff --git a/source/server/drain_manager_impl.h b/source/server/drain_manager_impl.h 
index 7b0e8d651988..38a02465b761 100644 --- a/source/server/drain_manager_impl.h +++ b/source/server/drain_manager_impl.h @@ -1,9 +1,10 @@ #pragma once -#include #include +#include "envoy/common/time.h" #include "envoy/config/listener/v3/listener.pb.h" +#include "envoy/event/timer.h" #include "envoy/server/drain_manager.h" #include "envoy/server/instance.h" @@ -22,21 +23,22 @@ class DrainManagerImpl : Logger::Loggable, public DrainManager public: DrainManagerImpl(Instance& server, envoy::config::listener::v3::Listener::DrainType drain_type); - // Server::DrainManager + // Network::DrainDecision bool drainClose() const override; - void startDrainSequence(std::function completion) override; + + // Server::DrainManager + void startDrainSequence(std::function drain_complete_cb) override; void startParentShutdownSequence() override; private: - void drainSequenceTick(); - Instance& server_; const envoy::config::listener::v3::Listener::DrainType drain_type_; - Event::TimerPtr drain_tick_timer_; + std::atomic draining_{false}; - std::atomic drain_time_completed_{}; + Event::TimerPtr drain_tick_timer_; + MonotonicTime drain_deadline_; + Event::TimerPtr parent_shutdown_timer_; - std::function drain_sequence_completion_; }; } // namespace Server diff --git a/source/server/server.cc b/source/server/server.cc index d01cc5387956..58ef0505a227 100644 --- a/source/server/server.cc +++ b/source/server/server.cc @@ -136,7 +136,7 @@ Upstream::ClusterManager& InstanceImpl::clusterManager() { return *config_.clust void InstanceImpl::drainListeners() { ENVOY_LOG(info, "closing and draining listeners"); listener_manager_->stopListeners(ListenerManager::StopListenersType::All); - drain_manager_->startDrainSequence(nullptr); + drain_manager_->startDrainSequence([] {}); } void InstanceImpl::failHealthcheck(bool fail) { diff --git a/test/integration/protocol_integration_test.cc b/test/integration/protocol_integration_test.cc index 5f8fdda34197..0099264492d6 100644 --- a/test/integration/protocol_integration_test.cc +++ b/test/integration/protocol_integration_test.cc @@ -281,7 +281,7 @@ TEST_P(ProtocolIntegrationTest, DrainClose) { absl::Notification drain_sequence_started; test_server_->server().dispatcher().post([this, &drain_sequence_started]() { - test_server_->drainManager().startDrainSequence(nullptr); + test_server_->drainManager().startDrainSequence([] {}); drain_sequence_started.Notify(); }); drain_sequence_started.WaitForNotification(); diff --git a/test/server/drain_manager_impl_test.cc b/test/server/drain_manager_impl_test.cc index 94e5ce3c099e..92a3febcef25 100644 --- a/test/server/drain_manager_impl_test.cc +++ b/test/server/drain_manager_impl_test.cc @@ -17,10 +17,13 @@ namespace Envoy { namespace Server { namespace { -class DrainManagerImplTest : public testing::Test { +constexpr int DrainTimeSeconds(600); + +class DrainManagerImplTest : public testing::Test, public Event::TestUsingSimulatedTime { public: DrainManagerImplTest() { - ON_CALL(server_.options_, drainTime()).WillByDefault(Return(std::chrono::seconds(600))); + ON_CALL(server_.options_, drainTime()) + .WillByDefault(Return(std::chrono::seconds(DrainTimeSeconds))); ON_CALL(server_.options_, parentShutdownTime()) .WillByDefault(Return(std::chrono::seconds(900))); } @@ -48,21 +51,61 @@ TEST_F(DrainManagerImplTest, Default) { // Test drain sequence. 
Event::MockTimer* drain_timer = new Event::MockTimer(&server_.dispatcher_); - EXPECT_CALL(*drain_timer, enableTimer(_, _)); + const auto expected_delay = std::chrono::milliseconds(DrainTimeSeconds * 1000); + EXPECT_CALL(*drain_timer, enableTimer(expected_delay, nullptr)); ReadyWatcher drain_complete; drain_manager.startDrainSequence([&drain_complete]() -> void { drain_complete.ready(); }); + EXPECT_CALL(drain_complete, ready()); + drain_timer->invokeCallback(); +} - // 600s which is the default drain time. - for (size_t i = 0; i < 599; i++) { - if (i < 598) { - EXPECT_CALL(*drain_timer, enableTimer(_, _)); - } else { - EXPECT_CALL(drain_complete, ready()); - } - drain_timer->invokeCallback(); - } +TEST_F(DrainManagerImplTest, DrainDeadline) { + // TODO(auni53): Add integration tests for this once TestDrainManager is + // removed. + DrainManagerImpl drain_manager(server_, envoy::config::listener::v3::Listener::DEFAULT); - EXPECT_CALL(server_, healthCheckFailed()).WillOnce(Return(false)); + // Ensure drainClose() behaviour is determined by the deadline. + drain_manager.startDrainSequence([] {}); + EXPECT_CALL(server_, healthCheckFailed()).WillRepeatedly(Return(false)); + ON_CALL(server_.random_, random()).WillByDefault(Return(DrainTimeSeconds * 2 - 1)); + ON_CALL(server_.options_, drainTime()) + .WillByDefault(Return(std::chrono::seconds(DrainTimeSeconds))); + + // random() should be called when elapsed time < drain timeout + EXPECT_CALL(server_.random_, random()).Times(2); + EXPECT_FALSE(drain_manager.drainClose()); + simTime().advanceTimeWait(std::chrono::seconds(DrainTimeSeconds - 1)); + EXPECT_FALSE(drain_manager.drainClose()); + simTime().advanceTimeWait(std::chrono::seconds(1)); + EXPECT_TRUE(drain_manager.drainClose()); + + // Test that this still works if remaining time is negative + simTime().advanceTimeWait(std::chrono::seconds(1)); + EXPECT_TRUE(drain_manager.drainClose()); + simTime().advanceTimeWait(std::chrono::seconds(500)); + EXPECT_TRUE(drain_manager.drainClose()); +} + +TEST_F(DrainManagerImplTest, DrainDeadlineProbability) { + ON_CALL(server_.random_, random()).WillByDefault(Return(4)); + ON_CALL(server_.options_, drainTime()).WillByDefault(Return(std::chrono::seconds(3))); + + DrainManagerImpl drain_manager(server_, envoy::config::listener::v3::Listener::DEFAULT); + + EXPECT_CALL(server_, healthCheckFailed()).WillOnce(Return(true)); + EXPECT_TRUE(drain_manager.drainClose()); + EXPECT_CALL(server_, healthCheckFailed()).WillRepeatedly(Return(false)); + EXPECT_FALSE(drain_manager.drainClose()); + drain_manager.startDrainSequence([] {}); + + // random() should be called when elapsed time < drain timeout + EXPECT_CALL(server_.random_, random()).Times(2); + // Current elapsed time is 0 + // drainClose() will return true when elapsed time > (4 % 3 == 1). 
+ EXPECT_FALSE(drain_manager.drainClose()); + simTime().advanceTimeWait(std::chrono::seconds(2)); + EXPECT_TRUE(drain_manager.drainClose()); + simTime().advanceTimeWait(std::chrono::seconds(1)); EXPECT_TRUE(drain_manager.drainClose()); } @@ -70,7 +113,7 @@ TEST_F(DrainManagerImplTest, ModifyOnly) { InSequence s; DrainManagerImpl drain_manager(server_, envoy::config::listener::v3::Listener::MODIFY_ONLY); - EXPECT_CALL(server_, healthCheckFailed()).Times(0); + EXPECT_CALL(server_, healthCheckFailed()).Times(0); // Listener check will short-circuit EXPECT_FALSE(drain_manager.drainClose()); } From ea6903cef0206ec73ccb329efa6209eb21466569 Mon Sep 17 00:00:00 2001 From: Wayne Zhang Date: Mon, 1 Jun 2020 08:20:11 -0700 Subject: [PATCH 261/909] Fix a bug with empty format in file access_logger config (#11376) If format in file access_logger is an empty string, it should use default access log format. But PR: #11125 changed its behaviors to log nothing. This PR fixes the bug; to be backward compatible to use default log format. Signed-off-by: Wayne Zhang --- .../extensions/access_loggers/file/config.cc | 40 +-- test/extensions/access_loggers/file/BUILD | 1 + .../access_loggers/file/config_test.cc | 229 ++++++++---------- 3 files changed, 124 insertions(+), 146 deletions(-) diff --git a/source/extensions/access_loggers/file/config.cc b/source/extensions/access_loggers/file/config.cc index 2b5e6877cf2d..60c536131389 100644 --- a/source/extensions/access_loggers/file/config.cc +++ b/source/extensions/access_loggers/file/config.cc @@ -30,30 +30,34 @@ FileAccessLogFactory::createAccessLogInstance(const Protobuf::Message& config, config, context.messageValidationVisitor()); Formatter::FormatterPtr formatter; - if (fal_config.has_log_format()) { - formatter = Formatter::SubstitutionFormatStringUtils::fromProtoConfig(fal_config.log_format()); - } else if (fal_config.has_json_format()) { + switch (fal_config.access_log_format_case()) { + case envoy::extensions::access_loggers::file::v3::FileAccessLog::AccessLogFormatCase::kFormat: + if (fal_config.format().empty()) { + formatter = Formatter::SubstitutionFormatUtils::defaultSubstitutionFormatter(); + } else { + envoy::config::core::v3::SubstitutionFormatString sff_config; + sff_config.set_text_format(fal_config.format()); + formatter = Formatter::SubstitutionFormatStringUtils::fromProtoConfig(sff_config); + } + break; + case envoy::extensions::access_loggers::file::v3::FileAccessLog::AccessLogFormatCase::kJsonFormat: formatter = Formatter::SubstitutionFormatStringUtils::createJsonFormatter( fal_config.json_format(), false); - } else if (fal_config.access_log_format_case() != - envoy::extensions::access_loggers::file::v3::FileAccessLog::AccessLogFormatCase:: - ACCESS_LOG_FORMAT_NOT_SET) { + break; + case envoy::extensions::access_loggers::file::v3::FileAccessLog::AccessLogFormatCase:: + kTypedJsonFormat: { envoy::config::core::v3::SubstitutionFormatString sff_config; - switch (fal_config.access_log_format_case()) { - case envoy::extensions::access_loggers::file::v3::FileAccessLog::AccessLogFormatCase::kFormat: - sff_config.set_text_format(fal_config.format()); - break; - case envoy::extensions::access_loggers::file::v3::FileAccessLog::AccessLogFormatCase:: - kTypedJsonFormat: - *sff_config.mutable_json_format() = fal_config.typed_json_format(); - break; - default: - NOT_REACHED_GCOVR_EXCL_LINE; - } + *sff_config.mutable_json_format() = fal_config.typed_json_format(); formatter = Formatter::SubstitutionFormatStringUtils::fromProtoConfig(sff_config); + break; } 
- if (!formatter) { + case envoy::extensions::access_loggers::file::v3::FileAccessLog::AccessLogFormatCase::kLogFormat: + formatter = Formatter::SubstitutionFormatStringUtils::fromProtoConfig(fal_config.log_format()); + break; + case envoy::extensions::access_loggers::file::v3::FileAccessLog::AccessLogFormatCase:: + ACCESS_LOG_FORMAT_NOT_SET: formatter = Formatter::SubstitutionFormatUtils::defaultSubstitutionFormatter(); + break; } return std::make_shared(fal_config.path(), std::move(filter), std::move(formatter), diff --git a/test/extensions/access_loggers/file/BUILD b/test/extensions/access_loggers/file/BUILD index 78434bcf535b..268e48d29837 100644 --- a/test/extensions/access_loggers/file/BUILD +++ b/test/extensions/access_loggers/file/BUILD @@ -19,6 +19,7 @@ envoy_extension_cc_test( "//source/extensions/access_loggers/file:config", "//test/mocks/server:server_mocks", "//test/test_common:environment_lib", + "//test/test_common:utility_lib", "@envoy_api//envoy/config/accesslog/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/access_loggers/file/v3:pkg_cc_proto", ], diff --git a/test/extensions/access_loggers/file/config_test.cc b/test/extensions/access_loggers/file/config_test.cc index d6225b29ff2a..f0c7e6dd9359 100644 --- a/test/extensions/access_loggers/file/config_test.cc +++ b/test/extensions/access_loggers/file/config_test.cc @@ -10,17 +10,20 @@ #include "extensions/access_loggers/well_known_names.h" #include "test/mocks/server/mocks.h" +#include "test/test_common/utility.h" #include "gmock/gmock.h" #include "gtest/gtest.h" +using testing::Return; + namespace Envoy { namespace Extensions { namespace AccessLoggers { namespace File { namespace { -TEST(FileAccessLogConfigTest, ValidateFail) { +TEST(FileAccessLogNegativeTest, ValidateFail) { NiceMock context; EXPECT_THROW(FileAccessLogFactory().createAccessLogInstance( @@ -28,7 +31,7 @@ TEST(FileAccessLogConfigTest, ValidateFail) { ProtoValidationException); } -TEST(FileAccessLogConfigTest, ConfigureFromProto) { +TEST(FileAccessLogNegativeTest, InvalidNameFail) { envoy::config::accesslog::v3::AccessLog config; NiceMock context; @@ -39,167 +42,137 @@ TEST(FileAccessLogConfigTest, ConfigureFromProto) { EXPECT_THROW_WITH_MESSAGE(AccessLog::AccessLogFactory::fromProto(config, context), EnvoyException, "Didn't find a registered implementation for name: 'INVALID'"); - - envoy::extensions::access_loggers::file::v3::FileAccessLog fal_config; - fal_config.set_path("/dev/null"); - - config.mutable_typed_config()->PackFrom(fal_config); - - config.set_name(AccessLogNames::get().File); - - AccessLog::InstanceSharedPtr log = AccessLog::AccessLogFactory::fromProto(config, context); - - EXPECT_NE(nullptr, log); - EXPECT_NE(nullptr, dynamic_cast(log.get())); -} - -TEST(FileAccessLogConfigTest, DEPRECATED_FEATURE_TEST(FileAccessLogTest)) { - auto factory = - Registry::FactoryRegistry::getFactory( - AccessLogNames::get().File); - ASSERT_NE(nullptr, factory); - - ProtobufTypes::MessagePtr message = factory->createEmptyConfigProto(); - ASSERT_NE(nullptr, message); - - envoy::extensions::access_loggers::file::v3::FileAccessLog file_access_log; - file_access_log.set_path("/dev/null"); - file_access_log.set_format("%START_TIME%"); - TestUtility::jsonConvert(file_access_log, *message); - - AccessLog::FilterPtr filter; - NiceMock context; - - AccessLog::InstanceSharedPtr instance = - factory->createAccessLogInstance(*message, std::move(filter), context); - EXPECT_NE(nullptr, instance); - EXPECT_NE(nullptr, dynamic_cast(instance.get())); } 
-TEST(FileAccessLogConfigTest, DEPRECATED_FEATURE_TEST(FileAccessLogJsonTest)) { - envoy::config::accesslog::v3::AccessLog config; - - NiceMock context; - EXPECT_THROW_WITH_MESSAGE(AccessLog::AccessLogFactory::fromProto(config, context), EnvoyException, - "Provided name for static registration lookup was empty."); +class FileAccessLogTest : public testing::Test { +public: + FileAccessLogTest() = default; - config.set_name("INVALID"); - - EXPECT_THROW_WITH_MESSAGE(AccessLog::AccessLogFactory::fromProto(config, context), EnvoyException, - "Didn't find a registered implementation for name: 'INVALID'"); - - envoy::extensions::access_loggers::file::v3::FileAccessLog fal_config; - fal_config.set_path("/dev/null"); - - ProtobufWkt::Value string_value; - string_value.set_string_value("%PROTOCOL%"); - - auto json_format = fal_config.mutable_json_format(); - (*json_format->mutable_fields())["protocol"] = string_value; - - EXPECT_EQ( - fal_config.access_log_format_case(), - envoy::extensions::access_loggers::file::v3::FileAccessLog::AccessLogFormatCase::kJsonFormat); - config.mutable_typed_config()->PackFrom(fal_config); - - config.set_name(AccessLogNames::get().File); - - AccessLog::InstanceSharedPtr log = AccessLog::AccessLogFactory::fromProto(config, context); - - EXPECT_NE(nullptr, log); - EXPECT_NE(nullptr, dynamic_cast(log.get())); -} - -TEST(FileAccessLogConfigTest, DEPRECATED_FEATURE_TEST(FileAccessLogTypedJsonTest)) { - envoy::config::accesslog::v3::AccessLog config; - - envoy::extensions::access_loggers::file::v3::FileAccessLog fal_config; - fal_config.set_path("/dev/null"); - - ProtobufWkt::Value string_value; - string_value.set_string_value("%PROTOCOL%"); + void runTest(const std::string& yaml, absl::string_view expected, bool is_json) { + envoy::extensions::access_loggers::file::v3::FileAccessLog fal_config; + TestUtility::loadFromYaml(yaml, fal_config); - auto json_format = fal_config.mutable_typed_json_format(); - (*json_format->mutable_fields())["protocol"] = string_value; + envoy::config::accesslog::v3::AccessLog config; + config.mutable_typed_config()->PackFrom(fal_config); - EXPECT_EQ(fal_config.access_log_format_case(), - envoy::extensions::access_loggers::file::v3::FileAccessLog::AccessLogFormatCase:: - kTypedJsonFormat); - config.mutable_typed_config()->PackFrom(fal_config); + auto file = std::make_shared(); + EXPECT_CALL(context_.access_log_manager_, createAccessLog(fal_config.path())) + .WillOnce(Return(file)); + + AccessLog::InstanceSharedPtr logger = AccessLog::AccessLogFactory::fromProto(config, context_); + + absl::Time abslStartTime = + TestUtility::parseTime("Dec 18 01:50:34 2018 GMT", "%b %e %H:%M:%S %Y GMT"); + stream_info_.start_time_ = absl::ToChronoTime(abslStartTime); + EXPECT_CALL(stream_info_, upstreamHost()).WillRepeatedly(Return(nullptr)); + stream_info_.response_code_ = 200; + + EXPECT_CALL(*file, write(_)).WillOnce(Invoke([expected, is_json](absl::string_view got) { + if (is_json) { + EXPECT_TRUE(TestUtility::jsonStringEqual(std::string(got), std::string(expected))); + } else { + EXPECT_EQ(got, expected); + } + })); + logger->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_); + } - config.set_name(AccessLogNames::get().File); + Http::TestRequestHeaderMapImpl request_headers_{{":method", "GET"}, {":path", "/bar/foo"}}; + Http::TestResponseHeaderMapImpl response_headers_; + Http::TestResponseTrailerMapImpl response_trailers_; + NiceMock stream_info_; - NiceMock context; - AccessLog::InstanceSharedPtr log = 
AccessLog::AccessLogFactory::fromProto(config, context); + NiceMock context_; +}; - EXPECT_NE(nullptr, log); - EXPECT_NE(nullptr, dynamic_cast(log.get())); +TEST_F(FileAccessLogTest, DEPRECATED_FEATURE_TEST(LegacyFormatEmpty)) { + runTest( + R"( + path: "/foo" + format: "" +)", + "[2018-12-18T01:50:34.000Z] \"GET /bar/foo -\" 200 - 0 0 - - \"-\" \"-\" \"-\" \"-\" \"-\"\n", + false); } -TEST(FileAccessLogConfigTest, DEPRECATED_FEATURE_TEST(FileAccessLogDeprecatedFormat)) { - const std::vector configs{ +TEST_F(FileAccessLogTest, DEPRECATED_FEATURE_TEST(LegacyFormatPlainText)) { + runTest( R"( path: "/foo" format: "plain_text" )", + "plain_text", false); +} + +TEST_F(FileAccessLogTest, DEPRECATED_FEATURE_TEST(LegacyJsonFormat)) { + runTest( R"( path: "/foo" json_format: - text: "plain_text" + text: "plain text" + path: "%REQ(:path)%" + code: "%RESPONSE_CODE%" )", + R"({ + "text": "plain text", + "path": "/bar/foo", + "code": "200" +})", + true); +} + +TEST_F(FileAccessLogTest, DEPRECATED_FEATURE_TEST(LegacyTypedJsonFormat)) { + runTest( R"( path: "/foo" typed_json_format: - text: "plain_text" + text: "plain text" + path: "%REQ(:path)%" + code: "%RESPONSE_CODE%" )", - }; - - for (const auto& yaml : configs) { - envoy::extensions::access_loggers::file::v3::FileAccessLog fal_config; - TestUtility::loadFromYaml(yaml, fal_config); - - envoy::config::accesslog::v3::AccessLog config; - config.mutable_typed_config()->PackFrom(fal_config); - - NiceMock context; - AccessLog::InstanceSharedPtr log = AccessLog::AccessLogFactory::fromProto(config, context); + R"({ + "text": "plain text", + "path": "/bar/foo", + "code": 200 +})", + true); +} - EXPECT_NE(nullptr, log); - EXPECT_NE(nullptr, dynamic_cast(log.get())); - } +TEST_F(FileAccessLogTest, EmptyFormat) { + runTest( + R"( + path: "/foo" +)", + "[2018-12-18T01:50:34.000Z] \"GET /bar/foo -\" 200 - 0 0 - - \"-\" \"-\" \"-\" \"-\" \"-\"\n", + false); } -TEST(FileAccessLogConfigTest, FileAccessLogCheckLogFormat) { - const std::vector configs{ - // log_format: text_format +TEST_F(FileAccessLogTest, LogFormatText) { + runTest( R"( path: "/foo" log_format: - text_format: "plain_text" + text_format: "plain_text - %REQ(:path)% - %RESPONSE_CODE%" )", + "plain_text - /bar/foo - 200", false); +} - // log_format: json_format +TEST_F(FileAccessLogTest, LogFormatJson) { + runTest( R"( path: "/foo" log_format: json_format: - text: "plain_text" + text: "plain text" + path: "%REQ(:path)%" + code: "%RESPONSE_CODE%" )", - }; - - for (const auto& yaml : configs) { - envoy::extensions::access_loggers::file::v3::FileAccessLog fal_config; - TestUtility::loadFromYaml(yaml, fal_config); - - envoy::config::accesslog::v3::AccessLog config; - config.mutable_typed_config()->PackFrom(fal_config); - - NiceMock context; - AccessLog::InstanceSharedPtr log = AccessLog::AccessLogFactory::fromProto(config, context); - - EXPECT_NE(nullptr, log); - EXPECT_NE(nullptr, dynamic_cast(log.get())); - } + R"({ + "text": "plain text", + "path": "/bar/foo", + "code": 200 +})", + true); } } // namespace From deb42a3cdc13f2d1099ea48f3e9903ffcdf62c32 Mon Sep 17 00:00:00 2001 From: htuch Date: Mon, 1 Jun 2020 11:21:08 -0400 Subject: [PATCH 262/909] docs: provide co-author guidance in contributer guide. (#11381) Addresses https://github.com/envoyproxy/envoy/pull/11007#issuecomment-635009257. 
Signed-off-by: Harvey Tuch --- CONTRIBUTING.md | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index c383a6fe2f43..93edb5e2b08c 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -100,10 +100,15 @@ versioning guidelines: colon. Examples: * "docs: fix grammar error" * "http conn man: add new feature" -* Your PR commit message will be used as the commit message when your PR is merged. You should +* Your PR commit message will be used as the commit message when your PR is merged. You should update this field if your PR diverges during review. * Your PR description should have details on what the PR does. If it fixes an existing issue it should end with "Fixes #XXX". +* If your PR is co-authored or based on an earlier PR from another contributor, + please attribute them with `Co-authored-by: name `. See + GitHub's [multiple author + guidance](https://help.github.com/en/github/committing-changes-to-your-project/creating-a-commit-with-multiple-authors) + for further details. * When all of the tests are passing and all other conditions described herein are satisfied, a maintainer will be assigned to review and merge the PR. * Once you submit a PR, *please do not rebase it*. It's much easier to review if subsequent commits From 22391fb702272ce080b4ad491ccd873f7ab14f1b Mon Sep 17 00:00:00 2001 From: "William A. Rowe Jr" Date: Mon, 1 Jun 2020 11:35:42 -0500 Subject: [PATCH 263/909] Enable several previously-disabled tests on windows arch (#11335) With bazel 3.1.0 these tests are executed in the correct directory and now pass Co-authored-by: Sunjay Bhatia Co-authored-by: William A Rowe Jr Signed-off-by: Sunjay Bhatia Signed-off-by: William A Rowe Jr --- test/integration/BUILD | 3 - test/server/BUILD | 2 - test/server/config_validation/BUILD | 1 - test/server/config_validation/server_test.cc | 16 +++-- test/server/overload_manager_impl_test.cc | 65 +++++++++++++++++--- test/tools/router_check/test/BUILD | 2 - 6 files changed, 68 insertions(+), 21 deletions(-) diff --git a/test/integration/BUILD b/test/integration/BUILD index 10a3d147de0f..bcc738c0cfb3 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -250,8 +250,6 @@ envoy_sh_test( "test_utility.sh", "//test/config/integration:server_config_files", ], - # TODO: This script invocation does not work on Windows, see: https://github.com/bazelbuild/bazel/issues/10959 - tags = ["fails_on_windows"], ) envoy_cc_test( @@ -984,7 +982,6 @@ envoy_cc_test( envoy_cc_test( name = "version_integration_test", srcs = ["version_integration_test.cc"], - tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "//source/extensions/filters/http/ip_tagging:config", diff --git a/test/server/BUILD b/test/server/BUILD index 7a6b145abd8f..1fe019224c2c 100644 --- a/test/server/BUILD +++ b/test/server/BUILD @@ -163,7 +163,6 @@ envoy_cc_test( envoy_cc_test( name = "overload_manager_impl_test", srcs = ["overload_manager_impl_test.cc"], - tags = ["fails_on_windows"], deps = [ "//include/envoy/registry", "//source/common/stats:isolated_store_lib", @@ -428,5 +427,4 @@ envoy_benchmark_test( name = "filter_chain_benchmark_test_benchmark_test", timeout = "long", benchmark_binary = "filter_chain_benchmark_test", - tags = ["fails_on_windows"], ) diff --git a/test/server/config_validation/BUILD b/test/server/config_validation/BUILD index f4d848a58303..8c466c9b30cd 100644 --- a/test/server/config_validation/BUILD +++ b/test/server/config_validation/BUILD @@ -61,7 +61,6 @@ envoy_cc_test( 
"//configs:example_configs", "//test/config_test:example_configs_test_setup.sh", ], - tags = ["fails_on_windows"], deps = [ "//source/extensions/filters/http/router:config", "//source/extensions/filters/network/http_connection_manager:config", diff --git a/test/server/config_validation/server_test.cc b/test/server/config_validation/server_test.cc index cd310eaefadd..4bd903a65fb1 100644 --- a/test/server/config_validation/server_test.cc +++ b/test/server/config_validation/server_test.cc @@ -130,12 +130,16 @@ TEST_P(ValidationServerTest, NoopLifecycleNotifier) { // TODO(rlazarus): We'd like use this setup to replace //test/config_test (that is, run it against // all the example configs) but can't until light validation is implemented, mocking out access to // the filesystem for TLS certs, etc. In the meantime, these are the example configs that work -// as-is. -INSTANTIATE_TEST_SUITE_P(ValidConfigs, ValidationServerTest, - ::testing::Values("front-proxy_front-envoy.yaml", - "google_com_proxy.v2.yaml", - "grpc-bridge_server_envoy-proxy.yaml", - "front-proxy_service-envoy.yaml")); +// as-is. (Note, /dev/stdout as an access log file is invalid on Windows, no equivalent /dev/ +// exists.) + +auto testing_values = ::testing::Values("front-proxy_front-envoy.yaml", "google_com_proxy.v2.yaml", +#ifndef WIN32 + "grpc-bridge_server_envoy-proxy.yaml", +#endif + "front-proxy_service-envoy.yaml"); + +INSTANTIATE_TEST_SUITE_P(ValidConfigs, ValidationServerTest, testing_values); // Just make sure that all configs can be ingested without a crash. Processing of config files // may not be successful, but there should be no crash. diff --git a/test/server/overload_manager_impl_test.cc b/test/server/overload_manager_impl_test.cc index ee801a6d97af..73715e249c07 100644 --- a/test/server/overload_manager_impl_test.cc +++ b/test/server/overload_manager_impl_test.cc @@ -130,6 +130,26 @@ class OverloadManagerImplTest : public testing::Test { )EOF"; } + std::string getConfigSimple() { + return R"EOF( + refresh_interval { + seconds: 1 + } + resource_monitors { + name: "envoy.resource_monitors.fake_resource1" + } + actions { + name: "envoy.overload_actions.dummy_action" + triggers { + name: "envoy.resource_monitors.fake_resource1" + threshold { + value: 0.9 + } + } + } + )EOF"; + } + std::unique_ptr createOverloadManager(const std::string& config) { return std::make_unique(dispatcher_, stats_, thread_local_, parseConfig(config), validation_visitor_, *api_); @@ -174,6 +194,7 @@ TEST_F(OverloadManagerImplTest, CallbackOnlyFiresWhenStateChanges) { const OverloadActionState& action_state = manager->getThreadLocalOverloadState().getState("envoy.overload_actions.dummy_action"); + // Update does not exceed fake_resource1 trigger threshold, no callback expected factory1_.monitor_->setPressure(0.5); timer_cb_(); EXPECT_FALSE(is_active); @@ -182,6 +203,7 @@ TEST_F(OverloadManagerImplTest, CallbackOnlyFiresWhenStateChanges) { EXPECT_EQ(0, active_gauge.value()); EXPECT_EQ(50, pressure_gauge1.value()); + // Update exceeds fake_resource1 trigger threshold, callback is expected factory1_.monitor_->setPressure(0.95); timer_cb_(); EXPECT_TRUE(is_active); @@ -190,7 +212,7 @@ TEST_F(OverloadManagerImplTest, CallbackOnlyFiresWhenStateChanges) { EXPECT_EQ(1, active_gauge.value()); EXPECT_EQ(95, pressure_gauge1.value()); - // Callback should not be invoked if action active state has not changed + // Callback should not be invoked if action state does not change factory1_.monitor_->setPressure(0.94); timer_cb_(); EXPECT_TRUE(is_active); @@ 
-198,23 +220,50 @@ TEST_F(OverloadManagerImplTest, CallbackOnlyFiresWhenStateChanges) { EXPECT_EQ(1, cb_count); EXPECT_EQ(94, pressure_gauge1.value()); - // Different triggers firing but overall action remains active so no callback expected - factory1_.monitor_->setPressure(0.5); + // The action is already active for fake_resource1 so no callback expected factory2_.monitor_->setPressure(0.9); timer_cb_(); EXPECT_TRUE(is_active); EXPECT_EQ(action_state, OverloadActionState::Active); EXPECT_EQ(1, cb_count); + EXPECT_EQ(90, pressure_gauge2.value()); + + // The action remains active for fake_resource2 so no callback expected + factory1_.monitor_->setPressure(0.5); + timer_cb_(); + EXPECT_TRUE(is_active); + EXPECT_EQ(action_state, OverloadActionState::Active); + EXPECT_EQ(1, cb_count); EXPECT_EQ(50, pressure_gauge1.value()); EXPECT_EQ(90, pressure_gauge2.value()); - factory2_.monitor_->setPressure(0.4); + // Both become inactive so callback is expected + factory2_.monitor_->setPressure(0.3); timer_cb_(); EXPECT_FALSE(is_active); EXPECT_EQ(action_state, OverloadActionState::Inactive); EXPECT_EQ(2, cb_count); - EXPECT_EQ(0, active_gauge.value()); - EXPECT_EQ(40, pressure_gauge2.value()); + EXPECT_EQ(30, pressure_gauge2.value()); + + // Different triggers, both become active, only one callback expected + factory1_.monitor_->setPressure(0.97); + factory2_.monitor_->setPressure(0.96); + timer_cb_(); + EXPECT_TRUE(is_active); + EXPECT_EQ(action_state, OverloadActionState::Active); + EXPECT_EQ(3, cb_count); + EXPECT_EQ(97, pressure_gauge1.value()); + EXPECT_EQ(96, pressure_gauge2.value()); + + // Different triggers, both become inactive, only one callback expected + factory1_.monitor_->setPressure(0.41); + factory2_.monitor_->setPressure(0.42); + timer_cb_(); + EXPECT_FALSE(is_active); + EXPECT_EQ(action_state, OverloadActionState::Inactive); + EXPECT_EQ(4, cb_count); + EXPECT_EQ(41, pressure_gauge1.value()); + EXPECT_EQ(42, pressure_gauge2.value()); manager->stop(); } @@ -239,10 +288,12 @@ TEST_F(OverloadManagerImplTest, SkippedUpdates) { setDispatcherExpectation(); // Save the post callback instead of executing it. + // Note that this test works for only one resource. If using the default config, + // two events fire, so a list of all post_cb's between timer_cb_'s would need to be invoked. 
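// A minimal illustrative sketch (hypothetical types, not Envoy source) of the behaviour the
// test above exercises: an overload action is active while any of its resource triggers is
// above threshold, and the registered callback fires only when that aggregate state changes,
// not on every pressure update.
#include <functional>
#include <utility>
#include <vector>

struct TriggerState { bool active = false; };

class AggregateAction {
public:
  explicit AggregateAction(std::function<void(bool)> cb) : cb_(std::move(cb)) {}

  void update(const std::vector<TriggerState>& triggers) {
    bool now_active = false;
    for (const auto& trigger : triggers) {
      now_active = now_active || trigger.active;
    }
    // Edge-triggered: notify only on Active <-> Inactive transitions.
    if (now_active != active_) {
      active_ = now_active;
      cb_(active_);
    }
  }

private:
  bool active_ = false;
  std::function<void(bool)> cb_;
};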
Event::PostCb post_cb; ON_CALL(dispatcher_, post(_)).WillByDefault(Invoke([&](Event::PostCb cb) { post_cb = cb; })); - auto manager(createOverloadManager(getConfig())); + auto manager(createOverloadManager(getConfigSimple())); manager->start(); Stats::Counter& skipped_updates = stats_.counter("overload.envoy.resource_monitors.fake_resource1.skipped_updates"); diff --git a/test/tools/router_check/test/BUILD b/test/tools/router_check/test/BUILD index 42960b2da0e1..fbe2bfe9b0c7 100644 --- a/test/tools/router_check/test/BUILD +++ b/test/tools/router_check/test/BUILD @@ -15,8 +15,6 @@ envoy_sh_test( data = [ ":configs", ], - # TODO: This script invocation does not work on Windows, see: https://github.com/bazelbuild/bazel/issues/10959 - tags = ["fails_on_windows"], ) filegroup( From 97064d19ca23e81d89ebcc298318be420c4379db Mon Sep 17 00:00:00 2001 From: Teju Nareddy Date: Mon, 1 Jun 2020 13:36:44 -0400 Subject: [PATCH 264/909] fuzz: Make `reset` public in UberFilterFuzzer (#11372) Signed-off-by: Teju Nareddy --- test/extensions/filters/http/common/fuzz/uber_filter.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/test/extensions/filters/http/common/fuzz/uber_filter.h b/test/extensions/filters/http/common/fuzz/uber_filter.h index 10582b101248..b871524706f1 100644 --- a/test/extensions/filters/http/common/fuzz/uber_filter.h +++ b/test/extensions/filters/http/common/fuzz/uber_filter.h @@ -26,6 +26,9 @@ class UberFilterFuzzer { // For fuzzing proto data, guide the mutator to useful 'Any' types. static void guideAnyProtoType(test::fuzz::HttpData* mutable_data, uint choice); + // Resets cached data (request headers, etc.). Should be called for each fuzz iteration. + void reset(); + protected: // Set-up filter specific mock expectations in constructor. void perFilterSetup(); @@ -49,8 +52,6 @@ class UberFilterFuzzer { template void sendTrailers(FilterType* filter, const test::fuzz::HttpData& data) = delete; - void reset(); - private: NiceMock factory_context_; NiceMock filter_callback_; From 8d288144ff6d7e83d6455499652d7c2f5ba68803 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Mon, 1 Jun 2020 16:18:50 -0400 Subject: [PATCH 265/909] test: adding per-extension coverage limits (#11337) Checking each extension to ensure it is above the 97% coverage bar Additional Description: This may get a little annoying for extensions close to the bar, or refactors crossing extensions. We can always drop it, but as noted in the exception file many extensions are below the bar, and some pretty far down there. Risk Level: n/a Testing: CI Docs Changes: n/a Release Notes: n/a Signed-off-by: Alyssa Wilk --- test/per_file_coverage.sh | 107 +++++++++++++++++++++++++++++++ test/run_envoy_bazel_coverage.sh | 13 ++++ 2 files changed, 120 insertions(+) create mode 100755 test/per_file_coverage.sh diff --git a/test/per_file_coverage.sh b/test/per_file_coverage.sh new file mode 100755 index 000000000000..69b96ebc0bb6 --- /dev/null +++ b/test/per_file_coverage.sh @@ -0,0 +1,107 @@ +#!/bin/bash + +# directory:coverage_percent +# for existing extensions with low coverage. 
+declare -a KNOWN_LOW_COVERAGE=( +"source/extensions/common:95.1" +"source/extensions/common/crypto:91.5" +"source/extensions/common/wasm:87.8" +"source/extensions/common/wasm/v8:88.3" +"source/extensions/common/wasm/null:77.8" +"source/extensions/compression:90.1" +"source/extensions/compression/gzip:93.1" +"source/extensions/compression/gzip/decompressor:81.1" +"source/extensions/compression/common:60.0" +"source/extensions/compression/common/decompressor:20.0" +"source/extensions/filters/network/sni_cluster:90.3" +"source/extensions/filters/network/thrift_proxy/router:96.4" +"source/extensions/filters/network/sni_dynamic_forward_proxy:92.4" +"source/extensions/filters/network/dubbo_proxy:96.7" +"source/extensions/filters/network/dubbo_proxy/router:96.1" +"source/extensions/filters/network/direct_response:89.3" +"source/extensions/filters/http/dynamic_forward_proxy:93.2" +"source/extensions/filters/http/cache:80.8" +"source/extensions/filters/http/cache/simple_http_cache:84.5" +"source/extensions/filters/http/csrf:96.6" +"source/extensions/filters/http/ip_tagging:92.0" +"source/extensions/filters/http/decompressor:80" +"source/extensions/filters/http/compressor:84.4" +"source/extensions/filters/http/header_to_metadata:95.0" +"source/extensions/filters/http/grpc_json_transcoder:93.3" +"source/extensions/filters/http/aws_request_signing:93.3" +"source/extensions/filters/listener:95.7" +"source/extensions/filters/listener/tls_inspector:92.9" +"source/extensions/filters/listener/http_inspector:93.3" +"source/extensions/filters/udp:91.2" +"source/extensions/filters/udp/dns_filter:84.1" +"source/extensions/filters/common:96.4" +"source/extensions/filters/common/expr:92.2" +"source/extensions/filters/common/rbac:93.0" +"source/extensions/grpc_credentials:93.9" +"source/extensions/grpc_credentials/aws_iam:88.6" +"source/extensions/quic_listeners:85.1" +"source/extensions/quic_listeners/quiche:85.1" +"source/extensions/quic_listeners/quiche/platform:0" +"source/extensions/resource_monitors/fixed_heap:90.9" +"source/extensions/retry:95.5" +"source/extensions/retry/host:85.7" +"source/extensions/retry/host/omit_host_metadata:96.9" +"source/extensions/retry/host/previous_hosts:82.4" +"source/extensions/retry/host/omit_canary_hosts:64.3" +"source/extensions/stat_sinks/statsd:92.6" +"source/extensions/tracers:96.8" +"source/extensions/tracers/opencensus:93.9" +"source/extensions/tracers/xray:95.5" +"source/extensions/transport_sockets:95.0" +"source/extensions/transport_sockets/raw_buffer:90.9" +"source/extensions/transport_sockets/tap:95.6" +"source/extensions/transport_sockets/tls:94.5" +"source/extensions/transport_sockets/tls/private_key:76.9" +) + +[[ -z "${SRCDIR}" ]] && SRCDIR="${PWD}" +COVERAGE_DIR="${SRCDIR}"/generated/coverage +COVERAGE_DATA="${COVERAGE_DIR}/coverage.dat" + +FAILED=0 +DEFAULT_COVERAGE_THRESHOLD=97.0 +DIRECTORY_THRESHOLD=$DEFAULT_COVERAGE_THRESHOLD + +# Unfortunately we have a bunch of preexisting extensions with low coverage. +# Set their low bar as their current coverage level. +get_coverage_target() { + DIRECTORY_THRESHOLD=$DEFAULT_COVERAGE_THRESHOLD + for FILE_PERCENT in ${KNOWN_LOW_COVERAGE[@]} + do + if [[ $FILE_PERCENT =~ "$1:" ]]; then + DIRECTORY_THRESHOLD=$(echo $FILE_PERCENT | sed 's/.*://') + return + fi + done +} + +# Make sure that for each extension directory with code, coverage doesn't dip +# below the default coverage threshold. 
+for DIRECTORY in $(find source/extensions/* -type d) +do + get_coverage_target $DIRECTORY + COVERAGE_VALUE=$(lcov -e $COVERAGE_DATA "$DIRECTORY/*" -o /dev/null | grep line | cut -d ' ' -f 4) + COVERAGE_VALUE=${COVERAGE_VALUE%?} + # If the coverage number is 'n' (no data found) there is 0% coverage. This is + # probably a directory without source code, so we skip checks. + # + # We could insist that we validate that 0% coverage directories are in a + # documented list, but instead of adding busy-work for folks adding + # non-source-containing directories, we trust reviewers to notice if there's + # absolutely no tests for a full directory. + if [[ $COVERAGE_VALUE =~ "n" ]]; then + continue; + fi; + COVERAGE_FAILED=$(echo "${COVERAGE_VALUE}<${DIRECTORY_THRESHOLD}" | bc) + if test ${COVERAGE_FAILED} -eq 1; then + echo Code coverage for extension ${DIRECTORY} is lower than limit of ${DIRECTORY_THRESHOLD} \(${COVERAGE_VALUE}\) + FAILED=1 + fi +done + +exit $FAILED diff --git a/test/run_envoy_bazel_coverage.sh b/test/run_envoy_bazel_coverage.sh index 0754ab01a163..29a251d3df9e 100755 --- a/test/run_envoy_bazel_coverage.sh +++ b/test/run_envoy_bazel_coverage.sh @@ -59,4 +59,17 @@ if [[ "$VALIDATE_COVERAGE" == "true" ]]; then echo Code coverage ${COVERAGE_VALUE} is good and higher than limit of ${COVERAGE_THRESHOLD} fi fi + +if [[ "$VALIDATE_COVERAGE" == "true" ]]; then + echo "Checking per-extension coverage" + output=$(./test/per_file_coverage.sh) + + if [ $? -eq 1 ]; then + echo Per-extension coverage failed: + echo $output + exit 1 + fi + echo Per-extension coverage passed. +fi + echo "HTML coverage report is in ${COVERAGE_DIR}/index.html" From 97ba1684a8c85395fdad43a35615915ff086193c Mon Sep 17 00:00:00 2001 From: fpliu233 <62083774+fpliu233@users.noreply.github.com> Date: Mon, 1 Jun 2020 18:41:39 -0700 Subject: [PATCH 266/909] ext_authz: Allow runtime configuration to deny at disable (#11279) This adds a runtime configuration for `envoy.filters.http.ext_authz` to deny requests when the filter is disabled. While the risk level is low, it updates the frozen v2 API. It also adds a runtime guard: The runtime flag can be specified via `http.ext_authz.deny_at_disable`. This runtime key will work with `http.ext_authz.filter_enable` flag to deny all filter protected paths without sending RPC requests to the `ext_authz` service. Signed-off-by: Fangpeng Liu <62083774+fpliu233@users.noreply.github.com> --- .../filter/http/ext_authz/v2/ext_authz.proto | 11 +++- .../filters/http/ext_authz/v3/ext_authz.proto | 11 +++- .../http/ext_authz/v4alpha/ext_authz.proto | 11 +++- docs/root/version_history/current.rst | 1 + .../filter/http/ext_authz/v2/ext_authz.proto | 11 +++- .../filters/http/ext_authz/v3/ext_authz.proto | 11 +++- .../http/ext_authz/v4alpha/ext_authz.proto | 11 +++- .../filters/http/ext_authz/ext_authz.cc | 11 ++++ .../filters/http/ext_authz/ext_authz.h | 9 +++ .../ext_authz/ext_authz_integration_test.cc | 43 ++++++++++--- .../filters/http/ext_authz/ext_authz_test.cc | 63 +++++++++++++++++++ 11 files changed, 180 insertions(+), 13 deletions(-) diff --git a/api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto b/api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto index a407f4628d2e..db188a572ae0 100644 --- a/api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto +++ b/api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto @@ -24,7 +24,7 @@ option (udpa.annotations.file_status).package_version_status = FROZEN; // External Authorization :ref:`configuration overview `. 
// [#extension: envoy.filters.http.ext_authz] -// [#next-free-field: 11] +// [#next-free-field: 12] message ExtAuthz { // External authorization service configuration. oneof services { @@ -98,6 +98,15 @@ message ExtAuthz { // If this field is not specified, the filter will be enabled for all requests. api.v2.core.RuntimeFractionalPercent filter_enabled = 9; + // Specifies whether to deny the requests, when the filter is disabled. + // If :ref:`runtime_key ` is specified, + // Envoy will lookup the runtime key to determine whether to deny request for + // filter protected path at filter disabling. If filter is disabled in + // typed_per_filter_config for the path, requests will not be denied. + // + // If this field is not specified, all requests will be allowed when disabled. + api.v2.core.RuntimeFeatureFlag deny_at_disable = 11; + // Specifies if the peer certificate is sent to the external service. // // When this field is true, Envoy will include the peer X.509 certificate, if available, in the diff --git a/api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto b/api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto index 44673ad6ff26..20223787549e 100644 --- a/api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto +++ b/api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto @@ -22,7 +22,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // External Authorization :ref:`configuration overview `. // [#extension: envoy.filters.http.ext_authz] -// [#next-free-field: 11] +// [#next-free-field: 12] message ExtAuthz { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.ext_authz.v2.ExtAuthz"; @@ -97,6 +97,15 @@ message ExtAuthz { // If this field is not specified, the filter will be enabled for all requests. config.core.v3.RuntimeFractionalPercent filter_enabled = 9; + // Specifies whether to deny the requests, when the filter is disabled. + // If :ref:`runtime_key ` is specified, + // Envoy will lookup the runtime key to determine whether to deny request for + // filter protected path at filter disabling. If filter is disabled in + // typed_per_filter_config for the path, requests will not be denied. + // + // If this field is not specified, all requests will be allowed when disabled. + config.core.v3.RuntimeFeatureFlag deny_at_disable = 11; + // Specifies if the peer certificate is sent to the external service. // // When this field is true, Envoy will include the peer X.509 certificate, if available, in the diff --git a/api/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto b/api/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto index b39a2d56d00d..03f0b3a27724 100644 --- a/api/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto +++ b/api/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto @@ -22,7 +22,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // External Authorization :ref:`configuration overview `. // [#extension: envoy.filters.http.ext_authz] -// [#next-free-field: 11] +// [#next-free-field: 12] message ExtAuthz { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.http.ext_authz.v3.ExtAuthz"; @@ -97,6 +97,15 @@ message ExtAuthz { // If this field is not specified, the filter will be enabled for all requests. config.core.v4alpha.RuntimeFractionalPercent filter_enabled = 9; + // Specifies whether to deny the requests, when the filter is disabled. 
+ // If :ref:`runtime_key ` is specified, + // Envoy will lookup the runtime key to determine whether to deny request for + // filter protected path at filter disabling. If filter is disabled in + // typed_per_filter_config for the path, requests will not be denied. + // + // If this field is not specified, all requests will be allowed when disabled. + config.core.v4alpha.RuntimeFeatureFlag deny_at_disable = 11; + // Specifies if the peer certificate is sent to the external service. // // When this field is true, Envoy will include the peer X.509 certificate, if available, in the diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 136472d24fb3..4d1d89439fb9 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -49,6 +49,7 @@ New Features * config: added :ref:`version_text ` stat that reflects xDS version. * decompressor: generic :ref:`decompressor ` filter exposed to users. * dynamic forward proxy: added :ref:`SNI based dynamic forward proxy ` support. +* ext_authz filter: added :ref:`v2 deny_at_disable `, :ref:`v3 deny_at_disable `. This allows to force deny for protected path while filter gets disabled, by setting this key to true. * fault: added support for controlling the percentage of requests that abort, delay and response rate limits faults are applied to using :ref:`HTTP headers ` to the HTTP fault filter. * fault: added support for specifying grpc_status code in abort faults using diff --git a/generated_api_shadow/envoy/config/filter/http/ext_authz/v2/ext_authz.proto b/generated_api_shadow/envoy/config/filter/http/ext_authz/v2/ext_authz.proto index a407f4628d2e..db188a572ae0 100644 --- a/generated_api_shadow/envoy/config/filter/http/ext_authz/v2/ext_authz.proto +++ b/generated_api_shadow/envoy/config/filter/http/ext_authz/v2/ext_authz.proto @@ -24,7 +24,7 @@ option (udpa.annotations.file_status).package_version_status = FROZEN; // External Authorization :ref:`configuration overview `. // [#extension: envoy.filters.http.ext_authz] -// [#next-free-field: 11] +// [#next-free-field: 12] message ExtAuthz { // External authorization service configuration. oneof services { @@ -98,6 +98,15 @@ message ExtAuthz { // If this field is not specified, the filter will be enabled for all requests. api.v2.core.RuntimeFractionalPercent filter_enabled = 9; + // Specifies whether to deny the requests, when the filter is disabled. + // If :ref:`runtime_key ` is specified, + // Envoy will lookup the runtime key to determine whether to deny request for + // filter protected path at filter disabling. If filter is disabled in + // typed_per_filter_config for the path, requests will not be denied. + // + // If this field is not specified, all requests will be allowed when disabled. + api.v2.core.RuntimeFeatureFlag deny_at_disable = 11; + // Specifies if the peer certificate is sent to the external service. 
// // When this field is true, Envoy will include the peer X.509 certificate, if available, in the diff --git a/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto b/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto index 4ede2bd5abf8..ff55d66979ee 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto @@ -22,7 +22,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // External Authorization :ref:`configuration overview `. // [#extension: envoy.filters.http.ext_authz] -// [#next-free-field: 11] +// [#next-free-field: 12] message ExtAuthz { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.ext_authz.v2.ExtAuthz"; @@ -93,6 +93,15 @@ message ExtAuthz { // If this field is not specified, the filter will be enabled for all requests. config.core.v3.RuntimeFractionalPercent filter_enabled = 9; + // Specifies whether to deny the requests, when the filter is disabled. + // If :ref:`runtime_key ` is specified, + // Envoy will lookup the runtime key to determine whether to deny request for + // filter protected path at filter disabling. If filter is disabled in + // typed_per_filter_config for the path, requests will not be denied. + // + // If this field is not specified, all requests will be allowed when disabled. + config.core.v3.RuntimeFeatureFlag deny_at_disable = 11; + // Specifies if the peer certificate is sent to the external service. // // When this field is true, Envoy will include the peer X.509 certificate, if available, in the diff --git a/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto b/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto index b39a2d56d00d..03f0b3a27724 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto @@ -22,7 +22,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // External Authorization :ref:`configuration overview `. // [#extension: envoy.filters.http.ext_authz] -// [#next-free-field: 11] +// [#next-free-field: 12] message ExtAuthz { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.http.ext_authz.v3.ExtAuthz"; @@ -97,6 +97,15 @@ message ExtAuthz { // If this field is not specified, the filter will be enabled for all requests. config.core.v4alpha.RuntimeFractionalPercent filter_enabled = 9; + // Specifies whether to deny the requests, when the filter is disabled. + // If :ref:`runtime_key ` is specified, + // Envoy will lookup the runtime key to determine whether to deny request for + // filter protected path at filter disabling. If filter is disabled in + // typed_per_filter_config for the path, requests will not be denied. + // + // If this field is not specified, all requests will be allowed when disabled. + config.core.v4alpha.RuntimeFeatureFlag deny_at_disable = 11; + // Specifies if the peer certificate is sent to the external service. 
// // When this field is true, Envoy will include the peer X.509 certificate, if available, in the diff --git a/source/extensions/filters/http/ext_authz/ext_authz.cc b/source/extensions/filters/http/ext_authz/ext_authz.cc index e3f754e07841..a5960424dc28 100644 --- a/source/extensions/filters/http/ext_authz/ext_authz.cc +++ b/source/extensions/filters/http/ext_authz/ext_authz.cc @@ -78,6 +78,17 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, skip_check_ = skipCheckForRoute(route); if (!config_->filterEnabled() || skip_check_) { + if (skip_check_) { + return Http::FilterHeadersStatus::Continue; + } + if (config_->denyAtDisable()) { + ENVOY_STREAM_LOG(trace, "ext_authz filter is disabled. Deny the request.", *callbacks_); + callbacks_->streamInfo().setResponseFlag( + StreamInfo::ResponseFlag::UnauthorizedExternalService); + callbacks_->sendLocalReply(config_->statusOnError(), EMPTY_STRING, nullptr, absl::nullopt, + RcDetails::get().AuthzError); + return Http::FilterHeadersStatus::StopIteration; + } return Http::FilterHeadersStatus::Continue; } diff --git a/source/extensions/filters/http/ext_authz/ext_authz.h b/source/extensions/filters/http/ext_authz/ext_authz.h index 57388120480f..14b52ffd776a 100644 --- a/source/extensions/filters/http/ext_authz/ext_authz.h +++ b/source/extensions/filters/http/ext_authz/ext_authz.h @@ -70,6 +70,10 @@ class FilterConfig { ? absl::optional( Runtime::FractionalPercent(config.filter_enabled(), runtime_)) : absl::nullopt), + deny_at_disable_(config.has_deny_at_disable() + ? absl::optional( + Runtime::FeatureFlag(config.deny_at_disable(), runtime_)) + : absl::nullopt), pool_(scope_.symbolTable()), metadata_context_namespaces_(config.metadata_context_namespaces().begin(), config.metadata_context_namespaces().end()), @@ -93,6 +97,10 @@ class FilterConfig { bool filterEnabled() { return filter_enabled_.has_value() ? filter_enabled_->enabled() : true; } + bool denyAtDisable() { + return deny_at_disable_.has_value() ? deny_at_disable_->enabled() : false; + } + Stats::Scope& scope() { return scope_; } Http::Context& httpContext() { return http_context_; } @@ -133,6 +141,7 @@ class FilterConfig { Http::Context& http_context_; const absl::optional filter_enabled_; + const absl::optional deny_at_disable_; // TODO(nezdolik): stop using pool as part of deprecating cluster scope stats. 
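// A minimal illustrative sketch (hypothetical helper, not Envoy source) restating the order of
// checks in the new decodeHeaders() logic above: a per-route disable always allows the request,
// an enabled filter asks the external service, and a disabled filter either allows the request
// or fails it locally (403 by default) depending on deny_at_disable.
enum class Decision { RunCheck, Allow, DenyLocally };

Decision authzDecision(bool per_route_disabled, bool filter_enabled, bool deny_at_disable) {
  if (per_route_disabled) {
    return Decision::Allow; // disabled via typed_per_filter_config: never denied
  }
  if (filter_enabled) {
    return Decision::RunCheck; // normal path: consult the ext_authz service
  }
  return deny_at_disable ? Decision::DenyLocally : Decision::Allow;
}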
Stats::StatNamePool pool_; diff --git a/test/extensions/filters/http/ext_authz/ext_authz_integration_test.cc b/test/extensions/filters/http/ext_authz/ext_authz_integration_test.cc index a6e00760db52..d368f04db6f9 100644 --- a/test/extensions/filters/http/ext_authz/ext_authz_integration_test.cc +++ b/test/extensions/filters/http/ext_authz/ext_authz_integration_test.cc @@ -29,7 +29,7 @@ class ExtAuthzGrpcIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, new FakeUpstream(0, FakeHttpConnection::Type::HTTP2, version_, timeSystem())); } - void initializeWithDownstreamProtocol(Http::CodecClient::Type downstream_protocol) { + void initializeConfig() { config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { auto* ext_authz_cluster = bootstrap.mutable_static_resources()->add_clusters(); ext_authz_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]); @@ -40,14 +40,25 @@ class ExtAuthzGrpcIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, setGrpcService(*proto_config_.mutable_grpc_service(), "ext_authz", fake_upstreams_.back()->localAddress()); + proto_config_.mutable_filter_enabled()->set_runtime_key("envoy.ext_authz.enable"); + proto_config_.mutable_filter_enabled()->mutable_default_value()->set_numerator(100); + proto_config_.mutable_deny_at_disable()->set_runtime_key("envoy.ext_authz.deny_at_disable"); + proto_config_.mutable_deny_at_disable()->mutable_default_value()->set_value(false); + envoy::config::listener::v3::Filter ext_authz_filter; ext_authz_filter.set_name(Extensions::HttpFilters::HttpFilterNames::get().ExtAuthorization); ext_authz_filter.mutable_typed_config()->PackFrom(proto_config_); config_helper_.addFilter(MessageUtil::getJsonStringFromMessage(ext_authz_filter)); }); + } - setDownstreamProtocol(downstream_protocol); - HttpIntegrationTest::initialize(); + void setDenyAtDisableRuntimeConfig(bool deny_at_disable) { + config_helper_.addRuntimeOverride("envoy.ext_authz.enable", "numerator: 0"); + if (deny_at_disable) { + config_helper_.addRuntimeOverride("envoy.ext_authz.deny_at_disable", "true"); + } else { + config_helper_.addRuntimeOverride("envoy.ext_authz.deny_at_disable", "false"); + } } void initiateClientConnection(uint64_t request_body_length) { @@ -99,7 +110,7 @@ class ExtAuthzGrpcIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, RELEASE_ASSERT(result, result.message()); } - void waitForSuccessfulUpstreamResponse() { + void waitForSuccessfulUpstreamResponse(const std::string& expected_response_code) { AssertionResult result = fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_); RELEASE_ASSERT(result, result.message()); @@ -116,7 +127,7 @@ class ExtAuthzGrpcIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, EXPECT_EQ(request_body_.length(), upstream_request_->bodyLength()); EXPECT_TRUE(response_->complete()); - EXPECT_EQ("200", response_->headers().getStatusValue()); + EXPECT_EQ(expected_response_code, response_->headers().getStatusValue()); EXPECT_EQ(response_size_, response_->body().size()); } @@ -169,11 +180,25 @@ class ExtAuthzGrpcIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, void expectCheckRequestWithBody(Http::CodecClient::Type downstream_protocol, uint64_t request_size) { - initializeWithDownstreamProtocol(downstream_protocol); + initializeConfig(); + setDownstreamProtocol(downstream_protocol); + HttpIntegrationTest::initialize(); initiateClientConnection(request_size); 
waitForExtAuthzRequest(expectedCheckRequest(downstream_protocol)); sendExtAuthzResponse(); - waitForSuccessfulUpstreamResponse(); + waitForSuccessfulUpstreamResponse("200"); + cleanup(); + } + + void expectFilterDisableCheck(bool deny_at_disable, const std::string& expected_status) { + initializeConfig(); + setDenyAtDisableRuntimeConfig(deny_at_disable); + setDownstreamProtocol(Http::CodecClient::Type::HTTP2); + HttpIntegrationTest::initialize(); + initiateClientConnection(4); + if (!deny_at_disable) { + waitForSuccessfulUpstreamResponse(expected_status); + } cleanup(); } @@ -319,6 +344,10 @@ TEST_P(ExtAuthzGrpcIntegrationTest, HTTP2DownstreamRequestWithLargeBody) { expectCheckRequestWithBody(Http::CodecClient::Type::HTTP2, 2048); } +TEST_P(ExtAuthzGrpcIntegrationTest, AllowAtDisable) { expectFilterDisableCheck(false, "200"); } + +TEST_P(ExtAuthzGrpcIntegrationTest, DenyAtDisable) { expectFilterDisableCheck(true, "403"); } + INSTANTIATE_TEST_SUITE_P(IpVersions, ExtAuthzHttpIntegrationTest, testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), TestUtility::ipTestParamsToString); diff --git a/test/extensions/filters/http/ext_authz/ext_authz_test.cc b/test/extensions/filters/http/ext_authz/ext_authz_test.cc index 2d9eb7a87a12..a7ea123bbc2f 100644 --- a/test/extensions/filters/http/ext_authz/ext_authz_test.cc +++ b/test/extensions/filters/http/ext_authz/ext_authz_test.cc @@ -909,6 +909,69 @@ TEST_F(HttpFilterTest, FilterEnabled) { filter_->decodeHeaders(request_headers_, false)); } +// Test that filter can deny for protected path when filter is disabled via filter_enabled field. +TEST_F(HttpFilterTest, FilterDenyAtDisable) { + initialize(R"EOF( + grpc_service: + envoy_grpc: + cluster_name: "ext_authz_server" + filter_enabled: + runtime_key: "http.ext_authz.enabled" + default_value: + numerator: 0 + denominator: HUNDRED + deny_at_disable: + runtime_key: "http.ext_authz.deny_at_disable" + default_value: + value: true + )EOF"); + + ON_CALL(runtime_.snapshot_, + featureEnabled("http.ext_authz.enabled", + testing::Matcher(Percent(0)))) + .WillByDefault(Return(false)); + + ON_CALL(runtime_.snapshot_, featureEnabled("http.ext_authz.enabled", false)) + .WillByDefault(Return(true)); + + // Make sure check is not called. + EXPECT_CALL(*client_, check(_, _, _, _)).Times(0); + // Engage the filter. + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_->decodeHeaders(request_headers_, false)); +} + +// Test that filter allows for protected path when filter is disabled via filter_enabled field. +TEST_F(HttpFilterTest, FilterAllowAtDisable) { + initialize(R"EOF( + grpc_service: + envoy_grpc: + cluster_name: "ext_authz_server" + filter_enabled: + runtime_key: "http.ext_authz.enabled" + default_value: + numerator: 0 + denominator: HUNDRED + deny_at_disable: + runtime_key: "http.ext_authz.deny_at_disable" + default_value: + value: false + )EOF"); + + ON_CALL(runtime_.snapshot_, + featureEnabled("http.ext_authz.enabled", + testing::Matcher(Percent(0)))) + .WillByDefault(Return(false)); + + ON_CALL(runtime_.snapshot_, featureEnabled("http.ext_authz.enabled", false)) + .WillByDefault(Return(false)); + + // Make sure check is not called. + EXPECT_CALL(*client_, check(_, _, _, _)).Times(0); + // Engage the filter. 
+ EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false)); +} + // ------------------- // Parameterized Tests // ------------------- From 190cce0d9d3a89d29aa819c6f5045c42fa9b3611 Mon Sep 17 00:00:00 2001 From: Auni Ahsan Date: Mon, 1 Jun 2020 23:12:08 -0400 Subject: [PATCH 267/909] integration: Configure drain time in integration tests (#11345) Signed-off-by: Auni Ahsan --- test/integration/integration.cc | 8 +++--- test/integration/integration.h | 3 +++ test/integration/protocol_integration_test.cc | 17 ++++++++++--- test/integration/server.cc | 25 +++++++++++-------- test/integration/server.h | 11 +++++--- 5 files changed, 42 insertions(+), 22 deletions(-) diff --git a/test/integration/integration.cc b/test/integration/integration.cc index 7efe8b299404..5ba1f7e746ab 100644 --- a/test/integration/integration.cc +++ b/test/integration/integration.cc @@ -454,10 +454,10 @@ std::string getListenerDetails(Envoy::Server::Instance& server) { void BaseIntegrationTest::createGeneratedApiTestServer( const std::string& bootstrap_path, const std::vector& port_names, Server::FieldValidationConfig validator_config, bool allow_lds_rejection) { - test_server_ = IntegrationTestServer::create(bootstrap_path, version_, on_server_ready_function_, - on_server_init_function_, deterministic_, - timeSystem(), *api_, defer_listener_finalization_, - process_object_, validator_config, concurrency_); + test_server_ = IntegrationTestServer::create( + bootstrap_path, version_, on_server_ready_function_, on_server_init_function_, deterministic_, + timeSystem(), *api_, defer_listener_finalization_, process_object_, validator_config, + concurrency_, drain_time_); if (config_helper_.bootstrap().static_resources().listeners_size() > 0 && !defer_listener_finalization_) { diff --git a/test/integration/integration.h b/test/integration/integration.h index 6047c577dd52..3ec31c3eff66 100644 --- a/test/integration/integration.h +++ b/test/integration/integration.h @@ -433,6 +433,9 @@ class BaseIntegrationTest : protected Logger::Loggable { // The number of worker threads that the test server uses. uint32_t concurrency_{1}; + // The duration of the drain manager graceful drain period. + std::chrono::seconds drain_time_{1}; + // Member variables for xDS testing. FakeUpstream* xds_upstream_{}; FakeHttpConnectionPtr xds_connection_; diff --git a/test/integration/protocol_integration_test.cc b/test/integration/protocol_integration_test.cc index 0099264492d6..1f1d2cf956c8 100644 --- a/test/integration/protocol_integration_test.cc +++ b/test/integration/protocol_integration_test.cc @@ -276,6 +276,10 @@ name: add-trailers-filter // Add a health check filter and verify correct behavior when draining. TEST_P(ProtocolIntegrationTest, DrainClose) { + // The probability of drain close increases over time. With a high timeout, + // the probability will be very low, but the rapid retries prevent this from + // increasing total test time. 
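// A hypothetical model (assumed behaviour, not Envoy source) of why the test sets a very large
// drain time: if drainClose() becomes more likely as the configured drain window elapses, then
// with a 100s window each individual request is almost never drain-closed, so the test retries
// requests until the drain-close counter increments rather than waiting out the window.
#include <algorithm>
#include <chrono>
#include <random>

bool likelyDrainClose(std::chrono::seconds elapsed, std::chrono::seconds drain_time,
                      std::mt19937& rng) {
  const double probability =
      std::min(1.0, static_cast<double>(elapsed.count()) /
                        static_cast<double>(std::max<int64_t>(1, drain_time.count())));
  return std::uniform_real_distribution<double>(0.0, 1.0)(rng) < probability;
}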
+ drain_time_ = std::chrono::seconds(100); config_helper_.addFilter(ConfigHelper::defaultHealthCheckFilter()); initialize(); @@ -287,11 +291,18 @@ TEST_P(ProtocolIntegrationTest, DrainClose) { drain_sequence_started.WaitForNotification(); codec_client_ = makeHttpConnection(lookupPort("http")); - auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); - response->waitForEndStream(); - codec_client_->waitForDisconnect(); + EXPECT_FALSE(codec_client_->disconnected()); + IntegrationStreamDecoderPtr response; + while (!test_server_->counter("http.config_test.downstream_cx_drain_close")->value()) { + response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); + response->waitForEndStream(); + } + EXPECT_EQ(test_server_->counter("http.config_test.downstream_cx_drain_close")->value(), 1L); + + codec_client_->waitForDisconnect(); EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().getStatusValue()); if (downstream_protocol_ == Http::CodecClient::Type::HTTP2) { EXPECT_TRUE(codec_client_->sawGoAway()); diff --git a/test/integration/server.cc b/test/integration/server.cc index 755ca829d4e7..ec9469b919c1 100644 --- a/test/integration/server.cc +++ b/test/integration/server.cc @@ -31,14 +31,15 @@ namespace Server { OptionsImpl createTestOptionsImpl(const std::string& config_path, const std::string& config_yaml, Network::Address::IpVersion ip_version, - FieldValidationConfig validation_config, uint32_t concurrency) { + FieldValidationConfig validation_config, uint32_t concurrency, + std::chrono::seconds drain_time) { OptionsImpl test_options("cluster_name", "node_name", "zone_name", spdlog::level::info); test_options.setConfigPath(config_path); test_options.setConfigYaml(config_yaml); test_options.setLocalAddressIpVersion(ip_version); test_options.setFileFlushIntervalMsec(std::chrono::milliseconds(50)); - test_options.setDrainTime(std::chrono::seconds(1)); + test_options.setDrainTime(drain_time); test_options.setParentShutdownTime(std::chrono::seconds(2)); test_options.setAllowUnkownFields(validation_config.allow_unknown_static_fields); test_options.setRejectUnknownFieldsDynamic(validation_config.reject_unknown_dynamic_fields); @@ -57,14 +58,14 @@ IntegrationTestServerPtr IntegrationTestServer::create( std::function on_server_init_function, bool deterministic, Event::TestTimeSystem& time_system, Api::Api& api, bool defer_listener_finalization, ProcessObjectOptRef process_object, Server::FieldValidationConfig validation_config, - uint32_t concurrency) { + uint32_t concurrency, std::chrono::seconds drain_time) { IntegrationTestServerPtr server{ std::make_unique(time_system, api, config_path)}; if (server_ready_function != nullptr) { server->setOnServerReadyCb(server_ready_function); } server->start(version, on_server_init_function, deterministic, defer_listener_finalization, - process_object, validation_config, concurrency); + process_object, validation_config, concurrency, drain_time); return server; } @@ -83,12 +84,14 @@ void IntegrationTestServer::start(const Network::Address::IpVersion version, bool defer_listener_finalization, ProcessObjectOptRef process_object, Server::FieldValidationConfig validator_config, - uint32_t concurrency) { + uint32_t concurrency, std::chrono::seconds drain_time) { ENVOY_LOG(info, "starting integration test server"); ASSERT(!thread_); - thread_ = api_.threadFactory().createThread( - [version, deterministic, process_object, validator_config, concurrency, this]() -> void { - threadRoutine(version, deterministic, 
process_object, validator_config, concurrency); + thread_ = + api_.threadFactory().createThread([version, deterministic, process_object, validator_config, + concurrency, drain_time, this]() -> void { + threadRoutine(version, deterministic, process_object, validator_config, concurrency, + drain_time); }); // If any steps need to be done prior to workers starting, do them now. E.g., xDS pre-init. @@ -164,9 +167,9 @@ void IntegrationTestServer::serverReady() { void IntegrationTestServer::threadRoutine(const Network::Address::IpVersion version, bool deterministic, ProcessObjectOptRef process_object, Server::FieldValidationConfig validation_config, - uint32_t concurrency) { - OptionsImpl options( - Server::createTestOptionsImpl(config_path_, "", version, validation_config, concurrency)); + uint32_t concurrency, std::chrono::seconds drain_time) { + OptionsImpl options(Server::createTestOptionsImpl(config_path_, "", version, validation_config, + concurrency, drain_time)); Thread::MutexBasicLockable lock; Runtime::RandomGeneratorPtr random_generator; diff --git a/test/integration/server.h b/test/integration/server.h index 9a4367263adb..531a5574e2cb 100644 --- a/test/integration/server.h +++ b/test/integration/server.h @@ -42,7 +42,8 @@ struct FieldValidationConfig { OptionsImpl createTestOptionsImpl(const std::string& config_path, const std::string& config_yaml, Network::Address::IpVersion ip_version, FieldValidationConfig validation_config = FieldValidationConfig(), - uint32_t concurrency = 1); + uint32_t concurrency = 1, + std::chrono::seconds drain_time = std::chrono::seconds(1)); class TestComponentFactory : public ComponentFactory { public: @@ -274,7 +275,7 @@ class IntegrationTestServer : public Logger::Loggable, bool defer_listener_finalization = false, ProcessObjectOptRef process_object = absl::nullopt, Server::FieldValidationConfig validation_config = Server::FieldValidationConfig(), - uint32_t concurrency = 1); + uint32_t concurrency = 1, std::chrono::seconds drain_time = std::chrono::seconds(1)); // Note that the derived class is responsible for tearing down the server in its // destructor. 
~IntegrationTestServer() override; @@ -296,7 +297,8 @@ class IntegrationTestServer : public Logger::Loggable, void start(const Network::Address::IpVersion version, std::function on_server_init_function, bool deterministic, bool defer_listener_finalization, ProcessObjectOptRef process_object, - Server::FieldValidationConfig validation_config, uint32_t concurrency); + Server::FieldValidationConfig validation_config, uint32_t concurrency, + std::chrono::seconds drain_time); void waitForCounterEq(const std::string& name, uint64_t value) override { TestUtility::waitForCounterEq(statStore(), name, value, time_system_); @@ -379,7 +381,8 @@ class IntegrationTestServer : public Logger::Loggable, */ void threadRoutine(const Network::Address::IpVersion version, bool deterministic, ProcessObjectOptRef process_object, - Server::FieldValidationConfig validation_config, uint32_t concurrency); + Server::FieldValidationConfig validation_config, uint32_t concurrency, + std::chrono::seconds drain_time); Event::TestTimeSystem& time_system_; Api::Api& api_; From 3b52fc36373272902d9817f0db97dd2fccc40784 Mon Sep 17 00:00:00 2001 From: Jose Ulises Nino Rivera Date: Mon, 1 Jun 2020 20:13:06 -0700 Subject: [PATCH 268/909] decompressor: fix logs and docs (#11393) Signed-off-by: Jose Nino --- .../extensions/compression/gzip/decompressor/v3/gzip.proto | 5 +++-- .../extensions/compression/gzip/decompressor/v3/gzip.proto | 5 +++-- .../filters/http/decompressor/decompressor_filter.cc | 4 ++-- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/api/envoy/extensions/compression/gzip/decompressor/v3/gzip.proto b/api/envoy/extensions/compression/gzip/decompressor/v3/gzip.proto index 097531ab1e9f..0ab0d947bd01 100644 --- a/api/envoy/extensions/compression/gzip/decompressor/v3/gzip.proto +++ b/api/envoy/extensions/compression/gzip/decompressor/v3/gzip.proto @@ -20,8 +20,9 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; message Gzip { // Value from 9 to 15 that represents the base two logarithmic of the decompressor's window size. // The decompression window size needs to be equal or larger than the compression window size. - // The default is 15 per zlib's manual. For more details about this parameter, please refer to - // zlib manual > inflateInit2. + // The default is 12 to match the default in the + // :ref:`gzip compressor `. + // For more details about this parameter, please refer to `zlib manual `_ > inflateInit2. google.protobuf.UInt32Value window_bits = 1 [(validate.rules).uint32 = {lte: 15 gte: 9}]; // Value for zlib's decompressor output buffer. If not set, defaults to 4096. diff --git a/generated_api_shadow/envoy/extensions/compression/gzip/decompressor/v3/gzip.proto b/generated_api_shadow/envoy/extensions/compression/gzip/decompressor/v3/gzip.proto index 097531ab1e9f..0ab0d947bd01 100644 --- a/generated_api_shadow/envoy/extensions/compression/gzip/decompressor/v3/gzip.proto +++ b/generated_api_shadow/envoy/extensions/compression/gzip/decompressor/v3/gzip.proto @@ -20,8 +20,9 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; message Gzip { // Value from 9 to 15 that represents the base two logarithmic of the decompressor's window size. // The decompression window size needs to be equal or larger than the compression window size. - // The default is 15 per zlib's manual. For more details about this parameter, please refer to - // zlib manual > inflateInit2. + // The default is 12 to match the default in the + // :ref:`gzip compressor `. 
+ // For more details about this parameter, please refer to `zlib manual `_ > inflateInit2. google.protobuf.UInt32Value window_bits = 1 [(validate.rules).uint32 = {lte: 15 gte: 9}]; // Value for zlib's decompressor output buffer. If not set, defaults to 4096. diff --git a/source/extensions/filters/http/decompressor/decompressor_filter.cc b/source/extensions/filters/http/decompressor/decompressor_filter.cc index f8938489a6a3..284b14fe841d 100644 --- a/source/extensions/filters/http/decompressor/decompressor_filter.cc +++ b/source/extensions/filters/http/decompressor/decompressor_filter.cc @@ -106,8 +106,8 @@ Http::FilterHeadersStatus DecompressorFilter::maybeInitDecompress( headers.removeContentLength(); modifyContentEncoding(headers); - ENVOY_STREAM_LOG(debug, "do decompress (without buffering) {}: {}", callbacks, - direction_config.logString(), headers); + ENVOY_STREAM_LOG(debug, "do decompress {}: {}", callbacks, direction_config.logString(), + headers); } else { direction_config.stats().not_decompressed_.inc(); ENVOY_STREAM_LOG(debug, "do not decompress {}: {}", callbacks, direction_config.logString(), From dd96311ebafb3f44012c3d1440a57a333d836dd4 Mon Sep 17 00:00:00 2001 From: Rei Shimizu Date: Wed, 3 Jun 2020 00:11:36 +0900 Subject: [PATCH 269/909] http: initialize HeaderMap from iterator (#11360) Signed-off-by: Shikugawa --- source/common/http/header_map_impl.cc | 12 ------------ source/common/http/header_map_impl.h | 25 ++++++++++++++++++++---- test/common/http/header_map_impl_test.cc | 9 +++++++++ 3 files changed, 30 insertions(+), 16 deletions(-) diff --git a/source/common/http/header_map_impl.cc b/source/common/http/header_map_impl.cc index 1779eef2c304..73de67f6f0e0 100644 --- a/source/common/http/header_map_impl.cc +++ b/source/common/http/header_map_impl.cc @@ -231,18 +231,6 @@ uint64_t HeaderMapImpl::appendToHeader(HeaderString& header, absl::string_view d return data.size() + byte_size; } -void HeaderMapImpl::initFromInitList( - HeaderMap& new_header_map, - const std::initializer_list>& values) { - for (auto& value : values) { - HeaderString key_string; - key_string.setCopy(value.first.get().c_str(), value.first.get().size()); - HeaderString value_string; - value_string.setCopy(value.second.c_str(), value.second.size()); - new_header_map.addViaMove(std::move(key_string), std::move(value_string)); - } -} - void HeaderMapImpl::updateSize(uint64_t from_size, uint64_t to_size) { ASSERT(cached_byte_size_ >= from_size); cached_byte_size_ -= from_size; diff --git a/source/common/http/header_map_impl.h b/source/common/http/header_map_impl.h index 7d456916e230..57fa82593e26 100644 --- a/source/common/http/header_map_impl.h +++ b/source/common/http/header_map_impl.h @@ -5,6 +5,7 @@ #include #include #include +#include #include "envoy/http/header_map.h" @@ -60,9 +61,19 @@ class HeaderMapImpl : public virtual HeaderMap, NonCopyable { // The following "constructors" call virtual functions during construction and must use the // static factory pattern. static void copyFrom(HeaderMap& lhs, const HeaderMap& rhs); - static void - initFromInitList(HeaderMap& new_header_map, - const std::initializer_list>& values); + // The value_type of iterator must be pair, and the first value of them must be LowerCaseString. + // If not, it won't be compiled successfully. 
+ template <class It> static void initFromInitList(HeaderMap& new_header_map, It begin, It end) { + for (auto it = begin; it != end; ++it) { + static_assert(std::is_same<decltype(it->first), LowerCaseString>::value, + "iterator must be pair and the first value of them must be LowerCaseString"); + HeaderString key_string; + key_string.setCopy(it->first.get().c_str(), it->first.get().size()); + HeaderString value_string; + value_string.setCopy(it->second.c_str(), it->second.size()); + new_header_map.addViaMove(std::move(key_string), std::move(value_string)); + } + } // Performs a manual byte size count for test verification. void verifyByteSizeInternalForTest() const; @@ -328,7 +339,13 @@ template <class T> std::unique_ptr<T> createHeaderMap(const std::initializer_list<std::pair<LowerCaseString, std::string>>& values) { auto new_header_map = std::make_unique<T>(); - HeaderMapImpl::initFromInitList(*new_header_map, values); + HeaderMapImpl::initFromInitList(*new_header_map, values.begin(), values.end()); + return new_header_map; +} + +template <class T, class It> std::unique_ptr<T> createHeaderMap(It begin, It end) { + auto new_header_map = std::make_unique<T>(); + HeaderMapImpl::initFromInitList(*new_header_map, begin, end); + return new_header_map; } diff --git a/test/common/http/header_map_impl_test.cc b/test/common/http/header_map_impl_test.cc index 9ef0a7a7af15..7095daf5ec42 100644 --- a/test/common/http/header_map_impl_test.cc +++ b/test/common/http/header_map_impl_test.cc @@ -923,6 +923,15 @@ TEST(HeaderMapImplTest, Get) { } } +TEST(HeaderMapImplTest, CreateHeaderMapFromIterator) { + std::vector<std::pair<LowerCaseString, std::string>> iter_headers{ + {LowerCaseString(Headers::get().Path), "/"}, {LowerCaseString("hello"), "world"}}; + auto headers = createHeaderMap(iter_headers.cbegin(), iter_headers.cend()); + EXPECT_EQ("/", headers->get(LowerCaseString(":path"))->value().getStringView()); + EXPECT_EQ("world", headers->get(LowerCaseString("hello"))->value().getStringView()); + EXPECT_EQ(nullptr, headers->get(LowerCaseString("foo"))); +} + TEST(HeaderMapImplTest, TestHeaderList) { std::array keys{Headers::get().Path, LowerCaseString("hello")}; std::array values{"/", "world"}; From b9aff554ca3a574b9eb5df96cd7a7a325fa4d0f5 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Tue, 2 Jun 2020 11:55:49 -0400 Subject: [PATCH 270/909] http: refactoring http and tcp upstreams (#11349) Moving the choice of http or tcp connection pool from the router to the generic connection pool. This will allow pluggable connection pools to choose to do HTTP or TCP on their own, as well as getting rid of a bunch of ugly variant logic.
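To illustrate the shape of the change, here is a minimal, self-contained sketch of the pattern only (not Envoy code; the real interface is the GenericConnPool added to source/common/router/upstream_request.h in this diff): both pool flavors implement one small interface, so the router asks for "a pool" and never branches on a variant.

    #include <iostream>
    #include <memory>

    // Before: callers held a variant of HTTP/TCP pool pointers and branched on the alternative.
    // After: both pool types implement one interface, so call sites stay identical.
    struct GenericPool {
      virtual ~GenericPool() = default;
      virtual bool initialize() = 0; // e.g. look the real pool up from the cluster manager
      virtual const char* describe() const = 0;
    };

    struct HttpPool : GenericPool {
      bool initialize() override { return true; }
      const char* describe() const override { return "HTTP connection pool"; }
    };

    struct TcpPool : GenericPool {
      bool initialize() override { return true; }
      const char* describe() const override { return "TCP connection pool"; }
    };

    // Mirrors the role of Filter::createConnPool(): the CONNECT/tcp-proxy decision happens once.
    std::unique_ptr<GenericPool> createPool(bool tcp_proxying) {
      if (tcp_proxying) {
        return std::make_unique<TcpPool>();
      }
      return std::make_unique<HttpPool>();
    }

    int main() {
      auto pool = createPool(/*tcp_proxying=*/false);
      if (pool->initialize()) {
        std::cout << pool->describe() << "\n"; // prints "HTTP connection pool"
      }
      return 0;
    }

In the real patch, initialize() takes the cluster manager, route entry, upstream protocol and load-balancer context, and host() exposes the selected upstream host, so the router's no-healthy-upstream handling is unchanged.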
Risk Level: medium (router refactor, ideally no-op) Testing: existing test pass Docs Changes: n/a Release Notes: n/a Signed-off-by: Alyssa Wilk --- source/common/router/router.cc | 72 ++++++--------------- source/common/router/router.h | 6 +- source/common/router/upstream_request.cc | 4 +- source/common/router/upstream_request.h | 28 ++++++-- test/common/router/upstream_request_test.cc | 12 +++- 5 files changed, 55 insertions(+), 67 deletions(-) diff --git a/source/common/router/router.cc b/source/common/router/router.cc index eb0c5650a86c..b17c0b0e2edf 100644 --- a/source/common/router/router.cc +++ b/source/common/router/router.cc @@ -60,16 +60,6 @@ bool schemeIsHttp(const Http::RequestHeaderMap& downstream_headers, constexpr uint64_t TimeoutPrecisionFactor = 100; -Http::ConnectionPool::Instance* -httpPool(absl::variant pool) { - return absl::get(pool); -} - -Tcp::ConnectionPool::Instance* -tcpPool(absl::variant pool) { - return absl::get(pool); -} - } // namespace // Express percentage as [0, TimeoutPrecisionFactor] because stats do not accept floating point @@ -499,13 +489,16 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, } } - Upstream::HostDescriptionConstSharedPtr host; - Filter::HttpOrTcpPool conn_pool = createConnPool(host); + transport_socket_options_ = Network::TransportSocketOptionsUtility::fromFilterState( + *callbacks_->streamInfo().filterState()); + std::unique_ptr generic_conn_pool = createConnPool(); + Http::Protocol protocol = cluster_->upstreamHttpProtocol(callbacks_->streamInfo().protocol()); - if (!host) { + if (!generic_conn_pool->initialize(config_.cm_, *route_entry_, protocol, this)) { sendNoHealthyUpstreamResponse(); return Http::FilterHeadersStatus::StopIteration; } + Upstream::HostDescriptionConstSharedPtr host = generic_conn_pool->host(); if (debug_config && debug_config->append_upstream_host_) { // The hostname and address will be appended to any local or upstream responses from this point, @@ -592,7 +585,8 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, // Hang onto the modify_headers function for later use in handling upstream responses. 
modify_headers_ = modify_headers; - UpstreamRequestPtr upstream_request = createUpstreamRequest(conn_pool); + UpstreamRequestPtr upstream_request = + std::make_unique(*this, std::move(generic_conn_pool)); upstream_request->moveIntoList(std::move(upstream_request), upstream_requests_); upstream_requests_.front()->encodeHeaders(end_stream); if (end_stream) { @@ -602,47 +596,15 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, return Http::FilterHeadersStatus::StopIteration; } -Filter::HttpOrTcpPool Filter::createConnPool(Upstream::HostDescriptionConstSharedPtr& host) { - Filter::HttpOrTcpPool conn_pool; +std::unique_ptr Filter::createConnPool() { const bool should_tcp_proxy = route_entry_->connectConfig().has_value() && downstream_headers_->getMethodValue() == Http::Headers::get().MethodValues.Connect; - if (!should_tcp_proxy) { - conn_pool = getHttpConnPool(); - if (httpPool(conn_pool)) { - host = httpPool(conn_pool)->host(); - } - } else { - transport_socket_options_ = Network::TransportSocketOptionsUtility::fromFilterState( - *callbacks_->streamInfo().filterState()); - conn_pool = config_.cm_.tcpConnPoolForCluster(route_entry_->clusterName(), - Upstream::ResourcePriority::Default, this); - if (tcpPool(conn_pool)) { - host = tcpPool(conn_pool)->host(); - } + if (should_tcp_proxy) { + return std::make_unique(); } - return conn_pool; -} - -UpstreamRequestPtr Filter::createUpstreamRequest(Filter::HttpOrTcpPool conn_pool) { - if (absl::holds_alternative(conn_pool)) { - return std::make_unique(*this, - std::make_unique(*httpPool(conn_pool))); - } - return std::make_unique(*this, - std::make_unique(tcpPool(conn_pool))); -} - -Http::ConnectionPool::Instance* Filter::getHttpConnPool() { - // Choose protocol based on cluster configuration and downstream connection - // Note: Cluster may downgrade HTTP2 to HTTP1 based on runtime configuration. 
- Http::Protocol protocol = cluster_->upstreamHttpProtocol(callbacks_->streamInfo().protocol()); - transport_socket_options_ = Network::TransportSocketOptionsUtility::fromFilterState( - *callbacks_->streamInfo().filterState()); - - return config_.cm_.httpConnPoolForCluster(route_entry_->clusterName(), route_entry_->priority(), - protocol, this); + return std::make_unique(); } void Filter::sendNoHealthyUpstreamResponse() { @@ -1546,13 +1508,17 @@ void Filter::doRetry() { pending_retries_--; Upstream::HostDescriptionConstSharedPtr host; - Filter::HttpOrTcpPool conn_pool = createConnPool(host); - if (!host) { + + std::unique_ptr generic_conn_pool = createConnPool(); + + Http::Protocol protocol = cluster_->upstreamHttpProtocol(callbacks_->streamInfo().protocol()); + if (!generic_conn_pool->initialize(config_.cm_, *route_entry_, protocol, this)) { sendNoHealthyUpstreamResponse(); cleanup(); return; } - UpstreamRequestPtr upstream_request = createUpstreamRequest(conn_pool); + UpstreamRequestPtr upstream_request = + std::make_unique(*this, std::move(generic_conn_pool)); if (include_attempt_count_in_request_) { downstream_headers_->setEnvoyAttemptCount(attempt_count_); diff --git a/source/common/router/router.h b/source/common/router/router.h index 682aaed92ca6..22c0e4f8ac77 100644 --- a/source/common/router/router.h +++ b/source/common/router/router.h @@ -477,12 +477,8 @@ class Filter : Logger::Loggable, Runtime::Loader& runtime, Runtime::RandomGenerator& random, Event::Dispatcher& dispatcher, Upstream::ResourcePriority priority) PURE; - using HttpOrTcpPool = - absl::variant; - HttpOrTcpPool createConnPool(Upstream::HostDescriptionConstSharedPtr& host); - UpstreamRequestPtr createUpstreamRequest(Filter::HttpOrTcpPool conn_pool); + std::unique_ptr createConnPool(); - Http::ConnectionPool::Instance* getHttpConnPool(); void maybeDoShadowing(); bool maybeRetryReset(Http::StreamResetReason reset_reason, UpstreamRequest& upstream_request); uint32_t numRequestsAwaitingHeaders(); diff --git a/source/common/router/upstream_request.cc b/source/common/router/upstream_request.cc index 7c18a77687a3..0f69e617fcde 100644 --- a/source/common/router/upstream_request.cc +++ b/source/common/router/upstream_request.cc @@ -523,7 +523,7 @@ void HttpConnPool::newStream(GenericConnectionPoolCallbacks* callbacks) { // might get deleted inline as well. Only write the returned handle out if it is not nullptr to // deal with this case. Http::ConnectionPool::Cancellable* handle = - conn_pool_.newStream(*callbacks->upstreamRequest(), *this); + conn_pool_->newStream(*callbacks->upstreamRequest(), *this); if (handle) { conn_pool_stream_handle_ = handle; } @@ -548,7 +548,7 @@ bool HttpConnPool::cancelAnyPendingRequest() { return false; } -absl::optional HttpConnPool::protocol() const { return conn_pool_.protocol(); } +absl::optional HttpConnPool::protocol() const { return conn_pool_->protocol(); } void HttpConnPool::onPoolFailure(ConnectionPool::PoolFailureReason reason, absl::string_view transport_failure_reason, diff --git a/source/common/router/upstream_request.h b/source/common/router/upstream_request.h index a2fe46067101..260418447455 100644 --- a/source/common/router/upstream_request.h +++ b/source/common/router/upstream_request.h @@ -35,6 +35,11 @@ class GenericConnPool : public Logger::Loggable { public: virtual ~GenericConnPool() = default; + // Initializes the connection pool. This must be called before the connection + // pool is valid, and can be used. 
+ virtual bool initialize(Upstream::ClusterManager& cm, const RouteEntry& route_entry, + Http::Protocol protocol, Upstream::LoadBalancerContext* ctx) PURE; + // Called to create a new HTTP stream or TCP connection. The implementation // is then responsible for calling either onPoolReady or onPoolFailure on the // supplied GenericConnectionPoolCallbacks. @@ -44,6 +49,8 @@ class GenericConnPool : public Logger::Loggable { virtual bool cancelAnyPendingRequest() PURE; // Optionally returns the protocol for the connection pool. virtual absl::optional protocol() const PURE; + // Returns the host for this conn pool. + virtual Upstream::HostDescriptionConstSharedPtr host() const PURE; }; // An API for the UpstreamRequest to get callbacks from either an HTTP or TCP @@ -196,12 +203,18 @@ class UpstreamRequest : public Logger::Loggable, class HttpConnPool : public GenericConnPool, public Http::ConnectionPool::Callbacks { public: - HttpConnPool(Http::ConnectionPool::Instance& conn_pool) : conn_pool_(conn_pool) {} - // GenericConnPool + bool initialize(Upstream::ClusterManager& cm, const RouteEntry& route_entry, + Http::Protocol protocol, Upstream::LoadBalancerContext* ctx) override { + conn_pool_ = + cm.httpConnPoolForCluster(route_entry.clusterName(), route_entry.priority(), protocol, ctx); + return conn_pool_ != nullptr; + } + void newStream(GenericConnectionPoolCallbacks* callbacks) override; bool cancelAnyPendingRequest() override; absl::optional protocol() const override; + Upstream::HostDescriptionConstSharedPtr host() const override { return conn_pool_->host(); } // Http::ConnectionPool::Callbacks void onPoolFailure(ConnectionPool::PoolFailureReason reason, @@ -213,14 +226,19 @@ class HttpConnPool : public GenericConnPool, public Http::ConnectionPool::Callba private: // Points to the actual connection pool to create streams from. 
- Http::ConnectionPool::Instance& conn_pool_; + Http::ConnectionPool::Instance* conn_pool_{}; Http::ConnectionPool::Cancellable* conn_pool_stream_handle_{}; GenericConnectionPoolCallbacks* callbacks_{}; }; class TcpConnPool : public GenericConnPool, public Tcp::ConnectionPool::Callbacks { public: - TcpConnPool(Tcp::ConnectionPool::Instance* conn_pool) : conn_pool_(conn_pool) {} + bool initialize(Upstream::ClusterManager& cm, const RouteEntry& route_entry, Http::Protocol, + Upstream::LoadBalancerContext* ctx) override { + conn_pool_ = cm.tcpConnPoolForCluster(route_entry.clusterName(), + Upstream::ResourcePriority::Default, ctx); + return conn_pool_ != nullptr; + } void newStream(GenericConnectionPoolCallbacks* callbacks) override { callbacks_ = callbacks; @@ -236,7 +254,7 @@ class TcpConnPool : public GenericConnPool, public Tcp::ConnectionPool::Callback return false; } absl::optional protocol() const override { return absl::nullopt; } - + Upstream::HostDescriptionConstSharedPtr host() const override { return conn_pool_->host(); } // Tcp::ConnectionPool::Callbacks void onPoolFailure(ConnectionPool::PoolFailureReason reason, Upstream::HostDescriptionConstSharedPtr host) override { diff --git a/test/common/router/upstream_request_test.cc b/test/common/router/upstream_request_test.cc index 1f36264c15e5..4fa02a4cc56c 100644 --- a/test/common/router/upstream_request_test.cc +++ b/test/common/router/upstream_request_test.cc @@ -29,6 +29,10 @@ class MockGenericConnPool : public GenericConnPool { MOCK_METHOD(void, newStream, (GenericConnectionPoolCallbacks * request)); MOCK_METHOD(bool, cancelAnyPendingRequest, ()); MOCK_METHOD(absl::optional, protocol, (), (const)); + MOCK_METHOD(bool, initialize, + (Upstream::ClusterManager&, const RouteEntry&, Http::Protocol, + Upstream::LoadBalancerContext*)); + MOCK_METHOD(Upstream::HostDescriptionConstSharedPtr, host, (), (const)); }; class MockGenericConnectionPoolCallbacks : public GenericConnectionPoolCallbacks { @@ -107,8 +111,12 @@ class MockRouterFilterInterface : public RouterFilterInterface { class TcpConnPoolTest : public ::testing::Test { public: - TcpConnPoolTest() - : conn_pool_(&mock_pool_), host_(std::make_shared>()) {} + TcpConnPoolTest() : host_(std::make_shared>()) { + NiceMock route_entry; + NiceMock cm; + EXPECT_CALL(cm, tcpConnPoolForCluster(_, _, _)).WillOnce(Return(&mock_pool_)); + EXPECT_TRUE(conn_pool_.initialize(cm, route_entry, Http::Protocol::Http11, nullptr)); + } TcpConnPool conn_pool_; Tcp::ConnectionPool::MockInstance mock_pool_; From 6a69cc5e228a9a25ef51bd41461d27100f65f4f6 Mon Sep 17 00:00:00 2001 From: Phillip Huang Date: Tue, 2 Jun 2020 11:44:50 -0700 Subject: [PATCH 271/909] grpc-json: preserve request method in x-envoy-original-method header (#11126) Commit Message: grpc-json: preserve http request method in `x-envoy-original-method` header so that applications have access to it. Additional Description: The grpc-json transcoder currently forwards HTTP path to applications via "x-envoy-original-path" header. We would find it useful if it also forwarded the HTTP method. 
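For example, a condensed version of the expectations added to the unit tests below (the bookstore test fixture and header-map setup are assumed from the existing tests, so this is illustrative only):

    Http::TestRequestHeaderMapImpl request_headers{
        {"content-type", "application/json"}, {":method", "POST"}, {":path", "/shelf"}};
    EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false));
    // The transcoded upstream request still carries both original values for the application.
    EXPECT_EQ("/shelf", request_headers.get_("x-envoy-original-path"));
    EXPECT_EQ("POST", request_headers.get_("x-envoy-original-method"));
    EXPECT_EQ("/bookstore.Bookstore/CreateShelf", request_headers.get_(":path"));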
Risk Level: Low Testing: Updated grpc-json-transcoder unit tests Docs Changes: Added docs Release Notes: Added release notes Signed-off-by: Phillip Huang --- .../http/http_filters/grpc_json_transcoder_filter.rst | 9 +++++++++ docs/root/version_history/current.rst | 1 + source/common/http/headers.h | 1 + .../http/grpc_json_transcoder/json_transcoder_filter.cc | 1 + .../grpc_json_transcoder/json_transcoder_filter_test.cc | 9 +++++++++ 5 files changed, 21 insertions(+) diff --git a/docs/root/configuration/http/http_filters/grpc_json_transcoder_filter.rst b/docs/root/configuration/http/http_filters/grpc_json_transcoder_filter.rst index 3e01c544e819..1eb2fe082718 100644 --- a/docs/root/configuration/http/http_filters/grpc_json_transcoder_filter.rst +++ b/docs/root/configuration/http/http_filters/grpc_json_transcoder_filter.rst @@ -87,6 +87,15 @@ can be send by the gRPC server in the server streaming case. In this case, HTTP response header `Content-Type` will use the `content-type` from the first `google.api.HttpBody `. +Headers +-------- + +gRPC-JSON forwards the following headers to the gRPC server: + +* `x-envoy-original-path`, containing the value of the original path of HTTP request +* `x-envoy-original-method`, containing the value of the original method of HTTP request + + Sample Envoy configuration -------------------------- diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 4d1d89439fb9..286c9b8aa0c1 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -59,6 +59,7 @@ New Features * grpc: added support for Google gRPC :ref:`custom channel arguments `. * grpc-json: added support for streaming response using `google.api.HttpBody `_. +* grpc-json: send a `x-envoy-original-method` header to grpc services. * gzip filter: added option to set zlib's next output buffer size. * health checks: allow configuring health check transport sockets by specifying :ref:`transport socket match criteria `. * http: added :ref:`local_reply config ` to http_connection_manager to customize :ref:`local reply `. 
diff --git a/source/common/http/headers.h b/source/common/http/headers.h index 10b87e3da092..8c9abe61aa07 100644 --- a/source/common/http/headers.h +++ b/source/common/http/headers.h @@ -90,6 +90,7 @@ class HeaderValues { const LowerCaseString EnvoyMaxRetries{absl::StrCat(prefix(), "-max-retries")}; const LowerCaseString EnvoyNotForwarded{absl::StrCat(prefix(), "-not-forwarded")}; const LowerCaseString EnvoyOriginalDstHost{absl::StrCat(prefix(), "-original-dst-host")}; + const LowerCaseString EnvoyOriginalMethod{absl::StrCat(prefix(), "-original-method")}; const LowerCaseString EnvoyOriginalPath{absl::StrCat(prefix(), "-original-path")}; const LowerCaseString EnvoyOverloaded{absl::StrCat(prefix(), "-overloaded")}; const LowerCaseString EnvoyRateLimited{absl::StrCat(prefix(), "-ratelimited")}; diff --git a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc index 526bf1ac8abe..8de5069993d4 100644 --- a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc +++ b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc @@ -387,6 +387,7 @@ Http::FilterHeadersStatus JsonTranscoderFilter::decodeHeaders(Http::RequestHeade headers.removeContentLength(); headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Grpc); headers.setEnvoyOriginalPath(headers.getPathValue()); + headers.addReferenceKey(Http::Headers::get().EnvoyOriginalMethod, headers.getMethodValue()); headers.setPath("/" + method_->descriptor_->service()->full_name() + "/" + method_->descriptor_->name()); headers.setReferenceMethod(Http::Headers::get().MethodValues.Post); diff --git a/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc b/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc index e820cb04f273..87caa2275c8d 100644 --- a/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc +++ b/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc @@ -388,6 +388,7 @@ TEST_F(GrpcJsonTranscoderFilterTest, TranscodingUnaryPost) { EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false)); EXPECT_EQ("application/grpc", request_headers.get_("content-type")); EXPECT_EQ("/shelf", request_headers.get_("x-envoy-original-path")); + EXPECT_EQ("POST", request_headers.get_("x-envoy-original-method")); EXPECT_EQ("/bookstore.Bookstore/CreateShelf", request_headers.get_(":path")); EXPECT_EQ("trailers", request_headers.get_("te")); @@ -454,6 +455,7 @@ TEST_F(GrpcJsonTranscoderFilterTest, TranscodingUnaryPostWithPackageServiceMetho EXPECT_EQ("application/grpc", request_headers.get_("content-type")); EXPECT_EQ("/bookstore.Bookstore/CreateShelfWithPackageServiceAndMethod", request_headers.get_("x-envoy-original-path")); + EXPECT_EQ("POST", request_headers.get_("x-envoy-original-method")); EXPECT_EQ("/bookstore.Bookstore/CreateShelfWithPackageServiceAndMethod", request_headers.get_(":path")); EXPECT_EQ("trailers", request_headers.get_("te")); @@ -597,6 +599,7 @@ TEST_F(GrpcJsonTranscoderFilterSkipRecalculatingTest, TranscodingUnaryPostSkipRe EXPECT_EQ("application/grpc", request_headers.get_("content-type")); EXPECT_EQ("/shelf", request_headers.get_("x-envoy-original-path")); + EXPECT_EQ("POST", request_headers.get_("x-envoy-original-method")); EXPECT_EQ("/bookstore.Bookstore/CreateShelf", request_headers.get_(":path")); EXPECT_EQ("trailers", 
request_headers.get_("te")); @@ -674,6 +677,7 @@ TEST_F(GrpcJsonTranscoderFilterTest, TranscodingUnaryWithHttpBodyAsOutput) { EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false)); EXPECT_EQ("application/grpc", request_headers.get_("content-type")); EXPECT_EQ("/index", request_headers.get_("x-envoy-original-path")); + EXPECT_EQ("GET", request_headers.get_("x-envoy-original-method")); EXPECT_EQ("/bookstore.Bookstore/GetIndex", request_headers.get_(":path")); EXPECT_EQ("trailers", request_headers.get_("te")); @@ -708,6 +712,7 @@ TEST_F(GrpcJsonTranscoderFilterTest, TranscodingUnaryWithHttpBodyAsOutputAndSpli EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false)); EXPECT_EQ("application/grpc", request_headers.get_("content-type")); EXPECT_EQ("/index", request_headers.get_("x-envoy-original-path")); + EXPECT_EQ("GET", request_headers.get_("x-envoy-original-method")); EXPECT_EQ("/bookstore.Bookstore/GetIndex", request_headers.get_(":path")); EXPECT_EQ("trailers", request_headers.get_("te")); @@ -753,6 +758,7 @@ TEST_F(GrpcJsonTranscoderFilterTest, TranscodingUnaryPostWithHttpBody) { EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false)); EXPECT_EQ("application/grpc", request_headers.get_("content-type")); EXPECT_EQ("/postBody?arg=hi", request_headers.get_("x-envoy-original-path")); + EXPECT_EQ("POST", request_headers.get_("x-envoy-original-method")); EXPECT_EQ("/bookstore.Bookstore/PostBody", request_headers.get_(":path")); EXPECT_EQ("trailers", request_headers.get_("te")); @@ -800,6 +806,7 @@ TEST_F(GrpcJsonTranscoderFilterTest, TranscodingStreamPostWithHttpBody) { EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false)); EXPECT_EQ("application/grpc", request_headers.get_("content-type")); EXPECT_EQ("/streamBody?arg=hi", request_headers.get_("x-envoy-original-path")); + EXPECT_EQ("POST", request_headers.get_("x-envoy-original-method")); EXPECT_EQ("/bookstore.Bookstore/StreamBody", request_headers.get_(":path")); EXPECT_EQ("trailers", request_headers.get_("te")); @@ -855,6 +862,7 @@ TEST_F(GrpcJsonTranscoderFilterTest, TranscodingStreamWithHttpBodyAsOutput) { EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false)); EXPECT_EQ("application/grpc", request_headers.get_("content-type")); EXPECT_EQ("/indexStream", request_headers.get_("x-envoy-original-path")); + EXPECT_EQ("GET", request_headers.get_("x-envoy-original-method")); EXPECT_EQ("/bookstore.Bookstore/GetIndexStream", request_headers.get_(":path")); EXPECT_EQ("trailers", request_headers.get_("te")); @@ -907,6 +915,7 @@ TEST_F(GrpcJsonTranscoderFilterTest, TranscodingStreamWithFragmentedHttpBody) { EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false)); EXPECT_EQ("application/grpc", request_headers.get_("content-type")); EXPECT_EQ("/indexStream", request_headers.get_("x-envoy-original-path")); + EXPECT_EQ("GET", request_headers.get_("x-envoy-original-method")); EXPECT_EQ("/bookstore.Bookstore/GetIndexStream", request_headers.get_(":path")); EXPECT_EQ("trailers", request_headers.get_("te")); From 35702fed462f63a0a237cfbfdf26184272207c11 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Tue, 2 Jun 2020 15:21:34 -0400 Subject: [PATCH 272/909] docs: fixing broken link (#11406) Signed-off-by: Alyssa Wilk --- bazel/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bazel/README.md b/bazel/README.md 
index 237fb7bfc3eb..9bad8cf1431a 100644 --- a/bazel/README.md +++ b/bazel/README.md @@ -532,7 +532,7 @@ The following optional features can be enabled on the Bazel build command-line: * Perf annotation with `--define perf_annotation=enabled` (see source/common/common/perf_annotation.h for details). * BoringSSL can be built in a FIPS-compliant mode with `--define boringssl=fips` - (see [FIPS 140-2](https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/ssl.html#fips-140-2) for details). + (see [FIPS 140-2](https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/security/ssl#fips-140-2) for details). * ASSERT() can be configured to log failures and increment a stat counter in a release build with `--define log_debug_assert_in_release=enabled`. The default behavior is to compile debug assertions out of release builds so that the condition is not evaluated. This option has no effect in debug builds. From 67649af8d4881fb7593c728f181eff393f85a638 Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Tue, 2 Jun 2020 18:21:20 -0700 Subject: [PATCH 273/909] build: use rules_cc and use buildifier -lint to enforce it (#11399) Commit Message: Partially address #9488. Resurrected #10597. Additional Description: Risk Level: Low Testing: CI Docs Changes: N/A Release Notes: N/A Signed-off-by: Lizan Zhou --- api/bazel/BUILD | 4 ++-- api/bazel/api_build_system.bzl | 5 ++-- api/bazel/repositories.bzl | 1 - api/envoy/service/auth/v2alpha/BUILD | 4 ++-- api/test/build/BUILD | 4 ++-- api/test/validate/BUILD | 4 ++-- api/tools/BUILD | 2 ++ api/versioning/BUILD | 4 ++-- bazel/BUILD | 6 ++--- bazel/envoy_binary.bzl | 4 +++- bazel/envoy_library.bzl | 10 ++++---- bazel/envoy_test.bzl | 18 +++++++-------- bazel/external/BUILD | 2 ++ bazel/external/apache_thrift.BUILD | 2 ++ bazel/external/boringssl_fips.BUILD | 5 ++-- bazel/external/compiler_rt.BUILD | 2 ++ bazel/external/fmtlib.BUILD | 2 ++ bazel/external/http-parser.BUILD | 2 ++ bazel/external/jinja.BUILD | 2 ++ bazel/external/libcircllhist.BUILD | 2 ++ bazel/external/libprotobuf_mutator.BUILD | 2 ++ bazel/external/markupsafe.BUILD | 2 ++ bazel/external/quiche.BUILD | 20 ++++++++-------- bazel/external/rapidjson.BUILD | 2 ++ bazel/external/spdlog.BUILD | 2 ++ bazel/external/sqlparser.BUILD | 2 ++ bazel/external/tclap.BUILD | 2 ++ .../twitter_common_finagle_thrift.BUILD | 2 ++ bazel/external/twitter_common_lang.BUILD | 2 ++ bazel/external/twitter_common_rpc.BUILD | 2 ++ bazel/external/wee8.BUILD | 5 ++-- bazel/external/xxhash.BUILD | 2 ++ bazel/foreign_cc/BUILD | 5 ++-- bazel/genrule_repository.bzl | 6 ++--- bazel/repositories.bzl | 1 + bazel/repository_locations.bzl | 8 +++++++ configs/BUILD | 7 +++--- examples/BUILD | 4 ++-- generated_api_shadow/bazel/BUILD | 4 ++-- .../bazel/api_build_system.bzl | 5 ++-- generated_api_shadow/bazel/repositories.bzl | 1 - .../envoy/service/auth/v2alpha/BUILD | 4 ++-- include/envoy/access_log/BUILD | 4 ++-- include/envoy/api/BUILD | 4 ++-- include/envoy/buffer/BUILD | 4 ++-- include/envoy/common/BUILD | 4 ++-- include/envoy/common/crypto/BUILD | 4 ++-- include/envoy/compression/compressor/BUILD | 4 ++-- include/envoy/compression/decompressor/BUILD | 4 ++-- include/envoy/config/BUILD | 4 ++-- include/envoy/event/BUILD | 4 ++-- include/envoy/filesystem/BUILD | 4 ++-- include/envoy/formatter/BUILD | 4 ++-- include/envoy/grpc/BUILD | 4 ++-- include/envoy/http/BUILD | 4 ++-- include/envoy/init/BUILD | 4 ++-- include/envoy/json/BUILD | 4 ++-- include/envoy/local_info/BUILD | 4 ++-- include/envoy/network/BUILD | 4 ++-- 
include/envoy/protobuf/BUILD | 4 ++-- include/envoy/ratelimit/BUILD | 4 ++-- include/envoy/registry/BUILD | 4 ++-- include/envoy/router/BUILD | 4 ++-- include/envoy/runtime/BUILD | 4 ++-- include/envoy/secret/BUILD | 4 ++-- include/envoy/server/BUILD | 4 ++-- include/envoy/singleton/BUILD | 4 ++-- include/envoy/ssl/BUILD | 4 ++-- include/envoy/ssl/private_key/BUILD | 4 ++-- include/envoy/stats/BUILD | 4 ++-- include/envoy/stream_info/BUILD | 4 ++-- include/envoy/tcp/BUILD | 4 ++-- include/envoy/thread/BUILD | 4 ++-- include/envoy/thread_local/BUILD | 4 ++-- include/envoy/tracing/BUILD | 4 ++-- include/envoy/upstream/BUILD | 4 ++-- restarter/BUILD | 4 ++-- source/common/access_log/BUILD | 4 ++-- source/common/api/BUILD | 4 ++-- source/common/buffer/BUILD | 4 ++-- source/common/chromium_url/BUILD | 4 ++-- source/common/common/BUILD | 4 ++-- source/common/config/BUILD | 4 ++-- source/common/crypto/BUILD | 4 ++-- source/common/event/BUILD | 4 ++-- source/common/filesystem/BUILD | 4 ++-- source/common/formatter/BUILD | 4 ++-- source/common/grpc/BUILD | 4 ++-- source/common/html/BUILD | 4 ++-- source/common/http/BUILD | 4 ++-- source/common/http/http1/BUILD | 4 ++-- source/common/http/http2/BUILD | 4 ++-- source/common/http/http3/BUILD | 4 ++-- source/common/init/BUILD | 4 ++-- source/common/json/BUILD | 4 ++-- source/common/local_info/BUILD | 4 ++-- source/common/local_reply/BUILD | 4 ++-- source/common/memory/BUILD | 4 ++-- source/common/network/BUILD | 4 ++-- source/common/profiler/BUILD | 4 ++-- source/common/protobuf/BUILD | 5 ++-- source/common/router/BUILD | 4 ++-- source/common/runtime/BUILD | 4 ++-- source/common/secret/BUILD | 4 ++-- source/common/shared_pool/BUILD | 4 ++-- source/common/signal/BUILD | 4 ++-- source/common/singleton/BUILD | 4 ++-- source/common/ssl/BUILD | 4 ++-- source/common/stats/BUILD | 4 ++-- source/common/stream_info/BUILD | 4 ++-- source/common/tcp/BUILD | 4 ++-- source/common/tcp_proxy/BUILD | 4 ++-- source/common/thread_local/BUILD | 4 ++-- source/common/tracing/BUILD | 4 ++-- source/common/upstream/BUILD | 4 ++-- source/exe/BUILD | 4 ++-- source/extensions/access_loggers/BUILD | 4 ++-- source/extensions/access_loggers/common/BUILD | 8 +++---- source/extensions/access_loggers/file/BUILD | 10 ++++---- source/extensions/access_loggers/grpc/BUILD | 10 ++++---- source/extensions/clusters/BUILD | 4 ++-- source/extensions/clusters/aggregate/BUILD | 4 ++-- .../clusters/dynamic_forward_proxy/BUILD | 4 ++-- source/extensions/clusters/redis/BUILD | 4 ++-- source/extensions/common/BUILD | 4 ++-- source/extensions/common/aws/BUILD | 4 ++-- source/extensions/common/crypto/BUILD | 4 ++-- .../common/dynamic_forward_proxy/BUILD | 4 ++-- source/extensions/common/proxy_protocol/BUILD | 4 ++-- source/extensions/common/redis/BUILD | 12 +++++----- source/extensions/common/tap/BUILD | 4 ++-- source/extensions/common/wasm/BUILD | 4 ++-- source/extensions/common/wasm/null/BUILD | 4 ++-- source/extensions/common/wasm/v8/BUILD | 4 ++-- .../compression/common/compressor/BUILD | 4 ++-- .../compression/common/decompressor/BUILD | 4 ++-- .../extensions/compression/gzip/common/BUILD | 4 ++-- .../compression/gzip/compressor/BUILD | 4 ++-- .../compression/gzip/decompressor/BUILD | 4 ++-- source/extensions/filters/common/expr/BUILD | 4 ++-- .../extensions/filters/common/ext_authz/BUILD | 4 ++-- source/extensions/filters/common/fault/BUILD | 4 ++-- source/extensions/filters/common/lua/BUILD | 4 ++-- .../filters/common/original_src/BUILD | 8 +++---- .../extensions/filters/common/ratelimit/BUILD 
| 4 ++-- source/extensions/filters/common/rbac/BUILD | 4 ++-- source/extensions/filters/http/BUILD | 4 ++-- .../filters/http/adaptive_concurrency/BUILD | 12 +++++----- .../adaptive_concurrency/controller/BUILD | 12 +++++----- .../extensions/filters/http/aws_lambda/BUILD | 10 ++++---- .../filters/http/aws_request_signing/BUILD | 10 ++++---- source/extensions/filters/http/buffer/BUILD | 10 ++++---- source/extensions/filters/http/cache/BUILD | 8 +++---- .../http/cache/simple_http_cache/BUILD | 8 +++---- source/extensions/filters/http/common/BUILD | 4 ++-- .../filters/http/common/compressor/BUILD | 4 ++-- .../extensions/filters/http/compressor/BUILD | 10 ++++---- source/extensions/filters/http/cors/BUILD | 10 ++++---- source/extensions/filters/http/csrf/BUILD | 10 ++++---- .../filters/http/decompressor/BUILD | 10 ++++---- .../filters/http/dynamic_forward_proxy/BUILD | 4 ++-- source/extensions/filters/http/dynamo/BUILD | 10 ++++---- .../extensions/filters/http/ext_authz/BUILD | 10 ++++---- source/extensions/filters/http/fault/BUILD | 10 ++++---- .../filters/http/grpc_http1_bridge/BUILD | 10 ++++---- .../http/grpc_http1_reverse_bridge/BUILD | 4 ++-- .../filters/http/grpc_json_transcoder/BUILD | 10 ++++---- .../extensions/filters/http/grpc_stats/BUILD | 8 +++---- source/extensions/filters/http/grpc_web/BUILD | 10 ++++---- source/extensions/filters/http/gzip/BUILD | 10 ++++---- .../filters/http/header_to_metadata/BUILD | 10 ++++---- .../filters/http/health_check/BUILD | 10 ++++---- .../extensions/filters/http/ip_tagging/BUILD | 10 ++++---- .../extensions/filters/http/jwt_authn/BUILD | 4 ++-- source/extensions/filters/http/lua/BUILD | 10 ++++---- .../extensions/filters/http/on_demand/BUILD | 8 +++---- .../filters/http/original_src/BUILD | 8 +++---- .../extensions/filters/http/ratelimit/BUILD | 10 ++++---- source/extensions/filters/http/rbac/BUILD | 4 ++-- source/extensions/filters/http/router/BUILD | 10 ++++---- source/extensions/filters/http/squash/BUILD | 10 ++++---- source/extensions/filters/http/tap/BUILD | 10 ++++---- source/extensions/filters/listener/BUILD | 4 ++-- .../filters/listener/http_inspector/BUILD | 8 +++---- .../filters/listener/original_dst/BUILD | 10 ++++---- .../filters/listener/original_src/BUILD | 8 +++---- .../filters/listener/proxy_protocol/BUILD | 8 +++---- .../filters/listener/tls_inspector/BUILD | 10 ++++---- source/extensions/filters/network/BUILD | 4 ++-- .../filters/network/client_ssl_auth/BUILD | 10 ++++---- .../extensions/filters/network/common/BUILD | 4 ++-- .../filters/network/common/redis/BUILD | 4 ++-- .../filters/network/direct_response/BUILD | 10 ++++---- .../filters/network/dubbo_proxy/BUILD | 4 ++-- .../filters/network/dubbo_proxy/filters/BUILD | 4 ++-- .../filters/network/dubbo_proxy/router/BUILD | 4 ++-- source/extensions/filters/network/echo/BUILD | 10 ++++---- .../filters/network/ext_authz/BUILD | 10 ++++---- .../network/http_connection_manager/BUILD | 12 +++++----- source/extensions/filters/network/kafka/BUILD | 11 +++++---- .../filters/network/local_ratelimit/BUILD | 10 ++++---- .../filters/network/mongo_proxy/BUILD | 10 ++++---- .../filters/network/mysql_proxy/BUILD | 10 ++++---- .../filters/network/postgres_proxy/BUILD | 14 +++++------ .../filters/network/ratelimit/BUILD | 10 ++++---- source/extensions/filters/network/rbac/BUILD | 4 ++-- .../filters/network/redis_proxy/BUILD | 12 +++++----- .../filters/network/rocketmq_proxy/BUILD | 4 ++-- .../network/rocketmq_proxy/router/BUILD | 4 ++-- .../filters/network/sni_cluster/BUILD | 4 ++-- 
.../network/sni_dynamic_forward_proxy/BUILD | 4 ++-- .../filters/network/tcp_proxy/BUILD | 10 ++++---- .../filters/network/thrift_proxy/BUILD | 4 ++-- .../network/thrift_proxy/filters/BUILD | 4 ++-- .../thrift_proxy/filters/ratelimit/BUILD | 4 ++-- .../filters/network/thrift_proxy/router/BUILD | 4 ++-- .../filters/network/zookeeper_proxy/BUILD | 10 ++++---- .../extensions/filters/udp/dns_filter/BUILD | 4 ++-- source/extensions/filters/udp/udp_proxy/BUILD | 4 ++-- source/extensions/grpc_credentials/BUILD | 4 ++-- .../extensions/grpc_credentials/aws_iam/BUILD | 8 +++---- .../extensions/grpc_credentials/example/BUILD | 8 +++---- .../file_based_metadata/BUILD | 8 +++---- source/extensions/health_checkers/BUILD | 4 ++-- source/extensions/health_checkers/redis/BUILD | 8 +++---- source/extensions/internal_redirect/BUILD | 4 ++-- .../allow_listed_routes/BUILD | 4 ++-- .../internal_redirect/previous_routes/BUILD | 4 ++-- .../internal_redirect/safe_cross_scheme/BUILD | 4 ++-- source/extensions/quic_listeners/quiche/BUILD | 4 ++-- .../quic_listeners/quiche/platform/BUILD | 4 ++-- source/extensions/resource_monitors/BUILD | 4 ++-- .../extensions/resource_monitors/common/BUILD | 4 ++-- .../resource_monitors/fixed_heap/BUILD | 4 ++-- .../resource_monitors/injected_resource/BUILD | 4 ++-- source/extensions/retry/host/BUILD | 4 ++-- .../retry/host/omit_canary_hosts/BUILD | 4 ++-- .../retry/host/omit_host_metadata/BUILD | 4 ++-- .../retry/host/previous_hosts/BUILD | 4 ++-- source/extensions/retry/priority/BUILD | 4 ++-- .../retry/priority/previous_priorities/BUILD | 4 ++-- source/extensions/stat_sinks/BUILD | 4 ++-- .../extensions/stat_sinks/common/statsd/BUILD | 4 ++-- source/extensions/stat_sinks/dog_statsd/BUILD | 10 ++++---- source/extensions/stat_sinks/hystrix/BUILD | 8 +++---- .../stat_sinks/metrics_service/BUILD | 8 +++---- source/extensions/stat_sinks/statsd/BUILD | 8 +++---- source/extensions/tracers/BUILD | 4 ++-- source/extensions/tracers/common/BUILD | 4 ++-- source/extensions/tracers/common/ot/BUILD | 4 ++-- source/extensions/tracers/datadog/BUILD | 8 +++---- source/extensions/tracers/dynamic_ot/BUILD | 8 +++---- source/extensions/tracers/lightstep/BUILD | 8 +++---- source/extensions/tracers/opencensus/BUILD | 8 +++---- source/extensions/tracers/xray/BUILD | 8 +++---- source/extensions/tracers/zipkin/BUILD | 8 +++---- source/extensions/transport_sockets/BUILD | 4 ++-- .../extensions/transport_sockets/alts/BUILD | 10 ++++---- .../transport_sockets/raw_buffer/BUILD | 8 +++---- source/extensions/transport_sockets/tap/BUILD | 8 +++---- source/extensions/transport_sockets/tls/BUILD | 8 +++---- .../transport_sockets/tls/private_key/BUILD | 4 ++-- source/server/BUILD | 4 ++-- source/server/admin/BUILD | 4 ++-- source/server/config_validation/BUILD | 4 ++-- test/BUILD | 4 ++-- test/benchmark/BUILD | 4 ++-- test/common/access_log/BUILD | 4 ++-- test/common/buffer/BUILD | 4 ++-- test/common/common/BUILD | 4 ++-- test/common/config/BUILD | 4 ++-- test/common/crypto/BUILD | 4 ++-- test/common/event/BUILD | 4 ++-- test/common/filesystem/BUILD | 4 ++-- test/common/formatter/BUILD | 4 ++-- test/common/grpc/BUILD | 4 ++-- test/common/html/BUILD | 4 ++-- test/common/http/BUILD | 4 ++-- test/common/http/http1/BUILD | 4 ++-- test/common/http/http2/BUILD | 4 ++-- test/common/init/BUILD | 4 ++-- test/common/json/BUILD | 4 ++-- .../json/config_schemas_test_data/BUILD | 4 ++-- test/common/local_reply/BUILD | 4 ++-- test/common/memory/BUILD | 4 ++-- test/common/network/BUILD | 4 ++-- test/common/protobuf/BUILD | 4 
++-- test/common/router/BUILD | 4 ++-- test/common/runtime/BUILD | 4 ++-- test/common/secret/BUILD | 4 ++-- test/common/shared_pool/BUILD | 4 ++-- test/common/signal/BUILD | 4 ++-- test/common/singleton/BUILD | 4 ++-- test/common/stats/BUILD | 4 ++-- test/common/stream_info/BUILD | 4 ++-- test/common/tcp/BUILD | 4 ++-- test/common/tcp_proxy/BUILD | 4 ++-- test/common/thread_local/BUILD | 4 ++-- test/common/tracing/BUILD | 4 ++-- test/common/upstream/BUILD | 4 ++-- test/config/BUILD | 4 ++-- test/config/integration/BUILD | 4 ++-- test/config/integration/certs/BUILD | 4 ++-- test/config_test/BUILD | 4 ++-- test/dependencies/BUILD | 4 ++-- test/exe/BUILD | 4 ++-- test/extensions/access_loggers/common/BUILD | 4 ++-- test/extensions/access_loggers/file/BUILD | 4 ++-- test/extensions/access_loggers/grpc/BUILD | 4 ++-- test/extensions/clusters/aggregate/BUILD | 4 ++-- .../clusters/dynamic_forward_proxy/BUILD | 4 ++-- test/extensions/clusters/redis/BUILD | 4 ++-- test/extensions/common/BUILD | 4 ++-- test/extensions/common/aws/BUILD | 4 ++-- .../common/dynamic_forward_proxy/BUILD | 4 ++-- test/extensions/common/proxy_protocol/BUILD | 4 ++-- test/extensions/common/redis/BUILD | 4 ++-- test/extensions/common/tap/BUILD | 4 ++-- test/extensions/common/wasm/BUILD | 4 ++-- test/extensions/common/wasm/test_data/BUILD | 4 ++-- test/extensions/compression/gzip/BUILD | 4 ++-- .../compression/gzip/compressor/BUILD | 4 ++-- .../compression/gzip/decompressor/BUILD | 4 ++-- test/extensions/filters/common/expr/BUILD | 4 ++-- .../extensions/filters/common/ext_authz/BUILD | 4 ++-- test/extensions/filters/common/fault/BUILD | 4 ++-- test/extensions/filters/common/lua/BUILD | 4 ++-- .../filters/common/original_src/BUILD | 4 ++-- .../extensions/filters/common/ratelimit/BUILD | 4 ++-- test/extensions/filters/common/rbac/BUILD | 4 ++-- .../filters/http/adaptive_concurrency/BUILD | 4 ++-- .../adaptive_concurrency/controller/BUILD | 4 ++-- test/extensions/filters/http/aws_lambda/BUILD | 4 ++-- .../filters/http/aws_request_signing/BUILD | 4 ++-- test/extensions/filters/http/buffer/BUILD | 4 ++-- test/extensions/filters/http/cache/BUILD | 4 ++-- .../http/cache/simple_http_cache/BUILD | 4 ++-- test/extensions/filters/http/common/BUILD | 4 ++-- .../filters/http/common/compressor/BUILD | 4 ++-- .../extensions/filters/http/common/fuzz/BUILD | 4 ++-- test/extensions/filters/http/compressor/BUILD | 4 ++-- test/extensions/filters/http/cors/BUILD | 4 ++-- test/extensions/filters/http/csrf/BUILD | 4 ++-- .../filters/http/decompressor/BUILD | 4 ++-- .../filters/http/dynamic_forward_proxy/BUILD | 4 ++-- test/extensions/filters/http/dynamo/BUILD | 4 ++-- test/extensions/filters/http/ext_authz/BUILD | 4 ++-- test/extensions/filters/http/fault/BUILD | 4 ++-- .../filters/http/grpc_http1_bridge/BUILD | 4 ++-- .../http/grpc_http1_reverse_bridge/BUILD | 4 ++-- .../filters/http/grpc_json_transcoder/BUILD | 4 ++-- test/extensions/filters/http/grpc_stats/BUILD | 4 ++-- test/extensions/filters/http/grpc_web/BUILD | 4 ++-- test/extensions/filters/http/gzip/BUILD | 4 ++-- .../filters/http/header_to_metadata/BUILD | 4 ++-- .../filters/http/health_check/BUILD | 4 ++-- test/extensions/filters/http/ip_tagging/BUILD | 4 ++-- test/extensions/filters/http/jwt_authn/BUILD | 4 ++-- test/extensions/filters/http/lua/BUILD | 4 ++-- test/extensions/filters/http/on_demand/BUILD | 4 ++-- .../filters/http/original_src/BUILD | 4 ++-- test/extensions/filters/http/ratelimit/BUILD | 4 ++-- test/extensions/filters/http/rbac/BUILD | 4 ++-- 
test/extensions/filters/http/router/BUILD | 4 ++-- test/extensions/filters/http/squash/BUILD | 4 ++-- test/extensions/filters/http/tap/BUILD | 4 ++-- .../filters/listener/http_inspector/BUILD | 4 ++-- .../filters/listener/original_dst/BUILD | 4 ++-- .../filters/listener/original_src/BUILD | 4 ++-- .../filters/listener/proxy_protocol/BUILD | 4 ++-- .../filters/listener/tls_inspector/BUILD | 4 ++-- .../filters/network/client_ssl_auth/BUILD | 4 ++-- test/extensions/filters/network/common/BUILD | 4 ++-- .../filters/network/common/redis/BUILD | 4 ++-- .../filters/network/direct_response/BUILD | 4 ++-- .../filters/network/dubbo_proxy/BUILD | 4 ++-- .../filters/network/ext_authz/BUILD | 4 ++-- .../network/http_connection_manager/BUILD | 4 ++-- test/extensions/filters/network/kafka/BUILD | 5 ++-- .../filters/network/kafka/broker/BUILD | 4 ++-- .../kafka/broker/integration_test/BUILD | 5 ++-- .../filters/network/local_ratelimit/BUILD | 4 ++-- .../filters/network/mongo_proxy/BUILD | 4 ++-- .../filters/network/mysql_proxy/BUILD | 4 ++-- .../filters/network/postgres_proxy/BUILD | 4 ++-- .../filters/network/ratelimit/BUILD | 4 ++-- test/extensions/filters/network/rbac/BUILD | 4 ++-- .../filters/network/redis_proxy/BUILD | 4 ++-- .../filters/network/rocketmq_proxy/BUILD | 4 ++-- .../filters/network/sni_cluster/BUILD | 4 ++-- .../network/sni_dynamic_forward_proxy/BUILD | 4 ++-- .../filters/network/tcp_proxy/BUILD | 4 ++-- .../filters/network/thrift_proxy/BUILD | 4 ++-- .../filters/network/thrift_proxy/driver/BUILD | 5 ++-- .../thrift_proxy/driver/fbthrift/BUILD | 5 ++-- .../network/thrift_proxy/driver/finagle/BUILD | 5 ++-- .../driver/generated/example/BUILD | 5 ++-- .../thrift_proxy/filters/ratelimit/BUILD | 4 ++-- .../filters/network/zookeeper_proxy/BUILD | 4 ++-- test/extensions/filters/udp/dns_filter/BUILD | 4 ++-- test/extensions/filters/udp/udp_proxy/BUILD | 4 ++-- .../extensions/grpc_credentials/aws_iam/BUILD | 4 ++-- .../file_based_metadata/BUILD | 4 ++-- test/extensions/health_checkers/redis/BUILD | 4 ++-- .../internal_redirect/previous_routes/BUILD | 4 ++-- test/extensions/quic_listeners/quiche/BUILD | 4 ++-- .../quic_listeners/quiche/integration/BUILD | 4 ++-- .../quic_listeners/quiche/platform/BUILD | 4 ++-- .../resource_monitors/fixed_heap/BUILD | 4 ++-- .../resource_monitors/injected_resource/BUILD | 4 ++-- .../retry/host/omit_canary_hosts/BUILD | 4 ++-- .../retry/host/omit_host_metadata/BUILD | 4 ++-- .../retry/host/previous_hosts/BUILD | 4 ++-- .../retry/priority/previous_priorities/BUILD | 4 ++-- .../stats_sinks/common/statsd/BUILD | 4 ++-- test/extensions/stats_sinks/dog_statsd/BUILD | 4 ++-- test/extensions/stats_sinks/hystrix/BUILD | 4 ++-- .../stats_sinks/metrics_service/BUILD | 4 ++-- test/extensions/stats_sinks/statsd/BUILD | 4 ++-- test/extensions/tracers/common/ot/BUILD | 4 ++-- test/extensions/tracers/datadog/BUILD | 4 ++-- test/extensions/tracers/dynamic_ot/BUILD | 4 ++-- test/extensions/tracers/lightstep/BUILD | 4 ++-- test/extensions/tracers/opencensus/BUILD | 4 ++-- test/extensions/tracers/xray/BUILD | 4 ++-- test/extensions/tracers/zipkin/BUILD | 4 ++-- test/extensions/transport_sockets/alts/BUILD | 4 ++-- test/extensions/transport_sockets/tap/BUILD | 4 ++-- test/extensions/transport_sockets/tls/BUILD | 4 ++-- .../transport_sockets/tls/integration/BUILD | 4 ++-- .../transport_sockets/tls/test_data/BUILD | 4 ++-- test/fuzz/BUILD | 4 ++-- test/integration/BUILD | 5 ++-- test/integration/clusters/BUILD | 4 ++-- test/integration/filters/BUILD | 4 ++-- test/mocks/BUILD | 4 ++-- 
test/mocks/access_log/BUILD | 4 ++-- test/mocks/api/BUILD | 4 ++-- test/mocks/buffer/BUILD | 4 ++-- test/mocks/compression/compressor/BUILD | 4 ++-- test/mocks/compression/decompressor/BUILD | 4 ++-- test/mocks/config/BUILD | 4 ++-- test/mocks/event/BUILD | 4 ++-- test/mocks/filesystem/BUILD | 4 ++-- test/mocks/grpc/BUILD | 4 ++-- test/mocks/http/BUILD | 4 ++-- test/mocks/init/BUILD | 4 ++-- test/mocks/local_info/BUILD | 4 ++-- test/mocks/network/BUILD | 4 ++-- test/mocks/protobuf/BUILD | 4 ++-- test/mocks/ratelimit/BUILD | 4 ++-- test/mocks/redis/BUILD | 4 ++-- test/mocks/router/BUILD | 4 ++-- test/mocks/runtime/BUILD | 4 ++-- test/mocks/secret/BUILD | 4 ++-- test/mocks/server/BUILD | 4 ++-- test/mocks/ssl/BUILD | 4 ++-- test/mocks/stats/BUILD | 4 ++-- test/mocks/stream_info/BUILD | 4 ++-- test/mocks/tcp/BUILD | 4 ++-- test/mocks/thread_local/BUILD | 4 ++-- test/mocks/tracing/BUILD | 4 ++-- test/mocks/upstream/BUILD | 4 ++-- test/proto/BUILD | 4 ++-- test/server/BUILD | 4 ++-- test/server/admin/BUILD | 4 ++-- test/server/config_validation/BUILD | 4 ++-- test/test_common/BUILD | 4 ++-- test/tools/config_load_check/BUILD | 4 ++-- test/tools/router_check/BUILD | 4 ++-- test/tools/router_check/test/BUILD | 4 ++-- test/tools/schema_validator/BUILD | 4 ++-- test/tools/type_whisperer/BUILD | 4 ++-- test/tools/wee8_compile/BUILD | 4 ++-- tools/BUILD | 5 ++-- tools/api_boost/testdata/BUILD | 4 ++-- tools/api_proto_plugin/BUILD | 4 ++-- tools/api_proto_plugin/plugin.bzl | 6 ++--- tools/clang_tools/support/clang_tools.bzl | 8 ++++--- tools/code_format/check_format.py | 2 +- tools/code_format/check_format_test_helper.py | 4 ++-- tools/code_format/envoy_build_fixer.py | 23 +++++++++++-------- tools/config_validation/BUILD | 5 ++-- tools/proto_format/active_protos_gen.py | 4 ++-- tools/protodoc/BUILD | 5 ++-- tools/protoxform/BUILD | 2 ++ .../check_format/add_envoy_package.BUILD.gold | 4 ++-- .../bad_envoy_build_sys_ref.BUILD | 4 ++-- .../bad_envoy_build_sys_ref.BUILD.gold | 4 ++-- .../check_format/canonical_api_deps.BUILD | 4 ++-- .../canonical_api_deps.BUILD.gold | 4 ++-- .../check_format/canonical_spacing.BUILD.gold | 4 ++-- .../check_format/remove_unused_loads.BUILD | 4 ++-- .../remove_unused_loads.BUILD.gold | 4 ++-- .../check_format/skip_envoy_package.BUILD | 2 ++ .../skip_envoy_package.BUILD.gold | 2 ++ .../check_format/update_license.BUILD.gold | 4 ++-- tools/testdata/protoxform/BUILD | 2 ++ .../envoy/active_non_terminal/v2/BUILD | 2 ++ .../protoxform/envoy/active_terminal/v2/BUILD | 2 ++ .../testdata/protoxform/envoy/frozen/v2/BUILD | 2 ++ .../testdata/protoxform/envoy/frozen/v3/BUILD | 2 ++ tools/testdata/protoxform/envoy/v2/BUILD | 2 ++ tools/testdata/protoxform/external/BUILD | 2 ++ tools/type_whisperer/BUILD | 5 ++-- .../file_descriptor_set_text.bzl | 2 ++ 506 files changed, 1260 insertions(+), 1173 deletions(-) diff --git a/api/bazel/BUILD b/api/bazel/BUILD index 279c7c9e6a9b..4b582bb8be3f 100644 --- a/api/bazel/BUILD +++ b/api/bazel/BUILD @@ -1,7 +1,7 @@ -licenses(["notice"]) # Apache 2 - load("@io_bazel_rules_go//proto:compiler.bzl", "go_proto_compiler") +licenses(["notice"]) # Apache 2 + go_proto_compiler( name = "pgv_plugin_go", options = ["lang=go"], diff --git a/api/bazel/api_build_system.bzl b/api/bazel/api_build_system.bzl index 7e88ab2bf9e5..e9119b329d01 100644 --- a/api/bazel/api_build_system.bzl +++ b/api/bazel/api_build_system.bzl @@ -1,7 +1,8 @@ +load("@rules_cc//cc:defs.bzl", "cc_test") load("@com_envoyproxy_protoc_gen_validate//bazel:pgv_proto_library.bzl", 
"pgv_cc_proto_library") load("@com_github_grpc_grpc//bazel:cc_grpc_library.bzl", "cc_grpc_library") load("@com_google_protobuf//:protobuf.bzl", _py_proto_library = "py_proto_library") -load("@io_bazel_rules_go//proto:def.bzl", "go_grpc_library", "go_proto_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") load("@io_bazel_rules_go//go:def.bzl", "go_test") load("@rules_proto//proto:defs.bzl", "proto_library") load( @@ -138,7 +139,7 @@ def api_cc_py_proto_library( _api_cc_grpc_library(name = cc_grpc_name, proto = relative_name, deps = cc_proto_deps) def api_cc_test(name, **kwargs): - native.cc_test( + cc_test( name = name, **kwargs ) diff --git a/api/bazel/repositories.bzl b/api/bazel/repositories.bzl index af1f11331d01..a64e733cf74a 100644 --- a/api/bazel/repositories.bzl +++ b/api/bazel/repositories.bzl @@ -1,4 +1,3 @@ -load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") load(":envoy_http_archive.bzl", "envoy_http_archive") load(":repository_locations.bzl", "REPOSITORY_LOCATIONS") diff --git a/api/envoy/service/auth/v2alpha/BUILD b/api/envoy/service/auth/v2alpha/BUILD index 0bd31fdc6ff8..c75dabe1a8a0 100644 --- a/api/envoy/service/auth/v2alpha/BUILD +++ b/api/envoy/service/auth/v2alpha/BUILD @@ -1,9 +1,9 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + licenses(["notice"]) # Apache 2 # DO NOT EDIT. This file is generated by tools/proto_sync.py. -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - api_proto_package( has_services = True, deps = ["//envoy/service/auth/v2:pkg"], diff --git a/api/test/build/BUILD b/api/test/build/BUILD index 59f0a3641011..2dae9fa0de03 100644 --- a/api/test/build/BUILD +++ b/api/test/build/BUILD @@ -1,7 +1,7 @@ -licenses(["notice"]) # Apache 2 - load("@envoy_api//bazel:api_build_system.bzl", "api_cc_test", "api_go_test") +licenses(["notice"]) # Apache 2 + api_cc_test( name = "build_test", srcs = ["build_test.cc"], diff --git a/api/test/validate/BUILD b/api/test/validate/BUILD index 4398672c27af..c9a7ba701f97 100644 --- a/api/test/validate/BUILD +++ b/api/test/validate/BUILD @@ -1,7 +1,7 @@ -licenses(["notice"]) # Apache 2 - load("@envoy_api//bazel:api_build_system.bzl", "api_cc_test") +licenses(["notice"]) # Apache 2 + api_cc_test( name = "pgv_test", srcs = ["pgv_test.cc"], diff --git a/api/tools/BUILD b/api/tools/BUILD index 8d2207b94070..2273a9b9dd0b 100644 --- a/api/tools/BUILD +++ b/api/tools/BUILD @@ -1,3 +1,5 @@ +load("@rules_python//python:defs.bzl", "py_binary", "py_test") + licenses(["notice"]) # Apache 2 py_binary( diff --git a/api/versioning/BUILD b/api/versioning/BUILD index 1bc4b51231e2..ccea9008a05f 100644 --- a/api/versioning/BUILD +++ b/api/versioning/BUILD @@ -1,9 +1,9 @@ # DO NOT EDIT. This file is generated by tools/proto_format/active_protos_gen.py. -licenses(["notice"]) # Apache 2 - load("@rules_proto//proto:defs.bzl", "proto_library") +licenses(["notice"]) # Apache 2 + # This tracks active development versions of protos. 
proto_library( name = "active_protos", diff --git a/bazel/BUILD b/bazel/BUILD index f94bc1da4433..c670adc0620d 100644 --- a/bazel/BUILD +++ b/bazel/BUILD @@ -1,13 +1,11 @@ load("@rules_cc//cc:defs.bzl", "cc_library", "cc_proto_library") +load("//bazel:envoy_build_system.bzl", "envoy_package") +load("//bazel:envoy_internal.bzl", "envoy_select_force_libcpp") licenses(["notice"]) # Apache 2 -load("//bazel:envoy_build_system.bzl", "envoy_package") - envoy_package() -load("//bazel:envoy_internal.bzl", "envoy_select_force_libcpp") - exports_files([ "gen_sh_test_runner.sh", "sh_test_wrapper.sh", diff --git a/bazel/envoy_binary.bzl b/bazel/envoy_binary.bzl index e53e42d8e284..16adfb38a439 100644 --- a/bazel/envoy_binary.bzl +++ b/bazel/envoy_binary.bzl @@ -1,3 +1,5 @@ +load("@rules_cc//cc:defs.bzl", "cc_binary") + # DO NOT LOAD THIS FILE. Load envoy_build_system.bzl instead. # Envoy binary targets load( @@ -27,7 +29,7 @@ def envoy_cc_binary( linkopts = linkopts + _envoy_stamped_linkopts() deps = deps + _envoy_stamped_deps() deps = deps + [envoy_external_dep_path(dep) for dep in external_deps] + envoy_stdlib_deps() - native.cc_binary( + cc_binary( name = name, srcs = srcs, data = data, diff --git a/bazel/envoy_library.bzl b/bazel/envoy_library.bzl index 69453fd1b6fd..6f8c56497093 100644 --- a/bazel/envoy_library.bzl +++ b/bazel/envoy_library.bzl @@ -1,3 +1,5 @@ +load("@rules_cc//cc:defs.bzl", "cc_library") + # DO NOT LOAD THIS FILE. Load envoy_build_system.bzl instead. # Envoy library targets load( @@ -6,7 +8,6 @@ load( "envoy_external_dep_path", "envoy_linkstatic", ) -load("@com_google_protobuf//:protobuf.bzl", "cc_proto_library", "py_proto_library") load("@envoy_api//bazel:api_build_system.bzl", "api_cc_py_proto_library") # As above, but wrapped in list form for adding to dep lists. This smell seems needed as @@ -23,7 +24,7 @@ def tcmalloc_external_deps(repository): # all envoy targets pass through an envoy-declared skylark function where they can be modified # before being passed to a native bazel function. def envoy_basic_cc_library(name, deps = [], external_deps = [], **kargs): - native.cc_library( + cc_library( name = name, deps = deps + [envoy_external_dep_path(dep) for dep in external_deps], **kargs @@ -101,8 +102,7 @@ def envoy_cc_library( "@envoy//bazel:compdb_build": ["@envoy//bazel/external:empty.cc"], "//conditions:default": [], }) - - native.cc_library( + cc_library( name = name, srcs = srcs, hdrs = hdrs, @@ -131,7 +131,7 @@ def envoy_cc_library( # Intended for usage by external consumers. This allows them to disambiguate # include paths via `external/envoy...` - native.cc_library( + cc_library( name = name + "_with_external_headers", hdrs = hdrs, copts = envoy_copts(repository) + copts, diff --git a/bazel/envoy_test.bzl b/bazel/envoy_test.bzl index fb68b46f7e64..0e439d84dd12 100644 --- a/bazel/envoy_test.bzl +++ b/bazel/envoy_test.bzl @@ -1,3 +1,6 @@ +load("@rules_python//python:defs.bzl", "py_binary") +load("@rules_cc//cc:defs.bzl", "cc_binary", "cc_library", "cc_test") + # DO NOT LOAD THIS FILE. Load envoy_build_system.bzl instead. # Envoy test targets. This includes both test library and test binary targets. load("@bazel_tools//tools/build_defs/pkg:pkg.bzl", "pkg_tar") @@ -29,8 +32,7 @@ def _envoy_cc_test_infrastructure_library( **kargs): # Add implicit tcmalloc external dependency(if available) in order to enable CPU and heap profiling in tests. 
deps += tcmalloc_external_deps(repository) - - native.cc_library( + cc_library( name = name, srcs = srcs, hdrs = hdrs, @@ -105,7 +107,7 @@ def envoy_cc_fuzz_test( tags = tags, **kwargs ) - native.cc_test( + cc_test( name = name, copts = fuzz_copts + envoy_copts("@envoy", test = True), linkopts = _envoy_test_linkopts(), @@ -129,7 +131,7 @@ def envoy_cc_fuzz_test( # https://github.com/google/oss-fuzz/blob/master/projects/envoy/build.sh. It won't yield # anything useful on its own, as it expects to be run in an environment where the linker options # provide a path to FuzzingEngine. - native.cc_binary( + cc_binary( name = name + "_driverless", copts = fuzz_copts + envoy_copts("@envoy", test = True), linkopts = ["-lFuzzingEngine"] + _envoy_test_linkopts(), @@ -138,8 +140,7 @@ def envoy_cc_fuzz_test( deps = [":" + test_lib_name], tags = ["manual"] + tags, ) - - native.cc_test( + cc_test( name = name + "_with_libfuzzer", copts = fuzz_copts + envoy_copts("@envoy", test = True), linkopts = ["-fsanitize=fuzzer"] + _envoy_test_linkopts(), @@ -171,8 +172,7 @@ def envoy_cc_test( size = "medium", flaky = False): coverage_tags = tags + ([] if coverage else ["nocoverage"]) - - native.cc_test( + cc_test( name = name, srcs = srcs, data = data, @@ -277,7 +277,7 @@ def envoy_py_test_binary( external_deps = [], deps = [], **kargs): - native.py_binary( + py_binary( name = name, deps = deps + [envoy_external_dep_path(dep) for dep in external_deps], **kargs diff --git a/bazel/external/BUILD b/bazel/external/BUILD index 11dabbc90cee..719adb21855a 100644 --- a/bazel/external/BUILD +++ b/bazel/external/BUILD @@ -1,3 +1,5 @@ +load("@rules_cc//cc:defs.bzl", "cc_library") + licenses(["notice"]) # Apache 2 # Use a wrapper cc_library with an empty source source file to force diff --git a/bazel/external/apache_thrift.BUILD b/bazel/external/apache_thrift.BUILD index 02cbf535514d..db12d91f0b84 100644 --- a/bazel/external/apache_thrift.BUILD +++ b/bazel/external/apache_thrift.BUILD @@ -1,3 +1,5 @@ +load("@rules_python//python:defs.bzl", "py_library") + licenses(["notice"]) # Apache 2 # The apache-thrift distribution does not keep the thrift files in a directory with the diff --git a/bazel/external/boringssl_fips.BUILD b/bazel/external/boringssl_fips.BUILD index 6add632b3a34..7b913e413614 100644 --- a/bazel/external/boringssl_fips.BUILD +++ b/bazel/external/boringssl_fips.BUILD @@ -1,7 +1,8 @@ -licenses(["notice"]) # Apache 2 - +load("@rules_cc//cc:defs.bzl", "cc_library") load(":genrule_cmd.bzl", "genrule_cmd") +licenses(["notice"]) # Apache 2 + cc_library( name = "crypto", srcs = [ diff --git a/bazel/external/compiler_rt.BUILD b/bazel/external/compiler_rt.BUILD index 96d90b46ab23..82dfe8f8be03 100644 --- a/bazel/external/compiler_rt.BUILD +++ b/bazel/external/compiler_rt.BUILD @@ -1,3 +1,5 @@ +load("@rules_cc//cc:defs.bzl", "cc_library") + licenses(["notice"]) # Apache 2 cc_library( diff --git a/bazel/external/fmtlib.BUILD b/bazel/external/fmtlib.BUILD index 7ac5ecceffbd..c4d97a2c9e69 100644 --- a/bazel/external/fmtlib.BUILD +++ b/bazel/external/fmtlib.BUILD @@ -1,3 +1,5 @@ +load("@rules_cc//cc:defs.bzl", "cc_library") + licenses(["notice"]) # Apache 2 cc_library( diff --git a/bazel/external/http-parser.BUILD b/bazel/external/http-parser.BUILD index 303950d7c00b..5fefacde47dc 100644 --- a/bazel/external/http-parser.BUILD +++ b/bazel/external/http-parser.BUILD @@ -1,3 +1,5 @@ +load("@rules_cc//cc:defs.bzl", "cc_library") + licenses(["notice"]) # Apache 2 cc_library( diff --git a/bazel/external/jinja.BUILD 
b/bazel/external/jinja.BUILD index f7ce6718caeb..4ca60460e41d 100644 --- a/bazel/external/jinja.BUILD +++ b/bazel/external/jinja.BUILD @@ -1,3 +1,5 @@ +load("@rules_python//python:defs.bzl", "py_library") + licenses(["notice"]) # Apache 2 py_library( diff --git a/bazel/external/libcircllhist.BUILD b/bazel/external/libcircllhist.BUILD index a77269ef60b0..4dff51012671 100644 --- a/bazel/external/libcircllhist.BUILD +++ b/bazel/external/libcircllhist.BUILD @@ -1,3 +1,5 @@ +load("@rules_cc//cc:defs.bzl", "cc_library") + licenses(["notice"]) # Apache 2 cc_library( diff --git a/bazel/external/libprotobuf_mutator.BUILD b/bazel/external/libprotobuf_mutator.BUILD index 12fd8b49b51f..697a3c6334a4 100644 --- a/bazel/external/libprotobuf_mutator.BUILD +++ b/bazel/external/libprotobuf_mutator.BUILD @@ -1,3 +1,5 @@ +load("@rules_cc//cc:defs.bzl", "cc_library") + licenses(["notice"]) # Apache 2 cc_library( diff --git a/bazel/external/markupsafe.BUILD b/bazel/external/markupsafe.BUILD index 4d792e1d4ad3..87e2871e9dfc 100644 --- a/bazel/external/markupsafe.BUILD +++ b/bazel/external/markupsafe.BUILD @@ -1,3 +1,5 @@ +load("@rules_python//python:defs.bzl", "py_library") + licenses(["notice"]) # Apache 2 py_library( diff --git a/bazel/external/quiche.BUILD b/bazel/external/quiche.BUILD index d19e7e80ae53..9bf32c27c6c5 100644 --- a/bazel/external/quiche.BUILD +++ b/bazel/external/quiche.BUILD @@ -1,3 +1,13 @@ +load("@rules_cc//cc:defs.bzl", "cc_proto_library") +load("@rules_proto//proto:defs.bzl", "proto_library") +load(":genrule_cmd.bzl", "genrule_cmd") +load( + "@envoy//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_cc_test", + "envoy_cc_test_library", +) + licenses(["notice"]) # Apache 2 # QUICHE is Google's implementation of QUIC and related protocols. It is the @@ -25,16 +35,6 @@ licenses(["notice"]) # Apache 2 # QUICHE platform APIs in //source/extensions/quic_listeners/quiche/platform/, # should remain largely the same. 
-load("@rules_proto//proto:defs.bzl", "proto_library") -load(":genrule_cmd.bzl", "genrule_cmd") -load( - "@envoy//bazel:envoy_build_system.bzl", - "envoy_cc_library", - "envoy_cc_test", - "envoy_cc_test_library", - "envoy_proto_library", -) - src_files = glob([ "**/*.h", "**/*.c", diff --git a/bazel/external/rapidjson.BUILD b/bazel/external/rapidjson.BUILD index 97948eee7072..a74a0fe55d37 100644 --- a/bazel/external/rapidjson.BUILD +++ b/bazel/external/rapidjson.BUILD @@ -1,3 +1,5 @@ +load("@rules_cc//cc:defs.bzl", "cc_library") + licenses(["notice"]) # Apache 2 cc_library( diff --git a/bazel/external/spdlog.BUILD b/bazel/external/spdlog.BUILD index dec2ab43d3b6..4be48da95173 100644 --- a/bazel/external/spdlog.BUILD +++ b/bazel/external/spdlog.BUILD @@ -1,3 +1,5 @@ +load("@rules_cc//cc:defs.bzl", "cc_library") + licenses(["notice"]) # Apache 2 cc_library( diff --git a/bazel/external/sqlparser.BUILD b/bazel/external/sqlparser.BUILD index 8e14f45e5360..5a12383074f5 100644 --- a/bazel/external/sqlparser.BUILD +++ b/bazel/external/sqlparser.BUILD @@ -1,3 +1,5 @@ +load("@rules_cc//cc:defs.bzl", "cc_library") + licenses(["notice"]) # Apache 2 cc_library( diff --git a/bazel/external/tclap.BUILD b/bazel/external/tclap.BUILD index fabf6c4c3f99..39bd270fd749 100644 --- a/bazel/external/tclap.BUILD +++ b/bazel/external/tclap.BUILD @@ -1,3 +1,5 @@ +load("@rules_cc//cc:defs.bzl", "cc_library") + licenses(["notice"]) # Apache 2 cc_library( diff --git a/bazel/external/twitter_common_finagle_thrift.BUILD b/bazel/external/twitter_common_finagle_thrift.BUILD index ee1d121d77c9..9121874d4350 100644 --- a/bazel/external/twitter_common_finagle_thrift.BUILD +++ b/bazel/external/twitter_common_finagle_thrift.BUILD @@ -1,3 +1,5 @@ +load("@rules_python//python:defs.bzl", "py_library") + licenses(["notice"]) # Apache 2 py_library( diff --git a/bazel/external/twitter_common_lang.BUILD b/bazel/external/twitter_common_lang.BUILD index 469ee3331cff..40fac2f1d1ed 100644 --- a/bazel/external/twitter_common_lang.BUILD +++ b/bazel/external/twitter_common_lang.BUILD @@ -1,3 +1,5 @@ +load("@rules_python//python:defs.bzl", "py_library") + licenses(["notice"]) # Apache 2 py_library( diff --git a/bazel/external/twitter_common_rpc.BUILD b/bazel/external/twitter_common_rpc.BUILD index 8e8622ebb4db..df79842360bd 100644 --- a/bazel/external/twitter_common_rpc.BUILD +++ b/bazel/external/twitter_common_rpc.BUILD @@ -1,3 +1,5 @@ +load("@rules_python//python:defs.bzl", "py_library") + licenses(["notice"]) # Apache 2 py_library( diff --git a/bazel/external/wee8.BUILD b/bazel/external/wee8.BUILD index c6e64f43bc3b..341e1ad66c07 100644 --- a/bazel/external/wee8.BUILD +++ b/bazel/external/wee8.BUILD @@ -1,7 +1,8 @@ -licenses(["notice"]) # Apache 2 - +load("@rules_cc//cc:defs.bzl", "cc_library") load(":genrule_cmd.bzl", "genrule_cmd") +licenses(["notice"]) # Apache 2 + cc_library( name = "wee8", srcs = [ diff --git a/bazel/external/xxhash.BUILD b/bazel/external/xxhash.BUILD index 5f8120dfee0f..33f9bbe69705 100644 --- a/bazel/external/xxhash.BUILD +++ b/bazel/external/xxhash.BUILD @@ -1,3 +1,5 @@ +load("@rules_cc//cc:defs.bzl", "cc_library") + licenses(["notice"]) # Apache 2 cc_library( diff --git a/bazel/foreign_cc/BUILD b/bazel/foreign_cc/BUILD index 9d05d6d0dd68..747d2b7e1bf5 100644 --- a/bazel/foreign_cc/BUILD +++ b/bazel/foreign_cc/BUILD @@ -1,8 +1,9 @@ -licenses(["notice"]) # Apache 2 - +load("@rules_cc//cc:defs.bzl", "cc_library") load("//bazel:envoy_build_system.bzl", "envoy_cmake_external", "envoy_package") 
load("@rules_foreign_cc//tools/build_defs:configure.bzl", "configure_make") +licenses(["notice"]) # Apache 2 + envoy_package() # autotools packages are unusable on Windows as-is diff --git a/bazel/genrule_repository.bzl b/bazel/genrule_repository.bzl index 0689c39c88b0..ff4e6fe9dcaa 100644 --- a/bazel/genrule_repository.bzl +++ b/bazel/genrule_repository.bzl @@ -115,7 +115,7 @@ def _genrule_environment(ctx): ld_flags = [] ld_libs = [] if ctx.var.get("ENVOY_CONFIG_COVERAGE"): - ld_libs += ["-lgcov"] + ld_libs.append("-lgcov") if ctx.var.get("ENVOY_CONFIG_ASAN"): cc_flags += asan_flags ld_flags += asan_flags @@ -137,8 +137,8 @@ def _genrule_environment(ctx): lines.append("export ASAN_OPTIONS=detect_leaks=0") lines.append("") - out = ctx.new_file(ctx.attr.name + ".sh") - ctx.file_action(out, "\n".join(lines)) + out = ctx.actions.declare_file(ctx.attr.name + ".sh") + ctx.actions.write(out, "\n".join(lines)) return DefaultInfo(files = depset([out])) genrule_environment = rule( diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index 39bc408ea748..6d104839e91a 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -191,6 +191,7 @@ def envoy_dependencies(skip_targets = []): _repository_impl("bazel_toolchains") _repository_impl("bazel_compdb") _repository_impl("envoy_build_tools") + _repository_impl("rules_cc") # Unconditional, since we use this only for compiler-agnostic fuzzing utils. _org_llvm_releases_compiler_rt() diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index a206815b962a..3d82497d31f6 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -358,6 +358,14 @@ DEPENDENCY_REPOSITORIES = dict( urls = ["https://github.com/bazelbuild/rules_go/releases/download/v0.20.3/rules_go-v0.20.3.tar.gz"], use_category = ["build"], ), + rules_cc = dict( + sha256 = "9d48151ea71b3e225adfb6867e6d2c7d0dce46cbdc8710d9a9a628574dfd40a0", + strip_prefix = "rules_cc-818289e5613731ae410efb54218a4077fb9dbb03", + # 2020-05-13 + # TODO(lizan): pin to a point releases when there's a released version. 
+ urls = ["https://github.com/bazelbuild/rules_cc/archive/818289e5613731ae410efb54218a4077fb9dbb03.tar.gz"], + use_category = ["build"], + ), rules_foreign_cc = dict( sha256 = "3184c244b32e65637a74213fc448964b687390eeeca42a36286f874c046bba15", strip_prefix = "rules_foreign_cc-7bc4be735b0560289f6b86ab6136ee25d20b65b7", diff --git a/configs/BUILD b/configs/BUILD index 451946f1805d..f13fcb170f3a 100644 --- a/configs/BUILD +++ b/configs/BUILD @@ -1,13 +1,12 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", + "envoy_py_test_binary", ) -envoy_package() +licenses(["notice"]) # Apache 2 -load("//bazel:envoy_build_system.bzl", "envoy_py_test_binary") +envoy_package() envoy_py_test_binary( name = "configgen", diff --git a/examples/BUILD b/examples/BUILD index 341a86be7600..d89668cf1c1c 100644 --- a/examples/BUILD +++ b/examples/BUILD @@ -1,10 +1,10 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() filegroup( diff --git a/generated_api_shadow/bazel/BUILD b/generated_api_shadow/bazel/BUILD index 279c7c9e6a9b..4b582bb8be3f 100644 --- a/generated_api_shadow/bazel/BUILD +++ b/generated_api_shadow/bazel/BUILD @@ -1,7 +1,7 @@ -licenses(["notice"]) # Apache 2 - load("@io_bazel_rules_go//proto:compiler.bzl", "go_proto_compiler") +licenses(["notice"]) # Apache 2 + go_proto_compiler( name = "pgv_plugin_go", options = ["lang=go"], diff --git a/generated_api_shadow/bazel/api_build_system.bzl b/generated_api_shadow/bazel/api_build_system.bzl index 7e88ab2bf9e5..e9119b329d01 100644 --- a/generated_api_shadow/bazel/api_build_system.bzl +++ b/generated_api_shadow/bazel/api_build_system.bzl @@ -1,7 +1,8 @@ +load("@rules_cc//cc:defs.bzl", "cc_test") load("@com_envoyproxy_protoc_gen_validate//bazel:pgv_proto_library.bzl", "pgv_cc_proto_library") load("@com_github_grpc_grpc//bazel:cc_grpc_library.bzl", "cc_grpc_library") load("@com_google_protobuf//:protobuf.bzl", _py_proto_library = "py_proto_library") -load("@io_bazel_rules_go//proto:def.bzl", "go_grpc_library", "go_proto_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") load("@io_bazel_rules_go//go:def.bzl", "go_test") load("@rules_proto//proto:defs.bzl", "proto_library") load( @@ -138,7 +139,7 @@ def api_cc_py_proto_library( _api_cc_grpc_library(name = cc_grpc_name, proto = relative_name, deps = cc_proto_deps) def api_cc_test(name, **kwargs): - native.cc_test( + cc_test( name = name, **kwargs ) diff --git a/generated_api_shadow/bazel/repositories.bzl b/generated_api_shadow/bazel/repositories.bzl index af1f11331d01..a64e733cf74a 100644 --- a/generated_api_shadow/bazel/repositories.bzl +++ b/generated_api_shadow/bazel/repositories.bzl @@ -1,4 +1,3 @@ -load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") load(":envoy_http_archive.bzl", "envoy_http_archive") load(":repository_locations.bzl", "REPOSITORY_LOCATIONS") diff --git a/generated_api_shadow/envoy/service/auth/v2alpha/BUILD b/generated_api_shadow/envoy/service/auth/v2alpha/BUILD index 0bd31fdc6ff8..c75dabe1a8a0 100644 --- a/generated_api_shadow/envoy/service/auth/v2alpha/BUILD +++ b/generated_api_shadow/envoy/service/auth/v2alpha/BUILD @@ -1,9 +1,9 @@ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + licenses(["notice"]) # Apache 2 # DO NOT EDIT. This file is generated by tools/proto_sync.py. 
-load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - api_proto_package( has_services = True, deps = ["//envoy/service/auth/v2:pkg"], diff --git a/include/envoy/access_log/BUILD b/include/envoy/access_log/BUILD index 991715a6a830..c2ba9dba547b 100644 --- a/include/envoy/access_log/BUILD +++ b/include/envoy/access_log/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/include/envoy/api/BUILD b/include/envoy/api/BUILD index 0bdbc5a87588..6855cc6b8688 100644 --- a/include/envoy/api/BUILD +++ b/include/envoy/api/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/include/envoy/buffer/BUILD b/include/envoy/buffer/BUILD index e22d136b17de..3f2880cb720e 100644 --- a/include/envoy/buffer/BUILD +++ b/include/envoy/buffer/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/include/envoy/common/BUILD b/include/envoy/common/BUILD index b950bcbd7fbe..4d427383bf21 100644 --- a/include/envoy/common/BUILD +++ b/include/envoy/common/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_basic_cc_library", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_basic_cc_library( diff --git a/include/envoy/common/crypto/BUILD b/include/envoy/common/crypto/BUILD index 80d0fbb3971c..db3e738b80ff 100644 --- a/include/envoy/common/crypto/BUILD +++ b/include/envoy/common/crypto/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/include/envoy/compression/compressor/BUILD b/include/envoy/compression/compressor/BUILD index f9e90c9ec612..6632229aaed4 100644 --- a/include/envoy/compression/compressor/BUILD +++ b/include/envoy/compression/compressor/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/include/envoy/compression/decompressor/BUILD b/include/envoy/compression/decompressor/BUILD index 60a8e9cb7eeb..156d81d52356 100644 --- a/include/envoy/compression/decompressor/BUILD +++ b/include/envoy/compression/decompressor/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/include/envoy/config/BUILD b/include/envoy/config/BUILD index 50fca3a73007..991ddee5d3b5 100644 --- a/include/envoy/config/BUILD +++ b/include/envoy/config/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/include/envoy/event/BUILD b/include/envoy/event/BUILD index 05ea911bf8cc..d24846f32871 100644 --- a/include/envoy/event/BUILD +++ b/include/envoy/event/BUILD @@ -1,11 +1,11 @@ 
-licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/include/envoy/filesystem/BUILD b/include/envoy/filesystem/BUILD index e740d2ee25e4..6a95240457a9 100644 --- a/include/envoy/filesystem/BUILD +++ b/include/envoy/filesystem/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/include/envoy/formatter/BUILD b/include/envoy/formatter/BUILD index a39fd0a2d63f..df87c4cbac10 100644 --- a/include/envoy/formatter/BUILD +++ b/include/envoy/formatter/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/include/envoy/grpc/BUILD b/include/envoy/grpc/BUILD index a2beb3e61e7e..07d87ce33d19 100644 --- a/include/envoy/grpc/BUILD +++ b/include/envoy/grpc/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/include/envoy/http/BUILD b/include/envoy/http/BUILD index d1839dce3bf4..7ffc41f6372d 100644 --- a/include/envoy/http/BUILD +++ b/include/envoy/http/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/include/envoy/init/BUILD b/include/envoy/init/BUILD index 2229d7c7a12e..4bbc0d18f682 100644 --- a/include/envoy/init/BUILD +++ b/include/envoy/init/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/include/envoy/json/BUILD b/include/envoy/json/BUILD index 89c21942185a..4bbab2712a14 100644 --- a/include/envoy/json/BUILD +++ b/include/envoy/json/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/include/envoy/local_info/BUILD b/include/envoy/local_info/BUILD index 52372ccf334f..749ad670563b 100644 --- a/include/envoy/local_info/BUILD +++ b/include/envoy/local_info/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/include/envoy/network/BUILD b/include/envoy/network/BUILD index 233a5a55566b..977f8e7d4067 100644 --- a/include/envoy/network/BUILD +++ b/include/envoy/network/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/include/envoy/protobuf/BUILD b/include/envoy/protobuf/BUILD index c23eccce45ce..6510c566d103 100644 --- a/include/envoy/protobuf/BUILD +++ b/include/envoy/protobuf/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 
+ envoy_package() envoy_cc_library( diff --git a/include/envoy/ratelimit/BUILD b/include/envoy/ratelimit/BUILD index 38b7e7bf4f0a..d726ae9f54ed 100644 --- a/include/envoy/ratelimit/BUILD +++ b/include/envoy/ratelimit/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/include/envoy/registry/BUILD b/include/envoy/registry/BUILD index 7e2c9e38fd67..de34bccd492f 100644 --- a/include/envoy/registry/BUILD +++ b/include/envoy/registry/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/include/envoy/router/BUILD b/include/envoy/router/BUILD index 44aee699e338..80afd99d4abf 100644 --- a/include/envoy/router/BUILD +++ b/include/envoy/router/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/include/envoy/runtime/BUILD b/include/envoy/runtime/BUILD index 5118a04457c8..eaed57026a42 100644 --- a/include/envoy/runtime/BUILD +++ b/include/envoy/runtime/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/include/envoy/secret/BUILD b/include/envoy/secret/BUILD index 5f16335ecba3..219884c19e81 100644 --- a/include/envoy/secret/BUILD +++ b/include/envoy/secret/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/include/envoy/server/BUILD b/include/envoy/server/BUILD index 0b6b538b40b1..2388482f251c 100644 --- a/include/envoy/server/BUILD +++ b/include/envoy/server/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/include/envoy/singleton/BUILD b/include/envoy/singleton/BUILD index f47887a06244..a0eb2536c45e 100644 --- a/include/envoy/singleton/BUILD +++ b/include/envoy/singleton/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/include/envoy/ssl/BUILD b/include/envoy/ssl/BUILD index fb14af1a211c..b8e7d530174f 100644 --- a/include/envoy/ssl/BUILD +++ b/include/envoy/ssl/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/include/envoy/ssl/private_key/BUILD b/include/envoy/ssl/private_key/BUILD index bf8a908421df..51ecf0198ec2 100644 --- a/include/envoy/ssl/private_key/BUILD +++ b/include/envoy/ssl/private_key/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/include/envoy/stats/BUILD b/include/envoy/stats/BUILD index 
4a3a6948aad5..c810ac7ad30c 100644 --- a/include/envoy/stats/BUILD +++ b/include/envoy/stats/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/include/envoy/stream_info/BUILD b/include/envoy/stream_info/BUILD index 63fa4b47ba5a..e491ce423332 100644 --- a/include/envoy/stream_info/BUILD +++ b/include/envoy/stream_info/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/include/envoy/tcp/BUILD b/include/envoy/tcp/BUILD index 991ccbd75e13..bbf990581003 100644 --- a/include/envoy/tcp/BUILD +++ b/include/envoy/tcp/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/include/envoy/thread/BUILD b/include/envoy/thread/BUILD index ef8f2450a237..d23937766850 100644 --- a/include/envoy/thread/BUILD +++ b/include/envoy/thread/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/include/envoy/thread_local/BUILD b/include/envoy/thread_local/BUILD index d5fbfb396718..3b23de4e0175 100644 --- a/include/envoy/thread_local/BUILD +++ b/include/envoy/thread_local/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/include/envoy/tracing/BUILD b/include/envoy/tracing/BUILD index 1a6e82e01880..bc50ea3769fc 100644 --- a/include/envoy/tracing/BUILD +++ b/include/envoy/tracing/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/include/envoy/upstream/BUILD b/include/envoy/upstream/BUILD index c6fe37d45beb..d31ba0fa6d97 100644 --- a/include/envoy/upstream/BUILD +++ b/include/envoy/upstream/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/restarter/BUILD b/restarter/BUILD index af4b8c78558d..811a10b6d098 100644 --- a/restarter/BUILD +++ b/restarter/BUILD @@ -1,10 +1,10 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() exports_files([ diff --git a/source/common/access_log/BUILD b/source/common/access_log/BUILD index faf3c2420a00..08408b26a9cf 100644 --- a/source/common/access_log/BUILD +++ b/source/common/access_log/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/common/api/BUILD b/source/common/api/BUILD index 25c142ce784a..1fd681a0cc7d 100644 --- a/source/common/api/BUILD +++ b/source/common/api/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", 
"envoy_cc_library", @@ -7,6 +5,8 @@ load( "envoy_select_hot_restart", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/common/buffer/BUILD b/source/common/buffer/BUILD index 02371e3c63d6..171aa8a08987 100644 --- a/source/common/buffer/BUILD +++ b/source/common/buffer/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/common/chromium_url/BUILD b/source/common/chromium_url/BUILD index 9b07e76b0013..2d4acb348765 100644 --- a/source/common/chromium_url/BUILD +++ b/source/common/chromium_url/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/common/common/BUILD b/source/common/common/BUILD index 248471b5ea90..5d473a9776f2 100644 --- a/source/common/common/BUILD +++ b/source/common/common/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_basic_cc_library", @@ -12,6 +10,8 @@ load( "envoy_select_boringssl", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/common/config/BUILD b/source/common/config/BUILD index 12042dee180a..19a26940fbb8 100644 --- a/source/common/config/BUILD +++ b/source/common/config/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/common/crypto/BUILD b/source/common/crypto/BUILD index d7431432f6eb..e47c843fe72a 100644 --- a/source/common/crypto/BUILD +++ b/source/common/crypto/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/common/event/BUILD b/source/common/event/BUILD index 33fd947b8d77..78184213018b 100644 --- a/source/common/event/BUILD +++ b/source/common/event/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/common/filesystem/BUILD b/source/common/filesystem/BUILD index 7aa299d43d74..12899b58f981 100644 --- a/source/common/filesystem/BUILD +++ b/source/common/filesystem/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", @@ -9,6 +7,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/common/formatter/BUILD b/source/common/formatter/BUILD index 0c0df68ecd7a..d4eb45228abf 100644 --- a/source/common/formatter/BUILD +++ b/source/common/formatter/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/common/grpc/BUILD b/source/common/grpc/BUILD index 8534b6f0c67c..ce22cd37046b 100644 --- a/source/common/grpc/BUILD +++ b/source/common/grpc/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", @@ -8,6 +6,8 @@ load( 
"envoy_select_google_grpc", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/common/html/BUILD b/source/common/html/BUILD index 42c5fc06a7ef..fc2b6c391ad1 100644 --- a/source/common/html/BUILD +++ b/source/common/html/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/common/http/BUILD b/source/common/http/BUILD index 7b7b7e536adf..eb524eb4077e 100644 --- a/source/common/http/BUILD +++ b/source/common/http/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/common/http/http1/BUILD b/source/common/http/http1/BUILD index be5088d41966..47499a86bb0c 100644 --- a/source/common/http/http1/BUILD +++ b/source/common/http/http1/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/common/http/http2/BUILD b/source/common/http/http2/BUILD index 45835a318267..a27a6d85282d 100644 --- a/source/common/http/http2/BUILD +++ b/source/common/http/http2/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/common/http/http3/BUILD b/source/common/http/http3/BUILD index cadca40d0d25..43ba5729097c 100644 --- a/source/common/http/http3/BUILD +++ b/source/common/http/http3/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/common/init/BUILD b/source/common/init/BUILD index 6fef3006865b..5b75ca4a8285 100644 --- a/source/common/init/BUILD +++ b/source/common/init/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/common/json/BUILD b/source/common/json/BUILD index afcfec49af76..edda394d6949 100644 --- a/source/common/json/BUILD +++ b/source/common/json/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/common/local_info/BUILD b/source/common/local_info/BUILD index 4cfb87f03834..ae6482c600ad 100644 --- a/source/common/local_info/BUILD +++ b/source/common/local_info/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/common/local_reply/BUILD b/source/common/local_reply/BUILD index 87a2a93dd9d1..6de00f364e0c 100644 --- a/source/common/local_reply/BUILD +++ b/source/common/local_reply/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git 
a/source/common/memory/BUILD b/source/common/memory/BUILD index 1501b81a6b01..45cc04041baa 100644 --- a/source/common/memory/BUILD +++ b/source/common/memory/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/common/network/BUILD b/source/common/network/BUILD index 66db07d3680f..54f82d48ef1a 100644 --- a/source/common/network/BUILD +++ b/source/common/network/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/common/profiler/BUILD b/source/common/profiler/BUILD index c853cfea30c3..192f9712568a 100644 --- a/source/common/profiler/BUILD +++ b/source/common/profiler/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/common/protobuf/BUILD b/source/common/protobuf/BUILD index de33b5516828..20ead1f753c6 100644 --- a/source/common/protobuf/BUILD +++ b/source/common/protobuf/BUILD @@ -1,5 +1,4 @@ -licenses(["notice"]) # Apache 2 - +load("@rules_cc//cc:defs.bzl", "cc_proto_library") load("@rules_proto//proto:defs.bzl", "proto_library") load( "//bazel:envoy_build_system.bzl", @@ -7,6 +6,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() proto_library( diff --git a/source/common/router/BUILD b/source/common/router/BUILD index d69cad792900..610cb0f10d75 100644 --- a/source/common/router/BUILD +++ b/source/common/router/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/common/runtime/BUILD b/source/common/runtime/BUILD index ddeb069e3e5a..4e76ab315760 100644 --- a/source/common/runtime/BUILD +++ b/source/common/runtime/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/common/secret/BUILD b/source/common/secret/BUILD index 719c3e884af9..f486c2f7ce8e 100644 --- a/source/common/secret/BUILD +++ b/source/common/secret/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/common/shared_pool/BUILD b/source/common/shared_pool/BUILD index 447ad6538b56..1d55c9ec99a0 100644 --- a/source/common/shared_pool/BUILD +++ b/source/common/shared_pool/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/common/signal/BUILD b/source/common/signal/BUILD index 17dec6c9be55..6dc082eda079 100644 --- a/source/common/signal/BUILD +++ b/source/common/signal/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git 
a/source/common/singleton/BUILD b/source/common/singleton/BUILD index 1b52b93501a1..06d67beae1b5 100644 --- a/source/common/singleton/BUILD +++ b/source/common/singleton/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/common/ssl/BUILD b/source/common/ssl/BUILD index 3f46b11b2471..0be754cc4809 100644 --- a/source/common/ssl/BUILD +++ b/source/common/ssl/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/common/stats/BUILD b/source/common/stats/BUILD index 256074df9cbf..37d4cb76df4f 100644 --- a/source/common/stats/BUILD +++ b/source/common/stats/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/common/stream_info/BUILD b/source/common/stream_info/BUILD index 9abb095ed978..d2962e67ef15 100644 --- a/source/common/stream_info/BUILD +++ b/source/common/stream_info/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/common/tcp/BUILD b/source/common/tcp/BUILD index a9e3b948a1b2..f176de79ed55 100644 --- a/source/common/tcp/BUILD +++ b/source/common/tcp/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/common/tcp_proxy/BUILD b/source/common/tcp_proxy/BUILD index fc81b6fdb625..328aca0a23e9 100644 --- a/source/common/tcp_proxy/BUILD +++ b/source/common/tcp_proxy/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/common/thread_local/BUILD b/source/common/thread_local/BUILD index 82b0d913f0e4..3892298e00d6 100644 --- a/source/common/thread_local/BUILD +++ b/source/common/thread_local/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/common/tracing/BUILD b/source/common/tracing/BUILD index a99899fc312e..ef703c964a90 100644 --- a/source/common/tracing/BUILD +++ b/source/common/tracing/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/common/upstream/BUILD b/source/common/upstream/BUILD index f0634fd33bc4..bdbe7c309f6d 100644 --- a/source/common/upstream/BUILD +++ b/source/common/upstream/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/exe/BUILD b/source/exe/BUILD index 867605c7663d..0bee6f5858ef 100644 --- a/source/exe/BUILD +++ 
b/source/exe/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_binary", @@ -12,6 +10,8 @@ load( load("//source/extensions:all_extensions.bzl", "envoy_all_extensions") load("//bazel:repositories.bzl", "PPC_SKIP_TARGETS", "WINDOWS_SKIP_TARGETS") +licenses(["notice"]) # Apache 2 + envoy_package() alias( diff --git a/source/extensions/access_loggers/BUILD b/source/extensions/access_loggers/BUILD index 6156949edef6..06456dbbcb5e 100644 --- a/source/extensions/access_loggers/BUILD +++ b/source/extensions/access_loggers/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/access_loggers/common/BUILD b/source/extensions/access_loggers/common/BUILD index daa8a198e578..a4cf5294cf81 100644 --- a/source/extensions/access_loggers/common/BUILD +++ b/source/extensions/access_loggers/common/BUILD @@ -1,13 +1,13 @@ -licenses(["notice"]) # Apache 2 - -# Base class for implementations of AccessLog::Instance. - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# Base class for implementations of AccessLog::Instance. + envoy_package() envoy_cc_library( diff --git a/source/extensions/access_loggers/file/BUILD b/source/extensions/access_loggers/file/BUILD index 015920030af9..6e86f2e0a490 100644 --- a/source/extensions/access_loggers/file/BUILD +++ b/source/extensions/access_loggers/file/BUILD @@ -1,8 +1,3 @@ -licenses(["notice"]) # Apache 2 - -# Access log implementation that writes to a file. -# Public docs: docs/root/configuration/access_log.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -10,6 +5,11 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# Access log implementation that writes to a file. +# Public docs: docs/root/configuration/access_log.rst + envoy_package() envoy_cc_library( diff --git a/source/extensions/access_loggers/grpc/BUILD b/source/extensions/access_loggers/grpc/BUILD index 3cf198c4d0fc..e92a44b24d6d 100644 --- a/source/extensions/access_loggers/grpc/BUILD +++ b/source/extensions/access_loggers/grpc/BUILD @@ -1,8 +1,3 @@ -licenses(["notice"]) # Apache 2 - -# Access log implementation that writes to a gRPC service. -# Public docs: TODO(rodaine): Docs needed. - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -10,6 +5,11 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# Access log implementation that writes to a gRPC service. +# Public docs: TODO(rodaine): Docs needed. 
+ envoy_package() envoy_cc_library( diff --git a/source/extensions/clusters/BUILD b/source/extensions/clusters/BUILD index 7a4780afbdab..ee5bcf6bc186 100644 --- a/source/extensions/clusters/BUILD +++ b/source/extensions/clusters/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/clusters/aggregate/BUILD b/source/extensions/clusters/aggregate/BUILD index 8dab07320fdf..d6c7d4d1a515 100644 --- a/source/extensions/clusters/aggregate/BUILD +++ b/source/extensions/clusters/aggregate/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_extension( diff --git a/source/extensions/clusters/dynamic_forward_proxy/BUILD b/source/extensions/clusters/dynamic_forward_proxy/BUILD index d063252df167..744f1e1bfca8 100644 --- a/source/extensions/clusters/dynamic_forward_proxy/BUILD +++ b/source/extensions/clusters/dynamic_forward_proxy/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_extension( diff --git a/source/extensions/clusters/redis/BUILD b/source/extensions/clusters/redis/BUILD index 3519f9350629..3edf4864852c 100644 --- a/source/extensions/clusters/redis/BUILD +++ b/source/extensions/clusters/redis/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/common/BUILD b/source/extensions/common/BUILD index 035e287aa3c9..54a5bcddfc7f 100644 --- a/source/extensions/common/BUILD +++ b/source/extensions/common/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/common/aws/BUILD b/source/extensions/common/aws/BUILD index 876b88065c74..4d610a59545f 100644 --- a/source/extensions/common/aws/BUILD +++ b/source/extensions/common/aws/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/common/crypto/BUILD b/source/extensions/common/crypto/BUILD index 04c698e08480..836c8320a523 100644 --- a/source/extensions/common/crypto/BUILD +++ b/source/extensions/common/crypto/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_extension( diff --git a/source/extensions/common/dynamic_forward_proxy/BUILD b/source/extensions/common/dynamic_forward_proxy/BUILD index 89f6d47aae98..b4dbdb57eaa2 100644 --- a/source/extensions/common/dynamic_forward_proxy/BUILD +++ b/source/extensions/common/dynamic_forward_proxy/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git 
a/source/extensions/common/proxy_protocol/BUILD b/source/extensions/common/proxy_protocol/BUILD index 7eb374f5a5bf..fb0d2f74c09f 100644 --- a/source/extensions/common/proxy_protocol/BUILD +++ b/source/extensions/common/proxy_protocol/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/common/redis/BUILD b/source/extensions/common/redis/BUILD index 7c51e3a80f72..1d50b1cfc6fc 100644 --- a/source/extensions/common/redis/BUILD +++ b/source/extensions/common/redis/BUILD @@ -1,15 +1,15 @@ -licenses(["notice"]) # Apache 2 - -# Redis proxy L4 network filter. Implements consistent hashing and observability for large redis -# clusters. -# Public docs: docs/root/configuration/network_filters/redis_proxy_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# Redis proxy L4 network filter. Implements consistent hashing and observability for large redis +# clusters. +# Public docs: docs/root/configuration/network_filters/redis_proxy_filter.rst + envoy_package() envoy_cc_library( diff --git a/source/extensions/common/tap/BUILD b/source/extensions/common/tap/BUILD index ec9acb716fa4..8795a34b9170 100644 --- a/source/extensions/common/tap/BUILD +++ b/source/extensions/common/tap/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/common/wasm/BUILD b/source/extensions/common/wasm/BUILD index 9333c679421f..c511d3806fe1 100644 --- a/source/extensions/common/wasm/BUILD +++ b/source/extensions/common/wasm/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/common/wasm/null/BUILD b/source/extensions/common/wasm/null/BUILD index 2bae8acd9f4f..0d9d49510412 100644 --- a/source/extensions/common/wasm/null/BUILD +++ b/source/extensions/common/wasm/null/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/common/wasm/v8/BUILD b/source/extensions/common/wasm/v8/BUILD index 04d0954d1b2c..0e4f86d97a66 100644 --- a/source/extensions/common/wasm/v8/BUILD +++ b/source/extensions/common/wasm/v8/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/compression/common/compressor/BUILD b/source/extensions/compression/common/compressor/BUILD index eb16810cc8b9..54843124ba79 100644 --- a/source/extensions/compression/common/compressor/BUILD +++ b/source/extensions/compression/common/compressor/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/compression/common/decompressor/BUILD b/source/extensions/compression/common/decompressor/BUILD index 
3b3e96b3e980..27208bee530a 100644 --- a/source/extensions/compression/common/decompressor/BUILD +++ b/source/extensions/compression/common/decompressor/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/compression/gzip/common/BUILD b/source/extensions/compression/gzip/common/BUILD index b2393354384f..8ec29af79ddb 100644 --- a/source/extensions/compression/gzip/common/BUILD +++ b/source/extensions/compression/gzip/common/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/compression/gzip/compressor/BUILD b/source/extensions/compression/gzip/compressor/BUILD index 20df9161513d..3f37d2524356 100644 --- a/source/extensions/compression/gzip/compressor/BUILD +++ b/source/extensions/compression/gzip/compressor/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/compression/gzip/decompressor/BUILD b/source/extensions/compression/gzip/decompressor/BUILD index 220c40f5c5cc..f31199e80811 100644 --- a/source/extensions/compression/gzip/decompressor/BUILD +++ b/source/extensions/compression/gzip/decompressor/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/common/expr/BUILD b/source/extensions/filters/common/expr/BUILD index 316c36b05b19..d9abedc88404 100644 --- a/source/extensions/filters/common/expr/BUILD +++ b/source/extensions/filters/common/expr/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/common/ext_authz/BUILD b/source/extensions/filters/common/ext_authz/BUILD index 58ed8316353e..66db5c593ff4 100644 --- a/source/extensions/filters/common/ext_authz/BUILD +++ b/source/extensions/filters/common/ext_authz/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/common/fault/BUILD b/source/extensions/filters/common/fault/BUILD index e70a66db64eb..d64605085fc4 100644 --- a/source/extensions/filters/common/fault/BUILD +++ b/source/extensions/filters/common/fault/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/common/lua/BUILD b/source/extensions/filters/common/lua/BUILD index d1f515945c6e..b36d7b7414c7 100644 --- a/source/extensions/filters/common/lua/BUILD +++ b/source/extensions/filters/common/lua/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", @@ -8,6 +6,8 @@ load( 
load("//bazel:envoy_internal.bzl", "envoy_external_dep_path") load("@bazel_skylib//rules:common_settings.bzl", "bool_flag") +licenses(["notice"]) # Apache 2 + envoy_package() bool_flag( diff --git a/source/extensions/filters/common/original_src/BUILD b/source/extensions/filters/common/original_src/BUILD index 7cf8fd5926d2..76662376ee0c 100644 --- a/source/extensions/filters/common/original_src/BUILD +++ b/source/extensions/filters/common/original_src/BUILD @@ -1,13 +1,13 @@ -licenses(["notice"]) # Apache 2 - -# Helprs for filters for mirroring the downstream remote address on the upstream's source. - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# Helprs for filters for mirroring the downstream remote address on the upstream's source. + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/common/ratelimit/BUILD b/source/extensions/filters/common/ratelimit/BUILD index bd26ccb6b8b0..726bdf338f9a 100644 --- a/source/extensions/filters/common/ratelimit/BUILD +++ b/source/extensions/filters/common/ratelimit/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/common/rbac/BUILD b/source/extensions/filters/common/rbac/BUILD index 2784d91ffb11..9a9bbc105749 100644 --- a/source/extensions/filters/common/rbac/BUILD +++ b/source/extensions/filters/common/rbac/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/http/BUILD b/source/extensions/filters/http/BUILD index 7a4780afbdab..ee5bcf6bc186 100644 --- a/source/extensions/filters/http/BUILD +++ b/source/extensions/filters/http/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/http/adaptive_concurrency/BUILD b/source/extensions/filters/http/adaptive_concurrency/BUILD index 1cff74436f07..c6a7a2d4e95f 100644 --- a/source/extensions/filters/http/adaptive_concurrency/BUILD +++ b/source/extensions/filters/http/adaptive_concurrency/BUILD @@ -1,9 +1,3 @@ -licenses(["notice"]) # Apache 2 - -# HTTP L7 filter that dynamically adjusts the number of allowed concurrent -# requests based on sampled latencies. -# Public docs: docs/root/configuration/http_filters/adaptive_concurrency_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -11,6 +5,12 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# HTTP L7 filter that dynamically adjusts the number of allowed concurrent +# requests based on sampled latencies. 
+# Public docs: docs/root/configuration/http_filters/adaptive_concurrency_filter.rst + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/http/adaptive_concurrency/controller/BUILD b/source/extensions/filters/http/adaptive_concurrency/controller/BUILD index b5e828f9a3b3..768d34438936 100644 --- a/source/extensions/filters/http/adaptive_concurrency/controller/BUILD +++ b/source/extensions/filters/http/adaptive_concurrency/controller/BUILD @@ -1,15 +1,15 @@ -licenses(["notice"]) # Apache 2 - -# HTTP L7 filter that dynamically adjusts the number of allowed concurrent -# requests based on sampled latencies. -# Public docs: TODO (tonya11en) - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# HTTP L7 filter that dynamically adjusts the number of allowed concurrent +# requests based on sampled latencies. +# Public docs: TODO (tonya11en) + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/http/aws_lambda/BUILD b/source/extensions/filters/http/aws_lambda/BUILD index a3c73926c517..1e3d6006293a 100644 --- a/source/extensions/filters/http/aws_lambda/BUILD +++ b/source/extensions/filters/http/aws_lambda/BUILD @@ -1,8 +1,3 @@ -licenses(["notice"]) # Apache 2 - -# L7 HTTP AWS Lambda filter -# Public docs: docs/root/configuration/http_filters/aws_lambda_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -11,6 +6,11 @@ load( "envoy_proto_library", ) +licenses(["notice"]) # Apache 2 + +# L7 HTTP AWS Lambda filter +# Public docs: docs/root/configuration/http_filters/aws_lambda_filter.rst + envoy_package() envoy_proto_library( diff --git a/source/extensions/filters/http/aws_request_signing/BUILD b/source/extensions/filters/http/aws_request_signing/BUILD index c723fb932b08..a83efef61e98 100644 --- a/source/extensions/filters/http/aws_request_signing/BUILD +++ b/source/extensions/filters/http/aws_request_signing/BUILD @@ -1,8 +1,3 @@ -licenses(["notice"]) # Apache 2 - -# L7 HTTP AWS request signing filter -# Public docs: docs/root/configuration/http_filters/aws_request_signing_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -10,6 +5,11 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# L7 HTTP AWS request signing filter +# Public docs: docs/root/configuration/http_filters/aws_request_signing_filter.rst + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/http/buffer/BUILD b/source/extensions/filters/http/buffer/BUILD index e7629854f46b..eeb4a403931e 100644 --- a/source/extensions/filters/http/buffer/BUILD +++ b/source/extensions/filters/http/buffer/BUILD @@ -1,8 +1,3 @@ -licenses(["notice"]) # Apache 2 - -# Request buffering and timeout L7 HTTP filter -# Public docs: docs/root/configuration/http_filters/buffer_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -10,6 +5,11 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# Request buffering and timeout L7 HTTP filter +# Public docs: docs/root/configuration/http_filters/buffer_filter.rst + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/http/cache/BUILD b/source/extensions/filters/http/cache/BUILD index 03c1c4932fef..63327875e739 100644 --- a/source/extensions/filters/http/cache/BUILD +++ b/source/extensions/filters/http/cache/BUILD @@ -1,7 +1,3 @@ -licenses(["notice"]) # Apache 2 - -## Pluggable HTTP cache filter - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -10,6 +6,10 @@ 
load( "envoy_proto_library", ) +licenses(["notice"]) # Apache 2 + +## Pluggable HTTP cache filter + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/http/cache/simple_http_cache/BUILD b/source/extensions/filters/http/cache/simple_http_cache/BUILD index 6f569711d5dc..b38c273b2601 100644 --- a/source/extensions/filters/http/cache/simple_http_cache/BUILD +++ b/source/extensions/filters/http/cache/simple_http_cache/BUILD @@ -1,7 +1,3 @@ -licenses(["notice"]) # Apache 2 - -## WIP: Simple in-memory cache storage plugin. Not ready for deployment. - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -9,6 +5,10 @@ load( "envoy_proto_library", ) +licenses(["notice"]) # Apache 2 + +## WIP: Simple in-memory cache storage plugin. Not ready for deployment. + envoy_package() envoy_cc_extension( diff --git a/source/extensions/filters/http/common/BUILD b/source/extensions/filters/http/common/BUILD index b5d2b2a030e3..7a3ccda3d2c1 100644 --- a/source/extensions/filters/http/common/BUILD +++ b/source/extensions/filters/http/common/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/http/common/compressor/BUILD b/source/extensions/filters/http/common/compressor/BUILD index 7058ab2fc58b..56468881c8f2 100644 --- a/source/extensions/filters/http/common/compressor/BUILD +++ b/source/extensions/filters/http/common/compressor/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() # TODO(rojkov): move this library to source/extensions/filters/http/compressor/. 
diff --git a/source/extensions/filters/http/compressor/BUILD b/source/extensions/filters/http/compressor/BUILD index 188ee5e38be0..ea1d38801a5e 100644 --- a/source/extensions/filters/http/compressor/BUILD +++ b/source/extensions/filters/http/compressor/BUILD @@ -1,8 +1,3 @@ -licenses(["notice"]) # Apache 2 - -# HTTP L7 filter that performs compression with configurable compression libraries -# Public docs: docs/root/configuration/http_filters/compressor_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -10,6 +5,11 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# HTTP L7 filter that performs compression with configurable compression libraries +# Public docs: docs/root/configuration/http_filters/compressor_filter.rst + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/http/cors/BUILD b/source/extensions/filters/http/cors/BUILD index 9004a1c984fa..0685c0e41f27 100644 --- a/source/extensions/filters/http/cors/BUILD +++ b/source/extensions/filters/http/cors/BUILD @@ -1,8 +1,3 @@ -licenses(["notice"]) # Apache 2 - -# L7 HTTP filter which implements CORS processing (https://en.wikipedia.org/wiki/Cross-origin_resource_sharing) -# Public docs: docs/root/configuration/http_filters/cors_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -10,6 +5,11 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# L7 HTTP filter which implements CORS processing (https://en.wikipedia.org/wiki/Cross-origin_resource_sharing) +# Public docs: docs/root/configuration/http_filters/cors_filter.rst + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/http/csrf/BUILD b/source/extensions/filters/http/csrf/BUILD index cd2315773e6e..a9361502dd10 100644 --- a/source/extensions/filters/http/csrf/BUILD +++ b/source/extensions/filters/http/csrf/BUILD @@ -1,8 +1,3 @@ -licenses(["notice"]) # Apache 2 - -# L7 HTTP filter which implements CSRF processing (https://www.owasp.org/index.php/Cross-Site_Request_Forgery_(CSRF)) -# Public docs: docs/root/configuration/http_filters/csrf_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -10,6 +5,11 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# L7 HTTP filter which implements CSRF processing (https://www.owasp.org/index.php/Cross-Site_Request_Forgery_(CSRF)) +# Public docs: docs/root/configuration/http_filters/csrf_filter.rst + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/http/decompressor/BUILD b/source/extensions/filters/http/decompressor/BUILD index 3e8faf194250..b4665ca09b7b 100644 --- a/source/extensions/filters/http/decompressor/BUILD +++ b/source/extensions/filters/http/decompressor/BUILD @@ -1,8 +1,3 @@ -licenses(["notice"]) # Apache 2 - -# HTTP L7 filter that performs decompression with configurable decompression libraries -# Public docs: docs/root/configuration/http_filters/decompressor_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -10,6 +5,11 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# HTTP L7 filter that performs decompression with configurable decompression libraries +# Public docs: docs/root/configuration/http_filters/decompressor_filter.rst + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/http/dynamic_forward_proxy/BUILD b/source/extensions/filters/http/dynamic_forward_proxy/BUILD index dc85075eb30d..3e63ff181921 100644 --- a/source/extensions/filters/http/dynamic_forward_proxy/BUILD +++ 
b/source/extensions/filters/http/dynamic_forward_proxy/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/http/dynamo/BUILD b/source/extensions/filters/http/dynamo/BUILD index 79296db8818f..ad5f2fc3b97e 100644 --- a/source/extensions/filters/http/dynamo/BUILD +++ b/source/extensions/filters/http/dynamo/BUILD @@ -1,8 +1,3 @@ -licenses(["notice"]) # Apache 2 - -# AWS DynamoDB L7 HTTP filter (observability): https://aws.amazon.com/dynamodb/ -# Public docs: docs/root/configuration/http_filters/dynamodb_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -10,6 +5,11 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# AWS DynamoDB L7 HTTP filter (observability): https://aws.amazon.com/dynamodb/ +# Public docs: docs/root/configuration/http_filters/dynamodb_filter.rst + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/http/ext_authz/BUILD b/source/extensions/filters/http/ext_authz/BUILD index 1af4bcf28687..559363edcf7b 100644 --- a/source/extensions/filters/http/ext_authz/BUILD +++ b/source/extensions/filters/http/ext_authz/BUILD @@ -1,8 +1,3 @@ -licenses(["notice"]) # Apache 2 - -# External authorization L7 HTTP filter -# Public docs: TODO(saumoh): Docs needed in docs/root/configuration/http_filters - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -10,6 +5,11 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# External authorization L7 HTTP filter +# Public docs: TODO(saumoh): Docs needed in docs/root/configuration/http_filters + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/http/fault/BUILD b/source/extensions/filters/http/fault/BUILD index 749e04b67a4c..726cda5785d0 100644 --- a/source/extensions/filters/http/fault/BUILD +++ b/source/extensions/filters/http/fault/BUILD @@ -1,8 +1,3 @@ -licenses(["notice"]) # Apache 2 - -# HTTP L7 filter that injects faults into the request flow -# Public docs: docs/root/configuration/http_filters/fault_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -10,6 +5,11 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# HTTP L7 filter that injects faults into the request flow +# Public docs: docs/root/configuration/http_filters/fault_filter.rst + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/http/grpc_http1_bridge/BUILD b/source/extensions/filters/http/grpc_http1_bridge/BUILD index 379e0d75f1b6..486904e2f8a5 100644 --- a/source/extensions/filters/http/grpc_http1_bridge/BUILD +++ b/source/extensions/filters/http/grpc_http1_bridge/BUILD @@ -1,8 +1,3 @@ -licenses(["notice"]) # Apache 2 - -# L7 HTTP filter that bridges HTTP/1.1 unary "gRPC" to compliant HTTP/2 gRPC. -# Public docs: docs/root/configuration/http_filters/grpc_http1_bridge_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -10,6 +5,11 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# L7 HTTP filter that bridges HTTP/1.1 unary "gRPC" to compliant HTTP/2 gRPC. 
+# Public docs: docs/root/configuration/http_filters/grpc_http1_bridge_filter.rst + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/http/grpc_http1_reverse_bridge/BUILD b/source/extensions/filters/http/grpc_http1_reverse_bridge/BUILD index d44f111be849..1a80fefdb45c 100644 --- a/source/extensions/filters/http/grpc_http1_reverse_bridge/BUILD +++ b/source/extensions/filters/http/grpc_http1_reverse_bridge/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/http/grpc_json_transcoder/BUILD b/source/extensions/filters/http/grpc_json_transcoder/BUILD index ca2ce1749d40..3b7ab0a09d22 100644 --- a/source/extensions/filters/http/grpc_json_transcoder/BUILD +++ b/source/extensions/filters/http/grpc_json_transcoder/BUILD @@ -1,8 +1,3 @@ -licenses(["notice"]) # Apache 2 - -# L7 HTTP filter that implements binary gRPC to JSON transcoding -# Public docs: docs/root/configuration/http_filters/grpc_json_transcoder_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -10,6 +5,11 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# L7 HTTP filter that implements binary gRPC to JSON transcoding +# Public docs: docs/root/configuration/http_filters/grpc_json_transcoder_filter.rst + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/http/grpc_stats/BUILD b/source/extensions/filters/http/grpc_stats/BUILD index 171e49afd320..62bc49e8be01 100644 --- a/source/extensions/filters/http/grpc_stats/BUILD +++ b/source/extensions/filters/http/grpc_stats/BUILD @@ -1,13 +1,13 @@ -licenses(["notice"]) # Apache 2 - -# L7 HTTP filter that implements gRPC telemetry - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# L7 HTTP filter that implements gRPC telemetry + envoy_package() envoy_cc_extension( diff --git a/source/extensions/filters/http/grpc_web/BUILD b/source/extensions/filters/http/grpc_web/BUILD index e1509bde3c50..1f6910590907 100644 --- a/source/extensions/filters/http/grpc_web/BUILD +++ b/source/extensions/filters/http/grpc_web/BUILD @@ -1,8 +1,3 @@ -licenses(["notice"]) # Apache 2 - -# L7 HTTP filter that implements the grpc-web protocol (https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-WEB.md) -# Public docs: docs/root/configuration/http_filters/grpc_web_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -10,6 +5,11 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# L7 HTTP filter that implements the grpc-web protocol (https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-WEB.md) +# Public docs: docs/root/configuration/http_filters/grpc_web_filter.rst + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/http/gzip/BUILD b/source/extensions/filters/http/gzip/BUILD index b27fe1fef620..3844addc83b6 100644 --- a/source/extensions/filters/http/gzip/BUILD +++ b/source/extensions/filters/http/gzip/BUILD @@ -1,8 +1,3 @@ -licenses(["notice"]) # Apache 2 - -# HTTP L7 filter that performs gzip compression -# Public docs: docs/root/configuration/http_filters/gzip_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -10,6 +5,11 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# HTTP L7 filter that performs gzip compression +# Public docs: 
docs/root/configuration/http_filters/gzip_filter.rst + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/http/header_to_metadata/BUILD b/source/extensions/filters/http/header_to_metadata/BUILD index d22a182a2dfd..e0232d4d8d1c 100644 --- a/source/extensions/filters/http/header_to_metadata/BUILD +++ b/source/extensions/filters/http/header_to_metadata/BUILD @@ -1,8 +1,3 @@ -licenses(["notice"]) # Apache 2 - -# HTTP L7 filter that transforms request data into dynamic metadata -# Public docs: docs/root/configuration/http_filters/header_to_metadata_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -10,6 +5,11 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# HTTP L7 filter that transforms request data into dynamic metadata +# Public docs: docs/root/configuration/http_filters/header_to_metadata_filter.rst + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/http/health_check/BUILD b/source/extensions/filters/http/health_check/BUILD index d26759688ee8..f0841d388b48 100644 --- a/source/extensions/filters/http/health_check/BUILD +++ b/source/extensions/filters/http/health_check/BUILD @@ -1,8 +1,3 @@ -licenses(["notice"]) # Apache 2 - -# L7 HTTP filter that implements health check responses -# Public docs: docs/root/configuration/http_filters/health_check_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -10,6 +5,11 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# L7 HTTP filter that implements health check responses +# Public docs: docs/root/configuration/http_filters/health_check_filter.rst + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/http/ip_tagging/BUILD b/source/extensions/filters/http/ip_tagging/BUILD index a28d3cf649ed..cbcf98b1d516 100644 --- a/source/extensions/filters/http/ip_tagging/BUILD +++ b/source/extensions/filters/http/ip_tagging/BUILD @@ -1,8 +1,3 @@ -licenses(["notice"]) # Apache 2 - -# HTTP L7 filter that writes an IP tagging header based on IP trie data -# Public docs: docs/root/configuration/http_filters/ip_tagging_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -10,6 +5,11 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# HTTP L7 filter that writes an IP tagging header based on IP trie data +# Public docs: docs/root/configuration/http_filters/ip_tagging_filter.rst + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/http/jwt_authn/BUILD b/source/extensions/filters/http/jwt_authn/BUILD index b64922a9c442..a2967b990132 100644 --- a/source/extensions/filters/http/jwt_authn/BUILD +++ b/source/extensions/filters/http/jwt_authn/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/http/lua/BUILD b/source/extensions/filters/http/lua/BUILD index d7560f5207b7..9df156c1eb32 100644 --- a/source/extensions/filters/http/lua/BUILD +++ b/source/extensions/filters/http/lua/BUILD @@ -1,8 +1,3 @@ -licenses(["notice"]) # Apache 2 - -# Lua scripting L7 HTTP filter (https://www.lua.org/, http://luajit.org/) -# Public docs: docs/root/configuration/http_filters/lua_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -10,6 +5,11 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# Lua scripting L7 HTTP filter 
(https://www.lua.org/, http://luajit.org/) +# Public docs: docs/root/configuration/http_filters/lua_filter.rst + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/http/on_demand/BUILD b/source/extensions/filters/http/on_demand/BUILD index 2332afdae292..3f4ef02c1dba 100644 --- a/source/extensions/filters/http/on_demand/BUILD +++ b/source/extensions/filters/http/on_demand/BUILD @@ -1,7 +1,3 @@ -licenses(["notice"]) # Apache 2 - -# On-demand RDS update HTTP filter - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -9,6 +5,10 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# On-demand RDS update HTTP filter + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/http/original_src/BUILD b/source/extensions/filters/http/original_src/BUILD index b95bf621118b..eff7f4cf9679 100644 --- a/source/extensions/filters/http/original_src/BUILD +++ b/source/extensions/filters/http/original_src/BUILD @@ -1,7 +1,3 @@ -licenses(["notice"]) # Apache 2 - -# A filter for mirroring the downstream remote address on the upstream's source. - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -9,6 +5,10 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# A filter for mirroring the downstream remote address on the upstream's source. + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/http/ratelimit/BUILD b/source/extensions/filters/http/ratelimit/BUILD index af3b29e07908..4a8c7a8c35d5 100644 --- a/source/extensions/filters/http/ratelimit/BUILD +++ b/source/extensions/filters/http/ratelimit/BUILD @@ -1,8 +1,3 @@ -licenses(["notice"]) # Apache 2 - -# Ratelimit L7 HTTP filter -# Public docs: docs/root/configuration/http_filters/rate_limit_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -10,6 +5,11 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# Ratelimit L7 HTTP filter +# Public docs: docs/root/configuration/http_filters/rate_limit_filter.rst + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/http/rbac/BUILD b/source/extensions/filters/http/rbac/BUILD index 77472b169e84..9554a910a16c 100644 --- a/source/extensions/filters/http/rbac/BUILD +++ b/source/extensions/filters/http/rbac/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_extension( diff --git a/source/extensions/filters/http/router/BUILD b/source/extensions/filters/http/router/BUILD index 98ce18396f5e..ab7487d00b6a 100644 --- a/source/extensions/filters/http/router/BUILD +++ b/source/extensions/filters/http/router/BUILD @@ -1,14 +1,14 @@ -licenses(["notice"]) # Apache 2 - -# HTTP L7 filter responsible for routing to upstream connection pools -# Public docs: docs/root/configuration/http_filters/router_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# HTTP L7 filter responsible for routing to upstream connection pools +# Public docs: docs/root/configuration/http_filters/router_filter.rst + envoy_package() envoy_cc_extension( diff --git a/source/extensions/filters/http/squash/BUILD b/source/extensions/filters/http/squash/BUILD index 0047bdf3f7de..8579d7a2860a 100644 --- a/source/extensions/filters/http/squash/BUILD +++ b/source/extensions/filters/http/squash/BUILD @@ -1,8 +1,3 @@ -licenses(["notice"]) # Apache 2 - -# L7 HTTP 
filter that implements the Squash microservice debugger -# Public docs: docs/root/configuration/http_filters/squash_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -10,6 +5,11 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# L7 HTTP filter that implements the Squash microservice debugger +# Public docs: docs/root/configuration/http_filters/squash_filter.rst + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/http/tap/BUILD b/source/extensions/filters/http/tap/BUILD index d388912dfe56..62a8d2f36f5a 100644 --- a/source/extensions/filters/http/tap/BUILD +++ b/source/extensions/filters/http/tap/BUILD @@ -1,8 +1,3 @@ -licenses(["notice"]) # Apache 2 - -# L7 HTTP Tap filter -# Public docs: docs/root/configuration/http_filters/tap_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -10,6 +5,11 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# L7 HTTP Tap filter +# Public docs: docs/root/configuration/http_filters/tap_filter.rst + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/listener/BUILD b/source/extensions/filters/listener/BUILD index 6156949edef6..06456dbbcb5e 100644 --- a/source/extensions/filters/listener/BUILD +++ b/source/extensions/filters/listener/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/listener/http_inspector/BUILD b/source/extensions/filters/listener/http_inspector/BUILD index 8df52851b824..70d097484943 100644 --- a/source/extensions/filters/listener/http_inspector/BUILD +++ b/source/extensions/filters/listener/http_inspector/BUILD @@ -1,7 +1,3 @@ -licenses(["notice"]) # Apache 2 - -# HTTP inspector filter for sniffing HTTP protocol and setting HTTP version to a FilterChain. - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -9,6 +5,10 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# HTTP inspector filter for sniffing HTTP protocol and setting HTTP version to a FilterChain. + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/listener/original_dst/BUILD b/source/extensions/filters/listener/original_dst/BUILD index d21098d656e3..a940d212c987 100644 --- a/source/extensions/filters/listener/original_dst/BUILD +++ b/source/extensions/filters/listener/original_dst/BUILD @@ -1,8 +1,3 @@ -licenses(["notice"]) # Apache 2 - -# ORIGINAL_DST iptables redirection listener filter -# Public docs: docs/root/configuration/listener_filters/original_dst_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -10,6 +5,11 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# ORIGINAL_DST iptables redirection listener filter +# Public docs: docs/root/configuration/listener_filters/original_dst_filter.rst + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/listener/original_src/BUILD b/source/extensions/filters/listener/original_src/BUILD index 96fac9a17014..4bed07cc6619 100644 --- a/source/extensions/filters/listener/original_src/BUILD +++ b/source/extensions/filters/listener/original_src/BUILD @@ -1,7 +1,3 @@ -licenses(["notice"]) # Apache 2 - -# A filter for mirroring the downstream remote address on the upstream's source. 
- load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -9,6 +5,10 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# A filter for mirroring the downstream remote address on the upstream's source. + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/listener/proxy_protocol/BUILD b/source/extensions/filters/listener/proxy_protocol/BUILD index f62e9940af1c..d4b87f1bfd5e 100644 --- a/source/extensions/filters/listener/proxy_protocol/BUILD +++ b/source/extensions/filters/listener/proxy_protocol/BUILD @@ -1,7 +1,3 @@ -licenses(["notice"]) # Apache 2 - -# Proxy protocol listener filter: https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -9,6 +5,10 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# Proxy protocol listener filter: https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/listener/tls_inspector/BUILD b/source/extensions/filters/listener/tls_inspector/BUILD index d400c3534e28..c751c53156a4 100644 --- a/source/extensions/filters/listener/tls_inspector/BUILD +++ b/source/extensions/filters/listener/tls_inspector/BUILD @@ -1,8 +1,3 @@ -licenses(["notice"]) # Apache 2 - -# TLS inspector filter for examining various TLS parameters before routing to a FilterChain. -# Public docs: docs/root/configuration/listener_filters/tls_inspector.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -10,6 +5,11 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# TLS inspector filter for examining various TLS parameters before routing to a FilterChain. +# Public docs: docs/root/configuration/listener_filters/tls_inspector.rst + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/network/BUILD b/source/extensions/filters/network/BUILD index 7a4780afbdab..ee5bcf6bc186 100644 --- a/source/extensions/filters/network/BUILD +++ b/source/extensions/filters/network/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/network/client_ssl_auth/BUILD b/source/extensions/filters/network/client_ssl_auth/BUILD index d2f50785404d..2a120e030866 100644 --- a/source/extensions/filters/network/client_ssl_auth/BUILD +++ b/source/extensions/filters/network/client_ssl_auth/BUILD @@ -1,8 +1,3 @@ -licenses(["notice"]) # Apache 2 - -# Client SSL authorization L4 network filter -# Public docs: docs/root/configuration/network_filters/client_ssl_auth_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -10,6 +5,11 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# Client SSL authorization L4 network filter +# Public docs: docs/root/configuration/network_filters/client_ssl_auth_filter.rst + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/network/common/BUILD b/source/extensions/filters/network/common/BUILD index dcbf6142308e..4e70e2aa414d 100644 --- a/source/extensions/filters/network/common/BUILD +++ b/source/extensions/filters/network/common/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/network/common/redis/BUILD 
b/source/extensions/filters/network/common/redis/BUILD index d648832e0b1e..3b4dcedbb01e 100644 --- a/source/extensions/filters/network/common/redis/BUILD +++ b/source/extensions/filters/network/common/redis/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/network/direct_response/BUILD b/source/extensions/filters/network/direct_response/BUILD index e5037679e900..fe6244a5c19d 100644 --- a/source/extensions/filters/network/direct_response/BUILD +++ b/source/extensions/filters/network/direct_response/BUILD @@ -1,8 +1,3 @@ -licenses(["notice"]) # Apache 2 - -# Direct response L4 network filter. -# Public docs: docs/root/configuration/network_filters/direct_response_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -10,6 +5,11 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# Direct response L4 network filter. +# Public docs: docs/root/configuration/network_filters/direct_response_filter.rst + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/network/dubbo_proxy/BUILD b/source/extensions/filters/network/dubbo_proxy/BUILD index cb61a5bfc989..6b2affdd7d72 100644 --- a/source/extensions/filters/network/dubbo_proxy/BUILD +++ b/source/extensions/filters/network/dubbo_proxy/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/network/dubbo_proxy/filters/BUILD b/source/extensions/filters/network/dubbo_proxy/filters/BUILD index 19f4fd317675..2fc5922c92ea 100644 --- a/source/extensions/filters/network/dubbo_proxy/filters/BUILD +++ b/source/extensions/filters/network/dubbo_proxy/filters/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/network/dubbo_proxy/router/BUILD b/source/extensions/filters/network/dubbo_proxy/router/BUILD index 13a115434790..9dd2cf7e46c8 100644 --- a/source/extensions/filters/network/dubbo_proxy/router/BUILD +++ b/source/extensions/filters/network/dubbo_proxy/router/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/network/echo/BUILD b/source/extensions/filters/network/echo/BUILD index 6e26725de2fb..6d39336775b0 100644 --- a/source/extensions/filters/network/echo/BUILD +++ b/source/extensions/filters/network/echo/BUILD @@ -1,8 +1,3 @@ -licenses(["notice"]) # Apache 2 - -# Echo L4 network filter. This is primarily a simplistic example. -# Public docs: docs/root/configuration/network_filters/echo_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -10,6 +5,11 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# Echo L4 network filter. This is primarily a simplistic example. 
+# Public docs: docs/root/configuration/network_filters/echo_filter.rst + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/network/ext_authz/BUILD b/source/extensions/filters/network/ext_authz/BUILD index 286c454dc9cd..e321d1b1221d 100644 --- a/source/extensions/filters/network/ext_authz/BUILD +++ b/source/extensions/filters/network/ext_authz/BUILD @@ -1,8 +1,3 @@ -licenses(["notice"]) # Apache 2 - -# External authorization L4 network filter -# Public docs: TODO(saumoh): Docs needed in docs/root/configuration/network_filters - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -10,6 +5,11 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# External authorization L4 network filter +# Public docs: TODO(saumoh): Docs needed in docs/root/configuration/network_filters + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/network/http_connection_manager/BUILD b/source/extensions/filters/network/http_connection_manager/BUILD index 21a10ddadef3..ec576a8abac9 100644 --- a/source/extensions/filters/network/http_connection_manager/BUILD +++ b/source/extensions/filters/network/http_connection_manager/BUILD @@ -1,15 +1,15 @@ -licenses(["notice"]) # Apache 2 - -# L4 network filter that implements HTTP protocol handling and filtering. This filter internally -# drives all of the L7 HTTP filters. -# Public docs: docs/root/configuration/http_conn_man/http_conn_man.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# L4 network filter that implements HTTP protocol handling and filtering. This filter internally +# drives all of the L7 HTTP filters. +# Public docs: docs/root/configuration/http_conn_man/http_conn_man.rst + envoy_package() envoy_cc_extension( diff --git a/source/extensions/filters/network/kafka/BUILD b/source/extensions/filters/network/kafka/BUILD index f4588076cfae..495a94a7bad6 100644 --- a/source/extensions/filters/network/kafka/BUILD +++ b/source/extensions/filters/network/kafka/BUILD @@ -1,8 +1,4 @@ -licenses(["notice"]) # Apache 2 - -# Kafka network filter. -# Broker filter public docs: docs/root/configuration/network_filters/kafka_broker_filter.rst - +load("@rules_python//python:defs.bzl", "py_binary", "py_library") load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -10,6 +6,11 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# Kafka network filter. 
+# Broker filter public docs: docs/root/configuration/network_filters/kafka_broker_filter.rst + envoy_package() envoy_cc_extension( diff --git a/source/extensions/filters/network/local_ratelimit/BUILD b/source/extensions/filters/network/local_ratelimit/BUILD index c13b64a3b73d..052b817726d9 100644 --- a/source/extensions/filters/network/local_ratelimit/BUILD +++ b/source/extensions/filters/network/local_ratelimit/BUILD @@ -1,8 +1,3 @@ -licenses(["notice"]) # Apache 2 - -# Local ratelimit L4 network filter -# Public docs: docs/root/configuration/network_filters/local_rate_limit_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -10,6 +5,11 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# Local ratelimit L4 network filter +# Public docs: docs/root/configuration/network_filters/local_rate_limit_filter.rst + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/network/mongo_proxy/BUILD b/source/extensions/filters/network/mongo_proxy/BUILD index e471803285ad..04c14c2c610c 100644 --- a/source/extensions/filters/network/mongo_proxy/BUILD +++ b/source/extensions/filters/network/mongo_proxy/BUILD @@ -1,8 +1,3 @@ -licenses(["notice"]) # Apache 2 - -# Mongo proxy L4 network filter (observability and fault injection). -# Public docs: docs/root/configuration/network_filters/mongo_proxy_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -10,6 +5,11 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# Mongo proxy L4 network filter (observability and fault injection). +# Public docs: docs/root/configuration/network_filters/mongo_proxy_filter.rst + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/network/mysql_proxy/BUILD b/source/extensions/filters/network/mysql_proxy/BUILD index bd27d007f8b2..99b5ebdd8ae9 100644 --- a/source/extensions/filters/network/mysql_proxy/BUILD +++ b/source/extensions/filters/network/mysql_proxy/BUILD @@ -1,8 +1,3 @@ -licenses(["notice"]) # Apache 2 - -# MySQL proxy L7 network filter. -# Public docs: docs/root/configuration/network_filters/mysql_proxy_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -10,6 +5,11 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# MySQL proxy L7 network filter. +# Public docs: docs/root/configuration/network_filters/mysql_proxy_filter.rst + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/network/postgres_proxy/BUILD b/source/extensions/filters/network/postgres_proxy/BUILD index 05b99ade974f..a3c13b7c1633 100644 --- a/source/extensions/filters/network/postgres_proxy/BUILD +++ b/source/extensions/filters/network/postgres_proxy/BUILD @@ -1,10 +1,3 @@ -licenses(["notice"]) # Apache 2 - -#package(default_visibility = ["//visibility:public"]) - -# PostgresSQL proxy L7 network filter. -# Public docs: docs/root/configuration/network_filters/postgres_proxy_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -12,6 +5,13 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + +#package(default_visibility = ["//visibility:public"]) + +# PostgresSQL proxy L7 network filter. 
+# Public docs: docs/root/configuration/network_filters/postgres_proxy_filter.rst + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/network/ratelimit/BUILD b/source/extensions/filters/network/ratelimit/BUILD index 6ac6dc7f87f1..68f54558afa4 100644 --- a/source/extensions/filters/network/ratelimit/BUILD +++ b/source/extensions/filters/network/ratelimit/BUILD @@ -1,8 +1,3 @@ -licenses(["notice"]) # Apache 2 - -# Ratelimit L4 network filter -# Public docs: docs/root/configuration/network_filters/rate_limit_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -10,6 +5,11 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# Ratelimit L4 network filter +# Public docs: docs/root/configuration/network_filters/rate_limit_filter.rst + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/network/rbac/BUILD b/source/extensions/filters/network/rbac/BUILD index f5c63db53d0a..367104e913d8 100644 --- a/source/extensions/filters/network/rbac/BUILD +++ b/source/extensions/filters/network/rbac/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_extension( diff --git a/source/extensions/filters/network/redis_proxy/BUILD b/source/extensions/filters/network/redis_proxy/BUILD index 38611a99f1d5..4d452f0cad3c 100644 --- a/source/extensions/filters/network/redis_proxy/BUILD +++ b/source/extensions/filters/network/redis_proxy/BUILD @@ -1,9 +1,3 @@ -licenses(["notice"]) # Apache 2 - -# Redis proxy L4 network filter. Implements consistent hashing and observability for large redis -# clusters. -# Public docs: docs/root/configuration/network_filters/redis_proxy_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -11,6 +5,12 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# Redis proxy L4 network filter. Implements consistent hashing and observability for large redis +# clusters. 
+# Public docs: docs/root/configuration/network_filters/redis_proxy_filter.rst + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/network/rocketmq_proxy/BUILD b/source/extensions/filters/network/rocketmq_proxy/BUILD index 65c4f18be827..7ce5e971d74a 100644 --- a/source/extensions/filters/network/rocketmq_proxy/BUILD +++ b/source/extensions/filters/network/rocketmq_proxy/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/network/rocketmq_proxy/router/BUILD b/source/extensions/filters/network/rocketmq_proxy/router/BUILD index 19227abff64a..03f3b70a34be 100644 --- a/source/extensions/filters/network/rocketmq_proxy/router/BUILD +++ b/source/extensions/filters/network/rocketmq_proxy/router/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/network/sni_cluster/BUILD b/source/extensions/filters/network/sni_cluster/BUILD index 06dbdf39d290..6524b5defe1e 100644 --- a/source/extensions/filters/network/sni_cluster/BUILD +++ b/source/extensions/filters/network/sni_cluster/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/network/sni_dynamic_forward_proxy/BUILD b/source/extensions/filters/network/sni_dynamic_forward_proxy/BUILD index b4a08a55260c..d7f95b44d6bd 100644 --- a/source/extensions/filters/network/sni_dynamic_forward_proxy/BUILD +++ b/source/extensions/filters/network/sni_dynamic_forward_proxy/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/network/tcp_proxy/BUILD b/source/extensions/filters/network/tcp_proxy/BUILD index 9b6db30952a0..312b3233b10d 100644 --- a/source/extensions/filters/network/tcp_proxy/BUILD +++ b/source/extensions/filters/network/tcp_proxy/BUILD @@ -1,14 +1,14 @@ -licenses(["notice"]) # Apache 2 - -# TCP proxy L4 network filter. -# Public docs: docs/root/configuration/network_filters/tcp_proxy_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# TCP proxy L4 network filter. 
+# Public docs: docs/root/configuration/network_filters/tcp_proxy_filter.rst + envoy_package() envoy_cc_extension( diff --git a/source/extensions/filters/network/thrift_proxy/BUILD b/source/extensions/filters/network/thrift_proxy/BUILD index cad78eb1d783..baa733731637 100644 --- a/source/extensions/filters/network/thrift_proxy/BUILD +++ b/source/extensions/filters/network/thrift_proxy/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/network/thrift_proxy/filters/BUILD b/source/extensions/filters/network/thrift_proxy/filters/BUILD index ba1054d990cf..808e42dd8e98 100644 --- a/source/extensions/filters/network/thrift_proxy/filters/BUILD +++ b/source/extensions/filters/network/thrift_proxy/filters/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/network/thrift_proxy/filters/ratelimit/BUILD b/source/extensions/filters/network/thrift_proxy/filters/ratelimit/BUILD index 953db0fdffc7..5c136b0a0353 100644 --- a/source/extensions/filters/network/thrift_proxy/filters/ratelimit/BUILD +++ b/source/extensions/filters/network/thrift_proxy/filters/ratelimit/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/network/thrift_proxy/router/BUILD b/source/extensions/filters/network/thrift_proxy/router/BUILD index f268e6a85cbd..74a706741538 100644 --- a/source/extensions/filters/network/thrift_proxy/router/BUILD +++ b/source/extensions/filters/network/thrift_proxy/router/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_extension( diff --git a/source/extensions/filters/network/zookeeper_proxy/BUILD b/source/extensions/filters/network/zookeeper_proxy/BUILD index 5582ecdec9fe..301498c6465b 100644 --- a/source/extensions/filters/network/zookeeper_proxy/BUILD +++ b/source/extensions/filters/network/zookeeper_proxy/BUILD @@ -1,8 +1,3 @@ -licenses(["notice"]) # Apache 2 - -# ZooKeeper proxy L7 network filter. -# Public docs: docs/root/configuration/network_filters/zookeeper_proxy_filter.rst - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -10,6 +5,11 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# ZooKeeper proxy L7 network filter. 
+# Public docs: docs/root/configuration/network_filters/zookeeper_proxy_filter.rst + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/udp/dns_filter/BUILD b/source/extensions/filters/udp/dns_filter/BUILD index b538d52110a4..ab3de9ef5f08 100644 --- a/source/extensions/filters/udp/dns_filter/BUILD +++ b/source/extensions/filters/udp/dns_filter/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/filters/udp/udp_proxy/BUILD b/source/extensions/filters/udp/udp_proxy/BUILD index c111de929f66..7b9efa4498a2 100644 --- a/source/extensions/filters/udp/udp_proxy/BUILD +++ b/source/extensions/filters/udp/udp_proxy/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/grpc_credentials/BUILD b/source/extensions/grpc_credentials/BUILD index 6156949edef6..06456dbbcb5e 100644 --- a/source/extensions/grpc_credentials/BUILD +++ b/source/extensions/grpc_credentials/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/grpc_credentials/aws_iam/BUILD b/source/extensions/grpc_credentials/aws_iam/BUILD index 2b8980e7651f..4c3e179096b1 100644 --- a/source/extensions/grpc_credentials/aws_iam/BUILD +++ b/source/extensions/grpc_credentials/aws_iam/BUILD @@ -1,13 +1,13 @@ -licenses(["notice"]) # Apache 2 - -# AWS IAM gRPC Credentials - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# AWS IAM gRPC Credentials + envoy_package() envoy_cc_extension( diff --git a/source/extensions/grpc_credentials/example/BUILD b/source/extensions/grpc_credentials/example/BUILD index b62762a2030d..30025a7c046e 100644 --- a/source/extensions/grpc_credentials/example/BUILD +++ b/source/extensions/grpc_credentials/example/BUILD @@ -1,13 +1,13 @@ -licenses(["notice"]) # Apache 2 - -# Example gRPC Credentials - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# Example gRPC Credentials + envoy_package() envoy_cc_library( diff --git a/source/extensions/grpc_credentials/file_based_metadata/BUILD b/source/extensions/grpc_credentials/file_based_metadata/BUILD index f1feb60d3196..b41ac277c73f 100644 --- a/source/extensions/grpc_credentials/file_based_metadata/BUILD +++ b/source/extensions/grpc_credentials/file_based_metadata/BUILD @@ -1,13 +1,13 @@ -licenses(["notice"]) # Apache 2 - -# File Based Metadata gRPC Credentials - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# File Based Metadata gRPC Credentials + envoy_package() envoy_cc_extension( diff --git a/source/extensions/health_checkers/BUILD b/source/extensions/health_checkers/BUILD index 6156949edef6..06456dbbcb5e 100644 --- a/source/extensions/health_checkers/BUILD +++ b/source/extensions/health_checkers/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + 
envoy_package() envoy_cc_library( diff --git a/source/extensions/health_checkers/redis/BUILD b/source/extensions/health_checkers/redis/BUILD index 0dbdfb73694e..3dd32163468a 100644 --- a/source/extensions/health_checkers/redis/BUILD +++ b/source/extensions/health_checkers/redis/BUILD @@ -1,7 +1,3 @@ -licenses(["notice"]) # Apache 2 - -# Redis custom health checker. - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -9,6 +5,10 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# Redis custom health checker. + envoy_package() envoy_cc_library( diff --git a/source/extensions/internal_redirect/BUILD b/source/extensions/internal_redirect/BUILD index 6156949edef6..06456dbbcb5e 100644 --- a/source/extensions/internal_redirect/BUILD +++ b/source/extensions/internal_redirect/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/internal_redirect/allow_listed_routes/BUILD b/source/extensions/internal_redirect/allow_listed_routes/BUILD index 02cf2789dc79..c2ee85a134ac 100644 --- a/source/extensions/internal_redirect/allow_listed_routes/BUILD +++ b/source/extensions/internal_redirect/allow_listed_routes/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/internal_redirect/previous_routes/BUILD b/source/extensions/internal_redirect/previous_routes/BUILD index d022a4c6719c..91f76aebc135 100644 --- a/source/extensions/internal_redirect/previous_routes/BUILD +++ b/source/extensions/internal_redirect/previous_routes/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/internal_redirect/safe_cross_scheme/BUILD b/source/extensions/internal_redirect/safe_cross_scheme/BUILD index 94293850b53b..50433bf8fb42 100644 --- a/source/extensions/internal_redirect/safe_cross_scheme/BUILD +++ b/source/extensions/internal_redirect/safe_cross_scheme/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/quic_listeners/quiche/BUILD b/source/extensions/quic_listeners/quiche/BUILD index aa0be8877546..124be2ac6d51 100644 --- a/source/extensions/quic_listeners/quiche/BUILD +++ b/source/extensions/quic_listeners/quiche/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/quic_listeners/quiche/platform/BUILD b/source/extensions/quic_listeners/quiche/platform/BUILD index 3f5cb5fef47f..2d36d09fa280 100644 --- a/source/extensions/quic_listeners/quiche/platform/BUILD +++ b/source/extensions/quic_listeners/quiche/platform/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() # Build targets in this 
package are part of the QUICHE platform implementation. diff --git a/source/extensions/resource_monitors/BUILD b/source/extensions/resource_monitors/BUILD index 6156949edef6..06456dbbcb5e 100644 --- a/source/extensions/resource_monitors/BUILD +++ b/source/extensions/resource_monitors/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/resource_monitors/common/BUILD b/source/extensions/resource_monitors/common/BUILD index ff6773aaa8d1..7e759d696abd 100644 --- a/source/extensions/resource_monitors/common/BUILD +++ b/source/extensions/resource_monitors/common/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/resource_monitors/fixed_heap/BUILD b/source/extensions/resource_monitors/fixed_heap/BUILD index e54cfe813179..4feb2a6e7cd2 100644 --- a/source/extensions/resource_monitors/fixed_heap/BUILD +++ b/source/extensions/resource_monitors/fixed_heap/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/resource_monitors/injected_resource/BUILD b/source/extensions/resource_monitors/injected_resource/BUILD index 650d87c69b98..4b3702afffdf 100644 --- a/source/extensions/resource_monitors/injected_resource/BUILD +++ b/source/extensions/resource_monitors/injected_resource/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/retry/host/BUILD b/source/extensions/retry/host/BUILD index 6156949edef6..06456dbbcb5e 100644 --- a/source/extensions/retry/host/BUILD +++ b/source/extensions/retry/host/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/retry/host/omit_canary_hosts/BUILD b/source/extensions/retry/host/omit_canary_hosts/BUILD index 39ecd978faaf..1f4f6fed89a7 100644 --- a/source/extensions/retry/host/omit_canary_hosts/BUILD +++ b/source/extensions/retry/host/omit_canary_hosts/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/retry/host/omit_host_metadata/BUILD b/source/extensions/retry/host/omit_host_metadata/BUILD index d2a0de1ceac7..f0e4013ecb94 100644 --- a/source/extensions/retry/host/omit_host_metadata/BUILD +++ b/source/extensions/retry/host/omit_host_metadata/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/retry/host/previous_hosts/BUILD b/source/extensions/retry/host/previous_hosts/BUILD index 17ab0e326132..7ec06c64c705 
100644 --- a/source/extensions/retry/host/previous_hosts/BUILD +++ b/source/extensions/retry/host/previous_hosts/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/retry/priority/BUILD b/source/extensions/retry/priority/BUILD index 6156949edef6..06456dbbcb5e 100644 --- a/source/extensions/retry/priority/BUILD +++ b/source/extensions/retry/priority/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/retry/priority/previous_priorities/BUILD b/source/extensions/retry/priority/previous_priorities/BUILD index 1a545c2509f0..65061e5740da 100644 --- a/source/extensions/retry/priority/previous_priorities/BUILD +++ b/source/extensions/retry/priority/previous_priorities/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/stat_sinks/BUILD b/source/extensions/stat_sinks/BUILD index 6156949edef6..06456dbbcb5e 100644 --- a/source/extensions/stat_sinks/BUILD +++ b/source/extensions/stat_sinks/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/stat_sinks/common/statsd/BUILD b/source/extensions/stat_sinks/common/statsd/BUILD index 57c0b009c506..378a7146234d 100644 --- a/source/extensions/stat_sinks/common/statsd/BUILD +++ b/source/extensions/stat_sinks/common/statsd/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/stat_sinks/dog_statsd/BUILD b/source/extensions/stat_sinks/dog_statsd/BUILD index 0c9e5f299573..2a6e1d7d9c44 100644 --- a/source/extensions/stat_sinks/dog_statsd/BUILD +++ b/source/extensions/stat_sinks/dog_statsd/BUILD @@ -1,14 +1,14 @@ -licenses(["notice"]) # Apache 2 - -# Stats sink for the DataDog (https://www.datadoghq.com/) variant of the statsd protocol -# (https://docs.datadoghq.com/developers/dogstatsd/). - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# Stats sink for the DataDog (https://www.datadoghq.com/) variant of the statsd protocol +# (https://docs.datadoghq.com/developers/dogstatsd/). + envoy_package() envoy_cc_extension( diff --git a/source/extensions/stat_sinks/hystrix/BUILD b/source/extensions/stat_sinks/hystrix/BUILD index d058088df9b2..463576dd757c 100644 --- a/source/extensions/stat_sinks/hystrix/BUILD +++ b/source/extensions/stat_sinks/hystrix/BUILD @@ -1,7 +1,3 @@ -licenses(["notice"]) # Apache 2 - -# Stats sink for the basic version of the hystrix protocol (https://github.com/b/hystrix_spec). - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -9,6 +5,10 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# Stats sink for the basic version of the hystrix protocol (https://github.com/b/hystrix_spec). 
+ envoy_package() envoy_cc_extension( diff --git a/source/extensions/stat_sinks/metrics_service/BUILD b/source/extensions/stat_sinks/metrics_service/BUILD index c26135a75049..ecd35309b7fb 100644 --- a/source/extensions/stat_sinks/metrics_service/BUILD +++ b/source/extensions/stat_sinks/metrics_service/BUILD @@ -1,7 +1,3 @@ -licenses(["notice"]) # Apache 2 - -# Stats sink for the gRPC metrics service: api/envoy/service/metrics/v2/metrics_service.proto - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -9,6 +5,10 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# Stats sink for the gRPC metrics service: api/envoy/service/metrics/v2/metrics_service.proto + envoy_package() envoy_cc_library( diff --git a/source/extensions/stat_sinks/statsd/BUILD b/source/extensions/stat_sinks/statsd/BUILD index 5ec22566d12b..a9c862e12e8b 100644 --- a/source/extensions/stat_sinks/statsd/BUILD +++ b/source/extensions/stat_sinks/statsd/BUILD @@ -1,13 +1,13 @@ -licenses(["notice"]) # Apache 2 - -# Stats sink for the basic version of the statsd protocol (https://github.com/b/statsd_spec). - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# Stats sink for the basic version of the statsd protocol (https://github.com/b/statsd_spec). + envoy_package() envoy_cc_extension( diff --git a/source/extensions/tracers/BUILD b/source/extensions/tracers/BUILD index 6156949edef6..06456dbbcb5e 100644 --- a/source/extensions/tracers/BUILD +++ b/source/extensions/tracers/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/tracers/common/BUILD b/source/extensions/tracers/common/BUILD index 04a67fdad5f4..f31e56bc9cd6 100644 --- a/source/extensions/tracers/common/BUILD +++ b/source/extensions/tracers/common/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/tracers/common/ot/BUILD b/source/extensions/tracers/common/ot/BUILD index 29dd62e655f8..16a0a3642905 100644 --- a/source/extensions/tracers/common/ot/BUILD +++ b/source/extensions/tracers/common/ot/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/tracers/datadog/BUILD b/source/extensions/tracers/datadog/BUILD index 95cc7d74a212..bd35a9a25431 100644 --- a/source/extensions/tracers/datadog/BUILD +++ b/source/extensions/tracers/datadog/BUILD @@ -1,7 +1,3 @@ -licenses(["notice"]) # Apache 2 - -# Trace driver for Datadog (https://datadoghq.com/) - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -9,6 +5,10 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# Trace driver for Datadog (https://datadoghq.com/) + envoy_package() envoy_cc_library( diff --git a/source/extensions/tracers/dynamic_ot/BUILD b/source/extensions/tracers/dynamic_ot/BUILD index 4302159453d8..bd5a269fc130 100644 --- a/source/extensions/tracers/dynamic_ot/BUILD +++ b/source/extensions/tracers/dynamic_ot/BUILD @@ -1,7 +1,3 @@ -licenses(["notice"]) # Apache 2 - -# Trace driver for dynamically loadable C++ OpenTracing drivers 
(http://opentracing.io/). - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -9,6 +5,10 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# Trace driver for dynamically loadable C++ OpenTracing drivers (http://opentracing.io/). + envoy_package() envoy_cc_library( diff --git a/source/extensions/tracers/lightstep/BUILD b/source/extensions/tracers/lightstep/BUILD index a72d39b37376..840162fe4f37 100644 --- a/source/extensions/tracers/lightstep/BUILD +++ b/source/extensions/tracers/lightstep/BUILD @@ -1,7 +1,3 @@ -licenses(["notice"]) # Apache 2 - -# Trace driver for LightStep (https://lightstep.com/) - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -9,6 +5,10 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# Trace driver for LightStep (https://lightstep.com/) + envoy_package() envoy_cc_library( diff --git a/source/extensions/tracers/opencensus/BUILD b/source/extensions/tracers/opencensus/BUILD index 0956ae5cce3a..d0a66792196a 100644 --- a/source/extensions/tracers/opencensus/BUILD +++ b/source/extensions/tracers/opencensus/BUILD @@ -1,7 +1,3 @@ -licenses(["notice"]) # Apache 2 - -# Trace driver for OpenCensus: https://opencensus.io/ - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -9,6 +5,10 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# Trace driver for OpenCensus: https://opencensus.io/ + envoy_package() envoy_cc_extension( diff --git a/source/extensions/tracers/xray/BUILD b/source/extensions/tracers/xray/BUILD index f225797780d3..09fa6ea67191 100644 --- a/source/extensions/tracers/xray/BUILD +++ b/source/extensions/tracers/xray/BUILD @@ -1,7 +1,3 @@ -licenses(["notice"]) # Apache 2 - -# Trace driver for AWS X-Ray. - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -10,6 +6,10 @@ load( "envoy_proto_library", ) +licenses(["notice"]) # Apache 2 + +# Trace driver for AWS X-Ray. + envoy_package() envoy_proto_library( diff --git a/source/extensions/tracers/zipkin/BUILD b/source/extensions/tracers/zipkin/BUILD index f2321bab8710..942d2c3744e2 100644 --- a/source/extensions/tracers/zipkin/BUILD +++ b/source/extensions/tracers/zipkin/BUILD @@ -1,7 +1,3 @@ -licenses(["notice"]) # Apache 2 - -# Trace driver for Zipkin (https://zipkin.io/). - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -9,6 +5,10 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# Trace driver for Zipkin (https://zipkin.io/). + envoy_package() envoy_cc_library( diff --git a/source/extensions/transport_sockets/BUILD b/source/extensions/transport_sockets/BUILD index 6156949edef6..06456dbbcb5e 100644 --- a/source/extensions/transport_sockets/BUILD +++ b/source/extensions/transport_sockets/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/extensions/transport_sockets/alts/BUILD b/source/extensions/transport_sockets/alts/BUILD index 4575772e5a58..5145a1abdb1d 100644 --- a/source/extensions/transport_sockets/alts/BUILD +++ b/source/extensions/transport_sockets/alts/BUILD @@ -1,8 +1,3 @@ -licenses(["notice"]) # Apache 2 - -# ALTS transport socket. This provides Google's ALTS protocol support in GCP to Envoy. 
-# https://cloud.google.com/security/encryption-in-transit/application-layer-transport-security/ - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -10,6 +5,11 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# ALTS transport socket. This provides Google's ALTS protocol support in GCP to Envoy. +# https://cloud.google.com/security/encryption-in-transit/application-layer-transport-security/ + envoy_package() envoy_cc_library( diff --git a/source/extensions/transport_sockets/raw_buffer/BUILD b/source/extensions/transport_sockets/raw_buffer/BUILD index f5b11f64def9..4d5bdacbe88c 100644 --- a/source/extensions/transport_sockets/raw_buffer/BUILD +++ b/source/extensions/transport_sockets/raw_buffer/BUILD @@ -1,13 +1,13 @@ -licenses(["notice"]) # Apache 2 - -# Built-in plaintext connection transport socket. - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# Built-in plaintext connection transport socket. + envoy_package() envoy_cc_extension( diff --git a/source/extensions/transport_sockets/tap/BUILD b/source/extensions/transport_sockets/tap/BUILD index e9107a046cfc..e319ee596df3 100644 --- a/source/extensions/transport_sockets/tap/BUILD +++ b/source/extensions/transport_sockets/tap/BUILD @@ -1,7 +1,3 @@ -licenses(["notice"]) # Apache 2 - -# tap wrapper around a transport socket. - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -9,6 +5,10 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# tap wrapper around a transport socket. + envoy_package() envoy_cc_library( diff --git a/source/extensions/transport_sockets/tls/BUILD b/source/extensions/transport_sockets/tls/BUILD index 7cf2407b61fb..f885d1c6bfa9 100644 --- a/source/extensions/transport_sockets/tls/BUILD +++ b/source/extensions/transport_sockets/tls/BUILD @@ -1,7 +1,3 @@ -licenses(["notice"]) # Apache 2 - -# Built-in TLS connection transport socket. - load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -9,6 +5,10 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + +# Built-in TLS connection transport socket. 
+ envoy_package() envoy_cc_extension( diff --git a/source/extensions/transport_sockets/tls/private_key/BUILD b/source/extensions/transport_sockets/tls/private_key/BUILD index 8f8a96663c14..8b0563f5e06d 100644 --- a/source/extensions/transport_sockets/tls/private_key/BUILD +++ b/source/extensions/transport_sockets/tls/private_key/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/server/BUILD b/source/server/BUILD index 18042399a07e..eefcabc7d458 100644 --- a/source/server/BUILD +++ b/source/server/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", @@ -8,6 +6,8 @@ load( "envoy_select_hot_restart", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/server/admin/BUILD b/source/server/admin/BUILD index 491bdefcd940..1ddc534ef0bc 100644 --- a/source/server/admin/BUILD +++ b/source/server/admin/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/source/server/config_validation/BUILD b/source/server/config_validation/BUILD index f3295a3f5f60..713ea38dad22 100644 --- a/source/server/config_validation/BUILD +++ b/source/server/config_validation/BUILD @@ -1,7 +1,7 @@ -licenses(["notice"]) # Apache 2 - load("//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package") +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/test/BUILD b/test/BUILD index a3cd553d30e8..ab4a56a7d42e 100644 --- a/test/BUILD +++ b/test/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() # TODO(htuch): remove when we have a solution for https://github.com/bazelbuild/bazel/issues/3510 diff --git a/test/benchmark/BUILD b/test/benchmark/BUILD index 96d08112c091..afba86c9dd22 100644 --- a/test/benchmark/BUILD +++ b/test/benchmark/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test_library( diff --git a/test/common/access_log/BUILD b/test/common/access_log/BUILD index 3a71099e7276..bcd51b067bd6 100644 --- a/test/common/access_log/BUILD +++ b/test/common/access_log/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/common/buffer/BUILD b/test/common/buffer/BUILD index c0968d43d80d..bd01534ca6ca 100644 --- a/test/common/buffer/BUILD +++ b/test/common/buffer/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_benchmark_test", @@ -11,6 +9,8 @@ load( "envoy_proto_library", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test_library( diff --git a/test/common/common/BUILD b/test/common/common/BUILD index fefbe4c3183c..5842443825c6 100644 --- a/test/common/common/BUILD +++ b/test/common/common/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_benchmark_test", @@ -9,6 +7,8 @@ 
load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/common/config/BUILD b/test/common/config/BUILD index 2310f7a7330c..4d55e78e637e 100644 --- a/test/common/config/BUILD +++ b/test/common/config/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", @@ -8,6 +6,8 @@ load( "envoy_proto_library", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/common/crypto/BUILD b/test/common/crypto/BUILD index b1f7d592ace4..614c0e8c5b82 100644 --- a/test/common/crypto/BUILD +++ b/test/common/crypto/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/common/event/BUILD b/test/common/event/BUILD index f2af3acf3dc5..a275f39c1630 100644 --- a/test/common/event/BUILD +++ b/test/common/event/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/common/filesystem/BUILD b/test/common/filesystem/BUILD index de9c92bb4c04..68f4eca5f716 100644 --- a/test/common/filesystem/BUILD +++ b/test/common/filesystem/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/common/formatter/BUILD b/test/common/formatter/BUILD index 77b26bc4a05f..bb4ffbcacdcf 100644 --- a/test/common/formatter/BUILD +++ b/test/common/formatter/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_benchmark_test", @@ -10,6 +8,8 @@ load( "envoy_proto_library", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_proto_library( diff --git a/test/common/grpc/BUILD b/test/common/grpc/BUILD index 612f7f798b5b..71e1a071604d 100644 --- a/test/common/grpc/BUILD +++ b/test/common/grpc/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_fuzz_test", @@ -9,6 +7,8 @@ load( "envoy_select_google_grpc", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/common/html/BUILD b/test/common/html/BUILD index 6dfff216dc3e..378ddd98edde 100644 --- a/test/common/html/BUILD +++ b/test/common/html/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/common/http/BUILD b/test/common/http/BUILD index 105f9e510006..fc43ab1ff66f 100644 --- a/test/common/http/BUILD +++ b/test/common/http/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_benchmark_test", @@ -11,6 +9,8 @@ load( "envoy_proto_library", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/common/http/http1/BUILD b/test/common/http/http1/BUILD index eac82f9734ec..6bfc1268c9c9 100644 --- a/test/common/http/http1/BUILD +++ b/test/common/http/http1/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/common/http/http2/BUILD 
b/test/common/http/http2/BUILD index 1d5ba73e5a52..794da7621781 100644 --- a/test/common/http/http2/BUILD +++ b/test/common/http/http2/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_fuzz_test", @@ -8,6 +6,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/common/init/BUILD b/test/common/init/BUILD index 894e7493aa72..e2d164576326 100644 --- a/test/common/init/BUILD +++ b/test/common/init/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/common/json/BUILD b/test/common/json/BUILD index a3b170033a51..803f2abca6af 100644 --- a/test/common/json/BUILD +++ b/test/common/json/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_fuzz_test", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_fuzz_test( diff --git a/test/common/json/config_schemas_test_data/BUILD b/test/common/json/config_schemas_test_data/BUILD index 603cfa6adcca..185f28e77ce9 100644 --- a/test/common/json/config_schemas_test_data/BUILD +++ b/test/common/json/config_schemas_test_data/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", "envoy_py_test_binary", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_py_test_binary( diff --git a/test/common/local_reply/BUILD b/test/common/local_reply/BUILD index fcd56c500a53..df768fb66dd5 100644 --- a/test/common/local_reply/BUILD +++ b/test/common/local_reply/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/common/memory/BUILD b/test/common/memory/BUILD index 55aa793bcee1..1688511f5db9 100644 --- a/test/common/memory/BUILD +++ b/test/common/memory/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/common/network/BUILD b/test/common/network/BUILD index c3c5bc8ba451..495473031b6d 100644 --- a/test/common/network/BUILD +++ b/test/common/network/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_benchmark_test", @@ -10,6 +8,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test_library( diff --git a/test/common/protobuf/BUILD b/test/common/protobuf/BUILD index 389e4e235d4d..851765f03b58 100644 --- a/test/common/protobuf/BUILD +++ b/test/common/protobuf/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_fuzz_test", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/common/router/BUILD b/test/common/router/BUILD index 3d1264b07fec..25d03dfbe973 100644 --- a/test/common/router/BUILD +++ b/test/common/router/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_fuzz_test", @@ -11,6 +9,8 @@ load( "envoy_proto_library", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/common/runtime/BUILD 
b/test/common/runtime/BUILD index c2cd9d5be4be..47bb5e802978 100644 --- a/test/common/runtime/BUILD +++ b/test/common/runtime/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() exports_files(["filesystem_setup.sh"]) diff --git a/test/common/secret/BUILD b/test/common/secret/BUILD index 80ce11317c43..8ff77b980fa7 100644 --- a/test/common/secret/BUILD +++ b/test/common/secret/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/common/shared_pool/BUILD b/test/common/shared_pool/BUILD index 9a9a641f380a..f4eaecda80fa 100644 --- a/test/common/shared_pool/BUILD +++ b/test/common/shared_pool/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/common/signal/BUILD b/test/common/signal/BUILD index 746babe0fedf..97a42a58cafd 100644 --- a/test/common/signal/BUILD +++ b/test/common/signal/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/common/singleton/BUILD b/test/common/singleton/BUILD index 8076c8bafbe5..cd5e582a5ed8 100644 --- a/test/common/singleton/BUILD +++ b/test/common/singleton/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/common/stats/BUILD b/test/common/stats/BUILD index 909e4a4c25f4..e947b5dae927 100644 --- a/test/common/stats/BUILD +++ b/test/common/stats/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_fuzz_test", @@ -9,6 +7,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/common/stream_info/BUILD b/test/common/stream_info/BUILD index de97bc18ddea..9b88e001fc35 100644 --- a/test/common/stream_info/BUILD +++ b/test/common/stream_info/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/common/tcp/BUILD b/test/common/tcp/BUILD index 097d23b9ecb6..f455f407e707 100644 --- a/test/common/tcp/BUILD +++ b/test/common/tcp/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/common/tcp_proxy/BUILD b/test/common/tcp_proxy/BUILD index 493f742d2db1..955a2b05070e 100644 --- a/test/common/tcp_proxy/BUILD +++ b/test/common/tcp_proxy/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/common/thread_local/BUILD b/test/common/thread_local/BUILD index c1d95410e5ab..fdd28bc076fb 100644 --- a/test/common/thread_local/BUILD +++ b/test/common/thread_local/BUILD @@ 
-1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/common/tracing/BUILD b/test/common/tracing/BUILD index 73152809a8f4..acc6850f3f72 100644 --- a/test/common/tracing/BUILD +++ b/test/common/tracing/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/common/upstream/BUILD b/test/common/upstream/BUILD index 19f713f672a2..b12cb6337c6e 100644 --- a/test/common/upstream/BUILD +++ b/test/common/upstream/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_benchmark_test", @@ -9,6 +7,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/config/BUILD b/test/config/BUILD index baf14f36e68f..33746b1c0cec 100644 --- a/test/config/BUILD +++ b/test/config/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test_library( diff --git a/test/config/integration/BUILD b/test/config/integration/BUILD index 29fe25251783..684312b35b20 100644 --- a/test/config/integration/BUILD +++ b/test/config/integration/BUILD @@ -1,10 +1,10 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() exports_files([ diff --git a/test/config/integration/certs/BUILD b/test/config/integration/certs/BUILD index a4350864d8d3..8e80a2f1d2f7 100644 --- a/test/config/integration/certs/BUILD +++ b/test/config/integration/certs/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() filegroup( diff --git a/test/config_test/BUILD b/test/config_test/BUILD index e8ccfd04daad..31a067438c82 100644 --- a/test/config_test/BUILD +++ b/test/config_test/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", @@ -9,6 +7,8 @@ load( load("//source/extensions:all_extensions.bzl", "envoy_all_extensions") load("//bazel:repositories.bzl", "PPC_SKIP_TARGETS", "WINDOWS_SKIP_TARGETS") +licenses(["notice"]) # Apache 2 + envoy_package() exports_files(["example_configs_test_setup.sh"]) diff --git a/test/dependencies/BUILD b/test/dependencies/BUILD index 2e6ae296b760..1ef365b90fc2 100644 --- a/test/dependencies/BUILD +++ b/test/dependencies/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/exe/BUILD b/test/exe/BUILD index 7540aafa7525..ffae95b57f47 100644 --- a/test/exe/BUILD +++ b/test/exe/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", @@ -7,6 +5,8 @@ load( "envoy_sh_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_sh_test( diff --git a/test/extensions/access_loggers/common/BUILD b/test/extensions/access_loggers/common/BUILD index 9dbb3c91c70f..a6f87344a490 100644 --- a/test/extensions/access_loggers/common/BUILD +++ 
b/test/extensions/access_loggers/common/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/extensions/access_loggers/file/BUILD b/test/extensions/access_loggers/file/BUILD index 268e48d29837..76361877e881 100644 --- a/test/extensions/access_loggers/file/BUILD +++ b/test/extensions/access_loggers/file/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/access_loggers/grpc/BUILD b/test/extensions/access_loggers/grpc/BUILD index 570f723c6c2f..219520a97056 100644 --- a/test/extensions/access_loggers/grpc/BUILD +++ b/test/extensions/access_loggers/grpc/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/clusters/aggregate/BUILD b/test/extensions/clusters/aggregate/BUILD index b5d47f29614f..2e445737fcf9 100644 --- a/test/extensions/clusters/aggregate/BUILD +++ b/test/extensions/clusters/aggregate/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/clusters/dynamic_forward_proxy/BUILD b/test/extensions/clusters/dynamic_forward_proxy/BUILD index 1668ea02f636..baaf5068a1b5 100644 --- a/test/extensions/clusters/dynamic_forward_proxy/BUILD +++ b/test/extensions/clusters/dynamic_forward_proxy/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/clusters/redis/BUILD b/test/extensions/clusters/redis/BUILD index 1fc6aa3a6a08..1ae33bf15b0c 100644 --- a/test/extensions/clusters/redis/BUILD +++ b/test/extensions/clusters/redis/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", @@ -11,6 +9,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/common/BUILD b/test/extensions/common/BUILD index 216c9b56b094..e976a4fc4c1b 100644 --- a/test/extensions/common/BUILD +++ b/test/extensions/common/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/extensions/common/aws/BUILD b/test/extensions/common/aws/BUILD index 0f014b4bf458..eae532ee27f9 100644 --- a/test/extensions/common/aws/BUILD +++ b/test/extensions/common/aws/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( diff --git a/test/extensions/common/dynamic_forward_proxy/BUILD b/test/extensions/common/dynamic_forward_proxy/BUILD index eb51afa1ce87..5c33d024fde2 
100644 --- a/test/extensions/common/dynamic_forward_proxy/BUILD +++ b/test/extensions/common/dynamic_forward_proxy/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/extensions/common/proxy_protocol/BUILD b/test/extensions/common/proxy_protocol/BUILD index e96325c56faf..90a37c8cd60d 100644 --- a/test/extensions/common/proxy_protocol/BUILD +++ b/test/extensions/common/proxy_protocol/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/extensions/common/redis/BUILD b/test/extensions/common/redis/BUILD index fc6009a41d71..07bc8e5dc11d 100644 --- a/test/extensions/common/redis/BUILD +++ b/test/extensions/common/redis/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_platform_dep", @@ -11,6 +9,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test_library( diff --git a/test/extensions/common/tap/BUILD b/test/extensions/common/tap/BUILD index 833f4bbb566c..5483de65de26 100644 --- a/test/extensions/common/tap/BUILD +++ b/test/extensions/common/tap/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test_library( diff --git a/test/extensions/common/wasm/BUILD b/test/extensions/common/wasm/BUILD index 4a4c0bdda7a6..e85cf73322e4 100644 --- a/test/extensions/common/wasm/BUILD +++ b/test/extensions/common/wasm/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/extensions/common/wasm/test_data/BUILD b/test/extensions/common/wasm/test_data/BUILD index ef4f37386280..f46c28bbd63e 100644 --- a/test/extensions/common/wasm/test_data/BUILD +++ b/test/extensions/common/wasm/test_data/BUILD @@ -1,10 +1,10 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() filegroup( diff --git a/test/extensions/compression/gzip/BUILD b/test/extensions/compression/gzip/BUILD index 772bc17016ad..290209d3eb61 100644 --- a/test/extensions/compression/gzip/BUILD +++ b/test/extensions/compression/gzip/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_fuzz_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_fuzz_test( diff --git a/test/extensions/compression/gzip/compressor/BUILD b/test/extensions/compression/gzip/compressor/BUILD index 0121199e1060..6d101cd2aafd 100644 --- a/test/extensions/compression/gzip/compressor/BUILD +++ b/test/extensions/compression/gzip/compressor/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/compression/gzip/decompressor/BUILD b/test/extensions/compression/gzip/decompressor/BUILD index 1816ef8da235..19520c24b545 100644 --- 
a/test/extensions/compression/gzip/decompressor/BUILD +++ b/test/extensions/compression/gzip/decompressor/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/common/expr/BUILD b/test/extensions/filters/common/expr/BUILD index c6af64c0a0f1..fe758380dc37 100644 --- a/test/extensions/filters/common/expr/BUILD +++ b/test/extensions/filters/common/expr/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_fuzz_test", @@ -11,6 +9,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/common/ext_authz/BUILD b/test/extensions/filters/common/ext_authz/BUILD index c43c822f14b1..9f983589bd63 100644 --- a/test/extensions/filters/common/ext_authz/BUILD +++ b/test/extensions/filters/common/ext_authz/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/extensions/filters/common/fault/BUILD b/test/extensions/filters/common/fault/BUILD index a2b3a89d9acb..da4af82caf98 100644 --- a/test/extensions/filters/common/fault/BUILD +++ b/test/extensions/filters/common/fault/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/extensions/filters/common/lua/BUILD b/test/extensions/filters/common/lua/BUILD index cbe0ef71b7b7..b6d7bfecd6d5 100644 --- a/test/extensions/filters/common/lua/BUILD +++ b/test/extensions/filters/common/lua/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/extensions/filters/common/original_src/BUILD b/test/extensions/filters/common/original_src/BUILD index 02da243f0ca2..1d3a5d28847d 100644 --- a/test/extensions/filters/common/original_src/BUILD +++ b/test/extensions/filters/common/original_src/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/extensions/filters/common/ratelimit/BUILD b/test/extensions/filters/common/ratelimit/BUILD index 2bc6d08d7e8b..eb4d027b6eef 100644 --- a/test/extensions/filters/common/ratelimit/BUILD +++ b/test/extensions/filters/common/ratelimit/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/extensions/filters/common/rbac/BUILD b/test/extensions/filters/common/rbac/BUILD index 6454c69e159f..64e405da4d91 100644 --- a/test/extensions/filters/common/rbac/BUILD +++ b/test/extensions/filters/common/rbac/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -10,6 +8,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() 
envoy_extension_cc_test( diff --git a/test/extensions/filters/http/adaptive_concurrency/BUILD b/test/extensions/filters/http/adaptive_concurrency/BUILD index 5dd177f945bd..c91f90dcfcf5 100644 --- a/test/extensions/filters/http/adaptive_concurrency/BUILD +++ b/test/extensions/filters/http/adaptive_concurrency/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/http/adaptive_concurrency/controller/BUILD b/test/extensions/filters/http/adaptive_concurrency/controller/BUILD index 94592fb47acf..63bf457be4f3 100644 --- a/test/extensions/filters/http/adaptive_concurrency/controller/BUILD +++ b/test/extensions/filters/http/adaptive_concurrency/controller/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/http/aws_lambda/BUILD b/test/extensions/filters/http/aws_lambda/BUILD index f20dd5e903aa..4d14f2477630 100644 --- a/test/extensions/filters/http/aws_lambda/BUILD +++ b/test/extensions/filters/http/aws_lambda/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/http/aws_request_signing/BUILD b/test/extensions/filters/http/aws_request_signing/BUILD index 5a3194877bc2..7496366a2b72 100644 --- a/test/extensions/filters/http/aws_request_signing/BUILD +++ b/test/extensions/filters/http/aws_request_signing/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/http/buffer/BUILD b/test/extensions/filters/http/buffer/BUILD index 50a68a4f2489..900ab38a6747 100644 --- a/test/extensions/filters/http/buffer/BUILD +++ b/test/extensions/filters/http/buffer/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/http/cache/BUILD b/test/extensions/filters/http/cache/BUILD index 82eb2ff8dc12..12553fadd9cf 100644 --- a/test/extensions/filters/http/cache/BUILD +++ b/test/extensions/filters/http/cache/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load("//bazel:envoy_build_system.bzl", "envoy_package") load( "//test/extensions:extensions_build_system.bzl", "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/http/cache/simple_http_cache/BUILD b/test/extensions/filters/http/cache/simple_http_cache/BUILD index 89198975f330..3030d84eeae9 100644 --- a/test/extensions/filters/http/cache/simple_http_cache/BUILD +++ b/test/extensions/filters/http/cache/simple_http_cache/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load("//bazel:envoy_build_system.bzl", "envoy_package") load( 
"//test/extensions:extensions_build_system.bzl", "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/http/common/BUILD b/test/extensions/filters/http/common/BUILD index 99dca40599e6..9994b2f0f218 100644 --- a/test/extensions/filters/http/common/BUILD +++ b/test/extensions/filters/http/common/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", @@ -11,6 +9,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test_library( diff --git a/test/extensions/filters/http/common/compressor/BUILD b/test/extensions/filters/http/common/compressor/BUILD index a3cb82c215e4..a29d919d0be6 100644 --- a/test/extensions/filters/http/common/compressor/BUILD +++ b/test/extensions/filters/http/common/compressor/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/extensions/filters/http/common/fuzz/BUILD b/test/extensions/filters/http/common/fuzz/BUILD index 9c42dc89bb6d..cc19fb5dfc14 100644 --- a/test/extensions/filters/http/common/fuzz/BUILD +++ b/test/extensions/filters/http/common/fuzz/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_fuzz_test", @@ -12,6 +10,8 @@ load( "envoy_all_extensions", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_proto_library( diff --git a/test/extensions/filters/http/compressor/BUILD b/test/extensions/filters/http/compressor/BUILD index 1a608f282f0a..0da9087a4d99 100644 --- a/test/extensions/filters/http/compressor/BUILD +++ b/test/extensions/filters/http/compressor/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/http/cors/BUILD b/test/extensions/filters/http/cors/BUILD index 9320855a4af2..a91934cb1249 100644 --- a/test/extensions/filters/http/cors/BUILD +++ b/test/extensions/filters/http/cors/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/http/csrf/BUILD b/test/extensions/filters/http/csrf/BUILD index 2cde41859068..a7e4b2585968 100644 --- a/test/extensions/filters/http/csrf/BUILD +++ b/test/extensions/filters/http/csrf/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/http/decompressor/BUILD b/test/extensions/filters/http/decompressor/BUILD index 2da39cd00c40..e6739858b251 100644 --- a/test/extensions/filters/http/decompressor/BUILD +++ b/test/extensions/filters/http/decompressor/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git 
a/test/extensions/filters/http/dynamic_forward_proxy/BUILD b/test/extensions/filters/http/dynamic_forward_proxy/BUILD index 71650b6b24d6..f5fcd3868e1b 100644 --- a/test/extensions/filters/http/dynamic_forward_proxy/BUILD +++ b/test/extensions/filters/http/dynamic_forward_proxy/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/http/dynamo/BUILD b/test/extensions/filters/http/dynamo/BUILD index 4fcc77be7412..e1761811ec40 100644 --- a/test/extensions/filters/http/dynamo/BUILD +++ b/test/extensions/filters/http/dynamo/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/http/ext_authz/BUILD b/test/extensions/filters/http/ext_authz/BUILD index ef1b8be3cd89..f1524fddc23b 100644 --- a/test/extensions/filters/http/ext_authz/BUILD +++ b/test/extensions/filters/http/ext_authz/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/http/fault/BUILD b/test/extensions/filters/http/fault/BUILD index 1578099273ed..7938aad25d2c 100644 --- a/test/extensions/filters/http/fault/BUILD +++ b/test/extensions/filters/http/fault/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test_library", @@ -10,6 +8,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/http/grpc_http1_bridge/BUILD b/test/extensions/filters/http/grpc_http1_bridge/BUILD index d429303dbb84..946a6c189c3f 100644 --- a/test/extensions/filters/http/grpc_http1_bridge/BUILD +++ b/test/extensions/filters/http/grpc_http1_bridge/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/http/grpc_http1_reverse_bridge/BUILD b/test/extensions/filters/http/grpc_http1_reverse_bridge/BUILD index c3ce59f00bc7..b50e0b1295dd 100644 --- a/test/extensions/filters/http/grpc_http1_reverse_bridge/BUILD +++ b/test/extensions/filters/http/grpc_http1_reverse_bridge/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/http/grpc_json_transcoder/BUILD b/test/extensions/filters/http/grpc_json_transcoder/BUILD index bac0bd17b1db..6f2a2deac803 100644 --- a/test/extensions/filters/http/grpc_json_transcoder/BUILD +++ b/test/extensions/filters/http/grpc_json_transcoder/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git 
a/test/extensions/filters/http/grpc_stats/BUILD b/test/extensions/filters/http/grpc_stats/BUILD index 15dd7ab9aeff..03f148c20036 100644 --- a/test/extensions/filters/http/grpc_stats/BUILD +++ b/test/extensions/filters/http/grpc_stats/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/http/grpc_web/BUILD b/test/extensions/filters/http/grpc_web/BUILD index ef16548a00dc..b0f586a9c9c0 100644 --- a/test/extensions/filters/http/grpc_web/BUILD +++ b/test/extensions/filters/http/grpc_web/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/http/gzip/BUILD b/test/extensions/filters/http/gzip/BUILD index 4924425df994..2126067c5bee 100644 --- a/test/extensions/filters/http/gzip/BUILD +++ b/test/extensions/filters/http/gzip/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/http/header_to_metadata/BUILD b/test/extensions/filters/http/header_to_metadata/BUILD index 20e85fba58ce..80fd87fb9605 100644 --- a/test/extensions/filters/http/header_to_metadata/BUILD +++ b/test/extensions/filters/http/header_to_metadata/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/http/health_check/BUILD b/test/extensions/filters/http/health_check/BUILD index 924cea4f2970..04852922e7fb 100644 --- a/test/extensions/filters/http/health_check/BUILD +++ b/test/extensions/filters/http/health_check/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/http/ip_tagging/BUILD b/test/extensions/filters/http/ip_tagging/BUILD index fdfa8d58cc98..7625367f3e62 100644 --- a/test/extensions/filters/http/ip_tagging/BUILD +++ b/test/extensions/filters/http/ip_tagging/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/http/jwt_authn/BUILD b/test/extensions/filters/http/jwt_authn/BUILD index e8339bdcb473..f19bf5269283 100644 --- a/test/extensions/filters/http/jwt_authn/BUILD +++ b/test/extensions/filters/http/jwt_authn/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", @@ -11,6 +9,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/test/extensions/filters/http/lua/BUILD b/test/extensions/filters/http/lua/BUILD index 93b555322a18..49889a1c90eb 100644 --- 
a/test/extensions/filters/http/lua/BUILD +++ b/test/extensions/filters/http/lua/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/http/on_demand/BUILD b/test/extensions/filters/http/on_demand/BUILD index 9a5acca7688f..d9412a137039 100644 --- a/test/extensions/filters/http/on_demand/BUILD +++ b/test/extensions/filters/http/on_demand/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/http/original_src/BUILD b/test/extensions/filters/http/original_src/BUILD index 18cfcae4e112..411bbe6d25a7 100644 --- a/test/extensions/filters/http/original_src/BUILD +++ b/test/extensions/filters/http/original_src/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/http/ratelimit/BUILD b/test/extensions/filters/http/ratelimit/BUILD index fe03b1f6eb30..bfa69da7e296 100644 --- a/test/extensions/filters/http/ratelimit/BUILD +++ b/test/extensions/filters/http/ratelimit/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/http/rbac/BUILD b/test/extensions/filters/http/rbac/BUILD index 6097f22589a8..3ce6b5fd175f 100644 --- a/test/extensions/filters/http/rbac/BUILD +++ b/test/extensions/filters/http/rbac/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -10,6 +8,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/http/router/BUILD b/test/extensions/filters/http/router/BUILD index e44c981792f3..46aaecbb7ae2 100644 --- a/test/extensions/filters/http/router/BUILD +++ b/test/extensions/filters/http/router/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/http/squash/BUILD b/test/extensions/filters/http/squash/BUILD index 2954bd98b425..f98a5695d5ad 100644 --- a/test/extensions/filters/http/squash/BUILD +++ b/test/extensions/filters/http/squash/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/http/tap/BUILD b/test/extensions/filters/http/tap/BUILD index 95f895888972..414ce2effd3e 100644 --- a/test/extensions/filters/http/tap/BUILD +++ b/test/extensions/filters/http/tap/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test_library", @@ -10,6 +8,8 @@ load( 
"envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test_library( diff --git a/test/extensions/filters/listener/http_inspector/BUILD b/test/extensions/filters/listener/http_inspector/BUILD index afb64b5eee2b..944ec4eff0d1 100644 --- a/test/extensions/filters/listener/http_inspector/BUILD +++ b/test/extensions/filters/listener/http_inspector/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/listener/original_dst/BUILD b/test/extensions/filters/listener/original_dst/BUILD index aaed1667cdf1..4c2ad41b41ce 100644 --- a/test/extensions/filters/listener/original_dst/BUILD +++ b/test/extensions/filters/listener/original_dst/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/listener/original_src/BUILD b/test/extensions/filters/listener/original_src/BUILD index 7a62688540fe..1b1806827236 100644 --- a/test/extensions/filters/listener/original_src/BUILD +++ b/test/extensions/filters/listener/original_src/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/listener/proxy_protocol/BUILD b/test/extensions/filters/listener/proxy_protocol/BUILD index f07771bbb91e..1ff78bc0b820 100644 --- a/test/extensions/filters/listener/proxy_protocol/BUILD +++ b/test/extensions/filters/listener/proxy_protocol/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/listener/tls_inspector/BUILD b/test/extensions/filters/listener/tls_inspector/BUILD index 8e2b7360a5ad..bfa8c6ebd18a 100644 --- a/test/extensions/filters/listener/tls_inspector/BUILD +++ b/test/extensions/filters/listener/tls_inspector/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", @@ -12,6 +10,8 @@ load( "envoy_extension_cc_benchmark_binary", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/extensions/filters/network/client_ssl_auth/BUILD b/test/extensions/filters/network/client_ssl_auth/BUILD index ffde1a55d3c4..b24854cbce09 100644 --- a/test/extensions/filters/network/client_ssl_auth/BUILD +++ b/test/extensions/filters/network/client_ssl_auth/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/network/common/BUILD b/test/extensions/filters/network/common/BUILD index 246a1c9f8a0d..046af1acac14 100644 --- a/test/extensions/filters/network/common/BUILD +++ b/test/extensions/filters/network/common/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", 
"envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/extensions/filters/network/common/redis/BUILD b/test/extensions/filters/network/common/redis/BUILD index a2d076a1b4b9..dffc23954488 100644 --- a/test/extensions/filters/network/common/redis/BUILD +++ b/test/extensions/filters/network/common/redis/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", @@ -8,6 +6,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( diff --git a/test/extensions/filters/network/direct_response/BUILD b/test/extensions/filters/network/direct_response/BUILD index a828acc3d659..06fa488357ec 100644 --- a/test/extensions/filters/network/direct_response/BUILD +++ b/test/extensions/filters/network/direct_response/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/network/dubbo_proxy/BUILD b/test/extensions/filters/network/dubbo_proxy/BUILD index 00cbebf6780a..20410865388d 100644 --- a/test/extensions/filters/network/dubbo_proxy/BUILD +++ b/test/extensions/filters/network/dubbo_proxy/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", @@ -11,6 +9,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( diff --git a/test/extensions/filters/network/ext_authz/BUILD b/test/extensions/filters/network/ext_authz/BUILD index 4d003eb26dec..6004f220d387 100644 --- a/test/extensions/filters/network/ext_authz/BUILD +++ b/test/extensions/filters/network/ext_authz/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/network/http_connection_manager/BUILD b/test/extensions/filters/network/http_connection_manager/BUILD index ebaaf0038867..1725e4dd9de3 100644 --- a/test/extensions/filters/network/http_connection_manager/BUILD +++ b/test/extensions/filters/network/http_connection_manager/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -10,6 +8,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_proto_library( diff --git a/test/extensions/filters/network/kafka/BUILD b/test/extensions/filters/network/kafka/BUILD index d45e3702b0da..de260c365d7d 100644 --- a/test/extensions/filters/network/kafka/BUILD +++ b/test/extensions/filters/network/kafka/BUILD @@ -1,5 +1,4 @@ -licenses(["notice"]) # Apache 2 - +load("@rules_python//python:defs.bzl", "py_binary") load( "//bazel:envoy_build_system.bzl", "envoy_cc_test_library", @@ -10,6 +9,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test_library( diff --git a/test/extensions/filters/network/kafka/broker/BUILD b/test/extensions/filters/network/kafka/broker/BUILD index 158c8a7cb27b..da35cc9ea7c4 100644 --- a/test/extensions/filters/network/kafka/broker/BUILD +++ b/test/extensions/filters/network/kafka/broker/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 
@@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/network/kafka/broker/integration_test/BUILD b/test/extensions/filters/network/kafka/broker/integration_test/BUILD index 14fec8991db5..d82d6b95c011 100644 --- a/test/extensions/filters/network/kafka/broker/integration_test/BUILD +++ b/test/extensions/filters/network/kafka/broker/integration_test/BUILD @@ -1,10 +1,11 @@ -licenses(["notice"]) # Apache 2 - +load("@rules_python//python:defs.bzl", "py_test") load( "//bazel:envoy_build_system.bzl", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() py_test( diff --git a/test/extensions/filters/network/local_ratelimit/BUILD b/test/extensions/filters/network/local_ratelimit/BUILD index 291f6726ddc4..8f11258d9af8 100644 --- a/test/extensions/filters/network/local_ratelimit/BUILD +++ b/test/extensions/filters/network/local_ratelimit/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/network/mongo_proxy/BUILD b/test/extensions/filters/network/mongo_proxy/BUILD index aa836af608c9..07c6cc33cca0 100644 --- a/test/extensions/filters/network/mongo_proxy/BUILD +++ b/test/extensions/filters/network/mongo_proxy/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/network/mysql_proxy/BUILD b/test/extensions/filters/network/mysql_proxy/BUILD index 073312b3eff4..17e3c8c204d8 100644 --- a/test/extensions/filters/network/mysql_proxy/BUILD +++ b/test/extensions/filters/network/mysql_proxy/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -10,6 +8,8 @@ load( "envoy_extension_cc_test_library", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test_library( diff --git a/test/extensions/filters/network/postgres_proxy/BUILD b/test/extensions/filters/network/postgres_proxy/BUILD index afb0b1415014..0e58a294742b 100644 --- a/test/extensions/filters/network/postgres_proxy/BUILD +++ b/test/extensions/filters/network/postgres_proxy/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -10,6 +8,8 @@ load( "envoy_extension_cc_test_library", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test_library( diff --git a/test/extensions/filters/network/ratelimit/BUILD b/test/extensions/filters/network/ratelimit/BUILD index c1905934b4b0..38c2eb7a8512 100644 --- a/test/extensions/filters/network/ratelimit/BUILD +++ b/test/extensions/filters/network/ratelimit/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/network/rbac/BUILD b/test/extensions/filters/network/rbac/BUILD index fb4195d62562..8d4d479cefb0 100644 --- a/test/extensions/filters/network/rbac/BUILD +++ b/test/extensions/filters/network/rbac/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( 
"//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/network/redis_proxy/BUILD b/test/extensions/filters/network/redis_proxy/BUILD index 0697c8c39d90..749e93663fea 100644 --- a/test/extensions/filters/network/redis_proxy/BUILD +++ b/test/extensions/filters/network/redis_proxy/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", @@ -12,6 +10,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/network/rocketmq_proxy/BUILD b/test/extensions/filters/network/rocketmq_proxy/BUILD index 2e719d6b145b..c5cf5e5b34f0 100644 --- a/test/extensions/filters/network/rocketmq_proxy/BUILD +++ b/test/extensions/filters/network/rocketmq_proxy/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", @@ -11,6 +9,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( diff --git a/test/extensions/filters/network/sni_cluster/BUILD b/test/extensions/filters/network/sni_cluster/BUILD index a521d1f07145..3bc852b873d7 100644 --- a/test/extensions/filters/network/sni_cluster/BUILD +++ b/test/extensions/filters/network/sni_cluster/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/network/sni_dynamic_forward_proxy/BUILD b/test/extensions/filters/network/sni_dynamic_forward_proxy/BUILD index 059a4a0ef6ff..c9981ba5f70c 100644 --- a/test/extensions/filters/network/sni_dynamic_forward_proxy/BUILD +++ b/test/extensions/filters/network/sni_dynamic_forward_proxy/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/network/tcp_proxy/BUILD b/test/extensions/filters/network/tcp_proxy/BUILD index 318a2aa9b78a..73c4717e8ef3 100644 --- a/test/extensions/filters/network/tcp_proxy/BUILD +++ b/test/extensions/filters/network/tcp_proxy/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/network/thrift_proxy/BUILD b/test/extensions/filters/network/thrift_proxy/BUILD index c07318e6cb9a..9428df5d84e7 100644 --- a/test/extensions/filters/network/thrift_proxy/BUILD +++ b/test/extensions/filters/network/thrift_proxy/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -11,6 +9,8 @@ load( "envoy_extension_cc_test_library", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_mock( diff --git a/test/extensions/filters/network/thrift_proxy/driver/BUILD b/test/extensions/filters/network/thrift_proxy/driver/BUILD index b0461509c7a1..4e5d0f47d1d4 100644 --- a/test/extensions/filters/network/thrift_proxy/driver/BUILD +++ b/test/extensions/filters/network/thrift_proxy/driver/BUILD 
@@ -1,7 +1,8 @@ -licenses(["notice"]) # Apache 2 - +load("@rules_python//python:defs.bzl", "py_binary") load("//bazel:envoy_build_system.bzl", "envoy_package") +licenses(["notice"]) # Apache 2 + envoy_package() filegroup( diff --git a/test/extensions/filters/network/thrift_proxy/driver/fbthrift/BUILD b/test/extensions/filters/network/thrift_proxy/driver/fbthrift/BUILD index a1b33006f10f..82b251aeac77 100644 --- a/test/extensions/filters/network/thrift_proxy/driver/fbthrift/BUILD +++ b/test/extensions/filters/network/thrift_proxy/driver/fbthrift/BUILD @@ -1,7 +1,8 @@ -licenses(["notice"]) # Apache 2 - +load("@rules_python//python:defs.bzl", "py_library") load("//bazel:envoy_build_system.bzl", "envoy_package") +licenses(["notice"]) # Apache 2 + envoy_package() py_library( diff --git a/test/extensions/filters/network/thrift_proxy/driver/finagle/BUILD b/test/extensions/filters/network/thrift_proxy/driver/finagle/BUILD index 71fa29d64063..e2f159ae992d 100644 --- a/test/extensions/filters/network/thrift_proxy/driver/finagle/BUILD +++ b/test/extensions/filters/network/thrift_proxy/driver/finagle/BUILD @@ -1,7 +1,8 @@ -licenses(["notice"]) # Apache 2 - +load("@rules_python//python:defs.bzl", "py_library") load("//bazel:envoy_build_system.bzl", "envoy_package") +licenses(["notice"]) # Apache 2 + envoy_package() py_library( diff --git a/test/extensions/filters/network/thrift_proxy/driver/generated/example/BUILD b/test/extensions/filters/network/thrift_proxy/driver/generated/example/BUILD index 6c9595737b16..d3a7029ab41d 100644 --- a/test/extensions/filters/network/thrift_proxy/driver/generated/example/BUILD +++ b/test/extensions/filters/network/thrift_proxy/driver/generated/example/BUILD @@ -1,7 +1,8 @@ -licenses(["notice"]) # Apache 2 - +load("@rules_python//python:defs.bzl", "py_library") load("//bazel:envoy_build_system.bzl", "envoy_package") +licenses(["notice"]) # Apache 2 + envoy_package() py_library( diff --git a/test/extensions/filters/network/thrift_proxy/filters/ratelimit/BUILD b/test/extensions/filters/network/thrift_proxy/filters/ratelimit/BUILD index 06542bc5cbb5..69b40d35e0e7 100644 --- a/test/extensions/filters/network/thrift_proxy/filters/ratelimit/BUILD +++ b/test/extensions/filters/network/thrift_proxy/filters/ratelimit/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/network/zookeeper_proxy/BUILD b/test/extensions/filters/network/zookeeper_proxy/BUILD index d4d9cb5cef8c..ec24601e81b9 100644 --- a/test/extensions/filters/network/zookeeper_proxy/BUILD +++ b/test/extensions/filters/network/zookeeper_proxy/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/filters/udp/dns_filter/BUILD b/test/extensions/filters/udp/dns_filter/BUILD index e4a66a2fcf10..08455411d5ad 100644 --- a/test/extensions/filters/udp/dns_filter/BUILD +++ b/test/extensions/filters/udp/dns_filter/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_fuzz_test", @@ -11,6 +9,8 @@ load( "envoy_extension_cc_test_library", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test_library( diff --git 
a/test/extensions/filters/udp/udp_proxy/BUILD b/test/extensions/filters/udp/udp_proxy/BUILD index 480131efc548..9205ec9237a7 100644 --- a/test/extensions/filters/udp/udp_proxy/BUILD +++ b/test/extensions/filters/udp/udp_proxy/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/grpc_credentials/aws_iam/BUILD b/test/extensions/grpc_credentials/aws_iam/BUILD index 656bfd76357c..0796f78a871c 100644 --- a/test/extensions/grpc_credentials/aws_iam/BUILD +++ b/test/extensions/grpc_credentials/aws_iam/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", @@ -7,6 +5,8 @@ load( "envoy_select_google_grpc", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/extensions/grpc_credentials/file_based_metadata/BUILD b/test/extensions/grpc_credentials/file_based_metadata/BUILD index ccb2fd8263b3..53cff427b2fe 100644 --- a/test/extensions/grpc_credentials/file_based_metadata/BUILD +++ b/test/extensions/grpc_credentials/file_based_metadata/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", @@ -7,6 +5,8 @@ load( "envoy_select_google_grpc", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/extensions/health_checkers/redis/BUILD b/test/extensions/health_checkers/redis/BUILD index 73eafe90384a..aa4f80e21330 100644 --- a/test/extensions/health_checkers/redis/BUILD +++ b/test/extensions/health_checkers/redis/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/internal_redirect/previous_routes/BUILD b/test/extensions/internal_redirect/previous_routes/BUILD index 8425dec9126c..5ec2358246a0 100644 --- a/test/extensions/internal_redirect/previous_routes/BUILD +++ b/test/extensions/internal_redirect/previous_routes/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/quic_listeners/quiche/BUILD b/test/extensions/quic_listeners/quiche/BUILD index 43347b72524d..8301bf42aa88 100644 --- a/test/extensions/quic_listeners/quiche/BUILD +++ b/test/extensions/quic_listeners/quiche/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/extensions/quic_listeners/quiche/integration/BUILD b/test/extensions/quic_listeners/quiche/integration/BUILD index ec7a3cd5dcfa..bb473cd6effc 100644 --- a/test/extensions/quic_listeners/quiche/integration/BUILD +++ b/test/extensions/quic_listeners/quiche/integration/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/extensions/quic_listeners/quiche/platform/BUILD b/test/extensions/quic_listeners/quiche/platform/BUILD index 
f48ced9263f2..d43071a61163 100644 --- a/test/extensions/quic_listeners/quiche/platform/BUILD +++ b/test/extensions/quic_listeners/quiche/platform/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/extensions/resource_monitors/fixed_heap/BUILD b/test/extensions/resource_monitors/fixed_heap/BUILD index 4f8594bbe4ea..2d28542abde3 100644 --- a/test/extensions/resource_monitors/fixed_heap/BUILD +++ b/test/extensions/resource_monitors/fixed_heap/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/resource_monitors/injected_resource/BUILD b/test/extensions/resource_monitors/injected_resource/BUILD index e15aa3821a7a..e8f32dc41060 100644 --- a/test/extensions/resource_monitors/injected_resource/BUILD +++ b/test/extensions/resource_monitors/injected_resource/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", @@ -10,6 +8,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/extensions/retry/host/omit_canary_hosts/BUILD b/test/extensions/retry/host/omit_canary_hosts/BUILD index 605fc111a64c..98d0b8e0d823 100644 --- a/test/extensions/retry/host/omit_canary_hosts/BUILD +++ b/test/extensions/retry/host/omit_canary_hosts/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/retry/host/omit_host_metadata/BUILD b/test/extensions/retry/host/omit_host_metadata/BUILD index c219d6695b29..37030ee17c44 100644 --- a/test/extensions/retry/host/omit_host_metadata/BUILD +++ b/test/extensions/retry/host/omit_host_metadata/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/retry/host/previous_hosts/BUILD b/test/extensions/retry/host/previous_hosts/BUILD index 961c3897038f..308fa11445d8 100644 --- a/test/extensions/retry/host/previous_hosts/BUILD +++ b/test/extensions/retry/host/previous_hosts/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/retry/priority/previous_priorities/BUILD b/test/extensions/retry/priority/previous_priorities/BUILD index 712f9874b54c..f06784b4b7a3 100644 --- a/test/extensions/retry/priority/previous_priorities/BUILD +++ b/test/extensions/retry/priority/previous_priorities/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/stats_sinks/common/statsd/BUILD b/test/extensions/stats_sinks/common/statsd/BUILD index 
93d7505ab94c..49afc25b4e94 100644 --- a/test/extensions/stats_sinks/common/statsd/BUILD +++ b/test/extensions/stats_sinks/common/statsd/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/extensions/stats_sinks/dog_statsd/BUILD b/test/extensions/stats_sinks/dog_statsd/BUILD index 003c7853fdda..8c909f5d54f0 100644 --- a/test/extensions/stats_sinks/dog_statsd/BUILD +++ b/test/extensions/stats_sinks/dog_statsd/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/stats_sinks/hystrix/BUILD b/test/extensions/stats_sinks/hystrix/BUILD index 36f35b971491..093fa1f1d516 100644 --- a/test/extensions/stats_sinks/hystrix/BUILD +++ b/test/extensions/stats_sinks/hystrix/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/stats_sinks/metrics_service/BUILD b/test/extensions/stats_sinks/metrics_service/BUILD index ae60b006f1e2..9cf530605be0 100644 --- a/test/extensions/stats_sinks/metrics_service/BUILD +++ b/test/extensions/stats_sinks/metrics_service/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/stats_sinks/statsd/BUILD b/test/extensions/stats_sinks/statsd/BUILD index c081621d7403..0b21cee0f5c3 100644 --- a/test/extensions/stats_sinks/statsd/BUILD +++ b/test/extensions/stats_sinks/statsd/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/tracers/common/ot/BUILD b/test/extensions/tracers/common/ot/BUILD index 76fd9d8b1378..3e56002a89a1 100644 --- a/test/extensions/tracers/common/ot/BUILD +++ b/test/extensions/tracers/common/ot/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/tracers/datadog/BUILD b/test/extensions/tracers/datadog/BUILD index 1e6b94c6e0f0..f362c834eb39 100644 --- a/test/extensions/tracers/datadog/BUILD +++ b/test/extensions/tracers/datadog/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/tracers/dynamic_ot/BUILD b/test/extensions/tracers/dynamic_ot/BUILD index 4037befe21f9..48e8d4a97c67 100644 --- a/test/extensions/tracers/dynamic_ot/BUILD +++ b/test/extensions/tracers/dynamic_ot/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( 
"envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/tracers/lightstep/BUILD b/test/extensions/tracers/lightstep/BUILD index 40815b572d75..86849c5f84e0 100644 --- a/test/extensions/tracers/lightstep/BUILD +++ b/test/extensions/tracers/lightstep/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/tracers/opencensus/BUILD b/test/extensions/tracers/opencensus/BUILD index 9aa809b29f8d..abdaa5fc7d8e 100644 --- a/test/extensions/tracers/opencensus/BUILD +++ b/test/extensions/tracers/opencensus/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/tracers/xray/BUILD b/test/extensions/tracers/xray/BUILD index 8d1d57c436be..6ebfdf8071d0 100644 --- a/test/extensions/tracers/xray/BUILD +++ b/test/extensions/tracers/xray/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_fuzz_test", @@ -10,6 +8,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/tracers/zipkin/BUILD b/test/extensions/tracers/zipkin/BUILD index 385c4f194759..08da01bfa49b 100644 --- a/test/extensions/tracers/zipkin/BUILD +++ b/test/extensions/tracers/zipkin/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/transport_sockets/alts/BUILD b/test/extensions/transport_sockets/alts/BUILD index ce1deb5e0c4f..d1232d178908 100644 --- a/test/extensions/transport_sockets/alts/BUILD +++ b/test/extensions/transport_sockets/alts/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -10,6 +8,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/transport_sockets/tap/BUILD b/test/extensions/transport_sockets/tap/BUILD index 1aaba1cdb5d5..be4dd9ba9c95 100644 --- a/test/extensions/transport_sockets/tap/BUILD +++ b/test/extensions/transport_sockets/tap/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -9,6 +7,8 @@ load( "envoy_extension_cc_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_extension_cc_test( diff --git a/test/extensions/transport_sockets/tls/BUILD b/test/extensions/transport_sockets/tls/BUILD index bc1b9b9cbf0f..2e2586354fb9 100644 --- a/test/extensions/transport_sockets/tls/BUILD +++ b/test/extensions/transport_sockets/tls/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/extensions/transport_sockets/tls/integration/BUILD b/test/extensions/transport_sockets/tls/integration/BUILD index 2f806978c8c6..4425448c624f 100644 --- 
a/test/extensions/transport_sockets/tls/integration/BUILD +++ b/test/extensions/transport_sockets/tls/integration/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/extensions/transport_sockets/tls/test_data/BUILD b/test/extensions/transport_sockets/tls/test_data/BUILD index 2397d1c8f633..e37742552d78 100644 --- a/test/extensions/transport_sockets/tls/test_data/BUILD +++ b/test/extensions/transport_sockets/tls/test_data/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() filegroup( diff --git a/test/fuzz/BUILD b/test/fuzz/BUILD index d46ae39a51b3..35bd8e0ac197 100644 --- a/test/fuzz/BUILD +++ b/test/fuzz/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test_library", @@ -7,6 +5,8 @@ load( "envoy_proto_library", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_proto_library( diff --git a/test/integration/BUILD b/test/integration/BUILD index bcc738c0cfb3..98661cd8ee99 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -1,5 +1,4 @@ -licenses(["notice"]) # Apache 2 - +load("@rules_python//python:defs.bzl", "py_binary") load( "//bazel:envoy_build_system.bzl", "envoy_cc_fuzz_test", @@ -11,6 +10,8 @@ load( "envoy_sh_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test_library( diff --git a/test/integration/clusters/BUILD b/test/integration/clusters/BUILD index f7c33d4be98a..97b137eb75f7 100644 --- a/test/integration/clusters/BUILD +++ b/test/integration/clusters/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test_library", @@ -7,6 +5,8 @@ load( "envoy_proto_library", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test_library( diff --git a/test/integration/filters/BUILD b/test/integration/filters/BUILD index 950370da9e70..516b02f8c100 100644 --- a/test/integration/filters/BUILD +++ b/test/integration/filters/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test_library( diff --git a/test/mocks/BUILD b/test/mocks/BUILD index 29ef00661510..3b4e8e65bcd0 100644 --- a/test/mocks/BUILD +++ b/test/mocks/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test_library( diff --git a/test/mocks/access_log/BUILD b/test/mocks/access_log/BUILD index 58bbae8a0aa7..6e2ad4141fec 100644 --- a/test/mocks/access_log/BUILD +++ b/test/mocks/access_log/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( diff --git a/test/mocks/api/BUILD b/test/mocks/api/BUILD index 491bef01dadb..e4d44c573e55 100644 --- a/test/mocks/api/BUILD +++ b/test/mocks/api/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", @@ -7,6 +5,8 @@ load( "envoy_select_hot_restart", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( diff --git 
a/test/mocks/buffer/BUILD b/test/mocks/buffer/BUILD index 9d9634528ae7..38d61c302cf4 100644 --- a/test/mocks/buffer/BUILD +++ b/test/mocks/buffer/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( diff --git a/test/mocks/compression/compressor/BUILD b/test/mocks/compression/compressor/BUILD index e598f5cc5cf5..855752e06a41 100644 --- a/test/mocks/compression/compressor/BUILD +++ b/test/mocks/compression/compressor/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( diff --git a/test/mocks/compression/decompressor/BUILD b/test/mocks/compression/decompressor/BUILD index 3e4e605cef52..5e308cef1ded 100644 --- a/test/mocks/compression/decompressor/BUILD +++ b/test/mocks/compression/decompressor/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( diff --git a/test/mocks/config/BUILD b/test/mocks/config/BUILD index c9756f13893a..6df51d8b98c6 100644 --- a/test/mocks/config/BUILD +++ b/test/mocks/config/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( diff --git a/test/mocks/event/BUILD b/test/mocks/event/BUILD index c807c41d0b6a..8a27b1804d07 100644 --- a/test/mocks/event/BUILD +++ b/test/mocks/event/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( diff --git a/test/mocks/filesystem/BUILD b/test/mocks/filesystem/BUILD index edb14369c6b3..96cbe7876b31 100644 --- a/test/mocks/filesystem/BUILD +++ b/test/mocks/filesystem/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( diff --git a/test/mocks/grpc/BUILD b/test/mocks/grpc/BUILD index 436363c08203..972cba77fbe1 100644 --- a/test/mocks/grpc/BUILD +++ b/test/mocks/grpc/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( diff --git a/test/mocks/http/BUILD b/test/mocks/http/BUILD index 34227d7ac58b..65fdb6ebc4f5 100644 --- a/test/mocks/http/BUILD +++ b/test/mocks/http/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( diff --git a/test/mocks/init/BUILD b/test/mocks/init/BUILD index 5aa9f74bacd3..d2969531ecb6 100644 --- a/test/mocks/init/BUILD +++ b/test/mocks/init/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( diff --git a/test/mocks/local_info/BUILD b/test/mocks/local_info/BUILD index b7cd52aa9ff0..9ae293dfc9a6 100644 --- a/test/mocks/local_info/BUILD +++ b/test/mocks/local_info/BUILD @@ -1,11 +1,11 @@ 
-licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( diff --git a/test/mocks/network/BUILD b/test/mocks/network/BUILD index 877861539c9d..020e4b6db404 100644 --- a/test/mocks/network/BUILD +++ b/test/mocks/network/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( diff --git a/test/mocks/protobuf/BUILD b/test/mocks/protobuf/BUILD index 5d7637475f67..2db80c5e3173 100644 --- a/test/mocks/protobuf/BUILD +++ b/test/mocks/protobuf/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( diff --git a/test/mocks/ratelimit/BUILD b/test/mocks/ratelimit/BUILD index 9aa235577e39..db30620b1edd 100644 --- a/test/mocks/ratelimit/BUILD +++ b/test/mocks/ratelimit/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( diff --git a/test/mocks/redis/BUILD b/test/mocks/redis/BUILD index d97ea0dbaab9..6ff8990aae05 100644 --- a/test/mocks/redis/BUILD +++ b/test/mocks/redis/BUILD @@ -1,8 +1,8 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() diff --git a/test/mocks/router/BUILD b/test/mocks/router/BUILD index db572a1f92a7..282a60b154f8 100644 --- a/test/mocks/router/BUILD +++ b/test/mocks/router/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( diff --git a/test/mocks/runtime/BUILD b/test/mocks/runtime/BUILD index 7d899cb90c58..f68b4dde2ef7 100644 --- a/test/mocks/runtime/BUILD +++ b/test/mocks/runtime/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( diff --git a/test/mocks/secret/BUILD b/test/mocks/secret/BUILD index 313a56038cde..92a80718db01 100644 --- a/test/mocks/secret/BUILD +++ b/test/mocks/secret/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( diff --git a/test/mocks/server/BUILD b/test/mocks/server/BUILD index 4d85cd8496ab..067c98d846f6 100644 --- a/test/mocks/server/BUILD +++ b/test/mocks/server/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( diff --git a/test/mocks/ssl/BUILD b/test/mocks/ssl/BUILD index 7141bb5c8fc1..e79694f5224f 100644 --- a/test/mocks/ssl/BUILD +++ b/test/mocks/ssl/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( diff --git a/test/mocks/stats/BUILD b/test/mocks/stats/BUILD index 1880b79a93cb..6d4ddd19a050 100644 --- a/test/mocks/stats/BUILD +++ b/test/mocks/stats/BUILD @@ -1,11 +1,11 @@ 
-licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( diff --git a/test/mocks/stream_info/BUILD b/test/mocks/stream_info/BUILD index 6d33901f6cf1..da45abf717ce 100644 --- a/test/mocks/stream_info/BUILD +++ b/test/mocks/stream_info/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( diff --git a/test/mocks/tcp/BUILD b/test/mocks/tcp/BUILD index 8634b86e9c5c..2ac39f512288 100644 --- a/test/mocks/tcp/BUILD +++ b/test/mocks/tcp/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( diff --git a/test/mocks/thread_local/BUILD b/test/mocks/thread_local/BUILD index bc65c8da86c9..e05b8f6f4b96 100644 --- a/test/mocks/thread_local/BUILD +++ b/test/mocks/thread_local/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( diff --git a/test/mocks/tracing/BUILD b/test/mocks/tracing/BUILD index a5ef26bce3ff..3f4eaf881d4d 100644 --- a/test/mocks/tracing/BUILD +++ b/test/mocks/tracing/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( diff --git a/test/mocks/upstream/BUILD b/test/mocks/upstream/BUILD index 8c4ca0d28c68..6e210478aa6a 100644 --- a/test/mocks/upstream/BUILD +++ b/test/mocks/upstream/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_mock( diff --git a/test/proto/BUILD b/test/proto/BUILD index 3e058be5ec2c..f1ab09c62349 100644 --- a/test/proto/BUILD +++ b/test/proto/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", @@ -7,6 +5,8 @@ load( "envoy_proto_library", ) +licenses(["notice"]) # Apache 2 + envoy_package() exports_files(["bookstore.proto"]) diff --git a/test/server/BUILD b/test/server/BUILD index 1fe019224c2c..d6e7b23cc8cb 100644 --- a/test/server/BUILD +++ b/test/server/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_benchmark_test", @@ -13,6 +11,8 @@ load( load("//source/extensions:all_extensions.bzl", "envoy_all_extensions") load("//bazel:repositories.bzl", "PPC_SKIP_TARGETS", "WINDOWS_SKIP_TARGETS") +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/server/admin/BUILD b/test/server/admin/BUILD index 07045a5b3868..75e3fc35f8e5 100644 --- a/test/server/admin/BUILD +++ b/test/server/admin/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test_library( diff --git a/test/server/config_validation/BUILD b/test/server/config_validation/BUILD index 8c466c9b30cd..19942cb6d89e 100644 --- a/test/server/config_validation/BUILD +++ b/test/server/config_validation/BUILD @@ -1,9 +1,9 @@ -licenses(["notice"]) # Apache 2 - 
load("//bazel:envoy_build_system.bzl", "envoy_cc_fuzz_test", "envoy_cc_test", "envoy_package") load("//source/extensions:all_extensions.bzl", "envoy_all_extensions") load("//bazel:repositories.bzl", "PPC_SKIP_TARGETS", "WINDOWS_SKIP_TARGETS") +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/test_common/BUILD b/test/test_common/BUILD index 33f404d67e06..967cfe90f0c8 100644 --- a/test/test_common/BUILD +++ b/test/test_common/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_basic_cc_library", @@ -9,6 +7,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_basic_cc_library( diff --git a/test/tools/config_load_check/BUILD b/test/tools/config_load_check/BUILD index da23f11daedf..68e744520fd6 100644 --- a/test/tools/config_load_check/BUILD +++ b/test/tools/config_load_check/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test_binary", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test_library( diff --git a/test/tools/router_check/BUILD b/test/tools/router_check/BUILD index 874fff81929e..610e09311027 100644 --- a/test/tools/router_check/BUILD +++ b/test/tools/router_check/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test_binary", @@ -8,6 +6,8 @@ load( "envoy_proto_library", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test_binary( diff --git a/test/tools/router_check/test/BUILD b/test/tools/router_check/test/BUILD index fbe2bfe9b0c7..3e8d50e06b8f 100644 --- a/test/tools/router_check/test/BUILD +++ b/test/tools/router_check/test/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_package", "envoy_sh_test", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_sh_test( diff --git a/test/tools/schema_validator/BUILD b/test/tools/schema_validator/BUILD index 5bfa78514ffc..c0d198f6f5d5 100644 --- a/test/tools/schema_validator/BUILD +++ b/test/tools/schema_validator/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test_binary", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test_binary( diff --git a/test/tools/type_whisperer/BUILD b/test/tools/type_whisperer/BUILD index 5d2787128058..9e23abf89d39 100644 --- a/test/tools/type_whisperer/BUILD +++ b/test/tools/type_whisperer/BUILD @@ -1,7 +1,7 @@ -licenses(["notice"]) # Apache 2 - load("//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package") +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test( diff --git a/test/tools/wee8_compile/BUILD b/test/tools/wee8_compile/BUILD index 9c363c7d9275..d1184b071750 100644 --- a/test/tools/wee8_compile/BUILD +++ b/test/tools/wee8_compile/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_test_binary", @@ -7,6 +5,8 @@ load( "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_test_binary( diff --git a/tools/BUILD b/tools/BUILD index fbd9abfc774b..a3313f01becc 100644 --- a/tools/BUILD +++ b/tools/BUILD @@ -1,5 +1,4 @@ -licenses(["notice"]) # Apache 2 - +load("@rules_python//python:defs.bzl", "py_library") load( "//bazel:envoy_build_system.bzl", "envoy_cc_binary", @@ -8,6 +7,8 @@ load( "envoy_py_test_binary", ) +licenses(["notice"]) # Apache 2 
+ envoy_package() exports_files([ diff --git a/tools/api_boost/testdata/BUILD b/tools/api_boost/testdata/BUILD index 17f7233955bc..148b1b5c4787 100644 --- a/tools/api_boost/testdata/BUILD +++ b/tools/api_boost/testdata/BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_library( diff --git a/tools/api_proto_plugin/BUILD b/tools/api_proto_plugin/BUILD index 5c6c535a4a88..788b6d6e4295 100644 --- a/tools/api_proto_plugin/BUILD +++ b/tools/api_proto_plugin/BUILD @@ -1,9 +1,9 @@ -licenses(["notice"]) # Apache 2 - load("@bazel_skylib//rules:common_settings.bzl", "string_flag") load("@rules_python//python:defs.bzl", "py_library") load("//tools/type_whisperer:type_database.bzl", "type_database") +licenses(["notice"]) # Apache 2 + py_library( name = "api_proto_plugin", srcs = [ diff --git a/tools/api_proto_plugin/plugin.bzl b/tools/api_proto_plugin/plugin.bzl index 95568e47123a..0b19fb60031f 100644 --- a/tools/api_proto_plugin/plugin.bzl +++ b/tools/api_proto_plugin/plugin.bzl @@ -36,7 +36,7 @@ def api_proto_plugin_impl(target, ctx, output_group, mnemonic, output_suffixes): # extractions. See https://github.com/bazelbuild/bazel/issues/3971. import_paths = [] for f in target[ProtoInfo].transitive_sources.to_list(): - import_paths += ["{}={}".format(_path_ignoring_repository(f), f.path)] + import_paths.append("{}={}".format(_path_ignoring_repository(f), f.path)) # The outputs live in the ctx.label's package root. We add some additional # path information to match with protoc's notion of path relative locations. @@ -56,9 +56,9 @@ def api_proto_plugin_impl(target, ctx, output_group, mnemonic, output_suffixes): inputs = depset(transitive = [inputs] + [ctx.attr._type_db.files]) if len(ctx.attr._type_db.files.to_list()) != 1: fail("{} must have one type database file".format(ctx.attr._type_db)) - args += ["--api_proto_plugin_opt=type_db_path=" + ctx.attr._type_db.files.to_list()[0].path] + args.append("--api_proto_plugin_opt=type_db_path=" + ctx.attr._type_db.files.to_list()[0].path) if hasattr(ctx.attr, "_extra_args"): - args += ["--api_proto_plugin_opt=extra_args=" + ctx.attr._extra_args[BuildSettingInfo].value] + args.append("--api_proto_plugin_opt=extra_args=" + ctx.attr._extra_args[BuildSettingInfo].value) args += [src.path for src in target[ProtoInfo].direct_sources] env = {} diff --git a/tools/clang_tools/support/clang_tools.bzl b/tools/clang_tools/support/clang_tools.bzl index ece24fc87231..a738fa57f28d 100644 --- a/tools/clang_tools/support/clang_tools.bzl +++ b/tools/clang_tools/support/clang_tools.bzl @@ -1,5 +1,7 @@ +load("@rules_cc//cc:defs.bzl", "cc_binary", "cc_library", "cc_test") + def clang_tools_cc_binary(name, copts = [], tags = [], deps = [], **kwargs): - native.cc_binary( + cc_binary( name = name, copts = copts + [ "-fno-exceptions", @@ -11,13 +13,13 @@ def clang_tools_cc_binary(name, copts = [], tags = [], deps = [], **kwargs): ) def clang_tools_cc_library(name, **kwargs): - native.cc_library( + cc_library( name = name, **kwargs ) def clang_tools_cc_test(name, deps = [], **kwargs): - native.cc_test( + cc_test( name = name, deps = deps + ["@com_google_googletest//:gtest_main"], **kwargs diff --git a/tools/code_format/check_format.py b/tools/code_format/check_format.py index 2a82d7f93ffc..295e969661d0 100755 --- a/tools/code_format/check_format.py +++ b/tools/code_format/check_format.py @@ -727,7 +727,7 @@ def 
fixBuildPath(file_path): if os.system("%s %s %s" % (ENVOY_BUILD_FIXER_PATH, file_path, file_path)) != 0: error_messages += ["envoy_build_fixer rewrite failed for file: %s" % file_path] - if os.system("%s -mode=fix %s" % (BUILDIFIER_PATH, file_path)) != 0: + if os.system("%s -lint=fix -mode=fix %s" % (BUILDIFIER_PATH, file_path)) != 0: error_messages += ["buildifier rewrite failed for file: %s" % file_path] return error_messages diff --git a/tools/code_format/check_format_test_helper.py b/tools/code_format/check_format_test_helper.py index 67276235adce..e00144812e40 100755 --- a/tools/code_format/check_format_test_helper.py +++ b/tools/code_format/check_format_test_helper.py @@ -61,12 +61,12 @@ def fixFileExpectingSuccess(file, extra_input_files=None): command, infile, outfile, status, stdout = fixFileHelper(file, extra_input_files=extra_input_files) if status != 0: - print("FAILED:") + print("FAILED: " + infile) emitStdoutAsError(stdout) return 1 status, stdout, stderr = runCommand('diff ' + outfile + ' ' + infile + '.gold') if status != 0: - print("FAILED:") + print("FAILED: " + infile) emitStdoutAsError(stdout + stderr) return 1 return 0 diff --git a/tools/code_format/envoy_build_fixer.py b/tools/code_format/envoy_build_fixer.py index 865e9fffd48d..51f7d0fb866c 100755 --- a/tools/code_format/envoy_build_fixer.py +++ b/tools/code_format/envoy_build_fixer.py @@ -19,6 +19,9 @@ # Where does Buildozer live? BUILDOZER_PATH = paths.getBuildozer() +# Where does Buildifier live? +BUILDIFIER_PATH = paths.getBuildifier() + # Canonical Envoy license. LICENSE_STRING = 'licenses(["notice"]) # Apache 2\n\n' @@ -89,14 +92,15 @@ def FixPackageAndLicense(contents): return contents -# Remove trailing blank lines, unnecessary double blank lines. -def FixEmptyLines(contents): - return re.sub('\n\s*$', '\n', re.sub('\n\n\n', '\n\n', contents)) - - -# Misc. Buildozer cleanups. -def FixBuildozerCleanups(contents): - return RunBuildozer([('fix unusedLoads', '__pkg__')], contents) +# Run Buildifier commands on a string with lint mode. +def BuildifierLint(contents): + r = subprocess.run([BUILDIFIER_PATH, '-lint=fix', '-mode=fix', '-type=build'], + input=contents.encode(), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + if r.returncode != 0: + raise EnvoyBuildFixerError('buildozer execution failed: %s' % r) + return r.stdout.decode('utf-8') # Find all the API headers in a C++ source file. @@ -170,9 +174,8 @@ def FixBuild(path): contents = f.read() xforms = [ FixPackageAndLicense, - FixEmptyLines, functools.partial(FixApiDeps, path), - FixBuildozerCleanups, + BuildifierLint, ] for xform in xforms: contents = xform(contents) diff --git a/tools/config_validation/BUILD b/tools/config_validation/BUILD index 99d15311d6f0..5ca3d0ef9a4a 100644 --- a/tools/config_validation/BUILD +++ b/tools/config_validation/BUILD @@ -1,7 +1,8 @@ -licenses(["notice"]) # Apache 2 - +load("@rules_python//python:defs.bzl", "py_binary") load("@config_validation_pip3//:requirements.bzl", "requirement") +licenses(["notice"]) # Apache 2 + py_binary( name = "validate_fragment", srcs = ["validate_fragment.py"], diff --git a/tools/proto_format/active_protos_gen.py b/tools/proto_format/active_protos_gen.py index 37b871d93f2e..bd29cc197d7d 100755 --- a/tools/proto_format/active_protos_gen.py +++ b/tools/proto_format/active_protos_gen.py @@ -11,10 +11,10 @@ BUILD_FILE_TEMPLATE = string.Template( """# DO NOT EDIT. This file is generated by tools/proto_format/active_protos_gen.py. 
-licenses(["notice"]) # Apache 2 - load("@rules_proto//proto:defs.bzl", "proto_library") +licenses(["notice"]) # Apache 2 + # This tracks active development versions of protos. proto_library( name = "active_protos", diff --git a/tools/protodoc/BUILD b/tools/protodoc/BUILD index 45480b086306..256316f2a18c 100644 --- a/tools/protodoc/BUILD +++ b/tools/protodoc/BUILD @@ -1,7 +1,8 @@ -licenses(["notice"]) # Apache 2 - +load("@rules_python//python:defs.bzl", "py_binary") load("@protodoc_pip3//:requirements.bzl", "requirement") +licenses(["notice"]) # Apache 2 + py_binary( name = "generate_empty", srcs = ["generate_empty.py"], diff --git a/tools/protoxform/BUILD b/tools/protoxform/BUILD index 631a4f9585e4..fa15105ca927 100644 --- a/tools/protoxform/BUILD +++ b/tools/protoxform/BUILD @@ -1,3 +1,5 @@ +load("@rules_python//python:defs.bzl", "py_binary", "py_test") + licenses(["notice"]) # Apache 2 py_binary( diff --git a/tools/testdata/check_format/add_envoy_package.BUILD.gold b/tools/testdata/check_format/add_envoy_package.BUILD.gold index 69f20390a6f7..01852a416018 100644 --- a/tools/testdata/check_format/add_envoy_package.BUILD.gold +++ b/tools/testdata/check_format/add_envoy_package.BUILD.gold @@ -1,7 +1,7 @@ -licenses(["notice"]) # Apache 2 - load("//bazel:envoy_build_system.bzl", "envoy_package") +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_binary( diff --git a/tools/testdata/check_format/bad_envoy_build_sys_ref.BUILD b/tools/testdata/check_format/bad_envoy_build_sys_ref.BUILD index 0ffd61847fbb..f1381ba24ace 100644 --- a/tools/testdata/check_format/bad_envoy_build_sys_ref.BUILD +++ b/tools/testdata/check_format/bad_envoy_build_sys_ref.BUILD @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "@envoy//bazel:envoy_build_system.bzl", "envoy_cc_binary", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_binary( diff --git a/tools/testdata/check_format/bad_envoy_build_sys_ref.BUILD.gold b/tools/testdata/check_format/bad_envoy_build_sys_ref.BUILD.gold index 218f4d656df3..f2dba3a21d96 100644 --- a/tools/testdata/check_format/bad_envoy_build_sys_ref.BUILD.gold +++ b/tools/testdata/check_format/bad_envoy_build_sys_ref.BUILD.gold @@ -1,11 +1,11 @@ -licenses(["notice"]) # Apache 2 - load( "//bazel:envoy_build_system.bzl", "envoy_cc_binary", "envoy_package", ) +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_binary( diff --git a/tools/testdata/check_format/canonical_api_deps.BUILD b/tools/testdata/check_format/canonical_api_deps.BUILD index e342c9cf9b24..3bf457143107 100644 --- a/tools/testdata/check_format/canonical_api_deps.BUILD +++ b/tools/testdata/check_format/canonical_api_deps.BUILD @@ -1,7 +1,7 @@ -licenses(["notice"]) # Apache 2 - load("//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package") +licenses(["notice"]) # Apache 2 + envoy_package() # Deps can be inferred, irrelevant deps are removed. diff --git a/tools/testdata/check_format/canonical_api_deps.BUILD.gold b/tools/testdata/check_format/canonical_api_deps.BUILD.gold index 759f31e1cb42..69bfe69e0d49 100644 --- a/tools/testdata/check_format/canonical_api_deps.BUILD.gold +++ b/tools/testdata/check_format/canonical_api_deps.BUILD.gold @@ -1,7 +1,7 @@ -licenses(["notice"]) # Apache 2 - load("//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package") +licenses(["notice"]) # Apache 2 + envoy_package() # Deps can be inferred, irrelevant deps are removed. 
diff --git a/tools/testdata/check_format/canonical_spacing.BUILD.gold b/tools/testdata/check_format/canonical_spacing.BUILD.gold index 69f20390a6f7..01852a416018 100644 --- a/tools/testdata/check_format/canonical_spacing.BUILD.gold +++ b/tools/testdata/check_format/canonical_spacing.BUILD.gold @@ -1,7 +1,7 @@ -licenses(["notice"]) # Apache 2 - load("//bazel:envoy_build_system.bzl", "envoy_package") +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_binary( diff --git a/tools/testdata/check_format/remove_unused_loads.BUILD b/tools/testdata/check_format/remove_unused_loads.BUILD index 586df2dc5632..6a7bcfdc23b5 100644 --- a/tools/testdata/check_format/remove_unused_loads.BUILD +++ b/tools/testdata/check_format/remove_unused_loads.BUILD @@ -1,8 +1,8 @@ -licenses(["notice"]) # Apache 2 - load("//foo.bzl", "bar") load("//bazel:envoy_build_system.bzl", "envoy_package", "envoy_cc_library") +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_binary( diff --git a/tools/testdata/check_format/remove_unused_loads.BUILD.gold b/tools/testdata/check_format/remove_unused_loads.BUILD.gold index 69f20390a6f7..01852a416018 100644 --- a/tools/testdata/check_format/remove_unused_loads.BUILD.gold +++ b/tools/testdata/check_format/remove_unused_loads.BUILD.gold @@ -1,7 +1,7 @@ -licenses(["notice"]) # Apache 2 - load("//bazel:envoy_build_system.bzl", "envoy_package") +licenses(["notice"]) # Apache 2 + envoy_package() envoy_cc_binary( diff --git a/tools/testdata/check_format/skip_envoy_package.BUILD b/tools/testdata/check_format/skip_envoy_package.BUILD index 51736f94d67d..11d7a8c872b1 100644 --- a/tools/testdata/check_format/skip_envoy_package.BUILD +++ b/tools/testdata/check_format/skip_envoy_package.BUILD @@ -1,3 +1,5 @@ +load("@rules_cc//cc:defs.bzl", "cc_binary") + licenses(["notice"]) # Apache 2 cc_binary( diff --git a/tools/testdata/check_format/skip_envoy_package.BUILD.gold b/tools/testdata/check_format/skip_envoy_package.BUILD.gold index 51736f94d67d..11d7a8c872b1 100644 --- a/tools/testdata/check_format/skip_envoy_package.BUILD.gold +++ b/tools/testdata/check_format/skip_envoy_package.BUILD.gold @@ -1,3 +1,5 @@ +load("@rules_cc//cc:defs.bzl", "cc_binary") + licenses(["notice"]) # Apache 2 cc_binary( diff --git a/tools/testdata/check_format/update_license.BUILD.gold b/tools/testdata/check_format/update_license.BUILD.gold index c66178a8f94a..eff2213c760c 100644 --- a/tools/testdata/check_format/update_license.BUILD.gold +++ b/tools/testdata/check_format/update_license.BUILD.gold @@ -1,5 +1,5 @@ -licenses(["notice"]) # Apache 2 - load("//some:thing.bzl", "foo") +licenses(["notice"]) # Apache 2 + foo() diff --git a/tools/testdata/protoxform/BUILD b/tools/testdata/protoxform/BUILD index 382cffec50e4..6769f453f6ff 100644 --- a/tools/testdata/protoxform/BUILD +++ b/tools/testdata/protoxform/BUILD @@ -1,3 +1,5 @@ +load("@rules_proto//proto:defs.bzl", "proto_library") + licenses(["notice"]) # Apache 2 proto_library( diff --git a/tools/testdata/protoxform/envoy/active_non_terminal/v2/BUILD b/tools/testdata/protoxform/envoy/active_non_terminal/v2/BUILD index 4c756ea94137..00f83f0bf1f7 100644 --- a/tools/testdata/protoxform/envoy/active_non_terminal/v2/BUILD +++ b/tools/testdata/protoxform/envoy/active_non_terminal/v2/BUILD @@ -1,3 +1,5 @@ +load("@rules_proto//proto:defs.bzl", "proto_library") + licenses(["notice"]) # Apache 2 proto_library( diff --git a/tools/testdata/protoxform/envoy/active_terminal/v2/BUILD b/tools/testdata/protoxform/envoy/active_terminal/v2/BUILD index 
d97319b2631a..db6244be9a36 100644 --- a/tools/testdata/protoxform/envoy/active_terminal/v2/BUILD +++ b/tools/testdata/protoxform/envoy/active_terminal/v2/BUILD @@ -1,3 +1,5 @@ +load("@rules_proto//proto:defs.bzl", "proto_library") + licenses(["notice"]) # Apache 2 proto_library( diff --git a/tools/testdata/protoxform/envoy/frozen/v2/BUILD b/tools/testdata/protoxform/envoy/frozen/v2/BUILD index bbbcaffdbc75..9226f3e71361 100644 --- a/tools/testdata/protoxform/envoy/frozen/v2/BUILD +++ b/tools/testdata/protoxform/envoy/frozen/v2/BUILD @@ -1,3 +1,5 @@ +load("@rules_proto//proto:defs.bzl", "proto_library") + licenses(["notice"]) # Apache 2 proto_library( diff --git a/tools/testdata/protoxform/envoy/frozen/v3/BUILD b/tools/testdata/protoxform/envoy/frozen/v3/BUILD index bbbcaffdbc75..9226f3e71361 100644 --- a/tools/testdata/protoxform/envoy/frozen/v3/BUILD +++ b/tools/testdata/protoxform/envoy/frozen/v3/BUILD @@ -1,3 +1,5 @@ +load("@rules_proto//proto:defs.bzl", "proto_library") + licenses(["notice"]) # Apache 2 proto_library( diff --git a/tools/testdata/protoxform/envoy/v2/BUILD b/tools/testdata/protoxform/envoy/v2/BUILD index 08fcd5836915..18cca27da4c6 100644 --- a/tools/testdata/protoxform/envoy/v2/BUILD +++ b/tools/testdata/protoxform/envoy/v2/BUILD @@ -1,3 +1,5 @@ +load("@rules_proto//proto:defs.bzl", "proto_library") + licenses(["notice"]) # Apache 2 proto_library( diff --git a/tools/testdata/protoxform/external/BUILD b/tools/testdata/protoxform/external/BUILD index 96986f3e19ae..3908c1ec3a49 100644 --- a/tools/testdata/protoxform/external/BUILD +++ b/tools/testdata/protoxform/external/BUILD @@ -1,3 +1,5 @@ +load("@rules_proto//proto:defs.bzl", "proto_library") + licenses(["notice"]) # Apache 2 proto_library( diff --git a/tools/type_whisperer/BUILD b/tools/type_whisperer/BUILD index 191e2b90d1ba..4b2b7735de55 100644 --- a/tools/type_whisperer/BUILD +++ b/tools/type_whisperer/BUILD @@ -1,11 +1,12 @@ -licenses(["notice"]) # Apache 2 - +load("@rules_python//python:defs.bzl", "py_binary") load("//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_package", "envoy_proto_library") load("//tools/type_whisperer:api_build_file.bzl", "api_build_file") load("//tools/type_whisperer:file_descriptor_set_text.bzl", "file_descriptor_set_text") load("//tools/type_whisperer:type_database.bzl", "type_database") load("//tools/type_whisperer:proto_cc_source.bzl", "proto_cc_source") +licenses(["notice"]) # Apache 2 + envoy_package() envoy_proto_library( diff --git a/tools/type_whisperer/file_descriptor_set_text.bzl b/tools/type_whisperer/file_descriptor_set_text.bzl index 5146e1f82331..18a5c2e72050 100644 --- a/tools/type_whisperer/file_descriptor_set_text.bzl +++ b/tools/type_whisperer/file_descriptor_set_text.bzl @@ -1,3 +1,5 @@ +load("@rules_proto//proto:defs.bzl", "ProtoInfo") + def _file_descriptor_set_text(ctx): file_descriptor_sets = depset() for dep in ctx.attr.deps: From f7a63f14c8159b25bbd90561098cadd7fc68cb94 Mon Sep 17 00:00:00 2001 From: asraa Date: Tue, 2 Jun 2020 21:57:18 -0400 Subject: [PATCH 274/909] [fuzz] fix ext authz send (#11351) Commit Message: Fixes a fuzz-only bug where on clean-up, the ext authz client `request_` was cancelled, but `request_` was never set because the call was mocked out. Additional Description: Attempting to cancel a nullptr request would never happen in practice, because cancelling the request only occurs when the filter is in the middle of calling. 
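A minimal sketch of that hazard, using illustrative stand-in types (this is not the actual ext_authz client; the real fix, visible in the uber_per_filter.cc hunk below, makes the mocked async client return a valid request handle instead):

```cpp
#include <cstdio>

// Illustrative stand-in types only; not the actual ext_authz classes.
struct AsyncRequest {
  void cancel() { std::puts("request cancelled"); }
};

struct AsyncClient {
  // A mocked-out send() that never creates a real request.
  AsyncRequest* send() { return nullptr; }
};

struct CheckClient {
  AsyncClient client;
  AsyncRequest* request = nullptr;

  void check() { request = client.send(); }  // request stays null under the mock
  void cleanup() { request->cancel(); }      // would dereference null here
};

int main() {
  CheckClient c;
  c.check();
  // c.cleanup();  // calling this reproduces the fuzz-only crash described above
  return 0;
}
```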
The cases where `check` doesn't end up sending the request (for eg cluster no longer exists) appropriately change the state to complete. Fixes issue: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=22566 Risk level: Low Testing: added corpus entry Signed-off-by: Asra Ali --- ...zz-testcase-minimized-filter_fuzz_test-5144919410999296 | 7 +++++++ test/extensions/filters/http/common/fuzz/uber_filter.cc | 2 +- test/extensions/filters/http/common/fuzz/uber_filter.h | 2 ++ .../extensions/filters/http/common/fuzz/uber_per_filter.cc | 2 ++ 4 files changed, 12 insertions(+), 1 deletion(-) create mode 100644 test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5144919410999296 diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5144919410999296 b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5144919410999296 new file mode 100644 index 000000000000..4178a4b00286 --- /dev/null +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5144919410999296 @@ -0,0 +1,7 @@ +config { + name: "envoy.ext_authz" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz" + value: "\020\001\032\356\001\n\317\001\n\177\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\035\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\022Gtype.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz\032\003\020\200`\022\032envoy.ext_aeny.ext_aututhz" + } +} \ No newline at end of file diff --git a/test/extensions/filters/http/common/fuzz/uber_filter.cc b/test/extensions/filters/http/common/fuzz/uber_filter.cc index d5f8d346fdb4..37d1a7af8a09 100644 --- a/test/extensions/filters/http/common/fuzz/uber_filter.cc +++ b/test/extensions/filters/http/common/fuzz/uber_filter.cc @@ -13,7 +13,7 @@ namespace Envoy { namespace Extensions { namespace HttpFilters { -UberFilterFuzzer::UberFilterFuzzer() { +UberFilterFuzzer::UberFilterFuzzer() : async_request_{&cluster_manager_.async_client_} { // This is a decoder filter. ON_CALL(filter_callback_, addStreamDecoderFilter(_)) .WillByDefault(Invoke([&](Http::StreamDecoderFilterSharedPtr filter) -> void { diff --git a/test/extensions/filters/http/common/fuzz/uber_filter.h b/test/extensions/filters/http/common/fuzz/uber_filter.h index b871524706f1..bc9db9988055 100644 --- a/test/extensions/filters/http/common/fuzz/uber_filter.h +++ b/test/extensions/filters/http/common/fuzz/uber_filter.h @@ -59,6 +59,8 @@ class UberFilterFuzzer { Http::FilterFactoryCb cb_; NiceMock connection_; Network::Address::InstanceConstSharedPtr addr_; + NiceMock cluster_manager_; + NiceMock async_request_; NiceMock stream_info_; // Mocked callbacks. 
diff --git a/test/extensions/filters/http/common/fuzz/uber_per_filter.cc b/test/extensions/filters/http/common/fuzz/uber_per_filter.cc index ad6913bda5f0..50c33396c2f8 100644 --- a/test/extensions/filters/http/common/fuzz/uber_per_filter.cc +++ b/test/extensions/filters/http/common/fuzz/uber_per_filter.cc @@ -83,6 +83,8 @@ void UberFilterFuzzer::perFilterSetup() { addr_ = std::make_shared("1.2.3.4", 1111); ON_CALL(connection_, remoteAddress()).WillByDefault(testing::ReturnRef(addr_)); ON_CALL(connection_, localAddress()).WillByDefault(testing::ReturnRef(addr_)); + ON_CALL(factory_context_, clusterManager()).WillByDefault(testing::ReturnRef(cluster_manager_)); + ON_CALL(cluster_manager_.async_client_, send_(_, _, _)).WillByDefault(Return(&async_request_)); ON_CALL(decoder_callbacks_, connection()).WillByDefault(testing::Return(&connection_)); ON_CALL(decoder_callbacks_, activeSpan()) From f71ab4cfb1a7293f2ffb3a7271550d719ee01393 Mon Sep 17 00:00:00 2001 From: Sunjay Bhatia Date: Wed, 3 Jun 2020 00:20:33 -0400 Subject: [PATCH 275/909] Push vX.Y-latest when a release is cut (#11405) Enables users of tagged releases to stay on the latest release of a major/minor combination Resolves https://github.com/envoyproxy/envoy/issues/11091 Signed-off-by: Sunjay Bhatia --- ci/docker_ci.sh | 7 +++++++ docs/root/install/building.rst | 3 +-- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/ci/docker_ci.sh b/ci/docker_ci.sh index 5a497631586c..7accf7f63d36 100755 --- a/ci/docker_ci.sh +++ b/ci/docker_ci.sh @@ -50,6 +50,13 @@ for BUILD_TYPE in "${BUILD_TYPES[@]}"; do docker tag "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}:local" "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}${IMAGE_POSTFIX}:latest" docker push "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}${IMAGE_POSTFIX}:latest" fi + + # Push vX.Y-latest to tag the latest image in a release line + if [[ "${AZP_BRANCH}" =~ ${RELEASE_TAG_REGEX} ]]; then + RELEASE_LINE=$(echo "$IMAGE_NAME" | sed -E 's/(v[0-9]+\.[0-9]+)\.[0-9]+/\1-latest/') + docker tag "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}:local" "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}${IMAGE_POSTFIX}:${RELEASE_LINE}" + docker push "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}${IMAGE_POSTFIX}:${RELEASE_LINE}" + fi done diff --git a/docs/root/install/building.rst b/docs/root/install/building.rst index 65f86471f389..a4868f04d2ce 100644 --- a/docs/root/install/building.rst +++ b/docs/root/install/building.rst @@ -43,8 +43,7 @@ be found in the following repositories: .. note:: - In the above repositories, we do **not** tag a *latest* image. As we now do security/stable - releases, *latest* has no good meaning and users should pin to a specific tag. + In the above repositories, we tag a *vX.Y-latest* image for each security/stable release line. On every master commit we additionally create a set of development Docker images. These images can be found in the following repositories: From f3b957591c3ccf54289aededbe4ed47df147b5af Mon Sep 17 00:00:00 2001 From: Joey Muia Date: Wed, 3 Jun 2020 07:46:21 -0700 Subject: [PATCH 276/909] health check: close conn after timeout/stream rst when grpc hc has conn reuse disabled (#11325) Close connection after timeout/stream reset when grpc health checker has connection reuse disabled. 
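The new behavior only applies when connection reuse is disabled on the health check. A small sketch of the relevant configuration through the v3 proto API (field names per envoy.config.core.v3.HealthCheck; the interval, timeout, and service name values are just examples):

```cpp
#include "envoy/config/core/v3/health_check.pb.h"

// Build a gRPC health check with connection reuse disabled; with
// reuse_connection=false the checker tears the connection down after each
// check, so timeout/reset paths must close it explicitly as well.
envoy::config::core::v3::HealthCheck makeNoReuseGrpcHealthCheck() {
  envoy::config::core::v3::HealthCheck hc;
  hc.mutable_timeout()->set_seconds(1);
  hc.mutable_interval()->set_seconds(5);
  hc.mutable_reuse_connection()->set_value(false);  // one connection per check
  hc.mutable_grpc_health_check()->set_service_name("example-service");  // example value
  return hc;
}
```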
Signed-off-by: Joey Muia --- source/common/upstream/health_checker_impl.cc | 11 +++- .../upstream/health_checker_impl_test.cc | 60 +++++++++++++++++++ 2 files changed, 70 insertions(+), 1 deletion(-) diff --git a/source/common/upstream/health_checker_impl.cc b/source/common/upstream/health_checker_impl.cc index 62b2397f9329..3904b5a8986d 100644 --- a/source/common/upstream/health_checker_impl.cc +++ b/source/common/upstream/health_checker_impl.cc @@ -714,6 +714,11 @@ void GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::onResetStream(Http::St ENVOY_CONN_LOG(debug, "connection/stream error health_flags={}", *client_, HostUtility::healthFlagsToString(*host_)); + if (!parent_.reuse_connection_) { + // Stream reset was unexpected, so we haven't closed the connection yet. + client_->close(); + } + // TODO(baranov1ch): according to all HTTP standards, we should check if reason is one of // Http::StreamResetReason::RemoteRefusedStreamReset (which may mean GOAWAY), // Http::StreamResetReason::RemoteReset or Http::StreamResetReason::ConnectionTermination (both @@ -783,7 +788,11 @@ void GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::onTimeout() { ENVOY_CONN_LOG(debug, "connection/stream timeout health_flags={}", *client_, HostUtility::healthFlagsToString(*host_)); expect_reset_ = true; - request_encoder_->getStream().resetStream(Http::StreamResetReason::LocalReset); + if (!parent_.reuse_connection_) { + client_->close(); + } else { + request_encoder_->getStream().resetStream(Http::StreamResetReason::LocalReset); + } } void GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::logHealthCheckStatus( diff --git a/test/common/upstream/health_checker_impl_test.cc b/test/common/upstream/health_checker_impl_test.cc index 562edf43d67a..dc328cb18516 100644 --- a/test/common/upstream/health_checker_impl_test.cc +++ b/test/common/upstream/health_checker_impl_test.cc @@ -4487,6 +4487,66 @@ TEST_F(GrpcHealthCheckerImplTest, DontReuseConnectionBetweenChecks) { expectHostHealthy(true); } +// Test that we close connections when a timeout occurs and reuse_connection is false. +TEST_F(GrpcHealthCheckerImplTest, DontReuseConnectionTimeout) { + setupNoReuseConnectionHC(); + cluster_->prioritySet().getMockHostSet(0)->hosts_ = { + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + + expectSessionCreate(); + expectHealthcheckStart(0); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); + health_checker_->start(); + + expectHealthcheckStop(0); + // Timeouts are considered network failures and make host unhealthy also after 2nd event. + EXPECT_CALL(*this, onHostStatus(_, HealthTransition::ChangePending)); + test_sessions_[0]->timeout_timer_->invokeCallback(); + expectHostHealthy(true); + + // A new client is created because we close the connection + // when a timeout occurs and connection reuse is disabled. + expectClientCreate(0); + expectHealthcheckStart(0); + test_sessions_[0]->interval_timer_->invokeCallback(); + + expectHealthcheckStop(0); + // Test host state haven't changed. + EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)); + respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING); + expectHostHealthy(true); +} + +// Test that we close connections when a stream reset occurs and reuse_connection is false. 
+TEST_F(GrpcHealthCheckerImplTest, DontReuseConnectionStreamReset) { + setupNoReuseConnectionHC(); + cluster_->prioritySet().getMockHostSet(0)->hosts_ = { + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + + expectSessionCreate(); + expectHealthcheckStart(0); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); + health_checker_->start(); + + expectHealthcheckStop(0); + // Resets are considered network failures and make host unhealthy also after 2nd event. + EXPECT_CALL(*this, onHostStatus(_, HealthTransition::ChangePending)); + test_sessions_[0]->request_encoder_.stream_.resetStream(Http::StreamResetReason::RemoteReset); + expectHostHealthy(true); + + // A new client is created because we close the connection + // when a stream reset occurs and connection reuse is disabled. + expectClientCreate(0); + expectHealthcheckStart(0); + test_sessions_[0]->interval_timer_->invokeCallback(); + + expectHealthcheckStop(0); + // Test host state haven't changed. + EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)); + respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING); + expectHostHealthy(true); +} + // Test UNKNOWN health status is considered unhealthy. TEST_F(GrpcHealthCheckerImplTest, GrpcFailUnknown) { setupHC(); From 5d8f7d785b59d1450b67f3279706b787c5641a66 Mon Sep 17 00:00:00 2001 From: Wayne Zhang Date: Wed, 3 Jun 2020 07:48:08 -0700 Subject: [PATCH 277/909] grpc_transcoder: implement Skip function in transcoder input stream (#11387) Skip function in Buffer::ZeroCopyInputStream is not implemented. It is used in grpc transcoder filter. Signed-off-by: Wayne Zhang --- .../buffer/zero_copy_input_stream_impl.cc | 20 ++- .../buffer/zero_copy_input_stream_impl.h | 6 +- .../buffer/zero_copy_input_stream_test.cc | 146 ++++++++++++++++++ .../grpc_json_transcoder_integration_test.cc | 31 +++- test/proto/bookstore.proto | 24 ++- 5 files changed, 222 insertions(+), 5 deletions(-) diff --git a/source/common/buffer/zero_copy_input_stream_impl.cc b/source/common/buffer/zero_copy_input_stream_impl.cc index e94e36799b52..6b805eaf01a0 100644 --- a/source/common/buffer/zero_copy_input_stream_impl.cc +++ b/source/common/buffer/zero_copy_input_stream_impl.cc @@ -19,11 +19,15 @@ void ZeroCopyInputStreamImpl::move(Buffer::Instance& instance) { buffer_->move(instance); } -bool ZeroCopyInputStreamImpl::Next(const void** data, int* size) { +void ZeroCopyInputStreamImpl::drainLastSlice() { if (position_ != 0) { buffer_->drain(position_); position_ = 0; } +} + +bool ZeroCopyInputStreamImpl::Next(const void** data, int* size) { + drainLastSlice(); Buffer::RawSliceVector slices = buffer_->getRawSlices(1); @@ -44,7 +48,19 @@ bool ZeroCopyInputStreamImpl::Next(const void** data, int* size) { return false; } -bool ZeroCopyInputStreamImpl::Skip(int) { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } +bool ZeroCopyInputStreamImpl::Skip(int count) { + ASSERT(count >= 0); + drainLastSlice(); + + // Could not skip more than buffer length. 
+ if (static_cast(count) > buffer_->length()) { + return false; + } + + buffer_->drain(count); + byte_count_ += count; + return true; +} void ZeroCopyInputStreamImpl::BackUp(int count) { ASSERT(count >= 0); diff --git a/source/common/buffer/zero_copy_input_stream_impl.h b/source/common/buffer/zero_copy_input_stream_impl.h index 96bdea0be9ea..23304d06e34f 100644 --- a/source/common/buffer/zero_copy_input_stream_impl.h +++ b/source/common/buffer/zero_copy_input_stream_impl.h @@ -36,10 +36,14 @@ class ZeroCopyInputStreamImpl : public virtual Protobuf::io::ZeroCopyInputStream // LimitingInputStream before passing to protobuf code to avoid a spin loop. bool Next(const void** data, int* size) override; void BackUp(int count) override; - bool Skip(int count) override; // Not implemented + bool Skip(int count) override; ProtobufTypes::Int64 ByteCount() const override { return byte_count_; } protected: + // The last slice is kept to support limited BackUp() calls. + // This function will drain it. + void drainLastSlice(); + Buffer::InstancePtr buffer_; uint64_t position_{0}; bool finished_{false}; diff --git a/test/common/buffer/zero_copy_input_stream_test.cc b/test/common/buffer/zero_copy_input_stream_test.cc index 8a35002d91bd..9ff0ffd6683e 100644 --- a/test/common/buffer/zero_copy_input_stream_test.cc +++ b/test/common/buffer/zero_copy_input_stream_test.cc @@ -90,6 +90,152 @@ TEST_F(ZeroCopyInputStreamTest, Finish) { EXPECT_FALSE(stream_.Next(&data_, &size_)); } +class ZeroCopyInputStreamSkipTest : public testing::Test { +public: + ZeroCopyInputStreamSkipTest() { + Buffer::OwnedImpl buffer; + buffer.addBufferFragment(buffer1_); + buffer.addBufferFragment(buffer2_); + buffer.addBufferFragment(buffer3_); + buffer.addBufferFragment(buffer4_); + + stream_.move(buffer); + } + + const std::string slice1_{"This is the first slice of the message."}; + const std::string slice2_{"This is the second slice of the message."}; + const std::string slice3_{"This is the third slice of the message."}; + const std::string slice4_{"This is the fourth slice of the message."}; + BufferFragmentImpl buffer1_{slice1_.data(), slice1_.size(), nullptr}; + BufferFragmentImpl buffer2_{slice2_.data(), slice2_.size(), nullptr}; + BufferFragmentImpl buffer3_{slice3_.data(), slice3_.size(), nullptr}; + BufferFragmentImpl buffer4_{slice4_.data(), slice4_.size(), nullptr}; + + const size_t total_bytes_{slice1_.size() + slice2_.size() + slice3_.size() + slice4_.size()}; + ZeroCopyInputStreamImpl stream_; + + const void* data_; + int size_; + + // Convert data_ buffer into a string + absl::string_view dataString() const { + return absl::string_view{reinterpret_cast(data_), static_cast(size_)}; + } +}; + +TEST_F(ZeroCopyInputStreamSkipTest, SkipFirstPartialSlice) { + // Only skip the 10 bytes in the first slice. 
+ constexpr int skip_count = 10; + EXPECT_TRUE(stream_.Skip(skip_count)); + + EXPECT_EQ(skip_count, stream_.ByteCount()); + + // Read the first slice + EXPECT_TRUE(stream_.Next(&data_, &size_)); + EXPECT_EQ(slice1_.size() - skip_count, size_); + EXPECT_EQ(slice1_.substr(skip_count), dataString()); + EXPECT_EQ(slice1_.size(), stream_.ByteCount()); +} + +TEST_F(ZeroCopyInputStreamSkipTest, SkipFirstFullSlice) { + // Skip the full first slice + EXPECT_TRUE(stream_.Skip(slice1_.size())); + + EXPECT_EQ(slice1_.size(), stream_.ByteCount()); + + // Read the second slice + EXPECT_TRUE(stream_.Next(&data_, &size_)); + EXPECT_EQ(slice2_.size(), size_); + EXPECT_EQ(slice2_, dataString()); + EXPECT_EQ(slice1_.size() + slice2_.size(), stream_.ByteCount()); +} + +TEST_F(ZeroCopyInputStreamSkipTest, BackUpAndSkipToEndOfSlice) { + // Read the first slice, backUp 10 byes, skip 10 bytes to the end of the first slice. + EXPECT_TRUE(stream_.Next(&data_, &size_)); + EXPECT_EQ(slice1_.size(), size_); + EXPECT_EQ(slice1_, dataString()); + + constexpr int backup_count = 10; + stream_.BackUp(backup_count); + EXPECT_TRUE(stream_.Skip(backup_count)); + + EXPECT_EQ(slice1_.size(), stream_.ByteCount()); + + // Next read is the second slice + EXPECT_TRUE(stream_.Next(&data_, &size_)); + EXPECT_EQ(slice2_.size(), size_); + EXPECT_EQ(slice2_, dataString()); + EXPECT_EQ(slice1_.size() + slice2_.size(), stream_.ByteCount()); +} + +TEST_F(ZeroCopyInputStreamSkipTest, SkipAcrossTwoSlices) { + // Read the first slice, backUp 10 byes, skip 15 bytes; 5 bytes into the second slice. + EXPECT_TRUE(stream_.Next(&data_, &size_)); + EXPECT_EQ(slice1_.size(), size_); + EXPECT_EQ(slice1_, dataString()); + + constexpr int backup_count = 10; // the backup bytes to the end of first slice. + constexpr int skip_count = 5; // The skip bytes in the second slice + stream_.BackUp(backup_count); + EXPECT_TRUE(stream_.Skip(backup_count + skip_count)); + + EXPECT_EQ(slice1_.size() + skip_count, stream_.ByteCount()); + + // Read the remain second slice + EXPECT_TRUE(stream_.Next(&data_, &size_)); + EXPECT_EQ(slice2_.size() - skip_count, size_); + EXPECT_EQ(slice2_.substr(skip_count), dataString()); + EXPECT_EQ(slice1_.size() + slice2_.size(), stream_.ByteCount()); +} + +TEST_F(ZeroCopyInputStreamSkipTest, SkipAcrossThreeSlices) { + // Read the first slice, backUp 10 byes, skip 10 + slice2.size + 5; 5 bytes into the third slice. + EXPECT_TRUE(stream_.Next(&data_, &size_)); + EXPECT_EQ(slice1_.size(), size_); + EXPECT_EQ(slice1_, dataString()); + + constexpr int backup_count = 10; // the backup bytes to the end of first slice. 
+ constexpr int skip_count = 5; // The skip bytes in the third slice + stream_.BackUp(backup_count); + EXPECT_TRUE(stream_.Skip(backup_count + slice2_.size() + skip_count)); + + EXPECT_EQ(slice1_.size() + slice2_.size() + skip_count, stream_.ByteCount()); + + // Read the remain third slice + EXPECT_TRUE(stream_.Next(&data_, &size_)); + EXPECT_EQ(slice3_.size() - skip_count, size_); + EXPECT_EQ(slice3_.substr(skip_count), dataString()); + EXPECT_EQ(slice1_.size() + slice2_.size() + slice3_.size(), stream_.ByteCount()); +} + +TEST_F(ZeroCopyInputStreamSkipTest, SkipToEndOfBuffer) { + // Failed to skip one extra byte + EXPECT_FALSE(stream_.Skip(total_bytes_ + 1)); + + EXPECT_TRUE(stream_.Skip(total_bytes_)); + EXPECT_EQ(total_bytes_, stream_.ByteCount()); + + // Failed to skip one extra byte + EXPECT_FALSE(stream_.Skip(1)); +} + +TEST_F(ZeroCopyInputStreamSkipTest, ReadFirstSkipToTheEnd) { + // Read the first slice, backUp 10 byes, skip to the end of buffer + EXPECT_TRUE(stream_.Next(&data_, &size_)); + EXPECT_EQ(slice1_.size(), size_); + EXPECT_EQ(slice1_, dataString()); + + constexpr int backup_count = 10; // the backup bytes to the end of first slice. + stream_.BackUp(backup_count); + + EXPECT_TRUE(stream_.Skip(total_bytes_ - slice1_.size() + backup_count)); + EXPECT_EQ(total_bytes_, stream_.ByteCount()); + + // Failed to skip one extra byte + EXPECT_FALSE(stream_.Skip(1)); +} + } // namespace } // namespace Buffer } // namespace Envoy diff --git a/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc b/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc index 982a3dffa707..c5acf3eedc69 100644 --- a/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc +++ b/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc @@ -809,7 +809,9 @@ TEST_P(GrpcJsonTranscoderIntegrationTest, LargeStruct) { R"({"content":)" + largeJson + R"(})"); } -TEST_P(GrpcJsonTranscoderIntegrationTest, UnknownField) { +TEST_P(GrpcJsonTranscoderIntegrationTest, UnknownFieldInRequest) { + // Request JSON has many fields that are unknown to the request proto message. + // They are discarded. HttpIntegrationTest::initialize(); testTranscoding( Http::TestRequestHeaderMapImpl{{":method", "POST"}, @@ -825,6 +827,33 @@ TEST_P(GrpcJsonTranscoderIntegrationTest, UnknownField) { R"({"id":"20","theme":"Children"})"); } +// Test proto to json transcoding with an unknown field in the response message. +// gRPC server may use a updated proto with a new field, but Envoy transcoding +// filter could use an old proto descriptor without that field. That fields is unknown +// to the Envoy transcoder filter. Expected result: the unknown field is discarded, +// other fields should be transcoded properly. +TEST_P(GrpcJsonTranscoderIntegrationTest, UnknownResponse) { + // The mocked upstream proto response message is bookstore::BigBook which has + // all 3 fields. But the proto descriptor used by the Envoy transcoder filter is using + // bookstore::OldBigBook which is missing the `field1` field. + HttpIntegrationTest::initialize(); + // The bug is ZeroCopyInputStreamImpl::Skip() which is not implemented. + // In order to trigger a call to that function, the response message has to be big enough + // so it is stored in multiple slices. 
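The comment above alludes to how Skip() actually gets exercised. A small illustrative sketch of that call path, assuming protobuf's standard CodedInputStream API layered on the Envoy stream adapter (the helper name is hypothetical, not part of the patch):

```cpp
#include "common/buffer/zero_copy_input_stream_impl.h"
#include "common/protobuf/protobuf.h"
#include "envoy/buffer/buffer.h"

// Skips `count` bytes of an already-serialized message. When the data spans
// more than one buffer slice, CodedInputStream::Skip() runs past the current
// slice and delegates to ZeroCopyInputStreamImpl::Skip() underneath.
bool skipBytes(Envoy::Buffer::Instance& serialized, int count) {
  Envoy::Buffer::ZeroCopyInputStreamImpl stream;
  stream.move(serialized);  // take over the slices without copying
  Envoy::Protobuf::io::CodedInputStream coded(&stream);
  return coded.Skip(count);
}
```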
+ const std::string field1_value = std::string(32 * 1024, 'O'); + const std::string response_body = + fmt::format(R"(field1: "{}" field2: "field2_value" field3: "field3_value" )", field1_value); + testTranscoding( + Http::TestRequestHeaderMapImpl{ + {":method", "GET"}, {":path", "/bigbook"}, {":authority", "host"}}, + "", {""}, {response_body}, Status(), + Http::TestResponseHeaderMapImpl{{":status", "200"}, + {"content-type", "application/json"}, + {"content-length", "49"}, + {"grpc-status", "0"}}, + R"({"field2":"field2_value","field3":"field3_value"})"); +} + TEST_P(GrpcJsonTranscoderIntegrationTest, UTF8) { HttpIntegrationTest::initialize(); testTranscoding( diff --git a/test/proto/bookstore.proto b/test/proto/bookstore.proto index d814cb8f2e8d..c4ecf02e7502 100644 --- a/test/proto/bookstore.proto +++ b/test/proto/bookstore.proto @@ -120,6 +120,14 @@ service Bookstore { body: "content" }; } + // To test grpc transcoding with an unknown field. + // This could happen when the grpc server is using a updated proto with a new field, + // but Envoy transcoding config is still using the old version. + rpc GetBigBook(google.protobuf.Empty) returns (OldBigBook) { + option (google.api.http) = { + get: "/bigbook" + }; + } } // A shelf resource. @@ -251,4 +259,18 @@ message DeepNestedBody { Nested nested = 1000000; } Nested nested = 1; -} \ No newline at end of file +} + +// gRPC server is using BigBook, but envoy transcoder filter is using +// OldBigBook with missing `field1`. +message BigBook { + string field1 = 1; + string field2 = 2; + string field3 = 3; +} + +// The BigBook message with missing `field1`. +message OldBigBook { + string field2 = 2; + string field3 = 3; +} From 981507ff53da65669017ea7c60854142e1b849c8 Mon Sep 17 00:00:00 2001 From: Yuchen Dai Date: Wed, 3 Jun 2020 11:01:55 -0700 Subject: [PATCH 278/909] tcp_proxy: fix a crash when h2 tunnel is set (#11401) Fix a crash following the pattern in TcpUpstream. The upstream handle should not be created if the newStream returns nullptr, regardless on failure or on immediate success. Additional Description: Without the fix, the below backtrace could be detected by the new test case. 
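A compact sketch of the pool contract the fix relies on (types and the two-argument newStream() call mirror the Http::ConnectionPool usage in the hunk below; the wrapper logic is simplified):

```cpp
#include "envoy/http/conn_pool.h"

// newStream() either completes inline -- onPoolReady()/onPoolFailure() has
// already fired and nullptr is returned -- or returns a handle for a stream
// that is still pending. Only the pending case leaves anything to cancel,
// so only then should a handle be retained.
Envoy::Http::ConnectionPool::Cancellable*
trackIfPending(Envoy::Http::ConnectionPool::Instance& pool,
               Envoy::Http::ResponseDecoder& response_decoder,
               Envoy::Http::ConnectionPool::Callbacks& callbacks) {
  Envoy::Http::ConnectionPool::Cancellable* handle =
      pool.newStream(response_decoder, callbacks);
  // nullptr: the callbacks already ran inline, so there is nothing to cancel
  // later; wrapping it anyway is what led to the crash fixed here.
  return handle;
}
```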
Risk: low: bugfix with HTTP/2 CONNECT Testing: new integration test Signed-off-by: Yuchen Dai --- source/common/tcp_proxy/tcp_proxy.cc | 8 +++- .../tcp_tunneling_integration_test.cc | 43 +++++++++++++++++++ 2 files changed, 49 insertions(+), 2 deletions(-) diff --git a/source/common/tcp_proxy/tcp_proxy.cc b/source/common/tcp_proxy/tcp_proxy.cc index 539ea9dde24e..b5b489238790 100644 --- a/source/common/tcp_proxy/tcp_proxy.cc +++ b/source/common/tcp_proxy/tcp_proxy.cc @@ -437,8 +437,12 @@ Network::FilterStatus Filter::initializeUpstreamConnection() { upstream_ = std::make_unique(*upstream_callbacks_, config_->tunnelingConfig()->hostname()); HttpUpstream* http_upstream = static_cast(upstream_.get()); - upstream_handle_ = std::make_shared( - conn_pool->newStream(http_upstream->responseDecoder(), *this)); + Http::ConnectionPool::Cancellable* cancellable = + conn_pool->newStream(http_upstream->responseDecoder(), *this); + if (cancellable) { + ASSERT(upstream_handle_.get() == nullptr); + upstream_handle_ = std::make_shared(cancellable); + } return Network::FilterStatus::StopIteration; } } diff --git a/test/integration/tcp_tunneling_integration_test.cc b/test/integration/tcp_tunneling_integration_test.cc index 00954ccde7aa..3359a8f28787 100644 --- a/test/integration/tcp_tunneling_integration_test.cc +++ b/test/integration/tcp_tunneling_integration_test.cc @@ -476,6 +476,49 @@ TEST_P(TcpTunnelingIntegrationTest, TcpProxyUpstreamFlush) { tcp_client->waitForHalfClose(); } +// Test that h2 connection is reused. +TEST_P(TcpTunnelingIntegrationTest, H2ConnectionReuse) { + initialize(); + + // Establish a connection. + IntegrationTcpClientPtr tcp_client1 = makeTcpConnection(lookupPort("tcp_proxy")); + ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); + upstream_request_->encodeHeaders(default_response_headers_, false); + + // Send data in both directions. + tcp_client1->write("hello1", false); + ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, "hello1")); + + // Send data from upstream to downstream with an end stream and make sure the data is received + // before the connection is half-closed. + upstream_request_->encodeData("world1", true); + tcp_client1->waitForData("world1"); + tcp_client1->waitForHalfClose(); + tcp_client1->close(); + ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); + + // Establish a new connection. + IntegrationTcpClientPtr tcp_client2 = makeTcpConnection(lookupPort("tcp_proxy")); + + // The new CONNECT stream is established in the existing h2 connection. + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); + upstream_request_->encodeHeaders(default_response_headers_, false); + + tcp_client2->write("hello2", false); + ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, "hello2")); + + // Send data from upstream to downstream with an end stream and make sure the data is received + // before the connection is half-closed. 
+ upstream_request_->encodeData("world2", true); + tcp_client2->waitForData("world2"); + tcp_client2->waitForHalfClose(); + tcp_client2->close(); + ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); +} + INSTANTIATE_TEST_SUITE_P(IpVersions, TcpTunnelingIntegrationTest, testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), TestUtility::ipTestParamsToString); From 8b6ea4eaf95c7fa4822a35b25e6984fb2a718b49 Mon Sep 17 00:00:00 2001 From: htuch Date: Wed, 3 Jun 2020 19:02:06 -0400 Subject: [PATCH 279/909] build: bump nghttp2 to 1.41.0. (#11412) See release notes at https://github.com/nghttp2/nghttp2/releases/tag/v1.41.0. This addresses https://github.com/nghttp2/nghttp2/security/advisories/GHSA-q5wr-xfw9-q7xr. Signed-off-by: Harvey Tuch --- bazel/foreign_cc/BUILD | 4 ++-- bazel/repository_locations.bzl | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/bazel/foreign_cc/BUILD b/bazel/foreign_cc/BUILD index 747d2b7e1bf5..7f6ec4f54176 100644 --- a/bazel/foreign_cc/BUILD +++ b/bazel/foreign_cc/BUILD @@ -203,8 +203,8 @@ envoy_cmake_external( defines = ["NGHTTP2_STATICLIB"], lib_source = "@com_github_nghttp2_nghttp2//:all", static_libraries = select({ - "//bazel:windows_x86_64": ["nghttp2_static.lib"], - "//conditions:default": ["libnghttp2_static.a"], + "//bazel:windows_x86_64": ["nghttp2.lib"], + "//conditions:default": ["libnghttp2.a"], }), ) diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 3d82497d31f6..fdb3b9b9e186 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -201,9 +201,9 @@ DEPENDENCY_REPOSITORIES = dict( cpe = "N/A", ), com_github_nghttp2_nghttp2 = dict( - sha256 = "eb9d9046495a49dd40c7ef5d6c9907b51e5a6b320ea6e2add11eb8b52c982c47", - strip_prefix = "nghttp2-1.40.0", - urls = ["https://github.com/nghttp2/nghttp2/releases/download/v1.40.0/nghttp2-1.40.0.tar.gz"], + sha256 = "eacc6f0f8543583ecd659faf0a3f906ed03826f1d4157b536b4b385fe47c5bb8", + strip_prefix = "nghttp2-1.41.0", + urls = ["https://github.com/nghttp2/nghttp2/releases/download/v1.41.0/nghttp2-1.41.0.tar.gz"], use_category = ["dataplane"], cpe = "cpe:2.3:a:nghttp2:nghttp2:*", ), From ed792a3ee72e059fb864dbe1ed1ade22a2ffbaa7 Mon Sep 17 00:00:00 2001 From: htuch Date: Wed, 3 Jun 2020 19:05:24 -0400 Subject: [PATCH 280/909] docs: clarify circuit breaker and H2 conn pool behavior. (#11373) I've has a few questions on how this works and how active connection limits and stats relate. After doing some spelunking this seems to be the story. Signed-off-by: Harvey Tuch --- .../upstream/circuit_breaking.rst | 20 +++++++++- .../upstream/connection_pooling.rst | 38 +++++++++++++++---- 2 files changed, 49 insertions(+), 9 deletions(-) diff --git a/docs/root/intro/arch_overview/upstream/circuit_breaking.rst b/docs/root/intro/arch_overview/upstream/circuit_breaking.rst index f0a51e344ccf..5c808e4d4c06 100644 --- a/docs/root/intro/arch_overview/upstream/circuit_breaking.rst +++ b/docs/root/intro/arch_overview/upstream/circuit_breaking.rst @@ -13,7 +13,16 @@ configure and code each application independently. Envoy supports various types * **Cluster maximum connections**: The maximum number of connections that Envoy will establish to all hosts in an upstream cluster. If this circuit breaker overflows the :ref:`upstream_cx_overflow - ` counter for the cluster will increment. + ` counter for the cluster will increment. All connections, + whether active or draining, count against this limit. 
Even if this circuit breaker has overflowed, + Envoy will ensure that a host selected by cluster load balancing has at least one connection + allocated. This has the implication that the :ref:`upstream_cx_active + ` count for a cluster may be higher than the cluster maximum + connection circuit breaker, with an upper bound of + `cluster maximum connections + (number of endpoints in a cluster) * (connection pools for the + cluster)`. This bound applies to the sum of connections across all workers threads. See + :ref:`connection pooling ` for details on how many connection + pools a cluster may have. * **Cluster maximum pending requests**: The maximum number of requests that will be queued while waiting for a ready connection pool connection. Requests are added to the list of pending requests whenever there aren't enough upstream connections available to immediately dispatch @@ -57,6 +66,15 @@ the distributed system to be tuned independently and have different limits. The circuit breakers, including the number of resources remaining until a circuit breaker opens, can be observed via :ref:`statistics `. +Workers threads share circuit breaker limits, i.e. if the active connection threshold is 500, worker +thread 1 has 498 connections active, then worker thread 2 can only allocate 2 more connections. +Since the implementation is eventually consistent, races between threads may allow limits to be +potentially exceeded. + +Circuit breakers are enabled by default and have modest default values, e.g. 1024 connections per +cluster. To disable circuit breakers, set the :ref:`thresholds ` to +the highest allowed values. + Note that circuit breaking will cause the :ref:`x-envoy-overloaded ` header to be set by the router filter in the case of HTTP requests. diff --git a/docs/root/intro/arch_overview/upstream/connection_pooling.rst b/docs/root/intro/arch_overview/upstream/connection_pooling.rst index ee597f4d067d..2b239f479b3b 100644 --- a/docs/root/intro/arch_overview/upstream/connection_pooling.rst +++ b/docs/root/intro/arch_overview/upstream/connection_pooling.rst @@ -21,14 +21,36 @@ HTTP/2 ------ The HTTP/2 connection pool multiplexes multiple requests over a single connection, up to the limits -imposed by :ref:`max concurrent streams ` -and :ref:`max requests per connection `. -The HTTP/2 connection pool establishes only as many connections as are needed to serve the current -requests. With no limits, this will be only a single connection. If a GOAWAY frame is received or -if the connection reaches the maximum stream limit, the connection pool will drain the existing one. -New connections are established anytime there is a pending request without a connection that it can -be dispatched to (up to circuit breaker limits for connections). -HTTP/2 is the preferred communication protocol as connections rarely if ever get severed. +imposed by :ref:`max concurrent streams +` and :ref:`max +requests per connection `. +The HTTP/2 connection pool establishes as many connections as are needed to serve requests. With no +limits, this will be only a single connection. If a GOAWAY frame is received or if the connection +reaches the :ref:`maximum requests per connection +` limit, the connection +pool will drain the affected connection. Once a connection reaches its :ref:`maximum concurrent +stream limit `, it +will be marked as busy until a stream is available. 
New connections are established anytime there is +a pending request without a connection that can be dispatched to (up to circuit breaker limits for +connections). HTTP/2 is the preferred communication protocol, as connections rarely, if ever, get +severed. + +.. _arch_overview_conn_pool_how_many: + +Number of connection pools +-------------------------- + +Each host in each cluster will have one or more connection pools. If the cluster is HTTP/1 or HTTP/2 +only, then the host may have only a single connection pool. However, if the cluster supports multiple +upstream protocols, then at least one connection pool per protocol will be allocated. Separate +connection pools are also allocated for each of the following features: + +* :ref:`Routing priority ` +* :ref:`Socket options ` +* :ref:`Transport socket (e.g. TLS) options ` + +Each worker thread maintains its own connection pools for each cluster, so if an Envoy has two +threads and a cluster with both HTTP/1 and HTTP/2 support, there will be at least 4 connection pools. .. _arch_overview_conn_pool_health_checking: From 47f5015a6ec7b248b014b48c15627409b8f64146 Mon Sep 17 00:00:00 2001 From: James Fish Date: Wed, 3 Jun 2020 16:26:26 -0700 Subject: [PATCH 281/909] protobuf: Update yaml-cpp parser to handle edge case (#11304) Signed-off-by: James Fish --- bazel/foreign_cc/BUILD | 2 +- bazel/repository_locations.bzl | 7 ++++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/bazel/foreign_cc/BUILD b/bazel/foreign_cc/BUILD index 7f6ec4f54176..c87f82ff4eae 100644 --- a/bazel/foreign_cc/BUILD +++ b/bazel/foreign_cc/BUILD @@ -217,7 +217,7 @@ envoy_cmake_external( }, lib_source = "@com_github_jbeder_yaml_cpp//:all", static_libraries = select({ - "//bazel:windows_x86_64": ["libyaml-cpp.lib"], + "//bazel:windows_x86_64": ["yaml-cpp.lib"], "//conditions:default": ["libyaml-cpp.a"], }), ) diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index fdb3b9b9e186..8c8676f7b90b 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -263,9 +263,10 @@ DEPENDENCY_REPOSITORIES = dict( cpe = "cpe:2.3:a:gnu:zlib:*", ), com_github_jbeder_yaml_cpp = dict( - sha256 = "77ea1b90b3718aa0c324207cb29418f5bced2354c2e483a9523d98c3460af1ed", - strip_prefix = "yaml-cpp-yaml-cpp-0.6.3", - urls = ["https://github.com/jbeder/yaml-cpp/archive/yaml-cpp-0.6.3.tar.gz"], + sha256 = "17ffa6320c33de65beec33921c9334dee65751c8a4b797ba5517e844062b98f1", + strip_prefix = "yaml-cpp-6701275f1910bf63631528dfd9df9c3ac787365b", + # 2020-05-25 + urls = ["https://github.com/jbeder/yaml-cpp/archive/6701275f1910bf63631528dfd9df9c3ac787365b.tar.gz"], use_category = ["dataplane"], cpe = "N/A", ), From b19a2a59b47e4357f87268f6537bea02e044d850 Mon Sep 17 00:00:00 2001 From: jianwen612 <55008549+jianwen612@users.noreply.github.com> Date: Wed, 3 Jun 2020 18:29:08 -0500 Subject: [PATCH 282/909] fuzz: added fuzzer for getSha256Digest() in utility_impl.cc (#11407) Signed-off-by: jianwen --- test/common/crypto/BUILD | 8 ++++++++ .../35d26780ea66d4ffb726bbafaa9302687bda7624 | Bin 0 -> 64 bytes .../58030c65410d7553b1804eb7ed64bdff1188f145 | Bin 0 -> 55 bytes .../9c8bd40d34a88522d71d184c462af82e3148c02d | Bin 0 -> 56 bytes .../e7af10a10f2540b1d1d497df2926786640285b1c | Bin 0 -> 63 bytes test/common/crypto/utility_fuzz_test.cc | 15 +++++++++++++++ 6 files changed, 23 insertions(+) create mode 100644 test/common/crypto/utility_corpus/35d26780ea66d4ffb726bbafaa9302687bda7624 create mode 100644 
test/common/crypto/utility_corpus/58030c65410d7553b1804eb7ed64bdff1188f145 create mode 100644 test/common/crypto/utility_corpus/9c8bd40d34a88522d71d184c462af82e3148c02d create mode 100644 test/common/crypto/utility_corpus/e7af10a10f2540b1d1d497df2926786640285b1c create mode 100644 test/common/crypto/utility_fuzz_test.cc diff --git a/test/common/crypto/BUILD b/test/common/crypto/BUILD index 614c0e8c5b82..f9c91449d259 100644 --- a/test/common/crypto/BUILD +++ b/test/common/crypto/BUILD @@ -1,5 +1,6 @@ load( "//bazel:envoy_build_system.bzl", + "envoy_cc_fuzz_test", "envoy_cc_test", "envoy_package", ) @@ -23,3 +24,10 @@ envoy_cc_test( "//source/extensions/common/crypto:utility_lib", ], ) + +envoy_cc_fuzz_test( + name = "utility_fuzz_test", + srcs = ["utility_fuzz_test.cc"], + corpus = "utility_corpus", + deps = ["//source/extensions/common/crypto:utility_lib"], +) diff --git a/test/common/crypto/utility_corpus/35d26780ea66d4ffb726bbafaa9302687bda7624 b/test/common/crypto/utility_corpus/35d26780ea66d4ffb726bbafaa9302687bda7624 new file mode 100644 index 0000000000000000000000000000000000000000..5062158477797e418ad83d7fcadece98fc967e34 GIT binary patch literal 64 qcmd<$0s=34hNPq;M1& literal 0 HcmV?d00001 diff --git a/test/common/crypto/utility_corpus/9c8bd40d34a88522d71d184c462af82e3148c02d b/test/common/crypto/utility_corpus/9c8bd40d34a88522d71d184c462af82e3148c02d new file mode 100644 index 0000000000000000000000000000000000000000..7d28a98757a87e126898b8f64deb93c2cf00a656 GIT binary patch literal 56 ocmd<$0s=34hNPq Date: Wed, 3 Jun 2020 20:08:39 -0400 Subject: [PATCH 283/909] Windows: add docker development documentation (#11363) Commit Message: Windows: add docker development documentation Additional Description: Risk Level: Low Testing: N/A Docs Changes: N/A Release Notes: N/A Signed-off-by: Sunjay Bhatia --- bazel/README.md | 10 +++++++++- ci/README.md | 37 ++++++++++++++++++++++++++++++++++--- 2 files changed, 43 insertions(+), 4 deletions(-) diff --git a/bazel/README.md b/bazel/README.md index 9bad8cf1431a..435c5b29d212 100644 --- a/bazel/README.md +++ b/bazel/README.md @@ -190,12 +190,20 @@ for how to update or override dependencies. ## Building Envoy with the CI Docker image -Envoy can also be built with the Docker image used for CI, by installing Docker and executing: +Envoy can also be built with the Docker image used for CI, by installing Docker and executing the following. + +On Linux, run: ``` ./ci/run_envoy_docker.sh './ci/do_ci.sh bazel.dev' ``` +On Windows: + +``` +./ci/run_envoy_docker_windows.sh './ci/windows_ci_steps.sh' +``` + See also the [documentation](https://github.com/envoyproxy/envoy/tree/master/ci) for developer use of the CI Docker image. diff --git a/ci/README.md b/ci/README.md index 7a705a948c0f..e7d52fba450a 100644 --- a/ci/README.md +++ b/ci/README.md @@ -1,8 +1,10 @@ # Developer use of CI Docker images -Two flavors of Envoy Docker images, based on Ubuntu and Alpine Linux, are built. +There are two available flavors of Envoy Docker images for Linux, based on Ubuntu and Alpine Linux +and an image based on Windows2019. ## Ubuntu Envoy image + The Ubuntu based Envoy Docker image at [`envoyproxy/envoy-build:`](https://hub.docker.com/r/envoyproxy/envoy-build/) is used for CircleCI checks, where `` is specified in [`envoy_build_sha.sh`](https://github.com/envoyproxy/envoy/blob/master/ci/envoy_build_sha.sh). 
Developers may work with the latest build image SHA in [envoy-build-tools](https://github.com/envoyproxy/envoy-build-tools/blob/master/toolchains/rbe_toolchains_config.bzl#L8) @@ -18,18 +20,27 @@ one with an Envoy binary with debug (`envoyproxy/envoy-alpine-debug`) symbols an Both images are pushed with two different tags: `` and `latest`. Parallel to the Ubuntu images above, `` corresponds to the master commit at which the binary was compiled, and `latest` corresponds to a binary built from the latest tip of master that passed tests. +## Windows 2019 Envoy image + +The Windows 2019 based Envoy Docker image at [`envoyproxy/envoy-build-windows2019:`](https://hub.docker.com/r/envoyproxy/envoy-build-windows2019/) +is used for CI checks, where `` is specified in [`envoy_build_sha.sh`](https://github.com/envoyproxy/envoy/blob/master/ci/envoy_build_sha.sh). +Developers may work with the most recent `envoyproxy/envoy-build-windows2019` image to provide a self-contained environment for building Envoy binaries and +running tests that reflects the latest built Windows 2019 Envoy image. + # Build image base and compiler versions -Currently there are three build images: +Currently there are three build images for Linux and one for Windows: * `envoyproxy/envoy-build` — alias to `envoyproxy/envoy-build-ubuntu`. * `envoyproxy/envoy-build-ubuntu` — based on Ubuntu 18.04 (Bionic) with GCC 9 and Clang 9 compiler. * `envoyproxy/envoy-build-centos` — based on CentOS 7 with GCC 9 and Clang 9 compiler, this image is experimental and not well tested. +* `envoyproxy/envoy-build-windows2019` — based on Windows 2019 LTS with VS 2019 Build Tools. The source for these images is located in the [envoyproxy/envoy-build-tools](https://github.com/envoyproxy/envoy-build-tools) repository. -We use the Clang compiler for all CI runs with tests. We have an additional CI run with GCC which builds binary only. +We use the Clang compiler for all Linux CI runs with tests. We have an additional Linux CI run with GCC which builds binary only. +Currently, Windows CI builds the static Envoy binary only. # C++ standard library @@ -40,6 +51,8 @@ run `./ci/do_ci.sh` as described below. # Building and running tests as a developer +## On Linux + An example basic invocation to build a developer version of the Envoy static binary (using the Bazel `fastbuild` type) is: ```bash @@ -122,6 +135,24 @@ The `./ci/run_envoy_docker.sh './ci/do_ci.sh '` targets are: * `check_spelling_pedantic`— run `aspell` on C++ and proto comments. * `docs`— build documentation tree in `generated/docs`. +## On Windows + +An example basic invocation to build the Envoy static binary and run tests is: + +```bash +./ci/run_envoy_docker_windows.sh './ci/windows_ci_steps.sh' +``` + +You can modify `./ci/windows_ci_steps.sh` to modify `bazel` arguments, tests to run, etc. + +If you would like to run an interactive session to keep the build container running (to persist your local build environment), run: + +```bash +./ci/run_envoy_docker_windows.sh 'bash' +``` + +From an interactive session, you can invoke `bazel` manually or use the `./ci/windows_ci_steps.sh` script to build and run tests. 
+ # Testing changes to the build image as a developer While all changes to the build image should eventually be upstreamed, it can be useful to From 43ab77f86fcbc4c7eee4f70f6b630d6cfdbc8686 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Wed, 3 Jun 2020 20:11:24 -0400 Subject: [PATCH 284/909] test: changing waitForDisconnect to only wait for the default timeout (#11428) This way if waitForDisconect fails one test case should fail rather than the entire test hanging. Also tagged it with ABSL_MUST_USE_RESULT so tests which wait for a disconnect before doing work would correctly fast-fail. Signed-off-by: Alyssa Wilk --- .../aggregate/cluster_integration_test.cc | 8 ++--- test/integration/cds_integration_test.cc | 14 ++++---- test/integration/http2_integration_test.cc | 2 +- test/integration/http_integration.cc | 23 ++++++------ test/integration/http_integration.h | 3 +- .../idle_timeout_integration_test.cc | 2 +- test/integration/integration_test.cc | 10 +++--- .../local_reply_integration_test.cc | 10 +++--- test/integration/overload_integration_test.cc | 2 +- test/integration/protocol_integration_test.cc | 22 ++++++------ .../tcp_tunneling_integration_test.cc | 2 +- test/integration/vhds_integration_test.cc | 36 +++++++++---------- test/integration/websocket_integration_test.h | 2 +- 13 files changed, 70 insertions(+), 66 deletions(-) diff --git a/test/extensions/clusters/aggregate/cluster_integration_test.cc b/test/extensions/clusters/aggregate/cluster_integration_test.cc index 9303792a4db8..69059250fac4 100644 --- a/test/extensions/clusters/aggregate/cluster_integration_test.cc +++ b/test/extensions/clusters/aggregate/cluster_integration_test.cc @@ -199,7 +199,7 @@ TEST_P(AggregateIntegrationTest, ClusterUpDownUp) { EXPECT_EQ("503", response->headers().getStatusValue()); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); // Tell Envoy that cluster_1 is back. EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "42", {}, {}, {})); @@ -218,7 +218,7 @@ TEST_P(AggregateIntegrationTest, TwoClusters) { testRouterHeaderOnlyRequestAndResponse(nullptr, FirstUpstreamIndex, "/aggregatecluster"); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); // Tell Envoy that cluster_2 is here. EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "55", {}, {}, {})); @@ -230,7 +230,7 @@ TEST_P(AggregateIntegrationTest, TwoClusters) { // A request for aggregate cluster should be fine. testRouterHeaderOnlyRequestAndResponse(nullptr, FirstUpstreamIndex, "/aggregatecluster"); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); // Tell Envoy that cluster_1 is gone. EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "42", {}, {}, {})); @@ -242,7 +242,7 @@ TEST_P(AggregateIntegrationTest, TwoClusters) { testRouterHeaderOnlyRequestAndResponse(nullptr, SecondUpstreamIndex, "/aggregatecluster"); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); // Tell Envoy that cluster_1 is back. 
EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "42", {}, {}, {})); diff --git a/test/integration/cds_integration_test.cc b/test/integration/cds_integration_test.cc index b9e60886086c..8a28eeb652c8 100644 --- a/test/integration/cds_integration_test.cc +++ b/test/integration/cds_integration_test.cc @@ -163,7 +163,7 @@ TEST_P(CdsIntegrationTest, CdsClusterUpDownUp) { EXPECT_EQ("503", response->headers().getStatusValue()); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); // Tell Envoy that cluster_1 is back. EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "42", {}, {}, {})); @@ -186,7 +186,7 @@ TEST_P(CdsIntegrationTest, TwoClusters) { testRouterHeaderOnlyRequestAndResponse(nullptr, UpstreamIndex1, "/cluster1"); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); // Tell Envoy that cluster_2 is here. EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "55", {}, {}, {})); @@ -198,7 +198,7 @@ TEST_P(CdsIntegrationTest, TwoClusters) { // A request for cluster_2 should be fine. testRouterHeaderOnlyRequestAndResponse(nullptr, UpstreamIndex2, "/cluster2"); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); // Tell Envoy that cluster_1 is gone. EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "42", {}, {}, {})); @@ -211,7 +211,7 @@ TEST_P(CdsIntegrationTest, TwoClusters) { // Even with cluster_1 gone, a request for cluster_2 should be fine. testRouterHeaderOnlyRequestAndResponse(nullptr, UpstreamIndex2, "/cluster2"); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); // Tell Envoy that cluster_1 is back. EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "42", {}, {}, {})); @@ -236,7 +236,7 @@ TEST_P(CdsIntegrationTest, VersionsRememberedAfterReconnect) { // Calls our initialize(), which includes establishing a listener, route, and cluster. testRouterHeaderOnlyRequestAndResponse(nullptr, UpstreamIndex1, "/cluster1"); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); // Close the connection carrying Envoy's xDS gRPC stream... AssertionResult result = xds_connection_->close(); @@ -265,11 +265,11 @@ TEST_P(CdsIntegrationTest, VersionsRememberedAfterReconnect) { // A request for cluster_1 should be fine. testRouterHeaderOnlyRequestAndResponse(nullptr, UpstreamIndex1, "/cluster1"); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); // A request for cluster_2 should be fine. testRouterHeaderOnlyRequestAndResponse(nullptr, UpstreamIndex2, "/cluster2"); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); } } // namespace diff --git a/test/integration/http2_integration_test.cc b/test/integration/http2_integration_test.cc index 9235ea005a2c..0f28f2b180c0 100644 --- a/test/integration/http2_integration_test.cc +++ b/test/integration/http2_integration_test.cc @@ -512,7 +512,7 @@ TEST_P(Http2MetadataIntegrationTest, RequestMetadataReachSizeLimit) { } // Verifies client connection will be closed. 
- codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); ASSERT_FALSE(response->complete()); } diff --git a/test/integration/http_integration.cc b/test/integration/http_integration.cc index fdc7b4ad2bf6..13689d1add69 100644 --- a/test/integration/http_integration.cc +++ b/test/integration/http_integration.cc @@ -152,7 +152,10 @@ IntegrationCodecClient::startRequest(const Http::RequestHeaderMap& headers) { return {encoder, std::move(response)}; } -bool IntegrationCodecClient::waitForDisconnect(std::chrono::milliseconds time_to_wait) { +AssertionResult IntegrationCodecClient::waitForDisconnect(std::chrono::milliseconds time_to_wait) { + if (disconnected_) { + return AssertionSuccess(); + } Event::TimerPtr wait_timer; bool wait_timer_triggered = false; if (time_to_wait.count()) { @@ -171,11 +174,11 @@ bool IntegrationCodecClient::waitForDisconnect(std::chrono::milliseconds time_to } if (wait_timer_triggered && !disconnected_) { - return false; + return AssertionFailure() << "Timed out waiting for disconnect"; } EXPECT_TRUE(disconnected_); - return true; + return AssertionSuccess(); } void IntegrationCodecClient::ConnectionCallbacks::onEvent(Network::ConnectionEvent event) { @@ -537,7 +540,7 @@ void HttpIntegrationTest::testRouterUpstreamDisconnectBeforeRequestComplete() { response->waitForEndStream(); if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); } else { codec_client_->close(); } @@ -564,7 +567,7 @@ void HttpIntegrationTest::testRouterUpstreamDisconnectBeforeResponseComplete( ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); } else { response->waitForReset(); codec_client_->close(); @@ -661,7 +664,7 @@ void HttpIntegrationTest::testRouterUpstreamResponseBeforeRequestComplete() { } if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); } else { codec_client_->close(); } @@ -992,7 +995,7 @@ void HttpIntegrationTest::testLargeRequestHeaders(uint32_t size, uint32_t count, auto response = std::move(encoder_decoder.second); if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); EXPECT_TRUE(response->complete()); EXPECT_EQ("431", response->headers().getStatusValue()); } else { @@ -1031,7 +1034,7 @@ void HttpIntegrationTest::testLargeRequestTrailers(uint32_t size, uint32_t max_s if (size >= max_size) { if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); EXPECT_TRUE(response->complete()); EXPECT_EQ("431", response->headers().getStatusValue()); } else { @@ -1236,7 +1239,7 @@ void HttpIntegrationTest::testMaxStreamDuration() { test_server_->waitForCounterGe("cluster.cluster_0.upstream_rq_max_duration_reached", 1); if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); } else { response->waitForReset(); codec_client_->close(); @@ -1281,7 +1284,7 @@ void HttpIntegrationTest::testMaxStreamDurationWithRetry(bool invoke_retry_upstr if (invoke_retry_upstream_disconnect) { 
test_server_->waitForCounterGe("cluster.cluster_0.upstream_rq_max_duration_reached", 2); if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); } else { response->waitForReset(); codec_client_->close(); diff --git a/test/integration/http_integration.h b/test/integration/http_integration.h index 85448db6d672..fca8019011b0 100644 --- a/test/integration/http_integration.h +++ b/test/integration/http_integration.h @@ -38,7 +38,8 @@ class IntegrationCodecClient : public Http::CodecClientProd { void sendMetadata(Http::RequestEncoder& encoder, Http::MetadataMap metadata_map); std::pair startRequest(const Http::RequestHeaderMap& headers); - bool waitForDisconnect(std::chrono::milliseconds time_to_wait = std::chrono::milliseconds(0)); + ABSL_MUST_USE_RESULT AssertionResult + waitForDisconnect(std::chrono::milliseconds time_to_wait = TestUtility::DefaultTimeout); Network::ClientConnection* connection() const { return connection_.get(); } Network::ConnectionEvent lastConnectionEvent() const { return last_connection_event_; } Network::Connection& rawConnection() { return *connection_; } diff --git a/test/integration/idle_timeout_integration_test.cc b/test/integration/idle_timeout_integration_test.cc index 8c906aa51ada..fc7069c3d440 100644 --- a/test/integration/idle_timeout_integration_test.cc +++ b/test/integration/idle_timeout_integration_test.cc @@ -66,7 +66,7 @@ class IdleTimeoutIntegrationTest : public HttpProtocolIntegrationTest { void waitForTimeout(IntegrationStreamDecoder& response, absl::string_view stat_name = "", absl::string_view stat_prefix = "http.config_test") { if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); } else { response.waitForReset(); codec_client_->close(); diff --git a/test/integration/integration_test.cc b/test/integration/integration_test.cc index d9ef3a84a650..1ce77d956f7b 100644 --- a/test/integration/integration_test.cc +++ b/test/integration/integration_test.cc @@ -151,7 +151,7 @@ TEST_P(IntegrationTest, ConnectionClose) { {":authority", "host"}, {"connection", "close"}}); response->waitForEndStream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); EXPECT_TRUE(response->complete()); EXPECT_THAT(response->headers(), HttpStatusIs("200")); @@ -809,7 +809,7 @@ TEST_P(IntegrationTest, UpstreamProtocolError) { ASSERT_TRUE(fake_upstream_connection->waitForData(187, &data)); ASSERT_TRUE(fake_upstream_connection->write("bad protocol data!")); ASSERT_TRUE(fake_upstream_connection->waitForDisconnect()); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); EXPECT_TRUE(response->complete()); EXPECT_EQ("503", response->headers().getStatusValue()); @@ -953,7 +953,7 @@ TEST_P(IntegrationTest, ViaAppendHeaderOnly) { EXPECT_THAT(upstream_request_->headers(), HeaderValueOf(Headers::get().Via, "foo, bar")); upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, true); response->waitForEndStream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); EXPECT_TRUE(response->complete()); EXPECT_THAT(response->headers(), HttpStatusIs("200")); EXPECT_THAT(response->headers(), HeaderValueOf(Headers::get().Via, "bar")); @@ -1134,7 +1134,7 @@ TEST_P(IntegrationTest, ProcessObjectHealthy) { {":authority", "host"}, {"connection", "close"}}); 
response->waitForEndStream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); EXPECT_TRUE(response->complete()); EXPECT_THAT(response->headers(), HttpStatusIs("200")); @@ -1155,7 +1155,7 @@ TEST_P(IntegrationTest, ProcessObjectUnealthy) { {":authority", "host"}, {"connection", "close"}}); response->waitForEndStream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); EXPECT_TRUE(response->complete()); EXPECT_THAT(response->headers(), HttpStatusIs("500")); diff --git a/test/integration/local_reply_integration_test.cc b/test/integration/local_reply_integration_test.cc index 3568eb979536..833d822d8744 100644 --- a/test/integration/local_reply_integration_test.cc +++ b/test/integration/local_reply_integration_test.cc @@ -62,7 +62,7 @@ TEST_P(LocalReplyIntegrationTest, MapStatusCodeAndFormatToJson) { response->waitForEndStream(); if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); } else { codec_client_->close(); } @@ -134,7 +134,7 @@ TEST_P(LocalReplyIntegrationTest, MapStatusCodeAndFormatToJsonForFirstMatchingFi response->waitForEndStream(); if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); } else { codec_client_->close(); } @@ -206,7 +206,7 @@ TEST_P(LocalReplyIntegrationTest, ShouldNotMatchAnyFilter) { response->waitForEndStream(); if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); } else { codec_client_->close(); } @@ -267,7 +267,7 @@ TEST_P(LocalReplyIntegrationTest, ShouldMapResponseCodeAndMapToDefaultTextRespon response->waitForEndStream(); if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); } else { codec_client_->close(); } @@ -324,7 +324,7 @@ TEST_P(LocalReplyIntegrationTest, ShouldFormatResponseToCustomString) { response->waitForEndStream(); if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); } else { codec_client_->close(); } diff --git a/test/integration/overload_integration_test.cc b/test/integration/overload_integration_test.cc index 749f947e553d..6ded479c4212 100644 --- a/test/integration/overload_integration_test.cc +++ b/test/integration/overload_integration_test.cc @@ -118,7 +118,7 @@ TEST_P(OverloadIntegrationTest, DisableKeepaliveWhenOverloaded) { Http::TestRequestHeaderMapImpl request_headers{ {":method", "GET"}, {":path", "/test/long/url"}, {":scheme", "http"}, {":authority", "host"}}; auto response = sendRequestAndWaitForResponse(request_headers, 1, default_response_headers_, 1); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); EXPECT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().getStatusValue()); diff --git a/test/integration/protocol_integration_test.cc b/test/integration/protocol_integration_test.cc index 1f1d2cf956c8..3bbcf5c9ce8a 100644 --- a/test/integration/protocol_integration_test.cc +++ b/test/integration/protocol_integration_test.cc @@ -300,7 +300,7 @@ TEST_P(ProtocolIntegrationTest, DrainClose) { } EXPECT_EQ(test_server_->counter("http.config_test.downstream_cx_drain_close")->value(), 1L); - codec_client_->waitForDisconnect(); + 
ASSERT_TRUE(codec_client_->waitForDisconnect()); EXPECT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().getStatusValue()); @@ -992,7 +992,7 @@ TEST_P(ProtocolIntegrationTest, HeadersWithUnderscoresCauseRequestRejectedByDefa {"foo_bar", "baz"}}); if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); ASSERT_TRUE(response->complete()); EXPECT_EQ("400", response->headers().getStatusValue()); } else { @@ -1127,7 +1127,7 @@ TEST_P(DownstreamProtocolIntegrationTest, InvalidContentLength) { {"content-length", "-1"}}); auto response = std::move(encoder_decoder.second); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { ASSERT_TRUE(response->complete()); @@ -1158,7 +1158,7 @@ TEST_P(DownstreamProtocolIntegrationTest, InvalidContentLengthAllowed) { auto response = std::move(encoder_decoder.second); if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); } else { response->waitForReset(); codec_client_->close(); @@ -1183,7 +1183,7 @@ TEST_P(DownstreamProtocolIntegrationTest, MultipleContentLengths) { {"content-length", "3,2"}}); auto response = std::move(encoder_decoder.second); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { ASSERT_TRUE(response->complete()); @@ -1212,7 +1212,7 @@ TEST_P(DownstreamProtocolIntegrationTest, MultipleContentLengthsAllowed) { auto response = std::move(encoder_decoder.second); if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); } else { response->waitForReset(); codec_client_->close(); @@ -1420,7 +1420,7 @@ TEST_P(DownstreamProtocolIntegrationTest, ManyRequestTrailersRejected) { codec_client_->sendTrailers(*request_encoder_, request_trailers); if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); EXPECT_TRUE(response->complete()); EXPECT_EQ("431", response->headers().getStatusValue()); } else { @@ -1543,7 +1543,7 @@ TEST_P(ProtocolIntegrationTest, LargeRequestMethod) { auto encoder_decoder = codec_client_->startRequest(request_headers); request_encoder_ = &encoder_decoder.first; auto response = std::move(encoder_decoder.second); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); EXPECT_TRUE(response->complete()); EXPECT_EQ("400", response->headers().getStatusValue()); } else { @@ -1845,7 +1845,7 @@ TEST_P(ProtocolIntegrationTest, TestDownstreamResetIdleTimeout) { ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); } - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); } // Test connection is closed after single request processed. @@ -1954,7 +1954,7 @@ TEST_P(DownstreamProtocolIntegrationTest, InvalidAuthority) { } else { // For HTTP/2 this is handled by nghttp2 which resets the connection without // sending an HTTP response. 
- codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); ASSERT_FALSE(response->complete()); } } @@ -1973,7 +1973,7 @@ TEST_P(DownstreamProtocolIntegrationTest, ConnectIsBlocked) { EXPECT_TRUE(response->complete()); } else { response->waitForReset(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); } } diff --git a/test/integration/tcp_tunneling_integration_test.cc b/test/integration/tcp_tunneling_integration_test.cc index 3359a8f28787..b22fd48ca9e2 100644 --- a/test/integration/tcp_tunneling_integration_test.cc +++ b/test/integration/tcp_tunneling_integration_test.cc @@ -177,7 +177,7 @@ TEST_P(ConnectTerminationIntegrationTest, BasicMaxStreamDuration) { test_server_->waitForCounterGe("cluster.cluster_0.upstream_rq_max_duration_reached", 1); if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); } else { response_->waitForReset(); codec_client_->close(); diff --git a/test/integration/vhds_integration_test.cc b/test/integration/vhds_integration_test.cc index d9851f4dd09d..db74a8c79e26 100644 --- a/test/integration/vhds_integration_test.cc +++ b/test/integration/vhds_integration_test.cc @@ -206,7 +206,7 @@ TEST_P(VhdsInitializationTest, InitializeVhdsAfterRdsHasBeenInitialized) { // Calls our initialize(), which includes establishing a listener, route, and cluster. testRouterHeaderOnlyRequestAndResponse(nullptr, 1, "/rdsone", "vhost.rds.first"); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); // Update RouteConfig, this time include VHDS config sendSotwDiscoveryResponse( @@ -231,7 +231,7 @@ TEST_P(VhdsInitializationTest, InitializeVhdsAfterRdsHasBeenInitialized) { // Confirm vhost.first that was configured via VHDS is reachable testRouterHeaderOnlyRequestAndResponse(nullptr, 1, "/", "vhost.first"); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); } class VhdsIntegrationTest : public HttpIntegrationTest, @@ -392,7 +392,7 @@ INSTANTIATE_TEST_SUITE_P(IpVersionsClientType, VhdsIntegrationTest, GRPC_CLIENT_ TEST_P(VhdsIntegrationTest, RdsUpdateWithoutVHDSChangesDoesNotRestartVHDS) { testRouterHeaderOnlyRequestAndResponse(nullptr, 1, "/", "host"); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); // Update RouteConfig, but don't change VHDS config sendSotwDiscoveryResponse( @@ -403,7 +403,7 @@ TEST_P(VhdsIntegrationTest, RdsUpdateWithoutVHDSChangesDoesNotRestartVHDS) { // Confirm vhost_0 that was originally configured via VHDS is reachable testRouterHeaderOnlyRequestAndResponse(nullptr, 1, "/", "host"); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); } // tests a scenario when: @@ -416,7 +416,7 @@ TEST_P(VhdsIntegrationTest, VhdsVirtualHostAddUpdateRemove) { // Calls our initialize(), which includes establishing a listener, route, and cluster. 
testRouterHeaderOnlyRequestAndResponse(nullptr, 1); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); // A spontaneous VHDS DiscoveryResponse adds two virtual hosts sendDeltaDiscoveryResponse( @@ -426,10 +426,10 @@ TEST_P(VhdsIntegrationTest, VhdsVirtualHostAddUpdateRemove) { testRouterHeaderOnlyRequestAndResponse(nullptr, 1, "/one", "vhost.first"); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); testRouterHeaderOnlyRequestAndResponse(nullptr, 1, "/two", "vhost.second"); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); // A spontaneous VHDS DiscoveryResponse removes newly added virtual hosts sendDeltaDiscoveryResponse( @@ -475,7 +475,7 @@ TEST_P(VhdsIntegrationTest, RdsWithVirtualHostsVhdsVirtualHostAddUpdateRemove) { testRouterHeaderOnlyRequestAndResponse(nullptr, 1); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); // A spontaneous VHDS DiscoveryResponse adds two virtual hosts sendDeltaDiscoveryResponse( @@ -486,13 +486,13 @@ TEST_P(VhdsIntegrationTest, RdsWithVirtualHostsVhdsVirtualHostAddUpdateRemove) { // verify that rds-based virtual host can be resolved testRouterHeaderOnlyRequestAndResponse(nullptr, 1, "/rdsone", "vhost.rds.first"); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); testRouterHeaderOnlyRequestAndResponse(nullptr, 1, "/one", "vhost.first"); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); testRouterHeaderOnlyRequestAndResponse(nullptr, 1, "/two", "vhost.second"); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); // A spontaneous VHDS DiscoveryResponse removes virtual hosts added via vhds sendDeltaDiscoveryResponse( @@ -503,7 +503,7 @@ TEST_P(VhdsIntegrationTest, RdsWithVirtualHostsVhdsVirtualHostAddUpdateRemove) { // verify rds-based virtual host is still present testRouterHeaderOnlyRequestAndResponse(nullptr, 1, "/rdsone", "vhost.rds.first"); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); codec_client_ = makeHttpConnection(makeClientConnection((lookupPort("http")))); Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, @@ -538,12 +538,12 @@ TEST_P(VhdsIntegrationTest, VhdsOnDemandUpdateWithResourceNameAsAlias) { testRouterHeaderOnlyRequestAndResponse(nullptr, 1); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); // verify that rds-based virtual host can be resolved testRouterHeaderOnlyRequestAndResponse(nullptr, 1, "/rdsone", "vhost.rds.first"); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); // Attempt to make a request to an unknown host codec_client_ = makeHttpConnection(makeClientConnection((lookupPort("http")))); @@ -584,12 +584,12 @@ TEST_P(VhdsIntegrationTest, VhdsOnDemandUpdateFailToResolveTheAlias) { testRouterHeaderOnlyRequestAndResponse(nullptr, 1); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); // verify that rds-based virtual host can be resolved 
testRouterHeaderOnlyRequestAndResponse(nullptr, 1, "/rdsone", "vhost.rds.first"); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); // Attempt to make a request to an unknown host codec_client_ = makeHttpConnection(makeClientConnection((lookupPort("http")))); @@ -625,12 +625,12 @@ TEST_P(VhdsIntegrationTest, VhdsOnDemandUpdateFailToResolveOneAliasOutOfSeveral) testRouterHeaderOnlyRequestAndResponse(nullptr, 1); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); // verify that rds-based virtual host can be resolved testRouterHeaderOnlyRequestAndResponse(nullptr, 1, "/rdsone", "vhost.rds.first"); cleanupUpstreamAndDownstream(); - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); // Attempt to make a request to an unknown host codec_client_ = makeHttpConnection(makeClientConnection((lookupPort("http")))); diff --git a/test/integration/websocket_integration_test.h b/test/integration/websocket_integration_test.h index 0657a8fa5a57..c060f043c732 100644 --- a/test/integration/websocket_integration_test.h +++ b/test/integration/websocket_integration_test.h @@ -39,7 +39,7 @@ class WebsocketIntegrationTest : public HttpProtocolIntegrationTest { if (downstreamProtocol() != Http::CodecClient::Type::HTTP1) { response_->waitForReset(); } else { - codec_client_->waitForDisconnect(); + ASSERT_TRUE(codec_client_->waitForDisconnect()); } } From 192cd1341643f3a01bb27f992bbb6f0950eef867 Mon Sep 17 00:00:00 2001 From: Yutong Li Date: Wed, 3 Jun 2020 17:17:12 -0700 Subject: [PATCH 285/909] api: add eds to config dump (#11425) Add EndpointsConfigDump message to support EDS in config_dump.proto (not implemented in Envoy) Additional Description: Risk Level: Low Testing: N/A Docs Changes: N/A Release Notes: N/A This is the first step to solve #3362 Signed-off-by: Yutong Li --- api/envoy/admin/v3/config_dump.proto | 34 +++++++++++++++ api/envoy/admin/v4alpha/config_dump.proto | 42 +++++++++++++++++++ .../envoy/admin/v3/config_dump.proto | 34 +++++++++++++++ .../envoy/admin/v4alpha/config_dump.proto | 42 +++++++++++++++++++ 4 files changed, 152 insertions(+) diff --git a/api/envoy/admin/v3/config_dump.proto b/api/envoy/admin/v3/config_dump.proto index b3c3836a8cc0..0f51c56e6b37 100644 --- a/api/envoy/admin/v3/config_dump.proto +++ b/api/envoy/admin/v3/config_dump.proto @@ -32,6 +32,7 @@ message ConfigDump { // * *clusters*: :ref:`ClustersConfigDump ` // * *listeners*: :ref:`ListenersConfigDump ` // * *routes*: :ref:`RoutesConfigDump ` + // [#not-implemented-hide:] * *endpoints*: :ref:`EndpointsConfigDump ` // // You can filter output with the resource and mask query parameters. // See :ref:`/config_dump?resource={} `, @@ -346,3 +347,36 @@ message SecretsConfigDump { // warming in preparation to service clusters or listeners. repeated DynamicSecret dynamic_warming_secrets = 3; } + +// [#not-implemented-hide:] +// Envoy's EDS implementation *will* fill this message with all currently known endpoints. Endpoint +// configuration information can be used to recreate an Envoy configuration by populating all +// endpoints as static endpoints or by returning them in an EDS response. +message EndpointsConfigDump { + message StaticEndpointConfig { + // The endpoint config. + google.protobuf.Any endpoint_config = 1; + + // The timestamp when the Endpoint was last updated. 
+ google.protobuf.Timestamp last_updated = 2; + } + + message DynamicEndpointConfig { + // This is the per-resource version information. This version is currently taken from the + // :ref:`version_info ` field at the time that + // the endpoint configuration was loaded. + string version_info = 1; + + // The endpoint config. + google.protobuf.Any endpoint_config = 2; + + // The timestamp when the Endpoint was last updated. + google.protobuf.Timestamp last_updated = 3; + } + + // The statically loaded endpoint configs. + repeated StaticEndpointConfig static_endpoint_configs = 2; + + // The dynamically loaded endpoint configs. + repeated DynamicEndpointConfig dynamic_endpoint_configs = 3; +} diff --git a/api/envoy/admin/v4alpha/config_dump.proto b/api/envoy/admin/v4alpha/config_dump.proto index 02709a414506..ca1399b21deb 100644 --- a/api/envoy/admin/v4alpha/config_dump.proto +++ b/api/envoy/admin/v4alpha/config_dump.proto @@ -32,6 +32,7 @@ message ConfigDump { // * *clusters*: :ref:`ClustersConfigDump ` // * *listeners*: :ref:`ListenersConfigDump ` // * *routes*: :ref:`RoutesConfigDump ` + // [#not-implemented-hide:] * *endpoints*: :ref:`EndpointsConfigDump ` // // You can filter output with the resource and mask query parameters. // See :ref:`/config_dump?resource={} `, @@ -340,3 +341,44 @@ message SecretsConfigDump { // warming in preparation to service clusters or listeners. repeated DynamicSecret dynamic_warming_secrets = 3; } + +// [#not-implemented-hide:] +// Envoy's EDS implementation *will* fill this message with all currently known endpoints. Endpoint +// configuration information can be used to recreate an Envoy configuration by populating all +// endpoints as static endpoints or by returning them in an EDS response. +message EndpointsConfigDump { + option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.EndpointsConfigDump"; + + message StaticEndpointConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.admin.v3.EndpointsConfigDump.StaticEndpointConfig"; + + // The endpoint config. + google.protobuf.Any endpoint_config = 1; + + // The timestamp when the Endpoint was last updated. + google.protobuf.Timestamp last_updated = 2; + } + + message DynamicEndpointConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.admin.v3.EndpointsConfigDump.DynamicEndpointConfig"; + + // This is the per-resource version information. This version is currently taken from the + // :ref:`version_info ` field at the time that + // the endpoint configuration was loaded. + string version_info = 1; + + // The endpoint config. + google.protobuf.Any endpoint_config = 2; + + // The timestamp when the Endpoint was last updated. + google.protobuf.Timestamp last_updated = 3; + } + + // The statically loaded endpoint configs. + repeated StaticEndpointConfig static_endpoint_configs = 2; + + // The dynamically loaded endpoint configs. 
+ repeated DynamicEndpointConfig dynamic_endpoint_configs = 3; +} diff --git a/generated_api_shadow/envoy/admin/v3/config_dump.proto b/generated_api_shadow/envoy/admin/v3/config_dump.proto index b3c3836a8cc0..0f51c56e6b37 100644 --- a/generated_api_shadow/envoy/admin/v3/config_dump.proto +++ b/generated_api_shadow/envoy/admin/v3/config_dump.proto @@ -32,6 +32,7 @@ message ConfigDump { // * *clusters*: :ref:`ClustersConfigDump ` // * *listeners*: :ref:`ListenersConfigDump ` // * *routes*: :ref:`RoutesConfigDump ` + // [#not-implemented-hide:] * *endpoints*: :ref:`EndpointsConfigDump ` // // You can filter output with the resource and mask query parameters. // See :ref:`/config_dump?resource={} `, @@ -346,3 +347,36 @@ message SecretsConfigDump { // warming in preparation to service clusters or listeners. repeated DynamicSecret dynamic_warming_secrets = 3; } + +// [#not-implemented-hide:] +// Envoy's EDS implementation *will* fill this message with all currently known endpoints. Endpoint +// configuration information can be used to recreate an Envoy configuration by populating all +// endpoints as static endpoints or by returning them in an EDS response. +message EndpointsConfigDump { + message StaticEndpointConfig { + // The endpoint config. + google.protobuf.Any endpoint_config = 1; + + // The timestamp when the Endpoint was last updated. + google.protobuf.Timestamp last_updated = 2; + } + + message DynamicEndpointConfig { + // This is the per-resource version information. This version is currently taken from the + // :ref:`version_info ` field at the time that + // the endpoint configuration was loaded. + string version_info = 1; + + // The endpoint config. + google.protobuf.Any endpoint_config = 2; + + // The timestamp when the Endpoint was last updated. + google.protobuf.Timestamp last_updated = 3; + } + + // The statically loaded endpoint configs. + repeated StaticEndpointConfig static_endpoint_configs = 2; + + // The dynamically loaded endpoint configs. + repeated DynamicEndpointConfig dynamic_endpoint_configs = 3; +} diff --git a/generated_api_shadow/envoy/admin/v4alpha/config_dump.proto b/generated_api_shadow/envoy/admin/v4alpha/config_dump.proto index 02709a414506..ca1399b21deb 100644 --- a/generated_api_shadow/envoy/admin/v4alpha/config_dump.proto +++ b/generated_api_shadow/envoy/admin/v4alpha/config_dump.proto @@ -32,6 +32,7 @@ message ConfigDump { // * *clusters*: :ref:`ClustersConfigDump ` // * *listeners*: :ref:`ListenersConfigDump ` // * *routes*: :ref:`RoutesConfigDump ` + // [#not-implemented-hide:] * *endpoints*: :ref:`EndpointsConfigDump ` // // You can filter output with the resource and mask query parameters. // See :ref:`/config_dump?resource={} `, @@ -340,3 +341,44 @@ message SecretsConfigDump { // warming in preparation to service clusters or listeners. repeated DynamicSecret dynamic_warming_secrets = 3; } + +// [#not-implemented-hide:] +// Envoy's EDS implementation *will* fill this message with all currently known endpoints. Endpoint +// configuration information can be used to recreate an Envoy configuration by populating all +// endpoints as static endpoints or by returning them in an EDS response. +message EndpointsConfigDump { + option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.EndpointsConfigDump"; + + message StaticEndpointConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.admin.v3.EndpointsConfigDump.StaticEndpointConfig"; + + // The endpoint config. 
+ google.protobuf.Any endpoint_config = 1; + + // The timestamp when the Endpoint was last updated. + google.protobuf.Timestamp last_updated = 2; + } + + message DynamicEndpointConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.admin.v3.EndpointsConfigDump.DynamicEndpointConfig"; + + // This is the per-resource version information. This version is currently taken from the + // :ref:`version_info ` field at the time that + // the endpoint configuration was loaded. + string version_info = 1; + + // The endpoint config. + google.protobuf.Any endpoint_config = 2; + + // The timestamp when the Endpoint was last updated. + google.protobuf.Timestamp last_updated = 3; + } + + // The statically loaded endpoint configs. + repeated StaticEndpointConfig static_endpoint_configs = 2; + + // The dynamically loaded endpoint configs. + repeated DynamicEndpointConfig dynamic_endpoint_configs = 3; +} From ed300943217723dbc449480afd3f967e08ac4c95 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Wed, 3 Jun 2020 21:01:18 -0400 Subject: [PATCH 286/909] fixing coverage issue (#11402) Fixing the coverage scripts to correctly output extensions failing coverage limits. Risk Level: n/a Testing: manual testing on first commit Signed-off-by: Alyssa Wilk --- test/run_envoy_bazel_coverage.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/run_envoy_bazel_coverage.sh b/test/run_envoy_bazel_coverage.sh index 29a251d3df9e..68d5ac1567c3 100755 --- a/test/run_envoy_bazel_coverage.sh +++ b/test/run_envoy_bazel_coverage.sh @@ -60,6 +60,8 @@ if [[ "$VALIDATE_COVERAGE" == "true" ]]; then fi fi +# We want to allow per_file_coverage to fail without exiting this script. +set +e if [[ "$VALIDATE_COVERAGE" == "true" ]]; then echo "Checking per-extension coverage" output=$(./test/per_file_coverage.sh) From 8cb2958f29af3e45e87653356b6099bd3a4cbfbf Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Wed, 3 Jun 2020 21:35:56 -0700 Subject: [PATCH 287/909] api: add bootstrap extensions (#11413) Commit Message: Adds a configurable server wide extension hook point. Allow extensions to instantiate singletons / context managers with configs in bootstrap. Additional Description: Risk Level: Low (not used API) Testing: unittest, mock Docs Changes: protodoc Release Notes: N/A as no real extension lives today. 
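[Editorial note] As a rough illustration of how an extension would hang off this new hook point, a concrete factory might look like the sketch below. It is not part of this change: the FooBootstrapExtension class is hypothetical, and the test.common.config.DummyConfig proto is borrowed from the unit test added further down; only the BootstrapExtensionFactory interface itself comes from this patch.

// Minimal sketch of a bootstrap extension (illustrative names, not shipped code).
#include "common/common/logger.h"

#include "envoy/registry/registry.h"
#include "envoy/server/bootstrap_extension_config.h"

#include "test/common/config/dummy_config.pb.h"

namespace Envoy {
namespace Server {

class FooBootstrapExtension : public BootstrapExtension {};

class FooBootstrapExtensionFactory : public Configuration::BootstrapExtensionFactory {
public:
  std::string name() const override { return "envoy_test.bootstrap.foo"; }

  ProtobufTypes::MessagePtr createEmptyConfigProto() override {
    return std::make_unique<test::common::config::DummyConfig>();
  }

  BootstrapExtensionPtr
  createBootstrapExtension(const Protobuf::Message& config,
                           Configuration::ServerFactoryContext&) override {
    // By the time this is called, server.cc has already translated the Any from
    // bootstrap_extensions into the factory's empty config proto type.
    const auto& dummy = dynamic_cast<const test::common::config::DummyConfig&>(config);
    ENVOY_LOG_MISC(info, "foo bootstrap extension configured with a={}", dummy.a());
    return std::make_unique<FooBootstrapExtension>();
  }
};

// Registration makes the factory discoverable by the name referenced in bootstrap_extensions.
REGISTER_FACTORY(FooBootstrapExtensionFactory, Configuration::BootstrapExtensionFactory);

} // namespace Server
} // namespace Envoy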
Fixes #11219 Signed-off-by: Lizan Zhou --- api/envoy/config/bootstrap/v3/bootstrap.proto | 7 ++- .../config/bootstrap/v4alpha/bootstrap.proto | 7 ++- .../envoy/config/bootstrap/v3/bootstrap.proto | 7 ++- .../config/bootstrap/v4alpha/bootstrap.proto | 7 ++- include/envoy/server/BUILD | 9 ++++ .../envoy/server/bootstrap_extension_config.h | 47 +++++++++++++++++ source/server/BUILD | 1 + source/server/server.cc | 11 ++++ source/server/server.h | 2 + test/mocks/server/BUILD | 1 + test/mocks/server/mocks.cc | 3 ++ test/mocks/server/mocks.h | 12 +++++ test/server/BUILD | 1 + test/server/server_test.cc | 50 +++++++++++++++++++ .../server/bootstrap_extensions.yaml | 5 ++ 15 files changed, 166 insertions(+), 4 deletions(-) create mode 100644 include/envoy/server/bootstrap_extension_config.h create mode 100644 test/server/test_data/server/bootstrap_extensions.yaml diff --git a/api/envoy/config/bootstrap/v3/bootstrap.proto b/api/envoy/config/bootstrap/v3/bootstrap.proto index 8eba15a5ba72..22337ab514b5 100644 --- a/api/envoy/config/bootstrap/v3/bootstrap.proto +++ b/api/envoy/config/bootstrap/v3/bootstrap.proto @@ -7,6 +7,7 @@ import "envoy/config/core/v3/address.proto"; import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/config_source.proto"; import "envoy/config/core/v3/event_service_config.proto"; +import "envoy/config/core/v3/extension.proto"; import "envoy/config/core/v3/socket_option.proto"; import "envoy/config/listener/v3/listener.proto"; import "envoy/config/metrics/v3/stats.proto"; @@ -35,7 +36,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // ` for more detail. // Bootstrap :ref:`configuration overview `. -// [#next-free-field: 21] +// [#next-free-field: 22] message Bootstrap { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v2.Bootstrap"; @@ -181,6 +182,10 @@ message Bootstrap { // :ref:`use_tcp_for_dns_lookups ` are // specified. bool use_tcp_for_dns_lookups = 20; + + // Specifies optional bootstrap extensions to be instantiated at startup time. + // Each item contains extension specific configuration. + repeated core.v3.TypedExtensionConfig bootstrap_extensions = 21; } // Administration interface :ref:`operations documentation diff --git a/api/envoy/config/bootstrap/v4alpha/bootstrap.proto b/api/envoy/config/bootstrap/v4alpha/bootstrap.proto index bd4169356a4e..328ccae67a5e 100644 --- a/api/envoy/config/bootstrap/v4alpha/bootstrap.proto +++ b/api/envoy/config/bootstrap/v4alpha/bootstrap.proto @@ -7,6 +7,7 @@ import "envoy/config/core/v4alpha/address.proto"; import "envoy/config/core/v4alpha/base.proto"; import "envoy/config/core/v4alpha/config_source.proto"; import "envoy/config/core/v4alpha/event_service_config.proto"; +import "envoy/config/core/v4alpha/extension.proto"; import "envoy/config/core/v4alpha/socket_option.proto"; import "envoy/config/listener/v4alpha/listener.proto"; import "envoy/config/metrics/v4alpha/stats.proto"; @@ -34,7 +35,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // ` for more detail. // Bootstrap :ref:`configuration overview `. -// [#next-free-field: 21] +// [#next-free-field: 22] message Bootstrap { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.Bootstrap"; @@ -173,6 +174,10 @@ message Bootstrap { // :ref:`use_tcp_for_dns_lookups ` are // specified. bool use_tcp_for_dns_lookups = 20; + + // Specifies optional bootstrap extensions to be instantiated at startup time. 
+ // Each item contains extension specific configuration. + repeated core.v4alpha.TypedExtensionConfig bootstrap_extensions = 21; } // Administration interface :ref:`operations documentation diff --git a/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto b/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto index de0bc8ffa443..224328ef5bd0 100644 --- a/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto +++ b/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto @@ -7,6 +7,7 @@ import "envoy/config/core/v3/address.proto"; import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/config_source.proto"; import "envoy/config/core/v3/event_service_config.proto"; +import "envoy/config/core/v3/extension.proto"; import "envoy/config/core/v3/socket_option.proto"; import "envoy/config/listener/v3/listener.proto"; import "envoy/config/metrics/v3/stats.proto"; @@ -35,7 +36,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // ` for more detail. // Bootstrap :ref:`configuration overview `. -// [#next-free-field: 21] +// [#next-free-field: 22] message Bootstrap { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v2.Bootstrap"; @@ -180,6 +181,10 @@ message Bootstrap { // specified. bool use_tcp_for_dns_lookups = 20; + // Specifies optional bootstrap extensions to be instantiated at startup time. + // Each item contains extension specific configuration. + repeated core.v3.TypedExtensionConfig bootstrap_extensions = 21; + Runtime hidden_envoy_deprecated_runtime = 11 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; } diff --git a/generated_api_shadow/envoy/config/bootstrap/v4alpha/bootstrap.proto b/generated_api_shadow/envoy/config/bootstrap/v4alpha/bootstrap.proto index ec0a4b3d6a89..86bbf02e32f6 100644 --- a/generated_api_shadow/envoy/config/bootstrap/v4alpha/bootstrap.proto +++ b/generated_api_shadow/envoy/config/bootstrap/v4alpha/bootstrap.proto @@ -7,6 +7,7 @@ import "envoy/config/core/v4alpha/address.proto"; import "envoy/config/core/v4alpha/base.proto"; import "envoy/config/core/v4alpha/config_source.proto"; import "envoy/config/core/v4alpha/event_service_config.proto"; +import "envoy/config/core/v4alpha/extension.proto"; import "envoy/config/core/v4alpha/socket_option.proto"; import "envoy/config/listener/v4alpha/listener.proto"; import "envoy/config/metrics/v4alpha/stats.proto"; @@ -35,7 +36,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // ` for more detail. // Bootstrap :ref:`configuration overview `. -// [#next-free-field: 21] +// [#next-free-field: 22] message Bootstrap { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.Bootstrap"; @@ -181,6 +182,10 @@ message Bootstrap { // :ref:`use_tcp_for_dns_lookups ` are // specified. bool use_tcp_for_dns_lookups = 20; + + // Specifies optional bootstrap extensions to be instantiated at startup time. + // Each item contains extension specific configuration. 
+ repeated core.v4alpha.TypedExtensionConfig bootstrap_extensions = 21; } // Administration interface :ref:`operations documentation diff --git a/include/envoy/server/BUILD b/include/envoy/server/BUILD index 2388482f251c..8803acb34cc5 100644 --- a/include/envoy/server/BUILD +++ b/include/envoy/server/BUILD @@ -293,3 +293,12 @@ envoy_cc_library( "//include/envoy/network:connection_handler_interface", ], ) + +envoy_cc_library( + name = "bootstrap_extension_config_interface", + hdrs = ["bootstrap_extension_config.h"], + deps = [ + ":filter_config_interface", + "//include/envoy/config:typed_config_interface", + ], +) diff --git a/include/envoy/server/bootstrap_extension_config.h b/include/envoy/server/bootstrap_extension_config.h new file mode 100644 index 000000000000..9b0d6e043396 --- /dev/null +++ b/include/envoy/server/bootstrap_extension_config.h @@ -0,0 +1,47 @@ +#pragma once + +#include + +#include "envoy/server/filter_config.h" + +#include "common/protobuf/protobuf.h" + +namespace Envoy { +namespace Server { + +/** + * Parent class for bootstrap extensions. + */ +class BootstrapExtension { +public: + virtual ~BootstrapExtension() = default; +}; + +using BootstrapExtensionPtr = std::unique_ptr; + +namespace Configuration { + +/** + * Implemented for each bootstrap extension and registered via Registry::registerFactory or the + * convenience class RegisterFactory. + */ +class BootstrapExtensionFactory : public Config::TypedFactory { +public: + ~BootstrapExtensionFactory() override = default; + + /** + * Create a particular bootstrap extension implementation from a config proto. If the + * implementation is unable to produce a factory with the provided parameters, it should throw an + * EnvoyException. The returned pointer should never be nullptr. + * @param config the custom configuration for this bootstrap extension type. + * @param context general filter context through which persistent resources can be accessed. + */ + virtual BootstrapExtensionPtr createBootstrapExtension(const Protobuf::Message& config, + ServerFactoryContext& context) PURE; + + std::string category() const override { return "envoy.bootstrap"; } +}; + +} // namespace Configuration +} // namespace Server +} // namespace Envoy diff --git a/source/server/BUILD b/source/server/BUILD index eefcabc7d458..e92701862188 100644 --- a/source/server/BUILD +++ b/source/server/BUILD @@ -397,6 +397,7 @@ envoy_cc_library( "//include/envoy/event:signal_interface", "//include/envoy/event:timer_interface", "//include/envoy/network:dns_interface", + "//include/envoy/server:bootstrap_extension_config_interface", "//include/envoy/server:drain_manager_interface", "//include/envoy/server:instance_interface", "//include/envoy/server:listener_manager_interface", diff --git a/source/server/server.cc b/source/server/server.cc index 58ef0505a227..b1e31d7ce0ed 100644 --- a/source/server/server.cc +++ b/source/server/server.cc @@ -18,6 +18,7 @@ #include "envoy/event/timer.h" #include "envoy/network/dns.h" #include "envoy/registry/registry.h" +#include "envoy/server/bootstrap_extension_config.h" #include "envoy/server/options.h" #include "envoy/upstream/cluster_manager.h" @@ -481,6 +482,16 @@ void InstanceImpl::initialize(const Options& options, // GuardDog (deadlock detection) object and thread setup before workers are // started and before our own run() loop runs. 
guard_dog_ = std::make_unique(stats_store_, config_, *api_); + + for (const auto& bootstrap_extension : bootstrap_.bootstrap_extensions()) { + auto& factory = Config::Utility::getAndCheckFactory( + bootstrap_extension); + auto config = Config::Utility::translateAnyToFactoryConfig( + bootstrap_extension.typed_config(), messageValidationContext().staticValidationVisitor(), + factory); + bootstrap_extensions_.push_back( + factory.createBootstrapExtension(*config, serverFactoryContext())); + } } void InstanceImpl::onClusterManagerPrimaryInitializationComplete() { diff --git a/source/server/server.h b/source/server/server.h index 56e0eb188616..2ec00d266696 100644 --- a/source/server/server.h +++ b/source/server/server.h @@ -10,6 +10,7 @@ #include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/event/timer.h" +#include "envoy/server/bootstrap_extension_config.h" #include "envoy/server/drain_manager.h" #include "envoy/server/guarddog.h" #include "envoy/server/instance.h" @@ -346,6 +347,7 @@ class InstanceImpl final : Logger::Loggable, Upstream::ProdClusterInfoFactory info_factory_; Upstream::HdsDelegatePtr hds_delegate_; std::unique_ptr overload_manager_; + std::vector bootstrap_extensions_; Envoy::MutexTracer* mutex_tracer_; Grpc::ContextImpl grpc_context_; Http::ContextImpl http_context_; diff --git a/test/mocks/server/BUILD b/test/mocks/server/BUILD index 067c98d846f6..c6fcf62562ed 100644 --- a/test/mocks/server/BUILD +++ b/test/mocks/server/BUILD @@ -15,6 +15,7 @@ envoy_cc_mock( deps = [ "//include/envoy/secret:secret_manager_interface", "//include/envoy/server:admin_interface", + "//include/envoy/server:bootstrap_extension_config_interface", "//include/envoy/server:configuration_interface", "//include/envoy/server:drain_manager_interface", "//include/envoy/server:filter_config_interface", diff --git a/test/mocks/server/mocks.cc b/test/mocks/server/mocks.cc index b85e9924ecfa..0ea123678448 100644 --- a/test/mocks/server/mocks.cc +++ b/test/mocks/server/mocks.cc @@ -303,6 +303,9 @@ MockTracerFactoryContext::MockTracerFactoryContext() { } MockTracerFactoryContext::~MockTracerFactoryContext() = default; + +MockBootstrapExtensionFactory::MockBootstrapExtensionFactory() = default; +MockBootstrapExtensionFactory::~MockBootstrapExtensionFactory() = default; } // namespace Configuration } // namespace Server } // namespace Envoy diff --git a/test/mocks/server/mocks.h b/test/mocks/server/mocks.h index 70cb86598c90..50c73fb1036b 100644 --- a/test/mocks/server/mocks.h +++ b/test/mocks/server/mocks.h @@ -13,6 +13,7 @@ #include "envoy/config/listener/v3/listener_components.pb.h" #include "envoy/protobuf/message_validator.h" #include "envoy/server/admin.h" +#include "envoy/server/bootstrap_extension_config.h" #include "envoy/server/configuration.h" #include "envoy/server/drain_manager.h" #include "envoy/server/filter_config.h" @@ -679,6 +680,17 @@ class MockTracerFactoryContext : public TracerFactoryContext { testing::NiceMock server_factory_context_; }; +class MockBootstrapExtensionFactory : public BootstrapExtensionFactory { +public: + MockBootstrapExtensionFactory(); + ~MockBootstrapExtensionFactory() override; + + MOCK_METHOD(BootstrapExtensionPtr, createBootstrapExtension, + (const Protobuf::Message&, Configuration::ServerFactoryContext&), (override)); + MOCK_METHOD(ProtobufTypes::MessagePtr, createEmptyConfigProto, (), (override)); + MOCK_METHOD(std::string, name, (), (const override)); +}; + } // namespace Configuration } // namespace Server } // namespace Envoy diff --git 
a/test/server/BUILD b/test/server/BUILD index d6e7b23cc8cb..7706a8a85baf 100644 --- a/test/server/BUILD +++ b/test/server/BUILD @@ -357,6 +357,7 @@ envoy_cc_test( "//source/extensions/tracers/zipkin:config", "//source/server:process_context_lib", "//source/server:server_lib", + "//test/common/config:dummy_config_proto_cc_proto", "//test/common/stats:stat_test_utility_lib", "//test/integration:integration_lib", "//test/mocks/server:server_mocks", diff --git a/test/server/server_test.cc b/test/server/server_test.cc index b897be2418a1..806ef87a5837 100644 --- a/test/server/server_test.cc +++ b/test/server/server_test.cc @@ -1,17 +1,20 @@ #include #include "envoy/config/core/v3/base.pb.h" +#include "envoy/server/bootstrap_extension_config.h" #include "common/common/assert.h" #include "common/common/version.h" #include "common/network/address_impl.h" #include "common/network/listen_socket_impl.h" #include "common/network/socket_option_impl.h" +#include "common/protobuf/protobuf.h" #include "common/thread_local/thread_local_impl.h" #include "server/process_context_impl.h" #include "server/server.h" +#include "test/common/config/dummy_config.pb.h" #include "test/common/stats/stat_test_utility.h" #include "test/integration/server.h" #include "test/mocks/server/mocks.h" @@ -1051,6 +1054,53 @@ TEST_P(ServerInstanceImplTest, WithProcessContext) { EXPECT_FALSE(object_from_context.boolean_flag_); } +class FooBootstrapExtension : public BootstrapExtension {}; + +TEST_P(ServerInstanceImplTest, WithBootstrapExtensions) { + NiceMock mock_factory; + EXPECT_CALL(mock_factory, createEmptyConfigProto()).WillRepeatedly(Invoke([]() { + return std::make_unique(); + })); + EXPECT_CALL(mock_factory, name()).WillRepeatedly(Return("envoy_test.bootstrap.foo")); + EXPECT_CALL(mock_factory, createBootstrapExtension(_, _)) + .WillOnce(Invoke([](const Protobuf::Message& config, Configuration::ServerFactoryContext&) { + const auto* proto = dynamic_cast(&config); + EXPECT_NE(nullptr, proto); + EXPECT_EQ(proto->a(), "foo"); + return std::make_unique(); + })); + + Registry::InjectFactory registered_factory( + mock_factory); + + EXPECT_NO_THROW(initialize("test/server/test_data/server/bootstrap_extensions.yaml")); +} + +TEST_P(ServerInstanceImplTest, WithBootstrapExtensionsThrowingError) { + NiceMock mock_factory; + EXPECT_CALL(mock_factory, createEmptyConfigProto()).WillRepeatedly(Invoke([]() { + return std::make_unique(); + })); + EXPECT_CALL(mock_factory, name()).WillRepeatedly(Return("envoy_test.bootstrap.foo")); + EXPECT_CALL(mock_factory, createBootstrapExtension(_, _)) + .WillOnce(Invoke([](const Protobuf::Message&, + Configuration::ServerFactoryContext&) -> BootstrapExtensionPtr { + throw EnvoyException("Unable to initiate mock_bootstrap_extension."); + })); + + Registry::InjectFactory registered_factory( + mock_factory); + + EXPECT_THROW_WITH_REGEX(initialize("test/server/test_data/server/bootstrap_extensions.yaml"), + EnvoyException, "Unable to initiate mock_bootstrap_extension."); +} + +TEST_P(ServerInstanceImplTest, WithUnknownBootstrapExtensions) { + EXPECT_THROW_WITH_REGEX( + initialize("test/server/test_data/server/bootstrap_extensions.yaml"), EnvoyException, + "Didn't find a registered implementation for name: 'envoy_test.bootstrap.foo'"); +} + // Static configuration validation. We test with both allow/reject settings various aspects of // configuration from YAML. 
class StaticValidationTest diff --git a/test/server/test_data/server/bootstrap_extensions.yaml b/test/server/test_data/server/bootstrap_extensions.yaml new file mode 100644 index 000000000000..8a85583403c9 --- /dev/null +++ b/test/server/test_data/server/bootstrap_extensions.yaml @@ -0,0 +1,5 @@ +bootstrap_extensions: + - name: envoy_test.bootstrap.foo + typed_config: + "@type": type.googleapis.com/test.common.config.DummyConfig + a: foo From 69f2dfcae15401f14d7cc3c829fd32ff2efc9f0c Mon Sep 17 00:00:00 2001 From: Stephan Zuercher Date: Thu, 4 Jun 2020 08:13:41 -0700 Subject: [PATCH 288/909] hot restart: provide a mechanism for obtaining a base-id dynamically (#11357) Provides a --use-dynamic-base-id flag to select an unused base-id. Primarily useful for testing, but generally available. Adds a --base-id-path flag where Envoy writes the base id to a file. Converts tests to use the dynamic base id selection rather than trying to keep all base ids unique. Signed-off-by: Stephan Zuercher --- api/envoy/admin/v3/server_info.proto | 8 ++- api/envoy/admin/v4alpha/server_info.proto | 8 ++- .../arch_overview/operations/hot_restart.rst | 6 ++ docs/root/operations/cli.rst | 30 ++++++--- docs/root/operations/hot_restarter.rst | 13 +++- docs/root/version_history/current.rst | 3 +- .../envoy/admin/v3/server_info.proto | 8 ++- .../envoy/admin/v4alpha/server_info.proto | 8 ++- include/envoy/server/hot_restart.h | 14 ++++ include/envoy/server/options.h | 11 ++++ source/exe/main_common.cc | 65 ++++++++++++++++--- source/exe/main_common.h | 1 + source/server/hot_restart_impl.cc | 26 +++++--- source/server/hot_restart_impl.h | 10 ++- source/server/hot_restart_nop_impl.h | 1 + source/server/hot_restarting_base.cc | 8 ++- source/server/options_impl.cc | 24 +++++-- source/server/options_impl.h | 6 ++ source/server/server.cc | 4 +- test/exe/BUILD | 1 + test/exe/main_common_test.cc | 63 +++++++++++++++--- test/integration/hotrestart_test.sh | 27 +++++--- test/integration/run_envoy_test.sh | 9 +-- test/mocks/server/mocks.h | 3 + test/server/hot_restart_impl_test.cc | 23 ++++++- test/server/options_impl_test.cc | 25 ++++++- test/test_common/environment.cc | 24 ------- test/test_common/environment.h | 41 ------------ 28 files changed, 330 insertions(+), 140 deletions(-) diff --git a/api/envoy/admin/v3/server_info.proto b/api/envoy/admin/v3/server_info.proto index b89e58749f7e..a89834bef5e6 100644 --- a/api/envoy/admin/v3/server_info.proto +++ b/api/envoy/admin/v3/server_info.proto @@ -54,7 +54,7 @@ message ServerInfo { CommandLineOptions command_line_options = 6; } -// [#next-free-field: 31] +// [#next-free-field: 33] message CommandLineOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.CommandLineOptions"; @@ -82,6 +82,12 @@ message CommandLineOptions { // See :option:`--base-id` for details. uint64 base_id = 1; + // See :option:`--use-dynamic-base-id` for details. + bool use_dynamic_base_id = 31; + + // See :option:`--base-id-path` for details. + string base_id_path = 32; + // See :option:`--concurrency` for details. 
uint32 concurrency = 2; diff --git a/api/envoy/admin/v4alpha/server_info.proto b/api/envoy/admin/v4alpha/server_info.proto index b9e8c3043002..04f1f1ef36d7 100644 --- a/api/envoy/admin/v4alpha/server_info.proto +++ b/api/envoy/admin/v4alpha/server_info.proto @@ -54,7 +54,7 @@ message ServerInfo { CommandLineOptions command_line_options = 6; } -// [#next-free-field: 31] +// [#next-free-field: 33] message CommandLineOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.CommandLineOptions"; @@ -81,6 +81,12 @@ message CommandLineOptions { // See :option:`--base-id` for details. uint64 base_id = 1; + // See :option:`--use-dynamic-base-id` for details. + bool use_dynamic_base_id = 31; + + // See :option:`--base-id-path` for details. + string base_id_path = 32; + // See :option:`--concurrency` for details. uint32 concurrency = 2; diff --git a/docs/root/intro/arch_overview/operations/hot_restart.rst b/docs/root/intro/arch_overview/operations/hot_restart.rst index 0add1f3fb2f1..38a4dc35c0ae 100644 --- a/docs/root/intro/arch_overview/operations/hot_restart.rst +++ b/docs/root/intro/arch_overview/operations/hot_restart.rst @@ -26,3 +26,9 @@ hot restart functionality has the following general architecture: the processes takes place only using unix domain sockets. * An example restarter/parent process written in Python is included in the source distribution. This parent process is usable with standard process control utilities such as monit/runit/etc. + +Envoy's default command line options assume that only a single set of Envoy processes is running on +a given host: an active Envoy server process and, potentially, a draining Envoy server process that +will exit as described above. The :option:`--base-id` or :option:`--use-dynamic-base-id` options +may be used to allow multiple, distinctly configured Envoys to run on the same host and hot restart +independently. diff --git a/docs/root/operations/cli.rst b/docs/root/operations/cli.rst index ea446c82c1d0..8a5d029672b1 100644 --- a/docs/root/operations/cli.rst +++ b/docs/root/operations/cli.rst @@ -66,10 +66,24 @@ following are the command line options that Envoy supports. set this option. However, if Envoy needs to be run multiple times on the same machine, each running Envoy will need a unique base ID so that the shared memory regions do not conflict. +.. option:: --use-dynamic-base-id + + *(optional)* Selects an unused base ID to use when allocating shared memory regions. Using + preselected values with :option:`--base-id` is preferred, however. If this option is enabled, + it supersedes the :option:`--base-id` value. This flag may not be used when the value of + :option:`--restart-epoch` is non-zero. Instead, for subsequent hot restarts, set + :option:`--base-id` option with the selected base ID. See :option:`--base-id-path`. + +.. option:: --base-id-path + + *(optional)* Writes the base ID to the given path. While this option is compatible with + :option:`--base-id`, its intended use is to provide access to the dynamic base ID selected by + :option:`--use-dynamic-base-id`. + .. option:: --concurrency *(optional)* The number of :ref:`worker threads ` to run. If not - specified defaults to the number of hardware threads on the machine. If set to zero, Envoy will + specified defaults to the number of hardware threads on the machine. If set to zero, Envoy will still run one worker thread. .. option:: -l , --log-level @@ -79,9 +93,9 @@ following are the command line options that Envoy supports. .. 
option:: --component-log-level - *(optional)* The comma separated list of logging level per component. Non developers should generally - never set this option. For example, if you want `upstream` component to run at `debug` level and - `connection` component to run at `trace` level, you should pass ``upstream:debug,connection:trace`` to + *(optional)* The comma separated list of logging level per component. Non developers should generally + never set this option. For example, if you want `upstream` component to run at `debug` level and + `connection` component to run at `trace` level, you should pass ``upstream:debug,connection:trace`` to this flag. See ``ALL_LOGGER_IDS`` in :repo:`/source/common/common/logger.h` for a list of components. .. option:: --cpuset-threads @@ -239,11 +253,11 @@ following are the command line options that Envoy supports. .. option:: --drain-time-s - *(optional)* The time in seconds that Envoy will drain connections during + *(optional)* The time in seconds that Envoy will drain connections during a :ref:`hot restart ` or when individual listeners are being - modified or removed via :ref:`LDS `. - Defaults to 600 seconds (10 minutes). Generally the drain time should be less than - the parent shutdown time set via the :option:`--parent-shutdown-time-s` option. How the two + modified or removed via :ref:`LDS `. + Defaults to 600 seconds (10 minutes). Generally the drain time should be less than + the parent shutdown time set via the :option:`--parent-shutdown-time-s` option. How the two settings are configured depends on the specific deployment. In edge scenarios, it might be desirable to have a very long drain time. In service to service scenarios, it might be possible to make the drain and shutdown time much shorter (e.g., 60s/90s). diff --git a/docs/root/operations/hot_restarter.rst b/docs/root/operations/hot_restarter.rst index 72e09b097686..3cb902dedca0 100644 --- a/docs/root/operations/hot_restarter.rst +++ b/docs/root/operations/hot_restarter.rst @@ -21,15 +21,22 @@ The restarter is invoked like so: ulimit -n {{ pillar.get('envoy_max_open_files', '102400') }} sysctl fs.inotify.max_user_watches={{ pillar.get('envoy_max_inotify_watches', '524288') }} - + exec /usr/sbin/envoy -c /etc/envoy/envoy.cfg --restart-epoch $RESTART_EPOCH --service-cluster {{ grains['cluster_name'] }} --service-node {{ grains['service_node'] }} --service-zone {{ grains.get('ec2_availability-zone', 'unknown') }} Note on `inotify.max_user_watches`: If Envoy is being configured to watch many files for configuration in a directory on a Linux machine, increase this value as Linux enforces limits on the maximum number of files that can be watched. - -The *RESTART_EPOCH* environment variable is set by the restarter on each restart and can be passed + +The *RESTART_EPOCH* environment variable is set by the restarter on each restart and must be passed to the :option:`--restart-epoch` option. +.. warning:: + + Special care must be taken if you wish to use the :option:`--use-dynamic-base-id` option. That + flag may only be set when the *RESTART_EPOCH* is 0 and your *start_envoy.sh* must obtain the + chosen base ID (via :option:`--base-id-path`), store it, and use it as the :option:`--base-id` + value on subsequent invocations (when *RESTART_EPOCH* is greater than 0). + The restarter handles the following signals: * **SIGTERM** or **SIGINT** (Ctrl-C): Will cleanly terminate all child processes and exit. 
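[Editorial note] To make the warning above about :option:`--use-dynamic-base-id` concrete, a start_envoy.sh along the following lines could persist the dynamically chosen base ID on the first invocation and reuse it for later hot restarts. This is a sketch only; the file paths and config name are illustrative, while the flags and the *RESTART_EPOCH* convention are the ones documented in this patch.

#!/bin/bash
# Illustrative only: pick a free base ID on epoch 0, then reuse it for hot restarts.
BASE_ID_FILE=/var/run/envoy/base-id

if [ "$RESTART_EPOCH" -eq 0 ]; then
  exec /usr/sbin/envoy -c /etc/envoy/envoy.cfg --restart-epoch "$RESTART_EPOCH" \
       --use-dynamic-base-id --base-id-path "$BASE_ID_FILE"
else
  exec /usr/sbin/envoy -c /etc/envoy/envoy.cfg --restart-epoch "$RESTART_EPOCH" \
       --base-id "$(cat "$BASE_ID_FILE")"
fi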
diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 286c9b8aa0c1..9fe4d345399e 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -13,6 +13,7 @@ Minor Behavior Changes *Changes that may cause incompatibilities for some users, but should not for most* * access loggers: applied existing buffer limits to access logs, as well as :ref:`stats ` for logged / dropped logs. This can be reverted temporarily by setting runtime feature `envoy.reloadable_features.disallow_unbounded_access_logs` to false. +* hot restart: added the option :option:`--use-dynamic-base-id` to select an unused base ID at startup and the option :option:`--base-id-path` to write the base id to a file (for reuse with later hot restarts). * http: fixed several bugs with applying correct connection close behavior across the http connection manager, health checker, and connection pool. This behavior may be temporarily reverted by setting runtime feature `envoy.reloadable_features.fix_connection_close` to false. * http: fixed a bug where the upgrade header was not cleared on responses to non-upgrade requests. Can be reverted temporarily by setting runtime feature `envoy.reloadable_features.fix_upgrade_response` to false. @@ -68,7 +69,7 @@ New Features * listener: added in place filter chain update flow for tcp listener update which doesn't close connections if the corresponding network filter chain is equivalent during the listener update. Can be disabled by setting runtime feature `envoy.reloadable_features.listener_in_place_filterchain_update` to false. Also added additional draining filter chain stat for :ref:`listener manager ` to track the number of draining filter chains and the number of in place update attempts. -* logger: added :ref:`--log-format-prefix-with-location ` command line option to prefix '%v' with file path and line number. +* logger: added :option:`--log-format-prefix-with-location` command line option to prefix '%v' with file path and line number. * lrs: added new *envoy_api_field_service.load_stats.v2.LoadStatsResponse.send_all_clusters* field in LRS response, which allows management servers to avoid explicitly listing all clusters it is interested in; behavior is allowed based on new "envoy.lrs.supports_send_all_clusters" capability diff --git a/generated_api_shadow/envoy/admin/v3/server_info.proto b/generated_api_shadow/envoy/admin/v3/server_info.proto index 4962a95d631b..a428e4b8ca4a 100644 --- a/generated_api_shadow/envoy/admin/v3/server_info.proto +++ b/generated_api_shadow/envoy/admin/v3/server_info.proto @@ -54,7 +54,7 @@ message ServerInfo { CommandLineOptions command_line_options = 6; } -// [#next-free-field: 31] +// [#next-free-field: 33] message CommandLineOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.CommandLineOptions"; @@ -80,6 +80,12 @@ message CommandLineOptions { // See :option:`--base-id` for details. uint64 base_id = 1; + // See :option:`--use-dynamic-base-id` for details. + bool use_dynamic_base_id = 31; + + // See :option:`--base-id-path` for details. + string base_id_path = 32; + // See :option:`--concurrency` for details. 
uint32 concurrency = 2; diff --git a/generated_api_shadow/envoy/admin/v4alpha/server_info.proto b/generated_api_shadow/envoy/admin/v4alpha/server_info.proto index b9e8c3043002..04f1f1ef36d7 100644 --- a/generated_api_shadow/envoy/admin/v4alpha/server_info.proto +++ b/generated_api_shadow/envoy/admin/v4alpha/server_info.proto @@ -54,7 +54,7 @@ message ServerInfo { CommandLineOptions command_line_options = 6; } -// [#next-free-field: 31] +// [#next-free-field: 33] message CommandLineOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.CommandLineOptions"; @@ -81,6 +81,12 @@ message CommandLineOptions { // See :option:`--base-id` for details. uint64 base_id = 1; + // See :option:`--use-dynamic-base-id` for details. + bool use_dynamic_base_id = 31; + + // See :option:`--base-id-path` for details. + string base_id_path = 32; + // See :option:`--concurrency` for details. uint32 concurrency = 2; diff --git a/include/envoy/server/hot_restart.h b/include/envoy/server/hot_restart.h index 16c182c8da3c..7525fa6a0f4f 100644 --- a/include/envoy/server/hot_restart.h +++ b/include/envoy/server/hot_restart.h @@ -79,6 +79,11 @@ class HotRestart { */ virtual void shutdown() PURE; + /** + * Return the base id used to generate a domain socket name. + */ + virtual uint32_t baseId() PURE; + /** * Return the hot restart compatibility version so that operations code can decide whether to * perform a full or hot restart. @@ -96,5 +101,14 @@ class HotRestart { virtual Thread::BasicLockable& accessLogLock() PURE; }; +/** + * HotRestartDomainSocketInUseException is thrown during HotRestart construction only when the + * underlying domain socket is in use. + */ +class HotRestartDomainSocketInUseException : public EnvoyException { +public: + HotRestartDomainSocketInUseException(const std::string& what) : EnvoyException(what) {} +}; + } // namespace Server } // namespace Envoy diff --git a/include/envoy/server/options.h b/include/envoy/server/options.h index c83102e472f4..1bbffc116a91 100644 --- a/include/envoy/server/options.h +++ b/include/envoy/server/options.h @@ -59,6 +59,17 @@ class Options { */ virtual uint64_t baseId() const PURE; + /** + * @return bool choose an unused base ID dynamically. The chosen base id can be written to a + * a file using the baseIdPath option. + */ + virtual bool useDynamicBaseId() const PURE; + + /** + * @return const std::string& the dynamic base id output file. + */ + virtual const std::string& baseIdPath() const PURE; + /** * @return the number of worker threads to run in the server. 
*/ diff --git a/source/exe/main_common.cc b/source/exe/main_common.cc index a46523033ab5..d0a1708bd2f4 100644 --- a/source/exe/main_common.cc +++ b/source/exe/main_common.cc @@ -1,5 +1,6 @@ #include "exe/main_common.h" +#include #include #include #include @@ -7,6 +8,7 @@ #include "envoy/config/listener/v3/listener.pb.h" #include "common/common/compiler_requirements.h" +#include "common/common/logger.h" #include "common/common/perf_annotation.h" #include "common/network/utility.h" #include "common/stats/symbol_table_creator.h" @@ -58,14 +60,7 @@ MainCommonBase::MainCommonBase(const OptionsImpl& options, Event::TimeSystem& ti switch (options_.mode()) { case Server::Mode::InitOnly: case Server::Mode::Serve: { -#ifdef ENVOY_HOT_RESTART - if (!options.hotRestartDisabled()) { - restarter_ = std::make_unique(options_); - } -#endif - if (restarter_ == nullptr) { - restarter_ = std::make_unique(); - } + configureHotRestarter(*random_generator); tls_ = std::make_unique(); Thread::BasicLockable& log_lock = restarter_->logLock(); @@ -106,6 +101,60 @@ void MainCommonBase::configureComponentLogLevels() { } } +void MainCommonBase::configureHotRestarter(Runtime::RandomGenerator& random_generator) { +#ifdef ENVOY_HOT_RESTART + if (!options_.hotRestartDisabled()) { + uint32_t base_id = options_.baseId(); + + if (options_.useDynamicBaseId()) { + ASSERT(options_.restartEpoch() == 0, "cannot use dynamic base id during hot restart"); + + std::unique_ptr restarter; + + // Try 100 times to get an unused base ID and then give up under the assumption + // that some other problem has occurred to prevent binding the domain socket. + for (int i = 0; i < 100 && restarter == nullptr; i++) { + // HotRestartImpl is going to multiply this value by 10, so leave head room. + base_id = static_cast(random_generator.random()) & 0x0FFFFFFF; + + try { + restarter = std::make_unique(base_id, 0); + } catch (Server::HotRestartDomainSocketInUseException& ex) { + // No luck, try again. + ENVOY_LOG_MISC(debug, "dynamic base id: {}", ex.what()); + } + } + + if (restarter == nullptr) { + throw EnvoyException("unable to select a dynamic base id"); + } + + restarter_.swap(restarter); + } else { + restarter_ = std::make_unique(base_id, options_.restartEpoch()); + } + + // Write the base-id to the requested path whether we selected it + // dynamically or not. 
+ if (!options_.baseIdPath().empty()) { + std::ofstream base_id_out_file(options_.baseIdPath()); + if (!base_id_out_file) { + ENVOY_LOG_MISC(critical, "cannot open base id output file {} for writing.", + options_.baseIdPath()); + } else { + base_id_out_file << base_id; + } + } + } +#else + UNREFERENCED_PARAMETER(random_generator); +#endif + + if (restarter_ == nullptr) { + restarter_ = std::make_unique(); + } +} + bool MainCommonBase::run() { switch (options_.mode()) { case Server::Mode::Serve: diff --git a/source/exe/main_common.h b/source/exe/main_common.h index 1bd2ca6c2a87..fc5194903449 100644 --- a/source/exe/main_common.h +++ b/source/exe/main_common.h @@ -87,6 +87,7 @@ class MainCommonBase { private: void configureComponentLogLevels(); + void configureHotRestarter(Runtime::RandomGenerator& random_generator); }; // TODO(jmarantz): consider removing this class; I think it'd be more useful to diff --git a/source/server/hot_restart_impl.cc b/source/server/hot_restart_impl.cc index 2a39c9c425d2..015c4f1009c4 100644 --- a/source/server/hot_restart_impl.cc +++ b/source/server/hot_restart_impl.cc @@ -12,7 +12,6 @@ #include "envoy/event/dispatcher.h" #include "envoy/event/file_event.h" #include "envoy/server/instance.h" -#include "envoy/server/options.h" #include "common/api/os_sys_calls_impl.h" #include "common/api/os_sys_calls_impl_hot_restart.h" @@ -24,13 +23,13 @@ namespace Envoy { namespace Server { -SharedMemory* attachSharedMemory(const Options& options) { +SharedMemory* attachSharedMemory(uint32_t base_id, uint32_t restart_epoch) { Api::OsSysCalls& os_sys_calls = Api::OsSysCallsSingleton::get(); Api::HotRestartOsSysCalls& hot_restart_os_sys_calls = Api::HotRestartOsSysCallsSingleton::get(); int flags = O_RDWR; - const std::string shmem_name = fmt::format("/envoy_shared_memory_{}", options.baseId()); - if (options.restartEpoch() == 0) { + const std::string shmem_name = fmt::format("/envoy_shared_memory_{}", base_id); + if (restart_epoch == 0) { flags |= O_CREAT | O_EXCL; // If we are meant to be first, attempt to unlink a previous shared memory instance. If this @@ -45,7 +44,7 @@ SharedMemory* attachSharedMemory(const Options& options) { shmem_name, strerror(result.errno_))); } - if (options.restartEpoch() == 0) { + if (restart_epoch == 0) { const Api::SysCallIntResult truncateRes = os_sys_calls.ftruncate(result.rc_, sizeof(SharedMemory)); RELEASE_ASSERT(truncateRes.rc_ != -1, ""); @@ -57,7 +56,7 @@ SharedMemory* attachSharedMemory(const Options& options) { RELEASE_ASSERT(shmem != MAP_FAILED, ""); RELEASE_ASSERT((reinterpret_cast(shmem) % alignof(decltype(shmem))) == 0, ""); - if (options.restartEpoch() == 0) { + if (restart_epoch == 0) { shmem->size_ = sizeof(SharedMemory); shmem->version_ = HOT_RESTART_VERSION; initializeMutex(shmem->log_lock_); @@ -91,10 +90,16 @@ void initializeMutex(pthread_mutex_t& mutex) { pthread_mutex_init(&mutex, &attribute); } -HotRestartImpl::HotRestartImpl(const Options& options) - : as_child_(HotRestartingChild(options.baseId(), options.restartEpoch())), - as_parent_(HotRestartingParent(options.baseId(), options.restartEpoch())), - shmem_(attachSharedMemory(options)), log_lock_(shmem_->log_lock_), +// The base id is automatically scaled by 10 to prevent overlap of domain socket names when +// multiple Envoys with different base-ids run on a single host. Note that older versions of Envoy +// performed the multiplication in OptionsImpl which produced incorrect server info output. 
+// TODO(zuercher): ideally, the base_id would be separated from the restart_epoch in +// the socket names to entirely prevent collisions between consecutive base ids. +HotRestartImpl::HotRestartImpl(uint32_t base_id, uint32_t restart_epoch) + : base_id_(base_id), scaled_base_id_(base_id * 10), + as_child_(HotRestartingChild(scaled_base_id_, restart_epoch)), + as_parent_(HotRestartingParent(scaled_base_id_, restart_epoch)), + shmem_(attachSharedMemory(scaled_base_id_, restart_epoch)), log_lock_(shmem_->log_lock_), access_log_lock_(shmem_->access_log_lock_) { // If our parent ever goes away just terminate us so that we don't have to rely on ops/launching // logic killing the entire process tree. We should never exist without our parent. @@ -137,6 +142,7 @@ HotRestartImpl::mergeParentStatsIfAny(Stats::StoreRoot& stats_store) { void HotRestartImpl::shutdown() { as_parent_.shutdown(); } +uint32_t HotRestartImpl::baseId() { return base_id_; } std::string HotRestartImpl::version() { return hotRestartVersion(); } std::string HotRestartImpl::hotRestartVersion() { diff --git a/source/server/hot_restart_impl.h b/source/server/hot_restart_impl.h index b8cb4c636e22..bec2159fc987 100644 --- a/source/server/hot_restart_impl.h +++ b/source/server/hot_restart_impl.h @@ -40,8 +40,11 @@ static const uint64_t SHMEM_FLAGS_INITIALIZING = 0x1; /** * Initialize the shared memory segment, depending on whether we are the first running * envoy, or a host restarted envoy process. + * + * @param base_id uint32_t that is the base id flag used to start this Envoy. + * @param restart_epoch uint32_t the restart epoch flag used to start this Envoy. */ -SharedMemory* attachSharedMemory(const Options& options); +SharedMemory* attachSharedMemory(uint32_t base_id, uint32_t restart_epoch); /** * Initialize a pthread mutex for process shared locking. @@ -95,7 +98,7 @@ class ProcessSharedMutex : public Thread::BasicLockable { */ class HotRestartImpl : public HotRestart { public: - HotRestartImpl(const Options& options); + HotRestartImpl(uint32_t base_id, uint32_t restart_epoch); // Server::HotRestart void drainParentListeners() override; @@ -105,6 +108,7 @@ class HotRestartImpl : public HotRestart { void sendParentTerminateRequest() override; ServerStatsFromParent mergeParentStatsIfAny(Stats::StoreRoot& stats_store) override; void shutdown() override; + uint32_t baseId() override; std::string version() override; Thread::BasicLockable& logLock() override { return log_lock_; } Thread::BasicLockable& accessLogLock() override { return access_log_lock_; } @@ -116,6 +120,8 @@ class HotRestartImpl : public HotRestart { static std::string hotRestartVersion(); private: + uint32_t base_id_; + uint32_t scaled_base_id_; HotRestartingChild as_child_; HotRestartingParent as_parent_; // This pointer is shared memory, and is expected to exist until process end. 
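As a hypothetical illustration of the scaling comment above (config paths are placeholders; the flags are the real :option:`--base-id` and :option:`--restart-epoch` options): two Envoys started with base IDs 1 and 2 report those unscaled values via baseId() and the server info output, while internally attaching "/envoy_shared_memory_10" and "/envoy_shared_memory_20" and deriving their domain socket names from the scaled IDs, which leaves room for the per-epoch socket IDs (subject to the TODO above about consecutive base IDs).

  # Two distinctly configured Envoys on one host, hot restartable independently.
  envoy -c /etc/envoy/edge.yaml     --base-id 1 --restart-epoch 0 &
  envoy -c /etc/envoy/internal.yaml --base-id 2 --restart-epoch 0 &

  # Later, hot restart only the "edge" instance; the "internal" one is unaffected.
  envoy -c /etc/envoy/edge.yaml     --base-id 1 --restart-epoch 1 &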
diff --git a/source/server/hot_restart_nop_impl.h b/source/server/hot_restart_nop_impl.h index 205097649b81..5e5250185582 100644 --- a/source/server/hot_restart_nop_impl.h +++ b/source/server/hot_restart_nop_impl.h @@ -23,6 +23,7 @@ class HotRestartNopImpl : public Server::HotRestart { void sendParentTerminateRequest() override {} ServerStatsFromParent mergeParentStatsIfAny(Stats::StoreRoot&) override { return {}; } void shutdown() override {} + uint32_t baseId() override { return 0; } std::string version() override { return "disabled"; } Thread::BasicLockable& logLock() override { return log_lock_; } Thread::BasicLockable& accessLogLock() override { return access_log_lock_; } diff --git a/source/server/hot_restarting_base.cc b/source/server/hot_restarting_base.cc index 2d96db059b74..5f3a33068df7 100644 --- a/source/server/hot_restarting_base.cc +++ b/source/server/hot_restarting_base.cc @@ -48,9 +48,13 @@ void HotRestartingBase::bindDomainSocket(uint64_t id, const std::string& role) { Api::SysCallIntResult result = os_sys_calls.bind(my_domain_socket_, reinterpret_cast(&address), sizeof(address)); if (result.rc_ != 0) { - throw EnvoyException(fmt::format( + const auto msg = fmt::format( "unable to bind domain socket with base_id={}, id={}, errno={} (see --base-id option)", - base_id_, id, result.errno_)); + base_id_, id, result.errno_); + if (result.errno_ == EADDRINUSE) { + throw HotRestartDomainSocketInUseException(msg); + } + throw EnvoyException(msg); } } diff --git a/source/server/options_impl.cc b/source/server/options_impl.cc index e8e7cc9fa04d..e7bb267ecd1e 100644 --- a/source/server/options_impl.cc +++ b/source/server/options_impl.cc @@ -59,6 +59,13 @@ OptionsImpl::OptionsImpl(std::vector args, TCLAP::ValueArg base_id( "", "base-id", "base ID so that multiple envoys can run on the same host if needed", false, 0, "uint32_t", cmd); + TCLAP::SwitchArg use_dynamic_base_id( + "", "use-dynamic-base-id", + "the server chooses a base ID dynamically. Supersedes a static base ID. May not be used " + "when the restart epoch is non-zero.", + cmd, false); + TCLAP::ValueArg base_id_path( + "", "base-id-path", "path to which the base ID is written", false, "", "string", cmd); TCLAP::ValueArg concurrency("", "concurrency", "# of worker threads to run", false, std::thread::hardware_concurrency(), "uint32_t", cmd); TCLAP::ValueArg config_path("c", "config-path", "Path to configuration file", false, @@ -209,9 +216,16 @@ OptionsImpl::OptionsImpl(std::vector args, fmt::format("error: unknown IP address version '{}'", local_address_ip_version.getValue()); throw MalformedArgvException(message); } + base_id_ = base_id.getValue(); + use_dynamic_base_id_ = use_dynamic_base_id.getValue(); + base_id_path_ = base_id_path.getValue(); + restart_epoch_ = restart_epoch.getValue(); - // For base ID, scale what the user inputs by 10 so that we have spread for domain sockets. 
- base_id_ = base_id.getValue() * 10; + if (use_dynamic_base_id_ && restart_epoch_ > 0) { + const std::string message = fmt::format( + "error: cannot use --restart-epoch={} with --use-dynamic-base-id", restart_epoch_); + throw MalformedArgvException(message); + } if (!concurrency.isSet() && cpuset_threads_) { // The 'concurrency' command line option wasn't set but the 'cpuset-threads' @@ -241,7 +255,6 @@ OptionsImpl::OptionsImpl(std::vector args, ignore_unknown_dynamic_fields_ = ignore_unknown_dynamic_fields.getValue(); admin_address_path_ = admin_address_path.getValue(); log_path_ = log_path.getValue(); - restart_epoch_ = restart_epoch.getValue(); service_cluster_ = service_cluster.getValue(); service_node_ = service_node.getValue(); service_zone_ = service_zone.getValue(); @@ -320,6 +333,8 @@ Server::CommandLineOptionsPtr OptionsImpl::toCommandLineOptions() const { Server::CommandLineOptionsPtr command_line_options = std::make_unique(); command_line_options->set_base_id(baseId()); + command_line_options->set_use_dynamic_base_id(useDynamicBaseId()); + command_line_options->set_base_id_path(baseIdPath()); command_line_options->set_concurrency(concurrency()); command_line_options->set_config_path(configPath()); command_line_options->set_config_yaml(configYaml()); @@ -366,7 +381,8 @@ Server::CommandLineOptionsPtr OptionsImpl::toCommandLineOptions() const { OptionsImpl::OptionsImpl(const std::string& service_cluster, const std::string& service_node, const std::string& service_zone, spdlog::level::level_enum log_level) - : base_id_(0u), concurrency_(1u), config_path_(""), config_yaml_(""), + : base_id_(0u), use_dynamic_base_id_(false), base_id_path_(""), concurrency_(1u), + config_path_(""), config_yaml_(""), local_address_ip_version_(Network::Address::IpVersion::v4), log_level_(log_level), log_format_(Logger::Logger::DEFAULT_LOG_FORMAT), log_format_escaped_(false), restart_epoch_(0u), service_cluster_(service_cluster), service_node_(service_node), diff --git a/source/server/options_impl.h b/source/server/options_impl.h index d82b28ca9fb1..17217cbe60c4 100644 --- a/source/server/options_impl.h +++ b/source/server/options_impl.h @@ -50,6 +50,8 @@ class OptionsImpl : public Server::Options, protected Logger::Loggable argv_; }; INSTANTIATE_TEST_SUITE_P(IpVersions, MainCommonTest, @@ -87,6 +87,54 @@ TEST_P(MainCommonTest, ConstructDestructHotRestartDisabledNoInit) { EXPECT_TRUE(main_common.run()); } +// Exercise base-id-path option. +TEST_P(MainCommonTest, ConstructWritesBasePathId) { +#ifdef ENVOY_HOT_RESTART + const std::string base_id_path = TestEnvironment::temporaryPath("base-id-file"); + addArg("--base-id-path"); + addArg(base_id_path.c_str()); + VERBOSE_EXPECT_NO_THROW(MainCommon main_common(argc(), argv())); + + EXPECT_NE("", TestEnvironment::readFileToStringForTest(base_id_path)); +#endif +} + +// Test that an in-use base id triggers a retry and that we eventually give up. 
+TEST_P(MainCommonTest, RetryDynamicBaseIdFails) { +#ifdef ENVOY_HOT_RESTART + PlatformImpl platform; + Event::TestRealTimeSystem real_time_system; + DefaultListenerHooks default_listener_hooks; + ProdComponentFactory prod_component_factory; + + const std::string base_id_path = TestEnvironment::temporaryPath("base-id-file"); + + const auto first_args = std::vector({"envoy-static", "--use-dynamic-base-id", "-c", + config_file_, "--base-id-path", base_id_path}); + OptionsImpl first_options(first_args, &MainCommon::hotRestartVersion, spdlog::level::info); + MainCommonBase first(first_options, real_time_system, default_listener_hooks, + prod_component_factory, std::make_unique(), + platform.threadFactory(), platform.fileSystem(), nullptr); + + const std::string base_id_str = TestEnvironment::readFileToStringForTest(base_id_path); + uint32_t base_id; + ASSERT_TRUE(absl::SimpleAtoi(base_id_str, &base_id)); + + auto* mock_rng = new NiceMock(); + EXPECT_CALL(*mock_rng, random()).WillRepeatedly(Return(base_id)); + + const auto second_args = + std::vector({"envoy-static", "--use-dynamic-base-id", "-c", config_file_}); + OptionsImpl second_options(second_args, &MainCommon::hotRestartVersion, spdlog::level::info); + + EXPECT_THROW_WITH_MESSAGE( + MainCommonBase(second_options, real_time_system, default_listener_hooks, + prod_component_factory, std::unique_ptr{mock_rng}, + platform.threadFactory(), platform.fileSystem(), nullptr), + EnvoyException, "unable to select a dynamic base id"); +#endif +} + // Test that std::set_new_handler() was called and the callback functions as expected. // This test fails under TSAN and ASAN, so don't run it in that build: // [ DEATH ] ==845==ERROR: ThreadSanitizer: requested allocation size 0x3e800000000 @@ -106,9 +154,6 @@ TEST_P(MainCommonDeathTest, OutOfMemoryHandler) { ENVOY_LOG_MISC(critical, "MainCommonTest::OutOfMemoryHandler not supported by this compiler configuration"); #else - // Death test forks and restarts the test with special arguments. Since we're meant to choose - // the same base-id on the second attempt we can't succeed with hot restart enabled. - addArg("--disable-hot-restart"); MainCommon main_common(argc(), argv()); #if !defined(WIN32) // Resolving symbols for a backtrace takes longer than the timeout in coverage builds, diff --git a/test/integration/hotrestart_test.sh b/test/integration/hotrestart_test.sh index 1efca96ccf9e..7048f4fb351d 100755 --- a/test/integration/hotrestart_test.sh +++ b/test/integration/hotrestart_test.sh @@ -68,30 +68,37 @@ cat "${TEST_SRCDIR}/envoy"/test/config/integration/server.yaml | cat > "${HOT_RESTART_JSON_REUSE_PORT}" JSON_TEST_ARRAY+=("${HOT_RESTART_JSON_REUSE_PORT}") -# Use TEST_RANDOM_SEED or TEST_SHARD_INDEX to choose a base id. This -# replicates the logic of TestEnvironment::chooseBaseId(1). See that method -# for details. 
-let BASE_ID=1000000+${TEST_RANDOM_SEED:-${TEST_SHARD_INDEX:-0}} - -echo "Hot restart test using --base-id ${BASE_ID}" +echo "Hot restart test using dynamic base id" TEST_INDEX=0 function run_testsuite() { local HOT_RESTART_JSON="$1" local FAKE_SYMBOL_TABLE="$2" - # TODO(jun03): instead of setting the base-id, the validate server should use the nop hot restart start_test validation check "${ENVOY_BIN}" -c "${HOT_RESTART_JSON}" --mode validate --service-cluster cluster \ - --use-fake-symbol-table "$FAKE_SYMBOL_TABLE" --service-node node --base-id "${BASE_ID}" + --use-fake-symbol-table "$FAKE_SYMBOL_TABLE" --service-node node --disable-hot-restart + + local BASE_ID_PATH=$(mktemp 'envoy_test_base_id.XXXXXX') + echo "Selected dynamic base id path ${BASE_ID_PATH}" # Now start the real server, hot restart it twice, and shut it all down as a basic hot restart # sanity test. start_test Starting epoch 0 ADMIN_ADDRESS_PATH_0="${TEST_TMPDIR}"/admin.0."${TEST_INDEX}".address run_in_background_saving_pid "${ENVOY_BIN}" -c "${HOT_RESTART_JSON}" \ - --restart-epoch 0 --base-id "${BASE_ID}" --service-cluster cluster --service-node node \ - --use-fake-symbol-table "$FAKE_SYMBOL_TABLE" --admin-address-path "${ADMIN_ADDRESS_PATH_0}" + --restart-epoch 0 --use-dynamic-base-id --base-id-path "${BASE_ID_PATH}" \ + --service-cluster cluster --service-node node --use-fake-symbol-table "$FAKE_SYMBOL_TABLE" \ + --admin-address-path "${ADMIN_ADDRESS_PATH_0}" + + local BASE_ID=$(cat "${BASE_ID_PATH}") + while [ -z "${BASE_ID}" ]; do + echo "Waiting for base id" + sleep 0.5 + BASE_ID=$(cat "${BASE_ID_PATH}") + done + + echo "Selected dynamic base id ${BASE_ID}" FIRST_SERVER_PID=$BACKGROUND_PID diff --git a/test/integration/run_envoy_test.sh b/test/integration/run_envoy_test.sh index 2dda6f3494f9..72003ddf7e84 100755 --- a/test/integration/run_envoy_test.sh +++ b/test/integration/run_envoy_test.sh @@ -2,18 +2,13 @@ source "${TEST_SRCDIR}/envoy/test/integration/test_utility.sh" -# Use TEST_RANDOM_SEED or TEST_SHARD_INDEX to choose a base id. This -# replicates the logic of TestEnvironment::chooseBaseId(2). See that method -# for details. -let BASE_ID=2000000+${TEST_RANDOM_SEED:-${TEST_SHARD_INDEX:-0}} - function expect_fail_with_error() { log="${TEST_TMPDIR}/envoy.log" rm -f "$log" expected_error="$1" shift - echo ${ENVOY_BIN} --base-id "${BASE_ID}" "$@" ">&" "$log" - ${ENVOY_BIN} --base-id "${BASE_ID}" "$@" >& "$log" + echo ${ENVOY_BIN} --use-dynamic-base-id "$@" ">&" "$log" + ${ENVOY_BIN} --use-dynamic-base-id "$@" >& "$log" EXIT_CODE=$? 
cat "$log" check [ $EXIT_CODE -eq 1 ] diff --git a/test/mocks/server/mocks.h b/test/mocks/server/mocks.h index 50c73fb1036b..38d22ea9d798 100644 --- a/test/mocks/server/mocks.h +++ b/test/mocks/server/mocks.h @@ -71,6 +71,8 @@ class MockOptions : public Options { ~MockOptions() override; MOCK_METHOD(uint64_t, baseId, (), (const)); + MOCK_METHOD(bool, useDynamicBaseId, (), (const)); + MOCK_METHOD(const std::string&, baseIdPath, (), (const)); MOCK_METHOD(uint32_t, concurrency, (), (const)); MOCK_METHOD(const std::string&, configPath, (), (const)); MOCK_METHOD(const envoy::config::bootstrap::v3::Bootstrap&, configProto, (), (const)); @@ -234,6 +236,7 @@ class MockHotRestart : public HotRestart { MOCK_METHOD(void, sendParentTerminateRequest, ()); MOCK_METHOD(ServerStatsFromParent, mergeParentStatsIfAny, (Stats::StoreRoot & stats_store)); MOCK_METHOD(void, shutdown, ()); + MOCK_METHOD(uint32_t, baseId, ()); MOCK_METHOD(std::string, version, ()); MOCK_METHOD(Thread::BasicLockable&, logLock, ()); MOCK_METHOD(Thread::BasicLockable&, accessLogLock, ()); diff --git a/test/server/hot_restart_impl_test.cc b/test/server/hot_restart_impl_test.cc index d9eb46b24c51..a3d431e6c6e2 100644 --- a/test/server/hot_restart_impl_test.cc +++ b/test/server/hot_restart_impl_test.cc @@ -20,6 +20,7 @@ using testing::_; using testing::AnyNumber; using testing::Invoke; using testing::InvokeWithoutArgs; +using testing::Return; using testing::WithArg; namespace Envoy { @@ -42,7 +43,7 @@ class HotRestartImplTest : public testing::Test { EXPECT_CALL(os_sys_calls_, bind(_, _, _)).Times(2); // Test we match the correct stat with empty-slots before, after, or both. - hot_restart_ = std::make_unique(options_); + hot_restart_ = std::make_unique(0, 0); hot_restart_->drainParentListeners(); // We close both sockets. @@ -54,7 +55,6 @@ class HotRestartImplTest : public testing::Test { Api::MockHotRestartOsSysCalls hot_restart_os_sys_calls_; TestThreadsafeSingletonInjector hot_restart_os_calls{ &hot_restart_os_sys_calls_}; - NiceMock options_; std::vector buffer_; std::unique_ptr hot_restart_; }; @@ -80,6 +80,25 @@ TEST_F(HotRestartImplTest, VersionString) { } } +// Test that HotRestartDomainSocketInUseException is thrown when the domain socket is already +// in use, +TEST_F(HotRestartImplTest, DomainSocketAlreadyInUse) { + EXPECT_CALL(os_sys_calls_, bind(_, _, _)).WillOnce(Return(Api::SysCallIntResult{-1, EADDRINUSE})); + EXPECT_CALL(os_sys_calls_, close(_)).Times(1); + + EXPECT_THROW(std::make_unique(0, 0), + Server::HotRestartDomainSocketInUseException); +} + +// Test that EnvoyException is thrown when the domain socket bind fails for reasons other than +// being in use. 
+TEST_F(HotRestartImplTest, DomainSocketError) { + EXPECT_CALL(os_sys_calls_, bind(_, _, _)).WillOnce(Return(Api::SysCallIntResult{-1, EACCES})); + EXPECT_CALL(os_sys_calls_, close(_)).Times(1); + + EXPECT_THROW(std::make_unique(0, 0), EnvoyException); +} + } // namespace } // namespace Server } // namespace Envoy diff --git a/test/server/options_impl_test.cc b/test/server/options_impl_test.cc index efb4b2565f2d..12c590a89444 100644 --- a/test/server/options_impl_test.cc +++ b/test/server/options_impl_test.cc @@ -80,19 +80,20 @@ TEST_F(OptionsImplTest, V1Disallowed) { TEST_F(OptionsImplTest, All) { std::unique_ptr options = createOptionsImpl( - "envoy --mode validate --concurrency 2 -c hello --admin-address-path path --restart-epoch 1 " + "envoy --mode validate --concurrency 2 -c hello --admin-address-path path --restart-epoch 0 " "--local-address-ip-version v6 -l info --component-log-level upstream:debug,connection:trace " "--service-cluster cluster --service-node node --service-zone zone " "--file-flush-interval-msec 9000 " "--drain-time-s 60 --log-format [%v] --parent-shutdown-time-s 90 --log-path /foo/bar " "--disable-hot-restart --cpuset-threads --allow-unknown-static-fields " - "--reject-unknown-dynamic-fields --use-fake-symbol-table 0"); + "--reject-unknown-dynamic-fields --use-fake-symbol-table 0 --base-id 5 " + "--use-dynamic-base-id --base-id-path /foo/baz"); EXPECT_EQ(Server::Mode::Validate, options->mode()); EXPECT_EQ(2U, options->concurrency()); EXPECT_EQ("hello", options->configPath()); EXPECT_EQ("path", options->adminAddressPath()); EXPECT_EQ(Network::Address::IpVersion::v6, options->localAddressIpVersion()); - EXPECT_EQ(1U, options->restartEpoch()); + EXPECT_EQ(0U, options->restartEpoch()); EXPECT_EQ(spdlog::level::info, options->logLevel()); EXPECT_EQ(2, options->componentLogLevels().size()); EXPECT_EQ("[[%g:%#] %v]", options->logFormat()); @@ -108,6 +109,9 @@ TEST_F(OptionsImplTest, All) { EXPECT_TRUE(options->allowUnknownStaticFields()); EXPECT_TRUE(options->rejectUnknownDynamicFields()); EXPECT_FALSE(options->fakeSymbolTableEnabled()); + EXPECT_EQ(5U, options->baseId()); + EXPECT_TRUE(options->useDynamicBaseId()); + EXPECT_EQ("/foo/baz", options->baseIdPath()); options = createOptionsImpl("envoy --mode init_only"); EXPECT_EQ(Server::Mode::InitOnly, options->mode()); @@ -431,6 +435,21 @@ TEST_F(OptionsImplTest, LogFormatOverrideNoPrefix) { EXPECT_EQ(options->logFormat(), "%%v %v %t %v"); } +// Test that --base-id and --restart-epoch with non-default values are accepted. +TEST_F(OptionsImplTest, SetBaseIdAndRestartEpoch) { + std::unique_ptr options = + createOptionsImpl({"envoy", "-c", "hello", "--base-id", "99", "--restart-epoch", "999"}); + EXPECT_EQ(99U, options->baseId()); + EXPECT_EQ(999U, options->restartEpoch()); +} + +// Test that --use-dynamic-base-id and --restart-epoch with a non-default value is not accepted. 
+TEST_F(OptionsImplTest, SetUseDynamicBaseIdAndRestartEpoch) { + EXPECT_THROW_WITH_REGEX( + createOptionsImpl({"envoy", "-c", "hello", "--use-dynamic-base-id", "--restart-epoch", "1"}), + MalformedArgvException, "error: cannot use --restart-epoch=1 with --use-dynamic-base-id"); +} + #if defined(__linux__) using testing::DoAll; diff --git a/test/test_common/environment.cc b/test/test_common/environment.cc index 7aa41d13d55c..45a6e9b883ba 100644 --- a/test/test_common/environment.cc +++ b/test/test_common/environment.cc @@ -194,30 +194,6 @@ std::string TestEnvironment::getCheckedEnvVar(const std::string& var) { return optional.value(); } -std::string TestEnvironment::chooseBaseId(uint64_t test_base_id) { - ASSERT(test_base_id >= 1); - ASSERT(test_base_id <= 1L << 44); // Leave room to multiple by 1000000. - - test_base_id *= 1000000; - - auto test_random_seed = TestEnvironment::getOptionalEnvVar("TEST_RANDOM_SEED"); - auto test_shard_index = TestEnvironment::getOptionalEnvVar("TEST_SHARD_INDEX"); - - if (test_random_seed) { - int mutator = 0; - if (absl::SimpleAtoi(test_random_seed.value(), &mutator)) { - test_base_id += mutator; - } - } else if (test_shard_index) { - int mutator = 0; - if (absl::SimpleAtoi(test_shard_index.value(), &mutator)) { - test_base_id += mutator; - } - } - - return absl::StrFormat("%d", test_base_id); -} - void TestEnvironment::initializeTestMain(char* program_name) { #ifdef WIN32 _set_abort_behavior(0, _WRITE_ABORT_MSG | _CALL_REPORTFAULT); diff --git a/test/test_common/environment.h b/test/test_common/environment.h index e0e73efe002a..02b324ef5191 100644 --- a/test/test_common/environment.h +++ b/test/test_common/environment.h @@ -72,47 +72,6 @@ class TestEnvironment { */ static std::string getCheckedEnvVar(const std::string& var); - /** - * Generates an appropriate base-id for use as the base id option to an Envoy - * server. Each test that requires a unique test base id should invoke this - * method with a unique value to get a string value appropriate for use the - * value of the --base-id command line argument. In general, tests that - * create an Envoy::Server::HotRestartImpl without mocks should use this - * function. If all test cases within a grouping, say a single test source - * file, are executed consecutively, they may share a test base id. Tests in - * separate groupings cannot share a test base id for reasons described - * below. Special care must be taken with death tests -- the gtest framework - * will fork and invoke a new copy of the same test binary with additional - * command line flags. This can result in the re-use of a given base-id from - * another process (which causing a failure). - * - * We require a unique test base id because random seeds are reused across - * multiple test targets. For example, running: - * bazel --runs_per_test=3 //test:foo_test //test:bar_test - * results in 3 unique seeds. Each test target is run once with each seed. - * Similarly, in coverage tests, the same test may be run concurrently in - * multiple coverage shards. 
- * - * This method uses the given test base id and one of the following - * environment variables: - * - TEST_RANDOM_SEED is used to handle concurrent runs via the Bazel - * --runs_per_test flag - * - TEST_SHARD_INDEX is used to handle concurrent runs when tests are run in - * shards, such as in coverage testing - * - * This algorithm is re-implemented in the following test scripts: - * - test/integration/hot_restart_test.sh - * - test/integration/run_envoy_test.sh - * - * Currently the in-use test base ids are: - * 1: test/integration/hot_restart_test.sh - * 2: test/integration/run_envoy_test.sh - * 3: test/exe/main_common_test.cc - * - * @param test_base_id a uint64_t used to unique identify a group of tests - */ - static std::string chooseBaseId(uint64_t test_base_id); - /** * Obtain a private writable temporary directory. * @return const std::string& with the path to the temporary directory. From 7fa417cde342d36cad39cf0f739d56f291ee7e23 Mon Sep 17 00:00:00 2001 From: Florin Coras Date: Thu, 4 Jun 2020 09:35:14 -0700 Subject: [PATCH 289/909] network: add socket interface virtual class (#11380) - convert SocketInterface namespace to virtual class and add a default implementation - add SocketInterfaceSingleton, a custom InjectableSingleton that defaults to SocketInterfaceImpl, i.e., existing SocketInterface implementation. A different SocketInterface implementation could be injected at api init time. Signed-off-by: Florin Coras --- include/envoy/network/socket.h | 47 ++++++ source/common/api/BUILD | 1 + source/common/api/api_impl.h | 1 + source/common/network/BUILD | 12 +- .../addr_family_aware_socket_option_impl.cc | 5 +- source/common/network/base_listener_impl.cc | 2 +- source/common/network/connection_impl.cc | 2 +- source/common/network/listen_socket_impl.cc | 5 +- source/common/network/listen_socket_impl.h | 10 +- source/common/network/listener_impl.cc | 2 +- source/common/network/socket_impl.cc | 144 +----------------- source/common/network/socket_impl.h | 43 ------ .../common/network/socket_interface_impl.cc | 142 +++++++++++++++++ source/common/network/socket_interface_impl.h | 26 ++++ source/common/network/utility.h | 2 - .../filters/udp/udp_proxy/udp_proxy_filter.h | 6 +- .../quic_listeners/quiche/envoy_quic_utils.cc | 3 +- .../stat_sinks/common/statsd/statsd.cc | 3 +- .../extensions/tracers/xray/daemon_broker.cc | 5 +- source/server/filter_chain_manager_impl.cc | 5 +- ...dr_family_aware_socket_option_impl_test.cc | 19 +-- test/common/network/listener_impl_test.cc | 4 +- .../quiche/platform/quic_platform_test.cc | 1 + .../common/statsd/udp_statsd_test.cc | 1 + test/test_common/BUILD | 1 + test/test_common/network_utility.cc | 5 +- tools/spelling/spelling_dictionary.txt | 1 + 27 files changed, 280 insertions(+), 218 deletions(-) create mode 100644 source/common/network/socket_interface_impl.cc create mode 100644 source/common/network/socket_interface_impl.h diff --git a/include/envoy/network/socket.h b/include/envoy/network/socket.h index ac8152ecc86a..d2d87928e98e 100644 --- a/include/envoy/network/socket.h +++ b/include/envoy/network/socket.h @@ -222,5 +222,52 @@ using SocketPtr = std::unique_ptr; using SocketSharedPtr = std::shared_ptr; using SocketOptRef = absl::optional>; +class SocketInterface { +public: + virtual ~SocketInterface() = default; + + /** + * Low level api to create a socket in the underlying host stack. 
Does not create a + * @ref Network::SocketImpl + * @param type type of socket requested + * @param addr_type type of address used with the socket + * @param version IP version if address type is IP + * @return @ref Network::IoHandlePtr that wraps the underlying socket file descriptor + */ + virtual IoHandlePtr socket(Address::SocketType type, Address::Type addr_type, + Address::IpVersion version) PURE; + + /** + * Low level api to create a socket in the underlying host stack. Does not create an + * @ref Network::SocketImpl + * @param socket_type type of socket requested + * @param addr address that is gleaned for address type and version if needed + * @return @ref Network::IoHandlePtr that wraps the underlying socket file descriptor + */ + virtual IoHandlePtr socket(Address::SocketType socket_type, + const Address::InstanceConstSharedPtr addr) PURE; + + /** + * Returns true if the given family is supported on this machine. + * @param domain the IP family. + */ + virtual bool ipFamilySupported(int domain) PURE; + + /** + * Obtain an address from a bound file descriptor. Raises an EnvoyException on failure. + * @param fd socket file descriptor + * @return InstanceConstSharedPtr for bound address. + */ + virtual Address::InstanceConstSharedPtr addressFromFd(os_fd_t fd) PURE; + + /** + * Obtain the address of the peer of the socket with the specified file descriptor. + * Raises an EnvoyException on failure. + * @param fd socket file descriptor + * @return InstanceConstSharedPtr for peer address. + */ + virtual Address::InstanceConstSharedPtr peerAddressFromFd(os_fd_t fd) PURE; +}; + } // namespace Network } // namespace Envoy \ No newline at end of file diff --git a/source/common/api/BUILD b/source/common/api/BUILD index 1fd681a0cc7d..7e7264f8f0ff 100644 --- a/source/common/api/BUILD +++ b/source/common/api/BUILD @@ -17,6 +17,7 @@ envoy_cc_library( "//include/envoy/api:api_interface", "//source/common/common:thread_lib", "//source/common/event:dispatcher_lib", + "//source/common/network:socket_lib", ], ) diff --git a/source/common/api/api_impl.h b/source/common/api/api_impl.h index b3318f468637..0096da46ec4b 100644 --- a/source/common/api/api_impl.h +++ b/source/common/api/api_impl.h @@ -6,6 +6,7 @@ #include "envoy/api/api.h" #include "envoy/event/timer.h" #include "envoy/filesystem/filesystem.h" +#include "envoy/network/socket.h" #include "envoy/thread/thread.h" namespace Envoy { diff --git a/source/common/network/BUILD b/source/common/network/BUILD index 54f82d48ef1a..043e63783d70 100644 --- a/source/common/network/BUILD +++ b/source/common/network/BUILD @@ -171,12 +171,19 @@ envoy_cc_library( envoy_cc_library( name = "socket_lib", - srcs = ["socket_impl.cc"], - hdrs = ["socket_impl.h"], + srcs = [ + "socket_impl.cc", + "socket_interface_impl.cc", + ], + hdrs = [ + "socket_impl.h", + "socket_interface_impl.h", + ], deps = [ ":address_lib", "//include/envoy/network:socket_interface", "//source/common/common:assert_lib", + "//source/common/singleton:threadsafe_singleton", ], ) @@ -277,6 +284,7 @@ envoy_cc_library( external_deps = ["abseil_optional"], deps = [ ":address_lib", + ":socket_lib", ":socket_option_lib", "//include/envoy/network:listen_socket_interface", "//source/common/api:os_sys_calls_lib", diff --git a/source/common/network/addr_family_aware_socket_option_impl.cc b/source/common/network/addr_family_aware_socket_option_impl.cc index 700a20733556..35d870b89536 100644 --- a/source/common/network/addr_family_aware_socket_option_impl.cc +++ 
b/source/common/network/addr_family_aware_socket_option_impl.cc @@ -7,7 +7,7 @@ #include "common/api/os_sys_calls_impl.h" #include "common/common/assert.h" #include "common/network/address_impl.h" -#include "common/network/socket_impl.h" +#include "common/network/socket_interface_impl.h" #include "common/network/socket_option_impl.h" namespace Envoy { @@ -30,7 +30,8 @@ absl::optional getVersionFromSocket(const Socket& socket) { if (socket.localAddress()) { return {getVersionFromAddress(socket.localAddress())}; } else { - return {getVersionFromAddress(SocketInterface::addressFromFd(socket.ioHandle().fd()))}; + return {getVersionFromAddress( + SocketInterfaceSingleton::get().addressFromFd(socket.ioHandle().fd()))}; } } catch (const EnvoyException&) { // Ignore, we get here because we failed in getsockname(). diff --git a/source/common/network/base_listener_impl.cc b/source/common/network/base_listener_impl.cc index 16e1f7ef8bd7..ffad88865fde 100644 --- a/source/common/network/base_listener_impl.cc +++ b/source/common/network/base_listener_impl.cc @@ -16,7 +16,7 @@ namespace Envoy { namespace Network { Address::InstanceConstSharedPtr BaseListenerImpl::getLocalAddress(os_fd_t fd) { - return SocketInterface::addressFromFd(fd); + return SocketInterfaceSingleton::get().addressFromFd(fd); } BaseListenerImpl::BaseListenerImpl(Event::DispatcherImpl& dispatcher, SocketSharedPtr socket) diff --git a/source/common/network/connection_impl.cc b/source/common/network/connection_impl.cc index 6d2c75662a32..57b00be35548 100644 --- a/source/common/network/connection_impl.cc +++ b/source/common/network/connection_impl.cc @@ -774,7 +774,7 @@ void ClientConnectionImpl::connect() { // types, such as UDS, don't have a notion of a local address. // TODO(fcoras) move to SocketImpl? if (socket_->remoteAddress()->type() == Address::Type::Ip) { - socket_->setLocalAddress(SocketInterface::addressFromFd(ioHandle().fd())); + socket_->setLocalAddress(SocketInterfaceSingleton::get().addressFromFd(ioHandle().fd())); } } } // namespace Network diff --git a/source/common/network/listen_socket_impl.cc b/source/common/network/listen_socket_impl.cc index 7d7a61913d0f..20725bceb6ba 100644 --- a/source/common/network/listen_socket_impl.cc +++ b/source/common/network/listen_socket_impl.cc @@ -31,7 +31,7 @@ Api::SysCallIntResult ListenSocketImpl::bind(Network::Address::InstanceConstShar if (local_address_->type() == Address::Type::Ip && local_address_->ip()->port() == 0) { // If the port we bind is zero, then the OS will pick a free port for us (assuming there are // any), and we need to find out the port number that the OS picked. 
- local_address_ = SocketInterface::addressFromFd(io_handle_->fd()); + local_address_ = SocketInterfaceSingleton::get().addressFromFd(io_handle_->fd()); } return {0, 0}; @@ -69,7 +69,8 @@ void NetworkListenSocket< NetworkSocketTrait>::setPrebindSocketOptions() {} UdsListenSocket::UdsListenSocket(const Address::InstanceConstSharedPtr& address) - : ListenSocketImpl(SocketInterface::socket(Address::SocketType::Stream, address), address) { + : ListenSocketImpl(SocketInterfaceSingleton::get().socket(Address::SocketType::Stream, address), + address) { RELEASE_ASSERT(io_handle_->fd() != -1, ""); bind(local_address_); } diff --git a/source/common/network/listen_socket_impl.h b/source/common/network/listen_socket_impl.h index 7b5f5af7ad7c..99b6a6e499e3 100644 --- a/source/common/network/listen_socket_impl.h +++ b/source/common/network/listen_socket_impl.h @@ -11,6 +11,7 @@ #include "common/common/assert.h" #include "common/network/socket_impl.h" +#include "common/network/socket_interface_impl.h" namespace Envoy { namespace Network { @@ -42,7 +43,8 @@ template class NetworkListenSocket : public ListenSocketImpl { public: NetworkListenSocket(const Address::InstanceConstSharedPtr& address, const Network::Socket::OptionsSharedPtr& options, bool bind_to_port) - : ListenSocketImpl(Network::SocketInterface::socket(T::type, address), address) { + : ListenSocketImpl(Network::SocketInterfaceSingleton::get().socket(T::type, address), + address) { RELEASE_ASSERT(SOCKET_VALID(io_handle_->fd()), ""); setPrebindSocketOptions(); @@ -150,9 +152,9 @@ class ClientSocketImpl : public ConnectionSocketImpl { public: ClientSocketImpl(const Address::InstanceConstSharedPtr& remote_address, const OptionsSharedPtr& options) - : ConnectionSocketImpl( - Network::SocketInterface::socket(Address::SocketType::Stream, remote_address), nullptr, - remote_address) { + : ConnectionSocketImpl(Network::SocketInterfaceSingleton::get().socket( + Address::SocketType::Stream, remote_address), + nullptr, remote_address) { if (options) { addOptions(options); } diff --git a/source/common/network/listener_impl.cc b/source/common/network/listener_impl.cc index 6f4e777752be..e6a6c4dc3575 100644 --- a/source/common/network/listener_impl.cc +++ b/source/common/network/listener_impl.cc @@ -39,7 +39,7 @@ void ListenerImpl::listenCallback(evconnlistener*, evutil_socket_t fd, sockaddr* // IPv4 local_address was created from an IPv6 mapped IPv4 address. const Address::InstanceConstSharedPtr& remote_address = (remote_addr->sa_family == AF_UNIX) - ? SocketInterface::peerAddressFromFd(io_handle->fd()) + ? 
SocketInterfaceSingleton::get().peerAddressFromFd(io_handle->fd()) : Address::addressFromSockAddr(*reinterpret_cast(remote_addr), remote_addr_len, local_address->ip()->version() == Address::IpVersion::v6); diff --git a/source/common/network/socket_impl.cc b/source/common/network/socket_impl.cc index 26beacf07375..9232b69626b5 100644 --- a/source/common/network/socket_impl.cc +++ b/source/common/network/socket_impl.cc @@ -5,142 +5,17 @@ #include "common/api/os_sys_calls_impl.h" #include "common/network/address_impl.h" #include "common/network/io_socket_handle_impl.h" +#include "common/network/socket_interface_impl.h" namespace Envoy { namespace Network { -IoHandlePtr SocketInterface::socket(Address::SocketType socket_type, Address::Type addr_type, - Address::IpVersion version) { -#if defined(__APPLE__) || defined(WIN32) - int flags = 0; -#else - int flags = SOCK_NONBLOCK; -#endif - - if (socket_type == Address::SocketType::Stream) { - flags |= SOCK_STREAM; - } else { - flags |= SOCK_DGRAM; - } - - int domain; - if (addr_type == Address::Type::Ip) { - if (version == Address::IpVersion::v6) { - domain = AF_INET6; - } else { - ASSERT(version == Address::IpVersion::v4); - domain = AF_INET; - } - } else { - ASSERT(addr_type == Address::Type::Pipe); - domain = AF_UNIX; - } - - const Api::SysCallSocketResult result = Api::OsSysCallsSingleton::get().socket(domain, flags, 0); - RELEASE_ASSERT(SOCKET_VALID(result.rc_), - fmt::format("socket(2) failed, got error: {}", strerror(result.errno_))); - IoHandlePtr io_handle = std::make_unique(result.rc_); - -#if defined(__APPLE__) || defined(WIN32) - // Cannot set SOCK_NONBLOCK as a ::socket flag. - const int rc = Api::OsSysCallsSingleton::get().setsocketblocking(io_handle->fd(), false).rc_; - RELEASE_ASSERT(!SOCKET_FAILURE(rc), ""); -#endif - - return io_handle; -} - -IoHandlePtr SocketInterface::socket(Address::SocketType socket_type, - const Address::InstanceConstSharedPtr addr) { - Address::IpVersion ip_version = addr->ip() ? addr->ip()->version() : Address::IpVersion::v4; - IoHandlePtr io_handle = SocketInterface::socket(socket_type, addr->type(), ip_version); - if (addr->type() == Address::Type::Ip && addr->ip()->version() == Address::IpVersion::v6) { - // Setting IPV6_V6ONLY restricts the IPv6 socket to IPv6 connections only. 
- const int v6only = addr->ip()->ipv6()->v6only(); - const Api::SysCallIntResult result = Api::OsSysCallsSingleton::get().setsockopt( - io_handle->fd(), IPPROTO_IPV6, IPV6_V6ONLY, reinterpret_cast(&v6only), - sizeof(v6only)); - RELEASE_ASSERT(!SOCKET_FAILURE(result.rc_), ""); - } - return io_handle; -} - -bool SocketInterface::ipFamilySupported(int domain) { - Api::OsSysCalls& os_sys_calls = Api::OsSysCallsSingleton::get(); - const Api::SysCallSocketResult result = os_sys_calls.socket(domain, SOCK_STREAM, 0); - if (SOCKET_VALID(result.rc_)) { - RELEASE_ASSERT(os_sys_calls.close(result.rc_).rc_ == 0, - fmt::format("Fail to close fd: response code {}", strerror(result.rc_))); - } - return SOCKET_VALID(result.rc_); -} - -Address::InstanceConstSharedPtr SocketInterface::addressFromFd(os_fd_t fd) { - sockaddr_storage ss; - socklen_t ss_len = sizeof ss; - auto& os_sys_calls = Api::OsSysCallsSingleton::get(); - Api::SysCallIntResult result = - os_sys_calls.getsockname(fd, reinterpret_cast(&ss), &ss_len); - if (result.rc_ != 0) { - throw EnvoyException(fmt::format("getsockname failed for '{}': ({}) {}", fd, result.errno_, - strerror(result.errno_))); - } - int socket_v6only = 0; - if (ss.ss_family == AF_INET6) { - socklen_t size_int = sizeof(socket_v6only); - result = os_sys_calls.getsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &socket_v6only, &size_int); -#ifdef WIN32 - // On Windows, it is possible for this getsockopt() call to fail. - // This can happen if the address we are trying to connect to has nothing - // listening. So we can't use RELEASE_ASSERT and instead must throw an - // exception - if (SOCKET_FAILURE(result.rc_)) { - throw EnvoyException(fmt::format("getsockopt failed for '{}': ({}) {}", fd, result.errno_, - strerror(result.errno_))); - } -#else - RELEASE_ASSERT(result.rc_ == 0, ""); -#endif - } - return Address::addressFromSockAddr(ss, ss_len, socket_v6only); -} - -Address::InstanceConstSharedPtr SocketInterface::peerAddressFromFd(os_fd_t fd) { - sockaddr_storage ss; - socklen_t ss_len = sizeof ss; - auto& os_sys_calls = Api::OsSysCallsSingleton::get(); - Api::SysCallIntResult result = - os_sys_calls.getpeername(fd, reinterpret_cast(&ss), &ss_len); - if (result.rc_ != 0) { - throw EnvoyException( - fmt::format("getpeername failed for '{}': {}", fd, strerror(result.errno_))); - } -#ifdef __APPLE__ - if (ss_len == sizeof(sockaddr) && ss.ss_family == AF_UNIX) -#else - if (ss_len == sizeof(sa_family_t) && ss.ss_family == AF_UNIX) -#endif - { - // For Unix domain sockets, can't find out the peer name, but it should match our own - // name for the socket (i.e. the path should match, barring any namespace or other - // mechanisms to hide things, of which there are many). 
- ss_len = sizeof ss; - result = os_sys_calls.getsockname(fd, reinterpret_cast(&ss), &ss_len); - if (result.rc_ != 0) { - throw EnvoyException( - fmt::format("getsockname failed for '{}': {}", fd, strerror(result.errno_))); - } - } - return Address::addressFromSockAddr(ss, ss_len); -} - SocketImpl::SocketImpl(Address::SocketType type, Address::Type addr_type, Address::IpVersion version) - : io_handle_(SocketInterface::socket(type, addr_type, version)), sock_type_(type), - addr_type_(addr_type) {} + : io_handle_(SocketInterfaceSingleton::get().socket(type, addr_type, version)) {} SocketImpl::SocketImpl(Address::SocketType sock_type, const Address::InstanceConstSharedPtr addr) - : io_handle_(SocketInterface::socket(sock_type, addr)), sock_type_(sock_type), + : io_handle_(SocketInterfaceSingleton::get().socket(sock_type, addr)), sock_type_(sock_type), addr_type_(addr->type()) {} SocketImpl::SocketImpl(IoHandlePtr&& io_handle, @@ -149,11 +24,7 @@ SocketImpl::SocketImpl(IoHandlePtr&& io_handle, // Should not happen but some tests inject -1 fds if (SOCKET_INVALID(io_handle_->fd())) { - if (local_address != nullptr) { - addr_type_ = local_address->type(); - } else { - addr_type_ = Address::Type::Ip; - } + addr_type_ = local_address != nullptr ? local_address->type() : Address::Type::Ip; return; } @@ -166,14 +37,11 @@ SocketImpl::SocketImpl(IoHandlePtr&& io_handle, // This should never happen in practice but too many tests inject fake fds ... if (result.rc_ < 0) { + addr_type_ = local_address != nullptr ? local_address->type() : Address::Type::Ip; return; } - if (addr.ss_family == AF_UNIX) { - addr_type_ = Address::Type::Pipe; - } else { - addr_type_ = Address::Type::Ip; - } + addr_type_ = addr.ss_family == AF_UNIX ? Address::Type::Pipe : Address::Type::Ip; } Api::SysCallIntResult SocketImpl::bind(Network::Address::InstanceConstSharedPtr address) { diff --git a/source/common/network/socket_impl.h b/source/common/network/socket_impl.h index 050455a487ec..e4e14798f67c 100644 --- a/source/common/network/socket_impl.h +++ b/source/common/network/socket_impl.h @@ -7,49 +7,6 @@ namespace Envoy { namespace Network { -namespace SocketInterface { - -/** - * Low level api to create a socket in the underlying host stack. Does not create an - * Envoy socket. - * @param type type of socket requested - * @param addr_type type of address used with the socket - * @param version IP version if address type is IP - * @return Socket file descriptor - */ -IoHandlePtr socket(Address::SocketType type, Address::Type addr_type, Address::IpVersion version); - -/** - * Low level api to create a socket in the underlying host stack. Does not create an - * Envoy socket. - * @param socket_type type of socket requested - * @param addr address that is gleaned for address type and version if needed (@see createSocket) - */ -IoHandlePtr socket(Address::SocketType socket_type, const Address::InstanceConstSharedPtr addr); - -/** - * Returns true if the given family is supported on this machine. - * @param domain the IP family. - */ -bool ipFamilySupported(int domain); - -/** - * Obtain an address from a bound file descriptor. Raises an EnvoyException on failure. - * @param fd socket file descriptor - * @return InstanceConstSharedPtr for bound address. - */ -Address::InstanceConstSharedPtr addressFromFd(os_fd_t fd); - -/** - * Obtain the address of the peer of the socket with the specified file descriptor. - * Raises an EnvoyException on failure. - * @param fd socket file descriptor - * @return InstanceConstSharedPtr for peer address. 
- */ -Address::InstanceConstSharedPtr peerAddressFromFd(os_fd_t fd); - -} // namespace SocketInterface - class SocketImpl : public virtual Socket { public: SocketImpl(Address::SocketType type, Address::Type addr_type, Address::IpVersion version); diff --git a/source/common/network/socket_interface_impl.cc b/source/common/network/socket_interface_impl.cc new file mode 100644 index 000000000000..1e9481e2368d --- /dev/null +++ b/source/common/network/socket_interface_impl.cc @@ -0,0 +1,142 @@ +#include "common/network/socket_interface_impl.h" + +#include "envoy/common/exception.h" +#include "envoy/network/socket.h" + +#include "common/api/os_sys_calls_impl.h" +#include "common/network/address_impl.h" +#include "common/network/io_socket_handle_impl.h" + +namespace Envoy { +namespace Network { + +IoHandlePtr SocketInterfaceImpl::socket(Address::SocketType socket_type, Address::Type addr_type, + Address::IpVersion version) { +#if defined(__APPLE__) || defined(WIN32) + int flags = 0; +#else + int flags = SOCK_NONBLOCK; +#endif + + if (socket_type == Address::SocketType::Stream) { + flags |= SOCK_STREAM; + } else { + flags |= SOCK_DGRAM; + } + + int domain; + if (addr_type == Address::Type::Ip) { + if (version == Address::IpVersion::v6) { + domain = AF_INET6; + } else { + ASSERT(version == Address::IpVersion::v4); + domain = AF_INET; + } + } else { + ASSERT(addr_type == Address::Type::Pipe); + domain = AF_UNIX; + } + + const Api::SysCallSocketResult result = Api::OsSysCallsSingleton::get().socket(domain, flags, 0); + RELEASE_ASSERT(SOCKET_VALID(result.rc_), + fmt::format("socket(2) failed, got error: {}", strerror(result.errno_))); + IoHandlePtr io_handle = std::make_unique(result.rc_); + +#if defined(__APPLE__) || defined(WIN32) + // Cannot set SOCK_NONBLOCK as a ::socket flag. + const int rc = Api::OsSysCallsSingleton::get().setsocketblocking(io_handle->fd(), false).rc_; + RELEASE_ASSERT(!SOCKET_FAILURE(rc), ""); +#endif + + return io_handle; +} + +IoHandlePtr SocketInterfaceImpl::socket(Address::SocketType socket_type, + const Address::InstanceConstSharedPtr addr) { + Address::IpVersion ip_version = addr->ip() ? addr->ip()->version() : Address::IpVersion::v4; + IoHandlePtr io_handle = SocketInterfaceImpl::socket(socket_type, addr->type(), ip_version); + if (addr->type() == Address::Type::Ip && addr->ip()->version() == Address::IpVersion::v6) { + // Setting IPV6_V6ONLY restricts the IPv6 socket to IPv6 connections only. 
+ const int v6only = addr->ip()->ipv6()->v6only(); + const Api::SysCallIntResult result = Api::OsSysCallsSingleton::get().setsockopt( + io_handle->fd(), IPPROTO_IPV6, IPV6_V6ONLY, reinterpret_cast(&v6only), + sizeof(v6only)); + RELEASE_ASSERT(!SOCKET_FAILURE(result.rc_), ""); + } + return io_handle; +} + +bool SocketInterfaceImpl::ipFamilySupported(int domain) { + Api::OsSysCalls& os_sys_calls = Api::OsSysCallsSingleton::get(); + const Api::SysCallSocketResult result = os_sys_calls.socket(domain, SOCK_STREAM, 0); + if (SOCKET_VALID(result.rc_)) { + RELEASE_ASSERT(os_sys_calls.close(result.rc_).rc_ == 0, + fmt::format("Fail to close fd: response code {}", strerror(result.rc_))); + } + return SOCKET_VALID(result.rc_); +} + +Address::InstanceConstSharedPtr SocketInterfaceImpl::addressFromFd(os_fd_t fd) { + sockaddr_storage ss; + socklen_t ss_len = sizeof ss; + auto& os_sys_calls = Api::OsSysCallsSingleton::get(); + Api::SysCallIntResult result = + os_sys_calls.getsockname(fd, reinterpret_cast(&ss), &ss_len); + if (result.rc_ != 0) { + throw EnvoyException(fmt::format("getsockname failed for '{}': ({}) {}", fd, result.errno_, + strerror(result.errno_))); + } + int socket_v6only = 0; + if (ss.ss_family == AF_INET6) { + socklen_t size_int = sizeof(socket_v6only); + result = os_sys_calls.getsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &socket_v6only, &size_int); +#ifdef WIN32 + // On Windows, it is possible for this getsockopt() call to fail. + // This can happen if the address we are trying to connect to has nothing + // listening. So we can't use RELEASE_ASSERT and instead must throw an + // exception + if (SOCKET_FAILURE(result.rc_)) { + throw EnvoyException(fmt::format("getsockopt failed for '{}': ({}) {}", fd, result.errno_, + strerror(result.errno_))); + } +#else + RELEASE_ASSERT(result.rc_ == 0, ""); +#endif + } + return Address::addressFromSockAddr(ss, ss_len, socket_v6only); +} + +Address::InstanceConstSharedPtr SocketInterfaceImpl::peerAddressFromFd(os_fd_t fd) { + sockaddr_storage ss; + socklen_t ss_len = sizeof ss; + auto& os_sys_calls = Api::OsSysCallsSingleton::get(); + Api::SysCallIntResult result = + os_sys_calls.getpeername(fd, reinterpret_cast(&ss), &ss_len); + if (result.rc_ != 0) { + throw EnvoyException( + fmt::format("getpeername failed for '{}': {}", fd, strerror(result.errno_))); + } +#ifdef __APPLE__ + if (ss_len == sizeof(sockaddr) && ss.ss_family == AF_UNIX) +#else + if (ss_len == sizeof(sa_family_t) && ss.ss_family == AF_UNIX) +#endif + { + // For Unix domain sockets, can't find out the peer name, but it should match our own + // name for the socket (i.e. the path should match, barring any namespace or other + // mechanisms to hide things, of which there are many). 
+ ss_len = sizeof ss; + result = os_sys_calls.getsockname(fd, reinterpret_cast(&ss), &ss_len); + if (result.rc_ != 0) { + throw EnvoyException( + fmt::format("getsockname failed for '{}': {}", fd, strerror(result.errno_))); + } + } + return Address::addressFromSockAddr(ss, ss_len); +} + +static SocketInterfaceLoader* socket_interface_ = + new SocketInterfaceLoader(std::make_unique()); + +} // namespace Network +} // namespace Envoy \ No newline at end of file diff --git a/source/common/network/socket_interface_impl.h b/source/common/network/socket_interface_impl.h new file mode 100644 index 000000000000..190a2765031c --- /dev/null +++ b/source/common/network/socket_interface_impl.h @@ -0,0 +1,26 @@ +#pragma once + +#include "envoy/network/address.h" +#include "envoy/network/socket.h" + +#include "common/singleton/threadsafe_singleton.h" + +namespace Envoy { +namespace Network { + +class SocketInterfaceImpl : public SocketInterface { +public: + IoHandlePtr socket(Address::SocketType socket_type, Address::Type addr_type, + Address::IpVersion version) override; + IoHandlePtr socket(Address::SocketType socket_type, + const Address::InstanceConstSharedPtr addr) override; + bool ipFamilySupported(int domain) override; + Address::InstanceConstSharedPtr addressFromFd(os_fd_t fd) override; + Address::InstanceConstSharedPtr peerAddressFromFd(os_fd_t fd) override; +}; + +using SocketInterfaceSingleton = InjectableSingleton; +using SocketInterfaceLoader = ScopedInjectableLoader; + +} // namespace Network +} // namespace Envoy \ No newline at end of file diff --git a/source/common/network/utility.h b/source/common/network/utility.h index 2f7145ed928e..fd4b925aea9a 100644 --- a/source/common/network/utility.h +++ b/source/common/network/utility.h @@ -9,8 +9,6 @@ #include "envoy/network/connection.h" #include "envoy/network/listener.h" -#include "common/network/socket_impl.h" - #include "absl/strings/string_view.h" namespace Envoy { diff --git a/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.h b/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.h index 9b883646c9fa..dc16c2748082 100644 --- a/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.h +++ b/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.h @@ -6,7 +6,7 @@ #include "envoy/network/filter.h" #include "envoy/upstream/cluster_manager.h" -#include "common/network/socket_impl.h" +#include "common/network/socket_interface_impl.h" #include "common/network/utility.h" #include "absl/container/flat_hash_set.h" @@ -222,8 +222,8 @@ class UdpProxyFilter : public Network::UdpListenerReadFilter, virtual Network::IoHandlePtr createIoHandle(const Upstream::HostConstSharedPtr& host) { // Virtual so this can be overridden in unit tests. - return Network::SocketInterface::socket(Network::Address::SocketType::Datagram, - host->address()); + return Network::SocketInterfaceSingleton::get().socket(Network::Address::SocketType::Datagram, + host->address()); } // Upstream::ClusterUpdateCallbacks diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_utils.cc b/source/extensions/quic_listeners/quiche/envoy_quic_utils.cc index 8659103d1b74..f923d2000fd7 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_utils.cc +++ b/source/extensions/quic_listeners/quiche/envoy_quic_utils.cc @@ -112,7 +112,8 @@ createConnectionSocket(Network::Address::InstanceConstSharedPtr& peer_addr, // TODO(fcoras) maybe move to SocketImpl? if (local_addr->ip()->port() == 0) { // Get ephemeral port number. 
- local_addr = Network::SocketInterface::addressFromFd(connection_socket->ioHandle().fd()); + local_addr = + Network::SocketInterfaceSingleton::get().addressFromFd(connection_socket->ioHandle().fd()); } if (!Network::Socket::applyOptions(connection_socket->options(), *connection_socket, envoy::config::core::v3::SocketOption::STATE_BOUND)) { diff --git a/source/extensions/stat_sinks/common/statsd/statsd.cc b/source/extensions/stat_sinks/common/statsd/statsd.cc index 320c4b85d922..40d83274de5a 100644 --- a/source/extensions/stat_sinks/common/statsd/statsd.cc +++ b/source/extensions/stat_sinks/common/statsd/statsd.cc @@ -15,6 +15,7 @@ #include "common/common/fmt.h" #include "common/common/utility.h" #include "common/config/utility.h" +#include "common/network/socket_interface_impl.h" #include "common/network/utility.h" #include "common/stats/symbol_table_impl.h" @@ -27,7 +28,7 @@ namespace Common { namespace Statsd { UdpStatsdSink::WriterImpl::WriterImpl(UdpStatsdSink& parent) - : parent_(parent), io_handle_(Network::SocketInterface::socket( + : parent_(parent), io_handle_(Network::SocketInterfaceSingleton::get().socket( Network::Address::SocketType::Datagram, parent_.server_address_)) {} void UdpStatsdSink::WriterImpl::write(const std::string& message) { diff --git a/source/extensions/tracers/xray/daemon_broker.cc b/source/extensions/tracers/xray/daemon_broker.cc index dd80bcdfc121..6e5f1622e7aa 100644 --- a/source/extensions/tracers/xray/daemon_broker.cc +++ b/source/extensions/tracers/xray/daemon_broker.cc @@ -3,6 +3,7 @@ #include "envoy/network/address.h" #include "common/buffer/buffer_impl.h" +#include "common/network/socket_interface_impl.h" #include "common/network/utility.h" #include "common/protobuf/utility.h" @@ -29,8 +30,8 @@ std::string createHeader(const std::string& format, uint32_t version) { DaemonBrokerImpl::DaemonBrokerImpl(const std::string& daemon_endpoint) : address_(Network::Utility::parseInternetAddressAndPort(daemon_endpoint, false /*v6only*/)), - io_handle_( - Network::SocketInterface::socket(Network::Address::SocketType::Datagram, address_)) {} + io_handle_(Network::SocketInterfaceSingleton::get().socket( + Network::Address::SocketType::Datagram, address_)) {} void DaemonBrokerImpl::send(const std::string& data) const { auto& logger = Logger::Registry::getLog(Logger::Id::tracing); diff --git a/source/server/filter_chain_manager_impl.cc b/source/server/filter_chain_manager_impl.cc index 369a2873abf6..f6bdf328e920 100644 --- a/source/server/filter_chain_manager_impl.cc +++ b/source/server/filter_chain_manager_impl.cc @@ -6,6 +6,7 @@ #include "common/common/empty_string.h" #include "common/common/fmt.h" #include "common/config/utility.h" +#include "common/network/socket_interface_impl.h" #include "common/protobuf/utility.h" #include "server/configuration_impl.h" @@ -360,11 +361,11 @@ std::pair> makeCidrListEntry(const s const T& data) { std::vector subnets; if (cidr == EMPTY_STRING) { - if (Network::SocketInterface::ipFamilySupported(AF_INET)) { + if (Network::SocketInterfaceSingleton::get().ipFamilySupported(AF_INET)) { subnets.push_back( Network::Address::CidrRange::create(Network::Utility::getIpv4CidrCatchAllAddress())); } - if (Network::SocketInterface::ipFamilySupported(AF_INET6)) { + if (Network::SocketInterfaceSingleton::get().ipFamilySupported(AF_INET6)) { subnets.push_back( Network::Address::CidrRange::create(Network::Utility::getIpv6CidrCatchAllAddress())); } diff --git a/test/common/network/addr_family_aware_socket_option_impl_test.cc 
b/test/common/network/addr_family_aware_socket_option_impl_test.cc index d65fbe5e5e73..dc874f2c3b41 100644 --- a/test/common/network/addr_family_aware_socket_option_impl_test.cc +++ b/test/common/network/addr_family_aware_socket_option_impl_test.cc @@ -3,6 +3,7 @@ #include "common/network/addr_family_aware_socket_option_impl.h" #include "common/network/io_socket_handle_impl.h" +#include "common/network/socket_interface_impl.h" #include "common/network/utility.h" #include "test/common/network/socket_option_test.h" @@ -46,7 +47,7 @@ TEST_F(AddrFamilyAwareSocketOptionImplTest, SetOptionFailure) { // If a platform supports IPv4 socket option variant for an IPv4 address, it works TEST_F(AddrFamilyAwareSocketOptionImplTest, SetOptionSuccess) { Address::Ipv4Instance address("1.2.3.4", 5678); - IoHandlePtr io_handle = Network::SocketInterface::socket( + IoHandlePtr io_handle = Network::SocketInterfaceSingleton::get().socket( Address::SocketType::Stream, std::make_shared(address)); EXPECT_CALL(testing::Const(socket_), ioHandle()).WillRepeatedly(testing::ReturnRef(*io_handle)); @@ -62,7 +63,7 @@ TEST_F(AddrFamilyAwareSocketOptionImplTest, SetOptionSuccess) { // If a platform doesn't support IPv4 socket option variant for an IPv4 address we fail TEST_F(AddrFamilyAwareSocketOptionImplTest, V4EmptyOptionNames) { Address::Ipv4Instance address("1.2.3.4", 5678); - IoHandlePtr io_handle = Network::SocketInterface::socket( + IoHandlePtr io_handle = Network::SocketInterfaceSingleton::get().socket( Address::SocketType::Stream, std::make_shared(address)); EXPECT_CALL(testing::Const(socket_), ioHandle()).WillRepeatedly(testing::ReturnRef(*io_handle)); AddrFamilyAwareSocketOptionImpl socket_option{ @@ -76,7 +77,7 @@ TEST_F(AddrFamilyAwareSocketOptionImplTest, V4EmptyOptionNames) { // If a platform doesn't support IPv4 and IPv6 socket option variants for an IPv4 address, we fail TEST_F(AddrFamilyAwareSocketOptionImplTest, V6EmptyOptionNames) { Address::Ipv6Instance address("::1:2:3:4", 5678); - IoHandlePtr io_handle = Network::SocketInterface::socket( + IoHandlePtr io_handle = Network::SocketInterfaceSingleton::get().socket( Address::SocketType::Stream, std::make_shared(address)); EXPECT_CALL(testing::Const(socket_), ioHandle()).WillRepeatedly(testing::ReturnRef(*io_handle)); AddrFamilyAwareSocketOptionImpl socket_option{ @@ -91,7 +92,7 @@ TEST_F(AddrFamilyAwareSocketOptionImplTest, V6EmptyOptionNames) { // IPv4 variant TEST_F(AddrFamilyAwareSocketOptionImplTest, V4IgnoreV6) { Address::Ipv4Instance address("1.2.3.4", 5678); - IoHandlePtr io_handle = Network::SocketInterface::socket( + IoHandlePtr io_handle = Network::SocketInterfaceSingleton::get().socket( Address::SocketType::Stream, std::make_shared(address)); EXPECT_CALL(testing::Const(socket_), ioHandle()).WillRepeatedly(testing::ReturnRef(*io_handle)); @@ -105,7 +106,7 @@ TEST_F(AddrFamilyAwareSocketOptionImplTest, V4IgnoreV6) { // If a platform supports IPv6 socket option variant for an IPv6 address it works TEST_F(AddrFamilyAwareSocketOptionImplTest, V6Only) { Address::Ipv6Instance address("::1:2:3:4", 5678); - IoHandlePtr io_handle = Network::SocketInterface::socket( + IoHandlePtr io_handle = Network::SocketInterfaceSingleton::get().socket( Address::SocketType::Stream, std::make_shared(address)); EXPECT_CALL(testing::Const(socket_), ioHandle()).WillRepeatedly(testing::ReturnRef(*io_handle)); @@ -122,7 +123,7 @@ TEST_F(AddrFamilyAwareSocketOptionImplTest, V6Only) { // we apply the IPv4 variant. 
TEST_F(AddrFamilyAwareSocketOptionImplTest, V6OnlyV4Fallback) { Address::Ipv6Instance address("::1:2:3:4", 5678); - IoHandlePtr io_handle = Network::SocketInterface::socket( + IoHandlePtr io_handle = Network::SocketInterfaceSingleton::get().socket( Address::SocketType::Stream, std::make_shared(address)); EXPECT_CALL(testing::Const(socket_), ioHandle()).WillRepeatedly(testing::ReturnRef(*io_handle)); @@ -139,7 +140,7 @@ TEST_F(AddrFamilyAwareSocketOptionImplTest, V6OnlyV4Fallback) { // AddrFamilyAwareSocketOptionImpl::setIpSocketOption() works with the IPv6 variant. TEST_F(AddrFamilyAwareSocketOptionImplTest, V6Precedence) { Address::Ipv6Instance address("::1:2:3:4", 5678); - IoHandlePtr io_handle = Network::SocketInterface::socket( + IoHandlePtr io_handle = Network::SocketInterfaceSingleton::get().socket( Address::SocketType::Stream, std::make_shared(address)); EXPECT_CALL(testing::Const(socket_), ioHandle()).WillRepeatedly(testing::ReturnRef(*io_handle)); @@ -153,7 +154,7 @@ TEST_F(AddrFamilyAwareSocketOptionImplTest, V6Precedence) { // GetSocketOptionName returns the v4 information for a v4 address TEST_F(AddrFamilyAwareSocketOptionImplTest, V4GetSocketOptionName) { Address::Ipv4Instance address("1.2.3.4", 5678); - IoHandlePtr io_handle = Network::SocketInterface::socket( + IoHandlePtr io_handle = Network::SocketInterfaceSingleton::get().socket( Address::SocketType::Stream, std::make_shared(address)); EXPECT_CALL(testing::Const(socket_), ioHandle()).WillRepeatedly(testing::ReturnRef(*io_handle)); @@ -169,7 +170,7 @@ TEST_F(AddrFamilyAwareSocketOptionImplTest, V4GetSocketOptionName) { // GetSocketOptionName returns the v4 information for a v6 address TEST_F(AddrFamilyAwareSocketOptionImplTest, V6GetSocketOptionName) { Address::Ipv6Instance address("2::1", 5678); - IoHandlePtr io_handle = Network::SocketInterface::socket( + IoHandlePtr io_handle = Network::SocketInterfaceSingleton::get().socket( Address::SocketType::Stream, std::make_shared(address)); EXPECT_CALL(testing::Const(socket_), ioHandle()).WillRepeatedly(testing::ReturnRef(*io_handle)); diff --git a/test/common/network/listener_impl_test.cc b/test/common/network/listener_impl_test.cc index 6471641ef764..a2ea3f0645d6 100644 --- a/test/common/network/listener_impl_test.cc +++ b/test/common/network/listener_impl_test.cc @@ -201,7 +201,7 @@ TEST_P(ListenerImplTest, WildcardListenerIpv4Compat) { EXPECT_CALL(listener, getLocalAddress(_)) .WillOnce(Invoke([](os_fd_t fd) -> Address::InstanceConstSharedPtr { - return SocketInterface::addressFromFd(fd); + return SocketInterfaceSingleton::get().addressFromFd(fd); })); StreamInfo::StreamInfoImpl stream_info(dispatcher_->timeSource()); @@ -251,7 +251,7 @@ TEST_P(ListenerImplTest, DisableAndEnableListener) { EXPECT_CALL(listener, getLocalAddress(_)) .WillOnce(Invoke([](os_fd_t fd) -> Address::InstanceConstSharedPtr { - return SocketInterface::addressFromFd(fd); + return SocketInterfaceSingleton::get().addressFromFd(fd); })); EXPECT_CALL(listener_callbacks, onAccept_(_)).WillOnce(Invoke([&](ConnectionSocketPtr&) -> void { client_connection->close(ConnectionCloseType::NoFlush); diff --git a/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc b/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc index 0568f3664337..dc8ecf64a852 100644 --- a/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc +++ b/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc @@ -10,6 +10,7 @@ #include #include "common/memory/stats.h" +#include 
"common/network/socket_impl.h" #include "common/network/utility.h" #include "extensions/quic_listeners/quiche/platform/flags_impl.h" diff --git a/test/extensions/stats_sinks/common/statsd/udp_statsd_test.cc b/test/extensions/stats_sinks/common/statsd/udp_statsd_test.cc index aa9e6f132808..afd7ede41339 100644 --- a/test/extensions/stats_sinks/common/statsd/udp_statsd_test.cc +++ b/test/extensions/stats_sinks/common/statsd/udp_statsd_test.cc @@ -5,6 +5,7 @@ #include "common/api/os_sys_calls_impl.h" #include "common/network/address_impl.h" +#include "common/network/socket_impl.h" #include "common/network/utility.h" #include "extensions/stat_sinks/common/statsd/statsd.h" diff --git a/test/test_common/BUILD b/test/test_common/BUILD index 967cfe90f0c8..28d3a94ded97 100644 --- a/test/test_common/BUILD +++ b/test/test_common/BUILD @@ -55,6 +55,7 @@ envoy_cc_test_library( "//include/envoy/network:filter_interface", "//source/common/common:assert_lib", "//source/common/network:address_lib", + "//source/common/network:listen_socket_lib", "//source/common/network:raw_buffer_socket_lib", "//source/common/network:utility_lib", "//source/common/runtime:runtime_lib", diff --git a/test/test_common/network_utility.cc b/test/test_common/network_utility.cc index ec6f3772491b..a4c1765eda17 100644 --- a/test/test_common/network_utility.cc +++ b/test/test_common/network_utility.cc @@ -59,7 +59,7 @@ Address::InstanceConstSharedPtr findOrCheckFreePort(Address::InstanceConstShared // any), and we need to find out the port number that the OS picked so we can return it. // TODO(fcoras) maybe move to SocketImpl if (addr_port->ip()->port() == 0) { - return SocketInterface::addressFromFd(sock.ioHandle().fd()); + return SocketInterfaceSingleton::get().addressFromFd(sock.ioHandle().fd()); } return addr_port; } @@ -185,7 +185,8 @@ bindFreeLoopbackPort(Address::IpVersion version, Address::SocketType type) { throw EnvoyException(msg); } - return std::make_pair(SocketInterface::addressFromFd(sock->ioHandle().fd()), std::move(sock)); + return std::make_pair(SocketInterfaceSingleton::get().addressFromFd(sock->ioHandle().fd()), + std::move(sock)); } TransportSocketPtr createRawBufferSocket() { return std::make_unique(); } diff --git a/tools/spelling/spelling_dictionary.txt b/tools/spelling/spelling_dictionary.txt index 69069ac79220..de6d46a15875 100644 --- a/tools/spelling/spelling_dictionary.txt +++ b/tools/spelling/spelling_dictionary.txt @@ -148,6 +148,7 @@ IPV IPs IPv ITOA +Injectable Isode Iters JSON From 8d473a8baf031531c8bef68ff1a9c9a944d33413 Mon Sep 17 00:00:00 2001 From: Jose Ulises Nino Rivera Date: Thu, 4 Jun 2020 10:14:55 -0700 Subject: [PATCH 290/909] coverage: update coverage for compression targets (#11395) Commit Message: update coverage for compression targets Additional Description: I believe some of the targets were outdated. I have updated limits that I believe are now not necessary and added one more test were it was missing. I'll work with @rojkov on the http compressor filter coverage which according to the last master run is in fact 84%. Risk Level: low, increasing coverage! 
Testing: Ci passes Signed-off-by: Jose Nino --- .../decompressor/decompressor_filter_test.cc | 25 +++++++++++++++++++ test/per_file_coverage.sh | 6 ----- 2 files changed, 25 insertions(+), 6 deletions(-) diff --git a/test/extensions/filters/http/decompressor/decompressor_filter_test.cc b/test/extensions/filters/http/decompressor/decompressor_filter_test.cc index 216b86bbe0fd..aa7b43435fb8 100644 --- a/test/extensions/filters/http/decompressor/decompressor_filter_test.cc +++ b/test/extensions/filters/http/decompressor/decompressor_filter_test.cc @@ -305,6 +305,16 @@ TEST_P(DecompressorFilterTest, NoDecompressionHeadersOnly) { TestUtility::headerMapEqualIgnoreOrder(headers_before_filter, *headers_after_filter); } +TEST_P(DecompressorFilterTest, NoDecompressionContentEncodingAbsent) { + EXPECT_CALL(*decompressor_factory_, createDecompressor()).Times(0); + Http::TestHeaderMapImpl headers_before_filter{{"content-length", "256"}}; + std::unique_ptr headers_after_filter = + doHeaders(headers_before_filter, false /* end_stream */); + TestUtility::headerMapEqualIgnoreOrder(headers_before_filter, *headers_after_filter); + + expectNoDecompression(); +} + TEST_P(DecompressorFilterTest, NoDecompressionContentEncodingDoesNotMatch) { EXPECT_CALL(*decompressor_factory_, createDecompressor()).Times(0); Http::TestHeaderMapImpl headers_before_filter{{"content-encoding", "not-matching"}, @@ -356,6 +366,21 @@ TEST_P(DecompressorFilterTest, NoResponseDecompressionNoTransformPresentInList) expectNoDecompression(); } +TEST_P(DecompressorFilterTest, DecompressionLibraryNotRegistered) { + EXPECT_THROW_WITH_MESSAGE( + setUpFilter(R"EOF( +decompressor_library: + typed_config: + "@type": "type.googleapis.com/envoy.extensions.compression.does_not_exist" +)EOF"), + EnvoyException, + "Unable to parse JSON as proto (INVALID_ARGUMENT:(decompressor_library.typed_config): " + "invalid value Invalid type URL, unknown type: envoy.extensions.compression.does_not_exist " + "for type Any): " + "{\"decompressor_library\":{\"typed_config\":{\"@type\":\"type.googleapis.com/" + "envoy.extensions.compression.does_not_exist\"}}}"); +} + } // namespace } // namespace Decompressor } // namespace HttpFilters diff --git a/test/per_file_coverage.sh b/test/per_file_coverage.sh index 69b96ebc0bb6..a7a3ecefd531 100755 --- a/test/per_file_coverage.sh +++ b/test/per_file_coverage.sh @@ -8,11 +8,6 @@ declare -a KNOWN_LOW_COVERAGE=( "source/extensions/common/wasm:87.8" "source/extensions/common/wasm/v8:88.3" "source/extensions/common/wasm/null:77.8" -"source/extensions/compression:90.1" -"source/extensions/compression/gzip:93.1" -"source/extensions/compression/gzip/decompressor:81.1" -"source/extensions/compression/common:60.0" -"source/extensions/compression/common/decompressor:20.0" "source/extensions/filters/network/sni_cluster:90.3" "source/extensions/filters/network/thrift_proxy/router:96.4" "source/extensions/filters/network/sni_dynamic_forward_proxy:92.4" @@ -24,7 +19,6 @@ declare -a KNOWN_LOW_COVERAGE=( "source/extensions/filters/http/cache/simple_http_cache:84.5" "source/extensions/filters/http/csrf:96.6" "source/extensions/filters/http/ip_tagging:92.0" -"source/extensions/filters/http/decompressor:80" "source/extensions/filters/http/compressor:84.4" "source/extensions/filters/http/header_to_metadata:95.0" "source/extensions/filters/http/grpc_json_transcoder:93.3" From 5030f92440b2d17dcca193c37178ec0d8aa06097 Mon Sep 17 00:00:00 2001 From: asraa Date: Thu, 4 Jun 2020 15:40:18 -0400 Subject: [PATCH 291/909] fix all filter fuzz bugs 
(#11422) Closes fuzz bugs related to configuration and test issues in filter_fuzz_test * Require that encodeComplete() is called after end_stream/trailers per the HCM/filter contract. Otherwise, you run into destruction issues. * TAP configuration can't use unimplemented TapDS configuration. Had to enforce in code because it's a oneof, which can only be constrained to be required * Squash filter configuration requires some JSON attachment template. Scrubbed if invalid JSON Risk Level: Low Testing: Regression corpuses added, fuzzer runs for a 2 minutes Fixes OSS-fuzz issues: * https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=18938 * https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=20844 * https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=22428 Signed-off-by: Asra Ali --- .../extensions/filters/http/common/fuzz/BUILD | 3 + ...zed-filter_fuzz_test-5143098977157120.fuzz | 7 ++ ...inimized-filter_fuzz_test-5713820013297664 | 7 ++ ...zed-filter_fuzz_test-5714246842449920.fuzz | 65 +++++++++++++++++++ .../fuzz/filter_corpus/not_implemented_tap | 7 ++ .../filters/http/common/fuzz/uber_filter.cc | 13 +++- .../http/common/fuzz/uber_per_filter.cc | 33 +++++++++- 7 files changed, 132 insertions(+), 3 deletions(-) create mode 100644 test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5143098977157120.fuzz create mode 100644 test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5713820013297664 create mode 100644 test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5714246842449920.fuzz create mode 100644 test/extensions/filters/http/common/fuzz/filter_corpus/not_implemented_tap diff --git a/test/extensions/filters/http/common/fuzz/BUILD b/test/extensions/filters/http/common/fuzz/BUILD index cc19fb5dfc14..27f5d59038d5 100644 --- a/test/extensions/filters/http/common/fuzz/BUILD +++ b/test/extensions/filters/http/common/fuzz/BUILD @@ -36,12 +36,15 @@ envoy_cc_test_library( "//source/common/http:utility_lib", "//source/common/protobuf:utility_lib", "//source/extensions/filters/http:well_known_names", + "//source/extensions/filters/http/common:utility_lib", "//test/fuzz:utility_lib", "//test/mocks/buffer:buffer_mocks", "//test/mocks/http:http_mocks", "//test/mocks/server:server_mocks", "//test/proto:bookstore_proto_cc_proto", "@envoy_api//envoy/extensions/filters/http/grpc_json_transcoder/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/squash/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/tap/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5143098977157120.fuzz b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5143098977157120.fuzz new file mode 100644 index 000000000000..d212ffdb4e19 --- /dev/null +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5143098977157120.fuzz @@ -0,0 +1,7 @@ +config { + name: "envoy.squash" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.http.squash.v3.Squash" + value: 
"\n\002Ae\022\356\n\n\342\n\n\001\017\022\334\n2\331\n\n\305\n2\302\n\n\0022\000\n\267\n*\264\n\n\261\n\n\004o\177\177\177\022\250\n2\245\n\n\216\n2\213\n\n\0022\000\n\200\n*\375\t\n\372\t\n\001\017\022\364\t2\361\t\nE2C\n\0022\000\n9*7\n5\n\004o\177\177\177\022-2+\n\0252\023\n\0022\000\n\t*\007\n\005\n\001@\022\000\n\002*\000\n\003\032\001#\n\t\021\010\000\000\000\000\000\0002\n\002*\000\n\002*\000\n\230\t2\225\t\n\375\0102\372\010\n\357\0102\354\010\n\365\0072\362\007\n\0022\000\n\347\007*\344\007\n\341\007\n\004o\177\177\177\022\330\0072\325\007\n\276\0072\273\007\n\0022\000\n\260\007*\255\007\n\252\007\n\001\017\022\244\0072\241\007\nE2C\n\0022\000\n9*7\n5\n\004o\177\177\177\022-2+\n\0252\023\n\0022\000\n\t*\007\n\005\n\001@\022\000\n\002*\000\n\003\032\001#\n\t\021\010\000\000\000\000\000\0002\n\002*\000\n\002*\000\n\310\0062\305\006\n\255\0062\252\006\n\237\0062\234\006\n\0142\n\n\000\n\0022\000\n\002*\000\n\374\0052\371\005\n\366\0052\363\005\n\337\0052\334\005\n\0022\000\n\321\005*\316\005\n\313\005\n\004o\177\177\177\022\302\0052\277\005\n\0022\000\n\0302\026\n\0022\000\n\014*\n\n\010\n\004o\177\177\177\022\000\n\002*\000\n\003\032\001#\n\231\005*\226\005\n\223\005\n\004o\177\177\177\022\212\0052\207\005\n\360\0042\355\004\n\0022\000\n\342\004*\337\004\n\334\004\n\001\017\022\326\0042\323\004\nE2C\n\0022\000\n9*7\n5\n\004o\177\177\177\022-2+\n\0252\023\n\0022\000\n\t*\007\n\005\n\001@\022\000\n\002*\000\n\003\032\001#\n\t\021\010\000\000\000\000\000\0002\n\002*\000\n\002*\000\n\372\0032\367\003\n\337\0032\334\003\n\321\0032\316\003\n\327\0022\324\002\n\0022\000\n\311\002*\306\002\n\303\002\n\004o\177\177\177\021\272\0022\267\002\n\240\0022\235\002\n\0022\000\n\222\002*\217\002\n\214\002\n\001\017\022\206\0022\203\002\nE2C\n\0022\000\n9*7\n5\n\004o\177\177\177\022-2+\n\0252\023\n\0022\000\n\t*\007\n\005\n\001@\022\000\n\002*\000\n\003\032\001#\n\t\021\010\000\000\000\000\000\0002\n\002*\000\n\002*\000\n\252\0012\247\001\n\217\0012\214\001\n\201\0012\177\n\0142\n\n\000\n\0022\000\n\002*\000\n`2^\n\\2Z\nG2E\n\0022\000\n;*9\n7\n\004o\177\177\177\022/2-\n\0022\000\n\0302\026\n\0022\000\n\014*\n\n\010\n\004o\177\177\177\022\000\n\002*\000\n\003\032\001#\n\010*\006\n\004\n\000\022\000\n\002*\000\n\000\n\t\021\010\000\000\000\000\000\0002\n\002*\000\n\t\021\010\000\000\000\000\000\0002\n\002*\000\n\0022\000\n\002*\000\n\0042\002\n\000\n\t\021\010\000\000\000\000\000\0002\n\002*\000\n\t\021\010\000\000\000\000\000\0002\n\002*\000\n\002*\000\n\003\032\001#\n\t\021\010\000\000\000\000\000\0002\n\002*\000\n\002*\000\nc2a\n_2]\nJ2H\n\0022\000\n>*<\n:\n\004o\177\177\177\022220\n\0022\000\n!2\037\n\0022\000\n\014*\n\n\010\n\004o\177\177\177\022\000\n\013*\t\n\007\n\001\001\022\002\010\000\n\003\032\001#\n\002*\000\n\002*\000\n\000\n\t\021\010\000\000\000\000\000\0002\n\002*\000\n\t\021\010\000\000\000\000\000\0002\n\002*\000\n\0022\000\n\002*\000\n\0042\002\n\000\n\t\021\010\000\000\000\000\000\0002\n\002*\000\n\t\021\010\000\000\000\000\000\0002\n\002*\000\n\002*\000\n\003\032\001#\n\t\021\010\000\000\000\000\000\0002\n\002*\000\n\002*\000\n\000\n\t\021\010\000\000\000\000\000\0002\n\002*\000\n\t\021\010\000\000\000\000\000\0002\n\002*\000\n\0022\000\n\002*\000\n\0042\002\n\000\n\t\021\010\000\000\000\000\000\0002\n\002*\000\n\t\021\010\000\000\000\000\000\0002\n\002*\000\n\002*\000\n\003\032\001#\n\t\021\010\000\000\000\000\000\0002\n\002*\000\n\002*\000\nc2a\n_2]\nJ2H\n\0022\000\n>*<\n:\n\004o\177\177\177\022220\n\0022\000\n!2\037\n\0022\000\n\014*\n\n\010\n\004o\177\177\177\022\000\n\013*\t\n
\007\n\001\001\022\002\010\000\n\003\032\001#\n\002*\000\n\002*\000\n\000\n\t\021\010\000\000\000\000\000\0002\n\002*\000\n\t\021\010\000\000\000\000\000\0002\n\002*\000\n\0022\000\n\002*\000\n\0042\002\n\000\n\t\021\010\000\000\000\000\000\0002\n\002*\000\n\t\021\010\000\000\000\000\000\0002\n\002*\000\n\002*\000\n\003\032\001#\n\t\021\010\000\000\000\000\000\0002\n\002*\000\n\002*\000\n\000\n\t\021\010\000\000\000\000\000\0002\n\002*\000\n\007\n\001\001\022\002\010\000*\007\010 \020\261\300\334\001" + } +} \ No newline at end of file diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5713820013297664 b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5713820013297664 new file mode 100644 index 000000000000..58adf9a30292 --- /dev/null +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5713820013297664 @@ -0,0 +1,7 @@ +config { + name: "envoy.filters.http.adaptive_concurrency" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.http.adaptive_concurrency.v3.AdaptiveConcurrency" + value: "\n\016\022\005\032\003\010\200\001\032\005\n\003\010\200\001" + } +} \ No newline at end of file diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5714246842449920.fuzz b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5714246842449920.fuzz new file mode 100644 index 000000000000..069f873d8896 --- /dev/null +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5714246842449920.fuzz @@ -0,0 +1,65 @@ +config { + name: "envoy.router" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + value: "*\023x-envoy-max-retries" + } +} +data { + headers { + headers { + key: "x-envoy-max-retries" + value: "?" 
+ } + headers { + key: "x-envoy-max-retries" + value: "ffffffffffffffffffffffffffffffffffffffffffvfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + } + headers { + key: "x-envoy-max-retries" + value: "ffffffffffffffffffffffffffffffffffffffffffvfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + } + headers { + key: "x-envoy-max-retries" + value: "fff\002fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffbffffffffffffffffmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + } + headers { + key: "fff\002fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffbffffffffffffffffmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + value: "fff\002ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmtmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm}mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + } + headers { + key: "x-envoy-max-retries" + value: "ffffffffffffffffffffffffffffffffffffffffffvfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + } + } + trailers { + headers { + key: "x-envoy-max-retries" + value: "?" 
+ } + headers { + key: "x-envoy-max-retries" + value: "&&&&&&&&&&&" + } + headers { + key: "x-envoy-max-retries" + value: "x-envoy-max-retries" + } + headers { + key: "x-envoy-max-retries" + value: "fff\002fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffbffffffffffffffffmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + } + headers { + key: "x-envoy-max-retries" + value: "fff\002ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmtmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm}mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + } + headers { + key: "x-envoy-max-retries" + value: "ffffffffffffffffffffffffffffffffffffffffffvfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + } + headers { + key: "x-envoy-max-retries" + value: "?" 
+ } + } +} \ No newline at end of file diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/not_implemented_tap b/test/extensions/filters/http/common/fuzz/filter_corpus/not_implemented_tap new file mode 100644 index 000000000000..0b0167248818 --- /dev/null +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/not_implemented_tap @@ -0,0 +1,7 @@ +config { + name: "envoy.filters.http.tap" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.http.tap.v3.Tap" + value: "\ns\n\000\032o\nf\nd\nb\032`\022^\n$\022\"\n\034\022\032\n\n\032\010\032\006\n\004\n\002\032\000\n\006\022\004\n\002 \001\n\004\032\002 \001\n\002*\000\n2\n0\n.\032,\022*\n\"\022 \n\032\022\030\n\n\032\010\032\006\n\004\n\002\032\000\n\004\022\002\n\000\n\004\032\002 \001\n\002 \001\n\000\n\002 \001\n\002 \001\022\005\n\000\022s\006" + } +} \ No newline at end of file diff --git a/test/extensions/filters/http/common/fuzz/uber_filter.cc b/test/extensions/filters/http/common/fuzz/uber_filter.cc index 37d1a7af8a09..49d8ff3bbe49 100644 --- a/test/extensions/filters/http/common/fuzz/uber_filter.cc +++ b/test/extensions/filters/http/common/fuzz/uber_filter.cc @@ -124,7 +124,11 @@ Http::FilterHeadersStatus UberFilterFuzzer::sendHeaders(Http::StreamEncoderFilte } ENVOY_LOG_MISC(debug, "Encoding headers (end_stream={}):\n{} ", end_stream, response_headers_); - return filter->encodeHeaders(response_headers_, end_stream); + Http::FilterHeadersStatus status = filter->encodeHeaders(response_headers_, end_stream); + if (end_stream) { + filter->encodeComplete(); + } + return status; } template <> @@ -138,7 +142,11 @@ template <> Http::FilterDataStatus UberFilterFuzzer::sendData(Http::StreamEncoderFilter* filter, Buffer::Instance& buffer, bool end_stream) { ENVOY_LOG_MISC(debug, "Encoding data (end_stream={}): {} ", end_stream, buffer.toString()); - return filter->encodeData(buffer, end_stream); + Http::FilterDataStatus status = filter->encodeData(buffer, end_stream); + if (end_stream) { + filter->encodeComplete(); + } + return status; } template <> @@ -155,6 +163,7 @@ void UberFilterFuzzer::sendTrailers(Http::StreamEncoderFilter* filter, response_trailers_ = Fuzz::fromHeaders(data.trailers()); ENVOY_LOG_MISC(debug, "Encoding trailers:\n{} ", response_trailers_); filter->encodeTrailers(response_trailers_); + filter->encodeComplete(); } void UberFilterFuzzer::accessLog(AccessLog::Instance* access_logger, diff --git a/test/extensions/filters/http/common/fuzz/uber_per_filter.cc b/test/extensions/filters/http/common/fuzz/uber_per_filter.cc index 50c33396c2f8..da4d963164ee 100644 --- a/test/extensions/filters/http/common/fuzz/uber_per_filter.cc +++ b/test/extensions/filters/http/common/fuzz/uber_per_filter.cc @@ -1,5 +1,8 @@ #include "envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.pb.h" +#include "envoy/extensions/filters/http/squash/v3/squash.pb.h" +#include "envoy/extensions/filters/http/tap/v3/tap.pb.h" +#include "extensions/filters/http/common/utility.h" #include "extensions/filters/http/well_known_names.h" #include "test/extensions/filters/http/common/fuzz/uber_filter.h" @@ -70,11 +73,39 @@ void UberFilterFuzzer::guideAnyProtoType(test::fuzz::HttpData* mutable_data, uin mutable_any->set_type_url(type_url); } +void cleanAttachmentTemplate(Protobuf::Message* message) { + envoy::extensions::filters::http::squash::v3::Squash& config = + dynamic_cast(*message); + std::string json; + Protobuf::util::JsonPrintOptions json_options; + if 
(!Protobuf::util::MessageToJsonString(config.attachment_template(), &json, json_options) + .ok()) { + config.clear_attachment_template(); + } +} + +void cleanTapConfig(Protobuf::Message* message) { + envoy::extensions::filters::http::tap::v3::Tap& config = + dynamic_cast(*message); + if (config.common_config().config_type_case() == + envoy::extensions::common::tap::v3::CommonExtensionConfig::ConfigTypeCase::kTapdsConfig) { + config.mutable_common_config()->mutable_static_config()->mutable_match_config()->set_any_match( + true); + } +} + void UberFilterFuzzer::cleanFuzzedConfig(absl::string_view filter_name, Protobuf::Message* message) { + const std::string name = Extensions::HttpFilters::Common::FilterNameUtil::canonicalFilterName( + std::string(filter_name)); // Map filter name to clean-up function. - if (filter_name == HttpFilterNames::get().GrpcJsonTranscoder) { + if (name == HttpFilterNames::get().GrpcJsonTranscoder) { addBookstoreProtoDescriptor(message); + } else if (name == HttpFilterNames::get().Squash) { + cleanAttachmentTemplate(message); + } else if (name == HttpFilterNames::get().Tap) { + // TapDS oneof field not implemented. + cleanTapConfig(message); } } From a150fb423e5dc20b1de2e8a76869711c3bc4f480 Mon Sep 17 00:00:00 2001 From: Douglas Reid Date: Thu, 4 Jun 2020 12:55:38 -0700 Subject: [PATCH 292/909] zipkin: add key name to prevent bad replacements (#11374) This PR attempts to fix issues that arise with the current scheme for replacements in the Zipkin serialization that was added in #10400. In particular, this PR is trying to prevent issues that occur when size tags have the same value as the span durations. This was originally reported in istio/istio#24177. Signed-off-by: Douglas Reid --- .../extensions/tracers/zipkin/span_buffer.cc | 6 ++- source/extensions/tracers/zipkin/util.cc | 6 ++- source/extensions/tracers/zipkin/util.h | 7 ++- .../tracers/zipkin/zipkin_core_types.cc | 6 +-- .../tracers/zipkin/span_buffer_test.cc | 51 ++++++++++--------- 5 files changed, 43 insertions(+), 33 deletions(-) diff --git a/source/extensions/tracers/zipkin/span_buffer.cc b/source/extensions/tracers/zipkin/span_buffer.cc index a0803fe080bd..d40071c96182 100644 --- a/source/extensions/tracers/zipkin/span_buffer.cc +++ b/source/extensions/tracers/zipkin/span_buffer.cc @@ -137,7 +137,8 @@ JsonV2Serializer::toListOfSpans(const Span& zipkin_span, Util::Replacements& rep // us 1.58432429547687e+15. Instead we store it as the string of 1584324295476870 (when it is // serialized: "1584324295476870"), and replace it post MessageToJsonString serialization with // integer (1584324295476870 without `"`), see: JsonV2Serializer::serialize. - (*fields)[SPAN_TIMESTAMP] = Util::uint64Value(annotation.timestamp(), replacements); + (*fields)[SPAN_TIMESTAMP] = + Util::uint64Value(annotation.timestamp(), SPAN_TIMESTAMP, replacements); (*fields)[SPAN_LOCAL_ENDPOINT] = ValueUtil::structValue(toProtoEndpoint(annotation.endpoint())); } @@ -157,7 +158,8 @@ JsonV2Serializer::toListOfSpans(const Span& zipkin_span, Util::Replacements& rep if (zipkin_span.isSetDuration()) { // Since SPAN_DURATION has the same data type with SPAN_TIMESTAMP, we use Util::uint64Value to // store it. 
- (*fields)[SPAN_DURATION] = Util::uint64Value(zipkin_span.duration(), replacements); + (*fields)[SPAN_DURATION] = + Util::uint64Value(zipkin_span.duration(), SPAN_DURATION, replacements); } const auto& binary_annotations = zipkin_span.binaryAnnotations(); diff --git a/source/extensions/tracers/zipkin/util.cc b/source/extensions/tracers/zipkin/util.cc index 3d4ff6913f53..5263eec00ecc 100644 --- a/source/extensions/tracers/zipkin/util.cc +++ b/source/extensions/tracers/zipkin/util.cc @@ -23,9 +23,11 @@ uint64_t Util::generateRandom64(TimeSource& time_source) { return rand_64(); } -ProtobufWkt::Value Util::uint64Value(uint64_t value, Replacements& replacements) { +ProtobufWkt::Value Util::uint64Value(uint64_t value, absl::string_view name, + Replacements& replacements) { const std::string string_value = std::to_string(value); - replacements.push_back({absl::StrCat("\"", string_value, "\""), string_value}); + replacements.push_back({absl::StrCat("\"", name, "\":\"", string_value, "\""), + absl::StrCat("\"", name, "\":", string_value)}); return ValueUtil::stringValue(string_value); } diff --git a/source/extensions/tracers/zipkin/util.h b/source/extensions/tracers/zipkin/util.h index 6f1a93374484..0c9158a36423 100644 --- a/source/extensions/tracers/zipkin/util.h +++ b/source/extensions/tracers/zipkin/util.h @@ -49,13 +49,16 @@ class Util { /** * Returns a wrapped uint64_t value as a string. In addition to that, it also pushes back a - * replacement to the given replacements vector. + * replacement to the given replacements vector. The replacement includes the supplied name + * as a key, for identification in a JSON stream. * * @param value unt64_t number that will be represented in string. + * @param name std::string that is the key for the value being replaced. * @param replacements a container to hold the required replacements when serializing this value. * @return ProtobufWkt::Value wrapped uint64_t as a string. */ - static ProtobufWkt::Value uint64Value(uint64_t value, Replacements& replacements); + static ProtobufWkt::Value uint64Value(uint64_t value, absl::string_view name, + Replacements& replacements); }; } // namespace Zipkin diff --git a/source/extensions/tracers/zipkin/zipkin_core_types.cc b/source/extensions/tracers/zipkin/zipkin_core_types.cc index 19db113b4997..3128a82586f8 100644 --- a/source/extensions/tracers/zipkin/zipkin_core_types.cc +++ b/source/extensions/tracers/zipkin/zipkin_core_types.cc @@ -69,7 +69,7 @@ void Annotation::changeEndpointServiceName(const std::string& service_name) { const ProtobufWkt::Struct Annotation::toStruct(Util::Replacements& replacements) const { ProtobufWkt::Struct annotation; auto* fields = annotation.mutable_fields(); - (*fields)[ANNOTATION_TIMESTAMP] = Util::uint64Value(timestamp_, replacements); + (*fields)[ANNOTATION_TIMESTAMP] = Util::uint64Value(timestamp_, SPAN_TIMESTAMP, replacements); (*fields)[ANNOTATION_VALUE] = ValueUtil::stringValue(value_); if (endpoint_.has_value()) { (*fields)[ANNOTATION_ENDPOINT] = @@ -159,13 +159,13 @@ const ProtobufWkt::Struct Span::toStruct(Util::Replacements& replacements) const // Usually we store number to a ProtobufWkt::Struct object via ValueUtil::numberValue. // However, due to the possibility of rendering that to a number with scientific notation, we // chose to store it as a string and keeping track the corresponding replacement. 
- (*fields)[SPAN_TIMESTAMP] = Util::uint64Value(timestamp_.value(), replacements); + (*fields)[SPAN_TIMESTAMP] = Util::uint64Value(timestamp_.value(), SPAN_TIMESTAMP, replacements); } if (duration_.has_value()) { // Since SPAN_DURATION has the same data type with SPAN_TIMESTAMP, we use Util::uint64Value to // store it. - (*fields)[SPAN_DURATION] = Util::uint64Value(duration_.value(), replacements); + (*fields)[SPAN_DURATION] = Util::uint64Value(duration_.value(), SPAN_DURATION, replacements); } if (!annotations_.empty()) { diff --git a/test/extensions/tracers/zipkin/span_buffer_test.cc b/test/extensions/tracers/zipkin/span_buffer_test.cc index 05563a02de01..210e9df37b25 100644 --- a/test/extensions/tracers/zipkin/span_buffer_test.cc +++ b/test/extensions/tracers/zipkin/span_buffer_test.cc @@ -51,8 +51,9 @@ Annotation createAnnotation(const absl::string_view value, const IpType ip_type) BinaryAnnotation createTag() { BinaryAnnotation tag; - tag.setKey("component"); - tag.setValue("proxy"); + tag.setKey("response_size"); + // ensure duration replacement doesn't override this value. + tag.setValue(std::to_string(DEFAULT_TEST_DURATION)); return tag; } @@ -134,11 +135,12 @@ TEST(ZipkinSpanBufferTest, TestSerializeTimestamp) { ProtobufWkt::Struct object; auto* fields = object.mutable_fields(); Util::Replacements replacements; - (*fields)["timestamp"] = Util::uint64Value(DEFAULT_TEST_TIMESTAMP, replacements); + (*fields)["timestamp"] = Util::uint64Value(DEFAULT_TEST_TIMESTAMP, "timestamp", replacements); ASSERT_EQ(1, replacements.size()); - EXPECT_EQ(absl::StrCat("\"", default_timestamp_string, "\""), replacements.at(0).first); - EXPECT_EQ(default_timestamp_string, replacements.at(0).second); + EXPECT_EQ(absl::StrCat("\"timestamp\":\"", default_timestamp_string, "\""), + replacements.at(0).first); + EXPECT_EQ(absl::StrCat("\"timestamp\":", default_timestamp_string), replacements.at(0).second); } TEST(ZipkinSpanBufferTest, ConstructBuffer) { @@ -157,8 +159,8 @@ TEST(ZipkinSpanBufferTest, ConstructBuffer) { R"("endpoint":{"ipv4":"1.2.3.4",)" R"("port":8080,)" R"("serviceName":"service1"}}],)" - R"("binaryAnnotations":[{"key":"component",)" - R"("value":"proxy"}]}])"); + R"("binaryAnnotations":[{"key":"response_size",)" + R"("value":"DEFAULT_TEST_DURATION"}]}])"); const std::string expected2 = withDefaultTimestampAndDuration(R"([{"traceId":"0000000000000001",)" @@ -175,8 +177,8 @@ TEST(ZipkinSpanBufferTest, ConstructBuffer) { R"("endpoint":{"ipv4":"1.2.3.4",)" R"("port":8080,)" R"("serviceName":"service1"}}],)" - R"("binaryAnnotations":[{"key":"component",)" - R"("value":"proxy"}]},)" + R"("binaryAnnotations":[{"key":"response_size",)" + R"("value":"DEFAULT_TEST_DURATION"}]},)" R"({"traceId":"0000000000000001",)" R"("name":"",)" R"("id":"0000000000000001",)" @@ -191,8 +193,8 @@ TEST(ZipkinSpanBufferTest, ConstructBuffer) { R"("endpoint":{"ipv4":"1.2.3.4",)" R"("port":8080,)" R"("serviceName":"service1"}}],)" - R"("binaryAnnotations":[{"key":"component",)" - R"("value":"proxy"}]}])"); + R"("binaryAnnotations":[{"key":"response_size",)" + R"("value":"DEFAULT_TEST_DURATION"}]}])"); const bool shared = true; const bool delay_allocation = true; @@ -221,7 +223,7 @@ TEST(ZipkinSpanBufferTest, SerializeSpan) { R"("ipv4":"1.2.3.4",)" R"("port":8080},)" R"("tags":{)" - R"("component":"proxy"})" + R"("response_size":"DEFAULT_TEST_DURATION"},)" "}]"), JsonStringEq(wrapAsObject(buffer1.serialize()))); @@ -238,7 +240,7 @@ TEST(ZipkinSpanBufferTest, SerializeSpan) { R"("ipv6":"2001:db8:85a3::8a2e:370:4444",)" 
R"("port":7334},)" R"("tags":{)" - R"("component":"proxy"})" + R"("response_size":"DEFAULT_TEST_DURATION"},)" "}]"), JsonStringEq(wrapAsObject(buffer1_v6.serialize()))); @@ -255,7 +257,7 @@ TEST(ZipkinSpanBufferTest, SerializeSpan) { R"("ipv4":"1.2.3.4",)" R"("port":8080},)" R"("tags":{)" - R"("component":"proxy"}},)" + R"("response_size":"DEFAULT_TEST_DURATION"}},)" R"({)" R"("traceId":"0000000000000001",)" R"("id":"0000000000000001",)" @@ -267,7 +269,7 @@ TEST(ZipkinSpanBufferTest, SerializeSpan) { R"("ipv4":"1.2.3.4",)" R"("port":8080},)" R"("tags":{)" - R"("component":"proxy"},)" + R"("response_size":"DEFAULT_TEST_DURATION"},)" R"("shared":true)" "}]"), JsonStringEq(wrapAsObject(buffer2.serialize()))); @@ -285,7 +287,7 @@ TEST(ZipkinSpanBufferTest, SerializeSpan) { R"("ipv4":"1.2.3.4",)" R"("port":8080},)" R"("tags":{)" - R"("component":"proxy"}},)" + R"("response_size":"DEFAULT_TEST_DURATION"}},)" R"({)" R"("traceId":"0000000000000001",)" R"("id":"0000000000000001",)" @@ -297,7 +299,7 @@ TEST(ZipkinSpanBufferTest, SerializeSpan) { R"("ipv4":"1.2.3.4",)" R"("port":8080},)" R"("tags":{)" - R"("component":"proxy"})" + R"("response_size":"DEFAULT_TEST_DURATION"})" "}]"), JsonStringEq(wrapAsObject(buffer3.serialize()))); @@ -315,7 +317,7 @@ TEST(ZipkinSpanBufferTest, SerializeSpan) { R"("ipv4":"AQIDBA==",)" R"("port":8080},)" R"("tags":{)" - R"("component":"proxy"})" + R"("response_size":"DEFAULT_TEST_DURATION"})" "}]}"), serializedMessageToJson(buffer4.serialize())); @@ -333,7 +335,7 @@ TEST(ZipkinSpanBufferTest, SerializeSpan) { R"("ipv6":"IAENuIWjAAAAAIouA3BERA==",)" R"("port":7334},)" R"("tags":{)" - R"("component":"proxy"})" + R"("response_size":"DEFAULT_TEST_DURATION"})" "}]}"), serializedMessageToJson(buffer4_v6.serialize())); @@ -351,7 +353,7 @@ TEST(ZipkinSpanBufferTest, SerializeSpan) { R"("ipv4":"AQIDBA==",)" R"("port":8080},)" R"("tags":{)" - R"("component":"proxy"}},)" + R"("response_size":"DEFAULT_TEST_DURATION"}},)" R"({)" R"("traceId":"AAAAAAAAAAE=",)" R"("id":"AQAAAAAAAAA=",)" @@ -363,7 +365,7 @@ TEST(ZipkinSpanBufferTest, SerializeSpan) { R"("ipv4":"AQIDBA==",)" R"("port":8080},)" R"("tags":{)" - R"("component":"proxy"},)" + R"("response_size":"DEFAULT_TEST_DURATION"},)" R"("shared":true)" "}]}"), serializedMessageToJson(buffer5.serialize())); @@ -382,7 +384,7 @@ TEST(ZipkinSpanBufferTest, SerializeSpan) { R"("ipv4":"AQIDBA==",)" R"("port":8080},)" R"("tags":{)" - R"("component":"proxy"}},)" + R"("response_size":"DEFAULT_TEST_DURATION"}},)" R"({)" R"("traceId":"AAAAAAAAAAE=",)" R"("id":"AQAAAAAAAAA=",)" @@ -394,7 +396,7 @@ TEST(ZipkinSpanBufferTest, SerializeSpan) { R"("ipv4":"AQIDBA==",)" R"("port":8080},)" R"("tags":{)" - R"("component":"proxy"})" + R"("response_size":"DEFAULT_TEST_DURATION"})" "}]}"), serializedMessageToJson(buffer6.serialize())); } @@ -413,7 +415,8 @@ TEST(ZipkinSpanBufferTest, TestSerializeTimestampInTheFuture) { ProtobufWkt::Struct object; auto* objectFields = object.mutable_fields(); Util::Replacements replacements; - (*objectFields)["timestamp"] = Util::uint64Value(DEFAULT_TEST_TIMESTAMP, replacements); + (*objectFields)["timestamp"] = + Util::uint64Value(DEFAULT_TEST_TIMESTAMP, "timestamp", replacements); const auto objectJson = MessageUtil::getJsonStringFromMessage(object, false, true); // We still have "1584324295476870" from MessageUtil::getJsonStringFromMessage here. 
EXPECT_EQ(R"({"timestamp":"1584324295476870"})", objectJson); From e8a2d1e24dc9a0da5273442204ec3cdfad1e7ca8 Mon Sep 17 00:00:00 2001 From: phlax Date: Thu, 4 Jun 2020 20:56:20 +0100 Subject: [PATCH 293/909] build: non-root docker (#11323) Allow envoy to run as non-root user in Docker container. Signed-off-by: Ryan Northey --- ci/Dockerfile-envoy | 23 ++++++++++++++++++++++- ci/Dockerfile-envoy-alpine | 2 ++ ci/Dockerfile-envoy-alpine-debug | 2 ++ ci/docker-entrypoint.sh | 12 +++++++++++- docs/root/version_history/current.rst | 1 + 5 files changed, 38 insertions(+), 2 deletions(-) diff --git a/ci/Dockerfile-envoy b/ci/Dockerfile-envoy index 8e1046a9be35..377fb3684b8d 100644 --- a/ci/Dockerfile-envoy +++ b/ci/Dockerfile-envoy @@ -1,4 +1,22 @@ -FROM ubuntu:18.04 +ARG BUILD_FROM=ubuntu:18.04 + + +# Build stage +FROM $BUILD_FROM as build + +RUN apt-get update \ + && apt-get upgrade -y \ + && apt-get install --no-install-recommends -y ca-certificates curl gcc libc-dev \ + && echo "d6c40440609a23483f12eb6295b5191e94baf08298a856bab6e15b10c3b82891 /tmp/su-exec.c" > /tmp/checksum \ + && curl -o /tmp/su-exec.c https://raw.githubusercontent.com/ncopa/su-exec/212b75144bbc06722fbd7661f651390dc47a43d1/su-exec.c \ + && sha256sum -c /tmp/checksum \ + && gcc -Wall /tmp/su-exec.c -o/usr/local/bin/su-exec \ + && chown root:root /usr/local/bin/su-exec \ + && chmod 0755 /usr/local/bin/su-exec + + +# Final stage +FROM $BUILD_FROM RUN apt-get update \ && apt-get upgrade -y \ @@ -8,6 +26,9 @@ RUN apt-get update \ && rm -rf /tmp/* /var/tmp/* \ && rm -rf /var/lib/apt/lists/* +COPY --from=build /usr/local/bin/su-exec /usr/local/bin/su-exec +RUN adduser --group --system envoy + RUN mkdir -p /etc/envoy ADD build_release_stripped/envoy /usr/local/bin/envoy diff --git a/ci/Dockerfile-envoy-alpine b/ci/Dockerfile-envoy-alpine index 6d993080d3b2..4ac4578370c2 100644 --- a/ci/Dockerfile-envoy-alpine +++ b/ci/Dockerfile-envoy-alpine @@ -4,6 +4,8 @@ RUN mkdir -p /etc/envoy ADD build_release_stripped/envoy /usr/local/bin/envoy ADD configs/google_com_proxy.v2.yaml /etc/envoy/envoy.yaml +RUN apk add --no-cache shadow su-exec \ + && addgroup -S envoy && adduser --no-create-home -S envoy -G envoy EXPOSE 10000 diff --git a/ci/Dockerfile-envoy-alpine-debug b/ci/Dockerfile-envoy-alpine-debug index 56162717ae64..fe4957814612 100644 --- a/ci/Dockerfile-envoy-alpine-debug +++ b/ci/Dockerfile-envoy-alpine-debug @@ -4,6 +4,8 @@ RUN mkdir -p /etc/envoy ADD build_release/envoy /usr/local/bin/envoy ADD configs/google_com_proxy.v2.yaml /etc/envoy/envoy.yaml +RUN apk add --no-cache shadow su-exec \ + && addgroup -S envoy && adduser --no-create-home -S envoy -G envoy EXPOSE 10000 diff --git a/ci/docker-entrypoint.sh b/ci/docker-entrypoint.sh index b731653319ad..10b78e74c1d7 100755 --- a/ci/docker-entrypoint.sh +++ b/ci/docker-entrypoint.sh @@ -13,4 +13,14 @@ if [ "$1" = 'envoy' ]; then fi fi -exec "$@" +if [ "$ENVOY_UID" != "0" ]; then + if [ -n "$ENVOY_UID" ]; then + usermod -u "$ENVOY_UID" envoy + fi + if [ -n "$ENVOY_GID" ]; then + groupmod -g "$ENVOY_GID" envoy + fi + su-exec envoy "${@}" +else + exec "${@}" +fi diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 9fe4d345399e..6a88f6d00328 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -13,6 +13,7 @@ Minor Behavior Changes *Changes that may cause incompatibilities for some users, but should not for most* * access loggers: applied existing buffer limits to access logs, as well as :ref:`stats ` for 
logged / dropped logs. This can be reverted temporarily by setting runtime feature `envoy.reloadable_features.disallow_unbounded_access_logs` to false. +* build: run as non-root inside Docker containers. Existing behaviour can be restored by setting the environment variable `ENVOY_UID` to `0`. `ENVOY_UID` and `ENVOY_GID` can be used to set the envoy user's `uid` and `gid` respectively. * hot restart: added the option :option:`--use-dynamic-base-id` to select an unused base ID at startup and the option :option:`--base-id-path` to write the base id to a file (for reuse with later hot restarts). * http: fixed several bugs with applying correct connection close behavior across the http connection manager, health checker, and connection pool. This behavior may be temporarily reverted by setting runtime feature `envoy.reloadable_features.fix_connection_close` to false. * http: fixed a bug where the upgrade header was not cleared on responses to non-upgrade requests. From ebed4a1a394b04a24cfde5018551cbdf626dcc2e Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Thu, 4 Jun 2020 13:00:30 -0700 Subject: [PATCH 294/909] ci: fix mac bazelisk (#11444) Signed-off-by: Lizan Zhou --- ci/mac_ci_setup.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ci/mac_ci_setup.sh b/ci/mac_ci_setup.sh index f3991ac407ea..b9870f942696 100755 --- a/ci/mac_ci_setup.sh +++ b/ci/mac_ci_setup.sh @@ -40,8 +40,8 @@ fi # Required as bazel and a foreign bazelisk are installed in the latest macos vm image, we have # to unlink/overwrite them to install bazelisk echo "Installing bazelbuild/tap/bazelisk" -brew install --force bazelbuild/tap/bazelisk -brew unlink bazelbuild/tap/bazelisk || true +brew tap bazelbuild/tap +brew reinstall --force bazelbuild/tap/bazelisk if ! brew link --overwrite bazelbuild/tap/bazelisk; then echo "Failed to install and link bazelbuild/tap/bazelisk" exit 1 From ff40a02a838d123f4bf018858c61c983e1c5cce9 Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Thu, 4 Jun 2020 13:02:49 -0700 Subject: [PATCH 295/909] tls: fix error log message for invalid cipher suite name (#11430) When the TLS library rejects a list of cipher suites, Envoy tries each one to try to log which ones caused the failure. The code to split the list was incorrect, resulting in parts of cipher suite names being tried, and nothing useful being logged. Signed-off-by: Greg Greenway --- .../transport_sockets/tls/context_impl.cc | 16 +++++++++++++++- .../transport_sockets/tls/context_impl_test.cc | 11 ++++++----- 2 files changed, 21 insertions(+), 6 deletions(-) diff --git a/source/extensions/transport_sockets/tls/context_impl.cc b/source/extensions/transport_sockets/tls/context_impl.cc index 11853e04dbec..041c28b3da1f 100644 --- a/source/extensions/transport_sockets/tls/context_impl.cc +++ b/source/extensions/transport_sockets/tls/context_impl.cc @@ -23,6 +23,7 @@ #include "extensions/transport_sockets/tls/utility.h" +#include "absl/strings/match.h" #include "absl/strings/str_join.h" #include "openssl/evp.h" #include "openssl/hmac.h" @@ -88,11 +89,24 @@ ContextImpl::ContextImpl(Stats::Scope& scope, const Envoy::Ssl::ContextConfig& c RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or("")); if (!SSL_CTX_set_strict_cipher_list(ctx.ssl_ctx_.get(), config.cipherSuites().c_str())) { + // Break up a set of ciphers into each individual cipher and try them each individually in + // order to attempt to log which specific one failed. 
Example of config.cipherSuites(): + // "-ALL:[ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305]:ECDHE-ECDSA-AES128-SHA". + // + // "-" is both an operator when in the leading position of a token (-ALL: don't allow this + // cipher), and the common separator in names (ECDHE-ECDSA-AES128-GCM-SHA256). Don't split on + // it because it will separate pieces of the same cipher. When it is a leading character, it + // is removed below. std::vector ciphers = - StringUtil::splitToken(config.cipherSuites(), ":+-![|]", false); + StringUtil::splitToken(config.cipherSuites(), ":+![|]", false); std::vector bad_ciphers; for (const auto& cipher : ciphers) { std::string cipher_str(cipher); + + if (absl::StartsWith(cipher_str, "-")) { + cipher_str.erase(cipher_str.begin()); + } + if (!SSL_CTX_set_strict_cipher_list(ctx.ssl_ctx_.get(), cipher_str.c_str())) { bad_ciphers.push_back(cipher_str); } diff --git a/test/extensions/transport_sockets/tls/context_impl_test.cc b/test/extensions/transport_sockets/tls/context_impl_test.cc index e2213fbf7e29..cb790eff3b99 100644 --- a/test/extensions/transport_sockets/tls/context_impl_test.cc +++ b/test/extensions/transport_sockets/tls/context_impl_test.cc @@ -126,16 +126,17 @@ TEST_F(SslContextImplTest, TestCipherSuites) { const std::string yaml = R"EOF( common_tls_context: tls_params: - cipher_suites: "-ALL:+[AES128-SHA|BOGUS1]:BOGUS2:AES256-SHA" + cipher_suites: "-ALL:+[AES128-SHA|BOGUS1-SHA256]:BOGUS2-SHA:AES256-SHA" )EOF"; envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context; TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), tls_context); ClientContextConfigImpl cfg(tls_context, factory_context_); - EXPECT_THROW_WITH_MESSAGE(manager_.createSslClientContext(store_, cfg), EnvoyException, - "Failed to initialize cipher suites " - "-ALL:+[AES128-SHA|BOGUS1]:BOGUS2:AES256-SHA. The following " - "ciphers were rejected when tried individually: BOGUS1, BOGUS2"); + EXPECT_THROW_WITH_MESSAGE( + manager_.createSslClientContext(store_, cfg), EnvoyException, + "Failed to initialize cipher suites " + "-ALL:+[AES128-SHA|BOGUS1-SHA256]:BOGUS2-SHA:AES256-SHA. The following " + "ciphers were rejected when tried individually: BOGUS1-SHA256, BOGUS2-SHA"); } TEST_F(SslContextImplTest, TestExpiringCert) { From 99f0f021867b0304c2c5807edc918262f30b88c0 Mon Sep 17 00:00:00 2001 From: Misha Efimov Date: Thu, 4 Jun 2020 19:01:40 -0400 Subject: [PATCH 296/909] Reflect DiscoveryResponse.control_plane.identifier as a TextReadout stats (#11390) Add ControlPlaneStats::identifier field with text from DiscoveryResponse.control_plane.identifier set by the remote. 
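As a rough sketch of the new stat's flow (the wrapper function below is hypothetical; the stat struct, field names, and proto accessors are the ones touched by the hunks that follow), the remote's identifier is copied from the received DiscoveryResponse into the `control_plane.identifier` TextReadout:

#include "envoy/config/grpc_mux.h"                    // ControlPlaneStats (extended by this patch)
#include "envoy/service/discovery/v3/discovery.pb.h"  // DiscoveryResponse (assumed proto header path)

// Hypothetical helper mirroring the grpc_mux_impl.cc change below: when the
// response carries a control_plane block, surface its identifier as a text readout.
void recordControlPlaneIdentifier(
    const envoy::service::discovery::v3::DiscoveryResponse& message,
    Envoy::Config::ControlPlaneStats& control_plane_stats) {
  if (message.has_control_plane()) {
    control_plane_stats.identifier_.set(message.control_plane().identifier());
  }
}

The new GrpcSubscriptionTestHarness expectation below asserts the same round trip with the "ground_control_foo123" identifier.
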
Signed-off-by: Misha Efimov --- docs/root/configuration/overview/mgmt_server.rst | 1 + docs/root/version_history/current.rst | 1 + include/envoy/config/grpc_mux.h | 11 +++++++---- source/common/config/grpc_mux_impl.cc | 6 +++++- source/common/config/grpc_mux_impl.h | 9 +++++---- source/common/config/grpc_stream.h | 13 ++++--------- source/common/config/new_grpc_mux_impl.cc | 3 ++- source/common/config/new_grpc_mux_impl.h | 3 ++- source/common/config/utility.h | 12 ++++++++++++ test/common/config/delta_subscription_impl_test.cc | 6 +++--- .../common/config/delta_subscription_test_harness.h | 2 +- test/common/config/grpc_stream_test.cc | 7 +++---- test/common/config/grpc_subscription_test_harness.h | 4 +++- test/common/config/new_grpc_mux_impl_test.cc | 10 ++++++---- test/common/config/subscription_test_harness.h | 10 +++++----- test/mocks/config/mocks.h | 3 ++- 16 files changed, 62 insertions(+), 39 deletions(-) diff --git a/docs/root/configuration/overview/mgmt_server.rst b/docs/root/configuration/overview/mgmt_server.rst index febc770bb207..68d1f3b1d958 100644 --- a/docs/root/configuration/overview/mgmt_server.rst +++ b/docs/root/configuration/overview/mgmt_server.rst @@ -29,6 +29,7 @@ Management Server has a statistics tree rooted at *control_plane.* with the foll connected_state, Gauge, A boolean (1 for connected and 0 for disconnected) that indicates the current connection state with management server rate_limit_enforced, Counter, Total number of times rate limit was enforced for management server requests pending_requests, Gauge, Total number of pending requests when the rate limit was enforced + identifier, TextReadout, The identifier of the control plane instance that sent the last discovery response .. _subscription_statistics: diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 6a88f6d00328..bffadd1cf24b 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -48,6 +48,7 @@ New Features * access loggers: file access logger config added :ref:`log_format `. * aggregate cluster: make route :ref:`retry_priority ` predicates work with :ref:`this cluster type `. * compressor: generic :ref:`compressor ` filter exposed to users. +* config: added :ref:`identifier ` stat that reflects control plane identifier. * config: added :ref:`version_text ` stat that reflects xDS version. * decompressor: generic :ref:`decompressor ` filter exposed to users. * dynamic forward proxy: added :ref:`SNI based dynamic forward proxy ` support. diff --git a/include/envoy/config/grpc_mux.h b/include/envoy/config/grpc_mux.h index ff2c8d3c7a31..946ee3d13da8 100644 --- a/include/envoy/config/grpc_mux.h +++ b/include/envoy/config/grpc_mux.h @@ -13,16 +13,18 @@ namespace Config { /** * All control plane related stats. @see stats_macros.h */ -#define ALL_CONTROL_PLANE_STATS(COUNTER, GAUGE) \ +#define ALL_CONTROL_PLANE_STATS(COUNTER, GAUGE, TEXT_READOUT) \ COUNTER(rate_limit_enforced) \ GAUGE(connected_state, NeverImport) \ - GAUGE(pending_requests, Accumulate) + GAUGE(pending_requests, Accumulate) \ + TEXT_READOUT(identifier) /** * Struct definition for all control plane stats. @see stats_macros.h */ struct ControlPlaneStats { - ALL_CONTROL_PLANE_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT) + ALL_CONTROL_PLANE_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT, + GENERATE_TEXT_READOUT_STRUCT) }; /** @@ -119,7 +121,8 @@ template class GrpcStreamCallbacks { /** * For the GrpcStream to pass received protos to the context. 
*/ - virtual void onDiscoveryResponse(std::unique_ptr&& message) PURE; + virtual void onDiscoveryResponse(std::unique_ptr&& message, + ControlPlaneStats& control_plane_stats) PURE; /** * For the GrpcStream to call when its rate limiting logic allows more requests to be sent. diff --git a/source/common/config/grpc_mux_impl.cc b/source/common/config/grpc_mux_impl.cc index 6cc17325bd50..a18e9deced82 100644 --- a/source/common/config/grpc_mux_impl.cc +++ b/source/common/config/grpc_mux_impl.cc @@ -124,9 +124,13 @@ bool GrpcMuxImpl::paused(const std::string& type_url) const { } void GrpcMuxImpl::onDiscoveryResponse( - std::unique_ptr&& message) { + std::unique_ptr&& message, + ControlPlaneStats& control_plane_stats) { const std::string& type_url = message->type_url(); ENVOY_LOG(debug, "Received gRPC message for {} at version {}", type_url, message->version_info()); + if (message->has_control_plane()) { + control_plane_stats.identifier_.set(message->control_plane().identifier()); + } if (api_state_.count(type_url) == 0) { ENVOY_LOG(warn, "Ignoring the message for type URL {} as it has no current subscribers.", type_url); diff --git a/source/common/config/grpc_mux_impl.h b/source/common/config/grpc_mux_impl.h index cf93c899d284..63c39c0994b7 100644 --- a/source/common/config/grpc_mux_impl.h +++ b/source/common/config/grpc_mux_impl.h @@ -53,8 +53,9 @@ class GrpcMuxImpl : public GrpcMux, // Config::GrpcStreamCallbacks void onStreamEstablished() override; void onEstablishmentFailure() override; - void onDiscoveryResponse( - std::unique_ptr&& message) override; + void + onDiscoveryResponse(std::unique_ptr&& message, + ControlPlaneStats& control_plane_stats) override; void onWriteable() override; GrpcStream&&) override {} + void onDiscoveryResponse(std::unique_ptr&&, + ControlPlaneStats&) override {} }; } // namespace Config diff --git a/source/common/config/grpc_stream.h b/source/common/config/grpc_stream.h index d1b80ad6a38b..7700b96a7dfc 100644 --- a/source/common/config/grpc_stream.h +++ b/source/common/config/grpc_stream.h @@ -25,8 +25,9 @@ class GrpcStream : public Grpc::AsyncStreamCallbacks, Event::Dispatcher& dispatcher, Stats::Scope& scope, const RateLimitSettings& rate_limit_settings) : callbacks_(callbacks), async_client_(std::move(async_client)), - service_method_(service_method), control_plane_stats_(generateControlPlaneStats(scope)), - random_(random), time_source_(dispatcher.timeSource()), + service_method_(service_method), + control_plane_stats_(Utility::generateControlPlaneStats(scope)), random_(random), + time_source_(dispatcher.timeSource()), rate_limiting_enabled_(rate_limit_settings.enabled_) { retry_timer_ = dispatcher.createTimer([this]() -> void { establishNewStream(); }); if (rate_limiting_enabled_) { @@ -80,7 +81,7 @@ class GrpcStream : public Grpc::AsyncStreamCallbacks, // have 0 until it is reconnected. Setting here ensures that it is consistent with the state of // management server connection. 
control_plane_stats_.connected_state_.set(1); - callbacks_->onDiscoveryResponse(std::move(message)); + callbacks_->onDiscoveryResponse(std::move(message), control_plane_stats_); } void onReceiveTrailingMetadata(Http::ResponseTrailerMapPtr&& metadata) override { @@ -125,12 +126,6 @@ class GrpcStream : public Grpc::AsyncStreamCallbacks, retry_timer_->enableTimer(std::chrono::milliseconds(backoff_strategy_->nextBackOffMs())); } - ControlPlaneStats generateControlPlaneStats(Stats::Scope& scope) { - const std::string control_plane_prefix = "control_plane."; - return {ALL_CONTROL_PLANE_STATS(POOL_COUNTER_PREFIX(scope, control_plane_prefix), - POOL_GAUGE_PREFIX(scope, control_plane_prefix))}; - } - GrpcStreamCallbacks* const callbacks_; Grpc::AsyncClient async_client_; diff --git a/source/common/config/new_grpc_mux_impl.cc b/source/common/config/new_grpc_mux_impl.cc index fcec95a0b4cc..c69a14326b09 100644 --- a/source/common/config/new_grpc_mux_impl.cc +++ b/source/common/config/new_grpc_mux_impl.cc @@ -37,7 +37,8 @@ bool NewGrpcMuxImpl::paused(const std::string& type_url) const { } void NewGrpcMuxImpl::onDiscoveryResponse( - std::unique_ptr&& message) { + std::unique_ptr&& message, + ControlPlaneStats&) { ENVOY_LOG(debug, "Received DeltaDiscoveryResponse for {} at version {}", message->type_url(), message->system_version_info()); auto sub = subscriptions_.find(message->type_url()); diff --git a/source/common/config/new_grpc_mux_impl.h b/source/common/config/new_grpc_mux_impl.h index c7d63a93d01d..cfc712ad6222 100644 --- a/source/common/config/new_grpc_mux_impl.h +++ b/source/common/config/new_grpc_mux_impl.h @@ -41,7 +41,8 @@ class NewGrpcMuxImpl void resume(const std::string& type_url) override; bool paused(const std::string& type_url) const override; void onDiscoveryResponse( - std::unique_ptr&& message) override; + std::unique_ptr&& message, + ControlPlaneStats& control_plane_stats) override; void onStreamEstablished() override; diff --git a/source/common/config/utility.h b/source/common/config/utility.h index 78e98e93f219..ce8641e86d1b 100644 --- a/source/common/config/utility.h +++ b/source/common/config/utility.h @@ -184,6 +184,18 @@ class Utility { static RateLimitSettings parseRateLimitSettings(const envoy::config::core::v3::ApiConfigSource& api_config_source); + /** + * Generate a ControlPlaneStats object from stats scope. + * @param scope for stats. + * @return ControlPlaneStats for scope. + */ + static ControlPlaneStats generateControlPlaneStats(Stats::Scope& scope) { + const std::string control_plane_prefix = "control_plane."; + return {ALL_CONTROL_PLANE_STATS(POOL_COUNTER_PREFIX(scope, control_plane_prefix), + POOL_GAUGE_PREFIX(scope, control_plane_prefix), + POOL_TEXT_READOUT_PREFIX(scope, control_plane_prefix))}; + } + /** * Generate a SubscriptionStats object from stats scope. * @param scope for stats. diff --git a/test/common/config/delta_subscription_impl_test.cc b/test/common/config/delta_subscription_impl_test.cc index a13cb22ae28d..58a7ad0bb0fd 100644 --- a/test/common/config/delta_subscription_impl_test.cc +++ b/test/common/config/delta_subscription_impl_test.cc @@ -78,7 +78,7 @@ TEST_F(DeltaSubscriptionImplTest, PauseQueuesAcks) { message->set_type_url(Config::TypeUrl::get().ClusterLoadAssignment); nonce_acks_required_.push(nonce); static_cast(subscription_->grpcMux().get()) - ->onDiscoveryResponse(std::move(message)); + ->onDiscoveryResponse(std::move(message), control_plane_stats_); } // The server gives us our first version of resource name2. 
// subscription_ now wants to ACK name1 and then name2 (but can't due to pause). @@ -92,7 +92,7 @@ TEST_F(DeltaSubscriptionImplTest, PauseQueuesAcks) { message->set_type_url(Config::TypeUrl::get().ClusterLoadAssignment); nonce_acks_required_.push(nonce); static_cast(subscription_->grpcMux().get()) - ->onDiscoveryResponse(std::move(message)); + ->onDiscoveryResponse(std::move(message), control_plane_stats_); } // The server gives us an updated version of resource name1. // subscription_ now wants to ACK name1A, then name2, then name1B (but can't due to pause). @@ -106,7 +106,7 @@ TEST_F(DeltaSubscriptionImplTest, PauseQueuesAcks) { message->set_type_url(Config::TypeUrl::get().ClusterLoadAssignment); nonce_acks_required_.push(nonce); static_cast(subscription_->grpcMux().get()) - ->onDiscoveryResponse(std::move(message)); + ->onDiscoveryResponse(std::move(message), control_plane_stats_); } // All ACK sendMessage()s will happen upon calling resume(). EXPECT_CALL(async_stream_, sendMessageRaw_(_, _)) diff --git a/test/common/config/delta_subscription_test_harness.h b/test/common/config/delta_subscription_test_harness.h index c52a7fd1a2d1..31439cd84bdb 100644 --- a/test/common/config/delta_subscription_test_harness.h +++ b/test/common/config/delta_subscription_test_harness.h @@ -156,7 +156,7 @@ class DeltaSubscriptionTestHarness : public SubscriptionTestHarness { expectSendMessage({}, {}, Grpc::Status::WellKnownGrpcStatus::Internal, "bad config", {}); } static_cast(subscription_->grpcMux().get()) - ->onDiscoveryResponse(std::move(response)); + ->onDiscoveryResponse(std::move(response), control_plane_stats_); Mock::VerifyAndClearExpectations(&async_stream_); } diff --git a/test/common/config/grpc_stream_test.cc b/test/common/config/grpc_stream_test.cc index 03864161b5f0..4e5c620cc472 100644 --- a/test/common/config/grpc_stream_test.cc +++ b/test/common/config/grpc_stream_test.cc @@ -101,11 +101,10 @@ TEST_F(GrpcStreamTest, ReceiveMessage) { response_copy.set_type_url("faketypeURL"); auto response = std::make_unique(response_copy); envoy::service::discovery::v3::DiscoveryResponse received_message; - EXPECT_CALL(callbacks_, onDiscoveryResponse(_)) + EXPECT_CALL(callbacks_, onDiscoveryResponse(_, _)) .WillOnce([&received_message]( - std::unique_ptr&& message) { - received_message = *message; - }); + std::unique_ptr&& message, + ControlPlaneStats&) { received_message = *message; }); grpc_stream_.onReceiveMessage(std::move(response)); EXPECT_TRUE(TestUtility::protoEqual(response_copy, received_message)); } diff --git a/test/common/config/grpc_subscription_test_harness.h b/test/common/config/grpc_subscription_test_harness.h index 643009df1b95..1673e9a5a411 100644 --- a/test/common/config/grpc_subscription_test_harness.h +++ b/test/common/config/grpc_subscription_test_harness.h @@ -105,6 +105,7 @@ class GrpcSubscriptionTestHarness : public SubscriptionTestHarness { last_response_nonce_ = std::to_string(HashUtil::xxHash64(version)); response->set_nonce(last_response_nonce_); response->set_type_url(Config::TypeUrl::get().ClusterLoadAssignment); + response->mutable_control_plane()->set_identifier("ground_control_foo123"); Protobuf::RepeatedPtrField typed_resources; for (const auto& cluster : cluster_names) { if (std::find(last_cluster_names_.begin(), last_cluster_names_.end(), cluster) != @@ -125,7 +126,8 @@ class GrpcSubscriptionTestHarness : public SubscriptionTestHarness { expectSendMessage(last_cluster_names_, version_, false, Grpc::Status::WellKnownGrpcStatus::Internal, "bad config"); } - 
mux_->onDiscoveryResponse(std::move(response)); + mux_->onDiscoveryResponse(std::move(response), control_plane_stats_); + EXPECT_EQ(control_plane_stats_.identifier_.value(), "ground_control_foo123"); Mock::VerifyAndClearExpectations(&async_stream_); } diff --git a/test/common/config/new_grpc_mux_impl_test.cc b/test/common/config/new_grpc_mux_impl_test.cc index 81ecf627bc7d..b1b3b18f2d0a 100644 --- a/test/common/config/new_grpc_mux_impl_test.cc +++ b/test/common/config/new_grpc_mux_impl_test.cc @@ -41,6 +41,7 @@ class NewGrpcMuxImplTestBase : public testing::Test { public: NewGrpcMuxImplTestBase() : async_client_(new Grpc::MockAsyncClient()), + control_plane_stats_(Utility::generateControlPlaneStats(stats_)), control_plane_connected_state_( stats_.gauge("control_plane.connected_state", Stats::Gauge::ImportMode::NeverImport)) {} @@ -62,6 +63,7 @@ class NewGrpcMuxImplTestBase : public testing::Test { NiceMock local_info_; Stats::TestUtil::TestStore stats_; Envoy::Config::RateLimitSettings rate_limit_settings_; + ControlPlaneStats control_plane_stats_; Stats::Gauge& control_plane_connected_state_; }; @@ -86,7 +88,7 @@ TEST_F(NewGrpcMuxImplTest, DiscoveryResponseNonexistentSub) { unexpected_response->set_type_url(type_url); unexpected_response->set_system_version_info("0"); EXPECT_CALL(callbacks_, onConfigUpdate(_, _, "0")).Times(0); - grpc_mux_->onDiscoveryResponse(std::move(unexpected_response)); + grpc_mux_->onDiscoveryResponse(std::move(unexpected_response), control_plane_stats_); } { auto response = std::make_unique(); @@ -107,7 +109,7 @@ TEST_F(NewGrpcMuxImplTest, DiscoveryResponseNonexistentSub) { added_resources[0].resource()); EXPECT_TRUE(TestUtility::protoEqual(expected_assignment, load_assignment)); })); - grpc_mux_->onDiscoveryResponse(std::move(response)); + grpc_mux_->onDiscoveryResponse(std::move(response), control_plane_stats_); } } @@ -136,7 +138,7 @@ TEST_F(NewGrpcMuxImplTest, ConfigUpdateWithAliases) { response->mutable_resources()->at(0).add_aliases("domain1.test"); response->mutable_resources()->at(0).add_aliases("domain2.test"); - grpc_mux_->onDiscoveryResponse(std::move(response)); + grpc_mux_->onDiscoveryResponse(std::move(response), control_plane_stats_); const auto& subscriptions = grpc_mux_->subscriptions(); auto sub = subscriptions.find(type_url); @@ -165,7 +167,7 @@ TEST_F(NewGrpcMuxImplTest, ConfigUpdateWithNotFoundResponse) { response->mutable_resources()->at(0).set_name("not-found"); response->mutable_resources()->at(0).add_aliases("domain1.test"); - grpc_mux_->onDiscoveryResponse(std::move(response)); + grpc_mux_->onDiscoveryResponse(std::move(response), control_plane_stats_); const auto& subscriptions = grpc_mux_->subscriptions(); auto sub = subscriptions.find(type_url); diff --git a/test/common/config/subscription_test_harness.h b/test/common/config/subscription_test_harness.h index 4653a6b646bf..57342a11af92 100644 --- a/test/common/config/subscription_test_harness.h +++ b/test/common/config/subscription_test_harness.h @@ -20,7 +20,9 @@ const uint64_t TEST_TIME_MILLIS = 42000; */ class SubscriptionTestHarness : public Event::TestUsingSimulatedTime { public: - SubscriptionTestHarness() : stats_(Utility::generateStats(stats_store_)) { + SubscriptionTestHarness() + : stats_(Utility::generateStats(stats_store_)), + control_plane_stats_(Utility::generateControlPlaneStats(stats_store_)) { simTime().setSystemTime(SystemTime(std::chrono::milliseconds(TEST_TIME_MILLIS))); } virtual ~SubscriptionTestHarness() = default; @@ -94,10 +96,7 @@ class SubscriptionTestHarness 
: public Event::TestUsingSimulatedTime { } virtual void verifyControlPlaneStats(uint32_t connected_state) { - EXPECT_EQ( - connected_state, - stats_store_.gauge("control_plane.connected_state", Stats::Gauge::ImportMode::NeverImport) - .value()); + EXPECT_EQ(connected_state, control_plane_stats_.connected_state_.value()); } virtual void expectConfigUpdateFailed() PURE; @@ -112,6 +111,7 @@ class SubscriptionTestHarness : public Event::TestUsingSimulatedTime { Stats::TestUtil::TestStore stats_store_; SubscriptionStats stats_; + ControlPlaneStats control_plane_stats_; }; ACTION_P(ThrowOnRejectedConfig, accept) { diff --git a/test/mocks/config/mocks.h b/test/mocks/config/mocks.h index 4ac54dbc2010..99a6a08bc8ef 100644 --- a/test/mocks/config/mocks.h +++ b/test/mocks/config/mocks.h @@ -96,7 +96,8 @@ class MockGrpcStreamCallbacks MOCK_METHOD(void, onStreamEstablished, ()); MOCK_METHOD(void, onEstablishmentFailure, ()); MOCK_METHOD(void, onDiscoveryResponse, - (std::unique_ptr && message)); + (std::unique_ptr && message, + ControlPlaneStats& control_plane_stats)); MOCK_METHOD(void, onWriteable, ()); }; From ea87cee5795443a609f45297587ce8f02b8e1fe8 Mon Sep 17 00:00:00 2001 From: Sunjay Bhatia Date: Thu, 4 Jun 2020 19:02:38 -0400 Subject: [PATCH 297/909] Review and correct //test/exe:* targets for Windows (#11291) - Capture the Clean vs Modified scm build status on Windows - Corrects the version_out test - on MacOS, override the Clean vs Modified with an explicit argument to preserve existing behavior - Other changes are editorial to justify the reason for skipping tests Co-authored-by: William A Rowe Jr Co-authored-by: Sunjay Bhatia Signed-off-by: William A Rowe Jr Signed-off-by: Sunjay Bhatia --- source/common/common/BUILD | 6 +++++- source/common/common/generate_version_linkstamp.sh | 9 +++++++-- test/exe/BUILD | 12 +++++------- 3 files changed, 17 insertions(+), 10 deletions(-) diff --git a/source/common/common/BUILD b/source/common/common/BUILD index 5d473a9776f2..f9942fbc3223 100644 --- a/source/common/common/BUILD +++ b/source/common/common/BUILD @@ -348,7 +348,11 @@ genrule( genrule( name = "generate_version_linkstamp", outs = ["lib/version_linkstamp.h"], - cmd = "$(location :generate_version_linkstamp.sh) >> $@", + cmd = select({ + # Only iOS builds typically follow this logic, OS/X is built as a normal binary + "//bazel:apple": "$(location :generate_version_linkstamp.sh) Library >> $@", + "//conditions:default": "$(location :generate_version_linkstamp.sh) >> $@", + }), # Undocumented attr to depend on workspace status files. # https://github.com/bazelbuild/bazel/issues/4942 # Used here because generate_version_linkstamp.sh depends on the workspace status files. diff --git a/source/common/common/generate_version_linkstamp.sh b/source/common/common/generate_version_linkstamp.sh index 4ad2da073589..d8873f8b5a33 100755 --- a/source/common/common/generate_version_linkstamp.sh +++ b/source/common/common/generate_version_linkstamp.sh @@ -10,9 +10,14 @@ # But following the implicit trail one can deduce that linkstamp is in effect when "stamping" (https://github.com/bazelbuild/bazel/issues/2893) is on. # envoy_cc_library -- and the underlying cc_library rule -- does not support "stamping". # This makes sense as stamping mainly makes sense in the context of binaries for production releases, not static libraries. 
-build_scm_revision=$(grep BUILD_SCM_REVISION bazel-out/volatile-status.txt | sed 's/^BUILD_SCM_REVISION //' | tr -d '\\n') +build_scm_revision=$(sed -n -E 's/^BUILD_SCM_REVISION ([0-9a-f]{40})$/\1/p' < bazel-out/volatile-status.txt) +if [ -z "$1" ]; then + build_scm_status=$(sed -n -E 's/^BUILD_SCM_STATUS ([a-zA-Z]*)$/\1/p' < bazel-out/volatile-status.txt) +else + build_scm_status=$1 +fi echo "extern const char build_scm_revision[];" echo "extern const char build_scm_status[];" echo "const char build_scm_revision[] = \"$build_scm_revision\";" -echo "const char build_scm_status[] = \"Library\";" \ No newline at end of file +echo "const char build_scm_status[] = \"$build_scm_status\";" diff --git a/test/exe/BUILD b/test/exe/BUILD index 8581ec21ffe0..806820660bca 100644 --- a/test/exe/BUILD +++ b/test/exe/BUILD @@ -17,8 +17,9 @@ envoy_sh_test( "//bazel:raw_build_id.ldscript", "//source/exe:envoy-static", ], - # The sh_test helper from Bazel does not work as expected, see: https://github.com/bazelbuild/bazel/issues/10959 - tags = ["fails_on_windows"], + # The Windows equivalent of a binaries' "link stamp" is a resource file descriptor of the + # executable. Our build revision API and output of --version flags are sufficient for now. + tags = ["skip_on_windows"], ) envoy_sh_test( @@ -26,13 +27,12 @@ envoy_sh_test( srcs = ["envoy_static_test.sh"], coverage = False, data = ["//source/exe:envoy-static"], - # For windows, we expect to use a .ps1 script that leverages dumpbin.exe, see: + # TODO(Windows): expect to test to leverage dumpbin.exe to confirm we avoid msvcrt, see # https://github.com/envoyproxy/envoy/pull/8280#pullrequestreview-290187328 - # The sh_test helper from Bazel does not work as expected, see: https://github.com/bazelbuild/bazel/issues/10959 # Sanitizers doesn't like statically linked lib(std)c++ and libgcc, skip this test in that context. tags = [ - "fails_on_windows", "no_san", + "skip_on_windows", ], ) @@ -57,8 +57,6 @@ envoy_sh_test( "//bazel:raw_build_id.ldscript", "//source/exe:envoy-static", ], - # The sh_test helper from Bazel does not work as expected, see: https://github.com/bazelbuild/bazel/issues/10959 - tags = ["fails_on_windows"], ) envoy_cc_test( From 5118616e7e6ab7343432ed55138c56e2263f6799 Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Thu, 4 Jun 2020 20:30:10 -0700 Subject: [PATCH 298/909] check_format: don't error if clang-format-9 is in PATH under home (#11449) Signed-off-by: Greg Greenway --- tools/code_format/check_format.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/code_format/check_format.py b/tools/code_format/check_format.py index 295e969661d0..2dc4ed979f6d 100755 --- a/tools/code_format/check_format.py +++ b/tools/code_format/check_format.py @@ -214,7 +214,7 @@ def readFile(path): # environment variable. If it cannot be found, empty string is returned. 
def lookPath(executable): for path_dir in os.environ["PATH"].split(os.pathsep): - executable_path = os.path.join(path_dir, executable) + executable_path = os.path.expanduser(os.path.join(path_dir, executable)) if os.path.exists(executable_path): return executable_path return "" From 8ad0884d0ecc4c5e6e0671b7727b1e5abad47653 Mon Sep 17 00:00:00 2001 From: Seiji Date: Fri, 5 Jun 2020 00:30:59 -0300 Subject: [PATCH 299/909] docs: update disable_circuit_breaking.rst (#11451) https://github.com/envoyproxy/envoy/issues/11445 Signed-off-by: Seiji --- .../load_balancing/disable_circuit_breaking.rst | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/docs/root/faq/load_balancing/disable_circuit_breaking.rst b/docs/root/faq/load_balancing/disable_circuit_breaking.rst index dfc6180628c9..338c30caf0c7 100644 --- a/docs/root/faq/load_balancing/disable_circuit_breaking.rst +++ b/docs/root/faq/load_balancing/disable_circuit_breaking.rst @@ -15,8 +15,15 @@ of circuit breaking by setting the thresholds to a value of `1000000000`. circuit_breakers: thresholds: - priority: HIGH - max_connections: 1000000000 - max_pending_requests: 1000000000 - max_requests: 1000000000 - max_retries: 1000000000 + - priority: DEFAULT + max_connections: 1000000000 + max_pending_requests: 1000000000 + max_requests: 1000000000 + max_retries: 1000000000 + - priority: HIGH + max_connections: 1000000000 + max_pending_requests: 1000000000 + max_requests: 1000000000 + max_retries: 1000000000 + +Envoy supports priority routing at the route level. You may adjust the thresholds accordingly. From 67376d984e5c252d9934f33a32df65ab99f5258a Mon Sep 17 00:00:00 2001 From: Kateryna Nezdolii Date: Fri, 5 Jun 2020 16:34:20 +0200 Subject: [PATCH 300/909] Validate deprecated fields via validation visitor (#10853) This change intends to unify field deprecation and unknown field handling behind the validation visitor. So far validation of unknown fields was done through validation visitor and validation of deprecated fields was performed inline. 
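As a sketch of the unified interface this introduces (the visitor class below is hypothetical; the hook signature and the soft/hard deprecation split match the message_validator changes in this patch), a visitor now receives deprecated-field events alongside unknown-field events and decides whether to warn or reject:

#include "envoy/protobuf/message_validator.h"  // ValidationVisitor, DeprecatedProtoFieldException
#include "common/common/logger.h"              // ENVOY_LOG_MISC

// Hypothetical visitor: warn on soft-deprecated fields, reject fatal ones, mirroring
// what WarningValidationVisitorImpl and StrictValidationVisitorImpl do in this change.
class ExampleValidationVisitor : public Envoy::ProtobufMessage::ValidationVisitor {
public:
  void onUnknownField(absl::string_view description) override {
    ENVOY_LOG_MISC(warn, "Unknown field: {}", description);
  }
  void onDeprecatedField(absl::string_view description, bool soft_deprecation) override {
    if (soft_deprecation) {
      ENVOY_LOG_MISC(warn, "Deprecated field: {}", description);
    } else {
      throw Envoy::ProtobufMessage::DeprecatedProtoFieldException(std::string(description));
    }
  }
  bool skipValidation() override { return false; }
};

Callers that previously caught ProtoValidationException for fatal deprecations now see DeprecatedProtoFieldException instead, which is why the fuzz test and unit tests below update their expectations.
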
Risk Level:Low Fixes #8092 Signed-off-by: Kateryna Nezdolii --- include/envoy/protobuf/BUILD | 4 +- include/envoy/protobuf/message_validator.h | 21 +++++++++- source/common/protobuf/BUILD | 1 + .../common/protobuf/message_validator_impl.cc | 39 +++++++++++++++---- .../common/protobuf/message_validator_impl.h | 14 ++++--- source/common/protobuf/utility.cc | 30 +++++++------- source/server/server.cc | 4 +- .../http/conn_manager_impl_fuzz_test.cc | 3 ++ .../protobuf/message_validator_impl_test.cc | 10 ++--- test/common/protobuf/utility_test.cc | 14 ++++--- test/mocks/protobuf/BUILD | 4 +- test/mocks/protobuf/mocks.h | 1 + test/server/listener_manager_impl_test.h | 5 +++ 13 files changed, 104 insertions(+), 46 deletions(-) diff --git a/include/envoy/protobuf/BUILD b/include/envoy/protobuf/BUILD index 6510c566d103..76eff507352d 100644 --- a/include/envoy/protobuf/BUILD +++ b/include/envoy/protobuf/BUILD @@ -11,5 +11,7 @@ envoy_package() envoy_cc_library( name = "message_validator_interface", hdrs = ["message_validator.h"], - deps = ["//source/common/protobuf"], + deps = [ + "//source/common/protobuf", + ], ) diff --git a/include/envoy/protobuf/message_validator.h b/include/envoy/protobuf/message_validator.h index ddd6d14d3da2..8ec4dccb46e3 100644 --- a/include/envoy/protobuf/message_validator.h +++ b/include/envoy/protobuf/message_validator.h @@ -12,13 +12,22 @@ namespace ProtobufMessage { /** * Exception class for reporting validation errors due to the presence of unknown - * fields in a protobuf + * fields in a protobuf. */ class UnknownProtoFieldException : public EnvoyException { public: UnknownProtoFieldException(const std::string& message) : EnvoyException(message) {} }; +/** + * Exception class for reporting validation errors due to the presence of deprecated + * fields in a protobuf. + */ +class DeprecatedProtoFieldException : public EnvoyException { +public: + DeprecatedProtoFieldException(const std::string& message) : EnvoyException(message) {} +}; + /** * Visitor interface for a Protobuf::Message. The methods of ValidationVisitor are invoked to * perform validation based on events encountered during or after the parsing of proto binary @@ -30,7 +39,7 @@ class ValidationVisitor { /** * Invoked when an unknown field is encountered. - * @param description human readable description of the field + * @param description human readable description of the field. */ virtual void onUnknownField(absl::string_view description) PURE; @@ -39,6 +48,14 @@ class ValidationVisitor { * possible. **/ virtual bool skipValidation() PURE; + + /** + * Invoked when deprecated field is encountered. + * @param description human readable description of the field. + * @param soft_deprecation is set to true, visitor would log a warning message, otherwise would + * throw an exception. 
+ */ + virtual void onDeprecatedField(absl::string_view description, bool soft_deprecation) PURE; }; class ValidationContext { diff --git a/source/common/protobuf/BUILD b/source/common/protobuf/BUILD index 20ead1f753c6..f505161b810f 100644 --- a/source/common/protobuf/BUILD +++ b/source/common/protobuf/BUILD @@ -34,6 +34,7 @@ envoy_cc_library( deps = [ "//include/envoy/protobuf:message_validator_interface", "//include/envoy/stats:stats_interface", + "//source/common/common:documentation_url_lib", "//source/common/common:hash_lib", "//source/common/common:logger_lib", "//source/common/common:macros", diff --git a/source/common/protobuf/message_validator_impl.cc b/source/common/protobuf/message_validator_impl.cc index 9b164a925da5..c486f9d0d4ec 100644 --- a/source/common/protobuf/message_validator_impl.cc +++ b/source/common/protobuf/message_validator_impl.cc @@ -11,10 +11,24 @@ namespace Envoy { namespace ProtobufMessage { -void WarningValidationVisitorImpl::setCounter(Stats::Counter& counter) { - ASSERT(counter_ == nullptr); - counter_ = &counter; - counter.add(prestats_count_); +namespace { +const char deprecation_error[] = " If continued use of this field is absolutely necessary, " + "see " ENVOY_DOC_URL_RUNTIME_OVERRIDE_DEPRECATED " for " + "how to apply a temporary and highly discouraged override."; + +void onDeprecatedFieldCommon(absl::string_view description, bool soft_deprecation) { + if (soft_deprecation) { + ENVOY_LOG_MISC(warn, "Deprecated field: {}", absl::StrCat(description, deprecation_error)); + } else { + throw DeprecatedProtoFieldException(absl::StrCat(description, deprecation_error)); + } +} +} // namespace + +void WarningValidationVisitorImpl::setUnknownCounter(Stats::Counter& counter) { + ASSERT(unknown_counter_ == nullptr); + unknown_counter_ = &counter; + counter.add(prestats_unknown_count_); } void WarningValidationVisitorImpl::onUnknownField(absl::string_view description) { @@ -24,20 +38,31 @@ void WarningValidationVisitorImpl::onUnknownField(absl::string_view description) if (!it.second) { return; } + // It's a new field, log and bump stat. 
ENVOY_LOG(warn, "Unknown field: {}", description); - if (counter_ == nullptr) { - ++prestats_count_; + if (unknown_counter_ == nullptr) { + ++prestats_unknown_count_; } else { - counter_->inc(); + unknown_counter_->inc(); } } +void WarningValidationVisitorImpl::onDeprecatedField(absl::string_view description, + bool soft_deprecation) { + onDeprecatedFieldCommon(description, soft_deprecation); +} + void StrictValidationVisitorImpl::onUnknownField(absl::string_view description) { throw UnknownProtoFieldException( absl::StrCat("Protobuf message (", description, ") has unknown fields")); } +void StrictValidationVisitorImpl::onDeprecatedField(absl::string_view description, + bool soft_deprecation) { + onDeprecatedFieldCommon(description, soft_deprecation); +} + ValidationVisitor& getNullValidationVisitor() { MUTABLE_CONSTRUCT_ON_FIRST_USE(NullValidationVisitorImpl); } diff --git a/source/common/protobuf/message_validator_impl.h b/source/common/protobuf/message_validator_impl.h index 0ba98161ec5d..e2f49ce9dec7 100644 --- a/source/common/protobuf/message_validator_impl.h +++ b/source/common/protobuf/message_validator_impl.h @@ -3,6 +3,7 @@ #include "envoy/protobuf/message_validator.h" #include "envoy/stats/stats.h" +#include "common/common/documentation_url.h" #include "common/common/logger.h" #include "absl/container/flat_hash_set.h" @@ -14,6 +15,7 @@ class NullValidationVisitorImpl : public ValidationVisitor { public: // Envoy::ProtobufMessage::ValidationVisitor void onUnknownField(absl::string_view) override {} + void onDeprecatedField(absl::string_view, bool) override {} // Envoy::ProtobufMessage::ValidationVisitor bool skipValidation() override { return true; } @@ -24,10 +26,11 @@ ValidationVisitor& getNullValidationVisitor(); class WarningValidationVisitorImpl : public ValidationVisitor, public Logger::Loggable { public: - void setCounter(Stats::Counter& counter); + void setUnknownCounter(Stats::Counter& counter); // Envoy::ProtobufMessage::ValidationVisitor void onUnknownField(absl::string_view description) override; + void onDeprecatedField(absl::string_view description, bool soft_deprecation) override; // Envoy::ProtobufMessage::ValidationVisitor bool skipValidation() override { return false; } @@ -36,10 +39,10 @@ class WarningValidationVisitorImpl : public ValidationVisitor, // Track hashes of descriptions we've seen, to avoid log spam. A hash is used here to avoid // wasting memory with unused strings. absl::flat_hash_set descriptions_; - // This can be late initialized via setCounter(), enabling the server bootstrap loading which - // occurs prior to the initialization of the stats subsystem. - Stats::Counter* counter_{}; - uint64_t prestats_count_{}; + // This can be late initialized via setUnknownCounter(), enabling the server bootstrap loading + // which occurs prior to the initialization of the stats subsystem. 
+ Stats::Counter* unknown_counter_{}; + uint64_t prestats_unknown_count_{}; }; class StrictValidationVisitorImpl : public ValidationVisitor { @@ -49,6 +52,7 @@ class StrictValidationVisitorImpl : public ValidationVisitor { // Envoy::ProtobufMessage::ValidationVisitor bool skipValidation() override { return false; } + void onDeprecatedField(absl::string_view description, bool soft_deprecation) override; }; ValidationVisitor& getStrictValidationVisitor(); diff --git a/source/common/protobuf/utility.cc b/source/common/protobuf/utility.cc index 048da4098382..8bd7ed67c41e 100644 --- a/source/common/protobuf/utility.cc +++ b/source/common/protobuf/utility.cc @@ -165,7 +165,8 @@ void tryWithApiBoosting(MessageXformFn f, Protobuf::Message& message) { // otherwise fatal field. Throws a warning on use of a fatal by default field. void deprecatedFieldHelper(Runtime::Loader* runtime, bool proto_annotated_as_deprecated, bool proto_annotated_as_disallowed, const std::string& feature_name, - std::string error, const Protobuf::Message& message) { + std::string error, const Protobuf::Message& message, + ProtobufMessage::ValidationVisitor& validation_visitor) { // This option is for Envoy builds with --define deprecated_features=disabled // The build options CI then verifies that as Envoy developers deprecate fields, // that they update canonical configs and unit tests to not use those deprecated @@ -196,14 +197,9 @@ void deprecatedFieldHelper(Runtime::Loader* runtime, bool proto_annotated_as_dep std::string with_overridden = fmt::format( error, (runtime_overridden ? "runtime overrides to continue using now fatal-by-default " : "")); - if (warn_only) { - ENVOY_LOG_MISC(warn, "{}", with_overridden); - } else { - const char fatal_error[] = " If continued use of this field is absolutely necessary, " - "see " ENVOY_DOC_URL_RUNTIME_OVERRIDE_DEPRECATED " for how " - "to apply a temporary and highly discouraged override."; - throw ProtoValidationException(with_overridden + fatal_error, message); - } + + validation_visitor.onDeprecatedField("type " + message.GetTypeName() + " " + with_overridden, + warn_only); } } // namespace @@ -386,11 +382,10 @@ void MessageUtil::loadFromFile(const std::string& path, Protobuf::Message& messa namespace { -void checkForDeprecatedNonRepeatedEnumValue(const Protobuf::Message& message, - absl::string_view filename, - const Protobuf::FieldDescriptor* field, - const Protobuf::Reflection* reflection, - Runtime::Loader* runtime) { +void checkForDeprecatedNonRepeatedEnumValue( + const Protobuf::Message& message, absl::string_view filename, + const Protobuf::FieldDescriptor* field, const Protobuf::Reflection* reflection, + Runtime::Loader* runtime, ProtobufMessage::ValidationVisitor& validation_visitor) { // Repeated fields will be handled by recursion in checkForUnexpectedFields. 
if (field->is_repeated() || field->cpp_type() != Protobuf::FieldDescriptor::CPPTYPE_ENUM) { return; @@ -413,7 +408,7 @@ void checkForDeprecatedNonRepeatedEnumValue(const Protobuf::Message& message, runtime, true /*deprecated*/, enum_value_descriptor->options().GetExtension(envoy::annotations::disallowed_by_default_enum), absl::StrCat("envoy.deprecated_features:", enum_value_descriptor->full_name()), error, - message); + message, validation_visitor); } class UnexpectedFieldProtoVisitor : public ProtobufMessage::ConstProtoVisitor { @@ -429,7 +424,8 @@ class UnexpectedFieldProtoVisitor : public ProtobufMessage::ConstProtoVisitor { // Before we check to see if the field is in use, see if there's a // deprecated default enum value. - checkForDeprecatedNonRepeatedEnumValue(message, filename, &field, reflection, runtime_); + checkForDeprecatedNonRepeatedEnumValue(message, filename, &field, reflection, runtime_, + validation_visitor_); // If this field is not in use, continue. if ((field.is_repeated() && reflection->FieldSize(message, &field) == 0) || @@ -447,7 +443,7 @@ class UnexpectedFieldProtoVisitor : public ProtobufMessage::ConstProtoVisitor { deprecatedFieldHelper(runtime_, true /*deprecated*/, field.options().GetExtension(envoy::annotations::disallowed_by_default), absl::StrCat("envoy.deprecated_features:", field.full_name()), warning, - message); + message, validation_visitor_); } return nullptr; } diff --git a/source/server/server.cc b/source/server/server.cc index c3f0c35e683f..246aac405ffe 100644 --- a/source/server/server.cc +++ b/source/server/server.cc @@ -320,9 +320,9 @@ void InstanceImpl::initialize(const Options& options, ServerStats{ALL_SERVER_STATS(POOL_COUNTER_PREFIX(stats_store_, server_stats_prefix), POOL_GAUGE_PREFIX(stats_store_, server_stats_prefix), POOL_HISTOGRAM_PREFIX(stats_store_, server_stats_prefix))}); - validation_context_.static_warning_validation_visitor().setCounter( + validation_context_.static_warning_validation_visitor().setUnknownCounter( server_stats_->static_unknown_fields_); - validation_context_.dynamic_warning_validation_visitor().setCounter( + validation_context_.dynamic_warning_validation_visitor().setUnknownCounter( server_stats_->dynamic_unknown_fields_); initialization_timer_ = std::make_unique( diff --git a/test/common/http/conn_manager_impl_fuzz_test.cc b/test/common/http/conn_manager_impl_fuzz_test.cc index 61e496f85c54..37079704bb85 100644 --- a/test/common/http/conn_manager_impl_fuzz_test.cc +++ b/test/common/http/conn_manager_impl_fuzz_test.cc @@ -490,6 +490,9 @@ DEFINE_PROTO_FUZZER(const test::common::http::ConnManagerImplTestCase& input) { } catch (const ProtoValidationException& e) { ENVOY_LOG_MISC(debug, "ProtoValidationException: {}", e.what()); return; + } catch (const Envoy::ProtobufMessage::DeprecatedProtoFieldException& e) { + ENVOY_LOG_MISC(debug, "DeprecatedProtoFieldException: {}", e.what()); + return; } FuzzConfig config; diff --git a/test/common/protobuf/message_validator_impl_test.cc b/test/common/protobuf/message_validator_impl_test.cc index 7110a1432537..d558799cc440 100644 --- a/test/common/protobuf/message_validator_impl_test.cc +++ b/test/common/protobuf/message_validator_impl_test.cc @@ -23,7 +23,7 @@ TEST(NullValidationVisitorImpl, UnknownField) { // The warning validation visitor logs and bumps stats on unknown fields TEST(WarningValidationVisitorImpl, UnknownField) { Stats::TestUtil::TestStore stats; - Stats::Counter& counter = stats.counter("counter"); + Stats::Counter& unknown_counter = stats.counter("counter"); 
WarningValidationVisitorImpl warning_validation_visitor; // we want to be executed. EXPECT_FALSE(warning_validation_visitor.skipValidation()); @@ -37,13 +37,13 @@ TEST(WarningValidationVisitorImpl, UnknownField) { EXPECT_LOG_CONTAINS("warn", "Unknown field: bar", warning_validation_visitor.onUnknownField("bar")); // When we set the stats counter, the above increments are transferred. - EXPECT_EQ(0, counter.value()); - warning_validation_visitor.setCounter(counter); - EXPECT_EQ(2, counter.value()); + EXPECT_EQ(0, unknown_counter.value()); + warning_validation_visitor.setUnknownCounter(unknown_counter); + EXPECT_EQ(2, unknown_counter.value()); // A third unknown field is tracked in stats post-initialization. EXPECT_LOG_CONTAINS("warn", "Unknown field: baz", warning_validation_visitor.onUnknownField("baz")); - EXPECT_EQ(3, counter.value()); + EXPECT_EQ(3, unknown_counter.value()); } // The strict validation visitor throws on unknown fields. diff --git a/test/common/protobuf/utility_test.cc b/test/common/protobuf/utility_test.cc index ae832053dcab..f7cee00ca4a4 100644 --- a/test/common/protobuf/utility_test.cc +++ b/test/common/protobuf/utility_test.cc @@ -1498,7 +1498,7 @@ TEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(IndividualFieldDisallowed)) envoy::test::deprecation_test::Base base; base.set_is_deprecated_fatal("foo"); EXPECT_THROW_WITH_REGEX( - checkForDeprecation(base), ProtoValidationException, + checkForDeprecation(base), Envoy::ProtobufMessage::DeprecatedProtoFieldException, "Using deprecated option 'envoy.test.deprecation_test.Base.is_deprecated_fatal'"); } @@ -1509,7 +1509,7 @@ TEST_P(DeprecatedFieldsTest, // Make sure this is set up right. EXPECT_THROW_WITH_REGEX( - checkForDeprecation(base), ProtoValidationException, + checkForDeprecation(base), Envoy::ProtobufMessage::DeprecatedProtoFieldException, "Using deprecated option 'envoy.test.deprecation_test.Base.is_deprecated_fatal'"); // The config will be rejected, so the feature will not be used. EXPECT_EQ(0, runtime_deprecated_feature_use_.value()); @@ -1542,7 +1542,7 @@ TEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(DisallowViaRuntime)) { {{"envoy.deprecated_features:envoy.test.deprecation_test.Base.is_deprecated", " false"}}); EXPECT_THROW_WITH_REGEX( - checkForDeprecation(base), ProtoValidationException, + checkForDeprecation(base), Envoy::ProtobufMessage::DeprecatedProtoFieldException, "Using deprecated option 'envoy.test.deprecation_test.Base.is_deprecated'"); EXPECT_EQ(1, runtime_deprecated_feature_use_.value()); } @@ -1557,7 +1557,7 @@ TEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(MixOfFatalAndWarnings)) { EXPECT_LOG_CONTAINS( "warning", "Using deprecated option 'envoy.test.deprecation_test.Base.is_deprecated'", { EXPECT_THROW_WITH_REGEX( - checkForDeprecation(base), ProtoValidationException, + checkForDeprecation(base), Envoy::ProtobufMessage::DeprecatedProtoFieldException, "Using deprecated option 'envoy.test.deprecation_test.Base.is_deprecated_fatal'"); }); } @@ -1650,7 +1650,8 @@ TEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(RuntimeOverrideEnumDefault) {{"envoy.deprecated_features:envoy.test.deprecation_test.Base.DEPRECATED_DEFAULT", "false"}}); // Make sure this is set up right. 
- EXPECT_THROW_WITH_REGEX(checkForDeprecation(base), ProtoValidationException, + EXPECT_THROW_WITH_REGEX(checkForDeprecation(base), + Envoy::ProtobufMessage::DeprecatedProtoFieldException, "Using the default now-deprecated value DEPRECATED_DEFAULT"); } @@ -1659,7 +1660,8 @@ TEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(FatalEnum)) { envoy::test::deprecation_test::Base base; base.mutable_enum_container()->set_deprecated_enum( envoy::test::deprecation_test::Base::DEPRECATED_FATAL); - EXPECT_THROW_WITH_REGEX(checkForDeprecation(base), ProtoValidationException, + EXPECT_THROW_WITH_REGEX(checkForDeprecation(base), + Envoy::ProtobufMessage::DeprecatedProtoFieldException, "Using deprecated value DEPRECATED_FATAL"); Runtime::LoaderSingleton::getExisting()->mergeValues( diff --git a/test/mocks/protobuf/BUILD b/test/mocks/protobuf/BUILD index 2db80c5e3173..67b4c15cd644 100644 --- a/test/mocks/protobuf/BUILD +++ b/test/mocks/protobuf/BUILD @@ -12,5 +12,7 @@ envoy_cc_mock( name = "protobuf_mocks", srcs = ["mocks.cc"], hdrs = ["mocks.h"], - deps = ["//include/envoy/protobuf:message_validator_interface"], + deps = [ + "//include/envoy/protobuf:message_validator_interface", + ], ) diff --git a/test/mocks/protobuf/mocks.h b/test/mocks/protobuf/mocks.h index 5170f1ba1228..3e61b31fed12 100644 --- a/test/mocks/protobuf/mocks.h +++ b/test/mocks/protobuf/mocks.h @@ -13,6 +13,7 @@ class MockValidationVisitor : public ValidationVisitor { ~MockValidationVisitor() override; MOCK_METHOD(void, onUnknownField, (absl::string_view)); + MOCK_METHOD(void, onDeprecatedField, (absl::string_view, bool)); bool skipValidation() override { return skip_validation_; } diff --git a/test/server/listener_manager_impl_test.h b/test/server/listener_manager_impl_test.h index 0790b8faa4f7..bd979c98ed14 100644 --- a/test/server/listener_manager_impl_test.h +++ b/test/server/listener_manager_impl_test.h @@ -53,6 +53,10 @@ class ListenerManagerImplTest : public testing::Test { void SetUp() override { ON_CALL(server_, api()).WillByDefault(ReturnRef(*api_)); EXPECT_CALL(worker_factory_, createWorker_()).WillOnce(Return(worker_)); + ON_CALL(server_.validation_context_, staticValidationVisitor()) + .WillByDefault(ReturnRef(validation_visitor)); + ON_CALL(server_.validation_context_, dynamicValidationVisitor()) + .WillByDefault(ReturnRef(validation_visitor)); manager_ = std::make_unique(server_, listener_factory_, worker_factory_, enable_dispatcher_stats_); @@ -276,6 +280,7 @@ class ListenerManagerImplTest : public testing::Test { Api::OsSysCallsImpl os_sys_calls_actual_; NiceMock server_; NiceMock listener_factory_; + NiceMock validation_visitor; MockWorker* worker_ = new MockWorker(); NiceMock worker_factory_; std::unique_ptr manager_; From 591fdf8ac68658092dfa4ffa5d72632c01c10f23 Mon Sep 17 00:00:00 2001 From: yanavlasov Date: Fri, 5 Jun 2020 11:51:07 -0400 Subject: [PATCH 301/909] test: Use non throwing Http::Connection::dispatch method in H/2 codec unit test (#11419) Use non throwing Http::Connection::dispatch method in H/2 codec unit test. This change transitions codec unit test to using codec's non throwing public API only and is a prerequisite to removing exceptions from the codec. 
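The shape of the non-throwing contract the test now exercises, roughly (the helper function and header locations below are assumptions for illustration; the flood classifier is the one used by the new EmptyDataFlood expectation in this patch):

#include "envoy/buffer/buffer.h"   // Buffer::Instance
#include "envoy/http/codec.h"      // Http::Connection
#include "common/common/logger.h"  // ENVOY_LOG_MISC
#include "common/http/status.h"    // Http::Status helpers (assumed location)

// Hypothetical caller of the non-throwing API: inspect the returned Http::Status
// instead of catching CodecProtocolException / FrameFloodException.
bool dispatchAndLog(Envoy::Http::Connection& connection, Envoy::Buffer::Instance& data) {
  const Envoy::Http::Status status = connection.dispatch(data);
  if (status.ok()) {
    return true;
  }
  ENVOY_LOG_MISC(debug, "dispatch failed ({}): {}",
                 Envoy::Http::isBufferFloodError(status) ? "flood" : "codec error",
                 status.message());
  return false;
}

In the fixture itself, errors returned through this status are rethrown as ClientCodecError/ServerCodecError only so they can cross the mock network connections in the test harness.
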
Signed-off-by: Yan Avlasov --- test/common/http/http2/codec_impl_test.cc | 93 +++++++++++++---------- 1 file changed, 53 insertions(+), 40 deletions(-) diff --git a/test/common/http/http2/codec_impl_test.cc b/test/common/http/http2/codec_impl_test.cc index cce3fca74316..ef2944cf3ec0 100644 --- a/test/common/http/http2/codec_impl_test.cc +++ b/test/common/http/http2/codec_impl_test.cc @@ -45,6 +45,32 @@ namespace CommonUtility = ::Envoy::Http2::Utility; class Http2CodecImplTestFixture { public: + // The Http::Connection::dispatch method does not throw (any more). However unit tests in this + // file use codecs for sending test data through mock network connections to the codec under test. + // It is infeasible to plumb error codes returned by the dispatch() method of the codecs under + // test, through mock connections and sending codec. As a result error returned by the dispatch + // method of the codec under test invoked by the ConnectionWrapper is thrown as an exception. Note + // that exception goes only through the mock network connection and sending codec, i.e. it is + // thrown only through the test harness code. Specific exception types are to distinguish error + // codes returned when processing requests or responses. + // TODO(yanavlasov): modify the code to verify test expectations at the point of calling codec + // under test through the ON_CALL expectations in the + // setupDefaultConnectionMocks() method. This will make the exceptions below + // unnecessary. + struct ClientCodecError : public std::runtime_error { + ClientCodecError(Http::Status&& status) + : std::runtime_error(std::string(status.message())), status_(std::move(status)) {} + const char* what() const noexcept override { return status_.message().data(); } + const Http::Status status_; + }; + + struct ServerCodecError : public std::runtime_error { + ServerCodecError(Http::Status&& status) + : std::runtime_error(std::string(status.message())), status_(std::move(status)) {} + const char* what() const noexcept override { return status_.message().data(); } + const Http::Status status_; + }; + struct ConnectionWrapper { Http::Status dispatch(const Buffer::Instance& data, ConnectionImpl& connection) { Http::Status status = Http::okStatus(); @@ -63,26 +89,6 @@ class Http2CodecImplTestFixture { return status; } - // TODO(#10878): This test uses the innerDispatch which may throw exceptions while - // exception removal is in progress. Tests override MockConnection's write with this - // method. Connection::write can be called while dispatching data in a codec callback, or - // outside a dispatching context (for example, in RequestEncoder::encodeHeaders) where they are - // not caught like in Connection::dispatch. In practice, these would never be triggered since - // these inputs would fail parsing on ingress. - // This should be removed, and the throws that are expected outside of dispatching context - // should be replaced with error handling. 
- void innerDispatch(const Buffer::Instance& data, ConnectionImpl& connection) { - Http::Status status; - buffer_.add(data); - if (!dispatching_) { - while (buffer_.length() > 0) { - dispatching_ = true; - status = connection.innerDispatch(buffer_); - dispatching_ = false; - } - } - } - bool dispatching_{}; Buffer::OwnedImpl buffer_; }; @@ -126,11 +132,17 @@ class Http2CodecImplTestFixture { if (corrupt_metadata_frame_) { corruptMetadataFramePayload(data); } - server_wrapper_.innerDispatch(data, *server_); + auto status = server_wrapper_.dispatch(data, *server_); + if (!status.ok()) { + throw ServerCodecError(std::move(status)); + } })); ON_CALL(server_connection_, write(_, _)) .WillByDefault(Invoke([&](Buffer::Instance& data, bool) -> void { - client_wrapper_.innerDispatch(data, *client_); + auto status = client_wrapper_.dispatch(data, *client_); + if (!status.ok()) { + throw ClientCodecError(std::move(status)); + } })); } @@ -340,7 +352,7 @@ TEST_P(Http2CodecImplTest, InvalidContinueWithFin) { request_encoder_->encodeHeaders(request_headers, true); TestResponseHeaderMapImpl continue_headers{{":status", "100"}}; - EXPECT_THROW(response_encoder_->encodeHeaders(continue_headers, true), CodecProtocolException); + EXPECT_THROW(response_encoder_->encodeHeaders(continue_headers, true), ClientCodecError); EXPECT_EQ(1, stats_store_.counter("http2.rx_messaging_error").value()); } @@ -386,7 +398,7 @@ TEST_P(Http2CodecImplTest, InvalidRepeatContinue) { EXPECT_CALL(response_decoder_, decode100ContinueHeaders_(_)); response_encoder_->encode100ContinueHeaders(continue_headers); - EXPECT_THROW(response_encoder_->encodeHeaders(continue_headers, true), CodecProtocolException); + EXPECT_THROW(response_encoder_->encodeHeaders(continue_headers, true), ClientCodecError); EXPECT_EQ(1, stats_store_.counter("http2.rx_messaging_error").value()); }; @@ -440,7 +452,7 @@ TEST_P(Http2CodecImplTest, Invalid103) { response_encoder_->encodeHeaders(early_hint_headers, false); EXPECT_THROW_WITH_MESSAGE(response_encoder_->encodeHeaders(early_hint_headers, false), - CodecProtocolException, "Unexpected 'trailers' with no end stream."); + ClientCodecError, "Unexpected 'trailers' with no end stream."); EXPECT_EQ(1, stats_store_.counter("http2.too_many_header_frames").value()); } @@ -461,7 +473,7 @@ TEST_P(Http2CodecImplTest, Invalid204WithContentLength) { response_headers.addCopy(std::to_string(i), std::to_string(i)); } - EXPECT_THROW(response_encoder_->encodeHeaders(response_headers, false), CodecProtocolException); + EXPECT_THROW(response_encoder_->encodeHeaders(response_headers, false), ClientCodecError); EXPECT_EQ(1, stats_store_.counter("http2.rx_messaging_error").value()); }; @@ -523,8 +535,7 @@ TEST_P(Http2CodecImplTest, RefusedStreamReset) { TEST_P(Http2CodecImplTest, InvalidHeadersFrame) { initialize(); - EXPECT_THROW(request_encoder_->encodeHeaders(TestRequestHeaderMapImpl{}, true), - CodecProtocolException); + EXPECT_THROW(request_encoder_->encodeHeaders(TestRequestHeaderMapImpl{}, true), ServerCodecError); EXPECT_EQ(1, stats_store_.counter("http2.rx_messaging_error").value()); } @@ -681,7 +692,7 @@ TEST_P(Http2CodecImplTest, BadMetadataVecReceivedTest) { metadata_map_vector.push_back(std::move(metadata_map_ptr)); corrupt_metadata_frame_ = true; - EXPECT_THROW_WITH_MESSAGE(request_encoder_->encodeMetadata(metadata_map_vector), EnvoyException, + EXPECT_THROW_WITH_MESSAGE(request_encoder_->encodeMetadata(metadata_map_vector), ServerCodecError, "The user callback function failed"); } @@ -1540,7 +1551,7 @@ 
TEST_P(Http2CodecImplTest, PingFlood) { buffer.move(frame); })); - EXPECT_THROW(client_->sendPendingFrames(), FrameFloodException); + EXPECT_THROW(client_->sendPendingFrames(), ServerCodecError); EXPECT_EQ(ack_count, CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_CONTROL_FRAMES); EXPECT_EQ(1, stats_store_.counter("http2.outbound_control_flood").value()); } @@ -1606,7 +1617,7 @@ TEST_P(Http2CodecImplTest, PingFloodCounterReset) { // 1 more ping frame should overflow the outbound frame limit. EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr)); - EXPECT_THROW(client_->sendPendingFrames(), FrameFloodException); + EXPECT_THROW(client_->sendPendingFrames(), ServerCodecError); } // Verify that codec detects flood of outbound HEADER frames @@ -1633,7 +1644,7 @@ TEST_P(Http2CodecImplTest, ResponseHeadersFlood) { // Presently flood mitigation is done only when processing downstream data // So we need to send stream from downstream client to trigger mitigation EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr)); - EXPECT_THROW(client_->sendPendingFrames(), FrameFloodException); + EXPECT_THROW(client_->sendPendingFrames(), ServerCodecError); EXPECT_EQ(frame_count, CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES + 1); EXPECT_EQ(1, stats_store_.counter("http2.outbound_flood").value()); @@ -1666,7 +1677,7 @@ TEST_P(Http2CodecImplTest, ResponseDataFlood) { // Presently flood mitigation is done only when processing downstream data // So we need to send stream from downstream client to trigger mitigation EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr)); - EXPECT_THROW(client_->sendPendingFrames(), FrameFloodException); + EXPECT_THROW(client_->sendPendingFrames(), ServerCodecError); EXPECT_EQ(frame_count, CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES + 1); EXPECT_EQ(1, stats_store_.counter("http2.outbound_flood").value()); @@ -1740,7 +1751,7 @@ TEST_P(Http2CodecImplTest, ResponseDataFloodCounterReset) { // Presently flood mitigation is done only when processing downstream data // So we need to send a frame from downstream client to trigger mitigation EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr)); - EXPECT_THROW(client_->sendPendingFrames(), FrameFloodException); + EXPECT_THROW(client_->sendPendingFrames(), ServerCodecError); } // Verify that control frames are added to the counter of outbound frames of all types. 
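// Editor's sketch, not part of this patch: the TODO(yanavlasov) above suggests verifying test
// expectations at the point where setupDefaultConnectionMocks() invokes the codec under test,
// instead of converting a failed dispatch into a ClientCodecError/ServerCodecError exception.
// Under that approach the default mock write callback could assert on the returned Http::Status
// directly, roughly as follows; the assertion shown is illustrative (a flood test would instead
// expect isBufferFloodError(status)).
ON_CALL(client_connection_, write(_, _))
    .WillByDefault(Invoke([&](Buffer::Instance& data, bool) -> void {
      const Http::Status status = server_wrapper_.dispatch(data, *server_);
      EXPECT_TRUE(status.ok()) << status.message();
    }));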
@@ -1769,7 +1780,7 @@ TEST_P(Http2CodecImplTest, PingStacksWithDataFlood) { } // Send one PING frame above the outbound queue size limit EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr)); - EXPECT_THROW(client_->sendPendingFrames(), FrameFloodException); + EXPECT_THROW(client_->sendPendingFrames(), ServerCodecError); EXPECT_EQ(frame_count, CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES); EXPECT_EQ(1, stats_store_.counter("http2.outbound_flood").value()); @@ -1777,7 +1788,7 @@ TEST_P(Http2CodecImplTest, PingStacksWithDataFlood) { TEST_P(Http2CodecImplTest, PriorityFlood) { priorityFlood(); - EXPECT_THROW(client_->sendPendingFrames(), FrameFloodException); + EXPECT_THROW(client_->sendPendingFrames(), ServerCodecError); } TEST_P(Http2CodecImplTest, PriorityFloodOverride) { @@ -1789,7 +1800,7 @@ TEST_P(Http2CodecImplTest, PriorityFloodOverride) { TEST_P(Http2CodecImplTest, WindowUpdateFlood) { windowUpdateFlood(); - EXPECT_THROW(client_->sendPendingFrames(), FrameFloodException); + EXPECT_THROW(client_->sendPendingFrames(), ServerCodecError); } TEST_P(Http2CodecImplTest, WindowUpdateFloodOverride) { @@ -1802,7 +1813,9 @@ TEST_P(Http2CodecImplTest, EmptyDataFlood) { Buffer::OwnedImpl data; emptyDataFlood(data); EXPECT_CALL(request_decoder_, decodeData(_, false)); - EXPECT_THROW(server_wrapper_.innerDispatch(data, *server_), FrameFloodException); + auto status = server_wrapper_.dispatch(data, *server_); + EXPECT_FALSE(status.ok()); + EXPECT_TRUE(isBufferFloodError(status)); } TEST_P(Http2CodecImplTest, EmptyDataFloodOverride) { @@ -1933,11 +1946,11 @@ class Http2CodecMetadataTest : public Http2CodecImplTestFixture, public ::testin max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_); ON_CALL(client_connection_, write(_, _)) .WillByDefault(Invoke([&](Buffer::Instance& data, bool) -> void { - server_wrapper_.innerDispatch(data, *server_); + ASSERT_TRUE(server_wrapper_.dispatch(data, *server_).ok()); })); ON_CALL(server_connection_, write(_, _)) .WillByDefault(Invoke([&](Buffer::Instance& data, bool) -> void { - client_wrapper_.innerDispatch(data, *client_); + ASSERT_TRUE(client_wrapper_.dispatch(data, *client_).ok()); })); } From 702a3648273af44408e93ab5e584e2b7e5261f6b Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Fri, 5 Jun 2020 10:47:04 -0700 Subject: [PATCH 302/909] test: use specific header map types in all tests (#11454) In a forthcoming change we will be dropping the default map. 
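For illustration, a minimal sketch (editor's note, not part of this patch) of what the change looks like at a typical call site: a generic Http::TestHeaderMapImpl is replaced by the test map type that matches the direction and position of the headers. The header values below are representative only.

  Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, {":path", "/"}};
  Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}};
  Http::TestResponseTrailerMapImpl response_trailers{{"grpc-status", "0"}};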
Signed-off-by: Matt Klein --- test/common/grpc/common_test.cc | 8 +- test/common/grpc/context_impl_test.cc | 2 +- .../grpc/grpc_client_integration_test.cc | 13 +- .../grpc_client_integration_test_harness.h | 13 +- test/common/http/async_client_impl_test.cc | 12 +- test/common/http/date_provider_impl_test.cc | 2 +- test/common/http/header_utility_test.cc | 103 ++++++----- test/common/http/http1/codec_impl_test.cc | 94 +++++----- test/common/http/http2/frame_replay_test.cc | 4 +- test/common/http/path_utility_test.cc | 5 +- test/common/http/utility_test.cc | 34 ++-- test/common/router/config_impl_test.cc | 12 +- test/common/router/header_formatter_test.cc | 22 ++- test/common/router/header_parser_fuzz_test.cc | 2 +- test/common/router/rds_impl_test.cc | 8 +- test/common/router/router_ratelimit_test.cc | 20 +- test/common/router/router_test.cc | 4 +- test/common/router/scoped_config_impl_test.cc | 72 ++++---- test/common/router/scoped_rds_test.cc | 42 ++--- .../stream_info/stream_info_impl_test.cc | 6 +- .../ext_authz/check_request_utils_test.cc | 4 +- .../ext_authz/ext_authz_grpc_impl_test.cc | 10 +- .../ext_authz/ext_authz_http_impl_test.cc | 2 +- .../common/ratelimit/ratelimit_impl_test.cc | 4 +- .../filters/common/rbac/engine_impl_test.cc | 19 +- .../filters/common/rbac/matchers_test.cc | 24 +-- .../compressor_filter_integration_test.cc | 8 +- .../http/cors/cors_filter_integration_test.cc | 36 ++-- .../decompressor/decompressor_filter_test.cc | 60 +++--- .../http/dynamo/dynamo_request_parser_test.cc | 9 +- .../filters/http/ext_authz/ext_authz_test.cc | 4 +- .../fault/fault_filter_integration_test.cc | 4 +- .../json_transcoder_filter_test.cc | 13 +- .../http/gzip/gzip_filter_integration_test.cc | 8 +- .../header_to_metadata_filter_test.cc | 8 +- .../http/ip_tagging/ip_tagging_filter_test.cc | 7 +- .../filters/http/lua/lua_filter_test.cc | 58 +++--- .../filters/http/lua/wrappers_test.cc | 20 +- .../http/on_demand/on_demand_filter_test.cc | 6 +- .../ratelimit/ratelimit_integration_test.cc | 6 +- .../filters/http/ratelimit/ratelimit_test.cc | 14 +- .../http/router/auto_sni_integration_test.cc | 4 +- .../filters/ratelimit/ratelimit_test.cc | 2 +- .../header_transport_impl_test.cc | 5 +- .../twitter_protocol_impl_test.cc | 24 +-- .../aws_iam/aws_iam_grpc_credentials_test.cc | 2 +- ...le_based_metadata_grpc_credentials_test.cc | 2 +- .../stats_sinks/hystrix/hystrix_test.cc | 9 +- test/extensions/tracers/xray/tracer_test.cc | 4 +- .../tls/integration/ssl_integration_test.cc | 4 +- .../api_listener_integration_test.cc | 2 +- test/integration/autonomous_upstream.cc | 9 +- test/integration/autonomous_upstream.h | 6 +- test/integration/fake_upstream.cc | 4 +- test/integration/header_integration_test.cc | 173 +++++++++--------- .../http2_upstream_integration_test.cc | 6 +- test/integration/http_integration.cc | 2 +- .../http_timeout_integration_test.cc | 2 +- test/integration/integration_test.cc | 17 +- .../listener_lds_integration_test.cc | 6 +- test/integration/redirect_integration_test.cc | 2 +- .../scoped_rds_integration_test.cc | 8 +- test/integration/utility.cc | 2 +- test/mocks/http/mocks.h | 8 +- test/mocks/http/mocks_test.cc | 48 ++--- test/server/admin/admin_test.cc | 26 +-- test/server/admin/logs_handler_test.cc | 2 +- test/server/admin/profiling_handler_test.cc | 8 +- test/server/admin/runtime_handler_test.cc | 8 +- test/server/admin/server_info_handler_test.cc | 18 +- test/server/admin/stats_handler_test.cc | 8 +- test/test_common/utility_test.cc | 13 +- 72 files changed, 627 
insertions(+), 609 deletions(-) diff --git a/test/common/grpc/common_test.cc b/test/common/grpc/common_test.cc index 055c3358dd08..6847f159670e 100644 --- a/test/common/grpc/common_test.cc +++ b/test/common/grpc/common_test.cc @@ -104,20 +104,20 @@ TEST(GrpcContextTest, GetGrpcTimeout) { } TEST(GrpcCommonTest, GrpcStatusDetailsBin) { - Http::TestHeaderMapImpl empty_trailers; + Http::TestResponseTrailerMapImpl empty_trailers; EXPECT_FALSE(Common::getGrpcStatusDetailsBin(empty_trailers)); - Http::TestHeaderMapImpl invalid_value{{"grpc-status-details-bin", "invalid"}}; + Http::TestResponseTrailerMapImpl invalid_value{{"grpc-status-details-bin", "invalid"}}; EXPECT_FALSE(Common::getGrpcStatusDetailsBin(invalid_value)); - Http::TestHeaderMapImpl unpadded_value{ + Http::TestResponseTrailerMapImpl unpadded_value{ {"grpc-status-details-bin", "CAUSElJlc291cmNlIG5vdCBmb3VuZA"}}; auto status = Common::getGrpcStatusDetailsBin(unpadded_value); ASSERT_TRUE(status); EXPECT_EQ(Status::WellKnownGrpcStatus::NotFound, status->code()); EXPECT_EQ("Resource not found", status->message()); - Http::TestHeaderMapImpl padded_value{ + Http::TestResponseTrailerMapImpl padded_value{ {"grpc-status-details-bin", "CAUSElJlc291cmNlIG5vdCBmb3VuZA=="}}; status = Common::getGrpcStatusDetailsBin(padded_value); ASSERT_TRUE(status); diff --git a/test/common/grpc/context_impl_test.cc b/test/common/grpc/context_impl_test.cc index d412dd87920f..ec8e340770c1 100644 --- a/test/common/grpc/context_impl_test.cc +++ b/test/common/grpc/context_impl_test.cc @@ -65,7 +65,7 @@ TEST(GrpcContextTest, ChargeStats) { TEST(GrpcContextTest, ResolveServiceAndMethod) { std::string service; std::string method; - Http::RequestHeaderMapImpl headers; + Http::TestRequestHeaderMapImpl headers; headers.setPath("/service_name/method_name?a=b"); const Http::HeaderEntry* path = headers.Path(); Stats::TestSymbolTable symbol_table; diff --git a/test/common/grpc/grpc_client_integration_test.cc b/test/common/grpc/grpc_client_integration_test.cc index 51bcf4ad319d..dadfc3e7c684 100644 --- a/test/common/grpc/grpc_client_integration_test.cc +++ b/test/common/grpc/grpc_client_integration_test.cc @@ -76,7 +76,8 @@ TEST_P(GrpcClientIntegrationTest, HttpNon200Status) { initialize(); for (const auto http_response_status : {400, 401, 403, 404, 429, 431}) { auto stream = createStream(empty_metadata_); - const Http::TestHeaderMapImpl reply_headers{{":status", std::to_string(http_response_status)}}; + const Http::TestResponseHeaderMapImpl reply_headers{ + {":status", std::to_string(http_response_status)}}; stream->expectInitialMetadata(empty_metadata_); stream->expectTrailingMetadata(empty_metadata_); // Technically this should be @@ -93,7 +94,7 @@ TEST_P(GrpcClientIntegrationTest, HttpNon200Status) { TEST_P(GrpcClientIntegrationTest, GrpcStatusFallback) { initialize(); auto stream = createStream(empty_metadata_); - const Http::TestHeaderMapImpl reply_headers{ + const Http::TestResponseHeaderMapImpl reply_headers{ {":status", "404"}, {"grpc-status", std::to_string(enumToInt(Status::WellKnownGrpcStatus::PermissionDenied))}, {"grpc-message", "error message"}}; @@ -189,7 +190,7 @@ TEST_P(GrpcClientIntegrationTest, OutOfRangeGrpcStatus) { EXPECT_CALL(*stream, onReceiveTrailingMetadata_(_)).WillExitIfNeeded(); dispatcher_helper_.setStreamEventPending(); stream->expectGrpcStatus(Status::WellKnownGrpcStatus::InvalidCode); - const Http::TestHeaderMapImpl reply_trailers{{"grpc-status", std::to_string(0x1337)}}; + const Http::TestResponseTrailerMapImpl reply_trailers{{"grpc-status", 
std::to_string(0x1337)}}; stream->fake_stream_->encodeTrailers(reply_trailers); dispatcher_helper_.runDispatcher(); } @@ -203,7 +204,7 @@ TEST_P(GrpcClientIntegrationTest, MissingGrpcStatus) { EXPECT_CALL(*stream, onReceiveTrailingMetadata_(_)).WillExitIfNeeded(); dispatcher_helper_.setStreamEventPending(); stream->expectGrpcStatus(Status::WellKnownGrpcStatus::Unknown); - const Http::TestHeaderMapImpl reply_trailers{{"some", "other header"}}; + const Http::TestResponseTrailerMapImpl reply_trailers{{"some", "other header"}}; stream->fake_stream_->encodeTrailers(reply_trailers); dispatcher_helper_.runDispatcher(); } @@ -304,7 +305,7 @@ TEST_P(GrpcClientIntegrationTest, StreamTrailersOnly) { TEST_P(GrpcClientIntegrationTest, RequestTrailersOnly) { initialize(); auto request = createRequest(empty_metadata_); - const Http::TestHeaderMapImpl reply_headers{{":status", "200"}, {"grpc-status", "0"}}; + const Http::TestResponseTrailerMapImpl reply_headers{{":status", "200"}, {"grpc-status", "0"}}; EXPECT_CALL(*request->child_span_, setTag(Eq(Tracing::Tags::get().GrpcStatusCode), Eq("0"))); EXPECT_CALL(*request->child_span_, setTag(Eq(Tracing::Tags::get().Error), Eq(Tracing::Tags::get().True))); @@ -412,7 +413,7 @@ class GrpcAccessTokenClientIntegrationTest : public GrpcSslClientIntegrationTest void expectExtraHeaders(FakeStream& fake_stream) override { AssertionResult result = fake_stream.waitForHeadersComplete(); RELEASE_ASSERT(result, result.message()); - Http::TestHeaderMapImpl stream_headers(fake_stream.headers()); + Http::TestRequestHeaderMapImpl stream_headers(fake_stream.headers()); if (!access_token_value_.empty()) { if (access_token_value_2_.empty()) { EXPECT_EQ("Bearer " + access_token_value_, stream_headers.get_("authorization")); diff --git a/test/common/grpc/grpc_client_integration_test_harness.h b/test/common/grpc/grpc_client_integration_test_harness.h index d466b5c886f8..15ccdf5c5d7b 100644 --- a/test/common/grpc/grpc_client_integration_test_harness.h +++ b/test/common/grpc/grpc_client_integration_test_harness.h @@ -112,7 +112,7 @@ class HelloworldStream : public MockAsyncStreamCallbacks void expectInitialMetadata(const TestMetadata& metadata) { EXPECT_CALL(*this, onReceiveInitialMetadata_(_)) .WillOnce(Invoke([this, &metadata](const Http::HeaderMap& received_headers) { - Http::TestHeaderMapImpl stream_headers(received_headers); + Http::TestResponseHeaderMapImpl stream_headers(received_headers); for (const auto& value : metadata) { EXPECT_EQ(value.second, stream_headers.get_(value.first)); } @@ -124,7 +124,7 @@ class HelloworldStream : public MockAsyncStreamCallbacks void expectTrailingMetadata(const TestMetadata& metadata) { EXPECT_CALL(*this, onReceiveTrailingMetadata_(_)) .WillOnce(Invoke([this, &metadata](const Http::HeaderMap& received_headers) { - Http::TestHeaderMapImpl stream_headers(received_headers); + Http::TestResponseTrailerMapImpl stream_headers(received_headers); for (auto& value : metadata) { EXPECT_EQ(value.second, stream_headers.get_(value.first)); } @@ -139,7 +139,7 @@ class HelloworldStream : public MockAsyncStreamCallbacks reply_headers->addReference(value.first, value.second); } expectInitialMetadata(metadata); - fake_stream_->encodeHeaders(Http::TestHeaderMapImpl(*reply_headers), false); + fake_stream_->encodeHeaders(Http::TestResponseHeaderMapImpl(*reply_headers), false); } void sendReply() { @@ -164,7 +164,8 @@ class HelloworldStream : public MockAsyncStreamCallbacks void sendServerTrailers(Status::GrpcStatus grpc_status, const std::string& grpc_message, const 
TestMetadata& metadata, bool trailers_only = false) { - Http::TestHeaderMapImpl reply_trailers{{"grpc-status", std::to_string(enumToInt(grpc_status))}}; + Http::TestResponseTrailerMapImpl reply_trailers{ + {"grpc-status", std::to_string(enumToInt(grpc_status))}}; if (!grpc_message.empty()) { reply_trailers.addCopy("grpc-message", grpc_message); } @@ -330,7 +331,7 @@ class GrpcClientIntegrationTest : public GrpcClientIntegrationParamTest { void expectInitialHeaders(FakeStream& fake_stream, const TestMetadata& initial_metadata) { AssertionResult result = fake_stream.waitForHeadersComplete(); RELEASE_ASSERT(result, result.message()); - stream_headers_ = std::make_unique(fake_stream.headers()); + stream_headers_ = std::make_unique(fake_stream.headers()); EXPECT_EQ("POST", stream_headers_->get_(":method")); EXPECT_EQ("/helloworld.Greeter/SayHello", stream_headers_->get_(":path")); EXPECT_EQ("application/grpc", stream_headers_->get_("content-type")); @@ -434,7 +435,7 @@ class GrpcClientIntegrationTest : public GrpcClientIntegrationParamTest { Stats::ScopeSharedPtr stats_scope_{stats_store_}; Grpc::StatNames google_grpc_stat_names_{stats_store_->symbolTable()}; TestMetadata service_wide_initial_metadata_; - std::unique_ptr stream_headers_; + std::unique_ptr stream_headers_; std::vector> channel_args_; #ifdef ENVOY_GOOGLE_GRPC std::unique_ptr google_tls_; diff --git a/test/common/http/async_client_impl_test.cc b/test/common/http/async_client_impl_test.cc index 35a93450c732..dcce9db1d83e 100644 --- a/test/common/http/async_client_impl_test.cc +++ b/test/common/http/async_client_impl_test.cc @@ -149,7 +149,7 @@ TEST_F(AsyncClientImplTest, Basic) { return nullptr; })); - TestHeaderMapImpl copy(message_->headers()); + TestRequestHeaderMapImpl copy(message_->headers()); copy.addCopy("x-envoy-internal", "true"); copy.addCopy("x-forwarded-for", "127.0.0.1"); copy.addCopy(":scheme", "http"); @@ -187,7 +187,7 @@ TEST_F(AsyncClientImplTracingTest, Basic) { return nullptr; })); - TestHeaderMapImpl copy(message_->headers()); + TestRequestHeaderMapImpl copy(message_->headers()); copy.addCopy("x-envoy-internal", "true"); copy.addCopy("x-forwarded-for", "127.0.0.1"); copy.addCopy(":scheme", "http"); @@ -231,7 +231,7 @@ TEST_F(AsyncClientImplTracingTest, BasicNamedChildSpan) { return nullptr; })); - TestHeaderMapImpl copy(message_->headers()); + TestRequestHeaderMapImpl copy(message_->headers()); copy.addCopy("x-envoy-internal", "true"); copy.addCopy("x-forwarded-for", "127.0.0.1"); copy.addCopy(":scheme", "http"); @@ -284,7 +284,7 @@ TEST_F(AsyncClientImplTest, BasicHashPolicy) { return &cm_.conn_pool_; })); - TestHeaderMapImpl copy(message_->headers()); + TestRequestHeaderMapImpl copy(message_->headers()); copy.addCopy("x-envoy-internal", "true"); copy.addCopy("x-forwarded-for", "127.0.0.1"); copy.addCopy(":scheme", "http"); @@ -1103,7 +1103,7 @@ TEST_F(AsyncClientImplTest, StreamTimeout) { EXPECT_CALL(*timer_, enableTimer(std::chrono::milliseconds(40), _)); EXPECT_CALL(stream_encoder_.stream_, resetStream(_)); - TestHeaderMapImpl expected_timeout{ + TestRequestHeaderMapImpl expected_timeout{ {":status", "504"}, {"content-length", "24"}, {"content-type", "text/plain"}}; EXPECT_CALL(stream_callbacks_, onHeaders_(HeaderMapEqualRef(&expected_timeout), false)); EXPECT_CALL(stream_callbacks_, onData(_, true)); @@ -1138,7 +1138,7 @@ TEST_F(AsyncClientImplTest, StreamTimeoutHeadReply) { EXPECT_CALL(*timer_, enableTimer(std::chrono::milliseconds(40), _)); EXPECT_CALL(stream_encoder_.stream_, resetStream(_)); - 
TestHeaderMapImpl expected_timeout{ + TestRequestHeaderMapImpl expected_timeout{ {":status", "504"}, {"content-length", "24"}, {"content-type", "text/plain"}}; EXPECT_CALL(stream_callbacks_, onHeaders_(HeaderMapEqualRef(&expected_timeout), true)); EXPECT_CALL(stream_callbacks_, onComplete()); diff --git a/test/common/http/date_provider_impl_test.cc b/test/common/http/date_provider_impl_test.cc index 304238232ad1..42312ad6af6e 100644 --- a/test/common/http/date_provider_impl_test.cc +++ b/test/common/http/date_provider_impl_test.cc @@ -22,7 +22,7 @@ TEST(DateProviderImplTest, All) { EXPECT_CALL(*timer, enableTimer(std::chrono::milliseconds(500), _)); TlsCachingDateProviderImpl provider(dispatcher, tls); - ResponseHeaderMapImpl headers; + TestResponseHeaderMapImpl headers; provider.setDateHeader(headers); EXPECT_NE(nullptr, headers.Date()); diff --git a/test/common/http/header_utility_test.cc b/test/common/http/header_utility_test.cc index 229b4e172bce..e28ec4fd17b9 100644 --- a/test/common/http/header_utility_test.cc +++ b/test/common/http/header_utility_test.cc @@ -30,7 +30,7 @@ class HeaderUtilityTest : public testing::Test { } return *headers_.Host(); } - RequestHeaderMapImpl headers_; + TestRequestHeaderMapImpl headers_; }; // Port's part from host header get removed @@ -188,7 +188,8 @@ invert_match: true } TEST(HeaderDataConstructorTest, GetAllOfHeader) { - TestHeaderMapImpl headers{{"foo", "val1"}, {"bar", "bar2"}, {"foo", "eep, bar"}, {"foo", ""}}; + TestRequestHeaderMapImpl headers{ + {"foo", "val1"}, {"bar", "bar2"}, {"foo", "eep, bar"}, {"foo", ""}}; std::vector foo_out; Http::HeaderUtility::getAllOfHeader(headers, "foo", foo_out); @@ -208,7 +209,7 @@ TEST(HeaderDataConstructorTest, GetAllOfHeader) { } TEST(MatchHeadersTest, MayMatchOneOrMoreRequestHeader) { - TestHeaderMapImpl headers{{"some-header", "a"}, {"other-header", "b"}}; + TestRequestHeaderMapImpl headers{{"some-header", "a"}, {"other-header", "b"}}; const std::string yaml = R"EOF( name: match-header @@ -227,13 +228,13 @@ regex_match: (a|b) } TEST(MatchHeadersTest, MustMatchAllHeaderData) { - TestHeaderMapImpl matching_headers_1{{"match-header-A", "1"}, {"match-header-B", "2"}}; - TestHeaderMapImpl matching_headers_2{ + TestRequestHeaderMapImpl matching_headers_1{{"match-header-A", "1"}, {"match-header-B", "2"}}; + TestRequestHeaderMapImpl matching_headers_2{ {"match-header-A", "3"}, {"match-header-B", "4"}, {"match-header-C", "5"}}; - TestHeaderMapImpl unmatching_headers_1{{"match-header-A", "6"}}; - TestHeaderMapImpl unmatching_headers_2{{"match-header-B", "7"}}; - TestHeaderMapImpl unmatching_headers_3{{"match-header-A", "8"}, {"match-header-C", "9"}}; - TestHeaderMapImpl unmatching_headers_4{{"match-header-C", "10"}, {"match-header-D", "11"}}; + TestRequestHeaderMapImpl unmatching_headers_1{{"match-header-A", "6"}}; + TestRequestHeaderMapImpl unmatching_headers_2{{"match-header-B", "7"}}; + TestRequestHeaderMapImpl unmatching_headers_3{{"match-header-A", "8"}, {"match-header-C", "9"}}; + TestRequestHeaderMapImpl unmatching_headers_4{{"match-header-C", "10"}, {"match-header-D", "11"}}; const std::string yamlA = R"EOF( name: match-header-A @@ -257,8 +258,8 @@ name: match-header-B } TEST(MatchHeadersTest, HeaderPresence) { - TestHeaderMapImpl matching_headers{{"match-header", "value"}}; - TestHeaderMapImpl unmatching_headers{{"other-header", "value"}}; + TestRequestHeaderMapImpl matching_headers{{"match-header", "value"}}; + TestRequestHeaderMapImpl unmatching_headers{{"other-header", "value"}}; const std::string yaml = 
R"EOF( name: match-header )EOF"; @@ -271,9 +272,9 @@ name: match-header } TEST(MatchHeadersTest, HeaderExactMatch) { - TestHeaderMapImpl matching_headers{{"match-header", "match-value"}}; - TestHeaderMapImpl unmatching_headers{{"match-header", "other-value"}, - {"other-header", "match-value"}}; + TestRequestHeaderMapImpl matching_headers{{"match-header", "match-value"}}; + TestRequestHeaderMapImpl unmatching_headers{{"match-header", "other-value"}, + {"other-header", "match-value"}}; const std::string yaml = R"EOF( name: match-header exact_match: match-value @@ -287,9 +288,9 @@ exact_match: match-value } TEST(MatchHeadersTest, HeaderExactMatchInverse) { - TestHeaderMapImpl matching_headers{{"match-header", "other-value"}, - {"other-header", "match-value"}}; - TestHeaderMapImpl unmatching_headers{{"match-header", "match-value"}}; + TestRequestHeaderMapImpl matching_headers{{"match-header", "other-value"}, + {"other-header", "match-value"}}; + TestRequestHeaderMapImpl unmatching_headers{{"match-header", "match-value"}}; const std::string yaml = R"EOF( name: match-header @@ -305,8 +306,9 @@ invert_match: true } TEST(MatchHeadersTest, HeaderRegexMatch) { - TestHeaderMapImpl matching_headers{{"match-header", "123"}}; - TestHeaderMapImpl unmatching_headers{{"match-header", "1234"}, {"match-header", "123.456"}}; + TestRequestHeaderMapImpl matching_headers{{"match-header", "123"}}; + TestRequestHeaderMapImpl unmatching_headers{{"match-header", "1234"}, + {"match-header", "123.456"}}; const std::string yaml = R"EOF( name: match-header regex_match: \d{3} @@ -320,8 +322,9 @@ regex_match: \d{3} } TEST(MatchHeadersTest, HeaderSafeRegexMatch) { - TestHeaderMapImpl matching_headers{{"match-header", "123"}}; - TestHeaderMapImpl unmatching_headers{{"match-header", "1234"}, {"match-header", "123.456"}}; + TestRequestHeaderMapImpl matching_headers{{"match-header", "123"}}; + TestRequestHeaderMapImpl unmatching_headers{{"match-header", "1234"}, + {"match-header", "123.456"}}; const std::string yaml = R"EOF( name: match-header safe_regex_match: @@ -337,8 +340,8 @@ name: match-header } TEST(MatchHeadersTest, HeaderRegexInverseMatch) { - TestHeaderMapImpl matching_headers{{"match-header", "1234"}, {"match-header", "123.456"}}; - TestHeaderMapImpl unmatching_headers{{"match-header", "123"}}; + TestRequestHeaderMapImpl matching_headers{{"match-header", "1234"}, {"match-header", "123.456"}}; + TestRequestHeaderMapImpl unmatching_headers{{"match-header", "123"}}; const std::string yaml = R"EOF( name: match-header @@ -354,11 +357,11 @@ invert_match: true } TEST(MatchHeadersTest, HeaderRangeMatch) { - TestHeaderMapImpl matching_headers{{"match-header", "-1"}}; - TestHeaderMapImpl unmatching_headers{{"match-header", "0"}, - {"match-header", "somestring"}, - {"match-header", "10.9"}, - {"match-header", "-1somestring"}}; + TestRequestHeaderMapImpl matching_headers{{"match-header", "-1"}}; + TestRequestHeaderMapImpl unmatching_headers{{"match-header", "0"}, + {"match-header", "somestring"}, + {"match-header", "10.9"}, + {"match-header", "-1somestring"}}; const std::string yaml = R"EOF( name: match-header range_match: @@ -374,11 +377,11 @@ name: match-header } TEST(MatchHeadersTest, HeaderRangeInverseMatch) { - TestHeaderMapImpl matching_headers{{"match-header", "0"}, - {"match-header", "somestring"}, - {"match-header", "10.9"}, - {"match-header", "-1somestring"}}; - TestHeaderMapImpl unmatching_headers{{"match-header", "-1"}}; + TestRequestHeaderMapImpl matching_headers{{"match-header", "0"}, + {"match-header", 
"somestring"}, + {"match-header", "10.9"}, + {"match-header", "-1somestring"}}; + TestRequestHeaderMapImpl unmatching_headers{{"match-header", "-1"}}; const std::string yaml = R"EOF( name: match-header @@ -396,9 +399,9 @@ invert_match: true } TEST(MatchHeadersTest, HeaderPresentMatch) { - TestHeaderMapImpl matching_headers{{"match-header", "123"}}; - TestHeaderMapImpl unmatching_headers{{"nonmatch-header", "1234"}, - {"other-nonmatch-header", "123.456"}}; + TestRequestHeaderMapImpl matching_headers{{"match-header", "123"}}; + TestRequestHeaderMapImpl unmatching_headers{{"nonmatch-header", "1234"}, + {"other-nonmatch-header", "123.456"}}; const std::string yaml = R"EOF( name: match-header @@ -413,9 +416,9 @@ present_match: true } TEST(MatchHeadersTest, HeaderPresentInverseMatch) { - TestHeaderMapImpl unmatching_headers{{"match-header", "123"}}; - TestHeaderMapImpl matching_headers{{"nonmatch-header", "1234"}, - {"other-nonmatch-header", "123.456"}}; + TestRequestHeaderMapImpl unmatching_headers{{"match-header", "123"}}; + TestRequestHeaderMapImpl matching_headers{{"nonmatch-header", "1234"}, + {"other-nonmatch-header", "123.456"}}; const std::string yaml = R"EOF( name: match-header @@ -431,8 +434,8 @@ invert_match: true } TEST(MatchHeadersTest, HeaderPrefixMatch) { - TestHeaderMapImpl matching_headers{{"match-header", "value123"}}; - TestHeaderMapImpl unmatching_headers{{"match-header", "123value"}}; + TestRequestHeaderMapImpl matching_headers{{"match-header", "value123"}}; + TestRequestHeaderMapImpl unmatching_headers{{"match-header", "123value"}}; const std::string yaml = R"EOF( name: match-header @@ -447,8 +450,8 @@ prefix_match: value } TEST(MatchHeadersTest, HeaderPrefixInverseMatch) { - TestHeaderMapImpl unmatching_headers{{"match-header", "value123"}}; - TestHeaderMapImpl matching_headers{{"match-header", "123value"}}; + TestRequestHeaderMapImpl unmatching_headers{{"match-header", "value123"}}; + TestRequestHeaderMapImpl matching_headers{{"match-header", "123value"}}; const std::string yaml = R"EOF( name: match-header @@ -464,8 +467,8 @@ invert_match: true } TEST(MatchHeadersTest, HeaderSuffixMatch) { - TestHeaderMapImpl matching_headers{{"match-header", "123value"}}; - TestHeaderMapImpl unmatching_headers{{"match-header", "value123"}}; + TestRequestHeaderMapImpl matching_headers{{"match-header", "123value"}}; + TestRequestHeaderMapImpl unmatching_headers{{"match-header", "value123"}}; const std::string yaml = R"EOF( name: match-header @@ -480,8 +483,8 @@ suffix_match: value } TEST(MatchHeadersTest, HeaderSuffixInverseMatch) { - TestHeaderMapImpl unmatching_headers{{"match-header", "123value"}}; - TestHeaderMapImpl matching_headers{{"match-header", "value123"}}; + TestRequestHeaderMapImpl unmatching_headers{{"match-header", "123value"}}; + TestRequestHeaderMapImpl matching_headers{{"match-header", "value123"}}; const std::string yaml = R"EOF( name: match-header @@ -539,14 +542,14 @@ TEST(HeaderIsValidTest, IsConnectResponse) { } TEST(HeaderAddTest, HeaderAdd) { - TestHeaderMapImpl headers{{"myheader1", "123value"}}; - TestHeaderMapImpl headers_to_add{{"myheader2", "456value"}}; + TestRequestHeaderMapImpl headers{{"myheader1", "123value"}}; + TestRequestHeaderMapImpl headers_to_add{{"myheader2", "456value"}}; HeaderUtility::addHeaders(headers, headers_to_add); headers_to_add.iterate( [](const Http::HeaderEntry& entry, void* context) -> Http::HeaderMap::Iterate { - TestHeaderMapImpl* headers = static_cast(context); + TestRequestHeaderMapImpl* headers = static_cast(context); 
Http::LowerCaseString lower_key{std::string(entry.key().getStringView())}; EXPECT_EQ(entry.value().getStringView(), headers->get(lower_key)->value().getStringView()); return Http::HeaderMap::Iterate::Continue; diff --git a/test/common/http/http1/codec_impl_test.cc b/test/common/http/http1/codec_impl_test.cc index f2dea33d03b0..b68fcce0cc76 100644 --- a/test/common/http/http1/codec_impl_test.cc +++ b/test/common/http/http1/codec_impl_test.cc @@ -79,7 +79,7 @@ class Http1ServerConnectionImplTest : public Http1CodecTestBase { Http::ServerConnectionPtr codec_; void expectHeadersTest(Protocol p, bool allow_absolute_url, Buffer::OwnedImpl& buffer, - TestHeaderMapImpl& expected_headers); + TestRequestHeaderMapImpl& expected_headers); void expect400(Protocol p, bool allow_absolute_url, Buffer::OwnedImpl& buffer, absl::string_view details = ""); void testRequestHeadersExceedLimit(std::string header_string, absl::string_view details = ""); @@ -90,14 +90,16 @@ class Http1ServerConnectionImplTest : public Http1CodecTestBase { // Send the request, and validate the received request headers. // Then send a response just to clean up. - void sendAndValidateRequestAndSendResponse(absl::string_view raw_request, - const TestHeaderMapImpl& expected_request_headers) { + void + sendAndValidateRequestAndSendResponse(absl::string_view raw_request, + const TestRequestHeaderMapImpl& expected_request_headers) { Buffer::OwnedImpl buffer(raw_request); sendAndValidateRequestAndSendResponse(buffer, expected_request_headers); } - void sendAndValidateRequestAndSendResponse(Buffer::Instance& buffer, - const TestHeaderMapImpl& expected_request_headers) { + void + sendAndValidateRequestAndSendResponse(Buffer::Instance& buffer, + const TestRequestHeaderMapImpl& expected_request_headers) { NiceMock decoder; Http::ResponseEncoder* response_encoder = nullptr; EXPECT_CALL(callbacks_, newStream(_, _)) @@ -153,7 +155,7 @@ void Http1ServerConnectionImplTest::expect400(Protocol p, bool allow_absolute_ur void Http1ServerConnectionImplTest::expectHeadersTest(Protocol p, bool allow_absolute_url, Buffer::OwnedImpl& buffer, - TestHeaderMapImpl& expected_headers) { + TestRequestHeaderMapImpl& expected_headers) { InSequence sequence; // Make a new 'codec' with the right settings @@ -302,7 +304,7 @@ TEST_F(Http1ServerConnectionImplTest, EmptyHeader) { MockRequestDecoder decoder; EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); - TestHeaderMapImpl expected_headers{ + TestRequestHeaderMapImpl expected_headers{ {"Test", ""}, {"Hello", "World"}, {":path", "/"}, @@ -355,7 +357,7 @@ TEST_F(Http1ServerConnectionImplTest, ChunkedBody) { MockRequestDecoder decoder; EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); - TestHeaderMapImpl expected_headers{ + TestRequestHeaderMapImpl expected_headers{ {":path", "/"}, {":method", "POST"}, {"transfer-encoding", "chunked"}, @@ -386,7 +388,7 @@ TEST_F(Http1ServerConnectionImplTest, ChunkedBodySplitOverTwoDispatches) { MockRequestDecoder decoder; EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); - TestHeaderMapImpl expected_headers{ + TestRequestHeaderMapImpl expected_headers{ {":path", "/"}, {":method", "POST"}, {"transfer-encoding", "chunked"}, @@ -424,7 +426,7 @@ TEST_F(Http1ServerConnectionImplTest, ChunkedBodyFragmentedBuffer) { MockRequestDecoder decoder; EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); - TestHeaderMapImpl expected_headers{ + TestRequestHeaderMapImpl expected_headers{ {":path", "/"}, {":method", "POST"}, 
{"transfer-encoding", "chunked"}, @@ -453,7 +455,7 @@ TEST_F(Http1ServerConnectionImplTest, ChunkedBodyCase) { MockRequestDecoder decoder; EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); - TestHeaderMapImpl expected_headers{ + TestRequestHeaderMapImpl expected_headers{ {":path", "/"}, {":method", "POST"}, {"transfer-encoding", "Chunked"}, @@ -480,7 +482,7 @@ TEST_F(Http1ServerConnectionImplTest, InvalidChunkHeader) { MockRequestDecoder decoder; EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); - TestHeaderMapImpl expected_headers{ + TestRequestHeaderMapImpl expected_headers{ {":path", "/"}, {":method", "POST"}, {"transfer-encoding", "chunked"}, @@ -516,7 +518,8 @@ TEST_F(Http1ServerConnectionImplTest, IdentityAndChunkedBody) { TEST_F(Http1ServerConnectionImplTest, HostWithLWS) { initialize(); - TestHeaderMapImpl expected_headers{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}; + TestRequestHeaderMapImpl expected_headers{ + {":authority", "host"}, {":path", "/"}, {":method", "GET"}}; // Regression test spaces before and after the host header value. sendAndValidateRequestAndSendResponse("GET / HTTP/1.1\r\nHost: host \r\n\r\n", expected_headers); @@ -541,10 +544,10 @@ TEST_F(Http1ServerConnectionImplTest, InnerLWSIsPreserved) { // reads, but the important part is that the header value is split such that the pieces have // leading and trailing whitespace characters. const std::string header_value_with_inner_lws = "v" + std::string(32 * 1024, ' ') + "v"; - TestHeaderMapImpl expected_headers{{":authority", "host"}, - {":path", "/"}, - {":method", "GET"}, - {"header_field", header_value_with_inner_lws}}; + TestRequestHeaderMapImpl expected_headers{{":authority", "host"}, + {":path", "/"}, + {":method", "GET"}, + {"header_field", header_value_with_inner_lws}}; { // Regression test spaces in the middle are preserved @@ -574,7 +577,7 @@ TEST_F(Http1ServerConnectionImplTest, Http10) { MockRequestDecoder decoder; EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); - TestHeaderMapImpl expected_headers{{":path", "/"}, {":method", "GET"}}; + TestRequestHeaderMapImpl expected_headers{{":path", "/"}, {":method", "GET"}}; EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), true)); Buffer::OwnedImpl buffer("GET / HTTP/1.0\r\n\r\n"); @@ -587,7 +590,7 @@ TEST_F(Http1ServerConnectionImplTest, Http10) { TEST_F(Http1ServerConnectionImplTest, Http10AbsoluteNoOp) { initialize(); - TestHeaderMapImpl expected_headers{{":path", "/"}, {":method", "GET"}}; + TestRequestHeaderMapImpl expected_headers{{":path", "/"}, {":method", "GET"}}; Buffer::OwnedImpl buffer("GET / HTTP/1.0\r\n\r\n"); expectHeadersTest(Protocol::Http10, true, buffer, expected_headers); } @@ -595,7 +598,7 @@ TEST_F(Http1ServerConnectionImplTest, Http10AbsoluteNoOp) { TEST_F(Http1ServerConnectionImplTest, Http10Absolute) { initialize(); - TestHeaderMapImpl expected_headers{ + TestRequestHeaderMapImpl expected_headers{ {":authority", "www.somewhere.com"}, {":path", "/foobar"}, {":method", "GET"}}; Buffer::OwnedImpl buffer("GET http://www.somewhere.com/foobar HTTP/1.0\r\n\r\n"); expectHeadersTest(Protocol::Http10, true, buffer, expected_headers); @@ -630,7 +633,7 @@ TEST_F(Http1ServerConnectionImplTest, Http10MultipleResponses) { // Now send an HTTP/1.1 request and make sure the protocol is tracked correctly. 
{ - TestHeaderMapImpl expected_headers{ + TestRequestHeaderMapImpl expected_headers{ {":authority", "www.somewhere.com"}, {":path", "/foobar"}, {":method", "GET"}}; Buffer::OwnedImpl buffer("GET /foobar HTTP/1.1\r\nHost: www.somewhere.com\r\n\r\n"); @@ -650,7 +653,7 @@ TEST_F(Http1ServerConnectionImplTest, Http10MultipleResponses) { TEST_F(Http1ServerConnectionImplTest, Http11AbsolutePath1) { initialize(); - TestHeaderMapImpl expected_headers{ + TestRequestHeaderMapImpl expected_headers{ {":authority", "www.somewhere.com"}, {":path", "/"}, {":method", "GET"}}; Buffer::OwnedImpl buffer("GET http://www.somewhere.com/ HTTP/1.1\r\nHost: bah\r\n\r\n"); expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); @@ -659,7 +662,7 @@ TEST_F(Http1ServerConnectionImplTest, Http11AbsolutePath1) { TEST_F(Http1ServerConnectionImplTest, Http11AbsolutePath2) { initialize(); - TestHeaderMapImpl expected_headers{ + TestRequestHeaderMapImpl expected_headers{ {":authority", "www.somewhere.com"}, {":path", "/foo/bar"}, {":method", "GET"}}; Buffer::OwnedImpl buffer("GET http://www.somewhere.com/foo/bar HTTP/1.1\r\nHost: bah\r\n\r\n"); expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); @@ -668,7 +671,7 @@ TEST_F(Http1ServerConnectionImplTest, Http11AbsolutePath2) { TEST_F(Http1ServerConnectionImplTest, Http11AbsolutePathWithPort) { initialize(); - TestHeaderMapImpl expected_headers{ + TestRequestHeaderMapImpl expected_headers{ {":authority", "www.somewhere.com:4532"}, {":path", "/foo/bar"}, {":method", "GET"}}; Buffer::OwnedImpl buffer( "GET http://www.somewhere.com:4532/foo/bar HTTP/1.1\r\nHost: bah\r\n\r\n"); @@ -678,7 +681,7 @@ TEST_F(Http1ServerConnectionImplTest, Http11AbsolutePathWithPort) { TEST_F(Http1ServerConnectionImplTest, Http11AbsoluteEnabledNoOp) { initialize(); - TestHeaderMapImpl expected_headers{ + TestRequestHeaderMapImpl expected_headers{ {":authority", "bah"}, {":path", "/foo/bar"}, {":method", "GET"}}; Buffer::OwnedImpl buffer("GET /foo/bar HTTP/1.1\r\nHost: bah\r\n\r\n"); expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); @@ -723,7 +726,7 @@ TEST_F(Http1ServerConnectionImplTest, Http11InvalidTrailerPost) { TEST_F(Http1ServerConnectionImplTest, Http11AbsolutePathNoSlash) { initialize(); - TestHeaderMapImpl expected_headers{ + TestRequestHeaderMapImpl expected_headers{ {":authority", "www.somewhere.com"}, {":path", "/"}, {":method", "GET"}}; Buffer::OwnedImpl buffer("GET http://www.somewhere.com HTTP/1.1\r\nHost: bah\r\n\r\n"); expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); @@ -754,7 +757,7 @@ TEST_F(Http1ServerConnectionImplTest, SketchyConnectionHeader) { TEST_F(Http1ServerConnectionImplTest, Http11RelativeOnly) { initialize(); - TestHeaderMapImpl expected_headers{ + TestRequestHeaderMapImpl expected_headers{ {":authority", "bah"}, {":path", "http://www.somewhere.com/"}, {":method", "GET"}}; Buffer::OwnedImpl buffer("GET http://www.somewhere.com/ HTTP/1.1\r\nHost: bah\r\n\r\n"); expectHeadersTest(Protocol::Http11, false, buffer, expected_headers); @@ -763,7 +766,7 @@ TEST_F(Http1ServerConnectionImplTest, Http11RelativeOnly) { TEST_F(Http1ServerConnectionImplTest, Http11Options) { initialize(); - TestHeaderMapImpl expected_headers{ + TestRequestHeaderMapImpl expected_headers{ {":authority", "www.somewhere.com"}, {":path", "*"}, {":method", "OPTIONS"}}; Buffer::OwnedImpl buffer("OPTIONS * HTTP/1.1\r\nHost: www.somewhere.com\r\n\r\n"); expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); @@ -777,7 +780,7 @@ 
TEST_F(Http1ServerConnectionImplTest, SimpleGet) { MockRequestDecoder decoder; EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); - TestHeaderMapImpl expected_headers{{":path", "/"}, {":method", "GET"}}; + TestRequestHeaderMapImpl expected_headers{{":path", "/"}, {":method", "GET"}}; EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), true)); Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\n\r\n"); @@ -929,7 +932,8 @@ TEST_F(Http1ServerConnectionImplTest, HostHeaderTranslation) { MockRequestDecoder decoder; EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); - TestHeaderMapImpl expected_headers{{":authority", "hello"}, {":path", "/"}, {":method", "GET"}}; + TestRequestHeaderMapImpl expected_headers{ + {":authority", "hello"}, {":path", "/"}, {":method", "GET"}}; EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), true)); Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\nHOST: hello\r\n\r\n"); @@ -993,7 +997,7 @@ TEST_F(Http1ServerConnectionImplTest, HeaderNameWithUnderscoreAllowed) { MockRequestDecoder decoder; EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); - TestHeaderMapImpl expected_headers{ + TestRequestHeaderMapImpl expected_headers{ {":authority", "h.com"}, {":path", "/"}, {":method", "GET"}, @@ -1017,7 +1021,7 @@ TEST_F(Http1ServerConnectionImplTest, HeaderNameWithUnderscoreAreDropped) { MockRequestDecoder decoder; EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); - TestHeaderMapImpl expected_headers{ + TestRequestHeaderMapImpl expected_headers{ {":authority", "h.com"}, {":path", "/"}, {":method", "GET"}, @@ -1168,7 +1172,8 @@ TEST_F(Http1ServerConnectionImplTest, PostWithContentLength) { MockRequestDecoder decoder; EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); - TestHeaderMapImpl expected_headers{{"content-length", "5"}, {":path", "/"}, {":method", "POST"}}; + TestRequestHeaderMapImpl expected_headers{ + {"content-length", "5"}, {":path", "/"}, {":method", "POST"}}; EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), false)); Buffer::OwnedImpl expected_data1("12345"); @@ -1193,7 +1198,8 @@ TEST_F(Http1ServerConnectionImplTest, PostWithContentLengthFragmentedBuffer) { MockRequestDecoder decoder; EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); - TestHeaderMapImpl expected_headers{{"content-length", "5"}, {":path", "/"}, {":method", "POST"}}; + TestRequestHeaderMapImpl expected_headers{ + {"content-length", "5"}, {":path", "/"}, {":method", "POST"}}; EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), false)); Buffer::OwnedImpl expected_data1("12345"); @@ -1543,7 +1549,7 @@ TEST_F(Http1ServerConnectionImplTest, RequestWithTrailersKept) { expectTrailersT TEST_F(Http1ServerConnectionImplTest, IgnoreUpgradeH2c) { initialize(); - TestHeaderMapImpl expected_headers{ + TestRequestHeaderMapImpl expected_headers{ {":authority", "www.somewhere.com"}, {":path", "/"}, {":method", "GET"}}; Buffer::OwnedImpl buffer( "GET http://www.somewhere.com/ HTTP/1.1\r\nConnection: " @@ -1554,10 +1560,10 @@ TEST_F(Http1ServerConnectionImplTest, IgnoreUpgradeH2c) { TEST_F(Http1ServerConnectionImplTest, IgnoreUpgradeH2cClose) { initialize(); - TestHeaderMapImpl expected_headers{{":authority", "www.somewhere.com"}, - {":path", "/"}, - {":method", "GET"}, - {"connection", "Close"}}; + TestRequestHeaderMapImpl expected_headers{{":authority", "www.somewhere.com"}, + {":path", "/"}, + {":method", "GET"}, + {"connection", 
"Close"}}; Buffer::OwnedImpl buffer("GET http://www.somewhere.com/ HTTP/1.1\r\nConnection: " "Upgrade, Close, HTTP2-Settings\r\nUpgrade: h2c\r\nHTTP2-Settings: " "token64\r\nHost: bah\r\n\r\n"); @@ -1567,10 +1573,10 @@ TEST_F(Http1ServerConnectionImplTest, IgnoreUpgradeH2cClose) { TEST_F(Http1ServerConnectionImplTest, IgnoreUpgradeH2cCloseEtc) { initialize(); - TestHeaderMapImpl expected_headers{{":authority", "www.somewhere.com"}, - {":path", "/"}, - {":method", "GET"}, - {"connection", "Close"}}; + TestRequestHeaderMapImpl expected_headers{{":authority", "www.somewhere.com"}, + {":path", "/"}, + {":method", "GET"}, + {"connection", "Close"}}; Buffer::OwnedImpl buffer("GET http://www.somewhere.com/ HTTP/1.1\r\nConnection: " "Upgrade, Close, HTTP2-Settings, Etc\r\nUpgrade: h2c\r\nHTTP2-Settings: " "token64\r\nHost: bah\r\n\r\n"); @@ -1660,7 +1666,7 @@ TEST_F(Http1ServerConnectionImplTest, ConnectRequestNoContentLength) { NiceMock decoder; EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); - TestHeaderMapImpl expected_headers{ + TestRequestHeaderMapImpl expected_headers{ {":authority", "host:80"}, {":method", "CONNECT"}, }; diff --git a/test/common/http/http2/frame_replay_test.cc b/test/common/http/http2/frame_replay_test.cc index 5554f19bcc5f..c88458e10c7e 100644 --- a/test/common/http/http2/frame_replay_test.cc +++ b/test/common/http/http2/frame_replay_test.cc @@ -97,7 +97,7 @@ TEST_F(ResponseFrameCommentTest, SimpleExampleHuffman) { EXPECT_TRUE(codec.write(WellKnownFrames::defaultSettingsFrame(), connection).ok()); EXPECT_TRUE(codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection).ok()); - TestHeaderMapImpl expected_headers; + TestResponseHeaderMapImpl expected_headers; expected_headers.addCopy(":status", "200"); expected_headers.addCopy("compression", "test"); EXPECT_CALL(codec.response_decoder_, decodeHeaders_(HeaderMapEqual(&expected_headers), true)); @@ -177,7 +177,7 @@ TEST_F(ResponseFrameCommentTest, SimpleExamplePlain) { EXPECT_TRUE(codec.write(WellKnownFrames::defaultSettingsFrame(), connection).ok()); EXPECT_TRUE(codec.write(WellKnownFrames::initialWindowUpdateFrame(), connection).ok()); - TestHeaderMapImpl expected_headers; + TestResponseHeaderMapImpl expected_headers; expected_headers.addCopy(":status", "200"); expected_headers.addCopy("compression", "test"); EXPECT_CALL(codec.response_decoder_, decodeHeaders_(HeaderMapEqual(&expected_headers), true)); diff --git a/test/common/http/path_utility_test.cc b/test/common/http/path_utility_test.cc index 946c8a8131af..d7c639934136 100644 --- a/test/common/http/path_utility_test.cc +++ b/test/common/http/path_utility_test.cc @@ -1,9 +1,10 @@ #include #include -#include "common/http/header_map_impl.h" #include "common/http/path_utility.h" +#include "test/test_common/utility.h" + #include "gtest/gtest.h" namespace Envoy { @@ -22,7 +23,7 @@ class PathUtilityTest : public testing::Test { headers_.setHost(host_value); return *headers_.Host(); } - RequestHeaderMapImpl headers_; + TestRequestHeaderMapImpl headers_; }; // Already normalized path don't change. 
diff --git a/test/common/http/utility_test.cc b/test/common/http/utility_test.cc index 469ced999962..70213c6feb67 100644 --- a/test/common/http/utility_test.cc +++ b/test/common/http/utility_test.cc @@ -398,7 +398,7 @@ TEST(HttpUtility, getLastAddressFromXFF) { } TEST(HttpUtility, TestParseCookie) { - TestHeaderMapImpl headers{ + TestRequestHeaderMapImpl headers{ {"someheader", "10.0.0.1"}, {"cookie", "somekey=somevalue; someotherkey=someothervalue"}, {"cookie", "abc=def; token=abc123; Expires=Wed, 09 Jun 2021 10:18:14 GMT"}, @@ -410,10 +410,10 @@ TEST(HttpUtility, TestParseCookie) { } TEST(HttpUtility, TestParseCookieBadValues) { - TestHeaderMapImpl headers{{"cookie", "token1=abc123; = "}, - {"cookie", "token2=abc123; "}, - {"cookie", "; token3=abc123;"}, - {"cookie", "=; token4=\"abc123\""}}; + TestRequestHeaderMapImpl headers{{"cookie", "token1=abc123; = "}, + {"cookie", "token2=abc123; "}, + {"cookie", "; token3=abc123;"}, + {"cookie", "=; token4=\"abc123\""}}; EXPECT_EQ(Utility::parseCookieValue(headers, "token1"), "abc123"); EXPECT_EQ(Utility::parseCookieValue(headers, "token2"), "abc123"); @@ -422,7 +422,7 @@ TEST(HttpUtility, TestParseCookieBadValues) { } TEST(HttpUtility, TestParseCookieWithQuotes) { - TestHeaderMapImpl headers{ + TestRequestHeaderMapImpl headers{ {"someheader", "10.0.0.1"}, {"cookie", "dquote=\"; quoteddquote=\"\"\""}, {"cookie", "leadingdquote=\"foobar;"}, @@ -827,7 +827,7 @@ TEST(HttpUtility, TestTeHeaderGzipTrailersSanitized) { // Expect that the set of headers is valid and can be sanitized EXPECT_TRUE(Utility::sanitizeConnectionHeader(request_headers)); - Http::TestHeaderMapImpl sanitized_headers = { + Http::TestRequestHeaderMapImpl sanitized_headers = { {":method", "GET"}, {":path", "/"}, {":scheme", "http"}, @@ -855,7 +855,7 @@ TEST(HttpUtility, TestNominatedConnectionHeader) { }; EXPECT_TRUE(Utility::sanitizeConnectionHeader(request_headers)); - TestHeaderMapImpl sanitized_headers = { + TestRequestHeaderMapImpl sanitized_headers = { {":method", "GET"}, {":path", "/"}, {":scheme", "http"}, @@ -883,7 +883,7 @@ TEST(HttpUtility, TestNominatedConnectionHeader2) { }; EXPECT_TRUE(Utility::sanitizeConnectionHeader(request_headers)); - Http::TestHeaderMapImpl sanitized_headers = { + Http::TestRequestHeaderMapImpl sanitized_headers = { {":method", "GET"}, {":path", "/"}, {":scheme", "http"}, @@ -910,7 +910,7 @@ TEST(HttpUtility, TestNominatedPseudoHeader) { }; // Headers remain unchanged since there are nominated pseudo headers - Http::TestHeaderMapImpl sanitized_headers(request_headers); + Http::TestRequestHeaderMapImpl sanitized_headers(request_headers); EXPECT_FALSE(Utility::sanitizeConnectionHeader(request_headers)); EXPECT_EQ(sanitized_headers, request_headers); @@ -932,7 +932,7 @@ TEST(HttpUtility, TestSanitizeEmptyTokensFromHeaders) { }; EXPECT_TRUE(Utility::sanitizeConnectionHeader(request_headers)); - Http::TestHeaderMapImpl sanitized_headers = { + Http::TestRequestHeaderMapImpl sanitized_headers = { {":method", "GET"}, {":path", "/"}, {":scheme", "http"}, @@ -959,7 +959,7 @@ TEST(HttpUtility, TestTooManyNominatedHeaders) { }; // Headers remain unchanged because there are too many nominated headers - Http::TestHeaderMapImpl sanitized_headers(request_headers); + Http::TestRequestHeaderMapImpl sanitized_headers(request_headers); EXPECT_FALSE(Utility::sanitizeConnectionHeader(request_headers)); EXPECT_EQ(sanitized_headers, request_headers); @@ -977,7 +977,7 @@ TEST(HttpUtility, TestRejectNominatedXForwardedFor) { }; // Headers remain unchanged due to 
nominated X-Forwarded* header - Http::TestHeaderMapImpl sanitized_headers(request_headers); + Http::TestRequestHeaderMapImpl sanitized_headers(request_headers); EXPECT_FALSE(Utility::sanitizeConnectionHeader(request_headers)); EXPECT_EQ(sanitized_headers, request_headers); @@ -995,7 +995,7 @@ TEST(HttpUtility, TestRejectNominatedXForwardedHost) { }; // Headers remain unchanged due to nominated X-Forwarded* header - Http::TestHeaderMapImpl sanitized_headers(request_headers); + Http::TestRequestHeaderMapImpl sanitized_headers(request_headers); EXPECT_FALSE(Utility::sanitizeConnectionHeader(request_headers)); EXPECT_EQ(sanitized_headers, request_headers); @@ -1015,7 +1015,7 @@ TEST(HttpUtility, TestRejectNominatedXForwardedProto) { // Headers are not sanitized due to nominated X-Forwarded* header EXPECT_FALSE(Utility::sanitizeConnectionHeader(request_headers)); - Http::TestHeaderMapImpl sanitized_headers = { + Http::TestRequestHeaderMapImpl sanitized_headers = { {":method", "GET"}, {":path", "/"}, {":scheme", "http"}, @@ -1039,7 +1039,7 @@ TEST(HttpUtility, TestRejectTrailersSubString) { }; EXPECT_TRUE(Utility::sanitizeConnectionHeader(request_headers)); - Http::TestHeaderMapImpl sanitized_headers = { + Http::TestRequestHeaderMapImpl sanitized_headers = { {":method", "GET"}, {":path", "/"}, {":scheme", "http"}, @@ -1077,7 +1077,7 @@ TEST(HttpUtility, TestRejectTeHeaderTooLong) { }; // Headers remain unchanged because the TE value is too long - Http::TestHeaderMapImpl sanitized_headers(request_headers); + Http::TestRequestHeaderMapImpl sanitized_headers(request_headers); EXPECT_FALSE(Utility::sanitizeConnectionHeader(request_headers)); EXPECT_EQ(sanitized_headers, request_headers); diff --git a/test/common/router/config_impl_test.cc b/test/common/router/config_impl_test.cc index 741df2ede163..fa446a0d12f7 100644 --- a/test/common/router/config_impl_test.cc +++ b/test/common/router/config_impl_test.cc @@ -4016,7 +4016,7 @@ TEST_F(RouteMatcherTest, NoProtocolInHeadersWhenTlsIsRequired) { * @brief Generate headers for testing * @param ssl set true to insert "x-forwarded-proto: https", else "x-forwarded-proto: http" * @param internal nullopt for no such "x-envoy-internal" header, or explicit "true/false" - * @return Http::TestHeaderMapImpl + * @return Http::TestRequestHeaderMapImpl */ static Http::TestRequestHeaderMapImpl genRedirectHeaders(const std::string& host, const std::string& path, bool ssl, @@ -4700,7 +4700,7 @@ TEST_F(RouteMatcherTest, WeightedClusters) { Http::TestResponseHeaderMapImpl response_headers; StreamInfo::MockStreamInfo stream_info; route_entry->finalizeResponseHeaders(response_headers, stream_info); - EXPECT_EQ(response_headers, Http::TestHeaderMapImpl{}); + EXPECT_EQ(response_headers, Http::TestResponseHeaderMapImpl{}); } // Weighted Cluster with no runtime, total weight = 10000 @@ -6799,10 +6799,10 @@ name: RetriableHeaders const auto& retry_policy = config.route(headers, 0)->routeEntry()->retryPolicy(); ASSERT_EQ(2, retry_policy.retriableHeaders().size()); - Http::TestHeaderMapImpl expected_0{{":status", "500"}}; - Http::TestHeaderMapImpl unexpected_0{{":status", "200"}}; - Http::TestHeaderMapImpl expected_1{{"x-upstream-pushback", "bar"}}; - Http::TestHeaderMapImpl unexpected_1{{"x-test", "foo"}}; + Http::TestResponseHeaderMapImpl expected_0{{":status", "500"}}; + Http::TestResponseHeaderMapImpl unexpected_0{{":status", "200"}}; + Http::TestResponseHeaderMapImpl expected_1{{"x-upstream-pushback", "bar"}}; + Http::TestResponseHeaderMapImpl unexpected_1{{"x-test", "foo"}}; 
EXPECT_TRUE(retry_policy.retriableHeaders()[0]->matchesHeaders(expected_0)); EXPECT_FALSE(retry_policy.retriableHeaders()[0]->matchesHeaders(unexpected_0)); diff --git a/test/common/router/header_formatter_test.cc b/test/common/router/header_formatter_test.cc index 88586fcbbe9d..7f5f47b9dba6 100644 --- a/test/common/router/header_formatter_test.cc +++ b/test/common/router/header_formatter_test.cc @@ -848,7 +848,7 @@ TEST(HeaderParserTest, TestParseInternal) { new NiceMock()); ON_CALL(stream_info, upstreamHost()).WillByDefault(Return(host)); - Http::RequestHeaderMapImpl request_headers; + Http::TestRequestHeaderMapImpl request_headers; request_headers.addCopy(Http::LowerCaseString(std::string("x-request-id")), 123); ON_CALL(stream_info, getRequestHeaders()).WillByDefault(Return(&request_headers)); @@ -898,7 +898,7 @@ TEST(HeaderParserTest, TestParseInternal) { HeaderParserPtr req_header_parser = HeaderParser::configure(to_add); - Http::TestHeaderMapImpl header_map{{":method", "POST"}}; + Http::TestRequestHeaderMapImpl header_map{{":method", "POST"}}; req_header_parser->evaluateHeaders(header_map, stream_info); std::string descriptor = fmt::format("for test case input: {}", test_case.input_); @@ -932,7 +932,7 @@ match: { prefix: "/new_endpoint" } HeaderParserPtr req_header_parser = HeaderParser::configure(parseRouteFromV2Yaml(ymal).request_headers_to_add()); - Http::TestHeaderMapImpl header_map{{":method", "POST"}}; + Http::TestRequestHeaderMapImpl header_map{{":method", "POST"}}; NiceMock stream_info; req_header_parser->evaluateHeaders(header_map, stream_info); EXPECT_TRUE(header_map.has("x-client-ip")); @@ -954,7 +954,7 @@ match: { prefix: "/new_endpoint" } HeaderParserPtr req_header_parser = HeaderParser::configure(parseRouteFromV2Yaml(ymal).request_headers_to_add()); - Http::TestHeaderMapImpl header_map{{":method", "POST"}}; + Http::TestRequestHeaderMapImpl header_map{{":method", "POST"}}; std::shared_ptr> host( new NiceMock()); NiceMock stream_info; @@ -980,7 +980,7 @@ match: { prefix: "/new_endpoint" } HeaderParserPtr req_header_parser = HeaderParser::configure(parseRouteFromV2Yaml(ymal).request_headers_to_add()); - Http::TestHeaderMapImpl header_map{{":method", "POST"}}; + Http::TestRequestHeaderMapImpl header_map{{":method", "POST"}}; NiceMock stream_info; req_header_parser->evaluateHeaders(header_map, stream_info); EXPECT_TRUE(header_map.has("static-header")); @@ -1026,7 +1026,8 @@ request_headers_to_remove: ["x-nope"] const auto route = parseRouteFromV2Yaml(yaml); HeaderParserPtr req_header_parser = HeaderParser::configure(route.request_headers_to_add(), route.request_headers_to_remove()); - Http::TestHeaderMapImpl header_map{{":method", "POST"}, {"x-safe", "safe"}, {"x-nope", "nope"}}; + Http::TestRequestHeaderMapImpl header_map{ + {":method", "POST"}, {"x-safe", "safe"}, {"x-nope", "nope"}}; NiceMock stream_info; absl::optional protocol = Envoy::Http::Protocol::Http11; ON_CALL(stream_info, protocol()).WillByDefault(ReturnPointee(&protocol)); @@ -1124,7 +1125,7 @@ match: { prefix: "/new_endpoint" } HeaderParserPtr req_header_parser = Router::HeaderParser::configure(route.request_headers_to_add()); - Http::TestHeaderMapImpl header_map{ + Http::TestRequestHeaderMapImpl header_map{ {":method", "POST"}, {"static-header", "old-value"}, {"x-client-ip", "0.0.0.0"}}; NiceMock stream_info; @@ -1213,7 +1214,8 @@ response_headers_to_remove: ["x-nope"] const auto route = parseRouteFromV2Yaml(yaml); HeaderParserPtr resp_header_parser = HeaderParser::configure(route.response_headers_to_add(), 
route.response_headers_to_remove()); - Http::TestHeaderMapImpl header_map{{":method", "POST"}, {"x-safe", "safe"}, {"x-nope", "nope"}}; + Http::TestRequestHeaderMapImpl header_map{ + {":method", "POST"}, {"x-safe", "safe"}, {"x-nope", "nope"}}; NiceMock stream_info; // Initialize start_time as 2018-04-03T23:06:09.123Z in microseconds. @@ -1263,7 +1265,7 @@ request_headers_to_remove: ["x-foo-header"] const auto route = parseRouteFromV2Yaml(yaml); HeaderParserPtr req_header_parser = HeaderParser::configure(route.request_headers_to_add(), route.request_headers_to_remove()); - Http::TestHeaderMapImpl header_map{{"x-foo-header", "foo"}}; + Http::TestRequestHeaderMapImpl header_map{{"x-foo-header", "foo"}}; NiceMock stream_info; req_header_parser->evaluateHeaders(header_map, stream_info); @@ -1285,7 +1287,7 @@ response_headers_to_remove: ["x-foo-header"] const auto route = parseRouteFromV2Yaml(yaml); HeaderParserPtr resp_header_parser = HeaderParser::configure(route.response_headers_to_add(), route.response_headers_to_remove()); - Http::TestHeaderMapImpl header_map{{"x-foo-header", "foo"}}; + Http::TestResponseHeaderMapImpl header_map{{"x-foo-header", "foo"}}; NiceMock stream_info; resp_header_parser->evaluateHeaders(header_map, stream_info); diff --git a/test/common/router/header_parser_fuzz_test.cc b/test/common/router/header_parser_fuzz_test.cc index 74dede78b331..8acd737fa190 100644 --- a/test/common/router/header_parser_fuzz_test.cc +++ b/test/common/router/header_parser_fuzz_test.cc @@ -14,7 +14,7 @@ DEFINE_PROTO_FUZZER(const test::common::router::TestCase& input) { TestUtility::validate(input); Router::HeaderParserPtr parser = Router::HeaderParser::configure(input.headers_to_add(), input.headers_to_remove()); - Http::HeaderMapImpl header_map; + Http::TestRequestHeaderMapImpl header_map; TestStreamInfo test_stream_info = fromStreamInfo(input.stream_info()); parser->evaluateHeaders(header_map, test_stream_info); ENVOY_LOG_MISC(trace, "Success"); diff --git a/test/common/router/rds_impl_test.cc b/test/common/router/rds_impl_test.cc index 07825cf9636c..d5f87e046635 100644 --- a/test/common/router/rds_impl_test.cc +++ b/test/common/router/rds_impl_test.cc @@ -158,7 +158,7 @@ TEST_F(RdsImplTest, Basic) { setup(); // Make sure the initial empty route table works. - EXPECT_EQ(nullptr, route(Http::TestHeaderMapImpl{{":authority", "foo"}})); + EXPECT_EQ(nullptr, route(Http::TestRequestHeaderMapImpl{{":authority", "foo"}})); // Initial request. const std::string response1_json = R"EOF( @@ -178,11 +178,11 @@ TEST_F(RdsImplTest, Basic) { EXPECT_CALL(init_watcher_, ready()); rds_callbacks_->onConfigUpdate(response1.resources(), response1.version_info()); - EXPECT_EQ(nullptr, route(Http::TestHeaderMapImpl{{":authority", "foo"}})); + EXPECT_EQ(nullptr, route(Http::TestRequestHeaderMapImpl{{":authority", "foo"}})); // 2nd request with same response. Based on hash should not reload config. rds_callbacks_->onConfigUpdate(response1.resources(), response1.version_info()); - EXPECT_EQ(nullptr, route(Http::TestHeaderMapImpl{{":authority", "foo"}})); + EXPECT_EQ(nullptr, route(Http::TestRequestHeaderMapImpl{{":authority", "foo"}})); // Load the config and verified shared count. ConfigConstSharedPtr config = rds_->config(); @@ -224,7 +224,7 @@ TEST_F(RdsImplTest, Basic) { // Make sure we don't lookup/verify clusters. 
EXPECT_CALL(server_factory_context_.cluster_manager_, get(Eq("bar"))).Times(0); rds_callbacks_->onConfigUpdate(response2.resources(), response2.version_info()); - EXPECT_EQ("foo", route(Http::TestHeaderMapImpl{{":authority", "foo"}, {":path", "/foo"}}) + EXPECT_EQ("foo", route(Http::TestRequestHeaderMapImpl{{":authority", "foo"}, {":path", "/foo"}}) ->routeEntry() ->clusterName()); diff --git a/test/common/router/router_ratelimit_test.cc b/test/common/router/router_ratelimit_test.cc index 20954ec61ffb..80c21f623efe 100644 --- a/test/common/router/router_ratelimit_test.cc +++ b/test/common/router/router_ratelimit_test.cc @@ -86,7 +86,7 @@ class RateLimitConfiguration : public testing::Test { NiceMock factory_context_; ProtobufMessage::NullValidationVisitorImpl any_validation_visitor_; std::unique_ptr config_; - Http::TestHeaderMapImpl header_; + Http::TestRequestHeaderMapImpl header_; const RouteEntry* route_; Network::Address::Ipv4Instance default_remote_address_{"10.0.0.1"}; }; @@ -271,7 +271,7 @@ class RateLimitPolicyEntryTest : public testing::Test { } std::unique_ptr rate_limit_entry_; - Http::TestHeaderMapImpl header_; + Http::TestRequestHeaderMapImpl header_; NiceMock route_; std::vector descriptors_; Network::Address::Ipv4Instance default_remote_address_{"10.0.0.1"}; @@ -358,7 +358,7 @@ TEST_F(RateLimitPolicyEntryTest, RequestHeaders) { )EOF"; setupTest(yaml); - Http::TestHeaderMapImpl header{{"x-header-name", "test_value"}}; + Http::TestRequestHeaderMapImpl header{{"x-header-name", "test_value"}}; rate_limit_entry_->populateDescriptors(route_, descriptors_, "service_cluster", header, default_remote_address_); @@ -382,7 +382,7 @@ TEST_F(RateLimitPolicyEntryTest, RequestHeadersWithSkipIfAbsent) { )EOF"; setupTest(yaml); - Http::TestHeaderMapImpl header{{"x-header-name", "test_value"}}; + Http::TestRequestHeaderMapImpl header{{"x-header-name", "test_value"}}; rate_limit_entry_->populateDescriptors(route_, descriptors_, "service_cluster", header, default_remote_address_); @@ -406,7 +406,7 @@ TEST_F(RateLimitPolicyEntryTest, RequestHeadersWithDefaultSkipIfAbsent) { )EOF"; setupTest(yaml); - Http::TestHeaderMapImpl header{{"x-header-test", "test_value"}}; + Http::TestRequestHeaderMapImpl header{{"x-header-test", "test_value"}}; rate_limit_entry_->populateDescriptors(route_, descriptors_, "service_cluster", header, default_remote_address_); @@ -422,7 +422,7 @@ TEST_F(RateLimitPolicyEntryTest, RequestHeadersNoMatch) { )EOF"; setupTest(yaml); - Http::TestHeaderMapImpl header{{"x-header-name", "test_value"}}; + Http::TestRequestHeaderMapImpl header{{"x-header-name", "test_value"}}; rate_limit_entry_->populateDescriptors(route_, descriptors_, "service_cluster", header, default_remote_address_); @@ -455,7 +455,7 @@ TEST_F(RateLimitPolicyEntryTest, HeaderValueMatch) { )EOF"; setupTest(yaml); - Http::TestHeaderMapImpl header{{"x-header-name", "test_value"}}; + Http::TestRequestHeaderMapImpl header{{"x-header-name", "test_value"}}; rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header, default_remote_address_); EXPECT_THAT(std::vector({{{{"header_match", "fake_value"}}}}), @@ -473,7 +473,7 @@ TEST_F(RateLimitPolicyEntryTest, HeaderValueMatchNoMatch) { )EOF"; setupTest(yaml); - Http::TestHeaderMapImpl header{{"x-header-name", "not_same_value"}}; + Http::TestRequestHeaderMapImpl header{{"x-header-name", "not_same_value"}}; rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header, default_remote_address_); EXPECT_TRUE(descriptors_.empty()); @@ -491,7 +491,7 @@ 
TEST_F(RateLimitPolicyEntryTest, HeaderValueMatchHeadersNotPresent) { )EOF"; setupTest(yaml); - Http::TestHeaderMapImpl header{{"x-header-name", "not_same_value"}}; + Http::TestRequestHeaderMapImpl header{{"x-header-name", "not_same_value"}}; rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header, default_remote_address_); EXPECT_THAT(std::vector({{{{"header_match", "fake_value"}}}}), @@ -510,7 +510,7 @@ TEST_F(RateLimitPolicyEntryTest, HeaderValueMatchHeadersPresent) { )EOF"; setupTest(yaml); - Http::TestHeaderMapImpl header{{"x-header-name", "test_value"}}; + Http::TestRequestHeaderMapImpl header{{"x-header-name", "test_value"}}; rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header, default_remote_address_); EXPECT_TRUE(descriptors_.empty()); diff --git a/test/common/router/router_test.cc b/test/common/router/router_test.cc index 36b8e0d43a48..c0bd7deb1ee9 100644 --- a/test/common/router/router_test.cc +++ b/test/common/router/router_test.cc @@ -1437,8 +1437,8 @@ TEST_F(RouterTestSuppressEnvoyHeaders, EnvoyUpstreamServiceTime) { Http::ResponseHeaderMapPtr response_headers( new Http::TestResponseHeaderMapImpl{{":status", "200"}}); EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(200)); - Http::TestHeaderMapImpl downstream_response_headers{{":status", "200"}, - {"x-envoy-upstream-service-time", "0"}}; + Http::TestResponseHeaderMapImpl downstream_response_headers{ + {":status", "200"}, {"x-envoy-upstream-service-time", "0"}}; EXPECT_CALL(callbacks_, encodeHeaders_(_, true)) .WillOnce(Invoke([](Http::HeaderMap& headers, bool) { EXPECT_EQ(nullptr, headers.get(Http::Headers::get().EnvoyUpstreamServiceTime)); diff --git a/test/common/router/scoped_config_impl_test.cc b/test/common/router/scoped_config_impl_test.cc index 5d29bfd22309..df842d51cf35 100644 --- a/test/common/router/scoped_config_impl_test.cc +++ b/test/common/router/scoped_config_impl_test.cc @@ -15,7 +15,7 @@ namespace Envoy { namespace Router { namespace { -using ::Envoy::Http::TestHeaderMapImpl; +using ::Envoy::Http::TestRequestHeaderMapImpl; using ::testing::NiceMock; class FooFragment : public ScopeKeyFragmentBase { @@ -116,30 +116,30 @@ TEST(HeaderValueExtractorImplTest, HeaderExtractionByIndex) { TestUtility::loadFromYaml(yaml_plain, config); HeaderValueExtractorImpl extractor(std::move(config)); - std::unique_ptr fragment = - extractor.computeFragment(TestHeaderMapImpl{{"foo_header", "part-0,part-1:value_bluh"}}); + std::unique_ptr fragment = extractor.computeFragment( + TestRequestHeaderMapImpl{{"foo_header", "part-0,part-1:value_bluh"}}); EXPECT_NE(fragment, nullptr); EXPECT_EQ(*fragment, StringKeyFragment{"part-1:value_bluh"}); // No such header. - fragment = extractor.computeFragment(TestHeaderMapImpl{{"bar_header", "part-0"}}); + fragment = extractor.computeFragment(TestRequestHeaderMapImpl{{"bar_header", "part-0"}}); EXPECT_EQ(fragment, nullptr); // Empty header value. - fragment = extractor.computeFragment(TestHeaderMapImpl{ + fragment = extractor.computeFragment(TestRequestHeaderMapImpl{ {"foo_header", ""}, }); EXPECT_EQ(fragment, nullptr); // Index out of bound. - fragment = extractor.computeFragment(TestHeaderMapImpl{ + fragment = extractor.computeFragment(TestRequestHeaderMapImpl{ {"foo_header", "part-0"}, }); EXPECT_EQ(fragment, nullptr); // Element is empty. 
- fragment = extractor.computeFragment(TestHeaderMapImpl{ + fragment = extractor.computeFragment(TestRequestHeaderMapImpl{ {"foo_header", "part-0,,,bluh"}, }); EXPECT_NE(fragment, nullptr); @@ -159,47 +159,48 @@ TEST(HeaderValueExtractorImplTest, HeaderExtractionByKey) { TestUtility::loadFromYaml(yaml_plain, config); HeaderValueExtractorImpl extractor(std::move(config)); - std::unique_ptr fragment = extractor.computeFragment(TestHeaderMapImpl{ - {"foo_header", "part-0;bar=>bluh;foo=>foo_value"}, - }); + std::unique_ptr fragment = + extractor.computeFragment(TestRequestHeaderMapImpl{ + {"foo_header", "part-0;bar=>bluh;foo=>foo_value"}, + }); EXPECT_NE(fragment, nullptr); EXPECT_EQ(*fragment, StringKeyFragment{"bluh"}); // No such header. - fragment = extractor.computeFragment(TestHeaderMapImpl{ + fragment = extractor.computeFragment(TestRequestHeaderMapImpl{ {"bluh", "part-0;"}, }); EXPECT_EQ(fragment, nullptr); // Empty header value. - fragment = extractor.computeFragment(TestHeaderMapImpl{ + fragment = extractor.computeFragment(TestRequestHeaderMapImpl{ {"foo_header", ""}, }); EXPECT_EQ(fragment, nullptr); // No such key. - fragment = extractor.computeFragment(TestHeaderMapImpl{ + fragment = extractor.computeFragment(TestRequestHeaderMapImpl{ {"foo_header", "part-0"}, }); EXPECT_EQ(fragment, nullptr); // Empty value. - fragment = extractor.computeFragment(TestHeaderMapImpl{ + fragment = extractor.computeFragment(TestRequestHeaderMapImpl{ {"foo_header", "bluh;;bar=>;foo=>last_value"}, }); EXPECT_NE(fragment, nullptr); EXPECT_EQ(*fragment, StringKeyFragment{""}); // Duplicate values, the first value returned. - fragment = extractor.computeFragment(TestHeaderMapImpl{ + fragment = extractor.computeFragment(TestRequestHeaderMapImpl{ {"foo_header", "bluh;;bar=>value1;bar=>value2;bluh;;bar=>last_value"}, }); EXPECT_NE(fragment, nullptr); EXPECT_EQ(*fragment, StringKeyFragment{"value1"}); // No separator in the element, value is set to empty string. - fragment = extractor.computeFragment(TestHeaderMapImpl{ + fragment = extractor.computeFragment(TestRequestHeaderMapImpl{ {"foo_header", "bluh;;bar;bar=>value2;bluh;;bar=>last_value"}, }); EXPECT_NE(fragment, nullptr); @@ -219,13 +220,14 @@ TEST(HeaderValueExtractorImplTest, ElementSeparatorEmpty) { TestUtility::loadFromYaml(yaml_plain, config); HeaderValueExtractorImpl extractor(std::move(config)); - std::unique_ptr fragment = extractor.computeFragment(TestHeaderMapImpl{ - {"foo_header", "bar=b;c=d;e=f"}, - }); + std::unique_ptr fragment = + extractor.computeFragment(TestRequestHeaderMapImpl{ + {"foo_header", "bar=b;c=d;e=f"}, + }); EXPECT_NE(fragment, nullptr); EXPECT_EQ(*fragment, StringKeyFragment{"b;c=d;e=f"}); - fragment = extractor.computeFragment(TestHeaderMapImpl{ + fragment = extractor.computeFragment(TestRequestHeaderMapImpl{ {"foo_header", "a=b;bar=d;e=f"}, }); EXPECT_EQ(fragment, nullptr); @@ -297,7 +299,7 @@ TEST(ScopeKeyBuilderImplTest, Parse) { TestUtility::loadFromYaml(yaml_plain, config); ScopeKeyBuilderImpl key_builder(std::move(config)); - std::unique_ptr key = key_builder.computeScopeKey(TestHeaderMapImpl{ + std::unique_ptr key = key_builder.computeScopeKey(TestRequestHeaderMapImpl{ {"foo_header", "a=b,bar=bar_value,e=f"}, {"bar_header", "a=b;bar=bar_value;index2"}, }); @@ -305,7 +307,7 @@ TEST(ScopeKeyBuilderImplTest, Parse) { EXPECT_EQ(*key, makeKey({"bar_value", "index2"})); // Empty string fragment is fine. 
- key = key_builder.computeScopeKey(TestHeaderMapImpl{ + key = key_builder.computeScopeKey(TestRequestHeaderMapImpl{ {"foo_header", "a=b,bar,e=f"}, {"bar_header", "a=b;bar=bar_value;"}, }); @@ -313,35 +315,35 @@ TEST(ScopeKeyBuilderImplTest, Parse) { EXPECT_EQ(*key, makeKey({"", ""})); // Key not found. - key = key_builder.computeScopeKey(TestHeaderMapImpl{ + key = key_builder.computeScopeKey(TestRequestHeaderMapImpl{ {"foo_header", "a=b,meh,e=f"}, {"bar_header", "a=b;bar=bar_value;"}, }); EXPECT_EQ(key, nullptr); // Index out of bound. - key = key_builder.computeScopeKey(TestHeaderMapImpl{ + key = key_builder.computeScopeKey(TestRequestHeaderMapImpl{ {"foo_header", "a=b,bar=bar_value,e=f"}, {"bar_header", "a=b;bar=bar_value"}, }); EXPECT_EQ(key, nullptr); // Header missing. - key = key_builder.computeScopeKey(TestHeaderMapImpl{ + key = key_builder.computeScopeKey(TestRequestHeaderMapImpl{ {"foo_header", "a=b,bar=bar_value,e=f"}, {"foobar_header", "a=b;bar=bar_value;index2"}, }); EXPECT_EQ(key, nullptr); // Header value empty. - key = key_builder.computeScopeKey(TestHeaderMapImpl{ + key = key_builder.computeScopeKey(TestRequestHeaderMapImpl{ {"foo_header", ""}, {"bar_header", "a=b;bar=bar_value;index2"}, }); EXPECT_EQ(key, nullptr); // Case sensitive. - key = key_builder.computeScopeKey(TestHeaderMapImpl{ + key = key_builder.computeScopeKey(TestRequestHeaderMapImpl{ {"foo_header", "a=b,Bar=bar_value,e=f"}, {"bar_header", "a=b;bar=bar_value;index2"}, }); @@ -447,21 +449,21 @@ TEST_F(ScopedConfigImplTest, PickRoute) { scoped_config_impl_->addOrUpdateRoutingScope(scope_info_b_); // Key (foo, bar) maps to scope_info_a_. - ConfigConstSharedPtr route_config = scoped_config_impl_->getRouteConfig(TestHeaderMapImpl{ + ConfigConstSharedPtr route_config = scoped_config_impl_->getRouteConfig(TestRequestHeaderMapImpl{ {"foo_header", ",,key=value,bar=foo,"}, {"bar_header", ";val1;bar;val3"}, }); EXPECT_EQ(route_config, scope_info_a_->routeConfig()); // Key (bar, baz) maps to scope_info_b_. - route_config = scoped_config_impl_->getRouteConfig(TestHeaderMapImpl{ + route_config = scoped_config_impl_->getRouteConfig(TestRequestHeaderMapImpl{ {"foo_header", ",,key=value,bar=bar,"}, {"bar_header", ";val1;baz;val3"}, }); EXPECT_EQ(route_config, scope_info_b_->routeConfig()); // No such key (bar, NOT_BAZ). - route_config = scoped_config_impl_->getRouteConfig(TestHeaderMapImpl{ + route_config = scoped_config_impl_->getRouteConfig(TestRequestHeaderMapImpl{ {"foo_header", ",key=value,bar=bar,"}, {"bar_header", ";val1;NOT_BAZ;val3"}, }); @@ -472,7 +474,7 @@ TEST_F(ScopedConfigImplTest, PickRoute) { TEST_F(ScopedConfigImplTest, Update) { scoped_config_impl_ = std::make_unique(std::move(key_builder_config_)); - TestHeaderMapImpl headers{ + TestRequestHeaderMapImpl headers{ {"foo_header", ",,key=value,bar=foo,"}, {"bar_header", ";val1;bar;val3"}, }; @@ -482,8 +484,8 @@ TEST_F(ScopedConfigImplTest, Update) { // Add scope_key (bar, baz). scoped_config_impl_->addOrUpdateRoutingScope(scope_info_b_); EXPECT_EQ(scoped_config_impl_->getRouteConfig(headers), nullptr); - EXPECT_EQ(scoped_config_impl_->getRouteConfig( - TestHeaderMapImpl{{"foo_header", ",,key=v,bar=bar,"}, {"bar_header", ";val1;baz"}}), + EXPECT_EQ(scoped_config_impl_->getRouteConfig(TestRequestHeaderMapImpl{ + {"foo_header", ",,key=v,bar=bar,"}, {"bar_header", ";val1;baz"}}), scope_info_b_->routeConfig()); // Add scope_key (foo, bar). 
@@ -496,8 +498,8 @@ TEST_F(ScopedConfigImplTest, Update) { EXPECT_EQ(scoped_config_impl_->getRouteConfig(headers), nullptr); // foo_scope now is keyed by (xyz, xyz). - EXPECT_EQ(scoped_config_impl_->getRouteConfig( - TestHeaderMapImpl{{"foo_header", ",bar=xyz,foo=bar"}, {"bar_header", ";;xyz"}}), + EXPECT_EQ(scoped_config_impl_->getRouteConfig(TestRequestHeaderMapImpl{ + {"foo_header", ",bar=xyz,foo=bar"}, {"bar_header", ";;xyz"}}), scope_info_a_v2_->routeConfig()); // Remove scope "foo_scope". diff --git a/test/common/router/scoped_rds_test.cc b/test/common/router/scoped_rds_test.cc index 6940ed7ff02b..4aa8b8012d4d 100644 --- a/test/common/router/scoped_rds_test.cc +++ b/test/common/router/scoped_rds_test.cc @@ -42,7 +42,7 @@ namespace Envoy { namespace Router { namespace { -using ::Envoy::Http::TestHeaderMapImpl; +using ::Envoy::Http::TestRequestHeaderMapImpl; envoy::config::route::v3::ScopedRouteConfiguration parseScopedRouteConfigurationFromYaml(const std::string& yaml) { @@ -306,24 +306,24 @@ route_configuration_name: foo_routes EXPECT_NE(getScopedRdsProvider()->config(), nullptr); EXPECT_EQ(getScopedRdsProvider() ->config() - ->getRouteConfig(TestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}) + ->getRouteConfig(TestRequestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}) ->name(), ""); EXPECT_EQ(getScopedRdsProvider() ->config() - ->getRouteConfig(TestHeaderMapImpl{{"Addr", "x-foo-key;x-bar-key"}}) + ->getRouteConfig(TestRequestHeaderMapImpl{{"Addr", "x-foo-key;x-bar-key"}}) ->name(), ""); // RDS updates foo_routes. pushRdsConfig({"foo_routes"}, "111"); EXPECT_EQ(getScopedRdsProvider() ->config() - ->getRouteConfig(TestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}) + ->getRouteConfig(TestRequestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}) ->name(), "foo_routes"); EXPECT_EQ(getScopedRdsProvider() ->config() - ->getRouteConfig(TestHeaderMapImpl{{"Addr", "x-foo-key;x-bar-key"}}) + ->getRouteConfig(TestRequestHeaderMapImpl{{"Addr", "x-foo-key;x-bar-key"}}) ->name(), "foo_routes"); @@ -337,11 +337,11 @@ route_configuration_name: foo_routes .value()); // now scope key "x-bar-key" points to nowhere. EXPECT_THAT(getScopedRdsProvider()->config()->getRouteConfig( - TestHeaderMapImpl{{"Addr", "x-foo-key;x-bar-key"}}), + TestRequestHeaderMapImpl{{"Addr", "x-foo-key;x-bar-key"}}), IsNull()); EXPECT_EQ(getScopedRdsProvider() ->config() - ->getRouteConfig(TestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}) + ->getRouteConfig(TestRequestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}) ->name(), "foo_routes"); } @@ -382,24 +382,24 @@ route_configuration_name: foo_routes EXPECT_NE(getScopedRdsProvider()->config(), nullptr); EXPECT_EQ(getScopedRdsProvider() ->config() - ->getRouteConfig(TestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}) + ->getRouteConfig(TestRequestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}) ->name(), ""); EXPECT_EQ(getScopedRdsProvider() ->config() - ->getRouteConfig(TestHeaderMapImpl{{"Addr", "x-foo-key;x-bar-key"}}) + ->getRouteConfig(TestRequestHeaderMapImpl{{"Addr", "x-foo-key;x-bar-key"}}) ->name(), ""); // RDS updates foo_routes. 
pushRdsConfig({"foo_routes"}, "111"); EXPECT_EQ(getScopedRdsProvider() ->config() - ->getRouteConfig(TestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}) + ->getRouteConfig(TestRequestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}) ->name(), "foo_routes"); EXPECT_EQ(getScopedRdsProvider() ->config() - ->getRouteConfig(TestHeaderMapImpl{{"Addr", "x-foo-key;x-bar-key"}}) + ->getRouteConfig(TestRequestHeaderMapImpl{{"Addr", "x-foo-key;x-bar-key"}}) ->name(), "foo_routes"); @@ -415,11 +415,11 @@ route_configuration_name: foo_routes .value()); // now scope key "x-bar-key" points to nowhere. EXPECT_THAT(getScopedRdsProvider()->config()->getRouteConfig( - TestHeaderMapImpl{{"Addr", "x-foo-key;x-bar-key"}}), + TestRequestHeaderMapImpl{{"Addr", "x-foo-key;x-bar-key"}}), IsNull()); EXPECT_EQ(getScopedRdsProvider() ->config() - ->getRouteConfig(TestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}) + ->getRouteConfig(TestRequestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}) ->name(), "foo_routes"); } @@ -458,7 +458,7 @@ route_configuration_name: foo_routes EXPECT_NE(getScopedRdsProvider(), nullptr); EXPECT_NE(getScopedRdsProvider()->config(), nullptr); EXPECT_THAT(getScopedRdsProvider()->config()->getRouteConfig( - TestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}), + TestRequestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}), IsNull()); EXPECT_EQ(server_factory_context_.scope_.counter("foo.rds.foo_routes.config_reload").value(), 0UL); @@ -504,7 +504,7 @@ route_configuration_name: foo_routes server_factory_context_.scope_.counter("foo.rds.foo_routes.config_reload").value()); EXPECT_EQ(getScopedRdsProvider() ->config() - ->getRouteConfig(TestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}) + ->getRouteConfig(TestRequestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}) ->name(), "foo_routes"); } @@ -540,7 +540,7 @@ route_configuration_name: bar_routes // No RDS "foo_routes" config push happened yet, Router::NullConfig is returned. EXPECT_THAT(getScopedRdsProvider() ->config() - ->getRouteConfig(TestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}) + ->getRouteConfig(TestRequestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}) ->name(), ""); init_watcher_.expectReady().Times(1); @@ -552,7 +552,7 @@ route_configuration_name: bar_routes 1UL); EXPECT_EQ(getScopedRdsProvider() ->config() - ->getRouteConfig(TestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}) + ->getRouteConfig(TestRequestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}) ->name(), "foo_routes"); @@ -580,7 +580,7 @@ route_configuration_name: foo_routes // The same scope-key now points to the same route table. 
EXPECT_EQ(getScopedRdsProvider() ->config() - ->getRouteConfig(TestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}) + ->getRouteConfig(TestRequestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}) ->name(), "foo_routes"); @@ -606,7 +606,7 @@ route_configuration_name: foo_routes EXPECT_EQ(getScopedRouteMap().count("foo_scope3"), 1); EXPECT_EQ(getScopedRdsProvider() ->config() - ->getRouteConfig(TestHeaderMapImpl{{"Addr", "x-foo-key;x-bar-key"}}) + ->getRouteConfig(TestRequestHeaderMapImpl{{"Addr", "x-foo-key;x-bar-key"}}) ->name(), "bar_routes"); @@ -623,12 +623,12 @@ route_configuration_name: foo_routes EXPECT_EQ(getScopedRouteMap().count("foo_scope4"), 1); EXPECT_EQ(getScopedRdsProvider() ->config() - ->getRouteConfig(TestHeaderMapImpl{{"Addr", "x-foo-key;x-bar-key"}}) + ->getRouteConfig(TestRequestHeaderMapImpl{{"Addr", "x-foo-key;x-bar-key"}}) ->name(), "foo_routes"); EXPECT_EQ(getScopedRdsProvider() ->config() - ->getRouteConfig(TestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}) + ->getRouteConfig(TestRequestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}) ->name(), "foo_routes"); } diff --git a/test/common/stream_info/stream_info_impl_test.cc b/test/common/stream_info/stream_info_impl_test.cc index 19d86a4d98c4..5c31924b5987 100644 --- a/test/common/stream_info/stream_info_impl_test.cc +++ b/test/common/stream_info/stream_info_impl_test.cc @@ -232,7 +232,7 @@ TEST_F(StreamInfoImplTest, RequestHeadersTest) { StreamInfoImpl stream_info(Http::Protocol::Http2, test_time_.timeSystem()); EXPECT_FALSE(stream_info.getRequestHeaders()); - Http::RequestHeaderMapImpl headers; + Http::TestRequestHeaderMapImpl headers; stream_info.setRequestHeaders(headers); EXPECT_EQ(&headers, stream_info.getRequestHeaders()); } @@ -243,8 +243,8 @@ TEST_F(StreamInfoImplTest, DefaultRequestIDExtensionTest) { auto rid_extension = stream_info.getRequestIDExtension(); - Http::RequestHeaderMapImpl request_headers; - Http::ResponseHeaderMapImpl response_headers; + Http::TestRequestHeaderMapImpl request_headers; + Http::TestResponseHeaderMapImpl response_headers; rid_extension->set(request_headers, false); rid_extension->set(request_headers, true); rid_extension->setInResponse(response_headers, request_headers); diff --git a/test/extensions/filters/common/ext_authz/check_request_utils_test.cc b/test/extensions/filters/common/ext_authz/check_request_utils_test.cc index 8252defad35f..d05f2585e39b 100644 --- a/test/extensions/filters/common/ext_authz/check_request_utils_test.cc +++ b/test/extensions/filters/common/ext_authz/check_request_utils_test.cc @@ -167,7 +167,7 @@ TEST_F(CheckRequestUtilsTest, BasicHttp) { // Verify that check request object has only a portion of the request data. TEST_F(CheckRequestUtilsTest, BasicHttpWithPartialBody) { const uint64_t size = 4049; - Http::RequestHeaderMapImpl headers_; + Http::TestRequestHeaderMapImpl headers_; envoy::service::auth::v3::CheckRequest request_; EXPECT_CALL(*ssl_, uriSanPeerCertificate()).WillOnce(Return(std::vector{"source"})); @@ -185,7 +185,7 @@ TEST_F(CheckRequestUtilsTest, BasicHttpWithPartialBody) { // Verify that check request object has all the request data. 
TEST_F(CheckRequestUtilsTest, BasicHttpWithFullBody) { - Http::RequestHeaderMapImpl headers_; + Http::TestRequestHeaderMapImpl headers_; envoy::service::auth::v3::CheckRequest request_; EXPECT_CALL(*ssl_, uriSanPeerCertificate()).WillOnce(Return(std::vector{"source"})); diff --git a/test/extensions/filters/common/ext_authz/ext_authz_grpc_impl_test.cc b/test/extensions/filters/common/ext_authz/ext_authz_grpc_impl_test.cc index 7a4137437ca1..ce4363980255 100644 --- a/test/extensions/filters/common/ext_authz/ext_authz_grpc_impl_test.cc +++ b/test/extensions/filters/common/ext_authz/ext_authz_grpc_impl_test.cc @@ -84,7 +84,7 @@ TEST_P(ExtAuthzGrpcClientTest, AuthorizationOk) { expectCallSend(request); client_->check(request_callbacks_, request, Tracing::NullSpan::instance(), stream_info_); - Http::RequestHeaderMapImpl headers; + Http::TestRequestHeaderMapImpl headers; client_->onCreateInitialMetadata(headers); EXPECT_CALL(span_, setTag(Eq("ext_authz_status"), Eq("ext_authz_ok"))); @@ -108,7 +108,7 @@ TEST_P(ExtAuthzGrpcClientTest, AuthorizationOkWithAllAtributes) { expectCallSend(request); client_->check(request_callbacks_, request, Tracing::NullSpan::instance(), stream_info_); - Http::RequestHeaderMapImpl headers; + Http::TestRequestHeaderMapImpl headers; client_->onCreateInitialMetadata(headers); EXPECT_CALL(span_, setTag(Eq("ext_authz_status"), Eq("ext_authz_ok"))); @@ -131,7 +131,7 @@ TEST_P(ExtAuthzGrpcClientTest, AuthorizationDenied) { expectCallSend(request); client_->check(request_callbacks_, request, Tracing::NullSpan::instance(), stream_info_); - Http::RequestHeaderMapImpl headers; + Http::TestRequestHeaderMapImpl headers; client_->onCreateInitialMetadata(headers); EXPECT_EQ(nullptr, headers.RequestId()); EXPECT_CALL(span_, setTag(Eq("ext_authz_status"), Eq("ext_authz_unauthorized"))); @@ -155,7 +155,7 @@ TEST_P(ExtAuthzGrpcClientTest, AuthorizationDeniedGrpcUnknownStatus) { expectCallSend(request); client_->check(request_callbacks_, request, Tracing::NullSpan::instance(), stream_info_); - Http::RequestHeaderMapImpl headers; + Http::TestRequestHeaderMapImpl headers; client_->onCreateInitialMetadata(headers); EXPECT_EQ(nullptr, headers.RequestId()); EXPECT_CALL(span_, setTag(Eq("ext_authz_status"), Eq("ext_authz_unauthorized"))); @@ -182,7 +182,7 @@ TEST_P(ExtAuthzGrpcClientTest, AuthorizationDeniedWithAllAttributes) { expectCallSend(request); client_->check(request_callbacks_, request, Tracing::NullSpan::instance(), stream_info_); - Http::RequestHeaderMapImpl headers; + Http::TestRequestHeaderMapImpl headers; client_->onCreateInitialMetadata(headers); EXPECT_EQ(nullptr, headers.RequestId()); EXPECT_CALL(span_, setTag(Eq("ext_authz_status"), Eq("ext_authz_unauthorized"))); diff --git a/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc b/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc index fbb6f7a7bcfe..74851ee016d6 100644 --- a/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc +++ b/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc @@ -357,7 +357,7 @@ TEST_F(ExtAuthzHttpClientTest, AuthorizationOkWithAddedAuthzHeadersFromStreamInf const HeaderValuePair expected_header{"x-authz-header1", "123"}; EXPECT_CALL(async_client_, send_(ContainsPairAsHeader(expected_header), _, _)); - Http::RequestHeaderMapImpl request_headers; + Http::TestRequestHeaderMapImpl request_headers; request_headers.addCopy(Http::LowerCaseString(std::string("x-request-id")), expected_header.second); diff --git 
a/test/extensions/filters/common/ratelimit/ratelimit_impl_test.cc b/test/extensions/filters/common/ratelimit/ratelimit_impl_test.cc index 021c17ebffe5..57facccb78a3 100644 --- a/test/extensions/filters/common/ratelimit/ratelimit_impl_test.cc +++ b/test/extensions/filters/common/ratelimit/ratelimit_impl_test.cc @@ -64,7 +64,7 @@ TEST_F(RateLimitGrpcClientTest, Basic) { { envoy::service::ratelimit::v3::RateLimitRequest request; - Http::RequestHeaderMapImpl headers; + Http::TestRequestHeaderMapImpl headers; GrpcClientImpl::createRequest(request, "foo", {{{{"foo", "bar"}}}}); EXPECT_CALL(*async_client_, sendRaw(_, _, Grpc::ProtoBufferEq(request), Ref(client_), _, _)) .WillOnce( @@ -91,7 +91,7 @@ TEST_F(RateLimitGrpcClientTest, Basic) { { envoy::service::ratelimit::v3::RateLimitRequest request; - Http::RequestHeaderMapImpl headers; + Http::TestRequestHeaderMapImpl headers; GrpcClientImpl::createRequest(request, "foo", {{{{"foo", "bar"}, {"bar", "baz"}}}}); EXPECT_CALL(*async_client_, sendRaw(_, _, Grpc::ProtoBufferEq(request), _, _, _)) .WillOnce(Return(&async_request_)); diff --git a/test/extensions/filters/common/rbac/engine_impl_test.cc b/test/extensions/filters/common/rbac/engine_impl_test.cc index 42306d4bc7f4..5e2eda5b30e3 100644 --- a/test/extensions/filters/common/rbac/engine_impl_test.cc +++ b/test/extensions/filters/common/rbac/engine_impl_test.cc @@ -24,10 +24,11 @@ namespace Common { namespace RBAC { namespace { -void checkEngine(const RBAC::RoleBasedAccessControlEngineImpl& engine, bool expected, - const Envoy::Network::Connection& connection = Envoy::Network::MockConnection(), - const Envoy::Http::RequestHeaderMap& headers = Envoy::Http::RequestHeaderMapImpl(), - const StreamInfo::StreamInfo& info = NiceMock()) { +void checkEngine( + const RBAC::RoleBasedAccessControlEngineImpl& engine, bool expected, + const Envoy::Network::Connection& connection = Envoy::Network::MockConnection(), + const Envoy::Http::RequestHeaderMap& headers = Envoy::Http::TestRequestHeaderMapImpl(), + const StreamInfo::StreamInfo& info = NiceMock()) { EXPECT_EQ(expected, engine.allowed(connection, headers, info, nullptr)); } @@ -137,7 +138,7 @@ TEST(RoleBasedAccessControlEngineImpl, AllowedWhitelist) { RBAC::RoleBasedAccessControlEngineImpl engine(rbac); Envoy::Network::MockConnection conn; - Envoy::Http::RequestHeaderMapImpl headers; + Envoy::Http::TestRequestHeaderMapImpl headers; NiceMock info; Envoy::Network::Address::InstanceConstSharedPtr addr = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 123, false); @@ -160,7 +161,7 @@ TEST(RoleBasedAccessControlEngineImpl, DeniedBlacklist) { RBAC::RoleBasedAccessControlEngineImpl engine(rbac); Envoy::Network::MockConnection conn; - Envoy::Http::RequestHeaderMapImpl headers; + Envoy::Http::TestRequestHeaderMapImpl headers; NiceMock info; Envoy::Network::Address::InstanceConstSharedPtr addr = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 123, false); @@ -280,7 +281,7 @@ TEST(RoleBasedAccessControlEngineImpl, HeaderCondition) { (*rbac.mutable_policies())["foo"] = policy; RBAC::RoleBasedAccessControlEngineImpl engine(rbac); - Envoy::Http::RequestHeaderMapImpl headers; + Envoy::Http::TestRequestHeaderMapImpl headers; Envoy::Http::LowerCaseString key("foo"); std::string value = "bar"; headers.setReference(key, value); @@ -321,7 +322,7 @@ TEST(RoleBasedAccessControlEngineImpl, MetadataCondition) { (*rbac.mutable_policies())["foo"] = policy; RBAC::RoleBasedAccessControlEngineImpl engine(rbac); - Envoy::Http::RequestHeaderMapImpl headers; + 
Envoy::Http::TestRequestHeaderMapImpl headers; NiceMock info; auto label = MessageUtil::keyValueStruct("label", "prod"); @@ -349,7 +350,7 @@ TEST(RoleBasedAccessControlEngineImpl, ConjunctiveCondition) { RBAC::RoleBasedAccessControlEngineImpl engine(rbac); Envoy::Network::MockConnection conn; - Envoy::Http::RequestHeaderMapImpl headers; + Envoy::Http::TestRequestHeaderMapImpl headers; NiceMock info; Envoy::Network::Address::InstanceConstSharedPtr addr = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 123, false); diff --git a/test/extensions/filters/common/rbac/matchers_test.cc b/test/extensions/filters/common/rbac/matchers_test.cc index d710859f9d73..52947461ae69 100644 --- a/test/extensions/filters/common/rbac/matchers_test.cc +++ b/test/extensions/filters/common/rbac/matchers_test.cc @@ -29,7 +29,7 @@ namespace { void checkMatcher( const RBAC::Matcher& matcher, bool expected, const Envoy::Network::Connection& connection = Envoy::Network::MockConnection(), - const Envoy::Http::RequestHeaderMap& headers = Envoy::Http::RequestHeaderMapImpl(), + const Envoy::Http::RequestHeaderMap& headers = Envoy::Http::TestRequestHeaderMapImpl(), const StreamInfo::StreamInfo& info = NiceMock()) { EXPECT_EQ(expected, matcher.matches(connection, headers, info)); } @@ -47,7 +47,7 @@ TEST(AndMatcher, Permission_Set) { perm->set_destination_port(123); Envoy::Network::MockConnection conn; - Envoy::Http::RequestHeaderMapImpl headers; + Envoy::Http::TestRequestHeaderMapImpl headers; NiceMock info; Envoy::Network::Address::InstanceConstSharedPtr addr = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 123, false); @@ -74,7 +74,7 @@ TEST(AndMatcher, Principal_Set) { cidr->mutable_prefix_len()->set_value(24); Envoy::Network::MockConnection conn; - Envoy::Http::RequestHeaderMapImpl headers; + Envoy::Http::TestRequestHeaderMapImpl headers; NiceMock info; Envoy::Network::Address::InstanceConstSharedPtr addr = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 123, false); @@ -94,7 +94,7 @@ TEST(OrMatcher, Permission_Set) { perm->set_destination_port(123); Envoy::Network::MockConnection conn; - Envoy::Http::RequestHeaderMapImpl headers; + Envoy::Http::TestRequestHeaderMapImpl headers; NiceMock info; Envoy::Network::Address::InstanceConstSharedPtr addr = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 456, false); @@ -116,7 +116,7 @@ TEST(OrMatcher, Principal_Set) { cidr->mutable_prefix_len()->set_value(24); Envoy::Network::MockConnection conn; - Envoy::Http::RequestHeaderMapImpl headers; + Envoy::Http::TestRequestHeaderMapImpl headers; NiceMock info; Envoy::Network::Address::InstanceConstSharedPtr addr = Envoy::Network::Utility::parseInternetAddress("1.2.4.6", 456, false); @@ -151,7 +151,7 @@ TEST(HeaderMatcher, HeaderMatcher) { config.set_name("foo"); config.set_exact_match("bar"); - Envoy::Http::RequestHeaderMapImpl headers; + Envoy::Http::TestRequestHeaderMapImpl headers; Envoy::Http::LowerCaseString key("foo"); std::string value = "bar"; headers.setReference(key, value); @@ -169,7 +169,7 @@ TEST(HeaderMatcher, HeaderMatcher) { TEST(IPMatcher, IPMatcher) { Envoy::Network::MockConnection conn; - Envoy::Http::RequestHeaderMapImpl headers; + Envoy::Http::TestRequestHeaderMapImpl headers; NiceMock info; Envoy::Network::Address::InstanceConstSharedPtr connectionRemote = Envoy::Network::Utility::parseInternetAddress("12.13.14.15", 789, false); @@ -232,7 +232,7 @@ TEST(IPMatcher, IPMatcher) { TEST(PortMatcher, PortMatcher) { Envoy::Network::MockConnection conn; - 
Envoy::Http::RequestHeaderMapImpl headers; + Envoy::Http::TestRequestHeaderMapImpl headers; NiceMock info; Envoy::Network::Address::InstanceConstSharedPtr addr = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 123, false); @@ -336,7 +336,7 @@ TEST(AuthenticatedMatcher, NoSSL) { TEST(MetadataMatcher, MetadataMatcher) { Envoy::Network::MockConnection conn; - Envoy::Http::RequestHeaderMapImpl header; + Envoy::Http::TestRequestHeaderMapImpl header; NiceMock info; auto label = MessageUtil::keyValueStruct("label", "prod"); @@ -368,7 +368,7 @@ TEST(PolicyMatcher, PolicyMatcher) { RBAC::PolicyMatcher matcher(policy, builder.get()); Envoy::Network::MockConnection conn; - Envoy::Http::RequestHeaderMapImpl headers; + Envoy::Http::TestRequestHeaderMapImpl headers; NiceMock info; auto ssl = std::make_shared(); Envoy::Network::Address::InstanceConstSharedPtr addr = @@ -431,7 +431,7 @@ TEST(RequestedServerNameMatcher, EmptyRequestedServerName) { } TEST(PathMatcher, NoPathInHeader) { - Envoy::Http::RequestHeaderMapImpl headers; + Envoy::Http::TestRequestHeaderMapImpl headers; envoy::type::matcher::v3::PathMatcher matcher; matcher.mutable_path()->mutable_safe_regex()->mutable_google_re2(); matcher.mutable_path()->mutable_safe_regex()->set_regex(".*"); @@ -443,7 +443,7 @@ TEST(PathMatcher, NoPathInHeader) { } TEST(PathMatcher, ValidPathInHeader) { - Envoy::Http::RequestHeaderMapImpl headers; + Envoy::Http::TestRequestHeaderMapImpl headers; envoy::type::matcher::v3::PathMatcher matcher; matcher.mutable_path()->set_exact("/exact"); diff --git a/test/extensions/filters/http/compressor/compressor_filter_integration_test.cc b/test/extensions/filters/http/compressor/compressor_filter_integration_test.cc index 33b976857c55..3982947a6f56 100644 --- a/test/extensions/filters/http/compressor/compressor_filter_integration_test.cc +++ b/test/extensions/filters/http/compressor/compressor_filter_integration_test.cc @@ -25,8 +25,8 @@ class CompressorIntegrationTest : public testing::TestWithParamset_value(false); }); testNormalRequest( - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "OPTIONS"}, {":path", "/legacy-no-cors/test"}, {":scheme", "http"}, @@ -233,7 +233,7 @@ TEST_P(CorsFilterIntegrationTest, DEPRECATED_FEATURE_TEST(TestLegacyCorsDisabled {"access-control-request-method", "GET"}, {"origin", "test-origin"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"content-length", "0"}, {":status", "200"}, @@ -242,14 +242,14 @@ TEST_P(CorsFilterIntegrationTest, DEPRECATED_FEATURE_TEST(TestLegacyCorsDisabled TEST_P(CorsFilterIntegrationTest, DEPRECATED_FEATURE_TEST(TestEncodeHeaders)) { testNormalRequest( - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", "/cors-vhost-config/test"}, {":scheme", "http"}, {":authority", "test-host"}, {"origin", "test-origin"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"access-control-allow-origin", "test-origin"}, {"server", "envoy"}, {"content-length", "0"}, @@ -259,14 +259,14 @@ TEST_P(CorsFilterIntegrationTest, DEPRECATED_FEATURE_TEST(TestEncodeHeaders)) { TEST_P(CorsFilterIntegrationTest, DEPRECATED_FEATURE_TEST(TestEncodeHeadersCredentialsAllowed)) { testNormalRequest( - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", "/cors-credentials-allowed/test"}, {":scheme", "http"}, {":authority", "test-host"}, {"origin", "test-origin"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ 
{"access-control-allow-origin", "test-origin"}, {"access-control-allow-credentials", "true"}, {"server", "envoy"}, @@ -277,14 +277,14 @@ TEST_P(CorsFilterIntegrationTest, DEPRECATED_FEATURE_TEST(TestEncodeHeadersCrede TEST_P(CorsFilterIntegrationTest, DEPRECATED_FEATURE_TEST(TestAllowedOriginRegex)) { testNormalRequest( - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", "/cors-allow-origin-regex/test"}, {":scheme", "http"}, {":authority", "test-host"}, {"origin", "www.envoyproxy.io"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"access-control-allow-origin", "www.envoyproxy.io"}, {"access-control-allow-credentials", "true"}, {"server", "envoy"}, @@ -295,14 +295,14 @@ TEST_P(CorsFilterIntegrationTest, DEPRECATED_FEATURE_TEST(TestAllowedOriginRegex TEST_P(CorsFilterIntegrationTest, DEPRECATED_FEATURE_TEST(TestExposeHeaders)) { testNormalRequest( - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", "/cors-expose-headers/test"}, {":scheme", "http"}, {":authority", "test-host"}, {"origin", "test-origin-1"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"access-control-allow-origin", "test-origin-1"}, {"access-control-expose-headers", "custom-header-1,custom-header-2"}, {"server", "envoy"}, diff --git a/test/extensions/filters/http/decompressor/decompressor_filter_test.cc b/test/extensions/filters/http/decompressor/decompressor_filter_test.cc index aa7b43435fb8..3917ad054595 100644 --- a/test/extensions/filters/http/decompressor/decompressor_filter_test.cc +++ b/test/extensions/filters/http/decompressor/decompressor_filter_test.cc @@ -52,12 +52,12 @@ class DecompressorFilterTest : public testing::TestWithParam { std::unique_ptr doHeaders(const Http::HeaderMap& headers, const bool end_stream) { if (isRequestDirection()) { - auto request_headers = Http::createHeaderMap(headers); + auto request_headers = std::make_unique(headers); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(*request_headers, end_stream)); return std::move(request_headers); } else { - auto response_headers = Http::createHeaderMap(headers); + auto response_headers = std::make_unique(headers); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(*response_headers, end_stream)); return std::move(response_headers); @@ -145,38 +145,38 @@ INSTANTIATE_TEST_SUITE_P(IsRequestDirection, DecompressorFilterTest, ::testing::Values(true, false)); TEST_P(DecompressorFilterTest, DecompressionActive) { - Http::TestHeaderMapImpl headers_before_filter{{"content-encoding", "mock"}, - {"content-length", "256"}}; + Http::TestRequestHeaderMapImpl headers_before_filter{{"content-encoding", "mock"}, + {"content-length", "256"}}; decompressionActive(headers_before_filter, absl::nullopt /* expected_content_encoding */); } TEST_P(DecompressorFilterTest, DecompressionActiveContentEncodingSpacing) { // Additional spacing should still match. - Http::TestHeaderMapImpl headers_before_filter{{"content-encoding", " mock "}, - {"content-length", "256"}}; + Http::TestRequestHeaderMapImpl headers_before_filter{{"content-encoding", " mock "}, + {"content-length", "256"}}; decompressionActive(headers_before_filter, absl::nullopt /* expected_content_encoding */); } TEST_P(DecompressorFilterTest, DecompressionActiveContentEncodingCasing) { // Different casing should still match. 
- Http::TestHeaderMapImpl headers_before_filter{{"content-encoding", "MOCK"}, - {"content-length", "256"}}; + Http::TestRequestHeaderMapImpl headers_before_filter{{"content-encoding", "MOCK"}, + {"content-length", "256"}}; decompressionActive(headers_before_filter, absl::nullopt /* expected_content_encoding */); } TEST_P(DecompressorFilterTest, DecompressionActiveMultipleEncodings) { // If the first encoding in the Content-Encoding header is the configured value, the filter should // still be active. - Http::TestHeaderMapImpl headers_before_filter{{"content-encoding", "mock, br"}, - {"content-length", "256"}}; + Http::TestRequestHeaderMapImpl headers_before_filter{{"content-encoding", "mock, br"}, + {"content-length", "256"}}; decompressionActive(headers_before_filter, "br"); } TEST_P(DecompressorFilterTest, DecompressionActiveMultipleEncodings2) { // If the first encoding in the Content-Encoding header is the configured value, the filter should // still be active. - Http::TestHeaderMapImpl headers_before_filter{{"content-encoding", "mock, br , gzip "}, - {"content-length", "256"}}; + Http::TestRequestHeaderMapImpl headers_before_filter{{"content-encoding", "mock, br , gzip "}, + {"content-length", "256"}}; decompressionActive(headers_before_filter, "br , gzip"); } @@ -189,8 +189,8 @@ TEST_P(DecompressorFilterTest, DisableAdvertiseAcceptEncoding) { advertise_accept_encoding: false )EOF"); - Http::TestHeaderMapImpl headers_before_filter{{"content-encoding", "mock"}, - {"content-length", "256"}}; + Http::TestRequestHeaderMapImpl headers_before_filter{{"content-encoding", "mock"}, + {"content-length", "256"}}; decompressionActive(headers_before_filter, absl::nullopt /* expected_content_encoding*/, absl::nullopt /* expected_accept_encoding */); } @@ -204,8 +204,8 @@ TEST_P(DecompressorFilterTest, ExplicitlyEnableAdvertiseAcceptEncoding) { advertise_accept_encoding: true )EOF"); - Http::TestHeaderMapImpl headers_before_filter{{"content-encoding", "mock"}, - {"content-length", "256"}}; + Http::TestRequestHeaderMapImpl headers_before_filter{{"content-encoding", "mock"}, + {"content-length", "256"}}; if (isRequestDirection()) { // Also test that the filter appends to an already existing header. 
headers_before_filter.addCopy("accept-encoding", "br"); @@ -232,8 +232,8 @@ TEST_P(DecompressorFilterTest, DecompressionDisabled) { )EOF"); EXPECT_CALL(*decompressor_factory_, createDecompressor()).Times(0); - Http::TestHeaderMapImpl headers_before_filter{{"content-encoding", "mock"}, - {"content-length", "256"}}; + Http::TestRequestHeaderMapImpl headers_before_filter{{"content-encoding", "mock"}, + {"content-length", "256"}}; std::unique_ptr headers_after_filter = doHeaders(headers_before_filter, false /* end_stream */); TestUtility::headerMapEqualIgnoreOrder(headers_before_filter, *headers_after_filter); @@ -253,8 +253,8 @@ TEST_P(DecompressorFilterTest, RequestDecompressionDisabled) { runtime_key: does_not_exist )EOF"); - Http::TestHeaderMapImpl headers_before_filter{{"content-encoding", "mock"}, - {"content-length", "256"}}; + Http::TestRequestHeaderMapImpl headers_before_filter{{"content-encoding", "mock"}, + {"content-length", "256"}}; if (isRequestDirection()) { EXPECT_CALL(*decompressor_factory_, createDecompressor()).Times(0); @@ -280,8 +280,8 @@ TEST_P(DecompressorFilterTest, ResponseDecompressionDisabled) { runtime_key: does_not_exist )EOF"); - Http::TestHeaderMapImpl headers_before_filter{{"content-encoding", "mock"}, - {"content-length", "256"}}; + Http::TestRequestHeaderMapImpl headers_before_filter{{"content-encoding", "mock"}, + {"content-length", "256"}}; if (isRequestDirection()) { // Accept-Encoding is not advertised in the request headers when response decompression is @@ -299,7 +299,7 @@ TEST_P(DecompressorFilterTest, ResponseDecompressionDisabled) { TEST_P(DecompressorFilterTest, NoDecompressionHeadersOnly) { EXPECT_CALL(*decompressor_factory_, createDecompressor()).Times(0); - Http::TestHeaderMapImpl headers_before_filter; + Http::TestRequestHeaderMapImpl headers_before_filter; std::unique_ptr headers_after_filter = doHeaders(headers_before_filter, true /* end_stream */); TestUtility::headerMapEqualIgnoreOrder(headers_before_filter, *headers_after_filter); @@ -307,7 +307,7 @@ TEST_P(DecompressorFilterTest, NoDecompressionHeadersOnly) { TEST_P(DecompressorFilterTest, NoDecompressionContentEncodingAbsent) { EXPECT_CALL(*decompressor_factory_, createDecompressor()).Times(0); - Http::TestHeaderMapImpl headers_before_filter{{"content-length", "256"}}; + Http::TestRequestHeaderMapImpl headers_before_filter{{"content-length", "256"}}; std::unique_ptr headers_after_filter = doHeaders(headers_before_filter, false /* end_stream */); TestUtility::headerMapEqualIgnoreOrder(headers_before_filter, *headers_after_filter); @@ -317,8 +317,8 @@ TEST_P(DecompressorFilterTest, NoDecompressionContentEncodingAbsent) { TEST_P(DecompressorFilterTest, NoDecompressionContentEncodingDoesNotMatch) { EXPECT_CALL(*decompressor_factory_, createDecompressor()).Times(0); - Http::TestHeaderMapImpl headers_before_filter{{"content-encoding", "not-matching"}, - {"content-length", "256"}}; + Http::TestRequestHeaderMapImpl headers_before_filter{{"content-encoding", "not-matching"}, + {"content-length", "256"}}; std::unique_ptr headers_after_filter = doHeaders(headers_before_filter, false /* end_stream */); TestUtility::headerMapEqualIgnoreOrder(headers_before_filter, *headers_after_filter); @@ -330,8 +330,8 @@ TEST_P(DecompressorFilterTest, NoDecompressionContentEncodingNotCurrent) { EXPECT_CALL(*decompressor_factory_, createDecompressor()).Times(0); // The decompressor's content scheme is not the first value in the comma-delimited list in the // Content-Encoding header. 
Therefore, compression will not occur. - Http::TestHeaderMapImpl headers_before_filter{{"content-encoding", "gzip,mock"}, - {"content-length", "256"}}; + Http::TestRequestHeaderMapImpl headers_before_filter{{"content-encoding", "gzip,mock"}, + {"content-length", "256"}}; std::unique_ptr headers_after_filter = doHeaders(headers_before_filter, false /* end_stream */); TestUtility::headerMapEqualIgnoreOrder(headers_before_filter, *headers_after_filter); @@ -341,7 +341,7 @@ TEST_P(DecompressorFilterTest, NoDecompressionContentEncodingNotCurrent) { TEST_P(DecompressorFilterTest, NoResponseDecompressionNoTransformPresent) { EXPECT_CALL(*decompressor_factory_, createDecompressor()).Times(0); - Http::TestHeaderMapImpl headers_before_filter{ + Http::TestRequestHeaderMapImpl headers_before_filter{ {"cache-control", Http::Headers::get().CacheControlValues.NoTransform}, {"content-encoding", "mock"}, {"content-length", "256"}}; @@ -354,7 +354,7 @@ TEST_P(DecompressorFilterTest, NoResponseDecompressionNoTransformPresent) { TEST_P(DecompressorFilterTest, NoResponseDecompressionNoTransformPresentInList) { EXPECT_CALL(*decompressor_factory_, createDecompressor()).Times(0); - Http::TestHeaderMapImpl headers_before_filter{ + Http::TestRequestHeaderMapImpl headers_before_filter{ {"cache-control", fmt::format("{}, {}", Http::Headers::get().CacheControlValues.NoCache, Http::Headers::get().CacheControlValues.NoTransform)}, {"content-encoding", "mock"}, diff --git a/test/extensions/filters/http/dynamo/dynamo_request_parser_test.cc b/test/extensions/filters/http/dynamo/dynamo_request_parser_test.cc index 7d5f26d05095..69d616ddebc6 100644 --- a/test/extensions/filters/http/dynamo/dynamo_request_parser_test.cc +++ b/test/extensions/filters/http/dynamo/dynamo_request_parser_test.cc @@ -20,25 +20,26 @@ namespace { TEST(DynamoRequestParser, parseOperation) { // Well formed x-amz-target header, in a format, Version.Operation { - Http::TestHeaderMapImpl headers{{"X", "X"}, {"x-amz-target", "X.Operation"}}; + Http::TestRequestHeaderMapImpl headers{{"X", "X"}, {"x-amz-target", "X.Operation"}}; EXPECT_EQ("Operation", RequestParser::parseOperation(headers)); } // Not well formed x-amz-target header. { - Http::TestHeaderMapImpl headers{{"X", "X"}, {"x-amz-target", "X,Operation"}}; + Http::TestRequestHeaderMapImpl headers{{"X", "X"}, {"x-amz-target", "X,Operation"}}; EXPECT_EQ("", RequestParser::parseOperation(headers)); } // Too many entries in the Version.Operation. 
{ - Http::TestHeaderMapImpl headers{{"X", "X"}, {"x-amz-target", "NOT_VALID.NOT_VALID.NOT_VALID"}}; + Http::TestRequestHeaderMapImpl headers{{"X", "X"}, + {"x-amz-target", "NOT_VALID.NOT_VALID.NOT_VALID"}}; EXPECT_EQ("", RequestParser::parseOperation(headers)); } // Required header is not present in the headers { - Http::TestHeaderMapImpl headers{{"Z", "Z"}}; + Http::TestRequestHeaderMapImpl headers{{"Z", "Z"}}; EXPECT_EQ("", RequestParser::parseOperation(headers)); } } diff --git a/test/extensions/filters/http/ext_authz/ext_authz_test.cc b/test/extensions/filters/http/ext_authz/ext_authz_test.cc index a7ea123bbc2f..b5236920fb67 100644 --- a/test/extensions/filters/http/ext_authz/ext_authz_test.cc +++ b/test/extensions/filters/http/ext_authz/ext_authz_test.cc @@ -1355,7 +1355,7 @@ TEST_P(HttpFilterTestParam, DestroyResponseBeforeSendLocalReply) { EXPECT_CALL(filter_callbacks_, encodeData(_, true)) .WillOnce(Invoke([&](Buffer::Instance& data, bool) { response_ptr.reset(); - Http::TestHeaderMapImpl test_headers{*saved_headers}; + Http::TestRequestHeaderMapImpl test_headers{*saved_headers}; EXPECT_EQ(test_headers.get_("foo"), "bar"); EXPECT_EQ(test_headers.get_("bar"), "foo"); EXPECT_EQ(data.toString(), "foo"); @@ -1420,7 +1420,7 @@ TEST_P(HttpFilterTestParam, OverrideEncodingHeaders) { EXPECT_CALL(filter_callbacks_, encodeData(_, true)) .WillOnce(Invoke([&](Buffer::Instance& data, bool) { response_ptr.reset(); - Http::TestHeaderMapImpl test_headers{*saved_headers}; + Http::TestRequestHeaderMapImpl test_headers{*saved_headers}; EXPECT_EQ(test_headers.get_("foo"), "bar"); EXPECT_EQ(test_headers.get_("bar"), "foo"); EXPECT_EQ(test_headers.get_("foobar"), "DO_NOT_OVERRIDE"); diff --git a/test/extensions/filters/http/fault/fault_filter_integration_test.cc b/test/extensions/filters/http/fault/fault_filter_integration_test.cc index 0864793d136f..b6ad8536ece7 100644 --- a/test/extensions/filters/http/fault/fault_filter_integration_test.cc +++ b/test/extensions/filters/http/fault/fault_filter_integration_test.cc @@ -328,7 +328,7 @@ TEST_P(FaultIntegrationTestHttp2, ResponseRateLimitTrailersBodyFlushed) { decoder->waitForBodyData(127); // Send trailers and wait for end stream. - Http::TestHeaderMapImpl trailers{{"hello", "world"}}; + Http::TestResponseTrailerMapImpl trailers{{"hello", "world"}}; upstream_request_->encodeTrailers(trailers); decoder->waitForEndStream(); EXPECT_NE(nullptr, decoder->trailers()); @@ -348,7 +348,7 @@ TEST_P(FaultIntegrationTestHttp2, ResponseRateLimitTrailersBodyNotFlushed) { upstream_request_->encodeHeaders(default_response_headers_, false); Buffer::OwnedImpl data(std::string(128, 'a')); upstream_request_->encodeData(data, false); - Http::TestHeaderMapImpl trailers{{"hello", "world"}}; + Http::TestResponseTrailerMapImpl trailers{{"hello", "world"}}; upstream_request_->encodeTrailers(trailers); // Wait for a tick worth of data. 
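An illustrative aside on the pattern these test updates follow: the typed test header maps are constructed directly from initializer lists of name/value pairs and expose the same get_() helpers the hunks above rely on. Below is a minimal sketch, assuming Envoy's test utilities (test/test_common/utility.h) provide Http::TestRequestHeaderMapImpl and Http::TestResponseHeaderMapImpl as used in these tests; the test name is hypothetical and the snippet is not part of this patch.

#include "test/test_common/utility.h"

#include "gtest/gtest.h"

namespace Envoy {

// Hypothetical example, not part of the patch: request- vs response-typed
// test header maps built from initializer lists, as in the updated tests above.
TEST(TypedTestHeaderMapSketch, RequestAndResponseMaps) {
  // Request-side map carries request pseudo-headers such as :method and :path.
  Http::TestRequestHeaderMapImpl request_headers{
      {":method", "GET"}, {":path", "/"}, {":authority", "host"}};
  EXPECT_EQ("GET", request_headers.get_(":method"));

  // Response-side map carries :status; the response-typed class makes the
  // direction of the headers explicit at the call site.
  Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}};
  EXPECT_EQ("200", response_headers.get_(":status"));
}

} // namespace Envoy

Keeping request and response header maps as distinct types makes each call site's header direction explicit, which is the pattern the hunks above follow when replacing the generic Http::TestHeaderMapImpl with a request- or response-specific class.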
diff --git a/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc b/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc index 87caa2275c8d..bb72aee46454 100644 --- a/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc +++ b/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc @@ -342,9 +342,10 @@ TEST_F(GrpcJsonTranscoderFilterTest, NoTranscoding) { {":method", "POST"}, {":path", "/grpc.service/UnknownGrpcMethod"}}; - Http::TestHeaderMapImpl expected_request_headers{{"content-type", "application/grpc"}, - {":method", "POST"}, - {":path", "/grpc.service/UnknownGrpcMethod"}}; + Http::TestRequestHeaderMapImpl expected_request_headers{ + {"content-type", "application/grpc"}, + {":method", "POST"}, + {":path", "/grpc.service/UnknownGrpcMethod"}}; EXPECT_CALL(decoder_callbacks_, clearRouteCache()).Times(0); @@ -363,8 +364,8 @@ TEST_F(GrpcJsonTranscoderFilterTest, NoTranscoding) { Http::TestResponseHeaderMapImpl response_headers{{"content-type", "application/grpc"}, {":status", "200"}}; - Http::TestHeaderMapImpl expected_response_headers{{"content-type", "application/grpc"}, - {":status", "200"}}; + Http::TestResponseHeaderMapImpl expected_response_headers{{"content-type", "application/grpc"}, + {":status", "200"}}; EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers, false)); EXPECT_EQ(expected_response_headers, response_headers); @@ -374,7 +375,7 @@ TEST_F(GrpcJsonTranscoderFilterTest, NoTranscoding) { EXPECT_EQ(2, response_data.length()); Http::TestResponseTrailerMapImpl response_trailers{{"grpc-status", "0"}}; - Http::TestHeaderMapImpl expected_response_trailers{{"grpc-status", "0"}}; + Http::TestResponseTrailerMapImpl expected_response_trailers{{"grpc-status", "0"}}; EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers)); EXPECT_EQ(expected_response_trailers, response_trailers); } diff --git a/test/extensions/filters/http/gzip/gzip_filter_integration_test.cc b/test/extensions/filters/http/gzip/gzip_filter_integration_test.cc index 6e41050e3477..409fe39e5eff 100644 --- a/test/extensions/filters/http/gzip/gzip_filter_integration_test.cc +++ b/test/extensions/filters/http/gzip/gzip_filter_integration_test.cc @@ -25,8 +25,8 @@ class GzipIntegrationTest : public testing::TestWithParam expected = {{"auth", "1"}}; - Http::TestHeaderMapImpl empty_headers; + Http::TestResponseHeaderMapImpl empty_headers; EXPECT_CALL(encoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_)); EXPECT_CALL(req_info_, @@ -213,7 +213,7 @@ TEST_F(HeaderToMetadataTest, NumberTypeTest) { initializeFilter(response_config_yaml); Http::TestResponseHeaderMapImpl incoming_headers{{"x-authenticated", "1"}}; std::map expected = {{"auth", 1}}; - Http::TestHeaderMapImpl empty_headers; + Http::TestResponseHeaderMapImpl empty_headers; EXPECT_CALL(encoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_)); EXPECT_CALL(req_info_, @@ -238,7 +238,7 @@ TEST_F(HeaderToMetadataTest, StringTypeInBase64UrlTest) { const auto encoded = Base64::encode(data.c_str(), data.size()); Http::TestResponseHeaderMapImpl incoming_headers{{"x-authenticated", encoded}}; std::map expected = {{"auth", data}}; - Http::TestHeaderMapImpl empty_headers; + Http::TestResponseHeaderMapImpl empty_headers; EXPECT_CALL(encoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_)); EXPECT_CALL(req_info_, @@ -416,7 +416,7 @@ TEST_F(HeaderToMetadataTest, 
IgnoreHeaderValueUseConstant) { initializeFilter(response_config_yaml); Http::TestResponseHeaderMapImpl incoming_headers{{"x-something", "thing"}}; std::map expected = {{"something", "else"}}; - Http::TestHeaderMapImpl empty_headers; + Http::TestResponseHeaderMapImpl empty_headers; EXPECT_CALL(encoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_)); EXPECT_CALL(req_info_, diff --git a/test/extensions/filters/http/ip_tagging/ip_tagging_filter_test.cc b/test/extensions/filters/http/ip_tagging/ip_tagging_filter_test.cc index e99f427275a8..492600503f60 100644 --- a/test/extensions/filters/http/ip_tagging/ip_tagging_filter_test.cc +++ b/test/extensions/filters/http/ip_tagging/ip_tagging_filter_test.cc @@ -3,7 +3,6 @@ #include "envoy/extensions/filters/http/ip_tagging/v3/ip_tagging.pb.h" #include "common/buffer/buffer_impl.h" -#include "common/http/header_map_impl.h" #include "common/network/address_impl.h" #include "common/network/utility.h" @@ -81,7 +80,7 @@ TEST_F(IpTaggingFilterTest, InternalRequest) { EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers)); // Check external requests don't get a tag. - request_headers = {}; + request_headers = Http::TestRequestHeaderMapImpl{}; EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false)); EXPECT_FALSE(request_headers.has(Http::Headers::get().EnvoyIpTags)); } @@ -147,7 +146,7 @@ request_type: both EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false)); EXPECT_EQ("internal_request", request_headers.get_(Http::Headers::get().EnvoyIpTags)); - request_headers = {}; + request_headers = Http::TestRequestHeaderMapImpl{}; remote_address = Network::Utility::parseInternetAddress("1.2.3.4"); EXPECT_CALL(filter_callbacks_.stream_info_, downstreamRemoteAddress()) .WillOnce(ReturnRef(remote_address)); @@ -283,7 +282,7 @@ TEST_F(IpTaggingFilterTest, ClearRouteCache) { // no tags, no call EXPECT_CALL(filter_callbacks_, clearRouteCache()).Times(0); - request_headers = {}; + request_headers = Http::TestRequestHeaderMapImpl{}; EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false)); EXPECT_FALSE(request_headers.has(Http::Headers::get().EnvoyIpTags)); } diff --git a/test/extensions/filters/http/lua/lua_filter_test.cc b/test/extensions/filters/http/lua/lua_filter_test.cc index 3d2e35eb3d0e..a2ee0a2f03ca 100644 --- a/test/extensions/filters/http/lua/lua_filter_test.cc +++ b/test/extensions/filters/http/lua/lua_filter_test.cc @@ -771,12 +771,12 @@ TEST_F(LuaHttpFilterTest, HttpCall) { .WillOnce( Invoke([&](Http::RequestMessagePtr& message, Http::AsyncClient::Callbacks& cb, const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { - EXPECT_EQ((Http::TestHeaderMapImpl{{":path", "/"}, - {":method", "POST"}, - {":authority", "foo"}, - {"set-cookie", "flavor=chocolate; Path=/"}, - {"set-cookie", "variant=chewy; Path=/"}, - {"content-length", "11"}}), + EXPECT_EQ((Http::TestRequestHeaderMapImpl{{":path", "/"}, + {":method", "POST"}, + {":authority", "foo"}, + {"set-cookie", "flavor=chocolate; Path=/"}, + {"set-cookie", "variant=chewy; Path=/"}, + {"content-length", "11"}}), message->headers()); callbacks = &cb; return &request; @@ -834,12 +834,12 @@ TEST_F(LuaHttpFilterTest, HttpCallAsyncFalse) { .WillOnce( Invoke([&](Http::RequestMessagePtr& message, Http::AsyncClient::Callbacks& cb, const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { - 
EXPECT_EQ((Http::TestHeaderMapImpl{{":path", "/"}, - {":method", "POST"}, - {":authority", "foo"}, - {"set-cookie", "flavor=chocolate; Path=/"}, - {"set-cookie", "variant=chewy; Path=/"}, - {"content-length", "11"}}), + EXPECT_EQ((Http::TestRequestHeaderMapImpl{{":path", "/"}, + {":method", "POST"}, + {":authority", "foo"}, + {"set-cookie", "flavor=chocolate; Path=/"}, + {"set-cookie", "variant=chewy; Path=/"}, + {"content-length", "11"}}), message->headers()); callbacks = &cb; return &request; @@ -893,12 +893,12 @@ TEST_F(LuaHttpFilterTest, HttpCallAsynchronous) { .WillOnce( Invoke([&](Http::RequestMessagePtr& message, Http::AsyncClient::Callbacks& cb, const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { - EXPECT_EQ((Http::TestHeaderMapImpl{{":path", "/"}, - {":method", "POST"}, - {":authority", "foo"}, - {"set-cookie", "flavor=chocolate; Path=/"}, - {"set-cookie", "variant=chewy; Path=/"}, - {"content-length", "11"}}), + EXPECT_EQ((Http::TestRequestHeaderMapImpl{{":path", "/"}, + {":method", "POST"}, + {":authority", "foo"}, + {"set-cookie", "flavor=chocolate; Path=/"}, + {"set-cookie", "variant=chewy; Path=/"}, + {"content-length", "11"}}), message->headers()); callbacks = &cb; return &request; @@ -961,10 +961,10 @@ TEST_F(LuaHttpFilterTest, DoubleHttpCall) { .WillOnce( Invoke([&](Http::RequestMessagePtr& message, Http::AsyncClient::Callbacks& cb, const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { - EXPECT_EQ((Http::TestHeaderMapImpl{{":path", "/"}, - {":method", "POST"}, - {":authority", "foo"}, - {"content-length", "11"}}), + EXPECT_EQ((Http::TestRequestHeaderMapImpl{{":path", "/"}, + {":method", "POST"}, + {":authority", "foo"}, + {"content-length", "11"}}), message->headers()); callbacks = &cb; return &request; @@ -984,7 +984,7 @@ TEST_F(LuaHttpFilterTest, DoubleHttpCall) { .WillOnce( Invoke([&](Http::RequestMessagePtr& message, Http::AsyncClient::Callbacks& cb, const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { - EXPECT_EQ((Http::TestHeaderMapImpl{ + EXPECT_EQ((Http::TestRequestHeaderMapImpl{ {":path", "/bar"}, {":method", "GET"}, {":authority", "foo"}}), message->headers()); callbacks = &cb; @@ -1040,7 +1040,7 @@ TEST_F(LuaHttpFilterTest, HttpCallNoBody) { .WillOnce( Invoke([&](Http::RequestMessagePtr& message, Http::AsyncClient::Callbacks& cb, const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { - EXPECT_EQ((Http::TestHeaderMapImpl{ + EXPECT_EQ((Http::TestRequestHeaderMapImpl{ {":path", "/"}, {":method", "GET"}, {":authority", "foo"}}), message->headers()); callbacks = &cb; @@ -1098,7 +1098,7 @@ TEST_F(LuaHttpFilterTest, HttpCallImmediateResponse) { .WillOnce( Invoke([&](Http::RequestMessagePtr& message, Http::AsyncClient::Callbacks& cb, const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { - EXPECT_EQ((Http::TestHeaderMapImpl{ + EXPECT_EQ((Http::TestRequestHeaderMapImpl{ {":path", "/"}, {":method", "GET"}, {":authority", "foo"}}), message->headers()); callbacks = &cb; @@ -1110,9 +1110,9 @@ TEST_F(LuaHttpFilterTest, HttpCallImmediateResponse) { Http::ResponseMessagePtr response_message(new Http::ResponseMessageImpl( Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{":status", "200"}}})); - Http::TestHeaderMapImpl expected_headers{{":status", "403"}, - {"set-cookie", "flavor=chocolate; Path=/"}, - {"set-cookie", "variant=chewy; Path=/"}}; + Http::TestResponseHeaderMapImpl expected_headers{{":status", "403"}, + {"set-cookie", "flavor=chocolate; Path=/"}, + 
{"set-cookie", "variant=chewy; Path=/"}}; EXPECT_CALL(decoder_callbacks_, encodeHeaders_(HeaderMapEqualRef(&expected_headers), true)); callbacks->onSuccess(request, std::move(response_message)); } @@ -1422,7 +1422,7 @@ TEST_F(LuaHttpFilterTest, ImmediateResponse) { for (uint64_t i = 0; i < num_loops; i++) { Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}}; - Http::TestHeaderMapImpl expected_headers{{":status", "503"}, {"content-length", "4"}}; + Http::TestResponseHeaderMapImpl expected_headers{{":status", "503"}, {"content-length", "4"}}; EXPECT_CALL(decoder_callbacks_, encodeHeaders_(HeaderMapEqualRef(&expected_headers), false)); EXPECT_CALL(decoder_callbacks_, encodeData(_, true)); EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, diff --git a/test/extensions/filters/http/lua/wrappers_test.cc b/test/extensions/filters/http/lua/wrappers_test.cc index 32c003951e1f..bc4c3200d520 100644 --- a/test/extensions/filters/http/lua/wrappers_test.cc +++ b/test/extensions/filters/http/lua/wrappers_test.cc @@ -50,7 +50,7 @@ TEST_F(LuaHeaderMapWrapperTest, Methods) { InSequence s; setup(SCRIPT); - Http::TestHeaderMapImpl headers; + Http::TestRequestHeaderMapImpl headers; HeaderMapWrapper::create(coroutine_->luaState(), headers, []() { return true; }); EXPECT_CALL(*this, testPrint("WORLD")); EXPECT_CALL(*this, testPrint("'hello' 'WORLD'")); @@ -86,7 +86,7 @@ TEST_F(LuaHeaderMapWrapperTest, ModifiableMethods) { InSequence s; setup(SCRIPT); - Http::TestHeaderMapImpl headers; + Http::TestRequestHeaderMapImpl headers; HeaderMapWrapper::create(coroutine_->luaState(), headers, []() { return false; }); start("shouldBeOk"); @@ -119,13 +119,13 @@ TEST_F(LuaHeaderMapWrapperTest, Replace) { InSequence s; setup(SCRIPT); - Http::TestHeaderMapImpl headers{{":path", "/"}, {"other_header", "hello"}}; + Http::TestRequestHeaderMapImpl headers{{":path", "/"}, {"other_header", "hello"}}; HeaderMapWrapper::create(coroutine_->luaState(), headers, []() { return true; }); start("callMe"); - EXPECT_EQ((Http::TestHeaderMapImpl{{":path", "/new_path"}, - {"other_header", "other_header_value"}, - {"new_header", "new_header_value"}}), + EXPECT_EQ((Http::TestRequestHeaderMapImpl{{":path", "/new_path"}, + {"other_header", "other_header_value"}, + {"new_header", "new_header_value"}}), headers); } @@ -142,7 +142,7 @@ TEST_F(LuaHeaderMapWrapperTest, ModifyDuringIteration) { InSequence s; setup(SCRIPT); - Http::TestHeaderMapImpl headers{{"foo", "bar"}}; + Http::TestRequestHeaderMapImpl headers{{"foo", "bar"}}; HeaderMapWrapper::create(coroutine_->luaState(), headers, []() { return true; }); EXPECT_THROW_WITH_MESSAGE(start("callMe"), Filters::Common::Lua::LuaException, "[string \"...\"]:4: header map cannot be modified while iterating"); @@ -167,7 +167,7 @@ TEST_F(LuaHeaderMapWrapperTest, ModifyAfterIteration) { InSequence s; setup(SCRIPT); - Http::TestHeaderMapImpl headers{{"foo", "bar"}}; + Http::TestRequestHeaderMapImpl headers{{"foo", "bar"}}; HeaderMapWrapper::create(coroutine_->luaState(), headers, []() { return true; }); EXPECT_CALL(*this, testPrint("'foo' 'bar'")); EXPECT_CALL(*this, testPrint("'foo' 'bar'")); @@ -188,7 +188,7 @@ TEST_F(LuaHeaderMapWrapperTest, DontFinishIteration) { InSequence s; setup(SCRIPT); - Http::TestHeaderMapImpl headers{{"foo", "bar"}, {"hello", "world"}}; + Http::TestRequestHeaderMapImpl headers{{"foo", "bar"}, {"hello", "world"}}; HeaderMapWrapper::create(coroutine_->luaState(), headers, []() { return true; }); EXPECT_THROW_WITH_MESSAGE( start("callMe"), 
Filters::Common::Lua::LuaException, @@ -208,7 +208,7 @@ TEST_F(LuaHeaderMapWrapperTest, IteratorAcrossYield) { InSequence s; setup(SCRIPT); - Http::TestHeaderMapImpl headers{{"foo", "bar"}, {"hello", "world"}}; + Http::TestRequestHeaderMapImpl headers{{"foo", "bar"}, {"hello", "world"}}; Filters::Common::Lua::LuaDeathRef wrapper( HeaderMapWrapper::create(coroutine_->luaState(), headers, []() { return true; }), true); yield_callback_ = [] {}; diff --git a/test/extensions/filters/http/on_demand/on_demand_filter_test.cc b/test/extensions/filters/http/on_demand/on_demand_filter_test.cc index dae119f291ef..1724898c05b8 100644 --- a/test/extensions/filters/http/on_demand/on_demand_filter_test.cc +++ b/test/extensions/filters/http/on_demand/on_demand_filter_test.cc @@ -31,7 +31,7 @@ class OnDemandFilterTest : public testing::Test { // tests decodeHeaders() when no cached route is available and vhds is configured TEST_F(OnDemandFilterTest, TestDecodeHeaders) { - Http::RequestHeaderMapImpl headers; + Http::TestRequestHeaderMapImpl headers; std::shared_ptr route_config_ptr{new NiceMock()}; EXPECT_CALL(decoder_callbacks_, route()).WillOnce(Return(nullptr)); EXPECT_CALL(decoder_callbacks_, routeConfig()).Times(2).WillRepeatedly(Return(route_config_ptr)); @@ -42,13 +42,13 @@ TEST_F(OnDemandFilterTest, TestDecodeHeaders) { // tests decodeHeaders() when no cached route is available TEST_F(OnDemandFilterTest, TestDecodeHeadersWhenRouteAvailable) { - Http::RequestHeaderMapImpl headers; + Http::TestRequestHeaderMapImpl headers; EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, true)); } // tests decodeHeaders() when no route configuration is available TEST_F(OnDemandFilterTest, TestDecodeHeadersWhenRouteConfigIsNotAvailable) { - Http::RequestHeaderMapImpl headers; + Http::TestRequestHeaderMapImpl headers; std::shared_ptr route_config_ptr{new NiceMock()}; EXPECT_CALL(decoder_callbacks_, route()).WillOnce(Return(nullptr)); EXPECT_CALL(decoder_callbacks_, routeConfig()).WillOnce(Return(absl::nullopt)); diff --git a/test/extensions/filters/http/ratelimit/ratelimit_integration_test.cc b/test/extensions/filters/http/ratelimit/ratelimit_integration_test.cc index 38989dcbc7d7..1b30b786b223 100644 --- a/test/extensions/filters/http/ratelimit/ratelimit_integration_test.cc +++ b/test/extensions/filters/http/ratelimit/ratelimit_integration_test.cc @@ -173,7 +173,7 @@ class RatelimitIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, initiateClientConnection(); waitForRatelimitRequest(); sendRateLimitResponse(envoy::service::ratelimit::v3::RateLimitResponse::OK, - Http::ResponseHeaderMapImpl{}, Http::RequestHeaderMapImpl{}); + Http::TestResponseHeaderMapImpl{}, Http::TestRequestHeaderMapImpl{}); waitForSuccessfulUpstreamResponse(); cleanup(); @@ -249,7 +249,7 @@ TEST_P(RatelimitIntegrationTest, OverLimit) { initiateClientConnection(); waitForRatelimitRequest(); sendRateLimitResponse(envoy::service::ratelimit::v3::RateLimitResponse::OVER_LIMIT, - Http::ResponseHeaderMapImpl{}, Http::RequestHeaderMapImpl{}); + Http::TestResponseHeaderMapImpl{}, Http::TestRequestHeaderMapImpl{}); waitForFailedUpstreamResponse(429); cleanup(); @@ -264,7 +264,7 @@ TEST_P(RatelimitIntegrationTest, OverLimitWithHeaders) { Http::TestResponseHeaderMapImpl ratelimit_response_headers{ {"x-ratelimit-limit", "1000"}, {"x-ratelimit-remaining", "0"}, {"retry-after", "33"}}; sendRateLimitResponse(envoy::service::ratelimit::v3::RateLimitResponse::OVER_LIMIT, - ratelimit_response_headers, 
Http::RequestHeaderMapImpl{}); + ratelimit_response_headers, Http::TestRequestHeaderMapImpl{}); waitForFailedUpstreamResponse(429); ratelimit_response_headers.iterate( diff --git a/test/extensions/filters/http/ratelimit/ratelimit_test.cc b/test/extensions/filters/http/ratelimit/ratelimit_test.cc index 625801433f74..b177409534a5 100644 --- a/test/extensions/filters/http/ratelimit/ratelimit_test.cc +++ b/test/extensions/filters/http/ratelimit/ratelimit_test.cc @@ -269,15 +269,15 @@ TEST_F(HttpRateLimitFilterTest, OkResponseWithHeaders) { .Times(0); Http::HeaderMapPtr request_headers_to_add{ - new Http::TestHeaderMapImpl{{"x-rls-rate-limited", "true"}}}; - Http::HeaderMapPtr rl_headers{ - new Http::TestHeaderMapImpl{{"x-ratelimit-limit", "1000"}, {"x-ratelimit-remaining", "500"}}}; + new Http::TestRequestHeaderMapImpl{{"x-rls-rate-limited", "true"}}}; + Http::HeaderMapPtr rl_headers{new Http::TestResponseHeaderMapImpl{ + {"x-ratelimit-limit", "1000"}, {"x-ratelimit-remaining", "500"}}}; request_callbacks_->complete( Filters::Common::RateLimit::LimitStatus::OK, Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl(*rl_headers)}, Http::RequestHeaderMapPtr{new Http::TestRequestHeaderMapImpl(*request_headers_to_add)}); - Http::TestHeaderMapImpl expected_headers(*rl_headers); + Http::TestResponseHeaderMapImpl expected_headers(*rl_headers); Http::TestResponseHeaderMapImpl response_headers; EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, false)); EXPECT_EQ(true, (expected_headers == response_headers)); @@ -476,9 +476,9 @@ TEST_F(HttpRateLimitFilterTest, LimitResponseWithHeaders) { EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_data_, false)); EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers_)); - Http::HeaderMapPtr rl_headers{new Http::TestHeaderMapImpl{ + Http::HeaderMapPtr rl_headers{new Http::TestResponseHeaderMapImpl{ {"x-ratelimit-limit", "1000"}, {"x-ratelimit-remaining", "0"}, {"retry-after", "33"}}}; - Http::TestHeaderMapImpl expected_headers(*rl_headers); + Http::TestResponseHeaderMapImpl expected_headers(*rl_headers); expected_headers.addCopy(":status", "429"); expected_headers.addCopy("x-envoy-ratelimited", Http::Headers::get().EnvoyRateLimitedValues.True); @@ -488,7 +488,7 @@ TEST_F(HttpRateLimitFilterTest, LimitResponseWithHeaders) { setResponseFlag(StreamInfo::ResponseFlag::RateLimited)); Http::HeaderMapPtr request_headers_to_add{ - new Http::TestHeaderMapImpl{{"x-rls-rate-limited", "true"}}}; + new Http::TestRequestHeaderMapImpl{{"x-rls-rate-limited", "true"}}}; Http::ResponseHeaderMapPtr h{new Http::TestResponseHeaderMapImpl(*rl_headers)}; Http::RequestHeaderMapPtr uh{new Http::TestRequestHeaderMapImpl(*request_headers_to_add)}; diff --git a/test/extensions/filters/http/router/auto_sni_integration_test.cc b/test/extensions/filters/http/router/auto_sni_integration_test.cc index 9a7770c353c3..5404fcc9b711 100644 --- a/test/extensions/filters/http/router/auto_sni_integration_test.cc +++ b/test/extensions/filters/http/router/auto_sni_integration_test.cc @@ -69,7 +69,7 @@ TEST_P(AutoSniIntegrationTest, BasicAutoSniTest) { setup(); codec_client_ = makeHttpConnection(lookupPort("http")); const auto response_ = sendRequestAndWaitForResponse( - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", "/"}, {":scheme", "http"}, {":authority", "localhost"}}, 0, default_response_headers_, 0); @@ -87,7 +87,7 @@ TEST_P(AutoSniIntegrationTest, 
PassingNotDNS) { setup(); codec_client_ = makeHttpConnection(lookupPort("http")); const auto response_ = sendRequestAndWaitForResponse( - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", "/"}, {":scheme", "http"}, {":authority", "127.0.0.1"}}, 0, default_response_headers_, 0); diff --git a/test/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit_test.cc b/test/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit_test.cc index df6fabc70d9a..292fad5500a6 100644 --- a/test/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit_test.cc +++ b/test/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit_test.cc @@ -389,7 +389,7 @@ TEST_F(ThriftRateLimitFilterTest, LimitResponseWithHeaders) { EXPECT_EQ(ThriftProxy::FilterStatus::StopIteration, filter_->messageBegin(request_metadata_)); - Http::HeaderMapPtr rl_headers{new Http::TestHeaderMapImpl{ + Http::HeaderMapPtr rl_headers{new Http::TestRequestHeaderMapImpl{ {"x-ratelimit-limit", "1000"}, {"x-ratelimit-remaining", "0"}, {"retry-after", "33"}}}; EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0); diff --git a/test/extensions/filters/network/thrift_proxy/header_transport_impl_test.cc b/test/extensions/filters/network/thrift_proxy/header_transport_impl_test.cc index 5dd0416bc8b2..73a60b91ec5d 100644 --- a/test/extensions/filters/network/thrift_proxy/header_transport_impl_test.cc +++ b/test/extensions/filters/network/thrift_proxy/header_transport_impl_test.cc @@ -458,7 +458,7 @@ TEST(HeaderTransportTest, InfoBlock) { buffer.writeByte(0); // empty value buffer.writeByte(0); // padding - Http::HeaderMapImpl expected_headers; + Http::TestRequestHeaderMapImpl expected_headers; expected_headers.addCopy(Http::LowerCaseString("not"), "empty"); expected_headers.addCopy(Http::LowerCaseString("key"), "value"); expected_headers.addCopy(Http::LowerCaseString("key2"), std::string(128, 'x')); @@ -467,8 +467,7 @@ TEST(HeaderTransportTest, InfoBlock) { EXPECT_TRUE(transport.decodeFrameStart(buffer, metadata)); EXPECT_THAT(metadata, HasFrameSize(38U)); - Http::HeaderMapImpl& actual_headers = dynamic_cast(metadata.headers()); - EXPECT_EQ(expected_headers, actual_headers); + EXPECT_EQ(expected_headers, metadata.headers()); EXPECT_EQ(buffer.length(), 0); } diff --git a/test/extensions/filters/network/thrift_proxy/twitter_protocol_impl_test.cc b/test/extensions/filters/network/thrift_proxy/twitter_protocol_impl_test.cc index 3d08c0eb95e2..4d57ab1b03d4 100644 --- a/test/extensions/filters/network/thrift_proxy/twitter_protocol_impl_test.cc +++ b/test/extensions/filters/network/thrift_proxy/twitter_protocol_impl_test.cc @@ -487,7 +487,7 @@ TEST_F(TwitterProtocolTest, ParseRequestHeader) { EXPECT_TRUE(metadata_->flags()); EXPECT_EQ(5, *metadata_->flags()); - Http::TestHeaderMapImpl test_headers(metadata_->headers()); + Http::TestRequestHeaderMapImpl test_headers(metadata_->headers()); EXPECT_EQ(6, test_headers.size()); EXPECT_EQ("thrift-client-id", test_headers.get_(":client-id")); @@ -523,7 +523,7 @@ TEST_F(TwitterProtocolTest, ParseEmptyRequestHeader) { EXPECT_FALSE(metadata_->flags()); EXPECT_TRUE(metadata_->spans().empty()); - Http::TestHeaderMapImpl test_headers(metadata_->headers()); + Http::TestRequestHeaderMapImpl test_headers(metadata_->headers()); EXPECT_EQ(0, test_headers.size()); } @@ -556,7 +556,7 @@ TEST_F(TwitterProtocolTest, WriteRequestHeader) { EXPECT_TRUE(*metadata_->sampled()); EXPECT_EQ(5, *metadata_->flags()); - Http::TestHeaderMapImpl 
test_headers(metadata_->headers()); + Http::TestRequestHeaderMapImpl test_headers(metadata_->headers()); EXPECT_EQ(4, test_headers.size()); EXPECT_EQ("thrift-client-id", test_headers.get_(":client-id")); EXPECT_EQ("dest", test_headers.get_(":dest")); @@ -581,7 +581,7 @@ TEST_F(TwitterProtocolTest, WriteMostlyEmptyRequestHeader) { EXPECT_FALSE(metadata_->sampled()); EXPECT_FALSE(metadata_->flags()); - Http::TestHeaderMapImpl test_headers(metadata_->headers()); + Http::TestRequestHeaderMapImpl test_headers(metadata_->headers()); EXPECT_EQ(0, test_headers.size()); } @@ -696,7 +696,7 @@ TEST_F(TwitterProtocolTest, ParseResponseHeader) { EXPECT_FALSE(span.debug_); } - Http::TestHeaderMapImpl test_headers(metadata_->headers()); + Http::TestRequestHeaderMapImpl test_headers(metadata_->headers()); EXPECT_EQ(2, test_headers.size()); EXPECT_EQ("v1", test_headers.get_("k1")); EXPECT_EQ("v2", test_headers.get_("k2")); @@ -714,7 +714,7 @@ TEST_F(TwitterProtocolTest, ParseEmptyResponseHeader) { EXPECT_TRUE(metadata_->spans().empty()); - Http::TestHeaderMapImpl test_headers(metadata_->headers()); + Http::TestRequestHeaderMapImpl test_headers(metadata_->headers()); EXPECT_EQ(0, test_headers.size()); } @@ -798,7 +798,7 @@ TEST_F(TwitterProtocolTest, WriteResponseHeader) { EXPECT_TRUE(span2.binary_annotations_.empty()); EXPECT_FALSE(span2.debug_); - Http::TestHeaderMapImpl test_headers(metadata_->headers()); + Http::TestRequestHeaderMapImpl test_headers(metadata_->headers()); EXPECT_EQ("value1", test_headers.get_("key1")); EXPECT_EQ("value2", test_headers.get_("key2")); } @@ -822,7 +822,7 @@ TEST_F(TwitterProtocolTest, WriteEmptyResponseHeader) { EXPECT_TRUE(metadata_->spans().empty()); - Http::TestHeaderMapImpl test_headers(metadata_->headers()); + Http::TestRequestHeaderMapImpl test_headers(metadata_->headers()); EXPECT_EQ(0, test_headers.size()); } @@ -840,7 +840,7 @@ TEST_F(TwitterProtocolTest, TestUpgradedRequestMessageBegin) { EXPECT_EQ(101, metadata_->sequenceId()); EXPECT_EQ(1, *metadata_->traceId()); EXPECT_EQ(2, *metadata_->spanId()); - Http::TestHeaderMapImpl test_headers(metadata_->headers()); + Http::TestRequestHeaderMapImpl test_headers(metadata_->headers()); EXPECT_EQ("test_client", test_headers.get_(":client-id")); } @@ -865,7 +865,7 @@ TEST_F(TwitterProtocolTest, TestUpgradedRequestMessageContinuation) { EXPECT_EQ(101, metadata_->sequenceId()); EXPECT_EQ(1, *metadata_->traceId()); EXPECT_EQ(2, *metadata_->spanId()); - Http::TestHeaderMapImpl test_headers(metadata_->headers()); + Http::TestRequestHeaderMapImpl test_headers(metadata_->headers()); EXPECT_EQ("test_client", test_headers.get_(":client-id")); } } @@ -885,7 +885,7 @@ TEST_F(TwitterProtocolTest, TestUpgradedReplyMessageBegin) { EXPECT_EQ(1, metadata_->spans().size()); EXPECT_EQ(1, metadata_->spans().front().trace_id_); EXPECT_EQ(2, metadata_->spans().front().span_id_); - Http::TestHeaderMapImpl test_headers(metadata_->headers()); + Http::TestRequestHeaderMapImpl test_headers(metadata_->headers()); EXPECT_EQ("test-header-value", test_headers.get_("test-header")); } @@ -912,7 +912,7 @@ TEST_F(TwitterProtocolTest, TestUpgradedReplyMessageContinuation) { EXPECT_EQ(1, metadata_->spans().size()); EXPECT_EQ(1, metadata_->spans().front().trace_id_); EXPECT_EQ(2, metadata_->spans().front().span_id_); - Http::TestHeaderMapImpl test_headers(metadata_->headers()); + Http::TestRequestHeaderMapImpl test_headers(metadata_->headers()); EXPECT_EQ("test-header-value", test_headers.get_("test-header")); } } diff --git 
a/test/extensions/grpc_credentials/aws_iam/aws_iam_grpc_credentials_test.cc b/test/extensions/grpc_credentials/aws_iam/aws_iam_grpc_credentials_test.cc index 8dcfd96b25f0..689f37b33945 100644 --- a/test/extensions/grpc_credentials/aws_iam/aws_iam_grpc_credentials_test.cc +++ b/test/extensions/grpc_credentials/aws_iam/aws_iam_grpc_credentials_test.cc @@ -36,7 +36,7 @@ class GrpcAwsIamClientIntegrationTest : public GrpcSslClientIntegrationTest { void expectExtraHeaders(FakeStream& fake_stream) override { AssertionResult result = fake_stream.waitForHeadersComplete(); RELEASE_ASSERT(result, result.message()); - Http::TestHeaderMapImpl stream_headers(fake_stream.headers()); + Http::TestRequestHeaderMapImpl stream_headers(fake_stream.headers()); const auto auth_header = stream_headers.get_("Authorization"); const auto auth_parts = StringUtil::splitToken(auth_header, ", ", false); ASSERT_EQ(4, auth_parts.size()); diff --git a/test/extensions/grpc_credentials/file_based_metadata/file_based_metadata_grpc_credentials_test.cc b/test/extensions/grpc_credentials/file_based_metadata/file_based_metadata_grpc_credentials_test.cc index f567b3d3e258..f9be83ec5b6e 100644 --- a/test/extensions/grpc_credentials/file_based_metadata/file_based_metadata_grpc_credentials_test.cc +++ b/test/extensions/grpc_credentials/file_based_metadata/file_based_metadata_grpc_credentials_test.cc @@ -23,7 +23,7 @@ class GrpcFileBasedMetadataClientIntegrationTest : public GrpcSslClientIntegrati void expectExtraHeaders(FakeStream& fake_stream) override { AssertionResult result = fake_stream.waitForHeadersComplete(); RELEASE_ASSERT(result, result.message()); - Http::TestHeaderMapImpl stream_headers(fake_stream.headers()); + Http::TestRequestHeaderMapImpl stream_headers(fake_stream.headers()); if (!header_value_1_.empty()) { EXPECT_EQ(header_prefix_1_ + header_value_1_, stream_headers.get_(header_key_1_)); } diff --git a/test/extensions/stats_sinks/hystrix/hystrix_test.cc b/test/extensions/stats_sinks/hystrix/hystrix_test.cc index b691c61bc39e..5b88b4643df7 100644 --- a/test/extensions/stats_sinks/hystrix/hystrix_test.cc +++ b/test/extensions/stats_sinks/hystrix/hystrix_test.cc @@ -506,7 +506,7 @@ TEST_F(HystrixSinkTest, HystrixEventStreamHandler) { // This value doesn't matter in handlerHystrixEventStream absl::string_view path_and_query; - Http::ResponseHeaderMapImpl response_headers; + Http::TestResponseHeaderMapImpl response_headers; NiceMock admin_stream_mock; NiceMock connection_mock; @@ -529,11 +529,8 @@ TEST_F(HystrixSinkTest, HystrixEventStreamHandler) { EXPECT_EQ(response_headers.ContentType()->value(), "text/event-stream"); EXPECT_EQ(response_headers.CacheControl()->value(), "no-cache"); EXPECT_EQ(response_headers.Connection()->value(), "close"); - EXPECT_EQ(response_headers.AccessControlAllowOrigin()->value(), "*"); - - std::string access_control_allow_headers = - std::string(response_headers.getAccessControlAllowHeadersValue()); - EXPECT_THAT(access_control_allow_headers, HasSubstr("Accept")); + EXPECT_EQ(response_headers.get_("access-control-allow-origin"), "*"); + EXPECT_THAT(response_headers.get_("access-control-allow-headers"), HasSubstr("Accept")); } } // namespace diff --git a/test/extensions/tracers/xray/tracer_test.cc b/test/extensions/tracers/xray/tracer_test.cc index 40191027feb0..fcd448ba4df0 100644 --- a/test/extensions/tracers/xray/tracer_test.cc +++ b/test/extensions/tracers/xray/tracer_test.cc @@ -150,7 +150,7 @@ TEST_F(XRayTracerTest, SpanInjectContextHasXRayHeader) { Tracer tracer{span_name, 
std::move(broker_), server_.timeSource()}; auto span = tracer.startSpan(operation_name, server_.timeSource().systemTime(), absl::nullopt /*headers*/); - Http::RequestHeaderMapImpl request_headers; + Http::TestRequestHeaderMapImpl request_headers; span->injectContext(request_headers); auto* header = request_headers.get(Http::LowerCaseString{XRayTraceHeader}); ASSERT_NE(header, nullptr); @@ -163,7 +163,7 @@ TEST_F(XRayTracerTest, SpanInjectContextHasXRayHeaderNonSampled) { constexpr auto span_name = "my span"; Tracer tracer{span_name, std::move(broker_), server_.timeSource()}; auto span = tracer.createNonSampledSpan(); - Http::RequestHeaderMapImpl request_headers; + Http::TestRequestHeaderMapImpl request_headers; span->injectContext(request_headers); auto* header = request_headers.get(Http::LowerCaseString{XRayTraceHeader}); ASSERT_NE(header, nullptr); diff --git a/test/extensions/transport_sockets/tls/integration/ssl_integration_test.cc b/test/extensions/transport_sockets/tls/integration/ssl_integration_test.cc index aebffa4b2c35..06e0a549bd4c 100644 --- a/test/extensions/transport_sockets/tls/integration/ssl_integration_test.cc +++ b/test/extensions/transport_sockets/tls/integration/ssl_integration_test.cc @@ -436,7 +436,7 @@ TEST_P(SslTapIntegrationTest, TwoRequestsWithBinaryProto) { // First request (ID will be +1 since the client will also bump). const uint64_t first_id = Network::ConnectionImpl::nextGlobalIdForTest() + 1; codec_client_ = makeHttpConnection(creator()); - Http::TestHeaderMapImpl post_request_headers{ + Http::TestRequestHeaderMapImpl post_request_headers{ {":method", "POST"}, {":path", "/test/long/url"}, {":scheme", "http"}, {":authority", "host"}, {"x-lyft-user-id", "123"}, {"x-forwarded-for", "10.0.0.1"}}; auto response = @@ -474,7 +474,7 @@ TEST_P(SslTapIntegrationTest, TwoRequestsWithBinaryProto) { // Verify a second request hits a different file. const uint64_t second_id = Network::ConnectionImpl::nextGlobalIdForTest() + 1; codec_client_ = makeHttpConnection(creator()); - Http::TestHeaderMapImpl get_request_headers{ + Http::TestRequestHeaderMapImpl get_request_headers{ {":method", "GET"}, {":path", "/test/long/url"}, {":scheme", "http"}, {":authority", "host"}, {"x-lyft-user-id", "123"}, {"x-forwarded-for", "10.0.0.1"}}; response = diff --git a/test/integration/api_listener_integration_test.cc b/test/integration/api_listener_integration_test.cc index f00a7bd1fe0d..c71d57506b7c 100644 --- a/test/integration/api_listener_integration_test.cc +++ b/test/integration/api_listener_integration_test.cc @@ -97,7 +97,7 @@ TEST_P(ApiListenerIntegrationTest, Basic) { // The AutonomousUpstream responds with 200 OK and a body of 10 bytes. // In the http1 codec the end stream is encoded with encodeData and 0 bytes. 
- Http::TestHeaderMapImpl expected_response_headers{{":status", "200"}}; + Http::TestResponseHeaderMapImpl expected_response_headers{{":status", "200"}}; EXPECT_CALL(stream_encoder_, encodeHeaders(_, false)); EXPECT_CALL(stream_encoder_, encodeData(_, false)); EXPECT_CALL(stream_encoder_, encodeData(BufferStringEqual(""), true)).WillOnce(Notify(&done)); diff --git a/test/integration/autonomous_upstream.cc b/test/integration/autonomous_upstream.cc index 649dbe243029..14cf58a0cfd5 100644 --- a/test/integration/autonomous_upstream.cc +++ b/test/integration/autonomous_upstream.cc @@ -3,7 +3,8 @@ namespace Envoy { namespace { -void HeaderToInt(const char header_name[], int32_t& return_int, Http::TestHeaderMapImpl& headers) { +void HeaderToInt(const char header_name[], int32_t& return_int, + Http::TestResponseHeaderMapImpl& headers) { const std::string header_value(headers.get_(header_name)); if (!header_value.empty()) { uint64_t parsed_value; @@ -41,7 +42,7 @@ void AutonomousStream::setEndStream(bool end_stream) { // Check all the special headers and send a customized response based on them. void AutonomousStream::sendResponse() { - Http::TestHeaderMapImpl headers(*headers_); + Http::TestResponseHeaderMapImpl headers(*headers_); upstream_.setLastRequestHeaders(*headers_); int32_t request_body_length = -1; @@ -116,9 +117,9 @@ void AutonomousUpstream::setResponseHeaders( response_headers_ = std::move(response_headers); } -Http::TestHeaderMapImpl AutonomousUpstream::responseHeaders() { +Http::TestResponseHeaderMapImpl AutonomousUpstream::responseHeaders() { Thread::LockGuard lock(headers_lock_); - Http::TestHeaderMapImpl return_headers = *response_headers_; + Http::TestResponseHeaderMapImpl return_headers = *response_headers_; return return_headers; } diff --git a/test/integration/autonomous_upstream.h b/test/integration/autonomous_upstream.h index 5abb7bc186be..c188344a9ec7 100644 --- a/test/integration/autonomous_upstream.h +++ b/test/integration/autonomous_upstream.h @@ -57,7 +57,7 @@ class AutonomousUpstream : public FakeUpstream { : FakeUpstream(address, type, time_system), allow_incomplete_streams_(allow_incomplete_streams), response_headers_(std::make_unique( - Http::TestHeaderMapImpl({{":status", "200"}}))) {} + Http::TestResponseHeaderMapImpl({{":status", "200"}}))) {} AutonomousUpstream(Network::TransportSocketFactoryPtr&& transport_socket_factory, uint32_t port, FakeHttpConnection::Type type, Network::Address::IpVersion version, @@ -65,7 +65,7 @@ class AutonomousUpstream : public FakeUpstream { : FakeUpstream(std::move(transport_socket_factory), port, type, version, time_system), allow_incomplete_streams_(allow_incomplete_streams), response_headers_(std::make_unique( - Http::TestHeaderMapImpl({{":status", "200"}}))) {} + Http::TestResponseHeaderMapImpl({{":status", "200"}}))) {} ~AutonomousUpstream() override; bool @@ -78,7 +78,7 @@ class AutonomousUpstream : public FakeUpstream { void setLastRequestHeaders(const Http::HeaderMap& headers); std::unique_ptr lastRequestHeaders(); void setResponseHeaders(std::unique_ptr&& response_headers); - Http::TestHeaderMapImpl responseHeaders(); + Http::TestResponseHeaderMapImpl responseHeaders(); const bool allow_incomplete_streams_{false}; private: diff --git a/test/integration/fake_upstream.cc b/test/integration/fake_upstream.cc index 67217d6cb32f..5b824a6c872e 100644 --- a/test/integration/fake_upstream.cc +++ b/test/integration/fake_upstream.cc @@ -217,8 +217,8 @@ void FakeStream::startGrpcStream() { } void 
FakeStream::finishGrpcStream(Grpc::Status::GrpcStatus status) { - encodeTrailers( - Http::TestHeaderMapImpl{{"grpc-status", std::to_string(static_cast(status))}}); + encodeTrailers(Http::TestResponseTrailerMapImpl{ + {"grpc-status", std::to_string(static_cast(status))}}); } // The TestHttp1ServerConnectionImpl outlives its underlying Network::Connection diff --git a/test/integration/header_integration_test.cc b/test/integration/header_integration_test.cc index c7218ca716ff..15c39fd9f01d 100644 --- a/test/integration/header_integration_test.cc +++ b/test/integration/header_integration_test.cc @@ -419,20 +419,21 @@ class HeaderIntegrationTest } protected: - void performRequest(Http::TestHeaderMapImpl&& request_headers, - Http::TestHeaderMapImpl&& expected_request_headers, - Http::TestHeaderMapImpl&& response_headers, - Http::TestHeaderMapImpl&& expected_response_headers) { + void performRequest(Http::TestRequestHeaderMapImpl&& request_headers, + Http::TestRequestHeaderMapImpl&& expected_request_headers, + Http::TestResponseHeaderMapImpl&& response_headers, + Http::TestResponseHeaderMapImpl&& expected_response_headers) { registerTestServerPorts({"http"}); codec_client_ = makeHttpConnection(makeClientConnection(lookupPort("http"))); auto response = sendRequestAndWaitForResponse(request_headers, 0, response_headers, 0); - compareHeaders(upstream_request_->headers(), expected_request_headers); - compareHeaders(response->headers(), expected_response_headers); + compareHeaders(Http::TestRequestHeaderMapImpl(upstream_request_->headers()), + expected_request_headers); + compareHeaders(Http::TestResponseHeaderMapImpl(response->headers()), expected_response_headers); } - void compareHeaders(Http::TestHeaderMapImpl&& headers, - Http::TestHeaderMapImpl& expected_headers) { + template + void compareHeaders(Headers&& headers, ExpectedHeaders& expected_headers) { headers.remove(Envoy::Http::LowerCaseString{"content-length"}); headers.remove(Envoy::Http::LowerCaseString{"date"}); if (!routerSuppressEnvoyHeaders()) { @@ -462,26 +463,26 @@ INSTANTIATE_TEST_SUITE_P( TEST_P(HeaderIntegrationTest, TestRequestAndResponseHeaderPassThrough) { initializeFilter(HeaderMode::Append, false); performRequest( - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", "/"}, {":scheme", "http"}, {":authority", "no-headers.com"}, {"x-request-foo", "downstram"}, }, - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":authority", "no-headers.com"}, {"x-request-foo", "downstram"}, {":path", "/"}, {":method", "GET"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"content-length", "0"}, {":status", "200"}, {"x-return-foo", "upstream"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"x-return-foo", "upstream"}, {":status", "200"}, @@ -493,7 +494,7 @@ TEST_P(HeaderIntegrationTest, TestRequestAndResponseHeaderPassThrough) { TEST_P(HeaderIntegrationTest, TestVirtualHostAppendHeaderManipulation) { initializeFilter(HeaderMode::Append, false); performRequest( - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", "/vhost-only"}, {":scheme", "http"}, @@ -501,21 +502,21 @@ TEST_P(HeaderIntegrationTest, TestVirtualHostAppendHeaderManipulation) { {"x-vhost-request", "downstream"}, {"x-vhost-request-remove", "downstream"}, }, - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":authority", "vhost-headers.com"}, {"x-vhost-request", "downstream"}, {"x-vhost-request", 
"vhost"}, {":path", "/vhost-only"}, {":method", "GET"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"content-length", "0"}, {":status", "200"}, {"x-vhost-response", "upstream"}, {"x-vhost-response-remove", "upstream"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"x-vhost-response", "upstream"}, {"x-vhost-response", "vhost"}, @@ -527,7 +528,7 @@ TEST_P(HeaderIntegrationTest, TestVirtualHostAppendHeaderManipulation) { TEST_P(HeaderIntegrationTest, TestVirtualHostReplaceHeaderManipulation) { initializeFilter(HeaderMode::Replace, false); performRequest( - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", "/vhost-only"}, {":scheme", "http"}, @@ -535,21 +536,21 @@ TEST_P(HeaderIntegrationTest, TestVirtualHostReplaceHeaderManipulation) { {"x-vhost-request", "downstream"}, {"x-unmodified", "downstream"}, }, - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":authority", "vhost-headers.com"}, {"x-unmodified", "downstream"}, {"x-vhost-request", "vhost"}, {":path", "/vhost-only"}, {":method", "GET"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"content-length", "0"}, {":status", "200"}, {"x-vhost-response", "upstream"}, {"x-unmodified", "upstream"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"x-unmodified", "upstream"}, {"x-vhost-response", "vhost"}, @@ -561,7 +562,7 @@ TEST_P(HeaderIntegrationTest, TestVirtualHostReplaceHeaderManipulation) { TEST_P(HeaderIntegrationTest, TestRouteAppendHeaderManipulation) { initializeFilter(HeaderMode::Append, false); performRequest( - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", "/route-only"}, {":scheme", "http"}, @@ -569,21 +570,21 @@ TEST_P(HeaderIntegrationTest, TestRouteAppendHeaderManipulation) { {"x-route-request", "downstream"}, {"x-route-request-remove", "downstream"}, }, - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":authority", "route-headers.com"}, {"x-route-request", "downstream"}, {"x-route-request", "route"}, {":path", "/route-only"}, {":method", "GET"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"content-length", "0"}, {":status", "200"}, {"x-route-response", "upstream"}, {"x-route-response-remove", "upstream"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"x-route-response", "upstream"}, {"x-route-response", "route"}, @@ -595,7 +596,7 @@ TEST_P(HeaderIntegrationTest, TestRouteAppendHeaderManipulation) { TEST_P(HeaderIntegrationTest, TestRouteReplaceHeaderManipulation) { initializeFilter(HeaderMode::Replace, false); performRequest( - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", "/route-only"}, {":scheme", "http"}, @@ -604,14 +605,14 @@ TEST_P(HeaderIntegrationTest, TestRouteReplaceHeaderManipulation) { {"x-route-request-remove", "downstream"}, {"x-unmodified", "downstream"}, }, - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":authority", "route-headers.com"}, {"x-unmodified", "downstream"}, {"x-route-request", "route"}, {":path", "/route-only"}, {":method", "GET"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"content-length", "0"}, {":status", "200"}, @@ -619,7 +620,7 @@ TEST_P(HeaderIntegrationTest, TestRouteReplaceHeaderManipulation) { {"x-route-response-remove", "upstream"}, 
{"x-unmodified", "upstream"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"x-unmodified", "upstream"}, {"x-route-response", "route"}, @@ -631,7 +632,7 @@ TEST_P(HeaderIntegrationTest, TestRouteReplaceHeaderManipulation) { TEST_P(HeaderIntegrationTest, TestVirtualHostAndRouteAppendHeaderManipulation) { initializeFilter(HeaderMode::Append, false); performRequest( - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", "/vhost-and-route"}, {":scheme", "http"}, @@ -641,7 +642,7 @@ TEST_P(HeaderIntegrationTest, TestVirtualHostAndRouteAppendHeaderManipulation) { {"x-route-request", "downstream"}, {"x-route-request-remove", "downstream"}, }, - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":authority", "vhost-headers.com"}, {"x-vhost-request", "downstream"}, {"x-route-request", "downstream"}, @@ -650,7 +651,7 @@ TEST_P(HeaderIntegrationTest, TestVirtualHostAndRouteAppendHeaderManipulation) { {":path", "/vhost-and-route"}, {":method", "GET"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"content-length", "0"}, {":status", "200"}, @@ -659,7 +660,7 @@ TEST_P(HeaderIntegrationTest, TestVirtualHostAndRouteAppendHeaderManipulation) { {"x-route-response", "upstream"}, {"x-route-response-remove", "upstream"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"x-vhost-response", "upstream"}, {"x-route-response", "upstream"}, @@ -673,7 +674,7 @@ TEST_P(HeaderIntegrationTest, TestVirtualHostAndRouteAppendHeaderManipulation) { TEST_P(HeaderIntegrationTest, TestVirtualHostAndRouteReplaceHeaderManipulation) { initializeFilter(HeaderMode::Replace, false); performRequest( - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", "/vhost-and-route"}, {":scheme", "http"}, @@ -682,7 +683,7 @@ TEST_P(HeaderIntegrationTest, TestVirtualHostAndRouteReplaceHeaderManipulation) {"x-route-request", "downstream"}, {"x-unmodified", "request"}, }, - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":authority", "vhost-headers.com"}, {"x-unmodified", "request"}, {"x-route-request", "route"}, @@ -690,7 +691,7 @@ TEST_P(HeaderIntegrationTest, TestVirtualHostAndRouteReplaceHeaderManipulation) {":path", "/vhost-and-route"}, {":method", "GET"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"content-length", "0"}, {":status", "200"}, @@ -698,7 +699,7 @@ TEST_P(HeaderIntegrationTest, TestVirtualHostAndRouteReplaceHeaderManipulation) {"x-route-response", "upstream"}, {"x-unmodified", "response"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"x-unmodified", "response"}, {"x-route-response", "route"}, @@ -712,7 +713,7 @@ TEST_P(HeaderIntegrationTest, TestVirtualHostAndRouteReplaceHeaderManipulation) TEST_P(HeaderIntegrationTest, TestRouteConfigVirtualHostAndRouteAppendHeaderManipulation) { initializeFilter(HeaderMode::Append, true); performRequest( - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", "/vhost-and-route"}, {":scheme", "http"}, @@ -724,7 +725,7 @@ TEST_P(HeaderIntegrationTest, TestRouteConfigVirtualHostAndRouteAppendHeaderMani {"x-route-request", "downstream"}, {"x-route-request-remove", "downstream"}, }, - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":authority", "vhost-headers.com"}, {"x-routeconfig-request", "downstream"}, {"x-vhost-request", "downstream"}, @@ -735,7 +736,7 
@@ TEST_P(HeaderIntegrationTest, TestRouteConfigVirtualHostAndRouteAppendHeaderMani {":path", "/vhost-and-route"}, {":method", "GET"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"content-length", "0"}, {":status", "200"}, @@ -746,7 +747,7 @@ TEST_P(HeaderIntegrationTest, TestRouteConfigVirtualHostAndRouteAppendHeaderMani {"x-route-response", "upstream"}, {"x-route-response-remove", "upstream"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"x-routeconfig-response", "upstream"}, {"x-vhost-response", "upstream"}, @@ -763,7 +764,7 @@ TEST_P(HeaderIntegrationTest, TestRouteConfigVirtualHostAndRouteAppendHeaderMani TEST_P(HeaderIntegrationTest, TestRouteConfigVirtualHostAndRouteReplaceHeaderManipulation) { initializeFilter(HeaderMode::Replace, true); performRequest( - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", "/vhost-and-route"}, {":scheme", "http"}, @@ -773,7 +774,7 @@ TEST_P(HeaderIntegrationTest, TestRouteConfigVirtualHostAndRouteReplaceHeaderMan {"x-route-request", "downstream"}, {"x-unmodified", "request"}, }, - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":authority", "vhost-headers.com"}, {"x-unmodified", "request"}, {"x-route-request", "route"}, @@ -782,7 +783,7 @@ TEST_P(HeaderIntegrationTest, TestRouteConfigVirtualHostAndRouteReplaceHeaderMan {":path", "/vhost-and-route"}, {":method", "GET"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"content-length", "0"}, {":status", "200"}, @@ -791,7 +792,7 @@ TEST_P(HeaderIntegrationTest, TestRouteConfigVirtualHostAndRouteReplaceHeaderMan {"x-route-response", "upstream"}, {"x-unmodified", "response"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"x-unmodified", "response"}, {"x-route-response", "route"}, @@ -806,7 +807,7 @@ TEST_P(HeaderIntegrationTest, TestRouteConfigVirtualHostAndRouteReplaceHeaderMan TEST_P(HeaderIntegrationTest, TestRouteConfigVirtualHostRouteAndClusterAppendHeaderManipulation) { initializeFilter(HeaderMode::Append, true); performRequest( - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", "/vhost-route-and-weighted-clusters"}, {":scheme", "http"}, @@ -820,7 +821,7 @@ TEST_P(HeaderIntegrationTest, TestRouteConfigVirtualHostRouteAndClusterAppendHea {"x-weighted-cluster-request", "downstream"}, {"x-weighted-cluster-request-remove", "downstream"}, }, - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":authority", "vhost-headers.com"}, {"x-routeconfig-request", "downstream"}, {"x-vhost-request", "downstream"}, @@ -833,7 +834,7 @@ TEST_P(HeaderIntegrationTest, TestRouteConfigVirtualHostRouteAndClusterAppendHea {":path", "/vhost-route-and-weighted-clusters"}, {":method", "GET"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"content-length", "0"}, {":status", "200"}, @@ -846,7 +847,7 @@ TEST_P(HeaderIntegrationTest, TestRouteConfigVirtualHostRouteAndClusterAppendHea {"x-weighted-cluster-response", "upstream"}, {"x-weighted-cluster-response-remove", "upstream"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"x-routeconfig-response", "upstream"}, {"x-vhost-response", "upstream"}, @@ -865,7 +866,7 @@ TEST_P(HeaderIntegrationTest, TestRouteConfigVirtualHostRouteAndClusterAppendHea TEST_P(HeaderIntegrationTest, 
TestRouteConfigVirtualHostRouteAndClusterReplaceHeaderManipulation) { initializeFilter(HeaderMode::Replace, true); performRequest( - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", "/vhost-route-and-weighted-clusters"}, {":scheme", "http"}, @@ -876,7 +877,7 @@ TEST_P(HeaderIntegrationTest, TestRouteConfigVirtualHostRouteAndClusterReplaceHe {"x-weighted-cluster-request", "downstream"}, {"x-unmodified", "request"}, }, - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":authority", "vhost-headers.com"}, {"x-unmodified", "request"}, {"x-weighted-cluster-request", "weighted-cluster-1"}, @@ -886,7 +887,7 @@ TEST_P(HeaderIntegrationTest, TestRouteConfigVirtualHostRouteAndClusterReplaceHe {":path", "/vhost-route-and-weighted-clusters"}, {":method", "GET"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"content-length", "0"}, {":status", "200"}, @@ -896,7 +897,7 @@ TEST_P(HeaderIntegrationTest, TestRouteConfigVirtualHostRouteAndClusterReplaceHe {"x-weighted-cluster-response", "upstream"}, {"x-unmodified", "response"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"x-unmodified", "response"}, {"x-weighted-cluster-response", "weighted-cluster-1"}, @@ -912,7 +913,7 @@ TEST_P(HeaderIntegrationTest, TestDynamicHeaders) { prepareEDS(); initializeFilter(HeaderMode::Replace, true); performRequest( - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", "/vhost-route-and-weighted-clusters"}, {":scheme", "http"}, @@ -923,7 +924,7 @@ TEST_P(HeaderIntegrationTest, TestDynamicHeaders) { {"x-weighted-cluster-request", "downstream"}, {"x-unmodified", "request"}, }, - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":authority", "vhost-headers.com"}, {"x-unmodified", "request"}, {"x-weighted-cluster-request", "weighted-cluster-1"}, @@ -933,7 +934,7 @@ TEST_P(HeaderIntegrationTest, TestDynamicHeaders) { {":path", "/vhost-route-and-weighted-clusters"}, {":method", "GET"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"content-length", "0"}, {":status", "200"}, @@ -943,7 +944,7 @@ TEST_P(HeaderIntegrationTest, TestDynamicHeaders) { {"x-weighted-cluster-response", "upstream"}, {"x-unmodified", "response"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"x-unmodified", "response"}, {"x-weighted-cluster-response", "weighted-cluster-1"}, @@ -962,27 +963,27 @@ TEST_P(HeaderIntegrationTest, TestDynamicHeaders) { TEST_P(HeaderIntegrationTest, TestXFFParsing) { initializeFilter(HeaderMode::Replace, false); performRequest( - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", "/test"}, {":scheme", "http"}, {":authority", "xff-headers.com"}, {"x-forwarded-for", "1.2.3.4, 5.6.7.8 ,9.10.11.12"}, }, - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":authority", "xff-headers.com"}, {"x-forwarded-for", "1.2.3.4, 5.6.7.8 ,9.10.11.12"}, {"x-real-ip", "5.6.7.8"}, {":path", "/test"}, {":method", "GET"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"content-length", "0"}, {":status", "200"}, {"x-unmodified", "response"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"x-unmodified", "response"}, {":status", "200"}, @@ -994,7 +995,7 @@ TEST_P(HeaderIntegrationTest, TestXFFParsing) { TEST_P(HeaderIntegrationTest, TestAppendSameHeaders) { 
initializeFilter(HeaderMode::Append, false); performRequest( - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", "/test"}, {":scheme", "http"}, @@ -1002,7 +1003,7 @@ TEST_P(HeaderIntegrationTest, TestAppendSameHeaders) { {"authorization", "token3"}, {"x-foo", "value3"}, }, - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":authority", "append-same-headers.com"}, {":path", "/test"}, {":method", "GET"}, @@ -1011,13 +1012,13 @@ TEST_P(HeaderIntegrationTest, TestAppendSameHeaders) { {"x-foo", "value2"}, {"x-foo", "value1"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"content-length", "0"}, {":status", "200"}, {"x-unmodified", "response"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"x-unmodified", "response"}, {":status", "200"}, @@ -1031,23 +1032,23 @@ TEST_P(HeaderIntegrationTest, TestPathAndRouteWhenNormalizePathOff) { normalize_path_ = false; initializeFilter(HeaderMode::Append, false); performRequest( - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", "/private/../public"}, {":scheme", "http"}, {":authority", "path-sanitization.com"}, }, - Http::TestHeaderMapImpl{{":authority", "path-sanitization.com"}, - {":path", "/private/../public"}, - {":method", "GET"}, - {"x-site", "private"}}, - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{{":authority", "path-sanitization.com"}, + {":path", "/private/../public"}, + {":method", "GET"}, + {"x-site", "private"}}, + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"content-length", "0"}, {":status", "200"}, {"x-unmodified", "response"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"x-unmodified", "response"}, {":status", "200"}, @@ -1061,23 +1062,23 @@ TEST_P(HeaderIntegrationTest, TestPathAndRouteOnNormalizedPath) { normalize_path_ = true; initializeFilter(HeaderMode::Append, false); performRequest( - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", "/private/../public"}, {":scheme", "http"}, {":authority", "path-sanitization.com"}, }, - Http::TestHeaderMapImpl{{":authority", "path-sanitization.com"}, - {":path", "/public"}, - {":method", "GET"}, - {"x-site", "public"}}, - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{{":authority", "path-sanitization.com"}, + {":path", "/public"}, + {":method", "GET"}, + {"x-site", "public"}}, + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"content-length", "0"}, {":status", "200"}, {"x-unmodified", "response"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"x-unmodified", "response"}, {":status", "200"}, @@ -1088,7 +1089,7 @@ TEST_P(HeaderIntegrationTest, TestPathAndRouteOnNormalizedPath) { TEST_P(HeaderIntegrationTest, TestTeHeaderPassthrough) { initializeFilter(HeaderMode::Append, false); performRequest( - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", "/"}, {":scheme", "http"}, @@ -1097,20 +1098,20 @@ TEST_P(HeaderIntegrationTest, TestTeHeaderPassthrough) { {"connection", "te, close"}, {"te", "trailers"}, }, - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":authority", "no-headers.com"}, {":path", "/"}, {":method", "GET"}, {"x-request-foo", "downstram"}, {"te", "trailers"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"content-length", "0"}, {":status", "200"}, {"x-return-foo", 
"upstream"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"x-return-foo", "upstream"}, {":status", "200"}, @@ -1122,7 +1123,7 @@ TEST_P(HeaderIntegrationTest, TestTeHeaderPassthrough) { TEST_P(HeaderIntegrationTest, TestTeHeaderSanitized) { initializeFilter(HeaderMode::Append, false); performRequest( - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", "/"}, {":scheme", "http"}, @@ -1134,19 +1135,19 @@ TEST_P(HeaderIntegrationTest, TestTeHeaderSanitized) { {"sam", "bar"}, {"will", "baz"}, }, - Http::TestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl{ {":authority", "no-headers.com"}, {":path", "/"}, {":method", "GET"}, {"x-request-foo", "downstram"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"content-length", "0"}, {":status", "200"}, {"x-return-foo", "upstream"}, }, - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {"server", "envoy"}, {"x-return-foo", "upstream"}, {":status", "200"}, diff --git a/test/integration/http2_upstream_integration_test.cc b/test/integration/http2_upstream_integration_test.cc index b06547333cf9..8839a8737f0b 100644 --- a/test/integration/http2_upstream_integration_test.cc +++ b/test/integration/http2_upstream_integration_test.cc @@ -88,7 +88,7 @@ void Http2UpstreamIntegrationTest::bidirectionalStreaming(uint32_t bytes) { ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); // Finish the response. - upstream_request_->encodeTrailers(Http::TestHeaderMapImpl{{"trailer", "bar"}}); + upstream_request_->encodeTrailers(Http::TestResponseTrailerMapImpl{{"trailer", "bar"}}); response->waitForEndStream(); EXPECT_TRUE(response->complete()); } @@ -386,7 +386,7 @@ TEST_P(Http2UpstreamIntegrationTest, TestManyResponseHeadersRejected) { initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); - Http::TestHeaderMapImpl many_headers(default_response_headers_); + Http::TestResponseHeaderMapImpl many_headers(default_response_headers_); for (int i = 0; i < 100; i++) { many_headers.addCopy("many", std::string(1, 'a')); } @@ -431,7 +431,7 @@ TEST_P(Http2UpstreamIntegrationTest, LargeResponseHeadersRejected) { initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); - Http::TestHeaderMapImpl large_headers(default_response_headers_); + Http::TestResponseHeaderMapImpl large_headers(default_response_headers_); large_headers.addCopy("large", std::string(60 * 1024, 'a')); auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); waitForNextUpstreamRequest(); diff --git a/test/integration/http_integration.cc b/test/integration/http_integration.cc index 13689d1add69..de202248ef26 100644 --- a/test/integration/http_integration.cc +++ b/test/integration/http_integration.cc @@ -1292,7 +1292,7 @@ void HttpIntegrationTest::testMaxStreamDurationWithRetry(bool invoke_retry_upstr EXPECT_EQ("408", response->headers().getStatusValue()); } else { - Http::TestHeaderMapImpl response_headers{{":status", "200"}}; + Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}}; upstream_request_->encodeHeaders(response_headers, true); response->waitForHeaders(); diff --git a/test/integration/http_timeout_integration_test.cc b/test/integration/http_timeout_integration_test.cc index 33dea2c83128..4592533656f0 100644 --- a/test/integration/http_timeout_integration_test.cc +++ b/test/integration/http_timeout_integration_test.cc @@ -299,7 +299,7 @@ TEST_P(HttpTimeoutIntegrationTest, 
PerTryTimeoutWithoutGlobalTimeout) { ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); // Encode 200 response headers for the first (timed out) request. - Http::TestHeaderMapImpl response_headers{{":status", "200"}}; + Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}}; upstream_request_->encodeHeaders(response_headers, true); response->waitForHeaders(); diff --git a/test/integration/integration_test.cc b/test/integration/integration_test.cc index 1ce77d956f7b..7a4d262d302f 100644 --- a/test/integration/integration_test.cc +++ b/test/integration/integration_test.cc @@ -500,7 +500,7 @@ TEST_P(IntegrationTest, Http09WithKeepalive) { initialize(); reinterpret_cast(fake_upstreams_.front().get()) ->setResponseHeaders(std::make_unique( - Http::TestHeaderMapImpl({{":status", "200"}, {"content-length", "0"}}))); + Http::TestResponseHeaderMapImpl({{":status", "200"}, {"content-length", "0"}}))); std::string response; sendRawHttpAndWaitForResponse(lookupPort("http"), "GET /\r\nConnection: keep-alive\r\n\r\n", &response, true); @@ -588,7 +588,7 @@ TEST_P(IntegrationTest, Http10WithHostandKeepAliveAndContentLengthAndLws) { initialize(); reinterpret_cast(fake_upstreams_.front().get()) ->setResponseHeaders(std::make_unique( - Http::TestHeaderMapImpl({{":status", "200"}, {"content-length", "10"}}))); + Http::TestResponseHeaderMapImpl({{":status", "200"}, {"content-length", "10"}}))); std::string response; sendRawHttpAndWaitForResponse(lookupPort("http"), "GET / HTTP/1.0\r\nHost: foo.com \r\nConnection:Keep-alive\r\n\r\n", @@ -820,10 +820,10 @@ TEST_P(IntegrationTest, TestHead) { codec_client_ = makeHttpConnection(lookupPort("http")); - Http::TestHeaderMapImpl head_request{{":method", "HEAD"}, - {":path", "/test/long/url"}, - {":scheme", "http"}, - {":authority", "host"}}; + Http::TestRequestHeaderMapImpl head_request{{":method", "HEAD"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}}; // Without an explicit content length, assume we chunk for HTTP/1.1 auto response = sendRequestAndWaitForResponse(head_request, 0, default_response_headers_, 0); @@ -836,7 +836,8 @@ TEST_P(IntegrationTest, TestHead) { EXPECT_EQ(0, response->body().size()); // Preserve explicit content length. - Http::TestHeaderMapImpl content_length_response{{":status", "200"}, {"content-length", "12"}}; + Http::TestResponseHeaderMapImpl content_length_response{{":status", "200"}, + {"content-length", "12"}}; response = sendRequestAndWaitForResponse(head_request, 0, content_length_response, 0); ASSERT_TRUE(response->complete()); EXPECT_THAT(response->headers(), HttpStatusIs("200")); @@ -1224,7 +1225,7 @@ TEST_P(IntegrationTest, TestFloodUpstreamErrors) { // Set an Upstream reply with an invalid content-length, which will be rejected by the Envoy. 
auto response_headers = std::make_unique( - Http::TestHeaderMapImpl({{":status", "200"}, {"content-length", "invalid"}})); + Http::TestResponseHeaderMapImpl({{":status", "200"}, {"content-length", "invalid"}})); reinterpret_cast(fake_upstreams_.front().get()) ->setResponseHeaders(std::move(response_headers)); diff --git a/test/integration/listener_lds_integration_test.cc b/test/integration/listener_lds_integration_test.cc index bba29eaa20fb..4c37ac53a0de 100644 --- a/test/integration/listener_lds_integration_test.cc +++ b/test/integration/listener_lds_integration_test.cc @@ -249,10 +249,10 @@ TEST_P(ListenerIntegrationTest, BasicSuccess) { codec_client_ = makeHttpConnection(lookupPort(listener_name_)); int response_size = 800; int request_size = 10; - Http::TestHeaderMapImpl response_headers{{":status", "200"}, - {"server_id", "cluster_0, backend_0"}}; + Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}, + {"server_id", "cluster_0, backend_0"}}; auto response = sendRequestAndWaitForResponse( - Http::TestHeaderMapImpl{ + Http::TestResponseHeaderMapImpl{ {":method", "GET"}, {":path", "/"}, {":authority", "host"}, {":scheme", "http"}}, request_size, response_headers, response_size, /*cluster_0*/ 0); verifyResponse(std::move(response), "200", response_headers, std::string(response_size, 'a')); diff --git a/test/integration/redirect_integration_test.cc b/test/integration/redirect_integration_test.cc index 90be3c730b1d..f569a3d63f14 100644 --- a/test/integration/redirect_integration_test.cc +++ b/test/integration/redirect_integration_test.cc @@ -214,7 +214,7 @@ TEST_P(RedirectIntegrationTest, InternalRedirectToDestinationWithBody) { EXPECT_EQ("authority2", upstream_request_->headers().getHostValue()); EXPECT_EQ("via_value", upstream_request_->headers().getViaValue()); - Http::TestHeaderMapImpl response_with_big_body( + Http::TestResponseHeaderMapImpl response_with_big_body( {{":status", "200"}, {"content-length", "2000000"}}); upstream_request_->encodeHeaders(response_with_big_body, false); upstream_request_->encodeData(2000000, true); diff --git a/test/integration/scoped_rds_integration_test.cc b/test/integration/scoped_rds_integration_test.cc index 20688daf5ac6..5e786482c4a2 100644 --- a/test/integration/scoped_rds_integration_test.cc +++ b/test/integration/scoped_rds_integration_test.cc @@ -278,7 +278,7 @@ route_configuration_name: {} {":scheme", "http"}, {"Addr", "x-foo-key=xyz-route"}}); response->waitForEndStream(); - verifyResponse(std::move(response), "404", Http::TestHeaderMapImpl{}, ""); + verifyResponse(std::move(response), "404", Http::TestResponseHeaderMapImpl{}, ""); cleanupUpstreamAndDownstream(); // Test "foo-route" and 'bar-route' both gets routed to cluster_0. @@ -349,7 +349,7 @@ route_configuration_name: {} {":scheme", "http"}, {"Addr", "x-foo-key=foo-route"}}); response->waitForEndStream(); - verifyResponse(std::move(response), "404", Http::TestHeaderMapImpl{}, ""); + verifyResponse(std::move(response), "404", Http::TestResponseHeaderMapImpl{}, ""); cleanupUpstreamAndDownstream(); // Add a new scope foo_scope4. const std::string& scope_route4 = @@ -366,7 +366,7 @@ route_configuration_name: {} response->waitForEndStream(); // Get 404 because RDS hasn't pushed route configuration "foo_route4" yet. // But scope is found and the Router::NullConfigImpl is returned. 
- verifyResponse(std::move(response), "404", Http::TestHeaderMapImpl{}, ""); + verifyResponse(std::move(response), "404", Http::TestResponseHeaderMapImpl{}, ""); cleanupUpstreamAndDownstream(); // RDS updated foo_route4, requests with scope key "xyz-route" now hit cluster_1. @@ -410,7 +410,7 @@ route_configuration_name: foo_route1 {":scheme", "http"}, {"Addr", "x-foo-key=foo"}}); response->waitForEndStream(); - verifyResponse(std::move(response), "404", Http::TestHeaderMapImpl{}, ""); + verifyResponse(std::move(response), "404", Http::TestResponseHeaderMapImpl{}, ""); cleanupUpstreamAndDownstream(); // SRDS update fixed the problem. diff --git a/test/integration/utility.cc b/test/integration/utility.cc index 5a0ffa0ab577..c969a5b8a2ef 100644 --- a/test/integration/utility.cc +++ b/test/integration/utility.cc @@ -84,7 +84,7 @@ IntegrationUtil::makeSingleRequest(const Network::Address::InstanceConstSharedPt Http::RequestEncoder& encoder = client.newStream(*response); encoder.getStream().addCallbacks(*response); - Http::RequestHeaderMapImpl headers; + Http::TestRequestHeaderMapImpl headers; headers.setMethod(method); headers.setPath(url); headers.setHost(host); diff --git a/test/mocks/http/mocks.h b/test/mocks/http/mocks.h index 4aa3c9b9e839..0f7a4063d419 100644 --- a/test/mocks/http/mocks.h +++ b/test/mocks/http/mocks.h @@ -530,7 +530,7 @@ class IsSubsetOfHeadersMatcherImpl : public testing::MatcherInterfacevalue(); @@ -133,7 +133,7 @@ TEST_P(AdminInstanceTest, EscapeHelpTextWithPunctuation) { } TEST_P(AdminInstanceTest, HelpUsesFormForMutations) { - Http::ResponseHeaderMapImpl header_map; + Http::TestResponseHeaderMapImpl header_map; Buffer::OwnedImpl response; EXPECT_EQ(Http::Code::OK, getCallback("/", header_map, response)); const std::string logging_action = "
(); msg->set_value("bar"); @@ -210,7 +210,7 @@ TEST_P(AdminInstanceTest, ConfigDumpMaintainsOrder) { // Run it multiple times and validate that order is preserved. for (size_t i = 0; i < 5; i++) { Buffer::OwnedImpl response; - Http::ResponseHeaderMapImpl header_map; + Http::TestResponseHeaderMapImpl header_map; EXPECT_EQ(Http::Code::OK, getCallback("/config_dump", header_map, response)); const std::string output = response.toString(); EXPECT_EQ(expected_json, output); @@ -222,7 +222,7 @@ TEST_P(AdminInstanceTest, ConfigDumpMaintainsOrder) { // dynamic in the JSON with ?resource=dynamic_listeners. TEST_P(AdminInstanceTest, ConfigDumpFiltersByResource) { Buffer::OwnedImpl response; - Http::ResponseHeaderMapImpl header_map; + Http::TestResponseHeaderMapImpl header_map; auto listeners = admin_.getConfigTracker().add("listeners", [] { auto msg = std::make_unique(); auto dyn_listener = msg->add_dynamic_listeners(); @@ -253,7 +253,7 @@ TEST_P(AdminInstanceTest, ConfigDumpFiltersByResource) { // dynamic in the JSON with ?mask=dynamic_listeners. TEST_P(AdminInstanceTest, ConfigDumpFiltersByMask) { Buffer::OwnedImpl response; - Http::ResponseHeaderMapImpl header_map; + Http::TestResponseHeaderMapImpl header_map; auto listeners = admin_.getConfigTracker().add("listeners", [] { auto msg = std::make_unique(); auto dyn_listener = msg->add_dynamic_listeners(); @@ -306,7 +306,7 @@ ProtobufTypes::MessagePtr testDumpClustersConfig() { // only the desired resource and the fields specified in the mask. TEST_P(AdminInstanceTest, ConfigDumpFiltersByResourceAndMask) { Buffer::OwnedImpl response; - Http::ResponseHeaderMapImpl header_map; + Http::TestResponseHeaderMapImpl header_map; auto clusters = admin_.getConfigTracker().add("clusters", testDumpClustersConfig); const std::string expected_json = R"EOF({ "configs": [ @@ -335,7 +335,7 @@ TEST_P(AdminInstanceTest, ConfigDumpFiltersByResourceAndMask) { // of the config dump and the fields present in the mask query parameter. TEST_P(AdminInstanceTest, ConfigDumpNonExistentMask) { Buffer::OwnedImpl response; - Http::ResponseHeaderMapImpl header_map; + Http::TestResponseHeaderMapImpl header_map; auto clusters = admin_.getConfigTracker().add("clusters", testDumpClustersConfig); const std::string expected_json = R"EOF({ "configs": [ @@ -355,7 +355,7 @@ TEST_P(AdminInstanceTest, ConfigDumpNonExistentMask) { // resource query parameter. TEST_P(AdminInstanceTest, ConfigDumpNonExistentResource) { Buffer::OwnedImpl response; - Http::ResponseHeaderMapImpl header_map; + Http::TestResponseHeaderMapImpl header_map; auto listeners = admin_.getConfigTracker().add("listeners", [] { auto msg = std::make_unique(); msg->set_value("listeners_config"); @@ -368,7 +368,7 @@ TEST_P(AdminInstanceTest, ConfigDumpNonExistentResource) { // repeated field. 
TEST_P(AdminInstanceTest, ConfigDumpResourceNotRepeated) { Buffer::OwnedImpl response; - Http::ResponseHeaderMapImpl header_map; + Http::TestResponseHeaderMapImpl header_map; auto clusters = admin_.getConfigTracker().add("clusters", [] { auto msg = std::make_unique(); msg->set_version_info("foo"); @@ -461,7 +461,7 @@ TEST_P(AdminInstanceTest, ClustersJson) { ON_CALL(*host, priority()).WillByDefault(Return(6)); Buffer::OwnedImpl response; - Http::ResponseHeaderMapImpl header_map; + Http::TestResponseHeaderMapImpl header_map; EXPECT_EQ(Http::Code::OK, getCallback("/clusters?format=json", header_map, response)); std::string output_json = response.toString(); envoy::admin::v3::Clusters output_proto; diff --git a/test/server/admin/logs_handler_test.cc b/test/server/admin/logs_handler_test.cc index 5f6a8aa3724c..9fc99c0c6225 100644 --- a/test/server/admin/logs_handler_test.cc +++ b/test/server/admin/logs_handler_test.cc @@ -8,7 +8,7 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, AdminInstanceTest, TestUtility::ipTestParamsToString); TEST_P(AdminInstanceTest, ReopenLogs) { - Http::ResponseHeaderMapImpl header_map; + Http::TestResponseHeaderMapImpl header_map; Buffer::OwnedImpl response; testing::NiceMock access_log_manager_; diff --git a/test/server/admin/profiling_handler_test.cc b/test/server/admin/profiling_handler_test.cc index 721fd5dc1e68..353bc780e87b 100644 --- a/test/server/admin/profiling_handler_test.cc +++ b/test/server/admin/profiling_handler_test.cc @@ -12,7 +12,7 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, AdminInstanceTest, TEST_P(AdminInstanceTest, AdminCpuProfiler) { Buffer::OwnedImpl data; - Http::ResponseHeaderMapImpl header_map; + Http::TestResponseHeaderMapImpl header_map; // Can only get code coverage of AdminImpl::handlerCpuProfiler stopProfiler with // a real profiler linked in (successful call to startProfiler). 
@@ -31,7 +31,7 @@ TEST_P(AdminInstanceTest, AdminCpuProfiler) { TEST_P(AdminInstanceTest, AdminHeapProfilerOnRepeatedRequest) { Buffer::OwnedImpl data; - Http::ResponseHeaderMapImpl header_map; + Http::TestResponseHeaderMapImpl header_map; auto repeatResultCode = Http::Code::BadRequest; #ifndef PROFILER_AVAILABLE repeatResultCode = Http::Code::NotImplemented; @@ -46,7 +46,7 @@ TEST_P(AdminInstanceTest, AdminHeapProfilerOnRepeatedRequest) { TEST_P(AdminInstanceTest, AdminHeapProfiler) { Buffer::OwnedImpl data; - Http::ResponseHeaderMapImpl header_map; + Http::TestResponseHeaderMapImpl header_map; // The below flow need to begin with the profiler not running Profiler::Heap::stopProfiler(); @@ -68,7 +68,7 @@ TEST_P(AdminInstanceTest, AdminBadProfiler) { Buffer::OwnedImpl data; AdminImpl admin_bad_profile_path(TestEnvironment::temporaryPath("some/unlikely/bad/path.prof"), server_); - Http::ResponseHeaderMapImpl header_map; + Http::TestResponseHeaderMapImpl header_map; const absl::string_view post = Http::Headers::get().MethodValues.Post; request_headers_.setMethod(post); admin_filter_.decodeHeaders(request_headers_, false); diff --git a/test/server/admin/runtime_handler_test.cc b/test/server/admin/runtime_handler_test.cc index 6ac7a40b4a87..ec8c0953fc13 100644 --- a/test/server/admin/runtime_handler_test.cc +++ b/test/server/admin/runtime_handler_test.cc @@ -8,7 +8,7 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, AdminInstanceTest, TestUtility::ipTestParamsToString); TEST_P(AdminInstanceTest, Runtime) { - Http::ResponseHeaderMapImpl header_map; + Http::TestResponseHeaderMapImpl header_map; Buffer::OwnedImpl response; Runtime::MockSnapshot snapshot; @@ -75,7 +75,7 @@ TEST_P(AdminInstanceTest, Runtime) { } TEST_P(AdminInstanceTest, RuntimeModify) { - Http::ResponseHeaderMapImpl header_map; + Http::TestResponseHeaderMapImpl header_map; Buffer::OwnedImpl response; Runtime::MockLoader loader; @@ -101,14 +101,14 @@ TEST_P(AdminInstanceTest, RuntimeModifyParamsInBody) { EXPECT_CALL(loader, mergeValues(overrides)).Times(1); const std::string body = fmt::format("{}={}", key, value); - Http::ResponseHeaderMapImpl header_map; + Http::TestResponseHeaderMapImpl header_map; Buffer::OwnedImpl response; EXPECT_EQ(Http::Code::OK, runCallback("/runtime_modify", header_map, response, "POST", body)); EXPECT_EQ("OK\n", response.toString()); } TEST_P(AdminInstanceTest, RuntimeModifyNoArguments) { - Http::ResponseHeaderMapImpl header_map; + Http::TestResponseHeaderMapImpl header_map; Buffer::OwnedImpl response; EXPECT_EQ(Http::Code::BadRequest, postCallback("/runtime_modify", header_map, response)); diff --git a/test/server/admin/server_info_handler_test.cc b/test/server/admin/server_info_handler_test.cc index fe3276b53485..d9ef53339f95 100644 --- a/test/server/admin/server_info_handler_test.cc +++ b/test/server/admin/server_info_handler_test.cc @@ -18,7 +18,7 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, AdminInstanceTest, TestUtility::ipTestParamsToString); TEST_P(AdminInstanceTest, ContextThatReturnsNullCertDetails) { - Http::ResponseHeaderMapImpl header_map; + Http::TestResponseHeaderMapImpl header_map; Buffer::OwnedImpl response; // Setup a context that returns null cert details. 
@@ -47,7 +47,7 @@ TEST_P(AdminInstanceTest, ContextThatReturnsNullCertDetails) { } TEST_P(AdminInstanceTest, Memory) { - Http::ResponseHeaderMapImpl header_map; + Http::TestResponseHeaderMapImpl header_map; Buffer::OwnedImpl response; EXPECT_EQ(Http::Code::OK, getCallback("/memory", header_map, response)); const std::string output_json = response.toString(); @@ -65,7 +65,7 @@ TEST_P(AdminInstanceTest, GetReadyRequest) { ON_CALL(server_, initManager()).WillByDefault(ReturnRef(initManager)); { - Http::ResponseHeaderMapImpl response_headers; + Http::TestResponseHeaderMapImpl response_headers; std::string body; ON_CALL(initManager, state()).WillByDefault(Return(Init::Manager::State::Initialized)); @@ -75,7 +75,7 @@ TEST_P(AdminInstanceTest, GetReadyRequest) { } { - Http::ResponseHeaderMapImpl response_headers; + Http::TestResponseHeaderMapImpl response_headers; std::string body; ON_CALL(initManager, state()).WillByDefault(Return(Init::Manager::State::Uninitialized)); @@ -85,7 +85,7 @@ TEST_P(AdminInstanceTest, GetReadyRequest) { EXPECT_THAT(std::string(response_headers.getContentTypeValue()), HasSubstr("text/plain")); } - Http::ResponseHeaderMapImpl response_headers; + Http::TestResponseHeaderMapImpl response_headers; std::string body; ON_CALL(initManager, state()).WillByDefault(Return(Init::Manager::State::Initializing)); @@ -108,7 +108,7 @@ TEST_P(AdminInstanceTest, GetRequest) { ON_CALL(server_.hot_restart_, version()).WillByDefault(Return("foo_version")); { - Http::ResponseHeaderMapImpl response_headers; + Http::TestResponseHeaderMapImpl response_headers; std::string body; ON_CALL(initManager, state()).WillByDefault(Return(Init::Manager::State::Initialized)); @@ -126,7 +126,7 @@ TEST_P(AdminInstanceTest, GetRequest) { } { - Http::ResponseHeaderMapImpl response_headers; + Http::TestResponseHeaderMapImpl response_headers; std::string body; ON_CALL(initManager, state()).WillByDefault(Return(Init::Manager::State::Uninitialized)); @@ -142,7 +142,7 @@ TEST_P(AdminInstanceTest, GetRequest) { EXPECT_EQ(server_info_proto.command_line_options().service_cluster(), "cluster"); } - Http::ResponseHeaderMapImpl response_headers; + Http::TestResponseHeaderMapImpl response_headers; std::string body; ON_CALL(initManager, state()).WillByDefault(Return(Init::Manager::State::Initializing)); @@ -159,7 +159,7 @@ TEST_P(AdminInstanceTest, GetRequest) { } TEST_P(AdminInstanceTest, PostRequest) { - Http::ResponseHeaderMapImpl response_headers; + Http::TestResponseHeaderMapImpl response_headers; std::string body; EXPECT_NO_LOGS(EXPECT_EQ(Http::Code::OK, admin_.request("/healthcheck/fail", "POST", response_headers, body))); diff --git a/test/server/admin/stats_handler_test.cc b/test/server/admin/stats_handler_test.cc index f6c326f909e5..ce80844b635e 100644 --- a/test/server/admin/stats_handler_test.cc +++ b/test/server/admin/stats_handler_test.cc @@ -511,7 +511,7 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, AdminInstanceTest, TestUtility::ipTestParamsToString); TEST_P(AdminInstanceTest, StatsInvalidRegex) { - Http::ResponseHeaderMapImpl header_map; + Http::TestResponseHeaderMapImpl header_map; Buffer::OwnedImpl data; EXPECT_LOG_CONTAINS( "error", "Invalid regex: ", @@ -526,7 +526,7 @@ TEST_P(AdminInstanceTest, StatsInvalidRegex) { } TEST_P(AdminInstanceTest, PrometheusStatsInvalidRegex) { - Http::ResponseHeaderMapImpl header_map; + Http::TestResponseHeaderMapImpl header_map; Buffer::OwnedImpl data; EXPECT_LOG_CONTAINS( "error", ": *.ptest", @@ -549,7 +549,7 @@ TEST_P(AdminInstanceTest, TracingStatsDisabled) { } 
TEST_P(AdminInstanceTest, GetRequestJson) { - Http::ResponseHeaderMapImpl response_headers; + Http::TestResponseHeaderMapImpl response_headers; std::string body; EXPECT_EQ(Http::Code::OK, admin_.request("/stats?format=json", "GET", response_headers, body)); EXPECT_THAT(body, HasSubstr("{\"stats\":[")); @@ -557,7 +557,7 @@ TEST_P(AdminInstanceTest, GetRequestJson) { } TEST_P(AdminInstanceTest, RecentLookups) { - Http::ResponseHeaderMapImpl response_headers; + Http::TestResponseHeaderMapImpl response_headers; std::string body; // Recent lookup tracking is disabled by default. diff --git a/test/test_common/utility_test.cc b/test/test_common/utility_test.cc index a229892e0398..648d65cda365 100644 --- a/test/test_common/utility_test.cc +++ b/test/test_common/utility_test.cc @@ -7,23 +7,24 @@ namespace Envoy { TEST(HeaderMapEqualIgnoreOrder, ActuallyEqual) { - Http::TestHeaderMapImpl lhs{{":method", "GET"}, {":path", "/"}, {":authority", "host"}}; - Http::TestHeaderMapImpl rhs{{":method", "GET"}, {":path", "/"}, {":authority", "host"}}; + Http::TestRequestHeaderMapImpl lhs{{":method", "GET"}, {":path", "/"}, {":authority", "host"}}; + Http::TestRequestHeaderMapImpl rhs{{":method", "GET"}, {":path", "/"}, {":authority", "host"}}; EXPECT_TRUE(TestUtility::headerMapEqualIgnoreOrder(lhs, rhs)); EXPECT_EQ(lhs, rhs); } TEST(HeaderMapEqualIgnoreOrder, IgnoreOrder) { - Http::TestHeaderMapImpl lhs{{":method", "GET"}, {":authority", "host"}, {":path", "/"}}; - Http::TestHeaderMapImpl rhs{{":method", "GET"}, {":path", "/"}, {":authority", "host"}}; + Http::TestRequestHeaderMapImpl lhs{{":method", "GET"}, {":authority", "host"}, {":path", "/"}}; + Http::TestRequestHeaderMapImpl rhs{{":method", "GET"}, {":path", "/"}, {":authority", "host"}}; EXPECT_TRUE(TestUtility::headerMapEqualIgnoreOrder(lhs, rhs)); EXPECT_THAT(&lhs, HeaderMapEqualIgnoreOrder(&rhs)); EXPECT_FALSE(lhs == rhs); } TEST(HeaderMapEqualIgnoreOrder, NotEqual) { - Http::TestHeaderMapImpl lhs{{":method", "GET"}, {":authority", "host"}, {":authority", "host"}}; - Http::TestHeaderMapImpl rhs{{":method", "GET"}, {":authority", "host"}}; + Http::TestRequestHeaderMapImpl lhs{ + {":method", "GET"}, {":authority", "host"}, {":authority", "host"}}; + Http::TestRequestHeaderMapImpl rhs{{":method", "GET"}, {":authority", "host"}}; EXPECT_FALSE(TestUtility::headerMapEqualIgnoreOrder(lhs, rhs)); } From 636d40391ac114a8be960c9251af3849781a293f Mon Sep 17 00:00:00 2001 From: foreseeable Date: Fri, 5 Jun 2020 13:52:59 -0400 Subject: [PATCH 303/909] docs: provide a consistent definition of overprovisioning_factor (#11429) Signed-off-by: Muge Chen --- api/envoy/config/endpoint/v3/endpoint.proto | 2 +- .../upstream/load_balancing/overprovisioning.rst | 4 ++-- generated_api_shadow/envoy/config/endpoint/v3/endpoint.proto | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/api/envoy/config/endpoint/v3/endpoint.proto b/api/envoy/config/endpoint/v3/endpoint.proto index 63869fafcb54..e58c327156cf 100644 --- a/api/envoy/config/endpoint/v3/endpoint.proto +++ b/api/envoy/config/endpoint/v3/endpoint.proto @@ -80,7 +80,7 @@ message ClusterLoadAssignment { // Priority levels and localities are considered overprovisioned with this // factor (in percentage). This means that we don't consider a priority - // level or locality unhealthy until the percentage of healthy hosts + // level or locality unhealthy until the fraction of healthy hosts // multiplied by the overprovisioning factor drops below 100. 
// With the default value 140(1.4), Envoy doesn't consider a priority level // or a locality unhealthy until their percentage of healthy hosts drops diff --git a/docs/root/intro/arch_overview/upstream/load_balancing/overprovisioning.rst b/docs/root/intro/arch_overview/upstream/load_balancing/overprovisioning.rst index 7162b68f3387..e8fa36127fdf 100644 --- a/docs/root/intro/arch_overview/upstream/load_balancing/overprovisioning.rst +++ b/docs/root/intro/arch_overview/upstream/load_balancing/overprovisioning.rst @@ -5,6 +5,6 @@ Overprovisioning Factor Priority levels and localities are considered overprovisioned with :ref:`this percentage `. Envoy doesn't consider a priority level or locality unavailable until the -percentage of available hosts multiplied by the overprovisioning factor drops -below 100. The default value is 1.4, so a priority level or locality will not be +fraction of available hosts multiplied by the overprovisioning factor drops +below 100. The default value is 140 (in percentage, which means 140%), so a priority level or locality will not be considered unavailable until the percentage of available endpoints goes below 72%. diff --git a/generated_api_shadow/envoy/config/endpoint/v3/endpoint.proto b/generated_api_shadow/envoy/config/endpoint/v3/endpoint.proto index e34b07619ab0..7233d5f9561a 100644 --- a/generated_api_shadow/envoy/config/endpoint/v3/endpoint.proto +++ b/generated_api_shadow/envoy/config/endpoint/v3/endpoint.proto @@ -78,7 +78,7 @@ message ClusterLoadAssignment { // Priority levels and localities are considered overprovisioned with this // factor (in percentage). This means that we don't consider a priority - // level or locality unhealthy until the percentage of healthy hosts + // level or locality unhealthy until the fraction of healthy hosts // multiplied by the overprovisioning factor drops below 100. // With the default value 140(1.4), Envoy doesn't consider a priority level // or a locality unhealthy until their percentage of healthy hosts drops From abfa72479267de2e57e3e042f9b303d8d47c77ef Mon Sep 17 00:00:00 2001 From: jianwen612 <55008549+jianwen612@users.noreply.github.com> Date: Fri, 5 Jun 2020 14:32:36 -0500 Subject: [PATCH 304/909] Fix fuzz OSS-issue-21276 (#11450) The issue happened because in "regex_rewrite {}" field of the config input, there is a "substitution" field but there is no "pattern" field. The fix is to add "required" to "RegexMatcher pattern" field of api/envoy/type/matcher/v3/regex.proto. Signed-off-by: jianwen --- api/envoy/type/matcher/v3/regex.proto | 2 +- api/envoy/type/matcher/v4alpha/regex.proto | 2 +- generated_api_shadow/envoy/type/matcher/v3/regex.proto | 2 +- generated_api_shadow/envoy/type/matcher/v4alpha/regex.proto | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/api/envoy/type/matcher/v3/regex.proto b/api/envoy/type/matcher/v3/regex.proto index e318cb5457d9..5a7922ec6f62 100644 --- a/api/envoy/type/matcher/v3/regex.proto +++ b/api/envoy/type/matcher/v3/regex.proto @@ -62,7 +62,7 @@ message RegexMatchAndSubstitute { // so as to replace just one occurrence of a pattern. Capture groups can be // used in the pattern to extract portions of the subject string, and then // referenced in the substitution string. - RegexMatcher pattern = 1; + RegexMatcher pattern = 1 [(validate.rules).message = {required: true}]; // The string that should be substituted into matching portions of the // subject string during a substitution operation to produce a new string. 
diff --git a/api/envoy/type/matcher/v4alpha/regex.proto b/api/envoy/type/matcher/v4alpha/regex.proto index f94a85e778e4..bfd8c3dd3b4f 100644 --- a/api/envoy/type/matcher/v4alpha/regex.proto +++ b/api/envoy/type/matcher/v4alpha/regex.proto @@ -57,7 +57,7 @@ message RegexMatchAndSubstitute { // so as to replace just one occurrence of a pattern. Capture groups can be // used in the pattern to extract portions of the subject string, and then // referenced in the substitution string. - RegexMatcher pattern = 1; + RegexMatcher pattern = 1 [(validate.rules).message = {required: true}]; // The string that should be substituted into matching portions of the // subject string during a substitution operation to produce a new string. diff --git a/generated_api_shadow/envoy/type/matcher/v3/regex.proto b/generated_api_shadow/envoy/type/matcher/v3/regex.proto index e318cb5457d9..5a7922ec6f62 100644 --- a/generated_api_shadow/envoy/type/matcher/v3/regex.proto +++ b/generated_api_shadow/envoy/type/matcher/v3/regex.proto @@ -62,7 +62,7 @@ message RegexMatchAndSubstitute { // so as to replace just one occurrence of a pattern. Capture groups can be // used in the pattern to extract portions of the subject string, and then // referenced in the substitution string. - RegexMatcher pattern = 1; + RegexMatcher pattern = 1 [(validate.rules).message = {required: true}]; // The string that should be substituted into matching portions of the // subject string during a substitution operation to produce a new string. diff --git a/generated_api_shadow/envoy/type/matcher/v4alpha/regex.proto b/generated_api_shadow/envoy/type/matcher/v4alpha/regex.proto index ed038ec3abb4..3d7d3f029c0e 100644 --- a/generated_api_shadow/envoy/type/matcher/v4alpha/regex.proto +++ b/generated_api_shadow/envoy/type/matcher/v4alpha/regex.proto @@ -62,7 +62,7 @@ message RegexMatchAndSubstitute { // so as to replace just one occurrence of a pattern. Capture groups can be // used in the pattern to extract portions of the subject string, and then // referenced in the substitution string. - RegexMatcher pattern = 1; + RegexMatcher pattern = 1 [(validate.rules).message = {required: true}]; // The string that should be substituted into matching portions of the // subject string during a substitution operation to produce a new string. 
From 6d918aa4386a090cbc00883bf46caa5e4d2af56e Mon Sep 17 00:00:00 2001 From: Sam Flattery <44659644+samflattery@users.noreply.github.com> Date: Fri, 5 Jun 2020 20:33:15 +0100 Subject: [PATCH 305/909] fuzz: add fuzz tests for utility functions (#11424) Add fuzz tests for Http::Utility::parseAuthority and StringUtil::findToken Signed-off-by: Sam Flattery --- test/common/common/utility_fuzz_test.cc | 14 ++++++++++++++ .../http/utility_corpus/parse_authority_string_0 | 1 + .../http/utility_corpus/parse_authority_string_1 | 1 + .../http/utility_corpus/parse_authority_string_2 | 1 + .../http/utility_corpus/parse_authority_string_3 | 1 + .../http/utility_corpus/parse_authority_string_4 | 1 + .../{pare_cookie_value_5 => parse_cookie_value_5} | 0 .../http/utility_corpus/percent_decoding_string_0 | 2 +- .../http/utility_corpus/percent_decoding_string_1 | 2 +- .../http/utility_corpus/percent_decoding_string_2 | 2 +- test/common/http/utility_fuzz.proto | 1 + test/common/http/utility_fuzz_test.cc | 5 +++++ 12 files changed, 28 insertions(+), 3 deletions(-) create mode 100644 test/common/http/utility_corpus/parse_authority_string_0 create mode 100644 test/common/http/utility_corpus/parse_authority_string_1 create mode 100644 test/common/http/utility_corpus/parse_authority_string_2 create mode 100644 test/common/http/utility_corpus/parse_authority_string_3 create mode 100644 test/common/http/utility_corpus/parse_authority_string_4 rename test/common/http/utility_corpus/{pare_cookie_value_5 => parse_cookie_value_5} (100%) diff --git a/test/common/common/utility_fuzz_test.cc b/test/common/common/utility_fuzz_test.cc index e7daf80a452e..c4f468c91725 100644 --- a/test/common/common/utility_fuzz_test.cc +++ b/test/common/common/utility_fuzz_test.cc @@ -53,6 +53,20 @@ DEFINE_FUZZER(const uint8_t* buf, size_t len) { StringUtil::cropRight(string_buffer.substr(0, split_point), string_buffer.substr(split_point)); } + { + const std::string string_buffer(reinterpret_cast(buf), len); + + // sample random bit to use as the whitespace flag + bool trimWhitespace = split_point & 1; + const size_t split_point2 = + len > 1 ? 
reinterpret_cast(buf)[1] % len : split_point; + const size_t split1 = std::min(split_point, split_point2); + const size_t split2 = std::max(split_point, split_point2); + + StringUtil::findToken(string_buffer.substr(0, split1), + string_buffer.substr(split1, split2 - split2), + string_buffer.substr(split2), trimWhitespace); + } } } diff --git a/test/common/http/utility_corpus/parse_authority_string_0 b/test/common/http/utility_corpus/parse_authority_string_0 new file mode 100644 index 000000000000..d4cbd3049147 --- /dev/null +++ b/test/common/http/utility_corpus/parse_authority_string_0 @@ -0,0 +1 @@ +parse_authority_string: "1.2.3.4" diff --git a/test/common/http/utility_corpus/parse_authority_string_1 b/test/common/http/utility_corpus/parse_authority_string_1 new file mode 100644 index 000000000000..21904cf3c1e4 --- /dev/null +++ b/test/common/http/utility_corpus/parse_authority_string_1 @@ -0,0 +1 @@ +parse_authority_string: "[a:b:c:d::]:0" diff --git a/test/common/http/utility_corpus/parse_authority_string_2 b/test/common/http/utility_corpus/parse_authority_string_2 new file mode 100644 index 000000000000..6e472e09b0b9 --- /dev/null +++ b/test/common/http/utility_corpus/parse_authority_string_2 @@ -0,0 +1 @@ +parse_authority_string: "example.com" diff --git a/test/common/http/utility_corpus/parse_authority_string_3 b/test/common/http/utility_corpus/parse_authority_string_3 new file mode 100644 index 000000000000..369543b8883e --- /dev/null +++ b/test/common/http/utility_corpus/parse_authority_string_3 @@ -0,0 +1 @@ +parse_authority_string: "localhost:10000" diff --git a/test/common/http/utility_corpus/parse_authority_string_4 b/test/common/http/utility_corpus/parse_authority_string_4 new file mode 100644 index 000000000000..b4257b2544d5 --- /dev/null +++ b/test/common/http/utility_corpus/parse_authority_string_4 @@ -0,0 +1 @@ +parse_authority_string: "0.0.0.0:4000" diff --git a/test/common/http/utility_corpus/pare_cookie_value_5 b/test/common/http/utility_corpus/parse_cookie_value_5 similarity index 100% rename from test/common/http/utility_corpus/pare_cookie_value_5 rename to test/common/http/utility_corpus/parse_cookie_value_5 diff --git a/test/common/http/utility_corpus/percent_decoding_string_0 b/test/common/http/utility_corpus/percent_decoding_string_0 index af1fb389f2aa..0229e0d87f6d 100644 --- a/test/common/http/utility_corpus/percent_decoding_string_0 +++ b/test/common/http/utility_corpus/percent_decoding_string_0 @@ -1 +1 @@ -"too%20lar%20" +percent_decoding_string: "too%20lar%20" diff --git a/test/common/http/utility_corpus/percent_decoding_string_1 b/test/common/http/utility_corpus/percent_decoding_string_1 index 49b96d308acd..e1f4fe3f99b4 100644 --- a/test/common/http/utility_corpus/percent_decoding_string_1 +++ b/test/common/http/utility_corpus/percent_decoding_string_1 @@ -1 +1 @@ -"too%20larg%e" +percent_decoding_string: "too%20larg%e" diff --git a/test/common/http/utility_corpus/percent_decoding_string_2 b/test/common/http/utility_corpus/percent_decoding_string_2 index 77f241c09555..34e7e53257ee 100644 --- a/test/common/http/utility_corpus/percent_decoding_string_2 +++ b/test/common/http/utility_corpus/percent_decoding_string_2 @@ -1 +1 @@ -"too%20large%" +percent_decoding_string: "too%20large%" diff --git a/test/common/http/utility_fuzz.proto b/test/common/http/utility_fuzz.proto index 940be6f4e0f3..2f023d29911f 100644 --- a/test/common/http/utility_fuzz.proto +++ b/test/common/http/utility_fuzz.proto @@ -43,5 +43,6 @@ message UtilityTestCase { string find_query_string 
= 9 [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE, strict: false}]; CookieValue make_set_cookie_value = 10; + string parse_authority_string = 11; } } diff --git a/test/common/http/utility_fuzz_test.cc b/test/common/http/utility_fuzz_test.cc index 54d5ce8bfa1b..4e62e7c2edc1 100644 --- a/test/common/http/utility_fuzz_test.cc +++ b/test/common/http/utility_fuzz_test.cc @@ -66,6 +66,11 @@ DEFINE_PROTO_FUZZER(const test::common::http::UtilityTestCase& input) { max_age, cookie_value.httponly()); break; } + case test::common::http::UtilityTestCase::kParseAuthorityString: { + const auto& authority_string = input.parse_authority_string(); + Http::Utility::parseAuthority(authority_string); + break; + } default: // Nothing to do. From f0464a4a485ca9c6deb012982d9979232b116355 Mon Sep 17 00:00:00 2001 From: Sunjay Bhatia Date: Fri, 5 Jun 2020 15:34:52 -0400 Subject: [PATCH 306/909] Windows: Fixing straightforward issues with //test/common/network/... (#11423) Do not use SO_REUSEADDR on Windows as it does not behave the same as Linux and other BSD socket stacks Disable pipe mode test on Windows, see #11354 socket_option_factory_test no longer hard codes socket option value as struct linger consists of ushorts not ints on Windows allocate callback handles on heap instead of stack to work around MSVC enable address_impl_speed_test_benchmark_test additionally, tag new failing tests on Windows Signed-off-by: Sunjay Bhatia Co-authored-by: Sunjay Bhatia Signed-off-by: William A Rowe Jr Co-authored-by: William A Rowe Jr --- source/common/network/listen_socket_impl.cc | 5 +- test/common/network/BUILD | 4 -- test/common/network/address_impl_test.cc | 2 + test/common/network/filter_matcher_test.cc | 46 +++++++++---------- .../network/socket_option_factory_test.cc | 16 ++++--- test/extensions/filters/http/compressor/BUILD | 1 + .../filters/http/decompressor/BUILD | 1 + test/extensions/filters/http/grpc_web/BUILD | 1 + test/extensions/filters/udp/dns_filter/BUILD | 1 + test/integration/BUILD | 1 + test/mocks/server/mocks.h | 2 +- 11 files changed, 44 insertions(+), 36 deletions(-) diff --git a/source/common/network/listen_socket_impl.cc b/source/common/network/listen_socket_impl.cc index 20725bceb6ba..beed779224b1 100644 --- a/source/common/network/listen_socket_impl.cc +++ b/source/common/network/listen_socket_impl.cc @@ -56,12 +56,15 @@ void ListenSocketImpl::setupSocket(const Network::Socket::OptionsSharedPtr& opti template <> void NetworkListenSocket< NetworkSocketTrait>::setPrebindSocketOptions() { - +// On Windows, SO_REUSEADDR does not restrict subsequent bind calls when there is a listener as on +// Linux and later BSD socket stacks +#ifndef WIN32 int on = 1; auto& os_syscalls = Api::OsSysCallsSingleton::get(); Api::SysCallIntResult status = os_syscalls.setsockopt(io_handle_->fd(), SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)); RELEASE_ASSERT(status.rc_ != -1, "failed to set SO_REUSEADDR socket option"); +#endif } template <> diff --git a/test/common/network/BUILD b/test/common/network/BUILD index 495473031b6d..25e5efd4629f 100644 --- a/test/common/network/BUILD +++ b/test/common/network/BUILD @@ -33,7 +33,6 @@ envoy_cc_test_library( envoy_cc_test( name = "address_impl_test", srcs = ["address_impl_test.cc"], - tags = ["fails_on_windows"], deps = [ "//source/common/network:address_lib", "//source/common/network:utility_lib", @@ -59,7 +58,6 @@ envoy_cc_benchmark_binary( envoy_benchmark_test( name = "address_impl_speed_test_benchmark_test", benchmark_binary = "address_impl_speed_test", - tags = 
["fails_on_windows"], ) envoy_cc_test( @@ -167,7 +165,6 @@ envoy_cc_test( envoy_cc_test( name = "listen_socket_impl_test", srcs = ["listen_socket_impl_test.cc"], - tags = ["fails_on_windows"], deps = [ "//source/common/network:address_lib", "//source/common/network:listen_socket_lib", @@ -266,7 +263,6 @@ envoy_cc_test( name = "socket_option_factory_test", srcs = ["socket_option_factory_test.cc"], external_deps = ["abseil_str_format"], - tags = ["fails_on_windows"], deps = [ "//source/common/network:address_lib", "//source/common/network:socket_option_factory_lib", diff --git a/test/common/network/address_impl_test.cc b/test/common/network/address_impl_test.cc index 9142756dcc37..e8c2a1b33fc3 100644 --- a/test/common/network/address_impl_test.cc +++ b/test/common/network/address_impl_test.cc @@ -319,6 +319,7 @@ TEST(PipeInstanceTest, Basic) { EXPECT_EQ(nullptr, address.ip()); } +#ifndef WIN32 TEST(PipeInstanceTest, BasicPermission) { std::string path = TestEnvironment::unixDomainSocketPath("foo.sock"); @@ -342,6 +343,7 @@ TEST(PipeInstanceTest, BasicPermission) { << path << std::oct << "\t" << (stat_buf.st_mode & 07777) << std::dec << "\t" << (stat_buf.st_mode) << strerror(result.errno_); } +#endif TEST(PipeInstanceTest, PermissionFail) { NiceMock os_sys_calls; diff --git a/test/common/network/filter_matcher_test.cc b/test/common/network/filter_matcher_test.cc index 2668400adbc1..cd00d5cc7174 100644 --- a/test/common/network/filter_matcher_test.cc +++ b/test/common/network/filter_matcher_test.cc @@ -19,13 +19,13 @@ struct CallbackHandle { } // namespace class ListenerFilterMatcherTest : public testing::Test { public: - CallbackHandle createCallbackOnPort(int port) { - CallbackHandle handle; - handle.address_ = std::make_shared("127.0.0.1", port); - handle.socket_ = std::make_unique(); - handle.callback_ = std::make_unique(); - EXPECT_CALL(*handle.socket_, localAddress()).WillRepeatedly(ReturnRef(handle.address_)); - EXPECT_CALL(*handle.callback_, socket()).WillRepeatedly(ReturnRef(*handle.socket_)); + std::unique_ptr createCallbackOnPort(int port) { + auto handle = std::make_unique(); + handle->address_ = std::make_shared("127.0.0.1", port); + handle->socket_ = std::make_unique(); + handle->callback_ = std::make_unique(); + EXPECT_CALL(*(handle->socket_), localAddress()).WillRepeatedly(ReturnRef(handle->address_)); + EXPECT_CALL(*(handle->callback_), socket()).WillRepeatedly(ReturnRef(*(handle->socket_))); return handle; } envoy::config::listener::v3::ListenerFilterChainMatchPredicate createPortPredicate(int port_start, @@ -44,9 +44,9 @@ TEST_F(ListenerFilterMatcherTest, DstPortMatcher) { auto handle79 = createCallbackOnPort(79); auto handle80 = createCallbackOnPort(80); auto handle81 = createCallbackOnPort(81); - EXPECT_FALSE(matcher->matches(*handle79.callback_)); - EXPECT_TRUE(matcher->matches(*handle80.callback_)); - EXPECT_FALSE(matcher->matches(*handle81.callback_)); + EXPECT_FALSE(matcher->matches(*(handle79->callback_))); + EXPECT_TRUE(matcher->matches(*(handle80->callback_))); + EXPECT_FALSE(matcher->matches(*(handle81->callback_))); } TEST_F(ListenerFilterMatcherTest, AnyMatdcher) { @@ -56,9 +56,9 @@ TEST_F(ListenerFilterMatcherTest, AnyMatdcher) { auto handle79 = createCallbackOnPort(79); auto handle80 = createCallbackOnPort(80); auto handle81 = createCallbackOnPort(81); - EXPECT_TRUE(matcher->matches(*handle79.callback_)); - EXPECT_TRUE(matcher->matches(*handle80.callback_)); - EXPECT_TRUE(matcher->matches(*handle81.callback_)); + 
EXPECT_TRUE(matcher->matches(*(handle79->callback_))); + EXPECT_TRUE(matcher->matches(*(handle80->callback_))); + EXPECT_TRUE(matcher->matches(*(handle81->callback_))); } TEST_F(ListenerFilterMatcherTest, NotMatcher) { @@ -69,9 +69,9 @@ TEST_F(ListenerFilterMatcherTest, NotMatcher) { auto handle79 = createCallbackOnPort(79); auto handle80 = createCallbackOnPort(80); auto handle81 = createCallbackOnPort(81); - EXPECT_TRUE(matcher->matches(*handle79.callback_)); - EXPECT_FALSE(matcher->matches(*handle80.callback_)); - EXPECT_TRUE(matcher->matches(*handle81.callback_)); + EXPECT_TRUE(matcher->matches(*(handle79->callback_))); + EXPECT_FALSE(matcher->matches(*(handle80->callback_))); + EXPECT_TRUE(matcher->matches(*(handle81->callback_))); } TEST_F(ListenerFilterMatcherTest, OrMatcher) { @@ -87,9 +87,9 @@ TEST_F(ListenerFilterMatcherTest, OrMatcher) { auto handle443 = createCallbackOnPort(443); auto handle3306 = createCallbackOnPort(3306); - EXPECT_FALSE(matcher->matches(*handle3306.callback_)); - EXPECT_TRUE(matcher->matches(*handle80.callback_)); - EXPECT_TRUE(matcher->matches(*handle443.callback_)); + EXPECT_FALSE(matcher->matches(*(handle3306->callback_))); + EXPECT_TRUE(matcher->matches(*(handle80->callback_))); + EXPECT_TRUE(matcher->matches(*(handle443->callback_))); } TEST_F(ListenerFilterMatcherTest, AndMatcher) { @@ -105,9 +105,9 @@ TEST_F(ListenerFilterMatcherTest, AndMatcher) { auto handle443 = createCallbackOnPort(443); auto handle3306 = createCallbackOnPort(3306); - EXPECT_FALSE(matcher->matches(*handle3306.callback_)); - EXPECT_FALSE(matcher->matches(*handle80.callback_)); - EXPECT_TRUE(matcher->matches(*handle443.callback_)); + EXPECT_FALSE(matcher->matches(*(handle3306->callback_))); + EXPECT_FALSE(matcher->matches(*(handle80->callback_))); + EXPECT_TRUE(matcher->matches(*(handle443->callback_))); } } // namespace Network -} // namespace Envoy \ No newline at end of file +} // namespace Envoy diff --git a/test/common/network/socket_option_factory_test.cc b/test/common/network/socket_option_factory_test.cc index 280e522b667f..c364015d1804 100644 --- a/test/common/network/socket_option_factory_test.cc +++ b/test/common/network/socket_option_factory_test.cc @@ -130,13 +130,20 @@ TEST_F(SocketOptionFactoryTest, TestBuildLiteralOptions) { Protobuf::RepeatedPtrField socket_options_proto; Envoy::Protobuf::TextFormat::Parser parser; envoy::config::core::v3::SocketOption socket_option_proto; + struct linger expected_linger; + expected_linger.l_onoff = 1; + expected_linger.l_linger = 3456; + absl::string_view linger_bstr{reinterpret_cast(&expected_linger), + sizeof(struct linger)}; + std::string linger_bstr_formatted = testing::PrintToString(linger_bstr); static const char linger_option_format[] = R"proto( state: STATE_PREBIND level: %d name: %d - buf_value: "\x01\x00\x00\x00\x80\x0d\x00\x00" + buf_value: %s )proto"; - auto linger_option = absl::StrFormat(linger_option_format, SOL_SOCKET, SO_LINGER); + auto linger_option = + absl::StrFormat(linger_option_format, SOL_SOCKET, SO_LINGER, linger_bstr_formatted); ASSERT_TRUE(parser.ParseFromString(linger_option, &socket_option_proto)); *socket_options_proto.Add() = socket_option_proto; static const char keepalive_option_format[] = R"proto( @@ -156,11 +163,6 @@ TEST_F(SocketOptionFactoryTest, TestBuildLiteralOptions) { EXPECT_TRUE(option_details.has_value()); EXPECT_EQ(SOL_SOCKET, option_details->name_.level()); EXPECT_EQ(SO_LINGER, option_details->name_.option()); - struct linger expected_linger; - expected_linger.l_onoff = 1; - 
expected_linger.l_linger = 3456; - absl::string_view linger_bstr{reinterpret_cast(&expected_linger), - sizeof(struct linger)}; EXPECT_EQ(linger_bstr, option_details->value_); option_details = socket_options->at(1)->getOptionDetails( diff --git a/test/extensions/filters/http/compressor/BUILD b/test/extensions/filters/http/compressor/BUILD index 0da9087a4d99..149429e09a7e 100644 --- a/test/extensions/filters/http/compressor/BUILD +++ b/test/extensions/filters/http/compressor/BUILD @@ -31,6 +31,7 @@ envoy_extension_cc_test( "compressor_filter_integration_test.cc", ], extension_name = "envoy.filters.http.compressor", + tags = ["fails_on_windows"], deps = [ "//source/extensions/compression/gzip/compressor:config", "//source/extensions/compression/gzip/decompressor:config", diff --git a/test/extensions/filters/http/decompressor/BUILD b/test/extensions/filters/http/decompressor/BUILD index e6739858b251..da72c1dc7a9e 100644 --- a/test/extensions/filters/http/decompressor/BUILD +++ b/test/extensions/filters/http/decompressor/BUILD @@ -36,6 +36,7 @@ envoy_extension_cc_test( "decompressor_filter_integration_test.cc", ], extension_name = "envoy.filters.http.decompressor", + tags = ["fails_on_windows"], deps = [ "//source/extensions/compression/gzip/compressor:config", "//source/extensions/compression/gzip/decompressor:config", diff --git a/test/extensions/filters/http/grpc_web/BUILD b/test/extensions/filters/http/grpc_web/BUILD index b0f586a9c9c0..5cf7788382b4 100644 --- a/test/extensions/filters/http/grpc_web/BUILD +++ b/test/extensions/filters/http/grpc_web/BUILD @@ -39,6 +39,7 @@ envoy_extension_cc_test( name = "grpc_web_integration_test", srcs = ["grpc_web_filter_integration_test.cc"], extension_name = "envoy.filters.http.grpc_web", + tags = ["fails_on_windows"], deps = [ "//source/common/buffer:buffer_lib", "//source/common/http:header_map_lib", diff --git a/test/extensions/filters/udp/dns_filter/BUILD b/test/extensions/filters/udp/dns_filter/BUILD index 08455411d5ad..ffa82525a947 100644 --- a/test/extensions/filters/udp/dns_filter/BUILD +++ b/test/extensions/filters/udp/dns_filter/BUILD @@ -42,6 +42,7 @@ envoy_extension_cc_test( name = "dns_filter_integration_test", srcs = ["dns_filter_integration_test.cc"], extension_name = "envoy.filters.udp_listener.dns_filter", + tags = ["fails_on_windows"], deps = [ ":dns_filter_test_lib", "//source/extensions/filters/udp/dns_filter:config", diff --git a/test/integration/BUILD b/test/integration/BUILD index 98661cd8ee99..a8bc846e6492 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -1194,6 +1194,7 @@ envoy_cc_test( srcs = [ "local_reply_integration_test.cc", ], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", ":http_protocol_integration_lib", diff --git a/test/mocks/server/mocks.h b/test/mocks/server/mocks.h index 38d22ea9d798..1fb58dc9ea0b 100644 --- a/test/mocks/server/mocks.h +++ b/test/mocks/server/mocks.h @@ -691,7 +691,7 @@ class MockBootstrapExtensionFactory : public BootstrapExtensionFactory { MOCK_METHOD(BootstrapExtensionPtr, createBootstrapExtension, (const Protobuf::Message&, Configuration::ServerFactoryContext&), (override)); MOCK_METHOD(ProtobufTypes::MessagePtr, createEmptyConfigProto, (), (override)); - MOCK_METHOD(std::string, name, (), (const override)); + MOCK_METHOD(std::string, name, (), (const, override)); }; } // namespace Configuration From 8161b915026986de157cd90d6d1ba1534f8567be Mon Sep 17 00:00:00 2001 From: Dmitry Rozhkov Date: Sat, 6 Jun 2020 00:46:08 +0300 Subject: [PATCH 307/909] coverage: 
improve coverage for compressor filter (#11438) coverage: improve coverage for compressor filter Risk Level: Low Testing: generated coverage report locally Signed-off-by: Dmitry Rozhkov --- test/extensions/filters/http/compressor/BUILD | 21 +++++++++ .../filters/http/compressor/config_test.cc | 46 +++++++++++++++++++ .../compressor/mock_compressor_library.proto | 6 +++ test/per_file_coverage.sh | 1 - 4 files changed, 73 insertions(+), 1 deletion(-) create mode 100644 test/extensions/filters/http/compressor/config_test.cc create mode 100644 test/extensions/filters/http/compressor/mock_compressor_library.proto diff --git a/test/extensions/filters/http/compressor/BUILD b/test/extensions/filters/http/compressor/BUILD index 149429e09a7e..a76be193b710 100644 --- a/test/extensions/filters/http/compressor/BUILD +++ b/test/extensions/filters/http/compressor/BUILD @@ -1,6 +1,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_package", + "envoy_proto_library", ) load( "//test/extensions:extensions_build_system.bzl", @@ -41,3 +42,23 @@ envoy_extension_cc_test( "//test/test_common:utility_lib", ], ) + +envoy_proto_library( + name = "mock_config", + srcs = ["mock_compressor_library.proto"], +) + +envoy_extension_cc_test( + name = "config_test", + srcs = [ + "config_test.cc", + ], + extension_name = "envoy.filters.http.compressor", + deps = [ + ":mock_config_cc_proto", + "//source/extensions/filters/http/compressor:config", + "//test/mocks/runtime:runtime_mocks", + "//test/mocks/server:server_mocks", + "//test/test_common:utility_lib", + ], +) diff --git a/test/extensions/filters/http/compressor/config_test.cc b/test/extensions/filters/http/compressor/config_test.cc new file mode 100644 index 000000000000..c3f865108f81 --- /dev/null +++ b/test/extensions/filters/http/compressor/config_test.cc @@ -0,0 +1,46 @@ +#include "extensions/filters/http/compressor/config.h" + +#include "test/mocks/server/mocks.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace Compressor { +namespace { + +using testing::NiceMock; + +TEST(CompressorFilterFactoryTests, MissingCompressorLibraryConfig) { + const envoy::extensions::filters::http::compressor::v3::Compressor proto_config; + CompressorFilterFactory factory; + NiceMock context; + EXPECT_THROW_WITH_MESSAGE(factory.createFilterFactoryFromProto(proto_config, "stats", context), + EnvoyException, + "Compressor filter doesn't have compressor_library defined"); +} + +TEST(CompressorFilterFactoryTests, UnregisteredCompressorLibraryConfig) { + const std::string yaml_string = R"EOF( + compressor_library: + name: fake_compressor + typed_config: + "@type": type.googleapis.com/test.mock_compressor_library.Unregistered + )EOF"; + + envoy::extensions::filters::http::compressor::v3::Compressor proto_config; + TestUtility::loadFromYaml(yaml_string, proto_config); + CompressorFilterFactory factory; + NiceMock context; + EXPECT_THROW_WITH_MESSAGE(factory.createFilterFactoryFromProto(proto_config, "stats", context), + EnvoyException, + "Didn't find a registered implementation for type: " + "'test.mock_compressor_library.Unregistered'"); +} + +} // namespace +} // namespace Compressor +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/http/compressor/mock_compressor_library.proto b/test/extensions/filters/http/compressor/mock_compressor_library.proto new file mode 100644 index 000000000000..b6d5ea18d1c0 --- /dev/null +++ 
b/test/extensions/filters/http/compressor/mock_compressor_library.proto @@ -0,0 +1,6 @@ +syntax = "proto3"; + +package test.mock_compressor_library; + +message Unregistered { +} \ No newline at end of file diff --git a/test/per_file_coverage.sh b/test/per_file_coverage.sh index a7a3ecefd531..6f5368f024bb 100755 --- a/test/per_file_coverage.sh +++ b/test/per_file_coverage.sh @@ -19,7 +19,6 @@ declare -a KNOWN_LOW_COVERAGE=( "source/extensions/filters/http/cache/simple_http_cache:84.5" "source/extensions/filters/http/csrf:96.6" "source/extensions/filters/http/ip_tagging:92.0" -"source/extensions/filters/http/compressor:84.4" "source/extensions/filters/http/header_to_metadata:95.0" "source/extensions/filters/http/grpc_json_transcoder:93.3" "source/extensions/filters/http/aws_request_signing:93.3" From 4914d0cf4dbc3dd6a156927e64886b10e167be7c Mon Sep 17 00:00:00 2001 From: Stephan Zuercher Date: Fri, 5 Jun 2020 16:30:50 -0700 Subject: [PATCH 308/909] test: give hotrestart_test more time to run (#11478) Recent changes to the hot restart test (to use dynamic base id) make it a bit slower and it's hitting the test timeout. Give it more time and fix the bug where we didn't pass additional args to envoy_cc_test. Risk Level: low Testing: fixes tests Doc Changes: n/a Release Notes: n/a Signed-off-by: Stephan Zuercher --- bazel/envoy_test.bzl | 1 + test/integration/BUILD | 1 + 2 files changed, 2 insertions(+) diff --git a/bazel/envoy_test.bzl b/bazel/envoy_test.bzl index 0e439d84dd12..4f603671933b 100644 --- a/bazel/envoy_test.bzl +++ b/bazel/envoy_test.bzl @@ -313,6 +313,7 @@ def envoy_sh_test( data = srcs + data + cc_binary, tags = tags, deps = ["//test/test_common:environment_lib"] + cc_binary, + **kargs ) else: diff --git a/test/integration/BUILD b/test/integration/BUILD index a8bc846e6492..6016f55aa567 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -230,6 +230,7 @@ exports_files(["test_utility.sh"]) envoy_sh_test( name = "hotrestart_test", + size = "enormous", srcs = envoy_select_hot_restart([ "hotrestart_test.sh", ]), From 46b7f43ea77e6d464c0fc7011bc43f7ac7602e99 Mon Sep 17 00:00:00 2001 From: fpliu233 <62083774+fpliu233@users.noreply.github.com> Date: Fri, 5 Jun 2020 20:02:37 -0700 Subject: [PATCH 309/909] docs: correct the API link for http ExtAuthz filter documentation (#11476) The original one was pointed to network instead of http one Signed-off-by: Fangpeng Liu --- docs/root/configuration/http/http_filters/ext_authz_filter.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/root/configuration/http/http_filters/ext_authz_filter.rst b/docs/root/configuration/http/http_filters/ext_authz_filter.rst index 44334684abe9..5da29d891ee4 100644 --- a/docs/root/configuration/http/http_filters/ext_authz_filter.rst +++ b/docs/root/configuration/http/http_filters/ext_authz_filter.rst @@ -3,7 +3,7 @@ External Authorization ====================== * External authorization :ref:`architecture overview ` -* :ref:`HTTP filter v3 API reference ` +* :ref:`HTTP filter v3 API reference ` * This filter should be configured with the name *envoy.filters.http.ext_authz*. 
The external authorization filter calls an external gRPC or HTTP service to check whether an incoming From 9461f6bad1044133d81d69eba44f20f93771aa2e Mon Sep 17 00:00:00 2001 From: Sam Flattery <44659644+samflattery@users.noreply.github.com> Date: Sat, 6 Jun 2020 04:03:48 +0100 Subject: [PATCH 310/909] fuzz: fix ossfuzz crash in router tests (#11467) Signed-off-by: Sam Flattery --- .../config/route/v3/route_components.proto | 2 ++ .../route/v4alpha/route_components.proto | 2 ++ .../config/route/v3/route_components.proto | 2 ++ .../route/v4alpha/route_components.proto | 2 ++ ...minimized-route_fuzz_test-5748492233605120 | 30 +++++++++++++++++++ test/common/router/route_fuzz_test.cc | 7 +---- 6 files changed, 39 insertions(+), 6 deletions(-) create mode 100644 test/common/router/route_corpus/clusterfuzz-testcase-minimized-route_fuzz_test-5748492233605120 diff --git a/api/envoy/config/route/v3/route_components.proto b/api/envoy/config/route/v3/route_components.proto index f927f582bd1c..33fa2779f727 100644 --- a/api/envoy/config/route/v3/route_components.proto +++ b/api/envoy/config/route/v3/route_components.proto @@ -225,6 +225,8 @@ message Route { // [#not-implemented-hide:] // If true, a filter will define the action (e.g., it could dynamically generate the // RouteAction). + // [#comment: TODO(samflattery): Remove cleanup in route_fuzz_test.cc when + // implemented] FilterAction filter_action = 17; } diff --git a/api/envoy/config/route/v4alpha/route_components.proto b/api/envoy/config/route/v4alpha/route_components.proto index b23efe34e77b..02161ffd48ef 100644 --- a/api/envoy/config/route/v4alpha/route_components.proto +++ b/api/envoy/config/route/v4alpha/route_components.proto @@ -225,6 +225,8 @@ message Route { // [#not-implemented-hide:] // If true, a filter will define the action (e.g., it could dynamically generate the // RouteAction). + // [#comment: TODO(samflattery): Remove cleanup in route_fuzz_test.cc when + // implemented] FilterAction filter_action = 17; } diff --git a/generated_api_shadow/envoy/config/route/v3/route_components.proto b/generated_api_shadow/envoy/config/route/v3/route_components.proto index 76c75d647c56..db7dec95dd26 100644 --- a/generated_api_shadow/envoy/config/route/v3/route_components.proto +++ b/generated_api_shadow/envoy/config/route/v3/route_components.proto @@ -224,6 +224,8 @@ message Route { // [#not-implemented-hide:] // If true, a filter will define the action (e.g., it could dynamically generate the // RouteAction). + // [#comment: TODO(samflattery): Remove cleanup in route_fuzz_test.cc when + // implemented] FilterAction filter_action = 17; } diff --git a/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto b/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto index 15a7ee7050b7..f8622decd12a 100644 --- a/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto +++ b/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto @@ -225,6 +225,8 @@ message Route { // [#not-implemented-hide:] // If true, a filter will define the action (e.g., it could dynamically generate the // RouteAction). 
+ // [#comment: TODO(samflattery): Remove cleanup in route_fuzz_test.cc when + // implemented] FilterAction filter_action = 17; } diff --git a/test/common/router/route_corpus/clusterfuzz-testcase-minimized-route_fuzz_test-5748492233605120 b/test/common/router/route_corpus/clusterfuzz-testcase-minimized-route_fuzz_test-5748492233605120 new file mode 100644 index 000000000000..10c1b0a3d452 --- /dev/null +++ b/test/common/router/route_corpus/clusterfuzz-testcase-minimized-route_fuzz_test-5748492233605120 @@ -0,0 +1,30 @@ +config { + virtual_hosts { + name: "[" + domains: "bat.com" + routes { + match { + safe_regex { + google_re2 { + } + regex: "." + } + } + filter_action { + } + } + } +} +headers { + headers { + key: ":authority" + value: "bat.com" + } + headers { + key: ":path" + value: "b" + } + headers { + key: "x-forwarded-proto" + } +} diff --git a/test/common/router/route_fuzz_test.cc b/test/common/router/route_fuzz_test.cc index 089424a2744d..f43540976c79 100644 --- a/test/common/router/route_fuzz_test.cc +++ b/test/common/router/route_fuzz_test.cc @@ -22,12 +22,7 @@ cleanRouteConfig(envoy::config::route::v3::RouteConfiguration route_config) { [](envoy::config::route::v3::VirtualHost& virtual_host) { auto routes = virtual_host.mutable_routes(); for (int i = 0; i < routes->size();) { - // Erase routes that use a regex matcher. This is deprecated and may cause - // crashes when wildcards are matched against very long headers. See - // https://github.com/envoyproxy/envoy/issues/7728. - if (routes->Get(i).match().path_specifier_case() == - envoy::config::route::v3::RouteMatch::PathSpecifierCase:: - kHiddenEnvoyDeprecatedRegex) { + if (routes->Get(i).has_filter_action()) { routes->erase(routes->begin() + i); } else { ++i; From 0920ba7e59487fce34559a7781563c9593e8298c Mon Sep 17 00:00:00 2001 From: Florin Coras Date: Sat, 6 Jun 2020 08:45:48 -0700 Subject: [PATCH 311/909] network: cleanup socket related todos (#11457) - use SocketInterface instead of raw socket calls in address_impl - update local address on zero port bind - initialize local address on connect Signed-off-by: Florin Coras --- source/common/network/BUILD | 2 ++ source/common/network/address_impl.cc | 17 +++----------- source/common/network/connection_impl.cc | 7 ------ source/common/network/listen_socket_impl.cc | 12 +--------- source/common/network/socket_impl.cc | 22 +++++++++++++----- .../quic_listeners/quiche/envoy_quic_utils.cc | 7 +----- test/test_common/network_utility.cc | 23 ++++--------------- 7 files changed, 27 insertions(+), 63 deletions(-) diff --git a/source/common/network/BUILD b/source/common/network/BUILD index 043e63783d70..1073d7590b2d 100644 --- a/source/common/network/BUILD +++ b/source/common/network/BUILD @@ -17,12 +17,14 @@ envoy_cc_library( hdrs = [ "address_impl.h", "io_socket_handle_impl.h", + "socket_interface_impl.h", ], deps = [ ":io_socket_error_lib", "//include/envoy/buffer:buffer_interface", "//include/envoy/network:address_interface", "//include/envoy/network:io_handle_interface", + "//include/envoy/network:socket_interface", "//source/common/api:os_sys_calls_lib", "//source/common/common:assert_lib", "//source/common/common:utility_lib", diff --git a/source/common/network/address_impl.cc b/source/common/network/address_impl.cc index 01ce620dbd4d..1f3dcbb3fb74 100644 --- a/source/common/network/address_impl.cc +++ b/source/common/network/address_impl.cc @@ -7,10 +7,10 @@ #include "envoy/common/exception.h" #include "envoy/common/platform.h" -#include "common/api/os_sys_calls_impl.h" 
#include "common/common/assert.h" #include "common/common/fmt.h" #include "common/common/utility.h" +#include "common/network/socket_interface_impl.h" namespace Envoy { namespace Network { @@ -18,21 +18,10 @@ namespace Address { namespace { -// Check if an IP family is supported on this machine. -bool ipFamilySupported(int domain) { - Api::OsSysCalls& os_sys_calls = Api::OsSysCallsSingleton::get(); - const Api::SysCallSocketResult result = os_sys_calls.socket(domain, SOCK_STREAM, 0); - if (SOCKET_VALID(result.rc_)) { - RELEASE_ASSERT(os_sys_calls.close(result.rc_).rc_ == 0, - absl::StrCat("Fail to close fd: response code ", result.rc_)); - } - return SOCKET_VALID(result.rc_); -} - // Validate that IPv4 is supported on this platform, raise an exception for the // given address if not. void validateIpv4Supported(const std::string& address) { - static const bool supported = Network::Address::ipFamilySupported(AF_INET); + static const bool supported = SocketInterfaceSingleton::get().ipFamilySupported(AF_INET); if (!supported) { throw EnvoyException( fmt::format("IPv4 addresses are not supported on this machine: {}", address)); @@ -42,7 +31,7 @@ void validateIpv4Supported(const std::string& address) { // Validate that IPv6 is supported on this platform, raise an exception for the // given address if not. void validateIpv6Supported(const std::string& address) { - static const bool supported = Network::Address::ipFamilySupported(AF_INET6); + static const bool supported = SocketInterfaceSingleton::get().ipFamilySupported(AF_INET6); if (!supported) { throw EnvoyException( fmt::format("IPv6 addresses are not supported on this machine: {}", address)); diff --git a/source/common/network/connection_impl.cc b/source/common/network/connection_impl.cc index 57b00be35548..138a1bbbb8c5 100644 --- a/source/common/network/connection_impl.cc +++ b/source/common/network/connection_impl.cc @@ -769,13 +769,6 @@ void ClientConnectionImpl::connect() { file_event_->activate(Event::FileReadyType::Write); } } - - // The local address can only be retrieved for IP connections. Other - // types, such as UDS, don't have a notion of a local address. - // TODO(fcoras) move to SocketImpl? - if (socket_->remoteAddress()->type() == Address::Type::Ip) { - socket_->setLocalAddress(SocketInterfaceSingleton::get().addressFromFd(ioHandle().fd())); - } } } // namespace Network } // namespace Envoy diff --git a/source/common/network/listen_socket_impl.cc b/source/common/network/listen_socket_impl.cc index beed779224b1..1dc095bb299d 100644 --- a/source/common/network/listen_socket_impl.cc +++ b/source/common/network/listen_socket_impl.cc @@ -8,7 +8,6 @@ #include "envoy/common/platform.h" #include "envoy/config/core/v3/base.pb.h" -#include "common/api/os_sys_calls_impl.h" #include "common/common/assert.h" #include "common/common/fmt.h" #include "common/network/address_impl.h" @@ -27,13 +26,6 @@ Api::SysCallIntResult ListenSocketImpl::bind(Network::Address::InstanceConstShar fmt::format("cannot bind '{}': {}", local_address_->asString(), strerror(result.errno_)), result.errno_); } - // TODO(fcoras): should this be moved to SocketImpl::bind()? - if (local_address_->type() == Address::Type::Ip && local_address_->ip()->port() == 0) { - // If the port we bind is zero, then the OS will pick a free port for us (assuming there are - // any), and we need to find out the port number that the OS picked. 
- local_address_ = SocketInterfaceSingleton::get().addressFromFd(io_handle_->fd()); - } - return {0, 0}; } @@ -60,9 +52,7 @@ void NetworkListenSocket< // Linux and later BSD socket stacks #ifndef WIN32 int on = 1; - auto& os_syscalls = Api::OsSysCallsSingleton::get(); - Api::SysCallIntResult status = - os_syscalls.setsockopt(io_handle_->fd(), SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)); + auto status = setSocketOption(SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)); RELEASE_ASSERT(status.rc_ != -1, "failed to set SO_REUSEADDR socket option"); #endif } diff --git a/source/common/network/socket_impl.cc b/source/common/network/socket_impl.cc index 9232b69626b5..4e0200c381d2 100644 --- a/source/common/network/socket_impl.cc +++ b/source/common/network/socket_impl.cc @@ -45,6 +45,8 @@ SocketImpl::SocketImpl(IoHandlePtr&& io_handle, } Api::SysCallIntResult SocketImpl::bind(Network::Address::InstanceConstSharedPtr address) { + Api::SysCallIntResult bind_result; + if (address->type() == Address::Type::Pipe) { const Address::Pipe* pipe = address->pipe(); const auto* pipe_sa = reinterpret_cast(address->sockAddr()); @@ -56,8 +58,8 @@ Api::SysCallIntResult SocketImpl::bind(Network::Address::InstanceConstSharedPtr unlink(pipe_sa->sun_path); } // Not storing a reference to syscalls singleton because of unit test mocks - auto bind_result = Api::OsSysCallsSingleton::get().bind(io_handle_->fd(), address->sockAddr(), - address->sockAddrLen()); + bind_result = Api::OsSysCallsSingleton::get().bind(io_handle_->fd(), address->sockAddr(), + address->sockAddrLen()); if (pipe->mode() != 0 && !abstract_namespace && bind_result.rc_ == 0) { auto set_permissions = Api::OsSysCallsSingleton::get().chmod(pipe_sa->sun_path, pipe->mode()); if (set_permissions.rc_ != 0) { @@ -69,8 +71,12 @@ Api::SysCallIntResult SocketImpl::bind(Network::Address::InstanceConstSharedPtr return bind_result; } - return Api::OsSysCallsSingleton::get().bind(io_handle_->fd(), address->sockAddr(), - address->sockAddrLen()); + bind_result = Api::OsSysCallsSingleton::get().bind(io_handle_->fd(), address->sockAddr(), + address->sockAddrLen()); + if (bind_result.rc_ == 0 && address->ip()->port() == 0) { + local_address_ = SocketInterfaceSingleton::get().addressFromFd(io_handle_->fd()); + } + return bind_result; } Api::SysCallIntResult SocketImpl::listen(int backlog) { @@ -78,8 +84,12 @@ Api::SysCallIntResult SocketImpl::listen(int backlog) { } Api::SysCallIntResult SocketImpl::connect(const Network::Address::InstanceConstSharedPtr address) { - return Api::OsSysCallsSingleton::get().connect(io_handle_->fd(), address->sockAddr(), - address->sockAddrLen()); + auto result = Api::OsSysCallsSingleton::get().connect(io_handle_->fd(), address->sockAddr(), + address->sockAddrLen()); + if (address->type() == Address::Type::Ip) { + local_address_ = SocketInterfaceSingleton::get().addressFromFd(io_handle_->fd()); + } + return result; } Api::SysCallIntResult SocketImpl::setSocketOption(int level, int optname, const void* optval, diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_utils.cc b/source/extensions/quic_listeners/quiche/envoy_quic_utils.cc index f923d2000fd7..9e0ee82de167 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_utils.cc +++ b/source/extensions/quic_listeners/quiche/envoy_quic_utils.cc @@ -109,12 +109,7 @@ createConnectionSocket(Network::Address::InstanceConstSharedPtr& peer_addr, } connection_socket->bind(local_addr); ASSERT(local_addr->ip()); - // TODO(fcoras) maybe move to SocketImpl? 
- if (local_addr->ip()->port() == 0) { - // Get ephemeral port number. - local_addr = - Network::SocketInterfaceSingleton::get().addressFromFd(connection_socket->ioHandle().fd()); - } + local_addr = connection_socket->localAddress(); if (!Network::Socket::applyOptions(connection_socket->options(), *connection_socket, envoy::config::core::v3::SocketOption::STATE_BOUND)) { ENVOY_LOG_MISC(error, "Fail to apply post-bind options"); diff --git a/test/test_common/network_utility.cc b/test/test_common/network_utility.cc index a4c1765eda17..4eb091d78be5 100644 --- a/test/test_common/network_utility.cc +++ b/test/test_common/network_utility.cc @@ -5,7 +5,6 @@ #include "envoy/common/platform.h" -#include "common/api/os_sys_calls_impl.h" #include "common/common/assert.h" #include "common/common/fmt.h" #include "common/network/address_impl.h" @@ -55,13 +54,7 @@ Address::InstanceConstSharedPtr findOrCheckFreePort(Address::InstanceConstShared << "' with error: " << strerror(result.errno_) << " (" << result.errno_ << ")"; return nullptr; } - // If the port we bind is zero, then the OS will pick a free port for us (assuming there are - // any), and we need to find out the port number that the OS picked so we can return it. - // TODO(fcoras) maybe move to SocketImpl - if (addr_port->ip()->port() == 0) { - return SocketInterfaceSingleton::get().addressFromFd(sock.ioHandle().fd()); - } - return addr_port; + return sock.localAddress(); } Address::InstanceConstSharedPtr findOrCheckFreePort(const std::string& addr_port, @@ -149,15 +142,8 @@ Address::InstanceConstSharedPtr getAnyAddress(const Address::IpVersion version, } bool supportsIpVersion(const Address::IpVersion version) { - Address::InstanceConstSharedPtr addr = getCanonicalLoopbackAddress(version); - SocketImpl sock(Address::SocketType::Stream, addr); - if (0 != sock.bind(addr).rc_) { - // Socket bind failed. - RELEASE_ASSERT(sock.ioHandle().close().err_ == nullptr, ""); - return false; - } - RELEASE_ASSERT(sock.ioHandle().close().err_ == nullptr, ""); - return true; + return Network::SocketInterfaceSingleton::get().ipFamilySupported( + version == Address::IpVersion::v4 ? AF_INET : AF_INET6); } std::string ipVersionToDnsFamily(Network::Address::IpVersion version) { @@ -185,8 +171,7 @@ bindFreeLoopbackPort(Address::IpVersion version, Address::SocketType type) { throw EnvoyException(msg); } - return std::make_pair(SocketInterfaceSingleton::get().addressFromFd(sock->ioHandle().fd()), - std::move(sock)); + return std::make_pair(sock->localAddress(), std::move(sock)); } TransportSocketPtr createRawBufferSocket() { return std::make_unique(); } From 7ce7a8d2a71a8924ebaa4b9f2899f14241ca79d9 Mon Sep 17 00:00:00 2001 From: andrewmelis Date: Sat, 6 Jun 2020 11:46:43 -0400 Subject: [PATCH 312/909] jwt_authn: fix typo in authenticator name (#11481) Signed-off-by: Andrew Melis --- source/extensions/filters/http/jwt_authn/authenticator.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/extensions/filters/http/jwt_authn/authenticator.cc b/source/extensions/filters/http/jwt_authn/authenticator.cc index 3837b2c4c034..9ca03ac020b7 100644 --- a/source/extensions/filters/http/jwt_authn/authenticator.cc +++ b/source/extensions/filters/http/jwt_authn/authenticator.cc @@ -105,7 +105,7 @@ std::string AuthenticatorImpl::name() const { return provider_.value() + (is_allow_missing_ ? 
"-OPTIONAL" : ""); } if (is_allow_failed_) { - return "_IS_ALLOW_FALED_"; + return "_IS_ALLOW_FAILED_"; } if (is_allow_missing_) { return "_IS_ALLOW_MISSING_"; From 5dfa0f5a7a1bacd4e95089e21e55be21aa6e2b8b Mon Sep 17 00:00:00 2001 From: "Adi (Suissa) Peleg" Date: Sun, 7 Jun 2020 19:39:39 -0400 Subject: [PATCH 313/909] Adding an HTTP/2 integration fuzzer (#10321) Adding an HTTP/2 integration fuzzer that checks different kind of frames handling in both Downstream and Upstream Risk Level: Low - a new test Testing: It is a new fuzz test Signed-off-by: Adi Suissa-Peleg --- test/common/http/http2/http2_frame.cc | 57 +++- test/common/http/http2/http2_frame.h | 30 +++ test/integration/BUILD | 68 +++++ .../h2_capture_direct_response_fuzz_test.cc | 42 +++ test/integration/h2_capture_fuzz.proto | 165 ++++++++++++ test/integration/h2_capture_fuzz_test.cc | 19 ++ test/integration/h2_corpus/simple_test | 48 ++++ test/integration/h2_fuzz.cc | 255 ++++++++++++++++++ test/integration/h2_fuzz.h | 27 ++ 9 files changed, 707 insertions(+), 4 deletions(-) create mode 100644 test/integration/h2_capture_direct_response_fuzz_test.cc create mode 100644 test/integration/h2_capture_fuzz.proto create mode 100644 test/integration/h2_capture_fuzz_test.cc create mode 100644 test/integration/h2_corpus/simple_test create mode 100644 test/integration/h2_fuzz.cc create mode 100644 test/integration/h2_fuzz.h diff --git a/test/common/http/http2/http2_frame.cc b/test/common/http/http2/http2_frame.cc index a044e644cb45..df61d5f88f31 100644 --- a/test/common/http/http2/http2_frame.cc +++ b/test/common/http/http2/http2_frame.cc @@ -33,12 +33,15 @@ const char Http2Frame::Preamble[25] = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"; void Http2Frame::setHeader(absl::string_view header) { ASSERT(header.size() >= HeaderSize); data_.assign(HeaderSize, 0); + // TODO(adisuissa): memcpy is discouraged as it may be unsafe. 
This should be + // use a safer memcpy alternative (example: https://abseil.io/tips/93) memcpy(data_.data(), header.data(), HeaderSize); data_.resize(HeaderSize + payloadSize()); } void Http2Frame::setPayload(absl::string_view payload) { ASSERT(payload.size() >= payloadSize()); + ASSERT(data_.capacity() >= HeaderSize + payloadSize()); memcpy(&data_[HeaderSize], payload.data(), payloadSize()); } @@ -116,6 +119,7 @@ Http2Frame Http2Frame::makePingFrame(absl::string_view data) { static constexpr size_t kPingPayloadSize = 8; Http2Frame frame; frame.buildHeader(Type::Ping, kPingPayloadSize); + ASSERT(frame.data_.capacity() >= HeaderSize + std::min(kPingPayloadSize, data.size())); if (!data.empty()) { memcpy(&frame.data_[HeaderSize], data.data(), std::min(kPingPayloadSize, data.size())); } @@ -152,8 +156,46 @@ Http2Frame Http2Frame::makePriorityFrame(uint32_t stream_index, uint32_t depende static constexpr size_t kPriorityPayloadSize = 5; Http2Frame frame; frame.buildHeader(Type::Priority, kPriorityPayloadSize, 0, makeRequestStreamId(stream_index)); - uint32_t dependent_net = makeRequestStreamId(dependent_index); - memcpy(&frame.data_[HeaderSize], reinterpret_cast(&dependent_net), sizeof(uint32_t)); + const uint32_t dependent_net = makeRequestStreamId(dependent_index); + ASSERT(frame.data_.capacity() >= HeaderSize + sizeof(uint32_t)); + memcpy(&frame.data_[HeaderSize], reinterpret_cast(&dependent_net), sizeof(uint32_t)); + return frame; +} + +Http2Frame Http2Frame::makeEmptyPushPromiseFrame(uint32_t stream_index, + uint32_t promised_stream_index, + HeadersFlags flags) { + static constexpr size_t kEmptyPushPromisePayloadSize = 4; + Http2Frame frame; + frame.buildHeader(Type::PushPromise, kEmptyPushPromisePayloadSize, static_cast(flags), + makeRequestStreamId(stream_index)); + const uint32_t promised_stream_id = makeRequestStreamId(promised_stream_index); + ASSERT(frame.data_.capacity() >= HeaderSize + sizeof(uint32_t)); + memcpy(&frame.data_[HeaderSize], reinterpret_cast(&promised_stream_id), + sizeof(uint32_t)); + return frame; +} + +Http2Frame Http2Frame::makeResetStreamFrame(uint32_t stream_index, ErrorCode error_code) { + static constexpr size_t kResetStreamPayloadSize = 4; + Http2Frame frame; + frame.buildHeader(Type::RstStream, kResetStreamPayloadSize, 0, makeRequestStreamId(stream_index)); + const uint32_t error = static_cast(error_code); + ASSERT(frame.data_.capacity() >= HeaderSize + sizeof(uint32_t)); + memcpy(&frame.data_[HeaderSize], reinterpret_cast(&error), sizeof(uint32_t)); + return frame; +} + +Http2Frame Http2Frame::makeEmptyGoAwayFrame(uint32_t last_stream_index, ErrorCode error_code) { + static constexpr size_t kEmptyGoAwayPayloadSize = 8; + Http2Frame frame; + frame.buildHeader(Type::GoAway, kEmptyGoAwayPayloadSize, 0, makeRequestStreamId(0)); + const uint32_t last_stream_id = makeRequestStreamId(last_stream_index); + ASSERT(frame.data_.capacity() >= HeaderSize + 4 + sizeof(uint32_t)); + memcpy(&frame.data_[HeaderSize], reinterpret_cast(&last_stream_id), + sizeof(uint32_t)); + const uint32_t error = static_cast(error_code); + memcpy(&frame.data_[HeaderSize + 4], reinterpret_cast(&error), sizeof(uint32_t)); return frame; } @@ -162,8 +204,9 @@ Http2Frame Http2Frame::makeWindowUpdateFrame(uint32_t stream_index, uint32_t inc Http2Frame frame; frame.buildHeader(Type::WindowUpdate, kWindowUpdatePayloadSize, 0, makeRequestStreamId(stream_index)); - uint32_t increment_net = htonl(increment); - memcpy(&frame.data_[HeaderSize], reinterpret_cast(&increment_net), sizeof(uint32_t)); + 
const uint32_t increment_net = htonl(increment); + ASSERT(frame.data_.capacity() >= HeaderSize + sizeof(uint32_t)); + memcpy(&frame.data_[HeaderSize], reinterpret_cast(&increment_net), sizeof(uint32_t)); return frame; } @@ -218,6 +261,12 @@ Http2Frame Http2Frame::makePostRequest(uint32_t stream_index, absl::string_view return frame; } +Http2Frame Http2Frame::makeGenericFrame(absl::string_view contents) { + Http2Frame frame; + frame.appendData(contents); + return frame; +} + } // namespace Http2 } // namespace Http } // namespace Envoy diff --git a/test/common/http/http2/http2_frame.h b/test/common/http/http2/http2_frame.h index 9b04779a0a44..347061e77791 100644 --- a/test/common/http/http2/http2_frame.h +++ b/test/common/http/http2/http2_frame.h @@ -66,6 +66,23 @@ class Http2Frame { Host = 38, }; + enum class ErrorCode : uint8_t { + NoError = 0, + ProtocolError, + InternalError, + FlowControlError, + SettingsTimeout, + StreamClosed, + FrameSizeError, + RefusedStream, + Cancel, + CompressionError, + ConnectError, + EnhanceYourCalm, + InadequateSecurity, + Http11Required + }; + enum class ResponseStatus { Unknown, Ok, NotFound }; // Methods for creating HTTP2 frames @@ -77,6 +94,12 @@ class Http2Frame { HeadersFlags flags = HeadersFlags::None); static Http2Frame makeEmptyDataFrame(uint32_t stream_index, DataFlags flags = DataFlags::None); static Http2Frame makePriorityFrame(uint32_t stream_index, uint32_t dependent_index); + + static Http2Frame makeEmptyPushPromiseFrame(uint32_t stream_index, uint32_t promised_stream_index, + HeadersFlags flags = HeadersFlags::None); + static Http2Frame makeResetStreamFrame(uint32_t stream_index, ErrorCode error_code); + static Http2Frame makeEmptyGoAwayFrame(uint32_t last_stream_index, ErrorCode error_code); + static Http2Frame makeWindowUpdateFrame(uint32_t stream_index, uint32_t increment); static Http2Frame makeMalformedRequest(uint32_t stream_index); static Http2Frame makeMalformedRequestWithZerolenHeader(uint32_t stream_index, @@ -86,6 +109,13 @@ class Http2Frame { absl::string_view path); static Http2Frame makePostRequest(uint32_t stream_index, absl::string_view host, absl::string_view path); + /** + * Creates a frame with the given contents. This frame can be + * malformed/invalid depending on the given contents. + * @param contents the contents of the newly created frame. + * @return an Http2Frame that is comprised of the given contents. 
+ */ + static Http2Frame makeGenericFrame(absl::string_view contents); Type type() const { return static_cast(data_[3]); } ResponseStatus responseStatus() const; diff --git a/test/integration/BUILD b/test/integration/BUILD index 6016f55aa567..0b3f677feb25 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -107,6 +107,11 @@ envoy_proto_library( srcs = [":capture_fuzz.proto"], ) +envoy_proto_library( + name = "h2_capture_fuzz_proto", + srcs = [":h2_capture_fuzz.proto"], +) + envoy_cc_test( name = "cds_integration_test", srcs = ["cds_integration_test.cc"], @@ -1110,6 +1115,69 @@ envoy_cc_fuzz_test( ], ) +H2_FUZZ_LIB_DEPS = [ + ":h2_capture_fuzz_proto_cc_proto", + ":http_integration_lib", + "//source/common/common:assert_lib", + "//source/common/common:logger_lib", + "//test/common/http/http2:http2_frame", + "//test/fuzz:fuzz_runner_lib", + "//test/fuzz:utility_lib", + "//test/integration:integration_lib", + "//test/test_common:environment_lib", +] + +envoy_cc_test_library( + name = "h2_fuzz_lib", + srcs = ["h2_fuzz.cc"], + hdrs = ["h2_fuzz.h"], + deps = H2_FUZZ_LIB_DEPS, +) + +envoy_cc_test_library( + name = "h2_fuzz_persistent_lib", + srcs = ["h2_fuzz.cc"], + hdrs = ["h2_fuzz.h"], + copts = ["-DPERSISTENT_FUZZER"], + deps = H2_FUZZ_LIB_DEPS, +) + +envoy_cc_fuzz_test( + name = "h2_capture_fuzz_test", + srcs = ["h2_capture_fuzz_test.cc"], + corpus = "h2_corpus", + deps = [":h2_fuzz_lib"], +) + +envoy_cc_fuzz_test( + name = "h2_capture_persistent_fuzz_test", + srcs = ["h2_capture_fuzz_test.cc"], + copts = ["-DPERSISTENT_FUZZER"], + corpus = "h2_corpus", + deps = [":h2_fuzz_persistent_lib"], +) + +envoy_cc_fuzz_test( + name = "h2_capture_direct_response_fuzz_test", + srcs = ["h2_capture_direct_response_fuzz_test.cc"], + corpus = "h2_corpus", + deps = [ + ":h2_fuzz_lib", + "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", + ], +) + +envoy_cc_fuzz_test( + name = "h2_capture_direct_response_persistent_fuzz_test", + srcs = ["h2_capture_direct_response_fuzz_test.cc"], + copts = ["-DPERSISTENT_FUZZER"], + corpus = "h2_corpus", + deps = [ + ":h2_fuzz_persistent_lib", + "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", + ], +) + envoy_cc_test( name = "scoped_rds_integration_test", srcs = [ diff --git a/test/integration/h2_capture_direct_response_fuzz_test.cc b/test/integration/h2_capture_direct_response_fuzz_test.cc new file mode 100644 index 000000000000..78bd2d3f6ef7 --- /dev/null +++ b/test/integration/h2_capture_direct_response_fuzz_test.cc @@ -0,0 +1,42 @@ +#include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" + +#include "test/integration/h2_fuzz.h" + +namespace Envoy { + +void H2FuzzIntegrationTest::initialize() { + const std::string body = "Response body"; + const std::string file_path = TestEnvironment::writeStringToFileForTest("test_envoy", body); + const std::string prefix("/"); + const Http::Code status(Http::Code::OK); + + setDownstreamProtocol(Http::CodecClient::Type::HTTP2); + setUpstreamProtocol(FakeHttpConnection::Type::HTTP2); + + config_helper_.addConfigModifier( + [&file_path, &prefix]( + envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) -> void { + auto* route_config = hcm.mutable_route_config(); + // adding direct response mode to the default route + auto* default_route = + hcm.mutable_route_config()->mutable_virtual_hosts(0)->mutable_routes(0); + default_route->mutable_match()->set_prefix(prefix); + 
default_route->mutable_direct_response()->set_status(static_cast(status)); + default_route->mutable_direct_response()->mutable_body()->set_filename(file_path); + // adding headers to the default route + auto* header_value_option = route_config->mutable_response_headers_to_add()->Add(); + header_value_option->mutable_header()->set_value("direct-response-enabled"); + header_value_option->mutable_header()->set_key("x-direct-response-header"); + }); + HttpIntegrationTest::initialize(); +} + +DEFINE_PROTO_FUZZER(const test::integration::H2CaptureFuzzTestCase& input) { + RELEASE_ASSERT(!TestEnvironment::getIpVersionsForTest().empty(), ""); + const auto ip_version = TestEnvironment::getIpVersionsForTest()[0]; + PERSISTENT_FUZZ_VAR H2FuzzIntegrationTest h2_fuzz_integration_test(ip_version); + h2_fuzz_integration_test.replay(input, true); +} + +} // namespace Envoy diff --git a/test/integration/h2_capture_fuzz.proto b/test/integration/h2_capture_fuzz.proto new file mode 100644 index 000000000000..cbf5f6702367 --- /dev/null +++ b/test/integration/h2_capture_fuzz.proto @@ -0,0 +1,165 @@ +syntax = "proto3"; + +package test.integration; + +message H2FramePing { + enum Flags { + NONE = 0; + ACK = 1; + } + Flags flags = 1; + bytes data = 2; +} + +message H2FrameSettings { + enum Flags { + NONE = 0; + ACK = 1; + } + Flags flags = 1; +} + +enum H2HeadersFlags { + NONE = 0; + END_STREAM = 1; + END_HEADERS = 4; +} + +message H2FrameHeaders { + repeated H2HeadersFlags flags = 1; + uint32 stream_index = 2; +} + +message H2FrameContinuation { + repeated H2HeadersFlags flags = 1; + uint32 stream_index = 2; +} + +message H2FrameData { + enum Flags { + NONE = 0; + END_STREAM = 1; + } + Flags flags = 1; + uint32 stream_index = 2; +} + +message H2FramePriority { + uint32 stream_index = 1; + uint32 dependent_index = 2; +} + +// These map to the errors defined in: https://tools.ietf.org/html/rfc7540#section-7 +enum H2ErrorCode { + NO_ERROR = 0; + PROTOCOL_ERROR = 1; + INTERNAL_ERROR = 2; + FLOW_CONTROL_ERROR = 3; + SETTINGS_TIMEOUT = 4; + STREAM_CLOSED = 5; + FRAME_SIZE_ERROR = 6; + REFUSED_STREAM = 7; + CANCEL = 8; + COMPRESSION_ERROR = 9; + CONNECT_ERROR = 10; + ENHANCE_YOUR_CLAIM = 11; + INADEQUATE_SECURITY = 12; + HTTP_1_1_REQUIRED = 13; +} + +message H2FramePushPromise { + repeated H2HeadersFlags flags = 1; + uint32 stream_index = 2; + uint32 promised_stream_index = 3; +} + +message H2FrameResetStream { + uint32 stream_index = 1; + H2ErrorCode error_code = 2; +} + +message H2FrameGoAway { + uint32 last_stream_index = 1; + H2ErrorCode error_code = 2; +} + +message H2FrameWindowUpdate { + uint32 stream_index = 1; + uint32 increment = 2; +} + +// A header that contains invalid status +message H2FrameMalformedRequest { + uint32 stream_index = 1; +} + +// A request that is comprised of a header that has HTTP GET request with a given host and path and +// an additional zero length header (making this a malformed request) +message H2FrameMalformedRequestWithZerolenHeader { + uint32 stream_index = 1; + string host = 2; + string path = 3; +} + +// A request that is comprised of a header that has HTTP GET request with a given host and path +message H2FrameRequest { + uint32 stream_index = 1; + string host = 2; + string path = 3; +} + +// A request that is comprised of a header that has HTTP POST request with a given host and path +message H2FramePostRequest { + uint32 stream_index = 1; + string host = 2; + string path = 3; +} + +// A generic frame to emit a malformed frame +message H2FrameGeneric { + bytes frame_bytes = 1; 
+} + +message H2TestFrame { + // These values map to the frame creation methods in: + // test/common/http/http2/http2_frame.h + oneof frame_type { + H2FramePing ping = 1; + H2FrameSettings settings = 2; + H2FrameHeaders headers = 3; + H2FrameContinuation continuation = 4; + H2FrameData data = 5; + H2FramePriority priority = 6; + H2FramePushPromise push_promise = 7; + H2FrameResetStream reset_stream = 8; + H2FrameGoAway go_away = 9; + H2FrameWindowUpdate window_update = 10; + H2FrameMalformedRequest malformed_request = 11; + H2FrameMalformedRequestWithZerolenHeader malformed_request_with_zerolen_header = 12; + H2FrameRequest request = 13; + H2FramePostRequest post_request = 14; + H2FrameGeneric generic = 15; + } +} + +message DownstreamSendEvent { + repeated H2TestFrame h2_frames = 1; +} + +message UpstreamSendEvent { + repeated H2TestFrame h2_frames = 1; +} + +message Event { + oneof event_selector { + // Downstream sent given frames. + DownstreamSendEvent downstream_send_event = 1; + // Upstream sent given frames. + UpstreamSendEvent upstream_send_event = 2; + } +} + +// Test case in corpus for *_h2_capture_fuzz_test. +message H2CaptureFuzzTestCase { + repeated Event events = 1; +} diff --git a/test/integration/h2_capture_fuzz_test.cc b/test/integration/h2_capture_fuzz_test.cc new file mode 100644 index 000000000000..f31da4f2345f --- /dev/null +++ b/test/integration/h2_capture_fuzz_test.cc @@ -0,0 +1,19 @@ +#include "test/integration/h2_fuzz.h" + +namespace Envoy { +void H2FuzzIntegrationTest::initialize() { + setDownstreamProtocol(Http::CodecClient::Type::HTTP2); + setUpstreamProtocol(FakeHttpConnection::Type::HTTP2); + + HttpIntegrationTest::initialize(); +} + +DEFINE_PROTO_FUZZER(const test::integration::H2CaptureFuzzTestCase& input) { + // Pick an IP version to use for loopback, it doesn't matter which. 
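// The PERSISTENT_FUZZ_VAR qualifier on the test object below appears to keep the
// (expensive) integration test server alive across fuzz iterations in the
// -DPERSISTENT_FUZZER build variant; see the *_persistent_* targets added in
// test/integration/BUILD. This is an inference from the build flags, not stated
// explicitly in the patch.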
+ FUZZ_ASSERT(!TestEnvironment::getIpVersionsForTest().empty()); + const auto ip_version = TestEnvironment::getIpVersionsForTest()[0]; + PERSISTENT_FUZZ_VAR H2FuzzIntegrationTest h2_fuzz_integration_test(ip_version); + h2_fuzz_integration_test.replay(input, false); +} + +} // namespace Envoy diff --git a/test/integration/h2_corpus/simple_test b/test/integration/h2_corpus/simple_test new file mode 100644 index 000000000000..8556a3bdd983 --- /dev/null +++ b/test/integration/h2_corpus/simple_test @@ -0,0 +1,48 @@ +events { + downstream_send_event { + h2_frames { + settings { + flags: NONE + } + } + h2_frames { + settings { + flags: ACK + } + } + h2_frames { + request { + stream_index: 1 + host: "host" + path: "/path/to/long/url" + } + } + } +} +events { + upstream_send_event { + h2_frames { + settings { + flags: NONE + } + } + h2_frames { + settings { + flags: ACK + } + } + h2_frames { + headers { + flags: NONE + flags: END_STREAM + stream_index: 1 + } + } + h2_frames { + data { + flags: NONE + stream_index: 1 + } + } + } +} diff --git a/test/integration/h2_fuzz.cc b/test/integration/h2_fuzz.cc new file mode 100644 index 000000000000..40b6b2410788 --- /dev/null +++ b/test/integration/h2_fuzz.cc @@ -0,0 +1,255 @@ +#include "test/integration/h2_fuzz.h" + +#include + +#include "common/common/assert.h" +#include "common/common/base64.h" +#include "common/common/logger.h" + +#include "test/test_common/environment.h" + +namespace Envoy { + +using namespace Envoy::Http::Http2; + +namespace { + +static Http2Frame::HeadersFlags +unifyHeadersFlags(const Protobuf::RepeatedField& headers_flags) { + int unified_flags = 0; + for (const auto& flag : headers_flags) { + unified_flags |= flag; + } + return static_cast(unified_flags); +} + +} // namespace + +void H2FuzzIntegrationTest::sendFrame(const test::integration::H2TestFrame& proto_frame, + std::function write_func) { + Http2Frame h2_frame; + switch (proto_frame.frame_type_case()) { + case test::integration::H2TestFrame::kPing: + ENVOY_LOG_MISC(trace, "Sending ping frame"); + h2_frame = Http2Frame::makePingFrame(proto_frame.ping().data()); + break; + case test::integration::H2TestFrame::kSettings: { + const Http2Frame::SettingsFlags settings_flags = + static_cast(proto_frame.settings().flags()); + ENVOY_LOG_MISC(trace, "Sending settings frame"); + h2_frame = Http2Frame::makeEmptySettingsFrame(settings_flags); + break; + } + case test::integration::H2TestFrame::kHeaders: { + const Http2Frame::HeadersFlags headers_flags = unifyHeadersFlags(proto_frame.headers().flags()); + const uint32_t stream_idx = proto_frame.headers().stream_index(); + ENVOY_LOG_MISC(trace, "Sending headers frame"); + h2_frame = Http2Frame::makeEmptyHeadersFrame(stream_idx, headers_flags); + break; + } + case test::integration::H2TestFrame::kContinuation: { + const Http2Frame::HeadersFlags headers_flags = + unifyHeadersFlags(proto_frame.continuation().flags()); + const uint32_t stream_idx = proto_frame.continuation().stream_index(); + ENVOY_LOG_MISC(trace, "Sending continuation frame"); + h2_frame = Http2Frame::makeEmptyContinuationFrame(stream_idx, headers_flags); + break; + } + case test::integration::H2TestFrame::kData: { + const Http2Frame::DataFlags data_flags = + static_cast(proto_frame.data().flags()); + const uint32_t stream_idx = proto_frame.data().stream_index(); + ENVOY_LOG_MISC(trace, "Sending data frame"); + h2_frame = Http2Frame::makeEmptyDataFrame(stream_idx, data_flags); + break; + } + case test::integration::H2TestFrame::kPriority: { + const uint32_t stream_idx = 
proto_frame.priority().stream_index(); + const uint32_t dependent_idx = proto_frame.priority().dependent_index(); + ENVOY_LOG_MISC(trace, "Sending priority frame"); + h2_frame = Http2Frame::makePriorityFrame(stream_idx, dependent_idx); + break; + } + case test::integration::H2TestFrame::kPushPromise: { + const Http2Frame::HeadersFlags headers_flags = + unifyHeadersFlags(proto_frame.push_promise().flags()); + const uint32_t stream_idx = proto_frame.push_promise().stream_index(); + const uint32_t promised_stream_idx = proto_frame.push_promise().promised_stream_index(); + ENVOY_LOG_MISC(trace, "Sending push promise frame"); + h2_frame = + Http2Frame::makeEmptyPushPromiseFrame(stream_idx, promised_stream_idx, headers_flags); + break; + } + case test::integration::H2TestFrame::kResetStream: { + const uint32_t stream_idx = proto_frame.reset_stream().stream_index(); + const Http2Frame::ErrorCode error_code = + static_cast(proto_frame.reset_stream().error_code()); + ENVOY_LOG_MISC(trace, "Sending reset stream frame"); + h2_frame = Http2Frame::makeResetStreamFrame(stream_idx, error_code); + break; + } + case test::integration::H2TestFrame::kGoAway: { + const uint32_t last_stream_idx = proto_frame.go_away().last_stream_index(); + const Http2Frame::ErrorCode error_code = + static_cast(proto_frame.go_away().error_code()); + ENVOY_LOG_MISC(trace, "Sending go-away frame"); + h2_frame = Http2Frame::makeEmptyGoAwayFrame(last_stream_idx, error_code); + break; + } + case test::integration::H2TestFrame::kWindowUpdate: { + const uint32_t stream_idx = proto_frame.window_update().stream_index(); + const uint32_t increment = proto_frame.window_update().increment(); + ENVOY_LOG_MISC(trace, "Sending windows_update frame"); + h2_frame = Http2Frame::makeWindowUpdateFrame(stream_idx, increment); + break; + } + case test::integration::H2TestFrame::kMalformedRequest: { + const uint32_t stream_idx = proto_frame.malformed_request().stream_index(); + ENVOY_LOG_MISC(trace, "Sending malformed_request frame"); + h2_frame = Http2Frame::makeMalformedRequest(stream_idx); + break; + } + case test::integration::H2TestFrame::kMalformedRequestWithZerolenHeader: { + const uint32_t stream_idx = proto_frame.malformed_request_with_zerolen_header().stream_index(); + const absl::string_view host = proto_frame.malformed_request_with_zerolen_header().host(); + const absl::string_view path = proto_frame.malformed_request_with_zerolen_header().path(); + ENVOY_LOG_MISC(trace, "Sending malformed_request_with_zerolen_header"); + h2_frame = Http2Frame::makeMalformedRequestWithZerolenHeader(stream_idx, host, path); + break; + } + case test::integration::H2TestFrame::kRequest: { + const uint32_t stream_idx = proto_frame.request().stream_index(); + const absl::string_view host = proto_frame.request().host(); + const absl::string_view path = proto_frame.request().path(); + ENVOY_LOG_MISC(trace, "Sending request"); + h2_frame = Http2Frame::makeRequest(stream_idx, host, path); + break; + } + case test::integration::H2TestFrame::kPostRequest: { + const uint32_t stream_idx = proto_frame.post_request().stream_index(); + const absl::string_view host = proto_frame.post_request().host(); + const absl::string_view path = proto_frame.post_request().path(); + ENVOY_LOG_MISC(trace, "Sending post request"); + h2_frame = Http2Frame::makePostRequest(stream_idx, host, path); + break; + } + case test::integration::H2TestFrame::kGeneric: { + const absl::string_view frame_bytes = proto_frame.generic().frame_bytes(); + ENVOY_LOG_MISC(trace, "Sending generic frame"); + 
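    // makeGenericFrame() copies the fuzzer-supplied bytes into the frame verbatim,
    // with no framing or length validation, so this case is what lets the fuzzer
    // exercise malformed or truncated HTTP/2 frames end to end.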
h2_frame = Http2Frame::makeGenericFrame(frame_bytes); + break; + } + default: + ENVOY_LOG_MISC(debug, "Proto-frame not supported!"); + break; + } + + write_func(h2_frame); +} + +void H2FuzzIntegrationTest::replay(const test::integration::H2CaptureFuzzTestCase& input, + bool ignore_response) { + PERSISTENT_FUZZ_VAR bool initialized = [this]() -> bool { + initialize(); + fake_upstreams_[0]->set_allow_unexpected_disconnects(true); + return true; + }(); + UNREFERENCED_PARAMETER(initialized); + IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("http")); + FakeRawConnectionPtr fake_upstream_connection; + bool stop_further_inputs = false; + bool preamble_sent = false; + for (int i = 0; i < input.events().size(); ++i) { + if (stop_further_inputs) { + break; + } + const auto& event = input.events(i); + ENVOY_LOG_MISC(debug, "Processing event: {}", event.DebugString()); + // If we're disconnected, we fail out. + if (!tcp_client->connected()) { + ENVOY_LOG_MISC(debug, "Disconnected, no further event processing."); + break; + } + switch (event.event_selector_case()) { + case test::integration::Event::kDownstreamSendEvent: { + auto downstream_write_func = [&](const Http2Frame& h2_frame) -> void { + tcp_client->write(std::string(h2_frame), false, false); + }; + if (!preamble_sent) { + // Start H2 session - send hello string + tcp_client->write(Http2Frame::Preamble, false, false); + preamble_sent = true; + } + for (auto& frame : event.downstream_send_event().h2_frames()) { + if (!tcp_client->connected()) { + ENVOY_LOG_MISC(debug, + "Disconnected, avoiding sending data, no further event processing."); + break; + } + + ENVOY_LOG_MISC(trace, "sending downstream frame"); + sendFrame(frame, downstream_write_func); + } + break; + } + case test::integration::Event::kUpstreamSendEvent: { + if (ignore_response) { + break; + } + if (fake_upstream_connection == nullptr) { + if (!fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection, max_wait_ms_)) { + // If we timed out, we fail out. + if (tcp_client->connected()) { + tcp_client->close(); + } + stop_further_inputs = true; + break; + } + } + // If we're no longer connected, we're done. + if (!fake_upstream_connection->connected()) { + if (tcp_client->connected()) { + tcp_client->close(); + } + stop_further_inputs = true; + break; + } + { + auto upstream_write_func = [&](const Http2Frame& h2_frame) -> void { + AssertionResult result = fake_upstream_connection->write(std::string(h2_frame)); + RELEASE_ASSERT(result, result.message()); + }; + for (auto& frame : event.upstream_send_event().h2_frames()) { + if (!fake_upstream_connection->connected()) { + ENVOY_LOG_MISC( + debug, + "Upstream disconnected, avoiding sending data, no further event processing."); + stop_further_inputs = true; + break; + } + + ENVOY_LOG_MISC(trace, "sending upstream frame"); + sendFrame(frame, upstream_write_func); + } + } + break; + } + default: + // Maybe nothing is set? 
+ break; + } + } + if (fake_upstream_connection != nullptr) { + if (fake_upstream_connection->connected()) { + AssertionResult result = fake_upstream_connection->close(); + RELEASE_ASSERT(result, result.message()); + } + AssertionResult result = fake_upstream_connection->waitForDisconnect(true); + RELEASE_ASSERT(result, result.message()); + } + if (tcp_client->connected()) { + tcp_client->close(); + } +} + +} // namespace Envoy diff --git a/test/integration/h2_fuzz.h b/test/integration/h2_fuzz.h new file mode 100644 index 000000000000..b73ca0ffbfa2 --- /dev/null +++ b/test/integration/h2_fuzz.h @@ -0,0 +1,27 @@ +#pragma once + +#include "common/common/assert.h" +#include "common/common/logger.h" + +#include "test/common/http/http2/http2_frame.h" +#include "test/fuzz/fuzz_runner.h" +#include "test/fuzz/utility.h" +#include "test/integration/h2_capture_fuzz.pb.h" +#include "test/integration/http_integration.h" + +namespace Envoy { + +class H2FuzzIntegrationTest : public HttpIntegrationTest { +public: + H2FuzzIntegrationTest(Network::Address::IpVersion version) + : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, version) {} + + void initialize() override; + void replay(const test::integration::H2CaptureFuzzTestCase&, bool ignore_response); + const std::chrono::milliseconds max_wait_ms_{10}; + +private: + void sendFrame(const test::integration::H2TestFrame&, + std::function); +}; +} // namespace Envoy From f0a79ab72b0a625617e80a0a2e42df5393a90c58 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Mon, 8 Jun 2020 11:20:06 -0400 Subject: [PATCH 314/909] test: moving waitForCounter calls off of using sleep (#11289) Risk Level: n/a (test only) Testing: many tsan runs Docs Changes: n/a Release Notes: n/a Signed-off-by: Alyssa Wilk --- source/common/stats/allocator_impl.cc | 7 +- source/common/stats/allocator_impl.h | 5 + test/config/utility.cc | 2 + test/integration/cds_integration_test.cc | 1 + test/integration/integration.cc | 2 +- test/integration/integration.h | 4 + test/integration/server.cc | 18 +++- test/integration/server.h | 113 ++++++++++++++++++++- test/integration/stats_integration_test.cc | 4 +- test/integration/xds_integration_test.cc | 2 + 10 files changed, 145 insertions(+), 13 deletions(-) diff --git a/source/common/stats/allocator_impl.cc b/source/common/stats/allocator_impl.cc index 4f41b208e4ad..04dd31eca9c7 100644 --- a/source/common/stats/allocator_impl.cc +++ b/source/common/stats/allocator_impl.cc @@ -264,7 +264,7 @@ CounterSharedPtr AllocatorImpl::makeCounter(StatName name, StatName tag_extracte if (iter != counters_.end()) { return CounterSharedPtr(*iter); } - auto counter = CounterSharedPtr(new CounterImpl(name, *this, tag_extracted_name, stat_name_tags)); + auto counter = CounterSharedPtr(makeCounterInternal(name, tag_extracted_name, stat_name_tags)); counters_.insert(counter.get()); return counter; } @@ -308,5 +308,10 @@ bool AllocatorImpl::isMutexLockedForTest() { return !locked; } +Counter* AllocatorImpl::makeCounterInternal(StatName name, StatName tag_extracted_name, + const StatNameTagVector& stat_name_tags) { + return new CounterImpl(name, *this, tag_extracted_name, stat_name_tags); +} + } // namespace Stats } // namespace Envoy diff --git a/source/common/stats/allocator_impl.h b/source/common/stats/allocator_impl.h index 02e926529358..ddee00c39559 100644 --- a/source/common/stats/allocator_impl.h +++ b/source/common/stats/allocator_impl.h @@ -47,11 +47,16 @@ class AllocatorImpl : public Allocator { */ bool isMutexLockedForTest(); +protected: + virtual Counter* 
makeCounterInternal(StatName name, StatName tag_extracted_name, + const StatNameTagVector& stat_name_tags); + private: template friend class StatsSharedImpl; friend class CounterImpl; friend class GaugeImpl; friend class TextReadoutImpl; + friend class NotifyingAllocatorImpl; struct HeapStatHash { using is_transparent = void; // NOLINT(readability-identifier-naming) diff --git a/test/config/utility.cc b/test/config/utility.cc index afd05d26bd50..cfbe5a92bf11 100644 --- a/test/config/utility.cc +++ b/test/config/utility.cc @@ -1003,6 +1003,8 @@ void EdsHelper::setEds(const std::vector& cluster_load_assignments, IntegrationTestServerStats& server_stats) { + // Make sure the last version has been accepted before setting a new one. + server_stats.waitForCounterGe("cluster.cluster_0.update_success", update_successes_); setEds(cluster_load_assignments); // Make sure Envoy has consumed the update now that it is running. ++update_successes_; diff --git a/test/integration/cds_integration_test.cc b/test/integration/cds_integration_test.cc index 8a28eeb652c8..1e1790bfd8fa 100644 --- a/test/integration/cds_integration_test.cc +++ b/test/integration/cds_integration_test.cc @@ -147,6 +147,7 @@ INSTANTIATE_TEST_SUITE_P(IpVersionsClientTypeDelta, CdsIntegrationTest, TEST_P(CdsIntegrationTest, CdsClusterUpDownUp) { // Calls our initialize(), which includes establishing a listener, route, and cluster. testRouterHeaderOnlyRequestAndResponse(nullptr, UpstreamIndex1, "/cluster1"); + test_server_->waitForCounterGe("cluster_manager.cluster_added", 1); // Tell Envoy that cluster_1 is gone. EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "55", {}, {}, {})); diff --git a/test/integration/integration.cc b/test/integration/integration.cc index 5ba1f7e746ab..73e9aa8a4b10 100644 --- a/test/integration/integration.cc +++ b/test/integration/integration.cc @@ -457,7 +457,7 @@ void BaseIntegrationTest::createGeneratedApiTestServer( test_server_ = IntegrationTestServer::create( bootstrap_path, version_, on_server_ready_function_, on_server_init_function_, deterministic_, timeSystem(), *api_, defer_listener_finalization_, process_object_, validator_config, - concurrency_, drain_time_); + concurrency_, drain_time_, use_real_stats_); if (config_helper_.bootstrap().static_resources().listeners_size() > 0 && !defer_listener_finalization_) { diff --git a/test/integration/integration.h b/test/integration/integration.h index 3ec31c3eff66..31ee89af7127 100644 --- a/test/integration/integration.h +++ b/test/integration/integration.h @@ -447,6 +447,10 @@ class BaseIntegrationTest : protected Logger::Loggable { bool use_lds_{true}; // Use the integration framework's LDS set up. Grpc::SotwOrDelta sotw_or_delta_{Grpc::SotwOrDelta::Sotw}; + // By default the test server will use custom stats to notify on increment. + // This override exists for tests measuring stats memory. 
+ bool use_real_stats_{}; + private: // The type for the Envoy-to-backend connection FakeHttpConnection::Type upstream_protocol_{FakeHttpConnection::Type::HTTP1}; diff --git a/test/integration/server.cc b/test/integration/server.cc index ec9469b919c1..fa3e14af7602 100644 --- a/test/integration/server.cc +++ b/test/integration/server.cc @@ -58,9 +58,9 @@ IntegrationTestServerPtr IntegrationTestServer::create( std::function on_server_init_function, bool deterministic, Event::TestTimeSystem& time_system, Api::Api& api, bool defer_listener_finalization, ProcessObjectOptRef process_object, Server::FieldValidationConfig validation_config, - uint32_t concurrency, std::chrono::seconds drain_time) { + uint32_t concurrency, std::chrono::seconds drain_time, bool use_real_stats) { IntegrationTestServerPtr server{ - std::make_unique(time_system, api, config_path)}; + std::make_unique(time_system, api, config_path, use_real_stats)}; if (server_ready_function != nullptr) { server->setOnServerReadyCb(server_ready_function); } @@ -182,6 +182,16 @@ void IntegrationTestServer::threadRoutine(const Network::Address::IpVersion vers lock, *this, std::move(random_generator), process_object); } +IntegrationTestServerImpl::IntegrationTestServerImpl(Event::TestTimeSystem& time_system, + Api::Api& api, const std::string& config_path, + bool use_real_stats) + : IntegrationTestServer(time_system, api, config_path), + symbol_table_(Stats::SymbolTableCreator::makeSymbolTable()) { + stats_allocator_ = + (use_real_stats ? std::make_unique(*symbol_table_) + : std::make_unique(*symbol_table_)); +} + void IntegrationTestServerImpl::createAndRunEnvoyServer( OptionsImpl& options, Event::TimeSystem& time_system, Network::Address::InstanceConstSharedPtr local_address, ListenerHooks& hooks, @@ -189,11 +199,9 @@ void IntegrationTestServerImpl::createAndRunEnvoyServer( Runtime::RandomGeneratorPtr&& random_generator, ProcessObjectOptRef process_object) { { Init::ManagerImpl init_manager{"Server"}; - Stats::SymbolTablePtr symbol_table = Stats::SymbolTableCreator::makeSymbolTable(); Server::HotRestartNopImpl restarter; ThreadLocal::InstanceImpl tls; - Stats::AllocatorImpl stats_allocator(*symbol_table); - Stats::ThreadLocalStoreImpl stat_store(stats_allocator); + Stats::ThreadLocalStoreImpl stat_store(*stats_allocator_); std::unique_ptr process_context; if (process_object.has_value()) { process_context = std::make_unique(process_object->get()); diff --git a/test/integration/server.h b/test/integration/server.h index 531a5574e2cb..5862cd1f1bbf 100644 --- a/test/integration/server.h +++ b/test/integration/server.h @@ -15,6 +15,7 @@ #include "common/common/lock_guard.h" #include "common/common/logger.h" #include "common/common/thread.h" +#include "common/stats/allocator_impl.h" #include "server/drain_manager_impl.h" #include "server/listener_hooks.h" @@ -152,6 +153,98 @@ class TestScopeWrapper : public Scope { ScopePtr wrapped_scope_; }; +// A counter which signals on a condition variable when it is incremented. 
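// It wraps the real counter (owned through a unique_ptr) and delegates all reads
// to it; add()/inc() additionally signal the shared condition variable so that the
// NotifyingAllocatorImpl defined below can wake waiters in
// waitForCounterFromStringEq/Ge as soon as a value changes, instead of tests
// polling with sleeps.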
+class NotifyingCounter : public Stats::Counter { +public: + NotifyingCounter(Stats::Counter* counter, absl::Mutex& mutex, absl::CondVar& condvar) + : counter_(counter), mutex_(mutex), condvar_(condvar) {} + + std::string name() const override { return counter_->name(); } + StatName statName() const override { return counter_->statName(); } + TagVector tags() const override { return counter_->tags(); } + std::string tagExtractedName() const override { return counter_->tagExtractedName(); } + void iterateTagStatNames(const TagStatNameIterFn& fn) const override { + counter_->iterateTagStatNames(fn); + } + void add(uint64_t amount) override { + counter_->add(amount); + absl::MutexLock l(&mutex_); + condvar_.Signal(); + } + void inc() override { add(1); } + uint64_t latch() override { return counter_->latch(); } + void reset() override { return counter_->reset(); } + uint64_t value() const override { return counter_->value(); } + void incRefCount() override { counter_->incRefCount(); } + bool decRefCount() override { return counter_->decRefCount(); } + uint32_t use_count() const override { return counter_->use_count(); } + StatName tagExtractedStatName() const override { return counter_->tagExtractedStatName(); } + bool used() const override { return counter_->used(); } + SymbolTable& symbolTable() override { return counter_->symbolTable(); } + const SymbolTable& constSymbolTable() const override { return counter_->constSymbolTable(); } + +private: + std::unique_ptr counter_; + absl::Mutex& mutex_; + absl::CondVar& condvar_; +}; + +// A stats allocator which creates NotifyingCounters rather than regular CounterImpls. +class NotifyingAllocatorImpl : public Stats::AllocatorImpl { +public: + using Stats::AllocatorImpl::AllocatorImpl; + + virtual void waitForCounterFromStringEq(const std::string& name, uint64_t value) { + absl::MutexLock l(&mutex_); + ENVOY_LOG_MISC(trace, "waiting for {} to be {}", name, value); + while (getCounterLockHeld(name) == nullptr || getCounterLockHeld(name)->value() != value) { + condvar_.Wait(&mutex_); + } + ENVOY_LOG_MISC(trace, "done waiting for {} to be {}", name, value); + } + + virtual void waitForCounterFromStringGe(const std::string& name, uint64_t value) { + absl::MutexLock l(&mutex_); + ENVOY_LOG_MISC(trace, "waiting for {} to be {}", name, value); + while (getCounterLockHeld(name) == nullptr || getCounterLockHeld(name)->value() < value) { + condvar_.Wait(&mutex_); + } + ENVOY_LOG_MISC(trace, "done waiting for {} to be {}", name, value); + } + +protected: + Stats::Counter* makeCounterInternal(StatName name, StatName tag_extracted_name, + const StatNameTagVector& stat_name_tags) override { + Stats::Counter* counter = new NotifyingCounter( + Stats::AllocatorImpl::makeCounterInternal(name, tag_extracted_name, stat_name_tags), mutex_, + condvar_); + { + absl::MutexLock l(&mutex_); + // Allow getting the counter directly from the allocator, since it's harder to + // signal when the counter has been added to a given stats store. 
+ counters_.emplace(counter->name(), counter); + if (counter->name() == "cluster_manager.cluster_removed") { + } + condvar_.Signal(); + } + return counter; + } + + virtual Stats::Counter* getCounterLockHeld(const std::string& name) + EXCLUSIVE_LOCKS_REQUIRED(mutex_) { + auto it = counters_.find(name); + if (it != counters_.end()) { + return it->second; + } + return nullptr; + } + +private: + absl::flat_hash_map counters_; + absl::Mutex mutex_; + absl::CondVar condvar_; +}; + /** * This is a variant of the isolated store that has locking across all operations so that it can * be used during the integration tests. @@ -275,7 +368,8 @@ class IntegrationTestServer : public Logger::Loggable, bool defer_listener_finalization = false, ProcessObjectOptRef process_object = absl::nullopt, Server::FieldValidationConfig validation_config = Server::FieldValidationConfig(), - uint32_t concurrency = 1, std::chrono::seconds drain_time = std::chrono::seconds(1)); + uint32_t concurrency = 1, std::chrono::seconds drain_time = std::chrono::seconds(1), + bool use_real_stats = false); // Note that the derived class is responsible for tearing down the server in its // destructor. ~IntegrationTestServer() override; @@ -301,11 +395,11 @@ class IntegrationTestServer : public Logger::Loggable, std::chrono::seconds drain_time); void waitForCounterEq(const std::string& name, uint64_t value) override { - TestUtility::waitForCounterEq(statStore(), name, value, time_system_); + notifyingStatsAllocator().waitForCounterFromStringEq(name, value); } void waitForCounterGe(const std::string& name, uint64_t value) override { - TestUtility::waitForCounterGe(statStore(), name, value, time_system_); + notifyingStatsAllocator().waitForCounterFromStringGe(name, value); } void waitForGaugeGe(const std::string& name, uint64_t value) override { @@ -351,6 +445,7 @@ class IntegrationTestServer : public Logger::Loggable, virtual Server::Instance& server() PURE; virtual Stats::Store& statStore() PURE; virtual Network::Address::InstanceConstSharedPtr adminAddress() PURE; + virtual Stats::NotifyingAllocatorImpl& notifyingStatsAllocator() PURE; void useAdminInterfaceToQuit(bool use) { use_admin_interface_to_quit_ = use; } bool useAdminInterfaceToQuit() { return use_admin_interface_to_quit_; } @@ -404,8 +499,7 @@ class IntegrationTestServer : public Logger::Loggable, class IntegrationTestServerImpl : public IntegrationTestServer { public: IntegrationTestServerImpl(Event::TestTimeSystem& time_system, Api::Api& api, - const std::string& config_path) - : IntegrationTestServer(time_system, api, config_path) {} + const std::string& config_path, bool real_stats = false); ~IntegrationTestServerImpl() override; @@ -419,6 +513,13 @@ class IntegrationTestServerImpl : public IntegrationTestServer { } Network::Address::InstanceConstSharedPtr adminAddress() override { return admin_address_; } + Stats::NotifyingAllocatorImpl& notifyingStatsAllocator() override { + auto* ret = dynamic_cast(stats_allocator_.get()); + RELEASE_ASSERT(ret != nullptr, + "notifyingStatsAllocator() is not created when real_stats is true"); + return *ret; + } + private: void createAndRunEnvoyServer(OptionsImpl& options, Event::TimeSystem& time_system, Network::Address::InstanceConstSharedPtr local_address, @@ -433,6 +534,8 @@ class IntegrationTestServerImpl : public IntegrationTestServer { Stats::Store* stat_store_{}; Network::Address::InstanceConstSharedPtr admin_address_; absl::Notification server_gone_; + Stats::SymbolTablePtr symbol_table_; + std::unique_ptr stats_allocator_; }; 
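// Usage sketch (both calls appear in tests touched by this patch): with the
// notifying allocator installed, integration tests block until Envoy actually
// increments a stat rather than sleeping and re-checking, e.g.
//   test_server_->waitForCounterGe("listener_manager.listener_create_success", 1);
//   test_server_->waitForCounterGe("cluster_manager.cluster_added", 1);
// Tests that measure stats memory (stats_integration_test.cc) set use_real_stats_
// instead and get a plain Stats::AllocatorImpl, in which case
// notifyingStatsAllocator() intentionally fails with a RELEASE_ASSERT.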
} // namespace Envoy diff --git a/test/integration/stats_integration_test.cc b/test/integration/stats_integration_test.cc index 914657507162..43985c20fead 100644 --- a/test/integration/stats_integration_test.cc +++ b/test/integration/stats_integration_test.cc @@ -144,7 +144,9 @@ TEST_P(StatsIntegrationTest, WithTagSpecifierWithFixedValue) { class ClusterMemoryTestHelper : public BaseIntegrationTest { public: ClusterMemoryTestHelper() - : BaseIntegrationTest(testing::TestWithParam::GetParam()) {} + : BaseIntegrationTest(testing::TestWithParam::GetParam()) { + use_real_stats_ = true; + } static size_t computeMemoryDelta(int initial_num_clusters, int initial_num_hosts, int final_num_clusters, int final_num_hosts, bool allow_stats) { diff --git a/test/integration/xds_integration_test.cc b/test/integration/xds_integration_test.cc index 833e2d73f65c..b76f8c476b4a 100644 --- a/test/integration/xds_integration_test.cc +++ b/test/integration/xds_integration_test.cc @@ -196,6 +196,7 @@ TEST_P(LdsInplaceUpdateTcpProxyIntegrationTest, ReloadConfigDeletingFilterChain) TEST_P(LdsInplaceUpdateTcpProxyIntegrationTest, ReloadConfigAddingFilterChain) { setUpstreamCount(2); initialize(); + test_server_->waitForCounterGe("listener_manager.listener_create_success", 1); std::string response_0; auto client_conn_0 = createConnectionAndWrite("alpn0", "hello", response_0); @@ -368,6 +369,7 @@ TEST_P(LdsInplaceUpdateHttpIntegrationTest, ReloadConfigDeletingFilterChain) { // chain 2. TEST_P(LdsInplaceUpdateHttpIntegrationTest, ReloadConfigAddingFilterChain) { initialize(); + test_server_->waitForCounterGe("listener_manager.listener_create_success", 1); auto codec_client_0 = createHttpCodec("alpn0"); Cleanup cleanup0([c0 = codec_client_0.get()]() { c0->close(); }); From 3ddb272eaeeb44943614ba69efbcf9c9aa0d48bc Mon Sep 17 00:00:00 2001 From: Snow Pettersen Date: Mon, 8 Jun 2020 08:20:46 -0700 Subject: [PATCH 315/909] test: output value of ip env varialbe to set + minor signature changes (#11487) Also includes a minor cleanup where we remove an unneccessary const-qualifier from a return type. Signed-off-by: Snow Pettersen --- test/test_common/environment.cc | 10 ++++++---- test/test_common/network_utility.cc | 10 +++++----- test/test_common/network_utility.h | 10 +++++----- 3 files changed, 16 insertions(+), 14 deletions(-) diff --git a/test/test_common/environment.cc b/test/test_common/environment.cc index 45a6e9b883ba..c303a88aea7b 100644 --- a/test/test_common/environment.cc +++ b/test/test_common/environment.cc @@ -243,10 +243,12 @@ std::vector TestEnvironment::getIpVersionsForTest() if (TestEnvironment::shouldRunTestForIpVersion(version)) { parameters.push_back(version); if (!Network::Test::supportsIpVersion(version)) { - ENVOY_LOG_TO_LOGGER(Logger::Registry::getLog(Logger::Id::testing), warn, - "Testing with IP{} addresses may not be supported on this machine. If " - "testing fails, set the environment variable ENVOY_IP_TEST_VERSIONS.", - Network::Test::addressVersionAsString(version)); + const auto version_string = Network::Test::addressVersionAsString(version); + ENVOY_LOG_TO_LOGGER( + Logger::Registry::getLog(Logger::Id::testing), warn, + "Testing with IP{} addresses may not be supported on this machine. 
If " + "testing fails, set the environment variable ENVOY_IP_TEST_VERSIONS to 'v{}only'.", + version_string, version_string); } } } diff --git a/test/test_common/network_utility.cc b/test/test_common/network_utility.cc index 4eb091d78be5..5821465bd495 100644 --- a/test/test_common/network_utility.cc +++ b/test/test_common/network_utility.cc @@ -68,35 +68,35 @@ Address::InstanceConstSharedPtr findOrCheckFreePort(const std::string& addr_port return instance; } -const std::string getLoopbackAddressUrlString(const Address::IpVersion version) { +std::string getLoopbackAddressUrlString(const Address::IpVersion version) { if (version == Address::IpVersion::v6) { return std::string("[::1]"); } return std::string("127.0.0.1"); } -const std::string getLoopbackAddressString(const Address::IpVersion version) { +std::string getLoopbackAddressString(const Address::IpVersion version) { if (version == Address::IpVersion::v6) { return std::string("::1"); } return std::string("127.0.0.1"); } -const std::string getAnyAddressUrlString(const Address::IpVersion version) { +std::string getAnyAddressUrlString(const Address::IpVersion version) { if (version == Address::IpVersion::v6) { return std::string("[::]"); } return std::string("0.0.0.0"); } -const std::string getAnyAddressString(const Address::IpVersion version) { +std::string getAnyAddressString(const Address::IpVersion version) { if (version == Address::IpVersion::v6) { return std::string("::"); } return std::string("0.0.0.0"); } -const std::string addressVersionAsString(const Address::IpVersion version) { +std::string addressVersionAsString(const Address::IpVersion version) { if (version == Address::IpVersion::v4) { return std::string("v4"); } diff --git a/test/test_common/network_utility.h b/test/test_common/network_utility.h index 76c01a87cc62..3dc280d42388 100644 --- a/test/test_common/network_utility.h +++ b/test/test_common/network_utility.h @@ -42,7 +42,7 @@ Address::InstanceConstSharedPtr findOrCheckFreePort(const std::string& addr_port * @param version IP address version of loopback address. * @return std::string URL ready loopback address as a string. */ -const std::string getLoopbackAddressUrlString(const Address::IpVersion version); +std::string getLoopbackAddressUrlString(const Address::IpVersion version); /** * Get a IP loopback address as a string. There are no square brackets around IPv6 addresses, this @@ -50,28 +50,28 @@ const std::string getLoopbackAddressUrlString(const Address::IpVersion version); * @param version IP address version of loopback address. * @return std::string loopback address as a string. */ -const std::string getLoopbackAddressString(const Address::IpVersion version); +std::string getLoopbackAddressString(const Address::IpVersion version); /** * Get a URL ready IP any address as a string. * @param version IP address version of any address. * @return std::string URL ready any address as a string. */ -const std::string getAnyAddressUrlString(const Address::IpVersion version); +std::string getAnyAddressUrlString(const Address::IpVersion version); /** * Get an IP any address as a string. * @param version IP address version of any address. * @return std::string any address as a string. */ -const std::string getAnyAddressString(const Address::IpVersion version); +std::string getAnyAddressString(const Address::IpVersion version); /** * Return a string version of enum IpVersion version. * @param version IP address version. * @return std::string string version of IpVersion. 
*/ -const std::string addressVersionAsString(const Address::IpVersion version); +std::string addressVersionAsString(const Address::IpVersion version); /** * Returns a loopback address for the specified IP version (127.0.0.1 for IPv4 and ::1 for IPv6). From 07f1c65d9d935ec1ecbefb82972bc4f8f547c32a Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Mon, 8 Jun 2020 08:22:33 -0700 Subject: [PATCH 316/909] context: add initManager and lifecycleNotifier to ServerFactoryContext (#11485) - extract FactoryContexts to factory_context.h - make initManager in TransportSocketFactoryContext return reference Signed-off-by: Lizan Zhou --- include/envoy/server/BUILD | 32 ++- .../envoy/server/bootstrap_extension_config.h | 2 +- include/envoy/server/factory_context.h | 265 ++++++++++++++++++ include/envoy/server/filter_config.h | 231 +-------------- .../envoy/server/transport_socket_config.h | 6 +- source/common/secret/sds_api.h | 8 +- source/common/secret/secret_manager_impl.h | 1 - source/server/server.h | 5 +- source/server/transport_socket_config_impl.h | 5 +- .../common/secret/secret_manager_impl_test.cc | 16 +- .../tls/context_impl_test.cc | 10 +- .../transport_sockets/tls/ssl_socket_test.cc | 4 +- test/mocks/server/mocks.h | 4 +- 13 files changed, 327 insertions(+), 262 deletions(-) create mode 100644 include/envoy/server/factory_context.h diff --git a/include/envoy/server/BUILD b/include/envoy/server/BUILD index 8803acb34cc5..dd72215ae18e 100644 --- a/include/envoy/server/BUILD +++ b/include/envoy/server/BUILD @@ -154,8 +154,8 @@ envoy_cc_library( ) envoy_cc_library( - name = "filter_config_interface", - hdrs = ["filter_config.h"], + name = "factory_context_interface", + hdrs = ["factory_context.h"], deps = [ ":admin_interface", ":drain_manager_interface", @@ -173,7 +173,6 @@ envoy_cc_library( "//include/envoy/network:drain_decision_interface", "//include/envoy/runtime:runtime_interface", "//include/envoy/server:overload_manager_interface", - "//include/envoy/server:transport_socket_config_interface", "//include/envoy/singleton:manager_interface", "//include/envoy/thread_local:thread_local_interface", "//include/envoy/tracing:http_tracer_interface", @@ -185,6 +184,30 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "filter_config_interface", + hdrs = ["filter_config.h"], + deps = [ + ":drain_manager_interface", + ":factory_context_interface", + ":lifecycle_notifier_interface", + ":process_context_interface", + "//include/envoy/access_log:access_log_interface", + "//include/envoy/config:typed_config_interface", + "//include/envoy/http:codes_interface", + "//include/envoy/http:filter_interface", + "//include/envoy/server:overload_manager_interface", + "//include/envoy/server:transport_socket_config_interface", + "//include/envoy/singleton:manager_interface", + "//include/envoy/thread_local:thread_local_interface", + "//include/envoy/tracing:http_tracer_interface", + "//include/envoy/upstream:cluster_manager_interface", + "//source/common/common:assert_lib", + "//source/common/common:macros", + "//source/common/protobuf", + ], +) + envoy_cc_library( name = "lifecycle_notifier_interface", hdrs = ["lifecycle_notifier.h"], @@ -220,6 +243,7 @@ envoy_cc_library( name = "transport_socket_config_interface", hdrs = ["transport_socket_config.h"], deps = [ + ":factory_context_interface", "//include/envoy/config:typed_config_interface", "//include/envoy/event:dispatcher_interface", "//include/envoy/init:manager_interface", @@ -298,7 +322,7 @@ envoy_cc_library( name = "bootstrap_extension_config_interface", hdrs 
= ["bootstrap_extension_config.h"], deps = [ - ":filter_config_interface", + ":factory_context_interface", "//include/envoy/config:typed_config_interface", ], ) diff --git a/include/envoy/server/bootstrap_extension_config.h b/include/envoy/server/bootstrap_extension_config.h index 9b0d6e043396..7eaf4dcb2530 100644 --- a/include/envoy/server/bootstrap_extension_config.h +++ b/include/envoy/server/bootstrap_extension_config.h @@ -2,7 +2,7 @@ #include -#include "envoy/server/filter_config.h" +#include "envoy/server/factory_context.h" #include "common/protobuf/protobuf.h" diff --git a/include/envoy/server/factory_context.h b/include/envoy/server/factory_context.h new file mode 100644 index 000000000000..245499464e1d --- /dev/null +++ b/include/envoy/server/factory_context.h @@ -0,0 +1,265 @@ +#pragma once + +#include + +#include "envoy/access_log/access_log.h" +#include "envoy/config/core/v3/base.pb.h" +#include "envoy/config/typed_config.h" +#include "envoy/grpc/context.h" +#include "envoy/http/codes.h" +#include "envoy/http/context.h" +#include "envoy/http/filter.h" +#include "envoy/init/manager.h" +#include "envoy/network/drain_decision.h" +#include "envoy/network/filter.h" +#include "envoy/runtime/runtime.h" +#include "envoy/server/admin.h" +#include "envoy/server/drain_manager.h" +#include "envoy/server/lifecycle_notifier.h" +#include "envoy/server/overload_manager.h" +#include "envoy/server/process_context.h" +#include "envoy/singleton/manager.h" +#include "envoy/stats/scope.h" +#include "envoy/thread_local/thread_local.h" +#include "envoy/tracing/http_tracer.h" +#include "envoy/upstream/cluster_manager.h" + +#include "common/common/assert.h" +#include "common/common/macros.h" +#include "common/protobuf/protobuf.h" + +namespace Envoy { +namespace Server { +namespace Configuration { + +/** + * Common interface for downstream and upstream network filters. + */ +class CommonFactoryContext { +public: + virtual ~CommonFactoryContext() = default; + + /** + * @return Upstream::ClusterManager& singleton for use by the entire server. + */ + virtual Upstream::ClusterManager& clusterManager() PURE; + + /** + * @return Event::Dispatcher& the main thread's dispatcher. This dispatcher should be used + * for all singleton processing. + */ + virtual Event::Dispatcher& dispatcher() PURE; + + /** + * @return information about the local environment the server is running in. + */ + virtual const LocalInfo::LocalInfo& localInfo() const PURE; + + /** + * @return ProtobufMessage::ValidationContext& validation visitor for xDS and static configuration + * messages. + */ + virtual ProtobufMessage::ValidationContext& messageValidationContext() PURE; + + /** + * @return RandomGenerator& the random generator for the server. + */ + virtual Envoy::Runtime::RandomGenerator& random() PURE; + + /** + * @return Runtime::Loader& the singleton runtime loader for the server. + */ + virtual Envoy::Runtime::Loader& runtime() PURE; + + /** + * @return Stats::Scope& the filter's stats scope. + */ + virtual Stats::Scope& scope() PURE; + + /** + * @return Singleton::Manager& the server-wide singleton manager. + */ + virtual Singleton::Manager& singletonManager() PURE; + + /** + * @return ThreadLocal::SlotAllocator& the thread local storage engine for the server. This is + * used to allow runtime lockless updates to configuration, etc. across multiple threads. + */ + virtual ThreadLocal::SlotAllocator& threadLocal() PURE; + + /** + * @return Server::Admin& the server's global admin HTTP endpoint. 
+ */ + virtual Server::Admin& admin() PURE; + + /** + * @return TimeSource& a reference to the time source. + */ + virtual TimeSource& timeSource() PURE; + + /** + * @return Api::Api& a reference to the api object. + */ + virtual Api::Api& api() PURE; +}; + +/** + * ServerFactoryContext is an specialization of common interface for downstream and upstream network + * filters. The implementation guarantees the lifetime is no shorter than server. It could be used + * across listeners. + */ +class ServerFactoryContext : public virtual CommonFactoryContext { +public: + ~ServerFactoryContext() override = default; + + /** + * @return the server-wide grpc context. + */ + virtual Grpc::Context& grpcContext() PURE; + + /** + * @return DrainManager& the server-wide drain manager. + */ + virtual Envoy::Server::DrainManager& drainManager() PURE; + + /** + * @return the server's init manager. This can be used for extensions that need to initialize + * after cluster manager init but before the server starts listening. All extensions + * should register themselves during configuration load. initialize() will be called on + * each registered target after cluster manager init but before the server starts + * listening. Once all targets have initialized and invoked their callbacks, the server + * will start listening. + */ + virtual Init::Manager& initManager() PURE; + + /** + * @return ServerLifecycleNotifier& the lifecycle notifier for the server. + */ + virtual ServerLifecycleNotifier& lifecycleNotifier() PURE; +}; + +/** + * Context passed to network and HTTP filters to access server resources. + * TODO(mattklein123): When we lock down visibility of the rest of the code, filters should only + * access the rest of the server via interfaces exposed here. + */ +class FactoryContext : public virtual CommonFactoryContext { +public: + ~FactoryContext() override = default; + + /** + * @return ServerFactoryContext which lifetime is no shorter than the server. + */ + virtual ServerFactoryContext& getServerFactoryContext() const PURE; + + /** + * @return TransportSocketFactoryContext which lifetime is no shorter than the server. + */ + virtual TransportSocketFactoryContext& getTransportSocketFactoryContext() const PURE; + + /** + * @return AccessLogManager for use by the entire server. + */ + virtual AccessLog::AccessLogManager& accessLogManager() PURE; + + /** + * @return envoy::config::core::v3::TrafficDirection the direction of the traffic relative to + * the local proxy. + */ + virtual envoy::config::core::v3::TrafficDirection direction() const PURE; + + /** + * @return const Network::DrainDecision& a drain decision that filters can use to determine if + * they should be doing graceful closes on connections when possible. + */ + virtual const Network::DrainDecision& drainDecision() PURE; + + /** + * @return whether external healthchecks are currently failed or not. + */ + virtual bool healthCheckFailed() PURE; + + /** + * @return the server's init manager. This can be used for extensions that need to initialize + * after cluster manager init but before the server starts listening. All extensions + * should register themselves during configuration load. initialize() will be called on + * each registered target after cluster manager init but before the server starts + * listening. Once all targets have initialized and invoked their callbacks, the server + * will start listening. + */ + virtual Init::Manager& initManager() PURE; + + /** + * @return ServerLifecycleNotifier& the lifecycle notifier for the server. 
+ */ + virtual ServerLifecycleNotifier& lifecycleNotifier() PURE; + + /** + * @return Stats::Scope& the listener's stats scope. + */ + virtual Stats::Scope& listenerScope() PURE; + + /** + * @return const envoy::config::core::v3::Metadata& the config metadata associated with this + * listener. + */ + virtual const envoy::config::core::v3::Metadata& listenerMetadata() const PURE; + + /** + * @return OverloadManager& the overload manager for the server. + */ + virtual OverloadManager& overloadManager() PURE; + + /** + * @return Http::Context& a reference to the http context. + */ + virtual Http::Context& httpContext() PURE; + + /** + * @return Grpc::Context& a reference to the grpc context. + */ + virtual Grpc::Context& grpcContext() PURE; + + /** + * @return ProcessContextOptRef an optional reference to the + * process context. Will be unset when running in validation mode. + */ + virtual ProcessContextOptRef processContext() PURE; + + /** + * @return ProtobufMessage::ValidationVisitor& validation visitor for filter configuration + * messages. + */ + virtual ProtobufMessage::ValidationVisitor& messageValidationVisitor() PURE; +}; + +/** + * An implementation of FactoryContext. The life time is no shorter than the created filter chains. + * The life time is no longer than the owning listener. It should be used to create + * NetworkFilterChain. + */ +class FilterChainFactoryContext : public virtual FactoryContext { +public: + /** + * Set the flag that all attached filter chains will be destroyed. + */ + virtual void startDraining() PURE; +}; + +using FilterChainFactoryContextPtr = std::unique_ptr; + +/** + * An implementation of FactoryContext. The life time should cover the lifetime of the filter chains + * and connections. It can be used to create ListenerFilterChain. + */ +class ListenerFactoryContext : public virtual FactoryContext { +public: + /** + * Give access to the listener configuration + */ + virtual const Network::ListenerConfig& listenerConfig() const PURE; +}; + +} // namespace Configuration +} // namespace Server +} // namespace Envoy diff --git a/include/envoy/server/filter_config.h b/include/envoy/server/filter_config.h index 75d8dd24d371..343e6c87ad28 100644 --- a/include/envoy/server/filter_config.h +++ b/include/envoy/server/filter_config.h @@ -2,28 +2,12 @@ #include -#include "envoy/access_log/access_log.h" -#include "envoy/config/core/v3/base.pb.h" #include "envoy/config/typed_config.h" -#include "envoy/grpc/context.h" -#include "envoy/http/codes.h" -#include "envoy/http/context.h" #include "envoy/http/filter.h" #include "envoy/init/manager.h" -#include "envoy/network/drain_decision.h" #include "envoy/network/filter.h" -#include "envoy/runtime/runtime.h" -#include "envoy/server/admin.h" #include "envoy/server/drain_manager.h" -#include "envoy/server/lifecycle_notifier.h" -#include "envoy/server/overload_manager.h" -#include "envoy/server/process_context.h" -#include "envoy/server/transport_socket_config.h" -#include "envoy/singleton/manager.h" -#include "envoy/stats/scope.h" -#include "envoy/thread_local/thread_local.h" -#include "envoy/tracing/http_tracer.h" -#include "envoy/upstream/cluster_manager.h" +#include "envoy/server/factory_context.h" #include "common/common/assert.h" #include "common/common/macros.h" @@ -33,219 +17,6 @@ namespace Envoy { namespace Server { namespace Configuration { -/** - * Common interface for downstream and upstream network filters. 
- */ -class CommonFactoryContext { -public: - virtual ~CommonFactoryContext() = default; - - /** - * @return Upstream::ClusterManager& singleton for use by the entire server. - */ - virtual Upstream::ClusterManager& clusterManager() PURE; - - /** - * @return Event::Dispatcher& the main thread's dispatcher. This dispatcher should be used - * for all singleton processing. - */ - virtual Event::Dispatcher& dispatcher() PURE; - - /** - * @return information about the local environment the server is running in. - */ - virtual const LocalInfo::LocalInfo& localInfo() const PURE; - - /** - * @return ProtobufMessage::ValidationContext& validation visitor for xDS and static configuration - * messages. - */ - virtual ProtobufMessage::ValidationContext& messageValidationContext() PURE; - - /** - * @return RandomGenerator& the random generator for the server. - */ - virtual Envoy::Runtime::RandomGenerator& random() PURE; - - /** - * @return Runtime::Loader& the singleton runtime loader for the server. - */ - virtual Envoy::Runtime::Loader& runtime() PURE; - - /** - * @return Stats::Scope& the filter's stats scope. - */ - virtual Stats::Scope& scope() PURE; - - /** - * @return Singleton::Manager& the server-wide singleton manager. - */ - virtual Singleton::Manager& singletonManager() PURE; - - /** - * @return ThreadLocal::SlotAllocator& the thread local storage engine for the server. This is - * used to allow runtime lockless updates to configuration, etc. across multiple threads. - */ - virtual ThreadLocal::SlotAllocator& threadLocal() PURE; - - /** - * @return Server::Admin& the server's global admin HTTP endpoint. - */ - virtual Server::Admin& admin() PURE; - - /** - * @return TimeSource& a reference to the time source. - */ - virtual TimeSource& timeSource() PURE; - - /** - * @return Api::Api& a reference to the api object. - */ - virtual Api::Api& api() PURE; -}; - -/** - * ServerFactoryContext is an specialization of common interface for downstream and upstream network - * filters. The implementation guarantees the lifetime is no shorter than server. It could be used - * across listeners. - */ -class ServerFactoryContext : public virtual CommonFactoryContext { -public: - ~ServerFactoryContext() override = default; - - /** - * @return the server-wide grpc context. - */ - virtual Grpc::Context& grpcContext() PURE; - - /** - * @return DrainManager& the server-wide drain manager. - */ - virtual Envoy::Server::DrainManager& drainManager() PURE; -}; - -/** - * Context passed to network and HTTP filters to access server resources. - * TODO(mattklein123): When we lock down visibility of the rest of the code, filters should only - * access the rest of the server via interfaces exposed here. - */ -class FactoryContext : public virtual CommonFactoryContext { -public: - ~FactoryContext() override = default; - - /** - * @return ServerFactoryContext which lifetime is no shorter than the server. - */ - virtual ServerFactoryContext& getServerFactoryContext() const PURE; - - /** - * @return TransportSocketFactoryContext which lifetime is no shorter than the server. - */ - virtual TransportSocketFactoryContext& getTransportSocketFactoryContext() const PURE; - - /** - * @return AccessLogManager for use by the entire server. - */ - virtual AccessLog::AccessLogManager& accessLogManager() PURE; - - /** - * @return envoy::config::core::v3::TrafficDirection the direction of the traffic relative to - * the local proxy. 
- */ - virtual envoy::config::core::v3::TrafficDirection direction() const PURE; - - /** - * @return const Network::DrainDecision& a drain decision that filters can use to determine if - * they should be doing graceful closes on connections when possible. - */ - virtual const Network::DrainDecision& drainDecision() PURE; - - /** - * @return whether external healthchecks are currently failed or not. - */ - virtual bool healthCheckFailed() PURE; - - /** - * @return the server's init manager. This can be used for extensions that need to initialize - * after cluster manager init but before the server starts listening. All extensions - * should register themselves during configuration load. initialize() will be called on - * each registered target after cluster manager init but before the server starts - * listening. Once all targets have initialized and invoked their callbacks, the server - * will start listening. - */ - virtual Init::Manager& initManager() PURE; - - /** - * @return ServerLifecycleNotifier& the lifecycle notifier for the server. - */ - virtual ServerLifecycleNotifier& lifecycleNotifier() PURE; - - /** - * @return Stats::Scope& the listener's stats scope. - */ - virtual Stats::Scope& listenerScope() PURE; - - /** - * @return const envoy::config::core::v3::Metadata& the config metadata associated with this - * listener. - */ - virtual const envoy::config::core::v3::Metadata& listenerMetadata() const PURE; - - /** - * @return OverloadManager& the overload manager for the server. - */ - virtual OverloadManager& overloadManager() PURE; - - /** - * @return Http::Context& a reference to the http context. - */ - virtual Http::Context& httpContext() PURE; - - /** - * @return Grpc::Context& a reference to the grpc context. - */ - virtual Grpc::Context& grpcContext() PURE; - - /** - * @return ProcessContextOptRef an optional reference to the - * process context. Will be unset when running in validation mode. - */ - virtual ProcessContextOptRef processContext() PURE; - - /** - * @return ProtobufMessage::ValidationVisitor& validation visitor for filter configuration - * messages. - */ - virtual ProtobufMessage::ValidationVisitor& messageValidationVisitor() PURE; -}; - -/** - * An implementation of FactoryContext. The life time is no shorter than the created filter chains. - * The life time is no longer than the owning listener. It should be used to create - * NetworkFilterChain. - */ -class FilterChainFactoryContext : public virtual FactoryContext { -public: - /** - * Set the flag that all attached filter chains will be destroyed. - */ - virtual void startDraining() PURE; -}; - -using FilterChainFactoryContextPtr = std::unique_ptr; - -/** - * An implementation of FactoryContext. The life time should cover the lifetime of the filter chains - * and connections. It can be used to create ListenerFilterChain. 
- */ -class ListenerFactoryContext : public virtual FactoryContext { -public: - /** - * Give access to the listener configuration - */ - virtual const Network::ListenerConfig& listenerConfig() const PURE; -}; - /** * Common interface for listener filters and UDP listener filters */ diff --git a/include/envoy/server/transport_socket_config.h b/include/envoy/server/transport_socket_config.h index a3dd4d5dac6d..ac3337738017 100644 --- a/include/envoy/server/transport_socket_config.h +++ b/include/envoy/server/transport_socket_config.h @@ -9,6 +9,7 @@ #include "envoy/network/transport_socket.h" #include "envoy/runtime/runtime.h" #include "envoy/secret/secret_manager.h" +#include "envoy/server/factory_context.h" #include "envoy/singleton/manager.h" #include "envoy/ssl/context_manager.h" #include "envoy/stats/scope.h" @@ -74,10 +75,9 @@ class TransportSocketFactoryContext { virtual Stats::Store& stats() PURE; /** - * @return a pointer pointing to the instance of an init manager, or nullptr - * if not set. + * @return a reference to the instance of an init manager. */ - virtual Init::Manager* initManager() PURE; + virtual Init::Manager& initManager() PURE; /** * @return the server's singleton manager. diff --git a/source/common/secret/sds_api.h b/source/common/secret/sds_api.h index 0ca7c93f24aa..d0173f8ae470 100644 --- a/source/common/secret/sds_api.h +++ b/source/common/secret/sds_api.h @@ -118,7 +118,7 @@ class TlsCertificateSdsApi : public SdsApi, public TlsCertificateConfigProvider sds_config, sds_config_name, secret_provider_context.clusterManager().subscriptionFactory(), secret_provider_context.dispatcher().timeSource(), secret_provider_context.messageValidationVisitor(), secret_provider_context.stats(), - *secret_provider_context.initManager(), destructor_cb, secret_provider_context.dispatcher(), + secret_provider_context.initManager(), destructor_cb, secret_provider_context.dispatcher(), secret_provider_context.api()); } @@ -179,7 +179,7 @@ class CertificateValidationContextSdsApi : public SdsApi, sds_config, sds_config_name, secret_provider_context.clusterManager().subscriptionFactory(), secret_provider_context.dispatcher().timeSource(), secret_provider_context.messageValidationVisitor(), secret_provider_context.stats(), - *secret_provider_context.initManager(), destructor_cb, secret_provider_context.dispatcher(), + secret_provider_context.initManager(), destructor_cb, secret_provider_context.dispatcher(), secret_provider_context.api()); } CertificateValidationContextSdsApi(const envoy::config::core::v3::ConfigSource& sds_config, @@ -250,7 +250,7 @@ class TlsSessionTicketKeysSdsApi : public SdsApi, public TlsSessionTicketKeysCon sds_config, sds_config_name, secret_provider_context.clusterManager().subscriptionFactory(), secret_provider_context.dispatcher().timeSource(), secret_provider_context.messageValidationVisitor(), secret_provider_context.stats(), - *secret_provider_context.initManager(), destructor_cb, secret_provider_context.dispatcher(), + secret_provider_context.initManager(), destructor_cb, secret_provider_context.dispatcher(), secret_provider_context.api()); } @@ -321,7 +321,7 @@ class GenericSecretSdsApi : public SdsApi, public GenericSecretConfigProvider { sds_config, sds_config_name, secret_provider_context.clusterManager().subscriptionFactory(), secret_provider_context.dispatcher().timeSource(), secret_provider_context.messageValidationVisitor(), secret_provider_context.stats(), - *secret_provider_context.initManager(), destructor_cb, 
secret_provider_context.dispatcher(), + secret_provider_context.initManager(), destructor_cb, secret_provider_context.dispatcher(), secret_provider_context.api()); } diff --git a/source/common/secret/secret_manager_impl.h b/source/common/secret/secret_manager_impl.h index 002bed3decb2..d7be0e8b6b54 100644 --- a/source/common/secret/secret_manager_impl.h +++ b/source/common/secret/secret_manager_impl.h @@ -88,7 +88,6 @@ class SecretManagerImpl : public SecretManager { std::function unregister_secret_provider = [map_key, this]() { removeDynamicSecretProvider(map_key); }; - ASSERT(secret_provider_context.initManager() != nullptr); secret_provider = SecretType::create(secret_provider_context, sds_config_source, config_name, unregister_secret_provider); dynamic_secret_providers_[map_key] = secret_provider; diff --git a/source/server/server.h b/source/server/server.h index 2ec00d266696..e6014f038645 100644 --- a/source/server/server.h +++ b/source/server/server.h @@ -171,19 +171,20 @@ class ServerFactoryContextImpl : public Configuration::ServerFactoryContext, Api::Api& api() override { return server_.api(); } Grpc::Context& grpcContext() override { return server_.grpcContext(); } Envoy::Server::DrainManager& drainManager() override { return server_.drainManager(); } + ServerLifecycleNotifier& lifecycleNotifier() override { return server_.lifecycleNotifier(); } // Configuration::TransportSocketFactoryContext Ssl::ContextManager& sslContextManager() override { return server_.sslContextManager(); } Secret::SecretManager& secretManager() override { return server_.secretManager(); } Stats::Store& stats() override { return server_.stats(); } - Init::Manager* initManager() override { return &server_.initManager(); } + Init::Manager& initManager() override { return server_.initManager(); } ProtobufMessage::ValidationVisitor& messageValidationVisitor() override { // Server has two message validation visitors, one for static and // other for dynamic configuration. Choose the dynamic validation // visitor if server's init manager indicates that the server is // in the Initialized state, as this state is engaged right after // the static configuration (e.g., bootstrap) has been completed. - return initManager()->state() == Init::Manager::State::Initialized + return initManager().state() == Init::Manager::State::Initialized ? 
server_.messageValidationContext().dynamicValidationVisitor() : server_.messageValidationContext().staticValidationVisitor(); } diff --git a/source/server/transport_socket_config_impl.h b/source/server/transport_socket_config_impl.h index 9e5bb4639e92..6a7a8ec17613 100644 --- a/source/server/transport_socket_config_impl.h +++ b/source/server/transport_socket_config_impl.h @@ -41,7 +41,10 @@ class TransportSocketFactoryContextImpl : public TransportSocketFactoryContext { Event::Dispatcher& dispatcher() override { return dispatcher_; } Envoy::Runtime::RandomGenerator& random() override { return random_; } Stats::Store& stats() override { return stats_; } - Init::Manager* initManager() override { return init_manager_; } + Init::Manager& initManager() override { + ASSERT(init_manager_ != nullptr); + return *init_manager_; + } Singleton::Manager& singletonManager() override { return singleton_manager_; } ThreadLocal::SlotAllocator& threadLocal() override { return tls_; } ProtobufMessage::ValidationVisitor& messageValidationVisitor() override { diff --git a/test/common/secret/secret_manager_impl_test.cc b/test/common/secret/secret_manager_impl_test.cc index 4ee5fe108806..3790fbdf7c95 100644 --- a/test/common/secret/secret_manager_impl_test.cc +++ b/test/common/secret/secret_manager_impl_test.cc @@ -267,7 +267,7 @@ TEST_F(SecretManagerImplTest, DeduplicateDynamicTlsCertificateSecretProvider) { init_target_handle = target.createHandle("test"); })); EXPECT_CALL(secret_context, stats()).WillRepeatedly(ReturnRef(stats)); - EXPECT_CALL(secret_context, initManager()).WillRepeatedly(Return(&init_manager)); + EXPECT_CALL(secret_context, initManager()).WillRepeatedly(ReturnRef(init_manager)); EXPECT_CALL(secret_context, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); EXPECT_CALL(secret_context, localInfo()).WillRepeatedly(ReturnRef(local_info)); @@ -350,7 +350,7 @@ TEST_F(SecretManagerImplTest, SdsDynamicSecretUpdateSuccess) { init_target_handle = target.createHandle("test"); })); EXPECT_CALL(secret_context, stats()).WillOnce(ReturnRef(stats)); - EXPECT_CALL(secret_context, initManager()).WillRepeatedly(Return(&init_manager)); + EXPECT_CALL(secret_context, initManager()).WillRepeatedly(ReturnRef(init_manager)); EXPECT_CALL(secret_context, dispatcher()).WillRepeatedly(ReturnRef(*dispatcher_)); EXPECT_CALL(secret_context, localInfo()).WillOnce(ReturnRef(local_info)); EXPECT_CALL(secret_context, api()).WillRepeatedly(ReturnRef(*api_)); @@ -400,7 +400,7 @@ TEST_F(SecretManagerImplTest, SdsDynamicGenericSecret) { EXPECT_CALL(secret_context, dispatcher()).WillRepeatedly(ReturnRef(*dispatcher_)); EXPECT_CALL(secret_context, messageValidationVisitor()).WillOnce(ReturnRef(validation_visitor)); EXPECT_CALL(secret_context, stats()).WillOnce(ReturnRef(stats)); - EXPECT_CALL(secret_context, initManager()).WillRepeatedly(Return(&init_manager)); + EXPECT_CALL(secret_context, initManager()).WillRepeatedly(ReturnRef(init_manager)); EXPECT_CALL(secret_context, localInfo()).WillOnce(ReturnRef(local_info)); EXPECT_CALL(secret_context, api()).WillRepeatedly(ReturnRef(*api_)); EXPECT_CALL(init_manager, add(_)) @@ -450,7 +450,7 @@ TEST_F(SecretManagerImplTest, ConfigDumpHandler) { init_target_handle = target.createHandle("test"); })); EXPECT_CALL(secret_context, stats()).WillRepeatedly(ReturnRef(stats)); - EXPECT_CALL(secret_context, initManager()).WillRepeatedly(Return(&init_manager)); + EXPECT_CALL(secret_context, initManager()).WillRepeatedly(ReturnRef(init_manager)); EXPECT_CALL(secret_context, 
dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); EXPECT_CALL(secret_context, localInfo()).WillRepeatedly(ReturnRef(local_info)); @@ -708,7 +708,7 @@ TEST_F(SecretManagerImplTest, ConfigDumpHandlerWarmingSecrets) { init_target_handle = target.createHandle("test"); })); EXPECT_CALL(secret_context, stats()).WillRepeatedly(ReturnRef(stats)); - EXPECT_CALL(secret_context, initManager()).WillRepeatedly(Return(&init_manager)); + EXPECT_CALL(secret_context, initManager()).WillRepeatedly(ReturnRef(init_manager)); EXPECT_CALL(secret_context, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); EXPECT_CALL(secret_context, localInfo()).WillRepeatedly(ReturnRef(local_info)); @@ -841,7 +841,7 @@ TEST_F(SecretManagerImplTest, ConfigDumpHandlerStaticSecrets) { init_target_handle = target.createHandle("test"); })); EXPECT_CALL(secret_context, stats()).WillRepeatedly(ReturnRef(stats)); - EXPECT_CALL(secret_context, initManager()).WillRepeatedly(Return(&init_manager)); + EXPECT_CALL(secret_context, initManager()).WillRepeatedly(ReturnRef(init_manager)); EXPECT_CALL(secret_context, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); EXPECT_CALL(secret_context, localInfo()).WillRepeatedly(ReturnRef(local_info)); @@ -913,7 +913,7 @@ TEST_F(SecretManagerImplTest, ConfigDumpHandlerStaticValidationContext) { init_target_handle = target.createHandle("test"); })); EXPECT_CALL(secret_context, stats()).WillRepeatedly(ReturnRef(stats)); - EXPECT_CALL(secret_context, initManager()).WillRepeatedly(Return(&init_manager)); + EXPECT_CALL(secret_context, initManager()).WillRepeatedly(ReturnRef(init_manager)); EXPECT_CALL(secret_context, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); EXPECT_CALL(secret_context, localInfo()).WillRepeatedly(ReturnRef(local_info)); @@ -958,7 +958,7 @@ TEST_F(SecretManagerImplTest, ConfigDumpHandlerStaticSessionTicketsContext) { init_target_handle = target.createHandle("test"); })); EXPECT_CALL(secret_context, stats()).WillRepeatedly(ReturnRef(stats)); - EXPECT_CALL(secret_context, initManager()).WillRepeatedly(Return(&init_manager)); + EXPECT_CALL(secret_context, initManager()).WillRepeatedly(ReturnRef(init_manager)); EXPECT_CALL(secret_context, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); EXPECT_CALL(secret_context, localInfo()).WillRepeatedly(ReturnRef(local_info)); diff --git a/test/extensions/transport_sockets/tls/context_impl_test.cc b/test/extensions/transport_sockets/tls/context_impl_test.cc index cb790eff3b99..468e22f0bd26 100644 --- a/test/extensions/transport_sockets/tls/context_impl_test.cc +++ b/test/extensions/transport_sockets/tls/context_impl_test.cc @@ -593,7 +593,7 @@ TEST_F(SslServerContextImplTicketTest, TicketKeySdsNotReady) { // EXPECT_CALL(factory_context_, random()).WillOnce(ReturnRef(random)); EXPECT_CALL(factory_context_, stats()).WillOnce(ReturnRef(stats)); EXPECT_CALL(factory_context_, clusterManager()).WillOnce(ReturnRef(cluster_manager)); - EXPECT_CALL(factory_context_, initManager()).WillRepeatedly(Return(&init_manager)); + EXPECT_CALL(factory_context_, initManager()).WillRepeatedly(ReturnRef(init_manager)); auto* sds_secret_configs = tls_context.mutable_session_ticket_keys_sds_secret_config(); sds_secret_configs->set_name("abc.com"); sds_secret_configs->mutable_sds_config(); @@ -1001,7 +1001,7 @@ TEST_F(ClientContextConfigImplTest, SecretNotReady) { NiceMock dispatcher; EXPECT_CALL(factory_context_, localInfo()).WillOnce(ReturnRef(local_info)); EXPECT_CALL(factory_context_, stats()).WillOnce(ReturnRef(stats)); - EXPECT_CALL(factory_context_, 
initManager()).WillRepeatedly(Return(&init_manager)); + EXPECT_CALL(factory_context_, initManager()).WillRepeatedly(ReturnRef(init_manager)); EXPECT_CALL(factory_context_, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); auto sds_secret_configs = tls_context.mutable_common_tls_context()->mutable_tls_certificate_sds_secret_configs()->Add(); @@ -1033,7 +1033,7 @@ TEST_F(ClientContextConfigImplTest, ValidationContextNotReady) { NiceMock dispatcher; EXPECT_CALL(factory_context_, localInfo()).WillOnce(ReturnRef(local_info)); EXPECT_CALL(factory_context_, stats()).WillOnce(ReturnRef(stats)); - EXPECT_CALL(factory_context_, initManager()).WillRepeatedly(Return(&init_manager)); + EXPECT_CALL(factory_context_, initManager()).WillRepeatedly(ReturnRef(init_manager)); EXPECT_CALL(factory_context_, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); auto sds_secret_configs = tls_context.mutable_common_tls_context()->mutable_validation_context_sds_secret_config(); @@ -1339,7 +1339,7 @@ TEST_F(ServerContextConfigImplTest, SecretNotReady) { NiceMock dispatcher; EXPECT_CALL(factory_context_, localInfo()).WillOnce(ReturnRef(local_info)); EXPECT_CALL(factory_context_, stats()).WillOnce(ReturnRef(stats)); - EXPECT_CALL(factory_context_, initManager()).WillRepeatedly(Return(&init_manager)); + EXPECT_CALL(factory_context_, initManager()).WillRepeatedly(ReturnRef(init_manager)); EXPECT_CALL(factory_context_, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); auto sds_secret_configs = tls_context.mutable_common_tls_context()->mutable_tls_certificate_sds_secret_configs()->Add(); @@ -1371,7 +1371,7 @@ TEST_F(ServerContextConfigImplTest, ValidationContextNotReady) { NiceMock dispatcher; EXPECT_CALL(factory_context_, localInfo()).WillOnce(ReturnRef(local_info)); EXPECT_CALL(factory_context_, stats()).WillOnce(ReturnRef(stats)); - EXPECT_CALL(factory_context_, initManager()).WillRepeatedly(Return(&init_manager)); + EXPECT_CALL(factory_context_, initManager()).WillRepeatedly(ReturnRef(init_manager)); EXPECT_CALL(factory_context_, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); auto sds_secret_configs = tls_context.mutable_common_tls_context()->mutable_validation_context_sds_secret_config(); diff --git a/test/extensions/transport_sockets/tls/ssl_socket_test.cc b/test/extensions/transport_sockets/tls/ssl_socket_test.cc index 49b98320ff9d..aa2c429a4ed0 100644 --- a/test/extensions/transport_sockets/tls/ssl_socket_test.cc +++ b/test/extensions/transport_sockets/tls/ssl_socket_test.cc @@ -4211,7 +4211,7 @@ TEST_P(SslSocketTest, DownstreamNotReadySslSocket) { EXPECT_CALL(factory_context, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); EXPECT_CALL(factory_context, localInfo()).WillOnce(ReturnRef(local_info)); EXPECT_CALL(factory_context, stats()).WillOnce(ReturnRef(stats_store)); - EXPECT_CALL(factory_context, initManager()).WillRepeatedly(Return(&init_manager)); + EXPECT_CALL(factory_context, initManager()).WillRepeatedly(ReturnRef(init_manager)); envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context; auto sds_secret_configs = @@ -4246,7 +4246,7 @@ TEST_P(SslSocketTest, UpstreamNotReadySslSocket) { NiceMock dispatcher; EXPECT_CALL(factory_context, localInfo()).WillOnce(ReturnRef(local_info)); EXPECT_CALL(factory_context, stats()).WillOnce(ReturnRef(stats_store)); - EXPECT_CALL(factory_context, initManager()).WillRepeatedly(Return(&init_manager)); + EXPECT_CALL(factory_context, initManager()).WillRepeatedly(ReturnRef(init_manager)); EXPECT_CALL(factory_context, 
dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context; diff --git a/test/mocks/server/mocks.h b/test/mocks/server/mocks.h index 1fb58dc9ea0b..67ece91b6ff7 100644 --- a/test/mocks/server/mocks.h +++ b/test/mocks/server/mocks.h @@ -515,6 +515,8 @@ class MockServerFactoryContext : public virtual ServerFactoryContext { MOCK_METHOD(Api::Api&, api, ()); Grpc::Context& grpcContext() override { return grpc_context_; } MOCK_METHOD(Server::DrainManager&, drainManager, ()); + MOCK_METHOD(Init::Manager&, initManager, ()); + MOCK_METHOD(ServerLifecycleNotifier&, lifecycleNotifier, ()); testing::NiceMock cluster_manager_; testing::NiceMock dispatcher_; @@ -604,7 +606,7 @@ class MockTransportSocketFactoryContext : public TransportSocketFactoryContext { MOCK_METHOD(Event::Dispatcher&, dispatcher, ()); MOCK_METHOD(Envoy::Runtime::RandomGenerator&, random, ()); MOCK_METHOD(Stats::Store&, stats, ()); - MOCK_METHOD(Init::Manager*, initManager, ()); + MOCK_METHOD(Init::Manager&, initManager, ()); MOCK_METHOD(Singleton::Manager&, singletonManager, ()); MOCK_METHOD(ThreadLocal::SlotAllocator&, threadLocal, ()); MOCK_METHOD(ProtobufMessage::ValidationVisitor&, messageValidationVisitor, ()); From 6859159289d433b57ae15c4e943114f90befdf6e Mon Sep 17 00:00:00 2001 From: Piotr Sikora Date: Mon, 8 Jun 2020 08:24:50 -0700 Subject: [PATCH 317/909] docs: add 1.12.4, 1.13.2 and 1.14.2 release notes. (#11495) Signed-off-by: Piotr Sikora --- docs/root/version_history/v1.12.4.rst | 8 ++++++++ docs/root/version_history/v1.13.2.rst | 8 ++++++++ docs/root/version_history/v1.14.2.rst | 7 +++++++ docs/root/version_history/version_history.rst | 3 +++ 4 files changed, 26 insertions(+) create mode 100644 docs/root/version_history/v1.12.4.rst create mode 100644 docs/root/version_history/v1.13.2.rst create mode 100644 docs/root/version_history/v1.14.2.rst diff --git a/docs/root/version_history/v1.12.4.rst b/docs/root/version_history/v1.12.4.rst new file mode 100644 index 000000000000..1635bbb5f000 --- /dev/null +++ b/docs/root/version_history/v1.12.4.rst @@ -0,0 +1,8 @@ +1.12.4 (June 8, 2020) +===================== + +Changes +------- + +* http: added :ref:`headers_with_underscores_action setting ` to control how client requests with header names containing underscore characters are handled. The options are to allow such headers, reject request or drop headers. The default is to allow headers, preserving existing behavior. +* http: fixed CVE-2020-11080 by rejecting HTTP/2 SETTINGS frames with too many parameters. diff --git a/docs/root/version_history/v1.13.2.rst b/docs/root/version_history/v1.13.2.rst new file mode 100644 index 000000000000..641bbaa451d4 --- /dev/null +++ b/docs/root/version_history/v1.13.2.rst @@ -0,0 +1,8 @@ +1.13.2 (June 8, 2020) +===================== + +Changes +------- + +* http: added :ref:`headers_with_underscores_action setting ` to control how client requests with header names containing underscore characters are handled. The options are to allow such headers, reject request or drop headers. The default is to allow headers, preserving existing behavior. +* http: fixed CVE-2020-11080 by rejecting HTTP/2 SETTINGS frames with too many parameters. 
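A mechanical but recurring piece of the ServerFactoryContext change above (#11485) is in its tests: every EXPECT_CALL(..., initManager()) switches from Return(&init_manager) to ReturnRef(init_manager), because the accessor now returns Init::Manager& instead of Init::Manager*, and gmock's Return() cannot produce a reference from a pointer. A minimal gmock sketch of the same pattern, using a made-up Widget interface rather than the Envoy mocks:

#include "gmock/gmock.h"
#include "gtest/gtest.h"

// Reference-returning accessor, analogous to the new initManager() signature.
struct Widget {
  virtual ~Widget() = default;
  virtual int& counter() = 0;
};

struct MockWidget : public Widget {
  MOCK_METHOD(int&, counter, (), (override));
};

TEST(ReturnRefSketch, ReferenceReturningMock) {
  MockWidget widget;
  int value = 41;
  // Return(&value) no longer compiles once the return type is a reference;
  // ReturnRef hands back the caller-owned object instead.
  EXPECT_CALL(widget, counter()).WillRepeatedly(testing::ReturnRef(value));
  widget.counter()++; // Mutates `value` through the returned reference.
  EXPECT_EQ(42, value);
}

The same substitution appears throughout secret_manager_impl_test.cc, context_impl_test.cc and ssl_socket_test.cc above.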
diff --git a/docs/root/version_history/v1.14.2.rst b/docs/root/version_history/v1.14.2.rst new file mode 100644 index 000000000000..18bdf0bfce9d --- /dev/null +++ b/docs/root/version_history/v1.14.2.rst @@ -0,0 +1,7 @@ +1.14.2 (June 8, 2020) +===================== + +Changes +------- + +* http: fixed CVE-2020-11080 by rejecting HTTP/2 SETTINGS frames with too many parameters. diff --git a/docs/root/version_history/version_history.rst b/docs/root/version_history/version_history.rst index 6451336bffe7..527dec86ca8d 100644 --- a/docs/root/version_history/version_history.rst +++ b/docs/root/version_history/version_history.rst @@ -7,10 +7,13 @@ Version history :titlesonly: current + v1.14.2 v1.14.1 v1.14.0 + v1.13.2 v1.13.1 v1.13.0 + v1.12.4 v1.12.3 v1.12.2 v1.12.1 From 97ecef841fae9c7d2fc7b85cd56967c5f1d6ff6d Mon Sep 17 00:00:00 2001 From: Joshua Marantz Date: Mon, 8 Jun 2020 11:38:48 -0400 Subject: [PATCH 318/909] thread: add new optional Thread::Options argument to thread creation, which initially will allow specification of a name. (#11440) Currently Envoy threads are unnamed. When tuning performance it might be useful to be able set names for threads based on what they are used for. Later it might be desirable to cpu affinity or other attributes when creating threads, but for now just adding name visibility seems useful. Signed-off-by: Joshua Marantz --- include/envoy/thread/thread.h | 24 +++- .../access_log/access_log_manager_impl.cc | 3 +- source/common/common/posix/thread_impl.cc | 111 +++++++++++++++--- source/common/common/posix/thread_impl.h | 18 +-- source/common/common/win32/thread_impl.cc | 13 +- source/common/common/win32/thread_impl.h | 6 +- .../common/filesystem/win32/watcher_impl.cc | 5 +- .../common/grpc/google_async_client_impl.cc | 3 +- source/server/guarddog_impl.cc | 4 +- source/server/worker_impl.cc | 16 ++- test/common/common/thread_test.cc | 90 +++++++++++--- 11 files changed, 227 insertions(+), 66 deletions(-) diff --git a/include/envoy/thread/thread.h b/include/envoy/thread/thread.h index 70452ca5d29a..8633c03e1ebe 100644 --- a/include/envoy/thread/thread.h +++ b/include/envoy/thread/thread.h @@ -9,6 +9,9 @@ #include "common/common/thread_annotations.h" +#include "absl/strings/string_view.h" +#include "absl/types/optional.h" + namespace Envoy { namespace Thread { @@ -37,13 +40,25 @@ class Thread { virtual ~Thread() = default; /** - * Join on thread exit. + * @return the name of the thread. + */ + virtual std::string name() const PURE; + + /** + * Blocks until the thread exits. */ virtual void join() PURE; }; using ThreadPtr = std::unique_ptr; +// Options specified during thread creation. +struct Options { + std::string name_; // A name supplied for the thread. On Linux this is limited to 15 chars. +}; + +using OptionsOptConstRef = const absl::optional&; + /** * Interface providing a mechanism for creating threads. */ @@ -52,10 +67,13 @@ class ThreadFactory { virtual ~ThreadFactory() = default; /** - * Create a thread. + * Creates a thread, immediately starting the thread_routine. + * * @param thread_routine supplies the function to invoke in the thread. + * @param options supplies options specified on thread creation. 
*/ - virtual ThreadPtr createThread(std::function thread_routine) PURE; + virtual ThreadPtr createThread(std::function thread_routine, + OptionsOptConstRef options = absl::nullopt) PURE; /** * Return the current system thread ID diff --git a/source/common/access_log/access_log_manager_impl.cc b/source/common/access_log/access_log_manager_impl.cc index f173904d2dff..055e602bdcfb 100644 --- a/source/common/access_log/access_log_manager_impl.cc +++ b/source/common/access_log/access_log_manager_impl.cc @@ -203,7 +203,8 @@ void AccessLogFileImpl::write(absl::string_view data) { } void AccessLogFileImpl::createFlushStructures() { - flush_thread_ = thread_factory_.createThread([this]() -> void { flushThreadFunc(); }); + flush_thread_ = thread_factory_.createThread([this]() -> void { flushThreadFunc(); }, + Thread::Options{"AccessLogFlush"}); flush_timer_->enableTimer(flush_interval_msec_); } diff --git a/source/common/common/posix/thread_impl.cc b/source/common/common/posix/thread_impl.cc index 324230ade176..359af8245ed9 100644 --- a/source/common/common/posix/thread_impl.cc +++ b/source/common/common/posix/thread_impl.cc @@ -1,6 +1,8 @@ #include "common/common/assert.h" #include "common/common/thread_impl.h" +#include "absl/strings/str_cat.h" + #if defined(__linux__) #include #endif @@ -24,26 +26,99 @@ int64_t getCurrentThreadId() { } // namespace -ThreadImplPosix::ThreadImplPosix(std::function thread_routine) - : thread_routine_(std::move(thread_routine)) { - RELEASE_ASSERT(Logger::Registry::initialized(), ""); - const int rc = pthread_create( - &thread_handle_, nullptr, - [](void* arg) -> void* { - static_cast(arg)->thread_routine_(); - return nullptr; - }, - this); - RELEASE_ASSERT(rc == 0, ""); -} +// See https://www.man7.org/linux/man-pages/man3/pthread_setname_np.3.html. +// The maximum thread name is 16 bytes including the terminating nul byte, +// so we need to truncate the string_view to 15 bytes. +#define PTHREAD_MAX_THREADNAME_LEN_INCLUDING_NULL_BYTE 16 -void ThreadImplPosix::join() { - const int rc = pthread_join(thread_handle_, nullptr); - RELEASE_ASSERT(rc == 0, ""); -} +/** + * Wrapper for a pthread thread. We don't use std::thread because it eats exceptions and leads to + * unusable stack traces. + */ +class ThreadImplPosix : public Thread { +public: + ThreadImplPosix(std::function thread_routine, OptionsOptConstRef options) + : thread_routine_(std::move(thread_routine)) { + if (options) { + name_ = options->name_.substr(0, PTHREAD_MAX_THREADNAME_LEN_INCLUDING_NULL_BYTE - 1); + } + RELEASE_ASSERT(Logger::Registry::initialized(), ""); + const int rc = pthread_create( + &thread_handle_, nullptr, + [](void* arg) -> void* { + static_cast(arg)->thread_routine_(); + return nullptr; + }, + this); + RELEASE_ASSERT(rc == 0, ""); + +#ifdef __linux__ + // If the name was not specified, get it from the OS. If the name was + // specified, write it into the thread, and assert that the OS sees it the + // same way. + if (name_.empty()) { + getNameFromOS(name_); + } else { + const int set_name_rc = pthread_setname_np(thread_handle_, name_.c_str()); + if (set_name_rc != 0) { + ENVOY_LOG_MISC(trace, "Error {} setting name `{}'", set_name_rc, name_); + } else { + // When compiling in debug mode, read back the thread-name from the OS, + // and verify it's what we asked for. This ensures the truncation is as + // expected, and that the OS will actually retain all the bytes of the + // name we expect. 
+ // + // Note that the system-call to read the thread name may fail in case + // the thread exits after the call to set the name above, and before the + // call to get the name, so we can only do the assert if that call + // succeeded. + std::string check_name; + ASSERT(!getNameFromOS(check_name) || check_name == name_, + absl::StrCat("configured name=", name_, " os name=", check_name)); + } + } +#endif + } + + ~ThreadImplPosix() override { ASSERT(joined_); } + + std::string name() const override { return name_; } + + // Thread::Thread + void join() override { + ASSERT(!joined_); + joined_ = true; + const int rc = pthread_join(thread_handle_, nullptr); + RELEASE_ASSERT(rc == 0, ""); + } + +private: +#ifdef __linux__ + // Attempts to get the name from the operating system, returning true and + // updating 'name' if successful. Note that during normal operation this + // may fail, if the thread exits prior to the system call. + bool getNameFromOS(std::string& name) { + // Verify that the name got written into the thread as expected. + char buf[PTHREAD_MAX_THREADNAME_LEN_INCLUDING_NULL_BYTE]; + const int get_name_rc = pthread_getname_np(thread_handle_, buf, sizeof(buf)); + if (get_name_rc != 0) { + ENVOY_LOG_MISC(trace, "Error {} getting name", get_name_rc); + return false; + } + name = buf; + return true; + } +#endif + + std::function thread_routine_; + pthread_t thread_handle_; + std::string name_; + bool joined_{false}; +}; -ThreadPtr ThreadFactoryImplPosix::createThread(std::function thread_routine) { - return std::make_unique(thread_routine); +ThreadPtr ThreadFactoryImplPosix::createThread(std::function thread_routine, + OptionsOptConstRef options) { + return std::make_unique(thread_routine, options); } ThreadId ThreadFactoryImplPosix::currentThreadId() { return ThreadId(getCurrentThreadId()); } diff --git a/source/common/common/posix/thread_impl.h b/source/common/common/posix/thread_impl.h index 81c81d3be3fc..9b373ecaceb6 100644 --- a/source/common/common/posix/thread_impl.h +++ b/source/common/common/posix/thread_impl.h @@ -9,29 +9,13 @@ namespace Envoy { namespace Thread { -/** - * Wrapper for a pthread thread. We don't use std::thread because it eats exceptions and leads to - * unusable stack traces. - */ -class ThreadImplPosix : public Thread { -public: - ThreadImplPosix(std::function thread_routine); - - // Thread::Thread - void join() override; - -private: - std::function thread_routine_; - pthread_t thread_handle_; -}; - /** * Implementation of ThreadFactory */ class ThreadFactoryImplPosix : public ThreadFactory { public: // Thread::ThreadFactory - ThreadPtr createThread(std::function thread_routine) override; + ThreadPtr createThread(std::function thread_routine, OptionsOptConstRef options) override; ThreadId currentThreadId() override; }; diff --git a/source/common/common/win32/thread_impl.cc b/source/common/common/win32/thread_impl.cc index 1d3eca968957..8f26d63e0eb3 100644 --- a/source/common/common/win32/thread_impl.cc +++ b/source/common/common/win32/thread_impl.cc @@ -6,8 +6,14 @@ namespace Envoy { namespace Thread { -ThreadImplWin32::ThreadImplWin32(std::function thread_routine) +ThreadImplWin32::ThreadImplWin32(std::function thread_routine, OptionsOptConstRef options) : thread_routine_(thread_routine) { + if (options) { + name_ = options->name_; + // TODO(jmarantz): set the thread name for task manager, etc, or pull the + // auto-generated name from the OS if options is not present. 
+ } + RELEASE_ASSERT(Logger::Registry::initialized(), ""); thread_handle_ = reinterpret_cast(::_beginthreadex( nullptr, 0, @@ -26,8 +32,9 @@ void ThreadImplWin32::join() { RELEASE_ASSERT(rc == WAIT_OBJECT_0, ""); } -ThreadPtr ThreadFactoryImplWin32::createThread(std::function thread_routine) { - return std::make_unique(thread_routine); +ThreadPtr ThreadFactoryImplWin32::createThread(std::function thread_routine, + OptionsOptConstRef options) { + return std::make_unique(thread_routine, options); } ThreadId ThreadFactoryImplWin32::currentThreadId() { diff --git a/source/common/common/win32/thread_impl.h b/source/common/common/win32/thread_impl.h index 8b5d0fe37e15..87be085291c8 100644 --- a/source/common/common/win32/thread_impl.h +++ b/source/common/common/win32/thread_impl.h @@ -14,11 +14,12 @@ namespace Thread { */ class ThreadImplWin32 : public Thread { public: - ThreadImplWin32(std::function thread_routine); + ThreadImplWin32(std::function thread_routine, OptionsOptConstRef options); ~ThreadImplWin32(); // Thread::Thread void join() override; + std::string name() const override { return name_; } // Needed for WatcherImpl for the QueueUserAPC callback context HANDLE handle() const { return thread_handle_; } @@ -26,6 +27,7 @@ class ThreadImplWin32 : public Thread { private: std::function thread_routine_; HANDLE thread_handle_; + std::string name_; }; /** @@ -34,7 +36,7 @@ class ThreadImplWin32 : public Thread { class ThreadFactoryImplWin32 : public ThreadFactory { public: // Thread::ThreadFactory - ThreadPtr createThread(std::function thread_routine) override; + ThreadPtr createThread(std::function thread_routine, OptionsOptConstRef options) override; ThreadId currentThreadId() override; }; diff --git a/source/common/filesystem/win32/watcher_impl.cc b/source/common/filesystem/win32/watcher_impl.cc index 5bc400639109..80531f78d54e 100644 --- a/source/common/filesystem/win32/watcher_impl.cc +++ b/source/common/filesystem/win32/watcher_impl.cc @@ -31,7 +31,10 @@ WatcherImpl::WatcherImpl(Event::Dispatcher& dispatcher, Api::Api& api) thread_exit_event_ = ::CreateEvent(nullptr, false, false, nullptr); ASSERT(thread_exit_event_ != NULL); keep_watching_ = true; - watch_thread_ = thread_factory_.createThread([this]() -> void { watchLoop(); }); + + // See comments in WorkerImpl::start for the naming convention. 
+ Thread::Options options{absl::StrCat("wat:", dispatcher.name())}; + watch_thread_ = thread_factory_.createThread([this]() -> void { watchLoop(); }, options); } WatcherImpl::~WatcherImpl() { diff --git a/source/common/grpc/google_async_client_impl.cc b/source/common/grpc/google_async_client_impl.cc index 18c4936e5e25..ff7774034ac9 100644 --- a/source/common/grpc/google_async_client_impl.cc +++ b/source/common/grpc/google_async_client_impl.cc @@ -21,7 +21,8 @@ static constexpr int DefaultBufferLimitBytes = 1024 * 1024; } GoogleAsyncClientThreadLocal::GoogleAsyncClientThreadLocal(Api::Api& api) - : completion_thread_(api.threadFactory().createThread([this] { completionThread(); })) {} + : completion_thread_(api.threadFactory().createThread([this] { completionThread(); }, + Thread::Options{"GrpcGoogClient"})) {} GoogleAsyncClientThreadLocal::~GoogleAsyncClientThreadLocal() { // Force streams to shutdown and invoke TryCancel() to start the drain of diff --git a/source/server/guarddog_impl.cc b/source/server/guarddog_impl.cc index d05e84f6ff6e..ad66b59a5299 100644 --- a/source/server/guarddog_impl.cc +++ b/source/server/guarddog_impl.cc @@ -142,8 +142,10 @@ void GuardDogImpl::stopWatching(WatchDogSharedPtr wd) { void GuardDogImpl::start(Api::Api& api) { Thread::LockGuard guard(mutex_); + // See comments in WorkerImpl::start for the naming convention. + Thread::Options options{absl::StrCat("dog:", dispatcher_->name())}; thread_ = api.threadFactory().createThread( - [this]() -> void { dispatcher_->run(Event::Dispatcher::RunType::RunUntilExit); }); + [this]() -> void { dispatcher_->run(Event::Dispatcher::RunType::RunUntilExit); }, options); loop_timer_->enableTimer(std::chrono::milliseconds(0)); } diff --git a/source/server/worker_impl.cc b/source/server/worker_impl.cc index 17e02486e5f8..54ef058ea6e5 100644 --- a/source/server/worker_impl.cc +++ b/source/server/worker_impl.cc @@ -81,8 +81,20 @@ void WorkerImpl::removeFilterChains(uint64_t listener_tag, void WorkerImpl::start(GuardDog& guard_dog) { ASSERT(!thread_); - thread_ = - api_.threadFactory().createThread([this, &guard_dog]() -> void { threadRoutine(guard_dog); }); + + // In posix, thread names are limited to 15 characters, so contrive to make + // sure all interesting data fits there. The naming occurs in + // ListenerManagerImpl's constructor: absl::StrCat("worker_", i). Let's say we + // have 9999 threads. We'd need, so we need 7 bytes for "worker_", 4 bytes + // for the thread index, leaving us 4 bytes left to distinguish between the + // two threads used per dispatcher. We'll call this one "dsp:" and the + // one allocated in guarddog_impl.cc "dog:". + // + // TODO(jmarantz): consider refactoring how this naming works so this naming + // architecture is centralized, resulting in clearer names. 
+ Thread::Options options{absl::StrCat("wrk:", dispatcher_->name())}; + thread_ = api_.threadFactory().createThread( + [this, &guard_dog]() -> void { threadRoutine(guard_dog); }, options); } void WorkerImpl::initializeStats(Stats::Scope& scope) { dispatcher_->initializeStats(scope); } diff --git a/test/common/common/thread_test.cc b/test/common/common/thread_test.cc index 431d4be38f32..9dac043921f7 100644 --- a/test/common/common/thread_test.cc +++ b/test/common/common/thread_test.cc @@ -5,7 +5,9 @@ #include "test/test_common/thread_factory_for_test.h" +#include "absl/strings/str_cat.h" #include "absl/synchronization/notification.h" +#include "gmock/gmock.h" #include "gtest/gtest.h" namespace Envoy { @@ -27,12 +29,15 @@ TEST_F(ThreadAsyncPtrTest, DeleteOnDestruct) { // On thread1, we will lazily instantiate the string as "thread1". However // in the creation function we will block on a sync-point. - auto thread1 = thread_factory_.createThread([&str, &sync]() { - str.get([&sync]() -> std::string* { - sync.syncPoint("creator"); - return new std::string("thread1"); - }); - }); + auto thread1 = thread_factory_.createThread( + [&str, &sync]() { + str.get([&sync]() -> std::string* { + sync.syncPoint("creator"); + return new std::string("thread1"); + }); + }, + Options{"thread1"}); + EXPECT_EQ("thread1", thread1->name()); sync.barrierOn("creator"); @@ -40,7 +45,9 @@ TEST_F(ThreadAsyncPtrTest, DeleteOnDestruct) { // string as "thread2", but that allocator will never run because // the allocator on thread1 has already locked the AtomicPtr's mutex. auto thread2 = thread_factory_.createThread( - [&str]() { str.get([]() -> std::string* { return new std::string("thread2"); }); }); + [&str]() { str.get([]() -> std::string* { return new std::string("thread2"); }); }, + Options{"thread2"}); + EXPECT_EQ("thread2", thread2->name()); // Now let thread1's initializer finish. sync.signal("creator"); @@ -68,21 +75,25 @@ TEST_F(ThreadAsyncPtrTest, DoNotDelete) { // On thread1, we will lazily instantiate the string as "thread1". However // in the creation function we will block on a sync-point. - auto thread1 = thread_factory_.createThread([&str, &sync, &thread1_str]() { - str.get([&sync, &thread1_str]() -> const std::string* { - sync.syncPoint("creator"); - return &thread1_str; - }); - }); + auto thread1 = thread_factory_.createThread( + [&str, &sync, &thread1_str]() { + str.get([&sync, &thread1_str]() -> const std::string* { + sync.syncPoint("creator"); + return &thread1_str; + }); + }, + Options{"thread1"}); sync.barrierOn("creator"); // Now spawn a separate thread that will attempt to lazy-initialize the // string as "thread2", but that allocator will never run because // the allocator on thread1 has already locked the AtomicPtr's mutex. - auto thread2 = thread_factory_.createThread([&str, &thread2_str]() { - str.get([&thread2_str]() -> const std::string* { return &thread2_str; }); - }); + auto thread2 = thread_factory_.createThread( + [&str, &thread2_str]() { + str.get([&thread2_str]() -> const std::string* { return &thread2_str; }); + }, + Options{"thread2"}); // Now let thread1's initializer finish. 
sync.signal("creator"); @@ -113,7 +124,9 @@ TEST_F(ThreadAsyncPtrTest, ThreadSpammer) { }; std::vector threads; for (uint32_t i = 0; i < num_threads; ++i) { - threads.emplace_back(thread_factory_.createThread(thread_fn)); + std::string name = absl::StrCat("thread", i); + threads.emplace_back(thread_factory_.createThread(thread_fn, Options{name})); + EXPECT_EQ(name, threads.back()->name()); } EXPECT_EQ(0, calls); go.Notify(); @@ -190,6 +203,49 @@ TEST_F(ThreadAsyncPtrTest, ManagedAlloc) { } } +TEST_F(ThreadAsyncPtrTest, TruncateWait) { + absl::Notification notify; + auto thread = thread_factory_.createThread([¬ify]() { notify.WaitForNotification(); }, + Options{"this name is way too long for posix"}); + notify.Notify(); + + // To make this test work on multiple platforms, just assume the first 10 characters + // are retained. + EXPECT_THAT(thread->name(), testing::StartsWith("this name ")); + thread->join(); +} + +TEST_F(ThreadAsyncPtrTest, TruncateNoWait) { + auto thread = + thread_factory_.createThread([]() {}, Options{"this name is way too long for posix"}); + + // In general, across platforms, just assume the first 10 characters are + // retained. + EXPECT_THAT(thread->name(), testing::StartsWith("this name ")); + + // On Linux we can check for 15 exactly. +#ifdef __linux__ + EXPECT_EQ("this name is wa", thread->name()) << "truncated to 15 chars"; +#endif + + thread->join(); +} + +TEST_F(ThreadAsyncPtrTest, NameNotSpecifiedWait) { + absl::Notification notify; + auto thread = thread_factory_.createThread([¬ify]() { notify.WaitForNotification(); }); + notify.Notify(); + + // For linux builds, the thread name defaults to the name of the + // binary. However the name of the binary is different depending on whether + // this is a coverage test or not. Currently, this population does not occur + // for Mac or Windows. 
+#ifdef __linux__ + EXPECT_FALSE(thread->name().empty()); +#endif + thread->join(); +} + } // namespace } // namespace Thread } // namespace Envoy From 6679d57dd218b369fb39068177b25977a857f355 Mon Sep 17 00:00:00 2001 From: Florin Coras Date: Mon, 8 Jun 2020 13:25:05 -0700 Subject: [PATCH 319/909] network: move socket type from address to socket (#11486) Signed-off-by: Florin Coras --- include/envoy/network/address.h | 1 - include/envoy/network/listener.h | 2 +- include/envoy/network/socket.h | 11 +++++-- include/envoy/server/listener_manager.h | 2 +- source/common/network/listen_socket_impl.cc | 8 ++--- source/common/network/listen_socket_impl.h | 29 +++++++++---------- source/common/network/socket_impl.cc | 5 ++-- source/common/network/socket_impl.h | 8 ++--- .../common/network/socket_interface_impl.cc | 6 ++-- source/common/network/socket_interface_impl.h | 5 ++-- source/common/network/utility.cc | 8 ++--- source/common/network/utility.h | 2 +- .../filters/udp/udp_proxy/udp_proxy_filter.h | 2 +- .../quic_listeners/quiche/envoy_quic_utils.cc | 2 +- .../stat_sinks/common/statsd/statsd.cc | 2 +- .../extensions/tracers/xray/daemon_broker.cc | 4 +-- source/server/admin/admin.h | 2 +- source/server/config_validation/server.h | 2 +- source/server/connection_handler_impl.cc | 2 +- source/server/listener_impl.cc | 28 +++++++++--------- source/server/listener_impl.h | 14 ++++----- source/server/listener_manager_impl.cc | 27 +++++++++-------- source/server/listener_manager_impl.h | 2 +- ...dr_family_aware_socket_option_impl_test.cc | 18 ++++++------ test/common/network/address_impl_test.cc | 12 ++++---- .../common/network/listen_socket_impl_test.cc | 12 ++++---- test/common/network/listener_impl_test_base.h | 2 +- test/common/network/utility_test.cc | 8 ++--- .../proxy_protocol_regression_test.cc | 3 +- .../proxy_protocol/proxy_protocol_test.cc | 6 ++-- .../quiche/active_quic_listener_test.cc | 4 +-- .../quiche/envoy_quic_dispatcher_test.cc | 2 +- .../quiche/platform/quic_platform_test.cc | 2 +- .../quiche/platform/quic_port_utils_impl.cc | 4 +-- .../common/statsd/udp_statsd_test.cc | 2 +- test/integration/fake_upstream.h | 2 +- test/mocks/network/mocks.h | 18 +++++------- test/mocks/server/mocks.cc | 3 +- test/mocks/server/mocks.h | 3 +- test/server/connection_handler_test.cc | 15 +++++----- test/server/filter_chain_benchmark_test.cc | 4 +-- test/server/listener_manager_impl_test.cc | 21 +++++++------- test/server/listener_manager_impl_test.h | 3 +- test/test_common/network_utility.cc | 8 ++--- test/test_common/network_utility.h | 6 ++-- test/test_common/network_utility_test.cc | 2 +- 46 files changed, 160 insertions(+), 174 deletions(-) diff --git a/include/envoy/network/address.h b/include/envoy/network/address.h index 243c877045d6..136b10f3cf3e 100644 --- a/include/envoy/network/address.h +++ b/include/envoy/network/address.h @@ -116,7 +116,6 @@ class Pipe { }; enum class Type { Ip, Pipe }; -enum class SocketType { Stream, Datagram }; /** * Interface for all network addresses. diff --git a/include/envoy/network/listener.h b/include/envoy/network/listener.h index 2f511eb99a77..ba8f27918cb7 100644 --- a/include/envoy/network/listener.h +++ b/include/envoy/network/listener.h @@ -37,7 +37,7 @@ class ListenSocketFactory { /** * @return the type of the socket getListenSocket() returns. */ - virtual Address::SocketType socketType() const PURE; + virtual Socket::Type socketType() const PURE; /** * @return the listening address of the socket getListenSocket() returns. 
Before getListenSocket() diff --git a/include/envoy/network/socket.h b/include/envoy/network/socket.h index d2d87928e98e..07c09474038b 100644 --- a/include/envoy/network/socket.h +++ b/include/envoy/network/socket.h @@ -49,6 +49,11 @@ class Socket { public: virtual ~Socket() = default; + /** + * Type of sockets supported. See man 2 socket for more details + */ + enum class Type { Stream, Datagram }; + /** * @return the local address of the socket. */ @@ -76,7 +81,7 @@ class Socket { /** * @return the type (stream or datagram) of the socket. */ - virtual Address::SocketType socketType() const PURE; + virtual Socket::Type socketType() const PURE; /** * @return the type (IP or pipe) of addresses used by the socket (subset of socket domain) @@ -234,7 +239,7 @@ class SocketInterface { * @param version IP version if address type is IP * @return @ref Network::IoHandlePtr that wraps the underlying socket file descriptor */ - virtual IoHandlePtr socket(Address::SocketType type, Address::Type addr_type, + virtual IoHandlePtr socket(Socket::Type type, Address::Type addr_type, Address::IpVersion version) PURE; /** @@ -244,7 +249,7 @@ class SocketInterface { * @param addr address that is gleaned for address type and version if needed * @return @ref Network::IoHandlePtr that wraps the underlying socket file descriptor */ - virtual IoHandlePtr socket(Address::SocketType socket_type, + virtual IoHandlePtr socket(Socket::Type socket_type, const Address::InstanceConstSharedPtr addr) PURE; /** diff --git a/include/envoy/server/listener_manager.h b/include/envoy/server/listener_manager.h index 57a7e97549a2..956a89264ac4 100644 --- a/include/envoy/server/listener_manager.h +++ b/include/envoy/server/listener_manager.h @@ -71,7 +71,7 @@ class ListenerComponentFactory { */ virtual Network::SocketSharedPtr createListenSocket(Network::Address::InstanceConstSharedPtr address, - Network::Address::SocketType socket_type, + Network::Socket::Type socket_type, const Network::Socket::OptionsSharedPtr& options, const ListenSocketCreationParams& params) PURE; diff --git a/source/common/network/listen_socket_impl.cc b/source/common/network/listen_socket_impl.cc index 1dc095bb299d..43e342d340b6 100644 --- a/source/common/network/listen_socket_impl.cc +++ b/source/common/network/listen_socket_impl.cc @@ -46,8 +46,7 @@ void ListenSocketImpl::setupSocket(const Network::Socket::OptionsSharedPtr& opti } template <> -void NetworkListenSocket< - NetworkSocketTrait>::setPrebindSocketOptions() { +void NetworkListenSocket>::setPrebindSocketOptions() { // On Windows, SO_REUSEADDR does not restrict subsequent bind calls when there is a listener as on // Linux and later BSD socket stacks #ifndef WIN32 @@ -58,11 +57,10 @@ void NetworkListenSocket< } template <> -void NetworkListenSocket< - NetworkSocketTrait>::setPrebindSocketOptions() {} +void NetworkListenSocket>::setPrebindSocketOptions() {} UdsListenSocket::UdsListenSocket(const Address::InstanceConstSharedPtr& address) - : ListenSocketImpl(SocketInterfaceSingleton::get().socket(Address::SocketType::Stream, address), + : ListenSocketImpl(SocketInterfaceSingleton::get().socket(Socket::Type::Stream, address), address) { RELEASE_ASSERT(io_handle_->fd() != -1, ""); bind(local_address_); diff --git a/source/common/network/listen_socket_impl.h b/source/common/network/listen_socket_impl.h index 99b6a6e499e3..a77ccefbd78a 100644 --- a/source/common/network/listen_socket_impl.h +++ b/source/common/network/listen_socket_impl.h @@ -29,14 +29,14 @@ class ListenSocketImpl : public SocketImpl { /** 
* Wraps a unix socket. */ -template struct NetworkSocketTrait {}; +template struct NetworkSocketTrait {}; -template <> struct NetworkSocketTrait { - static constexpr Address::SocketType type = Address::SocketType::Stream; +template <> struct NetworkSocketTrait { + static constexpr Socket::Type type = Socket::Type::Stream; }; -template <> struct NetworkSocketTrait { - static constexpr Address::SocketType type = Address::SocketType::Datagram; +template <> struct NetworkSocketTrait { + static constexpr Socket::Type type = Socket::Type::Datagram; }; template class NetworkListenSocket : public ListenSocketImpl { @@ -58,23 +58,23 @@ template class NetworkListenSocket : public ListenSocketImpl { setListenSocketOptions(options); } - Address::SocketType socketType() const override { return T::type; } + Socket::Type socketType() const override { return T::type; } protected: void setPrebindSocketOptions(); }; -using TcpListenSocket = NetworkListenSocket>; +using TcpListenSocket = NetworkListenSocket>; using TcpListenSocketPtr = std::unique_ptr; -using UdpListenSocket = NetworkListenSocket>; +using UdpListenSocket = NetworkListenSocket>; using UdpListenSocketPtr = std::unique_ptr; class UdsListenSocket : public ListenSocketImpl { public: UdsListenSocket(const Address::InstanceConstSharedPtr& address); UdsListenSocket(IoHandlePtr&& io_handle, const Address::InstanceConstSharedPtr& address); - Address::SocketType socketType() const override { return Address::SocketType::Stream; } + Socket::Type socketType() const override { return Socket::Type::Stream; } }; class ConnectionSocketImpl : public SocketImpl, public ConnectionSocket { @@ -85,8 +85,7 @@ class ConnectionSocketImpl : public SocketImpl, public ConnectionSocket { : SocketImpl(std::move(io_handle), local_address), remote_address_(remote_address), direct_remote_address_(remote_address) {} - ConnectionSocketImpl(Address::SocketType type, - const Address::InstanceConstSharedPtr& local_address, + ConnectionSocketImpl(Socket::Type type, const Address::InstanceConstSharedPtr& local_address, const Address::InstanceConstSharedPtr& remote_address) : SocketImpl(type, local_address), remote_address_(remote_address), direct_remote_address_(remote_address) { @@ -94,7 +93,7 @@ class ConnectionSocketImpl : public SocketImpl, public ConnectionSocket { } // Network::Socket - Address::SocketType socketType() const override { return Address::SocketType::Stream; } + Socket::Type socketType() const override { return Socket::Type::Stream; } // Network::ConnectionSocket const Address::InstanceConstSharedPtr& remoteAddress() const override { return remote_address_; } @@ -152,9 +151,9 @@ class ClientSocketImpl : public ConnectionSocketImpl { public: ClientSocketImpl(const Address::InstanceConstSharedPtr& remote_address, const OptionsSharedPtr& options) - : ConnectionSocketImpl(Network::SocketInterfaceSingleton::get().socket( - Address::SocketType::Stream, remote_address), - nullptr, remote_address) { + : ConnectionSocketImpl( + Network::SocketInterfaceSingleton::get().socket(Socket::Type::Stream, remote_address), + nullptr, remote_address) { if (options) { addOptions(options); } diff --git a/source/common/network/socket_impl.cc b/source/common/network/socket_impl.cc index 4e0200c381d2..1c0e89439e68 100644 --- a/source/common/network/socket_impl.cc +++ b/source/common/network/socket_impl.cc @@ -10,11 +10,10 @@ namespace Envoy { namespace Network { -SocketImpl::SocketImpl(Address::SocketType type, Address::Type addr_type, - Address::IpVersion version) 
+SocketImpl::SocketImpl(Socket::Type type, Address::Type addr_type, Address::IpVersion version) : io_handle_(SocketInterfaceSingleton::get().socket(type, addr_type, version)) {} -SocketImpl::SocketImpl(Address::SocketType sock_type, const Address::InstanceConstSharedPtr addr) +SocketImpl::SocketImpl(Socket::Type sock_type, const Address::InstanceConstSharedPtr addr) : io_handle_(SocketInterfaceSingleton::get().socket(sock_type, addr)), sock_type_(sock_type), addr_type_(addr->type()) {} diff --git a/source/common/network/socket_impl.h b/source/common/network/socket_impl.h index e4e14798f67c..cb84bfb1c699 100644 --- a/source/common/network/socket_impl.h +++ b/source/common/network/socket_impl.h @@ -9,8 +9,8 @@ namespace Network { class SocketImpl : public virtual Socket { public: - SocketImpl(Address::SocketType type, Address::Type addr_type, Address::IpVersion version); - SocketImpl(Address::SocketType socket_type, const Address::InstanceConstSharedPtr addr); + SocketImpl(Socket::Type type, Address::Type addr_type, Address::IpVersion version); + SocketImpl(Socket::Type socket_type, const Address::InstanceConstSharedPtr addr); // Network::Socket const Address::InstanceConstSharedPtr& localAddress() const override { return local_address_; } @@ -50,7 +50,7 @@ class SocketImpl : public virtual Socket { Api::SysCallIntResult setBlockingForTest(bool blocking) override; const OptionsSharedPtr& options() const override { return options_; } - Address::SocketType socketType() const override { return sock_type_; } + Socket::Type socketType() const override { return sock_type_; } Address::Type addressType() const override { return addr_type_; } protected: @@ -59,7 +59,7 @@ class SocketImpl : public virtual Socket { const IoHandlePtr io_handle_; Address::InstanceConstSharedPtr local_address_; OptionsSharedPtr options_; - Address::SocketType sock_type_; + Socket::Type sock_type_; Address::Type addr_type_; }; diff --git a/source/common/network/socket_interface_impl.cc b/source/common/network/socket_interface_impl.cc index 1e9481e2368d..a682e2d23406 100644 --- a/source/common/network/socket_interface_impl.cc +++ b/source/common/network/socket_interface_impl.cc @@ -10,7 +10,7 @@ namespace Envoy { namespace Network { -IoHandlePtr SocketInterfaceImpl::socket(Address::SocketType socket_type, Address::Type addr_type, +IoHandlePtr SocketInterfaceImpl::socket(Socket::Type socket_type, Address::Type addr_type, Address::IpVersion version) { #if defined(__APPLE__) || defined(WIN32) int flags = 0; @@ -18,7 +18,7 @@ IoHandlePtr SocketInterfaceImpl::socket(Address::SocketType socket_type, Address int flags = SOCK_NONBLOCK; #endif - if (socket_type == Address::SocketType::Stream) { + if (socket_type == Socket::Type::Stream) { flags |= SOCK_STREAM; } else { flags |= SOCK_DGRAM; @@ -51,7 +51,7 @@ IoHandlePtr SocketInterfaceImpl::socket(Address::SocketType socket_type, Address return io_handle; } -IoHandlePtr SocketInterfaceImpl::socket(Address::SocketType socket_type, +IoHandlePtr SocketInterfaceImpl::socket(Socket::Type socket_type, const Address::InstanceConstSharedPtr addr) { Address::IpVersion ip_version = addr->ip() ? 
addr->ip()->version() : Address::IpVersion::v4; IoHandlePtr io_handle = SocketInterfaceImpl::socket(socket_type, addr->type(), ip_version); diff --git a/source/common/network/socket_interface_impl.h b/source/common/network/socket_interface_impl.h index 190a2765031c..ddef0528d659 100644 --- a/source/common/network/socket_interface_impl.h +++ b/source/common/network/socket_interface_impl.h @@ -10,10 +10,9 @@ namespace Network { class SocketInterfaceImpl : public SocketInterface { public: - IoHandlePtr socket(Address::SocketType socket_type, Address::Type addr_type, + IoHandlePtr socket(Socket::Type socket_type, Address::Type addr_type, Address::IpVersion version) override; - IoHandlePtr socket(Address::SocketType socket_type, - const Address::InstanceConstSharedPtr addr) override; + IoHandlePtr socket(Socket::Type socket_type, const Address::InstanceConstSharedPtr addr) override; bool ipFamilySupported(int domain) override; Address::InstanceConstSharedPtr addressFromFd(os_fd_t fd) override; Address::InstanceConstSharedPtr peerAddressFromFd(os_fd_t fd) override; diff --git a/source/common/network/utility.cc b/source/common/network/utility.cc index 5c334879bdb5..107b50304eac 100644 --- a/source/common/network/utility.cc +++ b/source/common/network/utility.cc @@ -472,22 +472,22 @@ void Utility::addressToProtobufAddress(const Address::Instance& address, } } -Address::SocketType +Socket::Type Utility::protobufAddressSocketType(const envoy::config::core::v3::Address& proto_address) { switch (proto_address.address_case()) { case envoy::config::core::v3::Address::AddressCase::kSocketAddress: { const auto protocol = proto_address.socket_address().protocol(); switch (protocol) { case envoy::config::core::v3::SocketAddress::TCP: - return Address::SocketType::Stream; + return Socket::Type::Stream; case envoy::config::core::v3::SocketAddress::UDP: - return Address::SocketType::Datagram; + return Socket::Type::Datagram; default: NOT_REACHED_GCOVR_EXCL_LINE; } } case envoy::config::core::v3::Address::AddressCase::kPipe: - return Address::SocketType::Stream; + return Socket::Type::Stream; default: NOT_REACHED_GCOVR_EXCL_LINE; } diff --git a/source/common/network/utility.h b/source/common/network/utility.h index fd4b925aea9a..be64071e9ea6 100644 --- a/source/common/network/utility.h +++ b/source/common/network/utility.h @@ -286,7 +286,7 @@ class Utility { * @param proto_address the address protobuf * @return socket type */ - static Address::SocketType + static Socket::Type protobufAddressSocketType(const envoy::config::core::v3::Address& proto_address); /** diff --git a/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.h b/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.h index dc16c2748082..8456d96089b3 100644 --- a/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.h +++ b/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.h @@ -222,7 +222,7 @@ class UdpProxyFilter : public Network::UdpListenerReadFilter, virtual Network::IoHandlePtr createIoHandle(const Upstream::HostConstSharedPtr& host) { // Virtual so this can be overridden in unit tests. 
- return Network::SocketInterfaceSingleton::get().socket(Network::Address::SocketType::Datagram, + return Network::SocketInterfaceSingleton::get().socket(Network::Socket::Type::Datagram, host->address()); } diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_utils.cc b/source/extensions/quic_listeners/quiche/envoy_quic_utils.cc index 9e0ee82de167..6fa268c53cc0 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_utils.cc +++ b/source/extensions/quic_listeners/quiche/envoy_quic_utils.cc @@ -95,7 +95,7 @@ createConnectionSocket(Network::Address::InstanceConstSharedPtr& peer_addr, Network::Address::InstanceConstSharedPtr& local_addr, const Network::ConnectionSocket::OptionsSharedPtr& options) { auto connection_socket = std::make_unique( - Network::Address::SocketType::Datagram, local_addr, peer_addr); + Network::Socket::Type::Datagram, local_addr, peer_addr); connection_socket->addOptions(Network::SocketOptionFactory::buildIpPacketInfoOptions()); connection_socket->addOptions(Network::SocketOptionFactory::buildRxQueueOverFlowOptions()); if (options != nullptr) { diff --git a/source/extensions/stat_sinks/common/statsd/statsd.cc b/source/extensions/stat_sinks/common/statsd/statsd.cc index 40d83274de5a..c502cb6e02ca 100644 --- a/source/extensions/stat_sinks/common/statsd/statsd.cc +++ b/source/extensions/stat_sinks/common/statsd/statsd.cc @@ -29,7 +29,7 @@ namespace Statsd { UdpStatsdSink::WriterImpl::WriterImpl(UdpStatsdSink& parent) : parent_(parent), io_handle_(Network::SocketInterfaceSingleton::get().socket( - Network::Address::SocketType::Datagram, parent_.server_address_)) {} + Network::Socket::Type::Datagram, parent_.server_address_)) {} void UdpStatsdSink::WriterImpl::write(const std::string& message) { // TODO(mattklein123): We can avoid this const_cast pattern by having a constant variant of diff --git a/source/extensions/tracers/xray/daemon_broker.cc b/source/extensions/tracers/xray/daemon_broker.cc index 6e5f1622e7aa..99e7a9bee4d3 100644 --- a/source/extensions/tracers/xray/daemon_broker.cc +++ b/source/extensions/tracers/xray/daemon_broker.cc @@ -30,8 +30,8 @@ std::string createHeader(const std::string& format, uint32_t version) { DaemonBrokerImpl::DaemonBrokerImpl(const std::string& daemon_endpoint) : address_(Network::Utility::parseInternetAddressAndPort(daemon_endpoint, false /*v6only*/)), - io_handle_(Network::SocketInterfaceSingleton::get().socket( - Network::Address::SocketType::Datagram, address_)) {} + io_handle_(Network::SocketInterfaceSingleton::get().socket(Network::Socket::Type::Datagram, + address_)) {} void DaemonBrokerImpl::send(const std::string& data) const { auto& logger = Logger::Registry::getLog(Logger::Id::tracing); diff --git a/source/server/admin/admin.h b/source/server/admin/admin.h index 74449114bae9..439cacf0013e 100644 --- a/source/server/admin/admin.h +++ b/source/server/admin/admin.h @@ -292,7 +292,7 @@ class AdminImpl : public Admin, AdminListenSocketFactory(Network::SocketSharedPtr socket) : socket_(socket) {} // Network::ListenSocketFactory - Network::Address::SocketType socketType() const override { return socket_->socketType(); } + Network::Socket::Type socketType() const override { return socket_->socketType(); } const Network::Address::InstanceConstSharedPtr& localAddress() const override { return socket_->localAddress(); diff --git a/source/server/config_validation/server.h b/source/server/config_validation/server.h index 70bb29bf180b..14682097f400 100644 --- a/source/server/config_validation/server.h +++ 
b/source/server/config_validation/server.h @@ -141,7 +141,7 @@ class ValidationInstance final : Logger::Loggable, return ProdListenerComponentFactory::createUdpListenerFilterFactoryList_(filters, context); } Network::SocketSharedPtr createListenSocket(Network::Address::InstanceConstSharedPtr, - Network::Address::SocketType, + Network::Socket::Type, const Network::Socket::OptionsSharedPtr&, const ListenSocketCreationParams&) override { // Returned sockets are not currently used so we can return nothing here safely vs. a diff --git a/source/server/connection_handler_impl.cc b/source/server/connection_handler_impl.cc index 1b2d03794eb9..708eef2e47ef 100644 --- a/source/server/connection_handler_impl.cc +++ b/source/server/connection_handler_impl.cc @@ -30,7 +30,7 @@ void ConnectionHandlerImpl::decNumConnections() { void ConnectionHandlerImpl::addListener(absl::optional overridden_listener, Network::ListenerConfig& config) { ActiveListenerDetails details; - if (config.listenSocketFactory().socketType() == Network::Address::SocketType::Stream) { + if (config.listenSocketFactory().socketType() == Network::Socket::Type::Stream) { if (overridden_listener.has_value()) { for (auto& listener : listeners_) { if (listener.second.listener_->listenerTag() == overridden_listener) { diff --git a/source/server/listener_impl.cc b/source/server/listener_impl.cc index 4b36fc4a20df..9a31da2cb033 100644 --- a/source/server/listener_impl.cc +++ b/source/server/listener_impl.cc @@ -53,7 +53,7 @@ bool needTlsInspector(const envoy::config::listener::v3::Listener& config) { ListenSocketFactoryImpl::ListenSocketFactoryImpl(ListenerComponentFactory& factory, Network::Address::InstanceConstSharedPtr address, - Network::Address::SocketType socket_type, + Network::Socket::Type socket_type, const Network::Socket::OptionsSharedPtr& options, bool bind_to_port, const std::string& listener_name, bool reuse_port) @@ -62,7 +62,7 @@ ListenSocketFactoryImpl::ListenSocketFactoryImpl(ListenerComponentFactory& facto bool create_socket = false; if (local_address_->type() == Network::Address::Type::Ip) { - if (socket_type_ == Network::Address::SocketType::Datagram) { + if (socket_type_ == Network::Socket::Type::Datagram) { ASSERT(reuse_port_ == true); } @@ -260,7 +260,7 @@ ListenerImpl::ListenerImpl(const envoy::config::listener::v3::Listener& config, createListenerFilterFactories(socket_type); validateFilterChains(socket_type); buildFilterChains(); - if (socket_type == Network::Address::SocketType::Datagram) { + if (socket_type == Network::Socket::Type::Datagram) { return; } buildSocketOptions(); @@ -330,9 +330,9 @@ void ListenerImpl::buildAccessLog() { } } -void ListenerImpl::buildUdpListenerFactory(Network::Address::SocketType socket_type, +void ListenerImpl::buildUdpListenerFactory(Network::Socket::Type socket_type, uint32_t concurrency) { - if (socket_type == Network::Address::SocketType::Datagram) { + if (socket_type == Network::Socket::Type::Datagram) { auto udp_config = config_.udp_listener_config(); if (udp_config.udp_listener_name().empty()) { udp_config.set_udp_listener_name(UdpListenerNames::get().RawUdp); @@ -350,7 +350,7 @@ void ListenerImpl::buildUdpListenerFactory(Network::Address::SocketType socket_t } } -void ListenerImpl::buildListenSocketOptions(Network::Address::SocketType socket_type) { +void ListenerImpl::buildListenSocketOptions(Network::Socket::Type socket_type) { if (PROTOBUF_GET_WRAPPED_OR_DEFAULT(config_, transparent, false)) { 
addListenSocketOptions(Network::SocketOptionFactory::buildIpTransparentOptions()); } @@ -364,7 +364,7 @@ void ListenerImpl::buildListenSocketOptions(Network::Address::SocketType socket_ addListenSocketOptions( Network::SocketOptionFactory::buildLiteralOptions(config_.socket_options())); } - if (socket_type == Network::Address::SocketType::Datagram) { + if (socket_type == Network::Socket::Type::Datagram) { // Needed for recvmsg to return destination address in IP header. addListenSocketOptions(Network::SocketOptionFactory::buildIpPacketInfoOptions()); // Needed to return receive buffer overflown indicator. @@ -372,10 +372,10 @@ void ListenerImpl::buildListenSocketOptions(Network::Address::SocketType socket_ } } -void ListenerImpl::createListenerFilterFactories(Network::Address::SocketType socket_type) { +void ListenerImpl::createListenerFilterFactories(Network::Socket::Type socket_type) { if (!config_.listener_filters().empty()) { switch (socket_type) { - case Network::Address::SocketType::Datagram: + case Network::Socket::Type::Datagram: if (config_.listener_filters().size() > 1) { // Currently supports only 1 UDP listener filter. throw EnvoyException(fmt::format( @@ -385,7 +385,7 @@ void ListenerImpl::createListenerFilterFactories(Network::Address::SocketType so udp_listener_filter_factories_ = parent_.factory_.createUdpListenerFilterFactoryList( config_.listener_filters(), *listener_factory_context_); break; - case Network::Address::SocketType::Stream: + case Network::Socket::Type::Stream: listener_filter_factories_ = parent_.factory_.createListenerFilterFactoryList( config_.listener_filters(), *listener_factory_context_); break; @@ -395,8 +395,8 @@ void ListenerImpl::createListenerFilterFactories(Network::Address::SocketType so } } -void ListenerImpl::validateFilterChains(Network::Address::SocketType socket_type) { - if (config_.filter_chains().empty() && (socket_type == Network::Address::SocketType::Stream || +void ListenerImpl::validateFilterChains(Network::Socket::Type socket_type) { + if (config_.filter_chains().empty() && (socket_type == Network::Socket::Type::Stream || !udp_listener_factory_->isTransportConnectionless())) { // If we got here, this is a tcp listener or connection-oriented udp listener, so ensure there // is a filter chain specified @@ -637,9 +637,9 @@ bool ListenerImpl::supportUpdateFilterChain(const envoy::config::listener::v3::L // Currently we only support TCP filter chain update. 
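The check that follows relies on Network::Utility::protobufAddressSocketType(), whose TCP-to-Stream, UDP-to-Datagram, and pipe-to-Stream mapping appears earlier in this patch. A condensed usage sketch; the canUpdateFilterChains helper is a made-up name for illustration, not patch code:

#include "common/network/utility.h"

#include "envoy/config/core/v3/address.pb.h"

// Hypothetical helper: in-place filter chain updates are only supported when
// both the old and new listener addresses resolve to stream (TCP) sockets.
bool canUpdateFilterChains(const envoy::config::core::v3::Address& old_address,
                           const envoy::config::core::v3::Address& new_address) {
  using Envoy::Network::Socket;
  using Envoy::Network::Utility;
  return Utility::protobufAddressSocketType(old_address) == Socket::Type::Stream &&
         Utility::protobufAddressSocketType(new_address) == Socket::Type::Stream;
}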
if (Network::Utility::protobufAddressSocketType(config_.address()) != - Network::Address::SocketType::Stream || + Network::Socket::Type::Stream || Network::Utility::protobufAddressSocketType(config.address()) != - Network::Address::SocketType::Stream) { + Network::Socket::Type::Stream) { return false; } diff --git a/source/server/listener_impl.h b/source/server/listener_impl.h index 6160c0f4f87c..328e744a29a7 100644 --- a/source/server/listener_impl.h +++ b/source/server/listener_impl.h @@ -40,12 +40,12 @@ class ListenSocketFactoryImpl : public Network::ListenSocketFactory, public: ListenSocketFactoryImpl(ListenerComponentFactory& factory, Network::Address::InstanceConstSharedPtr address, - Network::Address::SocketType socket_type, + Network::Socket::Type socket_type, const Network::Socket::OptionsSharedPtr& options, bool bind_to_port, const std::string& listener_name, bool reuse_port); // Network::ListenSocketFactory - Network::Address::SocketType socketType() const override { return socket_type_; } + Network::Socket::Type socketType() const override { return socket_type_; } const Network::Address::InstanceConstSharedPtr& localAddress() const override { return local_address_; } @@ -73,7 +73,7 @@ class ListenSocketFactoryImpl : public Network::ListenSocketFactory, // Initially, its port number might be 0. Once a socket is created, its port // will be set to the binding port. Network::Address::InstanceConstSharedPtr local_address_; - Network::Address::SocketType socket_type_; + Network::Socket::Type socket_type_; const Network::Socket::OptionsSharedPtr options_; bool bind_to_port_; const std::string& listener_name_; @@ -337,10 +337,10 @@ class ListenerImpl final : public Network::ListenerConfig, uint32_t concurrency); // Helpers for constructor. void buildAccessLog(); - void buildUdpListenerFactory(Network::Address::SocketType socket_type, uint32_t concurrency); - void buildListenSocketOptions(Network::Address::SocketType socket_type); - void createListenerFilterFactories(Network::Address::SocketType socket_type); - void validateFilterChains(Network::Address::SocketType socket_type); + void buildUdpListenerFactory(Network::Socket::Type socket_type, uint32_t concurrency); + void buildListenSocketOptions(Network::Socket::Type socket_type); + void createListenerFilterFactories(Network::Socket::Type socket_type); + void validateFilterChains(Network::Socket::Type socket_type); void buildFilterChains(); void buildSocketOptions(); void buildOriginalDstListenerFilter(); diff --git a/source/server/listener_manager_impl.cc b/source/server/listener_manager_impl.cc index c81fd1da6225..9aac1a14d953 100644 --- a/source/server/listener_manager_impl.cc +++ b/source/server/listener_manager_impl.cc @@ -39,11 +39,11 @@ namespace Envoy { namespace Server { namespace { -std::string toString(Network::Address::SocketType socket_type) { +std::string toString(Network::Socket::Type socket_type) { switch (socket_type) { - case Network::Address::SocketType::Stream: + case Network::Socket::Type::Stream: return "SocketType::Stream"; - case Network::Address::SocketType::Datagram: + case Network::Socket::Type::Datagram: return "SocketType::Datagram"; } NOT_REACHED_GCOVR_EXCL_LINE; @@ -188,17 +188,17 @@ Network::ListenerFilterMatcherSharedPtr ProdListenerComponentFactory::createList } Network::SocketSharedPtr ProdListenerComponentFactory::createListenSocket( - Network::Address::InstanceConstSharedPtr address, Network::Address::SocketType socket_type, + Network::Address::InstanceConstSharedPtr address, Network::Socket::Type 
socket_type, const Network::Socket::OptionsSharedPtr& options, const ListenSocketCreationParams& params) { ASSERT(address->type() == Network::Address::Type::Ip || address->type() == Network::Address::Type::Pipe); - ASSERT(socket_type == Network::Address::SocketType::Stream || - socket_type == Network::Address::SocketType::Datagram); + ASSERT(socket_type == Network::Socket::Type::Stream || + socket_type == Network::Socket::Type::Datagram); // For each listener config we share a single socket among all threaded listeners. // First we try to get the socket from our parent if applicable. if (address->type() == Network::Address::Type::Pipe) { - if (socket_type != Network::Address::SocketType::Stream) { + if (socket_type != Network::Socket::Type::Stream) { // This could be implemented in the future, since Unix domain sockets // support SOCK_DGRAM, but there would need to be a way to specify it in // envoy.api.v2.core.Pipe. @@ -215,7 +215,7 @@ Network::SocketSharedPtr ProdListenerComponentFactory::createListenSocket( return std::make_shared(address); } - const std::string scheme = (socket_type == Network::Address::SocketType::Stream) + const std::string scheme = (socket_type == Network::Socket::Type::Stream) ? std::string(Network::Utility::TCP_SCHEME) : std::string(Network::Utility::UDP_SCHEME); const std::string addr = absl::StrCat(scheme, address->asString()); @@ -225,7 +225,7 @@ Network::SocketSharedPtr ProdListenerComponentFactory::createListenSocket( if (fd != -1) { ENVOY_LOG(debug, "obtained socket for address {} from parent", addr); Network::IoHandlePtr io_handle = std::make_unique(fd); - if (socket_type == Network::Address::SocketType::Stream) { + if (socket_type == Network::Socket::Type::Stream) { return std::make_shared(std::move(io_handle), address, options); } else { return std::make_shared(std::move(io_handle), address, options); @@ -233,7 +233,7 @@ Network::SocketSharedPtr ProdListenerComponentFactory::createListenSocket( } } - if (socket_type == Network::Address::SocketType::Stream) { + if (socket_type == Network::Socket::Type::Stream) { return std::make_shared(address, options, params.bind_to_port); } else { return std::make_shared(address, options, params.bind_to_port); @@ -491,13 +491,13 @@ bool ListenerManagerImpl::addOrUpdateListenerInternal( draining_listen_socket_factory = existing_draining_listener->listener_->getSocketFactory(); } - Network::Address::SocketType socket_type = + Network::Socket::Type socket_type = Network::Utility::protobufAddressSocketType(config.address()); new_listener->setSocketFactory( draining_listen_socket_factory ? 
draining_listen_socket_factory : createListenSocketFactory(config.address(), *new_listener, - (socket_type == Network::Address::SocketType::Datagram) || + (socket_type == Network::Socket::Type::Datagram) || config.reuse_port())); if (workers_started_) { new_listener->debugLog("add warming listener"); @@ -983,8 +983,7 @@ ListenerFilterChainFactoryBuilder::buildFilterChainInternal( Network::ListenSocketFactorySharedPtr ListenerManagerImpl::createListenSocketFactory( const envoy::config::core::v3::Address& proto_address, ListenerImpl& listener, bool reuse_port) { - Network::Address::SocketType socket_type = - Network::Utility::protobufAddressSocketType(proto_address); + Network::Socket::Type socket_type = Network::Utility::protobufAddressSocketType(proto_address); return std::make_shared( factory_, listener.address(), socket_type, listener.listenSocketOptions(), listener.bindToPort(), listener.name(), reuse_port); diff --git a/source/server/listener_manager_impl.h b/source/server/listener_manager_impl.h index fa278a2d3806..8a734350103b 100644 --- a/source/server/listener_manager_impl.h +++ b/source/server/listener_manager_impl.h @@ -89,7 +89,7 @@ class ProdListenerComponentFactory : public ListenerComponentFactory, } Network::SocketSharedPtr createListenSocket(Network::Address::InstanceConstSharedPtr address, - Network::Address::SocketType socket_type, + Network::Socket::Type socket_type, const Network::Socket::OptionsSharedPtr& options, const ListenSocketCreationParams& params) override; diff --git a/test/common/network/addr_family_aware_socket_option_impl_test.cc b/test/common/network/addr_family_aware_socket_option_impl_test.cc index dc874f2c3b41..4e264e30f405 100644 --- a/test/common/network/addr_family_aware_socket_option_impl_test.cc +++ b/test/common/network/addr_family_aware_socket_option_impl_test.cc @@ -48,7 +48,7 @@ TEST_F(AddrFamilyAwareSocketOptionImplTest, SetOptionFailure) { TEST_F(AddrFamilyAwareSocketOptionImplTest, SetOptionSuccess) { Address::Ipv4Instance address("1.2.3.4", 5678); IoHandlePtr io_handle = Network::SocketInterfaceSingleton::get().socket( - Address::SocketType::Stream, std::make_shared(address)); + Socket::Type::Stream, std::make_shared(address)); EXPECT_CALL(testing::Const(socket_), ioHandle()).WillRepeatedly(testing::ReturnRef(*io_handle)); AddrFamilyAwareSocketOptionImpl socket_option{ @@ -64,7 +64,7 @@ TEST_F(AddrFamilyAwareSocketOptionImplTest, SetOptionSuccess) { TEST_F(AddrFamilyAwareSocketOptionImplTest, V4EmptyOptionNames) { Address::Ipv4Instance address("1.2.3.4", 5678); IoHandlePtr io_handle = Network::SocketInterfaceSingleton::get().socket( - Address::SocketType::Stream, std::make_shared(address)); + Socket::Type::Stream, std::make_shared(address)); EXPECT_CALL(testing::Const(socket_), ioHandle()).WillRepeatedly(testing::ReturnRef(*io_handle)); AddrFamilyAwareSocketOptionImpl socket_option{ envoy::config::core::v3::SocketOption::STATE_PREBIND, {}, {}, 1}; @@ -78,7 +78,7 @@ TEST_F(AddrFamilyAwareSocketOptionImplTest, V4EmptyOptionNames) { TEST_F(AddrFamilyAwareSocketOptionImplTest, V6EmptyOptionNames) { Address::Ipv6Instance address("::1:2:3:4", 5678); IoHandlePtr io_handle = Network::SocketInterfaceSingleton::get().socket( - Address::SocketType::Stream, std::make_shared(address)); + Socket::Type::Stream, std::make_shared(address)); EXPECT_CALL(testing::Const(socket_), ioHandle()).WillRepeatedly(testing::ReturnRef(*io_handle)); AddrFamilyAwareSocketOptionImpl socket_option{ envoy::config::core::v3::SocketOption::STATE_PREBIND, {}, {}, 1}; @@ -93,7 
+93,7 @@ TEST_F(AddrFamilyAwareSocketOptionImplTest, V6EmptyOptionNames) { TEST_F(AddrFamilyAwareSocketOptionImplTest, V4IgnoreV6) { Address::Ipv4Instance address("1.2.3.4", 5678); IoHandlePtr io_handle = Network::SocketInterfaceSingleton::get().socket( - Address::SocketType::Stream, std::make_shared(address)); + Socket::Type::Stream, std::make_shared(address)); EXPECT_CALL(testing::Const(socket_), ioHandle()).WillRepeatedly(testing::ReturnRef(*io_handle)); AddrFamilyAwareSocketOptionImpl socket_option{ @@ -107,7 +107,7 @@ TEST_F(AddrFamilyAwareSocketOptionImplTest, V4IgnoreV6) { TEST_F(AddrFamilyAwareSocketOptionImplTest, V6Only) { Address::Ipv6Instance address("::1:2:3:4", 5678); IoHandlePtr io_handle = Network::SocketInterfaceSingleton::get().socket( - Address::SocketType::Stream, std::make_shared(address)); + Socket::Type::Stream, std::make_shared(address)); EXPECT_CALL(testing::Const(socket_), ioHandle()).WillRepeatedly(testing::ReturnRef(*io_handle)); AddrFamilyAwareSocketOptionImpl socket_option{ @@ -124,7 +124,7 @@ TEST_F(AddrFamilyAwareSocketOptionImplTest, V6Only) { TEST_F(AddrFamilyAwareSocketOptionImplTest, V6OnlyV4Fallback) { Address::Ipv6Instance address("::1:2:3:4", 5678); IoHandlePtr io_handle = Network::SocketInterfaceSingleton::get().socket( - Address::SocketType::Stream, std::make_shared(address)); + Socket::Type::Stream, std::make_shared(address)); EXPECT_CALL(testing::Const(socket_), ioHandle()).WillRepeatedly(testing::ReturnRef(*io_handle)); AddrFamilyAwareSocketOptionImpl socket_option{ @@ -141,7 +141,7 @@ TEST_F(AddrFamilyAwareSocketOptionImplTest, V6OnlyV4Fallback) { TEST_F(AddrFamilyAwareSocketOptionImplTest, V6Precedence) { Address::Ipv6Instance address("::1:2:3:4", 5678); IoHandlePtr io_handle = Network::SocketInterfaceSingleton::get().socket( - Address::SocketType::Stream, std::make_shared(address)); + Socket::Type::Stream, std::make_shared(address)); EXPECT_CALL(testing::Const(socket_), ioHandle()).WillRepeatedly(testing::ReturnRef(*io_handle)); AddrFamilyAwareSocketOptionImpl socket_option{ @@ -155,7 +155,7 @@ TEST_F(AddrFamilyAwareSocketOptionImplTest, V6Precedence) { TEST_F(AddrFamilyAwareSocketOptionImplTest, V4GetSocketOptionName) { Address::Ipv4Instance address("1.2.3.4", 5678); IoHandlePtr io_handle = Network::SocketInterfaceSingleton::get().socket( - Address::SocketType::Stream, std::make_shared(address)); + Socket::Type::Stream, std::make_shared(address)); EXPECT_CALL(testing::Const(socket_), ioHandle()).WillRepeatedly(testing::ReturnRef(*io_handle)); AddrFamilyAwareSocketOptionImpl socket_option{ @@ -171,7 +171,7 @@ TEST_F(AddrFamilyAwareSocketOptionImplTest, V4GetSocketOptionName) { TEST_F(AddrFamilyAwareSocketOptionImplTest, V6GetSocketOptionName) { Address::Ipv6Instance address("2::1", 5678); IoHandlePtr io_handle = Network::SocketInterfaceSingleton::get().socket( - Address::SocketType::Stream, std::make_shared(address)); + Socket::Type::Stream, std::make_shared(address)); EXPECT_CALL(testing::Const(socket_), ioHandle()).WillRepeatedly(testing::ReturnRef(*io_handle)); AddrFamilyAwareSocketOptionImpl socket_option{ diff --git a/test/common/network/address_impl_test.cc b/test/common/network/address_impl_test.cc index e8c2a1b33fc3..3935004acf74 100644 --- a/test/common/network/address_impl_test.cc +++ b/test/common/network/address_impl_test.cc @@ -43,13 +43,13 @@ void testSocketBindAndConnect(Network::Address::IpVersion ip_version, bool v6onl ASSERT_NE(addr_port, nullptr); if (addr_port->ip()->port() == 0) { - addr_port = 
Network::Test::findOrCheckFreePort(addr_port, SocketType::Stream); + addr_port = Network::Test::findOrCheckFreePort(addr_port, Socket::Type::Stream); } ASSERT_NE(addr_port, nullptr); ASSERT_NE(addr_port->ip(), nullptr); // Create a socket on which we'll listen for connections from clients. - SocketImpl sock(SocketType::Stream, addr_port); + SocketImpl sock(Socket::Type::Stream, addr_port); ASSERT_GE(sock.ioHandle().fd(), 0) << addr_port->asString(); // Check that IPv6 sockets accept IPv6 connections only. @@ -71,7 +71,7 @@ void testSocketBindAndConnect(Network::Address::IpVersion ip_version, bool v6onl auto client_connect = [](Address::InstanceConstSharedPtr addr_port) { // Create a client socket and connect to the server. - SocketImpl client_sock(SocketType::Stream, addr_port); + SocketImpl client_sock(Socket::Type::Stream, addr_port); ASSERT_GE(client_sock.ioHandle().fd(), 0) << addr_port->asString(); @@ -326,7 +326,7 @@ TEST(PipeInstanceTest, BasicPermission) { const mode_t mode = 0777; PipeInstance pipe(path, mode); InstanceConstSharedPtr address = std::make_shared(pipe); - SocketImpl sock(SocketType::Stream, address); + SocketImpl sock(Socket::Type::Stream, address); ASSERT_GE(sock.ioHandle().fd(), 0) << pipe.asString(); @@ -353,7 +353,7 @@ TEST(PipeInstanceTest, PermissionFail) { const mode_t mode = 0777; PipeInstance pipe(path, mode); InstanceConstSharedPtr address = std::make_shared(pipe); - SocketImpl sock(SocketType::Stream, address); + SocketImpl sock(Socket::Type::Stream, address); ASSERT_GE(sock.ioHandle().fd(), 0) << pipe.asString(); @@ -425,7 +425,7 @@ TEST(PipeInstanceTest, UnlinksExistingFile) { const auto bind_uds_socket = [](const std::string& path) { PipeInstance pipe(path); InstanceConstSharedPtr address = std::make_shared(pipe); - SocketImpl sock(SocketType::Stream, address); + SocketImpl sock(Socket::Type::Stream, address); ASSERT_GE(sock.ioHandle().fd(), 0) << pipe.asString(); diff --git a/test/common/network/listen_socket_impl_test.cc b/test/common/network/listen_socket_impl_test.cc index 4b2d98442d75..ae595d3ced28 100644 --- a/test/common/network/listen_socket_impl_test.cc +++ b/test/common/network/listen_socket_impl_test.cc @@ -20,7 +20,7 @@ namespace Envoy { namespace Network { namespace { -template +template class ListenSocketImplTest : public testing::TestWithParam { protected: ListenSocketImplTest() : version_(GetParam()) {} @@ -44,7 +44,7 @@ class ListenSocketImplTest : public testing::TestWithParam { while (true) { ++loop_number; - auto addr_fd = Network::Test::bindFreeLoopbackPort(version_, Address::SocketType::Stream); + auto addr_fd = Network::Test::bindFreeLoopbackPort(version_, Socket::Type::Stream); auto addr = addr_fd.first; SocketPtr& sock = addr_fd.second; EXPECT_TRUE(SOCKET_VALID(sock->ioHandle().fd())); @@ -82,7 +82,7 @@ class ListenSocketImplTest : public testing::TestWithParam { // TODO (conqerAtapple): This is unfortunate. We should be able to templatize this // instead of if block. 
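The fixture above is templated on NetworkSocketTrait so the same test body can exercise both TcpListenSocket and UdpListenSocket, branching on the trait's compile-time type (for example, only stream sockets call listen()). A simplified standalone sketch of that trait pattern, using stand-in names rather than the Envoy classes:

#include <iostream>

enum class SocketType { Stream, Datagram };

template <SocketType T> struct SocketTrait {};
template <> struct SocketTrait<SocketType::Stream> {
  static constexpr SocketType type = SocketType::Stream;
};
template <> struct SocketTrait<SocketType::Datagram> {
  static constexpr SocketType type = SocketType::Datagram;
};

// A class or test templated on the trait can branch at compile time.
template <typename Trait> void maybeListen() {
  if constexpr (Trait::type == SocketType::Stream) {
    std::cout << "stream: listen()\n";
  } else {
    std::cout << "datagram: no listen()\n";
  }
}

int main() {
  maybeListen<SocketTrait<SocketType::Stream>>();   // TCP-style socket
  maybeListen<SocketTrait<SocketType::Datagram>>(); // UDP-style socket
  return 0;
}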
auto os_sys_calls = Api::OsSysCallsSingleton::get(); - if (NetworkSocketTrait::type == Address::SocketType::Stream) { + if (NetworkSocketTrait::type == Socket::Type::Stream) { EXPECT_EQ(0, socket1->listen(0).rc_); } @@ -125,8 +125,8 @@ class ListenSocketImplTest : public testing::TestWithParam { } }; -using ListenSocketImplTestTcp = ListenSocketImplTest; -using ListenSocketImplTestUdp = ListenSocketImplTest; +using ListenSocketImplTestTcp = ListenSocketImplTest; +using ListenSocketImplTestUdp = ListenSocketImplTest; INSTANTIATE_TEST_SUITE_P(IpVersions, ListenSocketImplTestTcp, testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), @@ -146,7 +146,7 @@ class TestListenSocket : public ListenSocketImpl { public: TestListenSocket(Address::InstanceConstSharedPtr address) : ListenSocketImpl(std::make_unique(), address) {} - Address::SocketType socketType() const override { return Address::SocketType::Stream; } + Socket::Type socketType() const override { return Socket::Type::Stream; } }; TEST_P(ListenSocketImplTestTcp, SetLocalAddress) { diff --git a/test/common/network/listener_impl_test_base.h b/test/common/network/listener_impl_test_base.h index 19884ba46982..8f3ee21b8727 100644 --- a/test/common/network/listener_impl_test_base.h +++ b/test/common/network/listener_impl_test_base.h @@ -20,7 +20,7 @@ class ListenerImplTestBase : public testing::TestWithParam { ListenerImplTestBase() : version_(GetParam()), alt_address_(Network::Test::findOrCheckFreePort( - Network::Test::getCanonicalLoopbackAddress(version_), Address::SocketType::Stream)), + Network::Test::getCanonicalLoopbackAddress(version_), Socket::Type::Stream)), api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher("test_thread")) {} Event::DispatcherImpl& dispatcherImpl() { diff --git a/test/common/network/utility_test.cc b/test/common/network/utility_test.cc index 49e99f755fa2..8039f7c873b5 100644 --- a/test/common/network/utility_test.cc +++ b/test/common/network/utility_test.cc @@ -350,24 +350,24 @@ TEST(NetworkUtility, ProtobufAddressSocketType) { { envoy::config::core::v3::Address proto_address; proto_address.mutable_socket_address(); - EXPECT_EQ(Address::SocketType::Stream, Utility::protobufAddressSocketType(proto_address)); + EXPECT_EQ(Socket::Type::Stream, Utility::protobufAddressSocketType(proto_address)); } { envoy::config::core::v3::Address proto_address; proto_address.mutable_socket_address()->set_protocol( envoy::config::core::v3::SocketAddress::TCP); - EXPECT_EQ(Address::SocketType::Stream, Utility::protobufAddressSocketType(proto_address)); + EXPECT_EQ(Socket::Type::Stream, Utility::protobufAddressSocketType(proto_address)); } { envoy::config::core::v3::Address proto_address; proto_address.mutable_socket_address()->set_protocol( envoy::config::core::v3::SocketAddress::UDP); - EXPECT_EQ(Address::SocketType::Datagram, Utility::protobufAddressSocketType(proto_address)); + EXPECT_EQ(Socket::Type::Datagram, Utility::protobufAddressSocketType(proto_address)); } { envoy::config::core::v3::Address proto_address; proto_address.mutable_pipe(); - EXPECT_EQ(Address::SocketType::Stream, Utility::protobufAddressSocketType(proto_address)); + EXPECT_EQ(Socket::Type::Stream, Utility::protobufAddressSocketType(proto_address)); } } diff --git a/test/extensions/common/proxy_protocol/proxy_protocol_regression_test.cc b/test/extensions/common/proxy_protocol/proxy_protocol_regression_test.cc index 701df21eb901..e51fbb8b6fa3 100644 --- a/test/extensions/common/proxy_protocol/proxy_protocol_regression_test.cc +++ 
b/test/extensions/common/proxy_protocol/proxy_protocol_regression_test.cc @@ -47,8 +47,7 @@ class ProxyProtocolRegressionTest : public testing::TestWithParamlocalAddress())); EXPECT_CALL(socket_factory_, getListenSocket()).WillOnce(Return(socket_)); connection_handler_->addListener(absl::nullopt, *this); diff --git a/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc b/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc index 74dc142304e2..75d52d3b4235 100644 --- a/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc +++ b/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc @@ -60,8 +60,7 @@ class ProxyProtocolTest : public testing::TestWithParamlocalAddress())); EXPECT_CALL(socket_factory_, getListenSocket()).WillOnce(Return(socket_)); connection_handler_->addListener(absl::nullopt, *this); @@ -1001,8 +1000,7 @@ class WildcardProxyProtocolTest : public testing::TestWithParamlocalAddress()->ip()->port())), connection_handler_(new Server::ConnectionHandlerImpl(*dispatcher_)), name_("proxy"), filter_chain_(Network::Test::createEmptyFilterChainWithRawBufferSockets()) { - EXPECT_CALL(socket_factory_, socketType()) - .WillOnce(Return(Network::Address::SocketType::Stream)); + EXPECT_CALL(socket_factory_, socketType()).WillOnce(Return(Network::Socket::Type::Stream)); EXPECT_CALL(socket_factory_, localAddress()).WillOnce(ReturnRef(socket_->localAddress())); EXPECT_CALL(socket_factory_, getListenSocket()).WillOnce(Return(socket_)); connection_handler_->addListener(absl::nullopt, *this); diff --git a/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc b/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc index c3836ac38163..ff4598da2b3a 100644 --- a/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc +++ b/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc @@ -68,8 +68,8 @@ class ActiveQuicListenerFactoryPeer { class ActiveQuicListenerTest : public testing::TestWithParam { protected: - using Socket = Network::NetworkListenSocket< - Network::NetworkSocketTrait>; + using Socket = + Network::NetworkListenSocket>; ActiveQuicListenerTest() : version_(GetParam()), api_(Api::createApiForTest(simulated_time_system_)), diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_dispatcher_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_dispatcher_test.cc index 6162165935db..f6357feb98be 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_dispatcher_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_dispatcher_test.cc @@ -51,7 +51,7 @@ class EnvoyQuicDispatcherTest : public testing::TestWithParamallocateDispatcher("test_thread")), listen_socket_(std::make_unique>>( + Network::NetworkSocketTrait>>( Network::Test::getCanonicalLoopbackAddress(version_), nullptr, /*bind*/ true)), connection_helper_(*dispatcher_), crypto_config_(quic::QuicCryptoServerConfig::TESTING, quic::QuicRandom::GetInstance(), diff --git a/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc b/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc index dc8ecf64a852..916195b9dc13 100644 --- a/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc +++ b/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc @@ -670,7 +670,7 @@ TEST_F(QuicPlatformTest, PickUnsedPort) { Envoy::Network::Test::getCanonicalLoopbackAddress(ip_version); Envoy::Network::Address::InstanceConstSharedPtr addr_with_port = 
Envoy::Network::Utility::getAddressWithPort(*addr, port); - Envoy::Network::SocketImpl sock(Envoy::Network::Address::SocketType::Datagram, addr_with_port); + Envoy::Network::SocketImpl sock(Envoy::Network::Socket::Type::Datagram, addr_with_port); // binding of given port should success. EXPECT_EQ(0, sock.bind(addr_with_port).rc_); } diff --git a/test/extensions/quic_listeners/quiche/platform/quic_port_utils_impl.cc b/test/extensions/quic_listeners/quiche/platform/quic_port_utils_impl.cc index de3f39a001a7..ab902f546073 100644 --- a/test/extensions/quic_listeners/quiche/platform/quic_port_utils_impl.cc +++ b/test/extensions/quic_listeners/quiche/platform/quic_port_utils_impl.cc @@ -32,8 +32,8 @@ int QuicPickServerPortForTestsOrDieImpl() { fmt::format("{}:{}", Envoy::Network::Test::getAnyAddressUrlString(ip_version), /*port*/ 0), /*v6only*/ false); ASSERT(addr_port != nullptr); - addr_port = Envoy::Network::Test::findOrCheckFreePort( - addr_port, Envoy::Network::Address::SocketType::Datagram); + addr_port = + Envoy::Network::Test::findOrCheckFreePort(addr_port, Envoy::Network::Socket::Type::Datagram); if (addr_port != nullptr && addr_port->ip() != nullptr) { // Find a port. return addr_port->ip()->port(); diff --git a/test/extensions/stats_sinks/common/statsd/udp_statsd_test.cc b/test/extensions/stats_sinks/common/statsd/udp_statsd_test.cc index afd7ede41339..9bad9d78a873 100644 --- a/test/extensions/stats_sinks/common/statsd/udp_statsd_test.cc +++ b/test/extensions/stats_sinks/common/statsd/udp_statsd_test.cc @@ -52,7 +52,7 @@ TEST(UdpOverUdsStatsdSinkTest, InitWithPipeAddress) { sink.flush(snapshot); // Start the server. - Network::SocketImpl sock(Network::Address::SocketType::Datagram, uds_address); + Network::SocketImpl sock(Network::Socket::Type::Datagram, uds_address); RELEASE_ASSERT(sock.setBlockingForTest(false).rc_ != -1, ""); sock.bind(uds_address); diff --git a/test/integration/fake_upstream.h b/test/integration/fake_upstream.h index ec2bb436b291..d93536f6a925 100644 --- a/test/integration/fake_upstream.h +++ b/test/integration/fake_upstream.h @@ -643,7 +643,7 @@ class FakeUpstream : Logger::Loggable, FakeListenSocketFactory(Network::SocketSharedPtr socket) : socket_(socket) {} // Network::ListenSocketFactory - Network::Address::SocketType socketType() const override { return socket_->socketType(); } + Network::Socket::Type socketType() const override { return socket_->socketType(); } const Network::Address::InstanceConstSharedPtr& localAddress() const override { return socket_->localAddress(); diff --git a/test/mocks/network/mocks.h b/test/mocks/network/mocks.h index c967b2b18b5b..e4e58290ed58 100644 --- a/test/mocks/network/mocks.h +++ b/test/mocks/network/mocks.h @@ -219,17 +219,16 @@ class MockListenSocket : public Socket { MOCK_METHOD(void, setLocalAddress, (const Address::InstanceConstSharedPtr&)); MOCK_METHOD(IoHandle&, ioHandle, ()); MOCK_METHOD(const IoHandle&, ioHandle, (), (const)); - MOCK_METHOD(Address::SocketType, socketType, (), (const)); + MOCK_METHOD(Socket::Type, socketType, (), (const)); MOCK_METHOD(Address::Type, addressType, (), (const)); MOCK_METHOD(void, close, ()); MOCK_METHOD(bool, isOpen, (), (const)); MOCK_METHOD(void, addOption_, (const Socket::OptionConstSharedPtr& option)); MOCK_METHOD(void, addOptions_, (const Socket::OptionsSharedPtr& options)); MOCK_METHOD(const OptionsSharedPtr&, options, (), (const)); - MOCK_METHOD(IoHandlePtr, socket, (Address::SocketType, Address::Type, Address::IpVersion), + MOCK_METHOD(IoHandlePtr, socket, (Socket::Type, 
Address::Type, Address::IpVersion), (const)); + MOCK_METHOD(IoHandlePtr, socketForAddrPtr, (Socket::Type, const Address::InstanceConstSharedPtr), (const)); - MOCK_METHOD(IoHandlePtr, socketForAddrPtr, - (Address::SocketType, const Address::InstanceConstSharedPtr), (const)); MOCK_METHOD(Api::SysCallIntResult, bind, (const Address::InstanceConstSharedPtr)); MOCK_METHOD(Api::SysCallIntResult, connect, (const Address::InstanceConstSharedPtr)); MOCK_METHOD(Api::SysCallIntResult, listen, (int)); @@ -281,14 +280,13 @@ class MockConnectionSocket : public ConnectionSocket { MOCK_METHOD(const Network::ConnectionSocket::OptionsSharedPtr&, options, (), (const)); MOCK_METHOD(IoHandle&, ioHandle, ()); MOCK_METHOD(const IoHandle&, ioHandle, (), (const)); - MOCK_METHOD(Address::SocketType, socketType, (), (const)); + MOCK_METHOD(Socket::Type, socketType, (), (const)); MOCK_METHOD(Address::Type, addressType, (), (const)); MOCK_METHOD(void, close, ()); MOCK_METHOD(bool, isOpen, (), (const)); - MOCK_METHOD(IoHandlePtr, socket, (Address::SocketType, Address::Type, Address::IpVersion), + MOCK_METHOD(IoHandlePtr, socket, (Socket::Type, Address::Type, Address::IpVersion), (const)); + MOCK_METHOD(IoHandlePtr, socketForAddrPtr, (Socket::Type, const Address::InstanceConstSharedPtr), (const)); - MOCK_METHOD(IoHandlePtr, socketForAddrPtr, - (Address::SocketType, const Address::InstanceConstSharedPtr), (const)); MOCK_METHOD(Api::SysCallIntResult, bind, (const Address::InstanceConstSharedPtr)); MOCK_METHOD(Api::SysCallIntResult, connect, (const Address::InstanceConstSharedPtr)); MOCK_METHOD(Api::SysCallIntResult, listen, (int)); @@ -318,7 +316,7 @@ class MockListenSocketFactory : public ListenSocketFactory { public: MockListenSocketFactory() = default; - MOCK_METHOD(Network::Address::SocketType, socketType, (), (const)); + MOCK_METHOD(Network::Socket::Type, socketType, (), (const)); MOCK_METHOD(const Network::Address::InstanceConstSharedPtr&, localAddress, (), (const)); MOCK_METHOD(Network::SocketSharedPtr, getListenSocket, ()); MOCK_METHOD(SocketOptRef, sharedSocket, (), (const)); @@ -419,7 +417,7 @@ class MockResolvedAddress : public Address::Instance { MOCK_METHOD(Api::SysCallIntResult, connect, (os_fd_t), (const)); MOCK_METHOD(Address::Ip*, ip, (), (const)); MOCK_METHOD(Address::Pipe*, pipe, (), (const)); - MOCK_METHOD(IoHandlePtr, socket, (Address::SocketType), (const)); + MOCK_METHOD(IoHandlePtr, socket, (Socket::Type), (const)); MOCK_METHOD(Address::Type, type, (), (const)); MOCK_METHOD(sockaddr*, sockAddr, (), (const)); MOCK_METHOD(socklen_t, sockAddrLen, (), (const)); diff --git a/test/mocks/server/mocks.cc b/test/mocks/server/mocks.cc index 0ea123678448..e5b7fb43c63b 100644 --- a/test/mocks/server/mocks.cc +++ b/test/mocks/server/mocks.cc @@ -99,8 +99,7 @@ MockOverloadManager::~MockOverloadManager() = default; MockListenerComponentFactory::MockListenerComponentFactory() : socket_(std::make_shared>()) { ON_CALL(*this, createListenSocket(_, _, _, _)) - .WillByDefault(Invoke([&](Network::Address::InstanceConstSharedPtr, - Network::Address::SocketType, + .WillByDefault(Invoke([&](Network::Address::InstanceConstSharedPtr, Network::Socket::Type, const Network::Socket::OptionsSharedPtr& options, const ListenSocketCreationParams&) -> Network::SocketSharedPtr { if (!Network::Socket::applyOptions(options, *socket_, diff --git a/test/mocks/server/mocks.h b/test/mocks/server/mocks.h index 67ece91b6ff7..ac53f79c51ff 100644 --- a/test/mocks/server/mocks.h +++ b/test/mocks/server/mocks.h @@ -273,8 +273,7 @@ class 
MockListenerComponentFactory : public ListenerComponentFactory { (const Protobuf::RepeatedPtrField&, Configuration::ListenerFactoryContext& context)); MOCK_METHOD(Network::SocketSharedPtr, createListenSocket, - (Network::Address::InstanceConstSharedPtr address, - Network::Address::SocketType socket_type, + (Network::Address::InstanceConstSharedPtr address, Network::Socket::Type socket_type, const Network::Socket::OptionsSharedPtr& options, const ListenSocketCreationParams& params)); MOCK_METHOD(DrainManager*, createDrainManager_, diff --git a/test/server/connection_handler_test.cc b/test/server/connection_handler_test.cc index 247810612f14..65703ba56dca 100644 --- a/test/server/connection_handler_test.cc +++ b/test/server/connection_handler_test.cc @@ -50,8 +50,7 @@ class ConnectionHandlerTest : public testing::Test, protected Logger::Loggable> filter_chain_manager = nullptr) @@ -126,7 +125,7 @@ class ConnectionHandlerTest : public testing::Test, protected Logger::Loggable> overridden_filter_chain_manager = @@ -142,7 +141,7 @@ class ConnectionHandlerTest : public testing::Test, protected Logger::Loggablesocket_)); - if (socket_type == Network::Address::SocketType::Stream) { + if (socket_type == Network::Socket::Type::Stream) { EXPECT_CALL(dispatcher_, createListener_(_, _, _)) .WillOnce(Invoke([listener, listener_callbacks](Network::SocketSharedPtr&&, Network::ListenerCallbacks& cb, @@ -649,7 +648,7 @@ TEST_F(ConnectionHandlerTest, ContinueOnListenerFilterTimeout) { auto listener = new NiceMock(); TestListener* test_listener = addListener(1, true, false, "test_listener", listener, &listener_callbacks, nullptr, nullptr, - Network::Address::SocketType::Stream, std::chrono::milliseconds(15000), true); + Network::Socket::Type::Stream, std::chrono::milliseconds(15000), true); EXPECT_CALL(*socket_factory_, localAddress()).WillRepeatedly(ReturnRef(local_address_)); handler_->addListener(absl::nullopt, *test_listener); @@ -733,7 +732,7 @@ TEST_F(ConnectionHandlerTest, ListenerFilterDisabledTimeout) { auto listener = new NiceMock(); TestListener* test_listener = addListener(1, true, false, "test_listener", listener, &listener_callbacks, nullptr, nullptr, - Network::Address::SocketType::Stream, std::chrono::milliseconds()); + Network::Socket::Type::Stream, std::chrono::milliseconds()); EXPECT_CALL(*socket_factory_, localAddress()).WillRepeatedly(ReturnRef(local_address_)); handler_->addListener(absl::nullopt, *test_listener); @@ -803,7 +802,7 @@ TEST_F(ConnectionHandlerTest, UdpListenerNoFilterThrowsException) { auto listener = new NiceMock(); TestListener* test_listener = addListener(1, true, false, "test_listener", listener, nullptr, nullptr, nullptr, - Network::Address::SocketType::Datagram, std::chrono::milliseconds()); + Network::Socket::Type::Datagram, std::chrono::milliseconds()); EXPECT_CALL(factory_, createUdpListenerFilterChain(_, _)) .WillOnce(Invoke([&](Network::UdpListenerFilterManager&, Network::UdpReadFilterCallbacks&) -> bool { return true; })); @@ -838,7 +837,7 @@ TEST_F(ConnectionHandlerTest, TcpListenerInplaceUpdate) { std::make_shared>(); TestListener* new_test_listener = addListener(new_listener_tag, true, false, "test_listener", /* Network::Listener */ nullptr, - &new_listener_callbacks, nullptr, nullptr, Network::Address::SocketType::Stream, + &new_listener_callbacks, nullptr, nullptr, Network::Socket::Type::Stream, std::chrono::milliseconds(15000), false, overridden_filter_chain_manager); handler_->addListener(old_listener_tag, *new_test_listener); 
ASSERT_EQ(new_listener_callbacks, nullptr) diff --git a/test/server/filter_chain_benchmark_test.cc b/test/server/filter_chain_benchmark_test.cc index 600855709f2d..18cb7a5caf67 100644 --- a/test/server/filter_chain_benchmark_test.cc +++ b/test/server/filter_chain_benchmark_test.cc @@ -89,9 +89,7 @@ class MockConnectionSocket : public Network::ConnectionSocket { // Dummy method void close() override {} bool isOpen() const override { return false; } - Network::Address::SocketType socketType() const override { - return Network::Address::SocketType::Stream; - } + Network::Socket::Type socketType() const override { return Network::Socket::Type::Stream; } Network::Address::Type addressType() const override { return local_address_->type(); } void setLocalAddress(const Network::Address::InstanceConstSharedPtr&) override {} void restoreLocalAddress(const Network::Address::InstanceConstSharedPtr&) override {} diff --git a/test/server/listener_manager_impl_test.cc b/test/server/listener_manager_impl_test.cc index 6d3f7d887ac3..a9c169fd5b55 100644 --- a/test/server/listener_manager_impl_test.cc +++ b/test/server/listener_manager_impl_test.cc @@ -336,13 +336,12 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, UdpAddress) { EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(*worker_, addListener(_, _, _)); EXPECT_CALL(listener_factory_, - createListenSocket(_, Network::Address::SocketType::Datagram, _, {{true, false}})) - .WillOnce( - Invoke([this](const Network::Address::InstanceConstSharedPtr&, - Network::Address::SocketType, const Network::Socket::OptionsSharedPtr&, - const ListenSocketCreationParams&) -> Network::SocketSharedPtr { - return listener_factory_.socket_; - })); + createListenSocket(_, Network::Socket::Type::Datagram, _, {{true, false}})) + .WillOnce(Invoke([this](const Network::Address::InstanceConstSharedPtr&, + Network::Socket::Type, const Network::Socket::OptionsSharedPtr&, + const ListenSocketCreationParams&) -> Network::SocketSharedPtr { + return listener_factory_.socket_; + })); EXPECT_CALL(*listener_factory_.socket_, setSocketOption(_, _, _, _)).Times(testing::AtLeast(1)); EXPECT_CALL(os_sys_calls_, close(_)).WillRepeatedly(Return(Api::SysCallIntResult{0, errno})); manager_->addOrUpdateListener(listener_proto, "", true); @@ -1370,7 +1369,7 @@ name: foo EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, ListenSocketCreationParams(false))) .WillOnce(Invoke([this, &syscall_result, &real_listener_factory]( const Network::Address::InstanceConstSharedPtr& address, - Network::Address::SocketType socket_type, + Network::Socket::Type socket_type, const Network::Socket::OptionsSharedPtr& options, const ListenSocketCreationParams& params) -> Network::SocketSharedPtr { EXPECT_CALL(server_, hotRestart).Times(0); @@ -1407,7 +1406,7 @@ reuse_port: true EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {{true, false}})) .WillOnce(Invoke([this, &syscall_result, &real_listener_factory]( const Network::Address::InstanceConstSharedPtr& address, - Network::Address::SocketType socket_type, + Network::Socket::Type socket_type, const Network::Socket::OptionsSharedPtr& options, const ListenSocketCreationParams& params) -> Network::SocketSharedPtr { EXPECT_CALL(server_, hotRestart).Times(0); @@ -1423,7 +1422,7 @@ TEST_F(ListenerManagerImplTest, NotSupportedDatagramUds) { ProdListenerComponentFactory real_listener_factory(server_); EXPECT_THROW_WITH_MESSAGE(real_listener_factory.createListenSocket( std::make_shared("/foo"), - Network::Address::SocketType::Datagram, nullptr, {true}), + 
Network::Socket::Type::Datagram, nullptr, {true}), EnvoyException, "socket type SocketType::Datagram not supported for pipes"); } @@ -3647,7 +3646,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, TransparentFreebindListenerDisabl )EOF", Network::Address::IpVersion::v4); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})) - .WillOnce(Invoke([&](Network::Address::InstanceConstSharedPtr, Network::Address::SocketType, + .WillOnce(Invoke([&](Network::Address::InstanceConstSharedPtr, Network::Socket::Type, const Network::Socket::OptionsSharedPtr& options, const ListenSocketCreationParams&) -> Network::SocketSharedPtr { EXPECT_EQ(options, nullptr); diff --git a/test/server/listener_manager_impl_test.h b/test/server/listener_manager_impl_test.h index bd979c98ed14..d1f3256fd4a4 100644 --- a/test/server/listener_manager_impl_test.h +++ b/test/server/listener_manager_impl_test.h @@ -203,8 +203,7 @@ class ListenerManagerImplTest : public testing::Test { ListenSocketCreationParams expected_creation_params = {true, true}) { EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, expected_creation_params)) .WillOnce(Invoke([this, expected_num_options, &expected_state]( - const Network::Address::InstanceConstSharedPtr&, - Network::Address::SocketType, + const Network::Address::InstanceConstSharedPtr&, Network::Socket::Type, const Network::Socket::OptionsSharedPtr& options, const ListenSocketCreationParams&) -> Network::SocketSharedPtr { EXPECT_NE(options.get(), nullptr); diff --git a/test/test_common/network_utility.cc b/test/test_common/network_utility.cc index 5821465bd495..5936b88c69c3 100644 --- a/test/test_common/network_utility.cc +++ b/test/test_common/network_utility.cc @@ -20,7 +20,7 @@ namespace Network { namespace Test { Address::InstanceConstSharedPtr findOrCheckFreePort(Address::InstanceConstSharedPtr addr_port, - Address::SocketType type) { + Socket::Type type) { if (addr_port == nullptr || addr_port->type() != Address::Type::Ip) { ADD_FAILURE() << "Not an internet address: " << (addr_port == nullptr ? "nullptr" : addr_port->asString()); @@ -34,7 +34,7 @@ Address::InstanceConstSharedPtr findOrCheckFreePort(Address::InstanceConstShared const char* failing_fn = nullptr; if (result.rc_ != 0) { failing_fn = "bind"; - } else if (type == Address::SocketType::Stream) { + } else if (type == Socket::Type::Stream) { // Try listening on the port also, if the type is TCP. 
result = sock.listen(1); if (result.rc_ != 0) { @@ -58,7 +58,7 @@ Address::InstanceConstSharedPtr findOrCheckFreePort(Address::InstanceConstShared } Address::InstanceConstSharedPtr findOrCheckFreePort(const std::string& addr_port, - Address::SocketType type) { + Socket::Type type) { auto instance = Utility::parseInternetAddressAndPort(addr_port); if (instance != nullptr) { instance = findOrCheckFreePort(instance, type); @@ -159,7 +159,7 @@ std::string ipVersionToDnsFamily(Network::Address::IpVersion version) { } std::pair -bindFreeLoopbackPort(Address::IpVersion version, Address::SocketType type) { +bindFreeLoopbackPort(Address::IpVersion version, Socket::Type type) { Address::InstanceConstSharedPtr addr = getCanonicalLoopbackAddress(version); SocketPtr sock = std::make_unique(type, addr); Api::SysCallIntResult result = sock->bind(addr); diff --git a/test/test_common/network_utility.h b/test/test_common/network_utility.h index 3dc280d42388..36fa1868ea84 100644 --- a/test/test_common/network_utility.h +++ b/test/test_common/network_utility.h @@ -25,7 +25,7 @@ namespace Test { * listening, else nullptr if the address and port are not free. */ Address::InstanceConstSharedPtr findOrCheckFreePort(Address::InstanceConstSharedPtr addr_port, - Address::SocketType type); + Socket::Type type); /** * As above, but addr_port is specified as a string. For example: @@ -35,7 +35,7 @@ Address::InstanceConstSharedPtr findOrCheckFreePort(Address::InstanceConstShared * - [::]:45678 Check whether a specific port on all local IPv6 addresses is free. */ Address::InstanceConstSharedPtr findOrCheckFreePort(const std::string& addr_port, - Address::SocketType type); + Socket::Type type); /** * Get a URL ready IP loopback address as a string. @@ -115,7 +115,7 @@ std::string ipVersionToDnsFamily(Network::Address::IpVersion version); * @returns the address and the fd of the socket bound to that address. */ std::pair -bindFreeLoopbackPort(Address::IpVersion version, Address::SocketType type); +bindFreeLoopbackPort(Address::IpVersion version, Socket::Type type); /** * Create a transport socket for testing purposes. 
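For illustration only, and not part of the patch above: these hunks rename the test helpers' socket-type parameter from Network::Address::SocketType to Network::Socket::Type. Below is a minimal call-site sketch under that assumption, using only the findOrCheckFreePort and bindFreeLoopbackPort signatures shown in this diff; the wrapper function name is invented for the example.

#include "test/test_common/network_utility.h"

// Sketch: exercise the renamed Socket::Type enum with the helpers above.
void socketTypeUsageSketch() {
  // Find (port 0) or check a free TCP port; for Stream the helper listens as well as binds.
  auto tcp_addr = Envoy::Network::Test::findOrCheckFreePort(
      "127.0.0.1:0", Envoy::Network::Socket::Type::Stream);
  // Bind a free loopback UDP port; returns a {address, socket} pair.
  auto udp_bound = Envoy::Network::Test::bindFreeLoopbackPort(
      Envoy::Network::Address::IpVersion::v4, Envoy::Network::Socket::Type::Datagram);
  (void)tcp_addr;
  (void)udp_bound;
}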
diff --git a/test/test_common/network_utility_test.cc b/test/test_common/network_utility_test.cc index b95e29a94617..dc40f1d28f2f 100644 --- a/test/test_common/network_utility_test.cc +++ b/test/test_common/network_utility_test.cc @@ -35,7 +35,7 @@ TEST_P(NetworkUtilityTest, DISABLED_ValidateBindFreeLoopbackPort) { std::map seen; const size_t kLimit = 50; for (size_t n = 0; n < kLimit; ++n) { - auto addr_fd = Network::Test::bindFreeLoopbackPort(version_, Address::SocketType::Stream); + auto addr_fd = Network::Test::bindFreeLoopbackPort(version_, Socket::Type::Stream); addr_fd.second->close(); auto addr = addr_fd.first->asString(); auto search = seen.find(addr); From 1745d224d550078dfb3a3a4e4b171692dbb4b95e Mon Sep 17 00:00:00 2001 From: Sam Flattery <44659644+samflattery@users.noreply.github.com> Date: Mon, 8 Jun 2020 23:17:07 +0100 Subject: [PATCH 320/909] fuzz: add fuzz tests for validateCustomSettingsParameters (#11494) Signed-off-by: Sam Flattery --- test/common/http/BUILD | 1 + test/common/http/utility_fuzz.proto | 2 ++ test/common/http/utility_fuzz_test.cc | 11 +++++++++++ 3 files changed, 14 insertions(+) diff --git a/test/common/http/BUILD b/test/common/http/BUILD index fc43ab1ff66f..41160fb6a407 100644 --- a/test/common/http/BUILD +++ b/test/common/http/BUILD @@ -342,6 +342,7 @@ envoy_cc_test( envoy_proto_library( name = "utility_fuzz_proto", srcs = ["utility_fuzz.proto"], + deps = ["@envoy_api//envoy/config/core/v3:pkg"], ) envoy_cc_fuzz_test( diff --git a/test/common/http/utility_fuzz.proto b/test/common/http/utility_fuzz.proto index 2f023d29911f..50bb1a3c911b 100644 --- a/test/common/http/utility_fuzz.proto +++ b/test/common/http/utility_fuzz.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package test.common.http; import "validate/validate.proto"; +import "envoy/config/core/v3/protocol.proto"; // Structured input for utility_fuzz_test. @@ -44,5 +45,6 @@ message UtilityTestCase { [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE, strict: false}]; CookieValue make_set_cookie_value = 10; string parse_authority_string = 11; + envoy.config.core.v3.Http2ProtocolOptions initialize_and_validate = 12; } } diff --git a/test/common/http/utility_fuzz_test.cc b/test/common/http/utility_fuzz_test.cc index 4e62e7c2edc1..e81c10e4ae97 100644 --- a/test/common/http/utility_fuzz_test.cc +++ b/test/common/http/utility_fuzz_test.cc @@ -10,6 +10,12 @@ namespace Fuzz { namespace { DEFINE_PROTO_FUZZER(const test::common::http::UtilityTestCase& input) { + try { + TestUtility::validate(input); + } catch (ProtoValidationException& e) { + ENVOY_LOG_MISC(debug, "ProtoValidationException: {}", e.what()); + return; + } switch (input.utility_selector_case()) { case test::common::http::UtilityTestCase::kParseQueryString: { Http::Utility::parseQueryString(input.parse_query_string()); @@ -71,6 +77,11 @@ DEFINE_PROTO_FUZZER(const test::common::http::UtilityTestCase& input) { Http::Utility::parseAuthority(authority_string); break; } + case test::common::http::UtilityTestCase::kInitializeAndValidate: { + const auto& options = input.initialize_and_validate(); + Http2::Utility::initializeAndValidateOptions(options); + break; + } default: // Nothing to do. From f5753e5d9aa2a201991ab6abcdb494eb5d483733 Mon Sep 17 00:00:00 2001 From: htuch Date: Mon, 8 Jun 2020 20:35:27 -0400 Subject: [PATCH 321/909] api: populate secure edge defaults manifest. (#11364) Adding API annotations and manifest entries to match https://www.envoyproxy.io/docs/envoy/latest/configuration/best_practices/edge#best-practices-edge. 
Risk level: Low (API/docs only change) Testing: Docs build and inspection. Fixes #11085 Signed-off-by: Harvey Tuch --- api/envoy/config/cluster/v3/cluster.proto | 7 ++- .../config/cluster/v4alpha/cluster.proto | 7 ++- .../v3/http_connection_manager.proto | 16 ++++-- .../v4alpha/http_connection_manager.proto | 16 ++++-- docs/BUILD | 2 +- docs/edge_defaults_manifest.yaml | 21 -------- docs/protodoc_manifest.yaml | 51 +++++++++++++++++++ .../envoy/config/cluster/v3/cluster.proto | 7 ++- .../config/cluster/v4alpha/cluster.proto | 7 ++- .../v3/http_connection_manager.proto | 16 ++++-- .../v4alpha/http_connection_manager.proto | 16 ++++-- tools/protodoc/BUILD | 11 +++- tools/protodoc/manifest.proto | 29 +++++++++++ tools/protodoc/protodoc.py | 39 +++++++++----- 14 files changed, 180 insertions(+), 65 deletions(-) delete mode 100644 docs/edge_defaults_manifest.yaml create mode 100644 docs/protodoc_manifest.yaml create mode 100644 tools/protodoc/manifest.proto diff --git a/api/envoy/config/cluster/v3/cluster.proto b/api/envoy/config/cluster/v3/cluster.proto index be7710815b70..7eb53d84c4f8 100644 --- a/api/envoy/config/cluster/v3/cluster.proto +++ b/api/envoy/config/cluster/v3/cluster.proto @@ -19,6 +19,7 @@ import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -584,7 +585,8 @@ message Cluster { // Soft limit on size of the cluster’s connections read and write buffers. If // unspecified, an implementation defined default is applied (1MiB). - google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5; + google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5 + [(udpa.annotations.security).configure_for_untrusted_upstream = true]; // The :ref:`load balancer type ` to use // when picking a host in the cluster. @@ -635,7 +637,8 @@ message Cluster { // supports prior knowledge for upstream connections. Even if TLS is used // with ALPN, `http2_protocol_options` must be specified. As an aside this allows HTTP/2 // connections to happen over plain text. - core.v3.Http2ProtocolOptions http2_protocol_options = 14; + core.v3.Http2ProtocolOptions http2_protocol_options = 14 + [(udpa.annotations.security).configure_for_untrusted_upstream = true]; // The extension_protocol_options field is used to provide extension-specific protocol options // for upstream connections. The key should match the extension filter name, such as diff --git a/api/envoy/config/cluster/v4alpha/cluster.proto b/api/envoy/config/cluster/v4alpha/cluster.proto index 2b044b2c6437..eab2f2d80fcb 100644 --- a/api/envoy/config/cluster/v4alpha/cluster.proto +++ b/api/envoy/config/cluster/v4alpha/cluster.proto @@ -19,6 +19,7 @@ import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -585,7 +586,8 @@ message Cluster { // Soft limit on size of the cluster’s connections read and write buffers. If // unspecified, an implementation defined default is applied (1MiB). 
- google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5; + google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5 + [(udpa.annotations.security).configure_for_untrusted_upstream = true]; // The :ref:`load balancer type ` to use // when picking a host in the cluster. @@ -636,7 +638,8 @@ message Cluster { // supports prior knowledge for upstream connections. Even if TLS is used // with ALPN, `http2_protocol_options` must be specified. As an aside this allows HTTP/2 // connections to happen over plain text. - core.v4alpha.Http2ProtocolOptions http2_protocol_options = 14; + core.v4alpha.Http2ProtocolOptions http2_protocol_options = 14 + [(udpa.annotations.security).configure_for_untrusted_upstream = true]; // The extension_protocol_options field is used to provide extension-specific protocol options // for upstream connections. The key should match the extension filter name, such as diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto b/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto index b46e63076d7a..355eaba01e93 100644 --- a/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto +++ b/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto @@ -19,6 +19,7 @@ import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -281,13 +282,15 @@ message HttpConnectionManager { // Additional settings for HTTP requests handled by the connection manager. These will be // applicable to both HTTP1 and HTTP2 requests. - config.core.v3.HttpProtocolOptions common_http_protocol_options = 35; + config.core.v3.HttpProtocolOptions common_http_protocol_options = 35 + [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // Additional HTTP/1 settings that are passed to the HTTP/1 codec. config.core.v3.Http1ProtocolOptions http_protocol_options = 8; // Additional HTTP/2 settings that are passed directly to the HTTP/2 codec. - config.core.v3.Http2ProtocolOptions http2_protocol_options = 9; + config.core.v3.Http2ProtocolOptions http2_protocol_options = 9 + [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // An optional override that the connection manager will write to the server // header in responses. If not set, the default is *envoy*. @@ -332,13 +335,15 @@ message HttpConnectionManager { // // A value of 0 will completely disable the connection manager stream idle // timeout, although per-route idle timeout overrides will continue to apply. - google.protobuf.Duration stream_idle_timeout = 24; + google.protobuf.Duration stream_idle_timeout = 24 + [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // The amount of time that Envoy will wait for the entire request to be received. // The timer is activated when the request is initiated, and is disarmed when the last byte of the // request is sent upstream (i.e. all decoding filters have processed the request), OR when the // response is initiated. If not specified or set to 0, this timeout is disabled. 
- google.protobuf.Duration request_timeout = 28; + google.protobuf.Duration request_timeout = 28 + [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // The time that Envoy will wait between sending an HTTP/2 “shutdown // notification” (GOAWAY frame with max stream ID) and a final GOAWAY frame. @@ -394,7 +399,8 @@ message HttpConnectionManager { // :ref:`config_http_conn_man_headers_x-forwarded-for`, // :ref:`config_http_conn_man_headers_x-envoy-internal`, and // :ref:`config_http_conn_man_headers_x-envoy-external-address` for more information. - google.protobuf.BoolValue use_remote_address = 14; + google.protobuf.BoolValue use_remote_address = 14 + [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // The number of additional ingress proxy hops from the right side of the // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header to trust when diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto index 482bb5ed95e9..f5e6619dee33 100644 --- a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto +++ b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto @@ -19,6 +19,7 @@ import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -281,13 +282,15 @@ message HttpConnectionManager { // Additional settings for HTTP requests handled by the connection manager. These will be // applicable to both HTTP1 and HTTP2 requests. - config.core.v4alpha.HttpProtocolOptions common_http_protocol_options = 35; + config.core.v4alpha.HttpProtocolOptions common_http_protocol_options = 35 + [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // Additional HTTP/1 settings that are passed to the HTTP/1 codec. config.core.v4alpha.Http1ProtocolOptions http_protocol_options = 8; // Additional HTTP/2 settings that are passed directly to the HTTP/2 codec. - config.core.v4alpha.Http2ProtocolOptions http2_protocol_options = 9; + config.core.v4alpha.Http2ProtocolOptions http2_protocol_options = 9 + [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // An optional override that the connection manager will write to the server // header in responses. If not set, the default is *envoy*. @@ -332,13 +335,15 @@ message HttpConnectionManager { // // A value of 0 will completely disable the connection manager stream idle // timeout, although per-route idle timeout overrides will continue to apply. - google.protobuf.Duration stream_idle_timeout = 24; + google.protobuf.Duration stream_idle_timeout = 24 + [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // The amount of time that Envoy will wait for the entire request to be received. // The timer is activated when the request is initiated, and is disarmed when the last byte of the // request is sent upstream (i.e. all decoding filters have processed the request), OR when the // response is initiated. If not specified or set to 0, this timeout is disabled. 
- google.protobuf.Duration request_timeout = 28; + google.protobuf.Duration request_timeout = 28 + [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // The time that Envoy will wait between sending an HTTP/2 “shutdown // notification” (GOAWAY frame with max stream ID) and a final GOAWAY frame. @@ -394,7 +399,8 @@ message HttpConnectionManager { // :ref:`config_http_conn_man_headers_x-forwarded-for`, // :ref:`config_http_conn_man_headers_x-envoy-internal`, and // :ref:`config_http_conn_man_headers_x-envoy-external-address` for more information. - google.protobuf.BoolValue use_remote_address = 14; + google.protobuf.BoolValue use_remote_address = 14 + [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // The number of additional ingress proxy hops from the right side of the // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header to trust when diff --git a/docs/BUILD b/docs/BUILD index d190c0a59a0a..ead7bddb9a7f 100644 --- a/docs/BUILD +++ b/docs/BUILD @@ -1,3 +1,3 @@ licenses(["notice"]) # Apache 2 -exports_files(["edge_defaults_manifest.yaml"]) +exports_files(["protodoc_manifest.yaml"]) diff --git a/docs/edge_defaults_manifest.yaml b/docs/edge_defaults_manifest.yaml deleted file mode 100644 index b5072c26a32b..000000000000 --- a/docs/edge_defaults_manifest.yaml +++ /dev/null @@ -1,21 +0,0 @@ -envoy.config.bootstrap.v3.Bootstrap.overload_manager: - refresh_interval: 0.25s - resource_monitors: - - name: "envoy.resource_monitors.fixed_heap" - typed_config: - "@type": type.googleapis.com/envoy.config.resource_monitor.fixed_heap.v2alpha.FixedHeapConfig - # TODO: Tune for your system. - max_heap_size_bytes: 2147483648 # 2 GiB - actions: - - name: "envoy.overload_actions.shrink_heap" - triggers: - - name: "envoy.resource_monitors.fixed_heap" - threshold: - value: 0.95 - - name: "envoy.overload_actions.stop_accepting_requests" - triggers: - - name: "envoy.resource_monitors.fixed_heap" - threshold: - value: 0.98 - -envoy.config.listener.v3.Listener.per_connection_buffer_limit_bytes: 32768 # 32 KiB diff --git a/docs/protodoc_manifest.yaml b/docs/protodoc_manifest.yaml new file mode 100644 index 000000000000..2e2afff3264d --- /dev/null +++ b/docs/protodoc_manifest.yaml @@ -0,0 +1,51 @@ +fields: + envoy.config.bootstrap.v3.Bootstrap.overload_manager: + edge_config: + example: + refresh_interval: 0.25s + resource_monitors: + - name: "envoy.resource_monitors.fixed_heap" + typed_config: + "@type": type.googleapis.com/envoy.config.resource_monitor.fixed_heap.v2alpha.FixedHeapConfig + max_heap_size_bytes: 1073741824 + actions: + - name: "envoy.overload_actions.shrink_heap" + triggers: + - name: "envoy.resource_monitors.fixed_heap" + threshold: + value: 0.90 + - name: "envoy.overload_actions.stop_accepting_requests" + triggers: + - name: "envoy.resource_monitors.fixed_heap" + threshold: + value: 0.95 + envoy.config.cluster.v3.Cluster.per_connection_buffer_limit_bytes: + edge_config: { example: 32768 } + envoy.config.cluster.v3.Cluster.http2_protocol_options: + edge_config: + example: + initial_stream_window_size: 65536 # 64 KiB + initial_connection_window_size: 1048576 # 1 MiB + envoy.config.listener.v3.Listener.per_connection_buffer_limit_bytes: + edge_config: { example: 32768 } + envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.common_http_protocol_options: + edge_config: + example: + idle_timeout: 900s # 15 mins + headers_with_underscores_action: REJECT_REQUEST + 
envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.http2_protocol_options: + edge_config: + example: + max_concurrent_streams: 100 + initial_stream_window_size: 65536 # 64 KiB + initial_connection_window_size: 1048576 # 1 MiB + envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.stream_idle_timeout: + edge_config: + example: 300s # 5 mins + envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.request_timeout: + edge_config: + note: > + This timeout is not compatible with streaming requests. + example: 300s # 5 mins + envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.use_remote_address: + edge_config: { example: true } diff --git a/generated_api_shadow/envoy/config/cluster/v3/cluster.proto b/generated_api_shadow/envoy/config/cluster/v3/cluster.proto index f512cbcc9d22..8140007f68af 100644 --- a/generated_api_shadow/envoy/config/cluster/v3/cluster.proto +++ b/generated_api_shadow/envoy/config/cluster/v3/cluster.proto @@ -20,6 +20,7 @@ import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -582,7 +583,8 @@ message Cluster { // Soft limit on size of the cluster’s connections read and write buffers. If // unspecified, an implementation defined default is applied (1MiB). - google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5; + google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5 + [(udpa.annotations.security).configure_for_untrusted_upstream = true]; // The :ref:`load balancer type ` to use // when picking a host in the cluster. @@ -633,7 +635,8 @@ message Cluster { // supports prior knowledge for upstream connections. Even if TLS is used // with ALPN, `http2_protocol_options` must be specified. As an aside this allows HTTP/2 // connections to happen over plain text. - core.v3.Http2ProtocolOptions http2_protocol_options = 14; + core.v3.Http2ProtocolOptions http2_protocol_options = 14 + [(udpa.annotations.security).configure_for_untrusted_upstream = true]; // The extension_protocol_options field is used to provide extension-specific protocol options // for upstream connections. The key should match the extension filter name, such as diff --git a/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto b/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto index 2b044b2c6437..eab2f2d80fcb 100644 --- a/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto +++ b/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto @@ -19,6 +19,7 @@ import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -585,7 +586,8 @@ message Cluster { // Soft limit on size of the cluster’s connections read and write buffers. If // unspecified, an implementation defined default is applied (1MiB). - google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5; + google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5 + [(udpa.annotations.security).configure_for_untrusted_upstream = true]; // The :ref:`load balancer type ` to use // when picking a host in the cluster. 
@@ -636,7 +638,8 @@ message Cluster { // supports prior knowledge for upstream connections. Even if TLS is used // with ALPN, `http2_protocol_options` must be specified. As an aside this allows HTTP/2 // connections to happen over plain text. - core.v4alpha.Http2ProtocolOptions http2_protocol_options = 14; + core.v4alpha.Http2ProtocolOptions http2_protocol_options = 14 + [(udpa.annotations.security).configure_for_untrusted_upstream = true]; // The extension_protocol_options field is used to provide extension-specific protocol options // for upstream connections. The key should match the extension filter name, such as diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto index 8efa78bf0eb9..1362850f0530 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto @@ -19,6 +19,7 @@ import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -283,13 +284,15 @@ message HttpConnectionManager { // Additional settings for HTTP requests handled by the connection manager. These will be // applicable to both HTTP1 and HTTP2 requests. - config.core.v3.HttpProtocolOptions common_http_protocol_options = 35; + config.core.v3.HttpProtocolOptions common_http_protocol_options = 35 + [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // Additional HTTP/1 settings that are passed to the HTTP/1 codec. config.core.v3.Http1ProtocolOptions http_protocol_options = 8; // Additional HTTP/2 settings that are passed directly to the HTTP/2 codec. - config.core.v3.Http2ProtocolOptions http2_protocol_options = 9; + config.core.v3.Http2ProtocolOptions http2_protocol_options = 9 + [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // An optional override that the connection manager will write to the server // header in responses. If not set, the default is *envoy*. @@ -334,13 +337,15 @@ message HttpConnectionManager { // // A value of 0 will completely disable the connection manager stream idle // timeout, although per-route idle timeout overrides will continue to apply. - google.protobuf.Duration stream_idle_timeout = 24; + google.protobuf.Duration stream_idle_timeout = 24 + [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // The amount of time that Envoy will wait for the entire request to be received. // The timer is activated when the request is initiated, and is disarmed when the last byte of the // request is sent upstream (i.e. all decoding filters have processed the request), OR when the // response is initiated. If not specified or set to 0, this timeout is disabled. - google.protobuf.Duration request_timeout = 28; + google.protobuf.Duration request_timeout = 28 + [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // The time that Envoy will wait between sending an HTTP/2 “shutdown // notification” (GOAWAY frame with max stream ID) and a final GOAWAY frame. 
@@ -396,7 +401,8 @@ message HttpConnectionManager { // :ref:`config_http_conn_man_headers_x-forwarded-for`, // :ref:`config_http_conn_man_headers_x-envoy-internal`, and // :ref:`config_http_conn_man_headers_x-envoy-external-address` for more information. - google.protobuf.BoolValue use_remote_address = 14; + google.protobuf.BoolValue use_remote_address = 14 + [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // The number of additional ingress proxy hops from the right side of the // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header to trust when diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto index 482bb5ed95e9..f5e6619dee33 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto @@ -19,6 +19,7 @@ import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -281,13 +282,15 @@ message HttpConnectionManager { // Additional settings for HTTP requests handled by the connection manager. These will be // applicable to both HTTP1 and HTTP2 requests. - config.core.v4alpha.HttpProtocolOptions common_http_protocol_options = 35; + config.core.v4alpha.HttpProtocolOptions common_http_protocol_options = 35 + [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // Additional HTTP/1 settings that are passed to the HTTP/1 codec. config.core.v4alpha.Http1ProtocolOptions http_protocol_options = 8; // Additional HTTP/2 settings that are passed directly to the HTTP/2 codec. - config.core.v4alpha.Http2ProtocolOptions http2_protocol_options = 9; + config.core.v4alpha.Http2ProtocolOptions http2_protocol_options = 9 + [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // An optional override that the connection manager will write to the server // header in responses. If not set, the default is *envoy*. @@ -332,13 +335,15 @@ message HttpConnectionManager { // // A value of 0 will completely disable the connection manager stream idle // timeout, although per-route idle timeout overrides will continue to apply. - google.protobuf.Duration stream_idle_timeout = 24; + google.protobuf.Duration stream_idle_timeout = 24 + [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // The amount of time that Envoy will wait for the entire request to be received. // The timer is activated when the request is initiated, and is disarmed when the last byte of the // request is sent upstream (i.e. all decoding filters have processed the request), OR when the // response is initiated. If not specified or set to 0, this timeout is disabled. - google.protobuf.Duration request_timeout = 28; + google.protobuf.Duration request_timeout = 28 + [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // The time that Envoy will wait between sending an HTTP/2 “shutdown // notification” (GOAWAY frame with max stream ID) and a final GOAWAY frame. 
@@ -394,7 +399,8 @@ message HttpConnectionManager { // :ref:`config_http_conn_man_headers_x-forwarded-for`, // :ref:`config_http_conn_man_headers_x-envoy-internal`, and // :ref:`config_http_conn_man_headers_x-envoy-external-address` for more information. - google.protobuf.BoolValue use_remote_address = 14; + google.protobuf.BoolValue use_remote_address = 14 + [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // The number of additional ingress proxy hops from the right side of the // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header to trust when diff --git a/tools/protodoc/BUILD b/tools/protodoc/BUILD index 256316f2a18c..51bb3a9fbde9 100644 --- a/tools/protodoc/BUILD +++ b/tools/protodoc/BUILD @@ -1,8 +1,11 @@ load("@rules_python//python:defs.bzl", "py_binary") load("@protodoc_pip3//:requirements.bzl", "requirement") +load("//bazel:envoy_build_system.bzl", "envoy_package", "envoy_proto_library") licenses(["notice"]) # Apache 2 +envoy_package() + py_binary( name = "generate_empty", srcs = ["generate_empty.py"], @@ -10,12 +13,18 @@ py_binary( deps = [":protodoc"], ) +envoy_proto_library( + name = "manifest_proto", + srcs = ["manifest.proto"], +) + py_binary( name = "protodoc", srcs = ["protodoc.py"], - data = ["//docs:edge_defaults_manifest.yaml"], + data = ["//docs:protodoc_manifest.yaml"], visibility = ["//visibility:public"], deps = [ + ":manifest_proto_py_proto", "//tools/api_proto_plugin", "//tools/config_validation:validate_fragment", "@com_envoyproxy_protoc_gen_validate//validate:validate_py", diff --git a/tools/protodoc/manifest.proto b/tools/protodoc/manifest.proto new file mode 100644 index 000000000000..4757c76a8c10 --- /dev/null +++ b/tools/protodoc/manifest.proto @@ -0,0 +1,29 @@ +syntax = "proto3"; + +package tools.protodoc; + +import "google/protobuf/struct.proto"; + +// Additional structure information consumed by protodoc when generating +// documentation for a field. +message Description { + message EdgeConfiguration { + // Example secure edge default for the field. + google.protobuf.Value example = 1; + + // Additional note to include in the configuration warning. + string note = 2; + } + + // Additional information for when this field is used in edge deployments. + EdgeConfiguration edge_config = 1; + + // TODO: add additional information here to reflect things like Envoy + // implementation status. +} + +message Manifest { + // Map from fully qualified field name to additional information to be used in + // protodoc generation. + map fields = 1; +} diff --git a/tools/protodoc/protodoc.py b/tools/protodoc/protodoc.py index 750ca3cd78d1..c96f7cafa6c9 100755 --- a/tools/protodoc/protodoc.py +++ b/tools/protodoc/protodoc.py @@ -12,6 +12,7 @@ import string import sys +from google.protobuf import json_format from bazel_tools.tools.python.runfiles import runfiles import yaml @@ -27,6 +28,7 @@ from tools.api_proto_plugin import visitor from tools.config_validation import validate_fragment +from tools.protodoc import manifest_pb2 from udpa.annotations import security_pb2 from udpa.annotations import status_pb2 from validate import validate_pb2 @@ -401,7 +403,7 @@ def FormatAnchor(label): return '.. 
_%s:\n\n' % label -def FormatSecurityOptions(security_option, field, type_context, edge_default_yaml): +def FormatSecurityOptions(security_option, field, type_context, edge_config): sections = [] if security_option.configure_for_untrusted_downstream: @@ -410,10 +412,13 @@ def FormatSecurityOptions(security_option, field, type_context, edge_default_yam if security_option.configure_for_untrusted_upstream: sections.append( Indent(4, 'This field should be configured in the presence of untrusted *upstreams*.')) + if edge_config.note: + sections.append(Indent(4, edge_config.note)) - validate_fragment.ValidateFragment(field.type_name[1:], edge_default_yaml) + example_dict = json_format.MessageToDict(edge_config.example) + validate_fragment.ValidateFragment(field.type_name[1:], example_dict) field_name = type_context.name.split('.')[-1] - example = {field_name: edge_default_yaml} + example = {field_name: example_dict} sections.append( Indent(4, 'Example configuration for untrusted environments:\n\n') + Indent(4, '.. code-block:: yaml\n\n') + @@ -423,14 +428,14 @@ def FormatSecurityOptions(security_option, field, type_context, edge_default_yam return '.. attention::\n' + '\n\n'.join(sections) -def FormatFieldAsDefinitionListItem(outer_type_context, type_context, field, - edge_defaults_manifest): +def FormatFieldAsDefinitionListItem(outer_type_context, type_context, field, protodoc_manifest): """Format a FieldDescriptorProto as RST definition list item. Args: outer_type_context: contextual information for enclosing message. type_context: contextual information for message/enum/field. field: FieldDescriptorProto. + protodoc_manifest: tools.protodoc.Manifest for proto. Returns: RST formatted definition list item. @@ -479,11 +484,12 @@ def FormatFieldAsDefinitionListItem(outer_type_context, type_context, field, # If there is a udpa.annotations.security option, include it after the comment. if field.options.HasExtension(security_pb2.security): - edge_default_yaml = edge_defaults_manifest.get(type_context.name) - if not edge_default_yaml: - raise ProtodocError('Missing edge default YAML example for %s' % type_context.name) + manifest_description = protodoc_manifest.fields.get(type_context.name) + if not manifest_description: + raise ProtodocError('Missing protodoc manifest YAML for %s' % type_context.name) formatted_security_options = FormatSecurityOptions( - field.options.Extensions[security_pb2.security], field, type_context, edge_default_yaml) + field.options.Extensions[security_pb2.security], field, type_context, + manifest_description.edge_config) else: formatted_security_options = '' @@ -493,12 +499,13 @@ def FormatFieldAsDefinitionListItem(outer_type_context, type_context, field, Indent, 2), comment + formatted_oneof_comment) + formatted_security_options -def FormatMessageAsDefinitionList(type_context, msg, edge_defaults_manifest): +def FormatMessageAsDefinitionList(type_context, msg, protodoc_manifest): """Format a DescriptorProto as RST definition list. Args: type_context: contextual information for message/enum/field. msg: DescriptorProto. + protodoc_manifest: tools.protodoc.Manifest for proto. Returns: RST formatted definition list item. 
@@ -518,7 +525,7 @@ def FormatMessageAsDefinitionList(type_context, msg, edge_defaults_manifest): type_context.oneof_names[index] = oneof_decl.name return '\n'.join( FormatFieldAsDefinitionListItem(type_context, type_context.ExtendField(index, field.name), - field, edge_defaults_manifest) + field, protodoc_manifest) for index, field in enumerate(msg.field)) + '\n' @@ -574,8 +581,12 @@ class RstFormatVisitor(visitor.Visitor): def __init__(self): r = runfiles.Create() - with open(r.Rlocation('envoy/docs/edge_defaults_manifest.yaml'), 'r') as f: - self.edge_defaults_manifest = yaml.load(f.read()) + with open(r.Rlocation('envoy/docs/protodoc_manifest.yaml'), 'r') as f: + # Load as YAML, emit as JSON and then parse as proto to provide type + # checking. + protodoc_manifest_untyped = yaml.load(f.read()) + self.protodoc_manifest = manifest_pb2.Manifest() + json_format.Parse(json.dumps(protodoc_manifest_untyped), self.protodoc_manifest) def VisitEnum(self, enum_proto, type_context): normal_enum_type = NormalizeTypeContextName(type_context.name) @@ -606,7 +617,7 @@ def VisitMessage(self, msg_proto, type_context, nested_msgs, nested_enums): return anchor + header + proto_link + formatted_leading_comment + FormatMessageAsJson( type_context, msg_proto) + FormatMessageAsDefinitionList( type_context, msg_proto, - self.edge_defaults_manifest) + '\n'.join(nested_msgs) + '\n' + '\n'.join(nested_enums) + self.protodoc_manifest) + '\n'.join(nested_msgs) + '\n' + '\n'.join(nested_enums) def VisitFile(self, file_proto, type_context, services, msgs, enums): has_messages = True From 2b83bc999036086cbc4f69e084dd85ae50d7f620 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Tue, 9 Jun 2020 13:42:25 -0400 Subject: [PATCH 322/909] test: fixing flake. (#11507) This is one of those cases where we actually want delay-close: we are doing a large upload, Enovy sends a response, and we want to make sure the socket stays open long enough the response is received rather than short delay-close causing a connection reset during upload Risk Level: n/a (test only) Testing: indeed Docs Changes: n/a Release Notes: n/a Signed-off-by: Alyssa Wilk --- .../filters/http/buffer/buffer_filter_integration_test.cc | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/test/extensions/filters/http/buffer/buffer_filter_integration_test.cc b/test/extensions/filters/http/buffer/buffer_filter_integration_test.cc index e899205bd6b9..c61b6e175368 100644 --- a/test/extensions/filters/http/buffer/buffer_filter_integration_test.cc +++ b/test/extensions/filters/http/buffer/buffer_filter_integration_test.cc @@ -96,6 +96,13 @@ TEST_P(BufferIntegrationTest, RouterRequestPopulateContentLengthOnTrailers) { } TEST_P(BufferIntegrationTest, RouterRequestBufferLimitExceeded) { + // Make sure the connection isn't closed during request upload. + // Without a large drain-close it's possible that the local reply will be sent + // during request upload, and continued upload will result in TCP reset before + // the response is read. + config_helper_.addConfigModifier( + [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) { hcm.mutable_delayed_close_timeout()->set_seconds(2000 * 1000); }); config_helper_.addFilter(ConfigHelper::smallBufferFilter()); initialize(); From e91d616603dc985507f1aaf8b5f7be35b49a6156 Mon Sep 17 00:00:00 2001 From: htuch Date: Tue, 9 Jun 2020 19:27:30 -0400 Subject: [PATCH 323/909] eds_speed_test: support v2/v3 variants of config. 
(#11505) This shows that the EDS onConfigUpdate() is 2-3x slower when working with v2 config and doing version conversion with original version recovery. Followup PRs will optimize. Relates to #11362 and #10875. Risk level: Low Testing: Ran benchmark with -c opt binary. Signed-off-by: Harvey Tuch --- test/common/upstream/eds_speed_test.cc | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/test/common/upstream/eds_speed_test.cc b/test/common/upstream/eds_speed_test.cc index e02f16a086f8..f79e037e8bf7 100644 --- a/test/common/upstream/eds_speed_test.cc +++ b/test/common/upstream/eds_speed_test.cc @@ -30,7 +30,7 @@ namespace Upstream { class EdsSpeedTest { public: - EdsSpeedTest() : api_(Api::createApiForTest(stats_)) {} + EdsSpeedTest(benchmark::State& state) : state_(state), api_(Api::createApiForTest(stats_)) {} void resetCluster(const std::string& yaml_config, Cluster::InitializePhase initialize_phase) { local_info_.node_.mutable_locality()->set_zone("us-east-1a"); @@ -54,7 +54,9 @@ class EdsSpeedTest { // Set up an EDS config with multiple priorities, localities, weights and make sure // they are loaded and reloaded as expected. - void priorityAndLocalityWeightedHelper(bool ignore_unknown_dynamic_fields, int num_hosts) { + void priorityAndLocalityWeightedHelper(bool v2_config, bool ignore_unknown_dynamic_fields, + int num_hosts) { + state_.PauseTiming(); envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment; cluster_load_assignment.set_cluster_name("fare"); resetCluster(R"EOF( @@ -95,11 +97,20 @@ class EdsSpeedTest { initialize(); Protobuf::RepeatedPtrField resources; - resources.Add()->PackFrom(cluster_load_assignment); + auto* resource = resources.Add(); + resource->PackFrom(cluster_load_assignment); + if (v2_config) { + RELEASE_ASSERT(resource->type_url() == + "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + ""); + resource->set_type_url("type.googleapis.com/envoy.api.v2.ClusterLoadAssignment"); + } + state_.ResumeTiming(); eds_callbacks_->onConfigUpdate(resources, ""); ASSERT(initialized_); } + benchmark::State& state_; bool initialized_{}; Stats::IsolatedStoreImpl stats_; Ssl::MockContextManager ssl_context_manager_; @@ -126,9 +137,9 @@ static void priorityAndLocalityWeighted(benchmark::State& state) { Envoy::Logger::Context logging_state(spdlog::level::warn, Envoy::Logger::Logger::DEFAULT_LOG_FORMAT, lock, false); for (auto _ : state) { - Envoy::Upstream::EdsSpeedTest speed_test; - speed_test.priorityAndLocalityWeightedHelper(state.range(0), state.range(1)); + Envoy::Upstream::EdsSpeedTest speed_test(state); + speed_test.priorityAndLocalityWeightedHelper(state.range(0), state.range(1), state.range(2)); } } -BENCHMARK(priorityAndLocalityWeighted)->Ranges({{false, true}, {2000, 100000}}); +BENCHMARK(priorityAndLocalityWeighted)->Ranges({{false, true}, {false, true}, {2000, 100000}}); From e3f616ec7faa8650fa84e6afaa40754a31056dbf Mon Sep 17 00:00:00 2001 From: Sotiris Nanopoulos Date: Tue, 9 Jun 2020 20:28:15 -0700 Subject: [PATCH 324/909] test: removes abstract mock classes Stream Encoder/Decoder (#11480) Signed-off-by: Sotiris Nanopoulos --- test/common/http/BUILD | 2 -- test/common/http/http1/BUILD | 2 -- test/common/http/http2/BUILD | 1 - test/common/http/http2/codec_impl_test.cc | 2 +- test/common/router/BUILD | 1 - test/mocks/http/stream_decoder.cc | 3 --- test/mocks/http/stream_decoder.h | 20 +++++++------- test/mocks/http/stream_encoder.cc | 8 ++---- test/mocks/http/stream_encoder.h | 32 
++++++++++++----------- 9 files changed, 30 insertions(+), 41 deletions(-) diff --git a/test/common/http/BUILD b/test/common/http/BUILD index 41160fb6a407..2ab34e09af0d 100644 --- a/test/common/http/BUILD +++ b/test/common/http/BUILD @@ -16,7 +16,6 @@ envoy_package() envoy_cc_test( name = "async_client_impl_test", srcs = ["async_client_impl_test.cc"], - tags = ["fails_on_windows"], deps = [ ":common_lib", "//source/common/buffer:buffer_lib", @@ -192,7 +191,6 @@ envoy_cc_fuzz_test( envoy_cc_test( name = "conn_manager_impl_test", srcs = ["conn_manager_impl_test.cc"], - tags = ["fails_on_windows"], deps = [ "//include/envoy/access_log:access_log_interface", "//include/envoy/buffer:buffer_interface", diff --git a/test/common/http/http1/BUILD b/test/common/http/http1/BUILD index 6bfc1268c9c9..715e6dbf0c23 100644 --- a/test/common/http/http1/BUILD +++ b/test/common/http/http1/BUILD @@ -19,7 +19,6 @@ envoy_cc_test( envoy_cc_test( name = "codec_impl_test", srcs = ["codec_impl_test.cc"], - tags = ["fails_on_windows"], deps = [ "//include/envoy/buffer:buffer_interface", "//include/envoy/event:dispatcher_interface", @@ -45,7 +44,6 @@ envoy_cc_test( envoy_cc_test( name = "conn_pool_test", srcs = ["conn_pool_test.cc"], - tags = ["fails_on_windows"], deps = [ "//source/common/buffer:buffer_lib", "//source/common/event:dispatcher_lib", diff --git a/test/common/http/http2/BUILD b/test/common/http/http2/BUILD index 794da7621781..53cbbfdedc62 100644 --- a/test/common/http/http2/BUILD +++ b/test/common/http/http2/BUILD @@ -51,7 +51,6 @@ envoy_cc_test_library( envoy_cc_test( name = "conn_pool_test", srcs = ["conn_pool_test.cc"], - tags = ["fails_on_windows"], deps = [ "//source/common/event:dispatcher_lib", "//source/common/http/http2:conn_pool_lib", diff --git a/test/common/http/http2/codec_impl_test.cc b/test/common/http/http2/codec_impl_test.cc index ef2944cf3ec0..18da2005571f 100644 --- a/test/common/http/http2/codec_impl_test.cc +++ b/test/common/http/http2/codec_impl_test.cc @@ -854,7 +854,7 @@ TEST_P(Http2CodecImplFlowControlTest, TestFlowControlInPendingSendData) { EXPECT_EQ(initial_stream_window + 1, client_->getStream(1)->pending_send_data_.length()); // Now create a second stream on the connection. - MockStreamDecoder response_decoder2; + MockResponseDecoder response_decoder2; RequestEncoder* request_encoder2 = &client_->newStream(response_decoder_); StreamEncoder* response_encoder2; MockStreamCallbacks server_stream_callbacks2; diff --git a/test/common/router/BUILD b/test/common/router/BUILD index 25d03dfbe973..d1c5b7c036ed 100644 --- a/test/common/router/BUILD +++ b/test/common/router/BUILD @@ -253,7 +253,6 @@ envoy_cc_test( envoy_cc_test( name = "router_test", srcs = ["router_test.cc"], - tags = ["fails_on_windows"], deps = [ "//source/common/buffer:buffer_lib", "//source/common/http:context_lib", diff --git a/test/mocks/http/stream_decoder.cc b/test/mocks/http/stream_decoder.cc index e4bf5ef3958d..76145b6dc43f 100644 --- a/test/mocks/http/stream_decoder.cc +++ b/test/mocks/http/stream_decoder.cc @@ -6,9 +6,6 @@ using testing::Invoke; namespace Envoy { namespace Http { -MockStreamDecoder::MockStreamDecoder() = default; -MockStreamDecoder::~MockStreamDecoder() = default; - MockRequestDecoder::MockRequestDecoder() { ON_CALL(*this, decodeHeaders_(_, _)).WillByDefault(Invoke([](RequestHeaderMapPtr& headers, bool) { // Check to see that method is not-null. 
Path can be null for CONNECT and authority can be null diff --git a/test/mocks/http/stream_decoder.h b/test/mocks/http/stream_decoder.h index 2abbe175aaa4..1238c55f91b5 100644 --- a/test/mocks/http/stream_decoder.h +++ b/test/mocks/http/stream_decoder.h @@ -6,22 +6,16 @@ namespace Envoy { namespace Http { -class MockStreamDecoder : public virtual StreamDecoder { +class MockRequestDecoder : public RequestDecoder { public: - MockStreamDecoder(); - ~MockStreamDecoder() override; + MockRequestDecoder(); + ~MockRequestDecoder() override; void decodeMetadata(MetadataMapPtr&& metadata_map) override { decodeMetadata_(metadata_map); } // Http::StreamDecoder MOCK_METHOD(void, decodeData, (Buffer::Instance & data, bool end_stream)); MOCK_METHOD(void, decodeMetadata_, (MetadataMapPtr & metadata_map)); -}; - -class MockRequestDecoder : public MockStreamDecoder, public RequestDecoder { -public: - MockRequestDecoder(); - ~MockRequestDecoder() override; void decodeHeaders(RequestHeaderMapPtr&& headers, bool end_stream) override { decodeHeaders_(headers, end_stream); @@ -33,11 +27,17 @@ class MockRequestDecoder : public MockStreamDecoder, public RequestDecoder { MOCK_METHOD(void, decodeTrailers_, (RequestTrailerMapPtr & trailers)); }; -class MockResponseDecoder : public MockStreamDecoder, public ResponseDecoder { +class MockResponseDecoder : public ResponseDecoder { public: MockResponseDecoder(); ~MockResponseDecoder() override; + void decodeMetadata(MetadataMapPtr&& metadata_map) override { decodeMetadata_(metadata_map); } + + // Http::StreamDecoder + MOCK_METHOD(void, decodeData, (Buffer::Instance & data, bool end_stream)); + MOCK_METHOD(void, decodeMetadata_, (MetadataMapPtr & metadata_map)); + void decode100ContinueHeaders(ResponseHeaderMapPtr&& headers) override { decode100ContinueHeaders_(headers); } diff --git a/test/mocks/http/stream_encoder.cc b/test/mocks/http/stream_encoder.cc index 0c13a2ebe340..ad9b646af7d8 100644 --- a/test/mocks/http/stream_encoder.cc +++ b/test/mocks/http/stream_encoder.cc @@ -9,13 +9,8 @@ namespace Http { MockHttp1StreamEncoderOptions::MockHttp1StreamEncoderOptions() = default; MockHttp1StreamEncoderOptions::~MockHttp1StreamEncoderOptions() = default; -MockStreamEncoder::MockStreamEncoder() { - ON_CALL(*this, getStream()).WillByDefault(ReturnRef(stream_)); -} - -MockStreamEncoder::~MockStreamEncoder() = default; - MockRequestEncoder::MockRequestEncoder() { + ON_CALL(*this, getStream()).WillByDefault(ReturnRef(stream_)); ON_CALL(*this, encodeHeaders(_, _)) .WillByDefault(Invoke([](const RequestHeaderMap& headers, bool) { // Check to see that method is not-null. Path can be null for CONNECT and authority can be @@ -26,6 +21,7 @@ MockRequestEncoder::MockRequestEncoder() { MockRequestEncoder::~MockRequestEncoder() = default; MockResponseEncoder::MockResponseEncoder() { + ON_CALL(*this, getStream()).WillByDefault(ReturnRef(stream_)); ON_CALL(*this, encodeHeaders(_, _)) .WillByDefault(Invoke([](const ResponseHeaderMap& headers, bool) { // Check for passing request headers as response headers in a test. 
diff --git a/test/mocks/http/stream_encoder.h b/test/mocks/http/stream_encoder.h index 768951a411b4..fa302cdefbe2 100644 --- a/test/mocks/http/stream_encoder.h +++ b/test/mocks/http/stream_encoder.h @@ -17,31 +17,25 @@ class MockHttp1StreamEncoderOptions : public Http1StreamEncoderOptions { MOCK_METHOD(void, disableChunkEncoding, ()); }; -class MockStreamEncoder : public virtual StreamEncoder { +class MockRequestEncoder : public RequestEncoder { public: - MockStreamEncoder(); - ~MockStreamEncoder() override; + MockRequestEncoder(); + ~MockRequestEncoder() override; + + // Http::RequestEncoder + MOCK_METHOD(void, encodeHeaders, (const RequestHeaderMap& headers, bool end_stream)); + MOCK_METHOD(void, encodeTrailers, (const RequestTrailerMap& trailers)); // Http::StreamEncoder MOCK_METHOD(void, encodeData, (Buffer::Instance & data, bool end_stream)); MOCK_METHOD(void, encodeMetadata, (const MetadataMapVector& metadata_map_vector)); - MOCK_METHOD(Stream&, getStream, ()); MOCK_METHOD(Http1StreamEncoderOptionsOptRef, http1StreamEncoderOptions, ()); + MOCK_METHOD(Stream&, getStream, (), ()); testing::NiceMock stream_; }; -class MockRequestEncoder : public MockStreamEncoder, public RequestEncoder { -public: - MockRequestEncoder(); - ~MockRequestEncoder() override; - - // Http::RequestEncoder - MOCK_METHOD(void, encodeHeaders, (const RequestHeaderMap& headers, bool end_stream)); - MOCK_METHOD(void, encodeTrailers, (const RequestTrailerMap& trailers)); -}; - -class MockResponseEncoder : public MockStreamEncoder, public ResponseEncoder { +class MockResponseEncoder : public ResponseEncoder { public: MockResponseEncoder(); ~MockResponseEncoder() override; @@ -50,6 +44,14 @@ class MockResponseEncoder : public MockStreamEncoder, public ResponseEncoder { MOCK_METHOD(void, encode100ContinueHeaders, (const ResponseHeaderMap& headers)); MOCK_METHOD(void, encodeHeaders, (const ResponseHeaderMap& headers, bool end_stream)); MOCK_METHOD(void, encodeTrailers, (const ResponseTrailerMap& trailers)); + + // Http::StreamEncoder + MOCK_METHOD(void, encodeData, (Buffer::Instance & data, bool end_stream)); + MOCK_METHOD(void, encodeMetadata, (const MetadataMapVector& metadata_map_vector)); + MOCK_METHOD(Http1StreamEncoderOptionsOptRef, http1StreamEncoderOptions, ()); + MOCK_METHOD(Stream&, getStream, (), ()); + + testing::NiceMock stream_; }; } // namespace Http From c4a8b0331948c0301f962eb99a390c90214cfc67 Mon Sep 17 00:00:00 2001 From: Sam Flattery <44659644+samflattery@users.noreply.github.com> Date: Wed, 10 Jun 2020 04:29:03 +0100 Subject: [PATCH 325/909] fuzz: flip forwardClientCert option in HCM fuzz target (#11496) Signed-off-by: Sam Flattery --- test/common/http/BUILD | 1 + test/common/http/conn_manager_impl_fuzz.proto | 4 +++ .../http/conn_manager_impl_fuzz_test.cc | 32 +++++++++++++++++-- 3 files changed, 34 insertions(+), 3 deletions(-) diff --git a/test/common/http/BUILD b/test/common/http/BUILD index 2ab34e09af0d..5a42fb54f59b 100644 --- a/test/common/http/BUILD +++ b/test/common/http/BUILD @@ -154,6 +154,7 @@ envoy_proto_library( srcs = ["conn_manager_impl_fuzz.proto"], deps = [ "//test/fuzz:common_proto", + "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg", ], ) diff --git a/test/common/http/conn_manager_impl_fuzz.proto b/test/common/http/conn_manager_impl_fuzz.proto index 58a7d8ba0d53..92d6e1c32652 100644 --- a/test/common/http/conn_manager_impl_fuzz.proto +++ b/test/common/http/conn_manager_impl_fuzz.proto @@ -6,6 +6,8 @@ import "google/protobuf/empty.proto"; import 
"test/fuzz/common.proto"; +import "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto"; + // Structured input for conn_manager_impl_fuzz_test. message NewStream { @@ -99,4 +101,6 @@ message Action { message ConnManagerImplTestCase { repeated Action actions = 1; + envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + .ForwardClientCertDetails forward_client_cert = 2; } diff --git a/test/common/http/conn_manager_impl_fuzz_test.cc b/test/common/http/conn_manager_impl_fuzz_test.cc index 37079704bb85..bbfbbceab89e 100644 --- a/test/common/http/conn_manager_impl_fuzz_test.cc +++ b/test/common/http/conn_manager_impl_fuzz_test.cc @@ -62,7 +62,8 @@ class FuzzConfig : public ConnectionManagerConfig { std::shared_ptr route_config_{new NiceMock()}; }; - FuzzConfig() + FuzzConfig(envoy::extensions::filters::network::http_connection_manager::v3:: + HttpConnectionManager::ForwardClientCertDetails forward_client_cert) : stats_({ALL_HTTP_CONN_MAN_STATS(POOL_COUNTER(fake_stats_), POOL_GAUGE(fake_stats_), POOL_HISTOGRAM(fake_stats_))}, "", fake_stats_), @@ -74,6 +75,7 @@ class FuzzConfig : public ConnectionManagerConfig { .WillByDefault(Return(time_system_.systemTime())); access_logs_.emplace_back(std::make_shared>()); request_id_extension_ = RequestIDExtensionFactory::defaultInstance(random_); + forward_client_cert_ = fromClientCert(forward_client_cert); } void newStream() { @@ -91,6 +93,30 @@ class FuzzConfig : public ConnectionManagerConfig { EXPECT_CALL(*encoder_filter_, setEncoderFilterCallbacks(_)); } + Http::ForwardClientCertType + fromClientCert(envoy::extensions::filters::network::http_connection_manager::v3:: + HttpConnectionManager::ForwardClientCertDetails forward_client_cert) { + switch (forward_client_cert) { + case envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager:: + SANITIZE: + return Http::ForwardClientCertType::Sanitize; + case envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager:: + FORWARD_ONLY: + return Http::ForwardClientCertType::ForwardOnly; + case envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager:: + APPEND_FORWARD: + return Http::ForwardClientCertType::AppendForward; + case envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager:: + SANITIZE_SET: + return Http::ForwardClientCertType::SanitizeSet; + case envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager:: + ALWAYS_FORWARD_ONLY: + return Http::ForwardClientCertType::AlwaysForwardOnly; + default: + return Http::ForwardClientCertType::Sanitize; + } + } + // Http::ConnectionManagerConfig RequestIDExtensionSharedPtr requestIDExtension() override { return request_id_extension_; } @@ -194,7 +220,7 @@ class FuzzConfig : public ConnectionManagerConfig { std::chrono::milliseconds request_timeout_{}; std::chrono::milliseconds delayed_close_timeout_{}; bool use_remote_address_{true}; - Http::ForwardClientCertType forward_client_cert_{Http::ForwardClientCertType::Sanitize}; + Http::ForwardClientCertType forward_client_cert_; std::vector set_current_client_cert_details_; Network::Address::Ipv4Instance local_address_{"127.0.0.1"}; absl::optional user_agent_; @@ -495,7 +521,7 @@ DEFINE_PROTO_FUZZER(const test::common::http::ConnManagerImplTestCase& input) { return; } - FuzzConfig config; + FuzzConfig config(input.forward_client_cert()); NiceMock drain_close; NiceMock random; Stats::SymbolTablePtr 
symbol_table(Stats::SymbolTableCreator::makeSymbolTable()); From 8fd1a52e608f7223b4864ca913cc3ed82340991d Mon Sep 17 00:00:00 2001 From: Rei Shimizu Date: Wed, 10 Jun 2020 12:29:29 +0900 Subject: [PATCH 326/909] bump pgv for wasm standalone build (#11521) Signed-off-by: Shikugawa --- api/bazel/repository_locations.bzl | 4 ++-- generated_api_shadow/bazel/repository_locations.bzl | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/api/bazel/repository_locations.bzl b/api/bazel/repository_locations.bzl index 77539ee9b109..503d6bc89078 100644 --- a/api/bazel/repository_locations.bzl +++ b/api/bazel/repository_locations.bzl @@ -4,8 +4,8 @@ BAZEL_SKYLIB_SHA256 = "1dde365491125a3db70731e25658dfdd3bc5dbdfd11b840b3e987ecf0 OPENCENSUS_PROTO_GIT_SHA = "be218fb6bd674af7519b1850cdf8410d8cbd48e8" # Dec 20, 2019 OPENCENSUS_PROTO_SHA256 = "e3bbdc94375e86c0edfb2fc5851507e08a3f26ee725ffff7c5c0e73264bdfcde" -PGV_GIT_SHA = "ab56c3dd1cf9b516b62c5087e1ec1471bd63631e" # Mar 11, 2020 -PGV_SHA256 = "3be12077affd1ebf8787001f5fba545cc5f1b914964dab4e0cc77c43fba03b41" +PGV_GIT_SHA = "278964a8052f96a2f514add0298098f63fb7f47f" # June 9, 2020 +PGV_SHA256 = "e368733c9fb7f8489591ffaf269170d7658cc0cd1ee322b601512b769446d3c8" GOOGLEAPIS_GIT_SHA = "82944da21578a53b74e547774cf62ed31a05b841" # Dec 2, 2019 GOOGLEAPIS_SHA = "a45019af4d3290f02eaeb1ce10990166978c807cb33a9692141a076ba46d1405" diff --git a/generated_api_shadow/bazel/repository_locations.bzl b/generated_api_shadow/bazel/repository_locations.bzl index 77539ee9b109..503d6bc89078 100644 --- a/generated_api_shadow/bazel/repository_locations.bzl +++ b/generated_api_shadow/bazel/repository_locations.bzl @@ -4,8 +4,8 @@ BAZEL_SKYLIB_SHA256 = "1dde365491125a3db70731e25658dfdd3bc5dbdfd11b840b3e987ecf0 OPENCENSUS_PROTO_GIT_SHA = "be218fb6bd674af7519b1850cdf8410d8cbd48e8" # Dec 20, 2019 OPENCENSUS_PROTO_SHA256 = "e3bbdc94375e86c0edfb2fc5851507e08a3f26ee725ffff7c5c0e73264bdfcde" -PGV_GIT_SHA = "ab56c3dd1cf9b516b62c5087e1ec1471bd63631e" # Mar 11, 2020 -PGV_SHA256 = "3be12077affd1ebf8787001f5fba545cc5f1b914964dab4e0cc77c43fba03b41" +PGV_GIT_SHA = "278964a8052f96a2f514add0298098f63fb7f47f" # June 9, 2020 +PGV_SHA256 = "e368733c9fb7f8489591ffaf269170d7658cc0cd1ee322b601512b769446d3c8" GOOGLEAPIS_GIT_SHA = "82944da21578a53b74e547774cf62ed31a05b841" # Dec 2, 2019 GOOGLEAPIS_SHA = "a45019af4d3290f02eaeb1ce10990166978c807cb33a9692141a076ba46d1405" From 9d702c125acde33933fc5d63818b1defe36b5cf3 Mon Sep 17 00:00:00 2001 From: Dmitri Dolguikh Date: Wed, 10 Jun 2020 06:03:24 -0700 Subject: [PATCH 327/909] vhds: fixing a filter teardown bug (#11341) This is to help with #9784 Risk Level: Low (added a single test) Testing: a new unit test and integration test Docs Changes: n/a Release Notes: n/a Signed-off-by: Dmitri Dolguikh --- source/common/router/rds_impl.cc | 2 +- .../http/on_demand/on_demand_update.cc | 5 +++ .../filters/http/on_demand/on_demand_update.h | 2 +- test/integration/vhds_integration_test.cc | 35 ++++++++++++++++++- 4 files changed, 41 insertions(+), 3 deletions(-) diff --git a/source/common/router/rds_impl.cc b/source/common/router/rds_impl.cc index 11f45c683dd3..3c4baddf6cee 100644 --- a/source/common/router/rds_impl.cc +++ b/source/common/router/rds_impl.cc @@ -283,7 +283,7 @@ void RdsRouteConfigProviderImpl::onConfigUpdate() { Http::RequestHeaderMapImpl host_header; host_header.setHost(VhdsSubscription::aliasToDomainName(it->alias_)); const bool host_exists = config->virtualHostExists(host_header); - auto current_cb = it->cb_; + std::weak_ptr 
current_cb(it->cb_); it->thread_local_dispatcher_.post([current_cb, host_exists] { if (auto cb = current_cb.lock()) { (*cb)(host_exists); diff --git a/source/extensions/filters/http/on_demand/on_demand_update.cc b/source/extensions/filters/http/on_demand/on_demand_update.cc index cf69080e667d..da5b2ec6bc10 100644 --- a/source/extensions/filters/http/on_demand/on_demand_update.cc +++ b/source/extensions/filters/http/on_demand/on_demand_update.cc @@ -37,6 +37,11 @@ void OnDemandRouteUpdate::setDecoderFilterCallbacks(Http::StreamDecoderFilterCal callbacks_ = &callbacks; } +// A weak_ptr copy of the route_config_updated_callback_ is kept by RdsRouteConfigProviderImpl +// in config_update_callbacks_. By resetting the pointer in onDestroy() callback we ensure +// that this filter/filter-chain will not be resumed if the corresponding stream has been closed +void OnDemandRouteUpdate::onDestroy() { route_config_updated_callback_.reset(); } + // This is the callback which is called when an update requested in requestRouteConfigUpdate() // has been propagated to workers, at which point the request processing is restarted from the // beginning. diff --git a/source/extensions/filters/http/on_demand/on_demand_update.h b/source/extensions/filters/http/on_demand/on_demand_update.h index a2cd51e07e20..455ef4160aa5 100644 --- a/source/extensions/filters/http/on_demand/on_demand_update.h +++ b/source/extensions/filters/http/on_demand/on_demand_update.h @@ -27,7 +27,7 @@ class OnDemandRouteUpdate : public Http::StreamDecoderFilter { void setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callbacks) override; - void onDestroy() override {} + void onDestroy() override; private: Http::StreamDecoderFilterCallbacks* callbacks_{}; diff --git a/test/integration/vhds_integration_test.cc b/test/integration/vhds_integration_test.cc index db74a8c79e26..994cf1ff3187 100644 --- a/test/integration/vhds_integration_test.cc +++ b/test/integration/vhds_integration_test.cc @@ -653,5 +653,38 @@ TEST_P(VhdsIntegrationTest, VhdsOnDemandUpdateFailToResolveOneAliasOutOfSeveral) cleanupUpstreamAndDownstream(); } +// Verify that a vhds update succeeds even when the client closes its connection +TEST_P(VhdsIntegrationTest, VhdsOnDemandUpdateHttpConnectionCloses) { + // RDS exchange with a non-empty virtual_hosts field + useRdsWithVhosts(); + + testRouterHeaderOnlyRequestAndResponse(nullptr, 1); + cleanupUpstreamAndDownstream(); + EXPECT_TRUE(codec_client_->waitForDisconnect()); + + // Attempt to make a request to an unknown host + codec_client_ = makeHttpConnection(makeClientConnection((lookupPort("http")))); + Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, + {":path", "/"}, + {":scheme", "http"}, + {":authority", "vhost_1"}, + {"x-lyft-user-id", "123"}}; + auto encoder_decoder = codec_client_->startRequest(request_headers); + Http::RequestEncoder& encoder = encoder_decoder.first; + IntegrationStreamDecoderPtr response = std::move(encoder_decoder.second); + EXPECT_TRUE(compareDeltaDiscoveryRequest(Config::TypeUrl::get().VirtualHost, + {vhdsRequestResourceName("vhost_1")}, {}, vhds_stream_)); + + envoy::api::v2::DeltaDiscoveryResponse vhds_update = + createDeltaDiscoveryResponseWithResourceNameUsedAsAlias(); + vhds_stream_->sendGrpcMessage(vhds_update); + + codec_client_->sendReset(encoder); + response->waitForReset(); + EXPECT_TRUE(codec_client_->connected()); + + cleanupUpstreamAndDownstream(); +} + } // namespace -} // namespace Envoy +} // namespace Envoy \ No newline at end of file From
92e608f066b017d664d8fa161cfc735f0c6b41e7 Mon Sep 17 00:00:00 2001 From: danzh Date: Wed, 10 Jun 2020 11:14:16 -0400 Subject: [PATCH 328/909] quiche: update tar and implement EnvoyQuicProofSource (#11316) Signed-off-by: Dan Zhang --- api/BUILD | 1 + .../transport_sockets/quic/v3/BUILD | 12 ++ .../quic/v3/quic_transport.proto | 28 +++ .../transport_sockets/quic/v4alpha/BUILD | 13 ++ .../quic/v4alpha/quic_transport.proto | 35 ++++ api/versioning/BUILD | 1 + bazel/external/quiche.BUILD | 155 +++++++++++++-- bazel/repository_locations.bzl | 6 +- .../transport_sockets/quic/v3/BUILD | 12 ++ .../quic/v3/quic_transport.proto | 28 +++ .../transport_sockets/quic/v4alpha/BUILD | 13 ++ .../quic/v4alpha/quic_transport.proto | 35 ++++ source/extensions/extensions_build_config.bzl | 1 + source/extensions/quic_listeners/quiche/BUILD | 25 ++- .../quiche/active_quic_listener.cc | 20 +- .../quiche/active_quic_listener.h | 7 + .../quic_listeners/quiche/envoy_quic_alarm.cc | 11 +- .../quiche/envoy_quic_client_session.cc | 8 +- .../quiche/envoy_quic_client_session.h | 1 + .../quiche/envoy_quic_fake_proof_source.h | 40 ++-- .../quiche/envoy_quic_fake_proof_verifier.h | 12 +- .../quiche/envoy_quic_proof_source.cc | 100 ++++++++++ .../quiche/envoy_quic_proof_source.h | 35 ++++ .../quiche/envoy_quic_server_session.cc | 5 + .../quiche/envoy_quic_server_session.h | 1 + .../quiche/platform/flags_list.h | 186 +++++++++--------- .../quiche/platform/quic_containers_impl.h | 6 + .../quiche/platform/quic_macros_impl.h | 1 + .../quiche/platform/quiche_text_utils_impl.h | 33 +++- .../quiche/platform/string_utils.cc | 9 +- .../quiche/platform/string_utils.h | 5 +- .../quiche/quic_transport_socket_factory.cc | 27 +-- test/extensions/quic_listeners/quiche/BUILD | 29 ++- .../quiche/active_quic_listener_test.cc | 99 ++++------ .../quiche/crypto_test_utils_for_envoy.cc | 7 +- .../quiche/envoy_quic_alarm_test.cc | 10 +- .../quiche/envoy_quic_client_session_test.cc | 9 +- .../quiche/envoy_quic_client_stream_test.cc | 11 +- .../quiche/envoy_quic_dispatcher_test.cc | 83 ++++---- .../quiche/envoy_quic_proof_source_test.cc | 89 ++++++--- .../quiche/envoy_quic_server_session_test.cc | 85 ++++---- .../quiche/envoy_quic_server_stream_test.cc | 3 + .../quic_listeners/quiche/integration/BUILD | 4 +- .../integration/quic_http_integration_test.cc | 35 ++-- .../quic_listeners/quiche/test_proof_source.h | 48 +++++ .../quic_listeners/quiche/test_utils.h | 82 +++++++- test/mocks/ssl/mocks.h | 14 ++ .../listener_manager_impl_quic_only_test.cc | 28 +-- 48 files changed, 1125 insertions(+), 383 deletions(-) create mode 100644 api/envoy/extensions/transport_sockets/quic/v3/BUILD create mode 100644 api/envoy/extensions/transport_sockets/quic/v3/quic_transport.proto create mode 100644 api/envoy/extensions/transport_sockets/quic/v4alpha/BUILD create mode 100644 api/envoy/extensions/transport_sockets/quic/v4alpha/quic_transport.proto create mode 100644 generated_api_shadow/envoy/extensions/transport_sockets/quic/v3/BUILD create mode 100644 generated_api_shadow/envoy/extensions/transport_sockets/quic/v3/quic_transport.proto create mode 100644 generated_api_shadow/envoy/extensions/transport_sockets/quic/v4alpha/BUILD create mode 100644 generated_api_shadow/envoy/extensions/transport_sockets/quic/v4alpha/quic_transport.proto create mode 100644 source/extensions/quic_listeners/quiche/envoy_quic_proof_source.cc create mode 100644 source/extensions/quic_listeners/quiche/envoy_quic_proof_source.h create mode 100644 
test/extensions/quic_listeners/quiche/test_proof_source.h diff --git a/api/BUILD b/api/BUILD index 73912d9fbbac..8e8831ea7e41 100644 --- a/api/BUILD +++ b/api/BUILD @@ -229,6 +229,7 @@ proto_library( "//envoy/extensions/retry/host/omit_host_metadata/v3:pkg", "//envoy/extensions/retry/priority/previous_priorities/v3:pkg", "//envoy/extensions/transport_sockets/alts/v3:pkg", + "//envoy/extensions/transport_sockets/quic/v3:pkg", "//envoy/extensions/transport_sockets/raw_buffer/v3:pkg", "//envoy/extensions/transport_sockets/tap/v3:pkg", "//envoy/extensions/transport_sockets/tls/v3:pkg", diff --git a/api/envoy/extensions/transport_sockets/quic/v3/BUILD b/api/envoy/extensions/transport_sockets/quic/v3/BUILD new file mode 100644 index 000000000000..e95e504f3caf --- /dev/null +++ b/api/envoy/extensions/transport_sockets/quic/v3/BUILD @@ -0,0 +1,12 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/extensions/transport_sockets/tls/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/transport_sockets/quic/v3/quic_transport.proto b/api/envoy/extensions/transport_sockets/quic/v3/quic_transport.proto new file mode 100644 index 000000000000..b17e2262bc1e --- /dev/null +++ b/api/envoy/extensions/transport_sockets/quic/v3/quic_transport.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; + +package envoy.extensions.transport_sockets.quic.v3; + +import "envoy/extensions/transport_sockets/tls/v3/tls.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.quic.v3"; +option java_outer_classname = "QuicTransportProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: quic transport] +// [#extension: envoy.transport_sockets.quic] + +// Configuration for Downstream QUIC transport socket. This provides Google's implementation of Google QUIC and IETF QUIC to Envoy. +message QuicDownstreamTransport { + tls.v3.DownstreamTlsContext downstream_tls_context = 1 + [(validate.rules).message = {required: true}]; +} + +// Configuration for Upstream QUIC transport socket. This provides Google's implementation of Google QUIC and IETF QUIC to Envoy. +message QuicUpstreamTransport { + tls.v3.UpstreamTlsContext upstream_tls_context = 1 [(validate.rules).message = {required: true}]; +} diff --git a/api/envoy/extensions/transport_sockets/quic/v4alpha/BUILD b/api/envoy/extensions/transport_sockets/quic/v4alpha/BUILD new file mode 100644 index 000000000000..47c94aa706ee --- /dev/null +++ b/api/envoy/extensions/transport_sockets/quic/v4alpha/BUILD @@ -0,0 +1,13 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/extensions/transport_sockets/quic/v3:pkg", + "//envoy/extensions/transport_sockets/tls/v4alpha:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/transport_sockets/quic/v4alpha/quic_transport.proto b/api/envoy/extensions/transport_sockets/quic/v4alpha/quic_transport.proto new file mode 100644 index 000000000000..255bfe627b74 --- /dev/null +++ b/api/envoy/extensions/transport_sockets/quic/v4alpha/quic_transport.proto @@ -0,0 +1,35 @@ +syntax = "proto3"; + +package envoy.extensions.transport_sockets.quic.v4alpha; + +import "envoy/extensions/transport_sockets/tls/v4alpha/tls.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.quic.v4alpha"; +option java_outer_classname = "QuicTransportProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: quic transport] +// [#extension: envoy.transport_sockets.quic] + +// Configuration for Downstream QUIC transport socket. This provides Google's implementation of Google QUIC and IETF QUIC to Envoy. +message QuicDownstreamTransport { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.quic.v3.QuicDownstreamTransport"; + + tls.v4alpha.DownstreamTlsContext downstream_tls_context = 1 + [(validate.rules).message = {required: true}]; +} + +// Configuration for Upstream QUIC transport socket. This provides Google's implementation of Google QUIC and IETF QUIC to Envoy. 
+message QuicUpstreamTransport { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.quic.v3.QuicUpstreamTransport"; + + tls.v4alpha.UpstreamTlsContext upstream_tls_context = 1 + [(validate.rules).message = {required: true}]; +} diff --git a/api/versioning/BUILD b/api/versioning/BUILD index ccea9008a05f..9f62a77d9a5f 100644 --- a/api/versioning/BUILD +++ b/api/versioning/BUILD @@ -112,6 +112,7 @@ proto_library( "//envoy/extensions/retry/host/omit_host_metadata/v3:pkg", "//envoy/extensions/retry/priority/previous_priorities/v3:pkg", "//envoy/extensions/transport_sockets/alts/v3:pkg", + "//envoy/extensions/transport_sockets/quic/v3:pkg", "//envoy/extensions/transport_sockets/raw_buffer/v3:pkg", "//envoy/extensions/transport_sockets/tap/v3:pkg", "//envoy/extensions/transport_sockets/tls/v3:pkg", diff --git a/bazel/external/quiche.BUILD b/bazel/external/quiche.BUILD index 9bf32c27c6c5..30c28f30f607 100644 --- a/bazel/external/quiche.BUILD +++ b/bazel/external/quiche.BUILD @@ -1494,6 +1494,7 @@ envoy_cc_library( deps = [ ":quic_core_bandwidth_lib", ":quic_core_congestion_control_bandwidth_sampler_lib", + ":quic_core_congestion_control_bbr_lib", ":quic_core_congestion_control_congestion_control_interface_lib", ":quic_core_congestion_control_rtt_stats_lib", ":quic_core_congestion_control_windowed_filter_lib", @@ -1689,7 +1690,9 @@ envoy_cc_library( ":quic_core_crypto_crypto_handshake_lib", ":quic_core_crypto_encryption_lib", ":quic_core_framer_lib", + ":quic_core_idle_network_detector_lib", ":quic_core_mtu_discovery_lib", + ":quic_core_network_blackhole_detector_lib", ":quic_core_one_block_arena_lib", ":quic_core_packet_creator_lib", ":quic_core_packet_writer_interface_lib", @@ -1816,6 +1819,36 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "quic_core_crypto_boring_utils_lib", + hdrs = ["quiche/quic/core/crypto/boring_utils.h"], + copts = quiche_copts, + external_deps = ["ssl"], + repository = "@envoy", + tags = ["nofips"], + deps = [ + ":quic_platform_export", + ":quiche_common_platform", + ], +) + +envoy_cc_library( + name = "quic_core_crypto_certificate_view_lib", + srcs = ["quiche/quic/core/crypto/certificate_view.cc"], + hdrs = ["quiche/quic/core/crypto/certificate_view.h"], + copts = quiche_copts, + external_deps = ["ssl"], + repository = "@envoy", + tags = ["nofips"], + visibility = ["//visibility:public"], + deps = [ + ":quic_core_crypto_boring_utils_lib", + ":quic_platform", + ":quic_platform_ip_address", + ":quiche_common_platform", + ], +) + envoy_cc_library( name = "quic_core_crypto_encryption_lib", srcs = [ @@ -1943,6 +1976,7 @@ envoy_cc_library( repository = "@envoy", tags = ["nofips"], deps = [ + ":quic_core_crypto_proof_source_interface_lib", ":quic_core_types_lib", ":quic_platform_base", ], @@ -1987,10 +2021,14 @@ envoy_cc_library( srcs = ["quiche/quic/core/quic_error_codes.cc"], hdrs = ["quiche/quic/core/quic_error_codes.h"], copts = quiche_copts, + external_deps = ["ssl"], repository = "@envoy", tags = ["nofips"], visibility = ["//visibility:public"], - deps = [":quic_platform_export"], + deps = [ + ":quic_platform_base", + ":quic_platform_export", + ], ) envoy_cc_library( @@ -2141,6 +2179,7 @@ envoy_cc_library( deps = [ ":quic_core_circular_deque_lib", ":quic_core_packets_lib", + ":quic_core_qpack_qpack_header_table_lib", ":quic_platform_base", ":spdy_core_header_block_lib", ":spdy_core_headers_handler_interface_lib", @@ -2291,6 +2330,23 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = 
"quic_core_idle_network_detector_lib", + srcs = ["quiche/quic/core/quic_idle_network_detector.cc"], + hdrs = ["quiche/quic/core/quic_idle_network_detector.h"], + copts = quiche_copts, + repository = "@envoy", + tags = ["nofips"], + deps = [ + ":quic_core_alarm_factory_interface_lib", + ":quic_core_alarm_interface_lib", + ":quic_core_constants_lib", + ":quic_core_one_block_arena_lib", + ":quic_core_time_lib", + ":quic_platform_export", + ], +) + envoy_cc_library( name = "quic_core_interval_lib", hdrs = ["quiche/quic/core/quic_interval.h"], @@ -2359,6 +2415,23 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "quic_core_network_blackhole_detector_lib", + srcs = ["quiche/quic/core/quic_network_blackhole_detector.cc"], + hdrs = ["quiche/quic/core/quic_network_blackhole_detector.h"], + repository = "@envoy", + tags = ["nofips"], + visibility = ["//visibility:public"], + deps = [ + ":quic_core_alarm_factory_interface_lib", + ":quic_core_alarm_interface_lib", + ":quic_core_constants_lib", + ":quic_core_one_block_arena_lib", + ":quic_core_time_lib", + ":quic_platform_export", + ], +) + envoy_cc_library( name = "quic_core_packet_creator_lib", srcs = ["quiche/quic/core/quic_packet_creator.cc"], @@ -2468,16 +2541,6 @@ envoy_cc_library( ], ) -envoy_cc_library( - name = "quic_core_qpack_qpack_instructions_lib", - srcs = ["quiche/quic/core/qpack/qpack_instructions.cc"], - hdrs = ["quiche/quic/core/qpack/qpack_instructions.h"], - copts = quiche_copts, - repository = "@envoy", - tags = ["nofips"], - deps = [":quic_platform_base"], -) - envoy_cc_library( name = "quic_core_qpack_qpack_decoder_lib", srcs = ["quiche/quic/core/qpack/qpack_decoder.cc"], @@ -2546,6 +2609,16 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "quic_core_qpack_qpack_instructions_lib", + srcs = ["quiche/quic/core/qpack/qpack_instructions.cc"], + hdrs = ["quiche/quic/core/qpack/qpack_instructions.h"], + copts = quiche_copts, + repository = "@envoy", + tags = ["nofips"], + deps = [":quic_platform_base"], +) + envoy_cc_library( name = "quic_core_qpack_qpack_instruction_encoder_lib", srcs = ["quiche/quic/core/qpack/qpack_instruction_encoder.cc"], @@ -2810,11 +2883,13 @@ envoy_cc_library( "quiche/quic/core/chlo_extractor.cc", "quiche/quic/core/quic_buffered_packet_store.cc", "quiche/quic/core/quic_dispatcher.cc", + "quiche/quic/core/tls_chlo_extractor.cc", ], hdrs = [ "quiche/quic/core/chlo_extractor.h", "quiche/quic/core/quic_buffered_packet_store.h", "quiche/quic/core/quic_dispatcher.h", + "quiche/quic/core/tls_chlo_extractor.h", ], copts = quiche_copts, repository = "@envoy", @@ -3186,6 +3261,51 @@ envoy_cc_test_library( ], ) +envoy_cc_test_library( + name = "quic_test_tools_crypto_server_config_peer_lib", + srcs = [ + "quiche/quic/test_tools/quic_crypto_server_config_peer.cc", + ], + hdrs = [ + "quiche/quic/test_tools/quic_crypto_server_config_peer.h", + ], + copts = quiche_copts, + repository = "@envoy", + tags = ["nofips"], + deps = [ + ":quic_core_crypto_crypto_handshake_lib", + ":quic_test_tools_mock_clock_lib", + ":quic_test_tools_mock_random_lib", + ":quic_test_tools_test_utils_interface_lib", + ":quiche_common_platform", + ], +) + +envoy_cc_test_library( + name = "quic_test_tools_first_flight_lib", + srcs = [ + "quiche/quic/test_tools/first_flight.cc", + ], + hdrs = [ + "quiche/quic/test_tools/first_flight.h", + ], + copts = quiche_copts, + repository = "@envoy", + tags = ["nofips"], + deps = [ + ":quic_core_config_lib", + ":quic_core_connection_lib", + ":quic_core_crypto_crypto_handshake_lib", + 
":quic_core_http_client_lib", + ":quic_core_packet_writer_interface_lib", + ":quic_core_packets_lib", + ":quic_core_types_lib", + ":quic_core_versions_lib", + ":quic_platform", + ":quic_test_tools_test_utils_interface_lib", + ], +) + envoy_cc_test_library( name = "quic_test_tools_framer_peer_lib", srcs = ["quiche/quic/test_tools/quic_framer_peer.cc"], @@ -3309,6 +3429,19 @@ envoy_cc_test_library( ], ) +envoy_cc_test_library( + name = "quic_test_tools_test_certificates_lib", + srcs = ["quiche/quic/test_tools/test_certificates.cc"], + hdrs = ["quiche/quic/test_tools/test_certificates.h"], + copts = quiche_copts, + repository = "@envoy", + tags = ["nofips"], + deps = [ + ":quic_platform_base", + ":quiche_common_platform", + ], +) + envoy_cc_test_library( name = "quic_test_tools_test_utils_interface_lib", srcs = [ diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 8c8676f7b90b..d54a87cc5b3c 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -411,9 +411,9 @@ DEPENDENCY_REPOSITORIES = dict( cpe = "N/A", ), com_googlesource_quiche = dict( - # Static snapshot of https://quiche.googlesource.com/quiche/+archive/41c9fdead26b31deefae3c325a2cf1a873688ba3.tar.gz - sha256 = "75af53154402e1654cfd32d8aaeed5fab4dbb79d3cab8c9866019d5369c1889e", - urls = ["https://storage.googleapis.com/quiche-envoy-integration/41c9fdead26b31deefae3c325a2cf1a873688ba3.tar.gz"], + # Static snapshot of https://quiche.googlesource.com/quiche/+archive/25da9198727ef05edeb99d9f4ce5b6acb3cb87b5.tar.gz + sha256 = "52bac2f91a0900730fe3bfb14ffb668f205fe0de48e42ecfee677e3743ec33ee", + urls = ["https://storage.googleapis.com/quiche-envoy-integration/25da9198727ef05edeb99d9f4ce5b6acb3cb87b5.tar.gz"], use_category = ["dataplane"], cpe = "N/A", ), diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/quic/v3/BUILD b/generated_api_shadow/envoy/extensions/transport_sockets/quic/v3/BUILD new file mode 100644 index 000000000000..e95e504f3caf --- /dev/null +++ b/generated_api_shadow/envoy/extensions/transport_sockets/quic/v3/BUILD @@ -0,0 +1,12 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/extensions/transport_sockets/tls/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/quic/v3/quic_transport.proto b/generated_api_shadow/envoy/extensions/transport_sockets/quic/v3/quic_transport.proto new file mode 100644 index 000000000000..b17e2262bc1e --- /dev/null +++ b/generated_api_shadow/envoy/extensions/transport_sockets/quic/v3/quic_transport.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; + +package envoy.extensions.transport_sockets.quic.v3; + +import "envoy/extensions/transport_sockets/tls/v3/tls.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.quic.v3"; +option java_outer_classname = "QuicTransportProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: quic transport] +// [#extension: envoy.transport_sockets.quic] + +// Configuration for Downstream QUIC transport socket. This provides Google's implementation of Google QUIC and IETF QUIC to Envoy. 
+message QuicDownstreamTransport { + tls.v3.DownstreamTlsContext downstream_tls_context = 1 + [(validate.rules).message = {required: true}]; +} + +// Configuration for Upstream QUIC transport socket. This provides Google's implementation of Google QUIC and IETF QUIC to Envoy. +message QuicUpstreamTransport { + tls.v3.UpstreamTlsContext upstream_tls_context = 1 [(validate.rules).message = {required: true}]; +} diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/quic/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/transport_sockets/quic/v4alpha/BUILD new file mode 100644 index 000000000000..47c94aa706ee --- /dev/null +++ b/generated_api_shadow/envoy/extensions/transport_sockets/quic/v4alpha/BUILD @@ -0,0 +1,13 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/extensions/transport_sockets/quic/v3:pkg", + "//envoy/extensions/transport_sockets/tls/v4alpha:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/quic/v4alpha/quic_transport.proto b/generated_api_shadow/envoy/extensions/transport_sockets/quic/v4alpha/quic_transport.proto new file mode 100644 index 000000000000..255bfe627b74 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/transport_sockets/quic/v4alpha/quic_transport.proto @@ -0,0 +1,35 @@ +syntax = "proto3"; + +package envoy.extensions.transport_sockets.quic.v4alpha; + +import "envoy/extensions/transport_sockets/tls/v4alpha/tls.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.quic.v4alpha"; +option java_outer_classname = "QuicTransportProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: quic transport] +// [#extension: envoy.transport_sockets.quic] + +// Configuration for Downstream QUIC transport socket. This provides Google's implementation of Google QUIC and IETF QUIC to Envoy. +message QuicDownstreamTransport { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.quic.v3.QuicDownstreamTransport"; + + tls.v4alpha.DownstreamTlsContext downstream_tls_context = 1 + [(validate.rules).message = {required: true}]; +} + +// Configuration for Upstream QUIC transport socket. This provides Google's implementation of Google QUIC and IETF QUIC to Envoy. 
+message QuicUpstreamTransport { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.quic.v3.QuicUpstreamTransport"; + + tls.v4alpha.UpstreamTlsContext upstream_tls_context = 1 + [(validate.rules).message = {required: true}]; +} diff --git a/source/extensions/extensions_build_config.bzl b/source/extensions/extensions_build_config.bzl index 163efb8ffd70..348862eea1e8 100644 --- a/source/extensions/extensions_build_config.bzl +++ b/source/extensions/extensions_build_config.bzl @@ -161,6 +161,7 @@ EXTENSIONS = { "envoy.transport_sockets.alts": "//source/extensions/transport_sockets/alts:config", "envoy.transport_sockets.raw_buffer": "//source/extensions/transport_sockets/raw_buffer:config", "envoy.transport_sockets.tap": "//source/extensions/transport_sockets/tap:config", + "envoy.transport_sockets.quic": "//source/extensions/quic_listeners/quiche:quic_transport_socket_factory_lib", # # Retry host predicates diff --git a/source/extensions/quic_listeners/quiche/BUILD b/source/extensions/quic_listeners/quiche/BUILD index 124be2ac6d51..3082bdf98eca 100644 --- a/source/extensions/quic_listeners/quiche/BUILD +++ b/source/extensions/quic_listeners/quiche/BUILD @@ -1,5 +1,6 @@ load( "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", "envoy_cc_library", "envoy_package", ) @@ -61,7 +62,7 @@ envoy_cc_library( ) envoy_cc_library( - name = "envoy_quic_proof_source_lib", + name = "envoy_quic_fake_proof_source_lib", hdrs = ["envoy_quic_fake_proof_source.h"], external_deps = ["quiche_quic_platform"], tags = ["nofips"], @@ -71,6 +72,23 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "envoy_quic_proof_source_lib", + srcs = ["envoy_quic_proof_source.cc"], + hdrs = ["envoy_quic_proof_source.h"], + external_deps = ["ssl"], + tags = ["nofips"], + deps = [ + ":envoy_quic_fake_proof_source_lib", + ":envoy_quic_utils_lib", + ":quic_io_handle_wrapper_lib", + ":quic_transport_socket_factory_lib", + "//include/envoy/ssl:tls_certificate_config_interface", + "//source/extensions/transport_sockets:well_known_names", + "@com_googlesource_quiche//:quic_core_crypto_certificate_view_lib", + ], +) + envoy_cc_library( name = "envoy_quic_proof_verifier_lib", hdrs = ["envoy_quic_fake_proof_verifier.h"], @@ -309,10 +327,11 @@ envoy_cc_library( ], ) -envoy_cc_library( +envoy_cc_extension( name = "quic_transport_socket_factory_lib", srcs = ["quic_transport_socket_factory.cc"], hdrs = ["quic_transport_socket_factory.h"], + security_posture = "unknown", tags = ["nofips"], deps = [ "//include/envoy/network:transport_socket_interface", @@ -321,6 +340,6 @@ envoy_cc_library( "//source/common/common:assert_lib", "//source/extensions/transport_sockets:well_known_names", "//source/extensions/transport_sockets/tls:context_config_lib", - "@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/transport_sockets/quic/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/quic_listeners/quiche/active_quic_listener.cc b/source/extensions/quic_listeners/quiche/active_quic_listener.cc index 30c65d443e8a..55f5da2e49f1 100644 --- a/source/extensions/quic_listeners/quiche/active_quic_listener.cc +++ b/source/extensions/quic_listeners/quiche/active_quic_listener.cc @@ -9,7 +9,7 @@ #include "extensions/quic_listeners/quiche/envoy_quic_alarm_factory.h" #include "extensions/quic_listeners/quiche/envoy_quic_connection_helper.h" #include "extensions/quic_listeners/quiche/envoy_quic_dispatcher.h" -#include 
"extensions/quic_listeners/quiche/envoy_quic_fake_proof_source.h" +#include "extensions/quic_listeners/quiche/envoy_quic_proof_source.h" #include "extensions/quic_listeners/quiche/envoy_quic_packet_writer.h" #include "extensions/quic_listeners/quiche/envoy_quic_utils.h" @@ -33,6 +33,20 @@ ActiveQuicListener::ActiveQuicListener(Event::Dispatcher& dispatcher, const quic::QuicConfig& quic_config, Network::Socket::OptionsSharedPtr options, const envoy::config::core::v3::RuntimeFeatureFlag& enabled) + : ActiveQuicListener(dispatcher, parent, listen_socket, listener_config, quic_config, + std::move(options), + std::make_unique( + listen_socket, listener_config.filterChainManager()), + enabled) {} + +ActiveQuicListener::ActiveQuicListener(Event::Dispatcher& dispatcher, + Network::ConnectionHandler& parent, + Network::SocketSharedPtr listen_socket, + Network::ListenerConfig& listener_config, + const quic::QuicConfig& quic_config, + Network::Socket::OptionsSharedPtr options, + std::unique_ptr proof_source, + const envoy::config::core::v3::RuntimeFeatureFlag& enabled) : Server::ConnectionHandlerImpl::ActiveListenerImplBase(parent, &listener_config), dispatcher_(dispatcher), version_manager_(quic::CurrentSupportedVersions()), listen_socket_(*listen_socket), enabled_(enabled, Runtime::LoaderSingleton::get()) { @@ -51,8 +65,7 @@ ActiveQuicListener::ActiveQuicListener(Event::Dispatcher& dispatcher, random->RandBytes(random_seed_, sizeof(random_seed_)); crypto_config_ = std::make_unique( quiche::QuicheStringPiece(reinterpret_cast(random_seed_), sizeof(random_seed_)), - quic::QuicRandom::GetInstance(), std::make_unique(), - quic::KeyExchangeSource::Default()); + quic::QuicRandom::GetInstance(), std::move(proof_source), quic::KeyExchangeSource::Default()); auto connection_helper = std::make_unique(dispatcher_); crypto_config_->AddDefaultConfig(random, connection_helper->GetClock(), quic::QuicCryptoServerConfig::ConfigOptions()); @@ -122,7 +135,6 @@ ActiveQuicListenerFactory::ActiveQuicListenerFactory( config.has_idle_timeout() ? 
DurationUtil::durationToMilliseconds(config.idle_timeout()) : 300000; quic_config_.SetIdleNetworkTimeout( - quic::QuicTime::Delta::FromMilliseconds(idle_network_timeout_ms), quic::QuicTime::Delta::FromMilliseconds(idle_network_timeout_ms)); int32_t max_time_before_crypto_handshake_ms = config.has_crypto_handshake_timeout() diff --git a/source/extensions/quic_listeners/quiche/active_quic_listener.h b/source/extensions/quic_listeners/quiche/active_quic_listener.h index 8d0d5c9dd46e..a9c87d4b4d66 100644 --- a/source/extensions/quic_listeners/quiche/active_quic_listener.h +++ b/source/extensions/quic_listeners/quiche/active_quic_listener.h @@ -36,6 +36,13 @@ class ActiveQuicListener : public Network::UdpListenerCallbacks, Network::Socket::OptionsSharedPtr options, const envoy::config::core::v3::RuntimeFeatureFlag& enabled); + ActiveQuicListener(Event::Dispatcher& dispatcher, Network::ConnectionHandler& parent, + Network::SocketSharedPtr listen_socket, + Network::ListenerConfig& listener_config, const quic::QuicConfig& quic_config, + Network::Socket::OptionsSharedPtr options, + std::unique_ptr proof_source, + const envoy::config::core::v3::RuntimeFeatureFlag& enabled); + ~ActiveQuicListener() override; void onListenerShutdown(); diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_alarm.cc b/source/extensions/quic_listeners/quiche/envoy_quic_alarm.cc index b490aff8b955..759e31a4d83f 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_alarm.cc +++ b/source/extensions/quic_listeners/quiche/envoy_quic_alarm.cc @@ -1,5 +1,7 @@ #include "extensions/quic_listeners/quiche/envoy_quic_alarm.h" +#include + namespace Envoy { namespace Quic { @@ -11,8 +13,13 @@ EnvoyQuicAlarm::EnvoyQuicAlarm(Event::Dispatcher& dispatcher, const quic::QuicCl void EnvoyQuicAlarm::CancelImpl() { timer_->disableTimer(); } void EnvoyQuicAlarm::SetImpl() { - // TODO(#7170) switch to use microseconds if it is supported. - timer_->enableTimer(std::chrono::milliseconds(getDurationBeforeDeadline().ToMilliseconds())); + quic::QuicTime::Delta duration = getDurationBeforeDeadline(); + // Round up the duration so that any duration < 1us will not be triggered within current event + // loop. QUICHE alarm is not expected to be scheduled in current event loop. This bit is a bummer + // in QUICHE, and we are working on the fix. Once QUICHE is fixed of expecting this behavior, we + // no longer need to round up the duration. + // TODO(antoniovicente) improve the timer behavior in such case. + timer_->enableHRTimer(std::chrono::microseconds(std::max(1l, duration.ToMicroseconds()))); } void EnvoyQuicAlarm::UpdateImpl() { diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_client_session.cc b/source/extensions/quic_listeners/quiche/envoy_quic_client_session.cc index 930e47052800..5f0984f1d5a1 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_client_session.cc +++ b/source/extensions/quic_listeners/quiche/envoy_quic_client_session.cc @@ -27,7 +27,9 @@ void EnvoyQuicClientSession::connect() { // Start version negotiation and crypto handshake during which the connection may fail if server // doesn't support the one and only supported version. 
CryptoConnect(); - SetMaxAllowedPushId(0u); + if (quic::VersionUsesHttp3(transport_version())) { + SetMaxPushId(0u); + } } void EnvoyQuicClientSession::OnConnectionClosed(const quic::QuicConnectionCloseFrame& frame, @@ -81,5 +83,9 @@ EnvoyQuicClientSession::CreateIncomingStream(quic::PendingStream* /*pending*/) { bool EnvoyQuicClientSession::hasDataToWrite() { return HasDataToWrite(); } +void EnvoyQuicClientSession::OnOneRttKeysAvailable() { + raiseConnectionEvent(Network::ConnectionEvent::Connected); +} + } // namespace Quic } // namespace Envoy diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_client_session.h b/source/extensions/quic_listeners/quiche/envoy_quic_client_session.h index a3b2542dfb4a..b79943da1f12 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_client_session.h +++ b/source/extensions/quic_listeners/quiche/envoy_quic_client_session.h @@ -55,6 +55,7 @@ class EnvoyQuicClientSession : public QuicFilterManagerConnectionImpl, void Initialize() override; void OnCanWrite() override; void OnGoAway(const quic::QuicGoAwayFrame& frame) override; + void OnOneRttKeysAvailable() override; // quic::QuicSpdyClientSessionBase void SetDefaultEncryptionLevel(quic::EncryptionLevel level) override; diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_fake_proof_source.h b/source/extensions/quic_listeners/quiche/envoy_quic_fake_proof_source.h index 01f392279c18..cddf10b7799c 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_fake_proof_source.h +++ b/source/extensions/quic_listeners/quiche/envoy_quic_fake_proof_source.h @@ -15,6 +15,8 @@ #pragma GCC diagnostic pop +#include "openssl/ssl.h" +#include "envoy/network/filter.h" #include "quiche/quic/platform/api/quic_reference_counted.h" #include "quiche/quic/platform/api/quic_socket_address.h" #include "quiche/common/platform/api/quiche_string_piece.h" @@ -22,58 +24,46 @@ namespace Envoy { namespace Quic { -// A fake implementation of quic::ProofSource which returns a fake cert and -// a fake signature for a given QUIC server config. +// A fake implementation of quic::ProofSource which uses RSA cipher suite to sign in GetProof(). +// TODO(danzh) Rename it to EnvoyQuicProofSource once it's fully implemented. class EnvoyQuicFakeProofSource : public quic::ProofSource { public: ~EnvoyQuicFakeProofSource() override = default; // quic::ProofSource - // Returns a fake certs chain and its fake SCT "Fake timestamp" and fake TLS signature wrapped + // Returns a certs chain and its fake SCT "Fake timestamp" and TLS signature wrapped // in QuicCryptoProof. - void GetProof(const quic::QuicSocketAddress& server_address, const std::string& hostname, + void GetProof(const quic::QuicSocketAddress& server_address, + const quic::QuicSocketAddress& client_address, const std::string& hostname, const std::string& server_config, quic::QuicTransportVersion /*transport_version*/, quiche::QuicheStringPiece /*chlo_hash*/, std::unique_ptr callback) override { quic::QuicReferenceCountedPointer chain = - GetCertChain(server_address, hostname); + GetCertChain(server_address, client_address, hostname); quic::QuicCryptoProof proof; bool success = false; + // TODO(danzh) Get the signature algorithm from leaf cert. 
auto signature_callback = std::make_unique(success, proof.signature); - ComputeTlsSignature(server_address, hostname, 0, server_config, std::move(signature_callback)); + ComputeTlsSignature(server_address, client_address, hostname, SSL_SIGN_RSA_PSS_RSAE_SHA256, + server_config, std::move(signature_callback)); ASSERT(success); proof.leaf_cert_scts = "Fake timestamp"; callback->Run(true, chain, proof, nullptr /* details */); } - // Returns a certs chain with a fake certificate "Fake cert from [host_name]". - quic::QuicReferenceCountedPointer - GetCertChain(const quic::QuicSocketAddress& /*server_address*/, - const std::string& /*hostname*/) override { - std::vector certs; - certs.push_back(absl::StrCat("Fake cert")); - return quic::QuicReferenceCountedPointer( - new quic::ProofSource::Chain(certs)); - } - - // Always call callback with a signature "Fake signature for { [server_config] }". - void - ComputeTlsSignature(const quic::QuicSocketAddress& /*server_address*/, - const std::string& /*hostname*/, uint16_t /*signature_algorithm*/, - quiche::QuicheStringPiece in, - std::unique_ptr callback) override { - callback->Run(true, absl::StrCat("Fake signature for { ", in, " }")); - } + TicketCrypter* GetTicketCrypter() override { return nullptr; } private: // Used by GetProof() to get fake signature. class FakeSignatureCallback : public quic::ProofSource::SignatureCallback { public: + // TODO(danzh) Pass in Details to retain the certs chain, and quic::ProofSource::Callback to be + // triggered in Run(). FakeSignatureCallback(bool& success, std::string& signature) : success_(success), signature_(signature) {} // quic::ProofSource::SignatureCallback - void Run(bool ok, std::string signature) override { + void Run(bool ok, std::string signature, std::unique_ptr
/*details*/) override { success_ = ok; signature_ = signature; } diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_fake_proof_verifier.h b/source/extensions/quic_listeners/quiche/envoy_quic_fake_proof_verifier.h index 49abe56e9122..a72f0b3d8e3c 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_fake_proof_verifier.h +++ b/source/extensions/quic_listeners/quiche/envoy_quic_fake_proof_verifier.h @@ -26,22 +26,20 @@ class EnvoyQuicFakeProofVerifier : public quic::ProofVerifier { // [server_config] }". Otherwise failure. quic::QuicAsyncStatus VerifyProof(const std::string& hostname, const uint16_t /*port*/, - const std::string& server_config, quic::QuicTransportVersion /*quic_version*/, + const std::string& /*server_config*/, quic::QuicTransportVersion /*quic_version*/, absl::string_view /*chlo_hash*/, const std::vector& certs, - const std::string& cert_sct, const std::string& signature, + const std::string& cert_sct, const std::string& /*signature*/, const quic::ProofVerifyContext* context, std::string* error_details, std::unique_ptr* details, std::unique_ptr callback) override { if (VerifyCertChain(hostname, certs, "", cert_sct, context, error_details, details, - std::move(callback)) == quic::QUIC_SUCCESS && - signature == absl::StrCat("Fake signature for { ", server_config, " }")) { + std::move(callback)) == quic::QUIC_SUCCESS) { return quic::QUIC_SUCCESS; } return quic::QUIC_FAILURE; } - // Return success if the certs chain has only one fake certificate "Fake cert from [host_name]" - // and its SCT is "Fake timestamp". Otherwise failure. + // Return success upon one arbitrary cert content. Otherwise failure. quic::QuicAsyncStatus VerifyCertChain(const std::string& /*hostname*/, const std::vector& certs, const std::string& /*ocsp_response*/, const std::string& cert_sct, @@ -49,7 +47,7 @@ class EnvoyQuicFakeProofVerifier : public quic::ProofVerifier { std::unique_ptr* /*details*/, std::unique_ptr /*callback*/) override { // Cert SCT support is not enabled for fake ProofSource. 
- if (cert_sct.empty() && certs.size() == 1 && certs[0] == "Fake cert") { + if (cert_sct.empty() && certs.size() == 1) { return quic::QUIC_SUCCESS; } return quic::QUIC_FAILURE; diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_proof_source.cc b/source/extensions/quic_listeners/quiche/envoy_quic_proof_source.cc new file mode 100644 index 000000000000..ffb567a4dbf3 --- /dev/null +++ b/source/extensions/quic_listeners/quiche/envoy_quic_proof_source.cc @@ -0,0 +1,100 @@ +#include "extensions/quic_listeners/quiche/envoy_quic_proof_source.h" + +#include + +#include "envoy/ssl/tls_certificate_config.h" + +#include "extensions/quic_listeners/quiche/envoy_quic_utils.h" +#include "extensions/quic_listeners/quiche/quic_io_handle_wrapper.h" +#include "extensions/transport_sockets/well_known_names.h" + +#include "openssl/bytestring.h" +#include "quiche/quic/core/crypto/certificate_view.h" + +namespace Envoy { +namespace Quic { + +quic::QuicReferenceCountedPointer +EnvoyQuicProofSource::GetCertChain(const quic::QuicSocketAddress& server_address, + const quic::QuicSocketAddress& client_address, + const std::string& hostname) { + absl::optional> cert_config_ref = + getTlsCertConfig(server_address, client_address, hostname); + if (!cert_config_ref.has_value()) { + ENVOY_LOG(warn, "No matching filter chain found for handshake."); + return nullptr; + } + auto& cert_config = cert_config_ref.value().get(); + const std::string& chain_str = cert_config.certificateChain(); + std::string pem_str = std::string(const_cast(chain_str.data()), chain_str.size()); + std::stringstream pem_stream(chain_str); + std::vector chain = quic::CertificateView::LoadPemFromStream(&pem_stream); + if (chain.empty()) { + ENVOY_LOG(warn, "Failed to load certificate chain from %s", cert_config.certificateChainPath()); + return quic::QuicReferenceCountedPointer( + new quic::ProofSource::Chain({})); + } + return quic::QuicReferenceCountedPointer( + new quic::ProofSource::Chain(chain)); +} + +void EnvoyQuicProofSource::ComputeTlsSignature( + const quic::QuicSocketAddress& server_address, const quic::QuicSocketAddress& client_address, + const std::string& hostname, uint16_t signature_algorithm, quiche::QuicheStringPiece in, + std::unique_ptr callback) { + absl::optional> cert_config_ref = + getTlsCertConfig(server_address, client_address, hostname); + if (!cert_config_ref.has_value()) { + ENVOY_LOG(warn, "No matching filter chain found for handshake."); + callback->Run(false, "", nullptr); + return; + } + auto& cert_config = cert_config_ref.value().get(); + // Load private key. + const std::string& pkey = cert_config.privateKey(); + std::stringstream pem_str(pkey); + std::unique_ptr pem_key = + quic::CertificatePrivateKey::LoadPemFromStream(&pem_str); + + // Sign. 
+ std::string sig = pem_key->Sign(in, signature_algorithm); + + bool success = !sig.empty(); + callback->Run(success, sig, nullptr); +} + +absl::optional> +EnvoyQuicProofSource::getTlsCertConfig(const quic::QuicSocketAddress& server_address, + const quic::QuicSocketAddress& client_address, + const std::string& hostname) { + ENVOY_LOG(trace, "Getting cert chain for {}", hostname); + Network::ConnectionSocketImpl connection_socket( + std::make_unique(listen_socket_->ioHandle()), + quicAddressToEnvoyAddressInstance(server_address), + quicAddressToEnvoyAddressInstance(client_address)); + + connection_socket.setDetectedTransportProtocol( + Extensions::TransportSockets::TransportProtocolNames::get().Quic); + connection_socket.setRequestedServerName(hostname); + connection_socket.setRequestedApplicationProtocols({"h2"}); + const Network::FilterChain* filter_chain = + filter_chain_manager_.findFilterChain(connection_socket); + if (filter_chain == nullptr) { + ENVOY_LOG(warn, "No matching filter chain found for handshake."); + return absl::nullopt; + } + const Network::TransportSocketFactory& transport_socket_factory = + filter_chain->transportSocketFactory(); + std::vector> tls_cert_configs = + dynamic_cast(transport_socket_factory) + .serverContextConfig() + .tlsCertificates(); + + // Only return the first TLS cert config. + // TODO(danzh) Choose based on supported cipher suites in TLS1.3 CHLO and prefer EC + // certs if supported. + return {tls_cert_configs[0].get()}; +} + +} // namespace Quic +} // namespace Envoy diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_proof_source.h b/source/extensions/quic_listeners/quiche/envoy_quic_proof_source.h new file mode 100644 index 000000000000..4204f4b13634 --- /dev/null +++ b/source/extensions/quic_listeners/quiche/envoy_quic_proof_source.h @@ -0,0 +1,35 @@ +#include "extensions/quic_listeners/quiche/envoy_quic_fake_proof_source.h" +#include "extensions/quic_listeners/quiche/quic_transport_socket_factory.h" + +namespace Envoy { +namespace Quic { + +class EnvoyQuicProofSource : public EnvoyQuicFakeProofSource, + protected Logger::Loggable { +public: + EnvoyQuicProofSource(Network::SocketSharedPtr listen_socket, + Network::FilterChainManager& filter_chain_manager) + : listen_socket_(std::move(listen_socket)), filter_chain_manager_(filter_chain_manager) {} + + ~EnvoyQuicProofSource() override = default; + + quic::QuicReferenceCountedPointer + GetCertChain(const quic::QuicSocketAddress& server_address, + const quic::QuicSocketAddress& client_address, const std::string& hostname) override; + void ComputeTlsSignature(const quic::QuicSocketAddress& server_address, + const quic::QuicSocketAddress& client_address, + const std::string& hostname, uint16_t signature_algorithm, + quiche::QuicheStringPiece in, + std::unique_ptr callback) override; + +private: + absl::optional> + getTlsCertConfig(const quic::QuicSocketAddress& server_address, + const quic::QuicSocketAddress& client_address, const std::string& hostname); + + Network::SocketSharedPtr listen_socket_; + Network::FilterChainManager& filter_chain_manager_; +}; + +} // namespace Quic +} // namespace Envoy diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_server_session.cc b/source/extensions/quic_listeners/quiche/envoy_quic_server_session.cc index 05e76e9ba459..73a62a93d8b3 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_server_session.cc +++ b/source/extensions/quic_listeners/quiche/envoy_quic_server_session.cc @@ -105,5 +105,10 @@ void 
EnvoyQuicServerSession::SetDefaultEncryptionLevel(quic::EncryptionLevel lev bool EnvoyQuicServerSession::hasDataToWrite() { return HasDataToWrite(); } +void EnvoyQuicServerSession::OnOneRttKeysAvailable() { + quic::QuicServerSessionBase::OnOneRttKeysAvailable(); + raiseConnectionEvent(Network::ConnectionEvent::Connected); +} + } // namespace Quic } // namespace Envoy diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_server_session.h b/source/extensions/quic_listeners/quiche/envoy_quic_server_session.h index cbbbfb8c0f37..c0cbc334d8e3 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_server_session.h +++ b/source/extensions/quic_listeners/quiche/envoy_quic_server_session.h @@ -50,6 +50,7 @@ class EnvoyQuicServerSession : public quic::QuicServerSessionBase, quic::ConnectionCloseSource source) override; void Initialize() override; void OnCanWrite() override; + void OnOneRttKeysAvailable() override; // quic::QuicSpdySession void SetDefaultEncryptionLevel(quic::EncryptionLevel level) override; diff --git a/source/extensions/quic_listeners/quiche/platform/flags_list.h b/source/extensions/quic_listeners/quiche/platform/flags_list.h index 2043554da438..e4af6a590bc3 100644 --- a/source/extensions/quic_listeners/quiche/platform/flags_list.h +++ b/source/extensions/quic_listeners/quiche/platform/flags_list.h @@ -13,33 +13,25 @@ #if defined(QUICHE_FLAG) -QUICHE_FLAG(bool, http2_reloadable_flag_http2_add_backend_ping_manager, true, - "If true, SpdyBackendDispatcher will instantiate and use a PeriodicPingManager for " - "handling PING logic.") +QUICHE_FLAG( + bool, http2_reloadable_flag_http2_adapt_backend_stream_receive_window, true, + "If true, SpdyBackendDispatcher will use its PeriodicPingManager to periodically update the " + "per-stream receive flow control window based on its recorded RTT and a target throughput.") QUICHE_FLAG( bool, http2_reloadable_flag_http2_backend_alpn_failure_error_code, false, "If true, the GFE will return a new ResponseCodeDetails error when ALPN to the backend fails.") +QUICHE_FLAG(bool, http2_reloadable_flag_http2_new_window_behavior, false, + "If true, the GFE is slightly more aggressive about sending WINDOW_UPDATE frames to " + "the peer. This should reduce flow control bottlenecks.") + QUICHE_FLAG(bool, http2_reloadable_flag_http2_security_requirement_for_client3, false, "If true, check whether client meets security requirements during SSL handshake. If " "flag is true and client does not meet security requirements, do not negotiate HTTP/2 " "with client or terminate the session with SPDY_INADEQUATE_SECURITY if HTTP/2 is " "already negotiated. 
The spec contains both cipher and TLS version requirements.") -QUICHE_FLAG( - bool, http2_reloadable_flag_http2_skip_querying_entry_buffer_error, true, - "If true, do not query entry_buffer_.error_detected() in HpackDecoder::error_detected().") - -QUICHE_FLAG( - bool, http2_reloadable_flag_http2_support_periodic_ping_manager_cbs, true, - "If true, PeriodicPingManager will invoke user-provided callbacks on receiving PING acks.") - -QUICHE_FLAG( - bool, http2_reloadable_flag_http2_use_settings_rtt_in_ping_manager, true, - "If true along with --gfe2_reloadable_flag_http2_add_backend_ping_manager, SpdyDispatcher will " - "bootstrap its PingManager RTT with the RTT determined from the initial SETTINGS<-->ack.") - QUICHE_FLAG(bool, quic_reloadable_flag_advertise_quic_for_https_for_debugips, false, "") QUICHE_FLAG(bool, quic_reloadable_flag_advertise_quic_for_https_for_external_users, false, "") @@ -59,19 +51,34 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_allow_client_enabled_bbr_v2, true, QUICHE_FLAG(bool, quic_reloadable_flag_quic_alpn_dispatch, false, "Support different QUIC sessions, as indicated by ALPN. Used for QBONE.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_arm_pto_with_earliest_sent_time, true, - "If true, arm the 1st PTO with earliest in flight sent time.") - QUICHE_FLAG(bool, quic_reloadable_flag_quic_avoid_overestimate_bandwidth_with_aggregation, true, "If true, fix QUIC bandwidth sampler to avoid over estimating bandwidth in the " "presence of ack aggregation.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr2_add_ack_height_to_queueing_threshold, false, +QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr2_add_ack_height_to_queueing_threshold, true, "If true, QUIC BBRv2 to take ack height into account when calculating " "queuing_threshold in PROBE_UP.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr2_avoid_unnecessary_probe_rtt, true, - "If true, QUIC BBRv2 to avoid unnecessary PROBE_RTTs after quiescence.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr2_avoid_too_low_probe_bw_cwnd, false, + "If true, QUIC BBRv2's PROBE_BW mode will not reduce cwnd below BDP+ack_height.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr2_fewer_startup_round_trips, false, + "When true, the 1RTT and 2RTT connection options decrease the number of round trips in " + "BBRv2 STARTUP without a 25% bandwidth increase to 1 or 2 round trips respectively.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr2_ignore_inflight_lo, false, + "When true, QUIC's BBRv2 ignores inflight_lo in PROBE_BW.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr2_lower_startup_cwnd_gain, false, + "When true, QUIC BBRv2 lowers the CWND gain in STARTUP to 2 when the BBQ2 connection " + "option is supplied in the handshake.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_copy_sampler_state_from_v1_to_v2, true, + "If true, when QUIC switches from BbrSender to Bbr2Sender, Bbr2Sender will copy the " + "bandwidth sampler states from BbrSender.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_default_exit_startup_on_loss, false, + "If true, QUIC will enable connection options LRTT+BBQ2 by default.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_donot_inject_bandwidth, true, "If true, do not inject bandwidth in BbrSender::AdjustNetworkParameters.") @@ -79,10 +86,6 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_donot_inject_bandwidth, true, QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_fix_pacing_rate, true, "If true, re-calculate pacing rate when cwnd gets bootstrapped.") -QUICHE_FLAG( - bool, 
quic_reloadable_flag_quic_bbr_fix_zero_bw_on_loss_only_event, false, - "If true, fix a bug in QUIC BBR where bandwidth estimate becomes 0 after a loss only event.") - QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_flexible_app_limited, false, "When true and the BBR9 connection option is present, BBR only considers bandwidth " "samples app-limited if they're not filling the pipe.") @@ -95,17 +98,11 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_no_bytes_acked_in_startup_recove "When in STARTUP and recovery, do not add bytes_acked to QUIC BBR's CWND in " "CalculateCongestionWindow()") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_one_mss_conservation, false, - "When true, ensure BBR allows at least one MSS to be sent in response to an ACK in " - "packet conservation.") - -QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_startup_rate_reduction, false, - "When true, enables the BBS4 and BBS5 connection options, which reduce BBR's pacing " - "rate in STARTUP as more losses occur as a fraction of CWND.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_use_available_min_rtt, true, + "If true, returns min_rtt in rtt_stats_ if it is available.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_bundle_retransmittable_with_pto_ack, true, - "When the EACK connection option is sent by the client, an ack-eliciting frame is " - "bundled with ACKs sent after the PTO fires.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_break_session_stream_close_loop, false, + "If true, break session/stream close loop.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_bw_sampler_app_limited_starting_value, false, "If true, quic::BandwidthSampler will start in application limited phase.") @@ -116,10 +113,6 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_conservative_bursts, false, QUICHE_FLAG(bool, quic_reloadable_flag_quic_conservative_cwnd_and_pacing_gains, false, "If true, uses conservative cwnd gain and pacing gain when cwnd gets bootstrapped.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_create_incoming_stream_bug, false, - "If true, trigger QUIC_BUG in two ShouldCreateIncomingStream() overrides when called " - "with locally initiated stream ID.") - QUICHE_FLAG(bool, quic_reloadable_flag_quic_debug_wrong_qos, false, "If true, consider getting QoS after stream has been detached as GFE bug.") @@ -130,6 +123,9 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_default_to_bbr_v2, false, "If true, use BBRv2 as the default congestion controller. 
Takes precedence over "
            "--quic_default_to_bbr.")

+QUICHE_FLAG(bool, quic_reloadable_flag_quic_deprecate_draining_streams, false,
+            "If true, remove draining_streams_ from QuicSession.")
+
QUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_version_q043, false,
            "If true, disable QUIC version Q043.")

@@ -155,8 +151,14 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_donot_reset_ideal_next_packet_send_t
QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_ack_decimation, true,
            "Default enables QUIC ack decimation and adds a connection option to disable it.")

-QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_pcc3, false,
-            "If true, enable experiment for testing PCC congestion-control.")
+QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_loss_detection_experiment_at_gfe, false,
+            "If true, enable GFE-picked loss detection experiment.")
+
+QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_loss_detection_tuner, false,
+            "If true, allow QUIC loss detection tuning to be enabled by connection option ELDT.")
+
+QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_tls_resumption, false,
+            "If true, enables support for TLS resumption in QUIC.")

QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_version_draft_25_v3, false,
            "If true, enable QUIC version h3-25.")

@@ -164,25 +166,25 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_version_draft_25_v3, false,
QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_version_draft_27, false,
            "If true, enable QUIC version h3-27.")

-QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_version_t050, true,
-            "If true, enable QUIC version T050.")
+QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_version_t050_v2, false,
+            "If true, enable QUIC version h3-T050.")

QUICHE_FLAG(bool, quic_reloadable_flag_quic_enabled, false, "")

QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_bbr_cwnd_in_bandwidth_resumption, true,
            "If true, adjust congestion window when doing bandwidth resumption in BBR.")

-QUICHE_FLAG(bool, quic_reloadable_flag_quic_ignore_one_write_error_after_mtu_probe, false,
+QUICHE_FLAG(
+    bool, quic_reloadable_flag_quic_ietf_alt_svc_format_first, false,
+    "When true, advertise IETF Alt-Svc format before legacy Google format instead of after.")
+
+QUICHE_FLAG(bool, quic_reloadable_flag_quic_ignore_one_write_error_after_mtu_probe, true,
            "If true, QUIC connection will ignore one packet write error after MTU probe.")

QUICHE_FLAG(bool, quic_reloadable_flag_quic_listener_never_fake_epollout, false,
            "If true, QuicListener::OnSocketIsWritable will always return false, which means there "
            "will never be a fake EPOLLOUT event in the next epoll iteration.")

-QUICHE_FLAG(
-    bool, quic_reloadable_flag_quic_minimum_validation_of_coalesced_packets, true,
-    "If true, only do minimum validation of coalesced packets (only validate connection ID).")
-
QUICHE_FLAG(bool, quic_reloadable_flag_quic_negotiate_ack_delay_time, false,
            "If true, will negotiate the ACK delay time.")

@@ -190,26 +192,24 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_no_dup_experiment_id_2, false,
            "If true, transport connection stats doesn't report duplicated experiments for same "
            "connection.")

-QUICHE_FLAG(bool, quic_reloadable_flag_quic_populate_mean_rtt_deviation_in_tcs, true,
-            "If true, populate mean rtt deviation in transport connection stats.")
-
QUICHE_FLAG(bool, quic_reloadable_flag_quic_proxy_write_packed_strings, false,
            "If true, QuicProxyDispatcher will write packed_client_address and packed_server_vip "
            "in TcpProxyHeaderProto.")

QUICHE_FLAG(bool, quic_reloadable_flag_quic_record_frontend_service_vip_mapping, false,
            "If true, for L1 GFE, as requests come in, record frontend service to VIP mapping "
-            "which is used to announce VIP in SHLO for proxied sessions.")
+            "which is used to announce VIP in SHLO for proxied sessions. ")

QUICHE_FLAG(bool, quic_reloadable_flag_quic_reject_all_traffic, false, "")

+QUICHE_FLAG(bool, quic_reloadable_flag_quic_remove_android_conformance_test_workaround, false,
+            "If true, disable QuicDispatcher workaround that replies to invalid QUIC packets from "
+            "the Android Conformance Test.")
+
QUICHE_FLAG(bool, quic_reloadable_flag_quic_require_handshake_confirmation, false,
            "If true, require handshake confirmation for QUIC connections, functionally disabling "
            "0-rtt handshakes.")

-QUICHE_FLAG(bool, quic_reloadable_flag_quic_send_ping_when_pto_skips_packet_number, false,
-            "If true, send PING when PTO skips packet number and there is no data to send.")
-
QUICHE_FLAG(bool, quic_reloadable_flag_quic_send_timestamps, false,
            "When the STMP connection option is sent by the client, timestamps in the QUIC ACK "
            "frame are sent and processed.")

@@ -217,8 +217,8 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_send_timestamps, false,
QUICHE_FLAG(bool, quic_reloadable_flag_quic_server_push, true,
            "If true, enable server push feature on QUIC.")

-QUICHE_FLAG(bool, quic_reloadable_flag_quic_skip_packet_threshold_loss_detection_with_runt, false,
-            "If true, skip packet threshold loss detection if largest acked is a runt.")
+QUICHE_FLAG(bool, quic_reloadable_flag_quic_stream_id_manager_handles_accounting, false,
+            "If true, move Google QUIC stream accounting to LegacyQuicStreamIdManager.")

QUICHE_FLAG(bool, quic_reloadable_flag_quic_testonly_default_false, false,
            "A testonly reloadable flag that will always default to false.")

@@ -226,20 +226,21 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_testonly_default_false, false,
QUICHE_FLAG(bool, quic_reloadable_flag_quic_testonly_default_true, true,
            "A testonly reloadable flag that will always default to true.")

-QUICHE_FLAG(bool, quic_reloadable_flag_quic_tracegraf_populate_rtt_variation, true,
-            "If true, QUIC tracegraf populates RTT variation.")
-
QUICHE_FLAG(bool, quic_reloadable_flag_quic_unified_iw_options, false,
            "When true, set the initial congestion control window from connection options in "
            "QuicSentPacketManager rather than TcpCubicSenderBytes.")

-QUICHE_FLAG(
-    bool, quic_reloadable_flag_quic_use_ack_frame_to_get_min_size, false,
-    "If true, use passed in ack_frame to calculate minimum size of the serialized ACK frame.")
+QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_blackhole_detector, true,
+            "If true, use blackhole detector in QuicConnection to detect path degrading and "
+            "network blackhole.")

QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_header_stage_idle_list2, false,
            "If true, use header stage idle list for QUIC connections in GFE.")

+QUICHE_FLAG(
+    bool, quic_reloadable_flag_quic_use_idle_network_detector, true,
+    "If true, use idle network detector to detect handshake timeout and idle network timeout.")
+
QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_ip_bandwidth_module, true,
            "If true, use IpBandwidthModule for cwnd bootstrapping if it is registered.")

@@ -247,23 +248,6 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_leto_key_exchange, false,
            "If true, QUIC will attempt to use the Leto key exchange service and only fall back to "
            "local key exchange if that fails.")

-QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_pigeon_sockets, false,
-            "Use
USPS Direct Path for QUIC egress.") - -QUICHE_FLAG( - bool, quic_reloadable_flag_quic_use_quic_time_for_received_timestamp2, true, - "If true, use QuicClock::Now() as the source of packet receive time instead of WallNow().") - -QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_standard_deviation_for_pto, true, - "If true, use standard deviation when calculating PTO timeout.") - -QUICHE_FLAG(bool, quic_reloadable_flag_quic_write_with_transmission, false, - "If true, QuicSession's various write methods will set transmission type.") - -QUICHE_FLAG( - bool, quic_reloadable_flag_quic_writevdata_at_level, true, - "If true, QuicSession::WritevData() will support writing data at a specified encryption level.") - QUICHE_FLAG(bool, quic_reloadable_flag_send_quic_fallback_server_config_on_leto_error, false, "If true and using Leto for QUIC shared-key calculations, GFE will react to a failure " "to contact Leto by sending a REJ containing a fallback ServerConfig, allowing the " @@ -273,30 +257,36 @@ QUICHE_FLAG( bool, quic_restart_flag_dont_fetch_quic_private_keys_from_leto, false, "If true, GFE will not request private keys when fetching QUIC ServerConfigs from Leto.") +QUICHE_FLAG(bool, quic_restart_flag_quic_adjust_initial_cwnd_by_gws, true, + "If true, GFE informs backend that a client request is the first one on the connection " + "via frontline header \"first_request=1\". Also, adjust initial cwnd based on " + "X-Google-Gws-Initial-Cwnd-Mode sent by GWS.") + QUICHE_FLAG( bool, quic_restart_flag_quic_allow_loas_multipacket_chlo, false, "If true, inspects QUIC CHLOs for kLOAS and early creates sessions to allow multi-packet CHLOs") -QUICHE_FLAG(bool, quic_restart_flag_quic_batch_writer_always_drop_packets_on_error, false, +QUICHE_FLAG(bool, quic_restart_flag_quic_batch_writer_always_drop_packets_on_error, true, "If true, QUIC (gso|sendmmsg) batch writers will always drop packets on write error.") -QUICHE_FLAG( - bool, quic_restart_flag_quic_no_cap_net_raw_for_usps_egress, true, - "If true, gfe2::RawSocket::CapabilityNeeded will return false if QUIC egress method is USPS.") - -QUICHE_FLAG(bool, quic_restart_flag_quic_no_fallback_for_pigeon_socket, true, - "If true, GFEs using USPS egress will not fallback to raw ip socket.") +QUICHE_FLAG(bool, quic_restart_flag_quic_disable_ietf_quic_on_cloud_vips, false, + "If true, disable IETF QUIC on cloud VIPs by 1) not advertise IETF QUIC and 2) send " + "version negotiation if receive IETF packet on those VIPs.") QUICHE_FLAG(bool, quic_restart_flag_quic_offload_pacing_to_usps2, false, "If true, QUIC offload pacing when using USPS as egress method.") +QUICHE_FLAG(bool, quic_restart_flag_quic_pigeon_client_reconnect_forever, false, + "If true, QUIC pigeon client will 1) reconnect forever after disconnected, and 2) mark " + "GFE degraded after failing to reconnect for some time.") + +QUICHE_FLAG(bool, quic_restart_flag_quic_replace_time_wait_list_encryption_level, false, + "Replace the usage of ConnectionData::encryption_level in quic_time_wait_list_manager " + "with a new TimeWaitAction.") + QUICHE_FLAG(bool, quic_restart_flag_quic_rx_ring_use_tpacket_v3, false, "If true, use TPACKET_V3 for QuicRxRing instead of TPACKET_V2.") -QUICHE_FLAG(bool, quic_restart_flag_quic_send_settings_on_write_key_available, false, - "If true, send H3 SETTINGs when 1-RTT write key is available (rather then both keys " - "are available).") - QUICHE_FLAG(bool, quic_restart_flag_quic_should_accept_new_connection, false, "If true, reject QUIC CHLO packets when dispatcher is asked to do 
so.") @@ -321,9 +311,9 @@ QUICHE_FLAG( "and the rest of the response bytes would still be delivered even though the response code " "said there should not be any body associated with the response code.") -QUICHE_FLAG(bool, spdy_reloadable_flag_spdy_enable_granular_decompress_errors, false, - "If true, emit more granular errors instead of " - "SpdyFramerError::SPDY_DECOMPRESS_FAILURE in Http2DecoderAdapter.") +QUICHE_FLAG(bool, spdy_reloadable_flag_spdy_hpack_use_indexed_name, false, + "If true, use indexed name if possible when sending Literal Header Field without " + "Indexing instruction.") QUICHE_FLAG(bool, quic_allow_chlo_buffering, true, "If true, allows packets to be buffered in anticipation of a " @@ -387,6 +377,9 @@ QUICHE_FLAG(bool, quic_client_convert_http_header_name_to_lowercase, true, "If true, HTTP request header names sent from QuicSpdyClientBase(and " "descendents) will be automatically converted to lower case.") +QUICHE_FLAG(bool, quic_enable_http3_server_push, false, + "If true, server push will be allowed in QUIC versions that use HTTP/3.") + QUICHE_FLAG(int32_t, quic_bbr2_default_probe_bw_base_duration_ms, 2000, "The default minimum duration for BBRv2-native probes, in milliseconds.") @@ -434,6 +427,9 @@ QUICHE_FLAG(int32_t, quic_max_aggressive_retransmittable_on_wire_ping_count, 0, QUICHE_FLAG(int32_t, quic_max_congestion_window, 2000, "The maximum congestion window in packets.") +QUICHE_FLAG(int32_t, quic_max_streams_window_divisor, 2, + "The divisor that controls how often MAX_STREAMS frame is sent.") + QUICHE_FLAG(bool, http2_reloadable_flag_http2_testonly_default_false, false, "A testonly reloadable flag that will always default to false.") diff --git a/source/extensions/quic_listeners/quiche/platform/quic_containers_impl.h b/source/extensions/quic_listeners/quiche/platform/quic_containers_impl.h index 06d79eb00112..0e20247707e4 100644 --- a/source/extensions/quic_listeners/quiche/platform/quic_containers_impl.h +++ b/source/extensions/quic_listeners/quiche/platform/quic_containers_impl.h @@ -7,6 +7,7 @@ #include #include "absl/container/flat_hash_map.h" +#include "absl/container/flat_hash_set.h" #include "absl/container/inlined_vector.h" #include "absl/container/node_hash_map.h" #include "absl/container/node_hash_set.h" @@ -26,6 +27,11 @@ template using QuicDefaultHasherImpl = absl::Hash; template using QuicUnorderedMapImpl = absl::node_hash_map; +template +using QuicHashMapImpl = absl::flat_hash_map; + +template using QuicHashSetImpl = absl::flat_hash_set; + template using QuicUnorderedSetImpl = absl::node_hash_set; template diff --git a/source/extensions/quic_listeners/quiche/platform/quic_macros_impl.h b/source/extensions/quic_listeners/quiche/platform/quic_macros_impl.h index eb8ce413fb8a..b8b70a0426b4 100644 --- a/source/extensions/quic_listeners/quiche/platform/quic_macros_impl.h +++ b/source/extensions/quic_listeners/quiche/platform/quic_macros_impl.h @@ -10,3 +10,4 @@ #define QUIC_MUST_USE_RESULT_IMPL ABSL_MUST_USE_RESULT #define QUIC_UNUSED_IMPL ABSL_ATTRIBUTE_UNUSED +#define QUIC_CONST_INIT_IMPL ABSL_CONST_INIT diff --git a/source/extensions/quic_listeners/quiche/platform/quiche_text_utils_impl.h b/source/extensions/quic_listeners/quiche/platform/quiche_text_utils_impl.h index c2b117284bbf..3a6d1a393a8b 100644 --- a/source/extensions/quic_listeners/quiche/platform/quiche_text_utils_impl.h +++ b/source/extensions/quic_listeners/quiche/platform/quiche_text_utils_impl.h @@ -1,5 +1,8 @@ #pragma once +#include "common/common/base64.h" + +#include 
"extensions/quic_listeners/quiche/platform/quiche_optional_impl.h" #include "extensions/quic_listeners/quiche/platform/quiche_string_piece_impl.h" #include "extensions/quic_listeners/quiche/platform/string_utils.h" @@ -21,58 +24,86 @@ namespace quiche { class QuicheTextUtilsImpl { public: + // NOLINTNEXTLINE(readability-identifier-naming) static bool StartsWith(QuicheStringPieceImpl data, QuicheStringPieceImpl prefix) { return absl::StartsWith(data, prefix); } + // NOLINTNEXTLINE(readability-identifier-naming) + static bool EndsWith(QuicheStringPieceImpl data, QuicheStringPieceImpl suffix) { + return absl::EndsWith(data, suffix); + } + + // NOLINTNEXTLINE(readability-identifier-naming) static bool EndsWithIgnoreCase(QuicheStringPieceImpl data, QuicheStringPieceImpl suffix) { return absl::EndsWithIgnoreCase(data, suffix); } + // NOLINTNEXTLINE(readability-identifier-naming) static std::string ToLower(QuicheStringPieceImpl data) { return absl::AsciiStrToLower(data); } + // NOLINTNEXTLINE(readability-identifier-naming) static void RemoveLeadingAndTrailingWhitespace(QuicheStringPieceImpl* data) { *data = absl::StripAsciiWhitespace(*data); } + // NOLINTNEXTLINE(readability-identifier-naming) static bool StringToUint64(QuicheStringPieceImpl in, uint64_t* out) { return absl::SimpleAtoi(in, out); } + // NOLINTNEXTLINE(readability-identifier-naming) static bool StringToInt(QuicheStringPieceImpl in, int* out) { return absl::SimpleAtoi(in, out); } + // NOLINTNEXTLINE(readability-identifier-naming) static bool StringToUint32(QuicheStringPieceImpl in, uint32_t* out) { return absl::SimpleAtoi(in, out); } + // NOLINTNEXTLINE(readability-identifier-naming) static bool StringToSizeT(QuicheStringPieceImpl in, size_t* out) { return absl::SimpleAtoi(in, out); } + // NOLINTNEXTLINE(readability-identifier-naming) static std::string Uint64ToString(uint64_t in) { return absl::StrCat(in); } + // NOLINTNEXTLINE(readability-identifier-naming) static std::string HexEncode(QuicheStringPieceImpl data) { return absl::BytesToHexString(data); } + // NOLINTNEXTLINE(readability-identifier-naming) static std::string Hex(uint32_t v) { return absl::StrCat(absl::Hex(v)); } + // NOLINTNEXTLINE(readability-identifier-naming) static std::string HexDecode(QuicheStringPieceImpl data) { return absl::HexStringToBytes(data); } + // NOLINTNEXTLINE(readability-identifier-naming) static void Base64Encode(const uint8_t* data, size_t data_len, std::string* output) { - return quiche::Base64Encode(data, data_len, output); + *output = + Envoy::Base64::encode(reinterpret_cast(data), data_len, /*add_padding=*/false); + } + + // NOLINTNEXTLINE(readability-identifier-naming) + static QuicheOptionalImpl Base64Decode(QuicheStringPieceImpl input) { + return Envoy::Base64::decodeWithoutPadding(input); } + // NOLINTNEXTLINE(readability-identifier-naming) static std::string HexDump(QuicheStringPieceImpl binary_data) { return quiche::HexDump(binary_data); } + // NOLINTNEXTLINE(readability-identifier-naming) static bool ContainsUpperCase(QuicheStringPieceImpl data) { return std::any_of(data.begin(), data.end(), absl::ascii_isupper); } + // NOLINTNEXTLINE(readability-identifier-naming) static bool IsAllDigits(QuicheStringPieceImpl data) { return std::all_of(data.begin(), data.end(), absl::ascii_isdigit); } + // NOLINTNEXTLINE(readability-identifier-naming) static std::vector Split(QuicheStringPieceImpl data, char delim) { return absl::StrSplit(data, delim); } diff --git a/source/extensions/quic_listeners/quiche/platform/string_utils.cc 
b/source/extensions/quic_listeners/quiche/platform/string_utils.cc index 957c61fc3d7e..24ef55bfe94a 100644 --- a/source/extensions/quic_listeners/quiche/platform/string_utils.cc +++ b/source/extensions/quic_listeners/quiche/platform/string_utils.cc @@ -14,15 +14,10 @@ #include "absl/strings/escaping.h" #include "absl/strings/str_format.h" #include "common/common/assert.h" -#include "common/common/base64.h" namespace quiche { -void Base64Encode(const uint8_t* data, size_t data_len, std::string* output) { - *output = - Envoy::Base64::encode(reinterpret_cast(data), data_len, /*add_padding=*/false); -} - +// NOLINTNEXTLINE(readability-identifier-naming) std::string HexDump(absl::string_view data) { const int kBytesPerLine = 16; const char* buf = data.data(); @@ -56,6 +51,7 @@ std::string HexDump(absl::string_view data) { return out; } +// NOLINTNEXTLINE(readability-identifier-naming) char HexDigitToInt(char c) { ASSERT(std::isxdigit(c)); @@ -71,6 +67,7 @@ char HexDigitToInt(char c) { return 0; } +// NOLINTNEXTLINE(readability-identifier-naming) bool HexDecodeToUInt32(absl::string_view data, uint32_t* out) { if (data.empty() || data.size() > 8u) { return false; diff --git a/source/extensions/quic_listeners/quiche/platform/string_utils.h b/source/extensions/quic_listeners/quiche/platform/string_utils.h index 43ebe1c066f3..28441305f2bf 100644 --- a/source/extensions/quic_listeners/quiche/platform/string_utils.h +++ b/source/extensions/quic_listeners/quiche/platform/string_utils.h @@ -11,15 +11,16 @@ namespace quiche { -void Base64Encode(const uint8_t* data, size_t data_len, std::string* output); - +// NOLINTNEXTLINE(readability-identifier-naming) std::string HexDump(absl::string_view data); // '0' => 0, '1' => 1, 'a' or 'A' => 10, etc. +// NOLINTNEXTLINE(readability-identifier-naming) char HexDigitToInt(char c); // Turns a 8-byte hex string into a uint32 in host byte order. // e.g. 
"12345678" => 0x12345678 +// NOLINTNEXTLINE(readability-identifier-naming) bool HexDecodeToUInt32(absl::string_view data, uint32_t* out); } // namespace quiche diff --git a/source/extensions/quic_listeners/quiche/quic_transport_socket_factory.cc b/source/extensions/quic_listeners/quiche/quic_transport_socket_factory.cc index e604a7be7aa4..17f16d2a8254 100644 --- a/source/extensions/quic_listeners/quiche/quic_transport_socket_factory.cc +++ b/source/extensions/quic_listeners/quiche/quic_transport_socket_factory.cc @@ -1,8 +1,8 @@ #include "extensions/quic_listeners/quiche/quic_transport_socket_factory.h" -#include "envoy/extensions/transport_sockets/tls/v3/cert.pb.h" -#include "envoy/extensions/transport_sockets/tls/v3/tls.pb.validate.h" - +// #include "envoy/extensions/transport_sockets/tls/v3/tls.pb.validate.h" +#include "envoy/extensions/transport_sockets/quic/v3/quic_transport.pb.h" +#include "envoy/extensions/transport_sockets/quic/v3/quic_transport.pb.validate.h" #include "extensions/transport_sockets/tls/context_config_impl.h" namespace Envoy { @@ -12,32 +12,33 @@ Network::TransportSocketFactoryPtr QuicServerTransportSocketConfigFactory::createTransportSocketFactory( const Protobuf::Message& config, Server::Configuration::TransportSocketFactoryContext& context, const std::vector& /*server_names*/) { + auto quic_transport = MessageUtil::downcastAndValidate< + const envoy::extensions::transport_sockets::quic::v3::QuicDownstreamTransport&>( + config, context.messageValidationVisitor()); auto server_config = std::make_unique( - MessageUtil::downcastAndValidate< - const envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext&>( - config, context.messageValidationVisitor()), - context); + quic_transport.downstream_tls_context(), context); return std::make_unique(std::move(server_config)); } ProtobufTypes::MessagePtr QuicServerTransportSocketConfigFactory::createEmptyConfigProto() { - return std::make_unique(); + return std::make_unique< + envoy::extensions::transport_sockets::quic::v3::QuicDownstreamTransport>(); } Network::TransportSocketFactoryPtr QuicClientTransportSocketConfigFactory::createTransportSocketFactory( const Protobuf::Message& config, Server::Configuration::TransportSocketFactoryContext& context) { + auto quic_transport = MessageUtil::downcastAndValidate< + const envoy::extensions::transport_sockets::quic::v3::QuicUpstreamTransport&>( + config, context.messageValidationVisitor()); auto client_config = std::make_unique( - MessageUtil::downcastAndValidate< - const envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext&>( - config, context.messageValidationVisitor()), - context); + quic_transport.upstream_tls_context(), context); return std::make_unique(std::move(client_config)); } ProtobufTypes::MessagePtr QuicClientTransportSocketConfigFactory::createEmptyConfigProto() { - return std::make_unique(); + return std::make_unique(); } REGISTER_FACTORY(QuicServerTransportSocketConfigFactory, diff --git a/test/extensions/quic_listeners/quiche/BUILD b/test/extensions/quic_listeners/quiche/BUILD index 8301bf42aa88..37eb776630cf 100644 --- a/test/extensions/quic_listeners/quiche/BUILD +++ b/test/extensions/quic_listeners/quiche/BUILD @@ -49,7 +49,10 @@ envoy_cc_test( deps = [ "//source/extensions/quic_listeners/quiche:envoy_quic_proof_source_lib", "//source/extensions/quic_listeners/quiche:envoy_quic_proof_verifier_lib", + "//test/mocks/network:network_mocks", + "//test/mocks/ssl:ssl_mocks", "@com_googlesource_quiche//:quic_core_versions_lib", + 
"@com_googlesource_quiche//:quic_test_tools_test_certificates_lib", ], ) @@ -111,11 +114,12 @@ envoy_cc_test( ], deps = [ ":quic_test_utils_for_envoy_lib", + ":test_proof_source_lib", + ":test_utils_lib", "//include/envoy/stats:stats_macros", "//source/extensions/quic_listeners/quiche:codec_lib", "//source/extensions/quic_listeners/quiche:envoy_quic_alarm_factory_lib", "//source/extensions/quic_listeners/quiche:envoy_quic_connection_helper_lib", - "//source/extensions/quic_listeners/quiche:envoy_quic_proof_source_lib", "//source/extensions/quic_listeners/quiche:envoy_quic_server_connection_lib", "//source/extensions/quic_listeners/quiche:envoy_quic_server_session_lib", "//source/server:configuration_lib", @@ -127,6 +131,7 @@ envoy_cc_test( "//test/test_common:global_lib", "//test/test_common:logging_lib", "//test/test_common:simulated_time_system_lib", + "@com_googlesource_quiche//:quic_test_tools_config_peer_lib", "@com_googlesource_quiche//:quic_test_tools_server_session_base_peer", "@com_googlesource_quiche//:quic_test_tools_test_utils_interface_lib", ], @@ -142,6 +147,7 @@ envoy_cc_test( ], deps = [ ":quic_test_utils_for_envoy_lib", + ":test_utils_lib", "//include/envoy/stats:stats_macros", "//source/extensions/quic_listeners/quiche:codec_lib", "//source/extensions/quic_listeners/quiche:envoy_quic_alarm_factory_lib", @@ -167,15 +173,16 @@ envoy_cc_test( ], deps = [ ":quic_test_utils_for_envoy_lib", + ":test_utils_lib", "//source/extensions/quic_listeners/quiche:active_quic_listener_config_lib", "//source/extensions/quic_listeners/quiche:active_quic_listener_lib", "//source/extensions/quic_listeners/quiche:envoy_quic_utils_lib", "//source/server:configuration_lib", "//test/mocks/network:network_mocks", "//test/mocks/server:server_mocks", - "//test/test_common:environment_lib", "//test/test_common:network_utility_lib", "//test/test_common:simulated_time_system_lib", + "@com_googlesource_quiche//:quic_test_tools_crypto_server_config_peer_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], ) @@ -190,6 +197,8 @@ envoy_cc_test( ], deps = [ ":quic_test_utils_for_envoy_lib", + ":test_proof_source_lib", + ":test_utils_lib", "//include/envoy/stats:stats_macros", "//source/extensions/quic_listeners/quiche:envoy_quic_alarm_factory_lib", "//source/extensions/quic_listeners/quiche:envoy_quic_connection_helper_lib", @@ -201,18 +210,27 @@ envoy_cc_test( "//test/mocks/http:http_mocks", "//test/mocks/network:network_mocks", "//test/mocks/stats:stats_mocks", - "//test/test_common:environment_lib", "//test/test_common:global_lib", "//test/test_common:simulated_time_system_lib", ], ) +envoy_cc_test_library( + name = "test_proof_source_lib", + hdrs = ["test_proof_source.h"], + tags = ["nofips"], + deps = [ + "//source/extensions/quic_listeners/quiche:envoy_quic_fake_proof_source_lib", + "@com_googlesource_quiche//:quic_test_tools_test_certificates_lib", + ], +) + envoy_cc_test_library( name = "quic_test_utils_for_envoy_lib", srcs = ["crypto_test_utils_for_envoy.cc"], tags = ["nofips"], deps = [ - "//source/extensions/quic_listeners/quiche:envoy_quic_proof_source_lib", + ":test_proof_source_lib", "//source/extensions/quic_listeners/quiche:envoy_quic_proof_verifier_lib", "@com_googlesource_quiche//:quic_test_tools_test_utils_interface_lib", ], @@ -275,9 +293,12 @@ envoy_cc_test( envoy_cc_test_library( name = "test_utils_lib", hdrs = ["test_utils.h"], + external_deps = ["bazel_runfiles"], tags = ["nofips"], deps = [ "//source/extensions/quic_listeners/quiche:quic_filter_manager_connection_lib", + 
"//test/test_common:environment_lib", "@com_googlesource_quiche//:quic_core_http_spdy_session_lib", + "@com_googlesource_quiche//:quic_test_tools_first_flight_lib", ], ) diff --git a/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc b/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc index ff4598da2b3a..8b4d0039b246 100644 --- a/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc +++ b/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc @@ -17,6 +17,7 @@ #include "quiche/quic/test_tools/crypto_test_utils.h" #include "quiche/quic/test_tools/quic_dispatcher_peer.h" #include "quiche/quic/test_tools/quic_test_utils.h" +#include "quiche/quic/test_tools/quic_crypto_server_config_peer.h" #pragma GCC diagnostic pop @@ -25,6 +26,8 @@ #include "common/network/listen_socket_impl.h" #include "common/network/socket_option_factory.h" #include "extensions/quic_listeners/quiche/active_quic_listener.h" +#include "test/extensions/quic_listeners/quiche/test_utils.h" +#include "test/extensions/quic_listeners/quiche/test_proof_source.h" #include "test/test_common/simulated_time_system.h" #include "test/test_common/environment.h" #include "test/mocks/network/mocks.h" @@ -66,16 +69,18 @@ class ActiveQuicListenerFactoryPeer { } }; -class ActiveQuicListenerTest : public testing::TestWithParam { +class ActiveQuicListenerTest : public QuicMultiVersionTest { protected: using Socket = Network::NetworkListenSocket>; ActiveQuicListenerTest() - : version_(GetParam()), api_(Api::createApiForTest(simulated_time_system_)), + : version_(GetParam().first), api_(Api::createApiForTest(simulated_time_system_)), dispatcher_(api_->allocateDispatcher("test_thread")), clock_(*dispatcher_), local_address_(Network::Test::getCanonicalLoopbackAddress(version_)), - connection_handler_(*dispatcher_) {} + connection_handler_(*dispatcher_) { + SetQuicReloadableFlag(quic_enable_version_draft_27, GetParam().second); + } template std::unique_ptr staticUniquePointerCast(std::unique_ptr&& source) { @@ -98,10 +103,15 @@ class ActiveQuicListenerTest : public testing::TestWithParam(listener_factory_->createActiveUdpListener( connection_handler_, *dispatcher_, listener_config_)); quic_dispatcher_ = ActiveQuicListenerPeer::quicDispatcher(*quic_listener_); + quic::QuicCryptoServerConfig& crypto_config = + ActiveQuicListenerPeer::cryptoConfig(*quic_listener_); + quic::test::QuicCryptoServerConfigPeer crypto_config_peer(&crypto_config); + crypto_config_peer.ResetProofSource(std::make_unique()); simulated_time_system_.advanceTimeWait(std::chrono::milliseconds(100)); } @@ -128,8 +138,10 @@ class ActiveQuicListenerTest : public testing::TestWithParam(local_address_, nullptr, /*bind*/ false)); - quic::CryptoHandshakeMessage chlo = quic::test::crypto_test_utils::GenerateDefaultInchoateCHLO( - &clock_, quic::AllSupportedVersions()[0].transport_version, - &ActiveQuicListenerPeer::cryptoConfig(*quic_listener_)); - chlo.SetVector(quic::kCOPT, quic::QuicTagVector{quic::kREJ}); - quic::CryptoHandshakeMessage full_chlo; - quic::QuicReferenceCountedPointer signed_config( - new quic::QuicSignedServerConfig); - quic::QuicCompressedCertsCache cache( - quic::QuicCompressedCertsCache::kQuicCompressedCertsCacheSize); - quic::test::crypto_test_utils::GenerateFullCHLO( - chlo, &ActiveQuicListenerPeer::cryptoConfig(*quic_listener_), - envoyAddressInstanceToQuicSocketAddress(local_address_), + Buffer::OwnedImpl payload = generateChloPacketToSend( + quic::CurrentSupportedVersions()[0], quic_config_, + 
ActiveQuicListenerPeer::cryptoConfig(*quic_listener_), connection_id, clock_, envoyAddressInstanceToQuicSocketAddress(local_address_), - quic::AllSupportedVersions()[0].transport_version, &clock_, signed_config, &cache, - &full_chlo); - // Overwrite version label to highest current supported version. - full_chlo.SetVersion(quic::kVER, quic::CurrentSupportedVersions()[0]); - quic::QuicConfig quic_config; - quic_config.ToHandshakeMessage(&full_chlo, - quic::CurrentSupportedVersions()[0].transport_version); - - std::string packet_content(full_chlo.GetSerialized().AsStringPiece()); - auto encrypted_packet = std::unique_ptr( - quic::test::ConstructEncryptedPacket(connection_id, quic::EmptyQuicConnectionId(), - /*version_flag=*/true, /*reset_flag*/ false, - /*packet_number=*/1, packet_content)); - - Buffer::RawSlice first_slice{ - reinterpret_cast(const_cast(encrypted_packet->data())), - encrypted_packet->length()}; + envoyAddressInstanceToQuicSocketAddress(local_address_), "test.example.org"); + Buffer::RawSliceVector slice = payload.getRawSlices(); + ASSERT_EQ(1u, slice.size()); // Send a full CHLO to finish 0-RTT handshake. - auto send_rc = Network::Utility::writeToSocket(client_sockets_.back()->ioHandle(), &first_slice, + auto send_rc = Network::Utility::writeToSocket(client_sockets_.back()->ioHandle(), slice.data(), 1, nullptr, *listen_socket_->localAddress()); - ASSERT_EQ(encrypted_packet->length(), send_rc.rc_); + ASSERT_EQ(slice[0].len_, send_rc.rc_); } - void ReadFromClientSockets() { + void readFromClientSockets() { for (auto& client_socket : client_sockets_) { Buffer::InstancePtr result_buffer(new Buffer::OwnedImpl()); const uint64_t bytes_to_read = 11; @@ -278,9 +265,8 @@ class ActiveQuicListenerTest : public testing::TestWithParam filter_chains_; }; -INSTANTIATE_TEST_SUITE_P(IpVersions, ActiveQuicListenerTest, - testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), - TestUtility::ipTestParamsToString); +INSTANTIATE_TEST_SUITE_P(ActiveQuicListenerTests, ActiveQuicListenerTest, + testing::ValuesIn(generateTestParam()), testParamsToString); TEST_P(ActiveQuicListenerTest, FailSocketOptionUponCreation) { auto option = std::make_unique(); @@ -288,6 +274,7 @@ TEST_P(ActiveQuicListenerTest, FailSocketOptionUponCreation) { .WillOnce(Return(false)); auto options = std::make_shared>(); options->emplace_back(std::move(option)); + EXPECT_CALL(listener_config_, filterChainManager()).WillOnce(ReturnRef(filter_chain_manager_)); EXPECT_THROW_WITH_REGEX( std::make_unique( *dispatcher_, connection_handler_, listen_socket_, listener_config_, quic_config_, @@ -297,15 +284,15 @@ TEST_P(ActiveQuicListenerTest, FailSocketOptionUponCreation) { EnvoyException, "Failed to apply socket options."); } -TEST_P(ActiveQuicListenerTest, ReceiveFullQuicCHLO) { +TEST_P(ActiveQuicListenerTest, ReceiveCHLO) { quic::QuicBufferedPacketStore* const buffered_packets = quic::test::QuicDispatcherPeer::GetBufferedPackets(quic_dispatcher_); configureMocks(/* connection_count = */ 1); - sendFullCHLO(quic::test::TestConnectionId(1)); + sendCHLO(quic::test::TestConnectionId(1)); dispatcher_->run(Event::Dispatcher::RunType::NonBlock); EXPECT_FALSE(buffered_packets->HasChlosBuffered()); EXPECT_FALSE(quic_dispatcher_->session_map().empty()); - ReadFromClientSockets(); + readFromClientSockets(); } TEST_P(ActiveQuicListenerTest, ProcessBufferedChlos) { @@ -315,7 +302,7 @@ TEST_P(ActiveQuicListenerTest, ProcessBufferedChlos) { // Generate one more CHLO than can be processed immediately. 
for (size_t i = 1; i <= ActiveQuicListener::kNumSessionsToCreatePerLoop + 1; ++i) { - sendFullCHLO(quic::test::TestConnectionId(i)); + sendCHLO(quic::test::TestConnectionId(i)); } dispatcher_->run(Event::Dispatcher::RunType::NonBlock); @@ -330,7 +317,7 @@ TEST_P(ActiveQuicListenerTest, ProcessBufferedChlos) { EXPECT_FALSE(quic_dispatcher_->session_map().empty()); // Generate more data to trigger a socket read during the next event loop. - sendFullCHLO(quic::test::TestConnectionId(ActiveQuicListener::kNumSessionsToCreatePerLoop + 2)); + sendCHLO(quic::test::TestConnectionId(ActiveQuicListener::kNumSessionsToCreatePerLoop + 2)); dispatcher_->run(Event::Dispatcher::RunType::NonBlock); // The socket read results in processing all CHLOs. @@ -339,20 +326,20 @@ TEST_P(ActiveQuicListenerTest, ProcessBufferedChlos) { } EXPECT_FALSE(buffered_packets->HasChlosBuffered()); - ReadFromClientSockets(); + readFromClientSockets(); } TEST_P(ActiveQuicListenerTest, QuicProcessingDisabledAndEnabled) { EXPECT_TRUE(ActiveQuicListenerPeer::enabled(*quic_listener_)); Runtime::LoaderSingleton::getExisting()->mergeValues({{"quic.enabled", " false"}}); - sendFullCHLO(quic::test::TestConnectionId(1)); + sendCHLO(quic::test::TestConnectionId(1)); dispatcher_->run(Event::Dispatcher::RunType::NonBlock); // If listener was enabled, there should have been session created for active connection. EXPECT_TRUE(quic_dispatcher_->session_map().empty()); EXPECT_FALSE(ActiveQuicListenerPeer::enabled(*quic_listener_)); Runtime::LoaderSingleton::getExisting()->mergeValues({{"quic.enabled", " true"}}); configureMocks(/* connection_count = */ 1); - sendFullCHLO(quic::test::TestConnectionId(1)); + sendCHLO(quic::test::TestConnectionId(1)); dispatcher_->run(Event::Dispatcher::RunType::NonBlock); EXPECT_FALSE(quic_dispatcher_->session_map().empty()); EXPECT_TRUE(ActiveQuicListenerPeer::enabled(*quic_listener_)); @@ -367,21 +354,21 @@ class ActiveQuicListenerEmptyFlagConfigTest : public ActiveQuicListenerTest { } }; -INSTANTIATE_TEST_SUITE_P(IpVersions, ActiveQuicListenerEmptyFlagConfigTest, - testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), - TestUtility::ipTestParamsToString); +INSTANTIATE_TEST_SUITE_P(ActiveQuicListenerEmptyFlagConfigTests, + ActiveQuicListenerEmptyFlagConfigTest, + testing::ValuesIn(generateTestParam()), testParamsToString); // Quic listener should be enabled by default, if not enabled explicitly in config. 
TEST_P(ActiveQuicListenerEmptyFlagConfigTest, ReceiveFullQuicCHLO) { quic::QuicBufferedPacketStore* const buffered_packets = quic::test::QuicDispatcherPeer::GetBufferedPackets(quic_dispatcher_); configureMocks(/* connection_count = */ 1); - sendFullCHLO(quic::test::TestConnectionId(1)); + sendCHLO(quic::test::TestConnectionId(1)); dispatcher_->run(Event::Dispatcher::RunType::NonBlock); EXPECT_FALSE(buffered_packets->HasChlosBuffered()); EXPECT_FALSE(quic_dispatcher_->session_map().empty()); EXPECT_TRUE(ActiveQuicListenerPeer::enabled(*quic_listener_)); - ReadFromClientSockets(); + readFromClientSockets(); } } // namespace Quic diff --git a/test/extensions/quic_listeners/quiche/crypto_test_utils_for_envoy.cc b/test/extensions/quic_listeners/quiche/crypto_test_utils_for_envoy.cc index 22df487392d4..c5b7a11d70e3 100644 --- a/test/extensions/quic_listeners/quiche/crypto_test_utils_for_envoy.cc +++ b/test/extensions/quic_listeners/quiche/crypto_test_utils_for_envoy.cc @@ -19,20 +19,23 @@ #endif #include -#include "extensions/quic_listeners/quiche/envoy_quic_fake_proof_source.h" #include "extensions/quic_listeners/quiche/envoy_quic_fake_proof_verifier.h" +#include "test/extensions/quic_listeners/quiche/test_proof_source.h" namespace quic { namespace test { namespace crypto_test_utils { +// NOLINTNEXTLINE(readability-identifier-naming) std::unique_ptr ProofSourceForTesting() { - return std::make_unique(); + return std::make_unique(); } +// NOLINTNEXTLINE(readability-identifier-naming) std::unique_ptr ProofVerifierForTesting() { return std::make_unique(); } +// NOLINTNEXTLINE(readability-identifier-naming) std::unique_ptr ProofVerifyContextForTesting() { // No context needed for fake verifier. return nullptr; diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_alarm_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_alarm_test.cc index 0b020afb7888..0e0ab28bb48d 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_alarm_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_alarm_test.cc @@ -154,10 +154,11 @@ TEST_F(EnvoyQuicAlarmTest, SetAlarmToPastTime) { EXPECT_EQ(100, (clock_.Now() - quic::QuicTime::Zero()).ToMilliseconds()); auto unowned_delegate = new TestDelegate(); quic::QuicArenaScopedPtr alarm(alarm_factory_.CreateAlarm(unowned_delegate)); - // alarm becomes active upon Set(). + // Alarm will be active 1ms after Update() for the purpose of avoiding firing + // in the same event loop. alarm->Set(clock_.Now() - QuicTime::Delta::FromMilliseconds(10)); EXPECT_FALSE(unowned_delegate->fired()); - dispatcher_->run(Dispatcher::RunType::NonBlock); + advanceMsAndLoop(1); EXPECT_TRUE(unowned_delegate->fired()); } @@ -168,9 +169,10 @@ TEST_F(EnvoyQuicAlarmTest, UpdateAlarmWithPastDeadline) { advanceMsAndLoop(9); EXPECT_EQ(9, (clock_.Now() - quic::QuicTime::Zero()).ToMilliseconds()); EXPECT_FALSE(unowned_delegate->fired()); - // alarm becomes active upon Update(). + // Alarm will be active 1ms after Update() for the purpose of avoiding firing + // in the same event loop. 
alarm->Update(clock_.Now() - QuicTime::Delta::FromMilliseconds(1), quic::QuicTime::Delta::Zero()); - dispatcher_->run(Dispatcher::RunType::NonBlock); + advanceMsAndLoop(1); EXPECT_TRUE(unowned_delegate->fired()); unowned_delegate->set_fired(false); advanceMsAndLoop(1); diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_client_session_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_client_session_test.cc index f489a983b43e..3b4f7cbda335 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_client_session_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_client_session_test.cc @@ -16,6 +16,7 @@ #include "extensions/quic_listeners/quiche/envoy_quic_connection_helper.h" #include "extensions/quic_listeners/quiche/envoy_quic_alarm_factory.h" #include "extensions/quic_listeners/quiche/envoy_quic_utils.h" +#include "test/extensions/quic_listeners/quiche/test_utils.h" #include "envoy/stats/stats_macros.h" #include "test/mocks/event/mocks.h" @@ -62,9 +63,9 @@ class TestQuicCryptoClientStream : public quic::QuicCryptoClientStream { TestQuicCryptoClientStream(const quic::QuicServerId& server_id, quic::QuicSession* session, std::unique_ptr verify_context, quic::QuicCryptoClientConfig* crypto_config, - ProofHandler* proof_handler) + ProofHandler* proof_handler, bool has_application_state) : quic::QuicCryptoClientStream(server_id, session, std::move(verify_context), crypto_config, - proof_handler) {} + proof_handler, has_application_state) {} bool encryption_established() const override { return true; } }; @@ -84,7 +85,7 @@ class TestEnvoyQuicClientSession : public EnvoyQuicClientSession { std::unique_ptr CreateQuicCryptoStream() override { return std::make_unique( server_id(), this, crypto_config()->proof_verifier()->CreateDefaultContext(), - crypto_config(), this); + crypto_config(), this, true); } }; @@ -122,6 +123,8 @@ class EnvoyQuicClientSessionTest : public testing::TestWithParam { void SetUp() override { envoy_quic_session_.Initialize(); + setQuicConfigWithDefaultValues(envoy_quic_session_.config()); + envoy_quic_session_.OnConfigNegotiated(); envoy_quic_session_.addConnectionCallbacks(network_connection_callbacks_); envoy_quic_session_.setConnectionStats( {read_total_, read_current_, write_total_, write_current_, nullptr, nullptr}); diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_client_stream_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_client_stream_test.cc index 99a2931d933d..583a1ee8521c 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_client_stream_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_client_stream_test.cc @@ -38,12 +38,13 @@ class EnvoyQuicClientStreamTest : public testing::TestWithParam { createConnectionSocket(peer_addr_, self_addr_, nullptr))), quic_session_(quic_config_, {quic_version_}, quic_connection_, *dispatcher_, quic_config_.GetInitialStreamFlowControlWindowToSend() * 2), - stream_id_(quic_version_.transport_version == quic::QUIC_VERSION_IETF_DRAFT_27 ? 4u : 5u), + stream_id_(quic::VersionUsesHttp3(quic_version_.transport_version) ? 
4u : 5u), quic_stream_(new EnvoyQuicClientStream(stream_id_, &quic_session_, quic::BIDIRECTIONAL)), request_headers_{{":authority", host_}, {":method", "POST"}, {":path", "/"}} { quic_stream_->setResponseDecoder(stream_decoder_); quic_stream_->addCallbacks(stream_callbacks_); quic_session_.ActivateStream(std::unique_ptr(quic_stream_)); + EXPECT_CALL(quic_session_, ShouldYield(_)).WillRepeatedly(testing::Return(false)); EXPECT_CALL(quic_session_, WritevData(_, _, _, _, _, _)) .WillRepeatedly(Invoke([](quic::QuicStreamId, size_t write_length, quic::QuicStreamOffset, quic::StreamSendingState state, bool, @@ -59,6 +60,8 @@ class EnvoyQuicClientStreamTest : public testing::TestWithParam { void SetUp() override { quic_session_.Initialize(); + setQuicConfigWithDefaultValues(quic_session_.config()); + quic_session_.OnConfigNegotiated(); quic_connection_->setUpConnectionSocket(); response_headers_.OnHeaderBlockStart(); response_headers_.OnHeader(":status", "200"); @@ -67,7 +70,7 @@ class EnvoyQuicClientStreamTest : public testing::TestWithParam { trailers_.OnHeaderBlockStart(); trailers_.OnHeader("key1", "value1"); - if (quic_version_.transport_version != quic::QUIC_VERSION_IETF_DRAFT_27) { + if (!quic::VersionUsesHttp3(quic_version_.transport_version)) { // ":final-offset" is required and stripped off by quic. trailers_.OnHeader(":final-offset", absl::StrCat("", response_body_.length())); } @@ -135,7 +138,7 @@ TEST_P(EnvoyQuicClientStreamTest, PostRequestAndResponse) { EXPECT_EQ(0, buffer.length()); })); std::string data = response_body_; - if (quic_version_.transport_version == quic::QUIC_VERSION_IETF_DRAFT_27) { + if (quic::VersionUsesHttp3(quic_version_.transport_version)) { std::unique_ptr data_buffer; quic::QuicByteCount data_frame_header_length = quic::HttpEncoder::SerializeDataFrameHeader(response_body_.length(), &data_buffer); @@ -173,7 +176,7 @@ TEST_P(EnvoyQuicClientStreamTest, OutOfOrderTrailers) { quic_stream_->OnStreamHeaderList(/*fin=*/true, trailers_.uncompressed_header_bytes(), trailers_); std::string data = response_body_; - if (quic_version_.transport_version == quic::QUIC_VERSION_IETF_DRAFT_27) { + if (quic::VersionUsesHttp3(quic_version_.transport_version)) { std::unique_ptr data_buffer; quic::QuicByteCount data_frame_header_length = quic::HttpEncoder::SerializeDataFrameHeader(response_body_.length(), &data_buffer); diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_dispatcher_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_dispatcher_test.cc index f6357feb98be..43cd3852ecbe 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_dispatcher_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_dispatcher_test.cc @@ -9,6 +9,7 @@ #include "quiche/quic/core/quic_dispatcher.h" #include "quiche/quic/test_tools/quic_dispatcher_peer.h" #include "quiche/quic/test_tools/crypto_test_utils.h" + #include "quiche/quic/test_tools/quic_test_utils.h" #include "quiche/common/platform/api/quiche_text_utils.h" #pragma GCC diagnostic pop @@ -25,7 +26,8 @@ #include "extensions/quic_listeners/quiche/platform/envoy_quic_clock.h" #include "extensions/quic_listeners/quiche/envoy_quic_utils.h" #include "extensions/quic_listeners/quiche/envoy_quic_dispatcher.h" -#include "extensions/quic_listeners/quiche/envoy_quic_fake_proof_source.h" +#include "test/extensions/quic_listeners/quiche/test_proof_source.h" +#include "test/extensions/quic_listeners/quiche/test_utils.h" #include "extensions/quic_listeners/quiche/envoy_quic_alarm_factory.h" #include 
"extensions/quic_listeners/quiche/envoy_quic_utils.h" #include "extensions/transport_sockets/well_known_names.h" @@ -44,20 +46,23 @@ namespace { const size_t kNumSessionsToCreatePerLoopForTests = 16; } -class EnvoyQuicDispatcherTest : public testing::TestWithParam, +class EnvoyQuicDispatcherTest : public QuicMultiVersionTest, protected Logger::Loggable { public: EnvoyQuicDispatcherTest() - : version_(GetParam()), api_(Api::createApiForTest(time_system_)), + : version_(GetParam().first), api_(Api::createApiForTest(time_system_)), dispatcher_(api_->allocateDispatcher("test_thread")), listen_socket_(std::make_unique>>( Network::Test::getCanonicalLoopbackAddress(version_), nullptr, /*bind*/ true)), connection_helper_(*dispatcher_), crypto_config_(quic::QuicCryptoServerConfig::TESTING, quic::QuicRandom::GetInstance(), - std::make_unique(), - quic::KeyExchangeSource::Default()), - version_manager_(quic::CurrentSupportedVersions()), + std::make_unique(), quic::KeyExchangeSource::Default()), + version_manager_([]() { + SetQuicReloadableFlag(quic_enable_version_draft_27, GetParam().second); + return quic::CurrentSupportedVersions(); + }()), + quic_version_(quic::CurrentSupportedVersions()[0]), listener_stats_({ALL_LISTENER_STATS(POOL_COUNTER(listener_config_.listenerScope()), POOL_GAUGE(listener_config_.listenerScope()), POOL_HISTOGRAM(listener_config_.listenerScope()))}), @@ -95,40 +100,23 @@ class EnvoyQuicDispatcherTest : public testing::TestWithParamrun(Event::Dispatcher::RunType::NonBlock); } - // TODO(bencebeky): Factor out parts common with - // ActiveQuicListenerTest::SendFullCHLO() to test_utils. std::unique_ptr - createFullChloPacket(quic::QuicSocketAddress client_address) { + createChloReceivedPacket(quic::QuicSocketAddress client_address) { EnvoyQuicClock clock(*dispatcher_); - quic::CryptoHandshakeMessage chlo = quic::test::crypto_test_utils::GenerateDefaultInchoateCHLO( - &clock, quic::AllSupportedVersions()[0].transport_version, &crypto_config_); - chlo.SetVector(quic::kCOPT, quic::QuicTagVector{quic::kREJ}); - chlo.SetStringPiece(quic::kSNI, "www.abc.com"); - quic::CryptoHandshakeMessage full_chlo; - quic::QuicReferenceCountedPointer signed_config( - new quic::QuicSignedServerConfig); - quic::QuicCompressedCertsCache cache( - quic::QuicCompressedCertsCache::kQuicCompressedCertsCacheSize); - quic::test::crypto_test_utils::GenerateFullCHLO( - chlo, &crypto_config_, + Buffer::OwnedImpl payload = generateChloPacketToSend( + quic_version_, quic_config_, crypto_config_, connection_id_, clock, envoyAddressInstanceToQuicSocketAddress(listen_socket_->localAddress()), client_address, - quic::AllSupportedVersions()[0].transport_version, &clock, signed_config, &cache, - &full_chlo); - // Overwrite version label to highest current supported version. 
- full_chlo.SetVersion(quic::kVER, quic::CurrentSupportedVersions()[0]); - quic::QuicConfig quic_config; - quic_config.ToHandshakeMessage(&full_chlo, - quic::CurrentSupportedVersions()[0].transport_version); - - std::string packet_content(full_chlo.GetSerialized().AsStringPiece()); - std::unique_ptr encrypted_packet( - quic::test::ConstructEncryptedPacket(connection_id_, quic::EmptyQuicConnectionId(), - /*version_flag=*/true, /*reset_flag*/ false, - /*packet_number=*/1, packet_content)); + "test.example.org"); + Buffer::RawSliceVector slice = payload.getRawSlices(); + ASSERT(slice.size() == 1); + auto encrypted_packet = std::make_unique( + static_cast(slice[0].mem_), slice[0].len_); return std::unique_ptr( quic::test::ConstructReceivedPacket(*encrypted_packet, clock.Now())); } + bool quicVersionUsesHttp3() { return quic::VersionUsesHttp3(quic_version_.transport_version); } + protected: Network::Address::IpVersion version_; Event::SimulatedTimeSystemHelper time_system_; @@ -139,7 +127,7 @@ class EnvoyQuicDispatcherTest : public testing::TestWithParam listener_config_; Server::ListenerStats listener_stats_; Server::PerHandlerListenerStats per_worker_stats_; @@ -148,9 +136,8 @@ class EnvoyQuicDispatcherTest : public testing::TestWithParam received_packet = createFullChloPacket(peer_addr); + std::unique_ptr received_packet = createChloReceivedPacket(peer_addr); envoy_quic_dispatcher_.ProcessPacket( envoyAddressInstanceToQuicSocketAddress(listen_socket_->localAddress()), peer_addr, *received_packet); @@ -219,7 +209,7 @@ TEST_P(EnvoyQuicDispatcherTest, CreateNewConnectionUponCHLO) { EXPECT_TRUE( envoy_quic_dispatcher_.session_map().find(connection_id_)->second->IsEncryptionEstablished()); EXPECT_EQ(1u, connection_handler_.numConnections()); - EXPECT_EQ("www.abc.com", read_filter->callbacks_->connection().requestedServerName()); + EXPECT_EQ("test.example.org", read_filter->callbacks_->connection().requestedServerName()); EXPECT_EQ(peer_addr, envoyAddressInstanceToQuicSocketAddress( read_filter->callbacks_->connection().remoteAddress())); EXPECT_EQ(*listen_socket_->localAddress(), *read_filter->callbacks_->connection().localAddress()); @@ -270,8 +260,9 @@ TEST_P(EnvoyQuicDispatcherTest, CreateNewConnectionUponBufferedCHLO) { EXPECT_CALL(*read_filter, onNewConnection()) // Stop iteration to avoid calling getRead/WriteBuffer(). .WillOnce(Return(Network::FilterStatus::StopIteration)); - EXPECT_CALL(network_connection_callbacks, onEvent(Network::ConnectionEvent::Connected)); - + if (!quicVersionUsesHttp3()) { + EXPECT_CALL(network_connection_callbacks, onEvent(Network::ConnectionEvent::Connected)); + } quic::QuicBufferedPacketStore* buffered_packets = quic::test::QuicDispatcherPeer::GetBufferedPackets(&envoy_quic_dispatcher_); EXPECT_FALSE(buffered_packets->HasChlosBuffered()); @@ -279,7 +270,7 @@ TEST_P(EnvoyQuicDispatcherTest, CreateNewConnectionUponBufferedCHLO) { // Incoming CHLO packet is buffered, because ProcessPacket() is called before // ProcessBufferedChlos(). 
- std::unique_ptr received_packet = createFullChloPacket(peer_addr); + std::unique_ptr received_packet = createChloReceivedPacket(peer_addr); envoy_quic_dispatcher_.ProcessPacket( envoyAddressInstanceToQuicSocketAddress(listen_socket_->localAddress()), peer_addr, *received_packet); @@ -296,7 +287,7 @@ TEST_P(EnvoyQuicDispatcherTest, CreateNewConnectionUponBufferedCHLO) { EXPECT_TRUE( envoy_quic_dispatcher_.session_map().find(connection_id_)->second->IsEncryptionEstablished()); EXPECT_EQ(1u, connection_handler_.numConnections()); - EXPECT_EQ("www.abc.com", read_filter->callbacks_->connection().requestedServerName()); + EXPECT_EQ("test.example.org", read_filter->callbacks_->connection().requestedServerName()); EXPECT_EQ(peer_addr, envoyAddressInstanceToQuicSocketAddress( read_filter->callbacks_->connection().remoteAddress())); EXPECT_EQ(*listen_socket_->localAddress(), *read_filter->callbacks_->connection().localAddress()); @@ -318,7 +309,7 @@ TEST_P(EnvoyQuicDispatcherTest, CloseConnectionDueToMissingFilterChain) { EXPECT_EQ(peer_addr, envoyAddressInstanceToQuicSocketAddress(socket.remoteAddress())); return nullptr; })); - std::unique_ptr received_packet = createFullChloPacket(peer_addr); + std::unique_ptr received_packet = createChloReceivedPacket(peer_addr); envoy_quic_dispatcher_.ProcessBufferedChlos(kNumSessionsToCreatePerLoopForTests); envoy_quic_dispatcher_.ProcessPacket( envoyAddressInstanceToQuicSocketAddress(listen_socket_->localAddress()), peer_addr, @@ -350,7 +341,7 @@ TEST_P(EnvoyQuicDispatcherTest, CloseConnectionDueToEmptyFilterChain) { std::vector filter_factory; EXPECT_CALL(filter_chain, networkFilterFactories()).WillOnce(ReturnRef(filter_factory)); - std::unique_ptr received_packet = createFullChloPacket(peer_addr); + std::unique_ptr received_packet = createChloReceivedPacket(peer_addr); envoy_quic_dispatcher_.ProcessBufferedChlos(kNumSessionsToCreatePerLoopForTests); envoy_quic_dispatcher_.ProcessPacket( envoyAddressInstanceToQuicSocketAddress(listen_socket_->localAddress()), peer_addr, diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_proof_source_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_proof_source_test.cc index d25e190f5a7a..965b090fbf13 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_proof_source_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_proof_source_test.cc @@ -1,11 +1,21 @@ +#include #include #include -#include "extensions/quic_listeners/quiche/envoy_quic_fake_proof_source.h" #include "extensions/quic_listeners/quiche/envoy_quic_fake_proof_verifier.h" +#include "extensions/quic_listeners/quiche/envoy_quic_proof_source.h" +#include "extensions/quic_listeners/quiche/envoy_quic_utils.h" + +#include "test/mocks/network/mocks.h" +#include "test/mocks/ssl/mocks.h" #include "gmock/gmock.h" #include "gtest/gtest.h" +#include "quiche/quic/test_tools/test_certificates.h" + +using testing::Invoke; +using testing::Return; +using testing::ReturnRef; namespace Envoy { @@ -13,62 +23,93 @@ namespace Quic { class TestGetProofCallback : public quic::ProofSource::Callback { public: - TestGetProofCallback(bool& called, std::string signature, std::string leaf_cert_scts, - std::vector certs) - : called_(called), expected_signature_(std::move(signature)), - expected_leaf_certs_scts_(std::move(leaf_cert_scts)), expected_certs_(std::move(certs)) {} + TestGetProofCallback(bool& called, std::string leaf_cert_scts, const absl::string_view cert) + : called_(called), expected_leaf_certs_scts_(std::move(leaf_cert_scts)), + 
expected_leaf_cert_(cert) {} // quic::ProofSource::Callback void Run(bool ok, const quic::QuicReferenceCountedPointer& chain, const quic::QuicCryptoProof& proof, std::unique_ptr details) override { EXPECT_TRUE(ok); - EXPECT_EQ(expected_signature_, proof.signature); EXPECT_EQ(expected_leaf_certs_scts_, proof.leaf_cert_scts); - EXPECT_EQ(expected_certs_, chain->certs); + EXPECT_EQ(2, chain->certs.size()); + EXPECT_EQ(expected_leaf_cert_, chain->certs[0]); EXPECT_EQ(nullptr, details); called_ = true; } private: bool& called_; - std::string expected_signature_; std::string expected_leaf_certs_scts_; - std::vector expected_certs_; + absl::string_view expected_leaf_cert_; }; -class EnvoyQuicFakeProofSourceTest : public ::testing::Test { +class EnvoyQuicProofSourceTest : public ::testing::Test { +public: + EnvoyQuicProofSourceTest() + : server_address_(quic::QuicIpAddress::Loopback4(), 12345), + client_address_(quic::QuicIpAddress::Loopback4(), 54321), + listen_socket_(std::make_shared()), + proof_source_(listen_socket_, filter_chain_manager_) {} + protected: std::string hostname_{"www.fake.com"}; quic::QuicSocketAddress server_address_; + quic::QuicSocketAddress client_address_; quic::QuicTransportVersion version_{quic::QUIC_VERSION_UNSUPPORTED}; quiche::QuicheStringPiece chlo_hash_{""}; std::string server_config_{"Server Config"}; - std::vector expected_certs_{"Fake cert"}; - std::string expected_signature_{absl::StrCat("Fake signature for { ", server_config_, " }")}; - EnvoyQuicFakeProofSource proof_source_; + std::string expected_certs_{quic::test::kTestCertificateChainPem}; + std::string pkey_{quic::test::kTestCertificatePrivateKeyPem}; + Network::MockFilterChain filter_chain_; + Network::MockFilterChainManager filter_chain_manager_; + std::shared_ptr listen_socket_; + EnvoyQuicProofSource proof_source_; EnvoyQuicFakeProofVerifier proof_verifier_; }; -TEST_F(EnvoyQuicFakeProofSourceTest, TestGetProof) { +TEST_F(EnvoyQuicProofSourceTest, TestGetProof) { bool called = false; - auto callback = std::make_unique(called, expected_signature_, - "Fake timestamp", expected_certs_); - proof_source_.GetProof(server_address_, hostname_, server_config_, version_, chlo_hash_, - std::move(callback)); + auto callback = std::make_unique(called, "Fake timestamp", + quic::test::kTestCertificate); + EXPECT_CALL(*listen_socket_, ioHandle()).Times(2); + EXPECT_CALL(filter_chain_manager_, findFilterChain(_)) + .WillRepeatedly(Invoke([&](const Network::ConnectionSocket& connection_socket) { + EXPECT_EQ(*quicAddressToEnvoyAddressInstance(server_address_), + *connection_socket.localAddress()); + EXPECT_EQ(*quicAddressToEnvoyAddressInstance(client_address_), + *connection_socket.remoteAddress()); + EXPECT_EQ(Extensions::TransportSockets::TransportProtocolNames::get().Quic, + connection_socket.detectedTransportProtocol()); + EXPECT_EQ("h2", connection_socket.requestedApplicationProtocols()[0]); + return &filter_chain_; + })); + auto server_context_config = std::make_unique(); + auto server_context_config_ptr = server_context_config.get(); + QuicServerTransportSocketFactory transport_socket_factory(std::move(server_context_config)); + EXPECT_CALL(filter_chain_, transportSocketFactory()) + .WillRepeatedly(ReturnRef(transport_socket_factory)); + + Ssl::MockTlsCertificateConfig tls_cert_config; + std::vector> tls_cert_configs{ + std::reference_wrapper(tls_cert_config)}; + EXPECT_CALL(*server_context_config_ptr, tlsCertificates()) + .WillRepeatedly(Return(tls_cert_configs)); + EXPECT_CALL(tls_cert_config, 
certificateChain()).WillOnce(ReturnRef(expected_certs_)); + EXPECT_CALL(tls_cert_config, privateKey()).WillOnce(ReturnRef(pkey_)); + proof_source_.GetProof(server_address_, client_address_, hostname_, server_config_, version_, + chlo_hash_, std::move(callback)); EXPECT_TRUE(called); -} -TEST_F(EnvoyQuicFakeProofSourceTest, TestVerifyProof) { EXPECT_EQ(quic::QUIC_SUCCESS, proof_verifier_.VerifyProof(hostname_, /*port=*/0, server_config_, version_, chlo_hash_, - expected_certs_, "", expected_signature_, nullptr, nullptr, + {"Fake cert"}, "", "fake signature", nullptr, nullptr, nullptr, nullptr)); - std::vector wrong_certs{"wrong cert"}; EXPECT_EQ(quic::QUIC_FAILURE, proof_verifier_.VerifyProof(hostname_, /*port=*/0, server_config_, version_, chlo_hash_, - wrong_certs, "Fake timestamp", expected_signature_, nullptr, - nullptr, nullptr, nullptr)); + {"Fake cert", "Unexpected cert"}, "Fake timestamp", + "fake signature", nullptr, nullptr, nullptr, nullptr)); } } // namespace Quic diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_server_session_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_server_session_test.cc index 188603a527e3..71fc30c1b857 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_server_session_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_server_session_test.cc @@ -9,7 +9,6 @@ #include "quiche/quic/core/quic_utils.h" #include "quiche/quic/core/quic_versions.h" #include "quiche/quic/test_tools/crypto_test_utils.h" -#include "quiche/quic/test_tools/quic_config_peer.h" #include "quiche/quic/test_tools/quic_connection_peer.h" #include "quiche/quic/test_tools/quic_server_session_base_peer.h" #include "quiche/quic/test_tools/quic_test_utils.h" @@ -25,7 +24,8 @@ #include "extensions/quic_listeners/quiche/envoy_quic_connection_helper.h" #include "extensions/quic_listeners/quiche/envoy_quic_alarm_factory.h" #include "extensions/quic_listeners/quiche/envoy_quic_utils.h" -#include "extensions/quic_listeners/quiche/envoy_quic_fake_proof_source.h" +#include "test/extensions/quic_listeners/quiche/test_proof_source.h" +#include "test/extensions/quic_listeners/quiche/test_utils.h" #include "extensions/transport_sockets/well_known_names.h" #include "envoy/stats/stats_macros.h" @@ -113,8 +113,7 @@ class EnvoyQuicServerSessionTest : public testing::TestWithParam { connection_helper_, alarm_factory_, writer_, quic_version_, listener_config_, listener_stats_, *listener_config_.socket_)), crypto_config_(quic::QuicCryptoServerConfig::TESTING, quic::QuicRandom::GetInstance(), - std::make_unique(), - quic::KeyExchangeSource::Default()), + std::make_unique(), quic::KeyExchangeSource::Default()), envoy_quic_session_(quic_config_, quic_version_, std::unique_ptr(quic_connection_), /*visitor=*/nullptr, &crypto_stream_helper_, &crypto_config_, @@ -138,7 +137,22 @@ class EnvoyQuicServerSessionTest : public testing::TestWithParam { ON_CALL(crypto_stream_helper_, CanAcceptClientHello(_, _, _, _, _)).WillByDefault(Return(true)); } - void SetUp() override { envoy_quic_session_.Initialize(); } + void SetUp() override { + envoy_quic_session_.Initialize(); + setQuicConfigWithDefaultValues(envoy_quic_session_.config()); + envoy_quic_session_.OnConfigNegotiated(); + + // Switch to a encryption forward secure crypto stream. 
+ quic::test::QuicServerSessionBasePeer::SetCryptoStream(&envoy_quic_session_, nullptr); + quic::test::QuicServerSessionBasePeer::SetCryptoStream( + &envoy_quic_session_, + new TestQuicCryptoServerStream(&crypto_config_, &compressed_certs_cache_, + &envoy_quic_session_, &crypto_stream_helper_)); + quic_connection_->SetDefaultEncryptionLevel(quic::ENCRYPTION_FORWARD_SECURE); + quic_connection_->SetEncrypter( + quic::ENCRYPTION_FORWARD_SECURE, + std::make_unique(quic::Perspective::IS_SERVER)); + } bool installReadFilter() { // Setup read filter. @@ -171,7 +185,7 @@ class EnvoyQuicServerSessionTest : public testing::TestWithParam { return request_decoder; })); quic::QuicStreamId stream_id = - quic_version_[0].transport_version == quic::QUIC_VERSION_IETF_DRAFT_27 ? 4u : 5u; + quic::VersionUsesHttp3(quic_version_[0].transport_version) ? 4u : 5u; return envoy_quic_session_.GetOrCreateStream(stream_id); } @@ -223,7 +237,7 @@ TEST_P(EnvoyQuicServerSessionTest, NewStream) { EXPECT_CALL(http_connection_callbacks_, newStream(_, false)) .WillOnce(testing::ReturnRef(request_decoder)); quic::QuicStreamId stream_id = - quic_version_[0].transport_version == quic::QUIC_VERSION_IETF_DRAFT_27 ? 4u : 5u; + quic::VersionUsesHttp3(quic_version_[0].transport_version) ? 4u : 5u; auto stream = reinterpret_cast(envoy_quic_session_.GetOrCreateStream(stream_id)); // Receive a GET request on created stream. @@ -251,12 +265,15 @@ TEST_P(EnvoyQuicServerSessionTest, InvalidIncomingStreamId) { Http::MockStreamCallbacks stream_callbacks; // IETF stream 5 and G-Quic stream 2 are server initiated. quic::QuicStreamId stream_id = - quic_version_[0].transport_version == quic::QUIC_VERSION_IETF_DRAFT_27 ? 5u : 2u; + quic::VersionUsesHttp3(quic_version_[0].transport_version) ? 5u : 2u; std::string data("aaaa"); quic::QuicStreamFrame stream_frame(stream_id, false, 0, data); EXPECT_CALL(http_connection_callbacks_, newStream(_, false)).Times(0); - EXPECT_CALL(*quic_connection_, SendConnectionClosePacket(quic::QUIC_INVALID_STREAM_ID, - "Data for nonexistent stream")); + EXPECT_CALL(*quic_connection_, + SendConnectionClosePacket((quic::VersionUsesHttp3(quic_version_[0].transport_version) + ? quic::QUIC_HTTP_STREAM_WRONG_DIRECTION + : quic::QUIC_INVALID_STREAM_ID), + "Data for nonexistent stream")); EXPECT_CALL(network_connection_callbacks_, onEvent(Network::ConnectionEvent::LocalClose)); envoy_quic_session_.OnStreamFrame(stream_frame); @@ -268,10 +285,13 @@ TEST_P(EnvoyQuicServerSessionTest, NoNewStreamForInvalidIncomingStream) { Http::MockStreamCallbacks stream_callbacks; // IETF stream 5 and G-Quic stream 2 are server initiated. quic::QuicStreamId stream_id = - quic_version_[0].transport_version == quic::QUIC_VERSION_IETF_DRAFT_27 ? 5u : 2u; + quic::VersionUsesHttp3(quic_version_[0].transport_version) ? 5u : 2u; EXPECT_CALL(http_connection_callbacks_, newStream(_, false)).Times(0); - EXPECT_CALL(*quic_connection_, SendConnectionClosePacket(quic::QUIC_INVALID_STREAM_ID, - "Data for nonexistent stream")); + EXPECT_CALL(*quic_connection_, + SendConnectionClosePacket(quic::VersionUsesHttp3(quic_version_[0].transport_version) + ? quic::QUIC_HTTP_STREAM_WRONG_DIRECTION + : quic::QUIC_INVALID_STREAM_ID, + "Data for nonexistent stream")); EXPECT_CALL(network_connection_callbacks_, onEvent(Network::ConnectionEvent::LocalClose)); // Stream creation on closed connection should fail. 
@@ -286,13 +306,14 @@ TEST_P(EnvoyQuicServerSessionTest, OnResetFrame) { quic::QuicRstStreamFrame rst1(/*control_frame_id=*/1u, stream1->id(), quic::QUIC_ERROR_PROCESSING_STREAM, /*bytes_written=*/0u); EXPECT_CALL(stream_callbacks, onResetStream(Http::StreamResetReason::RemoteReset, _)); - if (quic_version_[0].transport_version < quic::QUIC_VERSION_IETF_DRAFT_27) { + if (!quic::VersionUsesHttp3(quic_version_[0].transport_version)) { EXPECT_CALL(*quic_connection_, SendControlFrame(_)) .WillOnce(Invoke([stream_id = stream1->id()](const quic::QuicFrame& frame) { EXPECT_EQ(stream_id, frame.rst_stream_frame->stream_id); EXPECT_EQ(quic::QUIC_RST_ACKNOWLEDGEMENT, frame.rst_stream_frame->error_code); return false; })); + } else { } stream1->OnStreamReset(rst1); @@ -382,16 +403,6 @@ TEST_P(EnvoyQuicServerSessionTest, FlushCloseWithDataToWrite) { // timer. TEST_P(EnvoyQuicServerSessionTest, WriteUpdatesDelayCloseTimer) { installReadFilter(); - // Switch to a encryption forward secure crypto stream. - quic::test::QuicServerSessionBasePeer::SetCryptoStream(&envoy_quic_session_, nullptr); - quic::test::QuicServerSessionBasePeer::SetCryptoStream( - &envoy_quic_session_, - new TestQuicCryptoServerStream(&crypto_config_, &compressed_certs_cache_, - &envoy_quic_session_, &crypto_stream_helper_)); - quic_connection_->SetDefaultEncryptionLevel(quic::ENCRYPTION_FORWARD_SECURE); - quic_connection_->SetEncrypter( - quic::ENCRYPTION_FORWARD_SECURE, - std::make_unique(quic::Perspective::IS_SERVER)); // Drive congestion control manually. auto send_algorithm = new testing::NiceMock; quic::test::QuicConnectionPeer::SetSendAlgorithm(quic_connection_, send_algorithm); @@ -693,17 +704,18 @@ TEST_P(EnvoyQuicServerSessionTest, ShutdownNotice) { TEST_P(EnvoyQuicServerSessionTest, GoAway) { installReadFilter(); - EXPECT_CALL(*quic_connection_, SendControlFrame(_)); + testing::NiceMock debug_visitor; + envoy_quic_session_.set_debug_visitor(&debug_visitor); + if (quic::VersionUsesHttp3(quic_version_[0].transport_version)) { + EXPECT_CALL(debug_visitor, OnGoAwayFrameSent(_)); + } else { + EXPECT_CALL(*quic_connection_, SendControlFrame(_)); + } http_connection_->goAway(); } TEST_P(EnvoyQuicServerSessionTest, InitializeFilterChain) { - // Generate a CHLO packet. - quic::CryptoHandshakeMessage chlo = quic::test::crypto_test_utils::GenerateDefaultInchoateCHLO( - connection_helper_.GetClock(), quic::CurrentSupportedVersions()[0].transport_version, - &crypto_config_); - chlo.SetVector(quic::kCOPT, quic::QuicTagVector{quic::kREJ}); - std::string packet_content(chlo.GetSerialized().AsStringPiece()); + std::string packet_content("random payload"); auto encrypted_packet = std::unique_ptr(quic::test::ConstructEncryptedPacket( quic_connection_->connection_id(), quic::EmptyQuicConnectionId(), /*version_flag=*/true, @@ -746,13 +758,12 @@ TEST_P(EnvoyQuicServerSessionTest, InitializeFilterChain) { Server::Configuration::FilterChainUtility::buildFilterChain(connection, filter_factories); return true; })); - // A reject should be sent because of the inchoate CHLO. - EXPECT_CALL(writer_, WritePacket(_, _, _, _, _)) - .WillOnce(testing::Return(quic::WriteResult(quic::WRITE_STATUS_OK, 1))); + // Connection should be closed because this packet has invalid payload. 
+ EXPECT_CALL(*quic_connection_, SendConnectionClosePacket(_, _)); + EXPECT_CALL(network_connection_callbacks_, onEvent(Network::ConnectionEvent::LocalClose)); quic_connection_->ProcessUdpPacket(self_address, quic_connection_->peer_address(), *packet); - EXPECT_TRUE(quic_connection_->connected()); + EXPECT_FALSE(quic_connection_->connected()); EXPECT_EQ(nullptr, envoy_quic_session_.socketOptions()); - EXPECT_FALSE(envoy_quic_session_.IsEncryptionEstablished()); EXPECT_TRUE(quic_connection_->connectionSocket()->ioHandle().isOpen()); EXPECT_TRUE(quic_connection_->connectionSocket()->ioHandle().close().ok()); EXPECT_FALSE(quic_connection_->connectionSocket()->ioHandle().isOpen()); @@ -799,7 +810,7 @@ TEST_P(EnvoyQuicServerSessionTest, SendBufferWatermark) { return request_decoder; })); quic::QuicStreamId stream_id = - quic_version_[0].transport_version == quic::QUIC_VERSION_IETF_DRAFT_27 ? 4u : 5u; + quic::VersionUsesHttp3(quic_version_[0].transport_version) ? 4u : 5u; auto stream1 = dynamic_cast(envoy_quic_session_.GetOrCreateStream(stream_id)); diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_server_stream_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_server_stream_test.cc index ab792b1e7024..475ce45ae8a3 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_server_stream_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_server_stream_test.cc @@ -49,6 +49,7 @@ class EnvoyQuicServerStreamTest : public testing::TestWithParam { quic_stream_->setRequestDecoder(stream_decoder_); quic_stream_->addCallbacks(stream_callbacks_); quic_session_.ActivateStream(std::unique_ptr(quic_stream_)); + EXPECT_CALL(quic_session_, ShouldYield(_)).WillRepeatedly(testing::Return(false)); EXPECT_CALL(quic_session_, WritevData(_, _, _, _, _, _)) .WillRepeatedly(Invoke([](quic::QuicStreamId, size_t write_length, quic::QuicStreamOffset, quic::StreamSendingState state, bool, @@ -64,6 +65,8 @@ class EnvoyQuicServerStreamTest : public testing::TestWithParam { void SetUp() override { quic_session_.Initialize(); + setQuicConfigWithDefaultValues(quic_session_.config()); + quic_session_.OnConfigNegotiated(); request_headers_.OnHeaderBlockStart(); request_headers_.OnHeader(":authority", host_); request_headers_.OnHeader(":method", "POST"); diff --git a/test/extensions/quic_listeners/quiche/integration/BUILD b/test/extensions/quic_listeners/quiche/integration/BUILD index bb473cd6effc..a36af5d08dee 100644 --- a/test/extensions/quic_listeners/quiche/integration/BUILD +++ b/test/extensions/quic_listeners/quiche/integration/BUILD @@ -10,6 +10,7 @@ envoy_package() envoy_cc_test( name = "quic_http_integration_test", + size = "medium", srcs = ["quic_http_integration_test.cc"], data = ["//test/config/integration/certs"], # Skipping as quiche quic_stream_send_buffer.cc does not currently compile on Windows @@ -28,10 +29,11 @@ envoy_cc_test( "//source/extensions/quic_listeners/quiche:quic_transport_socket_factory_lib", "//source/extensions/resource_monitors/injected_resource:config", "//test/extensions/quic_listeners/quiche:quic_test_utils_for_envoy_lib", + "//test/extensions/quic_listeners/quiche:test_utils_lib", "//test/integration:http_integration_lib", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", "@envoy_api//envoy/config/overload/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", - "@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/transport_sockets/quic/v3:pkg_cc_proto", 
], ) diff --git a/test/extensions/quic_listeners/quiche/integration/quic_http_integration_test.cc b/test/extensions/quic_listeners/quiche/integration/quic_http_integration_test.cc index 7c701372ff21..094a5c73c04f 100644 --- a/test/extensions/quic_listeners/quiche/integration/quic_http_integration_test.cc +++ b/test/extensions/quic_listeners/quiche/integration/quic_http_integration_test.cc @@ -3,7 +3,7 @@ #include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/config/overload/v3/overload.pb.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" -#include "envoy/extensions/transport_sockets/tls/v3/cert.pb.h" +#include "envoy/extensions/transport_sockets/quic/v3/quic_transport.pb.h" #include "test/config/utility.h" #include "test/integration/http_integration.h" @@ -28,6 +28,7 @@ #include "extensions/quic_listeners/quiche/envoy_quic_alarm_factory.h" #include "extensions/quic_listeners/quiche/envoy_quic_packet_writer.h" #include "extensions/quic_listeners/quiche/envoy_quic_utils.h" +#include "test/extensions/quic_listeners/quiche/test_utils.h" namespace Envoy { namespace Quic { @@ -43,13 +44,15 @@ class CodecClientCallbacksForTest : public Http::CodecClientCallbacks { Http::StreamResetReason last_stream_reset_reason_{Http::StreamResetReason::LocalReset}; }; -class QuicHttpIntegrationTest : public HttpIntegrationTest, - public testing::TestWithParam { +class QuicHttpIntegrationTest : public HttpIntegrationTest, public QuicMultiVersionTest { public: QuicHttpIntegrationTest() - : HttpIntegrationTest(Http::CodecClient::Type::HTTP3, GetParam(), + : HttpIntegrationTest(Http::CodecClient::Type::HTTP3, GetParam().first, ConfigHelper::quicHttpProxyConfig()), - supported_versions_(quic::CurrentSupportedVersions()), + supported_versions_([]() { + SetQuicReloadableFlag(quic_enable_version_draft_27, GetParam().second); + return quic::CurrentSupportedVersions(); + }()), crypto_config_(std::make_unique()), conn_helper_(*dispatcher_), alarm_factory_(*dispatcher_, *conn_helper_.GetClock()), injected_resource_filename_(TestEnvironment::temporaryPath("injected_resource")), @@ -107,12 +110,15 @@ class QuicHttpIntegrationTest : public HttpIntegrationTest, void initialize() override { config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { - envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context; - ConfigHelper::initializeTls({}, *tls_context.mutable_common_tls_context()); + envoy::extensions::transport_sockets::quic::v3::QuicDownstreamTransport + quic_transport_socket_config; + auto tls_context = quic_transport_socket_config.mutable_downstream_tls_context(); + ConfigHelper::initializeTls(ConfigHelper::ServerSslOptions().setRsaCert(true).setTlsV13(true), + *tls_context->mutable_common_tls_context()); auto* filter_chain = bootstrap.mutable_static_resources()->mutable_listeners(0)->mutable_filter_chains(0); auto* transport_socket = filter_chain->mutable_transport_socket(); - transport_socket->mutable_typed_config()->PackFrom(tls_context); + transport_socket->mutable_typed_config()->PackFrom(quic_transport_socket_config); bootstrap.mutable_static_resources()->mutable_listeners(0)->set_reuse_port(set_reuse_port_); @@ -176,9 +182,8 @@ class QuicHttpIntegrationTest : public HttpIntegrationTest, AtomicFileUpdater file_updater_; }; -INSTANTIATE_TEST_SUITE_P(IpVersions, QuicHttpIntegrationTest, - testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), - TestUtility::ipTestParamsToString); 
+INSTANTIATE_TEST_SUITE_P(QuicHttpIntegrationTests, QuicHttpIntegrationTest, + testing::ValuesIn(generateTestParam()), testParamsToString); TEST_P(QuicHttpIntegrationTest, GetRequestAndEmptyResponse) { testRouterHeaderOnlyRequestAndResponse(); @@ -285,13 +290,13 @@ TEST_P(QuicHttpIntegrationTest, MultipleQuicListenersWithBPF) { cached->add_server_designated_connection_id(quic::test::TestConnectionId(i << 32)); codec_clients.push_back(makeHttpConnection(lookupPort("http"))); } - if (GetParam() == Network::Address::IpVersion::v4) { + if (GetParam().first == Network::Address::IpVersion::v4) { test_server_->waitForCounterEq("listener.0.0.0.0_0.downstream_cx_total", 8u); } else { test_server_->waitForCounterEq("listener.[__]_0.downstream_cx_total", 8u); } for (size_t i = 0; i < concurrency_; ++i) { - if (GetParam() == Network::Address::IpVersion::v4) { + if (GetParam().first == Network::Address::IpVersion::v4) { test_server_->waitForGaugeEq( fmt::format("listener.0.0.0.0_0.worker_{}.downstream_cx_active", i), 1u); test_server_->waitForCounterEq( @@ -328,7 +333,7 @@ TEST_P(QuicHttpIntegrationTest, MultipleQuicListenersNoBPF) { cached->add_server_designated_connection_id(quic::test::TestConnectionId(i << 32)); codec_clients.push_back(makeHttpConnection(lookupPort("http"))); } - if (GetParam() == Network::Address::IpVersion::v4) { + if (GetParam().first == Network::Address::IpVersion::v4) { test_server_->waitForCounterEq("listener.0.0.0.0_0.downstream_cx_total", 8u); } else { test_server_->waitForCounterEq("listener.[__]_0.downstream_cx_total", 8u); @@ -336,7 +341,7 @@ TEST_P(QuicHttpIntegrationTest, MultipleQuicListenersNoBPF) { // Even without BPF support, these connections should more or less distributed // across different workers. for (size_t i = 0; i < concurrency_; ++i) { - if (GetParam() == Network::Address::IpVersion::v4) { + if (GetParam().first == Network::Address::IpVersion::v4) { EXPECT_LT( test_server_->gauge(fmt::format("listener.0.0.0.0_0.worker_{}.downstream_cx_active", i)) ->value(), diff --git a/test/extensions/quic_listeners/quiche/test_proof_source.h b/test/extensions/quic_listeners/quiche/test_proof_source.h new file mode 100644 index 000000000000..3df1aec6b23a --- /dev/null +++ b/test/extensions/quic_listeners/quiche/test_proof_source.h @@ -0,0 +1,48 @@ +#ifdef __GNUC__ +#pragma GCC diagnostic push +// QUICHE allows unused parameters. +#pragma GCC diagnostic ignored "-Wunused-parameter" +// QUICHE uses offsetof(). +#pragma GCC diagnostic ignored "-Winvalid-offsetof" +#pragma GCC diagnostic ignored "-Wtype-limits" +#include "quiche/quic/test_tools/test_certificates.h" + +#pragma GCC diagnostic pop +#else +#include "quiche/quic/test_tools/test_certificates.h" +#endif + +#include +#include "extensions/quic_listeners/quiche/envoy_quic_fake_proof_source.h" + +namespace Envoy { +namespace Quic { + +// A test ProofSource which always provide a hard-coded test certificate in +// QUICHE and a fake signature. 
+class TestProofSource : public Quic::EnvoyQuicFakeProofSource { +public: + quic::QuicReferenceCountedPointer + GetCertChain(const quic::QuicSocketAddress& /*server_address*/, + const quic::QuicSocketAddress& /*client_address*/, + const std::string& /*hostname*/) override { + return cert_chain_; + } + + void + ComputeTlsSignature(const quic::QuicSocketAddress& /*server_address*/, + const quic::QuicSocketAddress& /*client_address*/, + const std::string& /*hostname*/, uint16_t /*signature_algorithm*/, + quiche::QuicheStringPiece in, + std::unique_ptr callback) override { + callback->Run(true, absl::StrCat("Fake signature for { ", in, " }"), nullptr); + } + +private: + quic::QuicReferenceCountedPointer cert_chain_{ + new quic::ProofSource::Chain( + std::vector{std::string(quic::test::kTestCertificate)})}; +}; + +} // namespace Quic +} // namespace Envoy diff --git a/test/extensions/quic_listeners/quiche/test_utils.h b/test/extensions/quic_listeners/quiche/test_utils.h index 3e07ec1a5c1a..684d96881518 100644 --- a/test/extensions/quic_listeners/quiche/test_utils.h +++ b/test/extensions/quic_listeners/quiche/test_utils.h @@ -9,11 +9,15 @@ #include "quiche/quic/core/http/quic_spdy_session.h" #include "quiche/quic/core/http/quic_spdy_client_session.h" #include "quiche/quic/test_tools/quic_test_utils.h" +#include "quiche/quic/test_tools/first_flight.h" #include "quiche/quic/core/quic_utils.h" #include "quiche/quic/test_tools/crypto_test_utils.h" - +#include "quiche/quic/test_tools/quic_config_peer.h" #pragma GCC diagnostic pop +#include "extensions/quic_listeners/quiche/envoy_quic_utils.h" +#include "test/test_common/environment.h" + namespace Envoy { namespace Quic { @@ -40,6 +44,7 @@ class MockEnvoyQuicSession : public quic::QuicSpdySession, public QuicFilterMana (quic::QuicStreamId id, size_t write_length, quic::QuicStreamOffset offset, quic::StreamSendingState state, quic::TransmissionType type, quiche::QuicheOptional level)); + MOCK_METHOD(bool, ShouldYield, (quic::QuicStreamId id)); absl::string_view requestedServerName() const override { return {GetCryptoStream()->crypto_negotiated_params().sni}; @@ -83,6 +88,7 @@ class MockEnvoyQuicClientSession : public quic::QuicSpdyClientSession, (quic::QuicStreamId id, size_t write_length, quic::QuicStreamOffset offset, quic::StreamSendingState state, quic::TransmissionType type, quiche::QuicheOptional level)); + MOCK_METHOD(bool, ShouldYield, (quic::QuicStreamId id)); absl::string_view requestedServerName() const override { return {GetCryptoStream()->crypto_negotiated_params().sni}; @@ -97,5 +103,79 @@ class MockEnvoyQuicClientSession : public quic::QuicSpdyClientSession, quic::QuicCryptoClientConfig crypto_config_; }; +Buffer::OwnedImpl +generateChloPacketToSend(quic::ParsedQuicVersion quic_version, quic::QuicConfig& quic_config, + quic::QuicCryptoServerConfig& crypto_config, + quic::QuicConnectionId connection_id, quic::QuicClock& clock, + const quic::QuicSocketAddress& server_address, + const quic::QuicSocketAddress& client_address, std::string sni) { + if (quic::VersionUsesHttp3(quic_version.transport_version)) { + std::unique_ptr packet = + std::move(quic::test::GetFirstFlightOfPackets(quic_version, quic_config, connection_id)[0]); + return Buffer::OwnedImpl(packet->data(), packet->length()); + } + + quic::CryptoHandshakeMessage chlo = quic::test::crypto_test_utils::GenerateDefaultInchoateCHLO( + &clock, quic_version.transport_version, &crypto_config); + chlo.SetVector(quic::kCOPT, quic::QuicTagVector{quic::kREJ}); + 
chlo.SetStringPiece(quic::kSNI, sni); + quic::CryptoHandshakeMessage full_chlo; + quic::QuicReferenceCountedPointer signed_config( + new quic::QuicSignedServerConfig); + quic::QuicCompressedCertsCache cache( + quic::QuicCompressedCertsCache::kQuicCompressedCertsCacheSize); + quic::test::crypto_test_utils::GenerateFullCHLO(chlo, &crypto_config, server_address, + client_address, quic_version.transport_version, + &clock, signed_config, &cache, &full_chlo); + // Overwrite version label to the version passed in. + full_chlo.SetVersion(quic::kVER, quic_version); + quic::QuicConfig quic_config_tmp; + quic_config_tmp.ToHandshakeMessage(&full_chlo, quic_version.transport_version); + + std::string packet_content(full_chlo.GetSerialized().AsStringPiece()); + auto encrypted_packet = std::unique_ptr( + quic::test::ConstructEncryptedPacket(connection_id, quic::EmptyQuicConnectionId(), + /*version_flag=*/true, /*reset_flag*/ false, + /*packet_number=*/1, packet_content)); + + return Buffer::OwnedImpl(encrypted_packet->data(), encrypted_packet->length()); +} + +void setQuicConfigWithDefaultValues(quic::QuicConfig* config) { + quic::test::QuicConfigPeer::SetReceivedMaxBidirectionalStreams( + config, quic::kDefaultMaxStreamsPerConnection); + quic::test::QuicConfigPeer::SetReceivedMaxUnidirectionalStreams( + config, quic::kDefaultMaxStreamsPerConnection); + quic::test::QuicConfigPeer::SetReceivedInitialMaxStreamDataBytesUnidirectional( + config, quic::kMinimumFlowControlSendWindow); + quic::test::QuicConfigPeer::SetReceivedInitialMaxStreamDataBytesIncomingBidirectional( + config, quic::kMinimumFlowControlSendWindow); + quic::test::QuicConfigPeer::SetReceivedInitialMaxStreamDataBytesOutgoingBidirectional( + config, quic::kMinimumFlowControlSendWindow); + quic::test::QuicConfigPeer::SetReceivedInitialSessionFlowControlWindow( + config, quic::kMinimumFlowControlSendWindow); +} + +// A test suite with variation of ip version and a knob to turn on/off IETF QUIC implementation. +class QuicMultiVersionTest + : public testing::TestWithParam> {}; + +std::vector> generateTestParam() { + std::vector> param; + for (auto ip_version : TestEnvironment::getIpVersionsForTest()) { + for (bool use_http3 : {true, false}) { + param.emplace_back(ip_version, use_http3); + } + } + + return param; +} + +std::string testParamsToString( + const ::testing::TestParamInfo>& params) { + std::string ip_version = params.param.first == Network::Address::IpVersion::v4 ? "IPv4" : "IPv6"; + return absl::StrCat(ip_version, params.param.second ? 
"_UseHttp3" : "_UseGQuic"); +} + } // namespace Quic } // namespace Envoy diff --git a/test/mocks/ssl/mocks.h b/test/mocks/ssl/mocks.h index 3aed0577db6e..825822992a7c 100644 --- a/test/mocks/ssl/mocks.h +++ b/test/mocks/ssl/mocks.h @@ -114,6 +114,20 @@ class MockServerContextConfig : public ServerContextConfig { MOCK_METHOD(bool, disableStatelessSessionResumption, (), (const)); }; +class MockTlsCertificateConfig : public TlsCertificateConfig { +public: + MockTlsCertificateConfig() = default; + ~MockTlsCertificateConfig() override = default; + + MOCK_METHOD(const std::string&, certificateChain, (), (const)); + MOCK_METHOD(const std::string&, certificateChainPath, (), (const)); + MOCK_METHOD(const std::string&, privateKey, (), (const)); + MOCK_METHOD(const std::string&, privateKeyPath, (), (const)); + MOCK_METHOD(const std::string&, password, (), (const)); + MOCK_METHOD(const std::string&, passwordPath, (), (const)); + MOCK_METHOD(Envoy::Ssl::PrivateKeyMethodProviderSharedPtr, privateKeyMethod, (), (const)); +}; + class MockPrivateKeyMethodManager : public PrivateKeyMethodManager { public: MockPrivateKeyMethodManager(); diff --git a/test/server/listener_manager_impl_quic_only_test.cc b/test/server/listener_manager_impl_quic_only_test.cc index 296ba96b4107..b54079590eef 100644 --- a/test/server/listener_manager_impl_quic_only_test.cc +++ b/test/server/listener_manager_impl_quic_only_test.cc @@ -26,19 +26,21 @@ TEST_F(ListenerManagerImplQuicOnlyTest, QuicListenerFactoryAndSslContext) { filters: [] transport_socket: name: envoy.transport_sockets.quic - config: - common_tls_context: - tls_certificates: - - certificate_chain: - filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem" - private_key: - filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_key.pem" - validation_context: - trusted_ca: - filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem" - match_subject_alt_names: - - exact: localhost - - exact: 127.0.0.1 + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.quic.v3.QuicDownstreamTransport + downstream_tls_context: + common_tls_context: + tls_certificates: + - certificate_chain: + filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem" + private_key: + filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_key.pem" + validation_context: + trusted_ca: + filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem" + match_subject_alt_names: + - exact: localhost + - exact: 127.0.0.1 reuse_port: true udp_listener_config: udp_listener_name: "quiche_quic_listener" From 1f648d8653e1287eefad801cf32540c7bca5a0fa Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Wed, 10 Jun 2020 12:09:49 -0400 Subject: [PATCH 329/909] http: rejecting CONNECT requests with bodies (#11530) Risk Level: Low (connect only) Testing: fixed up tests Docs Changes: no Release Notes: no Runtime guard: no - connect in Alpha Part of #1451 Signed-off-by: Alyssa Wilk --- source/common/http/http1/codec_impl.cc | 19 +++++++++++++-- test/common/http/http1/codec_impl_test.cc | 29 ++++++++++++++++------- 2 files changed, 37 insertions(+), 11 deletions(-) diff --git a/source/common/http/http1/codec_impl.cc b/source/common/http/http1/codec_impl.cc index 2ae945658f61..f3072449a813 100644 --- a/source/common/http/http1/codec_impl.cc +++ b/source/common/http/http1/codec_impl.cc @@ -34,6 +34,7 @@ struct 
Http1ResponseCodeDetailValues { const absl::string_view ConnectionHeaderSanitization = "http1.connection_header_rejected"; const absl::string_view InvalidUrl = "http1.invalid_url"; const absl::string_view InvalidTransferEncoding = "http1.invalid_transfer_encoding"; + const absl::string_view BodyDisallowed = "http1.body_disallowed"; }; struct Http1HeaderTypesValues { @@ -660,16 +661,30 @@ int ConnectionImpl::onHeadersCompleteBase() { } } if (parser_.method == HTTP_CONNECT) { + if (request_or_response_headers.ContentLength()) { + if (request_or_response_headers.getContentLengthValue() == "0") { + request_or_response_headers.removeContentLength(); + } else { + // Per https://tools.ietf.org/html/rfc7231#section-4.3.6 a payload with a + // CONNECT request has no defined semantics, and may be rejected. + error_code_ = Http::Code::BadRequest; + sendProtocolError(Http1ResponseCodeDetails::get().BodyDisallowed); + throw CodecProtocolException("http/1.1 protocol error: unsupported content length"); + } + } ENVOY_CONN_LOG(trace, "codec entering upgrade mode for CONNECT request.", connection_); handling_upgrade_ = true; } // Per https://tools.ietf.org/html/rfc7230#section-3.3.1 Envoy should reject // transfer-codings it does not understand. + // Per https://tools.ietf.org/html/rfc7231#section-4.3.6 a payload with a + // CONNECT request has no defined semantics, and may be rejected. if (request_or_response_headers.TransferEncoding()) { const absl::string_view encoding = request_or_response_headers.getTransferEncodingValue(); - if (reject_unsupported_transfer_encodings_ && - !absl::EqualsIgnoreCase(encoding, Headers::get().TransferEncodingValues.Chunked)) { + if ((reject_unsupported_transfer_encodings_ && + !absl::EqualsIgnoreCase(encoding, Headers::get().TransferEncodingValues.Chunked)) || + parser_.method == HTTP_CONNECT) { error_code_ = Http::Code::NotImplemented; sendProtocolError(Http1ResponseCodeDetails::get().InvalidTransferEncoding); throw CodecProtocolException("http/1.1 protocol error: unsupported transfer encoding"); diff --git a/test/common/http/http1/codec_impl_test.cc b/test/common/http/http1/codec_impl_test.cc index b68fcce0cc76..742817803ee1 100644 --- a/test/common/http/http1/codec_impl_test.cc +++ b/test/common/http/http1/codec_impl_test.cc @@ -1717,20 +1717,31 @@ TEST_F(Http1ServerConnectionImplTest, ConnectRequestWithTEChunked) { NiceMock decoder; EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); - // Connect with body is technically illegal, but Envoy does not inspect the - // body to see if there is a non-zero byte chunk. It will instead pass it - // through. - // TODO(alyssawilk) track connect payload and block if this happens. - Buffer::OwnedImpl expected_data("12345abcd"); - EXPECT_CALL(decoder, decodeHeaders_(_, false)); - EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data), false)); + // Per https://tools.ietf.org/html/rfc7231#section-4.3.6 CONNECT with body has no defined + // semantics: Envoy will reject chunked CONNECT requests. 
Buffer::OwnedImpl buffer( "CONNECT host:80 HTTP/1.1\r\ntransfer-encoding: chunked\r\n\r\n12345abcd"); auto status = codec_->dispatch(buffer); - EXPECT_TRUE(status.ok()); + EXPECT_TRUE(isCodecProtocolError(status)); + EXPECT_EQ(status.message(), "http/1.1 protocol error: unsupported transfer encoding"); +} + +TEST_F(Http1ServerConnectionImplTest, ConnectRequestWithNonZeroContentLength) { + initialize(); + + InSequence sequence; + NiceMock decoder; + EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); + + // Make sure we avoid the deferred_end_stream_headers_ optimization for + // requests-with-no-body. + Buffer::OwnedImpl buffer("CONNECT host:80 HTTP/1.1\r\ncontent-length: 1\r\n\r\nabcd"); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(isCodecProtocolError(status)); + EXPECT_EQ(status.message(), "http/1.1 protocol error: unsupported content length"); } -TEST_F(Http1ServerConnectionImplTest, ConnectRequestWithContentLength) { +TEST_F(Http1ServerConnectionImplTest, ConnectRequestWithZeroContentLength) { initialize(); InSequence sequence; From 353c21673395ba53fad54b4916cda0ad87ddb695 Mon Sep 17 00:00:00 2001 From: Stephan Zuercher Date: Wed, 10 Jun 2020 14:14:24 -0700 Subject: [PATCH 330/909] http1: do not add transfer-encoding: chunked to 1xx/204 responses (#11458) Follow-on to #10811. The previous change would strip the transfer-encoding: chunked header if returned by an upstream during upgrade, but the http/1.1 codec adds the header back. RFC 7230, Section 3.3.1 requires that no such header appear in a 1xx or 204 response. Use runtime feature "envoy.reloadable_features.strict_1xx_and_204_response_headers" to disable. Risk Level: medium Testing: updated tests Docs Changes: n/a Release Notes: updated Signed-off-by: Stephan Zuercher --- docs/root/version_history/current.rst | 1 + source/common/http/http1/codec_impl.cc | 50 +++-- source/common/http/http1/codec_impl.h | 9 +- source/common/runtime/runtime_features.cc | 3 +- test/common/http/http1/codec_impl_test.cc | 171 +++++++++++++++++- .../integration/websocket_integration_test.cc | 5 +- 6 files changed, 220 insertions(+), 19 deletions(-) diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index bffadd1cf24b..9f8e831a91a0 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -20,6 +20,7 @@ Minor Behavior Changes Can be reverted temporarily by setting runtime feature `envoy.reloadable_features.fix_upgrade_response` to false. * http: stopped overwriting `date` response headers. Responses without a `date` header will still have the header properly set. This behavior can be temporarily reverted by setting `envoy.reloadable_features.preserve_upstream_date` to false. * http: stopped adding a synthetic path to CONNECT requests, meaning unconfigured CONNECT requests will now return 404 instead of 403. This behavior can be temporarily reverted by setting `envoy.reloadable_features.stop_faking_paths` to false. +* http: stopped allowing upstream 1xx or 204 responses with Transfer-Encoding or non-zero Content-Length headers. Content-Length of 0 is allowed, but stripped. This behavior can be temporarily reverted by setting `envoy.reloadable_features.strict_1xx_and_204_response_headers` to false. * router: allow retries of streaming or incomplete requests. This removes stat `rq_retry_skipped_request_not_complete`. * router: allow retries by default when upstream responds with :ref:`x-envoy-overloaded `. 
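[Editor's sketch, not part of the patch: the release note above points at the runtime guard `envoy.reloadable_features.strict_1xx_and_204_response_headers`. One way an operator could temporarily revert the strict 1xx/204 handling is a static runtime layer in the bootstrap; this assumes the bootstrap already uses layered_runtime, and the layer name static_layer_0 is an arbitrary placeholder.

layered_runtime:
  layers:
  - name: static_layer_0
    static_layer:
      envoy.reloadable_features.strict_1xx_and_204_response_headers: false

The same key can also be flipped without a restart via the admin interface's /runtime_modify endpoint, provided an admin layer is configured in the layered runtime.]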
diff --git a/source/common/http/http1/codec_impl.cc b/source/common/http/http1/codec_impl.cc index f3072449a813..7fe0a6259092 100644 --- a/source/common/http/http1/codec_impl.cc +++ b/source/common/http/http1/codec_impl.cc @@ -35,6 +35,8 @@ struct Http1ResponseCodeDetailValues { const absl::string_view InvalidUrl = "http1.invalid_url"; const absl::string_view InvalidTransferEncoding = "http1.invalid_transfer_encoding"; const absl::string_view BodyDisallowed = "http1.body_disallowed"; + const absl::string_view TransferEncodingNotAllowed = "http1.transfer_encoding_not_allowed"; + const absl::string_view ContentLengthNotAllowed = "http1.content_length_not_allowed"; }; struct Http1HeaderTypesValues { @@ -69,7 +71,7 @@ StreamEncoderImpl::StreamEncoderImpl(ConnectionImpl& connection, HeaderKeyFormatter* header_key_formatter) : connection_(connection), disable_chunk_encoding_(false), chunk_encoding_(true), processing_100_continue_(false), is_response_to_head_request_(false), - is_response_to_connect_request_(false), is_content_length_allowed_(true), + is_response_to_connect_request_(false), is_1xx_(false), is_204_(false), header_key_formatter_(header_key_formatter) { if (connection_.connection().aboveHighWatermark()) { runHighWatermarkCallbacks(); @@ -159,15 +161,22 @@ void StreamEncoderImpl::encodeHeadersBase(const RequestOrResponseHeaderMap& head // response to a HEAD request. // For 204s and 1xx where content length is disallowed, don't append the content length but // also don't chunk encode. - if (is_content_length_allowed_) { + if (!is_1xx_ && !is_204_) { encodeFormattedHeader(Headers::get().ContentLength.get(), "0"); } chunk_encoding_ = false; } else if (connection_.protocol() == Protocol::Http10) { chunk_encoding_ = false; + } else if (connection_.strict1xxAnd204Headers() && (is_1xx_ || is_204_)) { + // For 1xx and 204 responses, do not send the chunked encoding header or enable chunked + // encoding: https://tools.ietf.org/html/rfc7230#section-3.3.1 + chunk_encoding_ = false; + + // Assert 1xx (may have content) OR 204 and end stream. + ASSERT(is_1xx_ || end_stream); } else { // For responses to connect requests, do not send the chunked encoding header: - // https://tools.ietf.org/html/rfc7231#section-4.3.6 + // https://tools.ietf.org/html/rfc7231#section-4.3.6. if (!is_response_to_connect_request_) { encodeFormattedHeader(Headers::get().TransferEncoding.get(), Headers::get().TransferEncodingValues.Chunked); @@ -352,14 +361,12 @@ void ResponseEncoderImpl::encodeHeaders(const ResponseHeaderMap& headers, bool e connection_.addCharToBuffer('\r'); connection_.addCharToBuffer('\n'); - if (numeric_status == 204 || numeric_status < 200) { - // Per https://tools.ietf.org/html/rfc7230#section-3.3.2 - setIsContentLengthAllowed(false); - } else { - // Make sure that if we encodeHeaders(100) then encodeHeaders(200) that we - // set is_content_length_allowed_ back to true. - setIsContentLengthAllowed(true); - } + // Enabling handling of https://tools.ietf.org/html/rfc7230#section-3.3.1 and + // https://tools.ietf.org/html/rfc7230#section-3.3.2. Also resets these flags + // if a 100 Continue is followed by another status. + setIs1xx(numeric_status < 200); + setIs204(numeric_status == 204); + if (numeric_status >= 300) { // Don't do special CONNECT logic if the CONNECT was rejected. 
is_response_to_connect_request_ = false; @@ -459,6 +466,8 @@ ConnectionImpl::ConnectionImpl(Network::Connection& connection, CodecStats& stat enable_trailers_(enable_trailers), reject_unsupported_transfer_encodings_(Runtime::runtimeFeatureEnabled( "envoy.reloadable_features.reject_unsupported_transfer_encodings")), + strict_1xx_and_204_headers_(Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.strict_1xx_and_204_response_headers")), output_buffer_([&]() -> void { this->onBelowLowWatermark(); }, [&]() -> void { this->onAboveHighWatermark(); }, []() -> void { /* TODO(adisuissa): Handle overflow watermark */ }), @@ -1078,6 +1087,25 @@ int ClientConnectionImpl::onHeadersComplete() { } } + if (strict_1xx_and_204_headers_ && (parser_.status_code < 200 || parser_.status_code == 204)) { + if (headers->TransferEncoding()) { + sendProtocolError(Http1ResponseCodeDetails::get().TransferEncodingNotAllowed); + throw CodecProtocolException( + "http/1.1 protocol error: transfer encoding not allowed in 1xx or 204"); + } + + if (headers->ContentLength()) { + // Report a protocol error for non-zero Content-Length, but paper over zero Content-Length. + if (headers->ContentLength()->value().getStringView() != "0") { + sendProtocolError(Http1ResponseCodeDetails::get().ContentLengthNotAllowed); + throw CodecProtocolException( + "http/1.1 protocol error: content length not allowed in 1xx or 204"); + } + + headers->removeContentLength(); + } + } + if (parser_.status_code == 100) { // http-parser treats 100 continue headers as their own complete response. // Swallow the spurious onMessageComplete and continue processing. diff --git a/source/common/http/http1/codec_impl.h b/source/common/http/http1/codec_impl.h index dd731982c6c3..5ebc73b8363d 100644 --- a/source/common/http/http1/codec_impl.h +++ b/source/common/http/http1/codec_impl.h @@ -95,7 +95,8 @@ class StreamEncoderImpl : public virtual StreamEncoder, protected: StreamEncoderImpl(ConnectionImpl& connection, HeaderKeyFormatter* header_key_formatter); - void setIsContentLengthAllowed(bool value) { is_content_length_allowed_ = value; } + void setIs1xx(bool value) { is_1xx_ = value; } + void setIs204(bool value) { is_204_ = value; } void encodeHeadersBase(const RequestOrResponseHeaderMap& headers, bool end_stream); void encodeTrailersBase(const HeaderMap& headers); @@ -109,7 +110,8 @@ class StreamEncoderImpl : public virtual StreamEncoder, bool processing_100_continue_ : 1; bool is_response_to_head_request_ : 1; bool is_response_to_connect_request_ : 1; - bool is_content_length_allowed_ : 1; + bool is_1xx_ : 1; + bool is_204_ : 1; private: /** @@ -236,6 +238,8 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable decoder; + Http::ResponseEncoder* response_encoder = nullptr; + EXPECT_CALL(callbacks_, newStream(_, _)) + .WillOnce(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& { + response_encoder = &encoder; + return decoder; + })); + + Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\nConnection: upgrade\r\nUpgrade: foo\r\n\r\n"); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); + EXPECT_EQ(0U, buffer.length()); + + std::string output; + ON_CALL(connection_, write(_, _)).WillByDefault(AddBufferToString(&output)); + + TestResponseHeaderMapImpl headers{{":status", "101"}}; + response_encoder->encodeHeaders(headers, false); + EXPECT_EQ("HTTP/1.1 101 Switching Protocols\r\n\r\n", output); +} + TEST_F(Http1ServerConnectionImplTest, ConnectRequestNoContentLength) { initialize(); @@ -1956,11 +1981,116 @@ 
TEST_F(Http1ClientConnectionImplTest, 204Response) { request_encoder.encodeHeaders(headers, true); EXPECT_CALL(response_decoder, decodeHeaders_(_, true)); - Buffer::OwnedImpl response("HTTP/1.1 204 OK\r\nContent-Length: 20\r\n\r\n"); + Buffer::OwnedImpl response("HTTP/1.1 204 OK\r\n\r\n"); auto status = codec_->dispatch(response); EXPECT_TRUE(status.ok()); } +// 204 No Content with Content-Length is barred by RFC 7230, Section 3.3.2. +TEST_F(Http1ClientConnectionImplTest, 204ResponseContentLengthNotAllowed) { + // By default, content-length is barred. + { + initialize(); + + NiceMock response_decoder; + Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder); + TestRequestHeaderMapImpl headers{{":method", "GET"}, {":path", "/"}, {":authority", "host"}}; + request_encoder.encodeHeaders(headers, true); + + Buffer::OwnedImpl response("HTTP/1.1 204 OK\r\nContent-Length: 20\r\n\r\n"); + auto status = codec_->dispatch(response); + EXPECT_FALSE(status.ok()); + } + + // Test with feature disabled: content-length allowed. + { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.strict_1xx_and_204_response_headers", "false"}}); + + initialize(); + + NiceMock response_decoder; + Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder); + TestRequestHeaderMapImpl headers{{":method", "GET"}, {":path", "/"}, {":authority", "host"}}; + request_encoder.encodeHeaders(headers, true); + + Buffer::OwnedImpl response("HTTP/1.1 204 OK\r\nContent-Length: 20\r\n\r\n"); + auto status = codec_->dispatch(response); + EXPECT_TRUE(status.ok()); + } +} + +// 204 No Content with Content-Length: 0 is technically barred by RFC 7230, Section 3.3.2, but we +// allow it. +TEST_F(Http1ClientConnectionImplTest, 204ResponseWithContentLength0) { + { + initialize(); + + NiceMock response_decoder; + Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder); + TestRequestHeaderMapImpl headers{{":method", "GET"}, {":path", "/"}, {":authority", "host"}}; + request_encoder.encodeHeaders(headers, true); + + EXPECT_CALL(response_decoder, decodeHeaders_(_, true)); + Buffer::OwnedImpl response("HTTP/1.1 204 OK\r\nContent-Length: 0\r\n\r\n"); + auto status = codec_->dispatch(response); + EXPECT_TRUE(status.ok()); + } + + // Test with feature disabled: content-length allowed. + { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.strict_1xx_and_204_response_headers", "false"}}); + + NiceMock response_decoder; + Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder); + TestRequestHeaderMapImpl headers{{":method", "GET"}, {":path", "/"}, {":authority", "host"}}; + request_encoder.encodeHeaders(headers, true); + + EXPECT_CALL(response_decoder, decodeHeaders_(_, true)); + Buffer::OwnedImpl response("HTTP/1.1 204 OK\r\nContent-Length: 0\r\n\r\n"); + auto status = codec_->dispatch(response); + EXPECT_TRUE(status.ok()); + } +} + +// 204 No Content with Transfer-Encoding headers is barred by RFC 7230, Section 3.3.1. +TEST_F(Http1ClientConnectionImplTest, 204ResponseTransferEncodingNotAllowed) { + // By default, transfer-encoding is barred. 
+ { + initialize(); + + NiceMock response_decoder; + Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder); + TestRequestHeaderMapImpl headers{{":method", "GET"}, {":path", "/"}, {":authority", "host"}}; + request_encoder.encodeHeaders(headers, true); + + Buffer::OwnedImpl response("HTTP/1.1 204 OK\r\nTransfer-Encoding: chunked\r\n\r\n"); + auto status = codec_->dispatch(response); + EXPECT_FALSE(status.ok()); + } + + // Test with feature disabled: transfer-encoding allowed. + { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.strict_1xx_and_204_response_headers", "false"}}); + + initialize(); + + NiceMock response_decoder; + Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder); + TestRequestHeaderMapImpl headers{{":method", "GET"}, {":path", "/"}, {":authority", "host"}}; + request_encoder.encodeHeaders(headers, true); + + Buffer::OwnedImpl response("HTTP/1.1 204 OK\r\nTransfer-Encoding: chunked\r\n\r\n"); + auto status = codec_->dispatch(response); + EXPECT_TRUE(status.ok()); + } +} + TEST_F(Http1ClientConnectionImplTest, 100Response) { initialize(); @@ -1976,11 +2106,48 @@ TEST_F(Http1ClientConnectionImplTest, 100Response) { EXPECT_CALL(response_decoder, decodeHeaders_(_, false)); EXPECT_CALL(response_decoder, decodeData(_, _)).Times(0); - Buffer::OwnedImpl response("HTTP/1.1 200 OK\r\nContent-Length: 20\r\n\r\n"); + Buffer::OwnedImpl response("HTTP/1.1 200 OK\r\n\r\n"); status = codec_->dispatch(response); EXPECT_TRUE(status.ok()); } +// 101 Switching Protocol with Transfer-Encoding headers is barred by RFC 7230, Section 3.3.1. +TEST_F(Http1ClientConnectionImplTest, 101ResponseTransferEncodingNotAllowed) { + // By default, transfer-encoding is barred. + { + initialize(); + + NiceMock response_decoder; + Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder); + TestRequestHeaderMapImpl headers{{":method", "GET"}, {":path", "/"}, {":authority", "host"}}; + request_encoder.encodeHeaders(headers, true); + + Buffer::OwnedImpl response( + "HTTP/1.1 101 Switching Protocols\r\nTransfer-Encoding: chunked\r\n\r\n"); + auto status = codec_->dispatch(response); + EXPECT_FALSE(status.ok()); + } + + // Test with feature disabled: transfer-encoding allowed. 
+ { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.strict_1xx_and_204_response_headers", "false"}}); + + initialize(); + + NiceMock response_decoder; + Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder); + TestRequestHeaderMapImpl headers{{":method", "GET"}, {":path", "/"}, {":authority", "host"}}; + request_encoder.encodeHeaders(headers, true); + + Buffer::OwnedImpl response( + "HTTP/1.1 101 Switching Protocols\r\nTransfer-Encoding: chunked\r\n\r\n"); + auto status = codec_->dispatch(response); + EXPECT_TRUE(status.ok()); + } +} + TEST_F(Http1ClientConnectionImplTest, BadEncodeParams) { initialize(); diff --git a/test/integration/websocket_integration_test.cc b/test/integration/websocket_integration_test.cc index 195eb2ce0bdb..a7e92f4bca6f 100644 --- a/test/integration/websocket_integration_test.cc +++ b/test/integration/websocket_integration_test.cc @@ -100,6 +100,8 @@ void WebsocketIntegrationTest::validateUpgradeResponseHeaders( proxied_response_headers.removeDate(); proxied_response_headers.removeServer(); + ASSERT_TRUE(proxied_response_headers.TransferEncoding() == nullptr); + commonValidate(proxied_response_headers, original_response_headers); EXPECT_THAT(&proxied_response_headers, HeaderMapEqualIgnoreOrder(&original_response_headers)); @@ -419,9 +421,6 @@ TEST_P(WebsocketIntegrationTest, BidirectionalChunkedData) { if (upstreamProtocol() == FakeHttpConnection::Type::HTTP1) { ASSERT_TRUE(upstream_request_->headers().TransferEncoding() != nullptr); } - if (downstreamProtocol() == Http::CodecClient::Type::HTTP1) { - ASSERT_TRUE(response_->headers().TransferEncoding() != nullptr); - } // Send both a chunked request body and "websocket" payload. std::string request_payload = "3\r\n123\r\n0\r\n\r\nSomeWebsocketRequestPayload"; From 97a271b863e1f4092cf5af7e297a92c2e7823e1f Mon Sep 17 00:00:00 2001 From: Martin Matusiak Date: Thu, 11 Jun 2020 08:01:32 +1000 Subject: [PATCH 331/909] health_check filter & README: fix typos (#11513) Signed-off-by: Martin Matusiak --- generated_api_shadow/README.md | 6 +++--- .../filters/http/health_check/health_check_test.cc | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/generated_api_shadow/README.md b/generated_api_shadow/README.md index 23442d8c1752..04633c218a7c 100644 --- a/generated_api_shadow/README.md +++ b/generated_api_shadow/README.md @@ -1,6 +1,6 @@ This directory is for generated Envoy internal artifacts (via `proto_format`). -Do not hand edit any file under `envoy/`. This shadow API may only be used be -used in the Envoy source tree. +Do not hand edit any file under `envoy/`. This shadow API may only be used in +the Envoy source tree. -The `bazel/` tree is an symlink back to the official API Bazel rules. +The `bazel/` tree is a symlink back to the official API Bazel rules. 
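For readers following the series, the http1 codec change at the top of this section (the strict_1xx_and_204_response_headers work, from the preceding patch) boils down to one rule applied in ClientConnectionImpl::onHeadersComplete() whenever the runtime flag envoy.reloadable_features.strict_1xx_and_204_response_headers is enabled and the response status is 1xx or 204. The sketch below is an illustrative restatement only; the helper name is not part of any patch in this series.

    #include "envoy/http/header_map.h"

    // Returns true when a 1xx or 204 response must be rejected with a protocol error.
    bool rejectsSpecialResponseHeaders(Envoy::Http::ResponseHeaderMap& headers) {
      if (headers.TransferEncoding() != nullptr) {
        return true; // Transfer-Encoding is never allowed here (RFC 7230, Section 3.3.1).
      }
      if (headers.ContentLength() != nullptr) {
        if (headers.ContentLength()->value().getStringView() != "0") {
          return true; // non-zero Content-Length is rejected (RFC 7230, Section 3.3.2).
        }
        headers.removeContentLength(); // "Content-Length: 0" is tolerated but dropped.
      }
      return false;
    }

With the runtime flag set to "false", as in the tests above, both headers are passed through unchanged.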
diff --git a/test/extensions/filters/http/health_check/health_check_test.cc b/test/extensions/filters/http/health_check/health_check_test.cc index 6b1063bf5c82..16393b403359 100644 --- a/test/extensions/filters/http/health_check/health_check_test.cc +++ b/test/extensions/filters/http/health_check/health_check_test.cc @@ -249,9 +249,9 @@ TEST_F(HealthCheckFilterPassThroughTest, Ok) { EXPECT_CALL(callbacks_, encodeHeaders_(_, _)).Times(0); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false)); - Http::TestResponseHeaderMapImpl service_hc_respnose{{":status", "200"}}; - EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(service_hc_respnose, true)); - EXPECT_EQ("cluster_name", service_hc_respnose.getEnvoyUpstreamHealthCheckedClusterValue()); + Http::TestResponseHeaderMapImpl service_hc_response{{":status", "200"}}; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(service_hc_response, true)); + EXPECT_EQ("cluster_name", service_hc_response.getEnvoyUpstreamHealthCheckedClusterValue()); } TEST_F(HealthCheckFilterPassThroughTest, OkWithContinue) { @@ -268,9 +268,9 @@ TEST_F(HealthCheckFilterPassThroughTest, OkWithContinue) { filter_->encode100ContinueHeaders(continue_response)); Http::MetadataMap metadata_map{{"metadata", "metadata"}}; EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_->encodeMetadata(metadata_map)); - Http::TestResponseHeaderMapImpl service_hc_respnose{{":status", "200"}}; - EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(service_hc_respnose, true)); - EXPECT_EQ("cluster_name", service_hc_respnose.getEnvoyUpstreamHealthCheckedClusterValue()); + Http::TestResponseHeaderMapImpl service_hc_response{{":status", "200"}}; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(service_hc_response, true)); + EXPECT_EQ("cluster_name", service_hc_response.getEnvoyUpstreamHealthCheckedClusterValue()); } TEST_F(HealthCheckFilterPassThroughTest, Failed) { From aa0fc9db37674570c22927b4ee4ccf394b1d475f Mon Sep 17 00:00:00 2001 From: asraa Date: Wed, 10 Jun 2020 19:36:20 -0400 Subject: [PATCH 332/909] [fuzz] fix fuzz crashes related to not implemented protos (#11385) Fixes crashes (panic: not reached) due to not implemented protos being fuzzed. 
- load balancing policy LOAD_BALANCING_POLICY_CONFIG not implemented, causing server_fuzz_test to crash - connect matcher not supported in Jwt authentication filter, but matcher available @qiwzhang Risk Level: Low Testing: Regression testcases added Fixes OSS-fuzz issues - https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=21876 - https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=17762 Signed-off-by: Asra Ali --- api/envoy/config/cluster/v3/cluster.proto | 3 ++- api/envoy/config/cluster/v4alpha/cluster.proto | 3 ++- .../envoy/config/cluster/v3/cluster.proto | 3 ++- .../envoy/config/cluster/v4alpha/cluster.proto | 3 ++- .../filters/http/jwt_authn/matcher.cc | 5 +++++ test/extensions/filters/http/common/fuzz/BUILD | 1 + .../http/common/fuzz/filter_corpus/jwt_connect | 7 +++++++ .../http/common/fuzz/uber_per_filter.cc | 18 +++++++++++++++++- test/server/server_corpus/not_reached | 1 + 9 files changed, 39 insertions(+), 5 deletions(-) create mode 100644 test/extensions/filters/http/common/fuzz/filter_corpus/jwt_connect create mode 100644 test/server/server_corpus/not_reached diff --git a/api/envoy/config/cluster/v3/cluster.proto b/api/envoy/config/cluster/v3/cluster.proto index 7eb53d84c4f8..5817fb254fad 100644 --- a/api/envoy/config/cluster/v3/cluster.proto +++ b/api/envoy/config/cluster/v3/cluster.proto @@ -590,7 +590,8 @@ message Cluster { // The :ref:`load balancer type ` to use // when picking a host in the cluster. - LbPolicy lb_policy = 6 [(validate.rules).enum = {defined_only: true}]; + // [#comment:TODO: Remove enum constraint :ref:`LOAD_BALANCING_POLICY_CONFIG` when implemented.] + LbPolicy lb_policy = 6 [(validate.rules).enum = {defined_only: true not_in: 7}]; // Setting this is required for specifying members of // :ref:`STATIC`, diff --git a/api/envoy/config/cluster/v4alpha/cluster.proto b/api/envoy/config/cluster/v4alpha/cluster.proto index eab2f2d80fcb..454a9c163f49 100644 --- a/api/envoy/config/cluster/v4alpha/cluster.proto +++ b/api/envoy/config/cluster/v4alpha/cluster.proto @@ -591,7 +591,8 @@ message Cluster { // The :ref:`load balancer type ` to use // when picking a host in the cluster. - LbPolicy lb_policy = 6 [(validate.rules).enum = {defined_only: true}]; + // [#comment:TODO: Remove enum constraint :ref:`LOAD_BALANCING_POLICY_CONFIG` when implemented.] + LbPolicy lb_policy = 6 [(validate.rules).enum = {defined_only: true not_in: 7}]; // Setting this is required for specifying members of // :ref:`STATIC`, diff --git a/generated_api_shadow/envoy/config/cluster/v3/cluster.proto b/generated_api_shadow/envoy/config/cluster/v3/cluster.proto index 8140007f68af..298f874a946b 100644 --- a/generated_api_shadow/envoy/config/cluster/v3/cluster.proto +++ b/generated_api_shadow/envoy/config/cluster/v3/cluster.proto @@ -588,7 +588,8 @@ message Cluster { // The :ref:`load balancer type ` to use // when picking a host in the cluster. - LbPolicy lb_policy = 6 [(validate.rules).enum = {defined_only: true}]; + // [#comment:TODO: Remove enum constraint :ref:`LOAD_BALANCING_POLICY_CONFIG` when implemented.] 
+ LbPolicy lb_policy = 6 [(validate.rules).enum = {defined_only: true not_in: 7}]; // Setting this is required for specifying members of // :ref:`STATIC`, diff --git a/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto b/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto index eab2f2d80fcb..454a9c163f49 100644 --- a/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto +++ b/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto @@ -591,7 +591,8 @@ message Cluster { // The :ref:`load balancer type ` to use // when picking a host in the cluster. - LbPolicy lb_policy = 6 [(validate.rules).enum = {defined_only: true}]; + // [#comment:TODO: Remove enum constraint :ref:`LOAD_BALANCING_POLICY_CONFIG` when implemented.] + LbPolicy lb_policy = 6 [(validate.rules).enum = {defined_only: true not_in: 7}]; // Setting this is required for specifying members of // :ref:`STATIC`, diff --git a/source/extensions/filters/http/jwt_authn/matcher.cc b/source/extensions/filters/http/jwt_authn/matcher.cc index 622d73f022ae..ff60faa0431f 100644 --- a/source/extensions/filters/http/jwt_authn/matcher.cc +++ b/source/extensions/filters/http/jwt_authn/matcher.cc @@ -154,6 +154,11 @@ MatcherConstPtr Matcher::create(const RequirementRule& rule) { case RouteMatch::PathSpecifierCase::kHiddenEnvoyDeprecatedRegex: case RouteMatch::PathSpecifierCase::kSafeRegex: return std::make_unique(rule); + case RouteMatch::PathSpecifierCase::kConnectMatcher: + // TODO: When CONNECT match support is implemented, remove the manual clean-up of CONNECT + // matching in the filter fuzzer implementation: + // //test/extensions/filters/http/common/fuzz/uber_per_filter.cc + NOT_IMPLEMENTED_GCOVR_EXCL_LINE; // path specifier is required. case RouteMatch::PathSpecifierCase::PATH_SPECIFIER_NOT_SET: default: diff --git a/test/extensions/filters/http/common/fuzz/BUILD b/test/extensions/filters/http/common/fuzz/BUILD index 27f5d59038d5..a31d18651968 100644 --- a/test/extensions/filters/http/common/fuzz/BUILD +++ b/test/extensions/filters/http/common/fuzz/BUILD @@ -43,6 +43,7 @@ envoy_cc_test_library( "//test/mocks/server:server_mocks", "//test/proto:bookstore_proto_cc_proto", "@envoy_api//envoy/extensions/filters/http/grpc_json_transcoder/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/jwt_authn/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/http/squash/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/http/tap/v3:pkg_cc_proto", ], diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/jwt_connect b/test/extensions/filters/http/common/fuzz/filter_corpus/jwt_connect new file mode 100644 index 000000000000..2b2d00ecda5b --- /dev/null +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/jwt_connect @@ -0,0 +1,7 @@ +config { + name: "envoy.filters.http.jwt_authn" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.http.jwt_authn.v3.JwtAuthentication" + value: "\022\004\n\002b\000" + } +} \ No newline at end of file diff --git a/test/extensions/filters/http/common/fuzz/uber_per_filter.cc b/test/extensions/filters/http/common/fuzz/uber_per_filter.cc index da4d963164ee..c6db8b6ffebe 100644 --- a/test/extensions/filters/http/common/fuzz/uber_per_filter.cc +++ b/test/extensions/filters/http/common/fuzz/uber_per_filter.cc @@ -1,4 +1,5 @@ #include "envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.pb.h" +#include "envoy/extensions/filters/http/jwt_authn/v3/config.pb.h" #include 
"envoy/extensions/filters/http/squash/v3/squash.pb.h" #include "envoy/extensions/filters/http/tap/v3/tap.pb.h" @@ -73,6 +74,16 @@ void UberFilterFuzzer::guideAnyProtoType(test::fuzz::HttpData* mutable_data, uin mutable_any->set_type_url(type_url); } +void removeConnectMatcher(Protobuf::Message* message) { + envoy::extensions::filters::http::jwt_authn::v3::JwtAuthentication& config = + dynamic_cast(*message); + for (auto& rules : *config.mutable_rules()) { + if (rules.match().has_connect_matcher()) { + rules.mutable_match()->set_path("/"); + } + } +} + void cleanAttachmentTemplate(Protobuf::Message* message) { envoy::extensions::filters::http::squash::v3::Squash& config = dynamic_cast(*message); @@ -99,7 +110,8 @@ void UberFilterFuzzer::cleanFuzzedConfig(absl::string_view filter_name, const std::string name = Extensions::HttpFilters::Common::FilterNameUtil::canonicalFilterName( std::string(filter_name)); // Map filter name to clean-up function. - if (name == HttpFilterNames::get().GrpcJsonTranscoder) { + if (filter_name == HttpFilterNames::get().GrpcJsonTranscoder) { + // Add a valid service proto descriptor. addBookstoreProtoDescriptor(message); } else if (name == HttpFilterNames::get().Squash) { cleanAttachmentTemplate(message); @@ -107,6 +119,10 @@ void UberFilterFuzzer::cleanFuzzedConfig(absl::string_view filter_name, // TapDS oneof field not implemented. cleanTapConfig(message); } + if (filter_name == HttpFilterNames::get().JwtAuthn) { + // Remove when connect matcher is implemented for Jwt Authentication filter. + removeConnectMatcher(message); + } } void UberFilterFuzzer::perFilterSetup() { diff --git a/test/server/server_corpus/not_reached b/test/server/server_corpus/not_reached new file mode 100644 index 000000000000..93b264ade6c9 --- /dev/null +++ b/test/server/server_corpus/not_reached @@ -0,0 +1 @@ +static_resources { clusters { name: " " connect_timeout { nanos: 4 } lb_policy: LOAD_BALANCING_POLICY_CONFIG } } \ No newline at end of file From 8c7df0f08bd86bbeebaee3773b79cf6e3949fe3c Mon Sep 17 00:00:00 2001 From: Auni Ahsan Date: Wed, 10 Jun 2020 19:46:48 -0400 Subject: [PATCH 333/909] drain manager: Make probabilistic drain configurable (#11403) Add DrainStrategy enum to Options with Graceful and Immediate Disable probabilistic drain in DrainManager if DrainStrategy == Immediate Add integration tests Risk Level: Low. Testing: Integration tests, verify that the race condition from #11240 does not occur if the probabilistic drain is disabled. 
Signed-off-by: Auni Ahsan --- api/envoy/admin/v3/server_info.proto | 13 ++- api/envoy/admin/v4alpha/server_info.proto | 13 ++- docs/root/operations/cli.rst | 8 ++ docs/root/version_history/current.rst | 1 + .../envoy/admin/v3/server_info.proto | 13 ++- .../envoy/admin/v4alpha/server_info.proto | 13 ++- include/envoy/server/options.h | 37 +++++-- source/server/drain_manager_impl.cc | 8 +- source/server/options_impl.cc | 28 +++++- source/server/options_impl.h | 12 ++- test/integration/integration.cc | 2 +- test/integration/integration.h | 4 + test/integration/protocol_integration_test.cc | 33 ++++++- test/integration/server.cc | 21 ++-- test/integration/server.h | 17 ++-- test/mocks/server/mocks.h | 3 +- test/server/drain_manager_impl_test.cc | 99 ++++++++++++------- test/server/options_impl_test.cc | 11 ++- 18 files changed, 261 insertions(+), 75 deletions(-) diff --git a/api/envoy/admin/v3/server_info.proto b/api/envoy/admin/v3/server_info.proto index a89834bef5e6..7f8ea45650d4 100644 --- a/api/envoy/admin/v3/server_info.proto +++ b/api/envoy/admin/v3/server_info.proto @@ -54,7 +54,7 @@ message ServerInfo { CommandLineOptions command_line_options = 6; } -// [#next-free-field: 33] +// [#next-free-field: 34] message CommandLineOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.CommandLineOptions"; @@ -75,6 +75,14 @@ message CommandLineOptions { InitOnly = 2; } + enum DrainStrategy { + // Gradually discourage connections over the course of the drain period. + Gradual = 0; + + // Discourage all connections for the duration of the drain sequence. + Immediate = 1; + } + reserved 12, 20, 21; reserved "max_stats", "max_obj_name_len"; @@ -142,6 +150,9 @@ message CommandLineOptions { // See :option:`--drain-time-s` for details. google.protobuf.Duration drain_time = 17; + // See :option:`--drain-strategy` for details. + DrainStrategy drain_strategy = 33; + // See :option:`--parent-shutdown-time-s` for details. google.protobuf.Duration parent_shutdown_time = 18; diff --git a/api/envoy/admin/v4alpha/server_info.proto b/api/envoy/admin/v4alpha/server_info.proto index 04f1f1ef36d7..e3e40ac2eabc 100644 --- a/api/envoy/admin/v4alpha/server_info.proto +++ b/api/envoy/admin/v4alpha/server_info.proto @@ -54,7 +54,7 @@ message ServerInfo { CommandLineOptions command_line_options = 6; } -// [#next-free-field: 33] +// [#next-free-field: 34] message CommandLineOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.CommandLineOptions"; @@ -74,6 +74,14 @@ message CommandLineOptions { InitOnly = 2; } + enum DrainStrategy { + // Gradually discourage connections over the course of the drain period. + Gradual = 0; + + // Discourage all connections for the duration of the drain sequence. + Immediate = 1; + } + reserved 12, 20, 21; reserved "max_stats", "max_obj_name_len"; @@ -141,6 +149,9 @@ message CommandLineOptions { // See :option:`--drain-time-s` for details. google.protobuf.Duration drain_time = 17; + // See :option:`--drain-strategy` for details. + DrainStrategy drain_strategy = 33; + // See :option:`--parent-shutdown-time-s` for details. google.protobuf.Duration parent_shutdown_time = 18; diff --git a/docs/root/operations/cli.rst b/docs/root/operations/cli.rst index 8a5d029672b1..df2326ea6800 100644 --- a/docs/root/operations/cli.rst +++ b/docs/root/operations/cli.rst @@ -262,6 +262,14 @@ following are the command line options that Envoy supports. desirable to have a very long drain time. 
In service to service scenarios, it might be possible to make the drain and shutdown time much shorter (e.g., 60s/90s). +.. option:: --drain-strategy + + *(optional)* Determine behaviour of Envoy during the hot restart drain sequence. During the drain sequence, the drain manager encourages draining through terminating connections on request completion, sending "Connection: CLOSE" on HTTP1, and sending GOAWAY on HTTP2. + + * ``gradual``: *(default)* The percentage of requests encouraged to drain increases to 100% as the drain time elapses. + + * ``immediate``: All requests are encouraged to drain as soon as the drain sequence begins. + .. option:: --parent-shutdown-time-s *(optional)* The time in seconds that Envoy will wait before shutting down the parent process diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 9f8e831a91a0..465a4767fb75 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -88,6 +88,7 @@ New Features * router: more fine grained internal redirect configs are added to the :ref:`internal_redirect_policy ` field. * runtime: add new gauge :ref:`deprecated_feature_seen_since_process_start ` that gets reset across hot restarts. +* server: add the option :option:`--drain-strategy` to enable different drain strategies for DrainManager::drainClose(). * stats: added the option to :ref:`report counters as deltas ` to the metrics service stats sink. * tracing: tracing configuration has been made fully dynamic and every HTTP connection manager can now have a separate :ref:`tracing provider `. diff --git a/generated_api_shadow/envoy/admin/v3/server_info.proto b/generated_api_shadow/envoy/admin/v3/server_info.proto index a428e4b8ca4a..480ce862c6a5 100644 --- a/generated_api_shadow/envoy/admin/v3/server_info.proto +++ b/generated_api_shadow/envoy/admin/v3/server_info.proto @@ -54,7 +54,7 @@ message ServerInfo { CommandLineOptions command_line_options = 6; } -// [#next-free-field: 33] +// [#next-free-field: 34] message CommandLineOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.CommandLineOptions"; @@ -75,6 +75,14 @@ message CommandLineOptions { InitOnly = 2; } + enum DrainStrategy { + // Gradually discourage connections over the course of the drain period. + Gradual = 0; + + // Discourage all connections for the duration of the drain sequence. + Immediate = 1; + } + reserved 12; // See :option:`--base-id` for details. @@ -140,6 +148,9 @@ message CommandLineOptions { // See :option:`--drain-time-s` for details. google.protobuf.Duration drain_time = 17; + // See :option:`--drain-strategy` for details. + DrainStrategy drain_strategy = 33; + // See :option:`--parent-shutdown-time-s` for details. 
google.protobuf.Duration parent_shutdown_time = 18; diff --git a/generated_api_shadow/envoy/admin/v4alpha/server_info.proto b/generated_api_shadow/envoy/admin/v4alpha/server_info.proto index 04f1f1ef36d7..e3e40ac2eabc 100644 --- a/generated_api_shadow/envoy/admin/v4alpha/server_info.proto +++ b/generated_api_shadow/envoy/admin/v4alpha/server_info.proto @@ -54,7 +54,7 @@ message ServerInfo { CommandLineOptions command_line_options = 6; } -// [#next-free-field: 33] +// [#next-free-field: 34] message CommandLineOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.CommandLineOptions"; @@ -74,6 +74,14 @@ message CommandLineOptions { InitOnly = 2; } + enum DrainStrategy { + // Gradually discourage connections over the course of the drain period. + Gradual = 0; + + // Discourage all connections for the duration of the drain sequence. + Immediate = 1; + } + reserved 12, 20, 21; reserved "max_stats", "max_obj_name_len"; @@ -141,6 +149,9 @@ message CommandLineOptions { // See :option:`--drain-time-s` for details. google.protobuf.Duration drain_time = 17; + // See :option:`--drain-strategy` for details. + DrainStrategy drain_strategy = 33; + // See :option:`--parent-shutdown-time-s` for details. google.protobuf.Duration parent_shutdown_time = 18; diff --git a/include/envoy/server/options.h b/include/envoy/server/options.h index 1bbffc116a91..98ea52e2ce6c 100644 --- a/include/envoy/server/options.h +++ b/include/envoy/server/options.h @@ -42,6 +42,24 @@ enum class Mode { // to be validated in a non-prod environment. }; +/** + * During the drain sequence, different components ask the DrainManager + * whether to drain via drainClose(). This enum dictates the behaviour of + * drainClose() calls. + */ +enum class DrainStrategy { + /** + * The probability of drainClose() returning true increases from 0 to 100% + * over the duration of the drain period. + */ + Gradual, + + /** + * drainClose() will return true as soon as the drain sequence is initiated. + */ + Immediate, +}; + using CommandLineOptionsPtr = std::unique_ptr; /** @@ -76,10 +94,21 @@ class Options { virtual uint32_t concurrency() const PURE; /** - * @return the number of seconds that envoy will perform draining during a hot restart. + * @return the duration of the drain period in seconds. */ virtual std::chrono::seconds drainTime() const PURE; + /** + * @return the strategy that defines behaviour of DrainManager::drainClose(); + */ + virtual DrainStrategy drainStrategy() const PURE; + + /** + * @return the delay before shutting down the parent envoy in a hot restart, + * generally longer than drainTime(). + */ + virtual std::chrono::seconds parentShutdownTime() const PURE; + /** * @return const std::string& the path to the configuration file. */ @@ -154,12 +183,6 @@ class Options { */ virtual const std::string& logPath() const PURE; - /** - * @return the number of seconds that envoy will wait before shutting down the parent envoy during - * a host restart. Generally this will be longer than the drainTime() option. - */ - virtual std::chrono::seconds parentShutdownTime() const PURE; - /** * @return the restart epoch. 0 indicates the first server start, 1 the second, and so on. 
*/ diff --git a/source/server/drain_manager_impl.cc b/source/server/drain_manager_impl.cc index a89912774b50..d9b1e1dcfa8f 100644 --- a/source/server/drain_manager_impl.cc +++ b/source/server/drain_manager_impl.cc @@ -32,12 +32,18 @@ bool DrainManagerImpl::drainClose() const { return false; } + if (server_.options().drainStrategy() == Server::DrainStrategy::Immediate) { + return true; + } + ASSERT(server_.options().drainStrategy() == Server::DrainStrategy::Gradual); + + // P(return true) = elapsed time / drain timeout + // If the drain deadline is exceeded, skip the probability calculation. const MonotonicTime current_time = server_.dispatcher().timeSource().monotonicTime(); if (current_time >= drain_deadline_) { return true; } - // P(return true) = elapsed time / drain timeout const auto remaining_time = std::chrono::duration_cast(drain_deadline_ - current_time); ASSERT(server_.options().drainTime() >= remaining_time); diff --git a/source/server/options_impl.cc b/source/server/options_impl.cc index e7bb267ecd1e..ca58890a481f 100644 --- a/source/server/options_impl.cc +++ b/source/server/options_impl.cc @@ -131,6 +131,10 @@ OptionsImpl::OptionsImpl(std::vector args, TCLAP::ValueArg drain_time_s("", "drain-time-s", "Hot restart and LDS removal drain time in seconds", false, 600, "uint32_t", cmd); + TCLAP::ValueArg drain_strategy( + "", "drain-strategy", + "Hot restart drain sequence behaviour, one of 'gradual' (default) or 'immediate'.", false, + "gradual", "string", cmd); TCLAP::ValueArg parent_shutdown_time_s("", "parent-shutdown-time-s", "Hot restart parent shutdown time in seconds", false, 900, "uint32_t", cmd); @@ -262,6 +266,15 @@ OptionsImpl::OptionsImpl(std::vector args, drain_time_ = std::chrono::seconds(drain_time_s.getValue()); parent_shutdown_time_ = std::chrono::seconds(parent_shutdown_time_s.getValue()); + if (drain_strategy.getValue() == "immediate") { + drain_strategy_ = Server::DrainStrategy::Immediate; + } else if (drain_strategy.getValue() == "gradual") { + drain_strategy_ = Server::DrainStrategy::Gradual; + } else { + throw MalformedArgvException( + fmt::format("error: unknown drain-strategy '{}'", mode.getValue())); + } + if (hot_restart_version_option.getValue()) { std::cerr << hot_restart_version_cb(!hot_restart_disabled_); throw NoServingException(); @@ -365,10 +378,15 @@ Server::CommandLineOptionsPtr OptionsImpl::toCommandLineOptions() const { } command_line_options->mutable_file_flush_interval()->MergeFrom( Protobuf::util::TimeUtil::MillisecondsToDuration(fileFlushIntervalMsec().count())); - command_line_options->mutable_parent_shutdown_time()->MergeFrom( - Protobuf::util::TimeUtil::SecondsToDuration(parentShutdownTime().count())); + command_line_options->mutable_drain_time()->MergeFrom( Protobuf::util::TimeUtil::SecondsToDuration(drainTime().count())); + command_line_options->set_drain_strategy(drainStrategy() == Server::DrainStrategy::Immediate + ? 
envoy::admin::v3::CommandLineOptions::Immediate + : envoy::admin::v3::CommandLineOptions::Gradual); + command_line_options->mutable_parent_shutdown_time()->MergeFrom( + Protobuf::util::TimeUtil::SecondsToDuration(parentShutdownTime().count())); + command_line_options->set_disable_hot_restart(hotRestartDisabled()); command_line_options->set_enable_mutex_tracing(mutexTracingEnabled()); command_line_options->set_cpuset_threads(cpusetThreadsEnabled()); @@ -387,9 +405,9 @@ OptionsImpl::OptionsImpl(const std::string& service_cluster, const std::string& log_format_(Logger::Logger::DEFAULT_LOG_FORMAT), log_format_escaped_(false), restart_epoch_(0u), service_cluster_(service_cluster), service_node_(service_node), service_zone_(service_zone), file_flush_interval_msec_(10000), drain_time_(600), - parent_shutdown_time_(900), mode_(Server::Mode::Serve), hot_restart_disabled_(false), - signal_handling_enabled_(true), mutex_tracing_enabled_(false), cpuset_threads_(false), - fake_symbol_table_enabled_(false) {} + parent_shutdown_time_(900), drain_strategy_(Server::DrainStrategy::Gradual), + mode_(Server::Mode::Serve), hot_restart_disabled_(false), signal_handling_enabled_(true), + mutex_tracing_enabled_(false), cpuset_threads_(false), fake_symbol_table_enabled_(false) {} void OptionsImpl::disableExtensions(const std::vector& names) { for (const auto& name : names) { diff --git a/source/server/options_impl.h b/source/server/options_impl.h index 17217cbe60c4..216fcf9eb548 100644 --- a/source/server/options_impl.h +++ b/source/server/options_impl.h @@ -65,12 +65,13 @@ class OptionsImpl : public Server::Options, protected Logger::Loggable>& componentLogLevels() const override { @@ -129,7 +133,6 @@ class OptionsImpl : public Server::Options, protected Logger::Loggable 0 && !defer_listener_finalization_) { diff --git a/test/integration/integration.h b/test/integration/integration.h index 31ee89af7127..d3699cb7efc8 100644 --- a/test/integration/integration.h +++ b/test/integration/integration.h @@ -436,6 +436,10 @@ class BaseIntegrationTest : protected Logger::Loggable { // The duration of the drain manager graceful drain period. std::chrono::seconds drain_time_{1}; + // The DrainStrategy that dictates the behaviour of + // DrainManagerImpl::drainClose(). + Server::DrainStrategy drain_strategy_{Server::DrainStrategy::Gradual}; + // Member variables for xDS testing. FakeUpstream* xds_upstream_{}; FakeHttpConnectionPtr xds_connection_; diff --git a/test/integration/protocol_integration_test.cc b/test/integration/protocol_integration_test.cc index 3bbcf5c9ce8a..9c66e96ad56a 100644 --- a/test/integration/protocol_integration_test.cc +++ b/test/integration/protocol_integration_test.cc @@ -275,7 +275,7 @@ name: add-trailers-filter } // Add a health check filter and verify correct behavior when draining. -TEST_P(ProtocolIntegrationTest, DrainClose) { +TEST_P(ProtocolIntegrationTest, DrainCloseGradual) { // The probability of drain close increases over time. With a high timeout, // the probability will be very low, but the rapid retries prevent this from // increasing total test time. 
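To make the comment above concrete (the numbers are hypothetical, not taken from the test): under the Gradual strategy P(drainClose() == true) is roughly elapsed_time / drain_time, so one second into a 100 second drain window each call has only about a 1 / 100 = 1% chance of returning true. A single request would therefore rarely observe the drain; issuing quick repeated requests keeps the expected time to observe a drain-triggered close small without shortening the drain window itself.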
@@ -311,6 +311,37 @@ TEST_P(ProtocolIntegrationTest, DrainClose) { } } +TEST_P(ProtocolIntegrationTest, DrainCloseImmediate) { + drain_strategy_ = Server::DrainStrategy::Immediate; + drain_time_ = std::chrono::seconds(100); + config_helper_.addFilter(ConfigHelper::defaultHealthCheckFilter()); + initialize(); + + absl::Notification drain_sequence_started; + test_server_->server().dispatcher().post([this, &drain_sequence_started]() { + test_server_->drainManager().startDrainSequence([] {}); + drain_sequence_started.Notify(); + }); + drain_sequence_started.WaitForNotification(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + EXPECT_FALSE(codec_client_->disconnected()); + + IntegrationStreamDecoderPtr response; + response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); + response->waitForEndStream(); + + ASSERT_TRUE(codec_client_->waitForDisconnect()); + EXPECT_TRUE(response->complete()); + + EXPECT_EQ("200", response->headers().getStatusValue()); + if (downstream_protocol_ == Http::CodecClient::Type::HTTP2) { + EXPECT_TRUE(codec_client_->sawGoAway()); + } else { + EXPECT_EQ("close", response->headers().getConnectionValue()); + } +} + // Regression test for https://github.com/envoyproxy/envoy/issues/9873 TEST_P(ProtocolIntegrationTest, ResponseWithHostHeader) { initialize(); diff --git a/test/integration/server.cc b/test/integration/server.cc index fa3e14af7602..4b7d2beecb2e 100644 --- a/test/integration/server.cc +++ b/test/integration/server.cc @@ -32,7 +32,8 @@ namespace Server { OptionsImpl createTestOptionsImpl(const std::string& config_path, const std::string& config_yaml, Network::Address::IpVersion ip_version, FieldValidationConfig validation_config, uint32_t concurrency, - std::chrono::seconds drain_time) { + std::chrono::seconds drain_time, + Server::DrainStrategy drain_strategy) { OptionsImpl test_options("cluster_name", "node_name", "zone_name", spdlog::level::info); test_options.setConfigPath(config_path); @@ -41,6 +42,7 @@ OptionsImpl createTestOptionsImpl(const std::string& config_path, const std::str test_options.setFileFlushIntervalMsec(std::chrono::milliseconds(50)); test_options.setDrainTime(drain_time); test_options.setParentShutdownTime(std::chrono::seconds(2)); + test_options.setDrainStrategy(drain_strategy); test_options.setAllowUnkownFields(validation_config.allow_unknown_static_fields); test_options.setRejectUnknownFieldsDynamic(validation_config.reject_unknown_dynamic_fields); test_options.setIgnoreUnknownFieldsDynamic(validation_config.ignore_unknown_dynamic_fields); @@ -58,14 +60,15 @@ IntegrationTestServerPtr IntegrationTestServer::create( std::function on_server_init_function, bool deterministic, Event::TestTimeSystem& time_system, Api::Api& api, bool defer_listener_finalization, ProcessObjectOptRef process_object, Server::FieldValidationConfig validation_config, - uint32_t concurrency, std::chrono::seconds drain_time, bool use_real_stats) { + uint32_t concurrency, std::chrono::seconds drain_time, Server::DrainStrategy drain_strategy, + bool use_real_stats) { IntegrationTestServerPtr server{ std::make_unique(time_system, api, config_path, use_real_stats)}; if (server_ready_function != nullptr) { server->setOnServerReadyCb(server_ready_function); } server->start(version, on_server_init_function, deterministic, defer_listener_finalization, - process_object, validation_config, concurrency, drain_time); + process_object, validation_config, concurrency, drain_time, drain_strategy); return server; } @@ -84,14 +87,15 @@ void 
IntegrationTestServer::start(const Network::Address::IpVersion version, bool defer_listener_finalization, ProcessObjectOptRef process_object, Server::FieldValidationConfig validator_config, - uint32_t concurrency, std::chrono::seconds drain_time) { + uint32_t concurrency, std::chrono::seconds drain_time, + Server::DrainStrategy drain_strategy) { ENVOY_LOG(info, "starting integration test server"); ASSERT(!thread_); thread_ = api_.threadFactory().createThread([version, deterministic, process_object, validator_config, - concurrency, drain_time, this]() -> void { + concurrency, drain_time, drain_strategy, this]() -> void { threadRoutine(version, deterministic, process_object, validator_config, concurrency, - drain_time); + drain_time, drain_strategy); }); // If any steps need to be done prior to workers starting, do them now. E.g., xDS pre-init. @@ -167,9 +171,10 @@ void IntegrationTestServer::serverReady() { void IntegrationTestServer::threadRoutine(const Network::Address::IpVersion version, bool deterministic, ProcessObjectOptRef process_object, Server::FieldValidationConfig validation_config, - uint32_t concurrency, std::chrono::seconds drain_time) { + uint32_t concurrency, std::chrono::seconds drain_time, + Server::DrainStrategy drain_strategy) { OptionsImpl options(Server::createTestOptionsImpl(config_path_, "", version, validation_config, - concurrency, drain_time)); + concurrency, drain_time, drain_strategy)); Thread::MutexBasicLockable lock; Runtime::RandomGeneratorPtr random_generator; diff --git a/test/integration/server.h b/test/integration/server.h index 5862cd1f1bbf..155dbe036b86 100644 --- a/test/integration/server.h +++ b/test/integration/server.h @@ -40,11 +40,13 @@ struct FieldValidationConfig { }; // Create OptionsImpl structures suitable for tests. Disables hot restart. -OptionsImpl createTestOptionsImpl(const std::string& config_path, const std::string& config_yaml, - Network::Address::IpVersion ip_version, - FieldValidationConfig validation_config = FieldValidationConfig(), - uint32_t concurrency = 1, - std::chrono::seconds drain_time = std::chrono::seconds(1)); +OptionsImpl +createTestOptionsImpl(const std::string& config_path, const std::string& config_yaml, + Network::Address::IpVersion ip_version, + FieldValidationConfig validation_config = FieldValidationConfig(), + uint32_t concurrency = 1, + std::chrono::seconds drain_time = std::chrono::seconds(1), + Server::DrainStrategy drain_strategy = Server::DrainStrategy::Gradual); class TestComponentFactory : public ComponentFactory { public: @@ -369,6 +371,7 @@ class IntegrationTestServer : public Logger::Loggable, ProcessObjectOptRef process_object = absl::nullopt, Server::FieldValidationConfig validation_config = Server::FieldValidationConfig(), uint32_t concurrency = 1, std::chrono::seconds drain_time = std::chrono::seconds(1), + Server::DrainStrategy drain_strategy = Server::DrainStrategy::Gradual, bool use_real_stats = false); // Note that the derived class is responsible for tearing down the server in its // destructor. 
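As a usage note (condensed from the DrainCloseImmediate test earlier in this patch), an integration test opts into a non-default strategy by setting the new member before bringing the server up; the value is then threaded through IntegrationTestServer::create() and createTestOptionsImpl() into OptionsImpl::setDrainStrategy().

    // Inside a test body derived from BaseIntegrationTest:
    drain_strategy_ = Server::DrainStrategy::Immediate; // defaults to Gradual
    drain_time_ = std::chrono::seconds(100);
    initialize(); // plumbs both values into the test server's OptionsImpl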
@@ -392,7 +395,7 @@ class IntegrationTestServer : public Logger::Loggable, std::function on_server_init_function, bool deterministic, bool defer_listener_finalization, ProcessObjectOptRef process_object, Server::FieldValidationConfig validation_config, uint32_t concurrency, - std::chrono::seconds drain_time); + std::chrono::seconds drain_time, Server::DrainStrategy drain_strategy); void waitForCounterEq(const std::string& name, uint64_t value) override { notifyingStatsAllocator().waitForCounterFromStringEq(name, value); @@ -477,7 +480,7 @@ class IntegrationTestServer : public Logger::Loggable, void threadRoutine(const Network::Address::IpVersion version, bool deterministic, ProcessObjectOptRef process_object, Server::FieldValidationConfig validation_config, uint32_t concurrency, - std::chrono::seconds drain_time); + std::chrono::seconds drain_time, Server::DrainStrategy drain_strategy); Event::TestTimeSystem& time_system_; Api::Api& api_; diff --git a/test/mocks/server/mocks.h b/test/mocks/server/mocks.h index ac53f79c51ff..34ffef72e615 100644 --- a/test/mocks/server/mocks.h +++ b/test/mocks/server/mocks.h @@ -84,13 +84,14 @@ class MockOptions : public Options { MOCK_METHOD(const std::string&, adminAddressPath, (), (const)); MOCK_METHOD(Network::Address::IpVersion, localAddressIpVersion, (), (const)); MOCK_METHOD(std::chrono::seconds, drainTime, (), (const)); + MOCK_METHOD(std::chrono::seconds, parentShutdownTime, (), (const)); + MOCK_METHOD(Server::DrainStrategy, drainStrategy, (), (const)); MOCK_METHOD(spdlog::level::level_enum, logLevel, (), (const)); MOCK_METHOD((const std::vector>&), componentLogLevels, (), (const)); MOCK_METHOD(const std::string&, logFormat, (), (const)); MOCK_METHOD(bool, logFormatEscaped, (), (const)); MOCK_METHOD(const std::string&, logPath, (), (const)); - MOCK_METHOD(std::chrono::seconds, parentShutdownTime, (), (const)); MOCK_METHOD(uint64_t, restartEpoch, (), (const)); MOCK_METHOD(std::chrono::milliseconds, fileFlushIntervalMsec, (), (const)); MOCK_METHOD(Mode, mode, (), (const)); diff --git a/test/server/drain_manager_impl_test.cc b/test/server/drain_manager_impl_test.cc index 92a3febcef25..be09ee0ec7fb 100644 --- a/test/server/drain_manager_impl_test.cc +++ b/test/server/drain_manager_impl_test.cc @@ -19,8 +19,9 @@ namespace { constexpr int DrainTimeSeconds(600); -class DrainManagerImplTest : public testing::Test, public Event::TestUsingSimulatedTime { -public: +class DrainManagerImplTest : public Event::TestUsingSimulatedTime, + public testing::TestWithParam { +protected: DrainManagerImplTest() { ON_CALL(server_.options_, drainTime()) .WillByDefault(Return(std::chrono::seconds(DrainTimeSeconds))); @@ -59,7 +60,19 @@ TEST_F(DrainManagerImplTest, Default) { drain_timer->invokeCallback(); } -TEST_F(DrainManagerImplTest, DrainDeadline) { +TEST_F(DrainManagerImplTest, ModifyOnly) { + InSequence s; + DrainManagerImpl drain_manager(server_, envoy::config::listener::v3::Listener::MODIFY_ONLY); + + EXPECT_CALL(server_, healthCheckFailed()).Times(0); // Listener check will short-circuit + EXPECT_FALSE(drain_manager.drainClose()); +} + +TEST_P(DrainManagerImplTest, DrainDeadline) { + const bool drain_gradually = GetParam(); + ON_CALL(server_.options_, drainStrategy()) + .WillByDefault(Return(drain_gradually ? Server::DrainStrategy::Gradual + : Server::DrainStrategy::Immediate)); // TODO(auni53): Add integration tests for this once TestDrainManager is // removed. 
DrainManagerImpl drain_manager(server_, envoy::config::listener::v3::Listener::DEFAULT); @@ -71,22 +84,39 @@ TEST_F(DrainManagerImplTest, DrainDeadline) { ON_CALL(server_.options_, drainTime()) .WillByDefault(Return(std::chrono::seconds(DrainTimeSeconds))); - // random() should be called when elapsed time < drain timeout - EXPECT_CALL(server_.random_, random()).Times(2); - EXPECT_FALSE(drain_manager.drainClose()); - simTime().advanceTimeWait(std::chrono::seconds(DrainTimeSeconds - 1)); - EXPECT_FALSE(drain_manager.drainClose()); - simTime().advanceTimeWait(std::chrono::seconds(1)); - EXPECT_TRUE(drain_manager.drainClose()); - - // Test that this still works if remaining time is negative - simTime().advanceTimeWait(std::chrono::seconds(1)); - EXPECT_TRUE(drain_manager.drainClose()); - simTime().advanceTimeWait(std::chrono::seconds(500)); - EXPECT_TRUE(drain_manager.drainClose()); + if (drain_gradually) { + // random() should be called when elapsed time < drain timeout + EXPECT_CALL(server_.random_, random()).Times(2); + EXPECT_FALSE(drain_manager.drainClose()); + simTime().advanceTimeWait(std::chrono::seconds(DrainTimeSeconds - 1)); + EXPECT_FALSE(drain_manager.drainClose()); + simTime().advanceTimeWait(std::chrono::seconds(1)); + EXPECT_TRUE(drain_manager.drainClose()); + + // Test that this still works if remaining time is negative + simTime().advanceTimeWait(std::chrono::seconds(1)); + EXPECT_TRUE(drain_manager.drainClose()); + simTime().advanceTimeWait(std::chrono::seconds(500)); + EXPECT_TRUE(drain_manager.drainClose()); + } else { + EXPECT_CALL(server_.random_, random()).Times(0); + EXPECT_TRUE(drain_manager.drainClose()); + simTime().advanceTimeWait(std::chrono::seconds(DrainTimeSeconds - 1)); + EXPECT_TRUE(drain_manager.drainClose()); + simTime().advanceTimeWait(std::chrono::seconds(1)); + EXPECT_TRUE(drain_manager.drainClose()); + simTime().advanceTimeWait(std::chrono::seconds(1)); + EXPECT_TRUE(drain_manager.drainClose()); + simTime().advanceTimeWait(std::chrono::seconds(500)); + EXPECT_TRUE(drain_manager.drainClose()); + } } -TEST_F(DrainManagerImplTest, DrainDeadlineProbability) { +TEST_P(DrainManagerImplTest, DrainDeadlineProbability) { + const bool drain_gradually = GetParam(); + ON_CALL(server_.options_, drainStrategy()) + .WillByDefault(Return(drain_gradually ? Server::DrainStrategy::Gradual + : Server::DrainStrategy::Immediate)); ON_CALL(server_.random_, random()).WillByDefault(Return(4)); ON_CALL(server_.options_, drainTime()).WillByDefault(Return(std::chrono::seconds(3))); @@ -98,24 +128,27 @@ TEST_F(DrainManagerImplTest, DrainDeadlineProbability) { EXPECT_FALSE(drain_manager.drainClose()); drain_manager.startDrainSequence([] {}); - // random() should be called when elapsed time < drain timeout - EXPECT_CALL(server_.random_, random()).Times(2); - // Current elapsed time is 0 - // drainClose() will return true when elapsed time > (4 % 3 == 1). - EXPECT_FALSE(drain_manager.drainClose()); - simTime().advanceTimeWait(std::chrono::seconds(2)); - EXPECT_TRUE(drain_manager.drainClose()); - simTime().advanceTimeWait(std::chrono::seconds(1)); - EXPECT_TRUE(drain_manager.drainClose()); + if (drain_gradually) { + // random() should be called when elapsed time < drain timeout + EXPECT_CALL(server_.random_, random()).Times(2); + // Current elapsed time is 0 + // drainClose() will return true when elapsed time > (4 % 3 == 1). 
+ EXPECT_FALSE(drain_manager.drainClose()); + simTime().advanceTimeWait(std::chrono::seconds(2)); + EXPECT_TRUE(drain_manager.drainClose()); + simTime().advanceTimeWait(std::chrono::seconds(1)); + EXPECT_TRUE(drain_manager.drainClose()); + } else { + EXPECT_CALL(server_.random_, random()).Times(0); + EXPECT_TRUE(drain_manager.drainClose()); + simTime().advanceTimeWait(std::chrono::seconds(2)); + EXPECT_TRUE(drain_manager.drainClose()); + simTime().advanceTimeWait(std::chrono::seconds(1)); + EXPECT_TRUE(drain_manager.drainClose()); + } } -TEST_F(DrainManagerImplTest, ModifyOnly) { - InSequence s; - DrainManagerImpl drain_manager(server_, envoy::config::listener::v3::Listener::MODIFY_ONLY); - - EXPECT_CALL(server_, healthCheckFailed()).Times(0); // Listener check will short-circuit - EXPECT_FALSE(drain_manager.drainClose()); -} +INSTANTIATE_TEST_SUITE_P(DrainStrategies, DrainManagerImplTest, testing::Bool()); } // namespace } // namespace Server diff --git a/test/server/options_impl_test.cc b/test/server/options_impl_test.cc index 12c590a89444..83247306bc3e 100644 --- a/test/server/options_impl_test.cc +++ b/test/server/options_impl_test.cc @@ -154,10 +154,11 @@ TEST_F(OptionsImplTest, SetAll) { options->setAdminAddressPath("path"); options->setLocalAddressIpVersion(Network::Address::IpVersion::v6); options->setDrainTime(std::chrono::seconds(42)); + options->setDrainStrategy(Server::DrainStrategy::Immediate); + options->setParentShutdownTime(std::chrono::seconds(43)); options->setLogLevel(spdlog::level::trace); options->setLogFormat("%L %n %v"); options->setLogPath("/foo/bar"); - options->setParentShutdownTime(std::chrono::seconds(43)); options->setRestartEpoch(44); options->setFileFlushIntervalMsec(std::chrono::milliseconds(45)); options->setMode(Server::Mode::Validate); @@ -181,6 +182,7 @@ TEST_F(OptionsImplTest, SetAll) { EXPECT_EQ("path", options->adminAddressPath()); EXPECT_EQ(Network::Address::IpVersion::v6, options->localAddressIpVersion()); EXPECT_EQ(std::chrono::seconds(42), options->drainTime()); + EXPECT_EQ(Server::DrainStrategy::Immediate, options->drainStrategy()); EXPECT_EQ(spdlog::level::trace, options->logLevel()); EXPECT_EQ("%L %n %v", options->logFormat()); EXPECT_EQ("/foo/bar", options->logPath()); @@ -209,11 +211,13 @@ TEST_F(OptionsImplTest, SetAll) { EXPECT_EQ(envoy::admin::v3::CommandLineOptions::v6, command_line_options->local_address_ip_version()); EXPECT_EQ(options->drainTime().count(), command_line_options->drain_time().seconds()); + EXPECT_EQ(envoy::admin::v3::CommandLineOptions::Immediate, + command_line_options->drain_strategy()); + EXPECT_EQ(options->parentShutdownTime().count(), + command_line_options->parent_shutdown_time().seconds()); EXPECT_EQ(spdlog::level::to_string_view(options->logLevel()), command_line_options->log_level()); EXPECT_EQ(options->logFormat(), command_line_options->log_format()); EXPECT_EQ(options->logPath(), command_line_options->log_path()); - EXPECT_EQ(options->parentShutdownTime().count(), - command_line_options->parent_shutdown_time().seconds()); EXPECT_EQ(options->restartEpoch(), command_line_options->restart_epoch()); EXPECT_EQ(options->fileFlushIntervalMsec().count() / 1000, command_line_options->file_flush_interval().seconds()); @@ -229,6 +233,7 @@ TEST_F(OptionsImplTest, SetAll) { TEST_F(OptionsImplTest, DefaultParams) { std::unique_ptr options = createOptionsImpl("envoy -c hello"); EXPECT_EQ(std::chrono::seconds(600), options->drainTime()); + EXPECT_EQ(Server::DrainStrategy::Gradual, options->drainStrategy()); 
EXPECT_EQ(std::chrono::seconds(900), options->parentShutdownTime()); EXPECT_EQ("", options->adminAddressPath()); EXPECT_EQ(Network::Address::IpVersion::v4, options->localAddressIpVersion()); From 8cfecd9541ccbf0a6e08394aa4d5142b8f2657b9 Mon Sep 17 00:00:00 2001 From: Weixiao Huang Date: Thu, 11 Jun 2020 09:21:43 +0800 Subject: [PATCH 334/909] feat(ci): add NO_PROXY and BAZELISK_BASE_URL env in ci/run_envoy_docker.sh (#11534) Signed-off-by: weixiao-huang --- ci/run_envoy_docker.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ci/run_envoy_docker.sh b/ci/run_envoy_docker.sh index 59c8c4a1a0df..b1059c893b8a 100755 --- a/ci/run_envoy_docker.sh +++ b/ci/run_envoy_docker.sh @@ -23,12 +23,12 @@ export ENVOY_BUILD_IMAGE="${IMAGE_NAME}:${IMAGE_ID}" mkdir -p "${ENVOY_DOCKER_BUILD_DIR}" # Since we specify an explicit hash, docker-run will pull from the remote repo if missing. -docker run --rm ${ENVOY_DOCKER_OPTIONS} -e HTTP_PROXY=${http_proxy} -e HTTPS_PROXY=${https_proxy} \ +docker run --rm ${ENVOY_DOCKER_OPTIONS} -e HTTP_PROXY=${http_proxy} -e HTTPS_PROXY=${https_proxy} -e NO_PROXY=${no_proxy} \ -u "${USER}":"${USER_GROUP}" -v "${ENVOY_DOCKER_BUILD_DIR}":/build -v /var/run/docker.sock:/var/run/docker.sock \ -e BAZEL_BUILD_EXTRA_OPTIONS -e BAZEL_EXTRA_TEST_OPTIONS -e BAZEL_REMOTE_CACHE -e ENVOY_STDLIB -e BUILD_REASON \ -e BAZEL_REMOTE_INSTANCE -e GCP_SERVICE_ACCOUNT_KEY -e NUM_CPUS -e ENVOY_RBE -e FUZZIT_API_KEY -e ENVOY_BUILD_IMAGE \ -e ENVOY_SRCDIR -e ENVOY_BUILD_TARGET -e SYSTEM_PULLREQUEST_TARGETBRANCH -e SYSTEM_PULLREQUEST_PULLREQUESTNUMBER \ - -e GCS_ARTIFACT_BUCKET -e BUILD_SOURCEBRANCHNAME \ + -e GCS_ARTIFACT_BUCKET -e BUILD_SOURCEBRANCHNAME -e BAZELISK_BASE_URL \ -v "$PWD":/source --cap-add SYS_PTRACE --cap-add NET_RAW --cap-add NET_ADMIN "${ENVOY_BUILD_IMAGE}" \ /bin/bash -lc "groupadd --gid $(id -g) -f envoygroup && useradd -o --uid $(id -u) --gid $(id -g) --no-create-home \ --home-dir /build envoybuild && usermod -a -G pcap envoybuild && sudo -EHs -u envoybuild bash -c \"cd /source && $*\"" From ce9e8b0c5d24178135a494437db73590b10d071b Mon Sep 17 00:00:00 2001 From: Ilya Dmitrichenko Date: Thu, 11 Jun 2020 02:23:00 +0100 Subject: [PATCH 335/909] build: update rules_foreign_cc (#11520) The aim is to fix one of the issues encountered while cross-compiling Envoy (see #11446 and bazelbuild/rules_foreign_cc#407). 
Signed-off-by: Ilya Dmitrichenko --- bazel/repository_locations.bzl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index d54a87cc5b3c..f636e983d8af 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -368,10 +368,10 @@ DEPENDENCY_REPOSITORIES = dict( use_category = ["build"], ), rules_foreign_cc = dict( - sha256 = "3184c244b32e65637a74213fc448964b687390eeeca42a36286f874c046bba15", - strip_prefix = "rules_foreign_cc-7bc4be735b0560289f6b86ab6136ee25d20b65b7", - # 2019-09-26 - urls = ["https://github.com/bazelbuild/rules_foreign_cc/archive/7bc4be735b0560289f6b86ab6136ee25d20b65b7.tar.gz"], + sha256 = "7ca49ac5b0bc8f5a2c9a7e87b7f86aca604bda197259c9b96f8b7f0a4f38b57b", + strip_prefix = "rules_foreign_cc-f54b7ae56dcf1b81bcafed3a08d58fc08ac095a7", + # 2020-06-09 + urls = ["https://github.com/bazelbuild/rules_foreign_cc/archive/f54b7ae56dcf1b81bcafed3a08d58fc08ac095a7.tar.gz"], use_category = ["build"], ), rules_python = dict( From 6a19fe03d7986afef3a1fb17157e213d5ace1350 Mon Sep 17 00:00:00 2001 From: Ranjith Kumar Date: Thu, 11 Jun 2020 14:35:13 +0530 Subject: [PATCH 336/909] Fix collect_build_profile (#11550) Signed-off-by: Ranjith Kumar --- ci/do_ci.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ci/do_ci.sh b/ci/do_ci.sh index cee7405cf7ea..7b1221ed82e0 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -19,7 +19,7 @@ cd "${SRCDIR}" echo "building using ${NUM_CPUS} CPUs" function collect_build_profile() { - cp -f "$(bazel info output_base)/command.profile" "${ENVOY_BUILD_PROFILE}/$1.profile" || true + cp -f "$(bazel info output_base)/command.profile.gz" "${ENVOY_BUILD_PROFILE}/$1.profile.gz" || true } function bazel_with_collection() { From 98a8b700bffc9d8a89eb07465f1ec74e72f07466 Mon Sep 17 00:00:00 2001 From: Konstantin Belyalov Date: Thu, 11 Jun 2020 03:06:25 -0600 Subject: [PATCH 337/909] grpc-transcoder: bugfix with grpc-status to http-status conversion in streaming mode (#11456) Currently httpStatus is not being rewritten when trailers only gRPC streamed response. Risk Level: low Testing: added integration tests Docs Changes: not changed, bugfix Release Notes: added line to bugfixes section Signed-off-by: Konstantin Belyalov --- docs/root/version_history/current.rst | 1 + .../json_transcoder_filter.cc | 35 +++++++++---------- .../grpc_json_transcoder_integration_test.cc | 11 +++++- 3 files changed, 28 insertions(+), 19 deletions(-) diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 465a4767fb75..56b719d7e800 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -28,6 +28,7 @@ Bug Fixes --------- *Changes expected to improve the state of the world and are unlikely to have negative effects* +* grpc-json: fix a bug when in trailers only gRPC response (e.g. error) HTTP status code is not being re-written. * http: fixed a bug in the grpc_http1_reverse_bridge filter where header-only requests were forwarded with a non-zero content length. * http: fixed a bug where in some cases slash was moved from path to query string when :ref:`merging of adjacent slashes` is enabled. * http: fixed several bugs with applying correct connection close behavior across the http connection manager, health checker, and connection pool. This behavior may be temporarily reverted by setting runtime feature `envoy.reloadable_features.fix_connection_close` to false. 
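A useful mental model for the filter change below (the wire example is illustrative; the header values are borrowed from the integration tests further down): when a gRPC server fails a server-streaming call immediately, it sends a single "trailers-only" HEADERS frame carrying both the response headers and the trailers, with no body. The transcoder previously kept the downstream :status at 200 in that case; with this fix the grpc-status is mapped onto the HTTP status before headers are sent downstream.

    //  Upstream gRPC response (trailers-only, no body):
    //    :status: 200
    //    content-type: application/grpc
    //    grpc-status: 5   (NOT_FOUND)
    //    grpc-message: Shelf 37 not found
    //
    //  Downstream response after transcoding, with this fix:
    //    :status: 404
    //    content-type: application/json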
diff --git a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc index 8de5069993d4..d225304a1320 100644 --- a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc +++ b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc @@ -567,28 +567,24 @@ void JsonTranscoderFilter::doTrailers(Http::ResponseHeaderOrTrailerMap& headers_ return; } - if (method_->response_type_is_http_body_ && method_->descriptor_->server_streaming()) { - // Do not add empty json when HttpBody + streaming - // Also, headers already sent, just continue. - return; + if (!method_->response_type_is_http_body_) { + Buffer::OwnedImpl data; + readToBuffer(*transcoder_->ResponseOutput(), data); + if (data.length()) { + encoder_callbacks_->addEncodedData(data, true); + } } - Buffer::OwnedImpl data; - readToBuffer(*transcoder_->ResponseOutput(), data); - - if (data.length()) { - encoder_callbacks_->addEncodedData(data, true); - } + // If there was no previous headers frame, this |trailers| map is our |response_headers_|, + // so there is no need to copy headers from one to the other. + const bool is_trailers_only_response = response_headers_ == &headers_or_trailers; + const bool is_server_streaming = method_->descriptor_->server_streaming(); - if (method_->descriptor_->server_streaming()) { - // For streaming case, the headers are already sent, so just continue here. + if (is_server_streaming && !is_trailers_only_response) { + // Continue if headers were sent already. return; } - // If there was no previous headers frame, this |trailers| map is our |response_headers_|, - // so there is no need to copy headers from one to the other. - bool is_trailers_only_response = response_headers_ == &headers_or_trailers; - if (!grpc_status || grpc_status.value() == Grpc::Status::WellKnownGrpcStatus::InvalidCode) { response_headers_->setStatus(enumToInt(Http::Code::ServiceUnavailable)); } else { @@ -611,8 +607,11 @@ void JsonTranscoderFilter::doTrailers(Http::ResponseHeaderOrTrailerMap& headers_ response_headers_->remove(trailerHeader()); } - response_headers_->setContentLength( - encoder_callbacks_->encodingBuffer() ? encoder_callbacks_->encodingBuffer()->length() : 0); + if (!method_->descriptor_->server_streaming()) { + // Set content-length for non-streaming responses. + response_headers_->setContentLength( + encoder_callbacks_->encodingBuffer() ? encoder_callbacks_->encodingBuffer()->length() : 0); + } } void JsonTranscoderFilter::setEncoderFilterCallbacks( diff --git a/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc b/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc index c5acf3eedc69..b7e34593ff38 100644 --- a/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc +++ b/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc @@ -330,6 +330,7 @@ TEST_P(GrpcJsonTranscoderIntegrationTest, UnaryGetHttpBody) { TEST_P(GrpcJsonTranscoderIntegrationTest, StreamGetHttpBody) { HttpIntegrationTest::initialize(); + // 1. Normal streaming get testTranscoding( Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", "/indexStream"}, {":authority", "host"}}, @@ -339,6 +340,14 @@ TEST_P(GrpcJsonTranscoderIntegrationTest, StreamGetHttpBody) { Status(), Http::TestResponseHeaderMapImpl{{":status", "200"}, {"content-type", "text/html"}}, R"(

Hello!

)" R"(Hello!)"); + + // 2. Empty response (trailers only) from streaming backend, with a gRPC error. + testTranscoding( + Http::TestRequestHeaderMapImpl{ + {":method", "GET"}, {":path", "/indexStream"}, {":authority", "host"}}, + "", {""}, {}, Status(Code::NOT_FOUND, "Not Found"), + Http::TestResponseHeaderMapImpl{{":status", "404"}, {"content-type", "application/json"}}, + ""); } TEST_P(GrpcJsonTranscoderIntegrationTest, StreamGetHttpBodyMultipleFramesInData) { @@ -642,7 +651,7 @@ TEST_P(GrpcJsonTranscoderIntegrationTest, ServerStreamingGet) { Http::TestRequestHeaderMapImpl{ {":method", "GET"}, {":path", "/shelves/37/books"}, {":authority", "host"}}, "", {"shelf: 37"}, {}, Status(Code::NOT_FOUND, "Shelf 37 not found"), - Http::TestResponseHeaderMapImpl{{":status", "200"}, {"content-type", "application/json"}}, + Http::TestResponseHeaderMapImpl{{":status", "404"}, {"content-type", "application/json"}}, "[]"); } From 79d6466bca261d7657ff210476d76ab7c4919412 Mon Sep 17 00:00:00 2001 From: jianwen612 <55008549+jianwen612@users.noreply.github.com> Date: Thu, 11 Jun 2020 07:15:45 -0500 Subject: [PATCH 338/909] fuzz: Enabled "upgrade" request/response in fuzz test (#11541) Added test case with "upgrade" request/response in fuzz test. Enabled "upgrade" option by adding an EXPECT_CALL in conn_manager_impl_fuzz_test.cc. Fuzz test covers mutateRequestHeaders() and mutateResponseHeaders() with above changes. Signed-off-by: jianwen --- .../upgrade_test_case | 53 +++++++++++++++++++ .../http/conn_manager_impl_fuzz_test.cc | 6 +++ 2 files changed, 59 insertions(+) create mode 100644 test/common/http/conn_manager_impl_corpus/upgrade_test_case diff --git a/test/common/http/conn_manager_impl_corpus/upgrade_test_case b/test/common/http/conn_manager_impl_corpus/upgrade_test_case new file mode 100644 index 000000000000..8ec4c2364e32 --- /dev/null +++ b/test/common/http/conn_manager_impl_corpus/upgrade_test_case @@ -0,0 +1,53 @@ +actions { + new_stream { + request_headers { + headers { + key: ":method" + value: "GET" + } + headers { + key: ":path" + value: "/" + } + headers { + key: ":scheme" + value: "http" + } + headers { + key: ":authority" + value: "host" + } + headers { + key: "connection" + value: "upgrade" + } + headers { + key: "upgrade" + value: "WebSocket" + } + } + } +} + + +actions { + stream_action { + stream_id: 0 + response { + headers { + headers { + key: "connection" + value: "upgrade" + } + headers { + key: "upgrade" + value: "WebSocket" + } + headers { + key: ":status" + value: "101" + } + } + } + } +} diff --git a/test/common/http/conn_manager_impl_fuzz_test.cc b/test/common/http/conn_manager_impl_fuzz_test.cc index bbfbbceab89e..e151b1656fd6 100644 --- a/test/common/http/conn_manager_impl_fuzz_test.cc +++ b/test/common/http/conn_manager_impl_fuzz_test.cc @@ -91,6 +91,12 @@ class FuzzConfig : public ConnectionManagerConfig { })); EXPECT_CALL(*decoder_filter_, setDecoderFilterCallbacks(_)); EXPECT_CALL(*encoder_filter_, setEncoderFilterCallbacks(_)); + EXPECT_CALL(filter_factory_, createUpgradeFilterChain("WebSocket", _, _)) + .WillRepeatedly(Invoke([&](absl::string_view, const Http::FilterChainFactory::UpgradeMap*, + FilterChainFactoryCallbacks& callbacks) -> bool { + filter_factory_.createFilterChain(callbacks); + return true; + })); } Http::ForwardClientCertType From e34d12fdc2e405ec38a805b226f57895ade8dcb7 Mon Sep 17 00:00:00 2001 From: Florin Coras Date: Thu, 11 Jun 2020 08:17:39 -0700 Subject: [PATCH 339/909] network: add socket ip version accessor (#11514) Signed-off-by: Florin Coras 
--- include/envoy/network/socket.h | 7 +- .../addr_family_aware_socket_option_impl.cc | 28 +------- source/common/network/socket_impl.cc | 31 ++++++++- source/common/network/socket_impl.h | 3 +- source/common/network/utility.cc | 25 ++++--- ...dr_family_aware_socket_option_impl_test.cc | 68 +++---------------- .../network/socket_option_factory_test.cc | 3 +- test/common/network/utility_test.cc | 1 + .../upstream/cluster_manager_impl_test.cc | 2 + test/mocks/api/mocks.cc | 2 +- test/mocks/network/mocks.cc | 2 + test/mocks/network/mocks.h | 6 +- test/server/filter_chain_benchmark_test.cc | 7 +- test/server/listener_manager_impl_test.cc | 3 + 14 files changed, 82 insertions(+), 106 deletions(-) diff --git a/include/envoy/network/socket.h b/include/envoy/network/socket.h index 07c09474038b..411c1e8a2e92 100644 --- a/include/envoy/network/socket.h +++ b/include/envoy/network/socket.h @@ -88,6 +88,11 @@ class Socket { */ virtual Address::Type addressType() const PURE; + /** + * @return the IP version used by the socket if address type is IP, absl::nullopt otherwise + */ + virtual absl::optional ipVersion() const PURE; + /** * Close the underlying socket. */ @@ -133,7 +138,7 @@ class Socket { * Retrieves option from underlying socket (@see man 2 getsockopt) */ virtual Api::SysCallIntResult getSocketOption(int level, int optname, void* optval, - socklen_t* optlen) PURE; + socklen_t* optlen) const PURE; /** * Toggle socket blocking state diff --git a/source/common/network/addr_family_aware_socket_option_impl.cc b/source/common/network/addr_family_aware_socket_option_impl.cc index 35d870b89536..4a234e8fbca3 100644 --- a/source/common/network/addr_family_aware_socket_option_impl.cc +++ b/source/common/network/addr_family_aware_socket_option_impl.cc @@ -14,36 +14,10 @@ namespace Envoy { namespace Network { namespace { -Address::IpVersion getVersionFromAddress(Address::InstanceConstSharedPtr addr) { - if (addr->ip() != nullptr) { - return addr->ip()->version(); - } - throw EnvoyException("Unable to set socket option on non-IP sockets"); -} - -absl::optional getVersionFromSocket(const Socket& socket) { - try { - // We have local address when the socket is used in a listener but have to - // infer the IP from the socket FD when initiating connections. - // TODO(htuch): Figure out a way to obtain a consistent interface for IP - // version from socket. - if (socket.localAddress()) { - return {getVersionFromAddress(socket.localAddress())}; - } else { - return {getVersionFromAddress( - SocketInterfaceSingleton::get().addressFromFd(socket.ioHandle().fd()))}; - } - } catch (const EnvoyException&) { - // Ignore, we get here because we failed in getsockname(). - // TODO(htuch): We should probably clean up this logic to avoid relying on exceptions. 
- } - - return absl::nullopt; -} SocketOptionImplOptRef getOptionForSocket(const Socket& socket, SocketOptionImpl& ipv4_option, SocketOptionImpl& ipv6_option) { - auto version = getVersionFromSocket(socket); + auto version = socket.ipVersion(); if (!version.has_value()) { return absl::nullopt; } diff --git a/source/common/network/socket_impl.cc b/source/common/network/socket_impl.cc index 1c0e89439e68..60bb24764dd9 100644 --- a/source/common/network/socket_impl.cc +++ b/source/common/network/socket_impl.cc @@ -11,7 +11,8 @@ namespace Envoy { namespace Network { SocketImpl::SocketImpl(Socket::Type type, Address::Type addr_type, Address::IpVersion version) - : io_handle_(SocketInterfaceSingleton::get().socket(type, addr_type, version)) {} + : io_handle_(SocketInterfaceSingleton::get().socket(type, addr_type, version)), + sock_type_(type), addr_type_(addr_type) {} SocketImpl::SocketImpl(Socket::Type sock_type, const Address::InstanceConstSharedPtr addr) : io_handle_(SocketInterfaceSingleton::get().socket(sock_type, addr)), sock_type_(sock_type), @@ -98,7 +99,7 @@ Api::SysCallIntResult SocketImpl::setSocketOption(int level, int optname, const } Api::SysCallIntResult SocketImpl::getSocketOption(int level, int optname, void* optval, - socklen_t* optlen) { + socklen_t* optlen) const { return Api::OsSysCallsSingleton::get().getsockopt(io_handle_->fd(), level, optname, optval, optlen); } @@ -107,5 +108,31 @@ Api::SysCallIntResult SocketImpl::setBlockingForTest(bool blocking) { return Api::OsSysCallsSingleton::get().setsocketblocking(io_handle_->fd(), blocking); } +absl::optional SocketImpl::ipVersion() const { + if (addr_type_ == Address::Type::Ip) { + // Always hit after socket is initialized, i.e., accepted or connected + if (local_address_ != nullptr) { + return local_address_->ip()->version(); + } else { +#ifdef SOL_IP + int socket_domain; + socklen_t domain_len = sizeof(socket_domain); + auto result = getSocketOption(SOL_SOCKET, SO_DOMAIN, &socket_domain, &domain_len); + if (result.rc_ != 0) { + return absl::nullopt; + } + if (socket_domain == AF_INET) { + return Address::IpVersion::v4; + } else if (socket_domain == AF_INET6) { + return Address::IpVersion::v6; + } else { + return absl::nullopt; + } +#endif + } + } + return absl::nullopt; +} + } // namespace Network } // namespace Envoy \ No newline at end of file diff --git a/source/common/network/socket_impl.h b/source/common/network/socket_impl.h index cb84bfb1c699..eb6fa747cc70 100644 --- a/source/common/network/socket_impl.h +++ b/source/common/network/socket_impl.h @@ -46,12 +46,13 @@ class SocketImpl : public virtual Socket { Api::SysCallIntResult setSocketOption(int level, int optname, const void* optval, socklen_t optlen) override; Api::SysCallIntResult getSocketOption(int level, int optname, void* optval, - socklen_t* optlen) override; + socklen_t* optlen) const override; Api::SysCallIntResult setBlockingForTest(bool blocking) override; const OptionsSharedPtr& options() const override { return options_; } Socket::Type socketType() const override { return sock_type_; } Address::Type addressType() const override { return addr_type_; } + absl::optional ipVersion() const override; protected: SocketImpl(IoHandlePtr&& io_handle, const Address::InstanceConstSharedPtr& local_address); diff --git a/source/common/network/utility.cc b/source/common/network/utility.cc index 107b50304eac..2cdb3e614de4 100644 --- a/source/common/network/utility.cc +++ b/source/common/network/utility.cc @@ -345,25 +345,24 @@ Address::InstanceConstSharedPtr 
Utility::getAddressWithPort(const Address::Insta Address::InstanceConstSharedPtr Utility::getOriginalDst(Socket& sock) { #ifdef SOL_IP - sockaddr_storage orig_addr; - socklen_t addr_len = sizeof(sockaddr_storage); - int socket_domain; - socklen_t domain_len = sizeof(socket_domain); - // TODO(fcoras): improve once we store ip version in socket - const Api::SysCallIntResult result = - sock.getSocketOption(SOL_SOCKET, SO_DOMAIN, &socket_domain, &domain_len); - int status = result.rc_; - if (status != 0) { + if (sock.addressType() != Address::Type::Ip) { + return nullptr; + } + + auto ipVersion = sock.ipVersion(); + if (!ipVersion.has_value()) { return nullptr; } - if (socket_domain == AF_INET) { + sockaddr_storage orig_addr; + socklen_t addr_len = sizeof(sockaddr_storage); + int status; + + if (*ipVersion == Address::IpVersion::v4) { status = sock.getSocketOption(SOL_IP, SO_ORIGINAL_DST, &orig_addr, &addr_len).rc_; - } else if (socket_domain == AF_INET6) { - status = sock.getSocketOption(SOL_IPV6, IP6T_SO_ORIGINAL_DST, &orig_addr, &addr_len).rc_; } else { - return nullptr; + status = sock.getSocketOption(SOL_IPV6, IP6T_SO_ORIGINAL_DST, &orig_addr, &addr_len).rc_; } if (status != 0) { diff --git a/test/common/network/addr_family_aware_socket_option_impl_test.cc b/test/common/network/addr_family_aware_socket_option_impl_test.cc index 4e264e30f405..ff0cabd40acc 100644 --- a/test/common/network/addr_family_aware_socket_option_impl_test.cc +++ b/test/common/network/addr_family_aware_socket_option_impl_test.cc @@ -25,6 +25,7 @@ class AddrFamilyAwareSocketOptionImplTest : public SocketOptionTest { // We fail to set the option when the underlying setsockopt syscall fails. TEST_F(AddrFamilyAwareSocketOptionImplTest, SetOptionFailure) { + EXPECT_CALL(socket_, ipVersion).WillRepeatedly(testing::Return(absl::nullopt)); AddrFamilyAwareSocketOptionImpl socket_option{ envoy::config::core::v3::SocketOption::STATE_PREBIND, ENVOY_MAKE_SOCKET_OPTION_NAME(5, 10), @@ -33,24 +34,11 @@ TEST_F(AddrFamilyAwareSocketOptionImplTest, SetOptionFailure) { EXPECT_LOG_CONTAINS("warning", "Failed to set IP socket option on non-IP socket", EXPECT_FALSE(socket_option.setOption( socket_, envoy::config::core::v3::SocketOption::STATE_PREBIND))); - - Address::InstanceConstSharedPtr pipe_address = - std::make_shared("/foo"); - { - EXPECT_CALL(socket_, localAddress).WillRepeatedly(testing::ReturnRef(pipe_address)); - EXPECT_LOG_CONTAINS("warning", "Failed to set IP socket option on non-IP socket", - EXPECT_FALSE(socket_option.setOption( - socket_, envoy::config::core::v3::SocketOption::STATE_PREBIND))); - } } // If a platform supports IPv4 socket option variant for an IPv4 address, it works TEST_F(AddrFamilyAwareSocketOptionImplTest, SetOptionSuccess) { - Address::Ipv4Instance address("1.2.3.4", 5678); - IoHandlePtr io_handle = Network::SocketInterfaceSingleton::get().socket( - Socket::Type::Stream, std::make_shared(address)); - EXPECT_CALL(testing::Const(socket_), ioHandle()).WillRepeatedly(testing::ReturnRef(*io_handle)); - + EXPECT_CALL(socket_, ipVersion()).WillRepeatedly(testing::Return(Address::IpVersion::v4)); AddrFamilyAwareSocketOptionImpl socket_option{ envoy::config::core::v3::SocketOption::STATE_PREBIND, ENVOY_MAKE_SOCKET_OPTION_NAME(5, 10), @@ -62,13 +50,9 @@ TEST_F(AddrFamilyAwareSocketOptionImplTest, SetOptionSuccess) { // If a platform doesn't support IPv4 socket option variant for an IPv4 address we fail TEST_F(AddrFamilyAwareSocketOptionImplTest, V4EmptyOptionNames) { - Address::Ipv4Instance address("1.2.3.4", 
5678); - IoHandlePtr io_handle = Network::SocketInterfaceSingleton::get().socket( - Socket::Type::Stream, std::make_shared(address)); - EXPECT_CALL(testing::Const(socket_), ioHandle()).WillRepeatedly(testing::ReturnRef(*io_handle)); + EXPECT_CALL(socket_, ipVersion()).WillRepeatedly(testing::Return(Address::IpVersion::v4)); AddrFamilyAwareSocketOptionImpl socket_option{ envoy::config::core::v3::SocketOption::STATE_PREBIND, {}, {}, 1}; - EXPECT_LOG_CONTAINS("warning", "Failed to set unsupported option on socket", EXPECT_FALSE(socket_option.setOption( socket_, envoy::config::core::v3::SocketOption::STATE_PREBIND))); @@ -76,13 +60,8 @@ TEST_F(AddrFamilyAwareSocketOptionImplTest, V4EmptyOptionNames) { // If a platform doesn't support IPv4 and IPv6 socket option variants for an IPv4 address, we fail TEST_F(AddrFamilyAwareSocketOptionImplTest, V6EmptyOptionNames) { - Address::Ipv6Instance address("::1:2:3:4", 5678); - IoHandlePtr io_handle = Network::SocketInterfaceSingleton::get().socket( - Socket::Type::Stream, std::make_shared(address)); - EXPECT_CALL(testing::Const(socket_), ioHandle()).WillRepeatedly(testing::ReturnRef(*io_handle)); AddrFamilyAwareSocketOptionImpl socket_option{ envoy::config::core::v3::SocketOption::STATE_PREBIND, {}, {}, 1}; - EXPECT_LOG_CONTAINS("warning", "Failed to set unsupported option on socket", EXPECT_FALSE(socket_option.setOption( socket_, envoy::config::core::v3::SocketOption::STATE_PREBIND))); @@ -91,11 +70,7 @@ TEST_F(AddrFamilyAwareSocketOptionImplTest, V6EmptyOptionNames) { // If a platform supports IPv4 and IPv6 socket option variants for an IPv4 address, we apply the // IPv4 variant TEST_F(AddrFamilyAwareSocketOptionImplTest, V4IgnoreV6) { - Address::Ipv4Instance address("1.2.3.4", 5678); - IoHandlePtr io_handle = Network::SocketInterfaceSingleton::get().socket( - Socket::Type::Stream, std::make_shared(address)); - EXPECT_CALL(testing::Const(socket_), ioHandle()).WillRepeatedly(testing::ReturnRef(*io_handle)); - + EXPECT_CALL(socket_, ipVersion()).WillRepeatedly(testing::Return(Address::IpVersion::v4)); AddrFamilyAwareSocketOptionImpl socket_option{ envoy::config::core::v3::SocketOption::STATE_PREBIND, ENVOY_MAKE_SOCKET_OPTION_NAME(5, 10), ENVOY_MAKE_SOCKET_OPTION_NAME(6, 11), 1}; @@ -105,11 +80,7 @@ TEST_F(AddrFamilyAwareSocketOptionImplTest, V4IgnoreV6) { // If a platform supports IPv6 socket option variant for an IPv6 address it works TEST_F(AddrFamilyAwareSocketOptionImplTest, V6Only) { - Address::Ipv6Instance address("::1:2:3:4", 5678); - IoHandlePtr io_handle = Network::SocketInterfaceSingleton::get().socket( - Socket::Type::Stream, std::make_shared(address)); - EXPECT_CALL(testing::Const(socket_), ioHandle()).WillRepeatedly(testing::ReturnRef(*io_handle)); - + EXPECT_CALL(socket_, ipVersion()).WillRepeatedly(testing::Return(Address::IpVersion::v6)); AddrFamilyAwareSocketOptionImpl socket_option{ envoy::config::core::v3::SocketOption::STATE_PREBIND, {}, @@ -122,11 +93,7 @@ TEST_F(AddrFamilyAwareSocketOptionImplTest, V6Only) { // If a platform supports only the IPv4 variant for an IPv6 address, // we apply the IPv4 variant. 
TEST_F(AddrFamilyAwareSocketOptionImplTest, V6OnlyV4Fallback) { - Address::Ipv6Instance address("::1:2:3:4", 5678); - IoHandlePtr io_handle = Network::SocketInterfaceSingleton::get().socket( - Socket::Type::Stream, std::make_shared(address)); - EXPECT_CALL(testing::Const(socket_), ioHandle()).WillRepeatedly(testing::ReturnRef(*io_handle)); - + EXPECT_CALL(socket_, ipVersion()).WillRepeatedly(testing::Return(Address::IpVersion::v6)); AddrFamilyAwareSocketOptionImpl socket_option{ envoy::config::core::v3::SocketOption::STATE_PREBIND, ENVOY_MAKE_SOCKET_OPTION_NAME(5, 10), @@ -139,11 +106,7 @@ TEST_F(AddrFamilyAwareSocketOptionImplTest, V6OnlyV4Fallback) { // If a platform supports IPv4 and IPv6 socket option variants for an IPv6 address, // AddrFamilyAwareSocketOptionImpl::setIpSocketOption() works with the IPv6 variant. TEST_F(AddrFamilyAwareSocketOptionImplTest, V6Precedence) { - Address::Ipv6Instance address("::1:2:3:4", 5678); - IoHandlePtr io_handle = Network::SocketInterfaceSingleton::get().socket( - Socket::Type::Stream, std::make_shared(address)); - EXPECT_CALL(testing::Const(socket_), ioHandle()).WillRepeatedly(testing::ReturnRef(*io_handle)); - + EXPECT_CALL(socket_, ipVersion()).WillRepeatedly(testing::Return(Address::IpVersion::v6)); AddrFamilyAwareSocketOptionImpl socket_option{ envoy::config::core::v3::SocketOption::STATE_PREBIND, ENVOY_MAKE_SOCKET_OPTION_NAME(5, 10), ENVOY_MAKE_SOCKET_OPTION_NAME(6, 11), 1}; @@ -153,11 +116,7 @@ TEST_F(AddrFamilyAwareSocketOptionImplTest, V6Precedence) { // GetSocketOptionName returns the v4 information for a v4 address TEST_F(AddrFamilyAwareSocketOptionImplTest, V4GetSocketOptionName) { - Address::Ipv4Instance address("1.2.3.4", 5678); - IoHandlePtr io_handle = Network::SocketInterfaceSingleton::get().socket( - Socket::Type::Stream, std::make_shared(address)); - EXPECT_CALL(testing::Const(socket_), ioHandle()).WillRepeatedly(testing::ReturnRef(*io_handle)); - + EXPECT_CALL(socket_, ipVersion()).WillRepeatedly(testing::Return(Address::IpVersion::v4)); AddrFamilyAwareSocketOptionImpl socket_option{ envoy::config::core::v3::SocketOption::STATE_PREBIND, ENVOY_MAKE_SOCKET_OPTION_NAME(5, 10), ENVOY_MAKE_SOCKET_OPTION_NAME(6, 11), 1}; @@ -169,11 +128,7 @@ TEST_F(AddrFamilyAwareSocketOptionImplTest, V4GetSocketOptionName) { // GetSocketOptionName returns the v4 information for a v6 address TEST_F(AddrFamilyAwareSocketOptionImplTest, V6GetSocketOptionName) { - Address::Ipv6Instance address("2::1", 5678); - IoHandlePtr io_handle = Network::SocketInterfaceSingleton::get().socket( - Socket::Type::Stream, std::make_shared(address)); - EXPECT_CALL(testing::Const(socket_), ioHandle()).WillRepeatedly(testing::ReturnRef(*io_handle)); - + EXPECT_CALL(socket_, ipVersion()).WillRepeatedly(testing::Return(Address::IpVersion::v6)); AddrFamilyAwareSocketOptionImpl socket_option{ envoy::config::core::v3::SocketOption::STATE_PREBIND, ENVOY_MAKE_SOCKET_OPTION_NAME(5, 10), ENVOY_MAKE_SOCKET_OPTION_NAME(6, 11), 5}; @@ -185,8 +140,7 @@ TEST_F(AddrFamilyAwareSocketOptionImplTest, V6GetSocketOptionName) { // GetSocketOptionName returns nullopt if the state is wrong TEST_F(AddrFamilyAwareSocketOptionImplTest, GetSocketOptionWrongState) { - socket_.local_address_ = Utility::parseInternetAddress("2::1", 5678); - + EXPECT_CALL(socket_, ipVersion()).WillRepeatedly(testing::Return(Address::IpVersion::v6)); AddrFamilyAwareSocketOptionImpl socket_option{ envoy::config::core::v3::SocketOption::STATE_PREBIND, ENVOY_MAKE_SOCKET_OPTION_NAME(5, 10), ENVOY_MAKE_SOCKET_OPTION_NAME(6, 11), 
5}; @@ -202,7 +156,7 @@ TEST_F(AddrFamilyAwareSocketOptionImplTest, GetSocketOptionCannotDetermineVersio ENVOY_MAKE_SOCKET_OPTION_NAME(6, 11), 5}; IoHandlePtr io_handle = std::make_unique(); - EXPECT_CALL(testing::Const(socket_), ioHandle()).WillOnce(testing::ReturnRef(*io_handle)); + EXPECT_CALL(socket_, ipVersion).WillOnce(testing::Return(absl::nullopt)); auto result = socket_option.getOptionDetails(socket_, envoy::config::core::v3::SocketOption::STATE_PREBIND); EXPECT_FALSE(result.has_value()); diff --git a/test/common/network/socket_option_factory_test.cc b/test/common/network/socket_option_factory_test.cc index c364015d1804..7ca08149bd93 100644 --- a/test/common/network/socket_option_factory_test.cc +++ b/test/common/network/socket_option_factory_test.cc @@ -92,7 +92,7 @@ TEST_F(SocketOptionFactoryTest, TestBuildIpv4TransparentOptions) { EXPECT_EQ(1, *static_cast(optval)); return {0, 0}; })); - + EXPECT_CALL(socket_mock_, ipVersion()).WillRepeatedly(testing::Return(Address::IpVersion::v4)); EXPECT_TRUE(Network::Socket::applyOptions(options, socket_mock_, envoy::config::core::v3::SocketOption::STATE_PREBIND)); EXPECT_TRUE(Network::Socket::applyOptions(options, socket_mock_, @@ -120,6 +120,7 @@ TEST_F(SocketOptionFactoryTest, TestBuildIpv6TransparentOptions) { return {0, 0}; })); + EXPECT_CALL(socket_mock_, ipVersion()).WillRepeatedly(testing::Return(Address::IpVersion::v6)); EXPECT_TRUE(Network::Socket::applyOptions(options, socket_mock_, envoy::config::core::v3::SocketOption::STATE_PREBIND)); EXPECT_TRUE(Network::Socket::applyOptions(options, socket_mock_, diff --git a/test/common/network/utility_test.cc b/test/common/network/utility_test.cc index 8039f7c873b5..fa4b1b9ad5b4 100644 --- a/test/common/network/utility_test.cc +++ b/test/common/network/utility_test.cc @@ -171,6 +171,7 @@ TEST_P(NetworkUtilityGetLocalAddress, GetLocalAddress) { TEST(NetworkUtility, GetOriginalDst) { testing::NiceMock socket; + EXPECT_CALL(socket, ipVersion()).WillOnce(testing::Return(absl::nullopt)); EXPECT_EQ(nullptr, Utility::getOriginalDst(socket)); } diff --git a/test/common/upstream/cluster_manager_impl_test.cc b/test/common/upstream/cluster_manager_impl_test.cc index 9a45af5f71c0..9df6b7bfa99f 100644 --- a/test/common/upstream/cluster_manager_impl_test.cc +++ b/test/common/upstream/cluster_manager_impl_test.cc @@ -3093,6 +3093,8 @@ class SockoptsTest : public ClusterManagerImplTest { return {0, 0}; })); } + EXPECT_CALL(socket, ipVersion()) + .WillRepeatedly(testing::Return(Network::Address::IpVersion::v4)); EXPECT_CALL(factory_.tls_.dispatcher_, createClientConnection_(_, _, _, _)) .WillOnce(Invoke([this, &names_vals, expect_success, &socket]( Network::Address::InstanceConstSharedPtr, diff --git a/test/mocks/api/mocks.cc b/test/mocks/api/mocks.cc index f12240cfc094..e1bd53bbeca6 100644 --- a/test/mocks/api/mocks.cc +++ b/test/mocks/api/mocks.cc @@ -52,7 +52,7 @@ SysCallIntResult MockOsSysCalls::setsockopt(os_fd_t sockfd, int level, int optna SysCallIntResult MockOsSysCalls::getsockopt(os_fd_t sockfd, int level, int optname, void* optval, socklen_t* optlen) { - ASSERT(*optlen == sizeof(int)); + ASSERT(*optlen == sizeof(int) || *optlen == sizeof(sockaddr_storage)); int val = 0; const auto& it = boolsockopts_.find(SockOptKey(sockfd, level, optname)); if (it != boolsockopts_.end()) { diff --git a/test/mocks/network/mocks.cc b/test/mocks/network/mocks.cc index 81f9d03721c2..61369918ef9e 100644 --- a/test/mocks/network/mocks.cc +++ b/test/mocks/network/mocks.cc @@ -135,6 +135,7 @@ 
MockListenSocket::MockListenSocket() ON_CALL(testing::Const(*this), isOpen()).WillByDefault(Invoke([this]() { return socket_is_open_; })); + ON_CALL(*this, ipVersion()).WillByDefault(Return(local_address_->ip()->version())); } MockSocketOption::MockSocketOption() { @@ -152,6 +153,7 @@ MockConnectionSocket::MockConnectionSocket() ON_CALL(*this, directRemoteAddress()).WillByDefault(ReturnRef(remote_address_)); ON_CALL(*this, ioHandle()).WillByDefault(ReturnRef(*io_handle_)); ON_CALL(testing::Const(*this), ioHandle()).WillByDefault(ReturnRef(*io_handle_)); + ON_CALL(*this, ipVersion()).WillByDefault(Return(local_address_->ip()->version())); } MockConnectionSocket::~MockConnectionSocket() = default; diff --git a/test/mocks/network/mocks.h b/test/mocks/network/mocks.h index e4e58290ed58..9c710e1f48f4 100644 --- a/test/mocks/network/mocks.h +++ b/test/mocks/network/mocks.h @@ -221,6 +221,7 @@ class MockListenSocket : public Socket { MOCK_METHOD(const IoHandle&, ioHandle, (), (const)); MOCK_METHOD(Socket::Type, socketType, (), (const)); MOCK_METHOD(Address::Type, addressType, (), (const)); + MOCK_METHOD(absl::optional, ipVersion, (), (const)); MOCK_METHOD(void, close, ()); MOCK_METHOD(bool, isOpen, (), (const)); MOCK_METHOD(void, addOption_, (const Socket::OptionConstSharedPtr& option)); @@ -233,7 +234,7 @@ class MockListenSocket : public Socket { MOCK_METHOD(Api::SysCallIntResult, connect, (const Address::InstanceConstSharedPtr)); MOCK_METHOD(Api::SysCallIntResult, listen, (int)); MOCK_METHOD(Api::SysCallIntResult, setSocketOption, (int, int, const void*, socklen_t)); - MOCK_METHOD(Api::SysCallIntResult, getSocketOption, (int, int, void*, socklen_t*)); + MOCK_METHOD(Api::SysCallIntResult, getSocketOption, (int, int, void*, socklen_t*), (const)); MOCK_METHOD(Api::SysCallIntResult, setBlockingForTest, (bool)); IoHandlePtr io_handle_; @@ -282,6 +283,7 @@ class MockConnectionSocket : public ConnectionSocket { MOCK_METHOD(const IoHandle&, ioHandle, (), (const)); MOCK_METHOD(Socket::Type, socketType, (), (const)); MOCK_METHOD(Address::Type, addressType, (), (const)); + MOCK_METHOD(absl::optional, ipVersion, (), (const)); MOCK_METHOD(void, close, ()); MOCK_METHOD(bool, isOpen, (), (const)); MOCK_METHOD(IoHandlePtr, socket, (Socket::Type, Address::Type, Address::IpVersion), (const)); @@ -291,7 +293,7 @@ class MockConnectionSocket : public ConnectionSocket { MOCK_METHOD(Api::SysCallIntResult, connect, (const Address::InstanceConstSharedPtr)); MOCK_METHOD(Api::SysCallIntResult, listen, (int)); MOCK_METHOD(Api::SysCallIntResult, setSocketOption, (int, int, const void*, socklen_t)); - MOCK_METHOD(Api::SysCallIntResult, getSocketOption, (int, int, void*, socklen_t*)); + MOCK_METHOD(Api::SysCallIntResult, getSocketOption, (int, int, void*, socklen_t*), (const)); MOCK_METHOD(Api::SysCallIntResult, setBlockingForTest, (bool)); IoHandlePtr io_handle_; diff --git a/test/server/filter_chain_benchmark_test.cc b/test/server/filter_chain_benchmark_test.cc index 18cb7a5caf67..56a3ec4754df 100644 --- a/test/server/filter_chain_benchmark_test.cc +++ b/test/server/filter_chain_benchmark_test.cc @@ -91,6 +91,9 @@ class MockConnectionSocket : public Network::ConnectionSocket { bool isOpen() const override { return false; } Network::Socket::Type socketType() const override { return Network::Socket::Type::Stream; } Network::Address::Type addressType() const override { return local_address_->type(); } + absl::optional ipVersion() const override { + return Network::Address::IpVersion::v4; + } void setLocalAddress(const 
Network::Address::InstanceConstSharedPtr&) override {} void restoreLocalAddress(const Network::Address::InstanceConstSharedPtr&) override {} void setRemoteAddress(const Network::Address::InstanceConstSharedPtr&) override {} @@ -109,7 +112,9 @@ class MockConnectionSocket : public Network::ConnectionSocket { Api::SysCallIntResult setSocketOption(int, int, const void*, socklen_t) override { return {0, 0}; } - Api::SysCallIntResult getSocketOption(int, int, void*, socklen_t*) override { return {0, 0}; } + Api::SysCallIntResult getSocketOption(int, int, void*, socklen_t*) const override { + return {0, 0}; + } Api::SysCallIntResult setBlockingForTest(bool) override { return {0, 0}; } private: diff --git a/test/server/listener_manager_impl_test.cc b/test/server/listener_manager_impl_test.cc index a9c169fd5b55..36b1195fd28f 100644 --- a/test/server/listener_manager_impl_test.cc +++ b/test/server/listener_manager_impl_test.cc @@ -3465,6 +3465,9 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, OriginalDstFilter) { Network::FilterChainFactory& filterChainFactory = listener.filterChainFactory(); Network::MockListenerFilterManager manager; + // Return error when trying to retrieve the original dst on the invalid handle + EXPECT_CALL(os_sys_calls_, getsockopt_(_, _, _, _, _)).WillOnce(Return(-1)); + NiceMock callbacks; Network::AcceptedSocketImpl socket(std::make_unique(), Network::Address::InstanceConstSharedPtr{ From 761a04b1704adb4ac30f0c375f7c4f53d2a6ab98 Mon Sep 17 00:00:00 2001 From: Yifan Yang Date: Thu, 11 Jun 2020 12:17:31 -0400 Subject: [PATCH 340/909] deps: updating cel-cpp version used (#11524) This is a prerequisite for porting envoy to c++17 as c++17 forbids the initialization of absl::string_view() with nullptr. The most recent patch of cel-cpp master fixes an instance of this issue. 
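For illustration, a minimal sketch (not part of this patch) of the C++17 restriction the message refers to; with std::string_view (which absl::string_view may alias under C++17), constructing a view from a null char pointer is not valid, so an empty view must be default-constructed instead:

#include <string_view>

std::string_view emptyView() {
  // std::string_view bad(nullptr);  // undefined behavior under C++17
  return std::string_view{}; // well-defined empty view
}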
Signed-off-by: Yifan Yang --- bazel/repository_locations.bzl | 8 ++++---- source/extensions/filters/common/expr/evaluator.cc | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index f636e983d8af..668e856d79db 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -425,10 +425,10 @@ DEPENDENCY_REPOSITORIES = dict( cpe = "N/A", ), com_google_cel_cpp = dict( - sha256 = "326ec397b55e39f48bd5380ccded1af5b04653ee96e769cd4d694f9a3bacef50", - strip_prefix = "cel-cpp-80e1cca533190d537a780ad007e8db64164c582e", - # 2020-02-26 - urls = ["https://github.com/google/cel-cpp/archive/80e1cca533190d537a780ad007e8db64164c582e.tar.gz"], + sha256 = "1b283f93619b130504880d2f400bd449de9ab6be94ef26ecd2bb96921f48dd6c", + strip_prefix = "cel-cpp-50196761917300bbd47b59bd162e84817b67b7ab", + # 2020-06-08 + urls = ["https://github.com/google/cel-cpp/archive/50196761917300bbd47b59bd162e84817b67b7ab.tar.gz"], use_category = ["dataplane"], cpe = "N/A", ), diff --git a/source/extensions/filters/common/expr/evaluator.cc b/source/extensions/filters/common/expr/evaluator.cc index 0bd1d3554f19..e4920fd21fda 100644 --- a/source/extensions/filters/common/expr/evaluator.cc +++ b/source/extensions/filters/common/expr/evaluator.cc @@ -62,7 +62,7 @@ ExpressionPtr createExpression(Builder& builder, const google::api::expr::v1alph throw CelException( absl::StrCat("failed to create an expression: ", cel_expression_status.status().message())); } - return std::move(cel_expression_status.ValueOrDie()); + return std::move(cel_expression_status.value()); } absl::optional evaluate(const Expression& expr, Protobuf::Arena* arena, @@ -76,7 +76,7 @@ absl::optional evaluate(const Expression& expr, Protobuf::Arena* arena return {}; } - return eval_status.ValueOrDie(); + return eval_status.value(); } bool matches(const Expression& expr, const StreamInfo::StreamInfo& info, From 8b4c9431392c136296ea0b2232989fb148e510ec Mon Sep 17 00:00:00 2001 From: Christoph Pakulski Date: Thu, 11 Jun 2020 13:35:49 -0400 Subject: [PATCH 341/909] sql: create common SQL parsing library to be used by mysql and postgres (#11368) Description: Created _sqlutils_ library to be shared for common functionality between SQL filters. mysql and postgres filters will use that library to create filter metadata based on SQL query. mysql filter was already producing metadata but postges will use the new library as described in #11065. Risk Level: Low: No new functionality has been added and only mysql_proxy filter is affected Testing: Added unit tests. Docs Changes: No. Release Notes: No. 
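For illustration, a sketch (not part of this patch) of the table-to-operations metadata the new helper derives from a query, using a plain map as a stand-in for the ProtobufWkt::Struct it actually fills; the example values come from the unit tests added below:

#include <map>
#include <string>
#include <vector>

// "INSERT INTO table1 SELECT * FROM table2;" yields one entry per accessed table,
// each listing the operations performed on it.
std::map<std::string, std::vector<std::string>> exampleSqlMetadata() {
  return {{"table1", {"insert"}}, {"table2", {"select"}}};
}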
Fixes: #11320 Signed-off-by: Christoph Pakulski --- CODEOWNERS | 1 + bazel/repository_locations.bzl | 8 +- source/extensions/common/sqlutils/BUILD | 19 ++ source/extensions/common/sqlutils/sqlutils.cc | 40 ++++ source/extensions/common/sqlutils/sqlutils.h | 26 +++ .../filters/network/mysql_proxy/BUILD | 2 +- .../network/mysql_proxy/mysql_filter.cc | 33 +--- test/extensions/common/sqlutils/BUILD | 20 ++ .../common/sqlutils/sqlutils_test.cc | 182 ++++++++++++++++++ .../network/mysql_proxy/mysql_command_test.cc | 5 +- 10 files changed, 303 insertions(+), 33 deletions(-) create mode 100644 source/extensions/common/sqlutils/BUILD create mode 100644 source/extensions/common/sqlutils/sqlutils.cc create mode 100644 source/extensions/common/sqlutils/sqlutils.h create mode 100644 test/extensions/common/sqlutils/BUILD create mode 100644 test/extensions/common/sqlutils/sqlutils_test.cc diff --git a/CODEOWNERS b/CODEOWNERS index 200d8fbf9e30..17fd742943a3 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -77,6 +77,7 @@ extensions/filters/common/original_src @snowp @klarose # common crypto extension /*/extensions/common/crypto @lizan @PiotrSikora @bdecoste /*/extensions/common/proxy_protocol @alyssawilk @wez470 +/*/extensions/common/sqlutils @cpakulski @dio /*/extensions/filters/http/grpc_http1_bridge @snowp @jose /*/extensions/filters/http/gzip @gsagula @dio /*/extensions/filters/http/fault @rshriram @alyssawilk diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 668e856d79db..53e19c32abc4 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -132,10 +132,10 @@ DEPENDENCY_REPOSITORIES = dict( cpe = "N/A", ), com_github_envoyproxy_sqlparser = dict( - sha256 = "b2d3882698cf85b64c87121e208ce0b24d5fe2a00a5d058cf4571f1b25b45403", - strip_prefix = "sql-parser-b14d010afd4313f2372a1cc96aa2327e674cc798", - # 2020-01-10 - urls = ["https://github.com/envoyproxy/sql-parser/archive/b14d010afd4313f2372a1cc96aa2327e674cc798.tar.gz"], + sha256 = "96c10c8e950a141a32034f19b19cdeb1da48fe859cf96ae5e19f894f36c62c71", + strip_prefix = "sql-parser-3b40ba2d106587bdf053a292f7e3bb17e818a57f", + # 2020-06-10 + urls = ["https://github.com/envoyproxy/sql-parser/archive/3b40ba2d106587bdf053a292f7e3bb17e818a57f.tar.gz"], use_category = ["dataplane"], cpe = "N/A", ), diff --git a/source/extensions/common/sqlutils/BUILD b/source/extensions/common/sqlutils/BUILD new file mode 100644 index 000000000000..c0129c29cfc3 --- /dev/null +++ b/source/extensions/common/sqlutils/BUILD @@ -0,0 +1,19 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_library( + name = "sqlutils_lib", + srcs = ["sqlutils.cc"], + hdrs = ["sqlutils.h"], + external_deps = ["sqlparser"], + deps = [ + "//source/common/protobuf:utility_lib", + ], +) diff --git a/source/extensions/common/sqlutils/sqlutils.cc b/source/extensions/common/sqlutils/sqlutils.cc new file mode 100644 index 000000000000..023a5529e083 --- /dev/null +++ b/source/extensions/common/sqlutils/sqlutils.cc @@ -0,0 +1,40 @@ +#include "extensions/common/sqlutils/sqlutils.h" + +namespace Envoy { +namespace Extensions { +namespace Common { +namespace SQLUtils { + +bool SQLUtils::setMetadata(const std::string& query, ProtobufWkt::Struct& metadata) { + hsql::SQLParserResult result; + hsql::SQLParser::parse(query, &result); + + if (!result.isValid()) { + return false; + } + + auto& fields = *metadata.mutable_fields(); + + for (auto i = 0u; 
i < result.size(); ++i) { + if (result.getStatement(i)->type() == hsql::StatementType::kStmtShow) { + continue; + } + hsql::TableAccessMap table_access_map; + // Get names of accessed tables. + result.getStatement(i)->tablesAccessed(table_access_map); + for (auto& it : table_access_map) { + auto& operations = *fields[it.first].mutable_list_value(); + // For each table get names of operations performed on that table. + for (const auto& ot : it.second) { + operations.add_values()->set_string_value(ot); + } + } + } + + return true; +} + +} // namespace SQLUtils +} // namespace Common +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/common/sqlutils/sqlutils.h b/source/extensions/common/sqlutils/sqlutils.h new file mode 100644 index 000000000000..8519f21836fa --- /dev/null +++ b/source/extensions/common/sqlutils/sqlutils.h @@ -0,0 +1,26 @@ +#include "common/protobuf/utility.h" + +#include "include/sqlparser/SQLParser.h" + +namespace Envoy { +namespace Extensions { +namespace Common { +namespace SQLUtils { + +class SQLUtils { +public: + /** + * Method parses SQL query string and writes output to metadata. + * @param query supplies SQL statement. + * @param metadata supplies placeholder where metadata should be written. + * @return True if parsing was successful and False if parsing failed. + * If True was returned the metadata contains result of parsing. The results are + * stored in metadata.mutable_fields. + **/ + static bool setMetadata(const std::string& query, ProtobufWkt::Struct& metadata); +}; + +} // namespace SQLUtils +} // namespace Common +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/mysql_proxy/BUILD b/source/extensions/filters/network/mysql_proxy/BUILD index 99b5ebdd8ae9..152584385054 100644 --- a/source/extensions/filters/network/mysql_proxy/BUILD +++ b/source/extensions/filters/network/mysql_proxy/BUILD @@ -36,7 +36,6 @@ envoy_cc_library( "mysql_session.h", "mysql_utils.h", ], - external_deps = ["sqlparser"], deps = [ "//include/envoy/network:filter_interface", "//include/envoy/server:filter_config_interface", @@ -44,6 +43,7 @@ envoy_cc_library( "//include/envoy/stats:stats_macros", "//source/common/buffer:buffer_lib", "//source/common/network:filter_lib", + "//source/extensions/common/sqlutils:sqlutils_lib", "//source/extensions/filters/network:well_known_names", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], diff --git a/source/extensions/filters/network/mysql_proxy/mysql_filter.cc b/source/extensions/filters/network/mysql_proxy/mysql_filter.cc index 0d8be2394d15..648171e786e3 100644 --- a/source/extensions/filters/network/mysql_proxy/mysql_filter.cc +++ b/source/extensions/filters/network/mysql_proxy/mysql_filter.cc @@ -6,10 +6,9 @@ #include "common/common/assert.h" #include "common/common/logger.h" +#include "extensions/common/sqlutils/sqlutils.h" #include "extensions/filters/network/well_known_names.h" -#include "include/sqlparser/SQLParser.h" - namespace Envoy { namespace Extensions { namespace NetworkFilters { @@ -105,39 +104,21 @@ void MySQLFilter::onCommand(Command& command) { } // Parse a given query - hsql::SQLParserResult result; - hsql::SQLParser::parse(command.getData(), &result); + envoy::config::core::v3::Metadata& dynamic_metadata = + read_callbacks_->connection().streamInfo().dynamicMetadata(); + ProtobufWkt::Struct metadata( + (*dynamic_metadata.mutable_filter_metadata())[NetworkFilterNames::get().MySQLProxy]); + auto result = 
Common::SQLUtils::SQLUtils::setMetadata(command.getData(), metadata); ENVOY_CONN_LOG(trace, "mysql_proxy: query processed {}", read_callbacks_->connection(), command.getData()); - if (!result.isValid()) { + if (!result) { config_->stats_.queries_parse_error_.inc(); return; } config_->stats_.queries_parsed_.inc(); - // Set dynamic metadata - envoy::config::core::v3::Metadata& dynamic_metadata = - read_callbacks_->connection().streamInfo().dynamicMetadata(); - ProtobufWkt::Struct metadata( - (*dynamic_metadata.mutable_filter_metadata())[NetworkFilterNames::get().MySQLProxy]); - auto& fields = *metadata.mutable_fields(); - - for (auto i = 0u; i < result.size(); ++i) { - if (result.getStatement(i)->type() == hsql::StatementType::kStmtShow) { - continue; - } - hsql::TableAccessMap table_access_map; - result.getStatement(i)->tablesAccessed(table_access_map); - for (auto& it : table_access_map) { - auto& operations = *fields[it.first].mutable_list_value(); - for (const auto& ot : it.second) { - operations.add_values()->set_string_value(ot); - } - } - } - read_callbacks_->connection().streamInfo().setDynamicMetadata( NetworkFilterNames::get().MySQLProxy, metadata); } diff --git a/test/extensions/common/sqlutils/BUILD b/test/extensions/common/sqlutils/BUILD new file mode 100644 index 000000000000..0277e47706b2 --- /dev/null +++ b/test/extensions/common/sqlutils/BUILD @@ -0,0 +1,20 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_test", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_test( + name = "sqlutils_tests", + srcs = [ + "sqlutils_test.cc", + ], + external_deps = ["sqlparser"], + deps = [ + "//source/extensions/common/sqlutils:sqlutils_lib", + ], +) diff --git a/test/extensions/common/sqlutils/sqlutils_test.cc b/test/extensions/common/sqlutils/sqlutils_test.cc new file mode 100644 index 000000000000..74ce5c8dfef8 --- /dev/null +++ b/test/extensions/common/sqlutils/sqlutils_test.cc @@ -0,0 +1,182 @@ +#include "extensions/common/sqlutils/sqlutils.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace Common { +namespace SQLUtils { + +// MetadataFromSQLTest class is used for parameterized tests. +// The values in the tests are: +// std::string - SQL query +// bool - whether to expect SQL parsing to be successful +// std::map> map of expected tables accessed based on the query. +// The map is checked only when parsing was successful. Map is indexed by table name and points to +// list of operations performed on the table. For example table1: "select", "insert" says that there +// was SELECT and INSERT operations on table1. +class MetadataFromSQLTest + : public ::testing::TestWithParam< + std::tuple>>> {}; + +// Test takes SQL query as a parameter and checks if the parsing +// produces the correct metadata. +// Metadata is 2-level structure. First layer is list of resources +// over which the SQL query operates: in our case is list of tables. +// Under each table there is secondary list which contains operations performed +// on the table, like "select", "insert", etc. +TEST_P(MetadataFromSQLTest, ParsingAndMetadataTest) { + // Get the SQL query + const std::string& query = std::get<0>(GetParam()); + // vector of queries to check. 
+ std::vector test_queries; + test_queries.push_back(query); + + // Create uppercase and lowercase versions of the queries and put + // them into vector of queries to check + test_queries.push_back(absl::AsciiStrToLower(query)); + test_queries.push_back(absl::AsciiStrToUpper(query)); + + while (!test_queries.empty()) { + std::string test_query = test_queries.back(); + ProtobufWkt::Struct metadata; + + // Check if the parsing result is what expected. + ASSERT_EQ(std::get<1>(GetParam()), SQLUtils::setMetadata(test_query, metadata)); + + // If parsing was expected to fail do not check parsing values. + if (!std::get<1>(GetParam())) { + return; + } + + // Access metadata fields, where parsing results are stored. + auto& fields = *metadata.mutable_fields(); + + // Get the names of resources which SQL query operates on. + std::map> expected_tables = std::get<2>(GetParam()); + // Check if query results return the same number of resources as expected. + ASSERT_EQ(expected_tables.size(), fields.size()); + for (const auto& i : fields) { + // Get from created metadata the list of operations on the resource + const auto& operations = i; + std::string table_name = operations.first; + + std::transform(table_name.begin(), table_name.end(), table_name.begin(), + [](unsigned char c) { return std::tolower(c); }); + // Get the list of expected operations on the same resource from test param. + const auto& table_name_it = expected_tables.find(table_name); + // Make sure that a resource (table) found in metadata is expected. + ASSERT_NE(expected_tables.end(), table_name_it); + auto& operations_list = table_name_it->second; + // The number of expected operations and created in metadata must be the same. + ASSERT_EQ(operations_list.size(), operations.second.list_value().values().size()); + // Now iterate over the operations list found in metadata and check if the same operation + // is listed as expected in test param. + for (const auto& j : operations.second.list_value().values()) { + // Find that operation in test params. + const auto operation_it = + std::find(operations_list.begin(), operations_list.end(), j.string_value()); + ASSERT_NE(operations_list.end(), operation_it); + // Erase the operation. At the end of the test this list should be empty what means + // that we found all expected operations. + operations_list.erase(operation_it); + } + // Make sure that we went through all expected operations. + ASSERT_TRUE(operations_list.empty()); + // Remove the table from the list. At the end of the test this list must be empty. + expected_tables.erase(table_name_it); + } + + ASSERT_TRUE(expected_tables.empty()); + test_queries.pop_back(); + } +} + +// Note: This parameterized test's queries are converted to all lowercase and all uppercase +// to validate that parser is case-insensitive. The test routine converts to uppercase and +// lowercase entire query string, not only SQL keywords. This introduces a problem when comparing +// tables' names when verifying parsing result. Therefore the test converts table names to lowercase +// before comparing. It however requires that all table names in the queries below use lowercase +// only. +#define TEST_VALUE(...) 
\ + std::tuple>> { __VA_ARGS__ } +INSTANTIATE_TEST_SUITE_P( + SQLUtilsTestSuite, MetadataFromSQLTest, + ::testing::Values( + TEST_VALUE("blahblah;", false, {}), + + TEST_VALUE("CREATE TABLE IF NOT EXISTS table1(Usr VARCHAR(40),Count INT);", true, + {{"table1", {"create"}}}), + TEST_VALUE("CREATE TABLE IF NOT EXISTS `table number 1`(Usr VARCHAR(40),Count INT);", true, + {{"table number 1", {"create"}}}), + TEST_VALUE( + "CREATE TABLE IF NOT EXISTS table1(Usr VARCHAR(40),Count INT); SELECT * from table1;", + true, {{"table1", {"select", "create"}}}), + TEST_VALUE( + "CREATE TABLE IF NOT EXISTS table1(Usr VARCHAR(40),Count INT); SELECT * from table2;", + true, {{"table1", {"create"}}, {"table2", {"select"}}}), + + TEST_VALUE("CREATE TABLE table1(Usr VARCHAR(40),Count INT);", true, + {{"table1", {"create"}}}), + TEST_VALUE("CREATE TABLE;", false, {}), + TEST_VALUE("CREATE TEMPORARY table table1(Usr VARCHAR(40),Count INT);", true, + {{"table1", {"create"}}}), + TEST_VALUE("DROP TABLE IF EXISTS table1", true, {{"table1", {"drop"}}}), + TEST_VALUE("ALTER TABLE table1 add column Id varchar (20);", true, {{"table1", {"alter"}}}), + TEST_VALUE("INSERT INTO table1 (Usr, Count) VALUES ('allsp2', 3);", true, + {{"table1", {"insert"}}}), + TEST_VALUE("INSERT LOW_PRIORITY INTO table1 (Usr, Count) VALUES ('allsp2', 3);", true, + {{"table1", {"insert"}}}), + TEST_VALUE("INSERT IGNORE INTO table1 (Usr, Count) VALUES ('allsp2', 3);", true, + {{"table1", {"insert"}}}), + TEST_VALUE("INSERT INTO table1 (Usr, Count) VALUES ('allsp2', 3);SELECT * from table1", + true, {{"table1", {"insert", "select"}}}), + TEST_VALUE("DELETE FROM table1 WHERE Count > 3;", true, {{"table1", {"delete"}}}), + TEST_VALUE("DELETE LOW_PRIORITY FROM table1 WHERE Count > 3;", true, + {{"table1", {"delete"}}}), + TEST_VALUE("DELETE QUICK FROM table1 WHERE Count > 3;", true, {{"table1", {"delete"}}}), + TEST_VALUE("DELETE IGNORE FROM table1 WHERE Count > 3;", true, {{"table1", {"delete"}}}), + + TEST_VALUE("SELECT * FROM table1 WHERE Count = 1;", true, {{"table1", {"select"}}}), + TEST_VALUE("SELECT * FROM table1 WHERE Count = 1;", true, {{"table1", {"select"}}}), + TEST_VALUE("SELECT product.category FROM table1 WHERE Count = 1;", true, + {{"table1", {"select"}}, {"product", {"unknown"}}}), + TEST_VALUE("SELECT DISTINCT Usr FROM table1;", true, {{"table1", {"select"}}}), + TEST_VALUE("SELECT Usr, Count FROM table1 ORDER BY Count DESC;", true, + {{"table1", {"select"}}}), + TEST_VALUE("SELECT 12 AS a, a FROM table1 GROUP BY a;", true, {{"table1", {"select"}}}), + TEST_VALUE("SELECT;", false, {}), TEST_VALUE("SELECT Usr, Count FROM;", false, {}), + TEST_VALUE("INSERT INTO table1 SELECT * FROM table2;", true, + {{"table1", {"insert"}}, {"table2", {"select"}}}), + TEST_VALUE("INSERT INTO table1 SELECT tbl_temp1.fld_order_id FROM table2;", true, + {{"tbl_temp1", {"unknown"}}, {"table2", {"select"}}, {"table1", {"insert"}}}), + TEST_VALUE("UPDATE table1 SET col1 = col1 + 1", true, {{"table1", {"update"}}}), + TEST_VALUE("UPDATE LOW_PRIORITY table1 SET col1 = col1 + 1", true, + {{"table1", {"update"}}}), + TEST_VALUE("UPDATE IGNORE table1 SET col1 = col1 + 1", true, {{"table1", {"update"}}}), + TEST_VALUE("UPDATE table1 SET column1=(SELECT * columnX from table2);", true, + {{"table1", {"update"}}, {"table2", {"select"}}}), + + // operations on database should not create any metadata + TEST_VALUE("CREATE DATABASE testdb;", true, {}), + TEST_VALUE("CREATE DATABASE IF NOT EXISTS testdb;", true, {}), + TEST_VALUE("ALTER DATABASE testdb CHARACTER 
SET charset_name;", true, {}), + TEST_VALUE("ALTER DATABASE testdb default CHARACTER SET charset_name;", true, {}), + TEST_VALUE("ALTER DATABASE testdb default CHARACTER SET = charset_name;", true, {}), + TEST_VALUE("ALTER SCHEMA testdb default CHARACTER SET = charset_name;", true, {}), + + // The following DROP DATABASE tests should not produce metadata. + TEST_VALUE("DROP DATABASE testdb;", true, {}), + TEST_VALUE("DROP DATABASE IF EXISTS testdb;", true, {}), + + // Schema. Should be parsed fine, but should not produce any metadata + TEST_VALUE("SHOW databases;", true, {}), TEST_VALUE("SHOW tables;", true, {}), + TEST_VALUE("SELECT * FROM;", false, {}), + TEST_VALUE("SELECT 1 FROM tabletest1;", true, {{"tabletest1", {"select"}}}) + + )); + +} // namespace SQLUtils +} // namespace Common +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/network/mysql_proxy/mysql_command_test.cc b/test/extensions/filters/network/mysql_proxy/mysql_command_test.cc index cce430facd77..55cacdc2ab53 100644 --- a/test/extensions/filters/network/mysql_proxy/mysql_command_test.cc +++ b/test/extensions/filters/network/mysql_proxy/mysql_command_test.cc @@ -203,7 +203,7 @@ class MySQLCommandTest : public testing::Test, public MySQLTestUtils { EXPECT_EQ(1UL, result.size()); EXPECT_EQ(statement_type, result.getStatement(0)->type()); hsql::TableAccessMap table_access_map; - if (expected_table_access_map.empty()) { + if (expected_table_access_map.empty() && (statement_type == hsql::StatementType::kStmtShow)) { return; } result.getStatement(0)->tablesAccessed(table_access_map); @@ -454,7 +454,8 @@ TEST_F(MySQLCommandTest, MySQLTest20) { std::string command = buildAlter(TestResource::TABLE, table, "add column Id varchar (20)"); hsql::SQLParserResult result; EXPECT_EQ(MYSQL_SUCCESS, encodeQuery(command, result)); - expectStatementTypeAndTableAccessMap(result, hsql::StatementType::kStmtAlter, {}); + expectStatementTypeAndTableAccessMap(result, hsql::StatementType::kStmtAlter, + {{table, {"alter"}}}); } /* From be75dae90d349c4284600eacfd5688c6e5ba7f5c Mon Sep 17 00:00:00 2001 From: Auni Ahsan Date: Thu, 11 Jun 2020 13:39:46 -0400 Subject: [PATCH 342/909] integration test: Add DrainCloseIntegrationTest (#11499) Description: groups drain close tests together Risk Level: n/a (test only) Signed-off-by: Auni Ahsan --- test/integration/BUILD | 12 +++ .../drain_close_integration_test.cc | 85 +++++++++++++++++++ test/integration/integration_test.cc | 3 - test/integration/protocol_integration_test.cc | 68 --------------- 4 files changed, 97 insertions(+), 71 deletions(-) create mode 100644 test/integration/drain_close_integration_test.cc diff --git a/test/integration/BUILD b/test/integration/BUILD index 0b3f677feb25..f54fa39cad81 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -231,6 +231,18 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "drain_close_integration_test", + srcs = [ + "drain_close_integration_test.cc", + ], + deps = [ + ":http_protocol_integration_lib", + "//source/extensions/filters/http/health_check:config", + "//test/test_common:utility_lib", + ], +) + exports_files(["test_utility.sh"]) envoy_sh_test( diff --git a/test/integration/drain_close_integration_test.cc b/test/integration/drain_close_integration_test.cc new file mode 100644 index 000000000000..cbe58e973ecd --- /dev/null +++ b/test/integration/drain_close_integration_test.cc @@ -0,0 +1,85 @@ +#include "test/integration/http_protocol_integration.h" + +namespace Envoy { +namespace { + +using 
DrainCloseIntegrationTest = HttpProtocolIntegrationTest; + +// Add a health check filter and verify correct behavior when draining. +TEST_P(DrainCloseIntegrationTest, DrainCloseGradual) { + // The probability of drain close increases over time. With a high timeout, + // the probability will be very low, but the rapid retries prevent this from + // increasing total test time. + drain_time_ = std::chrono::seconds(100); + config_helper_.addFilter(ConfigHelper::defaultHealthCheckFilter()); + initialize(); + + absl::Notification drain_sequence_started; + test_server_->server().dispatcher().post([this, &drain_sequence_started]() { + test_server_->drainManager().startDrainSequence([] {}); + drain_sequence_started.Notify(); + }); + drain_sequence_started.WaitForNotification(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + EXPECT_FALSE(codec_client_->disconnected()); + + IntegrationStreamDecoderPtr response; + while (!test_server_->counter("http.config_test.downstream_cx_drain_close")->value()) { + response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); + response->waitForEndStream(); + } + EXPECT_EQ(test_server_->counter("http.config_test.downstream_cx_drain_close")->value(), 1L); + + ASSERT_TRUE(codec_client_->waitForDisconnect()); + EXPECT_TRUE(response->complete()); + + EXPECT_EQ("200", response->headers().getStatusValue()); + if (downstream_protocol_ == Http::CodecClient::Type::HTTP2) { + EXPECT_TRUE(codec_client_->sawGoAway()); + } else { + EXPECT_EQ("close", response->headers().getConnectionValue()); + } +} + +TEST_P(DrainCloseIntegrationTest, DrainCloseImmediate) { + drain_strategy_ = Server::DrainStrategy::Immediate; + drain_time_ = std::chrono::seconds(100); + config_helper_.addFilter(ConfigHelper::defaultHealthCheckFilter()); + initialize(); + + absl::Notification drain_sequence_started; + test_server_->server().dispatcher().post([this, &drain_sequence_started]() { + test_server_->drainManager().startDrainSequence([] {}); + drain_sequence_started.Notify(); + }); + drain_sequence_started.WaitForNotification(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + EXPECT_FALSE(codec_client_->disconnected()); + + IntegrationStreamDecoderPtr response; + response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); + response->waitForEndStream(); + + ASSERT_TRUE(codec_client_->waitForDisconnect()); + EXPECT_TRUE(response->complete()); + + EXPECT_EQ("200", response->headers().getStatusValue()); + if (downstream_protocol_ == Http::CodecClient::Type::HTTP2) { + EXPECT_TRUE(codec_client_->sawGoAway()); + } else { + EXPECT_EQ("close", response->headers().getConnectionValue()); + } +} + +TEST_P(DrainCloseIntegrationTest, AdminDrain) { testAdminDrain(downstreamProtocol()); } + +INSTANTIATE_TEST_SUITE_P(Protocols, DrainCloseIntegrationTest, + testing::ValuesIn(HttpProtocolIntegrationTest::getProtocolTestParams( + {Http::CodecClient::Type::HTTP1, Http::CodecClient::Type::HTTP2}, + {FakeHttpConnection::Type::HTTP1})), + HttpProtocolIntegrationTest::protocolTestParamsToString); + +} // namespace +} // namespace Envoy diff --git a/test/integration/integration_test.cc b/test/integration/integration_test.cc index 7a4d262d302f..d7b8f4a5662a 100644 --- a/test/integration/integration_test.cc +++ b/test/integration/integration_test.cc @@ -96,9 +96,6 @@ TEST_P(IntegrationTest, PerWorkerStatsAndBalancing) { check_listener_stats(0, 1); } -// Validates that the drain actually drains the listeners. 
-TEST_P(IntegrationTest, AdminDrainDrainsListeners) { testAdminDrain(downstreamProtocol()); } - TEST_P(IntegrationTest, RouterDirectResponse) { const std::string body = "Response body"; const std::string file_path = TestEnvironment::writeStringToFileForTest("test_envoy", body); diff --git a/test/integration/protocol_integration_test.cc b/test/integration/protocol_integration_test.cc index 9c66e96ad56a..1ea4ab1da5d4 100644 --- a/test/integration/protocol_integration_test.cc +++ b/test/integration/protocol_integration_test.cc @@ -274,74 +274,6 @@ name: add-trailers-filter } } -// Add a health check filter and verify correct behavior when draining. -TEST_P(ProtocolIntegrationTest, DrainCloseGradual) { - // The probability of drain close increases over time. With a high timeout, - // the probability will be very low, but the rapid retries prevent this from - // increasing total test time. - drain_time_ = std::chrono::seconds(100); - config_helper_.addFilter(ConfigHelper::defaultHealthCheckFilter()); - initialize(); - - absl::Notification drain_sequence_started; - test_server_->server().dispatcher().post([this, &drain_sequence_started]() { - test_server_->drainManager().startDrainSequence([] {}); - drain_sequence_started.Notify(); - }); - drain_sequence_started.WaitForNotification(); - - codec_client_ = makeHttpConnection(lookupPort("http")); - EXPECT_FALSE(codec_client_->disconnected()); - - IntegrationStreamDecoderPtr response; - while (!test_server_->counter("http.config_test.downstream_cx_drain_close")->value()) { - response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); - response->waitForEndStream(); - } - EXPECT_EQ(test_server_->counter("http.config_test.downstream_cx_drain_close")->value(), 1L); - - ASSERT_TRUE(codec_client_->waitForDisconnect()); - EXPECT_TRUE(response->complete()); - - EXPECT_EQ("200", response->headers().getStatusValue()); - if (downstream_protocol_ == Http::CodecClient::Type::HTTP2) { - EXPECT_TRUE(codec_client_->sawGoAway()); - } else { - EXPECT_EQ("close", response->headers().getConnectionValue()); - } -} - -TEST_P(ProtocolIntegrationTest, DrainCloseImmediate) { - drain_strategy_ = Server::DrainStrategy::Immediate; - drain_time_ = std::chrono::seconds(100); - config_helper_.addFilter(ConfigHelper::defaultHealthCheckFilter()); - initialize(); - - absl::Notification drain_sequence_started; - test_server_->server().dispatcher().post([this, &drain_sequence_started]() { - test_server_->drainManager().startDrainSequence([] {}); - drain_sequence_started.Notify(); - }); - drain_sequence_started.WaitForNotification(); - - codec_client_ = makeHttpConnection(lookupPort("http")); - EXPECT_FALSE(codec_client_->disconnected()); - - IntegrationStreamDecoderPtr response; - response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); - response->waitForEndStream(); - - ASSERT_TRUE(codec_client_->waitForDisconnect()); - EXPECT_TRUE(response->complete()); - - EXPECT_EQ("200", response->headers().getStatusValue()); - if (downstream_protocol_ == Http::CodecClient::Type::HTTP2) { - EXPECT_TRUE(codec_client_->sawGoAway()); - } else { - EXPECT_EQ("close", response->headers().getConnectionValue()); - } -} - // Regression test for https://github.com/envoyproxy/envoy/issues/9873 TEST_P(ProtocolIntegrationTest, ResponseWithHostHeader) { initialize(); From e8dc25ecec277c0b94d02151de79353a9ba07b4e Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Thu, 11 Jun 2020 15:22:49 -0400 Subject: [PATCH 343/909] upstreams: APIs, docs, and stubs for pluggable 
upstreams (#11554) split out from #11327 There's a bit of transitive ugliness: declaring the extensions requires security posture, requires stub build files, requires codeowners before the code move, but it'll be pretty short lived. Risk Level: Low (mostly only APIs) Testing: n/a Docs Changes: some of the new docs Release Notes: n/a Signed-off-by: Alyssa Wilk --- CODEOWNERS | 5 ++++ api/BUILD | 3 +++ api/envoy/config/cluster/v3/cluster.proto | 23 ++++++++++++++++++- .../config/cluster/v4alpha/cluster.proto | 23 ++++++++++++++++++- .../upstreams/http/generic/v3/BUILD | 9 ++++++++ .../generic/v3/generic_connection_pool.proto | 18 +++++++++++++++ .../extensions/upstreams/http/http/v3/BUILD | 9 ++++++++ .../http/http/v3/http_connection_pool.proto | 17 ++++++++++++++ .../extensions/upstreams/http/tcp/v3/BUILD | 9 ++++++++ .../http/tcp/v3/tcp_connection_pool.proto | 17 ++++++++++++++ api/versioning/BUILD | 3 +++ docs/root/api-v3/config/config.rst | 1 + docs/root/api-v3/config/upstream/upstream.rst | 8 +++++++ .../envoy/config/cluster/v3/cluster.proto | 23 ++++++++++++++++++- .../config/cluster/v4alpha/cluster.proto | 23 ++++++++++++++++++- .../upstreams/http/generic/v3/BUILD | 9 ++++++++ .../generic/v3/generic_connection_pool.proto | 18 +++++++++++++++ .../extensions/upstreams/http/http/v3/BUILD | 9 ++++++++ .../http/http/v3/http_connection_pool.proto | 17 ++++++++++++++ .../extensions/upstreams/http/tcp/v3/BUILD | 9 ++++++++ .../http/tcp/v3/tcp_connection_pool.proto | 17 ++++++++++++++ source/extensions/extensions_build_config.bzl | 8 +++++++ .../extensions/upstreams/http/generic/BUILD | 20 ++++++++++++++++ source/extensions/upstreams/http/http/BUILD | 20 ++++++++++++++++ source/extensions/upstreams/http/tcp/BUILD | 20 ++++++++++++++++ 25 files changed, 334 insertions(+), 4 deletions(-) create mode 100644 api/envoy/extensions/upstreams/http/generic/v3/BUILD create mode 100644 api/envoy/extensions/upstreams/http/generic/v3/generic_connection_pool.proto create mode 100644 api/envoy/extensions/upstreams/http/http/v3/BUILD create mode 100644 api/envoy/extensions/upstreams/http/http/v3/http_connection_pool.proto create mode 100644 api/envoy/extensions/upstreams/http/tcp/v3/BUILD create mode 100644 api/envoy/extensions/upstreams/http/tcp/v3/tcp_connection_pool.proto create mode 100644 docs/root/api-v3/config/upstream/upstream.rst create mode 100644 generated_api_shadow/envoy/extensions/upstreams/http/generic/v3/BUILD create mode 100644 generated_api_shadow/envoy/extensions/upstreams/http/generic/v3/generic_connection_pool.proto create mode 100644 generated_api_shadow/envoy/extensions/upstreams/http/http/v3/BUILD create mode 100644 generated_api_shadow/envoy/extensions/upstreams/http/http/v3/http_connection_pool.proto create mode 100644 generated_api_shadow/envoy/extensions/upstreams/http/tcp/v3/BUILD create mode 100644 generated_api_shadow/envoy/extensions/upstreams/http/tcp/v3/tcp_connection_pool.proto create mode 100644 source/extensions/upstreams/http/generic/BUILD create mode 100644 source/extensions/upstreams/http/http/BUILD create mode 100644 source/extensions/upstreams/http/tcp/BUILD diff --git a/CODEOWNERS b/CODEOWNERS index 17fd742943a3..9de37d0646ad 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -120,3 +120,8 @@ extensions/filters/common/original_src @snowp @klarose /*/extensions/compression/common @junr03 @rojkov /*/extensions/compression/gzip @junr03 @rojkov /*/extensions/filters/http/decompressor @rojkov @dio +# Core upstream code +extensions/upstreams/http @alyssawilk @snowp @mattklein123 
+extensions/upstreams/http/http @alyssawilk @snowp @mattklein123 +extensions/upstreams/http/tcp @alyssawilk @mattklein123 +extensions/upstreams/http/default @alyssawilk @snowp @mattklein123 diff --git a/api/BUILD b/api/BUILD index 8e8831ea7e41..e803ebf19244 100644 --- a/api/BUILD +++ b/api/BUILD @@ -233,6 +233,9 @@ proto_library( "//envoy/extensions/transport_sockets/raw_buffer/v3:pkg", "//envoy/extensions/transport_sockets/tap/v3:pkg", "//envoy/extensions/transport_sockets/tls/v3:pkg", + "//envoy/extensions/upstreams/http/generic/v3:pkg", + "//envoy/extensions/upstreams/http/http/v3:pkg", + "//envoy/extensions/upstreams/http/tcp/v3:pkg", "//envoy/extensions/wasm/v3:pkg", "//envoy/service/accesslog/v3:pkg", "//envoy/service/auth/v3:pkg", diff --git a/api/envoy/config/cluster/v3/cluster.proto b/api/envoy/config/cluster/v3/cluster.proto index 5817fb254fad..69c3a1e62f39 100644 --- a/api/envoy/config/cluster/v3/cluster.proto +++ b/api/envoy/config/cluster/v3/cluster.proto @@ -8,6 +8,7 @@ import "envoy/config/cluster/v3/outlier_detection.proto"; import "envoy/config/core/v3/address.proto"; import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/config_source.proto"; +import "envoy/config/core/v3/extension.proto"; import "envoy/config/core/v3/health_check.proto"; import "envoy/config/core/v3/protocol.proto"; import "envoy/config/endpoint/v3/endpoint.proto"; @@ -32,7 +33,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Cluster configuration] // Configuration for a single upstream cluster. -// [#next-free-field: 48] +// [#next-free-field: 49] message Cluster { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Cluster"; @@ -816,6 +817,26 @@ message Cluster { // of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value // of 100 would indicate that the request took the entirety of the timeout given to it. bool track_timeout_budgets = 47; + + // [#not-implemented-hide:] + // Optional customization and configuration of upstream connection pool, and upstream type. + // + // Currently this field only applies for HTTP traffic but is designed for eventual use for custom + // TCP upstreams. + // + // For HTTP traffic, Envoy will generally take downstream HTTP and send it upstream as upstream + // HTTP, using the http connection pool and the codec from `http2_protocol_options` + // + // For routes where CONNECT termination is configured, Envoy will take downstream CONNECT + // requests and forward the CONNECT payload upstream over raw TCP using the tcp connection pool. + // + // The default pool used is the generic connection pool which creates the HTTP upstream for most + // HTTP requests, and the TCP upstream if CONNECT termination is configured. + // + // If users desire custom connection pool or upstream behavior, for example terminating + // CONNECT only if a custom filter indicates it is appropriate, the custom factories + // can be registered and configured here. + core.v3.TypedExtensionConfig upstream_config = 48; } // [#not-implemented-hide:] Extensible load balancing policy configuration. 
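For orientation, the following is a minimal sketch of how a cluster could select the generic connection pool through the new `upstream_config` field once these extensions land. The extension name and proto type are taken from the packages added in this patch; the cluster name and endpoint address are placeholders, and because the field is still marked [#not-implemented-hide:] the snippet is illustrative only, not a configuration Envoy accepts today.

  clusters:
  - name: example_backend              # placeholder cluster name
    connect_timeout: 0.25s
    type: STRICT_DNS
    lb_policy: ROUND_ROBIN
    load_assignment:
      cluster_name: example_backend
      endpoints:
      - lb_endpoints:
        - endpoint:
            address:
              socket_address: { address: backend.example.com, port_value: 8080 }  # placeholder endpoint
    # Hypothetical once upstream_config is implemented: explicitly pick the
    # generic pool (HTTP upstream for ordinary requests, TCP upstream when
    # CONNECT termination is configured on the route).
    upstream_config:
      name: envoy.upstreams.http.generic
      typed_config:
        "@type": type.googleapis.com/envoy.extensions.upstreams.http.generic.v3.GenericConnectionPoolProto

Since the generic pool is also the documented default, setting this explicitly only becomes interesting when a custom factory (for example one that terminates CONNECT only when a custom filter says so) is registered and configured in its place.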
diff --git a/api/envoy/config/cluster/v4alpha/cluster.proto b/api/envoy/config/cluster/v4alpha/cluster.proto index 454a9c163f49..e3ab0c16b258 100644 --- a/api/envoy/config/cluster/v4alpha/cluster.proto +++ b/api/envoy/config/cluster/v4alpha/cluster.proto @@ -8,6 +8,7 @@ import "envoy/config/cluster/v4alpha/outlier_detection.proto"; import "envoy/config/core/v4alpha/address.proto"; import "envoy/config/core/v4alpha/base.proto"; import "envoy/config/core/v4alpha/config_source.proto"; +import "envoy/config/core/v4alpha/extension.proto"; import "envoy/config/core/v4alpha/health_check.proto"; import "envoy/config/core/v4alpha/protocol.proto"; import "envoy/config/endpoint/v3/endpoint.proto"; @@ -32,7 +33,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // [#protodoc-title: Cluster configuration] // Configuration for a single upstream cluster. -// [#next-free-field: 48] +// [#next-free-field: 49] message Cluster { option (udpa.annotations.versioning).previous_message_type = "envoy.config.cluster.v3.Cluster"; @@ -817,6 +818,26 @@ message Cluster { // of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value // of 100 would indicate that the request took the entirety of the timeout given to it. bool track_timeout_budgets = 47; + + // [#not-implemented-hide:] + // Optional customization and configuration of upstream connection pool, and upstream type. + // + // Currently this field only applies for HTTP traffic but is designed for eventual use for custom + // TCP upstreams. + // + // For HTTP traffic, Envoy will generally take downstream HTTP and send it upstream as upstream + // HTTP, using the http connection pool and the codec from `http2_protocol_options` + // + // For routes where CONNECT termination is configured, Envoy will take downstream CONNECT + // requests and forward the CONNECT payload upstream over raw TCP using the tcp connection pool. + // + // The default pool used is the generic connection pool which creates the HTTP upstream for most + // HTTP requests, and the TCP upstream if CONNECT termination is configured. + // + // If users desire custom connection pool or upstream behavior, for example terminating + // CONNECT only if a custom filter indicates it is appropriate, the custom factories + // can be registered and configured here. + core.v4alpha.TypedExtensionConfig upstream_config = 48; } // [#not-implemented-hide:] Extensible load balancing policy configuration. diff --git a/api/envoy/extensions/upstreams/http/generic/v3/BUILD b/api/envoy/extensions/upstreams/http/generic/v3/BUILD new file mode 100644 index 000000000000..ef3541ebcb1d --- /dev/null +++ b/api/envoy/extensions/upstreams/http/generic/v3/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/api/envoy/extensions/upstreams/http/generic/v3/generic_connection_pool.proto b/api/envoy/extensions/upstreams/http/generic/v3/generic_connection_pool.proto new file mode 100644 index 000000000000..c6b02364aa2d --- /dev/null +++ b/api/envoy/extensions/upstreams/http/generic/v3/generic_connection_pool.proto @@ -0,0 +1,18 @@ +syntax = "proto3"; + +package envoy.extensions.upstreams.http.generic.v3; + +import "udpa/annotations/status.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.upstreams.http.generic.v3"; +option java_outer_classname = "GenericConnectionPoolProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Generic Connection Pool] + +// A connection pool which forwards downstream HTTP as TCP or HTTP to upstream, +// based on CONNECT configuration. +// [#extension: envoy.upstreams.http.generic] +message GenericConnectionPoolProto { +} diff --git a/api/envoy/extensions/upstreams/http/http/v3/BUILD b/api/envoy/extensions/upstreams/http/http/v3/BUILD new file mode 100644 index 000000000000..ef3541ebcb1d --- /dev/null +++ b/api/envoy/extensions/upstreams/http/http/v3/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/api/envoy/extensions/upstreams/http/http/v3/http_connection_pool.proto b/api/envoy/extensions/upstreams/http/http/v3/http_connection_pool.proto new file mode 100644 index 000000000000..e4c2d6ff9b84 --- /dev/null +++ b/api/envoy/extensions/upstreams/http/http/v3/http_connection_pool.proto @@ -0,0 +1,17 @@ +syntax = "proto3"; + +package envoy.extensions.upstreams.http.http.v3; + +import "udpa/annotations/status.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.upstreams.http.http.v3"; +option java_outer_classname = "HttpConnectionPoolProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Http Connection Pool] + +// A connection pool which forwards downstream HTTP as HTTP to upstream. +// [#extension: envoy.upstreams.http.http] +message HttpConnectionPoolProto { +} diff --git a/api/envoy/extensions/upstreams/http/tcp/v3/BUILD b/api/envoy/extensions/upstreams/http/tcp/v3/BUILD new file mode 100644 index 000000000000..ef3541ebcb1d --- /dev/null +++ b/api/envoy/extensions/upstreams/http/tcp/v3/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/api/envoy/extensions/upstreams/http/tcp/v3/tcp_connection_pool.proto b/api/envoy/extensions/upstreams/http/tcp/v3/tcp_connection_pool.proto new file mode 100644 index 000000000000..5bc8734cb3f7 --- /dev/null +++ b/api/envoy/extensions/upstreams/http/tcp/v3/tcp_connection_pool.proto @@ -0,0 +1,17 @@ +syntax = "proto3"; + +package envoy.extensions.upstreams.http.tcp.v3; + +import "udpa/annotations/status.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.upstreams.http.tcp.v3"; +option java_outer_classname = "TcpConnectionPoolProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Tcp Connection Pool] + +// A connection pool which forwards downstream HTTP as TCP to upstream, +// [#extension: envoy.upstreams.http.tcp] +message TcpConnectionPoolProto { +} diff --git a/api/versioning/BUILD b/api/versioning/BUILD index 9f62a77d9a5f..c26c4a894093 100644 --- a/api/versioning/BUILD +++ b/api/versioning/BUILD @@ -116,6 +116,9 @@ proto_library( "//envoy/extensions/transport_sockets/raw_buffer/v3:pkg", "//envoy/extensions/transport_sockets/tap/v3:pkg", "//envoy/extensions/transport_sockets/tls/v3:pkg", + "//envoy/extensions/upstreams/http/generic/v3:pkg", + "//envoy/extensions/upstreams/http/http/v3:pkg", + "//envoy/extensions/upstreams/http/tcp/v3:pkg", "//envoy/extensions/wasm/v3:pkg", "//envoy/service/accesslog/v3:pkg", "//envoy/service/auth/v3:pkg", diff --git a/docs/root/api-v3/config/config.rst b/docs/root/api-v3/config/config.rst index e1ccac77719d..23518f83cc19 100644 --- a/docs/root/api-v3/config/config.rst +++ b/docs/root/api-v3/config/config.rst @@ -19,3 +19,4 @@ Extensions trace/trace internal_redirect/internal_redirect endpoint/endpoint + upstream/upstream diff --git a/docs/root/api-v3/config/upstream/upstream.rst b/docs/root/api-v3/config/upstream/upstream.rst new file mode 100644 index 000000000000..5047eaa92b28 --- /dev/null +++ b/docs/root/api-v3/config/upstream/upstream.rst @@ -0,0 +1,8 @@ +Upstream Configuration +====================== + +.. toctree:: + :glob: + :maxdepth: 3 + + ../../extensions/upstreams/http/*/v3/** diff --git a/generated_api_shadow/envoy/config/cluster/v3/cluster.proto b/generated_api_shadow/envoy/config/cluster/v3/cluster.proto index 298f874a946b..523162df2247 100644 --- a/generated_api_shadow/envoy/config/cluster/v3/cluster.proto +++ b/generated_api_shadow/envoy/config/cluster/v3/cluster.proto @@ -8,6 +8,7 @@ import "envoy/config/cluster/v3/outlier_detection.proto"; import "envoy/config/core/v3/address.proto"; import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/config_source.proto"; +import "envoy/config/core/v3/extension.proto"; import "envoy/config/core/v3/health_check.proto"; import "envoy/config/core/v3/protocol.proto"; import "envoy/config/endpoint/v3/endpoint.proto"; @@ -33,7 +34,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Cluster configuration] // Configuration for a single upstream cluster. -// [#next-free-field: 48] +// [#next-free-field: 49] message Cluster { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Cluster"; @@ -815,6 +816,26 @@ message Cluster { // of 100 would indicate that the request took the entirety of the timeout given to it. 
bool track_timeout_budgets = 47; + // [#not-implemented-hide:] + // Optional customization and configuration of upstream connection pool, and upstream type. + // + // Currently this field only applies for HTTP traffic but is designed for eventual use for custom + // TCP upstreams. + // + // For HTTP traffic, Envoy will generally take downstream HTTP and send it upstream as upstream + // HTTP, using the http connection pool and the codec from `http2_protocol_options` + // + // For routes where CONNECT termination is configured, Envoy will take downstream CONNECT + // requests and forward the CONNECT payload upstream over raw TCP using the tcp connection pool. + // + // The default pool used is the generic connection pool which creates the HTTP upstream for most + // HTTP requests, and the TCP upstream if CONNECT termination is configured. + // + // If users desire custom connection pool or upstream behavior, for example terminating + // CONNECT only if a custom filter indicates it is appropriate, the custom factories + // can be registered and configured here. + core.v3.TypedExtensionConfig upstream_config = 48; + repeated core.v3.Address hidden_envoy_deprecated_hosts = 7 [deprecated = true]; envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext hidden_envoy_deprecated_tls_context = diff --git a/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto b/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto index 454a9c163f49..e3ab0c16b258 100644 --- a/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto +++ b/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto @@ -8,6 +8,7 @@ import "envoy/config/cluster/v4alpha/outlier_detection.proto"; import "envoy/config/core/v4alpha/address.proto"; import "envoy/config/core/v4alpha/base.proto"; import "envoy/config/core/v4alpha/config_source.proto"; +import "envoy/config/core/v4alpha/extension.proto"; import "envoy/config/core/v4alpha/health_check.proto"; import "envoy/config/core/v4alpha/protocol.proto"; import "envoy/config/endpoint/v3/endpoint.proto"; @@ -32,7 +33,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // [#protodoc-title: Cluster configuration] // Configuration for a single upstream cluster. -// [#next-free-field: 48] +// [#next-free-field: 49] message Cluster { option (udpa.annotations.versioning).previous_message_type = "envoy.config.cluster.v3.Cluster"; @@ -817,6 +818,26 @@ message Cluster { // of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value // of 100 would indicate that the request took the entirety of the timeout given to it. bool track_timeout_budgets = 47; + + // [#not-implemented-hide:] + // Optional customization and configuration of upstream connection pool, and upstream type. + // + // Currently this field only applies for HTTP traffic but is designed for eventual use for custom + // TCP upstreams. + // + // For HTTP traffic, Envoy will generally take downstream HTTP and send it upstream as upstream + // HTTP, using the http connection pool and the codec from `http2_protocol_options` + // + // For routes where CONNECT termination is configured, Envoy will take downstream CONNECT + // requests and forward the CONNECT payload upstream over raw TCP using the tcp connection pool. + // + // The default pool used is the generic connection pool which creates the HTTP upstream for most + // HTTP requests, and the TCP upstream if CONNECT termination is configured. 
+ // + // If users desire custom connection pool or upstream behavior, for example terminating + // CONNECT only if a custom filter indicates it is appropriate, the custom factories + // can be registered and configured here. + core.v4alpha.TypedExtensionConfig upstream_config = 48; } // [#not-implemented-hide:] Extensible load balancing policy configuration. diff --git a/generated_api_shadow/envoy/extensions/upstreams/http/generic/v3/BUILD b/generated_api_shadow/envoy/extensions/upstreams/http/generic/v3/BUILD new file mode 100644 index 000000000000..ef3541ebcb1d --- /dev/null +++ b/generated_api_shadow/envoy/extensions/upstreams/http/generic/v3/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/generated_api_shadow/envoy/extensions/upstreams/http/generic/v3/generic_connection_pool.proto b/generated_api_shadow/envoy/extensions/upstreams/http/generic/v3/generic_connection_pool.proto new file mode 100644 index 000000000000..c6b02364aa2d --- /dev/null +++ b/generated_api_shadow/envoy/extensions/upstreams/http/generic/v3/generic_connection_pool.proto @@ -0,0 +1,18 @@ +syntax = "proto3"; + +package envoy.extensions.upstreams.http.generic.v3; + +import "udpa/annotations/status.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.upstreams.http.generic.v3"; +option java_outer_classname = "GenericConnectionPoolProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Generic Connection Pool] + +// A connection pool which forwards downstream HTTP as TCP or HTTP to upstream, +// based on CONNECT configuration. +// [#extension: envoy.upstreams.http.generic] +message GenericConnectionPoolProto { +} diff --git a/generated_api_shadow/envoy/extensions/upstreams/http/http/v3/BUILD b/generated_api_shadow/envoy/extensions/upstreams/http/http/v3/BUILD new file mode 100644 index 000000000000..ef3541ebcb1d --- /dev/null +++ b/generated_api_shadow/envoy/extensions/upstreams/http/http/v3/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/generated_api_shadow/envoy/extensions/upstreams/http/http/v3/http_connection_pool.proto b/generated_api_shadow/envoy/extensions/upstreams/http/http/v3/http_connection_pool.proto new file mode 100644 index 000000000000..e4c2d6ff9b84 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/upstreams/http/http/v3/http_connection_pool.proto @@ -0,0 +1,17 @@ +syntax = "proto3"; + +package envoy.extensions.upstreams.http.http.v3; + +import "udpa/annotations/status.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.upstreams.http.http.v3"; +option java_outer_classname = "HttpConnectionPoolProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Http Connection Pool] + +// A connection pool which forwards downstream HTTP as HTTP to upstream. 
+// [#extension: envoy.upstreams.http.http] +message HttpConnectionPoolProto { +} diff --git a/generated_api_shadow/envoy/extensions/upstreams/http/tcp/v3/BUILD b/generated_api_shadow/envoy/extensions/upstreams/http/tcp/v3/BUILD new file mode 100644 index 000000000000..ef3541ebcb1d --- /dev/null +++ b/generated_api_shadow/envoy/extensions/upstreams/http/tcp/v3/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/generated_api_shadow/envoy/extensions/upstreams/http/tcp/v3/tcp_connection_pool.proto b/generated_api_shadow/envoy/extensions/upstreams/http/tcp/v3/tcp_connection_pool.proto new file mode 100644 index 000000000000..5bc8734cb3f7 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/upstreams/http/tcp/v3/tcp_connection_pool.proto @@ -0,0 +1,17 @@ +syntax = "proto3"; + +package envoy.extensions.upstreams.http.tcp.v3; + +import "udpa/annotations/status.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.upstreams.http.tcp.v3"; +option java_outer_classname = "TcpConnectionPoolProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Tcp Connection Pool] + +// A connection pool which forwards downstream HTTP as TCP to upstream, +// [#extension: envoy.upstreams.http.tcp] +message TcpConnectionPoolProto { +} diff --git a/source/extensions/extensions_build_config.bzl b/source/extensions/extensions_build_config.bzl index 348862eea1e8..21009f9de918 100644 --- a/source/extensions/extensions_build_config.bzl +++ b/source/extensions/extensions_build_config.bzl @@ -189,4 +189,12 @@ EXTENSIONS = { "envoy.internal_redirect_predicates.allow_listed_routes": "//source/extensions/internal_redirect/allow_listed_routes:config", "envoy.internal_redirect_predicates.previous_routes": "//source/extensions/internal_redirect/previous_routes:config", "envoy.internal_redirect_predicates.safe_cross_scheme": "//source/extensions/internal_redirect/safe_cross_scheme:config", + + # Http Upstreams + + "envoy.upstreams.http.generic": "//source/extensions/upstreams/http/generic:config", + "envoy.upstreams.http.http": "//source/extensions/upstreams/http/http:config", + "envoy.upstreams.http.tcp": "//source/extensions/upstreams/http/tcp:config", + + } diff --git a/source/extensions/upstreams/http/generic/BUILD b/source/extensions/upstreams/http/generic/BUILD new file mode 100644 index 000000000000..7aa32b16a219 --- /dev/null +++ b/source/extensions/upstreams/http/generic/BUILD @@ -0,0 +1,20 @@ +# placeholder build files for security_posture +# Will be filled in as #11327 lands. +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_extension( + name = "config", + srcs = [ + ], + hdrs = [ + ], + security_posture = "robust_to_untrusted_downstream", +) diff --git a/source/extensions/upstreams/http/http/BUILD b/source/extensions/upstreams/http/http/BUILD new file mode 100644 index 000000000000..7aa32b16a219 --- /dev/null +++ b/source/extensions/upstreams/http/http/BUILD @@ -0,0 +1,20 @@ +# placeholder build files for security_posture +# Will be filled in as #11327 lands. 
+load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_extension( + name = "config", + srcs = [ + ], + hdrs = [ + ], + security_posture = "robust_to_untrusted_downstream", +) diff --git a/source/extensions/upstreams/http/tcp/BUILD b/source/extensions/upstreams/http/tcp/BUILD new file mode 100644 index 000000000000..7aa32b16a219 --- /dev/null +++ b/source/extensions/upstreams/http/tcp/BUILD @@ -0,0 +1,20 @@ +# placeholder build files for security_posture +# Will be filled in as #11327 lands. +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_extension( + name = "config", + srcs = [ + ], + hdrs = [ + ], + security_posture = "robust_to_untrusted_downstream", +) From e720cda39664bc8bbaffcbb62f51fc1c9397f401 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Thu, 11 Jun 2020 16:17:06 -0400 Subject: [PATCH 344/909] conn_pool: moving APIs to include (#11555) Moving the Generic class definitions into the include file, in preparation for pluggable upstreams split out from #11327 Risk Level: Low (mostly only header changes) Testing: n/a Docs Changes: n/a Release Notes: n/a Signed-off-by: Alyssa Wilk --- include/envoy/router/BUILD | 3 + include/envoy/router/router.h | 153 +++++++++++++++++++- source/common/router/router.cc | 24 +-- source/common/router/router.h | 1 + source/common/router/upstream_request.h | 64 +------- test/common/router/upstream_request_test.cc | 47 ++---- test/mocks/router/mocks.h | 24 +++ 7 files changed, 212 insertions(+), 104 deletions(-) diff --git a/include/envoy/router/BUILD b/include/envoy/router/BUILD index 80afd99d4abf..e679ee28c187 100644 --- a/include/envoy/router/BUILD +++ b/include/envoy/router/BUILD @@ -55,12 +55,15 @@ envoy_cc_library( deps = [ ":internal_redirect_interface", "//include/envoy/access_log:access_log_interface", + "//include/envoy/common:conn_pool_interface", "//include/envoy/common:matchers_interface", "//include/envoy/config:typed_metadata_interface", "//include/envoy/http:codec_interface", "//include/envoy/http:codes_interface", + "//include/envoy/http:conn_pool_interface", "//include/envoy/http:hash_policy_interface", "//include/envoy/http:header_map_interface", + "//include/envoy/tcp:conn_pool_interface", "//include/envoy/tracing:http_tracer_interface", "//include/envoy/upstream:resource_manager_interface", "//include/envoy/upstream:retry_interface", diff --git a/include/envoy/router/router.h b/include/envoy/router/router.h index 39be039e7cfd..b0761122f1c9 100644 --- a/include/envoy/router/router.h +++ b/include/envoy/router/router.h @@ -9,15 +9,18 @@ #include #include "envoy/access_log/access_log.h" +#include "envoy/common/conn_pool.h" #include "envoy/common/matchers.h" #include "envoy/config/core/v3/base.pb.h" #include "envoy/config/route/v3/route_components.pb.h" #include "envoy/config/typed_metadata.h" #include "envoy/http/codec.h" #include "envoy/http/codes.h" +#include "envoy/http/conn_pool.h" #include "envoy/http/hash_policy.h" #include "envoy/http/header_map.h" #include "envoy/router/internal_redirect.h" +#include "envoy/tcp/conn_pool.h" #include "envoy/tracing/http_tracer.h" #include "envoy/type/v3/percent.pb.h" #include "envoy/upstream/resource_manager.h" @@ -32,7 +35,8 @@ namespace Envoy { namespace Upstream { class ClusterManager; -} +class LoadBalancerContext; +} // namespace Upstream namespace Router { @@ -1091,5 +1095,152 @@ class 
Config { using ConfigConstSharedPtr = std::shared_ptr; +class GenericConnectionPoolCallbacks; +class UpstreamRequest; +class GenericUpstream; + +/** + * An API for wrapping either an HTTP or a TCP connection pool. + * + * The GenericConnPool exists to create a GenericUpstream handle via a call to + * newStream resulting in an eventual call to onPoolReady + */ +class GenericConnPool { +public: + virtual ~GenericConnPool() = default; + + /** + * Called to create a new HTTP stream or TCP connection for "CONNECT streams". + * + * The implementation of the GenericConnPool will either call + * GenericConnectionPoolCallbacks::onPoolReady + * when a stream is available or GenericConnectionPoolCallbacks::onPoolFailure + * if stream creation fails. + * + * The caller is responsible for calling cancelAnyPendingRequest() if stream + * creation is no longer desired. newStream may only be called once per + * GenericConnPool. + * + * @param callbacks callbacks to communicate stream failure or creation on. + */ + virtual void newStream(GenericConnectionPoolCallbacks* callbacks) PURE; + /** + * Called to cancel any pending newStream request, + */ + virtual bool cancelAnyPendingRequest() PURE; + /** + * @return optionally returns the protocol for the connection pool. + */ + virtual absl::optional protocol() const PURE; + /** + * @return optionally returns the host for the connection pool. + */ + virtual Upstream::HostDescriptionConstSharedPtr host() const PURE; +}; + +/** + * An API for wrapping callbacks from either an HTTP or a TCP connection pool. + * + * Just like the connection pool callbacks, the GenericConnectionPoolCallbacks + * will either call onPoolReady when a GenericUpstream is ready, or + * onPoolFailure if a connection/stream can not be established. + */ +class GenericConnectionPoolCallbacks { +public: + virtual ~GenericConnectionPoolCallbacks() = default; + + /** + * Called to indicate a failure for GenericConnPool::newStream to establish a stream. + * + * @param reason supplies the failure reason. + * @param transport_failure_reason supplies the details of the transport failure reason. + * @param host supplies the description of the host that caused the failure. This may be nullptr + * if no host was involved in the failure (for example overflow). + */ + virtual void onPoolFailure(ConnectionPool::PoolFailureReason reason, + absl::string_view transport_failure_reason, + Upstream::HostDescriptionConstSharedPtr host) PURE; + /** + * Called when GenericConnPool::newStream has established a new stream. + * + * @param upstream supplies the generic upstream for the stream. + * @param host supplies the description of the host that will carry the request. For logical + * connection pools the description may be different each time this is called. + * @param upstream_local_address supplies the local address of the upstream connection. + * @param info supplies the stream info object associated with the upstream connection. + */ + virtual void onPoolReady(std::unique_ptr&& upstream, + Upstream::HostDescriptionConstSharedPtr host, + const Network::Address::InstanceConstSharedPtr& upstream_local_address, + const StreamInfo::StreamInfo& info) PURE; + + // TODO(alyssawilk) This exists because the Connection Pool creates the GenericUpstream, and the + // GenericUpstream needs a handle back to the upstream request to pass on events, as upstream + // data flows in. Do interface clean up in a follow-up PR. 
+ virtual UpstreamRequest* upstreamRequest() PURE; +}; + +/** + * An API for sending information to either a TCP or HTTP upstream. + * + * It is similar logically to RequestEncoder, only without the getStream interface. + */ +class GenericUpstream { +public: + virtual ~GenericUpstream() = default; + /** + * Encode a data frame. + * @param data supplies the data to encode. The data may be moved by the encoder. + * @param end_stream supplies whether this is the last data frame. + */ + virtual void encodeData(Buffer::Instance& data, bool end_stream) PURE; + /** + * Encode metadata. + * @param metadata_map_vector is the vector of metadata maps to encode. + */ + virtual void encodeMetadata(const Http::MetadataMapVector& metadata_map_vector) PURE; + /** + * Encode headers, optionally indicating end of stream. + * @param headers supplies the header map to encode. + * @param end_stream supplies whether this is a header only request. + */ + virtual void encodeHeaders(const Http::RequestHeaderMap& headers, bool end_stream) PURE; + /** + * Encode trailers. This implicitly ends the stream. + * @param trailers supplies the trailers to encode. + */ + virtual void encodeTrailers(const Http::RequestTrailerMap& trailers) PURE; + /** + * Enable/disable further data from this stream. + */ + virtual void readDisable(bool disable) PURE; + /** + * Reset the stream. No events will fire beyond this point. + * @param reason supplies the reset reason. + */ + virtual void resetStream() PURE; +}; + +using GenericConnPoolPtr = std::unique_ptr; + +/* + * A factory for creating generic connection pools. + */ +class GenericConnPoolFactory : public Envoy::Config::TypedFactory { +public: + ~GenericConnPoolFactory() override = default; + + /* + * @param options for creating the transport socket + * @return may be null + */ + virtual GenericConnPoolPtr createGenericConnPool(Upstream::ClusterManager& cm, bool is_connect, + const RouteEntry& route_entry, + Http::Protocol protocol, + Upstream::LoadBalancerContext* ctx) const PURE; +}; + +using GenericConnPoolFactoryPtr = std::unique_ptr; + } // namespace Router } // namespace Envoy diff --git a/source/common/router/router.cc b/source/common/router/router.cc index b17c0b0e2edf..5ab68cc2d17c 100644 --- a/source/common/router/router.cc +++ b/source/common/router/router.cc @@ -20,6 +20,7 @@ #include "common/common/enum_to_int.h" #include "common/common/scope_tracker.h" #include "common/common/utility.h" +#include "common/config/utility.h" #include "common/grpc/common.h" #include "common/http/codes.h" #include "common/http/header_map_impl.h" @@ -492,9 +493,8 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, transport_socket_options_ = Network::TransportSocketOptionsUtility::fromFilterState( *callbacks_->streamInfo().filterState()); std::unique_ptr generic_conn_pool = createConnPool(); - Http::Protocol protocol = cluster_->upstreamHttpProtocol(callbacks_->streamInfo().protocol()); - if (!generic_conn_pool->initialize(config_.cm_, *route_entry_, protocol, this)) { + if (!generic_conn_pool) { sendNoHealthyUpstreamResponse(); return Http::FilterHeadersStatus::StopIteration; } @@ -600,11 +600,19 @@ std::unique_ptr Filter::createConnPool() { const bool should_tcp_proxy = route_entry_->connectConfig().has_value() && downstream_headers_->getMethodValue() == Http::Headers::get().MethodValues.Connect; - + Http::Protocol protocol = cluster_->upstreamHttpProtocol(callbacks_->streamInfo().protocol()); if (should_tcp_proxy) { - return std::make_unique(); + auto 
pool = std::make_unique(config_.cm_, *route_entry_, protocol, this); + if (pool->valid()) { + return pool; + } + } else { + auto pool = std::make_unique(config_.cm_, *route_entry_, protocol, this); + if (pool->valid()) { + return pool; + } } - return std::make_unique(); + return nullptr; } void Filter::sendNoHealthyUpstreamResponse() { @@ -1507,12 +1515,8 @@ void Filter::doRetry() { ASSERT(pending_retries_ > 0); pending_retries_--; - Upstream::HostDescriptionConstSharedPtr host; - std::unique_ptr generic_conn_pool = createConnPool(); - - Http::Protocol protocol = cluster_->upstreamHttpProtocol(callbacks_->streamInfo().protocol()); - if (!generic_conn_pool->initialize(config_.cm_, *route_entry_, protocol, this)) { + if (!generic_conn_pool) { sendNoHealthyUpstreamResponse(); cleanup(); return; diff --git a/source/common/router/router.h b/source/common/router/router.h index 22c0e4f8ac77..18259f9c2dc4 100644 --- a/source/common/router/router.h +++ b/source/common/router/router.h @@ -478,6 +478,7 @@ class Filter : Logger::Loggable, Event::Dispatcher& dispatcher, Upstream::ResourcePriority priority) PURE; std::unique_ptr createConnPool(); + UpstreamRequestPtr createUpstreamRequest(); void maybeDoShadowing(); bool maybeRetryReset(Http::StreamResetReason reset_reason, UpstreamRequest& upstream_request); diff --git a/source/common/router/upstream_request.h b/source/common/router/upstream_request.h index 260418447455..89c5506c20b4 100644 --- a/source/common/router/upstream_request.h +++ b/source/common/router/upstream_request.h @@ -30,45 +30,6 @@ class GenericConnectionPoolCallbacks; class RouterFilterInterface; class UpstreamRequest; -// An API for wrapping either an HTTP or a TCP connection pool. -class GenericConnPool : public Logger::Loggable { -public: - virtual ~GenericConnPool() = default; - - // Initializes the connection pool. This must be called before the connection - // pool is valid, and can be used. - virtual bool initialize(Upstream::ClusterManager& cm, const RouteEntry& route_entry, - Http::Protocol protocol, Upstream::LoadBalancerContext* ctx) PURE; - - // Called to create a new HTTP stream or TCP connection. The implementation - // is then responsible for calling either onPoolReady or onPoolFailure on the - // supplied GenericConnectionPoolCallbacks. - virtual void newStream(GenericConnectionPoolCallbacks* callbacks) PURE; - // Called to cancel a call to newStream. Returns true if a newStream request - // was canceled, false otherwise. - virtual bool cancelAnyPendingRequest() PURE; - // Optionally returns the protocol for the connection pool. - virtual absl::optional protocol() const PURE; - // Returns the host for this conn pool. - virtual Upstream::HostDescriptionConstSharedPtr host() const PURE; -}; - -// An API for the UpstreamRequest to get callbacks from either an HTTP or TCP -// connection pool. -class GenericConnectionPoolCallbacks { -public: - virtual ~GenericConnectionPoolCallbacks() = default; - - virtual void onPoolFailure(ConnectionPool::PoolFailureReason reason, - absl::string_view transport_failure_reason, - Upstream::HostDescriptionConstSharedPtr host) PURE; - virtual void onPoolReady(std::unique_ptr&& upstream, - Upstream::HostDescriptionConstSharedPtr host, - const Network::Address::InstanceConstSharedPtr& upstream_local_address, - const StreamInfo::StreamInfo& info) PURE; - virtual UpstreamRequest* upstreamRequest() PURE; -}; - // The base request for Upstream. 
class UpstreamRequest : public Logger::Loggable, public Http::ResponseDecoder, @@ -204,12 +165,12 @@ class UpstreamRequest : public Logger::Loggable, class HttpConnPool : public GenericConnPool, public Http::ConnectionPool::Callbacks { public: // GenericConnPool - bool initialize(Upstream::ClusterManager& cm, const RouteEntry& route_entry, - Http::Protocol protocol, Upstream::LoadBalancerContext* ctx) override { + HttpConnPool(Upstream::ClusterManager& cm, const RouteEntry& route_entry, Http::Protocol protocol, + Upstream::LoadBalancerContext* ctx) { conn_pool_ = cm.httpConnPoolForCluster(route_entry.clusterName(), route_entry.priority(), protocol, ctx); - return conn_pool_ != nullptr; } + bool valid() const { return conn_pool_ != nullptr; } void newStream(GenericConnectionPoolCallbacks* callbacks) override; bool cancelAnyPendingRequest() override; @@ -233,13 +194,12 @@ class HttpConnPool : public GenericConnPool, public Http::ConnectionPool::Callba class TcpConnPool : public GenericConnPool, public Tcp::ConnectionPool::Callbacks { public: - bool initialize(Upstream::ClusterManager& cm, const RouteEntry& route_entry, Http::Protocol, - Upstream::LoadBalancerContext* ctx) override { + TcpConnPool(Upstream::ClusterManager& cm, const RouteEntry& route_entry, Http::Protocol, + Upstream::LoadBalancerContext* ctx) { conn_pool_ = cm.tcpConnPoolForCluster(route_entry.clusterName(), Upstream::ResourcePriority::Default, ctx); - return conn_pool_ != nullptr; } - + bool valid() const { return conn_pool_ != nullptr; } void newStream(GenericConnectionPoolCallbacks* callbacks) override { callbacks_ = callbacks; upstream_handle_ = conn_pool_->newConnection(*this); @@ -271,18 +231,6 @@ class TcpConnPool : public GenericConnPool, public Tcp::ConnectionPool::Callback GenericConnectionPoolCallbacks* callbacks_{}; }; -// A generic API which covers common functionality between HTTP and TCP upstreams. 
-class GenericUpstream { -public: - virtual ~GenericUpstream() = default; - virtual void encodeData(Buffer::Instance& data, bool end_stream) PURE; - virtual void encodeMetadata(const Http::MetadataMapVector& metadata_map_vector) PURE; - virtual void encodeHeaders(const Http::RequestHeaderMap& headers, bool end_stream) PURE; - virtual void encodeTrailers(const Http::RequestTrailerMap& trailers) PURE; - virtual void readDisable(bool disable) PURE; - virtual void resetStream() PURE; -}; - class HttpUpstream : public GenericUpstream, public Http::StreamCallbacks { public: HttpUpstream(UpstreamRequest& upstream_request, Http::RequestEncoder* encoder) diff --git a/test/common/router/upstream_request_test.cc b/test/common/router/upstream_request_test.cc index 4fa02a4cc56c..976e378afb92 100644 --- a/test/common/router/upstream_request_test.cc +++ b/test/common/router/upstream_request_test.cc @@ -25,30 +25,6 @@ namespace Envoy { namespace Router { namespace { -class MockGenericConnPool : public GenericConnPool { - MOCK_METHOD(void, newStream, (GenericConnectionPoolCallbacks * request)); - MOCK_METHOD(bool, cancelAnyPendingRequest, ()); - MOCK_METHOD(absl::optional, protocol, (), (const)); - MOCK_METHOD(bool, initialize, - (Upstream::ClusterManager&, const RouteEntry&, Http::Protocol, - Upstream::LoadBalancerContext*)); - MOCK_METHOD(Upstream::HostDescriptionConstSharedPtr, host, (), (const)); -}; - -class MockGenericConnectionPoolCallbacks : public GenericConnectionPoolCallbacks { -public: - MOCK_METHOD(void, onPoolFailure, - (Http::ConnectionPool::PoolFailureReason reason, - absl::string_view transport_failure_reason, - Upstream::HostDescriptionConstSharedPtr host)); - MOCK_METHOD(void, onPoolReady, - (std::unique_ptr && upstream, - Upstream::HostDescriptionConstSharedPtr host, - const Network::Address::InstanceConstSharedPtr& upstream_local_address, - const StreamInfo::StreamInfo& info)); - MOCK_METHOD(UpstreamRequest*, upstreamRequest, ()); -}; - class MockRouterFilterInterface : public RouterFilterInterface { public: MockRouterFilterInterface() @@ -115,10 +91,11 @@ class TcpConnPoolTest : public ::testing::Test { NiceMock route_entry; NiceMock cm; EXPECT_CALL(cm, tcpConnPoolForCluster(_, _, _)).WillOnce(Return(&mock_pool_)); - EXPECT_TRUE(conn_pool_.initialize(cm, route_entry, Http::Protocol::Http11, nullptr)); + conn_pool_ = std::make_unique(cm, route_entry, Http::Protocol::Http11, nullptr); + EXPECT_TRUE(conn_pool_->valid()); } - TcpConnPool conn_pool_; + std::unique_ptr conn_pool_; Tcp::ConnectionPool::MockInstance mock_pool_; MockGenericConnectionPoolCallbacks mock_generic_callbacks_; std::shared_ptr> host_; @@ -129,38 +106,38 @@ TEST_F(TcpConnPoolTest, Basic) { NiceMock connection; EXPECT_CALL(mock_pool_, newConnection(_)).WillOnce(Return(&cancellable_)); - conn_pool_.newStream(&mock_generic_callbacks_); + conn_pool_->newStream(&mock_generic_callbacks_); EXPECT_CALL(mock_generic_callbacks_, upstreamRequest()); EXPECT_CALL(mock_generic_callbacks_, onPoolReady(_, _, _, _)); auto data = std::make_unique>(); EXPECT_CALL(*data, connection()).Times(AnyNumber()).WillRepeatedly(ReturnRef(connection)); - conn_pool_.onPoolReady(std::move(data), host_); + conn_pool_->onPoolReady(std::move(data), host_); } TEST_F(TcpConnPoolTest, OnPoolFailure) { EXPECT_CALL(mock_pool_, newConnection(_)).WillOnce(Return(&cancellable_)); - conn_pool_.newStream(&mock_generic_callbacks_); + conn_pool_->newStream(&mock_generic_callbacks_); EXPECT_CALL(mock_generic_callbacks_, onPoolFailure(_, _, _)); - 
conn_pool_.onPoolFailure(Tcp::ConnectionPool::PoolFailureReason::LocalConnectionFailure, host_); + conn_pool_->onPoolFailure(Tcp::ConnectionPool::PoolFailureReason::LocalConnectionFailure, host_); // Make sure that the pool failure nulled out the pending request. - EXPECT_FALSE(conn_pool_.cancelAnyPendingRequest()); + EXPECT_FALSE(conn_pool_->cancelAnyPendingRequest()); } TEST_F(TcpConnPoolTest, Cancel) { // Initially cancel should fail as there is no pending request. - EXPECT_FALSE(conn_pool_.cancelAnyPendingRequest()); + EXPECT_FALSE(conn_pool_->cancelAnyPendingRequest()); EXPECT_CALL(mock_pool_, newConnection(_)).WillOnce(Return(&cancellable_)); - conn_pool_.newStream(&mock_generic_callbacks_); + conn_pool_->newStream(&mock_generic_callbacks_); // Canceling should now return true as there was an active request. - EXPECT_TRUE(conn_pool_.cancelAnyPendingRequest()); + EXPECT_TRUE(conn_pool_->cancelAnyPendingRequest()); // A second cancel should return false as there is not a pending request. - EXPECT_FALSE(conn_pool_.cancelAnyPendingRequest()); + EXPECT_FALSE(conn_pool_->cancelAnyPendingRequest()); } class TcpUpstreamTest : public ::testing::Test { diff --git a/test/mocks/router/mocks.h b/test/mocks/router/mocks.h index d221d81b3439..4fe1103f8451 100644 --- a/test/mocks/router/mocks.h +++ b/test/mocks/router/mocks.h @@ -519,5 +519,29 @@ class MockScopedRouteConfigProvider : public Envoy::Config::ConfigProvider { std::shared_ptr config_; }; +class MockGenericConnPool : public GenericConnPool { + MOCK_METHOD(void, newStream, (GenericConnectionPoolCallbacks * request)); + MOCK_METHOD(bool, cancelAnyPendingRequest, ()); + MOCK_METHOD(absl::optional, protocol, (), (const)); + MOCK_METHOD(bool, initialize, + (Upstream::ClusterManager&, const RouteEntry&, Http::Protocol, + Upstream::LoadBalancerContext*)); + MOCK_METHOD(Upstream::HostDescriptionConstSharedPtr, host, (), (const)); +}; + +class MockGenericConnectionPoolCallbacks : public GenericConnectionPoolCallbacks { +public: + MOCK_METHOD(void, onPoolFailure, + (Http::ConnectionPool::PoolFailureReason reason, + absl::string_view transport_failure_reason, + Upstream::HostDescriptionConstSharedPtr host)); + MOCK_METHOD(void, onPoolReady, + (std::unique_ptr && upstream, + Upstream::HostDescriptionConstSharedPtr host, + const Network::Address::InstanceConstSharedPtr& upstream_local_address, + const StreamInfo::StreamInfo& info)); + MOCK_METHOD(UpstreamRequest*, upstreamRequest, ()); +}; + } // namespace Router } // namespace Envoy From 4a91d7f1fb8f00e9a743fa3dfb87c6bef1e317ca Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Thu, 11 Jun 2020 17:01:12 -0400 Subject: [PATCH 345/909] test: fixing a teardown ordering bug (#11542) As (now) commented, when codecs access runtime, it creates a destruction order invariant that the fake upstreams are torn down before the server (and its runtime). Fixing this in the base integration test and cleaning up test sub-classes. 
Risk Level: n/a (test only) Testing: http2 test passes cleanly with 200 runs (where before it flaked out) Docs Changes: n/a Release Notes: n/a Fixes #11538 Signed-off-by: Alyssa Wilk --- .../tcp_grpc_access_log_integration_test.cc | 5 ----- .../aggregate/cluster_integration_test.cc | 6 +----- .../redis/redis_cluster_integration_test.cc | 5 ----- .../aws_metadata_fetcher_integration_test.cc | 5 ----- .../aws_lambda_filter_integration_test.cc | 6 +----- .../reverse_bridge_integration_test.cc | 6 +----- .../grpc_json_transcoder_integration_test.cc | 13 +------------ .../direct_response_integration_test.cc | 11 ----------- .../local_ratelimit_integration_test.cc | 5 ----- .../mysql_proxy/mysql_integration_test.cc | 7 ------- .../postgres_integration_test.cc | 5 ----- .../filters/network/rbac/integration_test.cc | 5 ----- .../redis_proxy_integration_test.cc | 5 ----- .../network/thrift_proxy/integration_test.cc | 5 ----- .../translation_integration_test.cc | 5 ----- .../dns_filter/dns_filter_integration_test.cc | 5 ----- .../udp_proxy/udp_proxy_integration_test.cc | 8 -------- test/integration/ads_integration.cc | 6 +----- test/integration/ads_integration_test.cc | 18 +++--------------- .../api_version_integration_test.cc | 2 -- test/integration/cds_integration_test.cc | 2 -- test/integration/echo_integration_test.cc | 11 ----------- .../filter_manager_integration_test.cc | 5 ----- test/integration/header_integration_test.cc | 3 --- test/integration/http_integration.cc | 6 +----- test/integration/integration.cc | 8 ++++++++ test/integration/integration.h | 2 +- test/integration/integration_admin_test.h | 8 -------- test/integration/integration_test.cc | 3 --- .../listener_lds_integration_test.cc | 6 +----- test/integration/rtds_integration_test.cc | 7 +------ .../integration/scoped_rds_integration_test.cc | 5 +---- .../sds_dynamic_integration_test.cc | 6 ------ .../sds_generic_secret_integration_test.cc | 6 +----- test/integration/stats_integration_test.cc | 5 ----- .../tcp_conn_pool_integration_test.cc | 6 ------ test/integration/tcp_proxy_integration_test.h | 5 ----- test/integration/vhds_integration_test.cc | 12 ++---------- 38 files changed, 24 insertions(+), 215 deletions(-) diff --git a/test/extensions/access_loggers/grpc/tcp_grpc_access_log_integration_test.cc b/test/extensions/access_loggers/grpc/tcp_grpc_access_log_integration_test.cc index 9225508c5dd6..e3358b3d9910 100644 --- a/test/extensions/access_loggers/grpc/tcp_grpc_access_log_integration_test.cc +++ b/test/extensions/access_loggers/grpc/tcp_grpc_access_log_integration_test.cc @@ -32,11 +32,6 @@ class TcpGrpcAccessLogIntegrationTest : public Grpc::GrpcClientIntegrationParamT enable_half_close_ = true; } - ~TcpGrpcAccessLogIntegrationTest() override { - test_server_.reset(); - fake_upstreams_.clear(); - } - void createUpstreams() override { BaseIntegrationTest::createUpstreams(); fake_upstreams_.emplace_back( diff --git a/test/extensions/clusters/aggregate/cluster_integration_test.cc b/test/extensions/clusters/aggregate/cluster_integration_test.cc index 69059250fac4..1fbd999083e0 100644 --- a/test/extensions/clusters/aggregate/cluster_integration_test.cc +++ b/test/extensions/clusters/aggregate/cluster_integration_test.cc @@ -119,11 +119,7 @@ class AggregateIntegrationTest : public testing::TestWithParam void testTranscoding(Http::RequestHeaderMap&& request_headers, const std::string& request_body, diff --git a/test/extensions/filters/network/direct_response/direct_response_integration_test.cc 
b/test/extensions/filters/network/direct_response/direct_response_integration_test.cc index 4b08edb73f47..8ff030275c41 100644 --- a/test/extensions/filters/network/direct_response/direct_response_integration_test.cc +++ b/test/extensions/filters/network/direct_response/direct_response_integration_test.cc @@ -21,21 +21,10 @@ class DirectResponseIntegrationTest : public testing::TestWithParam(fmt::format(R"EOF( diff --git a/test/integration/ads_integration_test.cc b/test/integration/ads_integration_test.cc index 9153bce772a0..5d61f60c7418 100644 --- a/test/integration/ads_integration_test.cc +++ b/test/integration/ads_integration_test.cc @@ -601,11 +601,7 @@ class AdsFailIntegrationTest : public Grpc::DeltaSotwIntegrationParamTest, sotw_or_delta_ = sotwOrDelta(); } - void TearDown() override { - cleanUpXdsConnection(); - test_server_.reset(); - fake_upstreams_.clear(); - } + void TearDown() override { cleanUpXdsConnection(); } void initialize() override { config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { @@ -645,11 +641,7 @@ class AdsConfigIntegrationTest : public Grpc::DeltaSotwIntegrationParamTest, sotw_or_delta_ = sotwOrDelta(); } - void TearDown() override { - cleanUpXdsConnection(); - test_server_.reset(); - fake_upstreams_.clear(); - } + void TearDown() override { cleanUpXdsConnection(); } void initialize() override { config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { @@ -810,11 +802,7 @@ class AdsClusterFromFileIntegrationTest : public Grpc::DeltaSotwIntegrationParam sotw_or_delta_ = sotwOrDelta(); } - void TearDown() override { - cleanUpXdsConnection(); - test_server_.reset(); - fake_upstreams_.clear(); - } + void TearDown() override { cleanUpXdsConnection(); } void initialize() override { config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { diff --git a/test/integration/api_version_integration_test.cc b/test/integration/api_version_integration_test.cc index 7dcfffc6cf11..c8bf5164b028 100644 --- a/test/integration/api_version_integration_test.cc +++ b/test/integration/api_version_integration_test.cc @@ -218,8 +218,6 @@ class ApiVersionIntegrationTest : public testing::TestWithParam, if (xds_stream_ != nullptr) { cleanUpXdsConnection(); } - test_server_.reset(); - fake_upstreams_.clear(); } std::string endpoint_; diff --git a/test/integration/cds_integration_test.cc b/test/integration/cds_integration_test.cc index 1e1790bfd8fa..ab6206c0ab78 100644 --- a/test/integration/cds_integration_test.cc +++ b/test/integration/cds_integration_test.cc @@ -42,8 +42,6 @@ class CdsIntegrationTest : public Grpc::DeltaSotwIntegrationParamTest, public Ht void TearDown() override { if (!test_skipped_) { cleanUpXdsConnection(); - test_server_.reset(); - fake_upstreams_.clear(); } } diff --git a/test/integration/echo_integration_test.cc b/test/integration/echo_integration_test.cc index 62c3f66037af..001247c3c563 100644 --- a/test/integration/echo_integration_test.cc +++ b/test/integration/echo_integration_test.cc @@ -28,18 +28,7 @@ class EchoIntegrationTest : public testing::TestWithParam* field, diff --git a/test/integration/http_integration.cc b/test/integration/http_integration.cc index de202248ef26..01c6de72ad65 100644 --- a/test/integration/http_integration.cc +++ b/test/integration/http_integration.cc @@ -253,11 +253,7 @@ void HttpIntegrationTest::useAccessLog(absl::string_view format) { ASSERT_TRUE(config_helper_.setAccessLog(access_log_name_, format)); } 
-HttpIntegrationTest::~HttpIntegrationTest() { - cleanupUpstreamAndDownstream(); - test_server_.reset(); - fake_upstreams_.clear(); -} +HttpIntegrationTest::~HttpIntegrationTest() { cleanupUpstreamAndDownstream(); } void HttpIntegrationTest::setDownstreamProtocol(Http::CodecClient::Type downstream_protocol) { downstream_protocol_ = downstream_protocol; diff --git a/test/integration/integration.cc b/test/integration/integration.cc index ecaa9d0af59d..cc2c9a458010 100644 --- a/test/integration/integration.cc +++ b/test/integration/integration.cc @@ -282,6 +282,14 @@ BaseIntegrationTest::BaseIntegrationTest(Network::Address::IpVersion version, }, version, config) {} +BaseIntegrationTest::~BaseIntegrationTest() { + // Tear down the fake upstream before the test server. + // When the HTTP codecs do runtime checks, it is important to finish all + // runtime access before the server, and the runtime singleton, go away. + fake_upstreams_.clear(); + test_server_.reset(); +} + Network::ClientConnectionPtr BaseIntegrationTest::makeClientConnection(uint32_t port) { return makeClientConnectionWithOptions(port, nullptr); } diff --git a/test/integration/integration.h b/test/integration/integration.h index d3699cb7efc8..0996fa3f8cc0 100644 --- a/test/integration/integration.h +++ b/test/integration/integration.h @@ -162,7 +162,7 @@ class BaseIntegrationTest : protected Logger::Loggable { Network::Address::IpVersion version, const std::string& config = ConfigHelper::httpProxyConfig()); - virtual ~BaseIntegrationTest() = default; + virtual ~BaseIntegrationTest(); // TODO(jmarantz): Remove this once // https://github.com/envoyproxy/envoy-filter-example/pull/69 is reverted. diff --git a/test/integration/integration_admin_test.h b/test/integration/integration_admin_test.h index e5ab002fcd7a..b84ef6b83faa 100644 --- a/test/integration/integration_admin_test.h +++ b/test/integration/integration_admin_test.h @@ -35,14 +35,6 @@ class IntegrationAdminTest : public HttpProtocolIntegrationTest { return response->headers().getStatusValue(); } - /** - * Destructor for an individual test. - */ - void TearDown() override { - test_server_.reset(); - fake_upstreams_.clear(); - } - /** * Validates that the passed in string conforms to output of stats in JSON format. 
*/ diff --git a/test/integration/integration_test.cc b/test/integration/integration_test.cc index d7b8f4a5662a..987eb03bedf1 100644 --- a/test/integration/integration_test.cc +++ b/test/integration/integration_test.cc @@ -841,8 +841,6 @@ TEST_P(IntegrationTest, TestHead) { EXPECT_THAT(response->headers(), HeaderValueOf(Headers::get().ContentLength, "12")); EXPECT_EQ(response->headers().TransferEncoding(), nullptr); EXPECT_EQ(0, response->body().size()); - - cleanupUpstreamAndDownstream(); } // The Envoy HTTP/1.1 codec ASSERTs that T-E headers are cleared in @@ -1299,7 +1297,6 @@ TEST_P(IntegrationTest, TestUpgradeHeaderInResponse) { response->waitForEndStream(); EXPECT_TRUE(response->complete()); EXPECT_EQ("Hello World", response->body()); - cleanupUpstreamAndDownstream(); } TEST_P(IntegrationTest, ConnectWithNoBody) { diff --git a/test/integration/listener_lds_integration_test.cc b/test/integration/listener_lds_integration_test.cc index 4c37ac53a0de..534b9a7fb326 100644 --- a/test/integration/listener_lds_integration_test.cc +++ b/test/integration/listener_lds_integration_test.cc @@ -32,10 +32,7 @@ class ListenerIntegrationTest : public HttpIntegrationTest, ListenerIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, ipVersion(), realTime()) {} - ~ListenerIntegrationTest() override { - resetConnections(); - cleanupUpstreamAndDownstream(); - } + ~ListenerIntegrationTest() override { resetConnections(); } void initialize() override { // We want to use the GRPC based LDS. @@ -258,7 +255,6 @@ TEST_P(ListenerIntegrationTest, BasicSuccess) { verifyResponse(std::move(response), "200", response_headers, std::string(response_size, 'a')); EXPECT_TRUE(upstream_request_->complete()); EXPECT_EQ(request_size, upstream_request_->bodyLength()); - cleanupUpstreamAndDownstream(); } } // namespace diff --git a/test/integration/rtds_integration_test.cc b/test/integration/rtds_integration_test.cc index 37f4b50a834d..e0f7b7a1e2ad 100644 --- a/test/integration/rtds_integration_test.cc +++ b/test/integration/rtds_integration_test.cc @@ -75,11 +75,7 @@ class RtdsIntegrationTest : public Grpc::DeltaSotwIntegrationParamTest, public H sotw_or_delta_ = sotwOrDelta(); } - void TearDown() override { - cleanUpXdsConnection(); - test_server_.reset(); - fake_upstreams_.clear(); - } + void TearDown() override { cleanUpXdsConnection(); } void initialize() override { // The tests infra expects the xDS server to be the second fake upstream, so @@ -228,7 +224,6 @@ TEST_P(RtdsIntegrationTest, RtdsAfterAsyncPrimaryClusterInitialization) { EXPECT_EQ(initial_load_success_ + 1, test_server_->counter("runtime.load_success")->value()); EXPECT_EQ(initial_keys_ + 1, test_server_->gauge("runtime.num_keys")->value()); EXPECT_EQ(3, test_server_->gauge("runtime.num_layers")->value()); - cleanupUpstreamAndDownstream(); } } // namespace diff --git a/test/integration/scoped_rds_integration_test.cc b/test/integration/scoped_rds_integration_test.cc index 5e786482c4a2..59dc3069d32d 100644 --- a/test/integration/scoped_rds_integration_test.cc +++ b/test/integration/scoped_rds_integration_test.cc @@ -32,10 +32,7 @@ class ScopedRdsIntegrationTest : public HttpIntegrationTest, ScopedRdsIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, ipVersion(), realTime()) {} - ~ScopedRdsIntegrationTest() override { - resetConnections(); - cleanupUpstreamAndDownstream(); - } + ~ScopedRdsIntegrationTest() override { resetConnections(); } void initialize() override { // Setup two upstream hosts, one for each cluster. 
diff --git a/test/integration/sds_dynamic_integration_test.cc b/test/integration/sds_dynamic_integration_test.cc index 0cc56c3742a7..34c1763e3998 100644 --- a/test/integration/sds_dynamic_integration_test.cc +++ b/test/integration/sds_dynamic_integration_test.cc @@ -183,10 +183,7 @@ class SdsDynamicDownstreamIntegrationTest : public SdsDynamicIntegrationBaseTest void TearDown() override { cleanUpXdsConnection(); - client_ssl_ctx_.reset(); - cleanupUpstreamAndDownstream(); - codec_client_.reset(); } Network::ClientConnectionPtr makeSslClientConnection() { @@ -346,9 +343,6 @@ class SdsDynamicDownstreamCertValidationContextTest : public SdsDynamicDownstrea client_ssl_ctx_.reset(); cleanupUpstreamAndDownstream(); codec_client_.reset(); - - test_server_.reset(); - fake_upstreams_.clear(); } void enableCombinedValidationContext(bool enable) { use_combined_validation_context_ = enable; } diff --git a/test/integration/sds_generic_secret_integration_test.cc b/test/integration/sds_generic_secret_integration_test.cc index 5e68cdcdc7d2..719bc0cc4f39 100644 --- a/test/integration/sds_generic_secret_integration_test.cc +++ b/test/integration/sds_generic_secret_integration_test.cc @@ -104,11 +104,7 @@ class SdsGenericSecretIntegrationTest : public Grpc::GrpcClientIntegrationParamT HttpIntegrationTest::initialize(); } - void TearDown() override { - cleanUpXdsConnection(); - cleanupUpstreamAndDownstream(); - codec_client_.reset(); - } + void TearDown() override { cleanUpXdsConnection(); } void createSdsStream() { createXdsConnection(); diff --git a/test/integration/stats_integration_test.cc b/test/integration/stats_integration_test.cc index 43985c20fead..40d65003dd32 100644 --- a/test/integration/stats_integration_test.cc +++ b/test/integration/stats_integration_test.cc @@ -25,11 +25,6 @@ class StatsIntegrationTest : public testing::TestWithParam filter_resolver_; diff --git a/test/integration/tcp_proxy_integration_test.h b/test/integration/tcp_proxy_integration_test.h index 6504befc3630..f6b119b86d1d 100644 --- a/test/integration/tcp_proxy_integration_test.h +++ b/test/integration/tcp_proxy_integration_test.h @@ -17,11 +17,6 @@ class TcpProxyIntegrationTest : public testing::TestWithParam Date: Fri, 12 Jun 2020 21:00:53 +0530 Subject: [PATCH 346/909] Added httpConnectionManagerConfigHelper to refactor config_test (#11161) Signed-off-by: Manish Kumar --- .../http_connection_manager/config_test.cc | 69 ++++++------------- 1 file changed, 21 insertions(+), 48 deletions(-) diff --git a/test/extensions/filters/network/http_connection_manager/config_test.cc b/test/extensions/filters/network/http_connection_manager/config_test.cc index 9ee7ed609910..6d745c2d8b5d 100644 --- a/test/extensions/filters/network/http_connection_manager/config_test.cc +++ b/test/extensions/filters/network/http_connection_manager/config_test.cc @@ -56,6 +56,11 @@ class HttpConnectionManagerConfigTest : public testing::Test { NiceMock http_tracer_manager_; std::shared_ptr> http_tracer_{ std::make_shared>()}; + void createHttpConnectionManagerConfig(const std::string& yaml) { + HttpConnectionManagerConfig(parseHttpConnectionManagerFromV2Yaml(yaml), context_, + date_provider_, route_config_provider_manager_, + scoped_routes_config_provider_manager_, http_tracer_manager_); + } }; TEST_F(HttpConnectionManagerConfigTest, ValidateFail) { @@ -84,11 +89,8 @@ stat_prefix: router - name: foo )EOF"; - EXPECT_THROW_WITH_MESSAGE( - HttpConnectionManagerConfig(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, - date_provider_, 
route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_), - EnvoyException, "Didn't find a registered implementation for name: 'foo'"); + EXPECT_THROW_WITH_MESSAGE(createHttpConnectionManagerConfig(yaml_string), EnvoyException, + "Didn't find a registered implementation for name: 'foo'"); } TEST_F(HttpConnectionManagerConfigTest, RouterInverted) { @@ -115,10 +117,7 @@ stat_prefix: router )EOF"; EXPECT_THROW_WITH_MESSAGE( - HttpConnectionManagerConfig(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, - date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_), - EnvoyException, + createHttpConnectionManagerConfig(yaml_string), EnvoyException, "Error: terminal filter named envoy.filters.http.router of type envoy.filters.http.router " "must be the last filter in a http filter chain."); } @@ -145,14 +144,10 @@ stat_prefix: router pass_through_mode: false )EOF"; - EXPECT_THROW_WITH_MESSAGE( - HttpConnectionManagerConfig(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, - date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_), - EnvoyException, - "Error: non-terminal filter named health_check of type " - "envoy.filters.http.health_check is the last filter in a http filter " - "chain."); + EXPECT_THROW_WITH_MESSAGE(createHttpConnectionManagerConfig(yaml_string), EnvoyException, + "Error: non-terminal filter named health_check of type " + "envoy.filters.http.health_check is the last filter in a http filter " + "chain."); } TEST_F(HttpConnectionManagerConfigTest, MiscConfig) { @@ -1302,9 +1297,7 @@ stat_prefix: my_stat_prefix custom_settings_parameters: { identifier: 3, value: 2048 } )EOF"; // This will throw when Http2ProtocolOptions validation fails. 
- HttpConnectionManagerConfig(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, - date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + createHttpConnectionManagerConfig(yaml_string); } // Validates that named and user defined parameter collisions will trigger a config validation @@ -1336,10 +1329,7 @@ stat_prefix: my_stat_prefix - { identifier: 3, value: 1024 } )EOF"; EXPECT_THROW_WITH_REGEX( - HttpConnectionManagerConfig(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, - date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_), - EnvoyException, + createHttpConnectionManagerConfig(yaml_string), EnvoyException, R"(the \{hpack_table_size,max_concurrent_streams\} HTTP/2 SETTINGS parameter\(s\) can not be)" " configured"); } @@ -1370,10 +1360,7 @@ stat_prefix: my_stat_prefix - { identifier: 8, value: 0 } )EOF"; EXPECT_THROW_WITH_REGEX( - HttpConnectionManagerConfig(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, - date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_), - EnvoyException, + createHttpConnectionManagerConfig(yaml_string), EnvoyException, "the \"allow_connect\" SETTINGS parameter must only be configured through the named field"); const std::string yaml_string2 = R"EOF( @@ -1395,9 +1382,7 @@ stat_prefix: my_stat_prefix http2_protocol_options: allow_connect: true )EOF"; - HttpConnectionManagerConfig(parseHttpConnectionManagerFromV2Yaml(yaml_string2), context_, - date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + createHttpConnectionManagerConfig(yaml_string2); } // Validates that setting the server push parameter via user defined parameters is disallowed. @@ -1423,10 +1408,7 @@ stat_prefix: my_stat_prefix )EOF"; EXPECT_THROW_WITH_REGEX( - HttpConnectionManagerConfig(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, - date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_), - EnvoyException, + createHttpConnectionManagerConfig(yaml_string), EnvoyException, "server push is not supported by Envoy and can not be enabled via a SETTINGS parameter."); // Specify both the server push parameter and colliding named and user defined parameters. @@ -1457,10 +1439,7 @@ stat_prefix: my_stat_prefix // The server push exception is thrown first. 
EXPECT_THROW_WITH_REGEX( - HttpConnectionManagerConfig(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, - date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_), - EnvoyException, + createHttpConnectionManagerConfig(yaml_string), EnvoyException, "server push is not supported by Envoy and can not be enabled via a SETTINGS parameter."); } @@ -1491,10 +1470,7 @@ stat_prefix: my_stat_prefix - { identifier: 12, value: 10 } )EOF"; EXPECT_THROW_WITH_REGEX( - HttpConnectionManagerConfig(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, - date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_), - EnvoyException, + createHttpConnectionManagerConfig(yaml_string), EnvoyException, R"(inconsistent HTTP/2 custom SETTINGS parameter\(s\) detected; identifiers = \{0x0a\})"); } @@ -1619,11 +1595,8 @@ TEST_F(HttpConnectionManagerConfigTest, UnknownRequestIDExtension) { - name: envoy.filters.http.router )EOF"; - EXPECT_THROW_WITH_REGEX( - HttpConnectionManagerConfig(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, - date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_), - EnvoyException, "Didn't find a registered implementation for type"); + EXPECT_THROW_WITH_REGEX(createHttpConnectionManagerConfig(yaml_string), EnvoyException, + "Didn't find a registered implementation for type"); } TEST_F(HttpConnectionManagerConfigTest, DefaultRequestIDExtension) { From d5f6ada3de69a3b54806072a44a0747ac7e19ab4 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Fri, 12 Jun 2020 11:32:27 -0400 Subject: [PATCH 347/909] conn_pool: unifying cancellation between http and TCP (#11529) Signed-off-by: Alyssa Wilk --- include/envoy/common/conn_pool.h | 31 ++++ include/envoy/http/conn_pool.h | 14 +- include/envoy/tcp/conn_pool.h | 32 +--- source/common/http/conn_pool_base.cc | 38 +++-- source/common/http/conn_pool_base.h | 12 +- source/common/router/upstream_request.cc | 2 +- source/common/router/upstream_request.h | 2 +- source/common/tcp_proxy/upstream.h | 4 +- test/common/http/http1/conn_pool_test.cc | 31 +++- test/common/http/http2/conn_pool_test.cc | 143 +++++++++++++++++- test/common/router/router_test.cc | 36 ++--- test/common/router/upstream_request_test.cc | 2 +- test/common/tcp_proxy/tcp_proxy_test.cc | 4 +- .../network/dubbo_proxy/router_test.cc | 2 +- test/mocks/BUILD | 1 + test/mocks/common.cc | 5 + test/mocks/common.h | 13 ++ test/mocks/http/BUILD | 1 + test/mocks/http/conn_pool.cc | 3 - test/mocks/http/conn_pool.h | 10 +- test/mocks/tcp/BUILD | 1 + test/mocks/tcp/mocks.cc | 5 +- test/mocks/tcp/mocks.h | 13 +- 23 files changed, 284 insertions(+), 121 deletions(-) diff --git a/include/envoy/common/conn_pool.h b/include/envoy/common/conn_pool.h index c8a988b54794..0079afe2f7d5 100644 --- a/include/envoy/common/conn_pool.h +++ b/include/envoy/common/conn_pool.h @@ -1,8 +1,39 @@ #pragma once +#include "envoy/common/pure.h" + namespace Envoy { namespace ConnectionPool { +/** + * Controls the behavior of a canceled request. + */ +enum class CancelPolicy { + // By default, canceled requests allow a pending connection to complete and become + // available for a future request. + Default, + // When a request is canceled, closes a pending connection if there will still be sufficient + // connections to serve pending requests. CloseExcess is largely useful for callers that never + // re-use connections (e.g. 
by closing rather than releasing connections). Using CloseExcess in + // this situation guarantees that no idle connections will be held open by the conn pool awaiting + // a connection request. + CloseExcess, +}; + +/** + * Handle that allows a pending connection or stream request to be canceled before it is completed. + */ +class Cancellable { +public: + virtual ~Cancellable() = default; + + /** + * Cancel the pending connection or stream request. + * @param cancel_policy a CancelPolicy that controls the behavior of this cancellation. + */ + virtual void cancel(CancelPolicy cancel_policy) PURE; +}; + enum class PoolFailureReason { // A resource overflowed and policy prevented a new connection from being created. Overflow, diff --git a/include/envoy/http/conn_pool.h b/include/envoy/http/conn_pool.h index c41fe5764120..ebb5bf363492 100644 --- a/include/envoy/http/conn_pool.h +++ b/include/envoy/http/conn_pool.h @@ -13,20 +13,8 @@ namespace Envoy { namespace Http { namespace ConnectionPool { -/** - * Handle that allows a pending request to be cancelled before it is bound to a connection. - */ -class Cancellable { -public: - virtual ~Cancellable() = default; - - /** - * Cancel the pending request. - */ - virtual void cancel() PURE; -}; - using PoolFailureReason = ::Envoy::ConnectionPool::PoolFailureReason; +using Cancellable = ::Envoy::ConnectionPool::Cancellable; /** * Pool callbacks invoked in the context of a newStream() call, either synchronously or diff --git a/include/envoy/tcp/conn_pool.h b/include/envoy/tcp/conn_pool.h index 5cdcd617daf7..52712177030e 100644 --- a/include/envoy/tcp/conn_pool.h +++ b/include/envoy/tcp/conn_pool.h @@ -13,36 +13,6 @@ namespace Envoy { namespace Tcp { namespace ConnectionPool { -/** - * Controls the behavior of a canceled connection request. - */ -enum class CancelPolicy { - // By default, canceled connection requests allow a pending connection to complete and become - // available for a future connection request. - Default, - // When a connection request is canceled, closes a pending connection if there are more pending - // connections than pending connection requests. CloseExcess is useful for callers that never - // re-use connections (e.g. by closing rather than releasing connections). Using CloseExcess in - // this situation guarantees that no idle connections will be held open by the conn pool awaiting - // a connection request. - CloseExcess, -}; - -/** - * Handle that allows a pending connection request to be canceled before it is completed. - */ -class Cancellable { -public: - virtual ~Cancellable() = default; - - /** - * Cancel the pending connection request. - * @param cancel_policy a CancelPolicy that controls the behavior of this connection request - * cancellation. - */ - virtual void cancel(CancelPolicy cancel_policy) PURE; -}; - /* * UpstreamCallbacks for connection pool upstream connection callbacks and data. 
Note that * onEvent(Connected) is never triggered since the event always occurs before a ConnectionPool @@ -119,6 +89,8 @@ class ConnectionData { using ConnectionDataPtr = std::unique_ptr; using PoolFailureReason = ::Envoy::ConnectionPool::PoolFailureReason; +using Cancellable = ::Envoy::ConnectionPool::Cancellable; +using CancelPolicy = ::Envoy::ConnectionPool::CancelPolicy; /** * Pool callbacks invoked in the context of a newConnection() call, either synchronously or diff --git a/source/common/http/conn_pool_base.cc b/source/common/http/conn_pool_base.cc index b59c45a5beef..5b64e408969e 100644 --- a/source/common/http/conn_pool_base.cc +++ b/source/common/http/conn_pool_base.cc @@ -15,10 +15,11 @@ ConnPoolImplBase::ConnPoolImplBase( ConnPoolImplBase::~ConnPoolImplBase() { ASSERT(ready_clients_.empty()); ASSERT(busy_clients_.empty()); + ASSERT(connecting_clients_.empty()); } void ConnPoolImplBase::destructAllConnections() { - for (auto* list : {&ready_clients_, &busy_clients_}) { + for (auto* list : {&ready_clients_, &busy_clients_, &connecting_clients_}) { while (!list->empty()) { list->front()->close(); } @@ -43,7 +44,8 @@ void ConnPoolImplBase::tryCreateNewConnection() { // If we are at the connection circuit-breaker limit due to other upstreams having // too many open connections, and this upstream has no connections, always create one, to // prevent pending requests being queued to this upstream with no way to be processed. - if (can_create_connection || (ready_clients_.empty() && busy_clients_.empty())) { + if (can_create_connection || + (ready_clients_.empty() && busy_clients_.empty() && connecting_clients_.empty())) { ENVOY_LOG(debug, "creating a new connection"); ActiveClientPtr client = instantiateActiveClient(); ASSERT(client->state_ == ActiveClient::State::CONNECTING); @@ -156,7 +158,7 @@ std::list& ConnPoolImplBase::owningList(ActiveClient::State state) { switch (state) { case ActiveClient::State::CONNECTING: - return busy_clients_; + return connecting_clients_; case ActiveClient::State::READY: return ready_clients_; case ActiveClient::State::BUSY: @@ -201,10 +203,8 @@ void ConnPoolImplBase::closeIdleConnections() { } if (pending_requests_.empty()) { - for (auto& client : busy_clients_) { - if (client->state_ == ActiveClient::State::CONNECTING) { - to_close.push_back(client.get()); - } + for (auto& client : connecting_clients_) { + to_close.push_back(client.get()); } } @@ -227,12 +227,7 @@ void ConnPoolImplBase::drainConnections() { // so use a for-loop since the list is not mutated. ASSERT(&owningList(ActiveClient::State::DRAINING) == &busy_clients_); for (auto& busy_client : busy_clients_) { - // Moving a CONNECTING client to DRAINING would violate state assumptions, namely that DRAINING - // connections have active requests (otherwise they would be closed) and that clients receiving - // a Connected event are in state CONNECTING. 
- if (busy_client->state_ != ActiveClient::State::CONNECTING) { - transitionActiveClientState(*busy_client, ActiveClient::State::DRAINING); - } + transitionActiveClientState(*busy_client, ActiveClient::State::DRAINING); } } @@ -243,7 +238,8 @@ void ConnPoolImplBase::checkForDrained() { closeIdleConnections(); - if (pending_requests_.empty() && ready_clients_.empty() && busy_clients_.empty()) { + if (pending_requests_.empty() && ready_clients_.empty() && busy_clients_.empty() && + connecting_clients_.empty()) { ENVOY_LOG(debug, "invoking drained callbacks"); for (const DrainedCb& cb : drained_callbacks_) { cb(); @@ -363,7 +359,8 @@ void ConnPoolImplBase::purgePendingRequests( } } -void ConnPoolImplBase::onPendingRequestCancel(PendingRequest& request) { +void ConnPoolImplBase::onPendingRequestCancel(PendingRequest& request, + Envoy::ConnectionPool::CancelPolicy policy) { ENVOY_LOG(debug, "cancelling pending request"); if (!pending_requests_to_purge_.empty()) { // If pending_requests_to_purge_ is not empty, it means that we are called from @@ -374,6 +371,17 @@ void ConnPoolImplBase::onPendingRequestCancel(PendingRequest& request) { } else { request.removeFromList(pending_requests_); } + // There's excess capacity if + // pending_requests < connecting_request_capacity_ - capacity of most recent client. + // It's calculated below with addition instead to avoid underflow issues, overflow being + // assumed to not be a problem across the connection pool. + if (policy == Envoy::ConnectionPool::CancelPolicy::CloseExcess && !connecting_clients_.empty() && + (pending_requests_.size() + connecting_clients_.front()->effectiveConcurrentRequestLimit() <= + connecting_request_capacity_)) { + auto& client = *connecting_clients_.front(); + transitionActiveClientState(client, ActiveClient::State::DRAINING); + client.close(); + } host_->cluster().stats().upstream_rq_cancelled_.inc(); checkForDrained(); diff --git a/source/common/http/conn_pool_base.h b/source/common/http/conn_pool_base.h index e702f2a057da..53ba14f26151 100644 --- a/source/common/http/conn_pool_base.h +++ b/source/common/http/conn_pool_base.h @@ -100,7 +100,9 @@ class ConnPoolImplBase : public ConnectionPool::Instance, ~PendingRequest() override; // ConnectionPool::Cancellable - void cancel() override { parent_.onPendingRequestCancel(*this); } + void cancel(Envoy::ConnectionPool::CancelPolicy policy) override { + parent_.onPendingRequestCancel(*this, policy); + } ConnPoolImplBase& parent_; ResponseDecoder& decoder_; @@ -123,7 +125,7 @@ class ConnPoolImplBase : public ConnectionPool::Instance, ConnectionPool::Callbacks& callbacks); // Removes the PendingRequest from the list of requests. Called when the PendingRequest is // cancelled, e.g. when the stream is reset before a connection has been established. - void onPendingRequestCancel(PendingRequest& request); + void onPendingRequestCancel(PendingRequest& request, Envoy::ConnectionPool::CancelPolicy policy); // Fails all pending requests, calling onPoolFailure on the associated callbacks. void purgePendingRequests(const Upstream::HostDescriptionConstSharedPtr& host_description, @@ -169,10 +171,12 @@ class ConnPoolImplBase : public ConnectionPool::Instance, // All entries are in state READY. std::list ready_clients_; - // Clients that are not ready to handle additional requests. - // Entries are in possible states CONNECTING, BUSY, or DRAINING. + // Clients that are not ready to handle additional requests due to being BUSY or DRAINING. 
std::list busy_clients_; + // Clients that are not ready to handle additional requests because they are CONNECTING. + std::list connecting_clients_; + // The number of requests currently attached to clients. uint64_t num_active_requests_{0}; diff --git a/source/common/router/upstream_request.cc b/source/common/router/upstream_request.cc index 0f69e617fcde..7189a14003c9 100644 --- a/source/common/router/upstream_request.cc +++ b/source/common/router/upstream_request.cc @@ -541,7 +541,7 @@ void TcpConnPool::onPoolReady(Tcp::ConnectionPool::ConnectionDataPtr&& conn_data bool HttpConnPool::cancelAnyPendingRequest() { if (conn_pool_stream_handle_) { - conn_pool_stream_handle_->cancel(); + conn_pool_stream_handle_->cancel(Tcp::ConnectionPool::CancelPolicy::Default); conn_pool_stream_handle_ = nullptr; return true; } diff --git a/source/common/router/upstream_request.h b/source/common/router/upstream_request.h index 89c5506c20b4..c898fa5b495d 100644 --- a/source/common/router/upstream_request.h +++ b/source/common/router/upstream_request.h @@ -207,7 +207,7 @@ class TcpConnPool : public GenericConnPool, public Tcp::ConnectionPool::Callback bool cancelAnyPendingRequest() override { if (upstream_handle_) { - upstream_handle_->cancel(Tcp::ConnectionPool::CancelPolicy::Default); + upstream_handle_->cancel(ConnectionPool::CancelPolicy::Default); upstream_handle_ = nullptr; return true; } diff --git a/source/common/tcp_proxy/upstream.h b/source/common/tcp_proxy/upstream.h index db193b0f4bba..8d2a301d7137 100644 --- a/source/common/tcp_proxy/upstream.h +++ b/source/common/tcp_proxy/upstream.h @@ -33,7 +33,9 @@ class TcpConnectionHandle : public ConnectionHandle { class HttpConnectionHandle : public ConnectionHandle { public: HttpConnectionHandle(Http::ConnectionPool::Cancellable* handle) : upstream_http_handle_(handle) {} - void cancel() override { upstream_http_handle_->cancel(); } + void cancel() override { + upstream_http_handle_->cancel(Tcp::ConnectionPool::CancelPolicy::Default); + } private: Http::ConnectionPool::Cancellable* upstream_http_handle_{}; diff --git a/test/common/http/http1/conn_pool_test.cc b/test/common/http/http1/conn_pool_test.cc index c9464299a92c..3b35ee583f9b 100644 --- a/test/common/http/http1/conn_pool_test.cc +++ b/test/common/http/http1/conn_pool_test.cc @@ -297,7 +297,7 @@ TEST_F(Http1ConnPoolImplTest, VerifyCancelInCallback) { EXPECT_CALL(callbacks1.pool_failure_, ready()).Times(0); ConnPoolCallbacks callbacks2; EXPECT_CALL(callbacks2.pool_failure_, ready()).WillOnce(Invoke([&]() -> void { - handle1->cancel(); + handle1->cancel(Envoy::ConnectionPool::CancelPolicy::Default); })); NiceMock outer_decoder; @@ -362,7 +362,7 @@ TEST_F(Http1ConnPoolImplTest, MaxPendingRequests) { EXPECT_EQ(1U, cluster_->circuit_breakers_stats_.rq_pending_open_.value()); - handle->cancel(); + handle->cancel(Envoy::ConnectionPool::CancelPolicy::Default); EXPECT_CALL(conn_pool_, onClientDestroy()); conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); @@ -497,7 +497,7 @@ TEST_F(Http1ConnPoolImplTest, CancelBeforeBound) { Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); EXPECT_NE(nullptr, handle); - handle->cancel(); + handle->cancel(Envoy::ConnectionPool::CancelPolicy::Default); conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); // Cause the connection to go away. 
@@ -506,6 +506,25 @@ TEST_F(Http1ConnPoolImplTest, CancelBeforeBound) { dispatcher_.clearDeferredDeleteList(); } +/** + * Test cancelling with CloseExcess + */ +TEST_F(Http1ConnPoolImplTest, CancelExcessBeforeBound) { + InSequence s; + + // Request 1 should kick off a new connection. + NiceMock outer_decoder; + ConnPoolCallbacks callbacks; + conn_pool_.expectClientCreate(); + Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); + EXPECT_NE(nullptr, handle); + + handle->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess); + // Unlike CancelBeforeBound there is no need to raise a close event to destroy the connection. + EXPECT_CALL(conn_pool_, onClientDestroy()); + dispatcher_.clearDeferredDeleteList(); +} + /** * Test an upstream disconnection while there is a bound request. */ @@ -919,7 +938,7 @@ TEST_F(Http1ConnPoolImplTest, DrainCallback) { ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::CreateConnection); ActiveTestRequest r2(*this, 0, ActiveTestRequest::Type::Pending); - r2.handle_->cancel(); + r2.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::Default); EXPECT_EQ(1U, cluster_->stats_.upstream_rq_total_.value()); EXPECT_CALL(drained, ready()); @@ -945,7 +964,7 @@ TEST_F(Http1ConnPoolImplTest, DrainWhileConnecting) { EXPECT_CALL(*conn_pool_.test_clients_[0].connection_, close(Network::ConnectionCloseType::NoFlush)); EXPECT_CALL(drained, ready()); - handle->cancel(); + handle->cancel(Envoy::ConnectionPool::CancelPolicy::Default); EXPECT_CALL(conn_pool_, onClientDestroy()); dispatcher_.clearDeferredDeleteList(); @@ -1030,7 +1049,7 @@ TEST_F(Http1ConnPoolImplTest, PendingRequestIsConsideredActive) { EXPECT_TRUE(conn_pool_.hasActiveConnections()); EXPECT_CALL(conn_pool_, onClientDestroy()); - r1.handle_->cancel(); + r1.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::Default); EXPECT_EQ(0U, cluster_->stats_.upstream_rq_total_.value()); conn_pool_.drainConnections(); conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); diff --git a/test/common/http/http2/conn_pool_test.cc b/test/common/http/http2/conn_pool_test.cc index c8ced6e33f5e..c48152e7bb44 100644 --- a/test/common/http/http2/conn_pool_test.cc +++ b/test/common/http/http2/conn_pool_test.cc @@ -288,11 +288,152 @@ TEST_F(Http2ConnPoolImplTest, DrainConnectionConnecting) { pool_.drainConnections(); // Cancel the pending request, and then the connection can be closed. - r.handle_->cancel(); + r.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::Default); EXPECT_CALL(*this, onClientDestroy()); pool_.drainConnections(); } +/** + * Verify that on CloseExcess, the connection is destroyed immediately. + */ +TEST_F(Http2ConnPoolImplTest, CloseExcess) { + InSequence s; + + expectClientCreate(); + ActiveTestRequest r(*this, 0, false); + + // Pending request prevents the connection from being drained + pool_.drainConnections(); + + EXPECT_CALL(*this, onClientDestroy()); + r.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess); +} + +/** + * Verify that on CloseExcess connections are destroyed when they can be. 
+ */ +TEST_F(Http2ConnPoolImplTest, CloseExcessTwo) { + cluster_->http2_options_.mutable_max_concurrent_streams()->set_value(1); + InSequence s; + + expectClientCreate(); + ActiveTestRequest r1(*this, 0, false); + + expectClientCreate(); + ActiveTestRequest r2(*this, 0, false); + { + EXPECT_CALL(*this, onClientDestroy()); + r1.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess); + } + + { + EXPECT_CALL(*this, onClientDestroy()); + r2.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess); + } +} + +/** + * Verify that on CloseExcess, the connections are destroyed iff they are actually excess. + */ +TEST_F(Http2ConnPoolImplTest, CloseExcessMultipleRequests) { + cluster_->http2_options_.mutable_max_concurrent_streams()->set_value(3); + InSequence s; + + // With 3 requests per connection, the first request will result in a client + // connection, and the next two will be queued for that connection. + expectClientCreate(); + ActiveTestRequest r1(*this, 0, false); + ActiveTestRequest r2(*this, 0, false); + ActiveTestRequest r3(*this, 0, false); + + // The fourth request will kick off a second connection, and the fifth will plan to share it. + expectClientCreate(); + ActiveTestRequest r4(*this, 0, false); + ActiveTestRequest r5(*this, 0, false); + + // The section below cancels the active requests in fairly random order, to + // ensure there's no association between the requests and the clients created + // for them. + + // The first cancel will not destroy any clients, as there are still four pending + // requests and they can not all share the first connection. + { + EXPECT_CALL(*this, onClientDestroy()).Times(0); + r5.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess); + } + // The second cancel will destroy one client, as there will be three pending requests + // remaining, and they only need one connection. + { + EXPECT_CALL(*this, onClientDestroy()); + r1.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess); + } + + // The next two calls will not destroy the final client, as there are two other + // pending requests waiting on it. + { + EXPECT_CALL(*this, onClientDestroy()).Times(0); + r2.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess); + r4.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess); + } + // Finally with the last request gone, the final client is destroyed. + { + EXPECT_CALL(*this, onClientDestroy()); + r3.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess); + } +} + +TEST_F(Http2ConnPoolImplTest, CloseExcessMixedMultiplexing) { + InSequence s; + + // Create clients with in-order capacity: + // 3 2 6 + // Connection capacity is min(max requests per connection, max concurrent streams). + // Use maxRequestsPerConnection here since max requests is tested above. + EXPECT_CALL(*cluster_, maxRequestsPerConnection).WillOnce(Return(3)); + expectClientCreate(); + ActiveTestRequest r1(*this, 0, false); + ActiveTestRequest r2(*this, 0, false); + ActiveTestRequest r3(*this, 0, false); + + EXPECT_CALL(*cluster_, maxRequestsPerConnection).WillOnce(Return(2)); + expectClientCreate(); + ActiveTestRequest r4(*this, 0, false); + ActiveTestRequest r5(*this, 0, false); + + EXPECT_CALL(*cluster_, maxRequestsPerConnection).WillOnce(Return(6)); + expectClientCreate(); + ActiveTestRequest r6(*this, 0, false); + + // 6 requests, capacity [3, 2, 6] - the first cancel should tear down the client with [3] + // since we destroy oldest first and [3, 2] can handle the remaining 5 requests. 
+ { + EXPECT_CALL(*this, onClientDestroy()); + r1.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess); + } + + // 5 requests, capacity [3, 2] - no teardown + { + EXPECT_CALL(*this, onClientDestroy()).Times(0); + r2.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess); + } + // 4 requests, capacity [3, 2] - canceling one destroys the client with [2] + { + EXPECT_CALL(*this, onClientDestroy()); + r3.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess); + } + + // 3 requests, capacity [3]. Tear down the last channel when all 3 are canceled. + { + EXPECT_CALL(*this, onClientDestroy()).Times(0); + r4.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess); + r5.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess); + } + { + EXPECT_CALL(*this, onClientDestroy()); + r6.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess); + } +} + /** * Verify that connections are drained when requested. */ diff --git a/test/common/router/router_test.cc b/test/common/router/router_test.cc index c0bd7deb1ee9..6b9137e723ca 100644 --- a/test/common/router/router_test.cc +++ b/test/common/router/router_test.cc @@ -207,7 +207,7 @@ class RouterTestBase : public testing::Test { router_.decodeHeaders(headers, true); // When the router filter gets reset we should cancel the pool request. - EXPECT_CALL(cancellable_, cancel()); + EXPECT_CALL(cancellable_, cancel(_)); router_.onDestroy(); } @@ -228,7 +228,7 @@ class RouterTestBase : public testing::Test { EXPECT_EQ(expected_count, atoi(std::string(headers.getEnvoyAttemptCountValue()).c_str())); // When the router filter gets reset we should cancel the pool request. - EXPECT_CALL(cancellable_, cancel()); + EXPECT_CALL(cancellable_, cancel(_)); router_.onDestroy(); EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); EXPECT_EQ(0U, @@ -351,7 +351,7 @@ class RouterTestBase : public testing::Test { NiceMock cm_; NiceMock runtime_; NiceMock random_; - Http::ConnectionPool::MockCancellable cancellable_; + Envoy::ConnectionPool::MockCancellable cancellable_; Http::ContextImpl http_context_; NiceMock callbacks_; MockShadowWriter* shadow_writer_; @@ -409,7 +409,7 @@ TEST_F(RouterTest, UpdateServerNameFilterState) { stream_info.filterState() ->getDataReadOnly(Network::UpstreamServerName::key()) .value()); - EXPECT_CALL(cancellable_, cancel()); + EXPECT_CALL(cancellable_, cancel(_)); router_.onDestroy(); EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); EXPECT_EQ(0U, @@ -437,7 +437,7 @@ TEST_F(RouterTest, UpdateSubjectAltNamesFilterState) { ->getDataReadOnly( Network::UpstreamSubjectAltNames::key()) .value()[0]); - EXPECT_CALL(cancellable_, cancel()); + EXPECT_CALL(cancellable_, cancel(_)); router_.onDestroy(); EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); EXPECT_EQ(0U, @@ -522,7 +522,7 @@ TEST_F(RouterTest, Http1Upstream) { EXPECT_EQ("10", headers.get_("x-envoy-expected-rq-timeout-ms")); // When the router filter gets reset we should cancel the pool request. - EXPECT_CALL(cancellable_, cancel()); + EXPECT_CALL(cancellable_, cancel(_)); router_.onDestroy(); EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); EXPECT_EQ(0U, @@ -547,7 +547,7 @@ TEST_F(RouterTestSuppressEnvoyHeaders, Http1Upstream) { EXPECT_FALSE(headers.has("x-envoy-expected-rq-timeout-ms")); // When the router filter gets reset we should cancel the pool request. 
- EXPECT_CALL(cancellable_, cancel()); + EXPECT_CALL(cancellable_, cancel(_)); router_.onDestroy(); EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); EXPECT_EQ(0U, @@ -568,7 +568,7 @@ TEST_F(RouterTest, Http2Upstream) { router_.decodeHeaders(headers, true); // When the router filter gets reset we should cancel the pool request. - EXPECT_CALL(cancellable_, cancel()); + EXPECT_CALL(cancellable_, cancel(_)); router_.onDestroy(); EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); EXPECT_EQ(0U, @@ -595,7 +595,7 @@ TEST_F(RouterTest, HashPolicy) { router_.decodeHeaders(headers, true); // When the router filter gets reset we should cancel the pool request. - EXPECT_CALL(cancellable_, cancel()); + EXPECT_CALL(cancellable_, cancel(_)); router_.onDestroy(); EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); EXPECT_EQ(0U, @@ -622,7 +622,7 @@ TEST_F(RouterTest, HashPolicyNoHash) { router_.decodeHeaders(headers, true); // When the router filter gets reset we should cancel the pool request. - EXPECT_CALL(cancellable_, cancel()); + EXPECT_CALL(cancellable_, cancel(_)); router_.onDestroy(); EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); EXPECT_EQ(0U, @@ -818,7 +818,7 @@ TEST_F(RouterTest, MetadataMatchCriteria) { router_.decodeHeaders(headers, true); // When the router filter gets reset we should cancel the pool request. - EXPECT_CALL(cancellable_, cancel()); + EXPECT_CALL(cancellable_, cancel(_)); router_.onDestroy(); } @@ -847,7 +847,7 @@ TEST_F(RouterTest, NoMetadataMatchCriteria) { router_.decodeHeaders(headers, true); // When the router filter gets reset we should cancel the pool request. - EXPECT_CALL(cancellable_, cancel()); + EXPECT_CALL(cancellable_, cancel(_)); router_.onDestroy(); } @@ -860,7 +860,7 @@ TEST_F(RouterTest, CancelBeforeBoundToPool) { router_.decodeHeaders(headers, true); // When the router filter gets reset we should cancel the pool request. - EXPECT_CALL(cancellable_, cancel()); + EXPECT_CALL(cancellable_, cancel(_)); router_.onDestroy(); EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); EXPECT_EQ(0U, @@ -4068,7 +4068,7 @@ TEST_F(RouterTest, RetryTimeoutDuringRetryDelayWithUpstreamRequestNoHost) { response_decoder->decodeHeaders(std::move(response_headers1), true); EXPECT_TRUE(verifyHostUpstreamStats(0, 1)); - Http::ConnectionPool::MockCancellable cancellable; + Envoy::ConnectionPool::MockCancellable cancellable; EXPECT_CALL(cm_.conn_pool_, newStream(_, _)) .WillOnce(Invoke([&](Http::ResponseDecoder& decoder, Http::ConnectionPool::Callbacks&) -> Http::ConnectionPool::Cancellable* { @@ -4078,7 +4078,7 @@ TEST_F(RouterTest, RetryTimeoutDuringRetryDelayWithUpstreamRequestNoHost) { router_.retry_state_->callback_(); // Fire timeout. - EXPECT_CALL(cancellable, cancel()); + EXPECT_CALL(cancellable, cancel(_)); EXPECT_CALL(callbacks_.stream_info_, setResponseFlag(StreamInfo::ResponseFlag::UpstreamRequestTimeout)); @@ -4124,7 +4124,7 @@ TEST_F(RouterTest, RetryTimeoutDuringRetryDelayWithUpstreamRequestNoHostAltRespo response_decoder->decodeHeaders(std::move(response_headers1), true); EXPECT_TRUE(verifyHostUpstreamStats(0, 1)); - Http::ConnectionPool::MockCancellable cancellable; + Envoy::ConnectionPool::MockCancellable cancellable; EXPECT_CALL(cm_.conn_pool_, newStream(_, _)) .WillOnce(Invoke([&](Http::ResponseDecoder& decoder, Http::ConnectionPool::Callbacks&) -> Http::ConnectionPool::Cancellable* { @@ -4134,7 +4134,7 @@ TEST_F(RouterTest, RetryTimeoutDuringRetryDelayWithUpstreamRequestNoHostAltRespo router_.retry_state_->callback_(); // Fire timeout. 
- EXPECT_CALL(cancellable, cancel()); + EXPECT_CALL(cancellable, cancel(_)); EXPECT_CALL(callbacks_.stream_info_, setResponseFlag(StreamInfo::ResponseFlag::UpstreamRequestTimeout)); @@ -5890,7 +5890,7 @@ TEST_F(RouterTest, ApplicationProtocols) { router_.decodeHeaders(headers, true); // When the router filter gets reset we should cancel the pool request. - EXPECT_CALL(cancellable_, cancel()); + EXPECT_CALL(cancellable_, cancel(_)); router_.onDestroy(); EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); EXPECT_EQ(0U, diff --git a/test/common/router/upstream_request_test.cc b/test/common/router/upstream_request_test.cc index 976e378afb92..72705db13377 100644 --- a/test/common/router/upstream_request_test.cc +++ b/test/common/router/upstream_request_test.cc @@ -99,7 +99,7 @@ class TcpConnPoolTest : public ::testing::Test { Tcp::ConnectionPool::MockInstance mock_pool_; MockGenericConnectionPoolCallbacks mock_generic_callbacks_; std::shared_ptr> host_; - NiceMock cancellable_; + NiceMock cancellable_; }; TEST_F(TcpConnPoolTest, Basic) { diff --git a/test/common/tcp_proxy/tcp_proxy_test.cc b/test/common/tcp_proxy/tcp_proxy_test.cc index 5e4c0b70bf70..91a8b897c7ca 100644 --- a/test/common/tcp_proxy/tcp_proxy_test.cc +++ b/test/common/tcp_proxy/tcp_proxy_test.cc @@ -877,7 +877,7 @@ class TcpProxyTest : public testing::Test { .WillByDefault(ReturnRef(*upstream_connections_.back())); upstream_hosts_.push_back(std::make_shared>()); conn_pool_handles_.push_back( - std::make_unique>()); + std::make_unique>()); ON_CALL(*upstream_hosts_.at(i), cluster()) .WillByDefault(ReturnPointee( @@ -969,7 +969,7 @@ class TcpProxyTest : public testing::Test { std::vector>> upstream_connection_data_{}; std::vector conn_pool_callbacks_; - std::vector>> conn_pool_handles_; + std::vector>> conn_pool_handles_; NiceMock conn_pool_; Tcp::ConnectionPool::UpstreamCallbacks* upstream_callbacks_; StringViewSaver access_log_data_; diff --git a/test/extensions/filters/network/dubbo_proxy/router_test.cc b/test/extensions/filters/network/dubbo_proxy/router_test.cc index ad5f1d5b9004..26405ea3a6b1 100644 --- a/test/extensions/filters/network/dubbo_proxy/router_test.cc +++ b/test/extensions/filters/network/dubbo_proxy/router_test.cc @@ -484,7 +484,7 @@ TEST_F(DubboRouterTest, DestroyWhileConnecting) { initializeRouter(); initializeMetadata(MessageType::Request); - NiceMock conn_pool_handle; + NiceMock conn_pool_handle; EXPECT_CALL(context_.cluster_manager_.tcp_conn_pool_, newConnection(_)) .WillOnce(Invoke([&](Tcp::ConnectionPool::Callbacks&) -> Tcp::ConnectionPool::Cancellable* { return &conn_pool_handle; diff --git a/test/mocks/BUILD b/test/mocks/BUILD index 3b4e8e65bcd0..e7f7a132092d 100644 --- a/test/mocks/BUILD +++ b/test/mocks/BUILD @@ -13,6 +13,7 @@ envoy_cc_test_library( srcs = ["common.cc"], hdrs = ["common.h"], deps = [ + "//include/envoy/common:conn_pool_interface", "//include/envoy/common:time_interface", "//include/envoy/common:token_bucket_interface", "//source/common/common:minimal_logger_lib", diff --git a/test/mocks/common.cc b/test/mocks/common.cc index ba36e63fc102..bb5fda7a3ff5 100644 --- a/test/mocks/common.cc +++ b/test/mocks/common.cc @@ -1,6 +1,11 @@ #include "test/mocks/common.h" namespace Envoy { +namespace ConnectionPool { +MockCancellable::MockCancellable() = default; +MockCancellable::~MockCancellable() = default; +} // namespace ConnectionPool + ReadyWatcher::ReadyWatcher() = default; ReadyWatcher::~ReadyWatcher() = default; diff --git a/test/mocks/common.h b/test/mocks/common.h index 89b7c3e9ca3e..1c5d899d975e 
100644 --- a/test/mocks/common.h +++ b/test/mocks/common.h @@ -2,6 +2,7 @@ #include +#include "envoy/common/conn_pool.h" #include "envoy/common/scope_tracker.h" #include "envoy/common/time.h" #include "envoy/common/token_bucket.h" @@ -95,4 +96,16 @@ class MockScopedTrackedObject : public ScopeTrackedObject { MOCK_METHOD(void, dumpState, (std::ostream&, int), (const)); }; +namespace ConnectionPool { + +class MockCancellable : public Cancellable { +public: + MockCancellable(); + ~MockCancellable() override; + + // ConnectionPool::Cancellable + MOCK_METHOD(void, cancel, (CancelPolicy cancel_policy)); +}; +} // namespace ConnectionPool + } // namespace Envoy diff --git a/test/mocks/http/BUILD b/test/mocks/http/BUILD index 65fdb6ebc4f5..d169464f6c26 100644 --- a/test/mocks/http/BUILD +++ b/test/mocks/http/BUILD @@ -24,6 +24,7 @@ envoy_cc_mock( hdrs = ["conn_pool.h"], deps = [ "//include/envoy/http:conn_pool_interface", + "//test/mocks:common_lib", "//test/mocks/upstream:host_mocks", ], ) diff --git a/test/mocks/http/conn_pool.cc b/test/mocks/http/conn_pool.cc index 77d21bdf85a3..035f566a6b13 100644 --- a/test/mocks/http/conn_pool.cc +++ b/test/mocks/http/conn_pool.cc @@ -4,9 +4,6 @@ namespace Envoy { namespace Http { namespace ConnectionPool { -MockCancellable::MockCancellable() = default; -MockCancellable::~MockCancellable() = default; - MockInstance::MockInstance() : host_{std::make_shared>()} { ON_CALL(*this, host()).WillByDefault(Return(host_)); diff --git a/test/mocks/http/conn_pool.h b/test/mocks/http/conn_pool.h index f5116ae3cda6..4fd32853cfa9 100644 --- a/test/mocks/http/conn_pool.h +++ b/test/mocks/http/conn_pool.h @@ -2,6 +2,7 @@ #include "envoy/http/conn_pool.h" +#include "test/mocks/common.h" #include "test/mocks/upstream/host.h" #include "gmock/gmock.h" @@ -10,15 +11,6 @@ namespace Envoy { namespace Http { namespace ConnectionPool { -class MockCancellable : public Cancellable { -public: - MockCancellable(); - ~MockCancellable() override; - - // Http::ConnectionPool::Cancellable - MOCK_METHOD(void, cancel, ()); -}; - class MockInstance : public Instance { public: MockInstance(); diff --git a/test/mocks/tcp/BUILD b/test/mocks/tcp/BUILD index 2ac39f512288..263b2e49ba08 100644 --- a/test/mocks/tcp/BUILD +++ b/test/mocks/tcp/BUILD @@ -15,6 +15,7 @@ envoy_cc_mock( deps = [ "//include/envoy/buffer:buffer_interface", "//include/envoy/tcp:conn_pool_interface", + "//test/mocks:common_lib", "//test/mocks/network:network_mocks", "//test/mocks/upstream:host_mocks", ], diff --git a/test/mocks/tcp/mocks.cc b/test/mocks/tcp/mocks.cc index 8d86a1f204a0..8b86988451af 100644 --- a/test/mocks/tcp/mocks.cc +++ b/test/mocks/tcp/mocks.cc @@ -12,9 +12,6 @@ namespace Envoy { namespace Tcp { namespace ConnectionPool { -MockCancellable::MockCancellable() = default; -MockCancellable::~MockCancellable() = default; - MockUpstreamCallbacks::MockUpstreamCallbacks() = default; MockUpstreamCallbacks::~MockUpstreamCallbacks() = default; @@ -33,7 +30,7 @@ MockInstance::MockInstance() { } MockInstance::~MockInstance() = default; -MockCancellable* MockInstance::newConnectionImpl(Callbacks& cb) { +Envoy::ConnectionPool::MockCancellable* MockInstance::newConnectionImpl(Callbacks& cb) { handles_.emplace_back(); callbacks_.push_back(&cb); return &handles_.back(); diff --git a/test/mocks/tcp/mocks.h b/test/mocks/tcp/mocks.h index 74f5c8f85f24..ab9af40f376c 100644 --- a/test/mocks/tcp/mocks.h +++ b/test/mocks/tcp/mocks.h @@ -15,15 +15,6 @@ namespace Envoy { namespace Tcp { namespace ConnectionPool { -class MockCancellable : 
public Cancellable { -public: - MockCancellable(); - ~MockCancellable() override; - - // Tcp::ConnectionPool::Cancellable - MOCK_METHOD(void, cancel, (CancelPolicy cancel_policy)); -}; - class MockUpstreamCallbacks : public UpstreamCallbacks { public: MockUpstreamCallbacks(); @@ -65,14 +56,14 @@ class MockInstance : public Instance { MOCK_METHOD(Cancellable*, newConnection, (Tcp::ConnectionPool::Callbacks & callbacks)); MOCK_METHOD(Upstream::HostDescriptionConstSharedPtr, host, (), (const)); - MockCancellable* newConnectionImpl(Callbacks& cb); + Envoy::ConnectionPool::MockCancellable* newConnectionImpl(Callbacks& cb); void poolFailure(PoolFailureReason reason); void poolReady(Network::MockClientConnection& conn); // Invoked when connection_data_, having been assigned via poolReady is released. MOCK_METHOD(void, released, (Network::MockClientConnection&)); - std::list> handles_; + std::list> handles_; std::list callbacks_; std::shared_ptr> host_{ From e5ccf1af01ec3c6c70726f8e12495ecfdcfdc2ca Mon Sep 17 00:00:00 2001 From: jianwen612 <55008549+jianwen612@users.noreply.github.com> Date: Fri, 12 Jun 2020 10:33:53 -0500 Subject: [PATCH 348/909] http: fix OSS-issue-22664 (#11455) Notice that identifier is defined in api/envoy/config/core/v3/protocol.proto as: google.protobuf.UInt32Value identifier = 1 [ (validate.rules).uint32 = {lte: 65536 gte: 1}, (validate.rules).message = {required: true} ]; The value should range from 0 to 65535 instead of from 1 to 65536. Because 65536 is greater than uint16::max, it triggers the assert to terminate the program. The fix is to set it to {lte:65535 gte:0}, instead of {lte: 65536 gte: 1} Signed-off-by: jianwen --- api/envoy/config/core/v3/protocol.proto | 2 +- api/envoy/config/core/v4alpha/protocol.proto | 2 +- .../envoy/config/core/v3/protocol.proto | 2 +- .../envoy/config/core/v4alpha/protocol.proto | 2 +- ...inimized-config_fuzz_test-5186283155750912 | 70 +++++++++++++++++++ 5 files changed, 74 insertions(+), 4 deletions(-) create mode 100644 test/server/server_corpus/clusterfuzz-testcase-minimized-config_fuzz_test-5186283155750912 diff --git a/api/envoy/config/core/v3/protocol.proto b/api/envoy/config/core/v3/protocol.proto index 7866b87999e4..339601feab3d 100644 --- a/api/envoy/config/core/v3/protocol.proto +++ b/api/envoy/config/core/v3/protocol.proto @@ -172,7 +172,7 @@ message Http2ProtocolOptions { // The 16 bit parameter identifier. google.protobuf.UInt32Value identifier = 1 [ - (validate.rules).uint32 = {lte: 65536 gte: 1}, + (validate.rules).uint32 = {lte: 65535 gte: 0}, (validate.rules).message = {required: true} ]; diff --git a/api/envoy/config/core/v4alpha/protocol.proto b/api/envoy/config/core/v4alpha/protocol.proto index 773aa184bdba..2ec9244124bd 100644 --- a/api/envoy/config/core/v4alpha/protocol.proto +++ b/api/envoy/config/core/v4alpha/protocol.proto @@ -172,7 +172,7 @@ message Http2ProtocolOptions { // The 16 bit parameter identifier. google.protobuf.UInt32Value identifier = 1 [ - (validate.rules).uint32 = {lte: 65536 gte: 1}, + (validate.rules).uint32 = {lte: 65535 gte: 0}, (validate.rules).message = {required: true} ]; diff --git a/generated_api_shadow/envoy/config/core/v3/protocol.proto b/generated_api_shadow/envoy/config/core/v3/protocol.proto index 7866b87999e4..339601feab3d 100644 --- a/generated_api_shadow/envoy/config/core/v3/protocol.proto +++ b/generated_api_shadow/envoy/config/core/v3/protocol.proto @@ -172,7 +172,7 @@ message Http2ProtocolOptions { // The 16 bit parameter identifier. 
google.protobuf.UInt32Value identifier = 1 [ - (validate.rules).uint32 = {lte: 65536 gte: 1}, + (validate.rules).uint32 = {lte: 65535 gte: 0}, (validate.rules).message = {required: true} ]; diff --git a/generated_api_shadow/envoy/config/core/v4alpha/protocol.proto b/generated_api_shadow/envoy/config/core/v4alpha/protocol.proto index 773aa184bdba..2ec9244124bd 100644 --- a/generated_api_shadow/envoy/config/core/v4alpha/protocol.proto +++ b/generated_api_shadow/envoy/config/core/v4alpha/protocol.proto @@ -172,7 +172,7 @@ message Http2ProtocolOptions { // The 16 bit parameter identifier. google.protobuf.UInt32Value identifier = 1 [ - (validate.rules).uint32 = {lte: 65536 gte: 1}, + (validate.rules).uint32 = {lte: 65535 gte: 0}, (validate.rules).message = {required: true} ]; diff --git a/test/server/server_corpus/clusterfuzz-testcase-minimized-config_fuzz_test-5186283155750912 b/test/server/server_corpus/clusterfuzz-testcase-minimized-config_fuzz_test-5186283155750912 new file mode 100644 index 000000000000..d5f214057b15 --- /dev/null +++ b/test/server/server_corpus/clusterfuzz-testcase-minimized-config_fuzz_test-5186283155750912 @@ -0,0 +1,70 @@ +static_resources { + clusters { + name: "www.google.com" + connect_timeout { + nanos: 61 + } + http2_protocol_options { + initial_stream_window_size { + value: 917504 + } + initial_connection_window_size { + value: 1952382976 + } + allow_connect: true + max_outbound_control_frames { + value: 1952382976 + } + stream_error_on_invalid_http_messaging: true + custom_settings_parameters { + identifier { + value: 65536 + } + value { + value: 7536640 + } + } + custom_settings_parameters { + identifier { + value: 65536 + } + value { + value: 7536640 + } + } + } + alt_stat_name: ";" + load_assignment { + cluster_name: "domains" + policy { + hidden_envoy_deprecated_disable_overprovisioning: true + } + } + lrs_server { + path: ":" + } + } +} +dynamic_resources { +} +stats_sinks { + hidden_envoy_deprecated_config { + fields { + key: "fffffffffffffffffffffffffff" + value { + } + } + } +} +stats_sinks { +} +stats_sinks { + typed_config { + type_url: "type.googleapis.com/envoy.api.v2.route.Route" + value: "J\004\022\002\010\001J\005\n\003\022\0019J\004\022\002\010\001b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000b\000" + } +} +admin { +} +enable_dispatcher_stats: true +header_prefix: "*" From 393f1bf5cf1c28fef9d382ea5b4e2eb54c19e07c Mon Sep 17 00:00:00 2001 From: Florin Coras Date: Fri, 12 Jun 2020 09:33:55 -0700 Subject: [PATCH 349/909] network: add more socket apis to io handle (#11557) Let io handle implementation interact with the underlying socket layer. 
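As a rough, illustrative sketch (the bindAndListen helper below is hypothetical and not part of this patch), a caller can now drive the socket entirely through the handle rather than fetching the raw fd and going through Api::OsSysCallsSingleton directly:

    #include "envoy/network/address.h"
    #include "envoy/network/io_handle.h"

    // Hypothetical helper (illustration only): bind, switch to non-blocking, and
    // listen without ever touching the raw fd.
    Envoy::Api::SysCallIntResult bindAndListen(Envoy::Network::IoHandle& io_handle,
                                               const Envoy::Network::Address::Instance& addr) {
      auto rc = io_handle.bind(addr.sockAddr(), addr.sockAddrLen());
      if (rc.rc_ == -1) {
        return rc; // errno_ carries the failure reason.
      }
      io_handle.setBlocking(false);
      return io_handle.listen(128 /* backlog */);
    }

This mirrors the SocketImpl changes below, where the bind()/listen()/connect()/setsockopt() syscalls move off Api::OsSysCallsSingleton and onto the IoHandle.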
Signed-off-by: Florin Coras --- include/envoy/network/BUILD | 3 +- include/envoy/network/address.h | 1 - include/envoy/network/io_handle.h | 59 +++++++++++++++++-- source/common/network/BUILD | 1 + .../common/network/io_socket_handle_impl.cc | 30 ++++++++++ source/common/network/io_socket_handle_impl.h | 9 +++ source/common/network/socket_impl.cc | 24 +++----- .../quiche/quic_io_handle_wrapper.h | 21 +++++++ test/common/http/http2/BUILD | 1 + test/mocks/network/io_handle.h | 9 +++ 10 files changed, 134 insertions(+), 24 deletions(-) diff --git a/include/envoy/network/BUILD b/include/envoy/network/BUILD index 977f8e7d4067..6e395ed56ab7 100644 --- a/include/envoy/network/BUILD +++ b/include/envoy/network/BUILD @@ -12,7 +12,6 @@ envoy_cc_library( name = "address_interface", hdrs = ["address.h"], deps = [ - ":io_handle_interface", "//include/envoy/api:os_sys_calls_interface", ], ) @@ -77,7 +76,9 @@ envoy_cc_library( name = "io_handle_interface", hdrs = ["io_handle.h"], deps = [ + ":address_interface", "//include/envoy/api:io_error_interface", + "//include/envoy/api:os_sys_calls_interface", "//source/common/common:assert_lib", ], ) diff --git a/include/envoy/network/address.h b/include/envoy/network/address.h index 136b10f3cf3e..7ba285ca23da 100644 --- a/include/envoy/network/address.h +++ b/include/envoy/network/address.h @@ -10,7 +10,6 @@ #include "envoy/api/os_sys_calls.h" #include "envoy/common/platform.h" #include "envoy/common/pure.h" -#include "envoy/network/io_handle.h" #include "absl/numeric/int128.h" #include "absl/strings/string_view.h" diff --git a/include/envoy/network/io_handle.h b/include/envoy/network/io_handle.h index 132912218c52..4c4e70ceb87e 100644 --- a/include/envoy/network/io_handle.h +++ b/include/envoy/network/io_handle.h @@ -5,6 +5,7 @@ #include "envoy/api/io_error.h" #include "envoy/common/platform.h" #include "envoy/common/pure.h" +#include "envoy/network/address.h" #include "absl/container/fixed_array.h" @@ -16,12 +17,6 @@ struct RawSlice; using RawSliceArrays = absl::FixedArray>; namespace Network { -namespace Address { -class Instance; -class Ip; - -using InstanceConstSharedPtr = std::shared_ptr; -} // namespace Address /** * IoHandle: an abstract interface for all I/O operations @@ -144,6 +139,58 @@ class IoHandle { * return true if the platform supports recvmmsg() and sendmmsg(). */ virtual bool supportsMmsg() const PURE; + + /** + * Bind to address. The handle should have been created with a call to socket() + * @param address address to bind to. + * @param addrlen address length + * @return a Api::SysCallIntResult with rc_ = 0 for success and rc_ = -1 for failure. If the call + * is successful, errno_ shouldn't be used. + */ + virtual Api::SysCallIntResult bind(const sockaddr* address, socklen_t addrlen) PURE; + + /** + * Listen on bound handle. + * @param backlog maximum number of pending connections for listener + * @return a Api::SysCallIntResult with rc_ = 0 for success and rc_ = -1 for failure. If the call + * is successful, errno_ shouldn't be used. + */ + virtual Api::SysCallIntResult listen(int backlog) PURE; + + /** + * Connect to address. The handle should have been created with a call to socket() + * on this object. + * @param address remote address to connect to. + * @param addrlen remote address length + * @return a Api::SysCallIntResult with rc_ = 0 for success and rc_ = -1 for failure. If the call + * is successful, errno_ shouldn't be used. 
+ */ + virtual Api::SysCallIntResult connect(const sockaddr* address, socklen_t addrlen) PURE; + + /** + * Set option (see man 2 setsockopt) + */ + virtual Api::SysCallIntResult setOption(int level, int optname, const void* optval, + socklen_t optlen) PURE; + + /** + * Get option (see man 2 getsockopt) + */ + virtual Api::SysCallIntResult getOption(int level, int optname, void* optval, + socklen_t* optlen) PURE; + + /** + * Get local address to which handle is bound (see man 2 getsockname) + */ + virtual Api::SysCallIntResult getLocalAddress(sockaddr* address, socklen_t* addrlen) PURE; + + /** + * Toggle blocking behavior + * @param blocking flag to set/unset blocking state + * @return a Api::SysCallIntResult with rc_ = 0 for success and rc_ = -1 for failure. If the call + * is successful, errno_ shouldn't be used. + */ + virtual Api::SysCallIntResult setBlocking(bool blocking) PURE; }; using IoHandlePtr = std::unique_ptr; diff --git a/source/common/network/BUILD b/source/common/network/BUILD index 1073d7590b2d..c7c024fa2cf1 100644 --- a/source/common/network/BUILD +++ b/source/common/network/BUILD @@ -144,6 +144,7 @@ envoy_cc_library( hdrs = ["hash_policy.h"], deps = [ "//include/envoy/network:hash_policy_interface", + "//source/common/common:assert_lib", "//source/common/common:hash_lib", "@envoy_api//envoy/type/v3:pkg_cc_proto", ], diff --git a/source/common/network/io_socket_handle_impl.cc b/source/common/network/io_socket_handle_impl.cc index 306c0c425f64..b0ac43924963 100644 --- a/source/common/network/io_socket_handle_impl.cc +++ b/source/common/network/io_socket_handle_impl.cc @@ -365,5 +365,35 @@ bool IoSocketHandleImpl::supportsMmsg() const { return Api::OsSysCallsSingleton::get().supportsMmsg(); } +Api::SysCallIntResult IoSocketHandleImpl::bind(const sockaddr* address, socklen_t addrlen) { + return Api::OsSysCallsSingleton::get().bind(fd_, address, addrlen); +} + +Api::SysCallIntResult IoSocketHandleImpl::listen(int backlog) { + return Api::OsSysCallsSingleton::get().listen(fd_, backlog); +} + +Api::SysCallIntResult IoSocketHandleImpl::connect(const sockaddr* address, socklen_t addrlen) { + return Api::OsSysCallsSingleton::get().connect(fd_, address, addrlen); +} + +Api::SysCallIntResult IoSocketHandleImpl::setOption(int level, int optname, const void* optval, + socklen_t optlen) { + return Api::OsSysCallsSingleton::get().setsockopt(fd_, level, optname, optval, optlen); +} + +Api::SysCallIntResult IoSocketHandleImpl::getOption(int level, int optname, void* optval, + socklen_t* optlen) { + return Api::OsSysCallsSingleton::get().getsockopt(fd_, level, optname, optval, optlen); +} + +Api::SysCallIntResult IoSocketHandleImpl::getLocalAddress(sockaddr* address, socklen_t* addrlen) { + return Api::OsSysCallsSingleton::get().getsockname(fd_, address, addrlen); +} + +Api::SysCallIntResult IoSocketHandleImpl::setBlocking(bool blocking) { + return Api::OsSysCallsSingleton::get().setsocketblocking(fd_, blocking); +} + } // namespace Network } // namespace Envoy diff --git a/source/common/network/io_socket_handle_impl.h b/source/common/network/io_socket_handle_impl.h index cd1a97ea3ac1..9f9a563aabe8 100644 --- a/source/common/network/io_socket_handle_impl.h +++ b/source/common/network/io_socket_handle_impl.h @@ -45,6 +45,15 @@ class IoSocketHandleImpl : public IoHandle, protected Logger::Loggable diff --git a/source/common/network/socket_impl.cc b/source/common/network/socket_impl.cc index 60bb24764dd9..55081d5f3a62 100644 --- a/source/common/network/socket_impl.cc +++ 
b/source/common/network/socket_impl.cc @@ -32,8 +32,7 @@ SocketImpl::SocketImpl(IoHandlePtr&& io_handle, socklen_t len = sizeof(addr); Api::SysCallIntResult result; - result = Api::OsSysCallsSingleton::get().getsockname( - io_handle_->fd(), reinterpret_cast(&addr), &len); + result = io_handle_->getLocalAddress(reinterpret_cast(&addr), &len); // This should never happen in practice but too many tests inject fake fds ... if (result.rc_ < 0) { @@ -58,8 +57,7 @@ Api::SysCallIntResult SocketImpl::bind(Network::Address::InstanceConstSharedPtr unlink(pipe_sa->sun_path); } // Not storing a reference to syscalls singleton because of unit test mocks - bind_result = Api::OsSysCallsSingleton::get().bind(io_handle_->fd(), address->sockAddr(), - address->sockAddrLen()); + bind_result = io_handle_->bind(address->sockAddr(), address->sockAddrLen()); if (pipe->mode() != 0 && !abstract_namespace && bind_result.rc_ == 0) { auto set_permissions = Api::OsSysCallsSingleton::get().chmod(pipe_sa->sun_path, pipe->mode()); if (set_permissions.rc_ != 0) { @@ -71,21 +69,17 @@ Api::SysCallIntResult SocketImpl::bind(Network::Address::InstanceConstSharedPtr return bind_result; } - bind_result = Api::OsSysCallsSingleton::get().bind(io_handle_->fd(), address->sockAddr(), - address->sockAddrLen()); + bind_result = io_handle_->bind(address->sockAddr(), address->sockAddrLen()); if (bind_result.rc_ == 0 && address->ip()->port() == 0) { local_address_ = SocketInterfaceSingleton::get().addressFromFd(io_handle_->fd()); } return bind_result; } -Api::SysCallIntResult SocketImpl::listen(int backlog) { - return Api::OsSysCallsSingleton::get().listen(io_handle_->fd(), backlog); -} +Api::SysCallIntResult SocketImpl::listen(int backlog) { return io_handle_->listen(backlog); } Api::SysCallIntResult SocketImpl::connect(const Network::Address::InstanceConstSharedPtr address) { - auto result = Api::OsSysCallsSingleton::get().connect(io_handle_->fd(), address->sockAddr(), - address->sockAddrLen()); + auto result = io_handle_->connect(address->sockAddr(), address->sockAddrLen()); if (address->type() == Address::Type::Ip) { local_address_ = SocketInterfaceSingleton::get().addressFromFd(io_handle_->fd()); } @@ -94,18 +88,16 @@ Api::SysCallIntResult SocketImpl::connect(const Network::Address::InstanceConstS Api::SysCallIntResult SocketImpl::setSocketOption(int level, int optname, const void* optval, socklen_t optlen) { - return Api::OsSysCallsSingleton::get().setsockopt(io_handle_->fd(), level, optname, optval, - optlen); + return io_handle_->setOption(level, optname, optval, optlen); } Api::SysCallIntResult SocketImpl::getSocketOption(int level, int optname, void* optval, socklen_t* optlen) const { - return Api::OsSysCallsSingleton::get().getsockopt(io_handle_->fd(), level, optname, optval, - optlen); + return io_handle_->getOption(level, optname, optval, optlen); } Api::SysCallIntResult SocketImpl::setBlockingForTest(bool blocking) { - return Api::OsSysCallsSingleton::get().setsocketblocking(io_handle_->fd(), blocking); + return io_handle_->setBlocking(blocking); } absl::optional SocketImpl::ipVersion() const { diff --git a/source/extensions/quic_listeners/quiche/quic_io_handle_wrapper.h b/source/extensions/quic_listeners/quiche/quic_io_handle_wrapper.h index 6a468bf867be..86b6f092d28f 100644 --- a/source/extensions/quic_listeners/quiche/quic_io_handle_wrapper.h +++ b/source/extensions/quic_listeners/quiche/quic_io_handle_wrapper.h @@ -63,6 +63,27 @@ class QuicIoHandleWrapper : public Network::IoHandle { return io_handle_.recvmmsg(slices, 
self_port, output); } bool supportsMmsg() const override { return io_handle_.supportsMmsg(); } + Api::SysCallIntResult bind(const sockaddr* address, socklen_t addrlen) override { + return io_handle_.bind(address, addrlen); + } + Api::SysCallIntResult listen(int backlog) override { return io_handle_.listen(backlog); } + Api::SysCallIntResult connect(const sockaddr* address, socklen_t addrlen) override { + return io_handle_.connect(address, addrlen); + } + Api::SysCallIntResult setOption(int level, int optname, const void* optval, + socklen_t optlen) override { + return io_handle_.setOption(level, optname, optval, optlen); + } + Api::SysCallIntResult getOption(int level, int optname, void* optval, + socklen_t* optlen) override { + return io_handle_.getOption(level, optname, optval, optlen); + } + Api::SysCallIntResult getLocalAddress(sockaddr* address, socklen_t* addrlen) override { + return io_handle_.getLocalAddress(address, addrlen); + } + Api::SysCallIntResult setBlocking(bool blocking) override { + return io_handle_.setBlocking(blocking); + } private: Network::IoHandle& io_handle_; diff --git a/test/common/http/http2/BUILD b/test/common/http/http2/BUILD index 53cbbfdedc62..db49b187c87e 100644 --- a/test/common/http/http2/BUILD +++ b/test/common/http/http2/BUILD @@ -72,6 +72,7 @@ envoy_cc_test_library( srcs = ["http2_frame.cc"], hdrs = ["http2_frame.h"], deps = [ + "//source/common/common:assert_lib", "//source/common/common:macros", ], ) diff --git a/test/mocks/network/io_handle.h b/test/mocks/network/io_handle.h index 460d90f7c428..53700f823789 100644 --- a/test/mocks/network/io_handle.h +++ b/test/mocks/network/io_handle.h @@ -29,6 +29,15 @@ class MockIoHandle : public IoHandle { MOCK_METHOD(Api::IoCallUint64Result, recvmmsg, (RawSliceArrays & slices, uint32_t self_port, RecvMsgOutput& output)); MOCK_METHOD(bool, supportsMmsg, (), (const)); + MOCK_METHOD(Api::SysCallIntResult, bind, (const sockaddr* address, socklen_t addrlen)); + MOCK_METHOD(Api::SysCallIntResult, listen, (int backlog)); + MOCK_METHOD(Api::SysCallIntResult, connect, (const sockaddr* address, socklen_t addrlen)); + MOCK_METHOD(Api::SysCallIntResult, setOption, + (int level, int optname, const void* optval, socklen_t optlen)); + MOCK_METHOD(Api::SysCallIntResult, getOption, + (int level, int optname, void* optval, socklen_t* optlen)); + MOCK_METHOD(Api::SysCallIntResult, getLocalAddress, (sockaddr * address, socklen_t* addrlen)); + MOCK_METHOD(Api::SysCallIntResult, setBlocking, (bool blocking)); }; } // namespace Network From 9e8b32f3f29213e66dd32a07d5922dca700ea0e2 Mon Sep 17 00:00:00 2001 From: tomocy <36136133+tomocy@users.noreply.github.com> Date: Sat, 13 Jun 2020 02:55:52 +0900 Subject: [PATCH 350/909] style: fix Doxygen style (#11516) Signed-off-by: tomocy --- include/envoy/http/filter.h | 2 +- source/common/upstream/health_checker_impl.h | 3 ++- source/extensions/filters/common/rbac/matchers.h | 2 +- source/server/listener_impl.h | 2 +- source/server/server.h | 2 +- 5 files changed, 6 insertions(+), 5 deletions(-) diff --git a/include/envoy/http/filter.h b/include/envoy/http/filter.h index ef1e195b28c6..0d6e243140a6 100644 --- a/include/envoy/http/filter.h +++ b/include/envoy/http/filter.h @@ -545,7 +545,7 @@ class StreamDecoderFilter : public StreamFilterBase { * should consider using StopAllIterationAndBuffer or StopAllIterationAndWatermark in * decodeHeaders() to prevent metadata passing to the following filters. * - * @param metadata supplies the decoded metadata. 
+ * @param metadata_map supplies the decoded metadata. */ virtual FilterMetadataStatus decodeMetadata(MetadataMap& /* metadata_map */) { return Http::FilterMetadataStatus::Continue; diff --git a/source/common/upstream/health_checker_impl.h b/source/common/upstream/health_checker_impl.h index 07b2f8d93dc5..b8b083138151 100644 --- a/source/common/upstream/health_checker_impl.h +++ b/source/common/upstream/health_checker_impl.h @@ -32,8 +32,9 @@ class HealthCheckerFactory : public Logger::Loggable * @param runtime supplies the runtime loader. * @param random supplies the random generator. * @param dispatcher supplies the dispatcher. - * @param event_logger supplies the event_logger. + * @param log_manager supplies the log_manager. * @param validation_visitor message validation visitor instance. + * @param api reference to the Api object * @return a health checker. */ static HealthCheckerSharedPtr diff --git a/source/extensions/filters/common/rbac/matchers.h b/source/extensions/filters/common/rbac/matchers.h index fcc10f41fdb1..a73bcf373266 100644 --- a/source/extensions/filters/common/rbac/matchers.h +++ b/source/extensions/filters/common/rbac/matchers.h @@ -38,7 +38,7 @@ class Matcher { * @param connection the downstream connection used to match against. * @param headers the request headers used to match against. An empty map should be used if * there are none headers available. - * @param metadata the additional information about the action/principal. + * @param info the additional information about the action/principal. */ virtual bool matches(const Network::Connection& connection, const Envoy::Http::RequestHeaderMap& headers, diff --git a/source/server/listener_impl.h b/source/server/listener_impl.h index 328e744a29a7..082e384e0dee 100644 --- a/source/server/listener_impl.h +++ b/source/server/listener_impl.h @@ -220,7 +220,7 @@ class ListenerImpl final : public Network::ListenerConfig, * @param workers_started supplies whether the listener is being added before or after workers * have been started. This controls various behavior related to init management. * @param hash supplies the hash to use for duplicate checking. - * @param validation_visitor message validation visitor instance. + * @param concurrency is the number of listeners instances to be created. */ ListenerImpl(const envoy::config::listener::v3::Listener& config, const std::string& version_info, ListenerManagerImpl& parent, const std::string& name, bool added_via_api, diff --git a/source/server/server.h b/source/server/server.h index e6014f038645..b92dd5c96e37 100644 --- a/source/server/server.h +++ b/source/server/server.h @@ -117,8 +117,8 @@ class InstanceUtil : Logger::Loggable { * Load a bootstrap config and perform validation. * @param bootstrap supplies the bootstrap to fill. * @param options supplies the server options. - * @param api reference to the Api object * @param validation_visitor message validation visitor instance. 
+ * @param api reference to the Api object */ static void loadBootstrapConfig(envoy::config::bootstrap::v3::Bootstrap& bootstrap, const Options& options, From 6880b1fb3dee7d9b74efc681bb749eb3c153545c Mon Sep 17 00:00:00 2001 From: danzh Date: Sat, 13 Jun 2020 21:59:19 -0400 Subject: [PATCH 351/909] quiche: update tarball to d88a2f7a9ff5f9f6be2f50411b15b091affe04d3 (#11562) Signed-off-by: Dan Zhang --- bazel/external/quiche.BUILD | 17 +- bazel/repository_locations.bzl | 16 +- .../quiche/envoy_quic_dispatcher.h | 7 - .../quiche/envoy_quic_fake_proof_verifier.h | 11 +- .../quiche/envoy_quic_packet_writer.h | 7 +- .../quic_listeners/quiche/platform/BUILD | 13 +- .../quiche/platform/flags_list.h | 178 +++++++++++------- .../quiche/platform/quiche_optional_impl.h | 2 +- .../quiche/platform/quiche_time_utils_impl.cc | 42 +++++ .../quiche/platform/quiche_time_utils_impl.h | 21 +++ .../quiche/active_quic_listener_test.cc | 21 ++- .../quiche/envoy_quic_client_session_test.cc | 2 + .../quiche/envoy_quic_client_stream_test.cc | 9 +- .../quiche/envoy_quic_dispatcher_test.cc | 16 +- .../quiche/envoy_quic_server_session_test.cc | 2 + .../quiche/envoy_quic_server_stream_test.cc | 9 +- .../integration/quic_http_integration_test.cc | 8 +- .../quic_listeners/quiche/test_utils.h | 44 +++-- 18 files changed, 299 insertions(+), 126 deletions(-) create mode 100644 source/extensions/quic_listeners/quiche/platform/quiche_time_utils_impl.cc create mode 100644 source/extensions/quic_listeners/quiche/platform/quiche_time_utils_impl.h diff --git a/bazel/external/quiche.BUILD b/bazel/external/quiche.BUILD index 30c28f30f607..f4718d39f692 100644 --- a/bazel/external/quiche.BUILD +++ b/bazel/external/quiche.BUILD @@ -1843,6 +1843,7 @@ envoy_cc_library( visibility = ["//visibility:public"], deps = [ ":quic_core_crypto_boring_utils_lib", + ":quic_core_types_lib", ":quic_platform", ":quic_platform_ip_address", ":quiche_common_platform", @@ -3553,11 +3554,22 @@ envoy_cc_test_library( deps = [":epoll_server_platform"], ) +envoy_cc_library( + name = "quiche_common_platform_optional", + hdrs = ["quiche/common/platform/api/quiche_optional.h"], + repository = "@envoy", + tags = ["nofips"], + visibility = ["//visibility:public"], + deps = [ + ":quiche_common_platform_export", + "@envoy//source/extensions/quic_listeners/quiche/platform:quiche_common_platform_optional_impl_lib", + ], +) + envoy_cc_library( name = "quiche_common_platform", hdrs = [ "quiche/common/platform/api/quiche_arraysize.h", - "quiche/common/platform/api/quiche_export.h", "quiche/common/platform/api/quiche_logging.h", "quiche/common/platform/api/quiche_map_util.h", "quiche/common/platform/api/quiche_optional.h", @@ -3565,6 +3577,7 @@ envoy_cc_library( "quiche/common/platform/api/quiche_str_cat.h", "quiche/common/platform/api/quiche_string_piece.h", "quiche/common/platform/api/quiche_text_utils.h", + "quiche/common/platform/api/quiche_time_utils.h", "quiche/common/platform/api/quiche_unordered_containers.h", ], repository = "@envoy", @@ -3572,6 +3585,7 @@ envoy_cc_library( visibility = ["//visibility:public"], deps = [ ":quiche_common_platform_export", + ":quiche_common_platform_optional", "@envoy//source/extensions/quic_listeners/quiche/platform:quiche_common_platform_impl_lib", ], ) @@ -3582,6 +3596,7 @@ envoy_cc_test_library( "quiche/common/platform/api/quiche_endian_test.cc", "quiche/common/platform/api/quiche_str_cat_test.cc", "quiche/common/platform/api/quiche_text_utils_test.cc", + "quiche/common/platform/api/quiche_time_utils_test.cc", ], hdrs = 
["quiche/common/platform/api/quiche_test.h"], repository = "@envoy", diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 53e19c32abc4..6d081d5a65f8 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -74,16 +74,16 @@ DEPENDENCY_REPOSITORIES = dict( use_category = ["build"], ), boringssl = dict( - sha256 = "8ae14b52b7889cf92f3b107610b12afb5011506c77f90c7b3d4a36ed7283905a", - strip_prefix = "boringssl-107c03cf6d364939469194396bf7a6b2572d0f9c", + sha256 = "07f1524766b9ed1543674b48e7fce7e3569b6e2b6c0c43ec124dedee9b60f641", + strip_prefix = "boringssl-a0899df79b3a63e606448c72d63a090d86bdb75b", # To update BoringSSL, which tracks Chromium releases: # 1. Open https://omahaproxy.appspot.com/ and note of linux/stable release. # 2. Open https://chromium.googlesource.com/chromium/src/+/refs/tags//DEPS and note . # 3. Find a commit in BoringSSL's "master-with-bazel" branch that merges . # - # chromium-83.0.4103.62 - # 2020-03-16 - urls = ["https://github.com/google/boringssl/archive/107c03cf6d364939469194396bf7a6b2572d0f9c.tar.gz"], + # chromium-84.0.4147.45(beta) + # 2020-05-14 + urls = ["https://github.com/google/boringssl/archive/a0899df79b3a63e606448c72d63a090d86bdb75b.tar.gz"], use_category = ["dataplane"], cpe = "N/A", ), @@ -411,9 +411,9 @@ DEPENDENCY_REPOSITORIES = dict( cpe = "N/A", ), com_googlesource_quiche = dict( - # Static snapshot of https://quiche.googlesource.com/quiche/+archive/25da9198727ef05edeb99d9f4ce5b6acb3cb87b5.tar.gz - sha256 = "52bac2f91a0900730fe3bfb14ffb668f205fe0de48e42ecfee677e3743ec33ee", - urls = ["https://storage.googleapis.com/quiche-envoy-integration/25da9198727ef05edeb99d9f4ce5b6acb3cb87b5.tar.gz"], + # Static snapshot of https://quiche.googlesource.com/quiche/+archive/d88a2f7a9ff5f9f6be2f50411b15b091affe04d3.tar.gz + sha256 = "c1c5dc165f0509097fa3917d81988e4ac5f9f3da4c2361ee435dfa7f8f428016", + urls = ["https://storage.googleapis.com/quiche-envoy-integration/d88a2f7a9ff5f9f6be2f50411b15b091affe04d3.tar.gz"], use_category = ["dataplane"], cpe = "N/A", ), diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_dispatcher.h b/source/extensions/quic_listeners/quiche/envoy_quic_dispatcher.h index 2ad8d56241a5..ede0c5b42625 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_dispatcher.h +++ b/source/extensions/quic_listeners/quiche/envoy_quic_dispatcher.h @@ -56,13 +56,6 @@ class EnvoyQuicDispatcher : public quic::QuicDispatcher { const std::string& error_details, quic::ConnectionCloseSource source) override; - quic::QuicConnectionId - GenerateNewServerConnectionId(quic::ParsedQuicVersion /*version*/, - quic::QuicConnectionId /*connection_id*/) const override { - // TODO(danzh): create reject connection id based on given connection_id. - return quic::QuicUtils::CreateRandomConnectionId(); - } - protected: std::unique_ptr CreateQuicSession(quic::QuicConnectionId server_connection_id, diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_fake_proof_verifier.h b/source/extensions/quic_listeners/quiche/envoy_quic_fake_proof_verifier.h index a72f0b3d8e3c..af107983317b 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_fake_proof_verifier.h +++ b/source/extensions/quic_listeners/quiche/envoy_quic_fake_proof_verifier.h @@ -25,14 +25,14 @@ class EnvoyQuicFakeProofVerifier : public quic::ProofVerifier { // Return success if the certs chain is valid and signature is "Fake signature for { // [server_config] }". Otherwise failure. 
quic::QuicAsyncStatus - VerifyProof(const std::string& hostname, const uint16_t /*port*/, + VerifyProof(const std::string& hostname, const uint16_t port, const std::string& /*server_config*/, quic::QuicTransportVersion /*quic_version*/, absl::string_view /*chlo_hash*/, const std::vector& certs, const std::string& cert_sct, const std::string& /*signature*/, const quic::ProofVerifyContext* context, std::string* error_details, std::unique_ptr* details, std::unique_ptr callback) override { - if (VerifyCertChain(hostname, certs, "", cert_sct, context, error_details, details, + if (VerifyCertChain(hostname, port, certs, "", cert_sct, context, error_details, details, std::move(callback)) == quic::QUIC_SUCCESS) { return quic::QUIC_SUCCESS; } @@ -41,9 +41,10 @@ class EnvoyQuicFakeProofVerifier : public quic::ProofVerifier { // Return success upon one arbitrary cert content. Otherwise failure. quic::QuicAsyncStatus - VerifyCertChain(const std::string& /*hostname*/, const std::vector& certs, - const std::string& /*ocsp_response*/, const std::string& cert_sct, - const quic::ProofVerifyContext* /*context*/, std::string* /*error_details*/, + VerifyCertChain(const std::string& /*hostname*/, const uint16_t /*port*/, + const std::vector& certs, const std::string& /*ocsp_response*/, + const std::string& cert_sct, const quic::ProofVerifyContext* /*context*/, + std::string* /*error_details*/, std::unique_ptr* /*details*/, std::unique_ptr /*callback*/) override { // Cert SCT support is not enabled for fake ProofSource. diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_packet_writer.h b/source/extensions/quic_listeners/quiche/envoy_quic_packet_writer.h index 55a6e5146d3a..4d2eed570165 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_packet_writer.h +++ b/source/extensions/quic_listeners/quiche/envoy_quic_packet_writer.h @@ -34,9 +34,10 @@ class EnvoyQuicPacketWriter : public quic::QuicPacketWriter { // Currently this writer doesn't support pacing offload or batch writing. 
bool SupportsReleaseTime() const override { return false; } bool IsBatchMode() const override { return false; } - char* GetNextWriteLocation(const quic::QuicIpAddress& /*self_address*/, - const quic::QuicSocketAddress& /*peer_address*/) override { - return nullptr; + quic::QuicPacketBuffer + GetNextWriteLocation(const quic::QuicIpAddress& /*self_address*/, + const quic::QuicSocketAddress& /*peer_address*/) override { + return {nullptr, nullptr}; } quic::WriteResult Flush() override { return {quic::WRITE_STATUS_OK, 0}; } diff --git a/source/extensions/quic_listeners/quiche/platform/BUILD b/source/extensions/quic_listeners/quiche/platform/BUILD index 2d36d09fa280..69eae676a3fe 100644 --- a/source/extensions/quic_listeners/quiche/platform/BUILD +++ b/source/extensions/quic_listeners/quiche/platform/BUILD @@ -240,17 +240,27 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "quiche_common_platform_optional_impl_lib", + hdrs = ["quiche_optional_impl.h"], + external_deps = [ + "abseil_node_hash_map", + ], + visibility = ["//visibility:public"], +) + envoy_cc_library( name = "quiche_common_platform_impl_lib", + srcs = ["quiche_time_utils_impl.cc"], hdrs = [ "quiche_arraysize_impl.h", "quiche_logging_impl.h", "quiche_map_util_impl.h", - "quiche_optional_impl.h", "quiche_ptr_util_impl.h", "quiche_str_cat_impl.h", "quiche_string_piece_impl.h", "quiche_text_utils_impl.h", + "quiche_time_utils_impl.h", "quiche_unordered_containers_impl.h", ], external_deps = [ @@ -261,6 +271,7 @@ envoy_cc_library( deps = [ ":quic_platform_logging_impl_lib", ":string_utils_lib", + "@com_googlesource_quiche//:quiche_common_platform_optional", ], ) diff --git a/source/extensions/quic_listeners/quiche/platform/flags_list.h b/source/extensions/quic_listeners/quiche/platform/flags_list.h index e4af6a590bc3..9ced2934b27f 100644 --- a/source/extensions/quic_listeners/quiche/platform/flags_list.h +++ b/source/extensions/quic_listeners/quiche/platform/flags_list.h @@ -8,30 +8,22 @@ // consumed or referenced directly by other Envoy code. It serves purely as a // porting layer for QUICHE. -// This file is generated by //third_party/quic/tools:quic_flags_list in -// Google3. - #if defined(QUICHE_FLAG) -QUICHE_FLAG( - bool, http2_reloadable_flag_http2_adapt_backend_stream_receive_window, true, - "If true, SpdyBackendDispatcher will use its PeriodicPingManager to periodically update the " - "per-stream receive flow control window based on its recorded RTT and a target throughput.") - QUICHE_FLAG( bool, http2_reloadable_flag_http2_backend_alpn_failure_error_code, false, "If true, the GFE will return a new ResponseCodeDetails error when ALPN to the backend fails.") -QUICHE_FLAG(bool, http2_reloadable_flag_http2_new_window_behavior, false, - "If true, the GFE is slightly more aggressive about sending WINDOW_UPDATE frames to " - "the peer. This should reduce flow control bottlenecks.") - QUICHE_FLAG(bool, http2_reloadable_flag_http2_security_requirement_for_client3, false, "If true, check whether client meets security requirements during SSL handshake. If " "flag is true and client does not meet security requirements, do not negotiate HTTP/2 " "with client or terminate the session with SPDY_INADEQUATE_SECURITY if HTTP/2 is " "already negotiated. 
The spec contains both cipher and TLS version requirements.") +QUICHE_FLAG(bool, http2_reloadable_flag_permissive_http2_switch, false, + "If true, the GFE allows both HTTP/1.0 and HTTP/1.1 versions in HTTP/2 upgrade " + "requests/responses.") + QUICHE_FLAG(bool, quic_reloadable_flag_advertise_quic_for_https_for_debugips, false, "") QUICHE_FLAG(bool, quic_reloadable_flag_advertise_quic_for_https_for_external_users, false, "") @@ -40,6 +32,9 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_ack_delay_alarm_granularity, false, "When true, ensure the ACK delay is never less than the alarm granularity when ACK " "decimation is enabled.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_advance_ack_timeout_update, true, + "If true, update ack timeout upon receiving an retransmittable frame.") + QUICHE_FLAG(bool, quic_reloadable_flag_quic_allow_backend_set_stream_ttl, false, "If true, check backend response header for X-Response-Ttl. If it is provided, the " "stream TTL is set. A QUIC stream will be immediately canceled when tries to write " @@ -51,9 +46,12 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_allow_client_enabled_bbr_v2, true, QUICHE_FLAG(bool, quic_reloadable_flag_quic_alpn_dispatch, false, "Support different QUIC sessions, as indicated by ALPN. Used for QBONE.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_avoid_overestimate_bandwidth_with_aggregation, true, - "If true, fix QUIC bandwidth sampler to avoid over estimating bandwidth in the " - "presence of ack aggregation.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_always_send_earliest_ack, false, + "If true, SendAllPendingAcks always send the earliest ACK.") + +QUICHE_FLAG( + bool, quic_reloadable_flag_quic_avoid_leak_writer_buffer, false, + "If true, QUIC will free writer-allocated packet buffer if writer->WritePacket is not called.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr2_add_ack_height_to_queueing_threshold, true, "If true, QUIC BBRv2 to take ack height into account when calculating " @@ -69,15 +67,11 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr2_fewer_startup_round_trips, fals QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr2_ignore_inflight_lo, false, "When true, QUIC's BBRv2 ignores inflight_lo in PROBE_BW.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr2_lower_startup_cwnd_gain, false, - "When true, QUIC BBRv2 lowers the CWND gain in STARTUP to 2 when the BBQ2 connection " - "option is supplied in the handshake.") - -QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_copy_sampler_state_from_v1_to_v2, true, - "If true, when QUIC switches from BbrSender to Bbr2Sender, Bbr2Sender will copy the " - "bandwidth sampler states from BbrSender.") +QUICHE_FLAG( + bool, quic_reloadable_flag_quic_bbr2_limit_inflight_hi, false, + "When true, the B2HI connection option limits reduction of inflight_hi to (1-Beta)*CWND.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_default_exit_startup_on_loss, false, +QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_default_exit_startup_on_loss, true, "If true, QUIC will enable connection options LRTT+BBQ2 by default.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_donot_inject_bandwidth, true, @@ -98,13 +92,10 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_no_bytes_acked_in_startup_recove "When in STARTUP and recovery, do not add bytes_acked to QUIC BBR's CWND in " "CalculateCongestionWindow()") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_use_available_min_rtt, true, - "If true, returns min_rtt in rtt_stats_ if it is available.") - -QUICHE_FLAG(bool, 
quic_reloadable_flag_quic_break_session_stream_close_loop, false, - "If true, break session/stream close loop.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_bootstrap_cwnd_by_spdy_priority, true, + "If true, bootstrap initial QUIC cwnd by SPDY priorities.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_bw_sampler_app_limited_starting_value, false, +QUICHE_FLAG(bool, quic_reloadable_flag_quic_bw_sampler_app_limited_starting_value, true, "If true, quic::BandwidthSampler will start in application limited phase.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_conservative_bursts, false, @@ -113,8 +104,11 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_conservative_bursts, false, QUICHE_FLAG(bool, quic_reloadable_flag_quic_conservative_cwnd_and_pacing_gains, false, "If true, uses conservative cwnd gain and pacing gain when cwnd gets bootstrapped.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_debug_wrong_qos, false, - "If true, consider getting QoS after stream has been detached as GFE bug.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_default_enable_5rto_blackhole_detection2, false, + "If true, default-enable 5RTO blachole detection.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_default_on_pto, false, + "If true, default on PTO which unifies TLP + RTO loss recovery.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_default_to_bbr, true, "When true, defaults to BBR congestion control instead of Cubic.") @@ -123,9 +117,6 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_default_to_bbr_v2, false, "If true, use BBRv2 as the default congestion controller. Takes precedence over " "--quic_default_to_bbr.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_deprecate_draining_streams, false, - "If true, remove draining_streams_ from QuicSession.") - QUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_version_q043, false, "If true, disable QUIC version Q043.") @@ -141,10 +132,16 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_version_q049, false, QUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_version_q050, false, "If true, disable QUIC version Q050.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_version_t050, false, + "If true, disable QUIC version h3-T050.") + QUICHE_FLAG( bool, quic_reloadable_flag_quic_do_not_accept_stop_waiting, false, "In v44 and above, where STOP_WAITING is never sent, close the connection if it's received.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_donot_change_queued_ack, false, + "If true, do not change ACK in PostProcessAckFrame if an ACK has been queued.") + QUICHE_FLAG(bool, quic_reloadable_flag_quic_donot_reset_ideal_next_packet_send_time, false, "If true, stop resetting ideal_next_packet_send_time_ in pacing sender.") @@ -160,52 +157,75 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_loss_detection_tuner, false, QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_tls_resumption, false, "If true, enables support for TLS resumption in QUIC.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_version_draft_25_v3, false, +QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_version_draft_25_v3, true, "If true, enable QUIC version h3-25.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_version_draft_27, false, +QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_version_draft_27, true, "If true, enable QUIC version h3-27.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_version_t050_v2, false, - "If true, enable QUIC version h3-T050.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_version_draft_28, false, + "If true, enable QUIC version h3-28.") 
+ +QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_zero_rtt_for_tls, false, + "If true, support for IETF QUIC 0-rtt is enabled.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_enabled, false, "") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_extend_idle_time_on_decryptable_packets, true, + "If true, only extend idle time on decryptable packets.") + QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_bbr_cwnd_in_bandwidth_resumption, true, "If true, adjust congestion window when doing bandwidth resumption in BBR.") -QUICHE_FLAG( - bool, quic_reloadable_flag_quic_ietf_alt_svc_format_first, false, - "When true, advertise IETF Alt-Svc format before legacy Google format instead of after.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_checking_should_generate_packet, false, + "If true, check ShouldGeneratePacket for every crypto packet.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_ignore_one_write_error_after_mtu_probe, true, - "If true, QUIC connection will ignore one packet write error after MTU probe.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_last_inflight_packets_sent_time, false, + "If true, clear last_inflight_packets_sent_time_ of a packet number space when there " + "is no bytes in flight.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_server_pto_timeout, false, + "If true, do not arm PTO on half RTT packets if they are the only ones in flight.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_willing_and_able_to_write, false, + "If true, check connection level flow control for send control stream and qpack " + "streams in QuicSession::WillingAndAbleToWrite.") + +QUICHE_FLAG( + bool, quic_reloadable_flag_quic_fix_write_pending_crypto_retransmission, false, + "If true, return from QuicCryptoStream::WritePendingCryptoRetransmission after partial writes.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_listener_never_fake_epollout, false, "If true, QuicListener::OnSocketIsWritable will always return false, which means there " "will never be a fake EPOLLOUT event in the next epoll iteration.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_negotiate_ack_delay_time, false, +QUICHE_FLAG(bool, quic_reloadable_flag_quic_move_amplification_limit, false, + "When true, always check the amplification limit before writing, not just for " + "handshake packets.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_negotiate_ack_delay_time, true, "If true, will negotiate the ACK delay time.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_no_dup_experiment_id_2, false, "If true, transport connection stats doesn't report duplicated experiments for same " "connection.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_notify_stream_id_manager_when_disconnected, false, + "If true, notify stream ID manager even connection disconnects.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_only_truncate_long_cids, true, + "In IETF QUIC, only truncate long CIDs from the client's Initial, don't modify them.") + QUICHE_FLAG(bool, quic_reloadable_flag_quic_proxy_write_packed_strings, false, "If true, QuicProxyDispatcher will write packed_client_address and packed_server_vip " "in TcpProxyHeaderProto.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_record_frontend_service_vip_mapping, false, +QUICHE_FLAG(bool, quic_reloadable_flag_quic_record_frontend_service_vip_mapping, true, "If true, for L1 GFE, as requests come in, record frontend service to VIP mapping " "which is used to announce VIP in SHLO for proxied sessions. 
") QUICHE_FLAG(bool, quic_reloadable_flag_quic_reject_all_traffic, false, "") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_remove_android_conformance_test_workaround, false, - "If true, disable QuicDispatcher workaround that replies to invalid QUIC packets from " - "the Android Conformance Test.") - QUICHE_FLAG(bool, quic_reloadable_flag_quic_require_handshake_confirmation, false, "If true, require handshake confirmation for QUIC connections, functionally disabling " "0-rtt handshakes.") @@ -214,11 +234,14 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_send_timestamps, false, "When the STMP connection option is sent by the client, timestamps in the QUIC ACK " "frame are sent and processed.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_send_two_alt_addresses, true, + "When true, GFE will send two AlternateServerAddress (IPv6+IPv4) instead of one.") + QUICHE_FLAG(bool, quic_reloadable_flag_quic_server_push, true, "If true, enable server push feature on QUIC.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_stream_id_manager_handles_accounting, false, - "If true, move Goolge QUIC stream accounting to LegacyQuicStreamIdManager.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_stop_sending_duplicate_max_streams, false, + "If true, session does not send duplicate MAX_STREAMS.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_testonly_default_false, false, "A testonly reloadable flag that will always default to false.") @@ -226,19 +249,25 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_testonly_default_false, false, QUICHE_FLAG(bool, quic_reloadable_flag_quic_testonly_default_true, true, "A testonly reloadable flag that will always default to true.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_tls_enforce_valid_sni, false, + "If true, reject IETF QUIC connections with invalid SNI.") + QUICHE_FLAG(bool, quic_reloadable_flag_quic_unified_iw_options, false, "When true, set the initial congestion control window from connection options in " "QuicSentPacketManager rather than TcpCubicSenderBytes.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_blackhole_detector, true, - "If true, use blackhole detector in QuicConnection to detect path degrading and " - "network blackhole.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_update_ack_alarm_in_send_all_pending_acks, false, + "If true, QuicConnection::SendAllPendingAcks will Update instead of Set the ack alarm.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_dispatcher_clock_for_read_timestamp, false, + "If true, in QuicListener, use QuicDispatcher's clock as the source for packet read " + "timestamps.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_header_stage_idle_list2, false, "If true, use header stage idle list for QUIC connections in GFE.") QUICHE_FLAG( - bool, quic_reloadable_flag_quic_use_idle_network_detector, true, + bool, quic_reloadable_flag_quic_use_idle_network_detector, false, "If true, use idle network detector to detect handshake timeout and idle network timeout.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_ip_bandwidth_module, true, @@ -266,19 +295,23 @@ QUICHE_FLAG( bool, quic_restart_flag_quic_allow_loas_multipacket_chlo, false, "If true, inspects QUIC CHLOs for kLOAS and early creates sessions to allow multi-packet CHLOs") -QUICHE_FLAG(bool, quic_restart_flag_quic_batch_writer_always_drop_packets_on_error, true, - "If true, QUIC (gso|sendmmsg) batch writers will always drop packets on write error.") +QUICHE_FLAG(bool, quic_restart_flag_quic_google_transport_param_omit_old, false, + "When true, QUIC+TLS will not send nor parse 
the old-format Google-specific transport " + "parameters.") + +QUICHE_FLAG( + bool, quic_restart_flag_quic_google_transport_param_send_new, false, + "When true, QUIC+TLS will send and parse the new-format Google-specific transport parameters.") -QUICHE_FLAG(bool, quic_restart_flag_quic_disable_ietf_quic_on_cloud_vips, false, - "If true, disable IETF QUIC on cloud VIPs by 1) not advertise IETF QUIC and 2) send " - "version negotiation if receive IETF packet on those VIPs.") +QUICHE_FLAG(bool, quic_restart_flag_quic_ignore_cid_first_byte_in_bpf, false, + "If true, ignore CID first byte in BPF for both UDP socket and RX_RING.") QUICHE_FLAG(bool, quic_restart_flag_quic_offload_pacing_to_usps2, false, "If true, QUIC offload pacing when using USPS as egress method.") -QUICHE_FLAG(bool, quic_restart_flag_quic_pigeon_client_reconnect_forever, false, - "If true, QUIC pigeon client will 1) reconnect forever after disconnected, and 2) mark " - "GFE degraded after failing to reconnect for some time.") +QUICHE_FLAG(bool, quic_restart_flag_quic_replace_gfe_connection_ids, false, + "When true, GfeQuicDispatcher will replace long connection IDs with 64bit ones before " + "inserting them in the connection map.") QUICHE_FLAG(bool, quic_restart_flag_quic_replace_time_wait_list_encryption_level, false, "Replace the usage of ConnectionData::encryption_level in quic_time_wait_list_manager " @@ -290,6 +323,10 @@ QUICHE_FLAG(bool, quic_restart_flag_quic_rx_ring_use_tpacket_v3, false, QUICHE_FLAG(bool, quic_restart_flag_quic_should_accept_new_connection, false, "If true, reject QUIC CHLO packets when dispatcher is asked to do so.") +QUICHE_FLAG(bool, quic_restart_flag_quic_support_release_time_for_gso, false, + "If true, QuicGsoBatchWriter will support release time if it is available and the " + "process has the permission to do so.") + QUICHE_FLAG(bool, quic_restart_flag_quic_testonly_default_false, false, "A testonly restart flag that will always default to false.") @@ -304,6 +341,13 @@ QUICHE_FLAG(bool, quic_restart_flag_quic_use_pigeon_socket_to_backend, false, "If true, create a shared pigeon socket for all quic to backend connections and switch " "to use it after successful handshake.") +QUICHE_FLAG(bool, spdy_reloadable_flag_fix_spdy_header_coalescing, false, + "If true, when coalescing multivalued spdy headers, only headers that exist in spdy " + "headers block are updated.") + +QUICHE_FLAG(bool, spdy_reloadable_flag_quic_bootstrap_cwnd_by_spdy_priority, true, + "If true, bootstrap initial QUIC cwnd by SPDY priorities.") + QUICHE_FLAG( bool, spdy_reloadable_flag_spdy_discard_response_body_if_disallowed, false, "If true, SPDY will discard all response body bytes when response code indicates no response " @@ -311,10 +355,6 @@ QUICHE_FLAG( "and the rest of the response bytes would still be delivered even though the response code " "said there should not be any body associated with the response code.") -QUICHE_FLAG(bool, spdy_reloadable_flag_spdy_hpack_use_indexed_name, false, - "If true, use indexed name if possible when sending Literal Header Field without " - "Indexing instruction.") - QUICHE_FLAG(bool, quic_allow_chlo_buffering, true, "If true, allows packets to be buffered in anticipation of a " "future CHLO, and allow CHLO packets to be buffered until next " @@ -326,8 +366,8 @@ QUICHE_FLAG(bool, quic_enforce_single_packet_chlo, true, "If true, enforce that QUIC CHLOs fit in one packet") QUICHE_FLAG(int64_t, quic_time_wait_list_max_connections, 600000, - "Maximum number of connections on the time-wait 
list. " - "A negative value implies no configured limit.") + "Maximum number of connections on the time-wait list. A negative value implies no " + "configured limit.") QUICHE_FLAG(int64_t, quic_time_wait_list_seconds, 200, "Time period for which a given connection_id should live in " diff --git a/source/extensions/quic_listeners/quiche/platform/quiche_optional_impl.h b/source/extensions/quic_listeners/quiche/platform/quiche_optional_impl.h index b5c63d7ec303..f8b2b6c0800d 100644 --- a/source/extensions/quic_listeners/quiche/platform/quiche_optional_impl.h +++ b/source/extensions/quic_listeners/quiche/platform/quiche_optional_impl.h @@ -12,6 +12,6 @@ namespace quiche { template using QuicheOptionalImpl = absl::optional; -#define QuicheNullOptImpl absl::nullopt +#define QUICHE_NULLOPT_IMPL absl::nullopt } // namespace quiche diff --git a/source/extensions/quic_listeners/quiche/platform/quiche_time_utils_impl.cc b/source/extensions/quic_listeners/quiche/platform/quiche_time_utils_impl.cc new file mode 100644 index 000000000000..3260eafee4da --- /dev/null +++ b/source/extensions/quic_listeners/quiche/platform/quiche_time_utils_impl.cc @@ -0,0 +1,42 @@ +// NOLINT(namespace-envoy) + +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced directly by other Envoy code. It serves purely as a +// porting layer for QUICHE. + +#include "extensions/quic_listeners/quiche/platform/quiche_time_utils_impl.h" + +namespace quiche { + +namespace { +QuicheOptional quicheUtcDateTimeToUnixSecondsInner(int year, int month, int day, int hour, + int minute, int second) { + const absl::CivilSecond civil_time(year, month, day, hour, minute, second); + if (second != 60 && (civil_time.year() != year || civil_time.month() != month || + civil_time.day() != day || civil_time.hour() != hour || + civil_time.minute() != minute || civil_time.second() != second)) { + return absl::nullopt; + } + + const absl::Time time = absl::FromCivil(civil_time, absl::UTCTimeZone()); + return absl::ToUnixSeconds(time); +} +} // namespace + +// NOLINTNEXTLINE(readability-identifier-naming) +QuicheOptional QuicheUtcDateTimeToUnixSecondsImpl(int year, int month, int day, int hour, + int minute, int second) { + // Handle leap seconds without letting any other irregularities happen. + if (second == 60) { + auto previous_second = + quicheUtcDateTimeToUnixSecondsInner(year, month, day, hour, minute, second - 1); + if (!previous_second.has_value()) { + return absl::nullopt; + } + return *previous_second + 1; + } + + return quicheUtcDateTimeToUnixSecondsInner(year, month, day, hour, minute, second); +} + +} // namespace quiche diff --git a/source/extensions/quic_listeners/quiche/platform/quiche_time_utils_impl.h b/source/extensions/quic_listeners/quiche/platform/quiche_time_utils_impl.h new file mode 100644 index 000000000000..a1b70b70a51e --- /dev/null +++ b/source/extensions/quic_listeners/quiche/platform/quiche_time_utils_impl.h @@ -0,0 +1,21 @@ +// NOLINT(namespace-envoy) + +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced directly by other Envoy code. It serves purely as a +// porting layer for QUICHE. 
+ +#pragma once + +#include + +#include "absl/time/civil_time.h" +#include "absl/time/time.h" +#include "quiche/common/platform/api/quiche_optional.h" + +namespace quiche { + +// NOLINTNEXTLINE(readability-identifier-naming) +QuicheOptional QuicheUtcDateTimeToUnixSecondsImpl(int year, int month, int day, int hour, + int minute, int second); + +} // namespace quiche diff --git a/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc b/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc index 8b4d0039b246..b18a58be2e63 100644 --- a/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc +++ b/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc @@ -78,9 +78,16 @@ class ActiveQuicListenerTest : public QuicMultiVersionTest { : version_(GetParam().first), api_(Api::createApiForTest(simulated_time_system_)), dispatcher_(api_->allocateDispatcher("test_thread")), clock_(*dispatcher_), local_address_(Network::Test::getCanonicalLoopbackAddress(version_)), - connection_handler_(*dispatcher_) { - SetQuicReloadableFlag(quic_enable_version_draft_27, GetParam().second); - } + connection_handler_(*dispatcher_), quic_version_([]() { + if (GetParam().second == QuicVersionType::GquicQuicCrypto) { + return quic::CurrentSupportedVersionsWithQuicCrypto(); + } + bool use_http3 = GetParam().second == QuicVersionType::Iquic; + SetQuicReloadableFlag(quic_enable_version_draft_28, use_http3); + SetQuicReloadableFlag(quic_enable_version_draft_27, use_http3); + SetQuicReloadableFlag(quic_enable_version_draft_25_v3, use_http3); + return quic::CurrentSupportedVersions(); + }()[0]) {} template std::unique_ptr
staticUniquePointerCast(std::unique_ptr&& source) { @@ -138,7 +145,7 @@ class ActiveQuicListenerTest : public QuicMultiVersionTest { Server::Configuration::FilterChainUtility::buildFilterChain(connection, filter_factories); return true; })); - if (!quic::VersionUsesHttp3(quic::CurrentSupportedVersions()[0].transport_version)) { + if (!quic_version_.UsesTls()) { EXPECT_CALL(network_connection_callbacks_, onEvent(Network::ConnectionEvent::Connected)) .Times(connection_count); } @@ -174,9 +181,8 @@ class ActiveQuicListenerTest : public QuicMultiVersionTest { void sendCHLO(quic::QuicConnectionId connection_id) { client_sockets_.push_back(std::make_unique(local_address_, nullptr, /*bind*/ false)); Buffer::OwnedImpl payload = generateChloPacketToSend( - quic::CurrentSupportedVersions()[0], quic_config_, - ActiveQuicListenerPeer::cryptoConfig(*quic_listener_), connection_id, clock_, - envoyAddressInstanceToQuicSocketAddress(local_address_), + quic_version_, quic_config_, ActiveQuicListenerPeer::cryptoConfig(*quic_listener_), + connection_id, clock_, envoyAddressInstanceToQuicSocketAddress(local_address_), envoyAddressInstanceToQuicSocketAddress(local_address_), "test.example.org"); Buffer::RawSliceVector slice = payload.getRawSlices(); ASSERT_EQ(1u, slice.size()); @@ -263,6 +269,7 @@ class ActiveQuicListenerTest : public QuicMultiVersionTest { // of elements are saved in expectations before new elements are added. std::list> filter_factories_; std::list filter_chains_; + quic::ParsedQuicVersion quic_version_; }; INSTANTIATE_TEST_SUITE_P(ActiveQuicListenerTests, ActiveQuicListenerTest, diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_client_session_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_client_session_test.cc index 3b4f7cbda335..5707ae2dbfca 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_client_session_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_client_session_test.cc @@ -95,7 +95,9 @@ class EnvoyQuicClientSessionTest : public testing::TestWithParam { : api_(Api::createApiForTest(time_system_)), dispatcher_(api_->allocateDispatcher("test_thread")), connection_helper_(*dispatcher_), alarm_factory_(*dispatcher_, *connection_helper_.GetClock()), quic_version_([]() { + SetQuicReloadableFlag(quic_enable_version_draft_28, GetParam()); SetQuicReloadableFlag(quic_enable_version_draft_27, GetParam()); + SetQuicReloadableFlag(quic_enable_version_draft_25_v3, GetParam()); return quic::ParsedVersionOfIndex(quic::CurrentSupportedVersions(), 0); }()), peer_addr_(Network::Utility::getAddressWithPort(*Network::Utility::getIpv6LoopbackAddress(), diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_client_stream_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_client_stream_test.cc index 583a1ee8521c..ccd7d6a76d6d 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_client_stream_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_client_stream_test.cc @@ -25,7 +25,9 @@ class EnvoyQuicClientStreamTest : public testing::TestWithParam { : api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher("test_thread")), connection_helper_(*dispatcher_), alarm_factory_(*dispatcher_, *connection_helper_.GetClock()), quic_version_([]() { + SetQuicReloadableFlag(quic_enable_version_draft_28, GetParam()); SetQuicReloadableFlag(quic_enable_version_draft_27, GetParam()); + SetQuicReloadableFlag(quic_enable_version_draft_25_v3, GetParam()); return quic::CurrentSupportedVersions()[0]; }()), 
peer_addr_(Network::Utility::getAddressWithPort(*Network::Utility::getIpv6LoopbackAddress(), @@ -40,7 +42,8 @@ class EnvoyQuicClientStreamTest : public testing::TestWithParam { quic_config_.GetInitialStreamFlowControlWindowToSend() * 2), stream_id_(quic::VersionUsesHttp3(quic_version_.transport_version) ? 4u : 5u), quic_stream_(new EnvoyQuicClientStream(stream_id_, &quic_session_, quic::BIDIRECTIONAL)), - request_headers_{{":authority", host_}, {":method", "POST"}, {":path", "/"}} { + request_headers_{{":authority", host_}, {":method", "POST"}, {":path", "/"}}, + request_trailers_{{"trailer-key", "trailer-value"}} { quic_stream_->setResponseDecoder(stream_decoder_); quic_stream_->addCallbacks(stream_callbacks_); quic_session_.ActivateStream(std::unique_ptr(quic_stream_)); @@ -103,6 +106,7 @@ class EnvoyQuicClientStreamTest : public testing::TestWithParam { Http::MockStreamCallbacks stream_callbacks_; std::string host_{"www.abc.com"}; Http::TestRequestHeaderMapImpl request_headers_; + Http::TestRequestTrailerMapImpl request_trailers_; quic::QuicHeaderList response_headers_; quic::QuicHeaderList trailers_; Buffer::OwnedImpl request_body_{"Hello world"}; @@ -115,7 +119,8 @@ INSTANTIATE_TEST_SUITE_P(EnvoyQuicClientStreamTests, EnvoyQuicClientStreamTest, TEST_P(EnvoyQuicClientStreamTest, PostRequestAndResponse) { EXPECT_EQ(absl::nullopt, quic_stream_->http1StreamEncoderOptions()); quic_stream_->encodeHeaders(request_headers_, false); - quic_stream_->encodeData(request_body_, true); + quic_stream_->encodeData(request_body_, false); + quic_stream_->encodeTrailers(request_trailers_); EXPECT_CALL(stream_decoder_, decodeHeaders_(_, /*end_stream=*/false)) .WillOnce(Invoke([](const Http::ResponseHeaderMapPtr& headers, bool) { diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_dispatcher_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_dispatcher_test.cc index 43cd3852ecbe..07f036571678 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_dispatcher_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_dispatcher_test.cc @@ -59,10 +59,16 @@ class EnvoyQuicDispatcherTest : public QuicMultiVersionTest, crypto_config_(quic::QuicCryptoServerConfig::TESTING, quic::QuicRandom::GetInstance(), std::make_unique(), quic::KeyExchangeSource::Default()), version_manager_([]() { - SetQuicReloadableFlag(quic_enable_version_draft_27, GetParam().second); + if (GetParam().second == QuicVersionType::GquicQuicCrypto) { + return quic::CurrentSupportedVersionsWithQuicCrypto(); + } + bool use_http3 = GetParam().second == QuicVersionType::Iquic; + SetQuicReloadableFlag(quic_enable_version_draft_28, use_http3); + SetQuicReloadableFlag(quic_enable_version_draft_27, use_http3); + SetQuicReloadableFlag(quic_enable_version_draft_25_v3, use_http3); return quic::CurrentSupportedVersions(); }()), - quic_version_(quic::CurrentSupportedVersions()[0]), + quic_version_(version_manager_.GetSupportedVersions()[0]), listener_stats_({ALL_LISTENER_STATS(POOL_COUNTER(listener_config_.listenerScope()), POOL_GAUGE(listener_config_.listenerScope()), POOL_HISTOGRAM(listener_config_.listenerScope()))}), @@ -115,7 +121,7 @@ class EnvoyQuicDispatcherTest : public QuicMultiVersionTest, quic::test::ConstructReceivedPacket(*encrypted_packet, clock.Now())); } - bool quicVersionUsesHttp3() { return quic::VersionUsesHttp3(quic_version_.transport_version); } + bool quicVersionUsesTls() { return quic_version_.UsesTls(); } protected: Network::Address::IpVersion version_; @@ -181,7 +187,7 @@ 
TEST_P(EnvoyQuicDispatcherTest, CreateNewConnectionUponCHLO) { EXPECT_CALL(*read_filter, onNewConnection()) // Stop iteration to avoid calling getRead/WriteBuffer(). .WillOnce(Return(Network::FilterStatus::StopIteration)); - if (!quicVersionUsesHttp3()) { + if (!quicVersionUsesTls()) { // QUICHE doesn't support 0-RTT TLS1.3 handshake yet. EXPECT_CALL(network_connection_callbacks, onEvent(Network::ConnectionEvent::Connected)); } @@ -260,7 +266,7 @@ TEST_P(EnvoyQuicDispatcherTest, CreateNewConnectionUponBufferedCHLO) { EXPECT_CALL(*read_filter, onNewConnection()) // Stop iteration to avoid calling getRead/WriteBuffer(). .WillOnce(Return(Network::FilterStatus::StopIteration)); - if (!quicVersionUsesHttp3()) { + if (!quicVersionUsesTls()) { EXPECT_CALL(network_connection_callbacks, onEvent(Network::ConnectionEvent::Connected)); } quic::QuicBufferedPacketStore* buffered_packets = diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_server_session_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_server_session_test.cc index 71fc30c1b857..5483d0d7c858 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_server_session_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_server_session_test.cc @@ -103,7 +103,9 @@ class EnvoyQuicServerSessionTest : public testing::TestWithParam { : api_(Api::createApiForTest(time_system_)), dispatcher_(api_->allocateDispatcher("test_thread")), connection_helper_(*dispatcher_), alarm_factory_(*dispatcher_, *connection_helper_.GetClock()), quic_version_([]() { + SetQuicReloadableFlag(quic_enable_version_draft_28, GetParam()); SetQuicReloadableFlag(quic_enable_version_draft_27, GetParam()); + SetQuicReloadableFlag(quic_enable_version_draft_25_v3, GetParam()); return quic::ParsedVersionOfIndex(quic::CurrentSupportedVersions(), 0); }()), listener_stats_({ALL_LISTENER_STATS(POOL_COUNTER(listener_config_.listenerScope()), diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_server_stream_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_server_stream_test.cc index 475ce45ae8a3..84120cae913b 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_server_stream_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_server_stream_test.cc @@ -30,7 +30,9 @@ class EnvoyQuicServerStreamTest : public testing::TestWithParam { : api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher("test_thread")), connection_helper_(*dispatcher_), alarm_factory_(*dispatcher_, *connection_helper_.GetClock()), quic_version_([]() { + SetQuicReloadableFlag(quic_enable_version_draft_28, GetParam()); SetQuicReloadableFlag(quic_enable_version_draft_27, GetParam()); + SetQuicReloadableFlag(quic_enable_version_draft_25_v3, GetParam()); return quic::CurrentSupportedVersions()[0]; }()), listener_stats_({ALL_LISTENER_STATS(POOL_COUNTER(listener_config_.listenerScope()), @@ -45,7 +47,8 @@ class EnvoyQuicServerStreamTest : public testing::TestWithParam { quic_config_.GetInitialStreamFlowControlWindowToSend() * 2), stream_id_(VersionUsesHttp3(quic_version_.transport_version) ? 
4u : 5u), quic_stream_(new EnvoyQuicServerStream(stream_id_, &quic_session_, quic::BIDIRECTIONAL)), - response_headers_{{":status", "200"}} { + response_headers_{{":status", "200"}}, response_trailers_{ + {"trailer-key", "trailer-value"}} { quic_stream_->setRequestDecoder(stream_decoder_); quic_stream_->addCallbacks(stream_callbacks_); quic_session_.ActivateStream(std::unique_ptr(quic_stream_)); @@ -144,6 +147,7 @@ class EnvoyQuicServerStreamTest : public testing::TestWithParam { Http::MockStreamCallbacks stream_callbacks_; quic::QuicHeaderList request_headers_; Http::TestResponseHeaderMapImpl response_headers_; + Http::TestResponseTrailerMapImpl response_trailers_; quic::QuicHeaderList trailers_; std::string host_{"www.abc.com"}; std::string request_body_{"Hello world"}; @@ -176,7 +180,8 @@ TEST_P(EnvoyQuicServerStreamTest, GetRequestAndResponse) { TEST_P(EnvoyQuicServerStreamTest, PostRequestAndResponse) { EXPECT_EQ(absl::nullopt, quic_stream_->http1StreamEncoderOptions()); sendRequest(request_body_, true, request_body_.size() * 2); - quic_stream_->encodeHeaders(response_headers_, /*end_stream=*/true); + quic_stream_->encodeHeaders(response_headers_, /*end_stream=*/false); + quic_stream_->encodeTrailers(response_trailers_); } TEST_P(EnvoyQuicServerStreamTest, DecodeHeadersBodyAndTrailers) { diff --git a/test/extensions/quic_listeners/quiche/integration/quic_http_integration_test.cc b/test/extensions/quic_listeners/quiche/integration/quic_http_integration_test.cc index 094a5c73c04f..d7177aee7a04 100644 --- a/test/extensions/quic_listeners/quiche/integration/quic_http_integration_test.cc +++ b/test/extensions/quic_listeners/quiche/integration/quic_http_integration_test.cc @@ -50,7 +50,13 @@ class QuicHttpIntegrationTest : public HttpIntegrationTest, public QuicMultiVers : HttpIntegrationTest(Http::CodecClient::Type::HTTP3, GetParam().first, ConfigHelper::quicHttpProxyConfig()), supported_versions_([]() { - SetQuicReloadableFlag(quic_enable_version_draft_27, GetParam().second); + if (GetParam().second == QuicVersionType::GquicQuicCrypto) { + return quic::CurrentSupportedVersionsWithQuicCrypto(); + } + bool use_http3 = GetParam().second == QuicVersionType::Iquic; + SetQuicReloadableFlag(quic_enable_version_draft_28, use_http3); + SetQuicReloadableFlag(quic_enable_version_draft_27, use_http3); + SetQuicReloadableFlag(quic_enable_version_draft_25_v3, use_http3); return quic::CurrentSupportedVersions(); }()), crypto_config_(std::make_unique()), conn_helper_(*dispatcher_), diff --git a/test/extensions/quic_listeners/quiche/test_utils.h b/test/extensions/quic_listeners/quiche/test_utils.h index 684d96881518..3f5862115b12 100644 --- a/test/extensions/quic_listeners/quiche/test_utils.h +++ b/test/extensions/quic_listeners/quiche/test_utils.h @@ -109,12 +109,11 @@ generateChloPacketToSend(quic::ParsedQuicVersion quic_version, quic::QuicConfig& quic::QuicConnectionId connection_id, quic::QuicClock& clock, const quic::QuicSocketAddress& server_address, const quic::QuicSocketAddress& client_address, std::string sni) { - if (quic::VersionUsesHttp3(quic_version.transport_version)) { + if (quic_version.UsesTls()) { std::unique_ptr packet = std::move(quic::test::GetFirstFlightOfPackets(quic_version, quic_config, connection_id)[0]); return Buffer::OwnedImpl(packet->data(), packet->length()); } - quic::CryptoHandshakeMessage chlo = quic::test::crypto_test_utils::GenerateDefaultInchoateCHLO( &clock, quic_version.transport_version, &crypto_config); chlo.SetVector(quic::kCOPT, quic::QuicTagVector{quic::kREJ}); 
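// Roughly, generateChloPacketToSend() now has two paths: versions that use the TLS handshake
// reuse QUICHE's canned first-flight packets, while the QUIC-crypto (gQUIC) path hand-builds an
// inchoate CHLO (with kCOPT set to kREJ), expands it into a full CHLO, and serializes that into
// an encrypted packet, so the code under test receives a plausible handshake packet for every
// supported version.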
@@ -133,10 +132,13 @@ generateChloPacketToSend(quic::ParsedQuicVersion quic_version, quic::QuicConfig& quic_config_tmp.ToHandshakeMessage(&full_chlo, quic_version.transport_version); std::string packet_content(full_chlo.GetSerialized().AsStringPiece()); - auto encrypted_packet = std::unique_ptr( - quic::test::ConstructEncryptedPacket(connection_id, quic::EmptyQuicConnectionId(), - /*version_flag=*/true, /*reset_flag*/ false, - /*packet_number=*/1, packet_content)); + quic::ParsedQuicVersionVector supported_versions{quic_version}; + auto encrypted_packet = + std::unique_ptr(quic::test::ConstructEncryptedPacket( + connection_id, quic::EmptyQuicConnectionId(), + /*version_flag=*/true, /*reset_flag*/ false, + /*packet_number=*/1, packet_content, quic::CONNECTION_ID_PRESENT, + quic::CONNECTION_ID_ABSENT, quic::PACKET_4BYTE_PACKET_NUMBER, &supported_versions)); return Buffer::OwnedImpl(encrypted_packet->data(), encrypted_packet->length()); } @@ -156,25 +158,39 @@ void setQuicConfigWithDefaultValues(quic::QuicConfig* config) { config, quic::kMinimumFlowControlSendWindow); } +enum class QuicVersionType { + GquicQuicCrypto, + GquicTls, + Iquic, +}; + // A test suite with variation of ip version and a knob to turn on/off IETF QUIC implementation. class QuicMultiVersionTest - : public testing::TestWithParam> {}; + : public testing::TestWithParam> {}; -std::vector> generateTestParam() { - std::vector> param; +std::vector> generateTestParam() { + std::vector> param; for (auto ip_version : TestEnvironment::getIpVersionsForTest()) { - for (bool use_http3 : {true, false}) { - param.emplace_back(ip_version, use_http3); - } + param.emplace_back(ip_version, QuicVersionType::GquicQuicCrypto); + param.emplace_back(ip_version, QuicVersionType::GquicTls); + param.emplace_back(ip_version, QuicVersionType::Iquic); } return param; } std::string testParamsToString( - const ::testing::TestParamInfo>& params) { + const ::testing::TestParamInfo>& + params) { std::string ip_version = params.param.first == Network::Address::IpVersion::v4 ? "IPv4" : "IPv6"; - return absl::StrCat(ip_version, params.param.second ? 
"_UseHttp3" : "_UseGQuic"); + switch (params.param.second) { + case QuicVersionType::GquicQuicCrypto: + return absl::StrCat(ip_version, "_UseGQuicWithQuicCrypto"); + case QuicVersionType::GquicTls: + return absl::StrCat(ip_version, "_UseGQuicWithTLS"); + case QuicVersionType::Iquic: + return absl::StrCat(ip_version, "_UseHttp3"); + } } } // namespace Quic From 015018997f5c4cc7dec9f3d4171f8115555abe18 Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Sat, 13 Jun 2020 19:02:14 -0700 Subject: [PATCH 352/909] build: update several go dependencies (#11581) Signed-off-by: Lizan Zhou --- bazel/dependency_imports.bzl | 4 ++-- bazel/repository_locations.bzl | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/bazel/dependency_imports.bzl b/bazel/dependency_imports.bzl index cc2ff635ede3..633457727a6a 100644 --- a/bazel/dependency_imports.bzl +++ b/bazel/dependency_imports.bzl @@ -23,8 +23,8 @@ def envoy_dependency_imports(go_version = GO_VERSION): name = "org_golang_google_grpc", build_file_proto_mode = "disable", importpath = "google.golang.org/grpc", - sum = "h1:AzbTB6ux+okLTzP8Ru1Xs41C303zdcfEht7MQnYJt5A=", - version = "v1.23.0", + sum = "h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4=", + version = "v1.29.1", ) go_repository( diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 6d081d5a65f8..645d56a6ddfe 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -355,8 +355,8 @@ DEPENDENCY_REPOSITORIES = dict( cpe = "N/A", ), io_bazel_rules_go = dict( - sha256 = "e88471aea3a3a4f19ec1310a55ba94772d087e9ce46e41ae38ecebe17935de7b", - urls = ["https://github.com/bazelbuild/rules_go/releases/download/v0.20.3/rules_go-v0.20.3.tar.gz"], + sha256 = "a8d6b1b354d371a646d2f7927319974e0f9e52f73a2452d2b3877118169eb6bb", + urls = ["https://github.com/bazelbuild/rules_go/releases/download/v0.23.3/rules_go-v0.23.3.tar.gz"], use_category = ["build"], ), rules_cc = dict( From b612236bcd8ef3cd14d904c21348888a6755d5d6 Mon Sep 17 00:00:00 2001 From: danzh Date: Sat, 13 Jun 2020 22:05:51 -0400 Subject: [PATCH 353/909] quiche: destroy file event before close socket during connection migration. (#11544) Signed-off-by: Dan Zhang --- .../quic_listeners/quiche/envoy_quic_client_connection.cc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_client_connection.cc b/source/extensions/quic_listeners/quiche/envoy_quic_client_connection.cc index 0ae38b38dbb2..c8cfe4d14d69 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_client_connection.cc +++ b/source/extensions/quic_listeners/quiche/envoy_quic_client_connection.cc @@ -95,6 +95,10 @@ void EnvoyQuicClientConnection::setUpConnectionSocket() { void EnvoyQuicClientConnection::switchConnectionSocket( Network::ConnectionSocketPtr&& connection_socket) { auto writer = std::make_unique(*connection_socket); + // Destroy the old file_event before closing the old socket. Otherwise the socket might be picked + // up by another socket() call while file_event is still operating on it. + file_event_.reset(); + // The old socket is closed in this call. 
setConnectionSocket(std::move(connection_socket)); setUpConnectionSocket(); SetQuicPacketWriter(writer.release(), true); From 12a4bc430eaf440ceb0d11286cfbd4c16b79cdd1 Mon Sep 17 00:00:00 2001 From: Jonathan Oddy Date: Mon, 15 Jun 2020 16:02:20 +0100 Subject: [PATCH 354/909] Fix close_connections_on_host_health_failure (#11241) close_connections_on_host_health_failure was disabled by the refactoring to make tcp_proxy use the TCP connection pool implementation. To fix it we: * Add a method to immediately close all active connections on a TCP conn pool. * Invoke this method instead of drainConnections() if close_connections_on_host_health_failure is set. Signed-off-by: Jonathan Oddy --- include/envoy/tcp/conn_pool.h | 7 + source/common/tcp/conn_pool.cc | 14 ++ source/common/tcp/conn_pool.h | 1 + .../common/upstream/cluster_manager_impl.cc | 17 ++- .../integration/tcp_proxy_integration_test.cc | 124 ++++++++++++++++++ test/mocks/tcp/mocks.h | 1 + 6 files changed, 162 insertions(+), 2 deletions(-) diff --git a/include/envoy/tcp/conn_pool.h b/include/envoy/tcp/conn_pool.h index 52712177030e..e99312ff1d6d 100644 --- a/include/envoy/tcp/conn_pool.h +++ b/include/envoy/tcp/conn_pool.h @@ -151,6 +151,13 @@ class Instance : public Event::DeferredDeletable { */ virtual void drainConnections() PURE; + /** + * Immediately close all existing connection pool connections. This method can be used in cases + * where the connection pool is not being destroyed, but the caller wishes to terminate all + * existing connections. For example, when a health check failure occurs. + */ + virtual void closeConnections() PURE; + /** * Create a new connection on the pool. * @param cb supplies the callbacks to invoke when the connection is ready or has failed. The diff --git a/source/common/tcp/conn_pool.cc b/source/common/tcp/conn_pool.cc index 76f6d453be78..8f905769cb29 100644 --- a/source/common/tcp/conn_pool.cc +++ b/source/common/tcp/conn_pool.cc @@ -53,6 +53,20 @@ void ConnPoolImpl::drainConnections() { } } +void ConnPoolImpl::closeConnections() { + while (!ready_conns_.empty()) { + ready_conns_.front()->conn_->close(Network::ConnectionCloseType::NoFlush); + } + + while (!busy_conns_.empty()) { + busy_conns_.front()->conn_->close(Network::ConnectionCloseType::NoFlush); + } + + while (!pending_conns_.empty()) { + pending_conns_.front()->conn_->close(Network::ConnectionCloseType::NoFlush); + } +} + void ConnPoolImpl::addDrainedCallback(DrainedCb cb) { drained_callbacks_.push_back(cb); checkForDrained(); diff --git a/source/common/tcp/conn_pool.h b/source/common/tcp/conn_pool.h index a80e8e05a044..d676b593bc60 100644 --- a/source/common/tcp/conn_pool.h +++ b/source/common/tcp/conn_pool.h @@ -30,6 +30,7 @@ class ConnPoolImpl : Logger::Loggable, public ConnectionPool:: // ConnectionPool::Instance void addDrainedCallback(DrainedCb cb) override; void drainConnections() override; + void closeConnections() override; ConnectionPool::Cancellable* newConnection(ConnectionPool::Callbacks& callbacks) override; Upstream::HostDescriptionConstSharedPtr host() const override { return host_; } diff --git a/source/common/upstream/cluster_manager_impl.cc b/source/common/upstream/cluster_manager_impl.cc index 8da7fa17e6ca..8eeb3c7ff99a 100644 --- a/source/common/upstream/cluster_manager_impl.cc +++ b/source/common/upstream/cluster_manager_impl.cc @@ -1163,18 +1163,31 @@ void ClusterManagerImpl::ThreadLocalClusterManagerImpl::onHostHealthFailure( } } { + // Drain or close any TCP connection pool for the host. 
Draining a TCP pool doesn't lead to + // connections being closed, it only prevents new connections through the pool. The + // CLOSE_CONNECTIONS_ON_HOST_HEALTH_FAILURE can be used to make the pool close any + // active connections. const auto& container = config.host_tcp_conn_pool_map_.find(host); if (container != config.host_tcp_conn_pool_map_.end()) { for (const auto& pair : container->second.pools_) { const Tcp::ConnectionPool::InstancePtr& pool = pair.second; - pool->drainConnections(); + if (host->cluster().features() & + ClusterInfo::Features::CLOSE_CONNECTIONS_ON_HOST_HEALTH_FAILURE) { + pool->closeConnections(); + } else { + pool->drainConnections(); + } } } } if (host->cluster().features() & ClusterInfo::Features::CLOSE_CONNECTIONS_ON_HOST_HEALTH_FAILURE) { - + // Close non connection pool TCP connections obtained from tcpConnForCluster() + // + // TODO(jono): The only remaining user of the non-pooled connections seems to be the statsd + // TCP client. Perhaps it could be rewritten to use a connection pool, and this code deleted. + // // Each connection will remove itself from the TcpConnectionsMap when it closes, via its // Network::ConnectionCallbacks. The last removed tcp conn will remove the TcpConnectionsMap // from host_tcp_conn_map_, so do not cache it between iterations. diff --git a/test/integration/tcp_proxy_integration_test.cc b/test/integration/tcp_proxy_integration_test.cc index 54893f0fda6e..e007336d607e 100644 --- a/test/integration/tcp_proxy_integration_test.cc +++ b/test/integration/tcp_proxy_integration_test.cc @@ -390,6 +390,130 @@ TEST_P(TcpProxyIntegrationTest, TestIdletimeoutWithLargeOutstandingData) { ASSERT_TRUE(fake_upstream_connection->waitForDisconnect(true)); } +TEST_P(TcpProxyIntegrationTest, TestNoCloseOnHealthFailure) { + concurrency_ = 2; + + config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { + auto* static_resources = bootstrap.mutable_static_resources(); + for (int i = 0; i < static_resources->clusters_size(); ++i) { + auto* cluster = static_resources->mutable_clusters(i); + cluster->set_close_connections_on_host_health_failure(false); + cluster->mutable_common_lb_config()->mutable_healthy_panic_threshold()->set_value(0); + cluster->add_health_checks()->mutable_timeout()->set_seconds(20); + cluster->mutable_health_checks(0)->mutable_reuse_connection()->set_value(true); + cluster->mutable_health_checks(0)->mutable_interval()->set_seconds(1); + cluster->mutable_health_checks(0)->mutable_no_traffic_interval()->set_seconds(1); + cluster->mutable_health_checks(0)->mutable_unhealthy_threshold()->set_value(1); + cluster->mutable_health_checks(0)->mutable_healthy_threshold()->set_value(1); + cluster->mutable_health_checks(0)->mutable_tcp_health_check(); + cluster->mutable_health_checks(0)->mutable_tcp_health_check()->mutable_send()->set_text( + "50696E67"); + cluster->mutable_health_checks(0)->mutable_tcp_health_check()->add_receive()->set_text( + "506F6E67"); + } + }); + + FakeRawConnectionPtr fake_upstream_health_connection; + on_server_init_function_ = [&](void) -> void { + ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_health_connection)); + ASSERT_TRUE(fake_upstream_health_connection->waitForData( + FakeRawConnection::waitForInexactMatch("Ping"))); + ASSERT_TRUE(fake_upstream_health_connection->write("Pong")); + }; + + initialize(); + IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); + tcp_client->write("hello"); + FakeRawConnectionPtr 
fake_upstream_connection; + ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); + ASSERT_TRUE(fake_upstream_connection->waitForData(5)); + ASSERT_TRUE(fake_upstream_connection->write("world")); + tcp_client->waitForData("world"); + tcp_client->write("hello"); + ASSERT_TRUE(fake_upstream_connection->waitForData(10)); + + ASSERT_TRUE(fake_upstream_health_connection->waitForData(8)); + ASSERT_TRUE(fake_upstream_health_connection->close()); + ASSERT_TRUE(fake_upstream_health_connection->waitForDisconnect(true)); + + // By waiting we know the previous health check attempt completed (with a failure since we closed + // the connection on it) + FakeRawConnectionPtr fake_upstream_health_connection_reconnect; + ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_health_connection_reconnect)); + ASSERT_TRUE(fake_upstream_health_connection_reconnect->waitForData( + FakeRawConnection::waitForInexactMatch("Ping"))); + + tcp_client->write("still"); + ASSERT_TRUE(fake_upstream_connection->waitForData(15)); + ASSERT_TRUE(fake_upstream_connection->write("here")); + tcp_client->waitForData("here", false); + + test_server_.reset(); + ASSERT_TRUE(fake_upstream_connection->waitForHalfClose()); + ASSERT_TRUE(fake_upstream_connection->close()); + ASSERT_TRUE(fake_upstream_connection->waitForDisconnect(true)); + ASSERT_TRUE(fake_upstream_health_connection_reconnect->waitForHalfClose()); + ASSERT_TRUE(fake_upstream_health_connection_reconnect->close()); + ASSERT_TRUE(fake_upstream_health_connection_reconnect->waitForDisconnect(true)); + tcp_client->waitForHalfClose(); + tcp_client->close(); +} + +TEST_P(TcpProxyIntegrationTest, TestCloseOnHealthFailure) { + concurrency_ = 2; + + config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { + auto* static_resources = bootstrap.mutable_static_resources(); + for (int i = 0; i < static_resources->clusters_size(); ++i) { + auto* cluster = static_resources->mutable_clusters(i); + cluster->set_close_connections_on_host_health_failure(true); + cluster->mutable_common_lb_config()->mutable_healthy_panic_threshold()->set_value(0); + cluster->add_health_checks()->mutable_timeout()->set_seconds(20); + cluster->mutable_health_checks(0)->mutable_reuse_connection()->set_value(true); + cluster->mutable_health_checks(0)->mutable_interval()->set_seconds(1); + cluster->mutable_health_checks(0)->mutable_no_traffic_interval()->set_seconds(1); + cluster->mutable_health_checks(0)->mutable_unhealthy_threshold()->set_value(1); + cluster->mutable_health_checks(0)->mutable_healthy_threshold()->set_value(1); + cluster->mutable_health_checks(0)->mutable_tcp_health_check(); + cluster->mutable_health_checks(0)->mutable_tcp_health_check()->mutable_send()->set_text( + "50696E67"); + ; + cluster->mutable_health_checks(0)->mutable_tcp_health_check()->add_receive()->set_text( + "506F6E67"); + } + }); + + FakeRawConnectionPtr fake_upstream_health_connection; + on_server_init_function_ = [&](void) -> void { + ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_health_connection)); + ASSERT_TRUE(fake_upstream_health_connection->waitForData(4)); + ASSERT_TRUE(fake_upstream_health_connection->write("Pong")); + }; + + initialize(); + IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); + tcp_client->write("hello"); + FakeRawConnectionPtr fake_upstream_connection; + ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); + 
ASSERT_TRUE(fake_upstream_connection->waitForData(5)); + ASSERT_TRUE(fake_upstream_connection->write("world")); + tcp_client->waitForData("world"); + tcp_client->write("hello"); + ASSERT_TRUE(fake_upstream_connection->waitForData(10)); + + ASSERT_TRUE(fake_upstream_health_connection->waitForData(8)); + fake_upstreams_[0]->set_allow_unexpected_disconnects(true); + ASSERT_TRUE(fake_upstream_health_connection->close()); + ASSERT_TRUE(fake_upstream_health_connection->waitForDisconnect(true)); + + ASSERT_TRUE(fake_upstream_connection->waitForHalfClose()); + tcp_client->waitForHalfClose(); + + ASSERT_TRUE(fake_upstream_connection->close()); + tcp_client->close(); + ASSERT_TRUE(fake_upstream_connection->waitForDisconnect(true)); +} + class TcpProxyMetadataMatchIntegrationTest : public TcpProxyIntegrationTest { public: void initialize() override; diff --git a/test/mocks/tcp/mocks.h b/test/mocks/tcp/mocks.h index ab9af40f376c..7b0b5fb34951 100644 --- a/test/mocks/tcp/mocks.h +++ b/test/mocks/tcp/mocks.h @@ -53,6 +53,7 @@ class MockInstance : public Instance { // Tcp::ConnectionPool::Instance MOCK_METHOD(void, addDrainedCallback, (DrainedCb cb)); MOCK_METHOD(void, drainConnections, ()); + MOCK_METHOD(void, closeConnections, ()); MOCK_METHOD(Cancellable*, newConnection, (Tcp::ConnectionPool::Callbacks & callbacks)); MOCK_METHOD(Upstream::HostDescriptionConstSharedPtr, host, (), (const)); From 19778f9edabe8956fbf68cec2be0d397ea1f18c1 Mon Sep 17 00:00:00 2001 From: Arthur Yan <55563955+arthuryan-k@users.noreply.github.com> Date: Mon, 15 Jun 2020 16:48:25 -0400 Subject: [PATCH 355/909] fuzz: added fuzz tests for sanitizeStatsName (#11591) * fuzz: added fuzz tests for sanitizeStatsName Signed-off-by: Arthur Yan --- test/common/stats/BUILD | 9 +++++++++ test/common/stats/utility_corpus/test | 1 + test/common/stats/utility_fuzz_test.cc | 16 ++++++++++++++++ 3 files changed, 26 insertions(+) create mode 100644 test/common/stats/utility_corpus/test create mode 100644 test/common/stats/utility_fuzz_test.cc diff --git a/test/common/stats/BUILD b/test/common/stats/BUILD index e947b5dae927..fb5db97ba91e 100644 --- a/test/common/stats/BUILD +++ b/test/common/stats/BUILD @@ -162,6 +162,15 @@ envoy_cc_fuzz_test( ], ) +envoy_cc_fuzz_test( + name = "utility_fuzz_test", + srcs = ["utility_fuzz_test.cc"], + corpus = "utility_corpus", + deps = [ + "//source/common/stats:utility_lib", + ], +) + envoy_cc_test_binary( name = "symbol_table_speed_test", srcs = [ diff --git a/test/common/stats/utility_corpus/test b/test/common/stats/utility_corpus/test new file mode 100644 index 000000000000..95d09f2b1015 --- /dev/null +++ b/test/common/stats/utility_corpus/test @@ -0,0 +1 @@ +hello world \ No newline at end of file diff --git a/test/common/stats/utility_fuzz_test.cc b/test/common/stats/utility_fuzz_test.cc new file mode 100644 index 000000000000..bcf2bca6e4f9 --- /dev/null +++ b/test/common/stats/utility_fuzz_test.cc @@ -0,0 +1,16 @@ +#include "common/stats/utility.h" + +#include "test/fuzz/fuzz_runner.h" + +#include "absl/strings/string_view.h" + +namespace Envoy { +namespace Fuzz { + +DEFINE_FUZZER(const uint8_t* buf, size_t len) { + const absl::string_view string_buffer(reinterpret_cast(buf), len); + Stats::Utility::sanitizeStatsName(string_buffer); +} + +} // namespace Fuzz +} // namespace Envoy From f98c9fd906525504865f12a21862693b35667df6 Mon Sep 17 00:00:00 2001 From: Tony Allen Date: Mon, 15 Jun 2020 14:49:56 -0700 Subject: [PATCH 356/909] adaptive concurrency: Disregard samples from previous minRTT epoch (#11579) 
Signed-off-by: Tony Allen --- docs/root/version_history/current.rst | 3 + .../adaptive_concurrency_filter.cc | 13 +- .../http/adaptive_concurrency/config.cc | 3 +- .../adaptive_concurrency/controller/BUILD | 1 + .../controller/controller.h | 5 +- .../controller/gradient_controller.cc | 23 ++- .../controller/gradient_controller.h | 10 +- .../adaptive_concurrency_filter_test.cc | 20 +- .../controller/gradient_controller_test.cc | 179 ++++++++++++++---- 9 files changed, 185 insertions(+), 72 deletions(-) diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 56b719d7e800..a33646b66334 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -28,6 +28,9 @@ Bug Fixes --------- *Changes expected to improve the state of the world and are unlikely to have negative effects* +* adaptive concurrency: fixed a minRTT calculation bug where requests started before the concurrency + limit was pinned to the minimum would skew the new minRTT value if the replies arrived after the + start of the new minRTT window. * grpc-json: fix a bug when in trailers only gRPC response (e.g. error) HTTP status code is not being re-written. * http: fixed a bug in the grpc_http1_reverse_bridge filter where header-only requests were forwarded with a non-zero content length. * http: fixed a bug where in some cases slash was moved from path to query string when :ref:`merging of adjacent slashes` is enabled. diff --git a/source/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter.cc b/source/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter.cc index 69e706cbf2b6..b2478d040896 100644 --- a/source/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter.cc +++ b/source/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter.cc @@ -43,16 +43,11 @@ Http::FilterHeadersStatus AdaptiveConcurrencyFilter::decodeHeaders(Http::Request return Http::FilterHeadersStatus::StopIteration; } - // When the deferred_sample_task_ object is destroyed, the time difference between its destruction - // and the request start time is measured as the request latency. This value is sampled by the - // concurrency controller either when encoding is complete or during destruction of this filter - // object. + // When the deferred_sample_task_ object is destroyed, the request start time is sampled. This + // occurs either when encoding is complete or during destruction of this filter object. 
+ const auto now = config_->timeSource().monotonicTime(); deferred_sample_task_ = - std::make_unique([this, rq_start_time = config_->timeSource().monotonicTime()]() { - const auto now = config_->timeSource().monotonicTime(); - const std::chrono::nanoseconds rq_latency = now - rq_start_time; - controller_->recordLatencySample(rq_latency); - }); + std::make_unique([this, now]() { controller_->recordLatencySample(now); }); return Http::FilterHeadersStatus::Continue; } diff --git a/source/extensions/filters/http/adaptive_concurrency/config.cc b/source/extensions/filters/http/adaptive_concurrency/config.cc index fc6b9d5e0f99..63a3d2d7f369 100644 --- a/source/extensions/filters/http/adaptive_concurrency/config.cc +++ b/source/extensions/filters/http/adaptive_concurrency/config.cc @@ -26,7 +26,8 @@ Http::FilterFactoryCb AdaptiveConcurrencyFilterFactory::createFilterFactoryFromP Controller::GradientControllerConfig(config.gradient_controller_config(), context.runtime()); controller = std::make_shared( std::move(gradient_controller_config), context.dispatcher(), context.runtime(), - acc_stats_prefix + "gradient_controller.", context.scope(), context.random()); + acc_stats_prefix + "gradient_controller.", context.scope(), context.random(), + context.timeSource()); AdaptiveConcurrencyFilterConfigSharedPtr filter_config( new AdaptiveConcurrencyFilterConfig(config, context.runtime(), std::move(acc_stats_prefix), diff --git a/source/extensions/filters/http/adaptive_concurrency/controller/BUILD b/source/extensions/filters/http/adaptive_concurrency/controller/BUILD index 768d34438936..ae74e71c6b35 100644 --- a/source/extensions/filters/http/adaptive_concurrency/controller/BUILD +++ b/source/extensions/filters/http/adaptive_concurrency/controller/BUILD @@ -23,6 +23,7 @@ envoy_cc_library( "libcircllhist", ], deps = [ + "//include/envoy/common:time_interface", "//source/common/event:dispatcher_lib", "//source/common/protobuf", "//source/common/runtime:runtime_lib", diff --git a/source/extensions/filters/http/adaptive_concurrency/controller/controller.h b/source/extensions/filters/http/adaptive_concurrency/controller/controller.h index ecb78307a9d2..a6ba79f55a42 100644 --- a/source/extensions/filters/http/adaptive_concurrency/controller/controller.h +++ b/source/extensions/filters/http/adaptive_concurrency/controller/controller.h @@ -3,6 +3,7 @@ #include #include "envoy/common/pure.h" +#include "envoy/common/time.h" namespace Envoy { namespace Extensions { @@ -41,9 +42,9 @@ class ConcurrencyController { * request latency to update the internal state of the controller for * concurrency limit calculations. * - * @param rq_latency is the clocked round-trip time for the request. 
+ * @param rq_send_time the time point which the sampled request was sent */ - virtual void recordLatencySample(std::chrono::nanoseconds rq_latency) PURE; + virtual void recordLatencySample(MonotonicTime rq_send_time) PURE; /** * Omit sampling an outstanding request and update the internal state of the controller to reflect diff --git a/source/extensions/filters/http/adaptive_concurrency/controller/gradient_controller.cc b/source/extensions/filters/http/adaptive_concurrency/controller/gradient_controller.cc index c94ddaef11c3..fc30b1ffb423 100644 --- a/source/extensions/filters/http/adaptive_concurrency/controller/gradient_controller.cc +++ b/source/extensions/filters/http/adaptive_concurrency/controller/gradient_controller.cc @@ -46,10 +46,11 @@ GradientControllerConfig::GradientControllerConfig( GradientController::GradientController(GradientControllerConfig config, Event::Dispatcher& dispatcher, Runtime::Loader&, const std::string& stats_prefix, Stats::Scope& scope, - Runtime::RandomGenerator& random) + Runtime::RandomGenerator& random, TimeSource& time_source) : config_(std::move(config)), dispatcher_(dispatcher), scope_(scope), - stats_(generateStats(scope_, stats_prefix)), random_(random), deferred_limit_value_(0), - num_rq_outstanding_(0), concurrency_limit_(config_.minConcurrency()), + stats_(generateStats(scope_, stats_prefix)), random_(random), time_source_(time_source), + deferred_limit_value_(0), num_rq_outstanding_(0), + concurrency_limit_(config_.minConcurrency()), latency_sample_hist_(hist_fast_alloc(), hist_free) { min_rtt_calc_timer_ = dispatcher_.createTimer([this]() -> void { enterMinRTTSamplingWindow(); }); @@ -102,6 +103,8 @@ void GradientController::enterMinRTTSamplingWindow() { // Throw away any latency samples from before the recalculation window as it may not represent // the minRTT. hist_clear(latency_sample_hist_.get()); + + min_rtt_epoch_ = time_source_.monotonicTime(); } void GradientController::updateMinRTT() { @@ -192,16 +195,22 @@ RequestForwardingAction GradientController::forwardingDecision() { return RequestForwardingAction::Block; } -void GradientController::recordLatencySample(std::chrono::nanoseconds rq_latency) { - const uint32_t latency_usec = - std::chrono::duration_cast(rq_latency).count(); +void GradientController::recordLatencySample(MonotonicTime rq_send_time) { ASSERT(num_rq_outstanding_.load() > 0); --num_rq_outstanding_; + if (rq_send_time < min_rtt_epoch_) { + // Disregard samples from requests started in the previous minRTT window. 
+ return; + } + + const std::chrono::microseconds rq_latency = + std::chrono::duration_cast(time_source_.monotonicTime() - + rq_send_time); uint32_t sample_count; { absl::MutexLock ml(&sample_mutation_mtx_); - hist_insert(latency_sample_hist_.get(), latency_usec, 1); + hist_insert(latency_sample_hist_.get(), rq_latency.count(), 1); sample_count = hist_sample_count(latency_sample_hist_.get()); } diff --git a/source/extensions/filters/http/adaptive_concurrency/controller/gradient_controller.h b/source/extensions/filters/http/adaptive_concurrency/controller/gradient_controller.h index 1da1c3d8b81a..46bbe50aa909 100644 --- a/source/extensions/filters/http/adaptive_concurrency/controller/gradient_controller.h +++ b/source/extensions/filters/http/adaptive_concurrency/controller/gradient_controller.h @@ -3,6 +3,7 @@ #include #include +#include "envoy/common/time.h" #include "envoy/event/dispatcher.h" #include "envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.pb.h" #include "envoy/runtime/runtime.h" @@ -210,11 +211,11 @@ class GradientController : public ConcurrencyController { public: GradientController(GradientControllerConfig config, Event::Dispatcher& dispatcher, Runtime::Loader& runtime, const std::string& stats_prefix, Stats::Scope& scope, - Runtime::RandomGenerator& random); + Runtime::RandomGenerator& random, TimeSource& time_source); // ConcurrencyController. RequestForwardingAction forwardingDecision() override; - void recordLatencySample(std::chrono::nanoseconds rq_latency) override; + void recordLatencySample(MonotonicTime rq_send_time) override; void cancelLatencySample() override; uint32_t concurrencyLimit() const override { return concurrency_limit_.load(); } @@ -238,6 +239,7 @@ class GradientController : public ConcurrencyController { Stats::Scope& scope_; GradientControllerStats stats_; Runtime::RandomGenerator& random_; + TimeSource& time_source_; // Protects data related to latency sampling and RTT values. In addition to protecting the latency // sample histogram, the mutex ensures that the minRTT calculation window and the sample window @@ -274,6 +276,10 @@ class GradientController : public ConcurrencyController { // after remaining at the minimum limit for too long. uint32_t consecutive_min_concurrency_set_ ABSL_GUARDED_BY(sample_mutation_mtx_); + // We will disregard sampling any requests admitted before this timestamp to prevent sampling + // requests admitted before the start of a minRTT window and potentially skewing the minRTT. 
+ MonotonicTime min_rtt_epoch_; + Event::TimerPtr min_rtt_calc_timer_; Event::TimerPtr sample_reset_timer_; }; diff --git a/test/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter_test.cc b/test/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter_test.cc index bcfed37a2409..5742385d5cc5 100644 --- a/test/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter_test.cc +++ b/test/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter_test.cc @@ -29,7 +29,7 @@ class MockConcurrencyController : public Controller::ConcurrencyController { public: MOCK_METHOD(RequestForwardingAction, forwardingDecision, ()); MOCK_METHOD(void, cancelLatencySample, ()); - MOCK_METHOD(void, recordLatencySample, (std::chrono::nanoseconds)); + MOCK_METHOD(void, recordLatencySample, (MonotonicTime)); uint32_t concurrencyLimit() const override { return 0; } }; @@ -223,12 +223,12 @@ TEST_F(AdaptiveConcurrencyFilterTest, OnDestroyCleanupTest) { .WillOnce(Return(RequestForwardingAction::Forward)); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true)); - const auto advance_time = std::chrono::nanoseconds(42); - time_system_.advanceTimeWait(advance_time); + const auto rq_rcv_time = time_system_.monotonicTime(); + time_system_.advanceTimeWait(std::chrono::nanoseconds(42)); Http::TestResponseHeaderMapImpl response_headers; EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, true)); - EXPECT_CALL(*controller_, recordLatencySample(advance_time)); + EXPECT_CALL(*controller_, recordLatencySample(rq_rcv_time)); filter_->encodeComplete(); filter_->onDestroy(); @@ -248,16 +248,16 @@ TEST_F(AdaptiveConcurrencyFilterTest, EncodeHeadersValidTestWithBody) { Http::TestRequestTrailerMapImpl request_trailers; EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers)); - const auto advance_time = std::chrono::nanoseconds(42); + const auto rq_rcv_time = time_system_.monotonicTime(); mt = time_system_.monotonicTime(); - time_system_.setMonotonicTime(mt + advance_time); + time_system_.setMonotonicTime(mt + std::chrono::nanoseconds(42)); Http::TestResponseHeaderMapImpl response_headers; EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, false)); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(data, false)); Http::TestResponseTrailerMapImpl response_trailers; EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers)); - EXPECT_CALL(*controller_, recordLatencySample(advance_time)); + EXPECT_CALL(*controller_, recordLatencySample(rq_rcv_time)); filter_->encodeComplete(); } @@ -271,13 +271,13 @@ TEST_F(AdaptiveConcurrencyFilterTest, EncodeHeadersValidTest) { .WillOnce(Return(RequestForwardingAction::Forward)); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true)); - const auto advance_time = std::chrono::nanoseconds(42); + const auto rq_rcv_time = time_system_.monotonicTime(); mt = time_system_.monotonicTime(); - time_system_.setMonotonicTime(mt + advance_time); + time_system_.setMonotonicTime(mt + std::chrono::nanoseconds(42)); Http::TestResponseHeaderMapImpl response_headers; EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, true)); - EXPECT_CALL(*controller_, recordLatencySample(advance_time)); + EXPECT_CALL(*controller_, recordLatencySample(rq_rcv_time)); filter_->encodeComplete(); } diff --git 
a/test/extensions/filters/http/adaptive_concurrency/controller/gradient_controller_test.cc b/test/extensions/filters/http/adaptive_concurrency/controller/gradient_controller_test.cc index 812e30529777..5317009fb78b 100644 --- a/test/extensions/filters/http/adaptive_concurrency/controller/gradient_controller_test.cc +++ b/test/extensions/filters/http/adaptive_concurrency/controller/gradient_controller_test.cc @@ -51,11 +51,22 @@ class GradientControllerTest : public testing::Test { dispatcher_(api_->allocateDispatcher("test_thread")) {} GradientControllerSharedPtr makeController(const std::string& yaml_config) { - return std::make_shared(makeConfig(yaml_config, runtime_), *dispatcher_, - runtime_, "test_prefix.", stats_, random_); + const auto config = std::make_shared(makeConfig(yaml_config, runtime_), + *dispatcher_, runtime_, "test_prefix.", + stats_, random_, time_system_); + + // Advance time so that the latency sample calculations don't underflow if monotonic time is 0. + time_system_.advanceTimeAsync(std::chrono::hours(42)); + + return config; } protected: + void sampleLatency(const GradientControllerSharedPtr& controller, + std::chrono::microseconds latency) { + controller->recordLatencySample(time_system_.monotonicTime() - latency); + } + // Helper function that will attempt to pull forwarding decisions. void tryForward(const GradientControllerSharedPtr& controller, const bool expect_forward_response) { @@ -71,10 +82,30 @@ class GradientControllerTest : public testing::Test { const auto config = makeConfig(yaml_config, runtime_); for (uint32_t i = 0; i <= config.minRTTAggregateRequestCount(); ++i) { tryForward(controller, true); - controller->recordLatencySample(latency); + sampleLatency(controller, latency); } } + void verifyMinRTTValue(std::chrono::milliseconds min_rtt) { + EXPECT_EQ( + min_rtt.count(), + stats_.gauge("test_prefix.min_rtt_msecs", Stats::Gauge::ImportMode::NeverImport).value()); + } + + void verifyMinRTTActive() { + EXPECT_EQ( + 1, + stats_.gauge("test_prefix.min_rtt_calculation_active", Stats::Gauge::ImportMode::Accumulate) + .value()); + } + + void verifyMinRTTInactive() { + EXPECT_EQ( + 0, + stats_.gauge("test_prefix.min_rtt_calculation_active", Stats::Gauge::ImportMode::Accumulate) + .value()); + } + Event::SimulatedTimeSystem time_system_; Stats::TestUtil::TestStore stats_; NiceMock runtime_; @@ -206,6 +237,71 @@ TEST_F(GradientControllerConfigTest, DefaultValuesTest) { EXPECT_EQ(config.minRTTBufferPercent(), 0.25); } +// Verify that requests started in the previous minRTT window are not sampled in the next. +TEST_F(GradientControllerTest, MinRTTEpoch) { + const std::string yaml = R"EOF( +sample_aggregate_percentile: + value: 50 +concurrency_limit_params: + concurrency_update_interval: 0.1s +min_rtt_calc_params: + jitter: + value: 0.0 + interval: 30s + request_count: 25 + min_concurrency: 2 + buffer: + value: 0.0 +)EOF"; + + const int min_concurrency = 2; + auto controller = makeController(yaml); + const auto min_rtt = std::chrono::milliseconds(1350); + time_system_.advanceTimeAsync(min_rtt); + + verifyMinRTTActive(); + EXPECT_EQ(controller->concurrencyLimit(), min_concurrency); + advancePastMinRTTStage(controller, yaml, std::chrono::milliseconds(1350)); + verifyMinRTTInactive(); + verifyMinRTTValue(std::chrono::milliseconds(1350)); + + // Advance time to just before the end of the epoch and inflate the concurrency limit. 
+ uint32_t last_limit = controller->concurrencyLimit(); + for (int i = 0; i < 29; ++i) { + tryForward(controller, true); + time_system_.advanceTimeAsync(std::chrono::seconds(1)); + sampleLatency(controller, min_rtt); + dispatcher_->run(Event::Dispatcher::RunType::Block); + EXPECT_GT(controller->concurrencyLimit(), last_limit); + last_limit = controller->concurrencyLimit(); + } + + int active_rq_counter = 0; + // Send out requests that we won't attempt to sample until the next minRTT window so the requests + // will be disregarded as they were started in the previous minRTT window. + for (uint32_t i = 0; i < controller->concurrencyLimit(); ++i) { + tryForward(controller, true); + ++active_rq_counter; + } + + // Move into the next minRTT window while the requests are outstanding. + time_system_.advanceTimeAsync(std::chrono::seconds(5)); + dispatcher_->run(Event::Dispatcher::RunType::Block); + verifyMinRTTActive(); + EXPECT_EQ(controller->concurrencyLimit(), min_concurrency); + + // Sample more than enough requests to break out of the minRTT measurement window (>25). These are + // expected to be disregarded since they would have started in the previous minRTT epoch. + // Therefore, we expect the minRTT window to still be active. + EXPECT_GT(active_rq_counter, 25); + for (int i = 0; i < active_rq_counter; ++i) { + // Sample requests that were send "5 minutes ago," which would surely be from an older minRTT + // epoch. + sampleLatency(controller, std::chrono::minutes(5)); + } + verifyMinRTTActive(); +} + TEST_F(GradientControllerTest, MinRTTLogicTest) { const std::string yaml = R"EOF( sample_aggregate_percentile: @@ -226,34 +322,28 @@ TEST_F(GradientControllerTest, MinRTTLogicTest) { // The controller should be measuring minRTT upon creation, so the concurrency window is 7 (the // min concurrency). - EXPECT_EQ( - 1, - stats_.gauge("test_prefix.min_rtt_calculation_active", Stats::Gauge::ImportMode::Accumulate) - .value()); + verifyMinRTTActive(); EXPECT_EQ(controller->concurrencyLimit(), 7); for (int i = 0; i < 7; ++i) { tryForward(controller, true); } tryForward(controller, false); tryForward(controller, false); + time_system_.advanceTimeAsync(min_rtt); for (int i = 0; i < 7; ++i) { - controller->recordLatencySample(min_rtt); + sampleLatency(controller, min_rtt); } // 43 more requests should cause the minRTT to be done calculating. for (int i = 0; i < 43; ++i) { EXPECT_EQ(controller->concurrencyLimit(), 7); tryForward(controller, true); - controller->recordLatencySample(min_rtt); + sampleLatency(controller, min_rtt); } // Verify the minRTT value measured is accurate. 
- EXPECT_EQ( - 0, - stats_.gauge("test_prefix.min_rtt_calculation_active", Stats::Gauge::ImportMode::Accumulate) - .value()); - EXPECT_EQ( - 13, stats_.gauge("test_prefix.min_rtt_msecs", Stats::Gauge::ImportMode::NeverImport).value()); + verifyMinRTTInactive(); + verifyMinRTTValue(std::chrono::milliseconds(13)); } TEST_F(GradientControllerTest, CancelLatencySample) { @@ -274,10 +364,9 @@ TEST_F(GradientControllerTest, CancelLatencySample) { for (int i = 1; i <= 5; ++i) { tryForward(controller, true); - controller->recordLatencySample(std::chrono::milliseconds(i)); + sampleLatency(controller, std::chrono::milliseconds(i)); } - EXPECT_EQ( - 3, stats_.gauge("test_prefix.min_rtt_msecs", Stats::Gauge::ImportMode::NeverImport).value()); + verifyMinRTTValue(std::chrono::milliseconds(3)); } TEST_F(GradientControllerTest, SamplePercentileProcessTest) { @@ -326,8 +415,7 @@ TEST_F(GradientControllerTest, MinRTTBufferTest) { // Force a minRTT of 5ms. advancePastMinRTTStage(controller, yaml, std::chrono::milliseconds(5)); - EXPECT_EQ( - 5, stats_.gauge("test_prefix.min_rtt_msecs", Stats::Gauge::ImportMode::NeverImport).value()); + verifyMinRTTValue(std::chrono::milliseconds(5)); // Ensure that the minRTT doesn't decrease due to the buffer added. for (int recalcs = 0; recalcs < 10; ++recalcs) { @@ -336,7 +424,7 @@ TEST_F(GradientControllerTest, MinRTTBufferTest) { tryForward(controller, true); // Recording sample that's technically higher than the minRTT, but the 50% buffer should // prevent the concurrency limit from decreasing. - controller->recordLatencySample(std::chrono::milliseconds(6)); + sampleLatency(controller, std::chrono::milliseconds(6)); } time_system_.advanceTimeAsync(std::chrono::milliseconds(101)); dispatcher_->run(Event::Dispatcher::RunType::Block); @@ -366,8 +454,7 @@ TEST_F(GradientControllerTest, ConcurrencyLimitBehaviorTestBasic) { // Force a minRTT of 5ms. advancePastMinRTTStage(controller, yaml, std::chrono::milliseconds(5)); - EXPECT_EQ( - 5, stats_.gauge("test_prefix.min_rtt_msecs", Stats::Gauge::ImportMode::NeverImport).value()); + verifyMinRTTValue(std::chrono::milliseconds(5)); // Ensure that the concurrency window increases on its own due to the headroom calculation with // the max gradient. @@ -382,7 +469,7 @@ TEST_F(GradientControllerTest, ConcurrencyLimitBehaviorTestBasic) { const auto last_concurrency = controller->concurrencyLimit(); for (int i = 1; i <= 5; ++i) { tryForward(controller, true); - controller->recordLatencySample(std::chrono::milliseconds(4)); + sampleLatency(controller, std::chrono::milliseconds(4)); } time_system_.advanceTimeAsync(std::chrono::milliseconds(101)); dispatcher_->run(Event::Dispatcher::RunType::Block); @@ -396,7 +483,7 @@ TEST_F(GradientControllerTest, ConcurrencyLimitBehaviorTestBasic) { const auto last_concurrency = controller->concurrencyLimit(); for (int i = 1; i <= 5; ++i) { tryForward(controller, true); - controller->recordLatencySample(std::chrono::milliseconds(6)); + sampleLatency(controller, std::chrono::milliseconds(6)); } time_system_.advanceTimeAsync(std::chrono::milliseconds(101)); dispatcher_->run(Event::Dispatcher::RunType::Block); @@ -422,15 +509,17 @@ TEST_F(GradientControllerTest, MinRTTReturnToPreviousLimit) { auto controller = makeController(yaml); EXPECT_EQ(controller->concurrencyLimit(), 3); - // Get initial minRTT measurement out of the way. + // Get initial minRTT measurement out of the way and advance time so request samples are not + // thought to come from the previous minRTT epoch. 
advancePastMinRTTStage(controller, yaml, std::chrono::milliseconds(5)); + time_system_.advanceTimeAsync(std::chrono::seconds(1)); // Force the limit calculation to run a few times from some measurements. for (int sample_iters = 0; sample_iters < 5; ++sample_iters) { const auto last_concurrency = controller->concurrencyLimit(); for (int i = 1; i <= 5; ++i) { tryForward(controller, true); - controller->recordLatencySample(std::chrono::milliseconds(4)); + sampleLatency(controller, std::chrono::milliseconds(4)); } time_system_.advanceTimeAsync(std::chrono::milliseconds(101)); dispatcher_->run(Event::Dispatcher::RunType::Block); @@ -445,11 +534,14 @@ TEST_F(GradientControllerTest, MinRTTReturnToPreviousLimit) { dispatcher_->run(Event::Dispatcher::RunType::Block); EXPECT_EQ(controller->concurrencyLimit(), 3); + // Advance time again for request samples to appear from the current epoch. + time_system_.advanceTimeAsync(std::chrono::seconds(1)); + // 49 more requests should cause the minRTT to be done calculating. for (int i = 0; i < 5; ++i) { EXPECT_EQ(controller->concurrencyLimit(), 3); tryForward(controller, true); - controller->recordLatencySample(std::chrono::milliseconds(13)); + sampleLatency(controller, std::chrono::milliseconds(13)); } // Check that we restored the old concurrency limit value. @@ -473,15 +565,17 @@ TEST_F(GradientControllerTest, MinRTTRescheduleTest) { auto controller = makeController(yaml); EXPECT_EQ(controller->concurrencyLimit(), 3); - // Get initial minRTT measurement out of the way. + // Get initial minRTT measurement out of the way and advance time so request samples are not + // thought to come from the previous minRTT epoch. advancePastMinRTTStage(controller, yaml, std::chrono::milliseconds(5)); + time_system_.advanceTimeAsync(std::chrono::seconds(1)); // Force the limit calculation to run a few times from some measurements. for (int sample_iters = 0; sample_iters < 5; ++sample_iters) { const auto last_concurrency = controller->concurrencyLimit(); for (int i = 1; i <= 5; ++i) { tryForward(controller, true); - controller->recordLatencySample(std::chrono::milliseconds(4)); + sampleLatency(controller, std::chrono::milliseconds(4)); } time_system_.advanceTimeAsync(std::chrono::milliseconds(101)); dispatcher_->run(Event::Dispatcher::RunType::Block); @@ -525,7 +619,7 @@ TEST_F(GradientControllerTest, NoSamplesTest) { const auto last_concurrency = controller->concurrencyLimit(); for (int i = 1; i <= 5; ++i) { tryForward(controller, true); - controller->recordLatencySample(std::chrono::milliseconds(4)); + sampleLatency(controller, std::chrono::milliseconds(4)); } time_system_.advanceTimeAsync(std::chrono::milliseconds(101)); dispatcher_->run(Event::Dispatcher::RunType::Block); @@ -567,8 +661,9 @@ TEST_F(GradientControllerTest, TimerAccuracyTest) { .WillOnce(Return(rtt_timer)) .WillOnce(Return(sample_timer)); EXPECT_CALL(*sample_timer, enableTimer(std::chrono::milliseconds(123), _)); - auto controller = std::make_shared( - makeConfig(yaml, runtime_), fake_dispatcher, runtime_, "test_prefix.", stats_, random_); + auto controller = + std::make_shared(makeConfig(yaml, runtime_), fake_dispatcher, runtime_, + "test_prefix.", stats_, random_, time_system_); // Set the minRTT- this will trigger the timer for the next minRTT calculation. 
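// The remaining hunks in this file follow the same pattern: recordLatencySample() now takes the
// request send time instead of a measured duration, so the tests sample through the
// sampleLatency() helper above (which backdates time_system_.monotonicTime() by the desired
// latency) and advance simulated time first, keeping samples inside the current minRTT epoch so
// they are not discarded as leftovers from the previous window.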
@@ -580,7 +675,8 @@ TEST_F(GradientControllerTest, TimerAccuracyTest) { EXPECT_CALL(*sample_timer, enableTimer(std::chrono::milliseconds(123), _)); for (int i = 0; i < 6; ++i) { tryForward(controller, true); - controller->recordLatencySample(std::chrono::milliseconds(5)); + time_system_.advanceTimeAsync(std::chrono::milliseconds(5)); + sampleLatency(controller, std::chrono::milliseconds(5)); } } @@ -609,8 +705,9 @@ TEST_F(GradientControllerTest, TimerAccuracyTestNoJitter) { .WillOnce(Return(rtt_timer)) .WillOnce(Return(sample_timer)); EXPECT_CALL(*sample_timer, enableTimer(std::chrono::milliseconds(123), _)); - auto controller = std::make_shared( - makeConfig(yaml, runtime_), fake_dispatcher, runtime_, "test_prefix.", stats_, random_); + auto controller = + std::make_shared(makeConfig(yaml, runtime_), fake_dispatcher, runtime_, + "test_prefix.", stats_, random_, time_system_); // Set the minRTT- this will trigger the timer for the next minRTT calculation. EXPECT_CALL(*rtt_timer, enableTimer(std::chrono::milliseconds(45000), _)); @@ -618,7 +715,8 @@ TEST_F(GradientControllerTest, TimerAccuracyTestNoJitter) { EXPECT_CALL(*sample_timer, enableTimer(std::chrono::milliseconds(123), _)); for (int i = 0; i < 6; ++i) { tryForward(controller, true); - controller->recordLatencySample(std::chrono::milliseconds(5)); + time_system_.advanceTimeAsync(std::chrono::milliseconds(5)); + sampleLatency(controller, std::chrono::milliseconds(5)); } } @@ -646,8 +744,7 @@ TEST_F(GradientControllerTest, ConsecutiveMinConcurrencyReset) { // Force a minRTT of 5ms. advancePastMinRTTStage(controller, yaml, std::chrono::milliseconds(5)); - EXPECT_EQ( - 5, stats_.gauge("test_prefix.min_rtt_msecs", Stats::Gauge::ImportMode::NeverImport).value()); + verifyMinRTTValue(std::chrono::milliseconds(5)); // Ensure that the concurrency window increases on its own due to the headroom calculation with // the max gradient. @@ -662,7 +759,7 @@ TEST_F(GradientControllerTest, ConsecutiveMinConcurrencyReset) { for (int recalcs = 0; recalcs < 5; ++recalcs) { for (int i = 1; i <= 5; ++i) { tryForward(controller, true); - controller->recordLatencySample(elevated_latency); + sampleLatency(controller, elevated_latency); } time_system_.advanceTimeAsync(std::chrono::milliseconds(101)); dispatcher_->run(Event::Dispatcher::RunType::Block); @@ -673,7 +770,7 @@ TEST_F(GradientControllerTest, ConsecutiveMinConcurrencyReset) { const auto last_concurrency = controller->concurrencyLimit(); for (int i = 1; i <= 5; ++i) { tryForward(controller, true); - controller->recordLatencySample(elevated_latency); + sampleLatency(controller, elevated_latency); } time_system_.advanceTimeAsync(std::chrono::milliseconds(101)); dispatcher_->run(Event::Dispatcher::RunType::Block); From 0125d6fd611958fdef85e6665f307d2bffd2a531 Mon Sep 17 00:00:00 2001 From: htuch Date: Mon, 15 Jun 2020 20:30:34 -0400 Subject: [PATCH 357/909] eds_speed_test: add GrpcMuxImpl config ingestion path to benchmark. (#11566) A lot of the overhead we see in https://github.com/envoyproxy/envoy/issues/11362 has to do with the overhead from the GrpcMuxImpl ingestion path, and helps put the EDS cluster overhead in context. This patch adds a GrpcMuxImpl and GrpcSubscription to the test, allowing a fairly realistic cost of EDS update to be obtained. Below is the example output. We're seeing v3 costs ~3.2x of the v2 ingestion. Future patches will narrow this. 
--------------------------------------------------------------------------------- Benchmark Time CPU Iterations --------------------------------------------------------------------------------- priorityAndLocalityWeighted/0/0/2000 6277063 ns 6276456 ns 113 priorityAndLocalityWeighted/1/0/2000 20236878 ns 20236505 ns 35 priorityAndLocalityWeighted/0/1/2000 4589757 ns 4588833 ns 154 priorityAndLocalityWeighted/1/1/2000 14853274 ns 14850006 ns 42 priorityAndLocalityWeighted/0/0/4096 11865838 ns 11865314 ns 59 priorityAndLocalityWeighted/1/0/4096 39787876 ns 39787716 ns 18 priorityAndLocalityWeighted/0/1/4096 8186874 ns 8185724 ns 87 priorityAndLocalityWeighted/1/1/4096 29376139 ns 29374980 ns 24 priorityAndLocalityWeighted/0/0/32768 110248743 ns 110246101 ns 6 priorityAndLocalityWeighted/1/0/32768 343826306 ns 343761359 ns 2 priorityAndLocalityWeighted/0/1/32768 76962915 ns 76953406 ns 7 priorityAndLocalityWeighted/1/1/32768 254426231 ns 254401890 ns 3 priorityAndLocalityWeighted/0/0/100000 352576987 ns 352550994 ns 2 priorityAndLocalityWeighted/1/0/100000 1100804152 ns 1100646263 ns 1 priorityAndLocalityWeighted/0/1/100000 253870688 ns 253857626 ns 3 priorityAndLocalityWeighted/1/1/100000 808080918 ns 808028852 ns 1 Risk level: Low (test only) Testing: Added ASSERTs to validate that we're loaded all the hosts at the end of the benchmark. Signed-off-by: Harvey Tuch --- test/common/upstream/BUILD | 3 ++ test/common/upstream/eds_speed_test.cc | 47 ++++++++++++++++++++------ 2 files changed, 40 insertions(+), 10 deletions(-) diff --git a/test/common/upstream/BUILD b/test/common/upstream/BUILD index b12cb6337c6e..fc6bc5d966ef 100644 --- a/test/common/upstream/BUILD +++ b/test/common/upstream/BUILD @@ -104,6 +104,9 @@ envoy_cc_benchmark_binary( ], deps = [ ":utility_lib", + "//source/common/config:grpc_mux_lib", + "//source/common/config:grpc_subscription_lib", + "//source/common/config:protobuf_link_hacks", "//source/common/config:utility_lib", "//source/common/upstream:eds_lib", "//source/extensions/transport_sockets/raw_buffer:config", diff --git a/test/common/upstream/eds_speed_test.cc b/test/common/upstream/eds_speed_test.cc index f79e037e8bf7..1f1c25c4ad9a 100644 --- a/test/common/upstream/eds_speed_test.cc +++ b/test/common/upstream/eds_speed_test.cc @@ -8,6 +8,8 @@ #include "envoy/service/discovery/v3/discovery.pb.h" #include "envoy/stats/scope.h" +#include "common/config/grpc_mux_impl.h" +#include "common/config/grpc_subscription_impl.h" #include "common/config/utility.h" #include "common/singleton/manager_impl.h" #include "common/upstream/eds.h" @@ -30,7 +32,18 @@ namespace Upstream { class EdsSpeedTest { public: - EdsSpeedTest(benchmark::State& state) : state_(state), api_(Api::createApiForTest(stats_)) {} + EdsSpeedTest(benchmark::State& state, bool v2_config) + : state_(state), v2_config_(v2_config), + type_url_(v2_config_ + ? 
"type.googleapis.com/envoy.api.v2.ClusterLoadAssignment" + : "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment"), + subscription_stats_(Config::Utility::generateStats(stats_)), + api_(Api::createApiForTest(stats_)), async_client_(new Grpc::MockAsyncClient()), + grpc_mux_(new Config::GrpcMuxImpl( + local_info_, std::unique_ptr(async_client_), dispatcher_, + *Protobuf::DescriptorPool::generated_pool()->FindMethodByName( + "envoy.service.endpoint.v3.EndpointDiscoveryService.StreamEndpoints"), + envoy::config::core::v3::ApiVersion::AUTO, random_, stats_, {}, true)) {} void resetCluster(const std::string& yaml_config, Cluster::InitializePhase initialize_phase) { local_info_.node_.mutable_locality()->set_zone("us-east-1a"); @@ -45,6 +58,9 @@ class EdsSpeedTest { std::move(scope), false); EXPECT_EQ(initialize_phase, cluster_->initializePhase()); eds_callbacks_ = cm_.subscription_factory_.callbacks_; + subscription_ = std::make_unique( + grpc_mux_, *eds_callbacks_, subscription_stats_, type_url_, dispatcher_, + std::chrono::milliseconds(), false); } void initialize() { @@ -54,8 +70,7 @@ class EdsSpeedTest { // Set up an EDS config with multiple priorities, localities, weights and make sure // they are loaded and reloaded as expected. - void priorityAndLocalityWeightedHelper(bool v2_config, bool ignore_unknown_dynamic_fields, - int num_hosts) { + void priorityAndLocalityWeightedHelper(bool ignore_unknown_dynamic_fields, size_t num_hosts) { state_.PauseTiming(); envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment; cluster_load_assignment.set_cluster_name("fare"); @@ -83,7 +98,7 @@ class EdsSpeedTest { endpoints->mutable_load_balancing_weight()->set_value(1); uint32_t port = 1000; - for (int i = 0; i < num_hosts; ++i) { + for (size_t i = 0; i < num_hosts; ++i) { auto* socket_address = endpoints->add_lb_endpoints() ->mutable_endpoint() ->mutable_address() @@ -96,23 +111,31 @@ class EdsSpeedTest { validation_visitor_.setSkipValidation(ignore_unknown_dynamic_fields); initialize(); - Protobuf::RepeatedPtrField resources; - auto* resource = resources.Add(); + auto response = std::make_unique(); + response->set_type_url(type_url_); + auto* resource = response->mutable_resources()->Add(); resource->PackFrom(cluster_load_assignment); - if (v2_config) { + if (v2_config_) { RELEASE_ASSERT(resource->type_url() == "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", ""); resource->set_type_url("type.googleapis.com/envoy.api.v2.ClusterLoadAssignment"); } + EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(testing::Return(&async_stream_)); + subscription_->start({"fare"}); state_.ResumeTiming(); - eds_callbacks_->onConfigUpdate(resources, ""); + grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(response)); ASSERT(initialized_); + ASSERT(cluster_->prioritySet().hostSetsPerPriority()[1]->hostsPerLocality().get()[0].size() == + num_hosts); } benchmark::State& state_; + const bool v2_config_; + const std::string type_url_; bool initialized_{}; Stats::IsolatedStoreImpl stats_; + Config::SubscriptionStats subscription_stats_; Ssl::MockContextManager ssl_context_manager_; envoy::config::cluster::v3::Cluster eds_cluster_; NiceMock cm_; @@ -127,6 +150,10 @@ class EdsSpeedTest { NiceMock tls_; ProtobufMessage::MockValidationVisitor validation_visitor_; Api::ApiPtr api_; + Grpc::MockAsyncClient* async_client_; + NiceMock async_stream_; + std::shared_ptr grpc_mux_; + std::unique_ptr subscription_; }; } // namespace Upstream @@ -137,8 +164,8 @@ static 
void priorityAndLocalityWeighted(benchmark::State& state) { Envoy::Logger::Context logging_state(spdlog::level::warn, Envoy::Logger::Logger::DEFAULT_LOG_FORMAT, lock, false); for (auto _ : state) { - Envoy::Upstream::EdsSpeedTest speed_test(state); - speed_test.priorityAndLocalityWeightedHelper(state.range(0), state.range(1), state.range(2)); + Envoy::Upstream::EdsSpeedTest speed_test(state, state.range(0)); + speed_test.priorityAndLocalityWeightedHelper(state.range(1), state.range(2)); } } From 74530c92cfa3682b49b540fddf2aba40ac10c68e Mon Sep 17 00:00:00 2001 From: tomocy <36136133+tomocy@users.noreply.github.com> Date: Tue, 16 Jun 2020 22:27:31 +0900 Subject: [PATCH 358/909] listener manager: fix typo (#11600) fix typo Signed-off-by: tomocy --- source/server/listener_manager_impl.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/server/listener_manager_impl.cc b/source/server/listener_manager_impl.cc index 9aac1a14d953..ca7bc7547148 100644 --- a/source/server/listener_manager_impl.cc +++ b/source/server/listener_manager_impl.cc @@ -474,7 +474,7 @@ bool ListenerManagerImpl::addOrUpdateListenerInternal( // We have no warming or active listener so we need to make a new one. What we do depends on // whether workers have been started or not. Additionally, search through draining listeners // to see if there is a listener that has a socket factory for the same address we are - // configured for and doesn't not use SO_REUSEPORT. This is an edge case, but may happen if a + // configured for and doesn't use SO_REUSEPORT. This is an edge case, but may happen if a // listener is removed and then added back with a same or different name and intended to listen // on the same address. This should work and not fail. Network::ListenSocketFactorySharedPtr draining_listen_socket_factory; From e9c2c8c4a0141c9634316e8283f98f412d0dd207 Mon Sep 17 00:00:00 2001 From: Jason Heiss Date: Tue, 16 Jun 2020 11:20:37 -0400 Subject: [PATCH 359/909] dependency and docs: update jwt_verify_lib to 2020-06-03 (#11479) Signed-off-by: Jason Heiss --- bazel/repository_locations.bzl | 8 ++++---- .../configuration/http/http_filters/jwt_authn_filter.rst | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 645d56a6ddfe..417b9507f670 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -278,10 +278,10 @@ DEPENDENCY_REPOSITORIES = dict( cpe = "N/A", ), com_github_google_jwt_verify = dict( - sha256 = "118f955620509f1634cbd918c63234d2048dce56b1815caf348d78e3c3dc899c", - strip_prefix = "jwt_verify_lib-44291b2ee4c19631e5a0a0bf4f965436a9364ca7", - # 2020-05-21 - urls = ["https://github.com/google/jwt_verify_lib/archive/44291b2ee4c19631e5a0a0bf4f965436a9364ca7.tar.gz"], + sha256 = "d2e28897c297bd04429e43a1b485f7350acc23cbfee6365b8a3634c17840b2f6", + strip_prefix = "jwt_verify_lib-f44cf49d185ad0694b472da78071b4d67313fb86", + # 2020-06-03 + urls = ["https://github.com/google/jwt_verify_lib/archive/f44cf49d185ad0694b472da78071b4d67313fb86.tar.gz"], use_category = ["dataplane"], cpe = "N/A", ), diff --git a/docs/root/configuration/http/http_filters/jwt_authn_filter.rst b/docs/root/configuration/http/http_filters/jwt_authn_filter.rst index 118f21946b95..27f2d739a330 100644 --- a/docs/root/configuration/http/http_filters/jwt_authn_filter.rst +++ b/docs/root/configuration/http/http_filters/jwt_authn_filter.rst @@ -8,7 +8,7 @@ This HTTP filter can be used to verify JSON Web Token (JWT). 
It will verify its JWKS is needed to verify JWT signatures. They can be specified in the filter config or can be fetched remotely from a JWKS server. .. attention:: - ES256, ES384, ES512, HS256, HS384, HS512, RS256, RS384 and RS512 are supported for the JWT alg. + EdDSA, ES256, ES384, ES512, HS256, HS384, HS512, RS256, RS384 and RS512 are supported for the JWT alg. Configuration ------------- From 7e88243610c09a8346226b49528bc7516fcfd7a7 Mon Sep 17 00:00:00 2001 From: ahedberg Date: Tue, 16 Jun 2020 12:55:49 -0400 Subject: [PATCH 360/909] Use `ABSL_` prefixed thread annotations. (#11597) Commit Message: Use ABSL_ prefixed thread annotations consistently. Additional Description: Envoy currently uses a mix of ABSL_-prefixed and non-ABSL_-prefixed thread annotations. Abseil is renaming its thread-safety analysis attributes per Clang's guidance to define macros for them, so the non-ABSL names will go away eventually. Risk Level: Low Testing: existing Docs Changes: n/a Release Notes: n/a Signed-off-by: Ashley Hedberg --- source/common/common/lock_guard.h | 6 ++-- source/common/grpc/google_grpc_context.h | 2 +- .../common/network/connection_balancer_impl.h | 2 +- source/common/stats/allocator_impl.cc | 8 +++--- source/common/stats/allocator_impl.h | 12 ++++---- source/common/stats/symbol_table_impl.cc | 10 +++---- source/common/stats/symbol_table_impl.h | 16 +++++------ source/common/stats/thread_local_store.h | 6 ++-- .../clusters/redis/redis_cluster_lb.h | 2 +- .../redis/cluster_refresh_manager_impl.h | 2 +- .../simple_http_cache/simple_http_cache.h | 2 +- .../quiche/platform/flags_impl.h | 2 +- .../quiche/platform/quic_mutex_impl.h | 17 +++++++---- source/server/hot_restart_impl.h | 6 ++-- .../tracers/opencensus/tracer_test.cc | 2 +- test/integration/server.h | 2 +- test/test_common/global.h | 3 +- test/test_common/only_one_thread.h | 2 +- test/test_common/simulated_time_system.cc | 28 ++++++++++--------- test/test_common/simulated_time_system.h | 26 ++++++++--------- test/test_common/test_time.h | 2 +- test/test_common/test_time_system.h | 11 ++++---- 22 files changed, 89 insertions(+), 80 deletions(-) diff --git a/source/common/common/lock_guard.h b/source/common/common/lock_guard.h index d3025a333999..f1524469067d 100644 --- a/source/common/common/lock_guard.h +++ b/source/common/common/lock_guard.h @@ -39,7 +39,7 @@ class ABSL_SCOPED_LOCKABLE OptionalLockGuard { // At the moment, TryLockGuard is very hard to annotate correctly, I // believe due to limitations in clang. At the moment there are no -// GUARDED_BY variables for any tryLocks in the codebase, so it's +// ABSL_GUARDED_BY variables for any tryLocks in the codebase, so it's // easiest just to leave it out. In a future clang release it's // possible we can enable this. See also the commented-out block // in ThreadTest.TestTryLockGuard in test/common/common/thread_test.cc. @@ -64,7 +64,7 @@ class ABSL_SCOPED_LOCKABLE TryLockGuard { /** * Destruction of the TryLockGuard unlocks the lock, if it was locked. */ - ~TryLockGuard() DISABLE_TRYLOCKGUARD_ANNOTATION(UNLOCK_FUNCTION()) { + ~TryLockGuard() DISABLE_TRYLOCKGUARD_ANNOTATION(ABSL_UNLOCK_FUNCTION()) { if (is_locked_) { lock_.unlock(); } @@ -73,7 +73,7 @@ class ABSL_SCOPED_LOCKABLE TryLockGuard { /** * @return bool whether the lock was successfully acquired. 
*/ - bool tryLock() DISABLE_TRYLOCKGUARD_ANNOTATION(EXCLUSIVE_TRYLOCK_FUNCTION(true)) { + bool tryLock() DISABLE_TRYLOCKGUARD_ANNOTATION(ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true)) { is_locked_ = lock_.tryLock(); return is_locked_; } diff --git a/source/common/grpc/google_grpc_context.h b/source/common/grpc/google_grpc_context.h index b83f29a12214..2ec235161f2e 100644 --- a/source/common/grpc/google_grpc_context.h +++ b/source/common/grpc/google_grpc_context.h @@ -21,7 +21,7 @@ class GoogleGrpcContext { private: struct InstanceTracker { Thread::MutexBasicLockable mutex_; - uint64_t live_instances_ GUARDED_BY(mutex_) = 0; + uint64_t live_instances_ ABSL_GUARDED_BY(mutex_) = 0; }; static InstanceTracker& instanceTracker(); diff --git a/source/common/network/connection_balancer_impl.h b/source/common/network/connection_balancer_impl.h index 49cdb601ace8..17a09542c1d7 100644 --- a/source/common/network/connection_balancer_impl.h +++ b/source/common/network/connection_balancer_impl.h @@ -24,7 +24,7 @@ class ExactConnectionBalancerImpl : public ConnectionBalancer { private: absl::Mutex lock_; - std::vector handlers_ GUARDED_BY(lock_); + std::vector handlers_ ABSL_GUARDED_BY(lock_); }; /** diff --git a/source/common/stats/allocator_impl.cc b/source/common/stats/allocator_impl.cc index 04dd31eca9c7..c4995e90956a 100644 --- a/source/common/stats/allocator_impl.cc +++ b/source/common/stats/allocator_impl.cc @@ -96,7 +96,7 @@ template class StatsSharedImpl : public MetricImpl * our ref-count decrement hits zero. The counters and gauges are held in * distinct sets so we virtualize this removal helper. */ - virtual void removeFromSetLockHeld() EXCLUSIVE_LOCKS_REQUIRED(alloc_.mutex_) PURE; + virtual void removeFromSetLockHeld() ABSL_EXCLUSIVE_LOCKS_REQUIRED(alloc_.mutex_) PURE; protected: AllocatorImpl& alloc_; @@ -121,7 +121,7 @@ class CounterImpl : public StatsSharedImpl { const StatNameTagVector& stat_name_tags) : StatsSharedImpl(name, alloc, tag_extracted_name, stat_name_tags) {} - void removeFromSetLockHeld() EXCLUSIVE_LOCKS_REQUIRED(alloc_.mutex_) override { + void removeFromSetLockHeld() ABSL_EXCLUSIVE_LOCKS_REQUIRED(alloc_.mutex_) override { const size_t count = alloc_.counters_.erase(statName()); ASSERT(count == 1); } @@ -165,7 +165,7 @@ class GaugeImpl : public StatsSharedImpl { } } - void removeFromSetLockHeld() override EXCLUSIVE_LOCKS_REQUIRED(alloc_.mutex_) { + void removeFromSetLockHeld() override ABSL_EXCLUSIVE_LOCKS_REQUIRED(alloc_.mutex_) { const size_t count = alloc_.gauges_.erase(statName()); ASSERT(count == 1); } @@ -234,7 +234,7 @@ class TextReadoutImpl : public StatsSharedImpl { const StatNameTagVector& stat_name_tags) : StatsSharedImpl(name, alloc, tag_extracted_name, stat_name_tags) {} - void removeFromSetLockHeld() EXCLUSIVE_LOCKS_REQUIRED(alloc_.mutex_) override { + void removeFromSetLockHeld() ABSL_EXCLUSIVE_LOCKS_REQUIRED(alloc_.mutex_) override { const size_t count = alloc_.text_readouts_.erase(statName()); ASSERT(count == 1); } diff --git a/source/common/stats/allocator_impl.h b/source/common/stats/allocator_impl.h index ddee00c39559..3242d0de5fef 100644 --- a/source/common/stats/allocator_impl.h +++ b/source/common/stats/allocator_impl.h @@ -72,18 +72,18 @@ class AllocatorImpl : public Allocator { bool operator()(const Metric* a, StatName b) const { return a->statName() == b; } }; - void removeCounterFromSetLockHeld(Counter* counter) EXCLUSIVE_LOCKS_REQUIRED(mutex_); - void removeGaugeFromSetLockHeld(Gauge* gauge) EXCLUSIVE_LOCKS_REQUIRED(mutex_); - void 
removeTextReadoutFromSetLockHeld(Counter* counter) EXCLUSIVE_LOCKS_REQUIRED(mutex_); + void removeCounterFromSetLockHeld(Counter* counter) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_); + void removeGaugeFromSetLockHeld(Gauge* gauge) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_); + void removeTextReadoutFromSetLockHeld(Counter* counter) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_); // An unordered set of HeapStatData pointers which keys off the key() // field in each object. This necessitates a custom comparator and hasher, which key off of the // StatNamePtr's own StatNamePtrHash and StatNamePtrCompare operators. template using StatSet = absl::flat_hash_set; - StatSet counters_ GUARDED_BY(mutex_); - StatSet gauges_ GUARDED_BY(mutex_); - StatSet text_readouts_ GUARDED_BY(mutex_); + StatSet counters_ ABSL_GUARDED_BY(mutex_); + StatSet gauges_ ABSL_GUARDED_BY(mutex_); + StatSet text_readouts_ ABSL_GUARDED_BY(mutex_); SymbolTable& symbol_table_; diff --git a/source/common/stats/symbol_table_impl.cc b/source/common/stats/symbol_table_impl.cc index 78d20924c328..a36a4a2a9681 100644 --- a/source/common/stats/symbol_table_impl.cc +++ b/source/common/stats/symbol_table_impl.cc @@ -162,7 +162,7 @@ std::vector SymbolTableImpl::decodeStrings(const SymbolTable: Encoding::decodeTokens( array, size, [this, &strings](Symbol symbol) - NO_THREAD_SAFETY_ANALYSIS { strings.push_back(fromSymbol(symbol)); }, + ABSL_NO_THREAD_SAFETY_ANALYSIS { strings.push_back(fromSymbol(symbol)); }, [&strings](absl::string_view str) { strings.push_back(str); }); return strings; } @@ -184,7 +184,7 @@ void SymbolTableImpl::Encoding::appendToMemBlock(StatName stat_name, } SymbolTableImpl::SymbolTableImpl() - // Have to be explicitly initialized, if we want to use the GUARDED_BY macro. + // Have to be explicitly initialized, if we want to use the ABSL_GUARDED_BY macro. : next_symbol_(FirstValidSymbol), monotonic_counter_(FirstValidSymbol) {} SymbolTableImpl::~SymbolTableImpl() { @@ -294,7 +294,7 @@ uint64_t SymbolTableImpl::getRecentLookups(const RecentLookupsFn& iter) const { Thread::LockGuard lock(lock_); recent_lookups_.forEach( [&name_count_map](absl::string_view str, uint64_t count) - NO_THREAD_SAFETY_ANALYSIS { name_count_map[std::string(str)] += count; }); + ABSL_NO_THREAD_SAFETY_ANALYSIS { name_count_map[std::string(str)] += count; }); total += recent_lookups_.total(); } @@ -388,13 +388,13 @@ Symbol SymbolTableImpl::toSymbol(absl::string_view sv) { } absl::string_view SymbolTableImpl::fromSymbol(const Symbol symbol) const - EXCLUSIVE_LOCKS_REQUIRED(lock_) { + ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_) { auto search = decode_map_.find(symbol); RELEASE_ASSERT(search != decode_map_.end(), "no such symbol"); return search->second->toStringView(); } -void SymbolTableImpl::newSymbol() EXCLUSIVE_LOCKS_REQUIRED(lock_) { +void SymbolTableImpl::newSymbol() ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_) { if (pool_.empty()) { next_symbol_ = ++monotonic_counter_; } else { diff --git a/source/common/stats/symbol_table_impl.h b/source/common/stats/symbol_table_impl.h index 97e16172814b..e620637b2a15 100644 --- a/source/common/stats/symbol_table_impl.h +++ b/source/common/stats/symbol_table_impl.h @@ -237,7 +237,7 @@ class SymbolTableImpl : public SymbolTable { * @param sv the individual string to be encoded as a symbol. * @return Symbol the encoded string. 
*/ - Symbol toSymbol(absl::string_view sv) EXCLUSIVE_LOCKS_REQUIRED(lock_); + Symbol toSymbol(absl::string_view sv) ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_); /** * Convenience function for decode(), decoding one symbol at a time. @@ -245,7 +245,7 @@ class SymbolTableImpl : public SymbolTable { * @param symbol the individual symbol to be decoded. * @return absl::string_view the decoded string. */ - absl::string_view fromSymbol(Symbol symbol) const EXCLUSIVE_LOCKS_REQUIRED(lock_); + absl::string_view fromSymbol(Symbol symbol) const ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_); /** * Stages a new symbol for use. To be called after a successful insertion. @@ -268,7 +268,7 @@ class SymbolTableImpl : public SymbolTable { // Stores the symbol to be used at next insertion. This should exist ahead of insertion time so // that if insertion succeeds, the value written is the correct one. - Symbol next_symbol_ GUARDED_BY(lock_); + Symbol next_symbol_ ABSL_GUARDED_BY(lock_); // If the free pool is exhausted, we monotonically increase this counter. Symbol monotonic_counter_; @@ -278,14 +278,14 @@ class SymbolTableImpl : public SymbolTable { // Using absl::string_view lets us only store the complete string once, in the decode map. using EncodeMap = absl::flat_hash_map; using DecodeMap = absl::flat_hash_map; - EncodeMap encode_map_ GUARDED_BY(lock_); - DecodeMap decode_map_ GUARDED_BY(lock_); + EncodeMap encode_map_ ABSL_GUARDED_BY(lock_); + DecodeMap decode_map_ ABSL_GUARDED_BY(lock_); // Free pool of symbols for re-use. // TODO(ambuc): There might be an optimization here relating to storing ranges of freed symbols // using an Envoy::IntervalSet. - std::stack pool_ GUARDED_BY(lock_); - RecentLookups recent_lookups_ GUARDED_BY(lock_); + std::stack pool_ ABSL_GUARDED_BY(lock_); + RecentLookups recent_lookups_ ABSL_GUARDED_BY(lock_); }; // Base class for holding the backing-storing for a StatName. 
The two derived @@ -818,7 +818,7 @@ class StatNameSet { const std::string name_; Stats::SymbolTable& symbol_table_; - Stats::StatNamePool pool_ GUARDED_BY(mutex_); + Stats::StatNamePool pool_ ABSL_GUARDED_BY(mutex_); mutable absl::Mutex mutex_; using StringStatNameMap = absl::flat_hash_map; StringStatNameMap builtin_stat_names_; diff --git a/source/common/stats/thread_local_store.h b/source/common/stats/thread_local_store.h index 135abeb257e4..83c7b51e2a70 100644 --- a/source/common/stats/thread_local_store.h +++ b/source/common/stats/thread_local_store.h @@ -115,7 +115,7 @@ class ParentHistogramImpl : public MetricImpl { uint32_t use_count() const override { return refcount_helper_.use_count(); } private: - bool usedLockHeld() const EXCLUSIVE_LOCKS_REQUIRED(merge_lock_); + bool usedLockHeld() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(merge_lock_); Histogram::Unit unit_; Store& parent_; @@ -125,7 +125,7 @@ class ParentHistogramImpl : public MetricImpl { HistogramStatisticsImpl interval_statistics_; HistogramStatisticsImpl cumulative_statistics_; mutable Thread::MutexBasicLockable merge_lock_; - std::list tls_histograms_ GUARDED_BY(merge_lock_); + std::list tls_histograms_ ABSL_GUARDED_BY(merge_lock_); bool merged_; RefcountHelper refcount_helper_; }; @@ -424,7 +424,7 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo Event::Dispatcher* main_thread_dispatcher_{}; ThreadLocal::SlotPtr tls_; mutable Thread::MutexBasicLockable lock_; - absl::flat_hash_set scopes_ GUARDED_BY(lock_); + absl::flat_hash_set scopes_ ABSL_GUARDED_BY(lock_); ScopePtr default_scope_; std::list> timer_sinks_; TagProducerPtr tag_producer_; diff --git a/source/extensions/clusters/redis/redis_cluster_lb.h b/source/extensions/clusters/redis/redis_cluster_lb.h index d7912294570c..19d27cda3d0f 100644 --- a/source/extensions/clusters/redis/redis_cluster_lb.h +++ b/source/extensions/clusters/redis/redis_cluster_lb.h @@ -197,7 +197,7 @@ class RedisClusterLoadBalancerFactory : public ClusterSlotUpdateCallBack, }; absl::Mutex mutex_; - SlotArraySharedPtr slot_array_ GUARDED_BY(mutex_); + SlotArraySharedPtr slot_array_ ABSL_GUARDED_BY(mutex_); ClusterSlotsSharedPtr current_cluster_slot_; ShardVectorSharedPtr shard_vector_; Runtime::RandomGenerator& random_; diff --git a/source/extensions/common/redis/cluster_refresh_manager_impl.h b/source/extensions/common/redis/cluster_refresh_manager_impl.h index eaef07799cf8..a62db60327f1 100644 --- a/source/extensions/common/redis/cluster_refresh_manager_impl.h +++ b/source/extensions/common/redis/cluster_refresh_manager_impl.h @@ -93,7 +93,7 @@ class ClusterRefreshManagerImpl : public ClusterRefreshManager, Event::Dispatcher& main_thread_dispatcher_; Upstream::ClusterManager& cm_; TimeSource& time_source_; - std::map info_map_ GUARDED_BY(map_mutex_); + std::map info_map_ ABSL_GUARDED_BY(map_mutex_); Thread::MutexBasicLockable map_mutex_; }; diff --git a/source/extensions/filters/http/cache/simple_http_cache/simple_http_cache.h b/source/extensions/filters/http/cache/simple_http_cache/simple_http_cache.h index ba3851142874..453deda6fed1 100644 --- a/source/extensions/filters/http/cache/simple_http_cache/simple_http_cache.h +++ b/source/extensions/filters/http/cache/simple_http_cache/simple_http_cache.h @@ -33,7 +33,7 @@ class SimpleHttpCache : public HttpCache { void insert(const Key& key, Http::ResponseHeaderMapPtr&& response_headers, std::string&& body); absl::Mutex mutex_; - absl::flat_hash_map map_ GUARDED_BY(mutex_); + absl::flat_hash_map map_ ABSL_GUARDED_BY(mutex_); }; } // 
namespace Cache diff --git a/source/extensions/quic_listeners/quiche/platform/flags_impl.h b/source/extensions/quic_listeners/quiche/platform/flags_impl.h index 22aca0ef995f..5db939925510 100644 --- a/source/extensions/quic_listeners/quiche/platform/flags_impl.h +++ b/source/extensions/quic_listeners/quiche/platform/flags_impl.h @@ -91,7 +91,7 @@ template class TypedFlag : public Flag { private: mutable absl::Mutex mutex_; - T value_ GUARDED_BY(mutex_); + T value_ ABSL_GUARDED_BY(mutex_); T default_value_; }; diff --git a/source/extensions/quic_listeners/quiche/platform/quic_mutex_impl.h b/source/extensions/quic_listeners/quiche/platform/quic_mutex_impl.h index 15297625deef..c3759e47d560 100644 --- a/source/extensions/quic_listeners/quiche/platform/quic_mutex_impl.h +++ b/source/extensions/quic_listeners/quiche/platform/quic_mutex_impl.h @@ -24,27 +24,32 @@ namespace quic { #define QUIC_ASSERT_SHARED_LOCK_IMPL ABSL_ASSERT_SHARED_LOCK // A class wrapping a non-reentrant mutex. -class LOCKABLE QUIC_EXPORT_PRIVATE QuicLockImpl { +class QUIC_LOCKABLE_IMPL QUIC_EXPORT_PRIVATE QuicLockImpl { public: QuicLockImpl() = default; QuicLockImpl(const QuicLockImpl&) = delete; QuicLockImpl& operator=(const QuicLockImpl&) = delete; // Block until mu_ is free, then acquire it exclusively. - void WriterLock() EXCLUSIVE_LOCK_FUNCTION() { mu_.WriterLock(); } + // NOLINTNEXTLINE(readability-identifier-naming) + void WriterLock() QUIC_EXCLUSIVE_LOCK_FUNCTION_IMPL() { mu_.WriterLock(); } // Release mu_. Caller must hold it exclusively. - void WriterUnlock() UNLOCK_FUNCTION() { mu_.WriterUnlock(); } + // NOLINTNEXTLINE(readability-identifier-naming) + void WriterUnlock() QUIC_UNLOCK_FUNCTION_IMPL() { mu_.WriterUnlock(); } // Block until mu_ is free or shared, then acquire a share of it. - void ReaderLock() SHARED_LOCK_FUNCTION() { mu_.ReaderLock(); } + // NOLINTNEXTLINE(readability-identifier-naming) + void ReaderLock() QUIC_SHARED_LOCK_FUNCTION_IMPL() { mu_.ReaderLock(); } // Release mu_. Caller could hold it in shared mode. - void ReaderUnlock() UNLOCK_FUNCTION() { mu_.ReaderUnlock(); } + // NOLINTNEXTLINE(readability-identifier-naming) + void ReaderUnlock() QUIC_UNLOCK_FUNCTION_IMPL() { mu_.ReaderUnlock(); } // Returns immediately if current thread holds mu_ in at least shared // mode. Otherwise, reports an error by crashing with a diagnostic. - void AssertReaderHeld() const ASSERT_SHARED_LOCK() { mu_.AssertReaderHeld(); } + // NOLINTNEXTLINE(readability-identifier-naming) + void AssertReaderHeld() const QUIC_ASSERT_SHARED_LOCK_IMPL() { mu_.AssertReaderHeld(); } private: absl::Mutex mu_; diff --git a/source/server/hot_restart_impl.h b/source/server/hot_restart_impl.h index bec2159fc987..9b91e892d104 100644 --- a/source/server/hot_restart_impl.h +++ b/source/server/hot_restart_impl.h @@ -58,7 +58,7 @@ class ProcessSharedMutex : public Thread::BasicLockable { public: ProcessSharedMutex(pthread_mutex_t& mutex) : mutex_(mutex) {} - void lock() EXCLUSIVE_LOCK_FUNCTION() override { + void lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() override { // Deal with robust handling here. If the other process dies without unlocking, we are going // to die shortly but try to make sure that we can handle any signals, etc. that happen without // getting into a further messed up state. 
@@ -69,7 +69,7 @@ class ProcessSharedMutex : public Thread::BasicLockable { } } - bool tryLock() EXCLUSIVE_TRYLOCK_FUNCTION(true) override { + bool tryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) override { int rc = pthread_mutex_trylock(&mutex_); if (rc == EBUSY) { return false; @@ -83,7 +83,7 @@ class ProcessSharedMutex : public Thread::BasicLockable { return true; } - void unlock() UNLOCK_FUNCTION() override { + void unlock() ABSL_UNLOCK_FUNCTION() override { int rc = pthread_mutex_unlock(&mutex_); ASSERT(rc == 0); } diff --git a/test/extensions/tracers/opencensus/tracer_test.cc b/test/extensions/tracers/opencensus/tracer_test.cc index 7ee0a23184c2..6bc91183341a 100644 --- a/test/extensions/tracers/opencensus/tracer_test.cc +++ b/test/extensions/tracers/opencensus/tracer_test.cc @@ -76,7 +76,7 @@ class SpanCatcher : public SpanExporter::Handler { private: mutable absl::Mutex mu_; - std::vector spans_ GUARDED_BY(mu_); + std::vector spans_ ABSL_GUARDED_BY(mu_); }; // Use a Singleton SpanCatcher. diff --git a/test/integration/server.h b/test/integration/server.h index 155dbe036b86..d584dd61631b 100644 --- a/test/integration/server.h +++ b/test/integration/server.h @@ -233,7 +233,7 @@ class NotifyingAllocatorImpl : public Stats::AllocatorImpl { } virtual Stats::Counter* getCounterLockHeld(const std::string& name) - EXCLUSIVE_LOCKS_REQUIRED(mutex_) { + ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_) { auto it = counters_.find(name); if (it != counters_.end()) { return it->second; diff --git a/test/test_common/global.h b/test/test_common/global.h index 3628180bd6fc..872f81e520eb 100644 --- a/test/test_common/global.h +++ b/test/test_common/global.h @@ -88,7 +88,8 @@ class Globals { std::string describeActiveSingletonsHelper(); Thread::MutexBasicLockable map_mutex_; - absl::flat_hash_map> singleton_map_ GUARDED_BY(map_mutex_); + absl::flat_hash_map> + singleton_map_ ABSL_GUARDED_BY(map_mutex_); }; /** diff --git a/test/test_common/only_one_thread.h b/test/test_common/only_one_thread.h index 678e40b319a0..c726ae6d28fa 100644 --- a/test/test_common/only_one_thread.h +++ b/test/test_common/only_one_thread.h @@ -20,7 +20,7 @@ class OnlyOneThread { private: ThreadFactory& thread_factory_; - ThreadId thread_advancing_time_ GUARDED_BY(mutex_); + ThreadId thread_advancing_time_ ABSL_GUARDED_BY(mutex_); mutable MutexBasicLockable mutex_; }; diff --git a/test/test_common/simulated_time_system.cc b/test/test_common/simulated_time_system.cc index 92f7f81a8197..dcc0374423fd 100644 --- a/test/test_common/simulated_time_system.cc +++ b/test/test_common/simulated_time_system.cc @@ -71,9 +71,9 @@ class SimulatedTimeSystemHelper::Alarm : public Timer { return armed_ || base_timer_->enabled(); } - void disableTimerLockHeld() EXCLUSIVE_LOCKS_REQUIRED(time_system_.mutex_); + void disableTimerLockHeld() ABSL_EXCLUSIVE_LOCKS_REQUIRED(time_system_.mutex_); - void setTimeLockHeld(MonotonicTime time) EXCLUSIVE_LOCKS_REQUIRED(time_system_.mutex_) { + void setTimeLockHeld(MonotonicTime time) ABSL_EXCLUSIVE_LOCKS_REQUIRED(time_system_.mutex_) { time_ = time; } @@ -82,7 +82,7 @@ class SimulatedTimeSystemHelper::Alarm : public Timer { * typically via Dispatcher::run(). 
*/ void activateLockHeld(const ScopeTrackedObject* scope = nullptr) - EXCLUSIVE_LOCKS_REQUIRED(time_system_.mutex_) { + ABSL_EXCLUSIVE_LOCKS_REQUIRED(time_system_.mutex_) { ASSERT(armed_); armed_ = false; if (pending_) { @@ -101,7 +101,7 @@ class SimulatedTimeSystemHelper::Alarm : public Timer { base_timer_->enableTimer(duration, scope); } - MonotonicTime time() const EXCLUSIVE_LOCKS_REQUIRED(time_system_.mutex_) { + MonotonicTime time() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(time_system_.mutex_) { ASSERT(armed_); return time_; } @@ -125,16 +125,16 @@ class SimulatedTimeSystemHelper::Alarm : public Timer { TimerPtr base_timer_; SimulatedTimeSystemHelper& time_system_; - MonotonicTime time_ GUARDED_BY(time_system_.mutex_); + MonotonicTime time_ ABSL_GUARDED_BY(time_system_.mutex_); const uint64_t index_; - bool armed_ GUARDED_BY(time_system_.mutex_); - bool pending_ GUARDED_BY(time_system_.mutex_); + bool armed_ ABSL_GUARDED_BY(time_system_.mutex_); + bool pending_ ABSL_GUARDED_BY(time_system_.mutex_); }; // Compare two alarms, based on wakeup time and insertion order. Returns true if // a comes before b. bool SimulatedTimeSystemHelper::CompareAlarms::operator()(const Alarm* a, const Alarm* b) const - EXCLUSIVE_LOCKS_REQUIRED(a->time_system_.mutex_, b->time_system_.mutex_) { + ABSL_EXCLUSIVE_LOCKS_REQUIRED(a->time_system_.mutex_, b->time_system_.mutex_) { if (a != b) { if (a->time() < b->time()) { return true; @@ -241,7 +241,8 @@ void SimulatedTimeSystemHelper::advanceTimeWait(const Duration& duration) { waitForNoPendingLockHeld(); } -void SimulatedTimeSystemHelper::waitForNoPendingLockHeld() const EXCLUSIVE_LOCKS_REQUIRED(mutex_) { +void SimulatedTimeSystemHelper::waitForNoPendingLockHeld() const + ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_) { mutex_.Await(absl::Condition( +[](const uint32_t* pending_alarms) -> bool { return *pending_alarms == 0; }, &pending_alarms_)); @@ -249,7 +250,7 @@ void SimulatedTimeSystemHelper::waitForNoPendingLockHeld() const EXCLUSIVE_LOCKS Thread::CondVar::WaitStatus SimulatedTimeSystemHelper::waitFor( Thread::MutexBasicLockable& mutex, Thread::CondVar& condvar, - const Duration& duration) noexcept EXCLUSIVE_LOCKS_REQUIRED(mutex) { + const Duration& duration) noexcept ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex) { only_one_thread_.checkOneThread(); // TODO(#10568): This real-time polling delay should not be necessary. Without @@ -295,14 +296,15 @@ Thread::CondVar::WaitStatus SimulatedTimeSystemHelper::waitFor( return Thread::CondVar::WaitStatus::Timeout; } -MonotonicTime SimulatedTimeSystemHelper::alarmTimeLockHeld(Alarm* alarm) NO_THREAD_SAFETY_ANALYSIS { +MonotonicTime +SimulatedTimeSystemHelper::alarmTimeLockHeld(Alarm* alarm) ABSL_NO_THREAD_SAFETY_ANALYSIS { // We disable thread-safety analysis as the compiler can't detect that // alarm_->timeSystem() == this, so we must be holding the right mutex. ASSERT(&(alarm->timeSystem()) == this); return alarm->time(); } -void SimulatedTimeSystemHelper::alarmActivateLockHeld(Alarm* alarm) NO_THREAD_SAFETY_ANALYSIS { +void SimulatedTimeSystemHelper::alarmActivateLockHeld(Alarm* alarm) ABSL_NO_THREAD_SAFETY_ANALYSIS { // We disable thread-safety analysis as the compiler can't detect that // alarm_->timeSystem() == this, so we must be holding the right mutex. 
ASSERT(&(alarm->timeSystem()) == this); @@ -315,7 +317,7 @@ int64_t SimulatedTimeSystemHelper::nextIndex() { } void SimulatedTimeSystemHelper::addAlarmLockHeld( - Alarm* alarm, const std::chrono::microseconds& duration) NO_THREAD_SAFETY_ANALYSIS { + Alarm* alarm, const std::chrono::microseconds& duration) ABSL_NO_THREAD_SAFETY_ANALYSIS { ASSERT(&(alarm->timeSystem()) == this); alarm->setTimeLockHeld(monotonic_time_ + duration); alarms_.insert(alarm); diff --git a/test/test_common/simulated_time_system.h b/test/test_common/simulated_time_system.h index 04e411a171d3..e578bd767071 100644 --- a/test/test_common/simulated_time_system.h +++ b/test/test_common/simulated_time_system.h @@ -30,7 +30,7 @@ class SimulatedTimeSystemHelper : public TestTimeSystem { void advanceTimeAsync(const Duration& duration) override; Thread::CondVar::WaitStatus waitFor(Thread::MutexBasicLockable& mutex, Thread::CondVar& condvar, - const Duration& duration) noexcept EXCLUSIVE_LOCKS_REQUIRED(mutex) override; + const Duration& duration) noexcept ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex) override; // TimeSource SystemTime systemTime() override; @@ -78,10 +78,10 @@ class SimulatedTimeSystemHelper : public TestTimeSystem { * @param monotonic_time The desired new current time. */ void setMonotonicTimeLockHeld(const MonotonicTime& monotonic_time) - EXCLUSIVE_LOCKS_REQUIRED(mutex_); + ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_); - MonotonicTime alarmTimeLockHeld(Alarm* alarm) EXCLUSIVE_LOCKS_REQUIRED(mutex_); - void alarmActivateLockHeld(Alarm* alarm) EXCLUSIVE_LOCKS_REQUIRED(mutex_); + MonotonicTime alarmTimeLockHeld(Alarm* alarm) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_); + void alarmActivateLockHeld(Alarm* alarm) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_); // The simulation keeps a unique ID for each alarm to act as a deterministic // tie-breaker for alarm-ordering. @@ -89,25 +89,25 @@ class SimulatedTimeSystemHelper : public TestTimeSystem { // Adds/removes an alarm. void addAlarmLockHeld(Alarm*, const std::chrono::microseconds& duration) - EXCLUSIVE_LOCKS_REQUIRED(mutex_); - void removeAlarmLockHeld(Alarm*) EXCLUSIVE_LOCKS_REQUIRED(mutex_); + ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_); + void removeAlarmLockHeld(Alarm*) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_); // Keeps track of how many alarms have been activated but not yet called, // which helps waitFor() determine when to give up and declare a timeout. 
- void incPendingLockHeld() EXCLUSIVE_LOCKS_REQUIRED(mutex_) { ++pending_alarms_; } + void incPendingLockHeld() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_) { ++pending_alarms_; } void decPending() { absl::MutexLock lock(&mutex_); --pending_alarms_; } - void waitForNoPendingLockHeld() const EXCLUSIVE_LOCKS_REQUIRED(mutex_); + void waitForNoPendingLockHeld() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_); RealTimeSource real_time_source_; // Used to initialize monotonic_time_ and system_time_; - MonotonicTime monotonic_time_ GUARDED_BY(mutex_); - SystemTime system_time_ GUARDED_BY(mutex_); - AlarmSet alarms_ GUARDED_BY(mutex_); - uint64_t index_ GUARDED_BY(mutex_); + MonotonicTime monotonic_time_ ABSL_GUARDED_BY(mutex_); + SystemTime system_time_ ABSL_GUARDED_BY(mutex_); + AlarmSet alarms_ ABSL_GUARDED_BY(mutex_); + uint64_t index_ ABSL_GUARDED_BY(mutex_); mutable absl::Mutex mutex_; - uint32_t pending_alarms_ GUARDED_BY(mutex_); + uint32_t pending_alarms_ ABSL_GUARDED_BY(mutex_); Thread::OnlyOneThread only_one_thread_; }; diff --git a/test/test_common/test_time.h b/test/test_common/test_time.h index bb7ceeb06ca4..4b0beec54439 100644 --- a/test/test_common/test_time.h +++ b/test/test_common/test_time.h @@ -16,7 +16,7 @@ class TestRealTimeSystem : public TestTimeSystem { void advanceTimeWait(const Duration& duration) override; Thread::CondVar::WaitStatus waitFor(Thread::MutexBasicLockable& mutex, Thread::CondVar& condvar, - const Duration& duration) noexcept EXCLUSIVE_LOCKS_REQUIRED(mutex) override; + const Duration& duration) noexcept ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex) override; // Event::TimeSystem Event::SchedulerPtr createScheduler(Scheduler& base_scheduler) override { diff --git a/test/test_common/test_time_system.h b/test/test_common/test_time_system.h index fd501f7b3393..449ec8065391 100644 --- a/test/test_common/test_time_system.h +++ b/test/test_common/test_time_system.h @@ -58,11 +58,12 @@ class TestTimeSystem : public Event::TimeSystem { */ virtual Thread::CondVar::WaitStatus waitFor(Thread::MutexBasicLockable& mutex, Thread::CondVar& condvar, - const Duration& duration) noexcept EXCLUSIVE_LOCKS_REQUIRED(mutex) PURE; + const Duration& duration) noexcept ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex) PURE; template - Thread::CondVar::WaitStatus waitFor(Thread::MutexBasicLockable& mutex, Thread::CondVar& condvar, - const D& duration) noexcept EXCLUSIVE_LOCKS_REQUIRED(mutex) { + Thread::CondVar::WaitStatus + waitFor(Thread::MutexBasicLockable& mutex, Thread::CondVar& condvar, + const D& duration) noexcept ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex) { return waitFor(mutex, condvar, std::chrono::duration_cast(duration)); } }; @@ -91,7 +92,7 @@ class SingletonTimeSystemHelper { TestTimeSystem& timeSystem(const MakeTimeSystemFn& make_time_system); private: - std::unique_ptr time_system_ GUARDED_BY(mutex_); + std::unique_ptr time_system_ ABSL_GUARDED_BY(mutex_); Thread::MutexBasicLockable mutex_; }; @@ -109,7 +110,7 @@ template class DelegatingTestTimeSystemBase : public T Thread::CondVar::WaitStatus waitFor(Thread::MutexBasicLockable& mutex, Thread::CondVar& condvar, - const Duration& duration) noexcept EXCLUSIVE_LOCKS_REQUIRED(mutex) override { + const Duration& duration) noexcept ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex) override { return timeSystem().waitFor(mutex, condvar, duration); } From 4ec8dc0c5d63b0eaf3376813c1c4bd85ea75502e Mon Sep 17 00:00:00 2001 From: Joshua Marantz Date: Tue, 16 Jun 2020 13:48:16 -0400 Subject: [PATCH 361/909] stats: clear hot-restart parent contributions from child gauges when parent 
terminates. (#11301) Commit Message: A typical gauge tracks some in-progress count within an Envoy process, which is expected to rise and eventually fall to zero. During the hot-restart process, the child stats for Gauges with an ImportMode of Accumulate include the parent data. However, not all parent gauges get zeroed and clean out their contributions to the child gauges, so do so forcibly on parent termination. One exception to this rule is server.hot_restart_generation which should accumulate over the generations, by definition, so a mechanism was added to allow retention of accumulated gauges. It's possible others may need this as well. Arguably server.hot_restart_generation should be a counter and not a gauge, but due to the way counters transmit latched values over the generations, this will not work as is. Additional Description: Risk Level: medium -- will change behavior of some gauges. Testing: //test/... Docs Changes: n/a Release Notes: n/a Fixes #10806 Signed-off-by: Joshua Marantz --- include/envoy/stats/stats.h | 9 ++ source/common/stats/allocator_impl.cc | 17 ++-- source/common/stats/null_gauge.h | 1 + source/common/stats/stat_merger.cc | 28 +++++-- source/common/stats/stat_merger.h | 29 ++++++- source/exe/main.cc | 30 +------ source/exe/main_common.cc | 33 ++++++++ source/exe/main_common.h | 17 ++++ source/server/BUILD | 3 + source/server/hot_restarting_base.cc | 17 ++++ source/server/hot_restarting_base.h | 5 ++ source/server/hot_restarting_child.cc | 16 ++-- source/server/hot_restarting_child.h | 1 + source/server/hot_restarting_parent.cc | 13 +-- test/common/stats/stat_merger_test.cc | 3 +- test/integration/BUILD | 16 +++- test/integration/hotrestart_main.cc | 21 +++++ test/integration/hotrestart_test.sh | 111 +++++++++++++++++-------- test/integration/test_utility.sh | 36 ++++++++ test/mocks/stats/mocks.h | 1 + tools/spelling/spelling_dictionary.txt | 2 + 21 files changed, 310 insertions(+), 99 deletions(-) create mode 100644 test/integration/hotrestart_main.cc diff --git a/include/envoy/stats/stats.h b/include/envoy/stats/stats.h index eb7677bef742..c40e072c9d11 100644 --- a/include/envoy/stats/stats.h +++ b/include/envoy/stats/stats.h @@ -137,6 +137,15 @@ class Gauge : public Metric { virtual void sub(uint64_t amount) PURE; virtual uint64_t value() const PURE; + /** + * Sets a value from a hot-restart parent. This parent contribution must be + * kept distinct from the child value, so that when we erase the value it + * is not commingled with the child value, which may have been set() directly. + * + * @param parent_value the value from the hot-restart parent. + */ + virtual void setParentValue(uint64_t parent_value) PURE; + /** * @return the import mode, dictating behavior of the gauge across hot restarts. 
*/ diff --git a/source/common/stats/allocator_impl.cc b/source/common/stats/allocator_impl.cc index c4995e90956a..5e507db18522 100644 --- a/source/common/stats/allocator_impl.cc +++ b/source/common/stats/allocator_impl.cc @@ -172,21 +172,21 @@ class GaugeImpl : public StatsSharedImpl { // Stats::Gauge void add(uint64_t amount) override { - value_ += amount; + child_value_ += amount; flags_ |= Flags::Used; } void dec() override { sub(1); } void inc() override { add(1); } void set(uint64_t value) override { - value_ = value; + child_value_ = value; flags_ |= Flags::Used; } void sub(uint64_t amount) override { - ASSERT(value_ >= amount); + ASSERT(child_value_ >= amount); ASSERT(used() || amount == 0); - value_ -= amount; + child_value_ -= amount; } - uint64_t value() const override { return value_; } + uint64_t value() const override { return child_value_ + parent_value_; } ImportMode importMode() const override { if (flags_ & Flags::NeverImport) { @@ -217,15 +217,18 @@ class GaugeImpl : public StatsSharedImpl { // A previous revision of Envoy may have transferred a gauge that it // thought was Accumulate. But the new version thinks it's NeverImport, so // we clear the accumulated value. - value_ = 0; + parent_value_ = 0; flags_ &= ~Flags::Used; flags_ |= Flags::NeverImport; break; } } + void setParentValue(uint64_t value) override { parent_value_ = value; } + private: - std::atomic value_{0}; + std::atomic parent_value_{0}; + std::atomic child_value_{0}; }; class TextReadoutImpl : public StatsSharedImpl { diff --git a/source/common/stats/null_gauge.h b/source/common/stats/null_gauge.h index c3e7ccc46871..bbd8b2e50735 100644 --- a/source/common/stats/null_gauge.h +++ b/source/common/stats/null_gauge.h @@ -27,6 +27,7 @@ class NullGaugeImpl : public MetricImpl { void inc() override {} void dec() override {} void set(uint64_t) override {} + void setParentValue(uint64_t) override {} void sub(uint64_t) override {} uint64_t value() const override { return 0; } ImportMode importMode() const override { return ImportMode::NeverImport; } diff --git a/source/common/stats/stat_merger.cc b/source/common/stats/stat_merger.cc index e8e2c8b7b55b..870866a2615e 100644 --- a/source/common/stats/stat_merger.cc +++ b/source/common/stats/stat_merger.cc @@ -7,6 +7,18 @@ namespace Stats { StatMerger::StatMerger(Store& target_store) : temp_scope_(target_store.createScope("")) {} +StatMerger::~StatMerger() { + // By the time a parent exits, all its contributions to accumulated gauges + // should be zero. But depending on the timing of the stat-merger + // communication shutdown and other shutdown activities on the parent, the + // gauges may not all be zero yet. So simply erase all the parent + // contributions. 
+ for (StatName stat_name : parent_gauges_) { + Gauge& gauge = temp_scope_->gaugeFromStatName(stat_name, Gauge::ImportMode::Uninitialized); + gauge.setParentValue(0); + } +} + StatName StatMerger::DynamicContext::makeDynamicStatName(const std::string& name, const DynamicsMap& map) { auto iter = map.find(name); @@ -124,18 +136,16 @@ void StatMerger::mergeGauges(const Protobuf::Map& gauges, continue; } - uint64_t& parent_value_ref = parent_gauge_values_[gauge_ref.statName()]; - uint64_t old_parent_value = parent_value_ref; - uint64_t new_parent_value = gauge.second; - parent_value_ref = new_parent_value; - - // Note that new_parent_value may be less than old_parent_value, in which - // case 2s complement does its magic (-1 == 0xffffffffffffffff) and adding - // that to the gauge's current value works the same as subtraction. - gauge_ref.add(new_parent_value - old_parent_value); + const uint64_t new_parent_value = gauge.second; + parent_gauges_.insert(gauge_ref.statName()); + gauge_ref.setParentValue(new_parent_value); } } +void StatMerger::retainParentGaugeValue(Stats::StatName gauge_name) { + parent_gauges_.erase(gauge_name); +} + void StatMerger::mergeStats(const Protobuf::Map& counter_deltas, const Protobuf::Map& gauges, const DynamicsMap& dynamics) { diff --git a/source/common/stats/stat_merger.h b/source/common/stats/stat_merger.h index 2eb1ca2ff792..6dbf01d25aa3 100644 --- a/source/common/stats/stat_merger.h +++ b/source/common/stats/stat_merger.h @@ -41,20 +41,43 @@ class StatMerger { }; StatMerger(Stats::Store& target_store); + ~StatMerger(); - // Merge the values of stats_proto into stats_store. Counters are always straightforward - // addition, while gauges default to addition but have exceptions. + /** + * Merge the values of stats_proto into stats_store. Counters are always + * straightforward addition, while gauges default to addition but have + * exceptions. + * + * @param counter_deltas map of counter changes from parent + * @param gauges map of gauge changes from parent + * @param dynamics information about which segments of the names are dynamic. + */ void mergeStats(const Protobuf::Map& counter_deltas, const Protobuf::Map& gauges, const DynamicsMap& dynamics = DynamicsMap()); + /** + * Indicates that a gauge's value from the hot-restart parent should be + * retained, combining it with the child data. By default, data is transferred + * from parent gauges only during the hot-restart process, but the parent + * contribution is subtracted from the child when the parent terminates. This + * makes sense for gauges such as active connection counts, but is not + * appropriate for server.hot_restart_generation. + * + * This function must be called immediately prior to destruction of the + * StatMerger instance. + * + * @param gauge_name The gauge to be retained. + */ + void retainParentGaugeValue(Stats::StatName gauge_name); + private: void mergeCounters(const Protobuf::Map& counter_deltas, const DynamicsMap& dynamics_map); void mergeGauges(const Protobuf::Map& gauges, const DynamicsMap& dynamics_map); - StatNameHashMap parent_gauge_values_; + StatNameHashSet parent_gauges_; // A stats Scope for our in-the-merging-process counters to live in. Scopes conceptually hold // shared_ptrs to the stats that live in them, with the question of which stats are living in a // given scope determined by which stat names have been accessed via that scope. 
E.g., if you diff --git a/source/exe/main.cc b/source/exe/main.cc index 6afcd3fd3359..80cfc86f18b0 100644 --- a/source/exe/main.cc +++ b/source/exe/main.cc @@ -1,7 +1,5 @@ #include "exe/main_common.h" -#include "absl/debugging/symbolize.h" - // NOLINT(namespace-envoy) /** @@ -11,30 +9,4 @@ * deployment such as initializing signal handling. It calls main_common * after setting up command line options. */ -int main(int argc, char** argv) { -#ifndef __APPLE__ - // absl::Symbolize mostly works without this, but this improves corner case - // handling, such as running in a chroot jail. - absl::InitializeSymbolizer(argv[0]); -#endif - std::unique_ptr main_common; - - // Initialize the server's main context under a try/catch loop and simply return EXIT_FAILURE - // as needed. Whatever code in the initialization path that fails is expected to log an error - // message so the user can diagnose. - try { - main_common = std::make_unique(argc, argv); - } catch (const Envoy::NoServingException& e) { - return EXIT_SUCCESS; - } catch (const Envoy::MalformedArgvException& e) { - std::cerr << e.what() << std::endl; - return EXIT_FAILURE; - } catch (const Envoy::EnvoyException& e) { - std::cerr << e.what() << std::endl; - return EXIT_FAILURE; - } - - // Run the server listener loop outside try/catch blocks, so that unexpected exceptions - // show up as a core-dumps for easier diagnostics. - return main_common->run() ? EXIT_SUCCESS : EXIT_FAILURE; -} +int main(int argc, char** argv) { return Envoy::MainCommon::main(argc, argv); } diff --git a/source/exe/main_common.cc b/source/exe/main_common.cc index d0a1708bd2f4..245c8bace630 100644 --- a/source/exe/main_common.cc +++ b/source/exe/main_common.cc @@ -21,6 +21,7 @@ #include "server/options_impl.h" #include "server/server.h" +#include "absl/debugging/symbolize.h" #include "absl/strings/str_split.h" #ifdef ENVOY_HOT_RESTART @@ -201,4 +202,36 @@ std::string MainCommon::hotRestartVersion(bool hot_restart_enabled) { return "disabled"; } +int MainCommon::main(int argc, char** argv, PostServerHook hook) { +#ifndef __APPLE__ + // absl::Symbolize mostly works without this, but this improves corner case + // handling, such as running in a chroot jail. + absl::InitializeSymbolizer(argv[0]); +#endif + std::unique_ptr main_common; + + // Initialize the server's main context under a try/catch loop and simply return EXIT_FAILURE + // as needed. Whatever code in the initialization path that fails is expected to log an error + // message so the user can diagnose. + try { + main_common = std::make_unique(argc, argv); + Envoy::Server::Instance* server = main_common->server(); + if (server != nullptr && hook != nullptr) { + hook(*server); + } + } catch (const Envoy::NoServingException& e) { + return EXIT_SUCCESS; + } catch (const Envoy::MalformedArgvException& e) { + std::cerr << e.what() << std::endl; + return EXIT_FAILURE; + } catch (const Envoy::EnvoyException& e) { + std::cerr << e.what() << std::endl; + return EXIT_FAILURE; + } + + // Run the server listener loop outside try/catch blocks, so that unexpected exceptions + // show up as a core-dumps for easier diagnostics. + return main_common->run() ? EXIT_SUCCESS : EXIT_FAILURE; +} + } // namespace Envoy diff --git a/source/exe/main_common.h b/source/exe/main_common.h index fc5194903449..8f55253bf55e 100644 --- a/source/exe/main_common.h +++ b/source/exe/main_common.h @@ -94,6 +94,9 @@ class MainCommonBase { // go through MainCommonBase directly. class MainCommon { public: + // Hook to run after a server is created. 
+ using PostServerHook = std::function; + MainCommon(int argc, const char* const* argv); bool run() { return base_.run(); } // Only tests have a legitimate need for this today. @@ -121,6 +124,20 @@ class MainCommon { */ Server::Instance* server() { return base_.server(); } + /** + * Instantiates a MainCommon using default factory implements, parses args, + * and runs an event loop depending on the mode. + * + * Note that MainCommonBase can also be directly instantiated, providing the + * opportunity to override subsystem implementations for custom + * implementations. + * + * @param argc number of command-line args + * @param argv command-line argument array + * @param hook optional hook to run after a server is created + */ + static int main(int argc, char** argv, PostServerHook hook = nullptr); + private: #ifdef ENVOY_HANDLE_SIGNALS Envoy::SignalAction handle_sigs_; diff --git a/source/server/BUILD b/source/server/BUILD index e92701862188..673c96fc2e6b 100644 --- a/source/server/BUILD +++ b/source/server/BUILD @@ -140,10 +140,12 @@ envoy_cc_library( "//include/envoy/server:hot_restart_interface", "//include/envoy/server:instance_interface", "//include/envoy/server:options_interface", + "//include/envoy/stats:stats_interface", "//source/common/api:os_sys_calls_lib", "//source/common/common:assert_lib", "//source/common/common:utility_lib", "//source/common/network:utility_lib", + "//source/common/stats:utility_lib", ], ) @@ -167,6 +169,7 @@ envoy_cc_library( "//source/common/memory:stats_lib", "//source/common/stats:stat_merger_lib", "//source/common/stats:symbol_table_lib", + "//source/common/stats:utility_lib", ], ) diff --git a/source/server/hot_restarting_base.cc b/source/server/hot_restarting_base.cc index 5f3a33068df7..95d12d089226 100644 --- a/source/server/hot_restarting_base.cc +++ b/source/server/hot_restarting_base.cc @@ -2,6 +2,7 @@ #include "common/api/os_sys_calls_impl.h" #include "common/common/utility.h" +#include "common/stats/utility.h" namespace Envoy { namespace Server { @@ -224,5 +225,21 @@ std::unique_ptr HotRestartingBase::receiveHotRestartMessage(B return ret; } +Stats::Gauge& HotRestartingBase::hotRestartGeneration(Stats::Scope& scope) { + // Track the hot-restart generation. Using gauge's accumulate semantics, + // the increments will be combined across hot-restart. This may be useful + // at some point, though the main motivation for this stat is to enable + // an integration test showing that dynamic stat-names can be coalesced + // across hot-restarts. There's no other reason this particular stat-name + // needs to be created dynamically. + // + // Note also, this stat cannot currently be represented as a counter due to + // the way stats get latched on sink update. See the comment in + // InstanceUtil::flushMetricsToSinks. 
+ return Stats::Utility::gaugeFromElements(scope, + {Stats::DynamicName("server.hot_restart_generation")}, + Stats::Gauge::ImportMode::Accumulate); +} + } // namespace Server } // namespace Envoy diff --git a/source/server/hot_restarting_base.h b/source/server/hot_restarting_base.h index 1ef303d983e8..0e2b5abc4817 100644 --- a/source/server/hot_restarting_base.h +++ b/source/server/hot_restarting_base.h @@ -11,6 +11,7 @@ #include "envoy/common/platform.h" #include "envoy/server/hot_restart.h" #include "envoy/server/options.h" +#include "envoy/stats/scope.h" #include "common/common/assert.h" @@ -57,6 +58,10 @@ class HotRestartingBase { bool replyIsExpectedType(const envoy::HotRestartMessage* proto, envoy::HotRestartMessage::Reply::ReplyCase oneof_type) const; + // Returns a Gauge that tracks hot-restart generation, where every successive + // child increments this number. + static Stats::Gauge& hotRestartGeneration(Stats::Scope& scope); + private: void getPassedFdIfPresent(envoy::HotRestartMessage* out, msghdr* message); std::unique_ptr parseProtoAndResetState(); diff --git a/source/server/hot_restarting_child.cc b/source/server/hot_restarting_child.cc index f5eb8296c663..25cb46bcf0fd 100644 --- a/source/server/hot_restarting_child.cc +++ b/source/server/hot_restarting_child.cc @@ -80,12 +80,17 @@ void HotRestartingChild::sendParentTerminateRequest() { wrapped_request.mutable_request()->mutable_terminate(); sendHotRestartMessage(parent_address_, wrapped_request); parent_terminated_ = true; - // Once setting parent_terminated_ == true, we can send no more hot restart RPCs, and therefore - // receive no more responses, including stats. So, now safe to forget our stat transferral state. + + // Note that the 'generation' counter needs to retain the contribution from + // the parent. + stat_merger_->retainParentGaugeValue(hot_restart_generation_stat_name_); + + // Now it is safe to forget our stat transferral state. // - // This destruction is actually important far beyond memory efficiency. The scope-based temporary - // counter logic relies on the StatMerger getting destroyed once hot restart's stat merging is - // all done. (See stat_merger.h for details). + // This destruction is actually important far beyond memory efficiency. The + // scope-based temporary counter logic relies on the StatMerger getting + // destroyed once hot restart's stat merging is all done. (See stat_merger.h + // for details). 
stat_merger_.reset(); } @@ -93,6 +98,7 @@ void HotRestartingChild::mergeParentStats(Stats::Store& stats_store, const HotRestartMessage::Reply::Stats& stats_proto) { if (!stat_merger_) { stat_merger_ = std::make_unique(stats_store); + hot_restart_generation_stat_name_ = hotRestartGeneration(stats_store).statName(); } // Convert the protobuf for serialized dynamic spans into the structure diff --git a/source/server/hot_restarting_child.h b/source/server/hot_restarting_child.h index 08c3cc27359f..0fe656d06d10 100644 --- a/source/server/hot_restarting_child.h +++ b/source/server/hot_restarting_child.h @@ -27,6 +27,7 @@ class HotRestartingChild : HotRestartingBase, Logger::Loggable bool parent_terminated_{}; sockaddr_un parent_address_; std::unique_ptr stat_merger_{}; + Stats::StatName hot_restart_generation_stat_name_; }; } // namespace Server diff --git a/source/server/hot_restarting_parent.cc b/source/server/hot_restarting_parent.cc index 6022b204cf89..5049c8077f91 100644 --- a/source/server/hot_restarting_parent.cc +++ b/source/server/hot_restarting_parent.cc @@ -6,6 +6,7 @@ #include "common/network/utility.h" #include "common/stats/stat_merger.h" #include "common/stats/symbol_table_impl.h" +#include "common/stats/utility.h" #include "server/listener_impl.h" @@ -85,16 +86,8 @@ void HotRestartingParent::onSocketEvent() { void HotRestartingParent::shutdown() { socket_event_.reset(); } HotRestartingParent::Internal::Internal(Server::Instance* server) : server_(server) { - // Track the hot-restart generation. Using gauge's accumulate semantics, - // the increments will be combined across hot-restart. This may be useful - // at some point, though the main motivation for this stat is to enable - // an integration test showing that dynamic stat-names can be coalesced - // across hot-restarts. There's no other reason this particular stat-name - // needs to be created dynamically. 
- Stats::StatNameDynamicPool pool(server_->stats().symbolTable()); - Stats::Gauge& gauge = server_->stats().gaugeFromStatName( - pool.add("server.hot_restart_generation"), Stats::Gauge::ImportMode::Accumulate); - gauge.inc(); + Stats::Gauge& hot_restart_generation = hotRestartGeneration(server->stats()); + hot_restart_generation.inc(); } HotRestartMessage HotRestartingParent::Internal::shutdownAdmin() { diff --git a/test/common/stats/stat_merger_test.cc b/test/common/stats/stat_merger_test.cc index 1f37f0774f41..ee8b5bf65ae8 100644 --- a/test/common/stats/stat_merger_test.cc +++ b/test/common/stats/stat_merger_test.cc @@ -400,9 +400,10 @@ TEST_F(StatMergerThreadLocalTest, RetainImportModeAfterMerge) { Protobuf::Map gauges; gauges["mygauge"] = 789; stat_merger.mergeStats(counter_deltas, gauges); + EXPECT_EQ(789 + 42, gauge.value()); } + EXPECT_EQ(42, gauge.value()); EXPECT_EQ(Gauge::ImportMode::Accumulate, gauge.importMode()); - EXPECT_EQ(789 + 42, gauge.value()); } // Verify that if we create a never import stat in the child process which then gets merged diff --git a/test/integration/BUILD b/test/integration/BUILD index f54fa39cad81..a4df5652bf62 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -3,6 +3,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_fuzz_test", "envoy_cc_test", + "envoy_cc_test_binary", "envoy_cc_test_library", "envoy_package", "envoy_proto_library", @@ -245,13 +246,26 @@ envoy_cc_test( exports_files(["test_utility.sh"]) +envoy_cc_test_binary( + name = "hotrestart_main", + srcs = ["hotrestart_main.cc"], + external_deps = [ + "abseil_symbolize", + ], + stamped = True, + deps = [ + "//source/exe:envoy_main_common_lib", + "//source/exe:platform_impl_lib", + ], +) + envoy_sh_test( name = "hotrestart_test", size = "enormous", srcs = envoy_select_hot_restart([ "hotrestart_test.sh", ]), - cc_binary = ["//source/exe:envoy-static"], + cc_binary = [":hotrestart_main"], data = [ "test_utility.sh", "//test/config/integration:server_config_files", diff --git a/test/integration/hotrestart_main.cc b/test/integration/hotrestart_main.cc new file mode 100644 index 000000000000..75f72cc36436 --- /dev/null +++ b/test/integration/hotrestart_main.cc @@ -0,0 +1,21 @@ +#include "common/stats/utility.h" + +#include "exe/main_common.h" + +// NOLINT(namespace-envoy) + +/** + * Custom main() for hotrestart_test. This should be identical to + * source/exe/main.cc, except for the registration and increment of a new gauge + * specifically for hot_restart.test.sh. + */ +int main(int argc, char** argv) { + return Envoy::MainCommon::main(argc, argv, [](Envoy::Server::Instance& server) { + // Creates a gauge that will be incremented once and then never touched. This is + // for testing parent-gauge accumulation in hot_restart_test.sh. + Envoy::Stats::Utility::gaugeFromElements(server.stats(), + {Envoy::Stats::DynamicName("hotrestart_test_gauge")}, + Envoy::Stats::Gauge::ImportMode::Accumulate) + .inc(); + }); +} diff --git a/test/integration/hotrestart_test.sh b/test/integration/hotrestart_test.sh index 7048f4fb351d..8a3051f589d2 100755 --- a/test/integration/hotrestart_test.sh +++ b/test/integration/hotrestart_test.sh @@ -1,5 +1,10 @@ #!/bin/bash +# For this test we use a slightly modiified test binary, based on +# source/exe/enovy-static. If this starts failing to run or build, ensure that +# source/exe/main.cc and ./hotrestart_main.cc have not diverged except for +# adding the new gauge. 
+export ENVOY_BIN="${TEST_SRCDIR}"/envoy/test/integration/hotrestart_main source "$TEST_SRCDIR/envoy/test/integration/test_utility.sh" # TODO(htuch): In this test script, we are duplicating work done in test_environment.cc via sed. @@ -82,8 +87,9 @@ function run_testsuite() { local BASE_ID_PATH=$(mktemp 'envoy_test_base_id.XXXXXX') echo "Selected dynamic base id path ${BASE_ID_PATH}" - # Now start the real server, hot restart it twice, and shut it all down as a basic hot restart - # sanity test. + # Now start the real server, hot restart it twice, and shut it all down as a + # basic hot restart sanity test. We expect SERVER_0 to exit quickly when + # SERVER_2 starts, and are not relying on timeouts. start_test Starting epoch 0 ADMIN_ADDRESS_PATH_0="${TEST_TMPDIR}"/admin.0."${TEST_INDEX}".address run_in_background_saving_pid "${ENVOY_BIN}" -c "${HOT_RESTART_JSON}" \ @@ -100,7 +106,7 @@ function run_testsuite() { echo "Selected dynamic base id ${BASE_ID}" - FIRST_SERVER_PID=$BACKGROUND_PID + SERVER_0_PID=$BACKGROUND_PID start_test Updating original config listener addresses sleep 3 @@ -112,8 +118,8 @@ function run_testsuite() { # Send SIGUSR1 signal to the first server, this should not kill it. Also send SIGHUP which should # get eaten. echo "Sending SIGUSR1/SIGHUP to first server" - kill -SIGUSR1 ${FIRST_SERVER_PID} - kill -SIGHUP ${FIRST_SERVER_PID} + kill -SIGUSR1 ${SERVER_0_PID} + kill -SIGHUP ${SERVER_0_PID} sleep 3 disableHeapCheck @@ -143,29 +149,40 @@ function run_testsuite() { check [ "${ADMIN_HOT_RESTART_VERSION}" = "${CLI_HOT_RESTART_VERSION}" ] start_test Checking server.hot_restart_generation 1 - GENERATION_0=$(curl -sg http://${ADMIN_ADDRESS_0}/stats | grep server.hot_restart_generation) - check [ "$GENERATION_0" = "server.hot_restart_generation: 1" ]; + GENERATION_0=$(scrape_stat "${ADMIN_ADDRESS_0}" "server.hot_restart_generation") + check [ "$GENERATION_0" = "1" ]; # Verify we can see server.live in the admin port. - SERVER_LIVE_0=$(curl -sg http://${ADMIN_ADDRESS_0}/stats | grep server.live) - check [ "$SERVER_LIVE_0" = "server.live: 1" ]; + SERVER_LIVE_0=$(scrape_stat "${ADMIN_ADDRESS_0}" "server.live") + check [ "$SERVER_LIVE_0" = "1" ]; + + # Capture the value of test_gauge from the initial parent: it should be 1. + TEST_GAUGE_0=$(scrape_stat "${ADMIN_ADDRESS_0}" "hotrestart_test_gauge") + check [ "$TEST_GAUGE_0" = "1" ]; enableHeapCheck - start_test Starting epoch 1 ADMIN_ADDRESS_PATH_1="${TEST_TMPDIR}"/admin.1."${TEST_INDEX}".address run_in_background_saving_pid "${ENVOY_BIN}" -c "${UPDATED_HOT_RESTART_JSON}" \ --restart-epoch 1 --base-id "${BASE_ID}" --service-cluster cluster --service-node node \ --use-fake-symbol-table "$FAKE_SYMBOL_TABLE" --admin-address-path "${ADMIN_ADDRESS_PATH_1}" - SECOND_SERVER_PID=$BACKGROUND_PID + SERVER_1_PID=$BACKGROUND_PID # Wait for stat flushing sleep 7 ADMIN_ADDRESS_1=$(cat "${ADMIN_ADDRESS_PATH_1}") - SERVER_LIVE_1=$(curl -sg http://${ADMIN_ADDRESS_1}/stats | grep server.live) - check [ "$SERVER_LIVE_1" = "server.live: 1" ]; + SERVER_LIVE_1=$(scrape_stat "${ADMIN_ADDRESS_1}" "server.live") + check [ "$SERVER_LIVE_1" = "1" ]; + + # Check to see that SERVER_1 accumulates the test_gauge value from + # SERVER_0. This will be erased once SERVER_0 terminates. + if [ "$TEST_GAUGE_0" != 0 ]; then + start_test Checking that the hotrestart_test_gauge incorporates SERVER_0 and SERVER_1.
+ TEST_GAUGE_1=$(scrape_stat "${ADMIN_ADDRESS_1}" "hotrestart_test_gauge") + check [ "$TEST_GAUGE_1" = "2" ] + fi start_test Checking that listener addresses have not changed HOT_RESTART_JSON_1="${TEST_TMPDIR}"/hot_restart.1."${TEST_INDEX}".yaml @@ -174,18 +191,58 @@ function run_testsuite() { CONFIG_DIFF=$(diff "${UPDATED_HOT_RESTART_JSON}" "${HOT_RESTART_JSON_1}") [[ -z "${CONFIG_DIFF}" ]] + # Send SIGUSR1 signal to the second server, this should not kill it, and + # we prove that by checking its stats after having sent it a signal. + start_test Sending SIGUSR1 to SERVER_1. + kill -SIGUSR1 ${SERVER_1_PID} + sleep 3 + start_test Checking server.hot_restart_generation 2 - GENERATION_1=$(curl -sg http://${ADMIN_ADDRESS_1}/stats | grep server.hot_restart_generation) - check [ "$GENERATION_1" = "server.hot_restart_generation: 2" ]; + GENERATION_1=$(scrape_stat "${ADMIN_ADDRESS_1}" "server.hot_restart_generation") + check [ "$GENERATION_1" = "2" ]; ADMIN_ADDRESS_PATH_2="${TEST_TMPDIR}"/admin.2."${TEST_INDEX}".address start_test Starting epoch 2 run_in_background_saving_pid "${ENVOY_BIN}" -c "${UPDATED_HOT_RESTART_JSON}" \ --restart-epoch 2 --base-id "${BASE_ID}" --service-cluster cluster --service-node node \ - --use-fake-symbol-table "$FAKE_SYMBOL_TABLE" --admin-address-path "${ADMIN_ADDRESS_PATH_2}" + --use-fake-symbol-table "$FAKE_SYMBOL_TABLE" --admin-address-path "${ADMIN_ADDRESS_PATH_2}" \ + --parent-shutdown-time-s 3 - THIRD_SERVER_PID=$BACKGROUND_PID - sleep 3 + SERVER_2_PID=$BACKGROUND_PID + + # Now wait for SERVER_0 to exit. It should occur immediately when SERVER_2 starts, as + # SERVER_1 will terminate SERVER_0 when it becomes the parent. + start_test Waiting for epoch 0 to finish. + echo time wait ${SERVER_0_PID} + time wait ${SERVER_0_PID} + [[ $? == 0 ]] + + # Then wait for SERVER_1 to exit, which should happen within a few seconds + # due to '--parent-shutdown-time-s 3' on SERVER_2. + start_test Waiting for epoch 1 to finish. + echo time wait ${SERVER_1_PID} + time wait ${SERVER_1_PID} + [[ $? == 0 ]] + + # This tests that we are retaining the generation count. For most Gauges, + # we erase the parent contribution when the parent exits, but + # server.hot_restart_generation is excluded. Commenting out the call to + # stat_merger_->retainParentGaugeValue(hot_restart_generation_stat_name_) + # in source/server/hot_restarting_child.cc results in this test failing, + # with the generation being decremented back to 1. + start_test Checking server.hot_restart_generation 3 + ADMIN_ADDRESS_2=$(cat "${ADMIN_ADDRESS_PATH_2}") + GENERATION_2=$(scrape_stat "${ADMIN_ADDRESS_2}" "server.hot_restart_generation") + check [ "$GENERATION_2" = "3" ]; + + # Check to see that SERVER_2's test_gauge value reverts back to 1, since + # its parents have now exited and we have erased their gauge contributions. + start_test Check that the hotrestart_test_gauge reported in SERVER_2 excludes parent contribution + wait_status=$(wait_for_stat "$ADMIN_ADDRESS_2" "hotrestart_test_gauge" -eq 1 5) + echo $wait_status + if [[ "$wait_status" != success* ]]; then + handle_failure timeout + fi start_test Checking that listener addresses have not changed HOT_RESTART_JSON_2="${TEST_TMPDIR}"/hot_restart.2."${TEST_INDEX}".yaml @@ -194,24 +251,10 @@ function run_testsuite() { CONFIG_DIFF=$(diff "${UPDATED_HOT_RESTART_JSON}" "${HOT_RESTART_JSON_2}") [[ -z "${CONFIG_DIFF}" ]] - # First server should already be gone. - start_test Waiting for epoch 0 - wait ${FIRST_SERVER_PID} - [[ $?
== 0 ]] - - #Send SIGUSR1 signal to the second server, this should not kill it - start_test Sending SIGUSR1 to the second server - kill -SIGUSR1 ${SECOND_SERVER_PID} - sleep 3 - # Now term the last server, and the other one should exit also. start_test Killing and waiting for epoch 2 - kill ${THIRD_SERVER_PID} - wait ${THIRD_SERVER_PID} - [[ $? == 0 ]] - - start_test Waiting for epoch 1 - wait ${SECOND_SERVER_PID} + kill ${SERVER_2_PID} + wait ${SERVER_2_PID} [[ $? == 0 ]] } diff --git a/test/integration/test_utility.sh b/test/integration/test_utility.sh index ee5ab5316c75..5b872f59e0c6 100644 --- a/test/integration/test_utility.sh +++ b/test/integration/test_utility.sh @@ -75,4 +75,40 @@ enableHeapCheck () { HEAPCHECK=${SAVED_HEAPCHECK} } +# Scrapes a stat value from an admin port. +scrape_stat() { + local ADMIN_ADDRESS="$1" + local STAT_NAME="$2" + curl -sg "$ADMIN_ADDRESS"/stats | grep "^${STAT_NAME}: " | cut -f2 -d" " +} + +milliseconds() { + local nanos=$(date +%N | sed 's/^0*//') + local seconds=$(date +%s) + echo $((1000*seconds + nanos/1000000)) +} + +wait_for_stat() { + local ADMIN_ADDRESS="$1" + local STAT_NAME="$2" + local OP="$3" + local VALUE="$4" + local TIMEOUT_SEC="$5" + local start_time_ms=$(milliseconds) + local end_time=$((SECONDS + TIMEOUT_SEC)) + local ret="" + while [ "$ret" = "" ]; do + local stat=$(scrape_stat "$ADMIN_ADDRESS" "$STAT_NAME") + if [ $stat $OP $VALUE ]; then + local end_time_ms=$(milliseconds) + ret="success: $STAT_NAME reached $stat after $((end_time_ms - start_time_ms)) ms" + elif [ "$SECONDS" -gt "$end_time" ]; then + ret="timeout: waiting $TIMEOUT_SEC seconds for $STAT_NAME=$stat to reach $VALUE" + else + sleep 0.1 + fi + done + echo "$ret" +} + [[ -z "${ENVOY_BIN}" ]] && ENVOY_BIN="${TEST_SRCDIR}"/envoy/source/exe/envoy-static diff --git a/test/mocks/stats/mocks.h b/test/mocks/stats/mocks.h index f5337f27aeaa..272603041d98 100644 --- a/test/mocks/stats/mocks.h +++ b/test/mocks/stats/mocks.h @@ -187,6 +187,7 @@ class MockGauge : public MockStatWithRefcount { MOCK_METHOD(void, dec, ()); MOCK_METHOD(void, inc, ()); MOCK_METHOD(void, set, (uint64_t value)); + MOCK_METHOD(void, setParentValue, (uint64_t parent_value)); MOCK_METHOD(void, sub, (uint64_t amount)); MOCK_METHOD(void, mergeImportMode, (ImportMode)); MOCK_METHOD(bool, used, (), (const)); diff --git a/tools/spelling/spelling_dictionary.txt b/tools/spelling/spelling_dictionary.txt index de6d46a15875..cf535b89380c 100644 --- a/tools/spelling/spelling_dictionary.txt +++ b/tools/spelling/spelling_dictionary.txt @@ -295,6 +295,7 @@ STL STRLEN STS SVG +Symbolizer TBD TCLAP TCP @@ -643,6 +644,7 @@ hoc hostname hostnames hostset +hotrestart hrefs huffman hystrix From 3969cde882687fad8a7bf355d500bc82b7ea84d9 Mon Sep 17 00:00:00 2001 From: Snow Pettersen Date: Tue, 16 Jun 2020 15:02:12 -0400 Subject: [PATCH 362/909] http: make it possible to match upstream alpn with selected protocol (#10812) Updates the HTTP connection pools to specify the upstream ALPN when one is not specified through the TLS context. This simplifies configuration as Envoy can reuse the protocol selection logic to determine what ALPN to negotiate on upstream connections.
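As a rough, self-contained sketch of the idea (the names below are invented for illustration and are not the real Envoy types; the actual change lives in conn_pool_base.cc and the TransportSocketOptions interface further down), the pool maps the codec protocol it has already selected to an ALPN string and attaches it as a low-precedence fallback, never clobbering an explicitly configured ALPN:

// Standalone illustration only; stand-in types, not Envoy's.
#include <iostream>
#include <optional>
#include <string>

enum class Protocol { Http11, Http2 };

// Stand-in for per-connection transport socket options: an explicit ALPN
// override always wins, otherwise the pool-provided fallback is used.
struct SocketOptions {
  std::optional<std::string> alpn_override;  // e.g. from the upstream TLS config
  std::optional<std::string> alpn_fallback;  // filled in by the connection pool
};

std::string alpnForProtocol(Protocol protocol) {
  return protocol == Protocol::Http2 ? "h2" : "http/1.1";
}

SocketOptions wrapWithFallback(SocketOptions options, Protocol protocol) {
  // Only supply a fallback; never overwrite an explicitly configured ALPN.
  if (!options.alpn_override && !options.alpn_fallback) {
    options.alpn_fallback = alpnForProtocol(protocol);
  }
  return options;
}

int main() {
  const SocketOptions opts = wrapWithFallback({}, Protocol::Http2);
  std::cout << "ALPN fallback: " << opts.alpn_fallback.value_or("none") << "\n";  // prints "h2"
  return 0;
}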
Signed-off-by: Snow Pettersen --- docs/root/version_history/current.rst | 1 + include/envoy/network/transport_socket.h | 16 + source/common/http/conn_manager_utility.cc | 4 +- source/common/http/conn_pool_base.cc | 42 ++- source/common/http/conn_pool_base.h | 3 +- source/common/http/http1/conn_pool.cc | 2 +- source/common/http/http2/codec_impl.h | 2 - source/common/http/http2/conn_pool.cc | 2 +- source/common/http/utility.h | 14 + .../network/transport_socket_options_impl.cc | 32 +- .../network/transport_socket_options_impl.h | 35 +- source/common/runtime/runtime_features.cc | 1 + .../filters/listener/http_inspector/BUILD | 1 + .../listener/http_inspector/http_inspector.cc | 7 +- .../transport_sockets/tls/context_impl.cc | 37 +- .../transport_sockets/tls/context_impl.h | 1 + .../grpc_client_integration_test_harness.h | 2 +- test/common/http/conn_manager_utility_test.cc | 6 +- test/common/http/http1/conn_pool_test.cc | 343 ++++++++++-------- test/common/http/http2/BUILD | 1 + test/common/http/http2/conn_pool_test.cc | 121 ++++-- .../transport_socket_options_impl_test.cc | 7 +- test/config/utility.cc | 4 +- .../http_inspector/http_inspector_test.cc | 22 +- .../tls_inspector/tls_inspector_benchmark.cc | 3 +- .../tls_inspector/tls_inspector_test.cc | 8 +- .../transport_sockets/tls/ssl_socket_test.cc | 27 +- test/integration/BUILD | 17 + .../alpn_selection_integration_test.cc | 213 +++++++++++ test/integration/integration.cc | 2 +- .../sds_dynamic_integration_test.cc | 5 +- .../sds_static_integration_test.cc | 2 +- test/integration/ssl_utility.cc | 5 +- test/server/listener_manager_impl_test.cc | 63 +++- 34 files changed, 787 insertions(+), 264 deletions(-) create mode 100644 test/integration/alpn_selection_integration_test.cc diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index a33646b66334..8cb2ef04c508 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -21,6 +21,7 @@ Minor Behavior Changes * http: stopped overwriting `date` response headers. Responses without a `date` header will still have the header properly set. This behavior can be temporarily reverted by setting `envoy.reloadable_features.preserve_upstream_date` to false. * http: stopped adding a synthetic path to CONNECT requests, meaning unconfigured CONNECT requests will now return 404 instead of 403. This behavior can be temporarily reverted by setting `envoy.reloadable_features.stop_faking_paths` to false. * http: stopped allowing upstream 1xx or 204 responses with Transfer-Encoding or non-zero Content-Length headers. Content-Length of 0 is allowed, but stripped. This behavior can be temporarily reverted by setting `envoy.reloadable_features.strict_1xx_and_204_response_headers` to false. +* http: upstream connections will now automatically set ALPN when this value is not explicitly set elsewhere (e.g. on the upstream TLS config). This behavior may be temporarily reverted by setting runtime feature `envoy.reloadable_features.http_default_alpn` to false. * router: allow retries of streaming or incomplete requests. This removes stat `rq_retry_skipped_request_not_complete`. * router: allow retries by default when upstream responds with :ref:`x-envoy-overloaded `. 
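To make the resulting ALPN precedence concrete before the interface changes below, here is a minimal, self-contained sketch (the helper name and signature are invented for illustration; the real logic lives in ClientContextImpl::newSsl later in this patch): a per-connection override wins, then the ALPN statically configured on the upstream TLS context, and only then the fallback injected by the HTTP connection pool.

// Standalone illustration of the precedence described in this patch.
#include <iostream>
#include <optional>
#include <string>
#include <vector>

std::vector<std::string> resolveAlpn(const std::vector<std::string>& option_override,
                                     const std::vector<std::string>& tls_context_alpn,
                                     const std::optional<std::string>& option_fallback) {
  if (!option_override.empty()) {
    return option_override;  // 1. option-provided ALPN override
  }
  if (!tls_context_alpn.empty()) {
    return tls_context_alpn;  // 2. ALPN statically configured in the upstream TLS context
  }
  if (option_fallback.has_value()) {
    return {*option_fallback};  // 3. option-provided ALPN fallback from the connection pool
  }
  return {};  // no ALPN negotiated
}

int main() {
  const auto alpn = resolveAlpn({}, {}, std::string("h2"));
  std::cout << (alpn.empty() ? std::string("none") : alpn.front()) << "\n";  // prints "h2"
  return 0;
}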
diff --git a/include/envoy/network/transport_socket.h b/include/envoy/network/transport_socket.h index e303248fd471..b8f1063ad4bc 100644 --- a/include/envoy/network/transport_socket.h +++ b/include/envoy/network/transport_socket.h @@ -180,10 +180,26 @@ class TransportSocketOptions { virtual const std::vector& verifySubjectAltNameListOverride() const PURE; /** + * The application protocols to use when negotiating an upstream connection. When an application + * protocol override is provided, it will *always* be used. * @return the optional overridden application protocols. */ virtual const std::vector& applicationProtocolListOverride() const PURE; + /** + * The application protocol to use when negotiating an upstream connection and no other + * application protocol has been configured. Both + * TransportSocketOptions::applicationProtocolListOverride and application protocols configured + * in the CommonTlsContext on the Cluster will take precedence. + * + * Note that this option is intended for intermediate code (e.g. the HTTP connection pools) to + * specify a default ALPN when no specific values are specified elsewhere. As such, providing a + * value here might not make sense prior to load balancing. + * @return the optional fallback for application protocols, for when they are not specified in the + * TLS configuration. + */ + virtual const absl::optional& applicationProtocolFallback() const PURE; + /** * @param vector of bytes to which the option should append hash key data that will be used * to separate connections based on the option. Any data already in the key vector must diff --git a/source/common/http/conn_manager_utility.cc b/source/common/http/conn_manager_utility.cc index 355949e4900d..65b5af861cc1 100644 --- a/source/common/http/conn_manager_utility.cc +++ b/source/common/http/conn_manager_utility.cc @@ -34,7 +34,7 @@ std::string ConnectionManagerUtility::determineNextProtocol(Network::Connection& // us the first few bytes of the HTTP/2 prefix since in all public cases we use SSL/ALPN. For // internal cases this should practically never happen. 
if (data.startsWith(Http2::CLIENT_MAGIC_PREFIX)) { - return Http2::ALPN_STRING; + return Utility::AlpnNames::get().Http2; } return ""; @@ -49,7 +49,7 @@ ServerConnectionPtr ConnectionManagerUtility::autoCreateCodec( uint32_t max_request_headers_kb, uint32_t max_request_headers_count, envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction headers_with_underscores_action) { - if (determineNextProtocol(connection, data) == Http2::ALPN_STRING) { + if (determineNextProtocol(connection, data) == Utility::AlpnNames::get().Http2) { Http2::CodecStats& stats = Http2::CodecStats::atomicGet(http2_codec_stats, scope); return std::make_unique( connection, callbacks, stats, http2_options, max_request_headers_kb, diff --git a/source/common/http/conn_pool_base.cc b/source/common/http/conn_pool_base.cc index 5b64e408969e..fb0668458153 100644 --- a/source/common/http/conn_pool_base.cc +++ b/source/common/http/conn_pool_base.cc @@ -1,16 +1,54 @@ #include "common/http/conn_pool_base.h" +#include "common/common/assert.h" +#include "common/http/utility.h" +#include "common/network/transport_socket_options_impl.h" +#include "common/runtime/runtime_features.h" #include "common/stats/timespan_impl.h" #include "common/upstream/upstream_impl.h" namespace Envoy { namespace Http { +Network::TransportSocketOptionsSharedPtr +wrapTransportSocketOptions(Network::TransportSocketOptionsSharedPtr transport_socket_options, + Protocol protocol) { + if (!Runtime::runtimeFeatureEnabled("envoy.reloadable_features.http_default_alpn")) { + return transport_socket_options; + } + + // If configured to do so, we override the ALPN to use for the upstream connection to match the + // selected protocol. + std::string alpn; + switch (protocol) { + case Http::Protocol::Http10: + NOT_REACHED_GCOVR_EXCL_LINE; + case Http::Protocol::Http11: + alpn = Http::Utility::AlpnNames::get().Http11; + break; + case Http::Protocol::Http2: + alpn = Http::Utility::AlpnNames::get().Http2; + break; + case Http::Protocol::Http3: + // TODO(snowp): Add once HTTP/3 upstream support is added. 
+ NOT_IMPLEMENTED_GCOVR_EXCL_LINE; + break; + } + + if (transport_socket_options) { + return std::make_shared( + std::move(alpn), transport_socket_options); + } else { + return std::make_shared( + "", std::vector{}, std::vector{}, std::move(alpn)); + } +} + ConnPoolImplBase::ConnPoolImplBase( Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, Event::Dispatcher& dispatcher, const Network::ConnectionSocket::OptionsSharedPtr& options, - const Network::TransportSocketOptionsSharedPtr& transport_socket_options) + const Network::TransportSocketOptionsSharedPtr& transport_socket_options, Protocol protocol) : host_(host), priority_(priority), dispatcher_(dispatcher), socket_options_(options), - transport_socket_options_(transport_socket_options) {} + transport_socket_options_(wrapTransportSocketOptions(transport_socket_options, protocol)) {} ConnPoolImplBase::~ConnPoolImplBase() { ASSERT(ready_clients_.empty()); diff --git a/source/common/http/conn_pool_base.h b/source/common/http/conn_pool_base.h index 53ba14f26151..8611418d3bb5 100644 --- a/source/common/http/conn_pool_base.h +++ b/source/common/http/conn_pool_base.h @@ -29,7 +29,8 @@ class ConnPoolImplBase : public ConnectionPool::Instance, ConnPoolImplBase(Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, Event::Dispatcher& dispatcher, const Network::ConnectionSocket::OptionsSharedPtr& options, - const Network::TransportSocketOptionsSharedPtr& transport_socket_options); + const Network::TransportSocketOptionsSharedPtr& transport_socket_options, + Protocol protocol); ~ConnPoolImplBase() override; // Closes and destroys all connections. This must be called in the destructor of diff --git a/source/common/http/http1/conn_pool.cc b/source/common/http/http1/conn_pool.cc index df4aec11f3e8..6db02bb8cdf5 100644 --- a/source/common/http/http1/conn_pool.cc +++ b/source/common/http/http1/conn_pool.cc @@ -27,7 +27,7 @@ ConnPoolImpl::ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSha const Network::ConnectionSocket::OptionsSharedPtr& options, const Network::TransportSocketOptionsSharedPtr& transport_socket_options) : ConnPoolImplBase(std::move(host), std::move(priority), dispatcher, options, - transport_socket_options), + transport_socket_options, Protocol::Http11), upstream_ready_timer_(dispatcher_.createTimer([this]() { upstream_ready_enabled_ = false; onUpstreamReady(); diff --git a/source/common/http/http2/codec_impl.h b/source/common/http/http2/codec_impl.h index 371252374f23..f2914433ffd0 100644 --- a/source/common/http/http2/codec_impl.h +++ b/source/common/http/http2/codec_impl.h @@ -31,8 +31,6 @@ namespace Envoy { namespace Http { namespace Http2 { -const std::string ALPN_STRING = "h2"; - // This is not the full client magic, but it's the smallest size that should be able to // differentiate between HTTP/1 and HTTP/2. 
const std::string CLIENT_MAGIC_PREFIX = "PRI * HTTP/2"; diff --git a/source/common/http/http2/conn_pool.cc b/source/common/http/http2/conn_pool.cc index 6107357e5158..7eaa83757342 100644 --- a/source/common/http/http2/conn_pool.cc +++ b/source/common/http/http2/conn_pool.cc @@ -17,7 +17,7 @@ ConnPoolImpl::ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSha const Network::ConnectionSocket::OptionsSharedPtr& options, const Network::TransportSocketOptionsSharedPtr& transport_socket_options) : ConnPoolImplBase(std::move(host), std::move(priority), dispatcher, options, - transport_socket_options) {} + transport_socket_options, Protocol::Http2) {} ConnPoolImpl::~ConnPoolImpl() { destructAllConnections(); } diff --git a/source/common/http/utility.h b/source/common/http/utility.h index 476eda1c6c7d..d6f055a9d551 100644 --- a/source/common/http/utility.h +++ b/source/common/http/utility.h @@ -30,6 +30,20 @@ namespace Utility { // TODO(#10878): Remove this. Http::Status exceptionToStatus(std::function dispatch, Buffer::Instance& data); + +/** + * Well-known HTTP ALPN values. + */ +class AlpnNameValues { +public: + const std::string Http10 = "http/1.0"; + const std::string Http11 = "http/1.1"; + const std::string Http2 = "h2"; + const std::string Http2c = "h2c"; +}; + +using AlpnNames = ConstSingleton; + } // namespace Utility } // namespace Http diff --git a/source/common/network/transport_socket_options_impl.cc b/source/common/network/transport_socket_options_impl.cc index 4e88d9812ea2..6d3ccf5ecea3 100644 --- a/source/common/network/transport_socket_options_impl.cc +++ b/source/common/network/transport_socket_options_impl.cc @@ -1,5 +1,6 @@ #include "common/network/transport_socket_options_impl.h" +#include #include #include #include @@ -13,22 +14,39 @@ namespace Envoy { namespace Network { -void TransportSocketOptionsImpl::hashKey(std::vector& key) const { - if (override_server_name_.has_value()) { - pushScalarToByteVector(StringUtil::CaseInsensitiveHash()(override_server_name_.value()), key); +namespace { +void commonHashKey(const TransportSocketOptions& options, std::vector& key) { + const auto& server_name_overide = options.serverNameOverride(); + if (server_name_overide.has_value()) { + pushScalarToByteVector(StringUtil::CaseInsensitiveHash()(server_name_overide.value()), key); } - if (!override_verify_san_list_.empty()) { - for (const auto& san : override_verify_san_list_) { + const auto& verify_san_list = options.verifySubjectAltNameListOverride(); + if (!verify_san_list.empty()) { + for (const auto& san : verify_san_list) { pushScalarToByteVector(StringUtil::CaseInsensitiveHash()(san), key); } } - if (!override_alpn_list_.empty()) { - for (const auto& protocol : override_alpn_list_) { + const auto& alpn_list = options.applicationProtocolListOverride(); + if (!alpn_list.empty()) { + for (const auto& protocol : alpn_list) { pushScalarToByteVector(StringUtil::CaseInsensitiveHash()(protocol), key); } } + const auto& alpn_fallback = options.applicationProtocolFallback(); + if (alpn_fallback.has_value()) { + pushScalarToByteVector(StringUtil::CaseInsensitiveHash()(*alpn_fallback), key); + } +} +} // namespace + +void AlpnDecoratingTransportSocketOptions::hashKey(std::vector& key) const { + commonHashKey(*this, key); +} + +void TransportSocketOptionsImpl::hashKey(std::vector& key) const { + commonHashKey(*this, key); } TransportSocketOptionsSharedPtr diff --git a/source/common/network/transport_socket_options_impl.h b/source/common/network/transport_socket_options_impl.h index 
341ab567e8ed..a181676db176 100644 --- a/source/common/network/transport_socket_options_impl.h +++ b/source/common/network/transport_socket_options_impl.h @@ -6,16 +6,43 @@ namespace Envoy { namespace Network { +// A wrapper around another TransportSocketOptions that overrides the ALPN fallback. +class AlpnDecoratingTransportSocketOptions : public TransportSocketOptions { +public: + AlpnDecoratingTransportSocketOptions(std::string&& alpn, + TransportSocketOptionsSharedPtr inner_options) + : alpn_fallback_(std::move(alpn)), inner_options_(std::move(inner_options)) {} + // Network::TransportSocketOptions + const absl::optional& serverNameOverride() const override { + return inner_options_->serverNameOverride(); + } + const std::vector& verifySubjectAltNameListOverride() const override { + return inner_options_->verifySubjectAltNameListOverride(); + } + const std::vector& applicationProtocolListOverride() const override { + return inner_options_->applicationProtocolListOverride(); + } + const absl::optional& applicationProtocolFallback() const override { + return alpn_fallback_; + } + void hashKey(std::vector& key) const override; + +private: + const absl::optional alpn_fallback_; + const TransportSocketOptionsSharedPtr inner_options_; +}; + class TransportSocketOptionsImpl : public TransportSocketOptions { public: TransportSocketOptionsImpl(absl::string_view override_server_name = "", std::vector&& override_verify_san_list = {}, - std::vector&& override_alpn = {}) + std::vector&& override_alpn = {}, + absl::optional&& fallback_alpn = {}) : override_server_name_(override_server_name.empty() ? absl::nullopt : absl::optional(override_server_name)), override_verify_san_list_{std::move(override_verify_san_list)}, - override_alpn_list_{std::move(override_alpn)} {} + override_alpn_list_{std::move(override_alpn)}, alpn_fallback_{std::move(fallback_alpn)} {} // Network::TransportSocketOptions const absl::optional& serverNameOverride() const override { @@ -27,12 +54,16 @@ class TransportSocketOptionsImpl : public TransportSocketOptions { const std::vector& applicationProtocolListOverride() const override { return override_alpn_list_; } + const absl::optional& applicationProtocolFallback() const override { + return alpn_fallback_; + } void hashKey(std::vector& key) const override; private: const absl::optional override_server_name_; const std::vector override_verify_san_list_; const std::vector override_alpn_list_; + const absl::optional alpn_fallback_; }; class TransportSocketOptionsUtility { diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc index 82495f8bb6c0..722e3fa31992 100644 --- a/source/common/runtime/runtime_features.cc +++ b/source/common/runtime/runtime_features.cc @@ -66,6 +66,7 @@ constexpr const char* runtime_features[] = { "envoy.reloadable_features.ext_authz_http_service_enable_case_sensitive_string_matcher", "envoy.reloadable_features.fix_upgrade_response", "envoy.reloadable_features.fixed_connection_close", + "envoy.reloadable_features.http_default_alpn", "envoy.reloadable_features.listener_in_place_filterchain_update", "envoy.reloadable_features.preserve_upstream_date", "envoy.reloadable_features.stop_faking_paths", diff --git a/source/extensions/filters/listener/http_inspector/BUILD b/source/extensions/filters/listener/http_inspector/BUILD index 70d097484943..87e808230bd1 100644 --- a/source/extensions/filters/listener/http_inspector/BUILD +++ b/source/extensions/filters/listener/http_inspector/BUILD @@ -24,6 +24,7 @@ envoy_cc_library( 
"//source/common/api:os_sys_calls_lib", "//source/common/common:minimal_logger_lib", "//source/common/http:headers_lib", + "//source/common/http:utility_lib", "//source/extensions/transport_sockets:well_known_names", ], ) diff --git a/source/extensions/filters/listener/http_inspector/http_inspector.cc b/source/extensions/filters/listener/http_inspector/http_inspector.cc index bb039f8cfe5d..b94e7b3322ff 100644 --- a/source/extensions/filters/listener/http_inspector/http_inspector.cc +++ b/source/extensions/filters/listener/http_inspector/http_inspector.cc @@ -8,6 +8,7 @@ #include "common/common/assert.h" #include "common/common/macros.h" #include "common/http/headers.h" +#include "common/http/utility.h" #include "extensions/transport_sockets/well_known_names.h" @@ -181,16 +182,16 @@ void Filter::done(bool success) { absl::string_view protocol; if (protocol_ == Http::Headers::get().ProtocolStrings.Http10String) { config_->stats().http10_found_.inc(); - protocol = "http/1.0"; + protocol = Http::Utility::AlpnNames::get().Http10; } else if (protocol_ == Http::Headers::get().ProtocolStrings.Http11String) { config_->stats().http11_found_.inc(); - protocol = "http/1.1"; + protocol = Http::Utility::AlpnNames::get().Http11; } else { ASSERT(protocol_ == "HTTP/2"); config_->stats().http2_found_.inc(); // h2 HTTP/2 over TLS, h2c HTTP/2 over TCP // TODO(yxue): use detected protocol from http inspector and support h2c token in HCM - protocol = "h2c"; + protocol = Http::Utility::AlpnNames::get().Http2c; } cb_->socket().setRequestedApplicationProtocols({protocol}); diff --git a/source/extensions/transport_sockets/tls/context_impl.cc b/source/extensions/transport_sockets/tls/context_impl.cc index 041c28b3da1f..ff8021f72558 100644 --- a/source/extensions/transport_sockets/tls/context_impl.cc +++ b/source/extensions/transport_sockets/tls/context_impl.cc @@ -865,6 +865,18 @@ ClientContextImpl::ClientContextImpl(Stats::Scope& scope, } } +bool ContextImpl::parseAndSetAlpn(const std::vector& alpn, SSL& ssl) { + std::vector parsed_alpn = parseAlpnProtocols(absl::StrJoin(alpn, ",")); + if (!parsed_alpn.empty()) { + const int rc = SSL_set_alpn_protos(&ssl, parsed_alpn.data(), parsed_alpn.size()); + // This should only if memory allocation fails, e.g. OOM. + RELEASE_ASSERT(rc == 0, Utility::getLastCryptoError().value_or("")); + return true; + } + + return false; +} + bssl::UniquePtr ClientContextImpl::newSsl(const Network::TransportSocketOptions* options) { bssl::UniquePtr ssl_con(ContextImpl::newSsl(options)); @@ -882,14 +894,23 @@ bssl::UniquePtr ClientContextImpl::newSsl(const Network::TransportSocketOpt SSL_set_verify(ssl_con.get(), SSL_VERIFY_PEER | SSL_VERIFY_FAIL_IF_NO_PEER_CERT, nullptr); } - if (options && !options->applicationProtocolListOverride().empty()) { - std::vector parsed_override_alpn = - parseAlpnProtocols(absl::StrJoin(options->applicationProtocolListOverride(), ",")); - if (!parsed_override_alpn.empty()) { - const int rc = SSL_set_alpn_protos(ssl_con.get(), parsed_override_alpn.data(), - parsed_override_alpn.size()); - RELEASE_ASSERT(rc == 0, Utility::getLastCryptoError().value_or("")); - } + // We determine what ALPN using the following precedence: + // 1. Option-provided ALPN override. + // 2. ALPN statically configured in the upstream TLS context. + // 3. Option-provided ALPN fallback. + + // At this point in the code the ALPN has already been set (if present) to the value specified in + // the TLS context. 
We've stored this value in parsed_alpn_protocols_ so we can check that to see + // if it's already been set. + bool has_alpn_defined = !parsed_alpn_protocols_.empty(); + if (options) { + // ALPN override takes precedence over TLS context specified, so blindly overwrite it. + has_alpn_defined |= parseAndSetAlpn(options->applicationProtocolListOverride(), *ssl_con); + } + + if (options && !has_alpn_defined && options->applicationProtocolFallback().has_value()) { + // If ALPN hasn't already been set (either through TLS context or override), use the fallback. + parseAndSetAlpn({*options->applicationProtocolFallback()}, *ssl_con); } if (allow_renegotiation_) { diff --git a/source/extensions/transport_sockets/tls/context_impl.h b/source/extensions/transport_sockets/tls/context_impl.h index b72168337d72..407dd45f86f8 100644 --- a/source/extensions/transport_sockets/tls/context_impl.h +++ b/source/extensions/transport_sockets/tls/context_impl.h @@ -143,6 +143,7 @@ class ContextImpl : public virtual Envoy::Ssl::Context { static bool verifyCertificateSpkiList(X509* cert, const std::vector>& expected_hashes); + bool parseAndSetAlpn(const std::vector& alpn, SSL& ssl); std::vector parseAlpnProtocols(const std::string& alpn_protocols); static SslStats generateStats(Stats::Scope& scope); diff --git a/test/common/grpc/grpc_client_integration_test_harness.h b/test/common/grpc/grpc_client_integration_test_harness.h index 15ccdf5c5d7b..3e472fa67b37 100644 --- a/test/common/grpc/grpc_client_integration_test_harness.h +++ b/test/common/grpc/grpc_client_integration_test_harness.h @@ -521,7 +521,7 @@ class GrpcSslClientIntegrationTest : public GrpcClientIntegrationTest { Network::TransportSocketFactoryPtr createUpstreamSslContext() { envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context; auto* common_tls_context = tls_context.mutable_common_tls_context(); - common_tls_context->add_alpn_protocols("h2"); + common_tls_context->add_alpn_protocols(Http::Utility::AlpnNames::get().Http2); auto* tls_cert = common_tls_context->add_tls_certificates(); tls_cert->mutable_certificate_chain()->set_filename( TestEnvironment::runfilesPath("test/config/integration/certs/upstreamcert.pem")); diff --git a/test/common/http/conn_manager_utility_test.cc b/test/common/http/conn_manager_utility_test.cc index eca659e2989b..3cb307cd6b71 100644 --- a/test/common/http/conn_manager_utility_test.cc +++ b/test/common/http/conn_manager_utility_test.cc @@ -237,14 +237,16 @@ TEST_F(ConnectionManagerUtilityTest, DetermineNextProtocol) { Network::MockConnection connection; EXPECT_CALL(connection, nextProtocol()).WillRepeatedly(Return("")); Buffer::OwnedImpl data("PRI * HTTP/2.0\r\n"); - EXPECT_EQ("h2", ConnectionManagerUtility::determineNextProtocol(connection, data)); + EXPECT_EQ(Utility::AlpnNames::get().Http2, + ConnectionManagerUtility::determineNextProtocol(connection, data)); } { Network::MockConnection connection; EXPECT_CALL(connection, nextProtocol()).WillRepeatedly(Return("")); Buffer::OwnedImpl data("PRI * HTTP/2"); - EXPECT_EQ("h2", ConnectionManagerUtility::determineNextProtocol(connection, data)); + EXPECT_EQ(Utility::AlpnNames::get().Http2, + ConnectionManagerUtility::determineNextProtocol(connection, data)); } { diff --git a/test/common/http/http1/conn_pool_test.cc b/test/common/http/http1/conn_pool_test.cc index 3b35ee583f9b..d2f084313195 100644 --- a/test/common/http/http1/conn_pool_test.cc +++ b/test/common/http/http1/conn_pool_test.cc @@ -2,11 +2,14 @@ #include #include "envoy/http/codec.h" +#include 
"envoy/network/transport_socket.h" #include "common/buffer/buffer_impl.h" #include "common/event/dispatcher_impl.h" #include "common/http/codec_client.h" #include "common/http/http1/conn_pool.h" +#include "common/http/utility.h" +#include "common/network/raw_buffer_socket.h" #include "common/network/utility.h" #include "common/upstream/upstream_impl.h" @@ -18,6 +21,7 @@ #include "test/mocks/network/mocks.h" #include "test/mocks/runtime/mocks.h" #include "test/mocks/upstream/mocks.h" +#include "test/mocks/upstream/transport_socket_match.h" #include "test/test_common/printers.h" #include "test/test_common/simulated_time_system.h" #include "test/test_common/test_runtime.h" @@ -130,7 +134,8 @@ class Http1ConnPoolImplTest : public testing::Test { public: Http1ConnPoolImplTest() : upstream_ready_timer_(new NiceMock(&dispatcher_)), - conn_pool_(dispatcher_, cluster_, upstream_ready_timer_) {} + conn_pool_( + std::make_unique(dispatcher_, cluster_, upstream_ready_timer_)) {} ~Http1ConnPoolImplTest() override { EXPECT_EQ("", TestUtility::nonZeroedGauges(cluster_->stats_store_.gauges())); @@ -139,7 +144,7 @@ class Http1ConnPoolImplTest : public testing::Test { NiceMock dispatcher_; std::shared_ptr cluster_{new NiceMock()}; NiceMock* upstream_ready_timer_; - ConnPoolImplForTest conn_pool_; + std::unique_ptr conn_pool_; NiceMock runtime_; }; @@ -155,14 +160,14 @@ struct ActiveTestRequest { parent_.cluster_->resourceManager(Upstream::ResourcePriority::Default).requests().count(); uint64_t current_rq_total = parent_.cluster_->stats_.upstream_rq_total_.value(); if (type == Type::CreateConnection) { - parent.conn_pool_.expectClientCreate(); + parent.conn_pool_->expectClientCreate(); } if (type == Type::Immediate) { expectNewStream(); } - handle_ = parent.conn_pool_.newStream(outer_decoder_, callbacks_); + handle_ = parent.conn_pool_->newStream(outer_decoder_, callbacks_); if (type == Type::Immediate) { EXPECT_EQ(nullptr, handle_); @@ -172,8 +177,8 @@ struct ActiveTestRequest { if (type == Type::CreateConnection) { expectNewStream(); - EXPECT_CALL(*parent_.conn_pool_.test_clients_[client_index_].connect_timer_, disableTimer()); - parent.conn_pool_.test_clients_[client_index_].connection_->raiseEvent( + EXPECT_CALL(*parent_.conn_pool_->test_clients_[client_index_].connect_timer_, disableTimer()); + parent.conn_pool_->test_clients_[client_index_].connection_->raiseEvent( Network::ConnectionEvent::Connected); } if (type != Type::Pending) { @@ -198,7 +203,7 @@ struct ActiveTestRequest { } void expectNewStream() { - EXPECT_CALL(*parent_.conn_pool_.test_clients_[client_index_].codec_, newStream(_)) + EXPECT_CALL(*parent_.conn_pool_->test_clients_[client_index_].codec_, newStream(_)) .WillOnce(DoAll(SaveArgAddress(&inner_decoder_), ReturnRef(request_encoder_))); EXPECT_CALL(callbacks_.pool_ready_, ready()); } @@ -220,7 +225,7 @@ struct ActiveTestRequest { /** * Verify that the pool's host is a member of the cluster the pool was constructed with. */ -TEST_F(Http1ConnPoolImplTest, Host) { EXPECT_EQ(cluster_.get(), &conn_pool_.host()->cluster()); } +TEST_F(Http1ConnPoolImplTest, Host) { EXPECT_EQ(cluster_.get(), &conn_pool_->host()->cluster()); } /** * Verify that connections are drained when requested. @@ -238,13 +243,13 @@ TEST_F(Http1ConnPoolImplTest, DrainConnections) { r1.completeResponse(false); // This will destroy the ready client and set requests remaining to 1 on the busy client. 
- conn_pool_.drainConnections(); - EXPECT_CALL(conn_pool_, onClientDestroy()); + conn_pool_->drainConnections(); + EXPECT_CALL(*conn_pool_, onClientDestroy()); dispatcher_.clearDeferredDeleteList(); // This will destroy the busy client when the response finishes. r2.completeResponse(false); - EXPECT_CALL(conn_pool_, onClientDestroy()); + EXPECT_CALL(*conn_pool_, onClientDestroy()); dispatcher_.clearDeferredDeleteList(); } @@ -261,8 +266,41 @@ TEST_F(Http1ConnPoolImplTest, VerifyTimingStats) { r1.startRequest(); r1.completeResponse(false); - EXPECT_CALL(conn_pool_, onClientDestroy()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*conn_pool_, onClientDestroy()); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + dispatcher_.clearDeferredDeleteList(); +} + +/** + * Verify that we set the ALPN fallback. + */ +TEST_F(Http1ConnPoolImplTest, VerifyAlpnFallback) { + // Override the TransportSocketFactory with a mock version we can add expectations to. + auto factory = std::make_unique(); + EXPECT_CALL(*factory, createTransportSocket(_)) + .WillOnce(Invoke( + [](Network::TransportSocketOptionsSharedPtr options) -> Network::TransportSocketPtr { + EXPECT_TRUE(options != nullptr); + EXPECT_EQ(options->applicationProtocolFallback(), + Http::Utility::AlpnNames::get().Http11); + return std::make_unique(); + })); + cluster_->transport_socket_matcher_ = + std::make_unique>(std::move(factory)); + + EXPECT_CALL(dispatcher_, createTimer_(_)); + // Recreate the conn pool so that the host re-evaluates the transport socket match, arriving at + // our test transport socket factory. + conn_pool_ = std::make_unique(dispatcher_, cluster_, upstream_ready_timer_); + NiceMock outer_decoder; + ConnPoolCallbacks callbacks; + conn_pool_->expectClientCreate(Protocol::Http11); + Http::ConnectionPool::Cancellable* handle = conn_pool_->newStream(outer_decoder, callbacks); + EXPECT_NE(nullptr, handle); + + EXPECT_CALL(*conn_pool_, onClientDestroy()); + EXPECT_CALL(callbacks.pool_failure_, ready()); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); dispatcher_.clearDeferredDeleteList(); } @@ -272,15 +310,15 @@ TEST_F(Http1ConnPoolImplTest, VerifyTimingStats) { TEST_F(Http1ConnPoolImplTest, VerifyBufferLimits) { NiceMock outer_decoder; ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(); + conn_pool_->expectClientCreate(); EXPECT_CALL(*cluster_, perConnectionBufferLimitBytes()).WillOnce(Return(8192)); - EXPECT_CALL(*conn_pool_.test_clients_.back().connection_, setBufferLimits(8192)); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); + EXPECT_CALL(*conn_pool_->test_clients_.back().connection_, setBufferLimits(8192)); + Http::ConnectionPool::Cancellable* handle = conn_pool_->newStream(outer_decoder, callbacks); EXPECT_NE(nullptr, handle); - EXPECT_CALL(conn_pool_, onClientDestroy()); + EXPECT_CALL(*conn_pool_, onClientDestroy()); EXPECT_CALL(callbacks.pool_failure_, ready()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); dispatcher_.clearDeferredDeleteList(); } @@ -302,17 +340,17 @@ TEST_F(Http1ConnPoolImplTest, VerifyCancelInCallback) { NiceMock outer_decoder; // Create the first client. 
- conn_pool_.expectClientCreate(); - handle1 = conn_pool_.newStream(outer_decoder, callbacks1); + conn_pool_->expectClientCreate(); + handle1 = conn_pool_->newStream(outer_decoder, callbacks1); ASSERT_NE(nullptr, handle1); // Create the second client. - Http::ConnectionPool::Cancellable* handle2 = conn_pool_.newStream(outer_decoder, callbacks2); + Http::ConnectionPool::Cancellable* handle2 = conn_pool_->newStream(outer_decoder, callbacks2); ASSERT_NE(nullptr, handle2); // Simulate connection failure. - EXPECT_CALL(conn_pool_, onClientDestroy()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*conn_pool_, onClientDestroy()); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); dispatcher_.clearDeferredDeleteList(); } @@ -334,8 +372,8 @@ TEST_F(Http1ConnPoolImplTest, MultipleRequestAndResponse) { r2.completeResponse(true); // Cause the connection to go away. - EXPECT_CALL(conn_pool_, onClientDestroy()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*conn_pool_, onClientDestroy()); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); dispatcher_.clearDeferredDeleteList(); } @@ -349,14 +387,14 @@ TEST_F(Http1ConnPoolImplTest, MaxPendingRequests) { NiceMock outer_decoder; ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); + conn_pool_->expectClientCreate(); + Http::ConnectionPool::Cancellable* handle = conn_pool_->newStream(outer_decoder, callbacks); EXPECT_NE(nullptr, handle); NiceMock outer_decoder2; ConnPoolCallbacks callbacks2; EXPECT_CALL(callbacks2.pool_failure_, ready()); - Http::ConnectionPool::Cancellable* handle2 = conn_pool_.newStream(outer_decoder2, callbacks2); + Http::ConnectionPool::Cancellable* handle2 = conn_pool_->newStream(outer_decoder2, callbacks2); EXPECT_EQ(nullptr, handle2); EXPECT_EQ(callbacks2.reason_, ConnectionPool::PoolFailureReason::Overflow); @@ -364,8 +402,8 @@ TEST_F(Http1ConnPoolImplTest, MaxPendingRequests) { handle->cancel(Envoy::ConnectionPool::CancelPolicy::Default); - EXPECT_CALL(conn_pool_, onClientDestroy()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*conn_pool_, onClientDestroy()); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); dispatcher_.clearDeferredDeleteList(); EXPECT_EQ(1U, cluster_->stats_.upstream_rq_pending_overflow_.value()); @@ -381,14 +419,14 @@ TEST_F(Http1ConnPoolImplTest, ConnectFailure) { // Request 1 should kick off a new connection. 
NiceMock outer_decoder; ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); + conn_pool_->expectClientCreate(); + Http::ConnectionPool::Cancellable* handle = conn_pool_->newStream(outer_decoder, callbacks); EXPECT_NE(nullptr, handle); EXPECT_CALL(callbacks.pool_failure_, ready()); - EXPECT_CALL(*conn_pool_.test_clients_[0].connect_timer_, disableTimer()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - EXPECT_CALL(conn_pool_, onClientDestroy()); + EXPECT_CALL(*conn_pool_->test_clients_[0].connect_timer_, disableTimer()); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*conn_pool_, onClientDestroy()); dispatcher_.clearDeferredDeleteList(); EXPECT_EQ(1U, cluster_->stats_.upstream_cx_connect_fail_.value()); @@ -411,12 +449,12 @@ TEST_F(Http1ConnPoolImplTest, MeasureConnectTime) { InSequence s; // Start the first connect attempt. - conn_pool_.expectClientCreate(); + conn_pool_->expectClientCreate(); ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::Pending); // Move time forward and start the second connect attempt. simulated_time.advanceTimeWait(std::chrono::milliseconds(sleep1_ms)); - conn_pool_.expectClientCreate(); + conn_pool_->expectClientCreate(); ActiveTestRequest r2(*this, 1, ActiveTestRequest::Type::Pending); // Move time forward, signal that the first connect completed and verify the time to connect. @@ -426,8 +464,8 @@ TEST_F(Http1ConnPoolImplTest, MeasureConnectTime) { deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_cx_connect_ms"), _)) .WillOnce(SaveArg<1>(&upstream_cx_connect_ms1)); r1.expectNewStream(); - EXPECT_CALL(*conn_pool_.test_clients_[0].connect_timer_, disableTimer()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + EXPECT_CALL(*conn_pool_->test_clients_[0].connect_timer_, disableTimer()); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); EXPECT_EQ(sleep1_ms + sleep2_ms, upstream_cx_connect_ms1); // Move time forward, signal that the second connect completed and verify the time to connect. @@ -437,17 +475,18 @@ TEST_F(Http1ConnPoolImplTest, MeasureConnectTime) { deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_cx_connect_ms"), _)) .WillOnce(SaveArg<1>(&upstream_cx_connect_ms2)); r2.expectNewStream(); - EXPECT_CALL(*conn_pool_.test_clients_[1].connect_timer_, disableTimer()); - conn_pool_.test_clients_[1].connection_->raiseEvent(Network::ConnectionEvent::Connected); + EXPECT_CALL(*conn_pool_->test_clients_[1].connect_timer_, disableTimer()); + conn_pool_->test_clients_[1].connection_->raiseEvent(Network::ConnectionEvent::Connected); EXPECT_EQ(sleep2_ms + sleep3_ms, upstream_cx_connect_ms2); // Cleanup, cause the connections to go away. 
- while (!conn_pool_.test_clients_.empty()) { + while (!conn_pool_->test_clients_.empty()) { EXPECT_CALL( cluster_->stats_store_, deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_cx_length_ms"), _)); - EXPECT_CALL(conn_pool_, onClientDestroy()); - conn_pool_.test_clients_.front().connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*conn_pool_, onClientDestroy()); + conn_pool_->test_clients_.front().connection_->raiseEvent( + Network::ConnectionEvent::RemoteClose); dispatcher_.clearDeferredDeleteList(); } } @@ -461,22 +500,22 @@ TEST_F(Http1ConnPoolImplTest, ConnectTimeout) { // Request 1 should kick off a new connection. NiceMock outer_decoder1; ConnPoolCallbacks callbacks1; - conn_pool_.expectClientCreate(); - EXPECT_NE(nullptr, conn_pool_.newStream(outer_decoder1, callbacks1)); + conn_pool_->expectClientCreate(); + EXPECT_NE(nullptr, conn_pool_->newStream(outer_decoder1, callbacks1)); NiceMock outer_decoder2; ConnPoolCallbacks callbacks2; EXPECT_CALL(callbacks1.pool_failure_, ready()).WillOnce(Invoke([&]() -> void { - conn_pool_.expectClientCreate(); - EXPECT_NE(nullptr, conn_pool_.newStream(outer_decoder2, callbacks2)); + conn_pool_->expectClientCreate(); + EXPECT_NE(nullptr, conn_pool_->newStream(outer_decoder2, callbacks2)); })); - conn_pool_.test_clients_[0].connect_timer_->invokeCallback(); + conn_pool_->test_clients_[0].connect_timer_->invokeCallback(); EXPECT_CALL(callbacks2.pool_failure_, ready()); - conn_pool_.test_clients_[1].connect_timer_->invokeCallback(); + conn_pool_->test_clients_[1].connect_timer_->invokeCallback(); - EXPECT_CALL(conn_pool_, onClientDestroy()).Times(2); + EXPECT_CALL(*conn_pool_, onClientDestroy()).Times(2); dispatcher_.clearDeferredDeleteList(); EXPECT_EQ(0U, cluster_->stats_.upstream_rq_total_.value()); @@ -493,16 +532,16 @@ TEST_F(Http1ConnPoolImplTest, CancelBeforeBound) { // Request 1 should kick off a new connection. NiceMock outer_decoder; ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); + conn_pool_->expectClientCreate(); + Http::ConnectionPool::Cancellable* handle = conn_pool_->newStream(outer_decoder, callbacks); EXPECT_NE(nullptr, handle); handle->cancel(Envoy::ConnectionPool::CancelPolicy::Default); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); // Cause the connection to go away. - EXPECT_CALL(conn_pool_, onClientDestroy()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*conn_pool_, onClientDestroy()); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); dispatcher_.clearDeferredDeleteList(); } @@ -515,13 +554,13 @@ TEST_F(Http1ConnPoolImplTest, CancelExcessBeforeBound) { // Request 1 should kick off a new connection. NiceMock outer_decoder; ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); + conn_pool_->expectClientCreate(); + Http::ConnectionPool::Cancellable* handle = conn_pool_->newStream(outer_decoder, callbacks); EXPECT_NE(nullptr, handle); handle->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess); // Unlike CancelBeforeBound there is no need to raise a close event to destroy the connection. 
- EXPECT_CALL(conn_pool_, onClientDestroy()); + EXPECT_CALL(*conn_pool_, onClientDestroy()); dispatcher_.clearDeferredDeleteList(); } @@ -534,17 +573,17 @@ TEST_F(Http1ConnPoolImplTest, DisconnectWhileBound) { // Request 1 should kick off a new connection. NiceMock outer_decoder; ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); + conn_pool_->expectClientCreate(); + Http::ConnectionPool::Cancellable* handle = conn_pool_->newStream(outer_decoder, callbacks); EXPECT_NE(nullptr, handle); NiceMock request_encoder; ResponseDecoder* inner_decoder; - EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) + EXPECT_CALL(*conn_pool_->test_clients_[0].codec_, newStream(_)) .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); EXPECT_CALL(callbacks.pool_ready_, ready()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); // We should get a reset callback when the connection disconnects. Http::MockStreamCallbacks stream_callbacks; @@ -552,8 +591,8 @@ TEST_F(Http1ConnPoolImplTest, DisconnectWhileBound) { request_encoder.getStream().addCallbacks(stream_callbacks); // Kill the connection while it has an active request. - EXPECT_CALL(conn_pool_, onClientDestroy()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*conn_pool_, onClientDestroy()); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); dispatcher_.clearDeferredDeleteList(); } @@ -568,15 +607,15 @@ TEST_F(Http1ConnPoolImplTest, MaxConnections) { // Request 1 should kick off a new connection. NiceMock outer_decoder1; ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder1, callbacks); + conn_pool_->expectClientCreate(); + Http::ConnectionPool::Cancellable* handle = conn_pool_->newStream(outer_decoder1, callbacks); EXPECT_NE(nullptr, handle); // Request 2 should not kick off a new connection. NiceMock outer_decoder2; ConnPoolCallbacks callbacks2; - handle = conn_pool_.newStream(outer_decoder2, callbacks2); + handle = conn_pool_->newStream(outer_decoder2, callbacks2); EXPECT_EQ(1U, cluster_->stats_.upstream_cx_overflow_.value()); EXPECT_EQ(1U, cluster_->circuit_breakers_stats_.cx_open_.value()); @@ -585,15 +624,15 @@ TEST_F(Http1ConnPoolImplTest, MaxConnections) { // Connect event will bind to request 1. NiceMock request_encoder; ResponseDecoder* inner_decoder; - EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) + EXPECT_CALL(*conn_pool_->test_clients_[0].codec_, newStream(_)) .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); EXPECT_CALL(callbacks.pool_ready_, ready()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); // Finishing request 1 will immediately bind to request 2. 
- conn_pool_.expectEnableUpstreamReady(); - EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) + conn_pool_->expectEnableUpstreamReady(); + EXPECT_CALL(*conn_pool_->test_clients_[0].codec_, newStream(_)) .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); EXPECT_CALL(callbacks2.pool_ready_, ready()); @@ -602,7 +641,7 @@ TEST_F(Http1ConnPoolImplTest, MaxConnections) { Http::ResponseHeaderMapPtr response_headers(new TestResponseHeaderMapImpl{{":status", "200"}}); inner_decoder->decodeHeaders(std::move(response_headers), true); - conn_pool_.expectAndRunUpstreamReady(); + conn_pool_->expectAndRunUpstreamReady(); callbacks2.outer_encoder_->encodeHeaders( TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); // N.B. clang_tidy insists that we use std::make_unique which can not infer std::initialize_list. @@ -611,8 +650,8 @@ TEST_F(Http1ConnPoolImplTest, MaxConnections) { inner_decoder->decodeHeaders(std::move(response_headers), true); // Cause the connection to go away. - EXPECT_CALL(conn_pool_, onClientDestroy()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*conn_pool_, onClientDestroy()); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); dispatcher_.clearDeferredDeleteList(); } @@ -626,15 +665,15 @@ TEST_F(Http1ConnPoolImplTest, ConnectionCloseWithoutHeader) { // Request 1 should kick off a new connection. NiceMock outer_decoder1; ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder1, callbacks); + conn_pool_->expectClientCreate(); + Http::ConnectionPool::Cancellable* handle = conn_pool_->newStream(outer_decoder1, callbacks); EXPECT_NE(nullptr, handle); // Request 2 should not kick off a new connection. NiceMock outer_decoder2; ConnPoolCallbacks callbacks2; - handle = conn_pool_.newStream(outer_decoder2, callbacks2); + handle = conn_pool_->newStream(outer_decoder2, callbacks2); EXPECT_EQ(1U, cluster_->stats_.upstream_cx_overflow_.value()); EXPECT_NE(nullptr, handle); @@ -642,14 +681,14 @@ TEST_F(Http1ConnPoolImplTest, ConnectionCloseWithoutHeader) { // Connect event will bind to request 1. NiceMock request_encoder; ResponseDecoder* inner_decoder; - EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) + EXPECT_CALL(*conn_pool_->test_clients_[0].codec_, newStream(_)) .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); EXPECT_CALL(callbacks.pool_ready_, ready()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); // Finishing request 1 will schedule binding the connection to request 2. - conn_pool_.expectEnableUpstreamReady(); + conn_pool_->expectEnableUpstreamReady(); callbacks.outer_encoder_->encodeHeaders( TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); @@ -657,17 +696,17 @@ TEST_F(Http1ConnPoolImplTest, ConnectionCloseWithoutHeader) { inner_decoder->decodeHeaders(std::move(response_headers), true); // Cause the connection to go away. 
- conn_pool_.expectClientCreate(); - EXPECT_CALL(conn_pool_, onClientDestroy()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + conn_pool_->expectClientCreate(); + EXPECT_CALL(*conn_pool_, onClientDestroy()); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); dispatcher_.clearDeferredDeleteList(); - conn_pool_.expectAndRunUpstreamReady(); + conn_pool_->expectAndRunUpstreamReady(); - EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) + EXPECT_CALL(*conn_pool_->test_clients_[0].codec_, newStream(_)) .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); EXPECT_CALL(callbacks2.pool_ready_, ready()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); callbacks2.outer_encoder_->encodeHeaders( TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); @@ -676,8 +715,8 @@ TEST_F(Http1ConnPoolImplTest, ConnectionCloseWithoutHeader) { std::initializer_list>{{":status", "200"}}); inner_decoder->decodeHeaders(std::move(response_headers), true); - EXPECT_CALL(conn_pool_, onClientDestroy()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*conn_pool_, onClientDestroy()); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); dispatcher_.clearDeferredDeleteList(); } @@ -690,23 +729,23 @@ TEST_F(Http1ConnPoolImplTest, ConnectionCloseHeader) { // Request 1 should kick off a new connection. NiceMock outer_decoder; ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); + conn_pool_->expectClientCreate(); + Http::ConnectionPool::Cancellable* handle = conn_pool_->newStream(outer_decoder, callbacks); EXPECT_NE(nullptr, handle); NiceMock request_encoder; ResponseDecoder* inner_decoder; - EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) + EXPECT_CALL(*conn_pool_->test_clients_[0].codec_, newStream(_)) .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); EXPECT_CALL(callbacks.pool_ready_, ready()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); callbacks.outer_encoder_->encodeHeaders( TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); // Response with 'connection: close' which should cause the connection to go away. - EXPECT_CALL(conn_pool_, onClientDestroy()); + EXPECT_CALL(*conn_pool_, onClientDestroy()); ResponseHeaderMapPtr response_headers( new TestResponseHeaderMapImpl{{":status", "200"}, {"Connection", "Close"}}); inner_decoder->decodeHeaders(std::move(response_headers), true); @@ -724,24 +763,24 @@ TEST_F(Http1ConnPoolImplTest, ProxyConnectionCloseHeader) { // Request 1 should kick off a new connection. 
NiceMock outer_decoder; ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); + conn_pool_->expectClientCreate(); + Http::ConnectionPool::Cancellable* handle = conn_pool_->newStream(outer_decoder, callbacks); EXPECT_NE(nullptr, handle); NiceMock request_encoder; ResponseDecoder* inner_decoder; - EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) + EXPECT_CALL(*conn_pool_->test_clients_[0].codec_, newStream(_)) .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); EXPECT_CALL(callbacks.pool_ready_, ready()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); callbacks.outer_encoder_->encodeHeaders( TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); + EXPECT_CALL(*conn_pool_, onClientDestroy()); // Response with 'proxy-connection: close' which should cause the connection to go away, even if // there are other tokens in that header. - EXPECT_CALL(conn_pool_, onClientDestroy()); ResponseHeaderMapPtr response_headers( new TestResponseHeaderMapImpl{{":status", "200"}, {"Proxy-Connection", "Close, foo"}}); inner_decoder->decodeHeaders(std::move(response_headers), true); @@ -762,24 +801,24 @@ TEST_F(Http1ConnPoolImplTest, ProxyConnectionCloseHeaderLegacy) { // Request 1 should kick off a new connection. NiceMock outer_decoder; ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); + conn_pool_->expectClientCreate(); + Http::ConnectionPool::Cancellable* handle = conn_pool_->newStream(outer_decoder, callbacks); EXPECT_NE(nullptr, handle); NiceMock request_encoder; ResponseDecoder* inner_decoder; - EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) + EXPECT_CALL(*conn_pool_->test_clients_[0].codec_, newStream(_)) .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); EXPECT_CALL(callbacks.pool_ready_, ready()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); callbacks.outer_encoder_->encodeHeaders( TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); // Response with 'proxy-connection: close' which should cause the connection to go away, even if // there are other tokens in that header. - EXPECT_CALL(conn_pool_, onClientDestroy()); + EXPECT_CALL(*conn_pool_, onClientDestroy()); ResponseHeaderMapPtr response_headers( new TestResponseHeaderMapImpl{{":status", "200"}, {"Proxy-Connection", "Close"}}); inner_decoder->decodeHeaders(std::move(response_headers), true); @@ -797,23 +836,23 @@ TEST_F(Http1ConnPoolImplTest, Http10NoConnectionKeepAlive) { // Request 1 should kick off a new connection. 
NiceMock outer_decoder; ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(Protocol::Http10); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); + conn_pool_->expectClientCreate(Protocol::Http10); + Http::ConnectionPool::Cancellable* handle = conn_pool_->newStream(outer_decoder, callbacks); EXPECT_NE(nullptr, handle); NiceMock request_encoder; ResponseDecoder* inner_decoder; - EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) + EXPECT_CALL(*conn_pool_->test_clients_[0].codec_, newStream(_)) .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); EXPECT_CALL(callbacks.pool_ready_, ready()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); callbacks.outer_encoder_->encodeHeaders( TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); // Response without 'connection: keep-alive' which should cause the connection to go away. - EXPECT_CALL(conn_pool_, onClientDestroy()); + EXPECT_CALL(*conn_pool_, onClientDestroy()); ResponseHeaderMapPtr response_headers( new TestResponseHeaderMapImpl{{":protocol", "HTTP/1.0"}, {":status", "200"}}); inner_decoder->decodeHeaders(std::move(response_headers), true); @@ -834,23 +873,23 @@ TEST_F(Http1ConnPoolImplTest, Http10NoConnectionKeepAliveLegacy) { // Request 1 should kick off a new connection. NiceMock outer_decoder; ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(Protocol::Http10); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); + conn_pool_->expectClientCreate(Protocol::Http10); + Http::ConnectionPool::Cancellable* handle = conn_pool_->newStream(outer_decoder, callbacks); EXPECT_NE(nullptr, handle); NiceMock request_encoder; ResponseDecoder* inner_decoder; - EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) + EXPECT_CALL(*conn_pool_->test_clients_[0].codec_, newStream(_)) .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); EXPECT_CALL(callbacks.pool_ready_, ready()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); callbacks.outer_encoder_->encodeHeaders( TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); // Response without 'connection: keep-alive' which should cause the connection to go away. - EXPECT_CALL(conn_pool_, onClientDestroy()); + EXPECT_CALL(*conn_pool_, onClientDestroy()); ResponseHeaderMapPtr response_headers( new TestResponseHeaderMapImpl{{":protocol", "HTTP/1.0"}, {":status", "200"}}); inner_decoder->decodeHeaders(std::move(response_headers), true); @@ -870,23 +909,23 @@ TEST_F(Http1ConnPoolImplTest, MaxRequestsPerConnection) { // Request 1 should kick off a new connection. 
NiceMock outer_decoder; ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); + conn_pool_->expectClientCreate(); + Http::ConnectionPool::Cancellable* handle = conn_pool_->newStream(outer_decoder, callbacks); EXPECT_NE(nullptr, handle); NiceMock request_encoder; ResponseDecoder* inner_decoder; - EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) + EXPECT_CALL(*conn_pool_->test_clients_[0].codec_, newStream(_)) .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); EXPECT_CALL(callbacks.pool_ready_, ready()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); callbacks.outer_encoder_->encodeHeaders( TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); // Response with 'connection: close' which should cause the connection to go away. - EXPECT_CALL(conn_pool_, onClientDestroy()); + EXPECT_CALL(*conn_pool_, onClientDestroy()); Http::ResponseHeaderMapPtr response_headers(new TestResponseHeaderMapImpl{{":status", "200"}}); inner_decoder->decodeHeaders(std::move(response_headers), true); dispatcher_.clearDeferredDeleteList(); @@ -908,11 +947,11 @@ TEST_F(Http1ConnPoolImplTest, ConcurrentConnections) { ActiveTestRequest r3(*this, 0, ActiveTestRequest::Type::Pending); // Finish r1, which gets r3 going. - conn_pool_.expectEnableUpstreamReady(); + conn_pool_->expectEnableUpstreamReady(); r3.expectNewStream(); r1.completeResponse(false); - conn_pool_.expectAndRunUpstreamReady(); + conn_pool_->expectAndRunUpstreamReady(); r3.startRequest(); EXPECT_EQ(3U, cluster_->stats_.upstream_rq_total_.value()); @@ -920,9 +959,9 @@ TEST_F(Http1ConnPoolImplTest, ConcurrentConnections) { r3.completeResponse(false); // Disconnect both clients. 
- EXPECT_CALL(conn_pool_, onClientDestroy()).Times(2); - conn_pool_.test_clients_[1].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*conn_pool_, onClientDestroy()).Times(2); + conn_pool_->test_clients_[1].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); dispatcher_.clearDeferredDeleteList(); EXPECT_EQ(2U, cluster_->stats_.upstream_cx_destroy_.value()); @@ -934,7 +973,7 @@ TEST_F(Http1ConnPoolImplTest, DrainCallback) { ReadyWatcher drained; EXPECT_CALL(drained, ready()); - conn_pool_.addDrainedCallback([&]() -> void { drained.ready(); }); + conn_pool_->addDrainedCallback([&]() -> void { drained.ready(); }); ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::CreateConnection); ActiveTestRequest r2(*this, 0, ActiveTestRequest::Type::Pending); @@ -945,7 +984,7 @@ TEST_F(Http1ConnPoolImplTest, DrainCallback) { r1.startRequest(); r1.completeResponse(false); - EXPECT_CALL(conn_pool_, onClientDestroy()); + EXPECT_CALL(*conn_pool_, onClientDestroy()); dispatcher_.clearDeferredDeleteList(); } @@ -956,17 +995,17 @@ TEST_F(Http1ConnPoolImplTest, DrainWhileConnecting) { NiceMock outer_decoder; ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); + conn_pool_->expectClientCreate(); + Http::ConnectionPool::Cancellable* handle = conn_pool_->newStream(outer_decoder, callbacks); EXPECT_NE(nullptr, handle); - conn_pool_.addDrainedCallback([&]() -> void { drained.ready(); }); - EXPECT_CALL(*conn_pool_.test_clients_[0].connection_, + conn_pool_->addDrainedCallback([&]() -> void { drained.ready(); }); + EXPECT_CALL(*conn_pool_->test_clients_[0].connection_, close(Network::ConnectionCloseType::NoFlush)); EXPECT_CALL(drained, ready()); handle->cancel(Envoy::ConnectionPool::CancelPolicy::Default); - EXPECT_CALL(conn_pool_, onClientDestroy()); + EXPECT_CALL(*conn_pool_, onClientDestroy()); dispatcher_.clearDeferredDeleteList(); } @@ -975,17 +1014,17 @@ TEST_F(Http1ConnPoolImplTest, RemoteCloseToCompleteResponse) { NiceMock outer_decoder; ConnPoolCallbacks callbacks; - conn_pool_.expectClientCreate(); - Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(outer_decoder, callbacks); + conn_pool_->expectClientCreate(); + Http::ConnectionPool::Cancellable* handle = conn_pool_->newStream(outer_decoder, callbacks); EXPECT_NE(nullptr, handle); NiceMock request_encoder; ResponseDecoder* inner_decoder; - EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, newStream(_)) + EXPECT_CALL(*conn_pool_->test_clients_[0].codec_, newStream(_)) .WillOnce(DoAll(SaveArgAddress(&inner_decoder), ReturnRef(request_encoder))); EXPECT_CALL(callbacks.pool_ready_, ready()); - EXPECT_CALL(*conn_pool_.test_clients_[0].connect_timer_, disableTimer()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + EXPECT_CALL(*conn_pool_->test_clients_[0].connect_timer_, disableTimer()); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); callbacks.outer_encoder_->encodeHeaders( TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); @@ -996,17 +1035,17 @@ TEST_F(Http1ConnPoolImplTest, RemoteCloseToCompleteResponse) { inner_decoder->decodeData(dummy_data, false); Buffer::OwnedImpl empty_data; - 
EXPECT_CALL(*conn_pool_.test_clients_[0].codec_, dispatch(BufferEqual(&empty_data))) + EXPECT_CALL(*conn_pool_->test_clients_[0].codec_, dispatch(BufferEqual(&empty_data))) .WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { // Simulate the onResponseComplete call to decodeData since dispatch is mocked out. inner_decoder->decodeData(data, true); return Http::okStatus(); })); - EXPECT_CALL(*conn_pool_.test_clients_[0].connection_, + EXPECT_CALL(*conn_pool_->test_clients_[0].connection_, close(Network::ConnectionCloseType::NoFlush)); - EXPECT_CALL(conn_pool_, onClientDestroy()); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + EXPECT_CALL(*conn_pool_, onClientDestroy()); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); dispatcher_.clearDeferredDeleteList(); EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); @@ -1014,19 +1053,19 @@ TEST_F(Http1ConnPoolImplTest, RemoteCloseToCompleteResponse) { } TEST_F(Http1ConnPoolImplTest, NoActiveConnectionsByDefault) { - EXPECT_FALSE(conn_pool_.hasActiveConnections()); + EXPECT_FALSE(conn_pool_->hasActiveConnections()); } TEST_F(Http1ConnPoolImplTest, ActiveRequestHasActiveConnectionsTrue) { ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::CreateConnection); r1.startRequest(); - EXPECT_TRUE(conn_pool_.hasActiveConnections()); + EXPECT_TRUE(conn_pool_->hasActiveConnections()); // cleanup r1.completeResponse(false); - conn_pool_.drainConnections(); - EXPECT_CALL(conn_pool_, onClientDestroy()); + conn_pool_->drainConnections(); + EXPECT_CALL(*conn_pool_, onClientDestroy()); dispatcher_.clearDeferredDeleteList(); } @@ -1035,24 +1074,24 @@ TEST_F(Http1ConnPoolImplTest, ResponseCompletedConnectionReadyNoActiveConnection r1.startRequest(); r1.completeResponse(false); - EXPECT_FALSE(conn_pool_.hasActiveConnections()); + EXPECT_FALSE(conn_pool_->hasActiveConnections()); - conn_pool_.drainConnections(); - EXPECT_CALL(conn_pool_, onClientDestroy()); + conn_pool_->drainConnections(); + EXPECT_CALL(*conn_pool_, onClientDestroy()); dispatcher_.clearDeferredDeleteList(); } TEST_F(Http1ConnPoolImplTest, PendingRequestIsConsideredActive) { - conn_pool_.expectClientCreate(); + conn_pool_->expectClientCreate(); ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::Pending); - EXPECT_TRUE(conn_pool_.hasActiveConnections()); + EXPECT_TRUE(conn_pool_->hasActiveConnections()); - EXPECT_CALL(conn_pool_, onClientDestroy()); + EXPECT_CALL(*conn_pool_, onClientDestroy()); r1.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::Default); EXPECT_EQ(0U, cluster_->stats_.upstream_rq_total_.value()); - conn_pool_.drainConnections(); - conn_pool_.test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + conn_pool_->drainConnections(); + conn_pool_->test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); dispatcher_.clearDeferredDeleteList(); EXPECT_EQ(1U, cluster_->stats_.upstream_cx_destroy_.value()); diff --git a/test/common/http/http2/BUILD b/test/common/http/http2/BUILD index db49b187c87e..6876a13477a1 100644 --- a/test/common/http/http2/BUILD +++ b/test/common/http/http2/BUILD @@ -32,6 +32,7 @@ envoy_cc_test( "//test/mocks/network:network_mocks", "//test/mocks/protobuf:protobuf_mocks", "//test/mocks/thread_local:thread_local_mocks", + "//test/mocks/upstream:transport_socket_match_mocks", "//test/mocks/upstream:upstream_mocks", "//test/test_common:registry_lib", "//test/test_common:test_runtime_lib", diff --git 
a/test/common/http/http2/conn_pool_test.cc b/test/common/http/http2/conn_pool_test.cc index c48152e7bb44..5aee5e508dd2 100644 --- a/test/common/http/http2/conn_pool_test.cc +++ b/test/common/http/http2/conn_pool_test.cc @@ -4,6 +4,7 @@ #include "common/event/dispatcher_impl.h" #include "common/http/http2/conn_pool.h" +#include "common/network/raw_buffer_socket.h" #include "common/network/utility.h" #include "common/upstream/upstream_impl.h" @@ -60,7 +61,8 @@ class Http2ConnPoolImplTest : public testing::Test { Http2ConnPoolImplTest() : api_(Api::createApiForTest(stats_store_)), - pool_(dispatcher_, host_, Upstream::ResourcePriority::Default, nullptr, nullptr) { + pool_(std::make_unique( + dispatcher_, host_, Upstream::ResourcePriority::Default, nullptr, nullptr)) { // Default connections to 1024 because the tests shouldn't be relying on the // connection resource limit for most tests. cluster_->resetResourceManager(1024, 1024, 1024, 1, 1); @@ -70,15 +72,19 @@ class Http2ConnPoolImplTest : public testing::Test { EXPECT_EQ("", TestUtility::nonZeroedGauges(cluster_->stats_store_.gauges())); } - // Creates a new test client, expecting a new connection to be created and associated - // with the new client. - void expectClientCreate(absl::optional buffer_limits = {}) { + TestCodecClient& createTestClient() { test_clients_.emplace_back(); TestCodecClient& test_client = test_clients_.back(); test_client.connection_ = new NiceMock(); test_client.codec_ = new NiceMock(); test_client.connect_timer_ = new NiceMock(&dispatcher_); test_client.client_dispatcher_ = api_->allocateDispatcher("test_thread"); + + return test_client; + } + + void expectConnectionSetupForClient(TestCodecClient& test_client, + absl::optional buffer_limits = {}) { EXPECT_CALL(dispatcher_, createClientConnection_(_, _, _, _)) .WillOnce(Return(test_client.connection_)); auto cluster = std::make_shared>(); @@ -91,13 +97,19 @@ class Http2ConnPoolImplTest : public testing::Test { EXPECT_CALL(*cluster_, perConnectionBufferLimitBytes()).WillOnce(Return(*buffer_limits)); EXPECT_CALL(*test_clients_.back().connection_, setBufferLimits(*buffer_limits)); } - EXPECT_CALL(pool_, createCodecClient_(_)) + EXPECT_CALL(*pool_, createCodecClient_(_)) .WillOnce(Invoke([this](Upstream::Host::CreateConnectionData&) -> CodecClient* { return test_clients_.back().codec_client_; })); EXPECT_CALL(*test_client.connect_timer_, enableTimer(_, _)); } + // Creates a new test client, expecting a new connection to be created and associated + // with the new client. + void expectClientCreate(absl::optional buffer_limits = {}) { + expectConnectionSetupForClient(createTestClient(), buffer_limits); + } + // Connects a pending connection for client with the given index, asserting // that the provided request receives onPoolReady. 
void expectClientConnect(size_t index, ActiveTestRequest& r); @@ -133,7 +145,7 @@ class Http2ConnPoolImplTest : public testing::Test { NiceMock dispatcher_; std::shared_ptr cluster_{new NiceMock()}; Upstream::HostSharedPtr host_{Upstream::makeTestHost(cluster_, "tcp://127.0.0.1:80")}; - TestConnPoolImpl pool_; + std::unique_ptr pool_; std::vector test_clients_; NiceMock runtime_; }; @@ -145,9 +157,9 @@ class ActiveTestRequest { EXPECT_CALL(*test.test_clients_[client_index].codec_, newStream(_)) .WillOnce(DoAll(SaveArgAddress(&inner_decoder_), ReturnRef(inner_encoder_))); EXPECT_CALL(callbacks_.pool_ready_, ready()); - EXPECT_EQ(nullptr, test.pool_.newStream(decoder_, callbacks_)); + EXPECT_EQ(nullptr, test.pool_->newStream(decoder_, callbacks_)); } else { - handle_ = test.pool_.newStream(decoder_, callbacks_); + handle_ = test.pool_->newStream(decoder_, callbacks_); EXPECT_NE(nullptr, handle_); } } @@ -211,7 +223,7 @@ void Http2ConnPoolImplTest::completeRequestCloseUpstream(size_t index, ActiveTes /** * Verify that the pool retains and returns the host it was constructed with. */ -TEST_F(Http2ConnPoolImplTest, Host) { EXPECT_EQ(host_, pool_.host()); } +TEST_F(Http2ConnPoolImplTest, Host) { EXPECT_EQ(host_, pool_->host()); } /** * Verify that idle connections are closed immediately when draining. @@ -225,7 +237,54 @@ TEST_F(Http2ConnPoolImplTest, DrainConnectionIdle) { completeRequest(r); EXPECT_CALL(*this, onClientDestroy()); - pool_.drainConnections(); + pool_->drainConnections(); +} + +/** + * Verify that we set the ALPN fallback. + */ +TEST_F(Http2ConnPoolImplTest, VerifyAlpnFallback) { + InSequence s; + + // Override the TransportSocketFactory with a mock version we can add expectations to. + auto factory = std::make_unique(); + auto factory_ptr = factory.get(); + cluster_->transport_socket_matcher_ = + std::make_unique>(std::move(factory)); + + // Recreate the conn pool so that the host re-evaluates the transport socket match, arriving at + // our test transport socket factory. + host_ = Upstream::makeTestHost(cluster_, "tcp://127.0.0.1:80"); + pool_ = std::make_unique(dispatcher_, host_, + Upstream::ResourcePriority::Default, nullptr, nullptr); + + // This requires some careful set up of expectations ordering: the call to createTransportSocket + // happens before all the connection set up but after the test client is created (due to some) + // of the mocks that are constructed as part of the test client. + auto& client = createTestClient(); + EXPECT_CALL(*factory_ptr, createTransportSocket(_)) + .WillOnce(Invoke( + [](Network::TransportSocketOptionsSharedPtr options) -> Network::TransportSocketPtr { + EXPECT_TRUE(options != nullptr); + EXPECT_EQ(options->applicationProtocolFallback(), + Http::Utility::AlpnNames::get().Http2); + return std::make_unique(); + })); + expectConnectionSetupForClient(client); + ActiveTestRequest r(*this, 0, false); + expectClientConnect(0, r); + EXPECT_CALL(r.inner_encoder_, encodeHeaders(_, true)); + r.callbacks_.outer_encoder_->encodeHeaders( + TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); + + EXPECT_CALL(r.decoder_, decodeHeaders_(_, true)); + EXPECT_CALL(*this, onClientDestroy()); + r.inner_decoder_->decodeHeaders( + ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true); + + // Close connections. 
+ test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + dispatcher_.clearDeferredDeleteList(); } /** @@ -242,7 +301,7 @@ TEST_F(Http2ConnPoolImplTest, DrainConnectionReadyWithRequest) { r.callbacks_.outer_encoder_->encodeHeaders( TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - pool_.drainConnections(); + pool_->drainConnections(); EXPECT_CALL(r.decoder_, decodeHeaders_(_, true)); EXPECT_CALL(*this, onClientDestroy()); @@ -265,7 +324,7 @@ TEST_F(Http2ConnPoolImplTest, DrainConnectionBusy) { r.callbacks_.outer_encoder_->encodeHeaders( TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); - pool_.drainConnections(); + pool_->drainConnections(); EXPECT_CALL(r.decoder_, decodeHeaders_(_, true)); EXPECT_CALL(*this, onClientDestroy()); @@ -285,12 +344,12 @@ TEST_F(Http2ConnPoolImplTest, DrainConnectionConnecting) { ActiveTestRequest r(*this, 0, false); // Pending request prevents the connection from being drained - pool_.drainConnections(); + pool_->drainConnections(); // Cancel the pending request, and then the connection can be closed. r.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::Default); EXPECT_CALL(*this, onClientDestroy()); - pool_.drainConnections(); + pool_->drainConnections(); } /** @@ -303,7 +362,7 @@ TEST_F(Http2ConnPoolImplTest, CloseExcess) { ActiveTestRequest r(*this, 0, false); // Pending request prevents the connection from being drained - pool_.drainConnections(); + pool_->drainConnections(); EXPECT_CALL(*this, onClientDestroy()); r.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess); @@ -444,7 +503,7 @@ TEST_F(Http2ConnPoolImplTest, DrainConnections) { cluster_->max_requests_per_connection_ = 1; // Test drain connections call prior to any connections being created. - pool_.drainConnections(); + pool_->drainConnections(); expectClientCreate(); ActiveTestRequest r1(*this, 0, false); @@ -463,7 +522,7 @@ TEST_F(Http2ConnPoolImplTest, DrainConnections) { TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); // This will move the second connection to draining. - pool_.drainConnections(); + pool_->drainConnections(); // This will destroy the 2 draining connections. 
test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); @@ -755,7 +814,7 @@ TEST_F(Http2ConnPoolImplTest, PendingRequestsMaxPendingCircuitBreaker) { MockResponseDecoder decoder; ConnPoolCallbacks callbacks; EXPECT_CALL(callbacks.pool_failure_, ready()); - EXPECT_EQ(nullptr, pool_.newStream(decoder, callbacks)); + EXPECT_EQ(nullptr, pool_->newStream(decoder, callbacks)); expectStreamConnect(0, r1); EXPECT_CALL(*test_clients_[0].connect_timer_, disableTimer()); @@ -906,7 +965,7 @@ TEST_F(Http2ConnPoolImplTest, DrainDisconnectWithActiveRequest) { TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); ReadyWatcher drained; - pool_.addDrainedCallback([&]() -> void { drained.ready(); }); + pool_->addDrainedCallback([&]() -> void { drained.ready(); }); EXPECT_CALL(dispatcher_, deferredDelete_(_)); EXPECT_CALL(drained, ready()); @@ -939,7 +998,7 @@ TEST_F(Http2ConnPoolImplTest, DrainDisconnectDrainingWithActiveRequest) { TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); ReadyWatcher drained; - pool_.addDrainedCallback([&]() -> void { drained.ready(); }); + pool_->addDrainedCallback([&]() -> void { drained.ready(); }); EXPECT_CALL(dispatcher_, deferredDelete_(_)); EXPECT_CALL(r2.decoder_, decodeHeaders_(_, true)); @@ -979,7 +1038,7 @@ TEST_F(Http2ConnPoolImplTest, DrainPrimary) { TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true); ReadyWatcher drained; - pool_.addDrainedCallback([&]() -> void { drained.ready(); }); + pool_->addDrainedCallback([&]() -> void { drained.ready(); }); EXPECT_CALL(dispatcher_, deferredDelete_(_)); EXPECT_CALL(r2.decoder_, decodeHeaders_(_, true)); @@ -1030,7 +1089,7 @@ TEST_F(Http2ConnPoolImplTest, DrainPrimaryNoActiveRequest) { ReadyWatcher drained; EXPECT_CALL(drained, ready()); - pool_.addDrainedCallback([&]() -> void { drained.ready(); }); + pool_->addDrainedCallback([&]() -> void { drained.ready(); }); EXPECT_CALL(*this, onClientDestroy()); dispatcher_.clearDeferredDeleteList(); @@ -1089,7 +1148,7 @@ TEST_F(Http2ConnPoolImplTest, MaxGlobalRequests) { ConnPoolCallbacks callbacks; MockResponseDecoder decoder; EXPECT_CALL(callbacks.pool_failure_, ready()); - EXPECT_EQ(nullptr, pool_.newStream(decoder, callbacks)); + EXPECT_EQ(nullptr, pool_->newStream(decoder, callbacks)); test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); EXPECT_CALL(*this, onClientDestroy()); @@ -1133,7 +1192,7 @@ TEST_F(Http2ConnPoolImplTest, GoAway) { } TEST_F(Http2ConnPoolImplTest, NoActiveConnectionsByDefault) { - EXPECT_FALSE(pool_.hasActiveConnections()); + EXPECT_FALSE(pool_->hasActiveConnections()); } // Show that an active request on the primary connection is considered active. 
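// --- Illustrative sketch (editor's aside, not part of the patch) --------------------------
// The hasActiveConnections() assertions exercised in these tests treat a pool as "active"
// while it has pending streams or clients carrying in-flight requests, and as inactive once
// every remaining connection is idle. A minimal standalone model of that rule, with
// hypothetical names rather than the pool's real internals:
#include <cstddef>

namespace sketch {

struct PoolState {
  std::size_t pending_streams{0}; // streams still waiting for a connection
  std::size_t busy_clients{0};    // connections with at least one in-flight request
  std::size_t ready_clients{0};   // connected but idle

  bool hasActiveConnections() const { return pending_streams > 0 || busy_clients > 0; }
};

} // namespace sketch
// -------------------------------------------------------------------------------------------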
@@ -1142,7 +1201,7 @@ TEST_F(Http2ConnPoolImplTest, ActiveConnectionsHasActiveRequestsTrue) { ActiveTestRequest r1(*this, 0, false); expectClientConnect(0, r1); - EXPECT_TRUE(pool_.hasActiveConnections()); + EXPECT_TRUE(pool_->hasActiveConnections()); completeRequestCloseUpstream(0, r1); } @@ -1152,7 +1211,7 @@ TEST_F(Http2ConnPoolImplTest, PendingRequestsConsideredActive) { expectClientCreate(); ActiveTestRequest r1(*this, 0, false); - EXPECT_TRUE(pool_.hasActiveConnections()); + EXPECT_TRUE(pool_->hasActiveConnections()); expectClientConnect(0, r1); completeRequestCloseUpstream(0, r1); @@ -1166,7 +1225,7 @@ TEST_F(Http2ConnPoolImplTest, ResponseCompletedConnectionReadyNoActiveConnection expectClientConnect(0, r1); completeRequest(r1); - EXPECT_FALSE(pool_.hasActiveConnections()); + EXPECT_FALSE(pool_->hasActiveConnections()); closeClient(0); } @@ -1177,9 +1236,9 @@ TEST_F(Http2ConnPoolImplTest, DrainingConnectionsConsideredActive) { expectClientCreate(); ActiveTestRequest r1(*this, 0, false); expectClientConnect(0, r1); - pool_.drainConnections(); + pool_->drainConnections(); - EXPECT_TRUE(pool_.hasActiveConnections()); + EXPECT_TRUE(pool_->hasActiveConnections()); completeRequest(r1); closeClient(0); @@ -1191,10 +1250,10 @@ TEST_F(Http2ConnPoolImplTest, DrainedConnectionsNotActive) { expectClientCreate(); ActiveTestRequest r1(*this, 0, false); expectClientConnect(0, r1); - pool_.drainConnections(); + pool_->drainConnections(); completeRequest(r1); - EXPECT_FALSE(pool_.hasActiveConnections()); + EXPECT_FALSE(pool_->hasActiveConnections()); closeClient(0); } diff --git a/test/common/network/transport_socket_options_impl_test.cc b/test/common/network/transport_socket_options_impl_test.cc index d330a8edf8d3..51535afa53ba 100644 --- a/test/common/network/transport_socket_options_impl_test.cc +++ b/test/common/network/transport_socket_options_impl_test.cc @@ -1,3 +1,4 @@ +#include "common/http/utility.h" #include "common/network/application_protocol.h" #include "common/network/transport_socket_options_impl.h" #include "common/network/upstream_server_name.h" @@ -37,7 +38,8 @@ TEST_F(TransportSocketOptionsImplTest, UpstreamServer) { } TEST_F(TransportSocketOptionsImplTest, ApplicationProtocols) { - std::vector http_alpns{"h2", "http/1.1"}; + std::vector http_alpns{Http::Utility::AlpnNames::get().Http2, + Http::Utility::AlpnNames::get().Http11}; filter_state_.setData( ApplicationProtocols::key(), std::make_unique(http_alpns), StreamInfo::FilterState::StateType::ReadOnly, StreamInfo::FilterState::LifeSpan::FilterChain); @@ -47,7 +49,8 @@ TEST_F(TransportSocketOptionsImplTest, ApplicationProtocols) { } TEST_F(TransportSocketOptionsImplTest, Both) { - std::vector http_alpns{"h2", "http/1.1"}; + std::vector http_alpns{Http::Utility::AlpnNames::get().Http2, + Http::Utility::AlpnNames::get().Http11}; filter_state_.setData( UpstreamServerName::key(), std::make_unique("www.example.com"), StreamInfo::FilterState::StateType::ReadOnly, StreamInfo::FilterState::LifeSpan::FilterChain); diff --git a/test/config/utility.cc b/test/config/utility.cc index cfbe5a92bf11..6cc4a7063b24 100644 --- a/test/config/utility.cc +++ b/test/config/utility.cc @@ -801,8 +801,8 @@ bool ConfigHelper::setListenerAccessLog(const std::string& filename, absl::strin void ConfigHelper::initializeTls( const ServerSslOptions& options, envoy::extensions::transport_sockets::tls::v3::CommonTlsContext& common_tls_context) { - common_tls_context.add_alpn_protocols("h2"); - common_tls_context.add_alpn_protocols("http/1.1"); + 
common_tls_context.add_alpn_protocols(Http::Utility::AlpnNames::get().Http2); + common_tls_context.add_alpn_protocols(Http::Utility::AlpnNames::get().Http11); auto* validation_context = common_tls_context.mutable_validation_context(); validation_context->mutable_trusted_ca()->set_filename( diff --git a/test/extensions/filters/listener/http_inspector/http_inspector_test.cc b/test/extensions/filters/listener/http_inspector/http_inspector_test.cc index c55189470427..0467a35ef844 100644 --- a/test/extensions/filters/listener/http_inspector/http_inspector_test.cc +++ b/test/extensions/filters/listener/http_inspector/http_inspector_test.cc @@ -104,7 +104,7 @@ TEST_F(HttpInspectorTest, InlineReadInspectHttp10) { memcpy(buffer, header.data(), header.size()); return Api::SysCallSizeResult{ssize_t(header.size()), 0}; })); - const std::vector alpn_protos{absl::string_view("http/1.0")}; + const std::vector alpn_protos{Http::Utility::AlpnNames::get().Http10}; EXPECT_CALL(dispatcher_, createFileEvent_(_, _, _, _)).Times(0); @@ -152,7 +152,7 @@ TEST_F(HttpInspectorTest, InspectHttp10) { return Api::SysCallSizeResult{ssize_t(header.size()), 0}; })); - const std::vector alpn_protos{absl::string_view("http/1.0")}; + const std::vector alpn_protos{Http::Utility::AlpnNames::get().Http10}; EXPECT_CALL(socket_, setRequestedApplicationProtocols(alpn_protos)); EXPECT_CALL(cb_, continueFilterChain(true)); @@ -176,7 +176,7 @@ TEST_F(HttpInspectorTest, InspectHttp11) { return Api::SysCallSizeResult{ssize_t(header.size()), 0}; })); - const std::vector alpn_protos{absl::string_view("http/1.1")}; + const std::vector alpn_protos{Http::Utility::AlpnNames::get().Http11}; EXPECT_CALL(socket_, setRequestedApplicationProtocols(alpn_protos)); EXPECT_CALL(cb_, continueFilterChain(true)); @@ -200,7 +200,7 @@ TEST_F(HttpInspectorTest, InspectHttp11WithNonEmptyRequestBody) { return Api::SysCallSizeResult{ssize_t(header.size()), 0}; })); - const std::vector alpn_protos{absl::string_view("http/1.1")}; + const std::vector alpn_protos{Http::Utility::AlpnNames::get().Http11}; EXPECT_CALL(socket_, setRequestedApplicationProtocols(alpn_protos)); EXPECT_CALL(cb_, continueFilterChain(true)); @@ -221,7 +221,7 @@ TEST_F(HttpInspectorTest, ExtraSpaceInRequestLine) { return Api::SysCallSizeResult{ssize_t(header.size()), 0}; })); - const std::vector alpn_protos{absl::string_view("http/1.1")}; + const std::vector alpn_protos{Http::Utility::AlpnNames::get().Http11}; EXPECT_CALL(socket_, setRequestedApplicationProtocols(alpn_protos)); EXPECT_CALL(cb_, continueFilterChain(true)); @@ -277,7 +277,7 @@ TEST_F(HttpInspectorTest, OldHttpProtocol) { return Api::SysCallSizeResult{ssize_t(header.size()), 0}; })); - const std::vector alpn_protos{absl::string_view("http/1.0")}; + const std::vector alpn_protos{Http::Utility::AlpnNames::get().Http10}; EXPECT_CALL(socket_, setRequestedApplicationProtocols(alpn_protos)); EXPECT_CALL(cb_, continueFilterChain(true)); file_event_callback_(Event::FileReadyType::Read); @@ -321,7 +321,7 @@ TEST_F(HttpInspectorTest, InspectHttp2) { return Api::SysCallSizeResult{ssize_t(data.size()), 0}; })); - const std::vector alpn_protos{absl::string_view("h2c")}; + const std::vector alpn_protos{Http::Utility::AlpnNames::get().Http2c}; EXPECT_CALL(socket_, setRequestedApplicationProtocols(alpn_protos)); EXPECT_CALL(cb_, continueFilterChain(true)); @@ -362,7 +362,7 @@ TEST_F(HttpInspectorTest, ReadError) { TEST_F(HttpInspectorTest, MultipleReadsHttp2) { init(); - const std::vector alpn_protos = {absl::string_view("h2c")}; + const 
std::vector alpn_protos{Http::Utility::AlpnNames::get().Http2c}; const std::string header = "505249202a20485454502f322e300d0a0d0a534d0d0a0d0a00000c04000000000000041000000000020000000000" @@ -455,7 +455,7 @@ TEST_F(HttpInspectorTest, MultipleReadsHttp1) { } bool got_continue = false; - const std::vector alpn_protos = {absl::string_view("http/1.0")}; + const std::vector alpn_protos{Http::Utility::AlpnNames::get().Http10}; EXPECT_CALL(socket_, setRequestedApplicationProtocols(alpn_protos)); EXPECT_CALL(cb_, continueFilterChain(true)).WillOnce(InvokeWithoutArgs([&got_continue]() { got_continue = true; @@ -599,7 +599,7 @@ TEST_F(HttpInspectorTest, Http1WithLargeRequestLine) { } bool got_continue = false; - const std::vector alpn_protos = {absl::string_view("http/1.0")}; + const std::vector alpn_protos{Http::Utility::AlpnNames::get().Http10}; EXPECT_CALL(socket_, setRequestedApplicationProtocols(alpn_protos)); EXPECT_CALL(cb_, continueFilterChain(true)).WillOnce(InvokeWithoutArgs([&got_continue]() { got_continue = true; @@ -635,7 +635,7 @@ TEST_F(HttpInspectorTest, Http1WithLargeHeader) { } bool got_continue = false; - const std::vector alpn_protos = {absl::string_view("http/1.0")}; + const std::vector alpn_protos{Http::Utility::AlpnNames::get().Http10}; EXPECT_CALL(socket_, setRequestedApplicationProtocols(alpn_protos)); EXPECT_CALL(cb_, continueFilterChain(true)).WillOnce(InvokeWithoutArgs([&got_continue]() { got_continue = true; diff --git a/test/extensions/filters/listener/tls_inspector/tls_inspector_benchmark.cc b/test/extensions/filters/listener/tls_inspector/tls_inspector_benchmark.cc index 7cb0915a89d0..1552954e89bb 100644 --- a/test/extensions/filters/listener/tls_inspector/tls_inspector_benchmark.cc +++ b/test/extensions/filters/listener/tls_inspector/tls_inspector_benchmark.cc @@ -85,7 +85,8 @@ static void BM_TlsInspector(benchmark::State& state) { RELEASE_ASSERT(socket.detectedTransportProtocol() == "tls", ""); RELEASE_ASSERT(socket.requestedServerName() == "example.com", ""); RELEASE_ASSERT(socket.requestedApplicationProtocols().size() == 2 && - socket.requestedApplicationProtocols().front() == "h2", + socket.requestedApplicationProtocols().front() == + Http::Utility::AlpnNames::get().Http2, ""); socket.setDetectedTransportProtocol(""); socket.setRequestedServerName(""); diff --git a/test/extensions/filters/listener/tls_inspector/tls_inspector_test.cc b/test/extensions/filters/listener/tls_inspector/tls_inspector_test.cc index 8d1da06977a4..d16303dc8d5b 100644 --- a/test/extensions/filters/listener/tls_inspector/tls_inspector_test.cc +++ b/test/extensions/filters/listener/tls_inspector/tls_inspector_test.cc @@ -127,8 +127,8 @@ TEST_P(TlsInspectorTest, SniRegistered) { // Test that a ClientHello with an ALPN value causes the correct name notification. TEST_P(TlsInspectorTest, AlpnRegistered) { init(); - const std::vector alpn_protos = {absl::string_view("h2"), - absl::string_view("http/1.1")}; + const auto alpn_protos = std::vector{Http::Utility::AlpnNames::get().Http2, + Http::Utility::AlpnNames::get().Http11}; std::vector client_hello = Tls::Test::generateClientHello( std::get<0>(GetParam()), std::get<1>(GetParam()), "", "\x02h2\x08http/1.1"); EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK)) @@ -151,7 +151,7 @@ TEST_P(TlsInspectorTest, AlpnRegistered) { // Test with the ClientHello spread over multiple socket reads. 
TEST_P(TlsInspectorTest, MultipleReads) { init(); - const std::vector alpn_protos = {absl::string_view("h2")}; + const auto alpn_protos = std::vector{Http::Utility::AlpnNames::get().Http2}; const std::string servername("example.com"); std::vector client_hello = Tls::Test::generateClientHello( std::get<0>(GetParam()), std::get<1>(GetParam()), servername, "\x02h2"); @@ -256,7 +256,7 @@ TEST_P(TlsInspectorTest, InlineReadSucceed) { EXPECT_CALL(cb_, socket()).WillRepeatedly(ReturnRef(socket_)); EXPECT_CALL(cb_, dispatcher()).WillRepeatedly(ReturnRef(dispatcher_)); EXPECT_CALL(socket_, ioHandle()).WillRepeatedly(ReturnRef(*io_handle_)); - const std::vector alpn_protos = {absl::string_view("h2")}; + const auto alpn_protos = std::vector{Http::Utility::AlpnNames::get().Http2}; const std::string servername("example.com"); std::vector client_hello = Tls::Test::generateClientHello( std::get<0>(GetParam()), std::get<1>(GetParam()), servername, "\x02h2"); diff --git a/test/extensions/transport_sockets/tls/ssl_socket_test.cc b/test/extensions/transport_sockets/tls/ssl_socket_test.cc index aa2c429a4ed0..b8481bc28a97 100644 --- a/test/extensions/transport_sockets/tls/ssl_socket_test.cc +++ b/test/extensions/transport_sockets/tls/ssl_socket_test.cc @@ -3847,6 +3847,11 @@ TEST_P(SslSocketTest, ALPN) { testUtilV2(test_options); client_ctx->clear_alpn_protocols(); server_ctx->clear_alpn_protocols(); + + // Client attempts to configure ALPN that is too large. + client_ctx->add_alpn_protocols(std::string(100000, 'a')); + EXPECT_THROW_WITH_MESSAGE(testUtilV2(test_options), EnvoyException, + "Invalid ALPN protocol string"); } TEST_P(SslSocketTest, CipherSuites) { @@ -4189,15 +4194,29 @@ TEST_P(SslSocketTest, OverrideApplicationProtocols) { server_ctx->add_alpn_protocols("test"); testUtilV2(test_options); server_ctx->clear_alpn_protocols(); - // Override client side ALPN, "test" ALPN is used. server_ctx->add_alpn_protocols("test"); - Network::TransportSocketOptionsSharedPtr transport_socket_options( - new Network::TransportSocketOptionsImpl("", {}, {"foo", "test", "bar"})); + auto transport_socket_options = std::make_shared( + "", std::vector{}, std::vector{"foo", "test", "bar"}); testUtilV2(test_options.setExpectedALPNProtocol("test").setTransportSocketOptions( transport_socket_options)); - server_ctx->clear_alpn_protocols(); + + // Set fallback ALPN on the client side ALPN, "test" ALPN is used since no ALPN is specified + // in the config. + server_ctx->add_alpn_protocols("test"); + transport_socket_options = std::make_shared( + "", std::vector{}, std::vector{}, "test"); + testUtilV2(test_options.setExpectedALPNProtocol("test").setTransportSocketOptions( + transport_socket_options)); + + // Update the client TLS config to specify ALPN. The fallback value should no longer be used. + // Note that the server prefers "test" over "bar", but since the client only configures "bar", + // the resulting ALPN will be "bar" even though "test" is included in the fallback. 
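// --- Illustrative sketch (editor's aside, not part of the patch) --------------------------
// The ssl_socket_test.cc cases above exercise the precedence between ALPN configured on the
// TLS context and the per-connection fallback carried in TransportSocketOptions: the
// configured list wins, and the fallback is only offered when the config specifies nothing.
// A minimal standalone helper expressing that rule (hypothetical name, not Envoy's code path):
#include <string>
#include <vector>

namespace sketch {

inline std::vector<std::string> alpnToOffer(const std::vector<std::string>& configured_alpn,
                                            const std::string& fallback_alpn) {
  if (!configured_alpn.empty()) {
    return configured_alpn; // explicit config always takes precedence
  }
  return fallback_alpn.empty() ? std::vector<std::string>{}
                               : std::vector<std::string>{fallback_alpn};
}

} // namespace sketch
// -------------------------------------------------------------------------------------------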
+ server_ctx->add_alpn_protocols("bar"); + client.mutable_common_tls_context()->add_alpn_protocols("bar"); + testUtilV2(test_options.setExpectedALPNProtocol("bar").setTransportSocketOptions( + transport_socket_options)); } // Validate that if downstream secrets are not yet downloaded from SDS server, Envoy creates diff --git a/test/integration/BUILD b/test/integration/BUILD index a4df5652bf62..8b52391380c1 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -285,6 +285,23 @@ envoy_sh_test( ], ) +envoy_cc_test( + name = "alpn_selection_integration_test", + srcs = [ + "alpn_selection_integration_test.cc", + ], + data = [ + "//test/config/integration/certs", + ], + deps = [ + ":http_integration_lib", + "//test/test_common:utility_lib", + "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", + "@envoy_api//envoy/config/route/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto", + ], +) + envoy_cc_test( name = "header_integration_test", srcs = [ diff --git a/test/integration/alpn_selection_integration_test.cc b/test/integration/alpn_selection_integration_test.cc new file mode 100644 index 000000000000..a576f51d3e16 --- /dev/null +++ b/test/integration/alpn_selection_integration_test.cc @@ -0,0 +1,213 @@ +#include "envoy/config/bootstrap/v3/bootstrap.pb.h" +#include "envoy/config/route/v3/route_components.pb.h" +#include "envoy/extensions/transport_sockets/tls/v3/cert.pb.h" + +#include "common/http/utility.h" + +#include "extensions/transport_sockets/tls/context_config_impl.h" +#include "extensions/transport_sockets/tls/context_impl.h" +#include "extensions/transport_sockets/tls/ssl_socket.h" + +#include "test/integration/http_integration.h" + +#include "absl/strings/str_replace.h" +#include "gtest/gtest.h" + +namespace Envoy { + +class AlpnSelectionIntegrationTest : public testing::Test, public HttpIntegrationTest { +public: + AlpnSelectionIntegrationTest() + : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, + TestEnvironment::getIpVersionsForTest().front(), + ConfigHelper::httpProxyConfig()) {} + + void initialize() override { + setDownstreamProtocol(Http::CodecClient::Type::HTTP1); + setUpstreamProtocol(use_h2_ ? 
FakeHttpConnection::Type::HTTP2 + : FakeHttpConnection::Type::HTTP1); + config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + auto* static_resources = bootstrap.mutable_static_resources(); + auto* cluster = static_resources->mutable_clusters(0); + + if (use_h2_) { + cluster->mutable_http2_protocol_options(); + } + const std::string transport_socket_yaml = absl::StrFormat( + R"EOF( +name: tls +typed_config: + "@type": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext + common_tls_context: + alpn_protocols: [ %s ] + tls_certificates: + - certificate_chain: { filename: "%s" } + private_key: { filename: "%s" } + )EOF", + absl::StrJoin(configured_alpn_, ","), + TestEnvironment::runfilesPath("test/config/integration/certs/clientcert.pem"), + TestEnvironment::runfilesPath("test/config/integration/certs/clientkey.pem")); + auto* transport_socket = cluster->mutable_transport_socket(); + TestUtility::loadFromYaml(transport_socket_yaml, *transport_socket); + }); + HttpIntegrationTest::initialize(); + } + + Network::TransportSocketFactoryPtr createUpstreamSslContext() { + envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context; + const std::string yaml = absl::StrFormat( + R"EOF( +common_tls_context: + alpn_protocols: [%s] + tls_certificates: + - certificate_chain: { filename: "%s" } + private_key: { filename: "%s" } + validation_context: + trusted_ca: { filename: "%s" } +require_client_certificate: true +)EOF", + absl::StrJoin(upstream_alpn_, ","), + TestEnvironment::runfilesPath("test/config/integration/certs/upstreamcert.pem"), + TestEnvironment::runfilesPath("test/config/integration/certs/upstreamkey.pem"), + TestEnvironment::runfilesPath("test/config/integration/certs/cacert.pem")); + TestUtility::loadFromYaml(yaml, tls_context); + auto cfg = std::make_unique( + tls_context, factory_context_); + static Stats::Scope* upstream_stats_store = new Stats::IsolatedStoreImpl(); + return std::make_unique( + std::move(cfg), context_manager_, *upstream_stats_store, std::vector{}); + } + + void createUpstreams() override { + auto endpoint = upstream_address_fn_(0); + fake_upstreams_.emplace_back(new FakeUpstream( + createUpstreamSslContext(), endpoint->ip()->port(), + use_h2_ ? FakeHttpConnection::Type::HTTP2 : FakeHttpConnection::Type::HTTP1, + endpoint->ip()->version(), timeSystem())); + } + + bool use_h2_{}; + std::vector upstream_alpn_; + std::vector configured_alpn_; +}; + +// No upstream ALPN is specified in the protocol, but we successfully negotiate h2 ALPN +// due to the default ALPN set through the HTTP/2 conn pool. +TEST_F(AlpnSelectionIntegrationTest, Http2UpstreamMatchingAlpn) { + use_h2_ = true; + upstream_alpn_.emplace_back(Http::Utility::AlpnNames::get().Http2); + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + IntegrationStreamDecoderPtr response = + codec_client_->makeHeaderOnlyRequest(default_request_headers_); + + waitForNextUpstreamRequest(); + EXPECT_EQ(Http::Utility::AlpnNames::get().Http2, + fake_upstream_connection_->connection().nextProtocol()); + + upstream_request_->encodeHeaders(default_response_headers_, true); + response->waitForEndStream(); + EXPECT_EQ("200", response->headers().getStatusValue()); +} + +// No upstream ALPN is specified in the protocol and we fail to negotiate h2 ALPN +// since the upstream doesn't list h2 in its ALPN list. Note that the call still goes +// through because ALPN negotiation failure doesn't necessarily fail the call. 
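// --- Illustrative sketch (editor's aside, not part of the patch) --------------------------
// These integration tests rely on the default ALPN fallback each upstream connection pool
// supplies when the cluster's TLS context configures no ALPN of its own: "h2" for the HTTP/2
// pool and "http/1.1" for the HTTP/1 pool. A minimal standalone expression of that mapping
// (hypothetical enum and function, not Envoy's actual API):
#include <string>

namespace sketch {

enum class UpstreamCodec { Http1, Http2 };

inline std::string defaultAlpnFallback(UpstreamCodec codec) {
  return codec == UpstreamCodec::Http2 ? "h2" : "http/1.1";
}

} // namespace sketch
// -------------------------------------------------------------------------------------------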
+// TODO(snowp): We should actually fail the handshake in case of negotiation failure, +// fix that and update these tests. +TEST_F(AlpnSelectionIntegrationTest, Http2UpstreamMismatchingAlpn) { + use_h2_ = true; + upstream_alpn_.emplace_back(Http::Utility::AlpnNames::get().Http11); + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + IntegrationStreamDecoderPtr response = + codec_client_->makeHeaderOnlyRequest(default_request_headers_); + + waitForNextUpstreamRequest(); + // No ALPN negotiated. + EXPECT_EQ("", fake_upstream_connection_->connection().nextProtocol()); + + upstream_request_->encodeHeaders(default_response_headers_, true); + response->waitForEndStream(); + EXPECT_EQ("200", response->headers().getStatusValue()); +} + +// The upstream supports h2,custom-alpn, and we configure the upstream TLS context to negotiate +// custom-alpn. No attempt to negotiate h2 should happen, so we should select custom-alpn. +TEST_F(AlpnSelectionIntegrationTest, Http2UpstreamConfiguredALPN) { + use_h2_ = true; + upstream_alpn_.emplace_back(Http::Utility::AlpnNames::get().Http2); + upstream_alpn_.emplace_back("custom-alpn"); + configured_alpn_.emplace_back("custom-alpn"); + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + IntegrationStreamDecoderPtr response = + codec_client_->makeHeaderOnlyRequest(default_request_headers_); + + waitForNextUpstreamRequest(); + EXPECT_EQ("custom-alpn", fake_upstream_connection_->connection().nextProtocol()); + + upstream_request_->encodeHeaders(default_response_headers_, true); + response->waitForEndStream(); + EXPECT_EQ("200", response->headers().getStatusValue()); +} + +// No upstream ALPN is specified in the protocol, but we successfully negotiate http/1.1 ALPN +// due to the default ALPN set through the HTTP/1.1 conn pool. +TEST_F(AlpnSelectionIntegrationTest, Http11UpstreaMatchingAlpn) { + upstream_alpn_.emplace_back(Http::Utility::AlpnNames::get().Http11); + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + IntegrationStreamDecoderPtr response = + codec_client_->makeHeaderOnlyRequest(default_request_headers_); + + waitForNextUpstreamRequest(); + EXPECT_EQ(Http::Utility::AlpnNames::get().Http11, + fake_upstream_connection_->connection().nextProtocol()); + + upstream_request_->encodeHeaders(default_response_headers_, true); + response->waitForEndStream(); + EXPECT_EQ("200", response->headers().getStatusValue()); +} + +// The upstream only lists h2 but we attempt to negotiate http/1.1 due to the default ALPN set by +// the conn pool. This results in no protocol being negotiated. Note that the call still goes +// through because ALPN negotiation failure doesn't necessarily fail the call. +TEST_F(AlpnSelectionIntegrationTest, Http11UpstreaMismatchingAlpn) { + upstream_alpn_.emplace_back(Http::Utility::AlpnNames::get().Http2); + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + IntegrationStreamDecoderPtr response = + codec_client_->makeHeaderOnlyRequest(default_request_headers_); + + waitForNextUpstreamRequest(); + // No ALPN selected. + EXPECT_EQ("", fake_upstream_connection_->connection().nextProtocol()); + + upstream_request_->encodeHeaders(default_response_headers_, true); + response->waitForEndStream(); + EXPECT_EQ("200", response->headers().getStatusValue()); +} + +// The upstream supports http/1.1,custom-alpn, and we configure the upstream TLS context to +// negotiate custom-alpn. No attempt to negotiate http/1.1 should happen, so we should select +// custom-alpn. 
+// TODO(snowp): We should actually fail the handshake in case of negotiation failure, +// fix that and update these tests. +TEST_F(AlpnSelectionIntegrationTest, Http11UpstreamConfiguredALPN) { + upstream_alpn_.emplace_back(Http::Utility::AlpnNames::get().Http11); + upstream_alpn_.emplace_back("custom-alpn"); + configured_alpn_.emplace_back("custom-alpn"); + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + IntegrationStreamDecoderPtr response = + codec_client_->makeHeaderOnlyRequest(default_request_headers_); + + waitForNextUpstreamRequest(); + EXPECT_EQ("custom-alpn", fake_upstream_connection_->connection().nextProtocol()); + + upstream_request_->encodeHeaders(default_response_headers_, true); + response->waitForEndStream(); + EXPECT_EQ("200", response->headers().getStatusValue()); +} +} // namespace Envoy diff --git a/test/integration/integration.cc b/test/integration/integration.cc index cc2c9a458010..74f7667d2f01 100644 --- a/test/integration/integration.cc +++ b/test/integration/integration.cc @@ -590,7 +590,7 @@ void BaseIntegrationTest::createXdsUpstream() { } else { envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context; auto* common_tls_context = tls_context.mutable_common_tls_context(); - common_tls_context->add_alpn_protocols("h2"); + common_tls_context->add_alpn_protocols(Http::Utility::AlpnNames::get().Http2); auto* tls_cert = common_tls_context->add_tls_certificates(); tls_cert->mutable_certificate_chain()->set_filename( TestEnvironment::runfilesPath("test/config/integration/certs/upstreamcert.pem")); diff --git a/test/integration/sds_dynamic_integration_test.cc b/test/integration/sds_dynamic_integration_test.cc index 34c1763e3998..426e7c6a3a30 100644 --- a/test/integration/sds_dynamic_integration_test.cc +++ b/test/integration/sds_dynamic_integration_test.cc @@ -9,6 +9,7 @@ #include "common/config/api_version.h" #include "common/event/dispatcher_impl.h" +#include "common/http/utility.h" #include "common/network/connection_impl.h" #include "common/network/utility.h" @@ -151,7 +152,7 @@ class SdsDynamicDownstreamIntegrationTest : public SdsDynamicIntegrationBaseTest ->mutable_listeners(0) ->mutable_filter_chains(0) ->mutable_transport_socket(); - common_tls_context->add_alpn_protocols("http/1.1"); + common_tls_context->add_alpn_protocols(Http::Utility::AlpnNames::get().Http11); auto* validation_context = common_tls_context->mutable_validation_context(); validation_context->mutable_trusted_ca()->set_filename( @@ -254,7 +255,7 @@ class SdsDynamicDownstreamCertValidationContextTest : public SdsDynamicDownstrea ->mutable_transport_socket(); envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context; auto* common_tls_context = tls_context.mutable_common_tls_context(); - common_tls_context->add_alpn_protocols("http/1.1"); + common_tls_context->add_alpn_protocols(Http::Utility::AlpnNames::get().Http11); auto* tls_certificate = common_tls_context->add_tls_certificates(); tls_certificate->mutable_certificate_chain()->set_filename( diff --git a/test/integration/sds_static_integration_test.cc b/test/integration/sds_static_integration_test.cc index f0a92b8e19c7..38f47be55031 100644 --- a/test/integration/sds_static_integration_test.cc +++ b/test/integration/sds_static_integration_test.cc @@ -46,7 +46,7 @@ class SdsStaticDownstreamIntegrationTest ->mutable_transport_socket(); envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context; auto* common_tls_context = 
tls_context.mutable_common_tls_context(); - common_tls_context->add_alpn_protocols("http/1.1"); + common_tls_context->add_alpn_protocols(Http::Utility::AlpnNames::get().Http11); common_tls_context->mutable_validation_context_sds_secret_config()->set_name( "validation_context"); diff --git a/test/integration/ssl_utility.cc b/test/integration/ssl_utility.cc index af54c2cdb40a..14c1a0bf85c8 100644 --- a/test/integration/ssl_utility.cc +++ b/test/integration/ssl_utility.cc @@ -2,6 +2,7 @@ #include "envoy/extensions/transport_sockets/tls/v3/cert.pb.h" +#include "common/http/utility.h" #include "common/json/json_loader.h" #include "common/network/utility.h" @@ -54,8 +55,8 @@ createClientSslTransportSocketFactory(const ClientSslTransportOptions& options, auto* common_context = tls_context.mutable_common_tls_context(); if (options.alpn_) { - common_context->add_alpn_protocols("h2"); - common_context->add_alpn_protocols("http/1.1"); + common_context->add_alpn_protocols(Http::Utility::AlpnNames::get().Http2); + common_context->add_alpn_protocols(Http::Utility::AlpnNames::get().Http11); } if (options.san_) { common_context->mutable_validation_context() diff --git a/test/server/listener_manager_impl_test.cc b/test/server/listener_manager_impl_test.cc index 36b1195fd28f..59cda6c5f1af 100644 --- a/test/server/listener_manager_impl_test.cc +++ b/test/server/listener_manager_impl_test.cc @@ -2192,7 +2192,10 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithApplicationP EXPECT_EQ(filter_chain, nullptr); // TLS client with "http/1.1" ALPN - using 1st filter chain. - filter_chain = findFilterChain(1234, "127.0.0.1", "", "tls", {"h2", "http/1.1"}, "8.8.8.8", 111); + filter_chain = findFilterChain( + 1234, "127.0.0.1", "", "tls", + {Http::Utility::AlpnNames::get().Http2, Http::Utility::AlpnNames::get().Http11}, "8.8.8.8", + 111); ASSERT_NE(filter_chain, nullptr); EXPECT_TRUE(filter_chain->transportSocketFactory().implementsSecureTransport()); auto transport_socket = filter_chain->transportSocketFactory().createTransportSocket(nullptr); @@ -2235,8 +2238,10 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithSourceTypeMa EXPECT_EQ(filter_chain, nullptr); // LOCAL IPv4 client with "http/1.1" ALPN - using 1st filter chain. - filter_chain = - findFilterChain(1234, "127.0.0.1", "", "tls", {"h2", "http/1.1"}, "127.0.0.1", 111); + filter_chain = findFilterChain( + 1234, "127.0.0.1", "", "tls", + {Http::Utility::AlpnNames::get().Http2, Http::Utility::AlpnNames::get().Http11}, "127.0.0.1", + 111); ASSERT_NE(filter_chain, nullptr); EXPECT_TRUE(filter_chain->transportSocketFactory().implementsSecureTransport()); auto transport_socket = filter_chain->transportSocketFactory().createTransportSocket(nullptr); @@ -2247,8 +2252,10 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithSourceTypeMa EXPECT_EQ(server_names.front(), "server1.example.com"); // LOCAL UDS client with "http/1.1" ALPN - using 1st filter chain. 
- filter_chain = - findFilterChain(0, "/tmp/test.sock", "", "tls", {"h2", "http/1.1"}, "/tmp/test.sock", 111); + filter_chain = findFilterChain( + 0, "/tmp/test.sock", "", "tls", + {Http::Utility::AlpnNames::get().Http2, Http::Utility::AlpnNames::get().Http11}, + "/tmp/test.sock", 111); ASSERT_NE(filter_chain, nullptr); EXPECT_TRUE(filter_chain->transportSocketFactory().implementsSecureTransport()); transport_socket = filter_chain->transportSocketFactory().createTransportSocket(nullptr); @@ -2292,8 +2299,10 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithSourceIpMatc EXPECT_EQ(filter_chain, nullptr); // IPv4 client with source 10.0.0.10, Match. - filter_chain = - findFilterChain(1234, "127.0.0.1", "", "tls", {"h2", "http/1.1"}, "10.0.0.10", 111); + filter_chain = findFilterChain( + 1234, "127.0.0.1", "", "tls", + {Http::Utility::AlpnNames::get().Http2, Http::Utility::AlpnNames::get().Http11}, "10.0.0.10", + 111); ASSERT_NE(filter_chain, nullptr); EXPECT_TRUE(filter_chain->transportSocketFactory().implementsSecureTransport()); auto transport_socket = filter_chain->transportSocketFactory().createTransportSocket(nullptr); @@ -2309,8 +2318,10 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithSourceIpMatc EXPECT_EQ(filter_chain, nullptr); // UDS client. No match. - filter_chain = - findFilterChain(0, "/tmp/test.sock", "", "tls", {"h2", "http/1.1"}, "/tmp/test.sock", 0); + filter_chain = findFilterChain( + 0, "/tmp/test.sock", "", "tls", + {Http::Utility::AlpnNames::get().Http2, Http::Utility::AlpnNames::get().Http11}, + "/tmp/test.sock", 0); ASSERT_EQ(filter_chain, nullptr); } @@ -2394,7 +2405,10 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithSourcePortMa EXPECT_EQ(server_names.front(), "server1.example.com"); // Client with source port 101. No match. - filter_chain = findFilterChain(1234, "8.8.8.8", "", "tls", {"h2", "http/1.1"}, "4.4.4.4", 101); + filter_chain = findFilterChain( + 1234, "8.8.8.8", "", "tls", + {Http::Utility::AlpnNames::get().Http2, Http::Utility::AlpnNames::get().Http11}, "4.4.4.4", + 101); ASSERT_EQ(filter_chain, nullptr); } @@ -2447,8 +2461,10 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainWithSourceType EXPECT_EQ(1U, manager_->listeners().size()); // LOCAL TLS client with "http/1.1" ALPN - no match. - auto filter_chain = - findFilterChain(1234, "127.0.0.1", "", "tls", {"h2", "http/1.1"}, "127.0.0.1", 111); + auto filter_chain = findFilterChain( + 1234, "127.0.0.1", "", "tls", + {Http::Utility::AlpnNames::get().Http2, Http::Utility::AlpnNames::get().Http11}, "127.0.0.1", + 111); EXPECT_EQ(filter_chain, nullptr); // LOCAL TLS client without "http/1.1" ALPN - using 1st filter chain. @@ -2463,7 +2479,10 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainWithSourceType EXPECT_EQ(server_names.front(), "server1.example.com"); // EXTERNAL TLS client with "http/1.1" ALPN - using 2nd filter chain. 
- filter_chain = findFilterChain(1234, "8.8.8.8", "", "tls", {"h2", "http/1.1"}, "4.4.4.4", 111); + filter_chain = findFilterChain( + 1234, "8.8.8.8", "", "tls", + {Http::Utility::AlpnNames::get().Http2, Http::Utility::AlpnNames::get().Http11}, "4.4.4.4", + 111); ASSERT_NE(filter_chain, nullptr); EXPECT_TRUE(filter_chain->transportSocketFactory().implementsSecureTransport()); transport_socket = filter_chain->transportSocketFactory().createTransportSocket(nullptr); @@ -2832,8 +2851,10 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithApplicati EXPECT_FALSE(filter_chain->transportSocketFactory().implementsSecureTransport()); // TLS client with "h2,http/1.1" ALPN - using 2nd filter chain. - filter_chain = - findFilterChain(1234, "127.0.0.1", "", "tls", {"h2", "http/1.1"}, "127.0.0.1", 111); + filter_chain = findFilterChain( + 1234, "127.0.0.1", "", "tls", + {Http::Utility::AlpnNames::get().Http2, Http::Utility::AlpnNames::get().Http11}, "127.0.0.1", + 111); ASSERT_NE(filter_chain, nullptr); EXPECT_TRUE(filter_chain->transportSocketFactory().implementsSecureTransport()); auto transport_socket = filter_chain->transportSocketFactory().createTransportSocket(nullptr); @@ -2885,14 +2906,18 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithMultipleR EXPECT_EQ(filter_chain, nullptr); // TLS client with ALPN match but without SNI - using 1st filter chain. - filter_chain = - findFilterChain(1234, "127.0.0.1", "", "tls", {"h2", "http/1.1"}, "127.0.0.1", 111); + filter_chain = findFilterChain( + 1234, "127.0.0.1", "", "tls", + {Http::Utility::AlpnNames::get().Http2, Http::Utility::AlpnNames::get().Http11}, "127.0.0.1", + 111); ASSERT_NE(filter_chain, nullptr); EXPECT_FALSE(filter_chain->transportSocketFactory().implementsSecureTransport()); // TLS client with exact SNI match and ALPN match - using 2nd filter chain. - filter_chain = findFilterChain(1234, "127.0.0.1", "server1.example.com", "tls", - {"h2", "http/1.1"}, "127.0.0.1", 111); + filter_chain = findFilterChain( + 1234, "127.0.0.1", "server1.example.com", "tls", + {Http::Utility::AlpnNames::get().Http2, Http::Utility::AlpnNames::get().Http11}, "127.0.0.1", + 111); ASSERT_NE(filter_chain, nullptr); EXPECT_TRUE(filter_chain->transportSocketFactory().implementsSecureTransport()); auto transport_socket = filter_chain->transportSocketFactory().createTransportSocket(nullptr); From 5a0a62d5c0a62795071a9d1e3255b135b0e05aa2 Mon Sep 17 00:00:00 2001 From: Andrew Lyu Date: Wed, 17 Jun 2020 03:38:30 +0800 Subject: [PATCH 363/909] docs: correct dynamicMetadata example (#11603) Signed-off-by: Andrew Lyu --- docs/root/configuration/http/http_filters/lua_filter.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/root/configuration/http/http_filters/lua_filter.rst b/docs/root/configuration/http/http_filters/lua_filter.rst index 7e381a69b2d5..0bea99a2ba0d 100644 --- a/docs/root/configuration/http/http_filters/lua_filter.rst +++ b/docs/root/configuration/http/http_filters/lua_filter.rst @@ -530,7 +530,7 @@ its keys can only be *string* or *numeric*. 
end function envoy_on_response(response_handle) - local meta = response_handle:streamInfo():dynamicMetadata()["request.info"] + local meta = response_handle:streamInfo():dynamicMetadata():get("envoy.filters.http.lua")["request.info"] response_handle:logInfo("Auth: "..meta.auth..", token: "..meta.token) end From 8e6a1eee9c343dd8d6805a520dd651f4c10c2b8b Mon Sep 17 00:00:00 2001 From: Yifan Yang Date: Tue, 16 Jun 2020 15:39:43 -0400 Subject: [PATCH 364/909] Convert std::pair CategoryDescription to struct for better readability (#11604) Signed-off-by: Yifan Yang --- source/common/common/perf_annotation.cc | 6 +++--- source/common/common/perf_annotation.h | 11 +++++++++-- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/source/common/common/perf_annotation.cc b/source/common/common/perf_annotation.cc index 4c1f08379f0a..daa39ff0ca31 100644 --- a/source/common/common/perf_annotation.cc +++ b/source/common/common/perf_annotation.cc @@ -34,7 +34,7 @@ PerfAnnotationContext::PerfAnnotationContext() = default; void PerfAnnotationContext::record(std::chrono::nanoseconds duration, absl::string_view category, absl::string_view description) { - CategoryDescription key((std::string(category)), (std::string(description))); + CategoryDescription key = {std::string(category), std::string(description)}; { #if PERF_THREAD_SAFE Thread::LockGuard lock(mutex_); @@ -112,8 +112,8 @@ std::string PerfAnnotationContext::toString() { columns[4].push_back(nanoseconds_string(stats.min_)); columns[5].push_back(nanoseconds_string(stats.max_)); const CategoryDescription& category_description = p->first; - columns[6].push_back(category_description.first); - columns[7].push_back(category_description.second); + columns[6].push_back(category_description.category); + columns[7].push_back(category_description.description); for (size_t i = 0; i < num_columns; ++i) { widths[i] = std::max(widths[i], columns[i].back().size()); } diff --git a/source/common/common/perf_annotation.h b/source/common/common/perf_annotation.h index e21b012d0a75..5701eaa68c32 100644 --- a/source/common/common/perf_annotation.h +++ b/source/common/common/perf_annotation.h @@ -117,7 +117,14 @@ class PerfAnnotationContext { */ PerfAnnotationContext(); - using CategoryDescription = std::pair; + struct CategoryDescription { + std::string category; + std::string description; + + bool operator==(const CategoryDescription& other) const { + return category == other.category && description == other.description; + } + }; struct DurationStats { std::chrono::nanoseconds total_{0}; @@ -128,7 +135,7 @@ class PerfAnnotationContext { struct Hash { size_t operator()(const CategoryDescription& a) const { - return std::hash()(a.first) + 13 * std::hash()(a.second); + return std::hash()(a.category) + 13 * std::hash()(a.description); } }; From a64a7b30647076408b1a130373e4b5e2694b2c15 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Tue, 16 Jun 2020 16:51:28 -0400 Subject: [PATCH 365/909] upstreams: creating TCP and HTTP upstream via factory (#11561) Part 3 of #11327, using the new configuration to create the upstream connection pool. 
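For illustration only (not part of this patch): once the factory lookup below is wired through, a cluster could opt into a specific upstream pool via the new `upstream_config` field. A minimal sketch, assuming the factory name and proto type registered in this change; the cluster name and all other required cluster fields are placeholders:

  clusters:
  - name: example_cluster   # placeholder; other required cluster fields omitted
    upstream_config:
      # selects the TCP upstream pool (as exercised by the new ConnectExplicitTcpUpstream test)
      name: envoy.filters.connection_pools.http.tcp
      typed_config:
        "@type": type.googleapis.com/envoy.extensions.upstreams.http.tcp.v3.TcpConnectionPoolProto

Omitting `upstream_config` keeps today's behavior: the router falls back to the generic factory ("envoy.filters.connection_pools.http.generic"), which picks the TCP or HTTP pool based on whether the request is a CONNECT.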
Risk Level: Medium (router refactor, intended as no-op) Testing: new unit tests Docs Changes: n/a Release Notes: pending final PR Signed-off-by: Alyssa Wilk --- include/envoy/upstream/upstream.h | 7 ++++ source/common/router/router.cc | 22 +++++------ source/common/upstream/upstream_impl.cc | 7 +++- source/common/upstream/upstream_impl.h | 5 +++ .../extensions/upstreams/http/generic/BUILD | 9 ++++- .../upstreams/http/generic/config.cc | 28 ++++++++++++++ .../upstreams/http/generic/config.h | 37 +++++++++++++++++++ source/extensions/upstreams/http/http/BUILD | 9 ++++- .../extensions/upstreams/http/http/config.cc | 24 ++++++++++++ .../extensions/upstreams/http/http/config.h | 37 +++++++++++++++++++ source/extensions/upstreams/http/tcp/BUILD | 9 ++++- .../extensions/upstreams/http/tcp/config.cc | 24 ++++++++++++ source/extensions/upstreams/http/tcp/config.h | 35 ++++++++++++++++++ source/server/BUILD | 3 ++ test/common/http/BUILD | 1 + test/common/router/BUILD | 6 +++ test/common/router/router_test.cc | 31 ++++++++++++++++ test/integration/stats_integration_test.cc | 6 ++- test/mocks/upstream/cluster_info.cc | 1 + test/mocks/upstream/cluster_info.h | 3 ++ 20 files changed, 283 insertions(+), 21 deletions(-) create mode 100644 source/extensions/upstreams/http/generic/config.cc create mode 100644 source/extensions/upstreams/http/generic/config.h create mode 100644 source/extensions/upstreams/http/http/config.cc create mode 100644 source/extensions/upstreams/http/http/config.h create mode 100644 source/extensions/upstreams/http/tcp/config.cc create mode 100644 source/extensions/upstreams/http/tcp/config.h diff --git a/include/envoy/upstream/upstream.h b/include/envoy/upstream/upstream.h index dbb89c88be67..139eb8ebb3a3 100644 --- a/include/envoy/upstream/upstream.h +++ b/include/envoy/upstream/upstream.h @@ -789,6 +789,13 @@ class ClusterInfo { virtual const absl::optional& lbOriginalDstConfig() const PURE; + /** + * @return const absl::optional& the configuration + * for the upstream, if a custom upstream is configured. + */ + virtual const absl::optional& + upstreamConfig() const PURE; + /** * @return Whether the cluster is currently in maintenance mode and should not be routed to. * Different filters may handle this situation in different ways. 
The implementation diff --git a/source/common/router/router.cc b/source/common/router/router.cc index 5ab68cc2d17c..64f8450de417 100644 --- a/source/common/router/router.cc +++ b/source/common/router/router.cc @@ -597,22 +597,20 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, } std::unique_ptr Filter::createConnPool() { + GenericConnPoolFactory* factory = nullptr; + if (cluster_->upstreamConfig().has_value()) { + factory = &Envoy::Config::Utility::getAndCheckFactory( + cluster_->upstreamConfig().value()); + } else { + factory = &Envoy::Config::Utility::getAndCheckFactoryByName( + "envoy.filters.connection_pools.http.generic"); + } const bool should_tcp_proxy = route_entry_->connectConfig().has_value() && downstream_headers_->getMethodValue() == Http::Headers::get().MethodValues.Connect; Http::Protocol protocol = cluster_->upstreamHttpProtocol(callbacks_->streamInfo().protocol()); - if (should_tcp_proxy) { - auto pool = std::make_unique(config_.cm_, *route_entry_, protocol, this); - if (pool->valid()) { - return pool; - } - } else { - auto pool = std::make_unique(config_.cm_, *route_entry_, protocol, this); - if (pool->valid()) { - return pool; - } - } - return nullptr; + return factory->createGenericConnPool(config_.cm_, should_tcp_proxy, *route_entry_, protocol, + this); } void Filter::sendNoHealthyUpstreamResponse() { diff --git a/source/common/upstream/upstream_impl.cc b/source/common/upstream/upstream_impl.cc index a78a331cb9cc..a11e06f2039e 100644 --- a/source/common/upstream/upstream_impl.cc +++ b/source/common/upstream/upstream_impl.cc @@ -694,7 +694,12 @@ ClusterInfoImpl::ClusterInfoImpl( source_address_(getSourceAddress(config, bind_config)), lb_least_request_config_(config.least_request_lb_config()), lb_ring_hash_config_(config.ring_hash_lb_config()), - lb_original_dst_config_(config.original_dst_lb_config()), added_via_api_(added_via_api), + lb_original_dst_config_(config.original_dst_lb_config()), + upstream_config_(config.has_upstream_config() + ? 
absl::make_optional( + config.upstream_config()) + : absl::nullopt), + added_via_api_(added_via_api), lb_subset_(LoadBalancerSubsetInfoImpl(config.lb_subset_config())), metadata_(config.metadata()), typed_metadata_(config.metadata()), common_lb_config_(config.common_lb_config()), diff --git a/source/common/upstream/upstream_impl.h b/source/common/upstream/upstream_impl.h index 722aa5e5dcc3..372716d07894 100644 --- a/source/common/upstream/upstream_impl.h +++ b/source/common/upstream/upstream_impl.h @@ -565,6 +565,10 @@ class ClusterInfoImpl : public ClusterInfo, protected Logger::Loggable& + upstreamConfig() const override { + return upstream_config_; + } bool maintenanceMode() const override; uint64_t maxRequestsPerConnection() const override { return max_requests_per_connection_; } uint32_t maxResponseHeadersCount() const override { return max_response_headers_count_; } @@ -645,6 +649,7 @@ class ClusterInfoImpl : public ClusterInfo, protected Logger::Loggable lb_ring_hash_config_; absl::optional lb_original_dst_config_; + absl::optional upstream_config_; const bool added_via_api_; LoadBalancerSubsetInfoImpl lb_subset_; const envoy::config::core::v3::Metadata metadata_; diff --git a/source/extensions/upstreams/http/generic/BUILD b/source/extensions/upstreams/http/generic/BUILD index 7aa32b16a219..f61d9801103f 100644 --- a/source/extensions/upstreams/http/generic/BUILD +++ b/source/extensions/upstreams/http/generic/BUILD @@ -1,5 +1,3 @@ -# placeholder build files for security_posture -# Will be filled in as #11327 lands. load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -13,8 +11,15 @@ envoy_package() envoy_cc_extension( name = "config", srcs = [ + "config.cc", ], hdrs = [ + "config.h", ], security_posture = "robust_to_untrusted_downstream", + visibility = ["//visibility:public"], + deps = [ + "//source/common/router:router_lib", + "@envoy_api//envoy/extensions/upstreams/http/generic/v3:pkg_cc_proto", + ], ) diff --git a/source/extensions/upstreams/http/generic/config.cc b/source/extensions/upstreams/http/generic/config.cc new file mode 100644 index 000000000000..f3057ccb8561 --- /dev/null +++ b/source/extensions/upstreams/http/generic/config.cc @@ -0,0 +1,28 @@ +#include "extensions/upstreams/http/generic/config.h" + +#include "common/router/upstream_request.h" + +namespace Envoy { +namespace Extensions { +namespace Upstreams { +namespace Http { +namespace Generic { + +Router::GenericConnPoolPtr GenericGenericConnPoolFactory::createGenericConnPool( + Upstream::ClusterManager& cm, bool is_connect, const Router::RouteEntry& route_entry, + Envoy::Http::Protocol protocol, Upstream::LoadBalancerContext* ctx) const { + if (is_connect) { + auto ret = std::make_unique(cm, route_entry, protocol, ctx); + return (ret->valid() ? std::move(ret) : nullptr); + } + auto ret = std::make_unique(cm, route_entry, protocol, ctx); + return (ret->valid() ? 
std::move(ret) : nullptr); +} + +REGISTER_FACTORY(GenericGenericConnPoolFactory, Router::GenericConnPoolFactory); + +} // namespace Generic +} // namespace Http +} // namespace Upstreams +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/upstreams/http/generic/config.h b/source/extensions/upstreams/http/generic/config.h new file mode 100644 index 000000000000..048e9998a403 --- /dev/null +++ b/source/extensions/upstreams/http/generic/config.h @@ -0,0 +1,37 @@ +#pragma once + +#include "envoy/extensions/upstreams/http/generic/v3/generic_connection_pool.pb.h" +#include "envoy/registry/registry.h" +#include "envoy/router/router.h" + +namespace Envoy { +namespace Extensions { +namespace Upstreams { +namespace Http { +namespace Generic { + +/** + * Config registration for the GenericConnPool. * @see Router::GenericConnPoolFactory + */ +class GenericGenericConnPoolFactory : public Router::GenericConnPoolFactory { +public: + std::string name() const override { return "envoy.filters.connection_pools.http.generic"; } + std::string category() const override { return "envoy.upstreams"; } + Router::GenericConnPoolPtr + createGenericConnPool(Upstream::ClusterManager& cm, bool is_connect, + const Router::RouteEntry& route_entry, Envoy::Http::Protocol protocol, + Upstream::LoadBalancerContext* ctx) const override; + + ProtobufTypes::MessagePtr createEmptyConfigProto() override { + return std::make_unique< + envoy::extensions::upstreams::http::generic::v3::GenericConnectionPoolProto>(); + } +}; + +DECLARE_FACTORY(GenericGenericConnPoolFactory); + +} // namespace Generic +} // namespace Http +} // namespace Upstreams +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/upstreams/http/http/BUILD b/source/extensions/upstreams/http/http/BUILD index 7aa32b16a219..caf8f766e6f3 100644 --- a/source/extensions/upstreams/http/http/BUILD +++ b/source/extensions/upstreams/http/http/BUILD @@ -1,5 +1,3 @@ -# placeholder build files for security_posture -# Will be filled in as #11327 lands. load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -13,8 +11,15 @@ envoy_package() envoy_cc_extension( name = "config", srcs = [ + "config.cc", ], hdrs = [ + "config.h", ], security_posture = "robust_to_untrusted_downstream", + visibility = ["//visibility:public"], + deps = [ + "//source/common/router:router_lib", + "@envoy_api//envoy/extensions/upstreams/http/http/v3:pkg_cc_proto", + ], ) diff --git a/source/extensions/upstreams/http/http/config.cc b/source/extensions/upstreams/http/http/config.cc new file mode 100644 index 000000000000..a257b7e39b37 --- /dev/null +++ b/source/extensions/upstreams/http/http/config.cc @@ -0,0 +1,24 @@ +#include "extensions/upstreams/http/http/config.h" + +#include "common/router/upstream_request.h" + +namespace Envoy { +namespace Extensions { +namespace Upstreams { +namespace Http { +namespace Http { + +Router::GenericConnPoolPtr HttpGenericConnPoolFactory::createGenericConnPool( + Upstream::ClusterManager& cm, bool, const Router::RouteEntry& route_entry, + Envoy::Http::Protocol protocol, Upstream::LoadBalancerContext* ctx) const { + auto ret = std::make_unique(cm, route_entry, protocol, ctx); + return (ret->valid() ? 
std::move(ret) : nullptr); +} + +REGISTER_FACTORY(HttpGenericConnPoolFactory, Router::GenericConnPoolFactory); + +} // namespace Http +} // namespace Http +} // namespace Upstreams +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/upstreams/http/http/config.h b/source/extensions/upstreams/http/http/config.h new file mode 100644 index 000000000000..9481f742a78d --- /dev/null +++ b/source/extensions/upstreams/http/http/config.h @@ -0,0 +1,37 @@ +#pragma once + +#include "envoy/extensions/upstreams/http/http/v3/http_connection_pool.pb.h" +#include "envoy/registry/registry.h" +#include "envoy/router/router.h" + +namespace Envoy { +namespace Extensions { +namespace Upstreams { +namespace Http { +namespace Http { + +/** + * Config registration for the HttpConnPool. @see Router::GenericConnPoolFactory + */ +class HttpGenericConnPoolFactory : public Router::GenericConnPoolFactory { +public: + std::string name() const override { return "envoy.filters.connection_pools.http.http"; } + std::string category() const override { return "envoy.upstreams"; } + Router::GenericConnPoolPtr + createGenericConnPool(Upstream::ClusterManager& cm, bool is_connect, + const Router::RouteEntry& route_entry, Envoy::Http::Protocol protocol, + Upstream::LoadBalancerContext* ctx) const override; + + ProtobufTypes::MessagePtr createEmptyConfigProto() override { + return std::make_unique< + envoy::extensions::upstreams::http::http::v3::HttpConnectionPoolProto>(); + } +}; + +DECLARE_FACTORY(HttpGenericConnPoolFactory); + +} // namespace Http +} // namespace Http +} // namespace Upstreams +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/upstreams/http/tcp/BUILD b/source/extensions/upstreams/http/tcp/BUILD index 7aa32b16a219..960f8b4b9c0d 100644 --- a/source/extensions/upstreams/http/tcp/BUILD +++ b/source/extensions/upstreams/http/tcp/BUILD @@ -1,5 +1,3 @@ -# placeholder build files for security_posture -# Will be filled in as #11327 lands. load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", @@ -13,8 +11,15 @@ envoy_package() envoy_cc_extension( name = "config", srcs = [ + "config.cc", ], hdrs = [ + "config.h", ], security_posture = "robust_to_untrusted_downstream", + visibility = ["//visibility:public"], + deps = [ + "//source/common/router:router_lib", + "@envoy_api//envoy/extensions/upstreams/http/tcp/v3:pkg_cc_proto", + ], ) diff --git a/source/extensions/upstreams/http/tcp/config.cc b/source/extensions/upstreams/http/tcp/config.cc new file mode 100644 index 000000000000..ffd0412b643e --- /dev/null +++ b/source/extensions/upstreams/http/tcp/config.cc @@ -0,0 +1,24 @@ +#include "extensions/upstreams/http/tcp/config.h" + +#include "common/router/upstream_request.h" + +namespace Envoy { +namespace Extensions { +namespace Upstreams { +namespace Http { +namespace Tcp { + +Router::GenericConnPoolPtr TcpGenericConnPoolFactory::createGenericConnPool( + Upstream::ClusterManager& cm, bool, const Router::RouteEntry& route_entry, + Envoy::Http::Protocol protocol, Upstream::LoadBalancerContext* ctx) const { + auto ret = std::make_unique(cm, route_entry, protocol, ctx); + return (ret->valid() ? 
std::move(ret) : nullptr); +} + +REGISTER_FACTORY(TcpGenericConnPoolFactory, Router::GenericConnPoolFactory); + +} // namespace Tcp +} // namespace Http +} // namespace Upstreams +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/upstreams/http/tcp/config.h b/source/extensions/upstreams/http/tcp/config.h new file mode 100644 index 000000000000..8fdaa9c31d03 --- /dev/null +++ b/source/extensions/upstreams/http/tcp/config.h @@ -0,0 +1,35 @@ +#pragma once + +#include "envoy/extensions/upstreams/http/tcp/v3/tcp_connection_pool.pb.h" +#include "envoy/registry/registry.h" +#include "envoy/router/router.h" + +namespace Envoy { +namespace Extensions { +namespace Upstreams { +namespace Http { +namespace Tcp { + +/** + * Config registration for the TcpConnPool. @see Router::GenericConnPoolFactory + */ +class TcpGenericConnPoolFactory : public Router::GenericConnPoolFactory { +public: + std::string name() const override { return "envoy.filters.connection_pools.http.tcp"; } + std::string category() const override { return "envoy.upstreams"; } + Router::GenericConnPoolPtr + createGenericConnPool(Upstream::ClusterManager& cm, bool is_connect, + const Router::RouteEntry& route_entry, Envoy::Http::Protocol protocol, + Upstream::LoadBalancerContext* ctx) const override; + ProtobufTypes::MessagePtr createEmptyConfigProto() override { + return std::make_unique(); + } +}; + +DECLARE_FACTORY(TcpGenericConnPoolFactory); + +} // namespace Tcp +} // namespace Http +} // namespace Upstreams +} // namespace Extensions +} // namespace Envoy diff --git a/source/server/BUILD b/source/server/BUILD index 673c96fc2e6b..3f8b312ee64d 100644 --- a/source/server/BUILD +++ b/source/server/BUILD @@ -79,6 +79,9 @@ envoy_cc_library( "//source/common/stats:timespan_lib", "//source/common/stream_info:stream_info_lib", "//source/extensions/transport_sockets:well_known_names", + "//source/extensions/upstreams/http/generic:config", + "//source/extensions/upstreams/http/http:config", + "//source/extensions/upstreams/http/tcp:config", ], ) diff --git a/test/common/http/BUILD b/test/common/http/BUILD index 5a42fb54f59b..e723a48abeb0 100644 --- a/test/common/http/BUILD +++ b/test/common/http/BUILD @@ -23,6 +23,7 @@ envoy_cc_test( "//source/common/http:context_lib", "//source/common/http:headers_lib", "//source/common/http:utility_lib", + "//source/extensions/upstreams/http/generic:config", "//test/mocks:common_lib", "//test/mocks/buffer:buffer_mocks", "//test/mocks/http:http_mocks", diff --git a/test/common/router/BUILD b/test/common/router/BUILD index d1c5b7c036ed..81a4b7e7a63b 100644 --- a/test/common/router/BUILD +++ b/test/common/router/BUILD @@ -262,6 +262,9 @@ envoy_cc_test( "//source/common/stream_info:uint32_accessor_lib", "//source/common/upstream:upstream_includes", "//source/common/upstream:upstream_lib", + "//source/extensions/upstreams/http/generic:config", + "//source/extensions/upstreams/http/http:config", + "//source/extensions/upstreams/http/tcp:config", "//test/common/http:common_lib", "//test/mocks/http:http_mocks", "//test/mocks/local_info:local_info_mocks", @@ -276,6 +279,8 @@ envoy_cc_test( "//test/test_common:utility_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/upstreams/http/http/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/upstreams/http/tcp/v3:pkg_cc_proto", "@envoy_api//envoy/type/v3:pkg_cc_proto", ], ) @@ -292,6 +297,7 @@ envoy_cc_test( 
"//source/common/upstream:upstream_includes", "//source/common/upstream:upstream_lib", "//source/extensions/access_loggers/file:config", + "//source/extensions/upstreams/http/generic:config", "//test/common/http:common_lib", "//test/mocks/access_log:access_log_mocks", "//test/mocks/filesystem:filesystem_mocks", diff --git a/test/common/router/router_test.cc b/test/common/router/router_test.cc index 6b9137e723ca..eaf43fc2be4f 100644 --- a/test/common/router/router_test.cc +++ b/test/common/router/router_test.cc @@ -5,6 +5,8 @@ #include "envoy/config/core/v3/base.pb.h" #include "envoy/extensions/transport_sockets/tls/v3/cert.pb.h" +#include "envoy/extensions/upstreams/http/http/v3/http_connection_pool.pb.h" +#include "envoy/extensions/upstreams/http/tcp/v3/tcp_connection_pool.pb.h" #include "envoy/type/v3/percent.pb.h" #include "common/buffer/buffer_impl.h" @@ -5932,6 +5934,14 @@ TEST_F(RouterTest, ConnectPauseAndResume) { // Verify that CONNECT payload is not sent upstream if non-200 response headers are received. TEST_F(RouterTest, ConnectPauseNoResume) { + // Explicitly configure an HTTP upstream, to test factory creation. + cm_.thread_local_cluster_.cluster_.info_->upstream_config_ = + absl::make_optional(); + envoy::extensions::upstreams::http::http::v3::HttpConnectionPoolProto http_config; + cm_.thread_local_cluster_.cluster_.info_->upstream_config_.value() + .mutable_typed_config() + ->PackFrom(http_config); + NiceMock encoder; Http::ResponseDecoder* response_decoder = nullptr; EXPECT_CALL(cm_.conn_pool_, newStream(_, _)) @@ -5962,6 +5972,27 @@ TEST_F(RouterTest, ConnectPauseNoResume) { response_decoder->decodeHeaders(std::move(response_headers), true); } +TEST_F(RouterTest, ConnectExplicitTcpUpstream) { + // Explicitly configure an TCP upstream, to test factory creation. + cm_.thread_local_cluster_.cluster_.info_->upstream_config_ = + absl::make_optional(); + envoy::extensions::upstreams::http::tcp::v3::TcpConnectionPoolProto tcp_config; + cm_.thread_local_cluster_.cluster_.info_->upstream_config_.value() + .mutable_typed_config() + ->PackFrom(tcp_config); + callbacks_.route_->route_entry_.connect_config_ = + absl::make_optional(); + + // Make sure newConnection is called on the TCP pool, not newStream on the HTTP pool. + EXPECT_CALL(cm_.tcp_conn_pool_, newConnection(_)); + Http::TestRequestHeaderMapImpl headers; + HttpTestUtility::addDefaultHeaders(headers); + headers.setMethod("CONNECT"); + router_.decodeHeaders(headers, false); + + router_.onDestroy(); +} + class WatermarkTest : public RouterTest { public: void sendRequest(bool header_only_request = true, bool pool_ready = true) { diff --git a/test/integration/stats_integration_test.cc b/test/integration/stats_integration_test.cc index 40d65003dd32..15d060d6d4a0 100644 --- a/test/integration/stats_integration_test.cc +++ b/test/integration/stats_integration_test.cc @@ -272,6 +272,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithFakeSymbolTable) { // 2020/05/05 10908 44233 44600 router: add InternalRedirectPolicy and predicate // 2020/05/13 10531 44425 44600 Refactor resource manager // 2020/05/20 11223 44491 44600 Add primary clusters tracking to cluster manager. + // 2020/06/10 11561 44491 44811 Make upstreams pluggable // Note: when adjusting this value: EXPECT_MEMORY_EQ is active only in CI // 'release' builds, where we control the platform and tool-chain. 
So you @@ -286,7 +287,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithFakeSymbolTable) { // https://github.com/envoyproxy/envoy/blob/master/source/docs/stats.md#stats-memory-tests // for details on how to fix. EXPECT_MEMORY_EQ(m_per_cluster, 44491); - EXPECT_MEMORY_LE(m_per_cluster, 44600); + EXPECT_MEMORY_LE(m_per_cluster, 44811); } TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithRealSymbolTable) { @@ -334,6 +335,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithRealSymbolTable) { // 2020/05/05 10908 36345 36800 router: add InternalRedirectPolicy and predicate // 2020/05/13 10531 36537 36800 Refactor resource manager // 2020/05/20 11223 36603 36800 Add primary clusters tracking to cluster manager. + // 2020/06/10 11561 36603 36923 Make upstreams pluggable // Note: when adjusting this value: EXPECT_MEMORY_EQ is active only in CI // 'release' builds, where we control the platform and tool-chain. So you @@ -348,7 +350,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithRealSymbolTable) { // https://github.com/envoyproxy/envoy/blob/master/source/docs/stats.md#stats-memory-tests // for details on how to fix. EXPECT_MEMORY_EQ(m_per_cluster, 36603); - EXPECT_MEMORY_LE(m_per_cluster, 36800); + EXPECT_MEMORY_LE(m_per_cluster, 36923); } TEST_P(ClusterMemoryTestRunner, MemoryLargeHostSizeWithStats) { diff --git a/test/mocks/upstream/cluster_info.cc b/test/mocks/upstream/cluster_info.cc index dd428aa124c2..3a2c1aa8af8d 100644 --- a/test/mocks/upstream/cluster_info.cc +++ b/test/mocks/upstream/cluster_info.cc @@ -81,6 +81,7 @@ MockClusterInfo::MockClusterInfo() ON_CALL(*this, lbSubsetInfo()).WillByDefault(ReturnRef(lb_subset_)); ON_CALL(*this, lbRingHashConfig()).WillByDefault(ReturnRef(lb_ring_hash_config_)); ON_CALL(*this, lbOriginalDstConfig()).WillByDefault(ReturnRef(lb_original_dst_config_)); + ON_CALL(*this, upstreamConfig()).WillByDefault(ReturnRef(upstream_config_)); ON_CALL(*this, lbConfig()).WillByDefault(ReturnRef(lb_config_)); ON_CALL(*this, clusterSocketOptions()).WillByDefault(ReturnRef(cluster_socket_options_)); ON_CALL(*this, metadata()).WillByDefault(ReturnRef(metadata_)); diff --git a/test/mocks/upstream/cluster_info.h b/test/mocks/upstream/cluster_info.h index 2e99eea091ef..e3bce01c6282 100644 --- a/test/mocks/upstream/cluster_info.h +++ b/test/mocks/upstream/cluster_info.h @@ -108,6 +108,8 @@ class MockClusterInfo : public ClusterInfo { lbLeastRequestConfig, (), (const)); MOCK_METHOD(const absl::optional&, lbOriginalDstConfig, (), (const)); + MOCK_METHOD(const absl::optional&, upstreamConfig, + (), (const)); MOCK_METHOD(bool, maintenanceMode, (), (const)); MOCK_METHOD(uint32_t, maxResponseHeadersCount, (), (const)); MOCK_METHOD(uint64_t, maxRequestsPerConnection, (), (const)); @@ -163,6 +165,7 @@ class MockClusterInfo : public ClusterInfo { upstream_http_protocol_options_; absl::optional lb_ring_hash_config_; absl::optional lb_original_dst_config_; + absl::optional upstream_config_; Network::ConnectionSocket::OptionsSharedPtr cluster_socket_options_; envoy::config::cluster::v3::Cluster::CommonLbConfig lb_config_; envoy::config::core::v3::Metadata metadata_; From 7ac536ab6d84ec510dcbbd1b1a1e4f6cd2ef96d9 Mon Sep 17 00:00:00 2001 From: Florin Coras Date: Tue, 16 Jun 2020 13:51:39 -0700 Subject: [PATCH 366/909] network: fd usage cleanup in listener and io handle (#11576) - move local and peer address retrieval functions from socket interface to io handle - add io handle domain retrieval function - let socket interface 
decide the type of io handle to wrap accepted fd into. - use io_hande->domain() in SocketImpl::ipVersion() Signed-off-by: Florin Coras --- include/envoy/network/BUILD | 1 + include/envoy/network/io_handle.h | 26 +++++-- include/envoy/network/socket.h | 22 ++---- source/common/network/base_listener_impl.cc | 4 - source/common/network/base_listener_impl.h | 2 - .../common/network/io_socket_handle_impl.cc | 78 ++++++++++++++++++- source/common/network/io_socket_handle_impl.h | 4 +- source/common/network/listener_impl.cc | 9 +-- source/common/network/socket_impl.cc | 33 ++++---- .../common/network/socket_interface_impl.cc | 63 +-------------- source/common/network/socket_interface_impl.h | 3 +- .../quiche/quic_io_handle_wrapper.h | 10 ++- test/common/network/listener_impl_test.cc | 11 --- .../quiche/quic_io_handle_wrapper_test.cc | 20 +++++ test/mocks/network/io_handle.h | 4 +- 15 files changed, 159 insertions(+), 131 deletions(-) diff --git a/include/envoy/network/BUILD b/include/envoy/network/BUILD index 6e395ed56ab7..7914df35c0ef 100644 --- a/include/envoy/network/BUILD +++ b/include/envoy/network/BUILD @@ -75,6 +75,7 @@ envoy_cc_library( envoy_cc_library( name = "io_handle_interface", hdrs = ["io_handle.h"], + external_deps = ["abseil_optional"], deps = [ ":address_interface", "//include/envoy/api:io_error_interface", diff --git a/include/envoy/network/io_handle.h b/include/envoy/network/io_handle.h index 4c4e70ceb87e..ac2766ff3281 100644 --- a/include/envoy/network/io_handle.h +++ b/include/envoy/network/io_handle.h @@ -8,6 +8,7 @@ #include "envoy/network/address.h" #include "absl/container/fixed_array.h" +#include "absl/types/optional.h" namespace Envoy { namespace Buffer { @@ -179,11 +180,6 @@ class IoHandle { virtual Api::SysCallIntResult getOption(int level, int optname, void* optval, socklen_t* optlen) PURE; - /** - * Get local address to which handle is bound (see man 2 getsockname) - */ - virtual Api::SysCallIntResult getLocalAddress(sockaddr* address, socklen_t* addrlen) PURE; - /** * Toggle blocking behavior * @param blocking flag to set/unset blocking state @@ -191,6 +187,26 @@ class IoHandle { * is successful, errno_ shouldn't be used. */ virtual Api::SysCallIntResult setBlocking(bool blocking) PURE; + + /** + * Get domain used by underlying socket (see man 2 socket) + * @param domain updated to the underlying socket's domain if call is successful + * @return a Api::SysCallIntResult with rc_ = 0 for success and rc_ = -1 for failure. If the call + * is successful, errno_ shouldn't be used. + */ + virtual absl::optional domain() PURE; + + /** + * Get local address (ip:port pair) + * @return local address as @ref Address::InstanceConstSharedPtr + */ + virtual Address::InstanceConstSharedPtr localAddress() PURE; + + /** + * Get peer's address (ip:port pair) + * @return peer's address as @ref Address::InstanceConstSharedPtr + */ + virtual Address::InstanceConstSharedPtr peerAddress() PURE; }; using IoHandlePtr = std::unique_ptr; diff --git a/include/envoy/network/socket.h b/include/envoy/network/socket.h index 411c1e8a2e92..0951f0a04617 100644 --- a/include/envoy/network/socket.h +++ b/include/envoy/network/socket.h @@ -258,25 +258,17 @@ class SocketInterface { const Address::InstanceConstSharedPtr addr) PURE; /** - * Returns true if the given family is supported on this machine. - * @param domain the IP family. 
+ * Wrap socket file descriptor in IoHandle + * @param fd socket file descriptor to be wrapped + * @return @ref Network::IoHandlePtr that wraps the socket file descriptor */ - virtual bool ipFamilySupported(int domain) PURE; + virtual IoHandlePtr socket(os_fd_t fd) PURE; /** - * Obtain an address from a bound file descriptor. Raises an EnvoyException on failure. - * @param fd socket file descriptor - * @return InstanceConstSharedPtr for bound address. - */ - virtual Address::InstanceConstSharedPtr addressFromFd(os_fd_t fd) PURE; - - /** - * Obtain the address of the peer of the socket with the specified file descriptor. - * Raises an EnvoyException on failure. - * @param fd socket file descriptor - * @return InstanceConstSharedPtr for peer address. + * Returns true if the given family is supported on this machine. + * @param domain the IP family. */ - virtual Address::InstanceConstSharedPtr peerAddressFromFd(os_fd_t fd) PURE; + virtual bool ipFamilySupported(int domain) PURE; }; } // namespace Network diff --git a/source/common/network/base_listener_impl.cc b/source/common/network/base_listener_impl.cc index ffad88865fde..e1adf6b930ec 100644 --- a/source/common/network/base_listener_impl.cc +++ b/source/common/network/base_listener_impl.cc @@ -15,10 +15,6 @@ namespace Envoy { namespace Network { -Address::InstanceConstSharedPtr BaseListenerImpl::getLocalAddress(os_fd_t fd) { - return SocketInterfaceSingleton::get().addressFromFd(fd); -} - BaseListenerImpl::BaseListenerImpl(Event::DispatcherImpl& dispatcher, SocketSharedPtr socket) : local_address_(nullptr), dispatcher_(dispatcher), socket_(std::move(socket)) { const auto ip = socket_->localAddress()->ip(); diff --git a/source/common/network/base_listener_impl.h b/source/common/network/base_listener_impl.h index 806789ad3535..878136a4fe29 100644 --- a/source/common/network/base_listener_impl.h +++ b/source/common/network/base_listener_impl.h @@ -23,8 +23,6 @@ class BaseListenerImpl : public virtual Listener { BaseListenerImpl(Event::DispatcherImpl& dispatcher, SocketSharedPtr socket); protected: - virtual Address::InstanceConstSharedPtr getLocalAddress(os_fd_t fd); - Address::InstanceConstSharedPtr local_address_; Event::DispatcherImpl& dispatcher_; const SocketSharedPtr socket_; diff --git a/source/common/network/io_socket_handle_impl.cc b/source/common/network/io_socket_handle_impl.cc index b0ac43924963..4e0c5534c19d 100644 --- a/source/common/network/io_socket_handle_impl.cc +++ b/source/common/network/io_socket_handle_impl.cc @@ -387,13 +387,83 @@ Api::SysCallIntResult IoSocketHandleImpl::getOption(int level, int optname, void return Api::OsSysCallsSingleton::get().getsockopt(fd_, level, optname, optval, optlen); } -Api::SysCallIntResult IoSocketHandleImpl::getLocalAddress(sockaddr* address, socklen_t* addrlen) { - return Api::OsSysCallsSingleton::get().getsockname(fd_, address, addrlen); -} - Api::SysCallIntResult IoSocketHandleImpl::setBlocking(bool blocking) { return Api::OsSysCallsSingleton::get().setsocketblocking(fd_, blocking); } +absl::optional IoSocketHandleImpl::domain() { + sockaddr_storage addr; + socklen_t len = sizeof(addr); + Api::SysCallIntResult result; + + result = Api::OsSysCallsSingleton::get().getsockname( + fd_, reinterpret_cast(&addr), &len); + + if (result.rc_ == 0) { + return {addr.ss_family}; + } + + return absl::nullopt; +} + +Address::InstanceConstSharedPtr IoSocketHandleImpl::localAddress() { + sockaddr_storage ss; + socklen_t ss_len = sizeof(ss); + auto& os_sys_calls = Api::OsSysCallsSingleton::get(); + 
Api::SysCallIntResult result = + os_sys_calls.getsockname(fd_, reinterpret_cast(&ss), &ss_len); + if (result.rc_ != 0) { + throw EnvoyException(fmt::format("getsockname failed for '{}': ({}) {}", fd_, result.errno_, + strerror(result.errno_))); + } + int socket_v6only = 0; + if (ss.ss_family == AF_INET6) { + socklen_t size_int = sizeof(socket_v6only); + result = os_sys_calls.getsockopt(fd_, IPPROTO_IPV6, IPV6_V6ONLY, &socket_v6only, &size_int); +#ifdef WIN32 + // On Windows, it is possible for this getsockopt() call to fail. + // This can happen if the address we are trying to connect to has nothing + // listening. So we can't use RELEASE_ASSERT and instead must throw an + // exception + if (SOCKET_FAILURE(result.rc_)) { + throw EnvoyException(fmt::format("getsockopt failed for '{}': ({}) {}", fd_, result.errno_, + strerror(result.errno_))); + } +#else + RELEASE_ASSERT(result.rc_ == 0, ""); +#endif + } + return Address::addressFromSockAddr(ss, ss_len, socket_v6only); +} + +Address::InstanceConstSharedPtr IoSocketHandleImpl::peerAddress() { + sockaddr_storage ss; + socklen_t ss_len = sizeof ss; + auto& os_sys_calls = Api::OsSysCallsSingleton::get(); + Api::SysCallIntResult result = + os_sys_calls.getpeername(fd_, reinterpret_cast(&ss), &ss_len); + if (result.rc_ != 0) { + throw EnvoyException( + fmt::format("getpeername failed for '{}': {}", fd_, strerror(result.errno_))); + } +#ifdef __APPLE__ + if (ss_len == sizeof(sockaddr) && ss.ss_family == AF_UNIX) +#else + if (ss_len == sizeof(sa_family_t) && ss.ss_family == AF_UNIX) +#endif + { + // For Unix domain sockets, can't find out the peer name, but it should match our own + // name for the socket (i.e. the path should match, barring any namespace or other + // mechanisms to hide things, of which there are many). + ss_len = sizeof ss; + result = os_sys_calls.getsockname(fd_, reinterpret_cast(&ss), &ss_len); + if (result.rc_ != 0) { + throw EnvoyException( + fmt::format("getsockname failed for '{}': {}", fd_, strerror(result.errno_))); + } + } + return Address::addressFromSockAddr(ss, ss_len); +} + } // namespace Network } // namespace Envoy diff --git a/source/common/network/io_socket_handle_impl.h b/source/common/network/io_socket_handle_impl.h index 9f9a563aabe8..98fcad7c5ccd 100644 --- a/source/common/network/io_socket_handle_impl.h +++ b/source/common/network/io_socket_handle_impl.h @@ -51,8 +51,10 @@ class IoSocketHandleImpl : public IoHandle, protected Logger::Loggable domain() override; + Address::InstanceConstSharedPtr localAddress() override; + Address::InstanceConstSharedPtr peerAddress() override; private: // Converts a SysCallSizeResult to IoCallUint64Result. diff --git a/source/common/network/listener_impl.cc b/source/common/network/listener_impl.cc index e6a6c4dc3575..31dd923445a4 100644 --- a/source/common/network/listener_impl.cc +++ b/source/common/network/listener_impl.cc @@ -21,14 +21,13 @@ void ListenerImpl::listenCallback(evconnlistener*, evutil_socket_t fd, sockaddr* int remote_addr_len, void* arg) { ListenerImpl* listener = static_cast(arg); - // Create the IoSocketHandleImpl for the fd here. - IoHandlePtr io_handle = std::make_unique(fd); + // Wrap raw socket fd in IoHandle. + IoHandlePtr io_handle = SocketInterfaceSingleton::get().socket(fd); // Get the local address from the new socket if the listener is listening on IP ANY // (e.g., 0.0.0.0 for IPv4) (local_address_ is nullptr in this case). const Address::InstanceConstSharedPtr& local_address = - listener->local_address_ ? 
listener->local_address_ - : listener->getLocalAddress(io_handle->fd()); + listener->local_address_ ? listener->local_address_ : io_handle->localAddress(); // The accept() call that filled in remote_addr doesn't fill in more than the sa_family field // for Unix domain sockets; apparently there isn't a mechanism in the kernel to get the @@ -39,7 +38,7 @@ void ListenerImpl::listenCallback(evconnlistener*, evutil_socket_t fd, sockaddr* // IPv4 local_address was created from an IPv6 mapped IPv4 address. const Address::InstanceConstSharedPtr& remote_address = (remote_addr->sa_family == AF_UNIX) - ? SocketInterfaceSingleton::get().peerAddressFromFd(io_handle->fd()) + ? io_handle->peerAddress() : Address::addressFromSockAddr(*reinterpret_cast(remote_addr), remote_addr_len, local_address->ip()->version() == Address::IpVersion::v6); diff --git a/source/common/network/socket_impl.cc b/source/common/network/socket_impl.cc index 55081d5f3a62..445ea5f57d17 100644 --- a/source/common/network/socket_impl.cc +++ b/source/common/network/socket_impl.cc @@ -22,25 +22,24 @@ SocketImpl::SocketImpl(IoHandlePtr&& io_handle, const Address::InstanceConstSharedPtr& local_address) : io_handle_(std::move(io_handle)), local_address_(local_address) { + if (local_address_ != nullptr) { + addr_type_ = local_address_->type(); + return; + } + // Should not happen but some tests inject -1 fds if (SOCKET_INVALID(io_handle_->fd())) { - addr_type_ = local_address != nullptr ? local_address->type() : Address::Type::Ip; return; } - sockaddr_storage addr; - socklen_t len = sizeof(addr); - Api::SysCallIntResult result; - - result = io_handle_->getLocalAddress(reinterpret_cast(&addr), &len); + auto domain = io_handle_->domain(); // This should never happen in practice but too many tests inject fake fds ... - if (result.rc_ < 0) { - addr_type_ = local_address != nullptr ? local_address->type() : Address::Type::Ip; + if (!domain.has_value()) { return; } - addr_type_ = addr.ss_family == AF_UNIX ? Address::Type::Pipe : Address::Type::Ip; + addr_type_ = *domain == AF_UNIX ? 
Address::Type::Pipe : Address::Type::Ip; } Api::SysCallIntResult SocketImpl::bind(Network::Address::InstanceConstSharedPtr address) { @@ -71,7 +70,7 @@ Api::SysCallIntResult SocketImpl::bind(Network::Address::InstanceConstSharedPtr bind_result = io_handle_->bind(address->sockAddr(), address->sockAddrLen()); if (bind_result.rc_ == 0 && address->ip()->port() == 0) { - local_address_ = SocketInterfaceSingleton::get().addressFromFd(io_handle_->fd()); + local_address_ = io_handle_->localAddress(); } return bind_result; } @@ -81,7 +80,7 @@ Api::SysCallIntResult SocketImpl::listen(int backlog) { return io_handle_->liste Api::SysCallIntResult SocketImpl::connect(const Network::Address::InstanceConstSharedPtr address) { auto result = io_handle_->connect(address->sockAddr(), address->sockAddrLen()); if (address->type() == Address::Type::Ip) { - local_address_ = SocketInterfaceSingleton::get().addressFromFd(io_handle_->fd()); + local_address_ = io_handle_->localAddress(); } return result; } @@ -106,21 +105,17 @@ absl::optional SocketImpl::ipVersion() const { if (local_address_ != nullptr) { return local_address_->ip()->version(); } else { -#ifdef SOL_IP - int socket_domain; - socklen_t domain_len = sizeof(socket_domain); - auto result = getSocketOption(SOL_SOCKET, SO_DOMAIN, &socket_domain, &domain_len); - if (result.rc_ != 0) { + auto domain = io_handle_->domain(); + if (!domain.has_value()) { return absl::nullopt; } - if (socket_domain == AF_INET) { + if (*domain == AF_INET) { return Address::IpVersion::v4; - } else if (socket_domain == AF_INET6) { + } else if (*domain == AF_INET6) { return Address::IpVersion::v6; } else { return absl::nullopt; } -#endif } } return absl::nullopt; diff --git a/source/common/network/socket_interface_impl.cc b/source/common/network/socket_interface_impl.cc index a682e2d23406..d6d57e82b84b 100644 --- a/source/common/network/socket_interface_impl.cc +++ b/source/common/network/socket_interface_impl.cc @@ -66,6 +66,10 @@ IoHandlePtr SocketInterfaceImpl::socket(Socket::Type socket_type, return io_handle; } +IoHandlePtr SocketInterfaceImpl::socket(os_fd_t fd) { + return std::make_unique(fd); +} + bool SocketInterfaceImpl::ipFamilySupported(int domain) { Api::OsSysCalls& os_sys_calls = Api::OsSysCallsSingleton::get(); const Api::SysCallSocketResult result = os_sys_calls.socket(domain, SOCK_STREAM, 0); @@ -76,65 +80,6 @@ bool SocketInterfaceImpl::ipFamilySupported(int domain) { return SOCKET_VALID(result.rc_); } -Address::InstanceConstSharedPtr SocketInterfaceImpl::addressFromFd(os_fd_t fd) { - sockaddr_storage ss; - socklen_t ss_len = sizeof ss; - auto& os_sys_calls = Api::OsSysCallsSingleton::get(); - Api::SysCallIntResult result = - os_sys_calls.getsockname(fd, reinterpret_cast(&ss), &ss_len); - if (result.rc_ != 0) { - throw EnvoyException(fmt::format("getsockname failed for '{}': ({}) {}", fd, result.errno_, - strerror(result.errno_))); - } - int socket_v6only = 0; - if (ss.ss_family == AF_INET6) { - socklen_t size_int = sizeof(socket_v6only); - result = os_sys_calls.getsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &socket_v6only, &size_int); -#ifdef WIN32 - // On Windows, it is possible for this getsockopt() call to fail. - // This can happen if the address we are trying to connect to has nothing - // listening. 
So we can't use RELEASE_ASSERT and instead must throw an - // exception - if (SOCKET_FAILURE(result.rc_)) { - throw EnvoyException(fmt::format("getsockopt failed for '{}': ({}) {}", fd, result.errno_, - strerror(result.errno_))); - } -#else - RELEASE_ASSERT(result.rc_ == 0, ""); -#endif - } - return Address::addressFromSockAddr(ss, ss_len, socket_v6only); -} - -Address::InstanceConstSharedPtr SocketInterfaceImpl::peerAddressFromFd(os_fd_t fd) { - sockaddr_storage ss; - socklen_t ss_len = sizeof ss; - auto& os_sys_calls = Api::OsSysCallsSingleton::get(); - Api::SysCallIntResult result = - os_sys_calls.getpeername(fd, reinterpret_cast(&ss), &ss_len); - if (result.rc_ != 0) { - throw EnvoyException( - fmt::format("getpeername failed for '{}': {}", fd, strerror(result.errno_))); - } -#ifdef __APPLE__ - if (ss_len == sizeof(sockaddr) && ss.ss_family == AF_UNIX) -#else - if (ss_len == sizeof(sa_family_t) && ss.ss_family == AF_UNIX) -#endif - { - // For Unix domain sockets, can't find out the peer name, but it should match our own - // name for the socket (i.e. the path should match, barring any namespace or other - // mechanisms to hide things, of which there are many). - ss_len = sizeof ss; - result = os_sys_calls.getsockname(fd, reinterpret_cast(&ss), &ss_len); - if (result.rc_ != 0) { - throw EnvoyException( - fmt::format("getsockname failed for '{}': {}", fd, strerror(result.errno_))); - } - } - return Address::addressFromSockAddr(ss, ss_len); -} - static SocketInterfaceLoader* socket_interface_ = new SocketInterfaceLoader(std::make_unique()); diff --git a/source/common/network/socket_interface_impl.h b/source/common/network/socket_interface_impl.h index ddef0528d659..88798559844a 100644 --- a/source/common/network/socket_interface_impl.h +++ b/source/common/network/socket_interface_impl.h @@ -13,9 +13,8 @@ class SocketInterfaceImpl : public SocketInterface { IoHandlePtr socket(Socket::Type socket_type, Address::Type addr_type, Address::IpVersion version) override; IoHandlePtr socket(Socket::Type socket_type, const Address::InstanceConstSharedPtr addr) override; + IoHandlePtr socket(os_fd_t fd) override; bool ipFamilySupported(int domain) override; - Address::InstanceConstSharedPtr addressFromFd(os_fd_t fd) override; - Address::InstanceConstSharedPtr peerAddressFromFd(os_fd_t fd) override; }; using SocketInterfaceSingleton = InjectableSingleton; diff --git a/source/extensions/quic_listeners/quiche/quic_io_handle_wrapper.h b/source/extensions/quic_listeners/quiche/quic_io_handle_wrapper.h index 86b6f092d28f..96cb711fc1e0 100644 --- a/source/extensions/quic_listeners/quiche/quic_io_handle_wrapper.h +++ b/source/extensions/quic_listeners/quiche/quic_io_handle_wrapper.h @@ -78,12 +78,16 @@ class QuicIoHandleWrapper : public Network::IoHandle { socklen_t* optlen) override { return io_handle_.getOption(level, optname, optval, optlen); } - Api::SysCallIntResult getLocalAddress(sockaddr* address, socklen_t* addrlen) override { - return io_handle_.getLocalAddress(address, addrlen); - } Api::SysCallIntResult setBlocking(bool blocking) override { return io_handle_.setBlocking(blocking); } + absl::optional domain() override { return io_handle_.domain(); } + Network::Address::InstanceConstSharedPtr localAddress() override { + return io_handle_.localAddress(); + } + Network::Address::InstanceConstSharedPtr peerAddress() override { + return io_handle_.peerAddress(); + } private: Network::IoHandle& io_handle_; diff --git a/test/common/network/listener_impl_test.cc b/test/common/network/listener_impl_test.cc 
index a2ea3f0645d6..b19ba3664fb1 100644 --- a/test/common/network/listener_impl_test.cc +++ b/test/common/network/listener_impl_test.cc @@ -153,8 +153,6 @@ TEST_P(ListenerImplTest, WildcardListenerUseActualDst) { Network::Test::createRawBufferSocket(), nullptr); client_connection->connect(); - EXPECT_CALL(listener, getLocalAddress(_)).WillOnce(Return(local_dst_address)); - StreamInfo::StreamInfoImpl stream_info(dispatcher_->timeSource()); EXPECT_CALL(listener_callbacks, onAccept_(_)) .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void { @@ -199,11 +197,6 @@ TEST_P(ListenerImplTest, WildcardListenerIpv4Compat) { Network::Test::createRawBufferSocket(), nullptr); client_connection->connect(); - EXPECT_CALL(listener, getLocalAddress(_)) - .WillOnce(Invoke([](os_fd_t fd) -> Address::InstanceConstSharedPtr { - return SocketInterfaceSingleton::get().addressFromFd(fd); - })); - StreamInfo::StreamInfoImpl stream_info(dispatcher_->timeSource()); EXPECT_CALL(listener_callbacks, onAccept_(_)) .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void { @@ -249,10 +242,6 @@ TEST_P(ListenerImplTest, DisableAndEnableListener) { // When the listener is re-enabled, the pending connection should be accepted. listener.enable(); - EXPECT_CALL(listener, getLocalAddress(_)) - .WillOnce(Invoke([](os_fd_t fd) -> Address::InstanceConstSharedPtr { - return SocketInterfaceSingleton::get().addressFromFd(fd); - })); EXPECT_CALL(listener_callbacks, onAccept_(_)).WillOnce(Invoke([&](ConnectionSocketPtr&) -> void { client_connection->close(ConnectionCloseType::NoFlush); })); diff --git a/test/extensions/quic_listeners/quiche/quic_io_handle_wrapper_test.cc b/test/extensions/quic_listeners/quiche/quic_io_handle_wrapper_test.cc index 63d2aef7b392..4357d75edeae 100644 --- a/test/extensions/quic_listeners/quiche/quic_io_handle_wrapper_test.cc +++ b/test/extensions/quic_listeners/quiche/quic_io_handle_wrapper_test.cc @@ -57,6 +57,26 @@ TEST_F(QuicIoHandleWrapperTest, DelegateIoHandleCalls) { EXPECT_CALL(os_sys_calls_, sendmsg(fd, _, 0)).WillOnce(Return(Api::SysCallSizeResult{5u, 0})); wrapper_->sendmsg(&slice, 1, 0, /*self_ip=*/nullptr, *addr); + EXPECT_CALL(os_sys_calls_, getsockname(_, _, _)).WillOnce(Return(Api::SysCallIntResult{0, 0})); + wrapper_->domain(); + + EXPECT_CALL(os_sys_calls_, getsockopt_(_, _, _, _, _)).WillOnce(Return(0)); + EXPECT_CALL(os_sys_calls_, getsockname(_, _, _)) + .WillOnce(Invoke([](os_fd_t, sockaddr* addr, socklen_t* addrlen) -> Api::SysCallIntResult { + addr->sa_family = AF_INET6; + *addrlen = sizeof(sockaddr_in6); + return Api::SysCallIntResult{0, 0}; + })); + addr = wrapper_->localAddress(); + + EXPECT_CALL(os_sys_calls_, getpeername(_, _, _)) + .WillOnce(Invoke([](os_fd_t, sockaddr* addr, socklen_t* addrlen) -> Api::SysCallIntResult { + addr->sa_family = AF_INET6; + *addrlen = sizeof(sockaddr_in6); + return Api::SysCallIntResult{0, 0}; + })); + addr = wrapper_->peerAddress(); + Network::IoHandle::RecvMsgOutput output(1, nullptr); EXPECT_CALL(os_sys_calls_, recvmsg(fd, _, 0)).WillOnce(Invoke([](os_fd_t, msghdr* msg, int) { sockaddr_storage ss; diff --git a/test/mocks/network/io_handle.h b/test/mocks/network/io_handle.h index 53700f823789..bfd84dfedf2e 100644 --- a/test/mocks/network/io_handle.h +++ b/test/mocks/network/io_handle.h @@ -36,8 +36,10 @@ class MockIoHandle : public IoHandle { (int level, int optname, const void* optval, socklen_t optlen)); MOCK_METHOD(Api::SysCallIntResult, getOption, (int level, int optname, void* optval, socklen_t* optlen)); - 
MOCK_METHOD(Api::SysCallIntResult, getLocalAddress, (sockaddr * address, socklen_t* addrlen)); MOCK_METHOD(Api::SysCallIntResult, setBlocking, (bool blocking)); + MOCK_METHOD(absl::optional, domain, ()); + MOCK_METHOD(Address::InstanceConstSharedPtr, localAddress, ()); + MOCK_METHOD(Address::InstanceConstSharedPtr, peerAddress, ()); }; } // namespace Network From c33e1a4f2882e7494bee424aec8a1dba8671b556 Mon Sep 17 00:00:00 2001 From: Stephan Zuercher Date: Tue, 16 Jun 2020 15:56:54 -0700 Subject: [PATCH 367/909] docs: various small changes (#11548) Note need to install a sufficiently recent libstdc++ version when setting up clang builds. For example, Ubuntu gcc packages install libstdc++, but depending on the gcc version libstdc++ might not be new enough to compile Envoy. Additional Description: c.f., https://envoyproxy.slack.com/archives/C7E6C71QB/p1586307917037300 Risk Level: low, docs only Testing: n/a Docs Changes: yes Release Notes: n/a Signed-off-by: Stephan Zuercher --- RELEASES.md | 2 +- .../v2/http_connection_manager.proto | 6 +++--- .../v3/http_connection_manager.proto | 8 ++++---- .../v4alpha/http_connection_manager.proto | 8 ++++---- bazel/README.md | 7 ++++++- .../v2/http_connection_manager.proto | 6 +++--- .../v3/http_connection_manager.proto | 8 ++++---- .../v4alpha/http_connection_manager.proto | 8 ++++---- 8 files changed, 29 insertions(+), 24 deletions(-) diff --git a/RELEASES.md b/RELEASES.md index d76b3fe7981f..a954e376e473 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -60,7 +60,7 @@ deadline of 3 weeks. |:-------:|:----------:|:----------:|:----------:|:-----------:| | 1.12.0 | 2019/09/30 | 2019/10/31 | +31 days | 2020/10/31 | | 1.13.0 | 2019/12/31 | 2020/01/20 | +20 days | 2021/01/20 | -| 1.14.0 | 2020/03/31 | | | | +| 1.14.0 | 2020/03/31 | 2020/04/08 | +8 days | 2021/04/08 | | 1.15.0 | 2020/06/30 | | | | | 1.16.0 | 2020/09/30 | | | | | 1.17.0 | 2020/12/31 | | | | diff --git a/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto b/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto index 742e5584befe..4db4af690490 100644 --- a/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto +++ b/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto @@ -487,17 +487,17 @@ message HttpConnectionManager { // true in the future. When not specified, this value may be overridden by the // runtime variable // :ref:`http_connection_manager.normalize_path`. - // See `Normalization and Comparison ` + // See `Normalization and Comparison `_ // for details of normalization. // Note that Envoy does not perform - // `case normalization ` + // `case normalization `_ google.protobuf.BoolValue normalize_path = 30; // Determines if adjacent slashes in the path are merged into one before any processing of // requests by HTTP filters or routing. This affects the upstream *:path* header as well. Without // setting this option, incoming requests with path `//dir///file` will not match against route // with `prefix` match set to `/dir`. Defaults to `false`. Note that slash merging is not part of - // `HTTP spec ` and is provided for convenience. + // `HTTP spec `_ and is provided for convenience. bool merge_slashes = 33; // The configuration of the request ID extension. 
This includes operations such as diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto b/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto index 355eaba01e93..598f9aa62068 100644 --- a/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto +++ b/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto @@ -490,17 +490,17 @@ message HttpConnectionManager { // true in the future. When not specified, this value may be overridden by the // runtime variable // :ref:`http_connection_manager.normalize_path`. - // See `Normalization and Comparison ` + // See `Normalization and Comparison `_ // for details of normalization. // Note that Envoy does not perform - // `case normalization ` + // `case normalization `_ google.protobuf.BoolValue normalize_path = 30; // Determines if adjacent slashes in the path are merged into one before any processing of // requests by HTTP filters or routing. This affects the upstream *:path* header as well. Without // setting this option, incoming requests with path `//dir///file` will not match against route // with `prefix` match set to `/dir`. Defaults to `false`. Note that slash merging is not part of - // `HTTP spec ` and is provided for convenience. + // `HTTP spec `_ and is provided for convenience. bool merge_slashes = 33; // The configuration of the request ID extension. This includes operations such as @@ -525,7 +525,7 @@ message HttpConnectionManager { // local port and request method is not CONNECT. This affects the upstream host header as well. // Without setting this option, incoming requests with host `example:443` will not match against // route with :ref:`domains` match set to `example`. Defaults to `false`. Note that port removal is not part - // of `HTTP spec ` and is provided for convenience. + // of `HTTP spec `_ and is provided for convenience. bool strip_matching_host_port = 39; } diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto index f5e6619dee33..bf303d549712 100644 --- a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto +++ b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto @@ -490,17 +490,17 @@ message HttpConnectionManager { // true in the future. When not specified, this value may be overridden by the // runtime variable // :ref:`http_connection_manager.normalize_path`. - // See `Normalization and Comparison ` + // See `Normalization and Comparison `_ // for details of normalization. // Note that Envoy does not perform - // `case normalization ` + // `case normalization `_ google.protobuf.BoolValue normalize_path = 30; // Determines if adjacent slashes in the path are merged into one before any processing of // requests by HTTP filters or routing. This affects the upstream *:path* header as well. Without // setting this option, incoming requests with path `//dir///file` will not match against route // with `prefix` match set to `/dir`. Defaults to `false`. Note that slash merging is not part of - // `HTTP spec ` and is provided for convenience. + // `HTTP spec `_ and is provided for convenience. bool merge_slashes = 33; // The configuration of the request ID extension. 
This includes operations such as @@ -525,7 +525,7 @@ message HttpConnectionManager { // local port and request method is not CONNECT. This affects the upstream host header as well. // Without setting this option, incoming requests with host `example:443` will not match against // route with :ref:`domains` match set to `example`. Defaults to `false`. Note that port removal is not part - // of `HTTP spec ` and is provided for convenience. + // of `HTTP spec `_ and is provided for convenience. bool strip_matching_host_port = 39; } diff --git a/bazel/README.md b/bazel/README.md index 435c5b29d212..f278cba98a9b 100644 --- a/bazel/README.md +++ b/bazel/README.md @@ -80,6 +80,11 @@ for how to update or override dependencies. echo "build --config=clang" >> user.bazelrc ``` + Note: Either `libc++` or `libstdc++-7-dev` (or higher) must be installed. These are typically + available via a package manager, but may not be available in default repositories depending on + OS version. To build against `libc++` build with the `--config=libc++` instead of the + `--config=clang` flag. + ### macOS On macOS, you'll need to install several dependencies. This can be accomplished via [Homebrew](https://brew.sh/): ``` @@ -119,7 +124,7 @@ for how to update or override dependencies. set PATH=%PATH%;%USERPROFILE%\Python38\Scripts pip install wheel ``` - + [Build Tools for Visual Studio 2019](https://visualstudio.microsoft.com/downloads/#build-tools-for-visual-studio-2019): For building with MSVC (the `msvc-cl` config option), you must install at least the VC++ workload. You may also download Visual Studio 2019 and use the Build Tools packaged with that diff --git a/generated_api_shadow/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto b/generated_api_shadow/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto index 742e5584befe..4db4af690490 100644 --- a/generated_api_shadow/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto +++ b/generated_api_shadow/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto @@ -487,17 +487,17 @@ message HttpConnectionManager { // true in the future. When not specified, this value may be overridden by the // runtime variable // :ref:`http_connection_manager.normalize_path`. - // See `Normalization and Comparison ` + // See `Normalization and Comparison `_ // for details of normalization. // Note that Envoy does not perform - // `case normalization ` + // `case normalization `_ google.protobuf.BoolValue normalize_path = 30; // Determines if adjacent slashes in the path are merged into one before any processing of // requests by HTTP filters or routing. This affects the upstream *:path* header as well. Without // setting this option, incoming requests with path `//dir///file` will not match against route // with `prefix` match set to `/dir`. Defaults to `false`. Note that slash merging is not part of - // `HTTP spec ` and is provided for convenience. + // `HTTP spec `_ and is provided for convenience. bool merge_slashes = 33; // The configuration of the request ID extension. 
This includes operations such as diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto index 1362850f0530..1ebec4a8ff55 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto @@ -492,17 +492,17 @@ message HttpConnectionManager { // true in the future. When not specified, this value may be overridden by the // runtime variable // :ref:`http_connection_manager.normalize_path`. - // See `Normalization and Comparison ` + // See `Normalization and Comparison `_ // for details of normalization. // Note that Envoy does not perform - // `case normalization ` + // `case normalization `_ google.protobuf.BoolValue normalize_path = 30; // Determines if adjacent slashes in the path are merged into one before any processing of // requests by HTTP filters or routing. This affects the upstream *:path* header as well. Without // setting this option, incoming requests with path `//dir///file` will not match against route // with `prefix` match set to `/dir`. Defaults to `false`. Note that slash merging is not part of - // `HTTP spec ` and is provided for convenience. + // `HTTP spec `_ and is provided for convenience. bool merge_slashes = 33; // The configuration of the request ID extension. This includes operations such as @@ -527,7 +527,7 @@ message HttpConnectionManager { // local port and request method is not CONNECT. This affects the upstream host header as well. // Without setting this option, incoming requests with host `example:443` will not match against // route with :ref:`domains` match set to `example`. Defaults to `false`. Note that port removal is not part - // of `HTTP spec ` and is provided for convenience. + // of `HTTP spec `_ and is provided for convenience. bool strip_matching_host_port = 39; google.protobuf.Duration hidden_envoy_deprecated_idle_timeout = 11 diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto index f5e6619dee33..bf303d549712 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto @@ -490,17 +490,17 @@ message HttpConnectionManager { // true in the future. When not specified, this value may be overridden by the // runtime variable // :ref:`http_connection_manager.normalize_path`. - // See `Normalization and Comparison ` + // See `Normalization and Comparison `_ // for details of normalization. // Note that Envoy does not perform - // `case normalization ` + // `case normalization `_ google.protobuf.BoolValue normalize_path = 30; // Determines if adjacent slashes in the path are merged into one before any processing of // requests by HTTP filters or routing. This affects the upstream *:path* header as well. Without // setting this option, incoming requests with path `//dir///file` will not match against route // with `prefix` match set to `/dir`. Defaults to `false`. 
Note that slash merging is not part of - // `HTTP spec ` and is provided for convenience. + // `HTTP spec `_ and is provided for convenience. bool merge_slashes = 33; // The configuration of the request ID extension. This includes operations such as @@ -525,7 +525,7 @@ message HttpConnectionManager { // local port and request method is not CONNECT. This affects the upstream host header as well. // Without setting this option, incoming requests with host `example:443` will not match against // route with :ref:`domains` match set to `example`. Defaults to `false`. Note that port removal is not part - // of `HTTP spec ` and is provided for convenience. + // of `HTTP spec `_ and is provided for convenience. bool strip_matching_host_port = 39; } From ad9245b99abbcd74b67b4d37006446cbbb4f9f54 Mon Sep 17 00:00:00 2001 From: Joshua Marantz Date: Tue, 16 Jun 2020 19:37:36 -0400 Subject: [PATCH 368/909] remove local shadow variables and manual clearing of parent status. (#11606) Signed-off-by: Joshua Marantz Commit Message: Remove superfluous shadow gauge values from the health-checker, as tracking child-values separately is now done in all gauges, so that parent contributions can be erased when the parents exit. Additional Description: Risk Level: medium -- semantics are not super-clear to me but I think this looks right. Testing: //test/... Docs Changes: n/a Release Notes: n/a --- .../upstream/health_checker_base_impl.cc | 35 +++---------------- .../upstream/health_checker_base_impl.h | 3 -- 2 files changed, 4 insertions(+), 34 deletions(-) diff --git a/source/common/upstream/health_checker_base_impl.cc b/source/common/upstream/health_checker_base_impl.cc index 7146b20bb3d2..691bc12552cf 100644 --- a/source/common/upstream/health_checker_base_impl.cc +++ b/source/common/upstream/health_checker_base_impl.cc @@ -78,17 +78,9 @@ HealthCheckerImplBase::~HealthCheckerImplBase() { } } -void HealthCheckerImplBase::decHealthy() { - ASSERT(local_process_healthy_ > 0); - local_process_healthy_--; - refreshHealthyStat(); -} +void HealthCheckerImplBase::decHealthy() { stats_.healthy_.sub(1); } -void HealthCheckerImplBase::decDegraded() { - ASSERT(local_process_degraded_ > 0); - local_process_degraded_--; - refreshHealthyStat(); -} +void HealthCheckerImplBase::decDegraded() { stats_.degraded_.sub(1); } HealthCheckerStats HealthCheckerImplBase::generateStats(Stats::Scope& scope) { std::string prefix("health_check."); @@ -96,15 +88,9 @@ HealthCheckerStats HealthCheckerImplBase::generateStats(Stats::Scope& scope) { POOL_GAUGE_PREFIX(scope, prefix))}; } -void HealthCheckerImplBase::incHealthy() { - local_process_healthy_++; - refreshHealthyStat(); -} +void HealthCheckerImplBase::incHealthy() { stats_.healthy_.add(1); } -void HealthCheckerImplBase::incDegraded() { - local_process_degraded_++; - refreshHealthyStat(); -} +void HealthCheckerImplBase::incDegraded() { stats_.degraded_.add(1); } std::chrono::milliseconds HealthCheckerImplBase::interval(HealthState state, HealthTransition changed_state) const { @@ -187,20 +173,7 @@ void HealthCheckerImplBase::onClusterMemberUpdate(const HostVector& hosts_added, } } -void HealthCheckerImplBase::refreshHealthyStat() { - // Each hot restarted process health checks independently. To make the stats easier to read, - // we assume that both processes will converge and the last one that writes wins for the host. 
- stats_.healthy_.set(local_process_healthy_); - stats_.degraded_.set(local_process_degraded_); -} - void HealthCheckerImplBase::runCallbacks(HostSharedPtr host, HealthTransition changed_state) { - // When a parent process shuts down, it will kill all of the active health checking sessions, - // which will decrement the healthy count and the healthy stat in the parent. If the child is - // stable and does not update, the healthy stat will be wrong. This routine is called any time - // any HC happens against a host so just refresh the healthy stat here so that it is correct. - refreshHealthyStat(); - for (const HostStatusCb& cb : callbacks_) { cb(host, changed_state); } diff --git a/source/common/upstream/health_checker_base_impl.h b/source/common/upstream/health_checker_base_impl.h index a69765fbbd0f..2da51007f0df 100644 --- a/source/common/upstream/health_checker_base_impl.h +++ b/source/common/upstream/health_checker_base_impl.h @@ -135,7 +135,6 @@ class HealthCheckerImplBase : public HealthChecker, std::chrono::milliseconds intervalWithJitter(uint64_t base_time_ms, std::chrono::milliseconds interval_jitter) const; void onClusterMemberUpdate(const HostVector& hosts_added, const HostVector& hosts_removed); - void refreshHealthyStat(); void runCallbacks(HostSharedPtr host, HealthTransition changed_state); void setUnhealthyCrossThread(const HostSharedPtr& host); static std::shared_ptr @@ -155,8 +154,6 @@ class HealthCheckerImplBase : public HealthChecker, const std::chrono::milliseconds unhealthy_edge_interval_; const std::chrono::milliseconds healthy_edge_interval_; std::unordered_map active_sessions_; - uint64_t local_process_healthy_{}; - uint64_t local_process_degraded_{}; const std::shared_ptr transport_socket_options_; const MetadataConstSharedPtr transport_socket_match_metadata_; }; From 3151220aa4815bc3e9f3097c06a84b8d6e32e4c1 Mon Sep 17 00:00:00 2001 From: Taras Date: Wed, 17 Jun 2020 09:37:23 -0400 Subject: [PATCH 369/909] alts: add explicit grpc alts version for new connections open by alts extension (#11539) This PR will fix incompatibility of envoy ALTS extension with grpc-go that explicitly requires match of RPC versions when establishing ALTS connection. https://github.com/grpc/grpc-go/blob/399ae780646dfdf73dac136ddef0db066199ead9/credentials/alts/alts.go#L216 Currently not setting rpc version in ALTS request makes it impossible to use envoy to proxy grpc-go requests. 
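Additional Description: the fix amounts to populating options->rpc_versions before the ALTS TSI handshaker is created, using the gRPC C-core helpers referenced in the diff below. A rough sketch of the idea follows (client side only; grpc_alts_credentials_client_options_create() and grpc_alts_credentials_options_destroy() are standard C-core calls assumed here for illustration rather than taken from this patch):

  // Create ALTS client credentials options and pin the supported RPC protocol
  // version range so the handshake request advertises it to the peer,
  // satisfying grpc-go's explicit version check.
  grpc_alts_credentials_options* options = grpc_alts_credentials_client_options_create();
  grpc_gcp_rpc_protocol_versions_set_max(&options->rpc_versions, GRPC_PROTOCOL_VERSION_MAX_MAJOR,
                                         GRPC_PROTOCOL_VERSION_MAX_MINOR);
  grpc_gcp_rpc_protocol_versions_set_min(&options->rpc_versions, GRPC_PROTOCOL_VERSION_MIN_MAJOR,
                                         GRPC_PROTOCOL_VERSION_MIN_MINOR);
  // ... hand the options to the ALTS TSI handshaker creation call (see config.cc below) ...
  grpc_alts_credentials_options_destroy(options);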
Signed-off-by: Taras Galkovskyi --- bazel/repositories.bzl | 10 ++ .../transport_sockets/alts/config.cc | 10 ++ .../transport_sockets/alts/grpc_tsi.h | 2 + test/extensions/transport_sockets/alts/BUILD | 2 + .../alts/alts_integration_test.cc | 97 ++++++++++++++++++- 5 files changed, 118 insertions(+), 3 deletions(-) diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index 6d104839e91a..baf22ea109dd 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -739,6 +739,16 @@ def _com_github_grpc_grpc(): actual = "@com_github_grpc_grpc//test/core/tsi/alts/fake_handshaker:fake_handshaker_lib", ) + native.bind( + name = "grpc_alts_handshaker_proto", + actual = "@com_github_grpc_grpc//test/core/tsi/alts/fake_handshaker:handshaker_proto", + ) + + native.bind( + name = "grpc_alts_transport_security_common_proto", + actual = "@com_github_grpc_grpc//test/core/tsi/alts/fake_handshaker:transport_security_common_proto", + ) + def _upb(): _repository_impl( name = "upb", diff --git a/source/extensions/transport_sockets/alts/config.cc b/source/extensions/transport_sockets/alts/config.cc index 1b9514162463..c45e7f0a9ee1 100644 --- a/source/extensions/transport_sockets/alts/config.cc +++ b/source/extensions/transport_sockets/alts/config.cc @@ -26,6 +26,15 @@ using GrpcAltsCredentialsOptionsPtr = namespace { +// TODO: gRPC v1.30.0-pre1 defines the equivalent function grpc_alts_set_rpc_protocol_versions +// that should be called directly when available. +void grpcAltsSetRpcProtocolVersions(grpc_gcp_rpc_protocol_versions* rpc_versions) { + grpc_gcp_rpc_protocol_versions_set_max(rpc_versions, GRPC_PROTOCOL_VERSION_MAX_MAJOR, + GRPC_PROTOCOL_VERSION_MAX_MINOR); + grpc_gcp_rpc_protocol_versions_set_min(rpc_versions, GRPC_PROTOCOL_VERSION_MIN_MAJOR, + GRPC_PROTOCOL_VERSION_MIN_MINOR); +} + // Returns true if the peer's service account is found in peers, otherwise // returns false and fills out err with an error message. bool doValidate(const tsi_peer& peer, const std::unordered_set& peers, @@ -108,6 +117,7 @@ Network::TransportSocketFactoryPtr createTransportSocketFactoryHelper( } else { options = GrpcAltsCredentialsOptionsPtr(grpc_alts_credentials_server_options_create()); } + grpcAltsSetRpcProtocolVersions(&options->rpc_versions); const char* target_name = is_upstream ? 
"" : nullptr; tsi_handshaker* handshaker = nullptr; // Specifying target name as empty since TSI won't take care of validating peer identity diff --git a/source/extensions/transport_sockets/alts/grpc_tsi.h b/source/extensions/transport_sockets/alts/grpc_tsi.h index de36141e87dd..d07cd8d57fb2 100644 --- a/source/extensions/transport_sockets/alts/grpc_tsi.h +++ b/source/extensions/transport_sockets/alts/grpc_tsi.h @@ -11,8 +11,10 @@ #endif #include "grpc/grpc_security.h" +#include "src/core/lib/transport/transport.h" #include "src/core/tsi/alts/handshaker/alts_shared_resource.h" #include "src/core/tsi/alts/handshaker/alts_tsi_handshaker.h" +#include "src/core/tsi/alts/handshaker/transport_security_common_api.h" #include "src/core/tsi/transport_security_grpc.h" #include "src/core/tsi/transport_security_interface.h" diff --git a/test/extensions/transport_sockets/alts/BUILD b/test/extensions/transport_sockets/alts/BUILD index d1232d178908..386c25ace615 100644 --- a/test/extensions/transport_sockets/alts/BUILD +++ b/test/extensions/transport_sockets/alts/BUILD @@ -75,6 +75,8 @@ envoy_extension_cc_test( extension_name = "envoy.transport_sockets.alts", external_deps = [ "grpc_alts_fake_handshaker_server", + "grpc_alts_handshaker_proto", + "grpc_alts_transport_security_common_proto", ], tags = ["fails_on_windows"], deps = [ diff --git a/test/extensions/transport_sockets/alts/alts_integration_test.cc b/test/extensions/transport_sockets/alts/alts_integration_test.cc index 587fd3b8e490..8075ae85fdda 100644 --- a/test/extensions/transport_sockets/alts/alts_integration_test.cc +++ b/test/extensions/transport_sockets/alts/alts_integration_test.cc @@ -5,7 +5,18 @@ #include "extensions/transport_sockets/alts/config.h" +#ifdef major +#undef major +#endif +#ifdef minor +#undef minor +#endif + #include "test/core/tsi/alts/fake_handshaker/fake_handshaker_server.h" +#include "test/core/tsi/alts/fake_handshaker/handshaker.grpc.pb.h" +#include "test/core/tsi/alts/fake_handshaker/handshaker.pb.h" +#include "test/core/tsi/alts/fake_handshaker/transport_security_common.pb.h" + #include "test/integration/http_integration.h" #include "test/integration/integration.h" #include "test/integration/server.h" @@ -29,16 +40,55 @@ namespace TransportSockets { namespace Alts { namespace { +// Fake handshaker message, copied from grpc::gcp::FakeHandshakerService implementation. +constexpr char kClientInitFrame[] = "ClientInit"; + +// Hollowed out implementation of HandshakerService that is dysfunctional, but +// responds correctly to the first client request, capturing client and server +// ALTS versions in the process. +class CapturingHandshakerService : public grpc::gcp::HandshakerService::Service { +public: + CapturingHandshakerService() = default; + + grpc::Status + DoHandshake(grpc::ServerContext*, + grpc::ServerReaderWriter* stream) + override { + grpc::gcp::HandshakerReq request; + grpc::gcp::HandshakerResp response; + while (stream->Read(&request)) { + if (request.has_client_start()) { + client_versions = request.client_start().rpc_versions(); + // Sets response to make first request successful. 
+ response.set_out_frames(kClientInitFrame); + response.set_bytes_consumed(0); + response.mutable_status()->set_code(grpc::StatusCode::OK); + } else if (request.has_server_start()) { + server_versions = request.server_start().rpc_versions(); + response.mutable_status()->set_code(grpc::StatusCode::CANCELLED); + } + stream->Write(response); + request.Clear(); + } + return grpc::Status::OK; + } + + // Storing client and server RPC versions for later verification. + grpc::gcp::RpcProtocolVersions client_versions; + grpc::gcp::RpcProtocolVersions server_versions; +}; + class AltsIntegrationTestBase : public testing::TestWithParam, public HttpIntegrationTest { public: AltsIntegrationTestBase(const std::string& server_peer_identity, const std::string& client_peer_identity, bool server_connect_handshaker, - bool client_connect_handshaker) + bool client_connect_handshaker, bool capturing_handshaker = false) : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, GetParam()), server_peer_identity_(server_peer_identity), client_peer_identity_(client_peer_identity), server_connect_handshaker_(server_connect_handshaker), - client_connect_handshaker_(client_connect_handshaker) {} + client_connect_handshaker_(client_connect_handshaker), + capturing_handshaker_(capturing_handshaker) {} void initialize() override { config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { @@ -60,7 +110,14 @@ class AltsIntegrationTestBase : public testing::TestWithParamthreadFactory().createThread([this]() { - std::unique_ptr service = grpc::gcp::CreateFakeHandshakerService(); + std::unique_ptr service; + if (capturing_handshaker_) { + capturing_handshaker_service_ = new CapturingHandshakerService(); + service = std::unique_ptr{capturing_handshaker_service_}; + } else { + capturing_handshaker_service_ = nullptr; + service = grpc::gcp::CreateFakeHandshakerService(); + } std::string server_address = Network::Test::getLoopbackAddressUrlString(version_) + ":0"; grpc::ServerBuilder builder; @@ -143,6 +200,8 @@ class AltsIntegrationTestBase : public testing::TestWithParamconnected()); } +class AltsIntegrationTestCapturingHandshaker : public AltsIntegrationTestBase { +public: + AltsIntegrationTestCapturingHandshaker() + : AltsIntegrationTestBase("", "", + /* server_connect_handshaker */ true, + /* client_connect_handshaker */ true, + /* capturing_handshaker */ true) {} +}; + +INSTANTIATE_TEST_SUITE_P(IpVersions, AltsIntegrationTestCapturingHandshaker, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); + +// Verifies that handshake request should include ALTS version. 
+TEST_P(AltsIntegrationTestCapturingHandshaker, CheckAltsVersion) { + initialize(); + codec_client_ = makeRawHttpConnection(makeAltsConnection()); + EXPECT_FALSE(codec_client_->connected()); + EXPECT_EQ(capturing_handshaker_service_->client_versions.max_rpc_version().major(), + capturing_handshaker_service_->server_versions.max_rpc_version().major()); + EXPECT_EQ(capturing_handshaker_service_->client_versions.max_rpc_version().minor(), + capturing_handshaker_service_->server_versions.max_rpc_version().minor()); + EXPECT_EQ(capturing_handshaker_service_->client_versions.min_rpc_version().major(), + capturing_handshaker_service_->server_versions.min_rpc_version().major()); + EXPECT_EQ(capturing_handshaker_service_->client_versions.min_rpc_version().minor(), + capturing_handshaker_service_->server_versions.min_rpc_version().minor()); + EXPECT_NE(0, capturing_handshaker_service_->client_versions.max_rpc_version().major()); + EXPECT_NE(0, capturing_handshaker_service_->client_versions.max_rpc_version().minor()); + EXPECT_NE(0, capturing_handshaker_service_->client_versions.min_rpc_version().major()); + EXPECT_NE(0, capturing_handshaker_service_->client_versions.min_rpc_version().minor()); +} + } // namespace } // namespace Alts } // namespace TransportSockets From ddb5187840fc3f8a7f3fdecaf1d4497d3fedacdc Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Wed, 17 Jun 2020 12:58:54 -0400 Subject: [PATCH 370/909] conn_pool: unifying drain interfaces (#11615) Risk Level: Low Testing: n/a Docs Changes: n/a Release Notes: n/a Part of #11529 Signed-off-by: Alyssa Wilk --- include/envoy/common/BUILD | 4 ++++ include/envoy/common/conn_pool.h | 36 ++++++++++++++++++++++++++++++++ include/envoy/http/conn_pool.h | 28 +------------------------ include/envoy/tcp/conn_pool.h | 30 +------------------------- 4 files changed, 42 insertions(+), 56 deletions(-) diff --git a/include/envoy/common/BUILD b/include/envoy/common/BUILD index 4d427383bf21..30a931976d55 100644 --- a/include/envoy/common/BUILD +++ b/include/envoy/common/BUILD @@ -22,6 +22,10 @@ envoy_basic_cc_library( envoy_cc_library( name = "conn_pool_interface", hdrs = ["conn_pool.h"], + deps = [ + "//include/envoy/event:deferred_deletable", + "//include/envoy/upstream:upstream_interface", + ], ) envoy_cc_library( diff --git a/include/envoy/common/conn_pool.h b/include/envoy/common/conn_pool.h index 0079afe2f7d5..7c22f3a43c63 100644 --- a/include/envoy/common/conn_pool.h +++ b/include/envoy/common/conn_pool.h @@ -1,6 +1,8 @@ #pragma once #include "envoy/common/pure.h" +#include "envoy/event/deferred_deletable.h" +#include "envoy/upstream/upstream.h" namespace Envoy { namespace ConnectionPool { @@ -34,6 +36,40 @@ class Cancellable { virtual void cancel(CancelPolicy cancel_policy) PURE; }; +/* + * An instance of a generic connection pool. + */ +class Instance : public Event::DeferredDeletable { +public: + ~Instance() override = default; + + /** + * Called when a connection pool has been drained of pending requests, busy connections, and + * ready connections. + */ + using DrainedCb = std::function; + + /** + * Register a callback that gets called when the connection pool is fully drained. No actual + * draining is done. The owner of the connection pool is responsible for not creating any + * new streams. + */ + virtual void addDrainedCallback(DrainedCb cb) PURE; + + /** + * Actively drain all existing connection pool connections. 
This method can be used in cases + * where the connection pool is not being destroyed, but the caller wishes to make sure that + * all new streams take place on a new connection. For example, when a health check failure + * occurs. + */ + virtual void drainConnections() PURE; + + /** + * @return Upstream::HostDescriptionConstSharedPtr the host for which connections are pooled. + */ + virtual Upstream::HostDescriptionConstSharedPtr host() const PURE; +}; + enum class PoolFailureReason { // A resource overflowed and policy prevented a new connection from being created. Overflow, diff --git a/include/envoy/http/conn_pool.h b/include/envoy/http/conn_pool.h index ebb5bf363492..ef0d1a4e98d7 100644 --- a/include/envoy/http/conn_pool.h +++ b/include/envoy/http/conn_pool.h @@ -48,7 +48,7 @@ class Callbacks { /** * An instance of a generic connection pool. */ -class Instance : public Event::DeferredDeletable { +class Instance : public Envoy::ConnectionPool::Instance { public: ~Instance() override = default; @@ -57,27 +57,6 @@ class Instance : public Event::DeferredDeletable { */ virtual Http::Protocol protocol() const PURE; - /** - * Called when a connection pool has been drained of pending requests, busy connections, and - * ready connections. - */ - using DrainedCb = std::function; - - /** - * Register a callback that gets called when the connection pool is fully drained. No actual - * draining is done. The owner of the connection pool is responsible for not creating any - * new streams. - */ - virtual void addDrainedCallback(DrainedCb cb) PURE; - - /** - * Actively drain all existing connection pool connections. This method can be used in cases - * where the connection pool is not being destroyed, but the caller wishes to make sure that - * all new streams take place on a new connection. For example, when a health check failure - * occurs. - */ - virtual void drainConnections() PURE; - /** * Determines whether the connection pool is actively processing any requests. * @return true if the connection pool has any pending requests or any active requests. @@ -101,11 +80,6 @@ class Instance : public Event::DeferredDeletable { */ virtual Cancellable* newStream(Http::ResponseDecoder& response_decoder, Callbacks& callbacks) PURE; - - /** - * @return Upstream::HostDescriptionConstSharedPtr the host for which connections are pooled. - */ - virtual Upstream::HostDescriptionConstSharedPtr host() const PURE; }; using InstancePtr = std::unique_ptr; diff --git a/include/envoy/tcp/conn_pool.h b/include/envoy/tcp/conn_pool.h index e99312ff1d6d..e5679abb25ec 100644 --- a/include/envoy/tcp/conn_pool.h +++ b/include/envoy/tcp/conn_pool.h @@ -126,31 +126,8 @@ class Callbacks { /** * An instance of a generic connection pool. */ -class Instance : public Event::DeferredDeletable { +class Instance : public Envoy::ConnectionPool::Instance { public: - ~Instance() override = default; - - /** - * Called when a connection pool has been drained of pending requests, busy connections, and - * ready connections. - */ - using DrainedCb = std::function; - - /** - * Register a callback that gets called when the connection pool is fully drained. No actual - * draining is done. The owner of the connection pool is responsible for not creating any - * new connections. - */ - virtual void addDrainedCallback(DrainedCb cb) PURE; - - /** - * Actively drain all existing connection pool connections. 
This method can be used in cases - * where the connection pool is not being destroyed, but the caller wishes to make sure that - * all new requests take place on a new connection. For example, when a health check failure - * occurs. - */ - virtual void drainConnections() PURE; - /** * Immediately close all existing connection pool connections. This method can be used in cases * where the connection pool is not being destroyed, but the caller wishes to terminate all @@ -170,11 +147,6 @@ class Instance : public Event::DeferredDeletable { * should be done by resetting the connection. */ virtual Cancellable* newConnection(Callbacks& callbacks) PURE; - - /** - * @return the description of the host this connection pool is for. - */ - virtual Upstream::HostDescriptionConstSharedPtr host() const PURE; }; using InstancePtr = std::unique_ptr; From 291cb8ce3f495ce8e254ebbf4e2c9203ebac7dc5 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Wed, 17 Jun 2020 13:33:40 -0400 Subject: [PATCH 371/909] docs: adding a connect matcher faq (#11614) Risk Level: n/a (docs) Testing: n/a (docs) Docs Changes: yes Release Notes: no Fixes #11552 Signed-off-by: Alyssa Wilk --- .../faq/debugging/why_is_envoy_404ing_connect_requests.rst | 6 ++++++ docs/root/faq/overview.rst | 1 + 2 files changed, 7 insertions(+) create mode 100644 docs/root/faq/debugging/why_is_envoy_404ing_connect_requests.rst diff --git a/docs/root/faq/debugging/why_is_envoy_404ing_connect_requests.rst b/docs/root/faq/debugging/why_is_envoy_404ing_connect_requests.rst new file mode 100644 index 000000000000..ef7ce6fddfbb --- /dev/null +++ b/docs/root/faq/debugging/why_is_envoy_404ing_connect_requests.rst @@ -0,0 +1,6 @@ +.. _faq_why_is_envoy_404ing_connect_requests: + +Why is Envoy sending 404s to CONNECT requests? +============================================== + +Envoy's default matchers match based on host and path. Because CONNECT requests (generally) do not have a path, most matchers will fail to match CONNECT requests, and Envoy will send a 404 because the route is not found. The solution for HTTP/1.1 CONNECT requests, is to use a :ref:`connect_matcher ` as described in the CONNECT section of the :ref:`upgrade documentation`. diff --git a/docs/root/faq/overview.rst b/docs/root/faq/overview.rst index 48267fb00684..47156f7cd70e 100644 --- a/docs/root/faq/overview.rst +++ b/docs/root/faq/overview.rst @@ -33,6 +33,7 @@ Debugging :maxdepth: 2 debugging/why_is_envoy_sending_internal_responses + debugging/why_is_envoy_404ing_connect_requests debugging/why_is_envoy_sending_413s debugging/why_is_my_route_not_found From a13bce894c229847a78f41569cacea48c93e50bf Mon Sep 17 00:00:00 2001 From: tomocy <36136133+tomocy@users.noreply.github.com> Date: Thu, 18 Jun 2020 02:36:35 +0900 Subject: [PATCH 372/909] docs: fix description of flow control (#11612) Risk Level: Low Docs Changes: This pull request fixes typos and the descriptions about calling readDisable on high/low watermarks. Signed-off-by: tomocy --- source/docs/flow_control.md | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/source/docs/flow_control.md b/source/docs/flow_control.md index 1bdad5450da3..80f3bd01a161 100644 --- a/source/docs/flow_control.md +++ b/source/docs/flow_control.md @@ -135,7 +135,7 @@ time, it should return `FilterDataStatus::StopIterationAndWatermark` to pause further data processing, which will cause the `ConnectionManagerImpl` to trigger watermark callbacks on behalf of the filter. 
If a filter can not make forward progress without the complete body, it should return `FilterDataStatus::StopIterationAndBuffer`. -in this case if the `ConnectionManagerImpl` buffers more than the allowed data +In this case if the `ConnectionManagerImpl` buffers more than the allowed data it will return an error downstream: a 413 on the request path, 500 or `resetStream()` on the response path. @@ -165,7 +165,7 @@ And the low watermark path: `StreamDecoderFilterCallback::onDecoderFilterBelowWriteBufferLowWatermark()`. * When `Envoy::Http::ConnectionManagerImpl` receives `onDecoderFilterAboveWriteBufferHighWatermark()` it calls `readDisable(false)` on the downstream - stream to pause data. + stream to resume data. # Encoder filters @@ -192,11 +192,11 @@ The encoder high watermark path for streaming filters is as follows: `DownstreamWatermarkCallbacks::onAboveWriteBufferHighWatermark()` for all filters which registered to receive watermark events * `Envoy::Router::Filter` receives `onAboveWriteBufferHighWatermark()` and calls - `readDisable(false)` on the upstream request. + `readDisable(true)` on the upstream request. The encoder low watermark path for streaming filters is as follows: - * When an instance of `Envoy::Router::StreamEncoderFilter` buffers too much data it should call + * When an instance of `Envoy::Router::StreamEncoderFilter` buffers drains it should call `StreamEncoderFilterCallback::onEncodeFilterBelowWriteBufferLowWatermark()`. * When `Envoy::Http::ConnectionManagerImpl::ActiveStreamEncoderFilter` receives `onEncoderFilterBelowWriteBufferLowWatermark()` it calls @@ -205,7 +205,7 @@ The encoder low watermark path for streaming filters is as follows: `DownstreamWatermarkCallbacks::onBelowWriteBufferLowWatermark()` for all filters which registered to receive watermark events * `Envoy::Router::Filter` receives `onBelowWriteBufferLowWatermark()` and calls - `readDisable(true)` on the upstream request. + `readDisable(false)` on the upstream request. # HTTP and HTTP/2 codec upstream send buffer @@ -394,6 +394,8 @@ watermark path is as follows: From this point the `ConnectionManagerImpl` takes over and the code path is the same as for the HTTP/2 codec downstream send buffer. +The low watermark path is as follows: + * When `Http::Http1::ConnectionImpl::output_buffer_` drains it calls `onOutputBufferBelowLowWatermark()` * Http::Http1::ConnectionImpl::ServerConnectionImpl::onOutputBufferBelowLowWatermark() calls @@ -416,6 +418,8 @@ watermark path is as follows: From this point on the `Envoy::Router::Filter` picks up the event and the code path is the same as for the HTTP/2 codec upstream send buffer. +The low watermark path is as follows: + * When `Http::Http1::ConnectionImpl::output_buffer_` drains it calls `onOutputBufferBelowLowWatermark()` * Http::Http1::ConnectionImpl::ClientConnectionImpl::onOutputBufferBelowLowWatermark() calls From 9aa85a314552bffb60c13a6acfd60317eaa3bc14 Mon Sep 17 00:00:00 2001 From: sschepens Date: Wed, 17 Jun 2020 15:32:37 -0300 Subject: [PATCH 373/909] ads: pause and resume v3 apis (#11300) Pause and resune V3 Api Verions as well as V2 when using ads. Currently only V2 Api is being paused, this causes envoy to send a separate discovery request for every resource on CDS/LDS/SRDS updates. Risk Level: Medium? 
Testing: Fixes: #11267 Signed-off-by: Sebastian Schepens --- include/envoy/config/grpc_mux.h | 25 +++ source/common/config/grpc_mux_impl.cc | 21 +++ source/common/config/grpc_mux_impl.h | 6 + source/common/config/new_grpc_mux_impl.cc | 21 +++ source/common/config/new_grpc_mux_impl.h | 4 + source/common/router/scoped_rds.cc | 10 +- source/common/upstream/cds_api_impl.cc | 8 +- .../common/upstream/cluster_manager_impl.cc | 25 +-- source/server/lds_api.cc | 12 +- source/server/server.cc | 8 +- test/integration/ads_integration.cc | 80 ++++++--- test/integration/ads_integration.h | 17 +- test/integration/ads_integration_test.cc | 152 +++++++++++++++++- test/integration/integration.h | 44 +++-- test/test_common/utility.cc | 26 +++ 15 files changed, 384 insertions(+), 75 deletions(-) diff --git a/include/envoy/config/grpc_mux.h b/include/envoy/config/grpc_mux.h index 946ee3d13da8..35729b9f7ea9 100644 --- a/include/envoy/config/grpc_mux.h +++ b/include/envoy/config/grpc_mux.h @@ -65,6 +65,15 @@ class GrpcMux { */ virtual void pause(const std::string& type_url) PURE; + /** + * Pause discovery requests for given API types. This is useful when we're processing an update + * for LDS or CDS and don't want a flood of updates for RDS or EDS respectively. Discovery + * requests may later be resumed with resume(). + * @param type_urls type URLs corresponding to xDS API, e.g. + * type.googleapis.com/envoy.api.v2.Cluster. + */ + virtual void pause(const std::vector type_urls) PURE; + /** * Resume discovery requests for a given API type. This will send a discovery request if one would * have been sent during the pause. @@ -72,6 +81,14 @@ class GrpcMux { */ virtual void resume(const std::string& type_url) PURE; + /** + * Resume discovery requests for given API types. This will send a discovery request if one would + * have been sent during the pause. + * @param type_urls type URLs corresponding to xDS API e.g. + * type.googleapis.com/envoy.api.v2.Cluster + */ + virtual void resume(const std::vector type_urls) PURE; + /** * Retrieves the current pause state as set by pause()/resume(). * @param type_url type URL corresponding to xDS API, e.g. @@ -80,6 +97,14 @@ class GrpcMux { */ virtual bool paused(const std::string& type_url) const PURE; + /** + * Retrieves the current pause state as set by pause()/resume(). + * @param type_urls type URLs corresponding to xDS API, e.g. + * type.googleapis.com/envoy.api.v2.Cluster + * @return bool whether any of the APIs is paused. + */ + virtual bool paused(const std::vector type_urls) const PURE; + /** * Start a configuration subscription asynchronously for some API type and resources. * @param type_url type URL corresponding to xDS API, e.g. 
diff --git a/source/common/config/grpc_mux_impl.cc b/source/common/config/grpc_mux_impl.cc index a18e9deced82..6da8cf9e54b7 100644 --- a/source/common/config/grpc_mux_impl.cc +++ b/source/common/config/grpc_mux_impl.cc @@ -102,6 +102,12 @@ void GrpcMuxImpl::pause(const std::string& type_url) { api_state.paused_ = true; } +void GrpcMuxImpl::pause(const std::vector type_urls) { + for (const auto& type_url : type_urls) { + pause(type_url); + } +} + void GrpcMuxImpl::resume(const std::string& type_url) { ENVOY_LOG(debug, "Resuming discovery requests for {}", type_url); ApiState& api_state = api_state_[type_url]; @@ -115,6 +121,12 @@ void GrpcMuxImpl::resume(const std::string& type_url) { } } +void GrpcMuxImpl::resume(const std::vector type_urls) { + for (const auto& type_url : type_urls) { + resume(type_url); + } +} + bool GrpcMuxImpl::paused(const std::string& type_url) const { auto entry = api_state_.find(type_url); if (entry == api_state_.end()) { @@ -123,6 +135,15 @@ bool GrpcMuxImpl::paused(const std::string& type_url) const { return entry->second.paused_; } +bool GrpcMuxImpl::paused(const std::vector type_urls) const { + for (const auto& type_url : type_urls) { + if (paused(type_url)) { + return true; + } + } + return false; +} + void GrpcMuxImpl::onDiscoveryResponse( std::unique_ptr&& message, ControlPlaneStats& control_plane_stats) { diff --git a/source/common/config/grpc_mux_impl.h b/source/common/config/grpc_mux_impl.h index 63c39c0994b7..00120fed44f0 100644 --- a/source/common/config/grpc_mux_impl.h +++ b/source/common/config/grpc_mux_impl.h @@ -39,8 +39,11 @@ class GrpcMuxImpl : public GrpcMux, // GrpcMux void pause(const std::string& type_url) override; + void pause(const std::vector type_urls) override; void resume(const std::string& type_url) override; + void resume(const std::vector type_urls) override; bool paused(const std::string& type_url) const override; + bool paused(const std::vector type_urls) const override; GrpcMuxWatchPtr addWatch(const std::string& type_url, const std::set& resources, SubscriptionCallbacks& callbacks) override; @@ -142,8 +145,11 @@ class NullGrpcMuxImpl : public GrpcMux, public: void start() override {} void pause(const std::string&) override {} + void pause(const std::vector) override {} void resume(const std::string&) override {} + void resume(const std::vector) override {} bool paused(const std::string&) const override { return false; } + bool paused(const std::vector) const override { return false; } GrpcMuxWatchPtr addWatch(const std::string&, const std::set&, SubscriptionCallbacks&) override { diff --git a/source/common/config/new_grpc_mux_impl.cc b/source/common/config/new_grpc_mux_impl.cc index c69a14326b09..ff7d2568a921 100644 --- a/source/common/config/new_grpc_mux_impl.cc +++ b/source/common/config/new_grpc_mux_impl.cc @@ -27,15 +27,36 @@ NewGrpcMuxImpl::NewGrpcMuxImpl(Grpc::RawAsyncClientPtr&& async_client, void NewGrpcMuxImpl::pause(const std::string& type_url) { pausable_ack_queue_.pause(type_url); } +void NewGrpcMuxImpl::pause(const std::vector type_urls) { + for (const auto& type_url : type_urls) { + pause(type_url); + } +} + void NewGrpcMuxImpl::resume(const std::string& type_url) { pausable_ack_queue_.resume(type_url); trySendDiscoveryRequests(); } +void NewGrpcMuxImpl::resume(const std::vector type_urls) { + for (const auto& type_url : type_urls) { + resume(type_url); + } +} + bool NewGrpcMuxImpl::paused(const std::string& type_url) const { return pausable_ack_queue_.paused(type_url); } +bool NewGrpcMuxImpl::paused(const 
std::vector type_urls) const { + for (const auto& type_url : type_urls) { + if (paused(type_url)) { + return true; + } + } + return false; +} + void NewGrpcMuxImpl::onDiscoveryResponse( std::unique_ptr&& message, ControlPlaneStats&) { diff --git a/source/common/config/new_grpc_mux_impl.h b/source/common/config/new_grpc_mux_impl.h index cfc712ad6222..73478991b17f 100644 --- a/source/common/config/new_grpc_mux_impl.h +++ b/source/common/config/new_grpc_mux_impl.h @@ -38,8 +38,12 @@ class NewGrpcMuxImpl SubscriptionCallbacks& callbacks) override; void pause(const std::string& type_url) override; + void pause(const std::vector type_urls) override; void resume(const std::string& type_url) override; + void resume(const std::vector type_urls) override; bool paused(const std::string& type_url) const override; + bool paused(const std::vector type_urls) const override; + void onDiscoveryResponse( std::unique_ptr&& message, ControlPlaneStats& control_plane_stats) override; diff --git a/source/common/router/scoped_rds.cc b/source/common/router/scoped_rds.cc index c900d0ead013..a1ecd85c3639 100644 --- a/source/common/router/scoped_rds.cc +++ b/source/common/router/scoped_rds.cc @@ -236,17 +236,17 @@ void ScopedRdsConfigSubscription::onConfigUpdate( std::unique_ptr resume_rds; // if local init manager is initialized, the parent init manager may have gone away. if (localInitManager().state() == Init::Manager::State::Initialized) { - const auto type_url = Envoy::Config::getTypeUrl( - envoy::config::core::v3::ApiVersion::V2); + const auto type_urls = + Envoy::Config::getAllVersionTypeUrls(); noop_init_manager = std::make_unique(fmt::format("SRDS {}:{}", name_, version_info)); // Pause RDS to not send a burst of RDS requests until we start all the new subscriptions. // In the case if factory_context_.init_manager() is uninitialized, RDS is already paused // either by Server init or LDS init. if (factory_context_.clusterManager().adsMux()) { - factory_context_.clusterManager().adsMux()->pause(type_url); + factory_context_.clusterManager().adsMux()->pause(type_urls); } - resume_rds = std::make_unique([this, &noop_init_manager, version_info, type_url] { + resume_rds = std::make_unique([this, &noop_init_manager, version_info, type_urls] { // For new RDS subscriptions created after listener warming up, we don't wait for them to // warm up. Init::WatcherImpl noop_watcher( @@ -258,7 +258,7 @@ void ScopedRdsConfigSubscription::onConfigUpdate( // Note in the case of partial acceptance, accepted RDS subscriptions should be started // despite of any error. 
if (factory_context_.clusterManager().adsMux()) { - factory_context_.clusterManager().adsMux()->resume(type_url); + factory_context_.clusterManager().adsMux()->resume(type_urls); } }); } diff --git a/source/common/upstream/cds_api_impl.cc b/source/common/upstream/cds_api_impl.cc index 389f4f5f265d..86759a9a3d94 100644 --- a/source/common/upstream/cds_api_impl.cc +++ b/source/common/upstream/cds_api_impl.cc @@ -67,11 +67,11 @@ void CdsApiImpl::onConfigUpdate( const std::string& system_version_info) { std::unique_ptr maybe_eds_resume; if (cm_.adsMux()) { - const auto type_url = Config::getTypeUrl( - envoy::config::core::v3::ApiVersion::V2); - cm_.adsMux()->pause(type_url); + const auto type_urls = + Config::getAllVersionTypeUrls(); + cm_.adsMux()->pause(type_urls); maybe_eds_resume = - std::make_unique([this, type_url] { cm_.adsMux()->resume(type_url); }); + std::make_unique([this, type_urls] { cm_.adsMux()->resume(type_urls); }); } ENVOY_LOG(info, "cds: add {} cluster(s), remove {} cluster(s)", added_resources.size(), diff --git a/source/common/upstream/cluster_manager_impl.cc b/source/common/upstream/cluster_manager_impl.cc index 8eeb3c7ff99a..d34f12558373 100644 --- a/source/common/upstream/cluster_manager_impl.cc +++ b/source/common/upstream/cluster_manager_impl.cc @@ -149,19 +149,21 @@ void ClusterManagerInitHelper::maybeFinishInitialize() { secondary_init_clusters_.empty()); if (!secondary_init_clusters_.empty()) { if (!started_secondary_initialize_) { - const auto type_url = Config::getTypeUrl( - envoy::config::core::v3::ApiVersion::V2); ENVOY_LOG(info, "cm init: initializing secondary clusters"); // If the first CDS response doesn't have any primary cluster, ClusterLoadAssignment // should be already paused by CdsApiImpl::onConfigUpdate(). Need to check that to // avoid double pause ClusterLoadAssignment. - if (cm_.adsMux() == nullptr || cm_.adsMux()->paused(type_url)) { - initializeSecondaryClusters(); - } else { - cm_.adsMux()->pause(type_url); - Cleanup eds_resume([this, type_url] { cm_.adsMux()->resume(type_url); }); - initializeSecondaryClusters(); + std::unique_ptr maybe_eds_resume; + if (cm_.adsMux()) { + const auto type_urls = + Config::getAllVersionTypeUrls(); + if (!cm_.adsMux()->paused(type_urls)) { + cm_.adsMux()->pause(type_urls); + maybe_eds_resume = + std::make_unique([this, type_urls] { cm_.adsMux()->resume(type_urls); }); + } } + initializeSecondaryClusters(); } return; } @@ -798,13 +800,12 @@ void ClusterManagerImpl::updateClusterCounts() { // signal to ADS to proceed with RDS updates. // If we're in the middle of shutting down (ads_mux_ already gone) then this is irrelevant. 
if (ads_mux_) { - const auto type_url = Config::getTypeUrl( - envoy::config::core::v3::ApiVersion::V2); + const auto type_urls = Config::getAllVersionTypeUrls(); const uint64_t previous_warming = cm_stats_.warming_clusters_.value(); if (previous_warming == 0 && !warming_clusters_.empty()) { - ads_mux_->pause(type_url); + ads_mux_->pause(type_urls); } else if (previous_warming > 0 && warming_clusters_.empty()) { - ads_mux_->resume(type_url); + ads_mux_->resume(type_urls); } } cm_stats_.active_clusters_.set(active_clusters_.size()); diff --git a/source/server/lds_api.cc b/source/server/lds_api.cc index 373956339a96..c767970aa173 100644 --- a/source/server/lds_api.cc +++ b/source/server/lds_api.cc @@ -41,13 +41,13 @@ void LdsApiImpl::onConfigUpdate( const Protobuf::RepeatedPtrField& added_resources, const Protobuf::RepeatedPtrField& removed_resources, const std::string& system_version_info) { - std::unique_ptr maybe_eds_resume; + std::unique_ptr maybe_rds_resume; if (cm_.adsMux()) { - const auto type_url = Config::getTypeUrl( - envoy::config::core::v3::ApiVersion::V2); - cm_.adsMux()->pause(type_url); - maybe_eds_resume = - std::make_unique([this, type_url] { cm_.adsMux()->resume(type_url); }); + const auto type_urls = + Config::getAllVersionTypeUrls(); + cm_.adsMux()->pause(type_urls); + maybe_rds_resume = + std::make_unique([this, type_urls] { cm_.adsMux()->resume(type_urls); }); } bool any_applied = false; diff --git a/source/server/server.cc b/source/server/server.cc index 246aac405ffe..6b5ea74e0981 100644 --- a/source/server/server.cc +++ b/source/server/server.cc @@ -600,13 +600,13 @@ RunHelper::RunHelper(Instance& instance, const Options& options, Event::Dispatch return; } - const auto type_url = Config::getTypeUrl( - envoy::config::core::v3::ApiVersion::V2); + const auto type_urls = + Config::getAllVersionTypeUrls(); // Pause RDS to ensure that we don't send any requests until we've // subscribed to all the RDS resources. The subscriptions happen in the init callbacks, // so we pause RDS until we've completed all the callbacks. if (cm.adsMux()) { - cm.adsMux()->pause(type_url); + cm.adsMux()->pause(type_urls); } ENVOY_LOG(info, "all clusters initialized. initializing init manager"); @@ -615,7 +615,7 @@ RunHelper::RunHelper(Instance& instance, const Options& options, Event::Dispatch // Now that we're execute all the init callbacks we can resume RDS // as we've subscribed to all the statically defined RDS resources. if (cm.adsMux()) { - cm.adsMux()->resume(type_url); + cm.adsMux()->resume(type_urls); } }); } diff --git a/test/integration/ads_integration.cc b/test/integration/ads_integration.cc index 9ccc97070f98..dab9cd0125cf 100644 --- a/test/integration/ads_integration.cc +++ b/test/integration/ads_integration.cc @@ -20,45 +20,66 @@ using testing::AssertionResult; namespace Envoy { -AdsIntegrationTest::AdsIntegrationTest() +AdsIntegrationTest::AdsIntegrationTest(const envoy::config::core::v3::ApiVersion api_version) : HttpIntegrationTest( Http::CodecClient::Type::HTTP2, ipVersion(), - AdsIntegrationConfig(sotwOrDelta() == Grpc::SotwOrDelta::Sotw ? "GRPC" : "DELTA_GRPC")) { + adsIntegrationConfig(sotwOrDelta() == Grpc::SotwOrDelta::Sotw ? "GRPC" : "DELTA_GRPC", + api_version == envoy::config::core::v3::ApiVersion::V2 ? 
"V2" + : "V3")) { use_lds_ = false; create_xds_upstream_ = true; tls_xds_upstream_ = true; sotw_or_delta_ = sotwOrDelta(); + api_version_ = api_version; } void AdsIntegrationTest::TearDown() { cleanUpXdsConnection(); } +bool AdsIntegrationTest::shouldBoost() { + return api_version_ == envoy::config::core::v3::ApiVersion::V2 ? true : false; +} + envoy::config::cluster::v3::Cluster AdsIntegrationTest::buildCluster(const std::string& name) { - return TestUtility::parseYaml(fmt::format(R"EOF( + API_NO_BOOST(envoy::config::cluster::v3::Cluster) cluster; + TestUtility::loadFromYaml( + fmt::format(R"EOF( name: {} connect_timeout: 5s type: EDS - eds_cluster_config: {{ eds_config: {{ ads: {{}} }} }} + eds_cluster_config: + eds_config: + resource_api_version: {} + ads: {{}} lb_policy: ROUND_ROBIN http2_protocol_options: {{}} )EOF", - name)); + name, api_version_ == envoy::config::core::v3::ApiVersion::V2 ? "V2" : "V3"), + cluster, shouldBoost()); + return cluster; } envoy::config::cluster::v3::Cluster AdsIntegrationTest::buildRedisCluster(const std::string& name) { - return TestUtility::parseYaml(fmt::format(R"EOF( + API_NO_BOOST(envoy::config::cluster::v3::Cluster) cluster; + TestUtility::loadFromYaml( + fmt::format(R"EOF( name: {} connect_timeout: 5s type: EDS - eds_cluster_config: {{ eds_config: {{ ads: {{}} }} }} + eds_cluster_config: + eds_config: + resource_api_version: {} + ads: {{}} lb_policy: MAGLEV )EOF", - name)); + name, api_version_ == envoy::config::core::v3::ApiVersion::V2 ? "V2" : "V3"), + cluster, shouldBoost()); + return cluster; } envoy::config::endpoint::v3::ClusterLoadAssignment AdsIntegrationTest::buildClusterLoadAssignment(const std::string& name) { - return TestUtility::parseYaml( - fmt::format(R"EOF( + API_NO_BOOST(envoy::config::endpoint::v3::ClusterLoadAssignment) cluster_load_assignment; + TestUtility::loadFromYaml(fmt::format(R"EOF( cluster_name: {} endpoints: - lb_endpoints: @@ -68,15 +89,19 @@ AdsIntegrationTest::buildClusterLoadAssignment(const std::string& name) { address: {} port_value: {} )EOF", - name, Network::Test::getLoopbackAddressString(ipVersion()), - fake_upstreams_[0]->localAddress()->ip()->port())); + name, Network::Test::getLoopbackAddressString(ipVersion()), + fake_upstreams_[0]->localAddress()->ip()->port()), + cluster_load_assignment, shouldBoost()); + return cluster_load_assignment; } envoy::config::listener::v3::Listener AdsIntegrationTest::buildListener(const std::string& name, const std::string& route_config, const std::string& stat_prefix) { - return TestUtility::parseYaml(fmt::format( - R"EOF( + API_NO_BOOST(envoy::config::listener::v3::Listener) listener; + TestUtility::loadFromYaml( + fmt::format( + R"EOF( name: {} address: socket_address: @@ -91,16 +116,22 @@ AdsIntegrationTest::buildListener(const std::string& name, const std::string& ro codec_type: HTTP2 rds: route_config_name: {} - config_source: {{ ads: {{}} }} + config_source: + resource_api_version: {} + ads: {{}} http_filters: [{{ name: envoy.filters.http.router }}] )EOF", - name, Network::Test::getLoopbackAddressString(ipVersion()), stat_prefix, route_config)); + name, Network::Test::getLoopbackAddressString(ipVersion()), stat_prefix, route_config, + api_version_ == envoy::config::core::v3::ApiVersion::V2 ? 
"V2" : "V3"), + listener, shouldBoost()); + return listener; } envoy::config::listener::v3::Listener AdsIntegrationTest::buildRedisListener(const std::string& name, const std::string& cluster) { - return TestUtility::parseYaml(fmt::format( - R"EOF( + API_NO_BOOST(envoy::config::listener::v3::Listener) listener; + TestUtility::loadFromYaml(fmt::format( + R"EOF( name: {} address: socket_address: @@ -118,12 +149,16 @@ AdsIntegrationTest::buildRedisListener(const std::string& name, const std::strin catch_all_route: cluster: {} )EOF", - name, Network::Test::getLoopbackAddressString(ipVersion()), name, cluster)); + name, Network::Test::getLoopbackAddressString(ipVersion()), name, + cluster), + listener, shouldBoost()); + return listener; } envoy::config::route::v3::RouteConfiguration AdsIntegrationTest::buildRouteConfig(const std::string& name, const std::string& cluster) { - return TestUtility::parseYaml(fmt::format(R"EOF( + API_NO_BOOST(envoy::config::route::v3::RouteConfiguration) route; + TestUtility::loadFromYaml(fmt::format(R"EOF( name: {} virtual_hosts: - name: integration @@ -132,8 +167,9 @@ AdsIntegrationTest::buildRouteConfig(const std::string& name, const std::string& - match: {{ prefix: "/" }} route: {{ cluster: {} }} )EOF", - name, - cluster)); + name, cluster), + route, shouldBoost()); + return route; } void AdsIntegrationTest::makeSingleRequest() { diff --git a/test/integration/ads_integration.h b/test/integration/ads_integration.h index c9f71fa5234c..665aa48288f3 100644 --- a/test/integration/ads_integration.h +++ b/test/integration/ads_integration.h @@ -15,16 +15,20 @@ // TODO(fredlas) set_node_on_first_message_only was true; the delta+SotW unification // work restores it here. namespace Envoy { -static std::string AdsIntegrationConfig(const std::string& api_type) { +static std::string adsIntegrationConfig(const std::string& api_type, + const std::string& api_version = "V2") { // Note: do not use CONSTRUCT_ON_FIRST_USE here! 
return fmt::format(R"EOF( dynamic_resources: lds_config: + resource_api_version: {1} ads: {{}} cds_config: + resource_api_version: {1} ads: {{}} ads_config: - api_type: {} + transport_api_version: {1} + api_type: {0} set_node_on_first_message_only: false static_resources: clusters: @@ -50,12 +54,13 @@ static std::string AdsIntegrationConfig(const std::string& api_type) { address: 127.0.0.1 port_value: 0 )EOF", - api_type); + api_type, api_version); } class AdsIntegrationTest : public Grpc::DeltaSotwIntegrationParamTest, public HttpIntegrationTest { public: - AdsIntegrationTest(); + AdsIntegrationTest(const envoy::config::core::v3::ApiVersion api_version); + AdsIntegrationTest() : AdsIntegrationTest(envoy::config::core::v3::ApiVersion::V2) {} void TearDown() override; @@ -86,6 +91,10 @@ class AdsIntegrationTest : public Grpc::DeltaSotwIntegrationParamTest, public Ht envoy::admin::v3::ClustersConfigDump getClustersConfigDump(); envoy::admin::v3::ListenersConfigDump getListenersConfigDump(); envoy::admin::v3::RoutesConfigDump getRoutesConfigDump(); + + envoy::config::core::v3::ApiVersion api_version_; + + bool shouldBoost(); }; } // namespace Envoy diff --git a/test/integration/ads_integration_test.cc b/test/integration/ads_integration_test.cc index 5d61f60c7418..84305d188a95 100644 --- a/test/integration/ads_integration_test.cc +++ b/test/integration/ads_integration_test.cc @@ -594,7 +594,7 @@ class AdsFailIntegrationTest : public Grpc::DeltaSotwIntegrationParamTest, public: AdsFailIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, ipVersion(), - AdsIntegrationConfig( + adsIntegrationConfig( sotwOrDelta() == Grpc::SotwOrDelta::Sotw ? "GRPC" : "DELTA_GRPC")) { create_xds_upstream_ = true; use_lds_ = false; @@ -634,7 +634,7 @@ class AdsConfigIntegrationTest : public Grpc::DeltaSotwIntegrationParamTest, public: AdsConfigIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, ipVersion(), - AdsIntegrationConfig( + adsIntegrationConfig( sotwOrDelta() == Grpc::SotwOrDelta::Sotw ? "GRPC" : "DELTA_GRPC")) { create_xds_upstream_ = true; use_lds_ = false; @@ -795,7 +795,7 @@ class AdsClusterFromFileIntegrationTest : public Grpc::DeltaSotwIntegrationParam public: AdsClusterFromFileIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, ipVersion(), - AdsIntegrationConfig( + adsIntegrationConfig( sotwOrDelta() == Grpc::SotwOrDelta::Sotw ? "GRPC" : "DELTA_GRPC")) { create_xds_upstream_ = true; use_lds_ = false; @@ -980,4 +980,150 @@ TEST_P(AdsIntegrationTestWithRtdsAndSecondaryClusters, Basic) { testBasicFlow(); } +// Check if EDS cluster defined in file is loaded before ADS request and used as xDS server +class AdsClusterV3Test : public AdsIntegrationTest { +public: + AdsClusterV3Test() : AdsIntegrationTest(envoy::config::core::v3::ApiVersion::V3) {} +}; + +INSTANTIATE_TEST_SUITE_P(IpVersionsClientTypeDelta, AdsClusterV3Test, + DELTA_SOTW_GRPC_CLIENT_INTEGRATION_PARAMS); + +// Verify CDS is paused during cluster warming. +TEST_P(AdsClusterV3Test, CdsPausedDuringWarming) { + initialize(); + + const auto cds_type_url = Config::getTypeUrl( + envoy::config::core::v3::ApiVersion::V3); + const auto eds_type_url = Config::getTypeUrl( + envoy::config::core::v3::ApiVersion::V3); + const auto lds_type_url = Config::getTypeUrl( + envoy::config::core::v3::ApiVersion::V3); + const auto rds_type_url = Config::getTypeUrl( + envoy::config::core::v3::ApiVersion::V3); + + // Send initial configuration, validate we can process a request. 
+ EXPECT_TRUE(compareDiscoveryRequest(cds_type_url, "", {}, {}, {}, true)); + sendDiscoveryResponse( + cds_type_url, {buildCluster("cluster_0")}, {buildCluster("cluster_0")}, {}, "1", false); + EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, "", {"cluster_0"}, {"cluster_0"}, {})); + + sendDiscoveryResponse( + eds_type_url, {buildClusterLoadAssignment("cluster_0")}, + {buildClusterLoadAssignment("cluster_0")}, {}, "1", false); + + EXPECT_TRUE(compareDiscoveryRequest(cds_type_url, "1", {}, {}, {})); + EXPECT_TRUE(compareDiscoveryRequest(lds_type_url, "", {}, {}, {})); + sendDiscoveryResponse( + lds_type_url, {buildListener("listener_0", "route_config_0")}, + {buildListener("listener_0", "route_config_0")}, {}, "1", false); + + EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, "1", {"cluster_0"}, {}, {})); + EXPECT_TRUE( + compareDiscoveryRequest(rds_type_url, "", {"route_config_0"}, {"route_config_0"}, {})); + sendDiscoveryResponse( + rds_type_url, {buildRouteConfig("route_config_0", "cluster_0")}, + {buildRouteConfig("route_config_0", "cluster_0")}, {}, "1", false); + + EXPECT_TRUE(compareDiscoveryRequest(lds_type_url, "1", {}, {}, {})); + EXPECT_TRUE(compareDiscoveryRequest(rds_type_url, "1", {"route_config_0"}, {}, {})); + + test_server_->waitForCounterGe("listener_manager.listener_create_success", 1); + makeSingleRequest(); + + EXPECT_FALSE(test_server_->server().clusterManager().adsMux()->paused(cds_type_url)); + // Send the first warming cluster. + sendDiscoveryResponse( + cds_type_url, {buildCluster("warming_cluster_1")}, {buildCluster("warming_cluster_1")}, + {"cluster_0"}, "2", false); + + test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 1); + EXPECT_TRUE(test_server_->server().clusterManager().adsMux()->paused(cds_type_url)); + + EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, "1", {"warming_cluster_1"}, + {"warming_cluster_1"}, {"cluster_0"})); + + // Send the second warming cluster. + sendDiscoveryResponse( + cds_type_url, {buildCluster("warming_cluster_2")}, {buildCluster("warming_cluster_2")}, {}, + "3", false); + test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 2); + // We would've got a Cluster discovery request with version 2 here, had the CDS not been paused. + + EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, "1", {"warming_cluster_2", "warming_cluster_1"}, + {"warming_cluster_2"}, {})); + + EXPECT_TRUE(test_server_->server().clusterManager().adsMux()->paused(cds_type_url)); + // Finish warming the clusters. + sendDiscoveryResponse( + eds_type_url, + {buildClusterLoadAssignment("warming_cluster_1"), + buildClusterLoadAssignment("warming_cluster_2")}, + {buildClusterLoadAssignment("warming_cluster_1"), + buildClusterLoadAssignment("warming_cluster_2")}, + {"cluster_0"}, "2", false); + + // Validate that clusters are warmed. + test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 0); + EXPECT_FALSE(test_server_->server().clusterManager().adsMux()->paused(cds_type_url)); + + // CDS is resumed and EDS response was acknowledged. + if (sotw_or_delta_ == Grpc::SotwOrDelta::Delta) { + // Envoy will ACK both Cluster messages. Since they arrived while CDS was paused, they aren't + // sent until CDS is unpaused. Since version 3 has already arrived by the time the version 2 + // ACK goes out, they're both acknowledging version 3. 
+ EXPECT_TRUE(compareDiscoveryRequest(cds_type_url, "3", {}, {}, {})); + } + EXPECT_TRUE(compareDiscoveryRequest(cds_type_url, "3", {}, {}, {})); + EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, "2", {"warming_cluster_2", "warming_cluster_1"}, + {}, {})); +} + +// Validates that the initial xDS request batches all resources referred to in static config +TEST_P(AdsClusterV3Test, XdsBatching) { + config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + bootstrap.mutable_dynamic_resources()->clear_cds_config(); + bootstrap.mutable_dynamic_resources()->clear_lds_config(); + + auto static_resources = bootstrap.mutable_static_resources(); + static_resources->add_clusters()->MergeFrom(buildCluster("eds_cluster")); + static_resources->add_clusters()->MergeFrom(buildCluster("eds_cluster2")); + + static_resources->add_listeners()->MergeFrom(buildListener("rds_listener", "route_config")); + static_resources->add_listeners()->MergeFrom(buildListener("rds_listener2", "route_config2")); + }); + + on_server_init_function_ = [this]() { + createXdsConnection(); + ASSERT_TRUE(xds_connection_->waitForNewStream(*dispatcher_, xds_stream_)); + xds_stream_->startGrpcStream(); + + const auto eds_type_url = + Config::getTypeUrl( + envoy::config::core::v3::ApiVersion::V3); + const auto rds_type_url = Config::getTypeUrl( + envoy::config::core::v3::ApiVersion::V3); + + EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, "", {"eds_cluster2", "eds_cluster"}, + {"eds_cluster2", "eds_cluster"}, {}, true)); + sendDiscoveryResponse( + eds_type_url, + {buildClusterLoadAssignment("eds_cluster"), buildClusterLoadAssignment("eds_cluster2")}, + {buildClusterLoadAssignment("eds_cluster"), buildClusterLoadAssignment("eds_cluster2")}, {}, + "1", false); + + EXPECT_TRUE(compareDiscoveryRequest(rds_type_url, "", {"route_config2", "route_config"}, + {"route_config2", "route_config"}, {})); + sendDiscoveryResponse( + rds_type_url, + {buildRouteConfig("route_config2", "eds_cluster2"), + buildRouteConfig("route_config", "dummy_cluster")}, + {buildRouteConfig("route_config2", "eds_cluster2"), + buildRouteConfig("route_config", "dummy_cluster")}, + {}, "1", false); + }; + + initialize(); +} + } // namespace Envoy diff --git a/test/integration/integration.h b/test/integration/integration.h index 0996fa3f8cc0..4b55c14b6a26 100644 --- a/test/integration/integration.h +++ b/test/integration/integration.h @@ -248,11 +248,12 @@ class BaseIntegrationTest : protected Logger::Loggable { template void sendDiscoveryResponse(const std::string& type_url, const std::vector& state_of_the_world, const std::vector& added_or_updated, - const std::vector& removed, const std::string& version) { + const std::vector& removed, const std::string& version, + const bool api_downgrade = true) { if (sotw_or_delta_ == Grpc::SotwOrDelta::Sotw) { - sendSotwDiscoveryResponse(type_url, state_of_the_world, version); + sendSotwDiscoveryResponse(type_url, state_of_the_world, version, api_downgrade); } else { - sendDeltaDiscoveryResponse(type_url, added_or_updated, removed, version); + sendDeltaDiscoveryResponse(type_url, added_or_updated, removed, version, api_downgrade); } } @@ -284,12 +285,16 @@ class BaseIntegrationTest : protected Logger::Loggable { template void sendSotwDiscoveryResponse(const std::string& type_url, const std::vector& messages, - const std::string& version) { + const std::string& version, const bool api_downgrade = true) { API_NO_BOOST(envoy::api::v2::DiscoveryResponse) discovery_response; 
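    // With api_downgrade left at its default of true, each v3 resource is converted to its v2
    // equivalent via API_DOWNGRADE before being packed; the V3 ADS tests pass false so resources
    // are packed as-is.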
discovery_response.set_version_info(version); discovery_response.set_type_url(type_url); for (const auto& message : messages) { - discovery_response.add_resources()->PackFrom(API_DOWNGRADE(message)); + if (api_downgrade) { + discovery_response.add_resources()->PackFrom(API_DOWNGRADE(message)); + } else { + discovery_response.add_resources()->PackFrom(message); + } } static int next_nonce_counter = 0; discovery_response.set_nonce(absl::StrCat("nonce", next_nonce_counter++)); @@ -297,18 +302,21 @@ class BaseIntegrationTest : protected Logger::Loggable { } template - void - sendDeltaDiscoveryResponse(const std::string& type_url, const std::vector& added_or_updated, - const std::vector& removed, const std::string& version) { - sendDeltaDiscoveryResponse(type_url, added_or_updated, removed, version, xds_stream_); + void sendDeltaDiscoveryResponse(const std::string& type_url, + const std::vector& added_or_updated, + const std::vector& removed, + const std::string& version, const bool api_downgrade = true) { + sendDeltaDiscoveryResponse(type_url, added_or_updated, removed, version, xds_stream_, {}, + api_downgrade); } template void sendDeltaDiscoveryResponse(const std::string& type_url, const std::vector& added_or_updated, const std::vector& removed, const std::string& version, - FakeStreamPtr& stream, const std::vector& aliases = {}) { - auto response = - createDeltaDiscoveryResponse(type_url, added_or_updated, removed, version, aliases); + FakeStreamPtr& stream, const std::vector& aliases = {}, + const bool api_downgrade = true) { + auto response = createDeltaDiscoveryResponse(type_url, added_or_updated, removed, version, + aliases, api_downgrade); stream->sendGrpcMessage(response); } @@ -316,7 +324,8 @@ class BaseIntegrationTest : protected Logger::Loggable { envoy::api::v2::DeltaDiscoveryResponse createDeltaDiscoveryResponse(const std::string& type_url, const std::vector& added_or_updated, const std::vector& removed, const std::string& version, - const std::vector& aliases) { + const std::vector& aliases, + const bool api_downgrade = true) { API_NO_BOOST(envoy::api::v2::DeltaDiscoveryResponse) response; response.set_system_version_info("system_version_info_this_is_a_test"); @@ -324,10 +333,15 @@ class BaseIntegrationTest : protected Logger::Loggable { for (const auto& message : added_or_updated) { auto* resource = response.add_resources(); ProtobufWkt::Any temp_any; - temp_any.PackFrom(API_DOWNGRADE(message)); + if (api_downgrade) { + temp_any.PackFrom(API_DOWNGRADE(message)); + resource->mutable_resource()->PackFrom(API_DOWNGRADE(message)); + } else { + temp_any.PackFrom(message); + resource->mutable_resource()->PackFrom(message); + } resource->set_name(TestUtility::xdsResourceName(temp_any)); resource->set_version(version); - resource->mutable_resource()->PackFrom(API_DOWNGRADE(message)); for (const auto& alias : aliases) { resource->add_aliases(alias); } diff --git a/test/test_common/utility.cc b/test/test_common/utility.cc index 9d2678d2fe6b..36e266db8e7c 100644 --- a/test/test_common/utility.cc +++ b/test/test_common/utility.cc @@ -25,6 +25,7 @@ #include "common/common/lock_guard.h" #include "common/common/thread_impl.h" #include "common/common/utility.h" +#include "common/config/resource_name.h" #include "common/filesystem/directory.h" #include "common/filesystem/filesystem_impl.h" #include "common/json/json_loader.h" @@ -221,6 +222,31 @@ std::string TestUtility::xdsResourceName(const ProtobufWkt::Any& resource) { if (resource.type_url() == Config::TypeUrl::get().Runtime) { return 
TestUtility::anyConvert(resource).name(); } + if (resource.type_url() == Config::getTypeUrl( + envoy::config::core::v3::ApiVersion::V3)) { + return TestUtility::anyConvert(resource).name(); + } + if (resource.type_url() == Config::getTypeUrl( + envoy::config::core::v3::ApiVersion::V3)) { + return TestUtility::anyConvert(resource).name(); + } + if (resource.type_url() == Config::getTypeUrl( + envoy::config::core::v3::ApiVersion::V3)) { + return TestUtility::anyConvert(resource).name(); + } + if (resource.type_url() == Config::getTypeUrl( + envoy::config::core::v3::ApiVersion::V3)) { + return TestUtility::anyConvert(resource) + .cluster_name(); + } + if (resource.type_url() == Config::getTypeUrl( + envoy::config::core::v3::ApiVersion::V3)) { + return TestUtility::anyConvert(resource).name(); + } + if (resource.type_url() == Config::getTypeUrl( + envoy::config::core::v3::ApiVersion::V3)) { + return TestUtility::anyConvert(resource).name(); + } throw EnvoyException( absl::StrCat("xdsResourceName does not know about type URL ", resource.type_url())); } From 83433aea3d9959f866c1794a33e08270c6083094 Mon Sep 17 00:00:00 2001 From: Tony Allen Date: Wed, 17 Jun 2020 14:14:12 -0700 Subject: [PATCH 374/909] admission control: Filter implementation with no-op controller (#11414) Signed-off-by: Tony Allen --- CODEOWNERS | 2 + api/BUILD | 1 + .../v3/adaptive_concurrency.proto | 1 - .../http/admission_control/v3alpha/BUILD | 13 + .../v3alpha/admission_control.proto | 90 ++++++ api/versioning/BUILD | 1 + .../http/http_filters/http_filters.rst | 1 + generated_api_shadow/BUILD | 2 + .../v3/adaptive_concurrency.proto | 1 - .../http/admission_control/v3alpha/BUILD | 13 + .../v3alpha/admission_control.proto | 90 ++++++ source/extensions/extensions_build_config.bzl | 3 + .../filters/http/admission_control/BUILD | 35 +++ .../admission_control/admission_control.cc | 138 ++++++++ .../admission_control/admission_control.h | 115 +++++++ .../http/admission_control/evaluators/BUILD | 26 ++ .../evaluators/response_evaluator.h | 33 ++ .../evaluators/success_criteria_evaluator.cc | 73 +++++ .../evaluators/success_criteria_evaluator.h | 36 +++ .../thread_local_controller.h | 35 +++ .../filters/http/well_known_names.h | 2 + .../filters/http/admission_control/BUILD | 57 ++++ .../admission_control_filter_test.cc | 297 ++++++++++++++++++ .../http/admission_control/config_test.cc | 114 +++++++ .../success_criteria_evaluator_test.cc | 178 +++++++++++ tools/spelling/spelling_dictionary.txt | 2 + 26 files changed, 1357 insertions(+), 2 deletions(-) create mode 100644 api/envoy/extensions/filters/http/admission_control/v3alpha/BUILD create mode 100644 api/envoy/extensions/filters/http/admission_control/v3alpha/admission_control.proto create mode 100644 generated_api_shadow/envoy/extensions/filters/http/admission_control/v3alpha/BUILD create mode 100644 generated_api_shadow/envoy/extensions/filters/http/admission_control/v3alpha/admission_control.proto create mode 100644 source/extensions/filters/http/admission_control/BUILD create mode 100644 source/extensions/filters/http/admission_control/admission_control.cc create mode 100644 source/extensions/filters/http/admission_control/admission_control.h create mode 100644 source/extensions/filters/http/admission_control/evaluators/BUILD create mode 100644 source/extensions/filters/http/admission_control/evaluators/response_evaluator.h create mode 100644 source/extensions/filters/http/admission_control/evaluators/success_criteria_evaluator.cc create mode 100644 
source/extensions/filters/http/admission_control/evaluators/success_criteria_evaluator.h create mode 100644 source/extensions/filters/http/admission_control/thread_local_controller.h create mode 100644 test/extensions/filters/http/admission_control/BUILD create mode 100644 test/extensions/filters/http/admission_control/admission_control_filter_test.cc create mode 100644 test/extensions/filters/http/admission_control/config_test.cc create mode 100644 test/extensions/filters/http/admission_control/success_criteria_evaluator_test.cc diff --git a/CODEOWNERS b/CODEOWNERS index 9de37d0646ad..6038872ce0cc 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -68,6 +68,8 @@ extensions/filters/common/original_src @snowp @klarose /*/extensions/common/aws @lavignes @mattklein123 # adaptive concurrency limit extension. /*/extensions/filters/http/adaptive_concurrency @tonya11en @mattklein123 +# admission control extension. +/*/extensions/filters/http/admission_control @tonya11en @mattklein123 # http inspector /*/extensions/filters/listener/http_inspector @yxue @PiotrSikora @lizan # attribute context diff --git a/api/BUILD b/api/BUILD index e803ebf19244..3bbdd21a64fa 100644 --- a/api/BUILD +++ b/api/BUILD @@ -164,6 +164,7 @@ proto_library( "//envoy/extensions/compression/gzip/decompressor/v3:pkg", "//envoy/extensions/filters/common/fault/v3:pkg", "//envoy/extensions/filters/http/adaptive_concurrency/v3:pkg", + "//envoy/extensions/filters/http/admission_control/v3alpha:pkg", "//envoy/extensions/filters/http/aws_lambda/v3:pkg", "//envoy/extensions/filters/http/aws_request_signing/v3:pkg", "//envoy/extensions/filters/http/buffer/v3:pkg", diff --git a/api/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto b/api/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto index 3d2ef3e96d96..8dd851f4020a 100644 --- a/api/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto +++ b/api/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto @@ -5,7 +5,6 @@ package envoy.extensions.filters.http.adaptive_concurrency.v3; import "envoy/config/core/v3/base.proto"; import "envoy/type/v3/percent.proto"; -import "google/api/annotations.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; diff --git a/api/envoy/extensions/filters/http/admission_control/v3alpha/BUILD b/api/envoy/extensions/filters/http/admission_control/v3alpha/BUILD new file mode 100644 index 000000000000..f139cce54af2 --- /dev/null +++ b/api/envoy/extensions/filters/http/admission_control/v3alpha/BUILD @@ -0,0 +1,13 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v3:pkg", + "//envoy/type/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/filters/http/admission_control/v3alpha/admission_control.proto b/api/envoy/extensions/filters/http/admission_control/v3alpha/admission_control.proto new file mode 100644 index 000000000000..6f01c88885f4 --- /dev/null +++ b/api/envoy/extensions/filters/http/admission_control/v3alpha/admission_control.proto @@ -0,0 +1,90 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.admission_control.v3alpha; + +import "envoy/config/core/v3/base.proto"; +import "envoy/type/v3/range.proto"; + +import "google/api/annotations.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; +import "google/rpc/status.proto"; + +import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.admission_control.v3alpha"; +option java_outer_classname = "AdmissionControlProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).work_in_progress = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Admission Control] +// [#extension: envoy.filters.http.admission_control] + +message AdmissionControl { + // Default method of specifying what constitutes a successful request. All status codes that + // indicate a successful request must be explicitly specified if not relying on the default + // values. + message SuccessCriteria { + message HttpCriteria { + // Status code ranges that constitute a successful request. Configurable codes are in the + // range [100, 600). + repeated type.v3.Int32Range http_success_status = 1 + [(validate.rules).repeated = {min_items: 1}]; + } + + message GrpcCriteria { + // Status codes that constitute a successful request. + // Mappings can be found at: https://github.com/grpc/grpc/blob/master/doc/statuscodes.md. + repeated uint32 grpc_success_status = 1 [(validate.rules).repeated = {min_items: 1}]; + } + + // If HTTP criteria are unspecified, all HTTP status codes below 500 are treated as successful + // responses. + // + // .. note:: + // + // The default HTTP codes considered successful by the admission controller are done so due + // to the unlikelihood that sending fewer requests would change their behavior (for example: + // redirects, unauthorized access, or bad requests won't be alleviated by sending less + // traffic). + HttpCriteria http_criteria = 1; + + // GRPC status codes to consider as request successes. If unspecified, defaults to: Ok, + // Cancelled, Unknown, InvalidArgument, NotFound, AlreadyExists, Unauthenticated, + // FailedPrecondition, OutOfRange, PermissionDenied, and Unimplemented. + // + // .. note:: + // + // The default gRPC codes that are considered successful by the admission controller are + // chosen because of the unlikelihood that sending fewer requests will change the behavior. + GrpcCriteria grpc_criteria = 2; + } + + // If set to false, the admission control filter will operate as a pass-through filter. If the + // message is unspecified, the filter will be enabled. + config.core.v3.RuntimeFeatureFlag enabled = 1; + + // Defines how a request is considered a success/failure. 
+ oneof evaluation_criteria { + option (validate.required) = true; + + SuccessCriteria success_criteria = 2; + } + + // The sliding time window over which the success rate is calculated. The window is rounded to the + // nearest second. Defaults to 120s. + google.protobuf.Duration sampling_window = 3; + + // Rejection probability is defined by the formula:: + // + // max(0, (rq_count - aggression_coefficient * rq_success_count) / (rq_count + 1)) + // + // The coefficient dictates how aggressively the admission controller will throttle requests as + // the success rate drops. Lower values will cause throttling to kick in at higher success rates + // and result in more aggressive throttling. Any values less than 1.0, will be set to 1.0. If the + // message is unspecified, the coefficient is 2.0. + config.core.v3.RuntimeDouble aggression_coefficient = 4; +} diff --git a/api/versioning/BUILD b/api/versioning/BUILD index c26c4a894093..796d8246a31e 100644 --- a/api/versioning/BUILD +++ b/api/versioning/BUILD @@ -47,6 +47,7 @@ proto_library( "//envoy/extensions/compression/gzip/decompressor/v3:pkg", "//envoy/extensions/filters/common/fault/v3:pkg", "//envoy/extensions/filters/http/adaptive_concurrency/v3:pkg", + "//envoy/extensions/filters/http/admission_control/v3alpha:pkg", "//envoy/extensions/filters/http/aws_lambda/v3:pkg", "//envoy/extensions/filters/http/aws_request_signing/v3:pkg", "//envoy/extensions/filters/http/buffer/v3:pkg", diff --git a/docs/root/configuration/http/http_filters/http_filters.rst b/docs/root/configuration/http/http_filters/http_filters.rst index 911034fe13c6..97626448d249 100644 --- a/docs/root/configuration/http/http_filters/http_filters.rst +++ b/docs/root/configuration/http/http_filters/http_filters.rst @@ -41,4 +41,5 @@ HTTP filters .. 
toctree:: :hidden: + ../../../api-v3/extensions/filters/http/admission_control/v3alpha/admission_control.proto ../../../api-v3/extensions/filters/http/cache/v3alpha/cache.proto diff --git a/generated_api_shadow/BUILD b/generated_api_shadow/BUILD index 15ac05d10ced..9ae658780070 100644 --- a/generated_api_shadow/BUILD +++ b/generated_api_shadow/BUILD @@ -34,6 +34,7 @@ proto_library( "//envoy/config/filter/dubbo/router/v2alpha1:pkg", "//envoy/config/filter/fault/v2:pkg", "//envoy/config/filter/http/adaptive_concurrency/v2alpha:pkg", + "//envoy/config/filter/http/admission_control/v2alpha:pkg", "//envoy/config/filter/http/buffer/v2:pkg", "//envoy/config/filter/http/compressor/v2:pkg", "//envoy/config/filter/http/cors/v2:pkg", @@ -129,6 +130,7 @@ proto_library( "//envoy/extensions/common/tap/v3:pkg", "//envoy/extensions/filters/common/fault/v3:pkg", "//envoy/extensions/filters/http/adaptive_concurrency/v3:pkg", + "//envoy/extensions/filters/http/admission_control/v3alpha:pkg", "//envoy/extensions/filters/http/buffer/v3:pkg", "//envoy/extensions/filters/http/compressor/v3:pkg", "//envoy/extensions/filters/http/cors/v3:pkg", diff --git a/generated_api_shadow/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto b/generated_api_shadow/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto index 3d2ef3e96d96..8dd851f4020a 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto @@ -5,7 +5,6 @@ package envoy.extensions.filters.http.adaptive_concurrency.v3; import "envoy/config/core/v3/base.proto"; import "envoy/type/v3/percent.proto"; -import "google/api/annotations.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; diff --git a/generated_api_shadow/envoy/extensions/filters/http/admission_control/v3alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/http/admission_control/v3alpha/BUILD new file mode 100644 index 000000000000..f139cce54af2 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/http/admission_control/v3alpha/BUILD @@ -0,0 +1,13 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v3:pkg", + "//envoy/type/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/filters/http/admission_control/v3alpha/admission_control.proto b/generated_api_shadow/envoy/extensions/filters/http/admission_control/v3alpha/admission_control.proto new file mode 100644 index 000000000000..6f01c88885f4 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/http/admission_control/v3alpha/admission_control.proto @@ -0,0 +1,90 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.admission_control.v3alpha; + +import "envoy/config/core/v3/base.proto"; +import "envoy/type/v3/range.proto"; + +import "google/api/annotations.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; +import "google/rpc/status.proto"; + +import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.admission_control.v3alpha"; +option java_outer_classname = "AdmissionControlProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).work_in_progress = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Admission Control] +// [#extension: envoy.filters.http.admission_control] + +message AdmissionControl { + // Default method of specifying what constitutes a successful request. All status codes that + // indicate a successful request must be explicitly specified if not relying on the default + // values. + message SuccessCriteria { + message HttpCriteria { + // Status code ranges that constitute a successful request. Configurable codes are in the + // range [100, 600). + repeated type.v3.Int32Range http_success_status = 1 + [(validate.rules).repeated = {min_items: 1}]; + } + + message GrpcCriteria { + // Status codes that constitute a successful request. + // Mappings can be found at: https://github.com/grpc/grpc/blob/master/doc/statuscodes.md. + repeated uint32 grpc_success_status = 1 [(validate.rules).repeated = {min_items: 1}]; + } + + // If HTTP criteria are unspecified, all HTTP status codes below 500 are treated as successful + // responses. + // + // .. note:: + // + // The default HTTP codes considered successful by the admission controller are done so due + // to the unlikelihood that sending fewer requests would change their behavior (for example: + // redirects, unauthorized access, or bad requests won't be alleviated by sending less + // traffic). + HttpCriteria http_criteria = 1; + + // GRPC status codes to consider as request successes. If unspecified, defaults to: Ok, + // Cancelled, Unknown, InvalidArgument, NotFound, AlreadyExists, Unauthenticated, + // FailedPrecondition, OutOfRange, PermissionDenied, and Unimplemented. + // + // .. note:: + // + // The default gRPC codes that are considered successful by the admission controller are + // chosen because of the unlikelihood that sending fewer requests will change the behavior. + GrpcCriteria grpc_criteria = 2; + } + + // If set to false, the admission control filter will operate as a pass-through filter. If the + // message is unspecified, the filter will be enabled. + config.core.v3.RuntimeFeatureFlag enabled = 1; + + // Defines how a request is considered a success/failure. 
+ oneof evaluation_criteria { + option (validate.required) = true; + + SuccessCriteria success_criteria = 2; + } + + // The sliding time window over which the success rate is calculated. The window is rounded to the + // nearest second. Defaults to 120s. + google.protobuf.Duration sampling_window = 3; + + // Rejection probability is defined by the formula:: + // + // max(0, (rq_count - aggression_coefficient * rq_success_count) / (rq_count + 1)) + // + // The coefficient dictates how aggressively the admission controller will throttle requests as + // the success rate drops. Lower values will cause throttling to kick in at higher success rates + // and result in more aggressive throttling. Any values less than 1.0, will be set to 1.0. If the + // message is unspecified, the coefficient is 2.0. + config.core.v3.RuntimeDouble aggression_coefficient = 4; +} diff --git a/source/extensions/extensions_build_config.bzl b/source/extensions/extensions_build_config.bzl index 21009f9de918..c6341c8cd8dd 100644 --- a/source/extensions/extensions_build_config.bzl +++ b/source/extensions/extensions_build_config.bzl @@ -41,6 +41,9 @@ EXTENSIONS = { # "envoy.filters.http.adaptive_concurrency": "//source/extensions/filters/http/adaptive_concurrency:config", + # NOTE: The admission control filter does not have a proper filter + # implemented right now. We are just referencing the filter lib here. + "envoy.filters.http.admission_control": "//source/extensions/filters/http/admission_control:admission_control_filter_lib", "envoy.filters.http.aws_lambda": "//source/extensions/filters/http/aws_lambda:config", "envoy.filters.http.aws_request_signing": "//source/extensions/filters/http/aws_request_signing:config", "envoy.filters.http.buffer": "//source/extensions/filters/http/buffer:config", diff --git a/source/extensions/filters/http/admission_control/BUILD b/source/extensions/filters/http/admission_control/BUILD new file mode 100644 index 000000000000..cb4a9975b09b --- /dev/null +++ b/source/extensions/filters/http/admission_control/BUILD @@ -0,0 +1,35 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +# HTTP L7 filter that probabilistically rejects requests based on upstream success-rate. 
+# Public docs: docs/root/configuration/http_filters/admission_control.rst + +envoy_package() + +envoy_cc_extension( + name = "admission_control_filter_lib", + srcs = [ + "admission_control.cc", + ], + hdrs = [ + "admission_control.h", + "thread_local_controller.h", + ], + security_posture = "unknown", + deps = [ + "//include/envoy/http:filter_interface", + "//include/envoy/runtime:runtime_interface", + "//source/common/common:cleanup_lib", + "//source/common/http:codes_lib", + "//source/common/runtime:runtime_lib", + "//source/extensions/filters/http:well_known_names", + "//source/extensions/filters/http/admission_control/evaluators:response_evaluator_lib", + "//source/extensions/filters/http/common:pass_through_filter_lib", + "@envoy_api//envoy/extensions/filters/http/admission_control/v3alpha:pkg_cc_proto", + ], +) diff --git a/source/extensions/filters/http/admission_control/admission_control.cc b/source/extensions/filters/http/admission_control/admission_control.cc new file mode 100644 index 000000000000..7953b79c36f1 --- /dev/null +++ b/source/extensions/filters/http/admission_control/admission_control.cc @@ -0,0 +1,138 @@ +#include "extensions/filters/http/admission_control/admission_control.h" + +#include +#include +#include +#include + +#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.h" +#include "envoy/grpc/status.h" +#include "envoy/http/codes.h" +#include "envoy/runtime/runtime.h" +#include "envoy/server/filter_config.h" + +#include "common/common/cleanup.h" +#include "common/common/enum_to_int.h" +#include "common/grpc/common.h" +#include "common/http/codes.h" +#include "common/http/utility.h" +#include "common/protobuf/utility.h" + +#include "extensions/filters/http/admission_control/evaluators/success_criteria_evaluator.h" +#include "extensions/filters/http/well_known_names.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace AdmissionControl { + +using GrpcStatus = Grpc::Status::GrpcStatus; + +static constexpr double defaultAggression = 2.0; + +AdmissionControlFilterConfig::AdmissionControlFilterConfig( + const AdmissionControlProto& proto_config, Runtime::Loader& runtime, TimeSource&, + Runtime::RandomGenerator& random, Stats::Scope& scope, ThreadLocal::SlotPtr&& tls, + std::shared_ptr response_evaluator) + : random_(random), scope_(scope), tls_(std::move(tls)), + admission_control_feature_(proto_config.enabled(), runtime), + aggression_( + proto_config.has_aggression_coefficient() + ? std::make_unique(proto_config.aggression_coefficient(), runtime) + : nullptr), + response_evaluator_(std::move(response_evaluator)) {} + +double AdmissionControlFilterConfig::aggression() const { + return std::max(1.0, aggression_ ? aggression_->value() : defaultAggression); +} + +AdmissionControlFilter::AdmissionControlFilter(AdmissionControlFilterConfigSharedPtr config, + const std::string& stats_prefix) + : config_(std::move(config)), stats_(generateStats(config_->scope(), stats_prefix)), + record_request_(true) {} + +Http::FilterHeadersStatus AdmissionControlFilter::decodeHeaders(Http::RequestHeaderMap&, bool) { + // TODO(tonya11en): Ensure we document the fact that healthchecks are ignored. + if (!config_->filterEnabled() || decoder_callbacks_->streamInfo().healthCheck()) { + // We must forego recording the success/failure of this request during encoding. 
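+    // Such requests bypass the admission decision entirely, so they are also excluded from the
+    // success-rate sample.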
+ record_request_ = false; + return Http::FilterHeadersStatus::Continue; + } + + if (shouldRejectRequest()) { + decoder_callbacks_->sendLocalReply(Http::Code::ServiceUnavailable, "", nullptr, absl::nullopt, + "denied by admission control"); + stats_.rq_rejected_.inc(); + return Http::FilterHeadersStatus::StopIteration; + } + + return Http::FilterHeadersStatus::Continue; +} + +Http::FilterHeadersStatus AdmissionControlFilter::encodeHeaders(Http::ResponseHeaderMap& headers, + bool end_stream) { + // TODO(tonya11en): It's not possible for an HTTP filter to understand why a stream is reset, so + // we are not currently accounting for resets when recording requests. + + if (!record_request_) { + return Http::FilterHeadersStatus::Continue; + } + + bool successful_response = false; + if (Grpc::Common::isGrpcResponseHeaders(headers, end_stream)) { + absl::optional grpc_status = Grpc::Common::getGrpcStatus(headers); + + // If the GRPC status isn't found in the headers, it must be found in the trailers. + expect_grpc_status_in_trailer_ = !grpc_status.has_value(); + if (expect_grpc_status_in_trailer_) { + return Http::FilterHeadersStatus::Continue; + } + + const uint32_t status = enumToInt(grpc_status.value()); + successful_response = config_->responseEvaluator().isGrpcSuccess(status); + } else { + // HTTP response. + const uint64_t http_status = Http::Utility::getResponseStatus(headers); + successful_response = config_->responseEvaluator().isHttpSuccess(http_status); + } + + if (successful_response) { + recordSuccess(); + } else { + recordFailure(); + } + + return Http::FilterHeadersStatus::Continue; +} + +Http::FilterTrailersStatus +AdmissionControlFilter::encodeTrailers(Http::ResponseTrailerMap& trailers) { + if (expect_grpc_status_in_trailer_) { + absl::optional grpc_status = Grpc::Common::getGrpcStatus(trailers, false); + + if (grpc_status.has_value() && + config_->responseEvaluator().isGrpcSuccess(grpc_status.value())) { + recordSuccess(); + } else { + recordFailure(); + } + } + + return Http::FilterTrailersStatus::Continue; +} + +bool AdmissionControlFilter::shouldRejectRequest() const { + const double total = config_->getController().requestTotalCount(); + const double success = config_->getController().requestSuccessCount(); + const double probability = (total - config_->aggression() * success) / (total + 1); + + // Choosing an accuracy of 4 significant figures for the probability. 
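+  // For illustration: with total=100, success=40 and the default aggression of 2.0,
+  // probability = max(0, (100 - 2.0 * 40) / 101) ~= 0.198, i.e. roughly 1980 of the 10000
+  // buckets below reject the request; once successes reach 50, the probability drops to 0.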
+ static constexpr uint64_t accuracy = 1e4; + auto r = config_->random().random(); + return (accuracy * std::max(probability, 0.0)) > (r % accuracy); +} + +} // namespace AdmissionControl +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/admission_control/admission_control.h b/source/extensions/filters/http/admission_control/admission_control.h new file mode 100644 index 000000000000..22edcf539396 --- /dev/null +++ b/source/extensions/filters/http/admission_control/admission_control.h @@ -0,0 +1,115 @@ +#pragma once + +#include +#include +#include + +#include "envoy/common/time.h" +#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.h" +#include "envoy/http/codes.h" +#include "envoy/http/filter.h" +#include "envoy/runtime/runtime.h" +#include "envoy/server/filter_config.h" +#include "envoy/stats/scope.h" +#include "envoy/stats/stats_macros.h" + +#include "common/common/cleanup.h" +#include "common/grpc/common.h" +#include "common/grpc/status.h" +#include "common/http/codes.h" +#include "common/runtime/runtime_protos.h" + +#include "extensions/filters/http/admission_control/evaluators/response_evaluator.h" +#include "extensions/filters/http/admission_control/thread_local_controller.h" +#include "extensions/filters/http/common/pass_through_filter.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace AdmissionControl { + +/** + * All stats for the admission control filter. + */ +#define ALL_ADMISSION_CONTROL_STATS(COUNTER) COUNTER(rq_rejected) + +/** + * Wrapper struct for admission control filter stats. @see stats_macros.h + */ +struct AdmissionControlStats { + ALL_ADMISSION_CONTROL_STATS(GENERATE_COUNTER_STRUCT) +}; + +using AdmissionControlProto = + envoy::extensions::filters::http::admission_control::v3alpha::AdmissionControl; + +/** + * Configuration for the admission control filter. + */ +class AdmissionControlFilterConfig { +public: + AdmissionControlFilterConfig(const AdmissionControlProto& proto_config, Runtime::Loader& runtime, + TimeSource&, Runtime::RandomGenerator& random, Stats::Scope& scope, + ThreadLocal::SlotPtr&& tls, + std::shared_ptr response_evaluator); + virtual ~AdmissionControlFilterConfig() = default; + + virtual ThreadLocalController& getController() const { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } + + Runtime::RandomGenerator& random() const { return random_; } + bool filterEnabled() const { return admission_control_feature_.enabled(); } + Stats::Scope& scope() const { return scope_; } + double aggression() const; + ResponseEvaluator& responseEvaluator() const { return *response_evaluator_; } + +private: + Runtime::RandomGenerator& random_; + Stats::Scope& scope_; + const ThreadLocal::SlotPtr tls_; + Runtime::FeatureFlag admission_control_feature_; + std::unique_ptr aggression_; + std::shared_ptr response_evaluator_; +}; + +using AdmissionControlFilterConfigSharedPtr = std::shared_ptr; + +/** + * A filter that probabilistically rejects requests based on upstream success-rate. 
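+ * Request outcomes are recorded in a thread-local controller, and the rejection probability is
+ * computed from the recorded counts as max(0, (total - aggression * successes) / (total + 1)).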
+ */ +class AdmissionControlFilter : public Http::PassThroughFilter, + Logger::Loggable { +public: + AdmissionControlFilter(AdmissionControlFilterConfigSharedPtr config, + const std::string& stats_prefix); + + // Http::StreamDecoderFilter + Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap&, bool) override; + + // Http::StreamEncoderFilter + Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap& headers, + bool end_stream) override; + Http::FilterTrailersStatus encodeTrailers(Http::ResponseTrailerMap& trailers) override; + +private: + static AdmissionControlStats generateStats(Stats::Scope& scope, const std::string& prefix) { + return {ALL_ADMISSION_CONTROL_STATS(POOL_COUNTER_PREFIX(scope, prefix))}; + } + + bool shouldRejectRequest() const; + + void recordSuccess() { config_->getController().recordSuccess(); } + + void recordFailure() { config_->getController().recordFailure(); } + + const AdmissionControlFilterConfigSharedPtr config_; + AdmissionControlStats stats_; + bool expect_grpc_status_in_trailer_; + + // If false, the filter will forego recording a request success or failure during encoding. + bool record_request_; +}; + +} // namespace AdmissionControl +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/admission_control/evaluators/BUILD b/source/extensions/filters/http/admission_control/evaluators/BUILD new file mode 100644 index 000000000000..79910a264e7e --- /dev/null +++ b/source/extensions/filters/http/admission_control/evaluators/BUILD @@ -0,0 +1,26 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +# HTTP L7 filter that probabilistically rejects requests based on upstream success-rate. + +envoy_package() + +envoy_cc_library( + name = "response_evaluator_lib", + srcs = ["success_criteria_evaluator.cc"], + hdrs = [ + "response_evaluator.h", + "success_criteria_evaluator.h", + ], + visibility = ["//visibility:public"], + deps = [ + "//include/envoy/grpc:status", + "//source/common/common:enum_to_int", + "@envoy_api//envoy/extensions/filters/http/admission_control/v3alpha:pkg_cc_proto", + ], +) diff --git a/source/extensions/filters/http/admission_control/evaluators/response_evaluator.h b/source/extensions/filters/http/admission_control/evaluators/response_evaluator.h new file mode 100644 index 000000000000..9915014fdede --- /dev/null +++ b/source/extensions/filters/http/admission_control/evaluators/response_evaluator.h @@ -0,0 +1,33 @@ +#pragma once + +#include + +#include "envoy/common/pure.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace AdmissionControl { + +/** + * Determines of a request was successful based on response headers. + */ +class ResponseEvaluator { +public: + virtual ~ResponseEvaluator() = default; + + /** + * Returns true if the provided HTTP code constitutes a success. + */ + virtual bool isHttpSuccess(uint64_t code) const PURE; + + /** + * Returns true if the provided gRPC status counts constitutes a success. 
+ */ + virtual bool isGrpcSuccess(uint32_t status) const PURE; +}; + +} // namespace AdmissionControl +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/admission_control/evaluators/success_criteria_evaluator.cc b/source/extensions/filters/http/admission_control/evaluators/success_criteria_evaluator.cc new file mode 100644 index 000000000000..6771bfba9a7b --- /dev/null +++ b/source/extensions/filters/http/admission_control/evaluators/success_criteria_evaluator.cc @@ -0,0 +1,73 @@ +#include "extensions/filters/http/admission_control/evaluators/success_criteria_evaluator.h" + +#include + +#include "envoy/common/exception.h" +#include "envoy/grpc/status.h" + +#include "common/common/enum_to_int.h" +#include "common/common/fmt.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace AdmissionControl { + +SuccessCriteriaEvaluator::SuccessCriteriaEvaluator(const SuccessCriteria& success_criteria) { + // HTTP status. + if (success_criteria.has_http_criteria()) { + for (const auto& range : success_criteria.http_criteria().http_success_status()) { + if (!validHttpRange(range.start(), range.end())) { + throw EnvoyException( + fmt::format("invalid HTTP range: [{}, {})", range.start(), range.end())); + } + + const auto start = static_cast(range.start()); + const auto end = static_cast(range.end()); + http_success_fns_.emplace_back( + [start, end](uint64_t status) { return (start <= status) && (status < end); }); + } + } else { + // We default to all non-5xx codes as successes. + http_success_fns_.emplace_back([](uint64_t status) { return status < 500; }); + } + + // GRPC status. + if (success_criteria.has_grpc_criteria()) { + for (const auto& status : success_criteria.grpc_criteria().grpc_success_status()) { + if (status > 16) { + throw EnvoyException(fmt::format("invalid gRPC code {}", status)); + } + + grpc_success_codes_.emplace_back(status); + } + } else { + grpc_success_codes_ = { + enumToInt(Grpc::Status::WellKnownGrpcStatus::AlreadyExists), + enumToInt(Grpc::Status::WellKnownGrpcStatus::Canceled), + enumToInt(Grpc::Status::WellKnownGrpcStatus::FailedPrecondition), + enumToInt(Grpc::Status::WellKnownGrpcStatus::InvalidArgument), + enumToInt(Grpc::Status::WellKnownGrpcStatus::NotFound), + enumToInt(Grpc::Status::WellKnownGrpcStatus::Ok), + enumToInt(Grpc::Status::WellKnownGrpcStatus::OutOfRange), + enumToInt(Grpc::Status::WellKnownGrpcStatus::PermissionDenied), + enumToInt(Grpc::Status::WellKnownGrpcStatus::Unauthenticated), + enumToInt(Grpc::Status::WellKnownGrpcStatus::Unimplemented), + enumToInt(Grpc::Status::WellKnownGrpcStatus::Unknown), + }; + } +} + +bool SuccessCriteriaEvaluator::isGrpcSuccess(uint32_t status) const { + return std::count(grpc_success_codes_.begin(), grpc_success_codes_.end(), status) > 0; +} + +bool SuccessCriteriaEvaluator::isHttpSuccess(uint64_t code) const { + return std::any_of(http_success_fns_.begin(), http_success_fns_.end(), + [code](auto fn) { return fn(code); }); +} + +} // namespace AdmissionControl +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/admission_control/evaluators/success_criteria_evaluator.h b/source/extensions/filters/http/admission_control/evaluators/success_criteria_evaluator.h new file mode 100644 index 000000000000..511d54408f42 --- /dev/null +++ b/source/extensions/filters/http/admission_control/evaluators/success_criteria_evaluator.h @@ -0,0 +1,36 @@ +#pragma once + +#include 
+ +#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.h" +#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.validate.h" + +#include "extensions/filters/http/admission_control/evaluators/response_evaluator.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace AdmissionControl { + +class SuccessCriteriaEvaluator : public ResponseEvaluator { +public: + using SuccessCriteria = envoy::extensions::filters::http::admission_control::v3alpha:: + AdmissionControl::SuccessCriteria; + SuccessCriteriaEvaluator(const SuccessCriteria& evaluation_criteria); + // ResponseEvaluator + bool isHttpSuccess(uint64_t code) const override; + bool isGrpcSuccess(uint32_t status) const override; + +private: + bool validHttpRange(const int32_t start, const int32_t end) const { + return start <= end && start < 600 && start >= 100 && end <= 600 && end >= 100; + } + + std::vector> http_success_fns_; + std::vector grpc_success_codes_; +}; + +} // namespace AdmissionControl +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/admission_control/thread_local_controller.h b/source/extensions/filters/http/admission_control/thread_local_controller.h new file mode 100644 index 000000000000..9b5096b80569 --- /dev/null +++ b/source/extensions/filters/http/admission_control/thread_local_controller.h @@ -0,0 +1,35 @@ +#pragma once + +#include "envoy/common/pure.h" +#include "envoy/common/time.h" +#include "envoy/http/codes.h" +#include "envoy/thread_local/thread_local.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace AdmissionControl { + +/* + * Thread-local admission controller interface. + */ +class ThreadLocalController { +public: + virtual ~ThreadLocalController() = default; + + // Record success/failure of a request and update the internal state of the controller to reflect + // this. + virtual void recordSuccess() PURE; + virtual void recordFailure() PURE; + + // Returns the current number of recorded requests. + virtual uint32_t requestTotalCount() PURE; + + // Returns the current number of recorded request successes. 
+ virtual uint32_t requestSuccessCount() PURE; +}; + +} // namespace AdmissionControl +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/well_known_names.h b/source/extensions/filters/http/well_known_names.h index c3971fa30678..2adb1681701f 100644 --- a/source/extensions/filters/http/well_known_names.h +++ b/source/extensions/filters/http/well_known_names.h @@ -66,6 +66,8 @@ class HttpFilterNameValues { const std::string Tap = "envoy.filters.http.tap"; // Adaptive concurrency limit filter const std::string AdaptiveConcurrency = "envoy.filters.http.adaptive_concurrency"; + // Admission control filter + const std::string AdmissionControl = "envoy.filters.http.admission_control"; // Original Src Filter const std::string OriginalSrc = "envoy.filters.http.original_src"; // Dynamic forward proxy filter diff --git a/test/extensions/filters/http/admission_control/BUILD b/test/extensions/filters/http/admission_control/BUILD new file mode 100644 index 000000000000..b161f26e16a1 --- /dev/null +++ b/test/extensions/filters/http/admission_control/BUILD @@ -0,0 +1,57 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_package", +) +load( + "//test/extensions:extensions_build_system.bzl", + "envoy_extension_cc_test", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_extension_cc_test( + name = "admission_control_filter_test", + srcs = ["admission_control_filter_test.cc"], + extension_name = "envoy.filters.http.admission_control", + deps = [ + "//source/common/common:enum_to_int", + "//source/common/http:header_map_lib", + "//source/common/http:headers_lib", + "//source/extensions/filters/http/admission_control:admission_control_filter_lib", + "//test/mocks/http:http_mocks", + "//test/mocks/server:server_mocks", + "//test/mocks/thread_local:thread_local_mocks", + "//test/test_common:simulated_time_system_lib", + "//test/test_common:utility_lib", + "@envoy_api//envoy/extensions/filters/http/admission_control/v3alpha:pkg_cc_proto", + ], +) + +envoy_extension_cc_test( + name = "config_test", + srcs = ["config_test.cc"], + extension_name = "envoy.filters.http.admission_control", + deps = [ + "//source/common/http:header_map_lib", + "//source/common/http:headers_lib", + "//source/extensions/filters/http/admission_control:admission_control_filter_lib", + "//test/mocks/http:http_mocks", + "//test/mocks/server:server_mocks", + "//test/mocks/thread_local:thread_local_mocks", + "//test/test_common:simulated_time_system_lib", + "//test/test_common:utility_lib", + "@envoy_api//envoy/extensions/filters/http/admission_control/v3alpha:pkg_cc_proto", + ], +) + +envoy_extension_cc_test( + name = "success_criteria_evaluator_test", + srcs = ["success_criteria_evaluator_test.cc"], + extension_name = "envoy.filters.http.admission_control", + deps = [ + "//source/extensions/filters/http/admission_control:admission_control_filter_lib", + "@envoy_api//envoy/extensions/filters/http/admission_control/v3alpha:pkg_cc_proto", + ], +) diff --git a/test/extensions/filters/http/admission_control/admission_control_filter_test.cc b/test/extensions/filters/http/admission_control/admission_control_filter_test.cc new file mode 100644 index 000000000000..5aaa6ba1658d --- /dev/null +++ b/test/extensions/filters/http/admission_control/admission_control_filter_test.cc @@ -0,0 +1,297 @@ +#include + +#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.h" +#include 
"envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.validate.h" +#include "envoy/grpc/status.h" + +#include "common/common/enum_to_int.h" +#include "common/stats/isolated_store_impl.h" + +#include "extensions/filters/http/admission_control/admission_control.h" +#include "extensions/filters/http/admission_control/evaluators/response_evaluator.h" +#include "extensions/filters/http/admission_control/thread_local_controller.h" + +#include "test/mocks/runtime/mocks.h" +#include "test/mocks/server/mocks.h" +#include "test/mocks/thread_local/mocks.h" +#include "test/test_common/simulated_time_system.h" +#include "test/test_common/utility.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::NiceMock; +using testing::Return; + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace AdmissionControl { +namespace { + +class MockThreadLocalController : public ThreadLocal::ThreadLocalObject, + public ThreadLocalController { +public: + MOCK_METHOD(uint32_t, requestTotalCount, ()); + MOCK_METHOD(uint32_t, requestSuccessCount, ()); + MOCK_METHOD(void, recordSuccess, ()); + MOCK_METHOD(void, recordFailure, ()); +}; + +class MockResponseEvaluator : public ResponseEvaluator { +public: + MOCK_METHOD(bool, isHttpSuccess, (uint64_t code), (const)); + MOCK_METHOD(bool, isGrpcSuccess, (uint32_t status), (const)); +}; + +class TestConfig : public AdmissionControlFilterConfig { +public: + TestConfig(const AdmissionControlProto& proto_config, Runtime::Loader& runtime, + TimeSource& time_source, Runtime::RandomGenerator& random, Stats::Scope& scope, + ThreadLocal::SlotPtr&& tls, MockThreadLocalController& controller, + std::shared_ptr evaluator) + : AdmissionControlFilterConfig(proto_config, runtime, time_source, random, scope, + std::move(tls), std::move(evaluator)), + controller_(controller) {} + ThreadLocalController& getController() const override { return controller_; } + +private: + MockThreadLocalController& controller_; +}; + +class AdmissionControlTest : public testing::Test { +public: + AdmissionControlTest() = default; + + std::shared_ptr makeConfig(const std::string& yaml) { + AdmissionControlProto proto; + TestUtility::loadFromYamlAndValidate(yaml, proto); + auto tls = context_.threadLocal().allocateSlot(); + evaluator_ = std::make_shared(); + + return std::make_shared(proto, runtime_, time_system_, random_, scope_, + std::move(tls), controller_, evaluator_); + } + + void setupFilter(std::shared_ptr config) { + filter_ = std::make_shared(config, "test_prefix."); + filter_->setDecoderFilterCallbacks(decoder_callbacks_); + } + + void sampleGrpcRequest(const Grpc::Status::WellKnownGrpcStatus status) { + Http::TestResponseHeaderMapImpl headers{{"content-type", "application/grpc"}, + {"grpc-status", std::to_string(enumToInt(status))}}; + filter_->encodeHeaders(headers, true); + } + + void sampleGrpcRequestTrailer(const Grpc::Status::WellKnownGrpcStatus status) { + Http::TestResponseHeaderMapImpl headers{{"content-type", "application/grpc"}, + {":status", "200"}}; + filter_->encodeHeaders(headers, false); + Http::TestResponseTrailerMapImpl trailers{{"grpc-message", "foo"}, + {"grpc-status", std::to_string(enumToInt(status))}}; + filter_->encodeTrailers(trailers); + } + + void sampleHttpRequest(const std::string& http_error_code) { + Http::TestResponseHeaderMapImpl headers{{":status", http_error_code}}; + filter_->encodeHeaders(headers, true); + } + +protected: + std::string stats_prefix_; + NiceMock runtime_; + NiceMock context_; + 
Stats::IsolatedStoreImpl scope_; + Event::SimulatedTimeSystem time_system_; + NiceMock random_; + std::shared_ptr filter_; + NiceMock decoder_callbacks_; + NiceMock controller_; + std::shared_ptr evaluator_; + const std::string default_yaml_{R"EOF( +enabled: + default_value: true + runtime_key: "foo.enabled" +sampling_window: 10s +aggression_coefficient: + default_value: 1.0 + runtime_key: "foo.aggression" +success_criteria: + http_criteria: + grpc_criteria: +)EOF"}; +}; + +// Ensure the filter can be disabled/enabled via runtime. +TEST_F(AdmissionControlTest, FilterRuntimeOverride) { + const std::string yaml = R"EOF( +enabled: + default_value: true + runtime_key: "foo.enabled" +sampling_window: 10s +aggression_coefficient: + default_value: 1.0 + runtime_key: "foo.aggression" +success_criteria: + http_criteria: + grpc_criteria: +)EOF"; + + auto config = makeConfig(yaml); + setupFilter(config); + + // "Disable" the filter via runtime. + EXPECT_CALL(runtime_.snapshot_, getBoolean("foo.enabled", true)).WillRepeatedly(Return(false)); + + // The filter is bypassed via runtime. + EXPECT_CALL(controller_, requestTotalCount()).Times(0); + EXPECT_CALL(controller_, requestSuccessCount()).Times(0); + + // We expect no rejections. + Http::RequestHeaderMapImpl request_headers; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true)); +} + +// Ensure the filter disregards healthcheck traffic. +TEST_F(AdmissionControlTest, DisregardHealthChecks) { + auto config = makeConfig(default_yaml_); + setupFilter(config); + + StreamInfo::MockStreamInfo stream_info; + EXPECT_CALL(decoder_callbacks_, streamInfo()).WillOnce(testing::ReturnRef(stream_info)); + EXPECT_CALL(stream_info, healthCheck()).WillOnce(Return(true)); + + // We do not make admission decisions for health checks, so we expect no lookup of request success + // counts. + EXPECT_CALL(controller_, requestTotalCount()).Times(0); + EXPECT_CALL(controller_, requestSuccessCount()).Times(0); + + Http::TestRequestHeaderMapImpl request_headers; + Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true)); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, true)); +} + +// Validate simple HTTP failure case. +TEST_F(AdmissionControlTest, HttpFailureBehavior) { + auto config = makeConfig(default_yaml_); + setupFilter(config); + + // We expect rejection counter to increment upon failure. + TestUtility::waitForCounterEq(scope_, "test_prefix.rq_rejected", 0, time_system_); + + EXPECT_CALL(controller_, requestTotalCount()).WillRepeatedly(Return(100)); + EXPECT_CALL(controller_, requestSuccessCount()).WillRepeatedly(Return(0)); + EXPECT_CALL(*evaluator_, isHttpSuccess(500)).WillRepeatedly(Return(false)); + + Http::TestRequestHeaderMapImpl request_headers; + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_->decodeHeaders(request_headers, true)); + sampleHttpRequest("500"); + + TestUtility::waitForCounterEq(scope_, "test_prefix.rq_rejected", 1, time_system_); +} + +// Validate simple HTTP success case. +TEST_F(AdmissionControlTest, HttpSuccessBehavior) { + auto config = makeConfig(default_yaml_); + setupFilter(config); + + // We expect rejection counter to NOT increment upon success. 
+ TestUtility::waitForCounterEq(scope_, "test_prefix.rq_rejected", 0, time_system_); + + EXPECT_CALL(controller_, requestTotalCount()).WillRepeatedly(Return(100)); + EXPECT_CALL(controller_, requestSuccessCount()).WillRepeatedly(Return(100)); + EXPECT_CALL(*evaluator_, isHttpSuccess(200)).WillRepeatedly(Return(true)); + + Http::TestRequestHeaderMapImpl request_headers; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true)); + sampleHttpRequest("200"); + + TestUtility::waitForCounterEq(scope_, "test_prefix.rq_rejected", 0, time_system_); +} + +// Validate simple gRPC failure case. +TEST_F(AdmissionControlTest, GrpcFailureBehavior) { + auto config = makeConfig(default_yaml_); + setupFilter(config); + + TestUtility::waitForCounterEq(scope_, "test_prefix.rq_rejected", 0, time_system_); + + EXPECT_CALL(controller_, requestTotalCount()).WillRepeatedly(Return(100)); + EXPECT_CALL(controller_, requestSuccessCount()).WillRepeatedly(Return(0)); + EXPECT_CALL(*evaluator_, isGrpcSuccess(7)).WillRepeatedly(Return(false)); + + Http::TestRequestHeaderMapImpl request_headers; + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_->decodeHeaders(request_headers, true)); + sampleGrpcRequest(Grpc::Status::WellKnownGrpcStatus::PermissionDenied); + + // We expect rejection counter to increment upon failure. + TestUtility::waitForCounterEq(scope_, "test_prefix.rq_rejected", 1, time_system_); +} + +// Validate simple gRPC success case with status in the trailer. +TEST_F(AdmissionControlTest, GrpcSuccessBehaviorTrailer) { + auto config = makeConfig(default_yaml_); + setupFilter(config); + + TestUtility::waitForCounterEq(scope_, "test_prefix.rq_rejected", 0, time_system_); + + EXPECT_CALL(controller_, requestTotalCount()).WillRepeatedly(Return(100)); + EXPECT_CALL(controller_, requestSuccessCount()).WillRepeatedly(Return(100)); + EXPECT_CALL(*evaluator_, isGrpcSuccess(0)).WillRepeatedly(Return(true)); + + Http::TestRequestHeaderMapImpl request_headers; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true)); + sampleGrpcRequestTrailer(Grpc::Status::WellKnownGrpcStatus::Ok); + + // We expect rejection counter to NOT increment upon success. + TestUtility::waitForCounterEq(scope_, "test_prefix.rq_rejected", 0, time_system_); +} + +// Validate simple gRPC failure case with status in the trailer. +TEST_F(AdmissionControlTest, GrpcFailureBehaviorTrailer) { + auto config = makeConfig(default_yaml_); + setupFilter(config); + + TestUtility::waitForCounterEq(scope_, "test_prefix.rq_rejected", 0, time_system_); + + EXPECT_CALL(controller_, requestTotalCount()).WillRepeatedly(Return(100)); + EXPECT_CALL(controller_, requestSuccessCount()).WillRepeatedly(Return(0)); + EXPECT_CALL(*evaluator_, isGrpcSuccess(7)).WillRepeatedly(Return(false)); + + Http::TestRequestHeaderMapImpl request_headers; + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_->decodeHeaders(request_headers, true)); + sampleGrpcRequestTrailer(Grpc::Status::WellKnownGrpcStatus::PermissionDenied); + + // We expect rejection counter to increment upon failure. + TestUtility::waitForCounterEq(scope_, "test_prefix.rq_rejected", 1, time_system_); +} + +// Validate simple gRPC success case. 
+TEST_F(AdmissionControlTest, GrpcSuccessBehavior) { + auto config = makeConfig(default_yaml_); + setupFilter(config); + + TestUtility::waitForCounterEq(scope_, "test_prefix.rq_rejected", 0, time_system_); + + EXPECT_CALL(controller_, requestTotalCount()).WillRepeatedly(Return(100)); + EXPECT_CALL(controller_, requestSuccessCount()).WillRepeatedly(Return(100)); + EXPECT_CALL(*evaluator_, isGrpcSuccess(0)).WillRepeatedly(Return(true)); + + Http::TestRequestHeaderMapImpl request_headers; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true)); + sampleGrpcRequest(Grpc::Status::WellKnownGrpcStatus::Ok); + + // We expect rejection counter to NOT increment upon success. + TestUtility::waitForCounterEq(scope_, "test_prefix.rq_rejected", 0, time_system_); +} + +} // namespace +} // namespace AdmissionControl +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/http/admission_control/config_test.cc b/test/extensions/filters/http/admission_control/config_test.cc new file mode 100644 index 000000000000..2201b3c36cb1 --- /dev/null +++ b/test/extensions/filters/http/admission_control/config_test.cc @@ -0,0 +1,114 @@ +#include + +#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.h" +#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.validate.h" + +#include "common/stats/isolated_store_impl.h" + +#include "extensions/filters/http/admission_control/admission_control.h" +#include "extensions/filters/http/admission_control/evaluators/success_criteria_evaluator.h" + +#include "test/mocks/runtime/mocks.h" +#include "test/mocks/server/mocks.h" +#include "test/mocks/thread_local/mocks.h" +#include "test/test_common/simulated_time_system.h" +#include "test/test_common/utility.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::NiceMock; +using testing::Return; + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace AdmissionControl { +namespace { + +class AdmissionControlConfigTest : public testing::Test { +public: + AdmissionControlConfigTest() = default; + + std::shared_ptr makeConfig(const std::string& yaml) { + AdmissionControlProto proto; + TestUtility::loadFromYamlAndValidate(yaml, proto); + auto tls = context_.threadLocal().allocateSlot(); + auto evaluator = std::make_unique(proto.success_criteria()); + return std::make_shared( + proto, runtime_, time_system_, random_, scope_, std::move(tls), std::move(evaluator)); + } + +protected: + NiceMock runtime_; + NiceMock context_; + Stats::IsolatedStoreImpl scope_; + Event::SimulatedTimeSystem time_system_; + NiceMock random_; +}; + +// Verify the configuration when all fields are set. +TEST_F(AdmissionControlConfigTest, BasicTestAllConfigured) { + const std::string yaml = R"EOF( +enabled: + default_value: false + runtime_key: "foo.enabled" +sampling_window: 1337s +aggression_coefficient: + default_value: 4.2 + runtime_key: "foo.aggression" +success_criteria: + http_criteria: + grpc_criteria: +)EOF"; + + auto config = makeConfig(yaml); + + EXPECT_FALSE(config->filterEnabled()); + EXPECT_EQ(4.2, config->aggression()); +} + +// Verify the config defaults when not specified. +TEST_F(AdmissionControlConfigTest, BasicTestMinimumConfigured) { + // Empty config. No fields are required. 
+ AdmissionControlProto proto; + + const std::string yaml = R"EOF( +success_criteria: + http_criteria: + grpc_criteria: +)EOF"; + auto config = makeConfig(yaml); + + EXPECT_TRUE(config->filterEnabled()); + EXPECT_EQ(2.0, config->aggression()); +} + +// Ensure runtime fields are honored. +TEST_F(AdmissionControlConfigTest, VerifyRuntime) { + const std::string yaml = R"EOF( +enabled: + default_value: false + runtime_key: "foo.enabled" +sampling_window: 1337s +aggression_coefficient: + default_value: 4.2 + runtime_key: "foo.aggression" +success_criteria: + http_criteria: + grpc_criteria: +)EOF"; + + auto config = makeConfig(yaml); + + EXPECT_CALL(runtime_.snapshot_, getBoolean("foo.enabled", false)).WillOnce(Return(true)); + EXPECT_TRUE(config->filterEnabled()); + EXPECT_CALL(runtime_.snapshot_, getDouble("foo.aggression", 4.2)).WillOnce(Return(1.3)); + EXPECT_EQ(1.3, config->aggression()); +} + +} // namespace +} // namespace AdmissionControl +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/http/admission_control/success_criteria_evaluator_test.cc b/test/extensions/filters/http/admission_control/success_criteria_evaluator_test.cc new file mode 100644 index 000000000000..888497a1363e --- /dev/null +++ b/test/extensions/filters/http/admission_control/success_criteria_evaluator_test.cc @@ -0,0 +1,178 @@ +#include + +#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.h" +#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.validate.h" + +#include "common/common/enum_to_int.h" + +#include "extensions/filters/http/admission_control/admission_control.h" +#include "extensions/filters/http/admission_control/evaluators/success_criteria_evaluator.h" + +#include "test/test_common/utility.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace AdmissionControl { +namespace { + +class SuccessCriteriaTest : public testing::Test { +public: + SuccessCriteriaTest() = default; + + void makeEvaluator(const std::string& yaml) { + AdmissionControlProto::SuccessCriteria proto; + TestUtility::loadFromYamlAndValidate(yaml, proto); + + evaluator_ = std::make_unique(proto); + } + + void expectHttpSuccess(int code) { EXPECT_TRUE(evaluator_->isHttpSuccess(code)); } + + void expectHttpFail(int code) { EXPECT_FALSE(evaluator_->isHttpSuccess(code)); } + + void expectGrpcSuccess(int code) { EXPECT_TRUE(evaluator_->isGrpcSuccess(code)); } + + void expectGrpcFail(int code) { EXPECT_FALSE(evaluator_->isGrpcSuccess(code)); } + + void verifyGrpcDefaultEval() { + expectGrpcSuccess(Grpc::Status::WellKnownGrpcStatus::AlreadyExists); + expectGrpcSuccess(Grpc::Status::WellKnownGrpcStatus::Canceled); + expectGrpcSuccess(Grpc::Status::WellKnownGrpcStatus::FailedPrecondition); + expectGrpcSuccess(Grpc::Status::WellKnownGrpcStatus::InvalidArgument); + expectGrpcSuccess(Grpc::Status::WellKnownGrpcStatus::NotFound); + expectGrpcSuccess(Grpc::Status::WellKnownGrpcStatus::Ok); + expectGrpcSuccess(Grpc::Status::WellKnownGrpcStatus::OutOfRange); + expectGrpcSuccess(Grpc::Status::WellKnownGrpcStatus::PermissionDenied); + expectGrpcSuccess(Grpc::Status::WellKnownGrpcStatus::Unauthenticated); + expectGrpcSuccess(Grpc::Status::WellKnownGrpcStatus::Unimplemented); + expectGrpcSuccess(Grpc::Status::WellKnownGrpcStatus::Unknown); + + expectGrpcFail(enumToInt(Grpc::Status::WellKnownGrpcStatus::Aborted)); + 
expectGrpcFail(enumToInt(Grpc::Status::WellKnownGrpcStatus::DataLoss)); + expectGrpcFail(enumToInt(Grpc::Status::WellKnownGrpcStatus::DeadlineExceeded)); + expectGrpcFail(enumToInt(Grpc::Status::WellKnownGrpcStatus::Internal)); + expectGrpcFail(enumToInt(Grpc::Status::WellKnownGrpcStatus::ResourceExhausted)); + expectGrpcFail(enumToInt(Grpc::Status::WellKnownGrpcStatus::Unavailable)); + } + + void verifyHttpDefaultEval() { + for (int code = 200; code < 600; ++code) { + if (code < 500) { + expectHttpSuccess(code); + } else { + expectHttpFail(code); + } + } + } + +protected: + std::unique_ptr evaluator_; +}; + +// Ensure the HTTP code successful range configurations are honored. +TEST_F(SuccessCriteriaTest, HttpErrorCodes) { + const std::string yaml = R"EOF( +http_criteria: + http_success_status: + - start: 200 + end: 300 + - start: 400 + end: 500 +)EOF"; + + makeEvaluator(yaml); + + for (int code = 200; code < 600; ++code) { + if ((code < 300 && code >= 200) || (code < 500 && code >= 400)) { + expectHttpSuccess(code); + continue; + } + + expectHttpFail(code); + } + + verifyGrpcDefaultEval(); +} + +// Verify default success values of the evaluator. +TEST_F(SuccessCriteriaTest, DefaultBehaviorTest) { + const std::string yaml = R"EOF( +http_criteria: +grpc_criteria: +)EOF"; + + makeEvaluator(yaml); + verifyGrpcDefaultEval(); + verifyHttpDefaultEval(); +} + +// Check that GRPC error code configurations are honored. +TEST_F(SuccessCriteriaTest, GrpcErrorCodes) { + const std::string yaml = R"EOF( +grpc_criteria: + grpc_success_status: + - 7 + - 13 +)EOF"; + + makeEvaluator(yaml); + + using GrpcStatus = Grpc::Status::WellKnownGrpcStatus; + for (int code = GrpcStatus::Ok; code <= GrpcStatus::MaximumKnown; ++code) { + if (code == 7 || code == 13) { + expectGrpcSuccess(code); + } else { + expectGrpcFail(code); + } + } + + verifyHttpDefaultEval(); +} + +// Verify correct gRPC range validation. +TEST_F(SuccessCriteriaTest, GrpcRangeValidation) { + const std::string yaml = R"EOF( +grpc_criteria: + grpc_success_status: + - 17 +)EOF"; + EXPECT_THROW_WITH_REGEX(makeEvaluator(yaml), EnvoyException, "invalid gRPC code*"); +} + +// Verify correct HTTP range validation. 
+TEST_F(SuccessCriteriaTest, HttpRangeValidation) { + auto check_ranges = [this](std::string&& yaml) { + EXPECT_THROW_WITH_REGEX(makeEvaluator(yaml), EnvoyException, "invalid HTTP range*"); + }; + + check_ranges(R"EOF( +http_criteria: + http_success_status: + - start: 300 + end: 200 +)EOF"); + + check_ranges(R"EOF( +http_criteria: + http_success_status: + - start: 600 + end: 600 +)EOF"); + + check_ranges(R"EOF( +http_criteria: + http_success_status: + - start: 99 + end: 99 +)EOF"); +} + +} // namespace +} // namespace AdmissionControl +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/tools/spelling/spelling_dictionary.txt b/tools/spelling/spelling_dictionary.txt index cf535b89380c..a9c2b9af21ef 100644 --- a/tools/spelling/spelling_dictionary.txt +++ b/tools/spelling/spelling_dictionary.txt @@ -567,6 +567,7 @@ epoll errno etag etags +evaluator evbuffer evbuffers evconnlistener @@ -863,6 +864,7 @@ preorder prepend prepended prev +probabilistically proc profiler programmatically From ceb74cdb205ca5272c4d33d39ecc9bd57f86aaf2 Mon Sep 17 00:00:00 2001 From: tomocy <36136133+tomocy@users.noreply.github.com> Date: Thu, 18 Jun 2020 06:59:41 +0900 Subject: [PATCH 375/909] style: use type aliases (#11601) Risk Level: Low Testing: Docs Changes: Release Notes: Fixes #11535 Signed-off-by: tomocy --- api/bazel/repository_locations.bzl | 2 +- bazel/envoy_library.bzl | 2 +- bazel/genrule_repository.bzl | 2 +- generated_api_shadow/bazel/repository_locations.bzl | 2 +- include/envoy/network/filter.h | 2 ++ include/envoy/runtime/runtime.h | 4 +++- include/envoy/server/factory_context.h | 1 + include/envoy/stream_info/filter_state.h | 8 +++++--- source/common/grpc/google_async_client_impl.h | 11 +++++++---- source/common/router/scoped_config_impl.cc | 5 ++--- source/common/router/scoped_config_impl.h | 7 +++++-- source/common/runtime/runtime_impl.cc | 4 ++-- source/common/runtime/runtime_impl.h | 8 +++++--- source/common/stream_info/filter_state_impl.cc | 4 ++-- source/common/stream_info/filter_state_impl.h | 10 +++++----- source/common/stream_info/stream_info_impl.h | 2 +- source/extensions/common/wasm/BUILD | 1 + source/extensions/common/wasm/null/null_vm.h | 2 +- source/extensions/common/wasm/null/null_vm_plugin.h | 6 +++++- source/server/filter_chain_factory_context_callback.h | 2 +- source/server/filter_chain_manager_impl.cc | 5 ++--- source/server/filter_chain_manager_impl.h | 8 ++++---- source/server/listener_manager_impl.cc | 8 +++----- source/server/listener_manager_impl.h | 9 ++++----- test/common/grpc/google_async_client_impl_test.cc | 4 ++-- test/common/router/scoped_config_impl_test.cc | 2 +- test/mocks/runtime/mocks.h | 2 +- test/server/filter_chain_benchmark_test.cc | 2 +- test/server/filter_chain_manager_impl_test.cc | 2 +- tools/protodoc/protodoc.bzl | 2 +- tools/protoxform/protoxform.bzl | 2 +- tools/type_whisperer/type_whisperer.bzl | 2 +- 32 files changed, 74 insertions(+), 59 deletions(-) diff --git a/api/bazel/repository_locations.bzl b/api/bazel/repository_locations.bzl index 503d6bc89078..afe78af1e47b 100644 --- a/api/bazel/repository_locations.bzl +++ b/api/bazel/repository_locations.bzl @@ -33,7 +33,7 @@ REPOSITORY_LOCATIONS = dict( urls = ["https://github.com/envoyproxy/protoc-gen-validate/archive/" + PGV_GIT_SHA + ".tar.gz"], ), com_google_googleapis = dict( - # TODO(dio): Consider writing a Skylark macro for importing Google API proto. + # TODO(dio): Consider writing a Starlark macro for importing Google API proto. 
sha256 = GOOGLEAPIS_SHA, strip_prefix = "googleapis-" + GOOGLEAPIS_GIT_SHA, urls = ["https://github.com/googleapis/googleapis/archive/" + GOOGLEAPIS_GIT_SHA + ".tar.gz"], diff --git a/bazel/envoy_library.bzl b/bazel/envoy_library.bzl index 6f8c56497093..40cd6683836e 100644 --- a/bazel/envoy_library.bzl +++ b/bazel/envoy_library.bzl @@ -21,7 +21,7 @@ def tcmalloc_external_deps(repository): # Envoy C++ library targets that need no transformations or additional dependencies before being # passed to cc_library should be specified with this function. Note: this exists to ensure that -# all envoy targets pass through an envoy-declared skylark function where they can be modified +# all envoy targets pass through an envoy-declared starlark function where they can be modified # before being passed to a native bazel function. def envoy_basic_cc_library(name, deps = [], external_deps = [], **kargs): cc_library( diff --git a/bazel/genrule_repository.bzl b/bazel/genrule_repository.bzl index ff4e6fe9dcaa..28f37adfe55c 100644 --- a/bazel/genrule_repository.bzl +++ b/bazel/genrule_repository.bzl @@ -68,7 +68,7 @@ def _genrule_cc_deps(ctx): genrule_cc_deps = rule( attrs = { "deps": attr.label_list( - providers = [], # CcSkylarkApiProvider + providers = [], # CcStarlarkApiProvider mandatory = True, allow_empty = False, ), diff --git a/generated_api_shadow/bazel/repository_locations.bzl b/generated_api_shadow/bazel/repository_locations.bzl index 503d6bc89078..afe78af1e47b 100644 --- a/generated_api_shadow/bazel/repository_locations.bzl +++ b/generated_api_shadow/bazel/repository_locations.bzl @@ -33,7 +33,7 @@ REPOSITORY_LOCATIONS = dict( urls = ["https://github.com/envoyproxy/protoc-gen-validate/archive/" + PGV_GIT_SHA + ".tar.gz"], ), com_google_googleapis = dict( - # TODO(dio): Consider writing a Skylark macro for importing Google API proto. + # TODO(dio): Consider writing a Starlark macro for importing Google API proto. sha256 = GOOGLEAPIS_SHA, strip_prefix = "googleapis-" + GOOGLEAPIS_GIT_SHA, urls = ["https://github.com/googleapis/googleapis/archive/" + GOOGLEAPIS_GIT_SHA + ".tar.gz"], diff --git a/include/envoy/network/filter.h b/include/envoy/network/filter.h index f929f0472afd..a2603416e9df 100644 --- a/include/envoy/network/filter.h +++ b/include/envoy/network/filter.h @@ -356,6 +356,8 @@ class DrainableFilterChain : public FilterChain { virtual void startDraining() PURE; }; +using DrainableFilterChainSharedPtr = std::shared_ptr; + /** * Interface for searching through configured filter chains. */ diff --git a/include/envoy/runtime/runtime.h b/include/envoy/runtime/runtime.h index f68b67a0ae56..89f904ffac08 100644 --- a/include/envoy/runtime/runtime.h +++ b/include/envoy/runtime/runtime.h @@ -257,6 +257,8 @@ class Snapshot { virtual const std::vector& getLayers() const PURE; }; +using SnapshotConstSharedPtr = std::shared_ptr; + /** * Loads runtime snapshots from storage (local disk, etc.). */ @@ -285,7 +287,7 @@ class Loader { * @return shared_ptr the current snapshot. This function may safely be called * from non-worker threads. */ - virtual std::shared_ptr threadsafeSnapshot() PURE; + virtual SnapshotConstSharedPtr threadsafeSnapshot() PURE; /** * Merge the given map of key-value pairs into the runtime's state. 
To remove a previous merge for diff --git a/include/envoy/server/factory_context.h b/include/envoy/server/factory_context.h index 245499464e1d..56dac952be3e 100644 --- a/include/envoy/server/factory_context.h +++ b/include/envoy/server/factory_context.h @@ -1,6 +1,7 @@ #pragma once #include +#include #include "envoy/access_log/access_log.h" #include "envoy/config/core/v3/base.pb.h" diff --git a/include/envoy/stream_info/filter_state.h b/include/envoy/stream_info/filter_state.h index f68fca790ab2..20377176b56f 100644 --- a/include/envoy/stream_info/filter_state.h +++ b/include/envoy/stream_info/filter_state.h @@ -15,6 +15,10 @@ namespace Envoy { namespace StreamInfo { +class FilterState; + +using FilterStateSharedPtr = std::shared_ptr; + /** * FilterState represents dynamically generated information regarding a stream (TCP or HTTP level) * or a connection by various filters in Envoy. FilterState can be write-once or write-many. @@ -146,14 +150,12 @@ class FilterState { * @return the pointer of the parent FilterState that has longer life span. nullptr means this is * either the top LifeSpan or the parent is not yet created. */ - virtual std::shared_ptr parent() const PURE; + virtual FilterStateSharedPtr parent() const PURE; protected: virtual const Object* getDataReadOnlyGeneric(absl::string_view data_name) const PURE; virtual Object* getDataMutableGeneric(absl::string_view data_name) PURE; }; -using FilterStateSharedPtr = std::shared_ptr; - } // namespace StreamInfo } // namespace Envoy diff --git a/source/common/grpc/google_async_client_impl.h b/source/common/grpc/google_async_client_impl.h index a23a3791f6f2..f2dc3eded14d 100644 --- a/source/common/grpc/google_async_client_impl.h +++ b/source/common/grpc/google_async_client_impl.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include "envoy/api/api.h" @@ -128,6 +129,8 @@ class GoogleStub { grpc::CompletionQueue* cq) PURE; }; +using GoogleStubSharedPtr = std::shared_ptr; + class GoogleGenericStub : public GoogleStub { public: GoogleGenericStub(std::shared_ptr channel) : stub_(channel) {} @@ -148,12 +151,12 @@ class GoogleStubFactory { virtual ~GoogleStubFactory() = default; // Create a stub from a given channel. - virtual std::shared_ptr createStub(std::shared_ptr channel) PURE; + virtual GoogleStubSharedPtr createStub(std::shared_ptr channel) PURE; }; class GoogleGenericStubFactory : public GoogleStubFactory { public: - std::shared_ptr createStub(std::shared_ptr channel) override { + GoogleStubSharedPtr createStub(std::shared_ptr channel) override { return std::make_shared(channel); } }; @@ -185,7 +188,7 @@ class GoogleAsyncClientImpl final : public RawAsyncClient, Logger::Loggable stub_; + GoogleStubSharedPtr stub_; std::list> active_streams_; const std::string stat_prefix_; const Protobuf::RepeatedPtrField initial_metadata_; @@ -272,7 +275,7 @@ class GoogleAsyncStreamImpl : public RawAsyncStream, Event::Dispatcher& dispatcher_; // We hold a ref count on the stub_ to allow the stream to wait for its tags // to drain from the CQ on cleanup. 
- std::shared_ptr stub_; + GoogleStubSharedPtr stub_; std::string service_full_name_; std::string method_name_; RawAsyncStreamCallbacks& callbacks_; diff --git a/source/common/router/scoped_config_impl.cc b/source/common/router/scoped_config_impl.cc index 6a5d6ae2934c..e5e51b763191 100644 --- a/source/common/router/scoped_config_impl.cc +++ b/source/common/router/scoped_config_impl.cc @@ -103,8 +103,7 @@ ScopeKeyBuilderImpl::ScopeKeyBuilderImpl(ScopedRoutes::ScopeKeyBuilder&& config) } } -std::unique_ptr -ScopeKeyBuilderImpl::computeScopeKey(const Http::HeaderMap& headers) const { +ScopeKeyPtr ScopeKeyBuilderImpl::computeScopeKey(const Http::HeaderMap& headers) const { ScopeKey key; for (const auto& builder : fragment_builders_) { // returns nullopt if a null fragment is found. @@ -139,7 +138,7 @@ void ScopedConfigImpl::removeRoutingScope(const std::string& scope_name) { Router::ConfigConstSharedPtr ScopedConfigImpl::getRouteConfig(const Http::HeaderMap& headers) const { - std::unique_ptr scope_key = scope_key_builder_.computeScopeKey(headers); + ScopeKeyPtr scope_key = scope_key_builder_.computeScopeKey(headers); if (scope_key == nullptr) { return nullptr; } diff --git a/source/common/router/scoped_config_impl.h b/source/common/router/scoped_config_impl.h index 1879fb33a87d..575a097407d6 100644 --- a/source/common/router/scoped_config_impl.h +++ b/source/common/router/scoped_config_impl.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include "envoy/config/route/v3/scoped_route.pb.h" @@ -77,6 +78,8 @@ class ScopeKey { std::vector> fragments_; }; +using ScopeKeyPtr = std::unique_ptr; + // String fragment. class StringKeyFragment : public ScopeKeyFragmentBase { public: @@ -130,7 +133,7 @@ class ScopeKeyBuilderBase { virtual ~ScopeKeyBuilderBase() = default; // Computes scope key for given headers, returns nullptr if a key can't be computed. 
- virtual std::unique_ptr computeScopeKey(const Http::HeaderMap& headers) const PURE; + virtual ScopeKeyPtr computeScopeKey(const Http::HeaderMap& headers) const PURE; protected: const ScopedRoutes::ScopeKeyBuilder config_; @@ -140,7 +143,7 @@ class ScopeKeyBuilderImpl : public ScopeKeyBuilderBase { public: explicit ScopeKeyBuilderImpl(ScopedRoutes::ScopeKeyBuilder&& config); - std::unique_ptr computeScopeKey(const Http::HeaderMap& headers) const override; + ScopeKeyPtr computeScopeKey(const Http::HeaderMap& headers) const override; private: std::vector> fragment_builders_; diff --git a/source/common/runtime/runtime_impl.cc b/source/common/runtime/runtime_impl.cc index a9f81c4f1a41..7d95b940344c 100644 --- a/source/common/runtime/runtime_impl.cc +++ b/source/common/runtime/runtime_impl.cc @@ -604,7 +604,7 @@ const Snapshot& LoaderImpl::snapshot() { return tls_->getTyped(); } -std::shared_ptr LoaderImpl::threadsafeSnapshot() { +SnapshotConstSharedPtr LoaderImpl::threadsafeSnapshot() { if (tls_->currentThreadRegistered()) { return std::dynamic_pointer_cast(tls_->get()); } @@ -630,7 +630,7 @@ RuntimeStats LoaderImpl::generateStats(Stats::Store& store) { return stats; } -std::unique_ptr LoaderImpl::createNewSnapshot() { +SnapshotImplPtr LoaderImpl::createNewSnapshot() { std::vector layers; uint32_t disk_layers = 0; uint32_t error_layers = 0; diff --git a/source/common/runtime/runtime_impl.h b/source/common/runtime/runtime_impl.h index 480a345f3f72..5bec747b93f7 100644 --- a/source/common/runtime/runtime_impl.h +++ b/source/common/runtime/runtime_impl.h @@ -130,6 +130,8 @@ class SnapshotImpl : public Snapshot, RuntimeStats& stats_; }; +using SnapshotImplPtr = std::unique_ptr; + /** * Base implementation of OverrideLayer that by itself provides an empty values map. 
*/ @@ -252,7 +254,7 @@ class LoaderImpl : public Loader, Logger::Loggable { // Runtime::Loader void initialize(Upstream::ClusterManager& cm) override; const Snapshot& snapshot() override; - std::shared_ptr threadsafeSnapshot() override; + SnapshotConstSharedPtr threadsafeSnapshot() override; void mergeValues(const std::unordered_map& values) override; void startRtdsSubscriptions(ReadyCallback on_done) override; @@ -260,7 +262,7 @@ class LoaderImpl : public Loader, Logger::Loggable { friend RtdsSubscription; // Create a new Snapshot - virtual std::unique_ptr createNewSnapshot(); + virtual SnapshotImplPtr createNewSnapshot(); // Load a new Snapshot into TLS void loadNewSnapshot(); RuntimeStats generateStats(Stats::Store& store); @@ -281,7 +283,7 @@ class LoaderImpl : public Loader, Logger::Loggable { Upstream::ClusterManager* cm_{}; absl::Mutex snapshot_mutex_; - std::shared_ptr thread_safe_snapshot_ ABSL_GUARDED_BY(snapshot_mutex_); + SnapshotConstSharedPtr thread_safe_snapshot_ ABSL_GUARDED_BY(snapshot_mutex_); }; } // namespace Runtime diff --git a/source/common/stream_info/filter_state_impl.cc b/source/common/stream_info/filter_state_impl.cc index 6097f02e0428..d873587abfcf 100644 --- a/source/common/stream_info/filter_state_impl.cc +++ b/source/common/stream_info/filter_state_impl.cc @@ -97,8 +97,8 @@ void FilterStateImpl::maybeCreateParent(ParentAccessMode parent_access_mode) { if (life_span_ >= FilterState::LifeSpan::TopSpan) { return; } - if (absl::holds_alternative>(ancestor_)) { - std::shared_ptr ancestor = absl::get>(ancestor_); + if (absl::holds_alternative(ancestor_)) { + FilterStateSharedPtr ancestor = absl::get(ancestor_); if (ancestor == nullptr || ancestor->lifeSpan() != life_span_ + 1) { parent_ = std::make_shared(ancestor, FilterState::LifeSpan(life_span_ + 1)); } else { diff --git a/source/common/stream_info/filter_state_impl.h b/source/common/stream_info/filter_state_impl.h index 6bf8fb9ad517..793938e29ead 100644 --- a/source/common/stream_info/filter_state_impl.h +++ b/source/common/stream_info/filter_state_impl.h @@ -22,12 +22,12 @@ class FilterStateImpl : public FilterState { * @param ancestor a std::shared_ptr storing an already created ancestor. * @param life_span the life span this is handling. */ - FilterStateImpl(std::shared_ptr ancestor, FilterState::LifeSpan life_span) + FilterStateImpl(FilterStateSharedPtr ancestor, FilterState::LifeSpan life_span) : ancestor_(ancestor), life_span_(life_span) { maybeCreateParent(ParentAccessMode::ReadOnly); } - using LazyCreateAncestor = std::pair&, FilterState::LifeSpan>; + using LazyCreateAncestor = std::pair; /** * @param ancestor a std::pair storing an ancestor, that can be passed in as a way to lazy * initialize a FilterState that's owned by an object with bigger scope than this. This is to @@ -49,7 +49,7 @@ class FilterStateImpl : public FilterState { bool hasDataAtOrAboveLifeSpan(FilterState::LifeSpan life_span) const override; FilterState::LifeSpan lifeSpan() const override { return life_span_; } - std::shared_ptr parent() const override { return parent_; } + FilterStateSharedPtr parent() const override { return parent_; } private: // This only checks the local data_storage_ for data_name existence. 
@@ -62,8 +62,8 @@ class FilterStateImpl : public FilterState { FilterState::StateType state_type_; }; - absl::variant, LazyCreateAncestor> ancestor_; - std::shared_ptr parent_; + absl::variant ancestor_; + FilterStateSharedPtr parent_; const FilterState::LifeSpan life_span_; absl::flat_hash_map> data_storage_; }; diff --git a/source/common/stream_info/stream_info_impl.h b/source/common/stream_info/stream_info_impl.h index 3c440f91afa1..bf8af8d73b5b 100644 --- a/source/common/stream_info/stream_info_impl.h +++ b/source/common/stream_info/stream_info_impl.h @@ -27,7 +27,7 @@ struct StreamInfoImpl : public StreamInfo { std::make_shared(FilterState::LifeSpan::FilterChain)) {} StreamInfoImpl(Http::Protocol protocol, TimeSource& time_source, - std::shared_ptr& parent_filter_state) + FilterStateSharedPtr& parent_filter_state) : StreamInfoImpl(protocol, time_source, std::make_shared( FilterStateImpl::LazyCreateAncestor(parent_filter_state, diff --git a/source/extensions/common/wasm/BUILD b/source/extensions/common/wasm/BUILD index c511d3806fe1..c31b2deb485b 100644 --- a/source/extensions/common/wasm/BUILD +++ b/source/extensions/common/wasm/BUILD @@ -21,6 +21,7 @@ envoy_cc_library( hdrs = ["wasm_vm.h"], deps = [ ":well_known_names", + "//include/envoy/stats:stats_interface", "//source/common/common:minimal_logger_lib", ], ) diff --git a/source/extensions/common/wasm/null/null_vm.h b/source/extensions/common/wasm/null/null_vm.h index e0cf345c51b6..9bdaad668f8b 100644 --- a/source/extensions/common/wasm/null/null_vm.h +++ b/source/extensions/common/wasm/null/null_vm.h @@ -55,7 +55,7 @@ struct NullVm : public WasmVmBase { #undef _REGISTER_CALLBACK std::string plugin_name_; - std::unique_ptr plugin_; + NullVmPluginPtr plugin_; }; } // namespace Null diff --git a/source/extensions/common/wasm/null/null_vm_plugin.h b/source/extensions/common/wasm/null/null_vm_plugin.h index bc89271452c6..1176c98c07c9 100644 --- a/source/extensions/common/wasm/null/null_vm_plugin.h +++ b/source/extensions/common/wasm/null/null_vm_plugin.h @@ -1,5 +1,7 @@ #pragma once +#include + #include "envoy/config/typed_config.h" #include "extensions/common/wasm/wasm_vm.h" @@ -24,6 +26,8 @@ class NullVmPlugin { #undef _DEFIN_GET_FUNCTIONE }; +using NullVmPluginPtr = std::unique_ptr; + /** * Pseudo-WASM plugins using the NullVM should implement this factory and register via * Registry::registerFactory or the convenience class RegisterFactory. @@ -37,7 +41,7 @@ class NullVmPluginFactory : public Config::UntypedFactory { /** * Create an instance of the plugin. */ - virtual std::unique_ptr create() const PURE; + virtual NullVmPluginPtr create() const PURE; }; } // namespace Null diff --git a/source/server/filter_chain_factory_context_callback.h b/source/server/filter_chain_factory_context_callback.h index 1230bfe5c7e1..883f1477b48e 100644 --- a/source/server/filter_chain_factory_context_callback.h +++ b/source/server/filter_chain_factory_context_callback.h @@ -21,7 +21,7 @@ class FilterChainFactoryContextCreator { * Generate the filter chain factory context from proto. Note the caller does not own the filter * chain context. 
*/ - virtual std::unique_ptr createFilterChainFactoryContext( + virtual Configuration::FilterChainFactoryContextPtr createFilterChainFactoryContext( const ::envoy::config::listener::v3::FilterChain* const filter_chain) PURE; }; diff --git a/source/server/filter_chain_manager_impl.cc b/source/server/filter_chain_manager_impl.cc index f6bdf328e920..7c544ca66521 100644 --- a/source/server/filter_chain_manager_impl.cc +++ b/source/server/filter_chain_manager_impl.cc @@ -593,7 +593,7 @@ void FilterChainManagerImpl::convertIPsToTries() { } } -std::shared_ptr FilterChainManagerImpl::findExistingFilterChain( +Network::DrainableFilterChainSharedPtr FilterChainManagerImpl::findExistingFilterChain( const envoy::config::listener::v3::FilterChain& filter_chain_message) { // Origin filter chain manager could be empty if the current is the ancestor. const auto* origin = getOriginFilterChainManager(); @@ -609,8 +609,7 @@ std::shared_ptr FilterChainManagerImpl::findExist return nullptr; } -std::unique_ptr -FilterChainManagerImpl::createFilterChainFactoryContext( +Configuration::FilterChainFactoryContextPtr FilterChainManagerImpl::createFilterChainFactoryContext( const ::envoy::config::listener::v3::FilterChain* const filter_chain) { // TODO(lambdai): add stats UNREFERENCED_PARAMETER(filter_chain); diff --git a/source/server/filter_chain_manager_impl.h b/source/server/filter_chain_manager_impl.h index 4acdc52470c0..6857bba4620b 100644 --- a/source/server/filter_chain_manager_impl.h +++ b/source/server/filter_chain_manager_impl.h @@ -30,7 +30,7 @@ class FilterChainFactoryBuilder { * @return Shared filter chain where builder is allowed to determine and reuse duplicated filter * chain. Throw exception if failed. */ - virtual std::shared_ptr + virtual Network::DrainableFilterChainSharedPtr buildFilterChain(const envoy::config::listener::v3::FilterChain& filter_chain, FilterChainFactoryContextCreator& context_creator) const PURE; }; @@ -168,7 +168,7 @@ class FilterChainManagerImpl : public Network::FilterChainManager, public: using FcContextMap = absl::flat_hash_map, MessageUtil, MessageUtil>; + Network::DrainableFilterChainSharedPtr, MessageUtil, MessageUtil>; FilterChainManagerImpl(const Network::Address::InstanceConstSharedPtr& address, Configuration::FactoryContext& factory_context, Init::Manager& init_manager) @@ -179,7 +179,7 @@ class FilterChainManagerImpl : public Network::FilterChainManager, Init::Manager& init_manager, const FilterChainManagerImpl& parent_manager); // FilterChainFactoryContextCreator - std::unique_ptr createFilterChainFactoryContext( + Configuration::FilterChainFactoryContextPtr createFilterChainFactoryContext( const ::envoy::config::listener::v3::FilterChain* const filter_chain) override; // Network::FilterChainManager @@ -288,7 +288,7 @@ class FilterChainManagerImpl : public Network::FilterChainManager, const FilterChainManagerImpl* getOriginFilterChainManager() { return origin_.value(); } // Duplicate the inherent factory context if any. - std::shared_ptr + Network::DrainableFilterChainSharedPtr findExistingFilterChain(const envoy::config::listener::v3::FilterChain& filter_chain_message); // Mapping from filter chain message to filter chain. 
This is used by LDS response handler to diff --git a/source/server/listener_manager_impl.cc b/source/server/listener_manager_impl.cc index ca7bc7547148..f4e6e87e9528 100644 --- a/source/server/listener_manager_impl.cc +++ b/source/server/listener_manager_impl.cc @@ -936,18 +936,16 @@ ListenerFilterChainFactoryBuilder::ListenerFilterChainFactoryBuilder( : validator_(validator), listener_component_factory_(listener_component_factory), factory_context_(factory_context) {} -std::shared_ptr ListenerFilterChainFactoryBuilder::buildFilterChain( +Network::DrainableFilterChainSharedPtr ListenerFilterChainFactoryBuilder::buildFilterChain( const envoy::config::listener::v3::FilterChain& filter_chain, FilterChainFactoryContextCreator& context_creator) const { return buildFilterChainInternal(filter_chain, context_creator.createFilterChainFactoryContext(&filter_chain)); } -std::shared_ptr -ListenerFilterChainFactoryBuilder::buildFilterChainInternal( +Network::DrainableFilterChainSharedPtr ListenerFilterChainFactoryBuilder::buildFilterChainInternal( const envoy::config::listener::v3::FilterChain& filter_chain, - std::unique_ptr&& filter_chain_factory_context) - const { + Configuration::FilterChainFactoryContextPtr&& filter_chain_factory_context) const { // If the cluster doesn't have transport socket configured, then use the default "raw_buffer" // transport socket or BoringSSL-based "tls" transport socket if TLS settings are configured. // We copy by value first then override if necessary. diff --git a/source/server/listener_manager_impl.h b/source/server/listener_manager_impl.h index 8a734350103b..b677792800e4 100644 --- a/source/server/listener_manager_impl.h +++ b/source/server/listener_manager_impl.h @@ -318,15 +318,14 @@ class ListenerFilterChainFactoryBuilder : public FilterChainFactoryBuilder { ListenerComponentFactory& listener_component_factory, Server::Configuration::TransportSocketFactoryContextImpl& factory_context); - std::shared_ptr + Network::DrainableFilterChainSharedPtr buildFilterChain(const envoy::config::listener::v3::FilterChain& filter_chain, FilterChainFactoryContextCreator& context_creator) const override; private: - std::shared_ptr - buildFilterChainInternal(const envoy::config::listener::v3::FilterChain& filter_chain, - std::unique_ptr&& - filter_chain_factory_context) const; + Network::DrainableFilterChainSharedPtr buildFilterChainInternal( + const envoy::config::listener::v3::FilterChain& filter_chain, + Configuration::FilterChainFactoryContextPtr&& filter_chain_factory_context) const; ProtobufMessage::ValidationVisitor& validator_; ListenerComponentFactory& listener_component_factory_; diff --git a/test/common/grpc/google_async_client_impl_test.cc b/test/common/grpc/google_async_client_impl_test.cc index 86066f8bc66a..1fa083234e1d 100644 --- a/test/common/grpc/google_async_client_impl_test.cc +++ b/test/common/grpc/google_async_client_impl_test.cc @@ -39,12 +39,12 @@ class MockGenericStub : public GoogleStub { class MockStubFactory : public GoogleStubFactory { public: - std::shared_ptr createStub(std::shared_ptr /*channel*/) override { + GoogleStubSharedPtr createStub(std::shared_ptr /*channel*/) override { return shared_stub_; } MockGenericStub* stub_ = new MockGenericStub(); - std::shared_ptr shared_stub_{stub_}; + GoogleStubSharedPtr shared_stub_{stub_}; }; class EnvoyGoogleAsyncClientImplTest : public testing::Test { diff --git a/test/common/router/scoped_config_impl_test.cc b/test/common/router/scoped_config_impl_test.cc index df842d51cf35..e84540850b9b 100644 --- 
a/test/common/router/scoped_config_impl_test.cc +++ b/test/common/router/scoped_config_impl_test.cc @@ -299,7 +299,7 @@ TEST(ScopeKeyBuilderImplTest, Parse) { TestUtility::loadFromYaml(yaml_plain, config); ScopeKeyBuilderImpl key_builder(std::move(config)); - std::unique_ptr key = key_builder.computeScopeKey(TestRequestHeaderMapImpl{ + ScopeKeyPtr key = key_builder.computeScopeKey(TestRequestHeaderMapImpl{ {"foo_header", "a=b,bar=bar_value,e=f"}, {"bar_header", "a=b;bar=bar_value;index2"}, }); diff --git a/test/mocks/runtime/mocks.h b/test/mocks/runtime/mocks.h index 1f4d5d0589fa..7e02d94f1397 100644 --- a/test/mocks/runtime/mocks.h +++ b/test/mocks/runtime/mocks.h @@ -73,7 +73,7 @@ class MockLoader : public Loader { MOCK_METHOD(void, initialize, (Upstream::ClusterManager & cm)); MOCK_METHOD(const Snapshot&, snapshot, ()); - MOCK_METHOD(std::shared_ptr, threadsafeSnapshot, ()); + MOCK_METHOD(SnapshotConstSharedPtr, threadsafeSnapshot, ()); MOCK_METHOD(void, mergeValues, ((const std::unordered_map&))); MOCK_METHOD(void, startRtdsSubscriptions, (ReadyCallback)); diff --git a/test/server/filter_chain_benchmark_test.cc b/test/server/filter_chain_benchmark_test.cc index 56a3ec4754df..819932e968f9 100644 --- a/test/server/filter_chain_benchmark_test.cc +++ b/test/server/filter_chain_benchmark_test.cc @@ -28,7 +28,7 @@ namespace Server { namespace { class MockFilterChainFactoryBuilder : public FilterChainFactoryBuilder { - std::shared_ptr + Network::DrainableFilterChainSharedPtr buildFilterChain(const envoy::config::listener::v3::FilterChain&, FilterChainFactoryContextCreator&) const override { // A place holder to be found diff --git a/test/server/filter_chain_manager_impl_test.cc b/test/server/filter_chain_manager_impl_test.cc index 85a67482abfc..4779dc32576e 100644 --- a/test/server/filter_chain_manager_impl_test.cc +++ b/test/server/filter_chain_manager_impl_test.cc @@ -51,7 +51,7 @@ class MockFilterChainFactoryBuilder : public FilterChainFactoryBuilder { .WillByDefault(Return(std::make_shared())); } - MOCK_METHOD(std::shared_ptr, buildFilterChain, + MOCK_METHOD(Network::DrainableFilterChainSharedPtr, buildFilterChain, (const envoy::config::listener::v3::FilterChain&, FilterChainFactoryContextCreator&), (const)); }; diff --git a/tools/protodoc/protodoc.bzl b/tools/protodoc/protodoc.bzl index b25ae7a3577d..0ed26121fe6a 100644 --- a/tools/protodoc/protodoc.bzl +++ b/tools/protodoc/protodoc.bzl @@ -3,7 +3,7 @@ load("//tools/api_proto_plugin:plugin.bzl", "api_proto_plugin_aspect", "api_prot def _protodoc_impl(target, ctx): return api_proto_plugin_impl(target, ctx, "rst", "protodoc", [".rst"]) -# Bazel aspect (https://docs.bazel.build/versions/master/skylark/aspects.html) +# Bazel aspect (https://docs.bazel.build/versions/master/starlark/aspects.html) # that can be invoked from the CLI to produce docs via //tools/protodoc for # proto_library targets. Example use: # diff --git a/tools/protoxform/protoxform.bzl b/tools/protoxform/protoxform.bzl index d3ea80534896..abdbac95b396 100644 --- a/tools/protoxform/protoxform.bzl +++ b/tools/protoxform/protoxform.bzl @@ -13,7 +13,7 @@ def _protoxform_impl(target, ctx): ], ) -# Bazel aspect (https://docs.bazel.build/versions/master/skylark/aspects.html) +# Bazel aspect (https://docs.bazel.build/versions/master/starlark/aspects.html) # that can be invoked from the CLI to perform API transforms via //tools/protoxform for # proto_library targets. 
Example use: # diff --git a/tools/type_whisperer/type_whisperer.bzl b/tools/type_whisperer/type_whisperer.bzl index 248b1752eeb6..b9df280829ad 100644 --- a/tools/type_whisperer/type_whisperer.bzl +++ b/tools/type_whisperer/type_whisperer.bzl @@ -3,7 +3,7 @@ load("//tools/api_proto_plugin:plugin.bzl", "api_proto_plugin_aspect", "api_prot def _type_whisperer_impl(target, ctx): return api_proto_plugin_impl(target, ctx, "types_pb_text", "TypeWhisperer", [".types.pb_text"]) -# Bazel aspect (https://docs.bazel.build/versions/master/skylark/aspects.html) +# Bazel aspect (https://docs.bazel.build/versions/master/starlark/aspects.html) # that can be invoked from the CLI to perform API type analysis via //tools/type_whisperer for # proto_library targets. Example use: # From 8262a0d6943116e55ee6210df550e4c744fc6821 Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Wed, 17 Jun 2020 17:36:31 -0600 Subject: [PATCH 376/909] http: O(1) custom header map (#11546) Implement custom O(1) header registration for header maps. - Remove virtual inheritence from header maps. - Implement variable inline storage for concrete header maps. - Various TODO/logic cleanups from previous header refactors. - Demonstrate what custom header registration looks like for the CORS filter. More cleanup of this type can be done. Signed-off-by: Matt Klein --- include/envoy/http/header_map.h | 164 ++++++- source/common/grpc/async_client_impl.cc | 8 +- .../common/grpc/google_async_client_impl.cc | 16 +- source/common/http/conn_manager_impl.cc | 4 +- source/common/http/header_map_impl.cc | 126 +++-- source/common/http/header_map_impl.h | 434 +++++++++++++----- source/common/http/http1/codec_impl.h | 9 +- source/common/http/http2/codec_impl.cc | 2 +- source/common/http/http2/codec_impl.h | 20 +- source/common/http/message_impl.h | 2 +- source/common/local_reply/local_reply.cc | 20 +- source/common/router/header_formatter.cc | 11 +- source/common/router/rds_impl.cc | 6 +- source/docs/header_map.md | 30 ++ source/exe/main_common.cc | 6 +- .../access_loggers/common/access_log_base.cc | 9 +- .../extensions/filters/common/expr/context.cc | 4 +- .../common/ratelimit/ratelimit_impl.cc | 4 +- .../filters/common/rbac/engine_impl.cc | 4 +- .../filters/http/cors/cors_filter.cc | 39 +- .../json_transcoder_filter.cc | 5 +- .../filters/http/jwt_authn/filter.cc | 6 +- .../extensions/filters/http/lua/lua_filter.cc | 4 +- .../filters/http/ratelimit/ratelimit.cc | 2 +- .../network/dubbo_proxy/serializer_impl.h | 6 +- .../filters/network/rocketmq_proxy/metadata.h | 6 +- .../filters/network/thrift_proxy/metadata.h | 6 +- .../quic_listeners/quiche/envoy_quic_utils.h | 4 +- .../extensions/stat_sinks/hystrix/hystrix.cc | 13 +- source/server/admin/admin.cc | 6 +- source/server/admin/admin_filter.cc | 2 +- source/server/server.cc | 7 + test/common/http/header_map_impl_fuzz_test.cc | 4 +- .../common/http/header_map_impl_speed_test.cc | 142 +++--- test/common/http/header_map_impl_test.cc | 110 ++--- test/common/http/utility_fuzz_test.cc | 18 +- .../http/on_demand/on_demand_filter_test.cc | 2 +- test/integration/protocol_integration_test.cc | 6 +- test/test_common/utility.h | 148 +++--- 39 files changed, 928 insertions(+), 487 deletions(-) create mode 100644 source/docs/header_map.md diff --git a/include/envoy/http/header_map.h b/include/envoy/http/header_map.h index a518395a3320..40969a78255b 100644 --- a/include/envoy/http/header_map.h +++ b/include/envoy/http/header_map.h @@ -258,14 +258,13 @@ class HeaderEntry { }; /** - * The following defines all request 
headers that Envoy allows direct access to inside of the - * header map. In practice, these are all headers used during normal Envoy request flow + * The following defines all default request headers that Envoy allows direct access to inside of + * the header map. In practice, these are all headers used during normal Envoy request flow * processing. This allows O(1) access to these headers without even a hash lookup. */ #define INLINE_REQ_HEADERS(HEADER_FUNC) \ HEADER_FUNC(Accept) \ HEADER_FUNC(AcceptEncoding) \ - HEADER_FUNC(AccessControlRequestMethod) \ HEADER_FUNC(Authorization) \ HEADER_FUNC(ClientTraceId) \ HEADER_FUNC(EnvoyDownstreamServiceCluster) \ @@ -305,15 +304,9 @@ class HeaderEntry { HEADER_FUNC(UserAgent) /** - * O(1) response headers. + * Default O(1) response headers. */ #define INLINE_RESP_HEADERS(HEADER_FUNC) \ - HEADER_FUNC(AccessControlAllowCredentials) \ - HEADER_FUNC(AccessControlAllowHeaders) \ - HEADER_FUNC(AccessControlAllowMethods) \ - HEADER_FUNC(AccessControlAllowOrigin) \ - HEADER_FUNC(AccessControlExposeHeaders) \ - HEADER_FUNC(AccessControlMaxAge) \ HEADER_FUNC(Date) \ HEADER_FUNC(Etag) \ HEADER_FUNC(EnvoyDegraded) \ @@ -328,7 +321,7 @@ class HeaderEntry { HEADER_FUNC(Vary) /** - * O(1) request and response headers. + * Default O(1) request and response headers. */ #define INLINE_REQ_RESP_HEADERS(HEADER_FUNC) \ HEADER_FUNC(CacheControl) \ @@ -346,7 +339,7 @@ class HeaderEntry { HEADER_FUNC(Via) /** - * O(1) response headers and trailers. + * Default O(1) response headers and trailers. */ #define INLINE_RESP_HEADERS_TRAILERS(HEADER_FUNC) \ HEADER_FUNC(GrpcMessage) \ @@ -374,12 +367,7 @@ class HeaderEntry { virtual void set##name(absl::string_view value) PURE; \ virtual void set##name(uint64_t value) PURE; \ virtual size_t remove##name() PURE; \ - absl::string_view get##name##Value() const { \ - if (name() != nullptr) { \ - return name()->value().getStringView(); \ - } \ - return ""; \ - } + virtual absl::string_view get##name##Value() const PURE; /** * Wraps a set of HTTP headers. @@ -625,42 +613,170 @@ class HeaderMap { using HeaderMapPtr = std::unique_ptr; +/** + * Registry for custom headers. Headers can be registered multiple times in independent + * compilation units and will still point to the same slot. Headers are registered independently + * for each concrete header map type and do not overlap. Handles are strongly typed and do not + * allow mixing. + */ +class CustomInlineHeaderRegistry { +public: + enum class Type { RequestHeaders, RequestTrailers, ResponseHeaders, ResponseTrailers }; + using RegistrationMap = std::map; + + // A "phantom" type is used here to force the compiler to verify that handles are not mixed + // between concrete header map types. + template struct Handle { + Handle(RegistrationMap::const_iterator it) : it_(it) {} + + RegistrationMap::const_iterator it_; + }; + + /** + * Register an inline header and return a handle for use in inline header calls. Must be called + * prior to finalize(). + */ + template + static Handle registerInlineHeader(const LowerCaseString& header_name) { + static size_t inline_header_index = 0; + + ASSERT(!mutableFinalized()); + auto& map = mutableRegistrationMap(); + auto entry = map.find(header_name); + if (entry == map.end()) { + map[header_name] = inline_header_index++; + } + return Handle(map.find(header_name)); + } + + /** + * Fetch the handle for a registered inline header. May only be called after finalized(). 
+ */ + template + static absl::optional> getInlineHeader(const LowerCaseString& header_name) { + ASSERT(mutableFinalized()); + auto& map = mutableRegistrationMap(); + auto entry = map.find(header_name); + if (entry != map.end()) { + return Handle(entry); + } + return absl::nullopt; + } + + /** + * Fetch all registered headers. May only be called after finalized(). + */ + template static const RegistrationMap& headers() { + ASSERT(mutableFinalized()); + return mutableRegistrationMap(); + } + + /** + * Finalize the custom header registrations. No further changes are allowed after this point. + * This guaranteed that all header maps created by the process have the same variable size and + * custom registrations. + */ + template static void finalize() { + ASSERT(!mutableFinalized()); + mutableFinalized() = true; + } + +private: + template static RegistrationMap& mutableRegistrationMap() { + MUTABLE_CONSTRUCT_ON_FIRST_USE(RegistrationMap); + } + template static bool& mutableFinalized() { MUTABLE_CONSTRUCT_ON_FIRST_USE(bool); } +}; + +/** + * Static initializer to register a custom header in a compilation unit. This can be used by + * extensions to register custom headers. + */ +template class RegisterCustomInlineHeader { +public: + RegisterCustomInlineHeader(const LowerCaseString& header) + : handle_(CustomInlineHeaderRegistry::registerInlineHeader(header)) {} + + typename CustomInlineHeaderRegistry::Handle handle() { return handle_; } + +private: + const typename CustomInlineHeaderRegistry::Handle handle_; +}; + +/** + * The following functions allow O(1) access for custom inline headers. + */ +template class CustomInlineHeaderBase { +public: + virtual ~CustomInlineHeaderBase() = default; + + static constexpr CustomInlineHeaderRegistry::Type header_map_type = type; + using Handle = CustomInlineHeaderRegistry::Handle; + + virtual const HeaderEntry* getInline(Handle handle) const PURE; + virtual void appendInline(Handle handle, absl::string_view data, + absl::string_view delimiter) PURE; + virtual void setReferenceInline(Handle, absl::string_view value) PURE; + virtual void setInline(Handle, absl::string_view value) PURE; + virtual void setInline(Handle, uint64_t value) PURE; + virtual size_t removeInline(Handle handle) PURE; + absl::string_view getInlineValue(Handle handle) const { + const auto header = getInline(handle); + if (header != nullptr) { + return header->value().getStringView(); + } + return {}; + } +}; + /** * Typed derived classes for all header map types. */ // Base class for both request and response headers. -class RequestOrResponseHeaderMap : public virtual HeaderMap { +class RequestOrResponseHeaderMap : public HeaderMap { public: INLINE_REQ_RESP_HEADERS(DEFINE_INLINE_HEADER) }; // Request headers. -class RequestHeaderMap : public RequestOrResponseHeaderMap { +class RequestHeaderMap + : public RequestOrResponseHeaderMap, + public CustomInlineHeaderBase { public: INLINE_REQ_HEADERS(DEFINE_INLINE_HEADER) }; using RequestHeaderMapPtr = std::unique_ptr; // Request trailers. -class RequestTrailerMap : public virtual HeaderMap {}; +class RequestTrailerMap + : public HeaderMap, + public CustomInlineHeaderBase {}; using RequestTrailerMapPtr = std::unique_ptr; // Base class for both response headers and trailers. -class ResponseHeaderOrTrailerMap : public virtual HeaderMap { +class ResponseHeaderOrTrailerMap { public: + virtual ~ResponseHeaderOrTrailerMap() = default; + INLINE_RESP_HEADERS_TRAILERS(DEFINE_INLINE_HEADER) }; // Response headers. 
-class ResponseHeaderMap : public RequestOrResponseHeaderMap, public ResponseHeaderOrTrailerMap { +class ResponseHeaderMap + : public RequestOrResponseHeaderMap, + public ResponseHeaderOrTrailerMap, + public CustomInlineHeaderBase { public: INLINE_RESP_HEADERS(DEFINE_INLINE_HEADER) }; using ResponseHeaderMapPtr = std::unique_ptr; // Response trailers. -class ResponseTrailerMap : public virtual HeaderMap, public ResponseHeaderOrTrailerMap {}; +class ResponseTrailerMap + : public ResponseHeaderOrTrailerMap, + public HeaderMap, + public CustomInlineHeaderBase {}; using ResponseTrailerMapPtr = std::unique_ptr; /** diff --git a/source/common/grpc/async_client_impl.cc b/source/common/grpc/async_client_impl.cc index 19aa96ed460f..ecb5709288f7 100644 --- a/source/common/grpc/async_client_impl.cc +++ b/source/common/grpc/async_client_impl.cc @@ -100,7 +100,7 @@ void AsyncStreamImpl::initialize(bool buffer_body_for_retry) { void AsyncStreamImpl::onHeaders(Http::ResponseHeaderMapPtr&& headers, bool end_stream) { const auto http_response_status = Http::Utility::getResponseStatus(*headers); const auto grpc_status = Common::getGrpcStatus(*headers); - callbacks_.onReceiveInitialMetadata(end_stream ? std::make_unique() + callbacks_.onReceiveInitialMetadata(end_stream ? Http::ResponseHeaderMapImpl::create() : std::move(headers)); if (http_response_status != enumToInt(Http::Code::OK)) { // https://github.com/grpc/grpc/blob/master/doc/http-grpc-status-mapping.md requires that @@ -108,6 +108,10 @@ void AsyncStreamImpl::onHeaders(Http::ResponseHeaderMapPtr&& headers, bool end_s if (end_stream && grpc_status) { // Due to headers/trailers type differences we need to copy here. This is an uncommon case but // we can potentially optimize in the future. + + // TODO(mattklein123): clang-tidy is showing a use after move when passing to + // onReceiveInitialMetadata() above. This looks like an actual bug that I will fix in a + // follow up. onTrailers(Http::createHeaderMap(*headers)); return; } @@ -163,7 +167,7 @@ void AsyncStreamImpl::onTrailers(Http::ResponseTrailerMapPtr&& trailers) { } void AsyncStreamImpl::streamError(Status::GrpcStatus grpc_status, const std::string& message) { - callbacks_.onReceiveTrailingMetadata(std::make_unique()); + callbacks_.onReceiveTrailingMetadata(Http::ResponseTrailerMapImpl::create()); callbacks_.onRemoteClose(grpc_status, message); resetStream(); } diff --git a/source/common/grpc/google_async_client_impl.cc b/source/common/grpc/google_async_client_impl.cc index ff7774034ac9..d22903da723e 100644 --- a/source/common/grpc/google_async_client_impl.cc +++ b/source/common/grpc/google_async_client_impl.cc @@ -171,9 +171,9 @@ void GoogleAsyncStreamImpl::initialize(bool /*buffer_body_for_retry*/) { } // Due to the different HTTP header implementations, we effectively double // copy headers here. 
- Http::RequestHeaderMapImpl initial_metadata; - callbacks_.onCreateInitialMetadata(initial_metadata); - initial_metadata.iterate( + auto initial_metadata = Http::RequestHeaderMapImpl::create(); + callbacks_.onCreateInitialMetadata(*initial_metadata); + initial_metadata->iterate( [](const Http::HeaderEntry& header, void* ctxt) { auto* client_context = static_cast(ctxt); client_context->AddMetadata(std::string(header.key().getStringView()), @@ -207,9 +207,8 @@ void GoogleAsyncStreamImpl::notifyRemoteClose(Status::GrpcStatus grpc_status, parent_.stats_.streams_closed_[grpc_status]->inc(); } ENVOY_LOG(debug, "notifyRemoteClose {} {}", grpc_status, message); - callbacks_.onReceiveTrailingMetadata(trailing_metadata - ? std::move(trailing_metadata) - : std::make_unique()); + callbacks_.onReceiveTrailingMetadata(trailing_metadata ? std::move(trailing_metadata) + : Http::ResponseTrailerMapImpl::create()); callbacks_.onRemoteClose(grpc_status, message); } @@ -312,7 +311,7 @@ void GoogleAsyncStreamImpl::handleOpCompletion(GoogleAsyncTag::Operation op, boo ASSERT(call_initialized_); rw_->Read(&read_buf_, &read_tag_); ++inflight_tags_; - Http::ResponseHeaderMapPtr initial_metadata = std::make_unique(); + Http::ResponseHeaderMapPtr initial_metadata = Http::ResponseHeaderMapImpl::create(); metadataTranslate(ctxt_.GetServerInitialMetadata(), *initial_metadata); callbacks_.onReceiveInitialMetadata(std::move(initial_metadata)); break; @@ -346,8 +345,7 @@ void GoogleAsyncStreamImpl::handleOpCompletion(GoogleAsyncTag::Operation op, boo case GoogleAsyncTag::Operation::Finish: { ASSERT(finish_pending_); ENVOY_LOG(debug, "Finish with grpc-status code {}", status_.error_code()); - Http::ResponseTrailerMapPtr trailing_metadata = - std::make_unique(); + Http::ResponseTrailerMapPtr trailing_metadata = Http::ResponseTrailerMapImpl::create(); metadataTranslate(ctxt_.GetServerTrailingMetadata(), *trailing_metadata); notifyRemoteClose(static_cast(status_.error_code()), std::move(trailing_metadata), status_.error_message()); diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index 22e40d6b5f8d..10c682ae5714 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -1254,7 +1254,7 @@ RequestTrailerMap& ConnectionManagerImpl::ActiveStream::addDecodedTrailers() { // Trailers can only be added once. ASSERT(!request_trailers_); - request_trailers_ = std::make_unique(); + request_trailers_ = RequestTrailerMapImpl::create(); return *request_trailers_; } @@ -1817,7 +1817,7 @@ ResponseTrailerMap& ConnectionManagerImpl::ActiveStream::addEncodedTrailers() { // Trailers can only be added once. 
ASSERT(!response_trailers_); - response_trailers_ = std::make_unique(); + response_trailers_ = ResponseTrailerMapImpl::create(); return *response_trailers_; } diff --git a/source/common/http/header_map_impl.cc b/source/common/http/header_map_impl.cc index 73de67f6f0e0..809fa2402e6e 100644 --- a/source/common/http/header_map_impl.cc +++ b/source/common/http/header_map_impl.cc @@ -5,6 +5,8 @@ #include #include +#include "envoy/http/header_map.h" + #include "common/common/assert.h" #include "common/common/dump_state_utils.h" #include "common/common/empty_string.h" @@ -26,22 +28,22 @@ void validateCapacity(uint64_t new_capacity) { "Trying to allocate overly large headers."); } -absl::string_view get_str_view(const VariantHeader& buffer) { +absl::string_view getStrView(const VariantHeader& buffer) { return absl::get(buffer); } -InlineHeaderVector& get_in_vec(VariantHeader& buffer) { +InlineHeaderVector& getInVec(VariantHeader& buffer) { return absl::get(buffer); } -const InlineHeaderVector& get_in_vec(const VariantHeader& buffer) { +const InlineHeaderVector& getInVec(const VariantHeader& buffer) { return absl::get(buffer); } } // namespace // Initialize as a Type::Inline HeaderString::HeaderString() : buffer_(InlineHeaderVector()) { - ASSERT((get_in_vec(buffer_).capacity()) >= MaxIntegerLength); + ASSERT((getInVec(buffer_).capacity()) >= MaxIntegerLength); ASSERT(valid()); } @@ -72,19 +74,19 @@ void HeaderString::append(const char* data, uint32_t data_size) { case Type::Reference: { // Rather than be too clever and optimize this uncommon case, we switch to // Inline mode and copy. - const absl::string_view prev = get_str_view(buffer_); + const absl::string_view prev = getStrView(buffer_); buffer_ = InlineHeaderVector(); // Assigning new_capacity to avoid resizing when appending the new data - get_in_vec(buffer_).reserve(new_capacity); - get_in_vec(buffer_).assign(prev.begin(), prev.end()); + getInVec(buffer_).reserve(new_capacity); + getInVec(buffer_).assign(prev.begin(), prev.end()); break; } case Type::Inline: { - get_in_vec(buffer_).reserve(new_capacity); + getInVec(buffer_).reserve(new_capacity); break; } } - get_in_vec(buffer_).insert(get_in_vec(buffer_).end(), data, data + data_size); + getInVec(buffer_).insert(getInVec(buffer_).end(), data, data + data_size); } void HeaderString::rtrim() { @@ -92,21 +94,21 @@ void HeaderString::rtrim() { absl::string_view original = getStringView(); absl::string_view rtrimmed = StringUtil::rtrim(original); if (original.size() != rtrimmed.size()) { - get_in_vec(buffer_).resize(rtrimmed.size()); + getInVec(buffer_).resize(rtrimmed.size()); } } absl::string_view HeaderString::getStringView() const { if (type() == Type::Reference) { - return get_str_view(buffer_); + return getStrView(buffer_); } ASSERT(type() == Type::Inline); - return {get_in_vec(buffer_).data(), get_in_vec(buffer_).size()}; + return {getInVec(buffer_).data(), getInVec(buffer_).size()}; } void HeaderString::clear() { if (type() == Type::Inline) { - get_in_vec(buffer_).clear(); + getInVec(buffer_).clear(); } } @@ -118,8 +120,8 @@ void HeaderString::setCopy(const char* data, uint32_t size) { buffer_ = InlineHeaderVector(); } - get_in_vec(buffer_).reserve(size); - get_in_vec(buffer_).assign(data, data + size); + getInVec(buffer_).reserve(size); + getInVec(buffer_).assign(data, data + size); ASSERT(valid()); } @@ -141,8 +143,8 @@ void HeaderString::setInteger(uint64_t value) { // Switching from Type::Reference to Type::Inline buffer_ = InlineHeaderVector(); } - 
ASSERT((get_in_vec(buffer_).capacity()) > MaxIntegerLength); - get_in_vec(buffer_).assign(inner_buffer, inner_buffer + int_length); + ASSERT((getInVec(buffer_).capacity()) > MaxIntegerLength); + getInVec(buffer_).assign(inner_buffer, inner_buffer + int_length); } void HeaderString::setReference(absl::string_view ref_value) { @@ -152,10 +154,10 @@ void HeaderString::setReference(absl::string_view ref_value) { uint32_t HeaderString::size() const { if (type() == Type::Reference) { - return get_str_view(buffer_).size(); + return getStrView(buffer_).size(); } ASSERT(type() == Type::Inline); - return get_in_vec(buffer_).size(); + return getInVec(buffer_).size(); } HeaderString::Type HeaderString::type() const { @@ -192,30 +194,47 @@ void HeaderMapImpl::HeaderEntryImpl::value(const HeaderEntry& header) { value(header.value().getStringView()); } -#define INLINE_HEADER_STATIC_MAP_ENTRY(name) \ - add(Headers::get().name.get().c_str(), [](HeaderMapType& h) -> StaticLookupResponse { \ - return {&h.inline_headers_.name##_, &Headers::get().name}; \ - }); +template <> HeaderMapImpl::StaticLookupTable::StaticLookupTable() { +#define REGISTER_DEFAULT_REQUEST_HEADER(name) \ + CustomInlineHeaderRegistry::registerInlineHeader( \ + Headers::get().name); + INLINE_REQ_HEADERS(REGISTER_DEFAULT_REQUEST_HEADER) + INLINE_REQ_RESP_HEADERS(REGISTER_DEFAULT_REQUEST_HEADER) -template <> HeaderMapImpl::StaticLookupTable::StaticLookupTable() { - INLINE_REQ_HEADERS(INLINE_HEADER_STATIC_MAP_ENTRY) - INLINE_REQ_RESP_HEADERS(INLINE_HEADER_STATIC_MAP_ENTRY) + finalizeTable(); // Special case where we map a legacy host header to :authority. - add(Headers::get().HostLegacy.get().c_str(), [](HeaderMapType& h) -> StaticLookupResponse { - return {&h.inline_headers_.Host_, &Headers::get().Host}; + const auto handle = + CustomInlineHeaderRegistry::getInlineHeader( + Headers::get().Host); + add(Headers::get().HostLegacy.get().c_str(), [handle](HeaderMapImpl& h) -> StaticLookupResponse { + return {&h.inlineHeaders()[handle.value().it_->second], &handle.value().it_->first}; }); } -template <> HeaderMapImpl::StaticLookupTable::StaticLookupTable() { - INLINE_RESP_HEADERS(INLINE_HEADER_STATIC_MAP_ENTRY) - INLINE_REQ_RESP_HEADERS(INLINE_HEADER_STATIC_MAP_ENTRY) - INLINE_RESP_HEADERS_TRAILERS(INLINE_HEADER_STATIC_MAP_ENTRY) +template <> HeaderMapImpl::StaticLookupTable::StaticLookupTable() { + finalizeTable(); } -template <> -HeaderMapImpl::StaticLookupTable::StaticLookupTable(){ - INLINE_RESP_HEADERS_TRAILERS(INLINE_HEADER_STATIC_MAP_ENTRY)} +template <> HeaderMapImpl::StaticLookupTable::StaticLookupTable() { +#define REGISTER_RESPONSE_HEADER(name) \ + CustomInlineHeaderRegistry::registerInlineHeader( \ + Headers::get().name); + INLINE_RESP_HEADERS(REGISTER_RESPONSE_HEADER) + INLINE_REQ_RESP_HEADERS(REGISTER_RESPONSE_HEADER) + INLINE_RESP_HEADERS_TRAILERS(REGISTER_RESPONSE_HEADER) + + finalizeTable(); +} + +template <> HeaderMapImpl::StaticLookupTable::StaticLookupTable() { +#define REGISTER_RESPONSE_TRAILER(name) \ + CustomInlineHeaderRegistry::registerInlineHeader( \ + Headers::get().name); + INLINE_RESP_HEADERS_TRAILERS(REGISTER_RESPONSE_TRAILER) + + finalizeTable(); +} uint64_t HeaderMapImpl::appendToHeader(HeaderString& header, absl::string_view data, absl::string_view delimiter) { @@ -454,7 +473,7 @@ HeaderEntry* HeaderMapImpl::getExisting(const LowerCaseString& key) { return nullptr; } -void HeaderMapImpl::iterate(ConstIterateCb cb, void* context) const { +void HeaderMapImpl::iterate(HeaderMap::ConstIterateCb cb, void* context) const 
{ for (const HeaderEntryImpl& header : headers_) { if (cb(header, context) == HeaderMap::Iterate::Break) { break; @@ -462,7 +481,7 @@ void HeaderMapImpl::iterate(ConstIterateCb cb, void* context) const { } } -void HeaderMapImpl::iterateReverse(ConstIterateCb cb, void* context) const { +void HeaderMapImpl::iterateReverse(HeaderMap::ConstIterateCb cb, void* context) const { for (auto it = headers_.rbegin(); it != headers_.rend(); it++) { if (cb(*it, context) == HeaderMap::Iterate::Break) { break; @@ -482,13 +501,13 @@ HeaderMap::Lookup HeaderMapImpl::lookup(const LowerCaseString& key, if (lookup.has_value()) { *entry = *lookup.value().entry_; if (*entry) { - return Lookup::Found; + return HeaderMap::Lookup::Found; } else { - return Lookup::NotFound; + return HeaderMap::Lookup::NotFound; } } else { *entry = nullptr; - return Lookup::NotSupported; + return HeaderMap::Lookup::NotSupported; } } @@ -603,5 +622,32 @@ size_t HeaderMapImpl::removeInline(HeaderEntryImpl** ptr_to_entry) { return 1; } +namespace { +template +HeaderMapImplUtility::HeaderMapImplInfo makeHeaderMapImplInfo(absl::string_view name) { + // Constructing a header map implementation will force the custom headers and sizing to be + // finalized, so do that first. + auto header_map = T::create(); + + HeaderMapImplUtility::HeaderMapImplInfo info; + info.name_ = std::string(name); + info.size_ = T::inlineHeadersSize() + sizeof(T); + for (const auto& header : CustomInlineHeaderRegistry::headers()) { + info.registered_headers_.push_back(header.first.get()); + } + return info; +} +} // namespace + +std::vector +HeaderMapImplUtility::getAllHeaderMapImplInfo() { + std::vector ret; + ret.push_back(makeHeaderMapImplInfo("request header map")); + ret.push_back(makeHeaderMapImplInfo("request trailer map")); + ret.push_back(makeHeaderMapImplInfo("response header map")); + ret.push_back(makeHeaderMapImplInfo("response trailer map")); + return ret; +} + } // namespace Http } // namespace Envoy diff --git a/source/common/http/header_map_impl.h b/source/common/http/header_map_impl.h index 57fa82593e26..30e2eab892b9 100644 --- a/source/common/http/header_map_impl.h +++ b/source/common/http/header_map_impl.h @@ -18,46 +18,36 @@ namespace Http { /** * These are definitions of all of the inline header access functions described inside header_map.h - * TODO(asraa): Simplify code here so macros expand into single virtual calls. 
*/ #define DEFINE_INLINE_HEADER_FUNCS(name) \ public: \ - const HeaderEntry* name() const override { return inline_headers_.name##_; } \ + const HeaderEntry* name() const override { return getInline(HeaderHandles::get().name); } \ void append##name(absl::string_view data, absl::string_view delimiter) override { \ - HeaderEntry& entry = maybeCreateInline(&inline_headers_.name##_, Headers::get().name); \ - addSize(HeaderMapImpl::appendToHeader(entry.value(), data, delimiter)); \ + appendInline(HeaderHandles::get().name, data, delimiter); \ } \ void setReference##name(absl::string_view value) override { \ - HeaderEntry& entry = maybeCreateInline(&inline_headers_.name##_, Headers::get().name); \ - updateSize(entry.value().size(), value.size()); \ - entry.value().setReference(value); \ + setReferenceInline(HeaderHandles::get().name, value); \ } \ void set##name(absl::string_view value) override { \ - HeaderEntry& entry = maybeCreateInline(&inline_headers_.name##_, Headers::get().name); \ - updateSize(entry.value().size(), value.size()); \ - entry.value().setCopy(value); \ + setInline(HeaderHandles::get().name, value); \ } \ - void set##name(uint64_t value) override { \ - HeaderEntry& entry = maybeCreateInline(&inline_headers_.name##_, Headers::get().name); \ - subtractSize(inline_headers_.name##_->value().size()); \ - entry.value().setInteger(value); \ - addSize(inline_headers_.name##_->value().size()); \ - } \ - size_t remove##name() override { return removeInline(&inline_headers_.name##_); } - -#define DEFINE_INLINE_HEADER_STRUCT(name) HeaderEntryImpl* name##_; + void set##name(uint64_t value) override { setInline(HeaderHandles::get().name, value); } \ + size_t remove##name() override { return removeInline(HeaderHandles::get().name); } \ + absl::string_view get##name##Value() const override { \ + return getInlineValue(HeaderHandles::get().name); \ + } /** * Implementation of Http::HeaderMap. This is heavily optimized for performance. Roughly, when - * headers are added to the map, we do a hash lookup to see if it's one of the O(1) headers. - * If it is, we store a reference to it that can be accessed later directly. Most high performance - * paths use O(1) direct access. In general, we try to copy as little as possible and allocate as - * little as possible in any of the paths. - * TODO(mattklein123): The end result of the header refactor should be to make this a fully - * protected base class or a mix-in for the concrete header types below. + * headers are added to the map by string, we do a trie lookup to see if it's one of the O(1) + * headers. If it is, we store a reference to it that can be accessed later directly via direct + * method access. Most high performance paths use O(1) direct method access. In general, we try to + * copy as little as possible and allocate as little as possible in any of the paths. */ -class HeaderMapImpl : public virtual HeaderMap, NonCopyable { +class HeaderMapImpl : NonCopyable { public: + virtual ~HeaderMapImpl() = default; + // The following "constructors" call virtual functions during construction and must use the // static factory pattern. static void copyFrom(HeaderMap& lhs, const HeaderMap& rhs); @@ -78,30 +68,33 @@ class HeaderMapImpl : public virtual HeaderMap, NonCopyable { // Performs a manual byte size count for test verification. 
void verifyByteSizeInternalForTest() const; - // Http::HeaderMap - bool operator==(const HeaderMap& rhs) const override; - bool operator!=(const HeaderMap& rhs) const override; - void addViaMove(HeaderString&& key, HeaderString&& value) override; - void addReference(const LowerCaseString& key, absl::string_view value) override; - void addReferenceKey(const LowerCaseString& key, uint64_t value) override; - void addReferenceKey(const LowerCaseString& key, absl::string_view value) override; - void addCopy(const LowerCaseString& key, uint64_t value) override; - void addCopy(const LowerCaseString& key, absl::string_view value) override; - void appendCopy(const LowerCaseString& key, absl::string_view value) override; - void setReference(const LowerCaseString& key, absl::string_view value) override; - void setReferenceKey(const LowerCaseString& key, absl::string_view value) override; - void setCopy(const LowerCaseString& key, absl::string_view value) override; - uint64_t byteSize() const override; - const HeaderEntry* get(const LowerCaseString& key) const override; - void iterate(ConstIterateCb cb, void* context) const override; - void iterateReverse(ConstIterateCb cb, void* context) const override; - Lookup lookup(const LowerCaseString& key, const HeaderEntry** entry) const override; - void clear() override; - size_t remove(const LowerCaseString& key) override; - size_t removePrefix(const LowerCaseString& key) override; - size_t size() const override { return headers_.size(); } - bool empty() const override { return headers_.empty(); } - void dumpState(std::ostream& os, int indent_level = 0) const override; + // Note: This class does not actually implement Http::HeaderMap to avoid virtual inheritance in + // the derived classes. Instead, it is used as a mix-in class for TypedHeaderMapImpl below. This + // both avoids virtual inheritance and allows the concrete final header maps to use a variable + // length member at the end.
+ bool operator==(const HeaderMap& rhs) const; + bool operator!=(const HeaderMap& rhs) const; + void addViaMove(HeaderString&& key, HeaderString&& value); + void addReference(const LowerCaseString& key, absl::string_view value); + void addReferenceKey(const LowerCaseString& key, uint64_t value); + void addReferenceKey(const LowerCaseString& key, absl::string_view value); + void addCopy(const LowerCaseString& key, uint64_t value); + void addCopy(const LowerCaseString& key, absl::string_view value); + void appendCopy(const LowerCaseString& key, absl::string_view value); + void setReference(const LowerCaseString& key, absl::string_view value); + void setReferenceKey(const LowerCaseString& key, absl::string_view value); + void setCopy(const LowerCaseString& key, absl::string_view value); + uint64_t byteSize() const; + const HeaderEntry* get(const LowerCaseString& key) const; + void iterate(HeaderMap::ConstIterateCb cb, void* context) const; + void iterateReverse(HeaderMap::ConstIterateCb cb, void* context) const; + HeaderMap::Lookup lookup(const LowerCaseString& key, const HeaderEntry** entry) const; + void clear(); + size_t remove(const LowerCaseString& key); + size_t removePrefix(const LowerCaseString& key); + size_t size() const { return headers_.size(); } + bool empty() const { return headers_.empty(); } + void dumpState(std::ostream& os, int indent_level = 0) const; protected: struct HeaderEntryImpl : public HeaderEntry, NonCopyable { @@ -134,20 +127,44 @@ class HeaderMapImpl : public virtual HeaderMap, NonCopyable { /** * Base class for a static lookup table that converts a string key into an O(1) header. */ - template - struct StaticLookupTable : public TrieLookupTable { - using HeaderMapType = T; - + template + struct StaticLookupTable + : public TrieLookupTable> { StaticLookupTable(); - static absl::optional lookup(T& header_map, absl::string_view key) { - auto entry = ConstSingleton::get().find(key); + void finalizeTable() { + CustomInlineHeaderRegistry::finalize(); + auto& headers = CustomInlineHeaderRegistry::headers(); + size_ = headers.size(); + for (const auto& header : headers) { + this->add(header.first.get().c_str(), [&header](HeaderMapImpl& h) -> StaticLookupResponse { + return {&h.inlineHeaders()[header.second], &header.first}; + }); + } + } + + static size_t size() { + // The size of the lookup table is finalized when the singleton lookup table is created. This + // allows for late binding of custom headers as well as envoy header prefix changes. This + // does mean that once the first header map is created of this type, no further changes are + // possible. + // TODO(mattklein123): If we decide to keep this implementation, it is conceivable that header + // maps could be created by an API factory that is owned by the listener/HCM, thus making + // O(1) header delivery over xDS possible. + return ConstSingleton::get().size_; + } + + static absl::optional lookup(HeaderMapImpl& header_map, + absl::string_view key) { + const auto& entry = ConstSingleton::get().find(key); if (entry != nullptr) { return entry(header_map); } else { return absl::nullopt; } } + + size_t size_; }; /** @@ -230,13 +247,9 @@ class HeaderMapImpl : public virtual HeaderMap, NonCopyable { void updateSize(uint64_t from_size, uint64_t to_size); void addSize(uint64_t size); void subtractSize(uint64_t size); - virtual absl::optional staticLookup(absl::string_view) { - // TODO(mattklein123): Make this pure once HeaderMapImpl is a base class only. 
- return absl::nullopt; - } - virtual void clearInline() { - // TODO(mattklein123): Make this pure once HeaderMapImpl is a base class only. - } + virtual absl::optional staticLookup(absl::string_view) PURE; + virtual void clearInline() PURE; + virtual HeaderEntryImpl** inlineHeaders() PURE; HeaderList headers_; // This holds the internal byte size of the HeaderMap. @@ -244,107 +257,258 @@ class HeaderMapImpl : public virtual HeaderMap, NonCopyable { }; /** - * Typed derived classes for all header map types. + * Typed derived classes for all header map types. This class implements the actual typed + * interface and for the majority of methods just passes through to the HeaderMapImpl mix-in. Per + * above, this avoids virtual inheritance. + */ +template class TypedHeaderMapImpl : public HeaderMapImpl, public Interface { +public: + // Implementation of Http::HeaderMap that passes through to HeaderMapImpl. + bool operator==(const HeaderMap& rhs) const override { return HeaderMapImpl::operator==(rhs); } + bool operator!=(const HeaderMap& rhs) const override { return HeaderMapImpl::operator!=(rhs); } + void addViaMove(HeaderString&& key, HeaderString&& value) override { + HeaderMapImpl::addViaMove(std::move(key), std::move(value)); + } + void addReference(const LowerCaseString& key, absl::string_view value) override { + HeaderMapImpl::addReference(key, value); + } + void addReferenceKey(const LowerCaseString& key, uint64_t value) override { + HeaderMapImpl::addReferenceKey(key, value); + } + void addReferenceKey(const LowerCaseString& key, absl::string_view value) override { + HeaderMapImpl::addReferenceKey(key, value); + } + void addCopy(const LowerCaseString& key, uint64_t value) override { + HeaderMapImpl::addCopy(key, value); + } + void addCopy(const LowerCaseString& key, absl::string_view value) override { + HeaderMapImpl::addCopy(key, value); + } + void appendCopy(const LowerCaseString& key, absl::string_view value) override { + HeaderMapImpl::appendCopy(key, value); + } + void setReference(const LowerCaseString& key, absl::string_view value) override { + HeaderMapImpl::setReference(key, value); + } + void setReferenceKey(const LowerCaseString& key, absl::string_view value) override { + HeaderMapImpl::setReferenceKey(key, value); + } + void setCopy(const LowerCaseString& key, absl::string_view value) override { + HeaderMapImpl::setCopy(key, value); + } + uint64_t byteSize() const override { return HeaderMapImpl::byteSize(); } + const HeaderEntry* get(const LowerCaseString& key) const override { + return HeaderMapImpl::get(key); + } + void iterate(HeaderMap::ConstIterateCb cb, void* context) const override { + HeaderMapImpl::iterate(cb, context); + } + void iterateReverse(HeaderMap::ConstIterateCb cb, void* context) const override { + HeaderMapImpl::iterateReverse(cb, context); + } + HeaderMap::Lookup lookup(const LowerCaseString& key, const HeaderEntry** entry) const override { + return HeaderMapImpl::lookup(key, entry); + } + void clear() override { HeaderMapImpl::clear(); } + size_t remove(const LowerCaseString& key) override { return HeaderMapImpl::remove(key); } + size_t removePrefix(const LowerCaseString& key) override { + return HeaderMapImpl::removePrefix(key); + } + size_t size() const override { return HeaderMapImpl::size(); } + bool empty() const override { return HeaderMapImpl::empty(); } + void dumpState(std::ostream& os, int indent_level = 0) const override { + HeaderMapImpl::dumpState(os, indent_level); + } + + // Generic custom header functions for each fully typed 
interface. To avoid accidental issues, + // the Handle type is different for each interface, which is why these functions live here vs. + // inside HeaderMapImpl. + using Handle = CustomInlineHeaderRegistry::Handle; + const HeaderEntry* getInline(Handle handle) const override { + ASSERT(handle.it_->second < inlineHeadersSize()); + return constInlineHeaders()[handle.it_->second]; + } + void appendInline(Handle handle, absl::string_view data, absl::string_view delimiter) override { + ASSERT(handle.it_->second < inlineHeadersSize()); + HeaderEntry& entry = maybeCreateInline(&inlineHeaders()[handle.it_->second], handle.it_->first); + addSize(HeaderMapImpl::appendToHeader(entry.value(), data, delimiter)); + } + void setReferenceInline(Handle handle, absl::string_view value) override { + ASSERT(handle.it_->second < inlineHeadersSize()); + HeaderEntry& entry = maybeCreateInline(&inlineHeaders()[handle.it_->second], handle.it_->first); + updateSize(entry.value().size(), value.size()); + entry.value().setReference(value); + } + void setInline(Handle handle, absl::string_view value) override { + ASSERT(handle.it_->second < inlineHeadersSize()); + HeaderEntry& entry = maybeCreateInline(&inlineHeaders()[handle.it_->second], handle.it_->first); + updateSize(entry.value().size(), value.size()); + entry.value().setCopy(value); + } + void setInline(Handle handle, uint64_t value) override { + ASSERT(handle.it_->second < inlineHeadersSize()); + HeaderEntry& entry = maybeCreateInline(&inlineHeaders()[handle.it_->second], handle.it_->first); + subtractSize(entry.value().size()); + entry.value().setInteger(value); + addSize(entry.value().size()); + } + size_t removeInline(Handle handle) override { + ASSERT(handle.it_->second < inlineHeadersSize()); + return HeaderMapImpl::removeInline(&inlineHeaders()[handle.it_->second]); + } + static size_t inlineHeadersSize() { + return StaticLookupTable::size() * sizeof(HeaderEntryImpl*); + } + +protected: + absl::optional staticLookup(absl::string_view key) override { + return StaticLookupTable::lookup(*this, key); + } + virtual const HeaderEntryImpl* const* constInlineHeaders() const PURE; +}; + +#define DEFINE_HEADER_HANDLE(name) \ + Handle name = \ + CustomInlineHeaderRegistry::getInlineHeader(Headers::get().name).value(); + +/** + * Concrete implementation of RequestHeaderMap which allows for variable custom registered inline + * headers. */ -class RequestHeaderMapImpl : public HeaderMapImpl, public RequestHeaderMap { +class RequestHeaderMapImpl final : public TypedHeaderMapImpl, + public InlineStorage { public: + static std::unique_ptr create() { + return std::unique_ptr(new (inlineHeadersSize()) RequestHeaderMapImpl()); + } + INLINE_REQ_HEADERS(DEFINE_INLINE_HEADER_FUNCS) INLINE_REQ_RESP_HEADERS(DEFINE_INLINE_HEADER_FUNCS) protected: - // Explicit inline headers for the request header map. - // TODO(mattklein123): This is mostly copied between all of the concrete header map types. - // In a future change we can either get rid of O(1) headers completely, or it should be possible - // to statically register all O(1) headers and move to a single dynamically sized class where we - // we reference the O(1) headers in the table by an offset. 
- struct AllInlineHeaders { - AllInlineHeaders() { clear(); } - void clear() { memset(this, 0, sizeof(*this)); } - - INLINE_REQ_HEADERS(DEFINE_INLINE_HEADER_STRUCT) - INLINE_REQ_RESP_HEADERS(DEFINE_INLINE_HEADER_STRUCT) + // NOTE: Because inline_headers_ is a variable size member, it must be the last member in the + // most derived class. This forces the definition of the following three functions to also be + // in the most derived class and thus duplicated. There may be a way to consolidate thus but it's + // not clear and can be deferred for now. + void clearInline() override { memset(inline_headers_, 0, inlineHeadersSize()); } + const HeaderEntryImpl* const* constInlineHeaders() const override { return inline_headers_; } + HeaderEntryImpl** inlineHeaders() override { return inline_headers_; } + +private: + struct HeaderHandleValues { + INLINE_REQ_HEADERS(DEFINE_HEADER_HANDLE) + INLINE_REQ_RESP_HEADERS(DEFINE_HEADER_HANDLE) }; - absl::optional staticLookup(absl::string_view key) override { - return StaticLookupTable::lookup(*this, key); - } - void clearInline() override { inline_headers_.clear(); } + using HeaderHandles = ConstSingleton; - AllInlineHeaders inline_headers_; + RequestHeaderMapImpl() { clearInline(); } - friend class HeaderMapImpl; + HeaderEntryImpl* inline_headers_[]; }; -class RequestTrailerMapImpl : public HeaderMapImpl, public RequestTrailerMap {}; +/** + * Concrete implementation of RequestTrailerMap which allows for variable custom registered inline + * headers. + */ +class RequestTrailerMapImpl final : public TypedHeaderMapImpl, + public InlineStorage { +public: + static std::unique_ptr create() { + return std::unique_ptr(new (inlineHeadersSize()) + RequestTrailerMapImpl()); + } + +protected: + // See comment in RequestHeaderMapImpl. + void clearInline() override { memset(inline_headers_, 0, inlineHeadersSize()); } + const HeaderEntryImpl* const* constInlineHeaders() const override { return inline_headers_; } + HeaderEntryImpl** inlineHeaders() override { return inline_headers_; } -class ResponseHeaderMapImpl : public HeaderMapImpl, public ResponseHeaderMap { +private: + RequestTrailerMapImpl() { clearInline(); } + + HeaderEntryImpl* inline_headers_[]; +}; + +/** + * Concrete implementation of ResponseHeaderMap which allows for variable custom registered inline + * headers. + */ +class ResponseHeaderMapImpl final : public TypedHeaderMapImpl, + public InlineStorage { public: + static std::unique_ptr create() { + return std::unique_ptr(new (inlineHeadersSize()) + ResponseHeaderMapImpl()); + } + INLINE_RESP_HEADERS(DEFINE_INLINE_HEADER_FUNCS) INLINE_REQ_RESP_HEADERS(DEFINE_INLINE_HEADER_FUNCS) INLINE_RESP_HEADERS_TRAILERS(DEFINE_INLINE_HEADER_FUNCS) protected: - // Explicit inline headers for the response header map. - // TODO(mattklein123): This is mostly copied between all of the concrete header map types. - // In a future change we can either get rid of O(1) headers completely, or it should be possible - // to statically register all O(1) headers and move to a single dynamically sized class where we - // we reference the O(1) headers in the table by an offset. - struct AllInlineHeaders { - AllInlineHeaders() { clear(); } - void clear() { memset(this, 0, sizeof(*this)); } - - INLINE_RESP_HEADERS(DEFINE_INLINE_HEADER_STRUCT) - INLINE_REQ_RESP_HEADERS(DEFINE_INLINE_HEADER_STRUCT) - INLINE_RESP_HEADERS_TRAILERS(DEFINE_INLINE_HEADER_STRUCT) + // See comment in RequestHeaderMapImpl. 
+ void clearInline() override { memset(inline_headers_, 0, inlineHeadersSize()); } + const HeaderEntryImpl* const* constInlineHeaders() const override { return inline_headers_; } + HeaderEntryImpl** inlineHeaders() override { return inline_headers_; } + +private: + struct HeaderHandleValues { + INLINE_RESP_HEADERS(DEFINE_HEADER_HANDLE) + INLINE_REQ_RESP_HEADERS(DEFINE_HEADER_HANDLE) + INLINE_RESP_HEADERS_TRAILERS(DEFINE_HEADER_HANDLE) }; - absl::optional staticLookup(absl::string_view key) override { - return StaticLookupTable::lookup(*this, key); - } - void clearInline() override { inline_headers_.clear(); } + using HeaderHandles = ConstSingleton; - AllInlineHeaders inline_headers_; + ResponseHeaderMapImpl() { clearInline(); } - friend class HeaderMapImpl; + HeaderEntryImpl* inline_headers_[]; }; -class ResponseTrailerMapImpl : public HeaderMapImpl, public ResponseTrailerMap { +/** + * Concrete implementation of ResponseTrailerMap which allows for variable custom registered + * inline headers. + */ +class ResponseTrailerMapImpl final : public TypedHeaderMapImpl, + public InlineStorage { public: + static std::unique_ptr create() { + return std::unique_ptr(new (inlineHeadersSize()) + ResponseTrailerMapImpl()); + } + INLINE_RESP_HEADERS_TRAILERS(DEFINE_INLINE_HEADER_FUNCS) protected: - // Explicit inline headers for the response trailer map. - // TODO(mattklein123): This is mostly copied between all of the concrete header map types. - // In a future change we can either get rid of O(1) headers completely, or it should be possible - // to statically register all O(1) headers and move to a single dynamically sized class where we - // reference the O(1) headers in the table by an offset. - struct AllInlineHeaders { - AllInlineHeaders() { clear(); } - void clear() { memset(this, 0, sizeof(*this)); } - - INLINE_RESP_HEADERS_TRAILERS(DEFINE_INLINE_HEADER_STRUCT) + // See comment in RequestHeaderMapImpl. + void clearInline() override { memset(inline_headers_, 0, inlineHeadersSize()); } + const HeaderEntryImpl* const* constInlineHeaders() const override { return inline_headers_; } + HeaderEntryImpl** inlineHeaders() override { return inline_headers_; } + +private: + struct HeaderHandleValues { + INLINE_RESP_HEADERS_TRAILERS(DEFINE_HEADER_HANDLE) }; - absl::optional staticLookup(absl::string_view key) override { - return StaticLookupTable::lookup(*this, key); - } - void clearInline() override { inline_headers_.clear(); } + using HeaderHandles = ConstSingleton; - AllInlineHeaders inline_headers_; + ResponseTrailerMapImpl() { clearInline(); } - friend class HeaderMapImpl; + HeaderEntryImpl* inline_headers_[]; }; template std::unique_ptr createHeaderMap(const std::initializer_list>& values) { - auto new_header_map = std::make_unique(); + auto new_header_map = T::create(); HeaderMapImpl::initFromInitList(*new_header_map, values.begin(), values.end()); return new_header_map; } template std::unique_ptr createHeaderMap(It begin, It end) { - auto new_header_map = std::make_unique(); + auto new_header_map = T::create(); HeaderMapImpl::initFromInitList(*new_header_map, begin, end); return new_header_map; } @@ -355,10 +519,36 @@ template std::unique_ptr createHeaderMap(const HeaderMap& rhs) { // a few places when dealing with gRPC headers/trailers conversions so it's not trivial to remove. // We should revisit this to figure how to make this a bit safer as a non-intentional conversion // may have surprising results with different O(1) headers, implementations, etc. 
- auto new_header_map = std::make_unique(); + auto new_header_map = T::create(); HeaderMapImpl::copyFrom(*new_header_map, rhs); return new_header_map; } +struct EmptyHeaders { + RequestHeaderMapPtr request_headers = RequestHeaderMapImpl::create(); + ResponseHeaderMapPtr response_headers = ResponseHeaderMapImpl::create(); + ResponseTrailerMapPtr response_trailers = ResponseTrailerMapImpl::create(); +}; + +using StaticEmptyHeaders = ConstSingleton; + +class HeaderMapImplUtility { +public: + struct HeaderMapImplInfo { + // Human readable name for the header map used in info logging. + std::string name_; + // The byte size of the header map including both fixed space as well as variable space used + // by the registered custom headers. + size_t size_; + // All registered custom headers for the header map. + std::vector registered_headers_; + }; + + /** + * Fetch detailed information about each header map implementation for use in logging. + */ + static std::vector getAllHeaderMapImplInfo(); +}; + } // namespace Http } // namespace Envoy diff --git a/source/common/http/http1/codec_impl.h b/source/common/http/http1/codec_impl.h index 5ebc73b8363d..d96a175722a8 100644 --- a/source/common/http/http1/codec_impl.h +++ b/source/common/http/http1/codec_impl.h @@ -490,12 +490,12 @@ class ServerConnectionImpl : public ServerConnection, public ConnectionImpl { } void allocHeaders() override { ASSERT(nullptr == absl::get(headers_or_trailers_)); - headers_or_trailers_.emplace(std::make_unique()); + headers_or_trailers_.emplace(RequestHeaderMapImpl::create()); } void maybeAllocTrailers() override { ASSERT(processing_trailers_); if (!absl::holds_alternative(headers_or_trailers_)) { - headers_or_trailers_.emplace(std::make_unique()); + headers_or_trailers_.emplace(RequestTrailerMapImpl::create()); } } @@ -573,13 +573,12 @@ class ClientConnectionImpl : public ClientConnection, public ConnectionImpl { } void allocHeaders() override { ASSERT(nullptr == absl::get(headers_or_trailers_)); - headers_or_trailers_.emplace(std::make_unique()); + headers_or_trailers_.emplace(ResponseHeaderMapImpl::create()); } void maybeAllocTrailers() override { ASSERT(processing_trailers_); if (!absl::holds_alternative(headers_or_trailers_)) { - headers_or_trailers_.emplace( - std::make_unique()); + headers_or_trailers_.emplace(ResponseTrailerMapImpl::create()); } } diff --git a/source/common/http/http2/codec_impl.cc b/source/common/http/http2/codec_impl.cc index b2c2e6e8ded6..24e8d8ebbebf 100644 --- a/source/common/http/http2/codec_impl.cc +++ b/source/common/http/http2/codec_impl.cc @@ -205,7 +205,7 @@ void ConnectionImpl::StreamImpl::encodeTrailersBase(const HeaderMap& trailers) { // In this case we want trailers to come after we release all pending body data that is // waiting on window updates. We need to save the trailers so that we can emit them later. 
ASSERT(!pending_trailers_to_encode_); - pending_trailers_to_encode_ = createHeaderMap(trailers); + pending_trailers_to_encode_ = cloneTrailers(trailers); } else { submitTrailers(trailers); parent_.sendPendingFrames(); diff --git a/source/common/http/http2/codec_impl.h b/source/common/http/http2/codec_impl.h index f2914433ffd0..7f8e26a31d34 100644 --- a/source/common/http/http2/codec_impl.h +++ b/source/common/http/http2/codec_impl.h @@ -217,6 +217,7 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable()) {} + headers_or_trailers_(ResponseHeaderMapImpl::create()) {} // StreamImpl void submitHeaders(const std::vector& final_headers, @@ -328,13 +329,14 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable( - std::make_unique()); + headers_or_trailers_.emplace(ResponseHeaderMapImpl::create()); } else { - headers_or_trailers_.emplace( - std::make_unique()); + headers_or_trailers_.emplace(ResponseTrailerMapImpl::create()); } } + HeaderMapPtr cloneTrailers(const HeaderMap& trailers) override { + return createHeaderMap(trailers); + } // RequestEncoder void encodeHeaders(const RequestHeaderMap& headers, bool end_stream) override; @@ -354,8 +356,7 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable()) {} + : StreamImpl(parent, buffer_limit), headers_or_trailers_(RequestHeaderMapImpl::create()) {} // StreamImpl void submitHeaders(const std::vector& final_headers, @@ -371,7 +372,10 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable(std::make_unique()); + headers_or_trailers_.emplace(RequestTrailerMapImpl::create()); + } + HeaderMapPtr cloneTrailers(const HeaderMap& trailers) override { + return createHeaderMap(trailers); } // ResponseEncoder diff --git a/source/common/http/message_impl.h b/source/common/http/message_impl.h index a42a0a2afd5d..698c51824a06 100644 --- a/source/common/http/message_impl.h +++ b/source/common/http/message_impl.h @@ -19,7 +19,7 @@ template class MessageImpl : public Message { public: - MessageImpl() : headers_(std::make_unique()) {} + MessageImpl() : headers_(HeadersImplType::create()) {} MessageImpl(std::unique_ptr&& headers) : headers_(std::move(headers)) {} // Http::Message diff --git a/source/common/local_reply/local_reply.cc b/source/common/local_reply/local_reply.cc index 2c18db1ff4b5..9574de79c6fd 100644 --- a/source/common/local_reply/local_reply.cc +++ b/source/common/local_reply/local_reply.cc @@ -12,16 +12,6 @@ namespace Envoy { namespace LocalReply { -namespace { - -struct EmptyHeaders { - Http::RequestHeaderMapImpl request_headers; - Http::ResponseTrailerMapImpl response_trailers; -}; - -using StaticEmptyHeaders = ConstSingleton; - -} // namespace class BodyFormatter { public: @@ -137,14 +127,14 @@ class LocalReplyImpl : public LocalReply { stream_info.response_code_ = static_cast(code); if (request_headers == nullptr) { - request_headers = &StaticEmptyHeaders::get().request_headers; + request_headers = Http::StaticEmptyHeaders::get().request_headers.get(); } BodyFormatter* final_formatter{}; for (const auto& mapper : mappers_) { if (mapper->matchAndRewrite(*request_headers, response_headers, - StaticEmptyHeaders::get().response_trailers, stream_info, code, - body, final_formatter)) { + *Http::StaticEmptyHeaders::get().response_trailers, stream_info, + code, body, final_formatter)) { break; } } @@ -153,8 +143,8 @@ class LocalReplyImpl : public LocalReply { final_formatter = body_formatter_.get(); } return final_formatter->format(*request_headers, 
response_headers, - StaticEmptyHeaders::get().response_trailers, stream_info, body, - content_type); + *Http::StaticEmptyHeaders::get().response_trailers, stream_info, + body, content_type); } private: diff --git a/source/common/router/header_formatter.cc b/source/common/router/header_formatter.cc index 5793dcfec99e..4e12d6fd8712 100644 --- a/source/common/router/header_formatter.cc +++ b/source/common/router/header_formatter.cc @@ -323,14 +323,13 @@ StreamInfoHeaderFormatter::StreamInfoHeaderFormatter(absl::string_view field_nam } field_extractor_ = [this, pattern](const Envoy::StreamInfo::StreamInfo& stream_info) { const auto& formatters = start_time_formatters_.at(pattern); - static const Http::RequestHeaderMapImpl empty_request_headers; - static const Http::ResponseHeaderMapImpl empty_response_headers; - static const Http::ResponseTrailerMapImpl empty_response_trailers; std::string formatted; for (const auto& formatter : formatters) { - absl::StrAppend(&formatted, formatter->format(empty_request_headers, empty_response_headers, - empty_response_trailers, stream_info, - absl::string_view())); + absl::StrAppend(&formatted, + formatter->format(*Http::StaticEmptyHeaders::get().request_headers, + *Http::StaticEmptyHeaders::get().response_headers, + *Http::StaticEmptyHeaders::get().response_trailers, + stream_info, absl::string_view())); } return formatted; }; diff --git a/source/common/router/rds_impl.cc b/source/common/router/rds_impl.cc index 3c4baddf6cee..8a041ce0f12c 100644 --- a/source/common/router/rds_impl.cc +++ b/source/common/router/rds_impl.cc @@ -280,9 +280,9 @@ void RdsRouteConfigProviderImpl::onConfigUpdate() { auto found = aliases.find(it->alias_); if (found != aliases.end()) { // TODO(dmitri-d) HeaderMapImpl is expensive, need to profile this - Http::RequestHeaderMapImpl host_header; - host_header.setHost(VhdsSubscription::aliasToDomainName(it->alias_)); - const bool host_exists = config->virtualHostExists(host_header); + auto host_header = Http::RequestHeaderMapImpl::create(); + host_header->setHost(VhdsSubscription::aliasToDomainName(it->alias_)); + const bool host_exists = config->virtualHostExists(*host_header); std::weak_ptr current_cb(it->cb_); it->thread_local_dispatcher_.post([current_cb, host_exists] { if (auto cb = current_cb.lock()) { diff --git a/source/docs/header_map.md b/source/docs/header_map.md new file mode 100644 index 000000000000..59ae9db78621 --- /dev/null +++ b/source/docs/header_map.md @@ -0,0 +1,30 @@ +# Header map implementation overview + +The Envoy header map implementation (`HeaderMapImpl`) has the following properties: +* Headers are stored in a linked list (`HeaderList`) in the order they are added, with pseudo + headers kept at the front of the list. +* O(1) direct access is possible for common headers needed during data plane processing. This is + provided by a table of pointers that reach directly into a linked list that is populated when + headers are added or removed from the map. When O(1) headers are accessed by direct method + (`DEFINE_INLINE_HEADER` and `CustomInlineHeaderBase`) they use direct pointer access to see + whether a header is present, add it, modify it, etc. When headers are added by name a trie is used to lookup the pointer in the table (`StaticLookupTable`). +* Custom headers can be registered statically against a specific implementation (request headers, + request trailers, response headers, and response trailers) via core code and extensions + (`CustomInlineHeaderRegistry`). 
Each registered header increases the size of the table by the size of a single pointer. +* Operations that search, replace, etc. for a header by name that is not one of the O(1) headers + will incur an O(N) search through the linked list. This is an implementation deficiency for + certain usage patterns that will be improved in future changes. + +## Implementation details + +* O(1) registered headers are tracked during static initialization via the `CustomInlineHeaderBase` + class. +* The first time a header map is constructed (in practice this is after bootstrap load and the + Envoy header prefix is finalized when `getAllHeaderMapImplInfo` is called), the + `StaticLookupTable` is finalized for each header map type. No further changes are possible after + this point. The `StaticLookupTable` defines the amount of variable pointer table space that is + required for each header map type. +* Each concrete header map type derives from `InlineStorage` with a variable length member at the + end of the definition. +* Each concrete header map type uses a factory function and a private constructor. The required + size is determined via the `inlineHeadersSize` function. \ No newline at end of file diff --git a/source/exe/main_common.cc b/source/exe/main_common.cc index 245c8bace630..85f1cad8919b 100644 --- a/source/exe/main_common.cc +++ b/source/exe/main_common.cc @@ -178,10 +178,10 @@ void MainCommonBase::adminRequest(absl::string_view path_and_query, absl::string std::string path_and_query_buf = std::string(path_and_query); std::string method_buf = std::string(method); server_->dispatcher().post([this, path_and_query_buf, method_buf, handler]() { - Http::ResponseHeaderMapImpl response_headers; + auto response_headers = Http::ResponseHeaderMapImpl::create(); std::string body; - server_->admin().request(path_and_query_buf, method_buf, response_headers, body); - handler(response_headers, body); + server_->admin().request(path_and_query_buf, method_buf, *response_headers, body); + handler(*response_headers, body); }); } diff --git a/source/extensions/access_loggers/common/access_log_base.cc b/source/extensions/access_loggers/common/access_log_base.cc index 99c3fa9e12f5..0323013d6350 100644 --- a/source/extensions/access_loggers/common/access_log_base.cc +++ b/source/extensions/access_loggers/common/access_log_base.cc @@ -12,17 +12,14 @@ void ImplBase::log(const Http::RequestHeaderMap* request_headers, const Http::ResponseHeaderMap* response_headers, const Http::ResponseTrailerMap* response_trailers, const StreamInfo::StreamInfo& stream_info) { - ConstSingleton empty_request_headers; - ConstSingleton empty_response_headers; - ConstSingleton empty_response_trailers; if (!request_headers) { - request_headers = &empty_request_headers.get(); + request_headers = Http::StaticEmptyHeaders::get().request_headers.get(); } if (!response_headers) { - response_headers = &empty_response_headers.get(); + response_headers = Http::StaticEmptyHeaders::get().response_headers.get(); } if (!response_trailers) { - response_trailers = &empty_response_trailers.get(); + response_trailers = Http::StaticEmptyHeaders::get().response_trailers.get(); } if (filter_ && !filter_->evaluate(stream_info, *request_headers, *response_headers, *response_trailers)) { diff --git a/source/extensions/filters/common/expr/context.cc b/source/extensions/filters/common/expr/context.cc index 44929440b70d..aa132bea068a 100644 --- a/source/extensions/filters/common/expr/context.cc +++ b/source/extensions/filters/common/expr/context.cc @@ -136,8 +136,8
@@ absl::optional ResponseWrapper::operator[](CelValue key) const { return CelValue::CreateInt64(info_.responseFlags()); } else if (value == GrpcStatus) { auto const& optional_status = Grpc::Common::getGrpcStatus( - trailers_.value_ ? *trailers_.value_ : ConstSingleton::get(), - headers_.value_ ? *headers_.value_ : ConstSingleton::get(), + trailers_.value_ ? *trailers_.value_ : *Http::StaticEmptyHeaders::get().response_trailers, + headers_.value_ ? *headers_.value_ : *Http::StaticEmptyHeaders::get().response_headers, info_); if (optional_status.has_value()) { return CelValue::CreateInt64(optional_status.value()); diff --git a/source/extensions/filters/common/ratelimit/ratelimit_impl.cc b/source/extensions/filters/common/ratelimit/ratelimit_impl.cc index 2df6f8445deb..100e75338b4f 100644 --- a/source/extensions/filters/common/ratelimit/ratelimit_impl.cc +++ b/source/extensions/filters/common/ratelimit/ratelimit_impl.cc @@ -78,14 +78,14 @@ void GrpcClientImpl::onSuccess( Http::ResponseHeaderMapPtr response_headers_to_add; Http::RequestHeaderMapPtr request_headers_to_add; if (!response->response_headers_to_add().empty()) { - response_headers_to_add = std::make_unique(); + response_headers_to_add = Http::ResponseHeaderMapImpl::create(); for (const auto& h : response->response_headers_to_add()) { response_headers_to_add->addCopy(Http::LowerCaseString(h.key()), h.value()); } } if (!response->request_headers_to_add().empty()) { - request_headers_to_add = std::make_unique(); + request_headers_to_add = Http::RequestHeaderMapImpl::create(); for (const auto& h : response->request_headers_to_add()) { request_headers_to_add->addCopy(Http::LowerCaseString(h.key()), h.value()); } diff --git a/source/extensions/filters/common/rbac/engine_impl.cc b/source/extensions/filters/common/rbac/engine_impl.cc index bd8a0a9cd0ca..d9717ef509c0 100644 --- a/source/extensions/filters/common/rbac/engine_impl.cc +++ b/source/extensions/filters/common/rbac/engine_impl.cc @@ -51,8 +51,8 @@ bool RoleBasedAccessControlEngineImpl::allowed(const Network::Connection& connec bool RoleBasedAccessControlEngineImpl::allowed(const Network::Connection& connection, const StreamInfo::StreamInfo& info, std::string* effective_policy_id) const { - static const Http::RequestHeaderMapImpl* empty_header = new Http::RequestHeaderMapImpl(); - return allowed(connection, *empty_header, info, effective_policy_id); + return allowed(connection, *Http::StaticEmptyHeaders::get().request_headers, info, + effective_policy_id); } } // namespace RBAC diff --git a/source/extensions/filters/http/cors/cors_filter.cc b/source/extensions/filters/http/cors/cors_filter.cc index e482ca565b33..103ee87277e1 100644 --- a/source/extensions/filters/http/cors/cors_filter.cc +++ b/source/extensions/filters/http/cors/cors_filter.cc @@ -1,6 +1,7 @@ #include "extensions/filters/http/cors/cors_filter.h" #include "envoy/http/codes.h" +#include "envoy/http/header_map.h" #include "envoy/stats/scope.h" #include "common/common/empty_string.h" @@ -13,6 +14,21 @@ namespace Extensions { namespace HttpFilters { namespace Cors { +Http::RegisterCustomInlineHeader + access_control_request_method(Http::Headers::get().AccessControlRequestMethod); +Http::RegisterCustomInlineHeader + access_control_allow_origin(Http::Headers::get().AccessControlAllowOrigin); +Http::RegisterCustomInlineHeader + access_control_allow_credentials(Http::Headers::get().AccessControlAllowCredentials); +Http::RegisterCustomInlineHeader + 
access_control_allow_methods(Http::Headers::get().AccessControlAllowMethods); +Http::RegisterCustomInlineHeader + access_control_allow_headers(Http::Headers::get().AccessControlAllowHeaders); +Http::RegisterCustomInlineHeader + access_control_max_age(Http::Headers::get().AccessControlMaxAge); +Http::RegisterCustomInlineHeader + access_control_expose_headers(Http::Headers::get().AccessControlExposeHeaders); + CorsFilterConfig::CorsFilterConfig(const std::string& stats_prefix, Stats::Scope& scope) : stats_(generateStats(stats_prefix + "cors.", scope)) {} @@ -58,31 +74,31 @@ Http::FilterHeadersStatus CorsFilter::decodeHeaders(Http::RequestHeaderMap& head return Http::FilterHeadersStatus::Continue; } - const auto requestMethod = headers.AccessControlRequestMethod(); - if (requestMethod == nullptr || requestMethod->value().empty()) { + if (headers.getInlineValue(access_control_request_method.handle()).empty()) { return Http::FilterHeadersStatus::Continue; } auto response_headers{Http::createHeaderMap( {{Http::Headers::get().Status, std::to_string(enumToInt(Http::Code::OK))}})}; - response_headers->setAccessControlAllowOrigin(origin_->value().getStringView()); + response_headers->setInline(access_control_allow_origin.handle(), + origin_->value().getStringView()); if (allowCredentials()) { - response_headers->setReferenceAccessControlAllowCredentials( - Http::Headers::get().CORSValues.True); + response_headers->setReferenceInline(access_control_allow_credentials.handle(), + Http::Headers::get().CORSValues.True); } if (!allowMethods().empty()) { - response_headers->setAccessControlAllowMethods(allowMethods()); + response_headers->setInline(access_control_allow_methods.handle(), allowMethods()); } if (!allowHeaders().empty()) { - response_headers->setAccessControlAllowHeaders(allowHeaders()); + response_headers->setInline(access_control_allow_headers.handle(), allowHeaders()); } if (!maxAge().empty()) { - response_headers->setAccessControlMaxAge(maxAge()); + response_headers->setInline(access_control_max_age.handle(), maxAge()); } decoder_callbacks_->encodeHeaders(std::move(response_headers), true); @@ -97,13 +113,14 @@ Http::FilterHeadersStatus CorsFilter::encodeHeaders(Http::ResponseHeaderMap& hea return Http::FilterHeadersStatus::Continue; } - headers.setAccessControlAllowOrigin(origin_->value().getStringView()); + headers.setInline(access_control_allow_origin.handle(), origin_->value().getStringView()); if (allowCredentials()) { - headers.setReferenceAccessControlAllowCredentials(Http::Headers::get().CORSValues.True); + headers.setReferenceInline(access_control_allow_credentials.handle(), + Http::Headers::get().CORSValues.True); } if (!exposeHeaders().empty()) { - headers.setAccessControlExposeHeaders(exposeHeaders()); + headers.setInline(access_control_expose_headers.handle(), exposeHeaders()); } return Http::FilterHeadersStatus::Continue; diff --git a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc index d225304a1320..3f04c64f1b35 100644 --- a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc +++ b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc @@ -719,7 +719,10 @@ bool JsonTranscoderFilter::maybeConvertGrpcStatus(Grpc::Status::GrpcStatus grpc_ return false; } - auto status_details = Grpc::Common::getGrpcStatusDetailsBin(trailers); + // TODO(mattklein123): The dynamic cast here is needed because ResponseHeaderOrTrailerMap is 
not + // a header map. This can likely be cleaned up. + auto status_details = + Grpc::Common::getGrpcStatusDetailsBin(dynamic_cast(trailers)); if (!status_details) { // If no rpc.Status object was sent in the grpc-status-details-bin header, // construct it from the grpc-status and grpc-message headers. diff --git a/source/extensions/filters/http/jwt_authn/filter.cc b/source/extensions/filters/http/jwt_authn/filter.cc index 8cc7d85e56ac..36b49855a068 100644 --- a/source/extensions/filters/http/jwt_authn/filter.cc +++ b/source/extensions/filters/http/jwt_authn/filter.cc @@ -16,11 +16,13 @@ namespace JwtAuthn { namespace { +Http::RegisterCustomInlineHeader + access_control_request_method(Http::Headers::get().AccessControlRequestMethod); + bool isCorsPreflightRequest(const Http::RequestHeaderMap& headers) { return headers.getMethodValue() == Http::Headers::get().MethodValues.Options && headers.Origin() && !headers.Origin()->value().empty() && - headers.AccessControlRequestMethod() && - !headers.AccessControlRequestMethod()->value().empty(); + !headers.getInlineValue(access_control_request_method.handle()).empty(); } } // namespace diff --git a/source/extensions/filters/http/lua/lua_filter.cc b/source/extensions/filters/http/lua/lua_filter.cc index 3429627624d4..d63c109fd647 100644 --- a/source/extensions/filters/http/lua/lua_filter.cc +++ b/source/extensions/filters/http/lua/lua_filter.cc @@ -115,7 +115,7 @@ Http::AsyncClient::Request* makeHttpCall(lua_State* state, Filter& filter, luaL_error(state, "http call cluster invalid. Must be configured"); } - auto headers = std::make_unique(); + auto headers = Http::RequestHeaderMapImpl::create(); buildHeadersFromTable(*headers, state, 3); Http::RequestMessagePtr message(new Http::RequestMessageImpl(std::move(headers))); @@ -240,7 +240,7 @@ int StreamHandleWrapper::luaRespond(lua_State* state) { luaL_checktype(state, 2, LUA_TTABLE); size_t body_size; const char* raw_body = luaL_optlstring(state, 3, nullptr, &body_size); - auto headers = std::make_unique(); + auto headers = Http::ResponseHeaderMapImpl::create(); buildHeadersFromTable(*headers, state, 2); uint64_t status; diff --git a/source/extensions/filters/http/ratelimit/ratelimit.cc b/source/extensions/filters/http/ratelimit/ratelimit.cc index cc4b8dd5bb77..69075249162d 100644 --- a/source/extensions/filters/http/ratelimit/ratelimit.cc +++ b/source/extensions/filters/http/ratelimit/ratelimit.cc @@ -154,7 +154,7 @@ void Filter::complete(Filters::Common::RateLimit::LimitStatus status, false}; httpContext().codeStats().chargeResponseStat(info); if (response_headers_to_add_ == nullptr) { - response_headers_to_add_ = std::make_unique(); + response_headers_to_add_ = Http::ResponseHeaderMapImpl::create(); } response_headers_to_add_->setReferenceEnvoyRateLimited( Http::Headers::get().EnvoyRateLimitedValues.True); diff --git a/source/extensions/filters/network/dubbo_proxy/serializer_impl.h b/source/extensions/filters/network/dubbo_proxy/serializer_impl.h index 983843c6f7fe..cec6ac1a0252 100644 --- a/source/extensions/filters/network/dubbo_proxy/serializer_impl.h +++ b/source/extensions/filters/network/dubbo_proxy/serializer_impl.h @@ -14,8 +14,6 @@ class RpcInvocationImpl : public RpcInvocationBase { using ParameterValueMap = std::unordered_map; using ParameterValueMapPtr = std::unique_ptr; - using HeaderMapPtr = std::unique_ptr; - RpcInvocationImpl() = default; ~RpcInvocationImpl() override = default; @@ -32,7 +30,7 @@ class RpcInvocationImpl : public RpcInvocationBase { private: inline void 
assignHeaderIfNeed() { if (!headers_) { - headers_ = std::make_unique(); + headers_ = Http::RequestHeaderMapImpl::create(); } } @@ -43,7 +41,7 @@ class RpcInvocationImpl : public RpcInvocationBase { } ParameterValueMapPtr parameter_map_; - HeaderMapPtr headers_; // attachment + Http::HeaderMapPtr headers_; // attachment }; class RpcResultImpl : public RpcResult { diff --git a/source/extensions/filters/network/rocketmq_proxy/metadata.h b/source/extensions/filters/network/rocketmq_proxy/metadata.h index 8fca6ab7811a..ed913a1f92e0 100644 --- a/source/extensions/filters/network/rocketmq_proxy/metadata.h +++ b/source/extensions/filters/network/rocketmq_proxy/metadata.h @@ -25,14 +25,14 @@ class MessageMetadata { /** * @return HeaderMap of current headers */ - const Http::HeaderMap& headers() const { return headers_; } - Http::HeaderMap& headers() { return headers_; } + const Http::HeaderMap& headers() const { return *headers_; } + Http::HeaderMap& headers() { return *headers_; } private: bool is_oneway_{false}; absl::optional topic_name_{}; - Http::HeaderMapImpl headers_; + Http::HeaderMapPtr headers_{Http::RequestHeaderMapImpl::create()}; }; using MessageMetadataSharedPtr = std::shared_ptr; diff --git a/source/extensions/filters/network/thrift_proxy/metadata.h b/source/extensions/filters/network/thrift_proxy/metadata.h index 7ee3e68f297f..525c9fb4ae2a 100644 --- a/source/extensions/filters/network/thrift_proxy/metadata.h +++ b/source/extensions/filters/network/thrift_proxy/metadata.h @@ -54,8 +54,8 @@ class MessageMetadata { /** * @return HeaderMap of current headers (never throws) */ - const Http::HeaderMap& headers() const { return headers_; } - Http::HeaderMap& headers() { return headers_; } + const Http::HeaderMap& headers() const { return *headers_; } + Http::HeaderMap& headers() { return *headers_; } /** * @return SpanList an immutable list of Spans @@ -104,7 +104,7 @@ class MessageMetadata { absl::optional method_name_{}; absl::optional seq_id_{}; absl::optional msg_type_{}; - Http::HeaderMapImpl headers_; + Http::HeaderMapPtr headers_{Http::RequestHeaderMapImpl::create()}; absl::optional app_ex_type_; absl::optional app_ex_msg_; bool protocol_upgrade_message_{false}; diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_utils.h b/source/extensions/quic_listeners/quiche/envoy_quic_utils.h index 3348a1096b5f..eecaa9045d41 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_utils.h +++ b/source/extensions/quic_listeners/quiche/envoy_quic_utils.h @@ -38,7 +38,7 @@ quic::QuicSocketAddress envoyAddressInstanceToQuicSocketAddress( // The returned header map has all keys in lower case. template std::unique_ptr quicHeadersToEnvoyHeaders(const quic::QuicHeaderList& header_list) { - auto headers = std::make_unique(); + auto headers = T::create(); for (const auto& entry : header_list) { // TODO(danzh): Avoid copy by referencing entry as header_list is already validated by QUIC. headers->addCopy(Http::LowerCaseString(entry.first), entry.second); @@ -48,7 +48,7 @@ std::unique_ptr quicHeadersToEnvoyHeaders(const quic::QuicHeaderList& header_ template std::unique_ptr spdyHeaderBlockToEnvoyHeaders(const spdy::SpdyHeaderBlock& header_block) { - auto headers = std::make_unique(); + auto headers = T::create(); for (auto entry : header_block) { // TODO(danzh): Avoid temporary strings and addCopy() with std::string_view. 
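
The recurring change in the hunks above and below — dropping direct construction of HeaderMapImpl subclasses (and std::make_unique of them) in favor of the T::create() factory — follows from those constructors being made private (a later commit message in this series cites #11546); create() presumably gives the implementation control over how the map and its inline-header storage are allocated. A minimal usage sketch, using only calls that already appear in these diffs (the helper function and header name are illustrative, not part of any patch here):

    #include "common/http/header_map_impl.h"

    void exampleUsage() { // illustrative only, not part of the series
      // create() returns a smart pointer, so call sites switch from "." to "->".
      auto headers = Envoy::Http::RequestHeaderMapImpl::create();
      headers->setMethod("GET");
      headers->addCopy(Envoy::Http::LowerCaseString("x-example"), "value");
      // get() still returns a pointer to the entry, or nullptr when absent.
      const auto* entry = headers->get(Envoy::Http::LowerCaseString("x-example"));
      (void)entry;
    }
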
std::string key(entry.first); diff --git a/source/extensions/stat_sinks/hystrix/hystrix.cc b/source/extensions/stat_sinks/hystrix/hystrix.cc index 0596dd4cda41..c2317658a03b 100644 --- a/source/extensions/stat_sinks/hystrix/hystrix.cc +++ b/source/extensions/stat_sinks/hystrix/hystrix.cc @@ -22,6 +22,11 @@ namespace Extensions { namespace StatSinks { namespace Hystrix { +Http::RegisterCustomInlineHeader + access_control_allow_origin(Http::Headers::get().AccessControlAllowOrigin); +Http::RegisterCustomInlineHeader + access_control_allow_headers(Http::Headers::get().AccessControlAllowHeaders); + const uint64_t HystrixSink::DEFAULT_NUM_BUCKETS; ClusterStatsCache::ClusterStatsCache(const std::string& cluster_name) : cluster_name_(cluster_name) {} @@ -290,10 +295,10 @@ Http::Code HystrixSink::handlerHystrixEventStream(absl::string_view, response_headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.TextEventStream); response_headers.setReferenceCacheControl(Http::Headers::get().CacheControlValues.NoCache); response_headers.setReferenceConnection(Http::Headers::get().ConnectionValues.Close); - response_headers.setReferenceAccessControlAllowHeaders( - AccessControlAllowHeadersValue.AllowHeadersHystrix); - response_headers.setReferenceAccessControlAllowOrigin( - Http::Headers::get().AccessControlAllowOriginValue.All); + response_headers.setReferenceInline(access_control_allow_headers.handle(), + AccessControlAllowHeadersValue.AllowHeadersHystrix); + response_headers.setReferenceInline(access_control_allow_origin.handle(), + Http::Headers::get().AccessControlAllowOriginValue.All); Http::StreamDecoderFilterCallbacks& stream_decoder_filter_callbacks = admin_stream.getDecoderFilterCallbacks(); diff --git a/source/server/admin/admin.cc b/source/server/admin/admin.cc index 5be7a9665359..208d1caf7595 100644 --- a/source/server/admin/admin.cc +++ b/source/server/admin/admin.cc @@ -811,9 +811,9 @@ Http::Code AdminImpl::request(absl::string_view path_and_query, absl::string_vie Http::ResponseHeaderMap& response_headers, std::string& body) { AdminFilter filter(createCallbackFunction()); - Http::RequestHeaderMapImpl request_headers; - request_headers.setMethod(method); - filter.decodeHeaders(request_headers, false); + auto request_headers = Http::RequestHeaderMapImpl::create(); + request_headers->setMethod(method); + filter.decodeHeaders(*request_headers, false); Buffer::OwnedImpl response; Http::Code code = runCallback(path_and_query, response_headers, response, filter); diff --git a/source/server/admin/admin_filter.cc b/source/server/admin/admin_filter.cc index 92ac92ad289b..0cfb76839325 100644 --- a/source/server/admin/admin_filter.cc +++ b/source/server/admin/admin_filter.cc @@ -66,7 +66,7 @@ void AdminFilter::onComplete() { ENVOY_STREAM_LOG(debug, "request complete: path: {}", *decoder_callbacks_, path); Buffer::OwnedImpl response; - Http::ResponseHeaderMapPtr header_map{new Http::ResponseHeaderMapImpl}; + auto header_map = Http::ResponseHeaderMapImpl::create(); RELEASE_ASSERT(request_headers_, ""); Http::Code code = admin_server_callback_func_(path, *header_map, response, *this); Utility::populateFallbackResponseHeaders(code, *header_map); diff --git a/source/server/server.cc b/source/server/server.cc index 6b5ea74e0981..5b201c6c1e4c 100644 --- a/source/server/server.cc +++ b/source/server/server.cc @@ -309,6 +309,13 @@ void InstanceImpl::initialize(const Options& options, // setPrefix has a release assert verifying that setPrefix() is not called after prefix() 
ThreadSafeSingleton::get().setPrefix(bootstrap_.header_prefix().c_str()); } + // TODO(mattklein123): Custom O(1) headers can be registered at this point for creating/finalizing + // any header maps. + ENVOY_LOG(info, "HTTP header map info:"); + for (const auto& info : Http::HeaderMapImplUtility::getAllHeaderMapImplInfo()) { + ENVOY_LOG(info, " {}: {} bytes: {}", info.name_, info.size_, + absl::StrJoin(info.registered_headers_, ",")); + } // Needs to happen as early as possible in the instantiation to preempt the objects that require // stats. diff --git a/test/common/http/header_map_impl_fuzz_test.cc b/test/common/http/header_map_impl_fuzz_test.cc index bfd7507e0558..5ab9e79ca2ed 100644 --- a/test/common/http/header_map_impl_fuzz_test.cc +++ b/test/common/http/header_map_impl_fuzz_test.cc @@ -16,7 +16,7 @@ namespace Envoy { // Fuzz the header map implementation. DEFINE_PROTO_FUZZER(const test::common::http::HeaderMapImplFuzzTestCase& input) { - auto header_map = std::make_unique(); + auto header_map = Http::RequestHeaderMapImpl::create(); std::vector> lower_case_strings; std::vector> strings; uint64_t set_integer; @@ -149,7 +149,7 @@ DEFINE_PROTO_FUZZER(const test::common::http::HeaderMapImplFuzzTestCase& input) break; } case test::common::http::Action::kCopy: { - header_map = Http::createHeaderMap(*header_map); + header_map = Http::createHeaderMap(*header_map); break; } case test::common::http::Action::kLookup: { diff --git a/test/common/http/header_map_impl_speed_test.cc b/test/common/http/header_map_impl_speed_test.cc index eb0f5e980a65..1c65c3a19a13 100644 --- a/test/common/http/header_map_impl_speed_test.cc +++ b/test/common/http/header_map_impl_speed_test.cc @@ -16,14 +16,16 @@ static void addDummyHeaders(HeaderMap& headers, size_t num_headers) { } } -/** Measure the construction/destruction speed of HeaderMapImpl.*/ -static void HeaderMapImplCreate(benchmark::State& state) { +/** Measure the construction/destruction speed of RequestHeaderMapImpl.*/ +static void headerMapImplCreate(benchmark::State& state) { + // Make sure first time construction is not counted. + Http::ResponseHeaderMapImpl::create(); for (auto _ : state) { - HeaderMapImpl headers; - benchmark::DoNotOptimize(headers.size()); + auto headers = Http::ResponseHeaderMapImpl::create(); + benchmark::DoNotOptimize(headers->size()); } } -BENCHMARK(HeaderMapImplCreate); +BENCHMARK(headerMapImplCreate); /** * Measure the speed of setting/overwriting a header value. The numeric Arg passed @@ -32,17 +34,17 @@ BENCHMARK(HeaderMapImplCreate); * identify whether the speed of setReference() is dependent on the number of other * headers in the HeaderMapImpl. */ -static void HeaderMapImplSetReference(benchmark::State& state) { +static void headerMapImplSetReference(benchmark::State& state) { const LowerCaseString key("example-key"); const std::string value("01234567890123456789"); - HeaderMapImpl headers; - addDummyHeaders(headers, state.range(0)); + auto headers = Http::ResponseHeaderMapImpl::create(); + addDummyHeaders(*headers, state.range(0)); for (auto _ : state) { - headers.setReference(key, value); + headers->setReference(key, value); } - benchmark::DoNotOptimize(headers.size()); + benchmark::DoNotOptimize(headers->size()); } -BENCHMARK(HeaderMapImplSetReference)->Arg(0)->Arg(1)->Arg(10)->Arg(50); +BENCHMARK(headerMapImplSetReference)->Arg(0)->Arg(1)->Arg(10)->Arg(50); /** * Measure the speed of retrieving a header value. 
The numeric Arg passed by the @@ -52,127 +54,127 @@ BENCHMARK(HeaderMapImplSetReference)->Arg(0)->Arg(1)->Arg(10)->Arg(50); * method depends (or doesn't depend) on the number of other headers in the * HeaderMapImpl. */ -static void HeaderMapImplGet(benchmark::State& state) { +static void headerMapImplGet(benchmark::State& state) { const LowerCaseString key("example-key"); const std::string value("01234567890123456789"); - HeaderMapImpl headers; - addDummyHeaders(headers, state.range(0)); - headers.setReference(key, value); + auto headers = Http::ResponseHeaderMapImpl::create(); + addDummyHeaders(*headers, state.range(0)); + headers->setReference(key, value); size_t successes = 0; for (auto _ : state) { - successes += (headers.get(key) != nullptr); + successes += (headers->get(key) != nullptr); } benchmark::DoNotOptimize(successes); } -BENCHMARK(HeaderMapImplGet)->Arg(0)->Arg(1)->Arg(10)->Arg(50); +BENCHMARK(headerMapImplGet)->Arg(0)->Arg(1)->Arg(10)->Arg(50); /** * Measure the retrieval speed of a header for which HeaderMapImpl is expected to * provide special optimizations. */ -static void HeaderMapImplGetInline(benchmark::State& state) { +static void headerMapImplGetInline(benchmark::State& state) { const std::string value("01234567890123456789"); - RequestHeaderMapImpl headers; - addDummyHeaders(headers, state.range(0)); - headers.setReferenceConnection(value); + auto headers = Http::ResponseHeaderMapImpl::create(); + addDummyHeaders(*headers, state.range(0)); + headers->setReferenceConnection(value); size_t size = 0; for (auto _ : state) { - size += headers.Connection()->value().size(); + size += headers->Connection()->value().size(); } benchmark::DoNotOptimize(size); } -BENCHMARK(HeaderMapImplGetInline)->Arg(0)->Arg(1)->Arg(10)->Arg(50); +BENCHMARK(headerMapImplGetInline)->Arg(0)->Arg(1)->Arg(10)->Arg(50); /** * Measure the speed of writing to a header for which HeaderMapImpl is expected to * provide special optimizations. */ -static void HeaderMapImplSetInlineMacro(benchmark::State& state) { +static void headerMapImplSetInlineMacro(benchmark::State& state) { const std::string value("01234567890123456789"); - RequestHeaderMapImpl headers; - addDummyHeaders(headers, state.range(0)); + auto headers = Http::ResponseHeaderMapImpl::create(); + addDummyHeaders(*headers, state.range(0)); for (auto _ : state) { - headers.setReferenceConnection(value); + headers->setReferenceConnection(value); } - benchmark::DoNotOptimize(headers.size()); + benchmark::DoNotOptimize(headers->size()); } -BENCHMARK(HeaderMapImplSetInlineMacro)->Arg(0)->Arg(1)->Arg(10)->Arg(50); +BENCHMARK(headerMapImplSetInlineMacro)->Arg(0)->Arg(1)->Arg(10)->Arg(50); /** * Measure the speed of writing to a header for which HeaderMapImpl is expected to * provide special optimizations. */ -static void HeaderMapImplSetInlineInteger(benchmark::State& state) { +static void headerMapImplSetInlineInteger(benchmark::State& state) { uint64_t value = 12345; - RequestHeaderMapImpl headers; - addDummyHeaders(headers, state.range(0)); + auto headers = Http::ResponseHeaderMapImpl::create(); + addDummyHeaders(*headers, state.range(0)); for (auto _ : state) { - headers.setConnection(value); + headers->setConnection(value); } - benchmark::DoNotOptimize(headers.size()); + benchmark::DoNotOptimize(headers->size()); } -BENCHMARK(HeaderMapImplSetInlineInteger)->Arg(0)->Arg(1)->Arg(10)->Arg(50); +BENCHMARK(headerMapImplSetInlineInteger)->Arg(0)->Arg(1)->Arg(10)->Arg(50); /** Measure the speed of the byteSize() estimation method. 
*/ -static void HeaderMapImplGetByteSize(benchmark::State& state) { - HeaderMapImpl headers; - addDummyHeaders(headers, state.range(0)); +static void headerMapImplGetByteSize(benchmark::State& state) { + auto headers = Http::ResponseHeaderMapImpl::create(); + addDummyHeaders(*headers, state.range(0)); uint64_t size = 0; for (auto _ : state) { - size += headers.byteSize(); + size += headers->byteSize(); } benchmark::DoNotOptimize(size); } -BENCHMARK(HeaderMapImplGetByteSize)->Arg(0)->Arg(1)->Arg(10)->Arg(50); +BENCHMARK(headerMapImplGetByteSize)->Arg(0)->Arg(1)->Arg(10)->Arg(50); /** Measure the speed of iteration with a lightweight callback. */ -static void HeaderMapImplIterate(benchmark::State& state) { - HeaderMapImpl headers; +static void headerMapImplIterate(benchmark::State& state) { + auto headers = Http::ResponseHeaderMapImpl::create(); size_t num_callbacks = 0; - addDummyHeaders(headers, state.range(0)); + addDummyHeaders(*headers, state.range(0)); auto counting_callback = [](const HeaderEntry&, void* context) -> HeaderMap::Iterate { (*static_cast(context))++; return HeaderMap::Iterate::Continue; }; for (auto _ : state) { - headers.iterate(counting_callback, &num_callbacks); + headers->iterate(counting_callback, &num_callbacks); } benchmark::DoNotOptimize(num_callbacks); } -BENCHMARK(HeaderMapImplIterate)->Arg(0)->Arg(1)->Arg(10)->Arg(50); +BENCHMARK(headerMapImplIterate)->Arg(0)->Arg(1)->Arg(10)->Arg(50); /** Measure the speed of the HeaderMapImpl lookup() method. */ -static void HeaderMapImplLookup(benchmark::State& state) { +static void headerMapImplLookup(benchmark::State& state) { const LowerCaseString key("connection"); const std::string value("01234567890123456789"); - HeaderMapImpl headers; - addDummyHeaders(headers, state.range(0)); - headers.addReference(key, value); + auto headers = Http::ResponseHeaderMapImpl::create(); + addDummyHeaders(*headers, state.range(0)); + headers->addReference(key, value); for (auto _ : state) { const HeaderEntry* entry = nullptr; - auto result = headers.lookup(key, &entry); + auto result = headers->lookup(key, &entry); benchmark::DoNotOptimize(result); } } -BENCHMARK(HeaderMapImplLookup)->Arg(0)->Arg(1)->Arg(10)->Arg(50); +BENCHMARK(headerMapImplLookup)->Arg(0)->Arg(1)->Arg(10)->Arg(50); /** * Measure the speed of removing a header by key name. * @note The measured time for each iteration includes the time needed to add * one copy of the header. */ -static void HeaderMapImplRemove(benchmark::State& state) { +static void headerMapImplRemove(benchmark::State& state) { const LowerCaseString key("example-key"); const std::string value("01234567890123456789"); - HeaderMapImpl headers; - addDummyHeaders(headers, state.range(0)); + auto headers = Http::ResponseHeaderMapImpl::create(); + addDummyHeaders(*headers, state.range(0)); for (auto _ : state) { - headers.addReference(key, value); - headers.remove(key); + headers->addReference(key, value); + headers->remove(key); } - benchmark::DoNotOptimize(headers.size()); + benchmark::DoNotOptimize(headers->size()); } -BENCHMARK(HeaderMapImplRemove)->Arg(0)->Arg(1)->Arg(10)->Arg(50); +BENCHMARK(headerMapImplRemove)->Arg(0)->Arg(1)->Arg(10)->Arg(50); /** * Measure the speed of removing a header by key name, for the special case of @@ -180,24 +182,24 @@ BENCHMARK(HeaderMapImplRemove)->Arg(0)->Arg(1)->Arg(10)->Arg(50); * @note The measured time for each iteration includes the time needed to add * one copy of the header. 
*/ -static void HeaderMapImplRemoveInline(benchmark::State& state) { +static void headerMapImplRemoveInline(benchmark::State& state) { const LowerCaseString key("connection"); const std::string value("01234567890123456789"); - HeaderMapImpl headers; - addDummyHeaders(headers, state.range(0)); + auto headers = Http::ResponseHeaderMapImpl::create(); + addDummyHeaders(*headers, state.range(0)); for (auto _ : state) { - headers.addReference(key, value); - headers.remove(key); + headers->addReference(key, value); + headers->remove(key); } - benchmark::DoNotOptimize(headers.size()); + benchmark::DoNotOptimize(headers->size()); } -BENCHMARK(HeaderMapImplRemoveInline)->Arg(0)->Arg(1)->Arg(10)->Arg(50); +BENCHMARK(headerMapImplRemoveInline)->Arg(0)->Arg(1)->Arg(10)->Arg(50); /** * Measure the speed of creating a HeaderMapImpl and populating it with a realistic * set of response headers. */ -static void HeaderMapImplPopulate(benchmark::State& state) { +static void headerMapImplPopulate(benchmark::State& state) { const std::pair headers_to_add[] = { {LowerCaseString("cache-control"), "max-age=0, private, must-revalidate"}, {LowerCaseString("content-encoding"), "gzip"}, @@ -211,14 +213,14 @@ static void HeaderMapImplPopulate(benchmark::State& state) { {LowerCaseString("set-cookie"), "_cookie2=12345678; path = /; secure"}, }; for (auto _ : state) { - HeaderMapImpl headers; + auto headers = Http::ResponseHeaderMapImpl::create(); for (const auto& key_value : headers_to_add) { - headers.addReference(key_value.first, key_value.second); + headers->addReference(key_value.first, key_value.second); } - benchmark::DoNotOptimize(headers.size()); + benchmark::DoNotOptimize(headers->size()); } } -BENCHMARK(HeaderMapImplPopulate); +BENCHMARK(headerMapImplPopulate); } // namespace Http } // namespace Envoy diff --git a/test/common/http/header_map_impl_test.cc b/test/common/http/header_map_impl_test.cc index 7095daf5ec42..b283e214a0ab 100644 --- a/test/common/http/header_map_impl_test.cc +++ b/test/common/http/header_map_impl_test.cc @@ -355,30 +355,30 @@ TEST(HeaderStringTest, All) { } #define TEST_INLINE_HEADER_FUNCS(name) \ - header_map.addCopy(Headers::get().name, #name); \ - EXPECT_EQ(header_map.name()->value().getStringView(), #name); \ - header_map.remove##name(); \ - EXPECT_EQ(nullptr, header_map.name()); \ - header_map.set##name(#name); \ - EXPECT_EQ(header_map.get(Headers::get().name)->value().getStringView(), #name); + header_map->addCopy(Headers::get().name, #name); \ + EXPECT_EQ(header_map->name()->value().getStringView(), #name); \ + header_map->remove##name(); \ + EXPECT_EQ(nullptr, header_map->name()); \ + header_map->set##name(#name); \ + EXPECT_EQ(header_map->get(Headers::get().name)->value().getStringView(), #name); // Make sure that the O(1) headers are wired up properly. TEST(HeaderMapImplTest, AllInlineHeaders) { { - RequestHeaderMapImpl header_map; + auto header_map = RequestHeaderMapImpl::create(); INLINE_REQ_HEADERS(TEST_INLINE_HEADER_FUNCS) INLINE_REQ_RESP_HEADERS(TEST_INLINE_HEADER_FUNCS) } { // No request trailer O(1) headers. 
} { - ResponseHeaderMapImpl header_map; + auto header_map = ResponseHeaderMapImpl::create(); INLINE_RESP_HEADERS(TEST_INLINE_HEADER_FUNCS) INLINE_REQ_RESP_HEADERS(TEST_INLINE_HEADER_FUNCS) INLINE_RESP_HEADERS_TRAILERS(TEST_INLINE_HEADER_FUNCS) } { - ResponseTrailerMapImpl header_map; + auto header_map = ResponseTrailerMapImpl::create(); INLINE_RESP_HEADERS_TRAILERS(TEST_INLINE_HEADER_FUNCS) } } @@ -545,7 +545,7 @@ TEST(HeaderMapImplTest, RemoveRegex) { } TEST(HeaderMapImplTest, SetRemovesAllValues) { - TestHeaderMapImpl headers; + TestRequestHeaderMapImpl headers; LowerCaseString key1("hello"); LowerCaseString key2("olleh"); @@ -636,7 +636,7 @@ TEST(HeaderMapImplTest, DoubleInlineAdd) { // Per https://github.com/envoyproxy/envoy/issues/7488 make sure we don't // combine set-cookie headers TEST(HeaderMapImplTest, DoubleCookieAdd) { - TestHeaderMapImpl headers; + TestRequestHeaderMapImpl headers; const std::string foo("foo"); const std::string bar("bar"); const LowerCaseString& set_cookie = Http::Headers::get().SetCookie; @@ -660,7 +660,7 @@ TEST(HeaderMapImplTest, DoubleInlineSet) { } TEST(HeaderMapImplTest, AddReferenceKey) { - TestHeaderMapImpl headers; + TestRequestHeaderMapImpl headers; LowerCaseString foo("hello"); headers.addReferenceKey(foo, "world"); EXPECT_NE("world", headers.get(foo)->value().getStringView().data()); @@ -668,7 +668,7 @@ TEST(HeaderMapImplTest, AddReferenceKey) { } TEST(HeaderMapImplTest, SetReferenceKey) { - TestHeaderMapImpl headers; + TestRequestHeaderMapImpl headers; LowerCaseString foo("hello"); headers.setReferenceKey(foo, "world"); EXPECT_NE("world", headers.get(foo)->value().getStringView().data()); @@ -803,8 +803,8 @@ TEST(HeaderMapImplTest, AddCopy) { } TEST(HeaderMapImplTest, Equality) { - TestHeaderMapImpl headers1; - TestHeaderMapImpl headers2; + TestRequestHeaderMapImpl headers1; + TestRequestHeaderMapImpl headers2; EXPECT_EQ(headers1, headers2); headers1.addCopy(LowerCaseString("hello"), "world"); @@ -815,7 +815,7 @@ TEST(HeaderMapImplTest, Equality) { } TEST(HeaderMapImplTest, LargeCharInHeader) { - TestHeaderMapImpl headers; + TestRequestHeaderMapImpl headers; LowerCaseString static_key("\x90hello"); std::string ref_value("value"); headers.addReference(static_key, ref_value); @@ -823,7 +823,7 @@ TEST(HeaderMapImplTest, LargeCharInHeader) { } TEST(HeaderMapImplTest, Iterate) { - TestHeaderMapImpl headers; + TestRequestHeaderMapImpl headers; headers.addCopy(LowerCaseString("hello"), "world"); headers.addCopy(LowerCaseString("foo"), "xxx"); headers.addCopy(LowerCaseString("world"), "hello"); @@ -847,7 +847,7 @@ TEST(HeaderMapImplTest, Iterate) { } TEST(HeaderMapImplTest, IterateReverse) { - TestHeaderMapImpl headers; + TestRequestHeaderMapImpl headers; headers.addCopy(LowerCaseString("hello"), "world"); headers.addCopy(LowerCaseString("foo"), "bar"); LowerCaseString world_key("world"); @@ -902,31 +902,31 @@ TEST(HeaderMapImplTest, Lookup) { TEST(HeaderMapImplTest, Get) { { - auto headers = createHeaderMap( - {{Headers::get().Path, "/"}, {LowerCaseString("hello"), "world"}}); - EXPECT_EQ("/", headers->get(LowerCaseString(":path"))->value().getStringView()); - EXPECT_EQ("world", headers->get(LowerCaseString("hello"))->value().getStringView()); - EXPECT_EQ(nullptr, headers->get(LowerCaseString("foo"))); + auto headers = + TestRequestHeaderMapImpl({{Headers::get().Path, "/"}, {LowerCaseString("hello"), "world"}}); + EXPECT_EQ("/", headers.get(LowerCaseString(":path"))->value().getStringView()); + EXPECT_EQ("world", 
headers.get(LowerCaseString("hello"))->value().getStringView()); + EXPECT_EQ(nullptr, headers.get(LowerCaseString("foo"))); } { - auto headers = createHeaderMap( - {{Headers::get().Path, "/"}, {LowerCaseString("hello"), "world"}}); + auto headers = + TestRequestHeaderMapImpl({{Headers::get().Path, "/"}, {LowerCaseString("hello"), "world"}}); // There is not HeaderMap method to set a header and copy both the key and value. const LowerCaseString path(":path"); - headers->setReferenceKey(path, "/new_path"); - EXPECT_EQ("/new_path", headers->get(LowerCaseString(":path"))->value().getStringView()); + headers.setReferenceKey(path, "/new_path"); + EXPECT_EQ("/new_path", headers.get(LowerCaseString(":path"))->value().getStringView()); const LowerCaseString foo("hello"); - headers->setReferenceKey(foo, "world2"); - EXPECT_EQ("world2", headers->get(foo)->value().getStringView()); - EXPECT_EQ(nullptr, headers->get(LowerCaseString("foo"))); + headers.setReferenceKey(foo, "world2"); + EXPECT_EQ("world2", headers.get(foo)->value().getStringView()); + EXPECT_EQ(nullptr, headers.get(LowerCaseString("foo"))); } } TEST(HeaderMapImplTest, CreateHeaderMapFromIterator) { std::vector> iter_headers{ {LowerCaseString(Headers::get().Path), "/"}, {LowerCaseString("hello"), "world"}}; - auto headers = createHeaderMap(iter_headers.cbegin(), iter_headers.cend()); + auto headers = createHeaderMap(iter_headers.cbegin(), iter_headers.cend()); EXPECT_EQ("/", headers->get(LowerCaseString(":path"))->value().getStringView()); EXPECT_EQ("world", headers->get(LowerCaseString("hello"))->value().getStringView()); EXPECT_EQ(nullptr, headers->get(LowerCaseString("foo"))); @@ -936,8 +936,8 @@ TEST(HeaderMapImplTest, TestHeaderList) { std::array keys{Headers::get().Path, LowerCaseString("hello")}; std::array values{"/", "world"}; - auto headers = createHeaderMap({{keys[0], values[0]}, {keys[1], values[1]}}); - HeaderListView header_list(headers->header_map_); + auto headers = TestRequestHeaderMapImpl({{keys[0], values[0]}, {keys[1], values[1]}}); + HeaderListView header_list(headers); auto to_string_views = [](const HeaderListView::HeaderStringRefs& strs) -> std::vector { std::vector str_views(strs.size()); @@ -953,7 +953,7 @@ TEST(HeaderMapImplTest, TestHeaderList) { TEST(HeaderMapImplTest, TestAppendHeader) { // Test appending to a string with a value. { - TestHeaderMapImpl headers; + TestRequestHeaderMapImpl headers; LowerCaseString foo("key1"); headers.addCopy(foo, "some;"); headers.appendCopy(foo, "test"); @@ -962,7 +962,7 @@ TEST(HeaderMapImplTest, TestAppendHeader) { // Test appending to an empty string. { - TestHeaderMapImpl headers; + TestRequestHeaderMapImpl headers; LowerCaseString key2("key2"); headers.appendCopy(key2, "my tag data"); EXPECT_EQ(headers.get(key2)->value().getStringView(), "my tag data"); @@ -970,7 +970,7 @@ TEST(HeaderMapImplTest, TestAppendHeader) { // Test empty data case. 
{ - TestHeaderMapImpl headers; + TestRequestHeaderMapImpl headers; LowerCaseString key3("key3"); headers.addCopy(key3, "empty"); headers.appendCopy(key3, ""); @@ -1019,7 +1019,7 @@ TEST(HeaderMapImplTest, PseudoHeaderOrder) { { LowerCaseString foo("hello"); - Http::TestHeaderMapImpl headers{}; + Http::TestRequestHeaderMapImpl headers{}; EXPECT_EQ(0UL, headers.size()); EXPECT_TRUE(headers.empty()); @@ -1173,11 +1173,11 @@ TEST(HeaderMapImplTest, PseudoHeaderOrder) { // Starting with a normal header { - auto headers = createHeaderMap({{Headers::get().ContentType, "text/plain"}, - {Headers::get().Method, "GET"}, - {Headers::get().Path, "/"}, - {LowerCaseString("hello"), "world"}, - {Headers::get().Host, "host"}}); + auto headers = TestRequestHeaderMapImpl({{Headers::get().ContentType, "text/plain"}, + {Headers::get().Method, "GET"}, + {Headers::get().Path, "/"}, + {LowerCaseString("hello"), "world"}, + {Headers::get().Host, "host"}}); InSequence seq; EXPECT_CALL(cb, Call(":method", "GET")); @@ -1186,7 +1186,7 @@ TEST(HeaderMapImplTest, PseudoHeaderOrder) { EXPECT_CALL(cb, Call("content-type", "text/plain")); EXPECT_CALL(cb, Call("hello", "world")); - headers->iterate( + headers.iterate( [](const Http::HeaderEntry& header, void* cb_v) -> HeaderMap::Iterate { static_cast(cb_v)->Call(std::string(header.key().getStringView()), std::string(header.value().getStringView())); @@ -1197,11 +1197,11 @@ TEST(HeaderMapImplTest, PseudoHeaderOrder) { // Starting with a pseudo-header { - auto headers = createHeaderMap({{Headers::get().Path, "/"}, - {Headers::get().ContentType, "text/plain"}, - {Headers::get().Method, "GET"}, - {LowerCaseString("hello"), "world"}, - {Headers::get().Host, "host"}}); + auto headers = TestRequestHeaderMapImpl({{Headers::get().Path, "/"}, + {Headers::get().ContentType, "text/plain"}, + {Headers::get().Method, "GET"}, + {LowerCaseString("hello"), "world"}, + {Headers::get().Host, "host"}}); InSequence seq; EXPECT_CALL(cb, Call(":path", "/")); @@ -1210,7 +1210,7 @@ TEST(HeaderMapImplTest, PseudoHeaderOrder) { EXPECT_CALL(cb, Call("content-type", "text/plain")); EXPECT_CALL(cb, Call("hello", "world")); - headers->iterate( + headers.iterate( [](const Http::HeaderEntry& header, void* cb_v) -> HeaderMap::Iterate { static_cast(cb_v)->Call(std::string(header.key().getStringView()), std::string(header.value().getStringView())); @@ -1220,18 +1220,18 @@ TEST(HeaderMapImplTest, PseudoHeaderOrder) { } } -// Validate that TestHeaderMapImpl copy construction and assignment works. This is a +// Validate that TestRequestHeaderMapImpl copy construction and assignment works. This is a // regression for where we were missing a valid copy constructor and had the // default (dangerous) move semantics takeover. 
-TEST(HeaderMapImplTest, TestHeaderMapImplyCopy) { - TestHeaderMapImpl foo; +TEST(HeaderMapImplTest, TestRequestHeaderMapImplyCopy) { + TestRequestHeaderMapImpl foo; foo.addCopy(LowerCaseString("foo"), "bar"); - auto headers = std::make_unique(foo); + auto headers = std::make_unique(foo); EXPECT_EQ("bar", headers->get(LowerCaseString("foo"))->value().getStringView()); - TestHeaderMapImpl baz{{"foo", "baz"}}; + TestRequestHeaderMapImpl baz{{"foo", "baz"}}; baz = *headers; EXPECT_EQ("bar", baz.get(LowerCaseString("foo"))->value().getStringView()); - const TestHeaderMapImpl& baz2 = baz; + const TestRequestHeaderMapImpl& baz2 = baz; baz = baz2; EXPECT_EQ("bar", baz.get(LowerCaseString("foo"))->value().getStringView()); } diff --git a/test/common/http/utility_fuzz_test.cc b/test/common/http/utility_fuzz_test.cc index e81c10e4ae97..e3524fde27e4 100644 --- a/test/common/http/utility_fuzz_test.cc +++ b/test/common/http/utility_fuzz_test.cc @@ -23,22 +23,22 @@ DEFINE_PROTO_FUZZER(const test::common::http::UtilityTestCase& input) { } case test::common::http::UtilityTestCase::kParseCookieValue: { const auto& parse_cookie_value = input.parse_cookie_value(); - // Use the production HeaderMapImpl to avoid timeouts from TestHeaderMapImpl asserts. - Http::HeaderMapImpl headers; + // Use the production RequestHeaderMapImpl to avoid timeouts from TestHeaderMapImpl asserts. + auto headers = Http::RequestHeaderMapImpl::create(); for (const std::string& cookie : parse_cookie_value.cookies()) { - headers.addCopy(Http::LowerCaseString("cookie"), replaceInvalidCharacters(cookie)); + headers->addCopy(Http::LowerCaseString("cookie"), replaceInvalidCharacters(cookie)); } - Http::Utility::parseCookieValue(headers, parse_cookie_value.key()); + Http::Utility::parseCookieValue(*headers, parse_cookie_value.key()); break; } case test::common::http::UtilityTestCase::kGetLastAddressFromXff: { const auto& get_last_address_from_xff = input.get_last_address_from_xff(); - // Use the production HeaderMapImpl to avoid timeouts from TestHeaderMapImpl asserts. - Http::RequestHeaderMapImpl headers; - headers.addCopy(Http::LowerCaseString("x-forwarded-for"), - replaceInvalidCharacters(get_last_address_from_xff.xff())); + // Use the production RequestHeaderMapImpl to avoid timeouts from TestHeaderMapImpl asserts. + auto headers = Http::RequestHeaderMapImpl::create(); + headers->addCopy(Http::LowerCaseString("x-forwarded-for"), + replaceInvalidCharacters(get_last_address_from_xff.xff())); // Take num_to_skip modulo 32 to avoid wasting time in lala land. 
- Http::Utility::getLastAddressFromXFF(headers, get_last_address_from_xff.num_to_skip() % 32); + Http::Utility::getLastAddressFromXFF(*headers, get_last_address_from_xff.num_to_skip() % 32); break; } case test::common::http::UtilityTestCase::kExtractHostPathFromUri: { diff --git a/test/extensions/filters/http/on_demand/on_demand_filter_test.cc b/test/extensions/filters/http/on_demand/on_demand_filter_test.cc index 1724898c05b8..d226ee4dcec6 100644 --- a/test/extensions/filters/http/on_demand/on_demand_filter_test.cc +++ b/test/extensions/filters/http/on_demand/on_demand_filter_test.cc @@ -56,7 +56,7 @@ TEST_F(OnDemandFilterTest, TestDecodeHeadersWhenRouteConfigIsNotAvailable) { } TEST_F(OnDemandFilterTest, TestDecodeTrailers) { - Http::RequestTrailerMapImpl headers; + Http::TestRequestTrailerMapImpl headers; EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(headers)); } diff --git a/test/integration/protocol_integration_test.cc b/test/integration/protocol_integration_test.cc index 1ea4ab1da5d4..42622f5e73fe 100644 --- a/test/integration/protocol_integration_test.cc +++ b/test/integration/protocol_integration_test.cc @@ -1454,9 +1454,9 @@ TEST_P(DownstreamProtocolIntegrationTest, ManyTrailerHeaders) { max_request_headers_count_); }); - Http::RequestTrailerMapImpl request_trailers; + auto request_trailers = Http::RequestTrailerMapImpl::create(); for (int i = 0; i < 20000; i++) { - request_trailers.addCopy(Http::LowerCaseString(std::to_string(i)), ""); + request_trailers->addCopy(Http::LowerCaseString(std::to_string(i)), ""); } initialize(); @@ -1468,7 +1468,7 @@ TEST_P(DownstreamProtocolIntegrationTest, ManyTrailerHeaders) { {":authority", "host"}}); request_encoder_ = &encoder_decoder.first; auto response = std::move(encoder_decoder.second); - codec_client_->sendTrailers(*request_encoder_, request_trailers); + codec_client_->sendTrailers(*request_encoder_, *request_trailers); waitForNextUpstreamRequest(); upstream_request_->encodeHeaders(default_response_headers_, true); response->waitForEndStream(); diff --git a/test/test_common/utility.h b/test/test_common/utility.h index e66100184743..60b71b0cc5c4 100644 --- a/test/test_common/utility.h +++ b/test/test_common/utility.h @@ -633,28 +633,29 @@ namespace Http { */ #define DEFINE_TEST_INLINE_HEADER_FUNCS(name) \ public: \ - const HeaderEntry* name() const override { return header_map_.name(); } \ + const HeaderEntry* name() const override { return header_map_->name(); } \ void append##name(absl::string_view data, absl::string_view delimiter) override { \ - header_map_.append##name(data, delimiter); \ - header_map_.verifyByteSizeInternalForTest(); \ + header_map_->append##name(data, delimiter); \ + header_map_->verifyByteSizeInternalForTest(); \ } \ void setReference##name(absl::string_view value) override { \ - header_map_.setReference##name(value); \ - header_map_.verifyByteSizeInternalForTest(); \ + header_map_->setReference##name(value); \ + header_map_->verifyByteSizeInternalForTest(); \ } \ void set##name(absl::string_view value) override { \ - header_map_.set##name(value); \ - header_map_.verifyByteSizeInternalForTest(); \ + header_map_->set##name(value); \ + header_map_->verifyByteSizeInternalForTest(); \ } \ void set##name(uint64_t value) override { \ - header_map_.set##name(value); \ - header_map_.verifyByteSizeInternalForTest(); \ + header_map_->set##name(value); \ + header_map_->verifyByteSizeInternalForTest(); \ } \ size_t remove##name() override { \ - size_t headers_removed = header_map_.remove##name(); \ - 
header_map_.verifyByteSizeInternalForTest(); \ + const size_t headers_removed = header_map_->remove##name(); \ + header_map_->verifyByteSizeInternalForTest(); \ return headers_removed; \ - } + } \ + absl::string_view get##name##Value() const override { return header_map_->get##name##Value(); } /** * Base class for all test header map types. This class wraps an underlying real header map @@ -668,23 +669,30 @@ template class TestHeaderMapImplBase : public Inte TestHeaderMapImplBase() = default; TestHeaderMapImplBase(const std::initializer_list>& values) { for (auto& value : values) { - header_map_.addCopy(LowerCaseString(value.first), value.second); + header_map_->addCopy(LowerCaseString(value.first), value.second); } - header_map_.verifyByteSizeInternalForTest(); + header_map_->verifyByteSizeInternalForTest(); + } + TestHeaderMapImplBase( + const std::initializer_list>& values) { + for (auto& value : values) { + header_map_->addCopy(value.first, value.second); + } + header_map_->verifyByteSizeInternalForTest(); } TestHeaderMapImplBase(const TestHeaderMapImplBase& rhs) - : TestHeaderMapImplBase(rhs.header_map_) {} + : TestHeaderMapImplBase(*rhs.header_map_) {} TestHeaderMapImplBase(const HeaderMap& rhs) { - HeaderMapImpl::copyFrom(header_map_, rhs); - header_map_.verifyByteSizeInternalForTest(); + HeaderMapImpl::copyFrom(*header_map_, rhs); + header_map_->verifyByteSizeInternalForTest(); } TestHeaderMapImplBase& operator=(const TestHeaderMapImplBase& rhs) { if (this == &rhs) { return *this; } clear(); - HeaderMapImpl::copyFrom(header_map_, rhs); - header_map_.verifyByteSizeInternalForTest(); + HeaderMapImpl::copyFrom(*header_map_, rhs); + header_map_->verifyByteSizeInternalForTest(); return *this; } @@ -706,86 +714,112 @@ template class TestHeaderMapImplBase : public Inte size_t remove(const std::string& key) { return remove(LowerCaseString(key)); } // HeaderMap - bool operator==(const HeaderMap& rhs) const override { return header_map_.operator==(rhs); } - bool operator!=(const HeaderMap& rhs) const override { return header_map_.operator!=(rhs); } + bool operator==(const HeaderMap& rhs) const override { return header_map_->operator==(rhs); } + bool operator!=(const HeaderMap& rhs) const override { return header_map_->operator!=(rhs); } void addViaMove(HeaderString&& key, HeaderString&& value) override { - header_map_.addViaMove(std::move(key), std::move(value)); - header_map_.verifyByteSizeInternalForTest(); + header_map_->addViaMove(std::move(key), std::move(value)); + header_map_->verifyByteSizeInternalForTest(); } void addReference(const LowerCaseString& key, absl::string_view value) override { - header_map_.addReference(key, value); - header_map_.verifyByteSizeInternalForTest(); + header_map_->addReference(key, value); + header_map_->verifyByteSizeInternalForTest(); } void addReferenceKey(const LowerCaseString& key, uint64_t value) override { - header_map_.addReferenceKey(key, value); - header_map_.verifyByteSizeInternalForTest(); + header_map_->addReferenceKey(key, value); + header_map_->verifyByteSizeInternalForTest(); } void addReferenceKey(const LowerCaseString& key, absl::string_view value) override { - header_map_.addReferenceKey(key, value); - header_map_.verifyByteSizeInternalForTest(); + header_map_->addReferenceKey(key, value); + header_map_->verifyByteSizeInternalForTest(); } void addCopy(const LowerCaseString& key, uint64_t value) override { - header_map_.addCopy(key, value); - header_map_.verifyByteSizeInternalForTest(); + header_map_->addCopy(key, value); + 
header_map_->verifyByteSizeInternalForTest(); } void addCopy(const LowerCaseString& key, absl::string_view value) override { - header_map_.addCopy(key, value); - header_map_.verifyByteSizeInternalForTest(); + header_map_->addCopy(key, value); + header_map_->verifyByteSizeInternalForTest(); } void appendCopy(const LowerCaseString& key, absl::string_view value) override { - header_map_.appendCopy(key, value); - header_map_.verifyByteSizeInternalForTest(); + header_map_->appendCopy(key, value); + header_map_->verifyByteSizeInternalForTest(); } void setReference(const LowerCaseString& key, absl::string_view value) override { - header_map_.setReference(key, value); - header_map_.verifyByteSizeInternalForTest(); + header_map_->setReference(key, value); + header_map_->verifyByteSizeInternalForTest(); } void setReferenceKey(const LowerCaseString& key, absl::string_view value) override { - header_map_.setReferenceKey(key, value); + header_map_->setReferenceKey(key, value); } void setCopy(const LowerCaseString& key, absl::string_view value) override { - header_map_.setCopy(key, value); - header_map_.verifyByteSizeInternalForTest(); + header_map_->setCopy(key, value); + header_map_->verifyByteSizeInternalForTest(); + } + uint64_t byteSize() const override { return header_map_->byteSize(); } + const HeaderEntry* get(const LowerCaseString& key) const override { + return header_map_->get(key); } - uint64_t byteSize() const override { return header_map_.byteSize(); } - const HeaderEntry* get(const LowerCaseString& key) const override { return header_map_.get(key); } void iterate(HeaderMap::ConstIterateCb cb, void* context) const override { - header_map_.iterate(cb, context); + header_map_->iterate(cb, context); } void iterateReverse(HeaderMap::ConstIterateCb cb, void* context) const override { - header_map_.iterateReverse(cb, context); + header_map_->iterateReverse(cb, context); } HeaderMap::Lookup lookup(const LowerCaseString& key, const HeaderEntry** entry) const override { - return header_map_.lookup(key, entry); + return header_map_->lookup(key, entry); } void clear() override { - header_map_.clear(); - header_map_.verifyByteSizeInternalForTest(); + header_map_->clear(); + header_map_->verifyByteSizeInternalForTest(); } size_t remove(const LowerCaseString& key) override { - size_t headers_removed = header_map_.remove(key); - header_map_.verifyByteSizeInternalForTest(); + size_t headers_removed = header_map_->remove(key); + header_map_->verifyByteSizeInternalForTest(); return headers_removed; } size_t removePrefix(const LowerCaseString& key) override { - size_t headers_removed = header_map_.removePrefix(key); - header_map_.verifyByteSizeInternalForTest(); + size_t headers_removed = header_map_->removePrefix(key); + header_map_->verifyByteSizeInternalForTest(); return headers_removed; } - size_t size() const override { return header_map_.size(); } - bool empty() const override { return header_map_.empty(); } + size_t size() const override { return header_map_->size(); } + bool empty() const override { return header_map_->empty(); } void dumpState(std::ostream& os, int indent_level = 0) const override { - header_map_.dumpState(os, indent_level); + header_map_->dumpState(os, indent_level); + } + + using Handle = typename CustomInlineHeaderRegistry::Handle; + const HeaderEntry* getInline(Handle handle) const override { + return header_map_->getInline(handle); + } + void appendInline(Handle handle, absl::string_view data, absl::string_view delimiter) override { + header_map_->appendInline(handle, data, 
delimiter); + header_map_->verifyByteSizeInternalForTest(); + } + void setReferenceInline(Handle handle, absl::string_view value) override { + header_map_->setReferenceInline(handle, value); + header_map_->verifyByteSizeInternalForTest(); + } + void setInline(Handle handle, absl::string_view value) override { + header_map_->setInline(handle, value); + header_map_->verifyByteSizeInternalForTest(); + } + void setInline(Handle handle, uint64_t value) override { + header_map_->setInline(handle, value); + header_map_->verifyByteSizeInternalForTest(); + } + size_t removeInline(Handle handle) override { + const size_t rc = header_map_->removeInline(handle); + header_map_->verifyByteSizeInternalForTest(); + return rc; } - Impl header_map_; + std::unique_ptr header_map_{Impl::create()}; }; /** * Typed test implementations for all of the concrete header types. */ -using TestHeaderMapImpl = TestHeaderMapImplBase; - class TestRequestHeaderMapImpl : public TestHeaderMapImplBase { public: From 439a9f0bed5e2fa39417e41ab2d617e227846819 Mon Sep 17 00:00:00 2001 From: Wayne Zhang Date: Wed, 17 Jun 2020 19:27:09 -0700 Subject: [PATCH 377/909] LocalReply: invalid json format in grpc-message (#11621) For grpc requests, error message will write to grpc-message header. LocalReplyConfig::body_format still applies. If it is JSON format, grpc-message header value will be in json too. But there is a bug when json format is used, it generates an invalid json format as: grpc-message: '{"code":401,"message":"Jwt is missing"%0A' It should be: grpc-message: '{"code":401,"message":"Jwt is missing"}' Risk Level: Low Testing: integration test Signed-off-by: Wayne Zhang --- source/common/http/utility.cc | 5 ++ .../local_reply_integration_test.cc | 53 +++++++++++++++++++ 2 files changed, 58 insertions(+) diff --git a/source/common/http/utility.cc b/source/common/http/utility.cc index 079544db9aa2..dce557c7606b 100644 --- a/source/common/http/utility.cc +++ b/source/common/http/utility.cc @@ -469,6 +469,11 @@ void Utility::sendLocalReply(const bool& is_reset, const EncodeFunctions& encode if (!body_text.empty() && !local_reply_data.is_head_request_) { // TODO(dio): Probably it is worth to consider caching the encoded message based on gRPC // status. + // JsonFormatter adds a '\n' at the end. For header value, it should be removed. + // https://github.com/envoyproxy/envoy/blob/master/source/common/formatter/substitution_formatter.cc#L129 + if (body_text[body_text.length() - 1] == '\n') { + body_text = body_text.substr(0, body_text.length() - 1); + } response_headers->setGrpcMessage(PercentEncoding::encode(body_text)); } encode_functions.encode_headers_(std::move(response_headers), true); // Trailers only response diff --git a/test/integration/local_reply_integration_test.cc b/test/integration/local_reply_integration_test.cc index 833d822d8744..472aaf8220be 100644 --- a/test/integration/local_reply_integration_test.cc +++ b/test/integration/local_reply_integration_test.cc @@ -78,6 +78,59 @@ TEST_P(LocalReplyIntegrationTest, MapStatusCodeAndFormatToJson) { EXPECT_TRUE(TestUtility::jsonStringEqual(response->body(), expected_body)); } +// For grpc, the error message is in grpc-message header. +// If it is json, the header value is in json format. 
+TEST_P(LocalReplyIntegrationTest, MapStatusCodeAndFormatToJson4Grpc) { + const std::string yaml = R"EOF( +body_format: + json_format: + code: "%RESPONSE_CODE%" + message: "%LOCAL_REPLY_BODY%" +)EOF"; + setLocalReplyConfig(yaml); + initialize(); + + const std::string expected_grpc_message = R"({ + "code": 503, + "message":"upstream connect error or disconnect/reset before headers. reset reason: connection termination" +})"; + + codec_client_ = makeHttpConnection(lookupPort("http")); + + auto encoder_decoder = codec_client_->startRequest( + Http::TestRequestHeaderMapImpl{{":method", "POST"}, + {":path", "/package.service/method"}, + {":scheme", "http"}, + {":authority", "host"}, + {"content-type", "application/grpc"}}); + auto response = std::move(encoder_decoder.second); + + ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); + ASSERT_TRUE(fake_upstream_connection_->close()); + ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); + response->waitForEndStream(); + + if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { + ASSERT_TRUE(codec_client_->waitForDisconnect()); + } else { + codec_client_->close(); + } + + EXPECT_FALSE(upstream_request_->complete()); + EXPECT_EQ(0U, upstream_request_->bodyLength()); + + EXPECT_TRUE(response->complete()); + EXPECT_EQ("application/grpc", response->headers().ContentType()->value().getStringView()); + EXPECT_EQ("14", response->headers().GrpcStatus()->value().getStringView()); + // Check if grpc-message value is same as expected + EXPECT_TRUE(TestUtility::jsonStringEqual( + std::string(response->headers().GrpcMessage()->value().getStringView()), + expected_grpc_message)); +} + // Matched second filter has code and body rewrite and its format TEST_P(LocalReplyIntegrationTest, MapStatusCodeAndFormatToJsonForFirstMatchingFilter) { const std::string yaml = R"EOF( From 6f0bab2b46c332b3bc730e96b4da9b582ad67cf5 Mon Sep 17 00:00:00 2001 From: Florin Coras Date: Wed, 17 Jun 2020 22:38:45 -0700 Subject: [PATCH 378/909] tests: fix admission control filter test (#11631) Test added by #11414 uses RequestHeaderMapImpl constructor made private in #11546 Signed-off-by: Florin Coras --- .../http/admission_control/admission_control_filter_test.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/extensions/filters/http/admission_control/admission_control_filter_test.cc b/test/extensions/filters/http/admission_control/admission_control_filter_test.cc index 5aaa6ba1658d..ad1c3ca28543 100644 --- a/test/extensions/filters/http/admission_control/admission_control_filter_test.cc +++ b/test/extensions/filters/http/admission_control/admission_control_filter_test.cc @@ -149,7 +149,7 @@ sampling_window: 10s EXPECT_CALL(controller_, requestSuccessCount()).Times(0); // We expect no rejections. 
- Http::RequestHeaderMapImpl request_headers; + Http::TestRequestHeaderMapImpl request_headers; EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true)); } From 8d7cd0c61659a2dee0e6527de2c624ada8d2e12b Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Wed, 17 Jun 2020 23:55:22 -0700 Subject: [PATCH 379/909] build: use clang-10 (#11222) Risk Level: Med Testing: CI Docs Changes: Changed clang references Release Notes: Added Signed-off-by: Lizan Zhou --- .bazelrc | 5 +- .circleci/config.yml | 2 +- .devcontainer/Dockerfile | 2 +- bazel/README.md | 4 +- bazel/external/compiler_rt.BUILD | 2 +- bazel/foreign_cc/nghttp2.patch | 70 ++++++++++++++++ bazel/repository_locations.bzl | 14 ++-- ci/README.md | 4 +- ci/run_envoy_docker.sh | 2 +- docs/root/version_history/current.rst | 2 + source/common/common/thread.h | 6 +- source/common/stats/symbol_table_impl.h | 22 +++-- source/extensions/common/tap/admin.cc | 2 +- .../extensions/common/tap/tap_config_base.h | 4 +- .../filters/http/ext_authz/ext_authz.cc | 2 +- test/common/http/utility_test.cc | 39 +++++---- .../common/tap/tap_config_base_test.cc | 40 +++++----- .../redis_proxy/command_splitter_impl_test.cc | 3 +- test/fuzz/fuzz_runner.h | 2 +- test/mocks/common.h | 6 +- test/per_file_coverage.sh | 80 ++++++++++--------- test/run_envoy_bazel_coverage.sh | 2 +- test/test_common/simulated_time_system.cc | 7 +- test/test_common/simulated_time_system.h | 6 +- test/test_common/test_time.h | 6 +- test/test_common/test_time_system.h | 19 ++--- tools/api_boost/api_boost.py | 3 +- tools/clang_tools/README.md | 2 +- tools/clang_tools/support/BUILD.prebuilt | 13 ++- tools/clang_tools/support/clang_tools.bzl | 17 ++-- tools/code_format/check_format.py | 10 +-- 31 files changed, 252 insertions(+), 146 deletions(-) diff --git a/.bazelrc b/.bazelrc index a77ca3f3d92a..84c7ecf7fa6e 100644 --- a/.bazelrc +++ b/.bazelrc @@ -115,7 +115,8 @@ coverage --config=coverage build:coverage --action_env=BAZEL_USE_LLVM_NATIVE_COVERAGE=1 build:coverage --action_env=GCOV=llvm-profdata build:coverage --copt=-DNDEBUG -build:coverage --test_timeout=900 +# Doubling timeout in all categories +build:coverage --test_timeout=120,600,1800,7200 build:coverage --define=ENVOY_CONFIG_COVERAGE=1 build:coverage --cxxopt="-DENVOY_CONFIG_COVERAGE=1" build:coverage --coverage_support=@envoy//bazel/coverage:coverage_support @@ -182,7 +183,7 @@ build:remote-msan --config=rbe-toolchain-msan # Docker sandbox # NOTE: Update this from https://github.com/envoyproxy/envoy-build-tools/blob/master/toolchains/rbe_toolchains_config.bzl#L8 -build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu:04f06115b6ee7cfea74930353fb47a41149cbec3 +build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu:12b3d2c2ffa582507e5d6dd34632b2b990f1b195 build:docker-sandbox --spawn_strategy=docker build:docker-sandbox --strategy=Javac=docker build:docker-sandbox --strategy=Closure=docker diff --git a/.circleci/config.yml b/.circleci/config.yml index ab33aac57881..5efd1289cb1f 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -5,7 +5,7 @@ executors: description: "A regular build executor based on ubuntu image" docker: # NOTE: Update this from https://github.com/envoyproxy/envoy-build-tools/blob/master/toolchains/rbe_toolchains_config.bzl#L8 - - image: envoyproxy/envoy-build-ubuntu:04f06115b6ee7cfea74930353fb47a41149cbec3 + - image: envoyproxy/envoy-build-ubuntu:12b3d2c2ffa582507e5d6dd34632b2b990f1b195 resource_class: xlarge 
working_directory: /source diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 35439f303f24..ad93066b0d0a 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -1,4 +1,4 @@ -FROM gcr.io/envoy-ci/envoy-build:04f06115b6ee7cfea74930353fb47a41149cbec3 +FROM gcr.io/envoy-ci/envoy-build:12b3d2c2ffa582507e5d6dd34632b2b990f1b195 ARG USERNAME=vscode ARG USER_UID=501 diff --git a/bazel/README.md b/bazel/README.md index f278cba98a9b..8bd7cf3dceb2 100644 --- a/bazel/README.md +++ b/bazel/README.md @@ -272,7 +272,7 @@ for more details. ## Supported compiler versions We now require Clang >= 5.0 due to known issues with std::string thread safety and C++14 support. GCC >= 7 is also -known to work. Currently the CI is running with Clang 9. +known to work. Currently the CI is running with Clang 10. ## Clang STL debug symbols @@ -730,7 +730,7 @@ also have 'buildifier' installed from the bazel distribution. Edit the paths shown here to reflect the installation locations on your system: ```shell -export CLANG_FORMAT="$HOME/ext/clang+llvm-9.0.0-x86_64-linux-gnu-ubuntu-16.04/bin/clang-format" +export CLANG_FORMAT="$HOME/ext/clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04/bin/clang-format" export BUILDIFIER_BIN="/usr/bin/buildifier" ``` diff --git a/bazel/external/compiler_rt.BUILD b/bazel/external/compiler_rt.BUILD index 82dfe8f8be03..dbcb1be5134d 100644 --- a/bazel/external/compiler_rt.BUILD +++ b/bazel/external/compiler_rt.BUILD @@ -4,6 +4,6 @@ licenses(["notice"]) # Apache 2 cc_library( name = "fuzzed_data_provider", - hdrs = ["fuzzer/utils/FuzzedDataProvider.h"], + hdrs = ["fuzzer/FuzzedDataProvider.h"], visibility = ["//visibility:public"], ) diff --git a/bazel/foreign_cc/nghttp2.patch b/bazel/foreign_cc/nghttp2.patch index 55768dca2003..91ddf1898e45 100644 --- a/bazel/foreign_cc/nghttp2.patch +++ b/bazel/foreign_cc/nghttp2.patch @@ -15,3 +15,73 @@ index 35c77d1d..47bd63f5 100644 endif() # AC_TYPE_UINT8_T # AC_TYPE_UINT16_T +# https://github.com/nghttp2/nghttp2/pull/1468 +diff --git a/lib/nghttp2_buf.c b/lib/nghttp2_buf.c +index 2a435bebf..92f97f7f2 100644 +--- a/lib/nghttp2_buf.c ++++ b/lib/nghttp2_buf.c +@@ -82,8 +82,10 @@ void nghttp2_buf_reset(nghttp2_buf *buf) { + } + + void nghttp2_buf_wrap_init(nghttp2_buf *buf, uint8_t *begin, size_t len) { +- buf->begin = buf->pos = buf->last = buf->mark = begin; +- buf->end = begin + len; ++ buf->begin = buf->pos = buf->last = buf->mark = buf->end = begin; ++ if (buf->end != NULL) { ++ buf->end += len; ++ } + } + + static int buf_chain_new(nghttp2_buf_chain **chain, size_t chunk_length, +diff --git a/lib/nghttp2_frame.c b/lib/nghttp2_frame.c +index 4821de408..940c723b0 100644 +--- a/lib/nghttp2_frame.c ++++ b/lib/nghttp2_frame.c +@@ -818,8 +818,10 @@ int nghttp2_frame_unpack_origin_payload(nghttp2_extension *frame, + size_t len = 0; + + origin = frame->payload; +- p = payload; +- end = p + payloadlen; ++ p = end = payload; ++ if (end != NULL) { ++ end += payloadlen; ++ } + + for (; p != end;) { + if (end - p < 2) { +diff --git a/lib/nghttp2_session.c b/lib/nghttp2_session.c +index 563ccd7de..794f141a1 100644 +--- a/lib/nghttp2_session.c ++++ b/lib/nghttp2_session.c +@@ -5349,7 +5349,7 @@ static ssize_t inbound_frame_effective_readlen(nghttp2_inbound_frame *iframe, + + ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, + size_t inlen) { +- const uint8_t *first = in, *last = in + inlen; ++ const uint8_t *first = in, *last = in; + nghttp2_inbound_frame *iframe = &session->iframe; + size_t 
readlen; + ssize_t padlen; +@@ -5360,6 +5360,10 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, + size_t pri_fieldlen; + nghttp2_mem *mem; + ++ if (in != NULL) { ++ last += inlen; ++ } ++ + DEBUGF("recv: connection recv_window_size=%d, local_window=%d\n", + session->recv_window_size, session->local_window_size); + +@@ -5389,7 +5393,9 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, + } + + iframe->payloadleft -= readlen; +- in += readlen; ++ if (in != NULL) { ++ in += readlen; ++ } + + if (iframe->payloadleft == 0) { + session_inbound_frame_reset(session); diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 417b9507f670..e8b193983ac1 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -67,10 +67,10 @@ DEPENDENCY_REPOSITORIES = dict( use_category = ["build"], ), envoy_build_tools = dict( - sha256 = "78e794ae1c1197f59b7ecbf8bd62c053ecb1625daaccdbe287581ee6f12ec0fb", - strip_prefix = "envoy-build-tools-b47394aa94c45e15c479d18eab18ffd43ec62d89", - # 2020-05-14 - urls = ["https://github.com/envoyproxy/envoy-build-tools/archive/b47394aa94c45e15c479d18eab18ffd43ec62d89.tar.gz"], + sha256 = "b0efe70a1d122fffb89570771f4ec3b912aa0a8a0ce56218223918d7737d01e2", + strip_prefix = "envoy-build-tools-3cbc1d66b9e9ead42daf69e01597cacf4fb52151", + # 2020-05-15 + urls = ["https://github.com/envoyproxy/envoy-build-tools/archive/3cbc1d66b9e9ead42daf69e01597cacf4fb52151.tar.gz"], use_category = ["build"], ), boringssl = dict( @@ -443,10 +443,10 @@ DEPENDENCY_REPOSITORIES = dict( # provided as part of the compiler-rt source distribution. We can't use the # Clang variant as we are not a Clang-LLVM only shop today. org_llvm_releases_compiler_rt = dict( - sha256 = "56e4cd96dd1d8c346b07b4d6b255f976570c6f2389697347a6c3dcb9e820d10e", + sha256 = "6a7da64d3a0a7320577b68b9ca4933bdcab676e898b759850e827333c3282c75", # Only allow peeking at fuzzer related files for now. - strip_prefix = "compiler-rt-9.0.0.src/lib", - urls = ["http://releases.llvm.org/9.0.0/compiler-rt-9.0.0.src.tar.xz"], + strip_prefix = "compiler-rt-10.0.0.src/include", + urls = ["https://github.com/llvm/llvm-project/releases/download/llvmorg-10.0.0/compiler-rt-10.0.0.src.tar.xz"], use_category = ["test"], ), upb = dict( diff --git a/ci/README.md b/ci/README.md index e7d52fba450a..a137ea7e6129 100644 --- a/ci/README.md +++ b/ci/README.md @@ -32,8 +32,8 @@ running tests that reflects the latest built Windows 2019 Envoy image. Currently there are three build images for Linux and one for Windows: * `envoyproxy/envoy-build` — alias to `envoyproxy/envoy-build-ubuntu`. -* `envoyproxy/envoy-build-ubuntu` — based on Ubuntu 18.04 (Bionic) with GCC 9 and Clang 9 compiler. -* `envoyproxy/envoy-build-centos` — based on CentOS 7 with GCC 9 and Clang 9 compiler, this image is experimental and not well tested. +* `envoyproxy/envoy-build-ubuntu` — based on Ubuntu 18.04 (Bionic) with GCC 9 and Clang 10 compiler. +* `envoyproxy/envoy-build-centos` — based on CentOS 7 with GCC 9 and Clang 10 compiler, this image is experimental and not well tested. * `envoyproxy/envoy-build-windows2019` — based on Windows 2019 LTS with VS 2019 Build Tools. 
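
The nghttp2.patch hunks above exist because forming NULL + len (pointer arithmetic on a null base pointer, as can happen with an empty buffer) is undefined behavior, which stricter UBSan checks in newer Clang releases report; each hunk only advances the end pointer when the base pointer is non-null. A standalone sketch of the same guard pattern (the helper name is illustrative):

    #include <cstddef>
    #include <cstdint>

    // Compute one-past-the-end without ever forming nullptr + len.
    const uint8_t* spanEnd(const uint8_t* begin, size_t len) {
      const uint8_t* end = begin;
      if (end != nullptr) {
        end += len;
      }
      return end;
    }
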
The source for these images is located in the [envoyproxy/envoy-build-tools](https://github.com/envoyproxy/envoy-build-tools) diff --git a/ci/run_envoy_docker.sh b/ci/run_envoy_docker.sh index b1059c893b8a..886a2347d378 100755 --- a/ci/run_envoy_docker.sh +++ b/ci/run_envoy_docker.sh @@ -17,7 +17,7 @@ USER_GROUP=root [[ -z "${ENVOY_DOCKER_BUILD_DIR}" ]] && ENVOY_DOCKER_BUILD_DIR=/tmp/envoy-docker-build [[ -t 1 ]] && ENVOY_DOCKER_OPTIONS+=" -it" -[[ -f .git ]] && [[ ! -d .git ]] && ENVOY_DOCKER_OPTIONS+=" -v \"$(git rev-parse --git-common-dir):$(git rev-parse --git-common-dir)\"" +[[ -f .git ]] && [[ ! -d .git ]] && ENVOY_DOCKER_OPTIONS+=" -v $(git rev-parse --git-common-dir):$(git rev-parse --git-common-dir)" export ENVOY_BUILD_IMAGE="${IMAGE_NAME}:${IMAGE_ID}" diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 8cb2ef04c508..2740b74a1511 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -53,6 +53,8 @@ New Features * access loggers: extened specifier for FilterStateFormatter to output :ref:`unstructured log string `. * access loggers: file access logger config added :ref:`log_format `. * aggregate cluster: make route :ref:`retry_priority ` predicates work with :ref:`this cluster type `. +* build: official released binary is now built on Ubuntu 18.04, requires glibc >= 2.27. +* build: official released binary is now built with Clang 10.0.0. * compressor: generic :ref:`compressor ` filter exposed to users. * config: added :ref:`identifier ` stat that reflects control plane identifier. * config: added :ref:`version_text ` stat that reflects xDS version. diff --git a/source/common/common/thread.h b/source/common/common/thread.h index bbad9fee6913..4808d391dfbd 100644 --- a/source/common/common/thread.h +++ b/source/common/common/thread.h @@ -64,9 +64,9 @@ class CondVar { * @return WaitStatus whether the condition timed out or not. */ template - WaitStatus waitFor( - MutexBasicLockable& mutex, - std::chrono::duration duration) noexcept ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex) { + WaitStatus waitFor(MutexBasicLockable& mutex, + std::chrono::duration duration) noexcept + ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex) { return condvar_.WaitWithTimeout(&mutex.mutex_, absl::FromChrono(duration)) ? WaitStatus::Timeout : WaitStatus::NoTimeout; diff --git a/source/common/stats/symbol_table_impl.h b/source/common/stats/symbol_table_impl.h index e620637b2a15..baf862267227 100644 --- a/source/common/stats/symbol_table_impl.h +++ b/source/common/stats/symbol_table_impl.h @@ -386,12 +386,7 @@ class StatName { return H::combine(std::move(h), absl::string_view()); } - // Casts the raw data as a string_view. Note that this string_view will not - // be in human-readable form, but it will be compatible with a string-view - // hasher. 
- const char* cdata = reinterpret_cast(stat_name.data()); - absl::string_view data_as_string_view = absl::string_view(cdata, stat_name.dataSize()); - return H::combine(std::move(h), data_as_string_view); + return H::combine(std::move(h), stat_name.dataAsStringView()); } /** @@ -403,8 +398,7 @@ class StatName { uint64_t hash() const { return absl::Hash()(*this); } bool operator==(const StatName& rhs) const { - const uint64_t sz = dataSize(); - return sz == rhs.dataSize() && memcmp(data(), rhs.data(), sz * sizeof(uint8_t)) == 0; + return dataAsStringView() == rhs.dataAsStringView(); } bool operator!=(const StatName& rhs) const { return !(*this == rhs); } @@ -452,6 +446,9 @@ class StatName { * @return A pointer to the first byte of data (skipping over size bytes). */ const uint8_t* data() const { + if (size_and_data_ == nullptr) { + return nullptr; + } return size_and_data_ + SymbolTableImpl::Encoding::encodingSizeBytes(dataSize()); } @@ -463,6 +460,15 @@ class StatName { bool empty() const { return size_and_data_ == nullptr || dataSize() == 0; } private: + /** + * Casts the raw data as a string_view. Note that this string_view will not + * be in human-readable form, but it will be compatible with a string-view + * hasher and comparator. + */ + absl::string_view dataAsStringView() const { + return {reinterpret_cast(data()), dataSize()}; + } + const uint8_t* size_and_data_{nullptr}; }; diff --git a/source/extensions/common/tap/admin.cc b/source/extensions/common/tap/admin.cc index b6c4449db177..9dc6b9b08411 100644 --- a/source/extensions/common/tap/admin.cc +++ b/source/extensions/common/tap/admin.cc @@ -110,7 +110,7 @@ void AdminHandler::AdminPerTapSinkHandle::submitTrace( std::shared_ptr shared_trace{std::move(trace)}; // The handle can be destroyed before the cross thread post is complete. Thus, we capture a // reference to our parent. 
- parent_.main_thread_dispatcher_.post([& parent = parent_, trace = shared_trace, format]() { + parent_.main_thread_dispatcher_.post([&parent = parent_, trace = shared_trace, format]() { if (!parent.attached_request_.has_value()) { return; } diff --git a/source/extensions/common/tap/tap_config_base.h b/source/extensions/common/tap/tap_config_base.h index 519f87562063..59b53da027f6 100644 --- a/source/extensions/common/tap/tap_config_base.h +++ b/source/extensions/common/tap/tap_config_base.h @@ -53,7 +53,9 @@ class Utility { const uint32_t start_offset_trim = std::min(start_offset, slice.len_); slice.len_ -= start_offset_trim; start_offset -= start_offset_trim; - slice.mem_ = static_cast(slice.mem_) + start_offset_trim; + if (slice.mem_ != nullptr) { + slice.mem_ = static_cast(slice.mem_) + start_offset_trim; + } const uint32_t final_length = std::min(length, slice.len_); slice.len_ = final_length; diff --git a/source/extensions/filters/http/ext_authz/ext_authz.cc b/source/extensions/filters/http/ext_authz/ext_authz.cc index a5960424dc28..38d62a85a051 100644 --- a/source/extensions/filters/http/ext_authz/ext_authz.cc +++ b/source/extensions/filters/http/ext_authz/ext_authz.cc @@ -212,7 +212,7 @@ void Filter::onComplete(Filters::Common::ExtAuthz::ResponsePtr&& response) { callbacks_->sendLocalReply( response->status_code, response->body, - [& headers = response->headers_to_add, + [&headers = response->headers_to_add, &callbacks = *callbacks_](Http::HeaderMap& response_headers) -> void { ENVOY_STREAM_LOG(trace, "ext_authz filter added header(s) to the local response:", callbacks); diff --git a/test/common/http/utility_test.cc b/test/common/http/utility_test.cc index 70213c6feb67..d4320cd19324 100644 --- a/test/common/http/utility_test.cc +++ b/test/common/http/utility_test.cc @@ -665,10 +665,9 @@ TEST(HttpUtility, ResolveMostSpecificPerFilterConfigGeneric) { const std::string filter_name = "envoy.filter"; NiceMock filter_callbacks; - const Router::RouteSpecificFilterConfig* nullconfig = nullptr; - const Router::RouteSpecificFilterConfig* one = nullconfig + 1; - const Router::RouteSpecificFilterConfig* two = nullconfig + 2; - const Router::RouteSpecificFilterConfig* three = nullconfig + 3; + const Router::RouteSpecificFilterConfig one; + const Router::RouteSpecificFilterConfig two; + const Router::RouteSpecificFilterConfig three; // Test when there's nothing on the route EXPECT_EQ(nullptr, Utility::resolveMostSpecificPerFilterConfigGeneric(filter_name, @@ -676,23 +675,23 @@ TEST(HttpUtility, ResolveMostSpecificPerFilterConfigGeneric) { // Testing in reverse order, so that the method always returns the last object. 
ON_CALL(filter_callbacks.route_->route_entry_.virtual_host_, perFilterConfig(filter_name)) - .WillByDefault(Return(one)); - EXPECT_EQ(one, Utility::resolveMostSpecificPerFilterConfigGeneric(filter_name, - filter_callbacks.route())); + .WillByDefault(Return(&one)); + EXPECT_EQ(&one, Utility::resolveMostSpecificPerFilterConfigGeneric(filter_name, + filter_callbacks.route())); - ON_CALL(*filter_callbacks.route_, perFilterConfig(filter_name)).WillByDefault(Return(two)); - EXPECT_EQ(two, Utility::resolveMostSpecificPerFilterConfigGeneric(filter_name, - filter_callbacks.route())); + ON_CALL(*filter_callbacks.route_, perFilterConfig(filter_name)).WillByDefault(Return(&two)); + EXPECT_EQ(&two, Utility::resolveMostSpecificPerFilterConfigGeneric(filter_name, + filter_callbacks.route())); ON_CALL(filter_callbacks.route_->route_entry_, perFilterConfig(filter_name)) - .WillByDefault(Return(three)); - EXPECT_EQ(three, Utility::resolveMostSpecificPerFilterConfigGeneric(filter_name, - filter_callbacks.route())); + .WillByDefault(Return(&three)); + EXPECT_EQ(&three, Utility::resolveMostSpecificPerFilterConfigGeneric(filter_name, + filter_callbacks.route())); // Cover the case of no route entry ON_CALL(*filter_callbacks.route_, routeEntry()).WillByDefault(Return(nullptr)); - EXPECT_EQ(two, Utility::resolveMostSpecificPerFilterConfigGeneric(filter_name, - filter_callbacks.route())); + EXPECT_EQ(&two, Utility::resolveMostSpecificPerFilterConfigGeneric(filter_name, + filter_callbacks.route())); } // Verify that traversePerFilterConfigGeneric traverses in the order of specificity. @@ -702,16 +701,16 @@ TEST(HttpUtility, TraversePerFilterConfigIteratesInOrder) { // Create configs to test; to ease of testing instead of using real objects // we will use pointers that are actually indexes. 
- const Router::RouteSpecificFilterConfig* nullconfig = nullptr; + const std::vector nullconfigs(5); size_t num_configs = 1; ON_CALL(filter_callbacks.route_->route_entry_.virtual_host_, perFilterConfig(filter_name)) - .WillByDefault(Return(nullconfig + num_configs)); + .WillByDefault(Return(&nullconfigs[num_configs])); num_configs++; ON_CALL(*filter_callbacks.route_, perFilterConfig(filter_name)) - .WillByDefault(Return(nullconfig + num_configs)); + .WillByDefault(Return(&nullconfigs[num_configs])); num_configs++; ON_CALL(filter_callbacks.route_->route_entry_, perFilterConfig(filter_name)) - .WillByDefault(Return(nullconfig + num_configs)); + .WillByDefault(Return(&nullconfigs[num_configs])); // a vector to save which configs are visited by the traversePerFilterConfigGeneric std::vector visited_configs(num_configs, 0); @@ -720,7 +719,7 @@ TEST(HttpUtility, TraversePerFilterConfigIteratesInOrder) { size_t index = 0; Utility::traversePerFilterConfigGeneric(filter_name, filter_callbacks.route(), [&](const Router::RouteSpecificFilterConfig& cfg) { - int cfg_index = &cfg - nullconfig; + int cfg_index = &cfg - nullconfigs.data(); visited_configs[index] = cfg_index - 1; index++; }); diff --git a/test/extensions/common/tap/tap_config_base_test.cc b/test/extensions/common/tap/tap_config_base_test.cc index 74cf5074b3f5..75ccec5a1595 100644 --- a/test/extensions/common/tap/tap_config_base_test.cc +++ b/test/extensions/common/tap/tap_config_base_test.cc @@ -91,6 +91,8 @@ TEST(AddBufferToProtoBytes, All) { } TEST(TrimSlice, All) { + std::string slice_mem = "static base slice memory that is long enough"; + void* test_base = static_cast(&slice_mem[0]); { std::vector slices; Utility::trimSlices(slices, 0, 100); @@ -98,63 +100,63 @@ TEST(TrimSlice, All) { } { - std::vector slices = {{nullptr, 5}}; + std::vector slices = {{test_base, 5}}; Utility::trimSlices(slices, 0, 100); - const std::vector expected{{nullptr, 5}}; + const std::vector expected{{test_base, 5}}; EXPECT_EQ(expected, slices); } { - std::vector slices = {{nullptr, 5}}; + std::vector slices = {{test_base, 5}}; Utility::trimSlices(slices, 3, 3); - const std::vector expected{{reinterpret_cast(0x3), 2}}; + const std::vector expected{{static_cast(&slice_mem[3]), 2}}; EXPECT_EQ(expected, slices); } { - std::vector slices = {{nullptr, 5}, {nullptr, 4}}; + std::vector slices = {{test_base, 5}, {test_base, 4}}; Utility::trimSlices(slices, 3, 3); - const std::vector expected{{reinterpret_cast(0x3), 2}, - {reinterpret_cast(0x0), 1}}; + const std::vector expected{{static_cast(&slice_mem[3]), 2}, + {static_cast(&slice_mem[0]), 1}}; EXPECT_EQ(expected, slices); } { - std::vector slices = {{nullptr, 5}, {nullptr, 4}}; + std::vector slices = {{test_base, 5}, {test_base, 4}}; Utility::trimSlices(slices, 6, 3); - const std::vector expected{{reinterpret_cast(0x5), 0}, - {reinterpret_cast(0x1), 3}}; + const std::vector expected{{static_cast(&slice_mem[5]), 0}, + {static_cast(&slice_mem[1]), 3}}; EXPECT_EQ(expected, slices); } { - std::vector slices = {{nullptr, 5}, {nullptr, 4}}; + std::vector slices = {{test_base, 5}, {test_base, 4}}; Utility::trimSlices(slices, 0, 0); - const std::vector expected{{reinterpret_cast(0x0), 0}, - {reinterpret_cast(0x0), 0}}; + const std::vector expected{{static_cast(&slice_mem[0]), 0}, + {static_cast(&slice_mem[0]), 0}}; EXPECT_EQ(expected, slices); } { - std::vector slices = {{nullptr, 5}, {nullptr, 4}}; + std::vector slices = {{test_base, 5}, {test_base, 4}}; Utility::trimSlices(slices, 0, 3); - const std::vector 
expected{{reinterpret_cast(0x0), 3}, - {reinterpret_cast(0x0), 0}}; + const std::vector expected{{static_cast(&slice_mem[0]), 3}, + {static_cast(&slice_mem[0]), 0}}; EXPECT_EQ(expected, slices); } { - std::vector slices = {{nullptr, 5}, {nullptr, 4}}; + std::vector slices = {{test_base, 5}, {test_base, 4}}; Utility::trimSlices(slices, 1, 3); - const std::vector expected{{reinterpret_cast(0x1), 3}, - {reinterpret_cast(0x0), 0}}; + const std::vector expected{{static_cast(&slice_mem[1]), 3}, + {static_cast(&slice_mem[0]), 0}}; EXPECT_EQ(expected, slices); } } diff --git a/test/extensions/filters/network/redis_proxy/command_splitter_impl_test.cc b/test/extensions/filters/network/redis_proxy/command_splitter_impl_test.cc index b8020b2a29c1..097cb3d49f4c 100644 --- a/test/extensions/filters/network/redis_proxy/command_splitter_impl_test.cc +++ b/test/extensions/filters/network/redis_proxy/command_splitter_impl_test.cc @@ -485,6 +485,7 @@ class FragmentedRequestCommandHandlerTest : public RedisCommandSplitterImplTest EXPECT_CALL(callbacks_, connectionAllowed()).WillOnce(Return(true)); + std::vector dummy_requests(num_gets); for (uint32_t i = 0; i < num_gets; i++) { Common::Redis::Client::PoolRequest* request_to_use = nullptr; if (std::find(null_handle_indexes.begin(), null_handle_indexes.end(), i) == @@ -494,7 +495,7 @@ class FragmentedRequestCommandHandlerTest : public RedisCommandSplitterImplTest Common::Redis::Client::PoolRequest* mirror_request_to_use = nullptr; if (std::find(null_handle_indexes.begin(), null_handle_indexes.end(), i) == null_handle_indexes.end()) { - mirror_request_to_use = &mirror_request_to_use[i]; + mirror_request_to_use = &dummy_requests[i]; } EXPECT_CALL(*conn_pool_, makeRequest_(std::to_string(i), CompositeArrayEq(expected_requests_[i]), _)) diff --git a/test/fuzz/fuzz_runner.h b/test/fuzz/fuzz_runner.h index 31a317a220c0..5349d1241cd5 100644 --- a/test/fuzz/fuzz_runner.h +++ b/test/fuzz/fuzz_runner.h @@ -8,7 +8,7 @@ #include "libprotobuf_mutator/src/libfuzzer/libfuzzer_macro.h" // Bring in FuzzedDataProvider, see // https://github.com/google/fuzzing/blob/master/docs/split-inputs.md#fuzzed-data-provider -#include "fuzzer/utils/FuzzedDataProvider.h" +#include "fuzzer/FuzzedDataProvider.h" #include "spdlog/spdlog.h" namespace Envoy { diff --git a/test/mocks/common.h b/test/mocks/common.h index 1c5d899d975e..57bdc20623f6 100644 --- a/test/mocks/common.h +++ b/test/mocks/common.h @@ -58,9 +58,9 @@ class MockTimeSystem : public Event::TestTimeSystem { void advanceTimeAsync(const Duration& duration) override { real_time_.advanceTimeAsync(duration); } - Thread::CondVar::WaitStatus - waitFor(Thread::MutexBasicLockable& mutex, Thread::CondVar& condvar, - const Duration& duration) noexcept ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex) override { + Thread::CondVar::WaitStatus waitFor(Thread::MutexBasicLockable& mutex, Thread::CondVar& condvar, + const Duration& duration) noexcept + ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex) override { return real_time_.waitFor(mutex, condvar, duration); // NO_CHECK_FORMAT(real_time) } MOCK_METHOD(SystemTime, systemTime, ()); diff --git a/test/per_file_coverage.sh b/test/per_file_coverage.sh index 6f5368f024bb..cb7f14b81b51 100755 --- a/test/per_file_coverage.sh +++ b/test/per_file_coverage.sh @@ -3,52 +3,60 @@ # directory:coverage_percent # for existing extensions with low coverage. 
declare -a KNOWN_LOW_COVERAGE=( -"source/extensions/common:95.1" +"source/extensions/common:94.0" "source/extensions/common/crypto:91.5" -"source/extensions/common/wasm:87.8" -"source/extensions/common/wasm/v8:88.3" +"source/extensions/common/tap:95.9" +"source/extensions/common/wasm:85.4" "source/extensions/common/wasm/null:77.8" -"source/extensions/filters/network/sni_cluster:90.3" -"source/extensions/filters/network/thrift_proxy/router:96.4" -"source/extensions/filters/network/sni_dynamic_forward_proxy:92.4" -"source/extensions/filters/network/dubbo_proxy:96.7" -"source/extensions/filters/network/dubbo_proxy/router:96.1" -"source/extensions/filters/network/direct_response:89.3" -"source/extensions/filters/http/dynamic_forward_proxy:93.2" -"source/extensions/filters/http/cache:80.8" +"source/extensions/common/wasm/v8:85.4" +"source/extensions/filters/common:94.6" +"source/extensions/filters/common/expr:92.2" +"source/extensions/filters/common/fault:95.8" +"source/extensions/filters/common/lua:95.9" +"source/extensions/filters/common/rbac:87.2" +"source/extensions/filters/http/aws_lambda:96.4" +"source/extensions/filters/http/aws_request_signing:93.3" +"source/extensions/filters/http/cache:80.7" "source/extensions/filters/http/cache/simple_http_cache:84.5" -"source/extensions/filters/http/csrf:96.6" -"source/extensions/filters/http/ip_tagging:92.0" -"source/extensions/filters/http/header_to_metadata:95.0" +"source/extensions/filters/http/dynamic_forward_proxy:91.5" +"source/extensions/filters/http/ext_authz:96.5" "source/extensions/filters/http/grpc_json_transcoder:93.3" -"source/extensions/filters/http/aws_request_signing:93.3" -"source/extensions/filters/listener:95.7" -"source/extensions/filters/listener/tls_inspector:92.9" +"source/extensions/filters/http/header_to_metadata:95.0" +"source/extensions/filters/http/ip_tagging:91.2" +"source/extensions/filters/listener:95.6" "source/extensions/filters/listener/http_inspector:93.3" -"source/extensions/filters/udp:91.2" -"source/extensions/filters/udp/dns_filter:84.1" -"source/extensions/filters/common:96.4" -"source/extensions/filters/common/expr:92.2" -"source/extensions/filters/common/rbac:93.0" -"source/extensions/grpc_credentials:93.9" -"source/extensions/grpc_credentials/aws_iam:88.6" -"source/extensions/quic_listeners:85.1" -"source/extensions/quic_listeners/quiche:85.1" -"source/extensions/quic_listeners/quiche/platform:0" +"source/extensions/filters/listener/tls_inspector:92.4" +"source/extensions/filters/network/common:96.0" +"source/extensions/filters/network/common/redis:96.2" +"source/extensions/filters/network/direct_response:89.3" +"source/extensions/filters/network/dubbo_proxy:96.1" +"source/extensions/filters/network/dubbo_proxy/router:95.1" +"source/extensions/filters/network/http_connection_manager:95.9" +"source/extensions/filters/network/mongo_proxy:94.0" +"source/extensions/filters/network/sni_cluster:90.3" +"source/extensions/filters/network/sni_dynamic_forward_proxy:89.4" +"source/extensions/filters/network/thrift_proxy/router:96.0" +"source/extensions/filters/udp:91.0" +"source/extensions/filters/udp/dns_filter:88.5" +"source/extensions/grpc_credentials:92.0" +"source/extensions/grpc_credentials/aws_iam:86.8" +"source/extensions/health_checkers:95.9" +"source/extensions/health_checkers/redis:95.9" +"source/extensions/quic_listeners:84.8" +"source/extensions/quic_listeners/quiche:84.8" "source/extensions/resource_monitors/fixed_heap:90.9" "source/extensions/retry:95.5" "source/extensions/retry/host:85.7" 
-"source/extensions/retry/host/omit_host_metadata:96.9" -"source/extensions/retry/host/previous_hosts:82.4" "source/extensions/retry/host/omit_canary_hosts:64.3" -"source/extensions/stat_sinks/statsd:92.6" -"source/extensions/tracers:96.8" -"source/extensions/tracers/opencensus:93.9" -"source/extensions/tracers/xray:95.5" -"source/extensions/transport_sockets:95.0" +"source/extensions/retry/host/previous_hosts:82.4" +"source/extensions/stat_sinks/statsd:85.2" +"source/extensions/tracers:96.3" +"source/extensions/tracers/opencensus:90.1" +"source/extensions/tracers/xray:95.3" +"source/extensions/transport_sockets:94.8" "source/extensions/transport_sockets/raw_buffer:90.9" "source/extensions/transport_sockets/tap:95.6" -"source/extensions/transport_sockets/tls:94.5" +"source/extensions/transport_sockets/tls:94.2" "source/extensions/transport_sockets/tls/private_key:76.9" ) @@ -57,7 +65,7 @@ COVERAGE_DIR="${SRCDIR}"/generated/coverage COVERAGE_DATA="${COVERAGE_DIR}/coverage.dat" FAILED=0 -DEFAULT_COVERAGE_THRESHOLD=97.0 +DEFAULT_COVERAGE_THRESHOLD=96.6 DIRECTORY_THRESHOLD=$DEFAULT_COVERAGE_THRESHOLD # Unfortunately we have a bunch of preexisting extensions with low coverage. diff --git a/test/run_envoy_bazel_coverage.sh b/test/run_envoy_bazel_coverage.sh index 68d5ac1567c3..84c66b79f349 100755 --- a/test/run_envoy_bazel_coverage.sh +++ b/test/run_envoy_bazel_coverage.sh @@ -49,7 +49,7 @@ if [[ "$VALIDATE_COVERAGE" == "true" ]]; then if [[ "${FUZZ_COVERAGE}" == "true" ]]; then COVERAGE_THRESHOLD=27.0 else - COVERAGE_THRESHOLD=97.0 + COVERAGE_THRESHOLD=96.5 fi COVERAGE_FAILED=$(echo "${COVERAGE_VALUE}<${COVERAGE_THRESHOLD}" | bc) if test ${COVERAGE_FAILED} -eq 1; then diff --git a/test/test_common/simulated_time_system.cc b/test/test_common/simulated_time_system.cc index dcc0374423fd..6577dccf003e 100644 --- a/test/test_common/simulated_time_system.cc +++ b/test/test_common/simulated_time_system.cc @@ -248,9 +248,10 @@ void SimulatedTimeSystemHelper::waitForNoPendingLockHeld() const &pending_alarms_)); } -Thread::CondVar::WaitStatus SimulatedTimeSystemHelper::waitFor( - Thread::MutexBasicLockable& mutex, Thread::CondVar& condvar, - const Duration& duration) noexcept ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex) { +Thread::CondVar::WaitStatus SimulatedTimeSystemHelper::waitFor(Thread::MutexBasicLockable& mutex, + Thread::CondVar& condvar, + const Duration& duration) noexcept + ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex) { only_one_thread_.checkOneThread(); // TODO(#10568): This real-time polling delay should not be necessary. 
Without diff --git a/test/test_common/simulated_time_system.h b/test/test_common/simulated_time_system.h index e578bd767071..cc34fcf953a9 100644 --- a/test/test_common/simulated_time_system.h +++ b/test/test_common/simulated_time_system.h @@ -28,9 +28,9 @@ class SimulatedTimeSystemHelper : public TestTimeSystem { // TestTimeSystem void advanceTimeWait(const Duration& duration) override; void advanceTimeAsync(const Duration& duration) override; - Thread::CondVar::WaitStatus - waitFor(Thread::MutexBasicLockable& mutex, Thread::CondVar& condvar, - const Duration& duration) noexcept ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex) override; + Thread::CondVar::WaitStatus waitFor(Thread::MutexBasicLockable& mutex, Thread::CondVar& condvar, + const Duration& duration) noexcept + ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex) override; // TimeSource SystemTime systemTime() override; diff --git a/test/test_common/test_time.h b/test/test_common/test_time.h index 4b0beec54439..f5e24b8bd8a5 100644 --- a/test/test_common/test_time.h +++ b/test/test_common/test_time.h @@ -14,9 +14,9 @@ class TestRealTimeSystem : public TestTimeSystem { // TestTimeSystem void advanceTimeAsync(const Duration& duration) override; void advanceTimeWait(const Duration& duration) override; - Thread::CondVar::WaitStatus - waitFor(Thread::MutexBasicLockable& mutex, Thread::CondVar& condvar, - const Duration& duration) noexcept ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex) override; + Thread::CondVar::WaitStatus waitFor(Thread::MutexBasicLockable& mutex, Thread::CondVar& condvar, + const Duration& duration) noexcept + ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex) override; // Event::TimeSystem Event::SchedulerPtr createScheduler(Scheduler& base_scheduler) override { diff --git a/test/test_common/test_time_system.h b/test/test_common/test_time_system.h index 449ec8065391..cb9ac3480215 100644 --- a/test/test_common/test_time_system.h +++ b/test/test_common/test_time_system.h @@ -56,14 +56,15 @@ class TestTimeSystem : public Event::TimeSystem { * @param duration The maximum amount of time to wait. * @return Thread::CondVar::WaitStatus whether the condition timed out or not. 
*/ - virtual Thread::CondVar::WaitStatus - waitFor(Thread::MutexBasicLockable& mutex, Thread::CondVar& condvar, - const Duration& duration) noexcept ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex) PURE; + virtual Thread::CondVar::WaitStatus waitFor(Thread::MutexBasicLockable& mutex, + Thread::CondVar& condvar, + const Duration& duration) noexcept + ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex) PURE; template - Thread::CondVar::WaitStatus - waitFor(Thread::MutexBasicLockable& mutex, Thread::CondVar& condvar, - const D& duration) noexcept ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex) { + Thread::CondVar::WaitStatus waitFor(Thread::MutexBasicLockable& mutex, Thread::CondVar& condvar, + const D& duration) noexcept + ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex) { return waitFor(mutex, condvar, std::chrono::duration_cast(duration)); } }; @@ -108,9 +109,9 @@ template class DelegatingTestTimeSystemBase : public T timeSystem().advanceTimeWait(duration); } - Thread::CondVar::WaitStatus - waitFor(Thread::MutexBasicLockable& mutex, Thread::CondVar& condvar, - const Duration& duration) noexcept ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex) override { + Thread::CondVar::WaitStatus waitFor(Thread::MutexBasicLockable& mutex, Thread::CondVar& condvar, + const Duration& duration) noexcept + ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex) override { return timeSystem().waitFor(mutex, condvar, duration); } diff --git a/tools/api_boost/api_boost.py b/tools/api_boost/api_boost.py index e644680e3392..8916e8b82822 100755 --- a/tools/api_boost/api_boost.py +++ b/tools/api_boost/api_boost.py @@ -44,7 +44,8 @@ def ApiBoostFile(llvm_include_path, debug_log, path): result = sp.run([ './bazel-bin/external/envoy_dev/clang_tools/api_booster/api_booster', '--extra-arg-before=-xc++', - '--extra-arg=-isystem%s' % llvm_include_path, '--extra-arg=-Wno-undefined-internal', path + '--extra-arg=-isystem%s' % llvm_include_path, '--extra-arg=-Wno-undefined-internal', + '--extra-arg=-Wno-old-style-cast', path ], capture_output=True, check=True) diff --git a/tools/clang_tools/README.md b/tools/clang_tools/README.md index a53ad6038af3..705138a7e357 100644 --- a/tools/clang_tools/README.md +++ b/tools/clang_tools/README.md @@ -14,7 +14,7 @@ framework for writing Clang tools in the style of `clang-format` and To build tools in this tree, a Clang binary install must be available. If you are building Envoy with `clang`, this should already be true of your system. You can find prebuilt binary releases of Clang at https://releases.llvm.org. You -will need the Clang version used by Envoy in CI (currently clang-9.0). +will need the Clang version used by Envoy in CI (currently clang-10.0). To build a tool, set the following environment variable: diff --git a/tools/clang_tools/support/BUILD.prebuilt b/tools/clang_tools/support/BUILD.prebuilt index 277c9ad802f8..e77dcb0fe268 100644 --- a/tools/clang_tools/support/BUILD.prebuilt +++ b/tools/clang_tools/support/BUILD.prebuilt @@ -1,8 +1,8 @@ -# Clang 9.0 library pre-built Bazel. +# Clang 10.0 library pre-built Bazel. # # This file was mostly manually assembled (with some hacky Python scripts) from -# clang+llvm-9.0.0-x86_64-linux-gnu-ubuntu-16.04.tar.xz and corresponding -# https://github.com/llvm/llvm-project.git source. It needs Clang 9.0 to work. +# clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04.tar.xz and corresponding +# https://github.com/llvm/llvm-project.git source. It needs Clang 10.0 to work. 
# # The BUILD file has sufficient dependency relationships # between the prebuilt libraries in a clang-llvm distribution to support building libtooling @@ -152,6 +152,7 @@ cc_library( ":clang_basic", ":clang_lex", ":clang_sema", + ":llvm_frontend_omp", ":llvm_mc", ":llvm_mcparser", ":llvm_support", @@ -306,6 +307,12 @@ cc_library( hdrs = glob(["llvm/Demangle/**"]), ) +cc_library( + name = "llvm_frontend_omp", + srcs = ["lib/libLLVMFrontendOpenMP.a"], + hdrs = glob(["llvm/Frontend/OpenMP/**"]), +) + cc_library( name = "llvm_mc", srcs = ["lib/libLLVMMC.a"], diff --git a/tools/clang_tools/support/clang_tools.bzl b/tools/clang_tools/support/clang_tools.bzl index a738fa57f28d..949903aaff05 100644 --- a/tools/clang_tools/support/clang_tools.bzl +++ b/tools/clang_tools/support/clang_tools.bzl @@ -1,26 +1,31 @@ load("@rules_cc//cc:defs.bzl", "cc_binary", "cc_library", "cc_test") +_clang_tools_copts = [ + "-std=c++14", + "-fno-exceptions", + "-fno-rtti", +] + def clang_tools_cc_binary(name, copts = [], tags = [], deps = [], **kwargs): cc_binary( name = name, - copts = copts + [ - "-fno-exceptions", - "-fno-rtti", - ], + copts = copts + _clang_tools_copts, tags = tags + ["manual"], deps = deps + ["@envoy//bazel/foreign_cc:zlib"], **kwargs ) -def clang_tools_cc_library(name, **kwargs): +def clang_tools_cc_library(name, copts = [], **kwargs): cc_library( name = name, + copts = copts + _clang_tools_copts, **kwargs ) -def clang_tools_cc_test(name, deps = [], **kwargs): +def clang_tools_cc_test(name, copts = [], deps = [], **kwargs): cc_test( name = name, + copts = copts + _clang_tools_copts, deps = deps + ["@com_google_googletest//:gtest_main"], **kwargs ) diff --git a/tools/code_format/check_format.py b/tools/code_format/check_format.py index 2dc4ed979f6d..689ed09c7e5b 100755 --- a/tools/code_format/check_format.py +++ b/tools/code_format/check_format.py @@ -87,7 +87,7 @@ # Only one C++ file should instantiate grpc_init GRPC_INIT_WHITELIST = ("./source/common/grpc/google_grpc_context.cc") -CLANG_FORMAT_PATH = os.getenv("CLANG_FORMAT", "clang-format-9") +CLANG_FORMAT_PATH = os.getenv("CLANG_FORMAT", "clang-format-10") BUILDIFIER_PATH = paths.getBuildifier() BUILDOZER_PATH = paths.getBuildozer() ENVOY_BUILD_FIXER_PATH = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), @@ -245,13 +245,13 @@ def checkTools(): "users".format(CLANG_FORMAT_PATH)) else: error_messages.append( - "Command {} not found. If you have clang-format in version 9.x.x " + "Command {} not found. If you have clang-format in version 10.x.x " "installed, but the binary name is different or it's not available in " "PATH, please use CLANG_FORMAT environment variable to specify the path. " "Examples:\n" - " export CLANG_FORMAT=clang-format-9.0.0\n" - " export CLANG_FORMAT=/opt/bin/clang-format-9\n" - " export CLANG_FORMAT=/usr/local/opt/llvm@9/bin/clang-format".format(CLANG_FORMAT_PATH)) + " export CLANG_FORMAT=clang-format-10.0.0\n" + " export CLANG_FORMAT=/opt/bin/clang-format-10\n" + " export CLANG_FORMAT=/usr/local/opt/llvm@10/bin/clang-format".format(CLANG_FORMAT_PATH)) def checkBazelTool(name, path, var): bazel_tool_abs_path = lookPath(path) From f67efb1907ddfdfdc6de619a17dc0a8e76d82b1d Mon Sep 17 00:00:00 2001 From: Arthur Yan <55563955+arthuryan-k@users.noreply.github.com> Date: Thu, 18 Jun 2020 08:32:00 -0400 Subject: [PATCH 380/909] fuzz: added fuzz tests for network utility functions (#11613) fuzz: added fuzz tests for some network utility functions. 
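The new `utility_fuzz_test.cc` in the diff below hands the raw fuzz input to several `Network::Utility` parsing helpers and treats `EnvoyException` as the expected outcome for malformed input rather than a crash. A condensed sketch of that shape follows; the complete harness, which also covers port-range parsing and protobuf address conversion via `FuzzedDataProvider`, appears in the diff itself:

```cpp
#include "common/network/utility.h"

#include "test/fuzz/fuzz_runner.h"

namespace Envoy {
namespace Fuzz {

// Condensed sketch of the harness added below: feed the raw input to one of
// the parsing utilities and log, rather than propagate, the EnvoyException
// thrown for malformed addresses, so the fuzzer only reports real crashes.
DEFINE_FUZZER(const uint8_t* buf, size_t len) {
  const std::string input(reinterpret_cast<const char*>(buf), len);
  try {
    Network::Utility::parseInternetAddressAndPort(input);
  } catch (const EnvoyException& e) {
    ENVOY_LOG_MISC(debug, "EnvoyException: {}", e.what());
  }
}

} // namespace Fuzz
} // namespace Envoy
```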
increased line coverage of source/common/network/utility.cc by 21.0% Signed-off-by: Arthur Yan --- test/common/network/BUILD | 12 ++++ test/common/network/utility_corpus/test | 1 + test/common/network/utility_fuzz_test.cc | 71 ++++++++++++++++++++++++ 3 files changed, 84 insertions(+) create mode 100644 test/common/network/utility_corpus/test create mode 100644 test/common/network/utility_fuzz_test.cc diff --git a/test/common/network/BUILD b/test/common/network/BUILD index 25e5efd4629f..dafca5dc1c31 100644 --- a/test/common/network/BUILD +++ b/test/common/network/BUILD @@ -2,6 +2,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_benchmark_test", "envoy_cc_benchmark_binary", + "envoy_cc_fuzz_test", "envoy_cc_test", "envoy_cc_test_binary", "envoy_cc_test_library", @@ -299,6 +300,17 @@ envoy_cc_test( ], ) +envoy_cc_fuzz_test( + name = "utility_fuzz_test", + srcs = ["utility_fuzz_test.cc"], + corpus = "utility_corpus", + deps = [ + "//source/common/network:address_lib", + "//source/common/network:utility_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + ], +) + envoy_cc_test_binary( name = "lc_trie_speed_test", srcs = ["lc_trie_speed_test.cc"], diff --git a/test/common/network/utility_corpus/test b/test/common/network/utility_corpus/test new file mode 100644 index 000000000000..f7ea73bed6ef --- /dev/null +++ b/test/common/network/utility_corpus/test @@ -0,0 +1 @@ +127.0.0.1:0 \ No newline at end of file diff --git a/test/common/network/utility_fuzz_test.cc b/test/common/network/utility_fuzz_test.cc new file mode 100644 index 000000000000..8d49667a24e0 --- /dev/null +++ b/test/common/network/utility_fuzz_test.cc @@ -0,0 +1,71 @@ +#include "envoy/config/core/v3/address.pb.h" + +#include "common/network/address_impl.h" +#include "common/network/utility.h" + +#include "test/fuzz/fuzz_runner.h" + +namespace Envoy { +namespace Fuzz { + +DEFINE_FUZZER(const uint8_t* buf, size_t len) { + const std::string string_buffer(reinterpret_cast(buf), len); + + try { + Network::Utility::parseInternetAddress(string_buffer); + } catch (const EnvoyException& e) { + ENVOY_LOG_MISC(debug, "EnvoyException: {}", e.what()); + } + + try { + Network::Utility::parseInternetAddressAndPort(string_buffer); + } catch (const EnvoyException& e) { + ENVOY_LOG_MISC(debug, "EnvoyException: {}", e.what()); + } + + try { + std::list port_range_list; + Network::Utility::parsePortRangeList(string_buffer, port_range_list); + } catch (const EnvoyException& e) { + ENVOY_LOG_MISC(debug, "EnvoyException: {}", e.what()); + } + + try { + envoy::config::core::v3::Address proto_address; + proto_address.mutable_pipe()->set_path(string_buffer); + Network::Utility::protobufAddressToAddress(proto_address); + } catch (const EnvoyException& e) { + ENVOY_LOG_MISC(debug, "EnvoyException: {}", e.what()); + } + + try { + FuzzedDataProvider provider(buf, len); + envoy::config::core::v3::Address proto_address; + const auto port_value = provider.ConsumeIntegral(); + const std::string address_value = provider.ConsumeRemainingBytesAsString(); + proto_address.mutable_socket_address()->set_address(address_value); + proto_address.mutable_socket_address()->set_port_value(port_value); + Network::Utility::protobufAddressToAddress(proto_address); + } catch (const EnvoyException& e) { + ENVOY_LOG_MISC(debug, "EnvoyException: {}", e.what()); + } + + try { + envoy::config::core::v3::Address proto_address; + Network::Address::Ipv4Instance address(string_buffer); + Network::Utility::addressToProtobufAddress(address, proto_address); + } catch (const 
EnvoyException& e) { + ENVOY_LOG_MISC(debug, "EnvoyException: {}", e.what()); + } + + try { + envoy::config::core::v3::Address proto_address; + Network::Address::PipeInstance address(string_buffer); + Network::Utility::addressToProtobufAddress(address, proto_address); + } catch (const EnvoyException& e) { + ENVOY_LOG_MISC(debug, "EnvoyException: {}", e.what()); + } +} + +} // namespace Fuzz +} // namespace Envoy From 5faff2add085eb4f11046725861233a63d89ad7c Mon Sep 17 00:00:00 2001 From: "Adi (Suissa) Peleg" Date: Thu, 18 Jun 2020 08:32:39 -0400 Subject: [PATCH 381/909] Throwing an exception when setting hidden_envoy_deprecated fields (#11160) Disabling direct setting of hidden_envoy_deprecated fields, while still allowing the setting by an upgraded protobuf message. Fixes: #9496 Risk Level: Medium - in cases where operators were using deprecated fields and they weren't supposed to, an exception will be thrown. Testing: Added a general test for deprecated.proto, and a specific test for EDS. Signed-off-by: Adi Suissa-Peleg --- ci/filter_example_setup.sh | 2 +- source/common/config/version_converter.cc | 4 +- source/common/config/version_converter.h | 3 + source/common/protobuf/utility.cc | 45 ++- source/server/options_impl.h | 1 + test/common/protobuf/utility_test.cc | 54 ++- test/common/router/config_impl_test.cc | 348 +++++++++--------- test/common/upstream/eds_test.cc | 28 ++ test/config_test/BUILD | 16 + test/config_test/config_test.cc | 27 ++ test/config_test/config_test.h | 19 + test/config_test/deprecated_configs_test.cc | 239 ++++++++++++ .../network/redis_proxy/config_test.cc | 4 +- .../filters/network/tcp_proxy/config_test.cc | 2 +- test/integration/integration.cc | 2 +- .../integration/tcp_proxy_integration_test.cc | 6 + test/integration/version_integration_test.cc | 5 + test/test_common/utility.h | 19 +- 18 files changed, 629 insertions(+), 195 deletions(-) create mode 100644 test/config_test/deprecated_configs_test.cc diff --git a/ci/filter_example_setup.sh b/ci/filter_example_setup.sh index 1991ea1244da..ade91f673b87 100644 --- a/ci/filter_example_setup.sh +++ b/ci/filter_example_setup.sh @@ -5,7 +5,7 @@ set -e # This is the hash on https://github.com/envoyproxy/envoy-filter-example.git we pin to. 
-ENVOY_FILTER_EXAMPLE_GITSHA="bb2e91fde758446fbccc3f8fedffce1827a47bcb" +ENVOY_FILTER_EXAMPLE_GITSHA="777342f20d93b3a50b641556749ad41502a63d09" ENVOY_FILTER_EXAMPLE_SRCDIR="${BUILD_DIR}/envoy-filter-example" export ENVOY_FILTER_EXAMPLE_TESTS="//:echo2_integration_test //http-filter-example:http_filter_integration_test //:envoy_binary_test" diff --git a/source/common/config/version_converter.cc b/source/common/config/version_converter.cc index 2c4949bff071..9e97bb53ab03 100644 --- a/source/common/config/version_converter.cc +++ b/source/common/config/version_converter.cc @@ -14,8 +14,6 @@ namespace Config { namespace { -const char DeprecatedFieldShadowPrefix[] = "hidden_envoy_deprecated_"; - class ProtoVisitor { public: virtual ~ProtoVisitor() = default; @@ -218,5 +216,7 @@ void VersionUtil::scrubHiddenEnvoyDeprecated(Protobuf::Message& message) { ProtobufMessage::traverseMutableMessage(proto_visitor, message, nullptr); } +const char VersionUtil::DeprecatedFieldShadowPrefix[] = "hidden_envoy_deprecated_"; + } // namespace Config } // namespace Envoy diff --git a/source/common/config/version_converter.h b/source/common/config/version_converter.h index cd7ee29afdc7..22ecc383d491 100644 --- a/source/common/config/version_converter.h +++ b/source/common/config/version_converter.h @@ -115,6 +115,9 @@ class VersionUtil { public: // Some helpers for working with earlier message version deprecated fields. static void scrubHiddenEnvoyDeprecated(Protobuf::Message& message); + + // A prefix that is added to deprecated fields names upon shadowing. + static const char DeprecatedFieldShadowPrefix[]; }; } // namespace Config diff --git a/source/common/protobuf/utility.cc b/source/common/protobuf/utility.cc index 8bd7ed67c41e..f29c39430773 100644 --- a/source/common/protobuf/utility.cc +++ b/source/common/protobuf/utility.cc @@ -343,13 +343,33 @@ void MessageUtil::loadFromFile(const std::string& path, Protobuf::Message& messa // If the filename ends with .pb, attempt to parse it as a binary proto. if (absl::EndsWith(path, FileExtensions::get().ProtoBinary)) { // Attempt to parse the binary format. - if (message.ParseFromString(contents)) { - MessageUtil::checkForUnexpectedFields(message, validation_visitor); - return; + auto read_proto_binary = [&contents, &validation_visitor](Protobuf::Message& message, + MessageVersion message_version) { + try { + if (message.ParseFromString(contents)) { + MessageUtil::checkForUnexpectedFields(message, validation_visitor); + } + return; + } catch (EnvoyException& ex) { + if (message_version == MessageVersion::LATEST_VERSION) { + // Failed reading the latest version - pass the same error upwards + throw ex; + } + } + throw ApiBoostRetryException( + "Failed to parse at earlier version, trying again at later version."); + }; + + if (do_boosting) { + // Attempts to read as the previous version and upgrade, and if it fails + // attempts to read as latest version. + tryWithApiBoosting(read_proto_binary, message); + } else { + read_proto_binary(message, MessageVersion::LATEST_VERSION); } - throw EnvoyException("Unable to parse file \"" + path + "\" as a binary protobuf (type " + - message.GetTypeName() + ")"); + return; } + // If the filename ends with .pb_text, attempt to parse it as a text proto. 
if (absl::EndsWith(path, FileExtensions::get().ProtoText)) { auto read_proto_text = [&contents, &path](Protobuf::Message& message, @@ -435,6 +455,21 @@ class UnexpectedFieldProtoVisitor : public ProtobufMessage::ConstProtoVisitor { // If this field is deprecated, warn or throw an error. if (field.options().deprecated()) { + if (absl::StartsWith(field.name(), Config::VersionUtil::DeprecatedFieldShadowPrefix)) { + // The field was marked as hidden_envoy_deprecated and an error must be thrown, + // unless it is part of an explicit test that needs access to the deprecated field + // when we enable runtime deprecation override to allow point field overrides for tests. + if (!runtime_ || + !runtime_->snapshot().deprecatedFeatureEnabled( + absl::StrCat("envoy.deprecated_features:", field.full_name()), false)) { + const std::string fatal_error = absl::StrCat( + "Illegal use of hidden_envoy_deprecated_ V2 field '", field.full_name(), + "' from file ", filename, + " while using the latest V3 configuration. This field has been removed from the " + "current Envoy API. Please see " ENVOY_DOC_URL_VERSION_HISTORY " for details."); + throw ProtoValidationException(fatal_error, message); + } + } const std::string warning = absl::StrCat("Using {}deprecated option '", field.full_name(), "' from file ", filename, ". This configuration will be removed from " diff --git a/source/server/options_impl.h b/source/server/options_impl.h index 216fcf9eb548..d609986926ec 100644 --- a/source/server/options_impl.h +++ b/source/server/options_impl.h @@ -58,6 +58,7 @@ class OptionsImpl : public Server::Options, protected Logger::Loggable(R"EOF( + not_deprecated: field1 + is_deprecated: hidden_field1 + not_deprecated_message: + inner_not_deprecated: subfield1 + repeated_message: + - inner_not_deprecated: subfield2 + )EOF"); + + // Non-fatal checks for a deprecated field should log rather than throw an exception. + EXPECT_LOG_CONTAINS("warning", + "Using deprecated option 'envoy.test.deprecation_test.Base.is_deprecated'", + checkForDeprecation(base_should_warn)); + EXPECT_EQ(1, runtime_deprecated_feature_use_.value()); + EXPECT_EQ(1, deprecated_feature_seen_since_process_start_.value()); + + // Create an upgraded message and insert a deprecated field. This is a bypass + // of the upgrading procedure validation, and should fail + envoy::test::deprecation_test::UpgradedBase base_should_fail = + TestUtility::parseYaml(R"EOF( + not_deprecated: field1 + hidden_envoy_deprecated_is_deprecated: hidden_field1 + not_deprecated_message: + inner_not_deprecated: subfield1 + repeated_message: + - inner_not_deprecated: subfield2 + )EOF"); + + EXPECT_THROW_WITH_REGEX( + MessageUtil::checkForUnexpectedFields(base_should_fail, + ProtobufMessage::getStrictValidationVisitor()), + ProtoValidationException, + "Illegal use of hidden_envoy_deprecated_ V2 field " + "'envoy.test.deprecation_test.UpgradedBase.hidden_envoy_deprecated_is_deprecated'"); + // The config will be rejected, so the feature will not be used. 
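The practical effect of the shadow-prefix check added to the unexpected-field visitor above is that a config setting a `hidden_envoy_deprecated_*` field is now rejected with a `ProtoValidationException` instead of only producing the usual deprecation warning, unless the per-field runtime key `envoy.deprecated_features:<full field name>` has been enabled. A simplified sketch of that guard, assuming the same runtime snapshot API the code above uses:

```cpp
#include "envoy/runtime/runtime.h"

#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"

namespace Envoy {

// Simplified sketch of the guard above: a shadowed (hidden_envoy_deprecated_)
// field is tolerated only when its per-field runtime override is enabled;
// otherwise the caller throws instead of merely warning.
bool shadowedFieldAllowed(const Runtime::Loader* runtime, absl::string_view field_full_name) {
  return runtime != nullptr &&
         runtime->snapshot().deprecatedFeatureEnabled(
             absl::StrCat("envoy.deprecated_features:", field_full_name), false);
}

} // namespace Envoy
```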
+ EXPECT_EQ(1, runtime_deprecated_feature_use_.value()); + EXPECT_EQ(1, deprecated_feature_seen_since_process_start_.value()); +} + class TimestampUtilTest : public testing::Test, public ::testing::WithParamInterface {}; TEST_P(TimestampUtilTest, SystemClockToTimestampTest) { diff --git a/test/common/router/config_impl_test.cc b/test/common/router/config_impl_test.cc index fa446a0d12f7..93caa4b5367c 100644 --- a/test/common/router/config_impl_test.cc +++ b/test/common/router/config_impl_test.cc @@ -134,10 +134,13 @@ Http::TestRequestHeaderMapImpl genHeaders(const std::string& host, const std::st return genHeaders(host, path, method, "http"); } +// Loads a V3 RouteConfiguration yaml envoy::config::route::v3::RouteConfiguration -parseRouteConfigurationFromV2Yaml(const std::string& yaml) { +parseRouteConfigurationFromYaml(const std::string& yaml) { envoy::config::route::v3::RouteConfiguration route_config; - TestUtility::loadFromYaml(yaml, route_config); + // Load the file and keep the annotations (in case of an upgrade) to make sure + // validate() observes the upgrade + TestUtility::loadFromYaml(yaml, route_config, true); TestUtility::validate(route_config); return route_config; } @@ -379,7 +382,7 @@ TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(TestLegacyRoutes)) { )EOF"; NiceMock stream_info; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); // Regular Expression matching EXPECT_EQ("clock", @@ -525,7 +528,7 @@ TEST_F(RouteMatcherTest, TestConnectRoutes) { name: ulu )EOF"; NiceMock stream_info; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); // Connect matching EXPECT_EQ("connect_match", @@ -810,7 +813,7 @@ TEST_F(RouteMatcherTest, TestRoutes) { name: ulu )EOF"; NiceMock stream_info; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); // No host header, no x-forwarded-proto and no path header testing. 
EXPECT_EQ(nullptr, @@ -1178,7 +1181,7 @@ TEST_F(RouteMatcherTest, TestRoutesWithWildcardAndDefaultOnly) { route: { cluster: "default" } )EOF"; - const auto proto_config = parseRouteConfigurationFromV2Yaml(yaml); + const auto proto_config = parseRouteConfigurationFromYaml(yaml); TestConfigImpl config(proto_config, factory_context_, true); EXPECT_EQ("wildcard", @@ -1213,10 +1216,10 @@ TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(TestRoutesWithInvalidRegexLegac NiceMock stream_info; EXPECT_THROW_WITH_REGEX( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(invalid_route), factory_context_, true), + TestConfigImpl(parseRouteConfigurationFromYaml(invalid_route), factory_context_, true), EnvoyException, "Invalid regex '/\\(\\+invalid\\)':"); - EXPECT_THROW_WITH_REGEX(TestConfigImpl(parseRouteConfigurationFromV2Yaml(invalid_virtual_cluster), + EXPECT_THROW_WITH_REGEX(TestConfigImpl(parseRouteConfigurationFromYaml(invalid_virtual_cluster), factory_context_, true), EnvoyException, "Invalid regex '\\^/\\(\\+invalid\\)':"); } @@ -1253,10 +1256,10 @@ TEST_F(RouteMatcherTest, TestRoutesWithInvalidRegex) { NiceMock stream_info; EXPECT_THROW_WITH_REGEX( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(invalid_route), factory_context_, true), + TestConfigImpl(parseRouteConfigurationFromYaml(invalid_route), factory_context_, true), EnvoyException, "no argument for repetition operator:"); - EXPECT_THROW_WITH_REGEX(TestConfigImpl(parseRouteConfigurationFromV2Yaml(invalid_virtual_cluster), + EXPECT_THROW_WITH_REGEX(TestConfigImpl(parseRouteConfigurationFromYaml(invalid_virtual_cluster), factory_context_, true), EnvoyException, "no argument for repetition operator"); } @@ -1275,7 +1278,7 @@ TEST_F(RouteMatcherTest, TestRoutesWithInvalidVirtualCluster) { - name: "invalid" )EOF"; - EXPECT_THROW_WITH_REGEX(TestConfigImpl(parseRouteConfigurationFromV2Yaml(invalid_virtual_cluster), + EXPECT_THROW_WITH_REGEX(TestConfigImpl(parseRouteConfigurationFromYaml(invalid_virtual_cluster), factory_context_, true), EnvoyException, "virtual clusters must define either 'pattern' or 'headers'"); @@ -1370,7 +1373,7 @@ TEST_F(RouteMatcherTest, TestAddRemoveRequestHeaders) { NiceMock stream_info; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); // Request header manipulation testing. { @@ -1420,8 +1423,7 @@ TEST_F(RouteMatcherTest, TestRequestHeadersToAddWithAppendFalse) { const std::string yaml = requestHeadersConfig(false); NiceMock stream_info; - envoy::config::route::v3::RouteConfiguration route_config = - parseRouteConfigurationFromV2Yaml(yaml); + envoy::config::route::v3::RouteConfiguration route_config = parseRouteConfigurationFromYaml(yaml); TestConfigImpl config(route_config, factory_context_, true); @@ -1478,7 +1480,7 @@ TEST_F(RouteMatcherTest, TestRequestHeadersToAddWithAppendFalseMostSpecificWins) const std::string yaml = requestHeadersConfig(true); NiceMock stream_info; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); // Route overrides vhost and global. 
{ @@ -1517,7 +1519,7 @@ TEST_F(RouteMatcherTest, TestAddRemoveResponseHeaders) { const std::string yaml = responseHeadersConfig(false, true); NiceMock stream_info; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); // Response header manipulation testing. { @@ -1573,7 +1575,7 @@ TEST_F(RouteMatcherTest, TestAddRemoveResponseHeadersAppendFalse) { const std::string yaml = responseHeadersConfig(false, false); NiceMock stream_info; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); Http::TestRequestHeaderMapImpl req_headers = genHeaders("www.lyft.com", "/new_endpoint/foo", "GET"); @@ -1589,7 +1591,7 @@ TEST_F(RouteMatcherTest, TestAddRemoveResponseHeadersAppendMostSpecificWins) { const std::string yaml = responseHeadersConfig(true, false); NiceMock stream_info; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); Http::TestRequestHeaderMapImpl req_headers = genHeaders("www.lyft.com", "/new_endpoint/foo", "GET"); @@ -1625,7 +1627,7 @@ most_specific_header_mutations_wins: true )EOF"; NiceMock stream_info; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); { Http::TestRequestHeaderMapImpl req_headers = genHeaders("www.lyft.com", "/cacheable", "GET"); @@ -1664,7 +1666,7 @@ name: foo NiceMock stream_info; envoy::config::route::v3::RouteConfiguration route_config = - parseRouteConfigurationFromV2Yaml(yaml); + parseRouteConfigurationFromYaml(yaml); EXPECT_THROW_WITH_MESSAGE(TestConfigImpl config(route_config, factory_context_, true), EnvoyException, ":-prefixed headers may not be modified"); @@ -1688,7 +1690,7 @@ name: foo NiceMock stream_info; envoy::config::route::v3::RouteConfiguration route_config = - parseRouteConfigurationFromV2Yaml(yaml); + parseRouteConfigurationFromYaml(yaml); EXPECT_THROW_WITH_MESSAGE(TestConfigImpl config(route_config, factory_context_, true), EnvoyException, ":-prefixed or host headers may not be removed"); @@ -1713,7 +1715,7 @@ TEST_F(RouteMatcherTest, Priority) { cluster: local_service_grpc )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); EXPECT_EQ(Upstream::ResourcePriority::High, config.route(genHeaders("www.lyft.com", "/foo", "GET"), 0)->routeEntry()->priority()); @@ -1736,7 +1738,7 @@ TEST_F(RouteMatcherTest, NoHostRewriteAndAutoRewrite) { auto_host_rewrite: true )EOF"; - EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), + EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException); } @@ -1755,7 +1757,7 @@ TEST_F(RouteMatcherTest, NoHostRewriteAndAutoRewriteHeader) { auto_host_rewrite_header: "dummy-header" )EOF"; - EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), + EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException); } @@ -1774,7 +1776,7 @@ TEST_F(RouteMatcherTest, NoAutoRewriteAndAutoRewriteHeader) { auto_host_rewrite_header: "dummy-header" 
)EOF"; - EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), + EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException); } @@ -1839,7 +1841,7 @@ TEST_F(RouteMatcherTest, HeaderMatchedRouting) { cluster: local_service_without_headers )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); { EXPECT_EQ("local_service_without_headers", @@ -1932,11 +1934,11 @@ TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(InvalidHeaderMatchedRoutingConf route: { cluster: "local_service" } )EOF"; - EXPECT_NO_THROW(TestConfigImpl(parseRouteConfigurationFromV2Yaml(value_with_regex_chars), + EXPECT_NO_THROW(TestConfigImpl(parseRouteConfigurationFromYaml(value_with_regex_chars), factory_context_, true)); EXPECT_THROW_WITH_REGEX( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(invalid_regex), factory_context_, true), + TestConfigImpl(parseRouteConfigurationFromYaml(invalid_regex), factory_context_, true), EnvoyException, "Invalid regex"); } @@ -1970,11 +1972,11 @@ TEST_F(RouteMatcherTest, InvalidHeaderMatchedRoutingConfig) { route: { cluster: "local_service" } )EOF"; - EXPECT_NO_THROW(TestConfigImpl(parseRouteConfigurationFromV2Yaml(value_with_regex_chars), + EXPECT_NO_THROW(TestConfigImpl(parseRouteConfigurationFromYaml(value_with_regex_chars), factory_context_, true)); EXPECT_THROW_WITH_REGEX( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(invalid_regex), factory_context_, true), + TestConfigImpl(parseRouteConfigurationFromYaml(invalid_regex), factory_context_, true), EnvoyException, "no argument for repetition operator"); } @@ -2030,7 +2032,7 @@ TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(QueryParamMatchedRouting)) { )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); { Http::TestRequestHeaderMapImpl headers = genHeaders("example.com", "/", "GET"); @@ -2124,11 +2126,11 @@ TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(InvalidQueryParamMatchedRouting route: { cluster: "local_service" } )EOF"; - EXPECT_NO_THROW(TestConfigImpl(parseRouteConfigurationFromV2Yaml(value_with_regex_chars), + EXPECT_NO_THROW(TestConfigImpl(parseRouteConfigurationFromYaml(value_with_regex_chars), factory_context_, true)); EXPECT_THROW_WITH_REGEX( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(invalid_regex), factory_context_, true), + TestConfigImpl(parseRouteConfigurationFromYaml(invalid_regex), factory_context_, true), EnvoyException, "Invalid regex"); } @@ -2152,7 +2154,7 @@ class RouterMatcherHashPolicyTest : public testing::Test, public ConfigImplTestB route: cluster: bar )EOF"; - route_config_ = parseRouteConfigurationFromV2Yaml(yaml); + route_config_ = parseRouteConfigurationFromYaml(yaml); } envoy::config::route::v3::RouteAction::HashPolicy* firstRouteHashPolicy() { @@ -2708,7 +2710,7 @@ TEST_F(RouteMatcherTest, ClusterHeader) { )EOF"; NiceMock stream_info; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); EXPECT_EQ( "some_cluster", @@ -2763,7 +2765,7 @@ TEST_F(RouteMatcherTest, ContentType) { cluster: local_service )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl 
config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); { EXPECT_EQ("local_service", @@ -2801,7 +2803,7 @@ TEST_F(RouteMatcherTest, GrpcTimeoutOffset) { cluster: local_service_grpc )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); { EXPECT_EQ( @@ -2835,7 +2837,7 @@ TEST_F(RouteMatcherTest, GrpcTimeoutOffsetOfDynamicRoute) { cluster_header: request_to )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); { Http::TestRequestHeaderMapImpl reqeust_headers = genHeaders("www.lyft.com", "/", "GET"); @@ -2881,7 +2883,7 @@ TEST_F(RouteMatcherTest, FractionalRuntime) { Runtime::MockSnapshot snapshot; ON_CALL(factory_context_.runtime_loader_, snapshot()).WillByDefault(ReturnRef(snapshot)); - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, false); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, false); EXPECT_CALL(snapshot, featureEnabled("bogus_key", Matcher(_), 41)) @@ -2918,7 +2920,7 @@ TEST_F(RouteMatcherTest, ShadowClusterNotFound) { EXPECT_CALL(factory_context_.cluster_manager_, get(Eq("some_cluster"))) .WillRepeatedly(Return(nullptr)); - EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), + EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException); } @@ -2937,7 +2939,7 @@ TEST_F(RouteMatcherTest, ClusterNotFound) { EXPECT_CALL(factory_context_.cluster_manager_, get(Eq("www2"))).WillRepeatedly(Return(nullptr)); - EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), + EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException); } @@ -2956,7 +2958,7 @@ TEST_F(RouteMatcherTest, ClusterNotFoundNotChecking) { EXPECT_CALL(factory_context_.cluster_manager_, get(Eq("www2"))).WillRepeatedly(Return(nullptr)); - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, false); + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, false); } TEST_F(RouteMatcherTest, ClusterNotFoundNotCheckingViaConfig) { @@ -2975,7 +2977,7 @@ validate_clusters: false EXPECT_CALL(factory_context_.cluster_manager_, get(Eq("www2"))).WillRepeatedly(Return(nullptr)); - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true); } TEST_F(RouteMatcherTest, AttemptCountHeader) { @@ -2991,7 +2993,7 @@ TEST_F(RouteMatcherTest, AttemptCountHeader) { cluster: "whatever" )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); EXPECT_TRUE(config.route(genHeaders("www.lyft.com", "/foo", "GET"), 0) ->routeEntry() @@ -3013,7 +3015,7 @@ TEST_F(RouteMatcherTest, ClusterNotFoundResponseCode) { cluster: "not_found" )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, false); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, false); Http::TestRequestHeaderMapImpl headers = genHeaders("www.lyft.com", "/", "GET"); @@ -3034,7 +3036,7 @@ TEST_F(RouteMatcherTest, ClusterNotFoundResponseCodeConfig503) { 
cluster_not_found_response_code: SERVICE_UNAVAILABLE )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, false); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, false); Http::TestRequestHeaderMapImpl headers = genHeaders("www.lyft.com", "/", "GET"); @@ -3055,7 +3057,7 @@ TEST_F(RouteMatcherTest, ClusterNotFoundResponseCodeConfig404) { cluster_not_found_response_code: NOT_FOUND )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, false); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, false); Http::TestRequestHeaderMapImpl headers = genHeaders("www.lyft.com", "/", "GET"); @@ -3107,7 +3109,7 @@ TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(Shadow)) { cluster: www2 )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); const auto& foo_shadow_policies = config.route(genHeaders("www.lyft.com", "/foo", "GET"), 0)->routeEntry()->shadowPolicies(); @@ -3153,8 +3155,8 @@ TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(ShadowPolicyAndPolicies)) { )EOF"; EXPECT_THROW_WITH_MESSAGE( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), - EnvoyException, "Cannot specify both request_mirror_policy and request_mirror_policies"); + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, + "Cannot specify both request_mirror_policy and request_mirror_policies"); } class RouteConfigurationV2 : public testing::Test, public ConfigImplTestBase {}; @@ -3181,7 +3183,7 @@ name: foo )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); EXPECT_EQ("foo_mirror", config.route(genHeaders("mirror.lyft.com", "/foo", "GET"), 0) ->routeEntry() @@ -3229,7 +3231,7 @@ TEST_F(RouteMatcherTest, Retry) { retry_on: 5xx,gateway-error,connect-failure,reset )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); EXPECT_EQ(std::chrono::milliseconds(0), config.route(genHeaders("www.lyft.com", "/foo", "GET"), 0) @@ -3297,7 +3299,7 @@ name: RetryVirtualHostLevel route: {cluster: www} )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); // Route level retry policy takes precedence. 
EXPECT_EQ(std::chrono::milliseconds(0), @@ -3381,7 +3383,7 @@ TEST_F(RouteMatcherTest, GrpcRetry) { retry_on: 5xx,deadline-exceeded,resource-exhausted )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); EXPECT_EQ(std::chrono::milliseconds(0), config.route(genHeaders("www.lyft.com", "/foo", "GET"), 0) @@ -3468,7 +3470,7 @@ TEST_F(RouteMatcherTest, RetryBackOffIntervals) { retry_on: connect-failure )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); EXPECT_EQ(absl::optional(50), config.route(genHeaders("www.lyft.com", "/foo", "GET"), 0) @@ -3534,7 +3536,7 @@ TEST_F(RouteMatcherTest, InvalidRetryBackOff) { )EOF"; EXPECT_THROW_WITH_MESSAGE( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(invalid_max), factory_context_, true), + TestConfigImpl(parseRouteConfigurationFromYaml(invalid_max), factory_context_, true), EnvoyException, "retry_policy.max_interval must greater than or equal to the base_interval"); } @@ -3566,7 +3568,7 @@ name: HedgeRouteLevel denominator: HUNDRED )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); EXPECT_EQ(3, config.route(genHeaders("www.lyft.com", "/foo", "GET"), 0) ->routeEntry() @@ -3634,7 +3636,7 @@ name: HedgeVirtualHostLevel route: {cluster: www} )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); // Route level hedge policy takes precedence. EXPECT_EQ(1, config.route(genHeaders("www.lyft.com", "/foo", "GET"), 0) @@ -3706,9 +3708,8 @@ TEST_F(RouteMatcherTest, TestBadDefaultConfig) { - x-lyft-user-id )EOF"; - EXPECT_THROW( - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), - EnvoyException); + EXPECT_THROW(TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true), + EnvoyException); } TEST_F(RouteMatcherTest, TestDuplicateDomainConfig) { @@ -3732,9 +3733,8 @@ TEST_F(RouteMatcherTest, TestDuplicateDomainConfig) { cluster: www2_staging )EOF"; - EXPECT_THROW( - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), - EnvoyException); + EXPECT_THROW(TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true), + EnvoyException); } // Test to detect if hostname matches are case-insensitive @@ -3754,7 +3754,7 @@ TEST_F(RouteMatcherTest, TestCaseSensitiveDomainConfig) { )EOF"; EXPECT_THROW_WITH_MESSAGE( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(config_with_case_sensitive_domains), + TestConfigImpl(parseRouteConfigurationFromYaml(config_with_case_sensitive_domains), factory_context_, true), EnvoyException, "Only unique values for domains are permitted. 
Duplicate entry of domain www.lyft.com"); @@ -3776,8 +3776,8 @@ TEST_F(RouteMatcherTest, TestDuplicateWildcardDomainConfig) { )EOF"; EXPECT_THROW_WITH_MESSAGE( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), - EnvoyException, "Only a single wildcard domain is permitted"); + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, + "Only a single wildcard domain is permitted"); } TEST_F(RouteMatcherTest, TestDuplicateSuffixWildcardDomainConfig) { @@ -3796,8 +3796,7 @@ TEST_F(RouteMatcherTest, TestDuplicateSuffixWildcardDomainConfig) { )EOF"; EXPECT_THROW_WITH_MESSAGE( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), - EnvoyException, + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, "Only unique values for domains are permitted. Duplicate entry of domain *.lyft.com"); } @@ -3817,8 +3816,7 @@ TEST_F(RouteMatcherTest, TestDuplicatePrefixWildcardDomainConfig) { )EOF"; EXPECT_THROW_WITH_MESSAGE( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), - EnvoyException, + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, "Only unique values for domains are permitted. Duplicate entry of domain bar.*"); } @@ -3835,8 +3833,7 @@ TEST_F(RouteMatcherTest, TestInvalidCharactersInPrefixRewrites) { )EOF"; EXPECT_THROW_WITH_REGEX( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), - EnvoyException, + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, "RouteActionValidationError.PrefixRewrite:.*value does not match regex pattern"); } @@ -3853,8 +3850,7 @@ TEST_F(RouteMatcherTest, TestInvalidCharactersInHostRewrites) { )EOF"; EXPECT_THROW_WITH_REGEX( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), - EnvoyException, + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, "RouteActionValidationError.HostRewriteLiteral:.*value does not match regex pattern"); } @@ -3871,8 +3867,7 @@ TEST_F(RouteMatcherTest, TestInvalidCharactersInAutoHostRewrites) { )EOF"; EXPECT_THROW_WITH_REGEX( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), - EnvoyException, + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, "RouteActionValidationError.HostRewriteHeader:.*value does not match regex pattern"); } @@ -3887,8 +3882,7 @@ TEST_F(RouteMatcherTest, TestInvalidCharactersInHostRedirect) { )EOF"; EXPECT_THROW_WITH_REGEX( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), - EnvoyException, + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, "RedirectActionValidationError.HostRedirect:.*value does not match regex pattern"); } @@ -3903,8 +3897,7 @@ TEST_F(RouteMatcherTest, TestInvalidCharactersInPathRedirect) { )EOF"; EXPECT_THROW_WITH_REGEX( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), - EnvoyException, + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, "RedirectActionValidationError.PathRedirect:.*value does not match regex pattern"); } @@ -3919,8 +3912,7 @@ TEST_F(RouteMatcherTest, TestInvalidCharactersInPrefixRewriteRedirect) { )EOF"; EXPECT_THROW_WITH_REGEX( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), 
factory_context_, true), - EnvoyException, + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, "RedirectActionValidationError.PrefixRewrite:.*value does not match regex pattern"); } @@ -3942,8 +3934,8 @@ TEST_F(RouteMatcherTest, TestPrefixAndRegexRewrites) { )EOF"; EXPECT_THROW_WITH_MESSAGE( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), - EnvoyException, "Cannot specify both prefix_rewrite and regex_rewrite"); + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, + "Cannot specify both prefix_rewrite and regex_rewrite"); } TEST_F(RouteMatcherTest, TestDomainMatchOrderConfig) { @@ -3971,7 +3963,7 @@ TEST_F(RouteMatcherTest, TestDomainMatchOrderConfig) { route: { cluster: default } )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); EXPECT_EQ( "exact", @@ -4005,7 +3997,7 @@ TEST_F(RouteMatcherTest, NoProtocolInHeadersWhenTlsIsRequired) { cluster: www )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); // route may be called early in some edge cases and "x-forwarded-proto" will not be set. Http::TestRequestHeaderMapImpl headers{{":authority", "www.lyft.com"}, {":path", "/"}}; @@ -4048,7 +4040,7 @@ TEST_F(RouteMatcherTest, RouteName) { redirect: { host_redirect: new.lyft.com } )EOF"; NiceMock factory_context; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context, false); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context, false); { Http::TestRequestHeaderMapImpl headers = genHeaders("www.lyft.com", "/", "GET"); EXPECT_EQ("route-test", config.route(headers, 0)->routeEntry()->routeName()); @@ -4214,7 +4206,7 @@ name: foo route: { cluster: www2 } )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); EXPECT_EQ(nullptr, config.route(genRedirectHeaders("www.foo.com", "/foo", true, true), 0)); { Http::TestRequestHeaderMapImpl headers = genRedirectHeaders("www.lyft.com", "/foo", true, true); @@ -4517,7 +4509,7 @@ TEST_F(RouteMatcherTest, ExclusiveRouteEntryOrDirectResponseEntry) { host_redirect: new.lyft.com )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); { Http::TestRequestHeaderMapImpl headers = genRedirectHeaders("www.lyft.com", "/foo", true, true); @@ -4561,7 +4553,7 @@ TEST_F(RouteMatcherTest, ExclusiveWeightedClustersEntryOrDirectResponseEntry) { host_redirect: "[fe80::1]" )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); { Http::TestRequestHeaderMapImpl headers = genRedirectHeaders("www.lyft.com", "/foo", true, true); @@ -4663,7 +4655,7 @@ TEST_F(RouteMatcherTest, WeightedClusters) { BazFactory baz_factory; Registry::InjectFactory registered_factory(baz_factory); auto& runtime = factory_context_.runtime_loader_; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), 
factory_context_, true); { Http::TestRequestHeaderMapImpl headers = @@ -4800,7 +4792,7 @@ TEST_F(RouteMatcherTest, ExclusiveWeightedClustersOrClusterConfig) { cluster: www2 )EOF"; - EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), + EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException); } @@ -4818,7 +4810,7 @@ TEST_F(RouteMatcherTest, WeightedClustersMissingClusterList) { runtime_key_prefix: www2 )EOF"; - EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), + EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException); } @@ -4837,7 +4829,7 @@ TEST_F(RouteMatcherTest, WeightedClustersEmptyClustersList) { clusters: [] )EOF"; - EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), + EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException); } @@ -4860,8 +4852,8 @@ TEST_F(RouteMatcherTest, WeightedClustersSumOFWeightsNotEqualToMax) { )EOF"; EXPECT_THROW_WITH_MESSAGE( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), - EnvoyException, "Sum of weights in the weighted_cluster should add up to 100"); + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, + "Sum of weights in the weighted_cluster should add up to 100"); yaml = R"EOF( virtual_hosts: @@ -4882,8 +4874,8 @@ TEST_F(RouteMatcherTest, WeightedClustersSumOFWeightsNotEqualToMax) { )EOF"; EXPECT_THROW_WITH_MESSAGE( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), - EnvoyException, "Sum of weights in the weighted_cluster should add up to 99"); + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, + "Sum of weights in the weighted_cluster should add up to 99"); } TEST_F(RouteMatcherTest, TestWeightedClusterWithMissingWeights) { @@ -4905,7 +4897,7 @@ TEST_F(RouteMatcherTest, TestWeightedClusterWithMissingWeights) { - name: cluster3 )EOF"; - EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), + EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException); } @@ -4936,7 +4928,7 @@ TEST_F(RouteMatcherTest, TestWeightedClusterInvalidClusterName) { EXPECT_CALL(factory_context_.cluster_manager_, get(Eq("cluster3-invalid"))) .WillRepeatedly(Return(nullptr)); - EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), + EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException); } @@ -4974,7 +4966,7 @@ TEST_F(RouteMatcherTest, TestWeightedClusterHeaderManipulation) { response_headers_to_remove: [ "x-remove-cluster2" ] )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); NiceMock stream_info; { @@ -5032,7 +5024,7 @@ TEST_F(BadHttpRouteConfigurationsTest, BadRouteConfig) { fake_entry: fake_type )EOF"; - EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), + EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException); } @@ -5051,7 +5043,7 @@ TEST_F(BadHttpRouteConfigurationsTest, BadVirtualHostConfig) { cluster: www2 )EOF"; - 
EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), + EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException); } @@ -5069,7 +5061,7 @@ TEST_F(BadHttpRouteConfigurationsTest, BadRouteEntryConfig) { timeout: 1234s )EOF"; - EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), + EXPECT_THROW(TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException); } @@ -5088,8 +5080,7 @@ TEST_F(BadHttpRouteConfigurationsTest, BadRouteEntryConfigPrefixAndPath) { )EOF"; EXPECT_THROW_WITH_REGEX( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), - EnvoyException, + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, "invalid value oneof field 'path_specifier' is already set. Cannot set '(prefix|path)' for " "type oneof"); } @@ -5106,8 +5097,8 @@ TEST_F(BadHttpRouteConfigurationsTest, BadRouteEntryConfigMissingPathSpecifier) )EOF"; EXPECT_THROW_WITH_REGEX( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), - EnvoyException, "RouteValidationError.Match: \\[\"value is required\"\\]"); + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, + "RouteValidationError.Match: \\[\"value is required\"\\]"); } TEST_F(BadHttpRouteConfigurationsTest, BadRouteEntryConfigPrefixAndRegex) { @@ -5125,8 +5116,7 @@ TEST_F(BadHttpRouteConfigurationsTest, BadRouteEntryConfigPrefixAndRegex) { )EOF"; EXPECT_THROW_WITH_REGEX( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), - EnvoyException, + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, "invalid value oneof field 'path_specifier' is already set. Cannot set '(prefix|regex)' for " "type oneof"); } @@ -5143,8 +5133,8 @@ TEST_F(BadHttpRouteConfigurationsTest, BadRouteEntryConfigNoAction) { )EOF"; EXPECT_THROW_WITH_REGEX( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), - EnvoyException, "caused by field: \"action\", reason: is required"); + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, + "caused by field: \"action\", reason: is required"); } TEST_F(BadHttpRouteConfigurationsTest, BadRouteEntryConfigPathAndRegex) { @@ -5162,8 +5152,7 @@ TEST_F(BadHttpRouteConfigurationsTest, BadRouteEntryConfigPathAndRegex) { )EOF"; EXPECT_THROW_WITH_REGEX( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), - EnvoyException, + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, "invalid value oneof field 'path_specifier' is already set. 
Cannot set '(path|regex)' for " "type oneof"); } @@ -5184,8 +5173,8 @@ TEST_F(BadHttpRouteConfigurationsTest, BadRouteEntryConfigPrefixAndPathAndRegex) )EOF"; EXPECT_THROW_WITH_REGEX( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), - EnvoyException, "invalid value oneof field 'path_specifier' is already set."); + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, + "invalid value oneof field 'path_specifier' is already set."); } TEST_F(RouteMatcherTest, TestOpaqueConfig) { @@ -5206,7 +5195,7 @@ TEST_F(RouteMatcherTest, TestOpaqueConfig) { name2: value2 )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); const std::multimap& opaque_config = config.route(genHeaders("api.lyft.com", "/api", "GET"), 0)->routeEntry()->opaqueConfig(); @@ -5234,7 +5223,7 @@ TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(TestOpaqueConfigUsingDeprecated name2: value2 )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); const std::multimap& opaque_config = config.route(genHeaders("api.lyft.com", "/api", "GET"), 0)->routeEntry()->opaqueConfig(); @@ -5261,7 +5250,7 @@ TEST_F(RoutePropertyTest, ExcludeVHRateLimits) { Http::TestRequestHeaderMapImpl headers = genHeaders("www.lyft.com", "/foo", "GET"); std::unique_ptr config_ptr; - config_ptr = std::make_unique(parseRouteConfigurationFromV2Yaml(yaml), + config_ptr = std::make_unique(parseRouteConfigurationFromYaml(yaml), factory_context_, true); EXPECT_TRUE(config_ptr->route(headers, 0)->routeEntry()->includeVirtualHostRateLimits()); @@ -5280,7 +5269,7 @@ TEST_F(RoutePropertyTest, ExcludeVHRateLimits) { - remote_address: {} )EOF"; - config_ptr = std::make_unique(parseRouteConfigurationFromV2Yaml(yaml), + config_ptr = std::make_unique(parseRouteConfigurationFromYaml(yaml), factory_context_, true); EXPECT_FALSE(config_ptr->route(headers, 0)->routeEntry()->includeVirtualHostRateLimits()); @@ -5300,7 +5289,7 @@ TEST_F(RoutePropertyTest, ExcludeVHRateLimits) { - remote_address: {} )EOF"; - config_ptr = std::make_unique(parseRouteConfigurationFromV2Yaml(yaml), + config_ptr = std::make_unique(parseRouteConfigurationFromYaml(yaml), factory_context_, true); EXPECT_TRUE(config_ptr->route(headers, 0)->routeEntry()->includeVirtualHostRateLimits()); } @@ -5351,7 +5340,7 @@ TEST_F(RoutePropertyTest, DEPRECATED_FEATURE_TEST(TestVHostCorsConfig)) { .WillOnce(Return(true)); EXPECT_CALL(factory_context_.runtime_loader_, snapshot()).WillRepeatedly(ReturnRef(snapshot)); - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, false); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, false); const Router::CorsPolicy* cors_policy = config.route(genHeaders("api.lyft.com", "/api", "GET"), 0) @@ -5408,7 +5397,7 @@ TEST_F(RoutePropertyTest, TestRouteCorsConfig) { .WillOnce(Return(true)); EXPECT_CALL(factory_context_.runtime_loader_, snapshot()).WillRepeatedly(ReturnRef(snapshot)); - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, false); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, false); const Router::CorsPolicy* cors_policy = config.route(genHeaders("api.lyft.com", "/api", "GET"), 0)->routeEntry()->corsPolicy(); @@ -5445,7 +5434,7 
@@ TEST_F(RoutePropertyTest, DEPRECATED_FEATURE_TEST(TTestVHostCorsLegacyConfig)) { cluster: ats )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); const Router::CorsPolicy* cors_policy = config.route(genHeaders("api.lyft.com", "/api", "GET"), 0) @@ -5485,7 +5474,7 @@ TEST_F(RoutePropertyTest, DEPRECATED_FEATURE_TEST(TestRouteCorsLegacyConfig)) { allow_credentials: true )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); const Router::CorsPolicy* cors_policy = config.route(genHeaders("api.lyft.com", "/api", "GET"), 0)->routeEntry()->corsPolicy(); @@ -5516,8 +5505,8 @@ TEST_F(RoutePropertyTest, TestBadCorsConfig) { )EOF"; EXPECT_THROW_WITH_REGEX( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), - EnvoyException, "Unable to parse JSON as proto .*: invalid value 0 for type TYPE_BOOL"); + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, + "Unable to parse JSON as proto .*: invalid value 0 for type TYPE_BOOL"); } TEST_F(RouteMatcherTest, Decorator) { @@ -5540,7 +5529,7 @@ TEST_F(RouteMatcherTest, Decorator) { cluster: bar )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); { Http::TestRequestHeaderMapImpl headers = genHeaders("www.lyft.com", "/foo", "GET"); @@ -5589,7 +5578,7 @@ TEST_F(CustomRequestHeadersTest, AddNewHeader) { value: "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%" )EOF"; NiceMock stream_info; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); Http::TestRequestHeaderMapImpl headers = genHeaders("www.lyft.com", "/new_endpoint/foo", "GET"); const RouteEntry* route = config.route(headers, 0)->routeEntry(); route->finalizeRequestHeaders(headers, stream_info, true); @@ -5627,7 +5616,7 @@ TEST_F(CustomRequestHeadersTest, CustomHeaderWrongFormat) { )EOF"; NiceMock stream_info; EXPECT_THROW_WITH_MESSAGE( - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, "Invalid header configuration. 
Un-terminated variable expression " "'DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT'"); @@ -5905,7 +5894,7 @@ name: foo )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); EXPECT_EQ(nullptr, config.route(genRedirectHeaders("www.foo.com", "/foo", true, true), 0)); @@ -5931,7 +5920,7 @@ name: foo direct_response: { status: 200, body: { inline_string: "content" } } )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); const auto* direct_response = config.route(genHeaders("example.com", "/", "GET"), 0)->directResponseEntry(); @@ -5956,9 +5945,9 @@ name: foo inline_string: )EOF" + response_body + "\n"; - EXPECT_THROW_WITH_MESSAGE(TestConfigImpl invalid_config(parseRouteConfigurationFromV2Yaml(yaml), - factory_context_, true), - EnvoyException, "response body size is 4097 bytes; maximum is 4096"); + EXPECT_THROW_WITH_MESSAGE( + TestConfigImpl invalid_config(parseRouteConfigurationFromYaml(yaml), factory_context_, true), + EnvoyException, "response body size is 4097 bytes; maximum is 4096"); } void checkPathMatchCriterion(const Route* route, const std::string& expected_matcher, @@ -5987,7 +5976,7 @@ name: foo BazFactory baz_factory; Registry::InjectFactory registered_factory(baz_factory); EXPECT_THROW_WITH_MESSAGE( - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true), Envoy::EnvoyException, "Cannot create a Baz when metadata is empty."); } @@ -6011,7 +6000,7 @@ name: foo )EOF"; BazFactory baz_factory; Registry::InjectFactory registered_factory(baz_factory); - const TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + const TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); checkPathMatchCriterion(config.route(genHeaders("www.foo.com", "/regex", "GET"), 0).get(), "/rege[xy]", PathMatchType::Regex); @@ -6085,7 +6074,7 @@ name: foo )EOF"; BazFactory baz_factory; Registry::InjectFactory registered_factory(baz_factory); - const TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + const TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); const auto route1 = config.route(genHeaders("www.foo.com", "/first", "GET"), 0); const auto route2 = config.route(genHeaders("www.foo.com", "/second", "GET"), 0); @@ -6146,7 +6135,7 @@ name: AllRedirects redirect: { prefix_rewrite: "/" } )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(RedirectPrefixRewrite), factory_context_, + TestConfigImpl config(parseRouteConfigurationFromYaml(RedirectPrefixRewrite), factory_context_, true); EXPECT_EQ(nullptr, config.route(genRedirectHeaders("www.foo.com", "/foo", true, true), 0)); @@ -6265,7 +6254,7 @@ name: AllRedirects redirect: { host_redirect: "new.lyft.com", prefix_rewrite: "/new/prefix" , https_redirect: "true", strip_query: "true" } )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(RouteDynPathRedirect), factory_context_, + TestConfigImpl config(parseRouteConfigurationFromYaml(RouteDynPathRedirect), factory_context_, true); EXPECT_EQ(nullptr, config.route(genRedirectHeaders("www.foo.com", "/foo", true, true), 0)); @@ -6402,7 +6391,7 @@ name: foo cluster: 
local_service_without_headers )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); { EXPECT_EQ("local_service_without_headers", @@ -6526,7 +6515,7 @@ name: foo cluster: local_service_without_headers )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); { NiceMock stream_info; @@ -6659,7 +6648,7 @@ name: RegexNoMatch route: { cluster: some-cluster } )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(RegexRewrite), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(RegexRewrite), factory_context_, true); { // Get our regex route entry @@ -6692,7 +6681,7 @@ name: NoIdleTimeout cluster: some-cluster )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(NoIdleTimeout), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(NoIdleTimeout), factory_context_, true); Http::TestRequestHeaderMapImpl headers = genRedirectHeaders("idle.lyft.com", "/regex", true, false); const RouteEntry* route_entry = config.route(headers, 0)->routeEntry(); @@ -6715,7 +6704,7 @@ name: ZeroIdleTimeout idle_timeout: 0s )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(ZeroIdleTimeout), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(ZeroIdleTimeout), factory_context_, true); Http::TestRequestHeaderMapImpl headers = genRedirectHeaders("idle.lyft.com", "/regex", true, false); const RouteEntry* route_entry = config.route(headers, 0)->routeEntry(); @@ -6738,7 +6727,7 @@ name: ExplicitIdleTimeout idle_timeout: 7s )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(ExplicitIdleTimeout), factory_context_, + TestConfigImpl config(parseRouteConfigurationFromYaml(ExplicitIdleTimeout), factory_context_, true); Http::TestRequestHeaderMapImpl headers = genRedirectHeaders("idle.lyft.com", "/regex", true, false); @@ -6763,7 +6752,7 @@ name: RetriableStatusCodes retriable_status_codes: [100, 200] )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(ExplicitIdleTimeout), factory_context_, + TestConfigImpl config(parseRouteConfigurationFromYaml(ExplicitIdleTimeout), factory_context_, true); Http::TestRequestHeaderMapImpl headers = genRedirectHeaders("idle.lyft.com", "/regex", true, false); @@ -6792,8 +6781,7 @@ name: RetriableHeaders - name: X-Upstream-Pushback )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(RetriableHeaders), factory_context_, - true); + TestConfigImpl config(parseRouteConfigurationFromYaml(RetriableHeaders), factory_context_, true); Http::TestRequestHeaderMapImpl headers = genRedirectHeaders("idle.lyft.com", "/regex", true, false); const auto& retry_policy = config.route(headers, 0)->routeEntry()->retryPolicy(); @@ -6829,7 +6817,7 @@ name: RetriableStatusCodes enabled: false )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(UpgradeYaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(UpgradeYaml), factory_context_, true); Http::TestRequestHeaderMapImpl headers = genRedirectHeaders("idle.lyft.com", "/regex", true, false); const RouteEntry::UpgradeMap& upgrade_map = config.route(headers, 0)->routeEntry()->upgradeMap(); @@ -6858,8 +6846,8 @@ name: RetriableStatusCodes )EOF"; EXPECT_THROW_WITH_MESSAGE( - 
TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), - EnvoyException, "Duplicate upgrade WebSocket"); + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, + "Duplicate upgrade WebSocket"); } TEST_F(RouteConfigurationV2, BadConnectConfig) { @@ -6882,8 +6870,8 @@ name: RetriableStatusCodes )EOF"; EXPECT_THROW_WITH_MESSAGE( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), - EnvoyException, "Non-CONNECT upgrade type Websocket has ConnectConfig"); + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, + "Non-CONNECT upgrade type Websocket has ConnectConfig"); } // Verifies that we're creating a new instance of the retry plugins on each call instead of always @@ -6916,7 +6904,7 @@ name: RetriableStatusCodes Registry::InjectFactory inject_predicate_factory( host_predicate_factory); - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(ExplicitIdleTimeout), factory_context_, + TestConfigImpl config(parseRouteConfigurationFromYaml(ExplicitIdleTimeout), factory_context_, true); Http::TestRequestHeaderMapImpl headers = genRedirectHeaders("idle.lyft.com", "/regex", true, false); @@ -6944,8 +6932,8 @@ name: InternalRedirectEnabled cluster: some-cluster )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(InternalRedirectEnabled), - factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(InternalRedirectEnabled), factory_context_, + true); Http::TestRequestHeaderMapImpl headers = genRedirectHeaders("idle.lyft.com", "/regex", true, false); const auto& internal_redirect_policy = @@ -6969,8 +6957,8 @@ name: InternalRedirectEnabled internal_redirect_policy: {} )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(InternalRedirectEnabled), - factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(InternalRedirectEnabled), factory_context_, + true); Http::TestRequestHeaderMapImpl headers = genRedirectHeaders("idle.lyft.com", "/regex", true, false); const auto& internal_redirect_policy = @@ -7001,8 +6989,8 @@ name: InternalRedirectEnabled redirect_response_codes: [301, 302, 303, 304] )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(InternalRedirectEnabled), - factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(InternalRedirectEnabled), factory_context_, + true); Http::TestRequestHeaderMapImpl headers = genRedirectHeaders("idle.lyft.com", "/regex", true, false); const auto& internal_redirect_policy = @@ -7038,8 +7026,8 @@ name: InternalRedirectEnabled redirect_response_codes: [200, 304] )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(InternalRedirectEnabled), - factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(InternalRedirectEnabled), factory_context_, + true); Http::TestRequestHeaderMapImpl headers = genRedirectHeaders("idle.lyft.com", "/regex", true, false); const auto& internal_redirect_policy = @@ -7100,7 +7088,7 @@ class PerFilterConfigsTest : public testing::Test, public ConfigImplTestBase { void checkEach(const std::string& yaml, uint32_t expected_entry, uint32_t expected_route, uint32_t expected_vhost) { - const TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + const TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); const auto route = config.route(genHeaders("www.foo.com", "/", "GET"), 0); const auto* route_entry = 
route->routeEntry(); @@ -7121,7 +7109,7 @@ class PerFilterConfigsTest : public testing::Test, public ConfigImplTestBase { } void checkNoPerFilterConfig(const std::string& yaml) { - const TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + const TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); const auto route = config.route(genHeaders("www.foo.com", "/", "GET"), 0); const auto* route_entry = route->routeEntry(); @@ -7157,7 +7145,7 @@ name: foo )EOF"; EXPECT_THROW_WITH_MESSAGE( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, "Only one of typed_configs or configs can be specified"); } @@ -7177,7 +7165,7 @@ name: foo )EOF"; EXPECT_THROW_WITH_MESSAGE( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, "Only one of typed_configs or configs can be specified"); } } @@ -7195,8 +7183,8 @@ name: foo )EOF"; EXPECT_THROW_WITH_MESSAGE( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), - EnvoyException, "Didn't find a registered implementation for name: 'unknown.filter'"); + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, + "Didn't find a registered implementation for name: 'unknown.filter'"); } TEST_F(PerFilterConfigsTest, UnknownFilterAny) { @@ -7214,8 +7202,8 @@ name: foo )EOF"; EXPECT_THROW_WITH_MESSAGE( - TestConfigImpl(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true), - EnvoyException, "Didn't find a registered implementation for name: 'unknown.filter'"); + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, + "Didn't find a registered implementation for name: 'unknown.filter'"); } // Test that a trivially specified NamedHttpFilterConfigFactory ignores per_filter_config without @@ -7413,7 +7401,7 @@ name: foo cluster: default )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); std::vector clusters{"default", "foo", "foo_bar", "foo_bar_baz"}; RouteConstSharedPtr accepted_route = config.route( @@ -7454,7 +7442,7 @@ name: foo cluster: default )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); std::vector clusters{"foo", "foo_bar"}; RouteConstSharedPtr accepted_route = config.route( @@ -7495,7 +7483,7 @@ name: foo cluster: default )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); std::vector clusters{"default", "foo", "foo_bar", "foo_bar_baz"}; RouteConstSharedPtr accepted_route = config.route( @@ -7535,7 +7523,7 @@ name: foo cluster: foo )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); RouteConstSharedPtr accepted_route = config.route( [](RouteConstSharedPtr, RouteEvalStatus) -> RouteMatchStatus { ADD_FAILURE() @@ -7564,7 +7552,7 @@ name: foo cluster: default )EOF"; - TestConfigImpl 
config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); RouteConstSharedPtr accepted_route = config.route( [](RouteConstSharedPtr, RouteEvalStatus) -> RouteMatchStatus { ADD_FAILURE() @@ -7593,7 +7581,7 @@ name: foo cluster: default )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); RouteConstSharedPtr accepted_route = config.route( [](RouteConstSharedPtr, RouteEvalStatus) -> RouteMatchStatus { ADD_FAILURE() @@ -7623,7 +7611,7 @@ name: foo require_tls: ALL )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); RouteConstSharedPtr accepted_route = config.route( [](RouteConstSharedPtr, RouteEvalStatus) -> RouteMatchStatus { ADD_FAILURE() @@ -7653,7 +7641,7 @@ name: foo require_tls: EXTERNAL_ONLY )EOF"; - TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); RouteConstSharedPtr accepted_route = config.route( [](RouteConstSharedPtr, RouteEvalStatus) -> RouteMatchStatus { ADD_FAILURE() diff --git a/test/common/upstream/eds_test.cc b/test/common/upstream/eds_test.cc index b40aa3f40f7f..865e05bd9f98 100644 --- a/test/common/upstream/eds_test.cc +++ b/test/common/upstream/eds_test.cc @@ -1873,6 +1873,34 @@ TEST_F(EdsAssignmentTimeoutTest, AssignmentLeaseExpired) { } } +// Validate that onConfigUpdate() verifies that no deprecated fields are used. +TEST_F(EdsTest, DeprecatedFieldsError) { + // This test is only valid in API-v3, and should be updated for API-v4, as + // the deprecated fields of API-v2 will be removed. 
+ envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment = + TestUtility::parseYaml(R"EOF( + cluster_name: fare + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 1.2.3.4 + port_value: 80 + policy: + overprovisioning_factor: 100 + hidden_envoy_deprecated_disable_overprovisioning: true + )EOF"); + + initialize(); + Protobuf::RepeatedPtrField resources; + resources.Add()->PackFrom(cluster_load_assignment); + EXPECT_THROW_WITH_REGEX(eds_callbacks_->onConfigUpdate(resources, ""), ProtoValidationException, + "Illegal use of hidden_envoy_deprecated_ V2 field " + "'envoy.config.endpoint.v3.ClusterLoadAssignment.Policy.hidden_envoy_" + "deprecated_disable_overprovisioning'"); +} + } // namespace } // namespace Upstream } // namespace Envoy diff --git a/test/config_test/BUILD b/test/config_test/BUILD index 31a067438c82..154dc3ec4c1e 100644 --- a/test/config_test/BUILD +++ b/test/config_test/BUILD @@ -50,3 +50,19 @@ envoy_cc_test_library( "//conditions:default": envoy_all_extensions(), }), ) + +envoy_cc_test( + name = "deprecated_configs_test", + srcs = [ + "deprecated_configs_test.cc", + ], + deps = [ + ":config_test_lib", + "//source/common/config:api_version_lib", + "//test/test_common:environment_lib", + "//test/test_common:logging_lib", + "//test/test_common:utility_lib", + "@envoy_api//envoy/config/bootstrap/v2:pkg_cc_proto", + "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", + ], +) diff --git a/test/config_test/config_test.cc b/test/config_test/config_test.cc index 71f30f2eb11a..37e6f92f4605 100644 --- a/test/config_test/config_test.cc +++ b/test/config_test/config_test.cc @@ -179,5 +179,32 @@ uint32_t run(const std::string& directory) { return num_tested; } +void loadVersionedBootstrapFile(const std::string& filename, + envoy::config::bootstrap::v3::Bootstrap& bootstrap_message, + absl::optional bootstrap_version) { + Api::ApiPtr api = Api::createApiForTest(); + OptionsImpl options( + Envoy::Server::createTestOptionsImpl(filename, "", Network::Address::IpVersion::v6)); + // Avoid contention issues with other tests over the hot restart domain socket. + options.setHotRestartDisabled(true); + if (bootstrap_version.has_value()) { + options.setBootstrapVersion(*bootstrap_version); + } + Server::InstanceUtil::loadBootstrapConfig(bootstrap_message, options, + ProtobufMessage::getStrictValidationVisitor(), *api); +} + +void loadBootstrapConfigProto(const envoy::config::bootstrap::v3::Bootstrap& in_proto, + envoy::config::bootstrap::v3::Bootstrap& bootstrap_message) { + Api::ApiPtr api = Api::createApiForTest(); + OptionsImpl options( + Envoy::Server::createTestOptionsImpl("", "", Network::Address::IpVersion::v6)); + options.setConfigProto(in_proto); + // Avoid contention issues with other tests over the hot restart domain socket. 
+ options.setHotRestartDisabled(true); + Server::InstanceUtil::loadBootstrapConfig(bootstrap_message, options, + ProtobufMessage::getStrictValidationVisitor(), *api); +} + } // namespace ConfigTest } // namespace Envoy diff --git a/test/config_test/config_test.h b/test/config_test/config_test.h index fafa0cd1fd10..551fffadce33 100644 --- a/test/config_test/config_test.h +++ b/test/config_test/config_test.h @@ -3,6 +3,10 @@ #include #include +#include "envoy/config/bootstrap/v3/bootstrap.pb.h" + +#include "absl/types/optional.h" + namespace Envoy { namespace ConfigTest { @@ -18,5 +22,20 @@ uint32_t run(const std::string& path); */ void testMerge(); +/** + * Loads the given bootstrap file with an optional bootstrap_version into the + * given bootstrap protobuf message using the server's loadBootstrapConfig. + */ +void loadVersionedBootstrapFile(const std::string& filename, + envoy::config::bootstrap::v3::Bootstrap& bootstrap_message, + absl::optional bootstrap_version = absl::nullopt); + +/** + * Loads the given bootstrap proto into the given bootstrap protobuf message + * using the server's loadBootstrapConfig. + */ +void loadBootstrapConfigProto(const envoy::config::bootstrap::v3::Bootstrap& in_proto, + envoy::config::bootstrap::v3::Bootstrap& bootstrap_message); + } // namespace ConfigTest } // namespace Envoy diff --git a/test/config_test/deprecated_configs_test.cc b/test/config_test/deprecated_configs_test.cc new file mode 100644 index 000000000000..dbb10707912e --- /dev/null +++ b/test/config_test/deprecated_configs_test.cc @@ -0,0 +1,239 @@ +#include "envoy/config/bootstrap/v2/bootstrap.pb.h" +#include "envoy/config/bootstrap/v3/bootstrap.pb.h" + +#include "common/config/api_version.h" + +#include "test/config_test/config_test.h" +#include "test/test_common/environment.h" +#include "test/test_common/logging.h" +#include "test/test_common/utility.h" + +#include "gtest/gtest.h" + +using testing::HasSubstr; +using testing::StartsWith; + +namespace Envoy { + +// A deprecated field can be used in previous version text proto and upgraded. 
+TEST(DeprecatedConfigsTest, DEPRECATED_FEATURE_TEST(LoadV2BootstrapTextProtoDeprecatedField)) { + API_NO_BOOST(envoy::config::bootstrap::v2::Bootstrap) + bootstrap = TestUtility::parseYaml(R"EOF( + node: + build_version: foo + )EOF"); + + std::string bootstrap_text; + ASSERT_TRUE(Protobuf::TextFormat::PrintToString(bootstrap, &bootstrap_text)); + const std::string filename = + TestEnvironment::writeStringToFileForTest("proto.pb_text", bootstrap_text); + + // Loading as previous version should work (after upgrade) + API_NO_BOOST(envoy::config::bootstrap::v3::Bootstrap) proto_v2_from_file; + EXPECT_LOG_CONTAINS("warning", "Using deprecated option 'envoy.api.v2.core.Node.build_version'", + ConfigTest::loadVersionedBootstrapFile(filename, proto_v2_from_file, 2)); + EXPECT_EQ("foo", proto_v2_from_file.node().hidden_envoy_deprecated_build_version()); + + // Loading as current version should fail + API_NO_BOOST(envoy::config::bootstrap::v3::Bootstrap) proto_v3_from_file; + EXPECT_THAT_THROWS_MESSAGE( + ConfigTest::loadVersionedBootstrapFile(filename, proto_v3_from_file, 3), EnvoyException, + AllOf(StartsWith("Unable to parse file"), + HasSubstr("as a text protobuf (type envoy.config.bootstrap.v3.Bootstrap)"))); + + API_NO_BOOST(envoy::config::bootstrap::v3::Bootstrap) + bootstrap_v3 = TestUtility::parseYaml(R"EOF( + node: + hidden_envoy_deprecated_build_version: foo + )EOF"); + + std::string bootstrap_text_v3; + ASSERT_TRUE(Protobuf::TextFormat::PrintToString(bootstrap_v3, &bootstrap_text_v3)); + const std::string filename_v3 = + TestEnvironment::writeStringToFileForTest("proto_v3.pb_text", bootstrap_text_v3); + + // Loading v3 with hidden-deprecated field as current version should fail + EXPECT_THAT_THROWS_MESSAGE( + ConfigTest::loadVersionedBootstrapFile(filename_v3, proto_v3_from_file, 3), EnvoyException, + HasSubstr("Illegal use of hidden_envoy_deprecated_ V2 field " + "'envoy.config.core.v3.Node.hidden_envoy_deprecated_build_version'")); + + // Loading v3 with hidden-deprecated field with boosting should fail as it + // doesn't appear in v2 and only in v3 but marked as hidden_envoy_deprecated + EXPECT_THAT_THROWS_MESSAGE( + ConfigTest::loadVersionedBootstrapFile(filename_v3, proto_v3_from_file), EnvoyException, + HasSubstr("Illegal use of hidden_envoy_deprecated_ V2 field " + "'envoy.config.core.v3.Node.hidden_envoy_deprecated_build_version'")); +} + +// A deprecated field can be used in previous version binary proto and upgraded. 
+TEST(DeprecatedConfigsTest, DEPRECATED_FEATURE_TEST(LoadV2BootstrapBinaryProtoDeprecatedField)) { + API_NO_BOOST(envoy::config::bootstrap::v2::Bootstrap) + bootstrap = TestUtility::parseYaml(R"EOF( + node: + build_version: foo + )EOF"); + + std::string bootstrap_binary_str; + bootstrap_binary_str.reserve(bootstrap.ByteSizeLong()); + bootstrap.SerializeToString(&bootstrap_binary_str); + const std::string filename = + TestEnvironment::writeStringToFileForTest("proto.pb", bootstrap_binary_str); + + // Loading as previous version should work (after upgrade) + API_NO_BOOST(envoy::config::bootstrap::v3::Bootstrap) proto_v2_from_file; + EXPECT_LOG_CONTAINS("warning", "Using deprecated option 'envoy.api.v2.core.Node.build_version'", + ConfigTest::loadVersionedBootstrapFile(filename, proto_v2_from_file, 2)); + EXPECT_EQ("foo", proto_v2_from_file.node().hidden_envoy_deprecated_build_version()); + + // Loading as current version should fail + API_NO_BOOST(envoy::config::bootstrap::v3::Bootstrap) proto_v3_from_file; + EXPECT_THAT_THROWS_MESSAGE( + ConfigTest::loadVersionedBootstrapFile(filename, proto_v3_from_file, 3), EnvoyException, + HasSubstr("Illegal use of hidden_envoy_deprecated_ V2 field " + "'envoy.config.core.v3.Node.hidden_envoy_deprecated_build_version'")); + + API_NO_BOOST(envoy::config::bootstrap::v3::Bootstrap) + bootstrap_v3 = TestUtility::parseYaml(R"EOF( + node: + hidden_envoy_deprecated_build_version: foo + )EOF"); + + std::string bootstrap_binary_str_v3; + bootstrap_binary_str_v3.reserve(bootstrap.ByteSizeLong()); + bootstrap.SerializeToString(&bootstrap_binary_str_v3); + const std::string filename_v3 = + TestEnvironment::writeStringToFileForTest("proto_v3.pb", bootstrap_binary_str_v3); + + // Loading v3 with hidden-deprecated field as current version should fail + EXPECT_THAT_THROWS_MESSAGE( + ConfigTest::loadVersionedBootstrapFile(filename_v3, proto_v3_from_file, 3), EnvoyException, + HasSubstr("Illegal use of hidden_envoy_deprecated_ V2 field " + "'envoy.config.core.v3.Node.hidden_envoy_deprecated_build_version'")); + + // Loading binary proto v3 with hidden-deprecated field with boosting will + // succeed as it cannot differentiate between v2 with the deprecated field and + // v3 with hidden_envoy_deprecated field + ConfigTest::loadVersionedBootstrapFile(filename_v3, proto_v3_from_file); + EXPECT_EQ("foo", proto_v3_from_file.node().hidden_envoy_deprecated_build_version()); +} + +// A deprecated field can be used in previous version yaml and upgraded. 
+TEST(DeprecatedConfigsTest, DEPRECATED_FEATURE_TEST(LoadV2BootstrapYamlDeprecatedField)) { + API_NO_BOOST(envoy::config::bootstrap::v2::Bootstrap) + bootstrap = TestUtility::parseYaml(R"EOF( + node: + build_version: foo + )EOF"); + + EXPECT_EQ("node:\n build_version: foo", + MessageUtil::getYamlStringFromMessage(bootstrap, true, false)); + const std::string filename = TestEnvironment::writeStringToFileForTest( + "proto.yaml", MessageUtil::getYamlStringFromMessage(bootstrap, false, false)); + + // Loading as previous version should work (after upgrade) + API_NO_BOOST(envoy::config::bootstrap::v3::Bootstrap) proto_v2_from_file; + EXPECT_LOG_CONTAINS("warning", "Using deprecated option 'envoy.api.v2.core.Node.build_version'", + ConfigTest::loadVersionedBootstrapFile(filename, proto_v2_from_file, 2)); + EXPECT_EQ("foo", proto_v2_from_file.node().hidden_envoy_deprecated_build_version()); + + // Loading as current version should fail + API_NO_BOOST(envoy::config::bootstrap::v3::Bootstrap) proto_v3_from_file; + EXPECT_THAT_THROWS_MESSAGE( + ConfigTest::loadVersionedBootstrapFile(filename, proto_v3_from_file, 3), EnvoyException, + AllOf(HasSubstr("type envoy.config.bootstrap.v3.Bootstrap"), + HasSubstr("build_version: Cannot find field"))); + + API_NO_BOOST(envoy::config::bootstrap::v3::Bootstrap) + bootstrap_v3 = TestUtility::parseYaml(R"EOF( + node: + hidden_envoy_deprecated_build_version: foo + )EOF"); + + EXPECT_EQ("node:\n hidden_envoy_deprecated_build_version: foo", + MessageUtil::getYamlStringFromMessage(bootstrap_v3, true, false)); + const std::string filename_v3 = TestEnvironment::writeStringToFileForTest( + "proto_v3.yaml", MessageUtil::getYamlStringFromMessage(bootstrap_v3, false, false)); + + // Loading v3 with hidden-deprecated field as current version should fail + EXPECT_THAT_THROWS_MESSAGE( + ConfigTest::loadVersionedBootstrapFile(filename_v3, proto_v3_from_file, 3), EnvoyException, + HasSubstr("Illegal use of hidden_envoy_deprecated_ V2 field " + "'envoy.config.core.v3.Node.hidden_envoy_deprecated_build_version'")); + + // Loading v3 with hidden-deprecated field with boosting should fail as the name + // doesn't appear in v2 and only in v3 but marked as hidden_envoy_deprecated + EXPECT_THAT_THROWS_MESSAGE( + ConfigTest::loadVersionedBootstrapFile(filename_v3, proto_v3_from_file), EnvoyException, + HasSubstr("Illegal use of hidden_envoy_deprecated_ V2 field " + "'envoy.config.core.v3.Node.hidden_envoy_deprecated_build_version'")); +} + +// A deprecated field can be used in previous version json and upgraded. 
+TEST(DeprecatedConfigsTest, DEPRECATED_FEATURE_TEST(LoadV2BootstrapJsonDeprecatedField)) { + API_NO_BOOST(envoy::config::bootstrap::v2::Bootstrap) + bootstrap = TestUtility::parseYaml(R"EOF( + node: + build_version: foo + )EOF"); + + EXPECT_EQ("{\"node\":{\"build_version\":\"foo\"}}", + MessageUtil::getJsonStringFromMessage(bootstrap, false, false)); + const std::string filename = TestEnvironment::writeStringToFileForTest( + "proto.json", MessageUtil::getJsonStringFromMessage(bootstrap, false, false)); + + // Loading as previous version should work (after upgrade) + API_NO_BOOST(envoy::config::bootstrap::v3::Bootstrap) proto_v2_from_file; + EXPECT_LOG_CONTAINS("warning", "Using deprecated option 'envoy.api.v2.core.Node.build_version'", + ConfigTest::loadVersionedBootstrapFile(filename, proto_v2_from_file, 2)); + EXPECT_EQ("foo", proto_v2_from_file.node().hidden_envoy_deprecated_build_version()); + + // Loading as current version should fail + API_NO_BOOST(envoy::config::bootstrap::v3::Bootstrap) proto_v3_from_file; + EXPECT_THROW_WITH_MESSAGE( + ConfigTest::loadVersionedBootstrapFile(filename, proto_v3_from_file, 3), EnvoyException, + "Protobuf message (type envoy.config.bootstrap.v3.Bootstrap reason INVALID_ARGUMENT:(node) " + "build_version: Cannot find field.) has unknown fields"); + + API_NO_BOOST(envoy::config::bootstrap::v3::Bootstrap) + bootstrap_v3 = TestUtility::parseYaml(R"EOF( + node: + hidden_envoy_deprecated_build_version: foo + )EOF"); + + EXPECT_EQ("{\"node\":{\"hidden_envoy_deprecated_build_version\":\"foo\"}}", + MessageUtil::getJsonStringFromMessage(bootstrap_v3, false, false)); + const std::string filename_v3 = TestEnvironment::writeStringToFileForTest( + "proto_v3.json", MessageUtil::getYamlStringFromMessage(bootstrap_v3, false, false)); + + // Loading v3 with hidden-deprecated field as current version should fail + EXPECT_THAT_THROWS_MESSAGE( + ConfigTest::loadVersionedBootstrapFile(filename_v3, proto_v3_from_file, 3), EnvoyException, + AllOf(StartsWith("Unable to parse JSON as proto"), + HasSubstr("hidden_envoy_deprecated_build_version: foo"))); + + // Loading v3 with hidden-deprecated field with boosting should fail as the name + // doesn't appear in v2 and only in v3 but marked as hidden_envoy_deprecated + EXPECT_THAT_THROWS_MESSAGE( + ConfigTest::loadVersionedBootstrapFile(filename_v3, proto_v3_from_file), EnvoyException, + AllOf(StartsWith("Unable to parse JSON as proto"), + HasSubstr("hidden_envoy_deprecated_build_version: foo"))); +} + +// Test the config_proto option when loading from bootstrap +TEST(DeprecatedConfigsTest, DEPRECATED_FEATURE_TEST(LoadV2BootstrapConfigProtoDeprecatedField)) { + API_NO_BOOST(envoy::config::bootstrap::v3::Bootstrap) + in_bootstrap_v3 = TestUtility::parseYaml(R"EOF( + node: + hidden_envoy_deprecated_build_version: foo + )EOF"); + + // Loading v3 with hidden-deprecated field as current version should fail + API_NO_BOOST(envoy::config::bootstrap::v3::Bootstrap) proto_v3_from_file; + EXPECT_THAT_THROWS_MESSAGE( + ConfigTest::loadBootstrapConfigProto(in_bootstrap_v3, proto_v3_from_file), EnvoyException, + HasSubstr("Illegal use of hidden_envoy_deprecated_ V2 field " + "'envoy.config.core.v3.Node.hidden_envoy_deprecated_build_version'")); +} + +} // namespace Envoy diff --git a/test/extensions/filters/network/redis_proxy/config_test.cc b/test/extensions/filters/network/redis_proxy/config_test.cc index 9270cc637c99..55b8c50193da 100644 --- a/test/extensions/filters/network/redis_proxy/config_test.cc +++ 
b/test/extensions/filters/network/redis_proxy/config_test.cc @@ -85,7 +85,7 @@ stat_prefix: foo )EOF"; envoy::extensions::filters::network::redis_proxy::v3::RedisProxy proto_config{}; - TestUtility::loadFromYamlAndValidate(yaml, proto_config); + TestUtility::loadFromYamlAndValidate(yaml, proto_config, true); NiceMock context; RedisProxyFilterConfigFactory factory; Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, context); @@ -114,7 +114,7 @@ stat_prefix: foo )EOF"; envoy::extensions::filters::network::redis_proxy::v3::RedisProxy proto_config{}; - TestUtility::loadFromYamlAndValidate(yaml, proto_config); + TestUtility::loadFromYamlAndValidate(yaml, proto_config, true); NiceMock context; RedisProxyFilterConfigFactory factory; Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, context); diff --git a/test/extensions/filters/network/tcp_proxy/config_test.cc b/test/extensions/filters/network/tcp_proxy/config_test.cc index 4104f8540ec3..d1a3c2d6f1a2 100644 --- a/test/extensions/filters/network/tcp_proxy/config_test.cc +++ b/test/extensions/filters/network/tcp_proxy/config_test.cc @@ -91,7 +91,7 @@ TEST_P(RouteIpListConfigTest, DEPRECATED_FEATURE_TEST(TcpProxy)) { )EOF"; envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy proto_config; - TestUtility::loadFromJson(json_string, proto_config); + TestUtility::loadFromJson(json_string, proto_config, true); NiceMock context; ConfigFactory factory; diff --git a/test/integration/integration.cc b/test/integration/integration.cc index 74f7667d2f01..e43fc1b1240b 100644 --- a/test/integration/integration.cc +++ b/test/integration/integration.cc @@ -372,7 +372,7 @@ void BaseIntegrationTest::createEnvoy() { MessageUtil::getYamlStringFromMessage(bootstrap)); const std::string bootstrap_path = TestEnvironment::writeStringToFileForTest( - "bootstrap.json", MessageUtil::getJsonStringFromMessage(bootstrap)); + "bootstrap.pb", TestUtility::getProtobufBinaryStringFromMessage(bootstrap)); std::vector named_ports; const auto& static_resources = config_helper_.bootstrap().static_resources(); diff --git a/test/integration/tcp_proxy_integration_test.cc b/test/integration/tcp_proxy_integration_test.cc index e007336d607e..55a8e13e1e9e 100644 --- a/test/integration/tcp_proxy_integration_test.cc +++ b/test/integration/tcp_proxy_integration_test.cc @@ -649,6 +649,9 @@ TEST_P(TcpProxyMetadataMatchIntegrationTest, endpoint_metadata_ = lbMetadata({{"role", "master"}, {"version", "v1"}, {"stage", "prod"}}); + config_helper_.addRuntimeOverride("envoy.deprecated_features:envoy.extensions.filters.network." + "tcp_proxy.v3.TcpProxy.hidden_envoy_deprecated_deprecated_v1", + "true"); initialize(); expectEndpointToMatchRoute(); @@ -737,6 +740,9 @@ TEST_P(TcpProxyMetadataMatchIntegrationTest, endpoint_metadata_ = lbMetadata({{"role", "replica"}, {"version", "v1"}, {"stage", "prod"}}); + config_helper_.addRuntimeOverride("envoy.deprecated_features:envoy.extensions.filters.network." 
+ "tcp_proxy.v3.TcpProxy.hidden_envoy_deprecated_deprecated_v1", + "true"); initialize(); expectEndpointNotToMatchRoute(); diff --git a/test/integration/version_integration_test.cc b/test/integration/version_integration_test.cc index bbc46fcc33aa..b07d2c881abb 100644 --- a/test/integration/version_integration_test.cc +++ b/test/integration/version_integration_test.cc @@ -31,6 +31,11 @@ TEST_P(VersionIntegrationTest, DEPRECATED_FEATURE_TEST(IpTaggingV2StaticStructCo config: )EOF", ExampleIpTaggingConfig)); + + config_helper_.addRuntimeOverride( + "envoy.deprecated_features:envoy.extensions.filters.network." + "http_connection_manager.v3.HttpFilter.hidden_envoy_deprecated_config", + "true"); initialize(); } diff --git a/test/test_common/utility.h b/test/test_common/utility.h index 60b71b0cc5c4..23c3f95bc992 100644 --- a/test/test_common/utility.h +++ b/test/test_common/utility.h @@ -555,10 +555,13 @@ class TestUtility { } template - static void loadFromYamlAndValidate(const std::string& yaml, MessageType& message) { + static void loadFromYamlAndValidate(const std::string& yaml, MessageType& message, + bool preserve_original_type = false) { MessageUtil::loadFromYamlAndValidate(yaml, message, ProtobufMessage::getStrictValidationVisitor()); - Config::VersionConverter::eraseOriginalTypeInformation(message); + if (!preserve_original_type) { + Config::VersionConverter::eraseOriginalTypeInformation(message); + } } template static void validate(const MessageType& message) { @@ -584,6 +587,18 @@ class TestUtility { MessageUtil::loadFromJson(json, message); return message; } + + /** + * Extract the Protobuf binary format of a google.protobuf.Message as a string. + * @param message message of type type.googleapis.com/google.protobuf.Message. + * @return std::string of the Protobuf binary object. + */ + static std::string getProtobufBinaryStringFromMessage(const Protobuf::Message& message) { + std::string pb_binary_str; + pb_binary_str.reserve(message.ByteSizeLong()); + message.SerializeToString(&pb_binary_str); + return pb_binary_str; + } }; /** From 7a652daf35d7d4a6a6bad5a010fe65947ee4411a Mon Sep 17 00:00:00 2001 From: tomocy <36136133+tomocy@users.noreply.github.com> Date: Thu, 18 Jun 2020 23:55:10 +0900 Subject: [PATCH 382/909] devex: add Python settings (#11636) Signed-off-by: tomocy --- .devcontainer/devcontainer.json | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 58eda81be329..c3c3cd7ed19f 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -15,6 +15,10 @@ "bazel.buildifierFixOnFormat": true, "clangd.path": "/opt/llvm/bin/clangd", "python.pythonPath": "/usr/bin/python3", + "python.formatting.provider": "yapf", + "python.formatting.yapfArgs": [ + "style=tools/code_format/.style.yapf" + ], "files.exclude": { "**/.clangd/**": true, "**/bazel-*/**": true @@ -32,6 +36,7 @@ "zxh404.vscode-proto3", "bazelbuild.vscode-bazel", "llvm-vs-code-extensions.vscode-clangd", - "webfreak.debug" + "webfreak.debug", + "ms-python.python" ] -} +} \ No newline at end of file From 445fcb9ea8e07840941a074adae8066f98e02505 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Thu, 18 Jun 2020 15:12:58 -0400 Subject: [PATCH 383/909] conn_pool: renaming TCP pool (#11637) Leaving the unit test where it is. It will be made into TEST_P and used to test both the old and the new. 
Risk Level: Low (only minor clang refactors) Testing: tests pass Docs Changes: n/a Release Notes: n/a Part of #11528 Signed-off-by: Alyssa Wilk --- source/common/tcp/BUILD | 4 +- .../{conn_pool.cc => original_conn_pool.cc} | 75 ++++++++++--------- .../tcp/{conn_pool.h => original_conn_pool.h} | 20 ++--- .../common/upstream/cluster_manager_impl.cc | 4 +- test/common/tcp/conn_pool_test.cc | 25 ++++--- 5 files changed, 67 insertions(+), 61 deletions(-) rename source/common/tcp/{conn_pool.cc => original_conn_pool.cc} (85%) rename source/common/tcp/{conn_pool.h => original_conn_pool.h} (89%) diff --git a/source/common/tcp/BUILD b/source/common/tcp/BUILD index f176de79ed55..e0b1232dfa5b 100644 --- a/source/common/tcp/BUILD +++ b/source/common/tcp/BUILD @@ -10,8 +10,8 @@ envoy_package() envoy_cc_library( name = "conn_pool_lib", - srcs = ["conn_pool.cc"], - hdrs = ["conn_pool.h"], + srcs = ["original_conn_pool.cc"], + hdrs = ["original_conn_pool.h"], external_deps = ["abseil_optional"], deps = [ "//include/envoy/event:deferred_deletable", diff --git a/source/common/tcp/conn_pool.cc b/source/common/tcp/original_conn_pool.cc similarity index 85% rename from source/common/tcp/conn_pool.cc rename to source/common/tcp/original_conn_pool.cc index 8f905769cb29..78d7ff3532e4 100644 --- a/source/common/tcp/conn_pool.cc +++ b/source/common/tcp/original_conn_pool.cc @@ -1,4 +1,4 @@ -#include "common/tcp/conn_pool.h" +#include "common/tcp/original_conn_pool.h" #include @@ -12,15 +12,15 @@ namespace Envoy { namespace Tcp { -ConnPoolImpl::ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, - Upstream::ResourcePriority priority, - const Network::ConnectionSocket::OptionsSharedPtr& options, - Network::TransportSocketOptionsSharedPtr transport_socket_options) +OriginalConnPoolImpl::OriginalConnPoolImpl( + Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, + Upstream::ResourcePriority priority, const Network::ConnectionSocket::OptionsSharedPtr& options, + Network::TransportSocketOptionsSharedPtr transport_socket_options) : dispatcher_(dispatcher), host_(host), priority_(priority), socket_options_(options), transport_socket_options_(transport_socket_options), upstream_ready_timer_(dispatcher_.createTimer([this]() { onUpstreamReady(); })) {} -ConnPoolImpl::~ConnPoolImpl() { +OriginalConnPoolImpl::~OriginalConnPoolImpl() { while (!ready_conns_.empty()) { ready_conns_.front()->conn_->close(Network::ConnectionCloseType::NoFlush); } @@ -37,7 +37,7 @@ ConnPoolImpl::~ConnPoolImpl() { dispatcher_.clearDeferredDeleteList(); } -void ConnPoolImpl::drainConnections() { +void OriginalConnPoolImpl::drainConnections() { while (!ready_conns_.empty()) { ready_conns_.front()->conn_->close(Network::ConnectionCloseType::NoFlush); } @@ -53,7 +53,7 @@ void ConnPoolImpl::drainConnections() { } } -void ConnPoolImpl::closeConnections() { +void OriginalConnPoolImpl::closeConnections() { while (!ready_conns_.empty()) { ready_conns_.front()->conn_->close(Network::ConnectionCloseType::NoFlush); } @@ -67,12 +67,13 @@ void ConnPoolImpl::closeConnections() { } } -void ConnPoolImpl::addDrainedCallback(DrainedCb cb) { +void OriginalConnPoolImpl::addDrainedCallback(DrainedCb cb) { drained_callbacks_.push_back(cb); checkForDrained(); } -void ConnPoolImpl::assignConnection(ActiveConn& conn, ConnectionPool::Callbacks& callbacks) { +void OriginalConnPoolImpl::assignConnection(ActiveConn& conn, + ConnectionPool::Callbacks& callbacks) { ASSERT(conn.wrapper_ == nullptr); conn.wrapper_ = std::make_shared(conn); @@ 
-80,7 +81,7 @@ void ConnPoolImpl::assignConnection(ActiveConn& conn, ConnectionPool::Callbacks& conn.real_host_description_); } -void ConnPoolImpl::checkForDrained() { +void OriginalConnPoolImpl::checkForDrained() { if (!drained_callbacks_.empty() && pending_requests_.empty() && busy_conns_.empty() && pending_conns_.empty()) { while (!ready_conns_.empty()) { @@ -93,13 +94,14 @@ void ConnPoolImpl::checkForDrained() { } } -void ConnPoolImpl::createNewConnection() { +void OriginalConnPoolImpl::createNewConnection() { ENVOY_LOG(debug, "creating a new connection"); ActiveConnPtr conn(new ActiveConn(*this)); conn->moveIntoList(std::move(conn), pending_conns_); } -ConnectionPool::Cancellable* ConnPoolImpl::newConnection(ConnectionPool::Callbacks& callbacks) { +ConnectionPool::Cancellable* +OriginalConnPoolImpl::newConnection(ConnectionPool::Callbacks& callbacks) { if (!ready_conns_.empty()) { ready_conns_.front()->moveBetweenLists(ready_conns_, busy_conns_); ENVOY_CONN_LOG(debug, "using existing connection", *busy_conns_.front()->conn_); @@ -132,7 +134,7 @@ ConnectionPool::Cancellable* ConnPoolImpl::newConnection(ConnectionPool::Callbac } } -void ConnPoolImpl::onConnectionEvent(ActiveConn& conn, Network::ConnectionEvent event) { +void OriginalConnPoolImpl::onConnectionEvent(ActiveConn& conn, Network::ConnectionEvent event) { if (event == Network::ConnectionEvent::RemoteClose || event == Network::ConnectionEvent::LocalClose) { ENVOY_CONN_LOG(debug, "client disconnected", *conn.conn_); @@ -176,7 +178,8 @@ void ConnPoolImpl::onConnectionEvent(ActiveConn& conn, Network::ConnectionEvent reason = ConnectionPool::PoolFailureReason::LocalConnectionFailure; } - std::list pending_requests_to_purge(std::move(pending_requests_)); + std::list pending_requests_to_purge; + pending_requests_to_purge.swap(pending_requests_); while (!pending_requests_to_purge.empty()) { PendingRequestPtr request = pending_requests_to_purge.front()->removeFromList(pending_requests_to_purge); @@ -215,8 +218,8 @@ void ConnPoolImpl::onConnectionEvent(ActiveConn& conn, Network::ConnectionEvent } } -void ConnPoolImpl::onPendingRequestCancel(PendingRequest& request, - ConnectionPool::CancelPolicy cancel_policy) { +void OriginalConnPoolImpl::onPendingRequestCancel(PendingRequest& request, + ConnectionPool::CancelPolicy cancel_policy) { ENVOY_LOG(debug, "canceling pending request"); request.removeFromList(pending_requests_); host_->cluster().stats().upstream_rq_cancelled_.inc(); @@ -232,7 +235,7 @@ void ConnPoolImpl::onPendingRequestCancel(PendingRequest& request, checkForDrained(); } -void ConnPoolImpl::onConnReleased(ActiveConn& conn) { +void OriginalConnPoolImpl::onConnReleased(ActiveConn& conn) { ENVOY_CONN_LOG(debug, "connection released", *conn.conn_); if (conn.remaining_requests_ > 0 && --conn.remaining_requests_ == 0) { @@ -248,11 +251,11 @@ void ConnPoolImpl::onConnReleased(ActiveConn& conn) { } } -void ConnPoolImpl::onConnDestroyed(ActiveConn& conn) { +void OriginalConnPoolImpl::onConnDestroyed(ActiveConn& conn) { ENVOY_CONN_LOG(debug, "connection destroyed", *conn.conn_); } -void ConnPoolImpl::onUpstreamReady() { +void OriginalConnPoolImpl::onUpstreamReady() { upstream_ready_enabled_ = false; while (!pending_requests_.empty() && !ready_conns_.empty()) { ActiveConn& conn = *ready_conns_.front(); @@ -265,7 +268,8 @@ void ConnPoolImpl::onUpstreamReady() { } } -void ConnPoolImpl::processIdleConnection(ActiveConn& conn, bool new_connection, bool delay) { +void OriginalConnPoolImpl::processIdleConnection(ActiveConn& conn, bool 
new_connection, + bool delay) { if (conn.wrapper_) { conn.wrapper_->invalidate(); conn.wrapper_.reset(); @@ -312,24 +316,25 @@ void ConnPoolImpl::processIdleConnection(ActiveConn& conn, bool new_connection, checkForDrained(); } -ConnPoolImpl::ConnectionWrapper::ConnectionWrapper(ActiveConn& parent) : parent_(parent) { +OriginalConnPoolImpl::ConnectionWrapper::ConnectionWrapper(ActiveConn& parent) : parent_(parent) { parent_.parent_.host_->cluster().stats().upstream_rq_total_.inc(); parent_.parent_.host_->cluster().stats().upstream_rq_active_.inc(); parent_.parent_.host_->stats().rq_total_.inc(); parent_.parent_.host_->stats().rq_active_.inc(); } -Network::ClientConnection& ConnPoolImpl::ConnectionWrapper::connection() { +Network::ClientConnection& OriginalConnPoolImpl::ConnectionWrapper::connection() { ASSERT(conn_valid_); return *parent_.conn_; } -void ConnPoolImpl::ConnectionWrapper::addUpstreamCallbacks(ConnectionPool::UpstreamCallbacks& cb) { +void OriginalConnPoolImpl::ConnectionWrapper::addUpstreamCallbacks( + ConnectionPool::UpstreamCallbacks& cb) { ASSERT(!released_); callbacks_ = &cb; } -void ConnPoolImpl::ConnectionWrapper::release(bool closed) { +void OriginalConnPoolImpl::ConnectionWrapper::release(bool closed) { // Allow multiple calls: connection close and destruction of ConnectionDataImplPtr will both // result in this call. if (!released_) { @@ -344,20 +349,20 @@ void ConnPoolImpl::ConnectionWrapper::release(bool closed) { } } -ConnPoolImpl::PendingRequest::PendingRequest(ConnPoolImpl& parent, - ConnectionPool::Callbacks& callbacks) +OriginalConnPoolImpl::PendingRequest::PendingRequest(OriginalConnPoolImpl& parent, + ConnectionPool::Callbacks& callbacks) : parent_(parent), callbacks_(callbacks) { parent_.host_->cluster().stats().upstream_rq_pending_total_.inc(); parent_.host_->cluster().stats().upstream_rq_pending_active_.inc(); parent_.host_->cluster().resourceManager(parent_.priority_).pendingRequests().inc(); } -ConnPoolImpl::PendingRequest::~PendingRequest() { +OriginalConnPoolImpl::PendingRequest::~PendingRequest() { parent_.host_->cluster().stats().upstream_rq_pending_active_.dec(); parent_.host_->cluster().resourceManager(parent_.priority_).pendingRequests().dec(); } -ConnPoolImpl::ActiveConn::ActiveConn(ConnPoolImpl& parent) +OriginalConnPoolImpl::ActiveConn::ActiveConn(OriginalConnPoolImpl& parent) : parent_(parent), connect_timer_(parent_.dispatcher_.createTimer([this]() -> void { onConnectTimeout(); })), remaining_requests_(parent_.host_->cluster().maxRequestsPerConnection()), timed_out_(false) { @@ -398,7 +403,7 @@ ConnPoolImpl::ActiveConn::ActiveConn(ConnPoolImpl& parent) conn_->noDelay(true); } -ConnPoolImpl::ActiveConn::~ActiveConn() { +OriginalConnPoolImpl::ActiveConn::~ActiveConn() { if (wrapper_) { wrapper_->invalidate(); } @@ -411,7 +416,7 @@ ConnPoolImpl::ActiveConn::~ActiveConn() { parent_.onConnDestroyed(*this); } -void ConnPoolImpl::ActiveConn::onConnectTimeout() { +void OriginalConnPoolImpl::ActiveConn::onConnectTimeout() { // We just close the connection at this point. This will result in both a timeout and a connect // failure and will fold into all the normal connect failure logic. 
ENVOY_CONN_LOG(debug, "connect timeout", *conn_); @@ -420,7 +425,7 @@ void ConnPoolImpl::ActiveConn::onConnectTimeout() { conn_->close(Network::ConnectionCloseType::NoFlush); } -void ConnPoolImpl::ActiveConn::onUpstreamData(Buffer::Instance& data, bool end_stream) { +void OriginalConnPoolImpl::ActiveConn::onUpstreamData(Buffer::Instance& data, bool end_stream) { if (wrapper_ != nullptr && wrapper_->callbacks_ != nullptr) { // Delegate to the connection owner. wrapper_->callbacks_->onUpstreamData(data, end_stream); @@ -431,7 +436,7 @@ void ConnPoolImpl::ActiveConn::onUpstreamData(Buffer::Instance& data, bool end_s } } -void ConnPoolImpl::ActiveConn::onEvent(Network::ConnectionEvent event) { +void OriginalConnPoolImpl::ActiveConn::onEvent(Network::ConnectionEvent event) { ConnectionPool::UpstreamCallbacks* cb = nullptr; if (wrapper_ != nullptr && wrapper_->callbacks_ != nullptr) { cb = wrapper_->callbacks_; @@ -446,13 +451,13 @@ void ConnPoolImpl::ActiveConn::onEvent(Network::ConnectionEvent event) { } } -void ConnPoolImpl::ActiveConn::onAboveWriteBufferHighWatermark() { +void OriginalConnPoolImpl::ActiveConn::onAboveWriteBufferHighWatermark() { if (wrapper_ != nullptr && wrapper_->callbacks_ != nullptr) { wrapper_->callbacks_->onAboveWriteBufferHighWatermark(); } } -void ConnPoolImpl::ActiveConn::onBelowWriteBufferLowWatermark() { +void OriginalConnPoolImpl::ActiveConn::onBelowWriteBufferLowWatermark() { if (wrapper_ != nullptr && wrapper_->callbacks_ != nullptr) { wrapper_->callbacks_->onBelowWriteBufferLowWatermark(); } diff --git a/source/common/tcp/conn_pool.h b/source/common/tcp/original_conn_pool.h similarity index 89% rename from source/common/tcp/conn_pool.h rename to source/common/tcp/original_conn_pool.h index d676b593bc60..148416e2aa98 100644 --- a/source/common/tcp/conn_pool.h +++ b/source/common/tcp/original_conn_pool.h @@ -18,14 +18,14 @@ namespace Envoy { namespace Tcp { -class ConnPoolImpl : Logger::Loggable, public ConnectionPool::Instance { +class OriginalConnPoolImpl : Logger::Loggable, public ConnectionPool::Instance { public: - ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, - Upstream::ResourcePriority priority, - const Network::ConnectionSocket::OptionsSharedPtr& options, - Network::TransportSocketOptionsSharedPtr transport_socket_options); + OriginalConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, + Upstream::ResourcePriority priority, + const Network::ConnectionSocket::OptionsSharedPtr& options, + Network::TransportSocketOptionsSharedPtr transport_socket_options); - ~ConnPoolImpl() override; + ~OriginalConnPoolImpl() override; // ConnectionPool::Instance void addDrainedCallback(DrainedCb cb) override; @@ -93,7 +93,7 @@ class ConnPoolImpl : Logger::Loggable, public ConnectionPool:: struct ActiveConn : LinkedObject, public Network::ConnectionCallbacks, public Event::DeferredDeletable { - ActiveConn(ConnPoolImpl& parent); + ActiveConn(OriginalConnPoolImpl& parent); ~ActiveConn() override; void onConnectTimeout(); @@ -109,7 +109,7 @@ class ConnPoolImpl : Logger::Loggable, public ConnectionPool:: } ConnectionPool::ConnectionState* connectionState() { return conn_state_.get(); } - ConnPoolImpl& parent_; + OriginalConnPoolImpl& parent_; Upstream::HostDescriptionConstSharedPtr real_host_description_; ConnectionWrapperSharedPtr wrapper_; Network::ClientConnectionPtr conn_; @@ -123,7 +123,7 @@ class ConnPoolImpl : Logger::Loggable, public ConnectionPool:: using ActiveConnPtr = std::unique_ptr; struct PendingRequest 
: LinkedObject, public ConnectionPool::Cancellable { - PendingRequest(ConnPoolImpl& parent, ConnectionPool::Callbacks& callbacks); + PendingRequest(OriginalConnPoolImpl& parent, ConnectionPool::Callbacks& callbacks); ~PendingRequest() override; // ConnectionPool::Cancellable @@ -131,7 +131,7 @@ class ConnPoolImpl : Logger::Loggable, public ConnectionPool:: parent_.onPendingRequestCancel(*this, cancel_policy); } - ConnPoolImpl& parent_; + OriginalConnPoolImpl& parent_; ConnectionPool::Callbacks& callbacks_; }; diff --git a/source/common/upstream/cluster_manager_impl.cc b/source/common/upstream/cluster_manager_impl.cc index d34f12558373..0a9a21e48da2 100644 --- a/source/common/upstream/cluster_manager_impl.cc +++ b/source/common/upstream/cluster_manager_impl.cc @@ -32,7 +32,7 @@ #include "common/network/utility.h" #include "common/protobuf/utility.h" #include "common/router/shadow_writer_impl.h" -#include "common/tcp/conn_pool.h" +#include "common/tcp/original_conn_pool.h" #include "common/upstream/cds_api_impl.h" #include "common/upstream/load_balancer_impl.h" #include "common/upstream/maglev_lb.h" @@ -1416,7 +1416,7 @@ Tcp::ConnectionPool::InstancePtr ProdClusterManagerFactory::allocateTcpConnPool( const Network::ConnectionSocket::OptionsSharedPtr& options, Network::TransportSocketOptionsSharedPtr transport_socket_options) { return Tcp::ConnectionPool::InstancePtr{ - new Tcp::ConnPoolImpl(dispatcher, host, priority, options, transport_socket_options)}; + new Tcp::OriginalConnPoolImpl(dispatcher, host, priority, options, transport_socket_options)}; } std::pair ProdClusterManagerFactory::clusterFromProto( diff --git a/test/common/tcp/conn_pool_test.cc b/test/common/tcp/conn_pool_test.cc index 1470abc69c37..2f2634ac14d0 100644 --- a/test/common/tcp/conn_pool_test.cc +++ b/test/common/tcp/conn_pool_test.cc @@ -3,7 +3,7 @@ #include "common/event/dispatcher_impl.h" #include "common/network/utility.h" -#include "common/tcp/conn_pool.h" +#include "common/tcp/original_conn_pool.h" #include "common/upstream/upstream_impl.h" #include "test/common/upstream/utility.h" @@ -65,13 +65,14 @@ struct ConnPoolCallbacks : public Tcp::ConnectionPool::Callbacks { }; /** - * A test version of ConnPoolImpl that allows for mocking. + * A test version of OriginalConnPoolImpl that allows for mocking. 
*/ -class ConnPoolImplForTest : public ConnPoolImpl { +class ConnPoolImplForTest : public OriginalConnPoolImpl { public: ConnPoolImplForTest(Event::MockDispatcher& dispatcher, Upstream::HostSharedPtr host, NiceMock* upstream_ready_timer) - : ConnPoolImpl(dispatcher, host, Upstream::ResourcePriority::Default, nullptr, nullptr), + : OriginalConnPoolImpl(dispatcher, host, Upstream::ResourcePriority::Default, nullptr, + nullptr), mock_dispatcher_(dispatcher), mock_upstream_ready_timer_(upstream_ready_timer) {} ~ConnPoolImplForTest() override { @@ -120,7 +121,7 @@ class ConnPoolImplForTest : public ConnPoolImpl { std::vector test_conns_; protected: - void onConnReleased(ConnPoolImpl::ActiveConn& conn) override { + void onConnReleased(OriginalConnPoolImpl::ActiveConn& conn) override { for (auto& test_conn : test_conns_) { if (conn.conn_.get() == test_conn.connection_) { onConnReleasedForTest(); @@ -128,10 +129,10 @@ class ConnPoolImplForTest : public ConnPoolImpl { } } - ConnPoolImpl::onConnReleased(conn); + OriginalConnPoolImpl::onConnReleased(conn); } - void onConnDestroyed(ConnPoolImpl::ActiveConn& conn) override { + void onConnDestroyed(OriginalConnPoolImpl::ActiveConn& conn) override { for (auto i = test_conns_.begin(); i != test_conns_.end(); i++) { if (conn.conn_.get() == i->connection_) { onConnDestroyedForTest(); @@ -140,7 +141,7 @@ class ConnPoolImplForTest : public ConnPoolImpl { } } - ConnPoolImpl::onConnDestroyed(conn); + OriginalConnPoolImpl::onConnDestroyed(conn); } }; @@ -173,9 +174,9 @@ class TcpConnPoolImplDestructorTest : public testing::Test { public: TcpConnPoolImplDestructorTest() : upstream_ready_timer_(new NiceMock(&dispatcher_)), - conn_pool_{new ConnPoolImpl(dispatcher_, - Upstream::makeTestHost(cluster_, "tcp://127.0.0.1:9000"), - Upstream::ResourcePriority::Default, nullptr, nullptr)} {} + conn_pool_{new OriginalConnPoolImpl( + dispatcher_, Upstream::makeTestHost(cluster_, "tcp://127.0.0.1:9000"), + Upstream::ResourcePriority::Default, nullptr, nullptr)} {} ~TcpConnPoolImplDestructorTest() override = default; @@ -199,7 +200,7 @@ class TcpConnPoolImplDestructorTest : public testing::Test { NiceMock* upstream_ready_timer_; NiceMock* connect_timer_; NiceMock* connection_; - std::unique_ptr conn_pool_; + std::unique_ptr conn_pool_; std::unique_ptr callbacks_; }; From 34e7279524a035079bb9b6b0a430d31f0763f2c9 Mon Sep 17 00:00:00 2001 From: jianwen612 <55008549+jianwen612@users.noreply.github.com> Date: Thu, 18 Jun 2020 15:43:18 -0500 Subject: [PATCH 384/909] common/common/utility.cc: Optimized DateFormatter.parse() to O(n) (#11580) * Fixes DateFormatter.parse() timeout because of its O(n^2) time complexity. Fixes fuzz bug Timeout (exceeds 60 secs). https://oss-fuzz.com/testcase-detail/5700644951556096(or https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=21682) The original implementation uses "string.replace" to generate a new string and to collect specifiers in while(regex_search) loop. However, the new string is completely unnecessary and will never not used after this function call. Each iteration in the loop, the regex needs to search for the next matched string from the beginning. This causes it to be O(n^2). My implementation is to loop on suffix. In each iteration, set `suffix=matched.suffix()` and continue searching from the end of the matched string. So when collecting specifiers, we need to use different indices there to make sure they still point to the same positions in the string after being formatted. 
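To make the new scanning scheme concrete, here is a minimal, self-contained sketch of the suffix-loop idea (illustration only, not the actual DateFormatter::parse(), which also records the in-between text segments and substitution widths; the regex below is assumed to mirror the (%([1-9])?f)|(%s) specifier pattern):

#include <regex>
#include <string>
#include <vector>

// Returns the absolute positions of every specifier in format_string, scanning left to
// right. Each regex_search resumes from the remainder of the previous match instead of
// rescanning the whole string from index 0.
std::vector<size_t> findSpecifierPositions(const std::string& format_string) {
  static const std::regex pattern("(%([1-9])?f)|(%s)");
  std::vector<size_t> positions;
  std::string suffix = format_string;
  size_t consumed = 0; // absolute index in format_string where `suffix` begins
  std::smatch matched;
  while (std::regex_search(suffix, matched, pattern)) {
    positions.push_back(consumed + matched.position());
    consumed += matched.position() + matched.length();
    suffix = matched.suffix(); // keep only the unscanned tail for the next iteration
  }
  return positions;
}

The real parse() applies the same loop but records positions in terms of the substituted template via "step", since each specifier is later replaced by a fixed-width placeholder.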
Signed-off-by: jianwen --- source/common/common/utility.cc | 24 ++++++++-------- test/common/common/utility_speed_test.cc | 28 +++++++++++++++++++ test/common/common/utility_test.cc | 20 +++++++++++++ .../header_parser_corpus/timeout_test_case | 6 ++++ 4 files changed, 65 insertions(+), 13 deletions(-) create mode 100644 test/common/router/header_parser_corpus/timeout_test_case diff --git a/source/common/common/utility.cc b/source/common/common/utility.cc index 7a3656f47ba7..1d4e933e8e87 100644 --- a/source/common/common/utility.cc +++ b/source/common/common/utility.cc @@ -127,10 +127,13 @@ std::string DateFormatter::fromTime(const SystemTime& time) const { } void DateFormatter::parse(const std::string& format_string) { - std::string new_format_string = format_string; + std::string suffix = format_string; std::smatch matched; + // "step" is the last specifier's position + the last specifier's width. It's not the current + // position in "format_string" because the length has changed. It is actually the index which + // points to the end of the last specifier in formatted string (generated in the future). size_t step = 0; - while (regex_search(new_format_string, matched, SpecifierConstants::get().PATTERN)) { + while (regex_search(suffix, matched, SpecifierConstants::get().PATTERN)) { // The std::smatch matched for (%([1-9])?f)|(%s): [all, subsecond-specifier, subsecond-specifier // width, second-specifier]. const std::string& width_specifier = matched[2]; @@ -139,27 +142,22 @@ void DateFormatter::parse(const std::string& format_string) { // In the template string to be used in runtime substitution, the width is the number of // characters to be replaced. const size_t width = width_specifier.empty() ? 9 : width_specifier.at(0) - '0'; - new_format_string.replace(matched.position(), matched.length(), - std::string(second_specifier.empty() ? width : 2, '?')); - - ASSERT(step < new_format_string.size()); + ASSERT(!suffix.empty()); // This records matched position, the width of current subsecond pattern, and also the string // segment before the matched position. These values will be used later at data path. specifiers_.emplace_back( second_specifier.empty() - ? Specifier(matched.position(), width, - new_format_string.substr(step, matched.position() - step)) - : Specifier(matched.position(), - new_format_string.substr(step, matched.position() - step))); - + ? Specifier(step + matched.position(), width, suffix.substr(0, matched.position())) + : Specifier(step + matched.position(), suffix.substr(0, matched.position()))); step = specifiers_.back().position_ + specifiers_.back().width_; + suffix = matched.suffix(); } // To capture the segment after the last specifier pattern of a format string by creating a zero // width specifier. E.g. %3f-this-is-the-last-%s-segment-%Y-until-this. - if (step < new_format_string.size()) { - Specifier specifier(step, 0, new_format_string.substr(step)); + if (!suffix.empty()) { + Specifier specifier(step, 0, suffix); specifiers_.emplace_back(specifier); } } diff --git a/test/common/common/utility_speed_test.cc b/test/common/common/utility_speed_test.cc index c95af89c3a3c..a0563263b076 100644 --- a/test/common/common/utility_speed_test.cc +++ b/test/common/common/utility_speed_test.cc @@ -62,6 +62,34 @@ static void BM_DateTimeFormatterWithSubseconds(benchmark::State& state) { } BENCHMARK(BM_DateTimeFormatterWithSubseconds); +// This benchmark is basically similar with the above BM_DateTimeFormatterWithSubseconds, the +// differences are: 1. 
the format string input is long with duplicated subseconds. 2. The purpose +// is to test DateFormatter.parse() which is called in constructor. +// NOLINTNEXTLINE(readability-identifier-naming) +static void BM_DateTimeFormatterWithLongSubsecondsString(benchmark::State& state) { + int outputBytes = 0; + + Envoy::SystemTime time(std::chrono::seconds(1522796769)); + std::mt19937 prng(1); + std::uniform_int_distribution distribution(-10, 20); + std::string input; + int num_duplicates = 400; + std::string duplicate_input = "%%1f %1f, %2f, %3f, %4f, "; + for (int i = 0; i < num_duplicates; i++) { + absl::StrAppend(&input, duplicate_input, "("); + } + absl::StrAppend(&input, duplicate_input); + + for (auto _ : state) { + Envoy::DateFormatter date_formatter(input); + time += std::chrono::milliseconds(static_cast(distribution(prng))); + outputBytes += date_formatter.fromTime(time).length(); + } + benchmark::DoNotOptimize(outputBytes); +} +BENCHMARK(BM_DateTimeFormatterWithLongSubsecondsString); + +// NOLINTNEXTLINE(readability-identifier-naming) static void BM_DateTimeFormatterWithoutSubseconds(benchmark::State& state) { int outputBytes = 0; diff --git a/test/common/common/utility_test.cc b/test/common/common/utility_test.cc index bb326989b94f..531f7017204e 100644 --- a/test/common/common/utility_test.cc +++ b/test/common/common/utility_test.cc @@ -813,6 +813,26 @@ TEST(DateFormatter, FromTime) { EXPECT_EQ("aaa00", DateFormatter(std::string(3, 'a') + "%H").fromTime(time2)); } +// Check the time complexity. Make sure DateFormatter can finish parsing long messy string without +// crashing/freezing. This should pass in 0-2 seconds if O(n). Finish in 30-120 seconds if O(n^2) +TEST(DateFormatter, ParseLongString) { + std::string input; + std::string expected_output; + int num_duplicates = 400; + std::string duplicate_input = "%%1f %1f, %2f, %3f, %4f, "; + std::string duplicate_output = "%1 1, 14, 142, 1420, "; + for (int i = 0; i < num_duplicates; i++) { + absl::StrAppend(&input, duplicate_input, "("); + absl::StrAppend(&expected_output, duplicate_output, "("); + } + absl::StrAppend(&input, duplicate_input); + absl::StrAppend(&expected_output, duplicate_output); + + const SystemTime time1(std::chrono::seconds(1522796769) + std::chrono::milliseconds(142)); + std::string output = DateFormatter(input).fromTime(time1); + EXPECT_EQ(expected_output, output); +} + // Verify that two DateFormatter patterns with the same ??? patterns but // different format strings don't false share cache entries. This is a // regression test for when they did. 
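As a worked example of how duplicate_output in the ParseLongString test above is derived: time1 is 1522796769s plus 142ms, so the nine-digit subsecond string is "142000000". Within each "%%1f %1f, %2f, %3f, %4f, " unit, the first '%' of "%%1f" is literal segment text and "%1f" renders the one-digit subsecond, giving "%1", while %1f, %2f, %3f and %4f truncate the subseconds to "1", "14", "142" and "1420", which yields the expected "%1 1, 14, 142, 1420, ".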
diff --git a/test/common/router/header_parser_corpus/timeout_test_case b/test/common/router/header_parser_corpus/timeout_test_case new file mode 100644 index 000000000000..a4df35418062 --- /dev/null +++ b/test/common/router/header_parser_corpus/timeout_test_case @@ -0,0 +1,6 @@ +headers_to_add { + header { + key: " " + value: "%START_TIME(`Qf;BBBBB)%%START_TIME(%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %2f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 
%3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, 
%2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1%3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, 
%2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, 
(%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f,\016~\177 %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 , ,,2, fff%f%13%(%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, %3f,f, (%%3 %1f, %2f, % %3f,f, %1f, %2f, %3f, %4f, %5f, %6f,, %6f,(%7f, %8f, % %4f, %5)%%START_TIME(%%3 %1f, %2f, %3f,f, %1f, %2f, %3f, %4f, %5f, %6f,, %6f,(%7f, %8f, % %4f, %5)%%START_TIME(%%3 %1f, %2f, %3f,f, %1f, %2f, %3f, %4f, %5f, %6f,, %6f,(%7f, %8f, % %4f, %5)%%START_TIME(%%3 %1f, %2f, %3f,f, %1f, %2f, %3f, %4f, %5f, %6f,, %6f,(%7f, %8f, % %4f, %5)%%START_TIME(%%3 %1f, %2f, %3f,f, %1f, %2f, %3f, %4f, %5f, %6f,, %6f,(%7f, %8f, % %4f, %5)%%START_TIME(%%3 %1f, %2f, %3f,f, %1f, %2f, %3f, %4f, %5f, %6f,, %6f,(%7f, %8f, % %4f, %5)%%START_TIME(%5)%%START_TIME(%%3 %1f, %2f, %3f,f, %1f, %2f, %3f, %4f, %5f, %6f, % %4f, %5)%%START_TIME(%%3 %1f, %2f, %3f,f, %1f, %25f, %6f, %4294967295f, %8f, 9f)%" + } +} From faec4aee4c16627057904da5cada437ebb78e1e7 Mon Sep 17 00:00:00 2001 From: Sam Flattery <44659644+samflattery@users.noreply.github.com> Date: Thu, 18 Jun 2020 22:43:27 +0100 Subject: [PATCH 385/909] utility: refactor utility functions in ads integration test (#11588) * move utility functions from ads_integration.cc to utility.cc so they can be reused in the xDS fuzzer I am writing * the config functions rely on member variables in the ads integration test, so create stubs that fill in the member variables for readability in the tests * change existing utility function buildCluster to buildStaticCluster Risk Level: Low Testing: Passes ads_integration_test Docs Changes: N/A Release Notes: N/A Signed-off-by: Sam Flattery --- test/config/utility.cc | 124 +++++++++++++++-- test/config/utility.h | 38 +++++- .../aggregate/cluster_integration_test.cc | 4 +- test/integration/ads_integration.cc | 126 +++--------------- test/integration/ads_integration.h | 45 ------- 
test/integration/ads_integration_test.cc | 15 ++- test/integration/cds_integration_test.cc | 4 +- 7 files changed, 181 insertions(+), 175 deletions(-) diff --git a/test/config/utility.cc b/test/config/utility.cc index 6cc4a7063b24..ea96c7ddb142 100644 --- a/test/config/utility.cc +++ b/test/config/utility.cc @@ -313,16 +313,19 @@ std::string ConfigHelper::discoveredClustersBootstrap(const std::string& api_typ } // TODO(#6327) cleaner approach to testing with static config. -std::string ConfigHelper::adsBootstrap(const std::string& api_type) { - return fmt::format( - R"EOF( +std::string ConfigHelper::adsBootstrap(const std::string& api_type, + envoy::config::core::v3::ApiVersion api_version) { + return fmt::format(R"EOF( dynamic_resources: lds_config: + resource_api_version: {1} ads: {{}} cds_config: + resource_api_version: {1} ads: {{}} ads_config: - api_type: {} + transport_api_version: {1} + api_type: {0} static_resources: clusters: name: dummy_cluster @@ -341,17 +344,19 @@ std::string ConfigHelper::adsBootstrap(const std::string& api_type) { lb_policy: ROUND_ROBIN http2_protocol_options: {{}} admin: - access_log_path: {} + access_log_path: {2} address: socket_address: address: 127.0.0.1 port_value: 0 )EOF", - TestEnvironment::nullDevicePath(), api_type); + api_type, api_version == envoy::config::core::v3::ApiVersion::V2 ? "V2" : "V3", + TestEnvironment::nullDevicePath()); } -envoy::config::cluster::v3::Cluster ConfigHelper::buildCluster(const std::string& name, int port, - const std::string& ip_version) { +// TODO(samflattery): bundle this up with buildCluster +envoy::config::cluster::v3::Cluster +ConfigHelper::buildStaticCluster(const std::string& name, int port, const std::string& address) { return TestUtility::parseYaml(fmt::format(R"EOF( name: {} connect_timeout: 5s @@ -369,7 +374,108 @@ envoy::config::cluster::v3::Cluster ConfigHelper::buildCluster(const std::string http2_protocol_options: {{}} )EOF", name, name, - ip_version, port)); + address, port)); +} + +envoy::config::cluster::v3::Cluster +ConfigHelper::buildCluster(const std::string& name, const std::string& lb_policy, + envoy::config::core::v3::ApiVersion api_version) { + API_NO_BOOST(envoy::config::cluster::v3::Cluster) cluster; + TestUtility::loadFromYaml(fmt::format(R"EOF( + name: {} + connect_timeout: 5s + type: EDS + eds_cluster_config: + eds_config: + resource_api_version: {} + ads: {{}} + lb_policy: {} + http2_protocol_options: {{}} + )EOF", + name, apiVersionStr(api_version), lb_policy), + cluster, shouldBoost(api_version)); + return cluster; +} + +envoy::config::endpoint::v3::ClusterLoadAssignment +ConfigHelper::buildClusterLoadAssignment(const std::string& name, const std::string& address, + uint32_t port, + envoy::config::core::v3::ApiVersion api_version) { + API_NO_BOOST(envoy::config::endpoint::v3::ClusterLoadAssignment) cluster_load_assignment; + TestUtility::loadFromYaml(fmt::format(R"EOF( + cluster_name: {} + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: {} + port_value: {} + )EOF", + name, address, port), + cluster_load_assignment, shouldBoost(api_version)); + return cluster_load_assignment; +} + +envoy::config::listener::v3::Listener +ConfigHelper::buildBaseListener(const std::string& name, const std::string& address, + const std::string& filter_chains, + envoy::config::core::v3::ApiVersion api_version) { + API_NO_BOOST(envoy::config::listener::v3::Listener) listener; + TestUtility::loadFromYaml(fmt::format( + R"EOF( + name: {} + address: + socket_address: + address: {} 
+ port_value: 0 + filter_chains: + {} + )EOF", + name, address, filter_chains), + listener, shouldBoost(api_version)); + return listener; +} + +envoy::config::listener::v3::Listener +ConfigHelper::buildListener(const std::string& name, const std::string& route_config, + const std::string& address, const std::string& stat_prefix, + envoy::config::core::v3::ApiVersion api_version) { + std::string hcm = fmt::format( + R"EOF( + filters: + - name: http + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + stat_prefix: {} + codec_type: HTTP2 + rds: + route_config_name: {} + config_source: + resource_api_version: {} + ads: {{}} + http_filters: [{{ name: envoy.filters.http.router }}] + )EOF", + stat_prefix, route_config, apiVersionStr(api_version)); + return buildBaseListener(name, address, hcm, api_version); +} + +envoy::config::route::v3::RouteConfiguration +ConfigHelper::buildRouteConfig(const std::string& name, const std::string& cluster, + envoy::config::core::v3::ApiVersion api_version) { + API_NO_BOOST(envoy::config::route::v3::RouteConfiguration) route; + TestUtility::loadFromYaml(fmt::format(R"EOF( + name: {} + virtual_hosts: + - name: integration + domains: ["*"] + routes: + - match: {{ prefix: "/" }} + route: {{ cluster: {} }} + )EOF", + name, cluster), + route, shouldBoost(api_version)); + return route; } envoy::config::endpoint::v3::Endpoint ConfigHelper::buildEndpoint(const std::string& address) { diff --git a/test/config/utility.h b/test/config/utility.h index 0983132be998..39bcb00a4454 100644 --- a/test/config/utility.h +++ b/test/config/utility.h @@ -16,6 +16,7 @@ #include "envoy/extensions/transport_sockets/tls/v3/cert.pb.h" #include "envoy/http/codes.h" +#include "common/config/api_version.h" #include "common/network/address_impl.h" #include "common/protobuf/protobuf.h" @@ -98,10 +99,33 @@ class ConfigHelper { // Configuration for L7 proxying, with clusters cluster_1 and cluster_2 meant to be added via CDS. // api_type should be REST, GRPC, or DELTA_GRPC. static std::string discoveredClustersBootstrap(const std::string& api_type); - static std::string adsBootstrap(const std::string& api_type); + static std::string adsBootstrap(const std::string& api_type, + envoy::config::core::v3::ApiVersion api_version); // Builds a standard Cluster config fragment, with a single endpoint (at address:port). 
- static envoy::config::cluster::v3::Cluster buildCluster(const std::string& name, int port, - const std::string& address); + static envoy::config::cluster::v3::Cluster buildStaticCluster(const std::string& name, int port, + const std::string& address); + + // ADS configurations + static envoy::config::cluster::v3::Cluster buildCluster( + const std::string& name, const std::string& lb_policy = "ROUND_ROBIN", + envoy::config::core::v3::ApiVersion api_version = envoy::config::core::v3::ApiVersion::V3); + + static envoy::config::endpoint::v3::ClusterLoadAssignment buildClusterLoadAssignment( + const std::string& name, const std::string& ip_version, uint32_t port, + envoy::config::core::v3::ApiVersion api_version = envoy::config::core::v3::ApiVersion::V3); + + static envoy::config::listener::v3::Listener buildBaseListener( + const std::string& name, const std::string& address, const std::string& filter_chains = "", + envoy::config::core::v3::ApiVersion api_version = envoy::config::core::v3::ApiVersion::V3); + + static envoy::config::listener::v3::Listener buildListener( + const std::string& name, const std::string& route_config, const std::string& address, + const std::string& stat_prefix, + envoy::config::core::v3::ApiVersion api_version = envoy::config::core::v3::ApiVersion::V3); + + static envoy::config::route::v3::RouteConfiguration buildRouteConfig( + const std::string& name, const std::string& cluster, + envoy::config::core::v3::ApiVersion api_version = envoy::config::core::v3::ApiVersion::V3); // Builds a standard Endpoint suitable for population by finalize(). static envoy::config::endpoint::v3::Endpoint buildEndpoint(const std::string& address); @@ -206,6 +230,14 @@ class ConfigHelper { config); private: + static bool shouldBoost(envoy::config::core::v3::ApiVersion api_version) { + return api_version == envoy::config::core::v3::ApiVersion::V2; + } + + static std::string apiVersionStr(envoy::config::core::v3::ApiVersion api_version) { + return api_version == envoy::config::core::v3::ApiVersion::V2 ? "V2" : "V3"; + } + // Load the first HCM struct from the first listener into a parsed proto. 
bool loadHttpConnectionManager(HttpConnectionManager& hcm); // Take the contents of the provided HCM proto and stuff them into the first HCM diff --git a/test/extensions/clusters/aggregate/cluster_integration_test.cc b/test/extensions/clusters/aggregate/cluster_integration_test.cc index 1fbd999083e0..7a034d24d260 100644 --- a/test/extensions/clusters/aggregate/cluster_integration_test.cc +++ b/test/extensions/clusters/aggregate/cluster_integration_test.cc @@ -135,10 +135,10 @@ class AggregateIntegrationTest : public testing::TestWithParamset_allow_unexpected_disconnects(false); - cluster1_ = ConfigHelper::buildCluster( + cluster1_ = ConfigHelper::buildStaticCluster( FirstClusterName, fake_upstreams_[FirstUpstreamIndex]->localAddress()->ip()->port(), Network::Test::getLoopbackAddressString(GetParam())); - cluster2_ = ConfigHelper::buildCluster( + cluster2_ = ConfigHelper::buildStaticCluster( SecondClusterName, fake_upstreams_[SecondUpstreamIndex]->localAddress()->ip()->port(), Network::Test::getLoopbackAddressString(GetParam())); diff --git a/test/integration/ads_integration.cc b/test/integration/ads_integration.cc index dab9cd0125cf..63c8279d5138 100644 --- a/test/integration/ads_integration.cc +++ b/test/integration/ads_integration.cc @@ -23,9 +23,8 @@ namespace Envoy { AdsIntegrationTest::AdsIntegrationTest(const envoy::config::core::v3::ApiVersion api_version) : HttpIntegrationTest( Http::CodecClient::Type::HTTP2, ipVersion(), - adsIntegrationConfig(sotwOrDelta() == Grpc::SotwOrDelta::Sotw ? "GRPC" : "DELTA_GRPC", - api_version == envoy::config::core::v3::ApiVersion::V2 ? "V2" - : "V3")) { + ConfigHelper::adsBootstrap( + sotwOrDelta() == Grpc::SotwOrDelta::Sotw ? "GRPC" : "DELTA_GRPC", api_version)) { use_lds_ = false; create_xds_upstream_ = true; tls_xds_upstream_ = true; @@ -35,141 +34,52 @@ AdsIntegrationTest::AdsIntegrationTest(const envoy::config::core::v3::ApiVersion void AdsIntegrationTest::TearDown() { cleanUpXdsConnection(); } -bool AdsIntegrationTest::shouldBoost() { - return api_version_ == envoy::config::core::v3::ApiVersion::V2 ? true : false; -} - envoy::config::cluster::v3::Cluster AdsIntegrationTest::buildCluster(const std::string& name) { - API_NO_BOOST(envoy::config::cluster::v3::Cluster) cluster; - TestUtility::loadFromYaml( - fmt::format(R"EOF( - name: {} - connect_timeout: 5s - type: EDS - eds_cluster_config: - eds_config: - resource_api_version: {} - ads: {{}} - lb_policy: ROUND_ROBIN - http2_protocol_options: {{}} - )EOF", - name, api_version_ == envoy::config::core::v3::ApiVersion::V2 ? "V2" : "V3"), - cluster, shouldBoost()); - return cluster; + return ConfigHelper::buildCluster(name, "ROUND_ROBIN", api_version_); } envoy::config::cluster::v3::Cluster AdsIntegrationTest::buildRedisCluster(const std::string& name) { - API_NO_BOOST(envoy::config::cluster::v3::Cluster) cluster; - TestUtility::loadFromYaml( - fmt::format(R"EOF( - name: {} - connect_timeout: 5s - type: EDS - eds_cluster_config: - eds_config: - resource_api_version: {} - ads: {{}} - lb_policy: MAGLEV - )EOF", - name, api_version_ == envoy::config::core::v3::ApiVersion::V2 ? 
"V2" : "V3"), - cluster, shouldBoost()); - return cluster; + return ConfigHelper::buildCluster(name, "MAGLEV", api_version_); } envoy::config::endpoint::v3::ClusterLoadAssignment AdsIntegrationTest::buildClusterLoadAssignment(const std::string& name) { - API_NO_BOOST(envoy::config::endpoint::v3::ClusterLoadAssignment) cluster_load_assignment; - TestUtility::loadFromYaml(fmt::format(R"EOF( - cluster_name: {} - endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: - address: {} - port_value: {} - )EOF", - name, Network::Test::getLoopbackAddressString(ipVersion()), - fake_upstreams_[0]->localAddress()->ip()->port()), - cluster_load_assignment, shouldBoost()); - return cluster_load_assignment; + return ConfigHelper::buildClusterLoadAssignment( + name, Network::Test::getLoopbackAddressString(ipVersion()), + fake_upstreams_[0]->localAddress()->ip()->port(), api_version_); } envoy::config::listener::v3::Listener AdsIntegrationTest::buildListener(const std::string& name, const std::string& route_config, const std::string& stat_prefix) { - API_NO_BOOST(envoy::config::listener::v3::Listener) listener; - TestUtility::loadFromYaml( - fmt::format( - R"EOF( - name: {} - address: - socket_address: - address: {} - port_value: 0 - filter_chains: - filters: - - name: http - typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager - stat_prefix: {} - codec_type: HTTP2 - rds: - route_config_name: {} - config_source: - resource_api_version: {} - ads: {{}} - http_filters: [{{ name: envoy.filters.http.router }}] - )EOF", - name, Network::Test::getLoopbackAddressString(ipVersion()), stat_prefix, route_config, - api_version_ == envoy::config::core::v3::ApiVersion::V2 ? "V2" : "V3"), - listener, shouldBoost()); - return listener; + return ConfigHelper::buildListener(name, route_config, + Network::Test::getLoopbackAddressString(ipVersion()), + stat_prefix, api_version_); } envoy::config::listener::v3::Listener AdsIntegrationTest::buildRedisListener(const std::string& name, const std::string& cluster) { - API_NO_BOOST(envoy::config::listener::v3::Listener) listener; - TestUtility::loadFromYaml(fmt::format( - R"EOF( - name: {} - address: - socket_address: - address: {} - port_value: 0 - filter_chains: + std::string redis = fmt::format( + R"EOF( filters: - name: redis typed_config: "@type": type.googleapis.com/envoy.config.filter.network.redis_proxy.v2.RedisProxy - settings: + settings: op_timeout: 1s stat_prefix: {} prefix_routes: - catch_all_route: + catch_all_route: cluster: {} )EOF", - name, Network::Test::getLoopbackAddressString(ipVersion()), name, - cluster), - listener, shouldBoost()); - return listener; + name, cluster); + return ConfigHelper::buildBaseListener(name, Network::Test::getLoopbackAddressString(ipVersion()), + redis, api_version_); } envoy::config::route::v3::RouteConfiguration AdsIntegrationTest::buildRouteConfig(const std::string& name, const std::string& cluster) { - API_NO_BOOST(envoy::config::route::v3::RouteConfiguration) route; - TestUtility::loadFromYaml(fmt::format(R"EOF( - name: {} - virtual_hosts: - - name: integration - domains: ["*"] - routes: - - match: {{ prefix: "/" }} - route: {{ cluster: {} }} - )EOF", - name, cluster), - route, shouldBoost()); - return route; + return ConfigHelper::buildRouteConfig(name, cluster, api_version_); } void AdsIntegrationTest::makeSingleRequest() { diff --git a/test/integration/ads_integration.h b/test/integration/ads_integration.h index 665aa48288f3..78cce1913de9 100644 
--- a/test/integration/ads_integration.h +++ b/test/integration/ads_integration.h @@ -12,50 +12,7 @@ #include "test/common/grpc/grpc_client_integration.h" #include "test/integration/http_integration.h" -// TODO(fredlas) set_node_on_first_message_only was true; the delta+SotW unification -// work restores it here. namespace Envoy { -static std::string adsIntegrationConfig(const std::string& api_type, - const std::string& api_version = "V2") { - // Note: do not use CONSTRUCT_ON_FIRST_USE here! - return fmt::format(R"EOF( -dynamic_resources: - lds_config: - resource_api_version: {1} - ads: {{}} - cds_config: - resource_api_version: {1} - ads: {{}} - ads_config: - transport_api_version: {1} - api_type: {0} - set_node_on_first_message_only: false -static_resources: - clusters: - name: dummy_cluster - connect_timeout: - seconds: 5 - type: STATIC - load_assignment: - cluster_name: dummy_cluster - endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: - address: 127.0.0.1 - port_value: 0 - lb_policy: ROUND_ROBIN - http2_protocol_options: {{}} -admin: - access_log_path: /dev/null - address: - socket_address: - address: 127.0.0.1 - port_value: 0 -)EOF", - api_type, api_version); -} class AdsIntegrationTest : public Grpc::DeltaSotwIntegrationParamTest, public HttpIntegrationTest { public: @@ -93,8 +50,6 @@ class AdsIntegrationTest : public Grpc::DeltaSotwIntegrationParamTest, public Ht envoy::admin::v3::RoutesConfigDump getRoutesConfigDump(); envoy::config::core::v3::ApiVersion api_version_; - - bool shouldBoost(); }; } // namespace Envoy diff --git a/test/integration/ads_integration_test.cc b/test/integration/ads_integration_test.cc index 84305d188a95..c62af3e4d01e 100644 --- a/test/integration/ads_integration_test.cc +++ b/test/integration/ads_integration_test.cc @@ -594,8 +594,9 @@ class AdsFailIntegrationTest : public Grpc::DeltaSotwIntegrationParamTest, public: AdsFailIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, ipVersion(), - adsIntegrationConfig( - sotwOrDelta() == Grpc::SotwOrDelta::Sotw ? "GRPC" : "DELTA_GRPC")) { + ConfigHelper::adsBootstrap( + sotwOrDelta() == Grpc::SotwOrDelta::Sotw ? "GRPC" : "DELTA_GRPC", + envoy::config::core::v3::ApiVersion::V2)) { create_xds_upstream_ = true; use_lds_ = false; sotw_or_delta_ = sotwOrDelta(); @@ -634,8 +635,9 @@ class AdsConfigIntegrationTest : public Grpc::DeltaSotwIntegrationParamTest, public: AdsConfigIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, ipVersion(), - adsIntegrationConfig( - sotwOrDelta() == Grpc::SotwOrDelta::Sotw ? "GRPC" : "DELTA_GRPC")) { + ConfigHelper::adsBootstrap( + sotwOrDelta() == Grpc::SotwOrDelta::Sotw ? "GRPC" : "DELTA_GRPC", + envoy::config::core::v3::ApiVersion::V2)) { create_xds_upstream_ = true; use_lds_ = false; sotw_or_delta_ = sotwOrDelta(); @@ -795,8 +797,9 @@ class AdsClusterFromFileIntegrationTest : public Grpc::DeltaSotwIntegrationParam public: AdsClusterFromFileIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, ipVersion(), - adsIntegrationConfig( - sotwOrDelta() == Grpc::SotwOrDelta::Sotw ? "GRPC" : "DELTA_GRPC")) { + ConfigHelper::adsBootstrap( + sotwOrDelta() == Grpc::SotwOrDelta::Sotw ? 
"GRPC" : "DELTA_GRPC", + envoy::config::core::v3::ApiVersion::V2)) { create_xds_upstream_ = true; use_lds_ = false; sotw_or_delta_ = sotwOrDelta(); diff --git a/test/integration/cds_integration_test.cc b/test/integration/cds_integration_test.cc index ab6206c0ab78..9ef423a0f7d1 100644 --- a/test/integration/cds_integration_test.cc +++ b/test/integration/cds_integration_test.cc @@ -78,10 +78,10 @@ class CdsIntegrationTest : public Grpc::DeltaSotwIntegrationParamTest, public Ht fake_upstreams_.emplace_back(new FakeUpstream(0, FakeHttpConnection::Type::HTTP2, version_, timeSystem(), enable_half_close_)); fake_upstreams_[UpstreamIndex2]->set_allow_unexpected_disconnects(false); - cluster1_ = ConfigHelper::buildCluster( + cluster1_ = ConfigHelper::buildStaticCluster( ClusterName1, fake_upstreams_[UpstreamIndex1]->localAddress()->ip()->port(), Network::Test::getLoopbackAddressString(ipVersion())); - cluster2_ = ConfigHelper::buildCluster( + cluster2_ = ConfigHelper::buildStaticCluster( ClusterName2, fake_upstreams_[UpstreamIndex2]->localAddress()->ip()->port(), Network::Test::getLoopbackAddressString(ipVersion())); From d93e7afb59dc4387c6aaeda4327aa316555fbe16 Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Thu, 18 Jun 2020 14:53:48 -0700 Subject: [PATCH 386/909] build: fix go-control-plane mirror (#11627) Signed-off-by: Lizan Zhou --- tools/api/generate_go_protobuf.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/tools/api/generate_go_protobuf.py b/tools/api/generate_go_protobuf.py index 746008c82c85..620e80bb32c5 100755 --- a/tools/api/generate_go_protobuf.py +++ b/tools/api/generate_go_protobuf.py @@ -40,8 +40,8 @@ def generateProtobufs(output): # Example output directory: # go_out/envoy/config/bootstrap/v2 rule_dir, proto = rule.decode()[len('@envoy_api//'):].rsplit(':', 1) - input_dir = os.path.join(bazel_bin, 'external', 'envoy_api', rule_dir, 'linux_amd64_stripped', - proto + '%', IMPORT_BASE, rule_dir) + input_dir = os.path.join(bazel_bin, 'external', 'envoy_api', rule_dir, proto + '_', IMPORT_BASE, + rule_dir) input_files = glob.glob(os.path.join(input_dir, '*.go')) output_dir = os.path.join(output, rule_dir) @@ -80,7 +80,7 @@ def findLastSyncSHA(repo): def updatedSinceSHA(repo, last_sha): # Determine if there are changes to API since last SHA - return git(None, 'rev-list', '%s..HEAD' % last_sha, 'api/envoy').split() + return git(None, 'rev-list', '%s..HEAD' % last_sha).split() def writeRevisionInfo(repo, sha): @@ -97,6 +97,7 @@ def syncGoProtobufs(output, repo): git(repo, 'rm', '-r', 'envoy') # Copy subtree at envoy from output to repo shutil.copytree(os.path.join(output, 'envoy'), dst) + git(repo, 'add', 'envoy') def publishGoProtobufs(repo, sha): @@ -108,17 +109,22 @@ def publishGoProtobufs(repo, sha): git(repo, 'push', 'origin', BRANCH) +def updated(repo): + return len( + [f for f in git(repo, 'diff', 'HEAD', '--name-only').splitlines() if f != 'envoy/COMMIT']) > 0 + + if __name__ == "__main__": workspace = check_output(['bazel', 'info', 'workspace']).decode().strip() output = os.path.join(workspace, OUTPUT_BASE) generateProtobufs(output) repo = os.path.join(workspace, REPO_BASE) cloneGoProtobufs(repo) + syncGoProtobufs(output, repo) last_sha = findLastSyncSHA(repo) changes = updatedSinceSHA(repo, last_sha) - if changes: + if updated(repo): print('Changes detected: %s' % changes) new_sha = changes[0] - syncGoProtobufs(output, repo) writeRevisionInfo(repo, new_sha) publishGoProtobufs(repo, new_sha) From 983599ad530964aa3b34543d7c78aa4b20bdc2ca Mon 
Sep 17 00:00:00 2001 From: alyssawilk Date: Thu, 18 Jun 2020 20:04:54 -0400 Subject: [PATCH 387/909] docs: first pass at documenting how to add extensions (#11540) This includes enhancing the script failures to leave breadcrumbs for devs who miss the new docs. Risk Level: n/a (tooling / docs) Testing: manual testing removing files from #11327 Docs Changes: yes Release Notes: no Signed-off-by: Alyssa Wilk --- CONTRIBUTING.md | 20 ++++++++++++++++++++ docs/generate_extension_db.py | 4 +++- tools/protodoc/protodoc.py | 23 ++++++++++++++--------- 3 files changed, 37 insertions(+), 10 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 93edb5e2b08c..17a5095bf338 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -215,6 +215,26 @@ and false. * If a PR includes a deprecation/breaking change, notification should be sent to the [envoy-announce](https://groups.google.com/forum/#!forum/envoy-announce) email list. +# Adding new extensions + +For developers adding a new extension, one can take an existing extension as the starting point. + +Extension configuration should be located in a directory structure like +`api/envoy/extensions/area/plugin/`, for example `api/envoy/extensions/access_loggers/file/` + +The code for the extension should be located under the equivalent +`source/extensions/area/plugin`, and include an *envoy_cc_extension* with the +configuration and tagged with the appropriate security posture, and an +*envoy_cc_library* with the code. More details on how to add a new extension +API can be found [here](api/STYLE.md#adding-an-extension-configuration-to-the-api): + +Other changes will likely include + + * Editing [source/extensions/extensions_build_config.bzl](source/extensions/extensions_build_config.bzl) to include the new extensions + * Editing [docs/root/api-v3/config/config.rst](docs/root/api-v3/config/config.rst) to add area/area + * Adding `docs/root/api-v3/config/area/area.rst` to add a table of contents for the API docs + * Adding `source/extensions/area/well_known_names.h` for registered plugins + # DCO: Sign your work Envoy ships commit hooks that allow you to auto-generate the DCO signoff line if diff --git a/docs/generate_extension_db.py b/docs/generate_extension_db.py index ebcb94307493..726a4cac1eb5 100755 --- a/docs/generate_extension_db.py +++ b/docs/generate_extension_db.py @@ -40,7 +40,9 @@ def GetExtensionMetadata(target): stderr=subprocess.PIPE) security_posture, status, undocumented = r.stdout.decode('utf-8').strip().split(' ') if IsMissing(security_posture): - raise ExtensionDbError('Missing security posture for %s' % target) + raise ExtensionDbError( + 'Missing security posture for %s. Please make sure the target is an envoy_cc_extension and security_posture is set' + % target) return { 'security_posture': security_posture, 'undocumented': False if IsMissing(undocumented) else bool(undocumented), diff --git a/tools/protodoc/protodoc.py b/tools/protodoc/protodoc.py index c96f7cafa6c9..ed3885f7b145 100755 --- a/tools/protodoc/protodoc.py +++ b/tools/protodoc/protodoc.py @@ -188,15 +188,20 @@ def FormatExtension(extension): Returns: RST formatted extension description. 
""" - extension_metadata = json.loads(pathlib.Path( - os.getenv('EXTENSION_DB_PATH')).read_text())[extension] - anchor = FormatAnchor('extension_' + extension) - status = EXTENSION_STATUS_VALUES.get(extension_metadata['status'], '') - security_posture = EXTENSION_SECURITY_POSTURES[extension_metadata['security_posture']] - return EXTENSION_TEMPLATE.substitute(anchor=anchor, - extension=extension, - status=status, - security_posture=security_posture) + try: + extension_metadata = json.loads(pathlib.Path( + os.getenv('EXTENSION_DB_PATH')).read_text())[extension] + anchor = FormatAnchor('extension_' + extension) + status = EXTENSION_STATUS_VALUES.get(extension_metadata['status'], '') + security_posture = EXTENSION_SECURITY_POSTURES[extension_metadata['security_posture']] + return EXTENSION_TEMPLATE.substitute(anchor=anchor, + extension=extension, + status=status, + security_posture=security_posture) + except KeyError as e: + sys.stderr.write( + '\n\nDid you forget to add an entry to source/extensions/extensions_build_config.bzl?\n\n') + exit(1) # Raising the error buries the above message in tracebacks. def FormatHeaderFromFile(style, source_code_info, proto_name): From 96920250a05e62095a68fbaf4f77612179e5af32 Mon Sep 17 00:00:00 2001 From: Dhi Aurrahman Date: Fri, 19 Jun 2020 07:11:27 +0700 Subject: [PATCH 388/909] Allow to define API version for extensions that call external services (#11583) This patch allows configuring the API version for gRPC external services. This specifies the version of the endpoint and message to be used. Affected extensions: - filters/http/ext_authz - filters/network/ext_authz - filters/http/ratelimit - filters/network/ratelimit - access_loggers/grpc - stat_sinks/metrics_service Affected upstream services: - upstream/health_discovery_service - upstream/load_stats_reporter Additional Description: tap - TapSinkService is not yet implemented. - TapDiscoveryService uses theconfig.core.v3.ApiConfigSource and is not yet implemented. trace - StreamTraces has no implemented client inside the code-base (implemented externally?) Risk Level: Low Testing: Added. Docs Changes: Added. Release Notes: Added. 
Fixes #10609 Signed-off-by: Dhi Aurrahman --- api/envoy/config/core/v3/config_source.proto | 2 +- .../config/core/v4alpha/config_source.proto | 2 +- .../config/metrics/v3/metrics_service.proto | 5 ++ .../metrics/v4alpha/metrics_service.proto | 5 ++ api/envoy/config/ratelimit/v3/rls.proto | 5 ++ .../access_loggers/grpc/v3/als.proto | 8 ++- .../filters/http/ext_authz/v3/ext_authz.proto | 8 ++- .../http/ext_authz/v4alpha/ext_authz.proto | 8 ++- .../network/ext_authz/v3/ext_authz.proto | 7 ++ docs/root/version_history/current.rst | 7 ++ .../envoy/config/core/v3/config_source.proto | 2 +- .../config/core/v4alpha/config_source.proto | 2 +- .../config/metrics/v3/metrics_service.proto | 5 ++ .../metrics/v4alpha/metrics_service.proto | 5 ++ .../envoy/config/ratelimit/v3/rls.proto | 5 ++ .../access_loggers/grpc/v3/als.proto | 8 ++- .../filters/http/ext_authz/v3/ext_authz.proto | 8 ++- .../http/ext_authz/v4alpha/ext_authz.proto | 8 ++- .../network/ext_authz/v3/ext_authz.proto | 7 ++ source/common/config/version_converter.cc | 2 + source/common/grpc/BUILD | 1 + source/common/grpc/typed_async_client.cc | 2 + source/common/grpc/typed_async_client.h | 71 ++++++++++++++++++- .../upstream/health_discovery_service.cc | 6 +- .../upstream/health_discovery_service.h | 1 + source/common/upstream/load_stats_reporter.cc | 6 +- source/common/upstream/load_stats_reporter.h | 1 + .../grpc/grpc_access_log_impl.cc | 27 +++---- .../grpc/grpc_access_log_impl.h | 5 +- .../extensions/filters/common/ext_authz/BUILD | 1 + .../common/ext_authz/ext_authz_grpc_impl.cc | 26 +++---- .../common/ext_authz/ext_authz_grpc_impl.h | 7 +- .../common/ratelimit/ratelimit_impl.cc | 20 ++++-- .../filters/common/ratelimit/ratelimit_impl.h | 9 ++- .../filters/http/ext_authz/config.cc | 4 +- .../filters/http/ratelimit/config.cc | 3 +- .../filters/network/ext_authz/config.cc | 4 +- .../filters/network/ratelimit/config.cc | 3 +- .../thrift_proxy/filters/ratelimit/config.cc | 3 +- .../stat_sinks/metrics_service/config.cc | 3 +- .../grpc_metrics_service_impl.cc | 18 +++-- .../grpc_metrics_service_impl.h | 5 +- test/common/grpc/grpc_client_integration.h | 32 +++++++++ .../grpc/grpc_access_log_impl_test.cc | 3 +- .../http_grpc_access_log_integration_test.cc | 22 ++++-- .../tcp_grpc_access_log_integration_test.cc | 13 ++-- .../ext_authz/ext_authz_grpc_impl_test.cc | 23 +++--- .../filters/common/ext_authz/test_common.cc | 2 +- .../filters/common/ext_authz/test_common.h | 8 +-- .../common/ratelimit/ratelimit_impl_test.cc | 4 +- .../filters/http/ext_authz/config_test.cc | 16 ++++- .../ext_authz/ext_authz_integration_test.cc | 12 ++-- .../ratelimit/ratelimit_integration_test.cc | 10 +-- .../filters/network/ext_authz/config_test.cc | 29 +++++--- .../grpc_metrics_service_impl_test.cc | 5 +- .../metrics_service_integration_test.cc | 8 ++- test/integration/hds_integration_test.cc | 31 ++++++-- .../load_stats_integration_test.cc | 16 +++-- test/test_common/utility.h | 69 ++++++++++++++++++ 59 files changed, 503 insertions(+), 135 deletions(-) diff --git a/api/envoy/config/core/v3/config_source.proto b/api/envoy/config/core/v3/config_source.proto index 7337403bc853..2522d4fd53e5 100644 --- a/api/envoy/config/core/v3/config_source.proto +++ b/api/envoy/config/core/v3/config_source.proto @@ -19,7 +19,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Configuration sources] -// xDS API version. This is used to describe both resource and transport +// xDS API and non-xDS services version. 
This is used to describe both resource and transport // protocol versions (in distinct configuration fields). enum ApiVersion { // When not specified, we assume v2, to ease migration to Envoy's stable API diff --git a/api/envoy/config/core/v4alpha/config_source.proto b/api/envoy/config/core/v4alpha/config_source.proto index 253a576a46ed..6c5e9778e802 100644 --- a/api/envoy/config/core/v4alpha/config_source.proto +++ b/api/envoy/config/core/v4alpha/config_source.proto @@ -19,7 +19,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // [#protodoc-title: Configuration sources] -// xDS API version. This is used to describe both resource and transport +// xDS API and non-xDS services version. This is used to describe both resource and transport // protocol versions (in distinct configuration fields). enum ApiVersion { // When not specified, we assume v2, to ease migration to Envoy's stable API diff --git a/api/envoy/config/metrics/v3/metrics_service.proto b/api/envoy/config/metrics/v3/metrics_service.proto index 0e078c0916f8..4bb6c77e66c2 100644 --- a/api/envoy/config/metrics/v3/metrics_service.proto +++ b/api/envoy/config/metrics/v3/metrics_service.proto @@ -2,6 +2,7 @@ syntax = "proto3"; package envoy.config.metrics.v3; +import "envoy/config/core/v3/config_source.proto"; import "envoy/config/core/v3/grpc_service.proto"; import "google/protobuf/wrappers.proto"; @@ -28,6 +29,10 @@ message MetricsServiceConfig { // The upstream gRPC cluster that hosts the metrics service. core.v3.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}]; + // API version for metric service transport protocol. This describes the metric service gRPC + // endpoint and version of messages used on the wire. + core.v3.ApiVersion transport_api_version = 3 [(validate.rules).enum = {defined_only: true}]; + // If true, counters are reported as the delta between flushing intervals. Otherwise, the current // counter value is reported. Defaults to false. // Eventually (https://github.com/envoyproxy/envoy/issues/10968) if this value is not set, the diff --git a/api/envoy/config/metrics/v4alpha/metrics_service.proto b/api/envoy/config/metrics/v4alpha/metrics_service.proto index e4da16c56bfd..e2d83ce4c1c9 100644 --- a/api/envoy/config/metrics/v4alpha/metrics_service.proto +++ b/api/envoy/config/metrics/v4alpha/metrics_service.proto @@ -2,6 +2,7 @@ syntax = "proto3"; package envoy.config.metrics.v4alpha; +import "envoy/config/core/v4alpha/config_source.proto"; import "envoy/config/core/v4alpha/grpc_service.proto"; import "google/protobuf/wrappers.proto"; @@ -28,6 +29,10 @@ message MetricsServiceConfig { // The upstream gRPC cluster that hosts the metrics service. core.v4alpha.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}]; + // API version for metric service transport protocol. This describes the metric service gRPC + // endpoint and version of messages used on the wire. + core.v4alpha.ApiVersion transport_api_version = 3 [(validate.rules).enum = {defined_only: true}]; + // If true, counters are reported as the delta between flushing intervals. Otherwise, the current // counter value is reported. Defaults to false. 
// Eventually (https://github.com/envoyproxy/envoy/issues/10968) if this value is not set, the diff --git a/api/envoy/config/ratelimit/v3/rls.proto b/api/envoy/config/ratelimit/v3/rls.proto index bb3c538bbabf..98889b1e2882 100644 --- a/api/envoy/config/ratelimit/v3/rls.proto +++ b/api/envoy/config/ratelimit/v3/rls.proto @@ -2,6 +2,7 @@ syntax = "proto3"; package envoy.config.ratelimit.v3; +import "envoy/config/core/v3/config_source.proto"; import "envoy/config/core/v3/grpc_service.proto"; import "udpa/annotations/status.proto"; @@ -26,4 +27,8 @@ message RateLimitServiceConfig { // will connect to this cluster when it needs to make rate limit service // requests. core.v3.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}]; + + // API version for rate limit transport protocol. This describes the rate limit gRPC endpoint and + // version of messages used on the wire. + core.v3.ApiVersion transport_api_version = 4 [(validate.rules).enum = {defined_only: true}]; } diff --git a/api/envoy/extensions/access_loggers/grpc/v3/als.proto b/api/envoy/extensions/access_loggers/grpc/v3/als.proto index 3cc154416627..4996a877a9c6 100644 --- a/api/envoy/extensions/access_loggers/grpc/v3/als.proto +++ b/api/envoy/extensions/access_loggers/grpc/v3/als.proto @@ -2,6 +2,7 @@ syntax = "proto3"; package envoy.extensions.access_loggers.grpc.v3; +import "envoy/config/core/v3/config_source.proto"; import "envoy/config/core/v3/grpc_service.proto"; import "google/protobuf/duration.proto"; @@ -53,7 +54,7 @@ message TcpGrpcAccessLogConfig { } // Common configuration for gRPC access logs. -// [#next-free-field: 6] +// [#next-free-field: 7] message CommonGrpcAccessLogConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.accesslog.v2.CommonGrpcAccessLogConfig"; @@ -66,6 +67,11 @@ message CommonGrpcAccessLogConfig { // The gRPC service for the access log service. config.core.v3.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}]; + // API version for access logs service transport protocol. This describes the access logs service + // gRPC endpoint and version of messages used on the wire. + config.core.v3.ApiVersion transport_api_version = 6 + [(validate.rules).enum = {defined_only: true}]; + // Interval for flushing access logs to the gRPC stream. Logger will flush requests every time // this interval is elapsed, or when batch size limit is hit, whichever comes first. Defaults to // 1 second. diff --git a/api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto b/api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto index 20223787549e..0efa67c61873 100644 --- a/api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto +++ b/api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package envoy.extensions.filters.http.ext_authz.v3; import "envoy/config/core/v3/base.proto"; +import "envoy/config/core/v3/config_source.proto"; import "envoy/config/core/v3/grpc_service.proto"; import "envoy/config/core/v3/http_uri.proto"; import "envoy/type/matcher/v3/string.proto"; @@ -22,7 +23,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // External Authorization :ref:`configuration overview `. 
// [#extension: envoy.filters.http.ext_authz] -// [#next-free-field: 12] +// [#next-free-field: 13] message ExtAuthz { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.ext_authz.v2.ExtAuthz"; @@ -40,6 +41,11 @@ message ExtAuthz { HttpService http_service = 3; } + // API version for ext_authz transport protocol. This describes the ext_authz gRPC endpoint and + // version of messages used on the wire. + config.core.v3.ApiVersion transport_api_version = 12 + [(validate.rules).enum = {defined_only: true}]; + // Changes filter's behaviour on errors: // // 1. When set to true, the filter will *accept* client request even if the communication with diff --git a/api/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto b/api/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto index 03f0b3a27724..fe288f85aefb 100644 --- a/api/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto +++ b/api/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package envoy.extensions.filters.http.ext_authz.v4alpha; import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/core/v4alpha/config_source.proto"; import "envoy/config/core/v4alpha/grpc_service.proto"; import "envoy/config/core/v4alpha/http_uri.proto"; import "envoy/type/matcher/v4alpha/string.proto"; @@ -22,7 +23,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // External Authorization :ref:`configuration overview `. // [#extension: envoy.filters.http.ext_authz] -// [#next-free-field: 12] +// [#next-free-field: 13] message ExtAuthz { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.http.ext_authz.v3.ExtAuthz"; @@ -40,6 +41,11 @@ message ExtAuthz { HttpService http_service = 3; } + // API version for ext_authz transport protocol. This describes the ext_authz gRPC endpoint and + // version of messages used on the wire. + config.core.v4alpha.ApiVersion transport_api_version = 12 + [(validate.rules).enum = {defined_only: true}]; + // Changes filter's behaviour on errors: // // 1. When set to true, the filter will *accept* client request even if the communication with diff --git a/api/envoy/extensions/filters/network/ext_authz/v3/ext_authz.proto b/api/envoy/extensions/filters/network/ext_authz/v3/ext_authz.proto index c3a63ac0a4f6..50161f1cb92b 100644 --- a/api/envoy/extensions/filters/network/ext_authz/v3/ext_authz.proto +++ b/api/envoy/extensions/filters/network/ext_authz/v3/ext_authz.proto @@ -2,6 +2,7 @@ syntax = "proto3"; package envoy.extensions.filters.network.ext_authz.v3; +import "envoy/config/core/v3/config_source.proto"; import "envoy/config/core/v3/grpc_service.proto"; import "udpa/annotations/status.proto"; @@ -22,6 +23,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // gRPC Authorization API defined by // :ref:`CheckRequest `. // A failed check will cause this filter to close the TCP connection. +// [#next-free-field: 6] message ExtAuthz { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.ext_authz.v2.ExtAuthz"; @@ -44,4 +46,9 @@ message ExtAuthz { // When this field is true, Envoy will include the peer X.509 certificate, if available, in the // :ref:`certificate`. bool include_peer_certificate = 4; + + // API version for ext_authz transport protocol. This describes the ext_authz gRPC endpoint and + // version of Check{Request,Response} used on the wire. 
+ config.core.v3.ApiVersion transport_api_version = 5 + [(validate.rules).enum = {defined_only: true}]; } diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 2740b74a1511..ed7596c16e58 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -52,6 +52,7 @@ New Features * access loggers: added GRPC_STATUS operator on logging format. * access loggers: extened specifier for FilterStateFormatter to output :ref:`unstructured log string `. * access loggers: file access logger config added :ref:`log_format `. +* access loggers: gRPC access logger config added added :ref:`API version ` to explicitly set the version of gRPC service endpoint and message to be used. * aggregate cluster: make route :ref:`retry_priority ` predicates work with :ref:`this cluster type `. * build: official released binary is now built on Ubuntu 18.04, requires glibc >= 2.27. * build: official released binary is now built with Clang 10.0.0. @@ -61,6 +62,8 @@ New Features * decompressor: generic :ref:`decompressor ` filter exposed to users. * dynamic forward proxy: added :ref:`SNI based dynamic forward proxy ` support. * ext_authz filter: added :ref:`v2 deny_at_disable `, :ref:`v3 deny_at_disable `. This allows to force deny for protected path while filter gets disabled, by setting this key to true. +* ext_authz filter: added API version field for both :ref:`HTTP ` + and :ref:`Network ` filters to explicitly set the version of gRPC service endpoint and message to be used. * fault: added support for controlling the percentage of requests that abort, delay and response rate limits faults are applied to using :ref:`HTTP headers ` to the HTTP fault filter. * fault: added support for specifying grpc_status code in abort faults using @@ -72,6 +75,7 @@ New Features `google.api.HttpBody `_. * grpc-json: send a `x-envoy-original-method` header to grpc services. * gzip filter: added option to set zlib's next output buffer size. +* hds: updated to allow to explicitly set the API version of gRPC service endpoint and message to be used. * health checks: allow configuring health check transport sockets by specifying :ref:`transport socket match criteria `. * http: added :ref:`local_reply config ` to http_connection_manager to customize :ref:`local reply `. * http: added :ref:`stripping port from host header ` support. @@ -84,8 +88,11 @@ New Features in LRS response, which allows management servers to avoid explicitly listing all clusters it is interested in; behavior is allowed based on new "envoy.lrs.supports_send_all_clusters" capability in :ref:`client_features` field. +* lrs: updated to allow to explicitly set the API version of gRPC service endpoint and message to be used. +* metrics service: added added :ref:`API version ` to explicitly set the version of gRPC service endpoint and message to be used. * network filters: added a :ref:`postgres proxy filter `. * network filters: added a :ref:`rocketmq proxy filter `. +* ratelimit: added :ref:`API version ` to explicitly set the version of gRPC service endpoint and message to be used. * request_id: added to :ref:`always_set_request_id_in_response setting ` to set :ref:`x-request-id ` header in response even if tracing is not forced. 
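As a concrete illustration of the new knob, the snippet below opts the HTTP ext_authz filter into the v3 Check API explicitly. This is a minimal sketch using the repository's test-style loadFromYaml pattern; the cluster name is illustrative:

  envoy::extensions::filters::http::ext_authz::v3::ExtAuthz proto_config;
  TestUtility::loadFromYaml(R"EOF(
    transport_api_version: V3
    grpc_service:
      envoy_grpc:
        cluster_name: ext_authz_cluster
    )EOF",
                            proto_config);

When transport_api_version is left unset (AUTO), the client falls back to the v2 endpoint and messages, preserving existing behavior.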
diff --git a/generated_api_shadow/envoy/config/core/v3/config_source.proto b/generated_api_shadow/envoy/config/core/v3/config_source.proto index 363f4ef91f90..fbac531d71ec 100644 --- a/generated_api_shadow/envoy/config/core/v3/config_source.proto +++ b/generated_api_shadow/envoy/config/core/v3/config_source.proto @@ -19,7 +19,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Configuration sources] -// xDS API version. This is used to describe both resource and transport +// xDS API and non-xDS services version. This is used to describe both resource and transport // protocol versions (in distinct configuration fields). enum ApiVersion { // When not specified, we assume v2, to ease migration to Envoy's stable API diff --git a/generated_api_shadow/envoy/config/core/v4alpha/config_source.proto b/generated_api_shadow/envoy/config/core/v4alpha/config_source.proto index 4f532f089869..83ec10a16b44 100644 --- a/generated_api_shadow/envoy/config/core/v4alpha/config_source.proto +++ b/generated_api_shadow/envoy/config/core/v4alpha/config_source.proto @@ -19,7 +19,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // [#protodoc-title: Configuration sources] -// xDS API version. This is used to describe both resource and transport +// xDS API and non-xDS services version. This is used to describe both resource and transport // protocol versions (in distinct configuration fields). enum ApiVersion { // When not specified, we assume v2, to ease migration to Envoy's stable API diff --git a/generated_api_shadow/envoy/config/metrics/v3/metrics_service.proto b/generated_api_shadow/envoy/config/metrics/v3/metrics_service.proto index 0e078c0916f8..4bb6c77e66c2 100644 --- a/generated_api_shadow/envoy/config/metrics/v3/metrics_service.proto +++ b/generated_api_shadow/envoy/config/metrics/v3/metrics_service.proto @@ -2,6 +2,7 @@ syntax = "proto3"; package envoy.config.metrics.v3; +import "envoy/config/core/v3/config_source.proto"; import "envoy/config/core/v3/grpc_service.proto"; import "google/protobuf/wrappers.proto"; @@ -28,6 +29,10 @@ message MetricsServiceConfig { // The upstream gRPC cluster that hosts the metrics service. core.v3.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}]; + // API version for metric service transport protocol. This describes the metric service gRPC + // endpoint and version of messages used on the wire. + core.v3.ApiVersion transport_api_version = 3 [(validate.rules).enum = {defined_only: true}]; + // If true, counters are reported as the delta between flushing intervals. Otherwise, the current // counter value is reported. Defaults to false. // Eventually (https://github.com/envoyproxy/envoy/issues/10968) if this value is not set, the diff --git a/generated_api_shadow/envoy/config/metrics/v4alpha/metrics_service.proto b/generated_api_shadow/envoy/config/metrics/v4alpha/metrics_service.proto index e4da16c56bfd..e2d83ce4c1c9 100644 --- a/generated_api_shadow/envoy/config/metrics/v4alpha/metrics_service.proto +++ b/generated_api_shadow/envoy/config/metrics/v4alpha/metrics_service.proto @@ -2,6 +2,7 @@ syntax = "proto3"; package envoy.config.metrics.v4alpha; +import "envoy/config/core/v4alpha/config_source.proto"; import "envoy/config/core/v4alpha/grpc_service.proto"; import "google/protobuf/wrappers.proto"; @@ -28,6 +29,10 @@ message MetricsServiceConfig { // The upstream gRPC cluster that hosts the metrics service. 
core.v4alpha.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}]; + // API version for metric service transport protocol. This describes the metric service gRPC + // endpoint and version of messages used on the wire. + core.v4alpha.ApiVersion transport_api_version = 3 [(validate.rules).enum = {defined_only: true}]; + // If true, counters are reported as the delta between flushing intervals. Otherwise, the current // counter value is reported. Defaults to false. // Eventually (https://github.com/envoyproxy/envoy/issues/10968) if this value is not set, the diff --git a/generated_api_shadow/envoy/config/ratelimit/v3/rls.proto b/generated_api_shadow/envoy/config/ratelimit/v3/rls.proto index bb3c538bbabf..98889b1e2882 100644 --- a/generated_api_shadow/envoy/config/ratelimit/v3/rls.proto +++ b/generated_api_shadow/envoy/config/ratelimit/v3/rls.proto @@ -2,6 +2,7 @@ syntax = "proto3"; package envoy.config.ratelimit.v3; +import "envoy/config/core/v3/config_source.proto"; import "envoy/config/core/v3/grpc_service.proto"; import "udpa/annotations/status.proto"; @@ -26,4 +27,8 @@ message RateLimitServiceConfig { // will connect to this cluster when it needs to make rate limit service // requests. core.v3.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}]; + + // API version for rate limit transport protocol. This describes the rate limit gRPC endpoint and + // version of messages used on the wire. + core.v3.ApiVersion transport_api_version = 4 [(validate.rules).enum = {defined_only: true}]; } diff --git a/generated_api_shadow/envoy/extensions/access_loggers/grpc/v3/als.proto b/generated_api_shadow/envoy/extensions/access_loggers/grpc/v3/als.proto index 3cc154416627..4996a877a9c6 100644 --- a/generated_api_shadow/envoy/extensions/access_loggers/grpc/v3/als.proto +++ b/generated_api_shadow/envoy/extensions/access_loggers/grpc/v3/als.proto @@ -2,6 +2,7 @@ syntax = "proto3"; package envoy.extensions.access_loggers.grpc.v3; +import "envoy/config/core/v3/config_source.proto"; import "envoy/config/core/v3/grpc_service.proto"; import "google/protobuf/duration.proto"; @@ -53,7 +54,7 @@ message TcpGrpcAccessLogConfig { } // Common configuration for gRPC access logs. -// [#next-free-field: 6] +// [#next-free-field: 7] message CommonGrpcAccessLogConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.accesslog.v2.CommonGrpcAccessLogConfig"; @@ -66,6 +67,11 @@ message CommonGrpcAccessLogConfig { // The gRPC service for the access log service. config.core.v3.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}]; + // API version for access logs service transport protocol. This describes the access logs service + // gRPC endpoint and version of messages used on the wire. + config.core.v3.ApiVersion transport_api_version = 6 + [(validate.rules).enum = {defined_only: true}]; + // Interval for flushing access logs to the gRPC stream. Logger will flush requests every time // this interval is elapsed, or when batch size limit is hit, whichever comes first. Defaults to // 1 second. 
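The same field drives the gRPC access logger: with transport_api_version set to V3 the logger opens envoy.service.accesslog.v3.AccessLogService.StreamAccessLogs rather than the v2 method. A minimal sketch of the common config (log and cluster names illustrative):

  envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig config;
  TestUtility::loadFromYaml(R"EOF(
    log_name: als_test_log
    transport_api_version: V3
    grpc_service:
      envoy_grpc:
        cluster_name: als_cluster
    )EOF",
                            config);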
diff --git a/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto b/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto index ff55d66979ee..ebe404ccfb77 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package envoy.extensions.filters.http.ext_authz.v3; import "envoy/config/core/v3/base.proto"; +import "envoy/config/core/v3/config_source.proto"; import "envoy/config/core/v3/grpc_service.proto"; import "envoy/config/core/v3/http_uri.proto"; import "envoy/type/matcher/v3/string.proto"; @@ -22,7 +23,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // External Authorization :ref:`configuration overview `. // [#extension: envoy.filters.http.ext_authz] -// [#next-free-field: 12] +// [#next-free-field: 13] message ExtAuthz { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.ext_authz.v2.ExtAuthz"; @@ -36,6 +37,11 @@ message ExtAuthz { HttpService http_service = 3; } + // API version for ext_authz transport protocol. This describes the ext_authz gRPC endpoint and + // version of messages used on the wire. + config.core.v3.ApiVersion transport_api_version = 12 + [(validate.rules).enum = {defined_only: true}]; + // Changes filter's behaviour on errors: // // 1. When set to true, the filter will *accept* client request even if the communication with diff --git a/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto b/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto index 03f0b3a27724..fe288f85aefb 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package envoy.extensions.filters.http.ext_authz.v4alpha; import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/core/v4alpha/config_source.proto"; import "envoy/config/core/v4alpha/grpc_service.proto"; import "envoy/config/core/v4alpha/http_uri.proto"; import "envoy/type/matcher/v4alpha/string.proto"; @@ -22,7 +23,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // External Authorization :ref:`configuration overview `. // [#extension: envoy.filters.http.ext_authz] -// [#next-free-field: 12] +// [#next-free-field: 13] message ExtAuthz { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.http.ext_authz.v3.ExtAuthz"; @@ -40,6 +41,11 @@ message ExtAuthz { HttpService http_service = 3; } + // API version for ext_authz transport protocol. This describes the ext_authz gRPC endpoint and + // version of messages used on the wire. + config.core.v4alpha.ApiVersion transport_api_version = 12 + [(validate.rules).enum = {defined_only: true}]; + // Changes filter's behaviour on errors: // // 1. 
When set to true, the filter will *accept* client request even if the communication with diff --git a/generated_api_shadow/envoy/extensions/filters/network/ext_authz/v3/ext_authz.proto b/generated_api_shadow/envoy/extensions/filters/network/ext_authz/v3/ext_authz.proto index c3a63ac0a4f6..50161f1cb92b 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/ext_authz/v3/ext_authz.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/ext_authz/v3/ext_authz.proto @@ -2,6 +2,7 @@ syntax = "proto3"; package envoy.extensions.filters.network.ext_authz.v3; +import "envoy/config/core/v3/config_source.proto"; import "envoy/config/core/v3/grpc_service.proto"; import "udpa/annotations/status.proto"; @@ -22,6 +23,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // gRPC Authorization API defined by // :ref:`CheckRequest `. // A failed check will cause this filter to close the TCP connection. +// [#next-free-field: 6] message ExtAuthz { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.ext_authz.v2.ExtAuthz"; @@ -44,4 +46,9 @@ message ExtAuthz { // When this field is true, Envoy will include the peer X.509 certificate, if available, in the // :ref:`certificate`. bool include_peer_certificate = 4; + + // API version for ext_authz transport protocol. This describes the ext_authz gRPC endpoint and + // version of Check{Request,Response} used on the wire. + config.core.v3.ApiVersion transport_api_version = 5 + [(validate.rules).enum = {defined_only: true}]; } diff --git a/source/common/config/version_converter.cc b/source/common/config/version_converter.cc index 9e97bb53ab03..c123a101b50e 100644 --- a/source/common/config/version_converter.cc +++ b/source/common/config/version_converter.cc @@ -3,6 +3,7 @@ #include "envoy/common/exception.h" #include "common/common/assert.h" +#include "common/common/macros.h" #include "common/config/api_type_oracle.h" #include "common/protobuf/visitor.h" #include "common/protobuf/well_known.h" @@ -158,6 +159,7 @@ VersionConverter::getJsonStringFromMessage(const Protobuf::Message& message, DynamicMessagePtr dynamic_message; switch (api_version) { case envoy::config::core::v3::ApiVersion::AUTO: + FALLTHRU; case envoy::config::core::v3::ApiVersion::V2: { // TODO(htuch): this works as long as there are no new fields in the v3+ // DiscoveryRequest. 
When they are added, we need to do a full v2 conversion diff --git a/source/common/grpc/BUILD b/source/common/grpc/BUILD index ce22cd37046b..3daea1ce4395 100644 --- a/source/common/grpc/BUILD +++ b/source/common/grpc/BUILD @@ -33,6 +33,7 @@ envoy_cc_library( ":typed_async_client_lib", "//include/envoy/grpc:async_client_interface", "//source/common/buffer:zero_copy_input_stream_lib", + "//source/common/config:version_converter_lib", "//source/common/http:async_client_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], diff --git a/source/common/grpc/typed_async_client.cc b/source/common/grpc/typed_async_client.cc index 9c06dc6b701a..465bde6e139e 100644 --- a/source/common/grpc/typed_async_client.cc +++ b/source/common/grpc/typed_async_client.cc @@ -1,6 +1,8 @@ #include "common/grpc/typed_async_client.h" #include "common/buffer/zero_copy_input_stream_impl.h" +#include "common/common/assert.h" +#include "common/common/macros.h" #include "common/common/utility.h" #include "common/grpc/common.h" #include "common/http/utility.h" diff --git a/source/common/grpc/typed_async_client.h b/source/common/grpc/typed_async_client.h index 39435f65c827..1de73ff6e8d9 100644 --- a/source/common/grpc/typed_async_client.h +++ b/source/common/grpc/typed_async_client.h @@ -4,6 +4,9 @@ #include "envoy/grpc/async_client.h" +#include "common/common/empty_string.h" +#include "common/config/version_converter.h" + namespace Envoy { namespace Grpc { namespace Internal { @@ -33,7 +36,13 @@ template class AsyncStream /* : public RawAsyncStream */ { AsyncStream() = default; AsyncStream(RawAsyncStream* stream) : stream_(stream) {} AsyncStream(const AsyncStream& other) = default; - void sendMessage(const Request& request, bool end_stream) { + void sendMessage(const Protobuf::Message& request, bool end_stream) { + Internal::sendMessageUntyped(stream_, std::move(request), end_stream); + } + void sendMessage(const Protobuf::Message& request, + envoy::config::core::v3::ApiVersion transport_api_version, bool end_stream) { + Config::VersionConverter::prepareMessageForGrpcWire(const_cast(request), + transport_api_version); Internal::sendMessageUntyped(stream_, std::move(request), end_stream); } void closeStream() { stream_->closeStream(); } @@ -74,6 +83,55 @@ template class AsyncRequestCallbacks : public RawAsyncReques } }; +/** + * Versioned methods wrapper. + */ +class VersionedMethods { +public: + VersionedMethods(const std::string& v3, const std::string& v2, const std::string& v2_alpha = "") + : v3_(Protobuf::DescriptorPool::generated_pool()->FindMethodByName(v3)), + v2_(Protobuf::DescriptorPool::generated_pool()->FindMethodByName(v2)), + v2_alpha_(v2_alpha.empty() + ? nullptr + : Protobuf::DescriptorPool::generated_pool()->FindMethodByName(v2_alpha)) {} + + /** + * Given a version, return the method descriptor for a specific version. + * + * @param api_version target API version. + * @param use_alpha if this is an alpha version of an API method. + * + * @return Protobuf::MethodDescriptor& of a method for a specific version. + */ + const Protobuf::MethodDescriptor& + getMethodDescriptorForVersion(envoy::config::core::v3::ApiVersion api_version, + bool use_alpha = false) const { + switch (api_version) { + case envoy::config::core::v3::ApiVersion::AUTO: + FALLTHRU; + case envoy::config::core::v3::ApiVersion::V2: { + const auto* descriptor = use_alpha ? 
v2_alpha_ : v2_; + ASSERT(descriptor != nullptr); + return *descriptor; + } + + case envoy::config::core::v3::ApiVersion::V3: { + const auto* descriptor = v3_; + ASSERT(descriptor != nullptr); + return *descriptor; + } + + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } + } + +private: + const Protobuf::MethodDescriptor* v3_{nullptr}; + const Protobuf::MethodDescriptor* v2_{nullptr}; + const Protobuf::MethodDescriptor* v2_alpha_{nullptr}; +}; + /** * Convenience subclasses for AsyncStreamCallbacks. */ @@ -108,6 +166,17 @@ template class AsyncClient /* : public Raw return Internal::sendUntyped(client_.get(), service_method, request, callbacks, parent_span, options); } + virtual AsyncRequest* send(const Protobuf::MethodDescriptor& service_method, + const Protobuf::Message& request, + AsyncRequestCallbacks& callbacks, Tracing::Span& parent_span, + const Http::AsyncClient::RequestOptions& options, + envoy::config::core::v3::ApiVersion transport_api_version) { + Config::VersionConverter::prepareMessageForGrpcWire(const_cast(request), + transport_api_version); + return Internal::sendUntyped(client_.get(), service_method, request, callbacks, parent_span, + options); + } + virtual AsyncStream start(const Protobuf::MethodDescriptor& service_method, AsyncStreamCallbacks& callbacks, const Http::AsyncClient::StreamOptions& options) { diff --git a/source/common/upstream/health_discovery_service.cc b/source/common/upstream/health_discovery_service.cc index 76da4ca38d70..a133b93f4c45 100644 --- a/source/common/upstream/health_discovery_service.cc +++ b/source/common/upstream/health_discovery_service.cc @@ -35,8 +35,10 @@ HdsDelegate::HdsDelegate(Stats::Scope& scope, Grpc::RawAsyncClientPtr async_clie Singleton::Manager& singleton_manager, ThreadLocal::SlotAllocator& tls, ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api) : stats_{ALL_HDS_STATS(POOL_COUNTER_PREFIX(scope, "hds_delegate."))}, - service_method_(*Protobuf::DescriptorPool::generated_pool()->FindMethodByName( - "envoy.service.discovery.v2.HealthDiscoveryService.StreamHealthCheck")), + service_method_(Grpc::VersionedMethods( + "envoy.service.health.v3.HealthDiscoveryService.StreamHealthCheck", + "envoy.service.discovery.v2.HealthDiscoveryService.StreamHealthCheck") + .getMethodDescriptorForVersion(transport_api_version)), async_client_(std::move(async_client)), transport_api_version_(transport_api_version), dispatcher_(dispatcher), runtime_(runtime), store_stats_(stats), ssl_context_manager_(ssl_context_manager), random_(random), info_factory_(info_factory), diff --git a/source/common/upstream/health_discovery_service.h b/source/common/upstream/health_discovery_service.h index 079c22448dc3..3506e974f368 100644 --- a/source/common/upstream/health_discovery_service.h +++ b/source/common/upstream/health_discovery_service.h @@ -12,6 +12,7 @@ #include "common/common/backoff_strategy.h" #include "common/common/logger.h" +#include "common/common/macros.h" #include "common/config/utility.h" #include "common/grpc/async_client_impl.h" #include "common/network/resolver_impl.h" diff --git a/source/common/upstream/load_stats_reporter.cc b/source/common/upstream/load_stats_reporter.cc index c2f997050f98..2a9404219d37 100644 --- a/source/common/upstream/load_stats_reporter.cc +++ b/source/common/upstream/load_stats_reporter.cc @@ -17,8 +17,10 @@ LoadStatsReporter::LoadStatsReporter(const LocalInfo::LocalInfo& local_info, : cm_(cluster_manager), stats_{ALL_LOAD_REPORTER_STATS( POOL_COUNTER_PREFIX(scope, "load_reporter."))}, 
async_client_(std::move(async_client)), transport_api_version_(transport_api_version), - service_method_(*Protobuf::DescriptorPool::generated_pool()->FindMethodByName( - "envoy.service.load_stats.v2.LoadReportingService.StreamLoadStats")), + service_method_( + Grpc::VersionedMethods("envoy.service.load_stats.v3.LoadReportingService.StreamLoadStats", + "envoy.service.load_stats.v2.LoadReportingService.StreamLoadStats") + .getMethodDescriptorForVersion(transport_api_version)), time_source_(dispatcher.timeSource()) { request_.mutable_node()->MergeFrom(local_info.node()); request_.mutable_node()->add_client_features("envoy.lrs.supports_send_all_clusters"); diff --git a/source/common/upstream/load_stats_reporter.h b/source/common/upstream/load_stats_reporter.h index 3334abeec7b4..b89f3d4f75c8 100644 --- a/source/common/upstream/load_stats_reporter.h +++ b/source/common/upstream/load_stats_reporter.h @@ -6,6 +6,7 @@ #include "common/common/logger.h" #include "common/grpc/async_client_impl.h" +#include "common/grpc/typed_async_client.h" namespace Envoy { namespace Upstream { diff --git a/source/extensions/access_loggers/grpc/grpc_access_log_impl.cc b/source/extensions/access_loggers/grpc/grpc_access_log_impl.cc index d9295a17f29c..21c1ce123e0c 100644 --- a/source/extensions/access_loggers/grpc/grpc_access_log_impl.cc +++ b/source/extensions/access_loggers/grpc/grpc_access_log_impl.cc @@ -5,6 +5,7 @@ #include "envoy/upstream/upstream.h" #include "common/common/assert.h" +#include "common/grpc/typed_async_client.h" #include "common/network/utility.h" #include "common/runtime/runtime_features.h" #include "common/stream_info/utility.h" @@ -24,12 +25,11 @@ void GrpcAccessLoggerImpl::LocalStream::onRemoteClose(Grpc::Status::GrpcStatus, } } -GrpcAccessLoggerImpl::GrpcAccessLoggerImpl(Grpc::RawAsyncClientPtr&& client, std::string log_name, - std::chrono::milliseconds buffer_flush_interval_msec, - uint64_t max_buffer_size_bytes, - Event::Dispatcher& dispatcher, - const LocalInfo::LocalInfo& local_info, - Stats::Scope& scope) +GrpcAccessLoggerImpl::GrpcAccessLoggerImpl( + Grpc::RawAsyncClientPtr&& client, std::string log_name, + std::chrono::milliseconds buffer_flush_interval_msec, uint64_t max_buffer_size_bytes, + Event::Dispatcher& dispatcher, const LocalInfo::LocalInfo& local_info, Stats::Scope& scope, + envoy::config::core::v3::ApiVersion transport_api_version) : stats_({ALL_GRPC_ACCESS_LOGGER_STATS( POOL_COUNTER_PREFIX(scope, "access_logs.grpc_access_log."))}), client_(std::move(client)), log_name_(log_name), @@ -38,7 +38,12 @@ GrpcAccessLoggerImpl::GrpcAccessLoggerImpl(Grpc::RawAsyncClientPtr&& client, std flush(); flush_timer_->enableTimer(buffer_flush_interval_msec_); })), - max_buffer_size_bytes_(max_buffer_size_bytes), local_info_(local_info) { + max_buffer_size_bytes_(max_buffer_size_bytes), local_info_(local_info), + service_method_( + Grpc::VersionedMethods("envoy.service.accesslog.v3.AccessLogService.StreamAccessLogs", + "envoy.service.accesslog.v2.AccessLogService.StreamAccessLogs") + .getMethodDescriptorForVersion(transport_api_version)), + transport_api_version_(transport_api_version) { flush_timer_->enableTimer(buffer_flush_interval_msec_); } @@ -91,9 +96,7 @@ void GrpcAccessLoggerImpl::flush() { if (stream_->stream_ == nullptr) { stream_->stream_ = - client_->start(*Protobuf::DescriptorPool::generated_pool()->FindMethodByName( - "envoy.service.accesslog.v2.AccessLogService.StreamAccessLogs"), - *stream_, Http::AsyncClient::StreamOptions()); + client_->start(service_method_, *stream_, 
Http::AsyncClient::StreamOptions()); auto* identifier = message_.mutable_identifier(); *identifier->mutable_node() = local_info_.node(); @@ -104,7 +107,7 @@ void GrpcAccessLoggerImpl::flush() { if (stream_->stream_->isAboveWriteBufferHighWatermark()) { return; } - stream_->stream_->sendMessage(message_, false); + stream_->stream_->sendMessage(message_, transport_api_version_, false); } else { // Clear out the stream data due to stream creation failure. stream_.reset(); @@ -141,7 +144,7 @@ GrpcAccessLoggerSharedPtr GrpcAccessLoggerCacheImpl::getOrCreateLogger( factory->create(), config.log_name(), std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(config, buffer_flush_interval, 1000)), PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, buffer_size_bytes, 16384), cache.dispatcher_, - local_info_, scope); + local_info_, scope, config.transport_api_version()); cache.access_loggers_.emplace(cache_key, logger); return logger; } diff --git a/source/extensions/access_loggers/grpc/grpc_access_log_impl.h b/source/extensions/access_loggers/grpc/grpc_access_log_impl.h index c913522cd967..b79a7ad1b8d5 100644 --- a/source/extensions/access_loggers/grpc/grpc_access_log_impl.h +++ b/source/extensions/access_loggers/grpc/grpc_access_log_impl.h @@ -85,7 +85,8 @@ class GrpcAccessLoggerImpl : public GrpcAccessLogger { GrpcAccessLoggerImpl(Grpc::RawAsyncClientPtr&& client, std::string log_name, std::chrono::milliseconds buffer_flush_interval_msec, uint64_t max_buffer_size_bytes, Event::Dispatcher& dispatcher, - const LocalInfo::LocalInfo& local_info, Stats::Scope& scope); + const LocalInfo::LocalInfo& local_info, Stats::Scope& scope, + envoy::config::core::v3::ApiVersion transport_api_version); // Extensions::AccessLoggers::GrpcCommon::GrpcAccessLogger void log(envoy::data::accesslog::v3::HTTPAccessLogEntry&& entry) override; @@ -124,6 +125,8 @@ class GrpcAccessLoggerImpl : public GrpcAccessLogger { envoy::service::accesslog::v3::StreamAccessLogsMessage message_; absl::optional stream_; const LocalInfo::LocalInfo& local_info_; + const Protobuf::MethodDescriptor& service_method_; + const envoy::config::core::v3::ApiVersion transport_api_version_; }; class GrpcAccessLoggerCacheImpl : public Singleton::Instance, public GrpcAccessLoggerCache { diff --git a/source/extensions/filters/common/ext_authz/BUILD b/source/extensions/filters/common/ext_authz/BUILD index 66db5c593ff4..45d4fb01d96f 100644 --- a/source/extensions/filters/common/ext_authz/BUILD +++ b/source/extensions/filters/common/ext_authz/BUILD @@ -44,6 +44,7 @@ envoy_cc_library( "//source/common/protobuf", "//source/common/tracing:http_tracer_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + "@envoy_api//envoy/service/auth/v2alpha:pkg_cc_proto", "@envoy_api//envoy/service/auth/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc b/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc index b5ca79aeb1b5..2dc572ffb0a1 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc +++ b/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc @@ -1,6 +1,7 @@ #include "extensions/filters/common/ext_authz/ext_authz_grpc_impl.h" #include "envoy/config/core/v3/base.pb.h" +#include "envoy/service/auth/v2alpha/external_auth.pb.h" #include "envoy/service/auth/v3/external_auth.pb.h" #include "common/common/assert.h" @@ -16,16 +17,16 @@ namespace Filters { namespace Common { namespace ExtAuthz { -// Values used for selecting service paths. 
-// TODO(gsagula): keep only V2 when V2Alpha gets deprecated. -constexpr char V2[] = "envoy.service.auth.v2.Authorization.Check"; -constexpr char V2alpha[] = "envoy.service.auth.v2alpha.Authorization.Check"; - GrpcClientImpl::GrpcClientImpl(Grpc::RawAsyncClientPtr&& async_client, const absl::optional& timeout, + envoy::config::core::v3::ApiVersion transport_api_version, bool use_alpha) - : service_method_(getMethodDescriptor(use_alpha)), async_client_(std::move(async_client)), - timeout_(timeout) {} + : async_client_(std::move(async_client)), timeout_(timeout), + service_method_(Grpc::VersionedMethods("envoy.service.auth.v3.Authorization.Check", + "envoy.service.auth.v2.Authorization.Check", + "envoy.service.auth.v2alpha.Authorization.Check") + .getMethodDescriptorForVersion(transport_api_version, use_alpha)), + transport_api_version_(transport_api_version) {} GrpcClientImpl::~GrpcClientImpl() { ASSERT(!callbacks_); } @@ -43,7 +44,8 @@ void GrpcClientImpl::check(RequestCallbacks& callbacks, ENVOY_LOG(trace, "Sending CheckRequest: {}", request.DebugString()); request_ = async_client_->send(service_method_, request, *this, parent_span, - Http::AsyncClient::RequestOptions().setTimeout(timeout_)); + Http::AsyncClient::RequestOptions().setTimeout(timeout_), + transport_api_version_); } void GrpcClientImpl::onSuccess(std::unique_ptr&& response, @@ -99,14 +101,6 @@ void GrpcClientImpl::toAuthzResponseHeader( } } -const Protobuf::MethodDescriptor& GrpcClientImpl::getMethodDescriptor(bool use_alpha) { - const auto* descriptor = - use_alpha ? Protobuf::DescriptorPool::generated_pool()->FindMethodByName(V2alpha) - : Protobuf::DescriptorPool::generated_pool()->FindMethodByName(V2); - ASSERT(descriptor != nullptr); - return *descriptor; -} - } // namespace ExtAuthz } // namespace Common } // namespace Filters diff --git a/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.h b/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.h index 467f3acb2cea..ad9799940e9f 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.h +++ b/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.h @@ -44,7 +44,8 @@ class GrpcClientImpl : public Client, public: // TODO(gsagula): remove `use_alpha` param when V2Alpha gets deprecated. 
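For reference, the selection the client performs is the same switch shown in the VersionedMethods helper earlier in this patch: V3 picks the v3 method name, V2 picks the v2 name (or the v2alpha name while `use_alpha` is still honored), and AUTO currently behaves like V2. A rough sketch using the method names wired up in the .cc change above; the descriptors are still resolved from the generated descriptor pool, so a bad name trips the ASSERT inside the helper rather than failing at request time:

    const Protobuf::MethodDescriptor& method =
        Grpc::VersionedMethods("envoy.service.auth.v3.Authorization.Check",
                               "envoy.service.auth.v2.Authorization.Check",
                               "envoy.service.auth.v2alpha.Authorization.Check")
            .getMethodDescriptorForVersion(transport_api_version, use_alpha);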
GrpcClientImpl(Grpc::RawAsyncClientPtr&& async_client, - const absl::optional& timeout, bool use_alpha); + const absl::optional& timeout, + envoy::config::core::v3::ApiVersion transport_api_version, bool use_alpha); ~GrpcClientImpl() override; // ExtAuthz::Client @@ -60,16 +61,16 @@ class GrpcClientImpl : public Client, Tracing::Span& span) override; private: - static const Protobuf::MethodDescriptor& getMethodDescriptor(bool use_alpha); void toAuthzResponseHeader( ResponsePtr& response, const Protobuf::RepeatedPtrField& headers); - const Protobuf::MethodDescriptor& service_method_; Grpc::AsyncClient async_client_; Grpc::AsyncRequest* request_{}; absl::optional timeout_; RequestCallbacks* callbacks_{}; + const Protobuf::MethodDescriptor& service_method_; + const envoy::config::core::v3::ApiVersion transport_api_version_; }; } // namespace ExtAuthz diff --git a/source/extensions/filters/common/ratelimit/ratelimit_impl.cc b/source/extensions/filters/common/ratelimit/ratelimit_impl.cc index 100e75338b4f..588916fae200 100644 --- a/source/extensions/filters/common/ratelimit/ratelimit_impl.cc +++ b/source/extensions/filters/common/ratelimit/ratelimit_impl.cc @@ -21,10 +21,14 @@ namespace Common { namespace RateLimit { GrpcClientImpl::GrpcClientImpl(Grpc::RawAsyncClientPtr&& async_client, - const absl::optional& timeout) - : service_method_(*Protobuf::DescriptorPool::generated_pool()->FindMethodByName( - "envoy.service.ratelimit.v2.RateLimitService.ShouldRateLimit")), - async_client_(std::move(async_client)), timeout_(timeout) {} + const absl::optional& timeout, + envoy::config::core::v3::ApiVersion transport_api_version) + : async_client_(std::move(async_client)), timeout_(timeout), + service_method_( + Grpc::VersionedMethods("envoy.service.ratelimit.v3.RateLimitService.ShouldRateLimit", + "envoy.service.ratelimit.v2.RateLimitService.ShouldRateLimit") + .getMethodDescriptorForVersion(transport_api_version)), + transport_api_version_(transport_api_version) {} GrpcClientImpl::~GrpcClientImpl() { ASSERT(!callbacks_); } @@ -60,7 +64,8 @@ void GrpcClientImpl::limit(RequestCallbacks& callbacks, const std::string& domai createRequest(request, domain, descriptors); request_ = async_client_->send(service_method_, request, *this, parent_span, - Http::AsyncClient::RequestOptions().setTimeout(timeout_)); + Http::AsyncClient::RequestOptions().setTimeout(timeout_), + transport_api_version_); } void GrpcClientImpl::onSuccess( @@ -104,14 +109,15 @@ void GrpcClientImpl::onFailure(Grpc::Status::GrpcStatus status, const std::strin ClientPtr rateLimitClient(Server::Configuration::FactoryContext& context, const envoy::config::core::v3::GrpcService& grpc_service, - const std::chrono::milliseconds timeout) { + const std::chrono::milliseconds timeout, + envoy::config::core::v3::ApiVersion transport_api_version) { // TODO(ramaraochavali): register client to singleton when GrpcClientImpl supports concurrent // requests. 
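The request path is the interesting part of this change: besides selecting the right method descriptor, the new send() overload also runs the outgoing message through Config::VersionConverter::prepareMessageForGrpcWire() for the configured version before handing it to the raw client, and streaming clients do the same by passing the version to sendMessage(). A condensed sketch of what the rate limit GrpcClientImpl above now does (this folds the constructor and limit() together into one snippet for illustration):

    // Resolved once at construction time.
    const Protobuf::MethodDescriptor& method =
        Grpc::VersionedMethods("envoy.service.ratelimit.v3.RateLimitService.ShouldRateLimit",
                               "envoy.service.ratelimit.v2.RateLimitService.ShouldRateLimit")
            .getMethodDescriptorForVersion(transport_api_version_);
    // Per request: the extra trailing argument makes the typed async client prepare the
    // message for the selected transport API version before encoding it on the wire.
    request_ = async_client_->send(method, request, *this, parent_span,
                                   Http::AsyncClient::RequestOptions().setTimeout(timeout_),
                                   transport_api_version_);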
const auto async_client_factory = context.clusterManager().grpcAsyncClientManager().factoryForGrpcService( grpc_service, context.scope(), true); return std::make_unique( - async_client_factory->create(), timeout); + async_client_factory->create(), timeout, transport_api_version); } } // namespace RateLimit diff --git a/source/extensions/filters/common/ratelimit/ratelimit_impl.h b/source/extensions/filters/common/ratelimit/ratelimit_impl.h index f6daf85b14e4..4108ec2b45c0 100644 --- a/source/extensions/filters/common/ratelimit/ratelimit_impl.h +++ b/source/extensions/filters/common/ratelimit/ratelimit_impl.h @@ -46,7 +46,8 @@ class GrpcClientImpl : public Client, public Logger::Loggable { public: GrpcClientImpl(Grpc::RawAsyncClientPtr&& async_client, - const absl::optional& timeout); + const absl::optional& timeout, + envoy::config::core::v3::ApiVersion transport_api_version); ~GrpcClientImpl() override; static void createRequest(envoy::service::ratelimit::v3::RateLimitRequest& request, @@ -67,13 +68,14 @@ class GrpcClientImpl : public Client, Tracing::Span& span) override; private: - const Protobuf::MethodDescriptor& service_method_; Grpc::AsyncClient async_client_; Grpc::AsyncRequest* request_{}; absl::optional timeout_; RequestCallbacks* callbacks_{}; + const Protobuf::MethodDescriptor& service_method_; + const envoy::config::core::v3::ApiVersion transport_api_version_; }; /** @@ -81,7 +83,8 @@ class GrpcClientImpl : public Client, */ ClientPtr rateLimitClient(Server::Configuration::FactoryContext& context, const envoy::config::core::v3::GrpcService& grpc_service, - const std::chrono::milliseconds timeout); + const std::chrono::milliseconds timeout, + envoy::config::core::v3::ApiVersion transport_api_version); } // namespace RateLimit } // namespace Common diff --git a/source/extensions/filters/http/ext_authz/config.cc b/source/extensions/filters/http/ext_authz/config.cc index 1d994268cbe8..255329029dfa 100644 --- a/source/extensions/filters/http/ext_authz/config.cc +++ b/source/extensions/filters/http/ext_authz/config.cc @@ -46,13 +46,15 @@ Http::FilterFactoryCb ExtAuthzFilterConfig::createFilterFactoryFromProtoTyped( const uint32_t timeout_ms = PROTOBUF_GET_MS_OR_DEFAULT(proto_config.grpc_service(), timeout, DefaultTimeout); callback = [grpc_service = proto_config.grpc_service(), &context, filter_config, timeout_ms, + transport_api_version = proto_config.transport_api_version(), use_alpha = proto_config.hidden_envoy_deprecated_use_alpha()]( Http::FilterChainFactoryCallbacks& callbacks) { const auto async_client_factory = context.clusterManager().grpcAsyncClientManager().factoryForGrpcService( grpc_service, context.scope(), true); auto client = std::make_unique( - async_client_factory->create(), std::chrono::milliseconds(timeout_ms), use_alpha); + async_client_factory->create(), std::chrono::milliseconds(timeout_ms), + transport_api_version, use_alpha); callbacks.addStreamDecoderFilter(Http::StreamDecoderFilterSharedPtr{ std::make_shared(filter_config, std::move(client))}); }; diff --git a/source/extensions/filters/http/ratelimit/config.cc b/source/extensions/filters/http/ratelimit/config.cc index c234672301b8..1bcf930af390 100644 --- a/source/extensions/filters/http/ratelimit/config.cc +++ b/source/extensions/filters/http/ratelimit/config.cc @@ -31,7 +31,8 @@ Http::FilterFactoryCb RateLimitFilterConfig::createFilterFactoryFromProtoTyped( filter_config](Http::FilterChainFactoryCallbacks& callbacks) -> void { callbacks.addStreamFilter(std::make_shared( filter_config, 
Filters::Common::RateLimit::rateLimitClient( - context, proto_config.rate_limit_service().grpc_service(), timeout))); + context, proto_config.rate_limit_service().grpc_service(), timeout, + proto_config.rate_limit_service().transport_api_version()))); }; } diff --git a/source/extensions/filters/network/ext_authz/config.cc b/source/extensions/filters/network/ext_authz/config.cc index a47f488a6f6b..f42e2957512b 100644 --- a/source/extensions/filters/network/ext_authz/config.cc +++ b/source/extensions/filters/network/ext_authz/config.cc @@ -27,13 +27,15 @@ Network::FilterFactoryCb ExtAuthzConfigFactory::createFilterFactoryFromProtoType const uint32_t timeout_ms = PROTOBUF_GET_MS_OR_DEFAULT(proto_config.grpc_service(), timeout, 200); return [grpc_service = proto_config.grpc_service(), &context, ext_authz_config, + transport_api_version = proto_config.transport_api_version(), timeout_ms](Network::FilterManager& filter_manager) -> void { auto async_client_factory = context.clusterManager().grpcAsyncClientManager().factoryForGrpcService( grpc_service, context.scope(), true); auto client = std::make_unique( - async_client_factory->create(), std::chrono::milliseconds(timeout_ms), false); + async_client_factory->create(), std::chrono::milliseconds(timeout_ms), + transport_api_version, false); filter_manager.addReadFilter(Network::ReadFilterSharedPtr{ std::make_shared(ext_authz_config, std::move(client))}); }; diff --git a/source/extensions/filters/network/ratelimit/config.cc b/source/extensions/filters/network/ratelimit/config.cc index 4e4546860350..82037f5b424f 100644 --- a/source/extensions/filters/network/ratelimit/config.cc +++ b/source/extensions/filters/network/ratelimit/config.cc @@ -35,7 +35,8 @@ Network::FilterFactoryCb RateLimitConfigFactory::createFilterFactoryFromProtoTyp filter_config, Filters::Common::RateLimit::rateLimitClient( - context, proto_config.rate_limit_service().grpc_service(), timeout))); + context, proto_config.rate_limit_service().grpc_service(), timeout, + proto_config.rate_limit_service().transport_api_version()))); }; } diff --git a/source/extensions/filters/network/thrift_proxy/filters/ratelimit/config.cc b/source/extensions/filters/network/thrift_proxy/filters/ratelimit/config.cc index 43e2fd28f991..9813ec583c97 100644 --- a/source/extensions/filters/network/thrift_proxy/filters/ratelimit/config.cc +++ b/source/extensions/filters/network/thrift_proxy/filters/ratelimit/config.cc @@ -34,7 +34,8 @@ RateLimitFilterConfig::createFilterFactoryFromProtoTyped( config](ThriftProxy::ThriftFilters::FilterChainFactoryCallbacks& callbacks) -> void { callbacks.addDecoderFilter(std::make_shared( config, Filters::Common::RateLimit::rateLimitClient( - context, proto_config.rate_limit_service().grpc_service(), timeout))); + context, proto_config.rate_limit_service().grpc_service(), timeout, + proto_config.rate_limit_service().transport_api_version()))); }; } diff --git a/source/extensions/stat_sinks/metrics_service/config.cc b/source/extensions/stat_sinks/metrics_service/config.cc index 69f0860d228b..73dadf66ddea 100644 --- a/source/extensions/stat_sinks/metrics_service/config.cc +++ b/source/extensions/stat_sinks/metrics_service/config.cc @@ -25,13 +25,14 @@ Stats::SinkPtr MetricsServiceSinkFactory::createStatsSink(const Protobuf::Messag MessageUtil::downcastAndValidate( config, server.messageValidationContext().staticValidationVisitor()); const auto& grpc_service = sink_config.grpc_service(); + const auto& transport_api_version = sink_config.transport_api_version(); ENVOY_LOG(debug, 
"Metrics Service gRPC service configuration: {}", grpc_service.DebugString()); std::shared_ptr grpc_metrics_streamer = std::make_shared( server.clusterManager().grpcAsyncClientManager().factoryForGrpcService( grpc_service, server.stats(), false), - server.localInfo()); + server.localInfo(), transport_api_version); return std::make_unique( grpc_metrics_streamer, server.timeSource(), diff --git a/source/extensions/stat_sinks/metrics_service/grpc_metrics_service_impl.cc b/source/extensions/stat_sinks/metrics_service/grpc_metrics_service_impl.cc index 85f2a63b7fb3..092e3fbe6fcf 100644 --- a/source/extensions/stat_sinks/metrics_service/grpc_metrics_service_impl.cc +++ b/source/extensions/stat_sinks/metrics_service/grpc_metrics_service_impl.cc @@ -16,20 +16,24 @@ namespace Extensions { namespace StatSinks { namespace MetricsService { -GrpcMetricsStreamerImpl::GrpcMetricsStreamerImpl(Grpc::AsyncClientFactoryPtr&& factory, - const LocalInfo::LocalInfo& local_info) - : client_(factory->create()), local_info_(local_info) {} +GrpcMetricsStreamerImpl::GrpcMetricsStreamerImpl( + Grpc::AsyncClientFactoryPtr&& factory, const LocalInfo::LocalInfo& local_info, + envoy::config::core::v3::ApiVersion transport_api_version) + : client_(factory->create()), local_info_(local_info), + service_method_( + Grpc::VersionedMethods("envoy.service.metrics.v3.MetricsService.StreamMetrics", + "envoy.service.metrics.v2.MetricsService.StreamMetrics") + .getMethodDescriptorForVersion(transport_api_version)), + transport_api_version_(transport_api_version) {} void GrpcMetricsStreamerImpl::send(envoy::service::metrics::v3::StreamMetricsMessage& message) { if (stream_ == nullptr) { - stream_ = client_->start(*Protobuf::DescriptorPool::generated_pool()->FindMethodByName( - "envoy.service.metrics.v2.MetricsService.StreamMetrics"), - *this, Http::AsyncClient::StreamOptions()); + stream_ = client_->start(service_method_, *this, Http::AsyncClient::StreamOptions()); auto* identifier = message.mutable_identifier(); *identifier->mutable_node() = local_info_.node(); } if (stream_ != nullptr) { - stream_->sendMessage(message, false); + stream_->sendMessage(message, transport_api_version_, false); } } diff --git a/source/extensions/stat_sinks/metrics_service/grpc_metrics_service_impl.h b/source/extensions/stat_sinks/metrics_service/grpc_metrics_service_impl.h index 84c2d19695f4..0e35d3b06304 100644 --- a/source/extensions/stat_sinks/metrics_service/grpc_metrics_service_impl.h +++ b/source/extensions/stat_sinks/metrics_service/grpc_metrics_service_impl.h @@ -50,7 +50,8 @@ using GrpcMetricsStreamerSharedPtr = std::shared_ptr; class GrpcMetricsStreamerImpl : public Singleton::Instance, public GrpcMetricsStreamer { public: GrpcMetricsStreamerImpl(Grpc::AsyncClientFactoryPtr&& factory, - const LocalInfo::LocalInfo& local_info); + const LocalInfo::LocalInfo& local_info, + envoy::config::core::v3::ApiVersion transport_api_version); // GrpcMetricsStreamer void send(envoy::service::metrics::v3::StreamMetricsMessage& message) override; @@ -64,6 +65,8 @@ class GrpcMetricsStreamerImpl : public Singleton::Instance, public GrpcMetricsSt envoy::service::metrics::v3::StreamMetricsResponse> client_; const LocalInfo::LocalInfo& local_info_; + const Protobuf::MethodDescriptor& service_method_; + const envoy::config::core::v3::ApiVersion transport_api_version_; }; /** diff --git a/test/common/grpc/grpc_client_integration.h b/test/common/grpc/grpc_client_integration.h index 6f35c4b06e59..1f2bb941d622 100644 --- a/test/common/grpc/grpc_client_integration.h +++ 
b/test/common/grpc/grpc_client_integration.h @@ -55,6 +55,26 @@ class GrpcClientIntegrationParamTest ClientType clientType() const override { return std::get<1>(GetParam()); } }; +class VersionedGrpcClientIntegrationParamTest + : public BaseGrpcClientIntegrationParamTest, + public testing::TestWithParam> { +public: + static std::string protocolTestParamsToString( + const ::testing::TestParamInfo>& p) { + return fmt::format("{}_{}_{}", + std::get<0>(p.param) == Network::Address::IpVersion::v4 ? "IPv4" : "IPv6", + std::get<1>(p.param) == ClientType::GoogleGrpc ? "GoogleGrpc" : "EnvoyGrpc", + std::get<2>(p.param) == envoy::config::core::v3::ApiVersion::V3 + ? "V3" + : envoy::config::core::v3::ApiVersion::V2 ? "V2" : "AUTO"); + } + Network::Address::IpVersion ipVersion() const override { return std::get<0>(GetParam()); } + ClientType clientType() const override { return std::get<1>(GetParam()); } + envoy::config::core::v3::ApiVersion apiVersion() const { return std::get<2>(GetParam()); } +}; + class DeltaSotwIntegrationParamTest : public BaseGrpcClientIntegrationParamTest, public testing::TestWithParam< @@ -90,6 +110,12 @@ class DeltaSotwIntegrationParamTest #define GRPC_CLIENT_INTEGRATION_PARAMS \ testing::Combine(testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), \ testing::Values(Grpc::ClientType::EnvoyGrpc, Grpc::ClientType::GoogleGrpc)) +#define VERSIONED_GRPC_CLIENT_INTEGRATION_PARAMS \ + testing::Combine(testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), \ + testing::Values(Grpc::ClientType::EnvoyGrpc, Grpc::ClientType::GoogleGrpc), \ + testing::Values(envoy::config::core::v3::ApiVersion::V3, \ + envoy::config::core::v3::ApiVersion::V2, \ + envoy::config::core::v3::ApiVersion::AUTO)) #define DELTA_SOTW_GRPC_CLIENT_INTEGRATION_PARAMS \ testing::Combine(testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), \ testing::Values(Grpc::ClientType::EnvoyGrpc, Grpc::ClientType::GoogleGrpc), \ @@ -98,6 +124,12 @@ class DeltaSotwIntegrationParamTest #define GRPC_CLIENT_INTEGRATION_PARAMS \ testing::Combine(testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), \ testing::Values(Grpc::ClientType::EnvoyGrpc)) +#define VERSIONED_GRPC_CLIENT_INTEGRATION_PARAMS \ + testing::Combine(testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), \ + testing::Values(Grpc::ClientType::EnvoyGrpc), \ + testing::Values(envoy::config::core::v3::ApiVersion::V3, \ + envoy::config::core::v3::ApiVersion::V2, \ + envoy::config::core::v3::ApiVersion::AUTO)) #define DELTA_SOTW_GRPC_CLIENT_INTEGRATION_PARAMS \ testing::Combine(testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), \ testing::Values(Grpc::ClientType::EnvoyGrpc), \ diff --git a/test/extensions/access_loggers/grpc/grpc_access_log_impl_test.cc b/test/extensions/access_loggers/grpc/grpc_access_log_impl_test.cc index 5747ed71b3ed..4cb7054017b8 100644 --- a/test/extensions/access_loggers/grpc/grpc_access_log_impl_test.cc +++ b/test/extensions/access_loggers/grpc/grpc_access_log_impl_test.cc @@ -44,7 +44,8 @@ class GrpcAccessLoggerImplTest : public testing::Test { EXPECT_CALL(*timer_, enableTimer(buffer_flush_interval_msec, _)); logger_ = std::make_unique( Grpc::RawAsyncClientPtr{async_client_}, log_name_, buffer_flush_interval_msec, - buffer_size_bytes, dispatcher_, local_info_, stats_store_); + buffer_size_bytes, dispatcher_, local_info_, stats_store_, + envoy::config::core::v3::ApiVersion::AUTO); } void expectStreamStart(MockAccessLogStream& stream, AccessLogCallbacks** callbacks_to_set) { diff --git 
a/test/extensions/access_loggers/grpc/http_grpc_access_log_integration_test.cc b/test/extensions/access_loggers/grpc/http_grpc_access_log_integration_test.cc index 9eff52031ef2..148921270f23 100644 --- a/test/extensions/access_loggers/grpc/http_grpc_access_log_integration_test.cc +++ b/test/extensions/access_loggers/grpc/http_grpc_access_log_integration_test.cc @@ -19,7 +19,7 @@ using testing::AssertionResult; namespace Envoy { namespace { -class AccessLogIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, +class AccessLogIntegrationTest : public Grpc::VersionedGrpcClientIntegrationParamTest, public HttpIntegrationTest { public: AccessLogIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, ipVersion()) {} @@ -48,6 +48,7 @@ class AccessLogIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, envoy::extensions::access_loggers::grpc::v3::HttpGrpcAccessLogConfig config; auto* common_config = config.mutable_common_config(); common_config->set_log_name("foo"); + common_config->set_transport_api_version(apiVersion()); setGrpcService(*common_config->mutable_grpc_service(), "accesslog", fake_upstreams_.back()->localAddress()); access_log->mutable_typed_config()->PackFrom(config); @@ -56,6 +57,14 @@ class AccessLogIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, HttpIntegrationTest::initialize(); } + static ProtobufTypes::MessagePtr scrubHiddenEnvoyDeprecated(const Protobuf::Message& message) { + ProtobufTypes::MessagePtr mutable_clone; + mutable_clone.reset(message.New()); + mutable_clone->MergeFrom(message); + Config::VersionUtil::scrubHiddenEnvoyDeprecated(*mutable_clone); + return mutable_clone; + } + ABSL_MUST_USE_RESULT AssertionResult waitForAccessLogConnection() { return fake_upstreams_[1]->waitForHttpConnection(*dispatcher_, fake_access_log_connection_); @@ -71,7 +80,8 @@ class AccessLogIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, envoy::service::accesslog::v3::StreamAccessLogsMessage request_msg; VERIFY_ASSERTION(access_log_request_->waitForGrpcMessage(*dispatcher_, request_msg)); EXPECT_EQ("POST", access_log_request_->headers().getMethodValue()); - EXPECT_EQ("/envoy.service.accesslog.v2.AccessLogService/StreamAccessLogs", + EXPECT_EQ(TestUtility::getVersionedMethodPath("envoy.service.accesslog.{}.AccessLogService", + "StreamAccessLogs", apiVersion()), access_log_request_->headers().getPathValue()); EXPECT_EQ("application/grpc", access_log_request_->headers().getContentTypeValue()); @@ -93,8 +103,10 @@ class AccessLogIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, node->clear_extensions(); node->clear_user_agent_build_version(); } - EXPECT_EQ(request_msg.DebugString(), expected_request_msg.DebugString()); - + Config::VersionUtil::scrubHiddenEnvoyDeprecated(request_msg); + Config::VersionUtil::scrubHiddenEnvoyDeprecated(expected_request_msg); + EXPECT_TRUE(TestUtility::protoEqual(request_msg, expected_request_msg, + /*ignore_repeated_field_ordering=*/false)); return AssertionSuccess(); } @@ -112,7 +124,7 @@ class AccessLogIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, }; INSTANTIATE_TEST_SUITE_P(IpVersionsCientType, AccessLogIntegrationTest, - GRPC_CLIENT_INTEGRATION_PARAMS); + VERSIONED_GRPC_CLIENT_INTEGRATION_PARAMS); // Test a basic full access logging flow. 
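Integration tests opt into the new dimension by deriving from Grpc::VersionedGrpcClientIntegrationParamTest and instantiating with VERSIONED_GRPC_CLIENT_INTEGRATION_PARAMS, which layers ApiVersion::{V3, V2, AUTO} on top of the existing IP-version/client-type matrix; apiVersion() then drives both the bootstrap config and the expected request path. A trimmed-down sketch of the pattern used here and in the tests that follow (the fixture name is illustrative only):

    class VersionedServiceIntegrationTest : public Grpc::VersionedGrpcClientIntegrationParamTest,
                                            public HttpIntegrationTest {
    public:
      VersionedServiceIntegrationTest()
          : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, ipVersion()) {}
    };

    INSTANTIATE_TEST_SUITE_P(IpVersionsClientType, VersionedServiceIntegrationTest,
                             VERSIONED_GRPC_CLIENT_INTEGRATION_PARAMS);

So, for example, the V3 instances of the access log test expect /envoy.service.accesslog.v3.AccessLogService/StreamAccessLogs on the wire instead of the v2 path that used to be hard-coded.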
TEST_P(AccessLogIntegrationTest, BasicAccessLogFlow) { diff --git a/test/extensions/access_loggers/grpc/tcp_grpc_access_log_integration_test.cc b/test/extensions/access_loggers/grpc/tcp_grpc_access_log_integration_test.cc index e3358b3d9910..f7a7322616a8 100644 --- a/test/extensions/access_loggers/grpc/tcp_grpc_access_log_integration_test.cc +++ b/test/extensions/access_loggers/grpc/tcp_grpc_access_log_integration_test.cc @@ -24,7 +24,7 @@ void clearPort(envoy::config::core::v3::Address& address) { address.mutable_socket_address()->clear_port_specifier(); } -class TcpGrpcAccessLogIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, +class TcpGrpcAccessLogIntegrationTest : public Grpc::VersionedGrpcClientIntegrationParamTest, public BaseIntegrationTest { public: TcpGrpcAccessLogIntegrationTest() @@ -54,6 +54,7 @@ class TcpGrpcAccessLogIntegrationTest : public Grpc::GrpcClientIntegrationParamT envoy::extensions::access_loggers::grpc::v3::TcpGrpcAccessLogConfig access_log_config; auto* common_config = access_log_config.mutable_common_config(); common_config->set_log_name("foo"); + common_config->set_transport_api_version(apiVersion()); setGrpcService(*common_config->mutable_grpc_service(), "accesslog", fake_upstreams_.back()->localAddress()); access_log->mutable_typed_config()->PackFrom(access_log_config); @@ -76,7 +77,8 @@ class TcpGrpcAccessLogIntegrationTest : public Grpc::GrpcClientIntegrationParamT envoy::service::accesslog::v3::StreamAccessLogsMessage request_msg; VERIFY_ASSERTION(access_log_request_->waitForGrpcMessage(*dispatcher_, request_msg)); EXPECT_EQ("POST", access_log_request_->headers().getMethodValue()); - EXPECT_EQ("/envoy.service.accesslog.v2.AccessLogService/StreamAccessLogs", + EXPECT_EQ(TestUtility::getVersionedMethodPath("envoy.service.accesslog.{}.AccessLogService", + "StreamAccessLogs", apiVersion()), access_log_request_->headers().getPathValue()); EXPECT_EQ("application/grpc", access_log_request_->headers().getContentTypeValue()); @@ -99,7 +101,10 @@ class TcpGrpcAccessLogIntegrationTest : public Grpc::GrpcClientIntegrationParamT node->clear_extensions(); node->clear_user_agent_build_version(); } - EXPECT_EQ(request_msg.DebugString(), expected_request_msg.DebugString()); + Config::VersionUtil::scrubHiddenEnvoyDeprecated(request_msg); + Config::VersionUtil::scrubHiddenEnvoyDeprecated(expected_request_msg); + EXPECT_TRUE(TestUtility::protoEqual(request_msg, expected_request_msg, + /*ignore_repeated_field_ordering=*/false)); return AssertionSuccess(); } @@ -119,7 +124,7 @@ class TcpGrpcAccessLogIntegrationTest : public Grpc::GrpcClientIntegrationParamT }; INSTANTIATE_TEST_SUITE_P(IpVersionsCientType, TcpGrpcAccessLogIntegrationTest, - GRPC_CLIENT_INTEGRATION_PARAMS); + VERSIONED_GRPC_CLIENT_INTEGRATION_PARAMS); // Test a basic full access logging flow. 
TEST_P(TcpGrpcAccessLogIntegrationTest, BasicAccessLogFlow) { diff --git a/test/extensions/filters/common/ext_authz/ext_authz_grpc_impl_test.cc b/test/extensions/filters/common/ext_authz/ext_authz_grpc_impl_test.cc index ce4363980255..711680fa6394 100644 --- a/test/extensions/filters/common/ext_authz/ext_authz_grpc_impl_test.cc +++ b/test/extensions/filters/common/ext_authz/ext_authz_grpc_impl_test.cc @@ -31,17 +31,17 @@ namespace Filters { namespace Common { namespace ExtAuthz { -constexpr char V2[] = "envoy.service.auth.v2.Authorization"; -constexpr char V2Alpha[] = "envoy.service.auth.v2alpha.Authorization"; +using Params = std::tuple; -class ExtAuthzGrpcClientTest : public testing::TestWithParam { +class ExtAuthzGrpcClientTest : public testing::TestWithParam { public: ExtAuthzGrpcClientTest() : async_client_(new Grpc::MockAsyncClient()), timeout_(10) {} - void initialize(bool use_alpha) { - use_alpha_ = use_alpha; + void initialize(const Params& param) { + api_version_ = std::get<0>(param); + use_alpha_ = std::get<1>(param); client_ = std::make_unique(Grpc::RawAsyncClientPtr{async_client_}, timeout_, - use_alpha_); + api_version_, use_alpha_); } void expectCallSend(envoy::service::auth::v3::CheckRequest& request) { @@ -51,7 +51,9 @@ class ExtAuthzGrpcClientTest : public testing::TestWithParam { Invoke([this](absl::string_view service_full_name, absl::string_view method_name, Buffer::InstancePtr&&, Grpc::RawAsyncRequestCallbacks&, Tracing::Span&, const Http::AsyncClient::RequestOptions& options) -> Grpc::AsyncRequest* { - EXPECT_EQ(use_alpha_ ? V2Alpha : V2, service_full_name); + EXPECT_EQ(TestUtility::getVersionedServiceFullName( + "envoy.service.auth.{}.Authorization", api_version_, use_alpha_), + service_full_name); EXPECT_EQ("Check", method_name); EXPECT_EQ(timeout_->count(), options.timeout->count()); return &async_request_; @@ -66,9 +68,14 @@ class ExtAuthzGrpcClientTest : public testing::TestWithParam { Tracing::MockSpan span_; bool use_alpha_{}; NiceMock stream_info_; + envoy::config::core::v3::ApiVersion api_version_; }; -INSTANTIATE_TEST_SUITE_P(Parameterized, ExtAuthzGrpcClientTest, Values(true, false)); +INSTANTIATE_TEST_SUITE_P(Parameterized, ExtAuthzGrpcClientTest, + Values(Params(envoy::config::core::v3::ApiVersion::AUTO, false), + Params(envoy::config::core::v3::ApiVersion::V2, false), + Params(envoy::config::core::v3::ApiVersion::V2, true), + Params(envoy::config::core::v3::ApiVersion::V3, false))); // Test the client when an ok response is received. 
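Based on the constants this test used to hard-code and on the VersionedMethods entry for ext_authz, each parameter tuple resolves to a concrete service name roughly as follows:

    // (ApiVersion::V3,   use_alpha = false) -> "envoy.service.auth.v3.Authorization"
    // (ApiVersion::V2,   use_alpha = false) -> "envoy.service.auth.v2.Authorization"
    // (ApiVersion::V2,   use_alpha = true)  -> "envoy.service.auth.v2alpha.Authorization"
    // (ApiVersion::AUTO, use_alpha = false) -> currently the v2 name as well

which is what TestUtility::getVersionedServiceFullName() is expected to return in the expectCallSend() check above.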
TEST_P(ExtAuthzGrpcClientTest, AuthorizationOk) { diff --git a/test/extensions/filters/common/ext_authz/test_common.cc b/test/extensions/filters/common/ext_authz/test_common.cc index 9e8af02f3807..5dc5603e93a3 100644 --- a/test/extensions/filters/common/ext_authz/test_common.cc +++ b/test/extensions/filters/common/ext_authz/test_common.cc @@ -98,7 +98,7 @@ Http::ResponseMessagePtr TestCommon::makeMessageResponse(const HeaderValueOption return response; }; -bool TestCommon::CompareHeaderVector(const Http::HeaderVector& lhs, const Http::HeaderVector& rhs) { +bool TestCommon::compareHeaderVector(const Http::HeaderVector& lhs, const Http::HeaderVector& rhs) { return std::set>(lhs.begin(), lhs.end()) == std::set>(rhs.begin(), rhs.end()); } diff --git a/test/extensions/filters/common/ext_authz/test_common.h b/test/extensions/filters/common/ext_authz/test_common.h index 47a4ad6e3bb8..07348b838dfa 100644 --- a/test/extensions/filters/common/ext_authz/test_common.h +++ b/test/extensions/filters/common/ext_authz/test_common.h @@ -44,7 +44,7 @@ class TestCommon { static HeaderValueOptionVector makeHeaderValueOption(KeyValueOptionVector&& headers); - static bool CompareHeaderVector(const Http::HeaderVector& lhs, const Http::HeaderVector& rhs); + static bool compareHeaderVector(const Http::HeaderVector& lhs, const Http::HeaderVector& rhs); }; MATCHER_P(AuthzErrorResponse, status, "") { @@ -77,7 +77,7 @@ MATCHER_P(AuthzDeniedResponse, response, "") { return false; } // Compare headers_to_add. - return TestCommon::CompareHeaderVector(response.headers_to_add, arg->headers_to_add); + return TestCommon::compareHeaderVector(response.headers_to_add, arg->headers_to_add); } MATCHER_P(AuthzOkResponse, response, "") { @@ -85,12 +85,12 @@ MATCHER_P(AuthzOkResponse, response, "") { return false; } // Compare headers_to_append. - if (!TestCommon::CompareHeaderVector(response.headers_to_append, arg->headers_to_append)) { + if (!TestCommon::compareHeaderVector(response.headers_to_append, arg->headers_to_append)) { return false; } // Compare headers_to_add. 
- return TestCommon::CompareHeaderVector(response.headers_to_add, arg->headers_to_add); + return TestCommon::compareHeaderVector(response.headers_to_add, arg->headers_to_add); ; } diff --git a/test/extensions/filters/common/ratelimit/ratelimit_impl_test.cc b/test/extensions/filters/common/ratelimit/ratelimit_impl_test.cc index 57facccb78a3..c17101438214 100644 --- a/test/extensions/filters/common/ratelimit/ratelimit_impl_test.cc +++ b/test/extensions/filters/common/ratelimit/ratelimit_impl_test.cc @@ -49,8 +49,8 @@ class RateLimitGrpcClientTest : public testing::Test { public: RateLimitGrpcClientTest() : async_client_(new Grpc::MockAsyncClient()), - client_(Grpc::RawAsyncClientPtr{async_client_}, - absl::optional()) {} + client_(Grpc::RawAsyncClientPtr{async_client_}, absl::optional(), + envoy::config::core::v3::ApiVersion::AUTO) {} Grpc::MockAsyncClient* async_client_; Grpc::MockAsyncRequest async_request_; diff --git a/test/extensions/filters/http/ext_authz/config_test.cc b/test/extensions/filters/http/ext_authz/config_test.cc index c6f8f18b5b4c..048a740a9848 100644 --- a/test/extensions/filters/http/ext_authz/config_test.cc +++ b/test/extensions/filters/http/ext_authz/config_test.cc @@ -6,6 +6,7 @@ #include "extensions/filters/http/ext_authz/config.h" #include "test/mocks/server/mocks.h" +#include "test/test_common/utility.h" #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -19,18 +20,20 @@ namespace HttpFilters { namespace ExtAuthz { namespace { -TEST(HttpExtAuthzConfigTest, CorrectProtoGrpc) { +void expectCorrectProtoGrpc(envoy::config::core::v3::ApiVersion api_version) { std::string yaml = R"EOF( grpc_service: google_grpc: target_uri: ext_authz_server stat_prefix: google failure_mode_allow: false + transport_api_version: {} )EOF"; ExtAuthzFilterConfig factory; ProtobufTypes::MessagePtr proto_config = factory.createEmptyConfigProto(); - TestUtility::loadFromYaml(yaml, *proto_config); + TestUtility::loadFromYaml( + fmt::format(yaml, TestUtility::getVersionStringFromApiVersion(api_version)), *proto_config); testing::StrictMock context; EXPECT_CALL(context, messageValidationVisitor()).Times(1); @@ -48,6 +51,14 @@ TEST(HttpExtAuthzConfigTest, CorrectProtoGrpc) { cb(filter_callback); } +} // namespace + +TEST(HttpExtAuthzConfigTest, CorrectProtoGrpc) { + expectCorrectProtoGrpc(envoy::config::core::v3::ApiVersion::AUTO); + expectCorrectProtoGrpc(envoy::config::core::v3::ApiVersion::V2); + expectCorrectProtoGrpc(envoy::config::core::v3::ApiVersion::V3); +} + TEST(HttpExtAuthzConfigTest, CorrectProtoHttp) { std::string yaml = R"EOF( http_service: @@ -110,7 +121,6 @@ TEST(HttpExtAuthzConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterNa deprecated_name)); } -} // namespace } // namespace ExtAuthz } // namespace HttpFilters } // namespace Extensions diff --git a/test/extensions/filters/http/ext_authz/ext_authz_integration_test.cc b/test/extensions/filters/http/ext_authz/ext_authz_integration_test.cc index d368f04db6f9..94ec7b287b80 100644 --- a/test/extensions/filters/http/ext_authz/ext_authz_integration_test.cc +++ b/test/extensions/filters/http/ext_authz/ext_authz_integration_test.cc @@ -3,6 +3,8 @@ #include "envoy/extensions/filters/http/ext_authz/v3/ext_authz.pb.h" #include "envoy/service/auth/v3/external_auth.pb.h" +#include "common/common/macros.h" + #include "extensions/filters/http/well_known_names.h" #include "test/common/grpc/grpc_client_integration.h" @@ -15,9 +17,8 @@ using testing::AssertionResult; namespace Envoy { -namespace { -class ExtAuthzGrpcIntegrationTest : 
public Grpc::GrpcClientIntegrationParamTest, +class ExtAuthzGrpcIntegrationTest : public Grpc::VersionedGrpcClientIntegrationParamTest, public HttpIntegrationTest { public: ExtAuthzGrpcIntegrationTest() @@ -44,6 +45,7 @@ class ExtAuthzGrpcIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, proto_config_.mutable_filter_enabled()->mutable_default_value()->set_numerator(100); proto_config_.mutable_deny_at_disable()->set_runtime_key("envoy.ext_authz.deny_at_disable"); proto_config_.mutable_deny_at_disable()->mutable_default_value()->set_value(false); + proto_config_.set_transport_api_version(apiVersion()); envoy::config::listener::v3::Filter ext_authz_filter; ext_authz_filter.set_name(Extensions::HttpFilters::HttpFilterNames::get().ExtAuthorization); @@ -83,7 +85,8 @@ class ExtAuthzGrpcIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, RELEASE_ASSERT(result, result.message()); EXPECT_EQ("POST", ext_authz_request_->headers().getMethodValue()); - EXPECT_EQ("/envoy.service.auth.v2.Authorization/Check", + EXPECT_EQ(TestUtility::getVersionedMethodPath("envoy.service.auth.{}.Authorization", "Check", + apiVersion()), ext_authz_request_->headers().getPathValue()); EXPECT_EQ("application/grpc", ext_authz_request_->headers().getContentTypeValue()); @@ -318,7 +321,7 @@ class ExtAuthzHttpIntegrationTest : public HttpIntegrationTest, }; INSTANTIATE_TEST_SUITE_P(IpVersionsCientType, ExtAuthzGrpcIntegrationTest, - GRPC_CLIENT_INTEGRATION_PARAMS); + VERSIONED_GRPC_CLIENT_INTEGRATION_PARAMS); // Verifies that the request body is included in the CheckRequest when the downstream protocol is // HTTP/1.1. @@ -369,5 +372,4 @@ TEST_P(ExtAuthzHttpIntegrationTest, DisableCaseSensitiveStringMatcher) { EXPECT_EQ(case_sensitive_header_value_, header_entry->value().getStringView()); } -} // namespace } // namespace Envoy diff --git a/test/extensions/filters/http/ratelimit/ratelimit_integration_test.cc b/test/extensions/filters/http/ratelimit/ratelimit_integration_test.cc index 1b30b786b223..db2c5849c651 100644 --- a/test/extensions/filters/http/ratelimit/ratelimit_integration_test.cc +++ b/test/extensions/filters/http/ratelimit/ratelimit_integration_test.cc @@ -20,7 +20,7 @@ namespace Envoy { namespace { // Tests Ratelimit functionality with config in filter. 
-class RatelimitIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, +class RatelimitIntegrationTest : public Grpc::VersionedGrpcClientIntegrationParamTest, public HttpIntegrationTest { public: RatelimitIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, ipVersion()) {} @@ -46,6 +46,7 @@ class RatelimitIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, proto_config_.set_failure_mode_deny(failure_mode_deny_); setGrpcService(*proto_config_.mutable_rate_limit_service()->mutable_grpc_service(), "ratelimit", fake_upstreams_.back()->localAddress()); + proto_config_.mutable_rate_limit_service()->set_transport_api_version(apiVersion()); envoy::config::listener::v3::Filter ratelimit_filter; ratelimit_filter.set_name("envoy.filters.http.ratelimit"); @@ -86,7 +87,8 @@ class RatelimitIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, result = ratelimit_request_->waitForEndStream(*dispatcher_); RELEASE_ASSERT(result, result.message()); EXPECT_EQ("POST", ratelimit_request_->headers().getMethodValue()); - EXPECT_EQ("/envoy.service.ratelimit.v2.RateLimitService/ShouldRateLimit", + EXPECT_EQ(TestUtility::getVersionedMethodPath("envoy.service.ratelimit.{}.RateLimitService", + "ShouldRateLimit", apiVersion()), ratelimit_request_->headers().getPathValue()); EXPECT_EQ("application/grpc", ratelimit_request_->headers().getContentTypeValue()); @@ -203,9 +205,9 @@ class RatelimitFailureModeIntegrationTest : public RatelimitIntegrationTest { }; INSTANTIATE_TEST_SUITE_P(IpVersionsClientType, RatelimitIntegrationTest, - GRPC_CLIENT_INTEGRATION_PARAMS); + VERSIONED_GRPC_CLIENT_INTEGRATION_PARAMS); INSTANTIATE_TEST_SUITE_P(IpVersionsClientType, RatelimitFailureModeIntegrationTest, - GRPC_CLIENT_INTEGRATION_PARAMS); + VERSIONED_GRPC_CLIENT_INTEGRATION_PARAMS); TEST_P(RatelimitIntegrationTest, Ok) { basicFlow(); } diff --git a/test/extensions/filters/network/ext_authz/config_test.cc b/test/extensions/filters/network/ext_authz/config_test.cc index 010ad2018e85..bdee7d9a7a69 100644 --- a/test/extensions/filters/network/ext_authz/config_test.cc +++ b/test/extensions/filters/network/ext_authz/config_test.cc @@ -6,6 +6,7 @@ #include "extensions/filters/network/ext_authz/config.h" #include "test/mocks/server/mocks.h" +#include "test/test_common/utility.h" #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -18,14 +19,8 @@ namespace Extensions { namespace NetworkFilters { namespace ExtAuthz { -TEST(ExtAuthzFilterConfigTest, ValidateFail) { - NiceMock context; - EXPECT_THROW(ExtAuthzConfigFactory().createFilterFactoryFromProto( - envoy::extensions::filters::network::ext_authz::v3::ExtAuthz(), context), - ProtoValidationException); -} - -TEST(ExtAuthzFilterConfigTest, ExtAuthzCorrectProto) { +namespace { +void expectCorrectProto(envoy::config::core::v3::ApiVersion api_version) { std::string yaml = R"EOF( grpc_service: google_grpc: @@ -33,11 +28,13 @@ TEST(ExtAuthzFilterConfigTest, ExtAuthzCorrectProto) { stat_prefix: google failure_mode_allow: false stat_prefix: name + transport_api_version: {} )EOF"; ExtAuthzConfigFactory factory; ProtobufTypes::MessagePtr proto_config = factory.createEmptyConfigProto(); - TestUtility::loadFromYaml(yaml, *proto_config); + TestUtility::loadFromYaml( + fmt::format(yaml, TestUtility::getVersionStringFromApiVersion(api_version)), *proto_config); NiceMock context; @@ -50,6 +47,20 @@ TEST(ExtAuthzFilterConfigTest, ExtAuthzCorrectProto) { EXPECT_CALL(connection, addReadFilter(_)); cb(connection); } +} // namespace + +TEST(ExtAuthzFilterConfigTest, 
ValidateFail) { + NiceMock context; + EXPECT_THROW(ExtAuthzConfigFactory().createFilterFactoryFromProto( + envoy::extensions::filters::network::ext_authz::v3::ExtAuthz(), context), + ProtoValidationException); +} + +TEST(ExtAuthzFilterConfigTest, ExtAuthzCorrectProto) { + expectCorrectProto(envoy::config::core::v3::ApiVersion::AUTO); + expectCorrectProto(envoy::config::core::v3::ApiVersion::V2); + expectCorrectProto(envoy::config::core::v3::ApiVersion::V3); +} // Test that the deprecated extension name still functions. TEST(ExtAuthzConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) { diff --git a/test/extensions/stats_sinks/metrics_service/grpc_metrics_service_impl_test.cc b/test/extensions/stats_sinks/metrics_service/grpc_metrics_service_impl_test.cc index d0e5325607e9..649b383496b6 100644 --- a/test/extensions/stats_sinks/metrics_service/grpc_metrics_service_impl_test.cc +++ b/test/extensions/stats_sinks/metrics_service/grpc_metrics_service_impl_test.cc @@ -31,8 +31,9 @@ class GrpcMetricsStreamerImplTest : public testing::Test { EXPECT_CALL(*factory_, create()).WillOnce(Invoke([this] { return Grpc::RawAsyncClientPtr{async_client_}; })); - streamer_ = std::make_unique(Grpc::AsyncClientFactoryPtr{factory_}, - local_info_); + streamer_ = std::make_unique( + Grpc::AsyncClientFactoryPtr{factory_}, local_info_, + envoy::config::core::v3::ApiVersion::AUTO); } void expectStreamStart(MockMetricsStream& stream, MetricsServiceCallbacks** callbacks_to_set) { diff --git a/test/extensions/stats_sinks/metrics_service/metrics_service_integration_test.cc b/test/extensions/stats_sinks/metrics_service/metrics_service_integration_test.cc index 55e8baafcfae..0e227d6469dd 100644 --- a/test/extensions/stats_sinks/metrics_service/metrics_service_integration_test.cc +++ b/test/extensions/stats_sinks/metrics_service/metrics_service_integration_test.cc @@ -18,7 +18,7 @@ using testing::AssertionResult; namespace Envoy { namespace { -class MetricsServiceIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, +class MetricsServiceIntegrationTest : public Grpc::VersionedGrpcClientIntegrationParamTest, public HttpIntegrationTest { public: MetricsServiceIntegrationTest() @@ -44,6 +44,7 @@ class MetricsServiceIntegrationTest : public Grpc::GrpcClientIntegrationParamTes envoy::config::metrics::v3::MetricsServiceConfig config; setGrpcService(*config.mutable_grpc_service(), "metrics_service", fake_upstreams_.back()->localAddress()); + config.set_transport_api_version(apiVersion()); metrics_sink->mutable_typed_config()->PackFrom(config); // Shrink reporting period down to 1s to make test not take forever. 
bootstrap.mutable_stats_flush_interval()->CopyFrom( @@ -80,7 +81,8 @@ class MetricsServiceIntegrationTest : public Grpc::GrpcClientIntegrationParamTes envoy::service::metrics::v3::StreamMetricsMessage request_msg; VERIFY_ASSERTION(metrics_service_request_->waitForGrpcMessage(*dispatcher_, request_msg)); EXPECT_EQ("POST", metrics_service_request_->headers().getMethodValue()); - EXPECT_EQ("/envoy.service.metrics.v2.MetricsService/StreamMetrics", + EXPECT_EQ(TestUtility::getVersionedMethodPath("envoy.service.metrics.{}.MetricsService", + "StreamMetrics", apiVersion()), metrics_service_request_->headers().getPathValue()); EXPECT_EQ("application/grpc", metrics_service_request_->headers().getContentTypeValue()); EXPECT_TRUE(request_msg.envoy_metrics_size() > 0); @@ -140,7 +142,7 @@ class MetricsServiceIntegrationTest : public Grpc::GrpcClientIntegrationParamTes }; INSTANTIATE_TEST_SUITE_P(IpVersionsClientType, MetricsServiceIntegrationTest, - GRPC_CLIENT_INTEGRATION_PARAMS); + VERSIONED_GRPC_CLIENT_INTEGRATION_PARAMS); // Test a basic metric service flow. TEST_P(MetricsServiceIntegrationTest, BasicFlow) { diff --git a/test/integration/hds_integration_test.cc b/test/integration/hds_integration_test.cc index 91bc7143110b..f5fc80cdd556 100644 --- a/test/integration/hds_integration_test.cc +++ b/test/integration/hds_integration_test.cc @@ -26,10 +26,10 @@ namespace Envoy { namespace { // TODO(jmarantz): switch this to simulated-time after debugging flakes. -class HdsIntegrationTest : public testing::TestWithParam, +class HdsIntegrationTest : public Grpc::VersionedGrpcClientIntegrationParamTest, public HttpIntegrationTest { public: - HdsIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, GetParam()) {} + HdsIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, ipVersion()) {} void createUpstreams() override { fake_upstreams_.emplace_back( @@ -41,11 +41,12 @@ class HdsIntegrationTest : public testing::TestWithParamset_api_type(envoy::config::core::v3::ApiConfigSource::GRPC); hds_config->add_grpc_services()->mutable_envoy_grpc()->set_cluster_name("hds_cluster"); + hds_config->set_transport_api_version(apiVersion()); auto* hds_cluster = bootstrap.mutable_static_resources()->add_clusters(); hds_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]); hds_cluster->mutable_circuit_breakers()->Clear(); @@ -209,6 +210,25 @@ class HdsIntegrationTest : public testing::TestWithParamlocalAddress())) { ASSERT_TRUE(hds_stream_->waitForGrpcMessage(*dispatcher_, response_)); + EXPECT_EQ("POST", hds_stream_->headers().getMethodValue()); + EXPECT_EQ(TestUtility::getVersionedMethodPath("envoy.service.{1}.{0}.HealthDiscoveryService", + "StreamHealthCheck", apiVersion(), + /*use_alpha=*/false, serviceNamespace()), + hds_stream_->headers().getPathValue()); + EXPECT_EQ("application/grpc", hds_stream_->headers().getContentTypeValue()); + } + } + + const std::string serviceNamespace() const { + switch (apiVersion()) { + case envoy::config::core::v3::ApiVersion::AUTO: + FALLTHRU; + case envoy::config::core::v3::ApiVersion::V2: + return "discovery"; + case envoy::config::core::v3::ApiVersion::V3: + return "health"; + default: + NOT_REACHED_GCOVR_EXCL_LINE; } } @@ -232,9 +252,8 @@ class HdsIntegrationTest : public testing::TestWithParam, +class LoadStatsIntegrationTest : public Grpc::VersionedGrpcClientIntegrationParamTest, public HttpIntegrationTest { public: - LoadStatsIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, GetParam()) { + LoadStatsIntegrationTest() 
: HttpIntegrationTest(Http::CodecClient::Type::HTTP1, ipVersion()) { // We rely on some fairly specific load balancing picks in this test, so // determinize the schedule. setDeterministic(); @@ -112,6 +112,7 @@ class LoadStatsIntegrationTest : public testing::TestWithParammutable_load_stats_config(); loadstats_config->set_api_type(envoy::config::core::v3::ApiConfigSource::GRPC); loadstats_config->add_grpc_services()->mutable_envoy_grpc()->set_cluster_name("load_report"); + loadstats_config->set_transport_api_version(apiVersion()); auto* load_report_cluster = bootstrap.mutable_static_resources()->add_clusters(); load_report_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]); load_report_cluster->mutable_circuit_breakers()->Clear(); @@ -282,8 +283,10 @@ class LoadStatsIntegrationTest : public testing::TestWithParamheaders().getMethodValue()); - EXPECT_EQ("/envoy.service.load_stats.v2.LoadReportingService/StreamLoadStats", - loadstats_stream_->headers().getPathValue()); + EXPECT_EQ( + TestUtility::getVersionedMethodPath("envoy.service.load_stats.{}.LoadReportingService", + "StreamLoadStats", apiVersion()), + loadstats_stream_->headers().getPathValue()); EXPECT_EQ("application/grpc", loadstats_stream_->headers().getContentTypeValue()); } while (!TestUtility::assertRepeatedPtrFieldEqual(expected_cluster_stats, loadstats_request.cluster_stats(), true)); @@ -376,9 +379,8 @@ class LoadStatsIntegrationTest : public testing::TestWithParam Date: Fri, 19 Jun 2020 05:10:06 -0700 Subject: [PATCH 389/909] docs: fix link in external dependency documentation (#11651) Signed-off-by: Tal Nordan --- bazel/EXTERNAL_DEPS.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bazel/EXTERNAL_DEPS.md b/bazel/EXTERNAL_DEPS.md index 7eebe1c3ec2b..160f925fdbf3 100644 --- a/bazel/EXTERNAL_DEPS.md +++ b/bazel/EXTERNAL_DEPS.md @@ -25,8 +25,8 @@ This is the preferred style of adding dependencies that use CMake for their buil 1. Define a the source Bazel repository in [`bazel/repositories.bzl`](repositories.bzl), in the `envoy_dependencies()` function. -2. Add a `cmake_external` rule to [`bazel/foreign_cc/BUILD`](bazel/foreign_cc/BUILD). This will - reference the source repository in step 1. +2. Add a `cmake_external` rule to [`bazel/foreign_cc/BUILD`](foreign_cc/BUILD). This will reference + the source repository in step 1. 3. Reference your new external dependency in some `envoy_cc_library` via the name bound in step 1 `external_deps` attribute. 4. `bazel test //test/...` From aeaf585fdc8a47030edc69d315baeb2c77dfdb21 Mon Sep 17 00:00:00 2001 From: antonio Date: Fri, 19 Jun 2020 12:40:31 -0400 Subject: [PATCH 390/909] [test] Fix handling of Timer::disableTimer when using simulated time (#11563) Commit Message: test: Fix handling of Timer::disableTimer when using simulated time on timers that are disabled between the time they are found to have fired and when the actual event_activate triggers. 
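The resulting semantics, as pinned down by the new TimerOrder* tests: disableTimer() now also cancels the underlying base timer, so a timer that was already selected to fire in the current loop iteration is skipped; enableTimer() with a zero delay on a still-pending timer is a no-op and the timer keeps its original slot; enableTimer() with a non-zero delay, or a disable/enable pair, re-registers the alarm and defers it. A small illustration (each line is an independent scenario, not a sequence, for a timer that has already been selected to fire in the current iteration):

    timer->disableTimer();                               // the timer does not fire at all
    timer->enableTimer(std::chrono::milliseconds(0));    // no-op: the timer keeps its original slot
    timer->enableTimer(std::chrono::milliseconds(100));  // the pending run is cancelled and the
                                                         // timer fires roughly 100ms later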
Additional Description: Risk Level: n/a test library changes Testing: unit Docs Changes: n/a Release Notes: n/a Signed-off-by: Antonio Vicente --- test/test_common/simulated_time_system.cc | 17 ++- test/test_common/simulated_time_system.h | 3 +- .../test_common/simulated_time_system_test.cc | 109 +++++++++++++++++- 3 files changed, 124 insertions(+), 5 deletions(-) diff --git a/test/test_common/simulated_time_system.cc b/test/test_common/simulated_time_system.cc index 6577dccf003e..b840fd137a6f 100644 --- a/test/test_common/simulated_time_system.cc +++ b/test/test_common/simulated_time_system.cc @@ -170,6 +170,7 @@ SimulatedTimeSystemHelper::Alarm::Alarm::~Alarm() { } void SimulatedTimeSystemHelper::Alarm::Alarm::disableTimer() { + base_timer_->disableTimer(); absl::MutexLock lock(&time_system_.mutex_); disableTimerLockHeld(); } @@ -179,12 +180,26 @@ void SimulatedTimeSystemHelper::Alarm::Alarm::disableTimerLockHeld() { time_system_.removeAlarmLockHeld(this); armed_ = false; } + if (pending_) { + pending_ = false; + time_system_.decPendingLockHeld(); + } } void SimulatedTimeSystemHelper::Alarm::Alarm::enableHRTimer( const std::chrono::microseconds& duration, const ScopeTrackedObject* scope) { + if (duration.count() != 0) { + disableTimer(); + } absl::MutexLock lock(&time_system_.mutex_); - disableTimerLockHeld(); + if (pending_) { + // Calling enableTimer on a timer that is already pending is a no-op. Timer will still fire + // based on the original time it was scheduled. + return; + } else if (armed_) { + disableTimerLockHeld(); + } + armed_ = true; if (duration.count() == 0) { activateLockHeld(scope); diff --git a/test/test_common/simulated_time_system.h b/test/test_common/simulated_time_system.h index cc34fcf953a9..2b31a564c8f7 100644 --- a/test/test_common/simulated_time_system.h +++ b/test/test_common/simulated_time_system.h @@ -97,8 +97,9 @@ class SimulatedTimeSystemHelper : public TestTimeSystem { void incPendingLockHeld() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_) { ++pending_alarms_; } void decPending() { absl::MutexLock lock(&mutex_); - --pending_alarms_; + decPendingLockHeld(); } + void decPendingLockHeld() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_) { --pending_alarms_; } void waitForNoPendingLockHeld() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_); RealTimeSource real_time_source_; // Used to initialize monotonic_time_ and system_time_; diff --git a/test/test_common/simulated_time_system_test.cc b/test/test_common/simulated_time_system_test.cc index df5565b455d0..efe47edd9e2e 100644 --- a/test/test_common/simulated_time_system_test.cc +++ b/test/test_common/simulated_time_system_test.cc @@ -3,6 +3,7 @@ #include "common/event/libevent_scheduler.h" #include "common/event/timer_impl.h" +#include "test/mocks/common.h" #include "test/mocks/event/mocks.h" #include "test/test_common/simulated_time_system.h" #include "test/test_common/utility.h" @@ -22,12 +23,25 @@ class SimulatedTimeSystemTest : public testing::Test { start_monotonic_time_(time_system_.monotonicTime()), start_system_time_(time_system_.systemTime()) {} - void addTask(int64_t delay_ms, char marker) { + void trackPrepareCalls() { + base_scheduler_.registerOnPrepareCallback([this]() { output_.append(1, 'p'); }); + } + + void addTask(int64_t delay_ms, char marker, bool expect_monotonic = true) { + addCustomTask( + delay_ms, marker, []() {}, expect_monotonic); + } + + void addCustomTask(int64_t delay_ms, char marker, std::function cb, + bool expect_monotonic = true) { std::chrono::milliseconds delay(delay_ms); TimerPtr timer = 
scheduler_->createTimer( - [this, marker, delay]() { + [this, marker, delay, cb, expect_monotonic]() { output_.append(1, marker); - EXPECT_GE(time_system_.monotonicTime(), start_monotonic_time_ + delay); + if (expect_monotonic) { + EXPECT_GE(time_system_.monotonicTime(), start_monotonic_time_ + delay); + } + cb(); }, dispatcher_); timer->enableTimer(delay); @@ -62,6 +76,95 @@ TEST_F(SimulatedTimeSystemTest, AdvanceTimeAsync) { EXPECT_EQ(start_system_time_ + std::chrono::milliseconds(5), time_system_.systemTime()); } +TEST_F(SimulatedTimeSystemTest, TimerOrdering) { + trackPrepareCalls(); + + addTask(0, '0'); + addTask(1, '1'); + addTask(2, '2'); + EXPECT_EQ(3, timers_.size()); + + advanceMsAndLoop(5); + + // Verify order. + EXPECT_EQ("p012", output_); +} + +// Timers that are scheduled to execute and but are disabled first do not trigger. +TEST_F(SimulatedTimeSystemTest, TimerOrderAndDisableTimer) { + trackPrepareCalls(); + + // Create 3 timers. The first timer should disable the second, so it doesn't trigger. + addCustomTask(0, '0', [this]() { timers_[1]->disableTimer(); }); + addTask(1, '1'); + addTask(2, '2'); + EXPECT_EQ(3, timers_.size()); + + // Expect timers to execute in order since the timers are scheduled at have different times and + // that timer 1 does not execute because it was disabled as part of 0's execution. + advanceMsAndLoop(5); + // Verify that timer 1 was skipped. + EXPECT_EQ("p02", output_); +} + +// Capture behavior of timers which are rescheduled without being disabled first. +TEST_F(SimulatedTimeSystemTest, TimerOrderAndRescheduleTimer) { + trackPrepareCalls(); + + // Reschedule timers 1, 2 and 4 without disabling first. + addCustomTask(0, '0', [this]() { + timers_[1]->enableTimer(std::chrono::milliseconds(0)); + timers_[2]->enableTimer(std::chrono::milliseconds(100)); + timers_[4]->enableTimer(std::chrono::milliseconds(0)); + }); + addTask(1, '1'); + addTask(2, '2'); + addTask(3, '3'); + addTask(10000, '4', false); + EXPECT_EQ(5, timers_.size()); + + // Rescheduling timers that are already scheduled to run in the current event loop iteration has + // no effect if the time delta is 0. Expect timers 0, 1 and 3 to execute in the original order. + // Timer 4 runs as part of the first wakeup since its new schedule time has a delta of 0. Timer 2 + // is delayed since it is rescheduled with a non-zero delta. + advanceMsAndLoop(5); + EXPECT_EQ("p0134", output_); + + advanceMsAndLoop(100); + EXPECT_EQ("p0134p2", output_); +} + +// Disable and re-enable timers that is already pending execution and verify that execution is +// delayed. +TEST_F(SimulatedTimeSystemTest, TimerOrderDisableAndRescheduleTimer) { + trackPrepareCalls(); + + // Disable and reschedule timers 1, 2 and 4 when timer 0 triggers. + addCustomTask(0, '0', [this]() { + timers_[1]->disableTimer(); + timers_[1]->enableTimer(std::chrono::milliseconds(0)); + timers_[2]->disableTimer(); + timers_[2]->enableTimer(std::chrono::milliseconds(100)); + timers_[4]->disableTimer(); + timers_[4]->enableTimer(std::chrono::milliseconds(0)); + }); + addTask(1, '1'); + addTask(2, '2'); + addTask(3, '3'); + addTask(10000, '4', false); + EXPECT_EQ(5, timers_.size()); + + // timer 0 is expected to run first and reschedule timers 1 and 2. Timer 3 should fire before + // timer 1 since timer 3's registration is unaffected. timer 1 runs in the same iteration + // because it is scheduled with zero delay. Timer 2 executes in a later iteration because it is + // re-enabled with a non-zero timeout. 
+ advanceMsAndLoop(5); + EXPECT_EQ("p0314", output_); + + advanceMsAndLoop(100); + EXPECT_EQ("p0314p2", output_); +} + TEST_F(SimulatedTimeSystemTest, AdvanceTimeWait) { EXPECT_EQ(start_monotonic_time_, time_system_.monotonicTime()); EXPECT_EQ(start_system_time_, time_system_.systemTime()); From 4237cfed60f0de1b5a2760a9fb87d98e56b2332e Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Fri, 19 Jun 2020 10:01:32 -0700 Subject: [PATCH 391/909] ci: disable cache in AWS pool (#11633) Bottleneck of coverage build was network instead of CPU, while network transfer is expensive. The coverage build artifacts is huge, saving cache storage + network by CPU. At least until we have cache inside AWS. Along with some CI optimization: - remove workarounds for self run agents - tar coverage report for artifact to reduce upload time - append arm64 for arm64 artifacts Signed-off-by: Lizan Zhou --- .azure-pipelines/bazel.yml | 51 +++++++++++++------------------- .azure-pipelines/pipelines.yml | 11 +++---- .bazelrc | 4 +-- ci/build_setup.sh | 3 +- test/run_envoy_bazel_coverage.sh | 8 +++-- 5 files changed, 36 insertions(+), 41 deletions(-) diff --git a/.azure-pipelines/bazel.yml b/.azure-pipelines/bazel.yml index 2683b2f1a53a..ea96e3778742 100644 --- a/.azure-pipelines/bazel.yml +++ b/.azure-pipelines/bazel.yml @@ -3,25 +3,22 @@ parameters: displayName: "CI target" type: string default: bazel.release + - name: artifactSuffix + displayName: "Suffix of artifact" + type: string + default: "" - name: rbe displayName: "Enable RBE" - type: string - default: "true" - - name: bazelBuildExtraOptions - type: string - # Use https://docs.bazel.build/versions/master/command-line-reference.html#flag--experimental_repository_cache_hardlinks - # to save disk space. - default: "--config=remote-ci --jobs=$(RbeJobs) --curses=no --experimental_repository_cache_hardlinks" + type: boolean + default: true - name: managedAgent type: boolean default: true + - name: bazelBuildExtraOptions + type: string + default: "" steps: - - bash: | - /usr/local/bin/set-instance-protection.sh on - displayName: "Set Instance Protection on Agent to prevent scale in" - condition: eq(false, ${{ parameters.managedAgent }}) - - task: Cache@2 inputs: key: '"${{ parameters.ciTarget }}" | ./WORKSPACE | **/*.bzl' @@ -51,11 +48,17 @@ steps: workingDirectory: $(Build.SourcesDirectory) env: ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory) - ENVOY_RBE: "${{ parameters.rbe }}" - BAZEL_BUILD_EXTRA_OPTIONS: "${{ parameters.bazelBuildExtraOptions }}" - BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com - BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance - GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) + ${{ if parameters.rbe }}: + ENVOY_RBE: "1" + # Use https://docs.bazel.build/versions/master/command-line-reference.html#flag--experimental_repository_cache_hardlinks + # to save disk space. 
+ BAZEL_BUILD_EXTRA_OPTIONS: "--config=remote-ci --jobs=$(RbeJobs) --curses=no --experimental_repository_cache_hardlinks ${{ parameters.bazelBuildExtraOptions }}" + BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com + BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance + GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) + ${{ if eq(parameters.rbe, false) }}: + BAZEL_BUILD_EXTRA_OPTIONS: "--curses=no --experimental_repository_cache_hardlinks ${{ parameters.bazelBuildExtraOptions }}" + displayName: "Run CI script" - bash: | @@ -74,17 +77,5 @@ steps: - task: PublishBuildArtifacts@1 inputs: pathtoPublish: "$(Build.StagingDirectory)/envoy" - artifactName: ${{ parameters.ciTarget }} + artifactName: ${{ parameters.ciTarget }}${{ parameters.artifactSuffix }} condition: always() - - # TODO(lizan): This is a workaround for self hosted azure agent can't clean up bazel local cache due to - # permission. Remove this once it is resolved. - - bash: | - chmod -R u+w $(Build.StagingDirectory) - displayName: "Self hosted agent clean up" - condition: eq(false, ${{ parameters.managedAgent }}) - - - bash: | - /usr/local/bin/set-instance-protection.sh off - displayName: "Set Instance Protection on Agent to prevent scale in" - condition: eq(false, ${{ parameters.managedAgent }}) \ No newline at end of file diff --git a/.azure-pipelines/pipelines.yml b/.azure-pipelines/pipelines.yml index 8ca800f172e5..599f45425384 100644 --- a/.azure-pipelines/pipelines.yml +++ b/.azure-pipelines/pipelines.yml @@ -60,8 +60,8 @@ jobs: parameters: managedAgent: false ciTarget: bazel.release.server_only - rbe: "" - bazelBuildExtraOptions: "--curses=no" + rbe: false + artifactSuffix: ".arm64" - job: bazel displayName: "Linux-x64" @@ -99,10 +99,11 @@ jobs: parameters: managedAgent: false ciTarget: bazel.coverage - rbe: "" - bazelBuildExtraOptions: "--test_env=ENVOY_IP_TEST_VERSIONS=v4only --curses=no" + rbe: false + # /tmp/sandbox_base is a tmpfs in CI environment to optimize large I/O for coverage traces + bazelBuildExtraOptions: "--test_env=ENVOY_IP_TEST_VERSIONS=v4only --sandbox_base=/tmp/sandbox_base" - - script: ci/run_envoy_docker.sh 'ci/upload_gcs_artifact.sh /build/envoy/generated/coverage coverage' + - script: ci/run_envoy_docker.sh 'ci/upload_gcs_artifact.sh /source/generated/coverage coverage' displayName: "Upload Report to GCS" env: ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory) diff --git a/.bazelrc b/.bazelrc index 84c7ecf7fa6e..b7bd7b87950c 100644 --- a/.bazelrc +++ b/.bazelrc @@ -115,8 +115,8 @@ coverage --config=coverage build:coverage --action_env=BAZEL_USE_LLVM_NATIVE_COVERAGE=1 build:coverage --action_env=GCOV=llvm-profdata build:coverage --copt=-DNDEBUG -# Doubling timeout in all categories -build:coverage --test_timeout=120,600,1800,7200 +# 1.5x original timeout + 300s for trace merger in all categories +build:coverage --test_timeout=390,750,1500,5700 build:coverage --define=ENVOY_CONFIG_COVERAGE=1 build:coverage --cxxopt="-DENVOY_CONFIG_COVERAGE=1" build:coverage --coverage_support=@envoy//bazel/coverage:coverage_support diff --git a/ci/build_setup.sh b/ci/build_setup.sh index 29e3098bf148..fcf9ae633fe4 100755 --- a/ci/build_setup.sh +++ b/ci/build_setup.sh @@ -103,8 +103,7 @@ export ENVOY_DELIVERY_DIR="${ENVOY_BUILD_DIR}"/source/exe mkdir -p "${ENVOY_DELIVERY_DIR}" # This is where we copy the coverage report to. 
-export ENVOY_COVERAGE_DIR="${ENVOY_BUILD_DIR}"/generated/coverage -mkdir -p "${ENVOY_COVERAGE_DIR}" +export ENVOY_COVERAGE_ARTIFACT="${ENVOY_BUILD_DIR}"/generated/coverage.tar.gz # This is where we dump failed test logs for CI collection. export ENVOY_FAILED_TEST_LOGS="${ENVOY_BUILD_DIR}"/generated/failed-testlogs diff --git a/test/run_envoy_bazel_coverage.sh b/test/run_envoy_bazel_coverage.sh index 84c66b79f349..7b34381d7167 100755 --- a/test/run_envoy_bazel_coverage.sh +++ b/test/run_envoy_bazel_coverage.sh @@ -30,7 +30,11 @@ else BAZEL_BUILD_OPTIONS+=" --config=test-coverage --test_tag_filters=-nocoverage,-fuzz_target" fi -bazel coverage ${BAZEL_BUILD_OPTIONS} --test_output=all ${COVERAGE_TARGETS} +bazel coverage ${BAZEL_BUILD_OPTIONS} ${COVERAGE_TARGETS} + +# Collecting profile and testlogs +[[ -z "${ENVOY_BUILD_PROFILE}" ]] || cp -f "$(bazel info output_base)/command.profile.gz" "${ENVOY_BUILD_PROFILE}/coverage.profile.gz" || true +[[ -z "${ENVOY_BUILD_DIR}" ]] || find bazel-testlogs/ -name test.log | tar zcf "${ENVOY_BUILD_DIR}/testlogs.tar.gz" -T - COVERAGE_DIR="${SRCDIR}"/generated/coverage @@ -43,7 +47,7 @@ cp bazel-out/_coverage/_coverage_report.dat "${COVERAGE_DATA}" COVERAGE_VALUE=$(genhtml --prefix ${PWD} --output "${COVERAGE_DIR}" "${COVERAGE_DATA}" | tee /dev/stderr | grep lines... | cut -d ' ' -f 4) COVERAGE_VALUE=${COVERAGE_VALUE%?} -[[ -z "${ENVOY_COVERAGE_DIR}" ]] || rsync -av "${COVERAGE_DIR}"/ "${ENVOY_COVERAGE_DIR}" +[[ -z "${ENVOY_COVERAGE_ARTIFACT}" ]] || tar zcf "${ENVOY_COVERAGE_ARTIFACT}" -C ${COVERAGE_DIR} --transform 's/^\./coverage/' . if [[ "$VALIDATE_COVERAGE" == "true" ]]; then if [[ "${FUZZ_COVERAGE}" == "true" ]]; then From cfb163d3487a9820a65907e147f4d5fa4627c99e Mon Sep 17 00:00:00 2001 From: "Adi (Suissa) Peleg" Date: Fri, 19 Jun 2020 14:51:56 -0400 Subject: [PATCH 392/909] Fixing HCM fuzzer ContinueAndEndStream with end_stream set (#11497) Fixing the HCM fuzzer to avoid cases where its decodeHeaders method returns FilterHeadersStatus::ContinueAndEndStream when called with end_stream=true. This assert was added in #4885, to avoid misuse of the FilterHeadersStatus::ContinueAndEndStream value. Risk Level: Low - tests only Testing: Changed HCM fuzzer test code Fixes oss fuzz issue 21971 Signed-off-by: Adi Suissa-Peleg --- docs/root/faq/extensions/contract.rst | 48 +++++++++++++++++++ docs/root/faq/overview.rst | 8 ++++ include/envoy/http/filter.h | 1 + ...zz-testcase-continueandendstream-endstream | 16 +++++++ .../http/conn_manager_impl_fuzz_test.cc | 15 ++++-- 5 files changed, 83 insertions(+), 5 deletions(-) create mode 100644 docs/root/faq/extensions/contract.rst create mode 100644 test/common/http/conn_manager_impl_corpus/clusterfuzz-testcase-continueandendstream-endstream diff --git a/docs/root/faq/extensions/contract.rst b/docs/root/faq/extensions/contract.rst new file mode 100644 index 000000000000..35e9a05f06ba --- /dev/null +++ b/docs/root/faq/extensions/contract.rst @@ -0,0 +1,48 @@ +.. _faq_filter_contract: + +Is there a contract my HTTP filter must adhere to? +-------------------------------------------------- + +* Headers encoding/decoding + + * During encoding/decoding of headers if a filter returns ``FilterHeadersStatus::StopIteration``, + the processing can be resumed if ``encodeData()``/``decodeData()`` return + ``FilterDataStatus::Continue`` or by explicitly calling + ``continueEncoding()``/``continueDecoding()``. 
+ + * During encoding/decoding of headers if a filter returns + ``FilterHeadersStatus::StopAllIterationAndBuffer`` or + ``FilterHeadersStatus::StopAllIterationAndWatermark``, the processing can be resumed by calling + ``continueEncoding()``/``continueDecoding()``. + + * A filter's ``decodeHeaders()`` implementation must not return + ``FilterHeadersStatus::ContinueAndEndStream`` when called with ``end_stream`` set to *true*. In this case + ``FilterHeadersStatus::Continue`` should be returned. + + * A filter's ``encode100ContinueHeaders()`` must return ``FilterHeadersStatus::Continue`` or + ``FilterHeadersStatus::StopIteration``. + +* Data encoding/decoding + + * During encoding/decoding of data if a filter returns + ``FilterDataStatus::StopIterationAndBuffer``, ``FilterDataStatus::StopIterationAndWatermark``, + or ``FilterDataStatus::StopIterationNoBuffer``, the processing can be resumed if + ``encodeData()``/``decodeData()`` return ``FilterDataStatus::Continue`` or by explicitly + calling ``continueEncoding()``/``continueDecoding()``. + +* Trailers encoding/decoding + + * During encoding/decoding of trailers if a filter returns ``FilterTrailersStatus::StopIteration``, + the processing can be resumed by explicitly calling ``continueEncoding()``/``continueDecoding()``. + +Are there well-known headers that will appear in the given headers map of ``decodeHeaders()``? +---------------------------------------------------------------------------------------------- + +The first filter of the decoding filter chain will have the following headers in the map: + +* ``Host`` +* ``Path`` (this might be omitted for CONNECT requests). + +Although these headers may be omitted by one of the filters on the decoding filter chain, +they should be reinserted before the terminal filter is triggered. + diff --git a/docs/root/faq/overview.rst b/docs/root/faq/overview.rst index 47156f7cd70e..3a65808a8b8b 100644 --- a/docs/root/faq/overview.rst +++ b/docs/root/faq/overview.rst @@ -72,3 +72,11 @@ Load balancing load_balancing/disable_circuit_breaking load_balancing/transient_failures load_balancing/region_failover + +Extensions +---------- + +.. toctree:: + :maxdepth: 2 + + extensions/contract diff --git a/include/envoy/http/filter.h b/include/envoy/http/filter.h index 0d6e243140a6..ee3133b3b1ee 100644 --- a/include/envoy/http/filter.h +++ b/include/envoy/http/filter.h @@ -34,6 +34,7 @@ enum class FilterHeadersStatus { StopIteration, // Continue iteration to remaining filters, but ignore any subsequent data or trailers. This // results in creating a header only request/response. + // This status MUST NOT be returned by decodeHeaders() when end_stream is set to true. ContinueAndEndStream, // Do not iterate for headers as well as data and trailers for the current filter and the filters // following, and buffer body data for later dispatching. 
ContinueDecoding() MUST diff --git a/test/common/http/conn_manager_impl_corpus/clusterfuzz-testcase-continueandendstream-endstream b/test/common/http/conn_manager_impl_corpus/clusterfuzz-testcase-continueandendstream-endstream new file mode 100644 index 000000000000..2b64d93f4355 --- /dev/null +++ b/test/common/http/conn_manager_impl_corpus/clusterfuzz-testcase-continueandendstream-endstream @@ -0,0 +1,16 @@ +actions { + new_stream { + request_headers { + headers { + key: ":path" + value: "/" + } + headers { + key: ":authority" + value: "foo.com" + } + } + end_stream: true + status: HEADER_CONTINUE_AND_END_STREAM + } +} diff --git a/test/common/http/conn_manager_impl_fuzz_test.cc b/test/common/http/conn_manager_impl_fuzz_test.cc index e151b1656fd6..196b8bdc24d2 100644 --- a/test/common/http/conn_manager_impl_fuzz_test.cc +++ b/test/common/http/conn_manager_impl_fuzz_test.cc @@ -284,11 +284,16 @@ class FuzzStream { return Http::okStatus(); })); ON_CALL(*decoder_filter_, decodeHeaders(_, _)) - .WillByDefault( - InvokeWithoutArgs([this, decode_header_status]() -> Http::FilterHeadersStatus { - header_status_ = fromHeaderStatus(decode_header_status); - return *header_status_; - })); + .WillByDefault(InvokeWithoutArgs([this, decode_header_status, + end_stream]() -> Http::FilterHeadersStatus { + header_status_ = fromHeaderStatus(decode_header_status); + // When a filter should not return ContinueAndEndStream when send with end_stream set + // (see https://github.com/envoyproxy/envoy/pull/4885#discussion_r232176826) + if (end_stream && (*header_status_ == Http::FilterHeadersStatus::ContinueAndEndStream)) { + *header_status_ = Http::FilterHeadersStatus::Continue; + } + return *header_status_; + })); fakeOnData(); FUZZ_ASSERT(testing::Mock::VerifyAndClearExpectations(config_.codec_)); } From 019353e8aabd28da8c73204167997cf9e51220a5 Mon Sep 17 00:00:00 2001 From: Arthur Yan <55563955+arthuryan-k@users.noreply.github.com> Date: Fri, 19 Jun 2020 17:09:47 -0400 Subject: [PATCH 393/909] fuzz: fix oss-fuzz crash related to channelArgsFromConfig (#11641) Added regression test to server_corpus Risk Level: Low Testing: passes regression test that originally crashed on oss-fuzz Docs Changes: N/A Release Notes: N/A Fixes: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=22824 Signed-off-by: Arthur Yan --- api/envoy/config/core/v3/grpc_service.proto | 2 ++ .../config/core/v4alpha/grpc_service.proto | 2 ++ .../envoy/config/core/v3/grpc_service.proto | 2 ++ .../config/core/v4alpha/grpc_service.proto | 2 ++ ...minimized-server_fuzz_test-5714049408172032 | 18 ++++++++++++++++++ 5 files changed, 26 insertions(+) create mode 100644 test/server/server_corpus/clusterfuzz-testcase-minimized-server_fuzz_test-5714049408172032 diff --git a/api/envoy/config/core/v3/grpc_service.proto b/api/envoy/config/core/v3/grpc_service.proto index cf7663b3487f..3f62884df6e3 100644 --- a/api/envoy/config/core/v3/grpc_service.proto +++ b/api/envoy/config/core/v3/grpc_service.proto @@ -209,6 +209,8 @@ message GrpcService { // Pointer values are not supported, since they don't make any sense when // delivered via the API. 
oneof value_specifier { + option (validate.required) = true; + string string_value = 1; int64 int_value = 2; diff --git a/api/envoy/config/core/v4alpha/grpc_service.proto b/api/envoy/config/core/v4alpha/grpc_service.proto index 3abff88ea4fc..4c95bb9e9853 100644 --- a/api/envoy/config/core/v4alpha/grpc_service.proto +++ b/api/envoy/config/core/v4alpha/grpc_service.proto @@ -215,6 +215,8 @@ message GrpcService { // Pointer values are not supported, since they don't make any sense when // delivered via the API. oneof value_specifier { + option (validate.required) = true; + string string_value = 1; int64 int_value = 2; diff --git a/generated_api_shadow/envoy/config/core/v3/grpc_service.proto b/generated_api_shadow/envoy/config/core/v3/grpc_service.proto index 04d14566934e..f4d41ddba258 100644 --- a/generated_api_shadow/envoy/config/core/v3/grpc_service.proto +++ b/generated_api_shadow/envoy/config/core/v3/grpc_service.proto @@ -207,6 +207,8 @@ message GrpcService { // Pointer values are not supported, since they don't make any sense when // delivered via the API. oneof value_specifier { + option (validate.required) = true; + string string_value = 1; int64 int_value = 2; diff --git a/generated_api_shadow/envoy/config/core/v4alpha/grpc_service.proto b/generated_api_shadow/envoy/config/core/v4alpha/grpc_service.proto index 3abff88ea4fc..4c95bb9e9853 100644 --- a/generated_api_shadow/envoy/config/core/v4alpha/grpc_service.proto +++ b/generated_api_shadow/envoy/config/core/v4alpha/grpc_service.proto @@ -215,6 +215,8 @@ message GrpcService { // Pointer values are not supported, since they don't make any sense when // delivered via the API. oneof value_specifier { + option (validate.required) = true; + string string_value = 1; int64 int_value = 2; diff --git a/test/server/server_corpus/clusterfuzz-testcase-minimized-server_fuzz_test-5714049408172032 b/test/server/server_corpus/clusterfuzz-testcase-minimized-server_fuzz_test-5714049408172032 new file mode 100644 index 000000000000..db5e415d569b --- /dev/null +++ b/test/server/server_corpus/clusterfuzz-testcase-minimized-server_fuzz_test-5714049408172032 @@ -0,0 +1,18 @@ +cluster_manager { + load_stats_config { + api_type: GRPC + grpc_services { + google_grpc { + target_uri: "18446744073709551617" + stat_prefix: "2147483649" + channel_args { + args { + key: "\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000" + value { + } + } + } + } + } + } +} From 7f251daa2e488587bc7335f91faceed420f162c4 Mon Sep 17 00:00:00 2001 From: Pengyuan Bian Date: Fri, 19 Jun 2020 14:12:45 -0700 Subject: [PATCH 394/909] Wrap google grpc usage in opencensus tracer (#11632) fixes #11574 Signed-off-by: Pengyuan Bian --- source/extensions/tracers/opencensus/BUILD | 4 +- .../opencensus/opencensus_tracer_impl.cc | 17 +++++++- .../tracers/opencensus/config_test.cc | 42 +++++++++++++++++++ 3 files changed, 60 insertions(+), 3 deletions(-) diff --git a/source/extensions/tracers/opencensus/BUILD b/source/extensions/tracers/opencensus/BUILD index d0a66792196a..eb8ee9cb879e 100644 --- a/source/extensions/tracers/opencensus/BUILD +++ b/source/extensions/tracers/opencensus/BUILD @@ -3,6 +3,7 @@ load( "envoy_cc_extension", "envoy_cc_library", "envoy_package", + "envoy_select_google_grpc", ) licenses(["notice"]) # Apache 2 @@ -42,8 +43,7 @@ envoy_cc_library( ], deps = [ "//source/common/config:utility_lib", - "//source/common/grpc:google_async_client_lib", "//source/common/tracing:http_tracer_lib", "@envoy_api//envoy/config/trace/v3:pkg_cc_proto", - ], + ] + 
envoy_select_google_grpc(["//source/common/grpc:google_async_client_lib"]), ) diff --git a/source/extensions/tracers/opencensus/opencensus_tracer_impl.cc b/source/extensions/tracers/opencensus/opencensus_tracer_impl.cc index c39d4ebddd56..53e82591350f 100644 --- a/source/extensions/tracers/opencensus/opencensus_tracer_impl.cc +++ b/source/extensions/tracers/opencensus/opencensus_tracer_impl.cc @@ -6,7 +6,6 @@ #include "envoy/http/header_map.h" #include "common/common/base64.h" -#include "common/grpc/google_grpc_utils.h" #include "absl/strings/str_cat.h" #include "google/devtools/cloudtrace/v2/tracing.grpc.pb.h" @@ -24,12 +23,18 @@ #include "opencensus/trace/trace_config.h" #include "opencensus/trace/trace_params.h" +#ifdef ENVOY_GOOGLE_GRPC +#include "common/grpc/google_grpc_utils.h" +#endif + namespace Envoy { namespace Extensions { namespace Tracers { namespace OpenCensus { +#ifdef ENVOY_GOOGLE_GRPC constexpr char GoogleStackdriverTraceAddress[] = "cloudtrace.googleapis.com"; +#endif namespace { @@ -267,6 +272,7 @@ Driver::Driver(const envoy::config::trace::v3::OpenCensusConfig& oc_config, opts.trace_service_stub = ::google::devtools::cloudtrace::v2::TraceService::NewStub(channel); } else if (oc_config.has_stackdriver_grpc_service() && oc_config.stackdriver_grpc_service().has_google_grpc()) { +#ifdef ENVOY_GOOGLE_GRPC envoy::config::core::v3::GrpcService stackdriver_service = oc_config.stackdriver_grpc_service(); if (stackdriver_service.google_grpc().target_uri().empty()) { @@ -276,6 +282,10 @@ Driver::Driver(const envoy::config::trace::v3::OpenCensusConfig& oc_config, } auto channel = Envoy::Grpc::GoogleGrpcUtils::createChannel(stackdriver_service, api); opts.trace_service_stub = ::google::devtools::cloudtrace::v2::TraceService::NewStub(channel); +#else + throw EnvoyException("Opencensus tracer: cannot handle stackdriver google grpc service, " + "google grpc is not built in."); +#endif } ::opencensus::exporters::trace::StackdriverExporter::Register(std::move(opts)); } @@ -290,11 +300,16 @@ Driver::Driver(const envoy::config::trace::v3::OpenCensusConfig& oc_config, opts.address = oc_config.ocagent_address(); } else if (oc_config.has_ocagent_grpc_service() && oc_config.ocagent_grpc_service().has_google_grpc()) { +#ifdef ENVOY_GOOGLE_GRPC const envoy::config::core::v3::GrpcService& ocagent_service = oc_config.ocagent_grpc_service(); auto channel = Envoy::Grpc::GoogleGrpcUtils::createChannel(ocagent_service, api); opts.trace_service_stub = ::opencensus::proto::agent::trace::v1::TraceService::NewStub(channel); +#else + throw EnvoyException("Opencensus tracer: cannot handle ocagent google grpc service, google " + "grpc is not built in."); +#endif } opts.service_name = local_info_.clusterName(); ::opencensus::exporters::trace::OcAgentExporter::Register(std::move(opts)); diff --git a/test/extensions/tracers/opencensus/config_test.cc b/test/extensions/tracers/opencensus/config_test.cc index 29888485e3de..0f5baa929bdc 100644 --- a/test/extensions/tracers/opencensus/config_test.cc +++ b/test/extensions/tracers/opencensus/config_test.cc @@ -154,12 +154,18 @@ TEST(OpenCensusTracerConfigTest, OpenCensusHttpTracerGrpc) { OpenCensusTracerFactory factory; auto message = Config::Utility::translateToFactoryConfig( configuration.http(), ProtobufMessage::getStrictValidationVisitor(), factory); +#ifdef ENVOY_GOOGLE_GRPC Tracing::HttpTracerSharedPtr tracer = factory.createHttpTracer(*message, context); EXPECT_NE(nullptr, tracer); // Reset TraceParams back to default. 
::opencensus::trace::TraceConfig::SetCurrentTraceParams( {32, 32, 128, 32, ::opencensus::trace::ProbabilitySampler(1e-4)}); +#else + EXPECT_THROW_WITH_MESSAGE( + (factory.createHttpTracer(*message, context)), EnvoyException, + "Opencensus tracer: cannot handle ocagent google grpc service, google grpc is not built in."); +#endif } TEST(OpenCensusTracerConfigTest, ShouldCreateAtMostOneOpenCensusTracer) { @@ -253,9 +259,15 @@ TEST(OpenCensusTracerConfigTest, ShouldNotCacheInvalidConfiguration) { auto message_two = Config::Utility::translateToFactoryConfig( configuration_two.http(), ProtobufMessage::getStrictValidationVisitor(), factory); +#ifdef ENVOY_GOOGLE_GRPC Tracing::HttpTracerSharedPtr tracer_two = factory.createHttpTracer(*message_two, context); // Verify that a new tracer has been created despite an earlier failed attempt. EXPECT_NE(nullptr, tracer_two); +#else + EXPECT_THROW_WITH_MESSAGE( + (factory.createHttpTracer(*message_two, context)), EnvoyException, + "Opencensus tracer: cannot handle ocagent google grpc service, google grpc is not built in."); +#endif } TEST(OpenCensusTracerConfigTest, ShouldRejectSubsequentCreateAttemptsWithDifferentConfig) { @@ -298,6 +310,36 @@ TEST(OpenCensusTracerConfigTest, ShouldRejectSubsequentCreateAttemptsWithDiffere "Opencensus has already been configured with a different config."); } +TEST(OpenCensusTracerConfigTest, OpenCensusHttpTracerStackdriverGrpc) { + NiceMock context; + const std::string yaml_string = R"EOF( + http: + name: opencensus + typed_config: + "@type": type.googleapis.com/envoy.config.trace.v2.OpenCensusConfig + stackdriver_exporter_enabled: true + stackdriver_grpc_service: + google_grpc: + target_uri: 127.0.0.1:55678 + stat_prefix: test + )EOF"; + + envoy::config::trace::v3::Tracing configuration; + TestUtility::loadFromYaml(yaml_string, configuration); + + OpenCensusTracerFactory factory; + auto message = Config::Utility::translateToFactoryConfig( + configuration.http(), ProtobufMessage::getStrictValidationVisitor(), factory); +#ifdef ENVOY_GOOGLE_GRPC + Tracing::HttpTracerSharedPtr tracer = factory.createHttpTracer(*message, context); + EXPECT_NE(nullptr, tracer); +#else + EXPECT_THROW_WITH_MESSAGE((factory.createHttpTracer(*message, context)), EnvoyException, + "Opencensus tracer: cannot handle stackdriver google grpc service, " + "google grpc is not built in."); +#endif +} + } // namespace OpenCensus } // namespace Tracers } // namespace Extensions From 6c0f88411709424218b1c483958a3096765ce724 Mon Sep 17 00:00:00 2001 From: yanavlasov Date: Sun, 21 Jun 2020 20:55:49 -0400 Subject: [PATCH 395/909] docs: Update Threat Model doc with security release process checklist (#11244) Risk Level: Low (doc only) Testing: Doc build Docs Changes: Yes Signed-off-by: Yan Avlasov --- .../arch_overview/security/threat_model.rst | 38 ++++++++++++++----- 1 file changed, 28 insertions(+), 10 deletions(-) diff --git a/docs/root/intro/arch_overview/security/threat_model.rst b/docs/root/intro/arch_overview/security/threat_model.rst index 765d1ba0d3a7..f0e4713e672b 100644 --- a/docs/root/intro/arch_overview/security/threat_model.rst +++ b/docs/root/intro/arch_overview/security/threat_model.rst @@ -15,16 +15,34 @@ highest priority concerns. Availability, in particular in areas relating to DoS exhaustion, is also a serious security concern for Envoy operators, in particular those utilizing Envoy in edge deployments. -The Envoy availability stance around CPU and memory DoS, as well as Query-of-Death (QoD), is still -evolving. 
We will continue to iterate and fix well known resource issues in the open, e.g. overload -manager and watermark improvements. We will activate the security process for disclosures that -appear to present a risk profile that is significantly greater than the current Envoy availability -hardening status quo. Examples of disclosures that would elicit this response: - -* QoD; where a single query from a client can bring down an Envoy server. - -* Highly asymmetric resource exhaustion attacks, where very little traffic can cause resource exhaustion, - e.g. that delivered by a single client. +We will activate the security release process for disclosures that meet the following criteria: + +* All issues that lead to loss of data confidentiality or integrity trigger the security release process. +* An availability issue, such as Query-of-Death (QoD) or resource exhaustion needs to meet all of the + following criteria to trigger the security release process: + + - A component tagged as hardened is affected (see `Core and extensions`_ for the list of hardened components). + + - The type of traffic (upstream or downstream) that exhibits the issue matches the component's hardening tag. + I.e. component tagged as “hardened to untrusted downstream” is affected by downstream request. + + - A resource exhaustion issue needs to meet these additional criteria: + + + Not covered by an existing timeout or where applying short timeout values is impractical and either + + + Memory exhaustion, including out of memory conditions, where per-request memory use 100x or more above + the configured header or high watermark limit. I.e. 10 KiB client request leading to 1 MiB bytes of + memory consumed by Envoy; + + + Highly asymmetric CPU utilization where Envoy uses 100x or more CPU compared to client. + + +The Envoy availability stance around CPU and memory DoS is still evolving, especially for brute force +attacks. We acknowledge that brute force (i.e. these with amplification factor less than 100) attacks are +likely for Envoy deployments as part of cloud infrastructure or with the use of botnets. We will continue +to iterate and fix well known resource issues in the open, e.g. overload manager and watermark improvements. +We will activate the security process for brute force disclosures that appear to present a risk to +existing Envoy deployments. Note that we do not currently consider the default settings for Envoy to be safe from an availability perspective. It is necessary for operators to explicitly :ref:`configure ` From 1bb927ed5ca4e292f5bec61cc045bf16de2e020e Mon Sep 17 00:00:00 2001 From: Yutong Li Date: Sun, 21 Jun 2020 17:57:11 -0700 Subject: [PATCH 396/909] api: add eds config to csds proto (#11662) EDS config has been added to config_dump in #11425 and implemented in #11577 . Risk Level: Low Testing: N/A Signed-off-by: Yutong Li --- api/envoy/service/status/v3/csds.proto | 5 ++++- api/envoy/service/status/v4alpha/csds.proto | 5 ++++- generated_api_shadow/envoy/service/status/v3/csds.proto | 5 ++++- generated_api_shadow/envoy/service/status/v4alpha/csds.proto | 5 ++++- 4 files changed, 16 insertions(+), 4 deletions(-) diff --git a/api/envoy/service/status/v3/csds.proto b/api/envoy/service/status/v3/csds.proto index 3347def21d8f..beccfb8cb58e 100644 --- a/api/envoy/service/status/v3/csds.proto +++ b/api/envoy/service/status/v3/csds.proto @@ -64,7 +64,7 @@ message ClientStatusRequest { } // Detailed config (per xDS) with status. 
-// [#next-free-field: 6] +// [#next-free-field: 7] message PerXdsConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.service.status.v2.PerXdsConfig"; @@ -79,6 +79,9 @@ message PerXdsConfig { admin.v3.RoutesConfigDump route_config = 4; admin.v3.ScopedRoutesConfigDump scoped_route_config = 5; + + // [#not-implemented-hide:] + admin.v3.EndpointsConfigDump endpoint_config = 6; } } diff --git a/api/envoy/service/status/v4alpha/csds.proto b/api/envoy/service/status/v4alpha/csds.proto index f6f5fa654d70..2286eb94a8a7 100644 --- a/api/envoy/service/status/v4alpha/csds.proto +++ b/api/envoy/service/status/v4alpha/csds.proto @@ -64,7 +64,7 @@ message ClientStatusRequest { } // Detailed config (per xDS) with status. -// [#next-free-field: 6] +// [#next-free-field: 7] message PerXdsConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.service.status.v3.PerXdsConfig"; @@ -79,6 +79,9 @@ message PerXdsConfig { admin.v4alpha.RoutesConfigDump route_config = 4; admin.v4alpha.ScopedRoutesConfigDump scoped_route_config = 5; + + // [#not-implemented-hide:] + admin.v4alpha.EndpointsConfigDump endpoint_config = 6; } } diff --git a/generated_api_shadow/envoy/service/status/v3/csds.proto b/generated_api_shadow/envoy/service/status/v3/csds.proto index 3347def21d8f..beccfb8cb58e 100644 --- a/generated_api_shadow/envoy/service/status/v3/csds.proto +++ b/generated_api_shadow/envoy/service/status/v3/csds.proto @@ -64,7 +64,7 @@ message ClientStatusRequest { } // Detailed config (per xDS) with status. -// [#next-free-field: 6] +// [#next-free-field: 7] message PerXdsConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.service.status.v2.PerXdsConfig"; @@ -79,6 +79,9 @@ message PerXdsConfig { admin.v3.RoutesConfigDump route_config = 4; admin.v3.ScopedRoutesConfigDump scoped_route_config = 5; + + // [#not-implemented-hide:] + admin.v3.EndpointsConfigDump endpoint_config = 6; } } diff --git a/generated_api_shadow/envoy/service/status/v4alpha/csds.proto b/generated_api_shadow/envoy/service/status/v4alpha/csds.proto index f6f5fa654d70..2286eb94a8a7 100644 --- a/generated_api_shadow/envoy/service/status/v4alpha/csds.proto +++ b/generated_api_shadow/envoy/service/status/v4alpha/csds.proto @@ -64,7 +64,7 @@ message ClientStatusRequest { } // Detailed config (per xDS) with status. -// [#next-free-field: 6] +// [#next-free-field: 7] message PerXdsConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.service.status.v3.PerXdsConfig"; @@ -79,6 +79,9 @@ message PerXdsConfig { admin.v4alpha.RoutesConfigDump route_config = 4; admin.v4alpha.ScopedRoutesConfigDump scoped_route_config = 5; + + // [#not-implemented-hide:] + admin.v4alpha.EndpointsConfigDump endpoint_config = 6; } } From 3dd4612bd0ccd00a160319763642220068b67413 Mon Sep 17 00:00:00 2001 From: jianwen612 <55008549+jianwen612@users.noreply.github.com> Date: Sun, 21 Jun 2020 20:18:29 -0500 Subject: [PATCH 397/909] Fuzz: Added fuzz test for function verifySignature() and importPublicKey() in utility.cc (#11543) Renamed utility_fuzz_test.cc to get_sha_256_digest_fuzz_test.cc Added fuzz test for function verifySignature() and importPublicKey() in verify_signature_fuzz_test.cc and added the corpus for it. Modified the BUILD file to match above changes. Risk Level:low Testing: increase the functions coverage in source/extensions/common/crypto/utility_impl.cc by 60%. 
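For reviewers, here is a minimal sketch of how a non-fuzz caller exercises the same utility API the new verify_signature fuzzer drives. The importPublicKey()/verifySignature() call shapes match the fuzz test added in this change; the result_/error_message_ fields on the returned object are an assumption based on Envoy's crypto utility interface and are shown only for illustration:

    #include <string>
    #include <vector>

    #include "common/common/hex.h"
    #include "common/crypto/utility.h"

    namespace Envoy {
    // Hypothetical helper (not part of this change): import a hex-encoded public
    // key and verify a hex-encoded signature over `data` with the named hash function.
    bool checkSignature(const std::string& hex_key, const std::string& hash_func,
                        const std::string& hex_sig, const std::string& data) {
      Common::Crypto::CryptoObjectPtr key(
          Common::Crypto::UtilitySingleton::get().importPublicKey(Hex::decode(hex_key)));
      const std::vector<uint8_t> sig = Hex::decode(hex_sig);
      const std::vector<uint8_t> text(data.begin(), data.end());
      const auto output =
          Common::Crypto::UtilitySingleton::get().verifySignature(hash_func, *key, sig, text);
      // output.error_message_ (assumed field) describes failures such as an unknown hash name.
      return output.result_;
    }
    } // namespace Envoy

The fuzzer deliberately ignores the verification result: its only job is to show that arbitrary keys, hash names, signatures, and payloads never crash importPublicKey() or verifySignature().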
Signed-off-by: jianwen --- test/common/crypto/BUILD | 25 ++++++++++++-- .../35d26780ea66d4ffb726bbafaa9302687bda7624 | Bin .../58030c65410d7553b1804eb7ed64bdff1188f145 | Bin .../9c8bd40d34a88522d71d184c462af82e3148c02d | Bin .../e7af10a10f2540b1d1d497df2926786640285b1c | Bin ...est.cc => get_sha_256_digest_fuzz_test.cc} | 0 .../test_contains_sha1_wrong | 4 +++ .../test_contains_sha256_correct | 4 +++ .../common/crypto/verify_signature_fuzz.proto | 10 ++++++ .../crypto/verify_signature_fuzz_test.cc | 31 ++++++++++++++++++ .../crypto/verify_signature_fuzz_test.dict | 6 ++++ 11 files changed, 77 insertions(+), 3 deletions(-) rename test/common/crypto/{utility_corpus => get_sha_256_digest_corpus}/35d26780ea66d4ffb726bbafaa9302687bda7624 (100%) rename test/common/crypto/{utility_corpus => get_sha_256_digest_corpus}/58030c65410d7553b1804eb7ed64bdff1188f145 (100%) rename test/common/crypto/{utility_corpus => get_sha_256_digest_corpus}/9c8bd40d34a88522d71d184c462af82e3148c02d (100%) rename test/common/crypto/{utility_corpus => get_sha_256_digest_corpus}/e7af10a10f2540b1d1d497df2926786640285b1c (100%) rename test/common/crypto/{utility_fuzz_test.cc => get_sha_256_digest_fuzz_test.cc} (100%) create mode 100644 test/common/crypto/verify_signature_corpus/test_contains_sha1_wrong create mode 100644 test/common/crypto/verify_signature_corpus/test_contains_sha256_correct create mode 100644 test/common/crypto/verify_signature_fuzz.proto create mode 100644 test/common/crypto/verify_signature_fuzz_test.cc create mode 100644 test/common/crypto/verify_signature_fuzz_test.dict diff --git a/test/common/crypto/BUILD b/test/common/crypto/BUILD index f9c91449d259..a7243b2309f0 100644 --- a/test/common/crypto/BUILD +++ b/test/common/crypto/BUILD @@ -3,6 +3,7 @@ load( "envoy_cc_fuzz_test", "envoy_cc_test", "envoy_package", + "envoy_proto_library", ) licenses(["notice"]) # Apache 2 @@ -25,9 +26,27 @@ envoy_cc_test( ], ) +envoy_proto_library( + name = "verify_signature_fuzz_proto", + srcs = ["verify_signature_fuzz.proto"], +) + envoy_cc_fuzz_test( - name = "utility_fuzz_test", - srcs = ["utility_fuzz_test.cc"], - corpus = "utility_corpus", + name = "get_sha_256_digest_fuzz_test", + srcs = ["get_sha_256_digest_fuzz_test.cc"], + corpus = "get_sha_256_digest_corpus", deps = ["//source/extensions/common/crypto:utility_lib"], ) + +envoy_cc_fuzz_test( + name = "verify_signature_fuzz_test", + srcs = ["verify_signature_fuzz_test.cc"], + corpus = "verify_signature_corpus", + dictionaries = ["verify_signature_fuzz_test.dict"], + deps = [ + ":verify_signature_fuzz_proto_cc_proto", + "//source/common/common:hex_lib", + "//source/common/crypto:utility_lib", + "//source/extensions/common/crypto:utility_lib", + ], +) diff --git a/test/common/crypto/utility_corpus/35d26780ea66d4ffb726bbafaa9302687bda7624 b/test/common/crypto/get_sha_256_digest_corpus/35d26780ea66d4ffb726bbafaa9302687bda7624 similarity index 100% rename from test/common/crypto/utility_corpus/35d26780ea66d4ffb726bbafaa9302687bda7624 rename to test/common/crypto/get_sha_256_digest_corpus/35d26780ea66d4ffb726bbafaa9302687bda7624 diff --git a/test/common/crypto/utility_corpus/58030c65410d7553b1804eb7ed64bdff1188f145 b/test/common/crypto/get_sha_256_digest_corpus/58030c65410d7553b1804eb7ed64bdff1188f145 similarity index 100% rename from test/common/crypto/utility_corpus/58030c65410d7553b1804eb7ed64bdff1188f145 rename to test/common/crypto/get_sha_256_digest_corpus/58030c65410d7553b1804eb7ed64bdff1188f145 diff --git 
a/test/common/crypto/utility_corpus/9c8bd40d34a88522d71d184c462af82e3148c02d b/test/common/crypto/get_sha_256_digest_corpus/9c8bd40d34a88522d71d184c462af82e3148c02d similarity index 100% rename from test/common/crypto/utility_corpus/9c8bd40d34a88522d71d184c462af82e3148c02d rename to test/common/crypto/get_sha_256_digest_corpus/9c8bd40d34a88522d71d184c462af82e3148c02d diff --git a/test/common/crypto/utility_corpus/e7af10a10f2540b1d1d497df2926786640285b1c b/test/common/crypto/get_sha_256_digest_corpus/e7af10a10f2540b1d1d497df2926786640285b1c similarity index 100% rename from test/common/crypto/utility_corpus/e7af10a10f2540b1d1d497df2926786640285b1c rename to test/common/crypto/get_sha_256_digest_corpus/e7af10a10f2540b1d1d497df2926786640285b1c diff --git a/test/common/crypto/utility_fuzz_test.cc b/test/common/crypto/get_sha_256_digest_fuzz_test.cc similarity index 100% rename from test/common/crypto/utility_fuzz_test.cc rename to test/common/crypto/get_sha_256_digest_fuzz_test.cc diff --git a/test/common/crypto/verify_signature_corpus/test_contains_sha1_wrong b/test/common/crypto/verify_signature_corpus/test_contains_sha1_wrong new file mode 100644 index 000000000000..73f683034300 --- /dev/null +++ b/test/common/crypto/verify_signature_corpus/test_contains_sha1_wrong @@ -0,0 +1,4 @@ +key: "30820122300d06092a864886f70d01010105000382010f003082010a0282010100a7471266d01d160308d73409c06f2e8d35c531c458d3e480e9f3191847d062ec5ccff7bc51e949d5f2c3540c189a4eca1e8633a62cf2d0923101c27e38013e71de9ae91a704849bff7fbe2ce5bf4bd666fd9731102a53193fe5a9a5a50644ff8b1183fa897646598caad22a37f9544510836372b44c58c98586fb7144629cd8c9479592d996d32ff6d395c0b8442ec5aa1ef8051529ea0e375883cefc72c04e360b4ef8f5760650589ca814918f678eee39b884d5af8136a9630a6cc0cde157dc8e00f39540628d5f335b2c36c54c7c8bc3738a6b21acff815405afa28e5183f550dac19abcf1145a7f9ced987db680e4a229cac75dee347ec9ebce1fc3dbbbb0203010001" +hash_func: "sha1" +signature: "345ac3a167558f4f387a81c2d64234d901a7ceaa544db779d2f797b0ea4ef851b740905a63e2f4d5af42cee093a29c7155db9a63d3d483e0ef948f5ac51ce4e10a3a6606fd93ef68ee47b30c37491103039459122f78e1c7ea71a1a5ea24bb6519bca02c8c9915fe8be24927c91812a13db72dbcb500103a79e8f67ff8cb9e2a631974e0668ab3977bf570a91b67d1b6bcd5dce84055f21427d64f4256a042ab1dc8e925d53a769f6681a873f5859693a7728fcbe95beace1563b5ffbcd7c93b898aeba31421dafbfadeea50229c49fd6c445449314460f3d19150bd29a91333beaced557ed6295234f7c14fa46303b7e977d2c89ba8a39a46a35f33eb07a332" +data: "hello" diff --git a/test/common/crypto/verify_signature_corpus/test_contains_sha256_correct b/test/common/crypto/verify_signature_corpus/test_contains_sha256_correct new file mode 100644 index 000000000000..1afb6ea95a07 --- /dev/null +++ b/test/common/crypto/verify_signature_corpus/test_contains_sha256_correct @@ -0,0 +1,4 @@ +key: "30820122300d06092a864886f70d01010105000382010f003082010a0282010100a7471266d01d160308d73409c06f2e8d35c531c458d3e480e9f3191847d062ec5ccff7bc51e949d5f2c3540c189a4eca1e8633a62cf2d0923101c27e38013e71de9ae91a704849bff7fbe2ce5bf4bd666fd9731102a53193fe5a9a5a50644ff8b1183fa897646598caad22a37f9544510836372b44c58c98586fb7144629cd8c9479592d996d32ff6d395c0b8442ec5aa1ef8051529ea0e375883cefc72c04e360b4ef8f5760650589ca814918f678eee39b884d5af8136a9630a6cc0cde157dc8e00f39540628d5f335b2c36c54c7c8bc3738a6b21acff815405afa28e5183f550dac19abcf1145a7f9ced987db680e4a229cac75dee347ec9ebce1fc3dbbbb0203010001" +hash_func: "sha256" +signature: 
"345ac3a167558f4f387a81c2d64234d901a7ceaa544db779d2f797b0ea4ef851b740905a63e2f4d5af42cee093a29c7155db9a63d3d483e0ef948f5ac51ce4e10a3a6606fd93ef68ee47b30c37491103039459122f78e1c7ea71a1a5ea24bb6519bca02c8c9915fe8be24927c91812a13db72dbcb500103a79e8f67ff8cb9e2a631974e0668ab3977bf570a91b67d1b6bcd5dce84055f21427d64f4256a042ab1dc8e925d53a769f6681a873f5859693a7728fcbe95beace1563b5ffbcd7c93b898aeba31421dafbfadeea50229c49fd6c445449314460f3d19150bd29a91333beaced557ed6295234f7c14fa46303b7e977d2c89ba8a39a46a35f33eb07a332" +data: "hello" diff --git a/test/common/crypto/verify_signature_fuzz.proto b/test/common/crypto/verify_signature_fuzz.proto new file mode 100644 index 000000000000..3d22351e9834 --- /dev/null +++ b/test/common/crypto/verify_signature_fuzz.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package test.common.crypto; + +message VerifySignatureFuzzTestCase { + string key = 1; + string hash_func = 2; + string signature = 3; + string data = 4; +} \ No newline at end of file diff --git a/test/common/crypto/verify_signature_fuzz_test.cc b/test/common/crypto/verify_signature_fuzz_test.cc new file mode 100644 index 000000000000..c64fa2436ecb --- /dev/null +++ b/test/common/crypto/verify_signature_fuzz_test.cc @@ -0,0 +1,31 @@ +#include "common/common/hex.h" +#include "common/crypto/utility.h" + +#include "test/common/crypto/verify_signature_fuzz.pb.validate.h" +#include "test/fuzz/fuzz_runner.h" + +namespace Envoy { +namespace Common { +namespace Crypto { +namespace { + +DEFINE_PROTO_FUZZER(const test::common::crypto::VerifySignatureFuzzTestCase& input) { + const auto& key = input.key(); + const auto& hash_func = input.hash_func(); + const auto& signature = input.signature(); + const auto& data = input.data(); + + Common::Crypto::CryptoObjectPtr crypto_ptr( + Common::Crypto::UtilitySingleton::get().importPublicKey(Hex::decode(key))); + Common::Crypto::CryptoObject* crypto(crypto_ptr.get()); + + std::vector text(data.begin(), data.end()); + + const auto sig = Hex::decode(signature); + UtilitySingleton::get().verifySignature(hash_func, *crypto, sig, text); +} + +} // namespace +} // namespace Crypto +} // namespace Common +} // namespace Envoy diff --git a/test/common/crypto/verify_signature_fuzz_test.dict b/test/common/crypto/verify_signature_fuzz_test.dict new file mode 100644 index 000000000000..b6378abfd190 --- /dev/null +++ b/test/common/crypto/verify_signature_fuzz_test.dict @@ -0,0 +1,6 @@ +# hash_func +"sha1" +"sha224" +"sha256" +"sha384" +"sha512" From d0e52aade70bf841803476b12657ab6053897745 Mon Sep 17 00:00:00 2001 From: Alvin Baptiste <11775386+abaptiste@users.noreply.github.com> Date: Sun, 21 Jun 2020 20:30:56 -0700 Subject: [PATCH 398/909] dns_filter: Add external resolution (#11384) Extend the DNS Filter so that it can resolve queries using external resolvers Additional Description: The DNS Filter up to this point resolves queries from its local configuration. This change adds the ability to define external name servers and refer queries that cannot be answered from the local config. We also add metrics to gauge the filters behavior and performance. 
Signed-off-by: Alvin Baptiste --- .../udp/dns_filter/v3alpha/dns_filter.proto | 17 +- .../udp/dns_filter/v4alpha/dns_filter.proto | 17 +- .../listeners/udp_filters/dns_filter.rst | 9 +- .../udp/dns_filter/v3alpha/dns_filter.proto | 17 +- .../udp/dns_filter/v4alpha/dns_filter.proto | 17 +- .../extensions/filters/udp/dns_filter/BUILD | 4 + .../filters/udp/dns_filter/dns_filter.cc | 79 ++- .../filters/udp/dns_filter/dns_filter.h | 167 ++++- .../udp/dns_filter/dns_filter_resolver.cc | 128 ++++ .../udp/dns_filter/dns_filter_resolver.h | 77 +++ .../filters/udp/dns_filter/dns_parser.cc | 29 +- .../filters/udp/dns_filter/dns_parser.h | 54 +- .../udp/dns_filter/dns_filter_fuzz_test.cc | 70 ++- .../dns_filter/dns_filter_integration_test.cc | 202 +++++- .../filters/udp/dns_filter/dns_filter_test.cc | 587 +++++++++++++++--- 15 files changed, 1256 insertions(+), 218 deletions(-) create mode 100644 source/extensions/filters/udp/dns_filter/dns_filter_resolver.cc create mode 100644 source/extensions/filters/udp/dns_filter/dns_filter_resolver.h diff --git a/api/envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.proto b/api/envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.proto index ed9d1c27d04e..fda4bbf2c6b9 100644 --- a/api/envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.proto +++ b/api/envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.proto @@ -2,6 +2,7 @@ syntax = "proto3"; package envoy.extensions.filters.udp.dns_filter.v3alpha; +import "envoy/config/core/v3/address.proto"; import "envoy/config/core/v3/base.proto"; import "envoy/data/dns/v3/dns_table.proto"; @@ -46,14 +47,18 @@ message DnsFilterConfig { message ClientContextConfig { // Sets the maximum time we will wait for the upstream query to complete // We allow 5s for the upstream resolution to complete, so the minimum - // value here is 5 - google.protobuf.Duration resolver_timeout = 1 [(validate.rules).duration = {gte {seconds: 5}}]; + // value here is 1. Note that the total latency for a failed query is the + // number of retries multiplied by the resolver_timeout. + google.protobuf.Duration resolver_timeout = 1 [(validate.rules).duration = {gte {seconds: 1}}]; // A list of DNS servers to which we can forward queries - repeated string upstream_resolvers = 2 [(validate.rules).repeated = { - min_items: 1 - items {string {min_len: 3}} - }]; + repeated config.core.v3.Address upstream_resolvers = 2 + [(validate.rules).repeated = {min_items: 1}]; + + // Controls how many outstanding external lookup contexts the filter tracks. 
+ // The context structure allows the filter to respond to every query even if the external + // resolution times out or is otherwise unsuccessful + uint64 max_pending_lookups = 3 [(validate.rules).uint64 = {gte: 1}]; } // The stat prefix used when emitting DNS filter statistics diff --git a/api/envoy/extensions/filters/udp/dns_filter/v4alpha/dns_filter.proto b/api/envoy/extensions/filters/udp/dns_filter/v4alpha/dns_filter.proto index be78ebf40c18..8b7fd74c3b16 100644 --- a/api/envoy/extensions/filters/udp/dns_filter/v4alpha/dns_filter.proto +++ b/api/envoy/extensions/filters/udp/dns_filter/v4alpha/dns_filter.proto @@ -2,6 +2,7 @@ syntax = "proto3"; package envoy.extensions.filters.udp.dns_filter.v4alpha; +import "envoy/config/core/v4alpha/address.proto"; import "envoy/config/core/v4alpha/base.proto"; import "envoy/data/dns/v4alpha/dns_table.proto"; @@ -56,14 +57,18 @@ message DnsFilterConfig { // Sets the maximum time we will wait for the upstream query to complete // We allow 5s for the upstream resolution to complete, so the minimum - // value here is 5 - google.protobuf.Duration resolver_timeout = 1 [(validate.rules).duration = {gte {seconds: 5}}]; + // value here is 1. Note that the total latency for a failed query is the + // number of retries multiplied by the resolver_timeout. + google.protobuf.Duration resolver_timeout = 1 [(validate.rules).duration = {gte {seconds: 1}}]; // A list of DNS servers to which we can forward queries - repeated string upstream_resolvers = 2 [(validate.rules).repeated = { - min_items: 1 - items {string {min_len: 3}} - }]; + repeated config.core.v4alpha.Address upstream_resolvers = 2 + [(validate.rules).repeated = {min_items: 1}]; + + // Controls how many outstanding external lookup contexts the filter tracks. + // The context structure allows the filter to respond to every query even if the external + // resolution times out or is otherwise unsuccessful + uint64 max_pending_lookups = 3 [(validate.rules).uint64 = {gte: 1}]; } // The stat prefix used when emitting DNS filter statistics diff --git a/docs/root/configuration/listeners/udp_filters/dns_filter.rst b/docs/root/configuration/listeners/udp_filters/dns_filter.rst index 0b6874aafa26..da0780d780f4 100644 --- a/docs/root/configuration/listeners/udp_filters/dns_filter.rst +++ b/docs/root/configuration/listeners/udp_filters/dns_filter.rst @@ -40,8 +40,13 @@ Example Configuration client_config: resolution_timeout: 5s upstream_resolvers: - - "8.8.8.8" - - "8.8.4.4" + - socket_address: + address: "8.8.8.8" + port_value: 53 + - socket_address: + address: "8.8.4.4" + port_value: 53 + max_pending_lookups: 256 server_config: inline_dns_table: known_suffixes: diff --git a/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.proto b/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.proto index ed9d1c27d04e..fda4bbf2c6b9 100644 --- a/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.proto +++ b/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.proto @@ -2,6 +2,7 @@ syntax = "proto3"; package envoy.extensions.filters.udp.dns_filter.v3alpha; +import "envoy/config/core/v3/address.proto"; import "envoy/config/core/v3/base.proto"; import "envoy/data/dns/v3/dns_table.proto"; @@ -46,14 +47,18 @@ message DnsFilterConfig { message ClientContextConfig { // Sets the maximum time we will wait for the upstream query to complete // We allow 5s for the upstream resolution to complete, so the minimum - // value 
here is 5 - google.protobuf.Duration resolver_timeout = 1 [(validate.rules).duration = {gte {seconds: 5}}]; + // value here is 1. Note that the total latency for a failed query is the + // number of retries multiplied by the resolver_timeout. + google.protobuf.Duration resolver_timeout = 1 [(validate.rules).duration = {gte {seconds: 1}}]; // A list of DNS servers to which we can forward queries - repeated string upstream_resolvers = 2 [(validate.rules).repeated = { - min_items: 1 - items {string {min_len: 3}} - }]; + repeated config.core.v3.Address upstream_resolvers = 2 + [(validate.rules).repeated = {min_items: 1}]; + + // Controls how many outstanding external lookup contexts the filter tracks. + // The context structure allows the filter to respond to every query even if the external + // resolution times out or is otherwise unsuccessful + uint64 max_pending_lookups = 3 [(validate.rules).uint64 = {gte: 1}]; } // The stat prefix used when emitting DNS filter statistics diff --git a/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v4alpha/dns_filter.proto b/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v4alpha/dns_filter.proto index be78ebf40c18..8b7fd74c3b16 100644 --- a/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v4alpha/dns_filter.proto +++ b/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v4alpha/dns_filter.proto @@ -2,6 +2,7 @@ syntax = "proto3"; package envoy.extensions.filters.udp.dns_filter.v4alpha; +import "envoy/config/core/v4alpha/address.proto"; import "envoy/config/core/v4alpha/base.proto"; import "envoy/data/dns/v4alpha/dns_table.proto"; @@ -56,14 +57,18 @@ message DnsFilterConfig { // Sets the maximum time we will wait for the upstream query to complete // We allow 5s for the upstream resolution to complete, so the minimum - // value here is 5 - google.protobuf.Duration resolver_timeout = 1 [(validate.rules).duration = {gte {seconds: 5}}]; + // value here is 1. Note that the total latency for a failed query is the + // number of retries multiplied by the resolver_timeout. + google.protobuf.Duration resolver_timeout = 1 [(validate.rules).duration = {gte {seconds: 1}}]; // A list of DNS servers to which we can forward queries - repeated string upstream_resolvers = 2 [(validate.rules).repeated = { - min_items: 1 - items {string {min_len: 3}} - }]; + repeated config.core.v4alpha.Address upstream_resolvers = 2 + [(validate.rules).repeated = {min_items: 1}]; + + // Controls how many outstanding external lookup contexts the filter tracks. 
+ // The context structure allows the filter to respond to every query even if the external + // resolution times out or is otherwise unsuccessful + uint64 max_pending_lookups = 3 [(validate.rules).uint64 = {gte: 1}]; } // The stat prefix used when emitting DNS filter statistics diff --git a/source/extensions/filters/udp/dns_filter/BUILD b/source/extensions/filters/udp/dns_filter/BUILD index ab3de9ef5f08..1d4f8e0ab1fb 100644 --- a/source/extensions/filters/udp/dns_filter/BUILD +++ b/source/extensions/filters/udp/dns_filter/BUILD @@ -13,16 +13,20 @@ envoy_cc_library( name = "dns_filter_lib", srcs = [ "dns_filter.cc", + "dns_filter_resolver.cc", "dns_parser.cc", ], hdrs = [ "dns_filter.h", + "dns_filter_resolver.h", "dns_parser.h", ], external_deps = ["ares"], deps = [ "//include/envoy/buffer:buffer_interface", + "//include/envoy/event:dispatcher_interface", "//include/envoy/network:address_interface", + "//include/envoy/network:dns_interface", "//include/envoy/network:filter_interface", "//include/envoy/network:listener_interface", "//source/common/buffer:buffer_lib", diff --git a/source/extensions/filters/udp/dns_filter/dns_filter.cc b/source/extensions/filters/udp/dns_filter/dns_filter.cc index 909843977ad8..a666ff7924e9 100644 --- a/source/extensions/filters/udp/dns_filter/dns_filter.cc +++ b/source/extensions/filters/udp/dns_filter/dns_filter.cc @@ -79,11 +79,13 @@ DnsFilterEnvoyConfig::DnsFilterEnvoyConfig( const auto& upstream_resolvers = client_config.upstream_resolvers(); resolvers_.reserve(upstream_resolvers.size()); for (const auto& resolver : upstream_resolvers) { - auto ipaddr = Network::Utility::parseInternetAddress(resolver, 0 /* port */); - resolvers_.push_back(std::move(ipaddr)); + auto ipaddr = Network::Utility::protobufAddressToAddress(resolver); + resolvers_.emplace_back(std::move(ipaddr)); } resolver_timeout_ = std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT( client_config, resolver_timeout, DEFAULT_RESOLVER_TIMEOUT.count())); + + max_pending_lookups_ = client_config.max_pending_lookups(); } } @@ -116,16 +118,66 @@ bool DnsFilterEnvoyConfig::loadServerConfig( return data_source_loaded; } +DnsFilter::DnsFilter(Network::UdpReadFilterCallbacks& callbacks, + const DnsFilterEnvoyConfigSharedPtr& config) + : UdpListenerReadFilter(callbacks), config_(config), listener_(callbacks.udpListener()), + cluster_manager_(config_->clusterManager()), + message_parser_(config->forwardQueries(), listener_.dispatcher().timeSource(), + config->retryCount(), config->random(), + config_->stats().downstream_rx_query_latency_) { + // This callback is executed when the dns resolution completes. 
At that time of a response by the + // resolver, we build an answer record from each IP returned then send a response to the client + resolver_callback_ = [this](DnsQueryContextPtr context, const DnsQueryRecord* query, + AddressConstPtrVec& iplist) -> void { + if (context->resolution_status_ != Network::DnsResolver::ResolutionStatus::Success && + context->retry_ > 0) { + --context->retry_; + ENVOY_LOG(debug, "resolving name [{}] via external resolvers [retry {}]", query->name_, + context->retry_); + resolver_->resolveExternalQuery(std::move(context), query); + return; + } + + config_->stats().externally_resolved_queries_.inc(); + if (iplist.empty()) { + config_->stats().unanswered_queries_.inc(); + } + + incrementExternalQueryTypeCount(query->type_); + for (const auto& ip : iplist) { + incrementExternalQueryTypeAnswerCount(query->type_); + const std::chrono::seconds ttl = getDomainTTL(query->name_); + message_parser_.buildDnsAnswerRecord(context, *query, ttl, std::move(ip)); + } + sendDnsResponse(std::move(context)); + }; + + resolver_ = std::make_unique(resolver_callback_, config->resolvers(), + config->resolverTimeout(), listener_.dispatcher(), + config->maxPendingLookups()); +} + void DnsFilter::onData(Network::UdpRecvData& client_request) { + config_->stats().downstream_rx_bytes_.recordValue(client_request.buffer_->length()); + config_->stats().downstream_rx_queries_.inc(); + + // Setup counters for the parser + DnsParserCounters parser_counters(config_->stats().query_buffer_underflow_, + config_->stats().record_name_overflow_, + config_->stats().query_parsing_failure_); + // Parse the query, if it fails return an response to the client - DnsQueryContextPtr query_context = message_parser_.createQueryContext(client_request); + DnsQueryContextPtr query_context = + message_parser_.createQueryContext(client_request, parser_counters); + incrementQueryTypeCount(query_context->queries_); if (!query_context->parse_status_) { + config_->stats().downstream_rx_invalid_queries_.inc(); sendDnsResponse(std::move(query_context)); return; } // Resolve the requested name - const auto response = getResponseForQuery(query_context); + auto response = getResponseForQuery(query_context); // We were not able to satisfy the request locally. Return an empty response to the client if (response == DnsLookupResponseCode::Failure) { @@ -133,7 +185,11 @@ void DnsFilter::onData(Network::UdpRecvData& client_request) { return; } - // TODO(abaptiste): external resolution + // Externally resolved. We'll respond to the client when the external DNS resolution callback + // is executed + if (response == DnsLookupResponseCode::External) { + return; + } // We have an answer. Send it to the client sendDnsResponse(std::move(query_context)); @@ -145,6 +201,8 @@ void DnsFilter::sendDnsResponse(DnsQueryContextPtr query_context) { // Serializes the generated response to the parsed query from the client. 
If there is a // parsing error or the incoming query is invalid, we will still generate a valid DNS response message_parser_.buildResponseBuffer(query_context, response); + config_->stats().downstream_tx_responses_.inc(); + config_->stats().downstream_tx_bytes_.recordValue(response.length()); Network::UdpSendData response_data{query_context->local_->ip(), *(query_context->peer_), response}; listener_.send(response_data); @@ -174,10 +232,15 @@ DnsLookupResponseCode DnsFilter::getResponseForQuery(DnsQueryContextPtr& context continue; } } - // TODO(abaptiste): resolve the query externally + + ENVOY_LOG(debug, "resolving name [{}] via external resolvers", query->name_); + resolver_->resolveExternalQuery(std::move(context), query.get()); + + return DnsLookupResponseCode::External; } if (context->answers_.empty()) { + config_->stats().unanswered_queries_.inc(); return DnsLookupResponseCode::Failure; } return DnsLookupResponseCode::Success; @@ -206,6 +269,7 @@ bool DnsFilter::isKnownDomain(const absl::string_view domain_name) { // TODO(abaptiste): Use a trie to find a match instead of iterating through the list for (auto& suffix : known_suffixes) { if (suffix->match(domain_name)) { + config_->stats().known_domain_queries_.inc(); return true; } } @@ -264,6 +328,7 @@ bool DnsFilter::resolveViaClusters(DnsQueryContextPtr& context, const DnsQueryRe ++discovered_endpoints; ENVOY_LOG(debug, "using cluster host address {} for domain [{}]", host->address()->ip()->addressAsString(), lookup_name); + incrementClusterQueryTypeAnswerCount(query.type_); message_parser_.buildDnsAnswerRecord(context, query, ttl, host->address()); } } @@ -287,6 +352,7 @@ bool DnsFilter::resolveViaConfiguredHosts(DnsQueryContextPtr& context, uint64_t hosts_found = 0; for (const auto& configured_address : *configured_address_list) { ASSERT(configured_address != nullptr); + incrementLocalQueryTypeAnswerCount(query.type_); ENVOY_LOG(debug, "using local address {} for domain [{}]", configured_address->ip()->addressAsString(), query.name_); ++hosts_found; @@ -297,6 +363,7 @@ bool DnsFilter::resolveViaConfiguredHosts(DnsQueryContextPtr& context, } void DnsFilter::onReceiveError(Api::IoError::IoErrorCode error_code) { + config_->stats().downstream_rx_errors_.inc(); UNREFERENCED_PARAMETER(error_code); } diff --git a/source/extensions/filters/udp/dns_filter/dns_filter.h b/source/extensions/filters/udp/dns_filter/dns_filter.h index 3e295f1c79a9..d1ccbd18e207 100644 --- a/source/extensions/filters/udp/dns_filter/dns_filter.h +++ b/source/extensions/filters/udp/dns_filter/dns_filter.h @@ -2,6 +2,7 @@ #include "envoy/event/file_event.h" #include "envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.pb.h" +#include "envoy/network/dns.h" #include "envoy/network/filter.h" #include "common/buffer/buffer_impl.h" @@ -9,6 +10,7 @@ #include "common/config/config_provider_impl.h" #include "common/network/utility.h" +#include "extensions/filters/udp/dns_filter/dns_filter_resolver.h" #include "extensions/filters/udp/dns_filter/dns_parser.h" #include "absl/container/flat_hash_set.h" @@ -20,21 +22,42 @@ namespace DnsFilter { /** * All DNS Filter stats. 
@see stats_macros.h - * Track the number of answered and un-answered queries for A and AAAA records */ -#define ALL_DNS_FILTER_STATS(COUNTER) \ - COUNTER(queries_a_record) \ - COUNTER(noanswers_a_record) \ - COUNTER(answers_a_record) \ - COUNTER(queries_aaaa_record) \ - COUNTER(noanswers_aaaa_record) \ - COUNTER(answers_aaaa_record) +#define ALL_DNS_FILTER_STATS(COUNTER, HISTOGRAM) \ + COUNTER(a_record_queries) \ + COUNTER(aaaa_record_queries) \ + COUNTER(cluster_a_record_answers) \ + COUNTER(cluster_aaaa_record_answers) \ + COUNTER(cluster_unsupported_answers) \ + COUNTER(downstream_rx_errors) \ + COUNTER(downstream_rx_invalid_queries) \ + COUNTER(downstream_rx_queries) \ + COUNTER(external_a_record_queries) \ + COUNTER(external_a_record_answers) \ + COUNTER(external_aaaa_record_answers) \ + COUNTER(external_aaaa_record_queries) \ + COUNTER(external_unsupported_answers) \ + COUNTER(external_unsupported_queries) \ + COUNTER(externally_resolved_queries) \ + COUNTER(known_domain_queries) \ + COUNTER(local_a_record_answers) \ + COUNTER(local_aaaa_record_answers) \ + COUNTER(local_unsupported_answers) \ + COUNTER(unanswered_queries) \ + COUNTER(unsupported_queries) \ + COUNTER(downstream_tx_responses) \ + COUNTER(query_buffer_underflow) \ + COUNTER(query_parsing_failure) \ + COUNTER(record_name_overflow) \ + HISTOGRAM(downstream_rx_bytes, Bytes) \ + HISTOGRAM(downstream_rx_query_latency, Milliseconds) \ + HISTOGRAM(downstream_tx_bytes, Bytes) /** * Struct definition for all DNS Filter stats. @see stats_macros.h */ struct DnsFilterStats { - ALL_DNS_FILTER_STATS(GENERATE_COUNTER_STRUCT) + ALL_DNS_FILTER_STATS(GENERATE_COUNTER_STRUCT, GENERATE_HISTOGRAM_STRUCT) }; struct DnsEndpointConfig { @@ -65,11 +88,13 @@ class DnsFilterEnvoyConfig : public Logger::Loggable { Upstream::ClusterManager& clusterManager() const { return cluster_manager_; } uint64_t retryCount() const { return retry_count_; } Runtime::RandomGenerator& random() const { return random_; } + uint64_t maxPendingLookups() const { return max_pending_lookups_; } private: static DnsFilterStats generateStats(const std::string& stat_prefix, Stats::Scope& scope) { const auto final_prefix = absl::StrCat("dns_filter.", stat_prefix); - return {ALL_DNS_FILTER_STATS(POOL_COUNTER_PREFIX(scope, final_prefix))}; + return {ALL_DNS_FILTER_STATS(POOL_COUNTER_PREFIX(scope, final_prefix), + POOL_HISTOGRAM_PREFIX(scope, final_prefix))}; } bool loadServerConfig(const envoy::extensions::filters::udp::dns_filter::v3alpha:: @@ -78,6 +103,7 @@ class DnsFilterEnvoyConfig : public Logger::Loggable { Stats::Scope& root_scope_; Upstream::ClusterManager& cluster_manager_; + Network::DnsResolverSharedPtr resolver_; Api::Api& api_; mutable DnsFilterStats stats_; @@ -89,6 +115,7 @@ class DnsFilterEnvoyConfig : public Logger::Loggable { AddressConstPtrVec resolvers_; std::chrono::milliseconds resolver_timeout_; Runtime::RandomGenerator& random_; + uint64_t max_pending_lookups_; }; using DnsFilterEnvoyConfigSharedPtr = std::shared_ptr; @@ -102,10 +129,8 @@ enum class DnsLookupResponseCode { Success, Failure, External }; */ class DnsFilter : public Network::UdpListenerReadFilter, Logger::Loggable { public: - DnsFilter(Network::UdpReadFilterCallbacks& callbacks, const DnsFilterEnvoyConfigSharedPtr& config) - : UdpListenerReadFilter(callbacks), config_(config), listener_(callbacks.udpListener()), - cluster_manager_(config_->clusterManager()), - message_parser_(config->forwardQueries(), config->retryCount(), config->random()) {} + DnsFilter(Network::UdpReadFilterCallbacks& 
callbacks, + const DnsFilterEnvoyConfigSharedPtr& config); // Network::UdpListenerReadFilter callbacks void onData(Network::UdpRecvData& client_request) override; @@ -133,7 +158,8 @@ class DnsFilter : public Network::UdpListenerReadFilter, Logger::Loggablestats().external_a_record_queries_.inc(); + break; + case DNS_RECORD_TYPE_AAAA: + config_->stats().external_aaaa_record_queries_.inc(); + break; + default: + config_->stats().external_unsupported_queries_.inc(); + break; + } + } + + /** + * @brief Increment the counter for the parsed query type + * + * @param queries a vector of all the incoming queries received from a client + */ + void incrementQueryTypeCount(const DnsQueryPtrVec& queries) { + for (const auto& query : queries) { + incrementQueryTypeCount(query->type_); + } + } + + /** + * @brief Increment the counter for the given query type. + * + * @param query_type indicate the type of record being resolved (A, AAAA, or other). + */ + void incrementQueryTypeCount(const uint16_t query_type) { + switch (query_type) { + case DNS_RECORD_TYPE_A: + config_->stats().a_record_queries_.inc(); + break; + case DNS_RECORD_TYPE_AAAA: + config_->stats().aaaa_record_queries_.inc(); + break; + default: + config_->stats().unsupported_queries_.inc(); + break; + } + } + + /** + * @brief Increment the counter for answers for the given query type resolved via cluster names + * + * @param query_type indicate the type of answer record returned to the client + */ + void incrementClusterQueryTypeAnswerCount(const uint16_t query_type) { + switch (query_type) { + case DNS_RECORD_TYPE_A: + config_->stats().cluster_a_record_answers_.inc(); + break; + case DNS_RECORD_TYPE_AAAA: + config_->stats().cluster_aaaa_record_answers_.inc(); + break; + default: + config_->stats().cluster_unsupported_answers_.inc(); + break; + } + } + + /** + * @brief Increment the counter for answers for the given query type resolved from the local + * configuration. 
+ * + * @param query_type indicate the type of answer record returned to the client + */ + void incrementLocalQueryTypeAnswerCount(const uint16_t query_type) { + switch (query_type) { + case DNS_RECORD_TYPE_A: + config_->stats().local_a_record_answers_.inc(); + break; + case DNS_RECORD_TYPE_AAAA: + config_->stats().local_aaaa_record_answers_.inc(); + break; + default: + config_->stats().local_unsupported_answers_.inc(); + break; + } + } + + /** + * @brief Increment the counter for answers for the given query type resolved via an external + * resolver + * + * @param query_type indicate the type of answer record returned to the client + */ + void incrementExternalQueryTypeAnswerCount(const uint16_t query_type) { + switch (query_type) { + case DNS_RECORD_TYPE_A: + config_->stats().external_a_record_answers_.inc(); + break; + case DNS_RECORD_TYPE_AAAA: + config_->stats().external_aaaa_record_answers_.inc(); + break; + default: + config_->stats().external_unsupported_answers_.inc(); + break; + } + } + /** * @brief Helper function to retrieve the Endpoint configuration for a requested domain */ @@ -175,9 +309,10 @@ class DnsFilter : public Network::UdpListenerReadFilter, Logger::Loggable( + dispatcher_.timeSource().systemTime().time_since_epoch()) + .count() + + std::chrono::duration_cast(timeout_).count(); + ctx.resolver_status = DnsFilterResolverStatus::Pending; + + Network::DnsLookupFamily lookup_family; + switch (domain_query->type_) { + case DNS_RECORD_TYPE_A: + lookup_family = Network::DnsLookupFamily::V4Only; + break; + case DNS_RECORD_TYPE_AAAA: + lookup_family = Network::DnsLookupFamily::V6Only; + break; + default: + // We don't support other lookups other than A and AAAA. Set success here so that we don't + // retry for something that we are certain will fail. + ENVOY_LOG(debug, "Unknown query type [{}] for upstream lookup", domain_query->type_); + ctx.query_context->resolution_status_ = Network::DnsResolver::ResolutionStatus::Success; + ctx.resolver_status = DnsFilterResolverStatus::Complete; + invokeCallback(ctx); + return; + } + + const DnsQueryRecord* id = domain_query; + + // If we have too many pending lookups, invoke the callback to retry the query. + if (lookups_.size() > max_pending_lookups_) { + ENVOY_LOG( + trace, + "Retrying query for [{}] because there are too many pending lookups: [pending {}/max {}]", + domain_query->name_, lookups_.size(), max_pending_lookups_); + ctx.resolver_status = DnsFilterResolverStatus::Complete; + invokeCallback(ctx); + return; + } + + ctx.timeout_timer = dispatcher_.createTimer([this]() -> void { onResolveTimeout(); }); + ctx.timeout_timer->enableTimer(timeout_); + + lookups_.emplace(id, std::move(ctx)); + + ENVOY_LOG(trace, "Pending queries: {}", lookups_.size()); + + // Define the callback that is executed when resolution completes + auto resolve_cb = [this, id](Network::DnsResolver::ResolutionStatus status, + std::list&& response) -> void { + auto ctx_iter = lookups_.find(id); + + // If the context is not in the map, the lookup has timed out and was removed + // when the timer executed + if (ctx_iter == lookups_.end()) { + ENVOY_LOG(debug, "Unable to find context for DNS query for ID [{}]", + reinterpret_cast(id)); + return; + } + + auto ctx = std::move(ctx_iter->second); + lookups_.erase(ctx_iter->first); + + // We are processing the response here, so we did not timeout. Cancel the timer + ctx.timeout_timer->disableTimer(); + + ENVOY_LOG(trace, "async query status returned. 
Entries {}", response.size()); + ASSERT(ctx.resolver_status == DnsFilterResolverStatus::Pending); + + ctx.query_context->resolution_status_ = status; + ctx.resolver_status = DnsFilterResolverStatus::Complete; + + // C-ares doesn't expose the TTL in the data available here. + if (status == Network::DnsResolver::ResolutionStatus::Success) { + ctx.resolved_hosts.reserve(response.size()); + for (const auto& resp : response) { + ASSERT(resp.address_ != nullptr); + ENVOY_LOG(trace, "Resolved address: {} for {}", resp.address_->ip()->addressAsString(), + ctx.query_rec->name_); + ctx.resolved_hosts.emplace_back(std::move(resp.address_)); + } + } + // Invoke the filter callback notifying it of resolved addresses + invokeCallback(ctx); + }; + + // Resolve the address in the query and add to the resolved_hosts vector + resolver_->resolve(domain_query->name_, lookup_family, resolve_cb); +} + +void DnsFilterResolver::onResolveTimeout() { + const uint64_t now = std::chrono::duration_cast( + dispatcher_.timeSource().systemTime().time_since_epoch()) + .count(); + ENVOY_LOG(trace, "Pending queries: {}", lookups_.size()); + + // Find an outstanding pending query and purge it + for (auto& ctx_iter : lookups_) { + if (ctx_iter.second.expiry <= now && + ctx_iter.second.resolver_status == DnsFilterResolverStatus::Pending) { + auto ctx = std::move(ctx_iter.second); + + ENVOY_LOG(trace, "Purging expired query: {}", ctx_iter.first->name_); + + ctx.query_context->resolution_status_ = Network::DnsResolver::ResolutionStatus::Failure; + + lookups_.erase(ctx_iter.first); + callback_(std::move(ctx.query_context), ctx.query_rec, ctx.resolved_hosts); + return; + } + } +} +} // namespace DnsFilter +} // namespace UdpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/udp/dns_filter/dns_filter_resolver.h b/source/extensions/filters/udp/dns_filter/dns_filter_resolver.h new file mode 100644 index 000000000000..ee499683db76 --- /dev/null +++ b/source/extensions/filters/udp/dns_filter/dns_filter_resolver.h @@ -0,0 +1,77 @@ +#pragma once + +#include "envoy/event/dispatcher.h" +#include "envoy/network/dns.h" + +#include "extensions/filters/udp/dns_filter/dns_parser.h" + +namespace Envoy { +namespace Extensions { +namespace UdpFilters { +namespace DnsFilter { + +enum class DnsFilterResolverStatus { Pending, Complete, TimedOut }; + +/* + * This class encapsulates the logic of handling an asynchronous DNS request for the DNS filter. + * External request timeouts are handled here. + */ +class DnsFilterResolver : Logger::Loggable { +public: + DnsFilterResolver(DnsFilterResolverCallback& callback, AddressConstPtrVec resolvers, + std::chrono::milliseconds timeout, Event::Dispatcher& dispatcher, + uint64_t max_pending_lookups) + : dispatcher_(dispatcher), + resolver_(dispatcher.createDnsResolver(resolvers, false /* use_tcp_for_dns_lookups */)), + callback_(callback), timeout_(timeout), max_pending_lookups_(max_pending_lookups) {} + /** + * @brief entry point to resolve the name in a DnsQueryRecord + * + * This function uses the query object to determine whether it is requesting an A or AAAA record + * for the given name. When the resolver callback executes, this will execute a DNS Filter + * callback in order to build the answer object returned to the client. 
+ * + * @param domain_query the query record object containing the name for which we are resolving + */ + void resolveExternalQuery(DnsQueryContextPtr context, const DnsQueryRecord* domain_query); + +private: + struct LookupContext { + const DnsQueryRecord* query_rec; + DnsQueryContextPtr query_context; + uint64_t expiry; + AddressConstPtrVec resolved_hosts; + DnsFilterResolverStatus resolver_status; + Event::TimerPtr timeout_timer; + }; + /** + * @brief invokes the DNS Filter callback only if our state indicates we have not timed out + * waiting for a response from the external resolver + */ + void invokeCallback(LookupContext& context) { + // If we've timed out. Guard against sending a response + if (context.resolver_status == DnsFilterResolverStatus::Complete) { + callback_(std::move(context.query_context), context.query_rec, context.resolved_hosts); + } + } + + /** + * @brief Invoke the DNS Filter callback to send a response to a client if the query has timed out + * DNS Filter will respond to the client appropriately. + */ + void onResolveTimeout(); + + Event::Dispatcher& dispatcher_; + const Network::DnsResolverSharedPtr resolver_; + DnsFilterResolverCallback& callback_; + std::chrono::milliseconds timeout_; + absl::flat_hash_map lookups_; + uint64_t max_pending_lookups_; +}; + +using DnsFilterResolverPtr = std::unique_ptr; + +} // namespace DnsFilter +} // namespace UdpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/udp/dns_filter/dns_parser.cc b/source/extensions/filters/udp/dns_filter/dns_parser.cc index fdd300d45157..488d3952f5bc 100644 --- a/source/extensions/filters/udp/dns_filter/dns_parser.cc +++ b/source/extensions/filters/udp/dns_filter/dns_parser.cc @@ -102,9 +102,10 @@ bool DnsAnswerRecord::serialize(Buffer::OwnedImpl& output) { return false; } -DnsQueryContextPtr DnsMessageParser::createQueryContext(Network::UdpRecvData& client_request) { +DnsQueryContextPtr DnsMessageParser::createQueryContext(Network::UdpRecvData& client_request, + DnsParserCounters& counters) { DnsQueryContextPtr query_context = std::make_unique( - client_request.addresses_.local_, client_request.addresses_.peer_); + client_request.addresses_.local_, client_request.addresses_.peer_, counters, retry_count_); query_context->parse_status_ = parseDnsObject(query_context, client_request.buffer_); if (!query_context->parse_status_) { @@ -127,6 +128,7 @@ bool DnsMessageParser::parseDnsObject(DnsQueryContextPtr& context, while (state != DnsQueryParseState::Finish) { // Ensure that we have enough data remaining in the buffer to parse the query if (available_bytes < field_size) { + context->counters_.underflow_counter.inc(); ENVOY_LOG(debug, "Exhausted available bytes in the buffer. 
Insufficient data to parse query field."); return false; @@ -179,7 +181,16 @@ bool DnsMessageParser::parseDnsObject(DnsQueryContextPtr& context, } } - // TODO(abaptiste): Verify that queries do not contain answer records + if (!header_.flags.qr && header_.answers) { + ENVOY_LOG(debug, "Answer records present in query"); + return false; + } + + if (header_.questions > 1) { + ENVOY_LOG(debug, "Multiple [{}] questions in DNS query", header_.questions); + return false; + } + // Verify that we still have available data in the buffer to read answer and query records if (offset > buffer->length()) { ENVOY_LOG(debug, "Buffer read offset[{}] is larget than buffer length [{}].", offset, @@ -204,6 +215,7 @@ bool DnsMessageParser::parseDnsObject(DnsQueryContextPtr& context, ENVOY_LOG(trace, "Parsing [{}/{}] questions", index, header_.questions); auto rec = parseDnsQueryRecord(buffer, &offset); if (rec == nullptr) { + context->counters_.query_parsing_failure.inc(); ENVOY_LOG(debug, "Couldn't parse query record from buffer"); return false; } @@ -360,7 +372,7 @@ DnsAnswerRecordPtr DnsMessageParser::parseDnsAnswerRecord(const Buffer::Instance *offset = data_offset; return std::make_unique(record_name, record_type, record_class, - std::chrono::seconds{ttl}, std::move(ip_addr)); + std::chrono::seconds(ttl), std::move(ip_addr)); } DnsQueryRecordPtr DnsMessageParser::parseDnsQueryRecord(const Buffer::InstancePtr& buffer, @@ -402,7 +414,11 @@ DnsQueryRecordPtr DnsMessageParser::parseDnsQueryRecord(const Buffer::InstancePt return nullptr; } - // stop reading he buffer here since we aren't parsing additional records + auto rec = std::make_unique(record_name, record_type, record_class); + rec->query_time_ms_ = std::make_unique( + query_latency_histogram_, timesource_); + + // stop reading the buffer here since we aren't parsing additional records ENVOY_LOG(trace, "Extracted query record. Name: {} type: {} class: {}", record_name, record_type, record_class); @@ -554,9 +570,8 @@ void DnsMessageParser::buildResponseBuffer(DnsQueryContextPtr& query_context, // names, we should not end up with a non-conforming name here. // // See Section 2.3.4 of https://tools.ietf.org/html/rfc1035 - - // TODO(abaptiste): add stats for record overflow if (query->name_.size() > MAX_DNS_NAME_SIZE) { + query_context->counters_.record_name_overflow.inc(); ENVOY_LOG( debug, "Query name '{}' is longer than the maximum permitted length. 
Skipping serialization", diff --git a/source/extensions/filters/udp/dns_filter/dns_parser.h b/source/extensions/filters/udp/dns_filter/dns_parser.h index b64962019b49..d06e6bb80afa 100644 --- a/source/extensions/filters/udp/dns_filter/dns_parser.h +++ b/source/extensions/filters/udp/dns_filter/dns_parser.h @@ -3,10 +3,12 @@ #include "envoy/buffer/buffer.h" #include "envoy/common/platform.h" #include "envoy/network/address.h" +#include "envoy/network/dns.h" #include "envoy/network/listener.h" #include "common/buffer/buffer_impl.h" #include "common/runtime/runtime_impl.h" +#include "common/stats/timespan_impl.h" namespace Envoy { namespace Extensions { @@ -19,7 +21,6 @@ constexpr uint16_t DNS_RECORD_TYPE_AAAA = 28; constexpr uint16_t DNS_RESPONSE_CODE_NO_ERROR = 0; constexpr uint16_t DNS_RESPONSE_CODE_FORMAT_ERROR = 1; -constexpr uint16_t DNS_RESPONSE_CODE_SERVER_FAILURE = 2; constexpr uint16_t DNS_RESPONSE_CODE_NAME_ERROR = 3; constexpr uint16_t DNS_RESPONSE_CODE_NOT_IMPLEMENTED = 4; @@ -48,12 +49,13 @@ class DnsQueryRecord : public BaseDnsRecord { DnsQueryRecord(const std::string& rec_name, const uint16_t rec_type, const uint16_t rec_class) : BaseDnsRecord(rec_name, rec_type, rec_class) {} bool serialize(Buffer::OwnedImpl& output) override; + + std::unique_ptr query_time_ms_; }; using DnsQueryRecordPtr = std::unique_ptr; using DnsQueryPtrVec = std::vector; using AddressConstPtrVec = std::vector; -using AnswerCallback = std::function; /** * DnsAnswerRecord represents a single answer record for a name that is to be serialized and sent to @@ -74,26 +76,47 @@ class DnsAnswerRecord : public BaseDnsRecord { using DnsAnswerRecordPtr = std::unique_ptr; using DnsAnswerMap = std::unordered_multimap; +/** + * @brief This struct is used to hold pointers to the counters that are relevant to the + * parser. This is done to prevent dependency loops between the parser and filter headers + */ +struct DnsParserCounters { + Stats::Counter& underflow_counter; + Stats::Counter& record_name_overflow; + Stats::Counter& query_parsing_failure; + + DnsParserCounters(Stats::Counter& underflow, Stats::Counter& record_name, + Stats::Counter& query_parsing) + : underflow_counter(underflow), record_name_overflow(record_name), + query_parsing_failure(query_parsing) {} +}; + /** * DnsQueryContext contains all the data necessary for responding to a query from a given client. */ class DnsQueryContext { public: DnsQueryContext(Network::Address::InstanceConstSharedPtr local, - Network::Address::InstanceConstSharedPtr peer) - : local_(std::move(local)), peer_(std::move(peer)), parse_status_(false), - response_code_(DNS_RESPONSE_CODE_NO_ERROR) {} + Network::Address::InstanceConstSharedPtr peer, DnsParserCounters& counters, + uint64_t retry_count) + : local_(std::move(local)), peer_(std::move(peer)), counters_(counters), parse_status_(false), + response_code_(DNS_RESPONSE_CODE_NO_ERROR), retry_(retry_count) {} const Network::Address::InstanceConstSharedPtr local_; const Network::Address::InstanceConstSharedPtr peer_; + DnsParserCounters& counters_; bool parse_status_; uint16_t response_code_; + uint64_t retry_; uint16_t id_; + Network::DnsResolver::ResolutionStatus resolution_status_; DnsQueryPtrVec queries_; DnsAnswerMap answers_; }; using DnsQueryContextPtr = std::unique_ptr; +using DnsFilterResolverCallback = std::function; /** * This class orchestrates parsing a DNS query and building the response to be sent to a client. 
@@ -139,8 +162,10 @@ class DnsMessageParser : public Logger::Loggable { uint16_t additional_rrs; }); - DnsMessageParser(bool recurse, uint64_t retry_count, Runtime::RandomGenerator& random) - : recursion_available_(recurse), retry_count_(retry_count), rng_(random) {} + DnsMessageParser(bool recurse, TimeSource& timesource, uint64_t retry_count, + Runtime::RandomGenerator& random, Stats::Histogram& latency_histogram) + : recursion_available_(recurse), timesource_(timesource), retry_count_(retry_count), + query_latency_histogram_(latency_histogram), rng_(random) {} /** * @brief Builds an Answer record for the active query. The active query transaction ID is at the @@ -193,23 +218,14 @@ class DnsMessageParser : public Logger::Loggable { */ uint16_t getQueryResponseCode() { return static_cast(header_.flags.rcode); } - /** - * @return uint16_t the number of answer records in the parsed dns object - */ - uint16_t getAnswers() { return header_.answers; } - - /** - * @return uint16_t the response code flag value from a generated dns object - */ - uint16_t getAnswerResponseCode() { return static_cast(response_header_.flags.rcode); } - /** * @brief Parse the incoming query and create a context object for the filter * * @param client_request a structure containing addressing information and the buffer received * from a client */ - DnsQueryContextPtr createQueryContext(Network::UdpRecvData& client_request); + DnsQueryContextPtr createQueryContext(Network::UdpRecvData& client_request, + DnsParserCounters& counters); /** * @param buffer a reference to the incoming request object received by the listener * @return bool true if all DNS records and flags were successfully parsed from the buffer @@ -249,7 +265,9 @@ class DnsMessageParser : public Logger::Loggable { uint64_t* name_offset); bool recursion_available_; + TimeSource& timesource_; uint64_t retry_count_; + Stats::Histogram& query_latency_histogram_; DnsHeader header_; DnsHeader response_header_; Runtime::RandomGenerator& rng_; diff --git a/test/extensions/filters/udp/dns_filter/dns_filter_fuzz_test.cc b/test/extensions/filters/udp/dns_filter/dns_filter_fuzz_test.cc index 964bb0d0eea9..334147bf3b01 100644 --- a/test/extensions/filters/udp/dns_filter/dns_filter_fuzz_test.cc +++ b/test/extensions/filters/udp/dns_filter/dns_filter_fuzz_test.cc @@ -17,46 +17,52 @@ namespace UdpFilters { namespace DnsFilter { namespace { -const std::string generateQuery(FuzzedDataProvider* data_provider) { - size_t query_size = data_provider->ConsumeIntegralInRange(0, 512); - return data_provider->ConsumeRandomLengthString(query_size); -} - DEFINE_FUZZER(const uint8_t* buf, size_t len) { - FuzzedDataProvider data_provider(buf, len); - const bool recurse = data_provider.ConsumeBool(); - const uint16_t retry_count = data_provider.ConsumeIntegralInRange(0, 3); + static const auto local = Network::Utility::parseInternetAddressAndPort("127.0.2.1:5353"); + static const auto peer = Network::Utility::parseInternetAddressAndPort("127.0.2.1:55088"); static NiceMock random; - DnsMessageParser message_parser(recurse, retry_count, random); - - const auto local = Network::Utility::parseInternetAddressAndPort("127.0.2.1:5353"); - const auto peer = Network::Utility::parseInternetAddressAndPort("127.0.2.1:55088"); + static NiceMock histogram; + histogram.unit_ = Stats::Histogram::Unit::Milliseconds; + static Api::ApiPtr api = Api::createApiForTest(); + static NiceMock mock_query_buffer_underflow; + static NiceMock mock_record_name_overflow; + static NiceMock query_parsing_failure; + 
static DnsParserCounters counters(mock_query_buffer_underflow, mock_record_name_overflow, + query_parsing_failure); + FuzzedDataProvider data_provider(buf, len); Buffer::InstancePtr query_buffer = std::make_unique(); - const std::string query = generateQuery(&data_provider); - query_buffer->add(query.data(), query.size()); - const uint8_t fuzz_function = data_provider.ConsumeIntegralInRange(0, 2); - switch (fuzz_function) { - case 0: { - DnsQueryContextPtr query_context = std::make_unique(local, peer); - bool result = message_parser.parseDnsObject(query_context, query_buffer); - UNREFERENCED_PARAMETER(result); - } break; + while (data_provider.remaining_bytes()) { + const std::string query = data_provider.ConsumeRandomLengthString(1024); + query_buffer->add(query.data(), query.size()); + + const uint16_t retry_count = data_provider.ConsumeIntegralInRange(0, 3); + DnsMessageParser message_parser(true, api->timeSource(), retry_count, random, histogram); + uint64_t offset = data_provider.ConsumeIntegralInRange(0, query.size()); + + const uint8_t fuzz_function = data_provider.ConsumeIntegralInRange(0, 2); + switch (fuzz_function) { + case 0: { + DnsQueryContextPtr query_context = + std::make_unique(local, peer, counters, retry_count); + bool result = message_parser.parseDnsObject(query_context, query_buffer); + UNREFERENCED_PARAMETER(result); + } break; - case 1: { - uint64_t offset = data_provider.ConsumeIntegralInRange(0, query_buffer->length()); - DnsQueryRecordPtr ptr = message_parser.parseDnsQueryRecord(query_buffer, &offset); - UNREFERENCED_PARAMETER(ptr); - } break; + case 1: { + DnsQueryRecordPtr ptr = message_parser.parseDnsQueryRecord(query_buffer, &offset); + UNREFERENCED_PARAMETER(ptr); + } break; - case 2: { - uint64_t offset = data_provider.ConsumeIntegralInRange(0, query_buffer->length()); - DnsAnswerRecordPtr ptr = message_parser.parseDnsAnswerRecord(query_buffer, &offset); - UNREFERENCED_PARAMETER(ptr); - } break; - } // end case + case 2: { + DnsAnswerRecordPtr ptr = message_parser.parseDnsAnswerRecord(query_buffer, &offset); + UNREFERENCED_PARAMETER(ptr); + } break; + } // end case + query_buffer->drain(query_buffer->length()); + } } } // namespace } // namespace DnsFilter diff --git a/test/extensions/filters/udp/dns_filter/dns_filter_integration_test.cc b/test/extensions/filters/udp/dns_filter/dns_filter_integration_test.cc index cf5f407d5eaa..46d1e8ff070f 100644 --- a/test/extensions/filters/udp/dns_filter/dns_filter_integration_test.cc +++ b/test/extensions/filters/udp/dns_filter/dns_filter_integration_test.cc @@ -16,41 +16,130 @@ namespace { class DnsFilterIntegrationTest : public testing::TestWithParam, public BaseIntegrationTest { public: - DnsFilterIntegrationTest() : BaseIntegrationTest(GetParam(), configToUse()) { + DnsFilterIntegrationTest() + : BaseIntegrationTest(GetParam(), configToUse()), api_(Api::createApiForTest()), + counters_(mock_query_buffer_underflow_, mock_record_name_overflow_, + query_parsing_failure_) { setupResponseParser(); } void setupResponseParser() { - response_parser_ = std::make_unique(true /*recursive queries */, - 0 /* retry_count */, random_); + histogram_.unit_ = Stats::Histogram::Unit::Milliseconds; + response_parser_ = std::make_unique( + true /* recursive queries */, api_->timeSource(), 0 /* retries */, random_, histogram_); } static std::string configToUse() { - return absl::StrCat(ConfigHelper::baseUdpListenerConfig(), R"EOF( - listener_filters: - name: "envoy.filters.udp.dns_filter" - typed_config: - '@type': 
'type.googleapis.com/envoy.extensions.filters.udp.dns_filter.v3alpha.DnsFilterConfig' - stat_prefix: "my_prefix" - server_config: - inline_dns_table: - external_retry_count: 3 - known_suffixes: - - suffix: "foo1.com" - - suffix: "cluster_0" - virtual_domains: - - name: "www.foo1.com" - endpoint: - address_list: - address: - - 10.0.0.1 - - 10.0.0.2 - - 10.0.0.3 - - 10.0.0.4 - - name: "cluster.foo1.com" - endpoint: - cluster_name: "cluster_0" - )EOF"); + return fmt::format(R"EOF( +admin: + access_log_path: {} + address: + socket_address: + address: 127.0.0.1 + port_value: 0 +static_resources: + clusters: + name: cluster_0 + load_assignment: + cluster_name: cluster_0 + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 0 + )EOF", + TestEnvironment::nullDevicePath()); + } + + Network::Address::InstanceConstSharedPtr getListenerBindAddressAndPort() { + auto addr = Network::Utility::parseInternetAddressAndPort( + fmt::format("{}:{}", Envoy::Network::Test::getLoopbackAddressUrlString(version_), 0), + false); + + ASSERT(addr != nullptr); + + addr = Network::Test::findOrCheckFreePort(addr, Network::Socket::Type::Datagram); + ASSERT(addr != nullptr && addr->ip() != nullptr); + + return addr; + } + + envoy::config::listener::v3::Listener + getListener0(Network::Address::InstanceConstSharedPtr& addr) { + auto config = fmt::format(R"EOF( +name: listener_0 +address: + socket_address: + address: {} + port_value: 0 + protocol: udp +listener_filters: + name: "envoy.filters.udp.dns_filter" + typed_config: + '@type': 'type.googleapis.com/envoy.extensions.filters.udp.dns_filter.v3alpha.DnsFilterConfig' + stat_prefix: "my_prefix" + client_config: + resolver_timeout: 1s + upstream_resolvers: + - socket_address: + address: {} + port_value: {} + max_pending_lookups: 256 + server_config: + inline_dns_table: + external_retry_count: 0 + known_suffixes: + - suffix: "foo1.com" + - suffix: "cluster_0" + virtual_domains: + - name: "www.foo1.com" + endpoint: + address_list: + address: + - 10.0.0.1 + - 10.0.0.2 + - 10.0.0.3 + - 10.0.0.4 + - name: "cluster.foo1.com" + endpoint: + cluster_name: "cluster_0" +)EOF", + addr->ip()->addressAsString(), addr->ip()->addressAsString(), + addr->ip()->port()); + return TestUtility::parseYaml(config); + } + + envoy::config::listener::v3::Listener + getListener1(Network::Address::InstanceConstSharedPtr& addr) { + auto config = fmt::format(R"EOF( +name: listener_1 +address: + socket_address: + address: {} + port_value: {} + protocol: udp +listener_filters: + name: "envoy.filters.udp.dns_filter" + typed_config: + '@type': 'type.googleapis.com/envoy.extensions.filters.udp.dns_filter.v3alpha.DnsFilterConfig' + stat_prefix: "external_resolver" + server_config: + inline_dns_table: + external_retry_count: 0 + known_suffixes: + - suffix: "google.com" + virtual_domains: + - name: "www.google.com" + endpoint: + address_list: + address: + - 42.42.42.42 + - 2607:42:42::42:42 +)EOF", + addr->ip()->addressAsString(), addr->ip()->port()); + return TestUtility::parseYaml(config); } void setup(uint32_t upstream_count) { @@ -72,6 +161,15 @@ class DnsFilterIntegrationTest : public testing::TestWithParamadd_listeners()->MergeFrom(listener_0); + bootstrap.mutable_static_resources()->add_listeners()->MergeFrom(listener_1); + }); + BaseIntegrationTest::initialize(); } @@ -83,7 +181,13 @@ class DnsFilterIntegrationTest : public testing::TestWithParam histogram_; NiceMock random_; + NiceMock mock_query_buffer_underflow_; + NiceMock 
mock_record_name_overflow_; + NiceMock query_parsing_failure_; + DnsParserCounters counters_; std::unique_ptr response_parser_; DnsQueryContextPtr query_ctx_; }; @@ -92,6 +196,42 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, DnsFilterIntegrationTest, testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), TestUtility::ipTestParamsToString); +TEST_P(DnsFilterIntegrationTest, ExternalLookupTest) { + setup(0); + const uint32_t port = lookupPort("listener_0"); + const auto listener_address = Network::Utility::resolveUrl( + fmt::format("tcp://{}:{}", Network::Test::getLoopbackAddressUrlString(version_), port)); + + Network::UdpRecvData response; + std::string query = + Utils::buildQueryForDomain("www.google.com", DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN); + requestResponseWithListenerAddress(*listener_address, query, response); + + query_ctx_ = response_parser_->createQueryContext(response, counters_); + EXPECT_TRUE(query_ctx_->parse_status_); + + EXPECT_EQ(1, query_ctx_->answers_.size()); + EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode()); +} + +TEST_P(DnsFilterIntegrationTest, ExternalLookupTestIPv6) { + setup(0); + const uint32_t port = lookupPort("listener_0"); + const auto listener_address = Network::Utility::resolveUrl( + fmt::format("tcp://{}:{}", Network::Test::getLoopbackAddressUrlString(version_), port)); + + Network::UdpRecvData response; + std::string query = + Utils::buildQueryForDomain("www.google.com", DNS_RECORD_TYPE_AAAA, DNS_RECORD_CLASS_IN); + requestResponseWithListenerAddress(*listener_address, query, response); + + query_ctx_ = response_parser_->createQueryContext(response, counters_); + EXPECT_TRUE(query_ctx_->parse_status_); + + EXPECT_EQ(1, query_ctx_->answers_.size()); + EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode()); +} + TEST_P(DnsFilterIntegrationTest, LocalLookupTest) { setup(0); const uint32_t port = lookupPort("listener_0"); @@ -103,7 +243,7 @@ TEST_P(DnsFilterIntegrationTest, LocalLookupTest) { Utils::buildQueryForDomain("www.foo1.com", DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN); requestResponseWithListenerAddress(*listener_address, query, response); - query_ctx_ = response_parser_->createQueryContext(response); + query_ctx_ = response_parser_->createQueryContext(response, counters_); EXPECT_TRUE(query_ctx_->parse_status_); EXPECT_EQ(4, query_ctx_->answers_.size()); @@ -127,7 +267,7 @@ TEST_P(DnsFilterIntegrationTest, ClusterLookupTest) { std::string query = Utils::buildQueryForDomain("cluster_0", record_type, DNS_RECORD_CLASS_IN); requestResponseWithListenerAddress(*listener_address, query, response); - query_ctx_ = response_parser_->createQueryContext(response); + query_ctx_ = response_parser_->createQueryContext(response, counters_); EXPECT_TRUE(query_ctx_->parse_status_); EXPECT_EQ(2, query_ctx_->answers_.size()); @@ -152,7 +292,7 @@ TEST_P(DnsFilterIntegrationTest, ClusterEndpointLookupTest) { Utils::buildQueryForDomain("cluster.foo1.com", record_type, DNS_RECORD_CLASS_IN); requestResponseWithListenerAddress(*listener_address, query, response); - query_ctx_ = response_parser_->createQueryContext(response); + query_ctx_ = response_parser_->createQueryContext(response, counters_); EXPECT_TRUE(query_ctx_->parse_status_); EXPECT_EQ(2, query_ctx_->answers_.size()); diff --git a/test/extensions/filters/udp/dns_filter/dns_filter_test.cc b/test/extensions/filters/udp/dns_filter/dns_filter_test.cc index 942a6d88fea3..d0d1b151be7d 100644 --- a/test/extensions/filters/udp/dns_filter/dns_filter_test.cc +++ 
b/test/extensions/filters/udp/dns_filter/dns_filter_test.cc @@ -12,10 +12,13 @@ #include "gmock/gmock.h" #include "gtest/gtest.h" +using testing::AnyNumber; using testing::AtLeast; using testing::InSequence; +using testing::Mock; using testing::Return; using testing::ReturnRef; +using testing::SaveArg; namespace Envoy { namespace Extensions { @@ -29,18 +32,18 @@ Api::IoCallUint64Result makeNoError(uint64_t rc) { return no_error; } -class DnsFilterTest : public testing::Test { +class DnsFilterTest : public testing::Test, public Event::TestUsingSimulatedTime { public: DnsFilterTest() : listener_address_(Network::Utility::parseInternetAddressAndPort("127.0.2.1:5353")), - api_(Api::createApiForTest()) { - - response_parser_ = - std::make_unique(true /* recursive queries */, 0 /* retries */, random_); + api_(Api::createApiForTest()), + counters_(mock_query_buffer_underflow_, mock_record_name_overflow_, + query_parsing_failure_) { udp_response_.addresses_.local_ = listener_address_; udp_response_.addresses_.peer_ = listener_address_; udp_response_.buffer_ = std::make_unique(); + setupResponseParser(); EXPECT_CALL(callbacks_, udpListener()).Times(AtLeast(0)); EXPECT_CALL(callbacks_.udp_listener_, send(_)) .WillRepeatedly( @@ -53,16 +56,23 @@ class DnsFilterTest : public testing::Test { ~DnsFilterTest() override { EXPECT_CALL(callbacks_.udp_listener_, onDestroy()); } + void setupResponseParser() { + histogram_.unit_ = Stats::Histogram::Unit::Milliseconds; + response_parser_ = std::make_unique( + true /* recursive queries */, api_->timeSource(), 0 /* retries */, random_, histogram_); + } + void setup(const std::string& yaml) { envoy::extensions::filters::udp::dns_filter::v3alpha::DnsFilterConfig config; TestUtility::loadFromYamlAndValidate(yaml, config); auto store = stats_store_.createScope("dns_scope"); - EXPECT_CALL(listener_factory_, scope()).WillOnce(ReturnRef(*store)); - EXPECT_CALL(listener_factory_, dispatcher()).Times(AtLeast(0)); - EXPECT_CALL(listener_factory_, clusterManager()).Times(AtLeast(0)); - EXPECT_CALL(listener_factory_, api()).WillOnce(ReturnRef(*api_)); + ON_CALL(listener_factory_, scope()).WillByDefault(ReturnRef(*store)); + ON_CALL(listener_factory_, api()).WillByDefault(ReturnRef(*api_)); ON_CALL(random_, random()).WillByDefault(Return(3)); - EXPECT_CALL(listener_factory_, random()).WillOnce(ReturnRef(random_)); + ON_CALL(listener_factory_, random()).WillByDefault(ReturnRef(random_)); + + resolver_ = std::make_shared(); + ON_CALL(dispatcher_, createDnsResolver(_, _)).WillByDefault(Return(resolver_)); config_ = std::make_shared(listener_factory_, config); filter_ = std::make_unique(callbacks_, config_); @@ -80,15 +90,20 @@ class DnsFilterTest : public testing::Test { const Network::Address::InstanceConstSharedPtr listener_address_; Api::ApiPtr api_; DnsFilterEnvoyConfigSharedPtr config_; + NiceMock mock_query_buffer_underflow_; + NiceMock mock_record_name_overflow_; + NiceMock query_parsing_failure_; + DnsParserCounters counters_; DnsQueryContextPtr query_ctx_; - Event::MockDispatcher dispatcher_; + NiceMock dispatcher_; Network::MockUdpReadFilterCallbacks callbacks_; Network::UdpRecvData udp_response_; NiceMock file_system_; NiceMock histogram_; NiceMock random_; - Server::Configuration::MockListenerFactoryContext listener_factory_; + NiceMock listener_factory_; Stats::IsolatedStoreImpl stats_store_; + std::shared_ptr resolver_; std::unique_ptr filter_; std::unique_ptr response_parser_; @@ -159,14 +174,21 @@ stat_prefix: "my_prefix" const std::string forward_query_on_config 
= R"EOF( stat_prefix: "my_prefix" client_config: - resolver_timeout: 5s + resolver_timeout: 1s upstream_resolvers: - - "1.1.1.1" - - "8.8.8.8" - - "8.8.4.4" + - socket_address: + address: "1.1.1.1" + port_value: 53 + - socket_address: + address: "8.8.8.8" + port_value: 53 + - socket_address: + address: "8.8.4.4" + port_value: 53 + max_pending_lookups: 1 server_config: inline_dns_table: - external_retry_count: 3 + external_retry_count: 0 known_suffixes: - suffix: foo1.com - suffix: foo2.com @@ -181,9 +203,12 @@ stat_prefix: "my_prefix" const std::string external_dns_table_config = R"EOF( stat_prefix: "my_prefix" client_config: - resolver_timeout: 5s + resolver_timeout: 1s upstream_resolvers: - - "1.1.1.1" + - socket_address: + address: "1.1.1.1" + port_value: 53 + max_pending_lookups: 256 server_config: external_dns_table: filename: {} @@ -239,11 +264,20 @@ TEST_F(DnsFilterTest, InvalidQuery) { setup(forward_query_off_config); sendQueryFromClient("10.0.0.1:1000", "hello"); - query_ctx_ = response_parser_->createQueryContext(udp_response_); + query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_); EXPECT_FALSE(query_ctx_->parse_status_); EXPECT_EQ(DNS_RESPONSE_CODE_FORMAT_ERROR, response_parser_->getQueryResponseCode()); EXPECT_EQ(0, query_ctx_->answers_.size()); + + // Validate stats + EXPECT_EQ(0, config_->stats().a_record_queries_.value()); + EXPECT_EQ(1, config_->stats().downstream_rx_invalid_queries_.value()); + EXPECT_TRUE(config_->stats().downstream_rx_bytes_.used()); + EXPECT_TRUE(config_->stats().downstream_tx_bytes_.used()); + + EXPECT_EQ(DNS_RESPONSE_CODE_FORMAT_ERROR, response_parser_->getQueryResponseCode()); + EXPECT_EQ(0, query_ctx_->answers_.size()); } TEST_F(DnsFilterTest, MaxQueryAndResponseSizeTest) { @@ -259,7 +293,7 @@ TEST_F(DnsFilterTest, MaxQueryAndResponseSizeTest) { sendQueryFromClient("10.0.0.1:1000", query); EXPECT_LT(udp_response_.buffer_->length(), Utils::MAX_UDP_DNS_SIZE); - query_ctx_ = response_parser_->createQueryContext(udp_response_); + query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_); EXPECT_TRUE(query_ctx_->parse_status_); EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode()); @@ -268,6 +302,15 @@ TEST_F(DnsFilterTest, MaxQueryAndResponseSizeTest) { // serialized answer puts the buffer over the 512 byte limit. The query itself is also // around 100 bytes. 
EXPECT_EQ(3, query_ctx_->answers_.size()); + + // Validate stats + EXPECT_EQ(1, config_->stats().aaaa_record_queries_.value()); + + // Although there are only 3 answers returned, the filter did find 8 records for the query + EXPECT_EQ(8, config_->stats().local_aaaa_record_answers_.value()); + EXPECT_EQ(0, config_->stats().downstream_rx_invalid_queries_.value()); + EXPECT_TRUE(config_->stats().downstream_rx_bytes_.used()); + EXPECT_TRUE(config_->stats().downstream_tx_bytes_.used()); } TEST_F(DnsFilterTest, InvalidQueryNameTooLongTest) { @@ -281,11 +324,20 @@ TEST_F(DnsFilterTest, InvalidQueryNameTooLongTest) { sendQueryFromClient("10.0.0.1:1000", query); - query_ctx_ = response_parser_->createQueryContext(udp_response_); + query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_); EXPECT_FALSE(query_ctx_->parse_status_); EXPECT_EQ(DNS_RESPONSE_CODE_FORMAT_ERROR, response_parser_->getQueryResponseCode()); EXPECT_EQ(0, query_ctx_->answers_.size()); + + // Validate stats + EXPECT_EQ(0, config_->stats().a_record_queries_.value()); + EXPECT_EQ(1, config_->stats().downstream_rx_invalid_queries_.value()); + EXPECT_TRUE(config_->stats().downstream_rx_bytes_.used()); + EXPECT_TRUE(config_->stats().downstream_tx_bytes_.used()); + + EXPECT_EQ(DNS_RESPONSE_CODE_FORMAT_ERROR, response_parser_->getQueryResponseCode()); + EXPECT_EQ(0, query_ctx_->answers_.size()); } TEST_F(DnsFilterTest, InvalidLabelNameTooLongTest) { @@ -300,17 +352,27 @@ TEST_F(DnsFilterTest, InvalidLabelNameTooLongTest) { sendQueryFromClient("10.0.0.1:1000", query); - query_ctx_ = response_parser_->createQueryContext(udp_response_); + query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_); EXPECT_FALSE(query_ctx_->parse_status_); EXPECT_EQ(DNS_RESPONSE_CODE_FORMAT_ERROR, response_parser_->getQueryResponseCode()); EXPECT_EQ(0, query_ctx_->answers_.size()); + + // Validate stats + EXPECT_EQ(0, config_->stats().a_record_queries_.value()); + EXPECT_EQ(1, config_->stats().downstream_rx_invalid_queries_.value()); + EXPECT_TRUE(config_->stats().downstream_rx_bytes_.used()); + EXPECT_TRUE(config_->stats().downstream_tx_bytes_.used()); + + EXPECT_EQ(DNS_RESPONSE_CODE_FORMAT_ERROR, response_parser_->getQueryResponseCode()); + EXPECT_EQ(0, query_ctx_->answers_.size()); } TEST_F(DnsFilterTest, SingleTypeAQuery) { InSequence s; setup(forward_query_off_config); + const std::string domain("www.foo3.com"); const std::string query = Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN); @@ -318,7 +380,7 @@ TEST_F(DnsFilterTest, SingleTypeAQuery) { sendQueryFromClient("10.0.0.1:1000", query); - query_ctx_ = response_parser_->createQueryContext(udp_response_); + query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_); EXPECT_TRUE(query_ctx_->parse_status_); EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode()); @@ -330,7 +392,16 @@ TEST_F(DnsFilterTest, SingleTypeAQuery) { // Verify the address returned const std::list expected{"10.0.3.1"}; + Utils::verifyAddress(expected, answer); + + // Validate stats + EXPECT_EQ(1, config_->stats().downstream_rx_queries_.value()); + EXPECT_EQ(1, config_->stats().known_domain_queries_.value()); + EXPECT_EQ(1, config_->stats().local_a_record_answers_.value()); + EXPECT_EQ(1, config_->stats().a_record_queries_.value()); + EXPECT_TRUE(config_->stats().downstream_rx_bytes_.used()); + EXPECT_TRUE(config_->stats().downstream_tx_bytes_.used()); } TEST_F(DnsFilterTest, RepeatedTypeAQuerySuccess) { @@ -348,7 +419,7 @@ 
TEST_F(DnsFilterTest, RepeatedTypeAQuerySuccess) { ASSERT_FALSE(query.empty()); sendQueryFromClient("10.0.0.1:1000", query); - query_ctx_ = response_parser_->createQueryContext(udp_response_); + query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_); EXPECT_TRUE(query_ctx_->parse_status_); EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode()); @@ -361,6 +432,12 @@ TEST_F(DnsFilterTest, RepeatedTypeAQuerySuccess) { std::list expected{"10.0.3.1"}; Utils::verifyAddress(expected, answer); } + + // Validate stats + EXPECT_EQ(loopCount, config_->stats().downstream_rx_queries_.value()); + EXPECT_EQ(loopCount, config_->stats().known_domain_queries_.value()); + EXPECT_EQ(loopCount, config_->stats().local_a_record_answers_.value()); + EXPECT_EQ(loopCount, config_->stats().a_record_queries_.value()); } TEST_F(DnsFilterTest, LocalTypeAQueryFail) { @@ -372,11 +449,18 @@ TEST_F(DnsFilterTest, LocalTypeAQueryFail) { ASSERT_FALSE(query.empty()); sendQueryFromClient("10.0.0.1:1000", query); - query_ctx_ = response_parser_->createQueryContext(udp_response_); + query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_); EXPECT_TRUE(query_ctx_->parse_status_); - EXPECT_EQ(3, response_parser_->getQueryResponseCode()); + EXPECT_EQ(DNS_RESPONSE_CODE_NAME_ERROR, response_parser_->getQueryResponseCode()); EXPECT_EQ(0, query_ctx_->answers_.size()); + + // Validate stats + EXPECT_EQ(1, config_->stats().downstream_rx_queries_.value()); + EXPECT_EQ(1, config_->stats().known_domain_queries_.value()); + EXPECT_EQ(3, config_->stats().local_a_record_answers_.value()); + EXPECT_EQ(1, config_->stats().a_record_queries_.value()); + EXPECT_EQ(1, config_->stats().unanswered_queries_.value()); } TEST_F(DnsFilterTest, LocalTypeAAAAQuerySuccess) { @@ -390,7 +474,7 @@ TEST_F(DnsFilterTest, LocalTypeAAAAQuerySuccess) { ASSERT_FALSE(query.empty()); sendQueryFromClient("10.0.0.1:1000", query); - query_ctx_ = response_parser_->createQueryContext(udp_response_); + query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_); EXPECT_TRUE(query_ctx_->parse_status_); EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode()); @@ -401,6 +485,349 @@ TEST_F(DnsFilterTest, LocalTypeAAAAQuerySuccess) { EXPECT_EQ(answer.first, domain); Utils::verifyAddress(expected, answer.second); } + + // Validate stats + EXPECT_EQ(1, config_->stats().downstream_rx_queries_.value()); + EXPECT_EQ(1, config_->stats().known_domain_queries_.value()); + EXPECT_EQ(3, config_->stats().local_aaaa_record_answers_.value()); + EXPECT_EQ(1, config_->stats().aaaa_record_queries_.value()); +} + +TEST_F(DnsFilterTest, ExternalResolutionReturnSingleAddress) { + InSequence s; + + auto timeout_timer = new NiceMock(&dispatcher_); + EXPECT_CALL(*timeout_timer, enableTimer(_, _)).Times(1); + + const std::string expected_address("130.207.244.251"); + const std::string domain("www.foobaz.com"); + setup(forward_query_on_config); + + const std::string query = + Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN); + ASSERT_FALSE(query.empty()); + + // Verify that we are calling the resolver with the expected name + Network::DnsResolver::ResolveCb resolve_cb; + EXPECT_CALL(*resolver_, resolve(domain, _, _)) + .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_))); + + // Send a query to for a name not in our configuration + sendQueryFromClient("10.0.0.1:1000", query); + + EXPECT_CALL(*timeout_timer, disableTimer()).Times(AnyNumber()); + + // 
Execute resolve callback + resolve_cb(Network::DnsResolver::ResolutionStatus::Success, + TestUtility::makeDnsResponse({expected_address})); + + // parse the result + query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_); + EXPECT_TRUE(query_ctx_->parse_status_); + + EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode()); + EXPECT_EQ(1, query_ctx_->answers_.size()); + + std::list expected{expected_address}; + for (const auto& answer : query_ctx_->answers_) { + EXPECT_EQ(answer.first, domain); + Utils::verifyAddress(expected, answer.second); + } + + // Validate stats + EXPECT_EQ(1, config_->stats().downstream_rx_queries_.value()); + EXPECT_EQ(1, config_->stats().external_a_record_queries_.value()); + EXPECT_EQ(1, config_->stats().external_a_record_answers_.value()); + EXPECT_EQ(1, config_->stats().a_record_queries_.value()); + EXPECT_EQ(0, config_->stats().aaaa_record_queries_.value()); + EXPECT_EQ(0, config_->stats().unanswered_queries_.value()); + + EXPECT_TRUE(Mock::VerifyAndClearExpectations(resolver_.get())); +} + +TEST_F(DnsFilterTest, ExternalResolutionIpv6SingleAddress) { + InSequence s; + + auto timeout_timer = new NiceMock(&dispatcher_); + EXPECT_CALL(*timeout_timer, enableTimer(_, _)).Times(1); + + const std::string expected_address("2a04:4e42:d::323"); + const std::string domain("www.foobaz.com"); + + setup(forward_query_on_config); + + // Verify that we are calling the resolver with the expected name + Network::DnsResolver::ResolveCb resolve_cb; + EXPECT_CALL(*resolver_, resolve(domain, _, _)) + .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_))); + + const std::string query = + Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_AAAA, DNS_RECORD_CLASS_IN); + ASSERT_FALSE(query.empty()); + + // Send a query to for a name not in our configuration + sendQueryFromClient("10.0.0.1:1000", query); + + EXPECT_CALL(*timeout_timer, disableTimer()).Times(1); + + // Execute resolve callback + resolve_cb(Network::DnsResolver::ResolutionStatus::Success, + TestUtility::makeDnsResponse({expected_address})); + + // parse the result + query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_); + EXPECT_TRUE(query_ctx_->parse_status_); + + EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode()); + EXPECT_EQ(1, query_ctx_->answers_.size()); + + std::list expected{expected_address}; + for (const auto& answer : query_ctx_->answers_) { + EXPECT_EQ(answer.first, domain); + Utils::verifyAddress(expected, answer.second); + } + + // Validate stats + EXPECT_EQ(1, config_->stats().downstream_rx_queries_.value()); + EXPECT_EQ(1, config_->stats().external_aaaa_record_queries_.value()); + EXPECT_EQ(1, config_->stats().external_aaaa_record_answers_.value()); + EXPECT_EQ(1, config_->stats().aaaa_record_queries_.value()); + EXPECT_EQ(0, config_->stats().a_record_queries_.value()); + EXPECT_EQ(0, config_->stats().unanswered_queries_.value()); + + EXPECT_TRUE(Mock::VerifyAndClearExpectations(resolver_.get())); +} + +TEST_F(DnsFilterTest, ExternalResolutionReturnMultipleAddresses) { + InSequence s; + + auto timeout_timer = new NiceMock(&dispatcher_); + EXPECT_CALL(*timeout_timer, enableTimer(_, _)).Times(1); + + const std::list expected_address{"130.207.244.251", "130.207.244.252", + "130.207.244.253", "130.207.244.254"}; + const std::string domain("www.foobaz.com"); + setup(forward_query_on_config); + + // Verify that we are calling the resolver with the expected name + Network::DnsResolver::ResolveCb 
resolve_cb; + EXPECT_CALL(*resolver_, resolve(domain, _, _)) + .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_))); + + const std::string query = + Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN); + ASSERT_FALSE(query.empty()); + + // Send a query to for a name not in our configuration + sendQueryFromClient("10.0.0.1:1000", query); + + EXPECT_CALL(*timeout_timer, disableTimer()).Times(1); + + // Execute resolve callback + resolve_cb(Network::DnsResolver::ResolutionStatus::Success, + TestUtility::makeDnsResponse({expected_address})); + + // parse the result + query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_); + EXPECT_TRUE(query_ctx_->parse_status_); + + EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode()); + EXPECT_EQ(expected_address.size(), query_ctx_->answers_.size()); + + EXPECT_LT(udp_response_.buffer_->length(), Utils::MAX_UDP_DNS_SIZE); + + for (const auto& answer : query_ctx_->answers_) { + EXPECT_EQ(answer.first, domain); + Utils::verifyAddress(expected_address, answer.second); + } + + // Validate stats + EXPECT_EQ(1, config_->stats().downstream_rx_queries_.value()); + EXPECT_EQ(1, config_->stats().external_a_record_queries_.value()); + EXPECT_EQ(expected_address.size(), config_->stats().external_a_record_answers_.value()); + EXPECT_EQ(1, config_->stats().a_record_queries_.value()); + EXPECT_EQ(0, config_->stats().aaaa_record_queries_.value()); + EXPECT_EQ(0, config_->stats().unanswered_queries_.value()); + + EXPECT_TRUE(Mock::VerifyAndClearExpectations(resolver_.get())); +} + +TEST_F(DnsFilterTest, ExternalResolutionReturnNoAddresses) { + InSequence s; + + auto timeout_timer = new NiceMock(&dispatcher_); + EXPECT_CALL(*timeout_timer, enableTimer(_, _)).Times(1); + + const std::string domain("www.foobaz.com"); + setup(forward_query_on_config); + + // Verify that we are calling the resolver with the expected name + Network::DnsResolver::ResolveCb resolve_cb; + EXPECT_CALL(*resolver_, resolve(domain, _, _)) + .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_))); + + const std::string query = + Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN); + ASSERT_FALSE(query.empty()); + + // Send a query to for a name not in our configuration + sendQueryFromClient("10.0.0.1:1000", query); + + EXPECT_CALL(*timeout_timer, disableTimer()).Times(1); + + // Execute resolve callback + resolve_cb(Network::DnsResolver::ResolutionStatus::Success, TestUtility::makeDnsResponse({})); + + // parse the result + query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_); + EXPECT_TRUE(query_ctx_->parse_status_); + EXPECT_EQ(DNS_RESPONSE_CODE_NAME_ERROR, response_parser_->getQueryResponseCode()); + EXPECT_EQ(0, query_ctx_->answers_.size()); + + // Validate stats + EXPECT_EQ(1, config_->stats().downstream_rx_queries_.value()); + EXPECT_EQ(1, config_->stats().external_a_record_queries_.value()); + EXPECT_EQ(0, config_->stats().external_a_record_answers_.value()); + EXPECT_EQ(1, config_->stats().a_record_queries_.value()); + EXPECT_EQ(0, config_->stats().aaaa_record_queries_.value()); + EXPECT_EQ(1, config_->stats().unanswered_queries_.value()); + + EXPECT_TRUE(Mock::VerifyAndClearExpectations(resolver_.get())); +} + +TEST_F(DnsFilterTest, ExternalResolutionTimeout) { + InSequence s; + + auto timeout_timer = new NiceMock(&dispatcher_); + EXPECT_CALL(*timeout_timer, enableTimer(_, _)).Times(1); + + const std::string domain("www.foobaz.com"); + 
setup(forward_query_on_config); + + const std::string query = + Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN); + ASSERT_FALSE(query.empty()); + + EXPECT_CALL(*resolver_, resolve(domain, _, _)).WillOnce(Return(&resolver_->active_query_)); + + // Send a query to for a name not in our configuration + sendQueryFromClient("10.0.0.1:1000", query); + simTime().advanceTimeWait(std::chrono::milliseconds(1500)); + + // Execute timeout timer callback + timeout_timer->invokeCallback(); + + // parse the result + query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_); + EXPECT_TRUE(query_ctx_->parse_status_); + EXPECT_EQ(DNS_RESPONSE_CODE_NAME_ERROR, response_parser_->getQueryResponseCode()); + EXPECT_EQ(0, query_ctx_->answers_.size()); + + // Validate stats + EXPECT_EQ(1, config_->stats().downstream_rx_queries_.value()); + EXPECT_EQ(1, config_->stats().external_a_record_queries_.value()); + EXPECT_EQ(0, config_->stats().external_a_record_answers_.value()); + EXPECT_EQ(1, config_->stats().a_record_queries_.value()); + EXPECT_EQ(0, config_->stats().aaaa_record_queries_.value()); + EXPECT_EQ(1, config_->stats().unanswered_queries_.value()); + + EXPECT_TRUE(Mock::VerifyAndClearExpectations(resolver_.get())); +} + +TEST_F(DnsFilterTest, ExternalResolutionTimeout2) { + InSequence s; + + auto timeout_timer = new NiceMock(&dispatcher_); + EXPECT_CALL(*timeout_timer, enableTimer(_, _)).Times(1); + + const std::string domain("www.foobaz.com"); + setup(forward_query_on_config); + + const std::string query = + Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN); + ASSERT_FALSE(query.empty()); + + // Verify that we are calling the resolver with the expected name + Network::DnsResolver::ResolveCb resolve_cb; + EXPECT_CALL(*resolver_, resolve(domain, _, _)) + .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_))); + + // Send a query to for a name not in our configuration + sendQueryFromClient("10.0.0.1:1000", query); + simTime().advanceTimeWait(std::chrono::milliseconds(1500)); + + // Execute timeout timer callback + timeout_timer->invokeCallback(); + + // Execute resolve callback. This should harmlessly return and not alter + // the response received by the client. 
Even though we are returning a successful + // response, the client does not get an answer + resolve_cb(Network::DnsResolver::ResolutionStatus::Success, + TestUtility::makeDnsResponse({"130.207.244.251"})); + + // parse the result + query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_); + EXPECT_TRUE(query_ctx_->parse_status_); + EXPECT_EQ(DNS_RESPONSE_CODE_NAME_ERROR, response_parser_->getQueryResponseCode()); + EXPECT_EQ(0, query_ctx_->answers_.size()); + + // Validate stats + EXPECT_EQ(1, config_->stats().downstream_rx_queries_.value()); + EXPECT_EQ(1, config_->stats().external_a_record_queries_.value()); + EXPECT_EQ(0, config_->stats().external_a_record_answers_.value()); + EXPECT_EQ(1, config_->stats().a_record_queries_.value()); + EXPECT_EQ(0, config_->stats().aaaa_record_queries_.value()); + EXPECT_EQ(1, config_->stats().unanswered_queries_.value()); + + EXPECT_TRUE(Mock::VerifyAndClearExpectations(resolver_.get())); +} + +TEST_F(DnsFilterTest, ExternalResolutionExceedMaxPendingLookups) { + InSequence s; + + const std::string domain("www.foobaz.com"); + setup(forward_query_on_config); + const std::string query1 = + Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN); + ASSERT_FALSE(query1.empty()); + + const std::string query2 = + Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_AAAA, DNS_RECORD_CLASS_IN); + ASSERT_FALSE(query2.empty()); + + const std::string query3 = + Utils::buildQueryForDomain(domain, DNS_RECORD_TYPE_A, DNS_RECORD_CLASS_IN); + ASSERT_FALSE(query3.empty()); + + // Send the first query. This will remain 'in-flight' + EXPECT_CALL(dispatcher_, createTimer_(_)); + EXPECT_CALL(*resolver_, resolve(domain, _, _)); + sendQueryFromClient("10.0.0.1:1000", query1); + + // Send the second query. This will remain 'in-flight' also + EXPECT_CALL(dispatcher_, createTimer_(_)); + EXPECT_CALL(*resolver_, resolve(domain, _, _)); + sendQueryFromClient("10.0.0.1:1000", query2); + + // The third query should be rejected since pending queries (2) > 1, and + // we've disabled retries. The client will get a response for this single + // query + sendQueryFromClient("10.0.0.1:1000", query3); + + // Parse the result for the third query. 
Since the first two queries are + // still in flight, the third query is the only one to generate a response + query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_); + EXPECT_TRUE(query_ctx_->parse_status_); + EXPECT_EQ(0, query_ctx_->answers_.size()); + EXPECT_EQ(DNS_RESPONSE_CODE_NAME_ERROR, response_parser_->getQueryResponseCode()); + + // Validate stats + EXPECT_EQ(3, config_->stats().downstream_rx_queries_.value()); + EXPECT_EQ(1, config_->stats().external_a_record_queries_.value()); + EXPECT_EQ(0, config_->stats().external_a_record_answers_.value()); + EXPECT_EQ(2, config_->stats().a_record_queries_.value()); + EXPECT_EQ(1, config_->stats().aaaa_record_queries_.value()); + EXPECT_EQ(1, config_->stats().unanswered_queries_.value()); } TEST_F(DnsFilterTest, ConsumeExternalJsonTableTest) { @@ -418,7 +845,7 @@ TEST_F(DnsFilterTest, ConsumeExternalJsonTableTest) { ASSERT_FALSE(query.empty()); sendQueryFromClient("10.0.0.1:1000", query); - query_ctx_ = response_parser_->createQueryContext(udp_response_); + query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_); EXPECT_TRUE(query_ctx_->parse_status_); EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode()); EXPECT_EQ(2, query_ctx_->answers_.size()); @@ -429,6 +856,12 @@ TEST_F(DnsFilterTest, ConsumeExternalJsonTableTest) { EXPECT_EQ(answer.first, domain); Utils::verifyAddress(expected, answer.second); } + + // Validate stats + ASSERT_EQ(1, config_->stats().downstream_rx_queries_.value()); + ASSERT_EQ(1, config_->stats().known_domain_queries_.value()); + ASSERT_EQ(2, config_->stats().local_a_record_answers_.value()); + ASSERT_EQ(1, config_->stats().a_record_queries_.value()); } TEST_F(DnsFilterTest, ConsumeExternalYamlTableTest) { @@ -446,7 +879,7 @@ TEST_F(DnsFilterTest, ConsumeExternalYamlTableTest) { ASSERT_FALSE(query.empty()); sendQueryFromClient("10.0.0.1:1000", query); - query_ctx_ = response_parser_->createQueryContext(udp_response_); + query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_); EXPECT_TRUE(query_ctx_->parse_status_); EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode()); EXPECT_EQ(2, query_ctx_->answers_.size()); @@ -457,6 +890,12 @@ TEST_F(DnsFilterTest, ConsumeExternalYamlTableTest) { EXPECT_EQ(answer.first, domain); Utils::verifyAddress(expected, answer.second); } + + // Validate stats + EXPECT_EQ(1, config_->stats().downstream_rx_queries_.value()); + EXPECT_EQ(1, config_->stats().known_domain_queries_.value()); + EXPECT_EQ(2, config_->stats().local_a_record_answers_.value()); + EXPECT_EQ(1, config_->stats().a_record_queries_.value()); } TEST_F(DnsFilterTest, RawBufferTest) { @@ -483,7 +922,7 @@ TEST_F(DnsFilterTest, RawBufferTest) { sendQueryFromClient("10.0.0.1:1000", query); - query_ctx_ = response_parser_->createQueryContext(udp_response_); + query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_); EXPECT_TRUE(query_ctx_->parse_status_); EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode()); EXPECT_EQ(1, query_ctx_->answers_.size()); @@ -500,6 +939,7 @@ TEST_F(DnsFilterTest, InvalidQueryNameTest) { InSequence s; setup(forward_query_off_config); + // In this buffer the name segment sizes are incorrect. 
The filter will indicate that the parsing // failed constexpr char dns_request[] = { @@ -520,9 +960,12 @@ TEST_F(DnsFilterTest, InvalidQueryNameTest) { sendQueryFromClient("10.0.0.1:1000", query); - query_ctx_ = response_parser_->createQueryContext(udp_response_); + query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_); EXPECT_FALSE(query_ctx_->parse_status_); EXPECT_EQ(DNS_RESPONSE_CODE_FORMAT_ERROR, response_parser_->getQueryResponseCode()); + + // TODO(abaptiste): underflow stats + EXPECT_EQ(1, config_->stats().downstream_rx_invalid_queries_.value()); } TEST_F(DnsFilterTest, InvalidQueryNameTest2) { @@ -549,9 +992,12 @@ TEST_F(DnsFilterTest, InvalidQueryNameTest2) { sendQueryFromClient("10.0.0.1:1000", query); - query_ctx_ = response_parser_->createQueryContext(udp_response_); + query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_); EXPECT_FALSE(query_ctx_->parse_status_); EXPECT_EQ(DNS_RESPONSE_CODE_FORMAT_ERROR, response_parser_->getQueryResponseCode()); + + // TODO(abaptiste): underflow/overflow stats + EXPECT_EQ(1, config_->stats().downstream_rx_invalid_queries_.value()); } TEST_F(DnsFilterTest, MultipleQueryCountTest) { @@ -559,7 +1005,9 @@ TEST_F(DnsFilterTest, MultipleQueryCountTest) { setup(forward_query_off_config); // In this buffer we have 2 queries for two different domains. This is a rare case - // and serves to validate that we handle the protocol correctly. + // and serves to validate that we handle the protocol correctly. We will return an + // error to the client since most implementations will send the two questions as two + // separate DNS queries constexpr char dns_request[] = { 0x36, 0x6d, // Transaction ID 0x01, 0x20, // Flags @@ -582,55 +1030,18 @@ TEST_F(DnsFilterTest, MultipleQueryCountTest) { sendQueryFromClient("10.0.0.1:1000", query); - query_ctx_ = response_parser_->createQueryContext(udp_response_); - EXPECT_TRUE(query_ctx_->parse_status_); - EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode()); - EXPECT_EQ(3, query_ctx_->answers_.size()); + query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_); + EXPECT_FALSE(query_ctx_->parse_status_); + EXPECT_EQ(DNS_RESPONSE_CODE_FORMAT_ERROR, response_parser_->getQueryResponseCode()); - // Verify that the answers contain an entry for each domain - for (const auto& answer : query_ctx_->answers_) { - if (answer.first == "www.foo1.com") { - Utils::verifyAddress({"10.0.0.1", "10.0.0.2"}, answer.second); - } else if (answer.first == "www.foo3.com") { - Utils::verifyAddress({"10.0.3.1"}, answer.second); - } else { - FAIL() << "Unexpected domain in DNS response: " << answer.first; - } - } + EXPECT_EQ(1, config_->stats().downstream_rx_invalid_queries_.value()); + EXPECT_EQ(0, config_->stats().a_record_queries_.value()); + EXPECT_EQ(0, query_ctx_->answers_.size()); } TEST_F(DnsFilterTest, InvalidQueryCountTest) { InSequence s; - setup(forward_query_off_config); - // In this buffer the Questions count is incorrect. We will abort parsing and return a response - // to the client. 
- constexpr char dns_request[] = { - 0x36, 0x6e, // Transaction ID - 0x01, 0x20, // Flags - 0x00, 0x0a, // Questions - 0x00, 0x00, // Answers - 0x00, 0x00, // Authority RRs - 0x00, 0x00, // Additional RRs - 0x03, 0x77, 0x77, 0x77, 0x04, 0x66, 0x6f, // Query record for - 0x6f, 0x33, 0x03, 0x63, 0x6f, 0x6d, 0x00, // www.foo3.com - 0x00, 0x01, // Query Type - A - 0x00, 0x01, // Query Class - IN - }; - - constexpr size_t count = sizeof(dns_request) / sizeof(dns_request[0]); - const std::string query = Utils::buildQueryFromBytes(dns_request, count); - - sendQueryFromClient("10.0.0.1:1000", query); - - query_ctx_ = response_parser_->createQueryContext(udp_response_); - EXPECT_TRUE(query_ctx_->parse_status_); - EXPECT_EQ(DNS_RESPONSE_CODE_FORMAT_ERROR, response_parser_->getQueryResponseCode()); -} - -TEST_F(DnsFilterTest, InvalidQueryCountTest2) { - InSequence s; - setup(forward_query_off_config); // In this buffer the Questions count is zero. This is an invalid query and is handled as such. constexpr char dns_request[] = { @@ -651,16 +1062,21 @@ TEST_F(DnsFilterTest, InvalidQueryCountTest2) { sendQueryFromClient("10.0.0.1:1000", query); - query_ctx_ = response_parser_->createQueryContext(udp_response_); + query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_); EXPECT_FALSE(query_ctx_->parse_status_); EXPECT_EQ(DNS_RESPONSE_CODE_FORMAT_ERROR, response_parser_->getQueryResponseCode()); + + EXPECT_EQ(0, config_->stats().a_record_queries_.value()); + EXPECT_EQ(1, config_->stats().downstream_rx_invalid_queries_.value()); + EXPECT_EQ(0, query_ctx_->answers_.size()); } TEST_F(DnsFilterTest, NotImplementedQueryTest) { InSequence s; setup(forward_query_off_config); - // In this buffer the Questions count is zero. This is an invalid query and is handled as such. + // This buffer requests a CNAME record which we do not support. 
We respond to the client with a + // "not implemented" response code constexpr char dns_request[] = { 0x36, 0x70, // Transaction ID 0x01, 0x20, // Flags @@ -679,9 +1095,12 @@ TEST_F(DnsFilterTest, NotImplementedQueryTest) { sendQueryFromClient("10.0.0.1:1000", query); - query_ctx_ = response_parser_->createQueryContext(udp_response_); + query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_); EXPECT_TRUE(query_ctx_->parse_status_); EXPECT_EQ(DNS_RESPONSE_CODE_NOT_IMPLEMENTED, response_parser_->getQueryResponseCode()); + + EXPECT_EQ(0, config_->stats().a_record_queries_.value()); + EXPECT_EQ(0, config_->stats().downstream_rx_invalid_queries_.value()); } TEST_F(DnsFilterTest, InvalidShortBufferTest) { @@ -693,9 +1112,12 @@ TEST_F(DnsFilterTest, InvalidShortBufferTest) { const std::string query = Utils::buildQueryFromBytes(dns_request, 1); sendQueryFromClient("10.0.0.1:1000", query); - query_ctx_ = response_parser_->createQueryContext(udp_response_); + query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_); EXPECT_FALSE(query_ctx_->parse_status_); EXPECT_EQ(DNS_RESPONSE_CODE_FORMAT_ERROR, response_parser_->getQueryResponseCode()); + + EXPECT_EQ(0, config_->stats().a_record_queries_.value()); + EXPECT_EQ(1, config_->stats().downstream_rx_invalid_queries_.value()); } TEST_F(DnsFilterTest, RandomizeFirstAnswerTest) { @@ -709,7 +1131,7 @@ TEST_F(DnsFilterTest, RandomizeFirstAnswerTest) { ASSERT_FALSE(query.empty()); sendQueryFromClient("10.0.0.1:1000", query); - query_ctx_ = response_parser_->createQueryContext(udp_response_); + query_ctx_ = response_parser_->createQueryContext(udp_response_, counters_); EXPECT_TRUE(query_ctx_->parse_status_); EXPECT_EQ(DNS_RESPONSE_CODE_NO_ERROR, response_parser_->getQueryResponseCode()); @@ -728,6 +1150,7 @@ TEST_F(DnsFilterTest, RandomizeFirstAnswerTest) { EXPECT_NE(0L, resolved_address.compare(*defined_answer_iter++)); } } + } // namespace } // namespace DnsFilter } // namespace UdpFilters From 8c0e2779ca40f8041e56339a9c4448e5e12171b8 Mon Sep 17 00:00:00 2001 From: antonio Date: Sun, 21 Jun 2020 23:55:03 -0400 Subject: [PATCH 399/909] [dispatcher] Tests to capture low-level timer behavior (#11564) Signed-off-by: Antonio Vicente --- test/common/event/dispatcher_impl_test.cc | 269 ++++++++++++++++++++++ 1 file changed, 269 insertions(+) diff --git a/test/common/event/dispatcher_impl_test.cc b/test/common/event/dispatcher_impl_test.cc index f6db84f844bf..f15107f67307 100644 --- a/test/common/event/dispatcher_impl_test.cc +++ b/test/common/event/dispatcher_impl_test.cc @@ -209,6 +209,7 @@ TEST_F(DispatcherImplTest, RunPostCallbacksLocking) { } TEST_F(DispatcherImplTest, Timer) { + timerTest([](Timer& timer) { timer.enableTimer(std::chrono::milliseconds(0)); }); timerTest([](Timer& timer) { timer.enableTimer(std::chrono::milliseconds(50)); }); timerTest([](Timer& timer) { timer.enableHRTimer(std::chrono::microseconds(50)); }); } @@ -339,6 +340,274 @@ TEST(TimerImplTest, TimerEnabledDisabled) { EXPECT_FALSE(timer->enabled()); } +// Timers scheduled at different times execute in order. 
+TEST(TimerImplTest, TimerOrdering) { + Api::ApiPtr api = Api::createApiForTest(); + DispatcherPtr dispatcher(api->allocateDispatcher("test_thread")); + + ReadyWatcher watcher1; + Event::TimerPtr timer1 = dispatcher->createTimer([&] { watcher1.ready(); }); + + ReadyWatcher watcher2; + Event::TimerPtr timer2 = dispatcher->createTimer([&] { watcher2.ready(); }); + + ReadyWatcher watcher3; + Event::TimerPtr timer3 = dispatcher->createTimer([&] { watcher3.ready(); }); + + timer1->enableTimer(std::chrono::milliseconds(0)); + timer2->enableTimer(std::chrono::milliseconds(1)); + timer3->enableTimer(std::chrono::milliseconds(2)); + + // Sleep for 5ms so timers above all trigger in the same loop iteration. + absl::SleepFor(absl::Milliseconds(5)); + + EXPECT_TRUE(timer1->enabled()); + EXPECT_TRUE(timer2->enabled()); + EXPECT_TRUE(timer3->enabled()); + + // Expect watcher calls to happen in order since timers have different times. + InSequence s; + EXPECT_CALL(watcher1, ready()); + EXPECT_CALL(watcher2, ready()); + EXPECT_CALL(watcher3, ready()); + dispatcher->run(Dispatcher::RunType::Block); +} + +// Alarms that are scheduled to execute and are cancelled do not trigger. +TEST(TimerImplTest, TimerOrderAndDisableAlarm) { + Api::ApiPtr api = Api::createApiForTest(); + DispatcherPtr dispatcher(api->allocateDispatcher("test_thread")); + + ReadyWatcher watcher3; + Event::TimerPtr timer3 = dispatcher->createTimer([&] { watcher3.ready(); }); + + ReadyWatcher watcher2; + Event::TimerPtr timer2 = dispatcher->createTimer([&] { watcher2.ready(); }); + + ReadyWatcher watcher1; + Event::TimerPtr timer1 = dispatcher->createTimer([&] { + timer2->disableTimer(); + watcher1.ready(); + }); + + timer1->enableTimer(std::chrono::milliseconds(0)); + timer2->enableTimer(std::chrono::milliseconds(1)); + timer3->enableTimer(std::chrono::milliseconds(2)); + + // Sleep for 5ms so timers above all trigger in the same loop iteration. + absl::SleepFor(absl::Milliseconds(5)); + + EXPECT_TRUE(timer1->enabled()); + EXPECT_TRUE(timer2->enabled()); + EXPECT_TRUE(timer3->enabled()); + + // Expect watcher calls to happen in order since timers have different times. + InSequence s; + EXPECT_CALL(watcher1, ready()); + EXPECT_CALL(watcher3, ready()); + dispatcher->run(Dispatcher::RunType::Block); +} + +// Change the registration time for a timer that is already activated by disabling and re-enabling +// the timer. Verify that execution is delayed. +TEST(TimerImplTest, TimerOrderDisableAndReschedule) { + Api::ApiPtr api = Api::createApiForTest(); + DispatcherPtr dispatcher(api->allocateDispatcher("test_thread")); + + ReadyWatcher watcher4; + Event::TimerPtr timer4 = dispatcher->createTimer([&] { watcher4.ready(); }); + + ReadyWatcher watcher3; + Event::TimerPtr timer3 = dispatcher->createTimer([&] { watcher3.ready(); }); + + ReadyWatcher watcher2; + Event::TimerPtr timer2 = dispatcher->createTimer([&] { watcher2.ready(); }); + + ReadyWatcher watcher1; + Event::TimerPtr timer1 = dispatcher->createTimer([&] { + timer2->disableTimer(); + timer2->enableTimer(std::chrono::milliseconds(0)); + timer3->disableTimer(); + timer3->enableTimer(std::chrono::milliseconds(1)); + watcher1.ready(); + }); + + timer1->enableTimer(std::chrono::milliseconds(0)); + timer2->enableTimer(std::chrono::milliseconds(1)); + timer3->enableTimer(std::chrono::milliseconds(2)); + timer4->enableTimer(std::chrono::milliseconds(3)); + + // Sleep for 5ms so timers above all trigger in the same loop iteration. 
+ absl::SleepFor(absl::Milliseconds(5)); + + EXPECT_TRUE(timer1->enabled()); + EXPECT_TRUE(timer2->enabled()); + EXPECT_TRUE(timer3->enabled()); + EXPECT_TRUE(timer4->enabled()); + + // timer1 is expected to run first and reschedule timers 2 and 3. timer4 should fire before + // timer2 and timer3 since timer4's registration is unaffected. + InSequence s; + EXPECT_CALL(watcher1, ready()); + EXPECT_CALL(watcher4, ready()); + EXPECT_CALL(watcher2, ready()); + EXPECT_CALL(watcher3, ready()); + dispatcher->run(Dispatcher::RunType::Block); +} + +// Change the registration time for a timer that is already activated by re-enabling the timer +// without calling disableTimer first. +TEST(TimerImplTest, TimerOrderAndReschedule) { + Api::ApiPtr api = Api::createApiForTest(); + DispatcherPtr dispatcher(api->allocateDispatcher("test_thread")); + + ReadyWatcher watcher4; + Event::TimerPtr timer4 = dispatcher->createTimer([&] { watcher4.ready(); }); + + ReadyWatcher watcher3; + Event::TimerPtr timer3 = dispatcher->createTimer([&] { watcher3.ready(); }); + + ReadyWatcher watcher2; + Event::TimerPtr timer2 = dispatcher->createTimer([&] { watcher2.ready(); }); + + ReadyWatcher watcher1; + Event::TimerPtr timer1 = dispatcher->createTimer([&] { + timer2->enableTimer(std::chrono::milliseconds(0)); + timer3->enableTimer(std::chrono::milliseconds(1)); + watcher1.ready(); + }); + + timer1->enableTimer(std::chrono::milliseconds(0)); + timer2->enableTimer(std::chrono::milliseconds(1)); + timer3->enableTimer(std::chrono::milliseconds(2)); + timer4->enableTimer(std::chrono::milliseconds(3)); + + // Sleep for 5ms so timers above all trigger in the same loop iteration. + absl::SleepFor(absl::Milliseconds(5)); + + EXPECT_TRUE(timer1->enabled()); + EXPECT_TRUE(timer2->enabled()); + EXPECT_TRUE(timer3->enabled()); + EXPECT_TRUE(timer4->enabled()); + + // Rescheduling timers that are already scheduled to run in the current event loop iteration has + // no effect if the time delta is 0. Expect timers 1, 2 and 4 to execute in the original order. + // Timer 3 is delayed since it is rescheduled with a non-zero delta. 
+ InSequence s; + EXPECT_CALL(watcher1, ready()); + EXPECT_CALL(watcher2, ready()); + EXPECT_CALL(watcher4, ready()); + EXPECT_CALL(watcher3, ready()); + dispatcher->run(Dispatcher::RunType::Block); +} + +TEST(TimerImplTest, TimerChaining) { + Api::ApiPtr api = Api::createApiForTest(); + DispatcherPtr dispatcher(api->allocateDispatcher("test_thread")); + + ReadyWatcher watcher1; + Event::TimerPtr timer1 = dispatcher->createTimer([&] { watcher1.ready(); }); + + ReadyWatcher watcher2; + Event::TimerPtr timer2 = dispatcher->createTimer([&] { + watcher2.ready(); + timer1->enableTimer(std::chrono::milliseconds(0)); + }); + + ReadyWatcher watcher3; + Event::TimerPtr timer3 = dispatcher->createTimer([&] { + watcher3.ready(); + timer2->enableTimer(std::chrono::milliseconds(0)); + }); + + ReadyWatcher watcher4; + Event::TimerPtr timer4 = dispatcher->createTimer([&] { + watcher4.ready(); + timer3->enableTimer(std::chrono::milliseconds(0)); + }); + + timer4->enableTimer(std::chrono::milliseconds(0)); + + EXPECT_FALSE(timer1->enabled()); + EXPECT_FALSE(timer2->enabled()); + EXPECT_FALSE(timer3->enabled()); + EXPECT_TRUE(timer4->enabled()); + EXPECT_CALL(watcher4, ready()); + EXPECT_CALL(watcher3, ready()); + EXPECT_CALL(watcher2, ready()); + EXPECT_CALL(watcher1, ready()); + dispatcher->run(Dispatcher::RunType::NonBlock); + + EXPECT_FALSE(timer1->enabled()); + EXPECT_FALSE(timer2->enabled()); + EXPECT_FALSE(timer3->enabled()); + EXPECT_FALSE(timer4->enabled()); +} + +TEST(TimerImplTest, TimerChainDisable) { + Api::ApiPtr api = Api::createApiForTest(); + DispatcherPtr dispatcher(api->allocateDispatcher("test_thread")); + + ReadyWatcher watcher; + Event::TimerPtr timer1; + Event::TimerPtr timer2; + Event::TimerPtr timer3; + + auto timer_cb = [&] { + watcher.ready(); + timer1->disableTimer(); + timer2->disableTimer(); + timer3->disableTimer(); + }; + + timer1 = dispatcher->createTimer(timer_cb); + timer2 = dispatcher->createTimer(timer_cb); + timer3 = dispatcher->createTimer(timer_cb); + + timer3->enableTimer(std::chrono::milliseconds(0)); + timer2->enableTimer(std::chrono::milliseconds(0)); + timer1->enableTimer(std::chrono::milliseconds(0)); + + EXPECT_TRUE(timer1->enabled()); + EXPECT_TRUE(timer2->enabled()); + EXPECT_TRUE(timer3->enabled()); + // Only 1 call to watcher ready since the other 2 timers were disabled by the first timer. + EXPECT_CALL(watcher, ready()); + dispatcher->run(Dispatcher::RunType::NonBlock); +} + +TEST(TimerImplTest, TimerChainDelete) { + Api::ApiPtr api = Api::createApiForTest(); + DispatcherPtr dispatcher(api->allocateDispatcher("test_thread")); + + ReadyWatcher watcher; + Event::TimerPtr timer1; + Event::TimerPtr timer2; + Event::TimerPtr timer3; + + auto timer_cb = [&] { + watcher.ready(); + timer1.reset(); + timer2.reset(); + timer3.reset(); + }; + + timer1 = dispatcher->createTimer(timer_cb); + timer2 = dispatcher->createTimer(timer_cb); + timer3 = dispatcher->createTimer(timer_cb); + + timer3->enableTimer(std::chrono::milliseconds(0)); + timer2->enableTimer(std::chrono::milliseconds(0)); + timer1->enableTimer(std::chrono::milliseconds(0)); + + EXPECT_TRUE(timer1->enabled()); + EXPECT_TRUE(timer2->enabled()); + EXPECT_TRUE(timer3->enabled()); + // Only 1 call to watcher ready since the other 2 timers were deleted by the first timer. 
+ EXPECT_CALL(watcher, ready()); + dispatcher->run(Dispatcher::RunType::NonBlock); +} + class TimerImplTimingTest : public testing::Test { public: std::chrono::nanoseconds getTimerTiming(Event::SimulatedTimeSystem& time_system, From b8e9a3599541084efe44c6c91651c6308f90e671 Mon Sep 17 00:00:00 2001 From: Martin Matusiak Date: Mon, 22 Jun 2020 13:57:35 +1000 Subject: [PATCH 400/909] =?UTF-8?q?health=5Fcheck=20filter:=20optimize=20p?= =?UTF-8?q?redicate=20by=20eliminating=20floating=20point=E2=80=A6=20(#115?= =?UTF-8?q?85)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR fixes a TODO in the health check filter by translating the floating point arithmetic to integer arithmetic instead. Signed-off-by: Martin Matusiak --- .../filter/http/health_check/v2/health_check.proto | 5 +++++ .../filters/http/health_check/v3/health_check.proto | 5 +++++ .../http/health_check/v4alpha/health_check.proto | 5 +++++ docs/root/version_history/current.rst | 1 + .../filter/http/health_check/v2/health_check.proto | 5 +++++ .../filters/http/health_check/v3/health_check.proto | 5 +++++ .../http/health_check/v4alpha/health_check.proto | 5 +++++ .../filters/http/health_check/health_check.cc | 10 ++++------ 8 files changed, 35 insertions(+), 6 deletions(-) diff --git a/api/envoy/config/filter/http/health_check/v2/health_check.proto b/api/envoy/config/filter/http/health_check/v2/health_check.proto index d7f6da8c82d4..7f2a486b2618 100644 --- a/api/envoy/config/filter/http/health_check/v2/health_check.proto +++ b/api/envoy/config/filter/http/health_check/v2/health_check.proto @@ -37,6 +37,11 @@ message HealthCheck { // If operating in non-pass-through mode, specifies a set of upstream cluster // names and the minimum percentage of servers in each of those clusters that // must be healthy or degraded in order for the filter to return a 200. + // + // .. note:: + // + // This value is interpreted as an integer by truncating, so 12.50% will be calculated + // as if it were 12%. map cluster_min_healthy_percentages = 4; // Specifies a set of health check request headers to match on. The health check filter will diff --git a/api/envoy/extensions/filters/http/health_check/v3/health_check.proto b/api/envoy/extensions/filters/http/health_check/v3/health_check.proto index 1a5dbf1bb900..f3a0c42c388c 100644 --- a/api/envoy/extensions/filters/http/health_check/v3/health_check.proto +++ b/api/envoy/extensions/filters/http/health_check/v3/health_check.proto @@ -38,6 +38,11 @@ message HealthCheck { // If operating in non-pass-through mode, specifies a set of upstream cluster // names and the minimum percentage of servers in each of those clusters that // must be healthy or degraded in order for the filter to return a 200. + // + // .. note:: + // + // This value is interpreted as an integer by truncating, so 12.50% will be calculated + // as if it were 12%. map cluster_min_healthy_percentages = 4; // Specifies a set of health check request headers to match on. 
The health check filter will diff --git a/api/envoy/extensions/filters/http/health_check/v4alpha/health_check.proto b/api/envoy/extensions/filters/http/health_check/v4alpha/health_check.proto index f530363e2380..3725d085dd7b 100644 --- a/api/envoy/extensions/filters/http/health_check/v4alpha/health_check.proto +++ b/api/envoy/extensions/filters/http/health_check/v4alpha/health_check.proto @@ -38,6 +38,11 @@ message HealthCheck { // If operating in non-pass-through mode, specifies a set of upstream cluster // names and the minimum percentage of servers in each of those clusters that // must be healthy or degraded in order for the filter to return a 200. + // + // .. note:: + // + // This value is interpreted as an integer by truncating, so 12.50% will be calculated + // as if it were 12%. map cluster_min_healthy_percentages = 4; // Specifies a set of health check request headers to match on. The health check filter will diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index ed7596c16e58..cefc991093cc 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -14,6 +14,7 @@ Minor Behavior Changes * access loggers: applied existing buffer limits to access logs, as well as :ref:`stats ` for logged / dropped logs. This can be reverted temporarily by setting runtime feature `envoy.reloadable_features.disallow_unbounded_access_logs` to false. * build: run as non-root inside Docker containers. Existing behaviour can be restored by setting the environment variable `ENVOY_UID` to `0`. `ENVOY_UID` and `ENVOY_GID` can be used to set the envoy user's `uid` and `gid` respectively. +* health check: in the health check filter the :ref:`percentage of healthy servers in upstream clusters ` is now interpreted as an integer. * hot restart: added the option :option:`--use-dynamic-base-id` to select an unused base ID at startup and the option :option:`--base-id-path` to write the base id to a file (for reuse with later hot restarts). * http: fixed several bugs with applying correct connection close behavior across the http connection manager, health checker, and connection pool. This behavior may be temporarily reverted by setting runtime feature `envoy.reloadable_features.fix_connection_close` to false. * http: fixed a bug where the upgrade header was not cleared on responses to non-upgrade requests. diff --git a/generated_api_shadow/envoy/config/filter/http/health_check/v2/health_check.proto b/generated_api_shadow/envoy/config/filter/http/health_check/v2/health_check.proto index d7f6da8c82d4..7f2a486b2618 100644 --- a/generated_api_shadow/envoy/config/filter/http/health_check/v2/health_check.proto +++ b/generated_api_shadow/envoy/config/filter/http/health_check/v2/health_check.proto @@ -37,6 +37,11 @@ message HealthCheck { // If operating in non-pass-through mode, specifies a set of upstream cluster // names and the minimum percentage of servers in each of those clusters that // must be healthy or degraded in order for the filter to return a 200. + // + // .. note:: + // + // This value is interpreted as an integer by truncating, so 12.50% will be calculated + // as if it were 12%. map cluster_min_healthy_percentages = 4; // Specifies a set of health check request headers to match on. 
The health check filter will diff --git a/generated_api_shadow/envoy/extensions/filters/http/health_check/v3/health_check.proto b/generated_api_shadow/envoy/extensions/filters/http/health_check/v3/health_check.proto index 1a5dbf1bb900..f3a0c42c388c 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/health_check/v3/health_check.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/health_check/v3/health_check.proto @@ -38,6 +38,11 @@ message HealthCheck { // If operating in non-pass-through mode, specifies a set of upstream cluster // names and the minimum percentage of servers in each of those clusters that // must be healthy or degraded in order for the filter to return a 200. + // + // .. note:: + // + // This value is interpreted as an integer by truncating, so 12.50% will be calculated + // as if it were 12%. map cluster_min_healthy_percentages = 4; // Specifies a set of health check request headers to match on. The health check filter will diff --git a/generated_api_shadow/envoy/extensions/filters/http/health_check/v4alpha/health_check.proto b/generated_api_shadow/envoy/extensions/filters/http/health_check/v4alpha/health_check.proto index f530363e2380..3725d085dd7b 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/health_check/v4alpha/health_check.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/health_check/v4alpha/health_check.proto @@ -38,6 +38,11 @@ message HealthCheck { // If operating in non-pass-through mode, specifies a set of upstream cluster // names and the minimum percentage of servers in each of those clusters that // must be healthy or degraded in order for the filter to return a 200. + // + // .. note:: + // + // This value is interpreted as an integer by truncating, so 12.50% will be calculated + // as if it were 12%. map cluster_min_healthy_percentages = 4; // Specifies a set of health check request headers to match on. The health check filter will diff --git a/source/extensions/filters/http/health_check/health_check.cc b/source/extensions/filters/http/health_check/health_check.cc index 6f1e3bf9c8a7..801544b69f44 100644 --- a/source/extensions/filters/http/health_check/health_check.cc +++ b/source/extensions/filters/http/health_check/health_check.cc @@ -134,7 +134,7 @@ void HealthCheckFilter::onComplete() { for (const auto& item : *cluster_min_healthy_percentages_) { details = &RcDetails::get().HealthCheckClusterHealthy; const std::string& cluster_name = item.first; - const double min_healthy_percentage = item.second; + const uint64_t min_healthy_percentage = static_cast(item.second); auto* cluster = clusterManager.get(cluster_name); if (cluster == nullptr) { // If the cluster does not exist at all, consider the service unhealthy. @@ -148,7 +148,7 @@ void HealthCheckFilter::onComplete() { if (membership_total == 0) { // If the cluster exists but is empty, consider the service unhealthy unless // the specified minimum percent healthy for the cluster happens to be zero. - if (min_healthy_percentage == 0.0) { + if (min_healthy_percentage == 0UL) { continue; } else { final_status = Http::Code::ServiceUnavailable; @@ -158,10 +158,8 @@ void HealthCheckFilter::onComplete() { } // In the general case, consider the service unhealthy if fewer than the // specified percentage of the servers in the cluster are available (healthy + degraded). - // TODO(brian-pane) switch to purely integer-based math here, because the - // int-to-float conversions and floating point division are slow. 
- if ((stats.membership_healthy_.value() + stats.membership_degraded_.value()) < - membership_total * min_healthy_percentage / 100.0) { + if ((100UL * (stats.membership_healthy_.value() + stats.membership_degraded_.value())) < + membership_total * min_healthy_percentage) { final_status = Http::Code::ServiceUnavailable; details = &RcDetails::get().HealthCheckClusterUnhealthy; break; From 7dc71292316f2bf4643f330c8a2b713222d724f9 Mon Sep 17 00:00:00 2001 From: Sunjay Bhatia <5337253+sunjayBhatia@users.noreply.github.com> Date: Sun, 21 Jun 2020 23:58:17 -0400 Subject: [PATCH 401/909] dependencies: Bump go version (#11626) Signed-off-by: Sunjay Bhatia --- bazel/dependency_imports.bzl | 2 +- ci/verify_examples.sh | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/bazel/dependency_imports.bzl b/bazel/dependency_imports.bzl index 633457727a6a..1bcc3a8f35e5 100644 --- a/bazel/dependency_imports.bzl +++ b/bazel/dependency_imports.bzl @@ -8,7 +8,7 @@ load("@config_validation_pip3//:requirements.bzl", config_validation_pip_install load("@protodoc_pip3//:requirements.bzl", protodoc_pip_install = "pip_install") # go version for rules_go -GO_VERSION = "1.13.5" +GO_VERSION = "1.14.4" def envoy_dependency_imports(go_version = GO_VERSION): rules_foreign_cc_dependencies() diff --git a/ci/verify_examples.sh b/ci/verify_examples.sh index 4b9273ee052a..61fb380ef2d6 100755 --- a/ci/verify_examples.sh +++ b/ci/verify_examples.sh @@ -23,8 +23,9 @@ cd ../ # Test grpc bridge example # install go -curl -O https://storage.googleapis.com/golang/go1.13.5.linux-amd64.tar.gz -tar -xf go1.13.5.linux-amd64.tar.gz +GO_VERSION="1.14.4" +curl -O https://storage.googleapis.com/golang/go$GO_VERSION.linux-amd64.tar.gz +tar -xf go$GO_VERSION.linux-amd64.tar.gz sudo mv go /usr/local export PATH=$PATH:/usr/local/go/bin export GOPATH=$HOME/go From 347ec29ab1834e14b49df4e24618ee07d57cf3c2 Mon Sep 17 00:00:00 2001 From: antonio Date: Mon, 22 Jun 2020 08:54:53 -0400 Subject: [PATCH 402/909] test: Fix flakiness in ProtocolIntegrationTest.RetryStreamingReset (#11668) Commit Message: test: Fix flakiness in ProtocolIntegrationTest.RetryStreamingReset caused by FakeUpstream use-after-free. Additional Description: FakeUpstream crashes due an use after free if the proxy resets the fake stream between the time the 503 headers are sent by the fake upstream and when the fake upstream sends the reset. Executing the header and stream reset serialization in the fake upstream thread guarantees that no fd events are processed between the time the headers and encoded and stream is reset, eliminating the possibility of this test-only crash. Test can be easily reproduced by adding a sleep between upstream_request_->encodeHeaders(...) 
and upstream_request_->encodeResetStream() Risk Level: n/a test only changes Testing: n/a Docs Changes: n/a Release Notes: n/a Fixes #11652 Signed-off-by: Antonio Vicente --- test/integration/fake_upstream.cc | 4 ++++ test/integration/fake_upstream.h | 5 +++++ test/integration/protocol_integration_test.cc | 9 ++++++--- 3 files changed, 15 insertions(+), 3 deletions(-) diff --git a/test/integration/fake_upstream.cc b/test/integration/fake_upstream.cc index 5b824a6c872e..4763c9bbfe05 100644 --- a/test/integration/fake_upstream.cc +++ b/test/integration/fake_upstream.cc @@ -73,6 +73,10 @@ void FakeStream::decodeMetadata(Http::MetadataMapPtr&& metadata_map_ptr) { } } +void FakeStream::postToConnectionThread(std::function cb) { + parent_.connection().dispatcher().post(cb); +} + void FakeStream::encode100ContinueHeaders(const Http::ResponseHeaderMap& headers) { std::shared_ptr headers_copy( Http::createHeaderMap(headers)); diff --git a/test/integration/fake_upstream.h b/test/integration/fake_upstream.h index d93536f6a925..d31866c89421 100644 --- a/test/integration/fake_upstream.h +++ b/test/integration/fake_upstream.h @@ -60,6 +60,11 @@ class FakeStream : public Http::RequestDecoder, Thread::LockGuard lock(lock_); return end_stream_; } + + // Execute a callback using the dispatcher associated with the FakeStream's connection. This + // allows execution of non-interrupted sequences of operations on the fake stream which may run + // into trouble if client-side events are interleaved. + void postToConnectionThread(std::function cb); void encode100ContinueHeaders(const Http::ResponseHeaderMap& headers); void encodeHeaders(const Http::HeaderMap& headers, bool end_stream); void encodeData(uint64_t size, bool end_stream); diff --git a/test/integration/protocol_integration_test.cc b/test/integration/protocol_integration_test.cc index 42622f5e73fe..be6d5b5e48e6 100644 --- a/test/integration/protocol_integration_test.cc +++ b/test/integration/protocol_integration_test.cc @@ -452,9 +452,12 @@ TEST_P(ProtocolIntegrationTest, RetryStreamingReset) { ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); // Send back an upstream failure and end stream. Make sure an immediate reset - // doesn't cause problems. - upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "503"}}, true); - upstream_request_->encodeResetStream(); + // doesn't cause problems. Schedule via the upstream_request_ dispatcher to ensure that the stream + // still exists when encoding the reset stream. + upstream_request_->postToConnectionThread([this]() { + upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "503"}}, true); + upstream_request_->encodeResetStream(); + }); // Make sure the fake stream is reset. if (fake_upstreams_[0]->httpType() == FakeHttpConnection::Type::HTTP1) { From 7e6c71b22150e1d43e695c7bb7993b779b24c3cf Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Mon, 22 Jun 2020 09:50:00 -0400 Subject: [PATCH 403/909] conn_pool: minor refactors (#11646) Doing some refactors in preparation for the shared TCP-HTTP connection pool: Moving ActiveClient and PendingRequest out. 
Also moving several functions in the connection pool class away from referencing the codec client, which will not be present for the TCP variant Risk Level: Low (fairly small connection pool refactor) Testing: n/a Docs Changes: n/a Release Notes: n/a Part of #11528 Signed-off-by: Alyssa Wilk --- source/common/http/codec_client.h | 2 +- source/common/http/conn_pool_base.cc | 51 +++++---- source/common/http/conn_pool_base.h | 155 +++++++++++++------------- source/common/http/http1/conn_pool.cc | 4 +- source/common/http/http1/conn_pool.h | 7 +- source/common/http/http2/conn_pool.cc | 4 +- source/common/http/http2/conn_pool.h | 9 +- 7 files changed, 121 insertions(+), 111 deletions(-) diff --git a/source/common/http/codec_client.h b/source/common/http/codec_client.h index 63d16b93ddb9..606e95f18e9d 100644 --- a/source/common/http/codec_client.h +++ b/source/common/http/codec_client.h @@ -76,7 +76,7 @@ class CodecClient : Logger::Loggable, /** * @return the underlying connection ID. */ - uint64_t id() { return connection_->id(); } + uint64_t id() const { return connection_->id(); } /** * @return the underlying codec protocol. diff --git a/source/common/http/conn_pool_base.cc b/source/common/http/conn_pool_base.cc index fb0668458153..75213d61104d 100644 --- a/source/common/http/conn_pool_base.cc +++ b/source/common/http/conn_pool_base.cc @@ -105,15 +105,15 @@ void ConnPoolImplBase::attachRequestToClient(ActiveClient& client, nullptr); host_->cluster().stats().upstream_rq_pending_overflow_.inc(); } else { - ENVOY_CONN_LOG(debug, "creating stream", *client.codec_client_); + ENVOY_CONN_LOG(debug, "creating stream", client); RequestEncoder& new_encoder = client.newStreamEncoder(response_decoder); client.remaining_requests_--; if (client.remaining_requests_ == 0) { - ENVOY_CONN_LOG(debug, "maximum requests per connection, DRAINING", *client.codec_client_); + ENVOY_CONN_LOG(debug, "maximum requests per connection, DRAINING", client); host_->cluster().stats().upstream_cx_max_requests_.inc(); transitionActiveClientState(client, ActiveClient::State::DRAINING); - } else if (client.codec_client_->numActiveRequests() >= client.concurrent_request_limit_) { + } else if (client.numActiveRequests() >= client.concurrent_request_limit_) { transitionActiveClientState(client, ActiveClient::State::BUSY); } @@ -129,20 +129,18 @@ void ConnPoolImplBase::attachRequestToClient(ActiveClient& client, } void ConnPoolImplBase::onRequestClosed(ActiveClient& client, bool delay_attaching_request) { - ENVOY_CONN_LOG(debug, "destroying stream: {} remaining", *client.codec_client_, - client.codec_client_->numActiveRequests()); + ENVOY_CONN_LOG(debug, "destroying stream: {} remaining", client, client.numActiveRequests()); ASSERT(num_active_requests_ > 0); num_active_requests_--; host_->stats().rq_active_.dec(); host_->cluster().stats().upstream_rq_active_.dec(); host_->cluster().resourceManager(priority_).requests().dec(); - if (client.state_ == ActiveClient::State::DRAINING && - client.codec_client_->numActiveRequests() == 0) { + if (client.state_ == ActiveClient::State::DRAINING && client.numActiveRequests() == 0) { // Close out the draining client if we no longer have active requests. client.codec_client_->close(); } else if (client.state_ == ActiveClient::State::BUSY) { // A request was just ended, so we should be below the limit now. 
- ASSERT(client.codec_client_->numActiveRequests() < client.concurrent_request_limit_); + ASSERT(client.numActiveRequests() < client.concurrent_request_limit_); transitionActiveClientState(client, ActiveClient::State::READY); if (!delay_attaching_request) { @@ -155,7 +153,7 @@ ConnectionPool::Cancellable* ConnPoolImplBase::newStream(ResponseDecoder& respon ConnectionPool::Callbacks& callbacks) { if (!ready_clients_.empty()) { ActiveClient& client = *ready_clients_.front(); - ENVOY_CONN_LOG(debug, "using existing connection", *client.codec_client_); + ENVOY_CONN_LOG(debug, "using existing connection", client); attachRequestToClient(client, response_decoder, callbacks); return nullptr; } @@ -180,7 +178,7 @@ ConnectionPool::Cancellable* ConnPoolImplBase::newStream(ResponseDecoder& respon void ConnPoolImplBase::onUpstreamReady() { while (!pending_requests_.empty() && !ready_clients_.empty()) { ActiveClientPtr& client = ready_clients_.front(); - ENVOY_CONN_LOG(debug, "attaching to next request", *client->codec_client_); + ENVOY_CONN_LOG(debug, "attaching to next request", *client); // Pending requests are pushed onto the front, so pull from the back. attachRequestToClient(*client, pending_requests_.back()->decoder_, pending_requests_.back()->callbacks_); @@ -192,8 +190,7 @@ bool ConnPoolImplBase::hasActiveConnections() const { return (!pending_requests_.empty() || (num_active_requests_ > 0)); } -std::list& -ConnPoolImplBase::owningList(ActiveClient::State state) { +std::list& ConnPoolImplBase::owningList(ActiveClient::State state) { switch (state) { case ActiveClient::State::CONNECTING: return connecting_clients_; @@ -285,7 +282,7 @@ void ConnPoolImplBase::checkForDrained() { } } -void ConnPoolImplBase::onConnectionEvent(ConnPoolImplBase::ActiveClient& client, +void ConnPoolImplBase::onConnectionEvent(ActiveClient& client, absl::string_view failure_reason, Network::ConnectionEvent event) { if (client.state_ == ActiveClient::State::CONNECTING) { ASSERT(connecting_request_capacity_ >= client.effectiveConcurrentRequestLimit()); @@ -295,8 +292,7 @@ void ConnPoolImplBase::onConnectionEvent(ConnPoolImplBase::ActiveClient& client, if (event == Network::ConnectionEvent::RemoteClose || event == Network::ConnectionEvent::LocalClose) { // The client died. 
- ENVOY_CONN_LOG(debug, "client disconnected, failure reason: {}", *client.codec_client_, - client.codec_client_->connectionFailureReason()); + ENVOY_CONN_LOG(debug, "client disconnected, failure reason: {}", client, failure_reason); Envoy::Upstream::reportUpstreamCxDestroy(host_, event); const bool incomplete_request = client.closingWithIncompleteRequest(); @@ -361,19 +357,23 @@ void ConnPoolImplBase::onConnectionEvent(ConnPoolImplBase::ActiveClient& client, } } -ConnPoolImplBase::PendingRequest::PendingRequest(ConnPoolImplBase& parent, ResponseDecoder& decoder, - ConnectionPool::Callbacks& callbacks) +PendingRequest::PendingRequest(ConnPoolImplBase& parent, ResponseDecoder& decoder, + ConnectionPool::Callbacks& callbacks) : parent_(parent), decoder_(decoder), callbacks_(callbacks) { parent_.host_->cluster().stats().upstream_rq_pending_total_.inc(); parent_.host_->cluster().stats().upstream_rq_pending_active_.inc(); parent_.host_->cluster().resourceManager(parent_.priority_).pendingRequests().inc(); } -ConnPoolImplBase::PendingRequest::~PendingRequest() { +PendingRequest::~PendingRequest() { parent_.host_->cluster().stats().upstream_rq_pending_active_.dec(); parent_.host_->cluster().resourceManager(parent_.priority_).pendingRequests().dec(); } +void PendingRequest::cancel(Envoy::ConnectionPool::CancelPolicy policy) { + parent_.onPendingRequestCancel(*this, policy); +} + ConnectionPool::Cancellable* ConnPoolImplBase::newPendingRequest(ResponseDecoder& decoder, ConnectionPool::Callbacks& callbacks) { @@ -433,9 +433,8 @@ uint64_t translateZeroToUnlimited(uint64_t limit) { } } // namespace -ConnPoolImplBase::ActiveClient::ActiveClient(ConnPoolImplBase& parent, - uint64_t lifetime_request_limit, - uint64_t concurrent_request_limit) +ActiveClient::ActiveClient(ConnPoolImplBase& parent, uint64_t lifetime_request_limit, + uint64_t concurrent_request_limit) : parent_(parent), remaining_requests_(translateZeroToUnlimited(lifetime_request_limit)), concurrent_request_limit_(translateZeroToUnlimited(concurrent_request_limit)), connect_timer_(parent_.dispatcher_.createTimer([this]() -> void { onConnectTimeout(); })) { @@ -465,9 +464,9 @@ ConnPoolImplBase::ActiveClient::ActiveClient(ConnPoolImplBase& parent, &parent_.host_->cluster().stats().bind_errors_, nullptr}); } -ConnPoolImplBase::ActiveClient::~ActiveClient() { releaseResources(); } +ActiveClient::~ActiveClient() { releaseResources(); } -void ConnPoolImplBase::ActiveClient::releaseResources() { +void ActiveClient::releaseResources() { if (!resources_released_) { resources_released_ = true; @@ -479,12 +478,16 @@ void ConnPoolImplBase::ActiveClient::releaseResources() { } } -void ConnPoolImplBase::ActiveClient::onConnectTimeout() { +void ActiveClient::onConnectTimeout() { ENVOY_CONN_LOG(debug, "connect timeout", *codec_client_); parent_.host_->cluster().stats().upstream_cx_connect_timeout_.inc(); timed_out_ = true; close(); } +void ActiveClient::onEvent(Network::ConnectionEvent event) { + parent_.onConnectionEvent(*this, codec_client_->connectionFailureReason(), event); +} + } // namespace Http } // namespace Envoy diff --git a/source/common/http/conn_pool_base.h b/source/common/http/conn_pool_base.h index 8611418d3bb5..3d74f0989182 100644 --- a/source/common/http/conn_pool_base.h +++ b/source/common/http/conn_pool_base.h @@ -13,6 +13,81 @@ namespace Envoy { namespace Http { +class ConnPoolImplBase; + +// ActiveClient provides a base class for connection pool clients that handles connection timings +// as well as managing the connection timeout. 
+class ActiveClient : public LinkedObject, + public Network::ConnectionCallbacks, + public Event::DeferredDeletable, + protected Logger::Loggable { +public: + ActiveClient(ConnPoolImplBase& parent, uint64_t lifetime_request_limit, + uint64_t concurrent_request_limit); + ~ActiveClient() override; + + void releaseResources(); + + // Network::ConnectionCallbacks + void onEvent(Network::ConnectionEvent event) override; + void onAboveWriteBufferHighWatermark() override {} + void onBelowWriteBufferLowWatermark() override {} + + void onConnectTimeout(); + + // Returns the concurrent request limit, accounting for if the total request limit + // is less than the concurrent request limit. + uint64_t effectiveConcurrentRequestLimit() const { + return std::min(remaining_requests_, concurrent_request_limit_); + } + + void close() { codec_client_->close(); } + uint64_t id() const { return codec_client_->id(); } + virtual bool hasActiveRequests() const PURE; + virtual bool closingWithIncompleteRequest() const PURE; + virtual size_t numActiveRequests() const { return codec_client_->numActiveRequests(); } + virtual RequestEncoder& newStreamEncoder(ResponseDecoder& response_decoder) PURE; + + enum class State { + CONNECTING, // Connection is not yet established. + READY, // Additional requests may be immediately dispatched to this connection. + BUSY, // Connection is at its concurrent request limit. + DRAINING, // No more requests can be dispatched to this connection, and it will be closed + // when all requests complete. + CLOSED // Connection is closed and object is queued for destruction. + }; + + ConnPoolImplBase& parent_; + uint64_t remaining_requests_; + const uint64_t concurrent_request_limit_; + State state_{State::CONNECTING}; + CodecClientPtr codec_client_; + Upstream::HostDescriptionConstSharedPtr real_host_description_; + Stats::TimespanPtr conn_connect_ms_; + Stats::TimespanPtr conn_length_; + Event::TimerPtr connect_timer_; + bool resources_released_{false}; + bool timed_out_{false}; +}; + +using ActiveClientPtr = std::unique_ptr; + +class PendingRequest : public LinkedObject, public ConnectionPool::Cancellable { +public: + PendingRequest(ConnPoolImplBase& parent, ResponseDecoder& decoder, + ConnectionPool::Callbacks& callbacks); + ~PendingRequest() override; + + // ConnectionPool::Cancellable + void cancel(Envoy::ConnectionPool::CancelPolicy policy) override; + + ConnPoolImplBase& parent_; + ResponseDecoder& decoder_; + ConnectionPool::Callbacks& callbacks_; +}; + +using PendingRequestPtr = std::unique_ptr; + // Base class that handles request queueing logic shared between connection pool implementations. class ConnPoolImplBase : public ConnectionPool::Instance, protected Logger::Loggable { @@ -39,80 +114,6 @@ class ConnPoolImplBase : public ConnectionPool::Instance, // (due to bottom-up destructor ordering in c++) that access will be invalid. void destructAllConnections(); - // ActiveClient provides a base class for connection pool clients that handles connection timings - // as well as managing the connection timeout. 
- class ActiveClient : public LinkedObject, - public Network::ConnectionCallbacks, - public Event::DeferredDeletable { - public: - ActiveClient(ConnPoolImplBase& parent, uint64_t lifetime_request_limit, - uint64_t concurrent_request_limit); - ~ActiveClient() override; - - void releaseResources(); - - // Network::ConnectionCallbacks - void onEvent(Network::ConnectionEvent event) override { - parent_.onConnectionEvent(*this, event); - } - void onAboveWriteBufferHighWatermark() override {} - void onBelowWriteBufferLowWatermark() override {} - - void onConnectTimeout(); - void close() { codec_client_->close(); } - - // Returns the concurrent request limit, accounting for if the total request limit - // is less than the concurrent request limit. - uint64_t effectiveConcurrentRequestLimit() const { - return std::min(remaining_requests_, concurrent_request_limit_); - } - - virtual bool hasActiveRequests() const PURE; - virtual bool closingWithIncompleteRequest() const PURE; - virtual RequestEncoder& newStreamEncoder(ResponseDecoder& response_decoder) PURE; - - enum class State { - CONNECTING, // Connection is not yet established. - READY, // Additional requests may be immediately dispatched to this connection. - BUSY, // Connection is at its concurrent request limit. - DRAINING, // No more requests can be dispatched to this connection, and it will be closed - // when all requests complete. - CLOSED // Connection is closed and object is queued for destruction. - }; - - ConnPoolImplBase& parent_; - uint64_t remaining_requests_; - const uint64_t concurrent_request_limit_; - State state_{State::CONNECTING}; - CodecClientPtr codec_client_; - Upstream::HostDescriptionConstSharedPtr real_host_description_; - Stats::TimespanPtr conn_connect_ms_; - Stats::TimespanPtr conn_length_; - Event::TimerPtr connect_timer_; - bool resources_released_{false}; - bool timed_out_{false}; - }; - - using ActiveClientPtr = std::unique_ptr; - - struct PendingRequest : LinkedObject, public ConnectionPool::Cancellable { - PendingRequest(ConnPoolImplBase& parent, ResponseDecoder& decoder, - ConnectionPool::Callbacks& callbacks); - ~PendingRequest() override; - - // ConnectionPool::Cancellable - void cancel(Envoy::ConnectionPool::CancelPolicy policy) override { - parent_.onPendingRequestCancel(*this, policy); - } - - ConnPoolImplBase& parent_; - ResponseDecoder& decoder_; - ConnectionPool::Callbacks& callbacks_; - }; - - using PendingRequestPtr = std::unique_ptr; - - // Create a new CodecClient. virtual CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) PURE; // Returns a new instance of ActiveClient. @@ -142,7 +143,8 @@ class ConnPoolImplBase : public ConnectionPool::Instance, // Changes the state_ of an ActiveClient and moves to the appropriate list. 
void transitionActiveClientState(ActiveClient& client, ActiveClient::State new_state); - void onConnectionEvent(ActiveClient& client, Network::ConnectionEvent event); + void onConnectionEvent(ActiveClient& client, absl::string_view failure_reason, + Network::ConnectionEvent event); void checkForDrained(); void onUpstreamReady(); void attachRequestToClient(ActiveClient& client, ResponseDecoder& response_decoder, @@ -157,6 +159,9 @@ class ConnPoolImplBase : public ConnectionPool::Instance, const Upstream::ResourcePriority priority_; protected: + friend class ActiveClient; + friend class PendingRequest; + Event::Dispatcher& dispatcher_; const Network::ConnectionSocket::OptionsSharedPtr socket_options_; const Network::TransportSocketOptionsSharedPtr transport_socket_options_; diff --git a/source/common/http/http1/conn_pool.cc b/source/common/http/http1/conn_pool.cc index 6db02bb8cdf5..c717fb1d39cf 100644 --- a/source/common/http/http1/conn_pool.cc +++ b/source/common/http/http1/conn_pool.cc @@ -35,7 +35,7 @@ ConnPoolImpl::ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSha ConnPoolImpl::~ConnPoolImpl() { destructAllConnections(); } -ConnPoolImplBase::ActiveClientPtr ConnPoolImpl::instantiateActiveClient() { +ActiveClientPtr ConnPoolImpl::instantiateActiveClient() { return std::make_unique(*this); } @@ -112,7 +112,7 @@ void ConnPoolImpl::StreamWrapper::onDecodeComplete() { } ConnPoolImpl::ActiveClient::ActiveClient(ConnPoolImpl& parent) - : ConnPoolImplBase::ActiveClient( + : Envoy::Http::ActiveClient( parent, parent.host_->cluster().maxRequestsPerConnection(), 1 // HTTP1 always has a concurrent-request-limit of 1 per connection. ) { diff --git a/source/common/http/http1/conn_pool.h b/source/common/http/http1/conn_pool.h index 8211664df592..719671b0772f 100644 --- a/source/common/http/http1/conn_pool.h +++ b/source/common/http/http1/conn_pool.h @@ -17,7 +17,7 @@ namespace Http1 { * address. Higher layer code should handle resolving DNS on error and creating a new pool * bound to a different IP address. 
*/ -class ConnPoolImpl : public ConnPoolImplBase { +class ConnPoolImpl : public Envoy::Http::ConnPoolImplBase { public: ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, @@ -33,7 +33,7 @@ class ConnPoolImpl : public ConnPoolImplBase { ActiveClientPtr instantiateActiveClient() override; protected: - struct ActiveClient; + class ActiveClient; struct StreamWrapper : public RequestEncoderWrapper, public ResponseDecoderWrapper, @@ -64,7 +64,8 @@ class ConnPoolImpl : public ConnPoolImplBase { using StreamWrapperPtr = std::unique_ptr; - struct ActiveClient : public ConnPoolImplBase::ActiveClient { + class ActiveClient : public Envoy::Http::ActiveClient { + public: ActiveClient(ConnPoolImpl& parent); ConnPoolImpl& parent() { return static_cast(parent_); } diff --git a/source/common/http/http2/conn_pool.cc b/source/common/http/http2/conn_pool.cc index 7eaa83757342..fdbec353dce0 100644 --- a/source/common/http/http2/conn_pool.cc +++ b/source/common/http/http2/conn_pool.cc @@ -21,7 +21,7 @@ ConnPoolImpl::ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSha ConnPoolImpl::~ConnPoolImpl() { destructAllConnections(); } -ConnPoolImplBase::ActiveClientPtr ConnPoolImpl::instantiateActiveClient() { +ActiveClientPtr ConnPoolImpl::instantiateActiveClient() { return std::make_unique(*this); } void ConnPoolImpl::onGoAway(ActiveClient& client) { @@ -65,7 +65,7 @@ uint64_t ConnPoolImpl::maxRequestsPerConnection() { } ConnPoolImpl::ActiveClient::ActiveClient(ConnPoolImpl& parent) - : ConnPoolImplBase::ActiveClient( + : Envoy::Http::ActiveClient( parent, parent.maxRequestsPerConnection(), parent.host_->cluster().http2Options().max_concurrent_streams().value()) { codec_client_->setCodecClientCallbacks(*this); diff --git a/source/common/http/http2/conn_pool.h b/source/common/http/http2/conn_pool.h index 481f4eb24a97..1c42d71bca3c 100644 --- a/source/common/http/http2/conn_pool.h +++ b/source/common/http/http2/conn_pool.h @@ -16,7 +16,7 @@ namespace Http2 { * shifting to a new connection if we reach max streams on the primary. This is a base class * used for both the prod implementation as well as the testing one. */ -class ConnPoolImpl : public ConnPoolImplBase { +class ConnPoolImpl : public Envoy::Http::ConnPoolImplBase { public: ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, @@ -32,9 +32,10 @@ class ConnPoolImpl : public ConnPoolImplBase { ActiveClientPtr instantiateActiveClient() override; protected: - struct ActiveClient : public CodecClientCallbacks, - public Http::ConnectionCallbacks, - public ConnPoolImplBase::ActiveClient { + class ActiveClient : public CodecClientCallbacks, + public Http::ConnectionCallbacks, + public Envoy::Http::ActiveClient { + public: ActiveClient(ConnPoolImpl& parent); ~ActiveClient() override = default; From 3e2686c2bda4cc196db8d083d0dcdf6738c578ac Mon Sep 17 00:00:00 2001 From: Jonathan Oddy Date: Mon, 22 Jun 2020 16:20:14 +0100 Subject: [PATCH 404/909] Add option to allow previously disallowed dynamic forward proxy config. (#11685) This adds the option allow_insecure_cluster_options to the dynamic forward proxy's cluster configuration. Enabling this flag allows disabling auto_sni and auto_san_validation in the cluster's UpstreamHttpProtocolOptions, which was previously disallowed. This allows use where e.g. 
automatic adding of an exact SAN matcher for the original authority is undesirable, while preserving the existing "safe by default" behaviour for most use cases. Signed-off-by: Jonathan Oddy --- .../dynamic_forward_proxy/v3/cluster.proto | 5 +++++ docs/root/version_history/current.rst | 1 + .../dynamic_forward_proxy/v3/cluster.proto | 5 +++++ .../clusters/dynamic_forward_proxy/cluster.cc | 5 +++-- .../dynamic_forward_proxy/cluster_test.cc | 17 +++++++++++++++++ 5 files changed, 31 insertions(+), 2 deletions(-) diff --git a/api/envoy/extensions/clusters/dynamic_forward_proxy/v3/cluster.proto b/api/envoy/extensions/clusters/dynamic_forward_proxy/v3/cluster.proto index 6f100d9dbb7e..869e8c42caba 100644 --- a/api/envoy/extensions/clusters/dynamic_forward_proxy/v3/cluster.proto +++ b/api/envoy/extensions/clusters/dynamic_forward_proxy/v3/cluster.proto @@ -27,4 +27,9 @@ message ClusterConfig { // `. common.dynamic_forward_proxy.v3.DnsCacheConfig dns_cache_config = 1 [(validate.rules).message = {required: true}]; + + // If true allow the cluster configuration to disable the auto_sni and auto_san_validation options + // in the :ref:`cluster's upstream_http_protocol_options + // ` + bool allow_insecure_cluster_options = 2; } diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index cefc991093cc..b18b9af6afed 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -62,6 +62,7 @@ New Features * config: added :ref:`version_text ` stat that reflects xDS version. * decompressor: generic :ref:`decompressor ` filter exposed to users. * dynamic forward proxy: added :ref:`SNI based dynamic forward proxy ` support. +* dynamic forward proxy: added :ref:`allow_insecure_cluster_options` to allow disabling of auto_san_validation and auto_sni. * ext_authz filter: added :ref:`v2 deny_at_disable `, :ref:`v3 deny_at_disable `. This allows to force deny for protected path while filter gets disabled, by setting this key to true. * ext_authz filter: added API version field for both :ref:`HTTP ` and :ref:`Network ` filters to explicitly set the version of gRPC service endpoint and message to be used. diff --git a/generated_api_shadow/envoy/extensions/clusters/dynamic_forward_proxy/v3/cluster.proto b/generated_api_shadow/envoy/extensions/clusters/dynamic_forward_proxy/v3/cluster.proto index 6f100d9dbb7e..869e8c42caba 100644 --- a/generated_api_shadow/envoy/extensions/clusters/dynamic_forward_proxy/v3/cluster.proto +++ b/generated_api_shadow/envoy/extensions/clusters/dynamic_forward_proxy/v3/cluster.proto @@ -27,4 +27,9 @@ message ClusterConfig { // `. 
common.dynamic_forward_proxy.v3.DnsCacheConfig dns_cache_config = 1 [(validate.rules).message = {required: true}]; + + // If true allow the cluster configuration to disable the auto_sni and auto_san_validation options + // in the :ref:`cluster's upstream_http_protocol_options + // ` + bool allow_insecure_cluster_options = 2; } diff --git a/source/extensions/clusters/dynamic_forward_proxy/cluster.cc b/source/extensions/clusters/dynamic_forward_proxy/cluster.cc index e79e6e019756..f4ad845a8b63 100644 --- a/source/extensions/clusters/dynamic_forward_proxy/cluster.cc +++ b/source/extensions/clusters/dynamic_forward_proxy/cluster.cc @@ -201,8 +201,9 @@ ClusterFactory::createClusterWithConfig( context.stats()); envoy::config::cluster::v3::Cluster cluster_config = cluster; if (cluster_config.has_upstream_http_protocol_options()) { - if (!cluster_config.upstream_http_protocol_options().auto_sni() || - !cluster_config.upstream_http_protocol_options().auto_san_validation()) { + if (!proto_config.allow_insecure_cluster_options() && + (!cluster_config.upstream_http_protocol_options().auto_sni() || + !cluster_config.upstream_http_protocol_options().auto_san_validation())) { throw EnvoyException( "dynamic_forward_proxy cluster must have auto_sni and auto_san_validation true when " "configured with upstream_http_protocol_options"); diff --git a/test/extensions/clusters/dynamic_forward_proxy/cluster_test.cc b/test/extensions/clusters/dynamic_forward_proxy/cluster_test.cc index e44a7a7d18e9..2a90f3ee3878 100644 --- a/test/extensions/clusters/dynamic_forward_proxy/cluster_test.cc +++ b/test/extensions/clusters/dynamic_forward_proxy/cluster_test.cc @@ -297,6 +297,23 @@ upstream_http_protocol_options: {} "configured with upstream_http_protocol_options"); } +TEST_F(ClusterFactoryTest, InsecureUpstreamHttpProtocolOptions) { + const std::string yaml_config = TestEnvironment::substitute(R"EOF( +name: name +connect_timeout: 0.25s +cluster_type: + name: dynamic_forward_proxy + typed_config: + "@type": type.googleapis.com/envoy.extensions.clusters.dynamic_forward_proxy.v3.ClusterConfig + allow_insecure_cluster_options: true + dns_cache_config: + name: foo +upstream_http_protocol_options: {} +)EOF"); + + createCluster(yaml_config); +} + } // namespace DynamicForwardProxy } // namespace Clusters } // namespace Extensions From 31cf43c588765aee9d53d957c6c0197a8be3d65c Mon Sep 17 00:00:00 2001 From: Snow Pettersen Date: Mon, 22 Jun 2020 11:44:01 -0400 Subject: [PATCH 405/909] upstream: correctly resolve protocol for aggregate cluster (#11435) Previously there existed an invariant that any host selected would belong to the top level cluster. With the introduction of the aggregate cluster this is no longer true, so whenever we're applying cluster configuration to a selected host, we must take care to use the configuration associated with that host. This passes through the downstream protocol into the cluster manager, allowing us to determine the protocol used based on the actual host selected instead of the top-level cluster. 
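For illustration, a minimal sketch of the new resolution flow (simplified from ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::connPool(); the helper name is hypothetical and this is not the literal patch):

    // Sketch: the protocol is now resolved from the cluster of the host that was
    // actually selected, not from the top-level cluster the router saw.
    Http::Protocol resolveUpstreamProtocol(Upstream::LoadBalancer& lb,
                                           Upstream::LoadBalancerContext* context,
                                           absl::optional<Http::Protocol> downstream_protocol) {
      Upstream::HostConstSharedPtr host = lb.chooseHost(context);
      ASSERT(host != nullptr); // the real code returns a null pool when no host is healthy
      // For an aggregate cluster, host->cluster() can differ from the router's cluster,
      // so the selected host's ClusterInfo decides the upstream protocol.
      return host->cluster().upstreamHttpProtocol(downstream_protocol);
    }

The connection pool hash key is then derived from this resolved protocol. Because the downstream protocol is carried as an absl::optional<Http::Protocol>, a USE_DOWNSTREAM_PROTOCOL cluster falls back to its configured upstream protocol when no downstream protocol is available.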
Signed-off-by: Snow Pettersen --- include/envoy/router/router.h | 9 +++-- include/envoy/upstream/cluster_manager.h | 10 +++-- source/common/router/router.cc | 5 +-- source/common/router/upstream_request.h | 11 +++--- .../common/upstream/cluster_manager_impl.cc | 11 ++++-- source/common/upstream/cluster_manager_impl.h | 14 ++++--- source/common/upstream/upstream_impl.cc | 3 +- .../upstreams/http/generic/config.cc | 7 ++-- .../upstreams/http/generic/config.h | 3 +- .../extensions/upstreams/http/http/config.cc | 5 ++- .../extensions/upstreams/http/http/config.h | 3 +- .../extensions/upstreams/http/tcp/config.cc | 5 ++- source/extensions/upstreams/http/tcp/config.h | 3 +- .../config_validation/cluster_manager.cc | 5 +-- .../config_validation/cluster_manager.h | 2 +- test/common/http/async_client_impl_test.cc | 2 +- test/common/router/router_test.cc | 37 +++++++------------ .../aggregate/cluster_integration_test.cc | 8 +++- test/mocks/upstream/mocks.h | 4 +- 19 files changed, 79 insertions(+), 68 deletions(-) diff --git a/include/envoy/router/router.h b/include/envoy/router/router.h index b0761122f1c9..ce36ec69ccc8 100644 --- a/include/envoy/router/router.h +++ b/include/envoy/router/router.h @@ -1234,10 +1234,11 @@ class GenericConnPoolFactory : public Envoy::Config::TypedFactory { * @param options for creating the transport socket * @return may be null */ - virtual GenericConnPoolPtr createGenericConnPool(Upstream::ClusterManager& cm, bool is_connect, - const RouteEntry& route_entry, - Http::Protocol protocol, - Upstream::LoadBalancerContext* ctx) const PURE; + virtual GenericConnPoolPtr + createGenericConnPool(Upstream::ClusterManager& cm, bool is_connect, + const RouteEntry& route_entry, + absl::optional downstream_protocol, + Upstream::LoadBalancerContext* ctx) const PURE; }; using GenericConnPoolFactoryPtr = std::unique_ptr; diff --git a/include/envoy/upstream/cluster_manager.h b/include/envoy/upstream/cluster_manager.h index e3eea850f000..de2fa32b75f1 100644 --- a/include/envoy/upstream/cluster_manager.h +++ b/include/envoy/upstream/cluster_manager.h @@ -158,11 +158,13 @@ class ClusterManager { * * Can return nullptr if there is no host available in the cluster or if the cluster does not * exist. + * + * To resolve the protocol to use, we provide the downstream protocol (if one exists). */ - virtual Http::ConnectionPool::Instance* httpConnPoolForCluster(const std::string& cluster, - ResourcePriority priority, - Http::Protocol protocol, - LoadBalancerContext* context) PURE; + virtual Http::ConnectionPool::Instance* + httpConnPoolForCluster(const std::string& cluster, ResourcePriority priority, + absl::optional downstream_protocol, + LoadBalancerContext* context) PURE; /** * Allocate a load balanced TCP connection pool for a cluster. 
This is *per-thread* so that diff --git a/source/common/router/router.cc b/source/common/router/router.cc index 64f8450de417..8a373f263530 100644 --- a/source/common/router/router.cc +++ b/source/common/router/router.cc @@ -608,9 +608,8 @@ std::unique_ptr Filter::createConnPool() { const bool should_tcp_proxy = route_entry_->connectConfig().has_value() && downstream_headers_->getMethodValue() == Http::Headers::get().MethodValues.Connect; - Http::Protocol protocol = cluster_->upstreamHttpProtocol(callbacks_->streamInfo().protocol()); - return factory->createGenericConnPool(config_.cm_, should_tcp_proxy, *route_entry_, protocol, - this); + return factory->createGenericConnPool(config_.cm_, should_tcp_proxy, *route_entry_, + callbacks_->streamInfo().protocol(), this); } void Filter::sendNoHealthyUpstreamResponse() { diff --git a/source/common/router/upstream_request.h b/source/common/router/upstream_request.h index c898fa5b495d..93e3eb268644 100644 --- a/source/common/router/upstream_request.h +++ b/source/common/router/upstream_request.h @@ -165,10 +165,11 @@ class UpstreamRequest : public Logger::Loggable, class HttpConnPool : public GenericConnPool, public Http::ConnectionPool::Callbacks { public: // GenericConnPool - HttpConnPool(Upstream::ClusterManager& cm, const RouteEntry& route_entry, Http::Protocol protocol, + HttpConnPool(Upstream::ClusterManager& cm, const RouteEntry& route_entry, + absl::optional downstream_protocol, Upstream::LoadBalancerContext* ctx) { - conn_pool_ = - cm.httpConnPoolForCluster(route_entry.clusterName(), route_entry.priority(), protocol, ctx); + conn_pool_ = cm.httpConnPoolForCluster(route_entry.clusterName(), route_entry.priority(), + downstream_protocol, ctx); } bool valid() const { return conn_pool_ != nullptr; } @@ -194,8 +195,8 @@ class HttpConnPool : public GenericConnPool, public Http::ConnectionPool::Callba class TcpConnPool : public GenericConnPool, public Tcp::ConnectionPool::Callbacks { public: - TcpConnPool(Upstream::ClusterManager& cm, const RouteEntry& route_entry, Http::Protocol, - Upstream::LoadBalancerContext* ctx) { + TcpConnPool(Upstream::ClusterManager& cm, const RouteEntry& route_entry, + absl::optional, Upstream::LoadBalancerContext* ctx) { conn_pool_ = cm.tcpConnPoolForCluster(route_entry.clusterName(), Upstream::ResourcePriority::Default, ctx); } diff --git a/source/common/upstream/cluster_manager_impl.cc b/source/common/upstream/cluster_manager_impl.cc index 0a9a21e48da2..423aba6425c8 100644 --- a/source/common/upstream/cluster_manager_impl.cc +++ b/source/common/upstream/cluster_manager_impl.cc @@ -825,7 +825,8 @@ ThreadLocalCluster* ClusterManagerImpl::get(absl::string_view cluster) { Http::ConnectionPool::Instance* ClusterManagerImpl::httpConnPoolForCluster(const std::string& cluster, ResourcePriority priority, - Http::Protocol protocol, LoadBalancerContext* context) { + absl::optional protocol, + LoadBalancerContext* context) { ThreadLocalClusterManagerImpl& cluster_manager = tls_->getTyped(); auto entry = cluster_manager.thread_local_clusters_.find(cluster); @@ -1291,7 +1292,8 @@ ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::~ClusterEntry() Http::ConnectionPool::Instance* ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::connPool( - ResourcePriority priority, Http::Protocol protocol, LoadBalancerContext* context) { + ResourcePriority priority, absl::optional downstream_protocol, + LoadBalancerContext* context) { HostConstSharedPtr host = lb_->chooseHost(context); if (!host) { ENVOY_LOG(debug, "no 
healthy host for HTTP connection pool"); @@ -1299,7 +1301,8 @@ ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::connPool( return nullptr; } - std::vector hash_key = {uint8_t(protocol)}; + auto upstream_protocol = host->cluster().upstreamHttpProtocol(downstream_protocol); + std::vector hash_key = {uint8_t(upstream_protocol)}; Network::Socket::OptionsSharedPtr upstream_options(std::make_shared()); if (context) { @@ -1330,7 +1333,7 @@ ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::connPool( ConnPoolsContainer::ConnPools::PoolOptRef pool = container.pools_->getPool(priority, hash_key, [&]() { return parent_.parent_.factory_.allocateConnPool( - parent_.thread_local_dispatcher_, host, priority, protocol, + parent_.thread_local_dispatcher_, host, priority, upstream_protocol, !upstream_options->empty() ? upstream_options : nullptr, have_transport_socket_options ? context->upstreamTransportSocketOptions() : nullptr); }); diff --git a/source/common/upstream/cluster_manager_impl.h b/source/common/upstream/cluster_manager_impl.h index a373eb20aaf2..3998d51edb68 100644 --- a/source/common/upstream/cluster_manager_impl.h +++ b/source/common/upstream/cluster_manager_impl.h @@ -223,10 +223,13 @@ class ClusterManagerImpl : public ClusterManager, Logger::Loggable downstream_protocol, + LoadBalancerContext* context) override; Tcp::ConnectionPool::Instance* tcpConnPoolForCluster(const std::string& cluster, ResourcePriority priority, LoadBalancerContext* context) override; @@ -327,7 +330,8 @@ class ClusterManagerImpl : public ClusterManager, Logger::Loggable downstream_protocol, LoadBalancerContext* context); Tcp::ConnectionPool::Instance* tcpConnPool(ResourcePriority priority, diff --git a/source/common/upstream/upstream_impl.cc b/source/common/upstream/upstream_impl.cc index a11e06f2039e..65321d0b3e1d 100644 --- a/source/common/upstream/upstream_impl.cc +++ b/source/common/upstream/upstream_impl.cc @@ -861,7 +861,8 @@ void ClusterInfoImpl::createNetworkFilterChain(Network::Connection& connection) Http::Protocol ClusterInfoImpl::upstreamHttpProtocol(absl::optional downstream_protocol) const { - if (features_ & Upstream::ClusterInfo::Features::USE_DOWNSTREAM_PROTOCOL) { + if (downstream_protocol.has_value() && + features_ & Upstream::ClusterInfo::Features::USE_DOWNSTREAM_PROTOCOL) { return downstream_protocol.value(); } else { return (features_ & Upstream::ClusterInfo::Features::HTTP2) ? Http::Protocol::Http2 diff --git a/source/extensions/upstreams/http/generic/config.cc b/source/extensions/upstreams/http/generic/config.cc index f3057ccb8561..193f14c6e759 100644 --- a/source/extensions/upstreams/http/generic/config.cc +++ b/source/extensions/upstreams/http/generic/config.cc @@ -10,12 +10,13 @@ namespace Generic { Router::GenericConnPoolPtr GenericGenericConnPoolFactory::createGenericConnPool( Upstream::ClusterManager& cm, bool is_connect, const Router::RouteEntry& route_entry, - Envoy::Http::Protocol protocol, Upstream::LoadBalancerContext* ctx) const { + absl::optional downstream_protocol, + Upstream::LoadBalancerContext* ctx) const { if (is_connect) { - auto ret = std::make_unique(cm, route_entry, protocol, ctx); + auto ret = std::make_unique(cm, route_entry, downstream_protocol, ctx); return (ret->valid() ? std::move(ret) : nullptr); } - auto ret = std::make_unique(cm, route_entry, protocol, ctx); + auto ret = std::make_unique(cm, route_entry, downstream_protocol, ctx); return (ret->valid() ? 
std::move(ret) : nullptr); } diff --git a/source/extensions/upstreams/http/generic/config.h b/source/extensions/upstreams/http/generic/config.h index 048e9998a403..1c2f1a2f16d3 100644 --- a/source/extensions/upstreams/http/generic/config.h +++ b/source/extensions/upstreams/http/generic/config.h @@ -19,7 +19,8 @@ class GenericGenericConnPoolFactory : public Router::GenericConnPoolFactory { std::string category() const override { return "envoy.upstreams"; } Router::GenericConnPoolPtr createGenericConnPool(Upstream::ClusterManager& cm, bool is_connect, - const Router::RouteEntry& route_entry, Envoy::Http::Protocol protocol, + const Router::RouteEntry& route_entry, + absl::optional downstream_protocol, Upstream::LoadBalancerContext* ctx) const override; ProtobufTypes::MessagePtr createEmptyConfigProto() override { diff --git a/source/extensions/upstreams/http/http/config.cc b/source/extensions/upstreams/http/http/config.cc index a257b7e39b37..5c5915afced5 100644 --- a/source/extensions/upstreams/http/http/config.cc +++ b/source/extensions/upstreams/http/http/config.cc @@ -10,8 +10,9 @@ namespace Http { Router::GenericConnPoolPtr HttpGenericConnPoolFactory::createGenericConnPool( Upstream::ClusterManager& cm, bool, const Router::RouteEntry& route_entry, - Envoy::Http::Protocol protocol, Upstream::LoadBalancerContext* ctx) const { - auto ret = std::make_unique(cm, route_entry, protocol, ctx); + absl::optional downstream_protocol, + Upstream::LoadBalancerContext* ctx) const { + auto ret = std::make_unique(cm, route_entry, downstream_protocol, ctx); return (ret->valid() ? std::move(ret) : nullptr); } diff --git a/source/extensions/upstreams/http/http/config.h b/source/extensions/upstreams/http/http/config.h index 9481f742a78d..4c6036ddf3b5 100644 --- a/source/extensions/upstreams/http/http/config.h +++ b/source/extensions/upstreams/http/http/config.h @@ -19,7 +19,8 @@ class HttpGenericConnPoolFactory : public Router::GenericConnPoolFactory { std::string category() const override { return "envoy.upstreams"; } Router::GenericConnPoolPtr createGenericConnPool(Upstream::ClusterManager& cm, bool is_connect, - const Router::RouteEntry& route_entry, Envoy::Http::Protocol protocol, + const Router::RouteEntry& route_entry, + absl::optional downstream_protocol, Upstream::LoadBalancerContext* ctx) const override; ProtobufTypes::MessagePtr createEmptyConfigProto() override { diff --git a/source/extensions/upstreams/http/tcp/config.cc b/source/extensions/upstreams/http/tcp/config.cc index ffd0412b643e..27e90035a9e4 100644 --- a/source/extensions/upstreams/http/tcp/config.cc +++ b/source/extensions/upstreams/http/tcp/config.cc @@ -10,8 +10,9 @@ namespace Tcp { Router::GenericConnPoolPtr TcpGenericConnPoolFactory::createGenericConnPool( Upstream::ClusterManager& cm, bool, const Router::RouteEntry& route_entry, - Envoy::Http::Protocol protocol, Upstream::LoadBalancerContext* ctx) const { - auto ret = std::make_unique(cm, route_entry, protocol, ctx); + absl::optional downstream_protocol, + Upstream::LoadBalancerContext* ctx) const { + auto ret = std::make_unique(cm, route_entry, downstream_protocol, ctx); return (ret->valid() ? 
std::move(ret) : nullptr); } diff --git a/source/extensions/upstreams/http/tcp/config.h b/source/extensions/upstreams/http/tcp/config.h index 8fdaa9c31d03..5ff4df42f5b3 100644 --- a/source/extensions/upstreams/http/tcp/config.h +++ b/source/extensions/upstreams/http/tcp/config.h @@ -19,7 +19,8 @@ class TcpGenericConnPoolFactory : public Router::GenericConnPoolFactory { std::string category() const override { return "envoy.upstreams"; } Router::GenericConnPoolPtr createGenericConnPool(Upstream::ClusterManager& cm, bool is_connect, - const Router::RouteEntry& route_entry, Envoy::Http::Protocol protocol, + const Router::RouteEntry& route_entry, + absl::optional downstream_protocol, Upstream::LoadBalancerContext* ctx) const override; ProtobufTypes::MessagePtr createEmptyConfigProto() override { return std::make_unique(); diff --git a/source/server/config_validation/cluster_manager.cc b/source/server/config_validation/cluster_manager.cc index 4dbdc73a31ef..7cdbf0b1df4b 100644 --- a/source/server/config_validation/cluster_manager.cc +++ b/source/server/config_validation/cluster_manager.cc @@ -37,9 +37,8 @@ ValidationClusterManager::ValidationClusterManager( grpc_context), async_client_(api, time_system) {} -Http::ConnectionPool::Instance* -ValidationClusterManager::httpConnPoolForCluster(const std::string&, ResourcePriority, - Http::Protocol, LoadBalancerContext*) { +Http::ConnectionPool::Instance* ValidationClusterManager::httpConnPoolForCluster( + const std::string&, ResourcePriority, absl::optional, LoadBalancerContext*) { return nullptr; } diff --git a/source/server/config_validation/cluster_manager.h b/source/server/config_validation/cluster_manager.h index 07ea8f3f8c1c..e2a8157e34e6 100644 --- a/source/server/config_validation/cluster_manager.h +++ b/source/server/config_validation/cluster_manager.h @@ -65,7 +65,7 @@ class ValidationClusterManager : public ClusterManagerImpl { Event::TimeSystem& time_system); Http::ConnectionPool::Instance* httpConnPoolForCluster(const std::string&, ResourcePriority, - Http::Protocol, + absl::optional, LoadBalancerContext*) override; Host::CreateConnectionData tcpConnForCluster(const std::string&, LoadBalancerContext*) override; Http::AsyncClient& httpAsyncClientForCluster(const std::string&) override; diff --git a/test/common/http/async_client_impl_test.cc b/test/common/http/async_client_impl_test.cc index dcce9db1d83e..0523bde76810 100644 --- a/test/common/http/async_client_impl_test.cc +++ b/test/common/http/async_client_impl_test.cc @@ -277,7 +277,7 @@ TEST_F(AsyncClientImplTest, BasicHashPolicy) { })); EXPECT_CALL(cm_, httpConnPoolForCluster(_, _, _, _)) .WillOnce( - Invoke([&](const std::string&, Upstream::ResourcePriority, Http::Protocol, + Invoke([&](const std::string&, Upstream::ResourcePriority, auto, Upstream::LoadBalancerContext* context) -> Http::ConnectionPool::Instance* { // this is the hash of :path header value "/" EXPECT_EQ(16761507700594825962UL, context->computeHashKey().value()); diff --git a/test/common/router/router_test.cc b/test/common/router/router_test.cc index eaf43fc2be4f..169a8d801d29 100644 --- a/test/common/router/router_test.cc +++ b/test/common/router/router_test.cc @@ -176,9 +176,9 @@ class RouterTestBase : public testing::Test { } EXPECT_CALL(cm_, httpConnPoolForCluster(_, _, _, _)) - .WillOnce( - Invoke([&](const std::string&, Upstream::ResourcePriority, Http::Protocol, - Upstream::LoadBalancerContext* context) -> Http::ConnectionPool::Instance* { + .WillOnce(Invoke( + [&](const std::string&, Upstream::ResourcePriority, 
absl::optional, + Upstream::LoadBalancerContext* context) -> Http::ConnectionPool::Instance* { auto match = context->metadataMatchCriteria()->metadataMatchCriteria(); EXPECT_EQ(match.size(), 2); auto it = match.begin(); @@ -509,10 +509,7 @@ TEST_F(RouterTest, PoolFailureWithPriority) { } TEST_F(RouterTest, Http1Upstream) { - EXPECT_CALL(*cm_.thread_local_cluster_.cluster_.info_, upstreamHttpProtocol(_)) - .WillOnce(Return(Http::Protocol::Http11)); - - EXPECT_CALL(cm_, httpConnPoolForCluster(_, _, Http::Protocol::Http11, _)); + EXPECT_CALL(cm_, httpConnPoolForCluster(_, _, absl::optional(), _)); EXPECT_CALL(cm_.conn_pool_, newStream(_, _)).WillOnce(Return(&cancellable_)); expectResponseTimerCreate(); @@ -535,10 +532,7 @@ TEST_F(RouterTest, Http1Upstream) { // x-envoy-original-path in the basic upstream test when Envoy header // suppression is configured. TEST_F(RouterTestSuppressEnvoyHeaders, Http1Upstream) { - EXPECT_CALL(*cm_.thread_local_cluster_.cluster_.info_, upstreamHttpProtocol(_)) - .WillOnce(Return(Http::Protocol::Http11)); - - EXPECT_CALL(cm_, httpConnPoolForCluster(_, _, Http::Protocol::Http11, _)); + EXPECT_CALL(cm_, httpConnPoolForCluster(_, _, absl::optional(), _)); EXPECT_CALL(cm_.conn_pool_, newStream(_, _)).WillOnce(Return(&cancellable_)); expectResponseTimerCreate(); @@ -557,10 +551,7 @@ TEST_F(RouterTestSuppressEnvoyHeaders, Http1Upstream) { } TEST_F(RouterTest, Http2Upstream) { - EXPECT_CALL(*cm_.thread_local_cluster_.cluster_.info_, upstreamHttpProtocol(_)) - .WillOnce(Return(Http::Protocol::Http2)); - - EXPECT_CALL(cm_, httpConnPoolForCluster(_, _, Http::Protocol::Http2, _)); + EXPECT_CALL(cm_, httpConnPoolForCluster(_, _, absl::optional(), _)); EXPECT_CALL(cm_.conn_pool_, newStream(_, _)).WillOnce(Return(&cancellable_)); expectResponseTimerCreate(); @@ -584,7 +575,7 @@ TEST_F(RouterTest, HashPolicy) { .WillOnce(Return(absl::optional(10))); EXPECT_CALL(cm_, httpConnPoolForCluster(_, _, _, _)) .WillOnce( - Invoke([&](const std::string&, Upstream::ResourcePriority, Http::Protocol, + Invoke([&](const std::string&, Upstream::ResourcePriority, absl::optional, Upstream::LoadBalancerContext* context) -> Http::ConnectionPool::Instance* { EXPECT_EQ(10UL, context->computeHashKey().value()); return &cm_.conn_pool_; @@ -611,7 +602,7 @@ TEST_F(RouterTest, HashPolicyNoHash) { .WillOnce(Return(absl::optional())); EXPECT_CALL(cm_, httpConnPoolForCluster(_, _, _, &router_)) .WillOnce( - Invoke([&](const std::string&, Upstream::ResourcePriority, Http::Protocol, + Invoke([&](const std::string&, Upstream::ResourcePriority, absl::optional, Upstream::LoadBalancerContext* context) -> Http::ConnectionPool::Instance* { EXPECT_FALSE(context->computeHashKey()); return &cm_.conn_pool_; @@ -653,7 +644,7 @@ TEST_F(RouterTest, AddCookie) { EXPECT_CALL(cm_, httpConnPoolForCluster(_, _, _, _)) .WillOnce( - Invoke([&](const std::string&, Upstream::ResourcePriority, Http::Protocol, + Invoke([&](const std::string&, Upstream::ResourcePriority, absl::optional, Upstream::LoadBalancerContext* context) -> Http::ConnectionPool::Instance* { EXPECT_EQ(10UL, context->computeHashKey().value()); return &cm_.conn_pool_; @@ -705,7 +696,7 @@ TEST_F(RouterTest, AddCookieNoDuplicate) { EXPECT_CALL(cm_, httpConnPoolForCluster(_, _, _, _)) .WillOnce( - Invoke([&](const std::string&, Upstream::ResourcePriority, Http::Protocol, + Invoke([&](const std::string&, Upstream::ResourcePriority, absl::optional, Upstream::LoadBalancerContext* context) -> Http::ConnectionPool::Instance* { EXPECT_EQ(10UL, 
context->computeHashKey().value()); return &cm_.conn_pool_; @@ -755,7 +746,7 @@ TEST_F(RouterTest, AddMultipleCookies) { EXPECT_CALL(cm_, httpConnPoolForCluster(_, _, _, _)) .WillOnce( - Invoke([&](const std::string&, Upstream::ResourcePriority, Http::Protocol, + Invoke([&](const std::string&, Upstream::ResourcePriority, absl::optional, Upstream::LoadBalancerContext* context) -> Http::ConnectionPool::Instance* { EXPECT_EQ(10UL, context->computeHashKey().value()); return &cm_.conn_pool_; @@ -806,7 +797,7 @@ TEST_F(RouterTest, MetadataMatchCriteria) { .WillByDefault(Return(&callbacks_.route_->route_entry_.metadata_matches_criteria_)); EXPECT_CALL(cm_, httpConnPoolForCluster(_, _, _, _)) .WillOnce( - Invoke([&](const std::string&, Upstream::ResourcePriority, Http::Protocol, + Invoke([&](const std::string&, Upstream::ResourcePriority, absl::optional, Upstream::LoadBalancerContext* context) -> Http::ConnectionPool::Instance* { EXPECT_EQ(context->metadataMatchCriteria(), &callbacks_.route_->route_entry_.metadata_matches_criteria_); @@ -836,7 +827,7 @@ TEST_F(RouterTest, NoMetadataMatchCriteria) { ON_CALL(callbacks_.route_->route_entry_, metadataMatchCriteria()).WillByDefault(Return(nullptr)); EXPECT_CALL(cm_, httpConnPoolForCluster(_, _, _, _)) .WillOnce( - Invoke([&](const std::string&, Upstream::ResourcePriority, Http::Protocol, + Invoke([&](const std::string&, Upstream::ResourcePriority, absl::optional, Upstream::LoadBalancerContext* context) -> Http::ConnectionPool::Instance* { EXPECT_EQ(context->metadataMatchCriteria(), nullptr); return &cm_.conn_pool_; @@ -5871,7 +5862,7 @@ TEST_F(RouterTest, ApplicationProtocols) { EXPECT_CALL(cm_, httpConnPoolForCluster(_, _, _, _)) .WillOnce( - Invoke([&](const std::string&, Upstream::ResourcePriority, Http::Protocol, + Invoke([&](const std::string&, Upstream::ResourcePriority, absl::optional, Upstream::LoadBalancerContext* context) -> Http::ConnectionPool::Instance* { Network::TransportSocketOptionsSharedPtr transport_socket_options = context->upstreamTransportSocketOptions(); diff --git a/test/extensions/clusters/aggregate/cluster_integration_test.cc b/test/extensions/clusters/aggregate/cluster_integration_test.cc index 7a034d24d260..5172bc8bbc4e 100644 --- a/test/extensions/clusters/aggregate/cluster_integration_test.cc +++ b/test/extensions/clusters/aggregate/cluster_integration_test.cc @@ -61,6 +61,7 @@ const std::string& config() { - name: aggregate_cluster connect_timeout: 0.25s lb_policy: CLUSTER_PROVIDED + protocol_selection: USE_DOWNSTREAM_PROTOCOL # this should be ignored, as cluster_1 and cluster_2 specify HTTP/2. 
cluster_type: name: envoy.clusters.aggregate typed_config: @@ -129,10 +130,10 @@ class AggregateIntegrationTest : public testing::TestWithParamset_allow_unexpected_disconnects(false); - fake_upstreams_.emplace_back(new FakeUpstream(0, FakeHttpConnection::Type::HTTP1, version_, + fake_upstreams_.emplace_back(new FakeUpstream(0, FakeHttpConnection::Type::HTTP2, version_, timeSystem(), enable_half_close_)); fake_upstreams_[SecondUpstreamIndex]->set_allow_unexpected_disconnects(false); cluster1_ = ConfigHelper::buildStaticCluster( @@ -275,8 +276,11 @@ TEST_P(AggregateIntegrationTest, PreviousPrioritiesRetryPredicate) { waitForNextUpstreamRequest(FirstUpstreamIndex); upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "503"}}, false); + ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); + ASSERT_TRUE(fake_upstream_connection_->close()); ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); fake_upstream_connection_.reset(); + waitForNextUpstreamRequest(SecondUpstreamIndex); upstream_request_->encodeHeaders(default_response_headers_, true); diff --git a/test/mocks/upstream/mocks.h b/test/mocks/upstream/mocks.h index 6222f636cb06..ed48b0a5a814 100644 --- a/test/mocks/upstream/mocks.h +++ b/test/mocks/upstream/mocks.h @@ -317,8 +317,8 @@ class MockClusterManager : public ClusterManager { MOCK_METHOD(const ClusterSet&, primaryClusters, ()); MOCK_METHOD(ThreadLocalCluster*, get, (absl::string_view cluster)); MOCK_METHOD(Http::ConnectionPool::Instance*, httpConnPoolForCluster, - (const std::string& cluster, ResourcePriority priority, Http::Protocol protocol, - LoadBalancerContext* context)); + (const std::string& cluster, ResourcePriority priority, + absl::optional downstream_protocol, LoadBalancerContext* context)); MOCK_METHOD(Tcp::ConnectionPool::Instance*, tcpConnPoolForCluster, (const std::string& cluster, ResourcePriority priority, LoadBalancerContext* context)); From b2d99df6d10940aadf30fabc24edfbbab508349f Mon Sep 17 00:00:00 2001 From: htuch Date: Mon, 22 Jun 2020 11:47:29 -0400 Subject: [PATCH 406/909] http: fix and generalize PercentEncoding utility. (#11677) * Fix an off-by-one bug in PercentEncoding::encode(). It doesn't appear that this utility is used anywhere that this would be security sensitive, but it is used for sendLocalReply() for gRPC messages. * Generalize from '%' to arbitrary visible reserved chars. RFC3986 encoding requires more flexibility, e.g. being able to percent encode `/` or `#`. This change was motivated by a larger patch that provides encoding/decoding of udpa:// URIs, as part of #11264. Risk level: Low Testing: Additional unit tests. 
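For concreteness, a worked example of the off-by-one (a sketch based on the new unit test, assuming the same absl/fmt helpers the utility already uses; it is not code from this patch). For the input "too%large" the fast path finds the first reserved character '%' at index 3 and hands off to the slow path, which must copy the first index bytes verbatim:

    absl::string_view value = "too%large";
    const size_t index = 3; // position of the first reserved character, '%'
    // Fixed prefix copy: substr(0, index). The old substr(0, index - 1) dropped a byte,
    // yielding "to%25large" instead of "too%25large".
    std::string encoded(value.substr(0, index));                      // "too"
    absl::StrAppend(&encoded, fmt::format("%{:02X}", value[index]));  // "too%25"
    absl::StrAppend(&encoded, value.substr(index + 1));               // "too%25large"

With the generalized signature, callers can also widen the reserved set: per the added test, encode("too%!large/", "%!/") yields "too%25%21large%2F", while the default reserved set remains just "%".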
Signed-off-by: Harvey Tuch --- source/common/http/utility.cc | 15 +++++++++------ source/common/http/utility.h | 14 +++++++++----- test/common/http/utility_test.cc | 6 ++++++ 3 files changed, 24 insertions(+), 11 deletions(-) diff --git a/source/common/http/utility.cc b/source/common/http/utility.cc index dce557c7606b..a48bea5d08ae 100644 --- a/source/common/http/utility.cc +++ b/source/common/http/utility.cc @@ -833,7 +833,9 @@ void Utility::traversePerFilterConfigGeneric( } } -std::string Utility::PercentEncoding::encode(absl::string_view value) { +std::string Utility::PercentEncoding::encode(absl::string_view value, + absl::string_view reserved_chars) { + absl::flat_hash_set reserved_char_set{reserved_chars.begin(), reserved_chars.end()}; for (size_t i = 0; i < value.size(); ++i) { const char& ch = value[i]; // The escaping characters are defined in @@ -842,22 +844,23 @@ std::string Utility::PercentEncoding::encode(absl::string_view value) { // We do checking for each char in the string. If the current char is included in the defined // escaping characters, we jump to "the slow path" (append the char [encoded or not encoded] // to the returned string one by one) started from the current index. - if (ch < ' ' || ch >= '~' || ch == '%') { - return PercentEncoding::encode(value, i); + if (ch < ' ' || ch >= '~' || reserved_char_set.find(ch) != reserved_char_set.end()) { + return PercentEncoding::encode(value, i, reserved_char_set); } } return std::string(value); } -std::string Utility::PercentEncoding::encode(absl::string_view value, const size_t index) { +std::string Utility::PercentEncoding::encode(absl::string_view value, const size_t index, + const absl::flat_hash_set& reserved_char_set) { std::string encoded; if (index > 0) { - absl::StrAppend(&encoded, value.substr(0, index - 1)); + absl::StrAppend(&encoded, value.substr(0, index)); } for (size_t i = index; i < value.size(); ++i) { const char& ch = value[i]; - if (ch < ' ' || ch >= '~' || ch == '%') { + if (ch < ' ' || ch >= '~' || reserved_char_set.find(ch) != reserved_char_set.end()) { // For consistency, URI producers should use uppercase hexadecimal digits for all // percent-encodings. https://tools.ietf.org/html/rfc3986#section-2.1. absl::StrAppend(&encoded, fmt::format("%{:02X}", ch)); diff --git a/source/common/http/utility.h b/source/common/http/utility.h index d6f055a9d551..e43f8fd977a8 100644 --- a/source/common/http/utility.h +++ b/source/common/http/utility.h @@ -142,12 +142,15 @@ class Url { class PercentEncoding { public: /** - * Encodes string view to its percent encoded representation. + * Encodes string view to its percent encoded representation. Non-visible ASCII is always escaped, + * in addition to a given list of reserved chars. + * * @param value supplies string to be encoded. - * @return std::string percent-encoded string based on - * https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#responses. + * @param reserved_chars list of reserved chars to escape. By default the escaped chars in + * https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#responses are used. + * @return std::string percent-encoded string. */ - static std::string encode(absl::string_view value); + static std::string encode(absl::string_view value, absl::string_view reserved_chars = "%"); /** * Decodes string view from its percent encoded representation. @@ -158,7 +161,8 @@ class PercentEncoding { private: // Encodes string view to its percent encoded representation, with start index. 
- static std::string encode(absl::string_view value, const size_t index); + static std::string encode(absl::string_view value, const size_t index, + const absl::flat_hash_set& reserved_char_set); }; /** diff --git a/test/common/http/utility_test.cc b/test/common/http/utility_test.cc index d4320cd19324..8751acddb024 100644 --- a/test/common/http/utility_test.cc +++ b/test/common/http/utility_test.cc @@ -1194,5 +1194,11 @@ TEST(PercentEncoding, Trailing) { EXPECT_EQ(Utility::PercentEncoding::decode("too%20large%"), "too large%"); } +TEST(PercentEncoding, Encoding) { + EXPECT_EQ(Utility::PercentEncoding::encode("too%large"), "too%25large"); + EXPECT_EQ(Utility::PercentEncoding::encode("too%!large/"), "too%25!large/"); + EXPECT_EQ(Utility::PercentEncoding::encode("too%!large/", "%!/"), "too%25%21large%2F"); +} + } // namespace Http } // namespace Envoy From dcf34972d1bc15324835c40dfd7a780e8fc69d72 Mon Sep 17 00:00:00 2001 From: Weixiao Huang Date: Tue, 23 Jun 2020 00:01:34 +0800 Subject: [PATCH 407/909] ext_authz: support sending multiple headers with the same name to upstream (#11158) This patch adds allowed_upstream_headers_to_append to allow sending multiple headers with the same name to upstream. Relevant issue: solo-io/gloo#2983. Risk Level: Low Testing: Unit tests. Docs Changes: Added. Release Notes: Added. Fixes #11156 Signed-off-by: weixiao-huang --- .../filters/http/ext_authz/v3/ext_authz.proto | 5 ++ .../http/ext_authz/v4alpha/ext_authz.proto | 5 ++ docs/root/version_history/current.rst | 1 + .../filters/http/ext_authz/v3/ext_authz.proto | 5 ++ .../http/ext_authz/v4alpha/ext_authz.proto | 5 ++ .../filters/common/ext_authz/ext_authz.h | 9 ++- .../common/ext_authz/ext_authz_grpc_impl.cc | 2 +- .../common/ext_authz/ext_authz_http_impl.cc | 28 +++++++-- .../common/ext_authz/ext_authz_http_impl.h | 10 ++++ .../filters/http/ext_authz/ext_authz.cc | 10 +++- .../ext_authz/ext_authz_http_impl_test.cc | 16 ++++- .../filters/common/ext_authz/test_common.cc | 2 +- .../filters/http/ext_authz/config_test.cc | 4 ++ .../ext_authz/ext_authz_integration_test.cc | 58 ++++++++++++++++++- .../filters/http/ext_authz/ext_authz_test.cc | 16 ++--- 15 files changed, 151 insertions(+), 25 deletions(-) diff --git a/api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto b/api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto index 0efa67c61873..d9264ca66b66 100644 --- a/api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto +++ b/api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto @@ -213,6 +213,11 @@ message AuthorizationResponse { // Note that coexistent headers will be overridden. type.matcher.v3.ListStringMatcher allowed_upstream_headers = 1; + // When this :ref:`list ` is set, authorization + // response headers that have a correspondent match will be added to the client's response. Note + // that coexistent headers will be appended. + type.matcher.v3.ListStringMatcher allowed_upstream_headers_to_append = 3; + // When this :ref:`list `. is set, authorization // response headers that have a correspondent match will be added to the client's response. 
Note // that when this list is *not* set, all the authorization response headers, except *Authority diff --git a/api/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto b/api/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto index fe288f85aefb..7442715a0db3 100644 --- a/api/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto +++ b/api/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto @@ -213,6 +213,11 @@ message AuthorizationResponse { // Note that coexistent headers will be overridden. type.matcher.v4alpha.ListStringMatcher allowed_upstream_headers = 1; + // When this :ref:`list ` is set, authorization + // response headers that have a correspondent match will be added to the client's response. Note + // that coexistent headers will be appended. + type.matcher.v4alpha.ListStringMatcher allowed_upstream_headers_to_append = 3; + // When this :ref:`list `. is set, authorization // response headers that have a correspondent match will be added to the client's response. Note // that when this list is *not* set, all the authorization response headers, except *Authority diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index b18b9af6afed..8a9e6c81a2ab 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -66,6 +66,7 @@ New Features * ext_authz filter: added :ref:`v2 deny_at_disable `, :ref:`v3 deny_at_disable `. This allows to force deny for protected path while filter gets disabled, by setting this key to true. * ext_authz filter: added API version field for both :ref:`HTTP ` and :ref:`Network ` filters to explicitly set the version of gRPC service endpoint and message to be used. +* ext_authz filter: added :ref:`v3 allowed_upstream_headers_to_append ` to allow appending multiple header entries (returned by the authorization server) with the same key to the original request headers. * fault: added support for controlling the percentage of requests that abort, delay and response rate limits faults are applied to using :ref:`HTTP headers ` to the HTTP fault filter. * fault: added support for specifying grpc_status code in abort faults using diff --git a/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto b/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto index ebe404ccfb77..0c99cb6997f8 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto @@ -212,6 +212,11 @@ message AuthorizationResponse { // Note that coexistent headers will be overridden. type.matcher.v3.ListStringMatcher allowed_upstream_headers = 1; + // When this :ref:`list ` is set, authorization + // response headers that have a correspondent match will be added to the client's response. Note + // that coexistent headers will be appended. + type.matcher.v3.ListStringMatcher allowed_upstream_headers_to_append = 3; + // When this :ref:`list `. is set, authorization // response headers that have a correspondent match will be added to the client's response. 
Note // that when this list is *not* set, all the authorization response headers, except *Authority diff --git a/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto b/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto index fe288f85aefb..7442715a0db3 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto @@ -213,6 +213,11 @@ message AuthorizationResponse { // Note that coexistent headers will be overridden. type.matcher.v4alpha.ListStringMatcher allowed_upstream_headers = 1; + // When this :ref:`list ` is set, authorization + // response headers that have a correspondent match will be added to the client's response. Note + // that coexistent headers will be appended. + type.matcher.v4alpha.ListStringMatcher allowed_upstream_headers_to_append = 3; + // When this :ref:`list `. is set, authorization // response headers that have a correspondent match will be added to the client's response. Note // that when this list is *not* set, all the authorization response headers, except *Authority diff --git a/source/extensions/filters/common/ext_authz/ext_authz.h b/source/extensions/filters/common/ext_authz/ext_authz.h index 725f534090f7..a46d6e3b7191 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz.h +++ b/source/extensions/filters/common/ext_authz/ext_authz.h @@ -49,9 +49,14 @@ enum class CheckStatus { struct Response { // Call status. CheckStatus status; - // Optional http headers used on either denied or ok responses. + // A set of HTTP headers returned by the authorization server, that will be optionally appended + // to the request to the upstream server. Http::HeaderVector headers_to_append; - // Optional http headers used on either denied or ok responses. + // A set of HTTP headers returned by the authorization server, will be optionally set + // (using "setCopy") to the request to the upstream server. + Http::HeaderVector headers_to_set; + // A set of HTTP headers returned by the authorization server, will be optionally added + // (using "addCopy") to the request to the upstream server. Http::HeaderVector headers_to_add; // Optional http body used only on denied response. 
std::string body; diff --git a/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc b/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc index 2dc572ffb0a1..ae5ce707fb37 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc +++ b/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc @@ -95,7 +95,7 @@ void GrpcClientImpl::toAuthzResponseHeader( response->headers_to_append.emplace_back(Http::LowerCaseString(header.header().key()), header.header().value()); } else { - response->headers_to_add.emplace_back(Http::LowerCaseString(header.header().key()), + response->headers_to_set.emplace_back(Http::LowerCaseString(header.header().key()), header.header().value()); } } diff --git a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc index 42e3864943a0..97ccaa25cbfa 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc +++ b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc @@ -33,19 +33,30 @@ const Http::HeaderMap& lengthZeroHeader() { const Response& errorResponse() { CONSTRUCT_ON_FIRST_USE(Response, Response{CheckStatus::Error, Http::HeaderVector{}, Http::HeaderVector{}, - EMPTY_STRING, Http::Code::Forbidden}); + Http::HeaderVector{}, EMPTY_STRING, Http::Code::Forbidden}); } // SuccessResponse used for creating either DENIED or OK authorization responses. struct SuccessResponse { SuccessResponse(const Http::HeaderMap& headers, const MatcherSharedPtr& matchers, - Response&& response) - : headers_(headers), matchers_(matchers), response_(std::make_unique(response)) { + const MatcherSharedPtr& append_matchers, Response&& response) + : headers_(headers), matchers_(matchers), append_matchers_(append_matchers), + response_(std::make_unique(response)) { headers_.iterate( [](const Http::HeaderEntry& header, void* ctx) -> Http::HeaderMap::Iterate { auto* context = static_cast(ctx); // UpstreamHeaderMatcher if (context->matchers_->matches(header.key().getStringView())) { + context->response_->headers_to_set.emplace_back( + Http::LowerCaseString{std::string(header.key().getStringView())}, + std::string(header.value().getStringView())); + } + if (context->append_matchers_->matches(header.key().getStringView())) { + // If there is an existing matching key in the current headers, the new entry will be + // appended with the same key. For example, given {"key": "value1"} headers, if there is + // a matching "key" from the authorization response headers {"key": "value2"}, the + // request to upstream server will have two entries for "key": {"key": "value1", "key": + // "value2"}. 
context->response_->headers_to_add.emplace_back( Http::LowerCaseString{std::string(header.key().getStringView())}, std::string(header.value().getStringView())); @@ -57,6 +68,7 @@ struct SuccessResponse { const Http::HeaderMap& headers_; const MatcherSharedPtr& matchers_; + const MatcherSharedPtr& append_matchers_; ResponsePtr response_; }; @@ -128,6 +140,9 @@ ClientConfig::ClientConfig(const envoy::extensions::filters::http::ext_authz::v3 upstream_header_matchers_(toUpstreamMatchers( config.http_service().authorization_response().allowed_upstream_headers(), enable_case_sensitive_string_matcher_)), + upstream_header_to_append_matchers_(toUpstreamMatchers( + config.http_service().authorization_response().allowed_upstream_headers_to_append(), + enable_case_sensitive_string_matcher_)), cluster_name_(config.http_service().server_uri().cluster()), timeout_(timeout), path_prefix_(path_prefix), tracing_name_(fmt::format("async {} egress", config.http_service().server_uri().cluster())), @@ -316,16 +331,19 @@ ResponsePtr RawHttpClientImpl::toResponse(Http::ResponseMessagePtr message) { // Create an Ok authorization response. if (status_code == enumToInt(Http::Code::OK)) { SuccessResponse ok{message->headers(), config_->upstreamHeaderMatchers(), + config_->upstreamHeaderToAppendMatchers(), Response{CheckStatus::OK, Http::HeaderVector{}, Http::HeaderVector{}, - EMPTY_STRING, Http::Code::OK}}; + Http::HeaderVector{}, EMPTY_STRING, Http::Code::OK}}; span_->setTag(TracingConstants::get().TraceStatus, TracingConstants::get().TraceOk); return std::move(ok.response_); } // Create a Denied authorization response. SuccessResponse denied{message->headers(), config_->clientHeaderMatchers(), + config_->upstreamHeaderToAppendMatchers(), Response{CheckStatus::Denied, Http::HeaderVector{}, Http::HeaderVector{}, - message->bodyAsString(), static_cast(status_code)}}; + Http::HeaderVector{}, message->bodyAsString(), + static_cast(status_code)}}; span_->setTag(TracingConstants::get().TraceStatus, TracingConstants::get().TraceUnauthz); return std::move(denied.response_); } diff --git a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.h b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.h index c1b017bb741a..4bcb5741bba5 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.h +++ b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.h @@ -98,6 +98,15 @@ class ClientConfig { */ const MatcherSharedPtr& upstreamHeaderMatchers() const { return upstream_header_matchers_; } + /** + * Returns a list of matchers used for selecting the authorization response headers that + * should be sent to the upstream server. The same header keys will be appended, instead of + * be replaced. + */ + const MatcherSharedPtr& upstreamHeaderToAppendMatchers() const { + return upstream_header_to_append_matchers_; + } + /** * Returns the name used for tracing. 
*/ @@ -123,6 +132,7 @@ class ClientConfig { const MatcherSharedPtr request_header_matchers_; const MatcherSharedPtr client_header_matchers_; const MatcherSharedPtr upstream_header_matchers_; + const MatcherSharedPtr upstream_header_to_append_matchers_; const Http::LowerCaseStrPairVector authorization_headers_to_add_; const std::string cluster_name_; const std::chrono::milliseconds timeout_; diff --git a/source/extensions/filters/http/ext_authz/ext_authz.cc b/source/extensions/filters/http/ext_authz/ext_authz.cc index 38d62a85a051..8159b2b9e196 100644 --- a/source/extensions/filters/http/ext_authz/ext_authz.cc +++ b/source/extensions/filters/http/ext_authz/ext_authz.cc @@ -166,14 +166,18 @@ void Filter::onComplete(Filters::Common::ExtAuthz::ResponsePtr&& response) { case CheckStatus::OK: { ENVOY_STREAM_LOG(trace, "ext_authz filter added header(s) to the request:", *callbacks_); if (config_->clearRouteCache() && - (!response->headers_to_add.empty() || !response->headers_to_append.empty())) { + (!response->headers_to_set.empty() || !response->headers_to_append.empty())) { ENVOY_STREAM_LOG(debug, "ext_authz is clearing route cache", *callbacks_); callbacks_->clearRouteCache(); } - for (const auto& header : response->headers_to_add) { + for (const auto& header : response->headers_to_set) { ENVOY_STREAM_LOG(trace, "'{}':'{}'", *callbacks_, header.first.get(), header.second); request_headers_->setCopy(header.first, header.second); } + for (const auto& header : response->headers_to_add) { + ENVOY_STREAM_LOG(trace, "'{}':'{}'", *callbacks_, header.first.get(), header.second); + request_headers_->addCopy(header.first, header.second); + } for (const auto& header : response->headers_to_append) { const Http::HeaderEntry* header_to_modify = request_headers_->get(header.first); if (header_to_modify) { @@ -212,7 +216,7 @@ void Filter::onComplete(Filters::Common::ExtAuthz::ResponsePtr&& response) { callbacks_->sendLocalReply( response->status_code, response->body, - [&headers = response->headers_to_add, + [&headers = response->headers_to_set, &callbacks = *callbacks_](Http::HeaderMap& response_headers) -> void { ENVOY_STREAM_LOG(trace, "ext_authz filter added header(s) to the local response:", callbacks); diff --git a/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc b/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc index 74851ee016d6..fd5bbf6435eb 100644 --- a/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc +++ b/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc @@ -81,6 +81,12 @@ class ExtAuthzHttpClientTest : public testing::Test { ignore_case: true - prefix: "X-" ignore_case: true + allowed_upstream_headers_to_append: + patterns: + - exact: Alice + ignore_case: true + - prefix: "Append-" + ignore_case: true allowed_client_headers: patterns: - exact: Foo @@ -141,6 +147,7 @@ TEST_F(ExtAuthzHttpClientTest, ClientConfig) { const Http::LowerCaseString foo{"foo"}; const Http::LowerCaseString baz{"baz"}; const Http::LowerCaseString bar{"bar"}; + const Http::LowerCaseString alice{"alice"}; // Check allowed request headers. EXPECT_TRUE(config_->requestHeaderMatchers()->matches(Http::Headers::get().Method.get())); @@ -149,7 +156,7 @@ TEST_F(ExtAuthzHttpClientTest, ClientConfig) { EXPECT_FALSE(config_->requestHeaderMatchers()->matches(Http::Headers::get().ContentLength.get())); EXPECT_TRUE(config_->requestHeaderMatchers()->matches(baz.get())); - // // Check allowed client headers. + // Check allowed client headers. 
EXPECT_TRUE(config_->clientHeaderMatchers()->matches(Http::Headers::get().Status.get())); EXPECT_TRUE(config_->clientHeaderMatchers()->matches(Http::Headers::get().ContentLength.get())); EXPECT_FALSE(config_->clientHeaderMatchers()->matches(Http::Headers::get().Path.get())); @@ -158,10 +165,13 @@ TEST_F(ExtAuthzHttpClientTest, ClientConfig) { EXPECT_FALSE(config_->clientHeaderMatchers()->matches(Http::Headers::get().Origin.get())); EXPECT_TRUE(config_->clientHeaderMatchers()->matches(foo.get())); - // // Check allowed upstream headers. + // Check allowed upstream headers. EXPECT_TRUE(config_->upstreamHeaderMatchers()->matches(bar.get())); - // // Check other attributes. + // Check allowed upstream headers to append. + EXPECT_TRUE(config_->upstreamHeaderToAppendMatchers()->matches(alice.get())); + + // Check other attributes. EXPECT_EQ(config_->pathPrefix(), "/bar"); EXPECT_EQ(config_->cluster(), "ext_authz"); EXPECT_EQ(config_->tracingName(), "async ext_authz egress"); diff --git a/test/extensions/filters/common/ext_authz/test_common.cc b/test/extensions/filters/common/ext_authz/test_common.cc index 5dc5603e93a3..f23c363d2ed3 100644 --- a/test/extensions/filters/common/ext_authz/test_common.cc +++ b/test/extensions/filters/common/ext_authz/test_common.cc @@ -65,7 +65,7 @@ Response TestCommon::makeAuthzResponse(CheckStatus status, Http::Code status_cod authz_response.headers_to_append.emplace_back(Http::LowerCaseString(header.header().key()), header.header().value()); } else { - authz_response.headers_to_add.emplace_back(Http::LowerCaseString(header.header().key()), + authz_response.headers_to_set.emplace_back(Http::LowerCaseString(header.header().key()), header.header().value()); } } diff --git a/test/extensions/filters/http/ext_authz/config_test.cc b/test/extensions/filters/http/ext_authz/config_test.cc index 048a740a9848..aae44a7e8d39 100644 --- a/test/extensions/filters/http/ext_authz/config_test.cc +++ b/test/extensions/filters/http/ext_authz/config_test.cc @@ -87,6 +87,10 @@ TEST(HttpExtAuthzConfigTest, CorrectProtoHttp) { patterns: - exact: baz - prefix: x-fail + allowed_upstream_headers_to_append: + patterns: + - exact: baz-append + - prefix: x-append path_prefix: /extauth diff --git a/test/extensions/filters/http/ext_authz/ext_authz_integration_test.cc b/test/extensions/filters/http/ext_authz/ext_authz_integration_test.cc index 94ec7b287b80..83787a393f85 100644 --- a/test/extensions/filters/http/ext_authz/ext_authz_integration_test.cc +++ b/test/extensions/filters/http/ext_authz/ext_authz_integration_test.cc @@ -248,6 +248,8 @@ class ExtAuthzHttpIntegrationTest : public HttpIntegrationTest, {":scheme", "http"}, {":authority", "host"}, {"x-case-sensitive-header", case_sensitive_header_value_}, + {"baz", "foo"}, + {"bat", "foo"}, }); } @@ -259,6 +261,17 @@ class ExtAuthzHttpIntegrationTest : public HttpIntegrationTest, RELEASE_ASSERT(result, result.message()); result = ext_authz_request_->waitForEndStream(*dispatcher_); RELEASE_ASSERT(result, result.message()); + + // Send back authorization response with "baz" and "bat" headers. + // Also add multiple values "append-foo" and "append-bar" for key "x-append-bat". 
+ Http::TestResponseHeaderMapImpl response_headers{ + {":status", "200"}, + {"baz", "baz"}, + {"bat", "bar"}, + {"x-append-bat", "append-foo"}, + {"x-append-bat", "append-bar"}, + }; + ext_authz_request_->encodeHeaders(response_headers, true); } void cleanup() { @@ -294,6 +307,34 @@ class ExtAuthzHttpIntegrationTest : public HttpIntegrationTest, initiateClientConnection(); waitForExtAuthzRequest(); + AssertionResult result = + fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_); + RELEASE_ASSERT(result, result.message()); + result = fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_); + RELEASE_ASSERT(result, result.message()); + result = upstream_request_->waitForEndStream(*dispatcher_); + RELEASE_ASSERT(result, result.message()); + + // The original client request header value of "baz" is "foo". Since we configure to "override" + // the value of "baz", we expect the request headers to be sent to upstream contain only one + // "baz" with value "baz" (set by the authorization server). + EXPECT_THAT(upstream_request_->headers(), Http::HeaderValueOf("baz", "baz")); + + // The original client request header value of "bat" is "foo". Since we configure to "append" + // the value of "bat", we expect the request headers to be sent to upstream contain two "bat"s, + // with values: "foo" and "bar" (the "bat: bar" header is appended by the authorization server). + const auto& request_existed_headers = + Http::TestRequestHeaderMapImpl{{"bat", "foo"}, {"bat", "bar"}}; + EXPECT_THAT(request_existed_headers, Http::IsSubsetOfHeaders(upstream_request_->headers())); + + // The original client request header does not contain x-append-bat. Since we configure to + // "append" the value of "x-append-bat", we expect the headers to be sent to upstream contain + // two "x-append-bat"s, instead of replacing the first with the last one, with values: + // "append-foo" and "append-bar" + const auto& request_nonexisted_headers = Http::TestRequestHeaderMapImpl{ + {"x-append-bat", "append-foo"}, {"x-append-bat", "append-bar"}}; + EXPECT_THAT(request_nonexisted_headers, Http::IsSubsetOfHeaders(upstream_request_->headers())); + response_->waitForEndStream(); EXPECT_TRUE(response_->complete()); @@ -312,10 +353,23 @@ class ExtAuthzHttpIntegrationTest : public HttpIntegrationTest, uri: "ext_authz:9000" cluster: "ext_authz" timeout: 0.25s + authorization_request: allowed_headers: patterns: - exact: X-Case-Sensitive-Header + + authorization_response: + allowed_upstream_headers: + patterns: + - exact: baz + - prefix: x-success + + allowed_upstream_headers_to_append: + patterns: + - exact: bat + - prefix: x-append + failure_mode_allow: true )EOF"; }; @@ -355,7 +409,7 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, ExtAuthzHttpIntegrationTest, testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), TestUtility::ipTestParamsToString); -// Verifies that by default HTTP service uses the case sensitive string matcher. +// Verifies that by default HTTP service uses the case-sensitive string matcher. 
TEST_P(ExtAuthzHttpIntegrationTest, DefaultCaseSensitiveStringMatcher) { setupWithDisabledCaseSensitiveStringMatcher(false); const auto* header_entry = ext_authz_request_->headers().get(case_sensitive_header_name_); @@ -364,7 +418,7 @@ TEST_P(ExtAuthzHttpIntegrationTest, DefaultCaseSensitiveStringMatcher) { // Verifies that by setting "false" to // envoy.reloadable_features.ext_authz_http_service_enable_case_sensitive_string_matcher, the string -// matcher used by HTTP service will case insensitive. +// matcher used by HTTP service will be case-insensitive. TEST_P(ExtAuthzHttpIntegrationTest, DisableCaseSensitiveStringMatcher) { setupWithDisabledCaseSensitiveStringMatcher(true); const auto* header_entry = ext_authz_request_->headers().get(case_sensitive_header_name_); diff --git a/test/extensions/filters/http/ext_authz/ext_authz_test.cc b/test/extensions/filters/http/ext_authz/ext_authz_test.cc index b5236920fb67..a075909cd4ca 100644 --- a/test/extensions/filters/http/ext_authz/ext_authz_test.cc +++ b/test/extensions/filters/http/ext_authz/ext_authz_test.cc @@ -597,7 +597,7 @@ TEST_F(HttpFilterTest, ClearCache) { Filters::Common::ExtAuthz::Response response{}; response.status = Filters::Common::ExtAuthz::CheckStatus::OK; response.headers_to_append = Http::HeaderVector{{Http::LowerCaseString{"foo"}, "bar"}}; - response.headers_to_add = Http::HeaderVector{{Http::LowerCaseString{"bar"}, "foo"}}; + response.headers_to_set = Http::HeaderVector{{Http::LowerCaseString{"bar"}, "foo"}}; request_callbacks_->onComplete(std::make_unique(response)); EXPECT_EQ( 1U, filter_callbacks_.clusterInfo()->statsScope().counterFromString("ext_authz.ok").value()); @@ -677,7 +677,7 @@ TEST_F(HttpFilterTest, ClearCacheRouteHeadersToAddOnly) { Filters::Common::ExtAuthz::Response response{}; response.status = Filters::Common::ExtAuthz::CheckStatus::OK; - response.headers_to_add = Http::HeaderVector{{Http::LowerCaseString{"foo"}, "bar"}}; + response.headers_to_set = Http::HeaderVector{{Http::LowerCaseString{"foo"}, "bar"}}; request_callbacks_->onComplete(std::make_unique(response)); EXPECT_EQ( 1U, filter_callbacks_.clusterInfo()->statsScope().counterFromString("ext_authz.ok").value()); @@ -752,7 +752,7 @@ TEST_F(HttpFilterTest, NoClearCacheRouteConfig) { Filters::Common::ExtAuthz::Response response{}; response.status = Filters::Common::ExtAuthz::CheckStatus::OK; response.headers_to_append = Http::HeaderVector{{Http::LowerCaseString{"foo"}, "bar"}}; - response.headers_to_add = Http::HeaderVector{{Http::LowerCaseString{"bar"}, "foo"}}; + response.headers_to_set = Http::HeaderVector{{Http::LowerCaseString{"bar"}, "foo"}}; request_callbacks_->onComplete(std::make_unique(response)); EXPECT_EQ( 1U, filter_callbacks_.clusterInfo()->statsScope().counterFromString("ext_authz.ok").value()); @@ -775,7 +775,7 @@ TEST_F(HttpFilterTest, NoClearCacheRouteDeniedResponse) { Filters::Common::ExtAuthz::Response response{}; response.status = Filters::Common::ExtAuthz::CheckStatus::Denied; response.status_code = Http::Code::Unauthorized; - response.headers_to_add = Http::HeaderVector{{Http::LowerCaseString{"foo"}, "bar"}}; + response.headers_to_set = Http::HeaderVector{{Http::LowerCaseString{"foo"}, "bar"}}; auto response_ptr = std::make_unique(response); EXPECT_CALL(*client_, check(_, _, testing::A(), _)) @@ -1169,7 +1169,7 @@ TEST_P(HttpFilterTestParam, ImmediateDeniedResponseWithHttpAttributes) { Filters::Common::ExtAuthz::Response response{}; response.status = Filters::Common::ExtAuthz::CheckStatus::Denied; response.status_code = 
Http::Code::Unauthorized; - response.headers_to_add = Http::HeaderVector{{Http::LowerCaseString{"foo"}, "bar"}}; + response.headers_to_set = Http::HeaderVector{{Http::LowerCaseString{"foo"}, "bar"}}; response.body = std::string{"baz"}; auto response_ptr = std::make_unique(response); @@ -1211,7 +1211,7 @@ TEST_P(HttpFilterTestParam, ImmediateOkResponseWithHttpAttributes) { Filters::Common::ExtAuthz::Response response{}; response.status = Filters::Common::ExtAuthz::CheckStatus::OK; response.headers_to_append = Http::HeaderVector{{request_header_key, "bar"}}; - response.headers_to_add = Http::HeaderVector{{key_to_add, "foo"}, {key_to_override, "bar"}}; + response.headers_to_set = Http::HeaderVector{{key_to_add, "foo"}, {key_to_override, "bar"}}; auto response_ptr = std::make_unique(response); @@ -1330,7 +1330,7 @@ TEST_P(HttpFilterTestParam, DestroyResponseBeforeSendLocalReply) { response.status = Filters::Common::ExtAuthz::CheckStatus::Denied; response.status_code = Http::Code::Forbidden; response.body = std::string{"foo"}; - response.headers_to_add = Http::HeaderVector{{Http::LowerCaseString{"foo"}, "bar"}, + response.headers_to_set = Http::HeaderVector{{Http::LowerCaseString{"foo"}, "bar"}, {Http::LowerCaseString{"bar"}, "foo"}}; Filters::Common::ExtAuthz::ResponsePtr response_ptr = std::make_unique(response); @@ -1384,7 +1384,7 @@ TEST_P(HttpFilterTestParam, OverrideEncodingHeaders) { response.status = Filters::Common::ExtAuthz::CheckStatus::Denied; response.status_code = Http::Code::Forbidden; response.body = std::string{"foo"}; - response.headers_to_add = + response.headers_to_set = Http::HeaderVector{{Http::LowerCaseString{"foo"}, "bar"}, {Http::LowerCaseString{"bar"}, "foo"}, {Http::LowerCaseString{"set-cookie"}, "cookie1=value"}, From dc296cd306918b49a17456b9e2004d9b8e6a34cf Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Mon, 22 Jun 2020 13:03:09 -0400 Subject: [PATCH 408/909] conn_pool: test refactors (#11643) Splitting out a test refactor from an already large PR Making the connection pool test parameterized so we can test with old and new pools. Risk Level: n/a (test only) Testing: yes Docs Changes: no Release Notes: no Part of #11528 Signed-off-by: Alyssa Wilk --- test/common/tcp/conn_pool_test.cc | 293 +++++++++++++++++------------- 1 file changed, 163 insertions(+), 130 deletions(-) diff --git a/test/common/tcp/conn_pool_test.cc b/test/common/tcp/conn_pool_test.cc index 2f2634ac14d0..63ac6f3fc6ae 100644 --- a/test/common/tcp/conn_pool_test.cc +++ b/test/common/tcp/conn_pool_test.cc @@ -18,8 +18,8 @@ #include "gtest/gtest.h" using testing::_; -using testing::InSequence; using testing::Invoke; +using testing::InvokeWithoutArgs; using testing::NiceMock; using testing::Property; using testing::Return; @@ -65,21 +65,21 @@ struct ConnPoolCallbacks : public Tcp::ConnectionPool::Callbacks { }; /** - * A test version of OriginalConnPoolImpl that allows for mocking. + * A wrapper around a ConnectionPoolImpl which tracks when the bridge between + * the pool and the consumer of the connection is released and destroyed. 
*/ -class ConnPoolImplForTest : public OriginalConnPoolImpl { +class ConnPoolBase : public Tcp::ConnectionPool::Instance { public: - ConnPoolImplForTest(Event::MockDispatcher& dispatcher, Upstream::HostSharedPtr host, - NiceMock* upstream_ready_timer) - : OriginalConnPoolImpl(dispatcher, host, Upstream::ResourcePriority::Default, nullptr, - nullptr), - mock_dispatcher_(dispatcher), mock_upstream_ready_timer_(upstream_ready_timer) {} - - ~ConnPoolImplForTest() override { - EXPECT_EQ(0U, ready_conns_.size()); - EXPECT_EQ(0U, busy_conns_.size()); - EXPECT_EQ(0U, pending_requests_.size()); + ConnPoolBase(Event::MockDispatcher& dispatcher, Upstream::HostSharedPtr host, + NiceMock* upstream_ready_timer, bool test_new_connection_pool); + + void addDrainedCallback(DrainedCb cb) override { conn_pool_->addDrainedCallback(cb); } + void drainConnections() override { conn_pool_->drainConnections(); } + void closeConnections() override { conn_pool_->closeConnections(); } + ConnectionPool::Cancellable* newConnection(Tcp::ConnectionPool::Callbacks& callbacks) override { + return conn_pool_->newConnection(callbacks); } + Upstream::HostDescriptionConstSharedPtr host() const override { return conn_pool_->host(); } MOCK_METHOD(void, onConnReleasedForTest, ()); MOCK_METHOD(void, onConnDestroyedForTest, ()); @@ -103,81 +103,129 @@ class ConnPoolImplForTest : public OriginalConnPoolImpl { [&](Network::ReadFilterSharedPtr filter) -> void { test_conn.filter_ = filter; })); EXPECT_CALL(*test_conn.connection_, connect()); EXPECT_CALL(*test_conn.connect_timer_, enableTimer(_, _)); - } - void expectEnableUpstreamReady() { - EXPECT_FALSE(upstream_ready_enabled_); - EXPECT_CALL(*mock_upstream_ready_timer_, enableTimer(_, _)).Times(1).RetiresOnSaturation(); + ON_CALL(*test_conn.connection_, close(Network::ConnectionCloseType::NoFlush)) + .WillByDefault(InvokeWithoutArgs([test_conn]() -> void { + test_conn.connection_->raiseEvent(Network::ConnectionEvent::LocalClose); + })); } - void expectAndRunUpstreamReady() { - EXPECT_TRUE(upstream_ready_enabled_); - mock_upstream_ready_timer_->invokeCallback(); - EXPECT_FALSE(upstream_ready_enabled_); - } + void expectEnableUpstreamReady(bool run); + std::unique_ptr conn_pool_; Event::MockDispatcher& mock_dispatcher_; NiceMock* mock_upstream_ready_timer_; std::vector test_conns_; + Network::ConnectionCallbacks* callbacks_ = nullptr; + bool test_new_connection_pool_; protected: - void onConnReleased(OriginalConnPoolImpl::ActiveConn& conn) override { - for (auto& test_conn : test_conns_) { - if (conn.conn_.get() == test_conn.connection_) { - onConnReleasedForTest(); - break; - } + class ConnPoolImplForTest : public OriginalConnPoolImpl { + public: + ConnPoolImplForTest(Event::MockDispatcher& dispatcher, Upstream::HostSharedPtr host, + ConnPoolBase& parent) + : OriginalConnPoolImpl(dispatcher, host, Upstream::ResourcePriority::Default, nullptr, + nullptr), + parent_(parent) {} + + ~ConnPoolImplForTest() override { + EXPECT_EQ(0U, ready_conns_.size()); + EXPECT_EQ(0U, busy_conns_.size()); + EXPECT_EQ(0U, pending_requests_.size()); } - OriginalConnPoolImpl::onConnReleased(conn); - } + void onConnReleased(OriginalConnPoolImpl::ActiveConn& conn) override { + parent_.onConnReleasedForTest(); + OriginalConnPoolImpl::onConnReleased(conn); + } - void onConnDestroyed(OriginalConnPoolImpl::ActiveConn& conn) override { - for (auto i = test_conns_.begin(); i != test_conns_.end(); i++) { - if (conn.conn_.get() == i->connection_) { - onConnDestroyedForTest(); - test_conns_.erase(i); - break; + void 
onConnDestroyed(OriginalConnPoolImpl::ActiveConn& conn) override { + parent_.onConnDestroyedForTest(); + OriginalConnPoolImpl::onConnDestroyed(conn); + } + void expectEnableUpstreamReady(bool run) { + if (!run) { + EXPECT_FALSE(upstream_ready_enabled_); + EXPECT_CALL(*parent_.mock_upstream_ready_timer_, enableTimer(_, _)) + .Times(1) + .RetiresOnSaturation(); + } else { + EXPECT_TRUE(upstream_ready_enabled_); + parent_.mock_upstream_ready_timer_->invokeCallback(); + EXPECT_FALSE(upstream_ready_enabled_); } } + ConnPoolBase& parent_; + }; +}; - OriginalConnPoolImpl::onConnDestroyed(conn); +ConnPoolBase::ConnPoolBase(Event::MockDispatcher& dispatcher, Upstream::HostSharedPtr host, + NiceMock* upstream_ready_timer, + bool test_new_connection_pool) + : mock_dispatcher_(dispatcher), mock_upstream_ready_timer_(upstream_ready_timer), + test_new_connection_pool_(test_new_connection_pool) { + // TODO(alyssarwilk) remove this assert and test the old and the new when it lands. + ASSERT(!test_new_connection_pool_); + if (!test_new_connection_pool_) { + conn_pool_ = std::make_unique(dispatcher, host, *this); } -}; +} + +void ConnPoolBase::expectEnableUpstreamReady(bool run) { + if (!test_new_connection_pool_) { + dynamic_cast(conn_pool_.get())->expectEnableUpstreamReady(run); + } else { + if (!run) { + EXPECT_CALL(*mock_upstream_ready_timer_, enableTimer(_, _)).Times(1).RetiresOnSaturation(); + } else { + mock_upstream_ready_timer_->invokeCallback(); + } + } +} /** * Test fixture for connection pool tests. */ -class TcpConnPoolImplTest : public testing::Test { +class TcpConnPoolImplTest : public testing::TestWithParam { public: TcpConnPoolImplTest() - : upstream_ready_timer_(new NiceMock(&dispatcher_)), + : test_new_connection_pool_(GetParam()), + upstream_ready_timer_(new NiceMock(&dispatcher_)), host_(Upstream::makeTestHost(cluster_, "tcp://127.0.0.1:9000")), - conn_pool_(dispatcher_, host_, upstream_ready_timer_) {} + conn_pool_(dispatcher_, host_, upstream_ready_timer_, test_new_connection_pool_) { + // TODO(alyssarwilk) remove this assert and test the old and the new when it lands. + ASSERT(!test_new_connection_pool_); + } ~TcpConnPoolImplTest() override { - EXPECT_TRUE(TestUtility::gaugesZeroed(cluster_->stats_store_.gauges())); + EXPECT_TRUE(TestUtility::gaugesZeroed(cluster_->stats_store_.gauges())) + << TestUtility::nonZeroedGauges(cluster_->stats_store_.gauges()); } + bool test_new_connection_pool_; NiceMock dispatcher_; std::shared_ptr cluster_{new NiceMock()}; NiceMock* upstream_ready_timer_; Upstream::HostSharedPtr host_; - ConnPoolImplForTest conn_pool_; + ConnPoolBase conn_pool_; NiceMock runtime_; }; /** * Test fixture for connection pool destructor tests. 
*/ -class TcpConnPoolImplDestructorTest : public testing::Test { +class TcpConnPoolImplDestructorTest : public testing::TestWithParam { public: TcpConnPoolImplDestructorTest() - : upstream_ready_timer_(new NiceMock(&dispatcher_)), - conn_pool_{new OriginalConnPoolImpl( - dispatcher_, Upstream::makeTestHost(cluster_, "tcp://127.0.0.1:9000"), - Upstream::ResourcePriority::Default, nullptr, nullptr)} {} - + : test_new_connection_pool_(GetParam()), + upstream_ready_timer_(new NiceMock(&dispatcher_)) { + host_ = Upstream::makeTestHost(cluster_, "tcp://127.0.0.1:9000"); + ASSERT(!test_new_connection_pool_); + if (!test_new_connection_pool_) { + conn_pool_ = std::make_unique( + dispatcher_, host_, Upstream::ResourcePriority::Default, nullptr, nullptr); + } + } ~TcpConnPoolImplDestructorTest() override = default; void prepareConn() { @@ -195,12 +243,14 @@ class TcpConnPoolImplDestructorTest : public testing::Test { connection_->raiseEvent(Network::ConnectionEvent::Connected); } + bool test_new_connection_pool_; + Upstream::HostConstSharedPtr host_; NiceMock dispatcher_; std::shared_ptr cluster_{new NiceMock()}; NiceMock* upstream_ready_timer_; NiceMock* connect_timer_; NiceMock* connection_; - std::unique_ptr conn_pool_; + std::unique_ptr conn_pool_; std::unique_ptr callbacks_; }; @@ -265,14 +315,13 @@ struct ActiveTestConn { bool completed_{}; }; -TEST_F(TcpConnPoolImplTest, HostAccessor) { EXPECT_EQ(conn_pool_.host(), host_); } +TEST_P(TcpConnPoolImplTest, HostAccessor) { EXPECT_EQ(conn_pool_.host(), host_); } /** * Verify that connections are drained when requested. */ -TEST_F(TcpConnPoolImplTest, DrainConnections) { +TEST_P(TcpConnPoolImplTest, DrainConnections) { cluster_->resetResourceManager(3, 1024, 1024, 1, 1); - InSequence s; ActiveTestConn c1(*this, 0, ActiveTestConn::Type::CreateConnection); ActiveTestConn c2(*this, 1, ActiveTestConn::Type::CreateConnection); @@ -281,32 +330,35 @@ TEST_F(TcpConnPoolImplTest, DrainConnections) { EXPECT_CALL(conn_pool_, onConnReleasedForTest()); c1.releaseConn(); - // This will destroy the ready connection and set requests remaining to 1 on the busy and pending - // connections. - EXPECT_CALL(conn_pool_, onConnDestroyedForTest()); - conn_pool_.drainConnections(); - dispatcher_.clearDeferredDeleteList(); - - // This will destroy the busy connection when the response finishes. - EXPECT_CALL(conn_pool_, onConnReleasedForTest()); - EXPECT_CALL(conn_pool_, onConnDestroyedForTest()); - c2.releaseConn(); - dispatcher_.clearDeferredDeleteList(); - - // This will destroy the pending connection when the response finishes. - c3.conn_index_ = 0; // c1/c2 have been deleted from test_conns_. - c3.completeConnection(); - - EXPECT_CALL(conn_pool_, onConnReleasedForTest()); - EXPECT_CALL(conn_pool_, onConnDestroyedForTest()); - c3.releaseConn(); - dispatcher_.clearDeferredDeleteList(); + { + // This will destroy the ready connection and set requests remaining to 1 on the busy and + // pending connections. + EXPECT_CALL(conn_pool_, onConnDestroyedForTest()); + conn_pool_.drainConnections(); + dispatcher_.clearDeferredDeleteList(); + } + { + // This will destroy the busy connection when the response finishes. + EXPECT_CALL(conn_pool_, onConnReleasedForTest()); + EXPECT_CALL(conn_pool_, onConnDestroyedForTest()); + c2.releaseConn(); + dispatcher_.clearDeferredDeleteList(); + } + { + // This will destroy the pending connection when the response finishes. 
+ c3.completeConnection(); + + EXPECT_CALL(conn_pool_, onConnReleasedForTest()); + EXPECT_CALL(conn_pool_, onConnDestroyedForTest()); + c3.releaseConn(); + dispatcher_.clearDeferredDeleteList(); + } } /** * Test all timing stats are set. */ -TEST_F(TcpConnPoolImplTest, VerifyTimingStats) { +TEST_P(TcpConnPoolImplTest, VerifyTimingStats) { EXPECT_CALL(cluster_->stats_store_, deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_cx_connect_ms"), _)); EXPECT_CALL(cluster_->stats_store_, @@ -326,7 +378,7 @@ TEST_F(TcpConnPoolImplTest, VerifyTimingStats) { /** * Test that buffer limits are set. */ -TEST_F(TcpConnPoolImplTest, VerifyBufferLimits) { +TEST_P(TcpConnPoolImplTest, VerifyBufferLimits) { ConnPoolCallbacks callbacks; conn_pool_.expectConnCreate(); EXPECT_CALL(*cluster_, perConnectionBufferLimitBytes()).WillOnce(Return(8192)); @@ -344,10 +396,9 @@ TEST_F(TcpConnPoolImplTest, VerifyBufferLimits) { /** * Test that upstream callback fire for assigned connections. */ -TEST_F(TcpConnPoolImplTest, UpstreamCallbacks) { +TEST_P(TcpConnPoolImplTest, UpstreamCallbacks) { Buffer::OwnedImpl buffer; - InSequence s; ConnectionPool::MockUpstreamCallbacks callbacks; // Create connection, set UpstreamCallbacks @@ -381,10 +432,9 @@ TEST_F(TcpConnPoolImplTest, UpstreamCallbacks) { /** * Test that upstream callback close event fires for assigned connections. */ -TEST_F(TcpConnPoolImplTest, UpstreamCallbacksCloseEvent) { +TEST_P(TcpConnPoolImplTest, UpstreamCallbacksCloseEvent) { Buffer::OwnedImpl buffer; - InSequence s; ConnectionPool::MockUpstreamCallbacks callbacks; // Create connection, set UpstreamCallbacks @@ -401,11 +451,9 @@ TEST_F(TcpConnPoolImplTest, UpstreamCallbacksCloseEvent) { /** * Test that a connection pool functions without upstream callbacks. */ -TEST_F(TcpConnPoolImplTest, NoUpstreamCallbacks) { +TEST_P(TcpConnPoolImplTest, NoUpstreamCallbacks) { Buffer::OwnedImpl buffer; - InSequence s; - // Create connection. ActiveTestConn c1(*this, 0, ActiveTestConn::Type::CreateConnection); @@ -420,8 +468,7 @@ TEST_F(TcpConnPoolImplTest, NoUpstreamCallbacks) { * Tests a request that generates a new connection, completes, and then a second request that uses * the same connection. */ -TEST_F(TcpConnPoolImplTest, MultipleRequestAndResponse) { - InSequence s; +TEST_P(TcpConnPoolImplTest, MultipleRequestAndResponse) { // Request 1 should kick off a new connection. ActiveTestConn c1(*this, 0, ActiveTestConn::Type::CreateConnection); @@ -444,8 +491,7 @@ TEST_F(TcpConnPoolImplTest, MultipleRequestAndResponse) { /** * Tests ConnectionState assignment, lookup and destruction. */ -TEST_F(TcpConnPoolImplTest, ConnectionStateLifecycle) { - InSequence s; +TEST_P(TcpConnPoolImplTest, ConnectionStateLifecycle) { bool state_destroyed = false; @@ -483,7 +529,7 @@ TEST_F(TcpConnPoolImplTest, ConnectionStateLifecycle) { /** * Test when we overflow max pending requests. */ -TEST_F(TcpConnPoolImplTest, MaxPendingRequests) { +TEST_P(TcpConnPoolImplTest, MaxPendingRequests) { cluster_->resetResourceManager(1, 1, 1024, 1, 1); ConnPoolCallbacks callbacks; @@ -511,8 +557,7 @@ TEST_F(TcpConnPoolImplTest, MaxPendingRequests) { * Tests a connection failure before a request is bound which should result in the pending request * getting purged. */ -TEST_F(TcpConnPoolImplTest, RemoteConnectFailure) { - InSequence s; +TEST_P(TcpConnPoolImplTest, RemoteConnectFailure) { // Request 1 should kick off a new connection. 
ConnPoolCallbacks callbacks; @@ -537,8 +582,7 @@ TEST_F(TcpConnPoolImplTest, RemoteConnectFailure) { * Tests a connection failure before a request is bound which should result in the pending request * getting purged. */ -TEST_F(TcpConnPoolImplTest, LocalConnectFailure) { - InSequence s; +TEST_P(TcpConnPoolImplTest, LocalConnectFailure) { // Request 1 should kick off a new connection. ConnPoolCallbacks callbacks; @@ -562,8 +606,7 @@ TEST_F(TcpConnPoolImplTest, LocalConnectFailure) { /** * Tests a connect timeout. Also test that we can add a new request during ejection processing. */ -TEST_F(TcpConnPoolImplTest, ConnectTimeout) { - InSequence s; +TEST_P(TcpConnPoolImplTest, ConnectTimeout) { // Request 1 should kick off a new connection. ConnPoolCallbacks callbacks1; @@ -594,8 +637,7 @@ TEST_F(TcpConnPoolImplTest, ConnectTimeout) { /** * Test cancelling before the request is bound to a connection. */ -TEST_F(TcpConnPoolImplTest, CancelBeforeBound) { - InSequence s; +TEST_P(TcpConnPoolImplTest, CancelBeforeBound) { // Request 1 should kick off a new connection. ConnPoolCallbacks callbacks; @@ -615,8 +657,7 @@ TEST_F(TcpConnPoolImplTest, CancelBeforeBound) { /** * Test cancelling before the request is bound to a connection, with connection close. */ -TEST_F(TcpConnPoolImplTest, CancelAndCloseBeforeBound) { - InSequence s; +TEST_P(TcpConnPoolImplTest, CancelAndCloseBeforeBound) { // Request 1 should kick off a new connection. ConnPoolCallbacks callbacks; @@ -634,8 +675,7 @@ TEST_F(TcpConnPoolImplTest, CancelAndCloseBeforeBound) { /** * Test an upstream disconnection while there is a bound request. */ -TEST_F(TcpConnPoolImplTest, DisconnectWhileBound) { - InSequence s; +TEST_P(TcpConnPoolImplTest, DisconnectWhileBound) { // Request 1 should kick off a new connection. ConnPoolCallbacks callbacks; @@ -656,9 +696,8 @@ TEST_F(TcpConnPoolImplTest, DisconnectWhileBound) { /** * Test upstream disconnection of one request while another is pending. */ -TEST_F(TcpConnPoolImplTest, DisconnectWhilePending) { +TEST_P(TcpConnPoolImplTest, DisconnectWhilePending) { cluster_->resetResourceManager(1, 1024, 1024, 1, 1); - InSequence s; // First request connected. ConnPoolCallbacks callbacks; @@ -682,26 +721,24 @@ TEST_F(TcpConnPoolImplTest, DisconnectWhilePending) { EXPECT_CALL(conn_pool_, onConnDestroyedForTest()); dispatcher_.clearDeferredDeleteList(); - // test_conns_[0] was replaced with a new connection - EXPECT_CALL(*conn_pool_.test_conns_[0].connect_timer_, disableTimer()); + // test_conns_[1] is the new connection + EXPECT_CALL(*conn_pool_.test_conns_[1].connect_timer_, disableTimer()); EXPECT_CALL(callbacks2.pool_ready_, ready()); - conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + conn_pool_.test_conns_[1].connection_->raiseEvent(Network::ConnectionEvent::Connected); EXPECT_CALL(conn_pool_, onConnReleasedForTest()); callbacks2.conn_data_.reset(); // Disconnect EXPECT_CALL(conn_pool_, onConnDestroyedForTest()); - conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + conn_pool_.test_conns_[1].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); dispatcher_.clearDeferredDeleteList(); } /** * Test that we correctly handle reaching max connections. */ -TEST_F(TcpConnPoolImplTest, MaxConnections) { - InSequence s; - +TEST_P(TcpConnPoolImplTest, MaxConnections) { // Request 1 should kick off a new connection. 
ConnPoolCallbacks callbacks; conn_pool_.expectConnCreate(); @@ -722,11 +759,11 @@ TEST_F(TcpConnPoolImplTest, MaxConnections) { // Finishing request 1 will immediately bind to request 2. EXPECT_CALL(conn_pool_, onConnReleasedForTest()); - conn_pool_.expectEnableUpstreamReady(); + conn_pool_.expectEnableUpstreamReady(false); EXPECT_CALL(callbacks2.pool_ready_, ready()); callbacks.conn_data_.reset(); - conn_pool_.expectAndRunUpstreamReady(); + conn_pool_.expectEnableUpstreamReady(true); EXPECT_CALL(conn_pool_, onConnReleasedForTest()); callbacks2.conn_data_.reset(); @@ -739,8 +776,7 @@ TEST_F(TcpConnPoolImplTest, MaxConnections) { /** * Test when we reach max requests per connection. */ -TEST_F(TcpConnPoolImplTest, MaxRequestsPerConnection) { - InSequence s; +TEST_P(TcpConnPoolImplTest, MaxRequestsPerConnection) { cluster_->max_requests_per_connection_ = 1; @@ -766,9 +802,8 @@ TEST_F(TcpConnPoolImplTest, MaxRequestsPerConnection) { /* * Test that multiple connections can be assigned at once. */ -TEST_F(TcpConnPoolImplTest, ConcurrentConnections) { +TEST_P(TcpConnPoolImplTest, ConcurrentConnections) { cluster_->resetResourceManager(2, 1024, 1024, 1, 1); - InSequence s; ActiveTestConn c1(*this, 0, ActiveTestConn::Type::CreateConnection); ActiveTestConn c2(*this, 1, ActiveTestConn::Type::CreateConnection); @@ -776,11 +811,11 @@ TEST_F(TcpConnPoolImplTest, ConcurrentConnections) { // Finish c1, which gets c3 going. EXPECT_CALL(conn_pool_, onConnReleasedForTest()); - conn_pool_.expectEnableUpstreamReady(); + conn_pool_.expectEnableUpstreamReady(false); c3.expectNewConn(); c1.releaseConn(); - conn_pool_.expectAndRunUpstreamReady(); + conn_pool_.expectEnableUpstreamReady(true); EXPECT_CALL(conn_pool_, onConnReleasedForTest()).Times(2); c2.releaseConn(); c3.releaseConn(); @@ -795,8 +830,7 @@ TEST_F(TcpConnPoolImplTest, ConcurrentConnections) { /** * Tests ConnectionState lifecycle with multiple concurrent connections. */ -TEST_F(TcpConnPoolImplTest, ConnectionStateWithConcurrentConnections) { - InSequence s; +TEST_P(TcpConnPoolImplTest, ConnectionStateWithConcurrentConnections) { int state_destroyed = 0; auto* s1 = new TestConnectionState(1, [&]() -> void { state_destroyed |= 1; }); @@ -814,11 +848,11 @@ TEST_F(TcpConnPoolImplTest, ConnectionStateWithConcurrentConnections) { // Finish c1, which gets c3 going. EXPECT_CALL(conn_pool_, onConnReleasedForTest()); - conn_pool_.expectEnableUpstreamReady(); + conn_pool_.expectEnableUpstreamReady(false); c3.expectNewConn(); c1.releaseConn(); - conn_pool_.expectAndRunUpstreamReady(); + conn_pool_.expectEnableUpstreamReady(true); // c3 now has the state set by c1. EXPECT_EQ(s1, c3.callbacks_.conn_data_->connectionStateTyped()); @@ -846,8 +880,7 @@ TEST_F(TcpConnPoolImplTest, ConnectionStateWithConcurrentConnections) { /** * Tests that the DrainCallback is invoked when the number of connections goes to zero. */ -TEST_F(TcpConnPoolImplTest, DrainCallback) { - InSequence s; +TEST_P(TcpConnPoolImplTest, DrainCallback) { ReadyWatcher drained; EXPECT_CALL(drained, ready()); @@ -869,8 +902,7 @@ TEST_F(TcpConnPoolImplTest, DrainCallback) { /** * Test draining a connection pool that has a pending connection. 
*/ -TEST_F(TcpConnPoolImplTest, DrainWhileConnecting) { - InSequence s; +TEST_P(TcpConnPoolImplTest, DrainWhileConnecting) { ReadyWatcher drained; ConnPoolCallbacks callbacks; @@ -883,7 +915,6 @@ TEST_F(TcpConnPoolImplTest, DrainWhileConnecting) { EXPECT_CALL(*conn_pool_.test_conns_[0].connection_, close(Network::ConnectionCloseType::NoFlush)); EXPECT_CALL(drained, ready()); conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); - EXPECT_CALL(conn_pool_, onConnDestroyedForTest()); dispatcher_.clearDeferredDeleteList(); } @@ -891,12 +922,11 @@ TEST_F(TcpConnPoolImplTest, DrainWhileConnecting) { /** * Test that the DrainCallback is invoked when a connection is closed. */ -TEST_F(TcpConnPoolImplTest, DrainOnClose) { +TEST_P(TcpConnPoolImplTest, DrainOnClose) { ReadyWatcher drained; EXPECT_CALL(drained, ready()); conn_pool_.addDrainedCallback([&]() -> void { drained.ready(); }); - InSequence s; ActiveTestConn c1(*this, 0, ActiveTestConn::Type::CreateConnection); ConnectionPool::MockUpstreamCallbacks callbacks; @@ -917,7 +947,7 @@ TEST_F(TcpConnPoolImplTest, DrainOnClose) { /** * Test that pending connections are closed when the connection pool is destroyed. */ -TEST_F(TcpConnPoolImplDestructorTest, TestPendingConnectionsAreClosed) { +TEST_P(TcpConnPoolImplDestructorTest, TestPendingConnectionsAreClosed) { connection_ = new NiceMock(); connect_timer_ = new NiceMock(&dispatcher_); EXPECT_CALL(dispatcher_, createClientConnection_(_, _, _, _)).WillOnce(Return(connection_)); @@ -936,7 +966,7 @@ TEST_F(TcpConnPoolImplDestructorTest, TestPendingConnectionsAreClosed) { /** * Test that busy connections are closed when the connection pool is destroyed. */ -TEST_F(TcpConnPoolImplDestructorTest, TestBusyConnectionsAreClosed) { +TEST_P(TcpConnPoolImplDestructorTest, TestBusyConnectionsAreClosed) { prepareConn(); EXPECT_CALL(*connection_, close(Network::ConnectionCloseType::NoFlush)); @@ -947,7 +977,7 @@ TEST_F(TcpConnPoolImplDestructorTest, TestBusyConnectionsAreClosed) { /** * Test that ready connections are closed when the connection pool is destroyed. 
*/ -TEST_F(TcpConnPoolImplDestructorTest, TestReadyConnectionsAreClosed) { +TEST_P(TcpConnPoolImplDestructorTest, TestReadyConnectionsAreClosed) { prepareConn(); // Transition connection to ready list @@ -958,5 +988,8 @@ TEST_F(TcpConnPoolImplDestructorTest, TestReadyConnectionsAreClosed) { conn_pool_.reset(); } +INSTANTIATE_TEST_SUITE_P(ConnectionPools, TcpConnPoolImplTest, testing::Values(false)); +INSTANTIATE_TEST_SUITE_P(ConnectionPools, TcpConnPoolImplDestructorTest, testing::Values(false)); + } // namespace Tcp } // namespace Envoy From 3efafaa0d2a1625e74bf40976d6e9cfef9064f4b Mon Sep 17 00:00:00 2001 From: yanavlasov Date: Mon, 22 Jun 2020 14:13:30 -0400 Subject: [PATCH 409/909] Add required using declarations or qualifications to avoid relying on ADL (#11688) Signed-off-by: Yan Avlasov --- test/mocks/event/mocks.cc | 1 + test/mocks/router/mocks.cc | 1 + 2 files changed, 2 insertions(+) diff --git a/test/mocks/event/mocks.cc b/test/mocks/event/mocks.cc index 6cdf4b5d10ec..6d7a5f6d842e 100644 --- a/test/mocks/event/mocks.cc +++ b/test/mocks/event/mocks.cc @@ -5,6 +5,7 @@ using testing::_; using testing::Assign; +using testing::DoAll; using testing::Invoke; using testing::NiceMock; using testing::Return; diff --git a/test/mocks/router/mocks.cc b/test/mocks/router/mocks.cc index 2d0a995bf1ca..962c86d70f67 100644 --- a/test/mocks/router/mocks.cc +++ b/test/mocks/router/mocks.cc @@ -6,6 +6,7 @@ #include "gtest/gtest.h" using testing::_; +using testing::DoAll; using testing::NiceMock; using testing::Return; using testing::ReturnPointee; From 3a87b210d177a95f366e6c97f4e2da300f55c6dd Mon Sep 17 00:00:00 2001 From: Rama Chavali Date: Tue, 23 Jun 2020 04:50:38 +0530 Subject: [PATCH 410/909] listener: remove static listeners when creation fails on worker (#11654) When static listener creation fails on worker because of race, currently it is not removed and remains in active listeners. This is because existing removeListener checks if it was added via api and returns. This PR fixes that behaviour. Signed-off-by: Rama Chavali --- docs/root/version_history/current.rst | 1 + source/server/listener_manager_impl.cc | 11 +++++-- source/server/listener_manager_impl.h | 2 ++ test/server/listener_manager_impl_test.cc | 38 +++++++++++++++++++++++ 4 files changed, 49 insertions(+), 3 deletions(-) diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 8a9e6c81a2ab..0b2d3c569b0e 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -23,6 +23,7 @@ Minor Behavior Changes * http: stopped adding a synthetic path to CONNECT requests, meaning unconfigured CONNECT requests will now return 404 instead of 403. This behavior can be temporarily reverted by setting `envoy.reloadable_features.stop_faking_paths` to false. * http: stopped allowing upstream 1xx or 204 responses with Transfer-Encoding or non-zero Content-Length headers. Content-Length of 0 is allowed, but stripped. This behavior can be temporarily reverted by setting `envoy.reloadable_features.strict_1xx_and_204_response_headers` to false. * http: upstream connections will now automatically set ALPN when this value is not explicitly set elsewhere (e.g. on the upstream TLS config). This behavior may be temporarily reverted by setting runtime feature `envoy.reloadable_features.http_default_alpn` to false. +* listener: fixed a bug where when a static listener fails to be added to a worker, the listener was not removed from the active listener list. 
* router: allow retries of streaming or incomplete requests. This removes stat `rq_retry_skipped_request_not_complete`. * router: allow retries by default when upstream responds with :ref:`x-envoy-overloaded `. diff --git a/source/server/listener_manager_impl.cc b/source/server/listener_manager_impl.cc index f4e6e87e9528..1d30aa9a0f60 100644 --- a/source/server/listener_manager_impl.cc +++ b/source/server/listener_manager_impl.cc @@ -671,7 +671,7 @@ void ListenerManagerImpl::addListenerToWorker(Worker& worker, ENVOY_LOG(critical, "listener '{}' failed to listen on address '{}' on worker", listener.name(), listener.listenSocketFactory().localAddress()->asString()); stats_.listener_create_failure_.inc(); - removeListener(listener.name()); + removeListenerInternal(listener.name(), false); } if (success) { stats_.listener_create_success_.inc(); @@ -795,14 +795,19 @@ uint64_t ListenerManagerImpl::numConnections() const { } bool ListenerManagerImpl::removeListener(const std::string& name) { + return removeListenerInternal(name, true); +} + +bool ListenerManagerImpl::removeListenerInternal(const std::string& name, + bool dynamic_listeners_only) { ENVOY_LOG(debug, "begin remove listener: name={}", name); auto existing_active_listener = getListenerByName(active_listeners_, name); auto existing_warming_listener = getListenerByName(warming_listeners_, name); if ((existing_warming_listener == warming_listeners_.end() || - (*existing_warming_listener)->blockRemove()) && + (dynamic_listeners_only && (*existing_warming_listener)->blockRemove())) && (existing_active_listener == active_listeners_.end() || - (*existing_active_listener)->blockRemove())) { + (dynamic_listeners_only && (*existing_active_listener)->blockRemove()))) { ENVOY_LOG(debug, "unknown/locked listener '{}'. no remove", name); return false; } diff --git a/source/server/listener_manager_impl.h b/source/server/listener_manager_impl.h index b677792800e4..36f9ccd1e7f8 100644 --- a/source/server/listener_manager_impl.h +++ b/source/server/listener_manager_impl.h @@ -210,6 +210,8 @@ class ListenerManagerImpl : public ListenerManager, Logger::LoggablestartWorkers(guard_dog_); + + // Add foo listener into active. 
+ const std::string listener_foo_yaml = R"EOF( +name: foo +address: + socket_address: + address: 0.0.0.0 + port_value: 1234 +filter_chains: +- filters: [] + )EOF"; + + ListenerHandle* listener_foo = expectListenerCreate(false, false); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(*worker_, addListener(_, _, _)); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", false)); + + EXPECT_CALL(*worker_, stopListener(_, _)); + EXPECT_CALL(*listener_foo->drain_manager_, startDrainSequence(_)); + worker_->callAddCompletion(false); + + EXPECT_CALL(*worker_, removeListener(_, _)); + listener_foo->drain_manager_->drain_sequence_completion_(); + + EXPECT_CALL(*listener_foo, onDestroy()); + worker_->callRemovalCompletion(); + + EXPECT_EQ( + 1UL, + server_.stats_store_.counterFromString("listener_manager.listener_create_failure").value()); + EXPECT_EQ(0, manager_->listeners().size()); +} + TEST_F(ListenerManagerImplTest, StatsNameValidCharacterTest) { const std::string yaml = R"EOF( address: From 1717683b84bc45617953fd74274a104ca2945676 Mon Sep 17 00:00:00 2001 From: Florin Coras Date: Mon, 22 Jun 2020 16:29:55 -0700 Subject: [PATCH 411/909] network: pass addresses to iohandle in connect/bind (#11675) Let IoHandle decide how to use Addresses, instead of passing the raw sockaddr struct. Signed-off-by: Florin Coras --- include/envoy/network/io_handle.h | 4 ++-- source/common/network/io_socket_handle_impl.cc | 8 ++++---- source/common/network/io_socket_handle_impl.h | 4 ++-- source/common/network/socket_impl.cc | 6 +++--- .../quic_listeners/quiche/quic_io_handle_wrapper.h | 8 ++++---- test/mocks/network/io_handle.h | 4 ++-- 6 files changed, 17 insertions(+), 17 deletions(-) diff --git a/include/envoy/network/io_handle.h b/include/envoy/network/io_handle.h index ac2766ff3281..bd569c56179e 100644 --- a/include/envoy/network/io_handle.h +++ b/include/envoy/network/io_handle.h @@ -148,7 +148,7 @@ class IoHandle { * @return a Api::SysCallIntResult with rc_ = 0 for success and rc_ = -1 for failure. If the call * is successful, errno_ shouldn't be used. */ - virtual Api::SysCallIntResult bind(const sockaddr* address, socklen_t addrlen) PURE; + virtual Api::SysCallIntResult bind(Address::InstanceConstSharedPtr address) PURE; /** * Listen on bound handle. @@ -166,7 +166,7 @@ class IoHandle { * @return a Api::SysCallIntResult with rc_ = 0 for success and rc_ = -1 for failure. If the call * is successful, errno_ shouldn't be used. 
*/ - virtual Api::SysCallIntResult connect(const sockaddr* address, socklen_t addrlen) PURE; + virtual Api::SysCallIntResult connect(Address::InstanceConstSharedPtr address) PURE; /** * Set option (see man 2 setsockopt) diff --git a/source/common/network/io_socket_handle_impl.cc b/source/common/network/io_socket_handle_impl.cc index 4e0c5534c19d..0f4e323e4a65 100644 --- a/source/common/network/io_socket_handle_impl.cc +++ b/source/common/network/io_socket_handle_impl.cc @@ -365,16 +365,16 @@ bool IoSocketHandleImpl::supportsMmsg() const { return Api::OsSysCallsSingleton::get().supportsMmsg(); } -Api::SysCallIntResult IoSocketHandleImpl::bind(const sockaddr* address, socklen_t addrlen) { - return Api::OsSysCallsSingleton::get().bind(fd_, address, addrlen); +Api::SysCallIntResult IoSocketHandleImpl::bind(Address::InstanceConstSharedPtr address) { + return Api::OsSysCallsSingleton::get().bind(fd_, address->sockAddr(), address->sockAddrLen()); } Api::SysCallIntResult IoSocketHandleImpl::listen(int backlog) { return Api::OsSysCallsSingleton::get().listen(fd_, backlog); } -Api::SysCallIntResult IoSocketHandleImpl::connect(const sockaddr* address, socklen_t addrlen) { - return Api::OsSysCallsSingleton::get().connect(fd_, address, addrlen); +Api::SysCallIntResult IoSocketHandleImpl::connect(Address::InstanceConstSharedPtr address) { + return Api::OsSysCallsSingleton::get().connect(fd_, address->sockAddr(), address->sockAddrLen()); } Api::SysCallIntResult IoSocketHandleImpl::setOption(int level, int optname, const void* optval, diff --git a/source/common/network/io_socket_handle_impl.h b/source/common/network/io_socket_handle_impl.h index 98fcad7c5ccd..110fe7e58328 100644 --- a/source/common/network/io_socket_handle_impl.h +++ b/source/common/network/io_socket_handle_impl.h @@ -45,9 +45,9 @@ class IoSocketHandleImpl : public IoHandle, protected Logger::Loggablesun_path); } // Not storing a reference to syscalls singleton because of unit test mocks - bind_result = io_handle_->bind(address->sockAddr(), address->sockAddrLen()); + bind_result = io_handle_->bind(address); if (pipe->mode() != 0 && !abstract_namespace && bind_result.rc_ == 0) { auto set_permissions = Api::OsSysCallsSingleton::get().chmod(pipe_sa->sun_path, pipe->mode()); if (set_permissions.rc_ != 0) { @@ -68,7 +68,7 @@ Api::SysCallIntResult SocketImpl::bind(Network::Address::InstanceConstSharedPtr return bind_result; } - bind_result = io_handle_->bind(address->sockAddr(), address->sockAddrLen()); + bind_result = io_handle_->bind(address); if (bind_result.rc_ == 0 && address->ip()->port() == 0) { local_address_ = io_handle_->localAddress(); } @@ -78,7 +78,7 @@ Api::SysCallIntResult SocketImpl::bind(Network::Address::InstanceConstSharedPtr Api::SysCallIntResult SocketImpl::listen(int backlog) { return io_handle_->listen(backlog); } Api::SysCallIntResult SocketImpl::connect(const Network::Address::InstanceConstSharedPtr address) { - auto result = io_handle_->connect(address->sockAddr(), address->sockAddrLen()); + auto result = io_handle_->connect(address); if (address->type() == Address::Type::Ip) { local_address_ = io_handle_->localAddress(); } diff --git a/source/extensions/quic_listeners/quiche/quic_io_handle_wrapper.h b/source/extensions/quic_listeners/quiche/quic_io_handle_wrapper.h index 96cb711fc1e0..f80d73afa92f 100644 --- a/source/extensions/quic_listeners/quiche/quic_io_handle_wrapper.h +++ b/source/extensions/quic_listeners/quiche/quic_io_handle_wrapper.h @@ -63,12 +63,12 @@ class QuicIoHandleWrapper : public Network::IoHandle { 
return io_handle_.recvmmsg(slices, self_port, output); } bool supportsMmsg() const override { return io_handle_.supportsMmsg(); } - Api::SysCallIntResult bind(const sockaddr* address, socklen_t addrlen) override { - return io_handle_.bind(address, addrlen); + Api::SysCallIntResult bind(Network::Address::InstanceConstSharedPtr address) override { + return io_handle_.bind(address); } Api::SysCallIntResult listen(int backlog) override { return io_handle_.listen(backlog); } - Api::SysCallIntResult connect(const sockaddr* address, socklen_t addrlen) override { - return io_handle_.connect(address, addrlen); + Api::SysCallIntResult connect(Network::Address::InstanceConstSharedPtr address) override { + return io_handle_.connect(address); } Api::SysCallIntResult setOption(int level, int optname, const void* optval, socklen_t optlen) override { diff --git a/test/mocks/network/io_handle.h b/test/mocks/network/io_handle.h index bfd84dfedf2e..4724b2fcea10 100644 --- a/test/mocks/network/io_handle.h +++ b/test/mocks/network/io_handle.h @@ -29,9 +29,9 @@ class MockIoHandle : public IoHandle { MOCK_METHOD(Api::IoCallUint64Result, recvmmsg, (RawSliceArrays & slices, uint32_t self_port, RecvMsgOutput& output)); MOCK_METHOD(bool, supportsMmsg, (), (const)); - MOCK_METHOD(Api::SysCallIntResult, bind, (const sockaddr* address, socklen_t addrlen)); + MOCK_METHOD(Api::SysCallIntResult, bind, (Address::InstanceConstSharedPtr address)); MOCK_METHOD(Api::SysCallIntResult, listen, (int backlog)); - MOCK_METHOD(Api::SysCallIntResult, connect, (const sockaddr* address, socklen_t addrlen)); + MOCK_METHOD(Api::SysCallIntResult, connect, (Address::InstanceConstSharedPtr address)); MOCK_METHOD(Api::SysCallIntResult, setOption, (int level, int optname, const void* optval, socklen_t optlen)); MOCK_METHOD(Api::SysCallIntResult, getOption, From ff1f3125e53fa005bf767388505eb265c79a3341 Mon Sep 17 00:00:00 2001 From: Dhi Aurrahman Date: Tue, 23 Jun 2020 19:37:06 +0700 Subject: [PATCH 412/909] docker, examples: Use tagged alpine image (#11680) The latest frolvlad/alpine-glibc is tagged using alpine-3.12, in which pip3 is no longer packed inside the python3 package. Instead, we should use the py3-pip (https://pkgs.alpinelinux.org/contents?branch=v3.12&name=py3-pip&arch=x86_64&repo=community) package.
alpine-3.11 packs pip3 inside the python3 package: https://pkgs.alpinelinux.org/contents?branch=v3.11&name=python3&arch=x86_64&repo=main, which is no longer true for alpine-3.12: https://pkgs.alpinelinux.org/contents?branch=v3.12&name=python3&arch=x86_64&repo=main Signed-off-by: Dhi Aurrahman --- ci/Dockerfile-envoy-alpine | 2 +- ci/Dockerfile-envoy-alpine-debug | 2 +- examples/cors/backend/Dockerfile-service | 2 +- examples/cors/frontend/Dockerfile-service | 2 +- examples/csrf/crosssite/Dockerfile-service | 2 +- examples/csrf/samesite/Dockerfile-service | 2 +- examples/front-proxy/Dockerfile-service | 2 +- examples/load-reporting-service/Dockerfile-http-server | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/ci/Dockerfile-envoy-alpine b/ci/Dockerfile-envoy-alpine index 4ac4578370c2..a4bd4ffbe5e2 100644 --- a/ci/Dockerfile-envoy-alpine +++ b/ci/Dockerfile-envoy-alpine @@ -1,4 +1,4 @@ -FROM frolvlad/alpine-glibc +FROM frolvlad/alpine-glibc:alpine-3.12_glibc-2.31 RUN mkdir -p /etc/envoy diff --git a/ci/Dockerfile-envoy-alpine-debug b/ci/Dockerfile-envoy-alpine-debug index fe4957814612..b7e7f34529a4 100644 --- a/ci/Dockerfile-envoy-alpine-debug +++ b/ci/Dockerfile-envoy-alpine-debug @@ -1,4 +1,4 @@ -FROM frolvlad/alpine-glibc +FROM frolvlad/alpine-glibc:alpine-3.12_glibc-2.31 RUN mkdir -p /etc/envoy diff --git a/examples/cors/backend/Dockerfile-service b/examples/cors/backend/Dockerfile-service index 89b5fc12736e..37c253fa81f1 100644 --- a/examples/cors/backend/Dockerfile-service +++ b/examples/cors/backend/Dockerfile-service @@ -1,6 +1,6 @@ FROM envoyproxy/envoy-alpine-dev:latest -RUN apk update && apk add python3 bash +RUN apk update && apk add py3-pip bash RUN pip3 install -q Flask==0.11.1 RUN mkdir /code ADD ./service.py /code/ diff --git a/examples/cors/frontend/Dockerfile-service b/examples/cors/frontend/Dockerfile-service index 8d882faa172f..735aaf42a095 100644 --- a/examples/cors/frontend/Dockerfile-service +++ b/examples/cors/frontend/Dockerfile-service @@ -1,6 +1,6 @@ FROM envoyproxy/envoy-alpine-dev:latest -RUN apk update && apk add python3 bash +RUN apk update && apk add py3-pip bash RUN pip3 install -q Flask==0.11.1 RUN mkdir /code ADD ./service.py ./index.html /code/ diff --git a/examples/csrf/crosssite/Dockerfile-service b/examples/csrf/crosssite/Dockerfile-service index 63ff61ef75ce..37c5296aeefc 100644 --- a/examples/csrf/crosssite/Dockerfile-service +++ b/examples/csrf/crosssite/Dockerfile-service @@ -1,6 +1,6 @@ FROM envoyproxy/envoy-alpine-dev:latest -RUN apk update && apk add python3 bash +RUN apk update && apk add py3-pip bash RUN pip3 install -q Flask==0.11.1 RUN mkdir /code ADD ./crosssite/service.py ./index.html /code/ diff --git a/examples/csrf/samesite/Dockerfile-service b/examples/csrf/samesite/Dockerfile-service index d97322f8acbd..f2413d895618 100644 --- a/examples/csrf/samesite/Dockerfile-service +++ b/examples/csrf/samesite/Dockerfile-service @@ -1,6 +1,6 @@ FROM envoyproxy/envoy-alpine-dev:latest -RUN apk update && apk add python3 bash +RUN apk update && apk add py3-pip bash RUN pip3 install -q Flask==0.11.1 RUN mkdir /code ADD ./samesite/service.py ./index.html /code/ diff --git a/examples/front-proxy/Dockerfile-service b/examples/front-proxy/Dockerfile-service index c3f5bafefc19..03a6a9422ea2 100644 --- a/examples/front-proxy/Dockerfile-service +++ b/examples/front-proxy/Dockerfile-service @@ -1,6 +1,6 @@ FROM envoyproxy/envoy-alpine-dev:latest -RUN apk update && apk add python3 bash curl +RUN apk update && apk add py3-pip bash curl RUN 
pip3 install -q Flask==0.11.1 requests==2.18.4 RUN mkdir /code ADD ./service.py /code diff --git a/examples/load-reporting-service/Dockerfile-http-server b/examples/load-reporting-service/Dockerfile-http-server index 6139ee948402..3ae32015fb8d 100644 --- a/examples/load-reporting-service/Dockerfile-http-server +++ b/examples/load-reporting-service/Dockerfile-http-server @@ -1,6 +1,6 @@ FROM envoyproxy/envoy-alpine-dev:latest -RUN apk update && apk add python3 bash curl +RUN apk update && apk add py3-pip bash curl RUN mkdir /code ADD ./start_service.sh /usr/local/bin/start_service.sh COPY . ./code From c9d07f1f590f4aec14e4a974a91b3efb2b73f74c Mon Sep 17 00:00:00 2001 From: danzh Date: Tue, 23 Jun 2020 08:38:18 -0400 Subject: [PATCH 413/909] update googleurl (#11699) Update googleurl to commit 6dafefa72cba2ab2ba4922d17a30618e9617c7cf which avoids having both build/ and BUILD under root directory. Signed-off-by: Dan Zhang --- bazel/repository_locations.bzl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index e8b193983ac1..78b43ab04610 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -418,9 +418,9 @@ DEPENDENCY_REPOSITORIES = dict( cpe = "N/A", ), com_googlesource_googleurl = dict( - # Static snapshot of https://quiche.googlesource.com/quiche/+archive/googleurl_dbf5ad147f60afc125e99db7549402af49a5eae8.tar.gz - sha256 = "b40cd22cadba577b7281a76db66f6a66dd744edbad8cc2c861c2c976ef721e4d", - urls = ["https://storage.googleapis.com/quiche-envoy-integration/googleurl_dbf5ad147f60afc125e99db7549402af49a5eae8.tar.gz"], + # Static snapshot of https://quiche.googlesource.com/quiche/+archive/googleurl_6dafefa72cba2ab2ba4922d17a30618e9617c7cf.tar.gz + sha256 = "f1ab73ddd1a7db4e08a9e4db6c2e98e5a0a7bbaca08f5fee0d73adb02c24e44a", + urls = ["https://storage.googleapis.com/quiche-envoy-integration/googleurl_6dafefa72cba2ab2ba4922d17a30618e9617c7cf.tar.gz"], use_category = ["dataplane"], cpe = "N/A", ), From 7f90cbf717c9dce71ad096906fa249feeea338c9 Mon Sep 17 00:00:00 2001 From: Rei Shimizu Date: Tue, 23 Jun 2020 21:48:46 +0900 Subject: [PATCH 414/909] bump re2 for wasm build (#11669) * bump re2 for wasm build Signed-off-by: Shikugawa --- bazel/repository_locations.bzl | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 78b43ab04610..4d9a62e38137 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -432,10 +432,14 @@ DEPENDENCY_REPOSITORIES = dict( use_category = ["dataplane"], cpe = "N/A", ), + # TODO(shikugawa): replace this with release tag after released package which includes + # disable pthread when build with emscripten. We use hash temporary to enable our changes to + # build envoy-wasm library with emscripten. 
https://github.com/google/re2/pull/263 com_googlesource_code_re2 = dict( - sha256 = "04ee2aaebaa5038554683329afc494e684c30f82f2a1e47eb62450e59338f84d", - strip_prefix = "re2-2020-03-03", - urls = ["https://github.com/google/re2/archive/2020-03-03.tar.gz"], + sha256 = "455bcacd2b94fca8897decd81172c5a93e5303ea0e5816b410877c51d6179ffb", + strip_prefix = "re2-2b25567a8ee3b6e97c3cd05d616f296756c52759", + # 2020-06-08 + urls = ["https://github.com/google/re2/archive/2b25567a8ee3b6e97c3cd05d616f296756c52759.tar.gz"], use_category = ["dataplane"], cpe = "N/A", ), From 8a9d6155601846d2d256defd09a3c408c6eb4386 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Tue, 23 Jun 2020 08:58:56 -0400 Subject: [PATCH 415/909] http: making http upstreams pluggable (#11327) Mainly moving code, but finishes up the series of pluggable upstream PRs. Additional Description: This unhides the configurable extension point to the cluster for selecting a connection pool and creating an upstream, which can be used for custom business logic in upstream creation. Risk Level: medium (router refactor) Testing: with prior PRs Docs Changes: inline with APIs Release Notes: added Signed-off-by: Alyssa Wilk --- api/envoy/config/cluster/v3/cluster.proto | 1 - .../config/cluster/v4alpha/cluster.proto | 1 - docs/root/version_history/current.rst | 1 + .../envoy/config/cluster/v3/cluster.proto | 1 - .../config/cluster/v4alpha/cluster.proto | 1 - source/common/router/upstream_request.cc | 121 --------------- source/common/router/upstream_request.h | 140 ------------------ source/extensions/extensions_build_config.bzl | 1 + .../extensions/upstreams/http/generic/BUILD | 3 +- .../upstreams/http/generic/config.cc | 9 +- source/extensions/upstreams/http/http/BUILD | 31 +++- .../extensions/upstreams/http/http/config.cc | 6 +- .../upstreams/http/http/upstream_request.cc | 74 +++++++++ .../upstreams/http/http/upstream_request.h | 102 +++++++++++++ source/extensions/upstreams/http/tcp/BUILD | 30 +++- .../extensions/upstreams/http/tcp/config.cc | 6 +- .../upstreams/http/tcp/upstream_request.cc | 113 ++++++++++++++ .../upstreams/http/tcp/upstream_request.h | 93 ++++++++++++ source/server/BUILD | 6 +- test/common/router/BUILD | 20 --- test/extensions/upstreams/http/tcp/BUILD | 31 ++++ .../http/tcp}/upstream_request_test.cc | 77 ++++++---- 22 files changed, 538 insertions(+), 330 deletions(-) create mode 100644 source/extensions/upstreams/http/http/upstream_request.cc create mode 100644 source/extensions/upstreams/http/http/upstream_request.h create mode 100644 source/extensions/upstreams/http/tcp/upstream_request.cc create mode 100644 source/extensions/upstreams/http/tcp/upstream_request.h create mode 100644 test/extensions/upstreams/http/tcp/BUILD rename test/{common/router => extensions/upstreams/http/tcp}/upstream_request_test.cc (80%) diff --git a/api/envoy/config/cluster/v3/cluster.proto b/api/envoy/config/cluster/v3/cluster.proto index 69c3a1e62f39..06bbb91afb99 100644 --- a/api/envoy/config/cluster/v3/cluster.proto +++ b/api/envoy/config/cluster/v3/cluster.proto @@ -818,7 +818,6 @@ message Cluster { // of 100 would indicate that the request took the entirety of the timeout given to it. bool track_timeout_budgets = 47; - // [#not-implemented-hide:] // Optional customization and configuration of upstream connection pool, and upstream type. 
// // Currently this field only applies for HTTP traffic but is designed for eventual use for custom diff --git a/api/envoy/config/cluster/v4alpha/cluster.proto b/api/envoy/config/cluster/v4alpha/cluster.proto index e3ab0c16b258..3a347634c5a1 100644 --- a/api/envoy/config/cluster/v4alpha/cluster.proto +++ b/api/envoy/config/cluster/v4alpha/cluster.proto @@ -819,7 +819,6 @@ message Cluster { // of 100 would indicate that the request took the entirety of the timeout given to it. bool track_timeout_budgets = 47; - // [#not-implemented-hide:] // Optional customization and configuration of upstream connection pool, and upstream type. // // Currently this field only applies for HTTP traffic but is designed for eventual use for custom diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 0b2d3c569b0e..56ea33449ca2 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -58,6 +58,7 @@ New Features * aggregate cluster: make route :ref:`retry_priority ` predicates work with :ref:`this cluster type `. * build: official released binary is now built on Ubuntu 18.04, requires glibc >= 2.27. * build: official released binary is now built with Clang 10.0.0. +* cluster: added an extension point for configurable :ref:`upstreams `. * compressor: generic :ref:`compressor ` filter exposed to users. * config: added :ref:`identifier ` stat that reflects control plane identifier. * config: added :ref:`version_text ` stat that reflects xDS version. diff --git a/generated_api_shadow/envoy/config/cluster/v3/cluster.proto b/generated_api_shadow/envoy/config/cluster/v3/cluster.proto index 523162df2247..0e800ce5bf5b 100644 --- a/generated_api_shadow/envoy/config/cluster/v3/cluster.proto +++ b/generated_api_shadow/envoy/config/cluster/v3/cluster.proto @@ -816,7 +816,6 @@ message Cluster { // of 100 would indicate that the request took the entirety of the timeout given to it. bool track_timeout_budgets = 47; - // [#not-implemented-hide:] // Optional customization and configuration of upstream connection pool, and upstream type. // // Currently this field only applies for HTTP traffic but is designed for eventual use for custom diff --git a/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto b/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto index e3ab0c16b258..3a347634c5a1 100644 --- a/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto +++ b/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto @@ -819,7 +819,6 @@ message Cluster { // of 100 would indicate that the request took the entirety of the timeout given to it. bool track_timeout_budgets = 47; - // [#not-implemented-hide:] // Optional customization and configuration of upstream connection pool, and upstream type. // // Currently this field only applies for HTTP traffic but is designed for eventual use for custom diff --git a/source/common/router/upstream_request.cc b/source/common/router/upstream_request.cc index 7189a14003c9..ab43e82157d0 100644 --- a/source/common/router/upstream_request.cc +++ b/source/common/router/upstream_request.cc @@ -517,126 +517,5 @@ void UpstreamRequest::enableDataFromDownstreamForFlowControl() { } } -void HttpConnPool::newStream(GenericConnectionPoolCallbacks* callbacks) { - callbacks_ = callbacks; - // It's possible for a reset to happen inline within the newStream() call. In this case, we - // might get deleted inline as well. 
Only write the returned handle out if it is not nullptr to - // deal with this case. - Http::ConnectionPool::Cancellable* handle = - conn_pool_->newStream(*callbacks->upstreamRequest(), *this); - if (handle) { - conn_pool_stream_handle_ = handle; - } -} - -void TcpConnPool::onPoolReady(Tcp::ConnectionPool::ConnectionDataPtr&& conn_data, - Upstream::HostDescriptionConstSharedPtr host) { - upstream_handle_ = nullptr; - Network::Connection& latched_conn = conn_data->connection(); - auto upstream = - std::make_unique(callbacks_->upstreamRequest(), std::move(conn_data)); - callbacks_->onPoolReady(std::move(upstream), host, latched_conn.localAddress(), - latched_conn.streamInfo()); -} - -bool HttpConnPool::cancelAnyPendingRequest() { - if (conn_pool_stream_handle_) { - conn_pool_stream_handle_->cancel(Tcp::ConnectionPool::CancelPolicy::Default); - conn_pool_stream_handle_ = nullptr; - return true; - } - return false; -} - -absl::optional HttpConnPool::protocol() const { return conn_pool_->protocol(); } - -void HttpConnPool::onPoolFailure(ConnectionPool::PoolFailureReason reason, - absl::string_view transport_failure_reason, - Upstream::HostDescriptionConstSharedPtr host) { - callbacks_->onPoolFailure(reason, transport_failure_reason, host); -} - -void HttpConnPool::onPoolReady(Http::RequestEncoder& request_encoder, - Upstream::HostDescriptionConstSharedPtr host, - const StreamInfo::StreamInfo& info) { - conn_pool_stream_handle_ = nullptr; - auto upstream = std::make_unique(*callbacks_->upstreamRequest(), &request_encoder); - callbacks_->onPoolReady(std::move(upstream), host, - request_encoder.getStream().connectionLocalAddress(), info); -} - -TcpUpstream::TcpUpstream(UpstreamRequest* upstream_request, - Tcp::ConnectionPool::ConnectionDataPtr&& upstream) - : upstream_request_(upstream_request), upstream_conn_data_(std::move(upstream)) { - upstream_conn_data_->connection().enableHalfClose(true); - upstream_conn_data_->addUpstreamCallbacks(*this); -} - -void TcpUpstream::encodeData(Buffer::Instance& data, bool end_stream) { - upstream_conn_data_->connection().write(data, end_stream); -} - -void TcpUpstream::encodeHeaders(const Http::RequestHeaderMap&, bool end_stream) { - // Headers should only happen once, so use this opportunity to add the proxy - // proto header, if configured. - ASSERT(upstream_request_->parent().routeEntry()->connectConfig().has_value()); - Buffer::OwnedImpl data; - auto& connect_config = upstream_request_->parent().routeEntry()->connectConfig().value(); - if (connect_config.has_proxy_protocol_config()) { - const Network::Connection& connection = *upstream_request_->parent().callbacks()->connection(); - Extensions::Common::ProxyProtocol::generateProxyProtoHeader( - connect_config.proxy_protocol_config(), connection, data); - } - - if (data.length() != 0 || end_stream) { - upstream_conn_data_->connection().write(data, end_stream); - } - - // TcpUpstream::encodeHeaders is called after the UpstreamRequest is fully initialized. Also use - // this time to synthesize the 200 response headers downstream to complete the CONNECT handshake. 
- Http::ResponseHeaderMapPtr headers{ - Http::createHeaderMap({{Http::Headers::get().Status, "200"}})}; - upstream_request_->decodeHeaders(std::move(headers), false); -} - -void TcpUpstream::encodeTrailers(const Http::RequestTrailerMap&) { - Buffer::OwnedImpl data; - upstream_conn_data_->connection().write(data, true); -} - -void TcpUpstream::readDisable(bool disable) { - if (upstream_conn_data_->connection().state() != Network::Connection::State::Open) { - return; - } - upstream_conn_data_->connection().readDisable(disable); -} - -void TcpUpstream::resetStream() { - upstream_request_ = nullptr; - upstream_conn_data_->connection().close(Network::ConnectionCloseType::NoFlush); -} - -void TcpUpstream::onUpstreamData(Buffer::Instance& data, bool end_stream) { - upstream_request_->decodeData(data, end_stream); -} - -void TcpUpstream::onEvent(Network::ConnectionEvent event) { - if (event != Network::ConnectionEvent::Connected && upstream_request_) { - upstream_request_->onResetStream(Http::StreamResetReason::ConnectionTermination, ""); - } -} - -void TcpUpstream::onAboveWriteBufferHighWatermark() { - if (upstream_request_) { - upstream_request_->disableDataFromDownstreamForFlowControl(); - } -} - -void TcpUpstream::onBelowWriteBufferLowWatermark() { - if (upstream_request_) { - upstream_request_->enableDataFromDownstreamForFlowControl(); - } -} - } // namespace Router } // namespace Envoy diff --git a/source/common/router/upstream_request.h b/source/common/router/upstream_request.h index 93e3eb268644..027731972e9a 100644 --- a/source/common/router/upstream_request.h +++ b/source/common/router/upstream_request.h @@ -162,145 +162,5 @@ class UpstreamRequest : public Logger::Loggable, Event::TimerPtr max_stream_duration_timer_; }; -class HttpConnPool : public GenericConnPool, public Http::ConnectionPool::Callbacks { -public: - // GenericConnPool - HttpConnPool(Upstream::ClusterManager& cm, const RouteEntry& route_entry, - absl::optional downstream_protocol, - Upstream::LoadBalancerContext* ctx) { - conn_pool_ = cm.httpConnPoolForCluster(route_entry.clusterName(), route_entry.priority(), - downstream_protocol, ctx); - } - bool valid() const { return conn_pool_ != nullptr; } - - void newStream(GenericConnectionPoolCallbacks* callbacks) override; - bool cancelAnyPendingRequest() override; - absl::optional protocol() const override; - Upstream::HostDescriptionConstSharedPtr host() const override { return conn_pool_->host(); } - - // Http::ConnectionPool::Callbacks - void onPoolFailure(ConnectionPool::PoolFailureReason reason, - absl::string_view transport_failure_reason, - Upstream::HostDescriptionConstSharedPtr host) override; - void onPoolReady(Http::RequestEncoder& callbacks_encoder, - Upstream::HostDescriptionConstSharedPtr host, - const StreamInfo::StreamInfo& info) override; - -private: - // Points to the actual connection pool to create streams from. 
- Http::ConnectionPool::Instance* conn_pool_{}; - Http::ConnectionPool::Cancellable* conn_pool_stream_handle_{}; - GenericConnectionPoolCallbacks* callbacks_{}; -}; - -class TcpConnPool : public GenericConnPool, public Tcp::ConnectionPool::Callbacks { -public: - TcpConnPool(Upstream::ClusterManager& cm, const RouteEntry& route_entry, - absl::optional, Upstream::LoadBalancerContext* ctx) { - conn_pool_ = cm.tcpConnPoolForCluster(route_entry.clusterName(), - Upstream::ResourcePriority::Default, ctx); - } - bool valid() const { return conn_pool_ != nullptr; } - void newStream(GenericConnectionPoolCallbacks* callbacks) override { - callbacks_ = callbacks; - upstream_handle_ = conn_pool_->newConnection(*this); - } - - bool cancelAnyPendingRequest() override { - if (upstream_handle_) { - upstream_handle_->cancel(ConnectionPool::CancelPolicy::Default); - upstream_handle_ = nullptr; - return true; - } - return false; - } - absl::optional protocol() const override { return absl::nullopt; } - Upstream::HostDescriptionConstSharedPtr host() const override { return conn_pool_->host(); } - // Tcp::ConnectionPool::Callbacks - void onPoolFailure(ConnectionPool::PoolFailureReason reason, - Upstream::HostDescriptionConstSharedPtr host) override { - upstream_handle_ = nullptr; - callbacks_->onPoolFailure(reason, "", host); - } - - void onPoolReady(Tcp::ConnectionPool::ConnectionDataPtr&& conn_data, - Upstream::HostDescriptionConstSharedPtr host) override; - -private: - Tcp::ConnectionPool::Instance* conn_pool_; - Tcp::ConnectionPool::Cancellable* upstream_handle_{}; - GenericConnectionPoolCallbacks* callbacks_{}; -}; - -class HttpUpstream : public GenericUpstream, public Http::StreamCallbacks { -public: - HttpUpstream(UpstreamRequest& upstream_request, Http::RequestEncoder* encoder) - : upstream_request_(upstream_request), request_encoder_(encoder) { - request_encoder_->getStream().addCallbacks(*this); - } - - // GenericUpstream - void encodeData(Buffer::Instance& data, bool end_stream) override { - request_encoder_->encodeData(data, end_stream); - } - void encodeMetadata(const Http::MetadataMapVector& metadata_map_vector) override { - request_encoder_->encodeMetadata(metadata_map_vector); - } - void encodeHeaders(const Http::RequestHeaderMap& headers, bool end_stream) override { - request_encoder_->encodeHeaders(headers, end_stream); - } - void encodeTrailers(const Http::RequestTrailerMap& trailers) override { - request_encoder_->encodeTrailers(trailers); - } - - void readDisable(bool disable) override { request_encoder_->getStream().readDisable(disable); } - - void resetStream() override { - request_encoder_->getStream().removeCallbacks(*this); - request_encoder_->getStream().resetStream(Http::StreamResetReason::LocalReset); - } - - // Http::StreamCallbacks - void onResetStream(Http::StreamResetReason reason, - absl::string_view transport_failure_reason) override { - upstream_request_.onResetStream(reason, transport_failure_reason); - } - - void onAboveWriteBufferHighWatermark() override { - upstream_request_.disableDataFromDownstreamForFlowControl(); - } - - void onBelowWriteBufferLowWatermark() override { - upstream_request_.enableDataFromDownstreamForFlowControl(); - } - -private: - UpstreamRequest& upstream_request_; - Http::RequestEncoder* request_encoder_{}; -}; - -class TcpUpstream : public GenericUpstream, public Tcp::ConnectionPool::UpstreamCallbacks { -public: - TcpUpstream(UpstreamRequest* upstream_request, Tcp::ConnectionPool::ConnectionDataPtr&& upstream); - - // GenericUpstream - void 
encodeData(Buffer::Instance& data, bool end_stream) override; - void encodeMetadata(const Http::MetadataMapVector&) override {} - void encodeHeaders(const Http::RequestHeaderMap&, bool end_stream) override; - void encodeTrailers(const Http::RequestTrailerMap&) override; - void readDisable(bool disable) override; - void resetStream() override; - - // Tcp::ConnectionPool::UpstreamCallbacks - void onUpstreamData(Buffer::Instance& data, bool end_stream) override; - void onEvent(Network::ConnectionEvent event) override; - void onAboveWriteBufferHighWatermark() override; - void onBelowWriteBufferLowWatermark() override; - -private: - UpstreamRequest* upstream_request_; - Tcp::ConnectionPool::ConnectionDataPtr upstream_conn_data_; -}; - } // namespace Router } // namespace Envoy diff --git a/source/extensions/extensions_build_config.bzl b/source/extensions/extensions_build_config.bzl index c6341c8cd8dd..1807538e2d8b 100644 --- a/source/extensions/extensions_build_config.bzl +++ b/source/extensions/extensions_build_config.bzl @@ -194,6 +194,7 @@ EXTENSIONS = { "envoy.internal_redirect_predicates.safe_cross_scheme": "//source/extensions/internal_redirect/safe_cross_scheme:config", # Http Upstreams + # TODO(alyssawilk) these are linked in the default build and shouldn't be here: fix tooling and remove. "envoy.upstreams.http.generic": "//source/extensions/upstreams/http/generic:config", "envoy.upstreams.http.http": "//source/extensions/upstreams/http/http:config", diff --git a/source/extensions/upstreams/http/generic/BUILD b/source/extensions/upstreams/http/generic/BUILD index f61d9801103f..712b0d9632ea 100644 --- a/source/extensions/upstreams/http/generic/BUILD +++ b/source/extensions/upstreams/http/generic/BUILD @@ -19,7 +19,8 @@ envoy_cc_extension( security_posture = "robust_to_untrusted_downstream", visibility = ["//visibility:public"], deps = [ - "//source/common/router:router_lib", + "//source/extensions/upstreams/http/http:upstream_request_lib", + "//source/extensions/upstreams/http/tcp:upstream_request_lib", "@envoy_api//envoy/extensions/upstreams/http/generic/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/upstreams/http/generic/config.cc b/source/extensions/upstreams/http/generic/config.cc index 193f14c6e759..3404f49bf46a 100644 --- a/source/extensions/upstreams/http/generic/config.cc +++ b/source/extensions/upstreams/http/generic/config.cc @@ -1,6 +1,7 @@ #include "extensions/upstreams/http/generic/config.h" -#include "common/router/upstream_request.h" +#include "extensions/upstreams/http/http/upstream_request.h" +#include "extensions/upstreams/http/tcp/upstream_request.h" namespace Envoy { namespace Extensions { @@ -13,10 +14,12 @@ Router::GenericConnPoolPtr GenericGenericConnPoolFactory::createGenericConnPool( absl::optional downstream_protocol, Upstream::LoadBalancerContext* ctx) const { if (is_connect) { - auto ret = std::make_unique(cm, route_entry, downstream_protocol, ctx); + auto ret = std::make_unique(cm, is_connect, route_entry, + downstream_protocol, ctx); return (ret->valid() ? std::move(ret) : nullptr); } - auto ret = std::make_unique(cm, route_entry, downstream_protocol, ctx); + auto ret = std::make_unique(cm, is_connect, route_entry, + downstream_protocol, ctx); return (ret->valid() ? 
std::move(ret) : nullptr); } diff --git a/source/extensions/upstreams/http/http/BUILD b/source/extensions/upstreams/http/http/BUILD index caf8f766e6f3..f97f894d3294 100644 --- a/source/extensions/upstreams/http/http/BUILD +++ b/source/extensions/upstreams/http/http/BUILD @@ -1,6 +1,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", + "envoy_cc_library", "envoy_package", ) @@ -19,7 +20,35 @@ envoy_cc_extension( security_posture = "robust_to_untrusted_downstream", visibility = ["//visibility:public"], deps = [ - "//source/common/router:router_lib", + ":upstream_request_lib", "@envoy_api//envoy/extensions/upstreams/http/http/v3:pkg_cc_proto", ], ) + +envoy_cc_library( + name = "upstream_request_lib", + srcs = [ + "upstream_request.cc", + ], + hdrs = [ + "upstream_request.h", + ], + deps = [ + "//include/envoy/http:codes_interface", + "//include/envoy/http:conn_pool_interface", + "//include/envoy/http:filter_interface", + "//include/envoy/upstream:cluster_manager_interface", + "//include/envoy/upstream:upstream_interface", + "//source/common/common:assert_lib", + "//source/common/common:minimal_logger_lib", + "//source/common/http:codes_lib", + "//source/common/http:header_map_lib", + "//source/common/http:headers_lib", + "//source/common/http:message_lib", + "//source/common/network:application_protocol_lib", + "//source/common/network:transport_socket_options_lib", + "//source/common/router:router_lib", + "//source/common/upstream:load_balancer_lib", + "//source/extensions/common/proxy_protocol:proxy_protocol_header_lib", + ], +) diff --git a/source/extensions/upstreams/http/http/config.cc b/source/extensions/upstreams/http/http/config.cc index 5c5915afced5..e8c933f45216 100644 --- a/source/extensions/upstreams/http/http/config.cc +++ b/source/extensions/upstreams/http/http/config.cc @@ -1,6 +1,6 @@ #include "extensions/upstreams/http/http/config.h" -#include "common/router/upstream_request.h" +#include "extensions/upstreams/http/http/upstream_request.h" namespace Envoy { namespace Extensions { @@ -9,10 +9,10 @@ namespace Http { namespace Http { Router::GenericConnPoolPtr HttpGenericConnPoolFactory::createGenericConnPool( - Upstream::ClusterManager& cm, bool, const Router::RouteEntry& route_entry, + Upstream::ClusterManager& cm, bool is_connect, const Router::RouteEntry& route_entry, absl::optional downstream_protocol, Upstream::LoadBalancerContext* ctx) const { - auto ret = std::make_unique(cm, route_entry, downstream_protocol, ctx); + auto ret = std::make_unique(cm, is_connect, route_entry, downstream_protocol, ctx); return (ret->valid() ? 
std::move(ret) : nullptr); } diff --git a/source/extensions/upstreams/http/http/upstream_request.cc b/source/extensions/upstreams/http/http/upstream_request.cc new file mode 100644 index 000000000000..d9b7e6ca6d3d --- /dev/null +++ b/source/extensions/upstreams/http/http/upstream_request.cc @@ -0,0 +1,74 @@ +#include "extensions/upstreams/http/http/upstream_request.h" + +#include +#include + +#include "envoy/event/dispatcher.h" +#include "envoy/event/timer.h" +#include "envoy/grpc/status.h" +#include "envoy/http/conn_pool.h" +#include "envoy/upstream/cluster_manager.h" +#include "envoy/upstream/upstream.h" + +#include "common/common/assert.h" +#include "common/common/utility.h" +#include "common/http/codes.h" +#include "common/http/header_map_impl.h" +#include "common/http/headers.h" +#include "common/http/message_impl.h" +#include "common/http/utility.h" +#include "common/router/router.h" + +using Envoy::Router::GenericConnectionPoolCallbacks; + +namespace Envoy { +namespace Extensions { +namespace Upstreams { +namespace Http { +namespace Http { + +void HttpConnPool::newStream(GenericConnectionPoolCallbacks* callbacks) { + callbacks_ = callbacks; + // It's possible for a reset to happen inline within the newStream() call. In this case, we + // might get deleted inline as well. Only write the returned handle out if it is not nullptr to + // deal with this case. + Envoy::Http::ConnectionPool::Cancellable* handle = + conn_pool_->newStream(*callbacks->upstreamRequest(), *this); + if (handle) { + conn_pool_stream_handle_ = handle; + } +} + +bool HttpConnPool::cancelAnyPendingRequest() { + if (conn_pool_stream_handle_) { + conn_pool_stream_handle_->cancel(ConnectionPool::CancelPolicy::Default); + conn_pool_stream_handle_ = nullptr; + return true; + } + return false; +} + +absl::optional HttpConnPool::protocol() const { + return conn_pool_->protocol(); +} + +void HttpConnPool::onPoolFailure(ConnectionPool::PoolFailureReason reason, + absl::string_view transport_failure_reason, + Upstream::HostDescriptionConstSharedPtr host) { + callbacks_->onPoolFailure(reason, transport_failure_reason, host); +} + +void HttpConnPool::onPoolReady(Envoy::Http::RequestEncoder& request_encoder, + Upstream::HostDescriptionConstSharedPtr host, + const StreamInfo::StreamInfo& info) { + conn_pool_stream_handle_ = nullptr; + auto upstream = std::make_unique(*callbacks_->upstreamRequest(), &request_encoder); + callbacks_->onPoolReady(std::move(upstream), host, + request_encoder.getStream().connectionLocalAddress(), info); +} + +} // namespace Http +} // namespace Http +} // namespace Upstreams +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/upstreams/http/http/upstream_request.h b/source/extensions/upstreams/http/http/upstream_request.h new file mode 100644 index 000000000000..7fcad5a0b24f --- /dev/null +++ b/source/extensions/upstreams/http/http/upstream_request.h @@ -0,0 +1,102 @@ +#pragma once + +#include +#include + +#include "envoy/http/codes.h" +#include "envoy/http/conn_pool.h" + +#include "common/common/logger.h" +#include "common/config/well_known_names.h" +#include "common/router/upstream_request.h" + +namespace Envoy { +namespace Extensions { +namespace Upstreams { +namespace Http { +namespace Http { + +class HttpConnPool : public Router::GenericConnPool, public Envoy::Http::ConnectionPool::Callbacks { +public: + // GenericConnPool + HttpConnPool(Upstream::ClusterManager& cm, bool is_connect, const Router::RouteEntry& route_entry, + absl::optional downstream_protocol, + 
Upstream::LoadBalancerContext* ctx) { + ASSERT(!is_connect); + conn_pool_ = cm.httpConnPoolForCluster(route_entry.clusterName(), route_entry.priority(), + downstream_protocol, ctx); + } + void newStream(Router::GenericConnectionPoolCallbacks* callbacks) override; + bool cancelAnyPendingRequest() override; + absl::optional protocol() const override; + + // Http::ConnectionPool::Callbacks + void onPoolFailure(ConnectionPool::PoolFailureReason reason, + absl::string_view transport_failure_reason, + Upstream::HostDescriptionConstSharedPtr host) override; + void onPoolReady(Envoy::Http::RequestEncoder& callbacks_encoder, + Upstream::HostDescriptionConstSharedPtr host, + const StreamInfo::StreamInfo& info) override; + Upstream::HostDescriptionConstSharedPtr host() const override { return conn_pool_->host(); } + + bool valid() { return conn_pool_ != nullptr; } + +private: + // Points to the actual connection pool to create streams from. + Envoy::Http::ConnectionPool::Instance* conn_pool_{}; + Envoy::Http::ConnectionPool::Cancellable* conn_pool_stream_handle_{}; + Router::GenericConnectionPoolCallbacks* callbacks_{}; +}; + +class HttpUpstream : public Router::GenericUpstream, public Envoy::Http::StreamCallbacks { +public: + HttpUpstream(Router::UpstreamRequest& upstream_request, Envoy::Http::RequestEncoder* encoder) + : upstream_request_(upstream_request), request_encoder_(encoder) { + request_encoder_->getStream().addCallbacks(*this); + } + + // GenericUpstream + void encodeData(Buffer::Instance& data, bool end_stream) override { + request_encoder_->encodeData(data, end_stream); + } + void encodeMetadata(const Envoy::Http::MetadataMapVector& metadata_map_vector) override { + request_encoder_->encodeMetadata(metadata_map_vector); + } + void encodeHeaders(const Envoy::Http::RequestHeaderMap& headers, bool end_stream) override { + request_encoder_->encodeHeaders(headers, end_stream); + } + void encodeTrailers(const Envoy::Http::RequestTrailerMap& trailers) override { + request_encoder_->encodeTrailers(trailers); + } + + void readDisable(bool disable) override { request_encoder_->getStream().readDisable(disable); } + + void resetStream() override { + request_encoder_->getStream().removeCallbacks(*this); + request_encoder_->getStream().resetStream(Envoy::Http::StreamResetReason::LocalReset); + } + + // Http::StreamCallbacks + void onResetStream(Envoy::Http::StreamResetReason reason, + absl::string_view transport_failure_reason) override { + upstream_request_.onResetStream(reason, transport_failure_reason); + } + + void onAboveWriteBufferHighWatermark() override { + upstream_request_.disableDataFromDownstreamForFlowControl(); + } + + void onBelowWriteBufferLowWatermark() override { + upstream_request_.enableDataFromDownstreamForFlowControl(); + } + +private: + Router::UpstreamRequest& upstream_request_; + Envoy::Http::RequestEncoder* request_encoder_{}; +}; + +} // namespace Http +} // namespace Http +} // namespace Upstreams +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/upstreams/http/tcp/BUILD b/source/extensions/upstreams/http/tcp/BUILD index 960f8b4b9c0d..82b0422fad70 100644 --- a/source/extensions/upstreams/http/tcp/BUILD +++ b/source/extensions/upstreams/http/tcp/BUILD @@ -1,6 +1,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", + "envoy_cc_library", "envoy_package", ) @@ -19,7 +20,34 @@ envoy_cc_extension( security_posture = "robust_to_untrusted_downstream", visibility = ["//visibility:public"], deps = [ - "//source/common/router:router_lib", + 
":upstream_request_lib", "@envoy_api//envoy/extensions/upstreams/http/tcp/v3:pkg_cc_proto", ], ) + +envoy_cc_library( + name = "upstream_request_lib", + srcs = [ + "upstream_request.cc", + ], + hdrs = [ + "upstream_request.h", + ], + deps = [ + "//include/envoy/http:codes_interface", + "//include/envoy/http:filter_interface", + "//include/envoy/upstream:upstream_interface", + "//source/common/common:assert_lib", + "//source/common/common:minimal_logger_lib", + "//source/common/common:utility_lib", + "//source/common/http:codes_lib", + "//source/common/http:header_map_lib", + "//source/common/http:headers_lib", + "//source/common/http:message_lib", + "//source/common/network:application_protocol_lib", + "//source/common/network:transport_socket_options_lib", + "//source/common/router:router_lib", + "//source/common/upstream:load_balancer_lib", + "//source/extensions/common/proxy_protocol:proxy_protocol_header_lib", + ], +) diff --git a/source/extensions/upstreams/http/tcp/config.cc b/source/extensions/upstreams/http/tcp/config.cc index 27e90035a9e4..15c01f524af7 100644 --- a/source/extensions/upstreams/http/tcp/config.cc +++ b/source/extensions/upstreams/http/tcp/config.cc @@ -1,6 +1,6 @@ #include "extensions/upstreams/http/tcp/config.h" -#include "common/router/upstream_request.h" +#include "extensions/upstreams/http/tcp/upstream_request.h" namespace Envoy { namespace Extensions { @@ -9,10 +9,10 @@ namespace Http { namespace Tcp { Router::GenericConnPoolPtr TcpGenericConnPoolFactory::createGenericConnPool( - Upstream::ClusterManager& cm, bool, const Router::RouteEntry& route_entry, + Upstream::ClusterManager& cm, bool is_connect, const Router::RouteEntry& route_entry, absl::optional downstream_protocol, Upstream::LoadBalancerContext* ctx) const { - auto ret = std::make_unique(cm, route_entry, downstream_protocol, ctx); + auto ret = std::make_unique(cm, is_connect, route_entry, downstream_protocol, ctx); return (ret->valid() ? 
std::move(ret) : nullptr); } diff --git a/source/extensions/upstreams/http/tcp/upstream_request.cc b/source/extensions/upstreams/http/tcp/upstream_request.cc new file mode 100644 index 000000000000..26e8fb50ec94 --- /dev/null +++ b/source/extensions/upstreams/http/tcp/upstream_request.cc @@ -0,0 +1,113 @@ +#include "extensions/upstreams/http/tcp/upstream_request.h" + +#include +#include + +#include "envoy/upstream/upstream.h" + +#include "common/common/assert.h" +#include "common/common/utility.h" +#include "common/http/codes.h" +#include "common/http/header_map_impl.h" +#include "common/http/headers.h" +#include "common/http/message_impl.h" +#include "common/network/transport_socket_options_impl.h" +#include "common/router/router.h" + +#include "extensions/common/proxy_protocol/proxy_protocol_header.h" + +namespace Envoy { +namespace Extensions { +namespace Upstreams { +namespace Http { +namespace Tcp { + +void TcpConnPool::onPoolReady(Envoy::Tcp::ConnectionPool::ConnectionDataPtr&& conn_data, + Upstream::HostDescriptionConstSharedPtr host) { + upstream_handle_ = nullptr; + Network::Connection& latched_conn = conn_data->connection(); + auto upstream = + std::make_unique(callbacks_->upstreamRequest(), std::move(conn_data)); + callbacks_->onPoolReady(std::move(upstream), host, latched_conn.localAddress(), + latched_conn.streamInfo()); +} + +TcpUpstream::TcpUpstream(Router::UpstreamRequest* upstream_request, + Envoy::Tcp::ConnectionPool::ConnectionDataPtr&& upstream) + : upstream_request_(upstream_request), upstream_conn_data_(std::move(upstream)) { + upstream_conn_data_->connection().enableHalfClose(true); + upstream_conn_data_->addUpstreamCallbacks(*this); +} + +void TcpUpstream::encodeData(Buffer::Instance& data, bool end_stream) { + upstream_conn_data_->connection().write(data, end_stream); +} + +void TcpUpstream::encodeHeaders(const Envoy::Http::RequestHeaderMap&, bool end_stream) { + // Headers should only happen once, so use this opportunity to add the proxy + // proto header, if configured. + ASSERT(upstream_request_->parent().routeEntry()->connectConfig().has_value()); + Buffer::OwnedImpl data; + auto& connect_config = upstream_request_->parent().routeEntry()->connectConfig().value(); + if (connect_config.has_proxy_protocol_config()) { + const Network::Connection& connection = *upstream_request_->parent().callbacks()->connection(); + Extensions::Common::ProxyProtocol::generateProxyProtoHeader( + connect_config.proxy_protocol_config(), connection, data); + } + + if (data.length() != 0 || end_stream) { + upstream_conn_data_->connection().write(data, end_stream); + } + + // TcpUpstream::encodeHeaders is called after the UpstreamRequest is fully initialized. Also use + // this time to synthesize the 200 response headers downstream to complete the CONNECT handshake. 
+ Envoy::Http::ResponseHeaderMapPtr headers{ + Envoy::Http::createHeaderMap( + {{Envoy::Http::Headers::get().Status, "200"}})}; + upstream_request_->decodeHeaders(std::move(headers), false); +} + +void TcpUpstream::encodeTrailers(const Envoy::Http::RequestTrailerMap&) { + Buffer::OwnedImpl data; + upstream_conn_data_->connection().write(data, true); +} + +void TcpUpstream::readDisable(bool disable) { + if (upstream_conn_data_->connection().state() != Network::Connection::State::Open) { + return; + } + upstream_conn_data_->connection().readDisable(disable); +} + +void TcpUpstream::resetStream() { + upstream_request_ = nullptr; + upstream_conn_data_->connection().close(Network::ConnectionCloseType::NoFlush); +} + +void TcpUpstream::onUpstreamData(Buffer::Instance& data, bool end_stream) { + upstream_request_->decodeData(data, end_stream); +} + +void TcpUpstream::onEvent(Network::ConnectionEvent event) { + if (event != Network::ConnectionEvent::Connected && upstream_request_) { + upstream_request_->onResetStream(Envoy::Http::StreamResetReason::ConnectionTermination, ""); + } +} + +void TcpUpstream::onAboveWriteBufferHighWatermark() { + if (upstream_request_) { + upstream_request_->disableDataFromDownstreamForFlowControl(); + } +} + +void TcpUpstream::onBelowWriteBufferLowWatermark() { + if (upstream_request_) { + upstream_request_->enableDataFromDownstreamForFlowControl(); + } +} + +} // namespace Tcp +} // namespace Http +} // namespace Upstreams +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/upstreams/http/tcp/upstream_request.h b/source/extensions/upstreams/http/tcp/upstream_request.h new file mode 100644 index 000000000000..2d0bf85a148e --- /dev/null +++ b/source/extensions/upstreams/http/tcp/upstream_request.h @@ -0,0 +1,93 @@ +#pragma once + +#include +#include + +#include "envoy/http/codec.h" +#include "envoy/tcp/conn_pool.h" + +#include "common/buffer/watermark_buffer.h" +#include "common/common/cleanup.h" +#include "common/common/logger.h" +#include "common/config/well_known_names.h" +#include "common/router/upstream_request.h" +#include "common/stream_info/stream_info_impl.h" + +namespace Envoy { +namespace Extensions { +namespace Upstreams { +namespace Http { +namespace Tcp { + +class TcpConnPool : public Router::GenericConnPool, public Envoy::Tcp::ConnectionPool::Callbacks { +public: + TcpConnPool(Upstream::ClusterManager& cm, bool is_connect, const Router::RouteEntry& route_entry, + absl::optional, Upstream::LoadBalancerContext* ctx) { + ASSERT(is_connect); + conn_pool_ = cm.tcpConnPoolForCluster(route_entry.clusterName(), + Upstream::ResourcePriority::Default, ctx); + } + void newStream(Router::GenericConnectionPoolCallbacks* callbacks) override { + callbacks_ = callbacks; + upstream_handle_ = conn_pool_->newConnection(*this); + } + + bool cancelAnyPendingRequest() override { + if (upstream_handle_) { + upstream_handle_->cancel(Envoy::Tcp::ConnectionPool::CancelPolicy::Default); + upstream_handle_ = nullptr; + return true; + } + return false; + } + absl::optional protocol() const override { return absl::nullopt; } + Upstream::HostDescriptionConstSharedPtr host() const override { return conn_pool_->host(); } + + bool valid() { return conn_pool_ != nullptr; } + + // Tcp::ConnectionPool::Callbacks + void onPoolFailure(ConnectionPool::PoolFailureReason reason, + Upstream::HostDescriptionConstSharedPtr host) override { + upstream_handle_ = nullptr; + callbacks_->onPoolFailure(reason, "", host); + } + + void 
onPoolReady(Envoy::Tcp::ConnectionPool::ConnectionDataPtr&& conn_data, + Upstream::HostDescriptionConstSharedPtr host) override; + +private: + Envoy::Tcp::ConnectionPool::Instance* conn_pool_; + Envoy::Tcp::ConnectionPool::Cancellable* upstream_handle_{}; + Router::GenericConnectionPoolCallbacks* callbacks_{}; +}; + +class TcpUpstream : public Router::GenericUpstream, + public Envoy::Tcp::ConnectionPool::UpstreamCallbacks { +public: + TcpUpstream(Router::UpstreamRequest* upstream_request, + Envoy::Tcp::ConnectionPool::ConnectionDataPtr&& upstream); + + // GenericUpstream + void encodeData(Buffer::Instance& data, bool end_stream) override; + void encodeMetadata(const Envoy::Http::MetadataMapVector&) override {} + void encodeHeaders(const Envoy::Http::RequestHeaderMap&, bool end_stream) override; + void encodeTrailers(const Envoy::Http::RequestTrailerMap&) override; + void readDisable(bool disable) override; + void resetStream() override; + + // Tcp::ConnectionPool::UpstreamCallbacks + void onUpstreamData(Buffer::Instance& data, bool end_stream) override; + void onEvent(Network::ConnectionEvent event) override; + void onAboveWriteBufferHighWatermark() override; + void onBelowWriteBufferLowWatermark() override; + +private: + Router::UpstreamRequest* upstream_request_; + Envoy::Tcp::ConnectionPool::ConnectionDataPtr upstream_conn_data_; +}; + +} // namespace Tcp +} // namespace Http +} // namespace Upstreams +} // namespace Extensions +} // namespace Envoy diff --git a/source/server/BUILD b/source/server/BUILD index 3f8b312ee64d..1ed58d10ef0c 100644 --- a/source/server/BUILD +++ b/source/server/BUILD @@ -79,9 +79,6 @@ envoy_cc_library( "//source/common/stats:timespan_lib", "//source/common/stream_info:stream_info_lib", "//source/extensions/transport_sockets:well_known_names", - "//source/extensions/upstreams/http/generic:config", - "//source/extensions/upstreams/http/http:config", - "//source/extensions/upstreams/http/tcp:config", ], ) @@ -330,6 +327,9 @@ envoy_cc_library( "//source/extensions/filters/listener:well_known_names", "//source/extensions/filters/network/http_connection_manager:config", "//source/extensions/transport_sockets:well_known_names", + "//source/extensions/upstreams/http/generic:config", + "//source/extensions/upstreams/http/http:config", + "//source/extensions/upstreams/http/tcp:config", "@envoy_api//envoy/admin/v3:pkg_cc_proto", "@envoy_api//envoy/api/v2:pkg_cc_proto", "@envoy_api//envoy/api/v2/listener:pkg_cc_proto", diff --git a/test/common/router/BUILD b/test/common/router/BUILD index 81a4b7e7a63b..7d88ae53b96e 100644 --- a/test/common/router/BUILD +++ b/test/common/router/BUILD @@ -327,26 +327,6 @@ envoy_cc_test( ], ) -envoy_cc_test( - name = "upstream_request_test", - srcs = ["upstream_request_test.cc"], - deps = [ - "//source/common/buffer:buffer_lib", - "//source/common/router:router_lib", - "//source/common/upstream:upstream_includes", - "//source/common/upstream:upstream_lib", - "//test/common/http:common_lib", - "//test/mocks/http:http_mocks", - "//test/mocks/network:network_mocks", - "//test/mocks/router:router_mocks", - "//test/mocks/server:server_mocks", - "//test/mocks/upstream:upstream_mocks", - "//test/test_common:environment_lib", - "//test/test_common:simulated_time_system_lib", - "//test/test_common:utility_lib", - ], -) - envoy_cc_test( name = "header_formatter_test", srcs = ["header_formatter_test.cc"], diff --git a/test/extensions/upstreams/http/tcp/BUILD b/test/extensions/upstreams/http/tcp/BUILD new file mode 100644 index 
000000000000..39a5a8d33a89 --- /dev/null +++ b/test/extensions/upstreams/http/tcp/BUILD @@ -0,0 +1,31 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_test", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_test( + name = "upstream_request_test", + srcs = ["upstream_request_test.cc"], + deps = [ + "//source/common/buffer:buffer_lib", + "//source/common/router:router_lib", + "//source/common/upstream:upstream_includes", + "//source/common/upstream:upstream_lib", + "//source/extensions/upstreams/http/tcp:upstream_request_lib", + "//test/common/http:common_lib", + "//test/mocks:common_lib", + "//test/mocks/http:http_mocks", + "//test/mocks/network:network_mocks", + "//test/mocks/router:router_mocks", + "//test/mocks/server:server_mocks", + "//test/mocks/upstream:upstream_mocks", + "//test/test_common:environment_lib", + "//test/test_common:simulated_time_system_lib", + "//test/test_common:utility_lib", + ], +) diff --git a/test/common/router/upstream_request_test.cc b/test/extensions/upstreams/http/tcp/upstream_request_test.cc similarity index 80% rename from test/common/router/upstream_request_test.cc rename to test/extensions/upstreams/http/tcp/upstream_request_test.cc index 72705db13377..1889b93dce1b 100644 --- a/test/common/router/upstream_request_test.cc +++ b/test/extensions/upstreams/http/tcp/upstream_request_test.cc @@ -4,8 +4,10 @@ #include "common/router/upstream_request.h" #include "extensions/common/proxy_protocol/proxy_protocol_header.h" +#include "extensions/upstreams/http/tcp/upstream_request.h" #include "test/common/http/common.h" +#include "test/mocks/common.h" #include "test/mocks/http/mocks.h" #include "test/mocks/router/mocks.h" #include "test/mocks/server/mocks.h" @@ -15,6 +17,8 @@ #include "gmock/gmock.h" #include "gtest/gtest.h" +using Envoy::Http::TestRequestHeaderMapImpl; +using Envoy::Router::UpstreamRequest; using testing::_; using testing::AnyNumber; using testing::NiceMock; @@ -43,28 +47,28 @@ class MockRouterFilterInterface : public RouterFilterInterface { } MOCK_METHOD(void, onUpstream100ContinueHeaders, - (Http::ResponseHeaderMapPtr && headers, UpstreamRequest& upstream_request)); + (Envoy::Http::ResponseHeaderMapPtr && headers, UpstreamRequest& upstream_request)); MOCK_METHOD(void, onUpstreamHeaders, - (uint64_t response_code, Http::ResponseHeaderMapPtr&& headers, + (uint64_t response_code, Envoy::Http::ResponseHeaderMapPtr&& headers, UpstreamRequest& upstream_request, bool end_stream)); MOCK_METHOD(void, onUpstreamData, (Buffer::Instance & data, UpstreamRequest& upstream_request, bool end_stream)); MOCK_METHOD(void, onUpstreamTrailers, - (Http::ResponseTrailerMapPtr && trailers, UpstreamRequest& upstream_request)); - MOCK_METHOD(void, onUpstreamMetadata, (Http::MetadataMapPtr && metadata_map)); + (Envoy::Http::ResponseTrailerMapPtr && trailers, UpstreamRequest& upstream_request)); + MOCK_METHOD(void, onUpstreamMetadata, (Envoy::Http::MetadataMapPtr && metadata_map)); MOCK_METHOD(void, onUpstreamReset, - (Http::StreamResetReason reset_reason, absl::string_view transport_failure, + (Envoy::Http::StreamResetReason reset_reason, absl::string_view transport_failure, UpstreamRequest& upstream_request)); MOCK_METHOD(void, onUpstreamHostSelected, (Upstream::HostDescriptionConstSharedPtr host)); MOCK_METHOD(void, onPerTryTimeout, (UpstreamRequest & upstream_request)); MOCK_METHOD(void, onStreamMaxDurationReached, (UpstreamRequest & upstream_request)); - MOCK_METHOD(Http::StreamDecoderFilterCallbacks*, callbacks, ()); + 
MOCK_METHOD(Envoy::Http::StreamDecoderFilterCallbacks*, callbacks, ()); MOCK_METHOD(Upstream::ClusterInfoConstSharedPtr, cluster, ()); MOCK_METHOD(FilterConfig&, config, ()); MOCK_METHOD(FilterUtility::TimeoutData, timeout, ()); - MOCK_METHOD(Http::RequestHeaderMap*, downstreamHeaders, ()); - MOCK_METHOD(Http::RequestTrailerMap*, downstreamTrailers, ()); + MOCK_METHOD(Envoy::Http::RequestHeaderMap*, downstreamHeaders, ()); + MOCK_METHOD(Envoy::Http::RequestTrailerMap*, downstreamTrailers, ()); MOCK_METHOD(bool, downstreamResponseStarted, (), (const)); MOCK_METHOD(bool, downstreamEndStream, (), (const)); MOCK_METHOD(uint32_t, attemptCount, (), (const)); @@ -74,7 +78,7 @@ class MockRouterFilterInterface : public RouterFilterInterface { MOCK_METHOD(const UpstreamRequest*, finalUpstreamRequest, (), (const)); MOCK_METHOD(TimeSource&, timeSource, ()); - NiceMock callbacks_; + NiceMock callbacks_; NiceMock route_entry_; NiceMock client_connection_; @@ -85,19 +89,29 @@ class MockRouterFilterInterface : public RouterFilterInterface { std::list requests_; }; +} // namespace +} // namespace Router +} // namespace Envoy + +namespace Envoy { +namespace Extensions { +namespace Upstreams { +namespace Http { +namespace Tcp { + class TcpConnPoolTest : public ::testing::Test { public: TcpConnPoolTest() : host_(std::make_shared>()) { - NiceMock route_entry; + NiceMock route_entry; NiceMock cm; EXPECT_CALL(cm, tcpConnPoolForCluster(_, _, _)).WillOnce(Return(&mock_pool_)); - conn_pool_ = std::make_unique(cm, route_entry, Http::Protocol::Http11, nullptr); - EXPECT_TRUE(conn_pool_->valid()); + conn_pool_ = std::make_unique(cm, true, route_entry, Envoy::Http::Protocol::Http11, + nullptr); } std::unique_ptr conn_pool_; - Tcp::ConnectionPool::MockInstance mock_pool_; - MockGenericConnectionPoolCallbacks mock_generic_callbacks_; + Envoy::Tcp::ConnectionPool::MockInstance mock_pool_; + Router::MockGenericConnectionPoolCallbacks mock_generic_callbacks_; std::shared_ptr> host_; NiceMock cancellable_; }; @@ -110,7 +124,7 @@ TEST_F(TcpConnPoolTest, Basic) { EXPECT_CALL(mock_generic_callbacks_, upstreamRequest()); EXPECT_CALL(mock_generic_callbacks_, onPoolReady(_, _, _, _)); - auto data = std::make_unique>(); + auto data = std::make_unique>(); EXPECT_CALL(*data, connection()).Times(AnyNumber()).WillRepeatedly(ReturnRef(connection)); conn_pool_->onPoolReady(std::move(data), host_); } @@ -120,7 +134,8 @@ TEST_F(TcpConnPoolTest, OnPoolFailure) { conn_pool_->newStream(&mock_generic_callbacks_); EXPECT_CALL(mock_generic_callbacks_, onPoolFailure(_, _, _)); - conn_pool_->onPoolFailure(Tcp::ConnectionPool::PoolFailureReason::LocalConnectionFailure, host_); + conn_pool_->onPoolFailure(Envoy::Tcp::ConnectionPool::PoolFailureReason::LocalConnectionFailure, + host_); // Make sure that the pool failure nulled out the pending request. 
EXPECT_FALSE(conn_pool_->cancelAnyPendingRequest()); @@ -144,8 +159,8 @@ class TcpUpstreamTest : public ::testing::Test { public: TcpUpstreamTest() { mock_router_filter_.requests_.push_back(std::make_unique( - mock_router_filter_, std::make_unique>())); - auto data = std::make_unique>(); + mock_router_filter_, std::make_unique>())); + auto data = std::make_unique>(); EXPECT_CALL(*data, connection()).Times(AnyNumber()).WillRepeatedly(ReturnRef(connection_)); tcp_upstream_ = std::make_unique(mock_router_filter_.requests_.front().get(), std::move(data)); @@ -154,14 +169,14 @@ class TcpUpstreamTest : public ::testing::Test { protected: NiceMock connection_; - NiceMock mock_router_filter_; - Tcp::ConnectionPool::MockConnectionData* mock_connection_data_; + NiceMock mock_router_filter_; + Envoy::Tcp::ConnectionPool::MockConnectionData* mock_connection_data_; std::unique_ptr tcp_upstream_; - Http::TestRequestHeaderMapImpl request_{{":method", "CONNECT"}, - {":path", "/"}, - {":protocol", "bytestream"}, - {":scheme", "https"}, - {":authority", "host"}}; + TestRequestHeaderMapImpl request_{{":method", "CONNECT"}, + {":path", "/"}, + {":protocol", "bytestream"}, + {":scheme", "https"}, + {":authority", "host"}}; }; TEST_F(TcpUpstreamTest, Basic) { @@ -176,7 +191,7 @@ TEST_F(TcpUpstreamTest, Basic) { tcp_upstream_->encodeData(buffer, false); // Metadata is swallowed. - Http::MetadataMapVector metadata_map_vector; + Envoy::Http::MetadataMapVector metadata_map_vector; tcp_upstream_->encodeMetadata(metadata_map_vector); // Forward data. @@ -241,7 +256,7 @@ TEST_F(TcpUpstreamTest, TrailersEndStream) { tcp_upstream_->encodeHeaders(request_, false); EXPECT_CALL(connection_, write(BufferStringEqual(""), true)); - Http::TestRequestTrailerMapImpl trailers{{"foo", "bar"}}; + Envoy::Http::TestRequestTrailerMapImpl trailers{{"foo", "bar"}}; tcp_upstream_->encodeTrailers(trailers); } @@ -266,7 +281,7 @@ TEST_F(TcpUpstreamTest, ReadDisable) { TEST_F(TcpUpstreamTest, UpstreamEvent) { // Make sure upstream disconnects result in stream reset. EXPECT_CALL(mock_router_filter_, - onUpstreamReset(Http::StreamResetReason::ConnectionTermination, "", _)); + onUpstreamReset(Envoy::Http::StreamResetReason::ConnectionTermination, "", _)); tcp_upstream_->onEvent(Network::ConnectionEvent::RemoteClose); } @@ -279,6 +294,8 @@ TEST_F(TcpUpstreamTest, Watermarks) { tcp_upstream_->onBelowWriteBufferLowWatermark(); } -} // namespace -} // namespace Router +} // namespace Tcp +} // namespace Http +} // namespace Upstreams +} // namespace Extensions } // namespace Envoy From fc857cd59dce16118a7455169f2664000bc54165 Mon Sep 17 00:00:00 2001 From: danzh Date: Tue, 23 Jun 2020 09:17:12 -0400 Subject: [PATCH 416/909] quiche: fix headers/trailers bytes accounting against watermark (#11640) There is a bug in current watermark accounting that headers and trailers are not counted. It is suboptimal in gQUIC, but an accounting bug in iQUIC. In iQUIC, if we don't count headers bytes as total bytes buffered, but subtract from it the bytes written in stream OnCanWrite(), we would subtract more than we counted into total bytes buffered. This is because iQUIC stream writes headers/trailers on data stream. The fix is counting headers/trailers into total bytes buffered. In gQUIC these bytes are counted against connection level watermark, but not stream watermark. Because it's hard to determine which byte belongs to which stream when header stream writes them out. In iQUIC these bytes are counted against both connection level and stream level watermarks. 
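To make the accounting concrete, below is a minimal, self-contained C++ sketch of the before/after pattern this patch applies in encodeHeaders()/encodeData()/encodeTrailers(): snapshot the stream's buffered bytes before the write, snapshot again after, and feed the delta into a simulated watermark tracker; draining in OnCanWrite() feeds a negative delta. The names used here (SimulatedWatermarkBuffer, FakeStream, adjustBytesBuffered) are illustrative stand-ins rather than Envoy or QUICHE APIs, and the watermark hysteresis is simplified relative to the real implementation.

// Minimal sketch of the before/after buffered-bytes accounting. Not Envoy code;
// class and method names are hypothetical stand-ins.
#include <cstdint>
#include <functional>
#include <iostream>
#include <utility>

// Tracks total buffered bytes and fires callbacks when crossing the watermarks.
class SimulatedWatermarkBuffer {
public:
  SimulatedWatermarkBuffer(uint64_t low, uint64_t high,
                           std::function<void()> above_high,
                           std::function<void()> below_low)
      : low_(low), high_(high), above_high_(std::move(above_high)),
        below_low_(std::move(below_low)) {}

  // Called with the signed change in buffered bytes: positive after a write
  // that got buffered, negative after OnCanWrite()-style draining.
  void adjustBytesBuffered(int64_t delta) {
    const uint64_t old_bytes = bytes_buffered_;
    bytes_buffered_ += delta;
    if (old_bytes < high_ && bytes_buffered_ >= high_) {
      above_high_();
    } else if (old_bytes >= low_ && bytes_buffered_ < low_) {
      below_low_();
    }
  }

  uint64_t bytesBuffered() const { return bytes_buffered_; }

private:
  const uint64_t low_;
  const uint64_t high_;
  std::function<void()> above_high_;
  std::function<void()> below_low_;
  uint64_t bytes_buffered_{0};
};

// Stand-in for a QUIC stream whose writes are buffered while it is blocked.
struct FakeStream {
  uint64_t buffered_bytes{0};
  bool blocked{true};
  // Pretend to serialize headers or body; everything is buffered while blocked.
  void write(uint64_t bytes) {
    if (blocked) {
      buffered_bytes += bytes;
    }
  }
  // Pretend the flow control window opened and the whole buffer drains.
  uint64_t drainAll() {
    const uint64_t drained = buffered_bytes;
    buffered_bytes = 0;
    return drained;
  }
};

int main() {
  FakeStream stream;
  SimulatedWatermarkBuffer watermarks(
      8 * 1024, 16 * 1024, [] { std::cout << "above high watermark\n"; },
      [] { std::cout << "below low watermark\n"; });

  // encodeHeaders()-style accounting: snapshot buffered bytes before and after
  // the write and count the difference, so headers contribute to the limit.
  uint64_t before = stream.buffered_bytes;
  stream.write(200); // headers
  watermarks.adjustBytesBuffered(static_cast<int64_t>(stream.buffered_bytes - before));

  // encodeData()-style accounting: a 16KiB body pushes us over the high watermark.
  before = stream.buffered_bytes;
  stream.write(16 * 1024);
  watermarks.adjustBytesBuffered(static_cast<int64_t>(stream.buffered_bytes - before));

  // OnCanWrite()-style accounting: drained bytes are subtracted. Because the
  // headers were counted on the way in, the subtraction cannot exceed the total.
  watermarks.adjustBytesBuffered(-static_cast<int64_t>(stream.drainAll()));

  std::cout << "bytes buffered at end: " << watermarks.bytesBuffered() << "\n";
  return 0;
}

Because the headers write is counted when it is buffered, the later subtraction in the drain step can never exceed the running total, which is exactly the iQUIC underflow the paragraph above describes; in gQUIC the same delta would instead be charged only to the connection-level tracker.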
This also fix a concern of unlimited buffer in headers stream where headers-only responses are buffered in headers stream. Risk Level: low Testing: added new test in quic stream and session Docs: added to flow control docs Part of: #8826, #2557 Signed-off-by: Dan Zhang --- source/docs/flow_control.md | 3 + source/docs/quiche_integration.md | 4 + .../quiche/envoy_quic_client_session.cc | 5 + .../quiche/envoy_quic_client_stream.cc | 25 +++- .../quiche/envoy_quic_server_session.cc | 6 + .../quiche/envoy_quic_server_stream.cc | 26 ++++- .../quic_filter_manager_connection_impl.cc | 6 + .../quiche/envoy_quic_client_stream_test.cc | 76 ++++++++++++ .../quiche/envoy_quic_server_session_test.cc | 110 ++++++++++++++++++ .../quiche/envoy_quic_server_stream_test.cc | 79 ++++++++++++- 10 files changed, 332 insertions(+), 8 deletions(-) diff --git a/source/docs/flow_control.md b/source/docs/flow_control.md index 80f3bd01a161..e32bc4b2e850 100644 --- a/source/docs/flow_control.md +++ b/source/docs/flow_control.md @@ -427,3 +427,6 @@ The low watermark path is as follows: receiving an `onBelowWriteBufferLowWatermark()` callback. From this point on the `Envoy::Router::Filter` picks up the event and the code path is the same as for the HTTP/2 codec upstream send buffer. + +### HTTP3 implementation details +HTTP3 network buffer and stream send buffer works differently from HTTP2 and HTTP. See quiche_integration.md. diff --git a/source/docs/quiche_integration.md b/source/docs/quiche_integration.md index 45de216cc6b4..9232278631f7 100644 --- a/source/docs/quiche_integration.md +++ b/source/docs/quiche_integration.md @@ -31,3 +31,7 @@ When the bytes buffered in a stream's send buffer exceeds its high watermark, it QUICHE doesn't buffer data at the local connection layer. All the data is buffered in the respective streams.To prevent the case where all streams collectively buffers a lot of data, there is also a simulated watermark buffer for each QUIC connection which is updated upon each stream write. When the aggregated buffered bytes goes above high watermark, its registered network callbacks will call Network::ConnectionCallbacks::onAboveWriteBufferHighWatermark(). The HCM will notify each stream via QUIC codec Http::Connection::onUnderlyingConnectionAboveWriteBufferHighWatermark() which will call each stream's StreamCallbackHelper::runHighWatermarkCallbacks(). There might be a way to simply the call stack as Quic connection already knows about all the stream, there is no need to call to HCM and notify each stream via codec. But here we just follow the same logic as HTTP2 codec does. In the same way, any QuicStream::OnCanWrite() may change the aggregated buffered bytes in the connection level bookkeeping as well. If the buffered bytes goes down below the low watermark, the same calls will be triggered to propagate onBelowWriteBufferLowWatermark() to each stream. + +As to Http::StreamEncoder::encodeHeaders()/encodeTrailers(), the accounting is done differently between Google QUIC and IETF QUIC: + * In Google QUIC, encodeHeaders()/encodeTrailers() check the buffer size increase on header stream before and after writing headers/trailers. In QuicSession::OnCanWrite(), may drain header stream send buffer, so there we also check send buffer size decrease on header stream. + * In IETF QUIC, encodeHeaders()/encodeTrailers() check the buffer size increase on the corresponding data stream which is similar to encodeData(). 
The buffered headers/trailers are only drained via QuicStream::OnCanWrite() so there is no need to check QuicSession::OnCanWrite. diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_client_session.cc b/source/extensions/quic_listeners/quiche/envoy_quic_client_session.cc index 5f0984f1d5a1..f516e2e573e4 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_client_session.cc +++ b/source/extensions/quic_listeners/quiche/envoy_quic_client_session.cc @@ -44,7 +44,12 @@ void EnvoyQuicClientSession::Initialize() { } void EnvoyQuicClientSession::OnCanWrite() { + const uint64_t headers_to_send_old = + quic::VersionUsesHttp3(transport_version()) ? 0u : headers_stream()->BufferedDataBytes(); quic::QuicSpdyClientSession::OnCanWrite(); + const uint64_t headers_to_send_new = + quic::VersionUsesHttp3(transport_version()) ? 0u : headers_stream()->BufferedDataBytes(); + adjustBytesToSend(headers_to_send_new - headers_to_send_old); maybeApplyDelayClosePolicy(); } diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_client_stream.cc b/source/extensions/quic_listeners/quiche/envoy_quic_client_stream.cc index aa604d34092f..39a16309c271 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_client_stream.cc +++ b/source/extensions/quic_listeners/quiche/envoy_quic_client_stream.cc @@ -46,8 +46,18 @@ EnvoyQuicClientStream::EnvoyQuicClientStream(quic::PendingStream* pending, void EnvoyQuicClientStream::encodeHeaders(const Http::RequestHeaderMap& headers, bool end_stream) { ENVOY_STREAM_LOG(debug, "encodeHeaders: (end_stream={}) {}.", *this, end_stream, headers); + quic::QuicStream* writing_stream = + quic::VersionUsesHttp3(transport_version()) + ? static_cast(this) + : (dynamic_cast(session())->headers_stream()); + const uint64_t bytes_to_send_old = writing_stream->BufferedDataBytes(); WriteHeaders(envoyHeadersToSpdyHeaderBlock(headers), end_stream, nullptr); local_end_stream_ = end_stream; + const uint64_t bytes_to_send_new = writing_stream->BufferedDataBytes(); + ASSERT(bytes_to_send_old <= bytes_to_send_new); + // IETF QUIC sends HEADER frame on current stream. After writing headers, the + // buffer may increase. + maybeCheckWatermark(bytes_to_send_old, bytes_to_send_new, *filterManagerConnection()); } void EnvoyQuicClientStream::encodeData(Buffer::Instance& data, bool end_stream) { @@ -55,7 +65,7 @@ void EnvoyQuicClientStream::encodeData(Buffer::Instance& data, bool end_stream) data.length()); local_end_stream_ = end_stream; // This is counting not serialized bytes in the send buffer. - uint64_t bytes_to_send_old = BufferedDataBytes(); + const uint64_t bytes_to_send_old = BufferedDataBytes(); // QUIC stream must take all. WriteBodySlices(quic::QuicMemSliceSpan(quic::QuicMemSliceSpanImpl(data)), end_stream); if (data.length() > 0) { @@ -64,7 +74,7 @@ void EnvoyQuicClientStream::encodeData(Buffer::Instance& data, bool end_stream) return; } - uint64_t bytes_to_send_new = BufferedDataBytes(); + const uint64_t bytes_to_send_new = BufferedDataBytes(); ASSERT(bytes_to_send_old <= bytes_to_send_new); maybeCheckWatermark(bytes_to_send_old, bytes_to_send_new, *filterManagerConnection()); } @@ -73,7 +83,18 @@ void EnvoyQuicClientStream::encodeTrailers(const Http::RequestTrailerMap& traile ASSERT(!local_end_stream_); local_end_stream_ = true; ENVOY_STREAM_LOG(debug, "encodeTrailers: {}.", *this, trailers); + quic::QuicStream* writing_stream = + quic::VersionUsesHttp3(transport_version()) + ? 
static_cast(this) + : (dynamic_cast(session())->headers_stream()); + + const uint64_t bytes_to_send_old = writing_stream->BufferedDataBytes(); WriteTrailers(envoyHeadersToSpdyHeaderBlock(trailers), nullptr); + const uint64_t bytes_to_send_new = writing_stream->BufferedDataBytes(); + ASSERT(bytes_to_send_old <= bytes_to_send_new); + // IETF QUIC sends HEADER frame on current stream. After writing trailers, the + // buffer may increase. + maybeCheckWatermark(bytes_to_send_old, bytes_to_send_new, *filterManagerConnection()); } void EnvoyQuicClientStream::encodeMetadata(const Http::MetadataMapVector& /*metadata_map_vector*/) { diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_server_session.cc b/source/extensions/quic_listeners/quiche/envoy_quic_server_session.cc index 73a62a93d8b3..9c621ad3d690 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_server_session.cc +++ b/source/extensions/quic_listeners/quiche/envoy_quic_server_session.cc @@ -89,7 +89,13 @@ void EnvoyQuicServerSession::Initialize() { } void EnvoyQuicServerSession::OnCanWrite() { + const uint64_t headers_to_send_old = + quic::VersionUsesHttp3(transport_version()) ? 0u : headers_stream()->BufferedDataBytes(); + quic::QuicServerSessionBase::OnCanWrite(); + const uint64_t headers_to_send_new = + quic::VersionUsesHttp3(transport_version()) ? 0u : headers_stream()->BufferedDataBytes(); + adjustBytesToSend(headers_to_send_new - headers_to_send_old); // Do not update delay close state according to connection level packet egress because that is // equivalent to TCP transport layer egress. But only do so if the session gets chance to write. maybeApplyDelayClosePolicy(); diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_server_stream.cc b/source/extensions/quic_listeners/quiche/envoy_quic_server_stream.cc index 12d93227bbb2..feda7c2f2a94 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_server_stream.cc +++ b/source/extensions/quic_listeners/quiche/envoy_quic_server_stream.cc @@ -64,8 +64,18 @@ void EnvoyQuicServerStream::encodeHeaders(const Http::ResponseHeaderMap& headers // Same vulnerability exists in crypto stream which can infinitely buffer data // if handshake implementation goes wrong. // TODO(#8826) Modify QUICHE to have an upper bound for header stream send buffer. + // This is counting not serialized bytes in the send buffer. + quic::QuicStream* writing_stream = + quic::VersionUsesHttp3(transport_version()) + ? static_cast(this) + : (dynamic_cast(session())->headers_stream()); + const uint64_t bytes_to_send_old = writing_stream->BufferedDataBytes(); + WriteHeaders(envoyHeadersToSpdyHeaderBlock(headers), end_stream, nullptr); local_end_stream_ = end_stream; + const uint64_t bytes_to_send_new = writing_stream->BufferedDataBytes(); + ASSERT(bytes_to_send_old <= bytes_to_send_new); + maybeCheckWatermark(bytes_to_send_old, bytes_to_send_new, *filterManagerConnection()); } void EnvoyQuicServerStream::encodeData(Buffer::Instance& data, bool end_stream) { @@ -73,7 +83,7 @@ void EnvoyQuicServerStream::encodeData(Buffer::Instance& data, bool end_stream) data.length()); local_end_stream_ = end_stream; // This is counting not serialized bytes in the send buffer. - uint64_t bytes_to_send_old = BufferedDataBytes(); + const uint64_t bytes_to_send_old = BufferedDataBytes(); // QUIC stream must take all. 
WriteBodySlices(quic::QuicMemSliceSpan(quic::QuicMemSliceSpanImpl(data)), end_stream); if (data.length() > 0) { @@ -82,7 +92,7 @@ void EnvoyQuicServerStream::encodeData(Buffer::Instance& data, bool end_stream) return; } - uint64_t bytes_to_send_new = BufferedDataBytes(); + const uint64_t bytes_to_send_new = BufferedDataBytes(); ASSERT(bytes_to_send_old <= bytes_to_send_new); maybeCheckWatermark(bytes_to_send_old, bytes_to_send_new, *filterManagerConnection()); } @@ -91,7 +101,15 @@ void EnvoyQuicServerStream::encodeTrailers(const Http::ResponseTrailerMap& trail ASSERT(!local_end_stream_); local_end_stream_ = true; ENVOY_STREAM_LOG(debug, "encodeTrailers: {}.", *this, trailers); + quic::QuicStream* writing_stream = + quic::VersionUsesHttp3(transport_version()) + ? static_cast(this) + : (dynamic_cast(session())->headers_stream()); + const uint64_t bytes_to_send_old = writing_stream->BufferedDataBytes(); WriteTrailers(envoyHeadersToSpdyHeaderBlock(trailers), nullptr); + const uint64_t bytes_to_send_new = writing_stream->BufferedDataBytes(); + ASSERT(bytes_to_send_old <= bytes_to_send_new); + maybeCheckWatermark(bytes_to_send_old, bytes_to_send_new, *filterManagerConnection()); } void EnvoyQuicServerStream::encodeMetadata(const Http::MetadataMapVector& /*metadata_map_vector*/) { @@ -230,9 +248,9 @@ void EnvoyQuicServerStream::OnClose() { } void EnvoyQuicServerStream::OnCanWrite() { - uint64_t buffered_data_old = BufferedDataBytes(); + const uint64_t buffered_data_old = BufferedDataBytes(); quic::QuicSpdyServerStreamBase::OnCanWrite(); - uint64_t buffered_data_new = BufferedDataBytes(); + const uint64_t buffered_data_new = BufferedDataBytes(); // As long as OnCanWriteNewData() is no-op, data to sent in buffer shouldn't // increase. ASSERT(buffered_data_new <= buffered_data_old); diff --git a/source/extensions/quic_listeners/quiche/quic_filter_manager_connection_impl.cc b/source/extensions/quic_listeners/quiche/quic_filter_manager_connection_impl.cc index 270c7eec91ff..e005a3dd7691 100644 --- a/source/extensions/quic_listeners/quiche/quic_filter_manager_connection_impl.cc +++ b/source/extensions/quic_listeners/quiche/quic_filter_manager_connection_impl.cc @@ -130,7 +130,13 @@ void QuicFilterManagerConnectionImpl::rawWrite(Buffer::Instance& /*data*/, bool } void QuicFilterManagerConnectionImpl::adjustBytesToSend(int64_t delta) { + const size_t bytes_to_send_old = bytes_to_send_; bytes_to_send_ += delta; + if (delta < 0) { + ASSERT(bytes_to_send_old > bytes_to_send_); + } else { + ASSERT(bytes_to_send_old <= bytes_to_send_); + } write_buffer_watermark_simulation_.checkHighWatermark(bytes_to_send_); write_buffer_watermark_simulation_.checkLowWatermark(bytes_to_send_); } diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_client_stream_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_client_stream_test.cc index ccd7d6a76d6d..120fc7d83b35 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_client_stream_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_client_stream_test.cc @@ -266,5 +266,81 @@ TEST_P(EnvoyQuicClientStreamTest, WatermarkSendBuffer) { EXPECT_CALL(stream_callbacks_, onResetStream(_, _)); } +// Tests that headers and trailers buffered in send buffer contribute towards buffer watermark +// limits. Only IETF QUIC writes them on data stream, gQUIC writes them on dedicated headers stream +// and only contributes to connection watermark buffer. 
+TEST_P(EnvoyQuicClientStreamTest, HeadersContributeToWatermarkIquic) { + if (!quic::VersionUsesHttp3(quic_version_.transport_version)) { + EXPECT_CALL(stream_callbacks_, onResetStream(_, _)); + return; + } + + // Bump connection flow control window large enough not to cause connection level flow control + // blocked + quic::QuicWindowUpdateFrame window_update( + quic::kInvalidControlFrameId, + quic::QuicUtils::GetInvalidStreamId(quic_version_.transport_version), 1024 * 1024); + quic_session_.OnWindowUpdateFrame(window_update); + + // Make the stream blocked by congestion control. + EXPECT_CALL(quic_session_, WritevData(_, _, _, _, _, _)) + .WillOnce(Invoke([](quic::QuicStreamId, size_t /*write_length*/, quic::QuicStreamOffset, + quic::StreamSendingState state, bool, + quiche::QuicheOptional) { + return quic::QuicConsumedData{0u, state != quic::NO_FIN}; + })); + quic_stream_->encodeHeaders(request_headers_, /*end_stream=*/false); + + // Encode 16kB -10 bytes request body. Because the high watermark is 16KB, with previously + // buffered headers, this call should make the send buffers reach their high watermark. + std::string request(16 * 1024 - 10, 'a'); + Buffer::OwnedImpl buffer(request); + EXPECT_CALL(stream_callbacks_, onAboveWriteBufferHighWatermark()); + quic_stream_->encodeData(buffer, false); + EXPECT_EQ(0u, buffer.length()); + + // Unblock writing now, and this will write out 16kB data and cause stream to + // be blocked by the flow control limit. + EXPECT_CALL(quic_session_, WritevData(_, _, _, _, _, _)) + .WillOnce(Invoke([](quic::QuicStreamId, size_t write_length, quic::QuicStreamOffset, + quic::StreamSendingState state, bool, + quiche::QuicheOptional) { + return quic::QuicConsumedData{write_length, state != quic::NO_FIN}; + })); + EXPECT_CALL(stream_callbacks_, onBelowWriteBufferLowWatermark()); + quic_session_.OnCanWrite(); + EXPECT_TRUE(quic_stream_->flow_controller()->IsBlocked()); + + // Update flow control window to write all the buffered data. + quic::QuicWindowUpdateFrame window_update1(quic::kInvalidControlFrameId, quic_stream_->id(), + 32 * 1024); + quic_stream_->OnWindowUpdateFrame(window_update1); + EXPECT_CALL(quic_session_, WritevData(_, _, _, _, _, _)) + .WillOnce(Invoke([](quic::QuicStreamId, size_t write_length, quic::QuicStreamOffset, + quic::StreamSendingState state, bool, + quiche::QuicheOptional) { + return quic::QuicConsumedData{write_length, state != quic::NO_FIN}; + })); + quic_session_.OnCanWrite(); + // No data should be buffered at this point. + + EXPECT_CALL(quic_session_, WritevData(_, _, _, _, _, _)) + .WillOnce(Invoke([](quic::QuicStreamId, size_t, quic::QuicStreamOffset, + quic::StreamSendingState state, bool, + quiche::QuicheOptional) { + return quic::QuicConsumedData{0u, state != quic::NO_FIN}; + })); + // Send more data. If watermark bytes counting were not cleared in previous + // OnCanWrite, this write would have caused the stream to exceed its high watermark. + std::string request1(16 * 1024 - 3, 'a'); + Buffer::OwnedImpl buffer1(request1); + quic_stream_->encodeData(buffer1, false); + // Buffering more trailers will cause stream to reach high watermark, but + // because trailers closes the stream, no callback should be triggered. 
+ quic_stream_->encodeTrailers(request_trailers_); + + EXPECT_CALL(stream_callbacks_, onResetStream(_, _)); +} + } // namespace Quic } // namespace Envoy diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_server_session_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_server_session_test.cc index 5483d0d7c858..df0d61491990 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_server_session_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_server_session_test.cc @@ -962,5 +962,115 @@ TEST_P(EnvoyQuicServerSessionTest, SendBufferWatermark) { EXPECT_TRUE(stream2->write_side_closed()); } +TEST_P(EnvoyQuicServerSessionTest, HeadersContributeToWatermarkGquic) { + if (quic::VersionUsesHttp3(quic_version_[0].transport_version)) { + installReadFilter(); + return; + } + // Switch to a encryption forward secure crypto stream. + quic::test::QuicServerSessionBasePeer::SetCryptoStream(&envoy_quic_session_, nullptr); + quic::test::QuicServerSessionBasePeer::SetCryptoStream( + &envoy_quic_session_, + new TestQuicCryptoServerStream(&crypto_config_, &compressed_certs_cache_, + &envoy_quic_session_, &crypto_stream_helper_)); + quic_connection_->SetDefaultEncryptionLevel(quic::ENCRYPTION_FORWARD_SECURE); + quic_connection_->SetEncrypter( + quic::ENCRYPTION_FORWARD_SECURE, + std::make_unique(quic::Perspective::IS_SERVER)); + // Drive congestion control manually. + auto send_algorithm = new testing::NiceMock; + quic::test::QuicConnectionPeer::SetSendAlgorithm(quic_connection_, send_algorithm); + EXPECT_CALL(*send_algorithm, PacingRate(_)).WillRepeatedly(Return(quic::QuicBandwidth::Zero())); + EXPECT_CALL(*send_algorithm, BandwidthEstimate()) + .WillRepeatedly(Return(quic::QuicBandwidth::Zero())); + EXPECT_CALL(*quic_connection_, SendControlFrame(_)).Times(AnyNumber()); + + // Bump connection flow control window large enough not to interfere + // stream writing. + envoy_quic_session_.flow_controller()->UpdateSendWindowOffset( + 10 * quic::kDefaultFlowControlSendWindow); + installReadFilter(); + Http::MockRequestDecoder request_decoder; + Http::MockStreamCallbacks stream_callbacks; + EXPECT_CALL(http_connection_callbacks_, newStream(_, false)) + .WillOnce(Invoke([&request_decoder, &stream_callbacks](Http::ResponseEncoder& encoder, + bool) -> Http::RequestDecoder& { + encoder.getStream().addCallbacks(stream_callbacks); + return request_decoder; + })); + quic::QuicStreamId stream_id = + quic::VersionUsesHttp3(quic_version_[0].transport_version) ? 4u : 5u; + auto stream1 = + dynamic_cast(envoy_quic_session_.GetOrCreateStream(stream_id)); + + // Receive a GET request on created stream. + quic::QuicHeaderList request_headers; + request_headers.OnHeaderBlockStart(); + std::string host("www.abc.com"); + request_headers.OnHeader(":authority", host); + request_headers.OnHeader(":method", "GET"); + request_headers.OnHeader(":path", "/"); + request_headers.OnHeaderBlockEnd(/*uncompressed_header_bytes=*/0, /*compressed_header_bytes=*/0); + // Request headers should be propagated to decoder. 
+ EXPECT_CALL(request_decoder, decodeHeaders_(_, /*end_stream=*/true)) + .WillOnce(Invoke([&host](const Http::RequestHeaderMapPtr& decoded_headers, bool) { + EXPECT_EQ(host, decoded_headers->getHostValue()); + EXPECT_EQ("/", decoded_headers->getPathValue()); + EXPECT_EQ(Http::Headers::get().MethodValues.Get, decoded_headers->getMethodValue()); + })); + stream1->OnStreamHeaderList(/*fin=*/true, request_headers.uncompressed_header_bytes(), + request_headers); + + Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + // Make connection congestion control blocked so headers are buffered. + EXPECT_CALL(*send_algorithm, CanSend(_)).WillRepeatedly(Return(false)); + stream1->encodeHeaders(response_headers, false); + // Buffer a response slightly smaller than connection level watermark, but + // with the previously buffered headers, this write should reach high + // watermark. + std::string response(24 * 1024 - 1, 'a'); + Buffer::OwnedImpl buffer(response); + // Triggered twice, once by stream, the other time by connection. + EXPECT_CALL(stream_callbacks, onAboveWriteBufferHighWatermark()).Times(2); + EXPECT_CALL(network_connection_callbacks_, onAboveWriteBufferHighWatermark) + .WillOnce(Invoke( + [this]() { http_connection_->onUnderlyingConnectionAboveWriteBufferHighWatermark(); })); + stream1->encodeData(buffer, false); + EXPECT_FALSE(envoy_quic_session_.IsConnectionFlowControlBlocked()); + + // Write the buffered data out till stream is flow control blocked. Both + // stream and connection level buffers should drop below watermark. + EXPECT_CALL(*send_algorithm, CanSend(_)).WillRepeatedly(Return(true)); + EXPECT_CALL(*send_algorithm, GetCongestionWindow()).WillRepeatedly(Return(quic::kDefaultTCPMSS)); + EXPECT_CALL(network_connection_callbacks_, onBelowWriteBufferLowWatermark) + .WillOnce(Invoke( + [this]() { http_connection_->onUnderlyingConnectionBelowWriteBufferLowWatermark(); })); + EXPECT_CALL(stream_callbacks, onBelowWriteBufferLowWatermark()).Times(2); + envoy_quic_session_.OnCanWrite(); + EXPECT_TRUE(stream1->flow_controller()->IsBlocked()); + + // Buffer more response because of flow control. The buffered bytes become just below connection + // level high watermark. + std::string response1(16 * 1024 - 20, 'a'); + Buffer::OwnedImpl buffer1(response1); + EXPECT_CALL(stream_callbacks, onAboveWriteBufferHighWatermark()); + stream1->encodeData(buffer1, false); + + // Make connection congestion control blocked again. + EXPECT_CALL(*send_algorithm, CanSend(_)).WillRepeatedly(Return(false)); + // Buffering the trailers will cause connection to reach high watermark. 
+ EXPECT_CALL(network_connection_callbacks_, onAboveWriteBufferHighWatermark) + .WillOnce(Invoke( + [this]() { http_connection_->onUnderlyingConnectionAboveWriteBufferHighWatermark(); })); + Http::TestResponseTrailerMapImpl response_trailers{{"trailer-key", "trailer-value"}}; + stream1->encodeTrailers(response_trailers); + + EXPECT_CALL(network_connection_callbacks_, onBelowWriteBufferLowWatermark) + .WillOnce(Invoke( + [this]() { http_connection_->onUnderlyingConnectionBelowWriteBufferLowWatermark(); })); + EXPECT_CALL(stream_callbacks, onResetStream(Http::StreamResetReason::LocalReset, _)); + stream1->resetStream(Http::StreamResetReason::LocalReset); +} + } // namespace Quic } // namespace Envoy diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_server_stream_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_server_stream_test.cc index 84120cae913b..240a80cdf05e 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_server_stream_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_server_stream_test.cc @@ -47,8 +47,8 @@ class EnvoyQuicServerStreamTest : public testing::TestWithParam { quic_config_.GetInitialStreamFlowControlWindowToSend() * 2), stream_id_(VersionUsesHttp3(quic_version_.transport_version) ? 4u : 5u), quic_stream_(new EnvoyQuicServerStream(stream_id_, &quic_session_, quic::BIDIRECTIONAL)), - response_headers_{{":status", "200"}}, response_trailers_{ - {"trailer-key", "trailer-value"}} { + response_headers_{{":status", "200"}, {"response-key", "response-value"}}, + response_trailers_{{"trailer-key", "trailer-value"}} { quic_stream_->setRequestDecoder(stream_decoder_); quic_stream_->addCallbacks(stream_callbacks_); quic_session_.ActivateStream(std::unique_ptr(quic_stream_)); @@ -368,5 +368,80 @@ TEST_P(EnvoyQuicServerStreamTest, WatermarkSendBuffer) { EXPECT_TRUE(quic_stream_->write_side_closed()); } +TEST_P(EnvoyQuicServerStreamTest, HeadersContributeToWatermarkIquic) { + if (!quic::VersionUsesHttp3(quic_version_.transport_version)) { + EXPECT_CALL(stream_callbacks_, onResetStream(_, _)); + return; + } + + sendRequest(request_body_, true, request_body_.size() * 2); + + // Bump connection flow control window large enough not to cause connection level flow control + // blocked + quic::QuicWindowUpdateFrame window_update( + quic::kInvalidControlFrameId, + quic::QuicUtils::GetInvalidStreamId(quic_version_.transport_version), 1024 * 1024); + quic_session_.OnWindowUpdateFrame(window_update); + + // Make the stream blocked by congestion control. + EXPECT_CALL(quic_session_, WritevData(_, _, _, _, _, _)) + .WillOnce(Invoke([](quic::QuicStreamId, size_t /*write_length*/, quic::QuicStreamOffset, + quic::StreamSendingState state, bool, + quiche::QuicheOptional) { + return quic::QuicConsumedData{0u, state != quic::NO_FIN}; + })); + quic_stream_->encodeHeaders(response_headers_, /*end_stream=*/false); + + // Encode 16kB -10 bytes request body. Because the high watermark is 16KB, with previously + // buffered headers, this call should make the send buffers reach their high watermark. + std::string response(16 * 1024 - 10, 'a'); + Buffer::OwnedImpl buffer(response); + EXPECT_CALL(stream_callbacks_, onAboveWriteBufferHighWatermark()); + quic_stream_->encodeData(buffer, false); + EXPECT_EQ(0u, buffer.length()); + + // Unblock writing now, and this will write out 16kB data and cause stream to + // be blocked by the flow control limit. 
+ EXPECT_CALL(quic_session_, WritevData(_, _, _, _, _, _)) + .WillOnce(Invoke([](quic::QuicStreamId, size_t write_length, quic::QuicStreamOffset, + quic::StreamSendingState state, bool, + quiche::QuicheOptional) { + return quic::QuicConsumedData{write_length, state != quic::NO_FIN}; + })); + EXPECT_CALL(stream_callbacks_, onBelowWriteBufferLowWatermark()); + quic_session_.OnCanWrite(); + EXPECT_TRUE(quic_stream_->flow_controller()->IsBlocked()); + + // Update flow control window to write all the buffered data. + quic::QuicWindowUpdateFrame window_update1(quic::kInvalidControlFrameId, quic_stream_->id(), + 32 * 1024); + quic_stream_->OnWindowUpdateFrame(window_update1); + EXPECT_CALL(quic_session_, WritevData(_, _, _, _, _, _)) + .WillOnce(Invoke([](quic::QuicStreamId, size_t write_length, quic::QuicStreamOffset, + quic::StreamSendingState state, bool, + quiche::QuicheOptional) { + return quic::QuicConsumedData{write_length, state != quic::NO_FIN}; + })); + quic_session_.OnCanWrite(); + // No data should be buffered at this point. + + EXPECT_CALL(quic_session_, WritevData(_, _, _, _, _, _)) + .WillRepeatedly(Invoke([](quic::QuicStreamId, size_t, quic::QuicStreamOffset, + quic::StreamSendingState state, bool, + quiche::QuicheOptional) { + return quic::QuicConsumedData{0u, state != quic::NO_FIN}; + })); + // Send more data. If watermark bytes counting were not cleared in previous + // OnCanWrite, this write would have caused the stream to exceed its high watermark. + std::string response1(16 * 1024 - 3, 'a'); + Buffer::OwnedImpl buffer1(response1); + quic_stream_->encodeData(buffer1, false); + // Buffering more trailers will cause stream to reach high watermark, but + // because trailers closes the stream, no callback should be triggered. + quic_stream_->encodeTrailers(response_trailers_); + + EXPECT_CALL(stream_callbacks_, onResetStream(_, _)); +} + } // namespace Quic } // namespace Envoy From d75702e609ac51a2666966fe877cb35537165ac4 Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Tue, 23 Jun 2020 10:24:12 -0600 Subject: [PATCH 417/909] listener: fix multiple issues around creation failures (#11647) Previously we were not correctly handling listener creation failures when the failure occurred on the worker thread (as in the case of using reuse port). 
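A rough sketch of the failure path the hunks below establish: listener/socket creation on a worker throws the new Network::CreateListenerException, the worker catches it and reports failure through the add-listener completion, and the main thread then logs, increments listener_create_failure, and removes the listener. The snippet is a simplified stand-in; workerAddListener and AddListenerCompletion are invented names, not the real WorkerImpl/ListenerManagerImpl interfaces.

#include <functional>
#include <iostream>
#include <stdexcept>

// Hypothetical simplified model of Network::CreateListenerException from the
// new include/envoy/network/exception.h.
struct CreateListenerException : public std::runtime_error {
  using std::runtime_error::runtime_error;
};

using AddListenerCompletion = std::function<void(bool success)>;

// Worker side: creating the listener (e.g. binding a reuse-port socket) can
// throw on the worker thread; the failure is reported through the completion
// instead of being silently dropped.
void workerAddListener(const std::function<void()>& create_listener,
                       const AddListenerCompletion& completion) {
  try {
    create_listener();
    completion(true);
  } catch (const CreateListenerException& e) {
    std::cerr << "failed to add listener on worker: " << e.what() << "\n";
    completion(false);
  }
}

int main() {
  // Main-thread side: on failure, clean up so an xDS update is rejected rather
  // than left half-applied across workers.
  workerAddListener(
      [] { throw CreateListenerException("ListenSocket: Setting socket options failed"); },
      [](bool success) {
        if (!success) {
          std::cout << "removing listener after worker-side create failure\n";
        }
      });
  return 0;
}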
Fixes https://github.com/envoyproxy/envoy/issues/11340 Fixes https://github.com/envoyproxy/envoy/issues/10070 Signed-off-by: Matt Klein --- docs/root/version_history/current.rst | 3 ++ include/envoy/network/BUILD | 5 +++ include/envoy/network/exception.h | 32 ++++++++++++++ include/envoy/network/listen_socket.h | 15 ------- include/envoy/network/listener.h | 8 ---- source/common/network/BUILD | 2 + source/common/network/listen_socket_impl.cc | 3 +- source/common/network/listener_impl.cc | 1 + source/common/network/udp_listener_impl.cc | 1 + .../quiche/active_quic_listener.cc | 4 +- source/server/BUILD | 2 + source/server/connection_handler_impl.cc | 1 + source/server/listener_impl.cc | 13 +++--- source/server/listener_manager_impl.cc | 21 +++++---- source/server/worker_impl.cc | 2 + .../common/network/listen_socket_impl_test.cc | 1 + test/common/network/listener_impl_test.cc | 1 + .../udp_proxy/udp_proxy_integration_test.cc | 10 +++++ .../quiche/active_quic_listener_test.cc | 3 +- test/integration/integration.cc | 8 ---- test/integration/integration.h | 1 - test/integration/integration_test.cc | 44 +++++++++++++++++++ test/server/connection_handler_test.cc | 1 + test/server/listener_manager_impl_test.cc | 36 +++------------ test/server/server_test.cc | 1 + test/server/worker_impl_test.cc | 2 + test/test_common/BUILD | 1 + test/test_common/network_utility.cc | 8 +++- test/test_common/network_utility.h | 6 +-- 29 files changed, 154 insertions(+), 82 deletions(-) create mode 100644 include/envoy/network/exception.h diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 56ea33449ca2..c2a2d7e36d3a 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -39,6 +39,9 @@ Bug Fixes * http: fixed a bug where in some cases slash was moved from path to query string when :ref:`merging of adjacent slashes` is enabled. * http: fixed several bugs with applying correct connection close behavior across the http connection manager, health checker, and connection pool. This behavior may be temporarily reverted by setting runtime feature `envoy.reloadable_features.fix_connection_close` to false. * prometheus stats: fix the sort order of output lines to comply with the standard. +* udp: the :ref:`reuse_port ` listener option must now be + specified for UDP listeners if concurrency is > 1. This previously crashed so is considered a + bug fix. * upstream: fixed a bug where Envoy would panic when receiving a GRPC SERVICE_UNKNOWN status on the health check. Removed Config or Runtime diff --git a/include/envoy/network/BUILD b/include/envoy/network/BUILD index 7914df35c0ef..c2254842ed92 100644 --- a/include/envoy/network/BUILD +++ b/include/envoy/network/BUILD @@ -52,6 +52,11 @@ envoy_cc_library( hdrs = ["drain_decision.h"], ) +envoy_cc_library( + name = "exception_interface", + hdrs = ["exception.h"], +) + envoy_cc_library( name = "filter_interface", hdrs = ["filter.h"], diff --git a/include/envoy/network/exception.h b/include/envoy/network/exception.h new file mode 100644 index 000000000000..54ba28bca290 --- /dev/null +++ b/include/envoy/network/exception.h @@ -0,0 +1,32 @@ +#pragma once + +#include "envoy/common/exception.h" + +namespace Envoy { +namespace Network { + +/** + * Thrown when there is a runtime error creating/binding a listener. 
+ */ +class CreateListenerException : public EnvoyException { +public: + CreateListenerException(const std::string& what) : EnvoyException(what) {} +}; + +/** + * Thrown when there is a runtime error binding a socket. + */ +class SocketBindException : public CreateListenerException { +public: + SocketBindException(const std::string& what, int error_number) + : CreateListenerException(what), error_number_(error_number) {} + + // This can't be called errno because otherwise the standard errno macro expansion replaces it. + int errorNumber() const { return error_number_; } + +private: + const int error_number_; +}; + +} // namespace Network +} // namespace Envoy \ No newline at end of file diff --git a/include/envoy/network/listen_socket.h b/include/envoy/network/listen_socket.h index c654cecdaf8a..bc0c736589ee 100644 --- a/include/envoy/network/listen_socket.h +++ b/include/envoy/network/listen_socket.h @@ -96,20 +96,5 @@ class ConnectionSocket : public virtual Socket { using ConnectionSocketPtr = std::unique_ptr; -/** - * Thrown when there is a runtime error binding a socket. - */ -class SocketBindException : public EnvoyException { -public: - SocketBindException(const std::string& what, int error_number) - : EnvoyException(what), error_number_(error_number) {} - - // This can't be called errno because otherwise the standard errno macro expansion replaces it. - int errorNumber() const { return error_number_; } - -private: - const int error_number_; -}; - } // namespace Network } // namespace Envoy diff --git a/include/envoy/network/listener.h b/include/envoy/network/listener.h index ba8f27918cb7..cd868029ea00 100644 --- a/include/envoy/network/listener.h +++ b/include/envoy/network/listener.h @@ -298,13 +298,5 @@ class UdpListener : public virtual Listener { using UdpListenerPtr = std::unique_ptr; -/** - * Thrown when there is a runtime error creating/binding a listener. 
- */ -class CreateListenerException : public EnvoyException { -public: - CreateListenerException(const std::string& what) : EnvoyException(what) {} -}; - } // namespace Network } // namespace Envoy diff --git a/source/common/network/BUILD b/source/common/network/BUILD index c7c024fa2cf1..145044e99c8c 100644 --- a/source/common/network/BUILD +++ b/source/common/network/BUILD @@ -197,6 +197,7 @@ envoy_cc_library( deps = [ ":socket_lib", ":utility_lib", + "//include/envoy/network:exception_interface", "//include/envoy/network:listen_socket_interface", "//source/common/common:assert_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", @@ -220,6 +221,7 @@ envoy_cc_library( ":listen_socket_lib", "//include/envoy/event:dispatcher_interface", "//include/envoy/event:file_event_interface", + "//include/envoy/network:exception_interface", "//include/envoy/network:listener_interface", "//include/envoy/stats:stats_interface", "//include/envoy/stats:stats_macros", diff --git a/source/common/network/listen_socket_impl.cc b/source/common/network/listen_socket_impl.cc index 43e342d340b6..237a2fa1af76 100644 --- a/source/common/network/listen_socket_impl.cc +++ b/source/common/network/listen_socket_impl.cc @@ -7,6 +7,7 @@ #include "envoy/common/exception.h" #include "envoy/common/platform.h" #include "envoy/config/core/v3/base.pb.h" +#include "envoy/network/exception.h" #include "common/common/assert.h" #include "common/common/fmt.h" @@ -32,7 +33,7 @@ Api::SysCallIntResult ListenSocketImpl::bind(Network::Address::InstanceConstShar void ListenSocketImpl::setListenSocketOptions(const Network::Socket::OptionsSharedPtr& options) { if (!Network::Socket::applyOptions(options, *this, envoy::config::core::v3::SocketOption::STATE_PREBIND)) { - throw EnvoyException("ListenSocket: Setting socket options failed"); + throw CreateListenerException("ListenSocket: Setting socket options failed"); } } diff --git a/source/common/network/listener_impl.cc b/source/common/network/listener_impl.cc index 31dd923445a4..71e8cfb4ad2c 100644 --- a/source/common/network/listener_impl.cc +++ b/source/common/network/listener_impl.cc @@ -3,6 +3,7 @@ #include "envoy/common/exception.h" #include "envoy/common/platform.h" #include "envoy/config/core/v3/base.pb.h" +#include "envoy/network/exception.h" #include "common/common/assert.h" #include "common/common/empty_string.h" diff --git a/source/common/network/udp_listener_impl.cc b/source/common/network/udp_listener_impl.cc index f05817f19d18..c959132d1642 100644 --- a/source/common/network/udp_listener_impl.cc +++ b/source/common/network/udp_listener_impl.cc @@ -8,6 +8,7 @@ #include "envoy/common/exception.h" #include "envoy/common/platform.h" #include "envoy/config/core/v3/base.pb.h" +#include "envoy/network/exception.h" #include "common/api/os_sys_calls_impl.h" #include "common/common/assert.h" diff --git a/source/extensions/quic_listeners/quiche/active_quic_listener.cc b/source/extensions/quic_listeners/quiche/active_quic_listener.cc index 55f5da2e49f1..280d778e31e3 100644 --- a/source/extensions/quic_listeners/quiche/active_quic_listener.cc +++ b/source/extensions/quic_listeners/quiche/active_quic_listener.cc @@ -1,5 +1,7 @@ #include "extensions/quic_listeners/quiche/active_quic_listener.h" +#include "envoy/network/exception.h" + #if defined(__linux__) #include #endif @@ -56,7 +58,7 @@ ActiveQuicListener::ActiveQuicListener(Event::Dispatcher& dispatcher, if (!ok) { ENVOY_LOG(warn, "Failed to apply socket options to socket {} on listener {} after binding", 
listen_socket_.ioHandle().fd(), listener_config.name()); - throw EnvoyException("Failed to apply socket options."); + throw Network::CreateListenerException("Failed to apply socket options."); } listen_socket_.addOptions(options); } diff --git a/source/server/BUILD b/source/server/BUILD index 1ed58d10ef0c..36809ab90e85 100644 --- a/source/server/BUILD +++ b/source/server/BUILD @@ -66,6 +66,7 @@ envoy_cc_library( "//include/envoy/event:timer_interface", "//include/envoy/network:connection_handler_interface", "//include/envoy/network:connection_interface", + "//include/envoy/network:exception_interface", "//include/envoy/network:filter_interface", "//include/envoy/network:listen_socket_interface", "//include/envoy/network:listener_interface", @@ -482,6 +483,7 @@ envoy_cc_library( "//include/envoy/api:api_interface", "//include/envoy/event:dispatcher_interface", "//include/envoy/event:timer_interface", + "//include/envoy/network:exception_interface", "//include/envoy/server:configuration_interface", "//include/envoy/server:guarddog_interface", "//include/envoy/server:listener_manager_interface", diff --git a/source/server/connection_handler_impl.cc b/source/server/connection_handler_impl.cc index 708eef2e47ef..797e746df724 100644 --- a/source/server/connection_handler_impl.cc +++ b/source/server/connection_handler_impl.cc @@ -2,6 +2,7 @@ #include "envoy/event/dispatcher.h" #include "envoy/event/timer.h" +#include "envoy/network/exception.h" #include "envoy/network/filter.h" #include "envoy/stats/scope.h" #include "envoy/stats/timespan.h" diff --git a/source/server/listener_impl.cc b/source/server/listener_impl.cc index 9a31da2cb033..967d37bb17df 100644 --- a/source/server/listener_impl.cc +++ b/source/server/listener_impl.cc @@ -3,6 +3,7 @@ #include "envoy/config/core/v3/base.pb.h" #include "envoy/config/listener/v3/listener.pb.h" #include "envoy/config/listener/v3/listener_components.pb.h" +#include "envoy/network/exception.h" #include "envoy/registry/registry.h" #include "envoy/server/active_udp_listener_config.h" #include "envoy/server/transport_socket_config.h" @@ -106,7 +107,7 @@ Network::SocketSharedPtr ListenSocketFactoryImpl::createListenSocketAndApplyOpti fmt::format("{}: Setting socket options {}", listener_name_, ok ? "succeeded" : "failed"); if (!ok) { ENVOY_LOG(warn, "{}", message); - throw EnvoyException(message); + throw Network::CreateListenerException(message); } else { ENVOY_LOG(debug, "{}", message); } @@ -333,6 +334,12 @@ void ListenerImpl::buildAccessLog() { void ListenerImpl::buildUdpListenerFactory(Network::Socket::Type socket_type, uint32_t concurrency) { if (socket_type == Network::Socket::Type::Datagram) { + if (!config_.reuse_port() && concurrency > 1) { + throw EnvoyException("Listening on UDP when concurrency is > 1 without the SO_REUSEPORT " + "socket option results in " + "unstable packet proxying. 
Configure the reuse_port listener option or " + "set concurrency = 1."); + } auto udp_config = config_.udp_listener_config(); if (udp_config.udp_listener_name().empty()) { udp_config.set_udp_listener_name(UdpListenerNames::get().RawUdp); @@ -343,10 +350,6 @@ void ListenerImpl::buildUdpListenerFactory(Network::Socket::Type socket_type, ProtobufTypes::MessagePtr message = Config::Utility::translateToFactoryConfig(udp_config, validation_visitor_, config_factory); udp_listener_factory_ = config_factory.createActiveUdpListenerFactory(*message, concurrency); - if (!config_.reuse_port() && concurrency > 1) { - ENVOY_LOG(warn, "Listening on UDP without SO_REUSEPORT socket option may result to unstable " - "packet proxying. Consider configuring the reuse_port listener option."); - } } } diff --git a/source/server/listener_manager_impl.cc b/source/server/listener_manager_impl.cc index 1d30aa9a0f60..7b85cf590356 100644 --- a/source/server/listener_manager_impl.cc +++ b/source/server/listener_manager_impl.cc @@ -659,16 +659,19 @@ void ListenerManagerImpl::addListenerToWorker(Worker& worker, // The add listener completion runs on the worker thread. Post back to the main thread to // avoid locking. server_.dispatcher().post([this, success, &listener, completion_callback]() -> void { - // It is theoretically possible for a listener to get added on 1 worker but not the - // others. The below check with onListenerCreateFailure() is there to ensure we execute - // the removal/logging/stats at most once on failure. Note also that drain/removal can - // race with addition. It's guaranteed that workers process remove after add so this - // should be fine. + // It is possible for a listener to get added on 1 worker but not the others. The below + // check with onListenerCreateFailure() is there to ensure we execute the + // removal/logging/stats at most once on failure. Note also that drain/removal can race + // with addition. It's guaranteed that workers process remove after add so this should be + // fine. + // + // TODO(mattklein123): We should consider rewriting how listener sockets are added to + // workers, especially in the case of reuse port. If we were to create all needed + // listener sockets on the main thread (even in the case of reuse port) we could catch + // almost all socket errors here. This would both greatly simplify the logic and allow + // for xDS NACK in most cases. if (!success && !listener.onListenerCreateFailure()) { - // TODO(mattklein123): In addition to a critical log and a stat, we should consider - // adding a startup option here to cause the server to exit. I think we probably want - // this at Lyft but I will do it in a follow up. 
- ENVOY_LOG(critical, "listener '{}' failed to listen on address '{}' on worker", + ENVOY_LOG(error, "listener '{}' failed to listen on address '{}' on worker", listener.name(), listener.listenSocketFactory().localAddress()->asString()); stats_.listener_create_failure_.inc(); removeListenerInternal(listener.name(), false); diff --git a/source/server/worker_impl.cc b/source/server/worker_impl.cc index 54ef058ea6e5..eae51fa68837 100644 --- a/source/server/worker_impl.cc +++ b/source/server/worker_impl.cc @@ -5,6 +5,7 @@ #include "envoy/event/dispatcher.h" #include "envoy/event/timer.h" +#include "envoy/network/exception.h" #include "envoy/server/configuration.h" #include "envoy/thread_local/thread_local.h" @@ -45,6 +46,7 @@ void WorkerImpl::addListener(absl::optional overridden_listener, hooks_.onWorkerListenerAdded(); completion(true); } catch (const Network::CreateListenerException& e) { + ENVOY_LOG(error, "failed to add listener on worker: {}", e.what()); completion(false); } }); diff --git a/test/common/network/listen_socket_impl_test.cc b/test/common/network/listen_socket_impl_test.cc index ae595d3ced28..39b790d14163 100644 --- a/test/common/network/listen_socket_impl_test.cc +++ b/test/common/network/listen_socket_impl_test.cc @@ -1,5 +1,6 @@ #include "envoy/common/platform.h" #include "envoy/config/core/v3/base.pb.h" +#include "envoy/network/exception.h" #include "common/api/os_sys_calls_impl.h" #include "common/network/io_socket_handle_impl.h" diff --git a/test/common/network/listener_impl_test.cc b/test/common/network/listener_impl_test.cc index b19ba3664fb1..f02d27ea09d2 100644 --- a/test/common/network/listener_impl_test.cc +++ b/test/common/network/listener_impl_test.cc @@ -1,4 +1,5 @@ #include "envoy/config/core/v3/base.pb.h" +#include "envoy/network/exception.h" #include "common/network/address_impl.h" #include "common/network/listener_impl.h" diff --git a/test/extensions/filters/udp/udp_proxy/udp_proxy_integration_test.cc b/test/extensions/filters/udp/udp_proxy/udp_proxy_integration_test.cc index 68318229d1f7..19ee0597a147 100644 --- a/test/extensions/filters/udp/udp_proxy/udp_proxy_integration_test.cc +++ b/test/extensions/filters/udp/udp_proxy/udp_proxy_integration_test.cc @@ -84,6 +84,16 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, UdpProxyIntegrationTest, testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), TestUtility::ipTestParamsToString); +// Make sure that we gracefully fail if the user does not configure reuse port and concurrency is +// > 1. +TEST_P(UdpProxyIntegrationTest, NoReusePort) { + concurrency_ = 2; + // Do not wait for listeners to start as the listener will fail. + defer_listener_finalization_ = true; + setup(1); + test_server_->waitForCounterGe("listener_manager.lds.update_rejected", 1); +} + // Basic loopback test. 
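The NoReusePort integration test above exercises the validation this patch adds to ListenerImpl::buildUdpListenerFactory(). Reduced to its essentials, the check looks roughly like the function below; validateUdpListenerConfig is a hypothetical free function and the exception type is a stand-in for the EnvoyException the real code throws during listener creation.

#include <cstdint>
#include <stdexcept>

// Sketch of the new up-front check: a UDP listener without SO_REUSEPORT cannot
// be safely shared across multiple worker threads, so such a configuration is
// now rejected instead of merely logging a warning.
void validateUdpListenerConfig(bool reuse_port, uint32_t concurrency) {
  if (!reuse_port && concurrency > 1) {
    throw std::invalid_argument(
        "Listening on UDP when concurrency is > 1 without the SO_REUSEPORT socket option "
        "results in unstable packet proxying. Configure the reuse_port listener option or "
        "set concurrency = 1.");
  }
}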
TEST_P(UdpProxyIntegrationTest, HelloWorldOnLoopback) { setup(1); diff --git a/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc b/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc index b18a58be2e63..fb644bd68df9 100644 --- a/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc +++ b/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc @@ -12,6 +12,7 @@ #include "envoy/config/core/v3/base.pb.h" #include "envoy/config/core/v3/base.pb.validate.h" +#include "envoy/network/exception.h" #include "quiche/quic/core/crypto/crypto_protocol.h" #include "quiche/quic/test_tools/crypto_test_utils.h" @@ -288,7 +289,7 @@ TEST_P(ActiveQuicListenerTest, FailSocketOptionUponCreation) { options, ActiveQuicListenerFactoryPeer::runtimeEnabled( static_cast(listener_factory_.get()))), - EnvoyException, "Failed to apply socket options."); + Network::CreateListenerException, "Failed to apply socket options."); } TEST_P(ActiveQuicListenerTest, ReceiveCHLO) { diff --git a/test/integration/integration.cc b/test/integration/integration.cc index e43fc1b1240b..3359b8b7f617 100644 --- a/test/integration/integration.cc +++ b/test/integration/integration.cc @@ -513,14 +513,6 @@ void BaseIntegrationTest::createApiTestServer(const ApiFilesystemConfig& api_fil port_names, validator_config, allow_lds_rejection); } -void BaseIntegrationTest::createTestServer(const std::string& json_path, - const std::vector& port_names) { - test_server_ = createIntegrationTestServer( - TestEnvironment::temporaryFileSubstitute(json_path, port_map_, version_), nullptr, nullptr, - timeSystem()); - registerTestServerPorts(port_names); -} - void BaseIntegrationTest::sendRawHttpAndWaitForResponse(int port, const char* raw_http, std::string* response, bool disconnect_after_headers_complete) { diff --git a/test/integration/integration.h b/test/integration/integration.h index 4b55c14b6a26..5231f1b5e2e1 100644 --- a/test/integration/integration.h +++ b/test/integration/integration.h @@ -203,7 +203,6 @@ class BaseIntegrationTest : protected Logger::Loggable { const Network::ConnectionSocket::OptionsSharedPtr& options); void registerTestServerPorts(const std::vector& port_names); - void createTestServer(const std::string& json_path, const std::vector& port_names); void createGeneratedApiTestServer(const std::string& bootstrap_path, const std::vector& port_names, Server::FieldValidationConfig validator_config, diff --git a/test/integration/integration_test.cc b/test/integration/integration_test.cc index 987eb03bedf1..c6da9f9a3e56 100644 --- a/test/integration/integration_test.cc +++ b/test/integration/integration_test.cc @@ -54,6 +54,50 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, IntegrationTest, testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), TestUtility::ipTestParamsToString); +// Verify that we gracefully handle an invalid pre-bind socket option when using reuse port. +TEST_P(IntegrationTest, BadPrebindSocketOptionWithReusePort) { + // Reserve a port that we can then use on the integration listener with reuse port. + auto addr_socket = + Network::Test::bindFreeLoopbackPort(version_, Network::Socket::Type::Stream, true); + // Do not wait for listeners to start as the listener will fail. 
+ defer_listener_finalization_ = true; + + config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { + auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0); + listener->set_reuse_port(true); + listener->mutable_address()->mutable_socket_address()->set_port_value( + addr_socket.second->localAddress()->ip()->port()); + auto socket_option = listener->add_socket_options(); + socket_option->set_state(envoy::config::core::v3::SocketOption::STATE_PREBIND); + socket_option->set_level(10000); // Invalid level. + socket_option->set_int_value(10000); // Invalid value. + }); + initialize(); + test_server_->waitForCounterGe("listener_manager.listener_create_failure", 1); +} + +// Verify that we gracefully handle an invalid post-bind socket option when using reuse port. +TEST_P(IntegrationTest, BadPostbindSocketOptionWithReusePort) { + // Reserve a port that we can then use on the integration listener with reuse port. + auto addr_socket = + Network::Test::bindFreeLoopbackPort(version_, Network::Socket::Type::Stream, true); + // Do not wait for listeners to start as the listener will fail. + defer_listener_finalization_ = true; + + config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { + auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0); + listener->set_reuse_port(true); + listener->mutable_address()->mutable_socket_address()->set_port_value( + addr_socket.second->localAddress()->ip()->port()); + auto socket_option = listener->add_socket_options(); + socket_option->set_state(envoy::config::core::v3::SocketOption::STATE_BOUND); + socket_option->set_level(10000); // Invalid level. + socket_option->set_int_value(10000); // Invalid value. + }); + initialize(); + test_server_->waitForCounterGe("listener_manager.listener_create_failure", 1); +} + // Make sure we have correctly specified per-worker performance stats. TEST_P(IntegrationTest, PerWorkerStatsAndBalancing) { concurrency_ = 2; diff --git a/test/server/connection_handler_test.cc b/test/server/connection_handler_test.cc index 65703ba56dca..a30d2ded6399 100644 --- a/test/server/connection_handler_test.cc +++ b/test/server/connection_handler_test.cc @@ -1,5 +1,6 @@ #include "envoy/config/core/v3/base.pb.h" #include "envoy/config/listener/v3/udp_listener_config.pb.h" +#include "envoy/network/exception.h" #include "envoy/network/filter.h" #include "envoy/server/active_udp_listener_config.h" #include "envoy/stats/scope.h" diff --git a/test/server/listener_manager_impl_test.cc b/test/server/listener_manager_impl_test.cc index 9f04135735e5..3b5c09c56e63 100644 --- a/test/server/listener_manager_impl_test.cc +++ b/test/server/listener_manager_impl_test.cc @@ -3779,40 +3779,18 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, ReusePortListenerEnabledForTcp) { } TEST_F(ListenerManagerImplWithRealFiltersTest, ReusePortListenerDisabled) { - auto listener = createIPv4Listener("UdpListener"); listener.mutable_address()->mutable_socket_address()->set_protocol( envoy::config::core::v3::SocketAddress::UDP); - // For UDP, reuse_port is set to true forcibly, even it's set to false explicitly in config + // For UDP, verify that we fail if reuse port is false and concurrency is > 1. listener.set_reuse_port(false); - - // Port should be 0 for creating the shared socket, otherwise socket - // creation will happen worker thread. 
- listener.mutable_address()->mutable_socket_address()->set_port_value(0); - - // IpPacketInfo and RxQueueOverFlow are always set if supported - expectCreateListenSocket(envoy::config::core::v3::SocketOption::STATE_PREBIND, -#ifdef SO_RXQ_OVFL - /* expected_num_options */ 2, -#else - /* expected_num_options */ 1, -#endif - /* expected_creation_params */ {true, false}); - - expectSetsockopt(/* expected_sockopt_level */ IPPROTO_IP, - /* expected_sockopt_name */ ENVOY_IP_PKTINFO, - /* expected_value */ 1, - /* expected_num_calls */ 1); -#ifdef SO_RXQ_OVFL - expectSetsockopt(/* expected_sockopt_level */ SOL_SOCKET, - /* expected_sockopt_name */ SO_RXQ_OVFL, - /* expected_value */ 1, - /* expected_num_calls */ 1); -#endif - server_.options_.concurrency_ = 2; - manager_->addOrUpdateListener(listener, "", true); - EXPECT_EQ(1U, manager_->listeners().size()); + + EXPECT_THROW_WITH_MESSAGE( + manager_->addOrUpdateListener(listener, "", true), EnvoyException, + "Listening on UDP when concurrency is > 1 without the SO_REUSEPORT socket option results in " + "unstable packet proxying. Configure the reuse_port listener option or set concurrency = 1."); + EXPECT_EQ(0, manager_->listeners().size()); } TEST_F(ListenerManagerImplWithRealFiltersTest, LiteralSockoptListenerEnabled) { diff --git a/test/server/server_test.cc b/test/server/server_test.cc index 806ef87a5837..4025abd2c881 100644 --- a/test/server/server_test.cc +++ b/test/server/server_test.cc @@ -1,6 +1,7 @@ #include #include "envoy/config/core/v3/base.pb.h" +#include "envoy/network/exception.h" #include "envoy/server/bootstrap_extension_config.h" #include "common/common/assert.h" diff --git a/test/server/worker_impl_test.cc b/test/server/worker_impl_test.cc index 02afcc0a50fd..4a7c560c33cb 100644 --- a/test/server/worker_impl_test.cc +++ b/test/server/worker_impl_test.cc @@ -1,3 +1,5 @@ +#include "envoy/network/exception.h" + #include "common/api/api_impl.h" #include "common/event/dispatcher_impl.h" diff --git a/test/test_common/BUILD b/test/test_common/BUILD index 28d3a94ded97..dc4e5634d4e9 100644 --- a/test/test_common/BUILD +++ b/test/test_common/BUILD @@ -57,6 +57,7 @@ envoy_cc_test_library( "//source/common/network:address_lib", "//source/common/network:listen_socket_lib", "//source/common/network:raw_buffer_socket_lib", + "//source/common/network:socket_option_factory_lib", "//source/common/network:utility_lib", "//source/common/runtime:runtime_lib", ], diff --git a/test/test_common/network_utility.cc b/test/test_common/network_utility.cc index 5936b88c69c3..89a667fa23bd 100644 --- a/test/test_common/network_utility.cc +++ b/test/test_common/network_utility.cc @@ -10,6 +10,7 @@ #include "common/network/address_impl.h" #include "common/network/listen_socket_impl.h" #include "common/network/raw_buffer_socket.h" +#include "common/network/socket_option_factory.h" #include "common/network/utility.h" #include "common/runtime/runtime_impl.h" @@ -159,9 +160,14 @@ std::string ipVersionToDnsFamily(Network::Address::IpVersion version) { } std::pair -bindFreeLoopbackPort(Address::IpVersion version, Socket::Type type) { +bindFreeLoopbackPort(Address::IpVersion version, Socket::Type type, bool reuse_port) { Address::InstanceConstSharedPtr addr = getCanonicalLoopbackAddress(version); SocketPtr sock = std::make_unique(type, addr); + if (reuse_port) { + sock->addOptions(SocketOptionFactory::buildReusePortOptions()); + Socket::applyOptions(sock->options(), *sock, + envoy::config::core::v3::SocketOption::STATE_PREBIND); + } Api::SysCallIntResult result = 
sock->bind(addr); if (0 != result.rc_) { sock->close(); diff --git a/test/test_common/network_utility.h b/test/test_common/network_utility.h index 36fa1868ea84..d1d8d4cf32fa 100644 --- a/test/test_common/network_utility.h +++ b/test/test_common/network_utility.h @@ -108,14 +108,14 @@ std::string ipVersionToDnsFamily(Network::Address::IpVersion version); /** * Bind a socket to a free port on a loopback address, and return the socket's fd and bound address. - * Enables a test server to reliably "select" a port to listen on. Note that the socket option - * SO_REUSEADDR has NOT been set on the socket. + * Enables a test server to reliably "select" a port to listen on. * @param version the IP version of the loopback address. * @param type the type of socket to be bound. + * @param reuse_port specifies whether the socket option SO_REUSEADDR has been set on the socket. * @returns the address and the fd of the socket bound to that address. */ std::pair -bindFreeLoopbackPort(Address::IpVersion version, Socket::Type type); +bindFreeLoopbackPort(Address::IpVersion version, Socket::Type type, bool reuse_port = false); /** * Create a transport socket for testing purposes. From 1d83b58cea498f47ebc949ebe4d1963a601ee517 Mon Sep 17 00:00:00 2001 From: Yosry Ahmed Date: Tue, 23 Jun 2020 10:08:05 -0700 Subject: [PATCH 418/909] caching: CacheFilter bypasses cache for requests with authorization headers (#11598) Refactored isCacheableRequest and isCacheableResponse out of CacheFilter to a separate CacheFilterUtils class for easier test-ability and extend-ability. Added unit tests for isCacheableRequest. CacheFilter now bypasses cache for requests with authorization headers. Signed-off-by: Yosry Ahmed --- source/extensions/filters/http/cache/BUILD | 11 +++ .../filters/http/cache/cache_filter.cc | 26 ++----- .../filters/http/cache/cache_filter.h | 4 -- .../filters/http/cache/cache_filter_utils.cc | 33 +++++++++ .../filters/http/cache/cache_filter_utils.h | 21 ++++++ .../filters/http/cache/http_cache.cc | 6 +- .../filters/http/cache/http_cache_utils.cc | 12 ++-- .../filters/http/cache/http_cache_utils.h | 2 +- test/extensions/filters/http/cache/BUILD | 10 +++ .../http/cache/cache_filter_utils_test.cc | 67 +++++++++++++++++++ .../http/cache/http_cache_utils_test.cc | 7 +- 11 files changed, 160 insertions(+), 39 deletions(-) create mode 100644 source/extensions/filters/http/cache/cache_filter_utils.cc create mode 100644 source/extensions/filters/http/cache/cache_filter_utils.h create mode 100644 test/extensions/filters/http/cache/cache_filter_utils_test.cc diff --git a/source/extensions/filters/http/cache/BUILD b/source/extensions/filters/http/cache/BUILD index 63327875e739..9fd2e8f27ba9 100644 --- a/source/extensions/filters/http/cache/BUILD +++ b/source/extensions/filters/http/cache/BUILD @@ -17,6 +17,7 @@ envoy_cc_library( srcs = ["cache_filter.cc"], hdrs = ["cache_filter.h"], deps = [ + ":cache_filter_utils_lib", ":http_cache_lib", "//source/common/common:logger_lib", "//source/common/common:macros", @@ -27,6 +28,16 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "cache_filter_utils_lib", + srcs = ["cache_filter_utils.cc"], + hdrs = ["cache_filter_utils.h"], + deps = [ + "//source/common/common:utility_lib", + "//source/common/http:headers_lib", + ], +) + envoy_proto_library( name = "key", srcs = ["key.proto"], diff --git a/source/extensions/filters/http/cache/cache_filter.cc b/source/extensions/filters/http/cache/cache_filter.cc index 53bdc5cd5344..0cdc0dbc65cc 100644 --- 
a/source/extensions/filters/http/cache/cache_filter.cc +++ b/source/extensions/filters/http/cache/cache_filter.cc @@ -2,6 +2,8 @@ #include "common/http/headers.h" +#include "extensions/filters/http/cache/cache_filter_utils.h" + #include "absl/strings/string_view.h" namespace Envoy { @@ -15,26 +17,6 @@ struct CacheResponseCodeDetailValues { using CacheResponseCodeDetails = ConstSingleton; -bool CacheFilter::isCacheableRequest(Http::RequestHeaderMap& headers) { - const Http::HeaderEntry* method = headers.Method(); - const Http::HeaderEntry* forwarded_proto = headers.ForwardedProto(); - const Http::HeaderValues& header_values = Http::Headers::get(); - // TODO(toddmgreer): Also serve HEAD requests from cache. - // TODO(toddmgreer): Check all the other cache-related headers. - return method && forwarded_proto && headers.Path() && headers.Host() && - (method->value() == header_values.MethodValues.Get) && - (forwarded_proto->value() == header_values.SchemeValues.Http || - forwarded_proto->value() == header_values.SchemeValues.Https); -} - -bool CacheFilter::isCacheableResponse(Http::ResponseHeaderMap& headers) { - const absl::string_view cache_control = headers.getCacheControlValue(); - // TODO(toddmgreer): fully check for cacheability. See for example - // https://github.com/apache/incubator-pagespeed-mod/blob/master/pagespeed/kernel/http/caching_headers.h. - return !StringUtil::caseFindToken(cache_control, ",", - Http::Headers::get().CacheControlValues.Private); -} - CacheFilter::CacheFilter(const envoy::extensions::filters::http::cache::v3alpha::CacheConfig&, const std::string&, Stats::Scope&, TimeSource& time_source, HttpCache& http_cache) @@ -55,7 +37,7 @@ Http::FilterHeadersStatus CacheFilter::decodeHeaders(Http::RequestHeaderMap& hea *decoder_callbacks_, headers); return Http::FilterHeadersStatus::Continue; } - if (!isCacheableRequest(headers)) { + if (!CacheFilterUtils::isCacheableRequest(headers)) { ENVOY_STREAM_LOG(debug, "CacheFilter::decodeHeaders ignoring uncacheable request: {}", *decoder_callbacks_, headers); return Http::FilterHeadersStatus::Continue; @@ -78,7 +60,7 @@ Http::FilterHeadersStatus CacheFilter::decodeHeaders(Http::RequestHeaderMap& hea Http::FilterHeadersStatus CacheFilter::encodeHeaders(Http::ResponseHeaderMap& headers, bool end_stream) { - if (lookup_ && isCacheableResponse(headers)) { + if (lookup_ && CacheFilterUtils::isCacheableResponse(headers)) { ENVOY_STREAM_LOG(debug, "CacheFilter::encodeHeaders inserting headers", *encoder_callbacks_); insert_ = cache_.makeInsertContext(std::move(lookup_)); insert_->insertHeaders(headers, end_stream); diff --git a/source/extensions/filters/http/cache/cache_filter.h b/source/extensions/filters/http/cache/cache_filter.h index 212ff8728284..67d8193f00e5 100644 --- a/source/extensions/filters/http/cache/cache_filter.h +++ b/source/extensions/filters/http/cache/cache_filter.h @@ -42,10 +42,6 @@ class CacheFilter : public Http::PassThroughFilter, void onBody(Buffer::InstancePtr&& body); void onTrailers(Http::ResponseTrailerMapPtr&& trailers); - // These don't require private access, but are members per envoy convention. 
- static bool isCacheableRequest(Http::RequestHeaderMap& headers); - static bool isCacheableResponse(Http::ResponseHeaderMap& headers); - TimeSource& time_source_; HttpCache& cache_; LookupContextPtr lookup_; diff --git a/source/extensions/filters/http/cache/cache_filter_utils.cc b/source/extensions/filters/http/cache/cache_filter_utils.cc new file mode 100644 index 000000000000..69dd6515d944 --- /dev/null +++ b/source/extensions/filters/http/cache/cache_filter_utils.cc @@ -0,0 +1,33 @@ +#include "extensions/filters/http/cache/cache_filter_utils.h" + +#include "common/common/utility.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace Cache { + +bool CacheFilterUtils::isCacheableRequest(const Http::RequestHeaderMap& headers) { + const absl::string_view method = headers.getMethodValue(); + const absl::string_view forwarded_proto = headers.getForwardedProtoValue(); + const Http::HeaderValues& header_values = Http::Headers::get(); + // TODO(toddmgreer): Also serve HEAD requests from cache. + // TODO(toddmgreer): Check all the other cache-related headers. + return headers.Path() && headers.Host() && !headers.Authorization() && + (method == header_values.MethodValues.Get) && + (forwarded_proto == header_values.SchemeValues.Http || + forwarded_proto == header_values.SchemeValues.Https); +} + +bool CacheFilterUtils::isCacheableResponse(const Http::ResponseHeaderMap& headers) { + const absl::string_view cache_control = headers.getCacheControlValue(); + // TODO(toddmgreer): fully check for cacheability. See for example + // https://github.com/apache/incubator-pagespeed-mod/blob/master/pagespeed/kernel/http/caching_headers.h. + return !StringUtil::caseFindToken(cache_control, ",", + Http::Headers::get().CacheControlValues.Private); +} + +} // namespace Cache +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/filters/http/cache/cache_filter_utils.h b/source/extensions/filters/http/cache/cache_filter_utils.h new file mode 100644 index 000000000000..6af3ae764d54 --- /dev/null +++ b/source/extensions/filters/http/cache/cache_filter_utils.h @@ -0,0 +1,21 @@ +#pragma once + +#include "common/common/utility.h" +#include "common/http/headers.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace Cache { +class CacheFilterUtils { +public: + // Checks if a request can be served from cache + static bool isCacheableRequest(const Http::RequestHeaderMap& headers); + + // Checks if a response can be stored in cache + static bool isCacheableResponse(const Http::ResponseHeaderMap& headers); +}; +} // namespace Cache +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/filters/http/cache/http_cache.cc b/source/extensions/filters/http/cache/http_cache.cc index 213ae1d7b8ca..fc755ea1e974 100644 --- a/source/extensions/filters/http/cache/http_cache.cc +++ b/source/extensions/filters/http/cache/http_cache.cc @@ -76,12 +76,12 @@ bool LookupRequest::isFresh(const Http::ResponseHeaderMap& response_headers) con const Http::HeaderEntry* cache_control_header = response_headers.CacheControl(); if (cache_control_header) { const SystemTime::duration effective_max_age = - Utils::effectiveMaxAge(cache_control_header->value().getStringView()); - return timestamp_ - Utils::httpTime(response_headers.Date()) < effective_max_age; + 
HttpCacheUtils::effectiveMaxAge(cache_control_header->value().getStringView()); + return timestamp_ - HttpCacheUtils::httpTime(response_headers.Date()) < effective_max_age; } // We didn't find a cache-control header with enough info to determine // freshness, so fall back to the expires header. - return timestamp_ <= Utils::httpTime(response_headers.get(Http::Headers::get().Expires)); + return timestamp_ <= HttpCacheUtils::httpTime(response_headers.get(Http::Headers::get().Expires)); } LookupResult LookupRequest::makeLookupResult(Http::ResponseHeaderMapPtr&& response_headers, diff --git a/source/extensions/filters/http/cache/http_cache_utils.cc b/source/extensions/filters/http/cache/http_cache_utils.cc index 14d3f67d9709..43dd624d8563 100644 --- a/source/extensions/filters/http/cache/http_cache_utils.cc +++ b/source/extensions/filters/http/cache/http_cache_utils.cc @@ -18,7 +18,7 @@ namespace Cache { // // tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" // / "-" / "." / "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA -bool Utils::tchar(char c) { +bool HttpCacheUtils::tchar(char c) { switch (c) { case '!': case '#': @@ -44,7 +44,7 @@ bool Utils::tchar(char c) { // token was present. // // token = 1*tchar -bool Utils::eatToken(absl::string_view& s) { +bool HttpCacheUtils::eatToken(absl::string_view& s) { const absl::string_view::iterator token_end = absl::c_find_if_not(s, &tchar); if (token_end == s.begin()) { return false; @@ -65,7 +65,7 @@ bool Utils::eatToken(absl::string_view& s) { // // For example, the directive "my-extension=42" has an argument of "42", so an // input of "public, my-extension=42, max-age=999" -void Utils::eatDirectiveArgument(absl::string_view& s) { +void HttpCacheUtils::eatDirectiveArgument(absl::string_view& s) { if (s.empty()) { return; } @@ -83,7 +83,7 @@ void Utils::eatDirectiveArgument(absl::string_view& s) { // If s is null or doesn't begin with digits, returns // SystemTime::duration::zero(). If parsing overflows, returns // SystemTime::duration::max(). -SystemTime::duration Utils::eatLeadingDuration(absl::string_view& s) { +SystemTime::duration HttpCacheUtils::eatLeadingDuration(absl::string_view& s) { const absl::string_view::iterator digits_end = absl::c_find_if_not(s, &absl::ascii_isdigit); const size_t digits_length = digits_end - s.begin(); if (digits_length == 0) { @@ -101,7 +101,7 @@ SystemTime::duration Utils::eatLeadingDuration(absl::string_view& s) { // // TODO(#9833): Write a CacheControl class to fully parse the cache-control // header value. Consider sharing with the gzip filter. 
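Put concretely, the freshness decision these helpers feed into compares the response's age against the effective max-age. The toy version below is illustrative only; isFreshSketch is invented for this example, while the real path is LookupRequest::isFresh() calling HttpCacheUtils::effectiveMaxAge() and HttpCacheUtils::httpTime(), with a fallback to the Expires header when Cache-Control gives no usable directive.

#include <chrono>
#include <iostream>

// Toy model of the freshness check: a cached response is fresh while the time
// elapsed since its Date header stays within the effective max-age taken from
// Cache-Control (s-maxage taking precedence over max-age).
bool isFreshSketch(std::chrono::system_clock::time_point now,
                   std::chrono::system_clock::time_point response_date,
                   std::chrono::seconds effective_max_age) {
  return now - response_date < effective_max_age;
}

int main() {
  const auto now = std::chrono::system_clock::now();
  const auto date = now - std::chrono::seconds(120); // response is two minutes old
  std::cout << std::boolalpha
            << isFreshSketch(now, date, std::chrono::seconds(300)) << "\n"  // true
            << isFreshSketch(now, date, std::chrono::seconds(60)) << "\n";  // false
  return 0;
}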
-SystemTime::duration Utils::effectiveMaxAge(absl::string_view cache_control) { +SystemTime::duration HttpCacheUtils::effectiveMaxAge(absl::string_view cache_control) { // The grammar for This Cache-Control header value should be: // Cache-Control = 1#cache-directive // cache-directive = token [ "=" ( token / quoted-string ) ] @@ -160,7 +160,7 @@ SystemTime::duration Utils::effectiveMaxAge(absl::string_view cache_control) { return max_age; } -SystemTime Utils::httpTime(const Http::HeaderEntry* header_entry) { +SystemTime HttpCacheUtils::httpTime(const Http::HeaderEntry* header_entry) { if (!header_entry) { return {}; } diff --git a/source/extensions/filters/http/cache/http_cache_utils.h b/source/extensions/filters/http/cache/http_cache_utils.h index d62599b8f5bb..248b3fda4ace 100644 --- a/source/extensions/filters/http/cache/http_cache_utils.h +++ b/source/extensions/filters/http/cache/http_cache_utils.h @@ -10,7 +10,7 @@ namespace Envoy { namespace Extensions { namespace HttpFilters { namespace Cache { -class Utils { +class HttpCacheUtils { public: // Parses and returns max-age or s-maxage (with s-maxage taking precedence), // parsed into a SystemTime::Duration. Returns SystemTime::Duration::zero if diff --git a/test/extensions/filters/http/cache/BUILD b/test/extensions/filters/http/cache/BUILD index 12553fadd9cf..b43da2f3d190 100644 --- a/test/extensions/filters/http/cache/BUILD +++ b/test/extensions/filters/http/cache/BUILD @@ -46,6 +46,16 @@ envoy_extension_cc_test( ], ) +envoy_extension_cc_test( + name = "cache_filter_utils_test", + srcs = ["cache_filter_utils_test.cc"], + extension_name = "envoy.filters.http.cache", + deps = [ + "//source/extensions/filters/http/cache:cache_filter_utils_lib", + "//test/test_common:utility_lib", + ], +) + envoy_extension_cc_test( name = "config_test", srcs = ["config_test.cc"], diff --git a/test/extensions/filters/http/cache/cache_filter_utils_test.cc b/test/extensions/filters/http/cache/cache_filter_utils_test.cc new file mode 100644 index 000000000000..bc3ef92cf296 --- /dev/null +++ b/test/extensions/filters/http/cache/cache_filter_utils_test.cc @@ -0,0 +1,67 @@ +#include "extensions/filters/http/cache/cache_filter_utils.h" + +#include "test/test_common/utility.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace Cache { +namespace { + +class IsCacheableRequestTest : public testing::Test { +protected: + const Http::TestRequestHeaderMapImpl cacheable_request_headers = {{":path", "/"}, + {":method", "GET"}, + {"x-forwarded-proto", "http"}, + {":authority", "test.com"}}; +}; + +TEST_F(IsCacheableRequestTest, PathHeader) { + Http::TestRequestHeaderMapImpl request_headers = cacheable_request_headers; + EXPECT_TRUE(CacheFilterUtils::isCacheableRequest(request_headers)); + request_headers.removePath(); + EXPECT_FALSE(CacheFilterUtils::isCacheableRequest(request_headers)); +} + +TEST_F(IsCacheableRequestTest, HostHeader) { + Http::TestRequestHeaderMapImpl request_headers = cacheable_request_headers; + EXPECT_TRUE(CacheFilterUtils::isCacheableRequest(request_headers)); + request_headers.removeHost(); + EXPECT_FALSE(CacheFilterUtils::isCacheableRequest(request_headers)); +} + +TEST_F(IsCacheableRequestTest, MethodHeader) { + const Http::HeaderValues& header_values = Http::Headers::get(); + Http::TestRequestHeaderMapImpl request_headers = cacheable_request_headers; + EXPECT_TRUE(CacheFilterUtils::isCacheableRequest(request_headers)); + request_headers.setMethod(header_values.MethodValues.Post); + 
EXPECT_FALSE(CacheFilterUtils::isCacheableRequest(request_headers)); + request_headers.setMethod(header_values.MethodValues.Put); + EXPECT_FALSE(CacheFilterUtils::isCacheableRequest(request_headers)); + request_headers.removeMethod(); + EXPECT_FALSE(CacheFilterUtils::isCacheableRequest(request_headers)); +} + +TEST_F(IsCacheableRequestTest, ForwardedProtoHeader) { + Http::TestRequestHeaderMapImpl request_headers = cacheable_request_headers; + EXPECT_TRUE(CacheFilterUtils::isCacheableRequest(request_headers)); + request_headers.setForwardedProto("ftp"); + EXPECT_FALSE(CacheFilterUtils::isCacheableRequest(request_headers)); + request_headers.removeForwardedProto(); + EXPECT_FALSE(CacheFilterUtils::isCacheableRequest(request_headers)); +} + +TEST_F(IsCacheableRequestTest, AuthorizationHeader) { + Http::TestRequestHeaderMapImpl request_headers = cacheable_request_headers; + EXPECT_TRUE(CacheFilterUtils::isCacheableRequest(request_headers)); + request_headers.setAuthorization("basic YWxhZGRpbjpvcGVuc2VzYW1l"); + EXPECT_FALSE(CacheFilterUtils::isCacheableRequest(request_headers)); +} + +} // namespace +} // namespace Cache +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/http/cache/http_cache_utils_test.cc b/test/extensions/filters/http/cache/http_cache_utils_test.cc index 09a825662277..bd13d392eadd 100644 --- a/test/extensions/filters/http/cache/http_cache_utils_test.cc +++ b/test/extensions/filters/http/cache/http_cache_utils_test.cc @@ -30,10 +30,11 @@ INSTANTIATE_TEST_SUITE_P(Ok, HttpTimeTest, testing::ValuesIn(ok_times)); TEST_P(HttpTimeTest, Ok) { Http::TestResponseHeaderMapImpl response_headers{{"date", GetParam()}}; // Manually confirmed that 784111777 is 11/6/94, 8:46:37. - EXPECT_EQ(784111777, SystemTime::clock::to_time_t(Utils::httpTime(response_headers.Date()))); + EXPECT_EQ(784111777, + SystemTime::clock::to_time_t(HttpCacheUtils::httpTime(response_headers.Date()))); } -TEST(HttpTime, Null) { EXPECT_EQ(Utils::httpTime(nullptr), SystemTime()); } +TEST(HttpTime, Null) { EXPECT_EQ(HttpCacheUtils::httpTime(nullptr), SystemTime()); } struct EffectiveMaxAgeParams { absl::string_view cache_control; @@ -70,7 +71,7 @@ class EffectiveMaxAgeTest : public testing::TestWithParam INSTANTIATE_TEST_SUITE_P(EffectiveMaxAgeTest, EffectiveMaxAgeTest, testing::ValuesIn(params)); TEST_P(EffectiveMaxAgeTest, EffectiveMaxAgeTest) { - EXPECT_EQ(Utils::effectiveMaxAge(GetParam().cache_control), + EXPECT_EQ(HttpCacheUtils::effectiveMaxAge(GetParam().cache_control), std::chrono::seconds(GetParam().effective_max_age_secs)); } From 1cac626ff5473a53b190f4b3c6c5481850edfdcc Mon Sep 17 00:00:00 2001 From: Shriram Rajagopalan Date: Tue, 23 Jun 2020 14:43:56 -0400 Subject: [PATCH 419/909] dns_filter: fix incorrect validation annotation (#11703) The upstream resolver list should not be mandatory as it won't allow Envoy to default to the ambient DNS resolvers to resolve hosts not found in the static dns table. 
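For illustration, a minimal client-resolution fragment (a hedged sketch only: the surrounding DnsFilterConfig message nesting is omitted, and the resolver address is a placeholder rather than anything taken from this patch):

    resolver_timeout: 1s
    # upstream_resolvers is now optional; omit it to fall back to the host's
    # ambient DNS resolvers for names missing from the static dns table.
    # upstream_resolvers:
    # - socket_address: { address: 8.8.8.8, port_value: 53 }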
Risk Level: Low Signed-off-by: Shriram Rajagopalan --- .../filters/udp/dns_filter/v3alpha/dns_filter.proto | 7 ++++--- .../filters/udp/dns_filter/v4alpha/dns_filter.proto | 7 ++++--- .../filters/udp/dns_filter/v3alpha/dns_filter.proto | 7 ++++--- .../filters/udp/dns_filter/v4alpha/dns_filter.proto | 7 ++++--- 4 files changed, 16 insertions(+), 12 deletions(-) diff --git a/api/envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.proto b/api/envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.proto index fda4bbf2c6b9..32103540c1d2 100644 --- a/api/envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.proto +++ b/api/envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.proto @@ -51,9 +51,10 @@ message DnsFilterConfig { // number of retries multiplied by the resolver_timeout. google.protobuf.Duration resolver_timeout = 1 [(validate.rules).duration = {gte {seconds: 1}}]; - // A list of DNS servers to which we can forward queries - repeated config.core.v3.Address upstream_resolvers = 2 - [(validate.rules).repeated = {min_items: 1}]; + // A list of DNS servers to which we can forward queries. If not + // specified, Envoy will use the ambient DNS resolvers in the + // system. + repeated config.core.v3.Address upstream_resolvers = 2; // Controls how many outstanding external lookup contexts the filter tracks. // The context structure allows the filter to respond to every query even if the external diff --git a/api/envoy/extensions/filters/udp/dns_filter/v4alpha/dns_filter.proto b/api/envoy/extensions/filters/udp/dns_filter/v4alpha/dns_filter.proto index 8b7fd74c3b16..54615b8b93ed 100644 --- a/api/envoy/extensions/filters/udp/dns_filter/v4alpha/dns_filter.proto +++ b/api/envoy/extensions/filters/udp/dns_filter/v4alpha/dns_filter.proto @@ -61,9 +61,10 @@ message DnsFilterConfig { // number of retries multiplied by the resolver_timeout. google.protobuf.Duration resolver_timeout = 1 [(validate.rules).duration = {gte {seconds: 1}}]; - // A list of DNS servers to which we can forward queries - repeated config.core.v4alpha.Address upstream_resolvers = 2 - [(validate.rules).repeated = {min_items: 1}]; + // A list of DNS servers to which we can forward queries. If not + // specified, Envoy will use the ambient DNS resolvers in the + // system. + repeated config.core.v4alpha.Address upstream_resolvers = 2; // Controls how many outstanding external lookup contexts the filter tracks. // The context structure allows the filter to respond to every query even if the external diff --git a/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.proto b/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.proto index fda4bbf2c6b9..32103540c1d2 100644 --- a/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.proto +++ b/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.proto @@ -51,9 +51,10 @@ message DnsFilterConfig { // number of retries multiplied by the resolver_timeout. google.protobuf.Duration resolver_timeout = 1 [(validate.rules).duration = {gte {seconds: 1}}]; - // A list of DNS servers to which we can forward queries - repeated config.core.v3.Address upstream_resolvers = 2 - [(validate.rules).repeated = {min_items: 1}]; + // A list of DNS servers to which we can forward queries. If not + // specified, Envoy will use the ambient DNS resolvers in the + // system. 
+ repeated config.core.v3.Address upstream_resolvers = 2; // Controls how many outstanding external lookup contexts the filter tracks. // The context structure allows the filter to respond to every query even if the external diff --git a/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v4alpha/dns_filter.proto b/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v4alpha/dns_filter.proto index 8b7fd74c3b16..54615b8b93ed 100644 --- a/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v4alpha/dns_filter.proto +++ b/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v4alpha/dns_filter.proto @@ -61,9 +61,10 @@ message DnsFilterConfig { // number of retries multiplied by the resolver_timeout. google.protobuf.Duration resolver_timeout = 1 [(validate.rules).duration = {gte {seconds: 1}}]; - // A list of DNS servers to which we can forward queries - repeated config.core.v4alpha.Address upstream_resolvers = 2 - [(validate.rules).repeated = {min_items: 1}]; + // A list of DNS servers to which we can forward queries. If not + // specified, Envoy will use the ambient DNS resolvers in the + // system. + repeated config.core.v4alpha.Address upstream_resolvers = 2; // Controls how many outstanding external lookup contexts the filter tracks. // The context structure allows the filter to respond to every query even if the external From 8ff0471c501f4cd9f0a27a537fc55c6a0c38329e Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Tue, 23 Jun 2020 15:39:16 -0400 Subject: [PATCH 420/909] test: fixing a flake (#11717) When the upstream sends invalid data, it may (based on timing) pick up the disconnect. Also fixing the flake instructions now that bazel cares how large those numbers are. Risk Level: n/a (test only) Testing: 2k runs now pass cleanly Docs Changes: updated deflake instructions to work again. Release Notes: no Signed-off-by: Alyssa Wilk --- test/integration/README.md | 2 +- test/integration/protocol_integration_test.cc | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/test/integration/README.md b/test/integration/README.md index 19cf5cf4dc7a..5554b5ad0564 100644 --- a/test/integration/README.md +++ b/test/integration/README.md @@ -160,7 +160,7 @@ The full command might look something like ``` bazel test //test/integration:http2_upstream_integration_test \ --test_arg=--gtest_filter="IpVersions/Http2UpstreamIntegrationTest.RouterRequestAndResponseWithBodyNoBuffer/IPv6" \ ---jobs 60 --local_ram_resources=100000000000 --local_cpu_resources=100000000000 --runs_per_test=1000 --test_arg="-l trace" +--jobs 60 --local_ram_resources=1000000000 --local_cpu_resources=1000000000 --runs_per_test=1000 --test_arg="-l trace" ``` ## Debugging test flakes diff --git a/test/integration/protocol_integration_test.cc b/test/integration/protocol_integration_test.cc index be6d5b5e48e6..370435c3f967 100644 --- a/test/integration/protocol_integration_test.cc +++ b/test/integration/protocol_integration_test.cc @@ -992,6 +992,10 @@ TEST_P(ProtocolIntegrationTest, 304WithBody) { codec_client_ = makeHttpConnection(lookupPort("http")); + if (upstreamProtocol() == FakeHttpConnection::Type::HTTP1) { + // The invalid data will trigger disconnect. 
+ fake_upstreams_[0]->set_allow_unexpected_disconnects(true); + } auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); waitForNextUpstreamRequest(); From 85491294f335b1e2a4c12247dc87a7545457f0be Mon Sep 17 00:00:00 2001 From: Bibby Date: Tue, 23 Jun 2020 15:42:27 -0400 Subject: [PATCH 421/909] redis: add support for redis 6 acls via the `AUTH username password` command (#11408) Adds an optional username to config and auth commands, and tests Signed-off-by: bibby --- .../network/redis_proxy/v3/redis_proxy.proto | 18 ++- docs/root/version_history/current.rst | 1 + .../network/redis_proxy/v3/redis_proxy.proto | 18 ++- .../clusters/redis/redis_cluster.cc | 5 +- .../extensions/clusters/redis/redis_cluster.h | 1 + .../filters/network/common/redis/client.h | 5 +- .../network/common/redis/client_impl.cc | 13 +- .../network/common/redis/client_impl.h | 5 +- .../filters/network/common/redis/utility.cc | 12 ++ .../filters/network/common/redis/utility.h | 1 + .../network/redis_proxy/command_splitter.h | 9 +- .../redis_proxy/command_splitter_impl.cc | 7 +- .../filters/network/redis_proxy/config.h | 18 ++- .../network/redis_proxy/conn_pool_impl.cc | 7 +- .../network/redis_proxy/conn_pool_impl.h | 1 + .../network/redis_proxy/proxy_filter.cc | 30 +++- .../network/redis_proxy/proxy_filter.h | 5 + .../extensions/health_checkers/redis/redis.cc | 2 +- .../redis/redis_cluster_integration_test.cc | 25 ++- .../clusters/redis/redis_cluster_test.cc | 2 +- .../network/common/redis/client_impl_test.cc | 38 ++++- .../filters/network/common/redis/mocks.h | 2 +- .../redis_proxy/command_lookup_speed_test.cc | 1 + .../redis_proxy/conn_pool_impl_test.cc | 6 +- .../filters/network/redis_proxy/mocks.h | 1 + .../network/redis_proxy/proxy_filter_test.cc | 148 ++++++++++++++++++ .../redis_proxy_integration_test.cc | 30 ++-- .../health_checkers/redis/redis_test.cc | 2 +- tools/spelling/spelling_dictionary.txt | 2 + 29 files changed, 368 insertions(+), 47 deletions(-) diff --git a/api/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto b/api/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto index 143bd4da65e1..658ac1c16b8c 100644 --- a/api/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto +++ b/api/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto @@ -22,7 +22,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Redis Proxy :ref:`configuration overview `. // [#extension: envoy.filters.network.redis_proxy] -// [#next-free-field: 7] +// [#next-free-field: 8] message RedisProxy { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.redis_proxy.v2.RedisProxy"; @@ -234,6 +234,18 @@ message RedisProxy { // client. If an AUTH command is received when the password is not set, then an "ERR Client sent // AUTH, but no password is set" error will be returned. config.core.v3.DataSource downstream_auth_password = 6 [(udpa.annotations.sensitive) = true]; + + // If a username is provided an ACL style AUTH command will be required with a username and password. + // Authenticate Redis client connections locally by forcing downstream clients to issue a `Redis + // AUTH command `_ with this username and the *downstream_auth_password* + // before enabling any other command. If an AUTH command's username and password matches this username + // and the *downstream_auth_password* , an "OK" response will be returned to the client. 
If the AUTH + // command username or password does not match this username or the *downstream_auth_password*, then an + // "WRONGPASS invalid username-password pair" error will be returned. If any other command is received before AUTH when this + // password is set, then a "NOAUTH Authentication required." error response will be sent to the + // client. If an AUTH command is received when the password is not set, then an "ERR Client sent + // AUTH, but no ACL is set" error will be returned. + config.core.v3.DataSource downstream_auth_username = 7 [(udpa.annotations.sensitive) = true]; } // RedisProtocolOptions specifies Redis upstream protocol options. This object is used in @@ -246,4 +258,8 @@ message RedisProtocolOptions { // Upstream server password as defined by the `requirepass` directive // `_ in the server's configuration file. config.core.v3.DataSource auth_password = 1 [(udpa.annotations.sensitive) = true]; + + // Upstream server username as defined by the `user` directive + // `_ in the server's configuration file. + config.core.v3.DataSource auth_username = 2 [(udpa.annotations.sensitive) = true]; } diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index c2a2d7e36d3a..b932f837c873 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -101,6 +101,7 @@ New Features * network filters: added a :ref:`postgres proxy filter `. * network filters: added a :ref:`rocketmq proxy filter `. * ratelimit: added :ref:`API version ` to explicitly set the version of gRPC service endpoint and message to be used. +* redis: added acl support :ref:`downstream_auth_username ` for downstream client ACL authentication, and :ref:`auth_username ` to configure authentication usernames for upstream Redis 6+ server clusters with ACL enabled. * request_id: added to :ref:`always_set_request_id_in_response setting ` to set :ref:`x-request-id ` header in response even if tracing is not forced. diff --git a/generated_api_shadow/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto index b9ca387f4ca5..098f5f4a2ea9 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto @@ -22,7 +22,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Redis Proxy :ref:`configuration overview `. // [#extension: envoy.filters.network.redis_proxy] -// [#next-free-field: 7] +// [#next-free-field: 8] message RedisProxy { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.redis_proxy.v2.RedisProxy"; @@ -230,6 +230,18 @@ message RedisProxy { // AUTH, but no password is set" error will be returned. config.core.v3.DataSource downstream_auth_password = 6 [(udpa.annotations.sensitive) = true]; + // If a username is provided an ACL style AUTH command will be required with a username and password. + // Authenticate Redis client connections locally by forcing downstream clients to issue a `Redis + // AUTH command `_ with this username and the *downstream_auth_password* + // before enabling any other command. If an AUTH command's username and password matches this username + // and the *downstream_auth_password* , an "OK" response will be returned to the client. 
If the AUTH + // command username or password does not match this username or the *downstream_auth_password*, then an + // "WRONGPASS invalid username-password pair" error will be returned. If any other command is received before AUTH when this + // password is set, then a "NOAUTH Authentication required." error response will be sent to the + // client. If an AUTH command is received when the password is not set, then an "ERR Client sent + // AUTH, but no ACL is set" error will be returned. + config.core.v3.DataSource downstream_auth_username = 7 [(udpa.annotations.sensitive) = true]; + string hidden_envoy_deprecated_cluster = 2 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; } @@ -244,4 +256,8 @@ message RedisProtocolOptions { // Upstream server password as defined by the `requirepass` directive // `_ in the server's configuration file. config.core.v3.DataSource auth_password = 1 [(udpa.annotations.sensitive) = true]; + + // Upstream server username as defined by the `user` directive + // `_ in the server's configuration file. + config.core.v3.DataSource auth_username = 2 [(udpa.annotations.sensitive) = true]; } diff --git a/source/extensions/clusters/redis/redis_cluster.cc b/source/extensions/clusters/redis/redis_cluster.cc index 6c0e04bdfbc1..38dbccc60e6a 100644 --- a/source/extensions/clusters/redis/redis_cluster.cc +++ b/source/extensions/clusters/redis/redis_cluster.cc @@ -45,6 +45,8 @@ RedisCluster::RedisCluster( : Config::Utility::translateClusterHosts(cluster.hidden_envoy_deprecated_hosts())), local_info_(factory_context.localInfo()), random_(factory_context.random()), redis_discovery_session_(*this, redis_client_factory), lb_factory_(std::move(lb_factory)), + auth_username_( + NetworkFilters::RedisProxy::ProtocolOptionsConfigImpl::authUsername(info(), api)), auth_password_( NetworkFilters::RedisProxy::ProtocolOptionsConfigImpl::authPassword(info(), api)), cluster_name_(cluster.name()), @@ -278,7 +280,8 @@ void RedisCluster::RedisDiscoverySession::startResolveRedis() { client = std::make_unique(*this); client->host_ = current_host_address_; client->client_ = client_factory_.create(host, dispatcher_, *this, redis_command_stats_, - parent_.info()->statsScope(), parent_.auth_password_); + parent_.info()->statsScope(), parent_.auth_username_, + parent_.auth_password_); client->client_->addConnectionCallbacks(*client); } diff --git a/source/extensions/clusters/redis/redis_cluster.h b/source/extensions/clusters/redis/redis_cluster.h index f716960385c8..ce0c32dd800e 100644 --- a/source/extensions/clusters/redis/redis_cluster.h +++ b/source/extensions/clusters/redis/redis_cluster.h @@ -276,6 +276,7 @@ class RedisCluster : public Upstream::BaseDynamicClusterImpl { Upstream::HostVector hosts_; Upstream::HostMap all_hosts_; + const std::string auth_username_; const std::string auth_password_; const std::string cluster_name_; const Common::Redis::ClusterRefreshManagerSharedPtr refresh_manager_; diff --git a/source/extensions/filters/network/common/redis/client.h b/source/extensions/filters/network/common/redis/client.h index b420438ac55f..0c8a15cb65fc 100644 --- a/source/extensions/filters/network/common/redis/client.h +++ b/source/extensions/filters/network/common/redis/client.h @@ -106,7 +106,7 @@ class Client : public Event::DeferredDeletable { * Initialize the connection. Issue the auth command and readonly command as needed. * @param auth password for upstream host. 
*/ - virtual void initialize(const std::string& auth_password) PURE; + virtual void initialize(const std::string& auth_username, const std::string& auth_password) PURE; }; using ClientPtr = std::unique_ptr; @@ -206,7 +206,8 @@ class ClientFactory { virtual ClientPtr create(Upstream::HostConstSharedPtr host, Event::Dispatcher& dispatcher, const Config& config, const RedisCommandStatsSharedPtr& redis_command_stats, - Stats::Scope& scope, const std::string& auth_password) PURE; + Stats::Scope& scope, const std::string& auth_username, + const std::string& auth_password) PURE; }; } // namespace Client diff --git a/source/extensions/filters/network/common/redis/client_impl.cc b/source/extensions/filters/network/common/redis/client_impl.cc index 9643b725a011..cadfa95a1371 100644 --- a/source/extensions/filters/network/common/redis/client_impl.cc +++ b/source/extensions/filters/network/common/redis/client_impl.cc @@ -285,8 +285,12 @@ void ClientImpl::PendingRequest::cancel() { canceled_ = true; } -void ClientImpl::initialize(const std::string& auth_password) { - if (!auth_password.empty()) { +void ClientImpl::initialize(const std::string& auth_username, const std::string& auth_password) { + if (!auth_username.empty()) { + // Send an AUTH command to the upstream server with username and password. + Utility::AuthRequest auth_request(auth_username, auth_password); + makeRequest(auth_request, null_pool_callbacks); + } else if (!auth_password.empty()) { // Send an AUTH command to the upstream server. Utility::AuthRequest auth_request(auth_password); makeRequest(auth_request, null_pool_callbacks); @@ -304,10 +308,11 @@ ClientFactoryImpl ClientFactoryImpl::instance_; ClientPtr ClientFactoryImpl::create(Upstream::HostConstSharedPtr host, Event::Dispatcher& dispatcher, const Config& config, const RedisCommandStatsSharedPtr& redis_command_stats, - Stats::Scope& scope, const std::string& auth_password) { + Stats::Scope& scope, const std::string& auth_username, + const std::string& auth_password) { ClientPtr client = ClientImpl::create(host, dispatcher, EncoderPtr{new EncoderImpl()}, decoder_factory_, config, redis_command_stats, scope); - client->initialize(auth_password); + client->initialize(auth_username, auth_password); return client; } diff --git a/source/extensions/filters/network/common/redis/client_impl.h b/source/extensions/filters/network/common/redis/client_impl.h index 5d7bcb182ea8..ad5b6231ffb7 100644 --- a/source/extensions/filters/network/common/redis/client_impl.h +++ b/source/extensions/filters/network/common/redis/client_impl.h @@ -87,7 +87,7 @@ class ClientImpl : public Client, public DecoderCallbacks, public Network::Conne PoolRequest* makeRequest(const RespValue& request, ClientCallbacks& callbacks) override; bool active() override { return !pending_requests_.empty(); } void flushBufferAndResetTimer(); - void initialize(const std::string& auth_password) override; + void initialize(const std::string& auth_username, const std::string& auth_password) override; private: friend class RedisClientImplTest; @@ -151,7 +151,8 @@ class ClientFactoryImpl : public ClientFactory { // RedisProxy::ConnPool::ClientFactoryImpl ClientPtr create(Upstream::HostConstSharedPtr host, Event::Dispatcher& dispatcher, const Config& config, const RedisCommandStatsSharedPtr& redis_command_stats, - Stats::Scope& scope, const std::string& auth_password) override; + Stats::Scope& scope, const std::string& auth_username, + const std::string& auth_password) override; static ClientFactoryImpl instance_; diff --git 
a/source/extensions/filters/network/common/redis/utility.cc b/source/extensions/filters/network/common/redis/utility.cc index c652addb3e12..773196dd70e2 100644 --- a/source/extensions/filters/network/common/redis/utility.cc +++ b/source/extensions/filters/network/common/redis/utility.cc @@ -19,6 +19,18 @@ AuthRequest::AuthRequest(const std::string& password) { asArray().swap(values); } +AuthRequest::AuthRequest(const std::string& username, const std::string& password) { + std::vector values(3); + values[0].type(RespType::BulkString); + values[0].asString() = "auth"; + values[1].type(RespType::BulkString); + values[1].asString() = username; + values[2].type(RespType::BulkString); + values[2].asString() = password; + type(RespType::Array); + asArray().swap(values); +} + RespValuePtr makeError(const std::string& error) { Common::Redis::RespValuePtr response(new RespValue()); response->type(Common::Redis::RespType::Error); diff --git a/source/extensions/filters/network/common/redis/utility.h b/source/extensions/filters/network/common/redis/utility.h index b2e77b8e94ab..ca5774d2d3a6 100644 --- a/source/extensions/filters/network/common/redis/utility.h +++ b/source/extensions/filters/network/common/redis/utility.h @@ -13,6 +13,7 @@ namespace Utility { class AuthRequest : public Redis::RespValue { public: + AuthRequest(const std::string& username, const std::string& password); AuthRequest(const std::string& password); }; diff --git a/source/extensions/filters/network/redis_proxy/command_splitter.h b/source/extensions/filters/network/redis_proxy/command_splitter.h index 5e1248d3b500..e03d0a92e137 100644 --- a/source/extensions/filters/network/redis_proxy/command_splitter.h +++ b/source/extensions/filters/network/redis_proxy/command_splitter.h @@ -41,11 +41,18 @@ class SplitCallbacks { virtual bool connectionAllowed() PURE; /** - * Called when an authentication command has been received. + * Called when an authentication command has been received with a password. * @param password supplies the AUTH password provided by the downstream client. */ virtual void onAuth(const std::string& password) PURE; + /** + * Called when an authentication command has been received with a username and password. + * @param username supplies the AUTH username provided by the downstream client. + * @param password supplies the AUTH password provided by the downstream client. + */ + virtual void onAuth(const std::string& username, const std::string& password) PURE; + /** * Called when the response is ready. * @param value supplies the response which is now owned by the callee. 
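Taken together with the proto changes above, the new username fields would be wired up roughly as follows. This is a hedged sketch, not configuration copied from this patch: the cluster name, credentials, and timeout are placeholders, and the protocol-options key layout is assumed from NetworkFilterNames::get().RedisProxy rather than shown here.

    # Downstream: clients must AUTH with this username/password pair.
    name: envoy.filters.network.redis_proxy
    typed_config:
      "@type": type.googleapis.com/envoy.extensions.filters.network.redis_proxy.v3.RedisProxy
      stat_prefix: redis
      settings: { op_timeout: 5s }
      prefix_routes: { catch_all_route: { cluster: redis_cluster } }
      downstream_auth_username: { inline_string: someusername }
      downstream_auth_password: { inline_string: somepassword }

    # Upstream: Envoy authenticates to Redis 6 servers with an ACL user,
    # configured as typed_extension_protocol_options on the target cluster.
    typed_extension_protocol_options:
      envoy.filters.network.redis_proxy:
        "@type": type.googleapis.com/envoy.extensions.filters.network.redis_proxy.v3.RedisProtocolOptions
        auth_username: { inline_string: upstreamuser }
        auth_password: { inline_string: upstreampassword }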
diff --git a/source/extensions/filters/network/redis_proxy/command_splitter_impl.cc b/source/extensions/filters/network/redis_proxy/command_splitter_impl.cc index adfbf7ff9fbe..a5bd89588f51 100644 --- a/source/extensions/filters/network/redis_proxy/command_splitter_impl.cc +++ b/source/extensions/filters/network/redis_proxy/command_splitter_impl.cc @@ -439,7 +439,12 @@ SplitRequestPtr InstanceImpl::makeRequest(Common::Redis::RespValuePtr&& request, onInvalidRequest(callbacks); return nullptr; } - callbacks.onAuth(request->asArray()[1].asString()); + if (request->asArray().size() == 3) { + callbacks.onAuth(request->asArray()[1].asString(), request->asArray()[2].asString()); + } else { + callbacks.onAuth(request->asArray()[1].asString()); + } + return nullptr; } diff --git a/source/extensions/filters/network/redis_proxy/config.h b/source/extensions/filters/network/redis_proxy/config.h index e13d0cda331e..cbb1866018f4 100644 --- a/source/extensions/filters/network/redis_proxy/config.h +++ b/source/extensions/filters/network/redis_proxy/config.h @@ -24,12 +24,27 @@ class ProtocolOptionsConfigImpl : public Upstream::ProtocolOptionsConfig { ProtocolOptionsConfigImpl( const envoy::extensions::filters::network::redis_proxy::v3::RedisProtocolOptions& proto_config) - : auth_password_(proto_config.auth_password()) {} + : auth_username_(proto_config.auth_username()), auth_password_(proto_config.auth_password()) { + } + + std::string authUsername(Api::Api& api) const { + return Config::DataSource::read(auth_username_, true, api); + } std::string authPassword(Api::Api& api) const { return Config::DataSource::read(auth_password_, true, api); } + static const std::string authUsername(const Upstream::ClusterInfoConstSharedPtr info, + Api::Api& api) { + auto options = info->extensionProtocolOptionsTyped( + NetworkFilterNames::get().RedisProxy); + if (options) { + return options->authUsername(api); + } + return EMPTY_STRING; + } + static const std::string authPassword(const Upstream::ClusterInfoConstSharedPtr info, Api::Api& api) { auto options = info->extensionProtocolOptionsTyped( @@ -41,6 +56,7 @@ class ProtocolOptionsConfigImpl : public Upstream::ProtocolOptionsConfig { } private: + envoy::config::core::v3::DataSource auth_username_; envoy::config::core::v3::DataSource auth_password_; }; diff --git a/source/extensions/filters/network/redis_proxy/conn_pool_impl.cc b/source/extensions/filters/network/redis_proxy/conn_pool_impl.cc index 9bc15b0f14a6..b446901c6072 100644 --- a/source/extensions/filters/network/redis_proxy/conn_pool_impl.cc +++ b/source/extensions/filters/network/redis_proxy/conn_pool_impl.cc @@ -74,6 +74,7 @@ InstanceImpl::ThreadLocalPool::ThreadLocalPool(InstanceImpl& parent, Event::Disp cluster_update_handle_ = parent_.cm_.addThreadLocalClusterUpdateCallbacks(*this); Upstream::ThreadLocalCluster* cluster = parent_.cm_.get(cluster_name_); if (cluster != nullptr) { + auth_username_ = ProtocolOptionsConfigImpl::authUsername(cluster->info(), parent_.api_); auth_password_ = ProtocolOptionsConfigImpl::authPassword(cluster->info(), parent_.api_); onClusterAddOrUpdateNonVirtual(*cluster); } @@ -214,9 +215,9 @@ InstanceImpl::ThreadLocalPool::threadLocalActiveClient(Upstream::HostConstShared if (!client) { client = std::make_unique(*this); client->host_ = host; - client->redis_client_ = parent_.client_factory_.create(host, dispatcher_, parent_.config_, - parent_.redis_command_stats_, - *parent_.stats_scope_, auth_password_); + client->redis_client_ = parent_.client_factory_.create( + host, 
dispatcher_, parent_.config_, parent_.redis_command_stats_, *parent_.stats_scope_, + auth_username_, auth_password_); client->redis_client_->addConnectionCallbacks(*client); } return client; diff --git a/source/extensions/filters/network/redis_proxy/conn_pool_impl.h b/source/extensions/filters/network/redis_proxy/conn_pool_impl.h index 6dcb695efac8..6fa31f2ca0ea 100644 --- a/source/extensions/filters/network/redis_proxy/conn_pool_impl.h +++ b/source/extensions/filters/network/redis_proxy/conn_pool_impl.h @@ -157,6 +157,7 @@ class InstanceImpl : public Instance { std::unordered_map client_map_; Envoy::Common::CallbackHandle* host_set_member_update_cb_handle_{}; std::unordered_map host_address_map_; + std::string auth_username_; std::string auth_password_; std::list created_via_redirect_hosts_; std::list clients_to_drain_; diff --git a/source/extensions/filters/network/redis_proxy/proxy_filter.cc b/source/extensions/filters/network/redis_proxy/proxy_filter.cc index 7782485d5ec9..aa2f558cc51a 100644 --- a/source/extensions/filters/network/redis_proxy/proxy_filter.cc +++ b/source/extensions/filters/network/redis_proxy/proxy_filter.cc @@ -23,6 +23,8 @@ ProxyFilterConfig::ProxyFilterConfig( : drain_decision_(drain_decision), runtime_(runtime), stat_prefix_(fmt::format("redis.{}.", config.stat_prefix())), stats_(generateStats(stat_prefix_, scope)), + downstream_auth_username_( + Config::DataSource::read(config.downstream_auth_username(), true, api)), downstream_auth_password_( Config::DataSource::read(config.downstream_auth_password(), true, api)) {} @@ -38,7 +40,8 @@ ProxyFilter::ProxyFilter(Common::Redis::DecoderFactory& factory, config_(config) { config_->stats_.downstream_cx_total_.inc(); config_->stats_.downstream_cx_active_.inc(); - connection_allowed_ = config_->downstream_auth_password_.empty(); + connection_allowed_ = + config_->downstream_auth_username_.empty() && config_->downstream_auth_password_.empty(); } ProxyFilter::~ProxyFilter() { @@ -96,6 +99,31 @@ void ProxyFilter::onAuth(PendingRequest& request, const std::string& password) { request.onResponse(std::move(response)); } +void ProxyFilter::onAuth(PendingRequest& request, const std::string& username, + const std::string& password) { + Common::Redis::RespValuePtr response{new Common::Redis::RespValue()}; + if (config_->downstream_auth_username_.empty() && config_->downstream_auth_password_.empty()) { + response->type(Common::Redis::RespType::Error); + response->asString() = "ERR Client sent AUTH, but no username-password pair is set"; + } else if (config_->downstream_auth_username_.empty() && username == "default" && + password == config_->downstream_auth_password_) { + // empty username and "default" are synonymous in Redis 6 ACLs + response->type(Common::Redis::RespType::SimpleString); + response->asString() = "OK"; + connection_allowed_ = true; + } else if (username == config_->downstream_auth_username_ && + password == config_->downstream_auth_password_) { + response->type(Common::Redis::RespType::SimpleString); + response->asString() = "OK"; + connection_allowed_ = true; + } else { + response->type(Common::Redis::RespType::Error); + response->asString() = "WRONGPASS invalid username-password pair"; + connection_allowed_ = false; + } + request.onResponse(std::move(response)); +} + void ProxyFilter::onResponse(PendingRequest& request, Common::Redis::RespValuePtr&& value) { ASSERT(!pending_requests_.empty()); request.pending_response_ = std::move(value); diff --git a/source/extensions/filters/network/redis_proxy/proxy_filter.h 
b/source/extensions/filters/network/redis_proxy/proxy_filter.h index 23ebd3e0f039..1694a2a0640e 100644 --- a/source/extensions/filters/network/redis_proxy/proxy_filter.h +++ b/source/extensions/filters/network/redis_proxy/proxy_filter.h @@ -57,6 +57,7 @@ class ProxyFilterConfig { const std::string stat_prefix_; const std::string redis_drain_close_runtime_key_{"redis.drain_close_enabled"}; ProxyStats stats_; + const std::string downstream_auth_username_; const std::string downstream_auth_password_; private: @@ -100,6 +101,9 @@ class ProxyFilter : public Network::ReadFilter, // RedisProxy::CommandSplitter::SplitCallbacks bool connectionAllowed() override { return parent_.connectionAllowed(); } void onAuth(const std::string& password) override { parent_.onAuth(*this, password); } + void onAuth(const std::string& username, const std::string& password) override { + parent_.onAuth(*this, username, password); + } void onResponse(Common::Redis::RespValuePtr&& value) override { parent_.onResponse(*this, std::move(value)); } @@ -110,6 +114,7 @@ class ProxyFilter : public Network::ReadFilter, }; void onAuth(PendingRequest& request, const std::string& password); + void onAuth(PendingRequest& request, const std::string& username, const std::string& password); void onResponse(PendingRequest& request, Common::Redis::RespValuePtr&& value); Common::Redis::DecoderPtr decoder_; diff --git a/source/extensions/health_checkers/redis/redis.cc b/source/extensions/health_checkers/redis/redis.cc index d508193445da..2f403b9742e5 100644 --- a/source/extensions/health_checkers/redis/redis.cc +++ b/source/extensions/health_checkers/redis/redis.cc @@ -65,7 +65,7 @@ void RedisHealthChecker::RedisActiveHealthCheckSession::onInterval() { if (!client_) { client_ = parent_.client_factory_.create( host_, parent_.dispatcher_, *this, redis_command_stats_, - parent_.cluster_.info()->statsScope(), parent_.auth_password_); + parent_.cluster_.info()->statsScope(), "", parent_.auth_password_); client_->addConnectionCallbacks(*this); } diff --git a/test/extensions/clusters/redis/redis_cluster_integration_test.cc b/test/extensions/clusters/redis/redis_cluster_integration_test.cc index 1bcb95142309..ed83eab698d7 100644 --- a/test/extensions/clusters/redis/redis_cluster_integration_test.cc +++ b/test/extensions/clusters/redis/redis_cluster_integration_test.cc @@ -194,7 +194,7 @@ class RedisClusterIntegrationTest : public testing::TestWithParamwaitForData(auth_command.size() + request.size(), &proxy_to_server)); // The original request should be the same as the data received by the server. 
@@ -239,13 +242,14 @@ class RedisClusterIntegrationTest : public testing::TestWithParamclose(); EXPECT_TRUE(fake_upstream_connection->close()); } void expectCallClusterSlot(int stream_index, std::string& response, + const std::string& auth_username = "", const std::string& auth_password = "") { std::string cluster_slot_request = makeBulkStringArray({"CLUSTER", "SLOTS"}); @@ -259,10 +263,18 @@ class RedisClusterIntegrationTest : public testing::TestWithParamwaitForData(cluster_slot_request.size(), &proxied_cluster_slot_request)); EXPECT_EQ(cluster_slot_request, proxied_cluster_slot_request); - } else { + } else if (auth_username.empty()) { std::string auth_request = makeBulkStringArray({"auth", auth_password}); std::string ok = "+OK\r\n"; + EXPECT_TRUE(fake_upstream_connection_->waitForData( + auth_request.size() + cluster_slot_request.size(), &proxied_cluster_slot_request)); + EXPECT_EQ(auth_request + cluster_slot_request, proxied_cluster_slot_request); + EXPECT_TRUE(fake_upstream_connection_->write(ok)); + } else { + std::string auth_request = makeBulkStringArray({"auth", auth_username, auth_password}); + std::string ok = "+OK\r\n"; + EXPECT_TRUE(fake_upstream_connection_->waitForData( auth_request.size() + cluster_slot_request.size(), &proxied_cluster_slot_request)); EXPECT_EQ(auth_request + cluster_slot_request, proxied_cluster_slot_request); @@ -525,7 +537,7 @@ TEST_P(RedisClusterWithAuthIntegrationTest, SingleSlotMasterReplica) { on_server_init_function_ = [this]() { std::string cluster_slot_response = singleSlotMasterReplica( fake_upstreams_[0]->localAddress()->ip(), fake_upstreams_[1]->localAddress()->ip()); - expectCallClusterSlot(0, cluster_slot_response, "somepassword"); + expectCallClusterSlot(0, cluster_slot_response, "", "somepassword"); }; initialize(); @@ -534,7 +546,8 @@ TEST_P(RedisClusterWithAuthIntegrationTest, SingleSlotMasterReplica) { FakeRawConnectionPtr fake_upstream_connection; roundtripToUpstreamStep(fake_upstreams_[random_index_], makeBulkStringArray({"get", "foo"}), - "$3\r\nbar\r\n", redis_client, fake_upstream_connection, "somepassword"); + "$3\r\nbar\r\n", redis_client, fake_upstream_connection, "", + "somepassword"); redis_client->close(); EXPECT_TRUE(fake_upstream_connection->close()); diff --git a/test/extensions/clusters/redis/redis_cluster_test.cc b/test/extensions/clusters/redis/redis_cluster_test.cc index 6b9a87ab778a..5936773382ad 100644 --- a/test/extensions/clusters/redis/redis_cluster_test.cc +++ b/test/extensions/clusters/redis/redis_cluster_test.cc @@ -65,7 +65,7 @@ class RedisClusterTest : public testing::Test, create(Upstream::HostConstSharedPtr host, Event::Dispatcher&, const Extensions::NetworkFilters::Common::Redis::Client::Config&, const Extensions::NetworkFilters::Common::Redis::RedisCommandStatsSharedPtr&, - Stats::Scope&, const std::string&) override { + Stats::Scope&, const std::string&, const std::string&) override { EXPECT_EQ(22120, host->address()->ip()->port()); return Extensions::NetworkFilters::Common::Redis::Client::ClientPtr{ create_(host->address()->asString())}; diff --git a/test/extensions/filters/network/common/redis/client_impl_test.cc b/test/extensions/filters/network/common/redis/client_impl_test.cc index c9028a1da42a..d5402aeaa6db 100644 --- a/test/extensions/filters/network/common/redis/client_impl_test.cc +++ b/test/extensions/filters/network/common/redis/client_impl_test.cc @@ -116,7 +116,7 @@ class RedisClientImplTest : public testing::Test, Common::Redis::RespValue readonly_request = 
Utility::ReadOnlyRequest::instance(); EXPECT_CALL(*encoder_, encode(Eq(readonly_request), _)); EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false)); - client_->initialize(auth_password_); + client_->initialize(auth_username_, auth_password_); EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_rq_total_.value()); EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_rq_active_.value()); @@ -143,6 +143,7 @@ class RedisClientImplTest : public testing::Test, NiceMock stats_; Stats::ScopePtr stats_scope_; Common::Redis::RedisCommandStatsSharedPtr redis_command_stats_; + std::string auth_username_; std::string auth_password_; }; @@ -290,7 +291,7 @@ TEST_F(RedisClientImplTest, Basic) { setup(); - client_->initialize(auth_password_); + client_->initialize(auth_username_, auth_password_); Common::Redis::RespValue request1; MockClientCallbacks callbacks1; @@ -370,7 +371,7 @@ TEST_F(RedisClientImplTest, CommandStatsDisabledSingleRequest) { setup(); - client_->initialize(auth_password_); + client_->initialize(auth_username_, auth_password_); std::string get_command = "get"; @@ -426,7 +427,7 @@ TEST_F(RedisClientImplTest, CommandStatsEnabledTwoRequests) { setup(std::make_unique()); - client_->initialize(auth_password_); + client_->initialize(auth_username_, auth_password_); std::string get_command = "get"; @@ -511,7 +512,29 @@ TEST_F(RedisClientImplTest, InitializedWithAuthPassword) { Utility::AuthRequest auth_request(auth_password_); EXPECT_CALL(*encoder_, encode(Eq(auth_request), _)); EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false)); - client_->initialize(auth_password_); + client_->initialize(auth_username_, auth_password_); + + EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_rq_total_.value()); + EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_rq_active_.value()); + EXPECT_EQ(1UL, host_->stats_.rq_total_.value()); + EXPECT_EQ(1UL, host_->stats_.rq_active_.value()); + + EXPECT_CALL(*upstream_connection_, close(Network::ConnectionCloseType::NoFlush)); + EXPECT_CALL(*connect_or_op_timer_, disableTimer()); + client_->close(); +} + +TEST_F(RedisClientImplTest, InitializedWithAuthAcl) { + InSequence s; + + setup(); + + auth_username_ = "testing username"; + auth_password_ = "testing password"; + Utility::AuthRequest auth_request(auth_username_, auth_password_); + EXPECT_CALL(*encoder_, encode(Eq(auth_request), _)); + EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false)); + client_->initialize(auth_username_, auth_password_); EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_rq_total_.value()); EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_rq_active_.value()); @@ -1188,9 +1211,10 @@ TEST(RedisClientFactoryImplTest, Basic) { Stats::IsolatedStoreImpl stats_; auto redis_command_stats = Common::Redis::RedisCommandStats::createRedisCommandStats(stats_.symbolTable()); + const std::string auth_username; const std::string auth_password; - ClientPtr client = - factory.create(host, dispatcher, config, redis_command_stats, stats_, auth_password); + ClientPtr client = factory.create(host, dispatcher, config, redis_command_stats, stats_, + auth_username, auth_password); client->close(); } } // namespace Client diff --git a/test/extensions/filters/network/common/redis/mocks.h b/test/extensions/filters/network/common/redis/mocks.h index 0561c7bb57e0..4f8f11bdaa4b 100644 --- a/test/extensions/filters/network/common/redis/mocks.h +++ b/test/extensions/filters/network/common/redis/mocks.h @@ -87,7 +87,7 @@ class MockClient : public Client { MOCK_METHOD(void, close, ()); MOCK_METHOD(PoolRequest*, 
makeRequest_, (const Common::Redis::RespValue& request, ClientCallbacks& callbacks)); - MOCK_METHOD(void, initialize, (const std::string& password)); + MOCK_METHOD(void, initialize, (const std::string& username, const std::string& password)); std::list callbacks_; std::list client_callbacks_; diff --git a/test/extensions/filters/network/redis_proxy/command_lookup_speed_test.cc b/test/extensions/filters/network/redis_proxy/command_lookup_speed_test.cc index a6ccc4f1b59d..edf29c973092 100644 --- a/test/extensions/filters/network/redis_proxy/command_lookup_speed_test.cc +++ b/test/extensions/filters/network/redis_proxy/command_lookup_speed_test.cc @@ -28,6 +28,7 @@ class NoOpSplitCallbacks : public CommandSplitter::SplitCallbacks { bool connectionAllowed() override { return true; } void onAuth(const std::string&) override {} + void onAuth(const std::string&, const std::string&) override {} void onResponse(Common::Redis::RespValuePtr&&) override {} }; diff --git a/test/extensions/filters/network/redis_proxy/conn_pool_impl_test.cc b/test/extensions/filters/network/redis_proxy/conn_pool_impl_test.cc index 639a9b8313c8..f3350db7cd5f 100644 --- a/test/extensions/filters/network/redis_proxy/conn_pool_impl_test.cc +++ b/test/extensions/filters/network/redis_proxy/conn_pool_impl_test.cc @@ -84,6 +84,7 @@ class RedisConnPoolImplTest : public testing::Test, public Common::Redis::Client read_policy_), api_, std::move(store), redis_command_stats, cluster_refresh_manager_); // Set the authentication password for this connection pool. + conn_pool_impl->tls_->getTyped().auth_username_ = auth_username_; conn_pool_impl->tls_->getTyped().auth_password_ = auth_password_; conn_pool_ = std::move(conn_pool_impl); test_address_ = Network::Utility::resolveUrl("tcp://127.0.0.1:3000"); @@ -199,7 +200,9 @@ class RedisConnPoolImplTest : public testing::Test, public Common::Redis::Client Common::Redis::Client::ClientPtr create(Upstream::HostConstSharedPtr host, Event::Dispatcher&, const Common::Redis::Client::Config&, const Common::Redis::RedisCommandStatsSharedPtr&, - Stats::Scope&, const std::string& password) override { + Stats::Scope&, const std::string& username, + const std::string& password) override { + EXPECT_EQ(auth_username_, username); EXPECT_EQ(auth_password_, password); return Common::Redis::Client::ClientPtr{create_(host)}; } @@ -273,6 +276,7 @@ class RedisConnPoolImplTest : public testing::Test, public Common::Redis::Client Upstream::ClusterUpdateCallbacks* update_callbacks_{}; Common::Redis::Client::MockClient* client_{}; Network::Address::InstanceConstSharedPtr test_address_; + std::string auth_username_; std::string auth_password_; NiceMock api_; envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::ConnPoolSettings::ReadPolicy diff --git a/test/extensions/filters/network/redis_proxy/mocks.h b/test/extensions/filters/network/redis_proxy/mocks.h index 5bb208bfa901..b093ad35b9b9 100644 --- a/test/extensions/filters/network/redis_proxy/mocks.h +++ b/test/extensions/filters/network/redis_proxy/mocks.h @@ -101,6 +101,7 @@ class MockSplitCallbacks : public SplitCallbacks { MOCK_METHOD(bool, connectionAllowed, ()); MOCK_METHOD(void, onAuth, (const std::string& password)); + MOCK_METHOD(void, onAuth, (const std::string& username, const std::string& password)); MOCK_METHOD(void, onResponse_, (Common::Redis::RespValuePtr & value)); }; diff --git a/test/extensions/filters/network/redis_proxy/proxy_filter_test.cc b/test/extensions/filters/network/redis_proxy/proxy_filter_test.cc index 
72cebf97fcd2..f094c02b665a 100644 --- a/test/extensions/filters/network/redis_proxy/proxy_filter_test.cc +++ b/test/extensions/filters/network/redis_proxy/proxy_filter_test.cc @@ -63,6 +63,7 @@ TEST_F(RedisProxyFilterConfigTest, Normal) { parseProtoFromYaml(yaml_string); ProxyFilterConfig config(proto_config, store_, drain_decision_, runtime_, api_); EXPECT_EQ("redis.foo.", config.stat_prefix_); + EXPECT_TRUE(config.downstream_auth_username_.empty()); EXPECT_TRUE(config.downstream_auth_password_.empty()); } @@ -93,6 +94,27 @@ TEST_F(RedisProxyFilterConfigTest, DownstreamAuthPasswordSet) { EXPECT_EQ(config.downstream_auth_password_, "somepassword"); } +TEST_F(RedisProxyFilterConfigTest, DownstreamAuthAclSet) { + const std::string yaml_string = R"EOF( + prefix_routes: + catch_all_route: + cluster: fake_cluster + stat_prefix: foo + settings: + op_timeout: 0.01s + downstream_auth_username: + inline_string: someusername + downstream_auth_password: + inline_string: somepassword + )EOF"; + + envoy::extensions::filters::network::redis_proxy::v3::RedisProxy proto_config = + parseProtoFromYaml(yaml_string); + ProxyFilterConfig config(proto_config, store_, drain_decision_, runtime_, api_); + EXPECT_EQ(config.downstream_auth_username_, "someusername"); + EXPECT_EQ(config.downstream_auth_password_, "somepassword"); +} + class RedisProxyFilterTest : public testing::Test, public Common::Redis::DecoderFactory { public: static constexpr const char* DefaultConfig = R"EOF( @@ -310,6 +332,33 @@ TEST_F(RedisProxyFilterTest, AuthWhenNotRequired) { EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(fake_data, false)); } +TEST_F(RedisProxyFilterTest, AuthAclWhenNotRequired) { + InSequence s; + + Buffer::OwnedImpl fake_data; + Common::Redis::RespValuePtr request(new Common::Redis::RespValue()); + EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void { + decoder_callbacks_->onRespValue(std::move(request)); + })); + EXPECT_CALL(splitter_, makeRequest_(Ref(*request), _)) + .WillOnce( + Invoke([&](const Common::Redis::RespValue&, + CommandSplitter::SplitCallbacks& callbacks) -> CommandSplitter::SplitRequest* { + EXPECT_TRUE(callbacks.connectionAllowed()); + Common::Redis::RespValuePtr error(new Common::Redis::RespValue()); + error->type(Common::Redis::RespType::Error); + error->asString() = "ERR Client sent AUTH, but no username-password pair is set"; + EXPECT_CALL(*encoder_, encode(Eq(ByRef(*error)), _)); + EXPECT_CALL(filter_callbacks_.connection_, write(_, _)); + callbacks.onAuth("foo", "bar"); + // callbacks cannot be accessed now. 
+ EXPECT_TRUE(filter_->connectionAllowed()); + return nullptr; + })); + + EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(fake_data, false)); +} + const std::string downstream_auth_password_config = R"EOF( prefix_routes: catch_all_route: @@ -380,6 +429,105 @@ TEST_F(RedisProxyFilterWithAuthPasswordTest, AuthPasswordIncorrect) { EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(fake_data, false)); } +const std::string downstream_auth_acl_config = R"EOF( +prefix_routes: + catch_all_route: + cluster: fake_cluster +stat_prefix: foo +settings: + op_timeout: 0.01s +downstream_auth_username: + inline_string: someusername +downstream_auth_password: + inline_string: somepassword +)EOF"; + +class RedisProxyFilterWithAuthAclTest : public RedisProxyFilterTest { +public: + RedisProxyFilterWithAuthAclTest() : RedisProxyFilterTest(downstream_auth_acl_config) {} +}; + +TEST_F(RedisProxyFilterWithAuthAclTest, AuthAclCorrect) { + InSequence s; + + Buffer::OwnedImpl fake_data; + Common::Redis::RespValuePtr request(new Common::Redis::RespValue()); + EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void { + decoder_callbacks_->onRespValue(std::move(request)); + })); + EXPECT_CALL(splitter_, makeRequest_(Ref(*request), _)) + .WillOnce( + Invoke([&](const Common::Redis::RespValue&, + CommandSplitter::SplitCallbacks& callbacks) -> CommandSplitter::SplitRequest* { + EXPECT_FALSE(callbacks.connectionAllowed()); + Common::Redis::RespValuePtr reply(new Common::Redis::RespValue()); + reply->type(Common::Redis::RespType::SimpleString); + reply->asString() = "OK"; + EXPECT_CALL(*encoder_, encode(Eq(ByRef(*reply)), _)); + EXPECT_CALL(filter_callbacks_.connection_, write(_, _)); + callbacks.onAuth("someusername", "somepassword"); + // callbacks cannot be accessed now. + EXPECT_TRUE(filter_->connectionAllowed()); + return nullptr; + })); + + EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(fake_data, false)); +} + +TEST_F(RedisProxyFilterWithAuthAclTest, AuthAclUsernameIncorrect) { + InSequence s; + + Buffer::OwnedImpl fake_data; + Common::Redis::RespValuePtr request(new Common::Redis::RespValue()); + EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void { + decoder_callbacks_->onRespValue(std::move(request)); + })); + EXPECT_CALL(splitter_, makeRequest_(Ref(*request), _)) + .WillOnce( + Invoke([&](const Common::Redis::RespValue&, + CommandSplitter::SplitCallbacks& callbacks) -> CommandSplitter::SplitRequest* { + EXPECT_FALSE(callbacks.connectionAllowed()); + Common::Redis::RespValuePtr reply(new Common::Redis::RespValue()); + reply->type(Common::Redis::RespType::Error); + reply->asString() = "WRONGPASS invalid username-password pair"; + EXPECT_CALL(*encoder_, encode(Eq(ByRef(*reply)), _)); + EXPECT_CALL(filter_callbacks_.connection_, write(_, _)); + callbacks.onAuth("wrongusername", "somepassword"); + // callbacks cannot be accessed now. 
+ EXPECT_FALSE(filter_->connectionAllowed()); + return nullptr; + })); + + EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(fake_data, false)); +} + +TEST_F(RedisProxyFilterWithAuthAclTest, AuthAclPasswordIncorrect) { + InSequence s; + + Buffer::OwnedImpl fake_data; + Common::Redis::RespValuePtr request(new Common::Redis::RespValue()); + EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void { + decoder_callbacks_->onRespValue(std::move(request)); + })); + EXPECT_CALL(splitter_, makeRequest_(Ref(*request), _)) + .WillOnce( + Invoke([&](const Common::Redis::RespValue&, + CommandSplitter::SplitCallbacks& callbacks) -> CommandSplitter::SplitRequest* { + EXPECT_FALSE(callbacks.connectionAllowed()); + Common::Redis::RespValuePtr reply(new Common::Redis::RespValue()); + reply->type(Common::Redis::RespType::Error); + reply->asString() = "WRONGPASS invalid username-password pair"; + EXPECT_CALL(*encoder_, encode(Eq(ByRef(*reply)), _)); + EXPECT_CALL(filter_callbacks_.connection_, write(_, _)); + callbacks.onAuth("someusername", "wrongpassword"); + // callbacks cannot be accessed now. + EXPECT_FALSE(filter_->connectionAllowed()); + return nullptr; + })); + + EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(fake_data, false)); +} + } // namespace RedisProxy } // namespace NetworkFilters } // namespace Extensions diff --git a/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc b/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc index 7625f406b1ad..f44f10b9d1de 100644 --- a/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc +++ b/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc @@ -355,12 +355,13 @@ class RedisProxyIntegrationTest : public testing::TestWithParamclearData(); redis_client->write(request); expectUpstreamRequestResponse(upstream, request, response, fake_upstream_connection, - auth_password); + auth_username, auth_password); redis_client->waitForData(response); // The original response should be received by the fake Redis client. @@ -489,7 +492,8 @@ void RedisProxyIntegrationTest::roundtripToUpstreamStep( void RedisProxyIntegrationTest::expectUpstreamRequestResponse( FakeUpstreamPtr& upstream, const std::string& request, const std::string& response, - FakeRawConnectionPtr& fake_upstream_connection, const std::string& auth_password) { + FakeRawConnectionPtr& fake_upstream_connection, const std::string& auth_username, + const std::string& auth_password) { std::string proxy_to_server; bool expect_auth_command = false; std::string ok = "+OK\r\n"; @@ -499,7 +503,9 @@ void RedisProxyIntegrationTest::expectUpstreamRequestResponse( EXPECT_TRUE(upstream->waitForRawConnection(fake_upstream_connection)); } if (expect_auth_command) { - std::string auth_command = makeBulkStringArray({"auth", auth_password}); + std::string auth_command = (auth_username.empty()) + ? makeBulkStringArray({"auth", auth_password}) + : makeBulkStringArray({"auth", auth_username, auth_password}); EXPECT_TRUE(fake_upstream_connection->waitForData(auth_command.size() + request.size(), &proxy_to_server)); // The original request should be the same as the data received by the server. 
@@ -522,7 +528,8 @@ void RedisProxyIntegrationTest::simpleRoundtripToUpstream(FakeUpstreamPtr& upstr IntegrationTcpClientPtr redis_client = makeTcpConnection(lookupPort("redis_proxy")); FakeRawConnectionPtr fake_upstream_connection; - roundtripToUpstreamStep(upstream, request, response, redis_client, fake_upstream_connection, ""); + roundtripToUpstreamStep(upstream, request, response, redis_client, fake_upstream_connection, "", + ""); EXPECT_TRUE(fake_upstream_connection->close()); redis_client->close(); @@ -621,10 +628,11 @@ TEST_P(RedisProxyWithCommandStatsIntegrationTest, MGETRequestAndResponse) { // Make GET request to upstream (MGET is turned into GETs for upstream) FakeUpstreamPtr& upstream = fake_upstreams_[0]; FakeRawConnectionPtr fake_upstream_connection; + std::string auth_username = ""; std::string auth_password = ""; std::string upstream_request = makeBulkStringArray({"get", "foo"}); expectUpstreamRequestResponse(upstream, upstream_request, upstream_response, - fake_upstream_connection, auth_password); + fake_upstream_connection, auth_username, auth_password); // Downstream response for MGET redis_client->waitForData(downstream_response); @@ -910,7 +918,7 @@ TEST_P(RedisProxyWithDownstreamAuthIntegrationTest, ErrorsUntilCorrectPasswordSe proxyResponseStep(makeBulkStringArray({"auth", "somepassword"}), "+OK\r\n", redis_client); roundtripToUpstreamStep(fake_upstreams_[0], makeBulkStringArray({"get", "foo"}), "$3\r\nbar\r\n", - redis_client, fake_upstream_connection, ""); + redis_client, fake_upstream_connection, "", ""); EXPECT_TRUE(fake_upstream_connection->close()); redis_client->close(); @@ -927,16 +935,16 @@ TEST_P(RedisProxyWithRoutesAndAuthPasswordsIntegrationTest, TransparentAuthentic // roundtrip to cluster_0 (catch_all route) roundtripToUpstreamStep(fake_upstreams_[0], makeBulkStringArray({"get", "toto"}), "$3\r\nbar\r\n", - redis_client, fake_upstream_connection[0], "cluster_0_password"); + redis_client, fake_upstream_connection[0], "", "cluster_0_password"); // roundtrip to cluster_1 (prefix "foo:" route) roundtripToUpstreamStep(fake_upstreams_[1], makeBulkStringArray({"get", "foo:123"}), - "$3\r\nbar\r\n", redis_client, fake_upstream_connection[1], + "$3\r\nbar\r\n", redis_client, fake_upstream_connection[1], "", "cluster_1_password"); // roundtrip to cluster_2 (prefix "baz:" route) roundtripToUpstreamStep(fake_upstreams_[2], makeBulkStringArray({"get", "baz:123"}), - "$3\r\nbar\r\n", redis_client, fake_upstream_connection[2], + "$3\r\nbar\r\n", redis_client, fake_upstream_connection[2], "", "cluster_2_password"); EXPECT_TRUE(fake_upstream_connection[0]->close()); diff --git a/test/extensions/health_checkers/redis/redis_test.cc b/test/extensions/health_checkers/redis/redis_test.cc index 9d413879998c..7b1b850f0033 100644 --- a/test/extensions/health_checkers/redis/redis_test.cc +++ b/test/extensions/health_checkers/redis/redis_test.cc @@ -157,7 +157,7 @@ class RedisHealthCheckerTest create(Upstream::HostConstSharedPtr, Event::Dispatcher&, const Extensions::NetworkFilters::Common::Redis::Client::Config&, const Extensions::NetworkFilters::Common::Redis::RedisCommandStatsSharedPtr&, - Stats::Scope&, const std::string&) override { + Stats::Scope&, const std::string&, const std::string&) override { return Extensions::NetworkFilters::Common::Redis::Client::ClientPtr{create_()}; } diff --git a/tools/spelling/spelling_dictionary.txt b/tools/spelling/spelling_dictionary.txt index a9c2b9af21ef..acdc35b82743 100644 --- a/tools/spelling/spelling_dictionary.txt +++ 
b/tools/spelling/spelling_dictionary.txt @@ -4,6 +4,7 @@ # are allowed for any otherwise correctly spelled word. ABI ACK +ACL AES AFAICT ALPN @@ -337,6 +338,7 @@ WASM WAVM WIP WKT +WRONGPASS WRR WS WSA From 02a526257a013e80bc508399446aac0234c3ec4e Mon Sep 17 00:00:00 2001 From: htuch Date: Tue, 23 Jun 2020 16:06:18 -0400 Subject: [PATCH 422/909] repokitteh: tighten API path expression. (#11712) Reduce the number of false positives we see during review. API shepherds were being flagged on files in include/envoy/api for example. Signed-off-by: Harvey Tuch --- repokitteh.star | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/repokitteh.star b/repokitteh.star index 342a3f9675e3..e902d9eae2ea 100644 --- a/repokitteh.star +++ b/repokitteh.star @@ -17,13 +17,13 @@ use( }, { "owner": "envoyproxy/api-shepherds!", - "path": "api/", + "path": "api/envoy/", "label": "api", "github_status_label": "any API change", }, { "owner": "envoyproxy/api-watchers", - "path": "api/", + "path": "api/envoy/", }, ], ) From 658ef43005444187c33435818a16fec05c9f0809 Mon Sep 17 00:00:00 2001 From: jianwen612 <55008549+jianwen612@users.noreply.github.com> Date: Tue, 23 Jun 2020 18:05:41 -0500 Subject: [PATCH 423/909] Fuzz: added fuzz test for network filter "local_ratelimit" (#11608) Added fuzz test code for network filter "local_ratelimit" which is defined in extensions/filters/network/local_ratelimit/local_ratelimit.h Added protobuf as the fuzz test input. Added a basic test case to cover the code. Risk Level: low Testing: Increased the coverage from 1.2% to 100% on the filter's .cc and .h files. Signed-off-by: jianwen --- .../filters/network/local_ratelimit/BUILD | 26 +++++ .../local_ratelimit_corpus/basic_test_case | 39 +++++++ .../local_ratelimit_fuzz.proto | 28 +++++ .../local_ratelimit_fuzz_test.cc | 102 ++++++++++++++++++ 4 files changed, 195 insertions(+) create mode 100644 test/extensions/filters/network/local_ratelimit/local_ratelimit_corpus/basic_test_case create mode 100644 test/extensions/filters/network/local_ratelimit/local_ratelimit_fuzz.proto create mode 100644 test/extensions/filters/network/local_ratelimit/local_ratelimit_fuzz_test.cc diff --git a/test/extensions/filters/network/local_ratelimit/BUILD b/test/extensions/filters/network/local_ratelimit/BUILD index 8f11258d9af8..ee8bcec55d8b 100644 --- a/test/extensions/filters/network/local_ratelimit/BUILD +++ b/test/extensions/filters/network/local_ratelimit/BUILD @@ -1,6 +1,8 @@ load( "//bazel:envoy_build_system.bzl", + "envoy_cc_fuzz_test", "envoy_package", + "envoy_proto_library", ) load( "//test/extensions:extensions_build_system.bzl", @@ -35,3 +37,27 @@ envoy_extension_cc_test( "//test/integration:integration_lib", ], ) + +envoy_proto_library( + name = "local_ratelimit_fuzz_proto", + srcs = ["local_ratelimit_fuzz.proto"], + deps = [ + "@envoy_api//envoy/extensions/filters/network/local_ratelimit/v3:pkg", + ], +) + +envoy_cc_fuzz_test( + name = "local_ratelimit_fuzz_test", + srcs = ["local_ratelimit_fuzz_test.cc"], + corpus = "local_ratelimit_corpus", + deps = [ + ":local_ratelimit_fuzz_proto_cc_proto", + "//source/common/buffer:buffer_lib", + "//source/extensions/filters/network/local_ratelimit:local_ratelimit_lib", + "//test/fuzz:utility_lib", + "//test/mocks/event:event_mocks", + "//test/mocks/network:network_mocks", + "//test/mocks/runtime:runtime_mocks", + "@envoy_api//envoy/extensions/filters/network/local_ratelimit/v3:pkg_cc_proto", + ], +) diff --git 
a/test/extensions/filters/network/local_ratelimit/local_ratelimit_corpus/basic_test_case b/test/extensions/filters/network/local_ratelimit/local_ratelimit_corpus/basic_test_case new file mode 100644 index 000000000000..282b7dc8fd0c --- /dev/null +++ b/test/extensions/filters/network/local_ratelimit/local_ratelimit_corpus/basic_test_case @@ -0,0 +1,39 @@ +config{ + stat_prefix: "local_rate_limit_stats" + token_bucket:{ + max_tokens: 1 + fill_interval{ + seconds: 1 + } + } + runtime_enabled:{ + default_value: { + value: true + } + runtime_key: "foo_key" + } + +} +actions { + on_new_connection { + } +} +actions { + on_data { + data: "\000\000" + } +} +actions { + on_data { + data: "\000\000" + } +} +actions { + on_new_connection { + } +} +actions { + on_data { + data: "\000\000" + } +} \ No newline at end of file diff --git a/test/extensions/filters/network/local_ratelimit/local_ratelimit_fuzz.proto b/test/extensions/filters/network/local_ratelimit/local_ratelimit_fuzz.proto new file mode 100644 index 000000000000..d6b2896d06c9 --- /dev/null +++ b/test/extensions/filters/network/local_ratelimit/local_ratelimit_fuzz.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; +package envoy.extensions.filters.network.local_ratelimit; + +import "envoy/extensions/filters/network/local_ratelimit/v3/local_rate_limit.proto"; +import "google/protobuf/empty.proto"; +import "validate/validate.proto"; + +message OnData { + bytes data = 1; + bool end_stream = 2; +} + +message Action { + oneof action_selector { + option (validate.required) = true; + // Call onNewConnection(). + google.protobuf.Empty on_new_connection = 1; + // Call onData(). + OnData on_data = 2; + // Timer ends and refill the bucket. + google.protobuf.Empty refill = 3; + } +} +message LocalRateLimitTestCase { + envoy.extensions.filters.network.local_ratelimit.v3.LocalRateLimit config = 1 + [(validate.rules).message = {required: true}]; + repeated Action actions = 2; +} diff --git a/test/extensions/filters/network/local_ratelimit/local_ratelimit_fuzz_test.cc b/test/extensions/filters/network/local_ratelimit/local_ratelimit_fuzz_test.cc new file mode 100644 index 000000000000..a632b2612b07 --- /dev/null +++ b/test/extensions/filters/network/local_ratelimit/local_ratelimit_fuzz_test.cc @@ -0,0 +1,102 @@ +#include "envoy/common/exception.h" +#include "envoy/extensions/filters/network/local_ratelimit/v3/local_rate_limit.pb.h" + +#include "common/buffer/buffer_impl.h" +#include "common/stats/isolated_store_impl.h" + +#include "extensions/filters/network/local_ratelimit/local_ratelimit.h" + +#include "test/extensions/filters/network/local_ratelimit/local_ratelimit_fuzz.pb.validate.h" +#include "test/fuzz/fuzz_runner.h" +#include "test/mocks/event/mocks.h" +#include "test/mocks/network/mocks.h" +#include "test/mocks/runtime/mocks.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::NiceMock; + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace LocalRateLimitFilter { +struct ActiveFilter { + ActiveFilter(const ConfigSharedPtr& config) : filter_(config) { + filter_.initializeReadFilterCallbacks(read_filter_callbacks_); + } + + NiceMock read_filter_callbacks_; + Filter filter_; +}; + +DEFINE_PROTO_FUZZER( + const envoy::extensions::filters::network::local_ratelimit::LocalRateLimitTestCase& input) { + + try { + TestUtility::validate(input); + } catch (const ProtoValidationException& e) { + ENVOY_LOG_MISC(debug, "ProtoValidationException: {}", e.what()); + return; + } catch (const 
ProtobufMessage::DeprecatedProtoFieldException& e) { + ENVOY_LOG_MISC(debug, "DeprecatedProtoFieldException: {}", e.what()); + return; + } + if (input.config().token_bucket().fill_interval().nanos() < 0) { + // TODO: + // protoc-gen-validate has an issue on type "Duration" which may generate interval with seconds + // > 0 while "nanos" < 0. And negative "nanos" will cause validation inside the filter to fail. + // see https://github.com/envoyproxy/protoc-gen-validate/issues/348 for detail. + ENVOY_LOG_MISC(debug, "In fill_interval, nanos should not be negative!"); + return; + } + static NiceMock dispatcher; + Stats::IsolatedStoreImpl stats_store; + static NiceMock runtime; + Event::MockTimer* fill_timer = new Event::MockTimer(&dispatcher); + envoy::extensions::filters::network::local_ratelimit::v3::LocalRateLimit proto_config = + input.config(); + ConfigSharedPtr config = nullptr; + try { + config = std::make_shared(proto_config, dispatcher, stats_store, runtime); + } catch (EnvoyException e) { + ENVOY_LOG_MISC(debug, "EnvoyException in config's constructor: {}", e.what()); + return; + } + + ActiveFilter active_filter(config); + std::chrono::milliseconds fill_interval( + PROTOBUF_GET_MS_REQUIRED(proto_config.token_bucket(), fill_interval)); + + for (const auto& action : input.actions()) { + ENVOY_LOG_MISC(trace, "action {}", action.DebugString()); + + switch (action.action_selector_case()) { + case envoy::extensions::filters::network::local_ratelimit::Action::kOnData: { + Buffer::OwnedImpl buffer(action.on_data().data()); + active_filter.filter_.onData(buffer, action.on_data().end_stream()); + break; + } + case envoy::extensions::filters::network::local_ratelimit::Action::kOnNewConnection: { + active_filter.filter_.onNewConnection(); + break; + } + case envoy::extensions::filters::network::local_ratelimit::Action::kRefill: { + EXPECT_CALL(*fill_timer, enableTimer(fill_interval, nullptr)); + fill_timer->invokeCallback(); + break; + } + default: + // Unhandled actions + PANIC("A case is missing for an action"); + } + } +} // NOLINT(clang-analyzer-cplusplus.NewDeleteLeaks) + // Silence clang-tidy here because it thinks there is a memory leak for "fill_timer" + // However, ownership of each MockTimer instance is transferred to the (caller of) dispatcher's + // createTimer_(), so to avoid destructing it twice, the MockTimer must have been dynamically + // allocated and must not be deleted by it's creator. See test/mocks/event/mocks.cc for detail. +} // namespace LocalRateLimitFilter +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy From 668b4786c6d81a8bb7720727d7bebc73c5bcf325 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Tue, 23 Jun 2020 22:24:03 -0400 Subject: [PATCH 424/909] test: cleaning up teardown (#11696) test: cleaning up teardown Signed-off-by: Alyssa Wilk --- test/integration/integration.cc | 8 ---- test/integration/integration.h | 66 +++++++++++++++++++-------------- 2 files changed, 38 insertions(+), 36 deletions(-) diff --git a/test/integration/integration.cc b/test/integration/integration.cc index 3359b8b7f617..bd4f71416c02 100644 --- a/test/integration/integration.cc +++ b/test/integration/integration.cc @@ -282,14 +282,6 @@ BaseIntegrationTest::BaseIntegrationTest(Network::Address::IpVersion version, }, version, config) {} -BaseIntegrationTest::~BaseIntegrationTest() { - // Tear down the fake upstream before the test server. 
- // When the HTTP codecs do runtime checks, it is important to finish all - // runtime access before the server, and the runtime singleton, go away. - fake_upstreams_.clear(); - test_server_.reset(); -} - Network::ClientConnectionPtr BaseIntegrationTest::makeClientConnection(uint32_t port) { return makeClientConnectionWithOptions(port, nullptr); } diff --git a/test/integration/integration.h b/test/integration/integration.h index 5231f1b5e2e1..a0a2699e834e 100644 --- a/test/integration/integration.h +++ b/test/integration/integration.h @@ -161,8 +161,7 @@ class BaseIntegrationTest : protected Logger::Loggable { BaseIntegrationTest(const InstanceConstSharedPtrFn& upstream_address_fn, Network::Address::IpVersion version, const std::string& config = ConfigHelper::httpProxyConfig()); - - virtual ~BaseIntegrationTest(); + virtual ~BaseIntegrationTest() = default; // TODO(jmarantz): Remove this once // https://github.com/envoyproxy/envoy-filter-example/pull/69 is reverted. @@ -399,6 +398,11 @@ class BaseIntegrationTest : protected Logger::Loggable { std::unique_ptr upstream_stats_store_; + // Make sure the test server will be torn down after any fake client. + // The test server owns the runtime, which is often accessed by client and + // fake upstream codecs and must outlast them. + IntegrationTestServerPtr test_server_; + // The IpVersion (IPv4, IPv6) to use. Network::Address::IpVersion version_; // IP Address to use when binding sockets on upstreams. @@ -415,15 +419,42 @@ class BaseIntegrationTest : protected Logger::Loggable { // pre-init, control plane synchronization needed for server start. std::function on_server_init_function_; - std::vector> fake_upstreams_; - // Target number of upstreams. - uint32_t fake_upstreams_count_{1}; - spdlog::level::level_enum default_log_level_; - IntegrationTestServerPtr test_server_; // A map of keys to port names. Generally the names are pulled from the v2 listener name // but if a listener is created via ADS, it will be from whatever key is used with registerPort. TestEnvironment::PortMap port_map_; + // The DrainStrategy that dictates the behaviour of + // DrainManagerImpl::drainClose(). + Server::DrainStrategy drain_strategy_{Server::DrainStrategy::Gradual}; + + // Member variables for xDS testing. + FakeUpstream* xds_upstream_{}; + FakeHttpConnectionPtr xds_connection_; + FakeStreamPtr xds_stream_; + bool create_xds_upstream_{false}; + bool tls_xds_upstream_{false}; + bool use_lds_{true}; // Use the integration framework's LDS set up. + + testing::NiceMock factory_context_; + Extensions::TransportSockets::Tls::ContextManagerImpl context_manager_{timeSystem()}; + + // The fake upstreams_ are created using the context_manager, so make sure + // they are destroyed before it is. + std::vector> fake_upstreams_; + + Grpc::SotwOrDelta sotw_or_delta_{Grpc::SotwOrDelta::Sotw}; + + spdlog::level::level_enum default_log_level_; + + // Target number of upstreams. + uint32_t fake_upstreams_count_{1}; + + // The duration of the drain manager graceful drain period. + std::chrono::seconds drain_time_{1}; + + // The number of worker threads that the test server uses. + uint32_t concurrency_{1}; + // If true, use AutonomousUpstream for fake upstreams. bool autonomous_upstream_{false}; @@ -443,27 +474,6 @@ class BaseIntegrationTest : protected Logger::Loggable { // them in the port_map_. bool defer_listener_finalization_{false}; - // The number of worker threads that the test server uses. 
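// The member reordering above relies on a core C++ guarantee: non-static data members are
// destroyed in the reverse of their declaration order. Declaring test_server_ (which owns the
// runtime) before fake_upstreams_ ensures the fake upstreams and their codecs are torn down while
// the runtime still exists, which is why the explicit ~BaseIntegrationTest() teardown removed
// above can become "= default". A minimal, self-contained sketch of that rule (illustrative
// names only, not Envoy code):
#include <iostream>

struct Server {
  ~Server() { std::cout << "server destroyed last\n"; }
};
struct Upstream {
  ~Upstream() { std::cout << "upstream destroyed first\n"; }
};

struct Fixture {
  Server test_server_;     // declared first  => destroyed last
  Upstream fake_upstream_; // declared second => destroyed first
};

int main() { Fixture f; } // prints "upstream destroyed first" then "server destroyed last"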
- uint32_t concurrency_{1}; - - // The duration of the drain manager graceful drain period. - std::chrono::seconds drain_time_{1}; - - // The DrainStrategy that dictates the behaviour of - // DrainManagerImpl::drainClose(). - Server::DrainStrategy drain_strategy_{Server::DrainStrategy::Gradual}; - - // Member variables for xDS testing. - FakeUpstream* xds_upstream_{}; - FakeHttpConnectionPtr xds_connection_; - FakeStreamPtr xds_stream_; - testing::NiceMock factory_context_; - Extensions::TransportSockets::Tls::ContextManagerImpl context_manager_{timeSystem()}; - bool create_xds_upstream_{false}; - bool tls_xds_upstream_{false}; - bool use_lds_{true}; // Use the integration framework's LDS set up. - Grpc::SotwOrDelta sotw_or_delta_{Grpc::SotwOrDelta::Sotw}; - // By default the test server will use custom stats to notify on increment. // This override exists for tests measuring stats memory. bool use_real_stats_{}; From 549d30ecc539544e95a405c5986eba69f301d209 Mon Sep 17 00:00:00 2001 From: Yifan Yang Date: Tue, 23 Jun 2020 22:25:14 -0400 Subject: [PATCH 425/909] Subject: bugfix potential memory bug: use after free (#11701) This line of code is returning a local copy of string and try to construct a string_view object with that local copy. As string_view is non-owning, it will potentially cause a memory use after free issue. This is found by clang when in C++17 absl::string_view uses std::string_view. Signed-off-by: Yifan Yang --- source/common/http/status.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/common/http/status.cc b/source/common/http/status.cc index d2b4ea122d35..78ef5c562f2d 100644 --- a/source/common/http/status.cc +++ b/source/common/http/status.cc @@ -14,7 +14,7 @@ constexpr absl::string_view EnvoyPayloadUrl = "Envoy"; absl::string_view statusCodeToString(StatusCode code) { switch (code) { case StatusCode::Ok: - return absl::OkStatus().ToString(); + return "OK"; case StatusCode::CodecProtocolError: return "CodecProtocolError"; case StatusCode::BufferFloodError: From f190f05388ed4e0538c4006411e6554a60c0477d Mon Sep 17 00:00:00 2001 From: Sam Flattery <44659644+samflattery@users.noreply.github.com> Date: Wed, 24 Jun 2020 14:22:01 +0100 Subject: [PATCH 426/909] fuzz: fix oss crash on http utility function (#11687) Commit Message: Fix crash on initializeAndValidateOptions fuzz test the crash occurred because validateCustomSettingsParameters does not allow duplicate custom settings identifiers and the fuzzer fed it an input that had duplicates. fixed by catching expected exceptions from duplicate custom settings identifiers. 
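For context on why that fuzz input crashed: the corpus entry added below repeats custom SETTINGS
identifier 11008, and duplicate custom identifiers are rejected with an EnvoyException, which the
fuzz harness previously let propagate. The snippet below is only an illustrative sketch of such a
duplicate-identifier check; the function name and signature are assumptions, and the real
validation (Http2::Utility::initializeAndValidateOptions / validateCustomSettingsParameters)
covers additional cases such as named-versus-custom parameter conflicts.

#include <cstdint>
#include <set>
#include <stdexcept>
#include <utility>
#include <vector>

// Reject a parameter list in which the same SETTINGS identifier appears more than once.
void rejectDuplicateSettings(const std::vector<std::pair<uint16_t, uint32_t>>& params) {
  std::set<uint16_t> seen;
  for (const auto& param : params) {
    if (!seen.insert(param.first).second) {
      throw std::runtime_error("inconsistent HTTP/2 custom SETTINGS parameter(s) detected");
    }
  }
}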
Fixes: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=23616 Signed-off-by: Sam Flattery --- ...nimized-utility_fuzz_test-5091558495092736 | 17 ++++++++++++++ test/common/http/utility_corpus/valid | 3 +-- test/common/http/utility_fuzz_test.cc | 23 ++++++++++++++++++- 3 files changed, 40 insertions(+), 3 deletions(-) create mode 100644 test/common/http/utility_corpus/clusterfuzz-testcase-minimized-utility_fuzz_test-5091558495092736 diff --git a/test/common/http/utility_corpus/clusterfuzz-testcase-minimized-utility_fuzz_test-5091558495092736 b/test/common/http/utility_corpus/clusterfuzz-testcase-minimized-utility_fuzz_test-5091558495092736 new file mode 100644 index 000000000000..aff9fb772604 --- /dev/null +++ b/test/common/http/utility_corpus/clusterfuzz-testcase-minimized-utility_fuzz_test-5091558495092736 @@ -0,0 +1,17 @@ +initialize_and_validate { + custom_settings_parameters { + identifier { + value: 11008 + } + value { + value: 65536 + } + } + custom_settings_parameters { + identifier { + value: 11008 + } + value { + } + } +} diff --git a/test/common/http/utility_corpus/valid b/test/common/http/utility_corpus/valid index f47f99e15a99..1ea7275bf45e 100644 --- a/test/common/http/utility_corpus/valid +++ b/test/common/http/utility_corpus/valid @@ -1,2 +1 @@ -find_query_string: "\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\7\177\177\17 -U²@/177\177N¿77\177" \ No newline at end of file +find_query_string: "\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\7\177\177\17U²@/177\177N¿77\177" diff --git a/test/common/http/utility_fuzz_test.cc b/test/common/http/utility_fuzz_test.cc index e3524fde27e4..18d5c0c4c388 100644 --- a/test/common/http/utility_fuzz_test.cc +++ b/test/common/http/utility_fuzz_test.cc @@ -79,7 +79,28 @@ DEFINE_PROTO_FUZZER(const test::common::http::UtilityTestCase& input) { } case test::common::http::UtilityTestCase::kInitializeAndValidate: { const auto& options = input.initialize_and_validate(); - Http2::Utility::initializeAndValidateOptions(options); + try { + Http2::Utility::initializeAndValidateOptions(options); + } catch (EnvoyException& e) { + absl::string_view msg = e.what(); + // initializeAndValidateOptions throws exceptions for 4 different reasons due to malformed + // settings, so check for them and allow any other exceptions through + if (absl::StartsWith( + msg, "server push is not supported by Envoy and can not be enabled via a SETTINGS " + "parameter.") || + absl::StartsWith( + msg, "the \"allow_connect\" SETTINGS parameter must only be configured through the " + "named field") || + absl::StartsWith( + msg, "inconsistent HTTP/2 custom SETTINGS parameter(s) detected; identifiers =") || + absl::EndsWith( + msg, "HTTP/2 SETTINGS parameter(s) can not be configured through both named and " + "custom parameters")) { + ENVOY_LOG_MISC(trace, "Caught exception {} in initializeAndValidateOptions test", e.what()); + } else { + throw EnvoyException(e.what()); + } + } break; } From 77efb4799d906400b97b43d6c2ed0b5d8369b2fc Mon Sep 17 00:00:00 2001 From: Auni Ahsan Date: Wed, 24 Jun 2020 09:44:13 -0400 Subject: [PATCH 427/909] admin: Support /drain_listeners?graceful (#11639) Calling /drain_listeners?graceful will trigger the drain manager drain sequence prior to closing listeners. Risk Level: Low. Testing: Tested that connections are terminated on request complete during the graceful drain period, that new connections can still be opened, H1/H2-specific response behaviour. 
Docs Changes: Add docs to admin.rst, improve the overall drain sequence documentation. Signed-off-by: Auni Ahsan --- .../arch_overview/operations/draining.rst | 42 +++++--- docs/root/operations/admin.rst | 6 ++ include/envoy/server/drain_manager.h | 5 + source/server/admin/listeners_handler.cc | 16 ++- source/server/drain_manager_impl.h | 1 + .../drain_close_integration_test.cc | 100 ++++++++++++++++++ test/mocks/server/mocks.h | 1 + test/server/drain_manager_impl_test.cc | 3 + 8 files changed, 160 insertions(+), 14 deletions(-) diff --git a/docs/root/intro/arch_overview/operations/draining.rst b/docs/root/intro/arch_overview/operations/draining.rst index 0a0932e57a92..18003197c844 100644 --- a/docs/root/intro/arch_overview/operations/draining.rst +++ b/docs/root/intro/arch_overview/operations/draining.rst @@ -3,16 +3,42 @@ Draining ======== -Draining is the process by which Envoy attempts to gracefully shed connections in response to -various events. Draining occurs at the following times: +In a few different scenarios, Envoy will attempt to gracefully shed connections. For instance, +during server shutdown, existing requests can be discouraged and listeners set to stop accepting, +to reduce the number of open connections when the server shuts down. Draining behaviour is defined +by the server options in addition to individual listener configs. +Draining occurs at the following times: + +* The server is being :ref:`hot restarted `. +* The server begins the graceful drain sequence via the :ref:`drain_listeners?graceful + ` admin endpoint. * The server has been manually health check failed via the :ref:`healthcheck/fail ` admin endpoint. See the :ref:`health check filter ` architecture overview for more information. -* The server is being :ref:`hot restarted `. * Individual listeners are being modified or removed via :ref:`LDS `. +By default, the Envoy server will close listeners immediately on server shutdown. To drain listeners +for some duration of time prior to server shutdown, use :ref:`drain_listeners ` +before shutting down the server. The listeners will be directly stopped without any graceful draining behaviour, +and cease accepting new connections immediately. + +To add a graceful drain period prior to listeners being closed, use the query parameter +:ref:`drain_listeners?graceful `. By default, Envoy +will discourage requests for some period of time (as determined by :option:`--drain-time-s`). +The behaviour of request discouraging is determined by the drain manager. + +Note that although draining is a per-listener concept, it must be supported at the network filter +level. Currently the only filters that support graceful draining are +:ref:`Redis `, +:ref:`Mongo `, +and :ref:`HTTP connection manager `. + +By default, the :ref:`HTTP connection manager ` filter will +add "Connection: close" to HTTP1 requests, send HTTP2 GOAWAY, and terminate connections +on request completion (after the delayed close period). + Each :ref:`configured listener ` has a :ref:`drain_type ` setting which controls when draining takes place. The currently supported values are: @@ -27,13 +53,3 @@ modify_only It may be desirable to set *modify_only* on egress listeners so they only drain during modifications while relying on ingress listener draining to perform full server draining when attempting to do a controlled shutdown. - -Note that although draining is a per-listener concept, it must be supported at the network filter -level. 
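The request-discouraging behaviour described in the new documentation above is driven by the drain
manager's drainClose() decision. Under the default gradual strategy, the chance that a given
request is discouraged (for example by adding "Connection: close" or sending GOAWAY) grows as the
configured drain period (:option:`--drain-time-s`) elapses. The snippet below is a simplified,
illustrative sketch of such a decision, not the actual DrainManagerImpl implementation:

#include <chrono>
#include <random>

// Once the drain sequence has started, drain a larger and larger share of requests until the
// drain deadline passes, after which every request is drained.
bool gradualDrainClose(std::chrono::seconds elapsed, std::chrono::seconds drain_time,
                       std::mt19937& rng) {
  if (elapsed >= drain_time) {
    return true;
  }
  std::uniform_real_distribution<double> dist(0.0, 1.0);
  const double fraction =
      static_cast<double>(elapsed.count()) / static_cast<double>(drain_time.count());
  return dist(rng) < fraction;
}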
Currently the only filters that support graceful draining are -:ref:`HTTP connection manager `, -:ref:`Redis `, and -:ref:`Mongo `. - -Listeners can also be stopped via :ref:`drain_listeners `. In this case, -they are directly stopped (without going through the actual draining process) on worker threads, -so that they will not accept any new requests. diff --git a/docs/root/operations/admin.rst b/docs/root/operations/admin.rst index c4a3cd81ee13..b90a1461f415 100644 --- a/docs/root/operations/admin.rst +++ b/docs/root/operations/admin.rst @@ -258,6 +258,12 @@ modify different aspects of the server: :ref:`Listener ` is used to determine whether a listener is inbound or outbound. + .. http:post:: /drain_listeners?graceful + + When draining listeners, enter a graceful drain period prior to closing listeners. + This behaviour and duration is configurable via server options or CLI + (:option:`--drain-time-s` and :option:`--drain-strategy`). + .. attention:: This operation directly stops the matched listeners on workers. Once listeners in a given diff --git a/include/envoy/server/drain_manager.h b/include/envoy/server/drain_manager.h index 0f29b0cd3eed..49ecc194166a 100644 --- a/include/envoy/server/drain_manager.h +++ b/include/envoy/server/drain_manager.h @@ -21,6 +21,11 @@ class DrainManager : public Network::DrainDecision { */ virtual void startDrainSequence(std::function drain_complete_cb) PURE; + /** + * @return whether the drain sequence has started. + */ + virtual bool draining() const PURE; + /** * Invoked in the newly launched primary process to begin the parent shutdown sequence. At the end * of the sequence the previous primary process will be terminated. diff --git a/source/server/admin/listeners_handler.cc b/source/server/admin/listeners_handler.cc index 3d813ad4b4c8..93407d9eb6cc 100644 --- a/source/server/admin/listeners_handler.cc +++ b/source/server/admin/listeners_handler.cc @@ -16,10 +16,24 @@ ListenersHandler::ListenersHandler(Server::Instance& server) : HandlerContextBas Http::Code ListenersHandler::handlerDrainListeners(absl::string_view url, Http::ResponseHeaderMap&, Buffer::Instance& response, AdminStream&) { const Http::Utility::QueryParams params = Http::Utility::parseQueryString(url); + ListenerManager::StopListenersType stop_listeners_type = params.find("inboundonly") != params.end() ? ListenerManager::StopListenersType::InboundOnly : ListenerManager::StopListenersType::All; - server_.listenerManager().stopListeners(stop_listeners_type); + + const bool graceful = params.find("graceful") != params.end(); + if (graceful) { + // Ignore calls to /drain_listeners?graceful if the drain sequence has + // already started. 
+ if (!server_.drainManager().draining()) { + server_.drainManager().startDrainSequence([this, stop_listeners_type]() { + server_.listenerManager().stopListeners(stop_listeners_type); + }); + } + } else { + server_.listenerManager().stopListeners(stop_listeners_type); + } + response.add("OK\n"); return Http::Code::OK; } diff --git a/source/server/drain_manager_impl.h b/source/server/drain_manager_impl.h index 38a02465b761..c8056f22396c 100644 --- a/source/server/drain_manager_impl.h +++ b/source/server/drain_manager_impl.h @@ -28,6 +28,7 @@ class DrainManagerImpl : Logger::Loggable, public DrainManager // Server::DrainManager void startDrainSequence(std::function drain_complete_cb) override; + bool draining() const override { return draining_; } void startParentShutdownSequence() override; private: diff --git a/test/integration/drain_close_integration_test.cc b/test/integration/drain_close_integration_test.cc index cbe58e973ecd..aa0afd8d141b 100644 --- a/test/integration/drain_close_integration_test.cc +++ b/test/integration/drain_close_integration_test.cc @@ -75,6 +75,106 @@ TEST_P(DrainCloseIntegrationTest, DrainCloseImmediate) { TEST_P(DrainCloseIntegrationTest, AdminDrain) { testAdminDrain(downstreamProtocol()); } +TEST_P(DrainCloseIntegrationTest, AdminGracefulDrain) { + drain_strategy_ = Server::DrainStrategy::Immediate; + drain_time_ = std::chrono::seconds(999); + initialize(); + fake_upstreams_[0]->set_allow_unexpected_disconnects(true); + uint32_t http_port = lookupPort("http"); + codec_client_ = makeHttpConnection(http_port); + + auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); + waitForNextUpstreamRequest(0); + upstream_request_->encodeHeaders(default_response_headers_, true); + response->waitForEndStream(); + ASSERT_TRUE(response->complete()); + EXPECT_THAT(response->headers(), Http::HttpStatusIs("200")); + // The request is completed but the connection remains open. + EXPECT_TRUE(codec_client_->connected()); + + // Invoke /drain_listeners with graceful drain + BufferingStreamDecoderPtr admin_response = IntegrationUtil::makeSingleRequest( + lookupPort("admin"), "POST", "/drain_listeners?graceful", "", downstreamProtocol(), version_); + EXPECT_EQ(admin_response->headers().Status()->value().getStringView(), "200"); + + // With a 999s graceful drain period, the listener should still be open. + EXPECT_EQ(test_server_->counter("listener_manager.listener_stopped")->value(), 0); + + response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); + waitForNextUpstreamRequest(0); + upstream_request_->encodeHeaders(default_response_headers_, true); + response->waitForEndStream(); + ASSERT_TRUE(response->complete()); + EXPECT_THAT(response->headers(), Http::HttpStatusIs("200")); + + // Connections will terminate on request complete + ASSERT_TRUE(codec_client_->waitForDisconnect()); + if (downstream_protocol_ == Http::CodecClient::Type::HTTP2) { + EXPECT_TRUE(codec_client_->sawGoAway()); + } else { + EXPECT_EQ("close", response->headers().getConnectionValue()); + } + + // New connections can still be made. + auto second_codec_client_ = makeRawHttpConnection(makeClientConnection(http_port)); + EXPECT_TRUE(second_codec_client_->connected()); + + // Invoke /drain_listeners and shut down listeners. 
+ second_codec_client_->rawConnection().close(Network::ConnectionCloseType::NoFlush); + admin_response = IntegrationUtil::makeSingleRequest( + lookupPort("admin"), "POST", "/drain_listeners", "", downstreamProtocol(), version_); + EXPECT_EQ(admin_response->headers().Status()->value().getStringView(), "200"); + + test_server_->waitForCounterEq("listener_manager.listener_stopped", 1); + EXPECT_NO_THROW(Network::TcpListenSocket( + Network::Utility::getAddressWithPort(*Network::Test::getCanonicalLoopbackAddress(version_), + http_port), + nullptr, true)); +} + +TEST_P(DrainCloseIntegrationTest, RepeatedAdminGracefulDrain) { + // Use the default gradual probabilistic DrainStrategy so drainClose() + // behaviour isn't conflated with whether the drain sequence has started. + drain_time_ = std::chrono::seconds(999); + initialize(); + fake_upstreams_[0]->set_allow_unexpected_disconnects(true); + uint32_t http_port = lookupPort("http"); + codec_client_ = makeHttpConnection(http_port); + + auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); + waitForNextUpstreamRequest(0); + upstream_request_->encodeHeaders(default_response_headers_, true); + response->waitForEndStream(); + + // Invoke /drain_listeners with graceful drain + BufferingStreamDecoderPtr admin_response = IntegrationUtil::makeSingleRequest( + lookupPort("admin"), "POST", "/drain_listeners?graceful", "", downstreamProtocol(), version_); + EXPECT_EQ(admin_response->headers().Status()->value().getStringView(), "200"); + EXPECT_EQ(test_server_->counter("listener_manager.listener_stopped")->value(), 0); + + admin_response = IntegrationUtil::makeSingleRequest( + lookupPort("admin"), "POST", "/drain_listeners?graceful", "", downstreamProtocol(), version_); + EXPECT_EQ(admin_response->headers().Status()->value().getStringView(), "200"); + EXPECT_EQ(admin_response->headers().Status()->value().getStringView(), "200"); + + response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); + waitForNextUpstreamRequest(0); + upstream_request_->encodeHeaders(default_response_headers_, true); + response->waitForEndStream(); + ASSERT_TRUE(response->complete()); + EXPECT_THAT(response->headers(), Http::HttpStatusIs("200")); + + admin_response = IntegrationUtil::makeSingleRequest( + lookupPort("admin"), "POST", "/drain_listeners", "", downstreamProtocol(), version_); + EXPECT_EQ(admin_response->headers().Status()->value().getStringView(), "200"); + + test_server_->waitForCounterEq("listener_manager.listener_stopped", 1); + EXPECT_NO_THROW(Network::TcpListenSocket( + Network::Utility::getAddressWithPort(*Network::Test::getCanonicalLoopbackAddress(version_), + http_port), + nullptr, true)); +} + INSTANTIATE_TEST_SUITE_P(Protocols, DrainCloseIntegrationTest, testing::ValuesIn(HttpProtocolIntegrationTest::getProtocolTestParams( {Http::CodecClient::Type::HTTP1, Http::CodecClient::Type::HTTP2}, diff --git a/test/mocks/server/mocks.h b/test/mocks/server/mocks.h index 34ffef72e615..6e5060014f72 100644 --- a/test/mocks/server/mocks.h +++ b/test/mocks/server/mocks.h @@ -192,6 +192,7 @@ class MockDrainManager : public DrainManager { // Server::DrainManager MOCK_METHOD(bool, drainClose, (), (const)); + MOCK_METHOD(bool, draining, (), (const)); MOCK_METHOD(void, startDrainSequence, (std::function completion)); MOCK_METHOD(void, startParentShutdownSequence, ()); diff --git a/test/server/drain_manager_impl_test.cc b/test/server/drain_manager_impl_test.cc index be09ee0ec7fb..9afeba1b7955 100644 --- a/test/server/drain_manager_impl_test.cc 
+++ b/test/server/drain_manager_impl_test.cc @@ -126,7 +126,10 @@ TEST_P(DrainManagerImplTest, DrainDeadlineProbability) { EXPECT_TRUE(drain_manager.drainClose()); EXPECT_CALL(server_, healthCheckFailed()).WillRepeatedly(Return(false)); EXPECT_FALSE(drain_manager.drainClose()); + EXPECT_FALSE(drain_manager.draining()); + drain_manager.startDrainSequence([] {}); + EXPECT_TRUE(drain_manager.draining()); if (drain_gradually) { // random() should be called when elapsed time < drain timeout From c52488e0d0f9caf053134186f543b6875a8d25af Mon Sep 17 00:00:00 2001 From: Dhi Aurrahman Date: Wed, 24 Jun 2020 22:01:10 +0700 Subject: [PATCH 428/909] examples: Add `ext_authz` HTTP filter example (#11667) Signed-off-by: Dhi Aurrahman --- docs/root/start/sandboxes/ext_authz.rst | 113 ++++++++++++++++++ docs/root/start/start.rst | 1 + examples/BUILD | 3 + examples/ext_authz/.env | 1 + examples/ext_authz/README.md | 2 + .../ext_authz/auth/grpc-service/Dockerfile | 10 ++ examples/ext_authz/auth/grpc-service/Makefile | 8 ++ examples/ext_authz/auth/grpc-service/go.mod | 10 ++ examples/ext_authz/auth/grpc-service/go.sum | 74 ++++++++++++ examples/ext_authz/auth/grpc-service/main.go | 42 +++++++ .../auth/grpc-service/pkg/auth/users.go | 32 +++++ .../auth/grpc-service/pkg/auth/v2/auth.go | 68 +++++++++++ .../auth/grpc-service/pkg/auth/v3/auth.go | 68 +++++++++++ .../ext_authz/auth/http-service/Dockerfile | 4 + .../ext_authz/auth/http-service/server.js | 29 +++++ examples/ext_authz/auth/users.json | 5 + .../ext_authz/config/grpc-service/v2.yaml | 72 +++++++++++ .../ext_authz/config/grpc-service/v3.yaml | 72 +++++++++++ examples/ext_authz/config/http-service.yaml | 75 ++++++++++++ examples/ext_authz/docker-compose.yaml | 53 ++++++++ .../ext_authz/upstream/service/Dockerfile | 5 + examples/ext_authz/upstream/service/server.py | 12 ++ examples/ext_authz/users.json | 5 + test/config_test/config_test.cc | 4 + test/config_test/example_configs_test.cc | 4 +- 25 files changed, 770 insertions(+), 2 deletions(-) create mode 100644 docs/root/start/sandboxes/ext_authz.rst create mode 100644 examples/ext_authz/.env create mode 100644 examples/ext_authz/README.md create mode 100644 examples/ext_authz/auth/grpc-service/Dockerfile create mode 100644 examples/ext_authz/auth/grpc-service/Makefile create mode 100644 examples/ext_authz/auth/grpc-service/go.mod create mode 100644 examples/ext_authz/auth/grpc-service/go.sum create mode 100644 examples/ext_authz/auth/grpc-service/main.go create mode 100644 examples/ext_authz/auth/grpc-service/pkg/auth/users.go create mode 100644 examples/ext_authz/auth/grpc-service/pkg/auth/v2/auth.go create mode 100644 examples/ext_authz/auth/grpc-service/pkg/auth/v3/auth.go create mode 100644 examples/ext_authz/auth/http-service/Dockerfile create mode 100644 examples/ext_authz/auth/http-service/server.js create mode 100644 examples/ext_authz/auth/users.json create mode 100644 examples/ext_authz/config/grpc-service/v2.yaml create mode 100644 examples/ext_authz/config/grpc-service/v3.yaml create mode 100644 examples/ext_authz/config/http-service.yaml create mode 100644 examples/ext_authz/docker-compose.yaml create mode 100644 examples/ext_authz/upstream/service/Dockerfile create mode 100644 examples/ext_authz/upstream/service/server.py create mode 100644 examples/ext_authz/users.json diff --git a/docs/root/start/sandboxes/ext_authz.rst b/docs/root/start/sandboxes/ext_authz.rst new file mode 100644 index 000000000000..9ff39229dad2 --- /dev/null +++ b/docs/root/start/sandboxes/ext_authz.rst @@ -0,0 +1,113 
@@ +.. _install_sandboxes_ext_authz: + +External Authorization Filter +============================= + +The External Authorization sandbox demonstrates Envoy's :ref:`ext_authz filter ` +capability to delegate authorization of incoming requests through Envoy to an external services. + +While ext_authz can also be employed as a network filter, this sandbox is limited to exhibit +ext_authz HTTP Filter, which supports to call HTTP or gRPC service. + +The setup of this sandbox is very similar to front-proxy deployment, however calls to upstream +service behind the proxy will be checked by an external HTTP or gRPC service. In this sandbox, +for every authorized call, the external authorization service adds additional ``x-current-user`` +header entry to the original request headers to be forwarded to the upstream service. + +Running the Sandbox +~~~~~~~~~~~~~~~~~~~ + +**Step 1: Install Docker** + +Ensure that you have a recent versions of ``docker`` and ``docker-compose``. + +A simple way to achieve this is via the `Docker Desktop `_. + +**Step 2: Clone the Envoy repository and start all of our containers** + +If you have not cloned the Envoy repository, clone it with ``git clone git@github.com:envoyproxy/envoy`` +or ``git clone https://github.com/envoyproxy/envoy.git``. + +To build this sandbox example and start the example services, run the following commands:: + + $ pwd + envoy/examples/ext_authz + $ docker-compose pull + $ docker-compose up --build -d + $ docker-compose ps + + Name Command State Ports + --------------------------------------------------------------------------------------------------------------------------------------- + ext_authz_ext_authz-grpc-service_1 /app/server -users /etc/us Up + ext_authz_ext_authz-http-service_1 docker-entrypoint.sh node Up + ext_authz_front-envoy_1 /docker-entrypoint.sh /bin Up 10000/tcp, 0.0.0.0:8000->8000/tcp, 0.0.0.0:8001->8001/tcp + ext_authz_upstream-service_1 python3 /app/service/server.py Up + +.. note:: + This sandbox has multiple setup controlled by ``FRONT_ENVOY_YAML`` environment variable which + points to the effective Envoy configuration to be used. The default value of ``FRONT_ENVOY_YAML`` + can be defined in the ``.env`` file or provided inline when running the ``docker-compose up`` + command. For more information, pease take a look at `environment variables in Compose documentation `_. + +By default, ``FRONT_ENVOY_YAML`` points to ``config/grpc-service/v3.yaml`` file which bootstraps +front-envoy with ext_authz HTTP filter with gRPC service ``V3`` (this is specified by :ref:`transport_api_version field`). +The possible values of ``FRONT_ENVOY_YAML`` can be found inside the ``envoy/examples/ext_authz/config`` +directory. + +For example, to run Envoy with ext_authz HTTP filter with HTTP service will be:: + + $ pwd + envoy/examples/ext_authz + $ docker-compose pull + $ # Tearing down the currently running setup + $ docker-compose down + $ FRONT_ENVOY_YAML=config/http-service.yaml docker-compose up --build -d + $ # Or you can update the .env file with the above FRONT_ENVOY_YAML value, so you don't have to specify it when running the "up" command. + +**Step 3: Access the upstream-service behind the Front Envoy** + +You can now try to send a request to upstream-service via the front-envoy as follows:: + + $ curl -v localhost:8000/service + * Trying 127.0.0.1... 
+ * TCP_NODELAY set + * Connected to localhost (127.0.0.1) port 8000 (#0) + > GET /service HTTP/1.1 + > Host: localhost:8000 + > User-Agent: curl/7.58.0 + > Accept: */* + > + < HTTP/1.1 403 Forbidden + < date: Fri, 19 Jun 2020 15:02:24 GMT + < server: envoy + < content-length: 0 + +As observed, the request failed with ``403 Forbidden`` status code. This happened since the ext_authz +filter employed by Envoy rejected the call. To let the request reach the upstream service, you need +to provide a ``Bearer`` token via the ``Authorization`` header. + +.. note:: + A complete list of users is defined in ``envoy/examples/ext_authz/auth/users.json`` file. For + example, the ``token1`` used in the below example is corresponding to ``user1``. + +An example of successful requests can be observed as follows:: + + $ curl -v -H "Authorization: Bearer token1" localhost:8000/service + * Trying 127.0.0.1... + * TCP_NODELAY set + * Connected to localhost (127.0.0.1) port 8000 (#0) + > GET /service HTTP/1.1 + > Host: localhost:8000 + > User-Agent: curl/7.58.0 + > Accept: */* + > Authorization: Bearer token1 + > + < HTTP/1.1 200 OK + < content-type: text/html; charset=utf-8 + < content-length: 24 + < server: envoy + < date: Fri, 19 Jun 2020 15:04:29 GMT + < x-envoy-upstream-service-time: 2 + < + * Connection #0 to host localhost left intact + Hello user1 from behind Envoy! diff --git a/docs/root/start/start.rst b/docs/root/start/start.rst index 586896366be5..88f9742c260c 100644 --- a/docs/root/start/start.rst +++ b/docs/root/start/start.rst @@ -162,6 +162,7 @@ features. The following sandboxes are available: sandboxes/cors sandboxes/csrf + sandboxes/ext_authz sandboxes/fault_injection sandboxes/front_proxy sandboxes/grpc_bridge diff --git a/examples/BUILD b/examples/BUILD index d89668cf1c1c..2ad8bbe29b5a 100644 --- a/examples/BUILD +++ b/examples/BUILD @@ -14,6 +14,9 @@ filegroup( "cors/backend/service-envoy.yaml", "cors/frontend/front-envoy.yaml", "cors/frontend/service-envoy.yaml", + "ext_authz/config/grpc-service/v2.yaml", + "ext_authz/config/grpc-service/v3.yaml", + "ext_authz/config/http-service.yaml", "front-proxy/front-envoy.yaml", "front-proxy/service-envoy.yaml", "grpc-bridge/client/envoy-proxy.yaml", diff --git a/examples/ext_authz/.env b/examples/ext_authz/.env new file mode 100644 index 000000000000..0a7d4cb0eaf0 --- /dev/null +++ b/examples/ext_authz/.env @@ -0,0 +1 @@ +FRONT_ENVOY_YAML=config/grpc-service/v3.yaml diff --git a/examples/ext_authz/README.md b/examples/ext_authz/README.md new file mode 100644 index 000000000000..c0a121144d07 --- /dev/null +++ b/examples/ext_authz/README.md @@ -0,0 +1,2 @@ +To learn about this sandbox and for instructions on how to run it please head over +to the [envoy docs](https://www.envoyproxy.io/docs/envoy/latest/start/sandboxes/ext_authz) diff --git a/examples/ext_authz/auth/grpc-service/Dockerfile b/examples/ext_authz/auth/grpc-service/Dockerfile new file mode 100644 index 000000000000..f77cdd69e39f --- /dev/null +++ b/examples/ext_authz/auth/grpc-service/Dockerfile @@ -0,0 +1,10 @@ +FROM golang:alpine AS builder + +RUN apk --no-cache add make +COPY . 
/app +RUN make -C /app/grpc-service + +FROM alpine + +COPY --from=builder /app/grpc-service/server /app/server +CMD ["/app/server", "-users", "/etc/users.json"] diff --git a/examples/ext_authz/auth/grpc-service/Makefile b/examples/ext_authz/auth/grpc-service/Makefile new file mode 100644 index 000000000000..e9ee1e9581cf --- /dev/null +++ b/examples/ext_authz/auth/grpc-service/Makefile @@ -0,0 +1,8 @@ +all: server + +server: + @CGO_ENABLED=0 GOOS=linux go build -a --ldflags '-extldflags "-static"' \ + -tags "netgo" -installsuffix netgo \ + -o server +clean: + @rm -fr server diff --git a/examples/ext_authz/auth/grpc-service/go.mod b/examples/ext_authz/auth/grpc-service/go.mod new file mode 100644 index 000000000000..e1eebc33626a --- /dev/null +++ b/examples/ext_authz/auth/grpc-service/go.mod @@ -0,0 +1,10 @@ +module github.com/envoyproxy/envoy/examples/ext_authz/auth/grpc-service + +go 1.14 + +require ( + github.com/envoyproxy/go-control-plane v0.9.5 + github.com/golang/protobuf v1.3.2 + google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 + google.golang.org/grpc v1.25.1 +) diff --git a/examples/ext_authz/auth/grpc-service/go.sum b/examples/ext_authz/auth/grpc-service/go.sum new file mode 100644 index 000000000000..e5921d26237e --- /dev/null +++ b/examples/ext_authz/auth/grpc-service/go.sum @@ -0,0 +1,74 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20200313221541-5f7e5dd04533 h1:8wZizuKuZVu5COB7EsBYxBQz8nRcXXn5d4Gt91eJLvU= +github.com/cncf/udpa/go v0.0.0-20200313221541-5f7e5dd04533/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/envoyproxy/go-control-plane v0.9.0 h1:67WMNTvGrl7V1dWdKCeTwxDr7nio9clKoTlLhwIPnT4= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.5 h1:lRJIqDD8yjV1YyPRqecMdytjDLs2fTXq363aCib5xPU= +github.com/envoyproxy/go-control-plane v0.9.5/go.mod h1:OXl5to++W0ctG+EHWTFUjiypVxC/Y4VLc/KFU+al13s= +github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf 
v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/xerrors 
v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1 h1:wdKvqQk7IttEw92GoRyKG2IDrUIpgpj6H6m81yfeMW0= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/examples/ext_authz/auth/grpc-service/main.go b/examples/ext_authz/auth/grpc-service/main.go new file mode 100644 index 000000000000..6861bce06055 --- /dev/null +++ b/examples/ext_authz/auth/grpc-service/main.go @@ -0,0 +1,42 @@ +package main + +import ( + "flag" + "fmt" + "log" + "net" + + envoy_service_auth_v2 "github.com/envoyproxy/go-control-plane/envoy/service/auth/v2" + envoy_service_auth_v3 "github.com/envoyproxy/go-control-plane/envoy/service/auth/v3" + "google.golang.org/grpc" + + "github.com/envoyproxy/envoy/examples/ext_authz/auth/grpc-service/pkg/auth" + auth_v2 "github.com/envoyproxy/envoy/examples/ext_authz/auth/grpc-service/pkg/auth/v2" + auth_v3 "github.com/envoyproxy/envoy/examples/ext_authz/auth/grpc-service/pkg/auth/v3" +) + +func main() { + port := flag.Int("port", 9001, "gRPC port") + data := flag.String("users", "../../users.json", "users file") + + flag.Parse() + + lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port)) + if err != nil { + log.Fatalf("failed to listen to %d: %v", *port, err) + } + + users, err := auth.LoadUsers(*data) + if err != nil { + log.Fatalf("failed to load user data:%s %v", *data, err) + } + gs := grpc.NewServer() + + // Serve v3 and v2. 
+ envoy_service_auth_v3.RegisterAuthorizationServer(gs, auth_v3.New(users)) + envoy_service_auth_v2.RegisterAuthorizationServer(gs, auth_v2.New(users)) + + log.Printf("starting gRPC server on: %d\n", *port) + + gs.Serve(lis) +} diff --git a/examples/ext_authz/auth/grpc-service/pkg/auth/users.go b/examples/ext_authz/auth/grpc-service/pkg/auth/users.go new file mode 100644 index 000000000000..507c3560bdf1 --- /dev/null +++ b/examples/ext_authz/auth/grpc-service/pkg/auth/users.go @@ -0,0 +1,32 @@ +package auth + +import ( + "encoding/json" + "io/ioutil" +) + +// Users holds a list of users. +type Users map[string]string + +// Check checks if a key could retrieve a user from a list of users. +func (u Users) Check(key string) (bool, string) { + value, ok := u[key] + if !ok { + return false, "" + } + return ok, value +} + +// LoadUsers load users data from a JSON file. +func LoadUsers(jsonFile string) (Users, error) { + var users Users + data, err := ioutil.ReadFile(jsonFile) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(data, &users); err != nil { + return nil, err + } + return users, nil +} diff --git a/examples/ext_authz/auth/grpc-service/pkg/auth/v2/auth.go b/examples/ext_authz/auth/grpc-service/pkg/auth/v2/auth.go new file mode 100644 index 000000000000..3b16c6c5cb9f --- /dev/null +++ b/examples/ext_authz/auth/grpc-service/pkg/auth/v2/auth.go @@ -0,0 +1,68 @@ +package v2 + +import ( + "context" + "log" + "strings" + + envoy_api_v2_core "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" + envoy_service_auth_v2 "github.com/envoyproxy/go-control-plane/envoy/service/auth/v2" + "github.com/golang/protobuf/ptypes/wrappers" + "google.golang.org/genproto/googleapis/rpc/code" + "google.golang.org/genproto/googleapis/rpc/status" + + "github.com/envoyproxy/envoy/examples/ext_authz/auth/grpc-service/pkg/auth" +) + +type server struct { + users auth.Users +} + +var _ envoy_service_auth_v2.AuthorizationServer = &server{} + +// New creates a new authorization server. +func New(users auth.Users) envoy_service_auth_v2.AuthorizationServer { + return &server{users} +} + +// Check implements authorization's Check interface which performs authorization check based on the +// attributes associated with the incoming request. +func (s *server) Check( + ctx context.Context, + req *envoy_service_auth_v2.CheckRequest) (*envoy_service_auth_v2.CheckResponse, error) { + authorization := req.Attributes.Request.Http.Headers["authorization"] + log.Println(authorization) + + extracted := strings.Fields(authorization) + if len(extracted) == 2 && extracted[0] == "Bearer" { + valid, user := s.users.Check(extracted[1]) + if valid { + return &envoy_service_auth_v2.CheckResponse{ + HttpResponse: &envoy_service_auth_v2.CheckResponse_OkResponse{ + OkResponse: &envoy_service_auth_v2.OkHttpResponse{ + Headers: []*envoy_api_v2_core.HeaderValueOption{ + { + Append: &wrappers.BoolValue{Value: false}, + Header: &envoy_api_v2_core.HeaderValue{ + // For a successful request, the authorization server sets the + // x-current-user value. 
+ Key: "x-current-user", + Value: user, + }, + }, + }, + }, + }, + Status: &status.Status{ + Code: int32(code.Code_OK), + }, + }, nil + } + } + + return &envoy_service_auth_v2.CheckResponse{ + Status: &status.Status{ + Code: int32(code.Code_PERMISSION_DENIED), + }, + }, nil +} diff --git a/examples/ext_authz/auth/grpc-service/pkg/auth/v3/auth.go b/examples/ext_authz/auth/grpc-service/pkg/auth/v3/auth.go new file mode 100644 index 000000000000..1cae7cbd8d43 --- /dev/null +++ b/examples/ext_authz/auth/grpc-service/pkg/auth/v3/auth.go @@ -0,0 +1,68 @@ +package v3 + +import ( + "context" + "log" + "strings" + + envoy_api_v3_core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + envoy_service_auth_v3 "github.com/envoyproxy/go-control-plane/envoy/service/auth/v3" + "github.com/golang/protobuf/ptypes/wrappers" + "google.golang.org/genproto/googleapis/rpc/code" + "google.golang.org/genproto/googleapis/rpc/status" + + "github.com/envoyproxy/envoy/examples/ext_authz/auth/grpc-service/pkg/auth" +) + +type server struct { + users auth.Users +} + +var _ envoy_service_auth_v3.AuthorizationServer = &server{} + +// New creates a new authorization server. +func New(users auth.Users) envoy_service_auth_v3.AuthorizationServer { + return &server{users} +} + +// Check implements authorization's Check interface which performs authorization check based on the +// attributes associated with the incoming request. +func (s *server) Check( + ctx context.Context, + req *envoy_service_auth_v3.CheckRequest) (*envoy_service_auth_v3.CheckResponse, error) { + authorization := req.Attributes.Request.Http.Headers["authorization"] + log.Println(authorization) + + extracted := strings.Fields(authorization) + if len(extracted) == 2 && extracted[0] == "Bearer" { + valid, user := s.users.Check(extracted[1]) + if valid { + return &envoy_service_auth_v3.CheckResponse{ + HttpResponse: &envoy_service_auth_v3.CheckResponse_OkResponse{ + OkResponse: &envoy_service_auth_v3.OkHttpResponse{ + Headers: []*envoy_api_v3_core.HeaderValueOption{ + { + Append: &wrappers.BoolValue{Value: false}, + Header: &envoy_api_v3_core.HeaderValue{ + // For a successful request, the authorization server sets the + // x-current-user value. + Key: "x-current-user", + Value: user, + }, + }, + }, + }, + }, + Status: &status.Status{ + Code: int32(code.Code_OK), + }, + }, nil + } + } + + return &envoy_service_auth_v3.CheckResponse{ + Status: &status.Status{ + Code: int32(code.Code_PERMISSION_DENIED), + }, + }, nil +} diff --git a/examples/ext_authz/auth/http-service/Dockerfile b/examples/ext_authz/auth/http-service/Dockerfile new file mode 100644 index 000000000000..d0bcbc91f8b0 --- /dev/null +++ b/examples/ext_authz/auth/http-service/Dockerfile @@ -0,0 +1,4 @@ +FROM node:alpine + +COPY . 
/app +CMD ["node", "/app/http-service/server"] diff --git a/examples/ext_authz/auth/http-service/server.js b/examples/ext_authz/auth/http-service/server.js new file mode 100644 index 000000000000..9c890d75226a --- /dev/null +++ b/examples/ext_authz/auth/http-service/server.js @@ -0,0 +1,29 @@ +const Http = require("http"); +const path = require("path"); + +const tokens = require(process.env.USERS || + path.join(__dirname, "..", "users.json")); + +const server = new Http.Server((req, res) => { + const authorization = req.headers["authorization"] || ""; + const extracted = authorization.split(" "); + if (extracted.length === 2 && extracted[0] === "Bearer") { + const user = checkToken(extracted[1]); + if (user !== undefined) { + // The authorization server returns a response with "x-current-user" header for a successful + // request. + res.writeHead(200, { "x-current-user": user }); + return res.end(); + } + } + res.writeHead(403); + res.end(); +}); + +const port = process.env.PORT || 9002; +server.listen(port); +console.log(`starting HTTP server on: ${port}`); + +function checkToken(token) { + return tokens[token]; +} diff --git a/examples/ext_authz/auth/users.json b/examples/ext_authz/auth/users.json new file mode 100644 index 000000000000..4068bcb7628e --- /dev/null +++ b/examples/ext_authz/auth/users.json @@ -0,0 +1,5 @@ +{ + "token1": "user1", + "token2": "user2", + "token3": "user3" +} diff --git a/examples/ext_authz/config/grpc-service/v2.yaml b/examples/ext_authz/config/grpc-service/v2.yaml new file mode 100644 index 000000000000..bd1a6eee7f6d --- /dev/null +++ b/examples/ext_authz/config/grpc-service/v2.yaml @@ -0,0 +1,72 @@ +static_resources: + listeners: + - address: + socket_address: + address: 0.0.0.0 + port_value: 8000 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + codec_type: auto + stat_prefix: ingress_http + route_config: + name: local_route + virtual_hosts: + - name: upstream + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: upstream-service + http_filters: + - name: envoy.filters.http.ext_authz + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz + grpc_service: + envoy_grpc: + cluster_name: ext_authz-grpc-service + timeout: 0.250s + transport_api_version: V2 + - name: envoy.filters.http.router + typed_config: {} + + clusters: + - name: upstream-service + connect_timeout: 0.250s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: upstream-service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: upstream-service + port_value: 8080 + + - name: ext_authz-grpc-service + connect_timeout: 0.250s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + http2_protocol_options: {} + load_assignment: + cluster_name: ext_authz-grpc-service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: ext_authz-grpc-service + port_value: 9001 + +admin: + access_log_path: "/dev/null" + address: + socket_address: + address: 0.0.0.0 + port_value: 8001 diff --git a/examples/ext_authz/config/grpc-service/v3.yaml b/examples/ext_authz/config/grpc-service/v3.yaml new file mode 100644 index 000000000000..2b4829e2c90c --- /dev/null +++ b/examples/ext_authz/config/grpc-service/v3.yaml @@ -0,0 +1,72 @@ +static_resources: + listeners: + - address: + socket_address: + address: 0.0.0.0 + 
port_value: 8000 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + codec_type: auto + stat_prefix: ingress_http + route_config: + name: local_route + virtual_hosts: + - name: upstream + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: upstream-service + http_filters: + - name: envoy.filters.http.ext_authz + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz + grpc_service: + envoy_grpc: + cluster_name: ext_authz-grpc-service + timeout: 0.250s + transport_api_version: V3 + - name: envoy.filters.http.router + typed_config: {} + + clusters: + - name: upstream-service + connect_timeout: 0.250s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: upstream-service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: upstream-service + port_value: 8080 + + - name: ext_authz-grpc-service + connect_timeout: 0.250s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + http2_protocol_options: {} + load_assignment: + cluster_name: ext_authz-grpc-service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: ext_authz-grpc-service + port_value: 9001 + +admin: + access_log_path: "/dev/null" + address: + socket_address: + address: 0.0.0.0 + port_value: 8001 diff --git a/examples/ext_authz/config/http-service.yaml b/examples/ext_authz/config/http-service.yaml new file mode 100644 index 000000000000..85065d99806c --- /dev/null +++ b/examples/ext_authz/config/http-service.yaml @@ -0,0 +1,75 @@ +static_resources: + listeners: + - address: + socket_address: + address: 0.0.0.0 + port_value: 8000 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + codec_type: auto + stat_prefix: ingress_http + route_config: + name: local_route + virtual_hosts: + - name: upstream + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: upstream-service + http_filters: + - name: envoy.filters.http.ext_authz + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz + http_service: + server_uri: + uri: ext_authz + cluster: ext_authz-http-service + timeout: 0.250s + authorization_response: + allowed_upstream_headers: + patterns: + - exact: x-current-user + - name: envoy.filters.http.router + typed_config: {} + + clusters: + - name: upstream-service + connect_timeout: 0.250s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: upstream-service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: upstream-service + port_value: 8080 + + - name: ext_authz-http-service + connect_timeout: 0.250s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: ext_authz-http-service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: ext_authz-http-service + port_value: 9002 + +admin: + access_log_path: "/dev/null" + address: + socket_address: + address: 0.0.0.0 + port_value: 8001 diff --git a/examples/ext_authz/docker-compose.yaml b/examples/ext_authz/docker-compose.yaml new file mode 100644 index 000000000000..66714d1a7dc1 --- /dev/null +++ b/examples/ext_authz/docker-compose.yaml @@ -0,0 +1,53 @@ +version: "3.7" 
+services: + + front-envoy: + build: + context: ../ + dockerfile: front-proxy/Dockerfile-frontenvoy + volumes: + - ./${FRONT_ENVOY_YAML}:/etc/front-envoy.yaml + networks: + - envoymesh + expose: + - "8000" + - "8001" + ports: + - "8000:8000" + - "8001:8001" + + ext_authz-http-service: + build: + context: ./auth + dockerfile: http-service/Dockerfile + volumes: + - ./users.json:/etc/users.json + environment: + - USERS=/etc/users.json + networks: + envoymesh: + aliases: + - ext_authz-http-service + + ext_authz-grpc-service: + build: + context: ./auth + dockerfile: grpc-service/Dockerfile + volumes: + - ./users.json:/etc/users.json + networks: + envoymesh: + aliases: + - ext_authz-grpc-service + + upstream-service: + build: + context: ./upstream + dockerfile: service/Dockerfile + networks: + envoymesh: + aliases: + - upstream-service + +networks: + envoymesh: {} diff --git a/examples/ext_authz/upstream/service/Dockerfile b/examples/ext_authz/upstream/service/Dockerfile new file mode 100644 index 000000000000..5f70f40aca7c --- /dev/null +++ b/examples/ext_authz/upstream/service/Dockerfile @@ -0,0 +1,5 @@ +FROM python:3-alpine + +RUN pip3 install -q Flask==0.11.1 +COPY . ./app +CMD ["python3", "/app/service/server.py"] diff --git a/examples/ext_authz/upstream/service/server.py b/examples/ext_authz/upstream/service/server.py new file mode 100644 index 000000000000..a3d539f195ab --- /dev/null +++ b/examples/ext_authz/upstream/service/server.py @@ -0,0 +1,12 @@ +from flask import Flask, request + +app = Flask(__name__) + + +@app.route('/service') +def hello(): + return 'Hello ' + request.headers.get('x-current-user') + ' from behind Envoy!' + + +if __name__ == "__main__": + app.run(host='0.0.0.0', port=8080, debug=False) diff --git a/examples/ext_authz/users.json b/examples/ext_authz/users.json new file mode 100644 index 000000000000..4068bcb7628e --- /dev/null +++ b/examples/ext_authz/users.json @@ -0,0 +1,5 @@ +{ + "token1": "user1", + "token2": "user2", + "token3": "user3" +} diff --git a/test/config_test/config_test.cc b/test/config_test/config_test.cc index 37e6f92f4605..2b25b9df8523 100644 --- a/test/config_test/config_test.cc +++ b/test/config_test/config_test.cc @@ -76,6 +76,9 @@ class ConfigTest { ScopedRuntimeInjector scoped_runtime(server_.runtime()); ON_CALL(server_.runtime_loader_.snapshot_, deprecatedFeatureEnabled(_, _)) .WillByDefault(Invoke([](absl::string_view, bool default_value) { return default_value; })); + ON_CALL(server_.runtime_loader_, threadsafeSnapshot()).WillByDefault(Invoke([this]() { + return snapshot_; + })); envoy::config::bootstrap::v3::Bootstrap bootstrap; Server::InstanceUtil::loadBootstrapConfig( @@ -144,6 +147,7 @@ class ConfigTest { Server::ListenerManagerImpl listener_manager_{server_, component_factory_, worker_factory_, false}; Runtime::RandomGeneratorImpl random_; + Runtime::SnapshotConstSharedPtr snapshot_{std::make_shared>()}; NiceMock os_sys_calls_; TestThreadsafeSingletonInjector os_calls{&os_sys_calls_}; NiceMock file_system_; diff --git a/test/config_test/example_configs_test.cc b/test/config_test/example_configs_test.cc index 951d70a8b5dc..6d68ccc0c424 100644 --- a/test/config_test/example_configs_test.cc +++ b/test/config_test/example_configs_test.cc @@ -21,9 +21,9 @@ TEST(ExampleConfigsTest, All) { #ifdef __APPLE__ // freebind/freebind.yaml is not supported on macOS and disabled via Bazel. 
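  // The ext_authz sandbox added earlier in this patch contributes three new example configs
  // (grpc-service v2.yaml, grpc-service v3.yaml and http-service.yaml), hence the expected
  // counts below rise by three on both platforms.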
- EXPECT_EQ(25UL, ConfigTest::run(directory)); + EXPECT_EQ(28UL, ConfigTest::run(directory)); #else - EXPECT_EQ(26UL, ConfigTest::run(directory)); + EXPECT_EQ(29UL, ConfigTest::run(directory)); #endif ConfigTest::testMerge(); From 3d59b0b79b29cc839e149527720f9dd340709239 Mon Sep 17 00:00:00 2001 From: Yuchen Dai Date: Wed, 24 Jun 2020 08:47:20 -0700 Subject: [PATCH 429/909] od: ejection behavior should not depend on metric (#11629) Maintain both gauge and internal value for ejection_active. Ideally they should be the same. Now the max_ejection works even the metric are disabled. Signed-off-by: Yuchen Dai --- .../common/upstream/outlier_detection_impl.cc | 14 +++---- .../common/upstream/outlier_detection_impl.h | 19 ++++++++++ .../upstream/outlier_detection_impl_test.cc | 38 +++++++++++++++++++ 3 files changed, 64 insertions(+), 7 deletions(-) diff --git a/source/common/upstream/outlier_detection_impl.cc b/source/common/upstream/outlier_detection_impl.cc index e077a862739e..e27ab5cca985 100644 --- a/source/common/upstream/outlier_detection_impl.cc +++ b/source/common/upstream/outlier_detection_impl.cc @@ -268,8 +268,8 @@ DetectorImpl::DetectorImpl(const Cluster& cluster, DetectorImpl::~DetectorImpl() { for (const auto& host : host_monitors_) { if (host.first->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK)) { - ASSERT(stats_.ejections_active_.value() > 0); - stats_.ejections_active_.dec(); + ASSERT(ejections_active_helper_.value() > 0); + ejections_active_helper_.dec(); } } } @@ -301,8 +301,8 @@ void DetectorImpl::initialize(const Cluster& cluster) { for (const HostSharedPtr& host : hosts_removed) { ASSERT(host_monitors_.count(host) == 1); if (host->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK)) { - ASSERT(stats_.ejections_active_.value() > 0); - stats_.ejections_active_.dec(); + ASSERT(ejections_active_helper_.value() > 0); + ejections_active_helper_.dec(); } host_monitors_.erase(host); @@ -335,7 +335,7 @@ void DetectorImpl::checkHostForUneject(HostSharedPtr host, DetectorHostMonitorIm "outlier_detection.base_ejection_time_ms", config_.baseEjectionTimeMs())); ASSERT(monitor->numEjections() > 0); if ((base_eject_time * monitor->numEjections()) <= (now - monitor->lastEjectionTime().value())) { - stats_.ejections_active_.dec(); + ejections_active_helper_.dec(); host->healthFlagClear(Host::HealthFlag::FAILED_OUTLIER_CHECK); // Reset the consecutive failure counters to avoid re-ejection on very few new errors due // to the non-triggering counter being close to its trigger value. @@ -451,7 +451,7 @@ void DetectorImpl::ejectHost(HostSharedPtr host, uint64_t max_ejection_percent = std::min( 100, runtime_.snapshot().getInteger("outlier_detection.max_ejection_percent", config_.maxEjectionPercent())); - double ejected_percent = 100.0 * stats_.ejections_active_.value() / host_monitors_.size(); + double ejected_percent = 100.0 * ejections_active_helper_.value() / host_monitors_.size(); // Note this is not currently checked per-priority level, so it is possible // for outlier detection to eject all hosts at any given priority level. 
if (ejected_percent < max_ejection_percent) { @@ -461,7 +461,7 @@ void DetectorImpl::ejectHost(HostSharedPtr host, stats_.ejections_total_.inc(); } if (enforceEjection(type)) { - stats_.ejections_active_.inc(); + ejections_active_helper_.inc(); updateEnforcedEjectionStats(type); host_monitors_[host]->eject(time_source_.monotonicTime()); runCallbacks(host); diff --git a/source/common/upstream/outlier_detection_impl.h b/source/common/upstream/outlier_detection_impl.h index 39e891e44e57..ff42473f3cc0 100644 --- a/source/common/upstream/outlier_detection_impl.h +++ b/source/common/upstream/outlier_detection_impl.h @@ -18,6 +18,7 @@ #include "envoy/http/codes.h" #include "envoy/runtime/runtime.h" #include "envoy/stats/scope.h" +#include "envoy/stats/stats.h" #include "envoy/upstream/outlier_detection.h" #include "envoy/upstream/upstream.h" @@ -387,11 +388,29 @@ class DetectorImpl : public Detector, public std::enable_shared_from_this ejections_active_value_{0}; + }; DetectorConfig config_; Event::Dispatcher& dispatcher_; Runtime::Loader& runtime_; TimeSource& time_source_; DetectionStats stats_; + EjectionsActiveHelper ejections_active_helper_{stats_.ejections_active_}; Event::TimerPtr interval_timer_; std::list callbacks_; std::unordered_map host_monitors_; diff --git a/test/common/upstream/outlier_detection_impl_test.cc b/test/common/upstream/outlier_detection_impl_test.cc index a79b1a1b31d6..74a96a074fc8 100644 --- a/test/common/upstream/outlier_detection_impl_test.cc +++ b/test/common/upstream/outlier_detection_impl_test.cc @@ -1352,6 +1352,44 @@ TEST_F(OutlierDetectorImplTest, NotEnforcing) { .value()); } +TEST_F(OutlierDetectorImplTest, EjectionActiveValueIsAccountedWithoutMetricStorage) { + EXPECT_CALL(cluster_.prioritySet(), addMemberUpdateCb(_)); + addHosts({"tcp://127.0.0.1:80", "tcp://127.0.0.1:81"}); + EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _)); + std::shared_ptr detector(DetectorImpl::create( + cluster_, empty_outlier_detection_, dispatcher_, runtime_, time_system_, event_logger_)); + detector->addChangedStateCb([&](HostSharedPtr host) -> void { checker_.check(host); }); + + ON_CALL(runtime_.snapshot_, getInteger("outlier_detection.max_ejection_percent", _)) + .WillByDefault(Return(1)); + + loadRq(hosts_[0], 4, 500); + + time_system_.setMonotonicTime(std::chrono::milliseconds(0)); + + // Manually increase the gauge. From metric's perspective it's overflowed. + outlier_detection_ejections_active_.inc(); + + // Since the overflow is not determined by the metric. Host[0] can be ejected. + EXPECT_CALL(checker_, check(hosts_[0])); + EXPECT_CALL(*event_logger_, logEject(std::static_pointer_cast(hosts_[0]), + _, envoy::data::cluster::v2alpha::CONSECUTIVE_5XX, true)); + hosts_[0]->outlierDetector().putHttpResponseCode(500); + EXPECT_TRUE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK)); + + // Expect active helper_ has the value 1. However, helper is private and it cannot be tested. + EXPECT_EQ(2UL, outlier_detection_ejections_active_.value()); + EXPECT_EQ(0UL, + cluster_.info_->stats_store_.counter("outlier_detection.ejections_overflow").value()); + + // Now it starts to overflow. 
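  // With max_ejection_percent forced to 1 and one of the two monitored hosts already ejected,
  // ejected_percent evaluates to 100.0 * 1 / 2 = 50, so this request takes the overflow path
  // and is counted in ejections_overflow rather than producing a second ejection.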
+ loadRq(hosts_[1], 5, 500); + EXPECT_FALSE(hosts_[1]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK)); + EXPECT_EQ(2UL, outlier_detection_ejections_active_.value()); + EXPECT_EQ(1UL, + cluster_.info_->stats_store_.counter("outlier_detection.ejections_overflow").value()); +} + TEST_F(OutlierDetectorImplTest, CrossThreadRemoveRace) { EXPECT_CALL(cluster_.prioritySet(), addMemberUpdateCb(_)); addHosts({"tcp://127.0.0.1:80"}); From f87e68f8a7eef471b1e12b8ef1f662ab845356fb Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Wed, 24 Jun 2020 16:14:44 -0400 Subject: [PATCH 430/909] upstream: cleaning up APIs (#11713) Risk Level: Low (API change) Testing: n/a Docs Changes: n/a Release Notes: n/a Signed-off-by: Alyssa Wilk --- include/envoy/router/router.h | 27 +++++++++++++++---- source/common/router/upstream_request.cc | 5 ++++ source/common/router/upstream_request.h | 16 +++++++---- .../upstreams/http/http/upstream_request.cc | 5 ++-- .../upstreams/http/http/upstream_request.h | 8 +++--- .../upstreams/http/tcp/upstream_request.cc | 15 +++++------ .../upstreams/http/tcp/upstream_request.h | 4 +-- .../http/tcp/upstream_request_test.cc | 2 +- test/mocks/router/mocks.cc | 3 +++ test/mocks/router/mocks.h | 22 ++++++++++++++- 10 files changed, 79 insertions(+), 28 deletions(-) diff --git a/include/envoy/router/router.h b/include/envoy/router/router.h index ce36ec69ccc8..27ab91591d9f 100644 --- a/include/envoy/router/router.h +++ b/include/envoy/router/router.h @@ -1096,7 +1096,6 @@ class Config { using ConfigConstSharedPtr = std::shared_ptr; class GenericConnectionPoolCallbacks; -class UpstreamRequest; class GenericUpstream; /** @@ -1138,6 +1137,22 @@ class GenericConnPool { virtual Upstream::HostDescriptionConstSharedPtr host() const PURE; }; +/** + * An API for the interactions the upstream stream needs to have with the downstream stream + * and/or router components + */ +class UpstreamToDownstream : public Http::ResponseDecoder, public Http::StreamCallbacks { +public: + /** + * @return return the routeEntry for the downstream stream. + */ + virtual const RouteEntry& routeEntry() const PURE; + /** + * @return return the connection for the downstream stream. + */ + virtual const Network::Connection& connection() const PURE; +}; + /** * An API for wrapping callbacks from either an HTTP or a TCP connection pool. * @@ -1174,10 +1189,12 @@ class GenericConnectionPoolCallbacks { const Network::Address::InstanceConstSharedPtr& upstream_local_address, const StreamInfo::StreamInfo& info) PURE; - // TODO(alyssawilk) This exists because the Connection Pool creates the GenericUpstream, and the - // GenericUpstream needs a handle back to the upstream request to pass on events, as upstream - // data flows in. Do interface clean up in a follow-up PR. - virtual UpstreamRequest* upstreamRequest() PURE; + // @return the UpstreamToDownstream interface for this stream. + // + // This is the interface for all interactions the upstream stream needs to have with the + // downstream stream. It is in the GenericConnectionPoolCallbacks as the GenericConnectionPool + // creates the GenericUpstream, and the GenericUpstream will need this interface. 
+ virtual UpstreamToDownstream& upstreamToDownstream() PURE; }; /** diff --git a/source/common/router/upstream_request.cc b/source/common/router/upstream_request.cc index ab43e82157d0..e4d97c82f0bb 100644 --- a/source/common/router/upstream_request.cc +++ b/source/common/router/upstream_request.cc @@ -157,6 +157,11 @@ void UpstreamRequest::decodeTrailers(Http::ResponseTrailerMapPtr&& trailers) { } parent_.onUpstreamTrailers(std::move(trailers), *this); } +const RouteEntry& UpstreamRequest::routeEntry() const { return *parent_.routeEntry(); } + +const Network::Connection& UpstreamRequest::connection() const { + return *parent_.callbacks()->connection(); +} void UpstreamRequest::decodeMetadata(Http::MetadataMapPtr&& metadata_map) { parent_.onUpstreamMetadata(std::move(metadata_map)); diff --git a/source/common/router/upstream_request.h b/source/common/router/upstream_request.h index 027731972e9a..91f17f511239 100644 --- a/source/common/router/upstream_request.h +++ b/source/common/router/upstream_request.h @@ -32,7 +32,7 @@ class UpstreamRequest; // The base request for Upstream. class UpstreamRequest : public Logger::Loggable, - public Http::ResponseDecoder, + public UpstreamToDownstream, public LinkedObject, public GenericConnectionPoolCallbacks { public: @@ -54,12 +54,18 @@ class UpstreamRequest : public Logger::Loggable, void decodeData(Buffer::Instance& data, bool end_stream) override; void decodeMetadata(Http::MetadataMapPtr&& metadata_map) override; - // Http::ResponseDecoder + // UpstreamToDownstream (Http::ResponseDecoder) void decode100ContinueHeaders(Http::ResponseHeaderMapPtr&& headers) override; void decodeHeaders(Http::ResponseHeaderMapPtr&& headers, bool end_stream) override; void decodeTrailers(Http::ResponseTrailerMapPtr&& trailers) override; - - void onResetStream(Http::StreamResetReason reason, absl::string_view transport_failure_reason); + // UpstreamToDownstream (Http::StreamCallbacks) + void onResetStream(Http::StreamResetReason reason, + absl::string_view transport_failure_reason) override; + void onAboveWriteBufferHighWatermark() override { disableDataFromDownstreamForFlowControl(); } + void onBelowWriteBufferLowWatermark() override { enableDataFromDownstreamForFlowControl(); } + // UpstreamToDownstream + const RouteEntry& routeEntry() const override; + const Network::Connection& connection() const override; void disableDataFromDownstreamForFlowControl(); void enableDataFromDownstreamForFlowControl(); @@ -72,7 +78,7 @@ class UpstreamRequest : public Logger::Loggable, Upstream::HostDescriptionConstSharedPtr host, const Network::Address::InstanceConstSharedPtr& upstream_local_address, const StreamInfo::StreamInfo& info) override; - UpstreamRequest* upstreamRequest() override { return this; } + UpstreamToDownstream& upstreamToDownstream() override { return *this; } void clearRequestEncoder(); void onStreamMaxDurationReached(); diff --git a/source/extensions/upstreams/http/http/upstream_request.cc b/source/extensions/upstreams/http/http/upstream_request.cc index d9b7e6ca6d3d..dce29ea2d9d8 100644 --- a/source/extensions/upstreams/http/http/upstream_request.cc +++ b/source/extensions/upstreams/http/http/upstream_request.cc @@ -33,7 +33,7 @@ void HttpConnPool::newStream(GenericConnectionPoolCallbacks* callbacks) { // might get deleted inline as well. Only write the returned handle out if it is not nullptr to // deal with this case. 
Envoy::Http::ConnectionPool::Cancellable* handle = - conn_pool_->newStream(*callbacks->upstreamRequest(), *this); + conn_pool_->newStream(callbacks->upstreamToDownstream(), *this); if (handle) { conn_pool_stream_handle_ = handle; } @@ -62,7 +62,8 @@ void HttpConnPool::onPoolReady(Envoy::Http::RequestEncoder& request_encoder, Upstream::HostDescriptionConstSharedPtr host, const StreamInfo::StreamInfo& info) { conn_pool_stream_handle_ = nullptr; - auto upstream = std::make_unique(*callbacks_->upstreamRequest(), &request_encoder); + auto upstream = + std::make_unique(callbacks_->upstreamToDownstream(), &request_encoder); callbacks_->onPoolReady(std::move(upstream), host, request_encoder.getStream().connectionLocalAddress(), info); } diff --git a/source/extensions/upstreams/http/http/upstream_request.h b/source/extensions/upstreams/http/http/upstream_request.h index 7fcad5a0b24f..fa01ddbb5d47 100644 --- a/source/extensions/upstreams/http/http/upstream_request.h +++ b/source/extensions/upstreams/http/http/upstream_request.h @@ -50,7 +50,7 @@ class HttpConnPool : public Router::GenericConnPool, public Envoy::Http::Connect class HttpUpstream : public Router::GenericUpstream, public Envoy::Http::StreamCallbacks { public: - HttpUpstream(Router::UpstreamRequest& upstream_request, Envoy::Http::RequestEncoder* encoder) + HttpUpstream(Router::UpstreamToDownstream& upstream_request, Envoy::Http::RequestEncoder* encoder) : upstream_request_(upstream_request), request_encoder_(encoder) { request_encoder_->getStream().addCallbacks(*this); } @@ -83,15 +83,15 @@ class HttpUpstream : public Router::GenericUpstream, public Envoy::Http::StreamC } void onAboveWriteBufferHighWatermark() override { - upstream_request_.disableDataFromDownstreamForFlowControl(); + upstream_request_.onAboveWriteBufferHighWatermark(); } void onBelowWriteBufferLowWatermark() override { - upstream_request_.enableDataFromDownstreamForFlowControl(); + upstream_request_.onBelowWriteBufferLowWatermark(); } private: - Router::UpstreamRequest& upstream_request_; + Router::UpstreamToDownstream& upstream_request_; Envoy::Http::RequestEncoder* request_encoder_{}; }; diff --git a/source/extensions/upstreams/http/tcp/upstream_request.cc b/source/extensions/upstreams/http/tcp/upstream_request.cc index 26e8fb50ec94..4284a2e5a13d 100644 --- a/source/extensions/upstreams/http/tcp/upstream_request.cc +++ b/source/extensions/upstreams/http/tcp/upstream_request.cc @@ -27,12 +27,12 @@ void TcpConnPool::onPoolReady(Envoy::Tcp::ConnectionPool::ConnectionDataPtr&& co upstream_handle_ = nullptr; Network::Connection& latched_conn = conn_data->connection(); auto upstream = - std::make_unique(callbacks_->upstreamRequest(), std::move(conn_data)); + std::make_unique(&callbacks_->upstreamToDownstream(), std::move(conn_data)); callbacks_->onPoolReady(std::move(upstream), host, latched_conn.localAddress(), latched_conn.streamInfo()); } -TcpUpstream::TcpUpstream(Router::UpstreamRequest* upstream_request, +TcpUpstream::TcpUpstream(Router::UpstreamToDownstream* upstream_request, Envoy::Tcp::ConnectionPool::ConnectionDataPtr&& upstream) : upstream_request_(upstream_request), upstream_conn_data_(std::move(upstream)) { upstream_conn_data_->connection().enableHalfClose(true); @@ -46,13 +46,12 @@ void TcpUpstream::encodeData(Buffer::Instance& data, bool end_stream) { void TcpUpstream::encodeHeaders(const Envoy::Http::RequestHeaderMap&, bool end_stream) { // Headers should only happen once, so use this opportunity to add the proxy // proto header, if configured. 
- ASSERT(upstream_request_->parent().routeEntry()->connectConfig().has_value()); + ASSERT(upstream_request_->routeEntry().connectConfig().has_value()); Buffer::OwnedImpl data; - auto& connect_config = upstream_request_->parent().routeEntry()->connectConfig().value(); + auto& connect_config = upstream_request_->routeEntry().connectConfig().value(); if (connect_config.has_proxy_protocol_config()) { - const Network::Connection& connection = *upstream_request_->parent().callbacks()->connection(); Extensions::Common::ProxyProtocol::generateProxyProtoHeader( - connect_config.proxy_protocol_config(), connection, data); + connect_config.proxy_protocol_config(), upstream_request_->connection(), data); } if (data.length() != 0 || end_stream) { @@ -96,13 +95,13 @@ void TcpUpstream::onEvent(Network::ConnectionEvent event) { void TcpUpstream::onAboveWriteBufferHighWatermark() { if (upstream_request_) { - upstream_request_->disableDataFromDownstreamForFlowControl(); + upstream_request_->onAboveWriteBufferHighWatermark(); } } void TcpUpstream::onBelowWriteBufferLowWatermark() { if (upstream_request_) { - upstream_request_->enableDataFromDownstreamForFlowControl(); + upstream_request_->onBelowWriteBufferLowWatermark(); } } diff --git a/source/extensions/upstreams/http/tcp/upstream_request.h b/source/extensions/upstreams/http/tcp/upstream_request.h index 2d0bf85a148e..1c2e7a44d033 100644 --- a/source/extensions/upstreams/http/tcp/upstream_request.h +++ b/source/extensions/upstreams/http/tcp/upstream_request.h @@ -64,7 +64,7 @@ class TcpConnPool : public Router::GenericConnPool, public Envoy::Tcp::Connectio class TcpUpstream : public Router::GenericUpstream, public Envoy::Tcp::ConnectionPool::UpstreamCallbacks { public: - TcpUpstream(Router::UpstreamRequest* upstream_request, + TcpUpstream(Router::UpstreamToDownstream* upstream_request, Envoy::Tcp::ConnectionPool::ConnectionDataPtr&& upstream); // GenericUpstream @@ -82,7 +82,7 @@ class TcpUpstream : public Router::GenericUpstream, void onBelowWriteBufferLowWatermark() override; private: - Router::UpstreamRequest* upstream_request_; + Router::UpstreamToDownstream* upstream_request_; Envoy::Tcp::ConnectionPool::ConnectionDataPtr upstream_conn_data_; }; diff --git a/test/extensions/upstreams/http/tcp/upstream_request_test.cc b/test/extensions/upstreams/http/tcp/upstream_request_test.cc index 1889b93dce1b..3f5fc02692df 100644 --- a/test/extensions/upstreams/http/tcp/upstream_request_test.cc +++ b/test/extensions/upstreams/http/tcp/upstream_request_test.cc @@ -122,7 +122,7 @@ TEST_F(TcpConnPoolTest, Basic) { EXPECT_CALL(mock_pool_, newConnection(_)).WillOnce(Return(&cancellable_)); conn_pool_->newStream(&mock_generic_callbacks_); - EXPECT_CALL(mock_generic_callbacks_, upstreamRequest()); + EXPECT_CALL(mock_generic_callbacks_, upstreamToDownstream()); EXPECT_CALL(mock_generic_callbacks_, onPoolReady(_, _, _, _)); auto data = std::make_unique>(); EXPECT_CALL(*data, connection()).Times(AnyNumber()).WillRepeatedly(ReturnRef(connection)); diff --git a/test/mocks/router/mocks.cc b/test/mocks/router/mocks.cc index 962c86d70f67..93de670c3a2d 100644 --- a/test/mocks/router/mocks.cc +++ b/test/mocks/router/mocks.cc @@ -154,5 +154,8 @@ MockScopedRouteConfigProvider::MockScopedRouteConfigProvider() } MockScopedRouteConfigProvider::~MockScopedRouteConfigProvider() = default; +MockGenericConnectionPoolCallbacks::MockGenericConnectionPoolCallbacks() { + ON_CALL(*this, upstreamToDownstream()).WillByDefault(ReturnRef(upstream_to_downstream_)); +} } // namespace Router } // 
namespace Envoy diff --git a/test/mocks/router/mocks.h b/test/mocks/router/mocks.h index 4fe1103f8451..d1ae30fd8f5d 100644 --- a/test/mocks/router/mocks.h +++ b/test/mocks/router/mocks.h @@ -529,8 +529,26 @@ class MockGenericConnPool : public GenericConnPool { MOCK_METHOD(Upstream::HostDescriptionConstSharedPtr, host, (), (const)); }; +class MockUpstreamToDownstream : public UpstreamToDownstream { +public: + MOCK_METHOD(const RouteEntry&, routeEntry, (), (const)); + MOCK_METHOD(const Network::Connection&, connection, (), (const)); + + MOCK_METHOD(void, decodeData, (Buffer::Instance&, bool)); + MOCK_METHOD(void, decodeMetadata, (Http::MetadataMapPtr &&)); + MOCK_METHOD(void, decode100ContinueHeaders, (Http::ResponseHeaderMapPtr &&)); + MOCK_METHOD(void, decodeHeaders, (Http::ResponseHeaderMapPtr&&, bool)); + MOCK_METHOD(void, decodeTrailers, (Http::ResponseTrailerMapPtr &&)); + + MOCK_METHOD(void, onResetStream, (Http::StreamResetReason, absl::string_view)); + MOCK_METHOD(void, onAboveWriteBufferHighWatermark, ()); + MOCK_METHOD(void, onBelowWriteBufferLowWatermark, ()); +}; + class MockGenericConnectionPoolCallbacks : public GenericConnectionPoolCallbacks { public: + MockGenericConnectionPoolCallbacks(); + MOCK_METHOD(void, onPoolFailure, (Http::ConnectionPool::PoolFailureReason reason, absl::string_view transport_failure_reason, @@ -540,7 +558,9 @@ class MockGenericConnectionPoolCallbacks : public GenericConnectionPoolCallbacks Upstream::HostDescriptionConstSharedPtr host, const Network::Address::InstanceConstSharedPtr& upstream_local_address, const StreamInfo::StreamInfo& info)); - MOCK_METHOD(UpstreamRequest*, upstreamRequest, ()); + MOCK_METHOD(UpstreamToDownstream&, upstreamToDownstream, ()); + + NiceMock upstream_to_downstream_; }; } // namespace Router From 6fecf0b1438fd3d61245d759c8ab42d7e745cb46 Mon Sep 17 00:00:00 2001 From: Tony Allen Date: Wed, 24 Jun 2020 14:04:06 -0700 Subject: [PATCH 431/909] admission control: Implement thread-local controller (#11628) Signed-off-by: Tony Allen --- source/extensions/extensions_build_config.bzl | 4 +- .../filters/http/admission_control/BUILD | 18 ++ .../admission_control/admission_control.cc | 7 +- .../admission_control/admission_control.h | 6 +- .../filters/http/admission_control/config.cc | 64 +++++++ .../filters/http/admission_control/config.h | 32 ++++ .../thread_local_controller.cc | 49 +++++ .../thread_local_controller.h | 86 ++++++++- .../filters/http/admission_control/BUILD | 24 +++ .../admission_control_filter_test.cc | 42 ++--- .../admission_control_integration_test.cc | 171 ++++++++++++++++++ .../http/admission_control/config_test.cc | 5 +- .../http/admission_control/controller_test.cc | 107 +++++++++++ test/integration/autonomous_upstream.cc | 15 +- test/integration/autonomous_upstream.h | 5 + 15 files changed, 594 insertions(+), 41 deletions(-) create mode 100644 source/extensions/filters/http/admission_control/config.cc create mode 100644 source/extensions/filters/http/admission_control/config.h create mode 100644 source/extensions/filters/http/admission_control/thread_local_controller.cc create mode 100644 test/extensions/filters/http/admission_control/admission_control_integration_test.cc create mode 100644 test/extensions/filters/http/admission_control/controller_test.cc diff --git a/source/extensions/extensions_build_config.bzl b/source/extensions/extensions_build_config.bzl index 1807538e2d8b..2f11d428f0af 100644 --- a/source/extensions/extensions_build_config.bzl +++ b/source/extensions/extensions_build_config.bzl @@ 
-41,9 +41,7 @@ EXTENSIONS = { # "envoy.filters.http.adaptive_concurrency": "//source/extensions/filters/http/adaptive_concurrency:config", - # NOTE: The admission control filter does not have a proper filter - # implemented right now. We are just referencing the filter lib here. - "envoy.filters.http.admission_control": "//source/extensions/filters/http/admission_control:admission_control_filter_lib", + "envoy.filters.http.admission_control": "//source/extensions/filters/http/admission_control:config", "envoy.filters.http.aws_lambda": "//source/extensions/filters/http/aws_lambda:config", "envoy.filters.http.aws_request_signing": "//source/extensions/filters/http/aws_request_signing:config", "envoy.filters.http.buffer": "//source/extensions/filters/http/buffer:config", diff --git a/source/extensions/filters/http/admission_control/BUILD b/source/extensions/filters/http/admission_control/BUILD index cb4a9975b09b..2bfdfb9912a6 100644 --- a/source/extensions/filters/http/admission_control/BUILD +++ b/source/extensions/filters/http/admission_control/BUILD @@ -15,6 +15,7 @@ envoy_cc_extension( name = "admission_control_filter_lib", srcs = [ "admission_control.cc", + "thread_local_controller.cc", ], hdrs = [ "admission_control.h", @@ -33,3 +34,20 @@ envoy_cc_extension( "@envoy_api//envoy/extensions/filters/http/admission_control/v3alpha:pkg_cc_proto", ], ) + +envoy_cc_extension( + name = "config", + srcs = ["config.cc"], + hdrs = ["config.h"], + security_posture = "unknown", + status = "alpha", + deps = [ + "//include/envoy/registry", + "//source/common/common:enum_to_int", + "//source/extensions/filters/http:well_known_names", + "//source/extensions/filters/http/admission_control:admission_control_filter_lib", + "//source/extensions/filters/http/admission_control/evaluators:response_evaluator_lib", + "//source/extensions/filters/http/common:factory_base_lib", + "@envoy_api//envoy/extensions/filters/http/admission_control/v3alpha:pkg_cc_proto", + ], +) diff --git a/source/extensions/filters/http/admission_control/admission_control.cc b/source/extensions/filters/http/admission_control/admission_control.cc index 7953b79c36f1..8886d73596f1 100644 --- a/source/extensions/filters/http/admission_control/admission_control.cc +++ b/source/extensions/filters/http/admission_control/admission_control.cc @@ -31,7 +31,7 @@ using GrpcStatus = Grpc::Status::GrpcStatus; static constexpr double defaultAggression = 2.0; AdmissionControlFilterConfig::AdmissionControlFilterConfig( - const AdmissionControlProto& proto_config, Runtime::Loader& runtime, TimeSource&, + const AdmissionControlProto& proto_config, Runtime::Loader& runtime, Runtime::RandomGenerator& random, Stats::Scope& scope, ThreadLocal::SlotPtr&& tls, std::shared_ptr response_evaluator) : random_(random), scope_(scope), tls_(std::move(tls)), @@ -122,8 +122,9 @@ AdmissionControlFilter::encodeTrailers(Http::ResponseTrailerMap& trailers) { } bool AdmissionControlFilter::shouldRejectRequest() const { - const double total = config_->getController().requestTotalCount(); - const double success = config_->getController().requestSuccessCount(); + const auto request_counts = config_->getController().requestCounts(); + const double total = request_counts.requests; + const double success = request_counts.successes; const double probability = (total - config_->aggression() * success) / (total + 1); // Choosing an accuracy of 4 significant figures for the probability. 
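As context for the arithmetic above: the filter derives its rejection probability from the recent
request and success counts, scaled by the aggression coefficient. The standalone sketch below is
not part of the patch; the helper name rejectionProbability is invented purely for illustration,
and it only reproduces the formula for a few sample histories:

  #include <algorithm>
  #include <cstdio>

  // Mirrors probability = (total - aggression * successes) / (total + 1); negative values are
  // clamped to zero here, since they simply mean "never reject".
  double rejectionProbability(double total, double successes, double aggression) {
    return std::max(0.0, (total - aggression * successes) / (total + 1.0));
  }

  int main() {
    // An all-failure history of 100 requests at aggression 1.0 yields ~0.99, in line with the
    // ~98% throttling rate asserted by the new integration test.
    std::printf("%.2f\n", rejectionProbability(100, 0, 1.0));   // 0.99
    // A fully successful history never rejects.
    std::printf("%.2f\n", rejectionProbability(100, 100, 1.0)); // 0.00
    // Partial success (40 of 100): probability is roughly 0.59.
    std::printf("%.2f\n", rejectionProbability(100, 40, 1.0));  // 0.59
    // A larger aggression coefficient weights successes more heavily and delays rejection.
    std::printf("%.2f\n", rejectionProbability(100, 80, 2.0));  // 0.00
    return 0;
  }

The +1 in the denominator keeps the expression well defined when there is no recent history, and
the aggression coefficient scales how much credit successful requests receive.
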
diff --git a/source/extensions/filters/http/admission_control/admission_control.h b/source/extensions/filters/http/admission_control/admission_control.h index 22edcf539396..a962096ae8ce 100644 --- a/source/extensions/filters/http/admission_control/admission_control.h +++ b/source/extensions/filters/http/admission_control/admission_control.h @@ -49,12 +49,14 @@ using AdmissionControlProto = class AdmissionControlFilterConfig { public: AdmissionControlFilterConfig(const AdmissionControlProto& proto_config, Runtime::Loader& runtime, - TimeSource&, Runtime::RandomGenerator& random, Stats::Scope& scope, + Runtime::RandomGenerator& random, Stats::Scope& scope, ThreadLocal::SlotPtr&& tls, std::shared_ptr response_evaluator); virtual ~AdmissionControlFilterConfig() = default; - virtual ThreadLocalController& getController() const { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } + virtual ThreadLocalController& getController() const { + return tls_->getTyped(); + } Runtime::RandomGenerator& random() const { return random_; } bool filterEnabled() const { return admission_control_feature_.enabled(); } diff --git a/source/extensions/filters/http/admission_control/config.cc b/source/extensions/filters/http/admission_control/config.cc new file mode 100644 index 000000000000..297fabf4f6d7 --- /dev/null +++ b/source/extensions/filters/http/admission_control/config.cc @@ -0,0 +1,64 @@ +#include "extensions/filters/http/admission_control/config.h" + +#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.h" +#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.validate.h" +#include "envoy/registry/registry.h" + +#include "common/common/enum_to_int.h" + +#include "extensions/filters/http/admission_control/admission_control.h" +#include "extensions/filters/http/admission_control/evaluators/response_evaluator.h" +#include "extensions/filters/http/admission_control/evaluators/success_criteria_evaluator.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace AdmissionControl { + +static constexpr std::chrono::seconds defaultSamplingWindow{120}; + +Http::FilterFactoryCb AdmissionControlFilterFactory::createFilterFactoryFromProtoTyped( + const envoy::extensions::filters::http::admission_control::v3alpha::AdmissionControl& config, + const std::string& stats_prefix, Server::Configuration::FactoryContext& context) { + + const std::string prefix = stats_prefix + "admission_control."; + + // Create the thread-local controller. 
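  // Note: the sampling window below is read from the proto in milliseconds (defaulting to 120s)
  // and truncated to whole seconds, since ThreadLocalControllerImpl takes std::chrono::seconds.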
+ auto tls = context.threadLocal().allocateSlot(); + auto sampling_window = std::chrono::seconds( + PROTOBUF_GET_MS_OR_DEFAULT(config, sampling_window, 1000 * defaultSamplingWindow.count()) / + 1000); + tls->set( + [sampling_window, &context](Event::Dispatcher&) -> ThreadLocal::ThreadLocalObjectSharedPtr { + return std::make_shared(context.timeSource(), sampling_window); + }); + + std::unique_ptr response_evaluator; + switch (config.evaluation_criteria_case()) { + case AdmissionControlProto::EvaluationCriteriaCase::kSuccessCriteria: + response_evaluator = std::make_unique(config.success_criteria()); + break; + case AdmissionControlProto::EvaluationCriteriaCase::EVALUATION_CRITERIA_NOT_SET: + NOT_REACHED_GCOVR_EXCL_LINE; + } + + AdmissionControlFilterConfigSharedPtr filter_config = + std::make_shared(config, context.runtime(), context.random(), + context.scope(), std::move(tls), + std::move(response_evaluator)); + + return [filter_config, prefix](Http::FilterChainFactoryCallbacks& callbacks) -> void { + callbacks.addStreamFilter(std::make_shared(filter_config, prefix)); + }; +} + +/** + * Static registration for the admission_control filter. @see RegisterFactory. + */ +REGISTER_FACTORY(AdmissionControlFilterFactory, + Server::Configuration::NamedHttpFilterConfigFactory); + +} // namespace AdmissionControl +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/admission_control/config.h b/source/extensions/filters/http/admission_control/config.h new file mode 100644 index 000000000000..8abe84eafefc --- /dev/null +++ b/source/extensions/filters/http/admission_control/config.h @@ -0,0 +1,32 @@ +#pragma once + +#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.h" +#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.validate.h" + +#include "extensions/filters/http/common/factory_base.h" +#include "extensions/filters/http/well_known_names.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace AdmissionControl { + +/** + * Config registration for the adaptive concurrency limit filter. @see NamedHttpFilterConfigFactory. 
+ */ +class AdmissionControlFilterFactory + : public Common::FactoryBase< + envoy::extensions::filters::http::admission_control::v3alpha::AdmissionControl> { +public: + AdmissionControlFilterFactory() : FactoryBase(HttpFilterNames::get().AdmissionControl) {} + + Http::FilterFactoryCb createFilterFactoryFromProtoTyped( + const envoy::extensions::filters::http::admission_control::v3alpha::AdmissionControl& + proto_config, + const std::string& stats_prefix, Server::Configuration::FactoryContext& context) override; +}; + +} // namespace AdmissionControl +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/admission_control/thread_local_controller.cc b/source/extensions/filters/http/admission_control/thread_local_controller.cc new file mode 100644 index 000000000000..30f0aac40061 --- /dev/null +++ b/source/extensions/filters/http/admission_control/thread_local_controller.cc @@ -0,0 +1,49 @@ +#include "extensions/filters/http/admission_control/thread_local_controller.h" + +#include + +#include "envoy/common/pure.h" +#include "envoy/common/time.h" +#include "envoy/http/codes.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace AdmissionControl { + +static constexpr std::chrono::seconds defaultHistoryGranularity{1}; + +ThreadLocalControllerImpl::ThreadLocalControllerImpl(TimeSource& time_source, + std::chrono::seconds sampling_window) + : time_source_(time_source), sampling_window_(sampling_window) {} + +void ThreadLocalControllerImpl::maybeUpdateHistoricalData() { + // Purge stale samples. + while (!historical_data_.empty() && ageOfOldestSample() >= sampling_window_) { + removeOldestSample(); + } + + // It's possible we purged stale samples from the history and are left with nothing, so it's + // necessary to add an empty entry. We will also need to roll over into a new entry in the + // historical data if we've exceeded the time specified by the granularity. + if (historical_data_.empty() || ageOfNewestSample() >= defaultHistoryGranularity) { + historical_data_.emplace_back(time_source_.monotonicTime(), RequestData()); + } +} + +void ThreadLocalControllerImpl::recordRequest(bool success) { + maybeUpdateHistoricalData(); + + // The back of the deque will be the most recent samples. 
+ ++historical_data_.back().second.requests; + ++global_data_.requests; + if (success) { + ++historical_data_.back().second.successes; + ++global_data_.successes; + } +} + +} // namespace AdmissionControl +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/admission_control/thread_local_controller.h b/source/extensions/filters/http/admission_control/thread_local_controller.h index 9b5096b80569..11f938758177 100644 --- a/source/extensions/filters/http/admission_control/thread_local_controller.h +++ b/source/extensions/filters/http/admission_control/thread_local_controller.h @@ -15,6 +15,19 @@ namespace AdmissionControl { */ class ThreadLocalController { public: + struct RequestData { + RequestData(uint32_t request_count, uint32_t success_count) + : requests(request_count), successes(success_count) {} + RequestData() = default; + + inline bool operator==(const RequestData& rhs) const { + return (requests == rhs.requests) && (successes == rhs.successes); + } + + uint32_t requests{0}; + uint32_t successes{0}; + }; + virtual ~ThreadLocalController() = default; // Record success/failure of a request and update the internal state of the controller to reflect @@ -22,11 +35,76 @@ class ThreadLocalController { virtual void recordSuccess() PURE; virtual void recordFailure() PURE; - // Returns the current number of recorded requests. - virtual uint32_t requestTotalCount() PURE; + // Returns the current number of requests and how many of them are successful. + virtual RequestData requestCounts() PURE; +}; + +/** + * Thread-local object to track request counts and successes over a rolling time window. Request + * data for the time window is kept recent via a circular buffer that phases out old request/success + * counts when recording new samples. + * + * This controller is thread-local so that we do not need to take any locks on the sample histories + * to update them, at the cost of decreasing the number of samples. + * + * The look-back window for request samples is accurate up to a hard-coded 1-second granularity. + * TODO (tonya11en): Allow the granularity to be configurable. + */ +class ThreadLocalControllerImpl : public ThreadLocalController, + public ThreadLocal::ThreadLocalObject { +public: + ThreadLocalControllerImpl(TimeSource& time_source, std::chrono::seconds sampling_window); + ~ThreadLocalControllerImpl() override = default; + void recordSuccess() override { recordRequest(true); } + void recordFailure() override { recordRequest(false); } + + RequestData requestCounts() override { + maybeUpdateHistoricalData(); + return global_data_; + } + +private: + void recordRequest(bool success); + + // Potentially remove any stale samples and record sample aggregates to the historical data. + void maybeUpdateHistoricalData(); + + // Returns the age of the oldest sample in the historical data. + std::chrono::microseconds ageOfOldestSample() const { + ASSERT(!historical_data_.empty()); + using namespace std::chrono; + return duration_cast(time_source_.monotonicTime() - + historical_data_.front().first); + } + + // Returns the age of the newest sample in the historical data. + std::chrono::microseconds ageOfNewestSample() const { + ASSERT(!historical_data_.empty()); + using namespace std::chrono; + return duration_cast(time_source_.monotonicTime() - + historical_data_.back().first); + } + + // Removes the oldest sample in the historical data and reconciles the global data. 
+ void removeOldestSample() { + ASSERT(!historical_data_.empty()); + global_data_.successes -= historical_data_.front().second.successes; + global_data_.requests -= historical_data_.front().second.requests; + historical_data_.pop_front(); + } + + TimeSource& time_source_; + + // Stores samples from oldest (front) to newest (back). Since there is no need to read/modify + // entries that are not the oldest or newest (front/back), we can get away with using a deque + // which allocates memory in chunks and keeps most elements contiguous and cache-friendly. + std::deque> historical_data_; + + // Request data aggregated for the whole look-back window. + RequestData global_data_; - // Returns the current number of recorded request successes. - virtual uint32_t requestSuccessCount() PURE; + // The rolling time window size. + const std::chrono::seconds sampling_window_; }; } // namespace AdmissionControl diff --git a/test/extensions/filters/http/admission_control/BUILD b/test/extensions/filters/http/admission_control/BUILD index b161f26e16a1..1b9595276119 100644 --- a/test/extensions/filters/http/admission_control/BUILD +++ b/test/extensions/filters/http/admission_control/BUILD @@ -55,3 +55,27 @@ envoy_extension_cc_test( "@envoy_api//envoy/extensions/filters/http/admission_control/v3alpha:pkg_cc_proto", ], ) + +envoy_extension_cc_test( + name = "admission_control_integration_test", + srcs = ["admission_control_integration_test.cc"], + extension_name = "envoy.filters.http.admission_control", + deps = [ + "//source/extensions/filters/http/admission_control:config", + "//test/integration:http_integration_lib", + "//test/test_common:utility_lib", + ], +) + +envoy_extension_cc_test( + name = "admission_controller_test", + srcs = ["controller_test.cc"], + extension_name = "envoy.filters.http.admission_control", + deps = [ + "//source/common/http:headers_lib", + "//source/extensions/filters/http/admission_control:admission_control_filter_lib", + "//test/test_common:simulated_time_system_lib", + "//test/test_common:utility_lib", + "@envoy_api//envoy/extensions/filters/http/admission_control/v3alpha:pkg_cc_proto", + ], +) diff --git a/test/extensions/filters/http/admission_control/admission_control_filter_test.cc b/test/extensions/filters/http/admission_control/admission_control_filter_test.cc index ad1c3ca28543..0b188f78c139 100644 --- a/test/extensions/filters/http/admission_control/admission_control_filter_test.cc +++ b/test/extensions/filters/http/admission_control/admission_control_filter_test.cc @@ -29,11 +29,12 @@ namespace HttpFilters { namespace AdmissionControl { namespace { +using RequestData = ThreadLocalController::RequestData; + class MockThreadLocalController : public ThreadLocal::ThreadLocalObject, public ThreadLocalController { public: - MOCK_METHOD(uint32_t, requestTotalCount, ()); - MOCK_METHOD(uint32_t, requestSuccessCount, ()); + MOCK_METHOD(RequestData, requestCounts, ()); MOCK_METHOD(void, recordSuccess, ()); MOCK_METHOD(void, recordFailure, ()); }; @@ -47,11 +48,10 @@ class MockResponseEvaluator : public ResponseEvaluator { class TestConfig : public AdmissionControlFilterConfig { public: TestConfig(const AdmissionControlProto& proto_config, Runtime::Loader& runtime, - TimeSource& time_source, Runtime::RandomGenerator& random, Stats::Scope& scope, - ThreadLocal::SlotPtr&& tls, MockThreadLocalController& controller, - std::shared_ptr evaluator) - : AdmissionControlFilterConfig(proto_config, runtime, time_source, random, scope, - std::move(tls), std::move(evaluator)), + 
Runtime::RandomGenerator& random, Stats::Scope& scope, ThreadLocal::SlotPtr&& tls, + MockThreadLocalController& controller, std::shared_ptr evaluator) + : AdmissionControlFilterConfig(proto_config, runtime, random, scope, std::move(tls), + std::move(evaluator)), controller_(controller) {} ThreadLocalController& getController() const override { return controller_; } @@ -69,8 +69,8 @@ class AdmissionControlTest : public testing::Test { auto tls = context_.threadLocal().allocateSlot(); evaluator_ = std::make_shared(); - return std::make_shared(proto, runtime_, time_system_, random_, scope_, - std::move(tls), controller_, evaluator_); + return std::make_shared(proto, runtime_, random_, scope_, std::move(tls), + controller_, evaluator_); } void setupFilter(std::shared_ptr config) { @@ -145,8 +145,7 @@ sampling_window: 10s EXPECT_CALL(runtime_.snapshot_, getBoolean("foo.enabled", true)).WillRepeatedly(Return(false)); // The filter is bypassed via runtime. - EXPECT_CALL(controller_, requestTotalCount()).Times(0); - EXPECT_CALL(controller_, requestSuccessCount()).Times(0); + EXPECT_CALL(controller_, requestCounts()).Times(0); // We expect no rejections. Http::TestRequestHeaderMapImpl request_headers; @@ -164,8 +163,7 @@ TEST_F(AdmissionControlTest, DisregardHealthChecks) { // We do not make admission decisions for health checks, so we expect no lookup of request success // counts. - EXPECT_CALL(controller_, requestTotalCount()).Times(0); - EXPECT_CALL(controller_, requestSuccessCount()).Times(0); + EXPECT_CALL(controller_, requestCounts()).Times(0); Http::TestRequestHeaderMapImpl request_headers; Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}}; @@ -181,8 +179,7 @@ TEST_F(AdmissionControlTest, HttpFailureBehavior) { // We expect rejection counter to increment upon failure. TestUtility::waitForCounterEq(scope_, "test_prefix.rq_rejected", 0, time_system_); - EXPECT_CALL(controller_, requestTotalCount()).WillRepeatedly(Return(100)); - EXPECT_CALL(controller_, requestSuccessCount()).WillRepeatedly(Return(0)); + EXPECT_CALL(controller_, requestCounts()).WillRepeatedly(Return(RequestData(100, 0))); EXPECT_CALL(*evaluator_, isHttpSuccess(500)).WillRepeatedly(Return(false)); Http::TestRequestHeaderMapImpl request_headers; @@ -201,8 +198,7 @@ TEST_F(AdmissionControlTest, HttpSuccessBehavior) { // We expect rejection counter to NOT increment upon success. 
TestUtility::waitForCounterEq(scope_, "test_prefix.rq_rejected", 0, time_system_); - EXPECT_CALL(controller_, requestTotalCount()).WillRepeatedly(Return(100)); - EXPECT_CALL(controller_, requestSuccessCount()).WillRepeatedly(Return(100)); + EXPECT_CALL(controller_, requestCounts()).WillRepeatedly(Return(RequestData(100, 100))); EXPECT_CALL(*evaluator_, isHttpSuccess(200)).WillRepeatedly(Return(true)); Http::TestRequestHeaderMapImpl request_headers; @@ -219,8 +215,7 @@ TEST_F(AdmissionControlTest, GrpcFailureBehavior) { TestUtility::waitForCounterEq(scope_, "test_prefix.rq_rejected", 0, time_system_); - EXPECT_CALL(controller_, requestTotalCount()).WillRepeatedly(Return(100)); - EXPECT_CALL(controller_, requestSuccessCount()).WillRepeatedly(Return(0)); + EXPECT_CALL(controller_, requestCounts()).WillRepeatedly(Return(RequestData(100, 0))); EXPECT_CALL(*evaluator_, isGrpcSuccess(7)).WillRepeatedly(Return(false)); Http::TestRequestHeaderMapImpl request_headers; @@ -239,8 +234,7 @@ TEST_F(AdmissionControlTest, GrpcSuccessBehaviorTrailer) { TestUtility::waitForCounterEq(scope_, "test_prefix.rq_rejected", 0, time_system_); - EXPECT_CALL(controller_, requestTotalCount()).WillRepeatedly(Return(100)); - EXPECT_CALL(controller_, requestSuccessCount()).WillRepeatedly(Return(100)); + EXPECT_CALL(controller_, requestCounts()).WillRepeatedly(Return(RequestData(100, 100))); EXPECT_CALL(*evaluator_, isGrpcSuccess(0)).WillRepeatedly(Return(true)); Http::TestRequestHeaderMapImpl request_headers; @@ -258,8 +252,7 @@ TEST_F(AdmissionControlTest, GrpcFailureBehaviorTrailer) { TestUtility::waitForCounterEq(scope_, "test_prefix.rq_rejected", 0, time_system_); - EXPECT_CALL(controller_, requestTotalCount()).WillRepeatedly(Return(100)); - EXPECT_CALL(controller_, requestSuccessCount()).WillRepeatedly(Return(0)); + EXPECT_CALL(controller_, requestCounts()).WillRepeatedly(Return(RequestData(100, 0))); EXPECT_CALL(*evaluator_, isGrpcSuccess(7)).WillRepeatedly(Return(false)); Http::TestRequestHeaderMapImpl request_headers; @@ -278,8 +271,7 @@ TEST_F(AdmissionControlTest, GrpcSuccessBehavior) { TestUtility::waitForCounterEq(scope_, "test_prefix.rq_rejected", 0, time_system_); - EXPECT_CALL(controller_, requestTotalCount()).WillRepeatedly(Return(100)); - EXPECT_CALL(controller_, requestSuccessCount()).WillRepeatedly(Return(100)); + EXPECT_CALL(controller_, requestCounts()).WillRepeatedly(Return(RequestData(100, 100))); EXPECT_CALL(*evaluator_, isGrpcSuccess(0)).WillRepeatedly(Return(true)); Http::TestRequestHeaderMapImpl request_headers; diff --git a/test/extensions/filters/http/admission_control/admission_control_integration_test.cc b/test/extensions/filters/http/admission_control/admission_control_integration_test.cc new file mode 100644 index 000000000000..a361cbff4d09 --- /dev/null +++ b/test/extensions/filters/http/admission_control/admission_control_integration_test.cc @@ -0,0 +1,171 @@ +#include "common/grpc/common.h" + +#include "test/integration/autonomous_upstream.h" +#include "test/integration/http_integration.h" +#include "test/test_common/simulated_time_system.h" +#include "test/test_common/utility.h" + +namespace Envoy { +namespace { + +const std::string ADMISSION_CONTROL_CONFIG = + R"EOF( +name: envoy.filters.http.admission_control +typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.admission_control.v3alpha.AdmissionControl + success_criteria: + http_criteria: + grpc_criteria: + sampling_window: 120s + aggression_coefficient: + default_value: 1.0 + runtime_key: "foo.aggression" + 
enabled: + default_value: true + runtime_key: "foo.enabled" +)EOF"; + +class AdmissionControlIntegrationTest : public Event::TestUsingSimulatedTime, + public testing::TestWithParam<Network::Address::IpVersion>, + public HttpIntegrationTest { +public: + AdmissionControlIntegrationTest() + : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, GetParam(), realTime()) {} + + void SetUp() override {} + + void initialize() override { + config_helper_.addConfigModifier(setEnableDownstreamTrailersHttp1()); + config_helper_.addFilter(ADMISSION_CONTROL_CONFIG); + HttpIntegrationTest::initialize(); + } + +protected: + void verifyGrpcSuccess(IntegrationStreamDecoderPtr response) { + EXPECT_EQ("0", response->trailers()->GrpcStatus()->value().getStringView()); + } + + void verifyHttpSuccess(IntegrationStreamDecoderPtr response) { + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + } + + IntegrationStreamDecoderPtr sendGrpcRequestWithReturnCode(uint64_t code) { + codec_client_ = makeHttpConnection(lookupPort("http")); + + // Set the response headers on the autonomous upstream. + auto headers = std::make_unique<Http::TestResponseHeaderMapImpl>(); + headers->setStatus(200); + headers->setContentType("application/grpc"); + + auto trailers = std::make_unique<Http::TestResponseTrailerMapImpl>(); + trailers->setGrpcMessage("this is a message"); + trailers->setGrpcStatus(code); + + auto* au = reinterpret_cast<AutonomousUpstream*>(fake_upstreams_.front().get()); + au->setResponseHeaders(std::move(headers)); + au->setResponseTrailers(std::move(trailers)); + + auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); + response->waitForEndStream(); + codec_client_->close(); + return response; + } + + IntegrationStreamDecoderPtr sendRequestWithReturnCode(std::string&& code) { + codec_client_ = makeHttpConnection(lookupPort("http")); + + // Set the response headers on the autonomous upstream. + auto* au = reinterpret_cast<AutonomousUpstream*>(fake_upstreams_.front().get()); + au->setResponseHeaders(std::make_unique<Http::TestResponseHeaderMapImpl>( + Http::TestResponseHeaderMapImpl({{":status", code}}))); + + auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); + response->waitForEndStream(); + codec_client_->close(); + return response; + } +}; + +INSTANTIATE_TEST_SUITE_P(IpVersions, AdmissionControlIntegrationTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest())); + +TEST_P(AdmissionControlIntegrationTest, HttpTest) { + autonomous_upstream_ = true; + initialize(); + + // Drop the success rate to a very low value. + ENVOY_LOG(info, "dropping success rate"); + for (int i = 0; i < 1000; ++i) { + sendRequestWithReturnCode("500"); + } + + // Measure throttling rate from the admission control filter. + double throttle_count = 0; + double request_count = 0; + ENVOY_LOG(info, "validating throttling rate"); + for (int i = 0; i < 1000; ++i) { + auto response = sendRequestWithReturnCode("500"); + auto rc = response->headers().Status()->value().getStringView(); + if (rc == "503") { + ++throttle_count; + } else { + ASSERT_EQ(rc, "500"); + } + ++request_count; + } + + // Given the current throttling rate formula with an aggression of 1, it should result in a ~98% + // throttling rate, allowing an error of 3%. + EXPECT_NEAR(throttle_count / request_count, 0.98, 0.03); + + // We now wait for the history to become stale. + timeSystem().advanceTimeWait(std::chrono::seconds(120)); + + // We expect a 100% success rate after waiting. No throttling should occur.
+ for (int i = 0; i < 100; ++i) { + verifyHttpSuccess(sendRequestWithReturnCode("200")); + } +} + +TEST_P(AdmissionControlIntegrationTest, GrpcTest) { + autonomous_upstream_ = true; + setUpstreamProtocol(FakeHttpConnection::Type::HTTP2); + initialize(); + + // Drop the success rate to a very low value. + for (int i = 0; i < 1000; ++i) { + sendGrpcRequestWithReturnCode(14); + } + + // Measure throttling rate from the admission control filter. + double throttle_count = 0; + double request_count = 0; + for (int i = 0; i < 1000; ++i) { + auto response = sendGrpcRequestWithReturnCode(10); + + // When the filter is throttling, it returns an HTTP code 503 and the GRPC status is unset. + // Otherwise, we expect a GRPC status of "Unknown" as set above. + if (response->headers().Status()->value().getStringView() == "503") { + ++throttle_count; + } else { + auto grpc_status = Grpc::Common::getGrpcStatus(*(response->trailers())); + ASSERT_EQ(grpc_status, Grpc::Status::WellKnownGrpcStatus::Aborted); + } + ++request_count; + } + + // Given the current throttling rate formula with an aggression of 1, it should result in a ~98% + // throttling rate. Allowing an error of 3%. + EXPECT_NEAR(throttle_count / request_count, 0.98, 0.03); + + // We now wait for the history to become stale. + timeSystem().advanceTimeWait(std::chrono::seconds(120)); + + // We expect a 100% success rate after waiting. No throttling should occur. + for (int i = 0; i < 100; ++i) { + verifyGrpcSuccess(sendGrpcRequestWithReturnCode(0)); + } +} + +} // namespace +} // namespace Envoy diff --git a/test/extensions/filters/http/admission_control/config_test.cc b/test/extensions/filters/http/admission_control/config_test.cc index 2201b3c36cb1..aa716a054e1e 100644 --- a/test/extensions/filters/http/admission_control/config_test.cc +++ b/test/extensions/filters/http/admission_control/config_test.cc @@ -35,15 +35,14 @@ class AdmissionControlConfigTest : public testing::Test { TestUtility::loadFromYamlAndValidate(yaml, proto); auto tls = context_.threadLocal().allocateSlot(); auto evaluator = std::make_unique(proto.success_criteria()); - return std::make_shared( - proto, runtime_, time_system_, random_, scope_, std::move(tls), std::move(evaluator)); + return std::make_shared(proto, runtime_, random_, scope_, + std::move(tls), std::move(evaluator)); } protected: NiceMock runtime_; NiceMock context_; Stats::IsolatedStoreImpl scope_; - Event::SimulatedTimeSystem time_system_; NiceMock random_; }; diff --git a/test/extensions/filters/http/admission_control/controller_test.cc b/test/extensions/filters/http/admission_control/controller_test.cc new file mode 100644 index 000000000000..bf88a7037431 --- /dev/null +++ b/test/extensions/filters/http/admission_control/controller_test.cc @@ -0,0 +1,107 @@ +#include + +#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.h" +#include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.validate.h" + +#include "extensions/filters/http/admission_control/thread_local_controller.h" + +#include "test/test_common/simulated_time_system.h" +#include "test/test_common/utility.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace AdmissionControl { +namespace { + +using RequestData = ThreadLocalController::RequestData; + +class ThreadLocalControllerTest : public testing::Test { +public: + ThreadLocalControllerTest() : window_(5), tlc_(time_system_, window_) {} + +protected: + 
// Submit a single request per entry in the historical data (this comes out to a single request + // each second). The final sample does not advance time to allow for testing of this transition. + void fillHistorySlots(const bool successes = true) { + std::function record; + if (successes) { + record = [this]() { tlc_.recordSuccess(); }; + } else { + record = [this]() { tlc_.recordFailure(); }; + } + for (int tick = 0; tick < window_.count(); ++tick) { + record(); + time_system_.advanceTimeWait(std::chrono::seconds(1)); + } + // Don't sleep after the final sample to allow for measurements. + record(); + } + + Event::SimulatedTimeSystem time_system_; + std::chrono::seconds window_; + ThreadLocalControllerImpl tlc_; +}; + +// Test the basic functionality of the admission controller. +TEST_F(ThreadLocalControllerTest, BasicRecord) { + EXPECT_EQ(RequestData(0, 0), tlc_.requestCounts()); + + tlc_.recordFailure(); + EXPECT_EQ(RequestData(1, 0), tlc_.requestCounts()); + + tlc_.recordSuccess(); + EXPECT_EQ(RequestData(2, 1), tlc_.requestCounts()); +} + +// Verify that stale historical samples are removed when they grow stale. +TEST_F(ThreadLocalControllerTest, RemoveStaleSamples) { + fillHistorySlots(); + + // We expect a single request counted in each second of the window. + EXPECT_EQ(RequestData(window_.count(), window_.count()), tlc_.requestCounts()); + + time_system_.advanceTimeWait(std::chrono::seconds(1)); + + // Continuing to sample requests at 1 per second should maintain the same request counts. We'll + // record failures here. + fillHistorySlots(false); + EXPECT_EQ(RequestData(window_.count(), 0), tlc_.requestCounts()); + + // Expect the oldest entry to go stale. + time_system_.advanceTimeWait(std::chrono::seconds(1)); + EXPECT_EQ(RequestData(window_.count() - 1, 0), tlc_.requestCounts()); +} + +// Verify that stale historical samples are removed when they grow stale. +TEST_F(ThreadLocalControllerTest, RemoveStaleSamples2) { + fillHistorySlots(); + + // We expect a single request counted in each second of the window. + EXPECT_EQ(RequestData(window_.count(), window_.count()), tlc_.requestCounts()); + + // Let's just sit here for a full day. We expect all samples to become stale. + time_system_.advanceTimeWait(std::chrono::hours(24)); + + EXPECT_EQ(RequestData(0, 0), tlc_.requestCounts()); +} + +// Verify that historical samples are made only when there is data to record. +TEST_F(ThreadLocalControllerTest, VerifyMemoryUsage) { + // Make sure we don't add any null data to the history if there are sparse requests. 
+ tlc_.recordSuccess(); + time_system_.advanceTimeWait(std::chrono::seconds(1)); + tlc_.recordSuccess(); + time_system_.advanceTimeWait(std::chrono::seconds(3)); + tlc_.recordSuccess(); + EXPECT_EQ(RequestData(3, 3), tlc_.requestCounts()); +} + +} // namespace +} // namespace AdmissionControl +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/integration/autonomous_upstream.cc b/test/integration/autonomous_upstream.cc index 14cf58a0cfd5..45a467dcf7e7 100644 --- a/test/integration/autonomous_upstream.cc +++ b/test/integration/autonomous_upstream.cc @@ -60,7 +60,8 @@ void AutonomousStream::sendResponse() { HeaderToInt(RESPONSE_SIZE_BYTES, response_body_length, headers); encodeHeaders(upstream_.responseHeaders(), false); - encodeData(response_body_length, true); + encodeData(response_body_length, false); + encodeTrailers(upstream_.responseTrailers()); } AutonomousHttpConnection::AutonomousHttpConnection(AutonomousUpstream& autonomous_upstream, @@ -111,12 +112,24 @@ std::unique_ptr AutonomousUpstream::lastRequestH return std::move(last_request_headers_); } +void AutonomousUpstream::setResponseTrailers( + std::unique_ptr&& response_trailers) { + Thread::LockGuard lock(headers_lock_); + response_trailers_ = std::move(response_trailers); +} + void AutonomousUpstream::setResponseHeaders( std::unique_ptr&& response_headers) { Thread::LockGuard lock(headers_lock_); response_headers_ = std::move(response_headers); } +Http::TestResponseTrailerMapImpl AutonomousUpstream::responseTrailers() { + Thread::LockGuard lock(headers_lock_); + Http::TestResponseTrailerMapImpl return_trailers = *response_trailers_; + return return_trailers; +} + Http::TestResponseHeaderMapImpl AutonomousUpstream::responseHeaders() { Thread::LockGuard lock(headers_lock_); Http::TestResponseHeaderMapImpl return_headers = *response_headers_; diff --git a/test/integration/autonomous_upstream.h b/test/integration/autonomous_upstream.h index c188344a9ec7..e9d247a4ba95 100644 --- a/test/integration/autonomous_upstream.h +++ b/test/integration/autonomous_upstream.h @@ -56,6 +56,7 @@ class AutonomousUpstream : public FakeUpstream { bool allow_incomplete_streams) : FakeUpstream(address, type, time_system), allow_incomplete_streams_(allow_incomplete_streams), + response_trailers_(std::make_unique()), response_headers_(std::make_unique( Http::TestResponseHeaderMapImpl({{":status", "200"}}))) {} @@ -64,6 +65,7 @@ class AutonomousUpstream : public FakeUpstream { Event::TestTimeSystem& time_system, bool allow_incomplete_streams) : FakeUpstream(std::move(transport_socket_factory), port, type, version, time_system), allow_incomplete_streams_(allow_incomplete_streams), + response_trailers_(std::make_unique()), response_headers_(std::make_unique( Http::TestResponseHeaderMapImpl({{":status", "200"}}))) {} @@ -77,13 +79,16 @@ class AutonomousUpstream : public FakeUpstream { void setLastRequestHeaders(const Http::HeaderMap& headers); std::unique_ptr lastRequestHeaders(); + void setResponseTrailers(std::unique_ptr&& response_trailers); void setResponseHeaders(std::unique_ptr&& response_headers); + Http::TestResponseTrailerMapImpl responseTrailers(); Http::TestResponseHeaderMapImpl responseHeaders(); const bool allow_incomplete_streams_{false}; private: Thread::MutexBasicLockable headers_lock_; std::unique_ptr last_request_headers_; + std::unique_ptr response_trailers_; std::unique_ptr response_headers_; std::vector http_connections_; std::vector shared_connections_; From 
92c35e3b31df2ae27d9d0cb9a63eaddf8eea940f Mon Sep 17 00:00:00 2001 From: Stephan Zuercher Date: Wed, 24 Jun 2020 14:16:01 -0700 Subject: [PATCH 432/909] http1: pass some encoder flags as params (#11704) Some flags in the StreamEncoderImpl are only necessary for the duration of calls to encodeHeadersBase. Remove the fields and pass response status as an optional value rather than setting fields ahead of the encodeHeadersBase call. Risk Level: medium Testing: existing tests pass Doc Changes: n/a Release Note: n/a Signed-off-by: Stephan Zuercher --- source/common/http/http1/codec_impl.cc | 31 +++++++++++--------------- source/common/http/http1/codec_impl.h | 16 +++++-------- 2 files changed, 19 insertions(+), 28 deletions(-) diff --git a/source/common/http/http1/codec_impl.cc b/source/common/http/http1/codec_impl.cc index 7fe0a6259092..67680eac1677 100644 --- a/source/common/http/http1/codec_impl.cc +++ b/source/common/http/http1/codec_impl.cc @@ -70,8 +70,7 @@ const std::string StreamEncoderImpl::LAST_CHUNK = "0\r\n"; StreamEncoderImpl::StreamEncoderImpl(ConnectionImpl& connection, HeaderKeyFormatter* header_key_formatter) : connection_(connection), disable_chunk_encoding_(false), chunk_encoding_(true), - processing_100_continue_(false), is_response_to_head_request_(false), - is_response_to_connect_request_(false), is_1xx_(false), is_204_(false), + is_response_to_head_request_(false), is_response_to_connect_request_(false), header_key_formatter_(header_key_formatter) { if (connection_.connection().aboveHighWatermark()) { runHighWatermarkCallbacks(); @@ -103,13 +102,11 @@ void StreamEncoderImpl::encodeFormattedHeader(absl::string_view key, absl::strin void ResponseEncoderImpl::encode100ContinueHeaders(const ResponseHeaderMap& headers) { ASSERT(headers.Status()->value() == "100"); - processing_100_continue_ = true; encodeHeaders(headers, false); - processing_100_continue_ = false; } void StreamEncoderImpl::encodeHeadersBase(const RequestOrResponseHeaderMap& headers, - bool end_stream) { + absl::optional status, bool end_stream) { bool saw_content_length = false; headers.iterate( [](const HeaderEntry& header, void* context) -> HeaderMap::Iterate { @@ -153,7 +150,7 @@ void StreamEncoderImpl::encodeHeadersBase(const RequestOrResponseHeaderMap& head if (saw_content_length || disable_chunk_encoding_) { chunk_encoding_ = false; } else { - if (processing_100_continue_) { + if (status && *status == 100) { // Make sure we don't serialize chunk information with 100-Continue headers. chunk_encoding_ = false; } else if (end_stream && !is_response_to_head_request_) { @@ -161,19 +158,23 @@ void StreamEncoderImpl::encodeHeadersBase(const RequestOrResponseHeaderMap& head // response to a HEAD request. // For 204s and 1xx where content length is disallowed, don't append the content length but // also don't chunk encode. - if (!is_1xx_ && !is_204_) { + if (!status || (*status >= 200 && *status != 204)) { encodeFormattedHeader(Headers::get().ContentLength.get(), "0"); } chunk_encoding_ = false; } else if (connection_.protocol() == Protocol::Http10) { chunk_encoding_ = false; - } else if (connection_.strict1xxAnd204Headers() && (is_1xx_ || is_204_)) { + } else if (status && (*status < 200 || *status == 204) && + connection_.strict1xxAnd204Headers()) { + // TODO(zuercher): when the "envoy.reloadable_features.strict_1xx_and_204_response_headers" + // feature flag is removed, this block can be coalesced with the 100 Continue logic above. 
+ // For 1xx and 204 responses, do not send the chunked encoding header or enable chunked // encoding: https://tools.ietf.org/html/rfc7230#section-3.3.1 chunk_encoding_ = false; // Assert 1xx (may have content) OR 204 and end stream. - ASSERT(is_1xx_ || end_stream); + ASSERT(*status < 200 || end_stream); } else { // For responses to connect requests, do not send the chunked encoding header: // https://tools.ietf.org/html/rfc7231#section-4.3.6. @@ -361,18 +362,12 @@ void ResponseEncoderImpl::encodeHeaders(const ResponseHeaderMap& headers, bool e connection_.addCharToBuffer('\r'); connection_.addCharToBuffer('\n'); - // Enabling handling of https://tools.ietf.org/html/rfc7230#section-3.3.1 and - // https://tools.ietf.org/html/rfc7230#section-3.3.2. Also resets these flags - // if a 100 Continue is followed by another status. - setIs1xx(numeric_status < 200); - setIs204(numeric_status == 204); - if (numeric_status >= 300) { // Don't do special CONNECT logic if the CONNECT was rejected. is_response_to_connect_request_ = false; } - encodeHeadersBase(headers, end_stream); + encodeHeadersBase(headers, absl::make_optional(numeric_status), end_stream); } static const char REQUEST_POSTFIX[] = " HTTP/1.1\r\n"; @@ -408,7 +403,7 @@ void RequestEncoderImpl::encodeHeaders(const RequestHeaderMap& headers, bool end } connection_.copyToBuffer(REQUEST_POSTFIX, sizeof(REQUEST_POSTFIX) - 1); - encodeHeadersBase(headers, end_stream); + encodeHeadersBase(headers, absl::nullopt, end_stream); } http_parser_settings ConnectionImpl::settings_{ @@ -1129,7 +1124,7 @@ int ClientConnectionImpl::onHeadersComplete() { bool ClientConnectionImpl::upgradeAllowed() const { if (pending_response_.has_value()) { - return pending_response_->encoder_.upgrade_request_; + return pending_response_->encoder_.upgradeRequest(); } return false; } diff --git a/source/common/http/http1/codec_impl.h b/source/common/http/http1/codec_impl.h index d96a175722a8..750118ec8ce4 100644 --- a/source/common/http/http1/codec_impl.h +++ b/source/common/http/http1/codec_impl.h @@ -95,9 +95,8 @@ class StreamEncoderImpl : public virtual StreamEncoder, protected: StreamEncoderImpl(ConnectionImpl& connection, HeaderKeyFormatter* header_key_formatter); - void setIs1xx(bool value) { is_1xx_ = value; } - void setIs204(bool value) { is_204_ = value; } - void encodeHeadersBase(const RequestOrResponseHeaderMap& headers, bool end_stream); + void encodeHeadersBase(const RequestOrResponseHeaderMap& headers, absl::optional status, + bool end_stream); void encodeTrailersBase(const HeaderMap& headers); static const std::string CRLF; @@ -107,11 +106,8 @@ class StreamEncoderImpl : public virtual StreamEncoder, uint32_t read_disable_calls_{}; bool disable_chunk_encoding_ : 1; bool chunk_encoding_ : 1; - bool processing_100_continue_ : 1; bool is_response_to_head_request_ : 1; bool is_response_to_connect_request_ : 1; - bool is_1xx_ : 1; - bool is_204_ : 1; private: /** @@ -167,16 +163,16 @@ class RequestEncoderImpl : public StreamEncoderImpl, public RequestEncoder { public: RequestEncoderImpl(ConnectionImpl& connection, HeaderKeyFormatter* header_key_formatter) : StreamEncoderImpl(connection, header_key_formatter) {} - bool headRequest() { return head_request_; } - bool connectRequest() { return connect_request_; } + bool upgradeRequest() const { return upgrade_request_; } + bool headRequest() const { return head_request_; } + bool connectRequest() const { return connect_request_; } // Http::RequestEncoder void encodeHeaders(const RequestHeaderMap& headers, bool 
end_stream) override; void encodeTrailers(const RequestTrailerMap& trailers) override { encodeTrailersBase(trailers); } - bool upgrade_request_{}; - private: + bool upgrade_request_{}; bool head_request_{}; bool connect_request_{}; }; From 4c4fc058cd431c5fdfd2770c6560c67d69bdf239 Mon Sep 17 00:00:00 2001 From: Joey Muia Date: Wed, 24 Jun 2020 14:47:42 -0700 Subject: [PATCH 433/909] health check: gracefully handle GOAWAY in grpc health checker (#11324) Gracefully handle GOAWAY in gRPC health checker, allowing in-progress health checks to complete before closing the connection. Signed-off-by: Joey Muia Co-authored-by: John Murray --- include/envoy/http/codec.h | 10 +- source/common/http/codec_client.h | 4 +- source/common/http/conn_manager_impl.cc | 2 +- source/common/http/conn_manager_impl.h | 2 +- source/common/http/http2/codec_impl.cc | 13 +- source/common/http/http2/conn_pool.cc | 2 +- source/common/http/http2/conn_pool.h | 6 +- source/common/upstream/health_checker_impl.cc | 29 ++- source/common/upstream/health_checker_impl.h | 7 +- .../quiche/envoy_quic_client_session.cc | 4 +- .../quic_listeners/quiche/envoy_quic_utils.cc | 9 + .../quic_listeners/quiche/envoy_quic_utils.h | 4 + test/common/http/common.h | 2 +- test/common/http/http2/codec_impl_test.cc | 4 +- test/common/http/http2/conn_pool_test.cc | 2 +- .../upstream/health_checker_impl_test.cc | 182 +++++++++++++++++- test/integration/fake_upstream.h | 2 +- test/integration/http_integration.h | 2 +- test/mocks/http/mocks.h | 2 +- 19 files changed, 259 insertions(+), 29 deletions(-) diff --git a/include/envoy/http/codec.h b/include/envoy/http/codec.h index bb19ce83bcab..406a30a65766 100644 --- a/include/envoy/http/codec.h +++ b/include/envoy/http/codec.h @@ -36,6 +36,14 @@ const char MaxResponseHeadersCountOverrideKey[] = class Stream; +/** + * Error codes used to convey the reason for a GOAWAY. + */ +enum class GoAwayErrorCode { + NoError, + Other, +}; + /** * Stream encoder options specific to HTTP/1. */ @@ -324,7 +332,7 @@ class ConnectionCallbacks { /** * Fires when the remote indicates "go away." No new streams should be created. */ - virtual void onGoAway() PURE; + virtual void onGoAway(GoAwayErrorCode error_code) PURE; }; /** diff --git a/source/common/http/codec_client.h b/source/common/http/codec_client.h index 606e95f18e9d..c3bb9d4b3f9a 100644 --- a/source/common/http/codec_client.h +++ b/source/common/http/codec_client.h @@ -131,9 +131,9 @@ class CodecClient : Logger::Loggable, Upstream::HostDescriptionConstSharedPtr host, Event::Dispatcher& dispatcher); // Http::ConnectionCallbacks - void onGoAway() override { + void onGoAway(GoAwayErrorCode error_code) override { if (codec_callbacks_) { - codec_callbacks_->onGoAway(); + codec_callbacks_->onGoAway(error_code); } } diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index 10c682ae5714..243260c98bf2 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -464,7 +464,7 @@ void ConnectionManagerImpl::doConnectionClose( } } -void ConnectionManagerImpl::onGoAway() { +void ConnectionManagerImpl::onGoAway(GoAwayErrorCode) { // Currently we do nothing with remote go away frames. In the future we can decide to no longer // push resources if applicable. 
} diff --git a/source/common/http/conn_manager_impl.h b/source/common/http/conn_manager_impl.h index bf8f9d153053..9d34e4cd8817 100644 --- a/source/common/http/conn_manager_impl.h +++ b/source/common/http/conn_manager_impl.h @@ -84,7 +84,7 @@ class ConnectionManagerImpl : Logger::Loggable, void initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) override; // Http::ConnectionCallbacks - void onGoAway() override; + void onGoAway(GoAwayErrorCode error_code) override; // Http::ServerConnectionCallbacks RequestDecoder& newStream(ResponseEncoder& response_encoder, diff --git a/source/common/http/http2/codec_impl.cc b/source/common/http/http2/codec_impl.cc index 24e8d8ebbebf..d26ef284ceab 100644 --- a/source/common/http/http2/codec_impl.cc +++ b/source/common/http/http2/codec_impl.cc @@ -563,6 +563,16 @@ int ConnectionImpl::onBeforeFrameReceived(const nghttp2_frame_hd* hd) { return 0; } +ABSL_MUST_USE_RESULT +enum GoAwayErrorCode ngHttp2ErrorCodeToErrorCode(uint32_t code) noexcept { + switch (code) { + case NGHTTP2_NO_ERROR: + return GoAwayErrorCode::NoError; + default: + return GoAwayErrorCode::Other; + } +} + int ConnectionImpl::onFrameReceived(const nghttp2_frame* frame) { ENVOY_CONN_LOG(trace, "recv frame type={}", connection_, static_cast(frame->hd.type)); @@ -579,10 +589,11 @@ int ConnectionImpl::onFrameReceived(const nghttp2_frame* frame) { // Only raise GOAWAY once, since we don't currently expose stream information. Shutdown // notifications are the same as a normal GOAWAY. + // TODO: handle multiple GOAWAY frames. if (frame->hd.type == NGHTTP2_GOAWAY && !raised_goaway_) { ASSERT(frame->hd.stream_id == 0); raised_goaway_ = true; - callbacks().onGoAway(); + callbacks().onGoAway(ngHttp2ErrorCodeToErrorCode(frame->goaway.error_code)); return 0; } diff --git a/source/common/http/http2/conn_pool.cc b/source/common/http/http2/conn_pool.cc index fdbec353dce0..01dd1583b46e 100644 --- a/source/common/http/http2/conn_pool.cc +++ b/source/common/http/http2/conn_pool.cc @@ -24,7 +24,7 @@ ConnPoolImpl::~ConnPoolImpl() { destructAllConnections(); } ActiveClientPtr ConnPoolImpl::instantiateActiveClient() { return std::make_unique(*this); } -void ConnPoolImpl::onGoAway(ActiveClient& client) { +void ConnPoolImpl::onGoAway(ActiveClient& client, Http::GoAwayErrorCode) { ENVOY_CONN_LOG(debug, "remote goaway", *client.codec_client_); host_->cluster().stats().upstream_cx_close_notify_.inc(); if (client.state_ != ActiveClient::State::DRAINING) { diff --git a/source/common/http/http2/conn_pool.h b/source/common/http/http2/conn_pool.h index 1c42d71bca3c..8e6e852a609a 100644 --- a/source/common/http/http2/conn_pool.h +++ b/source/common/http/http2/conn_pool.h @@ -53,14 +53,16 @@ class ConnPoolImpl : public Envoy::Http::ConnPoolImplBase { } // Http::ConnectionCallbacks - void onGoAway() override { parent().onGoAway(*this); } + void onGoAway(Http::GoAwayErrorCode error_code) override { + parent().onGoAway(*this, error_code); + } bool closed_with_active_rq_{}; }; uint64_t maxRequestsPerConnection(); void movePrimaryClientToDraining(); - void onGoAway(ActiveClient& client); + void onGoAway(ActiveClient& client, Http::GoAwayErrorCode error_code); void onStreamDestroy(ActiveClient& client); void onStreamReset(ActiveClient& client, Http::StreamResetReason reason); diff --git a/source/common/upstream/health_checker_impl.cc b/source/common/upstream/health_checker_impl.cc index 3904b5a8986d..0dc7c3d88bd0 100644 --- a/source/common/upstream/health_checker_impl.cc +++ 
b/source/common/upstream/health_checker_impl.cc @@ -703,19 +703,22 @@ void GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::onInterval() { void GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::onResetStream(Http::StreamResetReason, absl::string_view) { const bool expected_reset = expect_reset_; + const bool goaway = received_no_error_goaway_; resetState(); if (expected_reset) { // Stream reset was initiated by us (bogus gRPC response, timeout or cluster host is going - // away). In these cases health check failure has already been reported, so just return. + // away). In these cases health check failure has already been reported and a GOAWAY (if any) + // has already been handled, so just return. return; } ENVOY_CONN_LOG(debug, "connection/stream error health_flags={}", *client_, HostUtility::healthFlagsToString(*host_)); - if (!parent_.reuse_connection_) { - // Stream reset was unexpected, so we haven't closed the connection yet. + if (goaway || !parent_.reuse_connection_) { + // Stream reset was unexpected, so we haven't closed the connection + // yet in response to a GOAWAY or due to disabled connection reuse. client_->close(); } @@ -727,9 +730,19 @@ void GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::onResetStream(Http::St handleFailure(envoy::data::core::v3::NETWORK); } -void GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::onGoAway() { +void GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::onGoAway( + Http::GoAwayErrorCode error_code) { ENVOY_CONN_LOG(debug, "connection going away health_flags={}", *client_, HostUtility::healthFlagsToString(*host_)); + // If we have an active health check probe and receive a GOAWAY indicating + // graceful shutdown, allow the probe to complete before closing the connection. + // The connection will be closed when the active check completes or another + // terminal condition occurs, such as a timeout or stream reset. + if (request_encoder_ && error_code == Http::GoAwayErrorCode::NoError) { + received_no_error_goaway_ = true; + return; + } + // Even if we have active health check probe, fail it on GOAWAY and schedule new one. if (request_encoder_) { handleFailure(envoy::data::core::v3::NETWORK); @@ -762,6 +775,9 @@ void GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::onRpcComplete( handleFailure(envoy::data::core::v3::ACTIVE); } + // Read the value as we may call resetState() and clear it. + const bool goaway = received_no_error_goaway_; + // |end_stream| will be false if we decided to stop healthcheck before HTTP stream has ended - // invalid gRPC payload, unexpected message stream or wrong content-type. 
if (end_stream) { @@ -772,7 +788,7 @@ void GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::onRpcComplete( request_encoder_->getStream().resetStream(Http::StreamResetReason::LocalReset); } - if (!parent_.reuse_connection_) { + if (!parent_.reuse_connection_ || goaway) { client_->close(); } } @@ -782,13 +798,14 @@ void GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::resetState() { request_encoder_ = nullptr; decoder_ = Grpc::Decoder(); health_check_response_.reset(); + received_no_error_goaway_ = false; } void GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::onTimeout() { ENVOY_CONN_LOG(debug, "connection/stream timeout health_flags={}", *client_, HostUtility::healthFlagsToString(*host_)); expect_reset_ = true; - if (!parent_.reuse_connection_) { + if (received_no_error_goaway_ || !parent_.reuse_connection_) { client_->close(); } else { request_encoder_->getStream().resetStream(Http::StreamResetReason::LocalReset); diff --git a/source/common/upstream/health_checker_impl.h b/source/common/upstream/health_checker_impl.h index b8b083138151..154a4d6a0d35 100644 --- a/source/common/upstream/health_checker_impl.h +++ b/source/common/upstream/health_checker_impl.h @@ -324,7 +324,7 @@ class GrpcHealthCheckerImpl : public HealthCheckerImplBase { void onBelowWriteBufferLowWatermark() override {} void onEvent(Network::ConnectionEvent event); - void onGoAway(); + void onGoAway(Http::GoAwayErrorCode error_code); class ConnectionCallbackImpl : public Network::ConnectionCallbacks { public: @@ -342,7 +342,7 @@ class GrpcHealthCheckerImpl : public HealthCheckerImplBase { public: HttpConnectionCallbackImpl(GrpcActiveHealthCheckSession& parent) : parent_(parent) {} // Http::ConnectionCallbacks - void onGoAway() override { parent_.onGoAway(); } + void onGoAway(Http::GoAwayErrorCode error_code) override { parent_.onGoAway(error_code); } private: GrpcActiveHealthCheckSession& parent_; @@ -359,6 +359,9 @@ class GrpcHealthCheckerImpl : public HealthCheckerImplBase { // e.g. remote reset. In this case healthcheck status has already been reported, only state // cleanup is required. bool expect_reset_ = false; + // If true, we received a GOAWAY (NO_ERROR code) and are deferring closing the connection + // until the active probe completes. 
+ bool received_no_error_goaway_ = false; }; virtual Http::CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) PURE; diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_client_session.cc b/source/extensions/quic_listeners/quiche/envoy_quic_client_session.cc index f516e2e573e4..3fd67c0ab4de 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_client_session.cc +++ b/source/extensions/quic_listeners/quiche/envoy_quic_client_session.cc @@ -1,5 +1,7 @@ #include "extensions/quic_listeners/quiche/envoy_quic_client_session.h" +#include "extensions/quic_listeners/quiche/envoy_quic_utils.h" + namespace Envoy { namespace Quic { @@ -58,7 +60,7 @@ void EnvoyQuicClientSession::OnGoAway(const quic::QuicGoAwayFrame& frame) { quic::QuicErrorCodeToString(frame.error_code), frame.reason_phrase); quic::QuicSpdyClientSession::OnGoAway(frame); if (http_connection_callbacks_ != nullptr) { - http_connection_callbacks_->onGoAway(); + http_connection_callbacks_->onGoAway(quicErrorCodeToEnvoyErrorCode(frame.error_code)); } } diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_utils.cc b/source/extensions/quic_listeners/quiche/envoy_quic_utils.cc index 6fa268c53cc0..611cf7b7b721 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_utils.cc +++ b/source/extensions/quic_listeners/quiche/envoy_quic_utils.cc @@ -90,6 +90,15 @@ Http::StreamResetReason quicErrorCodeToEnvoyResetReason(quic::QuicErrorCode erro } } +Http::GoAwayErrorCode quicErrorCodeToEnvoyErrorCode(quic::QuicErrorCode error) noexcept { + switch (error) { + case quic::QUIC_NO_ERROR: + return Http::GoAwayErrorCode::NoError; + default: + return Http::GoAwayErrorCode::Other; + } +} + Network::ConnectionSocketPtr createConnectionSocket(Network::Address::InstanceConstSharedPtr& peer_addr, Network::Address::InstanceConstSharedPtr& local_addr, diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_utils.h b/source/extensions/quic_listeners/quiche/envoy_quic_utils.h index eecaa9045d41..f5714ef15b83 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_utils.h +++ b/source/extensions/quic_listeners/quiche/envoy_quic_utils.h @@ -69,6 +69,10 @@ Http::StreamResetReason quicRstErrorToEnvoyResetReason(quic::QuicRstStreamErrorC // Called when underlying QUIC connection is closed either locally or by peer. Http::StreamResetReason quicErrorCodeToEnvoyResetReason(quic::QuicErrorCode error); +// Called when a GOAWAY frame is received. +ABSL_MUST_USE_RESULT +Http::GoAwayErrorCode quicErrorCodeToEnvoyErrorCode(quic::QuicErrorCode error) noexcept; + // Create a connection socket instance and apply given socket options to the // socket. IP_PKTINFO and SO_RXQ_OVFL is always set if supported. 
Network::ConnectionSocketPtr diff --git a/test/common/http/common.h b/test/common/http/common.h index 7eacfa3ad03b..2cd5a9db335b 100644 --- a/test/common/http/common.h +++ b/test/common/http/common.h @@ -28,7 +28,7 @@ class CodecClientForTest : public Http::CodecClient { destroy_cb_(this); } } - void raiseGoAway() { onGoAway(); } + void raiseGoAway(Http::GoAwayErrorCode error_code) { onGoAway(error_code); } Event::Timer* idleTimer() { return idle_timer_.get(); } DestroyCb destroy_cb_; diff --git a/test/common/http/http2/codec_impl_test.cc b/test/common/http/http2/codec_impl_test.cc index 18da2005571f..de25ea53b040 100644 --- a/test/common/http/http2/codec_impl_test.cc +++ b/test/common/http/http2/codec_impl_test.cc @@ -317,7 +317,7 @@ TEST_P(Http2CodecImplTest, ShutdownNotice) { EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); request_encoder_->encodeHeaders(request_headers, true); - EXPECT_CALL(client_callbacks_, onGoAway()); + EXPECT_CALL(client_callbacks_, onGoAway(_)); server_->shutdownNotice(); server_->goAway(); @@ -1456,7 +1456,7 @@ TEST_P(Http2CodecImplTest, LargeRequestHeadersExceedPerHeaderLimit) { request_headers.addCopy("big", long_string); EXPECT_CALL(request_decoder_, decodeHeaders_(_, _)).Times(0); - EXPECT_CALL(client_callbacks_, onGoAway()); + EXPECT_CALL(client_callbacks_, onGoAway(_)); server_->shutdownNotice(); server_->goAway(); request_encoder_->encodeHeaders(request_headers, true); diff --git a/test/common/http/http2/conn_pool_test.cc b/test/common/http/http2/conn_pool_test.cc index 5aee5e508dd2..fa8df9101d85 100644 --- a/test/common/http/http2/conn_pool_test.cc +++ b/test/common/http/http2/conn_pool_test.cc @@ -1171,7 +1171,7 @@ TEST_F(Http2ConnPoolImplTest, GoAway) { r1.inner_decoder_->decodeHeaders( ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true); - test_clients_[0].codec_client_->raiseGoAway(); + test_clients_[0].codec_client_->raiseGoAway(Http::GoAwayErrorCode::NoError); expectClientCreate(); ActiveTestRequest r2(*this, 1, false); diff --git a/test/common/upstream/health_checker_impl_test.cc b/test/common/upstream/health_checker_impl_test.cc index dc328cb18516..5070c903e184 100644 --- a/test/common/upstream/health_checker_impl_test.cc +++ b/test/common/upstream/health_checker_impl_test.cc @@ -4589,8 +4589,8 @@ TEST_F(GrpcHealthCheckerImplTest, GrpcFailUnknownHealthStatus) { cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health()); } -// Test receiving GOAWAY is interpreted as connection close event. -TEST_F(GrpcHealthCheckerImplTest, GoAwayProbeInProgress) { +// Test receiving GOAWAY (error) is interpreted as connection close event. +TEST_F(GrpcHealthCheckerImplTest, GoAwayErrorProbeInProgress) { // FailureType::Network will be issued, it will render host unhealthy only if unhealthy_threshold // is reached. setupHCWithUnhealthyThreshold(1); @@ -4598,7 +4598,9 @@ TEST_F(GrpcHealthCheckerImplTest, GoAwayProbeInProgress) { EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); - test_sessions_[0]->codec_client_->raiseGoAway(); + // GOAWAY with non-NO_ERROR code will result in a healthcheck failure + // and the connection closing. 
+ test_sessions_[0]->codec_client_->raiseGoAway(Http::GoAwayErrorCode::Other); EXPECT_TRUE(cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagGet( Host::HealthFlag::FAILED_ACTIVE_HC)); @@ -4606,6 +4608,178 @@ cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health()); } +// Test receiving GOAWAY (no error) is handled gracefully while a check is in progress. +TEST_F(GrpcHealthCheckerImplTest, GoAwayProbeInProgress) { + setupHCWithUnhealthyThreshold(/*threshold=*/1); + cluster_->prioritySet().getMockHostSet(0)->hosts_ = { + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + + expectSessionCreate(); + expectHealthcheckStart(0); + health_checker_->start(); + + expectHealthcheckStop(0); + EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)); + + // GOAWAY with NO_ERROR code during check should be handled gracefully. + test_sessions_[0]->codec_client_->raiseGoAway(Http::GoAwayErrorCode::NoError); + respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING); + expectHostHealthy(true); + + // GOAWAY should cause a new connection to be created. + expectClientCreate(0); + expectHealthcheckStart(0); + test_sessions_[0]->interval_timer_->invokeCallback(); + + expectHealthcheckStop(0); + // Test that host state hasn't changed. + EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)); + respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING); + expectHostHealthy(true); +} + +// Test receiving GOAWAY (no error) closes the connection after an in-progress probe times out. +TEST_F(GrpcHealthCheckerImplTest, GoAwayProbeInProgressTimeout) { + setupHCWithUnhealthyThreshold(/*threshold=*/1); + cluster_->prioritySet().getMockHostSet(0)->hosts_ = { + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + + expectSessionCreate(); + expectHealthcheckStart(0); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); + health_checker_->start(); + + expectHealthcheckStop(0); + // Unhealthy threshold is 1, so the first timeout marks the host unhealthy. + EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); + + // GOAWAY during check should be handled gracefully. + test_sessions_[0]->codec_client_->raiseGoAway(Http::GoAwayErrorCode::NoError); + expectHostHealthy(true); + + test_sessions_[0]->timeout_timer_->invokeCallback(); + expectHostHealthy(false); + + // GOAWAY should cause a new connection to be created. + expectClientCreate(0); + expectHealthcheckStart(0); + test_sessions_[0]->interval_timer_->invokeCallback(); + + expectHealthcheckStop(0); + // Healthy threshold is 2, so we're pending a state change. + EXPECT_CALL(*this, onHostStatus(_, HealthTransition::ChangePending)); + respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING); + expectHostHealthy(false); +} + +// Test receiving GOAWAY (no error) closes connection after an unexpected stream reset.
+TEST_F(GrpcHealthCheckerImplTest, GoAwayProbeInProgressStreamReset) { + setupHCWithUnhealthyThreshold(/*threshold=*/1); + cluster_->prioritySet().getMockHostSet(0)->hosts_ = { + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + + expectSessionCreate(); + expectHealthcheckStart(0); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); + health_checker_->start(); + + expectHealthcheckStop(0); + // Unhealthy threshold is 1, so the first stream reset marks the host unhealthy. + EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); + + // GOAWAY during check should be handled gracefully. + test_sessions_[0]->codec_client_->raiseGoAway(Http::GoAwayErrorCode::NoError); + expectHostHealthy(true); + + test_sessions_[0]->request_encoder_.stream_.resetStream(Http::StreamResetReason::RemoteReset); + expectHostHealthy(false); + + // GOAWAY should cause a new connection to be created. + expectClientCreate(0); + expectHealthcheckStart(0); + test_sessions_[0]->interval_timer_->invokeCallback(); + + expectHealthcheckStop(0); + // Healthy threshold is 2, so we're pending a state change. + EXPECT_CALL(*this, onHostStatus(_, HealthTransition::ChangePending)); + respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING); + expectHostHealthy(false); +} + +// Test receiving GOAWAY (no error) closes connection after a bad response. +TEST_F(GrpcHealthCheckerImplTest, GoAwayProbeInProgressBadResponse) { + setupHCWithUnhealthyThreshold(/*threshold=*/1); + cluster_->prioritySet().getMockHostSet(0)->hosts_ = { + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + + expectSessionCreate(); + expectHealthcheckStart(0); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); + health_checker_->start(); + + expectHealthcheckStop(0); + // Unhealthy threshold is 1, so the first bad response marks the host unhealthy. + EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); + + // GOAWAY during check should be handled gracefully. + test_sessions_[0]->codec_client_->raiseGoAway(Http::GoAwayErrorCode::NoError); + expectHostHealthy(true); + + respondResponseSpec(0, ResponseSpec{{{":status", "200"}, {"content-type", "application/grpc"}}, + {ResponseSpec::invalidChunk()}, + {}}); + expectHostHealthy(false); + + // GOAWAY should cause a new connection to be created. + expectClientCreate(0); + expectHealthcheckStart(0); + test_sessions_[0]->interval_timer_->invokeCallback(); + + expectHealthcheckStop(0); + // Healthy threshold is 2, so we're pending a state change. + EXPECT_CALL(*this, onHostStatus(_, HealthTransition::ChangePending)); + respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING); + expectHostHealthy(false); +} + +// Test receiving GOAWAY (no error) and a connection close. +TEST_F(GrpcHealthCheckerImplTest, GoAwayProbeInProgressConnectionClose) { + setupHCWithUnhealthyThreshold(/*threshold=*/1); + cluster_->prioritySet().getMockHostSet(0)->hosts_ = { + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + + expectSessionCreate(); + expectHealthcheckStart(0); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); + health_checker_->start(); + + expectHealthcheckStop(0); + // Unhealthy threshold is 1, so the first connection close marks the host unhealthy. + EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); + + // GOAWAY during check should be handled gracefully.
+ test_sessions_[0]->codec_client_->raiseGoAway(Http::GoAwayErrorCode::NoError); + expectHostHealthy(true); + + test_sessions_[0]->client_connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + expectHostHealthy(false); + + // GOAWAY should cause a new connection to be created. + expectClientCreate(0); + expectHealthcheckStart(0); + test_sessions_[0]->interval_timer_->invokeCallback(); + + expectHealthcheckStop(0); + // Healthy threshold is 2, so we're pending a state change. + EXPECT_CALL(*this, onHostStatus(_, HealthTransition::ChangePending)); + respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::SERVING); + expectHostHealthy(false); +} + // Test receiving GOAWAY between checks affects nothing. TEST_F(GrpcHealthCheckerImplTest, GoAwayBetweenChecks) { setupHC(); @@ -4622,7 +4796,7 @@ TEST_F(GrpcHealthCheckerImplTest, GoAwayBetweenChecks) { expectHostHealthy(true); // GOAWAY between checks should go unnoticed. - test_sessions_[0]->codec_client_->raiseGoAway(); + test_sessions_[0]->codec_client_->raiseGoAway(Http::GoAwayErrorCode::NoError); expectClientCreate(0); expectHealthcheckStart(0); diff --git a/test/integration/fake_upstream.h index d31866c89421..e5bfda741d4a 100644 --- a/test/integration/fake_upstream.h +++ b/test/integration/fake_upstream.h @@ -447,7 +447,7 @@ class FakeHttpConnection : public Http::ServerConnectionCallbacks, public FakeCo // Http::ServerConnectionCallbacks Http::RequestDecoder& newStream(Http::ResponseEncoder& response_encoder, bool) override; - void onGoAway() override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } + void onGoAway(Http::GoAwayErrorCode) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } private: struct ReadFilter : public Network::ReadFilterBaseImpl { diff --git a/test/integration/http_integration.h index fca8019011b0..25cad95bb37b 100644 --- a/test/integration/http_integration.h +++ b/test/integration/http_integration.h @@ -61,7 +61,7 @@ class IntegrationCodecClient : public Http::CodecClientProd { CodecCallbacks(IntegrationCodecClient& parent) : parent_(parent) {} // Http::ConnectionCallbacks - void onGoAway() override { parent_.saw_goaway_ = true; } + void onGoAway(Http::GoAwayErrorCode) override { parent_.saw_goaway_ = true; } IntegrationCodecClient& parent_; }; diff --git a/test/mocks/http/mocks.h index 0f7a4063d419..c4794fbd74ef 100644 --- a/test/mocks/http/mocks.h +++ b/test/mocks/http/mocks.h @@ -44,7 +44,7 @@ class MockConnectionCallbacks : public virtual ConnectionCallbacks { ~MockConnectionCallbacks() override; // Http::ConnectionCallbacks - MOCK_METHOD(void, onGoAway, ()); + MOCK_METHOD(void, onGoAway, (GoAwayErrorCode error_code)); }; class MockServerConnectionCallbacks : public ServerConnectionCallbacks, From 0f6af54cdd11cad6a7f0905b6ff4c1292cc124cf Mon Sep 17 00:00:00 2001 From: tomocy <36136133+tomocy@users.noreply.github.com> Date: Thu, 25 Jun 2020 07:03:02 +0900 Subject: [PATCH 434/909] devex: set CLANG_FORMAT (#11683) * set CLANG_FORMAT in Dockerfile Signed-off-by: tomocy --- .devcontainer/Dockerfile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index ad93066b0d0a..797edace866a 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -21,3 +21,5 @@ RUN apt-get -y update \ ENV DEBIAN_FRONTEND= ENV PATH=/opt/llvm/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin + +ENV CLANG_FORMAT=/opt/llvm/bin/clang-format From
ca41842c207c7b371b86bb1eceb3353bd6f30fed Mon Sep 17 00:00:00 2001 From: antonio Date: Wed, 24 Jun 2020 18:13:12 -0400 Subject: [PATCH 435/909] [dispatcher] Refactor how callbacks are scheduled in the event loop. (#11663) Introduce a separate interface for to schedule callbacks for execution in the event loop. Also migrate existing users of Timer::enableTimer(0ms) that require immediate execution of the callback to use this new interface instead. Signed-off-by: Antonio Vicente --- include/envoy/event/BUILD | 7 ++ include/envoy/event/dispatcher.h | 9 ++ include/envoy/event/schedulable_cb.h | 51 +++++++++ include/envoy/event/timer.h | 4 +- source/common/event/BUILD | 14 +++ source/common/event/dispatcher_impl.cc | 16 ++- source/common/event/dispatcher_impl.h | 5 +- source/common/event/libevent_scheduler.cc | 6 + source/common/event/libevent_scheduler.h | 4 +- source/common/event/real_time_system.cc | 2 +- source/common/event/real_time_system.h | 2 +- source/common/event/schedulable_cb_impl.cc | 32 ++++++ source/common/event/schedulable_cb_impl.h | 30 +++++ source/common/http/http1/conn_pool.cc | 5 +- source/common/http/http1/conn_pool.h | 2 +- source/common/tcp/original_conn_pool.cc | 4 +- source/common/tcp/original_conn_pool.h | 3 +- test/common/event/dispatcher_impl_test.cc | 106 ++++++++++++++++++ test/common/http/http1/conn_pool_test.cc | 23 ++-- test/common/tcp/conn_pool_test.cc | 29 ++--- test/mocks/common.h | 5 +- test/mocks/event/mocks.cc | 12 ++ test/mocks/event/mocks.h | 36 +++++- test/test_common/simulated_time_system.cc | 37 +++--- test/test_common/simulated_time_system.h | 2 +- .../test_common/simulated_time_system_test.cc | 4 +- test/test_common/test_time.h | 5 +- test/test_common/test_time_system.h | 5 +- tools/spelling/spelling_dictionary.txt | 1 + 29 files changed, 391 insertions(+), 70 deletions(-) create mode 100644 include/envoy/event/schedulable_cb.h create mode 100644 source/common/event/schedulable_cb_impl.cc create mode 100644 source/common/event/schedulable_cb_impl.h diff --git a/include/envoy/event/BUILD b/include/envoy/event/BUILD index d24846f32871..ad215d6cc133 100644 --- a/include/envoy/event/BUILD +++ b/include/envoy/event/BUILD @@ -19,6 +19,7 @@ envoy_cc_library( deps = [ ":deferred_deletable", ":file_event_interface", + ":schedulable_cb_interface", ":signal_interface", "//include/envoy/common:scope_tracker_interface", "//include/envoy/common:time_interface", @@ -39,6 +40,11 @@ envoy_cc_library( hdrs = ["file_event.h"], ) +envoy_cc_library( + name = "schedulable_cb_interface", + hdrs = ["schedulable_cb.h"], +) + envoy_cc_library( name = "signal_interface", hdrs = ["signal.h"], @@ -48,6 +54,7 @@ envoy_cc_library( name = "timer_interface", hdrs = ["timer.h"], deps = [ + ":schedulable_cb_interface", "//include/envoy/common:time_interface", ], ) diff --git a/include/envoy/event/dispatcher.h b/include/envoy/event/dispatcher.h index 1cad39e3d1f5..1cbd3e212f1d 100644 --- a/include/envoy/event/dispatcher.h +++ b/include/envoy/event/dispatcher.h @@ -9,6 +9,7 @@ #include "envoy/common/scope_tracker.h" #include "envoy/common/time.h" #include "envoy/event/file_event.h" +#include "envoy/event/schedulable_cb.h" #include "envoy/event/signal.h" #include "envoy/event/timer.h" #include "envoy/filesystem/watcher.h" @@ -163,6 +164,14 @@ class Dispatcher { */ virtual Event::TimerPtr createTimer(TimerCb cb) PURE; + /** + * Allocates a schedulable callback. @see SchedulableCallback for docs on how to use the wrapped + * callback. 
+ * @param cb supplies the callback to invoke when the SchedulableCallback is triggered on the + * event loop. + */ + virtual Event::SchedulableCallbackPtr createSchedulableCallback(std::function cb) PURE; + /** * Submits an item for deferred delete. @see DeferredDeletable. */ diff --git a/include/envoy/event/schedulable_cb.h b/include/envoy/event/schedulable_cb.h new file mode 100644 index 000000000000..f73a82b316db --- /dev/null +++ b/include/envoy/event/schedulable_cb.h @@ -0,0 +1,51 @@ +#pragma once + +#include +#include + +#include "envoy/common/pure.h" + +namespace Envoy { +namespace Event { + +/** + * Callback wrapper that allows direct scheduling of callbacks in the event loop. + */ +class SchedulableCallback { +public: + virtual ~SchedulableCallback() = default; + + /** + * Schedule the callback so it runs in the current iteration of the event loop after all events + * scheduled in the current event loop have had a chance to execute. + */ + virtual void scheduleCallbackCurrentIteration() PURE; + + /** + * Cancel pending execution of the callback. + */ + virtual void cancel() PURE; + + /** + * Return true whether the SchedulableCallback is scheduled for execution. + */ + virtual bool enabled() PURE; +}; + +using SchedulableCallbackPtr = std::unique_ptr; + +/** + * SchedulableCallback factory. + */ +class CallbackScheduler { +public: + virtual ~CallbackScheduler() = default; + + /** + * Create a schedulable callback. + */ + virtual SchedulableCallbackPtr createSchedulableCallback(const std::function& cb) PURE; +}; + +} // namespace Event +} // namespace Envoy diff --git a/include/envoy/event/timer.h b/include/envoy/event/timer.h index 629fcdf10240..337c318a1224 100644 --- a/include/envoy/event/timer.h +++ b/include/envoy/event/timer.h @@ -6,6 +6,7 @@ #include "envoy/common/pure.h" #include "envoy/common/time.h" +#include "envoy/event/schedulable_cb.h" namespace Envoy { @@ -85,7 +86,8 @@ class TimeSystem : public TimeSource { * Creates a timer factory. This indirection enables thread-local timer-queue management, * so servers can have a separate timer-factory in each thread. 
*/ - virtual SchedulerPtr createScheduler(Scheduler& base_scheduler) PURE; + virtual SchedulerPtr createScheduler(Scheduler& base_scheduler, + CallbackScheduler& cb_scheduler) PURE; }; } // namespace Event diff --git a/source/common/event/BUILD b/source/common/event/BUILD index 78184213018b..5b1025db7490 100644 --- a/source/common/event/BUILD +++ b/source/common/event/BUILD @@ -64,6 +64,7 @@ envoy_cc_library( "dispatcher_impl.h", "event_impl_base.h", "file_event_impl.h", + "schedulable_cb_impl.h", ], deps = [ ":libevent_lib", @@ -104,6 +105,7 @@ envoy_cc_library( external_deps = ["event"], deps = [ ":libevent_lib", + ":schedulable_cb_lib", ":timer_lib", "//include/envoy/event:dispatcher_interface", "//include/envoy/event:timer_interface", @@ -111,6 +113,18 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "schedulable_cb_lib", + srcs = ["schedulable_cb_impl.cc"], + hdrs = ["schedulable_cb_impl.h"], + external_deps = ["event"], + deps = [ + ":event_impl_base_lib", + ":libevent_lib", + "//include/envoy/event:schedulable_cb_interface", + ], +) + envoy_cc_library( name = "timer_lib", srcs = ["timer_impl.cc"], diff --git a/source/common/event/dispatcher_impl.cc b/source/common/event/dispatcher_impl.cc index c58b9076682c..d4f9c28c68e6 100644 --- a/source/common/event/dispatcher_impl.cc +++ b/source/common/event/dispatcher_impl.cc @@ -39,9 +39,10 @@ DispatcherImpl::DispatcherImpl(const std::string& name, Api::Api& api, DispatcherImpl::DispatcherImpl(const std::string& name, Buffer::WatermarkFactoryPtr&& factory, Api::Api& api, Event::TimeSystem& time_system) : name_(name), api_(api), buffer_factory_(std::move(factory)), - scheduler_(time_system.createScheduler(base_scheduler_)), - deferred_delete_timer_(createTimerInternal([this]() -> void { clearDeferredDeleteList(); })), - post_timer_(createTimerInternal([this]() -> void { runPostCallbacks(); })), + scheduler_(time_system.createScheduler(base_scheduler_, base_scheduler_)), + deferred_delete_cb_(base_scheduler_.createSchedulableCallback( + [this]() -> void { clearDeferredDeleteList(); })), + post_cb_(base_scheduler_.createSchedulableCallback([this]() -> void { runPostCallbacks(); })), current_to_delete_(&to_delete_1_) { ASSERT(!name_.empty()); #ifdef ENVOY_HANDLE_SIGNALS @@ -159,6 +160,11 @@ TimerPtr DispatcherImpl::createTimer(TimerCb cb) { return createTimerInternal(cb); } +Event::SchedulableCallbackPtr DispatcherImpl::createSchedulableCallback(std::function cb) { + ASSERT(isThreadSafe()); + return base_scheduler_.createSchedulableCallback(cb); +} + TimerPtr DispatcherImpl::createTimerInternal(TimerCb cb) { return scheduler_->createTimer(cb, *this); } @@ -168,7 +174,7 @@ void DispatcherImpl::deferredDelete(DeferredDeletablePtr&& to_delete) { current_to_delete_->emplace_back(std::move(to_delete)); ENVOY_LOG(trace, "item added to deferred deletion list (size={})", current_to_delete_->size()); if (1 == current_to_delete_->size()) { - deferred_delete_timer_->enableTimer(std::chrono::milliseconds(0)); + deferred_delete_cb_->scheduleCallbackCurrentIteration(); } } @@ -188,7 +194,7 @@ void DispatcherImpl::post(std::function callback) { } if (do_post) { - post_timer_->enableTimer(std::chrono::milliseconds(0)); + post_cb_->scheduleCallbackCurrentIteration(); } } diff --git a/source/common/event/dispatcher_impl.h b/source/common/event/dispatcher_impl.h index 41be86039ad2..104791708d58 100644 --- a/source/common/event/dispatcher_impl.h +++ b/source/common/event/dispatcher_impl.h @@ -64,6 +64,7 @@ class DispatcherImpl : Logger::Loggable, 
Network::UdpListenerPtr createUdpListener(Network::SocketSharedPtr&& socket, Network::UdpListenerCallbacks& cb) override; TimerPtr createTimer(TimerCb cb) override; + Event::SchedulableCallbackPtr createSchedulableCallback(std::function cb) override; void deferredDelete(DeferredDeletablePtr&& to_delete) override; void exit() override; SignalEventPtr listenForSignal(int signal_num, SignalCb cb) override; @@ -109,8 +110,8 @@ class DispatcherImpl : Logger::Loggable, Buffer::WatermarkFactoryPtr buffer_factory_; LibeventScheduler base_scheduler_; SchedulerPtr scheduler_; - TimerPtr deferred_delete_timer_; - TimerPtr post_timer_; + SchedulableCallbackPtr deferred_delete_cb_; + SchedulableCallbackPtr post_cb_; std::vector to_delete_1_; std::vector to_delete_2_; std::vector* current_to_delete_; diff --git a/source/common/event/libevent_scheduler.cc b/source/common/event/libevent_scheduler.cc index db5f44a306cc..cec3cc8228ee 100644 --- a/source/common/event/libevent_scheduler.cc +++ b/source/common/event/libevent_scheduler.cc @@ -1,6 +1,7 @@ #include "common/event/libevent_scheduler.h" #include "common/common/assert.h" +#include "common/event/schedulable_cb_impl.h" #include "common/event/timer_impl.h" #include "event2/util.h" @@ -23,6 +24,11 @@ TimerPtr LibeventScheduler::createTimer(const TimerCb& cb, Dispatcher& dispatche return std::make_unique(libevent_, cb, dispatcher); }; +SchedulableCallbackPtr +LibeventScheduler::createSchedulableCallback(const std::function& cb) { + return std::make_unique(libevent_, cb); +}; + void LibeventScheduler::run(Dispatcher::RunType mode) { int flag = 0; switch (mode) { diff --git a/source/common/event/libevent_scheduler.h b/source/common/event/libevent_scheduler.h index f67d185636e7..748036114f5b 100644 --- a/source/common/event/libevent_scheduler.h +++ b/source/common/event/libevent_scheduler.h @@ -3,6 +3,7 @@ #include #include "envoy/event/dispatcher.h" +#include "envoy/event/schedulable_cb.h" #include "envoy/event/timer.h" #include "common/event/libevent.h" @@ -14,13 +15,14 @@ namespace Envoy { namespace Event { // Implements Scheduler based on libevent. -class LibeventScheduler : public Scheduler { +class LibeventScheduler : public Scheduler, public CallbackScheduler { public: using OnPrepareCallback = std::function; LibeventScheduler(); // Scheduler TimerPtr createTimer(const TimerCb& cb, Dispatcher& dispatcher) override; + SchedulableCallbackPtr createSchedulableCallback(const std::function& cb) override; /** * Runs the event loop. 
diff --git a/source/common/event/real_time_system.cc b/source/common/event/real_time_system.cc index c528b58b4e8c..7f022c23a0bb 100644 --- a/source/common/event/real_time_system.cc +++ b/source/common/event/real_time_system.cc @@ -22,7 +22,7 @@ class RealScheduler : public Scheduler { } // namespace -SchedulerPtr RealTimeSystem::createScheduler(Scheduler& base_scheduler) { +SchedulerPtr RealTimeSystem::createScheduler(Scheduler& base_scheduler, CallbackScheduler&) { return std::make_unique(base_scheduler); } diff --git a/source/common/event/real_time_system.h b/source/common/event/real_time_system.h index 5323da8bfac3..a5b86466eec1 100644 --- a/source/common/event/real_time_system.h +++ b/source/common/event/real_time_system.h @@ -13,7 +13,7 @@ namespace Event { class RealTimeSystem : public TimeSystem { public: // TimeSystem - SchedulerPtr createScheduler(Scheduler&) override; + SchedulerPtr createScheduler(Scheduler&, CallbackScheduler&) override; // TimeSource SystemTime systemTime() override { return time_source_.systemTime(); } diff --git a/source/common/event/schedulable_cb_impl.cc b/source/common/event/schedulable_cb_impl.cc new file mode 100644 index 000000000000..697ad2026b09 --- /dev/null +++ b/source/common/event/schedulable_cb_impl.cc @@ -0,0 +1,32 @@ +#include "common/event/schedulable_cb_impl.h" + +#include "common/common/assert.h" + +#include "event2/event.h" + +namespace Envoy { +namespace Event { + +SchedulableCallbackImpl::SchedulableCallbackImpl(Libevent::BasePtr& libevent, + std::function cb) + : cb_(cb) { + ASSERT(cb_); + evtimer_assign( + &raw_event_, libevent.get(), + [](evutil_socket_t, short, void* arg) -> void { + SchedulableCallbackImpl* cb = static_cast(arg); + cb->cb_(); + }, + this); +} + +void SchedulableCallbackImpl::scheduleCallbackCurrentIteration() { + event_active(&raw_event_, EV_TIMEOUT, 0); +} + +void SchedulableCallbackImpl::cancel() { event_del(&raw_event_); } + +bool SchedulableCallbackImpl::enabled() { return 0 != evtimer_pending(&raw_event_, nullptr); } + +} // namespace Event +} // namespace Envoy diff --git a/source/common/event/schedulable_cb_impl.h b/source/common/event/schedulable_cb_impl.h new file mode 100644 index 000000000000..e6bea654f4b9 --- /dev/null +++ b/source/common/event/schedulable_cb_impl.h @@ -0,0 +1,30 @@ +#pragma once + +#include "envoy/event/schedulable_cb.h" + +#include "common/event/event_impl_base.h" +#include "common/event/libevent.h" + +namespace Envoy { +namespace Event { + +class DispatcherImpl; + +/** + * libevent implementation of SchedulableCallback. + */ +class SchedulableCallbackImpl : public SchedulableCallback, ImplBase { +public: + SchedulableCallbackImpl(Libevent::BasePtr& libevent, std::function cb); + + // SchedulableCallback implementation. 
+ void scheduleCallbackCurrentIteration() override; + void cancel() override; + bool enabled() override; + +private: + std::function cb_; +}; + +} // namespace Event +} // namespace Envoy diff --git a/source/common/http/http1/conn_pool.cc b/source/common/http/http1/conn_pool.cc index c717fb1d39cf..050dcb4ce3cd 100644 --- a/source/common/http/http1/conn_pool.cc +++ b/source/common/http/http1/conn_pool.cc @@ -5,6 +5,7 @@ #include #include "envoy/event/dispatcher.h" +#include "envoy/event/schedulable_cb.h" #include "envoy/event/timer.h" #include "envoy/http/codec.h" #include "envoy/http/header_map.h" @@ -28,7 +29,7 @@ ConnPoolImpl::ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSha const Network::TransportSocketOptionsSharedPtr& transport_socket_options) : ConnPoolImplBase(std::move(host), std::move(priority), dispatcher, options, transport_socket_options, Protocol::Http11), - upstream_ready_timer_(dispatcher_.createTimer([this]() { + upstream_ready_cb_(dispatcher_.createSchedulableCallback([this]() { upstream_ready_enabled_ = false; onUpstreamReady(); })) {} @@ -58,7 +59,7 @@ void ConnPoolImpl::onResponseComplete(ActiveClient& client) { if (!pending_requests_.empty() && !upstream_ready_enabled_) { upstream_ready_enabled_ = true; - upstream_ready_timer_->enableTimer(std::chrono::milliseconds(0)); + upstream_ready_cb_->scheduleCallbackCurrentIteration(); } checkForDrained(); diff --git a/source/common/http/http1/conn_pool.h b/source/common/http/http1/conn_pool.h index 719671b0772f..e8ddc883525b 100644 --- a/source/common/http/http1/conn_pool.h +++ b/source/common/http/http1/conn_pool.h @@ -81,7 +81,7 @@ class ConnPoolImpl : public Envoy::Http::ConnPoolImplBase { void onDownstreamReset(ActiveClient& client); void onResponseComplete(ActiveClient& client); - Event::TimerPtr upstream_ready_timer_; + Event::SchedulableCallbackPtr upstream_ready_cb_; bool upstream_ready_enabled_{false}; }; diff --git a/source/common/tcp/original_conn_pool.cc b/source/common/tcp/original_conn_pool.cc index 78d7ff3532e4..378a3314f7d2 100644 --- a/source/common/tcp/original_conn_pool.cc +++ b/source/common/tcp/original_conn_pool.cc @@ -18,7 +18,7 @@ OriginalConnPoolImpl::OriginalConnPoolImpl( Network::TransportSocketOptionsSharedPtr transport_socket_options) : dispatcher_(dispatcher), host_(host), priority_(priority), socket_options_(options), transport_socket_options_(transport_socket_options), - upstream_ready_timer_(dispatcher_.createTimer([this]() { onUpstreamReady(); })) {} + upstream_ready_cb_(dispatcher_.createSchedulableCallback([this]() { onUpstreamReady(); })) {} OriginalConnPoolImpl::~OriginalConnPoolImpl() { while (!ready_conns_.empty()) { @@ -310,7 +310,7 @@ void OriginalConnPoolImpl::processIdleConnection(ActiveConn& conn, bool new_conn if (delay && !pending_requests_.empty() && !upstream_ready_enabled_) { upstream_ready_enabled_ = true; - upstream_ready_timer_->enableTimer(std::chrono::milliseconds(0)); + upstream_ready_cb_->scheduleCallbackCurrentIteration(); } checkForDrained(); diff --git a/source/common/tcp/original_conn_pool.h b/source/common/tcp/original_conn_pool.h index 148416e2aa98..2c0af2d50680 100644 --- a/source/common/tcp/original_conn_pool.h +++ b/source/common/tcp/original_conn_pool.h @@ -4,6 +4,7 @@ #include #include "envoy/event/deferred_deletable.h" +#include "envoy/event/schedulable_cb.h" #include "envoy/event/timer.h" #include "envoy/network/connection.h" #include "envoy/network/filter.h" @@ -159,7 +160,7 @@ class OriginalConnPoolImpl : Logger::Loggable, public 
Connecti std::list pending_requests_; std::list drained_callbacks_; Stats::TimespanPtr conn_connect_ms_; - Event::TimerPtr upstream_ready_timer_; + Event::SchedulableCallbackPtr upstream_ready_cb_; bool upstream_ready_enabled_{false}; }; diff --git a/test/common/event/dispatcher_impl_test.cc b/test/common/event/dispatcher_impl_test.cc index f15107f67307..60efa4c8d53a 100644 --- a/test/common/event/dispatcher_impl_test.cc +++ b/test/common/event/dispatcher_impl_test.cc @@ -25,6 +25,112 @@ namespace Envoy { namespace Event { namespace { +class SchedulableCallbackImplTest : public testing::Test { +protected: + SchedulableCallbackImplTest() + : api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher("test_thread")) {} + + void createCallback(std::function cb) { + callbacks_.emplace_back(dispatcher_->createSchedulableCallback(cb)); + } + + Api::ApiPtr api_; + DispatcherPtr dispatcher_; + std::vector callbacks_; + + static void onWatcherReady(evwatch*, const evwatch_prepare_cb_info*, void* arg) { + // `arg` contains the ReadyWatcher passed in from evwatch_prepare_new. + auto watcher = static_cast(arg); + watcher->ready(); + } +}; + +TEST_F(SchedulableCallbackImplTest, ScheduleAndCancel) { + ReadyWatcher watcher; + + auto cb = dispatcher_->createSchedulableCallback([&]() { watcher.ready(); }); + + // Cancel is a no-op if not scheduled. + cb->cancel(); + dispatcher_->run(Dispatcher::RunType::Block); + + // Callback is not invoked if cancelled before it executes. + cb->scheduleCallbackCurrentIteration(); + EXPECT_TRUE(cb->enabled()); + cb->cancel(); + EXPECT_FALSE(cb->enabled()); + dispatcher_->run(Dispatcher::RunType::Block); + + // Scheduled callback executes. + cb->scheduleCallbackCurrentIteration(); + EXPECT_CALL(watcher, ready()); + dispatcher_->run(Dispatcher::RunType::Block); + + // Callbacks implicitly cancelled if runner is deleted. + cb->scheduleCallbackCurrentIteration(); + cb.reset(); + dispatcher_->run(Dispatcher::RunType::Block); +} + +TEST_F(SchedulableCallbackImplTest, ScheduleOrder) { + ReadyWatcher watcher0; + createCallback([&]() { watcher0.ready(); }); + ReadyWatcher watcher1; + createCallback([&]() { watcher1.ready(); }); + + // Callback run in the order they are scheduled. 
+ callbacks_[0]->scheduleCallbackCurrentIteration(); + callbacks_[1]->scheduleCallbackCurrentIteration(); + EXPECT_CALL(watcher0, ready()); + EXPECT_CALL(watcher1, ready()); + dispatcher_->run(Dispatcher::RunType::Block); +} + +TEST_F(SchedulableCallbackImplTest, ScheduleChainingAndCancellation) { + DispatcherImpl* dispatcher_impl = static_cast(dispatcher_.get()); + ReadyWatcher prepare_watcher; + evwatch_prepare_new(&dispatcher_impl->base(), onWatcherReady, &prepare_watcher); + + ReadyWatcher watcher0; + createCallback([&]() { + watcher0.ready(); + callbacks_[1]->scheduleCallbackCurrentIteration(); + }); + + ReadyWatcher watcher1; + createCallback([&]() { + watcher1.ready(); + callbacks_[2]->scheduleCallbackCurrentIteration(); + callbacks_[3]->scheduleCallbackCurrentIteration(); + callbacks_[4]->scheduleCallbackCurrentIteration(); + }); + + ReadyWatcher watcher2; + createCallback([&]() { + watcher2.ready(); + EXPECT_TRUE(callbacks_[3]->enabled()); + callbacks_[3]->cancel(); + EXPECT_TRUE(callbacks_[4]->enabled()); + callbacks_[4].reset(); + }); + + ReadyWatcher watcher3; + createCallback([&]() { watcher3.ready(); }); + + ReadyWatcher watcher4; + createCallback([&]() { watcher4.ready(); }); + + // Chained callbacks run in the same event loop iteration, as signaled by a single call to + // prepare_watcher.ready(). watcher3 and watcher4 are not invoked because cb2 cancels + // cb3 and deletes cb4 as part of its execution. + callbacks_[0]->scheduleCallbackCurrentIteration(); + EXPECT_CALL(prepare_watcher, ready()); + EXPECT_CALL(watcher0, ready()); + EXPECT_CALL(watcher1, ready()); + EXPECT_CALL(watcher2, ready()); + dispatcher_->run(Dispatcher::RunType::Block); +} + class TestDeferredDeletable : public DeferredDeletable { public: TestDeferredDeletable(std::function on_destroy) : on_destroy_(on_destroy) {} diff --git a/test/common/http/http1/conn_pool_test.cc b/test/common/http/http1/conn_pool_test.cc index d2f084313195..9b2bdc3f9e7e 100644 --- a/test/common/http/http1/conn_pool_test.cc +++ b/test/common/http/http1/conn_pool_test.cc @@ -52,11 +52,11 @@ class ConnPoolImplForTest : public ConnPoolImpl { public: ConnPoolImplForTest(Event::MockDispatcher& dispatcher, Upstream::ClusterInfoConstSharedPtr cluster, - NiceMock* upstream_ready_timer) + NiceMock* upstream_ready_cb) : ConnPoolImpl(dispatcher, Upstream::makeTestHost(cluster, "tcp://127.0.0.1:9000"), Upstream::ResourcePriority::Default, nullptr, nullptr), api_(Api::createApiForTest()), mock_dispatcher_(dispatcher), - mock_upstream_ready_timer_(upstream_ready_timer) {} + mock_upstream_ready_cb_(upstream_ready_cb) {} ~ConnPoolImplForTest() override { EXPECT_EQ(0U, ready_clients_.size()); @@ -112,18 +112,20 @@ class ConnPoolImplForTest : public ConnPoolImpl { void expectEnableUpstreamReady() { EXPECT_FALSE(upstream_ready_enabled_); - EXPECT_CALL(*mock_upstream_ready_timer_, enableTimer(_, _)).Times(1).RetiresOnSaturation(); + EXPECT_CALL(*mock_upstream_ready_cb_, scheduleCallbackCurrentIteration()) + .Times(1) + .RetiresOnSaturation(); } void expectAndRunUpstreamReady() { EXPECT_TRUE(upstream_ready_enabled_); - mock_upstream_ready_timer_->invokeCallback(); + mock_upstream_ready_cb_->invokeCallback(); EXPECT_FALSE(upstream_ready_enabled_); } Api::ApiPtr api_; Event::MockDispatcher& mock_dispatcher_; - NiceMock* mock_upstream_ready_timer_; + NiceMock* mock_upstream_ready_cb_; std::vector test_clients_; }; @@ -133,9 +135,9 @@ class ConnPoolImplForTest : public ConnPoolImpl { class Http1ConnPoolImplTest : public testing::Test { public: 
Http1ConnPoolImplTest() - : upstream_ready_timer_(new NiceMock(&dispatcher_)), + : upstream_ready_cb_(new NiceMock(&dispatcher_)), conn_pool_( - std::make_unique(dispatcher_, cluster_, upstream_ready_timer_)) {} + std::make_unique(dispatcher_, cluster_, upstream_ready_cb_)) {} ~Http1ConnPoolImplTest() override { EXPECT_EQ("", TestUtility::nonZeroedGauges(cluster_->stats_store_.gauges())); @@ -143,7 +145,7 @@ class Http1ConnPoolImplTest : public testing::Test { NiceMock dispatcher_; std::shared_ptr cluster_{new NiceMock()}; - NiceMock* upstream_ready_timer_; + NiceMock* upstream_ready_cb_; std::unique_ptr conn_pool_; NiceMock runtime_; }; @@ -288,10 +290,11 @@ TEST_F(Http1ConnPoolImplTest, VerifyAlpnFallback) { cluster_->transport_socket_matcher_ = std::make_unique>(std::move(factory)); - EXPECT_CALL(dispatcher_, createTimer_(_)); + new NiceMock(&dispatcher_); + // Recreate the conn pool so that the host re-evaluates the transport socket match, arriving at // our test transport socket factory. - conn_pool_ = std::make_unique(dispatcher_, cluster_, upstream_ready_timer_); + conn_pool_ = std::make_unique(dispatcher_, cluster_, upstream_ready_cb_); NiceMock outer_decoder; ConnPoolCallbacks callbacks; conn_pool_->expectClientCreate(Protocol::Http11); diff --git a/test/common/tcp/conn_pool_test.cc b/test/common/tcp/conn_pool_test.cc index 63ac6f3fc6ae..3b5511088fbd 100644 --- a/test/common/tcp/conn_pool_test.cc +++ b/test/common/tcp/conn_pool_test.cc @@ -71,7 +71,8 @@ struct ConnPoolCallbacks : public Tcp::ConnectionPool::Callbacks { class ConnPoolBase : public Tcp::ConnectionPool::Instance { public: ConnPoolBase(Event::MockDispatcher& dispatcher, Upstream::HostSharedPtr host, - NiceMock* upstream_ready_timer, bool test_new_connection_pool); + NiceMock* upstream_ready_cb, + bool test_new_connection_pool); void addDrainedCallback(DrainedCb cb) override { conn_pool_->addDrainedCallback(cb); } void drainConnections() override { conn_pool_->drainConnections(); } @@ -114,7 +115,7 @@ class ConnPoolBase : public Tcp::ConnectionPool::Instance { std::unique_ptr conn_pool_; Event::MockDispatcher& mock_dispatcher_; - NiceMock* mock_upstream_ready_timer_; + NiceMock* mock_upstream_ready_cb_; std::vector test_conns_; Network::ConnectionCallbacks* callbacks_ = nullptr; bool test_new_connection_pool_; @@ -146,12 +147,12 @@ class ConnPoolBase : public Tcp::ConnectionPool::Instance { void expectEnableUpstreamReady(bool run) { if (!run) { EXPECT_FALSE(upstream_ready_enabled_); - EXPECT_CALL(*parent_.mock_upstream_ready_timer_, enableTimer(_, _)) + EXPECT_CALL(*parent_.mock_upstream_ready_cb_, scheduleCallbackCurrentIteration()) .Times(1) .RetiresOnSaturation(); } else { EXPECT_TRUE(upstream_ready_enabled_); - parent_.mock_upstream_ready_timer_->invokeCallback(); + parent_.mock_upstream_ready_cb_->invokeCallback(); EXPECT_FALSE(upstream_ready_enabled_); } } @@ -160,9 +161,9 @@ class ConnPoolBase : public Tcp::ConnectionPool::Instance { }; ConnPoolBase::ConnPoolBase(Event::MockDispatcher& dispatcher, Upstream::HostSharedPtr host, - NiceMock* upstream_ready_timer, + NiceMock* upstream_ready_cb, bool test_new_connection_pool) - : mock_dispatcher_(dispatcher), mock_upstream_ready_timer_(upstream_ready_timer), + : mock_dispatcher_(dispatcher), mock_upstream_ready_cb_(upstream_ready_cb), test_new_connection_pool_(test_new_connection_pool) { // TODO(alyssarwilk) remove this assert and test the old and the new when it lands. 
ASSERT(!test_new_connection_pool_); @@ -176,9 +177,11 @@ void ConnPoolBase::expectEnableUpstreamReady(bool run) { dynamic_cast(conn_pool_.get())->expectEnableUpstreamReady(run); } else { if (!run) { - EXPECT_CALL(*mock_upstream_ready_timer_, enableTimer(_, _)).Times(1).RetiresOnSaturation(); + EXPECT_CALL(*mock_upstream_ready_cb_, scheduleCallbackCurrentIteration()) + .Times(1) + .RetiresOnSaturation(); } else { - mock_upstream_ready_timer_->invokeCallback(); + mock_upstream_ready_cb_->invokeCallback(); } } } @@ -190,9 +193,9 @@ class TcpConnPoolImplTest : public testing::TestWithParam { public: TcpConnPoolImplTest() : test_new_connection_pool_(GetParam()), - upstream_ready_timer_(new NiceMock(&dispatcher_)), + upstream_ready_cb_(new NiceMock(&dispatcher_)), host_(Upstream::makeTestHost(cluster_, "tcp://127.0.0.1:9000")), - conn_pool_(dispatcher_, host_, upstream_ready_timer_, test_new_connection_pool_) { + conn_pool_(dispatcher_, host_, upstream_ready_cb_, test_new_connection_pool_) { // TODO(alyssarwilk) remove this assert and test the old and the new when it lands. ASSERT(!test_new_connection_pool_); } @@ -205,7 +208,7 @@ class TcpConnPoolImplTest : public testing::TestWithParam { bool test_new_connection_pool_; NiceMock dispatcher_; std::shared_ptr cluster_{new NiceMock()}; - NiceMock* upstream_ready_timer_; + NiceMock* upstream_ready_cb_; Upstream::HostSharedPtr host_; ConnPoolBase conn_pool_; NiceMock runtime_; @@ -218,7 +221,7 @@ class TcpConnPoolImplDestructorTest : public testing::TestWithParam { public: TcpConnPoolImplDestructorTest() : test_new_connection_pool_(GetParam()), - upstream_ready_timer_(new NiceMock(&dispatcher_)) { + upstream_ready_cb_(new NiceMock(&dispatcher_)) { host_ = Upstream::makeTestHost(cluster_, "tcp://127.0.0.1:9000"); ASSERT(!test_new_connection_pool_); if (!test_new_connection_pool_) { @@ -247,7 +250,7 @@ class TcpConnPoolImplDestructorTest : public testing::TestWithParam { Upstream::HostConstSharedPtr host_; NiceMock dispatcher_; std::shared_ptr cluster_{new NiceMock()}; - NiceMock* upstream_ready_timer_; + NiceMock* upstream_ready_cb_; NiceMock* connect_timer_; NiceMock* connection_; std::unique_ptr conn_pool_; diff --git a/test/mocks/common.h b/test/mocks/common.h index 57bdc20623f6..c9bf312e4c31 100644 --- a/test/mocks/common.h +++ b/test/mocks/common.h @@ -51,8 +51,9 @@ class MockTimeSystem : public Event::TestTimeSystem { // where timer callbacks are triggered by the advancement of time. This implementation // matches recent behavior, where real-time timers were created directly in libevent // by dispatcher_impl.cc. 
- Event::SchedulerPtr createScheduler(Event::Scheduler& base_scheduler) override { - return real_time_.createScheduler(base_scheduler); + Event::SchedulerPtr createScheduler(Event::Scheduler& base_scheduler, + Event::CallbackScheduler& cb_scheduler) override { + return real_time_.createScheduler(base_scheduler, cb_scheduler); } void advanceTimeWait(const Duration& duration) override { real_time_.advanceTimeWait(duration); } void advanceTimeAsync(const Duration& duration) override { diff --git a/test/mocks/event/mocks.cc b/test/mocks/event/mocks.cc index 6d7a5f6d842e..60c0659bd5c1 100644 --- a/test/mocks/event/mocks.cc +++ b/test/mocks/event/mocks.cc @@ -51,6 +51,18 @@ MockTimer::MockTimer(MockDispatcher* dispatcher) : MockTimer() { MockTimer::~MockTimer() = default; +MockSchedulableCallback::~MockSchedulableCallback() = default; + +MockSchedulableCallback::MockSchedulableCallback(MockDispatcher* dispatcher) + : dispatcher_(dispatcher) { + EXPECT_CALL(*dispatcher, createSchedulableCallback_(_)) + .WillOnce(DoAll(SaveArg<0>(&callback_), Return(this))) + .RetiresOnSaturation(); + ON_CALL(*this, scheduleCallbackCurrentIteration()).WillByDefault(Assign(&enabled_, true)); + ON_CALL(*this, cancel()).WillByDefault(Assign(&enabled_, false)); + ON_CALL(*this, enabled()).WillByDefault(ReturnPointee(&enabled_)); +} + MockSignalEvent::MockSignalEvent(MockDispatcher* dispatcher) { EXPECT_CALL(*dispatcher, listenForSignal_(_, _)) .WillOnce(DoAll(SaveArg<1>(&callback_), Return(this))) diff --git a/test/mocks/event/mocks.h b/test/mocks/event/mocks.h index 018f937203a4..f11775802062 100644 --- a/test/mocks/event/mocks.h +++ b/test/mocks/event/mocks.h @@ -74,7 +74,17 @@ class MockDispatcher : public Dispatcher { } Event::TimerPtr createTimer(Event::TimerCb cb) override { - return Event::TimerPtr{createTimer_(cb)}; + auto timer = Event::TimerPtr{createTimer_(cb)}; + // Assert that the timer is not null to avoid confusing test failures down the line. + ASSERT(timer != nullptr); + return timer; + } + + Event::SchedulableCallbackPtr createSchedulableCallback(std::function cb) override { + auto schedulable_cb = Event::SchedulableCallbackPtr{createSchedulableCallback_(cb)}; + // Assert that schedulable_cb is not null to avoid confusing test failures down the line. 
+ ASSERT(schedulable_cb != nullptr); + return schedulable_cb; } void deferredDelete(DeferredDeletablePtr&& to_delete) override { @@ -109,6 +119,7 @@ class MockDispatcher : public Dispatcher { MOCK_METHOD(Network::UdpListener*, createUdpListener_, (Network::SocketSharedPtr && socket, Network::UdpListenerCallbacks& cb)); MOCK_METHOD(Timer*, createTimer_, (Event::TimerCb cb)); + MOCK_METHOD(SchedulableCallback*, createSchedulableCallback_, (std::function cb)); MOCK_METHOD(void, deferredDelete_, (DeferredDeletable * to_delete)); MOCK_METHOD(void, exit, ()); MOCK_METHOD(SignalEvent*, listenForSignal_, (int signal_num, SignalCb cb)); @@ -163,6 +174,29 @@ class MockTimer : public Timer { Event::TimerCb callback_; }; +class MockSchedulableCallback : public SchedulableCallback { +public: + MockSchedulableCallback(MockDispatcher* dispatcher); + ~MockSchedulableCallback() override; + + void invokeCallback() { + EXPECT_TRUE(enabled_); + enabled_ = false; + callback_(); + } + + // SchedulableCallback + MOCK_METHOD(void, scheduleCallbackCurrentIteration, ()); + MOCK_METHOD(void, cancel, ()); + MOCK_METHOD(bool, enabled, ()); + + MockDispatcher* dispatcher_{}; + bool enabled_{}; + +private: + std::function callback_; +}; + class MockSignalEvent : public SignalEvent { public: MockSignalEvent(MockDispatcher* dispatcher); diff --git a/test/test_common/simulated_time_system.cc b/test/test_common/simulated_time_system.cc index b840fd137a6f..ecc707b699d6 100644 --- a/test/test_common/simulated_time_system.cc +++ b/test/test_common/simulated_time_system.cc @@ -50,9 +50,8 @@ class UnlockGuard { // mechanism used in RealTimeSystem timers is employed for simulated alarms. class SimulatedTimeSystemHelper::Alarm : public Timer { public: - Alarm(SimulatedTimeSystemHelper& time_system, Scheduler& base_scheduler, TimerCb cb, - Dispatcher& dispatcher) - : base_timer_(base_scheduler.createTimer([this, cb] { runAlarm(cb); }, dispatcher)), + Alarm(SimulatedTimeSystemHelper& time_system, CallbackScheduler& cb_scheduler, TimerCb cb) + : cb_(cb_scheduler.createSchedulableCallback([this, cb] { runAlarm(cb); })), time_system_(time_system), index_(time_system.nextIndex()), armed_(false), pending_(false) { } @@ -68,7 +67,7 @@ class SimulatedTimeSystemHelper::Alarm : public Timer { const ScopeTrackedObject* scope) override; bool enabled() override { absl::MutexLock lock(&time_system_.mutex_); - return armed_ || base_timer_->enabled(); + return armed_ || cb_->enabled(); } void disableTimerLockHeld() ABSL_EXCLUSIVE_LOCKS_REQUIRED(time_system_.mutex_); @@ -81,8 +80,7 @@ class SimulatedTimeSystemHelper::Alarm : public Timer { * Activates the timer so it will be run the next time the libevent loop is run, * typically via Dispatcher::run(). */ - void activateLockHeld(const ScopeTrackedObject* scope = nullptr) - ABSL_EXCLUSIVE_LOCKS_REQUIRED(time_system_.mutex_) { + void activateLockHeld() ABSL_EXCLUSIVE_LOCKS_REQUIRED(time_system_.mutex_) { ASSERT(armed_); armed_ = false; if (pending_) { @@ -97,8 +95,7 @@ class SimulatedTimeSystemHelper::Alarm : public Timer { // See class comment for UnlockGuard for details on saving // time_system_.mutex_ prior to running libevent, which may delete this. 
UnlockGuard unlocker(time_system_.mutex_); - std::chrono::milliseconds duration = std::chrono::milliseconds::zero(); - base_timer_->enableTimer(duration, scope); + cb_->scheduleCallbackCurrentIteration(); } MonotonicTime time() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(time_system_.mutex_) { @@ -123,7 +120,7 @@ class SimulatedTimeSystemHelper::Alarm : public Timer { time_system.decPending(); } - TimerPtr base_timer_; + SchedulableCallbackPtr cb_; SimulatedTimeSystemHelper& time_system_; MonotonicTime time_ ABSL_GUARDED_BY(time_system_.mutex_); const uint64_t index_; @@ -151,16 +148,15 @@ bool SimulatedTimeSystemHelper::CompareAlarms::operator()(const Alarm* a, const // the expected thread. class SimulatedTimeSystemHelper::SimulatedScheduler : public Scheduler { public: - SimulatedScheduler(SimulatedTimeSystemHelper& time_system, Scheduler& base_scheduler) - : time_system_(time_system), base_scheduler_(base_scheduler) {} - TimerPtr createTimer(const TimerCb& cb, Dispatcher& dispatcher) override { - return std::make_unique(time_system_, base_scheduler_, cb, - dispatcher); + SimulatedScheduler(SimulatedTimeSystemHelper& time_system, CallbackScheduler& cb_scheduler) + : time_system_(time_system), cb_scheduler_(cb_scheduler) {} + TimerPtr createTimer(const TimerCb& cb, Dispatcher& /*dispatcher*/) override { + return std::make_unique(time_system_, cb_scheduler_, cb); }; private: SimulatedTimeSystemHelper& time_system_; - Scheduler& base_scheduler_; + CallbackScheduler& cb_scheduler_; }; SimulatedTimeSystemHelper::Alarm::Alarm::~Alarm() { @@ -170,7 +166,7 @@ SimulatedTimeSystemHelper::Alarm::Alarm::~Alarm() { } void SimulatedTimeSystemHelper::Alarm::Alarm::disableTimer() { - base_timer_->disableTimer(); + cb_->cancel(); absl::MutexLock lock(&time_system_.mutex_); disableTimerLockHeld(); } @@ -187,7 +183,7 @@ void SimulatedTimeSystemHelper::Alarm::Alarm::disableTimerLockHeld() { } void SimulatedTimeSystemHelper::Alarm::Alarm::enableHRTimer( - const std::chrono::microseconds& duration, const ScopeTrackedObject* scope) { + const std::chrono::microseconds& duration, const ScopeTrackedObject* /*scope*/) { if (duration.count() != 0) { disableTimer(); } @@ -202,7 +198,7 @@ void SimulatedTimeSystemHelper::Alarm::Alarm::enableHRTimer( armed_ = true; if (duration.count() == 0) { - activateLockHeld(scope); + activateLockHeld(); } else { time_system_.addAlarmLockHeld(this, duration); } @@ -341,8 +337,9 @@ void SimulatedTimeSystemHelper::addAlarmLockHeld( void SimulatedTimeSystemHelper::removeAlarmLockHeld(Alarm* alarm) { alarms_.erase(alarm); } -SchedulerPtr SimulatedTimeSystemHelper::createScheduler(Scheduler& base_scheduler) { - return std::make_unique(*this, base_scheduler); +SchedulerPtr SimulatedTimeSystemHelper::createScheduler(Scheduler& /*base_scheduler*/, + CallbackScheduler& cb_scheduler) { + return std::make_unique(*this, cb_scheduler); } void SimulatedTimeSystemHelper::setMonotonicTimeLockHeld(const MonotonicTime& monotonic_time) { diff --git a/test/test_common/simulated_time_system.h b/test/test_common/simulated_time_system.h index 2b31a564c8f7..521beaf3f28b 100644 --- a/test/test_common/simulated_time_system.h +++ b/test/test_common/simulated_time_system.h @@ -23,7 +23,7 @@ class SimulatedTimeSystemHelper : public TestTimeSystem { ~SimulatedTimeSystemHelper() override; // TimeSystem - SchedulerPtr createScheduler(Scheduler& base_scheduler) override; + SchedulerPtr createScheduler(Scheduler& base_scheduler, CallbackScheduler& cb_scheduler) override; // TestTimeSystem void advanceTimeWait(const 
Duration& duration) override; diff --git a/test/test_common/simulated_time_system_test.cc b/test/test_common/simulated_time_system_test.cc index efe47edd9e2e..5eba67a83bb8 100644 --- a/test/test_common/simulated_time_system_test.cc +++ b/test/test_common/simulated_time_system_test.cc @@ -19,7 +19,7 @@ namespace { class SimulatedTimeSystemTest : public testing::Test { protected: SimulatedTimeSystemTest() - : scheduler_(time_system_.createScheduler(base_scheduler_)), + : scheduler_(time_system_.createScheduler(base_scheduler_, base_scheduler_)), start_monotonic_time_(time_system_.monotonicTime()), start_system_time_(time_system_.systemTime()) {} @@ -58,7 +58,7 @@ class SimulatedTimeSystemTest : public testing::Test { base_scheduler_.run(Dispatcher::RunType::NonBlock); } - testing::NiceMock dispatcher_; + Event::MockDispatcher dispatcher_; LibeventScheduler base_scheduler_; SimulatedTimeSystem time_system_; SchedulerPtr scheduler_; diff --git a/test/test_common/test_time.h b/test/test_common/test_time.h index f5e24b8bd8a5..31880b73b5e2 100644 --- a/test/test_common/test_time.h +++ b/test/test_common/test_time.h @@ -19,8 +19,9 @@ class TestRealTimeSystem : public TestTimeSystem { ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex) override; // Event::TimeSystem - Event::SchedulerPtr createScheduler(Scheduler& base_scheduler) override { - return real_time_system_.createScheduler(base_scheduler); + Event::SchedulerPtr createScheduler(Scheduler& base_scheduler, + CallbackScheduler& cb_scheduler) override { + return real_time_system_.createScheduler(base_scheduler, cb_scheduler); } // TimeSource diff --git a/test/test_common/test_time_system.h b/test/test_common/test_time_system.h index cb9ac3480215..bc5d38972879 100644 --- a/test/test_common/test_time_system.h +++ b/test/test_common/test_time_system.h @@ -115,8 +115,9 @@ template class DelegatingTestTimeSystemBase : public T return timeSystem().waitFor(mutex, condvar, duration); } - SchedulerPtr createScheduler(Scheduler& base_scheduler) override { - return timeSystem().createScheduler(base_scheduler); + SchedulerPtr createScheduler(Scheduler& base_scheduler, + CallbackScheduler& cb_scheduler) override { + return timeSystem().createScheduler(base_scheduler, cb_scheduler); } SystemTime systemTime() override { return timeSystem().systemTime(); } MonotonicTime monotonicTime() override { return timeSystem().monotonicTime(); } diff --git a/tools/spelling/spelling_dictionary.txt b/tools/spelling/spelling_dictionary.txt index acdc35b82743..4f485c29bf46 100644 --- a/tools/spelling/spelling_dictionary.txt +++ b/tools/spelling/spelling_dictionary.txt @@ -973,6 +973,7 @@ sanitizer satisfiable scalability sched +schedulable schemas scopekey sendmsg From cc655cddba70eb677ffe24bb157b04e9ddb759ef Mon Sep 17 00:00:00 2001 From: asraa Date: Wed, 24 Jun 2020 18:13:42 -0400 Subject: [PATCH 436/909] fix router bugs (#11625) Signed-off-by: Asra Ali --- source/common/router/config_impl.cc | 11 +-- .../route_corpus/internal_redirect_nullderef | 23 ++++++ .../fuzz/filter_corpus/router_buffering | 74 +++++++++++++++++++ .../filters/http/common/fuzz/uber_filter.cc | 24 ++++-- .../filters/http/common/fuzz/uber_filter.h | 2 + 5 files changed, 121 insertions(+), 13 deletions(-) create mode 100644 test/common/router/route_corpus/internal_redirect_nullderef create mode 100644 test/extensions/filters/http/common/fuzz/filter_corpus/router_buffering diff --git a/source/common/router/config_impl.cc b/source/common/router/config_impl.cc index 128351c99dd8..08c46b9e25fb 100644 --- 
a/source/common/router/config_impl.cc +++ b/source/common/router/config_impl.cc @@ -149,14 +149,11 @@ InternalRedirectPolicyImpl::InternalRedirectPolicyImpl( PROTOBUF_GET_WRAPPED_OR_DEFAULT(policy_config, max_internal_redirects, 1)), enabled_(true), allow_cross_scheme_redirect_(policy_config.allow_cross_scheme_redirect()) { for (const auto& predicate : policy_config.predicates()) { - const std::string type{ - TypeUtil::typeUrlToDescriptorFullName(predicate.typed_config().type_url())}; - auto* factory = - Registry::FactoryRegistry::getFactoryByType(type); - - auto config = factory->createEmptyConfigProto(); + auto& factory = + Envoy::Config::Utility::getAndCheckFactory(predicate); + auto config = factory.createEmptyConfigProto(); Envoy::Config::Utility::translateOpaqueConfig(predicate.typed_config(), {}, validator, *config); - predicate_factories_.emplace_back(factory, std::move(config)); + predicate_factories_.emplace_back(&factory, std::move(config)); } } diff --git a/test/common/router/route_corpus/internal_redirect_nullderef b/test/common/router/route_corpus/internal_redirect_nullderef new file mode 100644 index 000000000000..962e3eb264e5 --- /dev/null +++ b/test/common/router/route_corpus/internal_redirect_nullderef @@ -0,0 +1,23 @@ +config { + virtual_hosts { + name: "q" + domains: "" + routes { + match { + path: "" + } + route { + cluster: "." + internal_redirect_policy { + predicates { + name: ":" + typed_config { + value: "-" + } + } + } + } + } + } +} +random_value: 1 \ No newline at end of file diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/router_buffering b/test/extensions/filters/http/common/fuzz/filter_corpus/router_buffering new file mode 100644 index 000000000000..04bb89d6e842 --- /dev/null +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/router_buffering @@ -0,0 +1,74 @@ +config { + name: "envoy.router" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + value: "*\023x-envoy-max-retries" + } +} +data { + headers { + headers { + key: "x-envoy-max-retries" + value: "?" 
+ } + headers { + key: "x-envoy-max-retries" + value: "ffffffffffffffffffffffffffffffffffffffffffvfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + } + headers { + key: "x-envoy-max-retries" + value: "ffffffffffffffffffffffffffffffffffffffffffvfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + } + headers { + key: "x-envoy-max-retries" + value: "&&&&&&&&&&&" + } + headers { + key: "fff\002fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffbffffffffffffffffmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + value: "&&&&&&&&&&&" + } + headers { + key: "=" + value: "ffffffffffffffffffffffffffffffffffffffffffvfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + } + } + trailers { + headers { + key: "x-envoy-max!-retries" + value: "&" + } + headers { + key: "type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBAC" + value: "&&&&&&&&&&&" + } + headers { + key: "x-envoy-max-retries" + } + headers { + key: "x-env-max-retries" + value: "fff\002fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffbffffffffffffffffmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + } + headers { + key: "x-envoy-max-retries" + value: "ffffffffffffffffffffffffffffffffffffffffffvfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + } + headers { + key: "x-envoy-max-retries" + value: 
"fff\002ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmtmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm}mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmfffffffffffffffffffffffffffffffffffffffffffffffffffffmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm}mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmffffffffffffffffffffffffffffffff" + } + headers { + key: "x-envoy-m_x-retries" + value: "x-envoy-max-retries" + } + headers { + key: "x-envoy-max-retries" + value: "?" + } + } + proto_body { + message { + type_url: "type.googleapis.com/google.protobuf.Empty" + } + chunk_size: 32 + } +} \ No newline at end of file diff --git a/test/extensions/filters/http/common/fuzz/uber_filter.cc b/test/extensions/filters/http/common/fuzz/uber_filter.cc index 49d8ff3bbe49..8052888886e5 100644 --- a/test/extensions/filters/http/common/fuzz/uber_filter.cc +++ b/test/extensions/filters/http/common/fuzz/uber_filter.cc @@ -38,6 +38,16 @@ UberFilterFuzzer::UberFilterFuzzer() : async_request_{&cluster_manager_.async_cl ON_CALL(filter_callback_, addAccessLogHandler(_)) .WillByDefault( Invoke([&](AccessLog::InstanceSharedPtr handler) -> void { access_logger_ = handler; })); + // This handles stopping execution after a direct response is sent. + ON_CALL(decoder_callbacks_, sendLocalReply(_, _, _, _, _)) + .WillByDefault( + Invoke([this](Http::Code code, absl::string_view body, + std::function modify_headers, + const absl::optional grpc_status, + absl::string_view details) { + enabled_ = false; + decoder_callbacks_.sendLocalReply_(code, body, modify_headers, grpc_status, details); + })); // Set expectations for particular filters that may get fuzzed. 
perFilterSetup(); } @@ -61,13 +71,15 @@ std::vector UberFilterFuzzer::parseHttpData(const test::fuzz::HttpD template void UberFilterFuzzer::runData(FilterType* filter, const test::fuzz::HttpData& data) { bool end_stream = false; + enabled_ = true; if (data.body_case() == test::fuzz::HttpData::BODY_NOT_SET && !data.has_trailers()) { end_stream = true; } const auto& headersStatus = sendHeaders(filter, data, end_stream); - if (headersStatus != Http::FilterHeadersStatus::Continue && - headersStatus != Http::FilterHeadersStatus::StopIteration) { - ENVOY_LOG_MISC(debug, "Finished with FilterHeadersStatus: {}", headersStatus); + ENVOY_LOG_MISC(debug, "Finished with FilterHeadersStatus: {}", headersStatus); + if ((headersStatus != Http::FilterHeadersStatus::Continue && + headersStatus != Http::FilterHeadersStatus::StopIteration) || + !enabled_) { return; } @@ -78,13 +90,13 @@ void UberFilterFuzzer::runData(FilterType* filter, const test::fuzz::HttpData& d } Buffer::OwnedImpl buffer(data_chunks[i]); const auto& dataStatus = sendData(filter, buffer, end_stream); - if (dataStatus != Http::FilterDataStatus::Continue) { - ENVOY_LOG_MISC(debug, "Finished with FilterDataStatus: {}", dataStatus); + ENVOY_LOG_MISC(debug, "Finished with FilterDataStatus: {}", dataStatus); + if (dataStatus != Http::FilterDataStatus::Continue || !enabled_) { return; } } - if (data.has_trailers()) { + if (data.has_trailers() && enabled_) { sendTrailers(filter, data); } } diff --git a/test/extensions/filters/http/common/fuzz/uber_filter.h b/test/extensions/filters/http/common/fuzz/uber_filter.h index bc9db9988055..772b94320b51 100644 --- a/test/extensions/filters/http/common/fuzz/uber_filter.h +++ b/test/extensions/filters/http/common/fuzz/uber_filter.h @@ -53,6 +53,8 @@ class UberFilterFuzzer { void sendTrailers(FilterType* filter, const test::fuzz::HttpData& data) = delete; private: + // This keeps track of when a filter will stop decoding due to direct responses. + bool enabled_ = true; NiceMock factory_context_; NiceMock filter_callback_; std::shared_ptr resolver_{std::make_shared()}; From 286ca92afa3eaa586ad60fe20d4f5541e77b5d5e Mon Sep 17 00:00:00 2001 From: Martin Matusiak Date: Thu, 25 Jun 2020 08:18:51 +1000 Subject: [PATCH 437/909] api: rename num_retries to max_retries (#11729) This PR proposes to rename the RetryPolicy field num_retries to max_retries. This parameter exists in two places: 1) the RetryPolicy message in the route configuration and 2) the header x-envoy-max-retries. The naming inconsistency is a UX papercut. max_retries feels like right name for what this field is for ie. the maximum number of retries that are permitted. There is also a stripped down RetryPolicy message which is used by RemoteDataSource which has a num_retries field. I'm including a matching rename of that for consistency. 
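For context, a minimal illustrative route configuration using this field (a sketch only — the cluster name and values are hypothetical, and the field names follow the v3 API before and after the rename):

  route:
    cluster: some_cluster          # hypothetical cluster name, for illustration
    retry_policy:
      retry_on: "5xx"
      num_retries: 3               # current v3 name; surfaces as max_retries after this rename
      per_try_timeout: 0.5s

The per-request override remains the x-envoy-max-retries request header, which is why aligning the proto field name with the header removes the naming papercut; v3 keeps num_retries behind the migrate annotation while the v4alpha shadow exposes max_retries directly.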
Risk Level: Low Testing: N/A Docs Changes: N/A Release Notes: N/A Signed-off-by: Martin Matusiak --- api/envoy/config/core/v3/base.proto | 4 +++- api/envoy/config/core/v4alpha/base.proto | 2 +- api/envoy/config/route/v3/route_components.proto | 4 +++- api/envoy/config/route/v4alpha/route_components.proto | 2 +- generated_api_shadow/envoy/config/core/v3/base.proto | 4 +++- generated_api_shadow/envoy/config/core/v4alpha/base.proto | 2 +- .../envoy/config/route/v3/route_components.proto | 4 +++- .../envoy/config/route/v4alpha/route_components.proto | 2 +- 8 files changed, 16 insertions(+), 8 deletions(-) diff --git a/api/envoy/config/core/v3/base.proto b/api/envoy/config/core/v3/base.proto index b8ce5bff4bd5..6175e585d708 100644 --- a/api/envoy/config/core/v3/base.proto +++ b/api/envoy/config/core/v3/base.proto @@ -13,6 +13,7 @@ import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -332,7 +333,8 @@ message RetryPolicy { // Specifies the allowed number of retries. This parameter is optional and // defaults to 1. - google.protobuf.UInt32Value num_retries = 2; + google.protobuf.UInt32Value num_retries = 2 + [(udpa.annotations.field_migrate).rename = "max_retries"]; } // The message specifies how to fetch data from remote and how to verify it. diff --git a/api/envoy/config/core/v4alpha/base.proto b/api/envoy/config/core/v4alpha/base.proto index dbc3c31e40e4..29364d51b5b8 100644 --- a/api/envoy/config/core/v4alpha/base.proto +++ b/api/envoy/config/core/v4alpha/base.proto @@ -332,7 +332,7 @@ message RetryPolicy { // Specifies the allowed number of retries. This parameter is optional and // defaults to 1. - google.protobuf.UInt32Value num_retries = 2; + google.protobuf.UInt32Value max_retries = 2; } // The message specifies how to fetch data from remote and how to verify it. diff --git a/api/envoy/config/route/v3/route_components.proto b/api/envoy/config/route/v3/route_components.proto index 33fa2779f727..e51f4ab87d93 100644 --- a/api/envoy/config/route/v3/route_components.proto +++ b/api/envoy/config/route/v3/route_components.proto @@ -17,6 +17,7 @@ import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -1085,7 +1086,8 @@ message RetryPolicy { // Specifies the allowed number of retries. This parameter is optional and // defaults to 1. These are the same conditions documented for // :ref:`config_http_filters_router_x-envoy-max-retries`. - google.protobuf.UInt32Value num_retries = 2; + google.protobuf.UInt32Value num_retries = 2 + [(udpa.annotations.field_migrate).rename = "max_retries"]; // Specifies a non-zero upstream timeout per retry attempt. This parameter is optional. The // same conditions documented for diff --git a/api/envoy/config/route/v4alpha/route_components.proto b/api/envoy/config/route/v4alpha/route_components.proto index 02161ffd48ef..741ff2cd540d 100644 --- a/api/envoy/config/route/v4alpha/route_components.proto +++ b/api/envoy/config/route/v4alpha/route_components.proto @@ -1064,7 +1064,7 @@ message RetryPolicy { // Specifies the allowed number of retries. This parameter is optional and // defaults to 1. 
These are the same conditions documented for // :ref:`config_http_filters_router_x-envoy-max-retries`. - google.protobuf.UInt32Value num_retries = 2; + google.protobuf.UInt32Value max_retries = 2; // Specifies a non-zero upstream timeout per retry attempt. This parameter is optional. The // same conditions documented for diff --git a/generated_api_shadow/envoy/config/core/v3/base.proto b/generated_api_shadow/envoy/config/core/v3/base.proto index 6f8c1129ac0f..af93ab8e9a09 100644 --- a/generated_api_shadow/envoy/config/core/v3/base.proto +++ b/generated_api_shadow/envoy/config/core/v3/base.proto @@ -13,6 +13,7 @@ import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -330,7 +331,8 @@ message RetryPolicy { // Specifies the allowed number of retries. This parameter is optional and // defaults to 1. - google.protobuf.UInt32Value num_retries = 2; + google.protobuf.UInt32Value num_retries = 2 + [(udpa.annotations.field_migrate).rename = "max_retries"]; } // The message specifies how to fetch data from remote and how to verify it. diff --git a/generated_api_shadow/envoy/config/core/v4alpha/base.proto b/generated_api_shadow/envoy/config/core/v4alpha/base.proto index dbc3c31e40e4..29364d51b5b8 100644 --- a/generated_api_shadow/envoy/config/core/v4alpha/base.proto +++ b/generated_api_shadow/envoy/config/core/v4alpha/base.proto @@ -332,7 +332,7 @@ message RetryPolicy { // Specifies the allowed number of retries. This parameter is optional and // defaults to 1. - google.protobuf.UInt32Value num_retries = 2; + google.protobuf.UInt32Value max_retries = 2; } // The message specifies how to fetch data from remote and how to verify it. diff --git a/generated_api_shadow/envoy/config/route/v3/route_components.proto b/generated_api_shadow/envoy/config/route/v3/route_components.proto index db7dec95dd26..0c1b7fd6ce7b 100644 --- a/generated_api_shadow/envoy/config/route/v3/route_components.proto +++ b/generated_api_shadow/envoy/config/route/v3/route_components.proto @@ -17,6 +17,7 @@ import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -1092,7 +1093,8 @@ message RetryPolicy { // Specifies the allowed number of retries. This parameter is optional and // defaults to 1. These are the same conditions documented for // :ref:`config_http_filters_router_x-envoy-max-retries`. - google.protobuf.UInt32Value num_retries = 2; + google.protobuf.UInt32Value num_retries = 2 + [(udpa.annotations.field_migrate).rename = "max_retries"]; // Specifies a non-zero upstream timeout per retry attempt. This parameter is optional. The // same conditions documented for diff --git a/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto b/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto index f8622decd12a..a107ecf7efbf 100644 --- a/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto +++ b/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto @@ -1092,7 +1092,7 @@ message RetryPolicy { // Specifies the allowed number of retries. This parameter is optional and // defaults to 1. 
These are the same conditions documented for // :ref:`config_http_filters_router_x-envoy-max-retries`. - google.protobuf.UInt32Value num_retries = 2; + google.protobuf.UInt32Value max_retries = 2; // Specifies a non-zero upstream timeout per retry attempt. This parameter is optional. The // same conditions documented for From 6a6c0792966d34488e95c5b04977b74da8e7089a Mon Sep 17 00:00:00 2001 From: foreseeable Date: Thu, 25 Jun 2020 00:24:58 +0000 Subject: [PATCH 438/909] test: Split huge monolith mock header to speed up test compilation (#11649) Commit Message: Split huge monolith mock header to speed up test compilation Additional Description: `cluster_manager_test` only used a simple mock class `MockAdmin` from `test/mocks/server/mocks.h`, which is a huge mock library. After splitting, the overall build time for `cluster_manager_test` reduced from 143.481s to 82.443s in my build cluster. Risk Level: low Testing: existing tests Docs Changes: N/A Release Notes: no Related Issues: #10917 Signed-off-by: Muge Chen --- test/mocks/server/BUILD | 21 +++++++++ test/mocks/server/admin.cc | 18 ++++++++ test/mocks/server/admin.h | 39 ++++++++++++++++ test/mocks/server/config_tracker.cc | 26 +++++++++++ test/mocks/server/config_tracker.h | 30 +++++++++++++ test/mocks/server/mocks.cc | 15 ------- test/mocks/server/mocks.h | 45 +------------------ test/server/config_validation/BUILD | 3 +- .../config_validation/cluster_manager_test.cc | 3 +- 9 files changed, 140 insertions(+), 60 deletions(-) create mode 100644 test/mocks/server/admin.cc create mode 100644 test/mocks/server/admin.h create mode 100644 test/mocks/server/config_tracker.cc create mode 100644 test/mocks/server/config_tracker.h diff --git a/test/mocks/server/BUILD b/test/mocks/server/BUILD index c6fcf62562ed..a907ca21feb7 100644 --- a/test/mocks/server/BUILD +++ b/test/mocks/server/BUILD @@ -8,6 +8,25 @@ licenses(["notice"]) # Apache 2 envoy_package() +envoy_cc_mock( + name = "config_tracker_mocks", + srcs = ["config_tracker.cc"], + hdrs = ["config_tracker.h"], + deps = [ + "//include/envoy/server:configuration_interface", + ], +) + +envoy_cc_mock( + name = "admin_mocks", + srcs = ["admin.cc"], + hdrs = ["admin.h"], + deps = [ + "//include/envoy/server:admin_interface", + "//test/mocks/server:config_tracker_mocks", + ], +) + envoy_cc_mock( name = "server_mocks", srcs = ["mocks.cc"], @@ -44,6 +63,8 @@ envoy_cc_mock( "//test/mocks/router:router_mocks", "//test/mocks/runtime:runtime_mocks", "//test/mocks/secret:secret_mocks", + "//test/mocks/server:admin_mocks", + "//test/mocks/server:config_tracker_mocks", "//test/mocks/thread_local:thread_local_mocks", "//test/mocks/tracing:tracing_mocks", "//test/mocks/upstream:upstream_mocks", diff --git a/test/mocks/server/admin.cc b/test/mocks/server/admin.cc new file mode 100644 index 000000000000..2411ef375f00 --- /dev/null +++ b/test/mocks/server/admin.cc @@ -0,0 +1,18 @@ +#include "admin.h" + +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Server { +MockAdmin::MockAdmin() { + ON_CALL(*this, getConfigTracker()).WillByDefault(testing::ReturnRef(config_tracker_)); +} + +MockAdmin::~MockAdmin() = default; + +} // namespace Server + +} // namespace Envoy diff --git a/test/mocks/server/admin.h b/test/mocks/server/admin.h new file mode 100644 index 000000000000..2a82d23859c6 --- /dev/null +++ b/test/mocks/server/admin.h @@ -0,0 +1,39 @@ +#pragma once + +#include + +#include "envoy/server/admin.h" + +#include "absl/strings/string_view.h" +#include 
"config_tracker.h" +#include "gmock/gmock.h" + +namespace Envoy { +namespace Server { +class MockAdmin : public Admin { +public: + MockAdmin(); + ~MockAdmin() override; + + // Server::Admin + MOCK_METHOD(bool, addHandler, + (const std::string& prefix, const std::string& help_text, HandlerCb callback, + bool removable, bool mutates_server_state)); + MOCK_METHOD(bool, removeHandler, (const std::string& prefix)); + MOCK_METHOD(Network::Socket&, socket, ()); + MOCK_METHOD(ConfigTracker&, getConfigTracker, ()); + MOCK_METHOD(void, startHttpListener, + (const std::string& access_log_path, const std::string& address_out_path, + Network::Address::InstanceConstSharedPtr address, + const Network::Socket::OptionsSharedPtr& socket_options, + Stats::ScopePtr&& listener_scope)); + MOCK_METHOD(Http::Code, request, + (absl::string_view path_and_query, absl::string_view method, + Http::ResponseHeaderMap& response_headers, std::string& body)); + MOCK_METHOD(void, addListenerToHandler, (Network::ConnectionHandler * handler)); + + ::testing::NiceMock config_tracker_; +}; +} // namespace Server + +} // namespace Envoy diff --git a/test/mocks/server/config_tracker.cc b/test/mocks/server/config_tracker.cc new file mode 100644 index 000000000000..bf53f7501e3a --- /dev/null +++ b/test/mocks/server/config_tracker.cc @@ -0,0 +1,26 @@ +#include "config_tracker.h" + +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Server { + +using ::testing::_; +using ::testing::Invoke; + +MockConfigTracker::MockConfigTracker() { + ON_CALL(*this, add_(_, _)) + .WillByDefault(Invoke([this](const std::string& key, Cb callback) -> EntryOwner* { + EXPECT_TRUE(config_tracker_callbacks_.find(key) == config_tracker_callbacks_.end()); + config_tracker_callbacks_[key] = callback; + return new MockEntryOwner(); + })); +} + +MockConfigTracker::~MockConfigTracker() = default; + +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/config_tracker.h b/test/mocks/server/config_tracker.h new file mode 100644 index 000000000000..a84b87c7feb0 --- /dev/null +++ b/test/mocks/server/config_tracker.h @@ -0,0 +1,30 @@ +#pragma once + +#include + +#include "envoy/server/config_tracker.h" + +#include "gmock/gmock.h" + +namespace Envoy { +namespace Server { +class MockConfigTracker : public ConfigTracker { +public: + MockConfigTracker(); + ~MockConfigTracker() override; + + struct MockEntryOwner : public EntryOwner {}; + + MOCK_METHOD(EntryOwner*, add_, (std::string, Cb)); + + // Server::ConfigTracker + MOCK_METHOD(const CbsMap&, getCallbacksMap, (), (const)); + EntryOwnerPtr add(const std::string& key, Cb callback) override { + return EntryOwnerPtr{add_(key, std::move(callback))}; + } + + std::unordered_map config_tracker_callbacks_; +}; +} // namespace Server + +} // namespace Envoy diff --git a/test/mocks/server/mocks.cc b/test/mocks/server/mocks.cc index e5b7fb43c63b..e9fc293558c5 100644 --- a/test/mocks/server/mocks.cc +++ b/test/mocks/server/mocks.cc @@ -53,21 +53,6 @@ MockOptions::MockOptions(const std::string& config_path) : config_path_(config_p } MockOptions::~MockOptions() = default; -MockConfigTracker::MockConfigTracker() { - ON_CALL(*this, add_(_, _)) - .WillByDefault(Invoke([this](const std::string& key, Cb callback) -> EntryOwner* { - EXPECT_TRUE(config_tracker_callbacks_.find(key) == config_tracker_callbacks_.end()); - config_tracker_callbacks_[key] = callback; - return new MockEntryOwner(); - })); -} -MockConfigTracker::~MockConfigTracker() = default; - 
-MockAdmin::MockAdmin() { - ON_CALL(*this, getConfigTracker()).WillByDefault(testing::ReturnRef(config_tracker_)); -} -MockAdmin::~MockAdmin() = default; - MockAdminStream::MockAdminStream() = default; MockAdminStream::~MockAdminStream() = default; diff --git a/test/mocks/server/mocks.h b/test/mocks/server/mocks.h index 6e5060014f72..1685d0f706d2 100644 --- a/test/mocks/server/mocks.h +++ b/test/mocks/server/mocks.h @@ -53,6 +53,8 @@ #include "test/test_common/test_time_system.h" #include "absl/strings/string_view.h" +#include "admin.h" +#include "config_tracker.h" #include "gmock/gmock.h" #include "spdlog/spdlog.h" @@ -128,49 +130,6 @@ class MockOptions : public Options { std::vector disabled_extensions_; }; -class MockConfigTracker : public ConfigTracker { -public: - MockConfigTracker(); - ~MockConfigTracker() override; - - struct MockEntryOwner : public EntryOwner {}; - - MOCK_METHOD(EntryOwner*, add_, (std::string, Cb)); - - // Server::ConfigTracker - MOCK_METHOD(const CbsMap&, getCallbacksMap, (), (const)); - EntryOwnerPtr add(const std::string& key, Cb callback) override { - return EntryOwnerPtr{add_(key, std::move(callback))}; - } - - std::unordered_map config_tracker_callbacks_; -}; - -class MockAdmin : public Admin { -public: - MockAdmin(); - ~MockAdmin() override; - - // Server::Admin - MOCK_METHOD(bool, addHandler, - (const std::string& prefix, const std::string& help_text, HandlerCb callback, - bool removable, bool mutates_server_state)); - MOCK_METHOD(bool, removeHandler, (const std::string& prefix)); - MOCK_METHOD(Network::Socket&, socket, ()); - MOCK_METHOD(ConfigTracker&, getConfigTracker, ()); - MOCK_METHOD(void, startHttpListener, - (const std::string& access_log_path, const std::string& address_out_path, - Network::Address::InstanceConstSharedPtr address, - const Network::Socket::OptionsSharedPtr& socket_options, - Stats::ScopePtr&& listener_scope)); - MOCK_METHOD(Http::Code, request, - (absl::string_view path_and_query, absl::string_view method, - Http::ResponseHeaderMap& response_headers, std::string& body)); - MOCK_METHOD(void, addListenerToHandler, (Network::ConnectionHandler * handler)); - - NiceMock config_tracker_; -}; - class MockAdminStream : public AdminStream { public: MockAdminStream(); diff --git a/test/server/config_validation/BUILD b/test/server/config_validation/BUILD index 19942cb6d89e..086575831d1f 100644 --- a/test/server/config_validation/BUILD +++ b/test/server/config_validation/BUILD @@ -27,6 +27,7 @@ envoy_cc_test( "//include/envoy/upstream:resource_manager_interface", "//include/envoy/upstream:upstream_interface", "//source/common/api:api_lib", + "//source/common/singleton:manager_impl_lib", "//source/common/stats:stats_lib", "//source/extensions/transport_sockets/tls:context_lib", "//source/server/config_validation:cluster_manager_lib", @@ -39,7 +40,7 @@ envoy_cc_test( "//test/mocks/protobuf:protobuf_mocks", "//test/mocks/runtime:runtime_mocks", "//test/mocks/secret:secret_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:admin_mocks", "//test/mocks/thread_local:thread_local_mocks", "//test/mocks/upstream:upstream_mocks", "//test/test_common:simulated_time_system_lib", diff --git a/test/server/config_validation/cluster_manager_test.cc b/test/server/config_validation/cluster_manager_test.cc index cd6cfdbb9b56..a1475f197281 100644 --- a/test/server/config_validation/cluster_manager_test.cc +++ b/test/server/config_validation/cluster_manager_test.cc @@ -3,6 +3,7 @@ #include "envoy/upstream/upstream.h" #include 
"common/api/api_impl.h" +#include "common/grpc/context_impl.h" #include "common/http/context_impl.h" #include "common/singleton/manager_impl.h" @@ -18,7 +19,7 @@ #include "test/mocks/protobuf/mocks.h" #include "test/mocks/runtime/mocks.h" #include "test/mocks/secret/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/admin.h" #include "test/mocks/thread_local/mocks.h" #include "test/mocks/upstream/mocks.h" #include "test/test_common/simulated_time_system.h" From 0f2373b3aea79bad35140f180d214622937d9b86 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Thu, 25 Jun 2020 00:41:46 -0400 Subject: [PATCH 439/909] build: removing hard-coded extension from generate_extension_db.py (#11711) Risk Level: Low Testing: n/a Docs Changes: n/a Release Notes: n/a Signed-off-by: Alyssa Wilk --- docs/generate_extension_db.py | 5 ++++- source/extensions/extensions_build_config.bzl | 5 +---- source/server/BUILD | 2 -- test/extensions/filters/http/common/fuzz/BUILD | 1 + 4 files changed, 6 insertions(+), 7 deletions(-) diff --git a/docs/generate_extension_db.py b/docs/generate_extension_db.py index 726a4cac1eb5..fe7b5aa70c98 100755 --- a/docs/generate_extension_db.py +++ b/docs/generate_extension_db.py @@ -55,8 +55,11 @@ def GetExtensionMetadata(target): extension_db = {} for extension, target in extensions_build_config.EXTENSIONS.items(): extension_db[extension] = GetExtensionMetadata(target) - # The TLS transport extension is not in source/extensions/extensions_build_config.bzl + # The TLS and generic upstream extensions are hard-coded into the build, so + # not in source/extensions/extensions_build_config.bzl extension_db['envoy.transport_sockets.tls'] = GetExtensionMetadata( '//source/extensions/transport_sockets/tls:config') + extension_db['envoy.upstreams.http.generic'] = GetExtensionMetadata( + '//source/extensions/upstreams/http/generic:config') pathlib.Path(output_path).write_text(json.dumps(extension_db)) diff --git a/source/extensions/extensions_build_config.bzl b/source/extensions/extensions_build_config.bzl index 2f11d428f0af..8c1e615a26a8 100644 --- a/source/extensions/extensions_build_config.bzl +++ b/source/extensions/extensions_build_config.bzl @@ -191,10 +191,7 @@ EXTENSIONS = { "envoy.internal_redirect_predicates.previous_routes": "//source/extensions/internal_redirect/previous_routes:config", "envoy.internal_redirect_predicates.safe_cross_scheme": "//source/extensions/internal_redirect/safe_cross_scheme:config", - # Http Upstreams - # TODO(alyssawilk) these are linked in the default build and shouldn't be here: fix tooling and remove. 
- - "envoy.upstreams.http.generic": "//source/extensions/upstreams/http/generic:config", + # Http Upstreams (excepting envoy.upstreams.http.generic which is hard-coded into the build so not registered here) "envoy.upstreams.http.http": "//source/extensions/upstreams/http/http:config", "envoy.upstreams.http.tcp": "//source/extensions/upstreams/http/tcp:config", diff --git a/source/server/BUILD b/source/server/BUILD index 36809ab90e85..bbb8dc655b76 100644 --- a/source/server/BUILD +++ b/source/server/BUILD @@ -329,8 +329,6 @@ envoy_cc_library( "//source/extensions/filters/network/http_connection_manager:config", "//source/extensions/transport_sockets:well_known_names", "//source/extensions/upstreams/http/generic:config", - "//source/extensions/upstreams/http/http:config", - "//source/extensions/upstreams/http/tcp:config", "@envoy_api//envoy/admin/v3:pkg_cc_proto", "@envoy_api//envoy/api/v2:pkg_cc_proto", "@envoy_api//envoy/api/v2/listener:pkg_cc_proto", diff --git a/test/extensions/filters/http/common/fuzz/BUILD b/test/extensions/filters/http/common/fuzz/BUILD index a31d18651968..e372825fc6c4 100644 --- a/test/extensions/filters/http/common/fuzz/BUILD +++ b/test/extensions/filters/http/common/fuzz/BUILD @@ -59,6 +59,7 @@ envoy_cc_fuzz_test( ":uber_filter_lib", "//source/common/config:utility_lib", "//source/common/protobuf:utility_lib", + "//source/extensions/upstreams/http/generic:config", "//test/config:utility_lib", "@envoy_api//envoy/service/auth/v3:pkg_cc_proto", "@envoy_api//envoy/service/auth/v2alpha:pkg_cc_proto", From c4a218971f28357510bdaaaa7cc3289d7f4b41ed Mon Sep 17 00:00:00 2001 From: Sunjay Bhatia <5337253+sunjayBhatia@users.noreply.github.com> Date: Thu, 25 Jun 2020 08:12:12 -0400 Subject: [PATCH 440/909] Windows: test compilation refresh (#11719) * Windows: test compilation refresh - Remove TestHeaderMapImplBase constructor that takes a list of std::pair - MSVC erroneously treats this as ambiguous. It appears the explicit attribute of the LowerCaseString constructor is incorrectly discarded. We are able to reproduce this in a minimal example and see that MSVC is wrong here: https://godbolt.org/z/VjgsAi - To mitigate, remove LowerCaseString based constructor since it is only used in tests, tests always use std::string - Correct use of long in envoy_quic_alarm.cc, explicitly cast to int64_t (long on Windows is 32 bits) - `using std::string_literal::operator""s` is erroneously rejected by MSVC, throwing error C4455. - Instead, simply utilize the namespace and continue to use the operator as is - operator usage could be replaced by `std::string (const char* s, size_t n)` constructor - See https://developercommunity.visualstudio.com/content/problem/270349/warning-c4455-issued-when-using-standardized-liter.html and other related duplicate issues that have not yet been resolved. 
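For reference, a minimal standalone sketch (illustrative only, not code from this patch or the Envoy tree) of the portable `using namespace std::string_literals;` spelling next to the single-operator form that MSVC rejects with C4455:

#include <cassert>
#include <string>

// Portable form: import every literal operator in the namespace.
using namespace std::string_literals;
// Form rejected by MSVC with warning C4455 (kept only as a comment):
//   using std::string_literals::operator""s;

int main() {
  // operator""s uses the (const char*, size_t) constructor, so the embedded
  // NUL below survives and the string has 7 characters.
  const std::string with_nul = "foo\0bar"s;
  assert(with_nul.size() == 7);

  // Equivalent spelling without the literal operator, per the note above:
  // construct directly from a pointer and an explicit length.
  const std::string also_with_nul("foo\0bar", 7);
  assert(with_nul == also_with_nul);
  return 0;
}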
Signed-off-by: Sunjay Bhatia Co-authored-by: Sunjay Bhatia Signed-off-by: William A Rowe Jr Co-authored-by: William A Rowe Jr --- .../quic_listeners/quiche/envoy_quic_alarm.cc | 3 +- test/common/buffer/BUILD | 2 ++ test/common/http/header_map_impl_test.cc | 28 +++++++++---------- test/common/runtime/BUILD | 2 ++ test/exe/BUILD | 2 ++ .../reverse_bridge_integration_test.cc | 4 ++- test/test_common/utility.h | 7 ----- 7 files changed, 24 insertions(+), 24 deletions(-) diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_alarm.cc b/source/extensions/quic_listeners/quiche/envoy_quic_alarm.cc index 759e31a4d83f..e652b79a6120 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_alarm.cc +++ b/source/extensions/quic_listeners/quiche/envoy_quic_alarm.cc @@ -19,7 +19,8 @@ void EnvoyQuicAlarm::SetImpl() { // in QUICHE, and we are working on the fix. Once QUICHE is fixed of expecting this behavior, we // no longer need to round up the duration. // TODO(antoniovicente) improve the timer behavior in such case. - timer_->enableHRTimer(std::chrono::microseconds(std::max(1l, duration.ToMicroseconds()))); + timer_->enableHRTimer( + std::chrono::microseconds(std::max(static_cast(1), duration.ToMicroseconds()))); } void EnvoyQuicAlarm::UpdateImpl() { diff --git a/test/common/buffer/BUILD b/test/common/buffer/BUILD index bd01534ca6ca..0e32ba806f14 100644 --- a/test/common/buffer/BUILD +++ b/test/common/buffer/BUILD @@ -74,6 +74,8 @@ envoy_cc_test( envoy_cc_test( name = "watermark_buffer_test", srcs = ["watermark_buffer_test.cc"], + # Fails on windows with cr/lf yaml file checkouts + tags = ["fails_on_windows"], deps = [ ":utility_lib", "//source/common/buffer:buffer_lib", diff --git a/test/common/http/header_map_impl_test.cc b/test/common/http/header_map_impl_test.cc index b283e214a0ab..7d3b59ee0ce2 100644 --- a/test/common/http/header_map_impl_test.cc +++ b/test/common/http/header_map_impl_test.cc @@ -902,16 +902,14 @@ TEST(HeaderMapImplTest, Lookup) { TEST(HeaderMapImplTest, Get) { { - auto headers = - TestRequestHeaderMapImpl({{Headers::get().Path, "/"}, {LowerCaseString("hello"), "world"}}); + auto headers = TestRequestHeaderMapImpl({{Headers::get().Path.get(), "/"}, {"hello", "world"}}); EXPECT_EQ("/", headers.get(LowerCaseString(":path"))->value().getStringView()); EXPECT_EQ("world", headers.get(LowerCaseString("hello"))->value().getStringView()); EXPECT_EQ(nullptr, headers.get(LowerCaseString("foo"))); } { - auto headers = - TestRequestHeaderMapImpl({{Headers::get().Path, "/"}, {LowerCaseString("hello"), "world"}}); + auto headers = TestRequestHeaderMapImpl({{Headers::get().Path.get(), "/"}, {"hello", "world"}}); // There is not HeaderMap method to set a header and copy both the key and value. 
const LowerCaseString path(":path"); headers.setReferenceKey(path, "/new_path"); @@ -933,7 +931,7 @@ TEST(HeaderMapImplTest, CreateHeaderMapFromIterator) { } TEST(HeaderMapImplTest, TestHeaderList) { - std::array keys{Headers::get().Path, LowerCaseString("hello")}; + std::array keys{Headers::get().Path.get(), "hello"}; std::array values{"/", "world"}; auto headers = TestRequestHeaderMapImpl({{keys[0], values[0]}, {keys[1], values[1]}}); @@ -1173,11 +1171,11 @@ TEST(HeaderMapImplTest, PseudoHeaderOrder) { // Starting with a normal header { - auto headers = TestRequestHeaderMapImpl({{Headers::get().ContentType, "text/plain"}, - {Headers::get().Method, "GET"}, - {Headers::get().Path, "/"}, - {LowerCaseString("hello"), "world"}, - {Headers::get().Host, "host"}}); + auto headers = TestRequestHeaderMapImpl({{Headers::get().ContentType.get(), "text/plain"}, + {Headers::get().Method.get(), "GET"}, + {Headers::get().Path.get(), "/"}, + {"hello", "world"}, + {Headers::get().Host.get(), "host"}}); InSequence seq; EXPECT_CALL(cb, Call(":method", "GET")); @@ -1197,11 +1195,11 @@ TEST(HeaderMapImplTest, PseudoHeaderOrder) { // Starting with a pseudo-header { - auto headers = TestRequestHeaderMapImpl({{Headers::get().Path, "/"}, - {Headers::get().ContentType, "text/plain"}, - {Headers::get().Method, "GET"}, - {LowerCaseString("hello"), "world"}, - {Headers::get().Host, "host"}}); + auto headers = TestRequestHeaderMapImpl({{Headers::get().Path.get(), "/"}, + {Headers::get().ContentType.get(), "text/plain"}, + {Headers::get().Method.get(), "GET"}, + {"hello", "world"}, + {Headers::get().Host.get(), "host"}}); InSequence seq; EXPECT_CALL(cb, Call(":path", "/")); diff --git a/test/common/runtime/BUILD b/test/common/runtime/BUILD index 47bb5e802978..d6ffd6dfb1a7 100644 --- a/test/common/runtime/BUILD +++ b/test/common/runtime/BUILD @@ -42,6 +42,8 @@ envoy_cc_test( name = "runtime_impl_test", srcs = ["runtime_impl_test.cc"], data = glob(["test_data/**"]) + ["filesystem_setup.sh"], + # Fails on windows with cr/lf yaml file checkouts + tags = ["fails_on_windows"], deps = [ "//source/common/config:runtime_utility_lib", "//source/common/runtime:runtime_lib", diff --git a/test/exe/BUILD b/test/exe/BUILD index 806820660bca..2b1d4b80aab5 100644 --- a/test/exe/BUILD +++ b/test/exe/BUILD @@ -63,6 +63,8 @@ envoy_cc_test( name = "main_common_test", srcs = ["main_common_test.cc"], data = ["//test/config/integration:google_com_proxy_port_0"], + # Fails on windows with cr/lf yaml file checkouts + tags = ["fails_on_windows"], deps = [ "//source/common/api:api_lib", "//source/exe:envoy_main_common_lib", diff --git a/test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_integration_test.cc b/test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_integration_test.cc index 0a9f090b1638..0dd77b0094d6 100644 --- a/test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_integration_test.cc +++ b/test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_integration_test.cc @@ -14,7 +14,9 @@ #include "gtest/gtest.h" using Envoy::Http::HeaderValueOf; -using std::string_literals::operator""s; + +// for ::operator""s (which Windows compiler does not support): +using namespace std::string_literals; namespace Envoy { namespace { diff --git a/test/test_common/utility.h b/test/test_common/utility.h index b45b229dd4e5..7bbdc5548c78 100644 --- a/test/test_common/utility.h +++ b/test/test_common/utility.h @@ -757,13 +757,6 @@ template class TestHeaderMapImplBase : public Inte } 
header_map_->verifyByteSizeInternalForTest(); } - TestHeaderMapImplBase( - const std::initializer_list>& values) { - for (auto& value : values) { - header_map_->addCopy(value.first, value.second); - } - header_map_->verifyByteSizeInternalForTest(); - } TestHeaderMapImplBase(const TestHeaderMapImplBase& rhs) : TestHeaderMapImplBase(*rhs.header_map_) {} TestHeaderMapImplBase(const HeaderMap& rhs) { From d0b4b4f1e8169ebc579f98e48db3a1190eb3add6 Mon Sep 17 00:00:00 2001 From: Christoph Pakulski Date: Thu, 25 Jun 2020 11:12:15 -0400 Subject: [PATCH 441/909] examples: fixed fault-injection delay demo (#11715) The fault injection example did not work for the delay option. After gdb debugging it was discovered that the fault injection config requires the delay part to be present. Without the delay part, Envoy assumes that delay is disabled and does not scan RTDS for delay config updates. Risk Level: Low Testing: Did manual testing as described in https://www.envoyproxy.io/docs/envoy/latest/start/sandboxes/fault_injection Docs Changes: No Release Notes: No Fixes: #11095 Signed-off-by: Christoph Pakulski --- examples/fault-injection/docker-compose.yaml | 3 +++ examples/fault-injection/envoy.yaml | 5 +++++ 2 files changed, 8 insertions(+) diff --git a/examples/fault-injection/docker-compose.yaml b/examples/fault-injection/docker-compose.yaml index fe8ec0c9d68f..97680c848450 100644 --- a/examples/fault-injection/docker-compose.yaml +++ b/examples/fault-injection/docker-compose.yaml @@ -13,6 +13,9 @@ services: ports: - 9211:9211 - 9901:9901 + # Run Envoy as root to grant access to /dev/stdout + environment: + ENVOY_UID: 0 backend: image: kennethreitz/httpbin@sha256:2c7abc4803080c22928265744410173b6fea3b898872c01c5fd0f0f9df4a59fb networks: diff --git a/examples/fault-injection/envoy.yaml b/examples/fault-injection/envoy.yaml index 9ba70be94ba9..661e5ad88f54 100644 --- a/examples/fault-injection/envoy.yaml +++ b/examples/fault-injection/envoy.yaml @@ -36,6 +36,11 @@ static_resources: percentage: numerator: 0 denominator: HUNDRED + delay: + fixed_delay: 3s + percentage: + numerator: 0 + denominator: HUNDRED - name: envoy.filters.http.router typed_config: {} clusters: From f46325fec2483130d5c55489e25d8989cd65b5a5 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Thu, 25 Jun 2020 11:31:35 -0400 Subject: [PATCH 442/909] http: sending http/1.1 errors via HCM (#11714) This solves the problem where early response errors look different and don't have HCM tracking, by sending them through the HCM. This means early responses will get all of the usual Envoy standard header additions, get accounted for in listener stats, be visible in access logs, etc.
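For intuition, a much-simplified sketch of the control-flow change (toy types and names only, not the actual Envoy interfaces): on a codec protocol error the reply is now produced through the request decoder, i.e. the connection manager's stream, instead of being written to the connection as hand-rolled bytes by the codec.

#include <functional>
#include <iostream>
#include <string>

// Toy stand-ins for the real classes; all names here are illustrative only.
struct ToyRequestDecoder {
  // Plays the role of the new RequestDecoder::sendLocalReply() hook: the stream
  // owned by the connection manager builds the error response, so it flows
  // through filter chains, listener stats and access logs like any response.
  std::function<void(int, const std::string&)> send_local_reply;
};

struct ToyServerCodec {
  ToyRequestDecoder* decoder;
  std::function<void(const std::string&)> write_raw; // legacy path: raw bytes on the wire

  void onProtocolError(bool errors_via_hcm) {
    if (errors_via_hcm && decoder != nullptr) {
      // New behavior: delegate the reply to the stream so it is fully tracked.
      decoder->send_local_reply(400, "Bad Request");
    } else {
      // Old behavior: a hand-rolled response written straight to the connection,
      // invisible to filters, stats and access logs.
      write_raw("HTTP/1.1 400 Bad Request\r\ncontent-length: 0\r\nconnection: close\r\n\r\n");
    }
  }
};

int main() {
  ToyRequestDecoder decoder{[](int code, const std::string& body) {
    std::cout << "local reply via connection manager: " << code << " " << body << "\n";
  }};
  ToyServerCodec codec{&decoder, [](const std::string& raw) { std::cout << raw; }};
  codec.onProtocolError(true);  // guarded new path
  codec.onProtocolError(false); // legacy path
  return 0;
}

Which of the two branches runs is what the `envoy.reloadable_features.early_errors_via_hcm` runtime guard selects in the real code.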
Risk Level: high (changes to HTTP early response path) Testing: new unit tests Docs Changes: n/a Release Notes: inline Runtime guard: envoy.reloadable_features.early_errors_via_hcm Fixes #8545 Part of #9846 Signed-off-by: Alyssa Wilk --- docs/root/version_history/current.rst | 1 + include/envoy/http/BUILD | 1 + include/envoy/http/codec.h | 17 +++ source/common/http/conn_manager_impl.h | 2 +- source/common/http/http1/BUILD | 1 + source/common/http/http1/codec_impl.cc | 32 +++++- source/common/http/http1/codec_impl.h | 4 +- source/common/runtime/runtime_features.cc | 1 + test/common/http/http1/codec_impl_test.cc | 108 +++++++++++++++--- test/integration/fake_upstream.h | 17 +++ test/integration/integration_test.cc | 21 ++-- test/integration/protocol_integration_test.cc | 1 + test/mocks/http/mocks.h | 5 + test/mocks/http/stream_decoder.h | 5 + 14 files changed, 183 insertions(+), 33 deletions(-) diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index b932f837c873..b52e89366440 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -16,6 +16,7 @@ Minor Behavior Changes * build: run as non-root inside Docker containers. Existing behaviour can be restored by setting the environment variable `ENVOY_UID` to `0`. `ENVOY_UID` and `ENVOY_GID` can be used to set the envoy user's `uid` and `gid` respectively. * health check: in the health check filter the :ref:`percentage of healthy servers in upstream clusters ` is now interpreted as an integer. * hot restart: added the option :option:`--use-dynamic-base-id` to select an unused base ID at startup and the option :option:`--base-id-path` to write the base id to a file (for reuse with later hot restarts). +* http: changed early error path for HTTP/1.1 so that responses consistently flow through the http connection manager, and the http filter chains. This behavior may be temporarily reverted by setting runtime feature `envoy.reloadable_features.early_errors_via_hcm` to false. * http: fixed several bugs with applying correct connection close behavior across the http connection manager, health checker, and connection pool. This behavior may be temporarily reverted by setting runtime feature `envoy.reloadable_features.fix_connection_close` to false. * http: fixed a bug where the upgrade header was not cleared on responses to non-upgrade requests. Can be reverted temporarily by setting runtime feature `envoy.reloadable_features.fix_upgrade_response` to false. diff --git a/include/envoy/http/BUILD b/include/envoy/http/BUILD index 7ffc41f6372d..f84ccbc9c60a 100644 --- a/include/envoy/http/BUILD +++ b/include/envoy/http/BUILD @@ -35,6 +35,7 @@ envoy_cc_library( ":metadata_interface", ":protocol_interface", "//include/envoy/buffer:buffer_interface", + "//include/envoy/grpc:status", "//include/envoy/network:address_interface", "//source/common/http:status_lib", ], diff --git a/include/envoy/http/codec.h b/include/envoy/http/codec.h index 406a30a65766..d7bb27d64998 100644 --- a/include/envoy/http/codec.h +++ b/include/envoy/http/codec.h @@ -6,6 +6,7 @@ #include "envoy/buffer/buffer.h" #include "envoy/common/pure.h" +#include "envoy/grpc/status.h" #include "envoy/http/header_map.h" #include "envoy/http/metadata_interface.h" #include "envoy/http/protocol.h" @@ -185,6 +186,22 @@ class RequestDecoder : public virtual StreamDecoder { * @param trailers supplies the decoded trailers. 
*/ virtual void decodeTrailers(RequestTrailerMapPtr&& trailers) PURE; + + /** + * Called if the codec needs to send a protocol error. + * @param is_grpc_request indicates if the request is a gRPC request + * @param code supplies the HTTP error code to send. + * @param body supplies an optional body to send with the local reply. + * @param modify_headers supplies a way to edit headers before they are sent downstream. + * @param is_head_request indicates if the request is a HEAD request + * @param grpc_status an optional gRPC status for gRPC requests + * @param details details about the source of the error, for debug purposes + */ + virtual void sendLocalReply(bool is_grpc_request, Code code, absl::string_view body, + const std::function& modify_headers, + bool is_head_request, + const absl::optional grpc_status, + absl::string_view details) PURE; }; /** diff --git a/source/common/http/conn_manager_impl.h b/source/common/http/conn_manager_impl.h index 9d34e4cd8817..f51240bbc149 100644 --- a/source/common/http/conn_manager_impl.h +++ b/source/common/http/conn_manager_impl.h @@ -495,7 +495,7 @@ class ConnectionManagerImpl : Logger::Loggable, const std::function& modify_headers, bool is_head_request, const absl::optional grpc_status, - absl::string_view details); + absl::string_view details) override; void encode100ContinueHeaders(ActiveStreamEncoderFilter* filter, ResponseHeaderMap& headers); // As with most of the encode functions, this runs encodeHeaders on various // filters before calling encodeHeadersInternal which does final header munging and passes the diff --git a/source/common/http/http1/BUILD b/source/common/http/http1/BUILD index 47499a86bb0c..bb7bdd0e8ba5 100644 --- a/source/common/http/http1/BUILD +++ b/source/common/http/http1/BUILD @@ -32,6 +32,7 @@ envoy_cc_library( "//source/common/common:statusor_lib", "//source/common/common:thread_lib", "//source/common/common:utility_lib", + "//source/common/grpc:common_lib", "//source/common/http:codec_helper_lib", "//source/common/http:codes_lib", "//source/common/http:exception_lib", diff --git a/source/common/http/http1/codec_impl.cc b/source/common/http/http1/codec_impl.cc index 67680eac1677..1e821f7a0dec 100644 --- a/source/common/http/http1/codec_impl.cc +++ b/source/common/http/http1/codec_impl.cc @@ -11,6 +11,7 @@ #include "common/common/enum_to_int.h" #include "common/common/utility.h" +#include "common/grpc/common.h" #include "common/http/exception.h" #include "common/http/header_utility.h" #include "common/http/headers.h" @@ -964,7 +965,7 @@ void ServerConnectionImpl::onResetStream(StreamResetReason reason) { active_request_.reset(); } -void ServerConnectionImpl::sendProtocolError(absl::string_view details) { +void ServerConnectionImpl::sendProtocolErrorOld(absl::string_view details) { if (active_request_.has_value()) { active_request_.value().response_encoder_.setDetails(details); } @@ -982,6 +983,35 @@ void ServerConnectionImpl::sendProtocolError(absl::string_view details) { } } +void ServerConnectionImpl::sendProtocolError(absl::string_view details) { + if (!Runtime::runtimeFeatureEnabled("envoy.reloadable_features.early_errors_via_hcm")) { + sendProtocolErrorOld(details); + return; + } + // We do this here because we may get a protocol error before we have a logical stream. 
+ if (!active_request_.has_value()) { + onMessageBeginBase(); + } + ASSERT(active_request_.has_value()); + + active_request_.value().response_encoder_.setDetails(details); + if (!active_request_.value().response_encoder_.startedResponse()) { + // Note that the correctness of is_grpc_request and is_head_request is best-effort. + // If headers have not been fully parsed they may not be inferred correctly. + bool is_grpc_request = false; + if (absl::holds_alternative(headers_or_trailers_) && + absl::get(headers_or_trailers_) != nullptr) { + is_grpc_request = + Grpc::Common::isGrpcRequestHeaders(*absl::get(headers_or_trailers_)); + } + const bool is_head_request = parser_.method == HTTP_HEAD; + active_request_->request_decoder_->sendLocalReply(is_grpc_request, error_code_, + CodeUtility::toString(error_code_), nullptr, + is_head_request, absl::nullopt, details); + return; + } +} + void ServerConnectionImpl::onAboveHighWatermark() { if (active_request_.has_value()) { active_request_.value().response_encoder_.runHighWatermarkCallbacks(); diff --git a/source/common/http/http1/codec_impl.h b/source/common/http/http1/codec_impl.h index 750118ec8ce4..dab1b41de235 100644 --- a/source/common/http/http1/codec_impl.h +++ b/source/common/http/http1/codec_impl.h @@ -242,6 +242,7 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable( @@ -144,9 +141,9 @@ void Http1ServerConnectionImplTest::expect400(Protocol p, bool allow_absolute_ur return decoder; })); + EXPECT_CALL(decoder, sendLocalReply(_, Http::Code::BadRequest, "Bad Request", _, _, _, _)); auto status = codec_->dispatch(buffer); EXPECT_TRUE(isCodecProtocolError(status)); - EXPECT_EQ("HTTP/1.1 400 Bad Request\r\ncontent-length: 0\r\nconnection: close\r\n\r\n", output); EXPECT_EQ(p, codec_->protocol()); if (!details.empty()) { EXPECT_EQ(details, response_encoder->getStream().responseDetails()); @@ -243,6 +240,7 @@ void Http1ServerConnectionImplTest::testTrailersExceedLimit(std::string trailer_ EXPECT_TRUE(status.ok()); buffer = Buffer::OwnedImpl(trailer_string + "\r\n\r\n"); if (enable_trailers) { + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _, _)); status = codec_->dispatch(buffer); EXPECT_TRUE(isCodecProtocolError(status)); EXPECT_EQ(status.message(), "trailers size exceeds limit"); @@ -270,6 +268,7 @@ void Http1ServerConnectionImplTest::testRequestHeadersExceedLimit(std::string he auto status = codec_->dispatch(buffer); EXPECT_TRUE(status.ok()); buffer = Buffer::OwnedImpl(header_string + "\r\n"); + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _, _)); status = codec_->dispatch(buffer); EXPECT_TRUE(isCodecProtocolError(status)); EXPECT_EQ(status.message(), "headers size exceeds limit"); @@ -329,6 +328,7 @@ TEST_F(Http1ServerConnectionImplTest, IdentityEncodingNoChunked) { EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\ntransfer-encoding: identity\r\n\r\n"); + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _, _)); auto status = codec_->dispatch(buffer); EXPECT_TRUE(isCodecProtocolError(status)); EXPECT_EQ(status.message(), "http/1.1 protocol error: unsupported transfer encoding"); @@ -343,6 +343,7 @@ TEST_F(Http1ServerConnectionImplTest, UnsupportedEncoding) { EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\ntransfer-encoding: gzip\r\n\r\n"); + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _, _)); auto status = codec_->dispatch(buffer); 
EXPECT_TRUE(isCodecProtocolError(status)); EXPECT_EQ(status.message(), "http/1.1 protocol error: unsupported transfer encoding"); @@ -494,6 +495,7 @@ TEST_F(Http1ServerConnectionImplTest, InvalidChunkHeader) { "6\r\nHello \r\n" "invalid\r\nWorl"); + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _, _)); auto status = codec_->dispatch(buffer); EXPECT_TRUE(isCodecProtocolError(status)); EXPECT_EQ(status.message(), "http/1.1 protocol error: HPE_INVALID_CHUNK_SIZE"); @@ -510,6 +512,7 @@ TEST_F(Http1ServerConnectionImplTest, IdentityAndChunkedBody) { Buffer::OwnedImpl buffer("POST / HTTP/1.1\r\ntransfer-encoding: " "identity,chunked\r\n\r\nb\r\nHello World\r\n0\r\n\r\n"); + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _, _)); auto status = codec_->dispatch(buffer); EXPECT_TRUE(isCodecProtocolError(status)); EXPECT_EQ(status.message(), "http/1.1 protocol error: unsupported transfer encoding"); @@ -698,9 +701,6 @@ TEST_F(Http1ServerConnectionImplTest, Http11InvalidRequest) { TEST_F(Http1ServerConnectionImplTest, Http11InvalidTrailerPost) { initialize(); - std::string output; - ON_CALL(connection_, write(_, _)).WillByDefault(AddBufferToString(&output)); - MockRequestDecoder decoder; EXPECT_CALL(callbacks_, newStream(_, _)) .WillOnce(Invoke([&](ResponseEncoder&, bool) -> RequestDecoder& { return decoder; })); @@ -718,9 +718,9 @@ TEST_F(Http1ServerConnectionImplTest, Http11InvalidTrailerPost) { "body\r\n0\r\n" "badtrailer\r\n\r\n"); + EXPECT_CALL(decoder, sendLocalReply(_, Http::Code::BadRequest, "Bad Request", _, _, _, _)); auto status = codec_->dispatch(buffer); EXPECT_TRUE(isCodecProtocolError(status)); - EXPECT_EQ("HTTP/1.1 400 Bad Request\r\ncontent-length: 0\r\nconnection: close\r\n\r\n", output); } TEST_F(Http1ServerConnectionImplTest, Http11AbsolutePathNoSlash) { @@ -789,16 +789,82 @@ TEST_F(Http1ServerConnectionImplTest, SimpleGet) { EXPECT_EQ(0U, buffer.length()); } -TEST_F(Http1ServerConnectionImplTest, BadRequestNoStream) { +TEST_F(Http1ServerConnectionImplTest, BadRequestNoStreamLegacy) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.early_errors_via_hcm", "false"}}); initialize(); std::string output; ON_CALL(connection_, write(_, _)).WillByDefault(AddBufferToString(&output)); + MockRequestDecoder decoder; + EXPECT_CALL(callbacks_, newStream(_, _)).Times(0); + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _, _)).Times(0); + + Buffer::OwnedImpl buffer("bad"); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(isCodecProtocolError(status)); +} + +// Test that if the stream is not created at the time an error is detected, it +// is created as part of sending the protocol error. +TEST_F(Http1ServerConnectionImplTest, BadRequestNoStream) { + initialize(); + + MockRequestDecoder decoder; + Http::ResponseEncoder* response_encoder = nullptr; + EXPECT_CALL(callbacks_, newStream(_, _)) + .WillOnce(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& { + response_encoder = &encoder; + return decoder; + })); + // Check that before any headers are parsed, requests do not look like HEAD or gRPC requests. + EXPECT_CALL(decoder, sendLocalReply(false, _, _, _, false, _, _)); + Buffer::OwnedImpl buffer("bad"); auto status = codec_->dispatch(buffer); EXPECT_TRUE(isCodecProtocolError(status)); - EXPECT_EQ("HTTP/1.1 400 Bad Request\r\ncontent-length: 0\r\nconnection: close\r\n\r\n", output); +} + +// Make sure that if the first line is parsed, that sendLocalReply tracks HEAD requests correctly. 
+TEST_F(Http1ServerConnectionImplTest, BadHeadRequest) { + initialize(); + + MockRequestDecoder decoder; + Http::ResponseEncoder* response_encoder = nullptr; + EXPECT_CALL(callbacks_, newStream(_, _)) + .WillOnce(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& { + response_encoder = &encoder; + return decoder; + })); + // Make sure sendLocalReply picks up the head request. + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, true, _, _)); + + // Send invalid characters + Buffer::OwnedImpl buffer("HEAD / HTTP/1.1\r\nHOST: h.com\r\r\r\r"); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(isCodecProtocolError(status)); +} + +// Make sure that if gRPC headers are parsed, they are tracked by sendLocalReply. +TEST_F(Http1ServerConnectionImplTest, BadGrpcRequest) { + initialize(); + + MockRequestDecoder decoder; + Http::ResponseEncoder* response_encoder = nullptr; + EXPECT_CALL(callbacks_, newStream(_, _)) + .WillOnce(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& { + response_encoder = &encoder; + return decoder; + })); + // Make sure sendLocalReply picks up the head request. + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, true, _, _)); + + // Send invalid characters + Buffer::OwnedImpl buffer("HEAD / HTTP/1.1\r\ncontent-type: application/grpc\r\nHOST: ###\r\r"); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(isCodecProtocolError(status)); } // This behavior was observed during CVE-2019-18801 and helped to limit the @@ -809,21 +875,15 @@ TEST_F(Http1ServerConnectionImplTest, RejectInvalidMethod) { MockRequestDecoder decoder; EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); - std::string output; - ON_CALL(connection_, write(_, _)).WillByDefault(AddBufferToString(&output)); - Buffer::OwnedImpl buffer("BAD / HTTP/1.1\r\nHost: foo\r\n"); + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _, _)); auto status = codec_->dispatch(buffer); EXPECT_TRUE(isCodecProtocolError(status)); - EXPECT_EQ("HTTP/1.1 400 Bad Request\r\ncontent-length: 0\r\nconnection: close\r\n\r\n", output); } TEST_F(Http1ServerConnectionImplTest, BadRequestStartedStream) { initialize(); - std::string output; - ON_CALL(connection_, write(_, _)).WillByDefault(AddBufferToString(&output)); - MockRequestDecoder decoder; EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); @@ -832,9 +892,9 @@ TEST_F(Http1ServerConnectionImplTest, BadRequestStartedStream) { EXPECT_TRUE(status.ok()); Buffer::OwnedImpl buffer2("g"); + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _, _)); status = codec_->dispatch(buffer); EXPECT_TRUE(isCodecProtocolError(status)); - EXPECT_EQ("HTTP/1.1 400 Bad Request\r\ncontent-length: 0\r\nconnection: close\r\n\r\n", output); } TEST_F(Http1ServerConnectionImplTest, FloodProtection) { @@ -982,6 +1042,7 @@ TEST_F(Http1ServerConnectionImplTest, HeaderInvalidCharsRejection) { })); Buffer::OwnedImpl buffer( absl::StrCat("GET / HTTP/1.1\r\nHOST: h.com\r\nfoo: ", std::string(1, 3), "\r\n")); + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _, _)); auto status = codec_->dispatch(buffer); EXPECT_TRUE(isCodecProtocolError(status)); EXPECT_EQ(status.message(), "http/1.1 protocol error: header value contains invalid chars"); @@ -1050,6 +1111,7 @@ TEST_F(Http1ServerConnectionImplTest, HeaderNameWithUnderscoreCauseRequestReject })); Buffer::OwnedImpl buffer(absl::StrCat("GET / HTTP/1.1\r\nHOST: h.com\r\nfoo_bar: bar\r\n\r\n")); + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _, _)); auto status = codec_->dispatch(buffer); 
EXPECT_TRUE(isCodecProtocolError(status)); EXPECT_EQ(status.message(), "http/1.1 protocol error: header name contains underscores"); @@ -1070,6 +1132,7 @@ TEST_F(Http1ServerConnectionImplTest, HeaderInvalidAuthority) { return decoder; })); Buffer::OwnedImpl buffer(absl::StrCat("GET / HTTP/1.1\r\nHOST: h.\"com\r\n\r\n")); + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _, _)); auto status = codec_->dispatch(buffer); EXPECT_TRUE(isCodecProtocolError(status)); EXPECT_EQ(status.message(), @@ -1092,6 +1155,7 @@ TEST_F(Http1ServerConnectionImplTest, HeaderEmbeddedNulRejection) { Buffer::OwnedImpl buffer( absl::StrCat("GET / HTTP/1.1\r\nHOST: h.com\r\nfoo: bar", std::string(1, '\0'), "baz\r\n")); + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _, _)); auto status = codec_->dispatch(buffer); EXPECT_TRUE(isCodecProtocolError(status)); EXPECT_EQ(status.message(), "http/1.1 protocol error: HPE_INVALID_HEADER_TOKEN"); @@ -1112,6 +1176,7 @@ TEST_F(Http1ServerConnectionImplTest, HeaderMutateEmbeddedNul) { Buffer::OwnedImpl buffer( absl::StrCat(example_input.substr(0, n), std::string(1, '\0'), example_input.substr(n))); + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _, _)); auto status = codec_->dispatch(buffer); EXPECT_FALSE(status.ok()); EXPECT_TRUE(isCodecProtocolError(status)); @@ -1716,6 +1781,7 @@ TEST_F(Http1ServerConnectionImplTest, ConnectRequestAbsoluteURLNotallowed) { EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); Buffer::OwnedImpl buffer("CONNECT http://host:80 HTTP/1.1\r\n\r\n"); + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _, _)); auto status = codec_->dispatch(buffer); EXPECT_TRUE(isCodecProtocolError(status)); } @@ -1744,6 +1810,7 @@ TEST_F(Http1ServerConnectionImplTest, ConnectRequestWithTEChunked) { // Per https://tools.ietf.org/html/rfc7231#section-4.3.6 CONNECT with body has no defined // semantics: Envoy will reject chunked CONNECT requests. + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _, _)); Buffer::OwnedImpl buffer( "CONNECT host:80 HTTP/1.1\r\ntransfer-encoding: chunked\r\n\r\n12345abcd"); auto status = codec_->dispatch(buffer); @@ -1761,6 +1828,7 @@ TEST_F(Http1ServerConnectionImplTest, ConnectRequestWithNonZeroContentLength) { // Make sure we avoid the deferred_end_stream_headers_ optimization for // requests-with-no-body. Buffer::OwnedImpl buffer("CONNECT host:80 HTTP/1.1\r\ncontent-length: 1\r\n\r\nabcd"); + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _, _)); auto status = codec_->dispatch(buffer); EXPECT_TRUE(isCodecProtocolError(status)); EXPECT_EQ(status.message(), "http/1.1 protocol error: unsupported content length"); @@ -2508,6 +2576,7 @@ TEST_F(Http1ServerConnectionImplTest, LargeRequestHeadersSplitRejected) { } // the 60th 1kb header should induce overflow buffer = Buffer::OwnedImpl(fmt::format("big: {}\r\n", long_string)); + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _, _)); status = codec_->dispatch(buffer); EXPECT_TRUE(isCodecProtocolError(status)); EXPECT_EQ(status.message(), "headers size exceeds limit"); @@ -2537,6 +2606,7 @@ TEST_F(Http1ServerConnectionImplTest, ManyRequestHeadersSplitRejected) { // The final 101th header should induce overflow. 
buffer = Buffer::OwnedImpl("header101:\r\n\r\n"); + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _, _)); status = codec_->dispatch(buffer); EXPECT_TRUE(isCodecProtocolError(status)); EXPECT_EQ(status.message(), "headers size exceeds limit"); @@ -2565,6 +2635,7 @@ TEST_F(Http1ServerConnectionImplTest, ManyRequestHeadersAccepted) { TEST_F(Http1ClientConnectionImplTest, LargeResponseHeadersRejected) { initialize(); + NiceMock decoder; NiceMock response_decoder; Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder); TestRequestHeaderMapImpl headers{{":method", "GET"}, {":path", "/"}, {":authority", "host"}}; @@ -2661,6 +2732,7 @@ TEST_F(Http1ClientConnectionImplTest, ManyResponseHeadersRejected) { Buffer::OwnedImpl buffer("HTTP/1.1 200 OK\r\nContent-Length: 0\r\n"); auto status = codec_->dispatch(buffer); buffer = Buffer::OwnedImpl(createHeaderFragment(101) + "\r\n"); + status = codec_->dispatch(buffer); EXPECT_TRUE(isCodecProtocolError(status)); EXPECT_EQ(status.message(), "headers size exceeds limit"); diff --git a/test/integration/fake_upstream.h b/test/integration/fake_upstream.h index e5bfda741d4a..28cdba57cdcb 100644 --- a/test/integration/fake_upstream.h +++ b/test/integration/fake_upstream.h @@ -81,6 +81,23 @@ class FakeStream : public Http::RequestDecoder, Http::Http1StreamEncoderOptionsOptRef http1StreamEncoderOptions() { return encoder_.http1StreamEncoderOptions(); } + void + sendLocalReply(bool is_grpc_request, Http::Code code, absl::string_view body, + const std::function& /*modify_headers*/, + bool is_head_request, const absl::optional grpc_status, + absl::string_view /*details*/) override { + Http::Utility::sendLocalReply( + false, + Http::Utility::EncodeFunctions( + {nullptr, + [&](Http::ResponseHeaderMapPtr&& headers, bool end_stream) -> void { + encoder_.encodeHeaders(*headers, end_stream); + }, + [&](Buffer::Instance& data, bool end_stream) -> void { + encoder_.encodeData(data, end_stream); + }}), + Http::Utility::LocalReplyData({is_grpc_request, code, body, grpc_status, is_head_request})); + } ABSL_MUST_USE_RESULT testing::AssertionResult diff --git a/test/integration/integration_test.cc b/test/integration/integration_test.cc index c6da9f9a3e56..7c751f82f5d7 100644 --- a/test/integration/integration_test.cc +++ b/test/integration/integration_test.cc @@ -409,8 +409,7 @@ TEST_P(IntegrationTest, TestSmuggling) { "GET / HTTP/1.1\r\nHost: host\r\ncontent-length: 36\r\ntransfer-encoding: chunked\r\n\r\n" + smuggled_request; sendRawHttpAndWaitForResponse(lookupPort("http"), full_request.c_str(), &response, false); - EXPECT_EQ("HTTP/1.1 400 Bad Request\r\ncontent-length: 0\r\nconnection: close\r\n\r\n", - response); + EXPECT_THAT(response, HasSubstr("HTTP/1.1 400 Bad Request\r\n")); } { std::string response; @@ -418,8 +417,7 @@ TEST_P(IntegrationTest, TestSmuggling) { "\r\ncontent-length: 36\r\n\r\n" + smuggled_request; sendRawHttpAndWaitForResponse(lookupPort("http"), request.c_str(), &response, false); - EXPECT_EQ("HTTP/1.1 400 Bad Request\r\ncontent-length: 0\r\nconnection: close\r\n\r\n", - response); + EXPECT_THAT(response, HasSubstr("HTTP/1.1 400 Bad Request\r\n")); } { std::string response; @@ -427,8 +425,7 @@ TEST_P(IntegrationTest, TestSmuggling) { "identity,chunked \r\ncontent-length: 36\r\n\r\n" + smuggled_request; sendRawHttpAndWaitForResponse(lookupPort("http"), request.c_str(), &response, false); - EXPECT_EQ("HTTP/1.1 400 Bad Request\r\ncontent-length: 0\r\nconnection: close\r\n\r\n", - response); + EXPECT_THAT(response, HasSubstr("HTTP/1.1 
400 Bad Request\r\n")); } } @@ -436,7 +433,7 @@ TEST_P(IntegrationTest, BadFirstline) { initialize(); std::string response; sendRawHttpAndWaitForResponse(lookupPort("http"), "hello", &response); - EXPECT_EQ("HTTP/1.1 400 Bad Request\r\ncontent-length: 0\r\nconnection: close\r\n\r\n", response); + EXPECT_THAT(response, HasSubstr("HTTP/1.1 400 Bad Request\r\n")); } TEST_P(IntegrationTest, MissingDelimiter) { @@ -445,7 +442,7 @@ TEST_P(IntegrationTest, MissingDelimiter) { std::string response; sendRawHttpAndWaitForResponse(lookupPort("http"), "GET / HTTP/1.1\r\nHost: host\r\nfoo bar\r\n\r\n", &response); - EXPECT_EQ("HTTP/1.1 400 Bad Request\r\ncontent-length: 0\r\nconnection: close\r\n\r\n", response); + EXPECT_THAT(response, HasSubstr("HTTP/1.1 400 Bad Request\r\n")); EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("http1.codec_error")); } @@ -454,7 +451,7 @@ TEST_P(IntegrationTest, InvalidCharacterInFirstline) { std::string response; sendRawHttpAndWaitForResponse(lookupPort("http"), "GE(T / HTTP/1.1\r\nHost: host\r\n\r\n", &response); - EXPECT_EQ("HTTP/1.1 400 Bad Request\r\ncontent-length: 0\r\nconnection: close\r\n\r\n", response); + EXPECT_THAT(response, HasSubstr("HTTP/1.1 400 Bad Request\r\n")); } TEST_P(IntegrationTest, InvalidVersion) { @@ -462,7 +459,7 @@ TEST_P(IntegrationTest, InvalidVersion) { std::string response; sendRawHttpAndWaitForResponse(lookupPort("http"), "GET / HTTP/1.01\r\nHost: host\r\n\r\n", &response); - EXPECT_EQ("HTTP/1.1 400 Bad Request\r\ncontent-length: 0\r\nconnection: close\r\n\r\n", response); + EXPECT_THAT(response, HasSubstr("HTTP/1.1 400 Bad Request\r\n")); } // Expect that malformed trailers to break the connection @@ -479,7 +476,7 @@ TEST_P(IntegrationTest, BadTrailer) { "badtrailer\r\n\r\n", &response); - EXPECT_EQ("HTTP/1.1 400 Bad Request\r\ncontent-length: 0\r\nconnection: close\r\n\r\n", response); + EXPECT_THAT(response, HasSubstr("HTTP/1.1 400 Bad Request\r\n")); } // Expect malformed headers to break the connection @@ -496,7 +493,7 @@ TEST_P(IntegrationTest, BadHeader) { "body\r\n0\r\n\r\n", &response); - EXPECT_EQ("HTTP/1.1 400 Bad Request\r\ncontent-length: 0\r\nconnection: close\r\n\r\n", response); + EXPECT_THAT(response, HasSubstr("HTTP/1.1 400 Bad Request\r\n")); } TEST_P(IntegrationTest, Http10Disabled) { diff --git a/test/integration/protocol_integration_test.cc b/test/integration/protocol_integration_test.cc index 370435c3f967..ccb9654c1b54 100644 --- a/test/integration/protocol_integration_test.cc +++ b/test/integration/protocol_integration_test.cc @@ -1102,6 +1102,7 @@ TEST_P(DownstreamProtocolIntegrationTest, InvalidContentLength) { if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { ASSERT_TRUE(response->complete()); EXPECT_EQ("400", response->headers().getStatusValue()); + test_server_->waitForCounterGe("http.config_test.downstream_rq_4xx", 1); } else { ASSERT_TRUE(response->reset()); EXPECT_EQ(Http::StreamResetReason::ConnectionTermination, response->reset_reason()); diff --git a/test/mocks/http/mocks.h b/test/mocks/http/mocks.h index c4794fbd74ef..e323357974d4 100644 --- a/test/mocks/http/mocks.h +++ b/test/mocks/http/mocks.h @@ -258,6 +258,11 @@ class MockStreamDecoderFilter : public StreamDecoderFilter { MOCK_METHOD(FilterMetadataStatus, decodeMetadata, (Http::MetadataMap & metadata_map)); MOCK_METHOD(void, setDecoderFilterCallbacks, (StreamDecoderFilterCallbacks & callbacks)); MOCK_METHOD(void, decodeComplete, ()); + MOCK_METHOD(void, sendLocalReply, + (bool is_grpc_request, Code code, absl::string_view 
body, + const std::function& modify_headers, + bool is_head_request, const absl::optional grpc_status, + absl::string_view details)); Http::StreamDecoderFilterCallbacks* callbacks_{}; }; diff --git a/test/mocks/http/stream_decoder.h b/test/mocks/http/stream_decoder.h index 1238c55f91b5..b822de460b9d 100644 --- a/test/mocks/http/stream_decoder.h +++ b/test/mocks/http/stream_decoder.h @@ -16,6 +16,11 @@ class MockRequestDecoder : public RequestDecoder { // Http::StreamDecoder MOCK_METHOD(void, decodeData, (Buffer::Instance & data, bool end_stream)); MOCK_METHOD(void, decodeMetadata_, (MetadataMapPtr & metadata_map)); + MOCK_METHOD(void, sendLocalReply, + (bool is_grpc_request, Code code, absl::string_view body, + const std::function& modify_headers, + bool is_head_request, const absl::optional grpc_status, + absl::string_view details)); void decodeHeaders(RequestHeaderMapPtr&& headers, bool end_stream) override { decodeHeaders_(headers, end_stream); From 45c7d876f16bff31651d8b326d0cae3e4fe441e7 Mon Sep 17 00:00:00 2001 From: Henry Yang <4411287+HenryYYang@users.noreply.github.com> Date: Thu, 25 Jun 2020 09:08:54 -0700 Subject: [PATCH 443/909] Fix missing instantiation of parameterized tests for redis cluster (#11545) Signed-off-by: Henry Yang --- .../network/common/redis/client_impl.cc | 6 +++- .../network/common/redis/client_impl.h | 1 + .../redis/redis_cluster_integration_test.cc | 31 ++++++++++++++----- 3 files changed, 30 insertions(+), 8 deletions(-) diff --git a/source/extensions/filters/network/common/redis/client_impl.cc b/source/extensions/filters/network/common/redis/client_impl.cc index cadfa95a1371..55c99ba05288 100644 --- a/source/extensions/filters/network/common/redis/client_impl.cc +++ b/source/extensions/filters/network/common/redis/client_impl.cc @@ -240,7 +240,11 @@ void ClientImpl::onRespValue(RespValuePtr&& value) { } } if (!redirected) { - callbacks.onResponse(std::move(value)); + if (err[0] == RedirectionResponse::get().CLUSTER_DOWN) { + callbacks.onFailure(); + } else { + callbacks.onResponse(std::move(value)); + } } } else { callbacks.onResponse(std::move(value)); diff --git a/source/extensions/filters/network/common/redis/client_impl.h b/source/extensions/filters/network/common/redis/client_impl.h index ad5b6231ffb7..ca0eb5f21dbe 100644 --- a/source/extensions/filters/network/common/redis/client_impl.h +++ b/source/extensions/filters/network/common/redis/client_impl.h @@ -31,6 +31,7 @@ namespace Client { struct RedirectionValues { const std::string ASK = "ASK"; const std::string MOVED = "MOVED"; + const std::string CLUSTER_DOWN = "CLUSTERDOWN"; }; using RedirectionResponse = ConstSingleton; diff --git a/test/extensions/clusters/redis/redis_cluster_integration_test.cc b/test/extensions/clusters/redis/redis_cluster_integration_test.cc index ed83eab698d7..693ecc517854 100644 --- a/test/extensions/clusters/redis/redis_cluster_integration_test.cc +++ b/test/extensions/clusters/redis/redis_cluster_integration_test.cc @@ -194,7 +194,8 @@ class RedisClusterIntegrationTest : public testing::TestWithParamwrite(ok)); + } else if (expect_readonly) { + std::string readonly_command = makeBulkStringArray({"readonly"}); + EXPECT_TRUE(fake_upstream_connection->waitForData(readonly_command.size() + request.size(), + &proxy_to_server)); + EXPECT_EQ(readonly_command + request, proxy_to_server); + // Send back an OK for the readonly command. 
+ EXPECT_TRUE(fake_upstream_connection->write(ok)); } else { EXPECT_TRUE(fake_upstream_connection->waitForData(request.size(), &proxy_to_server)); // The original request should be the same as the data received by the server. @@ -237,12 +245,12 @@ class RedisClusterIntegrationTest : public testing::TestWithParamclose(); EXPECT_TRUE(fake_upstream_connection->close()); @@ -383,6 +391,14 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, RedisClusterWithAuthIntegrationTest, testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), TestUtility::ipTestParamsToString); +INSTANTIATE_TEST_SUITE_P(IpVersions, RedisClusterWithReadPolicyIntegrationTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); + +INSTANTIATE_TEST_SUITE_P(IpVersions, RedisClusterWithRefreshIntegrationTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); + // This test sends a simple "get foo" command from a fake // downstream client through the proxy to a fake upstream // Redis cluster with a single slot with master and replica. @@ -515,8 +531,8 @@ TEST_P(RedisClusterWithReadPolicyIntegrationTest, SingleSlotMasterReplicaReadRep initialize(); // foo hashes to slot 12182 which has master node in upstream 0 and replica in upstream 1 - simpleRequestAndResponse(0, makeBulkStringArray({"set", "foo", "bar"}), ":1\r\n"); - simpleRequestAndResponse(1, makeBulkStringArray({"get", "foo"}), "$3\r\nbar\r\n"); + simpleRequestAndResponse(0, makeBulkStringArray({"set", "foo", "bar"}), ":1\r\n", true); + simpleRequestAndResponse(1, makeBulkStringArray({"get", "foo"}), "$3\r\nbar\r\n", true); } // This test sends a simple "get foo" command from a fake @@ -572,6 +588,7 @@ TEST_P(RedisClusterWithRefreshIntegrationTest, ClusterSlotRequestAfterFailure) { std::string request = makeBulkStringArray({"get", "foo"}); // The actual error response. std::string error_response = "-CLUSTERDOWN The cluster is down\r\n"; + std::string upstream_error_response = "-upstream failure\r\n"; std::string cluster_slots_request = makeBulkStringArray({"CLUSTER", "SLOTS"}); std::string proxy_to_server; @@ -589,9 +606,9 @@ TEST_P(RedisClusterWithRefreshIntegrationTest, ClusterSlotRequestAfterFailure) { // Send the server down error response from the first fake Redis server back to the proxy. EXPECT_TRUE(fake_upstream_connection_1->write(error_response)); - redis_client->waitForData(error_response); + redis_client->waitForData(upstream_error_response); // The client should receive response unchanged. - EXPECT_EQ(error_response, redis_client->data()); + EXPECT_EQ(upstream_error_response, redis_client->data()); // A new connection should be created to fake_upstreams_[0] for topology discovery. 
proxy_to_server.clear(); From 789eafde25bd12dcb60de7cff740dcadb61c3bac Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Thu, 25 Jun 2020 10:21:24 -0600 Subject: [PATCH 444/909] community: update inclusive language guidelines (#11733) Signed-off-by: Matt Klein --- CONTRIBUTING.md | 19 ++++- .../filter/http/cache/v2alpha/cache.proto | 2 +- .../filters/http/cache/v3alpha/cache.proto | 2 +- .../filters/http/cache/v4alpha/cache.proto | 2 +- bazel/foreign_cc/luajit.patch | 8 +-- bazel/foreign_cc/moonjit.patch | 8 +-- .../http/http_filters/csrf_filter.rst | 2 +- .../security/jwt_authn_filter.rst | 2 +- .../root/intro/arch_overview/security/ssl.rst | 2 +- docs/root/version_history/v1.11.0.rst | 2 +- docs/root/version_history/v1.6.0.rst | 2 +- .../filter/http/cache/v2alpha/cache.proto | 2 +- .../filters/http/cache/v3alpha/cache.proto | 2 +- .../filters/http/cache/v4alpha/cache.proto | 2 +- include/envoy/filesystem/filesystem.h | 4 +- .../filesystem/posix/filesystem_impl.cc | 2 +- source/extensions/all_extensions.bzl | 4 +- .../clusters/redis/redis_cluster.cc | 6 +- .../filters/network/rocketmq_proxy/codec.cc | 3 +- .../filters/network/rocketmq_proxy/protocol.h | 2 +- .../filters/udp/dns_filter/dns_filter.cc | 2 +- source/extensions/tracers/xray/tracer.h | 2 +- source/server/listener_manager_impl.cc | 8 +-- .../common/filesystem/filesystem_impl_test.cc | 2 +- .../filters/common/rbac/engine_impl_test.cc | 4 +- .../integration/tcp_proxy_integration_test.cc | 34 ++++----- test/server/listener_manager_impl_test.cc | 8 +-- tools/clang_tools/api_booster/main.cc | 2 +- tools/code_format/check_format.py | 70 +++++++++---------- tools/protoxform/protoprint.py | 2 +- tools/spelling/check_spelling.sh | 10 +-- ...words.txt => spelling_allowlist_words.txt} | 0 tools/spelling/spelling_dictionary.txt | 2 - 33 files changed, 120 insertions(+), 104 deletions(-) rename tools/spelling/{spelling_whitelist_words.txt => spelling_allowlist_words.txt} (100%) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 17a5095bf338..8a56feda06b7 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -21,6 +21,23 @@ maximize the chances of your PR being merged. * See [STYLE.md](STYLE.md) +# Inclusive language policy + +The Envoy community has an explicit goal to be inclusive to all. As such, all PRs must adhere to the +following guidelines for all code, APIs, and documentation: + +* The following words and phrases are not allowed: + * *Whitelist*: use allowlist instead. + * *Blacklist*: use denylist or blocklist instead. + * *Master*: use primary instead. + * *Slave*: use secondary or replica instead. +* Documentation should be written in an inclusive style. The [Google developer + documentation](https://developers.google.com/style/inclusive-documentation) contains an excellent + reference on this topic. +* The above policy is not considered definitive and may be amended in the future as industry best + practices evolve. Additional comments on this topic may be provided by maintainers during code + review. + # Breaking change policy Both API and implementation stability are important to Envoy. Since the API is consumed by clients @@ -205,7 +222,7 @@ and false. organization specific shortcuts into the code. * If there is a question on who should review a PR please discuss in Slack. * Anyone is welcome to review any PR that they want, whether they are a maintainer or not. 
-* Please make sure that the PR title, commit message, and description are updated if the PR changes +* Please make sure that the PR title, commit message, and description are updated if the PR changes significantly during review. * Please **clean up the title and body** before merging. By default, GitHub fills the squash merge title with the original title, and the commit body with every individual commit from the PR. diff --git a/api/envoy/config/filter/http/cache/v2alpha/cache.proto b/api/envoy/config/filter/http/cache/v2alpha/cache.proto index a9e51cf56a10..d08b5462fd88 100644 --- a/api/envoy/config/filter/http/cache/v2alpha/cache.proto +++ b/api/envoy/config/filter/http/cache/v2alpha/cache.proto @@ -57,7 +57,7 @@ message CacheConfig { // contents of a response, as described by // https://httpwg.org/specs/rfc7234.html#caching.negotiated.responses. // - // During insertion, *allowed_vary_headers* acts as a whitelist: if a + // During insertion, *allowed_vary_headers* acts as a allowlist: if a // response's *vary* header mentions any header names that aren't in // *allowed_vary_headers*, that response will not be cached. // diff --git a/api/envoy/extensions/filters/http/cache/v3alpha/cache.proto b/api/envoy/extensions/filters/http/cache/v3alpha/cache.proto index 1ff305bb0e27..f78b1d24ac2c 100644 --- a/api/envoy/extensions/filters/http/cache/v3alpha/cache.proto +++ b/api/envoy/extensions/filters/http/cache/v3alpha/cache.proto @@ -61,7 +61,7 @@ message CacheConfig { // contents of a response, as described by // https://httpwg.org/specs/rfc7234.html#caching.negotiated.responses. // - // During insertion, *allowed_vary_headers* acts as a whitelist: if a + // During insertion, *allowed_vary_headers* acts as a allowlist: if a // response's *vary* header mentions any header names that aren't in // *allowed_vary_headers*, that response will not be cached. // diff --git a/api/envoy/extensions/filters/http/cache/v4alpha/cache.proto b/api/envoy/extensions/filters/http/cache/v4alpha/cache.proto index 7cb48d4d6c26..19921edb0310 100644 --- a/api/envoy/extensions/filters/http/cache/v4alpha/cache.proto +++ b/api/envoy/extensions/filters/http/cache/v4alpha/cache.proto @@ -61,7 +61,7 @@ message CacheConfig { // contents of a response, as described by // https://httpwg.org/specs/rfc7234.html#caching.negotiated.responses. // - // During insertion, *allowed_vary_headers* acts as a whitelist: if a + // During insertion, *allowed_vary_headers* acts as a allowlist: if a // response's *vary* header mentions any header names that aren't in // *allowed_vary_headers*, that response will not be cached. // diff --git a/bazel/foreign_cc/luajit.patch b/bazel/foreign_cc/luajit.patch index 7623ddafa49a..296d66c85e52 100644 --- a/bazel/foreign_cc/luajit.patch +++ b/bazel/foreign_cc/luajit.patch @@ -86,11 +86,11 @@ index 0000000..9c71271 + os.environ["HOST_CFLAGS"] = "-fno-sanitize=memory" + os.environ["HOST_LDFLAGS"] = "-fno-sanitize=memory" + -+ # Blacklist LuaJIT from ASAN for now. ++ # Remove LuaJIT from ASAN for now. + # TODO(htuch): Remove this when https://github.com/envoyproxy/envoy/issues/6084 is resolved. 
+ if "ENVOY_CONFIG_ASAN" in os.environ: -+ os.environ["TARGET_CFLAGS"] += " -fsanitize-blacklist=%s/com_github_luajit_luajit/clang-asan-blacklist.txt" % os.environ["PWD"] -+ with open("clang-asan-blacklist.txt", "w") as f: ++ os.environ["TARGET_CFLAGS"] += " -fsanitize-blacklist=%s/com_github_luajit_luajit/clang-asan-blocklist.txt" % os.environ["PWD"] ++ with open("clang-asan-blocklist.txt", "w") as f: + f.write("fun:*\n") + + os.system('make V=1 PREFIX="{}" install'.format(args.prefix)) @@ -108,7 +108,7 @@ index 0000000..9c71271 + shutil.copy(header, dst_dir + "/include/luajit-2.1") + os.makedirs(dst_dir + "/bin", exist_ok=True) + shutil.copy("luajit.exe", dst_dir + "/bin") -+ ++ +if os.name == 'nt': + win_main() +else: diff --git a/bazel/foreign_cc/moonjit.patch b/bazel/foreign_cc/moonjit.patch index c0d2c274eaae..8bb54f01d803 100644 --- a/bazel/foreign_cc/moonjit.patch +++ b/bazel/foreign_cc/moonjit.patch @@ -77,11 +77,11 @@ index 0000000..9c71271 + os.environ["HOST_CFLAGS"] = "-fno-sanitize=memory" + os.environ["HOST_LDFLAGS"] = "-fno-sanitize=memory" + -+ # Blacklist LuaJIT from ASAN for now. ++ # Remove LuaJIT from ASAN for now. + # TODO(htuch): Remove this when https://github.com/envoyproxy/envoy/issues/6084 is resolved. + if "ENVOY_CONFIG_ASAN" in os.environ: -+ os.environ["TARGET_CFLAGS"] += " -fsanitize-blacklist=%s/com_github_moonjit_moonjit/clang-asan-blacklist.txt" % os.environ["PWD"] -+ with open("clang-asan-blacklist.txt", "w") as f: ++ os.environ["TARGET_CFLAGS"] += " -fsanitize-blacklist=%s/com_github_moonjit_moonjit/clang-asan-blocklist.txt" % os.environ["PWD"] ++ with open("clang-asan-blocklist.txt", "w") as f: + f.write("fun:*\n") + + os.system('make V=1 PREFIX="{}" install'.format(args.prefix)) @@ -99,7 +99,7 @@ index 0000000..9c71271 + shutil.copy(header, dst_dir + "/include/moonjit-2.2") + os.makedirs(dst_dir + "/bin", exist_ok=True) + shutil.copy("luajit.exe", dst_dir + "/bin") -+ ++ +if os.name == 'nt': + win_main() +else: diff --git a/docs/root/configuration/http/http_filters/csrf_filter.rst b/docs/root/configuration/http/http_filters/csrf_filter.rst index 4e01e413f595..0295b5f9fa59 100644 --- a/docs/root/configuration/http/http_filters/csrf_filter.rst +++ b/docs/root/configuration/http/http_filters/csrf_filter.rst @@ -57,7 +57,7 @@ valid. The reason it is able to do this while still mitigating cross-site reques forgery attempts is because the target origin has already been reached by the time front-envoy is applying the filter. This means that while endpoints may support cross-origin requests they are still protected from malicious third-parties who -have not been whitelisted. +have not been allowlisted. It's important to note that requests should generally originate from the same origin as the target but there are use cases where that may not be possible. diff --git a/docs/root/intro/arch_overview/security/jwt_authn_filter.rst b/docs/root/intro/arch_overview/security/jwt_authn_filter.rst index 848d17298974..9c53106ab146 100644 --- a/docs/root/intro/arch_overview/security/jwt_authn_filter.rst +++ b/docs/root/intro/arch_overview/security/jwt_authn_filter.rst @@ -13,7 +13,7 @@ could be configured to either reject the request with invalid JWT immediately or to later filters by passing the JWT payload to other filters. 
The JWT Authentication filter supports to check the JWT under various conditions of the request, it -could be configured to check JWT only on specific paths so that you could whitelist some paths from +could be configured to check JWT only on specific paths so that you could allowlist some paths from the JWT authentication, which is useful if a path is accessible publicly and doesn't require any JWT authentication. diff --git a/docs/root/intro/arch_overview/security/ssl.rst b/docs/root/intro/arch_overview/security/ssl.rst index eee933e3b8e8..7790ac42ed6d 100644 --- a/docs/root/intro/arch_overview/security/ssl.rst +++ b/docs/root/intro/arch_overview/security/ssl.rst @@ -175,7 +175,7 @@ Authentication filter Envoy provides a network filter that performs TLS client authentication via principals fetched from a REST VPN service. This filter matches the presented client certificate hash against the principal -list to determine whether the connection should be allowed or not. Optional IP white listing can +list to determine whether the connection should be allowed or not. Optional IP allowlisting can also be configured. This functionality can be used to build edge proxy VPN support for web infrastructure. diff --git a/docs/root/version_history/v1.11.0.rst b/docs/root/version_history/v1.11.0.rst index 78f761f99671..1bc4051b7da4 100644 --- a/docs/root/version_history/v1.11.0.rst +++ b/docs/root/version_history/v1.11.0.rst @@ -19,7 +19,7 @@ Changes * build: releases are built with Clang and linked with LLD. * config: added :ref:stats_server_version_override` ` in bootstrap, that can be used to override :ref:`server.version statistic `. * control-plane: management servers can respond with HTTP 304 to indicate that config is up to date for Envoy proxies polling a :ref:`REST API Config Type ` -* csrf: added support for whitelisting additional source origins. +* csrf: added support for allowlisting additional source origins. * dns: added support for getting DNS record TTL which is used by STRICT_DNS/LOGICAL_DNS cluster as DNS refresh rate. * dubbo_proxy: support the :ref:`dubbo proxy filter `. * dynamo_request_parser: adding support for transactions. Adds check for new types of dynamodb operations (TransactWriteItems, TransactGetItems) and awareness for new types of dynamodb errors (IdempotentParameterMismatchException, TransactionCanceledException, TransactionInProgressException). diff --git a/docs/root/version_history/v1.6.0.rst b/docs/root/version_history/v1.6.0.rst index cdcbbcf447f2..879eb2f8df77 100644 --- a/docs/root/version_history/v1.6.0.rst +++ b/docs/root/version_history/v1.6.0.rst @@ -68,7 +68,7 @@ Changes * lua: extended to support :ref:`metadata object ` API. * redis: added local `PING` support to the :ref:`Redis filter `. * redis: added `GEORADIUS_RO` and `GEORADIUSBYMEMBER_RO` to the :ref:`Redis command splitter - ` whitelist. + ` allowlist. * router: added DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT, DOWNSTREAM_LOCAL_ADDRESS, DOWNSTREAM_LOCAL_ADDRESS_WITHOUT_PORT, PROTOCOL, and UPSTREAM_METADATA :ref:`header formatters `. 
The CLIENT_IP header formatter diff --git a/generated_api_shadow/envoy/config/filter/http/cache/v2alpha/cache.proto b/generated_api_shadow/envoy/config/filter/http/cache/v2alpha/cache.proto index a9e51cf56a10..d08b5462fd88 100644 --- a/generated_api_shadow/envoy/config/filter/http/cache/v2alpha/cache.proto +++ b/generated_api_shadow/envoy/config/filter/http/cache/v2alpha/cache.proto @@ -57,7 +57,7 @@ message CacheConfig { // contents of a response, as described by // https://httpwg.org/specs/rfc7234.html#caching.negotiated.responses. // - // During insertion, *allowed_vary_headers* acts as a whitelist: if a + // During insertion, *allowed_vary_headers* acts as a allowlist: if a // response's *vary* header mentions any header names that aren't in // *allowed_vary_headers*, that response will not be cached. // diff --git a/generated_api_shadow/envoy/extensions/filters/http/cache/v3alpha/cache.proto b/generated_api_shadow/envoy/extensions/filters/http/cache/v3alpha/cache.proto index 1ff305bb0e27..f78b1d24ac2c 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/cache/v3alpha/cache.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/cache/v3alpha/cache.proto @@ -61,7 +61,7 @@ message CacheConfig { // contents of a response, as described by // https://httpwg.org/specs/rfc7234.html#caching.negotiated.responses. // - // During insertion, *allowed_vary_headers* acts as a whitelist: if a + // During insertion, *allowed_vary_headers* acts as a allowlist: if a // response's *vary* header mentions any header names that aren't in // *allowed_vary_headers*, that response will not be cached. // diff --git a/generated_api_shadow/envoy/extensions/filters/http/cache/v4alpha/cache.proto b/generated_api_shadow/envoy/extensions/filters/http/cache/v4alpha/cache.proto index 7cb48d4d6c26..19921edb0310 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/cache/v4alpha/cache.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/cache/v4alpha/cache.proto @@ -61,7 +61,7 @@ message CacheConfig { // contents of a response, as described by // https://httpwg.org/specs/rfc7234.html#caching.negotiated.responses. // - // During insertion, *allowed_vary_headers* acts as a whitelist: if a + // During insertion, *allowed_vary_headers* acts as a allowlist: if a // response's *vary* header mentions any header names that aren't in // *allowed_vary_headers*, that response will not be cached. // diff --git a/include/envoy/filesystem/filesystem.h b/include/envoy/filesystem/filesystem.h index 503eb4d87d9c..033d418683f2 100644 --- a/include/envoy/filesystem/filesystem.h +++ b/include/envoy/filesystem/filesystem.h @@ -120,12 +120,12 @@ class Instance { /** * Determine if the path is on a list of paths Envoy will refuse to access. This - * is a basic sanity check for users, blacklisting some clearly bad paths. Paths + * is a basic sanity check for users, denying some clearly bad paths. Paths * may still be problematic (e.g. indirectly leading to /dev/mem) even if this * returns false, it is up to the user to validate that supplied paths are * valid. * @param path some filesystem path. - * @return is the path on the blacklist? + * @return is the path on the deny list? 
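 * Illustrative usage only (not part of this change; the caller and variable names below are
 * hypothetical):
 *   // Reject obviously bad user-supplied paths before doing any I/O with them.
 *   if (file_system.illegalPath(user_path)) {
 *     throw EnvoyException(fmt::format("Invalid path: {}", user_path));
 *   }
 *   const std::string contents = file_system.fileReadToEnd(user_path);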
*/ virtual bool illegalPath(const std::string& path) PURE; }; diff --git a/source/common/filesystem/posix/filesystem_impl.cc b/source/common/filesystem/posix/filesystem_impl.cc index 70ddf0ecf98b..4ebe73ce6521 100644 --- a/source/common/filesystem/posix/filesystem_impl.cc +++ b/source/common/filesystem/posix/filesystem_impl.cc @@ -127,7 +127,7 @@ bool InstanceImplPosix::illegalPath(const std::string& path) { // _before_ canonicalizing the path is that different unix flavors implement // /dev/fd/* differently, for example on linux they are symlinks to /dev/pts/* // which are symlinks to /proc/self/fds/. On BSD (and darwin) they are not - // symlinks at all. To avoid lots of platform, specifics, we whitelist + // symlinks at all. To avoid lots of platform, specifics, we allowlist // /dev/fd/* _before_ resolving the canonical path. if (absl::StartsWith(path, "/dev/fd/")) { return false; diff --git a/source/extensions/all_extensions.bzl b/source/extensions/all_extensions.bzl index 95f018c44973..f22633aeeb2a 100644 --- a/source/extensions/all_extensions.bzl +++ b/source/extensions/all_extensions.bzl @@ -9,8 +9,8 @@ _required_extensions = { } # Return all extensions to be compiled into Envoy. -def envoy_all_extensions(blacklist = []): +def envoy_all_extensions(denylist = []): all_extensions = dicts.add(_required_extensions, EXTENSIONS) # These extensions can be removed on a site specific basis. - return [v for k, v in all_extensions.items() if not k in blacklist] + return [v for k, v in all_extensions.items() if not k in denylist] diff --git a/source/extensions/clusters/redis/redis_cluster.cc b/source/extensions/clusters/redis/redis_cluster.cc index 38dbccc60e6a..c1bc8ca90da2 100644 --- a/source/extensions/clusters/redis/redis_cluster.cc +++ b/source/extensions/clusters/redis/redis_cluster.cc @@ -295,7 +295,7 @@ void RedisCluster::RedisDiscoverySession::onResponse( const uint32_t SlotRangeStart = 0; const uint32_t SlotRangeEnd = 1; const uint32_t SlotMaster = 2; - const uint32_t SlotSlaveStart = 3; + const uint32_t SlotReplicaStart = 3; // Do nothing if the cluster is empty. if (value->type() != NetworkFilters::Common::Redis::RespType::Array || value->asArray().empty()) { @@ -333,8 +333,8 @@ void RedisCluster::RedisDiscoverySession::onResponse( slots->emplace_back(slot_range[SlotRangeStart].asInteger(), slot_range[SlotRangeEnd].asInteger(), master_address); - for (auto replica = std::next(slot_range.begin(), SlotSlaveStart); replica != slot_range.end(); - ++replica) { + for (auto replica = std::next(slot_range.begin(), SlotReplicaStart); + replica != slot_range.end(); ++replica) { auto replica_address = ProcessCluster(*replica); if (!replica_address) { onUnexpectedResponse(value); diff --git a/source/extensions/filters/network/rocketmq_proxy/codec.cc b/source/extensions/filters/network/rocketmq_proxy/codec.cc index 628fc302f99d..b56e0d5d599a 100644 --- a/source/extensions/filters/network/rocketmq_proxy/codec.cc +++ b/source/extensions/filters/network/rocketmq_proxy/codec.cc @@ -318,7 +318,8 @@ CommandCustomHeaderPtr Decoder::decodeResponseExtHeader(ResponseCode response_co ProtobufWkt::Struct& header_struct, RequestCode request_code) { // No need to decode a failed response. 
- if (response_code != ResponseCode::Success && response_code != ResponseCode::SlaveNotAvailable) { + if (response_code != ResponseCode::Success && + response_code != ResponseCode::ReplicaNotAvailable) { return nullptr; } const auto& filed_value_pair = header_struct.fields(); diff --git a/source/extensions/filters/network/rocketmq_proxy/protocol.h b/source/extensions/filters/network/rocketmq_proxy/protocol.h index aa9c213bbc89..fee961767e0e 100644 --- a/source/extensions/filters/network/rocketmq_proxy/protocol.h +++ b/source/extensions/filters/network/rocketmq_proxy/protocol.h @@ -244,7 +244,7 @@ enum class ResponseCode : uint32_t { SystemError = 1, SystemBusy = 2, RequestCodeNotSupported = 3, - SlaveNotAvailable = 11, + ReplicaNotAvailable = 11, }; /** diff --git a/source/extensions/filters/udp/dns_filter/dns_filter.cc b/source/extensions/filters/udp/dns_filter/dns_filter.cc index a666ff7924e9..dc4c8bc30820 100644 --- a/source/extensions/filters/udp/dns_filter/dns_filter.cc +++ b/source/extensions/filters/udp/dns_filter/dns_filter.cc @@ -259,7 +259,7 @@ std::chrono::seconds DnsFilter::getDomainTTL(const absl::string_view domain) { bool DnsFilter::isKnownDomain(const absl::string_view domain_name) { const auto& known_suffixes = config_->knownSuffixes(); - // If we don't have a list of whitelisted domain suffixes, we will resolve the name with an + // If we don't have a list of allowlisted domain suffixes, we will resolve the name with an // external DNS server if (known_suffixes.empty()) { ENVOY_LOG(debug, "Known domains list is empty"); diff --git a/source/extensions/tracers/xray/tracer.h b/source/extensions/tracers/xray/tracer.h index b0f92da9c941..7436a45498c3 100644 --- a/source/extensions/tracers/xray/tracer.h +++ b/source/extensions/tracers/xray/tracer.h @@ -65,7 +65,7 @@ class Span : public Tracing::Span, Logger::Loggable { /** * Adds a key-value pair to either the Span's annotations or metadata. - * A whitelist of keys are added to the annotations, everything else is added to the metadata. + * An allowlist of keys are added to the annotations, everything else is added to the metadata. */ void setTag(absl::string_view name, absl::string_view value) override; diff --git a/source/server/listener_manager_impl.cc b/source/server/listener_manager_impl.cc index 7b85cf590356..77fc33d33464 100644 --- a/source/server/listener_manager_impl.cc +++ b/source/server/listener_manager_impl.cc @@ -593,14 +593,14 @@ void ListenerManagerImpl::drainListener(ListenerImplPtr&& listener) { server_.dispatcher().post([this, draining_it]() -> void { // TODO(lambdai): Resolve race condition below. // Consider the below events in global sequence order - // master thread: calling drainListener + // main thread: calling drainListener // work thread: deferred delete the active connection - // work thread: post to master that the drain is done - // master thread: erase the listener + // work thread: post to main that the drain is done + // main thread: erase the listener // worker thread: execute destroying connection when the shared listener config is // destroyed at step 4 (could be worse such as access the connection because connection is // not yet started to deleted). The race condition is introduced because 3 occurs too - // early. My solution is to defer schedule the callback posting to master thread, by + // early. My solution is to defer schedule the callback posting to main thread, by // introducing DeferTaskUtil. So that 5 should always happen before 3. 
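          // Illustrative sketch of the fix described above (not part of this change; the
          // DeferTaskUtil API and the callback names are hypothetical). Rather than posting the
          // "drain done" notification to the main thread immediately (step 3), the worker would
          // schedule it behind its own pending deferred deletions so that the connection
          // teardown (step 5) always runs first:
          //
          //   worker_dispatcher.deferredDelete(std::move(connection));          // step 2
          //   DeferTaskUtil::runAfterDeferredDeletes(worker_dispatcher, [&]() {
          //     main_dispatcher.post(drain_done_callback);                      // step 3, now after 5
          //   });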
if (--draining_it->workers_pending_removal_ == 0) { draining_it->listener_->debugLog("draining listener removal complete"); diff --git a/test/common/filesystem/filesystem_impl_test.cc b/test/common/filesystem/filesystem_impl_test.cc index 0531f3b0d6c8..4595099da007 100644 --- a/test/common/filesystem/filesystem_impl_test.cc +++ b/test/common/filesystem/filesystem_impl_test.cc @@ -103,7 +103,7 @@ TEST_F(FileSystemImplTest, FileReadToEndDoesNotExist) { EnvoyException); } -TEST_F(FileSystemImplTest, FileReadToEndBlacklisted) { +TEST_F(FileSystemImplTest, FileReadToEndDenylisted) { EXPECT_THROW(file_system_.fileReadToEnd("/dev/urandom"), EnvoyException); EXPECT_THROW(file_system_.fileReadToEnd("/proc/cpuinfo"), EnvoyException); EXPECT_THROW(file_system_.fileReadToEnd("/sys/block/sda/dev"), EnvoyException); diff --git a/test/extensions/filters/common/rbac/engine_impl_test.cc b/test/extensions/filters/common/rbac/engine_impl_test.cc index 5e2eda5b30e3..8f4f3d7e6ad1 100644 --- a/test/extensions/filters/common/rbac/engine_impl_test.cc +++ b/test/extensions/filters/common/rbac/engine_impl_test.cc @@ -127,7 +127,7 @@ TEST(RoleBasedAccessControlEngineImpl, InvalidConfig) { } } -TEST(RoleBasedAccessControlEngineImpl, AllowedWhitelist) { +TEST(RoleBasedAccessControlEngineImpl, AllowedAllowlist) { envoy::config::rbac::v3::Policy policy; policy.add_permissions()->set_destination_port(123); policy.add_principals()->set_any(true); @@ -150,7 +150,7 @@ TEST(RoleBasedAccessControlEngineImpl, AllowedWhitelist) { checkEngine(engine, false, conn, headers, info); } -TEST(RoleBasedAccessControlEngineImpl, DeniedBlacklist) { +TEST(RoleBasedAccessControlEngineImpl, DeniedDenylist) { envoy::config::rbac::v3::Policy policy; policy.add_permissions()->set_destination_port(123); policy.add_principals()->set_any(true); diff --git a/test/integration/tcp_proxy_integration_test.cc b/test/integration/tcp_proxy_integration_test.cc index 55a8e13e1e9e..7bc39b386557 100644 --- a/test/integration/tcp_proxy_integration_test.cc +++ b/test/integration/tcp_proxy_integration_test.cc @@ -627,9 +627,9 @@ TEST_P(TcpProxyMetadataMatchIntegrationTest, tcp_proxy_.set_stat_prefix("tcp_stats"); tcp_proxy_.set_cluster("cluster_0"); tcp_proxy_.mutable_metadata_match()->MergeFrom( - lbMetadata({{"role", "master"}, {"version", "v1"}, {"stage", "prod"}})); + lbMetadata({{"role", "primary"}, {"version", "v1"}, {"stage", "prod"}})); - endpoint_metadata_ = lbMetadata({{"role", "master"}, {"version", "v1"}, {"stage", "prod"}}); + endpoint_metadata_ = lbMetadata({{"role", "primary"}, {"version", "v1"}, {"stage", "prod"}}); initialize(); @@ -645,9 +645,9 @@ TEST_P(TcpProxyMetadataMatchIntegrationTest, tcp_proxy_.mutable_hidden_envoy_deprecated_deprecated_v1()->add_routes()->set_cluster( "cluster_0"); tcp_proxy_.mutable_metadata_match()->MergeFrom( - lbMetadata({{"role", "master"}, {"version", "v1"}, {"stage", "prod"}})); + lbMetadata({{"role", "primary"}, {"version", "v1"}, {"stage", "prod"}})); - endpoint_metadata_ = lbMetadata({{"role", "master"}, {"version", "v1"}, {"stage", "prod"}}); + endpoint_metadata_ = lbMetadata({{"role", "primary"}, {"version", "v1"}, {"stage", "prod"}}); config_helper_.addRuntimeOverride("envoy.deprecated_features:envoy.extensions.filters.network." 
"tcp_proxy.v3.TcpProxy.hidden_envoy_deprecated_deprecated_v1", @@ -665,9 +665,9 @@ TEST_P(TcpProxyMetadataMatchIntegrationTest, EndpointShouldMatchWeightedClusterW cluster_0->set_name("cluster_0"); cluster_0->set_weight(1); cluster_0->mutable_metadata_match()->MergeFrom( - lbMetadata({{"role", "master"}, {"version", "v1"}, {"stage", "prod"}})); + lbMetadata({{"role", "primary"}, {"version", "v1"}, {"stage", "prod"}})); - endpoint_metadata_ = lbMetadata({{"role", "master"}, {"version", "v1"}, {"stage", "prod"}}); + endpoint_metadata_ = lbMetadata({{"role", "primary"}, {"version", "v1"}, {"stage", "prod"}}); initialize(); @@ -684,9 +684,9 @@ TEST_P(TcpProxyMetadataMatchIntegrationTest, cluster_0->set_name("cluster_0"); cluster_0->set_weight(1); cluster_0->mutable_metadata_match()->MergeFrom(lbMetadata( - {{"role", "master"}, {"stage", "prod"}})); // should override `stage` value at top-level + {{"role", "primary"}, {"stage", "prod"}})); // should override `stage` value at top-level - endpoint_metadata_ = lbMetadata({{"role", "master"}, {"version", "v1"}, {"stage", "prod"}}); + endpoint_metadata_ = lbMetadata({{"role", "primary"}, {"version", "v1"}, {"stage", "prod"}}); initialize(); @@ -699,12 +699,12 @@ TEST_P(TcpProxyMetadataMatchIntegrationTest, EndpointShouldMatchWeightedClusterWithTopLevelMetadataMatch) { tcp_proxy_.set_stat_prefix("tcp_stats"); tcp_proxy_.mutable_metadata_match()->MergeFrom( - lbMetadata({{"role", "master"}, {"version", "v1"}, {"stage", "prod"}})); + lbMetadata({{"role", "primary"}, {"version", "v1"}, {"stage", "prod"}})); auto* cluster_0 = tcp_proxy_.mutable_weighted_clusters()->add_clusters(); cluster_0->set_name("cluster_0"); cluster_0->set_weight(1); - endpoint_metadata_ = lbMetadata({{"role", "master"}, {"version", "v1"}, {"stage", "prod"}}); + endpoint_metadata_ = lbMetadata({{"role", "primary"}, {"version", "v1"}, {"stage", "prod"}}); initialize(); @@ -718,7 +718,7 @@ TEST_P(TcpProxyMetadataMatchIntegrationTest, tcp_proxy_.set_stat_prefix("tcp_stats"); tcp_proxy_.set_cluster("cluster_0"); tcp_proxy_.mutable_metadata_match()->MergeFrom( - lbMetadata({{"role", "master"}, {"version", "v1"}, {"stage", "prod"}})); + lbMetadata({{"role", "primary"}, {"version", "v1"}, {"stage", "prod"}})); endpoint_metadata_ = lbMetadata({{"role", "replica"}, {"version", "v1"}, {"stage", "prod"}}); @@ -736,7 +736,7 @@ TEST_P(TcpProxyMetadataMatchIntegrationTest, tcp_proxy_.mutable_hidden_envoy_deprecated_deprecated_v1()->add_routes()->set_cluster( "cluster_0"); tcp_proxy_.mutable_metadata_match()->MergeFrom( - lbMetadata({{"role", "master"}, {"version", "v1"}, {"stage", "prod"}})); + lbMetadata({{"role", "primary"}, {"version", "v1"}, {"stage", "prod"}})); endpoint_metadata_ = lbMetadata({{"role", "replica"}, {"version", "v1"}, {"stage", "prod"}}); @@ -757,7 +757,7 @@ TEST_P(TcpProxyMetadataMatchIntegrationTest, cluster_0->set_name("cluster_0"); cluster_0->set_weight(1); cluster_0->mutable_metadata_match()->MergeFrom( - lbMetadata({{"role", "master"}, {"version", "v1"}, {"stage", "prod"}})); + lbMetadata({{"role", "primary"}, {"version", "v1"}, {"stage", "prod"}})); endpoint_metadata_ = lbMetadata({{"role", "replica"}, {"version", "v1"}, {"stage", "prod"}}); @@ -776,9 +776,9 @@ TEST_P(TcpProxyMetadataMatchIntegrationTest, cluster_0->set_name("cluster_0"); cluster_0->set_weight(1); cluster_0->mutable_metadata_match()->MergeFrom(lbMetadata( - {{"role", "master"}, {"stage", "prod"}})); // should override `stage` value at top-level + {{"role", "primary"}, {"stage", "prod"}})); // should 
override `stage` value at top-level - endpoint_metadata_ = lbMetadata({{"role", "master"}, {"version", "v1"}, {"stage", "dev"}}); + endpoint_metadata_ = lbMetadata({{"role", "primary"}, {"version", "v1"}, {"stage", "dev"}}); initialize(); @@ -791,7 +791,7 @@ TEST_P(TcpProxyMetadataMatchIntegrationTest, EndpointShouldNotMatchWeightedClusterWithTopLevelMetadataMatch) { tcp_proxy_.set_stat_prefix("tcp_stats"); tcp_proxy_.mutable_metadata_match()->MergeFrom( - lbMetadata({{"role", "master"}, {"version", "v1"}, {"stage", "prod"}})); + lbMetadata({{"role", "primary"}, {"version", "v1"}, {"stage", "prod"}})); auto* cluster_0 = tcp_proxy_.mutable_weighted_clusters()->add_clusters(); cluster_0->set_name("cluster_0"); cluster_0->set_weight(1); @@ -842,7 +842,7 @@ void TcpProxySslIntegrationTest::setupConnections() { dispatcher_->createClientConnection(address, Network::Address::InstanceConstSharedPtr(), context_->createTransportSocket(nullptr), nullptr); - // Perform the SSL handshake. Loopback is whitelisted in tcp_proxy.json for the ssl_auth + // Perform the SSL handshake. Loopback is allowlisted in tcp_proxy.json for the ssl_auth // filter so there will be no pause waiting on auth data. ssl_client_->addConnectionCallbacks(connect_callbacks_); ssl_client_->enableHalfClose(true); diff --git a/test/server/listener_manager_impl_test.cc b/test/server/listener_manager_impl_test.cc index 3b5c09c56e63..c2cee57c8044 100644 --- a/test/server/listener_manager_impl_test.cc +++ b/test/server/listener_manager_impl_test.cc @@ -94,7 +94,7 @@ class ListenerManagerImplForInPlaceFilterChainUpdateTest : public ListenerManage envoy::config::listener::v3::Listener createDefaultListener() { envoy::config::listener::v3::Listener listener_proto; Protobuf::TextFormat::ParseFromString(R"EOF( - name: "foo" + name: "foo" address: { socket_address: { address: "127.0.0.1" @@ -1619,7 +1619,7 @@ name: foo socket_address: address: 127.0.0.1 port_value: 1234 -per_connection_buffer_limit_bytes: 999 +per_connection_buffer_limit_bytes: 999 filter_chains: - filters: [] )EOF"; @@ -3484,7 +3484,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, Metadata) { route: { cluster: service_foo } listener_filters: - name: "envoy.filters.listener.original_dst" - config: {} + config: {} )EOF", Network::Address::IpVersion::v4); Configuration::ListenerFactoryContext* listener_factory_context = nullptr; @@ -4398,7 +4398,7 @@ traffic_direction: INBOUND EXPECT_CALL(*worker_, removeFilterChains(_, _, _)); filter_chain_drain_timer->invokeCallback(); - // Once worker clean up is done, it's safe for the master thread to remove the original listener. + // Once worker clean up is done, it's safe for the main thread to remove the original listener. EXPECT_CALL(*listener_foo, onDestroy()); worker_->callDrainFilterChainsComplete(); checkStats(__LINE__, 1, 1, 0, 0, 1, 0, 0); diff --git a/tools/clang_tools/api_booster/main.cc b/tools/clang_tools/api_booster/main.cc index c1ca773024ce..b71d9542752e 100644 --- a/tools/clang_tools/api_booster/main.cc +++ b/tools/clang_tools/api_booster/main.cc @@ -489,7 +489,7 @@ class ApiBooster : public clang::ast_matchers::MatchFinder::MatchCallback, !absl::StartsWith(proto_type_name, "envoy.test") && !absl::StartsWith(proto_type_name, "envoy.tracers.xray.daemon")) { // Die hard if we don't have a useful proto type for something that looks - // like an API type(modulo a short whitelist). + // like an API type(modulo a short allowlist). 
std::cerr << "Unknown API type: " << proto_type_name << std::endl; // TODO(htuch): maybe there is a nicer way to terminate AST traversal? ::exit(1); diff --git a/tools/code_format/check_format.py b/tools/code_format/check_format.py index 689ed09c7e5b..0e955d5e305d 100755 --- a/tools/code_format/check_format.py +++ b/tools/code_format/check_format.py @@ -25,13 +25,13 @@ PROTO_SUFFIX = (".proto") # Files in these paths can make reference to protobuf stuff directly -GOOGLE_PROTOBUF_WHITELIST = ("ci/prebuilt", "source/common/protobuf", "api/test") +GOOGLE_PROTOBUF_ALLOWLIST = ("ci/prebuilt", "source/common/protobuf", "api/test") REPOSITORIES_BZL = "bazel/repositories.bzl" # Files matching these exact names can reference real-world time. These include the class # definitions for real-world time, the construction of them in main(), and perf annotation. # For now it includes the validation server but that really should be injected too. -REAL_TIME_WHITELIST = ("./source/common/common/utility.h", +REAL_TIME_ALLOWLIST = ("./source/common/common/utility.h", "./source/extensions/common/aws/utility.cc", "./source/common/event/real_time_system.cc", "./source/common/event/real_time_system.h", "./source/exe/main_common.cc", @@ -46,11 +46,11 @@ # Tests in these paths may make use of the Registry::RegisterFactory constructor or the # REGISTER_FACTORY macro. Other locations should use the InjectFactory helper class to # perform temporary registrations. -REGISTER_FACTORY_TEST_WHITELIST = ("./test/common/config/registry_test.cc", +REGISTER_FACTORY_TEST_ALLOWLIST = ("./test/common/config/registry_test.cc", "./test/integration/clusters/", "./test/integration/filters/") # Files in these paths can use MessageLite::SerializeAsString -SERIALIZE_AS_STRING_WHITELIST = ( +SERIALIZE_AS_STRING_ALLOWLIST = ( "./source/common/config/version_converter.cc", "./source/common/protobuf/utility.cc", "./source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc", @@ -62,17 +62,17 @@ ) # Files in these paths can use Protobuf::util::JsonStringToMessage -JSON_STRING_TO_MESSAGE_WHITELIST = ("./source/common/protobuf/utility.cc") +JSON_STRING_TO_MESSAGE_ALLOWLIST = ("./source/common/protobuf/utility.cc") # Histogram names which are allowed to be suffixed with the unit symbol, all of the pre-existing # ones were grandfathered as part of PR #8484 for backwards compatibility. -HISTOGRAM_WITH_SI_SUFFIX_WHITELIST = ("downstream_cx_length_ms", "downstream_cx_length_ms", +HISTOGRAM_WITH_SI_SUFFIX_ALLOWLIST = ("downstream_cx_length_ms", "downstream_cx_length_ms", "initialization_time_ms", "loop_duration_us", "poll_delay_us", "request_time_ms", "upstream_cx_connect_ms", "upstream_cx_length_ms") # Files in these paths can use std::regex -STD_REGEX_WHITELIST = ( +STD_REGEX_ALLOWLIST = ( "./source/common/common/utility.cc", "./source/common/common/regex.h", "./source/common/common/regex.cc", "./source/common/stats/tag_extractor_impl.h", "./source/common/stats/tag_extractor_impl.cc", @@ -85,7 +85,7 @@ "./tools/clang_tools/api_booster/proto_cxx_utils.cc", "./source/common/common/version.cc") # Only one C++ file should instantiate grpc_init -GRPC_INIT_WHITELIST = ("./source/common/grpc/google_grpc_context.cc") +GRPC_INIT_ALLOWLIST = ("./source/common/grpc/google_grpc_context.cc") CLANG_FORMAT_PATH = os.getenv("CLANG_FORMAT", "clang-format-10") BUILDIFIER_PATH = paths.getBuildifier() @@ -305,49 +305,49 @@ def packageNameForProto(file_path): # To avoid breaking the Lyft import, we just check for path inclusion here. 
-def whitelistedForProtobufDeps(file_path): +def allowlistedForProtobufDeps(file_path): return (file_path.endswith(PROTO_SUFFIX) or file_path.endswith(REPOSITORIES_BZL) or \ - any(path_segment in file_path for path_segment in GOOGLE_PROTOBUF_WHITELIST)) + any(path_segment in file_path for path_segment in GOOGLE_PROTOBUF_ALLOWLIST)) # Real-world time sources should not be instantiated in the source, except for a few # specific cases. They should be passed down from where they are instantied to where # they need to be used, e.g. through the ServerInstance, Dispatcher, or ClusterManager. -def whitelistedForRealTime(file_path): +def allowlistedForRealTime(file_path): if file_path.endswith(".md"): return True - return file_path in REAL_TIME_WHITELIST + return file_path in REAL_TIME_ALLOWLIST -def whitelistedForRegisterFactory(file_path): +def allowlistedForRegisterFactory(file_path): if not file_path.startswith("./test/"): return True - return any(file_path.startswith(prefix) for prefix in REGISTER_FACTORY_TEST_WHITELIST) + return any(file_path.startswith(prefix) for prefix in REGISTER_FACTORY_TEST_ALLOWLIST) -def whitelistedForSerializeAsString(file_path): - return file_path in SERIALIZE_AS_STRING_WHITELIST +def allowlistedForSerializeAsString(file_path): + return file_path in SERIALIZE_AS_STRING_ALLOWLIST -def whitelistedForJsonStringToMessage(file_path): - return file_path in JSON_STRING_TO_MESSAGE_WHITELIST +def allowlistedForJsonStringToMessage(file_path): + return file_path in JSON_STRING_TO_MESSAGE_ALLOWLIST -def whitelistedForHistogramSiSuffix(name): - return name in HISTOGRAM_WITH_SI_SUFFIX_WHITELIST +def allowlistedForHistogramSiSuffix(name): + return name in HISTOGRAM_WITH_SI_SUFFIX_ALLOWLIST -def whitelistedForStdRegex(file_path): - return file_path.startswith("./test") or file_path in STD_REGEX_WHITELIST or file_path.endswith( +def allowlistedForStdRegex(file_path): + return file_path.startswith("./test") or file_path in STD_REGEX_ALLOWLIST or file_path.endswith( DOCS_SUFFIX) -def whitelistedForGrpcInit(file_path): - return file_path in GRPC_INIT_WHITELIST +def allowlistedForGrpcInit(file_path): + return file_path in GRPC_INIT_ALLOWLIST -def whitelistedForUnpackTo(file_path): +def allowlistedForUnpackTo(file_path): return file_path.startswith("./test") or file_path in [ "./source/common/protobuf/utility.cc", "./source/common/protobuf/utility.h" ] @@ -584,7 +584,7 @@ def checkSourceLine(line, file_path, reportError): # Some errors cannot be fixed automatically, and actionable, consistent, # navigable messages should be emitted to make it easy to find and fix # the errors by hand. - if not whitelistedForProtobufDeps(file_path): + if not allowlistedForProtobufDeps(file_path): if '"google/protobuf' in line or "google::protobuf" in line: reportError("unexpected direct dependency on google.protobuf, use " "the definitions in common/protobuf/protobuf.h instead.") @@ -597,17 +597,17 @@ def checkSourceLine(line, file_path, reportError): # We don't check here for std::shared_timed_mutex because that may # legitimately show up in comments, for example this one. 
reportError("Don't use , use absl::Mutex for reader/writer locks.") - if not whitelistedForRealTime(file_path) and not "NO_CHECK_FORMAT(real_time)" in line: + if not allowlistedForRealTime(file_path) and not "NO_CHECK_FORMAT(real_time)" in line: if "RealTimeSource" in line or \ ("RealTimeSystem" in line and not "TestRealTimeSystem" in line) or \ "std::chrono::system_clock::now" in line or "std::chrono::steady_clock::now" in line or \ "std::this_thread::sleep_for" in line or hasCondVarWaitFor(line): reportError("Don't reference real-world time sources from production code; use injection") - if not whitelistedForRegisterFactory(file_path): + if not allowlistedForRegisterFactory(file_path): if "Registry::RegisterFactory<" in line or "REGISTER_FACTORY" in line: reportError("Don't use Registry::RegisterFactory or REGISTER_FACTORY in tests, " "use Registry::InjectFactory instead.") - if not whitelistedForUnpackTo(file_path): + if not allowlistedForUnpackTo(file_path): if "UnpackTo" in line: reportError("Don't use UnpackTo() directly, use MessageUtil::unpackTo() instead") # Check that we use the absl::Time library @@ -653,13 +653,13 @@ def checkSourceLine(line, file_path, reportError): # Matches variants of TEST(), TEST_P(), TEST_F() etc. where the test name begins # with a lowercase letter. reportError("Test names should be CamelCase, starting with a capital letter") - if not whitelistedForSerializeAsString(file_path) and "SerializeAsString" in line: + if not allowlistedForSerializeAsString(file_path) and "SerializeAsString" in line: # The MessageLite::SerializeAsString doesn't generate deterministic serialization, # use MessageUtil::hash instead. reportError( "Don't use MessageLite::SerializeAsString for generating deterministic serialization, use MessageUtil::hash instead." ) - if not whitelistedForJsonStringToMessage(file_path) and "JsonStringToMessage" in line: + if not allowlistedForJsonStringToMessage(file_path) and "JsonStringToMessage" in line: # Centralize all usage of JSON parsing so it is easier to make changes in JSON parsing # behavior. reportError("Don't use Protobuf::util::JsonStringToMessage, use TestUtility::loadFromJson.") @@ -675,16 +675,16 @@ def checkSourceLine(line, file_path, reportError): reportError("Don't use mangled Protobuf names for enum constants") hist_m = re.search("(?<=HISTOGRAM\()[a-zA-Z0-9_]+_(b|kb|mb|ns|us|ms|s)(?=,)", line) - if hist_m and not whitelistedForHistogramSiSuffix(hist_m.group(0)): + if hist_m and not allowlistedForHistogramSiSuffix(hist_m.group(0)): reportError( "Don't suffix histogram names with the unit symbol, " "it's already part of the histogram object and unit-supporting sinks can use this information natively, " "other sinks can add the suffix automatically on flush should they prefer to do so.") - if not whitelistedForStdRegex(file_path) and "std::regex" in line: + if not allowlistedForStdRegex(file_path) and "std::regex" in line: reportError("Don't use std::regex in code that handles untrusted input. 
Use RegexMatcher") - if not whitelistedForGrpcInit(file_path): + if not allowlistedForGrpcInit(file_path): grpc_init_or_shutdown = line.find("grpc_init()") grpc_shutdown = line.find("grpc_shutdown()") if grpc_init_or_shutdown == -1 or (grpc_shutdown != -1 and @@ -701,7 +701,7 @@ def checkBuildLine(line, file_path, reportError): if "@bazel_tools" in line and not (isSkylarkFile(file_path) or file_path.startswith("./bazel/") or "python/runfiles" in line): reportError("unexpected @bazel_tools reference, please indirect via a definition in //bazel") - if not whitelistedForProtobufDeps(file_path) and '"protobuf"' in line: + if not allowlistedForProtobufDeps(file_path) and '"protobuf"' in line: reportError("unexpected direct external dependency on protobuf, use " "//source/common/protobuf instead.") if (envoy_build_rule_check and not isSkylarkFile(file_path) and not isWorkspaceFile(file_path) and diff --git a/tools/protoxform/protoprint.py b/tools/protoxform/protoprint.py index 57a305afa4f6..092c86d6bca7 100755 --- a/tools/protoxform/protoprint.py +++ b/tools/protoxform/protoprint.py @@ -30,7 +30,7 @@ from google.protobuf import text_format # Note: we have to include those proto definitions to make FormatOptions work, -# this also serves as whitelist of extended options. +# this also serves as allowlist of extended options. from google.api import annotations_pb2 as _ from validate import validate_pb2 as _ from envoy.annotations import deprecation_pb2 as _ diff --git a/tools/spelling/check_spelling.sh b/tools/spelling/check_spelling.sh index df43aadcecf7..f6a7eea839c0 100755 --- a/tools/spelling/check_spelling.sh +++ b/tools/spelling/check_spelling.sh @@ -67,16 +67,16 @@ if [[ ! ${ACTUAL_SHA} == ${EXPECT_SHA} ]]; then fi chmod +x "${TMP_DIR}/misspell" - + # Spell checking # All the skipping files are defined in tools/spelling/spelling_skip_files.txt SPELLING_SKIP_FILES="${ROOTDIR}/tools/spelling/spelling_skip_files.txt" -# All the ignore words are defined in tools/spelling/spelling_whitelist_words.txt -SPELLING_WHITELIST_WORDS_FILE="${ROOTDIR}/tools/spelling/spelling_whitelist_words.txt" +# All the ignore words are defined in tools/spelling/spelling_allowlist_words.txt +SPELLING_ALLOWLIST_WORDS_FILE="${ROOTDIR}/tools/spelling/spelling_allowlist_words.txt" -WHITELIST_WORDS=$(echo -n $(cat "${SPELLING_WHITELIST_WORDS_FILE}" | \ +ALLOWLIST_WORDS=$(echo -n $(cat "${SPELLING_ALLOWLIST_WORDS_FILE}" | \ grep -v "^#"|grep -v "^$") | tr ' ' ',') SKIP_FILES=$(echo $(cat "${SPELLING_SKIP_FILES}") | sed "s| | -e |g") git ls-files | grep -v -e ${SKIP_FILES} | xargs "${TMP_DIR}/misspell" -i \ - "${WHITELIST_WORDS}" ${MISSPELL_ARGS} + "${ALLOWLIST_WORDS}" ${MISSPELL_ARGS} diff --git a/tools/spelling/spelling_whitelist_words.txt b/tools/spelling/spelling_allowlist_words.txt similarity index 100% rename from tools/spelling/spelling_whitelist_words.txt rename to tools/spelling/spelling_allowlist_words.txt diff --git a/tools/spelling/spelling_dictionary.txt b/tools/spelling/spelling_dictionary.txt index 4f485c29bf46..a4483164e7e2 100644 --- a/tools/spelling/spelling_dictionary.txt +++ b/tools/spelling/spelling_dictionary.txt @@ -1156,8 +1156,6 @@ vptr wakeup wakeups websocket -whitelist -whitelisted whitespace whitespaces wildcard From e71ce8a59c695015c7c46b5e928afd31716caadb Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Thu, 25 Jun 2020 13:39:13 -0400 Subject: [PATCH 445/909] conn_pool: refactoring out a TCP-friendly base class (#11689) Taking the connection pool shared between HTTP/1.1 and HTTP/2 and refactoring 
it into a TCP friendly base class and shared-HTTP-versions subclass. I'd tried handling HTTP and TCP differences via templates, but that failed because the active client and pool both needed to be templates and both reference the innards of the others, so inheritance it is! Risk Level: Medium (ideally no-op, but largeish refactor) Testing: n/a Docs Changes: n/a Release Notes: n/a Part of #11528 Signed-off-by: Alyssa Wilk --- include/envoy/common/conn_pool.h | 6 +- include/envoy/http/conn_pool.h | 2 +- include/envoy/tcp/conn_pool.h | 2 +- source/common/http/conn_pool_base.cc | 208 ++++++++++++----------- source/common/http/conn_pool_base.h | 170 +++++++++++++----- source/common/http/http1/conn_pool.cc | 8 +- source/common/http/http1/conn_pool.h | 5 +- source/common/http/http2/conn_pool.cc | 10 +- source/common/http/http2/conn_pool.h | 5 +- test/common/http/http1/conn_pool_test.cc | 2 +- test/common/http/http2/conn_pool_test.cc | 2 +- 11 files changed, 257 insertions(+), 163 deletions(-) diff --git a/include/envoy/common/conn_pool.h b/include/envoy/common/conn_pool.h index 7c22f3a43c63..9e42b4025538 100644 --- a/include/envoy/common/conn_pool.h +++ b/include/envoy/common/conn_pool.h @@ -36,12 +36,12 @@ class Cancellable { virtual void cancel(CancelPolicy cancel_policy) PURE; }; -/* +/** * An instance of a generic connection pool. */ -class Instance : public Event::DeferredDeletable { +class Instance { public: - ~Instance() override = default; + virtual ~Instance() = default; /** * Called when a connection pool has been drained of pending requests, busy connections, and diff --git a/include/envoy/http/conn_pool.h b/include/envoy/http/conn_pool.h index ef0d1a4e98d7..753956243524 100644 --- a/include/envoy/http/conn_pool.h +++ b/include/envoy/http/conn_pool.h @@ -48,7 +48,7 @@ class Callbacks { /** * An instance of a generic connection pool. */ -class Instance : public Envoy::ConnectionPool::Instance { +class Instance : public Envoy::ConnectionPool::Instance, public Event::DeferredDeletable { public: ~Instance() override = default; diff --git a/include/envoy/tcp/conn_pool.h b/include/envoy/tcp/conn_pool.h index e5679abb25ec..14dd5677907f 100644 --- a/include/envoy/tcp/conn_pool.h +++ b/include/envoy/tcp/conn_pool.h @@ -126,7 +126,7 @@ class Callbacks { /** * An instance of a generic connection pool. */ -class Instance : public Envoy::ConnectionPool::Instance { +class Instance : public Envoy::ConnectionPool::Instance, public Event::DeferredDeletable { public: /** * Immediately close all existing connection pool connections. This method can be used in cases diff --git a/source/common/http/conn_pool_base.cc b/source/common/http/conn_pool_base.cc index 75213d61104d..432bfb304c64 100644 --- a/source/common/http/conn_pool_base.cc +++ b/source/common/http/conn_pool_base.cc @@ -8,47 +8,14 @@ #include "common/upstream/upstream_impl.h" namespace Envoy { -namespace Http { -Network::TransportSocketOptionsSharedPtr -wrapTransportSocketOptions(Network::TransportSocketOptionsSharedPtr transport_socket_options, - Protocol protocol) { - if (!Runtime::runtimeFeatureEnabled("envoy.reloadable_features.http_default_alpn")) { - return transport_socket_options; - } - - // If configured to do so, we override the ALPN to use for the upstream connection to match the - // selected protocol. 
- std::string alpn; - switch (protocol) { - case Http::Protocol::Http10: - NOT_REACHED_GCOVR_EXCL_LINE; - case Http::Protocol::Http11: - alpn = Http::Utility::AlpnNames::get().Http11; - break; - case Http::Protocol::Http2: - alpn = Http::Utility::AlpnNames::get().Http2; - break; - case Http::Protocol::Http3: - // TODO(snowp): Add once HTTP/3 upstream support is added. - NOT_IMPLEMENTED_GCOVR_EXCL_LINE; - break; - } - - if (transport_socket_options) { - return std::make_shared( - std::move(alpn), transport_socket_options); - } else { - return std::make_shared( - "", std::vector{}, std::vector{}, std::move(alpn)); - } -} +namespace ConnectionPool { ConnPoolImplBase::ConnPoolImplBase( Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, Event::Dispatcher& dispatcher, const Network::ConnectionSocket::OptionsSharedPtr& options, - const Network::TransportSocketOptionsSharedPtr& transport_socket_options, Protocol protocol) + const Network::TransportSocketOptionsSharedPtr& transport_socket_options) : host_(host), priority_(priority), dispatcher_(dispatcher), socket_options_(options), - transport_socket_options_(wrapTransportSocketOptions(transport_socket_options, protocol)) {} + transport_socket_options_(transport_socket_options) {} ConnPoolImplBase::~ConnPoolImplBase() { ASSERT(ready_clients_.empty()); @@ -94,27 +61,26 @@ void ConnPoolImplBase::tryCreateNewConnection() { } } -void ConnPoolImplBase::attachRequestToClient(ActiveClient& client, - ResponseDecoder& response_decoder, - ConnectionPool::Callbacks& callbacks) { - ASSERT(client.state_ == ActiveClient::State::READY); +void ConnPoolImplBase::attachRequestToClientImpl(Envoy::ConnectionPool::ActiveClient& client, + void* context) { + ASSERT(client.state_ == Envoy::ConnectionPool::ActiveClient::State::READY); if (!host_->cluster().resourceManager(priority_).requests().canCreate()) { ENVOY_LOG(debug, "max requests overflow"); - callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::Overflow, absl::string_view(), - nullptr); + onPoolFailure(client.real_host_description_, absl::string_view(), + ConnectionPool::PoolFailureReason::Overflow, context); host_->cluster().stats().upstream_rq_pending_overflow_.inc(); } else { ENVOY_CONN_LOG(debug, "creating stream", client); - RequestEncoder& new_encoder = client.newStreamEncoder(response_decoder); client.remaining_requests_--; if (client.remaining_requests_ == 0) { ENVOY_CONN_LOG(debug, "maximum requests per connection, DRAINING", client); host_->cluster().stats().upstream_cx_max_requests_.inc(); - transitionActiveClientState(client, ActiveClient::State::DRAINING); - } else if (client.numActiveRequests() >= client.concurrent_request_limit_) { - transitionActiveClientState(client, ActiveClient::State::BUSY); + transitionActiveClientState(client, Envoy::ConnectionPool::ActiveClient::State::DRAINING); + } else if (client.numActiveRequests() + 1 >= client.concurrent_request_limit_) { + // As soon as the new request is created, the client will be maxed out. 
+ transitionActiveClientState(client, Envoy::ConnectionPool::ActiveClient::State::BUSY); } num_active_requests_++; @@ -123,12 +89,13 @@ void ConnPoolImplBase::attachRequestToClient(ActiveClient& client, host_->cluster().stats().upstream_rq_total_.inc(); host_->cluster().stats().upstream_rq_active_.inc(); host_->cluster().resourceManager(priority_).requests().inc(); - callbacks.onPoolReady(new_encoder, client.real_host_description_, - client.codec_client_->streamInfo()); + + onPoolReady(client, context); } } -void ConnPoolImplBase::onRequestClosed(ActiveClient& client, bool delay_attaching_request) { +void ConnPoolImplBase::onRequestClosed(Envoy::ConnectionPool::ActiveClient& client, + bool delay_attaching_request) { ENVOY_CONN_LOG(debug, "destroying stream: {} remaining", client, client.numActiveRequests()); ASSERT(num_active_requests_ > 0); num_active_requests_--; @@ -137,7 +104,7 @@ void ConnPoolImplBase::onRequestClosed(ActiveClient& client, bool delay_attachin host_->cluster().resourceManager(priority_).requests().dec(); if (client.state_ == ActiveClient::State::DRAINING && client.numActiveRequests() == 0) { // Close out the draining client if we no longer have active requests. - client.codec_client_->close(); + client.close(); } else if (client.state_ == ActiveClient::State::BUSY) { // A request was just ended, so we should be below the limit now. ASSERT(client.numActiveRequests() < client.concurrent_request_limit_); @@ -149,17 +116,16 @@ void ConnPoolImplBase::onRequestClosed(ActiveClient& client, bool delay_attachin } } -ConnectionPool::Cancellable* ConnPoolImplBase::newStream(ResponseDecoder& response_decoder, - ConnectionPool::Callbacks& callbacks) { +ConnectionPool::Cancellable* ConnPoolImplBase::newStream(void* context) { if (!ready_clients_.empty()) { ActiveClient& client = *ready_clients_.front(); ENVOY_CONN_LOG(debug, "using existing connection", client); - attachRequestToClient(client, response_decoder, callbacks); + attachRequestToClientImpl(client, context); return nullptr; } if (host_->cluster().resourceManager(priority_).pendingRequests().canCreate()) { - ConnectionPool::Cancellable* pending = newPendingRequest(response_decoder, callbacks); + ConnectionPool::Cancellable* pending = newPendingRequest(context); // This must come after newPendingRequest() because this function uses the // length of pending_requests_ to determine if a new connection is needed. @@ -168,8 +134,8 @@ ConnectionPool::Cancellable* ConnPoolImplBase::newStream(ResponseDecoder& respon return pending; } else { ENVOY_LOG(debug, "max pending requests overflow"); - callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::Overflow, absl::string_view(), - nullptr); + onPoolFailure(nullptr, absl::string_view(), ConnectionPool::PoolFailureReason::Overflow, + context); host_->cluster().stats().upstream_rq_pending_overflow_.inc(); return nullptr; } @@ -180,16 +146,11 @@ void ConnPoolImplBase::onUpstreamReady() { ActiveClientPtr& client = ready_clients_.front(); ENVOY_CONN_LOG(debug, "attaching to next request", *client); // Pending requests are pushed onto the front, so pull from the back. 
- attachRequestToClient(*client, pending_requests_.back()->decoder_, - pending_requests_.back()->callbacks_); + attachRequestToClient(*client, *pending_requests_.back()); pending_requests_.pop_back(); } } -bool ConnPoolImplBase::hasActiveConnections() const { - return (!pending_requests_.empty() || (num_active_requests_ > 0)); -} - std::list& ConnPoolImplBase::owningList(ActiveClient::State state) { switch (state) { case ActiveClient::State::CONNECTING: @@ -222,7 +183,7 @@ void ConnPoolImplBase::transitionActiveClientState(ActiveClient& client, } } -void ConnPoolImplBase::addDrainedCallback(DrainedCb cb) { +void ConnPoolImplBase::addDrainedCallbackImpl(Instance::DrainedCb cb) { drained_callbacks_.push_back(cb); checkForDrained(); } @@ -232,7 +193,7 @@ void ConnPoolImplBase::closeIdleConnections() { std::list to_close; for (auto& client : ready_clients_) { - if (!client->hasActiveRequests()) { + if (client->numActiveRequests() == 0) { to_close.push_back(client.get()); } } @@ -248,7 +209,7 @@ void ConnPoolImplBase::closeIdleConnections() { } } -void ConnPoolImplBase::drainConnections() { +void ConnPoolImplBase::drainConnectionsImpl() { closeIdleConnections(); // closeIdleConnections() closes all connections in ready_clients_ with no active requests, @@ -276,7 +237,7 @@ void ConnPoolImplBase::checkForDrained() { if (pending_requests_.empty() && ready_clients_.empty() && busy_clients_.empty() && connecting_clients_.empty()) { ENVOY_LOG(debug, "invoking drained callbacks"); - for (const DrainedCb& cb : drained_callbacks_) { + for (const Instance::DrainedCb& cb : drained_callbacks_) { cb(); } } @@ -319,8 +280,7 @@ void ConnPoolImplBase::onConnectionEvent(ActiveClient& client, absl::string_view // do with the request. // NOTE: We move the existing pending requests to a temporary list. This is done so that // if retry logic submits a new request to the pool, we don't fail it inline. 
- purgePendingRequests(client.real_host_description_, - client.codec_client_->connectionFailureReason(), reason); + purgePendingRequests(client.real_host_description_, failure_reason, reason); } // We need to release our resourceManager() resources before checking below for @@ -357,9 +317,7 @@ void ConnPoolImplBase::onConnectionEvent(ActiveClient& client, absl::string_view } } -PendingRequest::PendingRequest(ConnPoolImplBase& parent, ResponseDecoder& decoder, - ConnectionPool::Callbacks& callbacks) - : parent_(parent), decoder_(decoder), callbacks_(callbacks) { +PendingRequest::PendingRequest(ConnPoolImplBase& parent) : parent_(parent) { parent_.host_->cluster().stats().upstream_rq_pending_total_.inc(); parent_.host_->cluster().stats().upstream_rq_pending_active_.inc(); parent_.host_->cluster().resourceManager(parent_.priority_).pendingRequests().inc(); @@ -374,15 +332,6 @@ void PendingRequest::cancel(Envoy::ConnectionPool::CancelPolicy policy) { parent_.onPendingRequestCancel(*this, policy); } -ConnectionPool::Cancellable* -ConnPoolImplBase::newPendingRequest(ResponseDecoder& decoder, - ConnectionPool::Callbacks& callbacks) { - ENVOY_LOG(debug, "queueing request due to no available connections"); - PendingRequestPtr pending_request(new PendingRequest(*this, decoder, callbacks)); - pending_request->moveIntoList(std::move(pending_request), pending_requests_); - return pending_requests_.front().get(); -} - void ConnPoolImplBase::purgePendingRequests( const Upstream::HostDescriptionConstSharedPtr& host_description, absl::string_view failure_reason, ConnectionPool::PoolFailureReason reason) { @@ -393,7 +342,7 @@ void ConnPoolImplBase::purgePendingRequests( PendingRequestPtr request = pending_requests_to_purge_.front()->removeFromList(pending_requests_to_purge_); host_->cluster().stats().upstream_rq_pending_failure_eject_.inc(); - request->callbacks_.onPoolFailure(reason, failure_reason, host_description); + onPoolFailure(host_description, failure_reason, reason, request->context()); } } @@ -438,12 +387,6 @@ ActiveClient::ActiveClient(ConnPoolImplBase& parent, uint64_t lifetime_request_l : parent_(parent), remaining_requests_(translateZeroToUnlimited(lifetime_request_limit)), concurrent_request_limit_(translateZeroToUnlimited(concurrent_request_limit)), connect_timer_(parent_.dispatcher_.createTimer([this]() -> void { onConnectTimeout(); })) { - Upstream::Host::CreateConnectionData data = parent_.host_->createConnection( - parent_.dispatcher_, parent_.socket_options_, parent_.transport_socket_options_); - real_host_description_ = data.host_description_; - codec_client_ = parent_.createCodecClient(data); - codec_client_->addConnectionCallbacks(*this); - conn_connect_ms_ = std::make_unique( parent_.host_->cluster().stats().upstream_cx_connect_ms_, parent_.dispatcher_.timeSource()); conn_length_ = std::make_unique( @@ -455,17 +398,14 @@ ActiveClient::ActiveClient(ConnPoolImplBase& parent, uint64_t lifetime_request_l parent_.host_->cluster().stats().upstream_cx_total_.inc(); parent_.host_->cluster().stats().upstream_cx_active_.inc(); parent_.host_->cluster().resourceManager(parent_.priority_).connections().inc(); - - codec_client_->setConnectionStats( - {parent_.host_->cluster().stats().upstream_cx_rx_bytes_total_, - parent_.host_->cluster().stats().upstream_cx_rx_bytes_buffered_, - parent_.host_->cluster().stats().upstream_cx_tx_bytes_total_, - parent_.host_->cluster().stats().upstream_cx_tx_bytes_buffered_, - &parent_.host_->cluster().stats().bind_errors_, nullptr}); } 
ActiveClient::~ActiveClient() { releaseResources(); } +void ActiveClient::onEvent(Network::ConnectionEvent event) { + parent_.onConnectionEvent(*this, "", event); +} + void ActiveClient::releaseResources() { if (!resources_released_) { resources_released_ = true; @@ -479,14 +419,88 @@ void ActiveClient::releaseResources() { } void ActiveClient::onConnectTimeout() { - ENVOY_CONN_LOG(debug, "connect timeout", *codec_client_); + ENVOY_CONN_LOG(debug, "connect timeout", *this); parent_.host_->cluster().stats().upstream_cx_connect_timeout_.inc(); timed_out_ = true; close(); } -void ActiveClient::onEvent(Network::ConnectionEvent event) { - parent_.onConnectionEvent(*this, codec_client_->connectionFailureReason(), event); +} // namespace ConnectionPool + +namespace Http { + +Network::TransportSocketOptionsSharedPtr +wrapTransportSocketOptions(Network::TransportSocketOptionsSharedPtr transport_socket_options, + Protocol protocol) { + if (!Runtime::runtimeFeatureEnabled("envoy.reloadable_features.http_default_alpn")) { + return transport_socket_options; + } + + // If configured to do so, we override the ALPN to use for the upstream connection to match the + // selected protocol. + std::string alpn; + switch (protocol) { + case Http::Protocol::Http10: + NOT_REACHED_GCOVR_EXCL_LINE; + case Http::Protocol::Http11: + alpn = Http::Utility::AlpnNames::get().Http11; + break; + case Http::Protocol::Http2: + alpn = Http::Utility::AlpnNames::get().Http2; + break; + case Http::Protocol::Http3: + // TODO(snowp): Add once HTTP/3 upstream support is added. + NOT_IMPLEMENTED_GCOVR_EXCL_LINE; + break; + } + + if (transport_socket_options) { + return std::make_shared( + std::move(alpn), transport_socket_options); + } else { + return std::make_shared( + "", std::vector{}, std::vector{}, std::move(alpn)); + } +} + +HttpConnPoolImplBase::HttpConnPoolImplBase( + Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, + Event::Dispatcher& dispatcher, const Network::ConnectionSocket::OptionsSharedPtr& options, + const Network::TransportSocketOptionsSharedPtr& transport_socket_options, + Http::Protocol protocol) + : Envoy::ConnectionPool::ConnPoolImplBase( + host, priority, dispatcher, options, + wrapTransportSocketOptions(transport_socket_options, protocol)) {} + +ConnectionPool::Cancellable* +HttpConnPoolImplBase::newStream(Http::ResponseDecoder& response_decoder, + Http::ConnectionPool::Callbacks& callbacks) { + AttachContext context = std::make_pair(&response_decoder, &callbacks); + return Envoy::ConnectionPool::ConnPoolImplBase::newStream(reinterpret_cast(&context)); +} + +bool HttpConnPoolImplBase::hasActiveConnections() const { + return (!pending_requests_.empty() || (num_active_requests_ > 0)); +} + +ConnectionPool::Cancellable* HttpConnPoolImplBase::newPendingRequest(void* context) { + Http::ResponseDecoder& decoder = *reinterpret_cast(context)->first; + Http::ConnectionPool::Callbacks& callbacks = *reinterpret_cast(context)->second; + ENVOY_LOG(debug, "queueing request due to no available connections"); + Envoy::ConnectionPool::PendingRequestPtr pending_request( + new HttpPendingRequest(*this, decoder, callbacks)); + pending_request->moveIntoList(std::move(pending_request), pending_requests_); + return pending_requests_.front().get(); +} + +void HttpConnPoolImplBase::onPoolReady(Envoy::ConnectionPool::ActiveClient& client, void* context) { + ActiveClient* http_client = reinterpret_cast(&client); + auto* pair = reinterpret_cast(context); + Http::ResponseDecoder& response_decoder = *pair->first; + 
Http::ConnectionPool::Callbacks& callbacks = *pair->second; + Http::RequestEncoder& new_encoder = http_client->newStreamEncoder(response_decoder); + callbacks.onPoolReady(new_encoder, client.real_host_description_, + http_client->codec_client_->streamInfo()); } } // namespace Http diff --git a/source/common/http/conn_pool_base.h b/source/common/http/conn_pool_base.h index 3d74f0989182..715511255c00 100644 --- a/source/common/http/conn_pool_base.h +++ b/source/common/http/conn_pool_base.h @@ -11,7 +11,9 @@ #include "absl/strings/string_view.h" namespace Envoy { -namespace Http { +// TODO(alyssawilk) move all the code in this namespace to // source/common/conn_pool/ in +// a follow up, in the hopes git will preserve history. +namespace ConnectionPool { class ConnPoolImplBase; @@ -33,6 +35,7 @@ class ActiveClient : public LinkedObject, void onAboveWriteBufferHighWatermark() override {} void onBelowWriteBufferLowWatermark() override {} + // Called if the connection does not complete within the cluster's connectTimeout() void onConnectTimeout(); // Returns the concurrent request limit, accounting for if the total request limit @@ -41,12 +44,14 @@ class ActiveClient : public LinkedObject, return std::min(remaining_requests_, concurrent_request_limit_); } - void close() { codec_client_->close(); } - uint64_t id() const { return codec_client_->id(); } - virtual bool hasActiveRequests() const PURE; + // Closes the underlying connection. + virtual void close() PURE; + // Returns the ID of the underlying connection. + virtual uint64_t id() const PURE; + // Returns true if this closed with an incomplete request, for stats tracking/ purposes. virtual bool closingWithIncompleteRequest() const PURE; - virtual size_t numActiveRequests() const { return codec_client_->numActiveRequests(); } - virtual RequestEncoder& newStreamEncoder(ResponseDecoder& response_decoder) PURE; + // Returns the number of active requests on this connection. + virtual size_t numActiveRequests() const PURE; enum class State { CONNECTING, // Connection is not yet established. @@ -61,7 +66,6 @@ class ActiveClient : public LinkedObject, uint64_t remaining_requests_; const uint64_t concurrent_request_limit_; State state_{State::CONNECTING}; - CodecClientPtr codec_client_; Upstream::HostDescriptionConstSharedPtr real_host_description_; Stats::TimespanPtr conn_connect_ms_; Stats::TimespanPtr conn_length_; @@ -70,43 +74,37 @@ class ActiveClient : public LinkedObject, bool timed_out_{false}; }; -using ActiveClientPtr = std::unique_ptr; - +// PendingRequest is the base class for a connection which has been created but not yet established. class PendingRequest : public LinkedObject, public ConnectionPool::Cancellable { public: - PendingRequest(ConnPoolImplBase& parent, ResponseDecoder& decoder, - ConnectionPool::Callbacks& callbacks); + PendingRequest(ConnPoolImplBase& parent); ~PendingRequest() override; // ConnectionPool::Cancellable void cancel(Envoy::ConnectionPool::CancelPolicy policy) override; + // The context here returns a pointer to whatever context is provided with newStream(), + // which will be passed back to the parent in onPoolReady or onPoolFailure. + virtual void* context() PURE; + ConnPoolImplBase& parent_; - ResponseDecoder& decoder_; - ConnectionPool::Callbacks& callbacks_; }; using PendingRequestPtr = std::unique_ptr; +using ActiveClientPtr = std::unique_ptr; + // Base class that handles request queueing logic shared between connection pool implementations. 
-class ConnPoolImplBase : public ConnectionPool::Instance, - protected Logger::Loggable { +class ConnPoolImplBase : protected Logger::Loggable { public: - // ConnectionPool::Instance - ConnectionPool::Cancellable* newStream(ResponseDecoder& response_decoder, - ConnectionPool::Callbacks& callbacks) override; - void addDrainedCallback(DrainedCb cb) override; - bool hasActiveConnections() const override; - void drainConnections() override; - Upstream::HostDescriptionConstSharedPtr host() const override { return host_; }; - -protected: ConnPoolImplBase(Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, Event::Dispatcher& dispatcher, const Network::ConnectionSocket::OptionsSharedPtr& options, - const Network::TransportSocketOptionsSharedPtr& transport_socket_options, - Protocol protocol); - ~ConnPoolImplBase() override; + const Network::TransportSocketOptionsSharedPtr& transport_socket_options); + virtual ~ConnPoolImplBase(); + + void addDrainedCallbackImpl(Instance::DrainedCb cb); + void drainConnectionsImpl(); // Closes and destroys all connections. This must be called in the destructor of // derived classes because the derived ActiveClient will downcast parent_ to a more @@ -114,17 +112,12 @@ class ConnPoolImplBase : public ConnectionPool::Instance, // (due to bottom-up destructor ordering in c++) that access will be invalid. void destructAllConnections(); - virtual CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) PURE; - // Returns a new instance of ActiveClient. virtual ActiveClientPtr instantiateActiveClient() PURE; // Gets a pointer to the list that currently owns this client. std::list& owningList(ActiveClient::State state); - // Creates a new PendingRequest and enqueues it into the request queue. - ConnectionPool::Cancellable* newPendingRequest(ResponseDecoder& decoder, - ConnectionPool::Callbacks& callbacks); // Removes the PendingRequest from the list of requests. Called when the PendingRequest is // cancelled, e.g. when the stream is reset before a connection has been established. void onPendingRequestCancel(PendingRequest& request, Envoy::ConnectionPool::CancelPolicy policy); @@ -137,9 +130,6 @@ class ConnPoolImplBase : public ConnectionPool::Instance, // Closes any idle connections. void closeIdleConnections(); - // Called by derived classes any time a request is completed or destroyed for any reason. - void onRequestClosed(ActiveClient& client, bool delay_attaching_request); - // Changes the state_ of an ActiveClient and moves to the appropriate list. void transitionActiveClientState(ActiveClient& client, ActiveClient::State new_state); @@ -147,26 +137,34 @@ class ConnPoolImplBase : public ConnectionPool::Instance, Network::ConnectionEvent event); void checkForDrained(); void onUpstreamReady(); - void attachRequestToClient(ActiveClient& client, ResponseDecoder& response_decoder, - ConnectionPool::Callbacks& callbacks); + ConnectionPool::Cancellable* newStream(void* context); + + virtual ConnectionPool::Cancellable* newPendingRequest(void* context) PURE; // Creates a new connection if allowed by resourceManager, or if created to avoid // starving this pool. 
void tryCreateNewConnection(); -public: + virtual void attachRequestToClient(ActiveClient& client, PendingRequest& request) PURE; + void attachRequestToClientImpl(Envoy::ConnectionPool::ActiveClient& client, void* pair); + virtual void onPoolFailure(const Upstream::HostDescriptionConstSharedPtr& host_description, + absl::string_view failure_reason, + ConnectionPool::PoolFailureReason pool_failure_reason, + void* context) PURE; + virtual void onPoolReady(ActiveClient& client, void* context) PURE; + // Called by derived classes any time a request is completed or destroyed for any reason. + void onRequestClosed(Envoy::ConnectionPool::ActiveClient& client, bool delay_attaching_request); + const Upstream::HostConstSharedPtr host_; const Upstream::ResourcePriority priority_; -protected: friend class ActiveClient; friend class PendingRequest; - Event::Dispatcher& dispatcher_; const Network::ConnectionSocket::OptionsSharedPtr socket_options_; const Network::TransportSocketOptionsSharedPtr transport_socket_options_; - std::list drained_callbacks_; + std::list drained_callbacks_; std::list pending_requests_; // When calling purgePendingRequests, this list will be used to hold the requests we are about @@ -190,5 +188,95 @@ class ConnPoolImplBase : public ConnectionPool::Instance, // if all CONNECTING connections become connected. uint64_t connecting_request_capacity_{0}; }; +} // namespace ConnectionPool + +namespace Http { + +// An implementation of Envoy::ConnectionPool::PendingRequest for HTTP/1.1 and HTTP/2 +class HttpPendingRequest : public Envoy::ConnectionPool::PendingRequest { +public: + // OnPoolSuccess for HTTP requires both the decoder and callbacks. OnPoolFailure + // requires only the callbacks, but passes both for consistency. + using AttachContext = std::pair; + HttpPendingRequest(Envoy::ConnectionPool::ConnPoolImplBase& parent, + Http::ResponseDecoder& decoder, Http::ConnectionPool::Callbacks& callbacks) + : Envoy::ConnectionPool::PendingRequest(parent), + context_(std::make_pair(&decoder, &callbacks)) {} + + void* context() override { return static_cast(&context_); } + AttachContext context_; +}; + +// An implementation of Envoy::ConnectionPool::ConnPoolImplBase for shared code +// between HTTP/1.1 and HTTP/2 +class HttpConnPoolImplBase : public Envoy::ConnectionPool::ConnPoolImplBase, + public Http::ConnectionPool::Instance { +public: + using AttachContext = std::pair; + + HttpConnPoolImplBase(Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, + Event::Dispatcher& dispatcher, + const Network::ConnectionSocket::OptionsSharedPtr& options, + const Network::TransportSocketOptionsSharedPtr& transport_socket_options, + Http::Protocol protocol); + + // ConnectionPool::Instance + void addDrainedCallback(DrainedCb cb) override { addDrainedCallbackImpl(cb); } + void drainConnections() override { drainConnectionsImpl(); } + Upstream::HostDescriptionConstSharedPtr host() const override { return host_; } + ConnectionPool::Cancellable* newStream(Http::ResponseDecoder& response_decoder, + Http::ConnectionPool::Callbacks& callbacks) override; + bool hasActiveConnections() const override; + + void attachRequestToClient(Envoy::ConnectionPool::ActiveClient& client, + Envoy::ConnectionPool::PendingRequest& base_request) override { + HttpPendingRequest* request = reinterpret_cast(&base_request); + attachRequestToClientImpl(client, static_cast(&request->context_)); + } + + // Creates a new PendingRequest and enqueues it into the request queue. 
+ ConnectionPool::Cancellable* newPendingRequest(void* context) override; + void onPoolFailure(const Upstream::HostDescriptionConstSharedPtr& host_description, + absl::string_view failure_reason, ConnectionPool::PoolFailureReason reason, + void* context) override { + auto* callbacks = reinterpret_cast(context)->second; + callbacks->onPoolFailure(reason, failure_reason, host_description); + } + void onPoolReady(Envoy::ConnectionPool::ActiveClient& client, void* context) override; + + virtual CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) PURE; +}; + +// An implementation of Envoy::ConnectionPool::ActiveClient for HTTP/1.1 and HTTP/2 +class ActiveClient : public Envoy::ConnectionPool::ActiveClient { +public: + ActiveClient(HttpConnPoolImplBase& parent, uint64_t lifetime_request_limit, + uint64_t concurrent_request_limit) + : Envoy::ConnectionPool::ActiveClient(parent, lifetime_request_limit, + concurrent_request_limit) { + Upstream::Host::CreateConnectionData data = parent_.host_->createConnection( + parent_.dispatcher_, parent_.socket_options_, parent_.transport_socket_options_); + real_host_description_ = data.host_description_; + codec_client_ = parent.createCodecClient(data); + codec_client_->addConnectionCallbacks(*this); + codec_client_->setConnectionStats( + {parent_.host_->cluster().stats().upstream_cx_rx_bytes_total_, + parent_.host_->cluster().stats().upstream_cx_rx_bytes_buffered_, + parent_.host_->cluster().stats().upstream_cx_tx_bytes_total_, + parent_.host_->cluster().stats().upstream_cx_tx_bytes_buffered_, + &parent_.host_->cluster().stats().bind_errors_, nullptr}); + } + void close() override { codec_client_->close(); } + virtual Http::RequestEncoder& newStreamEncoder(Http::ResponseDecoder& response_decoder) PURE; + void onEvent(Network::ConnectionEvent event) override { + parent_.onConnectionEvent(*this, codec_client_->connectionFailureReason(), event); + } + size_t numActiveRequests() const override { return codec_client_->numActiveRequests(); } + uint64_t id() const override { return codec_client_->id(); } + + Http::CodecClientPtr codec_client_; +}; + } // namespace Http + } // namespace Envoy diff --git a/source/common/http/http1/conn_pool.cc b/source/common/http/http1/conn_pool.cc index 050dcb4ce3cd..f7a6453e2b28 100644 --- a/source/common/http/http1/conn_pool.cc +++ b/source/common/http/http1/conn_pool.cc @@ -27,8 +27,8 @@ ConnPoolImpl::ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSha Upstream::ResourcePriority priority, const Network::ConnectionSocket::OptionsSharedPtr& options, const Network::TransportSocketOptionsSharedPtr& transport_socket_options) - : ConnPoolImplBase(std::move(host), std::move(priority), dispatcher, options, - transport_socket_options, Protocol::Http11), + : HttpConnPoolImplBase(std::move(host), std::move(priority), dispatcher, options, + transport_socket_options, Protocol::Http11), upstream_ready_cb_(dispatcher_.createSchedulableCallback([this]() { upstream_ready_enabled_ = false; onUpstreamReady(); @@ -36,7 +36,7 @@ ConnPoolImpl::ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSha ConnPoolImpl::~ConnPoolImpl() { destructAllConnections(); } -ActiveClientPtr ConnPoolImpl::instantiateActiveClient() { +Envoy::ConnectionPool::ActiveClientPtr ConnPoolImpl::instantiateActiveClient() { return std::make_unique(*this); } @@ -120,8 +120,6 @@ ConnPoolImpl::ActiveClient::ActiveClient(ConnPoolImpl& parent) parent.host_->cluster().stats().upstream_cx_http1_total_.inc(); } -bool 
ConnPoolImpl::ActiveClient::hasActiveRequests() const { return stream_wrapper_ != nullptr; } - bool ConnPoolImpl::ActiveClient::closingWithIncompleteRequest() const { return (stream_wrapper_ != nullptr) && (!stream_wrapper_->decode_complete_); } diff --git a/source/common/http/http1/conn_pool.h b/source/common/http/http1/conn_pool.h index e8ddc883525b..c7538705a6ff 100644 --- a/source/common/http/http1/conn_pool.h +++ b/source/common/http/http1/conn_pool.h @@ -17,7 +17,7 @@ namespace Http1 { * address. Higher layer code should handle resolving DNS on error and creating a new pool * bound to a different IP address. */ -class ConnPoolImpl : public Envoy::Http::ConnPoolImplBase { +class ConnPoolImpl : public Http::HttpConnPoolImplBase { public: ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, @@ -30,7 +30,7 @@ class ConnPoolImpl : public Envoy::Http::ConnPoolImplBase { Http::Protocol protocol() const override { return Http::Protocol::Http11; } // ConnPoolImplBase - ActiveClientPtr instantiateActiveClient() override; + Envoy::ConnectionPool::ActiveClientPtr instantiateActiveClient() override; protected: class ActiveClient; @@ -71,7 +71,6 @@ class ConnPoolImpl : public Envoy::Http::ConnPoolImplBase { ConnPoolImpl& parent() { return static_cast(parent_); } // ConnPoolImplBase::ActiveClient - bool hasActiveRequests() const override; bool closingWithIncompleteRequest() const override; RequestEncoder& newStreamEncoder(ResponseDecoder& response_decoder) override; diff --git a/source/common/http/http2/conn_pool.cc b/source/common/http/http2/conn_pool.cc index 01dd1583b46e..cde47b8498f2 100644 --- a/source/common/http/http2/conn_pool.cc +++ b/source/common/http/http2/conn_pool.cc @@ -16,12 +16,12 @@ ConnPoolImpl::ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSha Upstream::ResourcePriority priority, const Network::ConnectionSocket::OptionsSharedPtr& options, const Network::TransportSocketOptionsSharedPtr& transport_socket_options) - : ConnPoolImplBase(std::move(host), std::move(priority), dispatcher, options, - transport_socket_options, Protocol::Http2) {} + : HttpConnPoolImplBase(std::move(host), std::move(priority), dispatcher, options, + transport_socket_options, Protocol::Http2) {} ConnPoolImpl::~ConnPoolImpl() { destructAllConnections(); } -ActiveClientPtr ConnPoolImpl::instantiateActiveClient() { +Envoy::ConnectionPool::ActiveClientPtr ConnPoolImpl::instantiateActiveClient() { return std::make_unique(*this); } void ConnPoolImpl::onGoAway(ActiveClient& client, Http::GoAwayErrorCode) { @@ -74,10 +74,6 @@ ConnPoolImpl::ActiveClient::ActiveClient(ConnPoolImpl& parent) parent.host_->cluster().stats().upstream_cx_http2_total_.inc(); } -bool ConnPoolImpl::ActiveClient::hasActiveRequests() const { - return codec_client_->numActiveRequests() > 0; -} - bool ConnPoolImpl::ActiveClient::closingWithIncompleteRequest() const { return closed_with_active_rq_; } diff --git a/source/common/http/http2/conn_pool.h b/source/common/http/http2/conn_pool.h index 8e6e852a609a..9ae8a78834e8 100644 --- a/source/common/http/http2/conn_pool.h +++ b/source/common/http/http2/conn_pool.h @@ -16,7 +16,7 @@ namespace Http2 { * shifting to a new connection if we reach max streams on the primary. This is a base class * used for both the prod implementation as well as the testing one. 
*/ -class ConnPoolImpl : public Envoy::Http::ConnPoolImplBase { +class ConnPoolImpl : public Envoy::Http::HttpConnPoolImplBase { public: ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, @@ -29,7 +29,7 @@ class ConnPoolImpl : public Envoy::Http::ConnPoolImplBase { Http::Protocol protocol() const override { return Http::Protocol::Http2; } // ConnPoolImplBase - ActiveClientPtr instantiateActiveClient() override; + Envoy::ConnectionPool::ActiveClientPtr instantiateActiveClient() override; protected: class ActiveClient : public CodecClientCallbacks, @@ -42,7 +42,6 @@ class ConnPoolImpl : public Envoy::Http::ConnPoolImplBase { ConnPoolImpl& parent() { return static_cast(parent_); } // ConnPoolImpl::ActiveClient - bool hasActiveRequests() const override; bool closingWithIncompleteRequest() const override; RequestEncoder& newStreamEncoder(ResponseDecoder& response_decoder) override; diff --git a/test/common/http/http1/conn_pool_test.cc b/test/common/http/http1/conn_pool_test.cc index 9b2bdc3f9e7e..91e692527928 100644 --- a/test/common/http/http1/conn_pool_test.cc +++ b/test/common/http/http1/conn_pool_test.cc @@ -103,10 +103,10 @@ class ConnPoolImplForTest : public ConnPoolImpl { } }, Upstream::makeTestHost(cluster, "tcp://127.0.0.1:9000"), *test_client.client_dispatcher_); + EXPECT_CALL(*test_client.connect_timer_, enableTimer(_, _)); EXPECT_CALL(mock_dispatcher_, createClientConnection_(_, _, _, _)) .WillOnce(Return(test_client.connection_)); EXPECT_CALL(*this, createCodecClient_()).WillOnce(Return(test_client.codec_client_)); - EXPECT_CALL(*test_client.connect_timer_, enableTimer(_, _)); ON_CALL(*test_client.codec_, protocol()).WillByDefault(Return(protocol)); } diff --git a/test/common/http/http2/conn_pool_test.cc b/test/common/http/http2/conn_pool_test.cc index fa8df9101d85..c3fab0eeedab 100644 --- a/test/common/http/http2/conn_pool_test.cc +++ b/test/common/http/http2/conn_pool_test.cc @@ -79,6 +79,7 @@ class Http2ConnPoolImplTest : public testing::Test { test_client.codec_ = new NiceMock(); test_client.connect_timer_ = new NiceMock(&dispatcher_); test_client.client_dispatcher_ = api_->allocateDispatcher("test_thread"); + EXPECT_CALL(*test_client.connect_timer_, enableTimer(_, _)); return test_client; } @@ -101,7 +102,6 @@ class Http2ConnPoolImplTest : public testing::Test { .WillOnce(Invoke([this](Upstream::Host::CreateConnectionData&) -> CodecClient* { return test_clients_.back().codec_client_; })); - EXPECT_CALL(*test_client.connect_timer_, enableTimer(_, _)); } // Creates a new test client, expecting a new connection to be created and associated From 5330d15b6b37af3d3ff39cd45ec710fbe7dc6d6d Mon Sep 17 00:00:00 2001 From: Lisa Lu Date: Thu, 25 Jun 2020 10:48:53 -0700 Subject: [PATCH 446/909] regex: don't enforce max_program_size when field isn't present in config. (#11187) max_program_size was deprecated, but when the field isn't specified, a default max program size of 100 is still enforced. This change makes it such that when the field isn't present, no max program size check is enforced. 
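As a minimal sketch of how the runtime-driven limits introduced here might be supplied (assuming a bootstrap with a static runtime layer; the layer name and threshold values below are illustrative, not part of this patch), the keys documented in the proto changes that follow could be set like this:

.. code-block:: yaml

  layered_runtime:
    layers:
    - name: static_layer_0
      static_layer:
        re2:
          max_program_size:
            # compiling a regex whose RE2 program size exceeds this throws an exception
            error_level: 150
            # above this, a warning is logged and the re2.exceeded_warn_level counter is incremented
            warn_level: 100

Per the proto documentation added below, `re2.max_program_size.error_level` falls back to 100 when unset, and `re2.max_program_size.warn_level` performs no check or logging when unset.
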
Signed-off-by: Lisa Lu --- api/envoy/type/matcher/regex.proto | 10 +++ api/envoy/type/matcher/v3/regex.proto | 10 +++ api/envoy/type/matcher/v4alpha/regex.proto | 10 +++ docs/root/version_history/current.rst | 3 +- .../envoy/type/matcher/regex.proto | 10 +++ .../envoy/type/matcher/v3/regex.proto | 10 +++ .../envoy/type/matcher/v4alpha/regex.proto | 10 +++ include/envoy/runtime/BUILD | 1 + include/envoy/runtime/runtime.h | 6 ++ source/common/common/BUILD | 1 + source/common/common/regex.cc | 58 ++++++++++-- source/common/runtime/runtime_impl.cc | 4 +- source/common/runtime/runtime_impl.h | 2 + test/common/common/BUILD | 2 + test/common/common/regex_test.cc | 88 ++++++++++++++++++- test/mocks/runtime/BUILD | 1 + test/mocks/runtime/mocks.cc | 5 +- test/mocks/runtime/mocks.h | 4 + 18 files changed, 224 insertions(+), 11 deletions(-) diff --git a/api/envoy/type/matcher/regex.proto b/api/envoy/type/matcher/regex.proto index 9e41637ab70c..b23c0bff3075 100644 --- a/api/envoy/type/matcher/regex.proto +++ b/api/envoy/type/matcher/regex.proto @@ -19,6 +19,16 @@ message RegexMatcher { // Google's `RE2 `_ regex engine. The regex string must adhere to // the documented `syntax `_. The engine is designed // to complete execution in linear time as well as limit the amount of memory used. + // + // Envoy supports program size checking via runtime. The runtime keys `re2.max_program_size.error_level` + // and `re2.max_program_size.warn_level` can be set to integers as the maximum program size or + // complexity that a compiled regex can have before an exception is thrown or a warning is + // logged, respectively. `re2.max_program_size.error_level` defaults to 100, and + // `re2.max_program_size.warn_level` has no default if unset (will not check/log a warning). + // + // Envoy emits two stats for tracking the program size of regexes: the histogram `re2.program_size`, + // which records the program size, and the counter `re2.exceeded_warn_level`, which is incremented + // each time the program size exceeds the warn level threshold. message GoogleRE2 { // This field controls the RE2 "program size" which is a rough estimate of how complex a // compiled regex is to evaluate. A regex that has a program size greater than the configured diff --git a/api/envoy/type/matcher/v3/regex.proto b/api/envoy/type/matcher/v3/regex.proto index 5a7922ec6f62..6087c6f90fad 100644 --- a/api/envoy/type/matcher/v3/regex.proto +++ b/api/envoy/type/matcher/v3/regex.proto @@ -22,6 +22,16 @@ message RegexMatcher { // Google's `RE2 `_ regex engine. The regex string must adhere to // the documented `syntax `_. The engine is designed // to complete execution in linear time as well as limit the amount of memory used. + // + // Envoy supports program size checking via runtime. The runtime keys `re2.max_program_size.error_level` + // and `re2.max_program_size.warn_level` can be set to integers as the maximum program size or + // complexity that a compiled regex can have before an exception is thrown or a warning is + // logged, respectively. `re2.max_program_size.error_level` defaults to 100, and + // `re2.max_program_size.warn_level` has no default if unset (will not check/log a warning). + // + // Envoy emits two stats for tracking the program size of regexes: the histogram `re2.program_size`, + // which records the program size, and the counter `re2.exceeded_warn_level`, which is incremented + // each time the program size exceeds the warn level threshold. 
message GoogleRE2 { option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.RegexMatcher.GoogleRE2"; diff --git a/api/envoy/type/matcher/v4alpha/regex.proto b/api/envoy/type/matcher/v4alpha/regex.proto index bfd8c3dd3b4f..087c5e3f7292 100644 --- a/api/envoy/type/matcher/v4alpha/regex.proto +++ b/api/envoy/type/matcher/v4alpha/regex.proto @@ -22,6 +22,16 @@ message RegexMatcher { // Google's `RE2 `_ regex engine. The regex string must adhere to // the documented `syntax `_. The engine is designed // to complete execution in linear time as well as limit the amount of memory used. + // + // Envoy supports program size checking via runtime. The runtime keys `re2.max_program_size.error_level` + // and `re2.max_program_size.warn_level` can be set to integers as the maximum program size or + // complexity that a compiled regex can have before an exception is thrown or a warning is + // logged, respectively. `re2.max_program_size.error_level` defaults to 100, and + // `re2.max_program_size.warn_level` has no default if unset (will not check/log a warning). + // + // Envoy emits two stats for tracking the program size of regexes: the histogram `re2.program_size`, + // which records the program size, and the counter `re2.exceeded_warn_level`, which is incremented + // each time the program size exceeds the warn level threshold. message GoogleRE2 { option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.v3.RegexMatcher.GoogleRE2"; diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index b52e89366440..8d133b922f81 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -103,6 +103,7 @@ New Features * network filters: added a :ref:`rocketmq proxy filter `. * ratelimit: added :ref:`API version ` to explicitly set the version of gRPC service endpoint and message to be used. * redis: added acl support :ref:`downstream_auth_username ` for downstream client ACL authentication, and :ref:`auth_username ` to configure authentication usernames for upstream Redis 6+ server clusters with ACL enabled. +* regex: added support for enforcing max program size via runtime and stats to monitor program size for :ref:`Google RE2 `. * request_id: added to :ref:`always_set_request_id_in_response setting ` to set :ref:`x-request-id ` header in response even if tracing is not forced. @@ -128,7 +129,7 @@ Deprecated :ref:`Compressor `. * The * :ref:`GoogleRE2.max_program_size` field is now deprecated. Management servers are expected to validate regexp program sizes - instead of expecting the client to do it. + instead of expecting the client to do it. Alternatively, the max program size can be enforced by Envoy via runtime. * The :ref:`internal_redirect_action ` field and :ref:`max_internal_redirects ` field are now deprecated. This changes the implemented default cross scheme redirect behavior. diff --git a/generated_api_shadow/envoy/type/matcher/regex.proto b/generated_api_shadow/envoy/type/matcher/regex.proto index 9e41637ab70c..b23c0bff3075 100644 --- a/generated_api_shadow/envoy/type/matcher/regex.proto +++ b/generated_api_shadow/envoy/type/matcher/regex.proto @@ -19,6 +19,16 @@ message RegexMatcher { // Google's `RE2 `_ regex engine. The regex string must adhere to // the documented `syntax `_. The engine is designed // to complete execution in linear time as well as limit the amount of memory used. + // + // Envoy supports program size checking via runtime. 
The runtime keys `re2.max_program_size.error_level` + // and `re2.max_program_size.warn_level` can be set to integers as the maximum program size or + // complexity that a compiled regex can have before an exception is thrown or a warning is + // logged, respectively. `re2.max_program_size.error_level` defaults to 100, and + // `re2.max_program_size.warn_level` has no default if unset (will not check/log a warning). + // + // Envoy emits two stats for tracking the program size of regexes: the histogram `re2.program_size`, + // which records the program size, and the counter `re2.exceeded_warn_level`, which is incremented + // each time the program size exceeds the warn level threshold. message GoogleRE2 { // This field controls the RE2 "program size" which is a rough estimate of how complex a // compiled regex is to evaluate. A regex that has a program size greater than the configured diff --git a/generated_api_shadow/envoy/type/matcher/v3/regex.proto b/generated_api_shadow/envoy/type/matcher/v3/regex.proto index 5a7922ec6f62..6087c6f90fad 100644 --- a/generated_api_shadow/envoy/type/matcher/v3/regex.proto +++ b/generated_api_shadow/envoy/type/matcher/v3/regex.proto @@ -22,6 +22,16 @@ message RegexMatcher { // Google's `RE2 `_ regex engine. The regex string must adhere to // the documented `syntax `_. The engine is designed // to complete execution in linear time as well as limit the amount of memory used. + // + // Envoy supports program size checking via runtime. The runtime keys `re2.max_program_size.error_level` + // and `re2.max_program_size.warn_level` can be set to integers as the maximum program size or + // complexity that a compiled regex can have before an exception is thrown or a warning is + // logged, respectively. `re2.max_program_size.error_level` defaults to 100, and + // `re2.max_program_size.warn_level` has no default if unset (will not check/log a warning). + // + // Envoy emits two stats for tracking the program size of regexes: the histogram `re2.program_size`, + // which records the program size, and the counter `re2.exceeded_warn_level`, which is incremented + // each time the program size exceeds the warn level threshold. message GoogleRE2 { option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.RegexMatcher.GoogleRE2"; diff --git a/generated_api_shadow/envoy/type/matcher/v4alpha/regex.proto b/generated_api_shadow/envoy/type/matcher/v4alpha/regex.proto index 3d7d3f029c0e..f64614728733 100644 --- a/generated_api_shadow/envoy/type/matcher/v4alpha/regex.proto +++ b/generated_api_shadow/envoy/type/matcher/v4alpha/regex.proto @@ -22,6 +22,16 @@ message RegexMatcher { // Google's `RE2 `_ regex engine. The regex string must adhere to // the documented `syntax `_. The engine is designed // to complete execution in linear time as well as limit the amount of memory used. + // + // Envoy supports program size checking via runtime. The runtime keys `re2.max_program_size.error_level` + // and `re2.max_program_size.warn_level` can be set to integers as the maximum program size or + // complexity that a compiled regex can have before an exception is thrown or a warning is + // logged, respectively. `re2.max_program_size.error_level` defaults to 100, and + // `re2.max_program_size.warn_level` has no default if unset (will not check/log a warning). 
+ // + // Envoy emits two stats for tracking the program size of regexes: the histogram `re2.program_size`, + // which records the program size, and the counter `re2.exceeded_warn_level`, which is incremented + // each time the program size exceeds the warn level threshold. message GoogleRE2 { option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.v3.RegexMatcher.GoogleRE2"; diff --git a/include/envoy/runtime/BUILD b/include/envoy/runtime/BUILD index eaed57026a42..5fee4e86610e 100644 --- a/include/envoy/runtime/BUILD +++ b/include/envoy/runtime/BUILD @@ -13,6 +13,7 @@ envoy_cc_library( hdrs = ["runtime.h"], external_deps = ["abseil_optional"], deps = [ + "//include/envoy/stats:stats_interface", "//source/common/common:assert_lib", "//source/common/singleton:threadsafe_singleton", "@envoy_api//envoy/type/v3:pkg_cc_proto", diff --git a/include/envoy/runtime/runtime.h b/include/envoy/runtime/runtime.h index 89f904ffac08..57df07919119 100644 --- a/include/envoy/runtime/runtime.h +++ b/include/envoy/runtime/runtime.h @@ -9,6 +9,7 @@ #include #include "envoy/common/pure.h" +#include "envoy/stats/store.h" #include "envoy/type/v3/percent.pb.h" #include "common/common/assert.h" @@ -301,6 +302,11 @@ class Loader { * have either received and applied their responses or timed out. */ virtual void startRtdsSubscriptions(ReadyCallback on_done) PURE; + + /** + * @return Stats::Scope& the root scope. + */ + virtual Stats::Scope& getRootScope() PURE; }; using LoaderPtr = std::unique_ptr; diff --git a/source/common/common/BUILD b/source/common/common/BUILD index f9942fbc3223..76081a5b7b08 100644 --- a/source/common/common/BUILD +++ b/source/common/common/BUILD @@ -239,6 +239,7 @@ envoy_cc_library( ":assert_lib", "//include/envoy/common:regex_interface", "//source/common/protobuf:utility_lib", + "//source/common/stats:symbol_table_lib", "@com_googlesource_code_re2//:re2", "@envoy_api//envoy/type/matcher/v3:pkg_cc_proto", ], diff --git a/source/common/common/regex.cc b/source/common/common/regex.cc index 5f7faa7d728b..735a24d25b8d 100644 --- a/source/common/common/regex.cc +++ b/source/common/common/regex.cc @@ -1,11 +1,13 @@ #include "common/common/regex.h" #include "envoy/common/exception.h" +#include "envoy/runtime/runtime.h" #include "envoy/type/matcher/v3/regex.pb.h" #include "common/common/assert.h" #include "common/common/fmt.h" #include "common/protobuf/utility.h" +#include "common/stats/symbol_table_impl.h" #include "re2/re2.h" @@ -47,12 +49,56 @@ class CompiledGoogleReMatcher : public CompiledMatcher { throw EnvoyException(regex_.error()); } - const uint32_t max_program_size = - PROTOBUF_GET_WRAPPED_OR_DEFAULT(config.google_re2(), max_program_size, 100); - if (static_cast(regex_.ProgramSize()) > max_program_size) { - throw EnvoyException(fmt::format("regex '{}' RE2 program size of {} > max program size of " - "{}. Increase configured max program size if necessary.", - config.regex(), regex_.ProgramSize(), max_program_size)); + const uint32_t regex_program_size = static_cast(regex_.ProgramSize()); + + // Check if the deprecated field max_program_size is set first, and follow the old logic if so. + if (config.google_re2().has_max_program_size()) { + const uint32_t max_program_size = + PROTOBUF_GET_WRAPPED_OR_DEFAULT(config.google_re2(), max_program_size, 100); + if (regex_program_size > max_program_size) { + throw EnvoyException(fmt::format("regex '{}' RE2 program size of {} > max program size of " + "{}. 
Increase configured max program size if necessary.", + config.regex(), regex_program_size, max_program_size)); + } + return; + } + + Runtime::Loader* runtime = Runtime::LoaderSingleton::getExisting(); + if (runtime) { + Stats::Scope& root_scope = runtime->getRootScope(); + + // TODO(perf): It would be more efficient to create the stats (program size histogram, warning + // counter) on startup and not with each regex match. + Stats::StatNameManagedStorage program_size_stat_name("re2.program_size", + root_scope.symbolTable()); + Stats::Histogram& program_size_stat = root_scope.histogramFromStatName( + program_size_stat_name.statName(), Stats::Histogram::Unit::Unspecified); + program_size_stat.recordValue(regex_program_size); + + Stats::StatNameManagedStorage warn_count_stat_name("re2.exceeded_warn_level", + root_scope.symbolTable()); + Stats::Counter& warn_count = root_scope.counterFromStatName(warn_count_stat_name.statName()); + + const uint32_t max_program_size_error_level = + runtime->snapshot().getInteger("re2.max_program_size.error_level", 100); + if (regex_program_size > max_program_size_error_level) { + throw EnvoyException(fmt::format("regex '{}' RE2 program size of {} > max program size of " + "{} set for the error level threshold. Increase " + "configured max program size if necessary.", + config.regex(), regex_program_size, + max_program_size_error_level)); + } + + const uint32_t max_program_size_warn_level = + runtime->snapshot().getInteger("re2.max_program_size.warn_level", UINT32_MAX); + if (regex_program_size > max_program_size_warn_level) { + warn_count.inc(); + ENVOY_LOG_MISC( + warn, + "regex '{}' RE2 program size of {} > max program size of {} set for the warn " + "level threshold. Increase configured max program size if necessary.", + config.regex(), regex_program_size, max_program_size_warn_level); + } } } diff --git a/source/common/runtime/runtime_impl.cc b/source/common/runtime/runtime_impl.cc index 7d95b940344c..49eae3c9c641 100644 --- a/source/common/runtime/runtime_impl.cc +++ b/source/common/runtime/runtime_impl.cc @@ -477,7 +477,7 @@ LoaderImpl::LoaderImpl(Event::Dispatcher& dispatcher, ThreadLocal::SlotAllocator ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api) : generator_(generator), stats_(generateStats(store)), tls_(tls.allocateSlot()), config_(config), service_cluster_(local_info.clusterName()), api_(api), - init_watcher_("RDTS", [this]() { onRdtsReady(); }) { + init_watcher_("RDTS", [this]() { onRdtsReady(); }), store_(store) { std::unordered_set layer_names; for (const auto& layer : config_.layers()) { auto ret = layer_names.insert(layer.name()); @@ -623,6 +623,8 @@ void LoaderImpl::mergeValues(const std::unordered_map& loadNewSnapshot(); } +Stats::Scope& LoaderImpl::getRootScope() { return store_; } + RuntimeStats LoaderImpl::generateStats(Stats::Store& store) { std::string prefix = "runtime."; RuntimeStats stats{ diff --git a/source/common/runtime/runtime_impl.h b/source/common/runtime/runtime_impl.h index 5bec747b93f7..f744261c5c27 100644 --- a/source/common/runtime/runtime_impl.h +++ b/source/common/runtime/runtime_impl.h @@ -257,6 +257,7 @@ class LoaderImpl : public Loader, Logger::Loggable { SnapshotConstSharedPtr threadsafeSnapshot() override; void mergeValues(const std::unordered_map& values) override; void startRtdsSubscriptions(ReadyCallback on_done) override; + Stats::Scope& getRootScope() override; private: friend RtdsSubscription; @@ -281,6 +282,7 @@ class LoaderImpl : public Loader, Logger::Loggable { Init::ManagerImpl 
init_manager_{"RTDS"}; std::vector subscriptions_; Upstream::ClusterManager* cm_{}; + Stats::Store& store_; absl::Mutex snapshot_mutex_; SnapshotConstSharedPtr thread_safe_snapshot_ ABSL_GUARDED_BY(snapshot_mutex_); diff --git a/test/common/common/BUILD b/test/common/common/BUILD index 5842443825c6..dbd4473d555b 100644 --- a/test/common/common/BUILD +++ b/test/common/common/BUILD @@ -173,6 +173,8 @@ envoy_cc_test( srcs = ["regex_test.cc"], deps = [ "//source/common/common:regex_lib", + "//test/test_common:logging_lib", + "//test/test_common:test_runtime_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/type/matcher/v3:pkg_cc_proto", ], diff --git a/test/common/common/regex_test.cc b/test/common/common/regex_test.cc index b5848799acaf..5b1d9bdd4bf4 100644 --- a/test/common/common/regex_test.cc +++ b/test/common/common/regex_test.cc @@ -3,6 +3,8 @@ #include "common/common/regex.h" +#include "test/test_common/logging.h" +#include "test/test_common/test_runtime.h" #include "test/test_common/utility.h" #include "gtest/gtest.h" @@ -58,11 +60,24 @@ TEST(Utility, ParseRegex) { EXPECT_TRUE(compiled_matcher->match(long_string)); } - // Verify max program size. + // Positive case to ensure no max program size is enforced. { + TestScopedRuntime scoped_runtime; + envoy::type::matcher::v3::RegexMatcher matcher; + matcher.set_regex("/asdf/.*"); + matcher.mutable_google_re2(); + EXPECT_NO_THROW(Utility::parseRegex(matcher)); + } + + // Verify max program size with the deprecated field codepath plus runtime. + // The deprecated field codepath precedes any runtime settings. + { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"re2.max_program_size.error_level", "3"}}); envoy::type::matcher::v3::RegexMatcher matcher; - matcher.mutable_google_re2()->mutable_max_program_size()->set_value(1); matcher.set_regex("/asdf/.*"); + matcher.mutable_google_re2()->mutable_max_program_size()->set_value(1); #ifndef GTEST_USES_SIMPLE_RE EXPECT_THROW_WITH_REGEX(Utility::parseRegex(matcher), EnvoyException, "RE2 program size of [0-9]+ > max program size of 1\\."); @@ -71,6 +86,75 @@ TEST(Utility, ParseRegex) { "RE2 program size of \\d+ > max program size of 1\\."); #endif } + + // Verify that an exception is thrown for the error level max program size. + { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"re2.max_program_size.error_level", "1"}}); + envoy::type::matcher::v3::RegexMatcher matcher; + matcher.set_regex("/asdf/.*"); + matcher.mutable_google_re2(); +#ifndef GTEST_USES_SIMPLE_RE + EXPECT_THROW_WITH_REGEX( + Utility::parseRegex(matcher), EnvoyException, + "RE2 program size of [0-9]+ > max program size of 1 set for the error level threshold\\."); +#else + EXPECT_THROW_WITH_REGEX( + Utility::parseRegex(matcher), EnvoyException, + "RE2 program size of \\d+ > max program size of 1 set for the error level threshold\\."); +#endif + } + + // Verify that the error level max program size defaults to 100 if not set by runtime. 
+ { + TestScopedRuntime scoped_runtime; + envoy::type::matcher::v3::RegexMatcher matcher; + matcher.set_regex( + "/asdf/.*/asdf/.*/asdf/.*/asdf/.*/asdf/.*/asdf/.*/asdf/.*/asdf/.*/asdf/.*/asdf/.*"); + matcher.mutable_google_re2(); +#ifndef GTEST_USES_SIMPLE_RE + EXPECT_THROW_WITH_REGEX(Utility::parseRegex(matcher), EnvoyException, + "RE2 program size of [0-9]+ > max program size of 100 set for the " + "error level threshold\\."); +#else + EXPECT_THROW_WITH_REGEX( + Utility::parseRegex(matcher), EnvoyException, + "RE2 program size of \\d+ > max program size of 100 set for the error level threshold\\."); +#endif + } + + // Verify that a warning is logged for the warn level max program size. + { + TestScopedRuntime scoped_runtime; + Envoy::Stats::Counter& warn_count = + Runtime::LoaderSingleton::getExisting()->getRootScope().counterFromString( + "re2.exceeded_warn_level"); + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"re2.max_program_size.warn_level", "1"}}); + envoy::type::matcher::v3::RegexMatcher matcher; + matcher.set_regex("/asdf/.*"); + matcher.mutable_google_re2(); + EXPECT_NO_THROW(Utility::parseRegex(matcher)); + EXPECT_EQ(1, warn_count.value()); + EXPECT_LOG_CONTAINS("warn", "> max program size of 1 set for the warn level threshold", + Utility::parseRegex(matcher)); + EXPECT_EQ(2, warn_count.value()); + } + + // Verify that no check is performed if the warn level max program size is not set by runtime. + { + TestScopedRuntime scoped_runtime; + Envoy::Stats::Counter& warn_count = + Runtime::LoaderSingleton::getExisting()->getRootScope().counterFromString( + "re2.exceeded_warn_level"); + envoy::type::matcher::v3::RegexMatcher matcher; + matcher.set_regex("/asdf/.*"); + matcher.mutable_google_re2(); + EXPECT_NO_THROW(Utility::parseRegex(matcher)); + EXPECT_LOG_NOT_CONTAINS("warn", "> max program size", Utility::parseRegex(matcher)); + EXPECT_EQ(0, warn_count.value()); + } } } // namespace diff --git a/test/mocks/runtime/BUILD b/test/mocks/runtime/BUILD index f68b4dde2ef7..a6af7e7570fe 100644 --- a/test/mocks/runtime/BUILD +++ b/test/mocks/runtime/BUILD @@ -17,6 +17,7 @@ envoy_cc_mock( "//include/envoy/runtime:runtime_interface", "//include/envoy/upstream:cluster_manager_interface", "//test/mocks:common_lib", + "//test/mocks/stats:stats_mocks", "@envoy_api//envoy/type/v3:pkg_cc_proto", ], ) diff --git a/test/mocks/runtime/mocks.cc b/test/mocks/runtime/mocks.cc index 9dda39b1087c..2ad66ac70629 100644 --- a/test/mocks/runtime/mocks.cc +++ b/test/mocks/runtime/mocks.cc @@ -23,7 +23,10 @@ MockSnapshot::MockSnapshot() { MockSnapshot::~MockSnapshot() = default; -MockLoader::MockLoader() { ON_CALL(*this, snapshot()).WillByDefault(ReturnRef(snapshot_)); } +MockLoader::MockLoader() { + ON_CALL(*this, snapshot()).WillByDefault(ReturnRef(snapshot_)); + ON_CALL(*this, getRootScope()).WillByDefault(ReturnRef(store_)); +} MockLoader::~MockLoader() = default; diff --git a/test/mocks/runtime/mocks.h b/test/mocks/runtime/mocks.h index 7e02d94f1397..af0601436493 100644 --- a/test/mocks/runtime/mocks.h +++ b/test/mocks/runtime/mocks.h @@ -8,6 +8,8 @@ #include "envoy/type/v3/percent.pb.h" #include "envoy/upstream/cluster_manager.h" +#include "test/mocks/stats/mocks.h" + #include "gmock/gmock.h" namespace Envoy { @@ -76,8 +78,10 @@ class MockLoader : public Loader { MOCK_METHOD(SnapshotConstSharedPtr, threadsafeSnapshot, ()); MOCK_METHOD(void, mergeValues, ((const std::unordered_map&))); MOCK_METHOD(void, startRtdsSubscriptions, (ReadyCallback)); + MOCK_METHOD(Stats::Scope&, getRootScope, ()); 
testing::NiceMock snapshot_; + testing::NiceMock store_; }; class MockOverrideLayer : public Snapshot::OverrideLayer { From 9f056215bcbfd11cdb954019b2b4f03510b2c24d Mon Sep 17 00:00:00 2001 From: htuch Date: Thu, 25 Jun 2020 14:11:03 -0400 Subject: [PATCH 447/909] docs: life of a request. (#11284) This documentation goes deeper than the existing architecture overview, by diving inside Envoy and discussing how internal subsystems work. The target audience are primarily new Envoy developers, but also anyone interested in the low-level detail provided, e.g. Envoy operators doing performance work. The patch also updates the style guide to request that this be kept up-to-date, since reliance on low-level internals means that this description is a moving target. Signed-off-by: Harvey Tuch htuch@google.com Co-authored-by: Alyssa Wilk alyssar@chromium.org Co-authored-by: Matt Klein mklein@lyft.com Co-authored-by: Snow Pettersen kpettersen@netflix.com Co-authored-by: Jose Nino jnino@lyft.com Co-authored-by: Greg Greenway ggreenway@apple.com --- PULL_REQUESTS.md | 3 + STYLE.md | 5 + docs/root/_static/lor-architecture.svg | 1 + docs/root/_static/lor-client.svg | 1 + docs/root/_static/lor-filter-chain-match.svg | 1 + docs/root/_static/lor-http-decode.svg | 1 + docs/root/_static/lor-http-encode.svg | 1 + docs/root/_static/lor-http-filters.svg | 1 + docs/root/_static/lor-http.svg | 1 + docs/root/_static/lor-lb.svg | 1 + docs/root/_static/lor-listener-filters.svg | 1 + docs/root/_static/lor-listeners.svg | 1 + docs/root/_static/lor-network-filters.svg | 1 + docs/root/_static/lor-network-read.svg | 1 + docs/root/_static/lor-network-write.svg | 1 + docs/root/_static/lor-route-config.svg | 1 + docs/root/_static/lor-topology-edge.svg | 1 + docs/root/_static/lor-topology-hybrid.svg | 1 + docs/root/_static/lor-topology-ilb.svg | 1 + .../lor-topology-service-mesh-node.svg | 1 + .../_static/lor-topology-service-mesh.svg | 1 + docs/root/_static/lor-topology-tiered.svg | 1 + docs/root/_static/lor-transport-socket.svg | 1 + .../listener_filters/tls_inspector.rst | 4 +- docs/root/faq/overview.rst | 2 + docs/root/img/envoy-logo.png | Bin 0 -> 49908 bytes .../arch_overview/listeners/listeners.rst | 2 + .../observability/access_logging.rst | 2 + docs/root/intro/intro.rst | 1 + docs/root/intro/life_of_a_request.rst | 647 ++++++++++++++++++ 30 files changed, 685 insertions(+), 2 deletions(-) create mode 100644 docs/root/_static/lor-architecture.svg create mode 100644 docs/root/_static/lor-client.svg create mode 100644 docs/root/_static/lor-filter-chain-match.svg create mode 100644 docs/root/_static/lor-http-decode.svg create mode 100644 docs/root/_static/lor-http-encode.svg create mode 100644 docs/root/_static/lor-http-filters.svg create mode 100644 docs/root/_static/lor-http.svg create mode 100644 docs/root/_static/lor-lb.svg create mode 100644 docs/root/_static/lor-listener-filters.svg create mode 100644 docs/root/_static/lor-listeners.svg create mode 100644 docs/root/_static/lor-network-filters.svg create mode 100644 docs/root/_static/lor-network-read.svg create mode 100644 docs/root/_static/lor-network-write.svg create mode 100644 docs/root/_static/lor-route-config.svg create mode 100644 docs/root/_static/lor-topology-edge.svg create mode 100644 docs/root/_static/lor-topology-hybrid.svg create mode 100644 docs/root/_static/lor-topology-ilb.svg create mode 100644 docs/root/_static/lor-topology-service-mesh-node.svg create mode 100644 docs/root/_static/lor-topology-service-mesh.svg create mode 100644 
docs/root/_static/lor-topology-tiered.svg create mode 100644 docs/root/_static/lor-transport-socket.svg create mode 100644 docs/root/img/envoy-logo.png create mode 100644 docs/root/intro/life_of_a_request.rst diff --git a/PULL_REQUESTS.md b/PULL_REQUESTS.md index 0293b144b40b..9b3d5cd043ba 100644 --- a/PULL_REQUESTS.md +++ b/PULL_REQUESTS.md @@ -59,6 +59,9 @@ If there are documentation changes, please include a brief description of what t changes may be in [docs/root](docs/root) and/or inline with the API protos. Please write in N/A if there were no documentation changes. +Any PRs with structural changes to the dataplane should also update the [Life of a +Request](docs/root/intro/life_of_a_request.md) documentation as appropriate. + ### Release notes If this change is user impacting OR extension developer impacting (filter API, etc.) you **must** diff --git a/STYLE.md b/STYLE.md index 2c5b6d2e785f..7965f90f7236 100644 --- a/STYLE.md +++ b/STYLE.md @@ -16,6 +16,11 @@ * Please see [REPO_LAYOUT.md](REPO_LAYOUT.md). +# Documentation + +* If you are modifying the data plane structually, please keep the [Life of a + Request](docs/root/intro/life_of_a_request.md) documentation up-to-date. + # Deviations from Google C++ style guidelines * Exceptions are allowed and encouraged where appropriate. When using exceptions, do not add diff --git a/docs/root/_static/lor-architecture.svg b/docs/root/_static/lor-architecture.svg new file mode 100644 index 000000000000..7231fc674185 --- /dev/null +++ b/docs/root/_static/lor-architecture.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/root/_static/lor-client.svg b/docs/root/_static/lor-client.svg new file mode 100644 index 000000000000..3542be34a2ef --- /dev/null +++ b/docs/root/_static/lor-client.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/root/_static/lor-filter-chain-match.svg b/docs/root/_static/lor-filter-chain-match.svg new file mode 100644 index 000000000000..2f5d8ab1aad5 --- /dev/null +++ b/docs/root/_static/lor-filter-chain-match.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/root/_static/lor-http-decode.svg b/docs/root/_static/lor-http-decode.svg new file mode 100644 index 000000000000..1ed02946fb7b --- /dev/null +++ b/docs/root/_static/lor-http-decode.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/root/_static/lor-http-encode.svg b/docs/root/_static/lor-http-encode.svg new file mode 100644 index 000000000000..bac0d585d066 --- /dev/null +++ b/docs/root/_static/lor-http-encode.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/root/_static/lor-http-filters.svg b/docs/root/_static/lor-http-filters.svg new file mode 100644 index 000000000000..7d27798a1acc --- /dev/null +++ b/docs/root/_static/lor-http-filters.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/root/_static/lor-http.svg b/docs/root/_static/lor-http.svg new file mode 100644 index 000000000000..d48e243ac46c --- /dev/null +++ b/docs/root/_static/lor-http.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/root/_static/lor-lb.svg b/docs/root/_static/lor-lb.svg new file mode 100644 index 000000000000..94733ffd1904 --- /dev/null +++ b/docs/root/_static/lor-lb.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/root/_static/lor-listener-filters.svg b/docs/root/_static/lor-listener-filters.svg new file mode 100644 index 000000000000..61171a3ca423 --- /dev/null +++ b/docs/root/_static/lor-listener-filters.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git 
a/docs/root/_static/lor-listeners.svg b/docs/root/_static/lor-listeners.svg new file mode 100644 index 000000000000..ccff9c40fbf6 --- /dev/null +++ b/docs/root/_static/lor-listeners.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/root/_static/lor-network-filters.svg b/docs/root/_static/lor-network-filters.svg new file mode 100644 index 000000000000..04aac073759e --- /dev/null +++ b/docs/root/_static/lor-network-filters.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/root/_static/lor-network-read.svg b/docs/root/_static/lor-network-read.svg new file mode 100644 index 000000000000..5fcfa5cc38db --- /dev/null +++ b/docs/root/_static/lor-network-read.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/root/_static/lor-network-write.svg b/docs/root/_static/lor-network-write.svg new file mode 100644 index 000000000000..a719b6d58aef --- /dev/null +++ b/docs/root/_static/lor-network-write.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/root/_static/lor-route-config.svg b/docs/root/_static/lor-route-config.svg new file mode 100644 index 000000000000..148113b59cc4 --- /dev/null +++ b/docs/root/_static/lor-route-config.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/root/_static/lor-topology-edge.svg b/docs/root/_static/lor-topology-edge.svg new file mode 100644 index 000000000000..7ccc85858fc2 --- /dev/null +++ b/docs/root/_static/lor-topology-edge.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/root/_static/lor-topology-hybrid.svg b/docs/root/_static/lor-topology-hybrid.svg new file mode 100644 index 000000000000..e210d8506854 --- /dev/null +++ b/docs/root/_static/lor-topology-hybrid.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/root/_static/lor-topology-ilb.svg b/docs/root/_static/lor-topology-ilb.svg new file mode 100644 index 000000000000..e0112d94ef97 --- /dev/null +++ b/docs/root/_static/lor-topology-ilb.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/root/_static/lor-topology-service-mesh-node.svg b/docs/root/_static/lor-topology-service-mesh-node.svg new file mode 100644 index 000000000000..7622445478e9 --- /dev/null +++ b/docs/root/_static/lor-topology-service-mesh-node.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/root/_static/lor-topology-service-mesh.svg b/docs/root/_static/lor-topology-service-mesh.svg new file mode 100644 index 000000000000..e7446e68e4e1 --- /dev/null +++ b/docs/root/_static/lor-topology-service-mesh.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/root/_static/lor-topology-tiered.svg b/docs/root/_static/lor-topology-tiered.svg new file mode 100644 index 000000000000..0a212d597d71 --- /dev/null +++ b/docs/root/_static/lor-topology-tiered.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/root/_static/lor-transport-socket.svg b/docs/root/_static/lor-transport-socket.svg new file mode 100644 index 000000000000..b14f96209b0f --- /dev/null +++ b/docs/root/_static/lor-transport-socket.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/root/configuration/listeners/listener_filters/tls_inspector.rst b/docs/root/configuration/listeners/listener_filters/tls_inspector.rst index 2b7d30cd434c..e9897435e880 100644 --- a/docs/root/configuration/listeners/listener_filters/tls_inspector.rst +++ b/docs/root/configuration/listeners/listener_filters/tls_inspector.rst @@ -26,8 +26,8 @@ A sample filter configuration could be: .. 
code-block:: yaml listener_filters: - - name: "envoy.filters.listener.tls_inspector" - typed_config: {} + - name: "envoy.filters.listener.tls_inspector" + typed_config: {} Statistics ---------- diff --git a/docs/root/faq/overview.rst b/docs/root/faq/overview.rst index 3a65808a8b8b..2953330a6009 100644 --- a/docs/root/faq/overview.rst +++ b/docs/root/faq/overview.rst @@ -27,6 +27,8 @@ API api/why_versioning api/incremental +.. _faq_overview_debug: + Debugging --------- .. toctree:: diff --git a/docs/root/img/envoy-logo.png b/docs/root/img/envoy-logo.png new file mode 100644 index 0000000000000000000000000000000000000000..5c5b78ebba16296da86d2a13858e5be74cde3a60 GIT binary patch literal 49908 zcmZ5o2Rzkn`#(~Y9y}=#aViIic!(scMUI`lRmk3ZYkClJtZcH%EF(u|Dnj;B0{UiLmY866o4g(AmF+*3fIj=Q5!N6Lulq~9iLR~;%@7-2%>YC{lwxB=pg>S%?%YxIL{ZCRWX;YO8 z_VM4zGESU{BL0)&?c3ZVE$n~AT>k6m+kkU<=NqxdQdAmq9uv!CK1`ziMWf{0)zV_2 z$-~^%u#ECA6cz4WN=Wn7k8$bW-|Q&cqeh|T+Bf$W9dNUl5=>Fseqp%&H09n}%ao{U zo30_Ye#>uA5(h$}P>tIAi7P}XtL^&rKvyh`An1kiPn^eY5T#T+HNzWxWZ= zB0pF}iY381%nj%|M(UqE_9x-t#L;9*loy|1zi6K{kF5II?=Rly!7J5RoDcP1gawT{ zl4uml)TY$4I3{OiF-cssz5B8!;n6L2HZa{zyr9mYjbaU~g0Ds%WI4b5b81e0>rnkJ z-9K1$ZWEpui6AGLzMmuupWb?_-=q6SxXy!vC!ENWd8<-Sw>LR6pIY9Dwx@@%>vEP< z5FR%QAzSJ`$h10_#Nld|zY^#is^6&lhg|W{!E+Aed6{jgXO9#Q*5-|VB9rG#OUP(o zXg}dO%7_HnUs6gHuhrqV_xQ$Q_rcFsQ^7EH8uwM;=VCP<&{7BD+;T_MifL`4=ImI# zUX3rK)$u(-_{7xxNn+GQ$nx%RZ-_x4gRxt9Iwlx4q&`M?-0Ig%ifVLeBMW~XVnE5R zWiiG(+8TK6a982Wuxq9=Xu*2!J6X8BbCABt=t;r{dqhFNc>6SJ&S6YovNby=?ho*? z!WqItE6)%Sl$XlKicy_nslp`#W+Ze`D7-6ZeJfFY&}KdhCk@M$4wn}o%f-`F@PFWh zg2b=7Nf$1SN*_MsCp;7A7iCO}o>`Qq%U`)NIudxOcloTs77AsOtue?Wp}zL9B6%-M zL3a1XsRQMD7!esiXIL2*6Ks2#AjQL7WXG;g70JpV#kSwzy@QiJB2jbeYVwp&9dJP;)YN-< zvcoFKW~%DYstfMOkMnaXUZPhx4K}~EQW92gAffW`Dj}g*uoO5mi5F5$EAa(rSYJ*8$Y`i{yq)1 za`(r=p|#`@!A*DO^@9@x+>2Eu8<8}=e)V|7&k{I=G%wEhbNJT zKJf5H^ongEo!@8&LDl$B1z+x8BEge?U{o&_L^zV1iev3Vmmu9vAEaC3 zJ!I_#$i>?@2IKOsVkz}KwL^7S9>6Ce36NJXH@v|Kt!aVu#jN1chsIys0^@ z97R&1w>}+)r#%82d=OBr73m?tqMQdoeM=i6^A@?<;E{a019-Ye2=-eH@bhxvoBg75 z%n{$oXx``kD}CN0w!o8)Z&w|-_(&ZD8-ke!!4}U6CLRbt5`nE2V(r#7?WF(8u`UBv zw&iPI?2&H)`tPrejDT~l}9Q-^vt7| zCqG*JPyXy&1~dLFQzbwii501=VwRiw;f_(TV|)t|rEIA#LLnqpRsRE2Jug7a7;vWi zH8k?6L*1t3;ZxT#tcXnJ%>Ns@8;oF)yjaMOa%G%4&bVf%*)}L|E1wZ!G~ytDH_n16 z8k27OPS*^Y^pM!%Z56{|alu@vkgC2{o9@h6rbLr0 zNx#Un{};%)fm^*Q0pr5(5N|FoGt?H6xW(#I`@39jLA@{xJ z5PP>105JF@EdJ1*gDh^iy*h6{lh=_R!hGovnQsD0cxj}uP)p;^e~%=KoK}HCysXxh z5s+-C6VUzz0A=_PAbSC>Ze=P3pQr?=dEy$CGh?lisAnoT+Dcia;#%_G+>WP*_{KY` z;2WuEuus4=;d~-}m29k@Z{u5JRomtV4mX^I94;?`&BAWT@Jw^P$|YfNjbb}_C9uRT zbCnzPIs~gODUean5y%gm`Ka!wj7kv@8=cW0beJProwxoV_=O+YFy=kW$@9Qb z@L#1B`5q(2ZouNkb^6-BNb*c@y*kWlBN_-13uSO&wEmy$T1HuwnO22^pKkh{5HjBv zW&~e{9>h`?M7@{hgG_%~R%bx)8S|Q)s+_x3a~G<4B{j^~-j&hHekUB>=m!4F@JIZo zT+{q5BmMNLG(BhBNQAC8!HkEv>@^0y8Sqvgyd&i%&11ey7EXV@yNK=NUzEvB8kWJY zp?n{z9SKCoi$2}rI3;ih67u#!Q2ItYE1RFTr1mVp+kfk!5fblNvk>oZmPkIP?i;7o zXImA1weC;napmxDzbOMwJghEtQ2ojsKUi!~3>kYy){%vyMQOlVo@Us@d>y4KpHMD> zGJJ?72W5Dyei2r+MnT?s{v@N(7`Rf+VbH0^9H1#6 zXQp~iP%f{d2uQ*Cy9c`^!ER-U%34X$cdQ=^|A>hZtW+MWr~h9)-Xl&C5zQ10pyuR_ zqUKz8W$cs&*+21WOzYKzav2^V@_W)`N-v6Zgj~%?Jb#r_k?b)Z+B?P9i4!+1krdec zhG9UC^>PJlY~*fh*%$^U$ZueLgf1sOa0nhb%g=8Jz99M%WDO;^5nc3{>yPIR7MToAb zcEE)Ai-<+~Qi>N{S;#yb1G=Tvja&)N|A%3CtpXOk=Z56*@e5%W5*lq-k6!ev)8$tpG-3Z39B8H4x&(IjOd9&o>06T+&@pa217OUH%=R0)} z(r2|t5CIR2(#!QRFfFKCj`sCE8TAnjkZ=D}!1^H&1_F>MEe5)#=KD-g#M>fOYcq$* zzuhG~XXg_Yfk2kW0oTDZ0bO_wbTC&txwuor_F7nEv|=hZ+lRLCDA4VIu9tTJ>*#rh zkgQRCN`4mOv_Lk?()xS1>*_y60iBjhYMNSZ_^osikUIQO7NOQ|8HBkBiOJf6 zlIKKLlZ38{>RSGT0{;X;ydNA`de?r~DjGUR=2LW2mqy7KpeJ);qqu2D(J!lR=W_YL 
zhJ!swhmW%JZ6+;`+juv_9Y9UQ8&1tBAKOyJ<|kJ_{=b0mVga%$w9HBSFN9a2OW+{d?`hm2CP7j;dG)eJ@15{%ra0TH7M z>C`8i^Lmnzx52n3pG4|Gxntb2X!e2&f(-vHc(>@meM)KeoR*l|&$W`HC9i#|IOZ|2 zQx;=0<$XeK7OCCe3aJo%jHn^LF1t0}pkcF=Z^*dnm9KNMlC6U&Vy#tHl}*Y<@qiDT zL(63sTR}>06RB?^L)0_H_vo6zhbbQdy#3Sq$^iD-bs^!8qr}Bm0!`d568VKAAhhzh z%IRR_M6XIHBR8kr_VM8(9F-f2azIRlv#5hH4a`j%s#p@1%Y+n>Nrt_UNd`}K0OtKU zcCC1i4nG9lvd+zltW!uU5qgVAU_TQ{&l(B8@4mqg!6g{D>xClr3v5wO;1KR-Df8Yn&Eymn)VOL<_}H6Z8Y~oiv|!-MXv;337@mq$o|-%r>5w(%6|&r{`v}|4`5$gui{GqtahDco z(__>GZBatD9se!T0v^1364xFqn%^f zbhV#LaCg8(EB{bLZ0@D`2oyiT;=VvmgJr?W!n))eUV^3K<|^ayhKITw4j?juk)@Jo zw;x3qMgBprNUd~-VCi6?5Gdl%70Lf~6xp#I5C9pEl|oMz2IvCzZZU1YQT>Wn!wP2{M~FU~T0choW799&X3My<Fn?QEdt?cRur|l6=LwZ=ON7y}> zNF7cH1whA;Df?nH#odWx8#+!N6MW`YaV)^iNqq&f6f8?mO^NDsjPN)`cNcHBeO zUGM?nPf4QvZG)C7M_X^WdYS4jldyF>G)JUQn?CFbybe_Kw~o|r*Zl+N*+#;2-;2n}wPFy&!gb7%}tEh-V z7WZ0jR7z8u`5c7Z!9?L!!GR$ju!A9b8uCAe=a>~R`K##ak^a-mO+c+j3~KTfwsFxq zO$lj9V21w`w_Dpt*vGF}TV^TrsYN?a(Z;KE{HVhKctS+{YeqJE5L8t$Fx5#z<}>-F zA_yZ`lBdi3pP^(IP=|>ZQz`9SIK2H~3dcX_sJ5=DM$%@-o%YbcG4#43KVFpvng$N3X3--BYMPnYM`VPXUmpxLy@u_rh#=j?U%= zk7Pv75DHi8YcM)9S`3AnR?S3BGCMQ-t8_^$?Lf<94}B#&!Cvnv;XEl$SX7V*^JbXx zS>wvYn{qNgXnAr+GlOZ^{o6YwmX@Jz-y+OFt%`tL zN#LON%}Ww_6)9!9ox^OhRlmHaG44Nfpa^nz0K?){nr{Z+DQ4%3{_PKTU{5Fr(E8a` zD;!-_dOb$^!$^cpd|AdAWv2>fVK$|3ipF_0Q6PyqfpwN|fwop$zD;hqv~^WKj7lY! zY;ogQ75Lm6id&AbJ=h6!C_r@ZAKl!umLotnVbI<4;W6(#b>b#R^_A*cjHd2R89%NV zjx*qnTu(aIs13ftV7aQ07h?!p4~!BM_60n54Bla?4!uV=T)nyX zX^91qVh4q!h{`3SZ#HWzW2#sDN3q3X#b|To4_&>H>!xg^~&oqaO(`+Fq#Sd|Gi(XiI zDS@^w6(WO;!fOsvJs9JOwYlQoMMyT37d`wrkd-;N(7(!2okm+n`j`|^<`5KOMmj-H;8Ol4c0#V*$~32X9&9?X(?Fkr?#m;%7Uaa zs+=4w-YBMt6*)t&A3zF0YmC9*Kn(AkvK)c;Sh*9N_=D99&8iMj{kxWuU}caGNZnP) zzPTR($C3R8wjh8uVBExY9aqdjh^?Lgc5d^H{?fHWA zb?&9#mI>iL(r1HS`?>+9a#gX0Z<^PWMbIa4bKhWC?WoSNeg0YUndnH$aerHx8Ao$AOZe1SPFHpl+LjRiDH1 zY&KQAQ%*iZ21j9O3}R4`QwI=G3sVH7AOunoEIH@9$_ZmT7@Xy-$Jrj>KV7>!6)aas>96N85E zR0&N&O}vLwusJa!N_5ELa&t(ZJ$XM>e6+KDp@AGOBdr>ZvQvUPD8Q{=jHi^%#i03j zyeE*w9gsjfng;=er>#@PddTK5QR9RBF_y5q`u_qHyRloaDFHyVTK0WVU{IZ!lI7>W zg(;`=f=9w1^Lhs9e{r;PVx#h!gRy%!wh}Nj92+Y|$OSc;V3GK5uxkNrot#3J)uo^7 zE&q8J!1?S;$~kcwKgx#SXh@T+a43iH%3NqjSfwvIpeJSGCunbs88zC3034J-wb z;kX!B8ai^9JF-lQr%&*v)}}dXBKjHulTg$8&7^ks0M_8VNd3?enJMKBi4c*AfGS`f zuf>QO1=WA|ajlqX^-dZpp&6D?6#iRBoAK`R; zn|73O)qPC~tmOo!qBbnri*EVB%@WIpdAN6+19qTEwmzf|qfXLy1U*m#uHe9{$fXO$ z`4YKDW6JTlIu%@;U(*inXia#i#KEeKYb@@!o`e4Jv~>+Qx4$c@)-`;*BD=9LqdDMG z(89gOq8tDEVbmuD-DtG_-Af&xsffrw28}r9&lwau+TqWfKruY5JCE$B1-%(BOph9| zl)-M`tpb?lg^~$u_jZ#e+qfSQ_Tf=wjpxpbplsHp}s)Y;#r|JF#UMw zUf{S#;bjg)W(tUiIkhTPyu4@oOWu}YO!{M9^Z`ji#S2Prc?l}K9(x|5DD04Q+uxj3 z;M83vbBr?tGAXvw^}e^^r30TC!R0bu3>l2o@Nu%Zpd25uB1LxZO7-^|nuC+pz&aMU zA%tL{J5B){5mH(#Zyd`YLyje(F*PjftMyeAi zzA<3}W+P76xd81diBq&bgnlYWaoYOe^LfvkNqgks2prtoU^?F%*Q!lEKG!3T8;@h9 znq^?%6eE}z)o3P(wvti7_e9E^va3wEYFFMG}!9KFwk(3|_gaxjOYBCHs0q&0Q41DB(oOeMpkV9Js+^X~{1I4!@K&q^ie1ct3R+Q3P@qE5b#i~#mAM=(;FR_rJW!%UMlFPw3o$zgeD`PFh zNtg@E0swH3d6}xi3kr|a1But>jK(*s3&6C zYHi64VU9Vho&*sr!dE$Y??By9mD}3CHcCeGk`m7#XumfIA7sUT4vu#DU}F03@_F#7 zKrFgd18(cyME3tpGFJ~eUa7X=jzlMEZ9bm5FFcD8fQo9Y+)jjwA7d1D`r=x*Uo*Nb zm)(FN^%rb+#k%xw7G(E%x|HDPPz6rfU9vgO{`7@9?7r4Odix4=P~N4NZQS-6xXP6J)xU}E2Z~r~tL!h2uY0C9cy2yp zv{-eTPv)?76R8*7AAjO8;QFSYVLY`=Fsnh~Y`I$>@jze}Hg12*OZ?Bt?)DNUjiJ$4YboG|N5;-P7AWQc{G=v%P&=^zx&1mn|l1! 
zyfs4h>({HD<|h^u&fHQ3vIQlPeI#%fT2|Q~s1DUo!OzRxOz32*Av~~(qhK8Lj4`#tfe@yAIYk;Mpu$4 z=fn{2+8}RwxTUN-AeP~DR~ipT29kj#k@*ghFt_!QjaLj2DTVv=y<4kCM=fyAr$mbs zZ1&u8H!v(DjU~f-wx3MjILPD|Zw$Q^S(GGh-E+x{>X&5H-jLnpEu*Fm_aOF4k>47q zR}L0VS;UC!%S-x_+Wf+Bifm2Z^$g~C45Y$t!Zb;-{(be`)e^dkfHfC>&Z z^dBugFY|FB!CkP+^V7a{Zi#L)CSX{g9V z&V9;QkG*HR7Z^h}!UEwA5A~sYet~!N{d$+n{^Y7f9nZE9xg+Shg3n5ydK+7^zp|Av zT2*aP)0QV=x4G9cO>9_HU&4Qmf8su)lde@_N1)bXgl3-@(a3$zjwe_b^4-N$ z$+Va)ZS$_}{Kb9ePW;IfSJZk!BqKM_^We)dZJmVHbq~4d@MeK`F{D@UpS-a;EY6z~ zUt3y2_;)kHMt31ug%~mSf7w*Gwd(R};tw zbO5MhnLDqFFI(3*q#V(37rl`Qn3LmZcY_*U_EXVys}>1JW$S=aEX!x`IBbcnI8^QZ zobj%u8 z)wbObJf&gJ5;lKRR`>cUpXbVeal`oQdN&=*p4EBJ61$!hU?pOM#B`(3Z+EndYARzU zn4PCXvoifLT)@gQG)|JB8tDQVg=Up?|H#G_`&3k!7l*rrjdXyn^=VOb^5Mgt3t+y6 zs#2h7os#OW)r4IZ>=fLI`p@Q)?9a$)tZ6LRlLn->YkSw@c5pV$&nl|6`aDIjOSOrl za^cE_!P)O6qhqqn`M<)tJ=z{RO%__0TcSL@PHM2ZbyTV?A~2m?!mqPB9&xyNX)_ zDoX5U&WOBEmpb(kIAxFEO~N=x!!>LqgSIaJJ&{-ChZ@FO4EnL^mU}05DdCFe%J;9r z=l4bLcj{?hU+S*Q>pi>U{Dge3vvN?=%UhQ4fL^3HeO~>VM(dP>5uEr2oVpXgYXS0N4 zH_zAyv)+ZA$cfW z+bRyAMh@vVafGt2^Los%O`i>3hFFJ~GdBnvD@m1B-}@;3#y*}#*QXau^N}<+hGXv$2|$_3=2AQ>8oAZ?&G7Fqk1V;|IqUtZ z@8?0y6uQEFX)3LjZj5KI;LRTR*7AM6{TUj%LP$~BCB?`h&5tJ;SvKWgkL8-woI#bx zY(Mt@#`R#$=v^k1C_F2Lg%cxfsut7qd8;zU)MB6Z_n}Plq9r=a&K-@vAP}-yO0rVh ztXqFfH(*BX*w*zUo%h^gZ8Vg_Q(4p(@AUFcI_+dn!&G*f_Y_h82O!LTBsS)I)C`&sIGDH0)z#g0e_I$LBP!>1g8XNNP~E2v6y= zS97(6JYz9llHnS!TZ`#-X|w;t_JqOB-vO4uohY5vuJLC`dgNa&D+7}j9eWEPvWS!b zQK^qZNTue7rgV5aQ_XLR%~aQJ@oFk|uhh}*HxP_yfHNK67hjOSVmf7Q(q(Ck+MMNg>E4K48b$KHw0zXHo|w%qIxzWxlw}&5wEqIYya9YN za^g`-O8SC~%$Gs?Rrr~@s?J-J(BIe^`)8US%9;l3)!qTJai&Y$LJR5LX|^-BpNsVC zj#9QWvAGY7RBCU)^_28^-a6n^Ccr8#RQEUca6SI=khMA+XdJxC@XP@)6*o$DT}Oj@0nJMg-(UFpLLb+SZQD0AXV12&6sZ8;#s*cSIjK zJsaz>Xx@oe+>W-KZ!ylN6+I#!vtku}(YYk!pi6t&x}eOJJei_}NTt(b88(yc+l^T= zai&vzR_T!xx0f68ql{HP%uyn6u=CL*zZR~*;Z5ucgBG&zN}jM~Im8ch3=9FmJA8G$ z0>6(P&gAT4j-c>CQ!Mjbi%Om}?jBvuh8Ts+lg^wzmCkQ-Hk#6xDb2(&f}`JDFp^!ccq z;@@K*sAN3i%)9RAHWZb-im@$ofzt4hp*Ueawm;iSoo{Ab*ZG8cDoliLWMPfVL- zYJ*Lf6{7k)Jz}q{R)Pf{L+8+D7~1X(Bncb2UGimGH;HtSWO~ zrjvGMV6(fCNQ%p!aIyVUF&V$<+SD7iTNtS|!84m!*iXzmI+*BZLOq}gmngczPm?;? zwFY#c$;$#q!zf?4&RsI~Bx7py7Wb)RR^D1P@#3W$IlW2evhLe z#tN!rPLG$f^smruw3#Ha^k$gVg#=Ey=p|neUD8?k2F}s6?aTBHwA9|W+AZo_m765q zC~<^Q<_f&5f>vd?UxLX_dN56iZ9o_iB=rxXo~PGN-4a&P#ck!ppqE6vvXO!(#@7oQv@F$v`P=3dHF;MXavpUlGcn{% z{)SmLZppYs6;3=4-ki?=SEbqhAwwhduWJI7Q2|tO+2G=!qoPJk)m-K-H96nLU+8Gq zTnjRdXv<4_*hT7?jFl58%VZ(q-PFj1nzwH_zh_TZ{kb%7Kenm5&h?lRrm9@Oe&ML> z3`%a`#S4=Y0#?l{)B~l@99XHP{mI6aDb-=C|xOtd5P7n_Rnlt@+Uwy2+^CYgn8*P{~-p(^nwA2DfG zP2ks$_;~85X=HQo?gy<9jkGvk)k?Eo4tp~|be2U*#XpJxD~RLtc~wA$E7i%9tD>20 zxdfA<3+FippZA8Uy}FpG^Q({Mrle@tg*b;^fdTS@sB7GG4KlcaUp?EGIOl~AGK`KlOeHn z*Fma%^lMg98E022!yO`&mpjJoVKo`qu$XJ#+(|)PfX}@-23M!q=AmP?Pd?OmcTRup z)vSCMc}C@%+mN@ETV9t<;AY?^mudAoPbMt-)`rpG5vL3~es9Us<|3!bvl(>rn(%gV1e z5V4$pd1HUqdzkF#w1R@SFXR4adygBj?b}C>^jTdE3jmpYqQ0}OPNd3U`{5s~xBw&n zV<;~MjN49Qe$uvmom4EEopSG*Id`e*n~+w?OyBMg$35rC?+bYj4whu7m@xQ15snk# zWqvjPDacHf{X|T%c0KFaaYZVTfv+xhGwq3|O`oIdT!J+|*c;nX2vJdsx|Q%AlD7YBze!zEp-?msa}R=K|^y@6NUN_fO! 
zT$~nOOt~jKe7`E;9q%c9R>SFT^Yz6WLnbTDK^wH*sd1}cmZqQIx-?nxRn4MZGLs<@ zO?OupJ$pIxiOJVGqjq|_*Yj@$NEqu(Sx$@o!76eVw&#sprIHopQg^;ychiU{R4WHi~ zp-&ckR8{JJ{#>1lp0LDGFa&ur_JaTU3|mS_?6={ieZ6R3fg;g?(GK%*=FzYOU4&ERZKNo6!z-0Tbw}cL{Zn$&_3UUNVl_z@x5drnlisxFE@-;u&tYODw({xt z*)eXYN9mPypPCth&_1s^0+?mAM&UQ4cK^tTkh-#g3yyQWG#S{Gp?bc~<^EZRZvjtA z66j4IG|$iR_Z)M3U5cl9y>Q?A&jD7dz_|!ptfBbuZ>!9Zflli|4Xv83+{+MG+0T-` z4+wXfM12%?l=IRJxFfQT-9pXagH~=Ue4^t|H0g;xO?3ec==x5Gs-c3}uoau@?!(3upcJ40f&n{zvtQbkN z+@5wnnH=83&ORqPC($L}Wnt`X_{(z=|9itK@H_+;eW#g^Jw~!;`^6P>Ew{?nZ3fZU zZx*X^wPo97^}5T}Ey@orf7#Dn8px(U?}zSkXpo}TTYn~;xQw2Ewd<_f_*Y)Ldzl+_iFRsm8{zmp_VXv=fkY%6v-QX^h;&(`A*G`GZuO9ddsw1XT=(t&0rDkz0I zX07KHCcR%LP@=0DwOwtKqyG59NrbWSTv|ymD^M|^c9&?St8-Zf#Abbw@Y%5YyZet5aDh>J-gALqfPf;FC9Te>V5uEidRo)RD=wyhx8(ipPQN@K z)q9p%ML*5I8MLCwXQ2WKQv2@AadAEihmg`6r5L)tD-v5SO(IBz^Ls&z@-o9y23-`E zi($JuE?H4UoTxtUz&p)=-I6h`wyOQyTmNvRBPOW<=fP0t5(+wP>~TNjovyo|t7FJJ zP0d_V+nJjx*OA^jOZ>MlauKL2RKTW1nnlkvU zhq=D2j{0o7J72SFP%K4f7Yd4xjfp1vh^cY0o4%PY=ure*QJEKSzK`9{CICxP+^Ea1 z6GV3u|Ed0%>zih(&EJ&`iBLP;Bp1t1l2eQ4=h7z&$I;Imb&YksT?T<&Xi#-l=c685 z$EIMikM@0)?j1ZP)HkW$K=g#^Dbg$sJmn`ey+>al{@9=U$CyPZ>w77wM$^0IzHPGS zL{2OQuNq)JOn4Z^_NAWzqJ0#fO&FJX8Z=cSTLm}>7wc%^~0oRMroLZ zYZ1lq(iBZq;kVF;ejmWqmAVYk{n83m?r7xf^|DPeGi#=}d^6}YR#04$-Sw;T?#A8E zj`}R;t9$UlD_>`QW$$2TAO!TnU##99oDT>PqVsDrK^I|)tSPu`$^f}b8v+wDL}{!) z@UjWK-<&30?#kSb5i6+wC8eBhe$v)&@caF-velecojI9hG|0EqRB^1lMtOM5oX_Hw z!ro2&C|s#_o*aM*n_xE)%IT@b2j!rZzKsSrzkc?uzFzrBe<%z|zJsfCoU;@*gNa8h zJ4ZY;9~ir)$#1TFy>KzWX)yA`ML}gYss_Dt*qcAU8r#OrJg7ERSb{n#^4=uWdnAyU zX<8wDnyr0jS}(bzzFM>Y#G93;9F4If{d2%lQH<{^>mZlL48}tzN!hVrBR1rhUN{DB ztDDzmF)RNu^JF!Wol7UEH+)eW+K$C9raJ{q86DMk6~en2dgV3Fiheh2sMGk6KP;6I zYQdBH{%hu=tC#NTtvYCr%n|jLH-v6GJg2_Wxa;1)P&5)KBu?_R>_6`UsTyMO<=2m$`%vJ^Et2NZ6a8Qi^W?jJJ;>Zbw#f9_}d@D-tF($Ee#CX zY+w%xvhLHjo@WM%U+I(7pKfjI4OPrdnUDZxm#<1W-Mv?w&pz1z5#}An1#ewxqRh2L zTIvl1psZq|e9-fg%M7pS3#S+b%Fbr04mitxOQa1X7wx#;bLH>#YsH!G9O;bl1~;zDkhty0iRs5a4WZe#RcmxQeK=Sc8a=&E&FnoQ?G)k{D@;& z*MrCi3Qq>9NUc0M_7_LszXZU%s@k&v(6BxpUS&0OzoO~dyh1YtwcB3l*{jIeUvss; zFeF;{Fg2McUt$_A4o)pvCziP3QSZ)9gA39XsjAbK$`w8Hq%BZSS0hsR^uCXo${E4< z%wfs--k=#J!~K2+9qL()lX$qdexl2xZ9SwijLREC@LH$7R` z(#^H|3O|#o@q}+YGG8Cf+J1jDN*2uWGVn=*PT)mmkByKO?vUO<|5K_}lI=2rIf^ll zCx(Sv-Q&rvcO6oAA6WkiT}lzKxcuS$U+Eqj9({_l#0lTG%mDRrH%OrkLT!W_m*`$3 z+k|!`7->gbQmwWhyc#HJPD2ZVLEflZs4jeJ8Vp|eGkDncs`1?6gRy!er{FZzYO26iGX3{*|j?bhz_m2G#)>L;3muahN zD;E6yCJ(|VqJ;GV{E98=Jstn)) znC5Qf?(HTcPL^0Rl48g_U-WAD=dQ&j;()bJd|^fHfy6h_78vEX1!314>_b(QD2x?* zKaJ1gIbv(O^KR(vxjN}OP(SCn-AuZ0J*2C^kFltnDQi>;GwHZ~;rRX;jQhn!V$>V{ zxo?eGUlW#ZJNYwWJk**8e_FefW?Xcoc8VRUbN}IfxqwbF)-=4x!Q`^L_rs^z>&+Io zQokoRJ8%}C*4+~>r2VNizP)O^NtfJ3YWmtWLd6U^TPOOOnQpPon*-;-G7QHUAaNB9Or`Azw^z(1D(>* zB@|kl_8j-W-<)3sT91i3Q6(n;Y@E8QFvBh@{c^NPO%bQhuPU_$gIr&=@1Z%Z+l~*K zKL-sy`SJ>+K%s;_0GX8x?uH&WoC4`J;qY5UI^ zV%&-+U7~;PQK*DRMw(f^mQ3%(lLicqLtUni@@;!F{ai2mptS>o zS2%RikLjK^KJv&Q#(9z|oaZj`LavhvpGAGTb?&7}$_K08?pNix5TRcDD$$yurC(#_ z>L@vLPNQM2UGnM`zpY7q-f8>e-@5Th5!Y(VzNVVCy8nfyBWGrw3+EqK4|2MZ5-^ij zxf$e`f$i|{3^*e`RF#swJ`KeF^HZh@E!LyP;VlT>N-s_iHcD zTobf^D6f}Fgj&T2i`58)`At znN1v(VflMh?PtU*3qy|H#fiW;Azf+siRz?Z*0pzoFME+%oPt$vh$Lkx9bv9fs*iu}3`GRU4l~6UigiryC_4 zf*IyFw|)hyX-PE>akMxd4>B{OE7WO!atb+7j*aRF$2&UyacFtotiUfnt*K>Gn8Ulp zPyg0Wt*MIHxM68ghoMJ{^P480Mc?U|7GD!R;$qG_?QgI`{5G9zAnN^$523LzsHx_hr8Tk?~cAMuqx<~K#xKs=k8IP5jWnVoR7Is4nB6xre@+2IpeFDHD z+NV-uvq$$d$}9Zi$`U)!oPzT_f<22l3K7ghe_OlTHms{OedAWoy!pdo+^y~$KckBL z6s^ddBaZHuYD#-TkXga6wN;l-H%M^|JO6gnYAMJGM}}U-Vvn;Lfk7QZf55o9TNJ{3ois1!f@W zyaIDCj55x*iJ;UzOC4XTou%!*&eOwLbfs>Z_MGAO1dYN#t^V{;5skh-eWV`?@X|>+ 
z2#)Jz-B_O7GbygAS~Y5)wwj94_0yqe*w)Exy|T6OTAagnog%?#grTA?CwK*pL!qRG zHA?T{JZQKURQx%tQC?yQ{aC>DimW$H{wkL*a6VP6;FzPy_VuRex^kT88P*Kqr9>5z z*`Kn_GjD7;a|?Z^f9z?jv~O6?IMKHFn6Wf3Fs`p=D2;1wEg!?8f0Ch>P^gBq$~tR4 zi?ht`7&;CoV4uF*@7*8j;~4_f{-RrTHXUg-=y zXR!dkf2MF;WV(?qBg*l7W+|62^lS7YI%pETYS*5HxZp4mm`^^UE&HWQXMjs{=w+yC#_6> zpCW_Km4~OE!u!e%pQqf-Lk4{Bmp*wYZU5{j@gM)qp9TXU>@oIrwZNgz7mH4WV#F6Y zzHe?2TU-sdqOj^6F?W|jpkK|;r02Zk70E4W-msKA@S^ou+OCAVp@Dy?3#4Ss-k^Cl zo;I(RGw955b?$t>ozEX?()#+*;aNxgxj2|>h3sIAQ5n5eR(qHU0LSv-d6JkmJxC_Q5zSD5;-W`*hH+StNQ-ft4I?C=FVdv|d$;A7+}ciE&!H z&C|f)zOu9%QEO84?(f=x@t&%$!c5;}gy;i*n4ELW5PNQz@9XEdZdQq>SfmAcmd!EUsgA zFGlTp6QRm$Ki(x3p|vd;p+h$dJ$^rm!%#<*g|Q5oIK_T4p)T#!yIP;D9sd30!gzV1 z8O!vhOM~p+25&OH8zo}{q2gJ7r!a|v@#^?Jq6KgVMX1W|hBPngf^N#{9?65B)F-+6 z8ZM_YTwlL9+Ep&kem*mqmg>m-Yya1|Mgx1-!|KSXqw*E}T>F+0PUqmGrDH`smXTkP zTyuZV?sZ?Tdxk`~m8T(vR!50zTV@?0*ZV*P;Lbw0m8c}COG{VWZXA1fYWAc}g}f<^ z$no)SzsqSI1*9X2SU%KBoxbyiEgk)ui6Un6xBMv6bNkE6B1(Q@M z`!Aj`NNiTlnex1dMHA773|GdVSN;1as{9Mg1EJ1AA?RdVo<1oOeFWw9WYD0!#&P1I zdF5G>>iYFx0Q~rS3M*=X8eCqA()*C>yS$ zSA~iK1T;1WM5NW#bQP+v3g$U1uDguj^&3q5!kH*~9j8;tv998;qMWYB`hL9|Yt=p; z`C}N(P5R`O33@-4m_28a;?dO(PAs~TyvEue|%Nv0;C5lOy5)(Xci=(1=D7FMjz7D(NB!%$$l(>2go(j!nRjwBGicU9!m z8qw+#-f3>R(e2WhpE{UzAAbp-H);z=+cM^T6xr%c$TwYpEZ}p(sqAnvLtCU z{(SH#DGGOhqlsgfZtLh zJwUdC6ysV=)-95y*n6$_#3$Z{3dg4W{Oe4U-`=Qxwo15k?=9REQPeCt$p;ip>Yc+o zmWM!~s3L(P*jejEin@>uC*2tPYqb9R&l{bqu|$-P3C~>@a>)HfuJLPW25Z?;%K@k3 zUz|P9&e5L!mIC|gGM4oTv0gd}8D z_Le=%40UAh?7bb^?{(?^e81m6emx%d{m1?2eqY!18qe4Bwch9JS4)MbuM%qA$0}rn zi?mW_#o=ilBKej>oRXb(oI6)}YR}yCd~42lKn<#-endO97(W)Xt|pnNichKPwgeBCctg&Z1wP?!YQ< zYTkS<{bl)MHA>CmWix)+&4rWvnhOVNy{yI2?}njm6l4!=IgNlw8G>7QlN&4vjd+F{ zhEx0U>a=eV1b&&;8S390EEPSg_9pPRRZdS=b8We|PXL{o{(brIuCcOC*>ww83lYm-A9QSbNyA|_$fgw_&pRd+F>1-pXfF@RL zTR|bmWV{h9QhGc`6jw{v*qfbcmq4uT383~Q6;KL#M)q7NW9&%}RsT`KCPMn%ax*lS zWkB8VRPsgAuX`0(2L0i_m324stft4`Id!=XnU>mztPD|iRB_(#t{UweJcns;fn?7x)uMFi&{srjBtA9BxIMnMcJSLUt>$orDdtXX)Ho^X*m!%2^4l}h zEDZR?qx{;J14h#-MJ@5R<$j`;vj{w?UFoQL8kg(SNOnx>&S8QC?Y^Bj>d~Qy=7H{F z%P5rpdeEthq^gFS%JLFaUbfl=7VSF^nAE;NU9#@k?Vmigrrdn91B_#hBJmBB(U*mV z``^^+PoEawy|1n0qI&gNQ;=nb9T}Ou-ewe;!&5t6 z=EJL_oeOrZ?{nb!P?hxi0cgPhf=D5I2WnVyQP^@7n zD|;!Dao~`_*Q(xXXLP8<(W>qrq^3E)JO^kdlyJFdtfx=@`0yqHf8vu5jd<4i$>~q5 z^pv9ove)3;_BZ&Gbys*@J@jKTOzUNAZhDklDQbO8u+vH+XVw<{~f1{a^av@Cd*RrH1)@TmpT2?wWUD+I; zIiI#^bxW3Pgi6eo47DH=eAFY4ql5UNiJFYv&jh{QKC`YK758Mu0bj^}_S14TDIw*% zn)tF>{XiA#go5c6g2ZKEo?5YVpGNo4{6C!--a1*BSa_MB1Wp1lemM5vQ3w+TU!Zut zo0)z7Wy1aSKzl{afr`k2h9e?}eAz}PEmW7B^# z&R=Bw7!G~PieIyau=`GjUz2Cm58V}pI7G3Rb&1rqg2g#*NSDuD1Grp;z+j_6T;8vkd^zE2_WIu5I3R7$E zqb*BnfwC4)?L6TvdQx474Z0rx?pe=fQE^;FaMgi+`8S6~uXUH3&{I>gGI)Ywg7V@* zJwbtVzW&Sa#`D>=7!M4J`4Qi=wclF$mM9xm{<}eRBt=WQb}KO9);7?Q$Uk!Y^3wT1 zA3w@=Upd}d3i!Hm?YDsZqYa0%1PN`udx7@vxSUy!(o!U_BPO6D_hv1l+wr}S+AU<; zCgo`oZ{;Olnmg%9m-Gg;Pz&nNRMY0mYc`mU4?n){n*r5Qy^;65P~H7Ld{X_UDf5ND z59|mLB?IpyKCHgVdoX<`zne>VO zRuD4Mui~Fff$I2E1Xi#h==a;oS946_Z?EGO)A`xj-Zz=3hXX>Op&u0&(>U+3jt+`{ zyGZHO(fu|7A9Hd)bW5CmH(Zz?n5|5AB<0u?P1vxMXf%C;m5($m>Ux~er#+p1<|fJj zOg=mk#O9oi-U+CT_1zxgyL?$O1{EejQ*Usl1L*yus%Mj7eStz;wlCjLl+RdpT= zN*U9HAGrUR%KUPb9lNmvPwg?76+qQd6x#%;0=LtpJGA}h>h(FE3jJeN%_9s0Y8BU? 
zxdN`5L#O6mOd9^0j<#vHsUS zBac1l*o3jgkiBEX%KK^9G$}x3WJAFiH2G`kpvNV9Z+Kah=a3eY#|6$iwP$>+QJ^(x z;bc5L?m^w3tI$(XbE-9-WFh2D-M(+cop0JTwzVAaEv?BqV&xJ~?FsdQiceXF!=hnA zh%5UZ@;Ej#K@Pjp2L!M*D9NL}isLL^()a_{kEpo$`{3il+M_YQ200yS3t6SRe+I8Fh!igu?@ zvo71E{kdG;Y)PH%eCU;vdBP70^Mhp3WXxm9%(MYNL=MlmESBc^cceAFRNA!U*DR9L zC~&z?*Qou#kBl$#OYRh?NmpurHAOAzsKkr(lf_A%X!*B+V%{VIlKm9ky)42-;ow;q zo25RT`%AB1t>=n@!Jx}xQQqTB#s9ErHQX`Z>ifIRVHB&#E5IRI$04nJa23FiHFs1eF#Qj-UmN^)uSsH+H!b z{H^W2hq(lvJ|Tq-rkxdTr!#6|6-%!8>VJLY?IKXmfI^|L zmj&n4(h0|G2CW}IUwooRch?2|sXhV!kI0{!jNBF^5xBu7TRKA#d!mblOJ8}AHv^(e zbL8o=Yeu078ocl<3yXTOCX)yASWS&-*5TV#z2CA$GkNwzcHbMBKjWGN$DBOUu$V?< z5}4UuYVkfgNO_nLNIB{N{&}JbB7ttUfiN6uUE$h$@-(Z{_xIskLk*~si`*3QG34Wf z<|DLS)`@&H)O=iH*!d6%pj)><-TKf;%d~h%{3a!;2P z(D9?@;*n28tv?l-R={W>FQj%Q-10W6u8Z~ByBF0i+2ZigaWCSpE$bXAk=q0kuu!s- zvL;>;qa3sXsRHoLlIr`1G7W5Q?jKa)k2zFZV*2%`tAXxYk-1@R)JQ<$8&x3eZ5Nqz zXly=3*ak7Pyd9l`vRwv>oU}jd<*=chsl6j#Hh+xnKJrJq{dTI)8WlW zws3ghUcp{O)E3*~4csWw3Q41|N!xb^H8?CE4m(y$;PSH8JbS!X?B7v?J^-;CHSec~ z>Al}3Px2w-6`WBgPcK0w>T>Y$M|^qbHgqVe{%n6+LM)-*vUp=0ymiL92enY~nRqKx zxq!3x-%%CxwzPqct&$O2b&{+Kv)00I@#|FW&~s$jqxQ>MymO~!a*NeGJ+G^QAOZMU z3-Fvl?Awm)ojUpk?9>0U*ZvR_@{MW{m(GGwv5|i75eI*9=unO^Jh+fssPr~EzrBBp zm`=W6Psb`>q&L_2g043xcYNcbZ+mlFBDbr!(a_QtqoJ3USv-gP#*@a?DFer<40 z^vNbCKeO2vW#d{K4jf`1Ig6DgP|=~md9ev%(!)2FTwH?3hl_JNyDzjNQW79madwO$ z;LUbuUu9BqM*_QQ>2-pGJ8eZrRbMq-2dsLZ!e)@Pbica8v+%HHPpW-(IF4BRiB$y5 zx%?JQxHv>s1F)VANm$gr32+C;UBq0vHU@1~yQ-CiwHpObp1TORG4rDLV(LD z*|b01!FysK6ECV4|D+8Fg+9Zr;G?Ne=|0w}FvxpZl{WmE65afpfT!fw93x2hF$q-g zYJu}Rbn zD1T*U_F3V8sFH&6NlURo&lsq+&jkxwj&wP=Jf9`zxjfkwnygkPt@H9a|L;|T?3u7{ z%sS-KqORi4UOvj*TKo|m!NM~C$|I%+IL%SMIkgYuBCa@|6c{AiZoXO&B9sf~j2G^K z4xh`O_Sb%bsYUa{c$ibeujHo<<+~x)ecR6JJy&Cqs z`$i=_#}If4`;m842X*iJ9HprL*&KmX4J;v%fVGF!56kd4l=KjA~fcJZ4 z@`#ep;A(rXFUslf1Ly@aMXCx%=m;H=KN9HZOxsp+j@+scI)-Q6KIP@F8MM;fypFx; z4g=XdnZ@aHVRHl1&VTh8s;D>ohZv8eXpq}~8^Jvt^)r7$oN;i|E)RY2#Bw4feeL(f zqapXoX&=iQBv~3tNsi0>3Y9q0?0H{ttT+5Rbz)xE01gfK8JydAgK?qTJkX6VaRwH7 z52D!bsx+9XUV8*#`rQu$Pb1~MIDqq@Tw``F+^2DF9Ga@0t^-X{q_G*`DC3r(lNLRN zR@t1eEI&e2sG!Lg{{ZrGh%)INVLOcSKqJ`Uaj_tby`$!p7P!>!H@jL)$u1N*jT_YM z{Rguyv7C)USC;$3xQ~9~fF6J63%}QV5Bc(Vm&6v4Xzmk=(7)SUA%mK7oBDgnTD|2a z+}`9Brgx`z&cR9oO0p)kWIM(3D?lw(n?y8J_JlEKPQJd)Fy%L?PSgMRbk3ISVQ~q; z(>1SL7%eY%>7S9-*IIQv?#PD!G*+(D{vs%g)n0B(W~a!#Eqz8O&(|AMNBNbp%Hza7 zRY4j2lp|8|bCaRee%`B*NR{tKs3hl}*aa8+LMyR0>1VSf<$^?ZkL7bpC&KxgJShdv zT21zr*ODABQcvE(OmZ9%AxOyb>aELnQqe{{&J7Kqsq5aZmB$Af421X15a z5s827X)?DWf08@mz8ma$@`a7%8}+UApQP*FmfCN@=G_IZVorz$R!5{9I`MW3_)&4^ zN~VvzJsc@7+y1$r!M$r&*}25MO+ao_cH*%M$Ojds#+#$yXbxrlJO1JG9Y=3>$+9;| z=PhJqEm~xJI!t@SYP_1O?#*S`?+g6iW;RB zaPBOsqLe236w4_?A8ByBs;_H{^fYMW5Z0*3c3}mWv`#5d8r^v;*h2dAl$%TVno0`E z-y!&!-le5xU}X;dRZp9T)X}P|i9C~6pnxf!jf2)kbXN9qN>%Iul2NjPP00B&q*2GNX@O(`VIcR4?Kawe0tmR?(rafmFB&hX@BB=z>hijgLfAWZD8@k z?_%=46%U{++##9OUJ;?^~Cx z%`AV)+>CxKzAH_q;g;fB5Ozs|o0nFr?Kk`H{->*R@A54-LZSYc2>x@=xDAiLMJ{;n zEJ7mG2wAaNOI2r39tWY;aBEH7n6<>VHbE2h6#HIeZiPpLVSFV+qveI zD|Gj)V^)Tq=sqR4d$)*fA(^Q51+_rOMP*zah&L@%1Qlg7P2lZ&_Kp5|w(7HE@FYpe zUOE=O7S3E~B@RBl;HKu+zh3?=2=igYW!5;0_IiEohQVS1Ues)CE-xZ4*qYiz0xN=4 zaX-;q3R=l>;U>5Zl$*>09V_cBH&FuVpTMCF1g9lTbDiLLRC{fMVkXd`6IKQAS!y7l z=TX!A7(Y83H*NLQo0ib9b+M`y`@8vsPP6R>SG>ET^aIb0=dMs)46KEMlsCqqn2EB~ zy=_j8x8d0<8x^B0L_n$d#PQMBNZfh?!B=ywU$o4r)+X(biEQ`EHc6{!m|)0+m1Dlu zR#+lR7(lng5XRf@w={Wj-+9%uFcVX~vllA6S;xe$sT*jzK@3_co>@5saaMBOj!J4> z(@~kP$zkx9 zd5l|z2OR#+?MZP=a>R$+&b9Qk0(mcj_kYcmqeZb_gaM<&_fficJ&%sON@VsvLkHKG z_=9&!cl8*wI~eoPl1ONW-Qv0LGF{H_(C1M4;-J$KP)l9XTP;#wW4$IUL@C_9T|U13 z6NN)R*TA^K7qAs`QRIWe!mN1c3!D%LM`cNqY(xuUHPXCi?{(C6(&D3~QI>e+*g0B4 
z75>&>Q8aa#=#A-oQqWKvAa7YwZVM*dqCD-8_IlZ7M|RWUNB!B!sD!_}%3H6&X{)L( zq5gxhzs;+*{2uGW*s7#8V zz?0VN9~3V3eOnVhgkl#&)NJhhLO!3lZ*gX~%)G_DB~L#42R7A>LfmcG!o{J%^&0l# z^=)sC^%+(N(}~9->Wh08%GRyjGt5~zGcs)Xmd0SEj=An_Wqsu)Xxye&?mU?;dK$_i z+wV@YJP%*$29OhLk@r`9^r^vDO5oHaVVYX=%X7QG>D-;z{KyQmr&BimIXU7{G3EEN z{^D{i$=JHeP4EDe$gWWTvSXFM8HoQvR3($(!QW%BiE?{cx_WuQ>I>_v zwcFVgbeBDpExf9k1A$>;j5{V47y}1ywiKxdKavp?w`W-@HgQjHR_ZQxnSTHeaWmRW z?hDT4D318$5CJtkq`a54_IGE8&;DlRYapgDg2**@TO9hG%PvEPv%N=LW(Wzfrh*OM^QA-N*}-LGgvYW|Kn>pR#8JU&o9CKsjpzm#Uf}W_*qdjkNMDpSjdZQ z{!=wXtZEv4cpAzLcOfu>c}{9tt~+ccNyWq}m5kke*<~eU&)usavg6YAxdXMrU!CSgQvDmfH z^4@X{5*^g^xLs^COl5sGT4qQ9W)}KlVX*vqkPtLi9!XPpZ2w#?wqV|Uxct2)-mywX zWTt8_DRw6R^1#;PtEU~x`*VBSM{77Q>sqP&&6{vdJX_8fW^L~_c4>0>0TC&Sg)<4% z-4oE+26Ou_wuMBM8Myac$BrPCvB4zc(w6kiZQPN-jT^3Q)qf>EmAL=i%S#hl_Pua4$YhkE*q5t z4}w+_RZKW<(V(=iwrKPSG2K`mKtEQCv25p0;47Zbp^5AJ^ka4G6_NLfO5Z8;UeQ=Z zA+d^r3ASWd1?0N*cbDZS_UjTilDS5D0ebk7cqZQ(QRsP{$(Otc4@PCi>0ADUAzAH$ zo-sR3E}Gq0O5iv*v*}SK@AHxmN8UQFpKx$d3KfI4t2$NqwIsMc%=^Lkl(L6b-vSs;q{9Yt4dMHbCR+_M8lE^fDkHxh47BPJ&#NXS@E2;`i{MLIWEGlzB{JBDP6ASIeK-k%#z&K`zMwSdQ%rV>%cZv`R45}<{X`rM3j1-Kn~)wryXD6C z&L>Qp+OR97Bj&7`o&K4rfe%**E1VuN#37?+))^7<*Dqb}t$Dc@C1@S>?iO6A$d~8d zrieK~t6<{Lou7dHvg#I&%tWKq7^Vfy9ERqyI5950P_-OuuqQ`VMU0DRh0+gbT=_Wa z7{085@D!o~3hQJ06>HiotA}yXESe_o%4BGaml@A#X-l6o$Z*yC48s$d*89V+BuS>< zj%(UP1NC_kD&$(TYy&gOKQ{23SWYe%aRg{+jY$VkEzQe*Gs@)oL0<`$cBPMbOhwvb zr*&-Z9Of93j%$!%2>!kCk zx)!}NMP*kFWri>=9}A`rbY(wSrCe1UZ7$Te(PN8RmN;F z5$vIr5M)jEy=@(_Ba#Im;oKzqrZEV$;O?Q zTh+PdxPi~BZuaW)%NNPkV`ZsOmh&%f&plP+4vMjXtd$EMorb_UdNhw76M(3607oUo zC9rWZa^rL^$0XOMm|VW@W`3K}$H79Xo7&>XJ78G~KiT+{vrH#6Kiz8$lLn$H_8&<2mgk`hL9@jj_%G zi<9ekBYnJ&UbtloH zB=q={xK_|gJ!h=SwQia;5k4@G^ouFZ;x1!VCi9o5gB?!%T?0~zl6<^siwsk!&(p&b z4-LUbRh;Xk$x{4;oqx|fmqGdz)#zC{#Efy}T%?q225~8;YY~rx6(5R{-8{LV)T`3% zbTRfoSB`1Z6WF_x+x)!JXZKa&SNl_b*?#@mg=e|;WGG(O)Yj;Pjokh(Z`t7iOi&Up zA_$Q0IF?wt`*<{%qM;~gmG3y)@>Xg9S3t<$_DQ783wxrQN= z7H47kyRt^Qu0-eds;POwW!7GuY&#fWA|XRw@DvRx;ur8J*g0Og3?#SW+&P~<(z>K^ zBc6^L)dF=zF_1%CW`pUx+=qvY2PGBZdv4ERXsO9fe{rVWwu#za3>9PS}fI$_VnlkH}>vs-H)OKB%=+RvC632F_ zZGX{*)b5h%b&rotv&$cU&OVN%t#)c|jZMIdZgo-0F+SkVd%$;?|5O*}ahdd&y&|K0 zT`-KDIk$4>CKc1os#->no5ykg)8){6yq-q#{m*CW(hi@kP`cG1<7Yk@GlJKg%`X|Z z(^AB^TD8!ejV}>~R61Di65{q@ZKj~i>kWo4iTN7-I~{2OiJph*tHZFl@f>D*QK(oe&ezN9R@*}sjqyPm{C#*&IcZTz>)KT9IbM0r~ z*II1*wqK3k?{?cU{iV@iXOX(H^E7(45DTN5A_82DxDeZXTC31SyWW^L4y3(bQN&xr z8aIbVSqo%NMu#4K`2`WifD%)!ccn%Tp$y)og>W4P{x8Uqp~lRk(9K^~B|K66Mlc(X z>^&~+SkLS}-3q6qX4mH{{-hZvQ@v9^ZM7{98{6qdsC(xK5^f{j^m+ZMQrrSrgGzCW z-xLy+CFy!kE6MT6{S4)L`E!2HAWPA<1;s*mGuINhH7)G>_c6w0-BssQ@yL0?yXb57 zka@BJBrNQZbK=mu&VuJZv~PiY&K93u=%$Q3pa*gBS};+|r(|%)?b~Rcf1{c)!)p;i zdoAjPH%n0yR`XC1zpu@LTCqRq!&r!~Sr7L<3b)8B!Uu>Yrrlh~H76O)|5jhv#r97; zRlex|*K0j8I`Zp<@f*C_Hv;CRu$D0^l|N|NNxh01Y&_;X&~Qy733A>>_8@M)WPUH( z%?ZO2-aZUHEk*A2yV{##)CNlnEy)a0JXt#zHj~IJ$~pg@>3e9=-X%cZk}k~{ULpCn zcG@gkKr_M~jFo;|cubNYv707O6>T50B1Mfx$=W)65_+mi)M<6v8*bmw^R)zg8j0*N zhK+#$Bk^eo>XG^JXfT2=^Nr>MQDc$+sFmJRJGL7bu4JL_3Ev+dH zdxe*eoBdS}DB(W6@JCEt|9xT1y~QcNaW39J+b3tWP=jR2uJ3Ju%y1+sV`~sX!(QGi zb572z{H~Po&DD^$Uu^IC@8rv_L&19bV$7cWO{x-YUpCmGq+PT)wYzb*%2!ltj&tt~ zrQ2Y5g}Y9$^pU|(`gfUz(4sXije$}$Ez;}_(wXxalQ~C*@57k{Es&wGLN7@23C-_C z?xsp};NVgNHe?wnmm$|)FoH*cL8gf56aR0eAoyLQvY}vDvMzLxGVAh0@Y?V!R1Jgl z;~!^mXctr~*~{^{{G>jJ+O`xZKY?~+lcVyCun~E3=CIPog5_hv_xsLW#Y`@ayPc(w zd^RC?DhSRBtc%M{WuI)#?MEw3@0@|x^LYGdzAAE|>I_?v()uU#Rn&!7^x8=X!#;3i z`raU??0P2tjcStj(QQ`57{!@xt1*E&l z6J+zKj0`ox00()R;%*~nDSut7l)+9<|FXYzfNur*OI8C0bR(qkBXG(q(X)~~&`~Y^ zJ8H=zyr0EX!$uqi*6Hz@3KWVb6$Yw@KmNTE%j^-vQ24a)*yS&_VT`qwRto9@;A 
z!e!c_UhG%{$(f^h{2&4W*eqMOlYW;8XS@DGMKC`aKeU>cd@yM4c~tqbcE7~X1**T| zQ1nfA;l8NnIEUG}Q$@KfsjKJ^sNU;Jq$DHvK&QO^jt_#}G;rAv<5Hz!|LxcW=){OZ z5K@@{zeG;!sxx&Y6+@|fWPvh=?hK!6i;J{SQ0!K+b;c~G!hHKp4id77D?D!e&z<#ibOQn zm^=@P-3CYJeq|Z=e)?C>@6>M`$t^?X5)YLFMS}+Xf#AVZgt#p1GoP+JzYh-oBYCK* zaXUI?_B*F3yG1RJ-UfHx=n);5(HV4(d~aR@{?)M(twfju#f`{N#*+ zfXvOh8cuy~0=zn@jVKstR)~#c?U5{V;u9k=_K=X{$0-j(lXGuq6|08oYTHx^% z&7qy?vasW@a2S0jN85|ojml(jG`6>{mFd$=yUi`fi0w3Z4O5JSx65uBCwbAzyh$I| z4+D+z5b3;*y?cwxuI``hLDD|J4z`!dX5Nu0mwsyreJ+c|zc6^E2lihW4x`k%G;gR% zH1u$@&h|$2d`s`6F}*Kt$BkTwVSkHo>A2ex2M(VBw|t;VkSJ&X;ZcSt!!%L}MVy%S ztz)qsR=H*5y7z;R>W22to$oFkG?b9<*89SVO1)k6oWk_}$oGhZI59yF@t^cR-I0UB z^k6hv1|)cfX+-Kj8a{LFmENONvTY%53J)D9aP{UnZ?H2IFP5_4%5z#7Q0>>1e6{;` zKP232k4wF6GhbFj$xpGkzH6D1sUda$_97I1Q65K-i1@9Uo=>iEq3>ugI{<>|KR`g$ z)gc=S*5+co@byVsQ*8P-$xx=@6x%)PgFe2dzt4=uCm!3CvZWufr7L-1-n(!ZF(U@V zvcreA?h3uMy-+Py`#bk?fH|PVUCy}>E02Aky!aVr2Ybi22Vc; z(PQgXeHULu>1L;Mve@wWnBDelxQqxRO7ads!*h5^JZ6V!g5rqrJ}6@orhL!8lZAv? zutu>_vnXUm4}xSLIchsHeB86SURq-9Z-mXRTgZ@1+_8}l%fm+PeA#2)X?>zT9%JVE_zMvn-qritW>Fv~a1%z9?=WZE4HkX?r_b4yg<{B~M zcM=0$MsTw=J>i`-)@xOJ!oe@LYP0_qahTd%F`nRwt?5MWvF(xW63AVQ?DoN|Ip@zj{ut=+qa^iEW>-8lP*``itmymm$*K(t=E@3(Y9QTdvZf4OKrk>_`8}*32rp=8Kt%d!* z@z6zmpEuqpe{c;}f3~-=fG?m;1YazlL;3`n?zu@pT&*16ZL*6qQW3LpaL~k%Kj{)= zIyMY$G-~)=ApMH4cd6Ip*Q>c@;}g7ao2Y*XVd5#WRGB=gtMT!%?~YP>XnAC?U!c;a#op43jiNu^sKKZ2)^ zON&>Ud*^m?yKPko&B%bFx>@xcRi=TLFx#@ebF2L&g?{Rklaj$!F#`^N3YOLzD}GRy zQ~~nDt^8P^{!R5jiny5Dk0|%>P{jw!IkJ=ahZMkWP`F!CkN70vW&+|sJ-QEc{piDB zlSSidpq;gQ3j6HGI|%LyP;A{n;7~T>rd<3X!j-eFPgeW?P}xf zeFi!m;azE}ObW)EsFG#?lC*?6S01@{`IJ&>`TIckMHvLWrC+coT}mV2HA9(E9#T*^s%~lvmTjk4 zU=-M3CqR|@#Cg}rG8u!*w&#qmeOR5Kp+#*FWT)!J#DvB!q?+(MO{a%Ka&}@>>GI{4 zY(e{YcZRp)-sGlb`QtDx{Btme6Qq0&cHqQ8f1Qr^R=u>UhaePLmtS$2-VUPlZewpo z^_!{B56rh?*-xXihox5Ww26W}IFw`5)*Ciyoc^X1xunH-8%(QQ6)KRx4trf{f6?@_ z@5=0bXorcb;j=du#-|l|>nmmaD#vjIwks?9x8h)Mun-g6L=?m>Z$XjlXRLM1hzDG!}K|Wg=JpKXMNX#2HYCpD%mOIU~THg#IE?3@fqzjcmRlEiQ zHI&;g$jh(15&fECa$TwokN6Q58Z{Nk;ui|}cVI(#S1CHADVl);YOH= z7(_2EfWeeu_2e@!l7$jqzQrJWf0Bl@g}vQ{J5sJR_r4gbA3O*=dvIC(#ozYlBpf@- zCikhXD)p7_G%@bf8^=D?{ZeD3Q(Z%z6`wo6l$pFts}=zdoC6wMZojJeU|EnY;8HMrW(A=mma#on@>$@KfU856{I!DKD0#_<3u^hGEtqZf_W}?t z>AQ|aZp9#S`d*QR%U%?xcVc^@=t@?$$*h4UJO{sshpJHciZa|6w*RN%EA-)FbDh!- zLS861^p&3y_H{L{S`h6}?Qri2OwSK}VvW4OT?Qx(t^_ylu}=m{pH8^(KT^1$4++Gz zG!kQC8kK|}1<$`@I}MYxtg438*}$3y9ZAaAz|>t2PlvhJ

&ah!0-aPo?_bigT)A@cM9*u{15z@&MO`<&PD#_AKK}XeS1G-0 zeZ#|f&u+3+aTohCR2kpfnJS-dMlB4?ac)=;`REBBS+$q~ zSakkG(e`tTrpH*a)wVX7uUxEG^531FPCn6m zsH3k-SFnCynnhrCnWw&-Y3KL7^B(z+S7gNC%N%~AwYcGT+ZdI>;840O;%*Bo>`jgf zlQT`l;dh7`oxC7g{;!?ncUXC8r~DQ$WnUp{T)jt~-&~4m;VxY=A@Y$vIH3Bv23_riui=&y2;ahT zL*a!%r~%A7!Ir0gU0bCJw@N-H(4y}R={E|cF|p|NB}(Ya^lVe4yaJ|g>i2Qx5w(`+<;&0D9th%X|&7gH^XpfB5yM|3?@_*Zt~HYvBYV4Zqzv? zpY0|Zm4fJstJJ#l528cEq$0PH%#UfS;`MED*z5NW4+{L#J}PFUKq|x5tgwB2?>t#W z!;OsEQdYDU)3}23A|htO5d3xdtM0SGoaRwm`!_A)Cx? zTf9{<4k^gEN`Df!Kd8R#{vv9`_auEycq@#(!Kkk~9xdLduo~scSmFjM z$y3%4jHOo#0Qr4nZq0GLG|NMYUflO@QL}}*@Z~wPU$~_z>-_c{#O!2)XxA}!3NjCX z8WDM#J|L}UjjETMQ@J|C1r@I#Zp4^4NG}dAJQ*6$_XzB^8sn~DttYB6sZBtGKW)Te zr+6qAoZnjN`FKmfDIRGn>y@oh7=eX(oh8eUBhPXD7DK0tC>Pt9cJ6-xjEfp6J~>o1^^p%Tfy^I)EGz0t{2hHqH+7ZM zLkHudISZqiTgekUW#>f(ul`ejWi+a{E*%%HVM+w;B4!&-!q^y>O%)7x?{Rba8b3YLHNbBBzvgRN6 z84&dNj};f~yJ8KMGH^F=D_D%;|18E41gEfc?oL^5+qZqlwt7ep&lFc^iy8y*?X5)~ z6Tkx4wU3+I3l}^{PV2rivGfHRc!Fk=|Kz>%>*Os2O)y0L;%aa@z2vzA z!fic>lEFnlb|wbQemhGGJ8q5+LCIoG0h|XpG&`x(Un0QXGgELjV|fj5SijNM-dkdD z4*UV$H)DKX*o6gb z(U=5ar8I6PSsSX%SnLR|SjA#24G}EXzFWKeq$r-%<$F9v75v?7iVWU8=TYR;KIBJ_ zm_TT~y7;3*3;PPUJC$>+Ck_;?iMQN1f2Y8br1%awzVk^fty5Z2UjHR9(`A>)5hv}t z_|TVl_+|2q1XXP%M8ya`mVohd!%wr0q;Z-jg8hOT4I#)u=;{=t&mc8cmtyv0txa*p z@lM{2#t;~=->H~|t8brSK;XCYI(X_V#*r8lzJ*9iRb|$K+O^^tSf;Pzr5iEKty-gl z9LPrz`D#P7yXxjH0xBkpvCMF77@8%2YYTa#g+0CXea+4Bnj#Um)L>0y0wc7|6+(1nC+cIT~on=wYV~zwLZ=`Zs63=(krJU!Lb7 z6YWK0`H8K*iqz92ZR)ToM?wm?;KRcbf5B$BN|#)Sd@LK$s(2UC*9xn`&hHULzbKl( zpFU;KBS_x@@=_!)|Bia~>SC48p^bhuBpmBT-f&xZA=tFBJBLK|HE$7*MS;}sZjMHF ztxJ7MORXjEqnrGVWkgjzPl|kz!MOnt>v_qPef)BGy43PIpxPr29l8%rsIAeVObI2y z3*3>(Y(tj=j|m7>rvS9ZL1O|{5hiBw%k+#!dK3FfC2tp%MgTX&tn~*jSGyV1oWN#~ z(7*n7IvovAx2jE4CxvVmOyHZD0Vez?Gj-Us=fi!Sal6saYc zykx5RP;CP|1Z9>}OCLR#AUZJKSK>%o>P+(TPv*Er7%jq*#JIkSIR1k@ApKd-BO@-jA z>=FbCuSaKkBL~|6?gljSnihM0)#ed04a(}YV*bUi<^A|&-WEmM46)IIz?_xTuL9v0 zV7yl0h6lWgP`w4ABP`@81c)$)V1sYKV$LJpRw4L7sQX8*FD(aNf%}b9C zcc+T69C>J-tn>8_60@s;a)2N}^oWp4b=cSvrWE6sPw1EdZk!&vNmC-X)B)6X240&2 zdF`)YpSd9>?ith;{Uy%lSF-fp+Uq5*HCB_7$_Ag7&T%?Q?5nou3u~_O|IsF!78bNg zaBu6Q{cwG0R9snd(r0wU<`0KvbI3L;h~{(8wod180lYmQ64fi*PYLhSg|TCsx?0a4 z3G2_-RAaCJ(+=8q?caIjgOwTLuH(v71KJUT{ReVjmGj_LdW7=Z_NR9%fH5RVD|RH^ zDG+~f;`9VpJvQWp_YF85C6PY2^gDsFtVYFr*-U;SUO#ER89pC=Mw4^y4+IeiHfraM zz+V>1fIla6KXOkwm-Ye}74X#5N-b4tp1BUn#(>m#LYD|?^Yg&1oR-zwL*(7RH!J>MbL6-K#0iWCDNR z65>m0e}*S)n`+keZZk^h**8BlRL{G!W6WW5eG*myOwv9>eW zTp*Ud-2_54Zb^kxxCTfAC^#Y=o%XG58n{mLL-QBqmb|&Z)WA_EK2?(HK((K{?`b7E zD~7NBKP2J_tx=pRZdV0wxhRi-K}Co0gK#`XG|wP%8mP9tu3#Huy0Ex#Z3w1g`N{QO zLHbhTZ>`nlf|5I!B}<@~ZD_@}4~-C~gTxI;w~*Bz#2=yzo@?Kwf57>rkB=pJPTWRw zZ|QS9rGo9la^}g2eUgNsomsM4MD)w|t?STOf---oi0WUU*sHiGrb5L^)EaM~%uMF)R-i|bCj zxj2ZO`d{jzf{-icq%y48n@Ev)?ZC9&XIthI)r^{{0fV(Q@Z(O&Y?n}JNJb4jZW#E3 z2rp5NK4d$00y4rn zhiM7mj|W!O!mX;c*E$=_C5E*M9+n?=b9m`^1%lY{|AH9eVwU|;wAYh_OO3w#OSrAP z<|Dq~>_STNKia4KFYyL+d*;OQ?@QVzg_^bmtjxNEV}4QE}8Y1%r~_9e=D(7Zj0 z79l!2U-J+j+fNGP6{Y7RIzW0-gmM}C4|(7O&R|a$+Ni<}_YV#oi3I;M-A`gGoRGBo zTGQ!i0wx0x&e42Tcp%?f!j6CS8AR;?B=Rl3+u?%TaiiqDHW+NQ^)D5m21P(r61yLX z$iYwha&C0QVNVOMmpQ7qmst&HW(~Xr!lita;lo(ITlu;vWJj7fL`dz!pq1jJA{U{i z&Z(=_I=egb?t4qjA=-uTBPKKXh1ZHMtX;6b^3#9PVux$#-c{jl67JR3sNS@?C$O@u z+=a>6_{P72iajkk6L>qqk%u>aD~a`Cj8_gBxekacoOrf@*&2-{d11?U#c+xQRv8)k z57Xv^hiCR-aIR(;eoE{?fS*OT{afbe{iVG*FPn5rXjlyYFJ+I;9xwn}3ET1@VhK!Gl=-Y-Ty@|%!YF^)3(`X8Qd;_m2_gKCNRnSJ{hf1w8*jAg!%uG zd7dZb;x0M^BIQ;C+cv;rL4dEoDqIQZ2exjWH9HK^BWTE(zd*Qz(3?j<)YTjHic*4L z^6?t&xfLl6z-PlGVRfCaHFumL=mD}6bu0{D!&(2XTAjRhyIlKU_^UFSU4#77_T;A| z4xMI*Pt7uOk<3ZYWG&KwCSL<99N)LXC=+k_+J6#jS^#d)hfy`Q7#tS9Q!Lc{{}@K( 
z$T$<*_>bRDvkVJ+Ac)vM@Q3{UN<#>yI@j>ZLL?YRZ&IPSE&Cv* z?enGC7a=2n+8?j3mvtr0N6v>Ffw!6FFMEccHeHK-82M_C=$af4RcchlGe;;aSS8Ot zi7d%j`3p=`I;HEv-my(t0xo0Qo(BJLBHm~0N3J=`lKG2B6R)f+^G{F!;cgpi2w_U~ z{Z;$sPrM`|aNFdC+PnusbDJ|5QNo~7Zo6aNwp)C|z3S_# z5U+lwiFZz_Vw2$bWsvdEcGYqZN%Yj`!N1UK-)}fW9u6s*dcd5MB`7%$2?UoXRkyIv~USF$HpB&ktG+FX8Ocoy_zTG#_&#MgN4 z)pmXXWah)ENt73X?mmjVq|kHk_@-KXQDeomDkJkX*W_mO9sb^()%*}%1^1Z`@k24e zuP*J!%+3dpJ>8I98lK-)DyNVnd@Ki1?VKqs>w7dH#rDPZOu6lbQKmVy>&Qg*y8mcD z-SU%e+!c|LZ%xyWhVausq9Nn?^8a;p<$+M{Uw^7wXw$EaNOP;X+$2;|NuzWl31tb9 zUBp-$i9v{R4$WTTTCPqLNpjlWWBuS`^@O~zOO$S&-4AB?>V3I zIiGW$XT;=MABdMXF(r&ZUJy(!`rt(A1qe>t>g^sdlD5_lNN>#56b`2+0Le@34pVqPlBH{cM<-pAqY6`0bY;*YiTC%+0M+P?sXtzDqlSJLc6>46aJz`QSEPTBGG zon*KTC^iXj8}`|Zv2fw)p|Z4SkP7RU)_cTdnB1G*0QU$cz`i5~>b{#e$C$EIM)kr@ zQ=~UdEN)q3B!IMZLc?olse;qztITb{ToipVByUZlADwGGLcl zi@2XB-I$yx2g}0mbgikv#f$qcyZ;ND67OuCzLkd^9DI{E_{6~Yv$m^E{mDImM{6N) zAA%1^%@Pd;$TiNlFZlM$v(ow`ojL0QNWs3nzM__eisf)}Ra3?Bj%j-4hk-KL@F){T z#mtzi%!ktI)YmPLH|K5u3h)gCmZP;1`Z`txk9o352%JE(?ZFGIi6N_1=AWMG4RcWp z0GJYLkJ9L!O++ZNAlcSp^OX&qtN5`B?V5+QmA70n2HEFBj8^}f1Zj@JzO*vz7VxF%5 zyQ$n52miXe=41BqJWwUJpj;5MkhR${wN71g!7QCDZktpOdl`a@!dJ>AjEaw3LKUur z;uJ1gHlhHk@rsgo{|nxVqVEUz1Tzr)3UqIK!{dQnAdAV&CA(3E2wm7N&P_~%_V%@5 zP5Mftwc!%aw)IWXqh#?f#{H7Wj<%NK0H}(6N4r3O1_BNm1k#_?A6Cx;iV@2lMr;6$1gK)+B-9}yC8eNSU)nRa{i?v zKXs$a^Jpg25#nXWz8=P=5?8kat$8ecQfMjllgw5%$C3e!Y_R^P;_VhtSHK=(Fwv2z ze@oQ<36&s6SqOq-L$%62tX44`&+VM^7uC6-xzOLWv^X8K5Cvk7&bmr}6}rr#RoU#} zZ&o0=#@?5TI@;z$i2>i}#C%pe;BY|xm!PSL*iEDd)QOz zuG{;{-Hiif=e8}{562V%oiI#voNk-U**a(c?0>;#cVYFwm>Ffuw1F7cV3`$MJ~L?T zBn#B1EPee*zELAuX{@Q)wASAb#jQu|$9_kHA zG=s~1w-&1OUPy2{|CxFAI1=6~01tfNZ;@H$TJBlvM+&qOh!wP>Mm;es%iA%8Mg!|) zHsSauIbm_XC}1|u8TaKf{g+r@lY2PPLcRHv%Qnli09iRF)(=f1q#PCZjrQWqe{u6j zPb%ZzAsH)-X3b-b%7Rcb(cjTA5cB{Qd~*j;dqx~bN&}V;aw(FAL3ssF&II$z)vD03S_Saj$)s2G}Liakrs$VC!0f) zgZVduJdBb4z#4pFd;fvceZX^e!iMjA;0B;fBU6saUH5 zd(ei+^|A|b#O_vl_`BTYp&7`Gqp?Yh#R=e{^D2uqAhf1;*w|BDz)m+b8}fmPno-%Pw&hNimRq>c>-#j%KW$O`cd6*4} z=Ac>6@m38MtIFrM7>1TR{jDRaWrK63fvXJ-W>u10Vup7N zT+S17xoiymPwf@-mjM*9hE3%;B@hEg_SB9;;EyO+JVu$ZR^H&v@AXrPwY89r`KghR zPOBmMzng!mjsIxZjF)H~xGP*gGPTFxXvnm=+tZ!t9M1T~Prx_A6WU3ZVdKTX#@dgu1{Di^YO1o+_Vn=+*`#T%u?3|DJYH{`x+}n zyR8=V{kMuHW*n5~zDRO{nP=`-nn-ZL=wb>OE25h&5?UB%GHCd&!L3JU640qZ_88{L33%P*J6V zDt0%r%g?$01yUWT!Jyfa)qDg4?h$~uz~a5AHqhA64l3X>X>?y@-92Ld&SYM%^OPf$ zi5`ze*iX^{I1sq6`~);1Zg3IpF*ZeVvANKjEk|KK}Mt5qru{D8N7VBf$@9y1#V)7=h&t zn8D}I*kbl95uYHXwb5SVqXP18zS|$Pqu?(GBe>=K77gVJEX=v9>uI zJYmkoU}n!zjg3DM&jqSXy4~wjPo2$)_%bt?f6Q=goRiswpbz&9qoO>iHl^mfo&rGt z0X-eOJg>jcdi%fk<;7edCYI>LL`GqGpnW5p3R`2MQhNM@nbdJ;+og^+pPQIZA~Ogo z`|6o;&ywq2ul`$?wbxub#y?X`9DE$y`PU;FnXud5xsw{A8;qEYsY389p4jPy$`!L= z&7f>l#_p6}Rrwt_1)HFG96Z_`%6IA@&>M`$Q zsbxC46)?1+qPe`b0|eI%G4X%D5$XQ60~;228H6@WqcSnANC^dQCX{HuW(c9S~gf^TZXa9yY5@so6!JIi)& zAqYc~4@g8F(lPwGs;HdmpzyY&vcgcI?8HCMRW_@wLRbc9F%yP{nS*tZ30nxv@!%c^ zCD5#%NWT)9g{ne>GWuXJ0@Y=`ru&SFfsv+locjLY-!F9dm37uulYkey+VDpBf14T4 zYuT2(v$3G^t!l`tY-K;_6c6UQ#Kfnr^_y{y%9&74A}0{q@@NZ16?1VwO zzJCNo;=U#)kqLkTmXv|FlPu>L)bYPEk-L0!J6o>#HTPZG<^Qf|=Hq{4&E0eCJ-i=5 zy1pwfR`{5A)vS8B>a_0aQ(oW3qdTZvNdtBAjb0O@Se11FgWvN^MIe;h&c8auN>~j! 
zj#g9C)lNps?P4rOe`A8QIB3?~$Cbx|yQAL#%Gh~q^us%C%IudPdE)cOu)g_e74 z-$E6lO}go(m$|}c@EV7{#xgYmFJ`3r0z#~hy=9wuVncjd*>$GVxU_+~z|pxcW{&2E zUiTM_Zqwp!1PO)MdX6a%YX02eYD~;Wj)J3dkjkaur5kq2i&M#%&J40K?ztzN6rJJ_ zKQ@%Oe}ypjZQ>mw#Rf)Uthe}la0Ydhk1HVSEjFdvfqU=ZJ`7Jk-~6XzCVW%8fFr$v zcK|}krwgC}cfi6I=XBY7!|dTU1bdecD6-S<5F>qz6AU%9L9(uui_75tCa*n#)k;WF zIG@+t)^a){dz<$*)0+v>3(f6*)yNS;>X$C?FLvRWGf5X9Q#{E)4;_nk>HaJ7lN=RA5JZ88A^?riiQbWzq45AB{Kw*UtK+f)i$_i zzYzCN-WLqaf^SGqSF6Y+O-|mr640G4k4_0qC8vNL$&-Y#Gw|l;#8e=Yz4{}aqu@9W3%`=@n69HLzV46U@{d&MKWZTc zT{<-&uC!s24X#PneCyn~K%u~P1v3W9Bbf71*%<4K`mPAeDASdhS{O}P>$|h0kNAED zIp3RswMN-g{p5@WdciHO8*&WDeAT?@U=!@Fe`#sfa;Mtd^f2bHilHz50)fVnzz57V z38PXPPYb8`o@_70lAJnhx8MIDuYAQ7zcCEMJjkgjtMMa&3CSl`HC)O{W$s|-Ck^+DU75wZQ`)h0*~`7QA1j*+dtSherw`nqs+IcxfEjtXkoxjf+%?w)_=C(-MQDum>UY-Toi+45 z1;Aj6SB}n~?DnWs-YH!jlu+q+eN^zF9G#DZ9LV*SCwrDm2|L8FOaL#?=fz^e;PzbE zAJM+|;GKi?>n-e*j#d@#3TF@!e_TQdD!<$cQszg39n!?6Q9XgXXLB}*EDR*)7;8a-^&A z((;BN@!NkXvf}{|&U{!~6^{o6>2yZA82I8Rew0FXH8E$Zno=R(YW+L3CSH$u@O8g7 z;O59iJ@0=vsU-^mjrJ4lN8=>8F$1o`vzhQ&_J8XSxLlI> z7A!(M;jx5|geuTy^sz(?i1_&s{dxh480P$mW9rc{JJqG`sCDjF%scTyT@mM6rvtZ0Rn zE`V;+rAULybR;DrTj3n@@AI99zB;RJnx+)g{>%bZcY5YbB_&dE1Q6!7{l^3%U9W#& ze3qqG$-pp~*!zLxXe*idlYCgjy?0JNgRhmls-peeh4@B&vQ|Bk?v z80U2%8|7mAVTKhbVQHLSB_nHGcsvUCvc|z?ur~xr@_+vWM$+8Omyx9#&~n7zmmHx@twpqJ)oyCkeEW7i}}?LPC*qck3vQs&acWTqnl|2 z-bA{oJua-gN4ZlB+mYRi}l~aN#FaP zsH9JtSkoVV)ucAH44@aty0|APM|ha{GsJs(li?_q%Mq!BnZgH* zO(KPTX3eel0;F7kVX-s|a@N52swwYw2*oYL2!%`)N)6G;l_f;f|_U($idh)Ow&uWvmz}HB=}X zQm23(&Z1Vf&lFbdx>6G@08&zldg5L*tf6l zI>qIMLg(Z6K&ngu)P6RYIV0*c6)3c0*_n$wIJ_vMI2`U=Q||!f6jr?C72hb2$;f1m zC&qWV=!LmAP<^vdyTIYwXi{u=kxwj?Q>j~*KvfxOP23OM`@AXP_(;PV`tMlE&7BG* zI(SO`#9sM6d3;64n)pW4qtYRMBL@ORs#_;MLP~1Du&QaU*io92alG?K!H12|#`hY! zB9t6(l7C*^-hu%OY7ApyLs0Zfiz8xq`S#o!)7E=UiJ%1aCf7aB)42#O)bxtzDCgV# zW#w|fEQd84Uw z&qaWE< z9=(X(za7FEZiP_2ROKfXINLAIaYmMy) zB@&x?Q5YpGn$^89WOD>r2foYirdattU|m2h~I3WMMU=fqGGSWk3c98Oy+p)mWE z%i|M1Fe(czL1Nx5Iiz=T&tiwCzmlA0SH@&X1*t*t)NKf^I+VN)MVZscv6D` zj2sv&>-XT(n!v!b5qB^>GKE9kT$bK!-heKB9N9bfV939?yOnV+(YgBLbjZdHk@_0yHjgz91h9%; zmrHb;YbCU1CPgzbaRvti4CEv5aD*j_rjPp8XB*!CRRPh%#{X@UNG|)c?F;JZ@d^ic zk5Xk$6fhtBC4o~LDZ?z4={VPhgfn=-h9fGb&ILm5&A!=f(vC>w}R#j-{4L zy%VJcIvwzk!Xp%%D-!MEVzwQA9VVw6eoBe9@@HtZF2Org+EffRpvU$qRvKfXFTZS= zE8PnwkMC1J={dqyU_HRXAIReIJS*i?MB}e$#|`hPZQ>&hZt-_${$_-%ssh7o#EOHU z{wJP*UB14xg2{ofo@I-gYqy=o?FH0zZHrB|@K zGZs=KRcXT3>_U|DXKOwH;w-MiDqDGgBgeTiidyH5c{?!S!iBQ}&Fcg>3%ObH{RF28 ze*20uz6b2_(#sE%8lonU336-`tc%I~c*kW$HmR=yOhoN3VS|tZpr=AL9M98Jdj|;x z2T+Z`8}NjsXTo^%&oO2+6L?+&6w+qf=Ju#bo-QFzrv*53ica!*A1)XsChQ+UCOk%8 zoy$h*XL9Xcb5M&+-d--{=L;-j;a(~U1t`GQ-~e#NACbUm2a^{((A!)0;T%8z;#|{1 ziDFsYIk(O1mF>=myJSOW0|ZHM6t$yAZY}SLv_6Tv9b!b=W1U4gKdoXHWNU0&n0fE+ zoC3=JE^Gc#Sm+4`6utqURY!Nn>iDzPOefTK0(MatFWzH783#I-jU+<&A&DiC<3MWx zyDHLQ4#TFs?~)uIJ}%Fik=&hKe*B@f-m;Pn;I8${WV0?*RS$vo2C$y>JL|eMfgU&d z(Zd2f>u$5wkj?UuMS*eMQ7P%XqOb)7NGNi$Mg){l7~VdhYMS}uY@-O1d8^?Gt}X19 z_TiVKAWT^+2#B}&A8ce3hij5l#hbv$w&yyF7{+-P ztM~jqB0Ohuf~NLB`BGGur%7dF!hbJ|nkLv~wJbyHL?BHw=w8zXy$^3(f7C~+RQ8bN zc|p?^RXoJgiRJ^r<<_Bfo57;m6zJ2Il&)XCZ>~Woai*H*DDR;l+6J(~*5942ZsPvt z$7hP*Gs2jQW_nPi?^%S_{Q)Yx+ts1sXlK5l1<%V!O}-ih0CXDK{?dV=1<1I+daf1Y ztskFP(a*u2{#lE95inIa0v9}kG63iLoL|N(k(;qL&?<@FU(E*I!cg$@XmO$x5Y8+E zL-&$kz*stTBha-u{-Qm|l-aS?H;{|ei?Kbv^_VWN1Gh+R5=bP0@bhlic|}&-e`GfR z>L$!7ed=r0THeu1sZDd;1@P31d_4zN3%TwT65%r}yOV?~-XeGg%B~mn!WEVJ!-M&`r@J;I59)hn_R-UGu@q$r z6W*!dW)7+1{fnUH(kL=sxNzjkkS9owrX4U|s%r86A%y|3e2pGUBDhKgvSujtTG*@7&KV3 zRc{_%JM=oN3;Yx&a`ac+OHi3n&!PMU6Z4G*XoLrTE~^iQ#G)VvxX{s0KDcZMoc$&k 
z#|P&`B!rt;qlU#OuSo^}cB%l@20>O9<6!#cFsohkDDCyJAE1tVqA?g!1Xg7015wCu zvx7p~_4sz5-{F~0vd?aQZyE4&9YqR)5D9zwG3 zW&R_N00i5&%yny{XO{r%16W34%l?8Z4sH?|DxwIN}&`SVUmQfAY4x$h#1)bX=qyQ{dWdeE1et>0I zuy#2NS~o)DyJG6NoE$Du^ow3f%en2Zg9kAK)XS3wnDsCE)* zIkk5f%RCrHK9rJ#uh=0i70$nFT+=533MS{}yBR6MUmcOLwevsSmn)&Il==Z{xZLw_ z$tx!(`U#oBL7jvhlE+WbWj^sa6C^*$$ye{e<$4JM8c$`$m%hVr!7|q3vLKR8(z%Oy z)0v6kiQ`NcN_?9NpCRFJuD~&VhN7J-`GFSX6*Q&7&~c9?(0zC&z~yQz27Sp47q@S# Q$J|@vtoCo&sus8Z2l~Yc`2YX_ literal 0 HcmV?d00001 diff --git a/docs/root/intro/arch_overview/listeners/listeners.rst b/docs/root/intro/arch_overview/listeners/listeners.rst index 8802a1dcb776..153d344f57db 100644 --- a/docs/root/intro/arch_overview/listeners/listeners.rst +++ b/docs/root/intro/arch_overview/listeners/listeners.rst @@ -33,6 +33,8 @@ Listeners can also be fetched dynamically via the :ref:`listener discovery servi Listener :ref:`configuration `. +.. _arch_overview_listeners_udp: + UDP --- diff --git a/docs/root/intro/arch_overview/observability/access_logging.rst b/docs/root/intro/arch_overview/observability/access_logging.rst index 46ce106d89f4..afaacda8630c 100644 --- a/docs/root/intro/arch_overview/observability/access_logging.rst +++ b/docs/root/intro/arch_overview/observability/access_logging.rst @@ -26,6 +26,8 @@ Envoy supports several built-in :ref:`extension filters` that are registered at runtime. +.. _arch_overview_access_logs_sinks: + Access logging sinks -------------------- diff --git a/docs/root/intro/intro.rst b/docs/root/intro/intro.rst index cf3404c363ec..4fd158e49f7f 100644 --- a/docs/root/intro/intro.rst +++ b/docs/root/intro/intro.rst @@ -8,6 +8,7 @@ Introduction what_is_envoy arch_overview/arch_overview + life_of_a_request deployment_types/deployment_types getting_help diff --git a/docs/root/intro/life_of_a_request.rst b/docs/root/intro/life_of_a_request.rst new file mode 100644 index 000000000000..ac6e5334689b --- /dev/null +++ b/docs/root/intro/life_of_a_request.rst @@ -0,0 +1,647 @@ +.. _life_of_a_request: + +Life of a Request +================= + +Below we describe the events in the life of a request passing through an Envoy proxy. We first +describe how Envoy fits into the request path for a request and then the internal events that take +place following the arrival of a request at the Envoy proxy from downstream. We follow the request +until the corresponding dispatch upstream and the response path. + + +Terminology +----------- + +Envoy uses the following terms through its codebase and documentation: + +* *Cluster*: a logical service with a set of endpoints that Envoy forwards requests to. +* *Downstream*: an entity connecting to Envoy. This may be a local application (in a sidecar model) or + a network node. In non-sidecar models, this is a remote client. +* *Endpoints*: network nodes that implement a logical service. They are grouped into clusters. + Endpoints in a cluster are *upstream* of an Envoy proxy. +* *Filter*: a module in the connection or request processing pipeline providing some aspect of + request handling. An analogy from Unix is the composition of small utilities (filters) with Unix + pipes (filter chains). +* *Filter chain*: a series of filters. +* *Listeners*: Envoy module responsible for binding to an IP/port, accepting new TCP connections (or + UDP datagrams) and orchestrating the downstream facing aspects of request processing. +* *Upstream*: an endpoint (network node) that Envoy connects to when forwarding requests for a + service. 
This may be a local application (in a sidecar model) or a network node. In non-sidecar + models, this corresponds with a remote backend. + +Network topology +---------------- + +How a request flows through the components in a network (including Envoy) depends on the network’s +topology. Envoy can be used in a wide variety of networking topologies. We focus on the inner +operation of Envoy below, but briefly we address how Envoy relates to the rest of the network in +this section. + +Envoy originated as a `service mesh +`_ sidecar proxy, +factoring out load balancing, routing, observability, security and discovery services from +applications. In the service mesh model, requests flow through Envoys as a gateway to the network. +Requests arrive at an Envoy via either ingress or egress listeners: + +* Ingress listeners take requests from other nodes in the service mesh and forward them to the + local application. Responses from the local application flow back through Envoy to the downstream. +* Egress listeners take requests from the local application and forward them to other nodes in the + network. These receiving nodes will also be typically running Envoy and accepting the request via + their ingress listeners. + +.. image:: /_static/lor-topology-service-mesh.svg + :width: 80% + :align: center + +.. image:: /_static/lor-topology-service-mesh-node.svg + :width: 40% + :align: center + + +Envoy is used in a variety of configurations beyond the service mesh. For example, it can also act +as an internal load balancer: + +.. image:: /_static/lor-topology-ilb.svg + :width: 65% + :align: center + +Or as an ingress/egress proxy on the network edge: + +.. image:: /_static/lor-topology-edge.svg + :width: 90% + :align: center + +In practice, a hybrid of these is often used, where Envoy features in a service mesh, on the edge +and as an internal load balancer. A request path may traverse multiple Envoys. + +.. image:: /_static/lor-topology-hybrid.svg + :width: 90% + :align: center + +Envoy may be configured in multi-tier topologies for scalability and reliability, with a request +first passing through an edge Envoy prior to passing through a second Envoy tier: + +.. image:: /_static/lor-topology-tiered.svg + :width: 80% + :align: center + +In all the above cases, a request will arrive at a specific Envoy via TCP, UDP or Unix domain +sockets from downstream. Envoy will forward requests upstream via TCP, UDP or Unix domain sockets. +We focus on a single Envoy proxy below. + +Configuration +------------- + +Envoy is a very extensible platform. This results in a combinatorial explosion of possible request +paths, depending on: + +* L3/4 protocol, e.g. TCP, UDP, Unix domain sockets. +* L7 protocol, e.g. HTTP/1, HTTP/2, HTTP/3, gRPC, Thrift, Dubbo, Kafka, Redis and various databases. +* Transport socket, e.g. plain text, TLS, ALTS. +* Connection routing, e.g. PROXY protocol, original destination, dynamic forwarding. +* Authentication and authorization. +* Circuit breakers and outlier detection configuration and activation state. +* Many other configurations for networking, HTTP, listener, access logging, health checking, tracing + and stats extensions. + +It's helpful to focus on one at a time, so this example covers the following: + +* An HTTP/2 request with :ref:`TLS ` over a TCP connection for both downstream + and upstream. +* The :ref:`HTTP connection manager ` as the only :ref:`network filter + `. +* A hypothetical CustomFilter and the `router ` filter as the :ref:`HTTP + filter ` chain. 
+* :ref:`Filesystem access logging `. +* :ref:`Statsd sink `. +* A single :ref:`cluster ` with static endpoints. + +We assume a static bootstrap configuration file for simplicity: + +.. code-block:: yaml + + static_resources: + listeners: + # There is a single listener bound to port 443. + - name: listener_https + address: + socket_address: + protocol: TCP + address: 0.0.0.0 + port_value: 443 + # A single listener filter exists for TLS inspector. + listener_filters: + - name: "envoy.filters.listener.tls_inspector" + typed_config: {} + # On the listener, there is a single filter chain that matches SNI for acme.com. + filter_chains: + - filter_chain_match: + # This will match the SNI extracted by the TLS Inspector filter. + server_names: ["acme.com"] + # Downstream TLS configuration. + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: { filename: "certs/servercert.pem" } + private_key: { filename: "certs/serverkey.pem" } + filters: + # The HTTP connection manager is the only network filter. + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + use_remote_address: true + http2_protocol_options: + max_concurrent_streams: 100 + # File system based access logging. + access_log: + - name: envoy.access_loggers.file + typed_config: + "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: "/var/log/envoy/access.log" + # The route table, mapping /foo to some_service. + route_config: + name: local_route + virtual_hosts: + - name: local_service + domains: ["acme.com"] + routes: + - match: + path: "/foo" + route: + cluster: some_service + # CustomFilter and the HTTP router filter are the HTTP filter chain. + http_filters: + - name: some.customer.filter + - name: envoy.filters.http.router + clusters: + - name: some_service + connect_timeout: 5s + # Upstream TLS configuration. + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + load_assignment: + cluster_name: some_service + # Static endpoint assignment. + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 10.1.2.10 + port_value: 10002 + - endpoint: + address: + socket_address: + address: 10.1.2.11 + port_value: 10002 + http2_protocol_options: + max_concurrent_streams: 100 + - name: some_statsd_sink + connect_timeout: 5s + # The rest of the configuration for statsd sink cluster. + # statsd sink. + stats_sinks: + - name: envoy.stat_sinks.statsd + typed_config: + "@type": type.googleapis.com/envoy.config.metrics.v3.StatsdSink + tcp_cluster_name: some_statsd_cluster + +High level architecture +----------------------- + +The request processing path in Envoy has two main parts: + +* :ref:`Listener subsystem ` which handles **downstream** request + processing. It is also responsible for managing the downstream request lifecycle and for the + response path to the client. The downstream HTTP/2 codec lives here. +* :ref:`Cluster subsystem ` which is responsible for selecting and + configuring the **upstream** connection to an endpoint. This is where knowledge of cluster and + endpoint health, load balancing and connection pooling exists. 
The upstream HTTP/2 codec lives + here. + +The two subsystems are bridged with the HTTP router filter, which forwards the HTTP request from +downstream to upstream. + +.. image:: /_static/lor-architecture.svg + :width: 80% + :align: center + +We use the terms :ref:`listener subsystem ` and :ref:`cluster subsystem +` above to refer to the group of modules and instance classes that +are created by the top level `ListenerManager` and `ClusterManager` classes. There are many +components that we discuss below that are instantiated before and during the course of a request by +these management systems, for example listeners, filter chains, codecs, connection pools and load +balancing data structures. + +Envoy has an `event-based thread model +`_. A main thread is responsible for +the server lifecycle, configuration processing, stats, etc. and some number of :ref:`worker threads +` process requests. All threads operate around an event loop (`libevent +`_) and any given downstream TCP connection (including all the multiplexed +streams on it) will be handled by exactly one worker thread for its lifetime. Each worker thread +maintains its own pool of TCP connections to upstream endpoints. :ref:`UDP +` handling makes use of SO_REUSEPORT to have the kernel consistently +hash the source/destination IP:port tuples to the same worker thread. UDP filter state is shared for +a given worker thread, with the filter responsible for providing session semantics as needed. This +is in contrast to the connection oriented TCP filters we discuss below, where filter state exists on +a per connection and, in the case of HTTP filters, per-request basis. + +Worker threads rarely share state and operate in a trivially parallel fashion. This threading model +enables scaling to very high core count CPUs. + +Request flow +------------ + +Overview +^^^^^^^^ + +A brief outline of the life cycle of a request and response using the example configuration above: + +1. A TCP connection from downstream is accepted by an Envoy :ref:`listener + ` running on a :ref:`worker thread `. +2. The :ref:`listener filter ` chain is created and runs. It can + provide SNI and other pre-TLS info. Once completed, the listener will match a network filter + chain. Each listener may have multiple filter chains which match on some combination of + destination IP CIDR range, SNI, ALPN, source ports, etc. A transport socket, in our case the TLS + transport socket, is associated with this filter chain. +3. On network reads, the :ref:`TLS ` transport socket decrypts the data read from + the TCP connection to a decrypted data stream for further processing. +4. The :ref:`network filter ` chain is created and runs. The most + important filter for HTTP is the HTTP connection manager, which is the last network filter in the + chain. +5. The HTTP/2 codec in :ref:`HTTP connection manager ` deframes and + demultiplexes the decrypted data stream from the TLS connection to a number of independent + streams. Each stream handles a single request and response. +6. For each HTTP stream, an :ref:`HTTP filter ` chain is created and + runs. The request first passes through CustomFilter which may read and modify the request. The + most important HTTP filter is the router filter which sits at the end of the HTTP filter chain. + When `decodeHeaders` is invoked on the router filter, the route is selected and a cluster is + picked. The request headers on the stream are forwarded to an upstream endpoint in that cluster. 
+ The :ref:`router ` filter obtains an HTTP :ref:`connection pool + ` from the cluster manager for the matched cluster to do this. +7. Cluster specific :ref:`load balancing ` is performed to find an + endpoint. The cluster’s circuit breakers are checked to determine if a new stream is allowed. A + new connection to the endpoint is created if the endpoint's connection pool is empty or lacks + capacity. +8. The upstream endpoint connection's HTTP/2 codec multiplexes and frames the request’s stream with + any other streams going to that upstream over a single TCP connection. +9. The upstream endpoint connection's TLS transport socket encrypts these bytes and writes them to a + TCP socket for the upstream connection. +10. The request, consisting of headers, and optional body and trailers, is proxied upstream, and the + response is proxied downstream. The response passes through the HTTP filters in the + :ref:`opposite order ` from the request, starting at the + router filter and passing through CustomFilter, before being sent downstream. +11. When the response is complete, the stream is destroyed. Post-request processing will update + stats, write to the access log and finalize trace spans. + +We elaborate on each of these steps in the sections below. + +1. Listener TCP accept +^^^^^^^^^^^^^^^^^^^^^^ + +.. image:: /_static/lor-listeners.svg + :width: 90% + :align: center + +The *ListenerManager* is responsible for taking configuration representing :ref:`listeners +` and instantiating a number of *Listener* instances bound to their +respective IP/ports. Listeners may be in one of three states: + +* *Warming*: the listener is waiting for configuration dependencies (e.g. route configuration, + dynamic secrets). The listener is not yet ready to accept TCP connections. +* *Active*: the listener is bound to its IP/port and accepts TCP connections. +* *Draining*: the listener no longer accepts new TCP connections while its existing TCP connections + are allowed to continue for a drain period. + +Each :ref:`worker thread ` maintains its own *Listener* instance for each +of the configured listeners. Each listener may bind to the same port via SO_REUSEPORT or share a +single socket bound to this port. When a new TCP connection arrives, the kernel decides which +worker thread will accept the connection and the *Listener* for this worker thread will have its +``Server::ConnectionHandlerImpl::ActiveTcpListener::onAccept()`` callback invoked. + +2. Listener filter chains and network filter chain matching +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The worker thread’s *Listener* then creates and runs the :ref:`listener filter +` chain. Filter chains are created by applying each filter’s *filter +factory*. The factory is aware of the filter’s configuration and creates a new instance of the +filter for each connection or stream. + +In the case of our TLS listener configuration, the listener filter chain consists of the :ref:`TLS +inspector ` filter +(``envoy.filters.listener.tls_inspector``). This filter examines the initial TLS handshake and +extracts the server name (SNI). The SNI is then made available for filter chain matching. While the +TLS inspector appears explicitly in the listener filter chain configuration, Envoy is also capable +of inserting this automatically whenever there is a need for SNI (or ALPN) in a listener’s filter +chain. + +.. 
image:: /_static/lor-listener-filters.svg + :width: 80% + :align: center + +The TLS inspector filter implements the :repo:`ListenerFilter ` +interface. All filter interfaces, whether listener or network/HTTP, require that filters implement +callbacks for specific connection or stream events. In the case of `ListenerFilter`, this is: + + +.. code-block:: cpp + + virtual FilterStatus onAccept(ListenerFilterCallbacks& cb) PURE; + +``onAccept()`` allows a filter to run during the TCP accept processing. The ``FilterStatus`` +returned by the callback controls how the listener filter chain will continue. Listener filters may +pause the filter chain and then later resume, e.g. in response to an RPC made to another service. + +Information extracted from the listener filters and connection properties is then used to match a +filter chain, giving the network filter chain and transport socket that will be used to handle the +connection. + +.. image:: /_static/lor-filter-chain-match.svg + :width: 50% + :align: center + +.. _life_of_a_request_tls_decryption: + +3. TLS transport socket decryption +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Envoy offers pluggable transport sockets via the +:repo:`TransportSocket ` +extension interface. Transport sockets follow the lifecycle events of a TCP connection and +read/write into network buffers. Some key methods that transport sockets must implement are: + +.. code-block:: cpp + + virtual void onConnected() PURE; + virtual IoResult doRead(Buffer::Instance& buffer) PURE; + virtual IoResult doWrite(Buffer::Instance& buffer, bool end_stream) PURE; + virtual void closeSocket(Network::ConnectionEvent event) PURE; + +When data is available on a TCP connection, ``Network::ConnectionImpl::onReadReady()`` invokes the +:ref:`TLS ` transport socket via ``SslSocket::doRead()``. The transport socket +then performs a TLS handshake on the TCP connection. When the handshake completes, +``SslSocket::doRead()`` provides a decrypted byte stream to an instance of +``Network::FilterManagerImpl``, responsible for managing the network filter chain. + +.. image:: /_static/lor-transport-socket.svg + :width: 80% + :align: center + +It’s important to note that no operation, whether it’s a TLS handshake or a pause of a filter +pipeline is truly blocking. Since Envoy is event-based, any situation in which processing requires +additional data will lead to early event completion and yielding of the CPU to another event. When +the network makes more data available to read, a read event will trigger the resumption of a TLS +handshake. + +4. Network filter chain processing +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +As with the listener filter chain, Envoy, via `Network::FilterManagerImpl`, will instantiate a +series of :ref:`network filters ` from their filter factories. The +instance is fresh for each new connection. Network filters, like transport sockets, follow TCP +lifecycle events and are invoked as data becomes available from the transport socket. + +.. image:: /_static/lor-network-filters.svg + :width: 80% + :align: center + +Network filters are composed as a pipeline, unlike transport sockets which are one-per-connection. +Network filters come in three varieties: + +* :repo:`ReadFilter ` implementing ``onData()``, called when data is + available from the connection (due to some request). +* :repo:`WriteFilter ` implementing ``onWrite()``, called when data + is about to be written to the connection (due to some response). +* :repo:`Filter ` implementing both *ReadFilter* and *WriteFilter*. 
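As a purely hypothetical illustration (not part of the example configuration), a trivial read
filter that counts the bytes arriving on a connection could look roughly like the sketch below; the
exact virtual methods it overrides are the ones whose signatures are listed immediately after.

.. code-block:: cpp

   #include <cstdint>

   #include "envoy/network/filter.h"

   namespace Envoy {

   // Hypothetical filter: counts bytes received from the downstream connection and
   // always allows iteration to continue to the next filter in the chain.
   class ByteCounterFilter : public Network::ReadFilter {
   public:
     Network::FilterStatus onNewConnection() override {
       return Network::FilterStatus::Continue;
     }

     Network::FilterStatus onData(Buffer::Instance& data, bool /*end_stream*/) override {
       bytes_received_ += data.length();
       // Returning StopIteration here instead would pause the filter chain until
       // read_callbacks_->continueReading() is invoked (e.g. after an external RPC).
       return Network::FilterStatus::Continue;
     }

     void initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) override {
       read_callbacks_ = &callbacks;
     }

   private:
     Network::ReadFilterCallbacks* read_callbacks_{nullptr};
     uint64_t bytes_received_{0};
   };

   } // namespace Envoy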
+ +The method signatures for the key filter methods are: + +.. code-block:: cpp + + virtual FilterStatus onNewConnection() PURE; + virtual FilterStatus onData(Buffer::Instance& data, bool end_stream) PURE; + virtual FilterStatus onWrite(Buffer::Instance& data, bool end_stream) PURE; + +As with the listener filter, the ``FilterStatus`` allows filters to pause execution of the filter +chain. For example, if a rate limiting service needs to be queried, a rate limiting network filter +would return ``Network::FilterStatus::StopIteration`` from ``onData()`` and later invoke +``continueReading()`` when the query completes. + +The last network filter for a listener dealing with HTTP is :ref:`HTTP connection manager +` (HCM). This is responsible for creating the HTTP/2 codec and managing +the HTTP filter chain. In our example, this is the only network filter. An example network filter +chain making use of multiple network filters would look like: + +.. image:: /_static/lor-network-read.svg + :width: 80% + :align: center + +On the response path, the network filter chain is executed in the reverse order to the request path. + +.. image:: /_static/lor-network-write.svg + :width: 80% + :align: center + +.. _life_of_a_request_http2_decoding: + +5. HTTP/2 codec decoding +^^^^^^^^^^^^^^^^^^^^^^^^ + +The HTTP/2 codec in Envoy is based on `nghttp2 `_. It is invoked by the HCM +with plaintext bytes from the TCP connection (after network filter chain transformation). The codec +decodes the byte stream as a series of HTTP/2 frames and demultiplexes the connection into a number +of independent HTTP streams. Stream multiplexing is a key feature in HTTP/2, providing significant +performance advantages over HTTP/1. Each HTTP stream handles a single request and response. + +The codec is also responsible for handling HTTP/2 setting frames and both stream and connection +level :repo:`flow control `. + +The codecs are responsible for abstracting the specifics of the HTTP connection, presenting a +standard view to the HTTP connection manager and HTTP filter chain of a connection split into +streams, each with request/response headers/body/trailers. This is true regardless of whether the +protocol is HTTP/1, HTTP/2 or HTTP/3. + +6. HTTP filter chain processing +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +For each HTTP stream, the HCM instantiates an :ref:`HTTP filter ` chain, +following the pattern established above for listener and network filter chains. + +.. image:: /_static/lor-http-filters.svg + :width: 80% + :align: center + +There are three kinds of HTTP filter interfaces: + +* :repo:`StreamDecoderFilter ` with callbacks for request processing. +* :repo:`StreamEncoderFilter ` with callbacks for response processing. +* :repo:`StreamFilter ` implementing both `StreamDecoderFilter` and + `StreamEncoderFilter`. + +Looking at the decoder filter interface: + +.. code-block:: cpp + + virtual FilterHeadersStatus decodeHeaders(RequestHeaderMap& headers, bool end_stream) PURE; + virtual FilterDataStatus decodeData(Buffer::Instance& data, bool end_stream) PURE; + virtual FilterTrailersStatus decodeTrailers(RequestTrailerMap& trailers) PURE; + +Rather than operating on connection buffers and events, HTTP filters follow the lifecycle of an HTTP +request, e.g. ``decodeHeaders()`` takes HTTP headers as an argument rather than a byte buffer. The +returned ``FilterStatus`` provides, as with network and listener filters, the ability to manage filter +chain control flow. 
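To make this concrete, a rough sketch of what the hypothetical CustomFilter from our example could
look like is shown below. The filter, its class name and the header it adds are all invented for
this walkthrough; only the ``Http::StreamDecoderFilter`` interface it implements is real.

.. code-block:: cpp

   #include "envoy/http/filter.h"

   namespace Envoy {

   // Hypothetical decoder-only HTTP filter: tags each request with a header and
   // lets the filter chain continue towards the router filter.
   class CustomFilter : public Http::StreamDecoderFilter {
   public:
     // Http::StreamFilterBase
     void onDestroy() override {}

     // Http::StreamDecoderFilter
     Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers,
                                             bool /*end_stream*/) override {
       // Mutate the request headers before the router filter sees them.
       headers.addCopy(Http::LowerCaseString("x-acme-example"), "true");
       // Returning a Stop* status here would instead pause the stream, to be
       // resumed later via decoder_callbacks_->continueDecoding().
       return Http::FilterHeadersStatus::Continue;
     }

     Http::FilterDataStatus decodeData(Buffer::Instance& /*data*/, bool /*end_stream*/) override {
       return Http::FilterDataStatus::Continue;
     }

     Http::FilterTrailersStatus decodeTrailers(Http::RequestTrailerMap& /*trailers*/) override {
       return Http::FilterTrailersStatus::Continue;
     }

     void setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callbacks) override {
       decoder_callbacks_ = &callbacks;
     }

   private:
     Http::StreamDecoderFilterCallbacks* decoder_callbacks_{nullptr};
   };

   } // namespace Envoy

In the example bootstrap above, such a filter would be registered under the ``some.customer.filter``
name via its filter factory; filter registration itself is outside the scope of this walkthrough.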
+
+When the HTTP/2 codec makes available the HTTP request headers, these are first passed to
+``decodeHeaders()`` in CustomFilter. If the returned ``FilterHeadersStatus`` is ``Continue``, HCM
+then passes the headers (possibly mutated by CustomFilter) to the router filter.
+
+Decoder and encoder-decoder filters are executed on the request path. Encoder and encoder-decoder
+filters are executed on the response path, in :ref:`reverse direction
+`. Consider the following example filter chain:
+
+.. image:: /_static/lor-http.svg
+   :width: 80%
+   :align: center
+
+The request path will look like:
+
+.. image:: /_static/lor-http-decode.svg
+   :width: 80%
+   :align: center
+
+While the response path will look like:
+
+.. image:: /_static/lor-http-encode.svg
+   :width: 80%
+   :align: center
+
+When ``decodeHeaders()`` is invoked on the :ref:`router ` filter, the route selection is
+finalized and a cluster is picked. The HCM selects a route from its ``RouteConfiguration`` at the
+start of HTTP filter chain execution. This is referred to as the *cached route*. Filters may modify
+headers and cause a new route to be selected by asking the HCM to clear the route cache and
+reevaluate route selection. When the router filter is invoked, the route is finalized. The selected
+route’s configuration will point at an upstream cluster name. The router filter then asks the
+`ClusterManager` for an HTTP :ref:`connection pool ` for the cluster. This involves
+load balancing and the connection pool, discussed in the next section.
+
+.. image:: /_static/lor-route-config.svg
+   :width: 70%
+   :align: center
+
+The resulting HTTP connection pool is used to build an `UpstreamRequest` object in the router, which
+encapsulates the HTTP encoding and decoding callback methods for the upstream HTTP request. Once a
+stream is allocated on a connection in the HTTP connection pool, the request headers are forwarded
+to the upstream endpoint by the invocation of ``UpstreamRequest::encodeHeaders()``.
+
+The router filter is responsible for all aspects of upstream request lifecycle management on the
+stream allocated from the HTTP connection pool. It is also responsible for request timeouts, retries
+and affinity.
+
+7. Load balancing
+^^^^^^^^^^^^^^^^^
+
+Each cluster has a :ref:`load balancer ` which picks an endpoint when
+a new request arrives. Envoy supports a variety of load balancing algorithms, e.g. weighted
+round-robin, Maglev, least-loaded, random. Load balancers obtain their effective assignments from a
+combination of static bootstrap configuration, DNS, dynamic xDS (the CDS and EDS discovery services)
+and active/passive health checks. Further details on how load balancing works in Envoy are provided
+in the :ref:`load balancing documentation `.
+
+Once an endpoint is selected, the :ref:`connection pool ` for this endpoint
+is used to find a connection to forward the request on. If no connection to the host exists, or all
+connections are at their maximum concurrent stream limit, a new connection is established and placed
+in the connection pool, unless the circuit breaker for maximum connections for the cluster has
+tripped. If a maximum lifetime stream limit for a connection is configured and reached, a new
+connection is allocated in the pool and the affected HTTP/2 connection is drained. Other circuit
+breakers, e.g. maximum concurrent requests to a cluster, are also checked. See :repo:`circuit
+breakers ` and :ref:`connection pools ` for
+further details.
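
To make the idea of "picking an endpoint" concrete, the sketch below shows weight-proportional selection
over a small host list. This is deliberately not Envoy's implementation (Envoy's algorithms live behind the
``Upstream::LoadBalancer`` interface and consume EDS-supplied assignments); the ``Host`` struct, the weights
and the function are assumptions made for this example, which also assumes a non-empty host list with a
positive total weight.

.. code-block:: cpp

  #include <cstdint>
  #include <random>
  #include <string>
  #include <vector>

  struct Host {
    std::string address;
    uint32_t weight; // relative weight, e.g. as delivered by EDS
  };

  // Returns the index of a host chosen with probability proportional to its weight.
  size_t pickWeighted(const std::vector<Host>& hosts, std::mt19937_64& rng) {
    uint64_t total = 0;
    for (const Host& h : hosts) {
      total += h.weight;
    }
    std::uniform_int_distribution<uint64_t> dist(0, total - 1);
    uint64_t point = dist(rng);
    for (size_t i = 0; i < hosts.size(); ++i) {
      if (point < hosts[i].weight) {
        return i;
      }
      point -= hosts[i].weight;
    }
    return hosts.size() - 1; // not reached when weights are consistent
  }
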
+.. image:: /_static/lor-lb.svg
+   :width: 80%
+   :align: center
+
+8. HTTP/2 codec encoding
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+The selected connection's HTTP/2 codec multiplexes the request stream with any other streams going
+to the same upstream over a single TCP connection. This is the reverse of :ref:`HTTP/2 codec
+decoding `.
+
+As with the downstream HTTP/2 codec, the upstream codec is responsible for taking Envoy’s standard
+abstraction of HTTP, i.e. multiple streams multiplexed on a single connection with request/response
+headers/body/trailers, and mapping this to the specifics of HTTP/2 by generating a series of HTTP/2
+frames.
+
+9. TLS transport socket encryption
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The upstream endpoint connection's TLS transport socket encrypts the bytes from the HTTP/2 codec
+output and writes them to a TCP socket for the upstream connection. As with :ref:`TLS transport
+socket decryption `, in our example the cluster has a transport
+socket configured that provides TLS transport security. The same interfaces exist for upstream and
+downstream transport socket extensions.
+
+.. image:: /_static/lor-client.svg
+   :width: 70%
+   :align: center
+
+10. Response path and HTTP lifecycle
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The request, consisting of headers and an optional body and trailers, is proxied upstream, and the
+response is proxied downstream. The response passes through the HTTP and network filters in the
+:ref:`opposite order ` from the request.
+
+Various callbacks for decoder/encoder request lifecycle events will be invoked in HTTP filters, e.g.
+when response trailers are being forwarded or the request body is streamed. Similarly, read/write
+network filters will also have their respective callbacks invoked as data continues to flow in both
+directions during a request.
+
+:ref:`Outlier detection ` status for the endpoint is revised as the
+request progresses.
+
+A request completes when the upstream response reaches its end-of-stream, i.e. when trailers or the
+response header/body with end-stream set are received. This is handled in
+``Router::Filter::onUpstreamComplete()``.
+
+It is possible for a request to terminate early. This may be due to (but not limited to):
+
+* Request timeout.
+* Upstream endpoint stream reset.
+* HTTP filter stream reset.
+* Circuit breaking.
+* Unavailability of upstream resources, e.g. missing a cluster for a route.
+* No healthy endpoints.
+* DoS protection.
+* HTTP protocol violations.
+* Local reply from either the HCM or an HTTP filter. E.g. a rate limit HTTP filter returning a 429
+  response.
+
+If any of these occur, Envoy may either send an internally generated response, if upstream response
+headers have not yet been sent, or will reset the stream, if response headers have already been
+forwarded downstream. The Envoy :ref:`debugging FAQ ` has further information on
+interpreting these early stream terminations.
+
+11. Post-request processing
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Once a request completes, the stream is destroyed. The following also takes place:
+
+* The post-request :ref:`statistics ` are updated (e.g. timing, active
+  requests, upgrades, health checks). Some statistics, however, are updated earlier, during request
+  processing. Stats are not written to the stats :ref:`sink
+  ` at this point; they are batched
+  and written by the main thread periodically. In our example this is a statsd sink.
+* :ref:`Access logs ` are written to the access log :ref:`sinks
+  `. In our example this is a file access log.
+* :ref:`Trace ` spans are finalized. If our example request was traced, a + trace span, describing the duration and details of the request would be created by the HCM when + processing request headers and then finalized by the HCM during post-request processing. From 881258b4721353ba447d8605cf3ffddbf4186324 Mon Sep 17 00:00:00 2001 From: tomocy <36136133+tomocy@users.noreply.github.com> Date: Fri, 26 Jun 2020 03:42:02 +0900 Subject: [PATCH 448/909] fix Python settings (#11749) Signed-off-by: tomocy --- .devcontainer/devcontainer.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index c3c3cd7ed19f..c4ffcb978205 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -17,7 +17,7 @@ "python.pythonPath": "/usr/bin/python3", "python.formatting.provider": "yapf", "python.formatting.yapfArgs": [ - "style=tools/code_format/.style.yapf" + "--style=${workspaceFolder}/tools/code_format/.style.yapf" ], "files.exclude": { "**/.clangd/**": true, From 3cec62a096584894e8d1552ba915e47cffd8bf49 Mon Sep 17 00:00:00 2001 From: Yutong Li Date: Thu, 25 Jun 2020 11:51:48 -0700 Subject: [PATCH 449/909] server: add handler for dumping out eds (#11577) /config_dump API now supports dumping out EDS while using parameter ?include_eds Add help method dumpEndpointConfigs() to dump out EDS in /config_dump by calling this method in the handler handlerConfigDump() This will dump out envoy::admin::v3::EndpointsConfigDump by generating envoy::config::endpoint::v3::ClusterLoadAssignment based on data stored in server_.clusterManager().clusters() Missing Field: - ClusterLoadAssignment - Policy - endpoint_stale_after - StaticEndpointConfig - last_updated - DynamicEndpointConfig - version_info - last_updated Risk Level: Medium Testing: add unit test, integration test Docs Changes: operations_admin_interface Release Notes: N/A Part of fixing #3362 Signed-off-by: Yutong Li --- api/envoy/admin/v3/config_dump.proto | 13 +- api/envoy/admin/v4alpha/config_dump.proto | 13 +- docs/root/operations/admin.rst | 7 + docs/root/version_history/current.rst | 1 + .../envoy/admin/v3/config_dump.proto | 13 +- .../envoy/admin/v4alpha/config_dump.proto | 13 +- source/server/admin/BUILD | 1 + source/server/admin/admin.cc | 128 +++++- source/server/admin/admin.h | 11 +- test/integration/integration_admin_test.cc | 23 ++ test/server/admin/admin_test.cc | 369 ++++++++++++++++++ 11 files changed, 559 insertions(+), 33 deletions(-) diff --git a/api/envoy/admin/v3/config_dump.proto b/api/envoy/admin/v3/config_dump.proto index 0f51c56e6b37..73156697fdb2 100644 --- a/api/envoy/admin/v3/config_dump.proto +++ b/api/envoy/admin/v3/config_dump.proto @@ -30,9 +30,11 @@ message ConfigDump { // // * *bootstrap*: :ref:`BootstrapConfigDump ` // * *clusters*: :ref:`ClustersConfigDump ` + // * *endpoints*: :ref:`EndpointsConfigDump ` // * *listeners*: :ref:`ListenersConfigDump ` // * *routes*: :ref:`RoutesConfigDump ` - // [#not-implemented-hide:] * *endpoints*: :ref:`EndpointsConfigDump ` + // + // EDS Configuration will only be dumped by using parameter `?include_eds` // // You can filter output with the resource and mask query parameters. // See :ref:`/config_dump?resource={} `, @@ -348,8 +350,7 @@ message SecretsConfigDump { repeated DynamicSecret dynamic_warming_secrets = 3; } -// [#not-implemented-hide:] -// Envoy's EDS implementation *will* fill this message with all currently known endpoints. 
Endpoint +// Envoy's admin fill this message with all currently known endpoints. Endpoint // configuration information can be used to recreate an Envoy configuration by populating all // endpoints as static endpoints or by returning them in an EDS response. message EndpointsConfigDump { @@ -357,12 +358,12 @@ message EndpointsConfigDump { // The endpoint config. google.protobuf.Any endpoint_config = 1; - // The timestamp when the Endpoint was last updated. + // [#not-implemented-hide:] The timestamp when the Endpoint was last updated. google.protobuf.Timestamp last_updated = 2; } message DynamicEndpointConfig { - // This is the per-resource version information. This version is currently taken from the + // [#not-implemented-hide:] This is the per-resource version information. This version is currently taken from the // :ref:`version_info ` field at the time that // the endpoint configuration was loaded. string version_info = 1; @@ -370,7 +371,7 @@ message EndpointsConfigDump { // The endpoint config. google.protobuf.Any endpoint_config = 2; - // The timestamp when the Endpoint was last updated. + // [#not-implemented-hide:] The timestamp when the Endpoint was last updated. google.protobuf.Timestamp last_updated = 3; } diff --git a/api/envoy/admin/v4alpha/config_dump.proto b/api/envoy/admin/v4alpha/config_dump.proto index ca1399b21deb..8bbd5743219d 100644 --- a/api/envoy/admin/v4alpha/config_dump.proto +++ b/api/envoy/admin/v4alpha/config_dump.proto @@ -30,9 +30,11 @@ message ConfigDump { // // * *bootstrap*: :ref:`BootstrapConfigDump ` // * *clusters*: :ref:`ClustersConfigDump ` + // * *endpoints*: :ref:`EndpointsConfigDump ` // * *listeners*: :ref:`ListenersConfigDump ` // * *routes*: :ref:`RoutesConfigDump ` - // [#not-implemented-hide:] * *endpoints*: :ref:`EndpointsConfigDump ` + // + // EDS Configuration will only be dumped by using parameter `?include_eds` // // You can filter output with the resource and mask query parameters. // See :ref:`/config_dump?resource={} `, @@ -342,8 +344,7 @@ message SecretsConfigDump { repeated DynamicSecret dynamic_warming_secrets = 3; } -// [#not-implemented-hide:] -// Envoy's EDS implementation *will* fill this message with all currently known endpoints. Endpoint +// Envoy's admin fill this message with all currently known endpoints. Endpoint // configuration information can be used to recreate an Envoy configuration by populating all // endpoints as static endpoints or by returning them in an EDS response. message EndpointsConfigDump { @@ -356,7 +357,7 @@ message EndpointsConfigDump { // The endpoint config. google.protobuf.Any endpoint_config = 1; - // The timestamp when the Endpoint was last updated. + // [#not-implemented-hide:] The timestamp when the Endpoint was last updated. google.protobuf.Timestamp last_updated = 2; } @@ -364,7 +365,7 @@ message EndpointsConfigDump { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.EndpointsConfigDump.DynamicEndpointConfig"; - // This is the per-resource version information. This version is currently taken from the + // [#not-implemented-hide:] This is the per-resource version information. This version is currently taken from the // :ref:`version_info ` field at the time that // the endpoint configuration was loaded. string version_info = 1; @@ -372,7 +373,7 @@ message EndpointsConfigDump { // The endpoint config. google.protobuf.Any endpoint_config = 2; - // The timestamp when the Endpoint was last updated. 
+ // [#not-implemented-hide:] The timestamp when the Endpoint was last updated. google.protobuf.Timestamp last_updated = 3; } diff --git a/docs/root/operations/admin.rst b/docs/root/operations/admin.rst index b90a1461f415..b2ad2e7c6391 100644 --- a/docs/root/operations/admin.rst +++ b/docs/root/operations/admin.rst @@ -137,6 +137,13 @@ modify different aspects of the server: The underlying proto is marked v2alpha and hence its contents, including the JSON representation, are not guaranteed to be stable. +.. _operations_admin_interface_config_dump_include_eds: + +.. http:get:: /config_dump?include_eds + + Dump currently loaded configuration including EDS. See the :ref:`response definition ` for more + information. + .. _operations_admin_interface_config_dump_by_mask: .. http:get:: /config_dump?mask={} diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 8d133b922f81..95bd4c5bf88b 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -59,6 +59,7 @@ New Features * access loggers: extened specifier for FilterStateFormatter to output :ref:`unstructured log string `. * access loggers: file access logger config added :ref:`log_format `. * access loggers: gRPC access logger config added added :ref:`API version ` to explicitly set the version of gRPC service endpoint and message to be used. +* admin: added support for dumping EDS config at :ref:`/config_dump?include_eds `. * aggregate cluster: make route :ref:`retry_priority ` predicates work with :ref:`this cluster type `. * build: official released binary is now built on Ubuntu 18.04, requires glibc >= 2.27. * build: official released binary is now built with Clang 10.0.0. diff --git a/generated_api_shadow/envoy/admin/v3/config_dump.proto b/generated_api_shadow/envoy/admin/v3/config_dump.proto index 0f51c56e6b37..73156697fdb2 100644 --- a/generated_api_shadow/envoy/admin/v3/config_dump.proto +++ b/generated_api_shadow/envoy/admin/v3/config_dump.proto @@ -30,9 +30,11 @@ message ConfigDump { // // * *bootstrap*: :ref:`BootstrapConfigDump ` // * *clusters*: :ref:`ClustersConfigDump ` + // * *endpoints*: :ref:`EndpointsConfigDump ` // * *listeners*: :ref:`ListenersConfigDump ` // * *routes*: :ref:`RoutesConfigDump ` - // [#not-implemented-hide:] * *endpoints*: :ref:`EndpointsConfigDump ` + // + // EDS Configuration will only be dumped by using parameter `?include_eds` // // You can filter output with the resource and mask query parameters. // See :ref:`/config_dump?resource={} `, @@ -348,8 +350,7 @@ message SecretsConfigDump { repeated DynamicSecret dynamic_warming_secrets = 3; } -// [#not-implemented-hide:] -// Envoy's EDS implementation *will* fill this message with all currently known endpoints. Endpoint +// Envoy's admin fill this message with all currently known endpoints. Endpoint // configuration information can be used to recreate an Envoy configuration by populating all // endpoints as static endpoints or by returning them in an EDS response. message EndpointsConfigDump { @@ -357,12 +358,12 @@ message EndpointsConfigDump { // The endpoint config. google.protobuf.Any endpoint_config = 1; - // The timestamp when the Endpoint was last updated. + // [#not-implemented-hide:] The timestamp when the Endpoint was last updated. google.protobuf.Timestamp last_updated = 2; } message DynamicEndpointConfig { - // This is the per-resource version information. 
This version is currently taken from the + // [#not-implemented-hide:] This is the per-resource version information. This version is currently taken from the // :ref:`version_info ` field at the time that // the endpoint configuration was loaded. string version_info = 1; @@ -370,7 +371,7 @@ message EndpointsConfigDump { // The endpoint config. google.protobuf.Any endpoint_config = 2; - // The timestamp when the Endpoint was last updated. + // [#not-implemented-hide:] The timestamp when the Endpoint was last updated. google.protobuf.Timestamp last_updated = 3; } diff --git a/generated_api_shadow/envoy/admin/v4alpha/config_dump.proto b/generated_api_shadow/envoy/admin/v4alpha/config_dump.proto index ca1399b21deb..8bbd5743219d 100644 --- a/generated_api_shadow/envoy/admin/v4alpha/config_dump.proto +++ b/generated_api_shadow/envoy/admin/v4alpha/config_dump.proto @@ -30,9 +30,11 @@ message ConfigDump { // // * *bootstrap*: :ref:`BootstrapConfigDump ` // * *clusters*: :ref:`ClustersConfigDump ` + // * *endpoints*: :ref:`EndpointsConfigDump ` // * *listeners*: :ref:`ListenersConfigDump ` // * *routes*: :ref:`RoutesConfigDump ` - // [#not-implemented-hide:] * *endpoints*: :ref:`EndpointsConfigDump ` + // + // EDS Configuration will only be dumped by using parameter `?include_eds` // // You can filter output with the resource and mask query parameters. // See :ref:`/config_dump?resource={} `, @@ -342,8 +344,7 @@ message SecretsConfigDump { repeated DynamicSecret dynamic_warming_secrets = 3; } -// [#not-implemented-hide:] -// Envoy's EDS implementation *will* fill this message with all currently known endpoints. Endpoint +// Envoy's admin fill this message with all currently known endpoints. Endpoint // configuration information can be used to recreate an Envoy configuration by populating all // endpoints as static endpoints or by returning them in an EDS response. message EndpointsConfigDump { @@ -356,7 +357,7 @@ message EndpointsConfigDump { // The endpoint config. google.protobuf.Any endpoint_config = 1; - // The timestamp when the Endpoint was last updated. + // [#not-implemented-hide:] The timestamp when the Endpoint was last updated. google.protobuf.Timestamp last_updated = 2; } @@ -364,7 +365,7 @@ message EndpointsConfigDump { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.EndpointsConfigDump.DynamicEndpointConfig"; - // This is the per-resource version information. This version is currently taken from the + // [#not-implemented-hide:] This is the per-resource version information. This version is currently taken from the // :ref:`version_info ` field at the time that // the endpoint configuration was loaded. string version_info = 1; @@ -372,7 +373,7 @@ message EndpointsConfigDump { // The endpoint config. google.protobuf.Any endpoint_config = 2; - // The timestamp when the Endpoint was last updated. + // [#not-implemented-hide:] The timestamp when the Endpoint was last updated. 
google.protobuf.Timestamp last_updated = 3; } diff --git a/source/server/admin/BUILD b/source/server/admin/BUILD index 1ddc534ef0bc..2fb1cd56b1ac 100644 --- a/source/server/admin/BUILD +++ b/source/server/admin/BUILD @@ -67,6 +67,7 @@ envoy_cc_library( "//source/extensions/access_loggers/file:file_access_log_lib", "@envoy_api//envoy/admin/v3:pkg_cc_proto", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + "@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto", "@envoy_api//envoy/config/route/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", ], diff --git a/source/server/admin/admin.cc b/source/server/admin/admin.cc index 208d1caf7595..20b08bc3100a 100644 --- a/source/server/admin/admin.cc +++ b/source/server/admin/admin.cc @@ -14,11 +14,13 @@ #include "envoy/admin/v3/metrics.pb.h" #include "envoy/admin/v3/server_info.pb.h" #include "envoy/config/core/v3/health_check.pb.h" +#include "envoy/config/endpoint/v3/endpoint_components.pb.h" #include "envoy/filesystem/filesystem.h" #include "envoy/server/hot_restart.h" #include "envoy/server/instance.h" #include "envoy/server/options.h" #include "envoy/upstream/cluster_manager.h" +#include "envoy/upstream/outlier_detection.h" #include "envoy/upstream/upstream.h" #include "common/access_log/access_log_impl.h" @@ -130,6 +132,11 @@ absl::optional maskParam(const Http::Utility::QueryParams& params) return Utility::queryParam(params, "mask"); } +// Helper method to get the eds parameter. +bool shouldIncludeEdsInDump(const Http::Utility::QueryParams& params) { + return Utility::queryParam(params, "include_eds") != absl::nullopt; +} + // Helper method that ensures that we've setting flags based on all the health flag values on the // host. void setHealthFlag(Upstream::Host::HealthFlag flag, const Upstream::Host& host, @@ -449,8 +456,16 @@ Http::Code AdminImpl::handlerClusters(absl::string_view url, } void AdminImpl::addAllConfigToDump(envoy::admin::v3::ConfigDump& dump, - const absl::optional& mask) const { - for (const auto& key_callback_pair : config_tracker_.getCallbacksMap()) { + const absl::optional& mask, + bool include_eds) const { + Envoy::Server::ConfigTracker::CbsMap callbacks_map = config_tracker_.getCallbacksMap(); + if (include_eds) { + if (!server_.clusterManager().clusters().empty()) { + callbacks_map.emplace("endpoint", [this] { return dumpEndpointConfigs(); }); + } + } + + for (const auto& key_callback_pair : callbacks_map) { ProtobufTypes::MessagePtr message = key_callback_pair.second(); ASSERT(message); @@ -469,9 +484,16 @@ void AdminImpl::addAllConfigToDump(envoy::admin::v3::ConfigDump& dump, absl::optional> AdminImpl::addResourceToDump(envoy::admin::v3::ConfigDump& dump, - const absl::optional& mask, - const std::string& resource) const { - for (const auto& key_callback_pair : config_tracker_.getCallbacksMap()) { + const absl::optional& mask, const std::string& resource, + bool include_eds) const { + Envoy::Server::ConfigTracker::CbsMap callbacks_map = config_tracker_.getCallbacksMap(); + if (include_eds) { + if (!server_.clusterManager().clusters().empty()) { + callbacks_map.emplace("endpoint", [this] { return dumpEndpointConfigs(); }); + } + } + + for (const auto& key_callback_pair : callbacks_map) { ProtobufTypes::MessagePtr message = key_callback_pair.second(); ASSERT(message); @@ -506,23 +528,115 @@ AdminImpl::addResourceToDump(envoy::admin::v3::ConfigDump& dump, std::make_pair(Http::Code::NotFound, fmt::format("{} not found in config dump", resource))}; } +void 
AdminImpl::addLbEndpoint( + const Upstream::HostSharedPtr& host, + envoy::config::endpoint::v3::LocalityLbEndpoints& locality_lb_endpoint) const { + auto& lb_endpoint = *locality_lb_endpoint.mutable_lb_endpoints()->Add(); + if (host->metadata() != nullptr) { + lb_endpoint.mutable_metadata()->MergeFrom(*host->metadata()); + } + lb_endpoint.mutable_load_balancing_weight()->set_value(host->weight()); + + switch (host->health()) { + case Upstream::Host::Health::Healthy: + lb_endpoint.set_health_status(envoy::config::core::v3::HealthStatus::HEALTHY); + break; + case Upstream::Host::Health::Unhealthy: + lb_endpoint.set_health_status(envoy::config::core::v3::HealthStatus::UNHEALTHY); + break; + case Upstream::Host::Health::Degraded: + lb_endpoint.set_health_status(envoy::config::core::v3::HealthStatus::DEGRADED); + break; + default: + lb_endpoint.set_health_status(envoy::config::core::v3::HealthStatus::UNKNOWN); + } + + auto& endpoint = *lb_endpoint.mutable_endpoint(); + endpoint.set_hostname(host->hostname()); + Network::Utility::addressToProtobufAddress(*host->address(), *endpoint.mutable_address()); + auto& health_check_config = *endpoint.mutable_health_check_config(); + health_check_config.set_hostname(host->hostnameForHealthChecks()); + if (host->healthCheckAddress()->asString() != host->address()->asString()) { + health_check_config.set_port_value(host->healthCheckAddress()->ip()->port()); + } +} + +ProtobufTypes::MessagePtr AdminImpl::dumpEndpointConfigs() const { + auto endpoint_config_dump = std::make_unique(); + + for (auto& cluster_pair : server_.clusterManager().clusters()) { + const Upstream::Cluster& cluster = cluster_pair.second.get(); + Upstream::ClusterInfoConstSharedPtr cluster_info = cluster.info(); + envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment; + + if (cluster_info->eds_service_name().has_value()) { + cluster_load_assignment.set_cluster_name(cluster_info->eds_service_name().value()); + } else { + cluster_load_assignment.set_cluster_name(cluster_info->name()); + } + auto& policy = *cluster_load_assignment.mutable_policy(); + + for (auto& host_set : cluster.prioritySet().hostSetsPerPriority()) { + policy.mutable_overprovisioning_factor()->set_value(host_set->overprovisioningFactor()); + + if (!host_set->hostsPerLocality().get().empty()) { + for (int index = 0; index < static_cast(host_set->hostsPerLocality().get().size()); + index++) { + auto locality_host_set = host_set->hostsPerLocality().get()[index]; + + if (!locality_host_set.empty()) { + auto& locality_lb_endpoint = *cluster_load_assignment.mutable_endpoints()->Add(); + locality_lb_endpoint.mutable_locality()->MergeFrom(locality_host_set[0]->locality()); + locality_lb_endpoint.set_priority(locality_host_set[0]->priority()); + if (host_set->localityWeights() != nullptr && !host_set->localityWeights()->empty()) { + locality_lb_endpoint.mutable_load_balancing_weight()->set_value( + (*host_set->localityWeights())[index]); + } + + for (auto& host : locality_host_set) { + addLbEndpoint(host, locality_lb_endpoint); + } + } + } + } else { + for (auto& host : host_set->hosts()) { + auto& locality_lb_endpoint = *cluster_load_assignment.mutable_endpoints()->Add(); + locality_lb_endpoint.mutable_locality()->MergeFrom(host->locality()); + locality_lb_endpoint.set_priority(host->priority()); + addLbEndpoint(host, locality_lb_endpoint); + } + } + } + + if (cluster_info->addedViaApi()) { + auto& dynamic_endpoint = *endpoint_config_dump->mutable_dynamic_endpoint_configs()->Add(); + 
dynamic_endpoint.mutable_endpoint_config()->PackFrom(cluster_load_assignment); + } else { + auto& static_endpoint = *endpoint_config_dump->mutable_static_endpoint_configs()->Add(); + static_endpoint.mutable_endpoint_config()->PackFrom(cluster_load_assignment); + } + } + return endpoint_config_dump; +} + Http::Code AdminImpl::handlerConfigDump(absl::string_view url, Http::ResponseHeaderMap& response_headers, Buffer::Instance& response, AdminStream&) const { Http::Utility::QueryParams query_params = Http::Utility::parseQueryString(url); const auto resource = resourceParam(query_params); const auto mask = maskParam(query_params); + const bool include_eds = shouldIncludeEdsInDump(query_params); envoy::admin::v3::ConfigDump dump; if (resource.has_value()) { - auto err = addResourceToDump(dump, mask, resource.value()); + auto err = addResourceToDump(dump, mask, resource.value(), include_eds); if (err.has_value()) { response.add(err.value().second); return err.value().first; } } else { - addAllConfigToDump(dump, mask); + addAllConfigToDump(dump, mask, include_eds); } MessageUtil::redact(dump); diff --git a/source/server/admin/admin.h b/source/server/admin/admin.h index 439cacf0013e..51818a6a714e 100644 --- a/source/server/admin/admin.h +++ b/source/server/admin/admin.h @@ -259,7 +259,7 @@ class AdminImpl : public Admin, * Helper methods for the /config_dump url handler. */ void addAllConfigToDump(envoy::admin::v3::ConfigDump& dump, - const absl::optional& mask) const; + const absl::optional& mask, bool include_eds) const; /** * Add the config matching the passed resource to the passed config dump. * @return absl::nullopt on success, else the Http::Code and an error message that should be added @@ -267,10 +267,17 @@ class AdminImpl : public Admin, */ absl::optional> addResourceToDump(envoy::admin::v3::ConfigDump& dump, const absl::optional& mask, - const std::string& resource) const; + const std::string& resource, bool include_eds) const; std::vector sortedHandlers() const; envoy::admin::v3::ServerInfo::State serverState(); + + /** + * Helper methods for the /config_dump url handler to add endpoints config + */ + void addLbEndpoint(const Upstream::HostSharedPtr& host, + envoy::config::endpoint::v3::LocalityLbEndpoints& locality_lb_endpoint) const; + ProtobufTypes::MessagePtr dumpEndpointConfigs() const; /** * URL handlers. 
*/ diff --git a/test/integration/integration_admin_test.cc b/test/integration/integration_admin_test.cc index 17df926b2b3c..7dec5deabe8f 100644 --- a/test/integration/integration_admin_test.cc +++ b/test/integration/integration_admin_test.cc @@ -358,6 +358,29 @@ TEST_P(IntegrationAdminTest, Admin) { config_dump.configs(5).UnpackTo(&secret_config_dump); EXPECT_EQ("secret_static_0", secret_config_dump.static_secrets(0).name()); + EXPECT_EQ("200", request("admin", "GET", "/config_dump?include_eds", response)); + EXPECT_EQ("application/json", ContentType(response)); + json = Json::Factory::loadFromString(response->body()); + index = 0; + const std::string expected_types_eds[] = { + "type.googleapis.com/envoy.admin.v3.BootstrapConfigDump", + "type.googleapis.com/envoy.admin.v3.ClustersConfigDump", + "type.googleapis.com/envoy.admin.v3.EndpointsConfigDump", + "type.googleapis.com/envoy.admin.v3.ListenersConfigDump", + "type.googleapis.com/envoy.admin.v3.ScopedRoutesConfigDump", + "type.googleapis.com/envoy.admin.v3.RoutesConfigDump", + "type.googleapis.com/envoy.admin.v3.SecretsConfigDump"}; + + for (const Json::ObjectSharedPtr& obj_ptr : json->getObjectArray("configs")) { + EXPECT_TRUE(expected_types_eds[index].compare(obj_ptr->getString("@type")) == 0); + index++; + } + + // Validate we can parse as proto. + envoy::admin::v3::ConfigDump config_dump_with_eds; + TestUtility::loadFromJson(response->body(), config_dump_with_eds); + EXPECT_EQ(7, config_dump_with_eds.configs_size()); + // Validate that the "inboundonly" does not stop the default listener. response = IntegrationUtil::makeSingleRequest(lookupPort("admin"), "POST", "/drain_listeners?inboundonly", "", diff --git a/test/server/admin/admin_test.cc b/test/server/admin/admin_test.cc index 8b80d8d9ba71..443ae80595af 100644 --- a/test/server/admin/admin_test.cc +++ b/test/server/admin/admin_test.cc @@ -1,7 +1,9 @@ #include #include +#include #include #include +#include #include "envoy/admin/v3/clusters.pb.h" #include "envoy/admin/v3/config_dump.pb.h" @@ -10,11 +12,14 @@ #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" #include "envoy/extensions/transport_sockets/tls/v3/cert.pb.h" #include "envoy/json/json_object.h" +#include "envoy/upstream/outlier_detection.h" +#include "envoy/upstream/upstream.h" #include "common/http/message_impl.h" #include "common/json/json_loader.h" #include "common/protobuf/protobuf.h" #include "common/protobuf/utility.h" +#include "common/upstream/upstream_impl.h" #include "test/server/admin/admin_instance.h" #include "test/test_common/logging.h" @@ -217,6 +222,283 @@ TEST_P(AdminInstanceTest, ConfigDumpMaintainsOrder) { } } +// helper method for adding host's info +void addHostInfo(NiceMock& host, const std::string& hostname, + const std::string& address_url, envoy::config::core::v3::Locality& locality, + const std::string& hostname_for_healthcheck, + const std::string& healthcheck_address_url, int weight, int priority) { + ON_CALL(host, locality()).WillByDefault(ReturnRef(locality)); + + Network::Address::InstanceConstSharedPtr address = Network::Utility::resolveUrl(address_url); + ON_CALL(host, address()).WillByDefault(Return(address)); + ON_CALL(host, hostname()).WillByDefault(ReturnRef(hostname)); + + ON_CALL(host, hostnameForHealthChecks()).WillByDefault(ReturnRef(hostname_for_healthcheck)); + Network::Address::InstanceConstSharedPtr healthcheck_address = + Network::Utility::resolveUrl(healthcheck_address_url); + ON_CALL(host, 
healthCheckAddress()).WillByDefault(Return(healthcheck_address)); + + auto metadata = std::make_shared(); + ON_CALL(host, metadata()).WillByDefault(Return(metadata)); + + ON_CALL(host, health()).WillByDefault(Return(Upstream::Host::Health::Healthy)); + + ON_CALL(host, weight()).WillByDefault(Return(weight)); + ON_CALL(host, priority()).WillByDefault(Return(priority)); +} + +// Test that using ?include_eds parameter adds EDS to the config dump. +TEST_P(AdminInstanceTest, ConfigDumpWithEndpoint) { + Upstream::ClusterManager::ClusterInfoMap cluster_map; + ON_CALL(server_.cluster_manager_, clusters()).WillByDefault(ReturnPointee(&cluster_map)); + + NiceMock cluster; + cluster_map.emplace(cluster.info_->name_, cluster); + + ON_CALL(*cluster.info_, addedViaApi()).WillByDefault(Return(false)); + + Upstream::MockHostSet* host_set = cluster.priority_set_.getMockHostSet(0); + auto host = std::make_shared>(); + host_set->hosts_.emplace_back(host); + + envoy::config::core::v3::Locality locality; + const std::string hostname_for_healthcheck = "test_hostname_healthcheck"; + const std::string hostname = "foo.com"; + + addHostInfo(*host, hostname, "tcp://1.2.3.4:80", locality, hostname_for_healthcheck, + "tcp://1.2.3.5:90", 5, 6); + + Buffer::OwnedImpl response; + Http::TestResponseHeaderMapImpl header_map; + EXPECT_EQ(Http::Code::OK, getCallback("/config_dump?include_eds", header_map, response)); + std::string output = response.toString(); + const std::string expected_json = R"EOF({ + "configs": [ + { + "@type": "type.googleapis.com/envoy.admin.v3.EndpointsConfigDump", + "static_endpoint_configs": [ + { + "endpoint_config": { + "@type": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + "cluster_name": "fake_cluster", + "endpoints": [ + { + "locality": {}, + "lb_endpoints": [ + { + "endpoint": { + "address": { + "socket_address": { + "address": "1.2.3.4", + "port_value": 80 + } + }, + "health_check_config": { + "port_value": 90, + "hostname": "test_hostname_healthcheck" + }, + "hostname": "foo.com" + }, + "health_status": "HEALTHY", + "metadata": {}, + "load_balancing_weight": 5 + } + ], + "priority": 6 + } + ], + "policy": { + "overprovisioning_factor": 140 + } + } + } + ] + } + ] +} +)EOF"; + EXPECT_EQ(expected_json, output); +} + +// Test EDS config dump while multiple localities and priorities exist +TEST_P(AdminInstanceTest, ConfigDumpWithLocalityEndpoint) { + Upstream::ClusterManager::ClusterInfoMap cluster_map; + ON_CALL(server_.cluster_manager_, clusters()).WillByDefault(ReturnPointee(&cluster_map)); + + NiceMock cluster; + cluster_map.emplace(cluster.info_->name_, cluster); + + ON_CALL(*cluster.info_, addedViaApi()).WillByDefault(Return(false)); + + Upstream::MockHostSet* host_set_1 = cluster.priority_set_.getMockHostSet(0); + auto host_1 = std::make_shared>(); + host_set_1->hosts_.emplace_back(host_1); + + envoy::config::core::v3::Locality locality_1; + locality_1.set_region("oceania"); + locality_1.set_zone("hello"); + locality_1.set_sub_zone("world"); + + const std::string hostname_for_healthcheck = "test_hostname_healthcheck"; + const std::string hostname_1 = "foo.com"; + + addHostInfo(*host_1, hostname_1, "tcp://1.2.3.4:80", locality_1, hostname_for_healthcheck, + "tcp://1.2.3.5:90", 5, 6); + + auto host_2 = std::make_shared>(); + host_set_1->hosts_.emplace_back(host_2); + const std::string empty_hostname_for_healthcheck = ""; + const std::string hostname_2 = "boo.com"; + + addHostInfo(*host_2, hostname_2, "tcp://1.2.3.7:8", locality_1, empty_hostname_for_healthcheck, + 
"tcp://1.2.3.7:8", 3, 6); + + envoy::config::core::v3::Locality locality_2; + + auto host_3 = std::make_shared>(); + host_set_1->hosts_.emplace_back(host_3); + const std::string hostname_3 = "coo.com"; + + addHostInfo(*host_3, hostname_3, "tcp://1.2.3.8:8", locality_2, empty_hostname_for_healthcheck, + "tcp://1.2.3.8:8", 3, 4); + + std::vector locality_hosts = { + {Upstream::HostSharedPtr(host_1), Upstream::HostSharedPtr(host_2)}, + {Upstream::HostSharedPtr(host_3)}}; + auto hosts_per_locality = new Upstream::HostsPerLocalityImpl(std::move(locality_hosts), false); + + Upstream::LocalityWeightsConstSharedPtr locality_weights{new Upstream::LocalityWeights{1, 3}}; + ON_CALL(*host_set_1, hostsPerLocality()).WillByDefault(ReturnRef(*hosts_per_locality)); + ON_CALL(*host_set_1, localityWeights()).WillByDefault(Return(locality_weights)); + + Upstream::MockHostSet* host_set_2 = cluster.priority_set_.getMockHostSet(1); + auto host_4 = std::make_shared>(); + host_set_2->hosts_.emplace_back(host_4); + const std::string hostname_4 = "doo.com"; + + addHostInfo(*host_4, hostname_4, "tcp://1.2.3.9:8", locality_1, empty_hostname_for_healthcheck, + "tcp://1.2.3.9:8", 3, 2); + + Buffer::OwnedImpl response; + Http::TestResponseHeaderMapImpl header_map; + EXPECT_EQ(Http::Code::OK, getCallback("/config_dump?include_eds", header_map, response)); + std::string output = response.toString(); + const std::string expected_json = R"EOF({ + "configs": [ + { + "@type": "type.googleapis.com/envoy.admin.v3.EndpointsConfigDump", + "static_endpoint_configs": [ + { + "endpoint_config": { + "@type": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + "cluster_name": "fake_cluster", + "endpoints": [ + { + "locality": { + "region": "oceania", + "zone": "hello", + "sub_zone": "world" + }, + "lb_endpoints": [ + { + "endpoint": { + "address": { + "socket_address": { + "address": "1.2.3.4", + "port_value": 80 + } + }, + "health_check_config": { + "port_value": 90, + "hostname": "test_hostname_healthcheck" + }, + "hostname": "foo.com" + }, + "health_status": "HEALTHY", + "metadata": {}, + "load_balancing_weight": 5 + }, + { + "endpoint": { + "address": { + "socket_address": { + "address": "1.2.3.7", + "port_value": 8 + } + }, + "health_check_config": {}, + "hostname": "boo.com" + }, + "health_status": "HEALTHY", + "metadata": {}, + "load_balancing_weight": 3 + } + ], + "load_balancing_weight": 1, + "priority": 6 + }, + { + "locality": {}, + "lb_endpoints": [ + { + "endpoint": { + "address": { + "socket_address": { + "address": "1.2.3.8", + "port_value": 8 + } + }, + "health_check_config": {}, + "hostname": "coo.com" + }, + "health_status": "HEALTHY", + "metadata": {}, + "load_balancing_weight": 3 + } + ], + "load_balancing_weight": 3, + "priority": 4 + }, + { + "locality": { + "region": "oceania", + "zone": "hello", + "sub_zone": "world" + }, + "lb_endpoints": [ + { + "endpoint": { + "address": { + "socket_address": { + "address": "1.2.3.9", + "port_value": 8 + } + }, + "health_check_config": {}, + "hostname": "doo.com" + }, + "health_status": "HEALTHY", + "metadata": {}, + "load_balancing_weight": 3 + } + ], + "priority": 2 + } + ], + "policy": { + "overprovisioning_factor": 140 + } + } + } + ] + } + ] +} +)EOF"; + EXPECT_EQ(expected_json, output); + delete (hosts_per_locality); +} + // Test that using the resource query parameter filters the config dump. // We add both static and dynamic listener config to the dump, but expect only // dynamic in the JSON with ?resource=dynamic_listeners. 
@@ -248,6 +530,93 @@ TEST_P(AdminInstanceTest, ConfigDumpFiltersByResource) { EXPECT_EQ(expected_json, output); } +// Test that using the resource query parameter filters the config dump including EDS. +// We add both static and dynamic endpoint config to the dump, but expect only +// dynamic in the JSON with ?resource=dynamic_endpoint_configs. +TEST_P(AdminInstanceTest, ConfigDumpWithEndpointFiltersByResource) { + Upstream::ClusterManager::ClusterInfoMap cluster_map; + ON_CALL(server_.cluster_manager_, clusters()).WillByDefault(ReturnPointee(&cluster_map)); + + NiceMock cluster_1; + cluster_map.emplace(cluster_1.info_->name_, cluster_1); + + ON_CALL(*cluster_1.info_, addedViaApi()).WillByDefault(Return(true)); + + Upstream::MockHostSet* host_set = cluster_1.priority_set_.getMockHostSet(0); + auto host_1 = std::make_shared>(); + host_set->hosts_.emplace_back(host_1); + + envoy::config::core::v3::Locality locality; + const std::string hostname_for_healthcheck = "test_hostname_healthcheck"; + const std::string hostname_1 = "foo.com"; + + addHostInfo(*host_1, hostname_1, "tcp://1.2.3.4:80", locality, hostname_for_healthcheck, + "tcp://1.2.3.5:90", 5, 6); + + NiceMock cluster_2; + cluster_2.info_->name_ = "fake_cluster_2"; + cluster_map.emplace(cluster_2.info_->name_, cluster_2); + + ON_CALL(*cluster_2.info_, addedViaApi()).WillByDefault(Return(false)); + + Upstream::MockHostSet* host_set_2 = cluster_2.priority_set_.getMockHostSet(0); + auto host_2 = std::make_shared>(); + host_set_2->hosts_.emplace_back(host_2); + const std::string hostname_2 = "boo.com"; + + addHostInfo(*host_2, hostname_2, "tcp://1.2.3.5:8", locality, hostname_for_healthcheck, + "tcp://1.2.3.4:1", 3, 4); + + Buffer::OwnedImpl response; + Http::TestResponseHeaderMapImpl header_map; + EXPECT_EQ(Http::Code::OK, + getCallback("/config_dump?include_eds&resource=dynamic_endpoint_configs", header_map, + response)); + std::string output = response.toString(); + const std::string expected_json = R"EOF({ + "configs": [ + { + "@type": "type.googleapis.com/envoy.admin.v3.EndpointsConfigDump.DynamicEndpointConfig", + "endpoint_config": { + "@type": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + "cluster_name": "fake_cluster", + "endpoints": [ + { + "locality": {}, + "lb_endpoints": [ + { + "endpoint": { + "address": { + "socket_address": { + "address": "1.2.3.4", + "port_value": 80 + } + }, + "health_check_config": { + "port_value": 90, + "hostname": "test_hostname_healthcheck" + }, + "hostname": "foo.com" + }, + "health_status": "HEALTHY", + "metadata": {}, + "load_balancing_weight": 5 + } + ], + "priority": 6 + } + ], + "policy": { + "overprovisioning_factor": 140 + } + } + } + ] +} +)EOF"; + EXPECT_EQ(expected_json, output); +} + // Test that using the mask query parameter filters the config dump. // We add both static and dynamic listener config to the dump, but expect only // dynamic in the JSON with ?mask=dynamic_listeners. From cef64978fb9fe48583ef8d95b90921de23d70127 Mon Sep 17 00:00:00 2001 From: htuch Date: Thu, 25 Jun 2020 15:20:47 -0400 Subject: [PATCH 450/909] gitattributes: improve GitHub and local git treatment of corpus and API generated files. (#11742) - Ignore v4alpha in GitHub diffs, since v4 is some ways away, reduce the amount of noise in reviews. - Get rid of rules for .generated.pb_text, I don't think we have any of these today. - Don't treat API generated files as binary locally, this causes pain when doing manual fixups on patches for CVEs, as they appear as binary diffs. 
Signed-off-by: Harvey Tuch --- .gitattributes | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/.gitattributes b/.gitattributes index bed26d270e07..03203a47dda8 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1,10 +1,6 @@ /docs/root/version_history/current.rst merge=union -*.generated.pb_text linguist-generated=true -*.generated.pb_text -diff -merge +/api/envoy/**/v4alpha/* linguist-generated=true /generated_api_shadow/envoy/** linguist-generated=true -/generated_api_shadow/envoy/** -diff -merge /generated_api_shadow/bazel/** linguist-generated=true -/generated_api_shadow/bazel/** -diff -merge *.svg binary /test/**/*_corpus/* linguist-generated=true -/test/**/*_corpus/* -diff -merge From a707b460b12920e79161e0a4ed5033164b683b3e Mon Sep 17 00:00:00 2001 From: htuch Date: Thu, 25 Jun 2020 15:24:14 -0400 Subject: [PATCH 451/909] udpa: UDPA URI encoding/decoding utils. (#11678) These map between the structured udpa::core::v1::ResourceName message and flat udpa:// URI representations of resource names. Risk level: Low Testing: Unit tests added. Part of #11264. Signed-off-by: Harvey Tuch --- api/bazel/repository_locations.bzl | 4 +- .../bazel/repository_locations.bzl | 4 +- source/common/config/BUILD | 10 +++ source/common/config/udpa_resource.cc | 79 +++++++++++++++++ source/common/config/udpa_resource.h | 48 +++++++++++ test/common/config/BUILD | 9 ++ test/common/config/udpa_resource_test.cc | 84 +++++++++++++++++++ tools/spelling/spelling_dictionary.txt | 1 + 8 files changed, 235 insertions(+), 4 deletions(-) create mode 100644 source/common/config/udpa_resource.cc create mode 100644 source/common/config/udpa_resource.h create mode 100644 test/common/config/udpa_resource_test.cc diff --git a/api/bazel/repository_locations.bzl b/api/bazel/repository_locations.bzl index afe78af1e47b..80989a6f4d35 100644 --- a/api/bazel/repository_locations.bzl +++ b/api/bazel/repository_locations.bzl @@ -13,8 +13,8 @@ GOOGLEAPIS_SHA = "a45019af4d3290f02eaeb1ce10990166978c807cb33a9692141a076ba46d14 PROMETHEUS_GIT_SHA = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c" # Nov 17, 2017 PROMETHEUS_SHA = "783bdaf8ee0464b35ec0c8704871e1e72afa0005c3f3587f65d9d6694bf3911b" -UDPA_GIT_SHA = "9f54a527e3bf4d1f4a6527f93d329fb1cc4516ac" # May 8, 2020 -UDPA_SHA256 = "7edae88586a84360203e5a4c724080c740b7b6002d5d56f5e806f27c912895cd" +UDPA_GIT_SHA = "ca580c4fcf87b178547c2e9e41a2481b0008efe9" # June 24, 2020 +UDPA_SHA256 = "a1dc305cd56f1dd393fec8ec6b19f4f7d76af9740c7746e9377c8dd480f77e70" ZIPKINAPI_RELEASE = "0.2.2" # Aug 23, 2019 ZIPKINAPI_SHA256 = "688c4fe170821dd589f36ec45aaadc03a618a40283bc1f97da8fa11686fc816b" diff --git a/generated_api_shadow/bazel/repository_locations.bzl b/generated_api_shadow/bazel/repository_locations.bzl index afe78af1e47b..80989a6f4d35 100644 --- a/generated_api_shadow/bazel/repository_locations.bzl +++ b/generated_api_shadow/bazel/repository_locations.bzl @@ -13,8 +13,8 @@ GOOGLEAPIS_SHA = "a45019af4d3290f02eaeb1ce10990166978c807cb33a9692141a076ba46d14 PROMETHEUS_GIT_SHA = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c" # Nov 17, 2017 PROMETHEUS_SHA = "783bdaf8ee0464b35ec0c8704871e1e72afa0005c3f3587f65d9d6694bf3911b" -UDPA_GIT_SHA = "9f54a527e3bf4d1f4a6527f93d329fb1cc4516ac" # May 8, 2020 -UDPA_SHA256 = "7edae88586a84360203e5a4c724080c740b7b6002d5d56f5e806f27c912895cd" +UDPA_GIT_SHA = "ca580c4fcf87b178547c2e9e41a2481b0008efe9" # June 24, 2020 +UDPA_SHA256 = "a1dc305cd56f1dd393fec8ec6b19f4f7d76af9740c7746e9377c8dd480f77e70" ZIPKINAPI_RELEASE = "0.2.2" # Aug 23, 2019 ZIPKINAPI_SHA256 = 
"688c4fe170821dd589f36ec45aaadc03a618a40283bc1f97da8fa11686fc816b" diff --git a/source/common/config/BUILD b/source/common/config/BUILD index 19a26940fbb8..4a57dc587013 100644 --- a/source/common/config/BUILD +++ b/source/common/config/BUILD @@ -300,6 +300,16 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "udpa_resource_lib", + srcs = ["udpa_resource.cc"], + hdrs = ["udpa_resource.h"], + deps = [ + "//source/common/http:utility_lib", + "@com_github_cncf_udpa//udpa/core/v1:pkg_cc_proto", + ], +) + envoy_cc_library( name = "update_ack_lib", hdrs = ["update_ack.h"], diff --git a/source/common/config/udpa_resource.cc b/source/common/config/udpa_resource.cc new file mode 100644 index 000000000000..6c10d4cc5e3f --- /dev/null +++ b/source/common/config/udpa_resource.cc @@ -0,0 +1,79 @@ +#include "common/config/udpa_resource.h" + +#include + +#include "common/common/fmt.h" +#include "common/http/utility.h" + +#include "absl/strings/str_cat.h" +#include "absl/strings/str_split.h" + +// TODO(htuch): This file has a bunch of ad hoc URI encoding/decoding based on Envoy's HTTP util +// functions. Once https://github.com/envoyproxy/envoy/issues/6588 lands, we can replace with GURL. + +namespace Envoy { +namespace Config { + +using PercentEncoding = Http::Utility::PercentEncoding; + +std::string UdpaResourceName::encodeUri(const udpa::core::v1::ResourceName& resource_name, + const EncodeOptions& options) { + // We need to percent-encode authority, id, path and query params. Qualified types should not have + // reserved characters. + const std::string authority = PercentEncoding::encode(resource_name.authority(), "%/?#"); + std::vector path_components; + for (const auto& id_component : resource_name.id()) { + path_components.emplace_back(PercentEncoding::encode(id_component, "%:/?#[]")); + } + const std::string path = absl::StrJoin(path_components, "/"); + std::vector query_param_components; + for (const auto& context_param : resource_name.context().params()) { + query_param_components.emplace_back( + absl::StrCat(PercentEncoding::encode(context_param.first, "%#[]&="), "=", + PercentEncoding::encode(context_param.second, "%#[]&="))); + } + if (options.sort_context_params_) { + std::sort(query_param_components.begin(), query_param_components.end()); + } + const std::string query_params = + query_param_components.empty() ? "" : "?" + absl::StrJoin(query_param_components, "&"); + return absl::StrCat("udpa://", authority, "/", resource_name.resource_type(), + path.empty() ? 
"" : "/", path, query_params); +} + +udpa::core::v1::ResourceName UdpaResourceName::decodeUri(absl::string_view resource_uri) { + if (!absl::StartsWith(resource_uri, "udpa:")) { + throw UdpaResourceName::DecodeException( + fmt::format("{} does not have an udpa scheme", resource_uri)); + } + absl::string_view host, path; + Http::Utility::extractHostPathFromUri(resource_uri, host, path); + udpa::core::v1::ResourceName decoded_resource_name; + decoded_resource_name.set_authority(PercentEncoding::decode(host)); + const size_t query_params_start = path.find('?'); + Http::Utility::QueryParams query_params; + if (query_params_start != absl::string_view::npos) { + query_params = Http::Utility::parseQueryString(path.substr(query_params_start)); + for (const auto& it : query_params) { + (*decoded_resource_name.mutable_context() + ->mutable_params())[PercentEncoding::decode(it.first)] = + PercentEncoding::decode(it.second); + } + path = path.substr(0, query_params_start); + } + // This is guaranteed by Http::Utility::extractHostPathFromUri. + ASSERT(absl::StartsWith(path, "/")); + const std::vector path_components = absl::StrSplit(path.substr(1), '/'); + decoded_resource_name.set_resource_type(std::string(path_components[0])); + if (decoded_resource_name.resource_type().empty()) { + throw UdpaResourceName::DecodeException( + fmt::format("Qualified type missing from {}", resource_uri)); + } + for (auto it = std::next(path_components.cbegin()); it != path_components.cend(); it++) { + decoded_resource_name.add_id(PercentEncoding::decode(*it)); + } + return decoded_resource_name; +} + +} // namespace Config +} // namespace Envoy diff --git a/source/common/config/udpa_resource.h b/source/common/config/udpa_resource.h new file mode 100644 index 000000000000..8c81eab9143b --- /dev/null +++ b/source/common/config/udpa_resource.h @@ -0,0 +1,48 @@ +#include "envoy/common/exception.h" + +#include "absl/strings/string_view.h" +#include "udpa/core/v1/resource_name.pb.h" + +namespace Envoy { +namespace Config { + +// Utilities for URI encoding/decoding of udpa::core::v1::ResourceName. +class UdpaResourceName { +public: + // Options for encoded URIs. + struct EncodeOptions { + // Should the context params be sorted by key? This provides deterministic encoding. + bool sort_context_params_{}; + }; + + /** + * Encode a udpa::core::v1::ResourceName message as a udpa:// URI string. + * + * @param resource_name resource name message. + * @param options encoding options. + * @return std::string udpa:// URI for resource_name. + */ + static std::string encodeUri(const udpa::core::v1::ResourceName& resource_name, + const EncodeOptions& options); + static std::string encodeUri(const udpa::core::v1::ResourceName& resource_name) { + return encodeUri(resource_name, {}); + } + + // Thrown when an exception occurs during URI decoding. + class DecodeException : public EnvoyException { + public: + DecodeException(const std::string& what) : EnvoyException(what) {} + }; + + /** + * Decode a udpa:// URI string to a udpa::core::v1::ResourceName. + * + * @param resource_uri udpa:// resource URI. + * @return udpa::core::v1::ResourceName resource name message for resource_uri. + * @throws DecodeException when parsing fails. 
+ */ + static udpa::core::v1::ResourceName decodeUri(absl::string_view resource_uri); +}; + +} // namespace Config +} // namespace Envoy diff --git a/test/common/config/BUILD b/test/common/config/BUILD index 4d55e78e637e..06cda3b3dfd6 100644 --- a/test/common/config/BUILD +++ b/test/common/config/BUILD @@ -416,6 +416,15 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "udpa_resource_test", + srcs = ["udpa_resource_test.cc"], + deps = [ + "//source/common/config:udpa_resource_lib", + "//test/test_common:utility_lib", + ], +) + envoy_proto_library( name = "version_converter_proto", srcs = ["version_converter.proto"], diff --git a/test/common/config/udpa_resource_test.cc b/test/common/config/udpa_resource_test.cc new file mode 100644 index 000000000000..ad05b37ed8e5 --- /dev/null +++ b/test/common/config/udpa_resource_test.cc @@ -0,0 +1,84 @@ +#include "common/config/udpa_resource.h" + +#include "test/test_common/utility.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace Config { +namespace { + +const std::string EscapedUri = + "udpa://f123%25%2F%3F%23o/envoy.config.listener.v3.Listener/b%25%3A%2F%3F%23%5B%5Dar//" + "baz?%25%23%5B%5D%26%3Dab=cde%25%23%5B%5D%26%3Df"; +const std::string EscapedUriWithManyQueryParams = + "udpa://f123%25%2F%3F%23o/envoy.config.listener.v3.Listener/b%25%3A%2F%3F%23%5B%5Dar//" + "baz?%25%23%5B%5D%26%3D=bar&%25%23%5B%5D%26%3Dab=cde%25%23%5B%5D%26%3Df&foo=%25%23%5B%5D%26%3D"; + +// for all x. encodeUri(decodeUri(x)) = x where x comes from sample of valid udpa:// URIs. +// TODO(htuch): write a fuzzer that validates this property as well. +TEST(UdpaResourceNameTest, DecodeEncode) { + const std::vector uris = { + "udpa:///envoy.config.listener.v3.Listener", + "udpa://foo/envoy.config.listener.v3.Listener", + "udpa://foo/envoy.config.listener.v3.Listener/bar", + "udpa://foo/envoy.config.listener.v3.Listener/bar/baz", + "udpa://foo/envoy.config.listener.v3.Listener/bar////baz", + "udpa://foo/envoy.config.listener.v3.Listener?ab=cde", + "udpa://foo/envoy.config.listener.v3.Listener/bar?ab=cd", + "udpa://foo/envoy.config.listener.v3.Listener/bar/baz?ab=cde", + "udpa://foo/envoy.config.listener.v3.Listener/bar/baz?ab=", + "udpa://foo/envoy.config.listener.v3.Listener/bar/baz?=cd", + "udpa://foo/envoy.config.listener.v3.Listener/bar/baz?ab=cde&ba=edc&z=f", + EscapedUri, + EscapedUriWithManyQueryParams, + }; + UdpaResourceName::EncodeOptions encode_options; + encode_options.sort_context_params_ = true; + for (const std::string& uri : uris) { + EXPECT_EQ(uri, UdpaResourceName::encodeUri(UdpaResourceName::decodeUri(uri), encode_options)); + } +} + +// Validate that URI decoding behaves as expected component-wise. +TEST(UdpaResourceNameTest, DecodeSuccess) { + const auto resource_name = UdpaResourceName::decodeUri(EscapedUriWithManyQueryParams); + EXPECT_EQ("f123%/?#o", resource_name.authority()); + EXPECT_EQ("envoy.config.listener.v3.Listener", resource_name.resource_type()); + EXPECT_EQ(3, resource_name.id().size()); + EXPECT_EQ("b%:/?#[]ar", resource_name.id()[0]); + EXPECT_EQ("", resource_name.id()[1]); + EXPECT_EQ("baz", resource_name.id()[2]); + EXPECT_EQ(3, resource_name.context().params().size()); + EXPECT_EQ("bar", resource_name.context().params().at("%#[]&=")); + EXPECT_EQ("cde%#[]&=f", resource_name.context().params().at("%#[]&=ab")); + EXPECT_EQ("%#[]&=", resource_name.context().params().at("foo")); +} + +// Validate that the URI decoding behaves with a near-empty UDPA resource name. 
+TEST(UdpaResourceNameTest, DecodeEmpty) { + const auto resource_name = + UdpaResourceName::decodeUri("udpa:///envoy.config.listener.v3.Listener"); + EXPECT_TRUE(resource_name.authority().empty()); + EXPECT_EQ("envoy.config.listener.v3.Listener", resource_name.resource_type()); + EXPECT_TRUE(resource_name.id().empty()); + EXPECT_TRUE(resource_name.context().params().empty()); +} + +// Negative tests for URI decoding. +TEST(UdpaResourceNameTest, DecodeFail) { + { + EXPECT_THROW_WITH_MESSAGE(UdpaResourceName::decodeUri("foo://"), + UdpaResourceName::DecodeException, + "foo:// does not have an udpa scheme"); + } + { + EXPECT_THROW_WITH_MESSAGE(UdpaResourceName::decodeUri("udpa://foo"), + UdpaResourceName::DecodeException, + "Qualified type missing from udpa://foo"); + } +} + +} // namespace +} // namespace Config +} // namespace Envoy diff --git a/tools/spelling/spelling_dictionary.txt b/tools/spelling/spelling_dictionary.txt index a4483164e7e2..9bfe64a17c0d 100644 --- a/tools/spelling/spelling_dictionary.txt +++ b/tools/spelling/spelling_dictionary.txt @@ -1105,6 +1105,7 @@ typedef typeid typesafe ucontext +udpa uint un- unacked From bda15c0a5a0390daed991ed4c96a4873ab3ba7d8 Mon Sep 17 00:00:00 2001 From: Kuat Date: Thu, 25 Jun 2020 12:33:13 -0700 Subject: [PATCH 452/909] api: add filter config discovery (#11571) Define filter config discovery. Add FDS for HTTP filters (HTTP extensions is where the pain is felt the most). Modelled after RDS with a twist of config override for re-use. Risk Level: low (not implemented) Testing: Docs Changes: Release Notes: Issue: #7867 Signed-off-by: Kuat Yessenov --- .../v3/http_connection_manager.proto | 34 ++++++++++++++++- .../v4alpha/http_connection_manager.proto | 38 ++++++++++++++++++- api/envoy/service/filter/v3/BUILD | 14 +++++++ .../filter/v3/filter_config_discovery.proto | 37 ++++++++++++++++++ api/versioning/BUILD | 1 + .../v3/http_connection_manager.proto | 34 ++++++++++++++++- .../v4alpha/http_connection_manager.proto | 38 ++++++++++++++++++- .../envoy/service/filter/v3/BUILD | 14 +++++++ .../filter/v3/filter_config_discovery.proto | 37 ++++++++++++++++++ 9 files changed, 239 insertions(+), 8 deletions(-) create mode 100644 api/envoy/service/filter/v3/BUILD create mode 100644 api/envoy/service/filter/v3/filter_config_discovery.proto create mode 100644 generated_api_shadow/envoy/service/filter/v3/BUILD create mode 100644 generated_api_shadow/envoy/service/filter/v3/filter_config_discovery.proto diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto b/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto index 598f9aa62068..2d8b09b117f0 100644 --- a/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto +++ b/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto @@ -750,22 +750,52 @@ message ScopedRds { [(validate.rules).message = {required: true}]; } +// [#next-free-field: 6] message HttpFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.http_connection_manager.v2.HttpFilter"; + // [#not-implemented-hide:] Configuration source specifier for the late-bound + // filter configuration. The HTTP Listener is warmed until all the initial + // filter configurations are received, unless the flag to apply the default + // configuration is set. 
Subsequent filter updates are atomic on a per-worker + // basis, and apply to new streams while the active streams continue using + // the older filter configurations. If the initial delivery of the filter + // configuration fails, due to a timeout for example, the optional default + // configuration is applied. Without a default configuration, the filter is + // disabled, and the HTTP listener responds with 500 immediately. After the + // failure, the listener continues subscribing to the subsequent filter + // configurations. + message HttpFilterConfigSource { + config.core.v3.ConfigSource config_source = 1; + + // Optional default configuration to use as the initial configuration if + // there is a failure to receive the initial filter configuration or if + // `apply_default_config_without_warming` flag is set. + google.protobuf.Any default_config = 2; + + // Use the default config as the initial configuration without warming and + // waiting for the first xDS response. Requires the default configuration + // to be supplied. + bool apply_default_config_without_warming = 3; + } + reserved 3, 2; reserved "config"; - // The name of the filter to instantiate. The name must match a - // :ref:`supported filter `. + // The name of the filter configuration. The name is used as a fallback to + // select an extension if the type of the configuration proto is not + // sufficient. It also serves as a resource name in FilterConfigDS. string name = 1 [(validate.rules).string = {min_bytes: 1}]; // Filter specific configuration which depends on the filter being instantiated. See the supported // filters for further documentation. oneof config_type { google.protobuf.Any typed_config = 4; + + // [#not-implemented-hide:] Configuration source specifier for FilterConfigDS. + HttpFilterConfigSource filter_config_ds = 5; } } diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto index bf303d549712..bc3826f80f29 100644 --- a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto +++ b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto @@ -757,22 +757,56 @@ message ScopedRds { [(validate.rules).message = {required: true}]; } +// [#next-free-field: 6] message HttpFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.network.http_connection_manager.v3.HttpFilter"; + // [#not-implemented-hide:] Configuration source specifier for the late-bound + // filter configuration. The HTTP Listener is warmed until all the initial + // filter configurations are received, unless the flag to apply the default + // configuration is set. Subsequent filter updates are atomic on a per-worker + // basis, and apply to new streams while the active streams continue using + // the older filter configurations. If the initial delivery of the filter + // configuration fails, due to a timeout for example, the optional default + // configuration is applied. Without a default configuration, the filter is + // disabled, and the HTTP listener responds with 500 immediately. After the + // failure, the listener continues subscribing to the subsequent filter + // configurations. + message HttpFilterConfigSource { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.http_connection_manager.v3.HttpFilter." 
+ "HttpFilterConfigSource"; + + config.core.v4alpha.ConfigSource config_source = 1; + + // Optional default configuration to use as the initial configuration if + // there is a failure to receive the initial filter configuration or if + // `apply_default_config_without_warming` flag is set. + google.protobuf.Any default_config = 2; + + // Use the default config as the initial configuration without warming and + // waiting for the first xDS response. Requires the default configuration + // to be supplied. + bool apply_default_config_without_warming = 3; + } + reserved 3, 2; reserved "config"; - // The name of the filter to instantiate. The name must match a - // :ref:`supported filter `. + // The name of the filter configuration. The name is used as a fallback to + // select an extension if the type of the configuration proto is not + // sufficient. It also serves as a resource name in FilterConfigDS. string name = 1 [(validate.rules).string = {min_bytes: 1}]; // Filter specific configuration which depends on the filter being instantiated. See the supported // filters for further documentation. oneof config_type { google.protobuf.Any typed_config = 4; + + // [#not-implemented-hide:] Configuration source specifier for FilterConfigDS. + HttpFilterConfigSource filter_config_ds = 5; } } diff --git a/api/envoy/service/filter/v3/BUILD b/api/envoy/service/filter/v3/BUILD new file mode 100644 index 000000000000..6c68a071b873 --- /dev/null +++ b/api/envoy/service/filter/v3/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + has_services = True, + deps = [ + "//envoy/annotations:pkg", + "//envoy/service/discovery/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/service/filter/v3/filter_config_discovery.proto b/api/envoy/service/filter/v3/filter_config_discovery.proto new file mode 100644 index 000000000000..79c5846710bb --- /dev/null +++ b/api/envoy/service/filter/v3/filter_config_discovery.proto @@ -0,0 +1,37 @@ +syntax = "proto3"; + +package envoy.service.filter.v3; + +import "envoy/service/discovery/v3/discovery.proto"; + +import "google/api/annotations.proto"; + +import "envoy/annotations/resource.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.service.filter.v3"; +option java_outer_classname = "FilterConfigDiscoveryProto"; +option java_multiple_files = true; +option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: FilterConfigDS] + +// Return filter configurations. 
+service FilterConfigDiscoveryService { + option (envoy.annotations.resource).type = "envoy.config.core.v3.TypedExtensionConfig"; + + rpc StreamFilterConfigs(stream discovery.v3.DiscoveryRequest) + returns (stream discovery.v3.DiscoveryResponse) { + } + + rpc DeltaFilterConfigs(stream discovery.v3.DeltaDiscoveryRequest) + returns (stream discovery.v3.DeltaDiscoveryResponse) { + } + + rpc FetchFilterConfigs(discovery.v3.DiscoveryRequest) returns (discovery.v3.DiscoveryResponse) { + option (google.api.http).post = "/v3/discovery:filter_configs"; + option (google.api.http).body = "*"; + } +} diff --git a/api/versioning/BUILD b/api/versioning/BUILD index 796d8246a31e..1d91b1724b1c 100644 --- a/api/versioning/BUILD +++ b/api/versioning/BUILD @@ -127,6 +127,7 @@ proto_library( "//envoy/service/discovery/v3:pkg", "//envoy/service/endpoint/v3:pkg", "//envoy/service/event_reporting/v3:pkg", + "//envoy/service/filter/v3:pkg", "//envoy/service/health/v3:pkg", "//envoy/service/listener/v3:pkg", "//envoy/service/load_stats/v3:pkg", diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto index 1ebec4a8ff55..230a2b98e087 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto @@ -755,14 +755,41 @@ message ScopedRds { [(validate.rules).message = {required: true}]; } +// [#next-free-field: 6] message HttpFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.http_connection_manager.v2.HttpFilter"; + // [#not-implemented-hide:] Configuration source specifier for the late-bound + // filter configuration. The HTTP Listener is warmed until all the initial + // filter configurations are received, unless the flag to apply the default + // configuration is set. Subsequent filter updates are atomic on a per-worker + // basis, and apply to new streams while the active streams continue using + // the older filter configurations. If the initial delivery of the filter + // configuration fails, due to a timeout for example, the optional default + // configuration is applied. Without a default configuration, the filter is + // disabled, and the HTTP listener responds with 500 immediately. After the + // failure, the listener continues subscribing to the subsequent filter + // configurations. + message HttpFilterConfigSource { + config.core.v3.ConfigSource config_source = 1; + + // Optional default configuration to use as the initial configuration if + // there is a failure to receive the initial filter configuration or if + // `apply_default_config_without_warming` flag is set. + google.protobuf.Any default_config = 2; + + // Use the default config as the initial configuration without warming and + // waiting for the first xDS response. Requires the default configuration + // to be supplied. + bool apply_default_config_without_warming = 3; + } + reserved 3; - // The name of the filter to instantiate. The name must match a - // :ref:`supported filter `. + // The name of the filter configuration. The name is used as a fallback to + // select an extension if the type of the configuration proto is not + // sufficient. It also serves as a resource name in FilterConfigDS. 
string name = 1 [(validate.rules).string = {min_bytes: 1}]; // Filter specific configuration which depends on the filter being instantiated. See the supported @@ -770,6 +797,9 @@ message HttpFilter { oneof config_type { google.protobuf.Any typed_config = 4; + // [#not-implemented-hide:] Configuration source specifier for FilterConfigDS. + HttpFilterConfigSource filter_config_ds = 5; + google.protobuf.Struct hidden_envoy_deprecated_config = 2 [deprecated = true]; } } diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto index bf303d549712..bc3826f80f29 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto @@ -757,22 +757,56 @@ message ScopedRds { [(validate.rules).message = {required: true}]; } +// [#next-free-field: 6] message HttpFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.network.http_connection_manager.v3.HttpFilter"; + // [#not-implemented-hide:] Configuration source specifier for the late-bound + // filter configuration. The HTTP Listener is warmed until all the initial + // filter configurations are received, unless the flag to apply the default + // configuration is set. Subsequent filter updates are atomic on a per-worker + // basis, and apply to new streams while the active streams continue using + // the older filter configurations. If the initial delivery of the filter + // configuration fails, due to a timeout for example, the optional default + // configuration is applied. Without a default configuration, the filter is + // disabled, and the HTTP listener responds with 500 immediately. After the + // failure, the listener continues subscribing to the subsequent filter + // configurations. + message HttpFilterConfigSource { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.network.http_connection_manager.v3.HttpFilter." + "HttpFilterConfigSource"; + + config.core.v4alpha.ConfigSource config_source = 1; + + // Optional default configuration to use as the initial configuration if + // there is a failure to receive the initial filter configuration or if + // `apply_default_config_without_warming` flag is set. + google.protobuf.Any default_config = 2; + + // Use the default config as the initial configuration without warming and + // waiting for the first xDS response. Requires the default configuration + // to be supplied. + bool apply_default_config_without_warming = 3; + } + reserved 3, 2; reserved "config"; - // The name of the filter to instantiate. The name must match a - // :ref:`supported filter `. + // The name of the filter configuration. The name is used as a fallback to + // select an extension if the type of the configuration proto is not + // sufficient. It also serves as a resource name in FilterConfigDS. string name = 1 [(validate.rules).string = {min_bytes: 1}]; // Filter specific configuration which depends on the filter being instantiated. See the supported // filters for further documentation. oneof config_type { google.protobuf.Any typed_config = 4; + + // [#not-implemented-hide:] Configuration source specifier for FilterConfigDS. 
+ HttpFilterConfigSource filter_config_ds = 5; } } diff --git a/generated_api_shadow/envoy/service/filter/v3/BUILD b/generated_api_shadow/envoy/service/filter/v3/BUILD new file mode 100644 index 000000000000..6c68a071b873 --- /dev/null +++ b/generated_api_shadow/envoy/service/filter/v3/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + has_services = True, + deps = [ + "//envoy/annotations:pkg", + "//envoy/service/discovery/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/service/filter/v3/filter_config_discovery.proto b/generated_api_shadow/envoy/service/filter/v3/filter_config_discovery.proto new file mode 100644 index 000000000000..79c5846710bb --- /dev/null +++ b/generated_api_shadow/envoy/service/filter/v3/filter_config_discovery.proto @@ -0,0 +1,37 @@ +syntax = "proto3"; + +package envoy.service.filter.v3; + +import "envoy/service/discovery/v3/discovery.proto"; + +import "google/api/annotations.proto"; + +import "envoy/annotations/resource.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.service.filter.v3"; +option java_outer_classname = "FilterConfigDiscoveryProto"; +option java_multiple_files = true; +option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: FilterConfigDS] + +// Return filter configurations. +service FilterConfigDiscoveryService { + option (envoy.annotations.resource).type = "envoy.config.core.v3.TypedExtensionConfig"; + + rpc StreamFilterConfigs(stream discovery.v3.DiscoveryRequest) + returns (stream discovery.v3.DiscoveryResponse) { + } + + rpc DeltaFilterConfigs(stream discovery.v3.DeltaDiscoveryRequest) + returns (stream discovery.v3.DeltaDiscoveryResponse) { + } + + rpc FetchFilterConfigs(discovery.v3.DiscoveryRequest) returns (discovery.v3.DiscoveryResponse) { + option (google.api.http).post = "/v3/discovery:filter_configs"; + option (google.api.http).body = "*"; + } +} From c611ed0e20fea544853a4f683b9e2107a80ca00b Mon Sep 17 00:00:00 2001 From: Arthur Yan <55563955+arthuryan-k@users.noreply.github.com> Date: Thu, 25 Jun 2020 16:33:09 -0400 Subject: [PATCH 453/909] fuzz: added fuzz test for listener filter original_src (#11732) * fuzz: added fuzz test for listener filter original_src Created original_src_corpus and populated with valid and invalid testcases Signed-off-by: Arthur Yan --- .../filters/listener/original_src/BUILD | 22 +++++++++ .../original_src_corpus/ipv4_test | 7 +++ .../original_src_corpus/unix_test | 6 +++ .../original_src/original_src_fuzz_test.cc | 45 +++++++++++++++++++ .../original_src/original_src_fuzz_test.proto | 13 ++++++ 5 files changed, 93 insertions(+) create mode 100644 test/extensions/filters/listener/original_src/original_src_corpus/ipv4_test create mode 100644 test/extensions/filters/listener/original_src/original_src_corpus/unix_test create mode 100644 test/extensions/filters/listener/original_src/original_src_fuzz_test.cc create mode 100644 test/extensions/filters/listener/original_src/original_src_fuzz_test.proto diff --git a/test/extensions/filters/listener/original_src/BUILD b/test/extensions/filters/listener/original_src/BUILD index 1b1806827236..235c5263793d 100644 --- a/test/extensions/filters/listener/original_src/BUILD +++ 
b/test/extensions/filters/listener/original_src/BUILD @@ -1,6 +1,8 @@ load( "//bazel:envoy_build_system.bzl", + "envoy_cc_fuzz_test", "envoy_package", + "envoy_proto_library", ) load( "//test/extensions:extensions_build_system.bzl", @@ -49,3 +51,23 @@ envoy_extension_cc_test( "@envoy_api//envoy/extensions/filters/listener/original_src/v3:pkg_cc_proto", ], ) + +envoy_proto_library( + name = "original_src_fuzz_test_proto", + srcs = ["original_src_fuzz_test.proto"], + deps = [ + "@envoy_api//envoy/extensions/filters/listener/original_src/v3:pkg", + ], +) + +envoy_cc_fuzz_test( + name = "original_src_fuzz_test", + srcs = ["original_src_fuzz_test.cc"], + corpus = "original_src_corpus", + deps = [ + ":original_src_fuzz_test_proto_cc_proto", + "//source/extensions/filters/listener/original_src:original_src_lib", + "//test/mocks/network:network_mocks", + "@envoy_api//envoy/extensions/filters/listener/original_src/v3:pkg_cc_proto", + ], +) diff --git a/test/extensions/filters/listener/original_src/original_src_corpus/ipv4_test b/test/extensions/filters/listener/original_src/original_src_corpus/ipv4_test new file mode 100644 index 000000000000..bd2aeb926935 --- /dev/null +++ b/test/extensions/filters/listener/original_src/original_src_corpus/ipv4_test @@ -0,0 +1,7 @@ +config { + bind_port: true + mark: 0 +} + +address: "tcp://1.2.3.4:0" + diff --git a/test/extensions/filters/listener/original_src/original_src_corpus/unix_test b/test/extensions/filters/listener/original_src/original_src_corpus/unix_test new file mode 100644 index 000000000000..841e6094d5ca --- /dev/null +++ b/test/extensions/filters/listener/original_src/original_src_corpus/unix_test @@ -0,0 +1,6 @@ +config { + bind_port: true + mark: 0 +} + +address: "unix://domain.socket" \ No newline at end of file diff --git a/test/extensions/filters/listener/original_src/original_src_fuzz_test.cc b/test/extensions/filters/listener/original_src/original_src_fuzz_test.cc new file mode 100644 index 000000000000..970303b9da67 --- /dev/null +++ b/test/extensions/filters/listener/original_src/original_src_fuzz_test.cc @@ -0,0 +1,45 @@ +#include "envoy/extensions/filters/listener/original_src/v3/original_src.pb.h" + +#include "common/network/utility.h" + +#include "extensions/filters/listener/original_src/original_src.h" + +#include "test/extensions/filters/listener/original_src/original_src_fuzz_test.pb.validate.h" +#include "test/fuzz/fuzz_runner.h" +#include "test/mocks/network/mocks.h" + +#include "gmock/gmock.h" + +namespace Envoy { +namespace Extensions { +namespace ListenerFilters { +namespace OriginalSrc { + +DEFINE_PROTO_FUZZER( + const envoy::extensions::filters::listener::original_src::OriginalSrcTestCase& input) { + + try { + TestUtility::validate(input); + } catch (const ProtoValidationException& e) { + ENVOY_LOG_MISC(debug, "ProtoValidationException: {}", e.what()); + return; + } + + NiceMock callbacks_; + try { + callbacks_.socket_.remote_address_ = Network::Utility::resolveUrl(input.address()); + } catch (const EnvoyException& e) { + ENVOY_LOG_MISC(debug, "EnvoyException: {}", e.what()); + return; + } + + Config config(input.config()); + auto filter = std::make_unique(config); + + filter->onAccept(callbacks_); +} + +} // namespace OriginalSrc +} // namespace ListenerFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/listener/original_src/original_src_fuzz_test.proto b/test/extensions/filters/listener/original_src/original_src_fuzz_test.proto new file mode 100644 index 000000000000..187aa9114d3d 
--- /dev/null +++ b/test/extensions/filters/listener/original_src/original_src_fuzz_test.proto @@ -0,0 +1,13 @@ +syntax = "proto3"; + +package envoy.extensions.filters.listener.original_src; + +import "envoy/extensions/filters/listener/original_src/v3/original_src.proto"; +import "google/protobuf/empty.proto"; +import "validate/validate.proto"; + +message OriginalSrcTestCase { + envoy.extensions.filters.listener.original_src.v3.OriginalSrc config = 1 + [(validate.rules).message.required = true]; + string address = 2; +} \ No newline at end of file From edaad6d02b3e9265418196f96b4fd6b3689cc2b7 Mon Sep 17 00:00:00 2001 From: "Mark D. Roth" Date: Thu, 25 Jun 2020 15:22:24 -0700 Subject: [PATCH 454/909] api: Add transport_api_version to SelfConfigSource. (#11754) This will allow configuring the version of LRS independently from the version of xDS in the lrs_server field in Cluster messages. Risk Level: Low Testing: N/A Docs Changes: Inline in PR Signed-off-by: Mark D. Roth --- api/envoy/config/core/v3/config_source.proto | 4 ++++ api/envoy/config/core/v4alpha/config_source.proto | 4 ++++ generated_api_shadow/envoy/config/core/v3/config_source.proto | 4 ++++ .../envoy/config/core/v4alpha/config_source.proto | 4 ++++ 4 files changed, 16 insertions(+) diff --git a/api/envoy/config/core/v3/config_source.proto b/api/envoy/config/core/v3/config_source.proto index 2522d4fd53e5..dc47586233c1 100644 --- a/api/envoy/config/core/v3/config_source.proto +++ b/api/envoy/config/core/v3/config_source.proto @@ -110,6 +110,10 @@ message AggregatedConfigSource { // specify that other data can be obtained from the same server. message SelfConfigSource { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.SelfConfigSource"; + + // API version for xDS transport protocol. This describes the xDS gRPC/REST + // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire. + ApiVersion transport_api_version = 1 [(validate.rules).enum = {defined_only: true}]; } // Rate Limit settings to be applied for discovery requests made by Envoy. diff --git a/api/envoy/config/core/v4alpha/config_source.proto b/api/envoy/config/core/v4alpha/config_source.proto index 6c5e9778e802..ec3f9dfe0971 100644 --- a/api/envoy/config/core/v4alpha/config_source.proto +++ b/api/envoy/config/core/v4alpha/config_source.proto @@ -112,6 +112,10 @@ message AggregatedConfigSource { message SelfConfigSource { option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.SelfConfigSource"; + + // API version for xDS transport protocol. This describes the xDS gRPC/REST + // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire. + ApiVersion transport_api_version = 1 [(validate.rules).enum = {defined_only: true}]; } // Rate Limit settings to be applied for discovery requests made by Envoy. diff --git a/generated_api_shadow/envoy/config/core/v3/config_source.proto b/generated_api_shadow/envoy/config/core/v3/config_source.proto index fbac531d71ec..beb670796932 100644 --- a/generated_api_shadow/envoy/config/core/v3/config_source.proto +++ b/generated_api_shadow/envoy/config/core/v3/config_source.proto @@ -110,6 +110,10 @@ message AggregatedConfigSource { // specify that other data can be obtained from the same server. message SelfConfigSource { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.SelfConfigSource"; + + // API version for xDS transport protocol. 
This describes the xDS gRPC/REST + // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire. + ApiVersion transport_api_version = 1 [(validate.rules).enum = {defined_only: true}]; } // Rate Limit settings to be applied for discovery requests made by Envoy. diff --git a/generated_api_shadow/envoy/config/core/v4alpha/config_source.proto b/generated_api_shadow/envoy/config/core/v4alpha/config_source.proto index 83ec10a16b44..503d5c451cc0 100644 --- a/generated_api_shadow/envoy/config/core/v4alpha/config_source.proto +++ b/generated_api_shadow/envoy/config/core/v4alpha/config_source.proto @@ -112,6 +112,10 @@ message AggregatedConfigSource { message SelfConfigSource { option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.SelfConfigSource"; + + // API version for xDS transport protocol. This describes the xDS gRPC/REST + // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire. + ApiVersion transport_api_version = 1 [(validate.rules).enum = {defined_only: true}]; } // Rate Limit settings to be applied for discovery requests made by Envoy. From 76c9a95d010093058e6e1e8e1ffde6980d48b305 Mon Sep 17 00:00:00 2001 From: yanavlasov Date: Thu, 25 Jun 2020 19:20:29 -0400 Subject: [PATCH 455/909] RETURN_IF_ERROR macro to provide early exit based on Status value (#11666) Signed-off-by: Yan Avlasov --- source/common/http/status.h | 34 +++++++++++++++++++++++++++++++++ test/common/http/status_test.cc | 32 +++++++++++++++++++++++++++++++ 2 files changed, 66 insertions(+) diff --git a/source/common/http/status.h b/source/common/http/status.h index bc2e1370df04..2b47a5d0a48b 100644 --- a/source/common/http/status.h +++ b/source/common/http/status.h @@ -109,5 +109,39 @@ ABSL_MUST_USE_RESULT bool isCodecClientError(const Status& status); */ Http::Code getPrematureResponseHttpCode(const Status& status); +/** + * Macro that checks return value of expression that results in Status and returns from + * the current function is status is not OK. + * + * Example usage: + * Status foo() { + * RETURN_IF_ERROR(bar()); + * return okStatus(); + * } + */ + +#define RETURN_IF_ERROR(expr) \ + do { \ + if (::Envoy::Http::Details::StatusAdapter adapter{(expr)}) { \ + } else { \ + return std::move(adapter.status_); \ + } \ + } while (false) + +namespace Details { +// Helper class to convert `Status` to `bool` so it can be used inside `if` statements. 
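For context, a slightly fuller sketch of how the macro composes across several steps; the helper functions here are hypothetical and not taken from this patch, each returning an Http::Status:

Envoy::Http::Status decodeFrame() {
  RETURN_IF_ERROR(parseFrameHeader()); // returns parseFrameHeader()'s Status when it is not OK
  RETURN_IF_ERROR(validateFrame());    // likewise for validateFrame()
  return Envoy::Http::okStatus();      // every step succeeded
}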
+struct StatusAdapter { + StatusAdapter(const Status& status) : status_(status) {} + StatusAdapter(Status&& status) : status_(std::move(status)) {} + + StatusAdapter(const StatusAdapter&) = delete; + StatusAdapter& operator=(const StatusAdapter&) = delete; + + explicit operator bool() const { return status_.ok(); } + + Status status_; +}; +} // namespace Details + } // namespace Http } // namespace Envoy diff --git a/test/common/http/status_test.cc b/test/common/http/status_test.cc index 4783b64dd090..327bba34a5a5 100644 --- a/test/common/http/status_test.cc +++ b/test/common/http/status_test.cc @@ -67,5 +67,37 @@ TEST(Status, CodecClientError) { EXPECT_TRUE(isCodecClientError(status)); } +TEST(Status, ReturnIfError) { + + auto outer = [](Status (*inner)()) { + RETURN_IF_ERROR(inner()); + return bufferFloodError("boom"); + }; + + auto result = outer([]() { return okStatus(); }); + EXPECT_FALSE(result.ok()); + EXPECT_EQ("boom", result.message()); + EXPECT_TRUE(isBufferFloodError(result)); + result = outer([]() { return codecClientError("foobar"); }); + EXPECT_FALSE(result.ok()); + EXPECT_TRUE(isCodecClientError(result)); + EXPECT_EQ("foobar", result.message()); + + // Check that passing a `Status` object directly into the RETURN_IF_ERROR works. + auto direct_status = [](const Status& status) { + RETURN_IF_ERROR(status); + return bufferFloodError("baz"); + }; + result = direct_status(codecClientError("foobar")); + EXPECT_FALSE(result.ok()); + EXPECT_TRUE(isCodecClientError(result)); + EXPECT_EQ("foobar", result.message()); + + result = direct_status(okStatus()); + EXPECT_FALSE(result.ok()); + EXPECT_EQ("baz", result.message()); + EXPECT_TRUE(isBufferFloodError(result)); +} + } // namespace Http } // namespace Envoy From a24ed29c4dbc4f07a56e3551584b4d85f3ac1862 Mon Sep 17 00:00:00 2001 From: yanavlasov Date: Thu, 25 Jun 2020 19:22:05 -0400 Subject: [PATCH 456/909] test: Add targets to build server main lib with limited list of extensions (#11737) Signed-off-by: Yan Avlasov --- source/exe/BUILD | 41 +++++++++++++++++++++++++++- source/extensions/all_extensions.bzl | 17 ++++++++++++ test/config/integration/server.yaml | 15 ---------- test/integration/BUILD | 4 +-- test/integration/run_envoy_test.sh | 1 + 5 files changed, 60 insertions(+), 18 deletions(-) diff --git a/source/exe/BUILD b/source/exe/BUILD index 0bee6f5858ef..fc00d543a415 100644 --- a/source/exe/BUILD +++ b/source/exe/BUILD @@ -7,7 +7,7 @@ load( "envoy_cc_win32_library", "envoy_package", ) -load("//source/extensions:all_extensions.bzl", "envoy_all_extensions") +load("//source/extensions:all_extensions.bzl", "envoy_all_core_extensions", "envoy_all_extensions") load("//bazel:repositories.bzl", "PPC_SKIP_TARGETS", "WINDOWS_SKIP_TARGETS") licenses(["notice"]) # Apache 2 @@ -80,6 +80,45 @@ envoy_cc_library( }), ) +envoy_cc_library( + name = "envoy_common_with_core_extensions_lib", + deps = [ + "//source/common/event:libevent_lib", + "//source/common/network:utility_lib", + "//source/common/stats:stats_lib", + "//source/common/stats:thread_local_store_lib", + "//source/server:drain_manager_lib", + "//source/server:options_lib", + "//source/server:server_lib", + "//source/server:listener_hooks_lib", + ] + envoy_all_core_extensions(), +) + +envoy_cc_library( + name = "envoy_main_common_with_core_extensions_lib", + srcs = ["main_common.cc"], + hdrs = ["main_common.h"], + deps = [ + ":envoy_common_with_core_extensions_lib", + ":platform_impl_lib", + ":process_wide_lib", + "//source/common/api:os_sys_calls_lib", + 
"//source/common/common:compiler_requirements_lib", + "//source/common/common:perf_annotation_lib", + "//source/common/grpc:google_grpc_context_lib", + "//source/common/stats:symbol_table_creator_lib", + "//source/server:hot_restart_lib", + "//source/server:hot_restart_nop_lib", + "//source/server/config_validation:server_lib", + ] + select({ + "//bazel:disable_signal_trace": [], + "//conditions:default": [ + "//source/common/signal:sigaction_lib", + ":terminate_handler_lib", + ], + }), +) + envoy_cc_library( name = "process_wide_lib", srcs = ["process_wide.cc"], diff --git a/source/extensions/all_extensions.bzl b/source/extensions/all_extensions.bzl index f22633aeeb2a..73e3da62c5d7 100644 --- a/source/extensions/all_extensions.bzl +++ b/source/extensions/all_extensions.bzl @@ -14,3 +14,20 @@ def envoy_all_extensions(denylist = []): # These extensions can be removed on a site specific basis. return [v for k, v in all_extensions.items() if not k in denylist] + +# Core extensions needed to run Envoy's integration tests. +_core_extensions = [ + "envoy.access_loggers.file", + "envoy.filters.http.router", + "envoy.filters.http.health_check", + "envoy.filters.network.http_connection_manager", + "envoy.stat_sinks.statsd", + "envoy.transport_sockets.raw_buffer", +] + +# Return all core extensions to be compiled into Envoy. +def envoy_all_core_extensions(): + all_extensions = dicts.add(_required_extensions, EXTENSIONS) + + # These extensions can be removed on a site specific basis. + return [v for k, v in all_extensions.items() if k in _core_extensions] diff --git a/test/config/integration/server.yaml b/test/config/integration/server.yaml index c3a97cb48705..78d8f24fd8f5 100644 --- a/test/config/integration/server.yaml +++ b/test/config/integration/server.yaml @@ -69,21 +69,6 @@ static_resources: value: default_value: 1000 runtime_key: access_log.access_error.duration - - address: - socket_address: - address: {{ ip_loopback_address }} - port_value: 0 - filter_chains: - - filters: - - name: redis - typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.redis_proxy.v2.RedisProxy - settings: - op_timeout: 0.4s - stat_prefix: redis - prefix_routes: - catch_all_route: - cluster: redis clusters: - name: cluster_1 connect_timeout: 5s diff --git a/test/integration/BUILD b/test/integration/BUILD index 8b52391380c1..cd527321b6d4 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -254,7 +254,7 @@ envoy_cc_test_binary( ], stamped = True, deps = [ - "//source/exe:envoy_main_common_lib", + "//source/exe:envoy_main_common_with_core_extensions_lib", "//source/exe:platform_impl_lib", ], ) @@ -278,7 +278,7 @@ envoy_sh_test( envoy_sh_test( name = "run_envoy_test", srcs = ["run_envoy_test.sh"], - cc_binary = ["//source/exe:envoy-static"], + cc_binary = [":hotrestart_main"], data = [ "test_utility.sh", "//test/config/integration:server_config_files", diff --git a/test/integration/run_envoy_test.sh b/test/integration/run_envoy_test.sh index 72003ddf7e84..8009dce7bf56 100755 --- a/test/integration/run_envoy_test.sh +++ b/test/integration/run_envoy_test.sh @@ -1,5 +1,6 @@ #!/bin/bash +export ENVOY_BIN="${TEST_SRCDIR}"/envoy/test/integration/hotrestart_main source "${TEST_SRCDIR}/envoy/test/integration/test_utility.sh" function expect_fail_with_error() { From 5c553261d14730af5a2c290c4b3687ae667a65db Mon Sep 17 00:00:00 2001 From: tomocy <36136133+tomocy@users.noreply.github.com> Date: Fri, 26 Jun 2020 21:44:28 +0900 Subject: [PATCH 457/909] compile regexs (#11765) format: compile regexes 
in format script Signed-off-by: tomocy --- tools/code_format/check_format.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/tools/code_format/check_format.py b/tools/code_format/check_format.py index 0e955d5e305d..e845b930ed76 100755 --- a/tools/code_format/check_format.py +++ b/tools/code_format/check_format.py @@ -98,6 +98,11 @@ INCLUDE_ANGLE_LEN = len(INCLUDE_ANGLE) PROTO_PACKAGE_REGEX = re.compile(r"^package (\S+);\n*", re.MULTILINE) X_ENVOY_USED_DIRECTLY_REGEX = re.compile(r'.*\"x-envoy-.*\".*') +DESIGNATED_INITIALIZER_REGEX = re.compile(r"\{\s*\.\w+\s*\=") +MANGLED_PROTOBUF_NAME_REGEX = re.compile(r"envoy::[a-z0-9_:]+::[A-Z][a-z]\w*_\w*_[A-Z]{2}") +HISTOGRAM_SI_SUFFIX_REGEX = re.compile(r"(?<=HISTOGRAM\()[a-zA-Z0-9_]+_(b|kb|mb|ns|us|ms|s)(?=,)") +TEST_NAME_STARTING_LOWER_CASE_REGEX = re.compile(r"TEST(_.\(.*,\s|\()[a-z].*\)\s\{") +EXTENSIONS_CODEOWNERS_REGEX = re.compile(r'.*(extensions[^@]*\s+)(@.*)') # yapf: disable PROTOBUF_TYPE_ERRORS = { @@ -637,7 +642,7 @@ def checkSourceLine(line, file_path, reportError): # can be used instead reportError("Don't use __attribute__((packed)), use the PACKED_STRUCT macro defined " "in include/envoy/common/platform.h instead") - if re.search("\{\s*\.\w+\s*\=", line): + if DESIGNATED_INITIALIZER_REGEX.search(line): # Designated initializers are not part of the C++14 standard and are not supported # by MSVC reportError("Don't use designated initializers in struct initialization, " @@ -649,7 +654,7 @@ def checkSourceLine(line, file_path, reportError): reportError("Don't use 'using testing::Test;, elaborate the type instead") if line.startswith("using testing::TestWithParams;"): reportError("Don't use 'using testing::Test;, elaborate the type instead") - if re.search("TEST(_.\(.*,\s|\()[a-z].*\)\s\{", line): + if TEST_NAME_STARTING_LOWER_CASE_REGEX.search(line): # Matches variants of TEST(), TEST_P(), TEST_F() etc. where the test name begins # with a lowercase letter. reportError("Test names should be CamelCase, starting with a capital letter") @@ -671,10 +676,10 @@ def checkSourceLine(line, file_path, reportError): '->histogramFromString(' in line or '->textReadoutFromString(' in line): reportError("Don't lookup stats by name at runtime; use StatName saved during construction") - if re.search("envoy::[a-z0-9_:]+::[A-Z][a-z]\w*_\w*_[A-Z]{2}", line): + if MANGLED_PROTOBUF_NAME_REGEX.search(line): reportError("Don't use mangled Protobuf names for enum constants") - hist_m = re.search("(?<=HISTOGRAM\()[a-zA-Z0-9_]+_(b|kb|mb|ns|us|ms|s)(?=,)", line) + hist_m = HISTOGRAM_SI_SUFFIX_REGEX.search(line) if hist_m and not allowlistedForHistogramSiSuffix(hist_m.group(0)): reportError( "Don't suffix histogram names with the unit symbol, " @@ -1009,7 +1014,7 @@ def ownedDirectories(error_messages): for line in f: # If this line is of the form "extensions/... @owner1 @owner2" capture the directory # name and store it in the list of directories with documented owners. - m = re.search(r'.*(extensions[^@]*\s+)(@.*)', line) + m = EXTENSIONS_CODEOWNERS_REGEX.search(line) if m is not None and not line.startswith('#'): owned.append(m.group(1).strip()) owners = re.findall('@\S+', m.group(2).strip()) From f8bfc6a16ffebf1ecb9cdf8eb1265912118ae7bf Mon Sep 17 00:00:00 2001 From: Song Hu Date: Fri, 26 Jun 2020 07:42:47 -0700 Subject: [PATCH 458/909] Extract TLVs from Proxy Protocol v2 and emit to dynamic metadata (#11488) Before this change TLVs are read and discarded in the Proxy Protocol filter. 
This change is to extend the Proxy Protocol Listener filter to extract TLVs of interest and emit them as metadata which can be consumed by other filters. Fixes #11336 Signed-off-by: Song --- .../proxy_protocol/v3/proxy_protocol.proto | 23 ++ .../listener_filters/proxy_protocol.rst | 6 +- .../proxy_protocol/v3/proxy_protocol.proto | 23 ++ include/envoy/network/BUILD | 1 + include/envoy/network/filter.h | 17 + .../filters/listener/proxy_protocol/BUILD | 2 + .../filters/listener/proxy_protocol/config.cc | 12 +- .../listener/proxy_protocol/proxy_protocol.cc | 123 +++++++- .../listener/proxy_protocol/proxy_protocol.h | 39 ++- source/server/BUILD | 1 + source/server/connection_handler_impl.cc | 15 +- source/server/connection_handler_impl.h | 7 +- source/server/listener_impl.cc | 3 +- .../proxy_protocol_regression_test.cc | 4 +- .../proxy_protocol/proxy_protocol_test.cc | 294 +++++++++++++++++- .../proxy_proto_integration_test.cc | 21 ++ .../proxy_proto_integration_test.h | 12 +- test/mocks/network/mocks.h | 3 + 18 files changed, 582 insertions(+), 24 deletions(-) diff --git a/api/envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.proto b/api/envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.proto index 63ad72945e28..8fd0c63d0c82 100644 --- a/api/envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.proto +++ b/api/envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.proto @@ -4,6 +4,7 @@ package envoy.extensions.filters.listener.proxy_protocol.v3; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.listener.proxy_protocol.v3"; option java_outer_classname = "ProxyProtocolProto"; @@ -17,4 +18,26 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; message ProxyProtocol { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.listener.proxy_protocol.v2.ProxyProtocol"; + + message KeyValuePair { + // The namespace — if this is empty, the filter's namespace will be used. + string metadata_namespace = 1; + + // The key to use within the namespace. + string key = 2 [(validate.rules).string = {min_bytes: 1}]; + } + + // A Rule defines what metadata to apply when a header is present or missing. + message Rule { + // The type that triggers the rule - required + // TLV type is defined as uint8_t in proxy protocol. See `the spec + // `_ for details. + uint32 tlv_type = 1 [(validate.rules).uint32 = {lt: 256}]; + + // If the TLV type is present, apply this metadata KeyValuePair. + KeyValuePair on_tlv_present = 2; + } + + // The list of rules to apply to requests. + repeated Rule rules = 1; } diff --git a/docs/root/configuration/listeners/listener_filters/proxy_protocol.rst b/docs/root/configuration/listeners/listener_filters/proxy_protocol.rst index 257977b8627f..4848c364308f 100644 --- a/docs/root/configuration/listeners/listener_filters/proxy_protocol.rst +++ b/docs/root/configuration/listeners/listener_filters/proxy_protocol.rst @@ -11,8 +11,8 @@ which places the original coordinates (IP, PORT) into a connection-string. Envoy then extracts these and uses them as the remote address. In Proxy Protocol v2 there exists the concept of extensions (TLV) -tags that are optional. This implementation skips over these without -using them. +tags that are optional. 
If the type of the TLV is added to the filter's configuration, +the TLV will be emitted as dynamic metadata with user-specified key. This implementation supports both version 1 and version 2, it automatically determines on a per-connection basis which of the two @@ -35,4 +35,4 @@ This filter emits the following statistics: :header: Name, Type, Description :widths: 1, 1, 2 - downstream_cx_proxy_proto_error, Counter, Total proxy protocol errors + downstream_cx_proxy_proto_error, Counter, Total proxy protocol errors \ No newline at end of file diff --git a/generated_api_shadow/envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.proto b/generated_api_shadow/envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.proto index 63ad72945e28..8fd0c63d0c82 100644 --- a/generated_api_shadow/envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.proto +++ b/generated_api_shadow/envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.proto @@ -4,6 +4,7 @@ package envoy.extensions.filters.listener.proxy_protocol.v3; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.listener.proxy_protocol.v3"; option java_outer_classname = "ProxyProtocolProto"; @@ -17,4 +18,26 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; message ProxyProtocol { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.listener.proxy_protocol.v2.ProxyProtocol"; + + message KeyValuePair { + // The namespace — if this is empty, the filter's namespace will be used. + string metadata_namespace = 1; + + // The key to use within the namespace. + string key = 2 [(validate.rules).string = {min_bytes: 1}]; + } + + // A Rule defines what metadata to apply when a header is present or missing. + message Rule { + // The type that triggers the rule - required + // TLV type is defined as uint8_t in proxy protocol. See `the spec + // `_ for details. + uint32 tlv_type = 1 [(validate.rules).uint32 = {lt: 256}]; + + // If the TLV type is present, apply this metadata KeyValuePair. + KeyValuePair on_tlv_present = 2; + } + + // The list of rules to apply to requests. + repeated Rule rules = 1; } diff --git a/include/envoy/network/BUILD b/include/envoy/network/BUILD index c2254842ed92..f4a342d26bb8 100644 --- a/include/envoy/network/BUILD +++ b/include/envoy/network/BUILD @@ -65,6 +65,7 @@ envoy_cc_library( ":transport_socket_interface", "//include/envoy/buffer:buffer_interface", "//include/envoy/upstream:host_description_interface", + "//source/common/protobuf", ], ) diff --git a/include/envoy/network/filter.h b/include/envoy/network/filter.h index a2603416e9df..e43f166f73b9 100644 --- a/include/envoy/network/filter.h +++ b/include/envoy/network/filter.h @@ -7,6 +7,8 @@ #include "envoy/network/transport_socket.h" #include "envoy/upstream/host_description.h" +#include "common/protobuf/protobuf.h" + namespace Envoy { namespace Event { @@ -269,6 +271,21 @@ class ListenerFilterCallbacks { * @param success boolean telling whether the filter execution was successful or not. */ virtual void continueFilterChain(bool success) PURE; + + /** + * @param name the namespace used in the metadata in reverse DNS format, for example: + * envoy.test.my_filter. + * @param value the struct to set on the namespace. A merge will be performed with new values for + * the same key overriding existing. 
+ */ + virtual void setDynamicMetadata(const std::string& name, const ProtobufWkt::Struct& value) PURE; + + /** + * @return const envoy::api::v2::core::Metadata& the dynamic metadata associated with this + * connection. + */ + virtual envoy::config::core::v3::Metadata& dynamicMetadata() PURE; + virtual const envoy::config::core::v3::Metadata& dynamicMetadata() const PURE; }; /** diff --git a/source/extensions/filters/listener/proxy_protocol/BUILD b/source/extensions/filters/listener/proxy_protocol/BUILD index d4b87f1bfd5e..d39dc0a51d6d 100644 --- a/source/extensions/filters/listener/proxy_protocol/BUILD +++ b/source/extensions/filters/listener/proxy_protocol/BUILD @@ -29,6 +29,8 @@ envoy_cc_library( "//source/common/common:utility_lib", "//source/common/network:address_lib", "//source/common/network:utility_lib", + "//source/extensions/filters/listener:well_known_names", + "@envoy_api//envoy/extensions/filters/listener/proxy_protocol/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/filters/listener/proxy_protocol/config.cc b/source/extensions/filters/listener/proxy_protocol/config.cc index 0fa044542d08..4641fcd6c9bf 100644 --- a/source/extensions/filters/listener/proxy_protocol/config.cc +++ b/source/extensions/filters/listener/proxy_protocol/config.cc @@ -1,3 +1,5 @@ +#include + #include "envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.pb.h" #include "envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.pb.validate.h" #include "envoy/registry/registry.h" @@ -18,10 +20,16 @@ class ProxyProtocolConfigFactory : public Server::Configuration::NamedListenerFi public: // NamedListenerFilterConfigFactory Network::ListenerFilterFactoryCb createListenerFilterFactoryFromProto( - const Protobuf::Message&, + const Protobuf::Message& message, const Network::ListenerFilterMatcherSharedPtr& listener_filter_matcher, Server::Configuration::ListenerFactoryContext& context) override { - ConfigSharedPtr config(new Config(context.scope())); + + // downcast it to the proxy protocol config + const auto& proto_config = MessageUtil::downcastAndValidate< + const envoy::extensions::filters::listener::proxy_protocol::v3::ProxyProtocol&>( + message, context.messageValidationVisitor()); + + ConfigSharedPtr config = std::make_shared(context.scope(), proto_config); return [listener_filter_matcher, config](Network::ListenerFilterManager& filter_manager) -> void { filter_manager.addAcceptFilter(listener_filter_matcher, std::make_unique(config)); diff --git a/source/extensions/filters/listener/proxy_protocol/proxy_protocol.cc b/source/extensions/filters/listener/proxy_protocol/proxy_protocol.cc index 1c90e9c924ec..e2561480785a 100644 --- a/source/extensions/filters/listener/proxy_protocol/proxy_protocol.cc +++ b/source/extensions/filters/listener/proxy_protocol/proxy_protocol.cc @@ -1,6 +1,7 @@ #include "extensions/filters/listener/proxy_protocol/proxy_protocol.h" #include +#include #include #include #include @@ -15,16 +16,37 @@ #include "common/api/os_sys_calls_impl.h" #include "common/common/assert.h" #include "common/common/empty_string.h" +#include "common/common/fmt.h" #include "common/common/utility.h" #include "common/network/address_impl.h" #include "common/network/utility.h" +#include "extensions/filters/listener/well_known_names.h" + namespace Envoy { namespace Extensions { namespace ListenerFilters { namespace ProxyProtocol { -Config::Config(Stats::Scope& scope) : stats_{ALL_PROXY_PROTOCOL_STATS(POOL_COUNTER(scope))} {} +Config::Config( + Stats::Scope& scope, + const 
envoy::extensions::filters::listener::proxy_protocol::v3::ProxyProtocol& proto_config) + : stats_{ALL_PROXY_PROTOCOL_STATS(POOL_COUNTER(scope))} { + for (const auto& rule : proto_config.rules()) { + tlv_types_[0xFF & rule.tlv_type()] = rule.on_tlv_present(); + } +} + +const KeyValuePair* Config::isTlvTypeNeeded(uint8_t type) const { + auto tlv_type = tlv_types_.find(type); + if (tlv_types_.end() != tlv_type) { + return &tlv_type->second; + } + + return nullptr; +} + +size_t Config::numberOfNeededTlvTypes() const { return tlv_types_.size(); } Network::FilterStatus Filter::onAccept(Network::ListenerFilterCallbacks& cb) { ENVOY_LOG(debug, "proxy_protocol: New connection accepted"); @@ -54,7 +76,7 @@ void Filter::onReadWorker() { Network::ConnectionSocket& socket = cb_->socket(); if ((!proxy_protocol_header_.has_value() && !readProxyHeader(socket.ioHandle().fd())) || - (proxy_protocol_header_.has_value() && !parseExtensions(socket.ioHandle().fd()))) { + (proxy_protocol_header_.has_value() && !readExtensions(socket.ioHandle().fd()))) { // We return if a) we do not yet have the header, or b) we have the header but not yet all // the extension data. In both cases we'll be called again when the socket is ready to read // and pick up where we left off. @@ -231,11 +253,10 @@ void Filter::parseV1Header(char* buf, size_t len) { } } -bool Filter::parseExtensions(os_fd_t fd) { +bool Filter::parseExtensions(os_fd_t fd, uint8_t* buf, size_t buf_size, size_t* buf_off) { // If we ever implement extensions elsewhere, be sure to // continue to skip and ignore those for LOCAL. while (proxy_protocol_header_.value().extensions_length_) { - // buf_ is no longer in use so we re-use it to read/discard int bytes_avail; auto& os_syscalls = Api::OsSysCallsSingleton::get(); if (os_syscalls.ioctl(fd, FIONREAD, &bytes_avail).rc_ < 0) { @@ -244,14 +265,104 @@ bool Filter::parseExtensions(os_fd_t fd) { if (bytes_avail == 0) { return false; } - bytes_avail = std::min(size_t(bytes_avail), sizeof(buf_)); + bytes_avail = std::min(size_t(bytes_avail), buf_size); bytes_avail = std::min(size_t(bytes_avail), proxy_protocol_header_.value().extensions_length_); - const Api::SysCallSizeResult recv_result = os_syscalls.recv(fd, buf_, bytes_avail, 0); + buf += (nullptr != buf_off) ? *buf_off : 0; + const Api::SysCallSizeResult recv_result = os_syscalls.recv(fd, buf, bytes_avail, 0); if (recv_result.rc_ != bytes_avail) { throw EnvoyException("failed to read proxy protocol extension"); } proxy_protocol_header_.value().extensions_length_ -= recv_result.rc_; + + if (nullptr != buf_off) { + *buf_off += recv_result.rc_; + } } + + return true; +} + +/** + * @note A TLV is arranged in the following format: + * struct pp2_tlv { + * uint8_t type; + * uint8_t length_hi; + * uint8_t length_lo; + * uint8_t value[0]; + * }; + * See https://www.haproxy.org/download/2.1/doc/proxy-protocol.txt for details + */ +void Filter::parseTlvs(const std::vector& tlvs) { + size_t idx{0}; + while (idx < tlvs.size()) { + const uint8_t tlv_type = tlvs[idx]; + idx++; + + if ((idx + 1) >= tlvs.size()) { + throw EnvoyException( + fmt::format("failed to read proxy protocol extension. No bytes for TLV length. " + "Extension length is {}, current index is {}, current type is {}.", + tlvs.size(), idx, tlv_type)); + } + + const uint8_t tlv_length_upper = tlvs[idx]; + const uint8_t tlv_length_lower = tlvs[idx + 1]; + const size_t tlv_value_length = (tlv_length_upper << 8) + tlv_length_lower; + idx += 2; + + // Get the value. 
+ if ((idx + tlv_value_length - 1) >= tlvs.size()) { + throw EnvoyException( + fmt::format("failed to read proxy protocol extension. No bytes for TLV value. " + "Extension length is {}, current index is {}, current type is {}, current " + "value length is {}.", + tlvs.size(), idx, tlv_type, tlv_length_upper)); + } + + // Only save to dynamic metadata if this type of TLV is needed. + auto key_value_pair = config_->isTlvTypeNeeded(tlv_type); + if (nullptr != key_value_pair) { + ProtobufWkt::Value metadata_value; + metadata_value.set_string_value(reinterpret_cast(tlvs.data() + idx), + tlv_value_length); + + std::string metadata_key = key_value_pair->metadata_namespace().empty() + ? ListenerFilterNames::get().ProxyProtocol + : key_value_pair->metadata_namespace(); + + ProtobufWkt::Struct metadata( + (*cb_->dynamicMetadata().mutable_filter_metadata())[metadata_key]); + metadata.mutable_fields()->insert({key_value_pair->key(), metadata_value}); + cb_->setDynamicMetadata(metadata_key, metadata); + } else { + ENVOY_LOG(trace, "proxy_protocol: Skip TLV of type {} since it's not needed", tlv_type); + } + + idx += tlv_value_length; + ASSERT(idx <= tlvs.size()); + } +} + +bool Filter::readExtensions(os_fd_t fd) { + // Parse and discard the extensions if this is a local command or there's no TLV needs to be saved + // to metadata. + if (proxy_protocol_header_.value().local_command_ || 0 == config_->numberOfNeededTlvTypes()) { + // buf_ is no longer in use so we re-use it to read/discard. + return parseExtensions(fd, reinterpret_cast(buf_), sizeof(buf_), nullptr); + } + + // Initialize the buf_tlv_ only when we need to read the TLVs. + if (buf_tlv_.empty()) { + buf_tlv_.resize(proxy_protocol_header_.value().extensions_length_); + } + + // Parse until we have all the TLVs in buf_tlv. + if (!parseExtensions(fd, buf_tlv_.data(), buf_tlv_.size(), &buf_tlv_off_)) { + return false; + } + + parseTlvs(buf_tlv_); + return true; } diff --git a/source/extensions/filters/listener/proxy_protocol/proxy_protocol.h b/source/extensions/filters/listener/proxy_protocol/proxy_protocol.h index 4b23d470dc40..ac390908f52c 100644 --- a/source/extensions/filters/listener/proxy_protocol/proxy_protocol.h +++ b/source/extensions/filters/listener/proxy_protocol/proxy_protocol.h @@ -1,12 +1,14 @@ #pragma once #include "envoy/event/file_event.h" +#include "envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.pb.h" #include "envoy/network/filter.h" #include "envoy/stats/scope.h" #include "envoy/stats/stats_macros.h" #include "common/common/logger.h" +#include "absl/container/flat_hash_map.h" #include "proxy_protocol_header.h" namespace Envoy { @@ -14,6 +16,9 @@ namespace Extensions { namespace ListenerFilters { namespace ProxyProtocol { +using KeyValuePair = + envoy::extensions::filters::listener::proxy_protocol::v3::ProxyProtocol::KeyValuePair; + /** * All stats for the proxy protocol. @see stats_macros.h */ @@ -32,11 +37,27 @@ struct ProxyProtocolStats { /** * Global configuration for Proxy Protocol listener filter. */ -class Config { +class Config : public Logger::Loggable { public: - Config(Stats::Scope& scope); + Config( + Stats::Scope& scope, + const envoy::extensions::filters::listener::proxy_protocol::v3::ProxyProtocol& proto_config); ProxyProtocolStats stats_; + + /** + * Return null if the type of TLV is not needed otherwise a pointer to the KeyValuePair for + * emitting to dynamic metadata. 
+ */ + const KeyValuePair* isTlvTypeNeeded(uint8_t type) const; + + /** + * Number of TLV types that need to be parsed and saved to dynamic metadata. + */ + size_t numberOfNeededTlvTypes() const; + +private: + absl::flat_hash_map tlv_types_; }; using ConfigSharedPtr = std::shared_ptr; @@ -79,7 +100,9 @@ class Filter : public Network::ListenerFilter, Logger::Loggable& tlvs); + bool readExtensions(os_fd_t fd); /** * Given a char * & len, parse the header as per spec @@ -102,6 +125,16 @@ class Filter : public Network::ListenerFilter, Logger::Loggable buf_tlv_; + + /** + * The index in buf_tlv_ that has been fully read. + */ + size_t buf_tlv_off_{}; + ConfigSharedPtr config_; absl::optional proxy_protocol_header_; diff --git a/source/server/BUILD b/source/server/BUILD index bbb8dc655b76..16e4ddfb8d63 100644 --- a/source/server/BUILD +++ b/source/server/BUILD @@ -334,6 +334,7 @@ envoy_cc_library( "@envoy_api//envoy/api/v2/listener:pkg_cc_proto", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/config/listener/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/listener/proxy_protocol/v3:pkg_cc_proto", ], ) diff --git a/source/server/connection_handler_impl.cc b/source/server/connection_handler_impl.cc index 797e746df724..7353404fc3bc 100644 --- a/source/server/connection_handler_impl.cc +++ b/source/server/connection_handler_impl.cc @@ -287,6 +287,11 @@ void ConnectionHandlerImpl::ActiveTcpSocket::continueFilterChain(bool success) { } } +void ConnectionHandlerImpl::ActiveTcpSocket::setDynamicMetadata(const std::string& name, + const ProtobufWkt::Struct& value) { + (*metadata_.mutable_filter_metadata())[name].MergeFrom(value); +} + void ConnectionHandlerImpl::ActiveTcpSocket::newConnection() { // Check if the socket may need to be redirected to another listener. ActiveTcpListenerOptRef new_listener; @@ -318,7 +323,7 @@ void ConnectionHandlerImpl::ActiveTcpSocket::newConnection() { // Particularly the assigned events need to reset before assigning new events in the follow up. accept_filters_.clear(); // Create a new connection on this listener. - listener_.newConnection(std::move(socket_)); + listener_.newConnection(std::move(socket_), dynamicMetadata()); } } @@ -363,12 +368,18 @@ void emitLogs(Network::ListenerConfig& config, StreamInfo::StreamInfo& stream_in } // namespace void ConnectionHandlerImpl::ActiveTcpListener::newConnection( - Network::ConnectionSocketPtr&& socket) { + Network::ConnectionSocketPtr&& socket, + const envoy::config::core::v3::Metadata& dynamic_metadata) { auto stream_info = std::make_unique(parent_.dispatcher_.timeSource()); stream_info->setDownstreamLocalAddress(socket->localAddress()); stream_info->setDownstreamRemoteAddress(socket->remoteAddress()); stream_info->setDownstreamDirectRemoteAddress(socket->directRemoteAddress()); + // merge from the given dynamic metadata if it's not empty + if (dynamic_metadata.filter_metadata_size() > 0) { + stream_info->dynamicMetadata().MergeFrom(dynamic_metadata); + } + // Find matching filter chain. const auto filter_chain = config_->filterChainManager().findFilterChain(*socket); if (filter_chain == nullptr) { diff --git a/source/server/connection_handler_impl.h b/source/server/connection_handler_impl.h index bc6f00ad657f..a0ec09cae2b1 100644 --- a/source/server/connection_handler_impl.h +++ b/source/server/connection_handler_impl.h @@ -143,7 +143,8 @@ class ConnectionHandlerImpl : public Network::ConnectionHandler, /** * Create a new connection from a socket accepted by the listener. 
*/ - void newConnection(Network::ConnectionSocketPtr&& socket); + void newConnection(Network::ConnectionSocketPtr&& socket, + const envoy::config::core::v3::Metadata& dynamic_metadata); /** * Return the active connections container attached with the given filter chain. @@ -294,6 +295,9 @@ class ConnectionHandlerImpl : public Network::ConnectionHandler, Network::ConnectionSocket& socket() override { return *socket_.get(); } Event::Dispatcher& dispatcher() override { return listener_.parent_.dispatcher_; } void continueFilterChain(bool success) override; + void setDynamicMetadata(const std::string& name, const ProtobufWkt::Struct& value) override; + envoy::config::core::v3::Metadata& dynamicMetadata() override { return metadata_; }; + const envoy::config::core::v3::Metadata& dynamicMetadata() const override { return metadata_; }; ActiveTcpListener& listener_; Network::ConnectionSocketPtr socket_; @@ -301,6 +305,7 @@ class ConnectionHandlerImpl : public Network::ConnectionHandler, std::list accept_filters_; std::list::iterator iter_; Event::TimerPtr timer_; + envoy::config::core::v3::Metadata metadata_{}; }; using ActiveTcpListenerOptRef = absl::optional>; diff --git a/source/server/listener_impl.cc b/source/server/listener_impl.cc index 967d37bb17df..3e99e0b6004d 100644 --- a/source/server/listener_impl.cc +++ b/source/server/listener_impl.cc @@ -3,6 +3,7 @@ #include "envoy/config/core/v3/base.pb.h" #include "envoy/config/listener/v3/listener.pb.h" #include "envoy/config/listener/v3/listener_components.pb.h" +#include "envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.pb.h" #include "envoy/network/exception.h" #include "envoy/registry/registry.h" #include "envoy/server/active_udp_listener_config.h" @@ -471,7 +472,7 @@ void ListenerImpl::buildProxyProtocolListenerFilter() { Config::Utility::getAndCheckFactoryByName( Extensions::ListenerFilters::ListenerFilterNames::get().ProxyProtocol); listener_filter_factories_.push_back(factory.createListenerFilterFactoryFromProto( - Envoy::ProtobufWkt::Empty(), + envoy::extensions::filters::listener::proxy_protocol::v3::ProxyProtocol(), /*listener_filter_matcher=*/nullptr, *listener_factory_context_)); } } diff --git a/test/extensions/common/proxy_protocol/proxy_protocol_regression_test.cc b/test/extensions/common/proxy_protocol/proxy_protocol_regression_test.cc index e51fbb8b6fa3..2660309242a2 100644 --- a/test/extensions/common/proxy_protocol/proxy_protocol_regression_test.cc +++ b/test/extensions/common/proxy_protocol/proxy_protocol_regression_test.cc @@ -97,7 +97,9 @@ class ProxyProtocolRegressionTest : public testing::TestWithParam( - std::make_shared(listenerScope()))); + std::make_shared( + listenerScope(), + envoy::extensions::filters::listener::proxy_protocol::v3::ProxyProtocol()))); maybeExitDispatcher(); return true; })); diff --git a/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc b/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc index 75d52d3b4235..afc2bfd7e724 100644 --- a/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc +++ b/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc @@ -18,6 +18,7 @@ #include "server/connection_handler_impl.h" #include "extensions/filters/listener/proxy_protocol/proxy_protocol.h" +#include "extensions/filters/listener/well_known_names.h" #include "test/mocks/api/mocks.h" #include "test/mocks/buffer/mocks.h" @@ -35,6 +36,7 @@ using testing::_; using testing::AnyNumber; using testing::AtLeast; +using 
testing::ElementsAre; using testing::Invoke; using testing::NiceMock; using testing::Return; @@ -96,7 +98,9 @@ class ProxyProtocolTest : public testing::TestWithParam void { expected_callbacks--; @@ -108,7 +112,11 @@ class ProxyProtocolTest : public testing::TestWithParam bool { filter_manager.addAcceptFilter( - nullptr, std::make_unique(std::make_shared(listenerScope()))); + nullptr, std::make_unique(std::make_shared( + listenerScope(), (nullptr != proto_config) + ? *proto_config + : envoy::extensions::filters::listener:: + proxy_protocol::v3::ProxyProtocol()))); maybeExitDispatcher(); return true; })); @@ -875,6 +883,244 @@ TEST_P(ProxyProtocolTest, V2PartialRead) { disconnect(); } +TEST_P(ProxyProtocolTest, V2ExtractTlvOfInterest) { + // A well-formed ipv4/tcp with a pair of TLV extensions is accepted + constexpr uint8_t buffer[] = {0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55, 0x49, + 0x54, 0x0a, 0x21, 0x11, 0x00, 0x1a, 0x01, 0x02, 0x03, 0x04, + 0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x00, 0x02}; + constexpr uint8_t tlv1[] = {0x0, 0x0, 0x1, 0xff}; + constexpr uint8_t tlv_type_authority[] = {0x02, 0x00, 0x07, 0x66, 0x6f, + 0x6f, 0x2e, 0x63, 0x6f, 0x6d}; + constexpr uint8_t data[] = {'D', 'A', 'T', 'A'}; + + envoy::extensions::filters::listener::proxy_protocol::v3::ProxyProtocol proto_config; + auto rule = proto_config.add_rules(); + rule->set_tlv_type(0x02); + rule->mutable_on_tlv_present()->set_key("PP2 type authority"); + + connect(true, &proto_config); + write(buffer, sizeof(buffer)); + dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + + write(tlv1, sizeof(tlv1)); + write(tlv_type_authority, sizeof(tlv_type_authority)); + write(data, sizeof(data)); + expectData("DATA"); + + EXPECT_EQ(1, server_connection_->streamInfo().dynamicMetadata().filter_metadata_size()); + + auto metadata = server_connection_->streamInfo().dynamicMetadata().filter_metadata(); + EXPECT_EQ(1, metadata.size()); + EXPECT_EQ(1, metadata.count(ListenerFilters::ListenerFilterNames::get().ProxyProtocol)); + + auto fields = metadata.at(ListenerFilters::ListenerFilterNames::get().ProxyProtocol).fields(); + EXPECT_EQ(1, fields.size()); + EXPECT_EQ(1, fields.count("PP2 type authority")); + + auto value_s = fields.at("PP2 type authority").string_value(); + ASSERT_THAT(value_s, ElementsAre(0x66, 0x6f, 0x6f, 0x2e, 0x63, 0x6f, 0x6d)); + disconnect(); +} + +TEST_P(ProxyProtocolTest, V2ExtractTlvOfInterestAndEmitWithSpecifiedMetadataNamespace) { + // A well-formed ipv4/tcp with a pair of TLV extensions is accepted + constexpr uint8_t buffer[] = {0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55, 0x49, + 0x54, 0x0a, 0x21, 0x11, 0x00, 0x1a, 0x01, 0x02, 0x03, 0x04, + 0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x00, 0x02}; + constexpr uint8_t tlv1[] = {0x0, 0x0, 0x1, 0xff}; + constexpr uint8_t tlv_type_authority[] = {0x02, 0x00, 0x07, 0x66, 0x6f, + 0x6f, 0x2e, 0x63, 0x6f, 0x6d}; + constexpr uint8_t data[] = {'D', 'A', 'T', 'A'}; + + envoy::extensions::filters::listener::proxy_protocol::v3::ProxyProtocol proto_config; + auto rule = proto_config.add_rules(); + rule->set_tlv_type(0x02); + rule->mutable_on_tlv_present()->set_key("PP2 type authority"); + rule->mutable_on_tlv_present()->set_metadata_namespace("We need a different metadata namespace"); + + connect(true, &proto_config); + write(buffer, sizeof(buffer)); + dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + + write(tlv1, sizeof(tlv1)); + write(tlv_type_authority, sizeof(tlv_type_authority)); + write(data, sizeof(data)); + expectData("DATA"); + + EXPECT_EQ(1, 
server_connection_->streamInfo().dynamicMetadata().filter_metadata_size()); + + auto metadata = server_connection_->streamInfo().dynamicMetadata().filter_metadata(); + EXPECT_EQ(1, metadata.size()); + EXPECT_EQ(1, metadata.count("We need a different metadata namespace")); + + auto fields = metadata.at("We need a different metadata namespace").fields(); + EXPECT_EQ(1, fields.size()); + EXPECT_EQ(1, fields.count("PP2 type authority")); + + auto value_s = fields.at("PP2 type authority").string_value(); + ASSERT_THAT(value_s, ElementsAre(0x66, 0x6f, 0x6f, 0x2e, 0x63, 0x6f, 0x6d)); + disconnect(); +} + +TEST_P(ProxyProtocolTest, V2ExtractMultipleTlvsOfInterest) { + // A well-formed ipv4/tcp with a pair of TLV extensions is accepted + constexpr uint8_t buffer[] = {0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55, 0x49, + 0x54, 0x0a, 0x21, 0x11, 0x00, 0x39, 0x01, 0x02, 0x03, 0x04, + 0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x00, 0x02}; + // a TLV of type 0x00 with size of 4 (1 byte is value) + constexpr uint8_t tlv1[] = {0x00, 0x00, 0x01, 0xff}; + // a TLV of type 0x02 with size of 10 bytes (7 bytes are value) + constexpr uint8_t tlv_type_authority[] = {0x02, 0x00, 0x07, 0x66, 0x6f, + 0x6f, 0x2e, 0x63, 0x6f, 0x6d}; + // a TLV of type 0x0f with size of 6 bytes (3 bytes are value) + constexpr uint8_t tlv3[] = {0x0f, 0x00, 0x03, 0xf0, 0x00, 0x0f}; + // a TLV of type 0xea with size of 25 bytes (22 bytes are value) + constexpr uint8_t tlv_vpc_id[] = {0xea, 0x00, 0x16, 0x01, 0x76, 0x70, 0x63, 0x2d, 0x30, + 0x32, 0x35, 0x74, 0x65, 0x73, 0x74, 0x32, 0x66, 0x61, + 0x36, 0x63, 0x36, 0x33, 0x68, 0x61, 0x37}; + constexpr uint8_t data[] = {'D', 'A', 'T', 'A'}; + + envoy::extensions::filters::listener::proxy_protocol::v3::ProxyProtocol proto_config; + auto rule_type_authority = proto_config.add_rules(); + rule_type_authority->set_tlv_type(0x02); + rule_type_authority->mutable_on_tlv_present()->set_key("PP2 type authority"); + + auto rule_vpc_id = proto_config.add_rules(); + rule_vpc_id->set_tlv_type(0xea); + rule_vpc_id->mutable_on_tlv_present()->set_key("PP2 vpc id"); + + connect(true, &proto_config); + write(buffer, sizeof(buffer)); + dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + + write(tlv1, sizeof(tlv1)); + write(tlv_type_authority, sizeof(tlv_type_authority)); + write(tlv3, sizeof(tlv3)); + write(tlv_vpc_id, sizeof(tlv_vpc_id)); + write(data, sizeof(data)); + expectData("DATA"); + + EXPECT_EQ(1, server_connection_->streamInfo().dynamicMetadata().filter_metadata_size()); + + auto metadata = server_connection_->streamInfo().dynamicMetadata().filter_metadata(); + EXPECT_EQ(1, metadata.size()); + EXPECT_EQ(1, metadata.count(ListenerFilters::ListenerFilterNames::get().ProxyProtocol)); + + auto fields = metadata.at(ListenerFilters::ListenerFilterNames::get().ProxyProtocol).fields(); + EXPECT_EQ(2, fields.size()); + EXPECT_EQ(1, fields.count("PP2 type authority")); + EXPECT_EQ(1, fields.count("PP2 vpc id")); + + auto value_type_authority = fields.at("PP2 type authority").string_value(); + ASSERT_THAT(value_type_authority, ElementsAre(0x66, 0x6f, 0x6f, 0x2e, 0x63, 0x6f, 0x6d)); + + auto value_vpc_id = fields.at("PP2 vpc id").string_value(); + ASSERT_THAT(value_vpc_id, + ElementsAre(0x01, 0x76, 0x70, 0x63, 0x2d, 0x30, 0x32, 0x35, 0x74, 0x65, 0x73, 0x74, + 0x32, 0x66, 0x61, 0x36, 0x63, 0x36, 0x33, 0x68, 0x61, 0x37)); + disconnect(); +} + +TEST_P(ProxyProtocolTest, V2WillNotOverwriteTLV) { + // A well-formed ipv4/tcp with a pair of TLV extensions is accepted + constexpr uint8_t buffer[] = {0x0d, 0x0a, 
0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55, 0x49, + 0x54, 0x0a, 0x21, 0x11, 0x00, 0x2a, 0x01, 0x02, 0x03, 0x04, + 0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x00, 0x02}; + // a TLV of type 0x00 with size of 4 (1 byte is value) + constexpr uint8_t tlv1[] = {0x00, 0x00, 0x01, 0xff}; + // a TLV of type 0x02 with size of 10 bytes (7 bytes are value) + constexpr uint8_t tlv_type_authority1[] = {0x02, 0x00, 0x07, 0x66, 0x6f, + 0x6f, 0x2e, 0x63, 0x6f, 0x6d}; + // a TLV of type 0x0f with size of 6 bytes (3 bytes are value) + constexpr uint8_t tlv3[] = {0x0f, 0x00, 0x03, 0xf0, 0x00, 0x0f}; + // a TLV of type 0x02 (again) with size of 10 bytes (7 bytes are value) and different values + constexpr uint8_t tlv_type_authority2[] = {0x02, 0x00, 0x07, 0x62, 0x61, + 0x72, 0x2e, 0x6e, 0x65, 0x74}; + constexpr uint8_t data[] = {'D', 'A', 'T', 'A'}; + + envoy::extensions::filters::listener::proxy_protocol::v3::ProxyProtocol proto_config; + auto rule_type_authority = proto_config.add_rules(); + rule_type_authority->set_tlv_type(0x02); + rule_type_authority->mutable_on_tlv_present()->set_key("PP2 type authority"); + + connect(true, &proto_config); + write(buffer, sizeof(buffer)); + dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + + write(tlv1, sizeof(tlv1)); + write(tlv_type_authority1, sizeof(tlv_type_authority1)); + write(tlv3, sizeof(tlv3)); + write(tlv_type_authority2, sizeof(tlv_type_authority2)); + write(data, sizeof(data)); + expectData("DATA"); + + EXPECT_EQ(1, server_connection_->streamInfo().dynamicMetadata().filter_metadata_size()); + + auto metadata = server_connection_->streamInfo().dynamicMetadata().filter_metadata(); + EXPECT_EQ(1, metadata.size()); + EXPECT_EQ(1, metadata.count(ListenerFilters::ListenerFilterNames::get().ProxyProtocol)); + + auto fields = metadata.at(ListenerFilters::ListenerFilterNames::get().ProxyProtocol).fields(); + EXPECT_EQ(1, fields.size()); + EXPECT_EQ(1, fields.count("PP2 type authority")); + + auto value_type_authority = fields.at("PP2 type authority").string_value(); + ASSERT_THAT(value_type_authority, ElementsAre(0x66, 0x6f, 0x6f, 0x2e, 0x63, 0x6f, 0x6d)); + + disconnect(); +} + +TEST_P(ProxyProtocolTest, V2WrongTLVLength) { + // A well-formed ipv4/tcp with buffer[14]15] being 0x00 and 0x10. It says we should have 16 bytes + // following. + constexpr uint8_t buffer[] = {0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55, 0x49, + 0x54, 0x0a, 0x21, 0x11, 0x00, 0x10, 0x01, 0x02, 0x03, 0x04, + 0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x00, 0x02}; + + // tlv[2] should be 0x1 since there's only one byte for tlv value. + constexpr uint8_t tlv[] = {0x0, 0x0, 0x2, 0xff}; + + envoy::extensions::filters::listener::proxy_protocol::v3::ProxyProtocol proto_config; + auto rule_00 = proto_config.add_rules(); + rule_00->set_tlv_type(0x00); + rule_00->mutable_on_tlv_present()->set_key("00"); + + connect(false, &proto_config); + write(buffer, sizeof(buffer)); + write(tlv, sizeof(tlv)); + + expectProxyProtoError(); +} + +TEST_P(ProxyProtocolTest, V2IncompleteTLV) { + // A ipv4/tcp with buffer[14]15] being 0x00 and 0x11. It says we should have 17 bytes following, + // however we have 20. 
+ constexpr uint8_t buffer[] = {0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55, 0x49, + 0x54, 0x0a, 0x21, 0x11, 0x00, 0x11, 0x01, 0x02, 0x03, 0x04, + 0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x00, 0x02}; + + // a TLV of type 0x00 with size of 4 (1 byte is value) + constexpr uint8_t tlv1[] = {0x0, 0x0, 0x1, 0xff}; + // a TLV of type 0x01 with size of 4 (1 byte is value) + constexpr uint8_t tlv2[] = {0x1, 0x0, 0x1, 0xff}; + + envoy::extensions::filters::listener::proxy_protocol::v3::ProxyProtocol proto_config; + auto rule_00 = proto_config.add_rules(); + rule_00->set_tlv_type(0x00); + rule_00->mutable_on_tlv_present()->set_key("00"); + + auto rule_01 = proto_config.add_rules(); + rule_01->set_tlv_type(0x01); + rule_01->mutable_on_tlv_present()->set_key("01"); + + connect(false, &proto_config); + write(buffer, sizeof(buffer)); + write(tlv1, sizeof(tlv1)); + write(tlv2, sizeof(tlv2)); + + expectProxyProtoError(); +} + TEST_P(ProxyProtocolTest, MalformedProxyLine) { connect(false); @@ -1012,7 +1258,10 @@ class WildcardProxyProtocolTest : public testing::TestWithParam bool { filter_manager.addAcceptFilter( - nullptr, std::make_unique(std::make_shared(listenerScope()))); + nullptr, + std::make_unique(std::make_shared( + listenerScope(), + envoy::extensions::filters::listener::proxy_protocol::v3::ProxyProtocol()))); return true; })); } @@ -1136,6 +1385,45 @@ TEST_P(WildcardProxyProtocolTest, BasicV6) { disconnect(); } +TEST(ProxyProtocolConfigFactoryTest, TestCreateFactory) { + Server::Configuration::NamedListenerFilterConfigFactory* factory = + Registry::FactoryRegistry:: + getFactory(ListenerFilters::ListenerFilterNames::get().ProxyProtocol); + + EXPECT_EQ(factory->name(), ListenerFilters::ListenerFilterNames::get().ProxyProtocol); + + const std::string yaml = R"EOF( + rules: + - tlv_type: 0x01 + on_tlv_present: + key: "PP2_TYPE_ALPN" + - tlv_type: 0x1a + on_tlv_present: + key: "PP2_TYPE_CUSTOMER_A" +)EOF"; + + ProtobufTypes::MessagePtr proto_config = factory->createEmptyConfigProto(); + TestUtility::loadFromYaml(yaml, *proto_config); + + Server::Configuration::MockListenerFactoryContext context; + EXPECT_CALL(context, scope()).Times(1); + EXPECT_CALL(context, messageValidationVisitor()).Times(1); + Network::ListenerFilterFactoryCb cb = + factory->createListenerFilterFactoryFromProto(*proto_config, nullptr, context); + + Network::MockListenerFilterManager manager; + Network::ListenerFilterPtr added_filter; + EXPECT_CALL(manager, addAcceptFilter_(_, _)) + .WillOnce(Invoke([&added_filter](const Network::ListenerFilterMatcherSharedPtr&, + Network::ListenerFilterPtr& filter) { + added_filter = std::move(filter); + })); + cb(manager); + + // Make sure we actually create the correct type! + EXPECT_NE(dynamic_cast(added_filter.get()), nullptr); +} + // Test that the deprecated extension name still functions. 
TEST(ProxyProtocolConfigFactoryTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) { const std::string deprecated_name = "envoy.listener.proxy_protocol"; diff --git a/test/integration/proxy_proto_integration_test.cc b/test/integration/proxy_proto_integration_test.cc index 10b1a961040f..92c4bc90b39d 100644 --- a/test/integration/proxy_proto_integration_test.cc +++ b/test/integration/proxy_proto_integration_test.cc @@ -18,6 +18,27 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, ProxyProtoIntegrationTest, testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), TestUtility::ipTestParamsToString); +TEST_P(ProxyProtoIntegrationTest, CaptureTlvToMetadata) { + useListenerAccessLog( + "%DYNAMIC_METADATA(envoy.filters.listener.proxy_protocol:PP2TypeAuthority)%"); + + ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr { + Network::ClientConnectionPtr conn = makeClientConnection(lookupPort("http")); + constexpr uint8_t buffer[] = {0x0d, 0x0a, 0x0d, 0x0a, 0x00, 0x0d, 0x0a, 0x51, 0x55, 0x49, 0x54, + 0x0a, 0x21, 0x11, 0x00, 0x1a, 0x01, 0x02, 0x03, 0x04, 0x00, 0x01, + 0x01, 0x02, 0x03, 0x05, 0x00, 0x02, 0x00, 0x00, 0x01, 0xff, 0x02, + 0x00, 0x07, 0x66, 0x6f, 0x6f, 0x2e, 0x63, 0x6f, 0x6d}; + Buffer::OwnedImpl buf(buffer, sizeof(buffer)); + conn->write(buf, false); + return conn; + }; + + testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator); + cleanupUpstreamAndDownstream(); + const std::string log_line = waitForAccessLog(listener_access_log_name_); + EXPECT_EQ(log_line, "\"foo.com\""); +} + TEST_P(ProxyProtoIntegrationTest, V1RouterRequestAndResponseWithBodyNoBuffer) { ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr { Network::ClientConnectionPtr conn = makeClientConnection(lookupPort("http")); diff --git a/test/integration/proxy_proto_integration_test.h b/test/integration/proxy_proto_integration_test.h index e67b69e9a47e..140d5b63d3f7 100644 --- a/test/integration/proxy_proto_integration_test.h +++ b/test/integration/proxy_proto_integration_test.h @@ -4,6 +4,8 @@ #include "common/http/codec_client.h" +#include "extensions/filters/listener/proxy_protocol/proxy_protocol.h" + #include "test/integration/fake_upstream.h" #include "test/integration/http_integration.h" #include "test/integration/server.h" @@ -17,9 +19,15 @@ class ProxyProtoIntegrationTest : public testing::TestWithParam void { + ::envoy::extensions::filters::listener::proxy_protocol::v3::ProxyProtocol proxy_protocol; + auto rule = proxy_protocol.add_rules(); + rule->set_tlv_type(0x02); + rule->mutable_on_tlv_present()->set_key("PP2TypeAuthority"); + auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0); - auto* filter_chain = listener->mutable_filter_chains(0); - filter_chain->mutable_use_proxy_proto()->set_value(true); + auto* ppv_filter = listener->add_listener_filters(); + ppv_filter->set_name("envoy.listener.proxy_protocol"); + ppv_filter->mutable_typed_config()->PackFrom(proxy_protocol); }); } }; diff --git a/test/mocks/network/mocks.h b/test/mocks/network/mocks.h index 9c710e1f48f4..315ce275fb95 100644 --- a/test/mocks/network/mocks.h +++ b/test/mocks/network/mocks.h @@ -310,6 +310,9 @@ class MockListenerFilterCallbacks : public ListenerFilterCallbacks { MOCK_METHOD(ConnectionSocket&, socket, ()); MOCK_METHOD(Event::Dispatcher&, dispatcher, ()); MOCK_METHOD(void, continueFilterChain, (bool)); + MOCK_METHOD(void, setDynamicMetadata, (const std::string&, const ProtobufWkt::Struct&)); + MOCK_METHOD(envoy::config::core::v3::Metadata&, 
dynamicMetadata, ()); + MOCK_METHOD(const envoy::config::core::v3::Metadata&, dynamicMetadata, (), (const)); NiceMock socket_; }; From 84d09df6ac71474fac6ca087b703d04e3167790e Mon Sep 17 00:00:00 2001 From: Tony Allen Date: Fri, 26 Jun 2020 08:06:20 -0700 Subject: [PATCH 459/909] admission control: use less iterations in integration test (#11760) Signed-off-by: Tony Allen --- .../admission_control_integration_test.cc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/test/extensions/filters/http/admission_control/admission_control_integration_test.cc b/test/extensions/filters/http/admission_control/admission_control_integration_test.cc index a361cbff4d09..578f39db10c3 100644 --- a/test/extensions/filters/http/admission_control/admission_control_integration_test.cc +++ b/test/extensions/filters/http/admission_control/admission_control_integration_test.cc @@ -95,7 +95,7 @@ TEST_P(AdmissionControlIntegrationTest, HttpTest) { // Drop the success rate to a very low value. ENVOY_LOG(info, "dropping success rate"); - for (int i = 0; i < 1000; ++i) { + for (int i = 0; i < 300; ++i) { sendRequestWithReturnCode("500"); } @@ -103,7 +103,7 @@ TEST_P(AdmissionControlIntegrationTest, HttpTest) { double throttle_count = 0; double request_count = 0; ENVOY_LOG(info, "validating throttling rate"); - for (int i = 0; i < 1000; ++i) { + for (int i = 0; i < 300; ++i) { auto response = sendRequestWithReturnCode("500"); auto rc = response->headers().Status()->value().getStringView(); if (rc == "503") { @@ -133,14 +133,14 @@ TEST_P(AdmissionControlIntegrationTest, GrpcTest) { initialize(); // Drop the success rate to a very low value. - for (int i = 0; i < 1000; ++i) { + for (int i = 0; i < 300; ++i) { sendGrpcRequestWithReturnCode(14); } // Measure throttling rate from the admission control filter. double throttle_count = 0; double request_count = 0; - for (int i = 0; i < 1000; ++i) { + for (int i = 0; i < 300; ++i) { auto response = sendGrpcRequestWithReturnCode(10); // When the filter is throttling, it returns an HTTP code 503 and the GRPC status is unset. From 471e2394b79d2b5b13a270626738d4807a51e34a Mon Sep 17 00:00:00 2001 From: Clara Date: Fri, 26 Jun 2020 11:17:49 -0400 Subject: [PATCH 460/909] Ratelimit: Add dynamic metadata to ratelimit actions (#11723) Modifies ratelimit filter to be able to use information from the dynamic metadata as one of its actions. 
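For reference, a minimal route sketch wiring up the new action (cluster, filter-metadata and key names here are illustrative; the dynamic_metadata fields mirror the new DynamicMetaData proto and the unit tests in this change):

  virtual_hosts:
  - name: local_service
    domains: ["*"]
    routes:
    - match: { prefix: "/" }
      route:
        cluster: fake_cluster
        rate_limits:
        - actions:
          - dynamic_metadata:
              descriptor_key: fake_key
              metadata_key:
                key: envoy.xxx
                path:
                - key: test
                - key: prop

With this configuration the filter appends the descriptor entry ("fake_key", "<string value at envoy.xxx/test/prop>"); if that metadata entry is missing, empty, or not a string, the descriptor for that rate limit entry is skipped.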
Signed-off-by: Clara Andrew-Wani --- api/envoy/config/route/v3/BUILD | 1 + .../config/route/v3/route_components.proto | 20 +- api/envoy/config/route/v4alpha/BUILD | 1 + .../route/v4alpha/route_components.proto | 23 ++- docs/root/version_history/current.rst | 1 + .../envoy/config/route/v3/BUILD | 1 + .../config/route/v3/route_components.proto | 20 +- .../envoy/config/route/v4alpha/BUILD | 1 + .../route/v4alpha/route_components.proto | 23 ++- include/envoy/router/BUILD | 1 + include/envoy/router/router_ratelimit.h | 22 ++- source/common/router/BUILD | 2 + source/common/router/router_ratelimit.cc | 52 ++++-- source/common/router/router_ratelimit.h | 47 ++++- .../filters/http/ratelimit/ratelimit.cc | 3 +- test/common/router/router_ratelimit_test.cc | 172 +++++++++++++++--- .../filters/http/ratelimit/ratelimit_test.cc | 42 ++--- test/mocks/router/mocks.h | 3 +- 18 files changed, 357 insertions(+), 78 deletions(-) diff --git a/api/envoy/config/route/v3/BUILD b/api/envoy/config/route/v3/BUILD index 019cf27528c6..6f653723e5ae 100644 --- a/api/envoy/config/route/v3/BUILD +++ b/api/envoy/config/route/v3/BUILD @@ -11,6 +11,7 @@ api_proto_package( "//envoy/api/v2/route:pkg", "//envoy/config/core/v3:pkg", "//envoy/type/matcher/v3:pkg", + "//envoy/type/metadata/v3:pkg", "//envoy/type/tracing/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", diff --git a/api/envoy/config/route/v3/route_components.proto b/api/envoy/config/route/v3/route_components.proto index e51f4ab87d93..2dcfd3838ad8 100644 --- a/api/envoy/config/route/v3/route_components.proto +++ b/api/envoy/config/route/v3/route_components.proto @@ -7,6 +7,7 @@ import "envoy/config/core/v3/extension.proto"; import "envoy/config/core/v3/proxy_protocol.proto"; import "envoy/type/matcher/v3/regex.proto"; import "envoy/type/matcher/v3/string.proto"; +import "envoy/type/metadata/v3/metadata.proto"; import "envoy/type/tracing/v3/custom_tag.proto"; import "envoy/type/v3/percent.proto"; import "envoy/type/v3/range.proto"; @@ -1342,7 +1343,7 @@ message VirtualCluster { message RateLimit { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RateLimit"; - // [#next-free-field: 7] + // [#next-free-field: 8] message Action { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RateLimit.Action"; @@ -1456,6 +1457,20 @@ message RateLimit { repeated HeaderMatcher headers = 3 [(validate.rules).repeated = {min_items: 1}]; } + // The following descriptor entry is appended when the dynamic metadata contains a key value: + // + // .. code-block:: cpp + // + // ("", "") + message DynamicMetaData { + // The key to use in the descriptor entry. + string descriptor_key = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Metadata struct that defines the key and path to retrieve the string value. A match will + // only happen if the value in the dynamic metadata is of type string. + type.metadata.v3.MetadataKey metadata_key = 2 [(validate.rules).message = {required: true}]; + } + oneof action_specifier { option (validate.required) = true; @@ -1476,6 +1491,9 @@ message RateLimit { // Rate limit on the existence of request headers. HeaderValueMatch header_value_match = 6; + + // Rate limit on dynamic metadata. 
+ DynamicMetaData dynamic_metadata = 7; } } diff --git a/api/envoy/config/route/v4alpha/BUILD b/api/envoy/config/route/v4alpha/BUILD index 13dd451d1b4a..c72b7030b9fb 100644 --- a/api/envoy/config/route/v4alpha/BUILD +++ b/api/envoy/config/route/v4alpha/BUILD @@ -10,6 +10,7 @@ api_proto_package( "//envoy/config/core/v4alpha:pkg", "//envoy/config/route/v3:pkg", "//envoy/type/matcher/v4alpha:pkg", + "//envoy/type/metadata/v3:pkg", "//envoy/type/tracing/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", diff --git a/api/envoy/config/route/v4alpha/route_components.proto b/api/envoy/config/route/v4alpha/route_components.proto index 741ff2cd540d..8251d64c09a6 100644 --- a/api/envoy/config/route/v4alpha/route_components.proto +++ b/api/envoy/config/route/v4alpha/route_components.proto @@ -7,6 +7,7 @@ import "envoy/config/core/v4alpha/extension.proto"; import "envoy/config/core/v4alpha/proxy_protocol.proto"; import "envoy/type/matcher/v4alpha/regex.proto"; import "envoy/type/matcher/v4alpha/string.proto"; +import "envoy/type/metadata/v3/metadata.proto"; import "envoy/type/tracing/v3/custom_tag.proto"; import "envoy/type/v3/percent.proto"; import "envoy/type/v3/range.proto"; @@ -1321,7 +1322,7 @@ message VirtualCluster { message RateLimit { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RateLimit"; - // [#next-free-field: 7] + // [#next-free-field: 8] message Action { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RateLimit.Action"; @@ -1435,6 +1436,23 @@ message RateLimit { repeated HeaderMatcher headers = 3 [(validate.rules).repeated = {min_items: 1}]; } + // The following descriptor entry is appended when the dynamic metadata contains a key value: + // + // .. code-block:: cpp + // + // ("", "") + message DynamicMetaData { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RateLimit.Action.DynamicMetaData"; + + // The key to use in the descriptor entry. + string descriptor_key = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Metadata struct that defines the key and path to retrieve the string value. A match will + // only happen if the value in the dynamic metadata is of type string. + type.metadata.v3.MetadataKey metadata_key = 2 [(validate.rules).message = {required: true}]; + } + oneof action_specifier { option (validate.required) = true; @@ -1455,6 +1473,9 @@ message RateLimit { // Rate limit on the existence of request headers. HeaderValueMatch header_value_match = 6; + + // Rate limit on dynamic metadata. + DynamicMetaData dynamic_metadata = 7; } } diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 95bd4c5bf88b..e87797afffaa 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -102,6 +102,7 @@ New Features * metrics service: added added :ref:`API version ` to explicitly set the version of gRPC service endpoint and message to be used. * network filters: added a :ref:`postgres proxy filter `. * network filters: added a :ref:`rocketmq proxy filter `. +* ratelimit: add support for use of dynamic metadata :ref:`dynamic_metadata ` as a ratelimit action. * ratelimit: added :ref:`API version ` to explicitly set the version of gRPC service endpoint and message to be used. 
* redis: added acl support :ref:`downstream_auth_username ` for downstream client ACL authentication, and :ref:`auth_username ` to configure authentication usernames for upstream Redis 6+ server clusters with ACL enabled. * regex: added support for enforcing max program size via runtime and stats to monitor program size for :ref:`Google RE2 `. diff --git a/generated_api_shadow/envoy/config/route/v3/BUILD b/generated_api_shadow/envoy/config/route/v3/BUILD index 019cf27528c6..6f653723e5ae 100644 --- a/generated_api_shadow/envoy/config/route/v3/BUILD +++ b/generated_api_shadow/envoy/config/route/v3/BUILD @@ -11,6 +11,7 @@ api_proto_package( "//envoy/api/v2/route:pkg", "//envoy/config/core/v3:pkg", "//envoy/type/matcher/v3:pkg", + "//envoy/type/metadata/v3:pkg", "//envoy/type/tracing/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", diff --git a/generated_api_shadow/envoy/config/route/v3/route_components.proto b/generated_api_shadow/envoy/config/route/v3/route_components.proto index 0c1b7fd6ce7b..4799ffa075c2 100644 --- a/generated_api_shadow/envoy/config/route/v3/route_components.proto +++ b/generated_api_shadow/envoy/config/route/v3/route_components.proto @@ -7,6 +7,7 @@ import "envoy/config/core/v3/extension.proto"; import "envoy/config/core/v3/proxy_protocol.proto"; import "envoy/type/matcher/v3/regex.proto"; import "envoy/type/matcher/v3/string.proto"; +import "envoy/type/metadata/v3/metadata.proto"; import "envoy/type/tracing/v3/custom_tag.proto"; import "envoy/type/v3/percent.proto"; import "envoy/type/v3/range.proto"; @@ -1354,7 +1355,7 @@ message VirtualCluster { message RateLimit { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RateLimit"; - // [#next-free-field: 7] + // [#next-free-field: 8] message Action { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RateLimit.Action"; @@ -1468,6 +1469,20 @@ message RateLimit { repeated HeaderMatcher headers = 3 [(validate.rules).repeated = {min_items: 1}]; } + // The following descriptor entry is appended when the dynamic metadata contains a key value: + // + // .. code-block:: cpp + // + // ("", "") + message DynamicMetaData { + // The key to use in the descriptor entry. + string descriptor_key = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Metadata struct that defines the key and path to retrieve the string value. A match will + // only happen if the value in the dynamic metadata is of type string. + type.metadata.v3.MetadataKey metadata_key = 2 [(validate.rules).message = {required: true}]; + } + oneof action_specifier { option (validate.required) = true; @@ -1488,6 +1503,9 @@ message RateLimit { // Rate limit on the existence of request headers. HeaderValueMatch header_value_match = 6; + + // Rate limit on dynamic metadata. 
+ DynamicMetaData dynamic_metadata = 7; } } diff --git a/generated_api_shadow/envoy/config/route/v4alpha/BUILD b/generated_api_shadow/envoy/config/route/v4alpha/BUILD index 13dd451d1b4a..c72b7030b9fb 100644 --- a/generated_api_shadow/envoy/config/route/v4alpha/BUILD +++ b/generated_api_shadow/envoy/config/route/v4alpha/BUILD @@ -10,6 +10,7 @@ api_proto_package( "//envoy/config/core/v4alpha:pkg", "//envoy/config/route/v3:pkg", "//envoy/type/matcher/v4alpha:pkg", + "//envoy/type/metadata/v3:pkg", "//envoy/type/tracing/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", diff --git a/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto b/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto index a107ecf7efbf..21a26d181c26 100644 --- a/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto +++ b/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto @@ -7,6 +7,7 @@ import "envoy/config/core/v4alpha/extension.proto"; import "envoy/config/core/v4alpha/proxy_protocol.proto"; import "envoy/type/matcher/v4alpha/regex.proto"; import "envoy/type/matcher/v4alpha/string.proto"; +import "envoy/type/metadata/v3/metadata.proto"; import "envoy/type/tracing/v3/custom_tag.proto"; import "envoy/type/v3/percent.proto"; import "envoy/type/v3/range.proto"; @@ -1349,7 +1350,7 @@ message VirtualCluster { message RateLimit { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RateLimit"; - // [#next-free-field: 7] + // [#next-free-field: 8] message Action { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RateLimit.Action"; @@ -1463,6 +1464,23 @@ message RateLimit { repeated HeaderMatcher headers = 3 [(validate.rules).repeated = {min_items: 1}]; } + // The following descriptor entry is appended when the dynamic metadata contains a key value: + // + // .. code-block:: cpp + // + // ("", "") + message DynamicMetaData { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RateLimit.Action.DynamicMetaData"; + + // The key to use in the descriptor entry. + string descriptor_key = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Metadata struct that defines the key and path to retrieve the string value. A match will + // only happen if the value in the dynamic metadata is of type string. + type.metadata.v3.MetadataKey metadata_key = 2 [(validate.rules).message = {required: true}]; + } + oneof action_specifier { option (validate.required) = true; @@ -1483,6 +1501,9 @@ message RateLimit { // Rate limit on the existence of request headers. HeaderValueMatch header_value_match = 6; + + // Rate limit on dynamic metadata. 
+ DynamicMetaData dynamic_metadata = 7; } } diff --git a/include/envoy/router/BUILD b/include/envoy/router/BUILD index e679ee28c187..deb5de97578b 100644 --- a/include/envoy/router/BUILD +++ b/include/envoy/router/BUILD @@ -92,6 +92,7 @@ envoy_cc_library( "//include/envoy/http:filter_interface", "//include/envoy/http:header_map_interface", "//include/envoy/ratelimit:ratelimit_interface", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], ) diff --git a/include/envoy/router/router_ratelimit.h b/include/envoy/router/router_ratelimit.h index 246c177bd47b..a21e0854bd27 100644 --- a/include/envoy/router/router_ratelimit.h +++ b/include/envoy/router/router_ratelimit.h @@ -5,6 +5,7 @@ #include #include +#include "envoy/config/core/v3/base.pb.h" #include "envoy/http/filter.h" #include "envoy/http/header_map.h" #include "envoy/ratelimit/ratelimit.h" @@ -25,12 +26,14 @@ class RateLimitAction { * @param local_service_cluster supplies the name of the local service cluster. * @param headers supplies the header for the request. * @param remote_address supplies the trusted downstream address for the connection. + * @param dynamic_metadata supplies the dynamic metadata for the request * @return true if the RateLimitAction populated the descriptor. */ - virtual bool populateDescriptor(const RouteEntry& route, RateLimit::Descriptor& descriptor, - const std::string& local_service_cluster, - const Http::HeaderMap& headers, - const Network::Address::Instance& remote_address) const PURE; + virtual bool + populateDescriptor(const RouteEntry& route, RateLimit::Descriptor& descriptor, + const std::string& local_service_cluster, const Http::HeaderMap& headers, + const Network::Address::Instance& remote_address, + const envoy::config::core::v3::Metadata* dynamic_metadata) const PURE; }; using RateLimitActionPtr = std::unique_ptr; @@ -59,12 +62,13 @@ class RateLimitPolicyEntry { * @param local_service_cluster supplies the name of the local service cluster. * @param headers supplies the header for the request. * @param remote_address supplies the trusted downstream address for the connection. + * @param dynamic_metadata supplies the dynamic metadata for the request. 
*/ - virtual void populateDescriptors(const RouteEntry& route, - std::vector& descriptors, - const std::string& local_service_cluster, - const Http::HeaderMap& headers, - const Network::Address::Instance& remote_address) const PURE; + virtual void + populateDescriptors(const RouteEntry& route, std::vector& descriptors, + const std::string& local_service_cluster, const Http::HeaderMap& headers, + const Network::Address::Instance& remote_address, + const envoy::config::core::v3::Metadata* dynamic_metadata) const PURE; }; /** diff --git a/source/common/router/BUILD b/source/common/router/BUILD index 610cb0f10d75..188242186a73 100644 --- a/source/common/router/BUILD +++ b/source/common/router/BUILD @@ -325,8 +325,10 @@ envoy_cc_library( "//include/envoy/router:router_ratelimit_interface", "//source/common/common:assert_lib", "//source/common/common:empty_string", + "//source/common/config:metadata_lib", "//source/common/http:header_utility_lib", "//source/common/protobuf:utility_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/config/route/v3:pkg_cc_proto", ], ) diff --git a/source/common/router/router_ratelimit.cc b/source/common/router/router_ratelimit.cc index c883ce894210..6937aee47989 100644 --- a/source/common/router/router_ratelimit.cc +++ b/source/common/router/router_ratelimit.cc @@ -5,10 +5,12 @@ #include #include +#include "envoy/config/core/v3/base.pb.h" #include "envoy/config/route/v3/route_components.pb.h" #include "common/common/assert.h" #include "common/common/empty_string.h" +#include "common/config/metadata.h" #include "common/protobuf/utility.h" namespace Envoy { @@ -20,7 +22,8 @@ bool SourceClusterAction::populateDescriptor(const Router::RouteEntry&, RateLimit::Descriptor& descriptor, const std::string& local_service_cluster, const Http::HeaderMap&, - const Network::Address::Instance&) const { + const Network::Address::Instance&, + const envoy::config::core::v3::Metadata*) const { descriptor.entries_.push_back({"source_cluster", local_service_cluster}); return true; } @@ -28,7 +31,8 @@ bool SourceClusterAction::populateDescriptor(const Router::RouteEntry&, bool DestinationClusterAction::populateDescriptor(const Router::RouteEntry& route, RateLimit::Descriptor& descriptor, const std::string&, const Http::HeaderMap&, - const Network::Address::Instance&) const { + const Network::Address::Instance&, + const envoy::config::core::v3::Metadata*) const { descriptor.entries_.push_back({"destination_cluster", route.clusterName()}); return true; } @@ -36,7 +40,8 @@ bool DestinationClusterAction::populateDescriptor(const Router::RouteEntry& rout bool RequestHeadersAction::populateDescriptor(const Router::RouteEntry&, RateLimit::Descriptor& descriptor, const std::string&, const Http::HeaderMap& headers, - const Network::Address::Instance&) const { + const Network::Address::Instance&, + const envoy::config::core::v3::Metadata*) const { const Http::HeaderEntry* header_value = headers.get(header_name_); // If header is not present in the request and if skip_if_absent is true skip this descriptor, @@ -50,9 +55,11 @@ bool RequestHeadersAction::populateDescriptor(const Router::RouteEntry&, return true; } -bool RemoteAddressAction::populateDescriptor( - const Router::RouteEntry&, RateLimit::Descriptor& descriptor, const std::string&, - const Http::HeaderMap&, const Network::Address::Instance& remote_address) const { +bool RemoteAddressAction::populateDescriptor(const Router::RouteEntry&, + RateLimit::Descriptor& descriptor, const std::string&, + const 
Http::HeaderMap&, + const Network::Address::Instance& remote_address, + const envoy::config::core::v3::Metadata*) const { if (remote_address.type() != Network::Address::Type::Ip) { return false; } @@ -63,12 +70,30 @@ bool RemoteAddressAction::populateDescriptor( bool GenericKeyAction::populateDescriptor(const Router::RouteEntry&, RateLimit::Descriptor& descriptor, const std::string&, - const Http::HeaderMap&, - const Network::Address::Instance&) const { + const Http::HeaderMap&, const Network::Address::Instance&, + const envoy::config::core::v3::Metadata*) const { descriptor.entries_.push_back({"generic_key", descriptor_value_}); return true; } +DynamicMetaDataAction::DynamicMetaDataAction( + const envoy::config::route::v3::RateLimit::Action::DynamicMetaData& action) + : metadata_key_(action.metadata_key()), descriptor_key_(action.descriptor_key()) {} + +bool DynamicMetaDataAction::populateDescriptor( + const Router::RouteEntry&, RateLimit::Descriptor& descriptor, const std::string&, + const Http::HeaderMap&, const Network::Address::Instance&, + const envoy::config::core::v3::Metadata* dynamic_metadata) const { + const ProtobufWkt::Value& metadata_value = + Envoy::Config::Metadata::metadataValue(dynamic_metadata, metadata_key_); + if (metadata_value.kind_case() != ProtobufWkt::Value::kStringValue) { + return false; + } + descriptor.entries_.push_back({descriptor_key_, metadata_value.string_value()}); + + return !metadata_value.string_value().empty(); +} + HeaderValueMatchAction::HeaderValueMatchAction( const envoy::config::route::v3::RateLimit::Action::HeaderValueMatch& action) : descriptor_value_(action.descriptor_value()), @@ -78,7 +103,8 @@ HeaderValueMatchAction::HeaderValueMatchAction( bool HeaderValueMatchAction::populateDescriptor(const Router::RouteEntry&, RateLimit::Descriptor& descriptor, const std::string&, const Http::HeaderMap& headers, - const Network::Address::Instance&) const { + const Network::Address::Instance&, + const envoy::config::core::v3::Metadata*) const { if (expect_match_ == Http::HeaderUtility::matchHeaders(headers, action_headers_)) { descriptor.entries_.push_back({"header_match", descriptor_value_}); return true; @@ -108,6 +134,9 @@ RateLimitPolicyEntryImpl::RateLimitPolicyEntryImpl( case envoy::config::route::v3::RateLimit::Action::ActionSpecifierCase::kGenericKey: actions_.emplace_back(new GenericKeyAction(action.generic_key())); break; + case envoy::config::route::v3::RateLimit::Action::ActionSpecifierCase::kDynamicMetadata: + actions_.emplace_back(new DynamicMetaDataAction(action.dynamic_metadata())); + break; case envoy::config::route::v3::RateLimit::Action::ActionSpecifierCase::kHeaderValueMatch: actions_.emplace_back(new HeaderValueMatchAction(action.header_value_match())); break; @@ -120,12 +149,13 @@ RateLimitPolicyEntryImpl::RateLimitPolicyEntryImpl( void RateLimitPolicyEntryImpl::populateDescriptors( const Router::RouteEntry& route, std::vector& descriptors, const std::string& local_service_cluster, const Http::HeaderMap& headers, - const Network::Address::Instance& remote_address) const { + const Network::Address::Instance& remote_address, + const envoy::config::core::v3::Metadata* dynamic_metadata) const { RateLimit::Descriptor descriptor; bool result = true; for (const RateLimitActionPtr& action : actions_) { result = result && action->populateDescriptor(route, descriptor, local_service_cluster, headers, - remote_address); + remote_address, dynamic_metadata); if (!result) { break; } diff --git a/source/common/router/router_ratelimit.h 
b/source/common/router/router_ratelimit.h index df42e898952b..2b990a659253 100644 --- a/source/common/router/router_ratelimit.h +++ b/source/common/router/router_ratelimit.h @@ -5,10 +5,12 @@ #include #include +#include "envoy/config/core/v3/base.pb.h" #include "envoy/config/route/v3/route_components.pb.h" #include "envoy/router/router.h" #include "envoy/router/router_ratelimit.h" +#include "common/config/metadata.h" #include "common/http/header_utility.h" namespace Envoy { @@ -22,7 +24,8 @@ class SourceClusterAction : public RateLimitAction { // Router::RateLimitAction bool populateDescriptor(const Router::RouteEntry& route, RateLimit::Descriptor& descriptor, const std::string& local_service_cluster, const Http::HeaderMap& headers, - const Network::Address::Instance& remote_address) const override; + const Network::Address::Instance& remote_address, + const envoy::config::core::v3::Metadata* dynamic_metadata) const override; }; /** @@ -33,7 +36,8 @@ class DestinationClusterAction : public RateLimitAction { // Router::RateLimitAction bool populateDescriptor(const Router::RouteEntry& route, RateLimit::Descriptor& descriptor, const std::string& local_service_cluster, const Http::HeaderMap& headers, - const Network::Address::Instance& remote_address) const override; + const Network::Address::Instance& remote_address, + const envoy::config::core::v3::Metadata* dynamic_metadata) const override; }; /** @@ -48,7 +52,8 @@ class RequestHeadersAction : public RateLimitAction { // Router::RateLimitAction bool populateDescriptor(const Router::RouteEntry& route, RateLimit::Descriptor& descriptor, const std::string& local_service_cluster, const Http::HeaderMap& headers, - const Network::Address::Instance& remote_address) const override; + const Network::Address::Instance& remote_address, + const envoy::config::core::v3::Metadata* dynamic_metadata) const override; private: const Http::LowerCaseString header_name_; @@ -64,7 +69,8 @@ class RemoteAddressAction : public RateLimitAction { // Router::RateLimitAction bool populateDescriptor(const Router::RouteEntry& route, RateLimit::Descriptor& descriptor, const std::string& local_service_cluster, const Http::HeaderMap& headers, - const Network::Address::Instance& remote_address) const override; + const Network::Address::Instance& remote_address, + const envoy::config::core::v3::Metadata* dynamic_metadata) const override; }; /** @@ -78,12 +84,30 @@ class GenericKeyAction : public RateLimitAction { // Router::RateLimitAction bool populateDescriptor(const Router::RouteEntry& route, RateLimit::Descriptor& descriptor, const std::string& local_service_cluster, const Http::HeaderMap& headers, - const Network::Address::Instance& remote_address) const override; + const Network::Address::Instance& remote_address, + const envoy::config::core::v3::Metadata* dynamic_metadata) const override; private: const std::string descriptor_value_; }; +/** + * Action for dynamic metadata rate limiting. 
+ */ +class DynamicMetaDataAction : public RateLimitAction { +public: + DynamicMetaDataAction(const envoy::config::route::v3::RateLimit::Action::DynamicMetaData& action); + // Router::RateLimitAction + bool populateDescriptor(const Router::RouteEntry& route, RateLimit::Descriptor& descriptor, + const std::string& local_service_cluster, const Http::HeaderMap& headers, + const Network::Address::Instance& remote_address, + const envoy::config::core::v3::Metadata* dynamic_metadata) const override; + +private: + const Envoy::Config::MetadataKey metadata_key_; + const std::string descriptor_key_; +}; + /** * Action for header value match rate limiting. */ @@ -95,7 +119,8 @@ class HeaderValueMatchAction : public RateLimitAction { // Router::RateLimitAction bool populateDescriptor(const Router::RouteEntry& route, RateLimit::Descriptor& descriptor, const std::string& local_service_cluster, const Http::HeaderMap& headers, - const Network::Address::Instance& remote_address) const override; + const Network::Address::Instance& remote_address, + const envoy::config::core::v3::Metadata* dynamic_metadata) const override; private: const std::string descriptor_value_; @@ -113,10 +138,12 @@ class RateLimitPolicyEntryImpl : public RateLimitPolicyEntry { // Router::RateLimitPolicyEntry uint64_t stage() const override { return stage_; } const std::string& disableKey() const override { return disable_key_; } - void populateDescriptors(const Router::RouteEntry& route, - std::vector& descriptors, - const std::string& local_service_cluster, const Http::HeaderMap&, - const Network::Address::Instance& remote_address) const override; + void + populateDescriptors(const Router::RouteEntry& route, + std::vector& descriptors, + const std::string& local_service_cluster, const Http::HeaderMap&, + const Network::Address::Instance& remote_address, + const envoy::config::core::v3::Metadata* dynamic_metadata) const override; private: const std::string disable_key_; diff --git a/source/extensions/filters/http/ratelimit/ratelimit.cc b/source/extensions/filters/http/ratelimit/ratelimit.cc index 69075249162d..2bec4783a626 100644 --- a/source/extensions/filters/http/ratelimit/ratelimit.cc +++ b/source/extensions/filters/http/ratelimit/ratelimit.cc @@ -201,7 +201,8 @@ void Filter::populateRateLimitDescriptors(const Router::RateLimitPolicy& rate_li continue; } rate_limit.populateDescriptors(*route_entry, descriptors, config_->localInfo().clusterName(), - headers, *callbacks_->streamInfo().downstreamRemoteAddress()); + headers, *callbacks_->streamInfo().downstreamRemoteAddress(), + &callbacks_->streamInfo().dynamicMetadata()); } } diff --git a/test/common/router/router_ratelimit_test.cc b/test/common/router/router_ratelimit_test.cc index 80c21f623efe..159b32e2dca5 100644 --- a/test/common/router/router_ratelimit_test.cc +++ b/test/common/router/router_ratelimit_test.cc @@ -89,6 +89,7 @@ class RateLimitConfiguration : public testing::Test { Http::TestRequestHeaderMapImpl header_; const RouteEntry* route_; Network::Address::Ipv4Instance default_remote_address_{"10.0.0.1"}; + const envoy::config::core::v3::Metadata* dynamic_metadata_; }; TEST_F(RateLimitConfiguration, NoApplicableRateLimit) { @@ -169,7 +170,8 @@ TEST_F(RateLimitConfiguration, TestGetApplicationRateLimit) { std::vector descriptors; for (const RateLimitPolicyEntry& rate_limit : rate_limits) { - rate_limit.populateDescriptors(*route_, descriptors, "", header_, default_remote_address_); + rate_limit.populateDescriptors(*route_, descriptors, "", header_, 
default_remote_address_, + dynamic_metadata_); } EXPECT_THAT(std::vector({{{{"remote_address", "10.0.0.1"}}}}), testing::ContainerEq(descriptors)); @@ -202,7 +204,7 @@ TEST_F(RateLimitConfiguration, TestVirtualHost) { std::vector descriptors; for (const RateLimitPolicyEntry& rate_limit : rate_limits) { rate_limit.populateDescriptors(*route_, descriptors, "service_cluster", header_, - default_remote_address_); + default_remote_address_, dynamic_metadata_); } EXPECT_THAT(std::vector({{{{"destination_cluster", "www2test"}}}}), testing::ContainerEq(descriptors)); @@ -241,7 +243,7 @@ TEST_F(RateLimitConfiguration, Stages) { std::vector descriptors; for (const RateLimitPolicyEntry& rate_limit : rate_limits) { rate_limit.populateDescriptors(*route_, descriptors, "service_cluster", header_, - default_remote_address_); + default_remote_address_, dynamic_metadata_); } EXPECT_THAT(std::vector( {{{{"destination_cluster", "www2test"}}}, @@ -254,7 +256,7 @@ TEST_F(RateLimitConfiguration, Stages) { for (const RateLimitPolicyEntry& rate_limit : rate_limits) { rate_limit.populateDescriptors(*route_, descriptors, "service_cluster", header_, - default_remote_address_); + default_remote_address_, dynamic_metadata_); } EXPECT_THAT(std::vector({{{{"remote_address", "10.0.0.1"}}}}), testing::ContainerEq(descriptors)); @@ -275,6 +277,7 @@ class RateLimitPolicyEntryTest : public testing::Test { NiceMock route_; std::vector descriptors_; Network::Address::Ipv4Instance default_remote_address_{"10.0.0.1"}; + const envoy::config::core::v3::Metadata* dynamic_metadata_; }; TEST_F(RateLimitPolicyEntryTest, RateLimitPolicyEntryMembers) { @@ -299,8 +302,8 @@ TEST_F(RateLimitPolicyEntryTest, RemoteAddress) { setupTest(yaml); - rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header_, - default_remote_address_); + rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header_, default_remote_address_, + dynamic_metadata_); EXPECT_THAT(std::vector({{{{"remote_address", "10.0.0.1"}}}}), testing::ContainerEq(descriptors_)); } @@ -315,7 +318,8 @@ TEST_F(RateLimitPolicyEntryTest, PipeAddress) { setupTest(yaml); Network::Address::PipeInstance pipe_address("/hello"); - rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header_, pipe_address); + rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header_, pipe_address, + dynamic_metadata_); EXPECT_TRUE(descriptors_.empty()); } @@ -328,7 +332,7 @@ TEST_F(RateLimitPolicyEntryTest, SourceService) { setupTest(yaml); rate_limit_entry_->populateDescriptors(route_, descriptors_, "service_cluster", header_, - default_remote_address_); + default_remote_address_, dynamic_metadata_); EXPECT_THAT( std::vector({{{{"source_cluster", "service_cluster"}}}}), testing::ContainerEq(descriptors_)); @@ -343,7 +347,7 @@ TEST_F(RateLimitPolicyEntryTest, DestinationService) { setupTest(yaml); rate_limit_entry_->populateDescriptors(route_, descriptors_, "service_cluster", header_, - default_remote_address_); + default_remote_address_, dynamic_metadata_); EXPECT_THAT( std::vector({{{{"destination_cluster", "fake_cluster"}}}}), testing::ContainerEq(descriptors_)); @@ -361,7 +365,7 @@ TEST_F(RateLimitPolicyEntryTest, RequestHeaders) { Http::TestRequestHeaderMapImpl header{{"x-header-name", "test_value"}}; rate_limit_entry_->populateDescriptors(route_, descriptors_, "service_cluster", header, - default_remote_address_); + default_remote_address_, dynamic_metadata_); EXPECT_THAT(std::vector({{{{"my_header_name", "test_value"}}}}), 
testing::ContainerEq(descriptors_)); } @@ -385,7 +389,7 @@ TEST_F(RateLimitPolicyEntryTest, RequestHeadersWithSkipIfAbsent) { Http::TestRequestHeaderMapImpl header{{"x-header-name", "test_value"}}; rate_limit_entry_->populateDescriptors(route_, descriptors_, "service_cluster", header, - default_remote_address_); + default_remote_address_, dynamic_metadata_); EXPECT_THAT(std::vector({{{{"my_header_name", "test_value"}}}}), testing::ContainerEq(descriptors_)); } @@ -409,7 +413,7 @@ TEST_F(RateLimitPolicyEntryTest, RequestHeadersWithDefaultSkipIfAbsent) { Http::TestRequestHeaderMapImpl header{{"x-header-test", "test_value"}}; rate_limit_entry_->populateDescriptors(route_, descriptors_, "service_cluster", header, - default_remote_address_); + default_remote_address_, dynamic_metadata_); EXPECT_TRUE(descriptors_.empty()); } @@ -425,7 +429,7 @@ TEST_F(RateLimitPolicyEntryTest, RequestHeadersNoMatch) { Http::TestRequestHeaderMapImpl header{{"x-header-name", "test_value"}}; rate_limit_entry_->populateDescriptors(route_, descriptors_, "service_cluster", header, - default_remote_address_); + default_remote_address_, dynamic_metadata_); EXPECT_TRUE(descriptors_.empty()); } @@ -438,12 +442,134 @@ TEST_F(RateLimitPolicyEntryTest, RateLimitKey) { setupTest(yaml); - rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header_, - default_remote_address_); + rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header_, default_remote_address_, + dynamic_metadata_); EXPECT_THAT(std::vector({{{{"generic_key", "fake_key"}}}}), testing::ContainerEq(descriptors_)); } +TEST_F(RateLimitPolicyEntryTest, DynamicMetaDataMatch) { + const std::string yaml = R"EOF( +actions: +- dynamic_metadata: + descriptor_key: fake_key + metadata_key: + key: 'envoy.xxx' + path: + - key: test + - key: prop + )EOF"; + + setupTest(yaml); + + std::string metadata_yaml = R"EOF( +filter_metadata: + envoy.xxx: + test: + prop: foo + )EOF"; + + envoy::config::core::v3::Metadata metadata; + TestUtility::loadFromYaml(metadata_yaml, metadata); + + rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header_, default_remote_address_, + &metadata); + + EXPECT_THAT(std::vector({{{{"fake_key", "foo"}}}}), + testing::ContainerEq(descriptors_)); +} + +TEST_F(RateLimitPolicyEntryTest, DynamicMetaDataNoMatch) { + const std::string yaml = R"EOF( +actions: +- dynamic_metadata: + descriptor_key: fake_key + metadata_key: + key: 'envoy.xxx' + path: + - key: test + - key: prop + )EOF"; + + setupTest(yaml); + + std::string metadata_yaml = R"EOF( +filter_metadata: + envoy.xxx: + another_key: + prop: foo + )EOF"; + + envoy::config::core::v3::Metadata metadata; + TestUtility::loadFromYaml(metadata_yaml, metadata); + + rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header_, default_remote_address_, + &metadata); + + EXPECT_TRUE(descriptors_.empty()); +} + +TEST_F(RateLimitPolicyEntryTest, DynamicMetaDataEmptyValue) { + const std::string yaml = R"EOF( +actions: +- dynamic_metadata: + descriptor_key: fake_key + metadata_key: + key: 'envoy.xxx' + path: + - key: test + - key: prop + )EOF"; + + setupTest(yaml); + + std::string metadata_yaml = R"EOF( +filter_metadata: + envoy.xxx: + test: + prop: "" + )EOF"; + + envoy::config::core::v3::Metadata metadata; + TestUtility::loadFromYaml(metadata_yaml, metadata); + + rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header_, default_remote_address_, + &metadata); + + EXPECT_TRUE(descriptors_.empty()); +} + +TEST_F(RateLimitPolicyEntryTest, 
DynamicMetaDataNonStringMatch) { + const std::string yaml = R"EOF( +actions: +- dynamic_metadata: + descriptor_key: fake_key + metadata_key: + key: 'envoy.xxx' + path: + - key: test + - key: prop + )EOF"; + + setupTest(yaml); + + std::string metadata_yaml = R"EOF( +filter_metadata: + envoy.xxx: + test: + prop: + foo: bar + )EOF"; + + envoy::config::core::v3::Metadata metadata; + TestUtility::loadFromYaml(metadata_yaml, metadata); + + rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header_, default_remote_address_, + &metadata); + + EXPECT_TRUE(descriptors_.empty()); +} + TEST_F(RateLimitPolicyEntryTest, HeaderValueMatch) { const std::string yaml = R"EOF( actions: @@ -457,7 +583,8 @@ TEST_F(RateLimitPolicyEntryTest, HeaderValueMatch) { setupTest(yaml); Http::TestRequestHeaderMapImpl header{{"x-header-name", "test_value"}}; - rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header, default_remote_address_); + rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header, default_remote_address_, + dynamic_metadata_); EXPECT_THAT(std::vector({{{{"header_match", "fake_value"}}}}), testing::ContainerEq(descriptors_)); } @@ -475,7 +602,8 @@ TEST_F(RateLimitPolicyEntryTest, HeaderValueMatchNoMatch) { setupTest(yaml); Http::TestRequestHeaderMapImpl header{{"x-header-name", "not_same_value"}}; - rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header, default_remote_address_); + rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header, default_remote_address_, + dynamic_metadata_); EXPECT_TRUE(descriptors_.empty()); } @@ -493,7 +621,8 @@ TEST_F(RateLimitPolicyEntryTest, HeaderValueMatchHeadersNotPresent) { setupTest(yaml); Http::TestRequestHeaderMapImpl header{{"x-header-name", "not_same_value"}}; - rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header, default_remote_address_); + rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header, default_remote_address_, + dynamic_metadata_); EXPECT_THAT(std::vector({{{{"header_match", "fake_value"}}}}), testing::ContainerEq(descriptors_)); } @@ -512,7 +641,8 @@ TEST_F(RateLimitPolicyEntryTest, HeaderValueMatchHeadersPresent) { setupTest(yaml); Http::TestRequestHeaderMapImpl header{{"x-header-name", "test_value"}}; - rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header, default_remote_address_); + rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header, default_remote_address_, + dynamic_metadata_); EXPECT_TRUE(descriptors_.empty()); } @@ -526,7 +656,7 @@ TEST_F(RateLimitPolicyEntryTest, CompoundActions) { setupTest(yaml); rate_limit_entry_->populateDescriptors(route_, descriptors_, "service_cluster", header_, - default_remote_address_); + default_remote_address_, dynamic_metadata_); EXPECT_THAT( std::vector( {{{{"destination_cluster", "fake_cluster"}, {"source_cluster", "service_cluster"}}}}), @@ -547,7 +677,7 @@ TEST_F(RateLimitPolicyEntryTest, CompoundActionsNoDescriptor) { setupTest(yaml); rate_limit_entry_->populateDescriptors(route_, descriptors_, "service_cluster", header_, - default_remote_address_); + default_remote_address_, dynamic_metadata_); EXPECT_TRUE(descriptors_.empty()); } diff --git a/test/extensions/filters/http/ratelimit/ratelimit_test.cc b/test/extensions/filters/http/ratelimit/ratelimit_test.cc index b177409534a5..e4bfb5c4b9c6 100644 --- a/test/extensions/filters/http/ratelimit/ratelimit_test.cc +++ b/test/extensions/filters/http/ratelimit/ratelimit_test.cc @@ -155,8 +155,8 @@ 
TEST_F(HttpRateLimitFilterTest, NoApplicableRateLimit) { TEST_F(HttpRateLimitFilterTest, NoDescriptor) { SetUpTest(filter_config_); - EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)).Times(1); - EXPECT_CALL(vh_rate_limit_, populateDescriptors(_, _, _, _, _)).Times(1); + EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _)).Times(1); + EXPECT_CALL(vh_rate_limit_, populateDescriptors(_, _, _, _, _, _)).Times(1); EXPECT_CALL(*client_, limit(_, _, _, _)).Times(0); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false)); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false)); @@ -190,7 +190,7 @@ TEST_F(HttpRateLimitFilterTest, OkResponse) { EXPECT_CALL(filter_callbacks_.route_->route_entry_.rate_limit_policy_, getApplicableRateLimit(0)) .Times(1); - EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)) + EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _)) .WillOnce(SetArgReferee<1>(descriptor_)); EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_.rate_limit_policy_, @@ -236,7 +236,7 @@ TEST_F(HttpRateLimitFilterTest, OkResponseWithHeaders) { EXPECT_CALL(filter_callbacks_.route_->route_entry_.rate_limit_policy_, getApplicableRateLimit(0)) .Times(1); - EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)) + EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _)) .WillOnce(SetArgReferee<1>(descriptor_)); EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_.rate_limit_policy_, @@ -291,7 +291,7 @@ TEST_F(HttpRateLimitFilterTest, ImmediateOkResponse) { SetUpTest(filter_config_); InSequence s; - EXPECT_CALL(vh_rate_limit_, populateDescriptors(_, _, _, _, _)) + EXPECT_CALL(vh_rate_limit_, populateDescriptors(_, _, _, _, _, _)) .WillOnce(SetArgReferee<1>(descriptor_)); EXPECT_CALL(*client_, limit(_, "foo", @@ -321,7 +321,7 @@ TEST_F(HttpRateLimitFilterTest, ImmediateErrorResponse) { SetUpTest(filter_config_); InSequence s; - EXPECT_CALL(vh_rate_limit_, populateDescriptors(_, _, _, _, _)) + EXPECT_CALL(vh_rate_limit_, populateDescriptors(_, _, _, _, _, _)) .WillOnce(SetArgReferee<1>(descriptor_)); EXPECT_CALL(*client_, limit(_, "foo", @@ -356,7 +356,7 @@ TEST_F(HttpRateLimitFilterTest, ErrorResponse) { SetUpTest(filter_config_); InSequence s; - EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)) + EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _)) .WillOnce(SetArgReferee<1>(descriptor_)); EXPECT_CALL(*client_, limit(_, _, _, _)) .WillOnce( @@ -389,7 +389,7 @@ TEST_F(HttpRateLimitFilterTest, ErrorResponseWithFailureModeAllowOff) { SetUpTest(fail_close_config_); InSequence s; - EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)) + EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _)) .WillOnce(SetArgReferee<1>(descriptor_)); EXPECT_CALL(*client_, limit(_, _, _, _)) .WillOnce( @@ -420,7 +420,7 @@ TEST_F(HttpRateLimitFilterTest, LimitResponse) { SetUpTest(filter_config_); InSequence s; - EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)) + EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _)) .WillOnce(SetArgReferee<1>(descriptor_)); EXPECT_CALL(*client_, limit(_, _, _, _)) .WillOnce( @@ -460,7 +460,7 @@ TEST_F(HttpRateLimitFilterTest, LimitResponseWithHeaders) { SetUpTest(filter_config_); InSequence s; - EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)) + EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, 
_, _)) .WillOnce(SetArgReferee<1>(descriptor_)); EXPECT_CALL(*client_, limit(_, _, _, _)) .WillOnce( @@ -512,7 +512,7 @@ TEST_F(HttpRateLimitFilterTest, LimitResponseRuntimeDisabled) { SetUpTest(filter_config_); InSequence s; - EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)) + EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _)) .WillOnce(SetArgReferee<1>(descriptor_)); EXPECT_CALL(*client_, limit(_, _, _, _)) .WillOnce( @@ -554,7 +554,7 @@ TEST_F(HttpRateLimitFilterTest, ResetDuringCall) { SetUpTest(filter_config_); InSequence s; - EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)) + EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _)) .WillOnce(SetArgReferee<1>(descriptor_)); EXPECT_CALL(*client_, limit(_, _, _, _)) .WillOnce( @@ -576,7 +576,7 @@ TEST_F(HttpRateLimitFilterTest, RouteRateLimitDisabledForRouteKey) { ON_CALL(runtime_.snapshot_, featureEnabled("ratelimit.test_key.http_filter_enabled", 100)) .WillByDefault(Return(false)); - EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)).Times(0); + EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _)).Times(0); EXPECT_CALL(*client_, limit(_, _, _, _)).Times(0); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false)); @@ -596,7 +596,7 @@ TEST_F(HttpRateLimitFilterTest, VirtualHostRateLimitDisabledForRouteKey) { ON_CALL(runtime_.snapshot_, featureEnabled("ratelimit.test_vh_key.http_filter_enabled", 100)) .WillByDefault(Return(false)); - EXPECT_CALL(vh_rate_limit_, populateDescriptors(_, _, _, _, _)).Times(0); + EXPECT_CALL(vh_rate_limit_, populateDescriptors(_, _, _, _, _, _)).Times(0); EXPECT_CALL(*client_, limit(_, _, _, _)).Times(0); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false)); @@ -618,8 +618,8 @@ TEST_F(HttpRateLimitFilterTest, IncorrectRequestType) { )EOF"; SetUpTest(internal_filter_config); - EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)).Times(0); - EXPECT_CALL(vh_rate_limit_, populateDescriptors(_, _, _, _, _)).Times(0); + EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _)).Times(0); + EXPECT_CALL(vh_rate_limit_, populateDescriptors(_, _, _, _, _, _)).Times(0); EXPECT_CALL(*client_, limit(_, _, _, _)).Times(0); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false)); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false)); @@ -639,8 +639,8 @@ TEST_F(HttpRateLimitFilterTest, IncorrectRequestType) { SetUpTest(external_filter_config); Http::TestRequestHeaderMapImpl request_headers{{"x-envoy-internal", "true"}}; - EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)).Times(0); - EXPECT_CALL(vh_rate_limit_, populateDescriptors(_, _, _, _, _)).Times(0); + EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _)).Times(0); + EXPECT_CALL(vh_rate_limit_, populateDescriptors(_, _, _, _, _, _)).Times(0); EXPECT_CALL(*client_, limit(_, _, _, _)).Times(0); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false)); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false)); @@ -666,7 +666,7 @@ TEST_F(HttpRateLimitFilterTest, InternalRequestType) { EXPECT_CALL(filter_callbacks_.route_->route_entry_.rate_limit_policy_, getApplicableRateLimit(0)) .Times(1); - EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)) + EXPECT_CALL(route_rate_limit_, populateDescriptors(_, 
_, _, _, _, _)) .WillOnce(SetArgReferee<1>(descriptor_)); EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_.rate_limit_policy_, @@ -711,7 +711,7 @@ TEST_F(HttpRateLimitFilterTest, ExternalRequestType) { EXPECT_CALL(filter_callbacks_.route_->route_entry_.rate_limit_policy_, getApplicableRateLimit(0)) .Times(1); - EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)) + EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _)) .WillOnce(SetArgReferee<1>(descriptor_)); EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_.rate_limit_policy_, @@ -751,7 +751,7 @@ TEST_F(HttpRateLimitFilterTest, ExcludeVirtualHost) { InSequence s; EXPECT_CALL(filter_callbacks_.route_->route_entry_.rate_limit_policy_, getApplicableRateLimit(0)); - EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)) + EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _)) .WillOnce(SetArgReferee<1>(descriptor_)); EXPECT_CALL(filter_callbacks_.route_->route_entry_, includeVirtualHostRateLimits()) diff --git a/test/mocks/router/mocks.h b/test/mocks/router/mocks.h index d1ae30fd8f5d..890a6ccef893 100644 --- a/test/mocks/router/mocks.h +++ b/test/mocks/router/mocks.h @@ -184,7 +184,8 @@ class MockRateLimitPolicyEntry : public RateLimitPolicyEntry { MOCK_METHOD(void, populateDescriptors, (const RouteEntry& route, std::vector& descriptors, const std::string& local_service_cluster, const Http::HeaderMap& headers, - const Network::Address::Instance& remote_address), + const Network::Address::Instance& remote_address, + const envoy::config::core::v3::Metadata* dynamic_metadata), (const)); uint64_t stage_{}; From 5d87553cfbb0c38f83de0db1bc543ff54c09e097 Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Fri, 26 Jun 2020 09:18:21 -0600 Subject: [PATCH 461/909] header map: various small improvements (#11744) - Remove redundant code leading to multiple static table lookups. I think this was just the result of various iterative changes over the years. - Remove the lookup() method as it is not needed with the registry anymore and had only one user. This will make writing other implementations easier. - Make the get() method do a trie lookup first to see if the header is an O(1) header. This satisfies the only user of lookup() and will also make other users faster if they are looking up O(1) headers. Note that later I intend to make the header matcher work directly with header handles to avoid this case for header matching. 
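Roughly, the lookup order described above can be pictured with a standalone sketch (this is not the Envoy implementation: a std::map stands in for the compile-time static trie of O(1) headers, plain string pairs stand in for HeaderEntryImpl, and every name below — SketchHeaderMap, static_lookup — is invented purely for illustration):

    #include <iostream>
    #include <list>
    #include <map>
    #include <string>
    #include <utility>

    struct SketchHeaderMap {
      // Stand-in for the predefined-header trie: known keys map to their cached entries.
      std::map<std::string, const std::pair<std::string, std::string>*> static_lookup;
      // Stand-in for the ordered header list that backs the full scan.
      std::list<std::pair<std::string, std::string>> headers_;

      const std::pair<std::string, std::string>* get(const std::string& key) const {
        // 1) Try the O(1) path for predefined headers first.
        auto it = static_lookup.find(key);
        if (it != static_lookup.end()) {
          return it->second;
        }
        // 2) Fall back to a full scan for everything else.
        for (const auto& h : headers_) {
          if (h.first == key) {
            return &h;
          }
        }
        return nullptr;
      }
    };

    int main() {
      SketchHeaderMap map;
      map.headers_.push_back({"content-length", "5"});
      map.static_lookup["content-length"] = &map.headers_.back(); // registered O(1) header
      map.headers_.push_back({"hello", "world"});                 // custom header

      const auto* fast = map.get("content-length"); // O(1) path
      const auto* slow = map.get("hello");          // linear-scan path
      std::cout << fast->second << " " << slow->second << "\n";   // prints "5 world"
    }

As the TODO in the diff notes, the extra trie probe is wasted work when a non-O(1) header misses, so the full scan in get() and remove() remains the dominant cost for that case; the sketch keeps the same trade-off.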
Signed-off-by: Matt Klein --- bazel/repository_locations.bzl | 8 +- include/envoy/http/header_map.h | 12 --- source/common/http/header_map_impl.cc | 85 +++++-------------- source/common/http/header_map_impl.h | 5 -- .../common/ot/opentracing_driver_impl.cc | 12 +-- test/common/http/header_map_impl_fuzz.proto | 1 - test/common/http/header_map_impl_fuzz_test.cc | 6 -- .../common/http/header_map_impl_speed_test.cc | 15 ---- test/common/http/header_map_impl_test.cc | 27 ------ test/test_common/utility.h | 3 - 10 files changed, 29 insertions(+), 145 deletions(-) diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 4d9a62e38137..6c6a949a3846 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -161,10 +161,10 @@ DEPENDENCY_REPOSITORIES = dict( cpe = "N/A", ), com_github_google_libprotobuf_mutator = dict( - sha256 = "f6def6cdf63e29a367d46c0ad9e3e31eed89d031e22e0caac126f1e62d8b3fd0", - strip_prefix = "libprotobuf-mutator-3521f47a2828da9ace403e4ecc4aece1a84feb36", - # 2020-02-04 - urls = ["https://github.com/google/libprotobuf-mutator/archive/3521f47a2828da9ace403e4ecc4aece1a84feb36.tar.gz"], + sha256 = "d51365191580c4bf5e9ff104eebcfe34f7ff5f471006d7a460c15dcb3657501c", + strip_prefix = "libprotobuf-mutator-7a2ed51a6b682a83e345ff49fc4cfd7ca47550db", + # 2020-06-25 + urls = ["https://github.com/google/libprotobuf-mutator/archive/7a2ed51a6b682a83e345ff49fc4cfd7ca47550db.tar.gz"], use_category = ["test"], ), com_github_gperftools_gperftools = dict( diff --git a/include/envoy/http/header_map.h b/include/envoy/http/header_map.h index 40969a78255b..ed72f4e52492 100644 --- a/include/envoy/http/header_map.h +++ b/include/envoy/http/header_map.h @@ -549,18 +549,6 @@ class HeaderMap { */ virtual void iterateReverse(ConstIterateCb cb, void* context) const PURE; - enum class Lookup { Found, NotFound, NotSupported }; - - /** - * Lookup one of the predefined inline headers (see ALL_INLINE_HEADERS below) by key. - * @param key supplies the header key. - * @param entry is set to the header entry if it exists and if key is one of the predefined inline - * headers; otherwise, nullptr. - * @return Lookup::Found if lookup was successful, Lookup::NotFound if the header entry doesn't - * exist, or Lookup::NotSupported if key is not one of the predefined inline headers. - */ - virtual Lookup lookup(const LowerCaseString& key, const HeaderEntry** entry) const PURE; - /** * Clears the headers in the map. */ diff --git a/source/common/http/header_map_impl.cc b/source/common/http/header_map_impl.cc index 809fa2402e6e..8803997b706d 100644 --- a/source/common/http/header_map_impl.cc +++ b/source/common/http/header_map_impl.cc @@ -333,23 +333,13 @@ void HeaderMapImpl::insertByKey(HeaderString&& key, HeaderString&& value) { } void HeaderMapImpl::addViaMove(HeaderString&& key, HeaderString&& value) { - // If this is an inline header, we can't addViaMove, because we'll overwrite - // the existing value. 
- auto* entry = getExistingInline(key.getStringView()); - if (entry != nullptr) { - const uint64_t added_size = appendToHeader(entry->value(), value.getStringView()); - addSize(added_size); - key.clear(); - value.clear(); - } else { - insertByKey(std::move(key), std::move(value)); - } + insertByKey(std::move(key), std::move(value)); } void HeaderMapImpl::addReference(const LowerCaseString& key, absl::string_view value) { HeaderString ref_key(key); HeaderString ref_value(value); - addViaMove(std::move(ref_key), std::move(ref_value)); + insertByKey(std::move(ref_key), std::move(ref_value)); } void HeaderMapImpl::addReferenceKey(const LowerCaseString& key, uint64_t value) { @@ -369,14 +359,8 @@ void HeaderMapImpl::addReferenceKey(const LowerCaseString& key, absl::string_vie } void HeaderMapImpl::addCopy(const LowerCaseString& key, uint64_t value) { - auto* entry = getExistingInline(key.get()); - if (entry != nullptr) { - char buf[32]; - StringUtil::itoa(buf, sizeof(buf), value); - const uint64_t added_size = appendToHeader(entry->value(), buf); - addSize(added_size); - return; - } + // In the case that the header is appended, we will perform a needless copy of the key and value. + // This is done on purpose to keep the code simple and should be rare. HeaderString new_key; new_key.setCopy(key.get()); HeaderString new_value; @@ -387,12 +371,8 @@ void HeaderMapImpl::addCopy(const LowerCaseString& key, uint64_t value) { } void HeaderMapImpl::addCopy(const LowerCaseString& key, absl::string_view value) { - auto* entry = getExistingInline(key.get()); - if (entry != nullptr) { - const uint64_t added_size = appendToHeader(entry->value(), value); - addSize(added_size); - return; - } + // In the case that the header is appended, we will perform a needless copy of the key and value. + // This is done on purpose to keep the code simple and should be rare. HeaderString new_key; new_key.setCopy(key.get()); HeaderString new_value; @@ -454,16 +434,25 @@ void HeaderMapImpl::verifyByteSizeInternalForTest() const { } const HeaderEntry* HeaderMapImpl::get(const LowerCaseString& key) const { - for (const HeaderEntryImpl& header : headers_) { - if (header.key() == key.get().c_str()) { - return &header; - } - } - - return nullptr; + return const_cast(this)->getExisting(key); } HeaderEntry* HeaderMapImpl::getExisting(const LowerCaseString& key) { + // Attempt a trie lookup first to see if the user is requesting an O(1) header. This may be + // relatively common in certain header matching / routing patterns. + // TODO(mattklein123): Add inline handle support directly to the header matcher code to support + // this use case more directly. + auto lookup = staticLookup(key.get()); + if (lookup.has_value()) { + return *lookup.value().entry_; + } + + // If the requested header is not an O(1) header we do a full scan. Doing the trie lookup is + // wasteful in the miss case, but is present for code consistency with other functions that do + // similar things. + // TODO(mattklein123): The full scan here and in remove() are the biggest issues with this + // implementation for certain use cases. We can either replace this with a totally different + // implementation or potentially create a lazy map if the size of the map is above a threshold. 
for (HeaderEntryImpl& header : headers_) { if (header.key() == key.get().c_str()) { return &header; @@ -489,28 +478,6 @@ void HeaderMapImpl::iterateReverse(HeaderMap::ConstIterateCb cb, void* context) } } -HeaderMap::Lookup HeaderMapImpl::lookup(const LowerCaseString& key, - const HeaderEntry** entry) const { - // The accessor callbacks for predefined inline headers take a HeaderMapImpl& as an argument; - // even though we don't make any modifications, we need to const_cast in order to use the - // accessor. - // - // Making this work without const_cast would require managing an additional const accessor - // callback for each predefined inline header and add to the complexity of the code. - auto lookup = const_cast(this)->staticLookup(key.get()); - if (lookup.has_value()) { - *entry = *lookup.value().entry_; - if (*entry) { - return HeaderMap::Lookup::Found; - } else { - return HeaderMap::Lookup::NotFound; - } - } else { - *entry = nullptr; - return HeaderMap::Lookup::NotSupported; - } -} - void HeaderMapImpl::clear() { clearInline(); headers_.clear(); @@ -601,14 +568,6 @@ HeaderMapImpl::HeaderEntryImpl& HeaderMapImpl::maybeCreateInline(HeaderEntryImpl return **entry; } -HeaderMapImpl::HeaderEntryImpl* HeaderMapImpl::getExistingInline(absl::string_view key) { - auto lookup = staticLookup(key); - if (lookup.has_value()) { - return *lookup.value().entry_; - } - return nullptr; -} - size_t HeaderMapImpl::removeInline(HeaderEntryImpl** ptr_to_entry) { if (!*ptr_to_entry) { return 0; diff --git a/source/common/http/header_map_impl.h b/source/common/http/header_map_impl.h index 30e2eab892b9..693762a21aeb 100644 --- a/source/common/http/header_map_impl.h +++ b/source/common/http/header_map_impl.h @@ -88,7 +88,6 @@ class HeaderMapImpl : NonCopyable { const HeaderEntry* get(const LowerCaseString& key) const; void iterate(HeaderMap::ConstIterateCb cb, void* context) const; void iterateReverse(HeaderMap::ConstIterateCb cb, void* context) const; - HeaderMap::Lookup lookup(const LowerCaseString& key, const HeaderEntry** entry) const; void clear(); size_t remove(const LowerCaseString& key); size_t removePrefix(const LowerCaseString& key); @@ -242,7 +241,6 @@ class HeaderMapImpl : NonCopyable { HeaderEntryImpl& maybeCreateInline(HeaderEntryImpl** entry, const LowerCaseString& key, HeaderString&& value); HeaderEntry* getExisting(const LowerCaseString& key); - HeaderEntryImpl* getExistingInline(absl::string_view key); size_t removeInline(HeaderEntryImpl** entry); void updateSize(uint64_t from_size, uint64_t to_size); void addSize(uint64_t size); @@ -306,9 +304,6 @@ template class TypedHeaderMapImpl : public HeaderMapImpl, publ void iterateReverse(HeaderMap::ConstIterateCb cb, void* context) const override { HeaderMapImpl::iterateReverse(cb, context); } - HeaderMap::Lookup lookup(const LowerCaseString& key, const HeaderEntry** entry) const override { - return HeaderMapImpl::lookup(key, entry); - } void clear() override { HeaderMapImpl::clear(); } size_t remove(const LowerCaseString& key) override { return HeaderMapImpl::remove(key); } size_t removePrefix(const LowerCaseString& key) override { diff --git a/source/extensions/tracers/common/ot/opentracing_driver_impl.cc b/source/extensions/tracers/common/ot/opentracing_driver_impl.cc index 1e1ee2e67e54..6f7fbd23aff8 100644 --- a/source/extensions/tracers/common/ot/opentracing_driver_impl.cc +++ b/source/extensions/tracers/common/ot/opentracing_driver_impl.cc @@ -45,19 +45,13 @@ class OpenTracingHTTPHeadersReader : public opentracing::HTTPHeadersReader { // 
opentracing::HTTPHeadersReader opentracing::expected LookupKey(opentracing::string_view key) const override { - const Http::HeaderEntry* entry; - Http::HeaderMap::Lookup lookup_result = - request_headers_.lookup(Http::LowerCaseString{key}, &entry); - switch (lookup_result) { - case Http::HeaderMap::Lookup::Found: + const Http::HeaderEntry* entry = request_headers_.get(Http::LowerCaseString{key}); + if (entry != nullptr) { return opentracing::string_view{entry->value().getStringView().data(), entry->value().getStringView().length()}; - case Http::HeaderMap::Lookup::NotFound: + } else { return opentracing::make_unexpected(opentracing::key_not_found_error); - case Http::HeaderMap::Lookup::NotSupported: - return opentracing::make_unexpected(opentracing::lookup_key_not_supported_error); } - NOT_REACHED_GCOVR_EXCL_LINE; } opentracing::expected ForeachKey(OpenTracingCb f) const override { diff --git a/test/common/http/header_map_impl_fuzz.proto b/test/common/http/header_map_impl_fuzz.proto index bebe373b6ae5..69e4ae244a0a 100644 --- a/test/common/http/header_map_impl_fuzz.proto +++ b/test/common/http/header_map_impl_fuzz.proto @@ -83,7 +83,6 @@ message Action { MutateAndMove mutate_and_move = 12; Append append = 11; google.protobuf.Empty copy = 7; - string lookup = 8; string remove = 9; string remove_prefix = 10; } diff --git a/test/common/http/header_map_impl_fuzz_test.cc b/test/common/http/header_map_impl_fuzz_test.cc index 5ab9e79ca2ed..7c3d8e6a4296 100644 --- a/test/common/http/header_map_impl_fuzz_test.cc +++ b/test/common/http/header_map_impl_fuzz_test.cc @@ -152,12 +152,6 @@ DEFINE_PROTO_FUZZER(const test::common::http::HeaderMapImplFuzzTestCase& input) header_map = Http::createHeaderMap(*header_map); break; } - case test::common::http::Action::kLookup: { - const Http::HeaderEntry* header_entry; - header_map->lookup(Http::LowerCaseString(replaceInvalidCharacters(action.lookup())), - &header_entry); - break; - } case test::common::http::Action::kRemove: { header_map->remove(Http::LowerCaseString(replaceInvalidCharacters(action.remove()))); break; diff --git a/test/common/http/header_map_impl_speed_test.cc b/test/common/http/header_map_impl_speed_test.cc index 1c65c3a19a13..6f45f39825cd 100644 --- a/test/common/http/header_map_impl_speed_test.cc +++ b/test/common/http/header_map_impl_speed_test.cc @@ -143,21 +143,6 @@ static void headerMapImplIterate(benchmark::State& state) { } BENCHMARK(headerMapImplIterate)->Arg(0)->Arg(1)->Arg(10)->Arg(50); -/** Measure the speed of the HeaderMapImpl lookup() method. */ -static void headerMapImplLookup(benchmark::State& state) { - const LowerCaseString key("connection"); - const std::string value("01234567890123456789"); - auto headers = Http::ResponseHeaderMapImpl::create(); - addDummyHeaders(*headers, state.range(0)); - headers->addReference(key, value); - for (auto _ : state) { - const HeaderEntry* entry = nullptr; - auto result = headers->lookup(key, &entry); - benchmark::DoNotOptimize(result); - } -} -BENCHMARK(headerMapImplLookup)->Arg(0)->Arg(1)->Arg(10)->Arg(50); - /** * Measure the speed of removing a header by key name. 
* @note The measured time for each iteration includes the time needed to add diff --git a/test/common/http/header_map_impl_test.cc b/test/common/http/header_map_impl_test.cc index 7d3b59ee0ce2..205b145ca2a3 100644 --- a/test/common/http/header_map_impl_test.cc +++ b/test/common/http/header_map_impl_test.cc @@ -873,33 +873,6 @@ TEST(HeaderMapImplTest, IterateReverse) { &cb); } -TEST(HeaderMapImplTest, Lookup) { - TestRequestHeaderMapImpl headers; - headers.addCopy(LowerCaseString("hello"), "world"); - headers.setContentLength(5); - - // Lookup is not supported for non predefined inline headers. - { - const HeaderEntry* entry; - EXPECT_EQ(HeaderMap::Lookup::NotSupported, headers.lookup(LowerCaseString{"hello"}, &entry)); - EXPECT_EQ(nullptr, entry); - } - - // Lookup returns the entry of a predefined inline header if it exists. - { - const HeaderEntry* entry; - EXPECT_EQ(HeaderMap::Lookup::Found, headers.lookup(Headers::get().ContentLength, &entry)); - EXPECT_EQ("5", entry->value().getStringView()); - } - - // Lookup returns HeaderMap::Lookup::NotFound if a predefined inline header does not exist. - { - const HeaderEntry* entry; - EXPECT_EQ(HeaderMap::Lookup::NotFound, headers.lookup(Headers::get().Host, &entry)); - EXPECT_EQ(nullptr, entry); - } -} - TEST(HeaderMapImplTest, Get) { { auto headers = TestRequestHeaderMapImpl({{Headers::get().Path.get(), "/"}, {"hello", "world"}}); diff --git a/test/test_common/utility.h b/test/test_common/utility.h index 7bbdc5548c78..e4cf7da7a6ee 100644 --- a/test/test_common/utility.h +++ b/test/test_common/utility.h @@ -842,9 +842,6 @@ template class TestHeaderMapImplBase : public Inte void iterateReverse(HeaderMap::ConstIterateCb cb, void* context) const override { header_map_->iterateReverse(cb, context); } - HeaderMap::Lookup lookup(const LowerCaseString& key, const HeaderEntry** entry) const override { - return header_map_->lookup(key, entry); - } void clear() override { header_map_->clear(); header_map_->verifyByteSizeInternalForTest(); From f40b76498d0ebf7a4bb62660aeceba42bea0010f Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Fri, 26 Jun 2020 09:28:57 -0600 Subject: [PATCH 462/909] client_ssl_auth: update to use inclusive language (#11761) Also clean up some dead code. 
Part of https://github.com/envoyproxy/envoy/issues/11596 Signed-off-by: Matt Klein --- .../client_ssl_auth/v2/client_ssl_auth.proto | 2 +- .../client_ssl_auth/v3/client_ssl_auth.proto | 6 +- .../client_ssl_auth_filter.rst | 2 +- docs/root/version_history/current.rst | 6 +- .../client_ssl_auth/v2/client_ssl_auth.proto | 2 +- .../client_ssl_auth/v3/client_ssl_auth.proto | 6 +- source/common/network/BUILD | 1 - source/common/network/cidr_range.cc | 16 -- source/common/network/cidr_range.h | 5 +- .../client_ssl_auth/client_ssl_auth.cc | 6 +- .../network/client_ssl_auth/client_ssl_auth.h | 8 +- test/common/network/BUILD | 1 - test/common/network/cidr_range_test.cc | 244 ++++++------------ test/common/stats/tag_extractor_impl_test.cc | 4 +- .../client_ssl_auth/client_ssl_auth_test.cc | 2 +- .../filters/network/ratelimit/config_test.cc | 4 +- 16 files changed, 106 insertions(+), 209 deletions(-) diff --git a/api/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto b/api/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto index d1f459078f20..4da6d97ca299 100644 --- a/api/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto +++ b/api/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto @@ -41,6 +41,6 @@ message ClientSSLAuth { // An optional list of IP address and subnet masks that should be white // listed for access by the filter. If no list is provided, there is no - // IP white list. + // IP allowlist. repeated api.v2.core.CidrRange ip_white_list = 4; } diff --git a/api/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto b/api/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto index e2da157574f8..29cd04939b8a 100644 --- a/api/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto +++ b/api/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto @@ -6,6 +6,7 @@ import "envoy/config/core/v3/address.proto"; import "google/protobuf/duration.proto"; +import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -42,6 +43,7 @@ message ClientSSLAuth { // An optional list of IP address and subnet masks that should be white // listed for access by the filter. If no list is provided, there is no - // IP white list. - repeated config.core.v3.CidrRange ip_white_list = 4; + // IP allowlist. 
+ repeated config.core.v3.CidrRange ip_white_list = 4 + [(udpa.annotations.field_migrate).rename = "ip_allowlist"]; } diff --git a/docs/root/configuration/listeners/network_filters/client_ssl_auth_filter.rst b/docs/root/configuration/listeners/network_filters/client_ssl_auth_filter.rst index c415e7b118c3..d2243f21cc44 100644 --- a/docs/root/configuration/listeners/network_filters/client_ssl_auth_filter.rst +++ b/docs/root/configuration/listeners/network_filters/client_ssl_auth_filter.rst @@ -22,7 +22,7 @@ Every configured client TLS authentication filter has statistics rooted at update_success, Counter, Total principal update successes update_failure, Counter, Total principal update failures auth_no_ssl, Counter, Total connections ignored due to no TLS - auth_ip_white_list, Counter, Total connections allowed due to the IP white list + auth_ip_allowlist, Counter, Total connections allowed due to the IP allowlist auth_digest_match, Counter, Total connections allowed due to certificate match auth_digest_no_match, Counter, Total connections denied due to no certificate match total_principals, Gauge, Total loaded principals diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index e87797afffaa..7a643f86e5b6 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -7,6 +7,8 @@ Incompatible Behavior Changes *Changes that are expected to cause an incompatibility if applicable; deployment changes are likely required* * build: official released binary is now built on Ubuntu 18.04, requires glibc >= 2.27. +* client_ssl_auth: the `auth_ip_white_list` stat has been renamed to + :ref:`auth_ip_allowlist `. Minor Behavior Changes ---------------------- @@ -40,8 +42,8 @@ Bug Fixes * http: fixed a bug where in some cases slash was moved from path to query string when :ref:`merging of adjacent slashes` is enabled. * http: fixed several bugs with applying correct connection close behavior across the http connection manager, health checker, and connection pool. This behavior may be temporarily reverted by setting runtime feature `envoy.reloadable_features.fix_connection_close` to false. * prometheus stats: fix the sort order of output lines to comply with the standard. -* udp: the :ref:`reuse_port ` listener option must now be - specified for UDP listeners if concurrency is > 1. This previously crashed so is considered a +* udp: the :ref:`reuse_port ` listener option must now be + specified for UDP listeners if concurrency is > 1. This previously crashed so is considered a bug fix. * upstream: fixed a bug where Envoy would panic when receiving a GRPC SERVICE_UNKNOWN status on the health check. diff --git a/generated_api_shadow/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto b/generated_api_shadow/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto index d1f459078f20..4da6d97ca299 100644 --- a/generated_api_shadow/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto +++ b/generated_api_shadow/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto @@ -41,6 +41,6 @@ message ClientSSLAuth { // An optional list of IP address and subnet masks that should be white // listed for access by the filter. If no list is provided, there is no - // IP white list. + // IP allowlist. 
repeated api.v2.core.CidrRange ip_white_list = 4; } diff --git a/generated_api_shadow/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto b/generated_api_shadow/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto index e2da157574f8..29cd04939b8a 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto @@ -6,6 +6,7 @@ import "envoy/config/core/v3/address.proto"; import "google/protobuf/duration.proto"; +import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -42,6 +43,7 @@ message ClientSSLAuth { // An optional list of IP address and subnet masks that should be white // listed for access by the filter. If no list is provided, there is no - // IP white list. - repeated config.core.v3.CidrRange ip_white_list = 4; + // IP allowlist. + repeated config.core.v3.CidrRange ip_white_list = 4 + [(udpa.annotations.field_migrate).rename = "ip_allowlist"]; } diff --git a/source/common/network/BUILD b/source/common/network/BUILD index 145044e99c8c..8359c50165c4 100644 --- a/source/common/network/BUILD +++ b/source/common/network/BUILD @@ -48,7 +48,6 @@ envoy_cc_library( deps = [ ":address_lib", ":utility_lib", - "//include/envoy/json:json_object_interface", "//include/envoy/network:address_interface", "//source/common/common:assert_lib", "//source/common/common:utility_lib", diff --git a/source/common/network/cidr_range.cc b/source/common/network/cidr_range.cc index 57292e0f6867..277125e17c4a 100644 --- a/source/common/network/cidr_range.cc +++ b/source/common/network/cidr_range.cc @@ -190,18 +190,6 @@ InstanceConstSharedPtr CidrRange::truncateIpAddressAndLength(InstanceConstShared NOT_REACHED_GCOVR_EXCL_LINE; } -IpList::IpList(const std::vector& subnets) { - for (const std::string& entry : subnets) { - CidrRange list_entry = CidrRange::create(entry); - if (list_entry.isValid()) { - ip_list_.push_back(list_entry); - } else { - throw EnvoyException( - fmt::format("invalid ip/mask combo '{}' (format is /<# mask bits>)", entry)); - } - } -} - IpList::IpList(const Protobuf::RepeatedPtrField& cidrs) { for (const envoy::config::core::v3::CidrRange& entry : cidrs) { CidrRange list_entry = CidrRange::create(entry); @@ -224,10 +212,6 @@ bool IpList::contains(const Instance& address) const { return false; } -IpList::IpList(const Json::Object& config, const std::string& member_name) - : IpList(config.hasObject(member_name) ? 
config.getStringArray(member_name) - : std::vector()) {} - } // namespace Address } // namespace Network } // namespace Envoy diff --git a/source/common/network/cidr_range.h b/source/common/network/cidr_range.h index 37b894a3908a..a98d0c1ef118 100644 --- a/source/common/network/cidr_range.h +++ b/source/common/network/cidr_range.h @@ -4,7 +4,6 @@ #include #include "envoy/config/core/v3/address.pb.h" -#include "envoy/json/json_object.h" #include "envoy/network/address.h" #include "common/protobuf/protobuf.h" @@ -126,9 +125,7 @@ class CidrRange { */ class IpList { public: - IpList(const std::vector& subnets); - IpList(const Json::Object& config, const std::string& member_name); - IpList(const Protobuf::RepeatedPtrField& cidrs); + explicit IpList(const Protobuf::RepeatedPtrField& cidrs); IpList() = default; bool contains(const Instance& address) const; diff --git a/source/extensions/filters/network/client_ssl_auth/client_ssl_auth.cc b/source/extensions/filters/network/client_ssl_auth/client_ssl_auth.cc index 67af3ff10bbf..fcf1bc346fcf 100644 --- a/source/extensions/filters/network/client_ssl_auth/client_ssl_auth.cc +++ b/source/extensions/filters/network/client_ssl_auth/client_ssl_auth.cc @@ -29,7 +29,7 @@ ClientSslAuthConfig::ClientSslAuthConfig( cm, config.auth_api_cluster(), dispatcher, random, std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(config, refresh_delay, 60000)), std::chrono::milliseconds(1000)), - tls_(tls.allocateSlot()), ip_white_list_(config.ip_white_list()), + tls_(tls.allocateSlot()), ip_allowlist_(config.ip_white_list()), stats_(generateStats(scope, config.stat_prefix())) { if (!cm.get(remote_cluster_name_)) { @@ -111,8 +111,8 @@ void ClientSslAuthFilter::onEvent(Network::ConnectionEvent event) { } ASSERT(read_callbacks_->connection().ssl()); - if (config_->ipWhiteList().contains(*read_callbacks_->connection().remoteAddress())) { - config_->stats().auth_ip_white_list_.inc(); + if (config_->ipAllowlist().contains(*read_callbacks_->connection().remoteAddress())) { + config_->stats().auth_ip_allowlist_.inc(); read_callbacks_->continueReading(); return; } diff --git a/source/extensions/filters/network/client_ssl_auth/client_ssl_auth.h b/source/extensions/filters/network/client_ssl_auth/client_ssl_auth.h index 967a0903e907..f766a28017da 100644 --- a/source/extensions/filters/network/client_ssl_auth/client_ssl_auth.h +++ b/source/extensions/filters/network/client_ssl_auth/client_ssl_auth.h @@ -30,7 +30,7 @@ namespace ClientSslAuth { #define ALL_CLIENT_SSL_AUTH_STATS(COUNTER, GAUGE) \ COUNTER(auth_digest_match) \ COUNTER(auth_digest_no_match) \ - COUNTER(auth_ip_white_list) \ + COUNTER(auth_ip_allowlist) \ COUNTER(auth_no_ssl) \ COUNTER(update_failure) \ COUNTER(update_success) \ @@ -70,7 +70,7 @@ using ClientSslAuthConfigSharedPtr = std::shared_ptr; /** * Global configuration for client SSL authentication. The config contacts a JSON API to fetch the * list of allowed principals, caches it, then makes auth decisions on it and any associated IP - * white list. + * allowlist. 
*/ class ClientSslAuthConfig : public Http::RestApiFetcher { public: @@ -80,7 +80,7 @@ class ClientSslAuthConfig : public Http::RestApiFetcher { Event::Dispatcher& dispatcher, Stats::Scope& scope, Runtime::RandomGenerator& random); const AllowedPrincipals& allowedPrincipals(); - const Network::Address::IpList& ipWhiteList() { return ip_white_list_; } + const Network::Address::IpList& ipAllowlist() { return ip_allowlist_; } GlobalStats& stats() { return stats_; } private: @@ -98,7 +98,7 @@ class ClientSslAuthConfig : public Http::RestApiFetcher { void onFetchFailure(Config::ConfigUpdateFailureReason reason, const EnvoyException* e) override; ThreadLocal::SlotPtr tls_; - Network::Address::IpList ip_white_list_; + Network::Address::IpList ip_allowlist_; GlobalStats stats_; }; diff --git a/test/common/network/BUILD b/test/common/network/BUILD index dafca5dc1c31..19e63d2a0cc8 100644 --- a/test/common/network/BUILD +++ b/test/common/network/BUILD @@ -65,7 +65,6 @@ envoy_cc_test( name = "cidr_range_test", srcs = ["cidr_range_test.cc"], deps = [ - "//source/common/json:json_loader_lib", "//source/common/network:address_lib", "//source/common/network:cidr_range_lib", ], diff --git a/test/common/network/cidr_range_test.cc b/test/common/network/cidr_range_test.cc index 51d9efd75435..5a30bf6cd18e 100644 --- a/test/common/network/cidr_range_test.cc +++ b/test/common/network/cidr_range_test.cc @@ -378,197 +378,109 @@ TEST(Ipv6CidrRange, BigRange) { EXPECT_FALSE(rng.isInRange(Ipv6Instance("2001:0db8:85a4::"))); } -TEST(IpListTest, Errors) { - { - std::string json = R"EOF( - { - "ip_white_list": ["foo"] - } - )EOF"; - - Json::ObjectSharedPtr loader = Json::Factory::loadFromString(json); - EXPECT_THROW({ IpList wl(*loader, "ip_white_list"); }, EnvoyException); - } - - { - std::string json = R"EOF( - { - "ip_white_list": ["foo/bar"] - } - )EOF"; - - Json::ObjectSharedPtr loader = Json::Factory::loadFromString(json); - EXPECT_THROW({ IpList wl(*loader, "ip_white_list"); }, EnvoyException); - } - - { - std::string json = R"EOF( - { - "ip_white_list": ["192.168.1.1/33"] - } - )EOF"; - - Json::ObjectSharedPtr loader = Json::Factory::loadFromString(json); - EXPECT_THROW({ IpList wl(*loader, "ip_white_list"); }, EnvoyException); - } - - { - std::string json = R"EOF( - { - "ip_white_list": ["192.168.1.1"] - } - )EOF"; - - Json::ObjectSharedPtr loader = Json::Factory::loadFromString(json); - EXPECT_THROW({ IpList wl(*loader, "ip_white_list"); }, EnvoyException); +Protobuf::RepeatedPtrField +makeCidrRangeList(const std::vector>& ranges) { + Protobuf::RepeatedPtrField ret; + for (auto& range : ranges) { + auto new_element = ret.Add(); + new_element->set_address_prefix(range.first); + new_element->mutable_prefix_len()->set_value(range.second); } + return ret; +} +TEST(IpListTest, Errors) { { - std::string json = R"EOF( - { - "ip_white_list": ["::/129"] - } - )EOF"; - - Json::ObjectSharedPtr loader = Json::Factory::loadFromString(json); - EXPECT_THROW({ IpList wl(*loader, "ip_white_list"); }, EnvoyException); + EXPECT_THROW({ IpList list(makeCidrRangeList({{"foo", 0}})); }, EnvoyException); } } TEST(IpListTest, SpecificAddressAllowed) { - std::string json = R"EOF( - { - "ip_white_list": ["192.168.1.1/24"] - } - )EOF"; - - Json::ObjectSharedPtr loader = Json::Factory::loadFromString(json); - IpList wl(*loader, "ip_white_list"); + IpList list(makeCidrRangeList({{"192.168.1.1", 24}})); - EXPECT_TRUE(wl.contains(Address::Ipv4Instance("192.168.1.0"))); - EXPECT_TRUE(wl.contains(Address::Ipv4Instance("192.168.1.3"))); - 
EXPECT_TRUE(wl.contains(Address::Ipv4Instance("192.168.1.255"))); - EXPECT_FALSE(wl.contains(Address::Ipv4Instance("192.168.3.0"))); - EXPECT_FALSE(wl.contains(Address::Ipv4Instance("192.168.0.0"))); + EXPECT_TRUE(list.contains(Address::Ipv4Instance("192.168.1.0"))); + EXPECT_TRUE(list.contains(Address::Ipv4Instance("192.168.1.3"))); + EXPECT_TRUE(list.contains(Address::Ipv4Instance("192.168.1.255"))); + EXPECT_FALSE(list.contains(Address::Ipv4Instance("192.168.3.0"))); + EXPECT_FALSE(list.contains(Address::Ipv4Instance("192.168.0.0"))); } TEST(IpListTest, Normal) { - std::string json = R"EOF( - { - "ip_white_list": [ - "192.168.3.0/24", - "50.1.2.3/32", - "10.15.0.0/16" - ] - } - )EOF"; - - Json::ObjectSharedPtr loader = Json::Factory::loadFromString(json); - IpList wl(*loader, "ip_white_list"); - - EXPECT_TRUE(wl.contains(Address::Ipv4Instance("192.168.3.0"))); - EXPECT_TRUE(wl.contains(Address::Ipv4Instance("192.168.3.3"))); - EXPECT_TRUE(wl.contains(Address::Ipv4Instance("192.168.3.255"))); - EXPECT_FALSE(wl.contains(Address::Ipv4Instance("192.168.2.255"))); - EXPECT_FALSE(wl.contains(Address::Ipv4Instance("192.168.4.0"))); - - EXPECT_TRUE(wl.contains(Address::Ipv4Instance("50.1.2.3"))); - EXPECT_FALSE(wl.contains(Address::Ipv4Instance("50.1.2.2"))); - EXPECT_FALSE(wl.contains(Address::Ipv4Instance("50.1.2.4"))); - - EXPECT_TRUE(wl.contains(Address::Ipv4Instance("10.15.0.0"))); - EXPECT_TRUE(wl.contains(Address::Ipv4Instance("10.15.90.90"))); - EXPECT_TRUE(wl.contains(Address::Ipv4Instance("10.15.255.255"))); - EXPECT_FALSE(wl.contains(Address::Ipv4Instance("10.14.255.255"))); - EXPECT_FALSE(wl.contains(Address::Ipv4Instance("10.16.0.0"))); - - EXPECT_FALSE(wl.contains(Address::Ipv6Instance("::1"))); - EXPECT_FALSE(wl.contains(Address::PipeInstance("foo"))); + IpList list(makeCidrRangeList({{"192.168.3.0", 24}, {"50.1.2.3", 32}, {"10.15.0.0", 16}})); + + EXPECT_TRUE(list.contains(Address::Ipv4Instance("192.168.3.0"))); + EXPECT_TRUE(list.contains(Address::Ipv4Instance("192.168.3.3"))); + EXPECT_TRUE(list.contains(Address::Ipv4Instance("192.168.3.255"))); + EXPECT_FALSE(list.contains(Address::Ipv4Instance("192.168.2.255"))); + EXPECT_FALSE(list.contains(Address::Ipv4Instance("192.168.4.0"))); + + EXPECT_TRUE(list.contains(Address::Ipv4Instance("50.1.2.3"))); + EXPECT_FALSE(list.contains(Address::Ipv4Instance("50.1.2.2"))); + EXPECT_FALSE(list.contains(Address::Ipv4Instance("50.1.2.4"))); + + EXPECT_TRUE(list.contains(Address::Ipv4Instance("10.15.0.0"))); + EXPECT_TRUE(list.contains(Address::Ipv4Instance("10.15.90.90"))); + EXPECT_TRUE(list.contains(Address::Ipv4Instance("10.15.255.255"))); + EXPECT_FALSE(list.contains(Address::Ipv4Instance("10.14.255.255"))); + EXPECT_FALSE(list.contains(Address::Ipv4Instance("10.16.0.0"))); + + EXPECT_FALSE(list.contains(Address::Ipv6Instance("::1"))); + EXPECT_FALSE(list.contains(Address::PipeInstance("foo"))); } TEST(IpListTest, AddressVersionMix) { - std::string json = R"EOF( - { - "ip_white_list": [ - "192.168.3.0/24", - "2001:db8:85a3::/64", - "::1/128" - ] - } - )EOF"; - - Json::ObjectSharedPtr loader = Json::Factory::loadFromString(json); - IpList wl(*loader, "ip_white_list"); - - EXPECT_TRUE(wl.contains(Address::Ipv4Instance("192.168.3.0"))); - EXPECT_TRUE(wl.contains(Address::Ipv4Instance("192.168.3.3"))); - EXPECT_TRUE(wl.contains(Address::Ipv4Instance("192.168.3.255"))); - EXPECT_FALSE(wl.contains(Address::Ipv4Instance("192.168.2.255"))); - EXPECT_FALSE(wl.contains(Address::Ipv4Instance("192.168.4.0"))); - - 
EXPECT_TRUE(wl.contains(Address::Ipv6Instance("2001:db8:85a3::"))); - EXPECT_TRUE(wl.contains(Address::Ipv6Instance("2001:db8:85a3:0:1::"))); - EXPECT_TRUE(wl.contains(Address::Ipv6Instance("2001:db8:85a3::ffff:ffff:ffff:ffff"))); - EXPECT_TRUE(wl.contains(Address::Ipv6Instance("2001:db8:85a3::ffff"))); - EXPECT_TRUE(wl.contains(Address::Ipv6Instance("2001:db8:85a3::1"))); - EXPECT_FALSE(wl.contains(Address::Ipv6Instance("2001:db8:85a3:1::"))); - EXPECT_FALSE(wl.contains(Address::Ipv6Instance("2002:db8:85a3::"))); - - EXPECT_TRUE(wl.contains(Address::Ipv6Instance("::1"))); - EXPECT_FALSE(wl.contains(Address::Ipv6Instance("::"))); - - EXPECT_FALSE(wl.contains(Address::PipeInstance("foo"))); + IpList list(makeCidrRangeList({{"192.168.3.0", 24}, {"2001:db8:85a3::", 64}, {"::1", 128}})); + + EXPECT_TRUE(list.contains(Address::Ipv4Instance("192.168.3.0"))); + EXPECT_TRUE(list.contains(Address::Ipv4Instance("192.168.3.3"))); + EXPECT_TRUE(list.contains(Address::Ipv4Instance("192.168.3.255"))); + EXPECT_FALSE(list.contains(Address::Ipv4Instance("192.168.2.255"))); + EXPECT_FALSE(list.contains(Address::Ipv4Instance("192.168.4.0"))); + + EXPECT_TRUE(list.contains(Address::Ipv6Instance("2001:db8:85a3::"))); + EXPECT_TRUE(list.contains(Address::Ipv6Instance("2001:db8:85a3:0:1::"))); + EXPECT_TRUE(list.contains(Address::Ipv6Instance("2001:db8:85a3::ffff:ffff:ffff:ffff"))); + EXPECT_TRUE(list.contains(Address::Ipv6Instance("2001:db8:85a3::ffff"))); + EXPECT_TRUE(list.contains(Address::Ipv6Instance("2001:db8:85a3::1"))); + EXPECT_FALSE(list.contains(Address::Ipv6Instance("2001:db8:85a3:1::"))); + EXPECT_FALSE(list.contains(Address::Ipv6Instance("2002:db8:85a3::"))); + + EXPECT_TRUE(list.contains(Address::Ipv6Instance("::1"))); + EXPECT_FALSE(list.contains(Address::Ipv6Instance("::"))); + + EXPECT_FALSE(list.contains(Address::PipeInstance("foo"))); } TEST(IpListTest, MatchAny) { - std::string json = R"EOF( - { - "ip_white_list": [ - "0.0.0.0/0" - ] - } - )EOF"; - - Json::ObjectSharedPtr loader = Json::Factory::loadFromString(json); - IpList wl(*loader, "ip_white_list"); + IpList list(makeCidrRangeList({{"0.0.0.0", 0}})); - EXPECT_TRUE(wl.contains(Address::Ipv4Instance("192.168.3.3"))); - EXPECT_TRUE(wl.contains(Address::Ipv4Instance("192.168.3.0"))); - EXPECT_TRUE(wl.contains(Address::Ipv4Instance("192.168.3.255"))); - EXPECT_TRUE(wl.contains(Address::Ipv4Instance("192.168.0.0"))); - EXPECT_TRUE(wl.contains(Address::Ipv4Instance("192.0.0.0"))); - EXPECT_TRUE(wl.contains(Address::Ipv4Instance("1.1.1.1"))); + EXPECT_TRUE(list.contains(Address::Ipv4Instance("192.168.3.3"))); + EXPECT_TRUE(list.contains(Address::Ipv4Instance("192.168.3.0"))); + EXPECT_TRUE(list.contains(Address::Ipv4Instance("192.168.3.255"))); + EXPECT_TRUE(list.contains(Address::Ipv4Instance("192.168.0.0"))); + EXPECT_TRUE(list.contains(Address::Ipv4Instance("192.0.0.0"))); + EXPECT_TRUE(list.contains(Address::Ipv4Instance("1.1.1.1"))); - EXPECT_FALSE(wl.contains(Address::Ipv6Instance("::1"))); - EXPECT_FALSE(wl.contains(Address::PipeInstance("foo"))); + EXPECT_FALSE(list.contains(Address::Ipv6Instance("::1"))); + EXPECT_FALSE(list.contains(Address::PipeInstance("foo"))); } TEST(IpListTest, MatchAnyAll) { - std::string json = R"EOF( - { - "ip_white_list": [ - "0.0.0.0/0", - "::/0" - ] - } - )EOF"; - - Json::ObjectSharedPtr loader = Json::Factory::loadFromString(json); - IpList wl(*loader, "ip_white_list"); + IpList list(makeCidrRangeList({{"0.0.0.0", 0}, {"::", 0}})); - EXPECT_TRUE(wl.contains(Address::Ipv4Instance("192.168.3.3"))); - 
EXPECT_TRUE(wl.contains(Address::Ipv4Instance("192.168.3.0"))); - EXPECT_TRUE(wl.contains(Address::Ipv4Instance("192.168.3.255"))); - EXPECT_TRUE(wl.contains(Address::Ipv4Instance("192.168.0.0"))); - EXPECT_TRUE(wl.contains(Address::Ipv4Instance("192.0.0.0"))); - EXPECT_TRUE(wl.contains(Address::Ipv4Instance("1.1.1.1"))); + EXPECT_TRUE(list.contains(Address::Ipv4Instance("192.168.3.3"))); + EXPECT_TRUE(list.contains(Address::Ipv4Instance("192.168.3.0"))); + EXPECT_TRUE(list.contains(Address::Ipv4Instance("192.168.3.255"))); + EXPECT_TRUE(list.contains(Address::Ipv4Instance("192.168.0.0"))); + EXPECT_TRUE(list.contains(Address::Ipv4Instance("192.0.0.0"))); + EXPECT_TRUE(list.contains(Address::Ipv4Instance("1.1.1.1"))); - EXPECT_TRUE(wl.contains(Address::Ipv6Instance("::1"))); - EXPECT_TRUE(wl.contains(Address::Ipv6Instance("::"))); - EXPECT_TRUE(wl.contains(Address::Ipv6Instance("2001:db8:85a3::"))); - EXPECT_TRUE(wl.contains(Address::Ipv6Instance("ffee::"))); + EXPECT_TRUE(list.contains(Address::Ipv6Instance("::1"))); + EXPECT_TRUE(list.contains(Address::Ipv6Instance("::"))); + EXPECT_TRUE(list.contains(Address::Ipv6Instance("2001:db8:85a3::"))); + EXPECT_TRUE(list.contains(Address::Ipv6Instance("ffee::"))); - EXPECT_FALSE(wl.contains(Address::PipeInstance("foo"))); + EXPECT_FALSE(list.contains(Address::PipeInstance("foo"))); } } // namespace diff --git a/test/common/stats/tag_extractor_impl_test.cc b/test/common/stats/tag_extractor_impl_test.cc index c80fbf7047df..5ca2bb933b73 100644 --- a/test/common/stats/tag_extractor_impl_test.cc +++ b/test/common/stats/tag_extractor_impl_test.cc @@ -306,8 +306,8 @@ TEST(TagExtractorTest, DefaultTagExtractors) { client_ssl.name_ = tag_names.CLIENTSSL_PREFIX; client_ssl.value_ = "clientssl_prefix"; - regex_tester.testRegex("auth.clientssl.clientssl_prefix.auth_ip_white_list", - "auth.clientssl.auth_ip_white_list", {client_ssl}); + regex_tester.testRegex("auth.clientssl.clientssl_prefix.auth_ip_allowlist", + "auth.clientssl.auth_ip_allowlist", {client_ssl}); // TCP Prefix Tag tcp_prefix; diff --git a/test/extensions/filters/network/client_ssl_auth/client_ssl_auth_test.cc b/test/extensions/filters/network/client_ssl_auth/client_ssl_auth_test.cc index 634d780a0170..ee92f3dbdb19 100644 --- a/test/extensions/filters/network/client_ssl_auth/client_ssl_auth_test.cc +++ b/test/extensions/filters/network/client_ssl_auth/client_ssl_auth_test.cc @@ -215,7 +215,7 @@ TEST_F(ClientSslAuthFilterTest, Ssl) { filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); EXPECT_EQ(1U, stats_store_.counter("auth.clientssl.vpn.update_success").value()); - EXPECT_EQ(2U, stats_store_.counter("auth.clientssl.vpn.auth_ip_white_list").value()); + EXPECT_EQ(2U, stats_store_.counter("auth.clientssl.vpn.auth_ip_allowlist").value()); EXPECT_EQ(1U, stats_store_.counter("auth.clientssl.vpn.auth_digest_match").value()); EXPECT_EQ(1U, stats_store_.counter("auth.clientssl.vpn.auth_digest_no_match").value()); diff --git a/test/extensions/filters/network/ratelimit/config_test.cc b/test/extensions/filters/network/ratelimit/config_test.cc index 9920c9dbfe1e..d8bbeff1bd7e 100644 --- a/test/extensions/filters/network/ratelimit/config_test.cc +++ b/test/extensions/filters/network/ratelimit/config_test.cc @@ -75,12 +75,12 @@ domain: fake_domain - entries: - key: my_key value: my_value -ip_white_list: '12' +ip_allowlist: '12' )EOF"; envoy::extensions::filters::network::ratelimit::v3::RateLimit proto_config; EXPECT_THROW_WITH_REGEX(TestUtility::loadFromYaml(yaml_string, 
proto_config), EnvoyException, - "ip_white_list: Cannot find field"); + "ip_allowlist: Cannot find field"); } // Test that the deprecated extension name still functions. From fc8c79ad4196d7b4422d8917b07c673d353328db Mon Sep 17 00:00:00 2001 From: Dhi Aurrahman Date: Fri, 26 Jun 2020 22:30:48 +0700 Subject: [PATCH 463/909] http, async-client: Add `onBeforeFinalizeUpstreamSpan` (#11730) This allows the receiver to modify the current upstream span before it is finalized by the async client. This also updates ext_authz (HTTP service) and Lua (httpCall() API) to produce traces based on HTTP async client's internal tracing mechanism. Signed-off-by: Dhi Aurrahman --- docs/root/version_history/current.rst | 1 + include/envoy/http/async_client.h | 8 + source/common/config/remote_data_fetcher.h | 2 + source/common/http/async_client_impl.cc | 14 +- source/common/http/rest_api_fetcher.h | 2 + source/common/router/shadow_writer_impl.h | 2 + .../common/ext_authz/ext_authz_http_impl.cc | 62 +++---- .../common/ext_authz/ext_authz_http_impl.h | 8 +- .../filters/http/common/jwks_fetcher.cc | 2 + .../filters/http/ext_authz/config.cc | 2 +- .../extensions/filters/http/lua/lua_filter.cc | 18 +- .../extensions/filters/http/lua/lua_filter.h | 13 +- .../filters/http/squash/squash_filter.h | 1 + .../tracers/datadog/datadog_tracer_impl.h | 1 + .../tracers/lightstep/lightstep_tracer_impl.h | 1 + .../tracers/zipkin/zipkin_tracer_impl.h | 1 + test/common/http/async_client_impl_test.cc | 38 +++- .../ext_authz/ext_authz_http_impl_test.cc | 162 +++--------------- .../filters/http/ext_authz/config_test.cc | 1 - .../filters/http/lua/lua_filter_test.cc | 5 + test/mocks/http/mocks.h | 2 + 21 files changed, 150 insertions(+), 196 deletions(-) diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 7a643f86e5b6..0166007807d6 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -101,6 +101,7 @@ New Features interested in; behavior is allowed based on new "envoy.lrs.supports_send_all_clusters" capability in :ref:`client_features` field. * lrs: updated to allow to explicitly set the API version of gRPC service endpoint and message to be used. +* lua: added tracing to the ``httpCall()`` API. * metrics service: added added :ref:`API version ` to explicitly set the version of gRPC service endpoint and message to be used. * network filters: added a :ref:`postgres proxy filter `. * network filters: added a :ref:`rocketmq proxy filter `. diff --git a/include/envoy/http/async_client.h b/include/envoy/http/async_client.h index 65aa72dc8c74..066ccb04e716 100644 --- a/include/envoy/http/async_client.h +++ b/include/envoy/http/async_client.h @@ -70,6 +70,14 @@ class AsyncClient { * @param reason failure reason */ virtual void onFailure(const Request& request, FailureReason reason) PURE; + + /** + * Called before finalizing upstream span when the request is complete or reset. + * @param span a tracing span to fill with extra tags. + * @param response_headers the response headers. 
+ */ + virtual void onBeforeFinalizeUpstreamSpan(Envoy::Tracing::Span& span, + const Http::ResponseHeaderMap* response_headers) PURE; }; /** diff --git a/source/common/config/remote_data_fetcher.h b/source/common/config/remote_data_fetcher.h index 34a7863ff2f0..6ffe0b052ef0 100644 --- a/source/common/config/remote_data_fetcher.h +++ b/source/common/config/remote_data_fetcher.h @@ -53,6 +53,8 @@ class RemoteDataFetcher : public Logger::Loggable, void onSuccess(const Http::AsyncClient::Request&, Http::ResponseMessagePtr&& response) override; void onFailure(const Http::AsyncClient::Request&, Http::AsyncClient::FailureReason reason) override; + void onBeforeFinalizeUpstreamSpan(Envoy::Tracing::Span&, + const Http::ResponseHeaderMap*) override {} /** * Fetch data from remote. diff --git a/source/common/http/async_client_impl.cc b/source/common/http/async_client_impl.cc index 6a55567bc8db..ce46de45a374 100644 --- a/source/common/http/async_client_impl.cc +++ b/source/common/http/async_client_impl.cc @@ -240,7 +240,6 @@ AsyncRequestImpl::AsyncRequestImpl(RequestMessagePtr&& request, AsyncClientImpl& AsyncClient::Callbacks& callbacks, const AsyncClient::RequestOptions& options) : AsyncStreamImpl(parent, *this, options), request_(std::move(request)), callbacks_(callbacks) { - if (nullptr != options.parent_span_) { const std::string child_span_name = options.child_span_name_.empty() @@ -266,6 +265,8 @@ void AsyncRequestImpl::initialize() { } void AsyncRequestImpl::onComplete() { + callbacks_.onBeforeFinalizeUpstreamSpan(*child_span_, &response_->headers()); + Tracing::HttpTracerUtility::finalizeUpstreamSpan(*child_span_, &response_->headers(), response_->trailers(), streamInfo(), Tracing::EgressConfig::get()); @@ -293,12 +294,15 @@ void AsyncRequestImpl::onTrailers(ResponseTrailerMapPtr&& trailers) { void AsyncRequestImpl::onReset() { if (!cancelled_) { - // Add tags about reset. - child_span_->setTag(Tracing::Tags::get().Error, Tracing::Tags::get().True); + // Set "error reason" tag related to reset. The tagging for "error true" is done inside the + // Tracing::HttpTracerUtility::finalizeUpstreamSpan. child_span_->setTag(Tracing::Tags::get().ErrorReason, "Reset"); } - // Finalize the span based on whether we received a response or not + callbacks_.onBeforeFinalizeUpstreamSpan(*child_span_, + remoteClosed() ? &response_->headers() : nullptr); + + // Finalize the span based on whether we received a response or not. Tracing::HttpTracerUtility::finalizeUpstreamSpan( *child_span_, remoteClosed() ? &response_->headers() : nullptr, remoteClosed() ? response_->trailers() : nullptr, streamInfo(), Tracing::EgressConfig::get()); @@ -312,7 +316,7 @@ void AsyncRequestImpl::onReset() { void AsyncRequestImpl::cancel() { cancelled_ = true; - // Add tags about the cancellation + // Add tags about the cancellation. 
child_span_->setTag(Tracing::Tags::get().Canceled, Tracing::Tags::get().True); reset(); diff --git a/source/common/http/rest_api_fetcher.h b/source/common/http/rest_api_fetcher.h index f7dfa76dcde3..7dac6f3ae836 100644 --- a/source/common/http/rest_api_fetcher.h +++ b/source/common/http/rest_api_fetcher.h @@ -65,6 +65,8 @@ class RestApiFetcher : public Http::AsyncClient::Callbacks { void onSuccess(const Http::AsyncClient::Request&, Http::ResponseMessagePtr&& response) override; void onFailure(const Http::AsyncClient::Request&, Http::AsyncClient::FailureReason reason) override; + void onBeforeFinalizeUpstreamSpan(Envoy::Tracing::Span&, + const Http::ResponseHeaderMap*) override {} Runtime::RandomGenerator& random_; const std::chrono::milliseconds refresh_interval_; diff --git a/source/common/router/shadow_writer_impl.h b/source/common/router/shadow_writer_impl.h index 2224912e8856..c65748e3325e 100644 --- a/source/common/router/shadow_writer_impl.h +++ b/source/common/router/shadow_writer_impl.h @@ -26,6 +26,8 @@ class ShadowWriterImpl : Logger::Loggable, // Http::AsyncClient::Callbacks void onSuccess(const Http::AsyncClient::Request&, Http::ResponseMessagePtr&&) override {} void onFailure(const Http::AsyncClient::Request&, Http::AsyncClient::FailureReason) override {} + void onBeforeFinalizeUpstreamSpan(Envoy::Tracing::Span&, + const Http::ResponseHeaderMap*) override {} private: Upstream::ClusterManager& cm_; diff --git a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc index 97ccaa25cbfa..3d5f27165871 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc +++ b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc @@ -205,23 +205,15 @@ ClientConfig::toUpstreamMatchers(const envoy::type::matcher::v3::ListStringMatch createStringMatchers(list, disable_lowercase_string_matcher)); } -RawHttpClientImpl::RawHttpClientImpl(Upstream::ClusterManager& cm, ClientConfigSharedPtr config, - TimeSource& time_source) - : cm_(cm), config_(config), time_source_(time_source) {} +RawHttpClientImpl::RawHttpClientImpl(Upstream::ClusterManager& cm, ClientConfigSharedPtr config) + : cm_(cm), config_(config) {} -RawHttpClientImpl::~RawHttpClientImpl() { - ASSERT(callbacks_ == nullptr); - ASSERT(span_ == nullptr); -} +RawHttpClientImpl::~RawHttpClientImpl() { ASSERT(callbacks_ == nullptr); } void RawHttpClientImpl::cancel() { ASSERT(callbacks_ != nullptr); - ASSERT(span_ != nullptr); - span_->setTag(Tracing::Tags::get().Status, Tracing::Tags::get().Canceled); - span_->finishSpan(); request_->cancel(); callbacks_ = nullptr; - span_ = nullptr; } // Client @@ -230,11 +222,7 @@ void RawHttpClientImpl::check(RequestCallbacks& callbacks, Tracing::Span& parent_span, const StreamInfo::StreamInfo& stream_info) { ASSERT(callbacks_ == nullptr); - ASSERT(span_ == nullptr); callbacks_ = &callbacks; - span_ = parent_span.spawnChild(Tracing::EgressConfig::get(), config_->tracingName(), - time_source_.systemTime()); - span_->setTag(Tracing::Tags::get().UpstreamCluster, config_->cluster()); Http::RequestHeaderMapPtr headers; const uint64_t request_length = request.attributes().request().http().body().size(); @@ -275,51 +263,47 @@ void RawHttpClientImpl::check(RequestCallbacks& callbacks, // It's possible that the cluster specified in the filter configuration no longer exists due to a // CDS removal. if (cm_.get(cluster) == nullptr) { - // TODO(dio): Add stats and tracing related to this. 
+ // TODO(dio): Add stats related to this. ENVOY_LOG(debug, "ext_authz cluster '{}' does not exist", cluster); callbacks_->onComplete(std::make_unique(errorResponse())); - span_->setTag(Tracing::Tags::get().Error, Tracing::Tags::get().True); - span_->finishSpan(); callbacks_ = nullptr; - span_ = nullptr; } else { - span_->injectContext(message->headers()); - request_ = cm_.httpAsyncClientForCluster(cluster).send( - std::move(message), *this, - Http::AsyncClient::RequestOptions().setTimeout(config_->timeout())); + auto options = Http::AsyncClient::RequestOptions() + .setTimeout(config_->timeout()) + .setParentSpan(parent_span) + .setChildSpanName(config_->tracingName()); + + request_ = cm_.httpAsyncClientForCluster(cluster).send(std::move(message), *this, options); } } void RawHttpClientImpl::onSuccess(const Http::AsyncClient::Request&, Http::ResponseMessagePtr&& message) { callbacks_->onComplete(toResponse(std::move(message))); - span_->finishSpan(); callbacks_ = nullptr; - span_ = nullptr; } void RawHttpClientImpl::onFailure(const Http::AsyncClient::Request&, Http::AsyncClient::FailureReason reason) { ASSERT(reason == Http::AsyncClient::FailureReason::Reset); callbacks_->onComplete(std::make_unique(errorResponse())); - span_->setTag(Tracing::Tags::get().Error, Tracing::Tags::get().True); - span_->finishSpan(); callbacks_ = nullptr; - span_ = nullptr; } -ResponsePtr RawHttpClientImpl::toResponse(Http::ResponseMessagePtr message) { - // Set an error status if parsing status code fails. A Forbidden response is sent to the client - // if the filter has not been configured with failure_mode_allow. - uint64_t status_code{}; - if (!absl::SimpleAtoi(message->headers().getStatusValue(), &status_code)) { - ENVOY_LOG(warn, "ext_authz HTTP client failed to parse the HTTP status code."); - span_->setTag(Tracing::Tags::get().Error, Tracing::Tags::get().True); - return std::make_unique(errorResponse()); +void RawHttpClientImpl::onBeforeFinalizeUpstreamSpan( + Tracing::Span& span, const Http::ResponseHeaderMap* response_headers) { + if (response_headers != nullptr) { + const uint64_t status_code = Http::Utility::getResponseStatus(*response_headers); + span.setTag(TracingConstants::get().HttpStatus, + Http::CodeUtility::toString(static_cast(status_code))); + span.setTag(TracingConstants::get().TraceStatus, status_code == enumToInt(Http::Code::OK) + ? TracingConstants::get().TraceOk + : TracingConstants::get().TraceUnauthz); } +} - span_->setTag(TracingConstants::get().HttpStatus, - Http::CodeUtility::toString(static_cast(status_code))); +ResponsePtr RawHttpClientImpl::toResponse(Http::ResponseMessagePtr message) { + const uint64_t status_code = Http::Utility::getResponseStatus(message->headers()); // Set an error status if the call to the authorization server returns any of the 5xx HTTP error // codes. 
A Forbidden response is sent to the client if the filter has not been configured with @@ -334,7 +318,6 @@ ResponsePtr RawHttpClientImpl::toResponse(Http::ResponseMessagePtr message) { config_->upstreamHeaderToAppendMatchers(), Response{CheckStatus::OK, Http::HeaderVector{}, Http::HeaderVector{}, Http::HeaderVector{}, EMPTY_STRING, Http::Code::OK}}; - span_->setTag(TracingConstants::get().TraceStatus, TracingConstants::get().TraceOk); return std::move(ok.response_); } @@ -344,7 +327,6 @@ ResponsePtr RawHttpClientImpl::toResponse(Http::ResponseMessagePtr message) { Response{CheckStatus::Denied, Http::HeaderVector{}, Http::HeaderVector{}, Http::HeaderVector{}, message->bodyAsString(), static_cast(status_code)}}; - span_->setTag(TracingConstants::get().TraceStatus, TracingConstants::get().TraceUnauthz); return std::move(denied.response_); } diff --git a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.h b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.h index 4bcb5741bba5..8f5abd684379 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.h +++ b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.h @@ -154,8 +154,7 @@ class RawHttpClientImpl : public Client, public Http::AsyncClient::Callbacks, Logger::Loggable { public: - explicit RawHttpClientImpl(Upstream::ClusterManager& cm, ClientConfigSharedPtr config, - TimeSource& time_source); + explicit RawHttpClientImpl(Upstream::ClusterManager& cm, ClientConfigSharedPtr config); ~RawHttpClientImpl() override; // ExtAuthz::Client @@ -167,15 +166,16 @@ class RawHttpClientImpl : public Client, void onSuccess(const Http::AsyncClient::Request&, Http::ResponseMessagePtr&& message) override; void onFailure(const Http::AsyncClient::Request&, Http::AsyncClient::FailureReason reason) override; + void onBeforeFinalizeUpstreamSpan(Tracing::Span& span, + const Http::ResponseHeaderMap* response_headers) override; private: ResponsePtr toResponse(Http::ResponseMessagePtr message); + Upstream::ClusterManager& cm_; ClientConfigSharedPtr config_; Http::AsyncClient::Request* request_{}; RequestCallbacks* callbacks_{}; - TimeSource& time_source_; - Tracing::SpanPtr span_; }; } // namespace ExtAuthz diff --git a/source/extensions/filters/http/common/jwks_fetcher.cc b/source/extensions/filters/http/common/jwks_fetcher.cc index 3406879727c7..f1711d3ef203 100644 --- a/source/extensions/filters/http/common/jwks_fetcher.cc +++ b/source/extensions/filters/http/common/jwks_fetcher.cc @@ -102,6 +102,8 @@ class JwksFetcherImpl : public JwksFetcher, reset(); } + void onBeforeFinalizeUpstreamSpan(Tracing::Span&, const Http::ResponseHeaderMap*) override {} + private: Upstream::ClusterManager& cm_; bool complete_{}; diff --git a/source/extensions/filters/http/ext_authz/config.cc b/source/extensions/filters/http/ext_authz/config.cc index 255329029dfa..f5808ee7fdf5 100644 --- a/source/extensions/filters/http/ext_authz/config.cc +++ b/source/extensions/filters/http/ext_authz/config.cc @@ -37,7 +37,7 @@ Http::FilterFactoryCb ExtAuthzFilterConfig::createFilterFactoryFromProtoTyped( callback = [filter_config, client_config, &context](Http::FilterChainFactoryCallbacks& callbacks) { auto client = std::make_unique( - context.clusterManager(), client_config, context.timeSource()); + context.clusterManager(), client_config); callbacks.addStreamDecoderFilter(Http::StreamDecoderFilterSharedPtr{ std::make_shared(filter_config, std::move(client))}); }; diff --git a/source/extensions/filters/http/lua/lua_filter.cc 
b/source/extensions/filters/http/lua/lua_filter.cc index d63c109fd647..918fe1bec293 100644 --- a/source/extensions/filters/http/lua/lua_filter.cc +++ b/source/extensions/filters/http/lua/lua_filter.cc @@ -101,6 +101,7 @@ void buildHeadersFromTable(Http::HeaderMap& headers, lua_State* state, int table } Http::AsyncClient::Request* makeHttpCall(lua_State* state, Filter& filter, + Tracing::Span& parent_span, Http::AsyncClient::Callbacks& callbacks) { const std::string cluster = luaL_checkstring(state, 2); luaL_checktype(state, 3, LUA_TTABLE); @@ -135,8 +136,9 @@ Http::AsyncClient::Request* makeHttpCall(lua_State* state, Filter& filter, timeout = std::chrono::milliseconds(timeout_ms); } - return filter.clusterManager().httpAsyncClientForCluster(cluster).send( - std::move(message), callbacks, Http::AsyncClient::RequestOptions().setTimeout(timeout)); + auto options = Http::AsyncClient::RequestOptions().setTimeout(timeout).setParentSpan(parent_span); + return filter.clusterManager().httpAsyncClientForCluster(cluster).send(std::move(message), + callbacks, options); } } // namespace @@ -270,14 +272,14 @@ int StreamHandleWrapper::luaHttpCall(lua_State* state) { } if (lua_toboolean(state, async_flag_index)) { - return luaHttpCallAsynchronous(state); + return doAsynchronousHttpCall(state, callbacks_.activeSpan()); } else { - return luaHttpCallSynchronous(state); + return doSynchronousHttpCall(state, callbacks_.activeSpan()); } } -int StreamHandleWrapper::luaHttpCallSynchronous(lua_State* state) { - http_request_ = makeHttpCall(state, filter_, *this); +int StreamHandleWrapper::doSynchronousHttpCall(lua_State* state, Tracing::Span& span) { + http_request_ = makeHttpCall(state, filter_, span, *this); if (http_request_) { state_ = State::HttpCall; return lua_yield(state, 0); @@ -288,8 +290,8 @@ int StreamHandleWrapper::luaHttpCallSynchronous(lua_State* state) { } } -int StreamHandleWrapper::luaHttpCallAsynchronous(lua_State* state) { - makeHttpCall(state, filter_, noopCallbacks()); +int StreamHandleWrapper::doAsynchronousHttpCall(lua_State* state, Tracing::Span& span) { + makeHttpCall(state, filter_, span, noopCallbacks()); return 0; } diff --git a/source/extensions/filters/http/lua/lua_filter.h b/source/extensions/filters/http/lua/lua_filter.h index 88725c50ef40..ce0e27cf5ec7 100644 --- a/source/extensions/filters/http/lua/lua_filter.h +++ b/source/extensions/filters/http/lua/lua_filter.h @@ -69,6 +69,11 @@ class FilterCallbacks { * @return const Network::Connection* the current network connection handle. */ virtual const Network::Connection* connection() const PURE; + + /** + * @return const Tracing::Span& the current tracing active span. 
+ */ + virtual Tracing::Span& activeSpan() PURE; }; class Filter; @@ -236,8 +241,8 @@ class StreamHandleWrapper : public Filters::Common::Lua::BaseLuaObject { const ProtobufWkt::Struct& metadata() const override; StreamInfo::StreamInfo& streamInfo() override { return callbacks_->streamInfo(); } const Network::Connection* connection() const override { return callbacks_->connection(); } + Tracing::Span& activeSpan() override { return callbacks_->activeSpan(); } Filter& parent_; Http::StreamDecoderFilterCallbacks* callbacks_{}; @@ -402,6 +410,7 @@ class Filter : public Http::StreamFilter, Logger::Loggable { const ProtobufWkt::Struct& metadata() const override; StreamInfo::StreamInfo& streamInfo() override { return callbacks_->streamInfo(); } const Network::Connection* connection() const override { return callbacks_->connection(); } + Tracing::Span& activeSpan() override { return callbacks_->activeSpan(); } Filter& parent_; Http::StreamEncoderFilterCallbacks* callbacks_{}; diff --git a/source/extensions/filters/http/squash/squash_filter.h b/source/extensions/filters/http/squash/squash_filter.h index f1b8446a132f..d654e7f22088 100644 --- a/source/extensions/filters/http/squash/squash_filter.h +++ b/source/extensions/filters/http/squash/squash_filter.h @@ -68,6 +68,7 @@ class AsyncClientCallbackShim : public Http::AsyncClient::Callbacks { void onFailure(const Http::AsyncClient::Request&, Http::AsyncClient::FailureReason f) override { on_fail_(f); } + void onBeforeFinalizeUpstreamSpan(Tracing::Span&, const Http::ResponseHeaderMap*) override {} private: const std::function on_success_; diff --git a/source/extensions/tracers/datadog/datadog_tracer_impl.h b/source/extensions/tracers/datadog/datadog_tracer_impl.h index 87c48ffd2eb0..b3dc01d6a7cf 100644 --- a/source/extensions/tracers/datadog/datadog_tracer_impl.h +++ b/source/extensions/tracers/datadog/datadog_tracer_impl.h @@ -109,6 +109,7 @@ class TraceReporter : public Http::AsyncClient::Callbacks, // Http::AsyncClient::Callbacks. void onSuccess(const Http::AsyncClient::Request&, Http::ResponseMessagePtr&&) override; void onFailure(const Http::AsyncClient::Request&, Http::AsyncClient::FailureReason) override; + void onBeforeFinalizeUpstreamSpan(Tracing::Span&, const Http::ResponseHeaderMap*) override {} private: /** diff --git a/source/extensions/tracers/lightstep/lightstep_tracer_impl.h b/source/extensions/tracers/lightstep/lightstep_tracer_impl.h index 5a67bc8575b8..e99d92b5346e 100644 --- a/source/extensions/tracers/lightstep/lightstep_tracer_impl.h +++ b/source/extensions/tracers/lightstep/lightstep_tracer_impl.h @@ -95,6 +95,7 @@ class LightStepDriver : public Common::Ot::OpenTracingDriver { void onSuccess(const Http::AsyncClient::Request&, Http::ResponseMessagePtr&& response) override; void onFailure(const Http::AsyncClient::Request&, Http::AsyncClient::FailureReason failure_reason) override; + void onBeforeFinalizeUpstreamSpan(Tracing::Span&, const Http::ResponseHeaderMap*) override {} private: std::unique_ptr active_report_; diff --git a/source/extensions/tracers/zipkin/zipkin_tracer_impl.h b/source/extensions/tracers/zipkin/zipkin_tracer_impl.h index 5968a4464bbf..def08e83e50c 100644 --- a/source/extensions/tracers/zipkin/zipkin_tracer_impl.h +++ b/source/extensions/tracers/zipkin/zipkin_tracer_impl.h @@ -201,6 +201,7 @@ class ReporterImpl : Logger::Loggable, // The callbacks below record Zipkin-span-related stats. 
void onSuccess(const Http::AsyncClient::Request&, Http::ResponseMessagePtr&&) override; void onFailure(const Http::AsyncClient::Request&, Http::AsyncClient::FailureReason) override; + void onBeforeFinalizeUpstreamSpan(Tracing::Span&, const Http::ResponseHeaderMap*) override {} /** * Creates a heap-allocated ZipkinReporter. diff --git a/test/common/http/async_client_impl_test.cc b/test/common/http/async_client_impl_test.cc index 0523bde76810..385fb22dd3bb 100644 --- a/test/common/http/async_client_impl_test.cc +++ b/test/common/http/async_client_impl_test.cc @@ -52,7 +52,8 @@ class AsyncClientImplTest : public testing::Test { .WillByDefault(ReturnRef(envoy::config::core::v3::Locality().default_instance())); } - void expectSuccess(AsyncClient::Request* sent_request, uint64_t code) { + virtual void expectSuccess(AsyncClient::Request* sent_request, uint64_t code) { + EXPECT_CALL(callbacks_, onBeforeFinalizeUpstreamSpan(_, _)).Times(1); EXPECT_CALL(callbacks_, onSuccess_(_, _)) .WillOnce(Invoke([sent_request, code](const AsyncClient::Request& request, ResponseMessage* response) -> void { @@ -92,6 +93,22 @@ class AsyncClientImplTracingTest : public AsyncClientImplTest { public: Tracing::MockSpan parent_span_; const std::string child_span_name_{"Test Child Span Name"}; + + void expectSuccess(AsyncClient::Request* sent_request, uint64_t code) override { + EXPECT_CALL(callbacks_, onBeforeFinalizeUpstreamSpan(_, _)) + .WillOnce(Invoke([](Tracing::Span& span, const Http::ResponseHeaderMap* response_headers) { + span.setTag("onBeforeFinalizeUpstreamSpan", "called"); + ASSERT_NE(nullptr, response_headers); + })); + EXPECT_CALL(callbacks_, onSuccess_(_, _)) + .WillOnce(Invoke([sent_request, code](const AsyncClient::Request& request, + ResponseMessage* response) -> void { + // Verify that callback is called with the same request handle as returned by + // AsyncClient::send(). + EXPECT_EQ(sent_request, &request); + EXPECT_EQ(code, Utility::getResponseStatus(response->headers())); + })); + } }; TEST_F(AsyncClientImplTest, BasicStream) { @@ -204,6 +221,7 @@ TEST_F(AsyncClientImplTracingTest, Basic) { expectSuccess(request, 200); + EXPECT_CALL(*child_span, setTag(Eq("onBeforeFinalizeUpstreamSpan"), Eq("called"))); EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().Component), Eq(Tracing::Tags::get().Proxy))); EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().HttpProtocol), Eq("HTTP/1.1"))); @@ -250,6 +268,7 @@ TEST_F(AsyncClientImplTracingTest, BasicNamedChildSpan) { expectSuccess(request, 200); + EXPECT_CALL(*child_span, setTag(Eq("onBeforeFinalizeUpstreamSpan"), Eq("called"))); EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().Component), Eq(Tracing::Tags::get().Proxy))); EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().HttpProtocol), Eq("HTTP/1.1"))); @@ -520,6 +539,7 @@ TEST_F(AsyncClientImplTest, MultipleRequests) { // Finish request 2. ResponseHeaderMapPtr response_headers2(new TestResponseHeaderMapImpl{{":status", "503"}}); + EXPECT_CALL(callbacks2, onBeforeFinalizeUpstreamSpan(_, _)).Times(1); EXPECT_CALL(callbacks2, onSuccess_(_, _)) .WillOnce(Invoke( [request2](const AsyncClient::Request& request, ResponseMessage* response) -> void { @@ -538,6 +558,7 @@ TEST_F(AsyncClientImplTest, MultipleRequests) { // Finish request 3. 
ResponseHeaderMapPtr response_headers3(new TestResponseHeaderMapImpl{{":status", "500"}}); + EXPECT_CALL(callbacks3, onBeforeFinalizeUpstreamSpan(_, _)).Times(1); EXPECT_CALL(callbacks3, onSuccess_(_, _)) .WillOnce(Invoke( [request3](const AsyncClient::Request& request, ResponseMessage* response) -> void { @@ -885,6 +906,7 @@ TEST_F(AsyncClientImplTest, ResetAfterResponseStart) { auto* request = client_.send(std::move(message_), callbacks_, AsyncClient::RequestOptions()); EXPECT_NE(request, nullptr); + EXPECT_CALL(callbacks_, onBeforeFinalizeUpstreamSpan(_, _)).Times(1); EXPECT_CALL(callbacks_, onFailure(_, _)) .WillOnce(Invoke([sent_request = request](const AsyncClient::Request& request, AsyncClient::FailureReason reason) { @@ -927,6 +949,7 @@ TEST_F(AsyncClientImplTest, CancelRequest) { EXPECT_CALL(stream_encoder_, encodeHeaders(HeaderMapEqualRef(&message_->headers()), true)); EXPECT_CALL(stream_encoder_.stream_, resetStream(_)); + EXPECT_CALL(callbacks_, onBeforeFinalizeUpstreamSpan(_, _)).Times(1); AsyncClient::Request* request = client_.send(std::move(message_), callbacks_, AsyncClient::RequestOptions()); request->cancel(); @@ -947,8 +970,15 @@ TEST_F(AsyncClientImplTracingTest, CancelRequest) { AsyncClient::RequestOptions options = AsyncClient::RequestOptions().setParentSpan(parent_span_); EXPECT_CALL(*child_span, setSampled(true)); EXPECT_CALL(*child_span, injectContext(_)); + EXPECT_CALL(callbacks_, onBeforeFinalizeUpstreamSpan(_, _)) + .WillOnce(Invoke([](Tracing::Span& span, const Http::ResponseHeaderMap* response_headers) { + span.setTag("onBeforeFinalizeUpstreamSpan", "called"); + // Since this is a failure, we expect no response headers. + ASSERT_EQ(nullptr, response_headers); + })); AsyncClient::Request* request = client_.send(std::move(message_), callbacks_, options); + EXPECT_CALL(*child_span, setTag(Eq("onBeforeFinalizeUpstreamSpan"), Eq("called"))); EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().Component), Eq(Tracing::Tags::get().Proxy))); EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().HttpProtocol), Eq("HTTP/1.1"))); @@ -992,6 +1022,7 @@ TEST_F(AsyncClientImplTest, DestroyWithActiveRequest) { EXPECT_NE(request, nullptr); EXPECT_CALL(stream_encoder_.stream_, resetStream(_)); + EXPECT_CALL(callbacks_, onBeforeFinalizeUpstreamSpan(_, _)).Times(1); EXPECT_CALL(callbacks_, onFailure(_, _)) .WillOnce(Invoke([sent_request = request](const AsyncClient::Request& request, AsyncClient::FailureReason reason) { @@ -1021,6 +1052,7 @@ TEST_F(AsyncClientImplTracingTest, DestroyWithActiveRequest) { auto* request = client_.send(std::move(message_), callbacks_, options); EXPECT_NE(request, nullptr); + EXPECT_CALL(callbacks_, onBeforeFinalizeUpstreamSpan(_, _)).Times(1); EXPECT_CALL(callbacks_, onFailure(_, _)) .WillOnce(Invoke([sent_request = request](const AsyncClient::Request& request, AsyncClient::FailureReason reason) { @@ -1051,6 +1083,7 @@ TEST_F(AsyncClientImplTest, PoolFailure) { return nullptr; })); + EXPECT_CALL(callbacks_, onBeforeFinalizeUpstreamSpan(_, _)).Times(1); EXPECT_CALL(callbacks_, onSuccess_(_, _)) .WillOnce(Invoke([](const AsyncClient::Request& request, ResponseMessage* response) -> void { // The callback gets called before AsyncClient::send() completes, which means that we don't @@ -1075,6 +1108,7 @@ TEST_F(AsyncClientImplTest, PoolFailureWithBody) { return nullptr; })); + EXPECT_CALL(callbacks_, onBeforeFinalizeUpstreamSpan(_, _)).Times(1); EXPECT_CALL(callbacks_, onSuccess_(_, _)) .WillOnce(Invoke([](const AsyncClient::Request& request, 
ResponseMessage* response) -> void { // The callback gets called before AsyncClient::send() completes, which means that we don't @@ -1207,6 +1241,7 @@ TEST_F(AsyncClientImplTracingTest, RequestTimeout) { expectSuccess(request, 504); + EXPECT_CALL(*child_span, setTag(Eq("onBeforeFinalizeUpstreamSpan"), Eq("called"))); EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().Component), Eq(Tracing::Tags::get().Proxy))); EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().HttpProtocol), Eq("HTTP/1.1"))); @@ -1236,6 +1271,7 @@ TEST_F(AsyncClientImplTest, DisableTimer) { AsyncClient::Request* request = client_.send(std::move(message_), callbacks_, AsyncClient::RequestOptions().setTimeout(std::chrono::milliseconds(200))); + EXPECT_CALL(callbacks_, onBeforeFinalizeUpstreamSpan(_, _)).Times(1); request->cancel(); } diff --git a/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc b/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc index fd5bbf6435eb..6728640d458c 100644 --- a/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc +++ b/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc @@ -35,14 +35,11 @@ namespace { class ExtAuthzHttpClientTest : public testing::Test { public: - ExtAuthzHttpClientTest() - : async_request_{&async_client_}, time_source_{async_client_.dispatcher().timeSource()} { - initialize(EMPTY_STRING); - } + ExtAuthzHttpClientTest() : async_request_{&async_client_} { initialize(EMPTY_STRING); } void initialize(const std::string& yaml) { config_ = createConfig(yaml); - client_ = std::make_unique(cm_, config_, time_source_); + client_ = std::make_unique(cm_, config_); ON_CALL(cm_, httpAsyncClientForCluster(config_->cluster())) .WillByDefault(ReturnRef(async_client_)); } @@ -123,7 +120,7 @@ class ExtAuthzHttpClientTest : public testing::Test { const auto authz_response = TestCommon::makeAuthzResponse(CheckStatus::OK); auto check_response = TestCommon::makeMessageResponse(expected_headers); - client_->check(request_callbacks_, request, Tracing::NullSpan::instance(), stream_info_); + client_->check(request_callbacks_, request, parent_span_, stream_info_); EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo(AuthzOkResponse(authz_response)))); client_->onSuccess(async_request_, std::move(check_response)); @@ -135,10 +132,10 @@ class ExtAuthzHttpClientTest : public testing::Test { NiceMock async_client_; NiceMock async_request_; ClientConfigSharedPtr config_; - TimeSource& time_source_; std::unique_ptr client_; MockRequestCallbacks request_callbacks_; - Tracing::MockSpan active_span_; + Tracing::MockSpan parent_span_; + Tracing::MockSpan child_span_; NiceMock stream_info_; }; @@ -282,24 +279,14 @@ TEST_F(ExtAuthzHttpClientTest, AllowedRequestHeadersPrefix) { // Verify client response when authorization server returns a 200 OK. 
TEST_F(ExtAuthzHttpClientTest, AuthorizationOk) { - Tracing::MockSpan* child_span{new Tracing::MockSpan()}; const auto expected_headers = TestCommon::makeHeaderValueOption({{":status", "200", false}}); const auto authz_response = TestCommon::makeAuthzResponse(CheckStatus::OK); auto check_response = TestCommon::makeMessageResponse(expected_headers); envoy::service::auth::v3::CheckRequest request; - - EXPECT_CALL(active_span_, spawnChild_(_, config_->tracingName(), _)).WillOnce(Return(child_span)); - EXPECT_CALL(*child_span, - setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); - EXPECT_CALL(*child_span, injectContext(_)); - - client_->check(request_callbacks_, request, active_span_, stream_info_); + client_->check(request_callbacks_, request, parent_span_, stream_info_); EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo(AuthzOkResponse(authz_response)))); - EXPECT_CALL(*child_span, setTag(Eq("ext_authz_status"), Eq("ext_authz_ok"))); - EXPECT_CALL(*child_span, setTag(Eq("ext_authz_http_status"), Eq("OK"))); - EXPECT_CALL(*child_span, finishSpan()); client_->onSuccess(async_request_, std::move(check_response)); } @@ -307,7 +294,6 @@ using HeaderValuePair = std::pairmutable_request()->mutable_http()->mutable_headers(); (*mutable_headers)[std::string{":x-authz-header2"}] = std::string{"forged-value"}; - - EXPECT_CALL(active_span_, spawnChild_(_, config_->tracingName(), _)).WillOnce(Return(child_span)); - EXPECT_CALL(*child_span, - setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); - EXPECT_CALL(*child_span, injectContext(_)); // Expect that header1 will be added and header2 correctly overwritten. Due to this behavior, the // append property of header value option should always be false. const HeaderValuePair header1{"x-authz-header1", "value"}; const HeaderValuePair header2{"x-authz-header2", "value"}; EXPECT_CALL(async_client_, send_(AllOf(ContainsPairAsHeader(header1), ContainsPairAsHeader(header2)), _, _)); - client_->check(request_callbacks_, request, active_span_, stream_info_); + client_->check(request_callbacks_, request, parent_span_, stream_info_); + + // Check for child span tagging when the request is allowed. 
+ EXPECT_CALL(child_span_, setTag(Eq("ext_authz_http_status"), Eq("OK"))); + EXPECT_CALL(child_span_, setTag(Eq("ext_authz_status"), Eq("ext_authz_ok"))); + client_->onBeforeFinalizeUpstreamSpan(child_span_, &check_response->headers()); EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo(AuthzOkResponse(authz_response)))); - EXPECT_CALL(*child_span, setTag(Eq("ext_authz_status"), Eq("ext_authz_ok"))); - EXPECT_CALL(*child_span, setTag(Eq("ext_authz_http_status"), Eq("OK"))); - EXPECT_CALL(*child_span, finishSpan()); client_->onSuccess(async_request_, std::move(check_response)); } @@ -354,16 +337,10 @@ TEST_F(ExtAuthzHttpClientTest, AuthorizationOkWithAddedAuthzHeadersFromStreamInf initialize(yaml); - Tracing::MockSpan* child_span{new Tracing::MockSpan()}; const auto expected_headers = TestCommon::makeHeaderValueOption({{":status", "200", false}}); const auto authz_response = TestCommon::makeAuthzResponse(CheckStatus::OK); auto check_response = TestCommon::makeMessageResponse(expected_headers); - EXPECT_CALL(active_span_, spawnChild_(_, config_->tracingName(), _)).WillOnce(Return(child_span)); - EXPECT_CALL(*child_span, - setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); - EXPECT_CALL(*child_span, injectContext(_)); - const HeaderValuePair expected_header{"x-authz-header1", "123"}; EXPECT_CALL(async_client_, send_(ContainsPairAsHeader(expected_header), _, _)); @@ -375,19 +352,15 @@ TEST_F(ExtAuthzHttpClientTest, AuthorizationOkWithAddedAuthzHeadersFromStreamInf EXPECT_CALL(stream_info, getRequestHeaders()).WillOnce(Return(&request_headers)); envoy::service::auth::v3::CheckRequest request; - client_->check(request_callbacks_, request, active_span_, stream_info); + client_->check(request_callbacks_, request, parent_span_, stream_info); EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo(AuthzOkResponse(authz_response)))); - EXPECT_CALL(*child_span, setTag(Eq("ext_authz_status"), Eq("ext_authz_ok"))); - EXPECT_CALL(*child_span, setTag(Eq("ext_authz_http_status"), Eq("OK"))); - EXPECT_CALL(*child_span, finishSpan()); client_->onSuccess(async_request_, std::move(check_response)); } // Verify client response headers when allow_upstream_headers is configured. 
TEST_F(ExtAuthzHttpClientTest, AuthorizationOkWithAllowHeader) { - Tracing::MockSpan* child_span{new Tracing::MockSpan()}; const std::string empty_body{}; const auto expected_headers = TestCommon::makeHeaderValueOption({{"x-baz", "foo", false}, {"bar", "foo", false}}); @@ -397,11 +370,7 @@ TEST_F(ExtAuthzHttpClientTest, AuthorizationOkWithAllowHeader) { envoy::service::auth::v3::CheckRequest request; EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo(AuthzOkResponse(authz_response)))); - EXPECT_CALL(active_span_, spawnChild_(_, config_->tracingName(), _)).WillOnce(Return(child_span)); - EXPECT_CALL(*child_span, - setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); - EXPECT_CALL(*child_span, injectContext(_)); - client_->check(request_callbacks_, request, active_span_, stream_info_); + client_->check(request_callbacks_, request, parent_span_, stream_info_); const auto check_response_headers = TestCommon::makeHeaderValueOption({{":status", "200", false}, @@ -412,30 +381,25 @@ TEST_F(ExtAuthzHttpClientTest, AuthorizationOkWithAllowHeader) { {"x-baz", "foo", false}, {"foobar", "foo", false}}); - EXPECT_CALL(*child_span, setTag(Eq("ext_authz_status"), Eq("ext_authz_ok"))); - EXPECT_CALL(*child_span, setTag(Eq("ext_authz_http_status"), Eq("OK"))); - EXPECT_CALL(*child_span, finishSpan()); auto message_response = TestCommon::makeMessageResponse(check_response_headers); client_->onSuccess(async_request_, std::move(message_response)); } // Test the client when a denied response is received. TEST_F(ExtAuthzHttpClientTest, AuthorizationDenied) { - Tracing::MockSpan* child_span{new Tracing::MockSpan()}; const auto expected_headers = TestCommon::makeHeaderValueOption({{":status", "403", false}}); const auto authz_response = TestCommon::makeAuthzResponse( CheckStatus::Denied, Http::Code::Forbidden, EMPTY_STRING, expected_headers); + auto check_response = TestCommon::makeMessageResponse(expected_headers); envoy::service::auth::v3::CheckRequest request; - EXPECT_CALL(active_span_, spawnChild_(_, config_->tracingName(), _)).WillOnce(Return(child_span)); - EXPECT_CALL(*child_span, - setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); - EXPECT_CALL(*child_span, injectContext(_)); - client_->check(request_callbacks_, request, active_span_, stream_info_); - - EXPECT_CALL(*child_span, setTag(Eq("ext_authz_status"), Eq("ext_authz_unauthorized"))); - EXPECT_CALL(*child_span, setTag(Eq("ext_authz_http_status"), Eq("Forbidden"))); - EXPECT_CALL(*child_span, finishSpan()); + client_->check(request_callbacks_, request, parent_span_, stream_info_); + + // Check for child span tagging when the request is denied. + EXPECT_CALL(child_span_, setTag(Eq("ext_authz_http_status"), Eq("Forbidden"))); + EXPECT_CALL(child_span_, setTag(Eq("ext_authz_status"), Eq("ext_authz_unauthorized"))); + client_->onBeforeFinalizeUpstreamSpan(child_span_, &check_response->headers()); + EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo(AuthzDeniedResponse(authz_response)))); client_->onSuccess(async_request_, TestCommon::makeMessageResponse(expected_headers)); @@ -443,24 +407,15 @@ TEST_F(ExtAuthzHttpClientTest, AuthorizationDenied) { // Verify client response headers and body when the authorization server denies the request. 
TEST_F(ExtAuthzHttpClientTest, AuthorizationDeniedWithAllAttributes) { - Tracing::MockSpan* child_span{new Tracing::MockSpan()}; const auto expected_body = std::string{"test"}; const auto expected_headers = TestCommon::makeHeaderValueOption( {{":status", "401", false}, {"foo", "bar", false}, {"x-foobar", "bar", false}}); const auto authz_response = TestCommon::makeAuthzResponse( CheckStatus::Denied, Http::Code::Unauthorized, expected_body, expected_headers); - EXPECT_CALL(active_span_, spawnChild_(_, config_->tracingName(), _)).WillOnce(Return(child_span)); - EXPECT_CALL(*child_span, - setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); - EXPECT_CALL(*child_span, injectContext(_)); - envoy::service::auth::v3::CheckRequest request; - client_->check(request_callbacks_, request, active_span_, stream_info_); + client_->check(request_callbacks_, request, parent_span_, stream_info_); - EXPECT_CALL(*child_span, setTag(Eq("ext_authz_status"), Eq("ext_authz_unauthorized"))); - EXPECT_CALL(*child_span, setTag(Eq("ext_authz_http_status"), Eq("Unauthorized"))); - EXPECT_CALL(*child_span, finishSpan()); EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo(AuthzDeniedResponse(authz_response)))); client_->onSuccess(async_request_, @@ -470,25 +425,16 @@ TEST_F(ExtAuthzHttpClientTest, AuthorizationDeniedWithAllAttributes) { // Verify client response headers when the authorization server denies the request and // allowed_client_headers is configured. TEST_F(ExtAuthzHttpClientTest, AuthorizationDeniedAndAllowedClientHeaders) { - Tracing::MockSpan* child_span{new Tracing::MockSpan()}; const auto expected_body = std::string{"test"}; const auto authz_response = TestCommon::makeAuthzResponse( CheckStatus::Denied, Http::Code::Unauthorized, expected_body, TestCommon::makeHeaderValueOption( {{"x-foo", "bar", false}, {":status", "401", false}, {"foo", "bar", false}})); - EXPECT_CALL(active_span_, spawnChild_(_, config_->tracingName(), _)).WillOnce(Return(child_span)); - EXPECT_CALL(*child_span, - setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); - EXPECT_CALL(*child_span, injectContext(_)); - envoy::service::auth::v3::CheckRequest request; - client_->check(request_callbacks_, request, active_span_, stream_info_); + client_->check(request_callbacks_, request, parent_span_, stream_info_); EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo(AuthzDeniedResponse(authz_response)))); - EXPECT_CALL(*child_span, setTag(Eq("ext_authz_status"), Eq("ext_authz_unauthorized"))); - EXPECT_CALL(*child_span, setTag(Eq("ext_authz_http_status"), Eq("Unauthorized"))); - EXPECT_CALL(*child_span, finishSpan()); const auto check_response_headers = TestCommon::makeHeaderValueOption({{":method", "post", false}, {"x-foo", "bar", false}, {":status", "401", false}, @@ -499,20 +445,12 @@ TEST_F(ExtAuthzHttpClientTest, AuthorizationDeniedAndAllowedClientHeaders) { // Test the client when an unknown error occurs. 
TEST_F(ExtAuthzHttpClientTest, AuthorizationRequestError) { - Tracing::MockSpan* child_span{new Tracing::MockSpan()}; envoy::service::auth::v3::CheckRequest request; - EXPECT_CALL(active_span_, spawnChild_(_, config_->tracingName(), _)).WillOnce(Return(child_span)); - EXPECT_CALL(*child_span, - setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); - EXPECT_CALL(*child_span, injectContext(_)); - - client_->check(request_callbacks_, request, active_span_, stream_info_); + client_->check(request_callbacks_, request, parent_span_, stream_info_); EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo(AuthzErrorResponse(CheckStatus::Error)))); - EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().Error), Eq(Tracing::Tags::get().True))); - EXPECT_CALL(*child_span, finishSpan()); client_->onFailure(async_request_, Http::AsyncClient::FailureReason::Reset); } @@ -520,79 +458,35 @@ TEST_F(ExtAuthzHttpClientTest, AuthorizationRequestError) { TEST_F(ExtAuthzHttpClientTest, AuthorizationRequest5xxError) { Http::ResponseMessagePtr check_response(new Http::ResponseMessageImpl( Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{":status", "503"}}})); - Tracing::MockSpan* child_span{new Tracing::MockSpan()}; envoy::service::auth::v3::CheckRequest request; - EXPECT_CALL(active_span_, spawnChild_(_, config_->tracingName(), _)).WillOnce(Return(child_span)); - EXPECT_CALL(*child_span, - setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); - EXPECT_CALL(*child_span, injectContext(_)); - - client_->check(request_callbacks_, request, active_span_, stream_info_); - - EXPECT_CALL(request_callbacks_, - onComplete_(WhenDynamicCastTo(AuthzErrorResponse(CheckStatus::Error)))); - EXPECT_CALL(*child_span, setTag(Eq("ext_authz_http_status"), Eq("Service Unavailable"))); - EXPECT_CALL(*child_span, finishSpan()); - client_->onSuccess(async_request_, std::move(check_response)); -} - -// Test the client when a call to authorization server returns a status code that cannot be -// parsed. -TEST_F(ExtAuthzHttpClientTest, AuthorizationRequestErrorParsingStatusCode) { - Http::ResponseMessagePtr check_response(new Http::ResponseMessageImpl( - Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl{{":status", "foo"}}})); - Tracing::MockSpan* child_span{new Tracing::MockSpan()}; - envoy::service::auth::v3::CheckRequest request; - - EXPECT_CALL(active_span_, spawnChild_(_, config_->tracingName(), _)).WillOnce(Return(child_span)); - EXPECT_CALL(*child_span, - setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); - EXPECT_CALL(*child_span, injectContext(_)); - - client_->check(request_callbacks_, request, active_span_, stream_info_); + client_->check(request_callbacks_, request, parent_span_, stream_info_); EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo(AuthzErrorResponse(CheckStatus::Error)))); - EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().Error), Eq(Tracing::Tags::get().True))); - EXPECT_CALL(*child_span, finishSpan()); client_->onSuccess(async_request_, std::move(check_response)); } // Test the client when the request is canceled. 
TEST_F(ExtAuthzHttpClientTest, CancelledAuthorizationRequest) { - Tracing::MockSpan* child_span{new Tracing::MockSpan()}; envoy::service::auth::v3::CheckRequest request; - EXPECT_CALL(active_span_, spawnChild_(_, config_->tracingName(), _)).WillOnce(Return(child_span)); - EXPECT_CALL(*child_span, - setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); - EXPECT_CALL(*child_span, injectContext(_)); EXPECT_CALL(async_client_, send_(_, _, _)).WillOnce(Return(&async_request_)); - client_->check(request_callbacks_, request, active_span_, stream_info_); + client_->check(request_callbacks_, request, parent_span_, stream_info_); EXPECT_CALL(async_request_, cancel()); - EXPECT_CALL(*child_span, - setTag(Eq(Tracing::Tags::get().Status), Eq(Tracing::Tags::get().Canceled))); - EXPECT_CALL(*child_span, finishSpan()); client_->cancel(); } // Test the client when the configured cluster is missing/removed. TEST_F(ExtAuthzHttpClientTest, NoCluster) { InSequence s; - Tracing::MockSpan* child_span{new Tracing::MockSpan()}; - EXPECT_CALL(active_span_, spawnChild_(_, config_->tracingName(), _)).WillOnce(Return(child_span)); - EXPECT_CALL(*child_span, - setTag(Eq(Tracing::Tags::get().UpstreamCluster), Eq(config_->cluster()))); EXPECT_CALL(cm_, get(Eq("ext_authz"))).WillOnce(Return(nullptr)); EXPECT_CALL(cm_, httpAsyncClientForCluster("ext_authz")).Times(0); EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo(AuthzErrorResponse(CheckStatus::Error)))); - EXPECT_CALL(*child_span, setTag(Eq(Tracing::Tags::get().Error), Eq(Tracing::Tags::get().True))); - EXPECT_CALL(*child_span, finishSpan()); - client_->check(request_callbacks_, envoy::service::auth::v3::CheckRequest{}, active_span_, + client_->check(request_callbacks_, envoy::service::auth::v3::CheckRequest{}, parent_span_, stream_info_); } diff --git a/test/extensions/filters/http/ext_authz/config_test.cc b/test/extensions/filters/http/ext_authz/config_test.cc index aae44a7e8d39..2081bc00bd11 100644 --- a/test/extensions/filters/http/ext_authz/config_test.cc +++ b/test/extensions/filters/http/ext_authz/config_test.cc @@ -108,7 +108,6 @@ TEST(HttpExtAuthzConfigTest, CorrectProtoHttp) { EXPECT_CALL(context, clusterManager()).Times(1); EXPECT_CALL(context, runtime()).Times(1); EXPECT_CALL(context, scope()).Times(1); - EXPECT_CALL(context, timeSource()).Times(1); Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(*proto_config, "stats", context); testing::StrictMock filter_callback; EXPECT_CALL(filter_callback, addStreamDecoderFilter(_)); diff --git a/test/extensions/filters/http/lua/lua_filter_test.cc b/test/extensions/filters/http/lua/lua_filter_test.cc index a2ee0a2f03ca..6cfa049e31d5 100644 --- a/test/extensions/filters/http/lua/lua_filter_test.cc +++ b/test/extensions/filters/http/lua/lua_filter_test.cc @@ -54,6 +54,7 @@ class LuaHttpFilterTest : public testing::Test { decoder_callbacks_.buffer_->move(data); })); + EXPECT_CALL(decoder_callbacks_, activeSpan()).Times(AtLeast(0)); EXPECT_CALL(decoder_callbacks_, decodingBuffer()).Times(AtLeast(0)); EXPECT_CALL(decoder_callbacks_, route()).Times(AtLeast(0)); @@ -65,6 +66,7 @@ class LuaHttpFilterTest : public testing::Test { } encoder_callbacks_.buffer_->move(data); })); + EXPECT_CALL(encoder_callbacks_, activeSpan()).Times(AtLeast(0)); EXPECT_CALL(encoder_callbacks_, encodingBuffer()).Times(AtLeast(0)); } @@ -103,6 +105,7 @@ class LuaHttpFilterTest : public testing::Test { std::shared_ptr> ssl_; NiceMock connection_; NiceMock stream_info_; + Tracing::MockSpan child_span_; 
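As an aside on the interface these tests exercise: the `onBeforeFinalizeUpstreamSpan()` hook added by this patch lets a receiver tag the child span before the async client finalizes it. The following is only a rough, hypothetical sketch of a receiver-side implementation (it assumes Envoy's Http/Tracing headers are in scope; the class name and tag keys are invented for illustration and are not part of this change):

class ExampleCallbacks : public Http::AsyncClient::Callbacks {
public:
  void onSuccess(const Http::AsyncClient::Request&, Http::ResponseMessagePtr&&) override {}
  void onFailure(const Http::AsyncClient::Request&, Http::AsyncClient::FailureReason) override {}

  // Runs just before the async client finalizes the child span, for both completed and
  // reset requests; response_headers is nullptr when no response was received.
  void onBeforeFinalizeUpstreamSpan(Tracing::Span& span,
                                    const Http::ResponseHeaderMap* response_headers) override {
    if (response_headers != nullptr) {
      span.setTag("example_http_status", response_headers->getStatusValue());
    } else {
      span.setTag("example_result", "no_response");
    }
  }
};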
const std::string HEADER_ONLY_SCRIPT{R"EOF( function envoy_on_request(request_handle) @@ -797,6 +800,7 @@ TEST_F(LuaHttpFilterTest, HttpCall) { EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(":status 200"))); EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("response"))); EXPECT_CALL(decoder_callbacks_, continueDecoding()); + callbacks->onBeforeFinalizeUpstreamSpan(child_span_, &response_message->headers()); callbacks->onSuccess(request, std::move(response_message)); } @@ -997,6 +1001,7 @@ TEST_F(LuaHttpFilterTest, DoubleHttpCall) { EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(":status 403"))); EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("no body"))); EXPECT_CALL(decoder_callbacks_, continueDecoding()); + callbacks->onBeforeFinalizeUpstreamSpan(child_span_, &response_message->headers()); callbacks->onSuccess(request, std::move(response_message)); Buffer::OwnedImpl data("hello"); diff --git a/test/mocks/http/mocks.h b/test/mocks/http/mocks.h index e323357974d4..c196501f7c1f 100644 --- a/test/mocks/http/mocks.h +++ b/test/mocks/http/mocks.h @@ -351,6 +351,8 @@ class MockAsyncClientCallbacks : public AsyncClient::Callbacks { MOCK_METHOD(void, onSuccess_, (const Http::AsyncClient::Request&, ResponseMessage*)); MOCK_METHOD(void, onFailure, (const Http::AsyncClient::Request&, Http::AsyncClient::FailureReason)); + MOCK_METHOD(void, onBeforeFinalizeUpstreamSpan, + (Envoy::Tracing::Span&, const Http::ResponseHeaderMap*)); }; class MockAsyncClientStreamCallbacks : public AsyncClient::StreamCallbacks { From 270523550e660f7c1410d3541394ff0a097cb672 Mon Sep 17 00:00:00 2001 From: Henry Yang <4411287+HenryYYang@users.noreply.github.com> Date: Fri, 26 Jun 2020 08:31:39 -0700 Subject: [PATCH 464/909] Fix race condition when the redis filter is destroyed. (#11466) Signed-off-by: Henry Yang --- .../clusters/redis/redis_cluster.cc | 8 +++ .../filters/network/common/redis/client.h | 2 + .../filters/network/redis_proxy/config.cc | 12 ++-- .../filters/network/redis_proxy/conn_pool.h | 8 --- .../network/redis_proxy/conn_pool_impl.cc | 68 +++++++++++++------ .../network/redis_proxy/conn_pool_impl.h | 22 +++--- .../clusters/redis/redis_cluster_test.cc | 1 + .../redis_proxy/conn_pool_impl_test.cc | 63 ++++++++++++++++- test/mocks/thread_local/mocks.h | 4 +- 9 files changed, 142 insertions(+), 46 deletions(-) diff --git a/source/extensions/clusters/redis/redis_cluster.cc b/source/extensions/clusters/redis/redis_cluster.cc index c1bc8ca90da2..98e3763b9808 100644 --- a/source/extensions/clusters/redis/redis_cluster.cc +++ b/source/extensions/clusters/redis/redis_cluster.cc @@ -151,6 +151,10 @@ RedisCluster::DnsDiscoveryResolveTarget::~DnsDiscoveryResolveTarget() { if (active_query_) { active_query_->cancel(); } + // Disable timer for mock tests. + if (resolve_timer_) { + resolve_timer_->disableTimer(); + } } void RedisCluster::DnsDiscoveryResolveTarget::startResolveDns() { @@ -228,6 +232,10 @@ RedisCluster::RedisDiscoverySession::~RedisDiscoverySession() { current_request_->cancel(); current_request_ = nullptr; } + // Disable timer for mock tests. 
+ if (resolve_timer_) { + resolve_timer_->disableTimer(); + } while (!client_map_.empty()) { client_map_.begin()->second->client_->close(); diff --git a/source/extensions/filters/network/common/redis/client.h b/source/extensions/filters/network/common/redis/client.h index 0c8a15cb65fc..147abbcebf24 100644 --- a/source/extensions/filters/network/common/redis/client.h +++ b/source/extensions/filters/network/common/redis/client.h @@ -186,6 +186,8 @@ class Config { virtual ReadPolicy readPolicy() const PURE; }; +using ConfigSharedPtr = std::shared_ptr; + /** * A factory for individual redis client connections. */ diff --git a/source/extensions/filters/network/redis_proxy/config.cc b/source/extensions/filters/network/redis_proxy/config.cc index e52a04ffe065..2d62f511b393 100644 --- a/source/extensions/filters/network/redis_proxy/config.cc +++ b/source/extensions/filters/network/redis_proxy/config.cc @@ -75,12 +75,12 @@ Network::FilterFactoryCb RedisProxyFilterConfigFactory::createFilterFactoryFromP for (auto& cluster : unique_clusters) { Stats::ScopePtr stats_scope = context.scope().createScope(fmt::format("cluster.{}.redis_cluster", cluster)); - - upstreams.emplace(cluster, std::make_shared( - cluster, context.clusterManager(), - Common::Redis::Client::ClientFactoryImpl::instance_, - context.threadLocal(), proto_config.settings(), context.api(), - std::move(stats_scope), redis_command_stats, refresh_manager)); + auto conn_pool_ptr = std::make_shared( + cluster, context.clusterManager(), Common::Redis::Client::ClientFactoryImpl::instance_, + context.threadLocal(), proto_config.settings(), context.api(), std::move(stats_scope), + redis_command_stats, refresh_manager); + conn_pool_ptr->init(); + upstreams.emplace(cluster, conn_pool_ptr); } auto router = diff --git a/source/extensions/filters/network/redis_proxy/conn_pool.h b/source/extensions/filters/network/redis_proxy/conn_pool.h index 0fa1e68bec96..385fac2eb527 100644 --- a/source/extensions/filters/network/redis_proxy/conn_pool.h +++ b/source/extensions/filters/network/redis_proxy/conn_pool.h @@ -62,14 +62,6 @@ class Instance { */ virtual Common::Redis::Client::PoolRequest* makeRequest(const std::string& hash_key, RespVariant&& request, PoolCallbacks& callbacks) PURE; - - /** - * Notify the redirection manager singleton that a redirection error has been received from an - * upstream server associated with the pool's associated cluster. - * @return bool true if a cluster's registered callback with the redirection manager is scheduled - * to be called from the main thread dispatcher, false otherwise. 
- virtual bool onRedirection() PURE; }; using InstanceSharedPtr = std::shared_ptr; diff --git a/source/extensions/filters/network/redis_proxy/conn_pool_impl.cc b/source/extensions/filters/network/redis_proxy/conn_pool_impl.cc index b446901c6072..8e9ac6f186a6 100644 --- a/source/extensions/filters/network/redis_proxy/conn_pool_impl.cc +++ b/source/extensions/filters/network/redis_proxy/conn_pool_impl.cc @@ -44,21 +44,36 @@ InstanceImpl::InstanceImpl( const Common::Redis::RedisCommandStatsSharedPtr& redis_command_stats, Extensions::Common::Redis::ClusterRefreshManagerSharedPtr refresh_manager) : cluster_name_(cluster_name), cm_(cm), client_factory_(client_factory), - tls_(tls.allocateSlot()), config_(config), api_(api), stats_scope_(std::move(stats_scope)), + tls_(tls.allocateSlot()), config_(new Common::Redis::Client::ConfigImpl(config)), api_(api), + stats_scope_(std::move(stats_scope)), redis_command_stats_(redis_command_stats), redis_cluster_stats_{REDIS_CLUSTER_STATS( POOL_COUNTER(*stats_scope_))}, - refresh_manager_(std::move(refresh_manager)) { - tls_->set([this, cluster_name]( - Event::Dispatcher& dispatcher) -> ThreadLocal::ThreadLocalObjectSharedPtr { - return std::make_shared(*this, dispatcher, cluster_name); - }); + refresh_manager_(std::move(refresh_manager)) {} + +void InstanceImpl::init() { + // Note: `this` and `cluster_name` have the lifetime of the filter. + // That may be shorter than the tls callback if the listener is torn down shortly after it is + // created. We use a weak pointer so the tls callbacks remain safe even if this object is + // destroyed first. + std::weak_ptr this_weak_ptr = this->shared_from_this(); + tls_->set( + [this_weak_ptr](Event::Dispatcher& dispatcher) -> ThreadLocal::ThreadLocalObjectSharedPtr { + if (auto this_shared_ptr = this_weak_ptr.lock()) { + return std::make_shared(this_shared_ptr, dispatcher, + this_shared_ptr->cluster_name_); + } + return nullptr; + }); } +// This method is always called from an InstanceSharedPtr, so we don't have to worry about +// tls_->getTyped failing due to InstanceImpl going away. Common::Redis::Client::PoolRequest* InstanceImpl::makeRequest(const std::string& key, RespVariant&& request, PoolCallbacks& callbacks) { return tls_->getTyped().makeRequest(key, std::move(request), callbacks); } +// This method is always called from an InstanceSharedPtr, so we don't have to worry about +// tls_->getTyped failing due to InstanceImpl going away.
Common::Redis::Client::PoolRequest* InstanceImpl::makeRequestToHost(const std::string& host_address, const Common::Redis::RespValue& request, @@ -66,16 +81,20 @@ InstanceImpl::makeRequestToHost(const std::string& host_address, return tls_->getTyped().makeRequestToHost(host_address, request, callbacks); } -InstanceImpl::ThreadLocalPool::ThreadLocalPool(InstanceImpl& parent, Event::Dispatcher& dispatcher, +InstanceImpl::ThreadLocalPool::ThreadLocalPool(std::shared_ptr parent, + Event::Dispatcher& dispatcher, std::string cluster_name) : parent_(parent), dispatcher_(dispatcher), cluster_name_(std::move(cluster_name)), drain_timer_(dispatcher.createTimer([this]() -> void { drainClients(); })), - is_redis_cluster_(false) { - cluster_update_handle_ = parent_.cm_.addThreadLocalClusterUpdateCallbacks(*this); - Upstream::ThreadLocalCluster* cluster = parent_.cm_.get(cluster_name_); + is_redis_cluster_(false), client_factory_(parent->client_factory_), config_(parent->config_), + stats_scope_(parent->stats_scope_), redis_command_stats_(parent->redis_command_stats_), + redis_cluster_stats_(parent->redis_cluster_stats_), + refresh_manager_(parent->refresh_manager_) { + cluster_update_handle_ = parent->cm_.addThreadLocalClusterUpdateCallbacks(*this); + Upstream::ThreadLocalCluster* cluster = parent->cm_.get(cluster_name_); if (cluster != nullptr) { - auth_username_ = ProtocolOptionsConfigImpl::authUsername(cluster->info(), parent_.api_); - auth_password_ = ProtocolOptionsConfigImpl::authPassword(cluster->info(), parent_.api_); + auth_username_ = ProtocolOptionsConfigImpl::authUsername(cluster->info(), parent->api_); + auth_password_ = ProtocolOptionsConfigImpl::authPassword(cluster->info(), parent->api_); onClusterAddOrUpdateNonVirtual(*cluster); } } @@ -100,6 +119,11 @@ void InstanceImpl::ThreadLocalPool::onClusterAddOrUpdateNonVirtual( if (cluster.info()->name() != cluster_name_) { return; } + // Ensure the filter is not deleted in the main thread during this method. + auto shared_parent = parent_.lock(); + if (!shared_parent) { + return; + } if (cluster_ != nullptr) { // Treat an update as a removal followed by an add. @@ -215,9 +239,9 @@ InstanceImpl::ThreadLocalPool::threadLocalActiveClient(Upstream::HostConstShared if (!client) { client = std::make_unique(*this); client->host_ = host; - client->redis_client_ = parent_.client_factory_.create( - host, dispatcher_, parent_.config_, parent_.redis_command_stats_, *parent_.stats_scope_, - auth_username_, auth_password_); + client->redis_client_ = + client_factory_.create(host, dispatcher_, *config_, redis_command_stats_, *(stats_scope_), + auth_username_, auth_password_); client->redis_client_->addConnectionCallbacks(*client); } return client; @@ -232,9 +256,9 @@ InstanceImpl::ThreadLocalPool::makeRequest(const std::string& key, RespVariant&& return nullptr; } - Clusters::Redis::RedisLoadBalancerContextImpl lb_context(key, parent_.config_.enableHashtagging(), + Clusters::Redis::RedisLoadBalancerContextImpl lb_context(key, config_->enableHashtagging(), is_redis_cluster_, getRequest(request), - parent_.config_.readPolicy()); + config_->readPolicy()); Upstream::HostConstSharedPtr host = cluster_->loadBalancer().chooseHost(&lb_context); if (!host) { return nullptr; @@ -290,9 +314,9 @@ Common::Redis::Client::PoolRequest* InstanceImpl::ThreadLocalPool::makeRequestTo auto it = host_address_map_.find(host_address_map_key); if (it == host_address_map_.end()) { // This host is not known to the cluster manager. Create a new host and insert it into the map. 
- if (created_via_redirect_hosts_.size() == parent_.config_.maxUpstreamUnknownConnections()) { + if (created_via_redirect_hosts_.size() == config_->maxUpstreamUnknownConnections()) { // Too many upstream connections to unknown hosts have been created. - parent_.redis_cluster_stats_.max_upstream_unknown_connections_reached_.inc(); + redis_cluster_stats_.max_upstream_unknown_connections_reached_.inc(); return nullptr; } if (!ipv6) { @@ -344,7 +368,7 @@ void InstanceImpl::ThreadLocalActiveClient::onEvent(Network::ConnectionEvent eve it++) { if ((*it).get() == this) { if (!redis_client_->active()) { - parent_.parent_.redis_cluster_stats_.upstream_cx_drained_.inc(); + parent_.redis_cluster_stats_.upstream_cx_drained_.inc(); } parent_.dispatcher_.deferredDelete(std::move(redis_client_)); parent_.clients_to_drain_.erase(it); @@ -380,7 +404,7 @@ void InstanceImpl::PendingRequest::onResponse(Common::Redis::RespValuePtr&& resp void InstanceImpl::PendingRequest::onFailure() { request_handler_ = nullptr; pool_callbacks_.onFailure(); - parent_.parent_.onFailure(); + parent_.refresh_manager_->onFailure(parent_.cluster_name_); parent_.onRequestCompleted(); } @@ -403,7 +427,7 @@ bool InstanceImpl::PendingRequest::onRedirection(Common::Redis::RespValuePtr&& v onResponse(std::move(value)); return false; } else { - parent_.parent_.onRedirection(); + parent_.refresh_manager_->onRedirection(parent_.cluster_name_); return true; } } diff --git a/source/extensions/filters/network/redis_proxy/conn_pool_impl.h b/source/extensions/filters/network/redis_proxy/conn_pool_impl.h index 6fa31f2ca0ea..aaa25c238510 100644 --- a/source/extensions/filters/network/redis_proxy/conn_pool_impl.h +++ b/source/extensions/filters/network/redis_proxy/conn_pool_impl.h @@ -24,6 +24,7 @@ #include "source/extensions/clusters/redis/redis_cluster_lb.h" #include "extensions/common/redis/cluster_refresh_manager.h" +#include "extensions/filters/network/common/redis/client.h" #include "extensions/filters/network/common/redis/client_impl.h" #include "extensions/filters/network/common/redis/codec_impl.h" #include "extensions/filters/network/common/redis/utility.h" @@ -52,7 +53,7 @@ class DoNothingPoolCallbacks : public PoolCallbacks { void onFailure() override{}; }; -class InstanceImpl : public Instance { +class InstanceImpl : public Instance, public std::enable_shared_from_this { public: InstanceImpl( const std::string& cluster_name, Upstream::ClusterManager& cm, @@ -79,9 +80,7 @@ class InstanceImpl : public Instance { makeRequestToHost(const std::string& host_address, const Common::Redis::RespValue& request, Common::Redis::Client::ClientCallbacks& callbacks); - bool onRedirection() override { return refresh_manager_->onRedirection(cluster_name_); } - bool onFailure() { return refresh_manager_->onFailure(cluster_name_); } - bool onHostDegraded() { return refresh_manager_->onHostDegraded(cluster_name_); } + void init(); // Allow the unit test to have access to private members. 
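A likely reason init() is split out from the constructor (instead of registering the TLS callback during construction, as the old code did) is that shared_from_this() only works once a shared_ptr already owns the object, so it cannot be called from the constructor itself. A short sketch of that two-phase pattern, assuming standard enable_shared_from_this semantics:

#include <cassert>
#include <memory>

struct Pool : std::enable_shared_from_this<Pool> {
  // Calling shared_from_this() here would throw std::bad_weak_ptr (C++17),
  // because no shared_ptr owns the object yet during construction.
  Pool() = default;

  // Safe: by the time init() runs, std::make_shared<Pool>() owns the object.
  void init() { self_ = weak_from_this(); }

  std::weak_ptr<Pool> self_;
};

int main() {
  auto pool = std::make_shared<Pool>();
  pool->init(); // mirrors the conn_pool_impl->init() call added in the tests
  assert(!pool->self_.expired());
}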
friend class RedisConnPoolImplTest; @@ -127,7 +126,8 @@ class InstanceImpl : public Instance { struct ThreadLocalPool : public ThreadLocal::ThreadLocalObject, public Upstream::ClusterUpdateCallbacks { - ThreadLocalPool(InstanceImpl& parent, Event::Dispatcher& dispatcher, std::string cluster_name); + ThreadLocalPool(std::shared_ptr parent, Event::Dispatcher& dispatcher, + std::string cluster_name); ~ThreadLocalPool() override; ThreadLocalActiveClientPtr& threadLocalActiveClient(Upstream::HostConstSharedPtr host); Common::Redis::Client::PoolRequest* makeRequest(const std::string& key, RespVariant&& request, @@ -149,7 +149,7 @@ class InstanceImpl : public Instance { void onRequestCompleted(); - InstanceImpl& parent_; + std::weak_ptr parent_; Event::Dispatcher& dispatcher_; const std::string cluster_name_; Upstream::ClusterUpdateCallbacksHandlePtr cluster_update_handle_; @@ -171,15 +171,21 @@ class InstanceImpl : public Instance { */ Event::TimerPtr drain_timer_; bool is_redis_cluster_; + Common::Redis::Client::ClientFactory& client_factory_; + Common::Redis::Client::ConfigSharedPtr config_; + Stats::ScopeSharedPtr stats_scope_; + Common::Redis::RedisCommandStatsSharedPtr redis_command_stats_; + RedisClusterStats redis_cluster_stats_; + const Extensions::Common::Redis::ClusterRefreshManagerSharedPtr refresh_manager_; }; const std::string cluster_name_; Upstream::ClusterManager& cm_; Common::Redis::Client::ClientFactory& client_factory_; ThreadLocal::SlotPtr tls_; - Common::Redis::Client::ConfigImpl config_; + Common::Redis::Client::ConfigSharedPtr config_; Api::Api& api_; - Stats::ScopePtr stats_scope_; + Stats::ScopeSharedPtr stats_scope_; Common::Redis::RedisCommandStatsSharedPtr redis_command_stats_; RedisClusterStats redis_cluster_stats_; const Extensions::Common::Redis::ClusterRefreshManagerSharedPtr refresh_manager_; diff --git a/test/extensions/clusters/redis/redis_cluster_test.cc b/test/extensions/clusters/redis/redis_cluster_test.cc index 5936773382ad..c41793d816c2 100644 --- a/test/extensions/clusters/redis/redis_cluster_test.cc +++ b/test/extensions/clusters/redis/redis_cluster_test.cc @@ -160,6 +160,7 @@ class RedisClusterTest : public testing::Test, void expectRedisSessionCreated() { resolve_timer_ = new Event::MockTimer(&dispatcher_); + EXPECT_CALL(*resolve_timer_, disableTimer()); ON_CALL(random_, random()).WillByDefault(Return(0)); } diff --git a/test/extensions/filters/network/redis_proxy/conn_pool_impl_test.cc b/test/extensions/filters/network/redis_proxy/conn_pool_impl_test.cc index f3350db7cd5f..ed83f593dfe6 100644 --- a/test/extensions/filters/network/redis_proxy/conn_pool_impl_test.cc +++ b/test/extensions/filters/network/redis_proxy/conn_pool_impl_test.cc @@ -78,11 +78,12 @@ class RedisConnPoolImplTest : public testing::Test, public Common::Redis::Client std::make_shared>(); auto redis_command_stats = Common::Redis::RedisCommandStats::createRedisCommandStats(store->symbolTable()); - std::unique_ptr conn_pool_impl = std::make_unique( + std::shared_ptr conn_pool_impl = std::make_shared( cluster_name_, cm_, *this, tls_, Common::Redis::Client::createConnPoolSettings(20, hashtagging, true, max_unknown_conns, read_policy_), api_, std::move(store), redis_command_stats, cluster_refresh_manager_); + conn_pool_impl->init(); // Set the authentication password for this connection pool. 
conn_pool_impl->tls_->getTyped().auth_username_ = auth_username_; conn_pool_impl->tls_->getTyped().auth_password_ = auth_password_; @@ -176,6 +177,11 @@ class RedisConnPoolImplTest : public testing::Test, public Common::Redis::Client return conn_pool_impl->tls_->getTyped().clients_to_drain_; } + InstanceImpl::ThreadLocalPool& threadLocalPool() { + InstanceImpl* conn_pool_impl = dynamic_cast(conn_pool_.get()); + return conn_pool_impl->tls_->getTyped(); + } + Event::TimerPtr& drainTimer() { InstanceImpl* conn_pool_impl = dynamic_cast(conn_pool_.get()); return conn_pool_impl->tls_->getTyped().drain_timer_; @@ -1156,6 +1162,61 @@ TEST_F(RedisConnPoolImplTest, AskRedirectionFailure) { tls_.shutdownThread(); } +TEST_F(RedisConnPoolImplTest, MakeRequestAndRedirectFollowedByDelete) { + tls_.defer_delete = true; + std::unique_ptr> store = + std::make_unique>(); + cluster_refresh_manager_ = + std::make_shared>(); + auto redis_command_stats = + Common::Redis::RedisCommandStats::createRedisCommandStats(store->symbolTable()); + conn_pool_ = std::make_shared( + cluster_name_, cm_, *this, tls_, + Common::Redis::Client::createConnPoolSettings(20, true, true, 100, read_policy_), api_, + std::move(store), redis_command_stats, cluster_refresh_manager_); + conn_pool_->init(); + + auto& local_pool = threadLocalPool(); + conn_pool_.reset(); + + // Request + Common::Redis::Client::MockClient* client = new NiceMock(); + Common::Redis::RespValueSharedPtr value = std::make_shared(); + Common::Redis::Client::MockPoolRequest active_request; + MockPoolCallbacks callbacks; + EXPECT_CALL(cm_.thread_local_cluster_.lb_, chooseHost(_)) + .WillOnce(Invoke([&](Upstream::LoadBalancerContext* context) -> Upstream::HostConstSharedPtr { + EXPECT_EQ(context->computeHashKey().value(), MurmurHash::murmurHash2_64("hash_key")); + EXPECT_EQ(context->metadataMatchCriteria(), nullptr); + EXPECT_EQ(context->downstreamConnection(), nullptr); + return this->cm_.thread_local_cluster_.lb_.host_; + })); + EXPECT_CALL(*this, create_(_)).WillOnce(Return(client)); + EXPECT_CALL(*cm_.thread_local_cluster_.lb_.host_, address()) + .WillRepeatedly(Return(this->test_address_)); + EXPECT_CALL(*client, makeRequest_(Ref(*value), _)).WillOnce(Return(&active_request)); + EXPECT_NE(nullptr, local_pool.makeRequest("hash_key", value, callbacks)); + + // Move redirection. 
+  Common::Redis::Client::MockPoolRequest active_request2;
+  Common::Redis::Client::MockClient* client2 = new NiceMock();
+  Upstream::HostConstSharedPtr host1;
+  Common::Redis::RespValuePtr moved_response{new Common::Redis::RespValue()};
+  moved_response->type(Common::Redis::RespType::Error);
+  moved_response->asString() = "MOVED 1111 10.1.2.3:4000";
+
+  EXPECT_CALL(*this, create_(_)).WillOnce(DoAll(SaveArg<0>(&host1), Return(client2)));
+  EXPECT_CALL(*client2, makeRequest_(Ref(*value), _)).WillOnce(Return(&active_request2));
+  EXPECT_TRUE(client->client_callbacks_.back()->onRedirection(std::move(moved_response),
+                                                              "10.1.2.3:4000", false));
+  EXPECT_EQ(host1->address()->asString(), "10.1.2.3:4000");
+  EXPECT_CALL(callbacks, onResponse_(_));
+  client2->client_callbacks_.back()->onResponse(std::make_unique());
+
+  EXPECT_CALL(*client, close());
+  tls_.shutdownThread();
+}
+
 } // namespace ConnPool
 } // namespace RedisProxy
 } // namespace NetworkFilters
diff --git a/test/mocks/thread_local/mocks.h b/test/mocks/thread_local/mocks.h
index b2f3f1e13f2f..a4a68cf6881d 100644
--- a/test/mocks/thread_local/mocks.h
+++ b/test/mocks/thread_local/mocks.h
@@ -51,7 +51,8 @@ class MockInstance : public Instance {
   ~SlotImpl() override {
     // Do not actually clear slot data during shutdown. This mimics the production code.
-    if (!parent_.shutdown_) {
+    // The defer_delete mimics the recycle() code with Bookkeeper.
+    if (!parent_.shutdown_ && !parent_.defer_delete) {
       EXPECT_LT(index_, parent_.data_.size());
       parent_.data_[index_].reset();
     }
@@ -98,6 +99,7 @@ class MockInstance : public Instance {
   bool defer_data{};
   bool shutdown_{};
   bool registered_{true};
+  bool defer_delete{};
 };

 } // namespace ThreadLocal

From 5f1348bbc6c530b6adf118f41718bc4200fb55e1 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ra=C3=BAl=20Guti=C3=A9rrez=20Segal=C3=A9s?=
Date: Fri, 26 Jun 2020 13:39:54 -0400
Subject: [PATCH 465/909] header-to-metadata: add support for regex
 substitutions (#11698)

Currently, the header-to-metadata filter supports adding a header's
value to a metadata key. This extends it to support performing a regex
match & substitution before the value is added as metadata.

The use-case we have is extracting parts of the :path header and
using those as metadata for routing decisions via the subset LB.

Risk Level: Low
Testing: Unit tests.
Docs Changes: adding in a bit.
Release Notes: added.
Signed-off-by: Raul Gutierrez Segales --- .../filters/http/header_to_metadata/v3/BUILD | 1 + .../v3/header_to_metadata.proto | 25 +++- .../http/header_to_metadata/v4alpha/BUILD | 13 ++ .../v4alpha/header_to_metadata.proto | 120 ++++++++++++++++++ .../header_to_metadata_filter.rst | 20 +++ docs/root/version_history/current.rst | 2 + .../filters/http/header_to_metadata/v3/BUILD | 1 + .../v3/header_to_metadata.proto | 25 +++- .../http/header_to_metadata/v4alpha/BUILD | 13 ++ .../v4alpha/header_to_metadata.proto | 120 ++++++++++++++++++ .../header_to_metadata_filter.cc | 76 +++++++---- .../header_to_metadata_filter.h | 37 ++++-- .../http/header_to_metadata/config_test.cc | 48 +++++++ .../header_to_metadata_filter_test.cc | 61 ++++++++- 14 files changed, 512 insertions(+), 50 deletions(-) create mode 100644 api/envoy/extensions/filters/http/header_to_metadata/v4alpha/BUILD create mode 100644 api/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto create mode 100644 generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v4alpha/BUILD create mode 100644 generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto diff --git a/api/envoy/extensions/filters/http/header_to_metadata/v3/BUILD b/api/envoy/extensions/filters/http/header_to_metadata/v3/BUILD index a8dda77ddfc3..8253ea6dff83 100644 --- a/api/envoy/extensions/filters/http/header_to_metadata/v3/BUILD +++ b/api/envoy/extensions/filters/http/header_to_metadata/v3/BUILD @@ -7,6 +7,7 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/filter/http/header_to_metadata/v2:pkg", + "//envoy/type/matcher/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/api/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto b/api/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto index 8e7c490f01b6..07fbba4089f7 100644 --- a/api/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto +++ b/api/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto @@ -2,6 +2,9 @@ syntax = "proto3"; package envoy.extensions.filters.http.header_to_metadata.v3; +import "envoy/type/matcher/v3/regex.proto"; + +import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -44,7 +47,7 @@ message Config { BASE64 = 1; } - // [#next-free-field: 6] + // [#next-free-field: 7] message KeyValuePair { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.header_to_metadata.v2.Config.KeyValuePair"; @@ -57,12 +60,22 @@ message Config { // The value to pair with the given key. // - // When used for a `on_header_present` case, if value is non-empty it'll be used - // instead of the header value. If both are empty, no metadata is added. + // When used for a + // :ref:`on_header_present ` + // case, if value is non-empty it'll be used instead of the header value. If both are empty, no metadata is added. + // + // When used for a :ref:`on_header_missing ` + // case, a non-empty value must be provided otherwise no metadata is added. + string value = 3 [(udpa.annotations.field_migrate).oneof_promotion = "value_type"]; + + // If present, the header's value will be matched and substituted with this. If there is no match or substitution, the header value + // is used as-is. + // + // This is only used for :ref:`on_header_present `. 
// - // When used for a `on_header_missing` case, a non-empty value must be provided - // otherwise no metadata is added. - string value = 3; + // Note: if the `value` field is non-empty this field should be empty. + type.matcher.v3.RegexMatchAndSubstitute regex_value_rewrite = 6 + [(udpa.annotations.field_migrate).oneof_promotion = "value_type"]; // The value's type — defaults to string. ValueType type = 4; diff --git a/api/envoy/extensions/filters/http/header_to_metadata/v4alpha/BUILD b/api/envoy/extensions/filters/http/header_to_metadata/v4alpha/BUILD new file mode 100644 index 000000000000..285e2346e0ff --- /dev/null +++ b/api/envoy/extensions/filters/http/header_to_metadata/v4alpha/BUILD @@ -0,0 +1,13 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/extensions/filters/http/header_to_metadata/v3:pkg", + "//envoy/type/matcher/v4alpha:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto b/api/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto new file mode 100644 index 000000000000..c7df11e3fcb6 --- /dev/null +++ b/api/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto @@ -0,0 +1,120 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.header_to_metadata.v4alpha; + +import "envoy/type/matcher/v4alpha/regex.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.header_to_metadata.v4alpha"; +option java_outer_classname = "HeaderToMetadataProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Header-To-Metadata Filter] +// +// The configuration for transforming headers into metadata. This is useful +// for matching load balancer subsets, logging, etc. +// +// Header to Metadata :ref:`configuration overview `. +// [#extension: envoy.filters.http.header_to_metadata] + +message Config { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.header_to_metadata.v3.Config"; + + enum ValueType { + STRING = 0; + + NUMBER = 1; + + // The value is a serialized `protobuf.Value + // `_. + PROTOBUF_VALUE = 2; + } + + // ValueEncode defines the encoding algorithm. + enum ValueEncode { + // The value is not encoded. + NONE = 0; + + // The value is encoded in `Base64 `_. + // Note: this is mostly used for STRING and PROTOBUF_VALUE to escape the + // non-ASCII characters in the header. + BASE64 = 1; + } + + // [#next-free-field: 7] + message KeyValuePair { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.header_to_metadata.v3.Config.KeyValuePair"; + + // The namespace — if this is empty, the filter's namespace will be used. + string metadata_namespace = 1; + + // The key to use within the namespace. + string key = 2 [(validate.rules).string = {min_bytes: 1}]; + + oneof value_type { + // The value to pair with the given key. + // + // When used for a + // :ref:`on_header_present ` + // case, if value is non-empty it'll be used instead of the header value. If both are empty, no metadata is added. 
+ // + // When used for a :ref:`on_header_missing ` + // case, a non-empty value must be provided otherwise no metadata is added. + string value = 3; + + // If present, the header's value will be matched and substituted with this. If there is no match or substitution, the header value + // is used as-is. + // + // This is only used for :ref:`on_header_present `. + // + // Note: if the `value` field is non-empty this field should be empty. + type.matcher.v4alpha.RegexMatchAndSubstitute regex_value_rewrite = 6; + } + + // The value's type — defaults to string. + ValueType type = 4; + + // How is the value encoded, default is NONE (not encoded). + // The value will be decoded accordingly before storing to metadata. + ValueEncode encode = 5; + } + + // A Rule defines what metadata to apply when a header is present or missing. + message Rule { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.header_to_metadata.v3.Config.Rule"; + + // The header that triggers this rule — required. + string header = 1 + [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; + + // If the header is present, apply this metadata KeyValuePair. + // + // If the value in the KeyValuePair is non-empty, it'll be used instead + // of the header value. + KeyValuePair on_header_present = 2; + + // If the header is not present, apply this metadata KeyValuePair. + // + // The value in the KeyValuePair must be set, since it'll be used in lieu + // of the missing header value. + KeyValuePair on_header_missing = 3; + + // Whether or not to remove the header after a rule is applied. + // + // This prevents headers from leaking. + bool remove = 4; + } + + // The list of rules to apply to requests. + repeated Rule request_rules = 1; + + // The list of rules to apply to responses. + repeated Rule response_rules = 2; +} diff --git a/docs/root/configuration/http/http_filters/header_to_metadata_filter.rst b/docs/root/configuration/http/http_filters/header_to_metadata_filter.rst index e482545a481f..f169fbe93989 100644 --- a/docs/root/configuration/http/http_filters/header_to_metadata_filter.rst +++ b/docs/root/configuration/http/http_filters/header_to_metadata_filter.rst @@ -60,6 +60,26 @@ This would then allow requests with the `x-version` header set to be matched aga endpoints with the corresponding version. Whereas requests with that header missing would be matched with the default endpoints. +If the header's value needs to be transformed before it's added to the request as +dynamic metadata, this filter supports regex matching and substitution: + +.. code-block:: yaml + + http_filters: + - name: envoy.filters.http.header_to_metadata + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.header_to_metadata.v3.Config + request_rules: + - header: ":path" + on_header_present: + metadata_namespace: envoy.lb + key: cluster + regex_value_rewrite: + pattern: + google_re2: {} + regex: "^/(cluster[\\d\\w-]+)/?.*$" + substitution: "\\1" + Note that this filter also supports per route configuration: .. code-block:: yaml diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 0166007807d6..c79658576cb7 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -16,6 +16,7 @@ Minor Behavior Changes * access loggers: applied existing buffer limits to access logs, as well as :ref:`stats ` for logged / dropped logs. 
This can be reverted temporarily by setting runtime feature `envoy.reloadable_features.disallow_unbounded_access_logs` to false. * build: run as non-root inside Docker containers. Existing behaviour can be restored by setting the environment variable `ENVOY_UID` to `0`. `ENVOY_UID` and `ENVOY_GID` can be used to set the envoy user's `uid` and `gid` respectively. +* header to metadata: on_header_missing rules with empty values are now rejected (they were skipped before). * health check: in the health check filter the :ref:`percentage of healthy servers in upstream clusters ` is now interpreted as an integer. * hot restart: added the option :option:`--use-dynamic-base-id` to select an unused base ID at startup and the option :option:`--base-id-path` to write the base id to a file (for reuse with later hot restarts). * http: changed early error path for HTTP/1.1 so that responses consistently flow through the http connection manager, and the http filter chains. This behavior may be temporarily reverted by setting runtime feature `envoy.reloadable_features.early_errors_via_hcm` to false. @@ -88,6 +89,7 @@ New Features * grpc-json: send a `x-envoy-original-method` header to grpc services. * gzip filter: added option to set zlib's next output buffer size. * hds: updated to allow to explicitly set the API version of gRPC service endpoint and message to be used. +* header to metadata: added support for regex substitutions on header values. * health checks: allow configuring health check transport sockets by specifying :ref:`transport socket match criteria `. * http: added :ref:`local_reply config ` to http_connection_manager to customize :ref:`local reply `. * http: added :ref:`stripping port from host header ` support. diff --git a/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v3/BUILD index a8dda77ddfc3..8253ea6dff83 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v3/BUILD +++ b/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v3/BUILD @@ -7,6 +7,7 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/filter/http/header_to_metadata/v2:pkg", + "//envoy/type/matcher/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto b/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto index 8e7c490f01b6..07fbba4089f7 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto @@ -2,6 +2,9 @@ syntax = "proto3"; package envoy.extensions.filters.http.header_to_metadata.v3; +import "envoy/type/matcher/v3/regex.proto"; + +import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -44,7 +47,7 @@ message Config { BASE64 = 1; } - // [#next-free-field: 6] + // [#next-free-field: 7] message KeyValuePair { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.header_to_metadata.v2.Config.KeyValuePair"; @@ -57,12 +60,22 @@ message Config { // The value to pair with the given key. 
// - // When used for a `on_header_present` case, if value is non-empty it'll be used - // instead of the header value. If both are empty, no metadata is added. + // When used for a + // :ref:`on_header_present ` + // case, if value is non-empty it'll be used instead of the header value. If both are empty, no metadata is added. + // + // When used for a :ref:`on_header_missing ` + // case, a non-empty value must be provided otherwise no metadata is added. + string value = 3 [(udpa.annotations.field_migrate).oneof_promotion = "value_type"]; + + // If present, the header's value will be matched and substituted with this. If there is no match or substitution, the header value + // is used as-is. + // + // This is only used for :ref:`on_header_present `. // - // When used for a `on_header_missing` case, a non-empty value must be provided - // otherwise no metadata is added. - string value = 3; + // Note: if the `value` field is non-empty this field should be empty. + type.matcher.v3.RegexMatchAndSubstitute regex_value_rewrite = 6 + [(udpa.annotations.field_migrate).oneof_promotion = "value_type"]; // The value's type — defaults to string. ValueType type = 4; diff --git a/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v4alpha/BUILD new file mode 100644 index 000000000000..285e2346e0ff --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v4alpha/BUILD @@ -0,0 +1,13 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/extensions/filters/http/header_to_metadata/v3:pkg", + "//envoy/type/matcher/v4alpha:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto b/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto new file mode 100644 index 000000000000..c7df11e3fcb6 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto @@ -0,0 +1,120 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.header_to_metadata.v4alpha; + +import "envoy/type/matcher/v4alpha/regex.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.header_to_metadata.v4alpha"; +option java_outer_classname = "HeaderToMetadataProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Header-To-Metadata Filter] +// +// The configuration for transforming headers into metadata. This is useful +// for matching load balancer subsets, logging, etc. +// +// Header to Metadata :ref:`configuration overview `. +// [#extension: envoy.filters.http.header_to_metadata] + +message Config { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.header_to_metadata.v3.Config"; + + enum ValueType { + STRING = 0; + + NUMBER = 1; + + // The value is a serialized `protobuf.Value + // `_. + PROTOBUF_VALUE = 2; + } + + // ValueEncode defines the encoding algorithm. + enum ValueEncode { + // The value is not encoded. 
+ NONE = 0; + + // The value is encoded in `Base64 `_. + // Note: this is mostly used for STRING and PROTOBUF_VALUE to escape the + // non-ASCII characters in the header. + BASE64 = 1; + } + + // [#next-free-field: 7] + message KeyValuePair { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.header_to_metadata.v3.Config.KeyValuePair"; + + // The namespace — if this is empty, the filter's namespace will be used. + string metadata_namespace = 1; + + // The key to use within the namespace. + string key = 2 [(validate.rules).string = {min_bytes: 1}]; + + oneof value_type { + // The value to pair with the given key. + // + // When used for a + // :ref:`on_header_present ` + // case, if value is non-empty it'll be used instead of the header value. If both are empty, no metadata is added. + // + // When used for a :ref:`on_header_missing ` + // case, a non-empty value must be provided otherwise no metadata is added. + string value = 3; + + // If present, the header's value will be matched and substituted with this. If there is no match or substitution, the header value + // is used as-is. + // + // This is only used for :ref:`on_header_present `. + // + // Note: if the `value` field is non-empty this field should be empty. + type.matcher.v4alpha.RegexMatchAndSubstitute regex_value_rewrite = 6; + } + + // The value's type — defaults to string. + ValueType type = 4; + + // How is the value encoded, default is NONE (not encoded). + // The value will be decoded accordingly before storing to metadata. + ValueEncode encode = 5; + } + + // A Rule defines what metadata to apply when a header is present or missing. + message Rule { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.header_to_metadata.v3.Config.Rule"; + + // The header that triggers this rule — required. + string header = 1 + [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; + + // If the header is present, apply this metadata KeyValuePair. + // + // If the value in the KeyValuePair is non-empty, it'll be used instead + // of the header value. + KeyValuePair on_header_present = 2; + + // If the header is not present, apply this metadata KeyValuePair. + // + // The value in the KeyValuePair must be set, since it'll be used in lieu + // of the missing header value. + KeyValuePair on_header_missing = 3; + + // Whether or not to remove the header after a rule is applied. + // + // This prevents headers from leaking. + bool remove = 4; + } + + // The list of rules to apply to requests. + repeated Rule request_rules = 1; + + // The list of rules to apply to responses. 
+ repeated Rule response_rules = 2; +} diff --git a/source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.cc b/source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.cc index 92f7728114d8..e96cbb55a53c 100644 --- a/source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.cc +++ b/source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.cc @@ -3,6 +3,7 @@ #include "envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.pb.h" #include "common/common/base64.h" +#include "common/common/regex.h" #include "common/config/well_known_names.h" #include "common/http/utility.h" #include "common/protobuf/protobuf.h" @@ -17,6 +18,14 @@ namespace Extensions { namespace HttpFilters { namespace HeaderToMetadataFilter { +Rule::Rule(const std::string& header, const ProtoRule& rule) : header_(header), rule_(rule) { + if (rule.on_header_present().has_regex_value_rewrite()) { + const auto& rewrite_spec = rule.on_header_present().regex_value_rewrite(); + regex_rewrite_ = Regex::Utility::parseRegex(rewrite_spec.pattern()); + regex_rewrite_substitution_ = rewrite_spec.substitution(); + } +} + Config::Config(const envoy::extensions::filters::http::header_to_metadata::v3::Config config, const bool per_route) { request_set_ = Config::configToVector(config.request_rules(), request_rules_); @@ -40,8 +49,6 @@ bool Config::configToVector(const ProtobufRepeatedRule& proto_rules, } for (const auto& entry : proto_rules) { - std::pair rule = {Http::LowerCaseString(entry.header()), entry}; - // Rule must have at least one of the `on_header_*` fields set. if (!entry.has_on_header_present() && !entry.has_on_header_missing()) { const auto& error = fmt::format("header to metadata filter: rule for header '{}' has neither " @@ -50,7 +57,18 @@ bool Config::configToVector(const ProtobufRepeatedRule& proto_rules, throw EnvoyException(error); } - vector.push_back(rule); + // Ensure value and regex_value_rewrite are not mixed. + // TODO(rgs1): remove this once we are on v4 and these fields are folded into a oneof. + if (!entry.on_header_present().value().empty() && + entry.on_header_present().has_regex_value_rewrite()) { + throw EnvoyException("Cannot specify both value and regex_value_rewrite"); + } + + if (entry.has_on_header_missing() && entry.on_header_missing().value().empty()) { + throw EnvoyException("Cannot specify on_header_missing rule with an empty value"); + } + + vector.emplace_back(entry.header(), entry); } return true; @@ -94,11 +112,7 @@ bool HeaderToMetadataFilter::addMetadata(StructMap& map, const std::string& meta ValueType type, ValueEncode encode) const { ProtobufWkt::Value val; - if (value.empty()) { - // No value, skip. we could allow this though. - ENVOY_LOG(debug, "no metadata value provided"); - return false; - } + ASSERT(!value.empty()); if (value.size() >= MAX_HEADER_VALUE_LEN) { // Too long, go away. @@ -138,8 +152,7 @@ bool HeaderToMetadataFilter::addMetadata(StructMap& map, const std::string& meta break; } default: - ENVOY_LOG(debug, "unknown value type"); - return false; + NOT_REACHED_GCOVR_EXCL_LINE; } // Have we seen this namespace before? 
@@ -164,15 +177,27 @@ void HeaderToMetadataFilter::writeHeaderToMetadata(Http::HeaderMap& headers, Http::StreamFilterCallbacks& callbacks) { StructMap structs_by_namespace; - for (const auto& rulePair : rules) { - const auto& header = rulePair.first; - const auto& rule = rulePair.second; + for (const auto& rule : rules) { + const auto& header = rule.header(); + const auto& proto_rule = rule.rule(); const Http::HeaderEntry* header_entry = headers.get(header); - if (header_entry != nullptr && rule.has_on_header_present()) { - const auto& keyval = rule.on_header_present(); - absl::string_view value = keyval.value().empty() ? header_entry->value().getStringView() - : absl::string_view(keyval.value()); + if (header_entry != nullptr && proto_rule.has_on_header_present()) { + const auto& keyval = proto_rule.on_header_present(); + absl::string_view value = header_entry->value().getStringView(); + // This is used to hold the rewritten header value, so that it can + // be bound to value without going out of scope. + std::string rewritten_value; + + if (!keyval.value().empty()) { + value = absl::string_view(keyval.value()); + } else { + const auto& matcher = rule.regexRewrite(); + if (matcher != nullptr) { + rewritten_value = matcher->replaceAll(value, rule.regexSubstitution()); + value = rewritten_value; + } + } if (!value.empty()) { const auto& nspace = decideNamespace(keyval.metadata_namespace()); @@ -182,20 +207,17 @@ void HeaderToMetadataFilter::writeHeaderToMetadata(Http::HeaderMap& headers, ENVOY_LOG(debug, "value is empty, not adding metadata"); } - if (rule.remove()) { + if (proto_rule.remove()) { headers.remove(header); } - } else if (rule.has_on_header_missing()) { + } else if (proto_rule.has_on_header_missing()) { // Add metadata for the header missing case. 
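The rewrite applied above goes through Envoy's compiled regex matcher, and the observable behavior (per the proto comments and the tests later in this patch) is: substitute when the pattern matches, otherwise keep the header value as-is. A rough standalone equivalent using plain RE2, shown only to illustrate those semantics (this is not the Envoy API):

#include <iostream>
#include <string>
#include "re2/re2.h"

int main() {
  // Pattern and substitution taken from the docs example in this patch.
  const RE2 pattern("^/(cluster[\\d\\w-]+)/?.*$");

  std::string matching = "/cluster-prod-001/x/y";
  RE2::GlobalReplace(&matching, pattern, "\\1");
  std::cout << matching << "\n"; // "cluster-prod-001"

  std::string non_matching = "/foo/bar?x=2";
  RE2::GlobalReplace(&non_matching, pattern, "\\1");
  std::cout << non_matching << "\n"; // unchanged: "/foo/bar?x=2"
}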
- const auto& keyval = rule.on_header_missing(); + const auto& keyval = proto_rule.on_header_missing(); - if (!keyval.value().empty()) { - const auto& nspace = decideNamespace(keyval.metadata_namespace()); - addMetadata(structs_by_namespace, nspace, keyval.key(), keyval.value(), keyval.type(), - keyval.encode()); - } else { - ENVOY_LOG(debug, "value is empty, not adding metadata"); - } + ASSERT(!keyval.value().empty()); + const auto& nspace = decideNamespace(keyval.metadata_namespace()); + addMetadata(structs_by_namespace, nspace, keyval.key(), keyval.value(), keyval.type(), + keyval.encode()); } } diff --git a/source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.h b/source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.h index 29614e6704d8..4cc3e117c4ff 100644 --- a/source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.h +++ b/source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.h @@ -8,6 +8,7 @@ #include "envoy/server/filter_config.h" #include "common/common/logger.h" +#include "common/common/matchers.h" #include "absl/strings/string_view.h" @@ -16,10 +17,26 @@ namespace Extensions { namespace HttpFilters { namespace HeaderToMetadataFilter { -using Rule = envoy::extensions::filters::http::header_to_metadata::v3::Config::Rule; +using ProtoRule = envoy::extensions::filters::http::header_to_metadata::v3::Config::Rule; using ValueType = envoy::extensions::filters::http::header_to_metadata::v3::Config::ValueType; using ValueEncode = envoy::extensions::filters::http::header_to_metadata::v3::Config::ValueEncode; -using HeaderToMetadataRules = std::vector>; + +class Rule { +public: + Rule(const std::string& header, const ProtoRule& rule); + const ProtoRule& rule() const { return rule_; } + const Regex::CompiledMatcherPtr& regexRewrite() const { return regex_rewrite_; } + const std::string& regexSubstitution() const { return regex_rewrite_substitution_; } + const Http::LowerCaseString& header() const { return header_; } + +private: + const Http::LowerCaseString header_; + const ProtoRule rule_; + Regex::CompiledMatcherPtr regex_rewrite_{}; + std::string regex_rewrite_substitution_{}; +}; + +using HeaderToMetadataRules = std::vector; // TODO(yangminzhu): Make MAX_HEADER_VALUE_LEN configurable. 
const uint32_t MAX_HEADER_VALUE_LEN = 8 * 1024; @@ -34,18 +51,13 @@ class Config : public ::Envoy::Router::RouteSpecificFilterConfig, Config(const envoy::extensions::filters::http::header_to_metadata::v3::Config config, bool per_route = false); - HeaderToMetadataRules requestRules() const { return request_rules_; } - HeaderToMetadataRules responseRules() const { return response_rules_; } + const HeaderToMetadataRules& requestRules() const { return request_rules_; } + const HeaderToMetadataRules& responseRules() const { return response_rules_; } bool doResponse() const { return response_set_; } bool doRequest() const { return request_set_; } private: - using ProtobufRepeatedRule = Protobuf::RepeatedPtrField; - - HeaderToMetadataRules request_rules_; - HeaderToMetadataRules response_rules_; - bool response_set_; - bool request_set_; + using ProtobufRepeatedRule = Protobuf::RepeatedPtrField; /** * configToVector is a helper function for converting from configuration (protobuf types) into @@ -60,6 +72,11 @@ class Config : public ::Envoy::Router::RouteSpecificFilterConfig, static bool configToVector(const ProtobufRepeatedRule&, HeaderToMetadataRules&); const std::string& decideNamespace(const std::string& nspace) const; + + HeaderToMetadataRules request_rules_; + HeaderToMetadataRules response_rules_; + bool response_set_; + bool request_set_; }; using ConfigSharedPtr = std::shared_ptr; diff --git a/test/extensions/filters/http/header_to_metadata/config_test.cc b/test/extensions/filters/http/header_to_metadata/config_test.cc index 861e4ee545a7..a9e108371dfb 100644 --- a/test/extensions/filters/http/header_to_metadata/config_test.cc +++ b/test/extensions/filters/http/header_to_metadata/config_test.cc @@ -20,6 +20,18 @@ namespace HeaderToMetadataFilter { using HeaderToMetadataProtoConfig = envoy::extensions::filters::http::header_to_metadata::v3::Config; +void testForbiddenConfig(const std::string& yaml) { + HeaderToMetadataProtoConfig proto_config; + TestUtility::loadFromYamlAndValidate(yaml, proto_config); + + testing::NiceMock context; + HeaderToMetadataConfig factory; + + EXPECT_THROW(factory.createFilterFactoryFromProto(proto_config, "stats", context), + EnvoyException); +} + +// Tests that an empty header is rejected. TEST(HeaderToMetadataFilterConfigTest, InvalidEmptyHeader) { const std::string yaml = R"EOF( request_rules: @@ -30,6 +42,7 @@ TEST(HeaderToMetadataFilterConfigTest, InvalidEmptyHeader) { EXPECT_THROW(TestUtility::loadFromYamlAndValidate(yaml, proto_config), ProtoValidationException); } +// Tests that empty (metadata) keys are rejected. TEST(HeaderToMetadataFilterConfigTest, InvalidEmptyKey) { const std::string yaml = R"EOF( request_rules: @@ -44,6 +57,7 @@ TEST(HeaderToMetadataFilterConfigTest, InvalidEmptyKey) { EXPECT_THROW(TestUtility::loadFromYamlAndValidate(yaml, proto_config), ProtoValidationException); } +// Tests that a valid config is properly consumed. TEST(HeaderToMetadataFilterConfigTest, SimpleConfig) { const std::string yaml = R"EOF( request_rules: @@ -71,6 +85,7 @@ TEST(HeaderToMetadataFilterConfigTest, SimpleConfig) { cb(filter_callbacks); } +// Tests that per route config properly overrides the global config. TEST(HeaderToMetadataFilterConfigTest, PerRouteConfig) { const std::string yaml = R"EOF( request_rules: @@ -99,6 +114,39 @@ TEST(HeaderToMetadataFilterConfigTest, PerRouteConfig) { EXPECT_FALSE(config->doResponse()); } +// Tests that configuration does not allow value and regex_value_rewrite in the same rule. 
+TEST(HeaderToMetadataFilterConfigTest, ValueAndRegex) { + const std::string yaml = R"EOF( +request_rules: + - header: x-version + on_header_present: + metadata_namespace: envoy.lb + key: cluster + value: foo + regex_value_rewrite: + pattern: + google_re2: {} + regex: "^/(cluster[\\d\\w-]+)/?.*$" + substitution: "\\1" + )EOF"; + + testForbiddenConfig(yaml); +} + +// Tests that on_header_missing rules don't allow an empty value. +TEST(HeaderToMetadataFilterConfigTest, OnHeaderMissingEmptyValue) { + const std::string yaml = R"EOF( +request_rules: + - header: x-version + on_header_missing: + metadata_namespace: envoy.lb + key: "foo" + type: STRING + )EOF"; + + testForbiddenConfig(yaml); +} + } // namespace HeaderToMetadataFilter } // namespace HttpFilters } // namespace Extensions diff --git a/test/extensions/filters/http/header_to_metadata/header_to_metadata_filter_test.cc b/test/extensions/filters/http/header_to_metadata/header_to_metadata_filter_test.cc index 493bdd465dca..906475013f93 100644 --- a/test/extensions/filters/http/header_to_metadata/header_to_metadata_filter_test.cc +++ b/test/extensions/filters/http/header_to_metadata/header_to_metadata_filter_test.cc @@ -435,7 +435,7 @@ TEST_F(HeaderToMetadataTest, RejectInvalidRule) { )EOF"; auto expected = "header to metadata filter: rule for header 'x-something' has neither " "`on_header_present` nor `on_header_missing` set"; - EXPECT_THROW_WITH_MESSAGE(initializeFilter(config), Envoy::EnvoyException, expected); + EXPECT_THROW_WITH_MESSAGE(initializeFilter(config), EnvoyException, expected); } TEST_F(HeaderToMetadataTest, PerRouteEmtpyRules) { @@ -463,6 +463,65 @@ TEST_F(HeaderToMetadataTest, NoEmptyValues) { EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false)); } +/** + * Regex substitution on header value. + */ +TEST_F(HeaderToMetadataTest, RegexSubstitution) { + const std::string config = R"EOF( +request_rules: + - header: :path + on_header_present: + metadata_namespace: envoy.lb + key: cluster + regex_value_rewrite: + pattern: + google_re2: {} + regex: "^/(cluster[\\d\\w-]+)/?.*$" + substitution: "\\1" +)EOF"; + initializeFilter(config); + + // Match with additional path elements. + { + Http::TestRequestHeaderMapImpl headers{{":path", "/cluster-prod-001/x/y"}}; + std::map expected = {{"cluster", "cluster-prod-001"}}; + + EXPECT_CALL(decoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_)); + EXPECT_CALL(req_info_, setDynamicMetadata("envoy.lb", MapEq(expected))); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false)); + } + + // Match with no additional path elements. + { + Http::TestRequestHeaderMapImpl headers{{":path", "/cluster-prod-001"}}; + std::map expected = {{"cluster", "cluster-prod-001"}}; + + EXPECT_CALL(decoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_)); + EXPECT_CALL(req_info_, setDynamicMetadata("envoy.lb", MapEq(expected))); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false)); + } + + // No match. + { + Http::TestRequestHeaderMapImpl headers{{":path", "/foo"}}; + std::map expected = {{"cluster", "/foo"}}; + + EXPECT_CALL(decoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_)); + EXPECT_CALL(req_info_, setDynamicMetadata("envoy.lb", MapEq(expected))); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false)); + } + + // No match with additional path elements. 
+ { + Http::TestRequestHeaderMapImpl headers{{":path", "/foo/bar?x=2"}}; + std::map expected = {{"cluster", "/foo/bar?x=2"}}; + + EXPECT_CALL(decoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_)); + EXPECT_CALL(req_info_, setDynamicMetadata("envoy.lb", MapEq(expected))); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false)); + } +} + } // namespace HeaderToMetadataFilter } // namespace HttpFilters } // namespace Extensions From 4ae79176f42af30d2fc51c30ff2f53144db61f6c Mon Sep 17 00:00:00 2001 From: Joshua Marantz Date: Fri, 26 Jun 2020 16:33:06 -0400 Subject: [PATCH 466/909] stats: Use dynamic stat-name for locality_zone_stat_name_ in upstream code. (#11768) Signed-off-by: Joshua Marantz --- source/common/upstream/upstream_impl.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/common/upstream/upstream_impl.h b/source/common/upstream/upstream_impl.h index 372716d07894..9e46012aaa58 100644 --- a/source/common/upstream/upstream_impl.h +++ b/source/common/upstream/upstream_impl.h @@ -149,7 +149,7 @@ class HostDescriptionImpl : virtual public HostDescription, mutable absl::Mutex metadata_mutex_; MetadataConstSharedPtr metadata_ ABSL_GUARDED_BY(metadata_mutex_); const envoy::config::core::v3::Locality locality_; - Stats::StatNameManagedStorage locality_zone_stat_name_; + Stats::StatNameDynamicStorage locality_zone_stat_name_; mutable HostStats stats_; Outlier::DetectorHostMonitorPtr outlier_detector_; HealthCheckHostMonitorPtr health_checker_; From 4b5dfa3d31eebb06ff4da4fffbe0f9c0efcc5633 Mon Sep 17 00:00:00 2001 From: Edward Viaene Date: Fri, 26 Jun 2020 22:50:48 +0200 Subject: [PATCH 467/909] docs: fix http filter decompressor/compressor name in example (#11769) Signed-off-by: Edward Viaene --- docs/root/configuration/http/http_filters/compressor_filter.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/root/configuration/http/http_filters/compressor_filter.rst b/docs/root/configuration/http/http_filters/compressor_filter.rst index 9b59b15a8d15..acb02e0f44db 100644 --- a/docs/root/configuration/http/http_filters/compressor_filter.rst +++ b/docs/root/configuration/http/http_filters/compressor_filter.rst @@ -31,7 +31,7 @@ An example configuration of the filter may look like the following: .. code-block:: yaml http_filters: - - name: compressor + - name: envoy.filters.http.compressor typed_config: "@type": type.googleapis.com/envoy.extensions.filters.http.compressor.v3.Compressor disable_on_etag_header: true From 05aaef6c123bd89cdbac175b77b4b58320a13cb5 Mon Sep 17 00:00:00 2001 From: Kotaro Inoue Date: Sat, 27 Jun 2020 05:53:58 +0900 Subject: [PATCH 468/909] bazel: Fix to avoid effects of any whitespaces in PATH when creating clang.bazelrc (#11763) This fixes some errors due to containing some whitespaces in PATH. I encountered `target names may not start with '/'`, and I found that this comes from some whitespaces contained in $PATH. I confirmed this issue on Ubuntu on WSL1, however, it is a general issue and could occur in other platforms. Commit Message: Fix to avoid effects of any whitespaces in PATH when creating clang.bazelrc Additional Description: This fixes some errors due to containing some whitespaces in PATH. 
Risk Level: Low Testing: Yes (confirmed that build worked well with generated clang.bazelrc) Signed-off-by: Kotaro Inoue --- bazel/setup_clang.sh | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/bazel/setup_clang.sh b/bazel/setup_clang.sh index 6b79aaed2484..0ed987b9d4d0 100755 --- a/bazel/setup_clang.sh +++ b/bazel/setup_clang.sh @@ -14,18 +14,18 @@ export PATH="$(${LLVM_PREFIX}/bin/llvm-config --bindir):${PATH}" RT_LIBRARY_PATH="$(dirname $(find $(llvm-config --libdir) -name libclang_rt.ubsan_standalone_cxx-x86_64.a | head -1))" echo "# Generated file, do not edit. If you want to disable clang, just delete this file. -build:clang --action_env=PATH=${PATH} +build:clang --action_env='PATH=${PATH}' build:clang --action_env=CC=clang build:clang --action_env=CXX=clang++ -build:clang --action_env=LLVM_CONFIG=${LLVM_PREFIX}/bin/llvm-config -build:clang --repo_env=LLVM_CONFIG=${LLVM_PREFIX}/bin/llvm-config -build:clang --linkopt=-L$(llvm-config --libdir) -build:clang --linkopt=-Wl,-rpath,$(llvm-config --libdir) +build:clang --action_env='LLVM_CONFIG=${LLVM_PREFIX}/bin/llvm-config' +build:clang --repo_env='LLVM_CONFIG=${LLVM_PREFIX}/bin/llvm-config' +build:clang --linkopt='-L$(llvm-config --libdir)' +build:clang --linkopt='-Wl,-rpath,$(llvm-config --libdir)' build:clang-asan --action_env=ENVOY_UBSAN_VPTR=1 build:clang-asan --copt=-fsanitize=vptr,function build:clang-asan --linkopt=-fsanitize=vptr,function -build:clang-asan --linkopt=-L${RT_LIBRARY_PATH} +build:clang-asan --linkopt='-L${RT_LIBRARY_PATH}' build:clang-asan --linkopt=-l:libclang_rt.ubsan_standalone-x86_64.a build:clang-asan --linkopt=-l:libclang_rt.ubsan_standalone_cxx-x86_64.a " > ${BAZELRC_FILE} From 19f580374cdb19e32ea95ec4f9a99db027c168b8 Mon Sep 17 00:00:00 2001 From: ankatare Date: Sat, 27 Jun 2020 02:36:50 +0530 Subject: [PATCH 469/909] Convert cluster-related v2 API test fragments to v3 (#11031) Commit Message: changes in test files for "Convert v2 API test fragments to v3" Additional Description: file changes in test/common/upstream/ Testing: format test and integration test Progress toward #10843 Signed-off-by: Abhay Narayan Katare --- test/common/upstream/cds_api_impl_test.cc | 14 +- .../upstream/cluster_factory_impl_test.cc | 65 ++-- .../upstream/cluster_manager_impl_test.cc | 279 ++++++++++-------- 3 files changed, 210 insertions(+), 148 deletions(-) diff --git a/test/common/upstream/cds_api_impl_test.cc b/test/common/upstream/cds_api_impl_test.cc index fd35bb3ebb2b..61721b797ffd 100644 --- a/test/common/upstream/cds_api_impl_test.cc +++ b/test/common/upstream/cds_api_impl_test.cc @@ -96,7 +96,7 @@ TEST_F(CdsApiImplTest, UpdateVersionOnClusterRemove) { const std::string response1_yaml = R"EOF( version_info: '0' resources: -- "@type": type.googleapis.com/envoy.api.v2.Cluster +- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster name: cluster1 type: EDS eds_cluster_config: @@ -268,13 +268,13 @@ TEST_F(CdsApiImplTest, Basic) { const std::string response1_yaml = R"EOF( version_info: '0' resources: -- "@type": type.googleapis.com/envoy.api.v2.Cluster +- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster name: cluster1 type: EDS eds_cluster_config: eds_config: path: eds path -- "@type": type.googleapis.com/envoy.api.v2.Cluster +- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster name: cluster2 type: EDS eds_cluster_config: @@ -295,13 +295,13 @@ version_info: '0' const std::string response2_yaml = R"EOF( version_info: '1' resources: -- "@type": 
type.googleapis.com/envoy.api.v2.Cluster +- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster name: cluster1 type: EDS eds_cluster_config: eds_config: path: eds path -- "@type": type.googleapis.com/envoy.api.v2.Cluster +- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster name: cluster3 type: EDS eds_cluster_config: @@ -329,13 +329,13 @@ TEST_F(CdsApiImplTest, FailureInvalidConfig) { const std::string response1_yaml = R"EOF( version_info: '0' resources: -- "@type": type.googleapis.com/envoy.api.v2.Cluster +- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster name: cluster1 type: EDS eds_cluster_config: eds_config: path: eds path -- "@type": type.googleapis.com/envoy.api.v2.Cluster +- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster name: cluster1 type: EDS eds_cluster_config: diff --git a/test/common/upstream/cluster_factory_impl_test.cc b/test/common/upstream/cluster_factory_impl_test.cc index dbeac5c6c75b..467ba917d303 100644 --- a/test/common/upstream/cluster_factory_impl_test.cc +++ b/test/common/upstream/cluster_factory_impl_test.cc @@ -79,10 +79,15 @@ TEST_F(TestStaticClusterImplTest, CreateWithoutConfig) { name: staticcluster connect_timeout: 0.25s lb_policy: ROUND_ROBIN - hosts: - - socket_address: - address: 10.0.0.1 - port_value: 443 + load_assignment: + cluster_name: staticcluster + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 10.0.0.1 + port_value: 443 cluster_type: name: envoy.clusters.test_static )EOF"; @@ -117,10 +122,15 @@ TEST_F(TestStaticClusterImplTest, CreateWithStructConfig) { name: staticcluster connect_timeout: 0.25s lb_policy: ROUND_ROBIN - hosts: - - socket_address: - address: 10.0.0.1 - port_value: 443 + load_assignment: + cluster_name: staticcluster + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 10.0.0.1 + port_value: 443 cluster_type: name: envoy.clusters.custom_static typed_config: @@ -157,10 +167,15 @@ TEST_F(TestStaticClusterImplTest, CreateWithTypedConfig) { name: staticcluster connect_timeout: 0.25s lb_policy: ROUND_ROBIN - hosts: - - socket_address: - address: 10.0.0.1 - port_value: 443 + load_assignment: + cluster_name: staticcluster + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 10.0.0.1 + port_value: 443 cluster_type: name: envoy.clusters.custom_static typed_config: @@ -196,10 +211,15 @@ TEST_F(TestStaticClusterImplTest, UnsupportedClusterType) { name: staticcluster connect_timeout: 0.25s lb_policy: ROUND_ROBIN - hosts: - - socket_address: - address: 10.0.0.1 - port_value: 443 + load_assignment: + cluster_name: staticcluster + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 10.0.0.1 + port_value: 443 cluster_type: name: envoy.clusters.bad_cluster_name typed_config: @@ -228,10 +248,15 @@ TEST_F(TestStaticClusterImplTest, HostnameWithoutDNS) { common_lb_config: consistent_hashing_lb_config: use_hostname_for_hashing: true - hosts: - - socket_address: - address: 10.0.0.1 - port_value: 443 + load_assignment: + cluster_name: staticcluster + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 10.0.0.1 + port_value: 443 cluster_type: name: envoy.clusters.test_static )EOF"; diff --git a/test/common/upstream/cluster_manager_impl_test.cc b/test/common/upstream/cluster_manager_impl_test.cc index 9df6b7bfa99f..b997cf90f4a2 100644 --- a/test/common/upstream/cluster_manager_impl_test.cc +++ b/test/common/upstream/cluster_manager_impl_test.cc @@ 
-20,7 +20,7 @@ namespace Envoy { namespace Upstream { namespace { -envoy::config::bootstrap::v3::Bootstrap parseBootstrapFromV2Yaml(const std::string& yaml) { +envoy::config::bootstrap::v3::Bootstrap parseBootstrapFromV3Yaml(const std::string& yaml) { envoy::config::bootstrap::v3::Bootstrap bootstrap; TestUtility::loadFromYaml(yaml, bootstrap, true); return bootstrap; @@ -53,13 +53,20 @@ class ClusterManagerImplTest : public testing::Test { connect_timeout: 0.250s type: STATIC lb_policy: ROUND_ROBIN - hosts: - - socket_address: - address: "127.0.0.1" - port_value: 11001 - - socket_address: - address: "127.0.0.1" - port_value: 11002 + load_assignment: + cluster_name: cluster_1 + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 11001 + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 11002 )EOF"; const std::string merge_window_enabled = R"EOF( common_lb_config: @@ -72,7 +79,7 @@ class ClusterManagerImplTest : public testing::Test { yaml += enable_merge_window ? merge_window_enabled : merge_window_disabled; - const auto& bootstrap = parseBootstrapFromV2Yaml(yaml); + const auto& bootstrap = parseBootstrapFromV3Yaml(yaml); cluster_manager_ = std::make_unique( bootstrap, factory_, factory_.stats_, factory_.tls_, factory_.runtime_, factory_.random_, @@ -136,7 +143,7 @@ envoy::config::bootstrap::v3::Bootstrap defaultConfig() { clusters: [] )EOF"; - return parseBootstrapFromV2Yaml(yaml); + return parseBootstrapFromV3Yaml(yaml); } TEST_F(ClusterManagerImplTest, MultipleProtocolClusterFail) { @@ -150,7 +157,7 @@ TEST_F(ClusterManagerImplTest, MultipleProtocolClusterFail) { http_protocol_options: {} )EOF"; EXPECT_THROW_WITH_MESSAGE( - create(parseBootstrapFromV2Yaml(yaml)), EnvoyException, + create(parseBootstrapFromV3Yaml(yaml)), EnvoyException, "cluster: Both HTTP1 and HTTP2 options may only be configured with non-default " "'protocol_selection' values"); } @@ -172,7 +179,7 @@ TEST_F(ClusterManagerImplTest, MultipleHealthCheckFail) { path: "/" )EOF"; - EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV2Yaml(yaml)), EnvoyException, + EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV3Yaml(yaml)), EnvoyException, "Multiple health checks not supported"); } @@ -189,7 +196,7 @@ TEST_F(ClusterManagerImplTest, MultipleProtocolCluster) { http_protocol_options: {} protocol_selection: USE_DOWNSTREAM_PROTOCOL )EOF"; - create(parseBootstrapFromV2Yaml(yaml)); + create(parseBootstrapFromV3Yaml(yaml)); checkConfigDump(R"EOF( static_clusters: - cluster: @@ -235,7 +242,7 @@ TEST_F(ClusterManagerImplTest, NoSdsConfig) { type: eds lb_policy: round_robin )EOF"; - EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV2Yaml(yaml)), EnvoyException, + EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV3Yaml(yaml)), EnvoyException, "cannot create an EDS cluster without an EDS config"); } @@ -332,16 +339,17 @@ TEST_F(ClusterManagerImplTest, ValidClusterName) { type: static lb_policy: round_robin load_assignment: + cluster_name: foo endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: - address: 127.0.0.1 - port_value: 11001 + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 11001 )EOF"; - create(parseBootstrapFromV2Yaml(yaml)); + create(parseBootstrapFromV3Yaml(yaml)); cluster_manager_->clusters() .find("cluster:name") ->second.get() @@ -394,7 +402,7 @@ TEST_F(ClusterManagerImplTest, PrimaryClusters) { envoy_grpc: cluster_name: static_cluster )EOF"; - 
create(parseBootstrapFromV2Yaml(yaml)); + create(parseBootstrapFromV3Yaml(yaml)); const auto& primary_clusters = cluster_manager_->primaryClusters(); EXPECT_THAT(primary_clusters, testing::UnorderedElementsAre( "static_cluster", "strict_dns_cluster", "logical_dns_cluster")); @@ -411,7 +419,7 @@ TEST_F(ClusterManagerImplTest, OriginalDstLbRestriction) { )EOF"; EXPECT_THROW_WITH_MESSAGE( - create(parseBootstrapFromV2Yaml(yaml)), EnvoyException, + create(parseBootstrapFromV3Yaml(yaml)), EnvoyException, "cluster: LB policy ROUND_ROBIN is not valid for Cluster type ORIGINAL_DST. Only " "'CLUSTER_PROVIDED' or 'ORIGINAL_DST_LB' is allowed with cluster type 'ORIGINAL_DST'"); } @@ -426,15 +434,15 @@ TEST_F(ClusterManagerImplTest, OriginalDstLbRestriction2) { lb_policy: original_dst_lb load_assignment: endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: - address: 127.0.0.1 - port_value: 11001 + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 11001 )EOF"; - EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV2Yaml(yaml)), EnvoyException, + EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV3Yaml(yaml)), EnvoyException, "cluster: LB policy hidden_envoy_deprecated_ORIGINAL_DST_LB is not " "valid for Cluster type STATIC. " "'ORIGINAL_DST_LB' is allowed only with cluster type 'ORIGINAL_DST'"); @@ -483,18 +491,19 @@ TEST_P(ClusterManagerSubsetInitializationTest, SubsetLoadBalancerInitialization) subset_selectors: - keys: [ "x" ] load_assignment: + cluster_name: cluster_1 endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: - address: 127.0.0.1 - port_value: 8000 - - endpoint: - address: - socket_address: - address: 127.0.0.1 - port_value: 8001 + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 8000 + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 8001 )EOF"; const std::string& policy_name = envoy::config::cluster::v3::Cluster::LbPolicy_Name(GetParam()); @@ -511,12 +520,12 @@ TEST_P(ClusterManagerSubsetInitializationTest, SubsetLoadBalancerInitialization) if (GetParam() == envoy::config::cluster::v3::Cluster::hidden_envoy_deprecated_ORIGINAL_DST_LB || GetParam() == envoy::config::cluster::v3::Cluster::CLUSTER_PROVIDED) { EXPECT_THROW_WITH_MESSAGE( - create(parseBootstrapFromV2Yaml(yaml)), EnvoyException, + create(parseBootstrapFromV3Yaml(yaml)), EnvoyException, fmt::format("cluster: LB policy {} cannot be combined with lb_subset_config", envoy::config::cluster::v3::Cluster::LbPolicy_Name(GetParam()))); } else { - create(parseBootstrapFromV2Yaml(yaml)); + create(parseBootstrapFromV3Yaml(yaml)); checkStats(1 /*added*/, 0 /*modified*/, 0 /*removed*/, 1 /*active*/, 0 /*warming*/); Upstream::ThreadLocalCluster* tlc = cluster_manager_->get("cluster_1"); @@ -550,7 +559,7 @@ TEST_F(ClusterManagerImplTest, SubsetLoadBalancerOriginalDstRestriction) { - keys: [ "x" ] )EOF"; - EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV2Yaml(yaml)), EnvoyException, + EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV3Yaml(yaml)), EnvoyException, "cluster: LB policy hidden_envoy_deprecated_ORIGINAL_DST_LB cannot be " "combined with lb_subset_config"); } @@ -570,7 +579,7 @@ TEST_F(ClusterManagerImplTest, SubsetLoadBalancerClusterProvidedLbRestriction) { )EOF"; EXPECT_THROW_WITH_MESSAGE( - create(parseBootstrapFromV2Yaml(yaml)), EnvoyException, + create(parseBootstrapFromV3Yaml(yaml)), EnvoyException, "cluster: LB policy CLUSTER_PROVIDED cannot be combined with 
lb_subset_config"); } @@ -588,6 +597,7 @@ TEST_F(ClusterManagerImplTest, SubsetLoadBalancerLocalityAware) { - keys: [ "x" ] locality_weight_aware: true load_assignment: + cluster_name: cluster_1 endpoints: - lb_endpoints: - endpoint: @@ -602,7 +612,7 @@ TEST_F(ClusterManagerImplTest, SubsetLoadBalancerLocalityAware) { port_value: 8001 )EOF"; - EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV2Yaml(yaml)), EnvoyException, + EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV3Yaml(yaml)), EnvoyException, "Locality weight aware subset LB requires that a " "locality_weighted_lb_config be set in cluster_1"); } @@ -618,20 +628,21 @@ TEST_F(ClusterManagerImplTest, RingHashLoadBalancerInitialization) { connect_timeout: 0.250s type: STATIC load_assignment: + cluster_name: redis_cluster endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: - address: 127.0.0.1 - port_value: 8000 - - endpoint: - address: - socket_address: - address: 127.0.0.1 - port_value: 8001 + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 8000 + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 8001 )EOF"; - create(parseBootstrapFromV2Yaml(yaml)); + create(parseBootstrapFromV3Yaml(yaml)); } TEST_F(ClusterManagerImplTest, RingHashLoadBalancerV2Initialization) { @@ -642,23 +653,24 @@ TEST_F(ClusterManagerImplTest, RingHashLoadBalancerV2Initialization) { connect_timeout: 0.250s lb_policy: RING_HASH load_assignment: + cluster_name: redis_cluster endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: - address: 127.0.0.1 - port_value: 8000 - - endpoint: - address: - socket_address: - address: 127.0.0.1 - port_value: 8001 + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 8000 + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 8001 dns_lookup_family: V4_ONLY ring_hash_lb_config: minimum_ring_size: 125 )EOF"; - create(parseBootstrapFromV2Yaml(yaml)); + create(parseBootstrapFromV3Yaml(yaml)); } // Verify EDS clusters have EDS config. 
@@ -670,7 +682,7 @@ TEST_F(ClusterManagerImplTest, EdsClustersRequireEdsConfig) { type: EDS )EOF"; - EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV2Yaml(yaml)), EnvoyException, + EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV3Yaml(yaml)), EnvoyException, "cannot create an EDS cluster without an EDS config"); } @@ -753,13 +765,14 @@ TEST_F(ClusterManagerImplTest, TcpHealthChecker) { type: STATIC lb_policy: ROUND_ROBIN load_assignment: + cluster_name: cluster_1 endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: - address: 127.0.0.1 - port_value: 11001 + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 11001 health_checks: - timeout: 1s interval: 1s @@ -777,7 +790,7 @@ TEST_F(ClusterManagerImplTest, TcpHealthChecker) { createClientConnection_( PointeesEq(Network::Utility::resolveUrl("tcp://127.0.0.1:11001")), _, _, _)) .WillOnce(Return(connection)); - create(parseBootstrapFromV2Yaml(yaml)); + create(parseBootstrapFromV3Yaml(yaml)); factory_.tls_.shutdownThread(); } @@ -790,13 +803,14 @@ TEST_F(ClusterManagerImplTest, HttpHealthChecker) { type: STATIC lb_policy: ROUND_ROBIN load_assignment: + cluster_name: cluster_1 endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: - address: 127.0.0.1 - port_value: 11001 + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 11001 health_checks: - timeout: 1s interval: 1s @@ -811,7 +825,7 @@ TEST_F(ClusterManagerImplTest, HttpHealthChecker) { createClientConnection_( PointeesEq(Network::Utility::resolveUrl("tcp://127.0.0.1:11001")), _, _, _)) .WillOnce(Return(connection)); - create(parseBootstrapFromV2Yaml(yaml)); + create(parseBootstrapFromV3Yaml(yaml)); factory_.tls_.shutdownThread(); } @@ -851,16 +865,17 @@ TEST_F(ClusterManagerImplTest, VerifyBufferLimits) { lb_policy: round_robin per_connection_buffer_limit_bytes: 8192 load_assignment: + cluster_name: cluster_1 endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: - address: 127.0.0.1 - port_value: 11001 + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 11001 )EOF"; - create(parseBootstrapFromV2Yaml(yaml)); + create(parseBootstrapFromV3Yaml(yaml)); Network::MockClientConnection* connection = new NiceMock(); EXPECT_CALL(*connection, setBufferLimits(8192)); EXPECT_CALL(factory_.tls_.dispatcher_, createClientConnection_(_, _, _, _)) @@ -1155,12 +1170,12 @@ TEST_F(ClusterManagerImplTest, RemoveWarmingCluster) { EXPECT_CALL(*cluster1, initializePhase()).Times(0); EXPECT_CALL(*cluster1, initialize(_)); EXPECT_TRUE( - cluster_manager_->addOrUpdateCluster(defaultStaticCluster("fake_cluster"), "version1")); + cluster_manager_->addOrUpdateCluster(defaultStaticCluster("fake_cluster"), "version3")); checkStats(1 /*added*/, 0 /*modified*/, 0 /*removed*/, 0 /*active*/, 1 /*warming*/); EXPECT_EQ(nullptr, cluster_manager_->get("fake_cluster")); checkConfigDump(R"EOF( dynamic_warming_clusters: - - version_info: "version1" + - version_info: "version3" cluster: "@type": type.googleapis.com/envoy.api.v2.Cluster name: "fake_cluster" @@ -1198,12 +1213,12 @@ TEST_F(ClusterManagerImplTest, ModifyWarmingCluster) { EXPECT_CALL(*cluster1, initializePhase()).Times(0); EXPECT_CALL(*cluster1, initialize(_)); EXPECT_TRUE( - cluster_manager_->addOrUpdateCluster(defaultStaticCluster("fake_cluster"), "version1")); + cluster_manager_->addOrUpdateCluster(defaultStaticCluster("fake_cluster"), "version3")); checkStats(1 /*added*/, 0 
/*modified*/, 0 /*removed*/, 0 /*active*/, 1 /*warming*/); EXPECT_EQ(nullptr, cluster_manager_->get("fake_cluster")); checkConfigDump(R"EOF( dynamic_warming_clusters: - - version_info: "version1" + - version_info: "version3" cluster: "@type": type.googleapis.com/envoy.api.v2.Cluster name: "fake_cluster" @@ -1232,11 +1247,11 @@ TEST_F(ClusterManagerImplTest, ModifyWarmingCluster) { "address": "127.0.0.1", "port_value": 11002 })EOF")), - "version2")); + "version3")); checkStats(1 /*added*/, 1 /*modified*/, 0 /*removed*/, 0 /*active*/, 1 /*warming*/); checkConfigDump(R"EOF( dynamic_warming_clusters: - - version_info: "version2" + - version_info: "version3" cluster: "@type": type.googleapis.com/envoy.api.v2.Cluster name: "fake_cluster" @@ -1619,7 +1634,7 @@ TEST_F(ClusterManagerImplTest, CloseTcpConnectionsOnHealthFailure) { // Test inline init. initialize_callback(); })); - create(parseBootstrapFromV2Yaml(yaml)); + create(parseBootstrapFromV3Yaml(yaml)); EXPECT_CALL(factory_.tls_.dispatcher_, createClientConnection_(_, _, _, _)) .WillOnce(Return(connection1)); @@ -1692,7 +1707,7 @@ TEST_F(ClusterManagerImplTest, DoNotCloseTcpConnectionsOnHealthFailure) { // Test inline init. initialize_callback(); })); - create(parseBootstrapFromV2Yaml(yaml)); + create(parseBootstrapFromV3Yaml(yaml)); EXPECT_CALL(factory_.tls_.dispatcher_, createClientConnection_(_, _, _, _)) .WillOnce(Return(connection1)); @@ -1721,6 +1736,7 @@ TEST_F(ClusterManagerImplTest, DynamicHostRemove) { address: 1.2.3.4 port_value: 80 load_assignment: + cluster_name: cluster_1 endpoints: - lb_endpoints: - endpoint: @@ -1738,7 +1754,7 @@ TEST_F(ClusterManagerImplTest, DynamicHostRemove) { Network::MockActiveDnsQuery active_dns_query; EXPECT_CALL(*dns_resolver, resolve(_, _, _)) .WillRepeatedly(DoAll(SaveArg<2>(&dns_callback), Return(&active_dns_query))); - create(parseBootstrapFromV2Yaml(yaml)); + create(parseBootstrapFromV3Yaml(yaml)); EXPECT_FALSE(cluster_manager_->get("cluster_1")->info()->addedViaApi()); // Test for no hosts returning the correct values before we have hosts. 
@@ -1864,6 +1880,7 @@ TEST_F(ClusterManagerImplTest, DynamicHostRemoveWithTls) { port_value: 80 lb_policy: ROUND_ROBIN load_assignment: + cluster_name: cluster_1 endpoints: - lb_endpoints: - endpoint: @@ -1881,7 +1898,7 @@ TEST_F(ClusterManagerImplTest, DynamicHostRemoveWithTls) { Network::MockActiveDnsQuery active_dns_query; EXPECT_CALL(*dns_resolver, resolve(_, _, _)) .WillRepeatedly(DoAll(SaveArg<2>(&dns_callback), Return(&active_dns_query))); - create(parseBootstrapFromV2Yaml(yaml)); + create(parseBootstrapFromV3Yaml(yaml)); EXPECT_FALSE(cluster_manager_->get("cluster_1")->info()->addedViaApi()); NiceMock example_com_context; @@ -2116,7 +2133,7 @@ TEST_F(ClusterManagerImplTest, UseTcpInDefaultDnsResolver) { Network::MockActiveDnsQuery active_dns_query; EXPECT_CALL(*dns_resolver, resolve(_, _, _)) .WillRepeatedly(DoAll(SaveArg<2>(&dns_callback), Return(&active_dns_query))); - create(parseBootstrapFromV2Yaml(yaml)); + create(parseBootstrapFromV3Yaml(yaml)); factory_.tls_.shutdownThread(); } @@ -2143,7 +2160,7 @@ TEST_F(ClusterManagerImplTest, UseUdpWithCustomDnsResolver) { Network::MockActiveDnsQuery active_dns_query; EXPECT_CALL(*dns_resolver, resolve(_, _, _)) .WillRepeatedly(DoAll(SaveArg<2>(&dns_callback), Return(&active_dns_query))); - create(parseBootstrapFromV2Yaml(yaml)); + create(parseBootstrapFromV3Yaml(yaml)); factory_.tls_.shutdownThread(); } @@ -2171,7 +2188,7 @@ TEST_F(ClusterManagerImplTest, UseTcpWithCustomDnsResolver) { Network::MockActiveDnsQuery active_dns_query; EXPECT_CALL(*dns_resolver, resolve(_, _, _)) .WillRepeatedly(DoAll(SaveArg<2>(&dns_callback), Return(&active_dns_query))); - create(parseBootstrapFromV2Yaml(yaml)); + create(parseBootstrapFromV3Yaml(yaml)); factory_.tls_.shutdownThread(); } @@ -2192,6 +2209,7 @@ TEST_F(ClusterManagerImplTest, DynamicHostRemoveDefaultPriority) { port_value: 80 lb_policy: ROUND_ROBIN load_assignment: + cluster_name: cluster_1 endpoints: - lb_endpoints: - endpoint: @@ -2209,7 +2227,7 @@ TEST_F(ClusterManagerImplTest, DynamicHostRemoveDefaultPriority) { Network::MockActiveDnsQuery active_dns_query; EXPECT_CALL(*dns_resolver, resolve(_, _, _)) .WillRepeatedly(DoAll(SaveArg<2>(&dns_callback), Return(&active_dns_query))); - create(parseBootstrapFromV2Yaml(yaml)); + create(parseBootstrapFromV3Yaml(yaml)); EXPECT_FALSE(cluster_manager_->get("cluster_1")->info()->addedViaApi()); dns_callback(Network::DnsResolver::ResolutionStatus::Success, @@ -2272,6 +2290,7 @@ TEST_F(ClusterManagerImplTest, ConnPoolDestroyWithDraining) { port_value: 80 lb_policy: ROUND_ROBIN load_assignment: + cluster_name: cluster_1 endpoints: - lb_endpoints: - endpoint: @@ -2289,7 +2308,7 @@ TEST_F(ClusterManagerImplTest, ConnPoolDestroyWithDraining) { Network::MockActiveDnsQuery active_dns_query; EXPECT_CALL(*dns_resolver, resolve(_, _, _)) .WillRepeatedly(DoAll(SaveArg<2>(&dns_callback), Return(&active_dns_query))); - create(parseBootstrapFromV2Yaml(yaml)); + create(parseBootstrapFromV3Yaml(yaml)); EXPECT_FALSE(cluster_manager_->get("cluster_1")->info()->addedViaApi()); dns_callback(Network::DnsResolver::ResolutionStatus::Success, @@ -2331,7 +2350,7 @@ TEST_F(ClusterManagerImplTest, OriginalDstInitialization) { "name": "cluster_1", "connect_timeout": "0.250s", "type": "original_dst", - "lb_policy": "original_dst_lb" + "lb_policy": "cluster_provided" } ] } @@ -2341,7 +2360,7 @@ TEST_F(ClusterManagerImplTest, OriginalDstInitialization) { ReadyWatcher initialized; EXPECT_CALL(initialized, ready()); - create(parseBootstrapFromV2Yaml(yaml)); + 
create(parseBootstrapFromV3Yaml(yaml)); // Set up for an initialize callback. cluster_manager_->setInitializedCb([&]() -> void { initialized.ready(); }); @@ -2634,6 +2653,7 @@ TEST_F(ClusterManagerImplTest, MergedUpdatesDestroyedOnUpdate) { type: STATIC lb_policy: ROUND_ROBIN load_assignment: + cluster_name: new_cluster endpoints: - lb_endpoints: - endpoint: @@ -2692,6 +2712,7 @@ TEST_F(ClusterManagerImplTest, MergedUpdatesDestroyedOnUpdate) { type: STATIC lb_policy: ROUND_ROBIN load_assignment: + cluster_name: new_cluster endpoints: - lb_endpoints: - endpoint: @@ -2840,15 +2861,20 @@ TEST_F(ClusterManagerImplTest, AddUpstreamFilters) { connect_timeout: 0.250s lb_policy: ROUND_ROBIN type: STATIC - hosts: - - socket_address: - address: "127.0.0.1" - port_value: 11001 + load_assignment: + cluster_name: cluster_1 + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 11001 filters: - name: envoy.test.filter )EOF"; - create(parseBootstrapFromV2Yaml(yaml)); + create(parseBootstrapFromV3Yaml(yaml)); Network::MockClientConnection* connection = new NiceMock(); EXPECT_CALL(*connection, addReadFilter(_)).Times(0); EXPECT_CALL(*connection, addWriteFilter(_)).Times(1); @@ -3069,7 +3095,7 @@ TEST_F(ClusterManagerInitHelperTest, RemoveClusterWithinInitLoop) { // socket_option_impl_test.cc. class SockoptsTest : public ClusterManagerImplTest { public: - void initialize(const std::string& yaml) { create(parseBootstrapFromV2Yaml(yaml)); } + void initialize(const std::string& yaml) { create(parseBootstrapFromV3Yaml(yaml)); } void TearDown() override { factory_.tls_.shutdownThread(); } @@ -3149,6 +3175,7 @@ TEST_F(SockoptsTest, SockoptsUnset) { lb_policy: ROUND_ROBIN type: STATIC load_assignment: + cluster_name: SockoptsCluster endpoints: - lb_endpoints: - endpoint: @@ -3170,6 +3197,7 @@ TEST_F(SockoptsTest, FreebindClusterOnly) { lb_policy: ROUND_ROBIN type: STATIC load_assignment: + cluster_name: SockoptsCluster endpoints: - lb_endpoints: - endpoint: @@ -3193,6 +3221,7 @@ TEST_F(SockoptsTest, FreebindClusterManagerOnly) { lb_policy: ROUND_ROBIN type: STATIC load_assignment: + cluster_name: SockoptsCluster endpoints: - lb_endpoints: - endpoint: @@ -3217,6 +3246,7 @@ TEST_F(SockoptsTest, FreebindClusterOverride) { lb_policy: ROUND_ROBIN type: STATIC load_assignment: + cluster_name: SockoptsCluster endpoints: - lb_endpoints: - endpoint: @@ -3243,6 +3273,7 @@ TEST_F(SockoptsTest, SockoptsClusterOnly) { lb_policy: ROUND_ROBIN type: STATIC load_assignment: + cluster_name: SockoptsCluster endpoints: - lb_endpoints: - endpoint: @@ -3271,6 +3302,7 @@ TEST_F(SockoptsTest, SockoptsClusterManagerOnly) { lb_policy: ROUND_ROBIN type: STATIC load_assignment: + cluster_name: SockoptsCluster endpoints: - lb_endpoints: - endpoint: @@ -3299,6 +3331,7 @@ TEST_F(SockoptsTest, SockoptsClusterOverride) { lb_policy: ROUND_ROBIN type: STATIC load_assignment: + cluster_name: SockoptsCluster endpoints: - lb_endpoints: - endpoint: @@ -3327,7 +3360,7 @@ TEST_F(SockoptsTest, SockoptsClusterOverride) { // tcp_keepalive_option_impl_test.cc. 
class TcpKeepaliveTest : public ClusterManagerImplTest { public: - void initialize(const std::string& yaml) { create(parseBootstrapFromV2Yaml(yaml)); } + void initialize(const std::string& yaml) { create(parseBootstrapFromV3Yaml(yaml)); } void TearDown() override { factory_.tls_.shutdownThread(); } @@ -3428,6 +3461,7 @@ TEST_F(TcpKeepaliveTest, TcpKeepaliveUnset) { lb_policy: ROUND_ROBIN type: STATIC load_assignment: + cluster_name: TcpKeepaliveCluster endpoints: - lb_endpoints: - endpoint: @@ -3449,6 +3483,7 @@ TEST_F(TcpKeepaliveTest, TcpKeepaliveCluster) { lb_policy: ROUND_ROBIN type: STATIC load_assignment: + cluster_name: TcpKeepaliveCluster endpoints: - lb_endpoints: - endpoint: @@ -3472,6 +3507,7 @@ TEST_F(TcpKeepaliveTest, TcpKeepaliveClusterProbes) { lb_policy: ROUND_ROBIN type: STATIC load_assignment: + cluster_name: TcpKeepaliveCluster endpoints: - lb_endpoints: - endpoint: @@ -3496,6 +3532,7 @@ TEST_F(TcpKeepaliveTest, TcpKeepaliveWithAllOptions) { lb_policy: ROUND_ROBIN type: STATIC load_assignment: + cluster_name: TcpKeepaliveCluster endpoints: - lb_endpoints: - endpoint: @@ -3528,7 +3565,7 @@ TEST_F(ClusterManagerImplTest, ConnPoolsDrainedOnHostSetChange) { ReadyWatcher initialized; EXPECT_CALL(initialized, ready()); - create(parseBootstrapFromV2Yaml(yaml)); + create(parseBootstrapFromV3Yaml(yaml)); // Set up for an initialize callback. cluster_manager_->setInitializedCb([&]() -> void { initialized.ready(); }); @@ -3646,7 +3683,7 @@ TEST_F(ClusterManagerImplTest, ConnPoolsNotDrainedOnHostSetChange) { ReadyWatcher initialized; EXPECT_CALL(initialized, ready()); - create(parseBootstrapFromV2Yaml(yaml)); + create(parseBootstrapFromV3Yaml(yaml)); // Set up for an initialize callback. cluster_manager_->setInitializedCb([&]() -> void { initialized.ready(); }); @@ -3719,7 +3756,7 @@ TEST_F(ClusterManagerImplTest, InvalidPriorityLocalClusterNameStatic) { local_cluster_name: new_cluster )EOF"; - EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV2Yaml(yaml)), EnvoyException, + EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV3Yaml(yaml)), EnvoyException, "Unexpected non-zero priority for local cluster 'new_cluster'."); } @@ -3744,7 +3781,7 @@ TEST_F(ClusterManagerImplTest, InvalidPriorityLocalClusterNameStrictDns) { local_cluster_name: new_cluster )EOF"; - EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV2Yaml(yaml)), EnvoyException, + EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV3Yaml(yaml)), EnvoyException, "Unexpected non-zero priority for local cluster 'new_cluster'."); } @@ -3771,7 +3808,7 @@ TEST_F(ClusterManagerImplTest, InvalidPriorityLocalClusterNameLogicalDns) { // The priority for LOGICAL_DNS endpoints are written, so we just verify that there is only a // single priority even if the endpoint was configured to be priority 10. - create(parseBootstrapFromV2Yaml(yaml)); + create(parseBootstrapFromV3Yaml(yaml)); const auto cluster = cluster_manager_->get("new_cluster"); EXPECT_EQ(1, cluster->prioritySet().hostSetsPerPriority().size()); } From e95957720cc81e15b65fbe6b865dc1bff1af2d3d Mon Sep 17 00:00:00 2001 From: asraa Date: Fri, 26 Jun 2020 17:18:32 -0400 Subject: [PATCH 470/909] [tools] add working codeql build (#11590) Add working CodeQL database build for http core. Risk level: low Testing: Build succeeds. 
The results show on my fork for some reason https://github.com/asraa/envoy/runs/781027478?check_suite_focus=true Signed-off-by: Asra Ali --- .github/workflows/codeql-daily.yml | 55 ++++++++++++++++++++++++++++ .github/workflows/codeql-push.yml | 57 ++++++++++++++++++++++++++++++ 2 files changed, 112 insertions(+) create mode 100644 .github/workflows/codeql-daily.yml create mode 100644 .github/workflows/codeql-push.yml diff --git a/.github/workflows/codeql-daily.yml b/.github/workflows/codeql-daily.yml new file mode 100644 index 000000000000..d947fa50a5a6 --- /dev/null +++ b/.github/workflows/codeql-daily.yml @@ -0,0 +1,55 @@ +on: + schedule: + - cron: '0 12 * * 4' + +jobs: + CodeQL-Build: + + strategy: + fail-fast: false + + # CodeQL runs on ubuntu-latest and windows-latest + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v2 + with: + # We must fetch at least the immediate parents so that if this is + # a pull request then we can checkout the head. + fetch-depth: 2 + + # If this run was triggered by a pull request event, then checkout + # the head of the pull request instead of the merge commit. + - run: git checkout HEAD^2 + if: ${{ github.event_name == 'pull_request' }} + + # Initializes the CodeQL tools for scanning. + - name: Initialize CodeQL + uses: github/codeql-action/init@v1 + # Override language selection by uncommenting this and choosing your languages + with: + languages: cpp + + - name: Install deps + shell: bash + run: | + sudo apt-get update && sudo apt-get install libtool cmake automake autoconf make ninja-build curl unzip virtualenv openjdk-11-jdk build-essential libc++1 + mkdir -p bin/clang10 + cd bin/clang10 + wget https://github.com/llvm/llvm-project/releases/download/llvmorg-10.0.0/clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04.tar.xz + tar -xf clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04.tar.xz --strip-components 1 + export PATH=bin/clang10/bin:$PATH + + - name: Build + run: | + bazel/setup_clang.sh bin/clang10 + bazelisk shutdown + bazelisk build -c fastbuild --spawn_strategy=local --discard_analysis_cache --nouse_action_cache --config clang --config libc++ //source/common/http/... + + - name: Clean Artifacts + run: | + git clean -xdf + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v1 diff --git a/.github/workflows/codeql-push.yml b/.github/workflows/codeql-push.yml new file mode 100644 index 000000000000..639ef7ce1442 --- /dev/null +++ b/.github/workflows/codeql-push.yml @@ -0,0 +1,57 @@ +on: + push: + paths: + - 'source/common/**' + pull_request: + +jobs: + CodeQL-Build: + + strategy: + fail-fast: false + + # CodeQL runs on ubuntu-latest and windows-latest + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v2 + with: + # We must fetch at least the immediate parents so that if this is + # a pull request then we can checkout the head. + fetch-depth: 2 + + # If this run was triggered by a pull request event, then checkout + # the head of the pull request instead of the merge commit. + - run: git checkout HEAD^2 + if: ${{ github.event_name == 'pull_request' }} + + # Initializes the CodeQL tools for scanning. 
+ - name: Initialize CodeQL + uses: github/codeql-action/init@v1 + # Override language selection by uncommenting this and choosing your languages + with: + languages: cpp + + - name: Install deps + shell: bash + run: | + sudo apt-get update && sudo apt-get install libtool cmake automake autoconf make ninja-build curl unzip virtualenv openjdk-11-jdk build-essential libc++1 + mkdir -p bin/clang10 + cd bin/clang10 + wget https://github.com/llvm/llvm-project/releases/download/llvmorg-10.0.0/clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04.tar.xz + tar -xf clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04.tar.xz --strip-components 1 + export PATH=bin/clang10/bin:$PATH + + - name: Build + run: | + bazel/setup_clang.sh bin/clang10 + bazelisk shutdown + bazelisk build -c fastbuild --spawn_strategy=local --discard_analysis_cache --nouse_action_cache --config clang --config libc++ //source/common/http/http1:codec_lib //source/common/http/http2:codec_lib + + - name: Clean Artifacts + run: | + git clean -xdf + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v1 From 84c35a782b56822d0b1a60211af78aa8052d401f Mon Sep 17 00:00:00 2001 From: Joshua Marantz Date: Fri, 26 Jun 2020 19:18:59 -0400 Subject: [PATCH 471/909] stats: in integration test, round up approximate expectations to allow for platform variations. (#11772) Commit Message: Someone had set the bounds too tight for approximate memory checks, making the test brittle to platform variations. This PR adds some slack as well as comments explaining why. Additional Description: Risk Level: low Testing: just the one test Docs Changes: n/a Release Notes: n/a Signed-off-by: Joshua Marantz --- test/integration/stats_integration_test.cc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/integration/stats_integration_test.cc b/test/integration/stats_integration_test.cc index 15d060d6d4a0..dd4fb2bfc722 100644 --- a/test/integration/stats_integration_test.cc +++ b/test/integration/stats_integration_test.cc @@ -287,7 +287,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithFakeSymbolTable) { // https://github.com/envoyproxy/envoy/blob/master/source/docs/stats.md#stats-memory-tests // for details on how to fix. EXPECT_MEMORY_EQ(m_per_cluster, 44491); - EXPECT_MEMORY_LE(m_per_cluster, 44811); + EXPECT_MEMORY_LE(m_per_cluster, 46000); // Round up to allow platform variations. } TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithRealSymbolTable) { @@ -350,7 +350,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithRealSymbolTable) { // https://github.com/envoyproxy/envoy/blob/master/source/docs/stats.md#stats-memory-tests // for details on how to fix. EXPECT_MEMORY_EQ(m_per_cluster, 36603); - EXPECT_MEMORY_LE(m_per_cluster, 36923); + EXPECT_MEMORY_LE(m_per_cluster, 38000); // Round up to allow platform variations. } TEST_P(ClusterMemoryTestRunner, MemoryLargeHostSizeWithStats) { @@ -392,7 +392,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeHostSizeWithStats) { // https://github.com/envoyproxy/envoy/blob/master/source/docs/stats.md#stats-memory-tests // for details on how to fix. EXPECT_MEMORY_EQ(m_per_host, 1380); - EXPECT_MEMORY_LE(m_per_host, 1655); + EXPECT_MEMORY_LE(m_per_host, 1800); // Round up to allow platform variations. 
} } // namespace From 8614e837cf0e4ba35eea8a45085953274b88fa03 Mon Sep 17 00:00:00 2001 From: Sunjay Bhatia <5337253+sunjayBhatia@users.noreply.github.com> Date: Sat, 27 Jun 2020 18:15:07 -0400 Subject: [PATCH 472/909] Windows build: Enable RBE build in Windows CI (#11107) Commit Message: Enable RBE build in Windows CI - Bumps envoy-build-tools and bazel-toolchains - Bumps envoy-build-ubuntu image tag - Running Windows CI via docker uses the same mechanism as Linux to find the image tag, the tags are the same for Windows and Linux images - Temporarily adds a patch to fix rules_go for Windows RBE issue executing batch scripts, see https://github.com/envoyproxy/envoy/issues/11657 - Tags/fixes some additional tests that fail in CI Additional Description: N/A Risk Level: Low, but we may observe differences in performance of Windows CI running remotely, we may need to adjust the worker pool accordingly Testing: N/A Docs Changes: N/A Release Notes: N/A Fixes https://github.com/envoyproxy/envoy/issues/10619 Signed-off-by: Sunjay Bhatia Co-authored-by: William A Rowe Jr --- .azure-pipelines/pipelines.yml | 2 ++ .bazelrc | 19 +++++++++++- .circleci/config.yml | 2 +- .devcontainer/Dockerfile | 2 +- bazel/repositories.bzl | 7 ++++- bazel/repository_locations.bzl | 16 +++++----- bazel/rules_go.patch | 30 +++++++++++++++++++ ci/run_envoy_docker_windows.sh | 5 ++-- ci/windows_ci_steps.sh | 14 +++------ test/common/event/BUILD | 1 + test/common/network/utility_test.cc | 2 ++ .../filters/http/admission_control/BUILD | 1 + test/extensions/filters/udp/dns_filter/BUILD | 1 + test/integration/BUILD | 2 ++ 14 files changed, 79 insertions(+), 25 deletions(-) create mode 100644 bazel/rules_go.patch diff --git a/.azure-pipelines/pipelines.yml b/.azure-pipelines/pipelines.yml index 599f45425384..582d2e221699 100644 --- a/.azure-pipelines/pipelines.yml +++ b/.azure-pipelines/pipelines.yml @@ -168,6 +168,8 @@ jobs: - bash: ci/run_envoy_docker_windows.sh ci/windows_ci_steps.sh displayName: "Run Windows CI" env: + ENVOY_RBE: "true" + BAZEL_BUILD_EXTRA_OPTIONS: "--config=remote-ci --config=remote-msvc-cl --jobs=$(RbeJobs)" BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) diff --git a/.bazelrc b/.bazelrc index b7bd7b87950c..5f2ce2aa0f4f 100644 --- a/.bazelrc +++ b/.bazelrc @@ -160,6 +160,11 @@ build:rbe-toolchain-gcc --config=rbe-toolchain build:rbe-toolchain-gcc --crosstool_top=@rbe_ubuntu_gcc//cc:toolchain build:rbe-toolchain-gcc --extra_toolchains=@rbe_ubuntu_gcc//config:cc-toolchain +build:rbe-toolchain-msvc-cl --host_platform=@rbe_windows_msvc_cl//config:platform +build:rbe-toolchain-msvc-cl --platforms=@rbe_windows_msvc_cl//config:platform +build:rbe-toolchain-msvc-cl --crosstool_top=@rbe_windows_msvc_cl//cc:toolchain +build:rbe-toolchain-msvc-cl --extra_toolchains=@rbe_windows_msvc_cl//config:cc-toolchain + build:remote --spawn_strategy=remote,sandboxed,local build:remote --strategy=Javac=remote,sandboxed,local build:remote --strategy=Closure=remote,sandboxed,local @@ -168,6 +173,15 @@ build:remote --remote_timeout=7200 build:remote --auth_enabled=true build:remote --remote_download_toplevel +# Windows bazel does not allow sandboxed as a spawn strategy +build:remote-windows --spawn_strategy=remote,local +build:remote-windows --strategy=Javac=remote,local +build:remote-windows --strategy=Closure=remote,local +build:remote-windows --strategy=Genrule=remote,local 
+build:remote-windows --remote_timeout=7200 +build:remote-windows --auth_enabled=true +build:remote-windows --remote_download_toplevel + build:remote-clang --config=remote build:remote-clang --config=rbe-toolchain-clang @@ -181,9 +195,12 @@ build:remote-msan --config=remote build:remote-msan --config=rbe-toolchain-clang-libc++ build:remote-msan --config=rbe-toolchain-msan +build:remote-msvc-cl --config=remote-windows +build:remote-msvc-cl --config=rbe-toolchain-msvc-cl + # Docker sandbox # NOTE: Update this from https://github.com/envoyproxy/envoy-build-tools/blob/master/toolchains/rbe_toolchains_config.bzl#L8 -build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu:12b3d2c2ffa582507e5d6dd34632b2b990f1b195 +build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu:f21773ab398a879f976936f72c78c9dd3718ca1e build:docker-sandbox --spawn_strategy=docker build:docker-sandbox --strategy=Javac=docker build:docker-sandbox --strategy=Closure=docker diff --git a/.circleci/config.yml b/.circleci/config.yml index 5efd1289cb1f..3bdaab466ba6 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -5,7 +5,7 @@ executors: description: "A regular build executor based on ubuntu image" docker: # NOTE: Update this from https://github.com/envoyproxy/envoy-build-tools/blob/master/toolchains/rbe_toolchains_config.bzl#L8 - - image: envoyproxy/envoy-build-ubuntu:12b3d2c2ffa582507e5d6dd34632b2b990f1b195 + - image: envoyproxy/envoy-build-ubuntu:f21773ab398a879f976936f72c78c9dd3718ca1e resource_class: xlarge working_directory: /source diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 797edace866a..0646398054d9 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -1,4 +1,4 @@ -FROM gcr.io/envoy-ci/envoy-build:12b3d2c2ffa582507e5d6dd34632b2b990f1b195 +FROM gcr.io/envoy-ci/envoy-build:f21773ab398a879f976936f72c78c9dd3718ca1e ARG USERNAME=vscode ARG USER_UID=501 diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index baf22ea109dd..d31cbe33d267 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -125,7 +125,12 @@ def _go_deps(skip_targets): # Keep the skip_targets check around until Istio Proxy has stopped using # it to exclude the Go rules. 
if "io_bazel_rules_go" not in skip_targets: - _repository_impl("io_bazel_rules_go") + _repository_impl( + name = "io_bazel_rules_go", + # TODO(wrowe, sunjayBhatia): remove when Windows RBE supports batch file invocation + patch_args = ["-p1"], + patches = ["@envoy//bazel:rules_go.patch"], + ) _repository_impl("bazel_gazelle") def envoy_dependencies(skip_targets = []): diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 6c6a949a3846..4ea42e9748a3 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -53,11 +53,11 @@ DEPENDENCY_REPOSITORIES = dict( use_category = ["build"], ), bazel_toolchains = dict( - sha256 = "144290c4166bd67e76a54f96cd504ed86416ca3ca82030282760f0823c10be48", - strip_prefix = "bazel-toolchains-3.1.1", + sha256 = "db48eed61552e25d36fe051a65d2a329cc0fb08442627e8f13960c5ab087a44e", + strip_prefix = "bazel-toolchains-3.2.0", urls = [ - "https://github.com/bazelbuild/bazel-toolchains/releases/download/3.1.1/bazel-toolchains-3.1.1.tar.gz", - "https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/archive/3.1.1.tar.gz", + "https://github.com/bazelbuild/bazel-toolchains/releases/download/3.2.0/bazel-toolchains-3.2.0.tar.gz", + "https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/archive/3.2.0.tar.gz", ], use_category = ["build"], ), @@ -67,10 +67,10 @@ DEPENDENCY_REPOSITORIES = dict( use_category = ["build"], ), envoy_build_tools = dict( - sha256 = "b0efe70a1d122fffb89570771f4ec3b912aa0a8a0ce56218223918d7737d01e2", - strip_prefix = "envoy-build-tools-3cbc1d66b9e9ead42daf69e01597cacf4fb52151", - # 2020-05-15 - urls = ["https://github.com/envoyproxy/envoy-build-tools/archive/3cbc1d66b9e9ead42daf69e01597cacf4fb52151.tar.gz"], + sha256 = "c4d27c0a5db918e861b7164d69cdffe920daafbe3f597ffdda5a1d10c1abc992", + strip_prefix = "envoy-build-tools-557ee9b44a3d08cf38d9ce6f08adb872c385d6a5", + # 2020-06-16 + urls = ["https://github.com/envoyproxy/envoy-build-tools/archive/557ee9b44a3d08cf38d9ce6f08adb872c385d6a5.tar.gz"], use_category = ["build"], ), boringssl = dict( diff --git a/bazel/rules_go.patch b/bazel/rules_go.patch new file mode 100644 index 000000000000..39f99ebb189b --- /dev/null +++ b/bazel/rules_go.patch @@ -0,0 +1,30 @@ +# +# Bazel RBE on Windows GCP workers currently will not invoke cmd.exe batch files correctly +# +# Symptom is program not found 'bazel-out', because of the way that the CreateProcess command +# is constructed by bazel with actions.run with forward slashes, e.g. the command +# cmd.exe /c "bazel-out/host/bin/external/go_sdk/builder.exe.bat" +# where cmd.exe on GCP is treating 'bazel-out' as the target, and /host as a command line switch. +# This problem was not observed on Azure CI pipelines or locally by the developers. The eventual +# fix is not specific to rules_go; this patch simply addresses immediate breakage and can be removed +# once the underlying issue within Bazel/RBE is fixed. 
+# See: +# - https://github.com/bazelbuild/rules_go/pull/2542 +# - https://github.com/envoyproxy/envoy/issues/11657 +# +diff --git a/go/private/rules/binary.bzl b/go/private/rules/binary.bzl +index b88dfd96..e68b5ece 100644 +--- a/go/private/rules/binary.bzl ++++ b/go/private/rules/binary.bzl +@@ -128,8 +128,9 @@ def _go_tool_binary_impl(ctx): + content = cmd, + ) + ctx.actions.run( +- executable = bat, +- inputs = sdk.libs + sdk.headers + sdk.tools + ctx.files.srcs + [sdk.go], ++ executable = "cmd.exe", ++ arguments = ["/S", "/C", bat.path.replace("/", "\\")], ++ inputs = sdk.libs + sdk.headers + sdk.tools + ctx.files.srcs + [sdk.go, bat], + outputs = [cout], + env = {"GOROOT": sdk.root_file.dirname}, # NOTE(#2005): avoid realpath in sandbox + mnemonic = "GoToolchainBinaryCompile", diff --git a/ci/run_envoy_docker_windows.sh b/ci/run_envoy_docker_windows.sh index 6c2cb9e4015f..a1f4e7372b52 100644 --- a/ci/run_envoy_docker_windows.sh +++ b/ci/run_envoy_docker_windows.sh @@ -2,9 +2,8 @@ set -e -# TODO(sunjayBhatia, wrowe): update this note once we have an RBE toolchain generated for Windows -# NOTE: Update this from the latest pushed image here: https://hub.docker.com/r/envoyproxy/envoy-build-windows2019/tags -ENVOY_BUILD_SHA="9b7dc527351b9888805377a05e5975d6ef8d6ae1" +# The image tag for the Windows image is the same as the Linux one so we use the same mechanism to find it +. $(dirname $0)/envoy_build_sha.sh [[ -z "${IMAGE_NAME}" ]] && IMAGE_NAME="envoyproxy/envoy-build-windows2019" # The IMAGE_ID defaults to the CI hash but can be set to an arbitrary image ID (found with 'docker diff --git a/ci/windows_ci_steps.sh b/ci/windows_ci_steps.sh index 0ded44c4dd82..62b101d07847 100755 --- a/ci/windows_ci_steps.sh +++ b/ci/windows_ci_steps.sh @@ -26,18 +26,12 @@ fi BAZEL_STARTUP_OPTIONS="--output_base=c:/_eb" BAZEL_BUILD_OPTIONS="-c opt --config=msvc-cl --show_task_finish --verbose_failures \ - --test_output=all ${BAZEL_BUILD_EXTRA_OPTIONS} ${BAZEL_EXTRA_TEST_OPTIONS}" - -# With all envoy-static and //test/ tree building, no need to test compile externals -# bazel ${BAZEL_STARTUP_OPTIONS} build ${BAZEL_BUILD_OPTIONS} //bazel/... --build_tag_filters=-skip_on_windows + --test_output=errors ${BAZEL_BUILD_EXTRA_OPTIONS} ${BAZEL_EXTRA_TEST_OPTIONS}" bazel ${BAZEL_STARTUP_OPTIONS} build ${BAZEL_BUILD_OPTIONS} //source/exe:envoy-static --build_tag_filters=-skip_on_windows -# TODO(sunjayBhatia, wrowe): We are disabling building/running tests for now as the AZP pipelines -# workers do not provide enough resources for us to produce fast enough or reliable enough builds. -# Test compilation of known MSVC-compatible test sources -# bazel ${BAZEL_STARTUP_OPTIONS} build ${BAZEL_BUILD_OPTIONS} //test/... --test_tag_filters=-skip_on_windows --build_tests_only - # Test invocations of known-working tests on Windows -# bazel ${BAZEL_STARTUP_OPTIONS} test ${BAZEL_BUILD_OPTIONS} //test/... --test_tag_filters=-skip_on_windows,-fails_on_windows --build_tests_only --test_summary=terse --test_output=errors +bazel ${BAZEL_STARTUP_OPTIONS} test ${BAZEL_BUILD_OPTIONS} //test/... --test_tag_filters=-skip_on_windows,-fails_on_windows --build_tests_only +# Build tests that are failing to ensure no regressions +bazel ${BAZEL_STARTUP_OPTIONS} build ${BAZEL_BUILD_OPTIONS} //test/... 
--test_tag_filters=-skip_on_windows,fails_on_windows --build_tests_only diff --git a/test/common/event/BUILD b/test/common/event/BUILD index a275f39c1630..1d306cdc3803 100644 --- a/test/common/event/BUILD +++ b/test/common/event/BUILD @@ -11,6 +11,7 @@ envoy_package() envoy_cc_test( name = "dispatcher_impl_test", srcs = ["dispatcher_impl_test.cc"], + tags = ["fails_on_windows"], deps = [ "//source/common/api:api_lib", "//source/common/event:deferred_task", diff --git a/test/common/network/utility_test.cc b/test/common/network/utility_test.cc index fa4b1b9ad5b4..96f42f40dc97 100644 --- a/test/common/network/utility_test.cc +++ b/test/common/network/utility_test.cc @@ -171,7 +171,9 @@ TEST_P(NetworkUtilityGetLocalAddress, GetLocalAddress) { TEST(NetworkUtility, GetOriginalDst) { testing::NiceMock socket; +#ifdef SOL_IP EXPECT_CALL(socket, ipVersion()).WillOnce(testing::Return(absl::nullopt)); +#endif EXPECT_EQ(nullptr, Utility::getOriginalDst(socket)); } diff --git a/test/extensions/filters/http/admission_control/BUILD b/test/extensions/filters/http/admission_control/BUILD index 1b9595276119..301e4dd2f884 100644 --- a/test/extensions/filters/http/admission_control/BUILD +++ b/test/extensions/filters/http/admission_control/BUILD @@ -60,6 +60,7 @@ envoy_extension_cc_test( name = "admission_control_integration_test", srcs = ["admission_control_integration_test.cc"], extension_name = "envoy.filters.http.admission_control", + tags = ["fails_on_windows"], deps = [ "//source/extensions/filters/http/admission_control:config", "//test/integration:http_integration_lib", diff --git a/test/extensions/filters/udp/dns_filter/BUILD b/test/extensions/filters/udp/dns_filter/BUILD index ffa82525a947..395367532530 100644 --- a/test/extensions/filters/udp/dns_filter/BUILD +++ b/test/extensions/filters/udp/dns_filter/BUILD @@ -28,6 +28,7 @@ envoy_extension_cc_test( name = "dns_filter_test", srcs = ["dns_filter_test.cc"], extension_name = "envoy.filters.udp_listener.dns_filter", + tags = ["fails_on_windows"], deps = [ ":dns_filter_test_lib", "//source/extensions/filters/udp/dns_filter:dns_filter_lib", diff --git a/test/integration/BUILD b/test/integration/BUILD index cd527321b6d4..18070ad0173e 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -237,6 +237,7 @@ envoy_cc_test( srcs = [ "drain_close_integration_test.cc", ], + tags = ["fails_on_windows"], deps = [ ":http_protocol_integration_lib", "//source/extensions/filters/http/health_check:config", @@ -293,6 +294,7 @@ envoy_cc_test( data = [ "//test/config/integration/certs", ], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "//test/test_common:utility_lib", From 98989082a68bef98cd423b4501816a419b0c10e0 Mon Sep 17 00:00:00 2001 From: antonio Date: Sat, 27 Jun 2020 22:46:36 -0400 Subject: [PATCH 473/909] dispatcher: Delay fd activation until the next itertion of the event loop. (#11750) Processing injected fd events in the same loop they are generated can result in high-throughput connections proxying data multiple times per event loop iteration, effectively starving other connections and increasing small request latency. 
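[Editor's note, not part of the patch] The diff below implements the deferral described above: when the new runtime flag (envoy.reloadable_features.activate_fds_next_event_loop) is on, FileEventImpl::activate() no longer calls event_active() directly; it accumulates the requested FileReadyType bits and arms a SchedulableCallback for the next event loop iteration, where the injected bits are merged with any real poll results and delivered in a single callback invocation. A condensed sketch of that pattern, with the asserts and the legacy flag-off path omitted; member names follow the diff below:

    void FileEventImpl::activate(uint32_t events) {
      // Arm the next-iteration callback only once; later activations just merge bits.
      if (injected_activation_events_ == 0) {
        activation_cb_->scheduleCallbackNextIteration();
      }
      injected_activation_events_ |= events;
    }

    void FileEventImpl::mergeInjectedEventsAndRunCb(uint32_t events) {
      // Combine poll-produced events with pending injected ones, then run the user callback once.
      if (injected_activation_events_ != 0) {
        events |= injected_activation_events_;
        injected_activation_events_ = 0;
        activation_cb_->cancel();
      }
      cb_(events);
    }

The next-iteration semantics come from SchedulableCallbackImpl::scheduleCallbackNextIteration(), which adds a zero-delay timer via event_add(); as the comment in the diff notes, zero-delay timers added while the current work list is being drained only run on the following loop iteration, unlike event_active(), which appends to the current work list.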
Signed-off-by: Antonio Vicente --- include/envoy/event/schedulable_cb.h | 7 + source/common/event/BUILD | 1 + source/common/event/file_event_impl.cc | 71 +++++++-- source/common/event/file_event_impl.h | 11 ++ source/common/event/schedulable_cb_impl.cc | 10 ++ source/common/event/schedulable_cb_impl.h | 1 + source/common/runtime/runtime_features.cc | 1 + test/common/event/BUILD | 1 + test/common/event/dispatcher_impl_test.cc | 51 +++++- test/common/event/file_event_impl_test.cc | 175 ++++++++++++++++++++- test/mocks/event/mocks.cc | 1 + test/mocks/event/mocks.h | 1 + 12 files changed, 306 insertions(+), 25 deletions(-) diff --git a/include/envoy/event/schedulable_cb.h b/include/envoy/event/schedulable_cb.h index f73a82b316db..3b7bc3d1bb4a 100644 --- a/include/envoy/event/schedulable_cb.h +++ b/include/envoy/event/schedulable_cb.h @@ -21,6 +21,13 @@ class SchedulableCallback { */ virtual void scheduleCallbackCurrentIteration() PURE; + /** + * Schedule the callback so it runs in the next iteration of the event loop. There are no + * ordering guarantees for callbacks scheduled for the next iteration, not even among + * next-iteration callbacks. + */ + virtual void scheduleCallbackNextIteration() PURE; + /** * Cancel pending execution of the callback. */ diff --git a/source/common/event/BUILD b/source/common/event/BUILD index 5b1025db7490..1a99e72fe7bd 100644 --- a/source/common/event/BUILD +++ b/source/common/event/BUILD @@ -33,6 +33,7 @@ envoy_cc_library( "//source/common/network:connection_lib", "//source/common/network:dns_lib", "//source/common/network:listener_lib", + "//source/common/runtime:runtime_features_lib", ], ) diff --git a/source/common/event/file_event_impl.cc b/source/common/event/file_event_impl.cc index dd306b0d0e82..95335dd36aae 100644 --- a/source/common/event/file_event_impl.cc +++ b/source/common/event/file_event_impl.cc @@ -4,6 +4,7 @@ #include "common/common/assert.h" #include "common/event/dispatcher_impl.h" +#include "common/runtime/runtime_features.h" #include "event2/event.h" @@ -12,31 +13,59 @@ namespace Event { FileEventImpl::FileEventImpl(DispatcherImpl& dispatcher, os_fd_t fd, FileReadyCb cb, FileTriggerType trigger, uint32_t events) - : cb_(cb), fd_(fd), trigger_(trigger) { + : cb_(cb), fd_(fd), trigger_(trigger), + activate_fd_events_next_event_loop_(Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.activate_fds_next_event_loop")) { #ifdef WIN32 RELEASE_ASSERT(trigger_ == FileTriggerType::Level, "libevent does not support edge triggers on Windows"); #endif assignEvents(events, &dispatcher.base()); event_add(&raw_event_, nullptr); + if (activate_fd_events_next_event_loop_) { + activation_cb_ = dispatcher.createSchedulableCallback([this]() { + ASSERT(injected_activation_events_ != 0); + mergeInjectedEventsAndRunCb(0); + }); + } } void FileEventImpl::activate(uint32_t events) { - int libevent_events = 0; - if (events & FileReadyType::Read) { - libevent_events |= EV_READ; - } + // events is not empty. + ASSERT(events != 0); + // Only supported event types are set. 
+ ASSERT((events & (FileReadyType::Read | FileReadyType::Write | FileReadyType::Closed)) == events); + + if (!activate_fd_events_next_event_loop_) { + // Legacy implementation + int libevent_events = 0; + if (events & FileReadyType::Read) { + libevent_events |= EV_READ; + } + + if (events & FileReadyType::Write) { + libevent_events |= EV_WRITE; + } + + if (events & FileReadyType::Closed) { + libevent_events |= EV_CLOSED; + } - if (events & FileReadyType::Write) { - libevent_events |= EV_WRITE; + ASSERT(libevent_events); + event_active(&raw_event_, libevent_events, 0); + return; } - if (events & FileReadyType::Closed) { - libevent_events |= EV_CLOSED; + // Schedule the activation callback so it runs as part of the next loop iteration if it is not + // already scheduled. + if (injected_activation_events_ == 0) { + ASSERT(!activation_cb_->enabled()); + activation_cb_->scheduleCallbackNextIteration(); } + ASSERT(activation_cb_->enabled()); - ASSERT(libevent_events); - event_active(&raw_event_, libevent_events, 0); + // Merge new events with pending injected events. + injected_activation_events_ |= events; } void FileEventImpl::assignEvents(uint32_t events, event_base* base) { @@ -63,17 +92,35 @@ void FileEventImpl::assignEvents(uint32_t events, event_base* base) { } ASSERT(events != 0); - event->cb_(events); + event->mergeInjectedEventsAndRunCb(events); }, this); } void FileEventImpl::setEnabled(uint32_t events) { + if (activate_fd_events_next_event_loop_ && injected_activation_events_ != 0) { + // Clear pending events on updates to the fd event mask to avoid delivering events that are no + // longer relevant. Updating the event mask will reset the fd edge trigger state so the proxy + // will be able to determine the fd read/write state without need for the injected activation + // events. + injected_activation_events_ = 0; + activation_cb_->cancel(); + } + auto* base = event_get_base(&raw_event_); event_del(&raw_event_); assignEvents(events, base); event_add(&raw_event_, nullptr); } +void FileEventImpl::mergeInjectedEventsAndRunCb(uint32_t events) { + if (activate_fd_events_next_event_loop_ && injected_activation_events_ != 0) { + events |= injected_activation_events_; + injected_activation_events_ = 0; + activation_cb_->cancel(); + } + cb_(events); +} + } // namespace Event } // namespace Envoy diff --git a/source/common/event/file_event_impl.h b/source/common/event/file_event_impl.h index 918b237fb6c1..e4044fd25194 100644 --- a/source/common/event/file_event_impl.h +++ b/source/common/event/file_event_impl.h @@ -25,10 +25,21 @@ class FileEventImpl : public FileEvent, ImplBase { private: void assignEvents(uint32_t events, event_base* base); + void mergeInjectedEventsAndRunCb(uint32_t events); FileReadyCb cb_; os_fd_t fd_; FileTriggerType trigger_; + + // Injected FileReadyType events that were scheduled by recent calls to activate() and are pending + // delivery. + uint32_t injected_activation_events_{}; + // Used to schedule delayed event activation. Armed iff pending_activation_events_ != 0. + SchedulableCallbackPtr activation_cb_; + // Latched "envoy.reloadable_features.activate_fds_next_event_loop" runtime feature. If true, fd + // events scheduled via activate are evaluated in the next iteration of the event loop after + // polling and activating new fd events. 
+ const bool activate_fd_events_next_event_loop_; }; } // namespace Event diff --git a/source/common/event/schedulable_cb_impl.cc b/source/common/event/schedulable_cb_impl.cc index 697ad2026b09..797e5bb004e1 100644 --- a/source/common/event/schedulable_cb_impl.cc +++ b/source/common/event/schedulable_cb_impl.cc @@ -21,9 +21,19 @@ SchedulableCallbackImpl::SchedulableCallbackImpl(Libevent::BasePtr& libevent, } void SchedulableCallbackImpl::scheduleCallbackCurrentIteration() { + // event_active directly adds the event to the end of the work queue so it executes in the current + // iteration of the event loop. event_active(&raw_event_, EV_TIMEOUT, 0); } +void SchedulableCallbackImpl::scheduleCallbackNextIteration() { + // libevent computes the list of timers to move to the work list after polling for fd events, but + // iteration through the work list starts. Zero delay timers added while iterating through the + // work list execute on the next iteration of the event loop. + const timeval zero_tv{}; + event_add(&raw_event_, &zero_tv); +} + void SchedulableCallbackImpl::cancel() { event_del(&raw_event_); } bool SchedulableCallbackImpl::enabled() { return 0 != evtimer_pending(&raw_event_, nullptr); } diff --git a/source/common/event/schedulable_cb_impl.h b/source/common/event/schedulable_cb_impl.h index e6bea654f4b9..48c6224f6a96 100644 --- a/source/common/event/schedulable_cb_impl.h +++ b/source/common/event/schedulable_cb_impl.h @@ -19,6 +19,7 @@ class SchedulableCallbackImpl : public SchedulableCallback, ImplBase { // SchedulableCallback implementation. void scheduleCallbackCurrentIteration() override; + void scheduleCallbackNextIteration() override; void cancel() override; bool enabled() override; diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc index 37d6fd9e2f1c..b2656438a2e1 100644 --- a/source/common/runtime/runtime_features.cc +++ b/source/common/runtime/runtime_features.cc @@ -60,6 +60,7 @@ constexpr const char* runtime_features[] = { "envoy.reloadable_features.strict_authority_validation", "envoy.reloadable_features.reject_unsupported_transfer_encodings", // Begin alphabetically sorted section. 
+ "envoy.reloadable_features.activate_fds_next_event_loop", "envoy.deprecated_features.allow_deprecated_extension_names", "envoy.reloadable_features.disallow_unbounded_access_logs", "envoy.reloadable_features.early_errors_via_hcm", diff --git a/test/common/event/BUILD b/test/common/event/BUILD index 1d306cdc3803..6cf270e9b2bc 100644 --- a/test/common/event/BUILD +++ b/test/common/event/BUILD @@ -35,6 +35,7 @@ envoy_cc_test( "//source/common/stats:isolated_store_lib", "//test/mocks:common_lib", "//test/test_common:environment_lib", + "//test/test_common:test_runtime_lib", "//test/test_common:utility_lib", ], ) diff --git a/test/common/event/dispatcher_impl_test.cc b/test/common/event/dispatcher_impl_test.cc index 60efa4c8d53a..a651162a3f03 100644 --- a/test/common/event/dispatcher_impl_test.cc +++ b/test/common/event/dispatcher_impl_test.cc @@ -45,7 +45,7 @@ class SchedulableCallbackImplTest : public testing::Test { } }; -TEST_F(SchedulableCallbackImplTest, ScheduleAndCancel) { +TEST_F(SchedulableCallbackImplTest, ScheduleCurrentAndCancel) { ReadyWatcher watcher; auto cb = dispatcher_->createSchedulableCallback([&]() { watcher.ready(); }); @@ -72,17 +72,50 @@ TEST_F(SchedulableCallbackImplTest, ScheduleAndCancel) { dispatcher_->run(Dispatcher::RunType::Block); } +TEST_F(SchedulableCallbackImplTest, ScheduleNextAndCancel) { + ReadyWatcher watcher; + + auto cb = dispatcher_->createSchedulableCallback([&]() { watcher.ready(); }); + + // Cancel is a no-op if not scheduled. + cb->cancel(); + dispatcher_->run(Dispatcher::RunType::Block); + + // Callback is not invoked if cancelled before it executes. + cb->scheduleCallbackNextIteration(); + EXPECT_TRUE(cb->enabled()); + cb->cancel(); + EXPECT_FALSE(cb->enabled()); + dispatcher_->run(Dispatcher::RunType::Block); + + // Scheduled callback executes. + cb->scheduleCallbackNextIteration(); + EXPECT_CALL(watcher, ready()); + dispatcher_->run(Dispatcher::RunType::Block); + + // Callbacks implicitly cancelled if runner is deleted. + cb->scheduleCallbackNextIteration(); + cb.reset(); + dispatcher_->run(Dispatcher::RunType::Block); +} + TEST_F(SchedulableCallbackImplTest, ScheduleOrder) { ReadyWatcher watcher0; createCallback([&]() { watcher0.ready(); }); ReadyWatcher watcher1; createCallback([&]() { watcher1.ready(); }); + ReadyWatcher watcher2; + createCallback([&]() { watcher2.ready(); }); - // Callback run in the order they are scheduled. - callbacks_[0]->scheduleCallbackCurrentIteration(); + // Current iteration callbacks run in the order they are scheduled. Next iteration callbacks run + // after current iteration callbacks. 
+ callbacks_[0]->scheduleCallbackNextIteration(); callbacks_[1]->scheduleCallbackCurrentIteration(); - EXPECT_CALL(watcher0, ready()); + callbacks_[2]->scheduleCallbackCurrentIteration(); + InSequence s; EXPECT_CALL(watcher1, ready()); + EXPECT_CALL(watcher2, ready()); + EXPECT_CALL(watcher0, ready()); dispatcher_->run(Dispatcher::RunType::Block); } @@ -103,6 +136,7 @@ TEST_F(SchedulableCallbackImplTest, ScheduleChainingAndCancellation) { callbacks_[2]->scheduleCallbackCurrentIteration(); callbacks_[3]->scheduleCallbackCurrentIteration(); callbacks_[4]->scheduleCallbackCurrentIteration(); + callbacks_[5]->scheduleCallbackNextIteration(); }); ReadyWatcher watcher2; @@ -120,14 +154,21 @@ TEST_F(SchedulableCallbackImplTest, ScheduleChainingAndCancellation) { ReadyWatcher watcher4; createCallback([&]() { watcher4.ready(); }); + ReadyWatcher watcher5; + createCallback([&]() { watcher5.ready(); }); + // Chained callbacks run in the same event loop iteration, as signaled by a single call to // prepare_watcher.ready(). watcher3 and watcher4 are not invoked because cb2 cancels - // cb3 and deletes cb4 as part of its execution. + // cb3 and deletes cb4 as part of its execution. cb5 runs after a second call to the + // prepare callback since it's scheduled for the next iteration. callbacks_[0]->scheduleCallbackCurrentIteration(); + InSequence s; EXPECT_CALL(prepare_watcher, ready()); EXPECT_CALL(watcher0, ready()); EXPECT_CALL(watcher1, ready()); EXPECT_CALL(watcher2, ready()); + EXPECT_CALL(prepare_watcher, ready()); + EXPECT_CALL(watcher5, ready()); dispatcher_->run(Dispatcher::RunType::Block); } diff --git a/test/common/event/file_event_impl_test.cc b/test/common/event/file_event_impl_test.cc index bca220731f7e..ca34b5eaf844 100644 --- a/test/common/event/file_event_impl_test.cc +++ b/test/common/event/file_event_impl_test.cc @@ -8,6 +8,7 @@ #include "test/mocks/common.h" #include "test/test_common/environment.h" +#include "test/test_common/test_runtime.h" #include "test/test_common/utility.h" #include "gtest/gtest.h" @@ -46,22 +47,37 @@ class FileEventImplTest : public testing::Test { Api::OsSysCalls& os_sys_calls_; }; -class FileEventImplActivateTest : public testing::TestWithParam { +class FileEventImplActivateTest + : public testing::TestWithParam> { public: - FileEventImplActivateTest() : os_sys_calls_(Api::OsSysCallsSingleton::get()) {} + FileEventImplActivateTest() : os_sys_calls_(Api::OsSysCallsSingleton::get()) { + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.activate_fds_next_event_loop", + activateFdsNextEventLoop() ? "true" : "false"}}); + } + + static void onWatcherReady(evwatch*, const evwatch_prepare_cb_info*, void* arg) { + // `arg` contains the ReadyWatcher passed in from evwatch_prepare_new. + auto watcher = static_cast(arg); + watcher->ready(); + } + + int domain() { + return std::get<0>(GetParam()) == Network::Address::IpVersion::v4 ? 
AF_INET : AF_INET6; + } + bool activateFdsNextEventLoop() { return std::get<1>(GetParam()); } protected: Api::OsSysCalls& os_sys_calls_; + TestScopedRuntime scoped_runtime_; }; -INSTANTIATE_TEST_SUITE_P(IpVersions, FileEventImplActivateTest, - testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), - TestUtility::ipTestParamsToString); +INSTANTIATE_TEST_SUITE_P( + IpVersions, FileEventImplActivateTest, + testing::Combine(testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), testing::Bool())); TEST_P(FileEventImplActivateTest, Activate) { - os_fd_t fd; - int domain = GetParam() == Network::Address::IpVersion::v4 ? AF_INET : AF_INET6; - fd = os_sys_calls_.socket(domain, SOCK_STREAM, 0).rc_; + os_fd_t fd = os_sys_calls_.socket(domain(), SOCK_STREAM, 0).rc_; ASSERT_TRUE(SOCKET_VALID(fd)); Api::ApiPtr api = Api::createApiForTest(); @@ -102,6 +118,149 @@ TEST_P(FileEventImplActivateTest, Activate) { os_sys_calls_.close(fd); } +TEST_P(FileEventImplActivateTest, ActivateChaining) { + os_fd_t fd = os_sys_calls_.socket(domain(), SOCK_STREAM, 0).rc_; + ASSERT_TRUE(SOCKET_VALID(fd)); + + Api::ApiPtr api = Api::createApiForTest(); + DispatcherPtr dispatcher(api->allocateDispatcher("test_thread")); + ReadyWatcher fd_event; + ReadyWatcher read_event; + ReadyWatcher write_event; + ReadyWatcher closed_event; + + ReadyWatcher prepare_watcher; + evwatch_prepare_new(&static_cast(dispatcher.get())->base(), onWatcherReady, + &prepare_watcher); + +#ifdef WIN32 + const FileTriggerType trigger = FileTriggerType::Level; +#else + const FileTriggerType trigger = FileTriggerType::Edge; +#endif + + Event::FileEventPtr file_event = dispatcher->createFileEvent( + fd, + [&](uint32_t events) -> void { + fd_event.ready(); + if (events & FileReadyType::Read) { + read_event.ready(); + file_event->activate(FileReadyType::Write); + file_event->activate(FileReadyType::Closed); + } + + if (events & FileReadyType::Write) { + write_event.ready(); + file_event->activate(FileReadyType::Closed); + } + + if (events & FileReadyType::Closed) { + closed_event.ready(); + } + }, + trigger, FileReadyType::Read | FileReadyType::Write | FileReadyType::Closed); + + testing::InSequence s; + // First loop iteration: handle scheduled read event and the real write event produced by poll. + // Note that the real and injected events are combined and delivered in a single call to the fd + // callback. + EXPECT_CALL(prepare_watcher, ready()); + EXPECT_CALL(fd_event, ready()); + EXPECT_CALL(read_event, ready()); + EXPECT_CALL(write_event, ready()); + if (activateFdsNextEventLoop()) { + // Second loop iteration: handle write and close events scheduled while handling read. + EXPECT_CALL(prepare_watcher, ready()); + EXPECT_CALL(fd_event, ready()); + EXPECT_CALL(write_event, ready()); + EXPECT_CALL(closed_event, ready()); + // Third loop iteration: handle close event scheduled while handling write. + EXPECT_CALL(prepare_watcher, ready()); + EXPECT_CALL(fd_event, ready()); + EXPECT_CALL(closed_event, ready()); + // Fourth loop iteration: poll returned no new real events. + EXPECT_CALL(prepare_watcher, ready()); + } else { + // Same loop iteration activation: handle write and close events scheduled while handling read. + EXPECT_CALL(fd_event, ready()); + EXPECT_CALL(write_event, ready()); + EXPECT_CALL(closed_event, ready()); + // Second same loop iteration activation: handle close event scheduled while handling write. 
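// (With "envoy.reloadable_features.activate_fds_next_event_loop" disabled, activate() re-runs the
// fd callback within the same poll cycle, which is why no additional prepare_watcher.ready() call
// is expected between these activations.)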
+ EXPECT_CALL(fd_event, ready()); + EXPECT_CALL(closed_event, ready()); + // Second loop iteration: poll returned no new real events. + EXPECT_CALL(prepare_watcher, ready()); + } + + file_event->activate(FileReadyType::Read); + dispatcher->run(Event::Dispatcher::RunType::NonBlock); + + os_sys_calls_.close(fd); +} + +TEST_P(FileEventImplActivateTest, SetEnableCancelsActivate) { + os_fd_t fd = os_sys_calls_.socket(domain(), SOCK_STREAM, 0).rc_; + ASSERT_TRUE(SOCKET_VALID(fd)); + + Api::ApiPtr api = Api::createApiForTest(); + DispatcherPtr dispatcher(api->allocateDispatcher("test_thread")); + ReadyWatcher fd_event; + ReadyWatcher read_event; + ReadyWatcher write_event; + ReadyWatcher closed_event; + + ReadyWatcher prepare_watcher; + evwatch_prepare_new(&static_cast(dispatcher.get())->base(), onWatcherReady, + &prepare_watcher); + +#ifdef WIN32 + const FileTriggerType trigger = FileTriggerType::Level; +#else + const FileTriggerType trigger = FileTriggerType::Edge; +#endif + + Event::FileEventPtr file_event = dispatcher->createFileEvent( + fd, + [&](uint32_t events) -> void { + fd_event.ready(); + if (events & FileReadyType::Read) { + read_event.ready(); + file_event->activate(FileReadyType::Closed); + file_event->setEnabled(FileReadyType::Write | FileReadyType::Closed); + } + + if (events & FileReadyType::Write) { + write_event.ready(); + } + + if (events & FileReadyType::Closed) { + closed_event.ready(); + } + }, + trigger, FileReadyType::Read | FileReadyType::Write | FileReadyType::Closed); + + testing::InSequence s; + // First loop iteration: handle scheduled read event and the real write event produced by poll. + // Note that the real and injected events are combined and delivered in a single call to the fd + // callback. + EXPECT_CALL(prepare_watcher, ready()); + EXPECT_CALL(fd_event, ready()); + EXPECT_CALL(read_event, ready()); + EXPECT_CALL(write_event, ready()); + // Second loop iteration: handle real write event after resetting event mask via setEnabled. Close + // injected event is discarded by the setEnable call. + EXPECT_CALL(prepare_watcher, ready()); + EXPECT_CALL(fd_event, ready()); + EXPECT_CALL(write_event, ready()); + // Third loop iteration: poll returned no new real events. + EXPECT_CALL(prepare_watcher, ready()); + + file_event->activate(FileReadyType::Read); + dispatcher->run(Event::Dispatcher::RunType::NonBlock); + + os_sys_calls_.close(fd); +} + #ifndef WIN32 // Libevent on Windows doesn't support edge trigger. 
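// For reference, the activate-on-next-iteration behavior exercised by the FileEventImplActivateTest
// cases above is gated by a runtime flag; a minimal sketch of forcing it off in a test (assuming a
// TestScopedRuntime member is in scope so the runtime loader singleton exists):
//
//   Runtime::LoaderSingleton::getExisting()->mergeValues(
//       {{"envoy.reloadable_features.activate_fds_next_event_loop", "false"}});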
 TEST_F(FileEventImplTest, EdgeTrigger) {
   ReadyWatcher read_event;
diff --git a/test/mocks/event/mocks.cc b/test/mocks/event/mocks.cc
index 60c0659bd5c1..d263c3ad68bc 100644
--- a/test/mocks/event/mocks.cc
+++ b/test/mocks/event/mocks.cc
@@ -59,6 +59,7 @@ MockSchedulableCallback::MockSchedulableCallback(MockDispatcher* dispatcher)
         .WillOnce(DoAll(SaveArg<0>(&callback_), Return(this)))
         .RetiresOnSaturation();
   ON_CALL(*this, scheduleCallbackCurrentIteration()).WillByDefault(Assign(&enabled_, true));
+  ON_CALL(*this, scheduleCallbackNextIteration()).WillByDefault(Assign(&enabled_, true));
   ON_CALL(*this, cancel()).WillByDefault(Assign(&enabled_, false));
   ON_CALL(*this, enabled()).WillByDefault(ReturnPointee(&enabled_));
 }
diff --git a/test/mocks/event/mocks.h b/test/mocks/event/mocks.h
index f11775802062..16cf4283c218 100644
--- a/test/mocks/event/mocks.h
+++ b/test/mocks/event/mocks.h
@@ -187,6 +187,7 @@ class MockSchedulableCallback : public SchedulableCallback {
   // SchedulableCallback
   MOCK_METHOD(void, scheduleCallbackCurrentIteration, ());
+  MOCK_METHOD(void, scheduleCallbackNextIteration, ());
   MOCK_METHOD(void, cancel, ());
   MOCK_METHOD(bool, enabled, ());

From 9e3dbff6578226bac55753c1421548dcf467a795 Mon Sep 17 00:00:00 2001
From: Sunjay Bhatia <5337253+sunjayBhatia@users.noreply.github.com>
Date: Sat, 27 Jun 2020 22:50:48 -0400
Subject: [PATCH 474/909] Add platform agnostic socket error macros and error details helper (#11565)

- WSAE* error codes are a disjoint set from POSIX error codes (which are
  still defined on Windows for non winsock2 errors) so we define macros
  that map to these error codes on Windows and the usual POSIX error
  codes on other platforms
- do not use strerror to get error details on Windows, instead use
  FormatMessage and add a utility function to replace strerror usage
- replace usual error code macros with new SOCKET_ERROR_ macros in
  applicable locations where they come from socket operations
- any differences in error code returned by socket operations continue
  to be handled with an #ifdef to make the differences between
  platforms clear (e.g.
when SOCKET_ERROR_AGAIN is returned by Windows but SOCKET_ERROR_IN_PROGRESS is returned by other platforms) Signed-off-by: Sunjay Bhatia Co-authored-by: Sunjay Bhatia Signed-off-by: William A Rowe Jr Co-authored-by: William A Rowe Jr --- ci/run_clang_tidy.sh | 13 +++-- include/envoy/common/platform.h | 38 ++++++++++++-- source/common/api/win32/os_sys_calls_impl.cc | 4 +- source/common/common/utility.cc | 25 ++++++++++ source/common/common/utility.h | 8 +++ source/common/filesystem/BUILD | 2 + source/common/filesystem/file_shared_impl.cc | 8 ++- .../common/filesystem/inotify/watcher_impl.cc | 2 +- .../common/filesystem/kqueue/watcher_impl.cc | 2 +- .../posix/directory_iterator_impl.cc | 5 +- .../filesystem/posix/filesystem_impl.cc | 3 +- source/common/http/http2/codec_impl.cc | 16 +++--- source/common/network/BUILD | 3 ++ source/common/network/connection_impl.cc | 19 ++++--- source/common/network/io_socket_error_impl.cc | 23 +++++---- .../common/network/io_socket_handle_impl.cc | 11 ++-- source/common/network/io_socket_handle_impl.h | 4 +- source/common/network/listen_socket_impl.cc | 7 +-- source/common/network/listener_impl.cc | 3 +- source/common/network/socket_impl.cc | 5 +- .../common/network/socket_interface_impl.cc | 7 +-- source/common/network/socket_option_impl.cc | 5 +- .../listener/http_inspector/http_inspector.cc | 4 +- .../listener/tls_inspector/tls_inspector.cc | 2 +- .../quic_listeners/quiche/platform/BUILD | 1 + .../quiche/platform/quic_logging_impl.cc | 6 ++- source/server/hot_restart_impl.cc | 2 +- source/server/hot_restarting_base.cc | 4 +- test/common/buffer/owned_impl_test.cc | 6 ++- test/common/common/utility_test.cc | 26 ++++++++++ .../common/filesystem/filesystem_impl_test.cc | 3 +- test/common/network/BUILD | 1 + test/common/network/address_impl_test.cc | 10 ++-- .../network/io_socket_handle_impl_test.cc | 50 ++++++++++++------- .../common/network/socket_option_impl_test.cc | 2 +- test/common/network/udp_listener_impl_test.cc | 9 ++-- .../http_inspector/http_inspector_test.cc | 18 +++---- .../tls_inspector/tls_inspector_test.cc | 4 +- .../udp/udp_proxy/udp_proxy_filter_test.cc | 6 +-- .../quiche/envoy_quic_writer_test.cc | 8 +-- .../quiche/platform/quic_platform_test.cc | 4 +- test/integration/integration.cc | 13 ++--- test/server/hot_restart_impl_test.cc | 6 ++- test/server/server_test.cc | 5 +- test/test_common/environment.cc | 4 +- test/test_common/network_utility.cc | 9 ++-- tools/code_format/check_format.py | 32 +++++++++--- tools/code_format/check_format_test_helper.py | 2 + tools/spelling/spelling_dictionary.txt | 2 + tools/testdata/check_format/strerror.cc | 9 ++++ 50 files changed, 321 insertions(+), 140 deletions(-) create mode 100644 tools/testdata/check_format/strerror.cc diff --git a/ci/run_clang_tidy.sh b/ci/run_clang_tidy.sh index d5c2697e2b44..bbce5f8854f1 100755 --- a/ci/run_clang_tidy.sh +++ b/ci/run_clang_tidy.sh @@ -34,10 +34,15 @@ trap cleanup EXIT "${ENVOY_SRCDIR}/tools/gen_compilation_database.py" --run_bazel_build --include_headers # Do not run clang-tidy against win32 impl -# TODO(scw00): We should run clang-tidy against win32 impl. But currently we only have -# linux ci box. 
+# TODO(scw00): We should run clang-tidy against win32 impl once we have clang-cl support for Windows function exclude_win32_impl() { - grep -v source/common/filesystem/win32/ | grep -v source/common/common/win32 | grep -v source/exe/win32 + grep -v source/common/filesystem/win32/ | grep -v source/common/common/win32 | grep -v source/exe/win32 | grep -v source/common/api/win32 +} + +# Do not run clang-tidy against macOS impl +# TODO: We should run clang-tidy against macOS impl for completeness +function exclude_macos_impl() { + grep -v source/common/filesystem/kqueue/ } # Do not run incremental clang-tidy on check_format testdata files. @@ -57,7 +62,7 @@ function exclude_third_party() { } function filter_excludes() { - exclude_testdata | exclude_chromium_url | exclude_win32_impl | exclude_third_party + exclude_testdata | exclude_chromium_url | exclude_win32_impl | exclude_macos_impl | exclude_third_party } if [[ -z "${DIFF_REF}" && "${BUILD_REASON}" != "PullRequest" ]]; then diff --git a/include/envoy/common/platform.h b/include/envoy/common/platform.h index 1d7c58fd3b2a..80870d24240d 100644 --- a/include/envoy/common/platform.h +++ b/include/envoy/common/platform.h @@ -53,13 +53,13 @@ __pragma(pack(push, 1)) definition, ##__VA_ARGS__; \ __pragma(pack(pop)) -using ssize_t = ptrdiff_t; +typedef ptrdiff_t ssize_t; // This is needed so the OsSysCalls interface compiles on Windows, // shmOpen takes mode_t as an argument. -using mode_t = uint32_t; +typedef uint32_t mode_t; -using os_fd_t = SOCKET; +typedef SOCKET os_fd_t; typedef unsigned int sa_family_t; @@ -125,6 +125,23 @@ struct msghdr { #define ENVOY_SHUT_WR SD_SEND #define ENVOY_SHUT_RDWR SD_BOTH +// winsock2 functions return distinct set of error codes, disjoint from POSIX errors (that are +// also available on Windows and set by POSIX function invocations). Here we map winsock2 error +// codes with platform agnostic macros that correspond to the same or roughly similar errors on +// POSIX systems for use in cross-platform socket error handling. +#define SOCKET_ERROR_AGAIN WSAEWOULDBLOCK +#define SOCKET_ERROR_NOT_SUP WSAEOPNOTSUPP +#define SOCKET_ERROR_AF_NO_SUP WSAEAFNOSUPPORT +#define SOCKET_ERROR_IN_PROGRESS WSAEINPROGRESS +// winsock2 does not differentiate between PERM and ACCESS violations +#define SOCKET_ERROR_PERM WSAEACCES +#define SOCKET_ERROR_ACCESS WSAEACCES +#define SOCKET_ERROR_MSG_SIZE WSAEMSGSIZE +#define SOCKET_ERROR_INTR WSAEINTR +#define SOCKET_ERROR_ADDR_NOT_AVAIL WSAEADDRNOTAVAIL +#define SOCKET_ERROR_INVAL WSAEINVAL +#define SOCKET_ERROR_ADDR_IN_USE WSAEADDRINUSE + #else // POSIX #include @@ -171,7 +188,7 @@ struct msghdr { #define IP6T_SO_ORIGINAL_DST 80 #endif -using os_fd_t = int; +typedef int os_fd_t; #define INVALID_SOCKET -1 #define SOCKET_VALID(sock) ((sock) >= 0) @@ -184,6 +201,19 @@ using os_fd_t = int; #define ENVOY_SHUT_WR SHUT_WR #define ENVOY_SHUT_RDWR SHUT_RDWR +// Mapping POSIX socket errors to common error names +#define SOCKET_ERROR_AGAIN EAGAIN +#define SOCKET_ERROR_NOT_SUP ENOTSUP +#define SOCKET_ERROR_AF_NO_SUP EAFNOSUPPORT +#define SOCKET_ERROR_IN_PROGRESS EINPROGRESS +#define SOCKET_ERROR_PERM EPERM +#define SOCKET_ERROR_ACCESS EACCES +#define SOCKET_ERROR_MSG_SIZE EMSGSIZE +#define SOCKET_ERROR_INTR EINTR +#define SOCKET_ERROR_ADDR_NOT_AVAIL EADDRNOTAVAIL +#define SOCKET_ERROR_INVAL EINVAL +#define SOCKET_ERROR_ADDR_IN_USE EADDRINUSE + #endif // Note: chromium disabled recvmmsg regardless of ndk version. 
However, the only Android target diff --git a/source/common/api/win32/os_sys_calls_impl.cc b/source/common/api/win32/os_sys_calls_impl.cc index fa8af1a137cb..49a05b9fda2a 100644 --- a/source/common/api/win32/os_sys_calls_impl.cc +++ b/source/common/api/win32/os_sys_calls_impl.cc @@ -246,7 +246,7 @@ SysCallIntResult OsSysCallsImpl::shutdown(os_fd_t sockfd, int how) { SysCallIntResult OsSysCallsImpl::socketpair(int domain, int type, int protocol, os_fd_t sv[2]) { if (sv == nullptr) { - return {SOCKET_ERROR, WSAEINVAL}; + return {SOCKET_ERROR, SOCKET_ERROR_INVAL}; } sv[0] = sv[1] = INVALID_SOCKET; @@ -274,7 +274,7 @@ SysCallIntResult OsSysCallsImpl::socketpair(int domain, int type, int protocol, a.in6.sin6_addr = in6addr_loopback; a.in6.sin6_port = 0; } else { - return {SOCKET_ERROR, WSAEINVAL}; + return {SOCKET_ERROR, SOCKET_ERROR_INVAL}; } auto onErr = [this, listener, sv]() -> void { diff --git a/source/common/common/utility.cc b/source/common/common/utility.cc index 1d4e933e8e87..eb7ba3619a39 100644 --- a/source/common/common/utility.cc +++ b/source/common/common/utility.cc @@ -38,6 +38,31 @@ using UnsignedMilliseconds = std::chrono::duration; } // namespace +const std::string errorDetails(int error_code) { +#ifndef WIN32 + // clang-format off + return strerror(error_code); + // clang-format on +#else + // Windows error codes do not correspond to POSIX errno values + // Use FormatMessage, strip trailing newline, and return "Unknown error" on failure (as on POSIX). + // Failures will usually be due to the error message not being found. + char* buffer = NULL; + DWORD msg_size = FormatMessage( + FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS | FORMAT_MESSAGE_ALLOCATE_BUFFER, + NULL, error_code, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPTSTR)&buffer, 0, NULL); + if (msg_size == 0) { + return "Unknown error"; + } + if (msg_size > 1 && buffer[msg_size - 2] == '\r' && buffer[msg_size - 1] == '\n') { + msg_size -= 2; + } + std::string error_details(buffer, msg_size); + ASSERT(LocalFree(buffer) == NULL); + return error_details; +#endif +} + std::string DateFormatter::fromTime(const SystemTime& time) const { struct CachedTime { // The string length of a number of seconds since the Epoch. E.g. for "1528270093", the length diff --git a/source/common/common/utility.h b/source/common/common/utility.h index 8cab7c8a47c9..e990a4e662ef 100644 --- a/source/common/common/utility.h +++ b/source/common/common/utility.h @@ -18,6 +18,14 @@ #include "absl/strings/string_view.h" namespace Envoy { + +/** + * Retrieve string description of error code + * @param int error code + * @return const std::string error detail description + */ +const std::string errorDetails(int error_code); + /** * Utility class for formatting dates given an absl::FormatTime style format string. 
*/ diff --git a/source/common/filesystem/BUILD b/source/common/filesystem/BUILD index 12899b58f981..4059eb96df38 100644 --- a/source/common/filesystem/BUILD +++ b/source/common/filesystem/BUILD @@ -35,6 +35,7 @@ envoy_cc_posix_library( deps = [ "//include/envoy/filesystem:filesystem_interface", "//source/common/api:os_sys_calls_lib", + "//source/common/common:utility_lib", ], ) @@ -70,6 +71,7 @@ envoy_cc_library( deps = [ "//include/envoy/filesystem:filesystem_interface", "//source/common/common:assert_lib", + "//source/common/common:utility_lib", ], ) diff --git a/source/common/filesystem/file_shared_impl.cc b/source/common/filesystem/file_shared_impl.cc index dc0e8bfcdc32..56601badb01c 100644 --- a/source/common/filesystem/file_shared_impl.cc +++ b/source/common/filesystem/file_shared_impl.cc @@ -7,7 +7,13 @@ namespace Filesystem { Api::IoError::IoErrorCode IoFileError::getErrorCode() const { return IoErrorCode::UnknownError; } -std::string IoFileError::getErrorDetails() const { return ::strerror(errno_); } +std::string IoFileError::getErrorDetails() const { + // TODO(sunjayBhatia, wrowe): Disable clang-format until win32 implementation no longer uses POSIX + // subsystem, see https://github.com/envoyproxy/envoy/issues/11655 + // clang-format off + return ::strerror(errno_); + // clang-format on +} Api::IoCallBoolResult FileSharedImpl::open(FlagSet in) { if (isOpen()) { diff --git a/source/common/filesystem/inotify/watcher_impl.cc b/source/common/filesystem/inotify/watcher_impl.cc index d3e6bd48f69c..2aeb9b6ef30b 100644 --- a/source/common/filesystem/inotify/watcher_impl.cc +++ b/source/common/filesystem/inotify/watcher_impl.cc @@ -40,7 +40,7 @@ void WatcherImpl::addWatch(absl::string_view path, uint32_t events, OnChangedCb int watch_fd = inotify_add_watch(inotify_fd_, std::string(result.directory_).c_str(), watch_mask); if (watch_fd == -1) { throw EnvoyException( - fmt::format("unable to add filesystem watch for file {}: {}", path, strerror(errno))); + fmt::format("unable to add filesystem watch for file {}: {}", path, errorDetails(errno))); } ENVOY_LOG(debug, "added watch for directory: '{}' file: '{}' fd: {}", result.directory_, diff --git a/source/common/filesystem/kqueue/watcher_impl.cc b/source/common/filesystem/kqueue/watcher_impl.cc index aa1589f0cb8a..2452eeb688b2 100644 --- a/source/common/filesystem/kqueue/watcher_impl.cc +++ b/source/common/filesystem/kqueue/watcher_impl.cc @@ -72,7 +72,7 @@ WatcherImpl::FileWatchPtr WatcherImpl::addWatch(absl::string_view path, uint32_t if (kevent(queue_, &event, 1, nullptr, 0, nullptr) == -1 || event.flags & EV_ERROR) { throw EnvoyException( - fmt::format("unable to add filesystem watch for file {}: {}", path, strerror(errno))); + fmt::format("unable to add filesystem watch for file {}: {}", path, errorDetails(errno))); } ENVOY_LOG(debug, "added watch for file: '{}' fd: {}", path, watch_fd); diff --git a/source/common/filesystem/posix/directory_iterator_impl.cc b/source/common/filesystem/posix/directory_iterator_impl.cc index 06b4a910e12a..f1808242feed 100644 --- a/source/common/filesystem/posix/directory_iterator_impl.cc +++ b/source/common/filesystem/posix/directory_iterator_impl.cc @@ -1,6 +1,7 @@ #include "envoy/common/exception.h" #include "common/common/fmt.h" +#include "common/common/utility.h" #include "common/filesystem/directory_iterator_impl.h" namespace Envoy { @@ -29,7 +30,7 @@ void DirectoryIteratorImpl::openDirectory() { dir_ = temp_dir; if (!dir_) { throw EnvoyException( - fmt::format("unable to open directory {}: 
{}", directory_path_, strerror(errno))); + fmt::format("unable to open directory {}: {}", directory_path_, errorDetails(errno))); } } @@ -38,7 +39,7 @@ void DirectoryIteratorImpl::nextEntry() { dirent* entry = ::readdir(dir_); if (entry == nullptr && errno != 0) { throw EnvoyException( - fmt::format("unable to iterate directory {}: {}", directory_path_, strerror(errno))); + fmt::format("unable to iterate directory {}: {}", directory_path_, errorDetails(errno))); } if (entry == nullptr) { diff --git a/source/common/filesystem/posix/filesystem_impl.cc b/source/common/filesystem/posix/filesystem_impl.cc index 4ebe73ce6521..580e980273a9 100644 --- a/source/common/filesystem/posix/filesystem_impl.cc +++ b/source/common/filesystem/posix/filesystem_impl.cc @@ -14,6 +14,7 @@ #include "common/common/assert.h" #include "common/common/fmt.h" #include "common/common/logger.h" +#include "common/common/utility.h" #include "common/filesystem/filesystem_impl.h" #include "absl/strings/match.h" @@ -136,7 +137,7 @@ bool InstanceImplPosix::illegalPath(const std::string& path) { const Api::SysCallStringResult canonical_path = canonicalPath(path); if (canonical_path.rc_.empty()) { ENVOY_LOG_MISC(debug, "Unable to determine canonical path for {}: {}", path, - ::strerror(canonical_path.errno_)); + errorDetails(canonical_path.errno_)); return true; } diff --git a/source/common/http/http2/codec_impl.cc b/source/common/http/http2/codec_impl.cc index d26ef284ceab..f3317c32bbb6 100644 --- a/source/common/http/http2/codec_impl.cc +++ b/source/common/http/http2/codec_impl.cc @@ -29,23 +29,23 @@ namespace Http2 { class Http2ResponseCodeDetailValues { // Invalid HTTP header field was received and stream is going to be // closed. - const absl::string_view NgHttp2ErrHttpHeader = "http2.invalid.header.field"; + const absl::string_view ng_http2_err_http_header_ = "http2.invalid.header.field"; // Violation in HTTP messaging rule. - const absl::string_view NgHttp2ErrHttpMessaging = "http2.violation.of.messaging.rule"; + const absl::string_view ng_http2_err_http_messaging_ = "http2.violation.of.messaging.rule"; // none of the above - const absl::string_view NgHttp2ErrUnknown = "http2.unknown.nghttp2.error"; + const absl::string_view ng_http2_err_unknown_ = "http2.unknown.nghttp2.error"; public: - const absl::string_view strerror(int error_code) const { + const absl::string_view errorDetails(int error_code) const { switch (error_code) { case NGHTTP2_ERR_HTTP_HEADER: - return NgHttp2ErrHttpHeader; + return ng_http2_err_http_header_; case NGHTTP2_ERR_HTTP_MESSAGING: - return NgHttp2ErrHttpMessaging; + return ng_http2_err_http_messaging_; default: - return NgHttp2ErrUnknown; + return ng_http2_err_unknown_; } } }; @@ -720,7 +720,7 @@ int ConnectionImpl::onInvalidFrame(int32_t stream_id, int error_code) { // Set details of error_code in the stream whenever we have one. 
StreamImpl* stream = getStream(stream_id); if (stream != nullptr) { - stream->setDetails(Http2ResponseCodeDetails::get().strerror(error_code)); + stream->setDetails(Http2ResponseCodeDetails::get().errorDetails(error_code)); } if (error_code == NGHTTP2_ERR_HTTP_HEADER || error_code == NGHTTP2_ERR_HTTP_MESSAGING) { diff --git a/source/common/network/BUILD b/source/common/network/BUILD index 8359c50165c4..1121ac26b16a 100644 --- a/source/common/network/BUILD +++ b/source/common/network/BUILD @@ -156,6 +156,7 @@ envoy_cc_library( deps = [ "//include/envoy/api:io_error_interface", "//source/common/common:assert_lib", + "//source/common/common:utility_lib", ], ) @@ -185,6 +186,7 @@ envoy_cc_library( ":address_lib", "//include/envoy/network:socket_interface", "//source/common/common:assert_lib", + "//source/common/common:utility_lib", "//source/common/singleton:threadsafe_singleton", ], ) @@ -277,6 +279,7 @@ envoy_cc_library( "//source/common/api:os_sys_calls_lib", "//source/common/common:assert_lib", "//source/common/common:minimal_logger_lib", + "//source/common/common:utility_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], ) diff --git a/source/common/network/connection_impl.cc b/source/common/network/connection_impl.cc index 138a1bbbb8c5..649e8057d6d2 100644 --- a/source/common/network/connection_impl.cc +++ b/source/common/network/connection_impl.cc @@ -250,14 +250,14 @@ void ConnectionImpl::noDelay(bool enable) { Api::SysCallIntResult result = socket_->setSocketOption(IPPROTO_TCP, TCP_NODELAY, &new_value, sizeof(new_value)); #if defined(__APPLE__) - if (SOCKET_FAILURE(result.rc_) && result.errno_ == EINVAL) { + if (SOCKET_FAILURE(result.rc_) && result.errno_ == SOCKET_ERROR_INVAL) { // Sometimes occurs when the connection is not yet fully formed. Empirically, TCP_NODELAY is // enabled despite this result. return; } #elif defined(WIN32) if (SOCKET_FAILURE(result.rc_) && - (result.errno_ == WSAEWOULDBLOCK || result.errno_ == WSAEINVAL)) { + (result.errno_ == SOCKET_ERROR_AGAIN || result.errno_ == SOCKET_ERROR_INVAL)) { // Sometimes occurs when the connection is not yet fully formed. Empirically, TCP_NODELAY is // enabled despite this result. return; @@ -595,7 +595,7 @@ ConnectionImpl::unixSocketPeerCredentials() const { struct ucred ucred; socklen_t ucred_size = sizeof(ucred); int rc = socket_->getSocketOption(SOL_SOCKET, SO_PEERCRED, &ucred, &ucred_size).rc_; - if (rc == -1) { + if (SOCKET_FAILURE(rc)) { return absl::nullopt; } @@ -736,7 +736,7 @@ ClientConnectionImpl::ClientConnectionImpl( if (result.rc_ < 0) { // TODO(lizan): consider add this error into transportFailureReason. ENVOY_LOG_MISC(debug, "Bind failure. Failed to bind to {}: {}", source->get()->asString(), - strerror(result.errno_)); + errorDetails(result.errno_)); bind_error_ = true; // Set a special error state to ensure asynchronous close to give the owner of the // ConnectionImpl a chance to add callbacks and detect the "disconnect". @@ -756,8 +756,15 @@ void ClientConnectionImpl::connect() { // write will become ready. ASSERT(connecting_); } else { - ASSERT(result.rc_ == -1); - if (result.errno_ == EINPROGRESS) { + ASSERT(SOCKET_FAILURE(result.rc_)); +#ifdef WIN32 + // winsock2 connect returns EWOULDBLOCK if the socket is non-blocking and the connection + // cannot be completed immediately. We do not check for EINPROGRESS as that error is for + // blocking operations. 
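// (This is the cross-platform pattern the new SOCKET_ERROR_* macros aim for: both branches compare
// against a platform-agnostic name, and an #ifdef remains only where the platforms genuinely return
// different codes. A minimal sketch of the common case, assuming an Api::SysCallIntResult returned
// by a socket call:
//
//   if (SOCKET_FAILURE(result.rc_) && result.errno_ == SOCKET_ERROR_AGAIN) {
//     ENVOY_LOG_MISC(debug, "socket not ready: {}", errorDetails(result.errno_));
//   }
// )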
+ if (result.errno_ == SOCKET_ERROR_AGAIN) { +#else + if (result.errno_ == SOCKET_ERROR_IN_PROGRESS) { +#endif ASSERT(connecting_); ENVOY_CONN_LOG(debug, "connection in progress", *this); } else { diff --git a/source/common/network/io_socket_error_impl.cc b/source/common/network/io_socket_error_impl.cc index 3382ac5acf2f..c1d3c13d78a0 100644 --- a/source/common/network/io_socket_error_impl.cc +++ b/source/common/network/io_socket_error_impl.cc @@ -1,40 +1,41 @@ #include "common/network/io_socket_error_impl.h" #include "common/common/assert.h" +#include "common/common/utility.h" namespace Envoy { namespace Network { Api::IoError::IoErrorCode IoSocketError::getErrorCode() const { switch (errno_) { - case EAGAIN: + case SOCKET_ERROR_AGAIN: ASSERT(this == IoSocketError::getIoSocketEagainInstance(), "Didn't use getIoSocketEagainInstance() to generate `Again`."); return IoErrorCode::Again; - case ENOTSUP: + case SOCKET_ERROR_NOT_SUP: return IoErrorCode::NoSupport; - case EAFNOSUPPORT: + case SOCKET_ERROR_AF_NO_SUP: return IoErrorCode::AddressFamilyNoSupport; - case EINPROGRESS: + case SOCKET_ERROR_IN_PROGRESS: return IoErrorCode::InProgress; - case EPERM: + case SOCKET_ERROR_PERM: return IoErrorCode::Permission; - case EMSGSIZE: + case SOCKET_ERROR_MSG_SIZE: return IoErrorCode::MessageTooBig; - case EINTR: + case SOCKET_ERROR_INTR: return IoErrorCode::Interrupt; - case EADDRNOTAVAIL: + case SOCKET_ERROR_ADDR_NOT_AVAIL: return IoErrorCode::AddressNotAvailable; default: - ENVOY_LOG_MISC(debug, "Unknown error code {} details {}", errno_, ::strerror(errno_)); + ENVOY_LOG_MISC(debug, "Unknown error code {} details {}", errno_, getErrorDetails()); return IoErrorCode::UnknownError; } } -std::string IoSocketError::getErrorDetails() const { return ::strerror(errno_); } +std::string IoSocketError::getErrorDetails() const { return errorDetails(errno_); } IoSocketError* IoSocketError::getIoSocketEagainInstance() { - static auto* instance = new IoSocketError(EAGAIN); + static auto* instance = new IoSocketError(SOCKET_ERROR_AGAIN); return instance; } diff --git a/source/common/network/io_socket_handle_impl.cc b/source/common/network/io_socket_handle_impl.cc index 0f4e323e4a65..bb30a4a3b204 100644 --- a/source/common/network/io_socket_handle_impl.cc +++ b/source/common/network/io_socket_handle_impl.cc @@ -3,6 +3,7 @@ #include "envoy/buffer/buffer.h" #include "common/api/os_sys_calls_impl.h" +#include "common/common/utility.h" #include "common/network/address_impl.h" #include "absl/container/fixed_array.h" @@ -272,7 +273,7 @@ Api::IoCallUint64Result IoSocketHandleImpl::recvmmsg(RawSliceArrays& slices, uin RecvMsgOutput& output) { ASSERT(output.msg_.size() == slices.size()); if (slices.empty()) { - return sysCallResultToIoCallResult(Api::SysCallIntResult{0, EAGAIN}); + return sysCallResultToIoCallResult(Api::SysCallIntResult{0, SOCKET_ERROR_AGAIN}); } const uint32_t num_packets_per_mmsg_call = slices.size(); absl::FixedArray mmsg_hdr(num_packets_per_mmsg_call); @@ -414,7 +415,7 @@ Address::InstanceConstSharedPtr IoSocketHandleImpl::localAddress() { os_sys_calls.getsockname(fd_, reinterpret_cast(&ss), &ss_len); if (result.rc_ != 0) { throw EnvoyException(fmt::format("getsockname failed for '{}': ({}) {}", fd_, result.errno_, - strerror(result.errno_))); + errorDetails(result.errno_))); } int socket_v6only = 0; if (ss.ss_family == AF_INET6) { @@ -427,7 +428,7 @@ Address::InstanceConstSharedPtr IoSocketHandleImpl::localAddress() { // exception if (SOCKET_FAILURE(result.rc_)) { throw 
EnvoyException(fmt::format("getsockopt failed for '{}': ({}) {}", fd_, result.errno_, - strerror(result.errno_))); + errorDetails(result.errno_))); } #else RELEASE_ASSERT(result.rc_ == 0, ""); @@ -444,7 +445,7 @@ Address::InstanceConstSharedPtr IoSocketHandleImpl::peerAddress() { os_sys_calls.getpeername(fd_, reinterpret_cast(&ss), &ss_len); if (result.rc_ != 0) { throw EnvoyException( - fmt::format("getpeername failed for '{}': {}", fd_, strerror(result.errno_))); + fmt::format("getpeername failed for '{}': {}", fd_, errorDetails(result.errno_))); } #ifdef __APPLE__ if (ss_len == sizeof(sockaddr) && ss.ss_family == AF_UNIX) @@ -459,7 +460,7 @@ Address::InstanceConstSharedPtr IoSocketHandleImpl::peerAddress() { result = os_sys_calls.getsockname(fd_, reinterpret_cast(&ss), &ss_len); if (result.rc_ != 0) { throw EnvoyException( - fmt::format("getsockname failed for '{}': {}", fd_, strerror(result.errno_))); + fmt::format("getsockname failed for '{}': {}", fd_, errorDetails(result.errno_))); } } return Address::addressFromSockAddr(ss, ss_len); diff --git a/source/common/network/io_socket_handle_impl.h b/source/common/network/io_socket_handle_impl.h index 110fe7e58328..03c9c28ca08d 100644 --- a/source/common/network/io_socket_handle_impl.h +++ b/source/common/network/io_socket_handle_impl.h @@ -65,10 +65,10 @@ class IoSocketHandleImpl : public IoHandle, protected Logger::LoggableasString(), strerror(result.errno_)), - result.errno_); + throw SocketBindException(fmt::format("cannot bind '{}': {}", local_address_->asString(), + errorDetails(result.errno_)), + result.errno_); } return {0, 0}; } diff --git a/source/common/network/listener_impl.cc b/source/common/network/listener_impl.cc index 71e8cfb4ad2c..045f1f2ad759 100644 --- a/source/common/network/listener_impl.cc +++ b/source/common/network/listener_impl.cc @@ -8,6 +8,7 @@ #include "common/common/assert.h" #include "common/common/empty_string.h" #include "common/common/fmt.h" +#include "common/common/utility.h" #include "common/event/dispatcher_impl.h" #include "common/event/file_event_impl.h" #include "common/network/address_impl.h" @@ -76,7 +77,7 @@ ListenerImpl::ListenerImpl(Event::DispatcherImpl& dispatcher, SocketSharedPtr so void ListenerImpl::errorCallback(evconnlistener*, void*) { // We should never get an error callback. This can happen if we run out of FDs or memory. In those // cases just crash. 
- PANIC(fmt::format("listener accept failure: {}", strerror(errno))); + PANIC(fmt::format("listener accept failure: {}", errorDetails(errno))); } void ListenerImpl::enable() { diff --git a/source/common/network/socket_impl.cc b/source/common/network/socket_impl.cc index 39a23b7d95a8..5160f25549d8 100644 --- a/source/common/network/socket_impl.cc +++ b/source/common/network/socket_impl.cc @@ -3,6 +3,7 @@ #include "envoy/common/exception.h" #include "common/api/os_sys_calls_impl.h" +#include "common/common/utility.h" #include "common/network/address_impl.h" #include "common/network/io_socket_handle_impl.h" #include "common/network/socket_interface_impl.h" @@ -62,7 +63,7 @@ Api::SysCallIntResult SocketImpl::bind(Network::Address::InstanceConstSharedPtr if (set_permissions.rc_ != 0) { throw EnvoyException(fmt::format("Failed to create socket with mode {}: {}", std::to_string(pipe->mode()), - strerror(set_permissions.errno_))); + errorDetails(set_permissions.errno_))); } } return bind_result; @@ -122,4 +123,4 @@ absl::optional SocketImpl::ipVersion() const { } } // namespace Network -} // namespace Envoy \ No newline at end of file +} // namespace Envoy diff --git a/source/common/network/socket_interface_impl.cc b/source/common/network/socket_interface_impl.cc index d6d57e82b84b..acdc8214c55d 100644 --- a/source/common/network/socket_interface_impl.cc +++ b/source/common/network/socket_interface_impl.cc @@ -4,6 +4,7 @@ #include "envoy/network/socket.h" #include "common/api/os_sys_calls_impl.h" +#include "common/common/utility.h" #include "common/network/address_impl.h" #include "common/network/io_socket_handle_impl.h" @@ -39,7 +40,7 @@ IoHandlePtr SocketInterfaceImpl::socket(Socket::Type socket_type, Address::Type const Api::SysCallSocketResult result = Api::OsSysCallsSingleton::get().socket(domain, flags, 0); RELEASE_ASSERT(SOCKET_VALID(result.rc_), - fmt::format("socket(2) failed, got error: {}", strerror(result.errno_))); + fmt::format("socket(2) failed, got error: {}", errorDetails(result.errno_))); IoHandlePtr io_handle = std::make_unique(result.rc_); #if defined(__APPLE__) || defined(WIN32) @@ -75,7 +76,7 @@ bool SocketInterfaceImpl::ipFamilySupported(int domain) { const Api::SysCallSocketResult result = os_sys_calls.socket(domain, SOCK_STREAM, 0); if (SOCKET_VALID(result.rc_)) { RELEASE_ASSERT(os_sys_calls.close(result.rc_).rc_ == 0, - fmt::format("Fail to close fd: response code {}", strerror(result.rc_))); + fmt::format("Fail to close fd: response code {}", errorDetails(result.rc_))); } return SOCKET_VALID(result.rc_); } @@ -84,4 +85,4 @@ static SocketInterfaceLoader* socket_interface_ = new SocketInterfaceLoader(std::make_unique()); } // namespace Network -} // namespace Envoy \ No newline at end of file +} // namespace Envoy diff --git a/source/common/network/socket_option_impl.cc b/source/common/network/socket_option_impl.cc index b78cb0530649..7293fa483b2f 100644 --- a/source/common/network/socket_option_impl.cc +++ b/source/common/network/socket_option_impl.cc @@ -5,6 +5,7 @@ #include "common/api/os_sys_calls_impl.h" #include "common/common/assert.h" +#include "common/common/utility.h" #include "common/network/address_impl.h" namespace Envoy { @@ -23,7 +24,7 @@ bool SocketOptionImpl::setOption(Socket& socket, SocketOptionImpl::setSocketOption(socket, optname_, value_.data(), value_.size()); if (result.rc_ != 0) { ENVOY_LOG(warn, "Setting {} option on socket failed: {}", optname_.name(), - strerror(result.errno_)); + errorDetails(result.errno_)); return false; } } @@ -50,7 +51,7 @@ 
Api::SysCallIntResult SocketOptionImpl::setSocketOption(Socket& socket, const Network::SocketOptionName& optname, const void* value, size_t size) { if (!optname.hasValue()) { - return {-1, ENOTSUP}; + return {-1, SOCKET_ERROR_NOT_SUP}; } return socket.setSocketOption(optname.level(), optname.option(), value, size); diff --git a/source/extensions/filters/listener/http_inspector/http_inspector.cc b/source/extensions/filters/listener/http_inspector/http_inspector.cc index b94e7b3322ff..90234d9b31fe 100644 --- a/source/extensions/filters/listener/http_inspector/http_inspector.cc +++ b/source/extensions/filters/listener/http_inspector/http_inspector.cc @@ -105,9 +105,9 @@ ParseState Filter::onRead() { const Api::SysCallSizeResult result = os_syscalls.recv(socket.ioHandle().fd(), buf_, Config::MAX_INSPECT_SIZE, MSG_PEEK); ENVOY_LOG(trace, "http inspector: recv: {}", result.rc_); - if (result.rc_ == -1 && result.errno_ == EAGAIN) { + if (SOCKET_FAILURE(result.rc_) && result.errno_ == SOCKET_ERROR_AGAIN) { return ParseState::Continue; - } else if (result.rc_ < 0) { + } else if (SOCKET_FAILURE(result.rc_)) { config_->stats().read_error_.inc(); return ParseState::Error; } diff --git a/source/extensions/filters/listener/tls_inspector/tls_inspector.cc b/source/extensions/filters/listener/tls_inspector/tls_inspector.cc index aa9dc7a7d603..38ea9324b243 100644 --- a/source/extensions/filters/listener/tls_inspector/tls_inspector.cc +++ b/source/extensions/filters/listener/tls_inspector/tls_inspector.cc @@ -170,7 +170,7 @@ ParseState Filter::onRead() { config_->maxClientHelloSize(), MSG_PEEK); ENVOY_LOG(trace, "tls inspector: recv: {}", result.rc_); - if (result.rc_ == -1 && result.errno_ == EAGAIN) { + if (SOCKET_FAILURE(result.rc_) && result.errno_ == SOCKET_ERROR_AGAIN) { return ParseState::Continue; } else if (result.rc_ < 0) { config_->stats().read_error_.inc(); diff --git a/source/extensions/quic_listeners/quiche/platform/BUILD b/source/extensions/quic_listeners/quiche/platform/BUILD index 69eae676a3fe..4da7f76fcc71 100644 --- a/source/extensions/quic_listeners/quiche/platform/BUILD +++ b/source/extensions/quic_listeners/quiche/platform/BUILD @@ -104,6 +104,7 @@ envoy_cc_library( deps = [ "//source/common/common:assert_lib", "//source/common/common:stl_helpers", + "//source/common/common:utility_lib", ], ) diff --git a/source/extensions/quic_listeners/quiche/platform/quic_logging_impl.cc b/source/extensions/quic_listeners/quiche/platform/quic_logging_impl.cc index 60870a742fdd..ae7caedd69d1 100644 --- a/source/extensions/quic_listeners/quiche/platform/quic_logging_impl.cc +++ b/source/extensions/quic_listeners/quiche/platform/quic_logging_impl.cc @@ -8,6 +8,8 @@ #include +#include "common/common/utility.h" + namespace quic { namespace { @@ -25,8 +27,8 @@ QuicLogEmitter::QuicLogEmitter(QuicLogLevel level) : level_(level), saved_errno_ QuicLogEmitter::~QuicLogEmitter() { if (is_perror_) { - // TODO(wub): Change to a thread-safe version of strerror. - stream_ << ": " << strerror(saved_errno_) << " [" << saved_errno_ << "]"; + // TODO(wub): Change to a thread-safe version of errorDetails. 
+ stream_ << ": " << Envoy::errorDetails(saved_errno_) << " [" << saved_errno_ << "]"; } std::string content = stream_.str(); if (!content.empty() && content.back() == '\n') { diff --git a/source/server/hot_restart_impl.cc b/source/server/hot_restart_impl.cc index 015c4f1009c4..c9e0aa7e7d02 100644 --- a/source/server/hot_restart_impl.cc +++ b/source/server/hot_restart_impl.cc @@ -41,7 +41,7 @@ SharedMemory* attachSharedMemory(uint32_t base_id, uint32_t restart_epoch) { hot_restart_os_sys_calls.shmOpen(shmem_name.c_str(), flags, S_IRUSR | S_IWUSR); if (result.rc_ == -1) { PANIC(fmt::format("cannot open shared memory region {} check user permissions. Error: {}", - shmem_name, strerror(result.errno_))); + shmem_name, errorDetails(result.errno_))); } if (restart_epoch == 0) { diff --git a/source/server/hot_restarting_base.cc b/source/server/hot_restarting_base.cc index 95d12d089226..724dd9e8b31a 100644 --- a/source/server/hot_restarting_base.cc +++ b/source/server/hot_restarting_base.cc @@ -52,7 +52,7 @@ void HotRestartingBase::bindDomainSocket(uint64_t id, const std::string& role) { const auto msg = fmt::format( "unable to bind domain socket with base_id={}, id={}, errno={} (see --base-id option)", base_id_, id, result.errno_); - if (result.errno_ == EADDRINUSE) { + if (result.errno_ == SOCKET_ERROR_ADDR_IN_USE) { throw HotRestartDomainSocketInUseException(msg); } throw EnvoyException(msg); @@ -187,7 +187,7 @@ std::unique_ptr HotRestartingBase::receiveHotRestartMessage(B message.msg_controllen = CMSG_SPACE(sizeof(int)); const int recvmsg_rc = recvmsg(my_domain_socket_, &message, 0); - if (block == Blocking::No && recvmsg_rc == -1 && errno == EAGAIN) { + if (block == Blocking::No && recvmsg_rc == -1 && errno == SOCKET_ERROR_AGAIN) { return nullptr; } RELEASE_ASSERT(recvmsg_rc != -1, fmt::format("recvmsg() returned -1, errno = {}", errno)); diff --git a/test/common/buffer/owned_impl_test.cc b/test/common/buffer/owned_impl_test.cc index bd06b3233da8..795a4416bc15 100644 --- a/test/common/buffer/owned_impl_test.cc +++ b/test/common/buffer/owned_impl_test.cc @@ -272,7 +272,8 @@ TEST_F(OwnedImplTest, Write) { EXPECT_EQ(0, result.rc_); EXPECT_EQ(1, buffer.length()); - EXPECT_CALL(os_sys_calls, writev(_, _, _)).WillOnce(Return(Api::SysCallSizeResult{-1, EAGAIN})); + EXPECT_CALL(os_sys_calls, writev(_, _, _)) + .WillOnce(Return(Api::SysCallSizeResult{-1, SOCKET_ERROR_AGAIN})); result = buffer.write(io_handle); EXPECT_EQ(Api::IoError::IoErrorCode::Again, result.err_->getErrorCode()); EXPECT_EQ(0, result.rc_); @@ -310,7 +311,8 @@ TEST_F(OwnedImplTest, Read) { EXPECT_EQ(0, buffer.length()); EXPECT_THAT(buffer.describeSlicesForTest(), testing::IsEmpty()); - EXPECT_CALL(os_sys_calls, readv(_, _, _)).WillOnce(Return(Api::SysCallSizeResult{-1, EAGAIN})); + EXPECT_CALL(os_sys_calls, readv(_, _, _)) + .WillOnce(Return(Api::SysCallSizeResult{-1, SOCKET_ERROR_AGAIN})); result = buffer.read(io_handle, 100); EXPECT_EQ(Api::IoError::IoErrorCode::Again, result.err_->getErrorCode()); EXPECT_EQ(0, result.rc_); diff --git a/test/common/common/utility_test.cc b/test/common/common/utility_test.cc index 531f7017204e..cda2a65f807a 100644 --- a/test/common/common/utility_test.cc +++ b/test/common/common/utility_test.cc @@ -18,6 +18,10 @@ #include "gtest/gtest.h" using testing::ContainerEq; +#ifdef WIN32 +using testing::HasSubstr; +using testing::Not; +#endif namespace Envoy { @@ -895,4 +899,26 @@ TEST(InlineStorageTest, InlineString) { EXPECT_EQ("Hello, world!", hello->toString()); } +#ifdef WIN32 +TEST(ErrorDetailsTest, 
WindowsFormatMessage) { + // winsock2 error + EXPECT_NE(errorDetails(SOCKET_ERROR_AGAIN), ""); + EXPECT_THAT(errorDetails(SOCKET_ERROR_AGAIN), Not(HasSubstr("\r\n"))); + EXPECT_NE(errorDetails(SOCKET_ERROR_AGAIN), "Unknown error"); + + // winsock2 error with a long message + EXPECT_NE(errorDetails(SOCKET_ERROR_MSG_SIZE), ""); + EXPECT_THAT(errorDetails(SOCKET_ERROR_MSG_SIZE), Not(HasSubstr("\r\n"))); + EXPECT_NE(errorDetails(SOCKET_ERROR_MSG_SIZE), "Unknown error"); + + // regular Windows error + EXPECT_NE(errorDetails(ERROR_FILE_NOT_FOUND), ""); + EXPECT_THAT(errorDetails(ERROR_FILE_NOT_FOUND), Not(HasSubstr("\r\n"))); + EXPECT_NE(errorDetails(ERROR_FILE_NOT_FOUND), "Unknown error"); + + // invalid error code + EXPECT_EQ(errorDetails(99999), "Unknown error"); +} +#endif + } // namespace Envoy diff --git a/test/common/filesystem/filesystem_impl_test.cc b/test/common/filesystem/filesystem_impl_test.cc index 4595099da007..7870c285e19d 100644 --- a/test/common/filesystem/filesystem_impl_test.cc +++ b/test/common/filesystem/filesystem_impl_test.cc @@ -2,6 +2,7 @@ #include #include "common/common/assert.h" +#include "common/common/utility.h" #include "common/filesystem/filesystem_impl.h" #include "test/test_common/environment.h" @@ -117,7 +118,7 @@ TEST_F(FileSystemImplTest, CanonicalPathSuccess) { EXPECT_EQ("/", canonicalPath( TEST_F(FileSystemImplTest, CanonicalPathFail) { const Api::SysCallStringResult result = canonicalPath("/_some_non_existent_file"); EXPECT_TRUE(result.rc_.empty()); - EXPECT_STREQ("No such file or directory", ::strerror(result.errno_)); + EXPECT_EQ("No such file or directory", errorDetails(result.errno_)); } #endif diff --git a/test/common/network/BUILD b/test/common/network/BUILD index 19e63d2a0cc8..403c7068859c 100644 --- a/test/common/network/BUILD +++ b/test/common/network/BUILD @@ -326,6 +326,7 @@ envoy_cc_test( name = "io_socket_handle_impl_test", srcs = ["io_socket_handle_impl_test.cc"], deps = [ + "//source/common/common:utility_lib", "//source/common/network:address_lib", ], ) diff --git a/test/common/network/address_impl_test.cc b/test/common/network/address_impl_test.cc index 3935004acf74..3eb398804466 100644 --- a/test/common/network/address_impl_test.cc +++ b/test/common/network/address_impl_test.cc @@ -62,7 +62,7 @@ void testSocketBindAndConnect(Network::Address::IpVersion ip_version, bool v6onl // Bind the socket to the desired address and port. const Api::SysCallIntResult result = sock.bind(addr_port); - ASSERT_EQ(result.rc_, 0) << addr_port->asString() << "\nerror: " << strerror(result.errno_) + ASSERT_EQ(result.rc_, 0) << addr_port->asString() << "\nerror: " << errorDetails(result.errno_) << "\nerrno: " << result.errno_; // Do a bare listen syscall. Not bothering to accept connections as that would @@ -83,7 +83,7 @@ void testSocketBindAndConnect(Network::Address::IpVersion ip_version, bool v6onl // Connect to the server. 
const Api::SysCallIntResult result = client_sock.connect(addr_port); - ASSERT_EQ(result.rc_, 0) << addr_port->asString() << "\nerror: " << strerror(result.errno_) + ASSERT_EQ(result.rc_, 0) << addr_port->asString() << "\nerror: " << errorDetails(result.errno_) << "\nerrno: " << result.errno_; }; @@ -331,7 +331,7 @@ TEST(PipeInstanceTest, BasicPermission) { ASSERT_GE(sock.ioHandle().fd(), 0) << pipe.asString(); Api::SysCallIntResult result = sock.bind(address); - ASSERT_EQ(result.rc_, 0) << pipe.asString() << "\nerror: " << strerror(result.errno_) + ASSERT_EQ(result.rc_, 0) << pipe.asString() << "\nerror: " << errorDetails(result.errno_) << "\terrno: " << result.errno_; Api::OsSysCalls& os_sys_calls = Api::OsSysCallsSingleton::get(); @@ -341,7 +341,7 @@ TEST(PipeInstanceTest, BasicPermission) { // Get file permissions bits ASSERT_EQ(stat_buf.st_mode & 07777, mode) << path << std::oct << "\t" << (stat_buf.st_mode & 07777) << std::dec << "\t" - << (stat_buf.st_mode) << strerror(result.errno_); + << (stat_buf.st_mode) << errorDetails(result.errno_); } #endif @@ -431,7 +431,7 @@ TEST(PipeInstanceTest, UnlinksExistingFile) { const Api::SysCallIntResult result = sock.bind(address); - ASSERT_EQ(result.rc_, 0) << pipe.asString() << "\nerror: " << strerror(result.errno_) + ASSERT_EQ(result.rc_, 0) << pipe.asString() << "\nerror: " << errorDetails(result.errno_) << "\nerrno: " << result.errno_; }; diff --git a/test/common/network/io_socket_handle_impl_test.cc b/test/common/network/io_socket_handle_impl_test.cc index 4aae5b5296d1..a1a1a506b158 100644 --- a/test/common/network/io_socket_handle_impl_test.cc +++ b/test/common/network/io_socket_handle_impl_test.cc @@ -1,6 +1,8 @@ +#include "common/common/utility.h" #include "common/network/io_socket_error_impl.h" #include "common/network/io_socket_handle_impl.h" +#include "gmock/gmock.h" #include "gtest/gtest.h" namespace Envoy { @@ -8,32 +10,44 @@ namespace Network { namespace { TEST(IoSocketHandleImplTest, TestIoSocketError) { - IoSocketError error1(EAGAIN); + IoSocketError error1(SOCKET_ERROR_AGAIN); EXPECT_DEBUG_DEATH(error1.getErrorCode(), ".*assert failure: .* Details: Didn't use getIoSocketEagainInstance.*"); + EXPECT_EQ(errorDetails(SOCKET_ERROR_AGAIN), + IoSocketError::getIoSocketEagainInstance()->getErrorDetails()); - EXPECT_EQ(::strerror(EAGAIN), IoSocketError::getIoSocketEagainInstance()->getErrorDetails()); + IoSocketError error2(SOCKET_ERROR_NOT_SUP); + EXPECT_EQ(IoSocketError::IoErrorCode::NoSupport, error2.getErrorCode()); + EXPECT_EQ(errorDetails(SOCKET_ERROR_NOT_SUP), error2.getErrorDetails()); - IoSocketError error3(ENOTSUP); - EXPECT_EQ(IoSocketError::IoErrorCode::NoSupport, error3.getErrorCode()); - EXPECT_EQ(::strerror(ENOTSUP), error3.getErrorDetails()); + IoSocketError error3(SOCKET_ERROR_AF_NO_SUP); + EXPECT_EQ(IoSocketError::IoErrorCode::AddressFamilyNoSupport, error3.getErrorCode()); + EXPECT_EQ(errorDetails(SOCKET_ERROR_AF_NO_SUP), error3.getErrorDetails()); - IoSocketError error4(EAFNOSUPPORT); - EXPECT_EQ(IoSocketError::IoErrorCode::AddressFamilyNoSupport, error4.getErrorCode()); - EXPECT_EQ(::strerror(EAFNOSUPPORT), error4.getErrorDetails()); + IoSocketError error4(SOCKET_ERROR_IN_PROGRESS); + EXPECT_EQ(IoSocketError::IoErrorCode::InProgress, error4.getErrorCode()); + EXPECT_EQ(errorDetails(SOCKET_ERROR_IN_PROGRESS), error4.getErrorDetails()); - IoSocketError error5(EINPROGRESS); - EXPECT_EQ(IoSocketError::IoErrorCode::InProgress, error5.getErrorCode()); - EXPECT_EQ(::strerror(EINPROGRESS), error5.getErrorDetails()); + 
IoSocketError error5(SOCKET_ERROR_PERM); + EXPECT_EQ(IoSocketError::IoErrorCode::Permission, error5.getErrorCode()); + EXPECT_EQ(errorDetails(SOCKET_ERROR_PERM), error5.getErrorDetails()); - IoSocketError error6(EPERM); - EXPECT_EQ(IoSocketError::IoErrorCode::Permission, error6.getErrorCode()); - EXPECT_EQ(::strerror(EPERM), error6.getErrorDetails()); + IoSocketError error6(SOCKET_ERROR_MSG_SIZE); + EXPECT_EQ(IoSocketError::IoErrorCode::MessageTooBig, error6.getErrorCode()); + EXPECT_EQ(errorDetails(SOCKET_ERROR_MSG_SIZE), error6.getErrorDetails()); - // Random unknown error. - IoSocketError error7(123); - EXPECT_EQ(IoSocketError::IoErrorCode::UnknownError, error7.getErrorCode()); - EXPECT_EQ(::strerror(123), error7.getErrorDetails()); + IoSocketError error7(SOCKET_ERROR_INTR); + EXPECT_EQ(IoSocketError::IoErrorCode::Interrupt, error7.getErrorCode()); + EXPECT_EQ(errorDetails(SOCKET_ERROR_INTR), error7.getErrorDetails()); + + IoSocketError error8(SOCKET_ERROR_ADDR_NOT_AVAIL); + EXPECT_EQ(IoSocketError::IoErrorCode::AddressNotAvailable, error8.getErrorCode()); + EXPECT_EQ(errorDetails(SOCKET_ERROR_ADDR_NOT_AVAIL), error8.getErrorDetails()); + + // Random unknown error + IoSocketError error9(123); + EXPECT_EQ(IoSocketError::IoErrorCode::UnknownError, error9.getErrorCode()); + EXPECT_EQ(errorDetails(123), error9.getErrorDetails()); } } // namespace diff --git a/test/common/network/socket_option_impl_test.cc b/test/common/network/socket_option_impl_test.cc index 6ecf656244dd..2bbf4eba6400 100644 --- a/test/common/network/socket_option_impl_test.cc +++ b/test/common/network/socket_option_impl_test.cc @@ -13,7 +13,7 @@ TEST_F(SocketOptionImplTest, BadFd) { Api::SysCallIntResult result = SocketOptionImpl::setSocketOption(socket_, {}, zero.data(), zero.size()); EXPECT_EQ(-1, result.rc_); - EXPECT_EQ(ENOTSUP, result.errno_); + EXPECT_EQ(SOCKET_ERROR_NOT_SUP, result.errno_); } TEST_F(SocketOptionImplTest, HasName) { diff --git a/test/common/network/udp_listener_impl_test.cc b/test/common/network/udp_listener_impl_test.cc index 7ba8311f0a67..0d139b887e43 100644 --- a/test/common/network/udp_listener_impl_test.cc +++ b/test/common/network/udp_listener_impl_test.cc @@ -315,7 +315,8 @@ TEST_P(UdpListenerImplTest, UdpListenerRecvMsgError) { Api::MockOsSysCalls os_sys_calls; TestThreadsafeSingletonInjector os_calls(&os_sys_calls); EXPECT_CALL(os_sys_calls, supportsMmsg()); - EXPECT_CALL(os_sys_calls, recvmsg(_, _, _)).WillOnce(Return(Api::SysCallSizeResult{-1, ENOTSUP})); + EXPECT_CALL(os_sys_calls, recvmsg(_, _, _)) + .WillOnce(Return(Api::SysCallSizeResult{-1, SOCKET_ERROR_NOT_SUP})); dispatcher_->run(Event::Dispatcher::RunType::Block); } @@ -385,14 +386,16 @@ TEST_P(UdpListenerImplTest, SendDataError) { // Inject mocked OsSysCalls implementation to mock a write failure. Api::MockOsSysCalls os_sys_calls; TestThreadsafeSingletonInjector os_calls(&os_sys_calls); - EXPECT_CALL(os_sys_calls, sendmsg(_, _, _)).WillOnce(Return(Api::SysCallSizeResult{-1, ENOTSUP})); + EXPECT_CALL(os_sys_calls, sendmsg(_, _, _)) + .WillOnce(Return(Api::SysCallSizeResult{-1, SOCKET_ERROR_NOT_SUP})); auto send_result = listener_->send(send_data); EXPECT_FALSE(send_result.ok()); EXPECT_EQ(send_result.err_->getErrorCode(), Api::IoError::IoErrorCode::NoSupport); // Failed write shouldn't drain the data. 
EXPECT_EQ(payload.length(), buffer->length()); - ON_CALL(os_sys_calls, sendmsg(_, _, _)).WillByDefault(Return(Api::SysCallSizeResult{-1, EINVAL})); + ON_CALL(os_sys_calls, sendmsg(_, _, _)) + .WillByDefault(Return(Api::SysCallSizeResult{-1, SOCKET_ERROR_INVAL})); // EINVAL should cause RELEASE_ASSERT. EXPECT_DEATH(listener_->send(send_data), "Invalid argument passed in"); } diff --git a/test/extensions/filters/listener/http_inspector/http_inspector_test.cc b/test/extensions/filters/listener/http_inspector/http_inspector_test.cc index 0467a35ef844..576496ed1061 100644 --- a/test/extensions/filters/listener/http_inspector/http_inspector_test.cc +++ b/test/extensions/filters/listener/http_inspector/http_inspector_test.cc @@ -353,7 +353,7 @@ TEST_F(HttpInspectorTest, ReadError) { init(); EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK)).WillOnce(InvokeWithoutArgs([]() { - return Api::SysCallSizeResult{ssize_t(-1), ENOTSUP}; + return Api::SysCallSizeResult{ssize_t(-1), SOCKET_ERROR_NOT_SUP}; })); EXPECT_CALL(cb_, continueFilterChain(false)); file_event_callback_(Event::FileReadyType::Read); @@ -375,7 +375,7 @@ TEST_F(HttpInspectorTest, MultipleReadsHttp2) { InSequence s; EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK)).WillOnce(InvokeWithoutArgs([]() { - return Api::SysCallSizeResult{ssize_t(-1), EAGAIN}; + return Api::SysCallSizeResult{ssize_t(-1), SOCKET_ERROR_AGAIN}; })); for (size_t i = 1; i <= 24; i++) { @@ -408,7 +408,7 @@ TEST_F(HttpInspectorTest, MultipleReadsHttp2BadPreface) { InSequence s; EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK)).WillOnce(InvokeWithoutArgs([]() { - return Api::SysCallSizeResult{ssize_t(-1), EAGAIN}; + return Api::SysCallSizeResult{ssize_t(-1), SOCKET_ERROR_AGAIN}; })); for (size_t i = 1; i <= data.size(); i++) { @@ -440,7 +440,7 @@ TEST_F(HttpInspectorTest, MultipleReadsHttp1) { InSequence s; EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK)).WillOnce(InvokeWithoutArgs([]() { - return Api::SysCallSizeResult{ssize_t(-1), EAGAIN}; + return Api::SysCallSizeResult{ssize_t(-1), SOCKET_ERROR_AGAIN}; })); for (size_t i = 1; i <= data.size(); i++) { @@ -474,7 +474,7 @@ TEST_F(HttpInspectorTest, MultipleReadsHttp1IncompleteHeader) { InSequence s; EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK)).WillOnce(InvokeWithoutArgs([]() { - return Api::SysCallSizeResult{ssize_t(-1), EAGAIN}; + return Api::SysCallSizeResult{ssize_t(-1), SOCKET_ERROR_AGAIN}; })); for (size_t i = 1; i <= data.size(); i++) { @@ -506,7 +506,7 @@ TEST_F(HttpInspectorTest, MultipleReadsHttp1IncompleteBadHeader) { InSequence s; EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK)).WillOnce(InvokeWithoutArgs([]() { - return Api::SysCallSizeResult{ssize_t(-1), EAGAIN}; + return Api::SysCallSizeResult{ssize_t(-1), SOCKET_ERROR_AGAIN}; })); for (size_t i = 1; i <= data.size(); i++) { @@ -540,7 +540,7 @@ TEST_F(HttpInspectorTest, MultipleReadsHttp1BadProtocol) { InSequence s; EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK)).WillOnce(InvokeWithoutArgs([]() { - return Api::SysCallSizeResult{ssize_t(-1), EAGAIN}; + return Api::SysCallSizeResult{ssize_t(-1), SOCKET_ERROR_AGAIN}; })); for (size_t i = 1; i <= truncate_header.size(); i++) { @@ -574,7 +574,7 @@ TEST_F(HttpInspectorTest, Http1WithLargeRequestLine) { InSequence s; EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK)).WillOnce(InvokeWithoutArgs([]() { - return Api::SysCallSizeResult{ssize_t(-1), EAGAIN}; + return Api::SysCallSizeResult{ssize_t(-1), SOCKET_ERROR_AGAIN}; })); uint64_t num_loops = Config::MAX_INSPECT_SIZE; @@ -620,7 
+620,7 @@ TEST_F(HttpInspectorTest, Http1WithLargeHeader) { InSequence s; EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK)).WillOnce(InvokeWithoutArgs([]() { - return Api::SysCallSizeResult{ssize_t(-1), EAGAIN}; + return Api::SysCallSizeResult{ssize_t(-1), SOCKET_ERROR_AGAIN}; })); for (size_t i = 1; i <= 20; i++) { diff --git a/test/extensions/filters/listener/tls_inspector/tls_inspector_test.cc b/test/extensions/filters/listener/tls_inspector/tls_inspector_test.cc index d16303dc8d5b..cf7aef8aca6b 100644 --- a/test/extensions/filters/listener/tls_inspector/tls_inspector_test.cc +++ b/test/extensions/filters/listener/tls_inspector/tls_inspector_test.cc @@ -94,7 +94,7 @@ TEST_P(TlsInspectorTest, ConnectionClosed) { TEST_P(TlsInspectorTest, ReadError) { init(); EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK)).WillOnce(InvokeWithoutArgs([]() { - return Api::SysCallSizeResult{ssize_t(-1), ENOTSUP}; + return Api::SysCallSizeResult{ssize_t(-1), SOCKET_ERROR_NOT_SUP}; })); EXPECT_CALL(cb_, continueFilterChain(false)); file_event_callback_(Event::FileReadyType::Read); @@ -159,7 +159,7 @@ TEST_P(TlsInspectorTest, MultipleReads) { InSequence s; EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK)) .WillOnce(InvokeWithoutArgs([]() -> Api::SysCallSizeResult { - return Api::SysCallSizeResult{ssize_t(-1), EAGAIN}; + return Api::SysCallSizeResult{ssize_t(-1), SOCKET_ERROR_AGAIN}; })); for (size_t i = 1; i <= client_hello.size(); i++) { EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK)) diff --git a/test/extensions/filters/udp/udp_proxy/udp_proxy_filter_test.cc b/test/extensions/filters/udp/udp_proxy/udp_proxy_filter_test.cc index c918c0aad783..0e4d98458bf6 100644 --- a/test/extensions/filters/udp/udp_proxy/udp_proxy_filter_test.cc +++ b/test/extensions/filters/udp/udp_proxy/udp_proxy_filter_test.cc @@ -260,13 +260,13 @@ cluster: fake_cluster EXPECT_EQ(5, cluster_manager_.thread_local_cluster_.cluster_.info_->stats_ .upstream_cx_tx_bytes_total_.value()); - test_sessions_[0].recvDataFromUpstream("world2", 0, EMSGSIZE); + test_sessions_[0].recvDataFromUpstream("world2", 0, SOCKET_ERROR_MSG_SIZE); checkTransferStats(5 /*rx_bytes*/, 1 /*rx_datagrams*/, 0 /*tx_bytes*/, 0 /*tx_datagrams*/); EXPECT_EQ(6, cluster_manager_.thread_local_cluster_.cluster_.info_->stats_ .upstream_cx_rx_bytes_total_.value()); EXPECT_EQ(1, config_->stats().downstream_sess_tx_errors_.value()); - test_sessions_[0].recvDataFromUpstream("world2", EMSGSIZE, 0); + test_sessions_[0].recvDataFromUpstream("world2", SOCKET_ERROR_MSG_SIZE, 0); checkTransferStats(5 /*rx_bytes*/, 1 /*rx_datagrams*/, 0 /*tx_bytes*/, 0 /*tx_datagrams*/); EXPECT_EQ(6, cluster_manager_.thread_local_cluster_.cluster_.info_->stats_ .upstream_cx_rx_bytes_total_.value()); @@ -275,7 +275,7 @@ cluster: fake_cluster "udp.sess_rx_errors") ->value()); - test_sessions_[0].expectUpstreamWrite("hello", EMSGSIZE); + test_sessions_[0].expectUpstreamWrite("hello", SOCKET_ERROR_MSG_SIZE); recvDataFromDownstream("10.0.0.1:1000", "10.0.0.2:80", "hello"); checkTransferStats(10 /*rx_bytes*/, 2 /*rx_datagrams*/, 0 /*tx_bytes*/, 0 /*tx_datagrams*/); EXPECT_EQ(5, cluster_manager_.thread_local_cluster_.cluster_.info_->stats_ diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_writer_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_writer_test.cc index d418531ba359..0c6232fb8e50 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_writer_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_writer_test.cc @@ -82,7 +82,7 @@ 
TEST_F(EnvoyQuicWriterTest, SendBlocked) { EXPECT_CALL(os_sys_calls_, sendmsg(_, _, _)) .WillOnce(testing::Invoke([this, str](int, const msghdr* message, int) { verifySendData(str, message); - return Api::SysCallSizeResult{-1, EAGAIN}; + return Api::SysCallSizeResult{-1, SOCKET_ERROR_AGAIN}; })); quic::WriteResult result = envoy_quic_writer_.WritePacket(str.data(), str.length(), self_address_, peer_address_, nullptr); @@ -94,7 +94,7 @@ TEST_F(EnvoyQuicWriterTest, SendBlocked) { EXPECT_CALL(os_sys_calls_, sendmsg(_, _, _)) .WillOnce(testing::Invoke([this, str](int, const msghdr* message, int) { verifySendData(str, message); - return Api::SysCallSizeResult{-1, EAGAIN}; + return Api::SysCallSizeResult{-1, SOCKET_ERROR_AGAIN}; })); #endif EXPECT_DEBUG_DEATH(envoy_quic_writer_.WritePacket(str.data(), str.length(), self_address_, @@ -109,7 +109,7 @@ TEST_F(EnvoyQuicWriterTest, SendFailure) { EXPECT_CALL(os_sys_calls_, sendmsg(_, _, _)) .WillOnce(testing::Invoke([this, str](int, const msghdr* message, int) { verifySendData(str, message); - return Api::SysCallSizeResult{-1, ENOTSUP}; + return Api::SysCallSizeResult{-1, SOCKET_ERROR_NOT_SUP}; })); quic::WriteResult result = envoy_quic_writer_.WritePacket(str.data(), str.length(), self_address_, peer_address_, nullptr); @@ -123,7 +123,7 @@ TEST_F(EnvoyQuicWriterTest, SendFailureMessageTooBig) { EXPECT_CALL(os_sys_calls_, sendmsg(_, _, _)) .WillOnce(testing::Invoke([this, str](int, const msghdr* message, int) { verifySendData(str, message); - return Api::SysCallSizeResult{-1, EMSGSIZE}; + return Api::SysCallSizeResult{-1, SOCKET_ERROR_MSG_SIZE}; })); quic::WriteResult result = envoy_quic_writer_.WritePacket(str.data(), str.length(), self_address_, peer_address_, nullptr); diff --git a/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc b/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc index 916195b9dc13..32bca008f064 100644 --- a/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc +++ b/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc @@ -328,7 +328,7 @@ TEST_F(QuicPlatformTest, QuicLog) { EXPECT_LOG_CONTAINS("info", "i=1", QUIC_VLOG(1) << "i=" << (i = 1)); EXPECT_EQ(1, i); - errno = EINVAL; + errno = SOCKET_ERROR_INVAL; EXPECT_LOG_CONTAINS("info", "i=3:", QUIC_PLOG(INFO) << "i=" << (i = 3)); EXPECT_EQ(3, i); } @@ -686,7 +686,7 @@ TEST_F(QuicPlatformTest, FailToPickUnsedPort) { }); // Fail bind call's to mimic port exhaustion. 
EXPECT_CALL(os_sys_calls, bind(_, _, _)) - .WillRepeatedly(Return(Envoy::Api::SysCallIntResult{-1, EADDRINUSE})); + .WillRepeatedly(Return(Envoy::Api::SysCallIntResult{-1, SOCKET_ERROR_ADDR_IN_USE})); EXPECT_DEATH_LOG_TO_STDERR(QuicPickServerPortForTestsOrDie(), "Failed to pick a port for test."); } diff --git a/test/integration/integration.cc b/test/integration/integration.cc index bd4f71416c02..e12e12a4f55b 100644 --- a/test/integration/integration.cc +++ b/test/integration/integration.cc @@ -466,16 +466,17 @@ void BaseIntegrationTest::createGeneratedApiTestServer( auto end_time = time_system_.monotonicTime() + TestUtility::DefaultTimeout; const char* success = "listener_manager.listener_create_success"; const char* rejected = "listener_manager.lds.update_rejected"; - while ((test_server_->counter(success) == nullptr || - test_server_->counter(success)->value() < concurrency_) && - (!allow_lds_rejection || test_server_->counter(rejected) == nullptr || - test_server_->counter(rejected)->value() == 0)) { + for (Stats::CounterSharedPtr success_counter = test_server_->counter(success), + rejected_counter = test_server_->counter(rejected); + (success_counter == nullptr || success_counter->value() < concurrency_) && + (!allow_lds_rejection || rejected_counter == nullptr || rejected_counter->value() == 0); + success_counter = test_server_->counter(success), + rejected_counter = test_server_->counter(rejected)) { if (time_system_.monotonicTime() >= end_time) { RELEASE_ASSERT(0, "Timed out waiting for listeners."); } if (!allow_lds_rejection) { - RELEASE_ASSERT(test_server_->counter(rejected) == nullptr || - test_server_->counter(rejected)->value() == 0, + RELEASE_ASSERT(rejected_counter == nullptr || rejected_counter->value() == 0, absl::StrCat("Lds update failed. Details\n", getListenerDetails(test_server_->server()))); } diff --git a/test/server/hot_restart_impl_test.cc b/test/server/hot_restart_impl_test.cc index a3d431e6c6e2..8237c2ba9aaf 100644 --- a/test/server/hot_restart_impl_test.cc +++ b/test/server/hot_restart_impl_test.cc @@ -83,7 +83,8 @@ TEST_F(HotRestartImplTest, VersionString) { // Test that HotRestartDomainSocketInUseException is thrown when the domain socket is already // in use, TEST_F(HotRestartImplTest, DomainSocketAlreadyInUse) { - EXPECT_CALL(os_sys_calls_, bind(_, _, _)).WillOnce(Return(Api::SysCallIntResult{-1, EADDRINUSE})); + EXPECT_CALL(os_sys_calls_, bind(_, _, _)) + .WillOnce(Return(Api::SysCallIntResult{-1, SOCKET_ERROR_ADDR_IN_USE})); EXPECT_CALL(os_sys_calls_, close(_)).Times(1); EXPECT_THROW(std::make_unique(0, 0), @@ -93,7 +94,8 @@ TEST_F(HotRestartImplTest, DomainSocketAlreadyInUse) { // Test that EnvoyException is thrown when the domain socket bind fails for reasons other than // being in use. TEST_F(HotRestartImplTest, DomainSocketError) { - EXPECT_CALL(os_sys_calls_, bind(_, _, _)).WillOnce(Return(Api::SysCallIntResult{-1, EACCES})); + EXPECT_CALL(os_sys_calls_, bind(_, _, _)) + .WillOnce(Return(Api::SysCallIntResult{-1, SOCKET_ERROR_ACCESS})); EXPECT_CALL(os_sys_calls_, close(_)).Times(1); EXPECT_THROW(std::make_unique(0, 0), EnvoyException); diff --git a/test/server/server_test.cc b/test/server/server_test.cc index 4025abd2c881..d4ea4ec91361 100644 --- a/test/server/server_test.cc +++ b/test/server/server_test.cc @@ -877,7 +877,8 @@ void bindAndListenTcpSocket(const Network::Address::InstanceConstSharedPtr& addr // with some other socket already listening on it, see #7636. 
if (SOCKET_FAILURE(os_sys_calls.listen(socket->ioHandle().fd(), 1).rc_)) { // Mimic bind exception for the test simplicity. - throw Network::SocketBindException(fmt::format("cannot listen: {}", strerror(errno)), errno); + throw Network::SocketBindException(fmt::format("cannot listen: {}", errorDetails(errno)), + errno); } } } // namespace @@ -892,7 +893,7 @@ TEST_P(ServerInstanceImplTest, BootstrapNodeWithSocketOptions) { // First attempt to bind and listen socket should fail due to the lack of SO_REUSEPORT socket // options. EXPECT_THAT_THROWS_MESSAGE(bindAndListenTcpSocket(address, nullptr), EnvoyException, - HasSubstr(strerror(EADDRINUSE))); + HasSubstr(errorDetails(SOCKET_ERROR_ADDR_IN_USE))); // Second attempt should succeed as kernel allows multiple sockets to listen the same address iff // both of them use SO_REUSEPORT socket option. diff --git a/test/test_common/environment.cc b/test/test_common/environment.cc index c303a88aea7b..4d65f0e78617 100644 --- a/test/test_common/environment.cc +++ b/test/test_common/environment.cc @@ -44,13 +44,13 @@ std::string makeTempDir(std::string basename_template) { std::string name_template = "c:\\Windows\\TEMP\\" + basename_template; char* dirname = ::_mktemp(&name_template[0]); RELEASE_ASSERT(dirname != nullptr, fmt::format("failed to create tempdir from template: {} {}", - name_template, strerror(errno))); + name_template, errorDetails(errno))); TestEnvironment::createPath(dirname); #else std::string name_template = "/tmp/" + basename_template; char* dirname = ::mkdtemp(&name_template[0]); RELEASE_ASSERT(dirname != nullptr, fmt::format("failed to create tempdir from template: {} {}", - name_template, strerror(errno))); + name_template, errorDetails(errno))); #endif return std::string(dirname); } diff --git a/test/test_common/network_utility.cc b/test/test_common/network_utility.cc index 89a667fa23bd..ca0dc51c38ae 100644 --- a/test/test_common/network_utility.cc +++ b/test/test_common/network_utility.cc @@ -43,16 +43,17 @@ Address::InstanceConstSharedPtr findOrCheckFreePort(Address::InstanceConstShared } } if (failing_fn != nullptr) { - if (result.errno_ == EADDRINUSE) { + if (result.errno_ == SOCKET_ERROR_ADDR_IN_USE) { // The port is already in use. Perfectly normal. return nullptr; - } else if (result.errno_ == EACCES) { + } else if (result.errno_ == SOCKET_ERROR_ACCESS) { // A privileged port, and we don't have privileges. Might want to log this. return nullptr; } // Unexpected failure. ADD_FAILURE() << failing_fn << " failed for '" << addr_port->asString() - << "' with error: " << strerror(result.errno_) << " (" << result.errno_ << ")"; + << "' with error: " << errorDetails(result.errno_) << " (" << result.errno_ + << ")"; return nullptr; } return sock.localAddress(); @@ -172,7 +173,7 @@ bindFreeLoopbackPort(Address::IpVersion version, Socket::Type type, bool reuse_p if (0 != result.rc_) { sock->close(); std::string msg = fmt::format("bind failed for address {} with error: {} ({})", - addr->asString(), strerror(result.errno_), result.errno_); + addr->asString(), errorDetails(result.errno_), result.errno_); ADD_FAILURE() << msg; throw EnvoyException(msg); } diff --git a/tools/code_format/check_format.py b/tools/code_format/check_format.py index e845b930ed76..20387df2fbc6 100755 --- a/tools/code_format/check_format.py +++ b/tools/code_format/check_format.py @@ -565,6 +565,22 @@ def isInSubdir(filename, *subdirs): return False +# Determines if given token exists in line without leading or trailing token characters +# e.g. 
will return True for a line containing foo() but not foo_bar() or baz_foo +def tokenInLine(token, line): + index = 0 + while True: + index = line.find(token, index) + if index < 1: + break + if index == 0 or not (line[index - 1].isalnum() or line[index - 1] == '_'): + if index + len(token) >= len(line) or not (line[index + len(token)].isalnum() or + line[index + len(token)] == '_'): + return True + index = index + 1 + return False + + def checkSourceLine(line, file_path, reportError): # Check fixable errors. These may have been fixed already. if line.find(". ") != -1: @@ -616,23 +632,25 @@ def checkSourceLine(line, file_path, reportError): if "UnpackTo" in line: reportError("Don't use UnpackTo() directly, use MessageUtil::unpackTo() instead") # Check that we use the absl::Time library - if "std::get_time" in line: + if tokenInLine("std::get_time", line): if "test/" in file_path: reportError("Don't use std::get_time; use TestUtility::parseTime in tests") else: reportError("Don't use std::get_time; use the injectable time system") - if "std::put_time" in line: + if tokenInLine("std::put_time", line): reportError("Don't use std::put_time; use absl::Time equivalent instead") - if "gmtime" in line: + if tokenInLine("gmtime", line): reportError("Don't use gmtime; use absl::Time equivalent instead") - if "mktime" in line: + if tokenInLine("mktime", line): reportError("Don't use mktime; use absl::Time equivalent instead") - if "localtime" in line: + if tokenInLine("localtime", line): reportError("Don't use localtime; use absl::Time equivalent instead") - if "strftime" in line: + if tokenInLine("strftime", line): reportError("Don't use strftime; use absl::FormatTime instead") - if "strptime" in line: + if tokenInLine("strptime", line): reportError("Don't use strptime; use absl::FormatTime instead") + if tokenInLine("strerror", line): + reportError("Don't use strerror; use Envoy::errorDetails instead") if "std::atomic_" in line: # The std::atomic_* free functions are functionally equivalent to calling # operations on std::atomic objects, so prefer to use that instead. diff --git a/tools/code_format/check_format_test_helper.py b/tools/code_format/check_format_test_helper.py index e00144812e40..acf2cd9f8700 100755 --- a/tools/code_format/check_format_test_helper.py +++ b/tools/code_format/check_format_test_helper.py @@ -230,6 +230,8 @@ def runChecks(): "test/register_factory.cc", "Don't use Registry::RegisterFactory or REGISTER_FACTORY in tests, use " "Registry::InjectFactory instead.") + errors += checkUnfixableError("strerror.cc", + "Don't use strerror; use Envoy::errorDetails instead") # The following files have errors that can be automatically fixed. 
errors += checkAndFixError("over_enthusiastic_spaces.cc", diff --git a/tools/spelling/spelling_dictionary.txt b/tools/spelling/spelling_dictionary.txt index 9bfe64a17c0d..015a02d29f3c 100644 --- a/tools/spelling/spelling_dictionary.txt +++ b/tools/spelling/spelling_dictionary.txt @@ -79,8 +79,10 @@ ECMP ECONNREFUSED EDESTRUCTION EDF +EINPROGRESS EINVAL ELB +EMSGSIZE ENOTFOUND ENOTSUP ENV diff --git a/tools/testdata/check_format/strerror.cc b/tools/testdata/check_format/strerror.cc new file mode 100644 index 000000000000..8105987e1f8c --- /dev/null +++ b/tools/testdata/check_format/strerror.cc @@ -0,0 +1,9 @@ +#include + +namespace Envoy { + +char* get_error_illegal(int err) { return strerror(err); } +char* get_error_legal1(int err) { return some_other_strerror(err); } +char* get_error_legal2(int err) { return strerror2(err); } + +} // namespace Envoy From 1c1ce188008bd4600d24bcb3abdd58e73e40093e Mon Sep 17 00:00:00 2001 From: asraa Date: Sat, 27 Jun 2020 22:53:25 -0400 Subject: [PATCH 475/909] [server] add unused ENVOY_BUG implementation (#11503) Signed-off-by: Asra Ali --- .../observability/statistics.rst | 1 + docs/root/version_history/current.rst | 1 + source/common/common/BUILD | 4 + source/common/common/assert.cc | 75 ++++++++++++++++++- source/common/common/assert.h | 73 +++++++++++++++++- source/common/common/logger.h | 1 + source/server/server.cc | 2 + source/server/server.h | 2 + test/common/common/assert_test.cc | 28 +++++++ test/server/server_test.cc | 17 ++++- 10 files changed, 199 insertions(+), 5 deletions(-) diff --git a/docs/root/configuration/observability/statistics.rst b/docs/root/configuration/observability/statistics.rst index e8f92643c029..b531c0583b61 100644 --- a/docs/root/configuration/observability/statistics.rst +++ b/docs/root/configuration/observability/statistics.rst @@ -29,6 +29,7 @@ Server related statistics are rooted at *server.* with following statistics: hot_restart_generation, Gauge, Current hot restart generation -- like hot_restart_epoch but computed automatically by incrementing from parent. initialization_time_ms, Histogram, Total time taken for Envoy initialization in milliseconds. This is the time from server start-up until the worker threads are ready to accept new connections debug_assertion_failures, Counter, Number of debug assertion failures detected in a release build if compiled with `--define log_debug_assert_in_release=enabled` or zero otherwise + envoy_bug_failures, Counter, Number of envoy bug failures detected in a release build. File or report the issue if this increments as this may be serious. static_unknown_fields, Counter, Number of messages in static configuration with unknown fields dynamic_unknown_fields, Counter, Number of messages in dynamic configuration with unknown fields diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index c79658576cb7..f7a953024711 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -121,6 +121,7 @@ New Features ` field. * runtime: add new gauge :ref:`deprecated_feature_seen_since_process_start ` that gets reset across hot restarts. * server: add the option :option:`--drain-strategy` to enable different drain strategies for DrainManager::drainClose(). +* server: added :ref:`server.envoy_bug_failures ` statistic to count ENVOY_BUG failures. * stats: added the option to :ref:`report counters as deltas ` to the metrics service stats sink. 
* tracing: tracing configuration has been made fully dynamic and every HTTP connection manager can now have a separate :ref:`tracing provider `. diff --git a/source/common/common/BUILD b/source/common/common/BUILD index 76081a5b7b08..30d27bd3d66f 100644 --- a/source/common/common/BUILD +++ b/source/common/common/BUILD @@ -18,6 +18,10 @@ envoy_cc_library( name = "assert_lib", srcs = ["assert.cc"], hdrs = ["assert.h"], + external_deps = [ + "abseil_base", + "abseil_synchronization", + ], deps = [":minimal_logger_lib"], ) diff --git a/source/common/common/assert.cc b/source/common/common/assert.cc index ab4b1b8776a4..1daf69b9abb9 100644 --- a/source/common/common/assert.cc +++ b/source/common/common/assert.cc @@ -1,5 +1,9 @@ #include "common/common/assert.h" +#include "absl/container/flat_hash_map.h" +#include "absl/strings/str_join.h" +#include "absl/synchronization/mutex.h" + namespace Envoy { namespace Assert { @@ -28,15 +32,84 @@ class ActionRegistrationImpl : public ActionRegistration { static std::function debug_assertion_failure_record_action_; }; +// This class implements the logic for triggering ENVOY_BUG logs and actions. Logging and actions +// will be triggered with exponential back-off per file and line bug. +class EnvoyBugRegistrationImpl : public ActionRegistration { +public: + EnvoyBugRegistrationImpl(std::function action) { + ASSERT(envoy_bug_failure_record_action_ == nullptr, + "An ENVOY_BUG action was already set. Currently only a single action is supported."); + envoy_bug_failure_record_action_ = action; + counters_.clear(); + } + + ~EnvoyBugRegistrationImpl() override { + ASSERT(envoy_bug_failure_record_action_ != nullptr); + envoy_bug_failure_record_action_ = nullptr; + } + + // This method is invoked when an ENVOY_BUG condition fails. It increments a per file and line + // counter for every ENVOY_BUG hit in a mutex guarded map. + // The implementation may also be a inline static counter per-file and line. There is no benchmark + // to show that the performance of this mutex is any worse than atomic counters. Acquiring and + // releasing a mutex is cheaper than a cache miss, but the mutex here is contended for every + // ENVOY_BUG failure rather than per individual bug. Logging ENVOY_BUGs is not a performance + // critical path, and mutex contention would indicate that there is a serious failure. + // Currently, this choice reduces code size and has the advantage that behavior is easier to + // understand and debug, and test behavior is predictable. + static bool shouldLogAndInvoke(absl::string_view bug_name) { + // Increment counter, inserting first if counter does not exist. + uint64_t counter_value = 0; + { + absl::MutexLock lock(&mutex_); + counter_value = ++counters_[bug_name]; + } + + // Check if counter is power of two by its bitwise representation. + return (counter_value & (counter_value - 1)) == 0; + } + + static void invokeAction() { + if (envoy_bug_failure_record_action_ != nullptr) { + envoy_bug_failure_record_action_(); + } + } + +private: + // This implementation currently only handles one action being set at a time. This is currently + // sufficient. If multiple actions are ever needed, the actions should be chained when + // additional actions are registered. 
+ static std::function envoy_bug_failure_record_action_; + + using EnvoyBugMap = absl::flat_hash_map; + static absl::Mutex mutex_; + static EnvoyBugMap counters_ GUARDED_BY(mutex_); +}; + std::function ActionRegistrationImpl::debug_assertion_failure_record_action_; +std::function EnvoyBugRegistrationImpl::envoy_bug_failure_record_action_; +EnvoyBugRegistrationImpl::EnvoyBugMap EnvoyBugRegistrationImpl::counters_; +absl::Mutex EnvoyBugRegistrationImpl::mutex_; ActionRegistrationPtr setDebugAssertionFailureRecordAction(const std::function& action) { return std::make_unique(action); } -void invokeDebugAssertionFailureRecordAction_ForAssertMacroUseOnly() { +ActionRegistrationPtr setEnvoyBugFailureRecordAction(const std::function& action) { + return std::make_unique(action); +} + +void invokeDebugAssertionFailureRecordActionForAssertMacroUseOnly() { ActionRegistrationImpl::invokeAction(); } +void invokeEnvoyBugFailureRecordActionForEnvoyBugMacroUseOnly() { + EnvoyBugRegistrationImpl::invokeAction(); +} + +bool shouldLogAndInvokeEnvoyBugForEnvoyBugMacroUseOnly(absl::string_view bug_name) { + return EnvoyBugRegistrationImpl::shouldLogAndInvoke(bug_name); +} + } // namespace Assert } // namespace Envoy diff --git a/source/common/common/assert.h b/source/common/common/assert.h index e4c395ca4851..4fc5d38acf71 100644 --- a/source/common/common/assert.h +++ b/source/common/common/assert.h @@ -32,13 +32,44 @@ using ActionRegistrationPtr = std::unique_ptr; */ ActionRegistrationPtr setDebugAssertionFailureRecordAction(const std::function& action); +/** + * Sets an action to be invoked when an ENVOY_BUG failure is detected in a release build. This + * action will be invoked each time an ENVOY_BUG failure is detected. + * + * This function is not thread-safe; concurrent calls to set the action are not allowed. + * + * The action may be invoked concurrently if two ENVOY_BUGs in different threads fail at the + * same time, so the action must be thread-safe. + * + * This has no effect in debug builds (envoy bug failure aborts the process). + * + * @param action The action to take when an envoy bug fails. + * @return A registration object. The registration is removed when the object is destructed. + */ +ActionRegistrationPtr setEnvoyBugFailureRecordAction(const std::function& action); + /** * Invokes the action set by setDebugAssertionFailureRecordAction, or does nothing if * no action has been set. * * This should only be called by ASSERT macros in this file. */ -void invokeDebugAssertionFailureRecordAction_ForAssertMacroUseOnly(); +void invokeDebugAssertionFailureRecordActionForAssertMacroUseOnly(); + +/** + * Invokes the action set by setEnvoyBugFailureRecordAction, or does nothing if + * no action has been set. + * + * This should only be called by ENVOY_BUG macros in this file. + */ +void invokeEnvoyBugFailureRecordActionForEnvoyBugMacroUseOnly(); + +/** + * Increments power of two counter for EnvoyBugRegistrationImpl. + * + * This should only be called by ENVOY_BUG macros in this file. + */ +bool shouldLogAndInvokeEnvoyBugForEnvoyBugMacroUseOnly(absl::string_view bug_name); // CONDITION_STR is needed to prevent macros in condition from being expected, which obfuscates // the logged failure, e.g., "EAGAIN" vs "11". @@ -87,7 +118,7 @@ void invokeDebugAssertionFailureRecordAction_ForAssertMacroUseOnly(); #if !defined(NDEBUG) // If this is a debug build. #define ASSERT_ACTION abort() #else // If this is not a debug build, but ENVOY_LOG_DEBUG_ASSERT_IN_RELEASE is defined. 
-#define ASSERT_ACTION Envoy::Assert::invokeDebugAssertionFailureRecordAction_ForAssertMacroUseOnly() +#define ASSERT_ACTION Envoy::Assert::invokeDebugAssertionFailureRecordActionForAssertMacroUseOnly() #endif // !defined(NDEBUG) #define _ASSERT_ORIGINAL(X) _ASSERT_IMPL(X, #X, ASSERT_ACTION, "") @@ -111,7 +142,7 @@ void invokeDebugAssertionFailureRecordAction_ForAssertMacroUseOnly(); // This non-implementation ensures that its argument is a valid expression that can be statically // casted to a bool, but the expression is never evaluated and will be compiled away. #define KNOWN_ISSUE_ASSERT _NULL_ASSERT_IMPL -#endif // defined(ENVOY_DEBUG_KNOWN_ISSUES) +#endif // defined(ENVOY_DISABLE_KNOWN_ISSUE_ASSERTS) // If ASSERT is called with one argument, the ASSERT_SELECTOR will return // _ASSERT_ORIGINAL and this will call _ASSERT_ORIGINAL(__VA_ARGS__). @@ -134,6 +165,42 @@ void invokeDebugAssertionFailureRecordAction_ForAssertMacroUseOnly(); abort(); \ } while (false) +#if !defined(NDEBUG) +#define ENVOY_BUG_ACTION abort() +#else +#define ENVOY_BUG_ACTION Envoy::Assert::invokeEnvoyBugFailureRecordActionForEnvoyBugMacroUseOnly() +#endif + +// These macros are needed to stringify __LINE__ correctly. +#define STRINGIFY(X) #X +#define TOSTRING(X) STRINGIFY(X) + +// CONDITION_STR is needed to prevent macros in condition from being expected, which obfuscates +// the logged failure, e.g., "EAGAIN" vs "11". +// ENVOY_BUG logging and actions are invoked only on power-of-two instances per log line. +#define _ENVOY_BUG_IMPL(CONDITION, CONDITION_STR, ACTION, DETAILS) \ + do { \ + if (!(CONDITION) && Envoy::Assert::shouldLogAndInvokeEnvoyBugForEnvoyBugMacroUseOnly( \ + __FILE__ ":" TOSTRING(__LINE__))) { \ + const std::string& details = (DETAILS); \ + ENVOY_LOG_TO_LOGGER(Envoy::Logger::Registry::getLog(Envoy::Logger::Id::envoy_bug), error, \ + "envoy bug failure: {}.{}{}", CONDITION_STR, \ + details.empty() ? "" : " Details: ", details); \ + ACTION; \ + } \ + } while (false) + +#define _ENVOY_BUG_VERBOSE(X, Y) _ENVOY_BUG_IMPL(X, #X, ENVOY_BUG_ACTION, Y) + +/** + * Indicate a failure condition that should never be met in normal circumstances. In contrast + * with ASSERT, an ENVOY_BUG is compiled in release mode. If a failure condition is met in release + * mode, it is logged and a stat is incremented with exponential back-off per ENVOY_BUG. In debug + * mode, it will crash if the condition is not met. ENVOY_BUG must be called with two arguments for + * verbose logging. + */ +#define ENVOY_BUG(...) _ENVOY_BUG_VERBOSE(__VA_ARGS__) + // NOT_IMPLEMENTED_GCOVR_EXCL_LINE is for overridden functions that are expressly not implemented. // The macro name includes "GCOVR_EXCL_LINE" to exclude the macro's usage from code coverage // reports. 
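For reference, the exponential back-off that the new ENVOY_BUG macro relies on can be illustrated with a minimal standalone sketch. This is not code from the patch; it only mirrors the power-of-two check in EnvoyBugRegistrationImpl::shouldLogAndInvoke() above, substitutes a plain std::map for the mutex-guarded absl::flat_hash_map, and uses a hypothetical call-site key. It shows why 16 consecutive failures on one line yield exactly 5 log/action invocations (hits 1, 2, 4, 8 and 16), matching the expectations in assert_test.cc and server_test.cc below.

#include <cstdint>
#include <iostream>
#include <map>
#include <string>

// Sketch of the per-call-site back-off: log/invoke only when the hit count
// for a given "file:line" key is a power of two (1, 2, 4, 8, ...).
static std::map<std::string, uint64_t> counters;

bool shouldLogAndInvoke(const std::string& bug_name) {
  const uint64_t counter_value = ++counters[bug_name];
  return (counter_value & (counter_value - 1)) == 0;
}

int main() {
  int invocations = 0;
  for (int i = 0; i < 16; i++) {
    if (shouldLogAndInvoke("server_test.cc:480")) { // hypothetical call site
      ++invocations;
    }
  }
  std::cout << invocations << "\n"; // prints 5
  return 0;
}

As the comment in assert.cc notes, the real implementation prefers a single mutex-guarded map over per-site atomic counters because the lock is only contended when an ENVOY_BUG actually fails, which keeps the code small and the test behavior predictable.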
diff --git a/source/common/common/logger.h b/source/common/common/logger.h index 3b2fd61db5bf..5cbc11e15a9b 100644 --- a/source/common/common/logger.h +++ b/source/common/common/logger.h @@ -34,6 +34,7 @@ namespace Logger { FUNCTION(conn_handler) \ FUNCTION(decompression) \ FUNCTION(dubbo) \ + FUNCTION(envoy_bug) \ FUNCTION(ext_authz) \ FUNCTION(rocketmq) \ FUNCTION(file) \ diff --git a/source/server/server.cc b/source/server/server.cc index 5b201c6c1e4c..2f75353c64eb 100644 --- a/source/server/server.cc +++ b/source/server/server.cc @@ -339,6 +339,8 @@ void InstanceImpl::initialize(const Options& options, assert_action_registration_ = Assert::setDebugAssertionFailureRecordAction( [this]() { server_stats_->debug_assertion_failures_.inc(); }); + envoy_bug_action_registration_ = Assert::setEnvoyBugFailureRecordAction( + [this]() { server_stats_->envoy_bug_failures_.inc(); }); InstanceImpl::failHealthcheck(false); diff --git a/source/server/server.h b/source/server/server.h index b92dd5c96e37..b41fafbca8b3 100644 --- a/source/server/server.h +++ b/source/server/server.h @@ -54,6 +54,7 @@ namespace Server { */ #define ALL_SERVER_STATS(COUNTER, GAUGE, HISTOGRAM) \ COUNTER(debug_assertion_failures) \ + COUNTER(envoy_bug_failures) \ COUNTER(dynamic_unknown_fields) \ COUNTER(static_unknown_fields) \ GAUGE(concurrency, NeverImport) \ @@ -318,6 +319,7 @@ class InstanceImpl final : Logger::Loggable, Stats::StoreRoot& stats_store_; std::unique_ptr server_stats_; Assert::ActionRegistrationPtr assert_action_registration_; + Assert::ActionRegistrationPtr envoy_bug_action_registration_; ThreadLocal::Instance& thread_local_; Api::ApiPtr api_; Event::DispatcherPtr dispatcher_; diff --git a/test/common/common/assert_test.cc b/test/common/common/assert_test.cc index 44d65b495766..ec9e96f45792 100644 --- a/test/common/common/assert_test.cc +++ b/test/common/common/assert_test.cc @@ -42,4 +42,32 @@ TEST(AssertDeathTest, VariousLogs) { EXPECT_EQ(expected_counted_failures, assert_fail_count); } +TEST(EnvoyBugDeathTest, VariousLogs) { + int envoy_bug_fail_count = 0; + // ENVOY_BUG actions only occur on power of two counts. + auto envoy_bug_action_registration = + Assert::setEnvoyBugFailureRecordAction([&]() { envoy_bug_fail_count++; }); + +#ifndef NDEBUG + EXPECT_DEATH({ ENVOY_BUG(false, ""); }, ".*envoy bug failure: false.*"); + EXPECT_DEATH({ ENVOY_BUG(false, ""); }, ".*envoy bug failure: false.*"); + EXPECT_DEATH({ ENVOY_BUG(false, "With some logs"); }, + ".*envoy bug failure: false. Details: With some logs.*"); + EXPECT_EQ(0, envoy_bug_fail_count); +#else + // Same log lines trigger exponential back-off. + for (int i = 0; i < 4; i++) { + ENVOY_BUG(false, ""); + } + // 3 counts because 1st, 2nd, and 4th instances are powers of 2. + EXPECT_EQ(3, envoy_bug_fail_count); + + // Different log lines have separate counters for exponential back-off. + EXPECT_LOG_CONTAINS("error", "envoy bug failure: false", ENVOY_BUG(false, "")); + EXPECT_LOG_CONTAINS("error", "envoy bug failure: false. 
Details: With some logs", + ENVOY_BUG(false, "With some logs")); + EXPECT_EQ(5, envoy_bug_fail_count); +#endif +} + } // namespace Envoy diff --git a/test/server/server_test.cc b/test/server/server_test.cc index d4ea4ec91361..8f99b100ab55 100644 --- a/test/server/server_test.cc +++ b/test/server/server_test.cc @@ -474,7 +474,22 @@ TEST_P(ServerInstanceImplTest, Stats) { EXPECT_EQ(2L, TestUtility::findGauge(stats_store_, "server.concurrency")->value()); EXPECT_EQ(3L, TestUtility::findGauge(stats_store_, "server.hot_restart_epoch")->value()); -// This stat only works in this configuration. +// The ENVOY_BUG stat works in release mode. +#if defined(NDEBUG) + // Test exponential back-off on a fixed line ENVOY_BUG. + for (int i = 0; i < 16; i++) { + ENVOY_BUG(false, ""); + } + EXPECT_EQ(5L, TestUtility::findCounter(stats_store_, "server.envoy_bug_failures")->value()); + // Another ENVOY_BUG increments the counter. + ENVOY_BUG(false, "Testing envoy bug assertion failure detection in release build."); + EXPECT_EQ(6L, TestUtility::findCounter(stats_store_, "server.envoy_bug_failures")->value()); +#else + // The ENVOY_BUG macro aborts in debug mode. + EXPECT_DEATH(ENVOY_BUG(false, ""), ""); +#endif + +// The ASSERT stat only works in this configuration. #if defined(NDEBUG) && defined(ENVOY_LOG_DEBUG_ASSERT_IN_RELEASE) ASSERT(false, "Testing debug assertion failure detection in release build."); EXPECT_EQ(1L, TestUtility::findCounter(stats_store_, "server.debug_assertion_failures")->value()); From 504d78bac735e03e733adea6475d31702334e5e4 Mon Sep 17 00:00:00 2001 From: phlax Date: Sun, 28 Jun 2020 03:55:05 +0100 Subject: [PATCH 476/909] Unroot docker examples and fix stdout permissions in container (#11523) Signed-off-by: Ryan Northey --- ci/docker-entrypoint.sh | 2 ++ docs/root/start/sandboxes/cors.rst | 12 ++++++------ docs/root/start/sandboxes/csrf.rst | 8 ++++---- docs/root/start/sandboxes/front_proxy.rst | 18 +++++++++--------- .../start/sandboxes/jaeger_native_tracing.rst | 8 ++++---- docs/root/start/sandboxes/jaeger_tracing.rst | 8 ++++---- docs/root/start/sandboxes/lua.rst | 2 +- docs/root/start/sandboxes/zipkin_tracing.rst | 8 ++++---- docs/root/start/start.rst | 12 ++++++++++++ examples/cors/backend/docker-compose.yaml | 6 +++--- examples/cors/backend/front-envoy.yaml | 4 ++-- examples/cors/backend/service-envoy.yaml | 2 +- examples/cors/frontend/docker-compose.yaml | 6 +++--- examples/cors/frontend/front-envoy.yaml | 4 ++-- examples/cors/frontend/service-envoy.yaml | 2 +- examples/csrf/crosssite/docker-compose.yml | 6 +++--- examples/csrf/crosssite/front-envoy.yaml | 4 ++-- examples/csrf/samesite/docker-compose.yml | 6 +++--- examples/csrf/samesite/front-envoy.yaml | 4 ++-- examples/csrf/service-envoy.yaml | 2 +- examples/fault-injection/docker-compose.yaml | 3 --- examples/front-proxy/docker-compose.yaml | 8 ++++---- examples/front-proxy/front-envoy.yaml | 6 +++--- examples/front-proxy/service-envoy.yaml | 2 +- .../jaeger-native-tracing/docker-compose.yaml | 8 ++++---- .../front-envoy-jaeger.yaml | 4 ++-- .../service1-envoy-jaeger.yaml | 4 ++-- .../service2-envoy-jaeger.yaml | 2 +- examples/jaeger-tracing/docker-compose.yaml | 8 ++++---- .../jaeger-tracing/front-envoy-jaeger.yaml | 4 ++-- .../jaeger-tracing/service1-envoy-jaeger.yaml | 4 ++-- .../jaeger-tracing/service2-envoy-jaeger.yaml | 2 +- .../load-reporting-service/docker-compose.yaml | 8 +++++--- examples/lua/docker-compose.yaml | 4 ++-- examples/lua/envoy.yaml | 6 +++--- examples/zipkin-tracing/docker-compose.yaml | 8 ++++---- 
.../zipkin-tracing/front-envoy-zipkin.yaml | 4 ++-- .../zipkin-tracing/service1-envoy-zipkin.yaml | 4 ++-- .../zipkin-tracing/service2-envoy-zipkin.yaml | 2 +- 39 files changed, 114 insertions(+), 101 deletions(-) diff --git a/ci/docker-entrypoint.sh b/ci/docker-entrypoint.sh index 10b78e74c1d7..677e617e9fce 100755 --- a/ci/docker-entrypoint.sh +++ b/ci/docker-entrypoint.sh @@ -20,6 +20,8 @@ if [ "$ENVOY_UID" != "0" ]; then if [ -n "$ENVOY_GID" ]; then groupmod -g "$ENVOY_GID" envoy fi + # Ensure the envoy user is able to write to container logs + chown envoy:envoy /dev/stdout /dev/stderr su-exec envoy "${@}" else exec "${@}" diff --git a/docs/root/start/sandboxes/cors.rst b/docs/root/start/sandboxes/cors.rst index 3225cb0e81f5..8e3ac24996ee 100644 --- a/docs/root/start/sandboxes/cors.rst +++ b/docs/root/start/sandboxes/cors.rst @@ -53,9 +53,9 @@ Terminal 1 $ docker-compose ps Name Command State Ports - ---------------------------------------------------------------------------------------------------------------------------- - frontend_front-envoy_1 /docker-entrypoint.sh /bin ... Up 10000/tcp, 0.0.0.0:8000->80/tcp, 0.0.0.0:8001->8001/tcp - frontend_frontend-service_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 80/tcp + ------------------------------------------------------------------------------------------------------------------------------ + frontend_front-envoy_1 /docker-entrypoint.sh /bin ... Up 10000/tcp, 0.0.0.0:8000->8000/tcp, 0.0.0.0:8001->8001/tcp + frontend_frontend-service_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 8000/tcp Terminal 2 @@ -67,9 +67,9 @@ Terminal 2 $ docker-compose ps Name Command State Ports - -------------------------------------------------------------------------------------------------------------------------- - backend_backend-service_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 80/tcp - backend_front-envoy_1 /docker-entrypoint.sh /bin ... Up 10000/tcp, 0.0.0.0:8002->80/tcp, 0.0.0.0:8003->8001/tcp + ---------------------------------------------------------------------------------------------------------------------------- + backend_backend-service_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 8000/tcp + backend_front-envoy_1 /docker-entrypoint.sh /bin ... Up 10000/tcp, 0.0.0.0:8002->8000/tcp, 0.0.0.0:8003->8001/tcp **Step 3: Test Envoy's CORS capabilities** diff --git a/docs/root/start/sandboxes/csrf.rst b/docs/root/start/sandboxes/csrf.rst index 5b6defcae9e3..66268dd1e50a 100644 --- a/docs/root/start/sandboxes/csrf.rst +++ b/docs/root/start/sandboxes/csrf.rst @@ -55,8 +55,8 @@ Terminal 1 (samesite) Name Command State Ports ---------------------------------------------------------------------------------------------------------------------- - samesite_front-envoy_1 /docker-entrypoint.sh /bin ... Up 10000/tcp, 0.0.0.0:8000->80/tcp, 0.0.0.0:8001->8001/tcp - samesite_service_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 80/tcp + samesite_front-envoy_1 /docker-entrypoint.sh /bin ... Up 10000/tcp, 0.0.0.0:8000->8000/tcp, 0.0.0.0:8001->8001/tcp + samesite_service_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 8000/tcp Terminal 2 (crosssite) @@ -69,8 +69,8 @@ Terminal 2 (crosssite) Name Command State Ports ---------------------------------------------------------------------------------------------------------------------- - crosssite_front-envoy_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 0.0.0.0:8002->80/tcp, 0.0.0.0:8003->8001/tcp - crosssite_service_1 /docker-entrypoint.sh /bin ... 
Up 10000/tcp, 80/tcp + crosssite_front-envoy_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 0.0.0.0:8002->8000/tcp, 0.0.0.0:8003->8001/tcp + crosssite_service_1 /docker-entrypoint.sh /bin ... Up 10000/tcp, 8000/tcp **Step 3: Test Envoy's CSRF capabilities** diff --git a/docs/root/start/sandboxes/front_proxy.rst b/docs/root/start/sandboxes/front_proxy.rst index df191abb2518..d9c359fa37fb 100644 --- a/docs/root/start/sandboxes/front_proxy.rst +++ b/docs/root/start/sandboxes/front_proxy.rst @@ -15,7 +15,7 @@ Below you can see a graphic showing the docker compose deployment: :width: 100% All incoming requests are routed via the front Envoy, which is acting as a reverse proxy sitting on -the edge of the ``envoymesh`` network. Port ``80`` is mapped to port ``8000`` by docker compose +the edge of the ``envoymesh`` network. Port ``8000`` is exposed by docker compose (see :repo:`/examples/front-proxy/docker-compose.yaml`). Moreover, notice that all traffic routed by the front Envoy to the service containers is actually routed to the service Envoys (routes setup in :repo:`/examples/front-proxy/front-envoy.yaml`). In turn the service @@ -49,9 +49,9 @@ or ``git clone https://github.com/envoyproxy/envoy.git``:: Name Command State Ports -------------------------------------------------------------------------------------------------------------------------- - front-proxy_front-envoy_1 /docker-entrypoint.sh /bin ... Up 10000/tcp, 0.0.0.0:8000->80/tcp, 0.0.0.0:8001->8001/tcp - front-proxy_service1_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 80/tcp - front-proxy_service2_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 80/tcp + front-proxy_front-envoy_1 /docker-entrypoint.sh /bin ... Up 10000/tcp, 0.0.0.0:8000->8000/tcp, 0.0.0.0:8001->8001/tcp + front-proxy_service1_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 8000/tcp + front-proxy_service2_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 8000/tcp **Step 3: Test Envoy's routing capabilities** @@ -174,13 +174,13 @@ can use ``docker-compose exec /bin/bash``. For example we can enter the ``front-envoy`` container, and ``curl`` for services locally:: $ docker-compose exec front-envoy /bin/bash - root@81288499f9d7:/# curl localhost:80/service/1 + root@81288499f9d7:/# curl localhost:8000/service/1 Hello from behind Envoy (service 1)! hostname: 85ac151715c6 resolvedhostname: 172.19.0.3 - root@81288499f9d7:/# curl localhost:80/service/1 + root@81288499f9d7:/# curl localhost:8000/service/1 Hello from behind Envoy (service 1)! hostname: 20da22cfc955 resolvedhostname: 172.19.0.5 - root@81288499f9d7:/# curl localhost:80/service/1 + root@81288499f9d7:/# curl localhost:8000/service/1 Hello from behind Envoy (service 1)! hostname: f26027f1ce28 resolvedhostname: 172.19.0.6 - root@81288499f9d7:/# curl localhost:80/service/2 + root@81288499f9d7:/# curl localhost:8000/service/2 Hello from behind Envoy (service 2)! hostname: 92f4a3737bbc resolvedhostname: 172.19.0.2 **Step 6: enter containers and curl admin** @@ -227,7 +227,7 @@ statistics. For example inside ``frontenvoy`` we can get:: "uptime_current_epoch": "401s", "uptime_all_epochs": "401s" } - + .. 
code-block:: text root@e654c2c83277:/# curl localhost:8001/stats diff --git a/docs/root/start/sandboxes/jaeger_native_tracing.rst b/docs/root/start/sandboxes/jaeger_native_tracing.rst index 07193e03f974..5c41560d96c4 100644 --- a/docs/root/start/sandboxes/jaeger_native_tracing.rst +++ b/docs/root/start/sandboxes/jaeger_native_tracing.rst @@ -21,7 +21,7 @@ The three containers will be deployed inside a virtual network called ``envoymes only works on x86-64). All incoming requests are routed via the front Envoy, which is acting as a reverse proxy -sitting on the edge of the ``envoymesh`` network. Port ``80`` is mapped to port ``8000`` +sitting on the edge of the ``envoymesh`` network. Port ``8000`` is exposed by docker compose (see :repo:`/examples/jaeger-native-tracing/docker-compose.yaml`). Notice that all Envoys are configured to collect request traces (e.g., http_connection_manager/config/tracing setup in :repo:`/examples/jaeger-native-tracing/front-envoy-jaeger.yaml`) and setup to propagate the spans generated @@ -59,10 +59,10 @@ To build this sandbox example, and start the example apps run the following comm Name Command State Ports ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - jaeger-native-tracing_front-envoy_1 /start-front.sh Up 10000/tcp, 0.0.0.0:8000->80/tcp, 0.0.0.0:8001->8001/tcp + jaeger-native-tracing_front-envoy_1 /start-front.sh Up 10000/tcp, 0.0.0.0:8000->8000/tcp, 0.0.0.0:8001->8001/tcp jaeger-native-tracing_jaeger_1 /go/bin/all-in-one-linux - ... Up 14250/tcp, 14268/tcp, 0.0.0.0:16686->16686/tcp, 5775/udp, 5778/tcp, 6831/udp, 6832/udp, 0.0.0.0:9411->9411/tcp - jaeger-native-tracing_service1_1 /start-service.sh Up 10000/tcp, 80/tcp - jaeger-native-tracing_service2_1 /start-service.sh Up 10000/tcp, 80/tcp + jaeger-native-tracing_service1_1 /start-service.sh Up 10000/tcp, 8000/tcp + jaeger-native-tracing_service2_1 /start-service.sh Up 10000/tcp, 8000/tcp **Step 2: Generate some load** diff --git a/docs/root/start/sandboxes/jaeger_tracing.rst b/docs/root/start/sandboxes/jaeger_tracing.rst index bad25e5bd26f..ce73e6679ddb 100644 --- a/docs/root/start/sandboxes/jaeger_tracing.rst +++ b/docs/root/start/sandboxes/jaeger_tracing.rst @@ -10,7 +10,7 @@ service1 makes an API call to service2 before returning a response. The three containers will be deployed inside a virtual network called ``envoymesh``. All incoming requests are routed via the front Envoy, which is acting as a reverse proxy -sitting on the edge of the ``envoymesh`` network. Port ``80`` is mapped to port ``8000`` +sitting on the edge of the ``envoymesh`` network. Port ``8000`` is exposed by docker compose (see :repo:`/examples/jaeger-tracing/docker-compose.yaml`). Notice that all Envoys are configured to collect request traces (e.g., http_connection_manager/config/tracing setup in :repo:`/examples/jaeger-tracing/front-envoy-jaeger.yaml`) and setup to propagate the spans generated @@ -48,10 +48,10 @@ To build this sandbox example, and start the example apps run the following comm Name Command State Ports ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ - jaeger-tracing_front-envoy_1 /docker-entrypoint.sh /bin ... Up 10000/tcp, 0.0.0.0:8000->80/tcp, 0.0.0.0:8001->8001/tcp + jaeger-tracing_front-envoy_1 /docker-entrypoint.sh /bin ... 
Up 10000/tcp, 0.0.0.0:8000->8000/tcp, 0.0.0.0:8001->8001/tcp jaeger-tracing_jaeger_1 /go/bin/all-in-one-linux - ... Up 14250/tcp, 14268/tcp, 0.0.0.0:16686->16686/tcp, 5775/udp, 5778/tcp, 6831/udp, 6832/udp, 0.0.0.0:9411->9411/tcp - jaeger-tracing_service1_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 80/tcp - jaeger-tracing_service2_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 80/tcp + jaeger-tracing_service1_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 8000/tcp + jaeger-tracing_service2_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 8000/tcp **Step 2: Generate some load** diff --git a/docs/root/start/sandboxes/lua.rst b/docs/root/start/sandboxes/lua.rst index 874711617aa1..3a9b5c75cf91 100644 --- a/docs/root/start/sandboxes/lua.rst +++ b/docs/root/start/sandboxes/lua.rst @@ -37,7 +37,7 @@ Terminal 1 Name Command State Ports -------------------------------------------------------------------------------------------------------------------- - lua_proxy_1 /docker-entrypoint.sh /bin Up 10000/tcp, 0.0.0.0:8000->80/tcp, 0.0.0.0:8001->8001/tcp + lua_proxy_1 /docker-entrypoint.sh /bin Up 10000/tcp, 0.0.0.0:8000->8000/tcp, 0.0.0.0:8001->8001/tcp lua_web_service_1 node ./index.js Up 0.0.0.0:8080->80/tcp **Step 3: Send a request to the service** diff --git a/docs/root/start/sandboxes/zipkin_tracing.rst b/docs/root/start/sandboxes/zipkin_tracing.rst index c64ce82e9f39..649e78bffacd 100644 --- a/docs/root/start/sandboxes/zipkin_tracing.rst +++ b/docs/root/start/sandboxes/zipkin_tracing.rst @@ -10,7 +10,7 @@ service1 makes an API call to service2 before returning a response. The three containers will be deployed inside a virtual network called ``envoymesh``. All incoming requests are routed via the front Envoy, which is acting as a reverse proxy -sitting on the edge of the ``envoymesh`` network. Port ``80`` is mapped to port ``8000`` +sitting on the edge of the ``envoymesh`` network. Port ``8000`` is exposed by docker compose (see :repo:`/examples/zipkin-tracing/docker-compose.yaml`). Notice that all Envoys are configured to collect request traces (e.g., http_connection_manager/config/tracing setup in :repo:`/examples/zipkin-tracing/front-envoy-zipkin.yaml`) and setup to propagate the spans generated @@ -48,9 +48,9 @@ To build this sandbox example, and start the example apps run the following comm Name Command State Ports ----------------------------------------------------------------------------------------------------------------------------- - zipkin-tracing_front-envoy_1 /docker-entrypoint.sh /bin ... Up 10000/tcp, 0.0.0.0:8000->80/tcp, 0.0.0.0:8001->8001/tcp - zipkin-tracing_service1_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 80/tcp - zipkin-tracing_service2_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 80/tcp + zipkin-tracing_front-envoy_1 /docker-entrypoint.sh /bin ... Up 10000/tcp, 0.0.0.0:8000->8000/tcp, 0.0.0.0:8001->8001/tcp + zipkin-tracing_service1_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 8000/tcp + zipkin-tracing_service2_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 8000/tcp zipkin-tracing_zipkin_1 /busybox/sh run.sh Up 9410/tcp, 0.0.0.0:9411->9411/tcp **Step 2: Generate some load** diff --git a/docs/root/start/start.rst b/docs/root/start/start.rst index 88f9742c260c..6c1bb45b33ca 100644 --- a/docs/root/start/start.rst +++ b/docs/root/start/start.rst @@ -148,6 +148,18 @@ by using a volume. volumes: - ./envoy.yaml:/etc/envoy/envoy.yaml +By default the Docker image will run as the ``envoy`` user created at build time. 
+ +The ``uid`` and ``gid`` of this user can be set at runtime using the ``ENVOY_UID`` and ``ENVOY_GID`` +environment variables. This can be done, for example, on the Docker command line: + + $ docker run -d --name envoy -e ENVOY_UID=777 -e ENVOY_GID=777 -p 9901:9901 -p 10000:10000 envoy:v1 + +This can be useful if you wish to restrict or provide access to ``unix`` sockets inside the container, or +for controlling access to an ``envoy`` socket from outside of the container. + +If you wish to run the container as the ``root`` user you can set ``ENVOY_UID`` to ``0``. + Sandboxes --------- diff --git a/examples/cors/backend/docker-compose.yaml b/examples/cors/backend/docker-compose.yaml index 987b4ef157ba..35427012b465 100644 --- a/examples/cors/backend/docker-compose.yaml +++ b/examples/cors/backend/docker-compose.yaml @@ -10,10 +10,10 @@ services: networks: - envoymesh expose: - - "80" + - "8000" - "8001" ports: - - "8002:80" + - "8002:8000" - "8003:8001" backend-service: @@ -27,7 +27,7 @@ services: aliases: - backendservice expose: - - "80" + - "8000" networks: envoymesh: {} diff --git a/examples/cors/backend/front-envoy.yaml b/examples/cors/backend/front-envoy.yaml index d11e7f11a629..0343ac098495 100644 --- a/examples/cors/backend/front-envoy.yaml +++ b/examples/cors/backend/front-envoy.yaml @@ -3,7 +3,7 @@ static_resources: - address: socket_address: address: 0.0.0.0 - port_value: 80 + port_value: 8000 filter_chains: - filters: - name: envoy.filters.network.http_connection_manager @@ -85,7 +85,7 @@ static_resources: address: socket_address: address: backendservice - port_value: 80 + port_value: 8000 admin: access_log_path: "/dev/null" address: diff --git a/examples/cors/backend/service-envoy.yaml b/examples/cors/backend/service-envoy.yaml index 49be77b85953..c49e69ccd002 100644 --- a/examples/cors/backend/service-envoy.yaml +++ b/examples/cors/backend/service-envoy.yaml @@ -3,7 +3,7 @@ static_resources: - address: socket_address: address: 0.0.0.0 - port_value: 80 + port_value: 8000 filter_chains: - filters: - name: envoy.filters.network.http_connection_manager diff --git a/examples/cors/frontend/docker-compose.yaml b/examples/cors/frontend/docker-compose.yaml index 96b19d222e43..5b230317cb7d 100644 --- a/examples/cors/frontend/docker-compose.yaml +++ b/examples/cors/frontend/docker-compose.yaml @@ -10,10 +10,10 @@ services: networks: - envoymesh expose: - - "80" + - "8000" - "8001" ports: - - "8000:80" + - "8000:8000" - "8001:8001" frontend-service: @@ -27,7 +27,7 @@ services: aliases: - frontendservice expose: - - "80" + - "8000" networks: envoymesh: {} diff --git a/examples/cors/frontend/front-envoy.yaml b/examples/cors/frontend/front-envoy.yaml index 31174a20174d..07da44803b6a 100644 --- a/examples/cors/frontend/front-envoy.yaml +++ b/examples/cors/frontend/front-envoy.yaml @@ -3,7 +3,7 @@ static_resources: - address: socket_address: address: 0.0.0.0 - port_value: 80 + port_value: 8000 filter_chains: - filters: - name: envoy.filters.network.http_connection_manager @@ -46,7 +46,7 @@ static_resources: address: socket_address: address: frontendservice - port_value: 80 + port_value: 8000 admin: access_log_path: "/dev/null" address: diff --git a/examples/cors/frontend/service-envoy.yaml b/examples/cors/frontend/service-envoy.yaml index 49be77b85953..c49e69ccd002 100644 --- a/examples/cors/frontend/service-envoy.yaml +++ b/examples/cors/frontend/service-envoy.yaml @@ -3,7 +3,7 @@ static_resources: - address: socket_address: address: 0.0.0.0 - port_value: 80 + port_value: 8000 filter_chains: 
- filters: - name: envoy.filters.network.http_connection_manager diff --git a/examples/csrf/crosssite/docker-compose.yml b/examples/csrf/crosssite/docker-compose.yml index 5d25ea0d6d1a..31e2df957979 100644 --- a/examples/csrf/crosssite/docker-compose.yml +++ b/examples/csrf/crosssite/docker-compose.yml @@ -10,10 +10,10 @@ services: networks: - envoymesh expose: - - "80" + - "8000" - "8001" ports: - - "8002:80" + - "8002:8000" - "8003:8001" service: @@ -27,7 +27,7 @@ services: aliases: - service expose: - - "80" + - "8000" networks: envoymesh: {} diff --git a/examples/csrf/crosssite/front-envoy.yaml b/examples/csrf/crosssite/front-envoy.yaml index 56449447686d..ea4b7a5f3316 100644 --- a/examples/csrf/crosssite/front-envoy.yaml +++ b/examples/csrf/crosssite/front-envoy.yaml @@ -3,7 +3,7 @@ static_resources: - address: socket_address: address: 0.0.0.0 - port_value: 80 + port_value: 8000 filter_chains: - filters: - name: envoy.filters.network.http_connection_manager @@ -44,7 +44,7 @@ static_resources: address: socket_address: address: service - port_value: 80 + port_value: 8000 admin: access_log_path: "/dev/null" address: diff --git a/examples/csrf/samesite/docker-compose.yml b/examples/csrf/samesite/docker-compose.yml index 490a4896417f..45ef76f05b7f 100644 --- a/examples/csrf/samesite/docker-compose.yml +++ b/examples/csrf/samesite/docker-compose.yml @@ -10,10 +10,10 @@ services: networks: - envoymesh expose: - - "80" + - "8000" - "8001" ports: - - "8000:80" + - "8000:8000" - "8001:8001" service: @@ -27,7 +27,7 @@ services: aliases: - service expose: - - "80" + - "8000" networks: envoymesh: {} diff --git a/examples/csrf/samesite/front-envoy.yaml b/examples/csrf/samesite/front-envoy.yaml index 479f4d748512..e47aff2ec0f8 100644 --- a/examples/csrf/samesite/front-envoy.yaml +++ b/examples/csrf/samesite/front-envoy.yaml @@ -3,7 +3,7 @@ static_resources: - address: socket_address: address: 0.0.0.0 - port_value: 80 + port_value: 8000 filter_chains: - filters: - name: envoy.filters.network.http_connection_manager @@ -107,7 +107,7 @@ static_resources: address: socket_address: address: service - port_value: 80 + port_value: 8000 admin: access_log_path: "/dev/null" address: diff --git a/examples/csrf/service-envoy.yaml b/examples/csrf/service-envoy.yaml index 49be77b85953..c49e69ccd002 100644 --- a/examples/csrf/service-envoy.yaml +++ b/examples/csrf/service-envoy.yaml @@ -3,7 +3,7 @@ static_resources: - address: socket_address: address: 0.0.0.0 - port_value: 80 + port_value: 8000 filter_chains: - filters: - name: envoy.filters.network.http_connection_manager diff --git a/examples/fault-injection/docker-compose.yaml b/examples/fault-injection/docker-compose.yaml index 97680c848450..fe8ec0c9d68f 100644 --- a/examples/fault-injection/docker-compose.yaml +++ b/examples/fault-injection/docker-compose.yaml @@ -13,9 +13,6 @@ services: ports: - 9211:9211 - 9901:9901 - # Run Envoy as root to grant access to /dev/stdout - environment: - ENVOY_UID: 0 backend: image: kennethreitz/httpbin@sha256:2c7abc4803080c22928265744410173b6fea3b898872c01c5fd0f0f9df4a59fb networks: diff --git a/examples/front-proxy/docker-compose.yaml b/examples/front-proxy/docker-compose.yaml index 34491c3636ce..4e5f4590245b 100644 --- a/examples/front-proxy/docker-compose.yaml +++ b/examples/front-proxy/docker-compose.yaml @@ -10,10 +10,10 @@ services: networks: - envoymesh expose: - - "80" + - "8000" - "8001" ports: - - "8000:80" + - "8000:8000" - "8001:8001" service1: @@ -29,7 +29,7 @@ services: environment: - SERVICE_NAME=1 expose: - - 
"80" + - "8000" service2: build: @@ -44,7 +44,7 @@ services: environment: - SERVICE_NAME=2 expose: - - "80" + - "8000" networks: envoymesh: {} diff --git a/examples/front-proxy/front-envoy.yaml b/examples/front-proxy/front-envoy.yaml index a612487c8d6b..35747a6d10d3 100644 --- a/examples/front-proxy/front-envoy.yaml +++ b/examples/front-proxy/front-envoy.yaml @@ -3,7 +3,7 @@ static_resources: - address: socket_address: address: 0.0.0.0 - port_value: 80 + port_value: 8000 filter_chains: - filters: - name: envoy.filters.network.http_connection_manager @@ -43,7 +43,7 @@ static_resources: address: socket_address: address: service1 - port_value: 80 + port_value: 8000 - name: service2 connect_timeout: 0.25s type: strict_dns @@ -57,7 +57,7 @@ static_resources: address: socket_address: address: service2 - port_value: 80 + port_value: 8000 admin: access_log_path: "/dev/null" address: diff --git a/examples/front-proxy/service-envoy.yaml b/examples/front-proxy/service-envoy.yaml index df0dfd199a97..67ac03d7287f 100644 --- a/examples/front-proxy/service-envoy.yaml +++ b/examples/front-proxy/service-envoy.yaml @@ -3,7 +3,7 @@ static_resources: - address: socket_address: address: 0.0.0.0 - port_value: 80 + port_value: 8000 filter_chains: - filters: - name: envoy.filters.network.http_connection_manager diff --git a/examples/jaeger-native-tracing/docker-compose.yaml b/examples/jaeger-native-tracing/docker-compose.yaml index 3321e110cbb8..ca8fccb3d52b 100644 --- a/examples/jaeger-native-tracing/docker-compose.yaml +++ b/examples/jaeger-native-tracing/docker-compose.yaml @@ -13,10 +13,10 @@ services: networks: - envoymesh expose: - - "80" + - "8000" - "8001" ports: - - "8000:80" + - "8000:8000" - "8001:8001" dns: - 8.8.8.8 @@ -38,7 +38,7 @@ services: environment: - SERVICE_NAME=1 expose: - - "80" + - "8000" dns: - 8.8.8.8 - 8.8.4.4 @@ -59,7 +59,7 @@ services: environment: - SERVICE_NAME=2 expose: - - "80" + - "8000" dns: - 8.8.8.8 - 8.8.4.4 diff --git a/examples/jaeger-native-tracing/front-envoy-jaeger.yaml b/examples/jaeger-native-tracing/front-envoy-jaeger.yaml index 79e82af2f8aa..b2f3430a3aaa 100644 --- a/examples/jaeger-native-tracing/front-envoy-jaeger.yaml +++ b/examples/jaeger-native-tracing/front-envoy-jaeger.yaml @@ -3,7 +3,7 @@ static_resources: - address: socket_address: address: 0.0.0.0 - port_value: 80 + port_value: 8000 traffic_direction: OUTBOUND filter_chains: - filters: @@ -64,7 +64,7 @@ static_resources: address: socket_address: address: service1 - port_value: 80 + port_value: 8000 admin: access_log_path: "/dev/null" address: diff --git a/examples/jaeger-native-tracing/service1-envoy-jaeger.yaml b/examples/jaeger-native-tracing/service1-envoy-jaeger.yaml index d64a6ea33af1..9be0a80c1fe1 100644 --- a/examples/jaeger-native-tracing/service1-envoy-jaeger.yaml +++ b/examples/jaeger-native-tracing/service1-envoy-jaeger.yaml @@ -3,7 +3,7 @@ static_resources: - address: socket_address: address: 0.0.0.0 - port_value: 80 + port_value: 8000 traffic_direction: INBOUND filter_chains: - filters: @@ -103,7 +103,7 @@ static_resources: address: socket_address: address: service2 - port_value: 80 + port_value: 8000 admin: access_log_path: "/dev/null" address: diff --git a/examples/jaeger-native-tracing/service2-envoy-jaeger.yaml b/examples/jaeger-native-tracing/service2-envoy-jaeger.yaml index b04970c9738f..88d4f54ec594 100644 --- a/examples/jaeger-native-tracing/service2-envoy-jaeger.yaml +++ b/examples/jaeger-native-tracing/service2-envoy-jaeger.yaml @@ -3,7 +3,7 @@ static_resources: - address: 
socket_address: address: 0.0.0.0 - port_value: 80 + port_value: 8000 traffic_direction: INBOUND filter_chains: - filters: diff --git a/examples/jaeger-tracing/docker-compose.yaml b/examples/jaeger-tracing/docker-compose.yaml index 6c353fada6f4..94fbfb62bfa1 100644 --- a/examples/jaeger-tracing/docker-compose.yaml +++ b/examples/jaeger-tracing/docker-compose.yaml @@ -10,10 +10,10 @@ services: networks: - envoymesh expose: - - "80" + - "8000" - "8001" ports: - - "8000:80" + - "8000:8000" - "8001:8001" service1: @@ -29,7 +29,7 @@ services: environment: - SERVICE_NAME=1 expose: - - "80" + - "8000" service2: build: @@ -44,7 +44,7 @@ services: environment: - SERVICE_NAME=2 expose: - - "80" + - "8000" jaeger: image: jaegertracing/all-in-one diff --git a/examples/jaeger-tracing/front-envoy-jaeger.yaml b/examples/jaeger-tracing/front-envoy-jaeger.yaml index f23bffdff5bf..07c157599499 100644 --- a/examples/jaeger-tracing/front-envoy-jaeger.yaml +++ b/examples/jaeger-tracing/front-envoy-jaeger.yaml @@ -3,7 +3,7 @@ static_resources: - address: socket_address: address: 0.0.0.0 - port_value: 80 + port_value: 8000 traffic_direction: OUTBOUND filter_chains: - filters: @@ -53,7 +53,7 @@ static_resources: address: socket_address: address: service1 - port_value: 80 + port_value: 8000 - name: jaeger connect_timeout: 1s type: strict_dns diff --git a/examples/jaeger-tracing/service1-envoy-jaeger.yaml b/examples/jaeger-tracing/service1-envoy-jaeger.yaml index f5ff3b046132..b40ec8b8f1c9 100644 --- a/examples/jaeger-tracing/service1-envoy-jaeger.yaml +++ b/examples/jaeger-tracing/service1-envoy-jaeger.yaml @@ -3,7 +3,7 @@ static_resources: - address: socket_address: address: 0.0.0.0 - port_value: 80 + port_value: 8000 traffic_direction: INBOUND filter_chains: - filters: @@ -101,7 +101,7 @@ static_resources: address: socket_address: address: service2 - port_value: 80 + port_value: 8000 - name: jaeger connect_timeout: 1s type: strict_dns diff --git a/examples/jaeger-tracing/service2-envoy-jaeger.yaml b/examples/jaeger-tracing/service2-envoy-jaeger.yaml index 24a38c9fb03e..5b6a7d93b65a 100644 --- a/examples/jaeger-tracing/service2-envoy-jaeger.yaml +++ b/examples/jaeger-tracing/service2-envoy-jaeger.yaml @@ -3,7 +3,7 @@ static_resources: - address: socket_address: address: 0.0.0.0 - port_value: 80 + port_value: 8000 traffic_direction: INBOUND filter_chains: - filters: diff --git a/examples/load-reporting-service/docker-compose.yaml b/examples/load-reporting-service/docker-compose.yaml index 4ed40f3a33fa..ca7f40334dcf 100644 --- a/examples/load-reporting-service/docker-compose.yaml +++ b/examples/load-reporting-service/docker-compose.yaml @@ -7,6 +7,8 @@ services: dockerfile: Dockerfile-http-server volumes: - ./service-envoy-w-lrs.yaml:/etc/service-envoy-w-lrs.yaml + environment: + ENVOY_UID: 0 networks: envoymesh: aliases: @@ -17,7 +19,7 @@ services: ports: - "80-81:80" - "8081-8082:8081" - + lrs_server: build: context: . 
@@ -32,6 +34,6 @@ services: - "18000" ports: - "18000:18000" - + networks: - envoymesh: {} \ No newline at end of file + envoymesh: {} diff --git a/examples/lua/docker-compose.yaml b/examples/lua/docker-compose.yaml index 2ee4860cfc48..716ae8f6c4ff 100644 --- a/examples/lua/docker-compose.yaml +++ b/examples/lua/docker-compose.yaml @@ -10,10 +10,10 @@ services: networks: - envoymesh expose: - - "80" + - "8000" - "8001" ports: - - "8000:80" + - "8000:8000" - "8001:8001" web_service: diff --git a/examples/lua/envoy.yaml b/examples/lua/envoy.yaml index ab154f528d47..a106bc2feee0 100644 --- a/examples/lua/envoy.yaml +++ b/examples/lua/envoy.yaml @@ -4,7 +4,7 @@ static_resources: address: socket_address: address: 0.0.0.0 - port_value: 80 + port_value: 8000 filter_chains: - filters: - name: envoy.filters.network.http_connection_manager @@ -21,7 +21,7 @@ static_resources: routes: - match: prefix: "/" - route: + route: cluster: web_service http_filters: - name: envoy.filters.http.lua @@ -29,7 +29,7 @@ static_resources: "@type": type.googleapis.com/envoy.config.filter.http.lua.v2.Lua inline_code: | local mylibrary = require("lib.mylibrary") - + function envoy_on_request(request_handle) request_handle:headers():add("foo", mylibrary.foobar()) end diff --git a/examples/zipkin-tracing/docker-compose.yaml b/examples/zipkin-tracing/docker-compose.yaml index 132e75279305..488ccccf1bb6 100644 --- a/examples/zipkin-tracing/docker-compose.yaml +++ b/examples/zipkin-tracing/docker-compose.yaml @@ -10,10 +10,10 @@ services: networks: - envoymesh expose: - - "80" + - "8000" - "8001" ports: - - "8000:80" + - "8000:8000" - "8001:8001" service1: @@ -29,7 +29,7 @@ services: environment: - SERVICE_NAME=1 expose: - - "80" + - "8000" service2: build: @@ -44,7 +44,7 @@ services: environment: - SERVICE_NAME=2 expose: - - "80" + - "8000" zipkin: image: openzipkin/zipkin diff --git a/examples/zipkin-tracing/front-envoy-zipkin.yaml b/examples/zipkin-tracing/front-envoy-zipkin.yaml index 8af5504e6d3c..41e864552c3c 100644 --- a/examples/zipkin-tracing/front-envoy-zipkin.yaml +++ b/examples/zipkin-tracing/front-envoy-zipkin.yaml @@ -3,7 +3,7 @@ static_resources: - address: socket_address: address: 0.0.0.0 - port_value: 80 + port_value: 8000 traffic_direction: OUTBOUND filter_chains: - filters: @@ -58,7 +58,7 @@ static_resources: address: socket_address: address: service1 - port_value: 80 + port_value: 8000 - name: zipkin connect_timeout: 1s type: strict_dns diff --git a/examples/zipkin-tracing/service1-envoy-zipkin.yaml b/examples/zipkin-tracing/service1-envoy-zipkin.yaml index 99a9a2df207e..fe7318366db6 100644 --- a/examples/zipkin-tracing/service1-envoy-zipkin.yaml +++ b/examples/zipkin-tracing/service1-envoy-zipkin.yaml @@ -3,7 +3,7 @@ static_resources: - address: socket_address: address: 0.0.0.0 - port_value: 80 + port_value: 8000 traffic_direction: INBOUND filter_chains: - filters: @@ -99,7 +99,7 @@ static_resources: address: socket_address: address: service2 - port_value: 80 + port_value: 8000 - name: zipkin connect_timeout: 1s type: strict_dns diff --git a/examples/zipkin-tracing/service2-envoy-zipkin.yaml b/examples/zipkin-tracing/service2-envoy-zipkin.yaml index 27b7e322149e..ceebbcf60917 100644 --- a/examples/zipkin-tracing/service2-envoy-zipkin.yaml +++ b/examples/zipkin-tracing/service2-envoy-zipkin.yaml @@ -3,7 +3,7 @@ static_resources: - address: socket_address: address: 0.0.0.0 - port_value: 80 + port_value: 8000 traffic_direction: INBOUND filter_chains: - filters: From 9cceda308259f563925535d20d0425a99d18cfcf 
Mon Sep 17 00:00:00 2001 From: asraa Date: Sat, 27 Jun 2020 22:55:52 -0400 Subject: [PATCH 477/909] http: split out codec stats into their own library (#11771) No-op, but moves codec stats to their own library so that when codec splits for exception removal, the diffs can be minimal and the stats can be shared between codecs and their legacy clones. This will minimize the golden diff in fix_format for less confusion during the deprecation period. Signed-off-by: Asra Ali --- source/common/http/conn_manager_utility.h | 4 +- source/common/http/http1/BUILD | 16 +++++-- source/common/http/http1/codec_impl.h | 26 +--------- source/common/http/http1/codec_stats.h | 38 +++++++++++++++ source/common/http/http2/BUILD | 13 ++++- source/common/http/http2/codec_impl.h | 35 +------------- source/common/http/http2/codec_stats.h | 48 +++++++++++++++++++ source/common/upstream/BUILD | 8 ++-- source/common/upstream/upstream_impl.cc | 4 +- source/common/upstream/upstream_impl.h | 4 +- .../network/http_connection_manager/config.h | 4 +- source/server/admin/admin.h | 4 +- .../filters/listener/http_inspector/BUILD | 1 + .../http_inspector/http_inspector_test.cc | 1 + .../filters/listener/tls_inspector/BUILD | 2 + .../tls_inspector/tls_inspector_benchmark.cc | 1 + .../tls_inspector/tls_inspector_test.cc | 1 + test/mocks/upstream/BUILD | 4 +- test/mocks/upstream/cluster_info.h | 4 +- 19 files changed, 135 insertions(+), 83 deletions(-) create mode 100644 source/common/http/http1/codec_stats.h create mode 100644 source/common/http/http2/codec_stats.h diff --git a/source/common/http/conn_manager_utility.h b/source/common/http/conn_manager_utility.h index fd5f2098be7c..b46a98c2f0a7 100644 --- a/source/common/http/conn_manager_utility.h +++ b/source/common/http/conn_manager_utility.h @@ -7,8 +7,8 @@ #include "envoy/network/connection.h" #include "common/http/conn_manager_impl.h" -#include "common/http/http1/codec_impl.h" -#include "common/http/http2/codec_impl.h" +#include "common/http/http1/codec_stats.h" +#include "common/http/http2/codec_stats.h" namespace Envoy { namespace Http { diff --git a/source/common/http/http1/BUILD b/source/common/http/http1/BUILD index bb7bdd0e8ba5..042870491232 100644 --- a/source/common/http/http1/BUILD +++ b/source/common/http/http1/BUILD @@ -14,23 +14,32 @@ envoy_cc_library( hdrs = ["header_formatter.h"], ) +envoy_cc_library( + name = "codec_stats_lib", + hdrs = ["codec_stats.h"], + deps = [ + "//include/envoy/stats:stats_interface", + "//include/envoy/stats:stats_macros", + "//source/common/common:thread_lib", + ], +) + envoy_cc_library( name = "codec_lib", srcs = ["codec_impl.cc"], hdrs = ["codec_impl.h"], external_deps = ["http_parser"], deps = [ + ":codec_stats_lib", + ":header_formatter_lib", "//include/envoy/buffer:buffer_interface", "//include/envoy/http:codec_interface", "//include/envoy/http:header_map_interface", "//include/envoy/network:connection_interface", - "//include/envoy/stats:stats_interface", - "//include/envoy/stats:stats_macros", "//source/common/buffer:buffer_lib", "//source/common/buffer:watermark_buffer_lib", "//source/common/common:assert_lib", "//source/common/common:statusor_lib", - "//source/common/common:thread_lib", "//source/common/common:utility_lib", "//source/common/grpc:common_lib", "//source/common/http:codec_helper_lib", @@ -41,7 +50,6 @@ envoy_cc_library( "//source/common/http:headers_lib", "//source/common/http:status_lib", "//source/common/http:utility_lib", - "//source/common/http/http1:header_formatter_lib", 
"//source/common/runtime:runtime_features_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], diff --git a/source/common/http/http1/codec_impl.h b/source/common/http/http1/codec_impl.h index dab1b41de235..44e4282742c9 100644 --- a/source/common/http/http1/codec_impl.h +++ b/source/common/http/http1/codec_impl.h @@ -15,10 +15,10 @@ #include "common/buffer/watermark_buffer.h" #include "common/common/assert.h" #include "common/common/statusor.h" -#include "common/common/thread.h" #include "common/http/codec_helper.h" #include "common/http/codes.h" #include "common/http/header_map_impl.h" +#include "common/http/http1/codec_stats.h" #include "common/http/http1/header_formatter.h" #include "common/http/status.h" @@ -26,30 +26,6 @@ namespace Envoy { namespace Http { namespace Http1 { -/** - * All stats for the HTTP/1 codec. @see stats_macros.h - */ -#define ALL_HTTP1_CODEC_STATS(COUNTER) \ - COUNTER(dropped_headers_with_underscores) \ - COUNTER(metadata_not_supported_error) \ - COUNTER(requests_rejected_with_underscores_in_headers) \ - COUNTER(response_flood) - -/** - * Wrapper struct for the HTTP/1 codec stats. @see stats_macros.h - */ -struct CodecStats { - using AtomicPtr = Thread::AtomicPtr; - - static CodecStats& atomicGet(AtomicPtr& ptr, Stats::Scope& scope) { - return *ptr.get([&scope]() -> CodecStats* { - return new CodecStats{ALL_HTTP1_CODEC_STATS(POOL_COUNTER_PREFIX(scope, "http1."))}; - }); - } - - ALL_HTTP1_CODEC_STATS(GENERATE_COUNTER_STRUCT) -}; - class ConnectionImpl; /** diff --git a/source/common/http/http1/codec_stats.h b/source/common/http/http1/codec_stats.h new file mode 100644 index 000000000000..ac296522adc2 --- /dev/null +++ b/source/common/http/http1/codec_stats.h @@ -0,0 +1,38 @@ +#pragma once + +#include "envoy/stats/scope.h" +#include "envoy/stats/stats_macros.h" + +#include "common/common/thread.h" + +namespace Envoy { +namespace Http { +namespace Http1 { + +/** + * All stats for the HTTP/1 codec. @see stats_macros.h + */ +#define ALL_HTTP1_CODEC_STATS(COUNTER) \ + COUNTER(dropped_headers_with_underscores) \ + COUNTER(metadata_not_supported_error) \ + COUNTER(requests_rejected_with_underscores_in_headers) \ + COUNTER(response_flood) + +/** + * Wrapper struct for the HTTP/1 codec stats. 
@see stats_macros.h + */ +struct CodecStats { + using AtomicPtr = Thread::AtomicPtr; + + static CodecStats& atomicGet(AtomicPtr& ptr, Stats::Scope& scope) { + return *ptr.get([&scope]() -> CodecStats* { + return new CodecStats{ALL_HTTP1_CODEC_STATS(POOL_COUNTER_PREFIX(scope, "http1."))}; + }); + } + + ALL_HTTP1_CODEC_STATS(GENERATE_COUNTER_STRUCT) +}; + +} // namespace Http1 +} // namespace Http +} // namespace Envoy diff --git a/source/common/http/http2/BUILD b/source/common/http/http2/BUILD index a27a6d85282d..dd0333ffa847 100644 --- a/source/common/http/http2/BUILD +++ b/source/common/http/http2/BUILD @@ -8,6 +8,16 @@ licenses(["notice"]) # Apache 2 envoy_package() +envoy_cc_library( + name = "codec_stats_lib", + hdrs = ["codec_stats.h"], + deps = [ + "//include/envoy/stats:stats_interface", + "//include/envoy/stats:stats_macros", + "//source/common/common:thread_lib", + ], +) + envoy_cc_library( name = "codec_lib", srcs = ["codec_impl.cc"], @@ -19,6 +29,7 @@ envoy_cc_library( "abseil_algorithm", ], deps = [ + ":codec_stats_lib", ":metadata_decoder_lib", ":metadata_encoder_lib", "//include/envoy/event:deferred_deletable", @@ -28,14 +39,12 @@ envoy_cc_library( "//include/envoy/http:header_map_interface", "//include/envoy/network:connection_interface", "//include/envoy/stats:stats_interface", - "//include/envoy/stats:stats_macros", "//source/common/buffer:buffer_lib", "//source/common/buffer:watermark_buffer_lib", "//source/common/common:assert_lib", "//source/common/common:enum_to_int", "//source/common/common:linked_object", "//source/common/common:minimal_logger_lib", - "//source/common/common:thread_lib", "//source/common/common:utility_lib", "//source/common/http:codec_helper_lib", "//source/common/http:codes_lib", diff --git a/source/common/http/http2/codec_impl.h b/source/common/http/http2/codec_impl.h index 7f8e26a31d34..bd2f6ecdc3aa 100644 --- a/source/common/http/http2/codec_impl.h +++ b/source/common/http/http2/codec_impl.h @@ -19,6 +19,7 @@ #include "common/common/thread.h" #include "common/http/codec_helper.h" #include "common/http/header_map_impl.h" +#include "common/http/http2/codec_stats.h" #include "common/http/http2/metadata_decoder.h" #include "common/http/http2/metadata_encoder.h" #include "common/http/status.h" @@ -35,40 +36,6 @@ namespace Http2 { // differentiate between HTTP/1 and HTTP/2. const std::string CLIENT_MAGIC_PREFIX = "PRI * HTTP/2"; -/** - * All stats for the HTTP/2 codec. @see stats_macros.h - */ -#define ALL_HTTP2_CODEC_STATS(COUNTER) \ - COUNTER(dropped_headers_with_underscores) \ - COUNTER(header_overflow) \ - COUNTER(headers_cb_no_stream) \ - COUNTER(inbound_empty_frames_flood) \ - COUNTER(inbound_priority_frames_flood) \ - COUNTER(inbound_window_update_frames_flood) \ - COUNTER(outbound_control_flood) \ - COUNTER(outbound_flood) \ - COUNTER(requests_rejected_with_underscores_in_headers) \ - COUNTER(rx_messaging_error) \ - COUNTER(rx_reset) \ - COUNTER(too_many_header_frames) \ - COUNTER(trailers) \ - COUNTER(tx_reset) - -/** - * Wrapper struct for the HTTP/2 codec stats. 
@see stats_macros.h - */ -struct CodecStats { - using AtomicPtr = Thread::AtomicPtr; - - static CodecStats& atomicGet(AtomicPtr& ptr, Stats::Scope& scope) { - return *ptr.get([&scope]() -> CodecStats* { - return new CodecStats{ALL_HTTP2_CODEC_STATS(POOL_COUNTER_PREFIX(scope, "http2."))}; - }); - } - - ALL_HTTP2_CODEC_STATS(GENERATE_COUNTER_STRUCT) -}; - class Utility { public: /** diff --git a/source/common/http/http2/codec_stats.h b/source/common/http/http2/codec_stats.h new file mode 100644 index 000000000000..9aaeab254eb4 --- /dev/null +++ b/source/common/http/http2/codec_stats.h @@ -0,0 +1,48 @@ +#pragma once + +#include "envoy/stats/scope.h" +#include "envoy/stats/stats_macros.h" + +#include "common/common/thread.h" + +namespace Envoy { +namespace Http { +namespace Http2 { + +/** + * All stats for the HTTP/2 codec. @see stats_macros.h + */ +#define ALL_HTTP2_CODEC_STATS(COUNTER) \ + COUNTER(dropped_headers_with_underscores) \ + COUNTER(header_overflow) \ + COUNTER(headers_cb_no_stream) \ + COUNTER(inbound_empty_frames_flood) \ + COUNTER(inbound_priority_frames_flood) \ + COUNTER(inbound_window_update_frames_flood) \ + COUNTER(outbound_control_flood) \ + COUNTER(outbound_flood) \ + COUNTER(requests_rejected_with_underscores_in_headers) \ + COUNTER(rx_messaging_error) \ + COUNTER(rx_reset) \ + COUNTER(too_many_header_frames) \ + COUNTER(trailers) \ + COUNTER(tx_reset) + +/** + * Wrapper struct for the HTTP/2 codec stats. @see stats_macros.h + */ +struct CodecStats { + using AtomicPtr = Thread::AtomicPtr; + + static CodecStats& atomicGet(AtomicPtr& ptr, Stats::Scope& scope) { + return *ptr.get([&scope]() -> CodecStats* { + return new CodecStats{ALL_HTTP2_CODEC_STATS(POOL_COUNTER_PREFIX(scope, "http2."))}; + }); + } + + ALL_HTTP2_CODEC_STATS(GENERATE_COUNTER_STRUCT) +}; + +} // namespace Http2 +} // namespace Http +} // namespace Envoy diff --git a/source/common/upstream/BUILD b/source/common/upstream/BUILD index bdbe7c309f6d..081a9b43d334 100644 --- a/source/common/upstream/BUILD +++ b/source/common/upstream/BUILD @@ -429,8 +429,8 @@ envoy_cc_library( "//source/common/common:enum_to_int", "//source/common/common:thread_lib", "//source/common/common:utility_lib", - "//source/common/http/http1:codec_lib", - "//source/common/http/http2:codec_lib", + "//source/common/http/http1:codec_stats_lib", + "//source/common/http/http2:codec_stats_lib", "//source/common/http:utility_lib", "//source/common/network:address_lib", "//source/common/network:resolver_lib", @@ -513,8 +513,8 @@ envoy_cc_library( "//source/common/common:minimal_logger_lib", "//source/common/config:metadata_lib", "//source/common/config:well_known_names", - "//source/common/http/http1:codec_lib", - "//source/common/http/http2:codec_lib", + "//source/common/http/http1:codec_stats_lib", + "//source/common/http/http2:codec_stats_lib", "//source/common/init:manager_lib", "//source/common/shared_pool:shared_pool_lib", "//source/common/stats:isolated_store_lib", diff --git a/source/common/upstream/upstream_impl.cc b/source/common/upstream/upstream_impl.cc index 65321d0b3e1d..e69f003e1f6a 100644 --- a/source/common/upstream/upstream_impl.cc +++ b/source/common/upstream/upstream_impl.cc @@ -31,8 +31,8 @@ #include "common/common/fmt.h" #include "common/common/utility.h" #include "common/config/utility.h" -#include "common/http/http1/codec_impl.h" -#include "common/http/http2/codec_impl.h" +#include "common/http/http1/codec_stats.h" +#include "common/http/http2/codec_stats.h" #include "common/http/utility.h" #include 
"common/network/address_impl.h" #include "common/network/resolver_impl.h" diff --git a/source/common/upstream/upstream_impl.h b/source/common/upstream/upstream_impl.h index 9e46012aaa58..135f01c9ed84 100644 --- a/source/common/upstream/upstream_impl.h +++ b/source/common/upstream/upstream_impl.h @@ -41,8 +41,8 @@ #include "common/common/thread.h" #include "common/config/metadata.h" #include "common/config/well_known_names.h" -#include "common/http/http1/codec_impl.h" -#include "common/http/http2/codec_impl.h" +#include "common/http/http1/codec_stats.h" +#include "common/http/http2/codec_stats.h" #include "common/init/manager_impl.h" #include "common/network/utility.h" #include "common/shared_pool/shared_pool.h" diff --git a/source/extensions/filters/network/http_connection_manager/config.h b/source/extensions/filters/network/http_connection_manager/config.h index ca76b80a593a..f10ee8ccee48 100644 --- a/source/extensions/filters/network/http_connection_manager/config.h +++ b/source/extensions/filters/network/http_connection_manager/config.h @@ -18,8 +18,8 @@ #include "common/common/logger.h" #include "common/http/conn_manager_impl.h" #include "common/http/date_provider_impl.h" -#include "common/http/http1/codec_impl.h" -#include "common/http/http2/codec_impl.h" +#include "common/http/http1/codec_stats.h" +#include "common/http/http2/codec_stats.h" #include "common/json/json_loader.h" #include "common/local_reply/local_reply.h" #include "common/router/rds_impl.h" diff --git a/source/server/admin/admin.h b/source/server/admin/admin.h index 51818a6a714e..d491fb4443df 100644 --- a/source/server/admin/admin.h +++ b/source/server/admin/admin.h @@ -29,8 +29,8 @@ #include "common/http/conn_manager_impl.h" #include "common/http/date_provider_impl.h" #include "common/http/default_server_string.h" -#include "common/http/http1/codec_impl.h" -#include "common/http/http2/codec_impl.h" +#include "common/http/http1/codec_stats.h" +#include "common/http/http2/codec_stats.h" #include "common/http/request_id_extension_impl.h" #include "common/http/utility.h" #include "common/network/connection_balancer_impl.h" diff --git a/test/extensions/filters/listener/http_inspector/BUILD b/test/extensions/filters/listener/http_inspector/BUILD index 944ec4eff0d1..defcc0d8c7da 100644 --- a/test/extensions/filters/listener/http_inspector/BUILD +++ b/test/extensions/filters/listener/http_inspector/BUILD @@ -18,6 +18,7 @@ envoy_extension_cc_test( tags = ["fails_on_windows"], deps = [ "//source/common/common:hex_lib", + "//source/common/http:utility_lib", "//source/extensions/filters/listener/http_inspector:http_inspector_lib", "//test/mocks/api:api_mocks", "//test/mocks/network:network_mocks", diff --git a/test/extensions/filters/listener/http_inspector/http_inspector_test.cc b/test/extensions/filters/listener/http_inspector/http_inspector_test.cc index 576496ed1061..a6638892f26f 100644 --- a/test/extensions/filters/listener/http_inspector/http_inspector_test.cc +++ b/test/extensions/filters/listener/http_inspector/http_inspector_test.cc @@ -1,4 +1,5 @@ #include "common/common/hex.h" +#include "common/http/utility.h" #include "common/network/io_socket_handle_impl.h" #include "extensions/filters/listener/http_inspector/http_inspector.h" diff --git a/test/extensions/filters/listener/tls_inspector/BUILD b/test/extensions/filters/listener/tls_inspector/BUILD index bfa8c6ebd18a..0f654911f672 100644 --- a/test/extensions/filters/listener/tls_inspector/BUILD +++ b/test/extensions/filters/listener/tls_inspector/BUILD @@ -19,6 +19,7 
@@ envoy_cc_test( srcs = ["tls_inspector_test.cc"], deps = [ ":tls_utility_lib", + "//source/common/http:utility_lib", "//source/extensions/filters/listener/tls_inspector:config", "//source/extensions/filters/listener/tls_inspector:tls_inspector_lib", "//test/mocks/api:api_mocks", @@ -37,6 +38,7 @@ envoy_extension_cc_benchmark_binary( ], deps = [ ":tls_utility_lib", + "//source/common/http:utility_lib", "//source/common/network:listen_socket_lib", "//source/extensions/filters/listener/tls_inspector:tls_inspector_lib", "//test/mocks/api:api_mocks", diff --git a/test/extensions/filters/listener/tls_inspector/tls_inspector_benchmark.cc b/test/extensions/filters/listener/tls_inspector/tls_inspector_benchmark.cc index 1552954e89bb..5bed094fc34c 100644 --- a/test/extensions/filters/listener/tls_inspector/tls_inspector_benchmark.cc +++ b/test/extensions/filters/listener/tls_inspector/tls_inspector_benchmark.cc @@ -1,6 +1,7 @@ #include #include "common/api/os_sys_calls_impl.h" +#include "common/http/utility.h" #include "common/network/io_socket_handle_impl.h" #include "common/network/listen_socket_impl.h" diff --git a/test/extensions/filters/listener/tls_inspector/tls_inspector_test.cc b/test/extensions/filters/listener/tls_inspector/tls_inspector_test.cc index cf7aef8aca6b..56f2e637e0fd 100644 --- a/test/extensions/filters/listener/tls_inspector/tls_inspector_test.cc +++ b/test/extensions/filters/listener/tls_inspector/tls_inspector_test.cc @@ -1,3 +1,4 @@ +#include "common/http/utility.h" #include "common/network/io_socket_handle_impl.h" #include "extensions/filters/listener/tls_inspector/tls_inspector.h" diff --git a/test/mocks/upstream/BUILD b/test/mocks/upstream/BUILD index 6e210478aa6a..d3ba248f450f 100644 --- a/test/mocks/upstream/BUILD +++ b/test/mocks/upstream/BUILD @@ -19,8 +19,8 @@ envoy_cc_mock( "//source/common/common:thread_lib", "//source/common/config:metadata_lib", "//source/common/http:utility_lib", - "//source/common/http/http1:codec_lib", - "//source/common/http/http2:codec_lib", + "//source/common/http/http1:codec_stats_lib", + "//source/common/http/http2:codec_stats_lib", "//source/common/network:raw_buffer_socket_lib", "//source/common/upstream:upstream_includes", "//source/common/upstream:upstream_lib", diff --git a/test/mocks/upstream/cluster_info.h b/test/mocks/upstream/cluster_info.h index e3bce01c6282..9e210997d337 100644 --- a/test/mocks/upstream/cluster_info.h +++ b/test/mocks/upstream/cluster_info.h @@ -14,8 +14,8 @@ #include "envoy/upstream/upstream.h" #include "common/common/thread.h" -#include "common/http/http1/codec_impl.h" -#include "common/http/http2/codec_impl.h" +#include "common/http/http1/codec_stats.h" +#include "common/http/http2/codec_stats.h" #include "common/upstream/upstream_impl.h" #include "test/mocks/runtime/mocks.h" From 498fac2a96bb15adfeed94e78a7e2b7d4c62fb51 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Sat, 27 Jun 2020 22:56:19 -0400 Subject: [PATCH 478/909] conn_pool: cleaning up a file move TODO (#11757) Signed-off-by: Alyssa Wilk --- source/common/conn_pool/BUILD | 21 ++ source/common/conn_pool/conn_pool_base.cc | 428 ++++++++++++++++++++++ source/common/conn_pool/conn_pool_base.h | 191 ++++++++++ source/common/http/BUILD | 1 + source/common/http/conn_pool_base.cc | 419 --------------------- source/common/http/conn_pool_base.h | 180 +-------- 6 files changed, 642 insertions(+), 598 deletions(-) create mode 100644 source/common/conn_pool/BUILD create mode 100644 source/common/conn_pool/conn_pool_base.cc create mode 100644 
source/common/conn_pool/conn_pool_base.h diff --git a/source/common/conn_pool/BUILD b/source/common/conn_pool/BUILD new file mode 100644 index 000000000000..fafa208adc16 --- /dev/null +++ b/source/common/conn_pool/BUILD @@ -0,0 +1,21 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_library( + name = "conn_pool_base_lib", + srcs = ["conn_pool_base.cc"], + hdrs = ["conn_pool_base.h"], + deps = [ + "//include/envoy/stats:timespan_interface", + "//source/common/common:linked_object", + "//source/common/stats:timespan_lib", + "//source/common/upstream:upstream_lib", + ], +) diff --git a/source/common/conn_pool/conn_pool_base.cc b/source/common/conn_pool/conn_pool_base.cc new file mode 100644 index 000000000000..6e1be1ddc8a5 --- /dev/null +++ b/source/common/conn_pool/conn_pool_base.cc @@ -0,0 +1,428 @@ +#include "common/conn_pool/conn_pool_base.h" + +#include "common/common/assert.h" +#include "common/network/transport_socket_options_impl.h" +#include "common/runtime/runtime_features.h" +#include "common/stats/timespan_impl.h" +#include "common/upstream/upstream_impl.h" + +namespace Envoy { +namespace ConnectionPool { + +ConnPoolImplBase::ConnPoolImplBase( + Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, + Event::Dispatcher& dispatcher, const Network::ConnectionSocket::OptionsSharedPtr& options, + const Network::TransportSocketOptionsSharedPtr& transport_socket_options) + : host_(host), priority_(priority), dispatcher_(dispatcher), socket_options_(options), + transport_socket_options_(transport_socket_options) {} + +ConnPoolImplBase::~ConnPoolImplBase() { + ASSERT(ready_clients_.empty()); + ASSERT(busy_clients_.empty()); + ASSERT(connecting_clients_.empty()); +} + +void ConnPoolImplBase::destructAllConnections() { + for (auto* list : {&ready_clients_, &busy_clients_, &connecting_clients_}) { + while (!list->empty()) { + list->front()->close(); + } + } + + // Make sure all clients are destroyed before we are destroyed. + dispatcher_.clearDeferredDeleteList(); +} + +void ConnPoolImplBase::tryCreateNewConnection() { + if (pending_requests_.size() <= connecting_request_capacity_) { + // There are already enough CONNECTING connections for the number + // of queued requests. + return; + } + + const bool can_create_connection = + host_->cluster().resourceManager(priority_).connections().canCreate(); + if (!can_create_connection) { + host_->cluster().stats().upstream_cx_overflow_.inc(); + } + // If we are at the connection circuit-breaker limit due to other upstreams having + // too many open connections, and this upstream has no connections, always create one, to + // prevent pending requests being queued to this upstream with no way to be processed. 
+ if (can_create_connection || + (ready_clients_.empty() && busy_clients_.empty() && connecting_clients_.empty())) { + ENVOY_LOG(debug, "creating a new connection"); + ActiveClientPtr client = instantiateActiveClient(); + ASSERT(client->state_ == ActiveClient::State::CONNECTING); + ASSERT(std::numeric_limits::max() - connecting_request_capacity_ >= + client->effectiveConcurrentRequestLimit()); + connecting_request_capacity_ += client->effectiveConcurrentRequestLimit(); + client->moveIntoList(std::move(client), owningList(client->state_)); + } +} + +void ConnPoolImplBase::attachRequestToClientImpl(Envoy::ConnectionPool::ActiveClient& client, + void* context) { + ASSERT(client.state_ == Envoy::ConnectionPool::ActiveClient::State::READY); + + if (!host_->cluster().resourceManager(priority_).requests().canCreate()) { + ENVOY_LOG(debug, "max requests overflow"); + onPoolFailure(client.real_host_description_, absl::string_view(), + ConnectionPool::PoolFailureReason::Overflow, context); + host_->cluster().stats().upstream_rq_pending_overflow_.inc(); + } else { + ENVOY_CONN_LOG(debug, "creating stream", client); + + client.remaining_requests_--; + if (client.remaining_requests_ == 0) { + ENVOY_CONN_LOG(debug, "maximum requests per connection, DRAINING", client); + host_->cluster().stats().upstream_cx_max_requests_.inc(); + transitionActiveClientState(client, Envoy::ConnectionPool::ActiveClient::State::DRAINING); + } else if (client.numActiveRequests() + 1 >= client.concurrent_request_limit_) { + // As soon as the new request is created, the client will be maxed out. + transitionActiveClientState(client, Envoy::ConnectionPool::ActiveClient::State::BUSY); + } + + num_active_requests_++; + host_->stats().rq_total_.inc(); + host_->stats().rq_active_.inc(); + host_->cluster().stats().upstream_rq_total_.inc(); + host_->cluster().stats().upstream_rq_active_.inc(); + host_->cluster().resourceManager(priority_).requests().inc(); + + onPoolReady(client, context); + } +} + +void ConnPoolImplBase::onRequestClosed(Envoy::ConnectionPool::ActiveClient& client, + bool delay_attaching_request) { + ENVOY_CONN_LOG(debug, "destroying stream: {} remaining", client, client.numActiveRequests()); + ASSERT(num_active_requests_ > 0); + num_active_requests_--; + host_->stats().rq_active_.dec(); + host_->cluster().stats().upstream_rq_active_.dec(); + host_->cluster().resourceManager(priority_).requests().dec(); + if (client.state_ == ActiveClient::State::DRAINING && client.numActiveRequests() == 0) { + // Close out the draining client if we no longer have active requests. + client.close(); + } else if (client.state_ == ActiveClient::State::BUSY) { + // A request was just ended, so we should be below the limit now. + ASSERT(client.numActiveRequests() < client.concurrent_request_limit_); + + transitionActiveClientState(client, ActiveClient::State::READY); + if (!delay_attaching_request) { + onUpstreamReady(); + } + } +} + +ConnectionPool::Cancellable* ConnPoolImplBase::newStream(void* context) { + if (!ready_clients_.empty()) { + ActiveClient& client = *ready_clients_.front(); + ENVOY_CONN_LOG(debug, "using existing connection", client); + attachRequestToClientImpl(client, context); + return nullptr; + } + + if (host_->cluster().resourceManager(priority_).pendingRequests().canCreate()) { + ConnectionPool::Cancellable* pending = newPendingRequest(context); + + // This must come after newPendingRequest() because this function uses the + // length of pending_requests_ to determine if a new connection is needed. 
+ tryCreateNewConnection(); + + return pending; + } else { + ENVOY_LOG(debug, "max pending requests overflow"); + onPoolFailure(nullptr, absl::string_view(), ConnectionPool::PoolFailureReason::Overflow, + context); + host_->cluster().stats().upstream_rq_pending_overflow_.inc(); + return nullptr; + } +} + +void ConnPoolImplBase::onUpstreamReady() { + while (!pending_requests_.empty() && !ready_clients_.empty()) { + ActiveClientPtr& client = ready_clients_.front(); + ENVOY_CONN_LOG(debug, "attaching to next request", *client); + // Pending requests are pushed onto the front, so pull from the back. + attachRequestToClient(*client, *pending_requests_.back()); + pending_requests_.pop_back(); + } +} + +std::list& ConnPoolImplBase::owningList(ActiveClient::State state) { + switch (state) { + case ActiveClient::State::CONNECTING: + return connecting_clients_; + case ActiveClient::State::READY: + return ready_clients_; + case ActiveClient::State::BUSY: + return busy_clients_; + case ActiveClient::State::DRAINING: + return busy_clients_; + case ActiveClient::State::CLOSED: + NOT_REACHED_GCOVR_EXCL_LINE; + } + NOT_REACHED_GCOVR_EXCL_LINE; +} + +void ConnPoolImplBase::transitionActiveClientState(ActiveClient& client, + ActiveClient::State new_state) { + auto& old_list = owningList(client.state_); + auto& new_list = owningList(new_state); + client.state_ = new_state; + + // old_list and new_list can be equal when transitioning from BUSY to DRAINING. + // + // The documentation for list.splice() (which is what moveBetweenLists() calls) is + // unclear whether it is allowed for src and dst to be the same, so check here + // since it is a no-op anyways. + if (&old_list != &new_list) { + client.moveBetweenLists(old_list, new_list); + } +} + +void ConnPoolImplBase::addDrainedCallbackImpl(Instance::DrainedCb cb) { + drained_callbacks_.push_back(cb); + checkForDrained(); +} + +void ConnPoolImplBase::closeIdleConnections() { + // Create a separate list of elements to close to avoid mutate-while-iterating problems. + std::list to_close; + + for (auto& client : ready_clients_) { + if (client->numActiveRequests() == 0) { + to_close.push_back(client.get()); + } + } + + if (pending_requests_.empty()) { + for (auto& client : connecting_clients_) { + to_close.push_back(client.get()); + } + } + + for (auto& entry : to_close) { + entry->close(); + } +} + +void ConnPoolImplBase::drainConnectionsImpl() { + closeIdleConnections(); + + // closeIdleConnections() closes all connections in ready_clients_ with no active requests, + // so all remaining entries in ready_clients_ are serving requests. Move them and all entries + // in busy_clients_ to draining. + while (!ready_clients_.empty()) { + transitionActiveClientState(*ready_clients_.front(), ActiveClient::State::DRAINING); + } + + // Changing busy_clients_ to DRAINING does not move them between lists, + // so use a for-loop since the list is not mutated. 
+ ASSERT(&owningList(ActiveClient::State::DRAINING) == &busy_clients_); + for (auto& busy_client : busy_clients_) { + transitionActiveClientState(*busy_client, ActiveClient::State::DRAINING); + } +} + +void ConnPoolImplBase::checkForDrained() { + if (drained_callbacks_.empty()) { + return; + } + + closeIdleConnections(); + + if (pending_requests_.empty() && ready_clients_.empty() && busy_clients_.empty() && + connecting_clients_.empty()) { + ENVOY_LOG(debug, "invoking drained callbacks"); + for (const Instance::DrainedCb& cb : drained_callbacks_) { + cb(); + } + } +} + +void ConnPoolImplBase::onConnectionEvent(ActiveClient& client, absl::string_view failure_reason, + Network::ConnectionEvent event) { + if (client.state_ == ActiveClient::State::CONNECTING) { + ASSERT(connecting_request_capacity_ >= client.effectiveConcurrentRequestLimit()); + connecting_request_capacity_ -= client.effectiveConcurrentRequestLimit(); + } + + if (event == Network::ConnectionEvent::RemoteClose || + event == Network::ConnectionEvent::LocalClose) { + // The client died. + ENVOY_CONN_LOG(debug, "client disconnected, failure reason: {}", client, failure_reason); + + Envoy::Upstream::reportUpstreamCxDestroy(host_, event); + const bool incomplete_request = client.closingWithIncompleteRequest(); + if (incomplete_request) { + Envoy::Upstream::reportUpstreamCxDestroyActiveRequest(host_, event); + } + + if (client.state_ == ActiveClient::State::CONNECTING) { + host_->cluster().stats().upstream_cx_connect_fail_.inc(); + host_->stats().cx_connect_fail_.inc(); + + ConnectionPool::PoolFailureReason reason; + if (client.timed_out_) { + reason = ConnectionPool::PoolFailureReason::Timeout; + } else if (event == Network::ConnectionEvent::RemoteClose) { + reason = ConnectionPool::PoolFailureReason::RemoteConnectionFailure; + } else { + reason = ConnectionPool::PoolFailureReason::LocalConnectionFailure; + } + + // Raw connect failures should never happen under normal circumstances. If we have an upstream + // that is behaving badly, requests can get stuck here in the pending state. If we see a + // connect failure, we purge all pending requests so that calling code can determine what to + // do with the request. + // NOTE: We move the existing pending requests to a temporary list. This is done so that + // if retry logic submits a new request to the pool, we don't fail it inline. + purgePendingRequests(client.real_host_description_, failure_reason, reason); + } + + // We need to release our resourceManager() resources before checking below for + // whether we can create a new connection. Normally this would happen when + // client's destructor runs, but this object needs to be deferredDelete'd(), so + // this forces part of its cleanup to happen now. + client.releaseResources(); + + dispatcher_.deferredDelete(client.removeFromList(owningList(client.state_))); + if (incomplete_request) { + checkForDrained(); + } + + client.state_ = ActiveClient::State::CLOSED; + + // If we have pending requests and we just lost a connection we should make a new one. 
+ if (!pending_requests_.empty()) { + tryCreateNewConnection(); + } + } else if (event == Network::ConnectionEvent::Connected) { + client.conn_connect_ms_->complete(); + client.conn_connect_ms_.reset(); + + ASSERT(client.state_ == ActiveClient::State::CONNECTING); + transitionActiveClientState(client, ActiveClient::State::READY); + + onUpstreamReady(); + checkForDrained(); + } + + if (client.connect_timer_) { + client.connect_timer_->disableTimer(); + client.connect_timer_.reset(); + } +} + +PendingRequest::PendingRequest(ConnPoolImplBase& parent) : parent_(parent) { + parent_.host_->cluster().stats().upstream_rq_pending_total_.inc(); + parent_.host_->cluster().stats().upstream_rq_pending_active_.inc(); + parent_.host_->cluster().resourceManager(parent_.priority_).pendingRequests().inc(); +} + +PendingRequest::~PendingRequest() { + parent_.host_->cluster().stats().upstream_rq_pending_active_.dec(); + parent_.host_->cluster().resourceManager(parent_.priority_).pendingRequests().dec(); +} + +void PendingRequest::cancel(Envoy::ConnectionPool::CancelPolicy policy) { + parent_.onPendingRequestCancel(*this, policy); +} + +void ConnPoolImplBase::purgePendingRequests( + const Upstream::HostDescriptionConstSharedPtr& host_description, + absl::string_view failure_reason, ConnectionPool::PoolFailureReason reason) { + // NOTE: We move the existing pending requests to a temporary list. This is done so that + // if retry logic submits a new request to the pool, we don't fail it inline. + pending_requests_to_purge_ = std::move(pending_requests_); + while (!pending_requests_to_purge_.empty()) { + PendingRequestPtr request = + pending_requests_to_purge_.front()->removeFromList(pending_requests_to_purge_); + host_->cluster().stats().upstream_rq_pending_failure_eject_.inc(); + onPoolFailure(host_description, failure_reason, reason, request->context()); + } +} + +void ConnPoolImplBase::onPendingRequestCancel(PendingRequest& request, + Envoy::ConnectionPool::CancelPolicy policy) { + ENVOY_LOG(debug, "cancelling pending request"); + if (!pending_requests_to_purge_.empty()) { + // If pending_requests_to_purge_ is not empty, it means that we are called from + // with-in a onPoolFailure callback invoked in purgePendingRequests (i.e. purgePendingRequests + // is down in the call stack). Remove this request from the list as it is cancelled, + // and there is no need to call its onPoolFailure callback. + request.removeFromList(pending_requests_to_purge_); + } else { + request.removeFromList(pending_requests_); + } + // There's excess capacity if + // pending_requests < connecting_request_capacity_ - capacity of most recent client. + // It's calculated below with addition instead to avoid underflow issues, overflow being + // assumed to not be a problem across the connection pool. + if (policy == Envoy::ConnectionPool::CancelPolicy::CloseExcess && !connecting_clients_.empty() && + (pending_requests_.size() + connecting_clients_.front()->effectiveConcurrentRequestLimit() <= + connecting_request_capacity_)) { + auto& client = *connecting_clients_.front(); + transitionActiveClientState(client, ActiveClient::State::DRAINING); + client.close(); + } + + host_->cluster().stats().upstream_rq_cancelled_.inc(); + checkForDrained(); +} + +namespace { +// Translate zero to UINT64_MAX so that the zero/unlimited case doesn't +// have to be handled specially. +uint64_t translateZeroToUnlimited(uint64_t limit) { + return (limit != 0) ? 
limit : std::numeric_limits::max(); +} +} // namespace + +ActiveClient::ActiveClient(ConnPoolImplBase& parent, uint64_t lifetime_request_limit, + uint64_t concurrent_request_limit) + : parent_(parent), remaining_requests_(translateZeroToUnlimited(lifetime_request_limit)), + concurrent_request_limit_(translateZeroToUnlimited(concurrent_request_limit)), + connect_timer_(parent_.dispatcher_.createTimer([this]() -> void { onConnectTimeout(); })) { + conn_connect_ms_ = std::make_unique( + parent_.host_->cluster().stats().upstream_cx_connect_ms_, parent_.dispatcher_.timeSource()); + conn_length_ = std::make_unique( + parent_.host_->cluster().stats().upstream_cx_length_ms_, parent_.dispatcher_.timeSource()); + connect_timer_->enableTimer(parent_.host_->cluster().connectTimeout()); + + parent_.host_->stats().cx_total_.inc(); + parent_.host_->stats().cx_active_.inc(); + parent_.host_->cluster().stats().upstream_cx_total_.inc(); + parent_.host_->cluster().stats().upstream_cx_active_.inc(); + parent_.host_->cluster().resourceManager(parent_.priority_).connections().inc(); +} + +ActiveClient::~ActiveClient() { releaseResources(); } + +void ActiveClient::onEvent(Network::ConnectionEvent event) { + parent_.onConnectionEvent(*this, "", event); +} + +void ActiveClient::releaseResources() { + if (!resources_released_) { + resources_released_ = true; + + conn_length_->complete(); + + parent_.host_->cluster().stats().upstream_cx_active_.dec(); + parent_.host_->stats().cx_active_.dec(); + parent_.host_->cluster().resourceManager(parent_.priority_).connections().dec(); + } +} + +void ActiveClient::onConnectTimeout() { + ENVOY_CONN_LOG(debug, "connect timeout", *this); + parent_.host_->cluster().stats().upstream_cx_connect_timeout_.inc(); + timed_out_ = true; + close(); +} + +} // namespace ConnectionPool +} // namespace Envoy diff --git a/source/common/conn_pool/conn_pool_base.h b/source/common/conn_pool/conn_pool_base.h new file mode 100644 index 000000000000..6e7cb0f2fbc3 --- /dev/null +++ b/source/common/conn_pool/conn_pool_base.h @@ -0,0 +1,191 @@ +#pragma once + +#include "envoy/common/conn_pool.h" +#include "envoy/event/dispatcher.h" +#include "envoy/network/connection.h" +#include "envoy/stats/timespan.h" + +#include "common/common/linked_object.h" + +#include "absl/strings/string_view.h" + +namespace Envoy { +namespace ConnectionPool { + +class ConnPoolImplBase; + +// ActiveClient provides a base class for connection pool clients that handles connection timings +// as well as managing the connection timeout. +class ActiveClient : public LinkedObject, + public Network::ConnectionCallbacks, + public Event::DeferredDeletable, + protected Logger::Loggable { +public: + ActiveClient(ConnPoolImplBase& parent, uint64_t lifetime_request_limit, + uint64_t concurrent_request_limit); + ~ActiveClient() override; + + void releaseResources(); + + // Network::ConnectionCallbacks + void onEvent(Network::ConnectionEvent event) override; + void onAboveWriteBufferHighWatermark() override {} + void onBelowWriteBufferLowWatermark() override {} + + // Called if the connection does not complete within the cluster's connectTimeout() + void onConnectTimeout(); + + // Returns the concurrent request limit, accounting for if the total request limit + // is less than the concurrent request limit. + uint64_t effectiveConcurrentRequestLimit() const { + return std::min(remaining_requests_, concurrent_request_limit_); + } + + // Closes the underlying connection. 
+ virtual void close() PURE; + // Returns the ID of the underlying connection. + virtual uint64_t id() const PURE; + // Returns true if this closed with an incomplete request, for stats tracking/ purposes. + virtual bool closingWithIncompleteRequest() const PURE; + // Returns the number of active requests on this connection. + virtual size_t numActiveRequests() const PURE; + + enum class State { + CONNECTING, // Connection is not yet established. + READY, // Additional requests may be immediately dispatched to this connection. + BUSY, // Connection is at its concurrent request limit. + DRAINING, // No more requests can be dispatched to this connection, and it will be closed + // when all requests complete. + CLOSED // Connection is closed and object is queued for destruction. + }; + + ConnPoolImplBase& parent_; + uint64_t remaining_requests_; + const uint64_t concurrent_request_limit_; + State state_{State::CONNECTING}; + Upstream::HostDescriptionConstSharedPtr real_host_description_; + Stats::TimespanPtr conn_connect_ms_; + Stats::TimespanPtr conn_length_; + Event::TimerPtr connect_timer_; + bool resources_released_{false}; + bool timed_out_{false}; +}; + +// PendingRequest is the base class for a connection which has been created but not yet established. +class PendingRequest : public LinkedObject, public ConnectionPool::Cancellable { +public: + PendingRequest(ConnPoolImplBase& parent); + ~PendingRequest() override; + + // ConnectionPool::Cancellable + void cancel(Envoy::ConnectionPool::CancelPolicy policy) override; + + // TODO(alyssawilk) find an alternate to void* + // The context here returns a pointer to whatever context is provided with newStream(), + // which will be passed back to the parent in onPoolReady or onPoolFailure. + virtual void* context() PURE; + + ConnPoolImplBase& parent_; +}; + +using PendingRequestPtr = std::unique_ptr; + +using ActiveClientPtr = std::unique_ptr; + +// Base class that handles request queueing logic shared between connection pool implementations. +class ConnPoolImplBase : protected Logger::Loggable { +public: + ConnPoolImplBase(Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, + Event::Dispatcher& dispatcher, + const Network::ConnectionSocket::OptionsSharedPtr& options, + const Network::TransportSocketOptionsSharedPtr& transport_socket_options); + virtual ~ConnPoolImplBase(); + + void addDrainedCallbackImpl(Instance::DrainedCb cb); + void drainConnectionsImpl(); + + // Closes and destroys all connections. This must be called in the destructor of + // derived classes because the derived ActiveClient will downcast parent_ to a more + // specific type of ConnPoolImplBase, but if the more specific part is already destructed + // (due to bottom-up destructor ordering in c++) that access will be invalid. + void destructAllConnections(); + + // Returns a new instance of ActiveClient. + virtual ActiveClientPtr instantiateActiveClient() PURE; + + // Gets a pointer to the list that currently owns this client. + std::list& owningList(ActiveClient::State state); + + // Removes the PendingRequest from the list of requests. Called when the PendingRequest is + // cancelled, e.g. when the stream is reset before a connection has been established. + void onPendingRequestCancel(PendingRequest& request, Envoy::ConnectionPool::CancelPolicy policy); + + // Fails all pending requests, calling onPoolFailure on the associated callbacks. 
+ void purgePendingRequests(const Upstream::HostDescriptionConstSharedPtr& host_description, + absl::string_view failure_reason, + ConnectionPool::PoolFailureReason pool_failure_reason); + + // Closes any idle connections. + void closeIdleConnections(); + + // Changes the state_ of an ActiveClient and moves to the appropriate list. + void transitionActiveClientState(ActiveClient& client, ActiveClient::State new_state); + + void onConnectionEvent(ActiveClient& client, absl::string_view failure_reason, + Network::ConnectionEvent event); + void checkForDrained(); + void onUpstreamReady(); + ConnectionPool::Cancellable* newStream(void* context); + + virtual ConnectionPool::Cancellable* newPendingRequest(void* context) PURE; + + // Creates a new connection if allowed by resourceManager, or if created to avoid + // starving this pool. + void tryCreateNewConnection(); + + virtual void attachRequestToClient(ActiveClient& client, PendingRequest& request) PURE; + void attachRequestToClientImpl(Envoy::ConnectionPool::ActiveClient& client, void* pair); + virtual void onPoolFailure(const Upstream::HostDescriptionConstSharedPtr& host_description, + absl::string_view failure_reason, + ConnectionPool::PoolFailureReason pool_failure_reason, + void* context) PURE; + virtual void onPoolReady(ActiveClient& client, void* context) PURE; + // Called by derived classes any time a request is completed or destroyed for any reason. + void onRequestClosed(Envoy::ConnectionPool::ActiveClient& client, bool delay_attaching_request); + + const Upstream::HostConstSharedPtr host_; + const Upstream::ResourcePriority priority_; + + friend class ActiveClient; + friend class PendingRequest; + Event::Dispatcher& dispatcher_; + const Network::ConnectionSocket::OptionsSharedPtr socket_options_; + const Network::TransportSocketOptionsSharedPtr transport_socket_options_; + + std::list drained_callbacks_; + std::list pending_requests_; + + // When calling purgePendingRequests, this list will be used to hold the requests we are about + // to purge. We need this if one cancelled requests cancels a different pending request + std::list pending_requests_to_purge_; + + // Clients that are ready to handle additional requests. + // All entries are in state READY. + std::list ready_clients_; + + // Clients that are not ready to handle additional requests due to being BUSY or DRAINING. + std::list busy_clients_; + + // Clients that are not ready to handle additional requests because they are CONNECTING. + std::list connecting_clients_; + + // The number of requests currently attached to clients. + uint64_t num_active_requests_{0}; + + // The number of requests that can be immediately dispatched + // if all CONNECTING connections become connected. 
+ uint64_t connecting_request_capacity_{0}; +}; + +} // namespace ConnectionPool +} // namespace Envoy diff --git a/source/common/http/BUILD b/source/common/http/BUILD index eb524eb4077e..782eb8d28f16 100644 --- a/source/common/http/BUILD +++ b/source/common/http/BUILD @@ -129,6 +129,7 @@ envoy_cc_library( "//include/envoy/http:conn_pool_interface", "//include/envoy/stats:timespan_interface", "//source/common/common:linked_object", + "//source/common/conn_pool:conn_pool_base_lib", "//source/common/stats:timespan_lib", "//source/common/upstream:upstream_lib", ], diff --git a/source/common/http/conn_pool_base.cc b/source/common/http/conn_pool_base.cc index 432bfb304c64..416f97c499d9 100644 --- a/source/common/http/conn_pool_base.cc +++ b/source/common/http/conn_pool_base.cc @@ -8,425 +8,6 @@ #include "common/upstream/upstream_impl.h" namespace Envoy { -namespace ConnectionPool { - -ConnPoolImplBase::ConnPoolImplBase( - Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, - Event::Dispatcher& dispatcher, const Network::ConnectionSocket::OptionsSharedPtr& options, - const Network::TransportSocketOptionsSharedPtr& transport_socket_options) - : host_(host), priority_(priority), dispatcher_(dispatcher), socket_options_(options), - transport_socket_options_(transport_socket_options) {} - -ConnPoolImplBase::~ConnPoolImplBase() { - ASSERT(ready_clients_.empty()); - ASSERT(busy_clients_.empty()); - ASSERT(connecting_clients_.empty()); -} - -void ConnPoolImplBase::destructAllConnections() { - for (auto* list : {&ready_clients_, &busy_clients_, &connecting_clients_}) { - while (!list->empty()) { - list->front()->close(); - } - } - - // Make sure all clients are destroyed before we are destroyed. - dispatcher_.clearDeferredDeleteList(); -} - -void ConnPoolImplBase::tryCreateNewConnection() { - if (pending_requests_.size() <= connecting_request_capacity_) { - // There are already enough CONNECTING connections for the number - // of queued requests. - return; - } - - const bool can_create_connection = - host_->cluster().resourceManager(priority_).connections().canCreate(); - if (!can_create_connection) { - host_->cluster().stats().upstream_cx_overflow_.inc(); - } - // If we are at the connection circuit-breaker limit due to other upstreams having - // too many open connections, and this upstream has no connections, always create one, to - // prevent pending requests being queued to this upstream with no way to be processed. 
- if (can_create_connection || - (ready_clients_.empty() && busy_clients_.empty() && connecting_clients_.empty())) { - ENVOY_LOG(debug, "creating a new connection"); - ActiveClientPtr client = instantiateActiveClient(); - ASSERT(client->state_ == ActiveClient::State::CONNECTING); - ASSERT(std::numeric_limits::max() - connecting_request_capacity_ >= - client->effectiveConcurrentRequestLimit()); - connecting_request_capacity_ += client->effectiveConcurrentRequestLimit(); - client->moveIntoList(std::move(client), owningList(client->state_)); - } -} - -void ConnPoolImplBase::attachRequestToClientImpl(Envoy::ConnectionPool::ActiveClient& client, - void* context) { - ASSERT(client.state_ == Envoy::ConnectionPool::ActiveClient::State::READY); - - if (!host_->cluster().resourceManager(priority_).requests().canCreate()) { - ENVOY_LOG(debug, "max requests overflow"); - onPoolFailure(client.real_host_description_, absl::string_view(), - ConnectionPool::PoolFailureReason::Overflow, context); - host_->cluster().stats().upstream_rq_pending_overflow_.inc(); - } else { - ENVOY_CONN_LOG(debug, "creating stream", client); - - client.remaining_requests_--; - if (client.remaining_requests_ == 0) { - ENVOY_CONN_LOG(debug, "maximum requests per connection, DRAINING", client); - host_->cluster().stats().upstream_cx_max_requests_.inc(); - transitionActiveClientState(client, Envoy::ConnectionPool::ActiveClient::State::DRAINING); - } else if (client.numActiveRequests() + 1 >= client.concurrent_request_limit_) { - // As soon as the new request is created, the client will be maxed out. - transitionActiveClientState(client, Envoy::ConnectionPool::ActiveClient::State::BUSY); - } - - num_active_requests_++; - host_->stats().rq_total_.inc(); - host_->stats().rq_active_.inc(); - host_->cluster().stats().upstream_rq_total_.inc(); - host_->cluster().stats().upstream_rq_active_.inc(); - host_->cluster().resourceManager(priority_).requests().inc(); - - onPoolReady(client, context); - } -} - -void ConnPoolImplBase::onRequestClosed(Envoy::ConnectionPool::ActiveClient& client, - bool delay_attaching_request) { - ENVOY_CONN_LOG(debug, "destroying stream: {} remaining", client, client.numActiveRequests()); - ASSERT(num_active_requests_ > 0); - num_active_requests_--; - host_->stats().rq_active_.dec(); - host_->cluster().stats().upstream_rq_active_.dec(); - host_->cluster().resourceManager(priority_).requests().dec(); - if (client.state_ == ActiveClient::State::DRAINING && client.numActiveRequests() == 0) { - // Close out the draining client if we no longer have active requests. - client.close(); - } else if (client.state_ == ActiveClient::State::BUSY) { - // A request was just ended, so we should be below the limit now. - ASSERT(client.numActiveRequests() < client.concurrent_request_limit_); - - transitionActiveClientState(client, ActiveClient::State::READY); - if (!delay_attaching_request) { - onUpstreamReady(); - } - } -} - -ConnectionPool::Cancellable* ConnPoolImplBase::newStream(void* context) { - if (!ready_clients_.empty()) { - ActiveClient& client = *ready_clients_.front(); - ENVOY_CONN_LOG(debug, "using existing connection", client); - attachRequestToClientImpl(client, context); - return nullptr; - } - - if (host_->cluster().resourceManager(priority_).pendingRequests().canCreate()) { - ConnectionPool::Cancellable* pending = newPendingRequest(context); - - // This must come after newPendingRequest() because this function uses the - // length of pending_requests_ to determine if a new connection is needed. 
- tryCreateNewConnection(); - - return pending; - } else { - ENVOY_LOG(debug, "max pending requests overflow"); - onPoolFailure(nullptr, absl::string_view(), ConnectionPool::PoolFailureReason::Overflow, - context); - host_->cluster().stats().upstream_rq_pending_overflow_.inc(); - return nullptr; - } -} - -void ConnPoolImplBase::onUpstreamReady() { - while (!pending_requests_.empty() && !ready_clients_.empty()) { - ActiveClientPtr& client = ready_clients_.front(); - ENVOY_CONN_LOG(debug, "attaching to next request", *client); - // Pending requests are pushed onto the front, so pull from the back. - attachRequestToClient(*client, *pending_requests_.back()); - pending_requests_.pop_back(); - } -} - -std::list& ConnPoolImplBase::owningList(ActiveClient::State state) { - switch (state) { - case ActiveClient::State::CONNECTING: - return connecting_clients_; - case ActiveClient::State::READY: - return ready_clients_; - case ActiveClient::State::BUSY: - return busy_clients_; - case ActiveClient::State::DRAINING: - return busy_clients_; - case ActiveClient::State::CLOSED: - NOT_REACHED_GCOVR_EXCL_LINE; - } - NOT_REACHED_GCOVR_EXCL_LINE; -} - -void ConnPoolImplBase::transitionActiveClientState(ActiveClient& client, - ActiveClient::State new_state) { - auto& old_list = owningList(client.state_); - auto& new_list = owningList(new_state); - client.state_ = new_state; - - // old_list and new_list can be equal when transitioning from BUSY to DRAINING. - // - // The documentation for list.splice() (which is what moveBetweenLists() calls) is - // unclear whether it is allowed for src and dst to be the same, so check here - // since it is a no-op anyways. - if (&old_list != &new_list) { - client.moveBetweenLists(old_list, new_list); - } -} - -void ConnPoolImplBase::addDrainedCallbackImpl(Instance::DrainedCb cb) { - drained_callbacks_.push_back(cb); - checkForDrained(); -} - -void ConnPoolImplBase::closeIdleConnections() { - // Create a separate list of elements to close to avoid mutate-while-iterating problems. - std::list to_close; - - for (auto& client : ready_clients_) { - if (client->numActiveRequests() == 0) { - to_close.push_back(client.get()); - } - } - - if (pending_requests_.empty()) { - for (auto& client : connecting_clients_) { - to_close.push_back(client.get()); - } - } - - for (auto& entry : to_close) { - entry->close(); - } -} - -void ConnPoolImplBase::drainConnectionsImpl() { - closeIdleConnections(); - - // closeIdleConnections() closes all connections in ready_clients_ with no active requests, - // so all remaining entries in ready_clients_ are serving requests. Move them and all entries - // in busy_clients_ to draining. - while (!ready_clients_.empty()) { - transitionActiveClientState(*ready_clients_.front(), ActiveClient::State::DRAINING); - } - - // Changing busy_clients_ to DRAINING does not move them between lists, - // so use a for-loop since the list is not mutated. 
- ASSERT(&owningList(ActiveClient::State::DRAINING) == &busy_clients_); - for (auto& busy_client : busy_clients_) { - transitionActiveClientState(*busy_client, ActiveClient::State::DRAINING); - } -} - -void ConnPoolImplBase::checkForDrained() { - if (drained_callbacks_.empty()) { - return; - } - - closeIdleConnections(); - - if (pending_requests_.empty() && ready_clients_.empty() && busy_clients_.empty() && - connecting_clients_.empty()) { - ENVOY_LOG(debug, "invoking drained callbacks"); - for (const Instance::DrainedCb& cb : drained_callbacks_) { - cb(); - } - } -} - -void ConnPoolImplBase::onConnectionEvent(ActiveClient& client, absl::string_view failure_reason, - Network::ConnectionEvent event) { - if (client.state_ == ActiveClient::State::CONNECTING) { - ASSERT(connecting_request_capacity_ >= client.effectiveConcurrentRequestLimit()); - connecting_request_capacity_ -= client.effectiveConcurrentRequestLimit(); - } - - if (event == Network::ConnectionEvent::RemoteClose || - event == Network::ConnectionEvent::LocalClose) { - // The client died. - ENVOY_CONN_LOG(debug, "client disconnected, failure reason: {}", client, failure_reason); - - Envoy::Upstream::reportUpstreamCxDestroy(host_, event); - const bool incomplete_request = client.closingWithIncompleteRequest(); - if (incomplete_request) { - Envoy::Upstream::reportUpstreamCxDestroyActiveRequest(host_, event); - } - - if (client.state_ == ActiveClient::State::CONNECTING) { - host_->cluster().stats().upstream_cx_connect_fail_.inc(); - host_->stats().cx_connect_fail_.inc(); - - ConnectionPool::PoolFailureReason reason; - if (client.timed_out_) { - reason = ConnectionPool::PoolFailureReason::Timeout; - } else if (event == Network::ConnectionEvent::RemoteClose) { - reason = ConnectionPool::PoolFailureReason::RemoteConnectionFailure; - } else { - reason = ConnectionPool::PoolFailureReason::LocalConnectionFailure; - } - - // Raw connect failures should never happen under normal circumstances. If we have an upstream - // that is behaving badly, requests can get stuck here in the pending state. If we see a - // connect failure, we purge all pending requests so that calling code can determine what to - // do with the request. - // NOTE: We move the existing pending requests to a temporary list. This is done so that - // if retry logic submits a new request to the pool, we don't fail it inline. - purgePendingRequests(client.real_host_description_, failure_reason, reason); - } - - // We need to release our resourceManager() resources before checking below for - // whether we can create a new connection. Normally this would happen when - // client's destructor runs, but this object needs to be deferredDelete'd(), so - // this forces part of its cleanup to happen now. - client.releaseResources(); - - dispatcher_.deferredDelete(client.removeFromList(owningList(client.state_))); - if (incomplete_request) { - checkForDrained(); - } - - client.state_ = ActiveClient::State::CLOSED; - - // If we have pending requests and we just lost a connection we should make a new one. 
- if (!pending_requests_.empty()) { - tryCreateNewConnection(); - } - } else if (event == Network::ConnectionEvent::Connected) { - client.conn_connect_ms_->complete(); - client.conn_connect_ms_.reset(); - - ASSERT(client.state_ == ActiveClient::State::CONNECTING); - transitionActiveClientState(client, ActiveClient::State::READY); - - onUpstreamReady(); - checkForDrained(); - } - - if (client.connect_timer_) { - client.connect_timer_->disableTimer(); - client.connect_timer_.reset(); - } -} - -PendingRequest::PendingRequest(ConnPoolImplBase& parent) : parent_(parent) { - parent_.host_->cluster().stats().upstream_rq_pending_total_.inc(); - parent_.host_->cluster().stats().upstream_rq_pending_active_.inc(); - parent_.host_->cluster().resourceManager(parent_.priority_).pendingRequests().inc(); -} - -PendingRequest::~PendingRequest() { - parent_.host_->cluster().stats().upstream_rq_pending_active_.dec(); - parent_.host_->cluster().resourceManager(parent_.priority_).pendingRequests().dec(); -} - -void PendingRequest::cancel(Envoy::ConnectionPool::CancelPolicy policy) { - parent_.onPendingRequestCancel(*this, policy); -} - -void ConnPoolImplBase::purgePendingRequests( - const Upstream::HostDescriptionConstSharedPtr& host_description, - absl::string_view failure_reason, ConnectionPool::PoolFailureReason reason) { - // NOTE: We move the existing pending requests to a temporary list. This is done so that - // if retry logic submits a new request to the pool, we don't fail it inline. - pending_requests_to_purge_ = std::move(pending_requests_); - while (!pending_requests_to_purge_.empty()) { - PendingRequestPtr request = - pending_requests_to_purge_.front()->removeFromList(pending_requests_to_purge_); - host_->cluster().stats().upstream_rq_pending_failure_eject_.inc(); - onPoolFailure(host_description, failure_reason, reason, request->context()); - } -} - -void ConnPoolImplBase::onPendingRequestCancel(PendingRequest& request, - Envoy::ConnectionPool::CancelPolicy policy) { - ENVOY_LOG(debug, "cancelling pending request"); - if (!pending_requests_to_purge_.empty()) { - // If pending_requests_to_purge_ is not empty, it means that we are called from - // with-in a onPoolFailure callback invoked in purgePendingRequests (i.e. purgePendingRequests - // is down in the call stack). Remove this request from the list as it is cancelled, - // and there is no need to call its onPoolFailure callback. - request.removeFromList(pending_requests_to_purge_); - } else { - request.removeFromList(pending_requests_); - } - // There's excess capacity if - // pending_requests < connecting_request_capacity_ - capacity of most recent client. - // It's calculated below with addition instead to avoid underflow issues, overflow being - // assumed to not be a problem across the connection pool. - if (policy == Envoy::ConnectionPool::CancelPolicy::CloseExcess && !connecting_clients_.empty() && - (pending_requests_.size() + connecting_clients_.front()->effectiveConcurrentRequestLimit() <= - connecting_request_capacity_)) { - auto& client = *connecting_clients_.front(); - transitionActiveClientState(client, ActiveClient::State::DRAINING); - client.close(); - } - - host_->cluster().stats().upstream_rq_cancelled_.inc(); - checkForDrained(); -} - -namespace { -// Translate zero to UINT64_MAX so that the zero/unlimited case doesn't -// have to be handled specially. -uint64_t translateZeroToUnlimited(uint64_t limit) { - return (limit != 0) ? 
limit : std::numeric_limits::max(); -} -} // namespace - -ActiveClient::ActiveClient(ConnPoolImplBase& parent, uint64_t lifetime_request_limit, - uint64_t concurrent_request_limit) - : parent_(parent), remaining_requests_(translateZeroToUnlimited(lifetime_request_limit)), - concurrent_request_limit_(translateZeroToUnlimited(concurrent_request_limit)), - connect_timer_(parent_.dispatcher_.createTimer([this]() -> void { onConnectTimeout(); })) { - conn_connect_ms_ = std::make_unique( - parent_.host_->cluster().stats().upstream_cx_connect_ms_, parent_.dispatcher_.timeSource()); - conn_length_ = std::make_unique( - parent_.host_->cluster().stats().upstream_cx_length_ms_, parent_.dispatcher_.timeSource()); - connect_timer_->enableTimer(parent_.host_->cluster().connectTimeout()); - - parent_.host_->stats().cx_total_.inc(); - parent_.host_->stats().cx_active_.inc(); - parent_.host_->cluster().stats().upstream_cx_total_.inc(); - parent_.host_->cluster().stats().upstream_cx_active_.inc(); - parent_.host_->cluster().resourceManager(parent_.priority_).connections().inc(); -} - -ActiveClient::~ActiveClient() { releaseResources(); } - -void ActiveClient::onEvent(Network::ConnectionEvent event) { - parent_.onConnectionEvent(*this, "", event); -} - -void ActiveClient::releaseResources() { - if (!resources_released_) { - resources_released_ = true; - - conn_length_->complete(); - - parent_.host_->cluster().stats().upstream_cx_active_.dec(); - parent_.host_->stats().cx_active_.dec(); - parent_.host_->cluster().resourceManager(parent_.priority_).connections().dec(); - } -} - -void ActiveClient::onConnectTimeout() { - ENVOY_CONN_LOG(debug, "connect timeout", *this); - parent_.host_->cluster().stats().upstream_cx_connect_timeout_.inc(); - timed_out_ = true; - close(); -} - -} // namespace ConnectionPool - namespace Http { Network::TransportSocketOptionsSharedPtr diff --git a/source/common/http/conn_pool_base.h b/source/common/http/conn_pool_base.h index 715511255c00..78171c6e2411 100644 --- a/source/common/http/conn_pool_base.h +++ b/source/common/http/conn_pool_base.h @@ -6,190 +6,12 @@ #include "envoy/stats/timespan.h" #include "common/common/linked_object.h" +#include "common/conn_pool/conn_pool_base.h" #include "common/http/codec_client.h" #include "absl/strings/string_view.h" namespace Envoy { -// TODO(alyssawilk) move all the code in this namespace to // source/common/conn_pool/ in -// a follow up, in the hopes git will preserve history. -namespace ConnectionPool { - -class ConnPoolImplBase; - -// ActiveClient provides a base class for connection pool clients that handles connection timings -// as well as managing the connection timeout. -class ActiveClient : public LinkedObject, - public Network::ConnectionCallbacks, - public Event::DeferredDeletable, - protected Logger::Loggable { -public: - ActiveClient(ConnPoolImplBase& parent, uint64_t lifetime_request_limit, - uint64_t concurrent_request_limit); - ~ActiveClient() override; - - void releaseResources(); - - // Network::ConnectionCallbacks - void onEvent(Network::ConnectionEvent event) override; - void onAboveWriteBufferHighWatermark() override {} - void onBelowWriteBufferLowWatermark() override {} - - // Called if the connection does not complete within the cluster's connectTimeout() - void onConnectTimeout(); - - // Returns the concurrent request limit, accounting for if the total request limit - // is less than the concurrent request limit. 
- uint64_t effectiveConcurrentRequestLimit() const { - return std::min(remaining_requests_, concurrent_request_limit_); - } - - // Closes the underlying connection. - virtual void close() PURE; - // Returns the ID of the underlying connection. - virtual uint64_t id() const PURE; - // Returns true if this closed with an incomplete request, for stats tracking/ purposes. - virtual bool closingWithIncompleteRequest() const PURE; - // Returns the number of active requests on this connection. - virtual size_t numActiveRequests() const PURE; - - enum class State { - CONNECTING, // Connection is not yet established. - READY, // Additional requests may be immediately dispatched to this connection. - BUSY, // Connection is at its concurrent request limit. - DRAINING, // No more requests can be dispatched to this connection, and it will be closed - // when all requests complete. - CLOSED // Connection is closed and object is queued for destruction. - }; - - ConnPoolImplBase& parent_; - uint64_t remaining_requests_; - const uint64_t concurrent_request_limit_; - State state_{State::CONNECTING}; - Upstream::HostDescriptionConstSharedPtr real_host_description_; - Stats::TimespanPtr conn_connect_ms_; - Stats::TimespanPtr conn_length_; - Event::TimerPtr connect_timer_; - bool resources_released_{false}; - bool timed_out_{false}; -}; - -// PendingRequest is the base class for a connection which has been created but not yet established. -class PendingRequest : public LinkedObject, public ConnectionPool::Cancellable { -public: - PendingRequest(ConnPoolImplBase& parent); - ~PendingRequest() override; - - // ConnectionPool::Cancellable - void cancel(Envoy::ConnectionPool::CancelPolicy policy) override; - - // The context here returns a pointer to whatever context is provided with newStream(), - // which will be passed back to the parent in onPoolReady or onPoolFailure. - virtual void* context() PURE; - - ConnPoolImplBase& parent_; -}; - -using PendingRequestPtr = std::unique_ptr; - -using ActiveClientPtr = std::unique_ptr; - -// Base class that handles request queueing logic shared between connection pool implementations. -class ConnPoolImplBase : protected Logger::Loggable { -public: - ConnPoolImplBase(Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, - Event::Dispatcher& dispatcher, - const Network::ConnectionSocket::OptionsSharedPtr& options, - const Network::TransportSocketOptionsSharedPtr& transport_socket_options); - virtual ~ConnPoolImplBase(); - - void addDrainedCallbackImpl(Instance::DrainedCb cb); - void drainConnectionsImpl(); - - // Closes and destroys all connections. This must be called in the destructor of - // derived classes because the derived ActiveClient will downcast parent_ to a more - // specific type of ConnPoolImplBase, but if the more specific part is already destructed - // (due to bottom-up destructor ordering in c++) that access will be invalid. - void destructAllConnections(); - - // Returns a new instance of ActiveClient. - virtual ActiveClientPtr instantiateActiveClient() PURE; - - // Gets a pointer to the list that currently owns this client. - std::list& owningList(ActiveClient::State state); - - // Removes the PendingRequest from the list of requests. Called when the PendingRequest is - // cancelled, e.g. when the stream is reset before a connection has been established. - void onPendingRequestCancel(PendingRequest& request, Envoy::ConnectionPool::CancelPolicy policy); - - // Fails all pending requests, calling onPoolFailure on the associated callbacks. 
- void purgePendingRequests(const Upstream::HostDescriptionConstSharedPtr& host_description, - absl::string_view failure_reason, - ConnectionPool::PoolFailureReason pool_failure_reason); - - // Closes any idle connections. - void closeIdleConnections(); - - // Changes the state_ of an ActiveClient and moves to the appropriate list. - void transitionActiveClientState(ActiveClient& client, ActiveClient::State new_state); - - void onConnectionEvent(ActiveClient& client, absl::string_view failure_reason, - Network::ConnectionEvent event); - void checkForDrained(); - void onUpstreamReady(); - ConnectionPool::Cancellable* newStream(void* context); - - virtual ConnectionPool::Cancellable* newPendingRequest(void* context) PURE; - - // Creates a new connection if allowed by resourceManager, or if created to avoid - // starving this pool. - void tryCreateNewConnection(); - - virtual void attachRequestToClient(ActiveClient& client, PendingRequest& request) PURE; - void attachRequestToClientImpl(Envoy::ConnectionPool::ActiveClient& client, void* pair); - virtual void onPoolFailure(const Upstream::HostDescriptionConstSharedPtr& host_description, - absl::string_view failure_reason, - ConnectionPool::PoolFailureReason pool_failure_reason, - void* context) PURE; - virtual void onPoolReady(ActiveClient& client, void* context) PURE; - // Called by derived classes any time a request is completed or destroyed for any reason. - void onRequestClosed(Envoy::ConnectionPool::ActiveClient& client, bool delay_attaching_request); - - const Upstream::HostConstSharedPtr host_; - const Upstream::ResourcePriority priority_; - - friend class ActiveClient; - friend class PendingRequest; - Event::Dispatcher& dispatcher_; - const Network::ConnectionSocket::OptionsSharedPtr socket_options_; - const Network::TransportSocketOptionsSharedPtr transport_socket_options_; - - std::list drained_callbacks_; - std::list pending_requests_; - - // When calling purgePendingRequests, this list will be used to hold the requests we are about - // to purge. We need this if one cancelled requests cancels a different pending request - std::list pending_requests_to_purge_; - - // Clients that are ready to handle additional requests. - // All entries are in state READY. - std::list ready_clients_; - - // Clients that are not ready to handle additional requests due to being BUSY or DRAINING. - std::list busy_clients_; - - // Clients that are not ready to handle additional requests because they are CONNECTING. - std::list connecting_clients_; - - // The number of requests currently attached to clients. - uint64_t num_active_requests_{0}; - - // The number of requests that can be immediately dispatched - // if all CONNECTING connections become connected. 
- uint64_t connecting_request_capacity_{0}; -}; -} // namespace ConnectionPool - namespace Http { // An implementation of Envoy::ConnectionPool::PendingRequest for HTTP/1.1 and HTTP/2 From 0c03a765666d2dbc986fbe789b542491d4b9b788 Mon Sep 17 00:00:00 2001 From: Joshua Marantz Date: Sun, 28 Jun 2020 11:57:57 -0400 Subject: [PATCH 479/909] stats: integrate real symbol table into stats system (#4980) Signed-off-by: Joshua Marantz --- docs/root/version_history/current.rst | 2 ++ source/common/stats/symbol_table_creator.cc | 2 +- source/server/options_impl.cc | 2 +- test/common/stats/stat_test_utility.cc | 9 ++++++--- .../filters/http/grpc_stats/config_test.cc | 17 ++++++++++++----- test/integration/BUILD | 1 + test/integration/http2_integration_test.cc | 3 +-- test/server/admin/prometheus_stats_test.cc | 6 ++---- 8 files changed, 26 insertions(+), 16 deletions(-) diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index f7a953024711..1a298afbcc5c 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -107,6 +107,8 @@ New Features * metrics service: added added :ref:`API version ` to explicitly set the version of gRPC service endpoint and message to be used. * network filters: added a :ref:`postgres proxy filter `. * network filters: added a :ref:`rocketmq proxy filter `. +* performance: stats symbol table implementation (enabled by default; to disable it, add + `--use-fake-symbol-table 1` to the command-line arguments when starting Envoy). * ratelimit: add support for use of dynamic metadata :ref:`dynamic_metadata ` as a ratelimit action. * ratelimit: added :ref:`API version ` to explicitly set the version of gRPC service endpoint and message to be used. * redis: added acl support :ref:`downstream_auth_username ` for downstream client ACL authentication, and :ref:`auth_username ` to configure authentication usernames for upstream Redis 6+ server clusters with ACL enabled. 
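As a point of reference for the release note above, opting back into the previous behavior is a single command-line flag. A minimal invocation sketch (the config path is a placeholder; only the --use-fake-symbol-table flag comes from this change):

    envoy --config-path /etc/envoy/envoy.yaml --use-fake-symbol-table 1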
diff --git a/source/common/stats/symbol_table_creator.cc b/source/common/stats/symbol_table_creator.cc index 8b29313130b5..755c8fcce2e4 100644 --- a/source/common/stats/symbol_table_creator.cc +++ b/source/common/stats/symbol_table_creator.cc @@ -4,7 +4,7 @@ namespace Envoy { namespace Stats { bool SymbolTableCreator::initialized_ = false; -bool SymbolTableCreator::use_fake_symbol_tables_ = true; +bool SymbolTableCreator::use_fake_symbol_tables_ = false; SymbolTablePtr SymbolTableCreator::initAndMakeSymbolTable(bool use_fake) { ASSERT(!initialized_ || (use_fake_symbol_tables_ == use_fake)); diff --git a/source/server/options_impl.cc b/source/server/options_impl.cc index ca58890a481f..b5608b346634 100644 --- a/source/server/options_impl.cc +++ b/source/server/options_impl.cc @@ -156,7 +156,7 @@ OptionsImpl::OptionsImpl(std::vector args, "", "cpuset-threads", "Get the default # of worker threads from cpuset size", cmd, false); TCLAP::ValueArg use_fake_symbol_table("", "use-fake-symbol-table", - "Use fake symbol table implementation", false, true, + "Use fake symbol table implementation", false, false, "bool", cmd); TCLAP::ValueArg disable_extensions("", "disable-extensions", diff --git a/test/common/stats/stat_test_utility.cc b/test/common/stats/stat_test_utility.cc index a195614e5682..cc0f0a8d47a7 100644 --- a/test/common/stats/stat_test_utility.cc +++ b/test/common/stats/stat_test_utility.cc @@ -152,7 +152,8 @@ Counter& TestStore::counterFromStatNameWithTags(const StatName& stat_name, } else { // Ensures StatNames with the same string representation are specified // consistently using symbolic/dynamic components on every access. - ASSERT(counter_ref->statName() == stat_name); + ASSERT(counter_ref->statName() == stat_name, "Inconsistent dynamic vs symbolic " + "stat name specification"); } return *counter_ref; } @@ -173,7 +174,8 @@ Gauge& TestStore::gaugeFromStatNameWithTags(const StatName& stat_name, if (gauge_ref == nullptr) { gauge_ref = &IsolatedStoreImpl::gaugeFromStatNameWithTags(stat_name, tags, mode); } else { - ASSERT(gauge_ref->statName() == stat_name); + ASSERT(gauge_ref->statName() == stat_name, "Inconsistent dynamic vs symbolic " + "stat name specification"); } return *gauge_ref; } @@ -194,7 +196,8 @@ Histogram& TestStore::histogramFromStatNameWithTags(const StatName& stat_name, if (histogram_ref == nullptr) { histogram_ref = &IsolatedStoreImpl::histogramFromStatNameWithTags(stat_name, tags, unit); } else { - ASSERT(histogram_ref->statName() == stat_name); + ASSERT(histogram_ref->statName() == stat_name, "Inconsistent dynamic vs symbolic " + "stat name specification"); } return *histogram_ref; } diff --git a/test/extensions/filters/http/grpc_stats/config_test.cc b/test/extensions/filters/http/grpc_stats/config_test.cc index b75737b06cd2..3b303fe0a8ea 100644 --- a/test/extensions/filters/http/grpc_stats/config_test.cc +++ b/test/extensions/filters/http/grpc_stats/config_test.cc @@ -358,11 +358,18 @@ TEST_F(GrpcStatsFilterConfigTest, MessageCounts) { .counterFromString( "grpc.lyft.users.BadCompanions.GetBadCompanions.request_message_count") .value()); - EXPECT_EQ(0U, decoder_callbacks_.clusterInfo() - ->statsScope() - .counterFromString( - "grpc.lyft.users.BadCompanions.GetBadCompanions.response_message_count") - .value()); + + // Check that there is response_message_count stat yet. 
We use + // stats_store_.findCounterByString rather than looking on + // clusterInfo()->statsScope() because findCounterByString is not an API on + // Stats::Store, and there is no prefix so the names will match. We verify + // that by double-checking we can find the request_message_count using the + // same API. + EXPECT_FALSE(stats_store_.findCounterByString( + "grpc.lyft.users.BadCompanions.GetBadCompanions.response_message_count")); + EXPECT_TRUE(stats_store_.findCounterByString( + "grpc.lyft.users.BadCompanions.GetBadCompanions.request_message_count")); + const auto& data = stream_info_.filterState()->getDataReadOnly( HttpFilterNames::get().GrpcStats); EXPECT_EQ(2U, data.request_message_count); diff --git a/test/integration/BUILD b/test/integration/BUILD index 18070ad0173e..1922ef9f1e89 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -50,6 +50,7 @@ envoy_cc_test_library( envoy_cc_test( name = "ads_integration_test", + size = "enormous", srcs = ["ads_integration_test.cc"], tags = ["fails_on_windows"], deps = [ diff --git a/test/integration/http2_integration_test.cc b/test/integration/http2_integration_test.cc index 0f28f2b180c0..9de0d17da08b 100644 --- a/test/integration/http2_integration_test.cc +++ b/test/integration/http2_integration_test.cc @@ -1562,8 +1562,7 @@ void Http2FloodMitigationTest::floodServer(const Http2Frame& frame, const std::s EXPECT_LE(total_bytes_sent, TransmitThreshold) << "Flood mitigation is broken."; EXPECT_EQ(1, test_server_->counter(flood_stat)->value()); - EXPECT_EQ(1, - test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value()); + test_server_->waitForCounterGe("http.config_test.downstream_cx_delayed_close_timeout", 1); } // Verify that the server detects the flood using specified request parameters. diff --git a/test/server/admin/prometheus_stats_test.cc b/test/server/admin/prometheus_stats_test.cc index 7994da560249..35528290dace 100644 --- a/test/server/admin/prometheus_stats_test.cc +++ b/test/server/admin/prometheus_stats_test.cc @@ -36,9 +36,7 @@ class HistogramWrapper { class PrometheusStatsFormatterTest : public testing::Test { protected: - PrometheusStatsFormatterTest() - : symbol_table_(Stats::SymbolTableCreator::makeSymbolTable()), alloc_(*symbol_table_), - pool_(*symbol_table_) {} + PrometheusStatsFormatterTest() : alloc_(*symbol_table_), pool_(*symbol_table_) {} ~PrometheusStatsFormatterTest() override { clearStorage(); } @@ -92,7 +90,7 @@ class PrometheusStatsFormatterTest : public testing::Test { EXPECT_EQ(0, symbol_table_->numSymbols()); } - Stats::SymbolTablePtr symbol_table_; + Stats::TestSymbolTable symbol_table_; Stats::AllocatorImpl alloc_; Stats::StatNamePool pool_; std::vector counters_; From 836ebf56a1694d49c9d8df69d4f985c501157869 Mon Sep 17 00:00:00 2001 From: Dhi Aurrahman Date: Sun, 28 Jun 2020 23:02:36 +0700 Subject: [PATCH 480/909] common: MSVC workaround for __VA_ARGS__ (#11792) This adds PASS_ON macro to fix warning C4003 for MSVC. https://godbolt.org/z/-Bu58z. 
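Roughly, the workaround forces an extra expansion pass so the inner macro sees its arguments already split. A minimal standalone sketch of the same pattern (the names below are illustrative and not taken from the Envoy tree):

    void do_log(bool condition, const char* message); // illustrative sink

    // MSVC's traditional preprocessor may emit C4003 ("not enough arguments for
    // function-like macro invocation") when __VA_ARGS__ is forwarded directly
    // into a macro that expects multiple arguments.
    #define LOG_VERBOSE(COND, MSG) do_log((COND), (MSG))
    #define PASS_ON(...) __VA_ARGS__
    // The double indirection makes the preprocessor rescan the argument list, so
    // the forwarded arguments are split correctly before LOG_VERBOSE expands.
    #define CHECK(...) PASS_ON(PASS_ON(LOG_VERBOSE)(__VA_ARGS__))
    // Usage: CHECK(cond, "msg") expands to do_log((cond), ("msg")).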
Signed-off-by: Dhi Aurrahman --- source/common/common/assert.h | 7 ++++++- test/common/event/BUILD | 1 + 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/source/common/common/assert.h b/source/common/common/assert.h index 4fc5d38acf71..f4683ede34dd 100644 --- a/source/common/common/assert.h +++ b/source/common/common/assert.h @@ -192,6 +192,11 @@ bool shouldLogAndInvokeEnvoyBugForEnvoyBugMacroUseOnly(absl::string_view bug_nam #define _ENVOY_BUG_VERBOSE(X, Y) _ENVOY_BUG_IMPL(X, #X, ENVOY_BUG_ACTION, Y) +// This macro is needed to help to remove: "warning C4003: not enough arguments for function-like +// macro invocation ''" when expanding __VA_ARGS__. In our setup, MSVC treats this +// warning as an error. A sample code to reproduce the case: https://godbolt.org/z/M4zZNG. +#define PASS_ON(...) __VA_ARGS__ + /** * Indicate a failure condition that should never be met in normal circumstances. In contrast * with ASSERT, an ENVOY_BUG is compiled in release mode. If a failure condition is met in release @@ -199,7 +204,7 @@ bool shouldLogAndInvokeEnvoyBugForEnvoyBugMacroUseOnly(absl::string_view bug_nam * mode, it will crash if the condition is not met. ENVOY_BUG must be called with two arguments for * verbose logging. */ -#define ENVOY_BUG(...) _ENVOY_BUG_VERBOSE(__VA_ARGS__) +#define ENVOY_BUG(...) PASS_ON(PASS_ON(_ENVOY_BUG_VERBOSE)(__VA_ARGS__)) // NOT_IMPLEMENTED_GCOVR_EXCL_LINE is for overridden functions that are expressly not implemented. // The macro name includes "GCOVR_EXCL_LINE" to exclude the macro's usage from code coverage diff --git a/test/common/event/BUILD b/test/common/event/BUILD index 6cf270e9b2bc..50a792f01804 100644 --- a/test/common/event/BUILD +++ b/test/common/event/BUILD @@ -28,6 +28,7 @@ envoy_cc_test( envoy_cc_test( name = "file_event_impl_test", srcs = ["file_event_impl_test.cc"], + tags = ["fails_on_windows"], deps = [ "//include/envoy/event:file_event_interface", "//source/common/event:dispatcher_includes", From 7ea1f24bd53522408d9bc55316dff9ed1701bc73 Mon Sep 17 00:00:00 2001 From: Petr Pchelko Date: Sun, 28 Jun 2020 16:33:12 -0700 Subject: [PATCH 481/909] Support dynamic limit override in ratelimit filter (#11770) Provides a way to specify dynamic rate limit override in the rate limit descriptor from static value or from dynamic metadata. New type, RateLimitUnit was created to share across config protocol and rate limit service protocol. A PR for the reference implementation of the rate limit service will follow after the API changes are discussed and accepted. 
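Concretely, when an override resolves, the descriptor sent to the rate limit service carries a limit field next to its entries. A rough proto text sketch based on the RateLimitDescriptor changes in this patch (the values are illustrative):

    descriptors {
      entries { key: "generic_key" value: "some_value" }
      limit { requests_per_unit: 42 unit: HOUR }
    }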
Signed-off-by: Petr Pchelko --- .../config/route/v3/route_components.proto | 24 ++++ .../route/v4alpha/route_components.proto | 30 +++++ .../extensions/common/ratelimit/v3/BUILD | 1 + .../common/ratelimit/v3/ratelimit.proto | 20 +++ api/envoy/service/ratelimit/v3/rls.proto | 2 + api/envoy/type/v3/ratelimit_unit.proto | 30 +++++ docs/root/api-v3/types/types.rst | 1 + .../advanced/well_known_dynamic_metadata.rst | 4 + .../http/http_filters/rate_limit_filter.rst | 44 +++++++ docs/root/version_history/current.rst | 1 + .../config/route/v3/route_components.proto | 24 ++++ .../route/v4alpha/route_components.proto | 30 +++++ .../extensions/common/ratelimit/v3/BUILD | 1 + .../common/ratelimit/v3/ratelimit.proto | 20 +++ .../envoy/service/ratelimit/v3/rls.proto | 2 + .../envoy/type/v3/ratelimit_unit.proto | 30 +++++ include/envoy/ratelimit/BUILD | 3 + include/envoy/ratelimit/ratelimit.h | 13 ++ include/envoy/router/router_ratelimit.h | 20 +++ source/common/router/router_ratelimit.cc | 39 ++++++ source/common/router/router_ratelimit.h | 20 +++ .../common/ratelimit/ratelimit_impl.cc | 6 + test/common/router/router_ratelimit_test.cc | 124 ++++++++++++++++++ .../common/ratelimit/ratelimit_impl_test.cc | 23 ++++ test/mocks/ratelimit/mocks.h | 6 +- 25 files changed, 517 insertions(+), 1 deletion(-) create mode 100644 api/envoy/type/v3/ratelimit_unit.proto create mode 100644 generated_api_shadow/envoy/type/v3/ratelimit_unit.proto diff --git a/api/envoy/config/route/v3/route_components.proto b/api/envoy/config/route/v3/route_components.proto index 2dcfd3838ad8..21afddba4fb6 100644 --- a/api/envoy/config/route/v3/route_components.proto +++ b/api/envoy/config/route/v3/route_components.proto @@ -1497,6 +1497,24 @@ message RateLimit { } } + message Override { + // Fetches the override from the dynamic metadata. + message DynamicMetadata { + // Metadata struct that defines the key and path to retrieve the struct value. + // The value must be a struct containing an integer "requests_per_unit" property + // and a "unit" property with a value parseable to :ref:`RateLimitUnit + // enum ` + type.metadata.v3.MetadataKey metadata_key = 1 [(validate.rules).message = {required: true}]; + } + + oneof override_specifier { + option (validate.required) = true; + + // Limit override from dynamic metadata. + DynamicMetadata dynamic_metadata = 1; + } + } + // Refers to the stage set in the filter. The rate limit configuration only // applies to filters with the same stage number. The default stage number is // 0. @@ -1516,6 +1534,12 @@ message RateLimit { // configuration. See :ref:`composing actions // ` for additional documentation. repeated Action actions = 3 [(validate.rules).repeated = {min_items: 1}]; + + // An optional limit override to be appended to the descriptor produced by this + // rate limit configuration. If the override value is invalid or cannot be resolved + // from metadata, no override is provided. See :ref:`rate limit override + // ` for more information. + Override limit = 4; } // .. attention:: diff --git a/api/envoy/config/route/v4alpha/route_components.proto b/api/envoy/config/route/v4alpha/route_components.proto index 8251d64c09a6..1621fba5b7ac 100644 --- a/api/envoy/config/route/v4alpha/route_components.proto +++ b/api/envoy/config/route/v4alpha/route_components.proto @@ -1479,6 +1479,30 @@ message RateLimit { } } + message Override { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RateLimit.Override"; + + // Fetches the override from the dynamic metadata. 
+ message DynamicMetadata { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RateLimit.Override.DynamicMetadata"; + + // Metadata struct that defines the key and path to retrieve the struct value. + // The value must be a struct containing an integer "requests_per_unit" property + // and a "unit" property with a value parseable to :ref:`RateLimitUnit + // enum ` + type.metadata.v3.MetadataKey metadata_key = 1 [(validate.rules).message = {required: true}]; + } + + oneof override_specifier { + option (validate.required) = true; + + // Limit override from dynamic metadata. + DynamicMetadata dynamic_metadata = 1; + } + } + // Refers to the stage set in the filter. The rate limit configuration only // applies to filters with the same stage number. The default stage number is // 0. @@ -1498,6 +1522,12 @@ message RateLimit { // configuration. See :ref:`composing actions // ` for additional documentation. repeated Action actions = 3 [(validate.rules).repeated = {min_items: 1}]; + + // An optional limit override to be appended to the descriptor produced by this + // rate limit configuration. If the override value is invalid or cannot be resolved + // from metadata, no override is provided. See :ref:`rate limit override + // ` for more information. + Override limit = 4; } // .. attention:: diff --git a/api/envoy/extensions/common/ratelimit/v3/BUILD b/api/envoy/extensions/common/ratelimit/v3/BUILD index ee90746aa30a..256b1e65eda5 100644 --- a/api/envoy/extensions/common/ratelimit/v3/BUILD +++ b/api/envoy/extensions/common/ratelimit/v3/BUILD @@ -7,6 +7,7 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/api/v2/ratelimit:pkg", + "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/api/envoy/extensions/common/ratelimit/v3/ratelimit.proto b/api/envoy/extensions/common/ratelimit/v3/ratelimit.proto index 187ae3f229c4..9255deb4b64d 100644 --- a/api/envoy/extensions/common/ratelimit/v3/ratelimit.proto +++ b/api/envoy/extensions/common/ratelimit/v3/ratelimit.proto @@ -2,6 +2,8 @@ syntax = "proto3"; package envoy.extensions.common.ratelimit.v3; +import "envoy/type/v3/ratelimit_unit.proto"; + import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -54,6 +56,10 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // // The idea behind the API is that (1)/(2)/(3) and (4)/(5) can be sent in 1 request if desired. // This enables building complex application scenarios with a generic backend. +// +// Optionally the descriptor can contain a limit override under a "limit" key, that specifies +// the number of requests per unit to use instead of the number configured in the +// rate limiting service. message RateLimitDescriptor { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.ratelimit.RateLimitDescriptor"; @@ -69,6 +75,20 @@ message RateLimitDescriptor { string value = 2 [(validate.rules).string = {min_bytes: 1}]; } + // Override rate limit to apply to this descriptor instead of the limit + // configured in the rate limit service. See :ref:`rate limit override + // ` for more information. + message RateLimitOverride { + // The number of requests per unit of time. + uint32 requests_per_unit = 1; + + // The unit of time. + type.v3.RateLimitUnit unit = 2 [(validate.rules).enum = {defined_only: true}]; + } + // Descriptor entries. 
repeated Entry entries = 1 [(validate.rules).repeated = {min_items: 1}]; + + // Optional rate limit override to supply to the ratelimit service. + RateLimitOverride limit = 2; } diff --git a/api/envoy/service/ratelimit/v3/rls.proto b/api/envoy/service/ratelimit/v3/rls.proto index 4aad42fcaa81..06cb6a9e5550 100644 --- a/api/envoy/service/ratelimit/v3/rls.proto +++ b/api/envoy/service/ratelimit/v3/rls.proto @@ -69,6 +69,8 @@ message RateLimitResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.service.ratelimit.v2.RateLimitResponse.RateLimit"; + // Identifies the unit of of time for rate limit. + // [#comment: replace by envoy/type/v3/ratelimit_unit.proto in v4] enum Unit { // The time unit is not known. UNKNOWN = 0; diff --git a/api/envoy/type/v3/ratelimit_unit.proto b/api/envoy/type/v3/ratelimit_unit.proto new file mode 100644 index 000000000000..a3fb27ff47ba --- /dev/null +++ b/api/envoy/type/v3/ratelimit_unit.proto @@ -0,0 +1,30 @@ +syntax = "proto3"; + +package envoy.type.v3; + +import "udpa/annotations/status.proto"; + +option java_package = "io.envoyproxy.envoy.type.v3"; +option java_outer_classname = "RatelimitUnitProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Ratelimit Time Unit] + +// Identifies the unit of of time for rate limit. +enum RateLimitUnit { + // The time unit is not known. + UNKNOWN = 0; + + // The time unit representing a second. + SECOND = 1; + + // The time unit representing a minute. + MINUTE = 2; + + // The time unit representing an hour. + HOUR = 3; + + // The time unit representing a day. + DAY = 4; +} diff --git a/docs/root/api-v3/types/types.rst b/docs/root/api-v3/types/types.rst index f9c1cad3ea82..3e6af53865bd 100644 --- a/docs/root/api-v3/types/types.rst +++ b/docs/root/api-v3/types/types.rst @@ -10,6 +10,7 @@ Types ../type/v3/http_status.proto ../type/v3/percent.proto ../type/v3/range.proto + ../type/v3/ratelimit_unit.proto ../type/v3/semantic_version.proto ../type/v3/token_bucket.proto ../type/matcher/v3/metadata.proto diff --git a/docs/root/configuration/advanced/well_known_dynamic_metadata.rst b/docs/root/configuration/advanced/well_known_dynamic_metadata.rst index 73215617e46d..78bca6614d93 100644 --- a/docs/root/configuration/advanced/well_known_dynamic_metadata.rst +++ b/docs/root/configuration/advanced/well_known_dynamic_metadata.rst @@ -18,3 +18,7 @@ The following Envoy filters emit dynamic metadata that other filters can leverag * :ref:`Role Based Access Control (RBAC) Filter ` * :ref:`Role Based Access Control (RBAC) Network Filter ` * :ref:`ZooKeeper Proxy Filter ` + +The following Envoy filters can be configured to consume dynamic metadata emitted by other filters. + +* :ref:`RateLimit Filter limit override ` \ No newline at end of file diff --git a/docs/root/configuration/http/http_filters/rate_limit_filter.rst b/docs/root/configuration/http/http_filters/rate_limit_filter.rst index 51850ba45640..91ce997c72cd 100644 --- a/docs/root/configuration/http/http_filters/rate_limit_filter.rst +++ b/docs/root/configuration/http/http_filters/rate_limit_filter.rst @@ -75,6 +75,50 @@ the following descriptor is generated: ("remote_address", "") ("source_cluster", "from_cluster") +.. _config_http_filters_rate_limit_rate_limit_override: + +Rate Limit Override +------------------- + +A :ref:`rate limit action ` can optionally contain +a :ref:`limit override `. 
The limit value +will be appended to the descriptor produced by the action and sent to the ratelimit service, +overriding the static service configuration. + +The override can be configured to be taken from the :ref:`Dynamic Metadata +` under a specified :ref: `key +`. If the value is misconfigured +or key does not exist, the override configuration is ignored. + +Example 3 +^^^^^^^^^ + +The following configuration + +.. code-block:: yaml + + actions: + - {generic_key: {descriptor_value: some_value}} + limit: + metadata_key: + key: test.filter.key + path: + - key: test + +.. _config_http_filters_rate_limit_override_dynamic_metadata: + +Will lookup the value of the dynamic metadata. The value must be a structure with integer field +"requests_per_unit" and a string field "unit" which is parseable to :ref:`RateLimitUnit enum +`. For example, with the following dynamic metadata +the rate limit override of 42 requests per hour will be appended to the rate limit descriptor. + +.. code-block:: yaml + + test.filter.key: + test: + requests_per_unit: 42 + unit: HOUR + Statistics ---------- diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 1a298afbcc5c..fe1235fe695f 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -111,6 +111,7 @@ New Features `--use-fake-symbol-table 1` to the command-line arguments when starting Envoy). * ratelimit: add support for use of dynamic metadata :ref:`dynamic_metadata ` as a ratelimit action. * ratelimit: added :ref:`API version ` to explicitly set the version of gRPC service endpoint and message to be used. +* ratelimit: support specifying dynamic overrides in rate limit descriptors using :ref:`limit override ` config. * redis: added acl support :ref:`downstream_auth_username ` for downstream client ACL authentication, and :ref:`auth_username ` to configure authentication usernames for upstream Redis 6+ server clusters with ACL enabled. * regex: added support for enforcing max program size via runtime and stats to monitor program size for :ref:`Google RE2 `. * request_id: added to :ref:`always_set_request_id_in_response setting ` diff --git a/generated_api_shadow/envoy/config/route/v3/route_components.proto b/generated_api_shadow/envoy/config/route/v3/route_components.proto index 4799ffa075c2..9cae6faa5e88 100644 --- a/generated_api_shadow/envoy/config/route/v3/route_components.proto +++ b/generated_api_shadow/envoy/config/route/v3/route_components.proto @@ -1509,6 +1509,24 @@ message RateLimit { } } + message Override { + // Fetches the override from the dynamic metadata. + message DynamicMetadata { + // Metadata struct that defines the key and path to retrieve the struct value. + // The value must be a struct containing an integer "requests_per_unit" property + // and a "unit" property with a value parseable to :ref:`RateLimitUnit + // enum ` + type.metadata.v3.MetadataKey metadata_key = 1 [(validate.rules).message = {required: true}]; + } + + oneof override_specifier { + option (validate.required) = true; + + // Limit override from dynamic metadata. + DynamicMetadata dynamic_metadata = 1; + } + } + // Refers to the stage set in the filter. The rate limit configuration only // applies to filters with the same stage number. The default stage number is // 0. @@ -1528,6 +1546,12 @@ message RateLimit { // configuration. See :ref:`composing actions // ` for additional documentation. 
repeated Action actions = 3 [(validate.rules).repeated = {min_items: 1}]; + + // An optional limit override to be appended to the descriptor produced by this + // rate limit configuration. If the override value is invalid or cannot be resolved + // from metadata, no override is provided. See :ref:`rate limit override + // ` for more information. + Override limit = 4; } // .. attention:: diff --git a/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto b/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto index 21a26d181c26..9646c0c86301 100644 --- a/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto +++ b/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto @@ -1507,6 +1507,30 @@ message RateLimit { } } + message Override { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RateLimit.Override"; + + // Fetches the override from the dynamic metadata. + message DynamicMetadata { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RateLimit.Override.DynamicMetadata"; + + // Metadata struct that defines the key and path to retrieve the struct value. + // The value must be a struct containing an integer "requests_per_unit" property + // and a "unit" property with a value parseable to :ref:`RateLimitUnit + // enum ` + type.metadata.v3.MetadataKey metadata_key = 1 [(validate.rules).message = {required: true}]; + } + + oneof override_specifier { + option (validate.required) = true; + + // Limit override from dynamic metadata. + DynamicMetadata dynamic_metadata = 1; + } + } + // Refers to the stage set in the filter. The rate limit configuration only // applies to filters with the same stage number. The default stage number is // 0. @@ -1526,6 +1550,12 @@ message RateLimit { // configuration. See :ref:`composing actions // ` for additional documentation. repeated Action actions = 3 [(validate.rules).repeated = {min_items: 1}]; + + // An optional limit override to be appended to the descriptor produced by this + // rate limit configuration. If the override value is invalid or cannot be resolved + // from metadata, no override is provided. See :ref:`rate limit override + // ` for more information. + Override limit = 4; } // .. 
attention:: diff --git a/generated_api_shadow/envoy/extensions/common/ratelimit/v3/BUILD b/generated_api_shadow/envoy/extensions/common/ratelimit/v3/BUILD index ee90746aa30a..256b1e65eda5 100644 --- a/generated_api_shadow/envoy/extensions/common/ratelimit/v3/BUILD +++ b/generated_api_shadow/envoy/extensions/common/ratelimit/v3/BUILD @@ -7,6 +7,7 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/api/v2/ratelimit:pkg", + "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/generated_api_shadow/envoy/extensions/common/ratelimit/v3/ratelimit.proto b/generated_api_shadow/envoy/extensions/common/ratelimit/v3/ratelimit.proto index 187ae3f229c4..9255deb4b64d 100644 --- a/generated_api_shadow/envoy/extensions/common/ratelimit/v3/ratelimit.proto +++ b/generated_api_shadow/envoy/extensions/common/ratelimit/v3/ratelimit.proto @@ -2,6 +2,8 @@ syntax = "proto3"; package envoy.extensions.common.ratelimit.v3; +import "envoy/type/v3/ratelimit_unit.proto"; + import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -54,6 +56,10 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // // The idea behind the API is that (1)/(2)/(3) and (4)/(5) can be sent in 1 request if desired. // This enables building complex application scenarios with a generic backend. +// +// Optionally the descriptor can contain a limit override under a "limit" key, that specifies +// the number of requests per unit to use instead of the number configured in the +// rate limiting service. message RateLimitDescriptor { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.ratelimit.RateLimitDescriptor"; @@ -69,6 +75,20 @@ message RateLimitDescriptor { string value = 2 [(validate.rules).string = {min_bytes: 1}]; } + // Override rate limit to apply to this descriptor instead of the limit + // configured in the rate limit service. See :ref:`rate limit override + // ` for more information. + message RateLimitOverride { + // The number of requests per unit of time. + uint32 requests_per_unit = 1; + + // The unit of time. + type.v3.RateLimitUnit unit = 2 [(validate.rules).enum = {defined_only: true}]; + } + // Descriptor entries. repeated Entry entries = 1 [(validate.rules).repeated = {min_items: 1}]; + + // Optional rate limit override to supply to the ratelimit service. + RateLimitOverride limit = 2; } diff --git a/generated_api_shadow/envoy/service/ratelimit/v3/rls.proto b/generated_api_shadow/envoy/service/ratelimit/v3/rls.proto index 4aad42fcaa81..06cb6a9e5550 100644 --- a/generated_api_shadow/envoy/service/ratelimit/v3/rls.proto +++ b/generated_api_shadow/envoy/service/ratelimit/v3/rls.proto @@ -69,6 +69,8 @@ message RateLimitResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.service.ratelimit.v2.RateLimitResponse.RateLimit"; + // Identifies the unit of of time for rate limit. + // [#comment: replace by envoy/type/v3/ratelimit_unit.proto in v4] enum Unit { // The time unit is not known. 
UNKNOWN = 0; diff --git a/generated_api_shadow/envoy/type/v3/ratelimit_unit.proto b/generated_api_shadow/envoy/type/v3/ratelimit_unit.proto new file mode 100644 index 000000000000..a3fb27ff47ba --- /dev/null +++ b/generated_api_shadow/envoy/type/v3/ratelimit_unit.proto @@ -0,0 +1,30 @@ +syntax = "proto3"; + +package envoy.type.v3; + +import "udpa/annotations/status.proto"; + +option java_package = "io.envoyproxy.envoy.type.v3"; +option java_outer_classname = "RatelimitUnitProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Ratelimit Time Unit] + +// Identifies the unit of of time for rate limit. +enum RateLimitUnit { + // The time unit is not known. + UNKNOWN = 0; + + // The time unit representing a second. + SECOND = 1; + + // The time unit representing a minute. + MINUTE = 2; + + // The time unit representing an hour. + HOUR = 3; + + // The time unit representing a day. + DAY = 4; +} diff --git a/include/envoy/ratelimit/BUILD b/include/envoy/ratelimit/BUILD index d726ae9f54ed..615b69fa3107 100644 --- a/include/envoy/ratelimit/BUILD +++ b/include/envoy/ratelimit/BUILD @@ -11,4 +11,7 @@ envoy_package() envoy_cc_library( name = "ratelimit_interface", hdrs = ["ratelimit.h"], + deps = [ + "@envoy_api//envoy/type/v3:pkg_cc_proto", + ], ) diff --git a/include/envoy/ratelimit/ratelimit.h b/include/envoy/ratelimit/ratelimit.h index 4e122b6863f1..f23c8170ef68 100644 --- a/include/envoy/ratelimit/ratelimit.h +++ b/include/envoy/ratelimit/ratelimit.h @@ -3,9 +3,21 @@ #include #include +#include "envoy/type/v3/ratelimit_unit.pb.h" + +#include "absl/types/optional.h" + namespace Envoy { namespace RateLimit { +/** + * An optional dynamic override for the rate limit. See ratelimit.proto + */ +struct RateLimitOverride { + uint32_t requests_per_unit_; + envoy::type::v3::RateLimitUnit unit_; +}; + /** * A single rate limit request descriptor entry. See ratelimit.proto. */ @@ -19,6 +31,7 @@ struct DescriptorEntry { */ struct Descriptor { std::vector entries_; + absl::optional limit_ = absl::nullopt; }; } // namespace RateLimit diff --git a/include/envoy/router/router_ratelimit.h b/include/envoy/router/router_ratelimit.h index a21e0854bd27..1e6910c3b9ba 100644 --- a/include/envoy/router/router_ratelimit.h +++ b/include/envoy/router/router_ratelimit.h @@ -12,6 +12,26 @@ namespace Envoy { namespace Router { + +/** + * Base interface for generic rate limit override action. + */ +class RateLimitOverrideAction { +public: + virtual ~RateLimitOverrideAction() = default; + + /** + * Potentially populate the descriptors 'limit' property with a RateLimitOverride instance + * @param descriptor supplies the descriptor to optionally fill. + * @param metadata supplies the dynamic metadata for the request. + * @return true if RateLimitOverride was set in the descriptor. + */ + virtual bool populateOverride(RateLimit::Descriptor& descriptor, + const envoy::config::core::v3::Metadata* metadata) const PURE; +}; + +using RateLimitOverrideActionPtr = std::unique_ptr; + /** * Base interface for generic rate limit action. 
*/ diff --git a/source/common/router/router_ratelimit.cc b/source/common/router/router_ratelimit.cc index 6937aee47989..9e7f9dab3630 100644 --- a/source/common/router/router_ratelimit.cc +++ b/source/common/router/router_ratelimit.cc @@ -18,6 +18,31 @@ namespace Router { const uint64_t RateLimitPolicyImpl::MAX_STAGE_NUMBER = 10UL; +bool DynamicMetadataRateLimitOverride::populateOverride( + RateLimit::Descriptor& descriptor, const envoy::config::core::v3::Metadata* metadata) const { + const ProtobufWkt::Value& metadata_value = + Envoy::Config::Metadata::metadataValue(metadata, metadata_key_); + if (metadata_value.kind_case() != ProtobufWkt::Value::kStructValue) { + return false; + } + + const auto& override_value = metadata_value.struct_value().fields(); + const auto& limit_it = override_value.find("requests_per_unit"); + const auto& unit_it = override_value.find("unit"); + if (limit_it != override_value.end() && + limit_it->second.kind_case() == ProtobufWkt::Value::kNumberValue && + unit_it != override_value.end() && + unit_it->second.kind_case() == ProtobufWkt::Value::kStringValue) { + envoy::type::v3::RateLimitUnit unit; + if (envoy::type::v3::RateLimitUnit_Parse(unit_it->second.string_value(), &unit)) { + descriptor.limit_.emplace(RateLimit::RateLimitOverride{ + static_cast(limit_it->second.number_value()), unit}); + return true; + } + } + return false; +} + bool SourceClusterAction::populateDescriptor(const Router::RouteEntry&, RateLimit::Descriptor& descriptor, const std::string& local_service_cluster, @@ -144,6 +169,16 @@ RateLimitPolicyEntryImpl::RateLimitPolicyEntryImpl( NOT_REACHED_GCOVR_EXCL_LINE; } } + if (config.has_limit()) { + switch (config.limit().override_specifier_case()) { + case envoy::config::route::v3::RateLimit_Override::OverrideSpecifierCase::kDynamicMetadata: + limit_override_.emplace( + new DynamicMetadataRateLimitOverride(config.limit().dynamic_metadata())); + break; + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } + } } void RateLimitPolicyEntryImpl::populateDescriptors( @@ -161,6 +196,10 @@ void RateLimitPolicyEntryImpl::populateDescriptors( } } + if (limit_override_) { + limit_override_.value()->populateOverride(descriptor, dynamic_metadata); + } + if (result) { descriptors.emplace_back(descriptor); } diff --git a/source/common/router/router_ratelimit.h b/source/common/router/router_ratelimit.h index 2b990a659253..5343c2aab43c 100644 --- a/source/common/router/router_ratelimit.h +++ b/source/common/router/router_ratelimit.h @@ -13,9 +13,28 @@ #include "common/config/metadata.h" #include "common/http/header_utility.h" +#include "absl/types/optional.h" + namespace Envoy { namespace Router { +/** + * Populate rate limit override from dynamic metadata. + */ +class DynamicMetadataRateLimitOverride : public RateLimitOverrideAction { +public: + DynamicMetadataRateLimitOverride( + const envoy::config::route::v3::RateLimit::Override::DynamicMetadata& config) + : metadata_key_(config.metadata_key()) {} + + // Router::RateLimitOverrideAction + bool populateOverride(RateLimit::Descriptor& descriptor, + const envoy::config::core::v3::Metadata* metadata) const override; + +private: + const Envoy::Config::MetadataKey metadata_key_; +}; + /** * Action for source cluster rate limiting. 
*/ @@ -149,6 +168,7 @@ class RateLimitPolicyEntryImpl : public RateLimitPolicyEntry { const std::string disable_key_; uint64_t stage_; std::vector actions_; + absl::optional limit_override_ = absl::nullopt; }; /** diff --git a/source/extensions/filters/common/ratelimit/ratelimit_impl.cc b/source/extensions/filters/common/ratelimit/ratelimit_impl.cc index 588916fae200..fb8f7bb3abe7 100644 --- a/source/extensions/filters/common/ratelimit/ratelimit_impl.cc +++ b/source/extensions/filters/common/ratelimit/ratelimit_impl.cc @@ -51,6 +51,12 @@ void GrpcClientImpl::createRequest(envoy::service::ratelimit::v3::RateLimitReque new_entry->set_key(entry.key_); new_entry->set_value(entry.value_); } + if (descriptor.limit_) { + envoy::extensions::common::ratelimit::v3::RateLimitDescriptor_RateLimitOverride* new_limit = + new_descriptor->mutable_limit(); + new_limit->set_requests_per_unit(descriptor.limit_.value().requests_per_unit_); + new_limit->set_unit(descriptor.limit_.value().unit_); + } } } diff --git a/test/common/router/router_ratelimit_test.cc b/test/common/router/router_ratelimit_test.cc index 159b32e2dca5..ca2ab96779ee 100644 --- a/test/common/router/router_ratelimit_test.cc +++ b/test/common/router/router_ratelimit_test.cc @@ -681,6 +681,130 @@ TEST_F(RateLimitPolicyEntryTest, CompoundActionsNoDescriptor) { EXPECT_TRUE(descriptors_.empty()); } +TEST_F(RateLimitPolicyEntryTest, DynamicMetadataRateLimitOverride) { + const std::string yaml = R"EOF( +actions: +- generic_key: + descriptor_value: limited_fake_key +limit: + dynamic_metadata: + metadata_key: + key: test.filter.key + path: + - key: test + )EOF"; + + setupTest(yaml); + + std::string metadata_yaml = R"EOF( +filter_metadata: + test.filter.key: + test: + requests_per_unit: 42 + unit: HOUR + )EOF"; + + envoy::config::core::v3::Metadata metadata; + TestUtility::loadFromYaml(metadata_yaml, metadata); + rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header_, default_remote_address_, + &metadata); + EXPECT_THAT( + std::vector( + {{{{"generic_key", "limited_fake_key"}}, {{42, envoy::type::v3::RateLimitUnit::HOUR}}}}), + testing::ContainerEq(descriptors_)); +} + +TEST_F(RateLimitPolicyEntryTest, DynamicMetadataRateLimitOverrideNotFound) { + const std::string yaml = R"EOF( +actions: +- generic_key: + descriptor_value: limited_fake_key +limit: + dynamic_metadata: + metadata_key: + key: unknown.key + path: + - key: test + )EOF"; + + setupTest(yaml); + + std::string metadata_yaml = R"EOF( +filter_metadata: + test.filter.key: + test: + requests_per_unit: 42 + unit: HOUR + )EOF"; + + envoy::config::core::v3::Metadata metadata; + TestUtility::loadFromYaml(metadata_yaml, metadata); + rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header_, default_remote_address_, + &metadata); + EXPECT_THAT(std::vector({{{{"generic_key", "limited_fake_key"}}}}), + testing::ContainerEq(descriptors_)); +} + +TEST_F(RateLimitPolicyEntryTest, DynamicMetadataRateLimitOverrideWrongType) { + const std::string yaml = R"EOF( +actions: +- generic_key: + descriptor_value: limited_fake_key +limit: + dynamic_metadata: + metadata_key: + key: test.filter.key + path: + - key: test + )EOF"; + + setupTest(yaml); + + std::string metadata_yaml = R"EOF( +filter_metadata: + test.filter.key: + test: some_string + )EOF"; + + envoy::config::core::v3::Metadata metadata; + TestUtility::loadFromYaml(metadata_yaml, metadata); + rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header_, default_remote_address_, + &metadata); + 
EXPECT_THAT(std::vector({{{{"generic_key", "limited_fake_key"}}}}), + testing::ContainerEq(descriptors_)); +} + +TEST_F(RateLimitPolicyEntryTest, DynamicMetadataRateLimitOverrideWrongUnit) { + const std::string yaml = R"EOF( +actions: +- generic_key: + descriptor_value: limited_fake_key +limit: + dynamic_metadata: + metadata_key: + key: test.filter.key + path: + - key: test + )EOF"; + + setupTest(yaml); + + std::string metadata_yaml = R"EOF( +filter_metadata: + test.filter.key: + test: + requests_per_unit: 42 + unit: NOT_A_UNIT + )EOF"; + + envoy::config::core::v3::Metadata metadata; + TestUtility::loadFromYaml(metadata_yaml, metadata); + rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header_, default_remote_address_, + &metadata); + EXPECT_THAT(std::vector({{{{"generic_key", "limited_fake_key"}}}}), + testing::ContainerEq(descriptors_)); +} + } // namespace } // namespace Router } // namespace Envoy diff --git a/test/extensions/filters/common/ratelimit/ratelimit_impl_test.cc b/test/extensions/filters/common/ratelimit/ratelimit_impl_test.cc index c17101438214..2646f8966c96 100644 --- a/test/extensions/filters/common/ratelimit/ratelimit_impl_test.cc +++ b/test/extensions/filters/common/ratelimit/ratelimit_impl_test.cc @@ -124,6 +124,29 @@ TEST_F(RateLimitGrpcClientTest, Basic) { EXPECT_CALL(request_callbacks_, complete_(LimitStatus::Error, _, _)); client_.onFailure(Grpc::Status::Unknown, "", span_); } + + { + envoy::service::ratelimit::v3::RateLimitRequest request; + Http::TestRequestHeaderMapImpl headers; + GrpcClientImpl::createRequest( + request, "foo", + {{{{"foo", "bar"}, {"bar", "baz"}}, {{42, envoy::type::v3::RateLimitUnit::MINUTE}}}}); + EXPECT_CALL(*async_client_, sendRaw(_, _, Grpc::ProtoBufferEq(request), _, _, _)) + .WillOnce(Return(&async_request_)); + + client_.limit( + request_callbacks_, "foo", + {{{{"foo", "bar"}, {"bar", "baz"}}, {{42, envoy::type::v3::RateLimitUnit::MINUTE}}}}, + Tracing::NullSpan::instance()); + + client_.onCreateInitialMetadata(headers); + + response = std::make_unique(); + response->set_overall_code(envoy::service::ratelimit::v3::RateLimitResponse::OK); + EXPECT_CALL(span_, setTag(Eq("ratelimit_status"), Eq("ok"))); + EXPECT_CALL(request_callbacks_, complete_(LimitStatus::OK, _, _)); + client_.onSuccess(std::move(response), span_); + } } TEST_F(RateLimitGrpcClientTest, Cancel) { diff --git a/test/mocks/ratelimit/mocks.h b/test/mocks/ratelimit/mocks.h index 238c3c25d8cc..7f983beabbca 100644 --- a/test/mocks/ratelimit/mocks.h +++ b/test/mocks/ratelimit/mocks.h @@ -10,12 +10,16 @@ namespace Envoy { namespace RateLimit { +inline bool operator==(const RateLimitOverride& lhs, const RateLimitOverride& rhs) { + return lhs.requests_per_unit_ == rhs.requests_per_unit_ && lhs.unit_ == rhs.unit_; +} + inline bool operator==(const DescriptorEntry& lhs, const DescriptorEntry& rhs) { return lhs.key_ == rhs.key_ && lhs.value_ == rhs.value_; } inline bool operator==(const Descriptor& lhs, const Descriptor& rhs) { - return lhs.entries_ == rhs.entries_; + return lhs.entries_ == rhs.entries_ && lhs.limit_ == rhs.limit_; } } // namespace RateLimit From d6e2fd0185ca620745479da2c43c0564eeaf35c5 Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Mon, 29 Jun 2020 09:29:51 -0600 Subject: [PATCH 482/909] http: further header map cleanups (#11791) - Move more headers from default O(1) headers to extensions - Fix an issue where envoy header prefix override is used with custom registered headers not working. 
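For calling code, the practical difference is between the generated O(1) accessors and a by-name lookup. A rough sketch, using the APIs as they look in this era of the codebase and meant purely for illustration:

    // Headers still listed in the INLINE_* macros keep a generated accessor:
    if (request_headers.Path() != nullptr) {
      absl::string_view path = request_headers.Path()->value().getStringView();
    }
    // Headers removed from the default inline set (e.g. Accept, Authorization)
    // are reached by name, or through the extension custom O(1) header
    // registration referenced in headers.h below:
    const Envoy::Http::HeaderEntry* accept =
        request_headers.get(Envoy::Http::LowerCaseString("accept"));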
Signed-off-by: Matt Klein --- include/envoy/http/header_map.h | 13 +-- source/common/http/headers.h | 110 ++++++++++-------- .../grpc/http_grpc_access_log_impl.cc | 9 +- source/extensions/common/aws/signer_impl.cc | 2 +- .../compression/gzip/compressor/config.h | 2 +- .../compression/gzip/decompressor/config.h | 2 +- .../extensions/filters/common/expr/context.cc | 5 +- .../common/ext_authz/ext_authz_http_impl.cc | 4 +- .../filters/http/cache/cache_filter_utils.cc | 11 +- .../filters/http/cache/http_cache.cc | 11 +- .../http/common/compressor/compressor.cc | 57 +++++---- .../filters/http/cors/cors_filter.cc | 41 ++++--- .../filters/http/csrf/csrf_filter.cc | 9 +- .../http/decompressor/decompressor_filter.cc | 82 +++++-------- .../http/decompressor/decompressor_filter.h | 68 +++++++++-- .../http/grpc_http1_reverse_bridge/filter.cc | 5 +- .../filters/http/grpc_web/grpc_web_filter.cc | 10 +- .../filters/http/gzip/gzip_filter.cc | 2 +- .../filters/http/jwt_authn/extractor.cc | 2 +- .../filters/http/jwt_authn/filter.cc | 8 +- .../extensions/stat_sinks/hystrix/hystrix.cc | 15 ++- .../common/ot/opentracing_driver_impl.cc | 13 ++- source/server/admin/utils.cc | 11 +- .../grpc/grpc_client_integration_test.cc | 13 +-- test/common/http/header_map_impl_test.cc | 36 +++--- .../extensions/common/aws/signer_impl_test.cc | 64 +++++----- .../ext_authz/ext_authz_http_impl_test.cc | 8 +- .../http/cache/cache_filter_utils_test.cc | 3 +- .../simple_http_cache_test.cc | 6 +- .../compressor_filter_integration_test.cc | 30 +++-- .../decompressor/decompressor_filter_test.cc | 12 +- .../reverse_bridge_integration_test.cc | 2 +- .../reverse_bridge_test.cc | 30 +++-- .../http/grpc_web/grpc_web_filter_test.cc | 8 +- .../http/gzip/gzip_filter_integration_test.cc | 32 +++-- .../filters/http/gzip/gzip_filter_test.cc | 3 +- .../http/jwt_authn/authenticator_test.cc | 4 +- .../filters/http/jwt_authn/extractor_test.cc | 2 +- .../http/jwt_authn/filter_integration_test.cc | 4 +- .../stats_sinks/hystrix/hystrix_test.cc | 2 +- .../common/ot/opentracing_driver_impl_test.cc | 2 +- .../lightstep/lightstep_tracer_impl_test.cc | 24 ++-- .../tracers/zipkin/zipkin_tracer_impl_test.cc | 4 +- test/integration/header_integration_test.cc | 8 +- .../header_prefix_integration_test.cc | 5 +- test/integration/integration_test.cc | 6 +- 46 files changed, 472 insertions(+), 328 deletions(-) diff --git a/include/envoy/http/header_map.h b/include/envoy/http/header_map.h index ed72f4e52492..2bd77a2f6bfc 100644 --- a/include/envoy/http/header_map.h +++ b/include/envoy/http/header_map.h @@ -263,9 +263,6 @@ class HeaderEntry { * processing. This allows O(1) access to these headers without even a hash lookup. 
*/ #define INLINE_REQ_HEADERS(HEADER_FUNC) \ - HEADER_FUNC(Accept) \ - HEADER_FUNC(AcceptEncoding) \ - HEADER_FUNC(Authorization) \ HEADER_FUNC(ClientTraceId) \ HEADER_FUNC(EnvoyDownstreamServiceCluster) \ HEADER_FUNC(EnvoyDownstreamServiceNode) \ @@ -290,15 +287,11 @@ class HeaderEntry { HEADER_FUNC(ForwardedClientCert) \ HEADER_FUNC(ForwardedFor) \ HEADER_FUNC(ForwardedProto) \ - HEADER_FUNC(GrpcAcceptEncoding) \ HEADER_FUNC(GrpcTimeout) \ HEADER_FUNC(Host) \ HEADER_FUNC(Method) \ - HEADER_FUNC(OtSpanContext) \ - HEADER_FUNC(Origin) \ HEADER_FUNC(Path) \ HEADER_FUNC(Protocol) \ - HEADER_FUNC(Referer) \ HEADER_FUNC(Scheme) \ HEADER_FUNC(TE) \ HEADER_FUNC(UserAgent) @@ -308,7 +301,6 @@ class HeaderEntry { */ #define INLINE_RESP_HEADERS(HEADER_FUNC) \ HEADER_FUNC(Date) \ - HEADER_FUNC(Etag) \ HEADER_FUNC(EnvoyDegraded) \ HEADER_FUNC(EnvoyImmediateHealthCheckFail) \ HEADER_FUNC(EnvoyRateLimited) \ @@ -317,16 +309,13 @@ class HeaderEntry { HEADER_FUNC(EnvoyUpstreamServiceTime) \ HEADER_FUNC(Location) \ HEADER_FUNC(Server) \ - HEADER_FUNC(Status) \ - HEADER_FUNC(Vary) + HEADER_FUNC(Status) /** * Default O(1) request and response headers. */ #define INLINE_REQ_RESP_HEADERS(HEADER_FUNC) \ - HEADER_FUNC(CacheControl) \ HEADER_FUNC(Connection) \ - HEADER_FUNC(ContentEncoding) \ HEADER_FUNC(ContentLength) \ HEADER_FUNC(ContentType) \ HEADER_FUNC(EnvoyAttemptCount) \ diff --git a/source/common/http/headers.h b/source/common/http/headers.h index 8c9abe61aa07..b13a683d80a9 100644 --- a/source/common/http/headers.h +++ b/source/common/http/headers.h @@ -43,12 +43,13 @@ class PrefixValue { }; /** - * Constant HTTP headers and values. All lower case. + * These are headers that are used in extension custom O(1) header registration. These headers + * *must* not contain any prefix override headers, as static init order requires that HeaderValues + * be instantiated for the first time after bootstrap is loaded and before the header maps are + * finalized. 
*/ -class HeaderValues { +class CustomHeaderValues { public: - const char* prefix() { return ThreadSafeSingleton::get().prefix(); } - const LowerCaseString Accept{"accept"}; const LowerCaseString AcceptEncoding{"accept-encoding"}; const LowerCaseString AccessControlRequestMethod{"access-control-request-method"}; @@ -58,14 +59,66 @@ class HeaderValues { const LowerCaseString AccessControlExposeHeaders{"access-control-expose-headers"}; const LowerCaseString AccessControlMaxAge{"access-control-max-age"}; const LowerCaseString AccessControlAllowCredentials{"access-control-allow-credentials"}; - const LowerCaseString Age{"age"}; const LowerCaseString Authorization{"authorization"}; + const LowerCaseString CacheControl{"cache-control"}; + const LowerCaseString ContentEncoding{"content-encoding"}; + const LowerCaseString Etag{"etag"}; + const LowerCaseString GrpcAcceptEncoding{"grpc-accept-encoding"}; + const LowerCaseString Origin{"origin"}; + const LowerCaseString OtSpanContext{"x-ot-span-context"}; + const LowerCaseString Referer{"referer"}; + const LowerCaseString Vary{"vary"}; + + struct { + const std::string Gzip{"gzip"}; + const std::string Identity{"identity"}; + const std::string Wildcard{"*"}; + } AcceptEncodingValues; + + struct { + const std::string All{"*"}; + } AccessControlAllowOriginValue; + + struct { + const std::string NoCache{"no-cache"}; + const std::string NoCacheMaxAge0{"no-cache, max-age=0"}; + const std::string NoTransform{"no-transform"}; + const std::string Private{"private"}; + } CacheControlValues; + + struct { + const std::string Gzip{"gzip"}; + } ContentEncodingValues; + + struct { + const std::string True{"true"}; + } CORSValues; + + struct { + const std::string Default{"identity,deflate,gzip"}; + } GrpcAcceptEncodingValues; + + struct { + const std::string AcceptEncoding{"Accept-Encoding"}; + const std::string Wildcard{"*"}; + } VaryValues; +}; + +using CustomHeaders = ConstSingleton; + +/** + * Constant HTTP headers and values. All lower case. This group of headers can contain prefix + * override headers. + */ +class HeaderValues { +public: + const char* prefix() { return ThreadSafeSingleton::get().prefix(); } + + const LowerCaseString Age{"age"}; const LowerCaseString ProxyAuthenticate{"proxy-authenticate"}; const LowerCaseString ProxyAuthorization{"proxy-authorization"}; - const LowerCaseString CacheControl{"cache-control"}; const LowerCaseString ClientTraceId{"x-client-trace-id"}; const LowerCaseString Connection{"connection"}; - const LowerCaseString ContentEncoding{"content-encoding"}; const LowerCaseString ContentLength{"content-length"}; const LowerCaseString ContentType{"content-type"}; const LowerCaseString Cookie{"cookie"}; @@ -86,6 +139,9 @@ class HeaderValues { absl::StrCat(prefix(), "-immediate-health-check-fail")}; const LowerCaseString EnvoyOriginalUrl{absl::StrCat(prefix(), "-original-url")}; const LowerCaseString EnvoyInternalRequest{absl::StrCat(prefix(), "-internal")}; + // TODO(mattklein123): EnvoyIpTags should be a custom header registered with the IP tagging + // filter. We need to figure out if we can remove this header from the set of headers that + // participate in prefix overrides. 
const LowerCaseString EnvoyIpTags{absl::StrCat(prefix(), "-ip-tags")}; const LowerCaseString EnvoyMaxRetries{absl::StrCat(prefix(), "-max-retries")}; const LowerCaseString EnvoyNotForwarded{absl::StrCat(prefix(), "-not-forwarded")}; @@ -116,7 +172,6 @@ class HeaderValues { const LowerCaseString EnvoyUpstreamHealthCheckedCluster{ absl::StrCat(prefix(), "-upstream-healthchecked-cluster")}; const LowerCaseString EnvoyDecoratorOperation{absl::StrCat(prefix(), "-decorator-operation")}; - const LowerCaseString Etag{"etag"}; const LowerCaseString Expect{"expect"}; const LowerCaseString Expires{"expires"}; const LowerCaseString ForwardedClientCert{"x-forwarded-client-cert"}; @@ -126,7 +181,6 @@ class HeaderValues { const LowerCaseString GrpcMessage{"grpc-message"}; const LowerCaseString GrpcStatus{"grpc-status"}; const LowerCaseString GrpcTimeout{"grpc-timeout"}; - const LowerCaseString GrpcAcceptEncoding{"grpc-accept-encoding"}; const LowerCaseString GrpcStatusDetailsBin{"grpc-status-details-bin"}; const LowerCaseString Host{":authority"}; const LowerCaseString HostLegacy{"host"}; @@ -134,12 +188,9 @@ class HeaderValues { const LowerCaseString KeepAlive{"keep-alive"}; const LowerCaseString Location{"location"}; const LowerCaseString Method{":method"}; - const LowerCaseString Origin{"origin"}; - const LowerCaseString OtSpanContext{"x-ot-span-context"}; const LowerCaseString Path{":path"}; const LowerCaseString Protocol{":protocol"}; const LowerCaseString ProxyConnection{"proxy-connection"}; - const LowerCaseString Referer{"referer"}; const LowerCaseString RequestId{"x-request-id"}; const LowerCaseString Scheme{":scheme"}; const LowerCaseString Server{"server"}; @@ -149,7 +200,6 @@ class HeaderValues { const LowerCaseString TE{"te"}; const LowerCaseString Upgrade{"upgrade"}; const LowerCaseString UserAgent{"user-agent"}; - const LowerCaseString Vary{"vary"}; const LowerCaseString Via{"via"}; const LowerCaseString WWWAuthenticate{"www-authenticate"}; const LowerCaseString XContentTypeOptions{"x-content-type-options"}; @@ -167,13 +217,6 @@ class HeaderValues { const std::string WebSocket{"websocket"}; } UpgradeValues; - struct { - const std::string NoCache{"no-cache"}; - const std::string NoCacheMaxAge0{"no-cache, max-age=0"}; - const std::string NoTransform{"no-transform"}; - const std::string Private{"private"}; - } CacheControlValues; - struct { const std::string Text{"text/plain"}; const std::string TextEventStream{"text/event-stream"}; @@ -261,10 +304,6 @@ class HeaderValues { const std::string EnvoyHealthChecker{"Envoy/HC"}; } UserAgentValues; - struct { - const std::string Default{"identity,deflate,gzip"}; - } GrpcAcceptEncodingValues; - struct { const std::string Trailers{"trailers"}; } TEValues; @@ -273,35 +312,12 @@ class HeaderValues { const std::string Nosniff{"nosniff"}; } XContentTypeOptionValues; - struct { - const std::string True{"true"}; - } CORSValues; - struct { const std::string Http10String{"HTTP/1.0"}; const std::string Http11String{"HTTP/1.1"}; const std::string Http2String{"HTTP/2"}; const std::string Http3String{"HTTP/3"}; } ProtocolStrings; - - struct { - const std::string Gzip{"gzip"}; - const std::string Identity{"identity"}; - const std::string Wildcard{"*"}; - } AcceptEncodingValues; - - struct { - const std::string Gzip{"gzip"}; - } ContentEncodingValues; - - struct { - const std::string AcceptEncoding{"Accept-Encoding"}; - const std::string Wildcard{"*"}; - } VaryValues; - - struct { - const std::string All{"*"}; - } AccessControlAllowOriginValue; }; using Headers = 
ConstSingleton; diff --git a/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.cc b/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.cc index fc73d1b649fb..92bc5e38ee73 100644 --- a/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.cc +++ b/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.cc @@ -5,6 +5,7 @@ #include "envoy/extensions/access_loggers/grpc/v3/als.pb.h" #include "common/common/assert.h" +#include "common/http/headers.h" #include "common/network/utility.h" #include "common/stream_info/utility.h" @@ -15,6 +16,9 @@ namespace Extensions { namespace AccessLoggers { namespace HttpGrpc { +Http::RegisterCustomInlineHeader + referer_handle(Http::CustomHeaders::get().Referer); + HttpGrpcAccessLog::ThreadLocalLogger::ThreadLocalLogger( GrpcCommon::GrpcAccessLoggerSharedPtr logger) : logger_(std::move(logger)) {} @@ -86,8 +90,9 @@ void HttpGrpcAccessLog::emitLog(const Http::RequestHeaderMap& request_headers, if (request_headers.UserAgent() != nullptr) { request_properties->set_user_agent(std::string(request_headers.getUserAgentValue())); } - if (request_headers.Referer() != nullptr) { - request_properties->set_referer(std::string(request_headers.getRefererValue())); + if (request_headers.getInline(referer_handle.handle()) != nullptr) { + request_properties->set_referer( + std::string(request_headers.getInlineValue(referer_handle.handle()))); } if (request_headers.ForwardedFor() != nullptr) { request_properties->set_forwarded_for(std::string(request_headers.getForwardedForValue())); diff --git a/source/extensions/common/aws/signer_impl.cc b/source/extensions/common/aws/signer_impl.cc index da5256e25ed1..157ad46aa4b3 100644 --- a/source/extensions/common/aws/signer_impl.cc +++ b/source/extensions/common/aws/signer_impl.cc @@ -75,7 +75,7 @@ void SignerImpl::sign(Http::RequestHeaderMap& headers, const std::string& conten const auto authorization_header = createAuthorizationHeader( credentials.accessKeyId().value(), credential_scope, canonical_headers, signature); ENVOY_LOG(debug, "Signing request with: {}", authorization_header); - headers.addCopy(Http::Headers::get().Authorization, authorization_header); + headers.addCopy(Http::CustomHeaders::get().Authorization, authorization_header); } std::string SignerImpl::createContentHash(Http::RequestMessage& message, bool sign_body) const { diff --git a/source/extensions/compression/gzip/compressor/config.h b/source/extensions/compression/gzip/compressor/config.h index 25c96fff8a90..2fcee31020b3 100644 --- a/source/extensions/compression/gzip/compressor/config.h +++ b/source/extensions/compression/gzip/compressor/config.h @@ -33,7 +33,7 @@ class GzipCompressorFactory : public Envoy::Compression::Compressor::CompressorF Envoy::Compression::Compressor::CompressorPtr createCompressor() override; const std::string& statsPrefix() const override { return gzipStatsPrefix(); } const std::string& contentEncoding() const override { - return Http::Headers::get().ContentEncodingValues.Gzip; + return Http::CustomHeaders::get().ContentEncodingValues.Gzip; } private: diff --git a/source/extensions/compression/gzip/decompressor/config.h b/source/extensions/compression/gzip/decompressor/config.h index 9a99398b23a4..c2b8ded22562 100644 --- a/source/extensions/compression/gzip/decompressor/config.h +++ b/source/extensions/compression/gzip/decompressor/config.h @@ -31,7 +31,7 @@ class GzipDecompressorFactory : public Envoy::Compression::Decompressor::Decompr Envoy::Compression::Decompressor::DecompressorPtr 
createDecompressor() override; const std::string& statsPrefix() const override { return gzipStatsPrefix(); } const std::string& contentEncoding() const override { - return Http::Headers::get().ContentEncodingValues.Gzip; + return Http::CustomHeaders::get().ContentEncodingValues.Gzip; } private: diff --git a/source/extensions/filters/common/expr/context.cc b/source/extensions/filters/common/expr/context.cc index aa132bea068a..97420096adcd 100644 --- a/source/extensions/filters/common/expr/context.cc +++ b/source/extensions/filters/common/expr/context.cc @@ -13,6 +13,9 @@ namespace Filters { namespace Common { namespace Expr { +Http::RegisterCustomInlineHeader + referer_handle(Http::CustomHeaders::get().Referer); + absl::optional convertHeaderEntry(const Http::HeaderEntry* header) { if (header == nullptr) { return {}; @@ -106,7 +109,7 @@ absl::optional RequestWrapper::operator[](CelValue key) const { } else if (value == Method) { return convertHeaderEntry(headers_.value_->Method()); } else if (value == Referer) { - return convertHeaderEntry(headers_.value_->Referer()); + return convertHeaderEntry(headers_.value_->getInline(referer_handle.handle())); } else if (value == ID) { return convertHeaderEntry(headers_.value_->RequestId()); } else if (value == UserAgent) { diff --git a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc index 3d5f27165871..01894546ab0f 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc +++ b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc @@ -153,8 +153,8 @@ MatcherSharedPtr ClientConfig::toRequestMatchers(const envoy::type::matcher::v3::ListStringMatcher& list, const bool disable_lowercase_string_matcher) { const std::vector keys{ - {Http::Headers::get().Authorization, Http::Headers::get().Method, Http::Headers::get().Path, - Http::Headers::get().Host}}; + {Http::CustomHeaders::get().Authorization, Http::Headers::get().Method, + Http::Headers::get().Path, Http::Headers::get().Host}}; std::vector matchers( createStringMatchers(list, disable_lowercase_string_matcher)); diff --git a/source/extensions/filters/http/cache/cache_filter_utils.cc b/source/extensions/filters/http/cache/cache_filter_utils.cc index 69dd6515d944..e4f51bc5c611 100644 --- a/source/extensions/filters/http/cache/cache_filter_utils.cc +++ b/source/extensions/filters/http/cache/cache_filter_utils.cc @@ -7,24 +7,29 @@ namespace Extensions { namespace HttpFilters { namespace Cache { +Http::RegisterCustomInlineHeader + authorization_handle(Http::CustomHeaders::get().Authorization); +Http::RegisterCustomInlineHeader + cache_control_handle(Http::CustomHeaders::get().Referer); + bool CacheFilterUtils::isCacheableRequest(const Http::RequestHeaderMap& headers) { const absl::string_view method = headers.getMethodValue(); const absl::string_view forwarded_proto = headers.getForwardedProtoValue(); const Http::HeaderValues& header_values = Http::Headers::get(); // TODO(toddmgreer): Also serve HEAD requests from cache. // TODO(toddmgreer): Check all the other cache-related headers. 
- return headers.Path() && headers.Host() && !headers.Authorization() && + return headers.Path() && headers.Host() && !headers.getInline(authorization_handle.handle()) && (method == header_values.MethodValues.Get) && (forwarded_proto == header_values.SchemeValues.Http || forwarded_proto == header_values.SchemeValues.Https); } bool CacheFilterUtils::isCacheableResponse(const Http::ResponseHeaderMap& headers) { - const absl::string_view cache_control = headers.getCacheControlValue(); + const absl::string_view cache_control = headers.getInlineValue(cache_control_handle.handle()); // TODO(toddmgreer): fully check for cacheability. See for example // https://github.com/apache/incubator-pagespeed-mod/blob/master/pagespeed/kernel/http/caching_headers.h. return !StringUtil::caseFindToken(cache_control, ",", - Http::Headers::get().CacheControlValues.Private); + Http::CustomHeaders::get().CacheControlValues.Private); } } // namespace Cache diff --git a/source/extensions/filters/http/cache/http_cache.cc b/source/extensions/filters/http/cache/http_cache.cc index fc755ea1e974..dc75dfdfe38a 100644 --- a/source/extensions/filters/http/cache/http_cache.cc +++ b/source/extensions/filters/http/cache/http_cache.cc @@ -17,6 +17,11 @@ namespace Extensions { namespace HttpFilters { namespace Cache { +Http::RegisterCustomInlineHeader + request_cache_control_handle(Http::CustomHeaders::get().CacheControl); +Http::RegisterCustomInlineHeader + response_cache_control_handle(Http::CustomHeaders::get().CacheControl); + std::ostream& operator<<(std::ostream& os, CacheEntryStatus status) { switch (status) { case CacheEntryStatus::Ok: @@ -38,7 +43,8 @@ std::ostream& operator<<(std::ostream& os, const AdjustedByteRange& range) { } LookupRequest::LookupRequest(const Http::RequestHeaderMap& request_headers, SystemTime timestamp) - : timestamp_(timestamp), request_cache_control_(request_headers.getCacheControlValue()) { + : timestamp_(timestamp), request_cache_control_(request_headers.getInlineValue( + request_cache_control_handle.handle())) { // These ASSERTs check prerequisites. A request without these headers can't be looked up in cache; // CacheFilter doesn't create LookupRequests for such requests. 
ASSERT(request_headers.Path(), "Can't form cache lookup key for malformed Http::RequestHeaderMap " @@ -73,7 +79,8 @@ bool LookupRequest::isFresh(const Http::ResponseHeaderMap& response_headers) con if (!response_headers.Date()) { return false; } - const Http::HeaderEntry* cache_control_header = response_headers.CacheControl(); + const Http::HeaderEntry* cache_control_header = + response_headers.getInline(response_cache_control_handle.handle()); if (cache_control_header) { const SystemTime::duration effective_max_age = HttpCacheUtils::effectiveMaxAge(cache_control_header->value().getStringView()); diff --git a/source/extensions/filters/http/common/compressor/compressor.cc b/source/extensions/filters/http/common/compressor/compressor.cc index 1b961017fcbc..482abaf348dd 100644 --- a/source/extensions/filters/http/common/compressor/compressor.cc +++ b/source/extensions/filters/http/common/compressor/compressor.cc @@ -11,6 +11,17 @@ namespace Compressors { namespace { +Http::RegisterCustomInlineHeader + accept_encoding_handle(Http::CustomHeaders::get().AcceptEncoding); +Http::RegisterCustomInlineHeader + cache_control_handle(Http::CustomHeaders::get().CacheControl); +Http::RegisterCustomInlineHeader + content_encoding_handle(Http::CustomHeaders::get().ContentEncoding); +Http::RegisterCustomInlineHeader + etag_handle(Http::CustomHeaders::get().Etag); +Http::RegisterCustomInlineHeader + vary_handle(Http::CustomHeaders::get().Vary); + // Default minimum length of an upstream response that allows compression. const uint64_t DefaultMinimumContentLength = 30; @@ -61,7 +72,7 @@ CompressorFilter::CompressorFilter(const CompressorFilterConfigSharedPtr config) : skip_compression_{true}, config_(std::move(config)) {} Http::FilterHeadersStatus CompressorFilter::decodeHeaders(Http::RequestHeaderMap& headers, bool) { - const Http::HeaderEntry* accept_encoding = headers.AcceptEncoding(); + const Http::HeaderEntry* accept_encoding = headers.getInline(accept_encoding_handle.handle()); if (accept_encoding != nullptr) { // Capture the value of the "Accept-Encoding" request header to use it later when making // decision on compressing the corresponding HTTP response. @@ -69,7 +80,7 @@ Http::FilterHeadersStatus CompressorFilter::decodeHeaders(Http::RequestHeaderMap } if (config_->enabled() && config_->removeAcceptEncodingHeader()) { - headers.removeAcceptEncoding(); + headers.removeInline(accept_encoding_handle.handle()); } return Http::FilterHeadersStatus::Continue; @@ -103,12 +114,12 @@ Http::FilterHeadersStatus CompressorFilter::encodeHeaders(Http::ResponseHeaderMa if (!end_stream && config_->enabled() && isMinimumContentLength(headers) && isAcceptEncodingAllowed(headers) && isContentTypeAllowed(headers) && !hasCacheControlNoTransform(headers) && isEtagAllowed(headers) && - isTransferEncodingAllowed(headers) && !headers.ContentEncoding()) { + isTransferEncodingAllowed(headers) && !headers.getInline(content_encoding_handle.handle())) { skip_compression_ = false; sanitizeEtagHeader(headers); insertVaryHeader(headers); headers.removeContentLength(); - headers.setContentEncoding(config_->contentEncoding()); + headers.setInline(content_encoding_handle.handle(), config_->contentEncoding()); config_->stats().compressed_.inc(); // Finally instantiate the compressor. 
compressor_ = config_->makeCompressor(); @@ -139,10 +150,10 @@ Http::FilterTrailersStatus CompressorFilter::encodeTrailers(Http::ResponseTraile } bool CompressorFilter::hasCacheControlNoTransform(Http::ResponseHeaderMap& headers) const { - const Http::HeaderEntry* cache_control = headers.CacheControl(); + const Http::HeaderEntry* cache_control = headers.getInline(cache_control_handle.handle()); if (cache_control) { return StringUtil::caseFindToken(cache_control->value().getStringView(), ",", - Http::Headers::get().CacheControlValues.NoTransform); + Http::CustomHeaders::get().CacheControlValues.NoTransform); } return false; @@ -236,18 +247,18 @@ CompressorFilter::chooseEncoding(const Http::ResponseHeaderMap& headers) const { // If there's no intersection between accepted encodings and the ones provided by the allowed // compressors, then only the "identity" encoding is acceptable. return std::make_unique( - Http::Headers::get().AcceptEncodingValues.Identity, + Http::CustomHeaders::get().AcceptEncodingValues.Identity, CompressorFilter::EncodingDecision::HeaderStat::NotValid); } // Find intersection of encodings accepted by the user agent and provided // by the allowed compressors and choose the one with the highest q-value. - EncPair choice{Http::Headers::get().AcceptEncodingValues.Identity, static_cast(0)}; + EncPair choice{Http::CustomHeaders::get().AcceptEncodingValues.Identity, static_cast(0)}; for (const auto& pair : pairs) { if ((pair.second > choice.second) && (allowed_compressors.count(std::string(pair.first)) || - pair.first == Http::Headers::get().AcceptEncodingValues.Identity || - pair.first == Http::Headers::get().AcceptEncodingValues.Wildcard)) { + pair.first == Http::CustomHeaders::get().AcceptEncodingValues.Identity || + pair.first == Http::CustomHeaders::get().AcceptEncodingValues.Wildcard)) { choice = pair; } } @@ -255,19 +266,19 @@ CompressorFilter::chooseEncoding(const Http::ResponseHeaderMap& headers) const { if (!choice.second) { // The value of "Accept-Encoding" must be invalid as we ended up with zero q-value. return std::make_unique( - Http::Headers::get().AcceptEncodingValues.Identity, + Http::CustomHeaders::get().AcceptEncodingValues.Identity, CompressorFilter::EncodingDecision::HeaderStat::NotValid); } // The "identity" encoding (no compression) is always available. - if (choice.first == Http::Headers::get().AcceptEncodingValues.Identity) { + if (choice.first == Http::CustomHeaders::get().AcceptEncodingValues.Identity) { return std::make_unique( - Http::Headers::get().AcceptEncodingValues.Identity, + Http::CustomHeaders::get().AcceptEncodingValues.Identity, CompressorFilter::EncodingDecision::HeaderStat::Identity); } // If wildcard is given then use which ever compressor is registered first. 
- if (choice.first == Http::Headers::get().AcceptEncodingValues.Wildcard) { + if (choice.first == Http::CustomHeaders::get().AcceptEncodingValues.Wildcard) { auto first_registered = std::min_element( allowed_compressors.begin(), allowed_compressors.end(), [](const std::pair& a, @@ -351,7 +362,8 @@ bool CompressorFilter::isContentTypeAllowed(Http::ResponseHeaderMap& headers) co } bool CompressorFilter::isEtagAllowed(Http::ResponseHeaderMap& headers) const { - const bool is_etag_allowed = !(config_->disableOnEtagHeader() && headers.Etag()); + const bool is_etag_allowed = + !(config_->disableOnEtagHeader() && headers.getInline(etag_handle.handle())); if (!is_etag_allowed) { config_->stats().not_compressed_etag_.inc(); } @@ -395,17 +407,18 @@ bool CompressorFilter::isTransferEncodingAllowed(Http::ResponseHeaderMap& header } void CompressorFilter::insertVaryHeader(Http::ResponseHeaderMap& headers) { - const Http::HeaderEntry* vary = headers.Vary(); + const Http::HeaderEntry* vary = headers.getInline(vary_handle.handle()); if (vary != nullptr) { if (!StringUtil::findToken(vary->value().getStringView(), ",", - Http::Headers::get().VaryValues.AcceptEncoding, true)) { + Http::CustomHeaders::get().VaryValues.AcceptEncoding, true)) { std::string new_header; absl::StrAppend(&new_header, vary->value().getStringView(), ", ", - Http::Headers::get().VaryValues.AcceptEncoding); - headers.setVary(new_header); + Http::CustomHeaders::get().VaryValues.AcceptEncoding); + headers.setInline(vary_handle.handle(), new_header); } } else { - headers.setReferenceVary(Http::Headers::get().VaryValues.AcceptEncoding); + headers.setReferenceInline(vary_handle.handle(), + Http::CustomHeaders::get().VaryValues.AcceptEncoding); } } @@ -415,11 +428,11 @@ void CompressorFilter::insertVaryHeader(Http::ResponseHeaderMap& headers) { // This design attempts to stay more on the safe side by preserving weak etags and removing // the strong ones when disable_on_etag_header is false. Envoy does NOT re-write entity tags. 
void CompressorFilter::sanitizeEtagHeader(Http::ResponseHeaderMap& headers) { - const Http::HeaderEntry* etag = headers.Etag(); + const Http::HeaderEntry* etag = headers.getInline(etag_handle.handle()); if (etag != nullptr) { absl::string_view value(etag->value().getStringView()); if (value.length() > 2 && !((value[0] == 'w' || value[0] == 'W') && value[1] == '/')) { - headers.removeEtag(); + headers.removeInline(etag_handle.handle()); } } } diff --git a/source/extensions/filters/http/cors/cors_filter.cc b/source/extensions/filters/http/cors/cors_filter.cc index 103ee87277e1..976e9b336f6c 100644 --- a/source/extensions/filters/http/cors/cors_filter.cc +++ b/source/extensions/filters/http/cors/cors_filter.cc @@ -15,19 +15,22 @@ namespace HttpFilters { namespace Cors { Http::RegisterCustomInlineHeader - access_control_request_method(Http::Headers::get().AccessControlRequestMethod); + access_control_request_method_handle(Http::CustomHeaders::get().AccessControlRequestMethod); +Http::RegisterCustomInlineHeader + origin_handle(Http::CustomHeaders::get().Origin); Http::RegisterCustomInlineHeader - access_control_allow_origin(Http::Headers::get().AccessControlAllowOrigin); + access_control_allow_origin_handle(Http::CustomHeaders::get().AccessControlAllowOrigin); Http::RegisterCustomInlineHeader - access_control_allow_credentials(Http::Headers::get().AccessControlAllowCredentials); + access_control_allow_credentials_handle( + Http::CustomHeaders::get().AccessControlAllowCredentials); Http::RegisterCustomInlineHeader - access_control_allow_methods(Http::Headers::get().AccessControlAllowMethods); + access_control_allow_methods_handle(Http::CustomHeaders::get().AccessControlAllowMethods); Http::RegisterCustomInlineHeader - access_control_allow_headers(Http::Headers::get().AccessControlAllowHeaders); + access_control_allow_headers_handle(Http::CustomHeaders::get().AccessControlAllowHeaders); Http::RegisterCustomInlineHeader - access_control_max_age(Http::Headers::get().AccessControlMaxAge); + access_control_max_age_handle(Http::CustomHeaders::get().AccessControlMaxAge); Http::RegisterCustomInlineHeader - access_control_expose_headers(Http::Headers::get().AccessControlExposeHeaders); + access_control_expose_headers_handle(Http::CustomHeaders::get().AccessControlExposeHeaders); CorsFilterConfig::CorsFilterConfig(const std::string& stats_prefix, Stats::Scope& scope) : stats_(generateStats(stats_prefix + "cors.", scope)) {} @@ -52,7 +55,7 @@ Http::FilterHeadersStatus CorsFilter::decodeHeaders(Http::RequestHeaderMap& head return Http::FilterHeadersStatus::Continue; } - origin_ = headers.Origin(); + origin_ = headers.getInline(origin_handle.handle()); if (origin_ == nullptr || origin_->value().empty()) { return Http::FilterHeadersStatus::Continue; } @@ -74,31 +77,31 @@ Http::FilterHeadersStatus CorsFilter::decodeHeaders(Http::RequestHeaderMap& head return Http::FilterHeadersStatus::Continue; } - if (headers.getInlineValue(access_control_request_method.handle()).empty()) { + if (headers.getInlineValue(access_control_request_method_handle.handle()).empty()) { return Http::FilterHeadersStatus::Continue; } auto response_headers{Http::createHeaderMap( {{Http::Headers::get().Status, std::to_string(enumToInt(Http::Code::OK))}})}; - response_headers->setInline(access_control_allow_origin.handle(), + response_headers->setInline(access_control_allow_origin_handle.handle(), origin_->value().getStringView()); if (allowCredentials()) { - response_headers->setReferenceInline(access_control_allow_credentials.handle(), - 
Http::Headers::get().CORSValues.True); + response_headers->setReferenceInline(access_control_allow_credentials_handle.handle(), + Http::CustomHeaders::get().CORSValues.True); } if (!allowMethods().empty()) { - response_headers->setInline(access_control_allow_methods.handle(), allowMethods()); + response_headers->setInline(access_control_allow_methods_handle.handle(), allowMethods()); } if (!allowHeaders().empty()) { - response_headers->setInline(access_control_allow_headers.handle(), allowHeaders()); + response_headers->setInline(access_control_allow_headers_handle.handle(), allowHeaders()); } if (!maxAge().empty()) { - response_headers->setInline(access_control_max_age.handle(), maxAge()); + response_headers->setInline(access_control_max_age_handle.handle(), maxAge()); } decoder_callbacks_->encodeHeaders(std::move(response_headers), true); @@ -113,14 +116,14 @@ Http::FilterHeadersStatus CorsFilter::encodeHeaders(Http::ResponseHeaderMap& hea return Http::FilterHeadersStatus::Continue; } - headers.setInline(access_control_allow_origin.handle(), origin_->value().getStringView()); + headers.setInline(access_control_allow_origin_handle.handle(), origin_->value().getStringView()); if (allowCredentials()) { - headers.setReferenceInline(access_control_allow_credentials.handle(), - Http::Headers::get().CORSValues.True); + headers.setReferenceInline(access_control_allow_credentials_handle.handle(), + Http::CustomHeaders::get().CORSValues.True); } if (!exposeHeaders().empty()) { - headers.setInline(access_control_expose_headers.handle(), exposeHeaders()); + headers.setInline(access_control_expose_headers_handle.handle(), exposeHeaders()); } return Http::FilterHeadersStatus::Continue; diff --git a/source/extensions/filters/http/csrf/csrf_filter.cc b/source/extensions/filters/http/csrf/csrf_filter.cc index dbd0f5f02c0e..d852a78e31a5 100644 --- a/source/extensions/filters/http/csrf/csrf_filter.cc +++ b/source/extensions/filters/http/csrf/csrf_filter.cc @@ -15,6 +15,11 @@ namespace Extensions { namespace HttpFilters { namespace Csrf { +Http::RegisterCustomInlineHeader + origin_handle(Http::CustomHeaders::get().Origin); +Http::RegisterCustomInlineHeader + referer_handle(Http::CustomHeaders::get().Referer); + struct RcDetailsValues { const std::string OriginMismatch = "csrf_origin_mismatch"; }; @@ -43,11 +48,11 @@ absl::string_view hostAndPort(const absl::string_view header) { } absl::string_view sourceOriginValue(const Http::RequestHeaderMap& headers) { - const absl::string_view origin = hostAndPort(headers.getOriginValue()); + const absl::string_view origin = hostAndPort(headers.getInlineValue(origin_handle.handle())); if (origin != EMPTY_STRING) { return origin; } - return hostAndPort(headers.getRefererValue()); + return hostAndPort(headers.getInlineValue(referer_handle.handle())); } absl::string_view targetOriginValue(const Http::RequestHeaderMap& headers) { diff --git a/source/extensions/filters/http/decompressor/decompressor_filter.cc b/source/extensions/filters/http/decompressor/decompressor_filter.cc index 284b14fe841d..ae36a25457cd 100644 --- a/source/extensions/filters/http/decompressor/decompressor_filter.cc +++ b/source/extensions/filters/http/decompressor/decompressor_filter.cc @@ -3,13 +3,23 @@ #include "common/buffer/buffer_impl.h" #include "common/common/empty_string.h" #include "common/common/macros.h" -#include "common/http/headers.h" namespace Envoy { namespace Extensions { namespace HttpFilters { namespace Decompressor { +Http::RegisterCustomInlineHeader + 
accept_encoding_handle(Http::CustomHeaders::get().AcceptEncoding); +Http::RegisterCustomInlineHeader + cache_control_request_handle(Http::CustomHeaders::get().CacheControl); +Http::RegisterCustomInlineHeader + content_encoding_request_handle(Http::CustomHeaders::get().ContentEncoding); +Http::RegisterCustomInlineHeader + cache_control_response_handle(Http::CustomHeaders::get().CacheControl); +Http::RegisterCustomInlineHeader + content_encoding_response_handle(Http::CustomHeaders::get().ContentEncoding); + DecompressorFilterConfig::DecompressorFilterConfig( const envoy::extensions::filters::http::decompressor::v3::Decompressor& proto_config, const std::string& stats_prefix, Stats::Scope& scope, Runtime::Loader& runtime, @@ -60,10 +70,10 @@ Http::FilterHeadersStatus DecompressorFilter::decodeHeaders(Http::RequestHeaderM // the upstream that this hop is able to decompress responses via the Accept-Encoding header. if (config_->responseDirectionConfig().decompressionEnabled() && config_->requestDirectionConfig().advertiseAcceptEncoding()) { - headers.appendAcceptEncoding(config_->contentEncoding(), ","); + headers.appendInline(accept_encoding_handle.handle(), config_->contentEncoding(), ","); ENVOY_STREAM_LOG(debug, "DecompressorFilter::decodeHeaders advertise Accept-Encoding with value '{}'", - *decoder_callbacks_, headers.AcceptEncoding()->value().getStringView()); + *decoder_callbacks_, headers.getInlineValue(accept_encoding_handle.handle())); } // 2. If request decompression is enabled, then decompress the request. @@ -93,30 +103,6 @@ Http::FilterDataStatus DecompressorFilter::encodeData(Buffer::Instance& data, bo *encoder_callbacks_, data); } -Http::FilterHeadersStatus DecompressorFilter::maybeInitDecompress( - const DecompressorFilterConfig::DirectionConfig& direction_config, - Compression::Decompressor::DecompressorPtr& decompressor, - Http::StreamFilterCallbacks& callbacks, Http::RequestOrResponseHeaderMap& headers) { - if (direction_config.decompressionEnabled() && !hasCacheControlNoTransform(headers) && - contentEncodingMatches(headers)) { - direction_config.stats().decompressed_.inc(); - decompressor = config_->makeDecompressor(); - - // Update headers. - headers.removeContentLength(); - modifyContentEncoding(headers); - - ENVOY_STREAM_LOG(debug, "do decompress {}: {}", callbacks, direction_config.logString(), - headers); - } else { - direction_config.stats().not_decompressed_.inc(); - ENVOY_STREAM_LOG(debug, "do not decompress {}: {}", callbacks, direction_config.logString(), - headers); - } - - return Http::FilterHeadersStatus::Continue; -} - Http::FilterDataStatus DecompressorFilter::maybeDecompress( const DecompressorFilterConfig::DirectionConfig& direction_config, const Compression::Decompressor::DecompressorPtr& decompressor, @@ -137,36 +123,28 @@ Http::FilterDataStatus DecompressorFilter::maybeDecompress( return Http::FilterDataStatus::Continue; } -bool DecompressorFilter::hasCacheControlNoTransform( - Http::RequestOrResponseHeaderMap& headers) const { - return headers.CacheControl() - ? StringUtil::caseFindToken(headers.CacheControl()->value().getStringView(), ",", - Http::Headers::get().CacheControlValues.NoTransform) - : false; +template <> +Http::CustomInlineHeaderRegistry::Handle +DecompressorFilter::getCacheControlHandle() { + return cache_control_request_handle.handle(); } -/** - * Content-Encoding matches if the configured encoding is the first value in the comma-delimited - * Content-Encoding header, regardless of spacing and casing. 
- */ -bool DecompressorFilter::contentEncodingMatches(Http::RequestOrResponseHeaderMap& headers) const { - if (headers.ContentEncoding()) { - absl::string_view coding = StringUtil::trim( - StringUtil::cropRight(headers.ContentEncoding()->value().getStringView(), ",")); - return StringUtil::CaseInsensitiveCompare()(config_->contentEncoding(), coding); - } - return false; +template <> +Http::CustomInlineHeaderRegistry::Handle +DecompressorFilter::getCacheControlHandle() { + return cache_control_response_handle.handle(); } -void DecompressorFilter::modifyContentEncoding(Http::RequestOrResponseHeaderMap& headers) const { - const auto all_codings = StringUtil::trim(headers.ContentEncoding()->value().getStringView()); - const auto remaining_codings = StringUtil::trim(StringUtil::cropLeft(all_codings, ",")); +template <> +Http::CustomInlineHeaderRegistry::Handle +DecompressorFilter::getContentEncodingHandle() { + return content_encoding_request_handle.handle(); +} - if (remaining_codings != all_codings) { - headers.setContentEncoding(remaining_codings); - } else { - headers.removeContentEncoding(); - } +template <> +Http::CustomInlineHeaderRegistry::Handle +DecompressorFilter::getContentEncodingHandle() { + return content_encoding_response_handle.handle(); } } // namespace Decompressor diff --git a/source/extensions/filters/http/decompressor/decompressor_filter.h b/source/extensions/filters/http/decompressor/decompressor_filter.h index d7017e385771..ec6df0b35539 100644 --- a/source/extensions/filters/http/decompressor/decompressor_filter.h +++ b/source/extensions/filters/http/decompressor/decompressor_filter.h @@ -6,6 +6,7 @@ #include "envoy/http/filter.h" #include "common/common/macros.h" +#include "common/http/headers.h" #include "common/runtime/runtime_protos.h" #include "extensions/filters/http/common/pass_through_filter.h" @@ -126,22 +127,75 @@ class DecompressorFilter : public Http::PassThroughFilter, Http::FilterDataStatus encodeData(Buffer::Instance&, bool) override; private: + template Http::FilterHeadersStatus maybeInitDecompress(const DecompressorFilterConfig::DirectionConfig& direction_config, Compression::Decompressor::DecompressorPtr& decompressor, - Http::StreamFilterCallbacks& callbacks, - Http::RequestOrResponseHeaderMap& headers); + Http::StreamFilterCallbacks& callbacks, HeaderType& headers) { + if (direction_config.decompressionEnabled() && !hasCacheControlNoTransform(headers) && + contentEncodingMatches(headers)) { + direction_config.stats().decompressed_.inc(); + decompressor = config_->makeDecompressor(); + + // Update headers. + headers.removeContentLength(); + modifyContentEncoding(headers); + + ENVOY_STREAM_LOG(debug, "do decompress {}: {}", callbacks, direction_config.logString(), + headers); + } else { + direction_config.stats().not_decompressed_.inc(); + ENVOY_STREAM_LOG(debug, "do not decompress {}: {}", callbacks, direction_config.logString(), + headers); + } + + return Http::FilterHeadersStatus::Continue; + } Http::FilterDataStatus maybeDecompress(const DecompressorFilterConfig::DirectionConfig& direction_config, const Compression::Decompressor::DecompressorPtr& decompressor, Http::StreamFilterCallbacks& callbacks, Buffer::Instance& input_buffer) const; - // TODO(junr03): these do not need to be member functions. They can all be part of a static - // utility class. Moreover, they can be shared between compressor and decompressor. 
- bool hasCacheControlNoTransform(Http::RequestOrResponseHeaderMap& headers) const; - bool contentEncodingMatches(Http::RequestOrResponseHeaderMap& headers) const; - void modifyContentEncoding(Http::RequestOrResponseHeaderMap& headers) const; + // TODO(junr03): These can be shared between compressor and decompressor. + template + static Http::CustomInlineHeaderRegistry::Handle getCacheControlHandle(); + template static bool hasCacheControlNoTransform(HeaderType& headers) { + const auto handle = getCacheControlHandle(); + return headers.getInline(handle) + ? StringUtil::caseFindToken( + headers.getInlineValue(handle), ",", + Http::CustomHeaders::get().CacheControlValues.NoTransform) + : false; + } + + /** + * Content-Encoding matches if the configured encoding is the first value in the comma-delimited + * Content-Encoding header, regardless of spacing and casing. + */ + template + static Http::CustomInlineHeaderRegistry::Handle getContentEncodingHandle(); + template bool contentEncodingMatches(HeaderType& headers) const { + const auto handle = getContentEncodingHandle(); + if (headers.getInline(handle)) { + absl::string_view coding = + StringUtil::trim(StringUtil::cropRight(headers.getInlineValue(handle), ",")); + return StringUtil::CaseInsensitiveCompare()(config_->contentEncoding(), coding); + } + return false; + } + + template static void modifyContentEncoding(HeaderType& headers) { + const auto handle = getContentEncodingHandle(); + const auto all_codings = StringUtil::trim(headers.getInlineValue(handle)); + const auto remaining_codings = StringUtil::trim(StringUtil::cropLeft(all_codings, ",")); + + if (remaining_codings != all_codings) { + headers.setInline(handle, remaining_codings); + } else { + headers.removeInline(handle); + } + } DecompressorFilterConfigSharedPtr config_; Compression::Decompressor::DecompressorPtr request_decompressor_{}; diff --git a/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.cc b/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.cc index 955bf6416829..972b31275ff6 100644 --- a/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.cc +++ b/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.cc @@ -16,6 +16,9 @@ namespace Extensions { namespace HttpFilters { namespace GrpcHttp1ReverseBridge { +Http::RegisterCustomInlineHeader + accept_handle(Http::CustomHeaders::get().Accept); + struct RcDetailsValues { // The gRPC HTTP/1 reverse bridge failed because the body payload was too // small to be a gRPC frame. @@ -93,7 +96,7 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, // gRPC content type variations such as application/grpc+proto. content_type_ = std::string(headers.getContentTypeValue()); headers.setContentType(upstream_content_type_); - headers.setAccept(upstream_content_type_); + headers.setInline(accept_handle.handle(), upstream_content_type_); if (withhold_grpc_frames_) { // Adjust the content-length header to account for us removing the gRPC frame header. 
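The content-length adjustment mentioned in the comment above accounts for the standard
gRPC message frame prefix of 5 bytes (a 1-byte compressed flag followed by a 4-byte
big-endian message length). A minimal sketch of that bookkeeping, assuming a locally
defined constant and helper that are not taken from this diff:

  constexpr uint64_t kGrpcFrameHeaderSize = 5; // 1 compressed-flag byte + 4 length bytes.

  // Illustrative helper: shrink the request content-length when the frame prefix is
  // withheld from the upstream request.
  void adjustContentLengthForStrippedFrame(Http::RequestHeaderMap& headers) {
    uint64_t length = 0;
    if (absl::SimpleAtoi(headers.getContentLengthValue(), &length) &&
        length >= kGrpcFrameHeaderSize) {
      headers.setContentLength(length - kGrpcFrameHeaderSize);
    }
  }
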
diff --git a/source/extensions/filters/http/grpc_web/grpc_web_filter.cc b/source/extensions/filters/http/grpc_web/grpc_web_filter.cc index 727d7110e8f2..8cecc0a4003b 100644 --- a/source/extensions/filters/http/grpc_web/grpc_web_filter.cc +++ b/source/extensions/filters/http/grpc_web/grpc_web_filter.cc @@ -17,6 +17,11 @@ namespace Extensions { namespace HttpFilters { namespace GrpcWeb { +Http::RegisterCustomInlineHeader + accept_handle(Http::CustomHeaders::get().Accept); +Http::RegisterCustomInlineHeader + grpc_accept_encoding_handle(Http::CustomHeaders::get().GrpcAcceptEncoding); + struct RcDetailsValues { // The grpc web filter couldn't decode the data as the size wasn't a multiple of 4. const std::string GrpcDecodeFailedDueToSize = "grpc_base_64_decode_failed_bad_size"; @@ -71,7 +76,7 @@ Http::FilterHeadersStatus GrpcWebFilter::decodeHeaders(Http::RequestHeaderMap& h } headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Grpc); - const absl::string_view accept = headers.getAcceptValue(); + const absl::string_view accept = headers.getInlineValue(accept_handle.handle()); if (accept == Http::Headers::get().ContentTypeValues.GrpcWebText || accept == Http::Headers::get().ContentTypeValues.GrpcWebTextProto) { // Checks whether gRPC-Web client is asking for base64 encoded response. @@ -81,7 +86,8 @@ Http::FilterHeadersStatus GrpcWebFilter::decodeHeaders(Http::RequestHeaderMap& h // Adds te:trailers to upstream HTTP2 request. It's required for gRPC. headers.setReferenceTE(Http::Headers::get().TEValues.Trailers); // Adds grpc-accept-encoding:identity,deflate,gzip. It's required for gRPC. - headers.setReferenceGrpcAcceptEncoding(Http::Headers::get().GrpcAcceptEncodingValues.Default); + headers.setReferenceInline(grpc_accept_encoding_handle.handle(), + Http::CustomHeaders::get().GrpcAcceptEncodingValues.Default); return Http::FilterHeadersStatus::Continue; } diff --git a/source/extensions/filters/http/gzip/gzip_filter.cc b/source/extensions/filters/http/gzip/gzip_filter.cc index d233e6f20bc3..804c3b9bd337 100644 --- a/source/extensions/filters/http/gzip/gzip_filter.cc +++ b/source/extensions/filters/http/gzip/gzip_filter.cc @@ -28,7 +28,7 @@ GzipFilterConfig::GzipFilterConfig(const envoy::extensions::filters::http::gzip: const std::string& stats_prefix, Stats::Scope& scope, Runtime::Loader& runtime) : CompressorFilterConfig(compressorConfig(gzip), stats_prefix + "gzip.", scope, runtime, - Http::Headers::get().ContentEncodingValues.Gzip), + Http::CustomHeaders::get().ContentEncodingValues.Gzip), compression_level_(compressionLevelEnum(gzip.compression_level())), compression_strategy_(compressionStrategyEnum(gzip.compression_strategy())), memory_level_(memoryLevelUint(gzip.memory_level().value())), diff --git a/source/extensions/filters/http/jwt_authn/extractor.cc b/source/extensions/filters/http/jwt_authn/extractor.cc index fda2c37c91ce..6e02093c0749 100644 --- a/source/extensions/filters/http/jwt_authn/extractor.cc +++ b/source/extensions/filters/http/jwt_authn/extractor.cc @@ -153,7 +153,7 @@ void ExtractorImpl::addProvider(const JwtProvider& provider) { } // If not specified, use default locations. 
if (provider.from_headers().empty() && provider.from_params().empty()) { - addHeaderConfig(provider.issuer(), Http::Headers::get().Authorization, + addHeaderConfig(provider.issuer(), Http::CustomHeaders::get().Authorization, JwtConstValues::get().BearerPrefix); addQueryParamConfig(provider.issuer(), JwtConstValues::get().AccessTokenParam); } diff --git a/source/extensions/filters/http/jwt_authn/filter.cc b/source/extensions/filters/http/jwt_authn/filter.cc index 36b49855a068..753c7d511a0b 100644 --- a/source/extensions/filters/http/jwt_authn/filter.cc +++ b/source/extensions/filters/http/jwt_authn/filter.cc @@ -17,12 +17,14 @@ namespace JwtAuthn { namespace { Http::RegisterCustomInlineHeader - access_control_request_method(Http::Headers::get().AccessControlRequestMethod); + access_control_request_method_handle(Http::CustomHeaders::get().AccessControlRequestMethod); +Http::RegisterCustomInlineHeader + origin_handle(Http::CustomHeaders::get().AccessControlRequestMethod); bool isCorsPreflightRequest(const Http::RequestHeaderMap& headers) { return headers.getMethodValue() == Http::Headers::get().MethodValues.Options && - headers.Origin() && !headers.Origin()->value().empty() && - !headers.getInlineValue(access_control_request_method.handle()).empty(); + !headers.getInlineValue(origin_handle.handle()).empty() && + !headers.getInlineValue(access_control_request_method_handle.handle()).empty(); } } // namespace diff --git a/source/extensions/stat_sinks/hystrix/hystrix.cc b/source/extensions/stat_sinks/hystrix/hystrix.cc index c2317658a03b..abea32ca3dd6 100644 --- a/source/extensions/stat_sinks/hystrix/hystrix.cc +++ b/source/extensions/stat_sinks/hystrix/hystrix.cc @@ -23,9 +23,11 @@ namespace StatSinks { namespace Hystrix { Http::RegisterCustomInlineHeader - access_control_allow_origin(Http::Headers::get().AccessControlAllowOrigin); + access_control_allow_origin_handle(Http::CustomHeaders::get().AccessControlAllowOrigin); Http::RegisterCustomInlineHeader - access_control_allow_headers(Http::Headers::get().AccessControlAllowHeaders); + access_control_allow_headers_handle(Http::CustomHeaders::get().AccessControlAllowHeaders); +Http::RegisterCustomInlineHeader + cache_control_handle(Http::CustomHeaders::get().CacheControl); const uint64_t HystrixSink::DEFAULT_NUM_BUCKETS; ClusterStatsCache::ClusterStatsCache(const std::string& cluster_name) @@ -293,12 +295,13 @@ Http::Code HystrixSink::handlerHystrixEventStream(absl::string_view, Server::AdminStream& admin_stream) { response_headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.TextEventStream); - response_headers.setReferenceCacheControl(Http::Headers::get().CacheControlValues.NoCache); + response_headers.setReferenceInline(cache_control_handle.handle(), + Http::CustomHeaders::get().CacheControlValues.NoCache); response_headers.setReferenceConnection(Http::Headers::get().ConnectionValues.Close); - response_headers.setReferenceInline(access_control_allow_headers.handle(), + response_headers.setReferenceInline(access_control_allow_headers_handle.handle(), AccessControlAllowHeadersValue.AllowHeadersHystrix); - response_headers.setReferenceInline(access_control_allow_origin.handle(), - Http::Headers::get().AccessControlAllowOriginValue.All); + response_headers.setReferenceInline(access_control_allow_origin_handle.handle(), + Http::CustomHeaders::get().AccessControlAllowOriginValue.All); Http::StreamDecoderFilterCallbacks& stream_decoder_filter_callbacks = admin_stream.getDecoderFilterCallbacks(); diff --git 
a/source/extensions/tracers/common/ot/opentracing_driver_impl.cc b/source/extensions/tracers/common/ot/opentracing_driver_impl.cc index 6f7fbd23aff8..710d94615d59 100644 --- a/source/extensions/tracers/common/ot/opentracing_driver_impl.cc +++ b/source/extensions/tracers/common/ot/opentracing_driver_impl.cc @@ -15,6 +15,9 @@ namespace Tracers { namespace Common { namespace Ot { +Http::RegisterCustomInlineHeader + ot_span_context_handle(Http::CustomHeaders::get().OtSpanContext); + namespace { class OpenTracingHTTPHeadersWriter : public opentracing::HTTPHeadersWriter { public: @@ -110,7 +113,8 @@ void OpenTracingSpan::injectContext(Http::RequestHeaderMap& request_headers) { return; } const std::string current_span_context = oss.str(); - request_headers.setOtSpanContext( + request_headers.setInline( + ot_span_context_handle.handle(), Base64::encode(current_span_context.c_str(), current_span_context.length())); } else { // Inject the context using the tracer's standard HTTP header format. @@ -149,10 +153,11 @@ Tracing::SpanPtr OpenTracingDriver::startSpan(const Tracing::Config& config, const opentracing::Tracer& tracer = this->tracer(); std::unique_ptr active_span; std::unique_ptr parent_span_ctx; - if (propagation_mode == PropagationMode::SingleHeader && request_headers.OtSpanContext()) { + if (propagation_mode == PropagationMode::SingleHeader && + request_headers.getInline(ot_span_context_handle.handle())) { opentracing::expected> parent_span_ctx_maybe; - std::string parent_context = - Base64::decode(std::string(request_headers.getOtSpanContextValue())); + std::string parent_context = Base64::decode( + std::string(request_headers.getInlineValue(ot_span_context_handle.handle()))); if (!parent_context.empty()) { InputConstMemoryStream istream{parent_context.data(), parent_context.size()}; diff --git a/source/server/admin/utils.cc b/source/server/admin/utils.cc index 3c2442bb2894..eaa5a6689a02 100644 --- a/source/server/admin/utils.cc +++ b/source/server/admin/utils.cc @@ -23,18 +23,19 @@ envoy::admin::v3::ServerInfo::State serverState(Init::Manager::State state, void populateFallbackResponseHeaders(Http::Code code, Http::ResponseHeaderMap& header_map) { header_map.setStatus(std::to_string(enumToInt(code))); - const auto& headers = Http::Headers::get(); if (header_map.ContentType() == nullptr) { // Default to text-plain if unset. - header_map.setReferenceContentType(headers.ContentTypeValues.TextUtf8); + header_map.setReferenceContentType(Http::Headers::get().ContentTypeValues.TextUtf8); } // Default to 'no-cache' if unset, but not 'no-store' which may break the back button. - if (header_map.CacheControl() == nullptr) { - header_map.setReferenceCacheControl(headers.CacheControlValues.NoCacheMaxAge0); + if (header_map.get(Http::CustomHeaders::get().CacheControl) == nullptr) { + header_map.setReference(Http::CustomHeaders::get().CacheControl, + Http::CustomHeaders::get().CacheControlValues.NoCacheMaxAge0); } // Under no circumstance should browsers sniff content-type. - header_map.addReference(headers.XContentTypeOptions, headers.XContentTypeOptionValues.Nosniff); + header_map.addReference(Http::Headers::get().XContentTypeOptions, + Http::Headers::get().XContentTypeOptionValues.Nosniff); } // Helper method to get filter parameter, or report an error for an invalid regex. 
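The admin change above shows the other access path that remains after this cleanup: code
that is not on a hot path can skip handle registration entirely and use the generic map
API keyed by the LowerCaseString constants now grouped under Http::CustomHeaders. A short
sketch, with the helper name chosen only for illustration:

  // Cold-path lookup of a header that is no longer inline: a plain keyed lookup,
  // no static-init registration required.
  bool hasCacheControl(const Http::ResponseHeaderMap& headers) {
    return headers.get(Http::CustomHeaders::get().CacheControl) != nullptr;
  }
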
diff --git a/test/common/grpc/grpc_client_integration_test.cc b/test/common/grpc/grpc_client_integration_test.cc index dadfc3e7c684..e347226f0354 100644 --- a/test/common/grpc/grpc_client_integration_test.cc +++ b/test/common/grpc/grpc_client_integration_test.cc @@ -413,14 +413,13 @@ class GrpcAccessTokenClientIntegrationTest : public GrpcSslClientIntegrationTest void expectExtraHeaders(FakeStream& fake_stream) override { AssertionResult result = fake_stream.waitForHeadersComplete(); RELEASE_ASSERT(result, result.message()); - Http::TestRequestHeaderMapImpl stream_headers(fake_stream.headers()); + std::vector auth_headers; + Http::HeaderUtility::getAllOfHeader(fake_stream.headers(), "authorization", auth_headers); if (!access_token_value_.empty()) { - if (access_token_value_2_.empty()) { - EXPECT_EQ("Bearer " + access_token_value_, stream_headers.get_("authorization")); - } else { - EXPECT_EQ("Bearer " + access_token_value_ + ",Bearer " + access_token_value_2_, - stream_headers.get_("authorization")); - } + EXPECT_EQ("Bearer " + access_token_value_, auth_headers[0]); + } + if (!access_token_value_2_.empty()) { + EXPECT_EQ("Bearer " + access_token_value_2_, auth_headers[1]); } } diff --git a/test/common/http/header_map_impl_test.cc b/test/common/http/header_map_impl_test.cc index 205b145ca2a3..a9223f629046 100644 --- a/test/common/http/header_map_impl_test.cc +++ b/test/common/http/header_map_impl_test.cc @@ -445,20 +445,20 @@ TEST(HeaderMapImplTest, InlineAppend) { TEST(HeaderMapImplTest, MoveIntoInline) { TestRequestHeaderMapImpl headers; HeaderString key; - key.setCopy(Headers::get().CacheControl.get()); + key.setCopy(Headers::get().EnvoyRetryOn.get()); HeaderString value; value.setCopy("hello"); headers.addViaMove(std::move(key), std::move(value)); - EXPECT_EQ("cache-control", headers.CacheControl()->key().getStringView()); - EXPECT_EQ("hello", headers.getCacheControlValue()); + EXPECT_EQ("x-envoy-retry-on", headers.EnvoyRetryOn()->key().getStringView()); + EXPECT_EQ("hello", headers.getEnvoyRetryOnValue()); HeaderString key2; - key2.setCopy(Headers::get().CacheControl.get()); + key2.setCopy(Headers::get().EnvoyRetryOn.get()); HeaderString value2; value2.setCopy("there"); headers.addViaMove(std::move(key2), std::move(value2)); - EXPECT_EQ("cache-control", headers.CacheControl()->key().getStringView()); - EXPECT_EQ("hello,there", headers.getCacheControlValue()); + EXPECT_EQ("x-envoy-retry-on", headers.EnvoyRetryOn()->key().getStringView()); + EXPECT_EQ("hello,there", headers.getEnvoyRetryOnValue()); } TEST(HeaderMapImplTest, Remove) { @@ -787,19 +787,19 @@ TEST(HeaderMapImplTest, AddCopy) { EXPECT_EQ("42", headers.get(lcKey3)->value().getStringView()); EXPECT_EQ(2UL, headers.get(lcKey3)->value().size()); - LowerCaseString cache_control("cache-control"); - headers.addCopy(cache_control, "max-age=1345"); - EXPECT_EQ("max-age=1345", headers.get(cache_control)->value().getStringView()); - EXPECT_EQ("max-age=1345", headers.getCacheControlValue()); - headers.addCopy(cache_control, "public"); - EXPECT_EQ("max-age=1345,public", headers.get(cache_control)->value().getStringView()); - headers.addCopy(cache_control, ""); - EXPECT_EQ("max-age=1345,public", headers.get(cache_control)->value().getStringView()); - headers.addCopy(cache_control, 123); - EXPECT_EQ("max-age=1345,public,123", headers.get(cache_control)->value().getStringView()); - headers.addCopy(cache_control, std::numeric_limits::max()); + LowerCaseString envoy_retry_on("x-envoy-retry-on"); + headers.addCopy(envoy_retry_on, 
"max-age=1345"); + EXPECT_EQ("max-age=1345", headers.get(envoy_retry_on)->value().getStringView()); + EXPECT_EQ("max-age=1345", headers.getEnvoyRetryOnValue()); + headers.addCopy(envoy_retry_on, "public"); + EXPECT_EQ("max-age=1345,public", headers.get(envoy_retry_on)->value().getStringView()); + headers.addCopy(envoy_retry_on, ""); + EXPECT_EQ("max-age=1345,public", headers.get(envoy_retry_on)->value().getStringView()); + headers.addCopy(envoy_retry_on, 123); + EXPECT_EQ("max-age=1345,public,123", headers.get(envoy_retry_on)->value().getStringView()); + headers.addCopy(envoy_retry_on, std::numeric_limits::max()); EXPECT_EQ("max-age=1345,public,123,18446744073709551615", - headers.get(cache_control)->value().getStringView()); + headers.get(envoy_retry_on)->value().getStringView()); } TEST(HeaderMapImplTest, Equality) { diff --git a/test/extensions/common/aws/signer_impl_test.cc b/test/extensions/common/aws/signer_impl_test.cc index 2b4681ca15c9..2bae6a72b25e 100644 --- a/test/extensions/common/aws/signer_impl_test.cc +++ b/test/extensions/common/aws/signer_impl_test.cc @@ -54,7 +54,7 @@ class SignerImplTest : public testing::Test { TEST_F(SignerImplTest, AnonymousCredentials) { EXPECT_CALL(*credentials_provider_, getCredentials()).WillOnce(Return(Credentials())); signer_.sign(*message_); - EXPECT_EQ(nullptr, message_->headers().Authorization()); + EXPECT_EQ(nullptr, message_->headers().get(Http::CustomHeaders::get().Authorization)); } // HTTP :method header is required @@ -62,7 +62,7 @@ TEST_F(SignerImplTest, MissingMethodException) { EXPECT_CALL(*credentials_provider_, getCredentials()).WillOnce(Return(credentials_)); EXPECT_THROW_WITH_MESSAGE(signer_.sign(*message_), EnvoyException, "Message is missing :method header"); - EXPECT_EQ(nullptr, message_->headers().Authorization()); + EXPECT_EQ(nullptr, message_->headers().get(Http::CustomHeaders::get().Authorization)); } // HTTP :path header is required @@ -71,7 +71,7 @@ TEST_F(SignerImplTest, MissingPathException) { addMethod("GET"); EXPECT_THROW_WITH_MESSAGE(signer_.sign(*message_), EnvoyException, "Message is missing :path header"); - EXPECT_EQ(nullptr, message_->headers().Authorization()); + EXPECT_EQ(nullptr, message_->headers().get(Http::CustomHeaders::get().Authorization)); } // Verify we sign the date header @@ -83,10 +83,11 @@ TEST_F(SignerImplTest, SignDateHeader) { EXPECT_NE(nullptr, message_->headers().get(SignatureHeaders::get().ContentSha256)); EXPECT_EQ("20180102T030400Z", message_->headers().get(SignatureHeaders::get().Date)->value().getStringView()); - EXPECT_EQ("AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " - "SignedHeaders=x-amz-content-sha256;x-amz-date, " - "Signature=4ee6aa9355259c18133f150b139ea9aeb7969c9408ad361b2151f50a516afe42", - message_->headers().getAuthorizationValue()); + EXPECT_EQ( + "AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " + "SignedHeaders=x-amz-content-sha256;x-amz-date, " + "Signature=4ee6aa9355259c18133f150b139ea9aeb7969c9408ad361b2151f50a516afe42", + message_->headers().get(Http::CustomHeaders::get().Authorization)->value().getStringView()); } // Verify we sign the security token header if the token is present in the credentials @@ -98,10 +99,11 @@ TEST_F(SignerImplTest, SignSecurityTokenHeader) { EXPECT_EQ( "token", message_->headers().get(SignatureHeaders::get().SecurityToken)->value().getStringView()); - EXPECT_EQ("AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " - 
"SignedHeaders=x-amz-content-sha256;x-amz-date;x-amz-security-token, " - "Signature=1d42526aabf7d8b6d7d33d9db43b03537300cc7e6bb2817e349749e0a08f5b5e", - message_->headers().getAuthorizationValue()); + EXPECT_EQ( + "AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " + "SignedHeaders=x-amz-content-sha256;x-amz-date;x-amz-security-token, " + "Signature=1d42526aabf7d8b6d7d33d9db43b03537300cc7e6bb2817e349749e0a08f5b5e", + message_->headers().get(Http::CustomHeaders::get().Authorization)->value().getStringView()); } // Verify we sign the content header as the hashed empty string if the body is empty @@ -113,10 +115,11 @@ TEST_F(SignerImplTest, SignEmptyContentHeader) { EXPECT_EQ( SignatureConstants::get().HashedEmptyString, message_->headers().get(SignatureHeaders::get().ContentSha256)->value().getStringView()); - EXPECT_EQ("AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " - "SignedHeaders=x-amz-content-sha256;x-amz-date, " - "Signature=4ee6aa9355259c18133f150b139ea9aeb7969c9408ad361b2151f50a516afe42", - message_->headers().getAuthorizationValue()); + EXPECT_EQ( + "AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " + "SignedHeaders=x-amz-content-sha256;x-amz-date, " + "Signature=4ee6aa9355259c18133f150b139ea9aeb7969c9408ad361b2151f50a516afe42", + message_->headers().get(Http::CustomHeaders::get().Authorization)->value().getStringView()); } // Verify we sign the content header correctly when we have a body @@ -129,10 +132,11 @@ TEST_F(SignerImplTest, SignContentHeader) { EXPECT_EQ( "937e8d5fbb48bd4949536cd65b8d35c426b80d2f830c5c308e2cdec422ae2244", message_->headers().get(SignatureHeaders::get().ContentSha256)->value().getStringView()); - EXPECT_EQ("AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " - "SignedHeaders=x-amz-content-sha256;x-amz-date, " - "Signature=4eab89c36f45f2032d6010ba1adab93f8510ddd6afe540821f3a05bb0253e27b", - message_->headers().getAuthorizationValue()); + EXPECT_EQ( + "AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " + "SignedHeaders=x-amz-content-sha256;x-amz-date, " + "Signature=4eab89c36f45f2032d6010ba1adab93f8510ddd6afe540821f3a05bb0253e27b", + message_->headers().get(Http::CustomHeaders::get().Authorization)->value().getStringView()); } // Verify we sign some extra headers @@ -144,10 +148,11 @@ TEST_F(SignerImplTest, SignExtraHeaders) { addHeader("b", "b_value"); addHeader("c", "c_value"); signer_.sign(*message_); - EXPECT_EQ("AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " - "SignedHeaders=a;b;c;x-amz-content-sha256;x-amz-date, " - "Signature=0940025fcecfef5d7ee30e0a26a0957e116560e374878cd86ef4316c53ae9e81", - message_->headers().getAuthorizationValue()); + EXPECT_EQ( + "AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " + "SignedHeaders=a;b;c;x-amz-content-sha256;x-amz-date, " + "Signature=0940025fcecfef5d7ee30e0a26a0957e116560e374878cd86ef4316c53ae9e81", + message_->headers().get(Http::CustomHeaders::get().Authorization)->value().getStringView()); } // Verify signing a host header @@ -157,10 +162,11 @@ TEST_F(SignerImplTest, SignHostHeader) { addPath("/"); addHeader("host", "www.example.com"); signer_.sign(*message_); - EXPECT_EQ("AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " - "SignedHeaders=host;x-amz-content-sha256;x-amz-date, " - "Signature=d9fd9be575a254c924d843964b063d770181d938ae818f5b603ef0575a5ce2cd", - message_->headers().getAuthorizationValue()); + EXPECT_EQ( + 
"AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " + "SignedHeaders=host;x-amz-content-sha256;x-amz-date, " + "Signature=d9fd9be575a254c924d843964b063d770181d938ae818f5b603ef0575a5ce2cd", + message_->headers().get(Http::CustomHeaders::get().Authorization)->value().getStringView()); } // Verify signing headers for S3 @@ -179,7 +185,7 @@ TEST_F(SignerImplTest, SignHeadersS3) { EXPECT_EQ("AWS4-HMAC-SHA256 Credential=akid/20180102/region/s3/aws4_request, " "SignedHeaders=host;x-amz-content-sha256;x-amz-date, " "Signature=d97cae067345792b78d2bad746f25c729b9eb4701127e13a7c80398f8216a167", - headers.getAuthorizationValue()); + headers.get(Http::CustomHeaders::get().Authorization)->value().getStringView()); EXPECT_EQ(SignatureConstants::get().UnsignedPayload, headers.get(SignatureHeaders::get().ContentSha256)->value().getStringView()); } @@ -200,7 +206,7 @@ TEST_F(SignerImplTest, SignHeadersNonS3) { EXPECT_EQ("AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " "SignedHeaders=host;x-amz-content-sha256;x-amz-date, " "Signature=d9fd9be575a254c924d843964b063d770181d938ae818f5b603ef0575a5ce2cd", - headers.getAuthorizationValue()); + headers.get(Http::CustomHeaders::get().Authorization)->value().getStringView()); EXPECT_EQ(SignatureConstants::get().HashedEmptyString, headers.get(SignatureHeaders::get().ContentSha256)->value().getStringView()); } diff --git a/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc b/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc index 6728640d458c..9ffaae149de2 100644 --- a/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc +++ b/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc @@ -149,7 +149,8 @@ TEST_F(ExtAuthzHttpClientTest, ClientConfig) { // Check allowed request headers. EXPECT_TRUE(config_->requestHeaderMatchers()->matches(Http::Headers::get().Method.get())); EXPECT_TRUE(config_->requestHeaderMatchers()->matches(Http::Headers::get().Host.get())); - EXPECT_TRUE(config_->requestHeaderMatchers()->matches(Http::Headers::get().Authorization.get())); + EXPECT_TRUE( + config_->requestHeaderMatchers()->matches(Http::CustomHeaders::get().Authorization.get())); EXPECT_FALSE(config_->requestHeaderMatchers()->matches(Http::Headers::get().ContentLength.get())); EXPECT_TRUE(config_->requestHeaderMatchers()->matches(baz.get())); @@ -159,7 +160,7 @@ TEST_F(ExtAuthzHttpClientTest, ClientConfig) { EXPECT_FALSE(config_->clientHeaderMatchers()->matches(Http::Headers::get().Path.get())); EXPECT_FALSE(config_->clientHeaderMatchers()->matches(Http::Headers::get().Host.get())); EXPECT_TRUE(config_->clientHeaderMatchers()->matches(Http::Headers::get().WWWAuthenticate.get())); - EXPECT_FALSE(config_->clientHeaderMatchers()->matches(Http::Headers::get().Origin.get())); + EXPECT_FALSE(config_->clientHeaderMatchers()->matches(Http::CustomHeaders::get().Origin.get())); EXPECT_TRUE(config_->clientHeaderMatchers()->matches(foo.get())); // Check allowed upstream headers. @@ -191,7 +192,8 @@ TEST_F(ExtAuthzHttpClientTest, TestDefaultAllowedHeaders) { // Check allowed request headers. 
EXPECT_TRUE(config_->requestHeaderMatchers()->matches(Http::Headers::get().Method.get())); EXPECT_TRUE(config_->requestHeaderMatchers()->matches(Http::Headers::get().Host.get())); - EXPECT_TRUE(config_->requestHeaderMatchers()->matches(Http::Headers::get().Authorization.get())); + EXPECT_TRUE( + config_->requestHeaderMatchers()->matches(Http::CustomHeaders::get().Authorization.get())); EXPECT_FALSE(config_->requestHeaderMatchers()->matches(Http::Headers::get().ContentLength.get())); // Check allowed client headers. diff --git a/test/extensions/filters/http/cache/cache_filter_utils_test.cc b/test/extensions/filters/http/cache/cache_filter_utils_test.cc index bc3ef92cf296..c3d01fbddb7f 100644 --- a/test/extensions/filters/http/cache/cache_filter_utils_test.cc +++ b/test/extensions/filters/http/cache/cache_filter_utils_test.cc @@ -56,7 +56,8 @@ TEST_F(IsCacheableRequestTest, ForwardedProtoHeader) { TEST_F(IsCacheableRequestTest, AuthorizationHeader) { Http::TestRequestHeaderMapImpl request_headers = cacheable_request_headers; EXPECT_TRUE(CacheFilterUtils::isCacheableRequest(request_headers)); - request_headers.setAuthorization("basic YWxhZGRpbjpvcGVuc2VzYW1l"); + request_headers.setCopy(Http::CustomHeaders::get().Authorization, + "basic YWxhZGRpbjpvcGVuc2VzYW1l"); EXPECT_FALSE(CacheFilterUtils::isCacheableRequest(request_headers)); } diff --git a/test/extensions/filters/http/cache/simple_http_cache/simple_http_cache_test.cc b/test/extensions/filters/http/cache/simple_http_cache/simple_http_cache_test.cc index a60b1bb723af..af6271f8c58b 100644 --- a/test/extensions/filters/http/cache/simple_http_cache/simple_http_cache_test.cc +++ b/test/extensions/filters/http/cache/simple_http_cache/simple_http_cache_test.cc @@ -24,7 +24,7 @@ class SimpleHttpCacheTest : public testing::Test { request_headers_.setMethod("GET"); request_headers_.setHost("example.com"); request_headers_.setForwardedProto("https"); - request_headers_.setCacheControl("max-age=3600"); + request_headers_.setCopy(Http::CustomHeaders::get().CacheControl, "max-age=3600"); } // Performs a cache lookup. 
@@ -160,7 +160,7 @@ TEST_F(SimpleHttpCacheTest, Stale) { } TEST_F(SimpleHttpCacheTest, RequestSmallMinFresh) { - request_headers_.setReferenceKey(Http::Headers::get().CacheControl, "min-fresh=1000"); + request_headers_.setReferenceKey(Http::CustomHeaders::get().CacheControl, "min-fresh=1000"); const std::string request_path("Name"); LookupContextPtr name_lookup_context = lookup(request_path); EXPECT_EQ(CacheEntryStatus::Unusable, lookup_result_.cache_entry_status_); @@ -174,7 +174,7 @@ TEST_F(SimpleHttpCacheTest, RequestSmallMinFresh) { } TEST_F(SimpleHttpCacheTest, ResponseStaleWithRequestLargeMaxStale) { - request_headers_.setReferenceKey(Http::Headers::get().CacheControl, "max-stale=9000"); + request_headers_.setReferenceKey(Http::CustomHeaders::get().CacheControl, "max-stale=9000"); const std::string request_path("Name"); LookupContextPtr name_lookup_context = lookup(request_path); diff --git a/test/extensions/filters/http/compressor/compressor_filter_integration_test.cc b/test/extensions/filters/http/compressor/compressor_filter_integration_test.cc index 3982947a6f56..2c112f07eb6a 100644 --- a/test/extensions/filters/http/compressor/compressor_filter_integration_test.cc +++ b/test/extensions/filters/http/compressor/compressor_filter_integration_test.cc @@ -36,8 +36,11 @@ class CompressorIntegrationTest : public testing::TestWithParambodyLength()); EXPECT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().getStatusValue()); - EXPECT_EQ(Http::Headers::get().ContentEncodingValues.Gzip, - response->headers().getContentEncodingValue()); + EXPECT_EQ(Http::CustomHeaders::get().ContentEncodingValues.Gzip, + response->headers() + .get(Http::CustomHeaders::get().ContentEncoding) + ->value() + .getStringView()); EXPECT_EQ(Http::Headers::get().TransferEncodingValues.Chunked, response->headers().getTransferEncodingValue()); @@ -58,7 +61,7 @@ class CompressorIntegrationTest : public testing::TestWithParambodyLength()); EXPECT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().getStatusValue()); - ASSERT_TRUE(response->headers().ContentEncoding() == nullptr); + ASSERT_TRUE(response->headers().get(Http::CustomHeaders::get().ContentEncoding) == nullptr); ASSERT_EQ(content_length, response->body().size()); EXPECT_EQ(response->body(), std::string(content_length, 'a')); } @@ -183,7 +186,9 @@ TEST_P(CompressorIntegrationTest, UpstreamResponseAlreadyEncoded) { EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().getStatusValue()); - ASSERT_EQ("br", response->headers().getContentEncodingValue()); + ASSERT_EQ( + "br", + response->headers().get(Http::CustomHeaders::get().ContentEncoding)->value().getStringView()); EXPECT_EQ(128U, response->body().size()); } @@ -207,7 +212,7 @@ TEST_P(CompressorIntegrationTest, NotEnoughContentLength) { EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().getStatusValue()); - ASSERT_TRUE(response->headers().ContentEncoding() == nullptr); + ASSERT_TRUE(response->headers().get(Http::CustomHeaders::get().ContentEncoding) == nullptr); EXPECT_EQ(10U, response->body().size()); } @@ -230,7 +235,7 @@ TEST_P(CompressorIntegrationTest, EmptyResponse) { EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); EXPECT_EQ("204", response->headers().getStatusValue()); - ASSERT_TRUE(response->headers().ContentEncoding() == nullptr); + 
ASSERT_TRUE(response->headers().get(Http::CustomHeaders::get().ContentEncoding) == nullptr); EXPECT_EQ(0U, response->body().size()); } @@ -285,14 +290,16 @@ TEST_P(CompressorIntegrationTest, AcceptanceFullConfigChunkedResponse) { EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().getStatusValue()); - ASSERT_EQ("gzip", response->headers().getContentEncodingValue()); + ASSERT_EQ( + "gzip", + response->headers().get(Http::CustomHeaders::get().ContentEncoding)->value().getStringView()); ASSERT_EQ("chunked", response->headers().getTransferEncodingValue()); } /** * Verify Vary header values are preserved. */ -TEST_P(CompressorIntegrationTest, AcceptanceFullConfigVeryHeader) { +TEST_P(CompressorIntegrationTest, AcceptanceFullConfigVaryHeader) { initializeFilter(default_config); Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, {":path", "/test/long/url"}, @@ -309,7 +316,10 @@ TEST_P(CompressorIntegrationTest, AcceptanceFullConfigVeryHeader) { EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().getStatusValue()); - ASSERT_EQ("gzip", response->headers().getContentEncodingValue()); - ASSERT_EQ("Cookie, Accept-Encoding", response->headers().getVaryValue()); + ASSERT_EQ( + "gzip", + response->headers().get(Http::CustomHeaders::get().ContentEncoding)->value().getStringView()); + ASSERT_EQ("Cookie, Accept-Encoding", + response->headers().get(Http::CustomHeaders::get().Vary)->value().getStringView()); } } // namespace Envoy diff --git a/test/extensions/filters/http/decompressor/decompressor_filter_test.cc b/test/extensions/filters/http/decompressor/decompressor_filter_test.cc index 3917ad054595..14e82e9a034a 100644 --- a/test/extensions/filters/http/decompressor/decompressor_filter_test.cc +++ b/test/extensions/filters/http/decompressor/decompressor_filter_test.cc @@ -114,9 +114,11 @@ class DecompressorFilterTest : public testing::TestWithParam { // The filter removes the decompressor's content encoding from the Content-Encoding header. 
if (expected_content_encoding.has_value()) { EXPECT_EQ(expected_content_encoding.value(), - headers_after_filter->ContentEncoding()->value().getStringView()); + headers_after_filter->get(Http::CustomHeaders::get().ContentEncoding) + ->value() + .getStringView()); } else { - EXPECT_EQ(nullptr, headers_after_filter->ContentEncoding()); + EXPECT_EQ(nullptr, headers_after_filter->get(Http::CustomHeaders::get().ContentEncoding)); } // The filter adds the decompressor's content encoding to the Accept-Encoding header on the @@ -342,7 +344,7 @@ TEST_P(DecompressorFilterTest, NoDecompressionContentEncodingNotCurrent) { TEST_P(DecompressorFilterTest, NoResponseDecompressionNoTransformPresent) { EXPECT_CALL(*decompressor_factory_, createDecompressor()).Times(0); Http::TestRequestHeaderMapImpl headers_before_filter{ - {"cache-control", Http::Headers::get().CacheControlValues.NoTransform}, + {"cache-control", Http::CustomHeaders::get().CacheControlValues.NoTransform}, {"content-encoding", "mock"}, {"content-length", "256"}}; std::unique_ptr headers_after_filter = @@ -355,8 +357,8 @@ TEST_P(DecompressorFilterTest, NoResponseDecompressionNoTransformPresent) { TEST_P(DecompressorFilterTest, NoResponseDecompressionNoTransformPresentInList) { EXPECT_CALL(*decompressor_factory_, createDecompressor()).Times(0); Http::TestRequestHeaderMapImpl headers_before_filter{ - {"cache-control", fmt::format("{}, {}", Http::Headers::get().CacheControlValues.NoCache, - Http::Headers::get().CacheControlValues.NoTransform)}, + {"cache-control", fmt::format("{}, {}", Http::CustomHeaders::get().CacheControlValues.NoCache, + Http::CustomHeaders::get().CacheControlValues.NoTransform)}, {"content-encoding", "mock"}, {"content-length", "256"}}; std::unique_ptr headers_after_filter = diff --git a/test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_integration_test.cc b/test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_integration_test.cc index 0dd77b0094d6..febd3d40a3ed 100644 --- a/test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_integration_test.cc +++ b/test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_integration_test.cc @@ -141,7 +141,7 @@ TEST_P(ReverseBridgeIntegrationTest, EnabledRoute) { EXPECT_THAT(upstream_request_->headers(), HeaderValueOf(Http::Headers::get().ContentType, "application/x-protobuf")); EXPECT_THAT(upstream_request_->headers(), - HeaderValueOf(Http::Headers::get().Accept, "application/x-protobuf")); + HeaderValueOf(Http::CustomHeaders::get().Accept, "application/x-protobuf")); // Respond to the request. 
Http::TestResponseHeaderMapImpl response_headers; diff --git a/test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_test.cc b/test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_test.cc index f78a81d6a3a5..15f5bf70687f 100644 --- a/test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_test.cc +++ b/test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_test.cc @@ -60,7 +60,8 @@ TEST_F(ReverseBridgeTest, InvalidGrpcRequest) { EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentType, "application/x-protobuf")); EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentLength, "20")); - EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().Accept, "application/x-protobuf")); + EXPECT_THAT(headers, + HeaderValueOf(Http::CustomHeaders::get().Accept, "application/x-protobuf")); } { @@ -174,7 +175,8 @@ TEST_F(ReverseBridgeTest, GrpcRequestNoManageFrameHeader) { EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentType, "application/x-protobuf")); EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentLength, "25")); - EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().Accept, "application/x-protobuf")); + EXPECT_THAT(headers, + HeaderValueOf(Http::CustomHeaders::get().Accept, "application/x-protobuf")); } { @@ -234,7 +236,8 @@ TEST_F(ReverseBridgeTest, GrpcRequest) { EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentType, "application/x-protobuf")); EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentLength, "20")); - EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().Accept, "application/x-protobuf")); + EXPECT_THAT(headers, + HeaderValueOf(Http::CustomHeaders::get().Accept, "application/x-protobuf")); } { @@ -313,7 +316,8 @@ TEST_F(ReverseBridgeTest, GrpcRequestNoContentLength) { EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false)); EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentType, "application/x-protobuf")); - EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().Accept, "application/x-protobuf")); + EXPECT_THAT(headers, + HeaderValueOf(Http::CustomHeaders::get().Accept, "application/x-protobuf")); // Ensure that we don't insert a content-length header. 
EXPECT_EQ(nullptr, headers.ContentLength()); } @@ -396,7 +400,8 @@ TEST_F(ReverseBridgeTest, GrpcRequestHeaderOnlyResponse) { EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentType, "application/x-protobuf")); EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentLength, "20")); - EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().Accept, "application/x-protobuf")); + EXPECT_THAT(headers, + HeaderValueOf(Http::CustomHeaders::get().Accept, "application/x-protobuf")); } { @@ -441,7 +446,8 @@ TEST_F(ReverseBridgeTest, GrpcRequestInternalError) { {{"content-type", "application/grpc"}, {":path", "/testing.ExampleService/SendData"}}); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false)); EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentType, "application/x-protobuf")); - EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().Accept, "application/x-protobuf")); + EXPECT_THAT(headers, + HeaderValueOf(Http::CustomHeaders::get().Accept, "application/x-protobuf")); } { @@ -516,7 +522,8 @@ TEST_F(ReverseBridgeTest, GrpcRequestBadResponseNoContentType) { {{"content-type", "application/grpc"}, {":path", "/testing.ExampleService/SendData"}}); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false)); EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentType, "application/x-protobuf")); - EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().Accept, "application/x-protobuf")); + EXPECT_THAT(headers, + HeaderValueOf(Http::CustomHeaders::get().Accept, "application/x-protobuf")); } { @@ -566,7 +573,8 @@ TEST_F(ReverseBridgeTest, GrpcRequestBadResponse) { {{"content-type", "application/grpc"}, {":path", "/testing.ExampleService/SendData"}}); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false)); EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentType, "application/x-protobuf")); - EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().Accept, "application/x-protobuf")); + EXPECT_THAT(headers, + HeaderValueOf(Http::CustomHeaders::get().Accept, "application/x-protobuf")); } { @@ -656,7 +664,8 @@ TEST_F(ReverseBridgeTest, FilterConfigPerRouteEnabled) { EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentType, "application/x-protobuf")); EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentLength, "20")); - EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().Accept, "application/x-protobuf")); + EXPECT_THAT(headers, + HeaderValueOf(Http::CustomHeaders::get().Accept, "application/x-protobuf")); } { @@ -742,7 +751,8 @@ TEST_F(ReverseBridgeTest, RouteWithTrailers) { EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false)); EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentType, "application/x-protobuf")); EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentLength, "20")); - EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().Accept, "application/x-protobuf")); + EXPECT_THAT(headers, + HeaderValueOf(Http::CustomHeaders::get().Accept, "application/x-protobuf")); } { diff --git a/test/extensions/filters/http/grpc_web/grpc_web_filter_test.cc b/test/extensions/filters/http/grpc_web/grpc_web_filter_test.cc index 65c60882f2d8..32f29094010d 100644 --- a/test/extensions/filters/http/grpc_web/grpc_web_filter_test.cc +++ b/test/extensions/filters/http/grpc_web/grpc_web_filter_test.cc @@ -95,13 +95,13 @@ class GrpcWebFilterTest : public testing::TestWithParambodyLength()); 
EXPECT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().getStatusValue()); - ASSERT_TRUE(response->headers().ContentEncoding() != nullptr); - EXPECT_EQ(Http::Headers::get().ContentEncodingValues.Gzip, - response->headers().getContentEncodingValue()); + ASSERT_TRUE(response->headers().get(Http::CustomHeaders::get().ContentEncoding) != nullptr); + EXPECT_EQ(Http::CustomHeaders::get().ContentEncodingValues.Gzip, + response->headers() + .get(Http::CustomHeaders::get().ContentEncoding) + ->value() + .getStringView()); ASSERT_TRUE(response->headers().TransferEncoding() != nullptr); EXPECT_EQ(Http::Headers::get().TransferEncodingValues.Chunked, response->headers().getTransferEncodingValue()); @@ -60,7 +63,7 @@ class GzipIntegrationTest : public testing::TestWithParambodyLength()); EXPECT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().getStatusValue()); - ASSERT_TRUE(response->headers().ContentEncoding() == nullptr); + ASSERT_TRUE(response->headers().get(Http::CustomHeaders::get().ContentEncoding) == nullptr); ASSERT_EQ(content_length, response->body().size()); EXPECT_EQ(response->body(), std::string(content_length, 'a')); } @@ -204,7 +207,9 @@ TEST_P(GzipIntegrationTest, DEPRECATED_FEATURE_TEST(UpstreamResponseAlreadyEncod EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().getStatusValue()); - ASSERT_EQ("br", response->headers().getContentEncodingValue()); + ASSERT_EQ( + "br", + response->headers().get(Http::CustomHeaders::get().ContentEncoding)->value().getStringView()); EXPECT_EQ(128U, response->body().size()); } @@ -228,7 +233,7 @@ TEST_P(GzipIntegrationTest, DEPRECATED_FEATURE_TEST(NotEnoughContentLength)) { EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().getStatusValue()); - ASSERT_TRUE(response->headers().ContentEncoding() == nullptr); + ASSERT_TRUE(response->headers().get(Http::CustomHeaders::get().ContentEncoding) == nullptr); EXPECT_EQ(10U, response->body().size()); } @@ -251,7 +256,7 @@ TEST_P(GzipIntegrationTest, DEPRECATED_FEATURE_TEST(EmptyResponse)) { EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); EXPECT_EQ("204", response->headers().getStatusValue()); - ASSERT_TRUE(response->headers().ContentEncoding() == nullptr); + ASSERT_TRUE(response->headers().get(Http::CustomHeaders::get().ContentEncoding) == nullptr); EXPECT_EQ(0U, response->body().size()); } @@ -306,14 +311,16 @@ TEST_P(GzipIntegrationTest, DEPRECATED_FEATURE_TEST(AcceptanceFullConfigChunkedR EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().getStatusValue()); - ASSERT_EQ("gzip", response->headers().getContentEncodingValue()); + ASSERT_EQ( + "gzip", + response->headers().get(Http::CustomHeaders::get().ContentEncoding)->value().getStringView()); ASSERT_EQ("chunked", response->headers().getTransferEncodingValue()); } /** * Verify Vary header values are preserved. 
*/ -TEST_P(GzipIntegrationTest, DEPRECATED_FEATURE_TEST(AcceptanceFullConfigVeryHeader)) { +TEST_P(GzipIntegrationTest, DEPRECATED_FEATURE_TEST(AcceptanceFullConfigVaryHeader)) { initializeFilter(default_config); Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, {":path", "/test/long/url"}, @@ -330,7 +337,10 @@ TEST_P(GzipIntegrationTest, DEPRECATED_FEATURE_TEST(AcceptanceFullConfigVeryHead EXPECT_EQ(0U, upstream_request_->bodyLength()); EXPECT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().getStatusValue()); - ASSERT_EQ("gzip", response->headers().getContentEncodingValue()); - ASSERT_EQ("Cookie, Accept-Encoding", response->headers().getVaryValue()); + ASSERT_EQ( + "gzip", + response->headers().get(Http::CustomHeaders::get().ContentEncoding)->value().getStringView()); + ASSERT_EQ("Cookie, Accept-Encoding", + response->headers().get(Http::CustomHeaders::get().Vary)->value().getStringView()); } } // namespace Envoy diff --git a/test/extensions/filters/http/gzip/gzip_filter_test.cc b/test/extensions/filters/http/gzip/gzip_filter_test.cc index c576b6c1cb7f..9400ddf55edc 100644 --- a/test/extensions/filters/http/gzip/gzip_filter_test.cc +++ b/test/extensions/filters/http/gzip/gzip_filter_test.cc @@ -81,7 +81,8 @@ class GzipFilterTest : public testing::Test { feedBuffer(content_length); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(headers, false)); EXPECT_EQ("", headers.get_("content-length")); - EXPECT_EQ(Http::Headers::get().ContentEncodingValues.Gzip, headers.get_("content-encoding")); + EXPECT_EQ(Http::CustomHeaders::get().ContentEncodingValues.Gzip, + headers.get_("content-encoding")); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(data_, !with_trailers)); if (with_trailers) { Buffer::OwnedImpl trailers_buffer; diff --git a/test/extensions/filters/http/jwt_authn/authenticator_test.cc b/test/extensions/filters/http/jwt_authn/authenticator_test.cc index 1cf6c1e89355..349fe3045ceb 100644 --- a/test/extensions/filters/http/jwt_authn/authenticator_test.cc +++ b/test/extensions/filters/http/jwt_authn/authenticator_test.cc @@ -106,7 +106,7 @@ TEST_F(AuthenticatorTest, TestOkJWTandCache) { EXPECT_EQ(headers.get_("sec-istio-auth-userinfo"), ExpectedPayloadValue); // Verify the token is removed. - EXPECT_FALSE(headers.Authorization()); + EXPECT_FALSE(headers.has(Http::CustomHeaders::get().Authorization)); } } @@ -128,7 +128,7 @@ TEST_F(AuthenticatorTest, TestForwardJwt) { expectVerifyStatus(Status::Ok, headers); // Verify the token is NOT removed. 
- EXPECT_TRUE(headers.Authorization()); + EXPECT_TRUE(headers.has(Http::CustomHeaders::get().Authorization)); // Payload not set by default EXPECT_EQ(out_name_, ""); diff --git a/test/extensions/filters/http/jwt_authn/extractor_test.cc b/test/extensions/filters/http/jwt_authn/extractor_test.cc index f32af1b6706e..d91f2c7dfee1 100644 --- a/test/extensions/filters/http/jwt_authn/extractor_test.cc +++ b/test/extensions/filters/http/jwt_authn/extractor_test.cc @@ -112,7 +112,7 @@ TEST_F(ExtractorTest, TestDefaultHeaderLocation) { // Test token remove tokens[0]->removeJwt(headers); - EXPECT_FALSE(headers.Authorization()); + EXPECT_FALSE(headers.has(Http::CustomHeaders::get().Authorization)); } // Test extracting JWT as Bearer token from the default header location: "Authorization" - diff --git a/test/extensions/filters/http/jwt_authn/filter_integration_test.cc b/test/extensions/filters/http/jwt_authn/filter_integration_test.cc index b4292d937ec2..5b72bb7e2adc 100644 --- a/test/extensions/filters/http/jwt_authn/filter_integration_test.cc +++ b/test/extensions/filters/http/jwt_authn/filter_integration_test.cc @@ -114,7 +114,7 @@ TEST_P(LocalJwksIntegrationTest, WithGoodToken) { EXPECT_TRUE(payload_entry != nullptr); EXPECT_EQ(payload_entry->value().getStringView(), ExpectedPayloadValue); // Verify the token is removed. - EXPECT_FALSE(upstream_request_->headers().Authorization()); + EXPECT_EQ(nullptr, upstream_request_->headers().get(Http::CustomHeaders::get().Authorization)); upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, true); response->waitForEndStream(); ASSERT_TRUE(response->complete()); @@ -392,7 +392,7 @@ TEST_P(RemoteJwksIntegrationTest, WithGoodToken) { EXPECT_TRUE(payload_entry != nullptr); EXPECT_EQ(payload_entry->value().getStringView(), ExpectedPayloadValue); // Verify the token is removed. 
- EXPECT_FALSE(upstream_request_->headers().Authorization()); + EXPECT_EQ(nullptr, upstream_request_->headers().get(Http::CustomHeaders::get().Authorization)); upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, true); diff --git a/test/extensions/stats_sinks/hystrix/hystrix_test.cc b/test/extensions/stats_sinks/hystrix/hystrix_test.cc index 5b88b4643df7..a09fed9ffb12 100644 --- a/test/extensions/stats_sinks/hystrix/hystrix_test.cc +++ b/test/extensions/stats_sinks/hystrix/hystrix_test.cc @@ -527,7 +527,7 @@ TEST_F(HystrixSinkTest, HystrixEventStreamHandler) { // Check that response_headers has been set correctly EXPECT_EQ(response_headers.ContentType()->value(), "text/event-stream"); - EXPECT_EQ(response_headers.CacheControl()->value(), "no-cache"); + EXPECT_EQ(response_headers.get_("cache-control"), "no-cache"); EXPECT_EQ(response_headers.Connection()->value(), "close"); EXPECT_EQ(response_headers.get_("access-control-allow-origin"), "*"); EXPECT_THAT(response_headers.get_("access-control-allow-headers"), HasSubstr("Accept")); diff --git a/test/extensions/tracers/common/ot/opentracing_driver_impl_test.cc b/test/extensions/tracers/common/ot/opentracing_driver_impl_test.cc index d159f0de0a58..102bc6a2086c 100644 --- a/test/extensions/tracers/common/ot/opentracing_driver_impl_test.cc +++ b/test/extensions/tracers/common/ot/opentracing_driver_impl_test.cc @@ -175,7 +175,7 @@ TEST_F(OpenTracingDriverTest, InjectFailure) { const auto span_context_injection_error_count = stats_.counter("tracing.opentracing.span_context_injection_error").value(); - EXPECT_EQ(nullptr, request_headers_.OtSpanContext()); + EXPECT_FALSE(request_headers_.has(Http::CustomHeaders::get().OtSpanContext)); span->injectContext(request_headers_); EXPECT_EQ(span_context_injection_error_count + 1, diff --git a/test/extensions/tracers/lightstep/lightstep_tracer_impl_test.cc b/test/extensions/tracers/lightstep/lightstep_tracer_impl_test.cc index 8b72f2cffaec..e515d5dad230 100644 --- a/test/extensions/tracers/lightstep/lightstep_tracer_impl_test.cc +++ b/test/extensions/tracers/lightstep/lightstep_tracer_impl_test.cc @@ -618,24 +618,24 @@ TEST_F(LightStepDriverTest, SerializeAndDeserializeContext) { // Supply bogus context, that will be simply ignored. const std::string invalid_context = "notvalidcontext"; - request_headers_.setOtSpanContext(invalid_context); + request_headers_.setCopy(Http::CustomHeaders::get().OtSpanContext, invalid_context); stats_.counter("tracing.opentracing.span_context_extraction_error").reset(); driver_->startSpan(config_, request_headers_, operation_name_, start_time_, {Tracing::Reason::Sampling, true}); EXPECT_EQ(1U, stats_.counter("tracing.opentracing.span_context_extraction_error").value()); - std::string injected_ctx(request_headers_.getOtSpanContextValue()); + std::string injected_ctx(request_headers_.get_(Http::CustomHeaders::get().OtSpanContext)); EXPECT_FALSE(injected_ctx.empty()); // Supply empty context. 
- request_headers_.removeOtSpanContext(); + request_headers_.remove(Http::CustomHeaders::get().OtSpanContext); Tracing::SpanPtr span = driver_->startSpan(config_, request_headers_, operation_name_, start_time_, {Tracing::Reason::Sampling, true}); - EXPECT_EQ(nullptr, request_headers_.OtSpanContext()); + EXPECT_FALSE(request_headers_.has(Http::CustomHeaders::get().OtSpanContext)); span->injectContext(request_headers_); - injected_ctx = std::string(request_headers_.getOtSpanContextValue()); + injected_ctx = std::string(request_headers_.get_(Http::CustomHeaders::get().OtSpanContext)); EXPECT_FALSE(injected_ctx.empty()); // Context can be parsed fine. @@ -647,9 +647,9 @@ TEST_F(LightStepDriverTest, SerializeAndDeserializeContext) { // Supply parent context, request_headers has properly populated x-ot-span-context. Tracing::SpanPtr span_with_parent = driver_->startSpan( config_, request_headers_, operation_name_, start_time_, {Tracing::Reason::Sampling, true}); - request_headers_.removeOtSpanContext(); + request_headers_.remove(Http::CustomHeaders::get().OtSpanContext); span_with_parent->injectContext(request_headers_); - injected_ctx = std::string(request_headers_.getOtSpanContextValue()); + injected_ctx = std::string(request_headers_.get_(Http::CustomHeaders::get().OtSpanContext)); EXPECT_FALSE(injected_ctx.empty()); } } @@ -684,9 +684,9 @@ TEST_F(LightStepDriverTest, MultiplePropagationModes) { Tracing::SpanPtr span = driver_->startSpan(config_, request_headers_, operation_name_, start_time_, {Tracing::Reason::Sampling, true}); - EXPECT_EQ(nullptr, request_headers_.OtSpanContext()); + EXPECT_FALSE(request_headers_.has(Http::CustomHeaders::get().OtSpanContext)); span->injectContext(request_headers_); - EXPECT_TRUE(request_headers_.has("x-ot-span-context")); + EXPECT_TRUE(request_headers_.has(Http::CustomHeaders::get().OtSpanContext)); EXPECT_TRUE(request_headers_.has("ot-tracer-traceid")); EXPECT_TRUE(request_headers_.has("x-b3-traceid")); EXPECT_TRUE(request_headers_.has("traceparent")); @@ -709,8 +709,10 @@ TEST_F(LightStepDriverTest, SpawnChild) { childViaHeaders->injectContext(base1); childViaSpawn->injectContext(base2); - std::string base1_context = Base64::decode(std::string(base1.getOtSpanContextValue())); - std::string base2_context = Base64::decode(std::string(base2.getOtSpanContextValue())); + std::string base1_context = + Base64::decode(std::string(base1.get_(Http::CustomHeaders::get().OtSpanContext))); + std::string base2_context = + Base64::decode(std::string(base2.get_(Http::CustomHeaders::get().OtSpanContext))); EXPECT_FALSE(base1_context.empty()); EXPECT_FALSE(base2_context.empty()); diff --git a/test/extensions/tracers/zipkin/zipkin_tracer_impl_test.cc b/test/extensions/tracers/zipkin/zipkin_tracer_impl_test.cc index 7950d95198aa..4cbc21c3bc77 100644 --- a/test/extensions/tracers/zipkin/zipkin_tracer_impl_test.cc +++ b/test/extensions/tracers/zipkin/zipkin_tracer_impl_test.cc @@ -620,7 +620,7 @@ TEST_F(ZipkinDriverTest, ZipkinSpanTest) { // Test effective setTag() // ==== - request_headers_.removeOtSpanContext(); + request_headers_.remove(Http::CustomHeaders::get().OtSpanContext); // New span will have a CS annotation Tracing::SpanPtr span = driver_->startSpan(config_, request_headers_, operation_name_, @@ -643,7 +643,7 @@ TEST_F(ZipkinDriverTest, ZipkinSpanTest) { const std::string parent_id = Hex::uint64ToHex(generateRandom64()); const std::string context = trace_id + ";" + span_id + ";" + parent_id + ";" + CLIENT_SEND; - request_headers_.setOtSpanContext(context); + 
request_headers_.setCopy(Http::CustomHeaders::get().OtSpanContext, context); // New span will have an SR annotation Tracing::SpanPtr span2 = driver_->startSpan(config_, request_headers_, operation_name_, diff --git a/test/integration/header_integration_test.cc b/test/integration/header_integration_test.cc index 235619d7faac..0dd832e21c43 100644 --- a/test/integration/header_integration_test.cc +++ b/test/integration/header_integration_test.cc @@ -138,7 +138,7 @@ stat_prefix: header_test key: "x-foo" value: "value1" - header: - key: "authorization" + key: "user-agent" value: "token1" routes: - match: { prefix: "/test" } @@ -149,7 +149,7 @@ stat_prefix: header_test key: "x-foo" value: "value2" - header: - key: "authorization" + key: "user-agent" value: "token2" - name: path-sanitization domains: ["path-sanitization.com"] @@ -997,14 +997,14 @@ TEST_P(HeaderIntegrationTest, TestAppendSameHeaders) { {":path", "/test"}, {":scheme", "http"}, {":authority", "append-same-headers.com"}, - {"authorization", "token3"}, + {"user-agent", "token3"}, {"x-foo", "value3"}, }, Http::TestRequestHeaderMapImpl{ {":authority", "append-same-headers.com"}, {":path", "/test"}, {":method", "GET"}, - {"authorization", "token3,token2,token1"}, + {"user-agent", "token3,token2,token1"}, {"x-foo", "value3"}, {"x-foo", "value2"}, {"x-foo", "value1"}, diff --git a/test/integration/header_prefix_integration_test.cc b/test/integration/header_prefix_integration_test.cc index a3a88f08f1dc..723effb513ff 100644 --- a/test/integration/header_prefix_integration_test.cc +++ b/test/integration/header_prefix_integration_test.cc @@ -12,7 +12,10 @@ namespace Envoy { // bootstrap proto it's too late to set it. // // Instead, set the value early and regression test the bootstrap proto's validation of prefix -// injection. +// injection. We also register a custom header to make sure that registered headers interact well +// with the prefix override. +Http::RegisterCustomInlineHeader + cache_control_handle(Http::CustomHeaders::get().CacheControl); static const char* custom_prefix_ = "x-custom"; diff --git a/test/integration/integration_test.cc b/test/integration/integration_test.cc index 7c751f82f5d7..0e052dd11d5f 100644 --- a/test/integration/integration_test.cc +++ b/test/integration/integration_test.cc @@ -577,8 +577,8 @@ TEST_P(IntegrationTest, TestInlineHeaders) { "GET / HTTP/1.1\r\n" "Host: foo.com\r\n" "Foo: bar\r\n" - "Cache-control: public\r\n" - "Cache-control: 123\r\n" + "User-Agent: public\r\n" + "User-Agent: 123\r\n" "Eep: baz\r\n\r\n", &response, true); EXPECT_THAT(response, HasSubstr("HTTP/1.1 200 OK\r\n")); @@ -587,7 +587,7 @@ TEST_P(IntegrationTest, TestInlineHeaders) { reinterpret_cast(fake_upstreams_.front().get())->lastRequestHeaders(); ASSERT_TRUE(upstream_headers != nullptr); EXPECT_EQ(upstream_headers->Host()->value(), "foo.com"); - EXPECT_EQ(upstream_headers->CacheControl()->value(), "public,123"); + EXPECT_EQ(upstream_headers->get_("User-Agent"), "public,123"); ASSERT_TRUE(upstream_headers->get(Envoy::Http::LowerCaseString("foo")) != nullptr); EXPECT_EQ("bar", upstream_headers->get(Envoy::Http::LowerCaseString("foo"))->value().getStringView()); From 08af20bcd6dc4c8b4ed9198183ec98c1f3c6d5ab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Guti=C3=A9rrez=20Segal=C3=A9s?= Date: Mon, 29 Jun 2020 12:09:32 -0400 Subject: [PATCH 483/909] upstream: remove unused code (#11781) This was a leftover of ##11086. 
Signed-off-by: Raul Gutierrez Segales --- source/common/upstream/upstream_impl.cc | 1 - source/common/upstream/upstream_impl.h | 1 - 2 files changed, 2 deletions(-) diff --git a/source/common/upstream/upstream_impl.cc b/source/common/upstream/upstream_impl.cc index e69f003e1f6a..7967a7d1ba96 100644 --- a/source/common/upstream/upstream_impl.cc +++ b/source/common/upstream/upstream_impl.cc @@ -878,7 +878,6 @@ ClusterImplBase::ClusterImplBase( init_watcher_("ClusterImplBase", [this]() { onInitDone(); }), runtime_(runtime), local_cluster_(factory_context.clusterManager().localClusterName().value_or("") == cluster.name()), - symbol_table_(stats_scope->symbolTable()), const_metadata_shared_pool_(Config::Metadata::getConstMetadataSharedPool( factory_context.singletonManager(), factory_context.dispatcher())) { factory_context.setInitManager(init_manager_); diff --git a/source/common/upstream/upstream_impl.h b/source/common/upstream/upstream_impl.h index 135f01c9ed84..e76565fe6598 100644 --- a/source/common/upstream/upstream_impl.h +++ b/source/common/upstream/upstream_impl.h @@ -787,7 +787,6 @@ class ClusterImplBase : public Cluster, protected Logger::Loggable initialization_complete_callback_; uint64_t pending_initialize_health_checks_{}; const bool local_cluster_; - Stats::SymbolTable& symbol_table_; Config::ConstMetadataSharedPoolSharedPtr const_metadata_shared_pool_; }; From b49d117cde32518565ae74a070fd0a63304eadaf Mon Sep 17 00:00:00 2001 From: danzh Date: Mon, 29 Jun 2020 14:21:50 -0400 Subject: [PATCH 484/909] quiche: build GSO quic writer (#11607) Signed-off-by: Dan Zhang --- bazel/external/quiche.BUILD | 215 ++++++++++++++++++ .../quic_listeners/quiche/platform/BUILD | 10 + .../platform/quic_udp_socket_platform_impl.h | 22 ++ 3 files changed, 247 insertions(+) create mode 100644 source/extensions/quic_listeners/quiche/platform/quic_udp_socket_platform_impl.h diff --git a/bazel/external/quiche.BUILD b/bazel/external/quiche.BUILD index f4718d39f692..b9536fa6dbb3 100644 --- a/bazel/external/quiche.BUILD +++ b/bazel/external/quiche.BUILD @@ -61,6 +61,9 @@ quiche_copts = select({ "-Wno-unused-function", "-Wno-unknown-warning-option", "-Wno-deprecated-copy", + "-Wno-ignored-qualifiers", + "-Wno-sign-compare", + "-Wno-inconsistent-missing-override", # quic_inlined_frame.h uses offsetof() to optimize memory usage in frames. 
"-Wno-invalid-offsetof", ], @@ -1146,6 +1149,17 @@ envoy_cc_test_library( deps = ["@envoy//test/extensions/quic_listeners/quiche/platform:quic_platform_port_utils_impl_lib"], ) +envoy_cc_library( + name = "quic_platform_udp_socket", + hdrs = select({ + "@envoy//bazel:linux": ["quiche/quic/platform/api/quic_udp_socket_platform_api.h"], + "//conditions:default": [], + }), + repository = "@envoy", + tags = ["nofips"], + deps = ["@envoy//source/extensions/quic_listeners/quiche/platform:quic_platform_udp_socket_impl_lib"], +) + envoy_cc_test_library( name = "quic_platform_sleep", hdrs = ["quiche/quic/platform/api/quic_sleep.h"], @@ -1350,6 +1364,125 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "quic_core_batch_writer_batch_writer_buffer_lib", + srcs = select({ + "@envoy//bazel:linux": [ + "quiche/quic/core/batch_writer/quic_batch_writer_buffer.cc", + ], + "//conditions:default": [], + }), + hdrs = select({ + "@envoy//bazel:linux": [ + "quiche/quic/core/batch_writer/quic_batch_writer_buffer.h", + ], + "//conditions:default": [], + }), + copts = quiche_copts, + repository = "@envoy", + tags = ["nofips"], + visibility = ["//visibility:public"], + deps = [ + ":quic_core_circular_deque_lib", + ":quic_core_linux_socket_utils_lib", + ":quic_core_packet_writer_interface_lib", + ":quic_platform", + ], +) + +envoy_cc_library( + name = "quic_core_batch_writer_batch_writer_base_lib", + srcs = select({ + "@envoy//bazel:linux": [ + "quiche/quic/core/batch_writer/quic_batch_writer_base.cc", + ], + "//conditions:default": [], + }), + hdrs = select({ + "@envoy//bazel:linux": [ + "quiche/quic/core/batch_writer/quic_batch_writer_base.h", + ], + "//conditions:default": [], + }), + copts = quiche_copts, + repository = "@envoy", + tags = ["nofips"], + visibility = ["//visibility:public"], + deps = [ + ":quic_core_batch_writer_batch_writer_buffer_lib", + ":quic_core_packet_writer_interface_lib", + ":quic_core_types_lib", + ":quic_platform", + ], +) + +envoy_cc_test_library( + name = "quic_core_batch_writer_batch_writer_test_lib", + hdrs = select({ + "@envoy//bazel:linux": [ + "quiche/quic/core/batch_writer/quic_batch_writer_test.h", + ], + "//conditions:default": [], + }), + copts = quiche_copts, + repository = "@envoy", + tags = ["nofips"], + deps = [ + ":quic_core_batch_writer_batch_writer_base_lib", + ":quic_core_udp_socket_lib", + ":quic_platform_test", + ], +) + +envoy_cc_library( + name = "quic_core_batch_writer_gso_batch_writer_lib", + srcs = select({ + "@envoy//bazel:linux": [ + "quiche/quic/core/batch_writer/quic_gso_batch_writer.cc", + ], + "//conditions:default": [], + }), + hdrs = select({ + "@envoy//bazel:linux": [ + "quiche/quic/core/batch_writer/quic_gso_batch_writer.h", + ], + "//conditions:default": [], + }), + copts = quiche_copts, + repository = "@envoy", + tags = ["nofips"], + visibility = ["//visibility:public"], + deps = [ + ":quic_core_batch_writer_batch_writer_base_lib", + ":quic_core_linux_socket_utils_lib", + ":quic_platform", + ], +) + +envoy_cc_library( + name = "quic_core_batch_writer_sendmmsg_batch_writer_lib", + srcs = select({ + "@envoy//bazel:linux": [ + "quiche/quic/core/batch_writer/quic_sendmmsg_batch_writer.cc", + ], + "//conditions:default": [], + }), + hdrs = select({ + "@envoy//bazel:linux": [ + "quiche/quic/core/batch_writer/quic_sendmmsg_batch_writer.h", + ], + "//conditions:default": [], + }), + copts = quiche_copts, + repository = "@envoy", + tags = ["nofips"], + visibility = ["//visibility:public"], + deps = [ + ":quic_core_batch_writer_batch_writer_base_lib", + 
":quic_core_linux_socket_utils_lib", + ], +) + envoy_cc_library( name = "quic_core_blocked_writer_interface_lib", hdrs = ["quiche/quic/core/quic_blocked_writer_interface.h"], @@ -2416,6 +2549,45 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "quic_core_syscall_wrapper_lib", + srcs = select({ + "@envoy//bazel:linux": ["quiche/quic/core/quic_syscall_wrapper.cc"], + "//conditions:default": [], + }), + hdrs = select({ + "@envoy//bazel:linux": ["quiche/quic/core/quic_syscall_wrapper.h"], + "//conditions:default": [], + }), + copts = quiche_copts, + repository = "@envoy", + tags = ["nofips"], + deps = [ + ":quic_platform_export", + ], +) + +envoy_cc_library( + name = "quic_core_linux_socket_utils_lib", + srcs = select({ + "@envoy//bazel:linux": ["quiche/quic/core/quic_linux_socket_utils.cc"], + "//conditions:default": [], + }), + hdrs = select({ + "@envoy//bazel:linux": ["quiche/quic/core/quic_linux_socket_utils.h"], + "//conditions:default": [], + }), + copts = quiche_copts, + repository = "@envoy", + tags = ["nofips"], + deps = [ + ":quic_core_packet_writer_interface_lib", + ":quic_core_syscall_wrapper_lib", + ":quic_core_types_lib", + ":quic_platform", + ], +) + envoy_cc_library( name = "quic_core_network_blackhole_detector_lib", srcs = ["quiche/quic/core/quic_network_blackhole_detector.cc"], @@ -3181,6 +3353,32 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "quic_core_udp_socket_lib", + srcs = select({ + "@envoy//bazel:windows_x86_64": [], + "//conditions:default": ["quiche/quic/core/quic_udp_socket_posix.cc"], + }), + hdrs = select({ + "@envoy//bazel:windows_x86_64": [], + "//conditions:default": ["quiche/quic/core/quic_udp_socket.h"], + }), + copts = quiche_copts + select({ + # On OSX/iOS, condstants from RFC 3542 (e.g. IPV6_RECVPKTINFO) are not usable + # without this define. 
+ "@envoy//bazel:apple": ["-D__APPLE_USE_RFC_3542"], + "//conditions:default": [], + }), + repository = "@envoy", + tags = ["nofips"], + deps = [ + ":quic_core_types_lib", + ":quic_core_utils_lib", + ":quic_platform", + ":quic_platform_udp_socket", + ], +) + envoy_cc_library( name = "quic_core_unacked_packet_map_lib", srcs = ["quiche/quic/core/quic_unacked_packet_map.cc"], @@ -3741,3 +3939,20 @@ envoy_cc_test( ":quic_platform_test_mem_slice_vector_lib", ], ) + +envoy_cc_test( + name = "quic_core_batch_writer_batch_writer_test", + srcs = select({ + "@envoy//bazel:linux": ["quiche/quic/core/batch_writer/quic_batch_writer_test.cc"], + "//conditions:default": [], + }), + copts = quiche_copts, + repository = "@envoy", + tags = ["nofips"], + deps = [ + ":quic_core_batch_writer_batch_writer_test_lib", + ":quic_core_batch_writer_gso_batch_writer_lib", + ":quic_core_batch_writer_sendmmsg_batch_writer_lib", + ":quic_platform", + ], +) diff --git a/source/extensions/quic_listeners/quiche/platform/BUILD b/source/extensions/quic_listeners/quiche/platform/BUILD index 4da7f76fcc71..4ef4fbbc8d64 100644 --- a/source/extensions/quic_listeners/quiche/platform/BUILD +++ b/source/extensions/quic_listeners/quiche/platform/BUILD @@ -229,6 +229,16 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "quic_platform_udp_socket_impl_lib", + hdrs = select({ + "//bazel:linux": ["quic_udp_socket_platform_impl.h"], + "//conditions:default": [], + }), + repository = "@envoy", + tags = ["nofips"], +) + envoy_cc_library( name = "envoy_quic_clock_lib", srcs = ["envoy_quic_clock.cc"], diff --git a/source/extensions/quic_listeners/quiche/platform/quic_udp_socket_platform_impl.h b/source/extensions/quic_listeners/quiche/platform/quic_udp_socket_platform_impl.h new file mode 100644 index 000000000000..248cfc193e02 --- /dev/null +++ b/source/extensions/quic_listeners/quiche/platform/quic_udp_socket_platform_impl.h @@ -0,0 +1,22 @@ +#pragma once + +// NOLINT(namespace-envoy) + +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced directly by other Envoy code. It serves purely as a +// porting layer for QUICHE. 
+ +#include + +namespace quic { + +const size_t kCmsgSpaceForGooglePacketHeaderImpl = 0; + +// NOLINTNEXTLINE(readability-identifier-naming) +inline bool GetGooglePacketHeadersFromControlMessageImpl(struct ::cmsghdr* /*cmsg*/, + char** /*packet_headers*/, + size_t* /*packet_headers_len*/) { + return false; +} + +} // namespace quic From 89d6c6c6fa202aa7a01e85d2d887b56c8a3268ad Mon Sep 17 00:00:00 2001 From: Jian Zeng Date: Tue, 30 Jun 2020 07:21:23 +0800 Subject: [PATCH 485/909] feat(router): keep query string by default for redirects (#11493) Signed-off-by: knight42 --- api/envoy/api/v2/route/route_components.proto | 15 +++ .../config/route/v3/route_components.proto | 15 +++ .../route/v4alpha/route_components.proto | 15 +++ docs/root/version_history/current.rst | 1 + .../envoy/api/v2/route/route_components.proto | 15 +++ .../config/route/v3/route_components.proto | 15 +++ .../route/v4alpha/route_components.proto | 15 +++ source/common/router/config_impl.cc | 53 +++++++-- source/common/router/config_impl.h | 2 + source/common/runtime/runtime_features.cc | 1 + test/common/router/config_impl_test.cc | 101 +++++++++++++++++- 11 files changed, 241 insertions(+), 7 deletions(-) diff --git a/api/envoy/api/v2/route/route_components.proto b/api/envoy/api/v2/route/route_components.proto index c890134414e5..007f71d57cb5 100644 --- a/api/envoy/api/v2/route/route_components.proto +++ b/api/envoy/api/v2/route/route_components.proto @@ -1177,6 +1177,21 @@ message RedirectAction { oneof path_rewrite_specifier { // The path portion of the URL will be swapped with this value. + // Please note that query string in path_redirect will override the + // request's query string and will not be stripped. + // + // For example, let's say we have the following routes: + // + // - match: { path: "/old-path-1" } + // redirect: { path_redirect: "/new-path-1" } + // - match: { path: "/old-path-2" } + // redirect: { path_redirect: "/new-path-2", strip-query: "true" } + // - match: { path: "/old-path-3" } + // redirect: { path_redirect: "/new-path-3?foo=1", strip_query: "true" } + // + // 1. if request uri is "/old-path-1?bar=1", users will be redirected to "/new-path-1?bar=1" + // 2. if request uri is "/old-path-2?bar=1", users will be redirected to "/new-path-2" + // 3. if request uri is "/old-path-3?bar=1", users will be redirected to "/new-path-3?foo=1" string path_redirect = 2 [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; diff --git a/api/envoy/config/route/v3/route_components.proto b/api/envoy/config/route/v3/route_components.proto index 21afddba4fb6..f0957363f988 100644 --- a/api/envoy/config/route/v3/route_components.proto +++ b/api/envoy/config/route/v3/route_components.proto @@ -1208,6 +1208,21 @@ message RedirectAction { oneof path_rewrite_specifier { // The path portion of the URL will be swapped with this value. + // Please note that query string in path_redirect will override the + // request's query string and will not be stripped. + // + // For example, let's say we have the following routes: + // + // - match: { path: "/old-path-1" } + // redirect: { path_redirect: "/new-path-1" } + // - match: { path: "/old-path-2" } + // redirect: { path_redirect: "/new-path-2", strip-query: "true" } + // - match: { path: "/old-path-3" } + // redirect: { path_redirect: "/new-path-3?foo=1", strip_query: "true" } + // + // 1. if request uri is "/old-path-1?bar=1", users will be redirected to "/new-path-1?bar=1" + // 2. 
if request uri is "/old-path-2?bar=1", users will be redirected to "/new-path-2" + // 3. if request uri is "/old-path-3?bar=1", users will be redirected to "/new-path-3?foo=1" string path_redirect = 2 [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; diff --git a/api/envoy/config/route/v4alpha/route_components.proto b/api/envoy/config/route/v4alpha/route_components.proto index 1621fba5b7ac..6f7298d29d67 100644 --- a/api/envoy/config/route/v4alpha/route_components.proto +++ b/api/envoy/config/route/v4alpha/route_components.proto @@ -1186,6 +1186,21 @@ message RedirectAction { oneof path_rewrite_specifier { // The path portion of the URL will be swapped with this value. + // Please note that query string in path_redirect will override the + // request's query string and will not be stripped. + // + // For example, let's say we have the following routes: + // + // - match: { path: "/old-path-1" } + // redirect: { path_redirect: "/new-path-1" } + // - match: { path: "/old-path-2" } + // redirect: { path_redirect: "/new-path-2", strip-query: "true" } + // - match: { path: "/old-path-3" } + // redirect: { path_redirect: "/new-path-3?foo=1", strip_query: "true" } + // + // 1. if request uri is "/old-path-1?bar=1", users will be redirected to "/new-path-1?bar=1" + // 2. if request uri is "/old-path-2?bar=1", users will be redirected to "/new-path-2" + // 3. if request uri is "/old-path-3?bar=1", users will be redirected to "/new-path-3?foo=1" string path_redirect = 2 [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index fe1235fe695f..3ad224ad65ee 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -9,6 +9,7 @@ Incompatible Behavior Changes * build: official released binary is now built on Ubuntu 18.04, requires glibc >= 2.27. * client_ssl_auth: the `auth_ip_white_list` stat has been renamed to :ref:`auth_ip_allowlist `. +* router: path_redirect now keeps query string by default. This behavior may be reverted by setting runtime feature `envoy.reloadable_features.preserve_query_string_in_path_redirects` to false. Minor Behavior Changes ---------------------- diff --git a/generated_api_shadow/envoy/api/v2/route/route_components.proto b/generated_api_shadow/envoy/api/v2/route/route_components.proto index c890134414e5..007f71d57cb5 100644 --- a/generated_api_shadow/envoy/api/v2/route/route_components.proto +++ b/generated_api_shadow/envoy/api/v2/route/route_components.proto @@ -1177,6 +1177,21 @@ message RedirectAction { oneof path_rewrite_specifier { // The path portion of the URL will be swapped with this value. + // Please note that query string in path_redirect will override the + // request's query string and will not be stripped. + // + // For example, let's say we have the following routes: + // + // - match: { path: "/old-path-1" } + // redirect: { path_redirect: "/new-path-1" } + // - match: { path: "/old-path-2" } + // redirect: { path_redirect: "/new-path-2", strip-query: "true" } + // - match: { path: "/old-path-3" } + // redirect: { path_redirect: "/new-path-3?foo=1", strip_query: "true" } + // + // 1. if request uri is "/old-path-1?bar=1", users will be redirected to "/new-path-1?bar=1" + // 2. if request uri is "/old-path-2?bar=1", users will be redirected to "/new-path-2" + // 3. 
if request uri is "/old-path-3?bar=1", users will be redirected to "/new-path-3?foo=1" string path_redirect = 2 [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; diff --git a/generated_api_shadow/envoy/config/route/v3/route_components.proto b/generated_api_shadow/envoy/config/route/v3/route_components.proto index 9cae6faa5e88..211d3c63f768 100644 --- a/generated_api_shadow/envoy/config/route/v3/route_components.proto +++ b/generated_api_shadow/envoy/config/route/v3/route_components.proto @@ -1215,6 +1215,21 @@ message RedirectAction { oneof path_rewrite_specifier { // The path portion of the URL will be swapped with this value. + // Please note that query string in path_redirect will override the + // request's query string and will not be stripped. + // + // For example, let's say we have the following routes: + // + // - match: { path: "/old-path-1" } + // redirect: { path_redirect: "/new-path-1" } + // - match: { path: "/old-path-2" } + // redirect: { path_redirect: "/new-path-2", strip-query: "true" } + // - match: { path: "/old-path-3" } + // redirect: { path_redirect: "/new-path-3?foo=1", strip_query: "true" } + // + // 1. if request uri is "/old-path-1?bar=1", users will be redirected to "/new-path-1?bar=1" + // 2. if request uri is "/old-path-2?bar=1", users will be redirected to "/new-path-2" + // 3. if request uri is "/old-path-3?bar=1", users will be redirected to "/new-path-3?foo=1" string path_redirect = 2 [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; diff --git a/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto b/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto index 9646c0c86301..ea664e5d62b6 100644 --- a/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto +++ b/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto @@ -1214,6 +1214,21 @@ message RedirectAction { oneof path_rewrite_specifier { // The path portion of the URL will be swapped with this value. + // Please note that query string in path_redirect will override the + // request's query string and will not be stripped. + // + // For example, let's say we have the following routes: + // + // - match: { path: "/old-path-1" } + // redirect: { path_redirect: "/new-path-1" } + // - match: { path: "/old-path-2" } + // redirect: { path_redirect: "/new-path-2", strip-query: "true" } + // - match: { path: "/old-path-3" } + // redirect: { path_redirect: "/new-path-3?foo=1", strip_query: "true" } + // + // 1. if request uri is "/old-path-1?bar=1", users will be redirected to "/new-path-1?bar=1" + // 2. if request uri is "/old-path-2?bar=1", users will be redirected to "/new-path-2" + // 3. if request uri is "/old-path-3?bar=1", users will be redirected to "/new-path-3?foo=1" string path_redirect = 2 [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; diff --git a/source/common/router/config_impl.cc b/source/common/router/config_impl.cc index 08c46b9e25fb..ae92edeab16b 100644 --- a/source/common/router/config_impl.cc +++ b/source/common/router/config_impl.cc @@ -34,6 +34,7 @@ #include "common/protobuf/protobuf.h" #include "common/protobuf/utility.h" #include "common/router/retry_state_impl.h" +#include "common/runtime/runtime_features.h" #include "common/tracing/http_tracer_impl.h" #include "extensions/filters/http/common/utility.h" @@ -300,6 +301,9 @@ RouteEntryImplBase::RouteEntryImplBase(const VirtualHostImpl& vhost, ? 
":" + std::to_string(route.redirect().port_redirect()) : ""), path_redirect_(route.redirect().path_redirect()), + path_redirect_has_query_(path_redirect_.find('?') != absl::string_view::npos), + enable_preserve_query_in_path_redirects_(Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.preserve_query_string_in_path_redirects")), https_redirect_(route.redirect().https_redirect()), prefix_rewrite_redirect_(route.redirect().prefix_rewrite()), strip_query_(route.redirect().strip_query()), @@ -428,6 +432,13 @@ RouteEntryImplBase::RouteEntryImplBase(const VirtualHostImpl& vhost, regex_rewrite_ = Regex::Utility::parseRegex(rewrite_spec.pattern()); regex_rewrite_substitution_ = rewrite_spec.substitution(); } + + if (enable_preserve_query_in_path_redirects_ && path_redirect_has_query_ && strip_query_) { + ENVOY_LOG(warn, + "`strip_query` is set to true, but `path_redirect` contains query string and it will " + "not be stripped: {}", + path_redirect_); + } } bool RouteEntryImplBase::evaluateRuntimeMatch(const uint64_t random_value) const { @@ -661,16 +672,46 @@ std::string RouteEntryImplBase::newPath(const Http::RequestHeaderMap& headers) c final_host = processRequestHost(headers, final_scheme, final_port); } - if (!path_redirect_.empty()) { - final_path = path_redirect_.c_str(); - } else { - final_path = headers.getPathValue(); - if (strip_query_) { - size_t path_end = final_path.find("?"); + std::string final_path_value; + if (enable_preserve_query_in_path_redirects_) { + if (!path_redirect_.empty()) { + // The path_redirect query string, if any, takes precedence over the request's query string, + // and it will not be stripped regardless of `strip_query`. + if (path_redirect_has_query_) { + final_path = path_redirect_.c_str(); + } else { + const absl::string_view current_path = headers.getPathValue(); + const size_t path_end = current_path.find('?'); + const bool current_path_has_query = path_end != absl::string_view::npos; + if (current_path_has_query) { + final_path_value = path_redirect_; + final_path_value.append(current_path.data() + path_end, current_path.length() - path_end); + final_path = final_path_value; + } else { + final_path = path_redirect_.c_str(); + } + } + } else { + final_path = headers.getPathValue(); + } + if (!path_redirect_has_query_ && strip_query_) { + const size_t path_end = final_path.find('?'); if (path_end != absl::string_view::npos) { final_path = final_path.substr(0, path_end); } } + } else { + if (!path_redirect_.empty()) { + final_path = path_redirect_.c_str(); + } else { + final_path = headers.getPathValue(); + if (strip_query_) { + const size_t path_end = final_path.find("?"); + if (path_end != absl::string_view::npos) { + final_path = final_path.substr(0, path_end); + } + } + } } return fmt::format("{}://{}{}{}", final_scheme, final_host, final_port, final_path); diff --git a/source/common/router/config_impl.h b/source/common/router/config_impl.h index df1ab51e6e5f..d5f51ec194fa 100644 --- a/source/common/router/config_impl.h +++ b/source/common/router/config_impl.h @@ -758,6 +758,8 @@ class RouteEntryImplBase : public RouteEntry, const std::string host_redirect_; const std::string port_redirect_; const std::string path_redirect_; + const bool path_redirect_has_query_; + const bool enable_preserve_query_in_path_redirects_; const bool https_redirect_; const std::string prefix_rewrite_redirect_; const bool strip_query_; diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc index b2656438a2e1..a8d795eee314 
100644 --- a/source/common/runtime/runtime_features.cc +++ b/source/common/runtime/runtime_features.cc @@ -70,6 +70,7 @@ constexpr const char* runtime_features[] = { "envoy.reloadable_features.fixed_connection_close", "envoy.reloadable_features.http_default_alpn", "envoy.reloadable_features.listener_in_place_filterchain_update", + "envoy.reloadable_features.preserve_query_string_in_path_redirects", "envoy.reloadable_features.preserve_upstream_date", "envoy.reloadable_features.stop_faking_paths", "envoy.reloadable_features.strict_1xx_and_204_response_headers", diff --git a/test/common/router/config_impl_test.cc b/test/common/router/config_impl_test.cc index 93caa4b5367c..500597c46e4d 100644 --- a/test/common/router/config_impl_test.cc +++ b/test/common/router/config_impl_test.cc @@ -6234,6 +6234,69 @@ name: AllRedirects } } +TEST_F(RouteConfigurationV2, PathRedirectQueryNotPreserved) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.preserve_query_string_in_redirects", "false"}}); + + std::string RouteDynPathRedirect = R"EOF( +name: AllRedirects +virtual_hosts: + - name: redirect + domains: [redirect.lyft.com] + routes: + - match: { path: "/path/redirect/"} + redirect: { path_redirect: "/new/path-redirect/" } + - match: { path: "/path/redirect/strip-query/true"} + redirect: { path_redirect: "/new/path-redirect/", strip_query: "true" } + - match: { path: "/path/redirect/query"} + redirect: { path_redirect: "/new/path-redirect?foo=1" } + - match: { path: "/path/redirect/query-with-strip"} + redirect: { path_redirect: "/new/path-redirect?foo=2", strip_query: "true" } + )EOF"; + + TestConfigImpl config(parseRouteConfigurationFromYaml(RouteDynPathRedirect), factory_context_, + true); + EXPECT_EQ(nullptr, config.route(genRedirectHeaders("www.foo.com", "/foo", true, true), 0)); + + { + Http::TestRequestHeaderMapImpl headers = + genRedirectHeaders("redirect.lyft.com", "/path/redirect/?lang=eng&con=US", true, false); + EXPECT_EQ("https://redirect.lyft.com/new/path-redirect/?lang=eng&con=US", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestRequestHeaderMapImpl headers = genRedirectHeaders( + "redirect.lyft.com", "/path/redirect/strip-query/true?lang=eng&con=US", true, false); + EXPECT_EQ("https://redirect.lyft.com/new/path-redirect/", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestRequestHeaderMapImpl headers = + genRedirectHeaders("redirect.lyft.com", "/path/redirect/query", true, false); + EXPECT_EQ("https://redirect.lyft.com/new/path-redirect?foo=1", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestRequestHeaderMapImpl headers = + genRedirectHeaders("redirect.lyft.com", "/path/redirect/query?bar=1", true, false); + EXPECT_EQ("https://redirect.lyft.com/new/path-redirect?foo=1", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestRequestHeaderMapImpl headers = + genRedirectHeaders("redirect.lyft.com", "/path/redirect/query-with-strip", true, false); + EXPECT_EQ("https://redirect.lyft.com/new/path-redirect?foo=2", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestRequestHeaderMapImpl headers = genRedirectHeaders( + "redirect.lyft.com", "/path/redirect/query-with-strip?bar=1", true, false); + EXPECT_EQ("https://redirect.lyft.com/new/path-redirect?foo=2", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } 
+} + // Test to check Strip Query for redirect messages TEST_F(RouteConfigurationV2, RedirectStripQuery) { std::string RouteDynPathRedirect = R"EOF( @@ -6250,6 +6313,12 @@ name: AllRedirects redirect: { host_redirect: new.lyft.com } - match: { path: "/path/redirect/"} redirect: { path_redirect: "/new/path-redirect/" } + - match: { path: "/path/redirect/strip-query/true"} + redirect: { path_redirect: "/new/path-redirect/", strip_query: "true" } + - match: { path: "/path/redirect/query"} + redirect: { path_redirect: "/new/path-redirect?foo=1" } + - match: { path: "/path/redirect/query-with-strip"} + redirect: { path_redirect: "/new/path-redirect?foo=2", strip_query: "true" } - match: { prefix: "/all/combinations"} redirect: { host_redirect: "new.lyft.com", prefix_rewrite: "/new/prefix" , https_redirect: "true", strip_query: "true" } )EOF"; @@ -6282,10 +6351,40 @@ name: AllRedirects } { Http::TestRequestHeaderMapImpl headers = - genRedirectHeaders("redirect.lyft.com", "/path/redirect/", true, false); + genRedirectHeaders("redirect.lyft.com", "/path/redirect/?lang=eng&con=US", true, false); + EXPECT_EQ("https://redirect.lyft.com/new/path-redirect/?lang=eng&con=US", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestRequestHeaderMapImpl headers = genRedirectHeaders( + "redirect.lyft.com", "/path/redirect/strip-query/true?lang=eng&con=US", true, false); EXPECT_EQ("https://redirect.lyft.com/new/path-redirect/", config.route(headers, 0)->directResponseEntry()->newPath(headers)); } + { + Http::TestRequestHeaderMapImpl headers = + genRedirectHeaders("redirect.lyft.com", "/path/redirect/query", true, false); + EXPECT_EQ("https://redirect.lyft.com/new/path-redirect?foo=1", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestRequestHeaderMapImpl headers = + genRedirectHeaders("redirect.lyft.com", "/path/redirect/query?bar=1", true, false); + EXPECT_EQ("https://redirect.lyft.com/new/path-redirect?foo=1", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestRequestHeaderMapImpl headers = + genRedirectHeaders("redirect.lyft.com", "/path/redirect/query-with-strip", true, false); + EXPECT_EQ("https://redirect.lyft.com/new/path-redirect?foo=2", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } + { + Http::TestRequestHeaderMapImpl headers = genRedirectHeaders( + "redirect.lyft.com", "/path/redirect/query-with-strip?bar=1", true, false); + EXPECT_EQ("https://redirect.lyft.com/new/path-redirect?foo=2", + config.route(headers, 0)->directResponseEntry()->newPath(headers)); + } { Http::TestRequestHeaderMapImpl headers = genRedirectHeaders( "redirect.lyft.com", "/all/combinations/here/we/go?key=value", false, false); From dfddb529e914d794ac552e906b13d71233609bf7 Mon Sep 17 00:00:00 2001 From: Tony Allen Date: Mon, 1 Jun 2020 20:29:14 -0700 Subject: [PATCH 486/909] listener: Add configurable accepted connection limits (#153) Add support for per-listener limits on accepted connections. 
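For illustration, the limit this change introduces is configured purely through runtime rather than through the listener proto. A minimal runtime sketch, using the placeholder listener name and value from the documentation added below::

    layered_runtime:
      layers:
      - name: static_layer_0
        static_layer:
          envoy:
            resource_limits:
              listener:
                example_listener_name:
                  connection_limit: 10000

Once the limit is reached, further accepted sockets are closed immediately and counted in the listener's downstream_cx_overflow counter.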
Signed-off-by: Tony Allen --- .../configuration/best_practices/edge.rst | 102 +++++++++++++++++ .../configuration/listeners/listeners.rst | 1 + docs/root/configuration/listeners/runtime.rst | 8 ++ docs/root/configuration/listeners/stats.rst | 1 + .../faq/configuration/resource_limits.rst | 16 +++ docs/root/faq/overview.rst | 1 + docs/root/version_history/v1.14.2.rst | 2 + examples/front-proxy/front-envoy.yaml | 9 ++ include/envoy/network/listener.h | 6 + .../common/upstream/resource_manager_impl.h | 5 +- source/server/admin/admin.h | 3 + source/server/connection_handler_impl.cc | 8 ++ source/server/connection_handler_impl.h | 12 +- source/server/listener_impl.cc | 18 ++- source/server/listener_impl.h | 11 +- .../proxy_protocol_regression_test.cc | 3 + .../proxy_protocol/proxy_protocol_test.cc | 4 + test/integration/BUILD | 1 + test/integration/cx_limit_integration_test.cc | 106 ++++++++++++++++++ test/integration/fake_upstream.h | 8 ++ test/integration/stats_integration_test.cc | 4 +- test/mocks/network/mocks.h | 1 + test/server/connection_handler_test.cc | 89 +++++++++++++++ test/test_common/logging.cc | 4 + test/test_common/logging.h | 4 +- 25 files changed, 420 insertions(+), 7 deletions(-) create mode 100644 docs/root/configuration/listeners/runtime.rst create mode 100644 docs/root/faq/configuration/resource_limits.rst create mode 100644 test/integration/cx_limit_integration_test.cc diff --git a/docs/root/configuration/best_practices/edge.rst b/docs/root/configuration/best_practices/edge.rst index e6bc0cbdcc0a..d35b7ac8a1f7 100644 --- a/docs/root/configuration/best_practices/edge.rst +++ b/docs/root/configuration/best_practices/edge.rst @@ -24,9 +24,111 @@ HTTP proxies should additionally configure: * :ref:`HTTP/2 initial stream window size limit ` to 64 KiB, * :ref:`HTTP/2 initial connection window size limit ` to 1 MiB. * :ref:`headers_with_underscores_action setting ` to REJECT_REQUEST, to protect upstream services that treat '_' and '-' as interchangeable. +* :ref:`Connection limits. ` The following is a YAML example of the above recommendation (taken from the :ref:`Google VRP ` edge server configuration): .. literalinclude:: envoy-edge.yaml :language: yaml + + overload_manager: + refresh_interval: 0.25s + resource_monitors: + - name: "envoy.resource_monitors.fixed_heap" + typed_config: + "@type": type.googleapis.com/envoy.config.resource_monitor.fixed_heap.v2alpha.FixedHeapConfig + # TODO: Tune for your system. 
+ max_heap_size_bytes: 2147483648 # 2 GiB + actions: + - name: "envoy.overload_actions.shrink_heap" + triggers: + - name: "envoy.resource_monitors.fixed_heap" + threshold: + value: 0.95 + - name: "envoy.overload_actions.stop_accepting_requests" + triggers: + - name: "envoy.resource_monitors.fixed_heap" + threshold: + value: 0.98 + + admin: + access_log_path: "/var/log/envoy_admin.log" + address: + socket_address: + address: 127.0.0.1 + port_value: 9090 + + static_resources: + listeners: + - address: + socket_address: + address: 0.0.0.0 + port_value: 443 + listener_filters: + - name: "envoy.filters.listener.tls_inspector" + typed_config: {} + per_connection_buffer_limit_bytes: 32768 # 32 KiB + filter_chains: + - filter_chain_match: + server_names: ["example.com", "www.example.com"] + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: { filename: "example_com_cert.pem" } + private_key: { filename: "example_com_key.pem" } + # Uncomment if Envoy is behind a load balancer that exposes client IP address using the PROXY protocol. + # use_proxy_proto: true + filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + use_remote_address: true + common_http_protocol_options: + idle_timeout: 3600s # 1 hour + headers_with_underscores_action: REJECT_REQUEST + http2_protocol_options: + max_concurrent_streams: 100 + initial_stream_window_size: 65536 # 64 KiB + initial_connection_window_size: 1048576 # 1 MiB + stream_idle_timeout: 300s # 5 mins, must be disabled for long-lived and streaming requests + request_timeout: 300s # 5 mins, must be disabled for long-lived and streaming requests + route_config: + virtual_hosts: + - name: default + domains: "*" + routes: + - match: { prefix: "/" } + route: + cluster: service_foo + idle_timeout: 15s # must be disabled for long-lived and streaming requests + clusters: + name: service_foo + connect_timeout: 15s + per_connection_buffer_limit_bytes: 32768 # 32 KiB + load_assignment: + cluster_name: some_service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 8080 + http2_protocol_options: + initial_stream_window_size: 65536 # 64 KiB + initial_connection_window_size: 1048576 # 1 MiB + + layered_runtime: + layers: + - name: static_layer_0 + static_layer: + envoy: + resource_limits: + listener: + example_listener_name: + connection_limit: 10000 diff --git a/docs/root/configuration/listeners/listeners.rst b/docs/root/configuration/listeners/listeners.rst index 9b3e2161ef0c..5e4cc6b22c5e 100644 --- a/docs/root/configuration/listeners/listeners.rst +++ b/docs/root/configuration/listeners/listeners.rst @@ -8,6 +8,7 @@ Listeners overview stats + runtime listener_filters/listener_filters network_filters/network_filters udp_filters/udp_filters diff --git a/docs/root/configuration/listeners/runtime.rst b/docs/root/configuration/listeners/runtime.rst new file mode 100644 index 000000000000..b42b6aa5fa3f --- /dev/null +++ b/docs/root/configuration/listeners/runtime.rst @@ -0,0 +1,8 @@ +.. 
_config_listeners_runtime: + +Runtime +------- +The following runtime settings are supported: + +envoy.resource_limits.listener..connection_limit + Sets a limit on the number of active connections to the specified listener. diff --git a/docs/root/configuration/listeners/stats.rst b/docs/root/configuration/listeners/stats.rst index e9aa8f04487d..0b7936e4b2b4 100644 --- a/docs/root/configuration/listeners/stats.rst +++ b/docs/root/configuration/listeners/stats.rst @@ -16,6 +16,7 @@ Every listener has a statistics tree rooted at *listener.
<address>
.* with the fo downstream_cx_destroy, Counter, Total destroyed connections downstream_cx_active, Gauge, Total active connections downstream_cx_length_ms, Histogram, Connection length milliseconds + downstream_cx_overflow, Counter, Total connections rejected due to enforcement of listener connection limit downstream_pre_cx_timeout, Counter, Sockets that timed out during listener filter processing downstream_pre_cx_active, Gauge, Sockets currently undergoing listener filter processing no_filter_chain_match, Counter, Total connections that didn't match any filter chain diff --git a/docs/root/faq/configuration/resource_limits.rst b/docs/root/faq/configuration/resource_limits.rst new file mode 100644 index 000000000000..c20c64929b10 --- /dev/null +++ b/docs/root/faq/configuration/resource_limits.rst @@ -0,0 +1,16 @@ +.. _faq_resource_limits: + +How does Envoy prevent file descriptor exhaustion? +================================================== + +:ref:`Per-listener connection limits ` may be configured as an upper bound on +the number of active connections a particular listener will accept. The listener may accept more +connections than the configured value on the order of the number of worker threads. On Unix-based +systems, it is recommended to keep the sum of all connection limits less than half of the system's +file descriptor limit to account for upstream connections, files, and other usage of file +descriptors. + +.. note:: + + This per-listener connection limiting will eventually be handled by the :ref:`overload manager + `. diff --git a/docs/root/faq/overview.rst b/docs/root/faq/overview.rst index 2953330a6009..a0a160cb2118 100644 --- a/docs/root/faq/overview.rst +++ b/docs/root/faq/overview.rst @@ -62,6 +62,7 @@ Configuration configuration/flow_control configuration/timeouts configuration/deprecation + configuration/resource_limits Load balancing -------------- diff --git a/docs/root/version_history/v1.14.2.rst b/docs/root/version_history/v1.14.2.rst index 18bdf0bfce9d..1dce099f5cdf 100644 --- a/docs/root/version_history/v1.14.2.rst +++ b/docs/root/version_history/v1.14.2.rst @@ -5,3 +5,5 @@ Changes ------- * http: fixed CVE-2020-11080 by rejecting HTTP/2 SETTINGS frames with too many parameters. +* listener: Add runtime support for `per-listener limits ` on + active/accepted connections. diff --git a/examples/front-proxy/front-envoy.yaml b/examples/front-proxy/front-envoy.yaml index 35747a6d10d3..2d3c5f1a95ab 100644 --- a/examples/front-proxy/front-envoy.yaml +++ b/examples/front-proxy/front-envoy.yaml @@ -64,3 +64,12 @@ admin: socket_address: address: 0.0.0.0 port_value: 8001 +layered_runtime: + layers: + - name: static_layer_0 + static_layer: + envoy: + resource_limits: + listener: + example_listener_name: + connection_limit: 10000 diff --git a/include/envoy/network/listener.h b/include/envoy/network/listener.h index cd868029ea00..2226a50f84fe 100644 --- a/include/envoy/network/listener.h +++ b/include/envoy/network/listener.h @@ -7,6 +7,7 @@ #include "envoy/access_log/access_log.h" #include "envoy/api/io_error.h" #include "envoy/common/exception.h" +#include "envoy/common/resource.h" #include "envoy/config/core/v3/base.pb.h" #include "envoy/network/connection.h" #include "envoy/network/connection_balancer.h" @@ -144,6 +145,11 @@ class ListenerConfig { */ virtual ConnectionBalancer& connectionBalancer() PURE; + /** + * Open connection resources for this listener. + */ + virtual ResourceLimit& openConnections() PURE; + /** * @return std::vector access logs emitted by the listener. 
*/ diff --git a/source/common/upstream/resource_manager_impl.h b/source/common/upstream/resource_manager_impl.h index 12d0d498fc72..c03f8f2d348a 100644 --- a/source/common/upstream/resource_manager_impl.h +++ b/source/common/upstream/resource_manager_impl.h @@ -61,13 +61,14 @@ class ResourceManagerImpl : public ResourceManager { remaining_.set(max); } - // Upstream::Resource - bool canCreate() override { return current_ < max(); } + ~ManagedResourceImpl() override { ASSERT(count() == 0); } + void inc() override { BasicResourceLimitImpl::inc(); updateRemaining(); open_gauge_.set(BasicResourceLimitImpl::canCreate() ? 0 : 1); } + void decBy(uint64_t amount) override { BasicResourceLimitImpl::decBy(amount); updateRemaining(); diff --git a/source/server/admin/admin.h b/source/server/admin/admin.h index d491fb4443df..9c035d123e80 100644 --- a/source/server/admin/admin.h +++ b/source/server/admin/admin.h @@ -23,6 +23,7 @@ #include "envoy/upstream/outlier_detection.h" #include "envoy/upstream/resource_manager.h" +#include "common/common/basic_resource_impl.h" #include "common/common/empty_string.h" #include "common/common/logger.h" #include "common/common/macros.h" @@ -346,6 +347,7 @@ class AdminImpl : public Admin, return envoy::config::core::v3::UNSPECIFIED; } Network::ConnectionBalancer& connectionBalancer() override { return connection_balancer_; } + ResourceLimit& openConnections() override { return open_connections_; } const std::vector& accessLogs() const override { return empty_access_logs_; } @@ -355,6 +357,7 @@ class AdminImpl : public Admin, Stats::ScopePtr scope_; Http::ConnectionManagerListenerStats stats_; Network::NopConnectionBalancerImpl connection_balancer_; + BasicResourceLimitImpl open_connections_; private: const std::vector empty_access_logs_; diff --git a/source/server/connection_handler_impl.cc b/source/server/connection_handler_impl.cc index 7353404fc3bc..24b694f82659 100644 --- a/source/server/connection_handler_impl.cc +++ b/source/server/connection_handler_impl.cc @@ -328,6 +328,14 @@ void ConnectionHandlerImpl::ActiveTcpSocket::newConnection() { } void ConnectionHandlerImpl::ActiveTcpListener::onAccept(Network::ConnectionSocketPtr&& socket) { + if (listenerConnectionLimitReached()) { + ENVOY_LOG(trace, "closing connection: listener connection limit reached for {}", + config_->name()); + socket->close(); + stats_.downstream_cx_overflow_.inc(); + return; + } + onAcceptWorker(std::move(socket), config_->handOffRestoredDestinationConnections(), false); } diff --git a/source/server/connection_handler_impl.h b/source/server/connection_handler_impl.h index a0ec09cae2b1..90cf2cacab7e 100644 --- a/source/server/connection_handler_impl.h +++ b/source/server/connection_handler_impl.h @@ -29,6 +29,7 @@ namespace Server { #define ALL_LISTENER_STATS(COUNTER, GAUGE, HISTOGRAM) \ COUNTER(downstream_cx_destroy) \ COUNTER(downstream_cx_total) \ + COUNTER(downstream_cx_overflow) \ COUNTER(downstream_pre_cx_timeout) \ COUNTER(no_filter_chain_match) \ GAUGE(downstream_cx_active, Accumulate) \ @@ -113,11 +114,17 @@ class ConnectionHandlerImpl : public Network::ConnectionHandler, ActiveTcpListener(ConnectionHandlerImpl& parent, Network::ListenerPtr&& listener, Network::ListenerConfig& config); ~ActiveTcpListener() override; + bool listenerConnectionLimitReached() const { + // TODO(tonya11en): Delegate enforcement of per-listener connection limits to overload + // manager. 
+ return !config_->openConnections().canCreate(); + } void onAcceptWorker(Network::ConnectionSocketPtr&& socket, bool hand_off_restored_destination_connections, bool rebalanced); void decNumConnections() { ASSERT(num_listener_connections_ > 0); --num_listener_connections_; + config_->openConnections().dec(); } // Network::ListenerCallbacks @@ -131,7 +138,10 @@ class ConnectionHandlerImpl : public Network::ConnectionHandler, // Network::BalancedConnectionHandler uint64_t numConnections() const override { return num_listener_connections_; } - void incNumConnections() override { ++num_listener_connections_; } + void incNumConnections() override { + ++num_listener_connections_; + config_->openConnections().inc(); + } void post(Network::ConnectionSocketPtr&& socket) override; /** diff --git a/source/server/listener_impl.cc b/source/server/listener_impl.cc index 3e99e0b6004d..e995732ce8d4 100644 --- a/source/server/listener_impl.cc +++ b/source/server/listener_impl.cc @@ -246,6 +246,11 @@ ListenerImpl::ListenerImpl(const envoy::config::listener::v3::Listener& config, parent.factory_.createDrainManager(config.drain_type()))), filter_chain_manager_(address_, listener_factory_context_->parentFactoryContext(), initManager()), + cx_limit_runtime_key_("envoy.resource_limits.listener." + config_.name() + + ".connection_limit"), + open_connections_(std::make_shared( + std::numeric_limits::max(), listener_factory_context_->runtime(), + cx_limit_runtime_key_)), local_init_watcher_(fmt::format("Listener-local-init-watcher {}", name), [this] { if (workers_started_) { parent_.onListenerWarmed(*this); @@ -255,6 +260,16 @@ ListenerImpl::ListenerImpl(const envoy::config::listener::v3::Listener& config, listener_init_target_.ready(); } }) { + + const absl::optional runtime_val = + listener_factory_context_->runtime().snapshot().get(cx_limit_runtime_key_); + if (runtime_val && runtime_val->empty()) { + ENVOY_LOG(warn, + "Listener connection limit runtime key {} is empty. 
There are currently no " + "limitations on the number of accepted connections for listener {}.", + cx_limit_runtime_key_, config_.name()); + } + buildAccessLog(); auto socket_type = Network::Utility::protobufAddressSocketType(config.address()); buildListenSocketOptions(socket_type); @@ -278,7 +293,7 @@ ListenerImpl::ListenerImpl(const envoy::config::listener::v3::Listener& config, } } -ListenerImpl::ListenerImpl(const ListenerImpl& origin, +ListenerImpl::ListenerImpl(ListenerImpl& origin, const envoy::config::listener::v3::Listener& config, const std::string& version_info, ListenerManagerImpl& parent, const std::string& name, bool added_via_api, bool workers_started, @@ -322,6 +337,7 @@ ListenerImpl::ListenerImpl(const ListenerImpl& origin, buildOriginalDstListenerFilter(); buildProxyProtocolListenerFilter(); buildTlsInspectorListenerFilter(); + open_connections_ = origin.open_connections_; } void ListenerImpl::buildAccessLog() { diff --git a/source/server/listener_impl.h b/source/server/listener_impl.h index 082e384e0dee..cdc0ded801f6 100644 --- a/source/server/listener_impl.h +++ b/source/server/listener_impl.h @@ -13,6 +13,7 @@ #include "envoy/server/listener_manager.h" #include "envoy/stats/scope.h" +#include "common/common/basic_resource_impl.h" #include "common/common/logger.h" #include "common/init/manager_impl.h" #include "common/init/target_impl.h" @@ -302,6 +303,8 @@ class ListenerImpl final : public Network::ListenerConfig, return udp_listener_factory_.get(); } Network::ConnectionBalancer& connectionBalancer() override { return *connection_balancer_; } + + ResourceLimit& openConnections() override { return *open_connections_; } const std::vector& accessLogs() const override { return access_logs_; } @@ -331,7 +334,7 @@ class ListenerImpl final : public Network::ListenerConfig, * Create a new listener from an existing listener and the new config message if the in place * filter chain update is decided. Should be called only by newListenerWithFilterChain(). */ - ListenerImpl(const ListenerImpl& origin, const envoy::config::listener::v3::Listener& config, + ListenerImpl(ListenerImpl& origin, const envoy::config::listener::v3::Listener& config, const std::string& version_info, ListenerManagerImpl& parent, const std::string& name, bool added_via_api, bool workers_started, uint64_t hash, uint32_t concurrency); @@ -387,6 +390,12 @@ class ListenerImpl final : public Network::ListenerConfig, std::shared_ptr listener_factory_context_; FilterChainManagerImpl filter_chain_manager_; + // Per-listener connection limits are only specified via runtime. + // + // TODO (tonya11en): Move this functionality into the overload manager. + const std::string cx_limit_runtime_key_; + std::shared_ptr open_connections_; + // This init watcher, if workers_started_ is false, notifies the "parent" listener manager when // listener initialization is complete. 
// Important: local_init_watcher_ must be the last field in the class to avoid unexpected watcher diff --git a/test/extensions/common/proxy_protocol/proxy_protocol_regression_test.cc b/test/extensions/common/proxy_protocol/proxy_protocol_regression_test.cc index 2660309242a2..cd0ed34c9f3f 100644 --- a/test/extensions/common/proxy_protocol/proxy_protocol_regression_test.cc +++ b/test/extensions/common/proxy_protocol/proxy_protocol_regression_test.cc @@ -1,6 +1,7 @@ #include "envoy/network/address.h" #include "common/buffer/buffer_impl.h" +#include "common/common/basic_resource_impl.h" #include "common/event/dispatcher_impl.h" #include "common/network/connection_balancer_impl.h" #include "common/network/listen_socket_impl.h" @@ -70,6 +71,7 @@ class ProxyProtocolRegressionTest : public testing::TestWithParam connection_callbacks_; + BasicResourceLimitImpl open_connections_; Network::Connection* server_connection_; Network::MockConnectionCallbacks server_callbacks_; std::shared_ptr read_filter_; diff --git a/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc b/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc index afc2bfd7e724..05a65551e6de 100644 --- a/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc +++ b/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc @@ -83,6 +83,7 @@ class ProxyProtocolTest : public testing::TestWithParam connection_callbacks_; Network::Connection* server_connection_; Network::MockConnectionCallbacks server_callbacks_; + BasicResourceLimitImpl open_connections_; std::shared_ptr read_filter_; std::string name_; Api::OsSysCallsImpl os_sys_calls_actual_; @@ -1274,6 +1276,7 @@ class WildcardProxyProtocolTest : public testing::TestWithParam socket_; Network::Address::InstanceConstSharedPtr local_dst_address_; diff --git a/test/integration/BUILD b/test/integration/BUILD index 1922ef9f1e89..4008f8681eca 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -594,6 +594,7 @@ envoy_cc_test_library( "//source/common/buffer:buffer_lib", "//source/common/buffer:zero_copy_input_stream_lib", "//source/common/common:assert_lib", + "//source/common/common:basic_resource_lib", "//source/common/common:minimal_logger_lib", "//source/common/config:api_version_lib", "//source/common/config:version_converter_lib", diff --git a/test/integration/cx_limit_integration_test.cc b/test/integration/cx_limit_integration_test.cc new file mode 100644 index 000000000000..df668624ba27 --- /dev/null +++ b/test/integration/cx_limit_integration_test.cc @@ -0,0 +1,106 @@ +#include "envoy/config/bootstrap/v3/bootstrap.pb.h" +#include "envoy/network/filter.h" +#include "envoy/registry/registry.h" + +#include "common/network/utility.h" + +#include "test/config/utility.h" +#include "test/integration/integration.h" +#include "test/test_common/logging.h" +#include "test/test_common/simulated_time_system.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace { + +class ConnectionLimitIntegrationTest : public testing::TestWithParam, + public Event::TestUsingSimulatedTime, + public BaseIntegrationTest { +public: + ConnectionLimitIntegrationTest() + : BaseIntegrationTest(GetParam(), ConfigHelper::tcpProxyConfig()) {} + + void setEmptyListenerLimit() { + config_helper_.addRuntimeOverride("envoy.resource_limits.listener.listener_0.connection_limit", + ""); + } + + void setListenerLimit(const uint32_t num_conns) { + config_helper_.addRuntimeOverride("envoy.resource_limits.listener.listener_0.connection_limit", + 
std::to_string(num_conns)); + } + + void initialize() override { BaseIntegrationTest::initialize(); } +}; + +INSTANTIATE_TEST_SUITE_P(IpVersions, ConnectionLimitIntegrationTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); + +TEST_P(ConnectionLimitIntegrationTest, TestListenerLimit) { + setListenerLimit(2); + initialize(); + + std::vector tcp_clients; + std::vector raw_conns; + + tcp_clients.emplace_back(makeTcpConnection(lookupPort("listener_0"))); + raw_conns.emplace_back(); + ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(raw_conns.back())); + ASSERT_TRUE(tcp_clients.back()->connected()); + + tcp_clients.emplace_back(makeTcpConnection(lookupPort("listener_0"))); + raw_conns.emplace_back(); + ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(raw_conns.back())); + ASSERT_TRUE(tcp_clients.back()->connected()); + + tcp_clients.emplace_back(makeTcpConnection(lookupPort("listener_0"))); + raw_conns.emplace_back(); + ASSERT_FALSE(fake_upstreams_[0]->waitForRawConnection(raw_conns.back())); + tcp_clients.back()->waitForDisconnect(); + + // Get rid of the client that failed to connect. + tcp_clients.back()->close(); + tcp_clients.pop_back(); + + // Close the first connection that was successful so that we can open a new successful connection. + tcp_clients.front()->close(); + ASSERT_TRUE(raw_conns.front()->close()); + ASSERT_TRUE(raw_conns.front()->waitForDisconnect()); + + tcp_clients.emplace_back(makeTcpConnection(lookupPort("listener_0"))); + raw_conns.emplace_back(); + ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(raw_conns.back())); + ASSERT_TRUE(tcp_clients.back()->connected()); + + const bool isV4 = (version_ == Network::Address::IpVersion::v4); + auto local_address = isV4 ? Network::Utility::getCanonicalIpv4LoopbackAddress() + : Network::Utility::getIpv6LoopbackAddress(); + + const std::string counter_name = isV4 ? ("listener.127.0.0.1_0.downstream_cx_overflow") + : ("listener.[__1]_0.downstream_cx_overflow"); + + test_server_->waitForCounterEq(counter_name, 1); + + for (auto& tcp_client : tcp_clients) { + tcp_client->close(); + } + + tcp_clients.clear(); + raw_conns.clear(); +} + +TEST_P(ConnectionLimitIntegrationTest, TestEmptyListenerRuntimeLimit) { + const std::string log_line = + "Listener connection limit runtime key " + "envoy.resource_limits.listener.listener_0.connection_limit is empty. 
There are currently " + "no limitations on the number of accepted connections for listener listener_0."; + EXPECT_LOG_CONTAINS("warn", log_line, { + setEmptyListenerLimit(); + initialize(); + }); +} + +} // namespace +} // namespace Envoy diff --git a/test/integration/fake_upstream.h b/test/integration/fake_upstream.h index 28cdba57cdcb..6afeb17b36a9 100644 --- a/test/integration/fake_upstream.h +++ b/test/integration/fake_upstream.h @@ -19,6 +19,7 @@ #include "common/buffer/buffer_impl.h" #include "common/buffer/zero_copy_input_stream_impl.h" +#include "common/common/basic_resource_impl.h" #include "common/common/callback_impl.h" #include "common/common/linked_object.h" #include "common/common/lock_guard.h" @@ -722,11 +723,18 @@ class FakeUpstream : Logger::Loggable, const std::vector& accessLogs() const override { return empty_access_logs_; } + ResourceLimit& openConnections() override { return connection_resource_; } + + void setMaxConnections(const uint32_t num_connections) { + connection_resource_.setMax(num_connections); + } + void clearMaxConnections() { connection_resource_.resetMax(); } FakeUpstream& parent_; const std::string name_; Network::NopConnectionBalancerImpl connection_balancer_; const Network::ActiveUdpListenerFactoryPtr udp_listener_factory_; + BasicResourceLimitImpl connection_resource_; const std::vector empty_access_logs_; }; diff --git a/test/integration/stats_integration_test.cc b/test/integration/stats_integration_test.cc index dd4fb2bfc722..8cf76dc86608 100644 --- a/test/integration/stats_integration_test.cc +++ b/test/integration/stats_integration_test.cc @@ -273,6 +273,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithFakeSymbolTable) { // 2020/05/13 10531 44425 44600 Refactor resource manager // 2020/05/20 11223 44491 44600 Add primary clusters tracking to cluster manager. // 2020/06/10 11561 44491 44811 Make upstreams pluggable + // 2020/04/23 10661 44425 46000 per-listener connection limits // Note: when adjusting this value: EXPECT_MEMORY_EQ is active only in CI // 'release' builds, where we control the platform and tool-chain. So you @@ -336,6 +337,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithRealSymbolTable) { // 2020/05/13 10531 36537 36800 Refactor resource manager // 2020/05/20 11223 36603 36800 Add primary clusters tracking to cluster manager. // 2020/06/10 11561 36603 36923 Make upstreams pluggable + // 2020/04/23 10661 36537 37000 per-listener connection limits // Note: when adjusting this value: EXPECT_MEMORY_EQ is active only in CI // 'release' builds, where we control the platform and tool-chain. So you @@ -350,7 +352,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithRealSymbolTable) { // https://github.com/envoyproxy/envoy/blob/master/source/docs/stats.md#stats-memory-tests // for details on how to fix. EXPECT_MEMORY_EQ(m_per_cluster, 36603); - EXPECT_MEMORY_LE(m_per_cluster, 38000); // Round up to allow platform variations. 
+ EXPECT_MEMORY_LE(m_per_cluster, 37000); } TEST_P(ClusterMemoryTestRunner, MemoryLargeHostSizeWithStats) { diff --git a/test/mocks/network/mocks.h b/test/mocks/network/mocks.h index 315ce275fb95..185e7b1744be 100644 --- a/test/mocks/network/mocks.h +++ b/test/mocks/network/mocks.h @@ -345,6 +345,7 @@ class MockListenerConfig : public ListenerConfig { MOCK_METHOD(const std::string&, name, (), (const)); MOCK_METHOD(Network::ActiveUdpListenerFactory*, udpListenerFactory, ()); MOCK_METHOD(ConnectionBalancer&, connectionBalancer, ()); + MOCK_METHOD(ResourceLimit&, openConnections, ()); envoy::config::core::v3::TrafficDirection direction() const override { return envoy::config::core::v3::UNSPECIFIED; diff --git a/test/server/connection_handler_test.cc b/test/server/connection_handler_test.cc index a30d2ded6399..a7f942f95d42 100644 --- a/test/server/connection_handler_test.cc +++ b/test/server/connection_handler_test.cc @@ -102,6 +102,12 @@ class ConnectionHandlerTest : public testing::Test, protected Logger::Loggable& accessLogs() const override { return empty_access_logs_; } + ResourceLimit& openConnections() override { return open_connections_; } + + void setMaxConnections(const uint32_t num_connections) { + open_connections_.setMax(num_connections); + } + void clearMaxConnections() { open_connections_.resetMax(); } ConnectionHandlerTest& parent_; std::shared_ptr socket_; @@ -114,6 +120,7 @@ class ConnectionHandlerTest : public testing::Test, protected Logger::Loggable udp_listener_factory_; Network::ConnectionBalancerPtr connection_balancer_; + BasicResourceLimitImpl open_connections_; const std::vector empty_access_logs_; std::shared_ptr> inline_filter_chain_manager_; }; @@ -230,6 +237,88 @@ TEST_F(ConnectionHandlerTest, RemoveListenerDuringRebalance) { #endif } +TEST_F(ConnectionHandlerTest, ListenerConnectionLimitEnforced) { + Network::ListenerCallbacks* listener_callbacks1; + auto listener1 = new NiceMock(); + TestListener* test_listener1 = + addListener(1, false, false, "test_listener1", listener1, &listener_callbacks1); + Network::Address::InstanceConstSharedPtr normal_address( + new Network::Address::Ipv4Instance("127.0.0.1", 10001)); + EXPECT_CALL(*socket_factory_, localAddress()).WillRepeatedly(ReturnRef(normal_address)); + // Only allow a single connection on this listener. + test_listener1->setMaxConnections(1); + handler_->addListener(absl::nullopt, *test_listener1); + + auto listener2 = new NiceMock(); + Network::ListenerCallbacks* listener_callbacks2; + TestListener* test_listener2 = + addListener(2, false, false, "test_listener2", listener2, &listener_callbacks2); + Network::Address::InstanceConstSharedPtr alt_address( + new Network::Address::Ipv4Instance("127.0.0.2", 20002)); + EXPECT_CALL(*socket_factory_, localAddress()).WillRepeatedly(ReturnRef(alt_address)); + // Do not allow any connections on this listener. 
+ test_listener2->setMaxConnections(0); + handler_->addListener(absl::nullopt, *test_listener2); + + EXPECT_CALL(manager_, findFilterChain(_)).WillRepeatedly(Return(filter_chain_.get())); + EXPECT_CALL(factory_, createNetworkFilterChain(_, _)).WillRepeatedly(Return(true)); + Network::MockListenerFilter* test_filter = new Network::MockListenerFilter(); + EXPECT_CALL(*test_filter, destroy_()); + EXPECT_CALL(factory_, createListenerFilterChain(_)) + .WillRepeatedly(Invoke([&](Network::ListenerFilterManager& manager) -> bool { + manager.addAcceptFilter(listener_filter_matcher_, Network::ListenerFilterPtr{test_filter}); + return true; + })); + EXPECT_CALL(*test_filter, onAccept(_)) + .WillRepeatedly(Invoke([&](Network::ListenerFilterCallbacks&) -> Network::FilterStatus { + return Network::FilterStatus::Continue; + })); + + // For listener 2, verify its connection limit is independent of listener 1. + + // We expect that listener 2 accepts the connection, so there will be a call to + // createServerConnection and active cx should increase, while cx overflow remains the same. + listener_callbacks2->onAccept( + Network::ConnectionSocketPtr{new NiceMock()}); + EXPECT_EQ(0, handler_->numConnections()); + EXPECT_EQ(0, TestUtility::findCounter(stats_store_, "downstream_cx_total")->value()); + EXPECT_EQ(0, TestUtility::findGauge(stats_store_, "downstream_cx_active")->value()); + EXPECT_EQ(1, TestUtility::findCounter(stats_store_, "downstream_cx_overflow")->value()); + + // For listener 1, verify connections are limited after one goes active. + + // First connection attempt should result in an active connection being created. + auto conn1 = new NiceMock(); + EXPECT_CALL(dispatcher_, createServerConnection_()).WillOnce(Return(conn1)); + listener_callbacks1->onAccept( + Network::ConnectionSocketPtr{new NiceMock()}); + EXPECT_EQ(1, handler_->numConnections()); + // Note that these stats are not the per-worker stats, but the per-listener stats. + EXPECT_EQ(1, TestUtility::findCounter(stats_store_, "downstream_cx_total")->value()); + EXPECT_EQ(1, TestUtility::findGauge(stats_store_, "downstream_cx_active")->value()); + EXPECT_EQ(1, TestUtility::findCounter(stats_store_, "downstream_cx_overflow")->value()); + + // Don't expect server connection to be created, should be instantly closed and increment + // overflow stat. + listener_callbacks1->onAccept( + Network::ConnectionSocketPtr{new NiceMock()}); + EXPECT_EQ(1, handler_->numConnections()); + EXPECT_EQ(1, TestUtility::findCounter(stats_store_, "downstream_cx_total")->value()); + EXPECT_EQ(1, TestUtility::findGauge(stats_store_, "downstream_cx_active")->value()); + EXPECT_EQ(2, TestUtility::findCounter(stats_store_, "downstream_cx_overflow")->value()); + + // Check behavior again for good measure. 
+ listener_callbacks1->onAccept( + Network::ConnectionSocketPtr{new NiceMock()}); + EXPECT_EQ(1, handler_->numConnections()); + EXPECT_EQ(1, TestUtility::findCounter(stats_store_, "downstream_cx_total")->value()); + EXPECT_EQ(1, TestUtility::findGauge(stats_store_, "downstream_cx_active")->value()); + EXPECT_EQ(3, TestUtility::findCounter(stats_store_, "downstream_cx_overflow")->value()); + + EXPECT_CALL(*listener1, onDestroy()); + EXPECT_CALL(*listener2, onDestroy()); +} + TEST_F(ConnectionHandlerTest, RemoveListener) { InSequence s; diff --git a/test/test_common/logging.cc b/test/test_common/logging.cc index 30da5cfacaef..ba604854777c 100644 --- a/test/test_common/logging.cc +++ b/test/test_common/logging.cc @@ -2,6 +2,8 @@ #include "common/common/assert.h" +#include "absl/synchronization/mutex.h" + namespace Envoy { LogLevelSetter::LogLevelSetter(spdlog::level::level_enum log_level) { @@ -27,6 +29,8 @@ LogRecordingSink::~LogRecordingSink() = default; void LogRecordingSink::log(absl::string_view msg) { previous_delegate()->log(msg); + + absl::MutexLock ml(&mtx_); messages_.push_back(std::string(msg)); } diff --git a/test/test_common/logging.h b/test/test_common/logging.h index 686ef5618cca..7a080d903d58 100644 --- a/test/test_common/logging.h +++ b/test/test_common/logging.h @@ -8,6 +8,7 @@ #include "absl/strings/str_join.h" #include "absl/strings/str_split.h" +#include "absl/synchronization/mutex.h" #include "spdlog/spdlog.h" namespace Envoy { @@ -58,7 +59,8 @@ class LogRecordingSink : public Logger::SinkDelegate { const std::vector& messages() const { return messages_; } private: - std::vector messages_; + absl::Mutex mtx_; + std::vector messages_ ABSL_GUARDED_BY(mtx_); }; using StringPair = std::pair; From 542f84c66e9f6479bc31c6f53157c60472b25240 Mon Sep 17 00:00:00 2001 From: Tony Allen Date: Thu, 4 Jun 2020 14:43:20 -0700 Subject: [PATCH 487/909] overload: Runtime configurable global connection limits (#147) Signed-off-by: Tony Allen --- .../configuration/best_practices/edge.rst | 5 +- docs/root/configuration/listeners/stats.rst | 1 + .../overload_manager/overload_manager.rst | 24 ++++ .../faq/configuration/resource_limits.rst | 16 ++- docs/root/version_history/v1.14.2.rst | 2 + include/envoy/network/listener.h | 5 + source/common/network/BUILD | 1 + source/common/network/listen_socket_impl.cc | 2 + source/common/network/listen_socket_impl.h | 16 ++- source/common/network/listener_impl.cc | 31 +++++ source/common/network/listener_impl.h | 9 ++ source/server/connection_handler_impl.h | 4 +- source/server/server.cc | 10 ++ test/common/network/BUILD | 4 +- test/common/network/dns_impl_test.cc | 2 + test/common/network/listener_impl_test.cc | 67 ++++++++++ test/integration/BUILD | 15 +++ test/integration/cx_limit_integration_test.cc | 126 ++++++++++++------ test/mocks/network/mocks.h | 1 + 19 files changed, 289 insertions(+), 52 deletions(-) diff --git a/docs/root/configuration/best_practices/edge.rst b/docs/root/configuration/best_practices/edge.rst index d35b7ac8a1f7..d9b4f440afbd 100644 --- a/docs/root/configuration/best_practices/edge.rst +++ b/docs/root/configuration/best_practices/edge.rst @@ -24,7 +24,8 @@ HTTP proxies should additionally configure: * :ref:`HTTP/2 initial stream window size limit ` to 64 KiB, * :ref:`HTTP/2 initial connection window size limit ` to 1 MiB. * :ref:`headers_with_underscores_action setting ` to REJECT_REQUEST, to protect upstream services that treat '_' and '-' as interchangeable. -* :ref:`Connection limits. ` +* :ref:`Listener connection limits. 
` +* :ref:`Global downstream connection limits `. The following is a YAML example of the above recommendation (taken from the :ref:`Google VRP ` edge server configuration): @@ -132,3 +133,5 @@ The following is a YAML example of the above recommendation (taken from the :ref listener: example_listener_name: connection_limit: 10000 + overload: + global_downstream_max_connections: 50000 diff --git a/docs/root/configuration/listeners/stats.rst b/docs/root/configuration/listeners/stats.rst index 0b7936e4b2b4..ff70567aac8e 100644 --- a/docs/root/configuration/listeners/stats.rst +++ b/docs/root/configuration/listeners/stats.rst @@ -19,6 +19,7 @@ Every listener has a statistics tree rooted at *listener.
.* with the fo downstream_cx_overflow, Counter, Total connections rejected due to enforcement of listener connection limit downstream_pre_cx_timeout, Counter, Sockets that timed out during listener filter processing downstream_pre_cx_active, Gauge, Sockets currently undergoing listener filter processing + global_cx_overflow, Counter, Total connections rejected due to enforecement of the global connection limit no_filter_chain_match, Counter, Total connections that didn't match any filter chain ssl.connection_error, Counter, Total TLS connection errors not including failed certificate verifications ssl.handshake, Counter, Total successful TLS connection handshakes diff --git a/docs/root/configuration/operations/overload_manager/overload_manager.rst b/docs/root/configuration/operations/overload_manager/overload_manager.rst index 8d28935536e2..2dd2e7fe5cc7 100644 --- a/docs/root/configuration/operations/overload_manager/overload_manager.rst +++ b/docs/root/configuration/operations/overload_manager/overload_manager.rst @@ -54,6 +54,30 @@ The following overload actions are supported: envoy.overload_actions.stop_accepting_connections, Envoy will stop accepting new network connections on its configured listeners envoy.overload_actions.shrink_heap, Envoy will periodically try to shrink the heap by releasing free memory to the system +Limiting Active Connections +--------------------------- + +Currently, the only supported way to limit the total number of active connections allowed across all +listeners is via specifying an integer through the runtime key +``overload.global_downstream_max_connections``. The connection limit is recommended to be less than +half of the system's file descriptor limit, to account for upstream connections, files, and other +usage of file descriptors. +If the value is unspecified, there is no global limit on the number of active downstream connections +and Envoy will emit a warning indicating this at startup. To disable the warning without setting a +limit on the number of active downstream connections, the runtime value may be set to a very large +limit (~2e9). + +If it is desired to only limit the number of downstream connections for a particular listener, +per-listener limits can be set via the :ref:`listener configuration `. + +One may simultaneously specify both per-listener and global downstream connection limits and the +conditions will be enforced independently. For instance, if it is known that a particular listener +should have a smaller number of open connections than others, one may specify a smaller connection +limit for that specific listener and allow the global limit to enforce resource utilization among +all listeners. + +An example configuration can be found in the :ref:`edge best practices document `. + Statistics ---------- diff --git a/docs/root/faq/configuration/resource_limits.rst b/docs/root/faq/configuration/resource_limits.rst index c20c64929b10..214096486eb6 100644 --- a/docs/root/faq/configuration/resource_limits.rst +++ b/docs/root/faq/configuration/resource_limits.rst @@ -3,12 +3,16 @@ How does Envoy prevent file descriptor exhaustion? ================================================== -:ref:`Per-listener connection limits ` may be configured as an upper bound on -the number of active connections a particular listener will accept. The listener may accept more -connections than the configured value on the order of the number of worker threads. 
On Unix-based -systems, it is recommended to keep the sum of all connection limits less than half of the system's -file descriptor limit to account for upstream connections, files, and other usage of file -descriptors. +:ref:`Per-listener connection limits ` may be configured as an upper bound +on the number of active connections a particular listener will accept. The listener may accept more +connections than the configured value on the order of the number of worker threads. + +In addition, one may configure a :ref:`global limit ` on the number of +connections that will apply across all listeners. + +On Unix-based systems, it is recommended to keep the sum of all connection limits less than half of +the system's file descriptor limit to account for upstream connections, files, and other usage of +file descriptors. .. note:: diff --git a/docs/root/version_history/v1.14.2.rst b/docs/root/version_history/v1.14.2.rst index 1dce099f5cdf..dade825cfe4c 100644 --- a/docs/root/version_history/v1.14.2.rst +++ b/docs/root/version_history/v1.14.2.rst @@ -7,3 +7,5 @@ Changes * http: fixed CVE-2020-11080 by rejecting HTTP/2 SETTINGS frames with too many parameters. * listener: Add runtime support for `per-listener limits ` on active/accepted connections. +* overload management: Add runtime support for :ref:`global limits ` + on active/accepted connections. diff --git a/include/envoy/network/listener.h b/include/envoy/network/listener.h index 2226a50f84fe..373f25caaf2c 100644 --- a/include/envoy/network/listener.h +++ b/include/envoy/network/listener.h @@ -168,6 +168,11 @@ class ListenerCallbacks { * @param socket supplies the socket that is moved into the callee. */ virtual void onAccept(ConnectionSocketPtr&& socket) PURE; + + /** + * Called when a new connection is rejected. 
+ */ + virtual void onReject() PURE; }; /** diff --git a/source/common/network/BUILD b/source/common/network/BUILD index 1121ac26b16a..e97330223d37 100644 --- a/source/common/network/BUILD +++ b/source/common/network/BUILD @@ -224,6 +224,7 @@ envoy_cc_library( "//include/envoy/event:file_event_interface", "//include/envoy/network:exception_interface", "//include/envoy/network:listener_interface", + "//include/envoy/runtime:runtime_interface", "//include/envoy/stats:stats_interface", "//include/envoy/stats:stats_macros", "//source/common/buffer:buffer_lib", diff --git a/source/common/network/listen_socket_impl.cc b/source/common/network/listen_socket_impl.cc index b0f6d4d2254f..ae8dab60227c 100644 --- a/source/common/network/listen_socket_impl.cc +++ b/source/common/network/listen_socket_impl.cc @@ -72,5 +72,7 @@ UdsListenSocket::UdsListenSocket(IoHandlePtr&& io_handle, const Address::InstanceConstSharedPtr& address) : ListenSocketImpl(std::move(io_handle), address) {} +std::atomic AcceptedSocketImpl::global_accepted_socket_count_; + } // namespace Network } // namespace Envoy diff --git a/source/common/network/listen_socket_impl.h b/source/common/network/listen_socket_impl.h index a77ccefbd78a..c0786536a67e 100644 --- a/source/common/network/listen_socket_impl.h +++ b/source/common/network/listen_socket_impl.h @@ -143,7 +143,21 @@ class AcceptedSocketImpl : public ConnectionSocketImpl { public: AcceptedSocketImpl(IoHandlePtr&& io_handle, const Address::InstanceConstSharedPtr& local_address, const Address::InstanceConstSharedPtr& remote_address) - : ConnectionSocketImpl(std::move(io_handle), local_address, remote_address) {} + : ConnectionSocketImpl(std::move(io_handle), local_address, remote_address) { + ++global_accepted_socket_count_; + } + + ~AcceptedSocketImpl() override { + ASSERT(global_accepted_socket_count_.load() > 0); + --global_accepted_socket_count_; + } + + // TODO (tonya11en): Global connection count tracking is temporarily performed via a static + // variable until the logic is moved into the overload manager. + static uint64_t acceptedSocketCount() { return global_accepted_socket_count_.load(); } + +private: + static std::atomic global_accepted_socket_count_; }; // ConnectionSocket used with client connections. diff --git a/source/common/network/listener_impl.cc b/source/common/network/listener_impl.cc index 045f1f2ad759..96c1eded88dd 100644 --- a/source/common/network/listener_impl.cc +++ b/source/common/network/listener_impl.cc @@ -19,6 +19,30 @@ namespace Envoy { namespace Network { +const absl::string_view ListenerImpl::GlobalMaxCxRuntimeKey = + "overload.global_downstream_max_connections"; + +bool ListenerImpl::rejectCxOverGlobalLimit() { + // Enforce the global connection limit if necessary, immediately closing the accepted connection. + Runtime::Loader* runtime = Runtime::LoaderSingleton::getExisting(); + + if (runtime == nullptr) { + // The runtime singleton won't exist in most unit tests that do not need global downstream limit + // enforcement. Therefore, there is no need to enforce limits if the singleton doesn't exist. + // TODO(tonya11en): Revisit this once runtime is made globally available. + return false; + } + + // If the connection limit is not set, don't limit the connections, but still track them. + // TODO(tonya11en): In integration tests, threadsafeSnapshot is necessary since the FakeUpstreams + // use a listener and do not run in a worker thread. 
In practice, this code path will always be + // run on a worker thread, but to prevent failed assertions in test environments, threadsafe + // snapshots must be used. This must be revisited. + const uint64_t global_cx_limit = runtime->threadsafeSnapshot()->getInteger( + GlobalMaxCxRuntimeKey, std::numeric_limits::max()); + return AcceptedSocketImpl::acceptedSocketCount() >= global_cx_limit; +} + void ListenerImpl::listenCallback(evconnlistener*, evutil_socket_t fd, sockaddr* remote_addr, int remote_addr_len, void* arg) { ListenerImpl* listener = static_cast(arg); @@ -26,6 +50,13 @@ void ListenerImpl::listenCallback(evconnlistener*, evutil_socket_t fd, sockaddr* // Wrap raw socket fd in IoHandle. IoHandlePtr io_handle = SocketInterfaceSingleton::get().socket(fd); + if (rejectCxOverGlobalLimit()) { + // The global connection limit has been reached. + io_handle->close(); + listener->cb_.onReject(); + return; + } + // Get the local address from the new socket if the listener is listening on IP ANY // (e.g., 0.0.0.0 for IPv4) (local_address_ is nullptr in this case). const Address::InstanceConstSharedPtr& local_address = diff --git a/source/common/network/listener_impl.h b/source/common/network/listener_impl.h index 953d82723b8f..c431d77f4610 100644 --- a/source/common/network/listener_impl.h +++ b/source/common/network/listener_impl.h @@ -1,5 +1,8 @@ #pragma once +#include "envoy/runtime/runtime.h" + +#include "absl/strings/string_view.h" #include "base_listener_impl.h" namespace Envoy { @@ -17,6 +20,8 @@ class ListenerImpl : public BaseListenerImpl { void disable() override; void enable() override; + static const absl::string_view GlobalMaxCxRuntimeKey; + protected: void setupServerSocket(Event::DispatcherImpl& dispatcher, Socket& socket); @@ -27,6 +32,10 @@ class ListenerImpl : public BaseListenerImpl { int remote_addr_len, void* arg); static void errorCallback(evconnlistener* listener, void* context); + // Returns true if global connection limit has been reached and the accepted socket should be + // rejected/closed. If the accepted socket is to be admitted, false is returned. 
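// Illustrative sketch only, not part of this patch: it condenses the accept-path pattern above
// into a standalone form. A process-wide atomic counter tracks accepted sockets via RAII, and the
// accept callback consults a limit (a stand-in for the runtime key lookup in listener_impl.cc)
// before admitting a new connection.
#include <atomic>
#include <cstdint>
#include <limits>

class CountedAcceptedSocket {
public:
  CountedAcceptedSocket() { ++global_count_; }
  ~CountedAcceptedSocket() { --global_count_; }
  static uint64_t count() { return global_count_.load(); }

private:
  static std::atomic<uint64_t> global_count_;
};

std::atomic<uint64_t> CountedAcceptedSocket::global_count_{0};

// Stand-in for reading overload.global_downstream_max_connections; an unset key means no limit.
uint64_t configuredGlobalLimit() { return std::numeric_limits<uint64_t>::max(); }

// Equivalent of rejectCxOverGlobalLimit(): true means close the socket and count an overflow
// instead of handing the connection to a worker.
bool shouldRejectNewConnection() {
  return CountedAcceptedSocket::count() >= configuredGlobalLimit();
}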
+ static bool rejectCxOverGlobalLimit(); + Event::Libevent::ListenerPtr listener_; }; diff --git a/source/server/connection_handler_impl.h b/source/server/connection_handler_impl.h index 90cf2cacab7e..df6fa758bd5b 100644 --- a/source/server/connection_handler_impl.h +++ b/source/server/connection_handler_impl.h @@ -28,8 +28,9 @@ namespace Server { #define ALL_LISTENER_STATS(COUNTER, GAUGE, HISTOGRAM) \ COUNTER(downstream_cx_destroy) \ - COUNTER(downstream_cx_total) \ COUNTER(downstream_cx_overflow) \ + COUNTER(downstream_cx_total) \ + COUNTER(downstream_global_cx_overflow) \ COUNTER(downstream_pre_cx_timeout) \ COUNTER(no_filter_chain_match) \ GAUGE(downstream_cx_active, Accumulate) \ @@ -129,6 +130,7 @@ class ConnectionHandlerImpl : public Network::ConnectionHandler, // Network::ListenerCallbacks void onAccept(Network::ConnectionSocketPtr&& socket) override; + void onReject() override { stats_.downstream_global_cx_overflow_.inc(); } // ActiveListenerImplBase Network::Listener* listener() override { return listener_.get(); } diff --git a/source/server/server.cc b/source/server/server.cc index 2f75353c64eb..f17ae5b7a5ef 100644 --- a/source/server/server.cc +++ b/source/server/server.cc @@ -34,6 +34,7 @@ #include "common/local_info/local_info_impl.h" #include "common/memory/stats.h" #include "common/network/address_impl.h" +#include "common/network/listener_impl.h" #include "common/protobuf/utility.h" #include "common/router/rds_impl.h" #include "common/runtime/runtime_impl.h" @@ -526,6 +527,15 @@ void InstanceImpl::onRuntimeReady() { *config_.clusterManager(), *local_info_, *admin_, *singleton_manager_, thread_local_, messageValidationContext().dynamicValidationVisitor(), *api_); } + + // If there is no global limit to the number of active connections, warn on startup. + // TODO (tonya11en): Move this functionality into the overload manager. + if (!runtime().snapshot().get(Network::ListenerImpl::GlobalMaxCxRuntimeKey)) { + ENVOY_LOG(warn, + "there is no configured limit to the number of allowed active connections. 
Set a " + "limit via the runtime key {}", + Network::ListenerImpl::GlobalMaxCxRuntimeKey); + } } void InstanceImpl::startWorkers() { diff --git a/test/common/network/BUILD b/test/common/network/BUILD index 403c7068859c..fd12d7478946 100644 --- a/test/common/network/BUILD +++ b/test/common/network/BUILD @@ -27,6 +27,7 @@ envoy_cc_test_library( "//test/test_common:environment_lib", "//test/test_common:network_utility_lib", "//test/test_common:simulated_time_system_lib", + "//test/test_common:test_runtime_lib", "//test/test_common:utility_lib", ], ) @@ -139,11 +140,11 @@ envoy_cc_test( "//test/mocks/buffer:buffer_mocks", "//test/mocks/network:network_mocks", "//test/mocks/ratelimit:ratelimit_mocks", - "//test/mocks/runtime:runtime_mocks", "//test/mocks/server:server_mocks", "//test/mocks/tracing:tracing_mocks", "//test/mocks/upstream:host_mocks", "//test/mocks/upstream:upstream_mocks", + "//test/test_common:test_runtime_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/extensions/filters/network/ratelimit/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/network/tcp_proxy/v3:pkg_cc_proto", @@ -190,6 +191,7 @@ envoy_cc_test( "//source/common/stats:stats_lib", "//test/common/network:listener_impl_test_base_lib", "//test/mocks/network:network_mocks", + "//test/mocks/runtime:runtime_mocks", "//test/mocks/server:server_mocks", "//test/test_common:environment_lib", "//test/test_common:network_utility_lib", diff --git a/test/common/network/dns_impl_test.cc b/test/common/network/dns_impl_test.cc index d7b21618cdc2..3b015ca2a94d 100644 --- a/test/common/network/dns_impl_test.cc +++ b/test/common/network/dns_impl_test.cc @@ -281,6 +281,8 @@ class TestDnsServer : public ListenerCallbacks { queries_.emplace_back(query); } + void onReject() override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } + void addHosts(const std::string& hostname, const IpList& ip, const RecordType& type) { if (type == RecordType::A) { hosts_a_[hostname] = ip; diff --git a/test/common/network/listener_impl_test.cc b/test/common/network/listener_impl_test.cc index f02d27ea09d2..732ccf2460e3 100644 --- a/test/common/network/listener_impl_test.cc +++ b/test/common/network/listener_impl_test.cc @@ -10,6 +10,7 @@ #include "test/mocks/server/mocks.h" #include "test/test_common/environment.h" #include "test/test_common/network_utility.h" +#include "test/test_common/test_runtime.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" @@ -139,6 +140,72 @@ TEST_P(ListenerImplTest, UseActualDst) { dispatcher_->run(Event::Dispatcher::RunType::Block); } +TEST_P(ListenerImplTest, GlobalConnectionLimitEnforcement) { + // Required to manipulate runtime values when there is no test server. 
+ TestScopedRuntime scoped_runtime; + + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"overload.global_downstream_max_connections", "2"}}); + auto socket = std::make_shared( + Network::Test::getCanonicalLoopbackAddress(version_), nullptr, true); + Network::MockListenerCallbacks listener_callbacks; + Network::MockConnectionHandler connection_handler; + Network::ListenerPtr listener = dispatcher_->createListener(socket, listener_callbacks, true); + + std::vector client_connections; + std::vector server_connections; + StreamInfo::StreamInfoImpl stream_info(dispatcher_->timeSource()); + EXPECT_CALL(listener_callbacks, onAccept_(_)) + .WillRepeatedly(Invoke([&](Network::ConnectionSocketPtr& accepted_socket) -> void { + server_connections.emplace_back(dispatcher_->createServerConnection( + std::move(accepted_socket), Network::Test::createRawBufferSocket(), stream_info)); + dispatcher_->exit(); + })); + + auto initiate_connections = [&](const int count) { + for (int i = 0; i < count; ++i) { + client_connections.emplace_back(dispatcher_->createClientConnection( + socket->localAddress(), Network::Address::InstanceConstSharedPtr(), + Network::Test::createRawBufferSocket(), nullptr)); + client_connections.back()->connect(); + } + }; + + initiate_connections(5); + EXPECT_CALL(listener_callbacks, onReject()).Times(3); + dispatcher_->run(Event::Dispatcher::RunType::Block); + + // We expect any server-side connections that get created to populate 'server_connections'. + EXPECT_EQ(2, server_connections.size()); + + // Let's increase the allowed connections and try sending more connections. + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"overload.global_downstream_max_connections", "3"}}); + initiate_connections(5); + EXPECT_CALL(listener_callbacks, onReject()).Times(4); + dispatcher_->run(Event::Dispatcher::RunType::Block); + + EXPECT_EQ(3, server_connections.size()); + + // Clear the limit and verify there's no longer a limit. 
+ Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"overload.global_downstream_max_connections", ""}}); + initiate_connections(10); + dispatcher_->run(Event::Dispatcher::RunType::Block); + + EXPECT_EQ(13, server_connections.size()); + + for (const auto& conn : client_connections) { + conn->close(ConnectionCloseType::NoFlush); + } + for (const auto& conn : server_connections) { + conn->close(ConnectionCloseType::NoFlush); + } + + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"overload.global_downstream_max_connections", ""}}); +} + TEST_P(ListenerImplTest, WildcardListenerUseActualDst) { auto socket = std::make_shared(Network::Test::getAnyAddress(version_), nullptr, true); diff --git a/test/integration/BUILD b/test/integration/BUILD index 4008f8681eca..5b4c57e572d5 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -1305,6 +1305,21 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "cx_limit_integration_test", + srcs = ["cx_limit_integration_test.cc"], + deps = [ + ":http_integration_lib", + "//include/envoy/network:filter_interface", + "//include/envoy/registry", + "//source/extensions/filters/network/tcp_proxy:config", + "//test/config:utility_lib", + "//test/test_common:logging_lib", + "//test/test_common:simulated_time_system_lib", + "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", + ], +) + envoy_cc_test( name = "local_reply_integration_test", srcs = [ diff --git a/test/integration/cx_limit_integration_test.cc b/test/integration/cx_limit_integration_test.cc index df668624ba27..6ffb4952ae63 100644 --- a/test/integration/cx_limit_integration_test.cc +++ b/test/integration/cx_limit_integration_test.cc @@ -31,64 +31,82 @@ class ConnectionLimitIntegrationTest : public testing::TestWithParam init_func, std::string&& check_stat) { + init_func(); -TEST_P(ConnectionLimitIntegrationTest, TestListenerLimit) { - setListenerLimit(2); - initialize(); + std::vector tcp_clients; + std::vector raw_conns; - std::vector tcp_clients; - std::vector raw_conns; + tcp_clients.emplace_back(makeTcpConnection(lookupPort("listener_0"))); + raw_conns.emplace_back(); + ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(raw_conns.back())); + ASSERT_TRUE(tcp_clients.back()->connected()); - tcp_clients.emplace_back(makeTcpConnection(lookupPort("listener_0"))); - raw_conns.emplace_back(); - ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(raw_conns.back())); - ASSERT_TRUE(tcp_clients.back()->connected()); + tcp_clients.emplace_back(makeTcpConnection(lookupPort("listener_0"))); + raw_conns.emplace_back(); + ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(raw_conns.back())); + ASSERT_TRUE(tcp_clients.back()->connected()); - tcp_clients.emplace_back(makeTcpConnection(lookupPort("listener_0"))); - raw_conns.emplace_back(); - ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(raw_conns.back())); - ASSERT_TRUE(tcp_clients.back()->connected()); + tcp_clients.emplace_back(makeTcpConnection(lookupPort("listener_0"))); + raw_conns.emplace_back(); + ASSERT_FALSE(fake_upstreams_[0]->waitForRawConnection(raw_conns.back())); + tcp_clients.back()->waitForDisconnect(); - tcp_clients.emplace_back(makeTcpConnection(lookupPort("listener_0"))); - raw_conns.emplace_back(); - ASSERT_FALSE(fake_upstreams_[0]->waitForRawConnection(raw_conns.back())); - tcp_clients.back()->waitForDisconnect(); + // Get rid of the client that failed to connect. + tcp_clients.back()->close(); + tcp_clients.pop_back(); - // Get rid of the client that failed to connect. 
- tcp_clients.back()->close(); - tcp_clients.pop_back(); + // Close the first connection that was successful so that we can open a new successful + // connection. + tcp_clients.front()->close(); + ASSERT_TRUE(raw_conns.front()->close()); + ASSERT_TRUE(raw_conns.front()->waitForDisconnect()); - // Close the first connection that was successful so that we can open a new successful connection. - tcp_clients.front()->close(); - ASSERT_TRUE(raw_conns.front()->close()); - ASSERT_TRUE(raw_conns.front()->waitForDisconnect()); + tcp_clients.emplace_back(makeTcpConnection(lookupPort("listener_0"))); + raw_conns.emplace_back(); + ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(raw_conns.back())); + ASSERT_TRUE(tcp_clients.back()->connected()); - tcp_clients.emplace_back(makeTcpConnection(lookupPort("listener_0"))); - raw_conns.emplace_back(); - ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(raw_conns.back())); - ASSERT_TRUE(tcp_clients.back()->connected()); + const bool isV4 = (version_ == Network::Address::IpVersion::v4); + auto local_address = isV4 ? Network::Utility::getCanonicalIpv4LoopbackAddress() + : Network::Utility::getIpv6LoopbackAddress(); - const bool isV4 = (version_ == Network::Address::IpVersion::v4); - auto local_address = isV4 ? Network::Utility::getCanonicalIpv4LoopbackAddress() - : Network::Utility::getIpv6LoopbackAddress(); + const std::string counter_prefix = (isV4 ? "listener.127.0.0.1_0." : "listener.[__1]_0."); - const std::string counter_name = isV4 ? ("listener.127.0.0.1_0.downstream_cx_overflow") - : ("listener.[__1]_0.downstream_cx_overflow"); + test_server_->waitForCounterEq(counter_prefix + check_stat, 1); - test_server_->waitForCounterEq(counter_name, 1); + for (auto& tcp_client : tcp_clients) { + tcp_client->close(); + } - for (auto& tcp_client : tcp_clients) { - tcp_client->close(); + tcp_clients.clear(); + raw_conns.clear(); } +}; + +INSTANTIATE_TEST_SUITE_P(IpVersions, ConnectionLimitIntegrationTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); - tcp_clients.clear(); - raw_conns.clear(); +TEST_P(ConnectionLimitIntegrationTest, TestListenerLimit) { + std::function init_func = [this]() { + setListenerLimit(2); + initialize(); + }; + + doTest(init_func, "downstream_cx_overflow"); +} + +TEST_P(ConnectionLimitIntegrationTest, TestEmptyGlobalCxRuntimeLimit) { + const std::string log_line = "no configured limit to the number of allowed active connections."; + EXPECT_LOG_CONTAINS("warn", log_line, { initialize(); }); } TEST_P(ConnectionLimitIntegrationTest, TestEmptyListenerRuntimeLimit) { @@ -102,5 +120,29 @@ TEST_P(ConnectionLimitIntegrationTest, TestEmptyListenerRuntimeLimit) { }); } +TEST_P(ConnectionLimitIntegrationTest, TestGlobalLimit) { + std::function init_func = [this]() { + // Includes twice the number of connections expected because the tracking is performed via a + // static variable and the fake upstream has a listener. This causes upstream connections to the + // fake upstream to also be tracked as part of the global downstream connection tracking. + setGlobalLimit("4"); + initialize(); + }; + + doTest(init_func, "downstream_global_cx_overflow"); +} + +TEST_P(ConnectionLimitIntegrationTest, TestBothLimits) { + std::function init_func = [this]() { + // Setting the listener limit to a much higher value and making sure the right stat gets + // incremented when both limits are set. 
+ setGlobalLimit("4"); + setListenerLimit(100); + initialize(); + }; + + doTest(init_func, "downstream_global_cx_overflow"); +} + } // namespace } // namespace Envoy diff --git a/test/mocks/network/mocks.h b/test/mocks/network/mocks.h index 185e7b1744be..6aa2d2d7c83b 100644 --- a/test/mocks/network/mocks.h +++ b/test/mocks/network/mocks.h @@ -130,6 +130,7 @@ class MockListenerCallbacks : public ListenerCallbacks { void onAccept(ConnectionSocketPtr&& socket) override { onAccept_(socket); } MOCK_METHOD(void, onAccept_, (ConnectionSocketPtr & socket)); + MOCK_METHOD(void, onReject, ()); }; class MockUdpListenerCallbacks : public UdpListenerCallbacks { From 7ca28ff7d46454ae930e193d97b7d08156b1ba59 Mon Sep 17 00:00:00 2001 From: antonio Date: Fri, 5 Jun 2020 15:37:27 -0400 Subject: [PATCH 488/909] [http1] Include request URL in request header size computation, and reject partial headers that exceed configured limits (#145) Signed-off-by: antonio --- source/common/http/http1/codec_impl.cc | 44 ++++++++---- source/common/http/http1/codec_impl.h | 30 ++++++-- test/common/http/http1/codec_impl_test.cc | 69 ++++++++++++++++--- test/integration/http_integration.cc | 38 ++++++++++ test/integration/http_integration.h | 1 + test/integration/protocol_integration_test.cc | 10 +++ 6 files changed, 164 insertions(+), 28 deletions(-) diff --git a/source/common/http/http1/codec_impl.cc b/source/common/http/http1/codec_impl.cc index 1e821f7a0dec..96dd8a939ea7 100644 --- a/source/common/http/http1/codec_impl.cc +++ b/source/common/http/http1/codec_impl.cc @@ -503,6 +503,22 @@ void ConnectionImpl::completeLastHeader() { ASSERT(current_header_value_.empty()); } +uint32_t ConnectionImpl::getHeadersSize() { + return current_header_field_.size() + current_header_value_.size() + + headersOrTrailers().byteSize(); +} + +void ConnectionImpl::checkMaxHeadersSize() { + const uint32_t total = getHeadersSize(); + if (total > (max_headers_kb_ * 1024)) { + const absl::string_view header_type = + processing_trailers_ ? Http1HeaderTypes::get().Trailers : Http1HeaderTypes::get().Headers; + error_code_ = Http::Code::RequestHeaderFieldsTooLarge; + sendProtocolError(Http1ResponseCodeDetails::get().HeadersTooLarge); + throw CodecProtocolException(absl::StrCat(header_type, " size exceeds limit")); + } +} + bool ConnectionImpl::maybeDirectDispatch(Buffer::Instance& data) { if (!handling_upgrade_) { // Only direct dispatch for Upgrade requests. 
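To make the intent of the refactor above concrete, here is a condensed, standalone sketch (names simplified; not the actual codec class) of the size accounting that now runs on every partial header field, header value, and URL fragment rather than only after a complete header value has been parsed:

#include <cstdint>
#include <stdexcept>
#include <string>

// Simplified model of the HTTP/1 codec's header-size accounting. The bytes already committed to
// the header map plus any partially parsed field/value are checked against the configured limit
// on every parser callback, so oversized partial headers are rejected without being buffered.
struct HeaderSizeAccounting {
  uint32_t max_headers_kb{60};   // default limit referenced in the tests above
  uint32_t committed_bytes{0};   // equivalent of headersOrTrailers().byteSize()
  std::string current_field;
  std::string current_value;

  uint32_t totalSize() const {
    return committed_bytes +
           static_cast<uint32_t>(current_field.size() + current_value.size());
  }

  // Mirrors checkMaxHeadersSize(); the real codec sends a 431 / protocol error rather than
  // throwing a plain runtime_error.
  void checkLimit() const {
    if (totalSize() > max_headers_kb * 1024) {
      throw std::runtime_error("headers size exceeds limit");
    }
  }
};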
@@ -581,12 +597,15 @@ void ConnectionImpl::onHeaderField(const char* data, size_t length) { } processing_trailers_ = true; header_parsing_state_ = HeaderParsingState::Field; + allocTrailers(); } if (header_parsing_state_ == HeaderParsingState::Value) { completeLastHeader(); } current_header_field_.append(data, length); + + checkMaxHeadersSize(); } void ConnectionImpl::onHeaderValue(const char* data, size_t length) { @@ -595,12 +614,7 @@ void ConnectionImpl::onHeaderValue(const char* data, size_t length) { return; } - if (processing_trailers_) { - maybeAllocTrailers(); - } - absl::string_view header_value{data, length}; - if (strict_header_validation_) { if (!Http::HeaderUtility::headerValueIsValid(header_value)) { ENVOY_CONN_LOG(debug, "invalid header value: {}", connection_, header_value); @@ -620,15 +634,7 @@ void ConnectionImpl::onHeaderValue(const char* data, size_t length) { } current_header_value_.append(header_value.data(), header_value.length()); - const uint32_t total = - current_header_field_.size() + current_header_value_.size() + headersOrTrailers().byteSize(); - if (total > (max_headers_kb_ * 1024)) { - const absl::string_view header_type = - processing_trailers_ ? Http1HeaderTypes::get().Trailers : Http1HeaderTypes::get().Headers; - error_code_ = Http::Code::RequestHeaderFieldsTooLarge; - sendProtocolError(Http1ResponseCodeDetails::get().HeadersTooLarge); - throw CodecProtocolException(absl::StrCat(header_type, " size exceeds limit")); - } + checkMaxHeadersSize(); } int ConnectionImpl::onHeadersCompleteBase() { @@ -786,6 +792,14 @@ ServerConnectionImpl::ServerConnectionImpl( Runtime::runtimeFeatureEnabled("envoy.reloadable_features.http1_flood_protection")), headers_with_underscores_action_(headers_with_underscores_action) {} +uint32_t ServerConnectionImpl::getHeadersSize() { + // Add in the the size of the request URL if processing request headers. + const uint32_t url_size = (!processing_trailers_ && active_request_.has_value()) + ? active_request_.value().request_url_.size() + : 0; + return url_size + ConnectionImpl::getHeadersSize(); +} + void ServerConnectionImpl::onEncodeComplete() { if (active_request_.value().remote_complete_) { // Only do this if remote is complete. If we are replying before the request is complete the @@ -918,6 +932,8 @@ void ServerConnectionImpl::onMessageBegin() { void ServerConnectionImpl::onUrl(const char* data, size_t length) { if (active_request_.has_value()) { active_request_.value().request_url_.append(data, length); + + checkMaxHeadersSize(); } } diff --git a/source/common/http/http1/codec_impl.h b/source/common/http/http1/codec_impl.h index 44e4282742c9..b18a482872d7 100644 --- a/source/common/http/http1/codec_impl.h +++ b/source/common/http/http1/codec_impl.h @@ -220,6 +220,20 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable& activeRequest() { return active_request_; } // ConnectionImpl void onMessageComplete() override; + // Add the size of the request_url to the reported header size when processing request headers. 
+ uint32_t getHeadersSize() override; private: /** @@ -462,9 +478,10 @@ class ServerConnectionImpl : public ServerConnection, public ConnectionImpl { } void allocHeaders() override { ASSERT(nullptr == absl::get(headers_or_trailers_)); + ASSERT(!processing_trailers_); headers_or_trailers_.emplace(RequestHeaderMapImpl::create()); } - void maybeAllocTrailers() override { + void allocTrailers() override { ASSERT(processing_trailers_); if (!absl::holds_alternative(headers_or_trailers_)) { headers_or_trailers_.emplace(RequestTrailerMapImpl::create()); @@ -547,9 +564,10 @@ class ClientConnectionImpl : public ClientConnection, public ConnectionImpl { } void allocHeaders() override { ASSERT(nullptr == absl::get(headers_or_trailers_)); + ASSERT(!processing_trailers_); headers_or_trailers_.emplace(ResponseHeaderMapImpl::create()); } - void maybeAllocTrailers() override { + void allocTrailers() override { ASSERT(processing_trailers_); if (!absl::holds_alternative(headers_or_trailers_)) { headers_or_trailers_.emplace(ResponseTrailerMapImpl::create()); @@ -567,9 +585,9 @@ class ClientConnectionImpl : public ClientConnection, public ConnectionImpl { bool ignore_message_complete_for_100_continue_{}; // TODO(mattklein123): This should be a member of PendingResponse but this change needs dedicated // thought as some of the reset and no header code paths make this difficult. Headers are - // populated on message begin. Trailers are populated on the first parsed trailer field (if - // trailers are enabled). The variant is reset to null headers on message complete for assertion - // purposes. + // populated on message begin. Trailers are populated when the switch to trailer processing is + // detected while parsing the first trailer field (if trailers are enabled). The variant is reset + // to null headers on message complete for assertion purposes. absl::variant headers_or_trailers_; // The default limit of 80 KiB is the vanilla http_parser behaviour. diff --git a/test/common/http/http1/codec_impl_test.cc b/test/common/http/http1/codec_impl_test.cc index 347a26780f44..e0c450f2f26c 100644 --- a/test/common/http/http1/codec_impl_test.cc +++ b/test/common/http/http1/codec_impl_test.cc @@ -238,7 +238,7 @@ void Http1ServerConnectionImplTest::testTrailersExceedLimit(std::string trailer_ "body\r\n0\r\n"); auto status = codec_->dispatch(buffer); EXPECT_TRUE(status.ok()); - buffer = Buffer::OwnedImpl(trailer_string + "\r\n\r\n"); + buffer = Buffer::OwnedImpl(trailer_string); if (enable_trailers) { EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _, _)); status = codec_->dispatch(buffer); @@ -2520,26 +2520,60 @@ TEST_F(Http1ClientConnectionImplTest, LowWatermarkDuringClose) { TEST_F(Http1ServerConnectionImplTest, LargeTrailersRejected) { // Default limit of 60 KiB - std::string long_string = "big: " + std::string(60 * 1024, 'q') + "\r\n"; + std::string long_string = "big: " + std::string(60 * 1024, 'q') + "\r\n\r\n\r\n"; + testTrailersExceedLimit(long_string, true); +} + +TEST_F(Http1ServerConnectionImplTest, LargeTrailerFieldRejected) { + // Construct partial headers with a long field name that exceeds the default limit of 60KiB. + std::string long_string = "bigfield" + std::string(60 * 1024, 'q'); testTrailersExceedLimit(long_string, true); } // Tests that the default limit for the number of request headers is 100. TEST_F(Http1ServerConnectionImplTest, ManyTrailersRejected) { // Send a request with 101 headers. 
- testTrailersExceedLimit(createHeaderFragment(101), true); + testTrailersExceedLimit(createHeaderFragment(101) + "\r\n\r\n", true); } TEST_F(Http1ServerConnectionImplTest, LargeTrailersRejectedIgnored) { // Default limit of 60 KiB - std::string long_string = "big: " + std::string(60 * 1024, 'q') + "\r\n"; + std::string long_string = "big: " + std::string(60 * 1024, 'q') + "\r\n\r\n\r\n"; + testTrailersExceedLimit(long_string, false); +} + +TEST_F(Http1ServerConnectionImplTest, LargeTrailerFieldRejectedIgnored) { + // Default limit of 60 KiB + std::string long_string = "bigfield" + std::string(60 * 1024, 'q') + ": value\r\n\r\n\r\n"; testTrailersExceedLimit(long_string, false); } // Tests that the default limit for the number of request headers is 100. TEST_F(Http1ServerConnectionImplTest, ManyTrailersIgnored) { // Send a request with 101 headers. - testTrailersExceedLimit(createHeaderFragment(101), false); + testTrailersExceedLimit(createHeaderFragment(101) + "\r\n\r\n", false); +} + +TEST_F(Http1ServerConnectionImplTest, LargeRequestUrlRejected) { + initialize(); + + std::string exception_reason; + NiceMock decoder; + Http::ResponseEncoder* response_encoder = nullptr; + EXPECT_CALL(callbacks_, newStream(_, _)) + .WillOnce(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& { + response_encoder = &encoder; + return decoder; + })); + + // Default limit of 60 KiB + std::string long_url = "/" + std::string(60 * 1024, 'q'); + Buffer::OwnedImpl buffer("GET " + long_url + " HTTP/1.1\r\n"); + + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(isCodecProtocolError(status)); + EXPECT_EQ(status.message(), "headers size exceeds limit"); + EXPECT_EQ("http1.headers_too_large", response_encoder->getStream().responseDetails()); } TEST_F(Http1ServerConnectionImplTest, LargeRequestHeadersRejected) { @@ -2631,8 +2665,27 @@ TEST_F(Http1ServerConnectionImplTest, ManyRequestHeadersAccepted) { testRequestHeadersAccepted(createHeaderFragment(150)); } -// Tests that response headers of 80 kB fails. -TEST_F(Http1ClientConnectionImplTest, LargeResponseHeadersRejected) { +// Tests that incomplete response headers of 80 kB header value fails. +TEST_F(Http1ClientConnectionImplTest, ResponseHeadersWithLargeValueRejected) { + initialize(); + + NiceMock response_decoder; + Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder); + TestRequestHeaderMapImpl headers{{":method", "GET"}, {":path", "/"}, {":authority", "host"}}; + request_encoder.encodeHeaders(headers, true); + + Buffer::OwnedImpl buffer("HTTP/1.1 200 OK\r\nContent-Length: 0\r\n"); + auto status = codec_->dispatch(buffer); + EXPECT_TRUE(status.ok()); + std::string long_header = "big: " + std::string(80 * 1024, 'q'); + buffer = Buffer::OwnedImpl(long_header); + status = codec_->dispatch(buffer); + EXPECT_TRUE(isCodecProtocolError(status)); + EXPECT_EQ(status.message(), "headers size exceeds limit"); +} + +// Tests that incomplete response headers with a 80 kB header field fails. 
+TEST_F(Http1ClientConnectionImplTest, ResponseHeadersWithLargeFieldRejected) { initialize(); NiceMock decoder; @@ -2644,7 +2697,7 @@ TEST_F(Http1ClientConnectionImplTest, LargeResponseHeadersRejected) { Buffer::OwnedImpl buffer("HTTP/1.1 200 OK\r\nContent-Length: 0\r\n"); auto status = codec_->dispatch(buffer); EXPECT_TRUE(status.ok()); - std::string long_header = "big: " + std::string(80 * 1024, 'q') + "\r\n"; + std::string long_header = "big: " + std::string(80 * 1024, 'q'); buffer = Buffer::OwnedImpl(long_header); status = codec_->dispatch(buffer); EXPECT_TRUE(isCodecProtocolError(status)); diff --git a/test/integration/http_integration.cc b/test/integration/http_integration.cc index 01c6de72ad65..ca5fca51e403 100644 --- a/test/integration/http_integration.cc +++ b/test/integration/http_integration.cc @@ -958,6 +958,44 @@ void HttpIntegrationTest::testTwoRequests(bool network_backup) { EXPECT_EQ(1024U, response->body().size()); } +void HttpIntegrationTest::testLargeRequestUrl(uint32_t url_size, uint32_t max_headers_size) { + // `size` parameter dictates the size of each header that will be added to the request and `count` + // parameter is the number of headers to be added. The actual request byte size will exceed `size` + // due to the keys and other headers. The actual request header count will exceed `count` by four + // due to default headers. + + config_helper_.addConfigModifier( + [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) -> void { hcm.mutable_max_request_headers_kb()->set_value(max_headers_size); }); + max_request_headers_kb_ = max_headers_size; + + Http::TestRequestHeaderMapImpl big_headers{{":method", "GET"}, + {":path", "/" + std::string(url_size * 1024, 'a')}, + {":scheme", "http"}, + {":authority", "host"}}; + + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + if (url_size >= max_headers_size) { + // header size includes keys too, so expect rejection when equal + auto encoder_decoder = codec_client_->startRequest(big_headers); + auto response = std::move(encoder_decoder.second); + + if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { + codec_client_->waitForDisconnect(); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("431", response->headers().Status()->value().getStringView()); + } else { + response->waitForReset(); + codec_client_->close(); + } + } else { + auto response = sendRequestAndWaitForResponse(big_headers, 0, default_response_headers_, 0); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + } +} + void HttpIntegrationTest::testLargeRequestHeaders(uint32_t size, uint32_t count, uint32_t max_size, uint32_t max_count) { // `size` parameter dictates the size of each header that will be added to the request and `count` diff --git a/test/integration/http_integration.h b/test/integration/http_integration.h index 25cad95bb37b..bc04a94be36a 100644 --- a/test/integration/http_integration.h +++ b/test/integration/http_integration.h @@ -196,6 +196,7 @@ class HttpIntegrationTest : public BaseIntegrationTest { void testLargeHeaders(Http::TestRequestHeaderMapImpl request_headers, Http::TestRequestTrailerMapImpl request_trailers, uint32_t size, uint32_t max_size); + void testLargeRequestUrl(uint32_t url_size, uint32_t max_headers_size); void testLargeRequestHeaders(uint32_t size, uint32_t count, uint32_t max_size = 60, uint32_t max_count = 100); void testLargeRequestTrailers(uint32_t size, uint32_t max_size = 60); diff --git 
a/test/integration/protocol_integration_test.cc b/test/integration/protocol_integration_test.cc index ccb9654c1b54..074181c73699 100644 --- a/test/integration/protocol_integration_test.cc +++ b/test/integration/protocol_integration_test.cc @@ -1352,6 +1352,16 @@ name: decode-headers-only EXPECT_EQ(0, upstream_request_->body().length()); } +TEST_P(DownstreamProtocolIntegrationTest, LargeRequestUrlRejected) { + // Send one 95 kB URL with limit 60 kB headers. + testLargeRequestUrl(95, 60); +} + +TEST_P(DownstreamProtocolIntegrationTest, LargeRequestUrlAccepted) { + // Send one 95 kB URL with limit 96 kB headers. + testLargeRequestUrl(95, 96); +} + TEST_P(DownstreamProtocolIntegrationTest, LargeRequestHeadersRejected) { // Send one 95 kB header with limit 60 kB and 100 headers. testLargeRequestHeaders(95, 1, 60, 100); From 0e49a495826ea9e29134c1bd54fdeb31a034f40c Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Mon, 8 Jun 2020 12:22:19 -0600 Subject: [PATCH 489/909] http/2: add stats and stream flush timeout (#139) This commit adds a new stream flush timeout to guard against a remote server that does not open window once an entire stream has been buffered for flushing. Additional stats have also been added to better understand the codecs view of active streams as well as amount of data buffered. Signed-off-by: Matt Klein --- .../v2/http_connection_manager.proto | 10 + .../v3/http_connection_manager.proto | 10 + .../v4alpha/http_connection_manager.proto | 10 + .../http/http_conn_man/stats.rst | 9 + docs/root/faq/configuration/timeouts.rst | 4 +- docs/root/version_history/v1.14.2.rst | 3 + .../v2/http_connection_manager.proto | 10 + .../v3/http_connection_manager.proto | 10 + .../v4alpha/http_connection_manager.proto | 10 + include/envoy/http/codec.h | 7 + source/common/http/codec_client.cc | 2 +- source/common/http/codec_client.h | 6 +- source/common/http/conn_manager_impl.cc | 4 + source/common/http/http1/codec_impl.h | 5 + source/common/http/http2/codec_impl.cc | 49 ++++- source/common/http/http2/codec_impl.h | 21 +- source/common/http/http2/codec_stats.h | 12 +- .../quiche/envoy_quic_client_stream.h | 1 + .../quiche/envoy_quic_server_stream.h | 3 + test/common/http/conn_manager_impl_test.cc | 1 + test/common/http/http2/codec_impl_test.cc | 188 +++++++++++++++--- .../integration/quic_http_integration_test.cc | 12 +- .../alts/alts_integration_test.cc | 4 +- .../tls/integration/ssl_integration_test.cc | 6 +- test/integration/http2_integration_test.cc | 22 ++ .../http2_upstream_integration_test.cc | 3 + test/integration/http_integration.cc | 16 +- test/integration/http_integration.h | 4 +- .../sds_dynamic_integration_test.cc | 2 +- test/mocks/http/stream.h | 1 + 30 files changed, 392 insertions(+), 53 deletions(-) diff --git a/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto b/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto index 4db4af690490..06b13acb2f63 100644 --- a/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto +++ b/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto @@ -332,6 +332,16 @@ message HttpConnectionManager { // is terminated with a 408 Request Timeout error code if no upstream response // header has been received, otherwise a stream reset occurs. 
// + // This timeout also specifies the amount of time that Envoy will wait for the peer to open enough + // window to write any remaining stream data once the entirety of stream data (local end stream is + // true) has been buffered pending available window. In other words, this timeout defends against + // a peer that does not release enough window to completely write the stream, even though all + // data has been proxied within available flow control windows. If the timeout is hit in this + // case, the :ref:`tx_flush_timeout ` counter will be + // incremented. Note that :ref:`max_stream_duration + // ` does not apply to this corner + // case. + // // Note that it is possible to idle timeout even if the wire traffic for a stream is non-idle, due // to the granularity of events presented to the connection manager. For example, while receiving // very large request headers, it may be the case that there is traffic regularly arriving on the diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto b/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto index 2d8b09b117f0..24c417bb133f 100644 --- a/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto +++ b/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto @@ -327,6 +327,16 @@ message HttpConnectionManager { // is terminated with a 408 Request Timeout error code if no upstream response // header has been received, otherwise a stream reset occurs. // + // This timeout also specifies the amount of time that Envoy will wait for the peer to open enough + // window to write any remaining stream data once the entirety of stream data (local end stream is + // true) has been buffered pending available window. In other words, this timeout defends against + // a peer that does not release enough window to completely write the stream, even though all + // data has been proxied within available flow control windows. If the timeout is hit in this + // case, the :ref:`tx_flush_timeout ` counter will be + // incremented. Note that :ref:`max_stream_duration + // ` does not apply to + // this corner case. + // // Note that it is possible to idle timeout even if the wire traffic for a stream is non-idle, due // to the granularity of events presented to the connection manager. For example, while receiving // very large request headers, it may be the case that there is traffic regularly arriving on the diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto index bc3826f80f29..7800832806b3 100644 --- a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto +++ b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto @@ -327,6 +327,16 @@ message HttpConnectionManager { // is terminated with a 408 Request Timeout error code if no upstream response // header has been received, otherwise a stream reset occurs. // + // This timeout also specifies the amount of time that Envoy will wait for the peer to open enough + // window to write any remaining stream data once the entirety of stream data (local end stream is + // true) has been buffered pending available window. 
In other words, this timeout defends against + // a peer that does not release enough window to completely write the stream, even though all + // data has been proxied within available flow control windows. If the timeout is hit in this + // case, the :ref:`tx_flush_timeout ` counter will be + // incremented. Note that :ref:`max_stream_duration + // ` does not apply to + // this corner case. + // // Note that it is possible to idle timeout even if the wire traffic for a stream is non-idle, due // to the granularity of events presented to the connection manager. For example, while receiving // very large request headers, it may be the case that there is traffic regularly arriving on the diff --git a/docs/root/configuration/http/http_conn_man/stats.rst b/docs/root/configuration/http/http_conn_man/stats.rst index b81d3ea06045..2210bfc6dd5c 100644 --- a/docs/root/configuration/http/http_conn_man/stats.rst +++ b/docs/root/configuration/http/http_conn_man/stats.rst @@ -138,7 +138,16 @@ All http2 statistics are rooted at *http2.* rx_reset, Counter, Total number of reset stream frames received by Envoy too_many_header_frames, Counter, Total number of times an HTTP2 connection is reset due to receiving too many headers frames. Envoy currently supports proxying at most one header frame for 100-Continue one non-100 response code header frame and one frame with trailers trailers, Counter, Total number of trailers seen on requests coming from downstream + tx_flush_timeout, Counter, Total number of :ref:`stream idle timeouts ` waiting for open stream window to flush the remainder of a stream tx_reset, Counter, Total number of reset stream frames transmitted by Envoy + streams_active, Gauge, Active streams as observed by the codec + pending_send_bytes, Gauge, Currently buffered body data in bytes waiting to be written when stream/connection window is opened. + +.. attention:: + + The HTTP/2 `streams_active` gauge may be greater than the HTTP connection manager + `downstream_rq_active` gauge due to differences in stream accounting between the codec and the + HTTP connection manager. Tracing statistics ------------------ diff --git a/docs/root/faq/configuration/timeouts.rst b/docs/root/faq/configuration/timeouts.rst index 11f6ae366f1d..3c87cca44d9d 100644 --- a/docs/root/faq/configuration/timeouts.rst +++ b/docs/root/faq/configuration/timeouts.rst @@ -52,7 +52,9 @@ context request/stream is interchangeable. ` is the amount of time that the connection manager will allow a stream to exist with no upstream or downstream activity. The default stream idle timeout is *5 minutes*. This timeout is strongly - recommended for streaming APIs (requests or responses that never end). + recommended for all requests (not just streaming requests/responses) as it additionally defends + against an HTTP/2 peer that does not open stream window once an entire response has been buffered + to be sent to a downstream client. * The HTTP protocol :ref:`max_stream_duration ` is defined in a generic message used by the HTTP connection manager. The max stream duration is the maximum time that a stream's lifetime will span. You can use this functionality when you want to reset diff --git a/docs/root/version_history/v1.14.2.rst b/docs/root/version_history/v1.14.2.rst index dade825cfe4c..c20f93650dca 100644 --- a/docs/root/version_history/v1.14.2.rst +++ b/docs/root/version_history/v1.14.2.rst @@ -5,6 +5,9 @@ Changes ------- * http: fixed CVE-2020-11080 by rejecting HTTP/2 SETTINGS frames with too many parameters. 
+* http: the :ref:`stream_idle_timeout ` + now also defends against an HTTP/2 peer that does not open stream window once an entire response + has been buffered to be sent to a downstream client. * listener: Add runtime support for `per-listener limits ` on active/accepted connections. * overload management: Add runtime support for :ref:`global limits ` diff --git a/generated_api_shadow/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto b/generated_api_shadow/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto index 4db4af690490..06b13acb2f63 100644 --- a/generated_api_shadow/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto +++ b/generated_api_shadow/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto @@ -332,6 +332,16 @@ message HttpConnectionManager { // is terminated with a 408 Request Timeout error code if no upstream response // header has been received, otherwise a stream reset occurs. // + // This timeout also specifies the amount of time that Envoy will wait for the peer to open enough + // window to write any remaining stream data once the entirety of stream data (local end stream is + // true) has been buffered pending available window. In other words, this timeout defends against + // a peer that does not release enough window to completely write the stream, even though all + // data has been proxied within available flow control windows. If the timeout is hit in this + // case, the :ref:`tx_flush_timeout ` counter will be + // incremented. Note that :ref:`max_stream_duration + // ` does not apply to this corner + // case. + // // Note that it is possible to idle timeout even if the wire traffic for a stream is non-idle, due // to the granularity of events presented to the connection manager. For example, while receiving // very large request headers, it may be the case that there is traffic regularly arriving on the diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto index 230a2b98e087..3142a0d8bea2 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto @@ -329,6 +329,16 @@ message HttpConnectionManager { // is terminated with a 408 Request Timeout error code if no upstream response // header has been received, otherwise a stream reset occurs. // + // This timeout also specifies the amount of time that Envoy will wait for the peer to open enough + // window to write any remaining stream data once the entirety of stream data (local end stream is + // true) has been buffered pending available window. In other words, this timeout defends against + // a peer that does not release enough window to completely write the stream, even though all + // data has been proxied within available flow control windows. If the timeout is hit in this + // case, the :ref:`tx_flush_timeout ` counter will be + // incremented. Note that :ref:`max_stream_duration + // ` does not apply to + // this corner case. + // // Note that it is possible to idle timeout even if the wire traffic for a stream is non-idle, due // to the granularity of events presented to the connection manager. 
For example, while receiving // very large request headers, it may be the case that there is traffic regularly arriving on the diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto index bc3826f80f29..7800832806b3 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto @@ -327,6 +327,16 @@ message HttpConnectionManager { // is terminated with a 408 Request Timeout error code if no upstream response // header has been received, otherwise a stream reset occurs. // + // This timeout also specifies the amount of time that Envoy will wait for the peer to open enough + // window to write any remaining stream data once the entirety of stream data (local end stream is + // true) has been buffered pending available window. In other words, this timeout defends against + // a peer that does not release enough window to completely write the stream, even though all + // data has been proxied within available flow control windows. If the timeout is hit in this + // case, the :ref:`tx_flush_timeout ` counter will be + // incremented. Note that :ref:`max_stream_duration + // ` does not apply to + // this corner case. + // // Note that it is possible to idle timeout even if the wire traffic for a stream is non-idle, due // to the granularity of events presented to the connection manager. For example, while receiving // very large request headers, it may be the case that there is traffic regularly arriving on the diff --git a/include/envoy/http/codec.h b/include/envoy/http/codec.h index d7bb27d64998..46a9bf4e4f2b 100644 --- a/include/envoy/http/codec.h +++ b/include/envoy/http/codec.h @@ -337,6 +337,13 @@ class Stream { * with the stream. */ virtual const Network::Address::InstanceConstSharedPtr& connectionLocalAddress() PURE; + + /** + * Set the flush timeout for the stream. At the codec level this is used to bound the amount of + * time the codec will wait to flush body data pending open stream window. It does *not* count + * small window updates as satisfying the idle timeout as this is a potential DoS vector. 
+ */ + virtual void setFlushTimeout(std::chrono::milliseconds timeout) PURE; }; /** diff --git a/source/common/http/codec_client.cc b/source/common/http/codec_client.cc index 935fb6476e33..557b5757414a 100644 --- a/source/common/http/codec_client.cc +++ b/source/common/http/codec_client.cc @@ -21,7 +21,7 @@ namespace Http { CodecClient::CodecClient(Type type, Network::ClientConnectionPtr&& connection, Upstream::HostDescriptionConstSharedPtr host, Event::Dispatcher& dispatcher) - : type_(type), connection_(std::move(connection)), host_(host), + : type_(type), host_(host), connection_(std::move(connection)), idle_timeout_(host_->cluster().idleTimeout()) { if (type_ != Type::HTTP3) { // Make sure upstream connections process data and then the FIN, rather than processing diff --git a/source/common/http/codec_client.h b/source/common/http/codec_client.h index c3bb9d4b3f9a..895b27473715 100644 --- a/source/common/http/codec_client.h +++ b/source/common/http/codec_client.h @@ -155,9 +155,11 @@ class CodecClient : Logger::Loggable, } const Type type_; - ClientConnectionPtr codec_; - Network::ClientConnectionPtr connection_; + // The order of host_, connection_, and codec_ matter as during destruction each can refer to + // the previous, at least in tests. Upstream::HostDescriptionConstSharedPtr host_; + Network::ClientConnectionPtr connection_; + ClientConnectionPtr codec_; Event::TimerPtr idle_timer_; const absl::optional idle_timeout_; diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index 243260c98bf2..b3e436a8978c 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -263,6 +263,7 @@ RequestDecoder& ConnectionManagerImpl::newStream(ResponseEncoder& response_encod new_stream->state_.is_internally_created_ = is_internally_created; new_stream->response_encoder_ = &response_encoder; new_stream->response_encoder_->getStream().addCallbacks(*new_stream); + new_stream->response_encoder_->getStream().setFlushTimeout(new_stream->idle_timeout_ms_); new_stream->buffer_limit_ = new_stream->response_encoder_->getStream().bufferLimit(); // If the network connection is backed up, the stream should be made aware of it on creation. // Both HTTP/1.x and HTTP/2 codecs handle this in StreamCallbackHelper::addCallbacksHelper. @@ -970,7 +971,10 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& he if (hasCachedRoute()) { const Router::RouteEntry* route_entry = cached_route_.value()->routeEntry(); if (route_entry != nullptr && route_entry->idleTimeout()) { + // TODO(mattklein123): Technically if the cached route changes, we should also see if the + // route idle timeout has changed and update the value. idle_timeout_ms_ = route_entry->idleTimeout().value(); + response_encoder_->getStream().setFlushTimeout(idle_timeout_ms_); if (idle_timeout_ms_.count()) { // If we have a route-level idle timeout but no global stream idle timeout, create a timer. 
if (stream_idle_timer_ == nullptr) { diff --git a/source/common/http/http1/codec_impl.h b/source/common/http/http1/codec_impl.h index b18a482872d7..d2f8c47d7c8a 100644 --- a/source/common/http/http1/codec_impl.h +++ b/source/common/http/http1/codec_impl.h @@ -62,6 +62,11 @@ class StreamEncoderImpl : public virtual StreamEncoder, uint32_t bufferLimit() override; absl::string_view responseDetails() override { return details_; } const Network::Address::InstanceConstSharedPtr& connectionLocalAddress() override; + void setFlushTimeout(std::chrono::milliseconds) override { + // HTTP/1 has one stream per connection, thus any data encoded is immediately written to the + // connection, invoking any watermarks as necessary. There is no internal buffering that would + // require a flush timeout not already covered by other timeouts. + } void setIsResponseToHeadRequest(bool value) { is_response_to_head_request_ = value; } void setIsResponseToConnectRequest(bool value) { is_response_to_connect_request_ = value; } diff --git a/source/common/http/http2/codec_impl.cc b/source/common/http/http2/codec_impl.cc index f3317c32bbb6..ba8553a3266e 100644 --- a/source/common/http/http2/codec_impl.cc +++ b/source/common/http/http2/codec_impl.cc @@ -18,6 +18,7 @@ #include "common/http/exception.h" #include "common/http/header_utility.h" #include "common/http/headers.h" +#include "common/http/http2/codec_stats.h" #include "common/http/utility.h" #include "absl/container/fixed_array.h" @@ -95,11 +96,25 @@ ConnectionImpl::StreamImpl::StreamImpl(ConnectionImpl& parent, uint32_t buffer_l data_deferred_(false), waiting_for_non_informational_headers_(false), pending_receive_buffer_high_watermark_called_(false), pending_send_buffer_high_watermark_called_(false), reset_due_to_messaging_error_(false) { + parent_.stats_.streams_active_.inc(); if (buffer_limit > 0) { setWriteBufferWatermarks(buffer_limit / 2, buffer_limit); } } +ConnectionImpl::StreamImpl::~StreamImpl() { ASSERT(stream_idle_timer_ == nullptr); } + +void ConnectionImpl::StreamImpl::destroy() { + if (stream_idle_timer_ != nullptr) { + // To ease testing and the destructor assertion. + stream_idle_timer_->disableTimer(); + stream_idle_timer_.reset(); + } + + parent_.stats_.streams_active_.dec(); + parent_.stats_.pending_send_bytes_.sub(pending_send_data_.length()); +} + static void insertHeader(std::vector& headers, const HeaderEntry& header) { uint8_t flags = 0; if (header.key().isReference()) { @@ -206,6 +221,7 @@ void ConnectionImpl::StreamImpl::encodeTrailersBase(const HeaderMap& trailers) { // waiting on window updates. We need to save the trailers so that we can emit them later. 
ASSERT(!pending_trailers_to_encode_); pending_trailers_to_encode_ = cloneTrailers(trailers); + createPendingFlushTimer(); } else { submitTrailers(trailers); parent_.sendPendingFrames(); @@ -364,6 +380,7 @@ int ConnectionImpl::StreamImpl::onDataSourceSend(const uint8_t* framehd, size_t return NGHTTP2_ERR_FLOODED; } + parent_.stats_.pending_send_bytes_.sub(length); output.move(pending_send_data_, length); parent_.connection_.write(output, false); return 0; @@ -385,9 +402,30 @@ void ConnectionImpl::ServerStreamImpl::submitHeaders(const std::vector 0) { + stream_idle_timer_ = + parent_.connection_.dispatcher().createTimer([this] { onPendingFlushTimer(); }); + stream_idle_timer_->enableTimer(stream_idle_timeout_); + } +} + +void ConnectionImpl::StreamImpl::onPendingFlushTimer() { + ENVOY_CONN_LOG(debug, "pending stream flush timeout", parent_.connection_); + stream_idle_timer_.reset(); + parent_.stats_.tx_flush_timeout_.inc(); + ASSERT(local_end_stream_ && !local_end_stream_sent_); + // This will emit a reset frame for this stream and close the stream locally. No reset callbacks + // will be run because higher layers think the stream is already finished. + resetStreamWorker(StreamResetReason::LocalReset); + parent_.sendPendingFrames(); +} + void ConnectionImpl::StreamImpl::encodeData(Buffer::Instance& data, bool end_stream) { ASSERT(!local_end_stream_); local_end_stream_ = end_stream; + parent_.stats_.pending_send_bytes_.add(data.length()); pending_send_data_.move(data); if (data_deferred_) { int rc = nghttp2_session_resume_data(parent_.session_, stream_id_); @@ -397,6 +435,9 @@ void ConnectionImpl::StreamImpl::encodeData(Buffer::Instance& data, bool end_str } parent_.sendPendingFrames(); + if (local_end_stream_ && pending_send_data_.length() > 0) { + createPendingFlushTimer(); + } } void ConnectionImpl::StreamImpl::resetStream(StreamResetReason reason) { @@ -473,7 +514,12 @@ ConnectionImpl::ConnectionImpl(Network::Connection& connection, CodecStats& stat http2_options.max_inbound_window_update_frames_per_data_frame_sent().value()), dispatching_(false), raised_goaway_(false), pending_deferred_reset_(false) {} -ConnectionImpl::~ConnectionImpl() { nghttp2_session_del(session_); } +ConnectionImpl::~ConnectionImpl() { + for (const auto& stream : active_streams_) { + stream->destroy(); + } + nghttp2_session_del(session_); +} Http::Status ConnectionImpl::dispatch(Buffer::Instance& data) { // TODO(#10878): Remove this wrapper when exception removal is complete. innerDispatch may either @@ -839,6 +885,7 @@ int ConnectionImpl::onStreamClose(int32_t stream_id, uint32_t error_code) { stream->runResetCallbacks(reason); } + stream->destroy(); connection_.dispatcher().deferredDelete(stream->removeFromList(active_streams_)); // Any unconsumed data must be consumed before the stream is deleted. // nghttp2 does not appear to track this internally, and any stream deleted diff --git a/source/common/http/http2/codec_impl.h b/source/common/http/http2/codec_impl.h index bd2f6ecdc3aa..5751dba9c86d 100644 --- a/source/common/http/http2/codec_impl.h +++ b/source/common/http/http2/codec_impl.h @@ -168,6 +168,11 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable; @@ -304,6 +317,10 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable(trailers); } + void createPendingFlushTimer() override { + // Client streams do not create a flush timer because we currently assume that any failure + // to flush would be covered by a request/stream/etc. timeout. 
+ } // RequestEncoder void encodeHeaders(const RequestHeaderMap& headers, bool end_stream) override; @@ -344,6 +361,7 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable(trailers); } + void createPendingFlushTimer() override; // ResponseEncoder void encode100ContinueHeaders(const ResponseHeaderMap& headers) override; @@ -468,12 +486,10 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable CodecStats* { - return new CodecStats{ALL_HTTP2_CODEC_STATS(POOL_COUNTER_PREFIX(scope, "http2."))}; + return new CodecStats{ALL_HTTP2_CODEC_STATS(POOL_COUNTER_PREFIX(scope, "http2."), + POOL_GAUGE_PREFIX(scope, "http2."))}; }); } - ALL_HTTP2_CODEC_STATS(GENERATE_COUNTER_STRUCT) + ALL_HTTP2_CODEC_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT) }; } // namespace Http2 diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_client_stream.h b/source/extensions/quic_listeners/quiche/envoy_quic_client_stream.h index 8884c63dac99..761201c16f7c 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_client_stream.h +++ b/source/extensions/quic_listeners/quiche/envoy_quic_client_stream.h @@ -39,6 +39,7 @@ class EnvoyQuicClientStream : public quic::QuicSpdyClientStream, // Http::Stream void resetStream(Http::StreamResetReason reason) override; + void setFlushTimeout(std::chrono::milliseconds) override {} // quic::QuicSpdyStream void OnBodyAvailable() override; void OnStreamReset(const quic::QuicRstStreamFrame& frame) override; diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_server_stream.h b/source/extensions/quic_listeners/quiche/envoy_quic_server_stream.h index 59c03e79509a..a9393a1761ff 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_server_stream.h +++ b/source/extensions/quic_listeners/quiche/envoy_quic_server_stream.h @@ -39,6 +39,9 @@ class EnvoyQuicServerStream : public quic::QuicSpdyServerStreamBase, // Http::Stream void resetStream(Http::StreamResetReason reason) override; + void setFlushTimeout(std::chrono::milliseconds) override { + // TODO(mattklein123): Actually implement this for HTTP/3 similar to HTTP/2. 
+ } // quic::QuicSpdyStream void OnBodyAvailable() override; void OnStreamReset(const quic::QuicRstStreamFrame& frame) override; diff --git a/test/common/http/conn_manager_impl_test.cc b/test/common/http/conn_manager_impl_test.cc index 7083b4f92d9d..c828c603fe12 100644 --- a/test/common/http/conn_manager_impl_test.cc +++ b/test/common/http/conn_manager_impl_test.cc @@ -195,6 +195,7 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan EXPECT_CALL(stream_, addCallbacks(_)) .WillOnce(Invoke( [&](Http::StreamCallbacks& callbacks) -> void { stream_callbacks_ = &callbacks; })); + EXPECT_CALL(stream_, setFlushTimeout(_)); EXPECT_CALL(stream_, bufferLimit()).WillOnce(Return(initial_buffer_limit_)); } diff --git a/test/common/http/http2/codec_impl_test.cc b/test/common/http/http2/codec_impl_test.cc index de25ea53b040..400c4f564ce7 100644 --- a/test/common/http/http2/codec_impl_test.cc +++ b/test/common/http/http2/codec_impl_test.cc @@ -102,17 +102,36 @@ class Http2CodecImplTestFixture { Http2CodecImplTestFixture() = default; Http2CodecImplTestFixture(Http2SettingsTuple client_settings, Http2SettingsTuple server_settings) - : client_settings_(client_settings), server_settings_(server_settings) {} - virtual ~Http2CodecImplTestFixture() = default; + : client_settings_(client_settings), server_settings_(server_settings) { + // Make sure we explicitly test for stream flush timer creation. + EXPECT_CALL(client_connection_.dispatcher_, createTimer_(_)).Times(0); + EXPECT_CALL(server_connection_.dispatcher_, createTimer_(_)).Times(0); + } + virtual ~Http2CodecImplTestFixture() { + client_connection_.dispatcher_.clearDeferredDeleteList(); + if (client_ != nullptr) { + client_.reset(); + EXPECT_EQ(0, TestUtility::findGauge(client_stats_store_, "http2.streams_active")->value()); + EXPECT_EQ(0, + TestUtility::findGauge(client_stats_store_, "http2.pending_send_bytes")->value()); + } + server_connection_.dispatcher_.clearDeferredDeleteList(); + if (server_ != nullptr) { + server_.reset(); + EXPECT_EQ(0, TestUtility::findGauge(server_stats_store_, "http2.streams_active")->value()); + EXPECT_EQ(0, + TestUtility::findGauge(server_stats_store_, "http2.pending_send_bytes")->value()); + } + } virtual void initialize() { http2OptionsFromTuple(client_http2_options_, client_settings_); http2OptionsFromTuple(server_http2_options_, server_settings_); client_ = std::make_unique( - client_connection_, client_callbacks_, stats_store_, client_http2_options_, + client_connection_, client_callbacks_, client_stats_store_, client_http2_options_, max_request_headers_kb_, max_response_headers_count_, ProdNghttp2SessionFactory::get()); server_ = std::make_unique( - server_connection_, server_callbacks_, stats_store_, server_http2_options_, + server_connection_, server_callbacks_, server_stats_store_, server_http2_options_, max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_); request_encoder_ = &client_->newStream(response_decoder_); @@ -122,6 +141,7 @@ class Http2CodecImplTestFixture { .WillRepeatedly(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& { response_encoder_ = &encoder; encoder.getStream().addCallbacks(server_stream_callbacks_); + encoder.getStream().setFlushTimeout(std::chrono::milliseconds(30000)); return request_decoder_; })); } @@ -203,12 +223,13 @@ class Http2CodecImplTestFixture { absl::optional server_settings_; bool allow_metadata_ = false; bool stream_error_on_invalid_http_messaging_ = false; - Stats::TestUtil::TestStore stats_store_; + 
Stats::TestUtil::TestStore client_stats_store_; envoy::config::core::v3::Http2ProtocolOptions client_http2_options_; NiceMock client_connection_; MockConnectionCallbacks client_callbacks_; std::unique_ptr client_; ConnectionWrapper client_wrapper_; + Stats::TestUtil::TestStore server_stats_store_; envoy::config::core::v3::Http2ProtocolOptions server_http2_options_; NiceMock server_connection_; MockServerConnectionCallbacks server_callbacks_; @@ -353,7 +374,7 @@ TEST_P(Http2CodecImplTest, InvalidContinueWithFin) { TestResponseHeaderMapImpl continue_headers{{":status", "100"}}; EXPECT_THROW(response_encoder_->encodeHeaders(continue_headers, true), ClientCodecError); - EXPECT_EQ(1, stats_store_.counter("http2.rx_messaging_error").value()); + EXPECT_EQ(1, client_stats_store_.counter("http2.rx_messaging_error").value()); } TEST_P(Http2CodecImplTest, InvalidContinueWithFinAllowed) { @@ -382,7 +403,7 @@ TEST_P(Http2CodecImplTest, InvalidContinueWithFinAllowed) { auto status = client_wrapper_.dispatch(Buffer::OwnedImpl(), *client_); EXPECT_TRUE(status.ok()); - EXPECT_EQ(1, stats_store_.counter("http2.rx_messaging_error").value()); + EXPECT_EQ(1, client_stats_store_.counter("http2.rx_messaging_error").value()); expectDetailsRequest("http2.violation.of.messaging.rule"); } @@ -399,7 +420,7 @@ TEST_P(Http2CodecImplTest, InvalidRepeatContinue) { response_encoder_->encode100ContinueHeaders(continue_headers); EXPECT_THROW(response_encoder_->encodeHeaders(continue_headers, true), ClientCodecError); - EXPECT_EQ(1, stats_store_.counter("http2.rx_messaging_error").value()); + EXPECT_EQ(1, client_stats_store_.counter("http2.rx_messaging_error").value()); }; TEST_P(Http2CodecImplTest, InvalidRepeatContinueAllowed) { @@ -431,7 +452,7 @@ TEST_P(Http2CodecImplTest, InvalidRepeatContinueAllowed) { auto status = client_wrapper_.dispatch(Buffer::OwnedImpl(), *client_); EXPECT_TRUE(status.ok()); - EXPECT_EQ(1, stats_store_.counter("http2.rx_messaging_error").value()); + EXPECT_EQ(1, client_stats_store_.counter("http2.rx_messaging_error").value()); expectDetailsRequest("http2.violation.of.messaging.rule"); }; @@ -453,7 +474,7 @@ TEST_P(Http2CodecImplTest, Invalid103) { EXPECT_THROW_WITH_MESSAGE(response_encoder_->encodeHeaders(early_hint_headers, false), ClientCodecError, "Unexpected 'trailers' with no end stream."); - EXPECT_EQ(1, stats_store_.counter("http2.too_many_header_frames").value()); + EXPECT_EQ(1, client_stats_store_.counter("http2.too_many_header_frames").value()); } TEST_P(Http2CodecImplTest, Invalid204WithContentLength) { @@ -474,7 +495,7 @@ TEST_P(Http2CodecImplTest, Invalid204WithContentLength) { } EXPECT_THROW(response_encoder_->encodeHeaders(response_headers, false), ClientCodecError); - EXPECT_EQ(1, stats_store_.counter("http2.rx_messaging_error").value()); + EXPECT_EQ(1, client_stats_store_.counter("http2.rx_messaging_error").value()); }; TEST_P(Http2CodecImplTest, Invalid204WithContentLengthAllowed) { @@ -512,7 +533,7 @@ TEST_P(Http2CodecImplTest, Invalid204WithContentLengthAllowed) { auto status = client_wrapper_.dispatch(Buffer::OwnedImpl(), *client_); EXPECT_TRUE(status.ok()); - EXPECT_EQ(1, stats_store_.counter("http2.rx_messaging_error").value()); + EXPECT_EQ(1, client_stats_store_.counter("http2.rx_messaging_error").value()); expectDetailsRequest("http2.invalid.header.field"); }; @@ -536,7 +557,7 @@ TEST_P(Http2CodecImplTest, InvalidHeadersFrame) { initialize(); EXPECT_THROW(request_encoder_->encodeHeaders(TestRequestHeaderMapImpl{}, true), ServerCodecError); - EXPECT_EQ(1, 
stats_store_.counter("http2.rx_messaging_error").value()); + EXPECT_EQ(1, server_stats_store_.counter("http2.rx_messaging_error").value()); } TEST_P(Http2CodecImplTest, InvalidHeadersFrameAllowed) { @@ -581,7 +602,7 @@ TEST_P(Http2CodecImplTest, TrailingHeaders) { response_encoder_->encodeTrailers(TestResponseTrailerMapImpl{{"trailing", "header"}}); } -TEST_P(Http2CodecImplTest, TrailingHeadersLargeBody) { +TEST_P(Http2CodecImplTest, TrailingHeadersLargeClientBody) { initialize(); // Buffer server data so we can make sure we don't get any window updates. @@ -596,11 +617,11 @@ TEST_P(Http2CodecImplTest, TrailingHeadersLargeBody) { EXPECT_CALL(request_decoder_, decodeData(_, false)).Times(AtLeast(1)); Buffer::OwnedImpl body(std::string(1024 * 1024, 'a')); request_encoder_->encodeData(body, false); - EXPECT_CALL(request_decoder_, decodeTrailers_(_)); request_encoder_->encodeTrailers(TestRequestTrailerMapImpl{{"trailing", "header"}}); // Flush pending data. setupDefaultConnectionMocks(); + EXPECT_CALL(request_decoder_, decodeTrailers_(_)); auto status = server_wrapper_.dispatch(Buffer::OwnedImpl(), *server_); EXPECT_TRUE(status.ok()); @@ -784,8 +805,11 @@ TEST_P(Http2CodecImplDeferredResetTest, DeferredResetServer) { response_encoder_->encodeHeaders(response_headers, false); Buffer::OwnedImpl body(std::string(1024 * 1024, 'a')); EXPECT_CALL(server_stream_callbacks_, onAboveWriteBufferHighWatermark()).Times(AnyNumber()); + auto flush_timer = new Event::MockTimer(&server_connection_.dispatcher_); + EXPECT_CALL(*flush_timer, enableTimer(std::chrono::milliseconds(30000), _)); response_encoder_->encodeData(body, true); EXPECT_CALL(server_stream_callbacks_, onResetStream(StreamResetReason::LocalReset, _)); + EXPECT_CALL(*flush_timer, disableTimer()); response_encoder_->getStream().resetStream(StreamResetReason::LocalReset); MockStreamCallbacks client_stream_callbacks; @@ -820,6 +844,8 @@ TEST_P(Http2CodecImplFlowControlTest, TestFlowControlInPendingSendData) { // Force the server stream to be read disabled. This will cause it to stop sending window // updates to the client. server_->getStream(1)->readDisable(true); + EXPECT_EQ(1, TestUtility::findGauge(client_stats_store_, "http2.streams_active")->value()); + EXPECT_EQ(1, TestUtility::findGauge(server_stats_store_, "http2.streams_active")->value()); uint32_t initial_stream_window = nghttp2_session_get_stream_effective_local_window_size(client_->session(), 1); @@ -845,6 +871,8 @@ TEST_P(Http2CodecImplFlowControlTest, TestFlowControlInPendingSendData) { Buffer::OwnedImpl more_long_data(std::string(initial_stream_window, 'a')); request_encoder_->encodeData(more_long_data, false); EXPECT_EQ(initial_stream_window, client_->getStream(1)->pending_send_data_.length()); + EXPECT_EQ(initial_stream_window, + TestUtility::findGauge(client_stats_store_, "http2.pending_send_bytes")->value()); EXPECT_EQ(initial_stream_window, server_->getStream(1)->unconsumed_bytes_); // If we go over the limit, the stream callbacks should fire. @@ -852,6 +880,8 @@ TEST_P(Http2CodecImplFlowControlTest, TestFlowControlInPendingSendData) { Buffer::OwnedImpl last_byte("!"); request_encoder_->encodeData(last_byte, false); EXPECT_EQ(initial_stream_window + 1, client_->getStream(1)->pending_send_data_.length()); + EXPECT_EQ(initial_stream_window + 1, + TestUtility::findGauge(client_stats_store_, "http2.pending_send_bytes")->value()); // Now create a second stream on the connection. 
MockResponseDecoder response_decoder2; @@ -895,6 +925,7 @@ TEST_P(Http2CodecImplFlowControlTest, TestFlowControlInPendingSendData) { EXPECT_CALL(callbacks3, onBelowWriteBufferLowWatermark()); server_->getStream(1)->readDisable(false); EXPECT_EQ(0, client_->getStream(1)->pending_send_data_.length()); + EXPECT_EQ(0, TestUtility::findGauge(client_stats_store_, "http2.pending_send_bytes")->value()); // The extra 1 byte sent won't trigger another window update, so the final window should be the // initial window minus the last 1 byte flush from the client to server. EXPECT_EQ(initial_stream_window - 1, @@ -987,6 +1018,109 @@ TEST_P(Http2CodecImplFlowControlTest, FlowControlPendingRecvData) { request_encoder_->encodeData(data, false); } +// Verify that we create and disable the stream flush timer when trailers follow a stream that +// does not have enough window. +TEST_P(Http2CodecImplFlowControlTest, TrailingHeadersLargeServerBody) { + initialize(); + + InSequence s; + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); + request_encoder_->encodeHeaders(request_headers, true); + + ON_CALL(client_connection_, write(_, _)) + .WillByDefault( + Invoke([&](Buffer::Instance& data, bool) -> void { server_wrapper_.buffer_.add(data); })); + TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + EXPECT_CALL(response_decoder_, decodeHeaders_(_, false)); + response_encoder_->encodeHeaders(response_headers, false); + EXPECT_CALL(server_stream_callbacks_, onAboveWriteBufferHighWatermark()); + EXPECT_CALL(response_decoder_, decodeData(_, false)).Times(AtLeast(1)); + auto flush_timer = new Event::MockTimer(&server_connection_.dispatcher_); + EXPECT_CALL(*flush_timer, enableTimer(std::chrono::milliseconds(30000), _)); + Buffer::OwnedImpl body(std::string(1024 * 1024, 'a')); + response_encoder_->encodeData(body, false); + response_encoder_->encodeTrailers(TestResponseTrailerMapImpl{{"trailing", "header"}}); + + // Send window updates from the client. + setupDefaultConnectionMocks(); + EXPECT_CALL(response_decoder_, decodeData(_, false)).Times(AtLeast(1)); + EXPECT_CALL(response_decoder_, decodeTrailers_(_)); + EXPECT_CALL(*flush_timer, disableTimer()); + auto status = server_wrapper_.dispatch(Buffer::OwnedImpl(), *server_); + EXPECT_TRUE(status.ok()); + EXPECT_EQ(0, server_stats_store_.counter("http2.tx_flush_timeout").value()); +} + +// Verify that we create and handle the stream flush timeout when trailers follow a stream that +// does not have enough window. 
+TEST_P(Http2CodecImplFlowControlTest, TrailingHeadersLargeServerBodyFlushTimeout) { + initialize(); + + InSequence s; + MockStreamCallbacks client_stream_callbacks; + request_encoder_->getStream().addCallbacks(client_stream_callbacks); + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); + request_encoder_->encodeHeaders(request_headers, true); + + ON_CALL(client_connection_, write(_, _)) + .WillByDefault( + Invoke([&](Buffer::Instance& data, bool) -> void { server_wrapper_.buffer_.add(data); })); + TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + EXPECT_CALL(response_decoder_, decodeHeaders_(_, false)); + response_encoder_->encodeHeaders(response_headers, false); + EXPECT_CALL(server_stream_callbacks_, onAboveWriteBufferHighWatermark()); + EXPECT_CALL(response_decoder_, decodeData(_, false)).Times(AtLeast(1)); + auto flush_timer = new Event::MockTimer(&server_connection_.dispatcher_); + EXPECT_CALL(*flush_timer, enableTimer(std::chrono::milliseconds(30000), _)); + Buffer::OwnedImpl body(std::string(1024 * 1024, 'a')); + response_encoder_->encodeData(body, false); + response_encoder_->encodeTrailers(TestResponseTrailerMapImpl{{"trailing", "header"}}); + + // Invoke a stream flush timeout. Make sure we don't get a reset locally for higher layers but + // we do get a reset on the client. + EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(0); + EXPECT_CALL(client_stream_callbacks, onResetStream(StreamResetReason::RemoteReset, _)); + flush_timer->invokeCallback(); + EXPECT_EQ(1, server_stats_store_.counter("http2.tx_flush_timeout").value()); +} + +// Verify that we create and handle the stream flush timeout when there is a large body that +// does not have enough window. +TEST_P(Http2CodecImplFlowControlTest, LargeServerBodyFlushTimeout) { + initialize(); + + InSequence s; + MockStreamCallbacks client_stream_callbacks; + request_encoder_->getStream().addCallbacks(client_stream_callbacks); + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); + request_encoder_->encodeHeaders(request_headers, true); + + ON_CALL(client_connection_, write(_, _)) + .WillByDefault( + Invoke([&](Buffer::Instance& data, bool) -> void { server_wrapper_.buffer_.add(data); })); + TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + EXPECT_CALL(response_decoder_, decodeHeaders_(_, false)); + response_encoder_->encodeHeaders(response_headers, false); + EXPECT_CALL(response_decoder_, decodeData(_, false)).Times(AtLeast(1)); + auto flush_timer = new Event::MockTimer(&server_connection_.dispatcher_); + EXPECT_CALL(*flush_timer, enableTimer(std::chrono::milliseconds(30000), _)); + Buffer::OwnedImpl body(std::string(1024 * 1024, 'a')); + response_encoder_->encodeData(body, true); + + // Invoke a stream flush timeout. Make sure we don't get a reset locally for higher layers but + // we do get a reset on the client. 
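+  // Note: onPendingFlushTimer() resets the stream via resetStreamWorker(), so the server-side
+  // stream callbacks are not invoked; only the peer observes the RST_STREAM (see codec_impl.cc).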
+ EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(0); + EXPECT_CALL(client_stream_callbacks, onResetStream(StreamResetReason::RemoteReset, _)); + flush_timer->invokeCallback(); + EXPECT_EQ(1, server_stats_store_.counter("http2.tx_flush_timeout").value()); +} + TEST_P(Http2CodecImplTest, WatermarkUnderEndStream) { initialize(); MockStreamCallbacks callbacks; @@ -1041,10 +1175,10 @@ TEST_P(Http2CodecImplStreamLimitTest, MaxClientStreams) { http2OptionsFromTuple(client_http2_options_, ::testing::get<0>(GetParam())); http2OptionsFromTuple(server_http2_options_, ::testing::get<1>(GetParam())); client_ = std::make_unique( - client_connection_, client_callbacks_, stats_store_, client_http2_options_, + client_connection_, client_callbacks_, client_stats_store_, client_http2_options_, max_request_headers_kb_, max_response_headers_count_, ProdNghttp2SessionFactory::get()); server_ = std::make_unique( - server_connection_, server_callbacks_, stats_store_, server_http2_options_, + server_connection_, server_callbacks_, server_stats_store_, server_http2_options_, max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_); for (int i = 0; i < 101; ++i) { @@ -1307,7 +1441,7 @@ TEST_P(Http2CodecImplTest, HeaderNameWithUnderscoreAreDropped) { request_headers.addCopy("bad_header", "something"); EXPECT_CALL(request_decoder_, decodeHeaders_(HeaderMapEqual(&expected_headers), _)); request_encoder_->encodeHeaders(request_headers, false); - EXPECT_EQ(1, stats_store_.counter("http2.dropped_headers_with_underscores").value()); + EXPECT_EQ(1, server_stats_store_.counter("http2.dropped_headers_with_underscores").value()); } // Tests that request with header names containing underscore are rejected when the option is set to @@ -1321,7 +1455,9 @@ TEST_P(Http2CodecImplTest, HeaderNameWithUnderscoreAreRejectedByDefault) { request_headers.addCopy("bad_header", "something"); EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(1); request_encoder_->encodeHeaders(request_headers, false); - EXPECT_EQ(1, stats_store_.counter("http2.requests_rejected_with_underscores_in_headers").value()); + EXPECT_EQ( + 1, + server_stats_store_.counter("http2.requests_rejected_with_underscores_in_headers").value()); } // Tests request headers with name containing underscore are allowed when the option is set to @@ -1337,7 +1473,7 @@ TEST_P(Http2CodecImplTest, HeaderNameWithUnderscoreAllowed) { EXPECT_CALL(request_decoder_, decodeHeaders_(HeaderMapEqual(&expected_headers), _)); EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(0); request_encoder_->encodeHeaders(request_headers, false); - EXPECT_EQ(0, stats_store_.counter("http2.dropped_headers_with_underscores").value()); + EXPECT_EQ(0, server_stats_store_.counter("http2.dropped_headers_with_underscores").value()); } // This is the HTTP/2 variant of the HTTP/1 regression test for CVE-2019-18801. 
@@ -1553,7 +1689,7 @@ TEST_P(Http2CodecImplTest, PingFlood) { EXPECT_THROW(client_->sendPendingFrames(), ServerCodecError); EXPECT_EQ(ack_count, CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_CONTROL_FRAMES); - EXPECT_EQ(1, stats_store_.counter("http2.outbound_control_flood").value()); + EXPECT_EQ(1, server_stats_store_.counter("http2.outbound_control_flood").value()); } // Verify that codec allows PING flood when mitigation is disabled @@ -1647,7 +1783,7 @@ TEST_P(Http2CodecImplTest, ResponseHeadersFlood) { EXPECT_THROW(client_->sendPendingFrames(), ServerCodecError); EXPECT_EQ(frame_count, CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES + 1); - EXPECT_EQ(1, stats_store_.counter("http2.outbound_flood").value()); + EXPECT_EQ(1, server_stats_store_.counter("http2.outbound_flood").value()); } // Verify that codec detects flood of outbound DATA frames @@ -1680,7 +1816,7 @@ TEST_P(Http2CodecImplTest, ResponseDataFlood) { EXPECT_THROW(client_->sendPendingFrames(), ServerCodecError); EXPECT_EQ(frame_count, CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES + 1); - EXPECT_EQ(1, stats_store_.counter("http2.outbound_flood").value()); + EXPECT_EQ(1, server_stats_store_.counter("http2.outbound_flood").value()); } // Verify that codec allows outbound DATA flood when mitigation is disabled @@ -1783,7 +1919,7 @@ TEST_P(Http2CodecImplTest, PingStacksWithDataFlood) { EXPECT_THROW(client_->sendPendingFrames(), ServerCodecError); EXPECT_EQ(frame_count, CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES); - EXPECT_EQ(1, stats_store_.counter("http2.outbound_flood").value()); + EXPECT_EQ(1, server_stats_store_.counter("http2.outbound_flood").value()); } TEST_P(Http2CodecImplTest, PriorityFlood) { @@ -1939,10 +2075,10 @@ class Http2CodecMetadataTest : public Http2CodecImplTestFixture, public ::testin http2OptionsFromTuple(client_http2_options_, client_settings_); http2OptionsFromTuple(server_http2_options_, server_settings_); client_ = std::make_unique( - client_connection_, client_callbacks_, stats_store_, client_http2_options_, + client_connection_, client_callbacks_, client_stats_store_, client_http2_options_, max_request_headers_kb_, max_response_headers_count_, http2_session_factory_); server_ = std::make_unique( - server_connection_, server_callbacks_, stats_store_, server_http2_options_, + server_connection_, server_callbacks_, server_stats_store_, server_http2_options_, max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_); ON_CALL(client_connection_, write(_, _)) .WillByDefault(Invoke([&](Buffer::Instance& data, bool) -> void { diff --git a/test/extensions/quic_listeners/quiche/integration/quic_http_integration_test.cc b/test/extensions/quic_listeners/quiche/integration/quic_http_integration_test.cc index d7177aee7a04..dd9092be7e89 100644 --- a/test/extensions/quic_listeners/quiche/integration/quic_http_integration_test.cc +++ b/test/extensions/quic_listeners/quiche/integration/quic_http_integration_test.cc @@ -92,8 +92,11 @@ class QuicHttpIntegrationTest : public HttpIntegrationTest, public QuicMultiVers // TODO(#8479) Propagate INVALID_VERSION error to caller and let caller to use server advertised // version list to create a new connection with mutually supported version and make client codec // again. 
- IntegrationCodecClientPtr makeRawHttpConnection(Network::ClientConnectionPtr&& conn) override { - IntegrationCodecClientPtr codec = HttpIntegrationTest::makeRawHttpConnection(std::move(conn)); + IntegrationCodecClientPtr makeRawHttpConnection( + Network::ClientConnectionPtr&& conn, + absl::optional http2_options) override { + IntegrationCodecClientPtr codec = + HttpIntegrationTest::makeRawHttpConnection(std::move(conn), http2_options); if (codec->disconnected()) { // Connection may get closed during version negotiation or handshake. ENVOY_LOG(error, "Fail to connect to server with error: {}", @@ -424,7 +427,7 @@ TEST_P(QuicHttpIntegrationTest, StopAcceptingConnectionsWhenOverloaded) { updateResource(0.9); test_server_->waitForGaugeEq("overload.envoy.overload_actions.stop_accepting_connections.active", 1); - codec_client_ = makeRawHttpConnection(makeClientConnection((lookupPort("http")))); + codec_client_ = makeRawHttpConnection(makeClientConnection((lookupPort("http"))), absl::nullopt); EXPECT_TRUE(codec_client_->disconnected()); // Reduce load a little to allow the connection to be accepted connection. @@ -452,7 +455,8 @@ TEST_P(QuicHttpIntegrationTest, StopAcceptingConnectionsWhenOverloaded) { EXPECT_EQ("envoy overloaded", response2->body()); codec_client_->close(); - EXPECT_TRUE(makeRawHttpConnection(makeClientConnection((lookupPort("http"))))->disconnected()); + EXPECT_TRUE(makeRawHttpConnection(makeClientConnection((lookupPort("http"))), absl::nullopt) + ->disconnected()); } TEST_P(QuicHttpIntegrationTest, AdminDrainDrainsListeners) { diff --git a/test/extensions/transport_sockets/alts/alts_integration_test.cc b/test/extensions/transport_sockets/alts/alts_integration_test.cc index 8075ae85fdda..42eb79d56e3a 100644 --- a/test/extensions/transport_sockets/alts/alts_integration_test.cc +++ b/test/extensions/transport_sockets/alts/alts_integration_test.cc @@ -264,7 +264,7 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, AltsIntegrationTestClientInvalidPeer, // any account in config, the handshake will fail and client closes connection. TEST_P(AltsIntegrationTestClientInvalidPeer, ClientValidationFail) { initialize(); - codec_client_ = makeRawHttpConnection(makeAltsConnection()); + codec_client_ = makeRawHttpConnection(makeAltsConnection(), absl::nullopt); EXPECT_FALSE(codec_client_->connected()); } @@ -312,7 +312,7 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, AltsIntegrationTestClientWrongHandshaker, // and connection closes. 
TEST_P(AltsIntegrationTestClientWrongHandshaker, ConnectToWrongHandshakerAddress) { initialize(); - codec_client_ = makeRawHttpConnection(makeAltsConnection()); + codec_client_ = makeRawHttpConnection(makeAltsConnection(), absl::nullopt); EXPECT_FALSE(codec_client_->connected()); } diff --git a/test/extensions/transport_sockets/tls/integration/ssl_integration_test.cc b/test/extensions/transport_sockets/tls/integration/ssl_integration_test.cc index 06e0a549bd4c..9994f8ca314b 100644 --- a/test/extensions/transport_sockets/tls/integration/ssl_integration_test.cc +++ b/test/extensions/transport_sockets/tls/integration/ssl_integration_test.cc @@ -286,7 +286,8 @@ TEST_P(SslCertficateIntegrationTest, ServerEcdsaClientRsaOnly) { server_rsa_cert_ = false; server_ecdsa_cert_ = true; initialize(); - auto codec_client = makeRawHttpConnection(makeSslClientConnection(rsaOnlyClientOptions())); + auto codec_client = + makeRawHttpConnection(makeSslClientConnection(rsaOnlyClientOptions()), absl::nullopt); EXPECT_FALSE(codec_client->connected()); const std::string counter_name = listenerStatPrefix("ssl.connection_error"); Stats::CounterSharedPtr counter = test_server_->counter(counter_name); @@ -313,7 +314,8 @@ TEST_P(SslCertficateIntegrationTest, ServerRsaClientEcdsaOnly) { client_ecdsa_cert_ = true; initialize(); EXPECT_FALSE( - makeRawHttpConnection(makeSslClientConnection(ecdsaOnlyClientOptions()))->connected()); + makeRawHttpConnection(makeSslClientConnection(ecdsaOnlyClientOptions()), absl::nullopt) + ->connected()); const std::string counter_name = listenerStatPrefix("ssl.connection_error"); Stats::CounterSharedPtr counter = test_server_->counter(counter_name); test_server_->waitForCounterGe(counter_name, 1); diff --git a/test/integration/http2_integration_test.cc b/test/integration/http2_integration_test.cc index 9de0d17da08b..f571b162a676 100644 --- a/test/integration/http2_integration_test.cc +++ b/test/integration/http2_integration_test.cc @@ -99,6 +99,28 @@ TEST_P(Http2IntegrationTest, RetryAttemptCount) { testRetryAttemptCountHeader(); TEST_P(Http2IntegrationTest, LargeRequestTrailersRejected) { testLargeRequestTrailers(66, 60); } +// Verify downstream codec stream flush timeout. 
+TEST_P(Http2IntegrationTest, CodecStreamIdleTimeout) { + config_helper_.setBufferLimits(1024, 1024); + config_helper_.addConfigModifier( + [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) -> void { + hcm.mutable_stream_idle_timeout()->set_seconds(0); + constexpr uint64_t IdleTimeoutMs = 400; + hcm.mutable_stream_idle_timeout()->set_nanos(IdleTimeoutMs * 1000 * 1000); + }); + initialize(); + envoy::config::core::v3::Http2ProtocolOptions http2_options; + http2_options.mutable_initial_stream_window_size()->set_value(65535); + codec_client_ = makeRawHttpConnection(makeClientConnection(lookupPort("http")), http2_options); + auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); + waitForNextUpstreamRequest(); + upstream_request_->encodeHeaders(default_response_headers_, false); + upstream_request_->encodeData(70000, true); + test_server_->waitForCounterEq("http2.tx_flush_timeout", 1); + response->waitForReset(); +} + static std::string response_metadata_filter = R"EOF( name: response-metadata-filter typed_config: diff --git a/test/integration/http2_upstream_integration_test.cc b/test/integration/http2_upstream_integration_test.cc index 8839a8737f0b..ba32580ec24b 100644 --- a/test/integration/http2_upstream_integration_test.cc +++ b/test/integration/http2_upstream_integration_test.cc @@ -242,6 +242,9 @@ void Http2UpstreamIntegrationTest::manySimultaneousRequests(uint32_t request_byt EXPECT_EQ("503", responses[i]->headers().getStatusValue()); } } + + EXPECT_EQ(0, test_server_->gauge("http2.streams_active")->value()); + EXPECT_EQ(0, test_server_->gauge("http2.pending_send_bytes")->value()); } TEST_P(Http2UpstreamIntegrationTest, ManySimultaneousRequest) { diff --git a/test/integration/http_integration.cc b/test/integration/http_integration.cc index ca5fca51e403..7d27ae6d0e45 100644 --- a/test/integration/http_integration.cc +++ b/test/integration/http_integration.cc @@ -205,12 +205,18 @@ IntegrationCodecClientPtr HttpIntegrationTest::makeHttpConnection(uint32_t port) return makeHttpConnection(makeClientConnection(port)); } -IntegrationCodecClientPtr -HttpIntegrationTest::makeRawHttpConnection(Network::ClientConnectionPtr&& conn) { +IntegrationCodecClientPtr HttpIntegrationTest::makeRawHttpConnection( + Network::ClientConnectionPtr&& conn, + absl::optional http2_options) { std::shared_ptr cluster{new NiceMock()}; cluster->max_response_headers_count_ = 200; - cluster->http2_options_.set_allow_connect(true); - cluster->http2_options_.set_allow_metadata(true); + if (!http2_options.has_value()) { + http2_options = Http2::Utility::initializeAndValidateOptions( + envoy::config::core::v3::Http2ProtocolOptions()); + http2_options.value().set_allow_connect(true); + http2_options.value().set_allow_metadata(true); + } + cluster->http2_options_ = http2_options.value(); cluster->http1_settings_.enable_trailers_ = true; Upstream::HostDescriptionConstSharedPtr host_description{Upstream::makeTestHostDescription( cluster, fmt::format("tcp://{}:80", Network::Test::getLoopbackAddressUrlString(version_)))}; @@ -220,7 +226,7 @@ HttpIntegrationTest::makeRawHttpConnection(Network::ClientConnectionPtr&& conn) IntegrationCodecClientPtr HttpIntegrationTest::makeHttpConnection(Network::ClientConnectionPtr&& conn) { - auto codec = makeRawHttpConnection(std::move(conn)); + auto codec = makeRawHttpConnection(std::move(conn), absl::nullopt); EXPECT_TRUE(codec->connected()) << codec->connection()->transportFailureReason(); return codec; } diff --git 
a/test/integration/http_integration.h b/test/integration/http_integration.h index bc04a94be36a..4c04d5672507 100644 --- a/test/integration/http_integration.h +++ b/test/integration/http_integration.h @@ -106,7 +106,9 @@ class HttpIntegrationTest : public BaseIntegrationTest { IntegrationCodecClientPtr makeHttpConnection(uint32_t port); // Makes a http connection object without checking its connected state. - virtual IntegrationCodecClientPtr makeRawHttpConnection(Network::ClientConnectionPtr&& conn); + virtual IntegrationCodecClientPtr makeRawHttpConnection( + Network::ClientConnectionPtr&& conn, + absl::optional http2_options); // Makes a http connection object with asserting a connected state. IntegrationCodecClientPtr makeHttpConnection(Network::ClientConnectionPtr&& conn); diff --git a/test/integration/sds_dynamic_integration_test.cc b/test/integration/sds_dynamic_integration_test.cc index 426e7c6a3a30..eaf5513374c1 100644 --- a/test/integration/sds_dynamic_integration_test.cc +++ b/test/integration/sds_dynamic_integration_test.cc @@ -226,7 +226,7 @@ TEST_P(SdsDynamicDownstreamIntegrationTest, WrongSecretFirst) { }; initialize(); - codec_client_ = makeRawHttpConnection(makeSslClientConnection()); + codec_client_ = makeRawHttpConnection(makeSslClientConnection(), absl::nullopt); // the connection state is not connected. EXPECT_FALSE(codec_client_->connected()); codec_client_->connection()->close(Network::ConnectionCloseType::NoFlush); diff --git a/test/mocks/http/stream.h b/test/mocks/http/stream.h index dddf63fbd478..b155af4a121d 100644 --- a/test/mocks/http/stream.h +++ b/test/mocks/http/stream.h @@ -20,6 +20,7 @@ class MockStream : public Stream { MOCK_METHOD(void, setWriteBufferWatermarks, (uint32_t, uint32_t)); MOCK_METHOD(uint32_t, bufferLimit, ()); MOCK_METHOD(const Network::Address::InstanceConstSharedPtr&, connectionLocalAddress, ()); + MOCK_METHOD(void, setFlushTimeout, (std::chrono::milliseconds timeout)); std::list callbacks_{}; Network::Address::InstanceConstSharedPtr connection_local_address_; From 5eba69a1f375413fb93fab4173f9c393ac8c2818 Mon Sep 17 00:00:00 2001 From: antonio Date: Mon, 8 Jun 2020 17:59:54 -0400 Subject: [PATCH 490/909] [buffer] Add on-drain hook to buffer API and use it to avoid fragmentation due to tracking of H2 data and control frames in the output buffer (#144) Signed-off-by: antonio --- include/envoy/buffer/buffer.h | 9 + source/common/buffer/buffer_impl.cc | 14 +- source/common/buffer/buffer_impl.h | 21 +- source/common/http/http2/codec_impl.cc | 26 +- source/common/http/http2/codec_impl.h | 8 +- test/common/buffer/buffer_fuzz.cc | 6 + test/common/buffer/owned_impl_test.cc | 353 ++++++++++++++++++++-- test/common/http/http2/codec_impl_test.cc | 10 +- 8 files changed, 390 insertions(+), 57 deletions(-) diff --git a/include/envoy/buffer/buffer.h b/include/envoy/buffer/buffer.h index bb2d81259bc8..1f78f380f6b6 100644 --- a/include/envoy/buffer/buffer.h +++ b/include/envoy/buffer/buffer.h @@ -62,6 +62,15 @@ class Instance { public: virtual ~Instance() = default; + /** + * Register function to call when the last byte in the last slice of this + * buffer has fully drained. Note that slices may be transferred to + * downstream buffers, drain trackers are transferred along with the bytes + * they track so the function is called only after the last byte is drained + * from all buffers. + */ + virtual void addDrainTracker(std::function drain_tracker) PURE; + /** * Copy data into the buffer (deprecated, use absl::string_view variant * instead). 
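[Illustrative sketch, not part of the patch] The addDrainTracker() hook introduced above can be exercised directly against Buffer::OwnedImpl. The snippet below assumes the callback type is std::function<void()> (the template argument is elided in the header excerpt above) and that a tracker attaches to the last slice present at the time of the call, as the interface comment describes; drainTrackerSketch is a hypothetical helper, not an Envoy API.

#include "common/buffer/buffer_impl.h"

void drainTrackerSketch() {
  Buffer::OwnedImpl source;
  source.add("frame bytes");
  // Runs only once the last byte of the slice holding "frame bytes" has been drained,
  // even if that slice is first moved into another buffer.
  source.addDrainTracker([] { /* e.g. decrement an outbound-frame counter */ });

  Buffer::OwnedImpl output;
  output.move(source);           // the tracker travels with the bytes it covers
  output.drain(output.length()); // the tracker fires here
}

This is the pattern codec_impl.cc adopts below: instead of wrapping each serialized frame in an OwnedBufferFragmentImpl with a release callback, it appends the bytes and registers a drain tracker that decrements outbound_frames_ / outbound_control_frames_, avoiding the fragmentation the commit message describes.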
diff --git a/source/common/buffer/buffer_impl.cc b/source/common/buffer/buffer_impl.cc index c53a51c02bd0..716869fac29b 100644 --- a/source/common/buffer/buffer_impl.cc +++ b/source/common/buffer/buffer_impl.cc @@ -33,6 +33,11 @@ void OwnedImpl::addImpl(const void* data, uint64_t size) { } } +void OwnedImpl::addDrainTracker(std::function drain_tracker) { + ASSERT(!slices_.empty()); + slices_.back()->addDrainTracker(std::move(drain_tracker)); +} + void OwnedImpl::add(const void* data, uint64_t size) { addImpl(data, size); } void OwnedImpl::addBufferFragment(BufferFragment& fragment) { @@ -231,9 +236,11 @@ void* OwnedImpl::linearize(uint32_t size) { auto dest = static_cast(reservation.mem_); do { uint64_t data_size = slices_.front()->dataSize(); - memcpy(dest, slices_.front()->data(), data_size); - bytes_copied += data_size; - dest += data_size; + if (data_size > 0) { + memcpy(dest, slices_.front()->data(), data_size); + bytes_copied += data_size; + dest += data_size; + } slices_.pop_front(); } while (bytes_copied < linearized_size); ASSERT(dest == static_cast(reservation.mem_) + linearized_size); @@ -256,6 +263,7 @@ void OwnedImpl::coalesceOrAddSlice(SlicePtr&& other_slice) { // Copy content of the `other_slice`. The `move` methods which call this method effectively // drain the source buffer. addImpl(other_slice->data(), slice_size); + other_slice->transferDrainTrackersTo(*slices_.back()); } else { // Take ownership of the slice. slices_.emplace_back(std::move(other_slice)); diff --git a/source/common/buffer/buffer_impl.h b/source/common/buffer/buffer_impl.h index 7da3adb82195..90d76da81d39 100644 --- a/source/common/buffer/buffer_impl.h +++ b/source/common/buffer/buffer_impl.h @@ -35,7 +35,11 @@ class Slice { public: using Reservation = RawSlice; - virtual ~Slice() = default; + virtual ~Slice() { + for (const auto& drain_tracker : drain_trackers_) { + drain_tracker(); + } + } /** * @return a pointer to the start of the usable content. 
@@ -137,6 +141,9 @@ class Slice { */ uint64_t append(const void* data, uint64_t size) { uint64_t copy_size = std::min(size, reservableSize()); + if (copy_size == 0) { + return 0; + } uint8_t* dest = base_ + reservable_; reservable_ += copy_size; // NOLINTNEXTLINE(clang-analyzer-core.NullDereference) @@ -193,6 +200,15 @@ class Slice { return SliceRepresentation{dataSize(), reservableSize(), capacity_}; } + void transferDrainTrackersTo(Slice& destination) { + destination.drain_trackers_.splice(destination.drain_trackers_.end(), drain_trackers_); + ASSERT(drain_trackers_.empty()); + } + + void addDrainTracker(std::function drain_tracker) { + drain_trackers_.emplace_back(std::move(drain_tracker)); + } + protected: Slice(uint64_t data, uint64_t reservable, uint64_t capacity) : data_(data), reservable_(reservable), capacity_(capacity) {} @@ -208,6 +224,8 @@ class Slice { /** Total number of bytes in the slice */ uint64_t capacity_; + + std::list> drain_trackers_; }; using SlicePtr = std::unique_ptr; @@ -510,6 +528,7 @@ class OwnedImpl : public LibEventInstance { OwnedImpl(const void* data, uint64_t size); // Buffer::Instance + void addDrainTracker(std::function drain_tracker) override; void add(const void* data, uint64_t size) override; void addBufferFragment(BufferFragment& fragment) override; void add(absl::string_view data) override; diff --git a/source/common/http/http2/codec_impl.cc b/source/common/http/http2/codec_impl.cc index ba8553a3266e..6c56463d4e76 100644 --- a/source/common/http/http2/codec_impl.cc +++ b/source/common/http/http2/codec_impl.cc @@ -499,13 +499,9 @@ ConnectionImpl::ConnectionImpl(Network::Connection& connection, CodecStats& stat stream_error_on_invalid_http_messaging_( http2_options.stream_error_on_invalid_http_messaging()), flood_detected_(false), max_outbound_frames_(http2_options.max_outbound_frames().value()), - frame_buffer_releasor_([this](const Buffer::OwnedBufferFragmentImpl* fragment) { - releaseOutboundFrame(fragment); - }), + frame_buffer_releasor_([this]() { releaseOutboundFrame(); }), max_outbound_control_frames_(http2_options.max_outbound_control_frames().value()), - control_frame_buffer_releasor_([this](const Buffer::OwnedBufferFragmentImpl* fragment) { - releaseOutboundControlFrame(fragment); - }), + control_frame_buffer_releasor_([this]() { releaseOutboundControlFrame(); }), max_consecutive_inbound_frames_with_empty_payload_( http2_options.max_consecutive_inbound_frames_with_empty_payload().value()), max_inbound_priority_frames_per_stream_( @@ -819,27 +815,21 @@ bool ConnectionImpl::addOutboundFrameFragment(Buffer::OwnedImpl& output, const u return false; } - auto fragment = Buffer::OwnedBufferFragmentImpl::create( - absl::string_view(reinterpret_cast(data), length), - is_outbound_flood_monitored_control_frame ? control_frame_buffer_releasor_ - : frame_buffer_releasor_); - - // The Buffer::OwnedBufferFragmentImpl object will be deleted in the *frame_buffer_releasor_ - // callback. - output.addBufferFragment(*fragment.release()); + output.add(data, length); + output.addDrainTracker(is_outbound_flood_monitored_control_frame ? 
control_frame_buffer_releasor_ + : frame_buffer_releasor_); return true; } -void ConnectionImpl::releaseOutboundFrame(const Buffer::OwnedBufferFragmentImpl* fragment) { +void ConnectionImpl::releaseOutboundFrame() { ASSERT(outbound_frames_ >= 1); --outbound_frames_; - delete fragment; } -void ConnectionImpl::releaseOutboundControlFrame(const Buffer::OwnedBufferFragmentImpl* fragment) { +void ConnectionImpl::releaseOutboundControlFrame() { ASSERT(outbound_control_frames_ >= 1); --outbound_control_frames_; - releaseOutboundFrame(fragment); + releaseOutboundFrame(); } ssize_t ConnectionImpl::onSend(const uint8_t* data, size_t length) { diff --git a/source/common/http/http2/codec_impl.h b/source/common/http/http2/codec_impl.h index 5751dba9c86d..c977299b0174 100644 --- a/source/common/http/http2/codec_impl.h +++ b/source/common/http/http2/codec_impl.h @@ -424,7 +424,7 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable frame_buffer_releasor_; // This counter keeps track of the number of outbound frames of types PING, SETTINGS and // RST_STREAM (these that were buffered in the underlying connection but not yet written into the // socket). If this counter exceeds the `max_outbound_control_frames_' value the connection is @@ -433,7 +433,7 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable control_frame_buffer_releasor_; // This counter keeps track of the number of consecutive inbound frames of types HEADERS, // CONTINUATION and DATA with an empty payload and no end stream flag. If this counter exceeds // the `max_consecutive_inbound_frames_with_empty_payload_` value the connection is terminated. @@ -497,8 +497,8 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable drain_tracker) override { + // Not implemented well. 
+ ASSERT(false); + drain_tracker(); + } + void add(const void* data, uint64_t size) override { FUZZ_ASSERT(start_ + size_ + size <= data_.size()); ::memcpy(mutableEnd(), data, size); diff --git a/test/common/buffer/owned_impl_test.cc b/test/common/buffer/owned_impl_test.cc index 795a4416bc15..d622d6984e43 100644 --- a/test/common/buffer/owned_impl_test.cc +++ b/test/common/buffer/owned_impl_test.cc @@ -37,12 +37,21 @@ class OwnedImplTest : public testing::Test { static void expectSlices(std::vector> buffer_list, OwnedImpl& buffer) { const auto& buffer_slices = buffer.describeSlicesForTest(); + ASSERT_EQ(buffer_list.size(), buffer_slices.size()); for (uint64_t i = 0; i < buffer_slices.size(); i++) { EXPECT_EQ(buffer_slices[i].data, buffer_list[i][0]); EXPECT_EQ(buffer_slices[i].reservable, buffer_list[i][1]); EXPECT_EQ(buffer_slices[i].capacity, buffer_list[i][2]); } } + + static void expectFirstSlice(std::vector slice_description, OwnedImpl& buffer) { + const auto& buffer_slices = buffer.describeSlicesForTest(); + ASSERT_LE(1, buffer_slices.size()); + EXPECT_EQ(buffer_slices[0].data, slice_description[0]); + EXPECT_EQ(buffer_slices[0].reservable, slice_description[1]); + EXPECT_EQ(buffer_slices[0].capacity, slice_description[2]); + } }; TEST_F(OwnedImplTest, AddBufferFragmentNoCleanup) { @@ -80,6 +89,7 @@ TEST_F(OwnedImplTest, AddEmptyFragment) { BufferFragmentImpl frag2("", 0, [this](const void*, size_t, const BufferFragmentImpl*) { release_callback_called_ = true; }); + BufferFragmentImpl frag3(input, 11, [](const void*, size_t, const BufferFragmentImpl*) {}); Buffer::OwnedImpl buffer; buffer.addBufferFragment(frag1); EXPECT_EQ(11, buffer.length()); @@ -87,7 +97,18 @@ TEST_F(OwnedImplTest, AddEmptyFragment) { buffer.addBufferFragment(frag2); EXPECT_EQ(11, buffer.length()); - buffer.drain(11); + buffer.addBufferFragment(frag3); + EXPECT_EQ(22, buffer.length()); + + // Cover case of copying a buffer with an empty fragment. 
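+  // (The empty fragment produces a zero-length slice; the copy and copyOut() below must tolerate
+  // it, just as linearize() now skips zero-length slices in buffer_impl.cc above.)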
+ Buffer::OwnedImpl buffer2; + buffer2.add(buffer); + + // Cover copyOut + std::unique_ptr outbuf(new char[buffer.length()]); + buffer.copyOut(0, buffer.length(), outbuf.get()); + + buffer.drain(22); EXPECT_EQ(0, buffer.length()); EXPECT_TRUE(release_callback_called_); } @@ -326,6 +347,282 @@ TEST_F(OwnedImplTest, Read) { EXPECT_THAT(buffer.describeSlicesForTest(), testing::IsEmpty()); } +TEST_F(OwnedImplTest, DrainTracking) { + testing::InSequence s; + + Buffer::OwnedImpl buffer; + buffer.add("a"); + + testing::MockFunction tracker1; + testing::MockFunction tracker2; + buffer.addDrainTracker(tracker1.AsStdFunction()); + buffer.addDrainTracker(tracker2.AsStdFunction()); + + testing::MockFunction done; + EXPECT_CALL(tracker1, Call()); + EXPECT_CALL(tracker2, Call()); + EXPECT_CALL(done, Call()); + buffer.drain(buffer.length()); + done.Call(); +} + +TEST_F(OwnedImplTest, MoveDrainTrackersWhenTransferingSlices) { + testing::InSequence s; + + Buffer::OwnedImpl buffer1; + buffer1.add("a"); + + testing::MockFunction tracker1; + buffer1.addDrainTracker(tracker1.AsStdFunction()); + + Buffer::OwnedImpl buffer2; + buffer2.add("b"); + + testing::MockFunction tracker2; + buffer2.addDrainTracker(tracker2.AsStdFunction()); + + buffer2.add(std::string(10000, 'c')); + testing::MockFunction tracker3; + buffer2.addDrainTracker(tracker3.AsStdFunction()); + EXPECT_EQ(2, buffer2.getRawSlices().size()); + + buffer1.move(buffer2); + EXPECT_EQ(10002, buffer1.length()); + EXPECT_EQ(0, buffer2.length()); + EXPECT_EQ(3, buffer1.getRawSlices().size()); + EXPECT_EQ(0, buffer2.getRawSlices().size()); + + testing::MockFunction done; + EXPECT_CALL(tracker1, Call()); + EXPECT_CALL(tracker2, Call()); + EXPECT_CALL(tracker3, Call()); + EXPECT_CALL(done, Call()); + buffer1.drain(buffer1.length()); + done.Call(); +} + +TEST_F(OwnedImplTest, MoveDrainTrackersWhenCopying) { + testing::InSequence s; + + Buffer::OwnedImpl buffer1; + buffer1.add("a"); + + testing::MockFunction tracker1; + buffer1.addDrainTracker(tracker1.AsStdFunction()); + + Buffer::OwnedImpl buffer2; + buffer2.add("b"); + + testing::MockFunction tracker2; + buffer2.addDrainTracker(tracker2.AsStdFunction()); + + buffer1.move(buffer2); + EXPECT_EQ(2, buffer1.length()); + EXPECT_EQ(0, buffer2.length()); + EXPECT_EQ(1, buffer1.getRawSlices().size()); + EXPECT_EQ(0, buffer2.getRawSlices().size()); + + buffer1.drain(1); + testing::MockFunction done; + EXPECT_CALL(tracker1, Call()); + EXPECT_CALL(tracker2, Call()); + EXPECT_CALL(done, Call()); + buffer1.drain(1); + done.Call(); +} + +TEST_F(OwnedImplTest, PartialMoveDrainTrackers) { + testing::InSequence s; + + Buffer::OwnedImpl buffer1; + buffer1.add("a"); + + testing::MockFunction tracker1; + buffer1.addDrainTracker(tracker1.AsStdFunction()); + + Buffer::OwnedImpl buffer2; + buffer2.add("b"); + + testing::MockFunction tracker2; + buffer2.addDrainTracker(tracker2.AsStdFunction()); + + buffer2.add(std::string(10000, 'c')); + testing::MockFunction tracker3; + buffer2.addDrainTracker(tracker3.AsStdFunction()); + EXPECT_EQ(2, buffer2.getRawSlices().size()); + + // Move the first slice and associated trackers and part of the second slice to buffer1. 
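+  // After the partial move, buffer1 holds 1 + 4999 = 5000 bytes plus tracker1 and tracker2, while
+  // the remaining 5002 bytes and tracker3 (attached to the partially moved slice) stay in buffer2.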
+ buffer1.move(buffer2, 4999); + EXPECT_EQ(5000, buffer1.length()); + EXPECT_EQ(5002, buffer2.length()); + EXPECT_EQ(3, buffer1.getRawSlices().size()); + EXPECT_EQ(1, buffer2.getRawSlices().size()); + + testing::MockFunction done; + EXPECT_CALL(tracker1, Call()); + buffer1.drain(1); + + EXPECT_CALL(tracker2, Call()); + EXPECT_CALL(done, Call()); + buffer1.drain(buffer1.length()); + done.Call(); + + // tracker3 remained in buffer2. + EXPECT_CALL(tracker3, Call()); + buffer2.drain(buffer2.length()); +} + +TEST_F(OwnedImplTest, DrainTrackingOnDestruction) { + testing::InSequence s; + + auto buffer = std::make_unique(); + buffer->add("a"); + + testing::MockFunction tracker; + buffer->addDrainTracker(tracker.AsStdFunction()); + + testing::MockFunction done; + EXPECT_CALL(tracker, Call()); + EXPECT_CALL(done, Call()); + buffer.reset(); + done.Call(); +} + +TEST_F(OwnedImplTest, Linearize) { + Buffer::OwnedImpl buffer; + + // Unowned slice to track when linearize kicks in. + std::string input(1000, 'a'); + BufferFragmentImpl frag( + input.c_str(), input.size(), + [this](const void*, size_t, const BufferFragmentImpl*) { release_callback_called_ = true; }); + buffer.addBufferFragment(frag); + + // Second slice with more data. + buffer.add(std::string(1000, 'b')); + + // Linearize does not change the pointer associated with the first slice if requested size is less + // than or equal to size of the first slice. + EXPECT_EQ(input.c_str(), buffer.linearize(input.size())); + EXPECT_FALSE(release_callback_called_); + + constexpr uint64_t LinearizeSize = 2000; + void* out_ptr = buffer.linearize(LinearizeSize); + EXPECT_TRUE(release_callback_called_); + EXPECT_EQ(input + std::string(1000, 'b'), + absl::string_view(reinterpret_cast(out_ptr), LinearizeSize)); +} + +TEST_F(OwnedImplTest, LinearizeEmptyBuffer) { + Buffer::OwnedImpl buffer; + EXPECT_EQ(nullptr, buffer.linearize(0)); +} + +TEST_F(OwnedImplTest, LinearizeSingleSlice) { + auto buffer = std::make_unique(); + + // Unowned slice to track when linearize kicks in. + std::string input(1000, 'a'); + BufferFragmentImpl frag( + input.c_str(), input.size(), + [this](const void*, size_t, const BufferFragmentImpl*) { release_callback_called_ = true; }); + buffer->addBufferFragment(frag); + + EXPECT_EQ(input.c_str(), buffer->linearize(buffer->length())); + EXPECT_FALSE(release_callback_called_); + + buffer.reset(); + EXPECT_TRUE(release_callback_called_); +} + +TEST_F(OwnedImplTest, LinearizeDrainTracking) { + constexpr uint32_t SmallChunk = 200; + constexpr uint32_t LargeChunk = 16384 - SmallChunk; + constexpr uint32_t LinearizeSize = SmallChunk + LargeChunk; + + // Create a buffer with a eclectic combination of buffer OwnedSlice and UnownedSlices that will + // help us explore the properties of linearize. + Buffer::OwnedImpl buffer; + + // Large add below the target linearize size. + testing::MockFunction tracker1; + buffer.add(std::string(LargeChunk, 'a')); + buffer.addDrainTracker(tracker1.AsStdFunction()); + + // Unowned slice which causes some fragmentation. + testing::MockFunction tracker2; + testing::MockFunction + release_callback_tracker; + std::string frag_input(2 * SmallChunk, 'b'); + BufferFragmentImpl frag(frag_input.c_str(), frag_input.size(), + release_callback_tracker.AsStdFunction()); + buffer.addBufferFragment(frag); + buffer.addDrainTracker(tracker2.AsStdFunction()); + + // And an unowned slice with 0 size, because. 
+ testing::MockFunction tracker3; + testing::MockFunction + release_callback_tracker2; + BufferFragmentImpl frag2(nullptr, 0, release_callback_tracker2.AsStdFunction()); + buffer.addBufferFragment(frag2); + buffer.addDrainTracker(tracker3.AsStdFunction()); + + // Add a very large chunk + testing::MockFunction tracker4; + buffer.add(std::string(LargeChunk + LinearizeSize, 'c')); + buffer.addDrainTracker(tracker4.AsStdFunction()); + + // Small adds that create no gaps. + testing::MockFunction tracker5; + for (int i = 0; i < 105; ++i) { + buffer.add(std::string(SmallChunk, 'd')); + } + buffer.addDrainTracker(tracker5.AsStdFunction()); + + expectSlices({{16184, 136, 16320}, + {400, 0, 400}, + {0, 0, 0}, + {32704, 0, 32704}, + {4032, 0, 4032}, + {4032, 0, 4032}, + {4032, 0, 4032}, + {4032, 0, 4032}, + {4032, 0, 4032}, + {704, 3328, 4032}}, + buffer); + + testing::InSequence s; + testing::MockFunction drain_tracker; + testing::MockFunction done_tracker; + EXPECT_CALL(tracker1, Call()); + EXPECT_CALL(release_callback_tracker, Call(_, _, _)); + EXPECT_CALL(tracker2, Call()); + EXPECT_CALL(drain_tracker, Call(3 * LargeChunk + 108 * SmallChunk, 16384)); + EXPECT_CALL(release_callback_tracker2, Call(_, _, _)); + EXPECT_CALL(tracker3, Call()); + EXPECT_CALL(tracker4, Call()); + EXPECT_CALL(drain_tracker, Call(2 * LargeChunk + 107 * SmallChunk, 16384)); + EXPECT_CALL(drain_tracker, Call(LargeChunk + 106 * SmallChunk, 16384)); + EXPECT_CALL(drain_tracker, Call(105 * SmallChunk, 16384)); + EXPECT_CALL(tracker5, Call()); + EXPECT_CALL(drain_tracker, Call(4616, 4616)); + EXPECT_CALL(done_tracker, Call()); + for (auto& expected_first_slice : std::vector>{{16584, 3832, 20416}, + {32904, 3896, 36800}, + {16520, 3896, 36800}, + {20296, 120, 20416}, + {4616, 3512, 8128}}) { + const uint32_t write_size = std::min(LinearizeSize, buffer.length()); + buffer.linearize(write_size); + expectFirstSlice(expected_first_slice, buffer); + drain_tracker.Call(buffer.length(), write_size); + buffer.drain(write_size); + } + done_tracker.Call(); + + expectSlices({}, buffer); +} + TEST_F(OwnedImplTest, ReserveCommit) { // This fragment will later be added to the buffer. It is declared in an enclosing scope to // ensure it is not destructed until after the buffer is. @@ -377,12 +674,12 @@ TEST_F(OwnedImplTest, ReserveCommit) { // Request a reservation that too big to fit in the existing slices. This should result // in the creation of a third slice. - expectSlices({{1, 4055, 4056}}, buffer); + expectSlices({{1, 4031, 4032}}, buffer); buffer.reserve(4096 - sizeof(OwnedSlice), iovecs, NumIovecs); - expectSlices({{1, 4055, 4056}, {0, 4056, 4056}}, buffer); + expectSlices({{1, 4031, 4032}, {0, 4032, 4032}}, buffer); const void* slice2 = iovecs[1].mem_; num_reserved = buffer.reserve(8192, iovecs, NumIovecs); - expectSlices({{1, 4055, 4056}, {0, 4056, 4056}, {0, 4056, 4056}}, buffer); + expectSlices({{1, 4031, 4032}, {0, 4032, 4032}, {0, 4032, 4032}}, buffer); EXPECT_EQ(3, num_reserved); EXPECT_EQ(slice1, iovecs[0].mem_); EXPECT_EQ(slice2, iovecs[1].mem_); @@ -391,11 +688,11 @@ TEST_F(OwnedImplTest, ReserveCommit) { // Append a fragment to the buffer, and then request a small reservation. The buffer // should make a new slice to satisfy the reservation; it cannot safely use any of // the previously seen slices, because they are no longer at the end of the buffer. 
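+  // Note: the expected slice geometry in this file shrinks by 24 bytes versus the old values
+  // (e.g. 4056 -> 4032) because each Slice now carries a drain_trackers_ list (see buffer_impl.h
+  // above), leaving less room for data within the same underlying allocation.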
- expectSlices({{1, 4055, 4056}}, buffer); + expectSlices({{1, 4031, 4032}}, buffer); buffer.addBufferFragment(fragment); EXPECT_EQ(13, buffer.length()); num_reserved = buffer.reserve(1, iovecs, NumIovecs); - expectSlices({{1, 4055, 4056}, {12, 0, 12}, {0, 4056, 4056}}, buffer); + expectSlices({{1, 4031, 4032}, {12, 0, 12}, {0, 4032, 4032}}, buffer); EXPECT_EQ(1, num_reserved); EXPECT_NE(slice1, iovecs[0].mem_); commitReservation(iovecs, num_reserved, buffer); @@ -426,16 +723,16 @@ TEST_F(OwnedImplTest, ReserveCommitReuse) { EXPECT_EQ(2, num_reserved); const void* first_slice = iovecs[0].mem_; iovecs[0].len_ = 1; - expectSlices({{8000, 4248, 12248}, {0, 12248, 12248}}, buffer); + expectSlices({{8000, 4224, 12224}, {0, 12224, 12224}}, buffer); buffer.commit(iovecs, 1); EXPECT_EQ(8001, buffer.length()); EXPECT_EQ(first_slice, iovecs[0].mem_); // The second slice is now released because there's nothing in the second slice. - expectSlices({{8001, 4247, 12248}}, buffer); + expectSlices({{8001, 4223, 12224}}, buffer); // Reserve 16KB again. num_reserved = buffer.reserve(16384, iovecs, NumIovecs); - expectSlices({{8001, 4247, 12248}, {0, 12248, 12248}}, buffer); + expectSlices({{8001, 4223, 12224}, {0, 12224, 12224}}, buffer); EXPECT_EQ(2, num_reserved); EXPECT_EQ(static_cast(first_slice) + 1, static_cast(iovecs[0].mem_)); @@ -462,7 +759,7 @@ TEST_F(OwnedImplTest, ReserveReuse) { EXPECT_EQ(2, num_reserved); EXPECT_EQ(first_slice, iovecs[0].mem_); EXPECT_EQ(second_slice, iovecs[1].mem_); - expectSlices({{0, 12248, 12248}, {0, 8152, 8152}}, buffer); + expectSlices({{0, 12224, 12224}, {0, 8128, 8128}}, buffer); // Request a larger reservation, verify that the second entry is replaced with a block with a // larger size. @@ -470,51 +767,51 @@ TEST_F(OwnedImplTest, ReserveReuse) { const void* third_slice = iovecs[1].mem_; EXPECT_EQ(2, num_reserved); EXPECT_EQ(first_slice, iovecs[0].mem_); - EXPECT_EQ(12248, iovecs[0].len_); + EXPECT_EQ(12224, iovecs[0].len_); EXPECT_NE(second_slice, iovecs[1].mem_); EXPECT_EQ(30000 - iovecs[0].len_, iovecs[1].len_); - expectSlices({{0, 12248, 12248}, {0, 8152, 8152}, {0, 20440, 20440}}, buffer); + expectSlices({{0, 12224, 12224}, {0, 8128, 8128}, {0, 20416, 20416}}, buffer); // Repeating a the reservation request for a smaller block returns the previous entry. num_reserved = buffer.reserve(16384, iovecs, NumIovecs); EXPECT_EQ(2, num_reserved); EXPECT_EQ(first_slice, iovecs[0].mem_); EXPECT_EQ(second_slice, iovecs[1].mem_); - expectSlices({{0, 12248, 12248}, {0, 8152, 8152}, {0, 20440, 20440}}, buffer); + expectSlices({{0, 12224, 12224}, {0, 8128, 8128}, {0, 20416, 20416}}, buffer); // Repeat the larger reservation notice that it doesn't match the prior reservation for 30000 // bytes. num_reserved = buffer.reserve(30000, iovecs, NumIovecs); EXPECT_EQ(2, num_reserved); EXPECT_EQ(first_slice, iovecs[0].mem_); - EXPECT_EQ(12248, iovecs[0].len_); + EXPECT_EQ(12224, iovecs[0].len_); EXPECT_NE(second_slice, iovecs[1].mem_); EXPECT_NE(third_slice, iovecs[1].mem_); EXPECT_EQ(30000 - iovecs[0].len_, iovecs[1].len_); - expectSlices({{0, 12248, 12248}, {0, 8152, 8152}, {0, 20440, 20440}, {0, 20440, 20440}}, buffer); + expectSlices({{0, 12224, 12224}, {0, 8128, 8128}, {0, 20416, 20416}, {0, 20416, 20416}}, buffer); // Commit the most recent reservation and verify the representation. 
buffer.commit(iovecs, num_reserved); - expectSlices({{12248, 0, 12248}, {0, 8152, 8152}, {0, 20440, 20440}, {17752, 2688, 20440}}, + expectSlices({{12224, 0, 12224}, {0, 8128, 8128}, {0, 20416, 20416}, {17776, 2640, 20416}}, buffer); // Do another reservation. num_reserved = buffer.reserve(16384, iovecs, NumIovecs); EXPECT_EQ(2, num_reserved); - expectSlices({{12248, 0, 12248}, - {0, 8152, 8152}, - {0, 20440, 20440}, - {17752, 2688, 20440}, - {0, 16344, 16344}}, + expectSlices({{12224, 0, 12224}, + {0, 8128, 8128}, + {0, 20416, 20416}, + {17776, 2640, 20416}, + {0, 16320, 16320}}, buffer); // And commit. buffer.commit(iovecs, num_reserved); - expectSlices({{12248, 0, 12248}, - {0, 8152, 8152}, - {0, 20440, 20440}, - {20440, 0, 20440}, - {13696, 2648, 16344}}, + expectSlices({{12224, 0, 12224}, + {0, 8128, 8128}, + {0, 20416, 20416}, + {20416, 0, 20416}, + {13744, 2576, 16320}}, buffer); } @@ -671,7 +968,7 @@ TEST_F(OwnedImplTest, ReserveZeroCommit) { ASSERT_EQ(os_sys_calls.close(pipe_fds[1]).rc_, 0); ASSERT_EQ(previous_length, buf.search(data.data(), rc, previous_length)); EXPECT_EQ("bbbbb", buf.toString().substr(0, 5)); - expectSlices({{5, 0, 4056}, {1953, 2103, 4056}}, buf); + expectSlices({{5, 0, 4032}, {1953, 2079, 4032}}, buf); } TEST_F(OwnedImplTest, ReadReserveAndCommit) { @@ -698,7 +995,7 @@ TEST_F(OwnedImplTest, ReadReserveAndCommit) { ASSERT_EQ(result.rc_, static_cast(rc)); ASSERT_EQ(os_sys_calls.close(pipe_fds[1]).rc_, 0); EXPECT_EQ("bbbbbe", buf.toString()); - expectSlices({{6, 4050, 4056}}, buf); + expectSlices({{6, 4026, 4032}}, buf); } TEST(OverflowDetectingUInt64, Arithmetic) { diff --git a/test/common/http/http2/codec_impl_test.cc b/test/common/http/http2/codec_impl_test.cc index 400c4f564ce7..82e842be6efe 100644 --- a/test/common/http/http2/codec_impl_test.cc +++ b/test/common/http/http2/codec_impl_test.cc @@ -1715,7 +1715,10 @@ TEST_P(Http2CodecImplTest, PingFloodMitigationDisabled) { // Verify that outbound control frame counter decreases when send buffer is drained TEST_P(Http2CodecImplTest, PingFloodCounterReset) { - static const int kMaxOutboundControlFrames = 100; + // Ping frames are 17 bytes each so 237 full frames and a partial frame fit in the current min + // size for buffer slices. Setting the limit to 2x+1 the number that fits in a single slice allows + // the logic below that verifies drain and overflow thresholds. + static const int kMaxOutboundControlFrames = 475; max_outbound_control_frames_ = kMaxOutboundControlFrames; initialize(); @@ -1740,16 +1743,17 @@ TEST_P(Http2CodecImplTest, PingFloodCounterReset) { EXPECT_NO_THROW(client_->sendPendingFrames()); EXPECT_EQ(ack_count, kMaxOutboundControlFrames); - // Drain kMaxOutboundFrames / 2 slices from the send buffer + // Drain floor(kMaxOutboundFrames / 2) slices from the send buffer buffer.drain(buffer.length() / 2); - // Send kMaxOutboundFrames / 2 more pings. + // Send floor(kMaxOutboundFrames / 2) more pings. for (int i = 0; i < kMaxOutboundControlFrames / 2; ++i) { EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr)); } // The number of outbound frames should be half of max so the connection should not be // terminated. EXPECT_NO_THROW(client_->sendPendingFrames()); + EXPECT_EQ(ack_count, kMaxOutboundControlFrames + kMaxOutboundControlFrames / 2); // 1 more ping frame should overflow the outbound frame limit. 
EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr)); From 57c425f861bf2bee11b44c265c81d8933f7eca0e Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Tue, 16 Jun 2020 15:13:09 -0600 Subject: [PATCH 491/909] http2: fix stream flush timeout race with protocol error (#181) Fixes https://github.com/envoyproxy/envoy-setec/issues/180 Signed-off-by: Matt Klein --- .../configuration/best_practices/edge.rst | 3 +- docs/root/version_history/current.rst | 6 ++++ docs/root/version_history/v1.12.5.rst | 11 ++++++ docs/root/version_history/v1.13.3.rst | 12 +++++++ docs/root/version_history/v1.14.3.rst | 11 ++++++ docs/root/version_history/version_history.rst | 3 ++ source/common/http/http2/codec_impl.cc | 16 +++++---- source/common/http/http2/codec_impl.h | 7 ++++ test/common/http/http2/codec_impl_test.cc | 35 +++++++++++++++++++ .../alts/alts_integration_test.cc | 2 +- .../drain_close_integration_test.cc | 2 +- test/integration/http_integration.cc | 2 +- 12 files changed, 99 insertions(+), 11 deletions(-) create mode 100644 docs/root/version_history/v1.12.5.rst create mode 100644 docs/root/version_history/v1.13.3.rst create mode 100644 docs/root/version_history/v1.14.3.rst diff --git a/docs/root/configuration/best_practices/edge.rst b/docs/root/configuration/best_practices/edge.rst index d9b4f440afbd..fc717a5f9235 100644 --- a/docs/root/configuration/best_practices/edge.rst +++ b/docs/root/configuration/best_practices/edge.rst @@ -30,8 +30,7 @@ HTTP proxies should additionally configure: The following is a YAML example of the above recommendation (taken from the :ref:`Google VRP ` edge server configuration): -.. literalinclude:: envoy-edge.yaml - :language: yaml +.. code-block:: yaml overload_manager: refresh_interval: 0.25s diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 3ad224ad65ee..5710fa2114fb 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -39,10 +39,16 @@ Bug Fixes * adaptive concurrency: fixed a minRTT calculation bug where requests started before the concurrency limit was pinned to the minimum would skew the new minRTT value if the replies arrived after the start of the new minRTT window. +* buffer: fixed CVE-2020-12603 by avoiding fragmentation, and tracking of HTTP/2 data and control frames in the output buffer. * grpc-json: fix a bug when in trailers only gRPC response (e.g. error) HTTP status code is not being re-written. * http: fixed a bug in the grpc_http1_reverse_bridge filter where header-only requests were forwarded with a non-zero content length. * http: fixed a bug where in some cases slash was moved from path to query string when :ref:`merging of adjacent slashes` is enabled. +* http: fixed CVE-2020-12604 by changing :ref:`stream_idle_timeout ` + to also defend against an HTTP/2 peer that does not open stream window once an entire response has been buffered to be sent to a downstream client. +* http: fixed CVE-2020-12605 by including request URL in request header size computation, and rejecting partial headers that exceed configured limits. * http: fixed several bugs with applying correct connection close behavior across the http connection manager, health checker, and connection pool. This behavior may be temporarily reverted by setting runtime feature `envoy.reloadable_features.fix_connection_close` to false. +* listener: fixed CVE-2020-8663 by adding runtime support for :ref:`per-listener limits ` on active/accepted connections. 
+* overload management: fixed CVE-2020-8663 by adding runtime support for :ref:`global limits ` on active/accepted connections. * prometheus stats: fix the sort order of output lines to comply with the standard. * udp: the :ref:`reuse_port ` listener option must now be specified for UDP listeners if concurrency is > 1. This previously crashed so is considered a diff --git a/docs/root/version_history/v1.12.5.rst b/docs/root/version_history/v1.12.5.rst new file mode 100644 index 000000000000..b246e20d885b --- /dev/null +++ b/docs/root/version_history/v1.12.5.rst @@ -0,0 +1,11 @@ +1.12.5 (June 30, 2020) +====================== + +Changes +------- +* buffer: fixed CVE-2020-12603 by avoiding fragmentation, and tracking of HTTP/2 data and control frames in the output buffer. +* http: fixed CVE-2020-12604 by changing :ref:`stream_idle_timeout ` + to also defend against an HTTP/2 peer that does not open stream window once an entire response has been buffered to be sent to a downstream client. +* http: fixed CVE-2020-12605 by including request URL in request header size computation, and rejecting partial headers that exceed configured limits. +* listener: fixed CVE-2020-8663 by adding runtime support for :ref:`per-listener limits ` on active/accepted connections. +* overload management: fixed CVE-2020-8663 by adding runtime support for :ref:`global limits ` on active/accepted connections. diff --git a/docs/root/version_history/v1.13.3.rst b/docs/root/version_history/v1.13.3.rst new file mode 100644 index 000000000000..6002a62c496b --- /dev/null +++ b/docs/root/version_history/v1.13.3.rst @@ -0,0 +1,12 @@ +1.13.3 (June 30, 2020) +====================== + +Changes +------- + +* buffer: fixed CVE-2020-12603 by avoiding fragmentation, and tracking of HTTP/2 data and control frames in the output buffer. +* http: fixed CVE-2020-12604 by changing :ref:`stream_idle_timeout ` + to also defend against an HTTP/2 peer that does not open stream window once an entire response has been buffered to be sent to a downstream client. +* http: fixed CVE-2020-12605 by including request URL in request header size computation, and rejecting partial headers that exceed configured limits. +* listener: fixed CVE-2020-8663 by adding runtime support for :ref:`per-listener limits ` on active/accepted connections. +* overload management: fixed CVE-2020-8663 by adding runtime support for :ref:`global limits ` on active/accepted connections. diff --git a/docs/root/version_history/v1.14.3.rst b/docs/root/version_history/v1.14.3.rst new file mode 100644 index 000000000000..8a3a3d91da08 --- /dev/null +++ b/docs/root/version_history/v1.14.3.rst @@ -0,0 +1,11 @@ +1.14.3 (June 30, 2020) +====================== + +Changes +------- +* buffer: fixed CVE-2020-12603 by avoiding fragmentation, and tracking of HTTP/2 data and control frames in the output buffer. +* http: fixed CVE-2020-12604 by changing :ref:`stream_idle_timeout ` + to also defend against an HTTP/2 peer that does not open stream window once an entire response has been buffered to be sent to a downstream client. +* http: fixed CVE-2020-12605 by including request URL in request header size computation, and rejecting partial headers that exceed configured limits. +* listener: fixed CVE-2020-8663 by adding runtime support for :ref:`per-listener limits ` on active/accepted connections. +* overload management: fixed CVE-2020-8663 by adding runtime support for :ref:`global limits ` on active/accepted connections. 
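[Editor's note: the release notes above reference existing HTTP connection manager settings rather than new fields. The following snippet is purely illustrative and is not part of this patch series; the field names `stream_idle_timeout`, `request_timeout`, and `max_request_headers_kb` are real HTTP connection manager options, but the structure shown is abbreviated and the values are assumptions chosen only to show how an edge deployment might bound stalled streams and oversized request headers.]

.. code-block:: yaml

  # Fragment of an HTTP connection manager config (illustrative values only).
  stat_prefix: ingress_http
  # Bounds streams stalled by a peer that never opens its flow-control window
  # (the behavior extended by the CVE-2020-12604 fix noted above).
  stream_idle_timeout: 300s
  request_timeout: 60s
  # The request URL now counts toward this limit (CVE-2020-12605 fix noted above).
  max_request_headers_kb: 60
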
diff --git a/docs/root/version_history/version_history.rst b/docs/root/version_history/version_history.rst
index 527dec86ca8d..2d7744bf2310 100644
--- a/docs/root/version_history/version_history.rst
+++ b/docs/root/version_history/version_history.rst
@@ -7,12 +7,15 @@ Version history
   :titlesonly:
 
   current
+  v1.14.3
   v1.14.2
   v1.14.1
   v1.14.0
+  v1.13.3
   v1.13.2
   v1.13.1
   v1.13.0
+  v1.12.5
   v1.12.4
   v1.12.3
   v1.12.2
diff --git a/source/common/http/http2/codec_impl.cc b/source/common/http/http2/codec_impl.cc
index 6c56463d4e76..532831198760 100644
--- a/source/common/http/http2/codec_impl.cc
+++ b/source/common/http/http2/codec_impl.cc
@@ -105,12 +105,7 @@ ConnectionImpl::StreamImpl::StreamImpl(ConnectionImpl& parent, uint32_t buffer_l
 ConnectionImpl::StreamImpl::~StreamImpl() { ASSERT(stream_idle_timer_ == nullptr); }
 
 void ConnectionImpl::StreamImpl::destroy() {
-  if (stream_idle_timer_ != nullptr) {
-    // To ease testing and the destructor assertion.
-    stream_idle_timer_->disableTimer();
-    stream_idle_timer_.reset();
-  }
-
+  disarmStreamIdleTimer();
   parent_.stats_.streams_active_.dec();
   parent_.stats_.pending_send_bytes_.sub(pending_send_data_.length());
 }
@@ -733,6 +728,15 @@ int ConnectionImpl::onFrameSend(const nghttp2_frame* frame) {
   case NGHTTP2_GOAWAY: {
     ENVOY_CONN_LOG(debug, "sent goaway code={}", connection_, frame->goaway.error_code);
     if (frame->goaway.error_code != NGHTTP2_NO_ERROR) {
+      // TODO(mattklein123): Returning this error code abandons standard nghttp2 frame accounting.
+      // As such, it is not reliable to call sendPendingFrames() again after this and we assume
+      // that the connection is going to get torn down immediately. One byproduct of this is that
+      // we need to cancel all pending flush stream timeouts since they can race with connection
+      // teardown. As part of the work to remove exceptions we should aim to clean up all of this
+      // error handling logic and only handle this type of case at the end of dispatch.
+      for (auto& stream : active_streams_) {
+        stream->disarmStreamIdleTimer();
+      }
       return NGHTTP2_ERR_CALLBACK_FAILURE;
     }
     break;
diff --git a/source/common/http/http2/codec_impl.h b/source/common/http/http2/codec_impl.h
index c977299b0174..895e8d21c088 100644
--- a/source/common/http/http2/codec_impl.h
+++ b/source/common/http/http2/codec_impl.h
@@ -173,6 +173,13 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable<Logger::Id::http2> {
+    void disarmStreamIdleTimer() {
+      if (stream_idle_timer_ != nullptr) {
+        // To ease testing and the destructor assertion.
+        stream_idle_timer_->disableTimer();
+        stream_idle_timer_.reset();
+      }
+    }
     StreamImpl* base() { return this; }
     ssize_t onDataSourceRead(uint64_t length, uint32_t* data_flags);
diff --git a/test/common/http/http2/codec_impl_test.cc b/test/common/http/http2/codec_impl_test.cc
index 82e842be6efe..f6005c0808ff 100644
--- a/test/common/http/http2/codec_impl_test.cc
+++ b/test/common/http/http2/codec_impl_test.cc
@@ -1121,6 +1121,41 @@ TEST_P(Http2CodecImplFlowControlTest, LargeServerBodyFlushTimeout) {
   EXPECT_EQ(1, server_stats_store_.counter("http2.tx_flush_timeout").value());
 }
 
+// Verify that when an incoming protocol error races with a stream flush timeout we correctly
+// disable the flush timeout and do not attempt to reset the stream.
+TEST_P(Http2CodecImplFlowControlTest, LargeServerBodyFlushTimeoutAfterGoaway) { + initialize(); + + InSequence s; + MockStreamCallbacks client_stream_callbacks; + request_encoder_->getStream().addCallbacks(client_stream_callbacks); + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); + request_encoder_->encodeHeaders(request_headers, true); + + ON_CALL(client_connection_, write(_, _)) + .WillByDefault( + Invoke([&](Buffer::Instance& data, bool) -> void { server_wrapper_.buffer_.add(data); })); + TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + EXPECT_CALL(response_decoder_, decodeHeaders_(_, false)); + response_encoder_->encodeHeaders(response_headers, false); + EXPECT_CALL(response_decoder_, decodeData(_, false)).Times(AtLeast(1)); + auto flush_timer = new Event::MockTimer(&server_connection_.dispatcher_); + EXPECT_CALL(*flush_timer, enableTimer(std::chrono::milliseconds(30000), _)); + Buffer::OwnedImpl body(std::string(1024 * 1024, 'a')); + response_encoder_->encodeData(body, true); + + // Force a protocol error. + Buffer::OwnedImpl garbage_data("this should cause a protocol error"); + EXPECT_CALL(client_callbacks_, onGoAway(_)); + EXPECT_CALL(*flush_timer, disableTimer()); + EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(0); + auto status = server_wrapper_.dispatch(garbage_data, *server_); + EXPECT_FALSE(status.ok()); + EXPECT_EQ(0, server_stats_store_.counter("http2.tx_flush_timeout").value()); +} + TEST_P(Http2CodecImplTest, WatermarkUnderEndStream) { initialize(); MockStreamCallbacks callbacks; diff --git a/test/extensions/transport_sockets/alts/alts_integration_test.cc b/test/extensions/transport_sockets/alts/alts_integration_test.cc index 42eb79d56e3a..eca7bbf6f7a0 100644 --- a/test/extensions/transport_sockets/alts/alts_integration_test.cc +++ b/test/extensions/transport_sockets/alts/alts_integration_test.cc @@ -332,7 +332,7 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, AltsIntegrationTestCapturingHandshaker, // Verifies that handshake request should include ALTS version. TEST_P(AltsIntegrationTestCapturingHandshaker, CheckAltsVersion) { initialize(); - codec_client_ = makeRawHttpConnection(makeAltsConnection()); + codec_client_ = makeRawHttpConnection(makeAltsConnection(), absl::nullopt); EXPECT_FALSE(codec_client_->connected()); EXPECT_EQ(capturing_handshaker_service_->client_versions.max_rpc_version().major(), capturing_handshaker_service_->server_versions.max_rpc_version().major()); diff --git a/test/integration/drain_close_integration_test.cc b/test/integration/drain_close_integration_test.cc index aa0afd8d141b..a702e94068d8 100644 --- a/test/integration/drain_close_integration_test.cc +++ b/test/integration/drain_close_integration_test.cc @@ -116,7 +116,7 @@ TEST_P(DrainCloseIntegrationTest, AdminGracefulDrain) { } // New connections can still be made. - auto second_codec_client_ = makeRawHttpConnection(makeClientConnection(http_port)); + auto second_codec_client_ = makeRawHttpConnection(makeClientConnection(http_port), absl::nullopt); EXPECT_TRUE(second_codec_client_->connected()); // Invoke /drain_listeners and shut down listeners. 
diff --git a/test/integration/http_integration.cc b/test/integration/http_integration.cc
index 7d27ae6d0e45..abec325faaec 100644
--- a/test/integration/http_integration.cc
+++ b/test/integration/http_integration.cc
@@ -988,7 +988,7 @@ void HttpIntegrationTest::testLargeRequestUrl(uint32_t url_size, uint32_t max_he
   auto response = std::move(encoder_decoder.second);
 
   if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) {
-    codec_client_->waitForDisconnect();
+    ASSERT_TRUE(codec_client_->waitForDisconnect());
     EXPECT_TRUE(response->complete());
     EXPECT_EQ("431", response->headers().Status()->value().getStringView());
   } else {

From 13ed52dcdf3f2d3449e0bab2806f7b98af795b3e Mon Sep 17 00:00:00 2001
From: Matt Klein
Date: Tue, 30 Jun 2020 13:10:29 -0600
Subject: [PATCH 493/909] test: add explicit test for custom registered headers (#11798)

Signed-off-by: Matt Klein
---
 include/envoy/http/header_map.h          |  1 +
 test/common/http/header_map_impl_test.cc | 17 +++++++++++++++++
 2 files changed, 18 insertions(+)

diff --git a/include/envoy/http/header_map.h b/include/envoy/http/header_map.h
index 2bd77a2f6bfc..82a79d4fa6e5 100644
--- a/include/envoy/http/header_map.h
+++ b/include/envoy/http/header_map.h
@@ -605,6 +605,7 @@ class CustomInlineHeaderRegistry {
   // between concrete header map types.
   template <Type type> struct Handle {
     Handle(RegistrationMap::const_iterator it) : it_(it) {}
+    bool operator==(const Handle& rhs) const { return it_ == rhs.it_; }
 
     RegistrationMap::const_iterator it_;
   };
diff --git a/test/common/http/header_map_impl_test.cc b/test/common/http/header_map_impl_test.cc
index a9223f629046..6e0eac4b19dc 100644
--- a/test/common/http/header_map_impl_test.cc
+++ b/test/common/http/header_map_impl_test.cc
@@ -354,6 +354,23 @@ TEST(HeaderStringTest, All) {
   }
 }
 
+Http::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::RequestHeaders>
+    custom_header_1(Http::LowerCaseString{"foo_custom_header"});
+Http::RegisterCustomInlineHeader<Http::CustomInlineHeaderRegistry::Type::RequestHeaders>
+    custom_header_1_copy(Http::LowerCaseString{"foo_custom_header"});
+
+// Make sure that the same header registered twice points to the same location.
+TEST(HeaderMapImplTest, CustomRegisteredHeaders) {
+  TestRequestHeaderMapImpl headers;
+  EXPECT_EQ(custom_header_1.handle(), custom_header_1_copy.handle());
+  EXPECT_EQ(nullptr, headers.getInline(custom_header_1.handle()));
+  EXPECT_EQ(nullptr, headers.getInline(custom_header_1_copy.handle()));
+  headers.setInline(custom_header_1.handle(), 42);
+  EXPECT_EQ("42", headers.getInlineValue(custom_header_1_copy.handle()));
+  EXPECT_EQ("foo_custom_header",
+            headers.getInline(custom_header_1.handle())->key().getStringView());
+}
+
 #define TEST_INLINE_HEADER_FUNCS(name)                                                             \
   header_map->addCopy(Headers::get().name, #name);                                                 \
   EXPECT_EQ(header_map->name()->value().getStringView(), #name);                                   \

From 078fd7d3c08e416991c8f2543e910b84f79b4c1d Mon Sep 17 00:00:00 2001
From: Ariane van der Steldt
Date: Tue, 30 Jun 2020 20:13:39 +0100
Subject: [PATCH 494/909] Fix header-to-metadata if clause (#11747)

The else-clause would trip when the on-header-present block was absent,
irrespective of whether the header was actually absent.

Signed-off-by: Ariane van der Steldt
---
 .../header_to_metadata_filter.cc              |  3 ++-
 .../header_to_metadata_filter_test.cc         | 21 +++++++++++++++++++
 2 files changed, 23 insertions(+), 1 deletion(-)

diff --git a/source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.cc b/source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.cc
index e96cbb55a53c..f9c060960eb9 100644
--- a/source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.cc
+++ b/source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.cc
@@ -210,7 +210,8 @@ void HeaderToMetadataFilter::writeHeaderToMetadata(Http::HeaderMap& headers,
       if (proto_rule.remove()) {
         headers.remove(header);
       }
-    } else if (proto_rule.has_on_header_missing()) {
+    }
+    if (header_entry == nullptr && proto_rule.has_on_header_missing()) {
       // Add metadata for the header missing case.
+DNS resolving emits :ref:`cluster statistics ` fields *update_attempt*, *update_success* and *update_failure*. + .. _arch_overview_service_discovery_types_original_destination: Original destination From 13ed52dcdf3f2d3449e0bab2806f7b98af795b3e Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Tue, 30 Jun 2020 13:10:29 -0600 Subject: [PATCH 493/909] test: add explicit test for custom registered headers (#11798) Signed-off-by: Matt Klein --- include/envoy/http/header_map.h | 1 + test/common/http/header_map_impl_test.cc | 17 +++++++++++++++++ 2 files changed, 18 insertions(+) diff --git a/include/envoy/http/header_map.h b/include/envoy/http/header_map.h index 2bd77a2f6bfc..82a79d4fa6e5 100644 --- a/include/envoy/http/header_map.h +++ b/include/envoy/http/header_map.h @@ -605,6 +605,7 @@ class CustomInlineHeaderRegistry { // between concrete header map types. template struct Handle { Handle(RegistrationMap::const_iterator it) : it_(it) {} + bool operator==(const Handle& rhs) const { return it_ == rhs.it_; } RegistrationMap::const_iterator it_; }; diff --git a/test/common/http/header_map_impl_test.cc b/test/common/http/header_map_impl_test.cc index a9223f629046..6e0eac4b19dc 100644 --- a/test/common/http/header_map_impl_test.cc +++ b/test/common/http/header_map_impl_test.cc @@ -354,6 +354,23 @@ TEST(HeaderStringTest, All) { } } +Http::RegisterCustomInlineHeader + custom_header_1(Http::LowerCaseString{"foo_custom_header"}); +Http::RegisterCustomInlineHeader + custom_header_1_copy(Http::LowerCaseString{"foo_custom_header"}); + +// Make sure that the same header registered twice points to the same location. +TEST(HeaderMapImplTest, CustomRegisteredHeaders) { + TestRequestHeaderMapImpl headers; + EXPECT_EQ(custom_header_1.handle(), custom_header_1_copy.handle()); + EXPECT_EQ(nullptr, headers.getInline(custom_header_1.handle())); + EXPECT_EQ(nullptr, headers.getInline(custom_header_1_copy.handle())); + headers.setInline(custom_header_1.handle(), 42); + EXPECT_EQ("42", headers.getInlineValue(custom_header_1_copy.handle())); + EXPECT_EQ("foo_custom_header", + headers.getInline(custom_header_1.handle())->key().getStringView()); +} + #define TEST_INLINE_HEADER_FUNCS(name) \ header_map->addCopy(Headers::get().name, #name); \ EXPECT_EQ(header_map->name()->value().getStringView(), #name); \ From 078fd7d3c08e416991c8f2543e910b84f79b4c1d Mon Sep 17 00:00:00 2001 From: Ariane van der Steldt Date: Tue, 30 Jun 2020 20:13:39 +0100 Subject: [PATCH 494/909] Fix header-to-metadata if clause (#11747) The else-clause would trip when the on-header-present block was absent, irrespective of if the header was actually absent. Signed-off-by: Ariane van der Steldt --- .../header_to_metadata_filter.cc | 3 ++- .../header_to_metadata_filter_test.cc | 21 +++++++++++++++++++ 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.cc b/source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.cc index e96cbb55a53c..f9c060960eb9 100644 --- a/source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.cc +++ b/source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.cc @@ -210,7 +210,8 @@ void HeaderToMetadataFilter::writeHeaderToMetadata(Http::HeaderMap& headers, if (proto_rule.remove()) { headers.remove(header); } - } else if (proto_rule.has_on_header_missing()) { + } + if (header_entry == nullptr && proto_rule.has_on_header_missing()) { // Add metadata for the header missing case. 
const auto& keyval = proto_rule.on_header_missing(); diff --git a/test/extensions/filters/http/header_to_metadata/header_to_metadata_filter_test.cc b/test/extensions/filters/http/header_to_metadata/header_to_metadata_filter_test.cc index 906475013f93..cf09a67ae718 100644 --- a/test/extensions/filters/http/header_to_metadata/header_to_metadata_filter_test.cc +++ b/test/extensions/filters/http/header_to_metadata/header_to_metadata_filter_test.cc @@ -522,6 +522,27 @@ TEST_F(HeaderToMetadataTest, RegexSubstitution) { } } +/** + * Missing case is not executed when header is present. + */ +TEST_F(HeaderToMetadataTest, NoMissingWhenHeaderIsPresent) { + const std::string config = R"EOF( +request_rules: + - header: x-version + on_header_missing: + metadata_namespace: envoy.lb + key: version + value: some_value + type: STRING +)EOF"; + initializeFilter(config); + Http::TestRequestHeaderMapImpl headers{{"x-version", "19"}}; + + EXPECT_CALL(decoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_)); + EXPECT_CALL(req_info_, setDynamicMetadata(_, _)).Times(0); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false)); +} + } // namespace HeaderToMetadataFilter } // namespace HttpFilters } // namespace Extensions From a9ae3b47892170b398455a5975800faba08fcc17 Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Tue, 30 Jun 2020 14:31:49 -0600 Subject: [PATCH 495/909] redis: update to use inclusive language (#11800) Part of https://github.com/envoyproxy/envoy/issues/11596 Signed-off-by: Matt Klein --- .../config/cluster/redis/redis_cluster.proto | 2 +- .../network/redis_proxy/v2/redis_proxy.proto | 12 +- .../clusters/redis/v3/redis_cluster.proto | 2 +- .../network/redis_proxy/v3/redis_proxy.proto | 17 +-- .../arch_overview/intro/threading_model.rst | 2 +- .../arch_overview/other_protocols/redis.rst | 4 +- .../config/cluster/redis/redis_cluster.proto | 2 +- .../network/redis_proxy/v2/redis_proxy.proto | 12 +- .../clusters/redis/v3/redis_cluster.proto | 2 +- .../network/redis_proxy/v3/redis_proxy.proto | 17 +-- .../clusters/redis/redis_cluster.cc | 12 +- .../extensions/clusters/redis/redis_cluster.h | 14 +- .../clusters/redis/redis_cluster_lb.cc | 32 ++--- .../clusters/redis/redis_cluster_lb.h | 18 +-- .../filters/network/common/redis/client.h | 2 +- .../network/common/redis/client_impl.cc | 8 +- .../extensions/health_checkers/redis/redis.h | 2 +- .../redis/redis_cluster_integration_test.cc | 44 +++--- .../clusters/redis/redis_cluster_lb_test.cc | 90 ++++++------- .../clusters/redis/redis_cluster_test.cc | 126 +++++++++--------- .../network/common/redis/client_impl_test.cc | 8 +- .../redis_proxy/conn_pool_impl_test.cc | 2 +- 22 files changed, 216 insertions(+), 214 deletions(-) diff --git a/api/envoy/config/cluster/redis/redis_cluster.proto b/api/envoy/config/cluster/redis/redis_cluster.proto index b1872501e8eb..abe88f76a6ff 100644 --- a/api/envoy/config/cluster/redis/redis_cluster.proto +++ b/api/envoy/config/cluster/redis/redis_cluster.proto @@ -18,7 +18,7 @@ option (udpa.annotations.file_status).package_version_status = FROZEN; // of :ref:`Envoy's support for Redis Cluster `. // // Redis Cluster is an extension of Redis which supports sharding and high availability (where a -// shard that loses its master fails over to a replica, and designates it as the new master). +// shard that loses its primary fails over to a replica, and designates it as the new primary). 
// However, as there is no unified frontend or proxy service in front of Redis Cluster, the client // (in this case Envoy) must locally maintain the state of the Redis Cluster, specifically the // topology. A random node in the cluster is queried for the topology using the `CLUSTER SLOTS diff --git a/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto b/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto index caca630fd297..948d7c349ff0 100644 --- a/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto +++ b/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto @@ -34,10 +34,10 @@ message RedisProxy { // because replication is asynchronous and requires some delay. You need to ensure that your // application can tolerate stale data. enum ReadPolicy { - // Default mode. Read from the current master node. + // Default mode. Read from the current primary node. MASTER = 0; - // Read from the master, but if it is unavailable, read from replica nodes. + // Read from the primary, but if it is unavailable, read from replica nodes. PREFER_MASTER = 1; // Read from replica nodes. If multiple replica nodes are present within a shard, a random @@ -45,11 +45,11 @@ message RedisProxy { REPLICA = 2; // Read from the replica nodes (similar to REPLICA), but if all replicas are unavailable (not - // present or unhealthy), read from the master. + // present or unhealthy), read from the primary. PREFER_REPLICA = 3; - // Read from any node of the cluster. A random node is selected among the master and replicas, - // healthy nodes have precedent over unhealthy nodes. + // Read from any node of the cluster. A random node is selected among the primary and + // replicas, healthy nodes have precedent over unhealthy nodes. ANY = 4; } @@ -112,7 +112,7 @@ message RedisProxy { // count. bool enable_command_stats = 8; - // Read policy. The default is to read from the master. + // Read policy. The default is to read from the primary. ReadPolicy read_policy = 7 [(validate.rules).enum = {defined_only: true}]; } diff --git a/api/envoy/extensions/clusters/redis/v3/redis_cluster.proto b/api/envoy/extensions/clusters/redis/v3/redis_cluster.proto index cf01359e55ab..afc19777edf2 100644 --- a/api/envoy/extensions/clusters/redis/v3/redis_cluster.proto +++ b/api/envoy/extensions/clusters/redis/v3/redis_cluster.proto @@ -19,7 +19,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // of :ref:`Envoy's support for Redis Cluster `. // // Redis Cluster is an extension of Redis which supports sharding and high availability (where a -// shard that loses its master fails over to a replica, and designates it as the new master). +// shard that loses its primary fails over to a replica, and designates it as the new primary). // However, as there is no unified frontend or proxy service in front of Redis Cluster, the client // (in this case Envoy) must locally maintain the state of the Redis Cluster, specifically the // topology. 
A random node in the cluster is queried for the topology using the `CLUSTER SLOTS diff --git a/api/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto b/api/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto index 658ac1c16b8c..af69d33a6340 100644 --- a/api/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto +++ b/api/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto @@ -8,6 +8,7 @@ import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/migrate.proto"; import "udpa/annotations/sensitive.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -38,22 +39,22 @@ message RedisProxy { // because replication is asynchronous and requires some delay. You need to ensure that your // application can tolerate stale data. enum ReadPolicy { - // Default mode. Read from the current master node. - MASTER = 0; + // Default mode. Read from the current primary node. + MASTER = 0 [(udpa.annotations.enum_value_migrate).rename = "PRIMARY"]; - // Read from the master, but if it is unavailable, read from replica nodes. - PREFER_MASTER = 1; + // Read from the primary, but if it is unavailable, read from replica nodes. + PREFER_MASTER = 1 [(udpa.annotations.enum_value_migrate).rename = "PREFER_PRIMARY"]; // Read from replica nodes. If multiple replica nodes are present within a shard, a random // node is selected. Healthy nodes have precedent over unhealthy nodes. REPLICA = 2; // Read from the replica nodes (similar to REPLICA), but if all replicas are unavailable (not - // present or unhealthy), read from the master. + // present or unhealthy), read from the primary. PREFER_REPLICA = 3; - // Read from any node of the cluster. A random node is selected among the master and replicas, - // healthy nodes have precedent over unhealthy nodes. + // Read from any node of the cluster. A random node is selected among the primary and + // replicas, healthy nodes have precedent over unhealthy nodes. ANY = 4; } @@ -116,7 +117,7 @@ message RedisProxy { // count. These commands are measured in microseconds. bool enable_command_stats = 8; - // Read policy. The default is to read from the master. + // Read policy. The default is to read from the primary. ReadPolicy read_policy = 7 [(validate.rules).enum = {defined_only: true}]; } diff --git a/docs/root/intro/arch_overview/intro/threading_model.rst b/docs/root/intro/arch_overview/intro/threading_model.rst index 7b86791a4a4a..ca83cb92e92c 100644 --- a/docs/root/intro/arch_overview/intro/threading_model.rst +++ b/docs/root/intro/arch_overview/intro/threading_model.rst @@ -3,7 +3,7 @@ Threading model =============== -Envoy uses a single process with multiple threads architecture. A single *master* thread controls +Envoy uses a single process with multiple threads architecture. A single *primary* thread controls various sporadic coordination tasks while some number of *worker* threads perform listening, filtering, and forwarding. Once a connection is accepted by a listener, the connection spends the rest of its lifetime bound to a single worker thread. 
This allows the majority of Envoy to be diff --git a/docs/root/intro/arch_overview/other_protocols/redis.rst b/docs/root/intro/arch_overview/other_protocols/redis.rst index deebe8ce7f47..5c670dea5d45 100644 --- a/docs/root/intro/arch_overview/other_protocols/redis.rst +++ b/docs/root/intro/arch_overview/other_protocols/redis.rst @@ -78,7 +78,7 @@ Envoy proxy tracks the topology of the cluster by sending periodic following information: * List of known nodes. -* The masters for each shard. +* The primaries for each shard. * Nodes entering or leaving the cluster. For topology configuration details, see the Redis Cluster @@ -106,7 +106,7 @@ Per-cluster command statistics can be enabled via the setting :ref:`enable_comma upstream_commands.[command].failure, Counter, Total number of failed or cancelled requests for a specific Redis command upstream_commands.[command].total, Counter, Total number of requests for a specific Redis command (sum of success and failure) upstream_commands.[command].latency, Histogram, Latency of requests for a specific Redis command - + Supported commands ------------------ diff --git a/generated_api_shadow/envoy/config/cluster/redis/redis_cluster.proto b/generated_api_shadow/envoy/config/cluster/redis/redis_cluster.proto index b1872501e8eb..abe88f76a6ff 100644 --- a/generated_api_shadow/envoy/config/cluster/redis/redis_cluster.proto +++ b/generated_api_shadow/envoy/config/cluster/redis/redis_cluster.proto @@ -18,7 +18,7 @@ option (udpa.annotations.file_status).package_version_status = FROZEN; // of :ref:`Envoy's support for Redis Cluster `. // // Redis Cluster is an extension of Redis which supports sharding and high availability (where a -// shard that loses its master fails over to a replica, and designates it as the new master). +// shard that loses its primary fails over to a replica, and designates it as the new primary). // However, as there is no unified frontend or proxy service in front of Redis Cluster, the client // (in this case Envoy) must locally maintain the state of the Redis Cluster, specifically the // topology. A random node in the cluster is queried for the topology using the `CLUSTER SLOTS diff --git a/generated_api_shadow/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto b/generated_api_shadow/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto index caca630fd297..948d7c349ff0 100644 --- a/generated_api_shadow/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto +++ b/generated_api_shadow/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto @@ -34,10 +34,10 @@ message RedisProxy { // because replication is asynchronous and requires some delay. You need to ensure that your // application can tolerate stale data. enum ReadPolicy { - // Default mode. Read from the current master node. + // Default mode. Read from the current primary node. MASTER = 0; - // Read from the master, but if it is unavailable, read from replica nodes. + // Read from the primary, but if it is unavailable, read from replica nodes. PREFER_MASTER = 1; // Read from replica nodes. If multiple replica nodes are present within a shard, a random @@ -45,11 +45,11 @@ message RedisProxy { REPLICA = 2; // Read from the replica nodes (similar to REPLICA), but if all replicas are unavailable (not - // present or unhealthy), read from the master. + // present or unhealthy), read from the primary. PREFER_REPLICA = 3; - // Read from any node of the cluster. 
A random node is selected among the master and replicas, - // healthy nodes have precedent over unhealthy nodes. + // Read from any node of the cluster. A random node is selected among the primary and + // replicas, healthy nodes have precedent over unhealthy nodes. ANY = 4; } @@ -112,7 +112,7 @@ message RedisProxy { // count. bool enable_command_stats = 8; - // Read policy. The default is to read from the master. + // Read policy. The default is to read from the primary. ReadPolicy read_policy = 7 [(validate.rules).enum = {defined_only: true}]; } diff --git a/generated_api_shadow/envoy/extensions/clusters/redis/v3/redis_cluster.proto b/generated_api_shadow/envoy/extensions/clusters/redis/v3/redis_cluster.proto index cf01359e55ab..afc19777edf2 100644 --- a/generated_api_shadow/envoy/extensions/clusters/redis/v3/redis_cluster.proto +++ b/generated_api_shadow/envoy/extensions/clusters/redis/v3/redis_cluster.proto @@ -19,7 +19,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // of :ref:`Envoy's support for Redis Cluster `. // // Redis Cluster is an extension of Redis which supports sharding and high availability (where a -// shard that loses its master fails over to a replica, and designates it as the new master). +// shard that loses its primary fails over to a replica, and designates it as the new primary). // However, as there is no unified frontend or proxy service in front of Redis Cluster, the client // (in this case Envoy) must locally maintain the state of the Redis Cluster, specifically the // topology. A random node in the cluster is queried for the topology using the `CLUSTER SLOTS diff --git a/generated_api_shadow/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto index 098f5f4a2ea9..8f996c30f9ae 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto @@ -8,6 +8,7 @@ import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/migrate.proto"; import "udpa/annotations/sensitive.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -38,22 +39,22 @@ message RedisProxy { // because replication is asynchronous and requires some delay. You need to ensure that your // application can tolerate stale data. enum ReadPolicy { - // Default mode. Read from the current master node. - MASTER = 0; + // Default mode. Read from the current primary node. + MASTER = 0 [(udpa.annotations.enum_value_migrate).rename = "PRIMARY"]; - // Read from the master, but if it is unavailable, read from replica nodes. - PREFER_MASTER = 1; + // Read from the primary, but if it is unavailable, read from replica nodes. + PREFER_MASTER = 1 [(udpa.annotations.enum_value_migrate).rename = "PREFER_PRIMARY"]; // Read from replica nodes. If multiple replica nodes are present within a shard, a random // node is selected. Healthy nodes have precedent over unhealthy nodes. REPLICA = 2; // Read from the replica nodes (similar to REPLICA), but if all replicas are unavailable (not - // present or unhealthy), read from the master. + // present or unhealthy), read from the primary. PREFER_REPLICA = 3; - // Read from any node of the cluster. 
A random node is selected among the master and replicas, - // healthy nodes have precedent over unhealthy nodes. + // Read from any node of the cluster. A random node is selected among the primary and + // replicas, healthy nodes have precedent over unhealthy nodes. ANY = 4; } @@ -116,7 +117,7 @@ message RedisProxy { // count. These commands are measured in microseconds. bool enable_command_stats = 8; - // Read policy. The default is to read from the master. + // Read policy. The default is to read from the primary. ReadPolicy read_policy = 7 [(validate.rules).enum = {defined_only: true}]; } diff --git a/source/extensions/clusters/redis/redis_cluster.cc b/source/extensions/clusters/redis/redis_cluster.cc index 98e3763b9808..e4a07f005082 100644 --- a/source/extensions/clusters/redis/redis_cluster.cc +++ b/source/extensions/clusters/redis/redis_cluster.cc @@ -96,7 +96,7 @@ void RedisCluster::onClusterSlotUpdate(ClusterSlotsPtr&& slots) { Upstream::HostVector new_hosts; for (const ClusterSlot& slot : *slots) { - new_hosts.emplace_back(new RedisHost(info(), "", slot.master(), *this, true)); + new_hosts.emplace_back(new RedisHost(info(), "", slot.primary(), *this, true)); for (auto const& replica : slot.replicas()) { new_hosts.emplace_back(new RedisHost(info(), "", replica, *this, false)); } @@ -302,7 +302,7 @@ void RedisCluster::RedisDiscoverySession::onResponse( const uint32_t SlotRangeStart = 0; const uint32_t SlotRangeEnd = 1; - const uint32_t SlotMaster = 2; + const uint32_t SlotPrimary = 2; const uint32_t SlotReplicaStart = 3; // Do nothing if the cluster is empty. @@ -331,15 +331,15 @@ void RedisCluster::RedisDiscoverySession::onResponse( return; } - // Field 2: Master address for slot range - auto master_address = ProcessCluster(slot_range[SlotMaster]); - if (!master_address) { + // Field 2: Primary address for slot range + auto primary_address = ProcessCluster(slot_range[SlotPrimary]); + if (!primary_address) { onUnexpectedResponse(value); return; } slots->emplace_back(slot_range[SlotRangeStart].asInteger(), - slot_range[SlotRangeEnd].asInteger(), master_address); + slot_range[SlotRangeEnd].asInteger(), primary_address); for (auto replica = std::next(slot_range.begin(), SlotReplicaStart); replica != slot_range.end(); ++replica) { diff --git a/source/extensions/clusters/redis/redis_cluster.h b/source/extensions/clusters/redis/redis_cluster.h index ce0c32dd800e..b6b0c1643cea 100644 --- a/source/extensions/clusters/redis/redis_cluster.h +++ b/source/extensions/clusters/redis/redis_cluster.h @@ -144,7 +144,7 @@ class RedisCluster : public Upstream::BaseDynamicClusterImpl { class RedisHost : public Upstream::HostImpl { public: RedisHost(Upstream::ClusterInfoConstSharedPtr cluster, const std::string& hostname, - Network::Address::InstanceConstSharedPtr address, RedisCluster& parent, bool master) + Network::Address::InstanceConstSharedPtr address, RedisCluster& parent, bool primary) : Upstream::HostImpl( cluster, hostname, address, // TODO(zyfjeff): Created through metadata shared pool @@ -153,12 +153,12 @@ class RedisCluster : public Upstream::BaseDynamicClusterImpl { parent.localityLbEndpoint().locality(), parent.lbEndpoint().endpoint().health_check_config(), parent.localityLbEndpoint().priority(), parent.lbEndpoint().health_status()), - master_(master) {} + primary_(primary) {} - bool isMaster() const { return master_; } + bool isPrimary() const { return primary_; } private: - const bool master_; + const bool primary_; }; // Resolves the discovery endpoint. 
@@ -221,12 +221,12 @@ class RedisCluster : public Upstream::BaseDynamicClusterImpl { std::chrono::milliseconds bufferFlushTimeoutInMs() const override { return buffer_timeout_; } uint32_t maxUpstreamUnknownConnections() const override { return 0; } bool enableCommandStats() const override { return false; } - // For any readPolicy other than Master, the RedisClientFactory will send a READONLY command + // For any readPolicy other than Primary, the RedisClientFactory will send a READONLY command // when establishing a new connection. Since we're only using this for making the "cluster // slots" commands, the READONLY command is not relevant in this context. We're setting it to - // Master to avoid the additional READONLY command. + // Primary to avoid the additional READONLY command. Extensions::NetworkFilters::Common::Redis::Client::ReadPolicy readPolicy() const override { - return Extensions::NetworkFilters::Common::Redis::Client::ReadPolicy::Master; + return Extensions::NetworkFilters::Common::Redis::Client::ReadPolicy::Primary; } // Extensions::NetworkFilters::Common::Redis::Client::ClientCallbacks diff --git a/source/extensions/clusters/redis/redis_cluster_lb.cc b/source/extensions/clusters/redis/redis_cluster_lb.cc index 631f061756cd..99f76013f60e 100644 --- a/source/extensions/clusters/redis/redis_cluster_lb.cc +++ b/source/extensions/clusters/redis/redis_cluster_lb.cc @@ -6,7 +6,7 @@ namespace Clusters { namespace Redis { bool ClusterSlot::operator==(const Envoy::Extensions::Clusters::Redis::ClusterSlot& rhs) const { - return start_ == rhs.start_ && end_ == rhs.end_ && master_ == rhs.master_ && + return start_ == rhs.start_ && end_ == rhs.end_ && primary_ == rhs.primary_ && replicas_ == rhs.replicas_; } @@ -30,28 +30,28 @@ bool RedisClusterLoadBalancerFactory::onClusterSlotUpdate(ClusterSlotsPtr&& slot for (const ClusterSlot& slot : *slots) { // look in the updated map - const std::string master_address = slot.master()->asString(); + const std::string primary_address = slot.primary()->asString(); - auto result = shards.try_emplace(master_address, shard_vector->size()); + auto result = shards.try_emplace(primary_address, shard_vector->size()); if (result.second) { - auto master_host = all_hosts.find(master_address); - ASSERT(master_host != all_hosts.end(), + auto primary_host = all_hosts.find(primary_address); + ASSERT(primary_host != all_hosts.end(), "we expect all address to be found in the updated_hosts"); - Upstream::HostVectorSharedPtr master_and_replicas = std::make_shared(); + Upstream::HostVectorSharedPtr primary_and_replicas = std::make_shared(); Upstream::HostVectorSharedPtr replicas = std::make_shared(); - master_and_replicas->push_back(master_host->second); + primary_and_replicas->push_back(primary_host->second); for (auto const& replica : slot.replicas()) { auto replica_host = all_hosts.find(replica->asString()); ASSERT(replica_host != all_hosts.end(), "we expect all address to be found in the updated_hosts"); replicas->push_back(replica_host->second); - master_and_replicas->push_back(replica_host->second); + primary_and_replicas->push_back(replica_host->second); } shard_vector->emplace_back( - std::make_shared(master_host->second, replicas, master_and_replicas)); + std::make_shared(primary_host->second, replicas, primary_and_replicas)); } for (auto i = slot.start(); i <= slot.end(); ++i) { @@ -84,7 +84,7 @@ void RedisClusterLoadBalancerFactory::onHostHealthUpdate() { for (auto const& shard : *current_shard_vector) { shard_vector->emplace_back(std::make_shared( - 
shard->master(), shard->replicas().hostsPtr(), shard->allHosts().hostsPtr())); + shard->primary(), shard->replicas().hostsPtr(), shard->allHosts().hostsPtr())); } { @@ -138,11 +138,11 @@ Upstream::HostConstSharedPtr RedisClusterLoadBalancerFactory::RedisClusterLoadBa auto redis_context = dynamic_cast(context); if (redis_context && redis_context->isReadCommand()) { switch (redis_context->readPolicy()) { - case NetworkFilters::Common::Redis::Client::ReadPolicy::Master: - return shard->master(); - case NetworkFilters::Common::Redis::Client::ReadPolicy::PreferMaster: - if (shard->master()->health() == Upstream::Host::Health::Healthy) { - return shard->master(); + case NetworkFilters::Common::Redis::Client::ReadPolicy::Primary: + return shard->primary(); + case NetworkFilters::Common::Redis::Client::ReadPolicy::PreferPrimary: + if (shard->primary()->health() == Upstream::Host::Health::Healthy) { + return shard->primary(); } else { return chooseRandomHost(shard->allHosts(), random_); } @@ -158,7 +158,7 @@ Upstream::HostConstSharedPtr RedisClusterLoadBalancerFactory::RedisClusterLoadBa return chooseRandomHost(shard->allHosts(), random_); } } - return shard->master(); + return shard->primary(); } bool RedisLoadBalancerContextImpl::isReadRequest( diff --git a/source/extensions/clusters/redis/redis_cluster_lb.h b/source/extensions/clusters/redis/redis_cluster_lb.h index 19d27cda3d0f..2c0fedf7394f 100644 --- a/source/extensions/clusters/redis/redis_cluster_lb.h +++ b/source/extensions/clusters/redis/redis_cluster_lb.h @@ -31,12 +31,12 @@ static const uint64_t MaxSlot = 16384; class ClusterSlot { public: - ClusterSlot(int64_t start, int64_t end, Network::Address::InstanceConstSharedPtr master) - : start_(start), end_(end), master_(std::move(master)) {} + ClusterSlot(int64_t start, int64_t end, Network::Address::InstanceConstSharedPtr primary) + : start_(start), end_(end), primary_(std::move(primary)) {} int64_t start() const { return start_; } int64_t end() const { return end_; } - Network::Address::InstanceConstSharedPtr master() const { return master_; } + Network::Address::InstanceConstSharedPtr primary() const { return primary_; } const absl::flat_hash_set& replicas() const { return replicas_; } @@ -49,7 +49,7 @@ class ClusterSlot { private: int64_t start_; int64_t end_; - Network::Address::InstanceConstSharedPtr master_; + Network::Address::InstanceConstSharedPtr primary_; absl::flat_hash_set replicas_; }; @@ -82,7 +82,7 @@ class RedisLoadBalancerContextImpl : public RedisLoadBalancerContext, bool is_redis_cluster, const NetworkFilters::Common::Redis::RespValue& request, NetworkFilters::Common::Redis::Client::ReadPolicy read_policy = - NetworkFilters::Common::Redis::Client::ReadPolicy::Master); + NetworkFilters::Common::Redis::Client::ReadPolicy::Primary); // Upstream::LoadBalancerContextBase absl::optional computeHashKey() override { return hash_key_; } @@ -143,9 +143,9 @@ class RedisClusterLoadBalancerFactory : public ClusterSlotUpdateCallBack, private: class RedisShard { public: - RedisShard(Upstream::HostConstSharedPtr master, Upstream::HostVectorConstSharedPtr replicas, + RedisShard(Upstream::HostConstSharedPtr primary, Upstream::HostVectorConstSharedPtr replicas, Upstream::HostVectorConstSharedPtr all_hosts) - : master_(std::move(master)) { + : primary_(std::move(primary)) { replicas_.updateHosts(Upstream::HostSetImpl::partitionHosts( std::move(replicas), Upstream::HostsPerLocalityImpl::empty()), nullptr, {}, {}); @@ -153,12 +153,12 @@ class RedisClusterLoadBalancerFactory : public 
ClusterSlotUpdateCallBack, std::move(all_hosts), Upstream::HostsPerLocalityImpl::empty()), nullptr, {}, {}); } - const Upstream::HostConstSharedPtr master() const { return master_; } + const Upstream::HostConstSharedPtr primary() const { return primary_; } const Upstream::HostSetImpl& replicas() const { return replicas_; } const Upstream::HostSetImpl& allHosts() const { return all_hosts_; } private: - const Upstream::HostConstSharedPtr master_; + const Upstream::HostConstSharedPtr primary_; Upstream::HostSetImpl replicas_{0, absl::nullopt}; Upstream::HostSetImpl all_hosts_{0, absl::nullopt}; }; diff --git a/source/extensions/filters/network/common/redis/client.h b/source/extensions/filters/network/common/redis/client.h index 147abbcebf24..f0c573f92f82 100644 --- a/source/extensions/filters/network/common/redis/client.h +++ b/source/extensions/filters/network/common/redis/client.h @@ -114,7 +114,7 @@ using ClientPtr = std::unique_ptr; /** * Read policy to use for Redis cluster. */ -enum class ReadPolicy { Master, PreferMaster, Replica, PreferReplica, Any }; +enum class ReadPolicy { Primary, PreferPrimary, Replica, PreferReplica, Any }; /** * Configuration for a redis connection pool. diff --git a/source/extensions/filters/network/common/redis/client_impl.cc b/source/extensions/filters/network/common/redis/client_impl.cc index 55c99ba05288..6cdc7b8ad007 100644 --- a/source/extensions/filters/network/common/redis/client_impl.cc +++ b/source/extensions/filters/network/common/redis/client_impl.cc @@ -31,11 +31,11 @@ ConfigImpl::ConfigImpl( enable_command_stats_(config.enable_command_stats()) { switch (config.read_policy()) { case envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::ConnPoolSettings::MASTER: - read_policy_ = ReadPolicy::Master; + read_policy_ = ReadPolicy::Primary; break; case envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::ConnPoolSettings:: PREFER_MASTER: - read_policy_ = ReadPolicy::PreferMaster; + read_policy_ = ReadPolicy::PreferPrimary; break; case envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::ConnPoolSettings::REPLICA: read_policy_ = ReadPolicy::Replica; @@ -300,9 +300,9 @@ void ClientImpl::initialize(const std::string& auth_username, const std::string& makeRequest(auth_request, null_pool_callbacks); } // Any connection to replica requires the READONLY command in order to perform read. - // Also the READONLY command is a no-opt for the master. + // Also the READONLY command is a no-opt for the primary. // We only need to send the READONLY command iff it's possible that the host is a replica. - if (config_.readPolicy() != Common::Redis::Client::ReadPolicy::Master) { + if (config_.readPolicy() != Common::Redis::Client::ReadPolicy::Primary) { makeRequest(Utility::ReadOnlyRequest::instance(), null_pool_callbacks); } } diff --git a/source/extensions/health_checkers/redis/redis.h b/source/extensions/health_checkers/redis/redis.h index 6284c475eda5..211ff6a9d555 100644 --- a/source/extensions/health_checkers/redis/redis.h +++ b/source/extensions/health_checkers/redis/redis.h @@ -75,7 +75,7 @@ class RedisHealthChecker : public Upstream::HealthCheckerImplBase { return true; } // Redirection errors are treated as check successes. 
NetworkFilters::Common::Redis::Client::ReadPolicy readPolicy() const override { - return NetworkFilters::Common::Redis::Client::ReadPolicy::Master; + return NetworkFilters::Common::Redis::Client::ReadPolicy::Primary; } // Batching diff --git a/test/extensions/clusters/redis/redis_cluster_integration_test.cc b/test/extensions/clusters/redis/redis_cluster_integration_test.cc index 693ecc517854..02a1a1a4ce40 100644 --- a/test/extensions/clusters/redis/redis_cluster_integration_test.cc +++ b/test/extensions/clusters/redis/redis_cluster_integration_test.cc @@ -294,13 +294,13 @@ class RedisClusterIntegrationTest : public testing::TestWithParamaddressAsString(), master->port()) + << makeIp(primary->addressAsString(), primary->port()) << makeIp(replica->addressAsString(), replica->port()); return resp.str(); @@ -317,8 +317,8 @@ class RedisClusterIntegrationTest : public testing::TestWithParamlocalAddress()->ip(), fake_upstreams_[1]->localAddress()->ip()); expectCallClusterSlot(random_index_, cluster_slot_response); }; @@ -452,7 +452,7 @@ TEST_P(RedisClusterIntegrationTest, ClusterSlotRequestAfterRedirection) { random_index_ = 0; on_server_init_function_ = [this]() { - std::string cluster_slot_response = singleSlotMasterReplica( + std::string cluster_slot_response = singleSlotPrimaryReplica( fake_upstreams_[0]->localAddress()->ip(), fake_upstreams_[1]->localAddress()->ip()); expectCallClusterSlot(random_index_, cluster_slot_response); }; @@ -460,7 +460,7 @@ TEST_P(RedisClusterIntegrationTest, ClusterSlotRequestAfterRedirection) { initialize(); // foo hashes to slot 12182 which the proxy believes is at the server reachable via - // fake_upstreams_[0], based on the singleSlotMasterReplica() response above. + // fake_upstreams_[0], based on the singleSlotPrimaryReplica() response above. std::string request = makeBulkStringArray({"get", "foo"}); // The actual moved redirection error that redirects to the fake_upstreams_[1] server. std::string redirection_response = @@ -514,30 +514,30 @@ TEST_P(RedisClusterIntegrationTest, ClusterSlotRequestAfterRedirection) { // This test sends simple "set foo" and "get foo" command from a fake // downstream client through the proxy to a fake upstream -// Redis cluster with a single slot with master and replica. +// Redis cluster with a single slot with primary and replica. 
// The envoy proxy is set with read_policy to read from replica, the expected result -// is that the set command will be sent to the master and the get command will be sent +// is that the set command will be sent to the primary and the get command will be sent // to the replica -TEST_P(RedisClusterWithReadPolicyIntegrationTest, SingleSlotMasterReplicaReadReplica) { +TEST_P(RedisClusterWithReadPolicyIntegrationTest, SingleSlotPrimaryReplicaReadReplica) { random_index_ = 0; on_server_init_function_ = [this]() { - std::string cluster_slot_response = singleSlotMasterReplica( + std::string cluster_slot_response = singleSlotPrimaryReplica( fake_upstreams_[0]->localAddress()->ip(), fake_upstreams_[1]->localAddress()->ip()); expectCallClusterSlot(random_index_, cluster_slot_response); }; initialize(); - // foo hashes to slot 12182 which has master node in upstream 0 and replica in upstream 1 + // foo hashes to slot 12182 which has primary node in upstream 0 and replica in upstream 1 simpleRequestAndResponse(0, makeBulkStringArray({"set", "foo", "bar"}), ":1\r\n", true); simpleRequestAndResponse(1, makeBulkStringArray({"get", "foo"}), "$3\r\nbar\r\n", true); } // This test sends a simple "get foo" command from a fake // downstream client through the proxy to a fake upstream -// Redis cluster with a single slot with master and replica. +// Redis cluster with a single slot with primary and replica. // The fake server sends a valid response back to the client. // The request and response should make it through the envoy // proxy server code unchanged. @@ -547,11 +547,11 @@ TEST_P(RedisClusterWithReadPolicyIntegrationTest, SingleSlotMasterReplicaReadRep // "cluster slots" command), and one to authenticate the connection // that carries the "get foo" request. -TEST_P(RedisClusterWithAuthIntegrationTest, SingleSlotMasterReplica) { +TEST_P(RedisClusterWithAuthIntegrationTest, SingleSlotPrimaryReplica) { random_index_ = 0; on_server_init_function_ = [this]() { - std::string cluster_slot_response = singleSlotMasterReplica( + std::string cluster_slot_response = singleSlotPrimaryReplica( fake_upstreams_[0]->localAddress()->ip(), fake_upstreams_[1]->localAddress()->ip()); expectCallClusterSlot(0, cluster_slot_response, "", "somepassword"); }; @@ -576,7 +576,7 @@ TEST_P(RedisClusterWithRefreshIntegrationTest, ClusterSlotRequestAfterFailure) { random_index_ = 0; on_server_init_function_ = [this]() { - std::string cluster_slot_response = singleSlotMasterReplica( + std::string cluster_slot_response = singleSlotPrimaryReplica( fake_upstreams_[0]->localAddress()->ip(), fake_upstreams_[1]->localAddress()->ip()); expectCallClusterSlot(random_index_, cluster_slot_response); }; @@ -584,7 +584,7 @@ TEST_P(RedisClusterWithRefreshIntegrationTest, ClusterSlotRequestAfterFailure) { initialize(); // foo hashes to slot 12182 which the proxy believes is at the server reachable via - // fake_upstreams_[0], based on the singleSlotMasterReplica() response above. + // fake_upstreams_[0], based on the singleSlotPrimaryReplica() response above. std::string request = makeBulkStringArray({"get", "foo"}); // The actual error response. 
std::string error_response = "-CLUSTERDOWN The cluster is down\r\n"; diff --git a/test/extensions/clusters/redis/redis_cluster_lb_test.cc b/test/extensions/clusters/redis/redis_cluster_lb_test.cc index bfc1ae6e16be..572509c1b8a9 100644 --- a/test/extensions/clusters/redis/redis_cluster_lb_test.cc +++ b/test/extensions/clusters/redis/redis_cluster_lb_test.cc @@ -51,7 +51,7 @@ class RedisClusterLoadBalancerTest : public testing::Test { const std::vector>& expected_assignments, bool read_command = false, NetworkFilters::Common::Redis::Client::ReadPolicy read_policy = - NetworkFilters::Common::Redis::Client::ReadPolicy::Master) { + NetworkFilters::Common::Redis::Client::ReadPolicy::Primary) { Upstream::LoadBalancerPtr lb = lb_->factory()->create(); for (auto& assignment : expected_assignments) { @@ -173,22 +173,22 @@ TEST_F(RedisClusterLoadBalancerTest, ReadStrategiesHealthy) { validateAssignment(hosts, replica_assignments, true, NetworkFilters::Common::Redis::Client::ReadPolicy::PreferReplica); - const std::vector> master_assignments = { + const std::vector> primary_assignments = { {0, 0}, {1100, 0}, {2000, 0}, {18382, 0}, {2001, 1}, {2100, 1}, {16383, 1}, {19382, 1}}; - validateAssignment(hosts, master_assignments, true, - NetworkFilters::Common::Redis::Client::ReadPolicy::Master); - validateAssignment(hosts, master_assignments, true, - NetworkFilters::Common::Redis::Client::ReadPolicy::PreferMaster); + validateAssignment(hosts, primary_assignments, true, + NetworkFilters::Common::Redis::Client::ReadPolicy::Primary); + validateAssignment(hosts, primary_assignments, true, + NetworkFilters::Common::Redis::Client::ReadPolicy::PreferPrimary); ON_CALL(random_, random()).WillByDefault(Return(0)); - validateAssignment(hosts, master_assignments, true, + validateAssignment(hosts, primary_assignments, true, NetworkFilters::Common::Redis::Client::ReadPolicy::Any); ON_CALL(random_, random()).WillByDefault(Return(1)); validateAssignment(hosts, replica_assignments, true, NetworkFilters::Common::Redis::Client::ReadPolicy::Any); } -TEST_F(RedisClusterLoadBalancerTest, ReadStrategiesUnhealthyMaster) { +TEST_F(RedisClusterLoadBalancerTest, ReadStrategiesUnhealthyPrimary) { Upstream::HostVector hosts{ Upstream::makeTestHost(info_, "tcp://127.0.0.1:90"), Upstream::makeTestHost(info_, "tcp://127.0.0.1:91"), @@ -215,17 +215,17 @@ TEST_F(RedisClusterLoadBalancerTest, ReadStrategiesUnhealthyMaster) { // A list of (hash: host_index) pair const std::vector> replica_assignments = { {0, 2}, {1100, 2}, {2000, 2}, {18382, 2}, {2001, 3}, {2100, 3}, {16383, 3}, {19382, 3}}; - const std::vector> master_assignments = { + const std::vector> primary_assignments = { {0, 0}, {1100, 0}, {2000, 0}, {18382, 0}, {2001, 1}, {2100, 1}, {16383, 1}, {19382, 1}}; validateAssignment(hosts, replica_assignments, true, NetworkFilters::Common::Redis::Client::ReadPolicy::Replica); validateAssignment(hosts, replica_assignments, true, NetworkFilters::Common::Redis::Client::ReadPolicy::PreferReplica); - validateAssignment(hosts, master_assignments, true, - NetworkFilters::Common::Redis::Client::ReadPolicy::Master); + validateAssignment(hosts, primary_assignments, true, + NetworkFilters::Common::Redis::Client::ReadPolicy::Primary); validateAssignment(hosts, replica_assignments, true, - NetworkFilters::Common::Redis::Client::ReadPolicy::PreferMaster); + NetworkFilters::Common::Redis::Client::ReadPolicy::PreferPrimary); ON_CALL(random_, random()).WillByDefault(Return(0)); validateAssignment(hosts, replica_assignments, true, @@ -262,23 +262,23 @@ 
TEST_F(RedisClusterLoadBalancerTest, ReadStrategiesUnhealthyReplica) { // A list of (hash: host_index) pair const std::vector> replica_assignments = { {0, 2}, {1100, 2}, {2000, 2}, {18382, 2}, {2001, 3}, {2100, 3}, {16383, 3}, {19382, 3}}; - const std::vector> master_assignments = { + const std::vector> primary_assignments = { {0, 0}, {1100, 0}, {2000, 0}, {18382, 0}, {2001, 1}, {2100, 1}, {16383, 1}, {19382, 1}}; validateAssignment(hosts, replica_assignments, true, NetworkFilters::Common::Redis::Client::ReadPolicy::Replica); - validateAssignment(hosts, master_assignments, true, + validateAssignment(hosts, primary_assignments, true, NetworkFilters::Common::Redis::Client::ReadPolicy::PreferReplica); - validateAssignment(hosts, master_assignments, true, - NetworkFilters::Common::Redis::Client::ReadPolicy::Master); - validateAssignment(hosts, master_assignments, true, - NetworkFilters::Common::Redis::Client::ReadPolicy::PreferMaster); + validateAssignment(hosts, primary_assignments, true, + NetworkFilters::Common::Redis::Client::ReadPolicy::Primary); + validateAssignment(hosts, primary_assignments, true, + NetworkFilters::Common::Redis::Client::ReadPolicy::PreferPrimary); ON_CALL(random_, random()).WillByDefault(Return(0)); - validateAssignment(hosts, master_assignments, true, + validateAssignment(hosts, primary_assignments, true, NetworkFilters::Common::Redis::Client::ReadPolicy::Any); ON_CALL(random_, random()).WillByDefault(Return(1)); - validateAssignment(hosts, master_assignments, true, + validateAssignment(hosts, primary_assignments, true, NetworkFilters::Common::Redis::Client::ReadPolicy::Any); } @@ -296,15 +296,15 @@ TEST_F(RedisClusterLoadBalancerTest, ReadStrategiesNoReplica) { factory_->onClusterSlotUpdate(std::move(slots), all_hosts); // A list of (hash: host_index) pair - const std::vector> master_assignments = { + const std::vector> primary_assignments = { {0, 0}, {1100, 0}, {2000, 0}, {18382, 0}, {2001, 1}, {2100, 1}, {16383, 1}, {19382, 1}}; - validateAssignment(hosts, master_assignments, true, - NetworkFilters::Common::Redis::Client::ReadPolicy::Master); - validateAssignment(hosts, master_assignments, true, - NetworkFilters::Common::Redis::Client::ReadPolicy::PreferMaster); - validateAssignment(hosts, master_assignments, true, + validateAssignment(hosts, primary_assignments, true, + NetworkFilters::Common::Redis::Client::ReadPolicy::Primary); + validateAssignment(hosts, primary_assignments, true, + NetworkFilters::Common::Redis::Client::ReadPolicy::PreferPrimary); + validateAssignment(hosts, primary_assignments, true, NetworkFilters::Common::Redis::Client::ReadPolicy::Any); - validateAssignment(hosts, master_assignments, true, + validateAssignment(hosts, primary_assignments, true, NetworkFilters::Common::Redis::Client::ReadPolicy::PreferReplica); Upstream::LoadBalancerPtr lb = lb_->factory()->create(); @@ -393,11 +393,11 @@ TEST_F(RedisLoadBalancerContextImplTest, Basic) { get_request.asArray().swap(get_foo); RedisLoadBalancerContextImpl context1("foo", true, true, get_request, - NetworkFilters::Common::Redis::Client::ReadPolicy::Master); + NetworkFilters::Common::Redis::Client::ReadPolicy::Primary); EXPECT_EQ(absl::optional(44950), context1.computeHashKey()); EXPECT_EQ(true, context1.isReadCommand()); - EXPECT_EQ(NetworkFilters::Common::Redis::Client::ReadPolicy::Master, context1.readPolicy()); + EXPECT_EQ(NetworkFilters::Common::Redis::Client::ReadPolicy::Primary, context1.readPolicy()); // Simple write command std::vector set_foo(3); @@ -413,11 +413,11 @@ 
TEST_F(RedisLoadBalancerContextImplTest, Basic) { set_request.asArray().swap(set_foo); RedisLoadBalancerContextImpl context2("foo", true, true, set_request, - NetworkFilters::Common::Redis::Client::ReadPolicy::Master); + NetworkFilters::Common::Redis::Client::ReadPolicy::Primary); EXPECT_EQ(absl::optional(44950), context2.computeHashKey()); EXPECT_EQ(false, context2.isReadCommand()); - EXPECT_EQ(NetworkFilters::Common::Redis::Client::ReadPolicy::Master, context2.readPolicy()); + EXPECT_EQ(NetworkFilters::Common::Redis::Client::ReadPolicy::Primary, context2.readPolicy()); } TEST_F(RedisLoadBalancerContextImplTest, CompositeArray) { @@ -435,18 +435,18 @@ TEST_F(RedisLoadBalancerContextImplTest, CompositeArray) { NetworkFilters::Common::Redis::RespValue get_request2{base, get_command, 2, 2}; RedisLoadBalancerContextImpl context1("foo", true, true, get_request1, - NetworkFilters::Common::Redis::Client::ReadPolicy::Master); + NetworkFilters::Common::Redis::Client::ReadPolicy::Primary); EXPECT_EQ(absl::optional(44950), context1.computeHashKey()); EXPECT_EQ(true, context1.isReadCommand()); - EXPECT_EQ(NetworkFilters::Common::Redis::Client::ReadPolicy::Master, context1.readPolicy()); + EXPECT_EQ(NetworkFilters::Common::Redis::Client::ReadPolicy::Primary, context1.readPolicy()); RedisLoadBalancerContextImpl context2("bar", true, true, get_request2, - NetworkFilters::Common::Redis::Client::ReadPolicy::Master); + NetworkFilters::Common::Redis::Client::ReadPolicy::Primary); EXPECT_EQ(absl::optional(37829), context2.computeHashKey()); EXPECT_EQ(true, context2.isReadCommand()); - EXPECT_EQ(NetworkFilters::Common::Redis::Client::ReadPolicy::Master, context2.readPolicy()); + EXPECT_EQ(NetworkFilters::Common::Redis::Client::ReadPolicy::Primary, context2.readPolicy()); // Composite write command NetworkFilters::Common::Redis::RespValue set_command; @@ -455,11 +455,11 @@ TEST_F(RedisLoadBalancerContextImplTest, CompositeArray) { NetworkFilters::Common::Redis::RespValue set_request{base, set_command, 1, 2}; RedisLoadBalancerContextImpl context3("foo", true, true, set_request, - NetworkFilters::Common::Redis::Client::ReadPolicy::Master); + NetworkFilters::Common::Redis::Client::ReadPolicy::Primary); EXPECT_EQ(absl::optional(44950), context3.computeHashKey()); EXPECT_EQ(false, context3.isReadCommand()); - EXPECT_EQ(NetworkFilters::Common::Redis::Client::ReadPolicy::Master, context3.readPolicy()); + EXPECT_EQ(NetworkFilters::Common::Redis::Client::ReadPolicy::Primary, context3.readPolicy()); } TEST_F(RedisLoadBalancerContextImplTest, UpperCaseCommand) { @@ -475,11 +475,11 @@ TEST_F(RedisLoadBalancerContextImplTest, UpperCaseCommand) { get_request.asArray().swap(get_foo); RedisLoadBalancerContextImpl context1("foo", true, true, get_request, - NetworkFilters::Common::Redis::Client::ReadPolicy::Master); + NetworkFilters::Common::Redis::Client::ReadPolicy::Primary); EXPECT_EQ(absl::optional(44950), context1.computeHashKey()); EXPECT_EQ(true, context1.isReadCommand()); - EXPECT_EQ(NetworkFilters::Common::Redis::Client::ReadPolicy::Master, context1.readPolicy()); + EXPECT_EQ(NetworkFilters::Common::Redis::Client::ReadPolicy::Primary, context1.readPolicy()); // Simple write command std::vector set_foo(3); @@ -495,11 +495,11 @@ TEST_F(RedisLoadBalancerContextImplTest, UpperCaseCommand) { set_request.asArray().swap(set_foo); RedisLoadBalancerContextImpl context2("foo", true, true, set_request, - NetworkFilters::Common::Redis::Client::ReadPolicy::Master); + NetworkFilters::Common::Redis::Client::ReadPolicy::Primary); 
EXPECT_EQ(absl::optional(44950), context2.computeHashKey()); EXPECT_EQ(false, context2.isReadCommand()); - EXPECT_EQ(NetworkFilters::Common::Redis::Client::ReadPolicy::Master, context2.readPolicy()); + EXPECT_EQ(NetworkFilters::Common::Redis::Client::ReadPolicy::Primary, context2.readPolicy()); } TEST_F(RedisLoadBalancerContextImplTest, UnsupportedCommand) { @@ -511,11 +511,11 @@ TEST_F(RedisLoadBalancerContextImplTest, UnsupportedCommand) { unknown_request.asArray().swap(unknown); RedisLoadBalancerContextImpl context3("foo", true, true, unknown_request, - NetworkFilters::Common::Redis::Client::ReadPolicy::Master); + NetworkFilters::Common::Redis::Client::ReadPolicy::Primary); EXPECT_EQ(absl::optional(44950), context3.computeHashKey()); EXPECT_EQ(false, context3.isReadCommand()); - EXPECT_EQ(NetworkFilters::Common::Redis::Client::ReadPolicy::Master, context3.readPolicy()); + EXPECT_EQ(NetworkFilters::Common::Redis::Client::ReadPolicy::Primary, context3.readPolicy()); } TEST_F(RedisLoadBalancerContextImplTest, EnforceHashTag) { @@ -534,11 +534,11 @@ TEST_F(RedisLoadBalancerContextImplTest, EnforceHashTag) { // Enable_hash tagging should be override when is_redis_cluster is true. This is treated like // "foo" RedisLoadBalancerContextImpl context2("{foo}bar", false, true, set_request, - NetworkFilters::Common::Redis::Client::ReadPolicy::Master); + NetworkFilters::Common::Redis::Client::ReadPolicy::Primary); EXPECT_EQ(absl::optional(44950), context2.computeHashKey()); EXPECT_EQ(false, context2.isReadCommand()); - EXPECT_EQ(NetworkFilters::Common::Redis::Client::ReadPolicy::Master, context2.readPolicy()); + EXPECT_EQ(NetworkFilters::Common::Redis::Client::ReadPolicy::Primary, context2.readPolicy()); } } // namespace Redis diff --git a/test/extensions/clusters/redis/redis_cluster_test.cc b/test/extensions/clusters/redis/redis_cluster_test.cc index c41793d816c2..f630cdbd1a28 100644 --- a/test/extensions/clusters/redis/redis_cluster_test.cc +++ b/test/extensions/clusters/redis/redis_cluster_test.cc @@ -185,14 +185,14 @@ class RedisClusterTest : public testing::Test, pool_callbacks_->onFailure(); } - NetworkFilters::Common::Redis::RespValuePtr singleSlotMasterReplica(const std::string& master, - const std::string& replica, - int64_t port) const { - std::vector master_1(2); - master_1[0].type(NetworkFilters::Common::Redis::RespType::BulkString); - master_1[0].asString() = master; - master_1[1].type(NetworkFilters::Common::Redis::RespType::Integer); - master_1[1].asInteger() = port; + NetworkFilters::Common::Redis::RespValuePtr singleSlotPrimaryReplica(const std::string& primary, + const std::string& replica, + int64_t port) const { + std::vector primary_1(2); + primary_1[0].type(NetworkFilters::Common::Redis::RespType::BulkString); + primary_1[0].asString() = primary; + primary_1[1].type(NetworkFilters::Common::Redis::RespType::Integer); + primary_1[1].asInteger() = port; std::vector replica_1(2); replica_1[0].type(NetworkFilters::Common::Redis::RespType::BulkString); @@ -206,7 +206,7 @@ class RedisClusterTest : public testing::Test, slot_1[1].type(NetworkFilters::Common::Redis::RespType::Integer); slot_1[1].asInteger() = 16383; slot_1[2].type(NetworkFilters::Common::Redis::RespType::Array); - slot_1[2].asArray().swap(master_1); + slot_1[2].asArray().swap(primary_1); slot_1[3].type(NetworkFilters::Common::Redis::RespType::Array); slot_1[3].asArray().swap(replica_1); @@ -221,18 +221,18 @@ class RedisClusterTest : public testing::Test, return response; } - NetworkFilters::Common::Redis::RespValuePtr 
twoSlotsMasters() const { - std::vector master_1(2); - master_1[0].type(NetworkFilters::Common::Redis::RespType::BulkString); - master_1[0].asString() = "127.0.0.1"; - master_1[1].type(NetworkFilters::Common::Redis::RespType::Integer); - master_1[1].asInteger() = 22120; + NetworkFilters::Common::Redis::RespValuePtr twoSlotsPrimaries() const { + std::vector primary_1(2); + primary_1[0].type(NetworkFilters::Common::Redis::RespType::BulkString); + primary_1[0].asString() = "127.0.0.1"; + primary_1[1].type(NetworkFilters::Common::Redis::RespType::Integer); + primary_1[1].asInteger() = 22120; - std::vector master_2(2); - master_2[0].type(NetworkFilters::Common::Redis::RespType::BulkString); - master_2[0].asString() = "127.0.0.2"; - master_2[1].type(NetworkFilters::Common::Redis::RespType::Integer); - master_2[1].asInteger() = 22120; + std::vector primary_2(2); + primary_2[0].type(NetworkFilters::Common::Redis::RespType::BulkString); + primary_2[0].asString() = "127.0.0.2"; + primary_2[1].type(NetworkFilters::Common::Redis::RespType::Integer); + primary_2[1].asInteger() = 22120; std::vector slot_1(3); slot_1[0].type(NetworkFilters::Common::Redis::RespType::Integer); @@ -240,7 +240,7 @@ class RedisClusterTest : public testing::Test, slot_1[1].type(NetworkFilters::Common::Redis::RespType::Integer); slot_1[1].asInteger() = 9999; slot_1[2].type(NetworkFilters::Common::Redis::RespType::Array); - slot_1[2].asArray().swap(master_1); + slot_1[2].asArray().swap(primary_1); std::vector slot_2(3); slot_2[0].type(NetworkFilters::Common::Redis::RespType::Integer); @@ -248,7 +248,7 @@ class RedisClusterTest : public testing::Test, slot_2[1].type(NetworkFilters::Common::Redis::RespType::Integer); slot_2[1].asInteger() = 16383; slot_2[2].type(NetworkFilters::Common::Redis::RespType::Array); - slot_2[2].asArray().swap(master_2); + slot_2[2].asArray().swap(primary_2); std::vector slots(2); slots[0].type(NetworkFilters::Common::Redis::RespType::Array); @@ -263,18 +263,18 @@ class RedisClusterTest : public testing::Test, return response; } - NetworkFilters::Common::Redis::RespValuePtr twoSlotsMastersWithReplica() const { - std::vector master_1(2); - master_1[0].type(NetworkFilters::Common::Redis::RespType::BulkString); - master_1[0].asString() = "127.0.0.1"; - master_1[1].type(NetworkFilters::Common::Redis::RespType::Integer); - master_1[1].asInteger() = 22120; + NetworkFilters::Common::Redis::RespValuePtr twoSlotsPrimariesWithReplica() const { + std::vector primary_1(2); + primary_1[0].type(NetworkFilters::Common::Redis::RespType::BulkString); + primary_1[0].asString() = "127.0.0.1"; + primary_1[1].type(NetworkFilters::Common::Redis::RespType::Integer); + primary_1[1].asInteger() = 22120; - std::vector master_2(2); - master_2[0].type(NetworkFilters::Common::Redis::RespType::BulkString); - master_2[0].asString() = "127.0.0.2"; - master_2[1].type(NetworkFilters::Common::Redis::RespType::Integer); - master_2[1].asInteger() = 22120; + std::vector primary_2(2); + primary_2[0].type(NetworkFilters::Common::Redis::RespType::BulkString); + primary_2[0].asString() = "127.0.0.2"; + primary_2[1].type(NetworkFilters::Common::Redis::RespType::Integer); + primary_2[1].asInteger() = 22120; std::vector replica_1(2); replica_1[0].type(NetworkFilters::Common::Redis::RespType::BulkString); @@ -294,7 +294,7 @@ class RedisClusterTest : public testing::Test, slot_1[1].type(NetworkFilters::Common::Redis::RespType::Integer); slot_1[1].asInteger() = 9999; slot_1[2].type(NetworkFilters::Common::Redis::RespType::Array); - 
slot_1[2].asArray().swap(master_1); + slot_1[2].asArray().swap(primary_1); slot_1[3].type(NetworkFilters::Common::Redis::RespType::Array); slot_1[3].asArray().swap(replica_1); @@ -304,7 +304,7 @@ class RedisClusterTest : public testing::Test, slot_2[1].type(NetworkFilters::Common::Redis::RespType::Integer); slot_2[1].asInteger() = 16383; slot_2[2].type(NetworkFilters::Common::Redis::RespType::Array); - slot_2[2].asArray().swap(master_2); + slot_2[2].asArray().swap(primary_2); slot_2[3].type(NetworkFilters::Common::Redis::RespType::Array); slot_2[3].asArray().swap(replica_2); @@ -373,27 +373,27 @@ class RedisClusterTest : public testing::Test, int64_t slot1_size = idx++; int64_t slot1_range_start_type = idx++; int64_t slot1_range_end_type = idx++; - int64_t master_type = idx++; - int64_t master_size = idx++; - int64_t master_ip_type = idx++; - int64_t master_ip_value = idx++; - int64_t master_port_type = idx++; + int64_t primary_type = idx++; + int64_t primary_size = idx++; + int64_t primary_ip_type = idx++; + int64_t primary_ip_value = idx++; + int64_t primary_port_type = idx++; idx = 0; int64_t replica_size = idx++; int64_t replica_ip_type = idx++; int64_t replica_ip_value = idx++; int64_t replica_port_type = idx++; - std::vector master_1_array; - if (flags.test(master_size)) { + std::vector primary_1_array; + if (flags.test(primary_size)) { // Ip field. - if (flags.test(master_ip_value)) { - master_1_array.push_back(createStringField(flags.test(master_ip_type), "127.0.0.1")); + if (flags.test(primary_ip_value)) { + primary_1_array.push_back(createStringField(flags.test(primary_ip_type), "127.0.0.1")); } else { - master_1_array.push_back(createStringField(flags.test(master_ip_type), "bad ip foo")); + primary_1_array.push_back(createStringField(flags.test(primary_ip_type), "bad ip foo")); } // Port field. - master_1_array.push_back(createIntegerField(flags.test(master_port_type), 22120)); + primary_1_array.push_back(createIntegerField(flags.test(primary_port_type), 22120)); } std::vector replica_1_array; @@ -414,7 +414,7 @@ class RedisClusterTest : public testing::Test, if (flags.test(slot1_size)) { slot_1_array.push_back(createIntegerField(flags.test(slot1_range_start_type), 0)); slot_1_array.push_back(createIntegerField(flags.test(slot1_range_end_type), 16383)); - slot_1_array.push_back(createArrayField(flags.test(master_type), master_1_array)); + slot_1_array.push_back(createArrayField(flags.test(primary_type), primary_1_array)); if (replica_flags.any()) { slot_1_array.push_back(createArrayField(replica_flags.test(replica_size), replica_1_array)); } @@ -464,30 +464,30 @@ class RedisClusterTest : public testing::Test, cluster_->initialize([&]() -> void { initialized_.ready(); }); EXPECT_CALL(*cluster_callback_, onClusterSlotUpdate(_, _)).Times(1); - expectClusterSlotResponse(singleSlotMasterReplica("127.0.0.1", "127.0.0.2", 22120)); + expectClusterSlotResponse(singleSlotPrimaryReplica("127.0.0.1", "127.0.0.2", 22120)); expectHealthyHosts(std::list({"127.0.0.1:22120", "127.0.0.2:22120"})); - // Promote replica to master + // Promote replica to primary expectRedisResolve(); EXPECT_CALL(membership_updated_, ready()); resolve_timer_->invokeCallback(); EXPECT_CALL(*cluster_callback_, onClusterSlotUpdate(_, _)).Times(1); - expectClusterSlotResponse(twoSlotsMasters()); + expectClusterSlotResponse(twoSlotsPrimaries()); expectHealthyHosts(std::list({"127.0.0.1:22120", "127.0.0.2:22120"})); // No change. 
expectRedisResolve(); resolve_timer_->invokeCallback(); EXPECT_CALL(*cluster_callback_, onClusterSlotUpdate(_, _)).Times(1).WillOnce(Return(false)); - expectClusterSlotResponse(twoSlotsMasters()); + expectClusterSlotResponse(twoSlotsPrimaries()); expectHealthyHosts(std::list({"127.0.0.1:22120", "127.0.0.2:22120"})); - // Add replicas to masters + // Add replicas to primaries expectRedisResolve(); EXPECT_CALL(membership_updated_, ready()); resolve_timer_->invokeCallback(); EXPECT_CALL(*cluster_callback_, onClusterSlotUpdate(_, _)).Times(1); - expectClusterSlotResponse(twoSlotsMastersWithReplica()); + expectClusterSlotResponse(twoSlotsPrimariesWithReplica()); expectHealthyHosts(std::list( {"127.0.0.1:22120", "127.0.0.3:22120", "127.0.0.2:22120", "127.0.0.4:22120"})); @@ -495,7 +495,7 @@ class RedisClusterTest : public testing::Test, expectRedisResolve(); resolve_timer_->invokeCallback(); EXPECT_CALL(*cluster_callback_, onClusterSlotUpdate(_, _)).Times(1).WillOnce(Return(false)); - expectClusterSlotResponse(twoSlotsMastersWithReplica()); + expectClusterSlotResponse(twoSlotsPrimariesWithReplica()); expectHealthyHosts(std::list( {"127.0.0.1:22120", "127.0.0.3:22120", "127.0.0.2:22120", "127.0.0.4:22120"})); @@ -504,7 +504,7 @@ class RedisClusterTest : public testing::Test, EXPECT_CALL(membership_updated_, ready()); resolve_timer_->invokeCallback(); EXPECT_CALL(*cluster_callback_, onClusterSlotUpdate(_, _)).Times(1); - expectClusterSlotResponse(singleSlotMasterReplica("127.0.0.1", "127.0.0.2", 22120)); + expectClusterSlotResponse(singleSlotPrimaryReplica("127.0.0.1", "127.0.0.2", 22120)); expectHealthyHosts(std::list({"127.0.0.1:22120", "127.0.0.2:22120"})); } @@ -639,7 +639,7 @@ TEST_P(RedisDnsParamTest, ImmediateResolveDns) { TestUtility::makeDnsResponse(address_pair)); EXPECT_CALL(*cluster_callback_, onClusterSlotUpdate(_, _)).Times(1); expectClusterSlotResponse( - singleSlotMasterReplica(address_pair.front(), address_pair.back(), 22120)); + singleSlotPrimaryReplica(address_pair.front(), address_pair.back(), 22120)); return nullptr; })); @@ -750,7 +750,7 @@ TEST_F(RedisClusterTest, RedisResolveFailure) { EXPECT_CALL(membership_updated_, ready()); EXPECT_CALL(initialized_, ready()); EXPECT_CALL(*cluster_callback_, onClusterSlotUpdate(_, _)).Times(1); - expectClusterSlotResponse(singleSlotMasterReplica("127.0.0.1", "127.0.0.2", 22120)); + expectClusterSlotResponse(singleSlotPrimaryReplica("127.0.0.1", "127.0.0.2", 22120)); expectHealthyHosts(std::list({"127.0.0.1:22120", "127.0.0.2:22120"})); // Expect no change if resolve failed. @@ -818,9 +818,9 @@ TEST_F(RedisClusterTest, RedisErrorResponse) { EXPECT_CALL(membership_updated_, ready()); EXPECT_CALL(initialized_, ready()); EXPECT_CALL(*cluster_callback_, onClusterSlotUpdate(_, _)).Times(1); - std::bitset single_slot_master(0xfff); + std::bitset single_slot_primary(0xfff); std::bitset no_replica(0); - expectClusterSlotResponse(createResponse(single_slot_master, no_replica)); + expectClusterSlotResponse(createResponse(single_slot_primary, no_replica)); expectHealthyHosts(std::list({"127.0.0.1:22120"})); // Expect no change if resolve failed. 
@@ -854,9 +854,9 @@ TEST_F(RedisClusterTest, RedisReplicaErrorResponse) { EXPECT_CALL(membership_updated_, ready()); EXPECT_CALL(initialized_, ready()); EXPECT_CALL(*cluster_callback_, onClusterSlotUpdate(_, _)).Times(1); - std::bitset single_slot_master(0xfff); + std::bitset single_slot_primary(0xfff); std::bitset no_replica(0); - expectClusterSlotResponse(createResponse(single_slot_master, no_replica)); + expectClusterSlotResponse(createResponse(single_slot_primary, no_replica)); expectHealthyHosts(std::list({"127.0.0.1:22120"})); // Expect no change if resolve failed. @@ -872,7 +872,7 @@ TEST_F(RedisClusterTest, RedisReplicaErrorResponse) { EXPECT_CALL(*cluster_callback_, onClusterSlotUpdate(_, _)).Times(1).WillOnce(Return(false)); } expectHealthyHosts(std::list({"127.0.0.1:22120"})); - expectClusterSlotResponse(createResponse(single_slot_master, replica_flags)); + expectClusterSlotResponse(createResponse(single_slot_primary, replica_flags)); EXPECT_EQ(++update_attempt, cluster_->info()->stats().update_attempt_.value()); if (!(replica_flags.all() || replica_flags.none())) { EXPECT_EQ(++update_failure, cluster_->info()->stats().update_failure_.value()); @@ -952,7 +952,7 @@ TEST_F(RedisClusterTest, HostRemovalAfterHcFail) { cluster_->initialize([&]() -> void { initialized_.ready(); }); EXPECT_CALL(*cluster_callback_, onClusterSlotUpdate(_, _)).Times(1); - expectClusterSlotResponse(singleSlotMasterReplica("127.0.0.1", "127.0.0.2", 22120)); + expectClusterSlotResponse(singleSlotPrimaryReplica("127.0.0.1", "127.0.0.2", 22120)); // Verify that both hosts are initially marked with FAILED_ACTIVE_HC, then // clear the flag to simulate that these hosts have been successfully health diff --git a/test/extensions/filters/network/common/redis/client_impl_test.cc b/test/extensions/filters/network/common/redis/client_impl_test.cc index d5402aeaa6db..fc9c8131aa07 100644 --- a/test/extensions/filters/network/common/redis/client_impl_test.cc +++ b/test/extensions/filters/network/common/redis/client_impl_test.cc @@ -190,7 +190,7 @@ class ConfigBufferSizeGTSingleRequest : public Config { } uint32_t maxUpstreamUnknownConnections() const override { return 0; } bool enableCommandStats() const override { return false; } - ReadPolicy readPolicy() const override { return ReadPolicy::Master; } + ReadPolicy readPolicy() const override { return ReadPolicy::Primary; } }; TEST_F(RedisClientImplTest, BatchWithTimerFiring) { @@ -347,7 +347,7 @@ class ConfigEnableCommandStats : public Config { std::chrono::milliseconds bufferFlushTimeoutInMs() const override { return std::chrono::milliseconds(0); } - ReadPolicy readPolicy() const override { return ReadPolicy::Master; } + ReadPolicy readPolicy() const override { return ReadPolicy::Primary; } uint32_t maxUpstreamUnknownConnections() const override { return 0; } bool enableCommandStats() const override { return true; } }; @@ -546,7 +546,7 @@ TEST_F(RedisClientImplTest, InitializedWithAuthAcl) { client_->close(); } -TEST_F(RedisClientImplTest, InitializedWithPreferMasterReadPolicy) { +TEST_F(RedisClientImplTest, InitializedWithPreferPrimaryReadPolicy) { testInitializeReadPolicy(envoy::extensions::filters::network::redis_proxy::v3::RedisProxy:: ConnPoolSettings::PREFER_MASTER); } @@ -732,7 +732,7 @@ class ConfigOutlierDisabled : public Config { std::chrono::milliseconds bufferFlushTimeoutInMs() const override { return std::chrono::milliseconds(0); } - ReadPolicy readPolicy() const override { return ReadPolicy::Master; } + ReadPolicy readPolicy() const override { return 
ReadPolicy::Primary; } uint32_t maxUpstreamUnknownConnections() const override { return 0; } bool enableCommandStats() const override { return false; } }; diff --git a/test/extensions/filters/network/redis_proxy/conn_pool_impl_test.cc b/test/extensions/filters/network/redis_proxy/conn_pool_impl_test.cc index ed83f593dfe6..4b686099dcfc 100644 --- a/test/extensions/filters/network/redis_proxy/conn_pool_impl_test.cc +++ b/test/extensions/filters/network/redis_proxy/conn_pool_impl_test.cc @@ -388,7 +388,7 @@ TEST_F(RedisConnPoolImplTest, ClientRequestFailed) { TEST_F(RedisConnPoolImplTest, BasicWithReadPolicy) { testReadPolicy(envoy::extensions::filters::network::redis_proxy::v3::RedisProxy:: ConnPoolSettings::PREFER_MASTER, - NetworkFilters::Common::Redis::Client::ReadPolicy::PreferMaster); + NetworkFilters::Common::Redis::Client::ReadPolicy::PreferPrimary); testReadPolicy( envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::ConnPoolSettings::REPLICA, NetworkFilters::Common::Redis::Client::ReadPolicy::Replica); From 8c92ed5fc18128e77708eb07168ed8f6c8a9764d Mon Sep 17 00:00:00 2001 From: Jian Zeng Date: Wed, 1 Jul 2020 05:58:34 +0800 Subject: [PATCH 496/909] test(router): fix PathRedirectQueryNotPreserved test (#11812) Signed-off-by: knight42 --- test/common/router/config_impl_test.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/common/router/config_impl_test.cc b/test/common/router/config_impl_test.cc index 500597c46e4d..392b7b9b79ee 100644 --- a/test/common/router/config_impl_test.cc +++ b/test/common/router/config_impl_test.cc @@ -6237,7 +6237,7 @@ name: AllRedirects TEST_F(RouteConfigurationV2, PathRedirectQueryNotPreserved) { TestScopedRuntime scoped_runtime; Runtime::LoaderSingleton::getExisting()->mergeValues( - {{"envoy.reloadable_features.preserve_query_string_in_redirects", "false"}}); + {{"envoy.reloadable_features.preserve_query_string_in_path_redirects", "false"}}); std::string RouteDynPathRedirect = R"EOF( name: AllRedirects @@ -6262,7 +6262,7 @@ name: AllRedirects { Http::TestRequestHeaderMapImpl headers = genRedirectHeaders("redirect.lyft.com", "/path/redirect/?lang=eng&con=US", true, false); - EXPECT_EQ("https://redirect.lyft.com/new/path-redirect/?lang=eng&con=US", + EXPECT_EQ("https://redirect.lyft.com/new/path-redirect/", config.route(headers, 0)->directResponseEntry()->newPath(headers)); } { From 91905443ca743a22a347006e3e447a3d84e4b6e1 Mon Sep 17 00:00:00 2001 From: htuch Date: Tue, 30 Jun 2020 17:58:53 -0400 Subject: [PATCH 497/909] dynamic_forward_proxy: graduate HTTP filter to stable. (#11818) Based on conversations with @MattKlein123, this should be mature for production use, so removing alpha status. 
Risk level: Low Testing: N/A Signed-off-by: Harvey Tuch --- source/extensions/filters/http/dynamic_forward_proxy/BUILD | 1 - 1 file changed, 1 deletion(-) diff --git a/source/extensions/filters/http/dynamic_forward_proxy/BUILD b/source/extensions/filters/http/dynamic_forward_proxy/BUILD index 3e63ff181921..fb5f5ade7d12 100644 --- a/source/extensions/filters/http/dynamic_forward_proxy/BUILD +++ b/source/extensions/filters/http/dynamic_forward_proxy/BUILD @@ -28,7 +28,6 @@ envoy_cc_extension( srcs = ["config.cc"], hdrs = ["config.h"], security_posture = "robust_to_untrusted_downstream", - status = "alpha", deps = [ "//include/envoy/registry", "//include/envoy/server:filter_config_interface", From 83d12c822eaaa283adf5a8a05785a724410a1a37 Mon Sep 17 00:00:00 2001 From: antonio Date: Wed, 1 Jul 2020 02:42:03 -0400 Subject: [PATCH 498/909] build: Enable -Wc++20-extensions on clang builds. (#11828) Signed-off-by: Antonio Vicente --- bazel/envoy_internal.bzl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bazel/envoy_internal.bzl b/bazel/envoy_internal.bzl index 1603523e575c..b006898a8b64 100644 --- a/bazel/envoy_internal.bzl +++ b/bazel/envoy_internal.bzl @@ -48,7 +48,7 @@ def envoy_copts(repository, test = False): repository + "//bazel:windows_fastbuild_build": [], repository + "//bazel:windows_dbg_build": [], }) + select({ - repository + "//bazel:clang_build": ["-fno-limit-debug-info", "-Wgnu-conditional-omitted-operand"], + repository + "//bazel:clang_build": ["-fno-limit-debug-info", "-Wgnu-conditional-omitted-operand", "-Wc++20-extensions"], repository + "//bazel:gcc_build": ["-Wno-maybe-uninitialized"], "//conditions:default": [], }) + select({ From 011945dcf92b8a461ab4ba309fa2bebeffc15895 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Wed, 1 Jul 2020 08:13:19 -0400 Subject: [PATCH 499/909] conn_pool: cleaning up interfaces (#11796) Removing two "friend class" declarations by providing necessary accessors. Replacing void* context with castable context. 
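A minimal standalone sketch of the castable-context pattern this change introduces (illustrative only; FakeHttpContext and main() below are hypothetical and not Envoy code, while AttachContext and typedContext<T>() mirror the declarations in the diff that follows):

#include <cassert>
#include <iostream>

// Base context type: a virtual destructor is enough to make the debug-only
// dynamic_cast check below possible.
struct AttachContext {
  virtual ~AttachContext() = default;
};

// A protocol-specific context, analogous to HttpAttachContext in this patch.
struct FakeHttpContext : public AttachContext {
  explicit FakeHttpContext(int stream_id) : stream_id_(stream_id) {}
  int stream_id_;
};

// Verify the downcast in debug builds, then use the cheap static_cast.
template <class T> T& typedContext(AttachContext& context) {
  assert(dynamic_cast<T*>(&context) != nullptr);
  return *static_cast<T*>(&context);
}

int main() {
  FakeHttpContext http_context(7);
  AttachContext& base = http_context; // handed around as the base type
  std::cout << typedContext<FakeHttpContext>(base).stream_id_ << "\n"; // prints 7
}

Compared with the old void* interface, a mistaken cast now trips the dynamic_cast assertion in debug builds instead of silently reinterpreting memory.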
Risk Level: Low (no-op refactor) Testing: n/a Docs Changes: n/a Release Notes: n/a Signed-off-by: Alyssa Wilk --- source/common/conn_pool/conn_pool_base.cc | 48 +++++++++++------------ source/common/conn_pool/conn_pool_base.h | 39 +++++++++++++----- source/common/http/conn_pool_base.cc | 22 ++++++----- source/common/http/conn_pool_base.h | 47 +++++++++++----------- source/common/http/http1/conn_pool.cc | 4 +- 5 files changed, 90 insertions(+), 70 deletions(-) diff --git a/source/common/conn_pool/conn_pool_base.cc b/source/common/conn_pool/conn_pool_base.cc index 6e1be1ddc8a5..3c2c0e648db5 100644 --- a/source/common/conn_pool/conn_pool_base.cc +++ b/source/common/conn_pool/conn_pool_base.cc @@ -60,8 +60,8 @@ void ConnPoolImplBase::tryCreateNewConnection() { } } -void ConnPoolImplBase::attachRequestToClientImpl(Envoy::ConnectionPool::ActiveClient& client, - void* context) { +void ConnPoolImplBase::attachRequestToClient(Envoy::ConnectionPool::ActiveClient& client, + AttachContext& context) { ASSERT(client.state_ == Envoy::ConnectionPool::ActiveClient::State::READY); if (!host_->cluster().resourceManager(priority_).requests().canCreate()) { @@ -115,11 +115,11 @@ void ConnPoolImplBase::onRequestClosed(Envoy::ConnectionPool::ActiveClient& clie } } -ConnectionPool::Cancellable* ConnPoolImplBase::newStream(void* context) { +ConnectionPool::Cancellable* ConnPoolImplBase::newStream(AttachContext& context) { if (!ready_clients_.empty()) { ActiveClient& client = *ready_clients_.front(); ENVOY_CONN_LOG(debug, "using existing connection", client); - attachRequestToClientImpl(client, context); + attachRequestToClient(client, context); return nullptr; } @@ -145,7 +145,7 @@ void ConnPoolImplBase::onUpstreamReady() { ActiveClientPtr& client = ready_clients_.front(); ENVOY_CONN_LOG(debug, "attaching to next request", *client); // Pending requests are pushed onto the front, so pull from the back. 
- attachRequestToClient(*client, *pending_requests_.back()); + attachRequestToClient(*client, pending_requests_.back()->context()); pending_requests_.pop_back(); } } @@ -317,14 +317,14 @@ void ConnPoolImplBase::onConnectionEvent(ActiveClient& client, absl::string_view } PendingRequest::PendingRequest(ConnPoolImplBase& parent) : parent_(parent) { - parent_.host_->cluster().stats().upstream_rq_pending_total_.inc(); - parent_.host_->cluster().stats().upstream_rq_pending_active_.inc(); - parent_.host_->cluster().resourceManager(parent_.priority_).pendingRequests().inc(); + parent_.host()->cluster().stats().upstream_rq_pending_total_.inc(); + parent_.host()->cluster().stats().upstream_rq_pending_active_.inc(); + parent_.host()->cluster().resourceManager(parent_.priority()).pendingRequests().inc(); } PendingRequest::~PendingRequest() { - parent_.host_->cluster().stats().upstream_rq_pending_active_.dec(); - parent_.host_->cluster().resourceManager(parent_.priority_).pendingRequests().dec(); + parent_.host()->cluster().stats().upstream_rq_pending_active_.dec(); + parent_.host()->cluster().resourceManager(parent_.priority()).pendingRequests().dec(); } void PendingRequest::cancel(Envoy::ConnectionPool::CancelPolicy policy) { @@ -385,18 +385,18 @@ ActiveClient::ActiveClient(ConnPoolImplBase& parent, uint64_t lifetime_request_l uint64_t concurrent_request_limit) : parent_(parent), remaining_requests_(translateZeroToUnlimited(lifetime_request_limit)), concurrent_request_limit_(translateZeroToUnlimited(concurrent_request_limit)), - connect_timer_(parent_.dispatcher_.createTimer([this]() -> void { onConnectTimeout(); })) { + connect_timer_(parent_.dispatcher().createTimer([this]() -> void { onConnectTimeout(); })) { conn_connect_ms_ = std::make_unique( - parent_.host_->cluster().stats().upstream_cx_connect_ms_, parent_.dispatcher_.timeSource()); + parent_.host()->cluster().stats().upstream_cx_connect_ms_, parent_.dispatcher().timeSource()); conn_length_ = std::make_unique( - parent_.host_->cluster().stats().upstream_cx_length_ms_, parent_.dispatcher_.timeSource()); - connect_timer_->enableTimer(parent_.host_->cluster().connectTimeout()); - - parent_.host_->stats().cx_total_.inc(); - parent_.host_->stats().cx_active_.inc(); - parent_.host_->cluster().stats().upstream_cx_total_.inc(); - parent_.host_->cluster().stats().upstream_cx_active_.inc(); - parent_.host_->cluster().resourceManager(parent_.priority_).connections().inc(); + parent_.host()->cluster().stats().upstream_cx_length_ms_, parent_.dispatcher().timeSource()); + connect_timer_->enableTimer(parent_.host()->cluster().connectTimeout()); + + parent_.host()->stats().cx_total_.inc(); + parent_.host()->stats().cx_active_.inc(); + parent_.host()->cluster().stats().upstream_cx_total_.inc(); + parent_.host()->cluster().stats().upstream_cx_active_.inc(); + parent_.host()->cluster().resourceManager(parent_.priority()).connections().inc(); } ActiveClient::~ActiveClient() { releaseResources(); } @@ -411,15 +411,15 @@ void ActiveClient::releaseResources() { conn_length_->complete(); - parent_.host_->cluster().stats().upstream_cx_active_.dec(); - parent_.host_->stats().cx_active_.dec(); - parent_.host_->cluster().resourceManager(parent_.priority_).connections().dec(); + parent_.host()->cluster().stats().upstream_cx_active_.dec(); + parent_.host()->stats().cx_active_.dec(); + parent_.host()->cluster().resourceManager(parent_.priority()).connections().dec(); } } void ActiveClient::onConnectTimeout() { ENVOY_CONN_LOG(debug, "connect timeout", *this); - 
parent_.host_->cluster().stats().upstream_cx_connect_timeout_.inc(); + parent_.host()->cluster().stats().upstream_cx_connect_timeout_.inc(); timed_out_ = true; close(); } diff --git a/source/common/conn_pool/conn_pool_base.h b/source/common/conn_pool/conn_pool_base.h index 6e7cb0f2fbc3..d0d3f94b56e8 100644 --- a/source/common/conn_pool/conn_pool_base.h +++ b/source/common/conn_pool/conn_pool_base.h @@ -14,6 +14,13 @@ namespace ConnectionPool { class ConnPoolImplBase; +// A placeholder struct for whatever data a given connection pool needs to +// successfully attach and upstream connection to a downstream connection. +struct AttachContext { + // Add a virtual destructor to allow for the dynamic_cast ASSERT in typedContext. + virtual ~AttachContext() = default; +}; + // ActiveClient provides a base class for connection pool clients that handles connection timings // as well as managing the connection timeout. class ActiveClient : public LinkedObject, @@ -80,10 +87,9 @@ class PendingRequest : public LinkedObject, public ConnectionPoo // ConnectionPool::Cancellable void cancel(Envoy::ConnectionPool::CancelPolicy policy) override; - // TODO(alyssawilk) find an alternate to void* // The context here returns a pointer to whatever context is provided with newStream(), // which will be passed back to the parent in onPoolReady or onPoolFailure. - virtual void* context() PURE; + virtual AttachContext& context() PURE; ConnPoolImplBase& parent_; }; @@ -101,6 +107,12 @@ class ConnPoolImplBase : protected Logger::Loggable { const Network::TransportSocketOptionsSharedPtr& transport_socket_options); virtual ~ConnPoolImplBase(); + // A helper function to get the specific context type from the base class context. + template T& typedContext(AttachContext& context) { + ASSERT(dynamic_cast(&context) != nullptr); + return *static_cast(&context); + } + void addDrainedCallbackImpl(Instance::DrainedCb cb); void drainConnectionsImpl(); @@ -135,29 +147,36 @@ class ConnPoolImplBase : protected Logger::Loggable { Network::ConnectionEvent event); void checkForDrained(); void onUpstreamReady(); - ConnectionPool::Cancellable* newStream(void* context); + ConnectionPool::Cancellable* newStream(AttachContext& context); - virtual ConnectionPool::Cancellable* newPendingRequest(void* context) PURE; + virtual ConnectionPool::Cancellable* newPendingRequest(AttachContext& context) PURE; // Creates a new connection if allowed by resourceManager, or if created to avoid // starving this pool. void tryCreateNewConnection(); - virtual void attachRequestToClient(ActiveClient& client, PendingRequest& request) PURE; - void attachRequestToClientImpl(Envoy::ConnectionPool::ActiveClient& client, void* pair); + void attachRequestToClient(Envoy::ConnectionPool::ActiveClient& client, AttachContext& context); + virtual void onPoolFailure(const Upstream::HostDescriptionConstSharedPtr& host_description, absl::string_view failure_reason, ConnectionPool::PoolFailureReason pool_failure_reason, - void* context) PURE; - virtual void onPoolReady(ActiveClient& client, void* context) PURE; + AttachContext& context) PURE; + virtual void onPoolReady(ActiveClient& client, AttachContext& context) PURE; // Called by derived classes any time a request is completed or destroyed for any reason. 
void onRequestClosed(Envoy::ConnectionPool::ActiveClient& client, bool delay_attaching_request); + const Upstream::HostConstSharedPtr& host() { return host_; } + Event::Dispatcher& dispatcher() { return dispatcher_; } + Upstream::ResourcePriority priority() const { return priority_; } + const Network::ConnectionSocket::OptionsSharedPtr& socketOptions() { return socket_options_; } + const Network::TransportSocketOptionsSharedPtr& transportSocketOptions() { + return transport_socket_options_; + } + +protected: const Upstream::HostConstSharedPtr host_; const Upstream::ResourcePriority priority_; - friend class ActiveClient; - friend class PendingRequest; Event::Dispatcher& dispatcher_; const Network::ConnectionSocket::OptionsSharedPtr socket_options_; const Network::TransportSocketOptionsSharedPtr transport_socket_options_; diff --git a/source/common/http/conn_pool_base.cc b/source/common/http/conn_pool_base.cc index 416f97c499d9..dc1af9718211 100644 --- a/source/common/http/conn_pool_base.cc +++ b/source/common/http/conn_pool_base.cc @@ -56,17 +56,18 @@ HttpConnPoolImplBase::HttpConnPoolImplBase( ConnectionPool::Cancellable* HttpConnPoolImplBase::newStream(Http::ResponseDecoder& response_decoder, Http::ConnectionPool::Callbacks& callbacks) { - AttachContext context = std::make_pair(&response_decoder, &callbacks); - return Envoy::ConnectionPool::ConnPoolImplBase::newStream(reinterpret_cast(&context)); + HttpAttachContext context({&response_decoder, &callbacks}); + return Envoy::ConnectionPool::ConnPoolImplBase::newStream(context); } bool HttpConnPoolImplBase::hasActiveConnections() const { return (!pending_requests_.empty() || (num_active_requests_ > 0)); } -ConnectionPool::Cancellable* HttpConnPoolImplBase::newPendingRequest(void* context) { - Http::ResponseDecoder& decoder = *reinterpret_cast(context)->first; - Http::ConnectionPool::Callbacks& callbacks = *reinterpret_cast(context)->second; +ConnectionPool::Cancellable* +HttpConnPoolImplBase::newPendingRequest(Envoy::ConnectionPool::AttachContext& context) { + Http::ResponseDecoder& decoder = *typedContext(context).decoder_; + Http::ConnectionPool::Callbacks& callbacks = *typedContext(context).callbacks_; ENVOY_LOG(debug, "queueing request due to no available connections"); Envoy::ConnectionPool::PendingRequestPtr pending_request( new HttpPendingRequest(*this, decoder, callbacks)); @@ -74,11 +75,12 @@ ConnectionPool::Cancellable* HttpConnPoolImplBase::newPendingRequest(void* conte return pending_requests_.front().get(); } -void HttpConnPoolImplBase::onPoolReady(Envoy::ConnectionPool::ActiveClient& client, void* context) { - ActiveClient* http_client = reinterpret_cast(&client); - auto* pair = reinterpret_cast(context); - Http::ResponseDecoder& response_decoder = *pair->first; - Http::ConnectionPool::Callbacks& callbacks = *pair->second; +void HttpConnPoolImplBase::onPoolReady(Envoy::ConnectionPool::ActiveClient& client, + Envoy::ConnectionPool::AttachContext& context) { + ActiveClient* http_client = static_cast(&client); + auto& http_context = typedContext(context); + Http::ResponseDecoder& response_decoder = *http_context.decoder_; + Http::ConnectionPool::Callbacks& callbacks = *http_context.callbacks_; Http::RequestEncoder& new_encoder = http_client->newStreamEncoder(response_decoder); callbacks.onPoolReady(new_encoder, client.real_host_description_, http_client->codec_client_->streamInfo()); diff --git a/source/common/http/conn_pool_base.h b/source/common/http/conn_pool_base.h index 78171c6e2411..45f3951570e9 100644 --- 
a/source/common/http/conn_pool_base.h +++ b/source/common/http/conn_pool_base.h @@ -14,19 +14,24 @@ namespace Envoy { namespace Http { +struct HttpAttachContext : public Envoy::ConnectionPool::AttachContext { + HttpAttachContext(Http::ResponseDecoder* d, Http::ConnectionPool::Callbacks* c) + : decoder_(d), callbacks_(c) {} + Http::ResponseDecoder* decoder_; + Http::ConnectionPool::Callbacks* callbacks_; +}; + // An implementation of Envoy::ConnectionPool::PendingRequest for HTTP/1.1 and HTTP/2 class HttpPendingRequest : public Envoy::ConnectionPool::PendingRequest { public: // OnPoolSuccess for HTTP requires both the decoder and callbacks. OnPoolFailure // requires only the callbacks, but passes both for consistency. - using AttachContext = std::pair; HttpPendingRequest(Envoy::ConnectionPool::ConnPoolImplBase& parent, Http::ResponseDecoder& decoder, Http::ConnectionPool::Callbacks& callbacks) - : Envoy::ConnectionPool::PendingRequest(parent), - context_(std::make_pair(&decoder, &callbacks)) {} + : Envoy::ConnectionPool::PendingRequest(parent), context_(&decoder, &callbacks) {} - void* context() override { return static_cast(&context_); } - AttachContext context_; + Envoy::ConnectionPool::AttachContext& context() override { return context_; } + HttpAttachContext context_; }; // An implementation of Envoy::ConnectionPool::ConnPoolImplBase for shared code @@ -34,8 +39,6 @@ class HttpPendingRequest : public Envoy::ConnectionPool::PendingRequest { class HttpConnPoolImplBase : public Envoy::ConnectionPool::ConnPoolImplBase, public Http::ConnectionPool::Instance { public: - using AttachContext = std::pair; - HttpConnPoolImplBase(Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, Event::Dispatcher& dispatcher, const Network::ConnectionSocket::OptionsSharedPtr& options, @@ -50,21 +53,17 @@ class HttpConnPoolImplBase : public Envoy::ConnectionPool::ConnPoolImplBase, Http::ConnectionPool::Callbacks& callbacks) override; bool hasActiveConnections() const override; - void attachRequestToClient(Envoy::ConnectionPool::ActiveClient& client, - Envoy::ConnectionPool::PendingRequest& base_request) override { - HttpPendingRequest* request = reinterpret_cast(&base_request); - attachRequestToClientImpl(client, static_cast(&request->context_)); - } - // Creates a new PendingRequest and enqueues it into the request queue. 
- ConnectionPool::Cancellable* newPendingRequest(void* context) override; + ConnectionPool::Cancellable* + newPendingRequest(Envoy::ConnectionPool::AttachContext& context) override; void onPoolFailure(const Upstream::HostDescriptionConstSharedPtr& host_description, absl::string_view failure_reason, ConnectionPool::PoolFailureReason reason, - void* context) override { - auto* callbacks = reinterpret_cast(context)->second; + Envoy::ConnectionPool::AttachContext& context) override { + auto* callbacks = typedContext(context).callbacks_; callbacks->onPoolFailure(reason, failure_reason, host_description); } - void onPoolReady(Envoy::ConnectionPool::ActiveClient& client, void* context) override; + void onPoolReady(Envoy::ConnectionPool::ActiveClient& client, + Envoy::ConnectionPool::AttachContext& context) override; virtual CodecClientPtr createCodecClient(Upstream::Host::CreateConnectionData& data) PURE; }; @@ -76,17 +75,17 @@ class ActiveClient : public Envoy::ConnectionPool::ActiveClient { uint64_t concurrent_request_limit) : Envoy::ConnectionPool::ActiveClient(parent, lifetime_request_limit, concurrent_request_limit) { - Upstream::Host::CreateConnectionData data = parent_.host_->createConnection( - parent_.dispatcher_, parent_.socket_options_, parent_.transport_socket_options_); + Upstream::Host::CreateConnectionData data = parent_.host()->createConnection( + parent_.dispatcher(), parent_.socketOptions(), parent_.transportSocketOptions()); real_host_description_ = data.host_description_; codec_client_ = parent.createCodecClient(data); codec_client_->addConnectionCallbacks(*this); codec_client_->setConnectionStats( - {parent_.host_->cluster().stats().upstream_cx_rx_bytes_total_, - parent_.host_->cluster().stats().upstream_cx_rx_bytes_buffered_, - parent_.host_->cluster().stats().upstream_cx_tx_bytes_total_, - parent_.host_->cluster().stats().upstream_cx_tx_bytes_buffered_, - &parent_.host_->cluster().stats().bind_errors_, nullptr}); + {parent_.host()->cluster().stats().upstream_cx_rx_bytes_total_, + parent_.host()->cluster().stats().upstream_cx_rx_bytes_buffered_, + parent_.host()->cluster().stats().upstream_cx_tx_bytes_total_, + parent_.host()->cluster().stats().upstream_cx_tx_bytes_buffered_, + &parent_.host()->cluster().stats().bind_errors_, nullptr}); } void close() override { codec_client_->close(); } virtual Http::RequestEncoder& newStreamEncoder(Http::ResponseDecoder& response_decoder) PURE; diff --git a/source/common/http/http1/conn_pool.cc b/source/common/http/http1/conn_pool.cc index f7a6453e2b28..e810c435f39e 100644 --- a/source/common/http/http1/conn_pool.cc +++ b/source/common/http/http1/conn_pool.cc @@ -87,7 +87,7 @@ void ConnPoolImpl::StreamWrapper::decodeHeaders(ResponseHeaderMapPtr&& headers, close_connection_ = HeaderUtility::shouldCloseConnection(parent_.codec_client_->protocol(), *headers); if (close_connection_) { - parent_.parent_.host_->cluster().stats().upstream_cx_close_notify_.inc(); + parent_.parent_.host()->cluster().stats().upstream_cx_close_notify_.inc(); } } else { // If Connection: close OR @@ -100,7 +100,7 @@ void ConnPoolImpl::StreamWrapper::decodeHeaders(ResponseHeaderMapPtr&& headers, Headers::get().ConnectionValues.KeepAlive)) || (absl::EqualsIgnoreCase(headers->getProxyConnectionValue(), Headers::get().ConnectionValues.Close))) { - parent_.parent_.host_->cluster().stats().upstream_cx_close_notify_.inc(); + parent_.parent_.host()->cluster().stats().upstream_cx_close_notify_.inc(); close_connection_ = true; } } From 76525d8d9e3fb8b0b979f8ada5e152a3ddf14019 
Mon Sep 17 00:00:00 2001 From: htuch Date: Wed, 1 Jul 2020 11:09:17 -0400 Subject: [PATCH 500/909] udpa: UDPA URL encoding/decoding utils. (#11805) This patch introduces support for encoding/decoding udpa::core::v1::ResourceLocator, in addition to the existing support for udpa::core::v1::ResourceName. Some refactoring and attention to the URI/URN/URL distinction (as per RFC3986) is introduced. Part of #11264 Risk level: Low (not used) Testing: Additional unit tests for URLs added. Signed-off-by: Harvey Tuch --- api/bazel/repository_locations.bzl | 4 +- .../bazel/repository_locations.bzl | 4 +- source/common/config/udpa_resource.cc | 202 +++++++++++++++--- source/common/config/udpa_resource.h | 45 +++- test/common/config/udpa_resource_test.cc | 158 +++++++++++--- 5 files changed, 338 insertions(+), 75 deletions(-) diff --git a/api/bazel/repository_locations.bzl b/api/bazel/repository_locations.bzl index 80989a6f4d35..0a0379f7685e 100644 --- a/api/bazel/repository_locations.bzl +++ b/api/bazel/repository_locations.bzl @@ -13,8 +13,8 @@ GOOGLEAPIS_SHA = "a45019af4d3290f02eaeb1ce10990166978c807cb33a9692141a076ba46d14 PROMETHEUS_GIT_SHA = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c" # Nov 17, 2017 PROMETHEUS_SHA = "783bdaf8ee0464b35ec0c8704871e1e72afa0005c3f3587f65d9d6694bf3911b" -UDPA_GIT_SHA = "ca580c4fcf87b178547c2e9e41a2481b0008efe9" # June 24, 2020 -UDPA_SHA256 = "a1dc305cd56f1dd393fec8ec6b19f4f7d76af9740c7746e9377c8dd480f77e70" +UDPA_GIT_SHA = "efcf912fb35470672231c7b7bef620f3d17f655a" # June 29, 2020 +UDPA_SHA256 = "0f8179fbe3d27b89a4c34b2fbd55832f3b27b6810ea9b03b36d18da2629cc871" ZIPKINAPI_RELEASE = "0.2.2" # Aug 23, 2019 ZIPKINAPI_SHA256 = "688c4fe170821dd589f36ec45aaadc03a618a40283bc1f97da8fa11686fc816b" diff --git a/generated_api_shadow/bazel/repository_locations.bzl b/generated_api_shadow/bazel/repository_locations.bzl index 80989a6f4d35..0a0379f7685e 100644 --- a/generated_api_shadow/bazel/repository_locations.bzl +++ b/generated_api_shadow/bazel/repository_locations.bzl @@ -13,8 +13,8 @@ GOOGLEAPIS_SHA = "a45019af4d3290f02eaeb1ce10990166978c807cb33a9692141a076ba46d14 PROMETHEUS_GIT_SHA = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c" # Nov 17, 2017 PROMETHEUS_SHA = "783bdaf8ee0464b35ec0c8704871e1e72afa0005c3f3587f65d9d6694bf3911b" -UDPA_GIT_SHA = "ca580c4fcf87b178547c2e9e41a2481b0008efe9" # June 24, 2020 -UDPA_SHA256 = "a1dc305cd56f1dd393fec8ec6b19f4f7d76af9740c7746e9377c8dd480f77e70" +UDPA_GIT_SHA = "efcf912fb35470672231c7b7bef620f3d17f655a" # June 29, 2020 +UDPA_SHA256 = "0f8179fbe3d27b89a4c34b2fbd55832f3b27b6810ea9b03b36d18da2629cc871" ZIPKINAPI_RELEASE = "0.2.2" # Aug 23, 2019 ZIPKINAPI_SHA256 = "688c4fe170821dd589f36ec45aaadc03a618a40283bc1f97da8fa11686fc816b" diff --git a/source/common/config/udpa_resource.cc b/source/common/config/udpa_resource.cc index 6c10d4cc5e3f..d990ea875105 100644 --- a/source/common/config/udpa_resource.cc +++ b/source/common/config/udpa_resource.cc @@ -16,63 +16,199 @@ namespace Config { using PercentEncoding = Http::Utility::PercentEncoding; -std::string UdpaResourceName::encodeUri(const udpa::core::v1::ResourceName& resource_name, - const EncodeOptions& options) { - // We need to percent-encode authority, id, path and query params. Qualified types should not have - // reserved characters. - const std::string authority = PercentEncoding::encode(resource_name.authority(), "%/?#"); +namespace { + +// We need to percent-encode authority, id, path and query params. Resource types should not have +// reserved characters. 
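A worked example of the URN scheme implemented below (illustrative only; it assumes the standard protobuf-generated mutators for udpa.core.v1.ResourceName and the UdpaResourceIdentifier API added in this diff):

  udpa::core::v1::ResourceName name;
  name.set_authority("xds.example.com");
  name.set_resource_type("envoy.config.listener.v3.Listener");
  name.add_id("foo");
  name.add_id("bar");
  (*name.mutable_context()->mutable_params())["shard"] = "1";
  // UdpaResourceIdentifier::encodeUrn(name, options) yields:
  //   udpa://xds.example.com/envoy.config.listener.v3.Listener/foo/bar?shard=1

The decodeUrn() function that follows performs the inverse mapping, splitting the path back into resource type and id components and the query string back into context parameters.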
+ +std::string encodeAuthority(const std::string& authority) { + return PercentEncoding::encode(authority, "%/?#"); +} + +std::string encodeIdPath(const Protobuf::RepeatedPtrField& id) { std::vector path_components; - for (const auto& id_component : resource_name.id()) { + for (const auto& id_component : id) { path_components.emplace_back(PercentEncoding::encode(id_component, "%:/?#[]")); } const std::string path = absl::StrJoin(path_components, "/"); + return path.empty() ? "" : absl::StrCat("/", path); +} + +std::string encodeContextParams(const udpa::core::v1::ContextParams& context_params, + bool sort_context_params) { std::vector query_param_components; - for (const auto& context_param : resource_name.context().params()) { + for (const auto& context_param : context_params.params()) { query_param_components.emplace_back( absl::StrCat(PercentEncoding::encode(context_param.first, "%#[]&="), "=", PercentEncoding::encode(context_param.second, "%#[]&="))); } - if (options.sort_context_params_) { + if (sort_context_params) { std::sort(query_param_components.begin(), query_param_components.end()); } + return query_param_components.empty() ? "" : "?" + absl::StrJoin(query_param_components, "&"); +} + +std::string encodeDirectives( + const Protobuf::RepeatedPtrField& directives) { + std::vector fragment_components; + const std::string DirectiveEscapeChars = "%#[],"; + for (const auto& directive : directives) { + switch (directive.directive_case()) { + case udpa::core::v1::ResourceLocator::Directive::DirectiveCase::kAlt: + fragment_components.emplace_back(absl::StrCat( + "alt=", PercentEncoding::encode(UdpaResourceIdentifier::encodeUrl(directive.alt()), + DirectiveEscapeChars))); + break; + case udpa::core::v1::ResourceLocator::Directive::DirectiveCase::kEntry: + fragment_components.emplace_back( + absl::StrCat("entry=", PercentEncoding::encode(directive.entry(), DirectiveEscapeChars))); + break; + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } + } + return fragment_components.empty() ? "" : "#" + absl::StrJoin(fragment_components, ","); +} + +} // namespace + +std::string UdpaResourceIdentifier::encodeUrn(const udpa::core::v1::ResourceName& resource_name, + const EncodeOptions& options) { + const std::string authority = encodeAuthority(resource_name.authority()); + const std::string id_path = encodeIdPath(resource_name.id()); const std::string query_params = - query_param_components.empty() ? "" : "?" + absl::StrJoin(query_param_components, "&"); - return absl::StrCat("udpa://", authority, "/", resource_name.resource_type(), - path.empty() ? 
"" : "/", path, query_params); + encodeContextParams(resource_name.context(), options.sort_context_params_); + return absl::StrCat("udpa://", authority, "/", resource_name.resource_type(), id_path, + query_params); +} + +std::string +UdpaResourceIdentifier::encodeUrl(const udpa::core::v1::ResourceLocator& resource_locator, + const EncodeOptions& options) { + const std::string id_path = encodeIdPath(resource_locator.id()); + const std::string fragment = encodeDirectives(resource_locator.directives()); + std::string scheme = "udpa:"; + switch (resource_locator.scheme()) { + case udpa::core::v1::ResourceLocator::HTTP: + scheme = "http:"; + FALLTHRU; + case udpa::core::v1::ResourceLocator::UDPA: { + const std::string authority = encodeAuthority(resource_locator.authority()); + const std::string query_params = + encodeContextParams(resource_locator.exact_context(), options.sort_context_params_); + return absl::StrCat(scheme, "//", authority, "/", resource_locator.resource_type(), id_path, + query_params, fragment); + } + case udpa::core::v1::ResourceLocator::FILE: { + return absl::StrCat("file://", id_path, fragment); + } + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } +} + +namespace { + +void decodePath(absl::string_view path, std::string* resource_type, + Protobuf::RepeatedPtrField& id) { + // This is guaranteed by Http::Utility::extractHostPathFromUrn. + ASSERT(absl::StartsWith(path, "/")); + const std::vector path_components = absl::StrSplit(path.substr(1), '/'); + auto id_it = path_components.cbegin(); + if (resource_type != nullptr) { + *resource_type = std::string(path_components[0]); + if (resource_type->empty()) { + throw UdpaResourceIdentifier::DecodeException( + fmt::format("Resource type missing from {}", path)); + } + id_it = std::next(id_it); + } + for (; id_it != path_components.cend(); id_it++) { + *id.Add() = PercentEncoding::decode(*id_it); + } +} + +void decodeQueryParams(absl::string_view query_params, + udpa::core::v1::ContextParams& context_params) { + Http::Utility::QueryParams query_params_components = + Http::Utility::parseQueryString(query_params); + for (const auto& it : query_params_components) { + (*context_params.mutable_params())[PercentEncoding::decode(it.first)] = + PercentEncoding::decode(it.second); + } } -udpa::core::v1::ResourceName UdpaResourceName::decodeUri(absl::string_view resource_uri) { - if (!absl::StartsWith(resource_uri, "udpa:")) { - throw UdpaResourceName::DecodeException( - fmt::format("{} does not have an udpa scheme", resource_uri)); +void decodeFragment( + absl::string_view fragment, + Protobuf::RepeatedPtrField& directives) { + const std::vector fragment_components = absl::StrSplit(fragment, ','); + for (const absl::string_view& fragment_component : fragment_components) { + if (absl::StartsWith(fragment_component, "alt=")) { + directives.Add()->mutable_alt()->MergeFrom( + UdpaResourceIdentifier::decodeUrl(PercentEncoding::decode(fragment_component.substr(4)))); + } else if (absl::StartsWith(fragment_component, "entry=")) { + directives.Add()->set_entry(PercentEncoding::decode(fragment_component.substr(6))); + } else { + throw UdpaResourceIdentifier::DecodeException( + fmt::format("Unknown fragment component {}", fragment_component)); + ; + } + } +} + +} // namespace + +udpa::core::v1::ResourceName UdpaResourceIdentifier::decodeUrn(absl::string_view resource_urn) { + if (!absl::StartsWith(resource_urn, "udpa:")) { + throw UdpaResourceIdentifier::DecodeException( + fmt::format("{} does not have an udpa: scheme", resource_urn)); } 
absl::string_view host, path; - Http::Utility::extractHostPathFromUri(resource_uri, host, path); + Http::Utility::extractHostPathFromUri(resource_urn, host, path); udpa::core::v1::ResourceName decoded_resource_name; decoded_resource_name.set_authority(PercentEncoding::decode(host)); const size_t query_params_start = path.find('?'); - Http::Utility::QueryParams query_params; if (query_params_start != absl::string_view::npos) { - query_params = Http::Utility::parseQueryString(path.substr(query_params_start)); - for (const auto& it : query_params) { - (*decoded_resource_name.mutable_context() - ->mutable_params())[PercentEncoding::decode(it.first)] = - PercentEncoding::decode(it.second); - } + decodeQueryParams(path.substr(query_params_start), *decoded_resource_name.mutable_context()); path = path.substr(0, query_params_start); } - // This is guaranteed by Http::Utility::extractHostPathFromUri. - ASSERT(absl::StartsWith(path, "/")); - const std::vector path_components = absl::StrSplit(path.substr(1), '/'); - decoded_resource_name.set_resource_type(std::string(path_components[0])); - if (decoded_resource_name.resource_type().empty()) { - throw UdpaResourceName::DecodeException( - fmt::format("Qualified type missing from {}", resource_uri)); + decodePath(path, decoded_resource_name.mutable_resource_type(), + *decoded_resource_name.mutable_id()); + return decoded_resource_name; +} + +udpa::core::v1::ResourceLocator UdpaResourceIdentifier::decodeUrl(absl::string_view resource_url) { + absl::string_view host, path; + Http::Utility::extractHostPathFromUri(resource_url, host, path); + udpa::core::v1::ResourceLocator decoded_resource_locator; + const size_t fragment_start = path.find('#'); + if (fragment_start != absl::string_view::npos) { + decodeFragment(path.substr(fragment_start + 1), *decoded_resource_locator.mutable_directives()); + path = path.substr(0, fragment_start); } - for (auto it = std::next(path_components.cbegin()); it != path_components.cend(); it++) { - decoded_resource_name.add_id(PercentEncoding::decode(*it)); + if (absl::StartsWith(resource_url, "udpa:")) { + decoded_resource_locator.set_scheme(udpa::core::v1::ResourceLocator::UDPA); + } else if (absl::StartsWith(resource_url, "http:")) { + decoded_resource_locator.set_scheme(udpa::core::v1::ResourceLocator::HTTP); + } else if (absl::StartsWith(resource_url, "file:")) { + decoded_resource_locator.set_scheme(udpa::core::v1::ResourceLocator::FILE); + // File URLs only have a path and fragment. 
+ decodePath(path, nullptr, *decoded_resource_locator.mutable_id()); + return decoded_resource_locator; + } else { + throw UdpaResourceIdentifier::DecodeException( + fmt::format("{} does not have a udpa:, http: or file: scheme", resource_url)); } - return decoded_resource_name; + decoded_resource_locator.set_authority(PercentEncoding::decode(host)); + const size_t query_params_start = path.find('?'); + if (query_params_start != absl::string_view::npos) { + decodeQueryParams(path.substr(query_params_start), + *decoded_resource_locator.mutable_exact_context()); + path = path.substr(0, query_params_start); + } + decodePath(path, decoded_resource_locator.mutable_resource_type(), + *decoded_resource_locator.mutable_id()); + return decoded_resource_locator; } } // namespace Config diff --git a/source/common/config/udpa_resource.h b/source/common/config/udpa_resource.h index 8c81eab9143b..5f90dcf1b042 100644 --- a/source/common/config/udpa_resource.h +++ b/source/common/config/udpa_resource.h @@ -1,13 +1,14 @@ #include "envoy/common/exception.h" #include "absl/strings/string_view.h" +#include "udpa/core/v1/resource_locator.pb.h" #include "udpa/core/v1/resource_name.pb.h" namespace Envoy { namespace Config { -// Utilities for URI encoding/decoding of udpa::core::v1::ResourceName. -class UdpaResourceName { +// Utilities for URI encoding/decoding of udpa::core::v1::Resource{Name,Locator}. +class UdpaResourceIdentifier { public: // Options for encoded URIs. struct EncodeOptions { @@ -16,16 +17,29 @@ class UdpaResourceName { }; /** - * Encode a udpa::core::v1::ResourceName message as a udpa:// URI string. + * Encode a udpa::core::v1::ResourceName message as a udpa:// URN string. * * @param resource_name resource name message. * @param options encoding options. - * @return std::string udpa:// URI for resource_name. + * @return std::string udpa:// URN for resource_name. */ - static std::string encodeUri(const udpa::core::v1::ResourceName& resource_name, + static std::string encodeUrn(const udpa::core::v1::ResourceName& resource_name, const EncodeOptions& options); - static std::string encodeUri(const udpa::core::v1::ResourceName& resource_name) { - return encodeUri(resource_name, {}); + static std::string encodeUrn(const udpa::core::v1::ResourceName& resource_name) { + return encodeUrn(resource_name, {}); + } + + /** + * Encode a udpa::core::v1::ResourceLocator message as a udpa:// URL string. + * + * @param resource_name resource name message. + * @param options encoding options. + * @return std::string udpa:// URL for resource_name. + */ + static std::string encodeUrl(const udpa::core::v1::ResourceLocator& resource_locator, + const EncodeOptions& options); + static std::string encodeUrl(const udpa::core::v1::ResourceLocator& resource_locator) { + return encodeUrl(resource_locator, {}); } // Thrown when an exception occurs during URI decoding. @@ -35,13 +49,22 @@ class UdpaResourceName { }; /** - * Decode a udpa:// URI string to a udpa::core::v1::ResourceName. + * Decode a udpa:// URN string to a udpa::core::v1::ResourceName. + * + * @param resource_urn udpa:// resource URN. + * @return udpa::core::v1::ResourceName resource name message for resource_urn. + * @throws DecodeException when parsing fails. + */ + static udpa::core::v1::ResourceName decodeUrn(absl::string_view resource_urn); + + /** + * Decode a udpa:// URL string to a udpa::core::v1::ResourceLocator. * - * @param resource_uri udpa:// resource URI. - * @return udpa::core::v1::ResourceName resource name message for resource_uri. 
+ * @param resource_url udpa:// resource URL. + * @return udpa::core::v1::ResourceLocator resource name message for resource_url. * @throws DecodeException when parsing fails. */ - static udpa::core::v1::ResourceName decodeUri(absl::string_view resource_uri); + static udpa::core::v1::ResourceLocator decodeUrl(absl::string_view resource_url); }; } // namespace Config diff --git a/test/common/config/udpa_resource_test.cc b/test/common/config/udpa_resource_test.cc index ad05b37ed8e5..0cf6aeef45f8 100644 --- a/test/common/config/udpa_resource_test.cc +++ b/test/common/config/udpa_resource_test.cc @@ -4,20 +4,35 @@ #include "gtest/gtest.h" +using ::testing::ElementsAre; +using ::testing::Pair; +using ::testing::UnorderedElementsAre; + +#define EXPECT_CONTEXT_PARAMS(context_params, ...) \ + { \ + std::map param_map((context_params).begin(), \ + (context_params).end()); \ + EXPECT_THAT(param_map, UnorderedElementsAre(__VA_ARGS__)); \ + } + namespace Envoy { namespace Config { namespace { -const std::string EscapedUri = +const std::string EscapedUrn = "udpa://f123%25%2F%3F%23o/envoy.config.listener.v3.Listener/b%25%3A%2F%3F%23%5B%5Dar//" "baz?%25%23%5B%5D%26%3Dab=cde%25%23%5B%5D%26%3Df"; -const std::string EscapedUriWithManyQueryParams = +const std::string EscapedUrnWithManyQueryParams = "udpa://f123%25%2F%3F%23o/envoy.config.listener.v3.Listener/b%25%3A%2F%3F%23%5B%5Dar//" "baz?%25%23%5B%5D%26%3D=bar&%25%23%5B%5D%26%3Dab=cde%25%23%5B%5D%26%3Df&foo=%25%23%5B%5D%26%3D"; +const std::string EscapedUrlWithManyQueryParamsAndDirectives = + EscapedUrnWithManyQueryParams + + "#entry=some_en%25%23%5B%5D%2Ctry,alt=udpa://fo%2525%252F%253F%2523o/bar%23alt=udpa://bar/" + "baz%2Centry=h%2525%2523%255B%255D%252Cuh"; // for all x. encodeUri(decodeUri(x)) = x where x comes from sample of valid udpa:// URIs. // TODO(htuch): write a fuzzer that validates this property as well. -TEST(UdpaResourceNameTest, DecodeEncode) { +TEST(UdpaResourceIdentifierTest, DecodeEncode) { const std::vector uris = { "udpa:///envoy.config.listener.v3.Listener", "udpa://foo/envoy.config.listener.v3.Listener", @@ -30,55 +45,144 @@ TEST(UdpaResourceNameTest, DecodeEncode) { "udpa://foo/envoy.config.listener.v3.Listener/bar/baz?ab=", "udpa://foo/envoy.config.listener.v3.Listener/bar/baz?=cd", "udpa://foo/envoy.config.listener.v3.Listener/bar/baz?ab=cde&ba=edc&z=f", - EscapedUri, - EscapedUriWithManyQueryParams, + EscapedUrn, + EscapedUrnWithManyQueryParams, }; - UdpaResourceName::EncodeOptions encode_options; + UdpaResourceIdentifier::EncodeOptions encode_options; encode_options.sort_context_params_ = true; for (const std::string& uri : uris) { - EXPECT_EQ(uri, UdpaResourceName::encodeUri(UdpaResourceName::decodeUri(uri), encode_options)); + EXPECT_EQ(uri, UdpaResourceIdentifier::encodeUrn(UdpaResourceIdentifier::decodeUrn(uri), + encode_options)); + EXPECT_EQ(uri, UdpaResourceIdentifier::encodeUrl(UdpaResourceIdentifier::decodeUrl(uri), + encode_options)); } } -// Validate that URI decoding behaves as expected component-wise. +// Validate that URN decoding behaves as expected component-wise. 
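// For reference, EscapedUrnWithManyQueryParams above decodes to authority
// "f123%/?#o", resource type "envoy.config.listener.v3.Listener", id path
// {"b%:/?#[]ar", "", "baz"} and context params {"%#[]&=": "bar",
// "%#[]&=ab": "cde%#[]&=f", "foo": "%#[]&="}, as asserted by the test below.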
TEST(UdpaResourceNameTest, DecodeSuccess) { - const auto resource_name = UdpaResourceName::decodeUri(EscapedUriWithManyQueryParams); + const auto resource_name = UdpaResourceIdentifier::decodeUrn(EscapedUrnWithManyQueryParams); EXPECT_EQ("f123%/?#o", resource_name.authority()); EXPECT_EQ("envoy.config.listener.v3.Listener", resource_name.resource_type()); - EXPECT_EQ(3, resource_name.id().size()); - EXPECT_EQ("b%:/?#[]ar", resource_name.id()[0]); - EXPECT_EQ("", resource_name.id()[1]); - EXPECT_EQ("baz", resource_name.id()[2]); - EXPECT_EQ(3, resource_name.context().params().size()); - EXPECT_EQ("bar", resource_name.context().params().at("%#[]&=")); - EXPECT_EQ("cde%#[]&=f", resource_name.context().params().at("%#[]&=ab")); - EXPECT_EQ("%#[]&=", resource_name.context().params().at("foo")); + EXPECT_THAT(resource_name.id(), ElementsAre("b%:/?#[]ar", "", "baz")); + EXPECT_CONTEXT_PARAMS(resource_name.context().params(), Pair("%#[]&=", "bar"), + Pair("%#[]&=ab", "cde%#[]&=f"), Pair("foo", "%#[]&=")); } -// Validate that the URI decoding behaves with a near-empty UDPA resource name. -TEST(UdpaResourceNameTest, DecodeEmpty) { +// Validate that URL decoding behaves as expected component-wise. +TEST(UdpaResourceLocatorTest, DecodeSuccess) { + const auto resource_locator = + UdpaResourceIdentifier::decodeUrl(EscapedUrlWithManyQueryParamsAndDirectives); + EXPECT_EQ("f123%/?#o", resource_locator.authority()); + EXPECT_EQ("envoy.config.listener.v3.Listener", resource_locator.resource_type()); + EXPECT_THAT(resource_locator.id(), ElementsAre("b%:/?#[]ar", "", "baz")); + EXPECT_CONTEXT_PARAMS(resource_locator.exact_context().params(), Pair("%#[]&=", "bar"), + Pair("%#[]&=ab", "cde%#[]&=f"), Pair("foo", "%#[]&=")); + EXPECT_EQ(2, resource_locator.directives().size()); + EXPECT_EQ("some_en%#[],try", resource_locator.directives()[0].entry()); + const auto& alt = resource_locator.directives()[1].alt(); + EXPECT_EQ("fo%/?#o", alt.authority()); + EXPECT_EQ("bar", alt.resource_type()); + EXPECT_EQ(2, alt.directives().size()); + const auto& inner_alt = alt.directives()[0].alt(); + EXPECT_EQ("bar", inner_alt.authority()); + EXPECT_EQ("baz", inner_alt.resource_type()); + EXPECT_EQ("h%#[],uh", alt.directives()[1].entry()); +} + +// Validate that the URN decoding behaves with a near-empty UDPA resource name. +TEST(UdpaResourceLocatorTest, DecodeEmpty) { const auto resource_name = - UdpaResourceName::decodeUri("udpa:///envoy.config.listener.v3.Listener"); + UdpaResourceIdentifier::decodeUrn("udpa:///envoy.config.listener.v3.Listener"); EXPECT_TRUE(resource_name.authority().empty()); EXPECT_EQ("envoy.config.listener.v3.Listener", resource_name.resource_type()); EXPECT_TRUE(resource_name.id().empty()); EXPECT_TRUE(resource_name.context().params().empty()); } -// Negative tests for URI decoding. +// Validate that the URL decoding behaves with a near-empty UDPA resource locator. +TEST(UdpaResourceNameTest, DecodeEmpty) { + const auto resource_locator = + UdpaResourceIdentifier::decodeUrl("udpa:///envoy.config.listener.v3.Listener"); + EXPECT_TRUE(resource_locator.authority().empty()); + EXPECT_EQ("envoy.config.listener.v3.Listener", resource_locator.resource_type()); + EXPECT_TRUE(resource_locator.id().empty()); + EXPECT_TRUE(resource_locator.exact_context().params().empty()); + EXPECT_TRUE(resource_locator.directives().empty()); +} + +// Negative tests for URN decoding. 
TEST(UdpaResourceNameTest, DecodeFail) { { - EXPECT_THROW_WITH_MESSAGE(UdpaResourceName::decodeUri("foo://"), - UdpaResourceName::DecodeException, - "foo:// does not have an udpa scheme"); + EXPECT_THROW_WITH_MESSAGE(UdpaResourceIdentifier::decodeUrn("foo://"), + UdpaResourceIdentifier::DecodeException, + "foo:// does not have an udpa: scheme"); + } + { + EXPECT_THROW_WITH_MESSAGE(UdpaResourceIdentifier::decodeUrn("udpa://foo"), + UdpaResourceIdentifier::DecodeException, + "Resource type missing from /"); + } +} + +// Negative tests for URL decoding. +TEST(UdpaResourceLocatorTest, DecodeFail) { + { + EXPECT_THROW_WITH_MESSAGE(UdpaResourceIdentifier::decodeUrl("foo://"), + UdpaResourceIdentifier::DecodeException, + "foo:// does not have a udpa:, http: or file: scheme"); + } + { + EXPECT_THROW_WITH_MESSAGE(UdpaResourceIdentifier::decodeUrl("udpa://foo"), + UdpaResourceIdentifier::DecodeException, + "Resource type missing from /"); } { - EXPECT_THROW_WITH_MESSAGE(UdpaResourceName::decodeUri("udpa://foo"), - UdpaResourceName::DecodeException, - "Qualified type missing from udpa://foo"); + EXPECT_THROW_WITH_MESSAGE(UdpaResourceIdentifier::decodeUrl("udpa://foo/some-type#bar=baz"), + UdpaResourceIdentifier::DecodeException, + "Unknown fragment component bar=baz"); } } +// Validate parsing for udpa:, http: and file: schemes. +TEST(UdpaResourceLocatorTest, Schemes) { + { + const auto resource_locator = + UdpaResourceIdentifier::decodeUrl("udpa://foo/bar/baz/blah?a=b#entry=m"); + EXPECT_EQ(udpa::core::v1::ResourceLocator::UDPA, resource_locator.scheme()); + EXPECT_EQ("foo", resource_locator.authority()); + EXPECT_EQ("bar", resource_locator.resource_type()); + EXPECT_THAT(resource_locator.id(), ElementsAre("baz", "blah")); + EXPECT_CONTEXT_PARAMS(resource_locator.exact_context().params(), Pair("a", "b")); + EXPECT_EQ(1, resource_locator.directives().size()); + EXPECT_EQ("m", resource_locator.directives()[0].entry()); + EXPECT_EQ("udpa://foo/bar/baz/blah?a=b#entry=m", + UdpaResourceIdentifier::encodeUrl(resource_locator)); + } + { + const auto resource_locator = + UdpaResourceIdentifier::decodeUrl("http://foo/bar/baz/blah?a=b#entry=m"); + EXPECT_EQ(udpa::core::v1::ResourceLocator::HTTP, resource_locator.scheme()); + EXPECT_EQ("foo", resource_locator.authority()); + EXPECT_EQ("bar", resource_locator.resource_type()); + EXPECT_THAT(resource_locator.id(), ElementsAre("baz", "blah")); + EXPECT_CONTEXT_PARAMS(resource_locator.exact_context().params(), Pair("a", "b")); + EXPECT_EQ(1, resource_locator.directives().size()); + EXPECT_EQ("m", resource_locator.directives()[0].entry()); + EXPECT_EQ("http://foo/bar/baz/blah?a=b#entry=m", + UdpaResourceIdentifier::encodeUrl(resource_locator)); + } + { + const auto resource_locator = UdpaResourceIdentifier::decodeUrl("file:///bar/baz/blah#entry=m"); + EXPECT_EQ(udpa::core::v1::ResourceLocator::FILE, resource_locator.scheme()); + EXPECT_THAT(resource_locator.id(), ElementsAre("bar", "baz", "blah")); + EXPECT_EQ(1, resource_locator.directives().size()); + EXPECT_EQ("m", resource_locator.directives()[0].entry()); + EXPECT_EQ("file:///bar/baz/blah#entry=m", UdpaResourceIdentifier::encodeUrl(resource_locator)); + } +} + +// extra tests for fragment handling + } // namespace } // namespace Config } // namespace Envoy From 9b86ed6085c8ed68328f8814d20bf25889a9488d Mon Sep 17 00:00:00 2001 From: htuch Date: Wed, 1 Jul 2020 11:16:59 -0400 Subject: [PATCH 501/909] discovery: refactor configuration ingestion. 
(#11638) Previously, the gRPC muxes required decoding an opaque resource to obtain its name, then dispatch to the relevant subscription, which would again decode the opaque resource. This is pretty horrible efficiency wise, in particular when upgrading from v2 -> v3. In this patch, we introduce a DecodedResource wrapper and OpaqueResourceDecoder. The config ingestion module, e.g. GrpcMuxImpl, uses the OpaqueResourceDecoder to produce a typed DecodedResource, performing the decode once. This DecodedResource is then dispatched to the watching subscription. This provides > 20% speedup on the v2 -> v3 tax for eds_speed_test, decreasing from an overhead of 3.2x to 2.5x. It's also likely to unlock further optimizations as we now have a wrapper resource and simplifies subscription implementations, as they no longer need to deal with delta vs. SotW resource decoding in different ways. Risk level: Medium (configuration ingestion path changes). Testing: New unit tests for DecodedResourceImpl/OpaqueResourceDecoderImpl, updated existing unit tests to work with new interfaces. Partial solution to #11362 Signed-off-by: Harvey Tuch --- include/envoy/config/grpc_mux.h | 4 +- include/envoy/config/subscription.h | 105 ++++++++++- include/envoy/config/subscription_factory.h | 5 +- include/envoy/router/BUILD | 1 + .../router/route_config_update_receiver.h | 15 +- source/common/config/BUILD | 25 +++ source/common/config/decoded_resource_impl.h | 77 ++++++++ .../common/config/delta_subscription_state.cc | 10 +- .../common/config/delta_subscription_state.h | 6 +- .../config/filesystem_subscription_impl.cc | 11 +- .../config/filesystem_subscription_impl.h | 4 +- source/common/config/grpc_mux_impl.cc | 35 ++-- source/common/config/grpc_mux_impl.h | 15 +- .../common/config/grpc_subscription_impl.cc | 29 ++-- source/common/config/grpc_subscription_impl.h | 20 +-- .../common/config/http_subscription_impl.cc | 15 +- source/common/config/http_subscription_impl.h | 5 +- source/common/config/new_grpc_mux_impl.cc | 7 +- source/common/config/new_grpc_mux_impl.h | 3 +- .../config/opaque_resource_decoder_impl.h | 37 ++++ source/common/config/subscription_base.h | 12 +- .../config/subscription_factory_impl.cc | 16 +- .../common/config/subscription_factory_impl.h | 3 +- source/common/config/watch_map.cc | 30 +++- source/common/config/watch_map.h | 13 +- source/common/protobuf/utility.h | 35 +++- source/common/router/rds_impl.cc | 24 ++- source/common/router/rds_impl.h | 14 +- .../route_config_update_receiver_impl.cc | 39 +---- .../route_config_update_receiver_impl.h | 21 +-- source/common/router/scoped_rds.cc | 46 ++--- source/common/router/scoped_rds.h | 22 +-- source/common/router/vhds.cc | 25 ++- source/common/router/vhds.h | 8 +- source/common/runtime/runtime_impl.cc | 23 +-- source/common/runtime/runtime_impl.h | 14 +- source/common/secret/sds_api.cc | 25 ++- source/common/secret/sds_api.h | 13 +- source/common/upstream/BUILD | 1 + source/common/upstream/cds_api_impl.cc | 40 ++--- source/common/upstream/cds_api_impl.h | 14 +- source/common/upstream/eds.cc | 40 ++--- source/common/upstream/eds.h | 13 +- source/server/lds_api.cc | 47 ++--- source/server/lds_api.h | 14 +- test/common/config/BUILD | 21 +++ .../config/config_provider_impl_test.cc | 63 ++++--- .../config/decoded_resource_impl_test.cc | 84 +++++++++ .../config/delta_subscription_impl_test.cc | 5 +- .../config/delta_subscription_state_test.cc | 2 +- .../config/delta_subscription_test_harness.h | 7 +- .../filesystem_subscription_impl_test.cc | 4 +- 
.../filesystem_subscription_test_harness.h | 11 +- test/common/config/grpc_mux_impl_test.cc | 141 +++++++-------- .../config/grpc_subscription_test_harness.h | 12 +- .../config/http_subscription_test_harness.h | 11 +- test/common/config/new_grpc_mux_impl_test.cc | 27 ++- .../opaque_resource_decoder_impl_test.cc | 106 +++++++++++ .../config/subscription_factory_impl_test.cc | 7 +- test/common/config/watch_map_test.cc | 148 ++++++++-------- test/common/router/rds_impl_test.cc | 61 +++---- test/common/router/scoped_rds_test.cc | 164 ++++++------------ test/common/router/vhds_test.cc | 36 +--- test/common/runtime/runtime_impl_test.cc | 41 ++--- test/common/secret/sds_api_test.cc | 83 ++++----- .../common/secret/secret_manager_impl_test.cc | 36 ++-- test/common/upstream/cds_api_impl_test.cc | 68 ++++---- test/common/upstream/eds_speed_test.cc | 4 +- test/common/upstream/eds_test.cc | 97 +++-------- test/integration/vhds_integration_test.cc | 4 +- test/mocks/config/mocks.cc | 30 ++-- test/mocks/config/mocks.h | 30 +++- test/server/lds_api_test.cc | 126 +++++--------- test/test_common/BUILD | 2 + test/test_common/utility.h | 97 +++++++++++ 75 files changed, 1442 insertions(+), 1067 deletions(-) create mode 100644 source/common/config/decoded_resource_impl.h create mode 100644 source/common/config/opaque_resource_decoder_impl.h create mode 100644 test/common/config/decoded_resource_impl_test.cc create mode 100644 test/common/config/opaque_resource_decoder_impl_test.cc diff --git a/include/envoy/config/grpc_mux.h b/include/envoy/config/grpc_mux.h index 35729b9f7ea9..123026bb6087 100644 --- a/include/envoy/config/grpc_mux.h +++ b/include/envoy/config/grpc_mux.h @@ -113,12 +113,14 @@ class GrpcMux { * resources for type_url will result in callbacks. * @param callbacks the callbacks to be notified of configuration updates. These must be valid * until GrpcMuxWatch is destroyed. + * @param resource_decoder how incoming opaque resource objects are to be decoded. * @return GrpcMuxWatchPtr a handle to cancel the subscription with. E.g. when a cluster goes * away, its EDS updates should be cancelled by destroying the GrpcMuxWatchPtr. */ virtual GrpcMuxWatchPtr addWatch(const std::string& type_url, const std::set& resources, - SubscriptionCallbacks& callbacks) PURE; + SubscriptionCallbacks& callbacks, + OpaqueResourceDecoder& resource_decoder) PURE; }; using GrpcMuxPtr = std::unique_ptr; diff --git a/include/envoy/config/subscription.h b/include/envoy/config/subscription.h index 5b041f2464e4..c05a6d567d70 100644 --- a/include/envoy/config/subscription.h +++ b/include/envoy/config/subscription.h @@ -25,10 +25,109 @@ enum class ConfigUpdateFailureReason { UpdateRejected }; +/** + * A wrapper for xDS resources that have been deserialized from the wire. + */ +class DecodedResource { +public: + virtual ~DecodedResource() = default; + + /** + * @return const std::string& resource name. + */ + virtual const std::string& name() const PURE; + + /** + * @return const std::vector& aliases() const PURE; + + /** + * @return const std::string& resource version. + */ + virtual const std::string& version() const PURE; + + /** + * @return const Protobuf::Message& resource message reference. If hasResource() is false, this + * will be the empty message. + */ + virtual const Protobuf::Message& resource() const PURE; + + /** + * @return bool does the xDS discovery response have a set resource payload? 
+ */ + virtual bool hasResource() const PURE; +}; + +using DecodedResourcePtr = std::unique_ptr; +using DecodedResourceRef = std::reference_wrapper; + +class OpaqueResourceDecoder { +public: + virtual ~OpaqueResourceDecoder() = default; + + /** + * @param resource some opaque resource (ProtobufWkt::Any). + * @return ProtobufTypes::MessagePtr decoded protobuf message in the opaque resource, e.g. the + * RouteConfiguration for an Any containing envoy.config.route.v3.RouteConfiguration. + */ + virtual ProtobufTypes::MessagePtr decodeResource(const ProtobufWkt::Any& resource) PURE; + + /** + * @param resource some opaque resource (Protobuf::Message). + * @return std::String the resource name in a Protobuf::Message returned by decodeResource(), e.g. + * the route config name for a envoy.config.route.v3.RouteConfiguration message. + */ + virtual std::string resourceName(const Protobuf::Message& resource) PURE; +}; + +/** + * Subscription to DecodedResources. + */ class SubscriptionCallbacks { public: virtual ~SubscriptionCallbacks() = default; + /** + * Called when a state-of-the-world configuration update is received. (State-of-the-world is + * everything other than delta gRPC - filesystem, HTTP, non-delta gRPC). + * @param resources vector of fetched resources corresponding to the configuration update. + * @param version_info supplies the version information as supplied by the xDS discovery response. + * @throw EnvoyException with reason if the configuration is rejected. Otherwise the configuration + * is accepted. Accepted configurations have their version_info reflected in subsequent + * requests. + */ + virtual void onConfigUpdate(const std::vector& resources, + const std::string& version_info) PURE; + + /** + * Called when a delta configuration update is received. + * @param added_resources resources newly added since the previous fetch. + * @param removed_resources names of resources that this fetch instructed to be removed. + * @param system_version_info aggregate response data "version", for debugging. + * @throw EnvoyException with reason if the config changes are rejected. Otherwise the changes + * are accepted. Accepted changes have their version_info reflected in subsequent requests. + */ + virtual void onConfigUpdate(const std::vector& added_resources, + const Protobuf::RepeatedPtrField& removed_resources, + const std::string& system_version_info) PURE; + + /** + * Called when either the Subscription is unable to fetch a config update or when onConfigUpdate + * invokes an exception. + * @param reason supplies the update failure reason. + * @param e supplies any exception data on why the fetch failed. May be nullptr. + */ + virtual void onConfigUpdateFailed(ConfigUpdateFailureReason reason, const EnvoyException* e) PURE; +}; + +/** + * Invoked when raw config received from xDS wire. + */ +class UntypedConfigUpdateCallbacks { +public: + virtual ~UntypedConfigUpdateCallbacks() = default; + /** * Called when a state-of-the-world configuration update is received. (State-of-the-world is * everything other than delta gRPC - filesystem, HTTP, non-delta gRPC). @@ -61,12 +160,6 @@ class SubscriptionCallbacks { * @param e supplies any exception data on why the fetch failed. May be nullptr. */ virtual void onConfigUpdateFailed(ConfigUpdateFailureReason reason, const EnvoyException* e) PURE; - - /** - * Obtain the "name" of a v2 API resource in a google.protobuf.Any, e.g. the route config name for - * a RouteConfiguration, based on the underlying resource type. 
- */ - virtual std::string resourceName(const ProtobufWkt::Any& resource) PURE; }; /** diff --git a/include/envoy/config/subscription_factory.h b/include/envoy/config/subscription_factory.h index 0676c7368837..3d67d5526692 100644 --- a/include/envoy/config/subscription_factory.h +++ b/include/envoy/config/subscription_factory.h @@ -19,12 +19,15 @@ class SubscriptionFactory { * @param scope stats scope for any stats tracked by the subscription. * @param callbacks the callbacks needed by all Subscription objects, to deliver config updates. * The callbacks must not result in the deletion of the Subscription object. + * @param resource_decoder how incoming opaque resource objects are to be decoded. + * * @return SubscriptionPtr subscription object corresponding for config and type_url. */ virtual SubscriptionPtr subscriptionFromConfigSource(const envoy::config::core::v3::ConfigSource& config, absl::string_view type_url, Stats::Scope& scope, - SubscriptionCallbacks& callbacks) PURE; + SubscriptionCallbacks& callbacks, + OpaqueResourceDecoder& resource_decoder) PURE; }; } // namespace Config diff --git a/include/envoy/router/BUILD b/include/envoy/router/BUILD index deb5de97578b..85b6058ed878 100644 --- a/include/envoy/router/BUILD +++ b/include/envoy/router/BUILD @@ -13,6 +13,7 @@ envoy_cc_library( hdrs = ["rds.h"], deps = [ ":router_interface", + "//include/envoy/http:filter_interface", "@envoy_api//envoy/config/route/v3:pkg_cc_proto", ], ) diff --git a/include/envoy/router/route_config_update_receiver.h b/include/envoy/router/route_config_update_receiver.h index 717f3c017758..8f14d26ceec8 100644 --- a/include/envoy/router/route_config_update_receiver.h +++ b/include/envoy/router/route_config_update_receiver.h @@ -31,18 +31,21 @@ class RouteConfigUpdateReceiver { virtual bool onRdsUpdate(const envoy::config::route::v3::RouteConfiguration& rc, const std::string& version_info) PURE; + using VirtualHostRefVector = + std::vector>; + /** * Called on updates via VHDS. - * @param added_resources supplies Resources (each containing a VirtualHost) that have been - * added. + * @param added_vhosts supplies VirtualHosts that have been added. + * @param added_resource_ids set of resources IDs (names + aliases) added. * @param removed_resources supplies names of VirtualHosts that have been removed. * @param version_info supplies RouteConfiguration version. * @return bool whether RouteConfiguration has been updated. */ - virtual bool onVhdsUpdate( - const Protobuf::RepeatedPtrField& added_resources, - const Protobuf::RepeatedPtrField& removed_resources, - const std::string& version_info) PURE; + virtual bool onVhdsUpdate(const VirtualHostRefVector& added_vhosts, + const std::set& added_resource_ids, + const Protobuf::RepeatedPtrField& removed_resources, + const std::string& version_info) PURE; /** * @return std::string& the name of RouteConfiguration. 
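To make the new ingestion flow concrete, the following is a minimal, hypothetical consumer of the typed SubscriptionCallbacks interface introduced above. The class, member names and bookkeeping are illustrative only; the overridden signatures are the ones added to include/envoy/config/subscription.h in this patch.

#include <algorithm>
#include <string>
#include <vector>

#include "envoy/config/subscription.h"

namespace Envoy {

// Hypothetical consumer: resources arrive already decoded (exactly once, by an
// OpaqueResourceDecoder owned by the subscription), so no per-watch Any
// unpacking is required here.
class ExampleTypedCallbacks : public Config::SubscriptionCallbacks {
public:
  // State-of-the-world update: replace the tracked resource names.
  void onConfigUpdate(const std::vector<Config::DecodedResourceRef>& resources,
                      const std::string& version_info) override {
    names_.clear();
    for (const Config::DecodedResourceRef& ref : resources) {
      // ref.get().resource() would give the typed Protobuf::Message.
      names_.push_back(ref.get().name());
    }
    version_ = version_info;
  }

  // Delta update: apply additions and removals incrementally.
  void onConfigUpdate(const std::vector<Config::DecodedResourceRef>& added_resources,
                      const Protobuf::RepeatedPtrField<std::string>& removed_resources,
                      const std::string& system_version_info) override {
    for (const Config::DecodedResourceRef& ref : added_resources) {
      names_.push_back(ref.get().name());
    }
    for (const std::string& removed : removed_resources) {
      names_.erase(std::remove(names_.begin(), names_.end(), removed), names_.end());
    }
    version_ = system_version_info;
  }

  void onConfigUpdateFailed(Config::ConfigUpdateFailureReason /*reason*/,
                            const EnvoyException* /*e*/) override {
    // Keep the last accepted configuration on failure.
  }

private:
  std::vector<std::string> names_;
  std::string version_;
};

} // namespace Envoy

Such callbacks would still be wired up through SubscriptionFactory::subscriptionFromConfigSource, which now also takes the OpaqueResourceDecoder used to produce the typed resources handed to them.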
diff --git a/source/common/config/BUILD b/source/common/config/BUILD index 4a57dc587013..50b2cc615c35 100644 --- a/source/common/config/BUILD +++ b/source/common/config/BUILD @@ -60,6 +60,15 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "decoded_resource_lib", + hdrs = ["decoded_resource_impl.h"], + deps = [ + "//include/envoy/config:subscription_interface", + "//source/common/protobuf:utility_lib", + ], +) + envoy_cc_library( name = "delta_subscription_state_lib", srcs = ["delta_subscription_state.cc"], @@ -68,6 +77,7 @@ envoy_cc_library( ":api_version_lib", ":pausable_ack_queue_lib", ":utility_lib", + ":watch_map_lib", "//include/envoy/config:subscription_interface", "//include/envoy/event:dispatcher_interface", "//source/common/common:assert_lib", @@ -86,6 +96,7 @@ envoy_cc_library( srcs = ["filesystem_subscription_impl.cc"], hdrs = ["filesystem_subscription_impl.h"], deps = [ + ":decoded_resource_lib", "//include/envoy/config:subscription_interface", "//include/envoy/event:dispatcher_interface", "//include/envoy/filesystem:filesystem_interface", @@ -122,6 +133,7 @@ envoy_cc_library( hdrs = ["grpc_mux_impl.h"], deps = [ ":api_version_lib", + ":decoded_resource_lib", ":grpc_stream_lib", ":utility_lib", "//include/envoy/config:grpc_mux_interface", @@ -131,6 +143,7 @@ envoy_cc_library( "//source/common/common:utility_lib", "//source/common/memory:utils_lib", "//source/common/protobuf", + "@com_google_absl//absl/container:btree", "@envoy_api//envoy/api/v2:pkg_cc_proto", "@envoy_api//envoy/service/discovery/v3:pkg_cc_proto", ], @@ -175,6 +188,7 @@ envoy_cc_library( ], deps = [ ":api_version_lib", + ":decoded_resource_lib", ":version_converter_lib", "//include/envoy/config:subscription_interface", "//include/envoy/event:dispatcher_interface", @@ -209,6 +223,15 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "opaque_resource_decoder_lib", + hdrs = ["opaque_resource_decoder_impl.h"], + deps = [ + "//include/envoy/config:subscription_interface", + "//source/common/protobuf:utility_lib", + ], +) + envoy_cc_library( name = "pausable_ack_queue_lib", srcs = ["pausable_ack_queue.cc"], @@ -367,6 +390,7 @@ envoy_cc_library( srcs = ["watch_map.cc"], hdrs = ["watch_map.h"], deps = [ + ":decoded_resource_lib", "//include/envoy/config:subscription_interface", "//source/common/common:assert_lib", "//source/common/common:minimal_logger_lib", @@ -379,6 +403,7 @@ envoy_cc_library( name = "subscription_base_interface", hdrs = ["subscription_base.h"], deps = [ + ":opaque_resource_decoder_lib", ":resource_name_lib", "//include/envoy/config:subscription_interface", ], diff --git a/source/common/config/decoded_resource_impl.h b/source/common/config/decoded_resource_impl.h new file mode 100644 index 000000000000..669878020302 --- /dev/null +++ b/source/common/config/decoded_resource_impl.h @@ -0,0 +1,77 @@ +#pragma once + +#include "envoy/config/subscription.h" + +#include "common/protobuf/utility.h" + +namespace Envoy { +namespace Config { + +namespace { + +std::vector +repeatedPtrFieldToVector(const Protobuf::RepeatedPtrField& xs) { + std::vector ys; + std::copy(xs.begin(), xs.end(), std::back_inserter(ys)); + return ys; +} + +} // namespace + +class DecodedResourceImpl : public DecodedResource { +public: + DecodedResourceImpl(OpaqueResourceDecoder& resource_decoder, const ProtobufWkt::Any& resource, + const std::string& version) + : DecodedResourceImpl(resource_decoder, {}, Protobuf::RepeatedPtrField(), + resource, true, version) {} + DecodedResourceImpl(OpaqueResourceDecoder& resource_decoder, + 
const envoy::service::discovery::v3::Resource& resource) + : DecodedResourceImpl(resource_decoder, resource.name(), resource.aliases(), + resource.resource(), resource.has_resource(), resource.version()) {} + DecodedResourceImpl(ProtobufTypes::MessagePtr resource, const std::string& name, + const std::vector& aliases, const std::string& version) + : resource_(std::move(resource)), has_resource_(true), name_(name), aliases_(aliases), + version_(version) {} + + // Config::DecodedResource + const std::string& name() const override { return name_; } + const std::vector& aliases() const override { return aliases_; } + const std::string& version() const override { return version_; }; + const Protobuf::Message& resource() const override { return *resource_; }; + bool hasResource() const override { return has_resource_; } + +private: + DecodedResourceImpl(OpaqueResourceDecoder& resource_decoder, absl::optional name, + const Protobuf::RepeatedPtrField& aliases, + const ProtobufWkt::Any& resource, bool has_resource, + const std::string& version) + : resource_(resource_decoder.decodeResource(resource)), has_resource_(has_resource), + name_(name ? *name : resource_decoder.resourceName(*resource_)), + aliases_(repeatedPtrFieldToVector(aliases)), version_(version) {} + + const ProtobufTypes::MessagePtr resource_; + const bool has_resource_; + const std::string name_; + const std::vector aliases_; + const std::string version_; +}; + +using DecodedResourceImplPtr = std::unique_ptr; + +struct DecodedResourcesWrapper { + DecodedResourcesWrapper() = default; + DecodedResourcesWrapper(OpaqueResourceDecoder& resource_decoder, + const Protobuf::RepeatedPtrField& resources, + const std::string& version) { + for (const auto& resource : resources) { + owned_resources_.emplace_back(new DecodedResourceImpl(resource_decoder, resource, version)); + refvec_.emplace_back(*owned_resources_.back()); + } + } + + std::vector owned_resources_; + std::vector refvec_; +}; + +} // namespace Config +} // namespace Envoy diff --git a/source/common/config/delta_subscription_state.cc b/source/common/config/delta_subscription_state.cc index 5db0ce6f0bca..2763fdfd9dff 100644 --- a/source/common/config/delta_subscription_state.cc +++ b/source/common/config/delta_subscription_state.cc @@ -10,9 +10,9 @@ namespace Envoy { namespace Config { DeltaSubscriptionState::DeltaSubscriptionState(std::string type_url, - SubscriptionCallbacks& callbacks, + UntypedConfigUpdateCallbacks& watch_map, const LocalInfo::LocalInfo& local_info) - : type_url_(std::move(type_url)), callbacks_(callbacks), local_info_(local_info) {} + : type_url_(std::move(type_url)), watch_map_(watch_map), local_info_(local_info) {} void DeltaSubscriptionState::updateSubscriptionInterest(const std::set& cur_added, const std::set& cur_removed) { @@ -81,7 +81,7 @@ void DeltaSubscriptionState::handleGoodResponse( fmt::format("duplicate name {} found in the union of added+removed resources", name)); } } - callbacks_.onConfigUpdate(message.resources(), message.removed_resources(), + watch_map_.onConfigUpdate(message.resources(), message.removed_resources(), message.system_version_info()); for (const auto& resource : message.resources()) { setResourceVersion(resource.name(), resource.version()); @@ -108,11 +108,11 @@ void DeltaSubscriptionState::handleBadResponse(const EnvoyException& e, UpdateAc ack.error_detail_.set_code(Grpc::Status::WellKnownGrpcStatus::Internal); ack.error_detail_.set_message(Config::Utility::truncateGrpcStatusMessage(e.what())); ENVOY_LOG(warn, "delta config 
for {} rejected: {}", type_url_, e.what()); - callbacks_.onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::UpdateRejected, &e); + watch_map_.onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::UpdateRejected, &e); } void DeltaSubscriptionState::handleEstablishmentFailure() { - callbacks_.onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure, + watch_map_.onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure, nullptr); } diff --git a/source/common/config/delta_subscription_state.h b/source/common/config/delta_subscription_state.h index 166b29608982..00693a1abe2c 100644 --- a/source/common/config/delta_subscription_state.h +++ b/source/common/config/delta_subscription_state.h @@ -11,6 +11,7 @@ #include "common/common/logger.h" #include "common/config/api_version.h" #include "common/config/pausable_ack_queue.h" +#include "common/config/watch_map.h" namespace Envoy { namespace Config { @@ -21,7 +22,7 @@ namespace Config { // being multiplexed together by ADS. class DeltaSubscriptionState : public Logger::Loggable { public: - DeltaSubscriptionState(std::string type_url, SubscriptionCallbacks& callbacks, + DeltaSubscriptionState(std::string type_url, UntypedConfigUpdateCallbacks& watch_map, const LocalInfo::LocalInfo& local_info); // Update which resources we're interested in subscribing to. @@ -86,8 +87,7 @@ class DeltaSubscriptionState : public Logger::Loggable { std::set resource_names_; const std::string type_url_; - // callbacks_ is expected to be a WatchMap. - SubscriptionCallbacks& callbacks_; + UntypedConfigUpdateCallbacks& watch_map_; const LocalInfo::LocalInfo& local_info_; std::chrono::milliseconds init_fetch_timeout_; diff --git a/source/common/config/filesystem_subscription_impl.cc b/source/common/config/filesystem_subscription_impl.cc index 8a42d1b42884..1373dc34c92e 100644 --- a/source/common/config/filesystem_subscription_impl.cc +++ b/source/common/config/filesystem_subscription_impl.cc @@ -4,6 +4,7 @@ #include "common/common/macros.h" #include "common/common/utility.h" +#include "common/config/decoded_resource_impl.h" #include "common/config/utility.h" #include "common/protobuf/protobuf.h" #include "common/protobuf/utility.h" @@ -13,9 +14,11 @@ namespace Config { FilesystemSubscriptionImpl::FilesystemSubscriptionImpl( Event::Dispatcher& dispatcher, absl::string_view path, SubscriptionCallbacks& callbacks, - SubscriptionStats stats, ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api) + OpaqueResourceDecoder& resource_decoder, SubscriptionStats stats, + ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api) : path_(path), watcher_(dispatcher.createFilesystemWatcher()), callbacks_(callbacks), - stats_(stats), api_(api), validation_visitor_(validation_visitor) { + resource_decoder_(resource_decoder), stats_(stats), api_(api), + validation_visitor_(validation_visitor) { watcher_->addWatch(path_, Filesystem::Watcher::Events::MovedTo, [this](uint32_t) { if (started_) { refresh(); @@ -51,7 +54,9 @@ void FilesystemSubscriptionImpl::refresh() { try { MessageUtil::loadFromFile(path_, message, validation_visitor_, api_); config_update_available = true; - callbacks_.onConfigUpdate(message.resources(), message.version_info()); + const auto decoded_resources = + DecodedResourcesWrapper(resource_decoder_, message.resources(), message.version_info()); + callbacks_.onConfigUpdate(decoded_resources.refvec_, message.version_info()); 
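// DecodedResourcesWrapper (decoded_resource_impl.h) decodes each opaque
// resource in the file exactly once: owned_resources_ keeps the
// DecodedResourceImpl objects alive, while refvec_ holds the non-owning
// DecodedResourceRef views passed to the typed callback above.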
stats_.update_time_.set(DateUtil::nowToMilliseconds(api_.timeSource())); stats_.version_.set(HashUtil::xxHash64(message.version_info())); stats_.version_text_.set(message.version_info()); diff --git a/source/common/config/filesystem_subscription_impl.h b/source/common/config/filesystem_subscription_impl.h index 39c86f1654da..75dd5f25b1e4 100644 --- a/source/common/config/filesystem_subscription_impl.h +++ b/source/common/config/filesystem_subscription_impl.h @@ -20,7 +20,8 @@ class FilesystemSubscriptionImpl : public Config::Subscription, Logger::Loggable { public: FilesystemSubscriptionImpl(Event::Dispatcher& dispatcher, absl::string_view path, - SubscriptionCallbacks& callbacks, SubscriptionStats stats, + SubscriptionCallbacks& callbacks, + OpaqueResourceDecoder& resource_decoder, SubscriptionStats stats, ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api); // Config::Subscription @@ -37,6 +38,7 @@ class FilesystemSubscriptionImpl : public Config::Subscription, const std::string path_; std::unique_ptr watcher_; SubscriptionCallbacks& callbacks_; + OpaqueResourceDecoder& resource_decoder_; SubscriptionStats stats_; Api::Api& api_; ProtobufMessage::ValidationVisitor& validation_visitor_; diff --git a/source/common/config/grpc_mux_impl.cc b/source/common/config/grpc_mux_impl.cc index 6da8cf9e54b7..87e4a5b86b71 100644 --- a/source/common/config/grpc_mux_impl.cc +++ b/source/common/config/grpc_mux_impl.cc @@ -4,11 +4,14 @@ #include "envoy/service/discovery/v3/discovery.pb.h" +#include "common/config/decoded_resource_impl.h" #include "common/config/utility.h" #include "common/config/version_converter.h" #include "common/memory/utils.h" #include "common/protobuf/protobuf.h" +#include "absl/container/btree_map.h" + namespace Envoy { namespace Config { @@ -70,8 +73,10 @@ void GrpcMuxImpl::sendDiscoveryRequest(const std::string& type_url) { GrpcMuxWatchPtr GrpcMuxImpl::addWatch(const std::string& type_url, const std::set& resources, - SubscriptionCallbacks& callbacks) { - auto watch = std::make_unique(resources, callbacks, type_url, *this); + SubscriptionCallbacks& callbacks, + OpaqueResourceDecoder& resource_decoder) { + auto watch = + std::make_unique(resources, callbacks, resource_decoder, type_url, *this); ENVOY_LOG(debug, "gRPC mux addWatch for " + type_url); // Lazily kick off the requests based on first subscription. This has the @@ -181,31 +186,37 @@ void GrpcMuxImpl::onDiscoveryResponse( // To avoid O(n^2) explosion (e.g. when we have 1000s of EDS watches), we // build a map here from resource name to resource and then walk watches_. // We have to walk all watches (and need an efficient map as a result) to - // ensure we deliver empty config updates when a resource is dropped. - std::unordered_map resources; - SubscriptionCallbacks& callbacks = api_state_[type_url].watches_.front()->callbacks_; + // ensure we deliver empty config updates when a resource is dropped. We make the map ordered + // for test determinism. 
+ std::vector resources; + absl::btree_map resource_ref_map; + std::vector all_resource_refs; + OpaqueResourceDecoder& resource_decoder = + api_state_[type_url].watches_.front()->resource_decoder_; for (const auto& resource : message->resources()) { if (type_url != resource.type_url()) { throw EnvoyException( fmt::format("{} does not match the message-wide type URL {} in DiscoveryResponse {}", resource.type_url(), type_url, message->DebugString())); } - const std::string resource_name = callbacks.resourceName(resource); - resources.emplace(resource_name, resource); + resources.emplace_back( + new DecodedResourceImpl(resource_decoder, resource, message->version_info())); + all_resource_refs.emplace_back(*resources.back()); + resource_ref_map.emplace(resources.back()->name(), *resources.back()); } for (auto watch : api_state_[type_url].watches_) { // onConfigUpdate should be called in all cases for single watch xDS (Cluster and // Listener) even if the message does not have resources so that update_empty stat // is properly incremented and state-of-the-world semantics are maintained. if (watch->resources_.empty()) { - watch->callbacks_.onConfigUpdate(message->resources(), message->version_info()); + watch->callbacks_.onConfigUpdate(all_resource_refs, message->version_info()); continue; } - Protobuf::RepeatedPtrField found_resources; + std::vector found_resources; for (const auto& watched_resource_name : watch->resources_) { - auto it = resources.find(watched_resource_name); - if (it != resources.end()) { - found_resources.Add()->MergeFrom(it->second); + auto it = resource_ref_map.find(watched_resource_name); + if (it != resource_ref_map.end()) { + found_resources.emplace_back(it->second); } } // onConfigUpdate should be called only on watches(clusters/routes) that have diff --git a/source/common/config/grpc_mux_impl.h b/source/common/config/grpc_mux_impl.h index 00120fed44f0..bb1d87e97fcd 100644 --- a/source/common/config/grpc_mux_impl.h +++ b/source/common/config/grpc_mux_impl.h @@ -46,7 +46,8 @@ class GrpcMuxImpl : public GrpcMux, bool paused(const std::vector type_urls) const override; GrpcMuxWatchPtr addWatch(const std::string& type_url, const std::set& resources, - SubscriptionCallbacks& callbacks) override; + SubscriptionCallbacks& callbacks, + OpaqueResourceDecoder& resource_decoder) override; void handleDiscoveryResponse( std::unique_ptr&& message); @@ -73,9 +74,10 @@ class GrpcMuxImpl : public GrpcMux, struct GrpcMuxWatchImpl : public GrpcMuxWatch { GrpcMuxWatchImpl(const std::set& resources, SubscriptionCallbacks& callbacks, - const std::string& type_url, GrpcMuxImpl& parent) - : resources_(resources), callbacks_(callbacks), type_url_(type_url), parent_(parent), - watches_(parent.api_state_[type_url].watches_) { + OpaqueResourceDecoder& resource_decoder, const std::string& type_url, + GrpcMuxImpl& parent) + : resources_(resources), callbacks_(callbacks), resource_decoder_(resource_decoder), + type_url_(type_url), parent_(parent), watches_(parent.api_state_[type_url].watches_) { watches_.emplace(watches_.begin(), this); } @@ -99,6 +101,7 @@ class GrpcMuxImpl : public GrpcMux, std::set resources_; SubscriptionCallbacks& callbacks_; + OpaqueResourceDecoder& resource_decoder_; const std::string type_url_; GrpcMuxImpl& parent_; @@ -151,8 +154,8 @@ class NullGrpcMuxImpl : public GrpcMux, bool paused(const std::string&) const override { return false; } bool paused(const std::vector) const override { return false; } - GrpcMuxWatchPtr addWatch(const std::string&, const std::set&, - 
SubscriptionCallbacks&) override { + GrpcMuxWatchPtr addWatch(const std::string&, const std::set&, SubscriptionCallbacks&, + OpaqueResourceDecoder&) override { throw EnvoyException("ADS must be configured to support an ADS config source"); } diff --git a/source/common/config/grpc_subscription_impl.cc b/source/common/config/grpc_subscription_impl.cc index 83ddcb5bccad..2c2708fcf58d 100644 --- a/source/common/config/grpc_subscription_impl.cc +++ b/source/common/config/grpc_subscription_impl.cc @@ -10,15 +10,13 @@ namespace Envoy { namespace Config { -GrpcSubscriptionImpl::GrpcSubscriptionImpl(GrpcMuxSharedPtr grpc_mux, - SubscriptionCallbacks& callbacks, - SubscriptionStats stats, absl::string_view type_url, - Event::Dispatcher& dispatcher, - std::chrono::milliseconds init_fetch_timeout, - bool is_aggregated) - : grpc_mux_(grpc_mux), callbacks_(callbacks), stats_(stats), type_url_(type_url), - dispatcher_(dispatcher), init_fetch_timeout_(init_fetch_timeout), - is_aggregated_(is_aggregated) {} +GrpcSubscriptionImpl::GrpcSubscriptionImpl( + GrpcMuxSharedPtr grpc_mux, SubscriptionCallbacks& callbacks, + OpaqueResourceDecoder& resource_decoder, SubscriptionStats stats, absl::string_view type_url, + Event::Dispatcher& dispatcher, std::chrono::milliseconds init_fetch_timeout, bool is_aggregated) + : grpc_mux_(grpc_mux), callbacks_(callbacks), resource_decoder_(resource_decoder), + stats_(stats), type_url_(type_url), dispatcher_(dispatcher), + init_fetch_timeout_(init_fetch_timeout), is_aggregated_(is_aggregated) {} // Config::Subscription void GrpcSubscriptionImpl::start(const std::set& resources) { @@ -30,7 +28,7 @@ void GrpcSubscriptionImpl::start(const std::set& resources) { init_fetch_timeout_timer_->enableTimer(init_fetch_timeout_); } - watch_ = grpc_mux_->addWatch(type_url_, resources, *this); + watch_ = grpc_mux_->addWatch(type_url_, resources, *this, resource_decoder_); // The attempt stat here is maintained for the purposes of having consistency between ADS and // gRPC/filesystem/REST Subscriptions. 
Since ADS is push based and muxed, the notion of an @@ -51,9 +49,8 @@ void GrpcSubscriptionImpl::updateResourceInterest( } // Config::SubscriptionCallbacks -void GrpcSubscriptionImpl::onConfigUpdate( - const Protobuf::RepeatedPtrField& resources, - const std::string& version_info) { +void GrpcSubscriptionImpl::onConfigUpdate(const std::vector& resources, + const std::string& version_info) { disableInitFetchTimeoutTimer(); // TODO(mattklein123): In the future if we start tracking per-resource versions, we need to // supply those versions to onConfigUpdate() along with the xDS response ("system") @@ -70,7 +67,7 @@ void GrpcSubscriptionImpl::onConfigUpdate( } void GrpcSubscriptionImpl::onConfigUpdate( - const Protobuf::RepeatedPtrField& added_resources, + const std::vector& added_resources, const Protobuf::RepeatedPtrField& removed_resources, const std::string& system_version_info) { disableInitFetchTimeoutTimer(); @@ -108,10 +105,6 @@ void GrpcSubscriptionImpl::onConfigUpdateFailed(ConfigUpdateFailureReason reason stats_.update_attempt_.inc(); } -std::string GrpcSubscriptionImpl::resourceName(const ProtobufWkt::Any& resource) { - return callbacks_.resourceName(resource); -} - void GrpcSubscriptionImpl::pause() { grpc_mux_->pause(type_url_); } void GrpcSubscriptionImpl::resume() { grpc_mux_->resume(type_url_); } diff --git a/source/common/config/grpc_subscription_impl.h b/source/common/config/grpc_subscription_impl.h index ffc179f15bf8..a9a9f7b77f70 100644 --- a/source/common/config/grpc_subscription_impl.h +++ b/source/common/config/grpc_subscription_impl.h @@ -17,27 +17,22 @@ class GrpcSubscriptionImpl : public Subscription, Logger::Loggable { public: GrpcSubscriptionImpl(GrpcMuxSharedPtr grpc_mux, SubscriptionCallbacks& callbacks, - SubscriptionStats stats, absl::string_view type_url, - Event::Dispatcher& dispatcher, std::chrono::milliseconds init_fetch_timeout, - bool is_aggregated); + OpaqueResourceDecoder& resource_decoder, SubscriptionStats stats, + absl::string_view type_url, Event::Dispatcher& dispatcher, + std::chrono::milliseconds init_fetch_timeout, bool is_aggregated); // Config::Subscription void start(const std::set& resource_names) override; void updateResourceInterest(const std::set& update_to_these_names) override; // Config::SubscriptionCallbacks (all pass through to callbacks_!) 
- void onConfigUpdate(const Protobuf::RepeatedPtrField& resources, + void onConfigUpdate(const std::vector& resources, const std::string& version_info) override; - - void onConfigUpdate( - const Protobuf::RepeatedPtrField& added_resources, - const Protobuf::RepeatedPtrField& removed_resources, - const std::string& system_version_info) override; - + void onConfigUpdate(const std::vector& added_resources, + const Protobuf::RepeatedPtrField& removed_resources, + const std::string& system_version_info) override; void onConfigUpdateFailed(ConfigUpdateFailureReason reason, const EnvoyException* e) override; - std::string resourceName(const ProtobufWkt::Any& resource) override; - GrpcMuxSharedPtr grpcMux() { return grpc_mux_; } void pause(); @@ -48,6 +43,7 @@ class GrpcSubscriptionImpl : public Subscription, GrpcMuxSharedPtr grpc_mux_; SubscriptionCallbacks& callbacks_; + OpaqueResourceDecoder& resource_decoder_; SubscriptionStats stats_; const std::string type_url_; GrpcMuxWatchPtr watch_; diff --git a/source/common/config/http_subscription_impl.cc b/source/common/config/http_subscription_impl.cc index 223f09d6ea3f..fc5bf4dfb331 100644 --- a/source/common/config/http_subscription_impl.cc +++ b/source/common/config/http_subscription_impl.cc @@ -8,6 +8,7 @@ #include "common/common/assert.h" #include "common/common/macros.h" #include "common/common/utility.h" +#include "common/config/decoded_resource_impl.h" #include "common/config/utility.h" #include "common/config/version_converter.h" #include "common/http/headers.h" @@ -25,14 +26,14 @@ HttpSubscriptionImpl::HttpSubscriptionImpl( Runtime::RandomGenerator& random, std::chrono::milliseconds refresh_interval, std::chrono::milliseconds request_timeout, const Protobuf::MethodDescriptor& service_method, absl::string_view type_url, envoy::config::core::v3::ApiVersion transport_api_version, - SubscriptionCallbacks& callbacks, SubscriptionStats stats, - std::chrono::milliseconds init_fetch_timeout, + SubscriptionCallbacks& callbacks, OpaqueResourceDecoder& resource_decoder, + SubscriptionStats stats, std::chrono::milliseconds init_fetch_timeout, ProtobufMessage::ValidationVisitor& validation_visitor) : Http::RestApiFetcher(cm, remote_cluster_name, dispatcher, random, refresh_interval, request_timeout), - callbacks_(callbacks), stats_(stats), dispatcher_(dispatcher), - init_fetch_timeout_(init_fetch_timeout), validation_visitor_(validation_visitor), - transport_api_version_(transport_api_version) { + callbacks_(callbacks), resource_decoder_(resource_decoder), stats_(stats), + dispatcher_(dispatcher), init_fetch_timeout_(init_fetch_timeout), + validation_visitor_(validation_visitor), transport_api_version_(transport_api_version) { request_.mutable_node()->CopyFrom(local_info.node()); request_.set_type_url(std::string(type_url)); ASSERT(service_method.options().HasExtension(google::api::http)); @@ -85,7 +86,9 @@ void HttpSubscriptionImpl::parseResponse(const Http::ResponseMessage& response) return; } try { - callbacks_.onConfigUpdate(message.resources(), message.version_info()); + const auto decoded_resources = + DecodedResourcesWrapper(resource_decoder_, message.resources(), message.version_info()); + callbacks_.onConfigUpdate(decoded_resources.refvec_, message.version_info()); request_.set_version_info(message.version_info()); stats_.update_time_.set(DateUtil::nowToMilliseconds(dispatcher_.timeSource())); stats_.version_.set(HashUtil::xxHash64(request_.version_info())); diff --git a/source/common/config/http_subscription_impl.h 
b/source/common/config/http_subscription_impl.h index b5e8b33e94ea..9f2c01bda9f6 100644 --- a/source/common/config/http_subscription_impl.h +++ b/source/common/config/http_subscription_impl.h @@ -28,8 +28,8 @@ class HttpSubscriptionImpl : public Http::RestApiFetcher, std::chrono::milliseconds request_timeout, const Protobuf::MethodDescriptor& service_method, absl::string_view type_url, envoy::config::core::v3::ApiVersion transport_api_version, - SubscriptionCallbacks& callbacks, SubscriptionStats stats, - std::chrono::milliseconds init_fetch_timeout, + SubscriptionCallbacks& callbacks, OpaqueResourceDecoder& resource_decoder, + SubscriptionStats stats, std::chrono::milliseconds init_fetch_timeout, ProtobufMessage::ValidationVisitor& validation_visitor); // Config::Subscription @@ -50,6 +50,7 @@ class HttpSubscriptionImpl : public Http::RestApiFetcher, Protobuf::RepeatedPtrField resources_; envoy::service::discovery::v3::DiscoveryRequest request_; Config::SubscriptionCallbacks& callbacks_; + Config::OpaqueResourceDecoder& resource_decoder_; SubscriptionStats stats_; Event::Dispatcher& dispatcher_; std::chrono::milliseconds init_fetch_timeout_; diff --git a/source/common/config/new_grpc_mux_impl.cc b/source/common/config/new_grpc_mux_impl.cc index ff7d2568a921..81d00f4ae5cd 100644 --- a/source/common/config/new_grpc_mux_impl.cc +++ b/source/common/config/new_grpc_mux_impl.cc @@ -124,15 +124,16 @@ void NewGrpcMuxImpl::start() { grpc_stream_.establishNewStream(); } GrpcMuxWatchPtr NewGrpcMuxImpl::addWatch(const std::string& type_url, const std::set& resources, - SubscriptionCallbacks& callbacks) { + SubscriptionCallbacks& callbacks, + OpaqueResourceDecoder& resource_decoder) { auto entry = subscriptions_.find(type_url); if (entry == subscriptions_.end()) { // We don't yet have a subscription for type_url! Make one! addSubscription(type_url); - return addWatch(type_url, resources, callbacks); + return addWatch(type_url, resources, callbacks, resource_decoder); } - Watch* watch = entry->second->watch_map_.addWatch(callbacks); + Watch* watch = entry->second->watch_map_.addWatch(callbacks, resource_decoder); // updateWatch() queues a discovery request if any of 'resources' are not yet subscribed. 
updateWatch(type_url, watch, resources); return std::make_unique(type_url, watch, *this); diff --git a/source/common/config/new_grpc_mux_impl.h b/source/common/config/new_grpc_mux_impl.h index 73478991b17f..4b948339b735 100644 --- a/source/common/config/new_grpc_mux_impl.h +++ b/source/common/config/new_grpc_mux_impl.h @@ -35,7 +35,8 @@ class NewGrpcMuxImpl const LocalInfo::LocalInfo& local_info); GrpcMuxWatchPtr addWatch(const std::string& type_url, const std::set& resources, - SubscriptionCallbacks& callbacks) override; + SubscriptionCallbacks& callbacks, + OpaqueResourceDecoder& resource_decoder) override; void pause(const std::string& type_url) override; void pause(const std::vector type_urls) override; diff --git a/source/common/config/opaque_resource_decoder_impl.h b/source/common/config/opaque_resource_decoder_impl.h new file mode 100644 index 000000000000..ef066101952b --- /dev/null +++ b/source/common/config/opaque_resource_decoder_impl.h @@ -0,0 +1,37 @@ +#pragma once + +#include "envoy/config/subscription.h" + +#include "common/protobuf/utility.h" + +namespace Envoy { +namespace Config { + +template class OpaqueResourceDecoderImpl : public Config::OpaqueResourceDecoder { +public: + OpaqueResourceDecoderImpl(ProtobufMessage::ValidationVisitor& validation_visitor, + absl::string_view name_field) + : validation_visitor_(validation_visitor), name_field_(name_field) {} + + // Config::OpaqueResourceDecoder + ProtobufTypes::MessagePtr decodeResource(const ProtobufWkt::Any& resource) override { + auto typed_message = std::make_unique(); + // If the Any is a synthetic empty message (e.g. because the resource field was not set in + // Resource, this might be empty, so we shouldn't decode. + if (!resource.type_url().empty()) { + MessageUtil::anyConvertAndValidate(resource, *typed_message, validation_visitor_); + } + return typed_message; + } + + std::string resourceName(const Protobuf::Message& resource) override { + return MessageUtil::getStringField(resource, name_field_); + } + +private: + ProtobufMessage::ValidationVisitor& validation_visitor_; + const std::string name_field_; +}; + +} // namespace Config +} // namespace Envoy diff --git a/source/common/config/subscription_base.h b/source/common/config/subscription_base.h index dd5686f2ffaf..765e28934ebc 100644 --- a/source/common/config/subscription_base.h +++ b/source/common/config/subscription_base.h @@ -2,6 +2,7 @@ #include "envoy/config/subscription.h" +#include "common/config/opaque_resource_decoder_impl.h" #include "common/config/resource_name.h" namespace Envoy { @@ -9,16 +10,21 @@ namespace Config { template struct SubscriptionBase : public Config::SubscriptionCallbacks { public: - SubscriptionBase(const envoy::config::core::v3::ApiVersion api_version) - : api_version_(api_version) {} + SubscriptionBase(const envoy::config::core::v3::ApiVersion api_version, + ProtobufMessage::ValidationVisitor& validation_visitor, + absl::string_view name_field) + : resource_decoder_(validation_visitor, name_field), api_version_(api_version) {} std::string getResourceName() const { return Envoy::Config::getResourceName(api_version_); } +protected: + Config::OpaqueResourceDecoderImpl resource_decoder_; + private: const envoy::config::core::v3::ApiVersion api_version_; }; } // namespace Config -} // namespace Envoy \ No newline at end of file +} // namespace Envoy diff --git a/source/common/config/subscription_factory_impl.cc b/source/common/config/subscription_factory_impl.cc index 342830ebc3d8..fee65c2a1d4e 100644 --- 
a/source/common/config/subscription_factory_impl.cc +++ b/source/common/config/subscription_factory_impl.cc @@ -23,7 +23,8 @@ SubscriptionFactoryImpl::SubscriptionFactoryImpl( SubscriptionPtr SubscriptionFactoryImpl::subscriptionFromConfigSource( const envoy::config::core::v3::ConfigSource& config, absl::string_view type_url, - Stats::Scope& scope, SubscriptionCallbacks& callbacks) { + Stats::Scope& scope, SubscriptionCallbacks& callbacks, + OpaqueResourceDecoder& resource_decoder) { Config::Utility::checkLocalInfo(type_url, local_info_); std::unique_ptr result; SubscriptionStats stats = Utility::generateStats(scope); @@ -41,7 +42,7 @@ SubscriptionPtr SubscriptionFactoryImpl::subscriptionFromConfigSource( case envoy::config::core::v3::ConfigSource::ConfigSourceSpecifierCase::kPath: { Utility::checkFilesystemSubscriptionBackingPath(config.path(), api_); return std::make_unique( - dispatcher_, config.path(), callbacks, stats, validation_visitor_, api_); + dispatcher_, config.path(), callbacks, resource_decoder, stats, validation_visitor_, api_); } case envoy::config::core::v3::ConfigSource::ConfigSourceSpecifierCase::kApiConfigSource: { const envoy::config::core::v3::ApiConfigSource& api_config_source = config.api_config_source(); @@ -59,7 +60,7 @@ SubscriptionPtr SubscriptionFactoryImpl::subscriptionFromConfigSource( local_info_, cm_, api_config_source.cluster_names()[0], dispatcher_, random_, Utility::apiConfigSourceRefreshDelay(api_config_source), Utility::apiConfigSourceRequestTimeout(api_config_source), restMethod(type_url), type_url, - api_config_source.transport_api_version(), callbacks, stats, + api_config_source.transport_api_version(), callbacks, resource_decoder, stats, Utility::configSourceInitialFetchTimeout(config), validation_visitor_); case envoy::config::core::v3::ApiConfigSource::GRPC: return std::make_unique( @@ -71,7 +72,8 @@ SubscriptionPtr SubscriptionFactoryImpl::subscriptionFromConfigSource( dispatcher_, sotwGrpcMethod(type_url), api_config_source.transport_api_version(), random_, scope, Utility::parseRateLimitSettings(api_config_source), api_config_source.set_node_on_first_message_only()), - callbacks, stats, type_url, dispatcher_, Utility::configSourceInitialFetchTimeout(config), + callbacks, resource_decoder, stats, type_url, dispatcher_, + Utility::configSourceInitialFetchTimeout(config), /*is_aggregated*/ false); case envoy::config::core::v3::ApiConfigSource::DELTA_GRPC: { return std::make_unique( @@ -81,8 +83,8 @@ SubscriptionPtr SubscriptionFactoryImpl::subscriptionFromConfigSource( ->create(), dispatcher_, deltaGrpcMethod(type_url), api_config_source.transport_api_version(), random_, scope, Utility::parseRateLimitSettings(api_config_source), local_info_), - callbacks, stats, type_url, dispatcher_, Utility::configSourceInitialFetchTimeout(config), - false); + callbacks, resource_decoder, stats, type_url, dispatcher_, + Utility::configSourceInitialFetchTimeout(config), false); } default: NOT_REACHED_GCOVR_EXCL_LINE; @@ -90,7 +92,7 @@ SubscriptionPtr SubscriptionFactoryImpl::subscriptionFromConfigSource( } case envoy::config::core::v3::ConfigSource::ConfigSourceSpecifierCase::kAds: { return std::make_unique( - cm_.adsMux(), callbacks, stats, type_url, dispatcher_, + cm_.adsMux(), callbacks, resource_decoder, stats, type_url, dispatcher_, Utility::configSourceInitialFetchTimeout(config), true); } default: diff --git a/source/common/config/subscription_factory_impl.h b/source/common/config/subscription_factory_impl.h index 28e459ad20e2..0eadbc61aa76 100644 --- 
a/source/common/config/subscription_factory_impl.h +++ b/source/common/config/subscription_factory_impl.h @@ -22,7 +22,8 @@ class SubscriptionFactoryImpl : public SubscriptionFactory, Logger::Loggable(callbacks); +Watch* WatchMap::addWatch(SubscriptionCallbacks& callbacks, + OpaqueResourceDecoder& resource_decoder) { + auto watch = std::make_unique(callbacks, resource_decoder); Watch* watch_ptr = watch.get(); wildcard_watches_.insert(watch_ptr); watches_.insert(std::move(watch)); @@ -58,17 +61,19 @@ void WatchMap::onConfigUpdate(const Protobuf::RepeatedPtrField if (watches_.empty()) { return; } - SubscriptionCallbacks& name_getter = (*watches_.begin())->callbacks_; // Build a map from watches, to the set of updated resources that each watch cares about. Each // entry in the map is then a nice little bundle that can be fed directly into the individual // onConfigUpdate()s. - absl::flat_hash_map> per_watch_updates; + std::vector decoded_resources; + absl::flat_hash_map> per_watch_updates; for (const auto& r : resources) { + decoded_resources.emplace_back( + new DecodedResourceImpl((*watches_.begin())->resource_decoder_, r, version_info)); const absl::flat_hash_set& interested_in_r = - watchesInterestedIn(name_getter.resourceName(r)); + watchesInterestedIn(decoded_resources.back()->name()); for (const auto& interested_watch : interested_in_r) { - per_watch_updates[interested_watch].Add()->CopyFrom(r); + per_watch_updates[interested_watch].emplace_back(*decoded_resources.back()); } } @@ -128,12 +133,19 @@ void WatchMap::onConfigUpdate( // Build a pair of maps: from watches, to the set of resources {added,removed} that each watch // cares about. Each entry in the map-pair is then a nice little bundle that can be fed directly // into the individual onConfigUpdate()s. - absl::flat_hash_map> - per_watch_added; + std::vector decoded_resources; + absl::flat_hash_map> per_watch_added; for (const auto& r : added_resources) { const absl::flat_hash_set& interested_in_r = watchesInterestedIn(r.name()); + // If there are no watches, then we don't need to decode. If there are watches, they should all + // be for the same resource type, so we can just use the callbacks of the first watch to decode. + if (interested_in_r.empty()) { + continue; + } + decoded_resources.emplace_back( + new DecodedResourceImpl((*interested_in_r.begin())->resource_decoder_, r)); for (const auto& interested_watch : interested_in_r) { - per_watch_added[interested_watch].Add()->CopyFrom(r); + per_watch_added[interested_watch].emplace_back(*decoded_resources.back()); } } absl::flat_hash_map> per_watch_removed; diff --git a/source/common/config/watch_map.h b/source/common/config/watch_map.h index 36bcf23f88ea..e1b1236b5081 100644 --- a/source/common/config/watch_map.h +++ b/source/common/config/watch_map.h @@ -24,8 +24,10 @@ struct AddedRemoved { }; struct Watch { - Watch(SubscriptionCallbacks& callbacks) : callbacks_(callbacks) {} + Watch(SubscriptionCallbacks& callbacks, OpaqueResourceDecoder& resource_decoder) + : callbacks_(callbacks), resource_decoder_(resource_decoder) {} SubscriptionCallbacks& callbacks_; + OpaqueResourceDecoder& resource_decoder_; std::set resource_names_; // must be sorted set, for set_difference. // Needed only for state-of-the-world. // Whether the most recent update contained any resources this watch cares about. @@ -56,14 +58,14 @@ struct Watch { // update the subscription accordingly. // // A WatchMap is assumed to be dedicated to a single type_url type of resource (EDS, CDS, etc). 
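For illustration only, not part of this patch: the OpaqueResourceDecoderImpl template introduced above is the decoder each Watch now holds, and WatchMap uses it to decode every Any exactly once before fanning the resulting DecodedResourceRefs out to the interested watches. A standalone usage sketch, assuming the template is parameterized on the resource proto (as its uses elsewhere in this diff suggest) and using the strict validation visitor purely as a placeholder:

#include <string>

#include "envoy/config/endpoint/v3/endpoint.pb.h"

#include "common/config/opaque_resource_decoder_impl.h"
#include "common/protobuf/message_validator_impl.h"
#include "common/protobuf/protobuf.h"

void decodeOneAny(const Envoy::ProtobufWkt::Any& any_resource) {
  // EDS keys its resources by "cluster_name" (see the EdsClusterImpl hunk
  // later in this patch); most other xDS types use "name".
  Envoy::Config::OpaqueResourceDecoderImpl<envoy::config::endpoint::v3::ClusterLoadAssignment>
      decoder(Envoy::ProtobufMessage::getStrictValidationVisitor(), "cluster_name");
  // decodeResource() unpacks and validates the Any into the typed message;
  // resourceName() then reads the configured name field via reflection
  // (the MessageUtil::getStringField() helper added later in this patch).
  Envoy::ProtobufTypes::MessagePtr decoded = decoder.decodeResource(any_resource);
  const std::string name = decoder.resourceName(*decoded);
  (void)name;
}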
-class WatchMap : public SubscriptionCallbacks, public Logger::Loggable { +class WatchMap : public UntypedConfigUpdateCallbacks, public Logger::Loggable { public: WatchMap() = default; // Adds 'callbacks' to the WatchMap, with every possible resource being watched. // (Use updateWatchInterest() to narrow it down to some specific names). // Returns the newly added watch, to be used with updateWatchInterest and removeWatch. - Watch* addWatch(SubscriptionCallbacks& callbacks); + Watch* addWatch(SubscriptionCallbacks& callbacks, OpaqueResourceDecoder& resource_decoder); // Updates the set of resource names that the given watch should watch. // Returns any resource name additions/removals that are unique across all watches. That is: @@ -81,18 +83,15 @@ class WatchMap : public SubscriptionCallbacks, public Logger::Loggable& resources, const std::string& version_info) override; void onConfigUpdate( const Protobuf::RepeatedPtrField& added_resources, const Protobuf::RepeatedPtrField& removed_resources, const std::string& system_version_info) override; - void onConfigUpdateFailed(ConfigUpdateFailureReason reason, const EnvoyException* e) override; - std::string resourceName(const ProtobufWkt::Any&) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } - WatchMap(const WatchMap&) = delete; WatchMap& operator=(const WatchMap&) = delete; diff --git a/source/common/protobuf/utility.h b/source/common/protobuf/utility.h index 871f6b219cef..38c8560eb057 100644 --- a/source/common/protobuf/utility.h +++ b/source/common/protobuf/utility.h @@ -301,10 +301,15 @@ class MessageUtil { * * @return MessageType the typed message inside the Any. */ + template + static inline void anyConvert(const ProtobufWkt::Any& message, MessageType& typed_message) { + unpackTo(message, typed_message); + }; + template static inline MessageType anyConvert(const ProtobufWkt::Any& message) { MessageType typed_message; - unpackTo(message, typed_message); + anyConvert(message, typed_message); return typed_message; }; @@ -315,15 +320,39 @@ class MessageUtil { * @return MessageType the typed message inside the Any. * @throw ProtoValidationException if the message does not satisfy its type constraints. */ + template + static inline void anyConvertAndValidate(const ProtobufWkt::Any& message, + MessageType& typed_message, + ProtobufMessage::ValidationVisitor& validation_visitor) { + anyConvert(message, typed_message); + validate(typed_message, validation_visitor); + }; + template static inline MessageType anyConvertAndValidate(const ProtobufWkt::Any& message, ProtobufMessage::ValidationVisitor& validation_visitor) { - MessageType typed_message = anyConvert(message); - validate(typed_message, validation_visitor); + MessageType typed_message; + anyConvertAndValidate(message, typed_message, validation_visitor); return typed_message; }; + /** + * Obtain a string field from a protobuf message dynamically. + * + * @param message message to extract from. + * @param field_name field name. + * + * @return std::string with field value. + */ + static inline std::string getStringField(const Protobuf::Message& message, + const std::string& field_name) { + const Protobuf::Descriptor* descriptor = message.GetDescriptor(); + const Protobuf::FieldDescriptor* name_field = descriptor->FindFieldByName(field_name); + const Protobuf::Reflection* reflection = message.GetReflection(); + return reflection->GetString(message, name_field); + } + /** * Convert between two protobufs via a JSON round-trip. 
This is used to translate arbitrary * messages to/from google.protobuf.Struct. diff --git a/source/common/router/rds_impl.cc b/source/common/router/rds_impl.cc index 8a041ce0f12c..953da8a9cdc7 100644 --- a/source/common/router/rds_impl.cc +++ b/source/common/router/rds_impl.cc @@ -8,8 +8,6 @@ #include "envoy/admin/v3/config_dump.pb.h" #include "envoy/api/v2/route.pb.h" #include "envoy/config/core/v3/config_source.pb.h" -#include "envoy/config/route/v3/route.pb.h" -#include "envoy/config/route/v3/route.pb.validate.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" #include "envoy/service/discovery/v3/discovery.pb.h" @@ -69,9 +67,9 @@ RdsRouteConfigSubscription::RdsRouteConfigSubscription( const std::string& stat_prefix, Envoy::Router::RouteConfigProviderManagerImpl& route_config_provider_manager) : Envoy::Config::SubscriptionBase( - rds.config_source().resource_api_version()), + rds.config_source().resource_api_version(), + factory_context.messageValidationContext().dynamicValidationVisitor(), "name"), route_config_name_(rds.route_config_name()), factory_context_(factory_context), - validator_(factory_context.messageValidationContext().dynamicValidationVisitor()), parent_init_target_(fmt::format("RdsRouteConfigSubscription init {}", route_config_name_), [this]() { local_init_manager_.initialize(local_init_watcher_); }), local_init_watcher_(fmt::format("RDS local-init-watcher {}", rds.route_config_name()), @@ -87,10 +85,11 @@ RdsRouteConfigSubscription::RdsRouteConfigSubscription( const auto resource_name = getResourceName(); subscription_ = factory_context.clusterManager().subscriptionFactory().subscriptionFromConfigSource( - rds.config_source(), Grpc::Common::typeUrl(resource_name), *scope_, *this); + rds.config_source(), Grpc::Common::typeUrl(resource_name), *scope_, *this, + resource_decoder_); local_init_manager_.add(local_init_target_); config_update_info_ = - std::make_unique(factory_context.timeSource(), validator_); + std::make_unique(factory_context.timeSource()); } RdsRouteConfigSubscription::~RdsRouteConfigSubscription() { @@ -105,14 +104,13 @@ RdsRouteConfigSubscription::~RdsRouteConfigSubscription() { } void RdsRouteConfigSubscription::onConfigUpdate( - const Protobuf::RepeatedPtrField& resources, + const std::vector& resources, const std::string& version_info) { if (!validateUpdateSize(resources.size())) { return; } - auto route_config = - MessageUtil::anyConvertAndValidate(resources[0], - validator_); + const auto& route_config = dynamic_cast( + resources[0].get().resource()); if (route_config.name() != route_config_name_) { throw EnvoyException(fmt::format("Unexpected RDS configuration (expecting {}): {}", route_config_name_, route_config.name())); @@ -178,7 +176,7 @@ void RdsRouteConfigSubscription::maybeCreateInitManager( } void RdsRouteConfigSubscription::onConfigUpdate( - const Protobuf::RepeatedPtrField& added_resources, + const std::vector& added_resources, const Protobuf::RepeatedPtrField& removed_resources, const std::string&) { if (!removed_resources.empty()) { // TODO(#2500) when on-demand resource loading is supported, an RDS removal may make sense @@ -189,9 +187,7 @@ void RdsRouteConfigSubscription::onConfigUpdate( removed_resources[0]); } if (!added_resources.empty()) { - Protobuf::RepeatedPtrField unwrapped_resource; - *unwrapped_resource.Add() = added_resources[0].resource(); - onConfigUpdate(unwrapped_resource, added_resources[0].version()); + onConfigUpdate(added_resources, 
added_resources[0].get().version()); } } diff --git a/source/common/router/rds_impl.h b/source/common/router/rds_impl.h index 5481e3398064..c547d1b5a8f7 100644 --- a/source/common/router/rds_impl.h +++ b/source/common/router/rds_impl.h @@ -10,6 +10,7 @@ #include "envoy/admin/v3/config_dump.pb.h" #include "envoy/config/core/v3/config_source.pb.h" #include "envoy/config/route/v3/route.pb.h" +#include "envoy/config/route/v3/route.pb.validate.h" #include "envoy/config/subscription.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" #include "envoy/http/codes.h" @@ -128,17 +129,13 @@ class RdsRouteConfigSubscription private: // Config::SubscriptionCallbacks - void onConfigUpdate(const Protobuf::RepeatedPtrField& resources, + void onConfigUpdate(const std::vector& resources, const std::string& version_info) override; - void onConfigUpdate( - const Protobuf::RepeatedPtrField& added_resources, - const Protobuf::RepeatedPtrField& removed_resources, - const std::string&) override; + void onConfigUpdate(const std::vector& added_resources, + const Protobuf::RepeatedPtrField& removed_resources, + const std::string& system_version_info) override; void onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason reason, const EnvoyException* e) override; - std::string resourceName(const ProtobufWkt::Any& resource) override { - return MessageUtil::anyConvert(resource).name(); - } Common::CallbackHandle* addUpdateCallback(std::function callback) { return update_callback_manager_.add(callback); @@ -155,7 +152,6 @@ class RdsRouteConfigSubscription std::unique_ptr subscription_; const std::string route_config_name_; Server::Configuration::ServerFactoryContext& factory_context_; - ProtobufMessage::ValidationVisitor& validator_; // Init target used to notify the parent init manager that the subscription [and its sub resource] // is ready. 
diff --git a/source/common/router/route_config_update_receiver_impl.cc b/source/common/router/route_config_update_receiver_impl.cc index bdd3a1e188dc..144ee75d977b 100644 --- a/source/common/router/route_config_update_receiver_impl.cc +++ b/source/common/router/route_config_update_receiver_impl.cc @@ -3,8 +3,6 @@ #include #include "envoy/config/route/v3/route.pb.h" -#include "envoy/config/route/v3/route_components.pb.h" -#include "envoy/config/route/v3/route_components.pb.validate.h" #include "envoy/service/discovery/v3/discovery.pb.h" #include "common/common/assert.h" @@ -40,26 +38,16 @@ void RouteConfigUpdateReceiverImpl::onUpdateCommon( } bool RouteConfigUpdateReceiverImpl::onVhdsUpdate( - const Protobuf::RepeatedPtrField& added_resources, + const VirtualHostRefVector& added_vhosts, const std::set& added_resource_ids, const Protobuf::RepeatedPtrField& removed_resources, const std::string& version_info) { - collectResourceIdsInUpdate(added_resources); + resource_ids_in_last_update_ = added_resource_ids; const bool removed = removeVhosts(vhds_virtual_hosts_, removed_resources); - const bool updated = updateVhosts(vhds_virtual_hosts_, added_resources); + const bool updated = updateVhosts(vhds_virtual_hosts_, added_vhosts); onUpdateCommon(route_config_proto_, version_info); return removed || updated || !resource_ids_in_last_update_.empty(); } -void RouteConfigUpdateReceiverImpl::collectResourceIdsInUpdate( - const Protobuf::RepeatedPtrField& added_resources) { - resource_ids_in_last_update_.clear(); - for (const auto& resource : added_resources) { - resource_ids_in_last_update_.emplace(resource.name()); - std::copy(resource.aliases().begin(), resource.aliases().end(), - std::inserter(resource_ids_in_last_update_, resource_ids_in_last_update_.end())); - } -} - void RouteConfigUpdateReceiverImpl::initializeRdsVhosts( const envoy::config::route::v3::RouteConfiguration& route_configuration) { rds_virtual_hosts_.clear(); @@ -84,22 +72,14 @@ bool RouteConfigUpdateReceiverImpl::removeVhosts( bool RouteConfigUpdateReceiverImpl::updateVhosts( std::map& vhosts, - const Protobuf::RepeatedPtrField& added_resources) { + const VirtualHostRefVector& added_vhosts) { bool vhosts_added = false; - for (const auto& resource : added_resources) { - // the management server returns empty resources (they contain no virtual hosts in this case) - // for aliases that it couldn't resolve. 
- if (onDemandFetchFailed(resource)) { - continue; - } - envoy::config::route::v3::VirtualHost vhost = - MessageUtil::anyConvertAndValidate( - resource.resource(), validation_visitor_); - auto found = vhosts.find(vhost.name()); + for (const auto& vhost : added_vhosts) { + auto found = vhosts.find(vhost.get().name()); if (found != vhosts.end()) { vhosts.erase(found); } - vhosts.emplace(vhost.name(), vhost); + vhosts.emplace(vhost.get().name(), vhost.get()); vhosts_added = true; } return vhosts_added; @@ -118,10 +98,5 @@ void RouteConfigUpdateReceiverImpl::rebuildRouteConfig( } } -bool RouteConfigUpdateReceiverImpl::onDemandFetchFailed( - const envoy::service::discovery::v3::Resource& resource) const { - return !resource.has_resource(); -} - } // namespace Router } // namespace Envoy diff --git a/source/common/router/route_config_update_receiver_impl.h b/source/common/router/route_config_update_receiver_impl.h index dc5cee4a422f..9bfa6940cbbe 100644 --- a/source/common/router/route_config_update_receiver_impl.h +++ b/source/common/router/route_config_update_receiver_impl.h @@ -17,19 +17,15 @@ namespace Router { class RouteConfigUpdateReceiverImpl : public RouteConfigUpdateReceiver { public: - RouteConfigUpdateReceiverImpl(TimeSource& time_source, - ProtobufMessage::ValidationVisitor& validation_visitor) + RouteConfigUpdateReceiverImpl(TimeSource& time_source) : time_source_(time_source), last_config_hash_(0ull), last_vhds_config_hash_(0ul), - validation_visitor_(validation_visitor), vhds_configuration_changed_(true) {} + vhds_configuration_changed_(true) {} void initializeRdsVhosts(const envoy::config::route::v3::RouteConfiguration& route_configuration); - void collectResourceIdsInUpdate( - const Protobuf::RepeatedPtrField& added_resources); bool removeVhosts(std::map& vhosts, const Protobuf::RepeatedPtrField& removed_vhost_names); - bool updateVhosts( - std::map& vhosts, - const Protobuf::RepeatedPtrField& added_resources); + bool updateVhosts(std::map& vhosts, + const VirtualHostRefVector& added_vhosts); void rebuildRouteConfig( const std::map& rds_vhosts, const std::map& vhds_vhosts, @@ -41,10 +37,10 @@ class RouteConfigUpdateReceiverImpl : public RouteConfigUpdateReceiver { // Router::RouteConfigUpdateReceiver bool onRdsUpdate(const envoy::config::route::v3::RouteConfiguration& rc, const std::string& version_info) override; - bool onVhdsUpdate( - const Protobuf::RepeatedPtrField& added_resources, - const Protobuf::RepeatedPtrField& removed_resources, - const std::string& version_info) override; + bool onVhdsUpdate(const VirtualHostRefVector& added_vhosts, + const std::set& added_resource_ids, + const Protobuf::RepeatedPtrField& removed_resources, + const std::string& version_info) override; const std::string& routeConfigName() const override { return route_config_proto_.name(); } const std::string& configVersion() const override { return last_config_version_; } uint64_t configHash() const override { return last_config_hash_; } @@ -70,7 +66,6 @@ class RouteConfigUpdateReceiverImpl : public RouteConfigUpdateReceiver { std::map rds_virtual_hosts_; std::map vhds_virtual_hosts_; absl::optional config_info_; - ProtobufMessage::ValidationVisitor& validation_visitor_; std::set resource_ids_in_last_update_; bool vhds_configuration_changed_; }; diff --git a/source/common/router/scoped_rds.cc b/source/common/router/scoped_rds.cc index a1ecd85c3639..ddd1d0073b43 100644 --- a/source/common/router/scoped_rds.cc +++ b/source/common/router/scoped_rds.cc @@ -6,7 +6,6 @@ #include 
"envoy/api/v2/scoped_route.pb.h" #include "envoy/config/core/v3/config_source.pb.h" #include "envoy/config/route/v3/scoped_route.pb.h" -#include "envoy/config/route/v3/scoped_route.pb.validate.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" #include "envoy/service/discovery/v3/discovery.pb.h" @@ -100,18 +99,18 @@ ScopedRdsConfigSubscription::ScopedRdsConfigSubscription( : DeltaConfigSubscriptionInstance("SRDS", manager_identifier, config_provider_manager, factory_context), Envoy::Config::SubscriptionBase( - rds_config_source.resource_api_version()), + rds_config_source.resource_api_version(), + factory_context.messageValidationContext().dynamicValidationVisitor(), "name"), factory_context_(factory_context), name_(name), scope_key_builder_(scope_key_builder), scope_(factory_context.scope().createScope(stat_prefix + "scoped_rds." + name + ".")), stats_({ALL_SCOPED_RDS_STATS(POOL_COUNTER(*scope_))}), - rds_config_source_(std::move(rds_config_source)), - validation_visitor_(factory_context.messageValidationContext().dynamicValidationVisitor()), - stat_prefix_(stat_prefix), route_config_provider_manager_(route_config_provider_manager) { + rds_config_source_(std::move(rds_config_source)), stat_prefix_(stat_prefix), + route_config_provider_manager_(route_config_provider_manager) { const auto resource_name = getResourceName(); subscription_ = factory_context.clusterManager().subscriptionFactory().subscriptionFromConfigSource( scoped_rds.scoped_rds_config_source(), Grpc::Common::typeUrl(resource_name), *scope_, - *this); + *this, resource_decoder_); initialize([scope_key_builder]() -> Envoy::Config::ConfigProvider::ConfigConstSharedPtr { return std::make_shared( @@ -135,19 +134,18 @@ ScopedRdsConfigSubscription::RdsRouteConfigProviderHelper::RdsRouteConfigProvide })) {} bool ScopedRdsConfigSubscription::addOrUpdateScopes( - const Protobuf::RepeatedPtrField& resources, - Init::Manager& init_manager, const std::string& version_info, - std::vector& exception_msgs) { + const std::vector& resources, Init::Manager& init_manager, + const std::string& version_info, std::vector& exception_msgs) { bool any_applied = false; envoy::extensions::filters::network::http_connection_manager::v3::Rds rds; rds.mutable_config_source()->MergeFrom(rds_config_source_); absl::flat_hash_set unique_resource_names; for (const auto& resource : resources) { - envoy::config::route::v3::ScopedRouteConfiguration scoped_route_config; try { - scoped_route_config = - MessageUtil::anyConvertAndValidate( - resource.resource(), validation_visitor_); + // Explicit copy so that we can std::move later. + envoy::config::route::v3::ScopedRouteConfiguration scoped_route_config = + dynamic_cast( + resource.get().resource()); const std::string scope_name = scoped_route_config.name(); if (!unique_resource_names.insert(scope_name).second) { throw EnvoyException( @@ -221,7 +219,7 @@ ScopedRdsConfigSubscription::removeScopes( } void ScopedRdsConfigSubscription::onConfigUpdate( - const Protobuf::RepeatedPtrField& added_resources, + const std::vector& added_resources, const Protobuf::RepeatedPtrField& removed_resources, const std::string& version_info) { // NOTE: deletes are done before adds/updates. @@ -306,17 +304,17 @@ void ScopedRdsConfigSubscription::onRdsConfigUpdate(const std::string& scope_nam // TODO(stevenzzzz): see issue #7508, consider generalizing this function as it overlaps with // CdsApiImpl::onConfigUpdate. 
void ScopedRdsConfigSubscription::onConfigUpdate( - const Protobuf::RepeatedPtrField& resources, + const std::vector& resources, const std::string& version_info) { absl::flat_hash_map scoped_routes; absl::flat_hash_map scope_name_by_key_hash; - for (const auto& resource_any : resources) { + for (const auto& resource : resources) { // Throws (thus rejects all) on any error. - auto scoped_route = - MessageUtil::anyConvertAndValidate( - resource_any, validation_visitor_); - const std::string scope_name = scoped_route.name(); + const auto& scoped_route = + dynamic_cast( + resource.get().resource()); + const std::string& scope_name = scoped_route.name(); auto scope_config_inserted = scoped_routes.try_emplace(scope_name, std::move(scoped_route)); if (!scope_config_inserted.second) { throw EnvoyException( @@ -332,21 +330,15 @@ void ScopedRdsConfigSubscription::onConfigUpdate( } } ScopedRouteMap scoped_routes_to_remove = scoped_route_map_; - Protobuf::RepeatedPtrField to_add_repeated; Protobuf::RepeatedPtrField to_remove_repeated; for (auto& iter : scoped_routes) { const std::string& scope_name = iter.first; scoped_routes_to_remove.erase(scope_name); - auto* to_add = to_add_repeated.Add(); - to_add->set_name(scope_name); - to_add->set_version(version_info); - to_add->mutable_resource()->PackFrom(iter.second); } - for (const auto& scoped_route : scoped_routes_to_remove) { *to_remove_repeated.Add() = scoped_route.first; } - onConfigUpdate(to_add_repeated, to_remove_repeated, version_info); + onConfigUpdate(resources, to_remove_repeated, version_info); } ScopedRdsConfigProvider::ScopedRdsConfigProvider( diff --git a/source/common/router/scoped_rds.h b/source/common/router/scoped_rds.h index befa51a21dc2..3a9fa29e47a8 100644 --- a/source/common/router/scoped_rds.h +++ b/source/common/router/scoped_rds.h @@ -5,6 +5,7 @@ #include "envoy/common/callback.h" #include "envoy/config/core/v3/config_source.pb.h" #include "envoy/config/route/v3/scoped_route.pb.h" +#include "envoy/config/route/v3/scoped_route.pb.validate.h" #include "envoy/config/subscription.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" #include "envoy/router/route_config_provider_manager.h" @@ -131,10 +132,9 @@ class ScopedRdsConfigSubscription // Adds or updates scopes, create a new RDS provider for each resource, if an exception is thrown // during updating, the exception message is collected via the exception messages vector. // Returns true if any scope updated, false otherwise. - bool addOrUpdateScopes( - const Protobuf::RepeatedPtrField& resources, - Init::Manager& init_manager, const std::string& version_info, - std::vector& exception_msgs); + bool addOrUpdateScopes(const std::vector& resources, + Init::Manager& init_manager, const std::string& version_info, + std::vector& exception_msgs); // Removes given scopes from the managed set of scopes. // Returns a list of to be removed helpers which is temporally held in the onConfigUpdate method, // to make sure new scopes sharing the same RDS source configs could reuse the subscriptions. @@ -151,21 +151,16 @@ class ScopedRdsConfigSubscription // EnvoyException on any error and essentially reject an update. While the Delta form // onConfigUpdate(added_resources, removed_resources, version_info) by design will partially // accept correct RouteConfiguration from management server. 
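For illustration only, not part of this patch: with resources now arriving pre-decoded, the overlap that the TODO above mentions reduces to a small shared shape. The state-of-the-world overload (both here and in CdsApiImpl further below) only has to work out which previously-known names disappeared, and then forwards the same DecodedResourceRef vector to the delta overload. A hedged sketch of that removal computation; the function and parameter names are assumptions:

#include <string>
#include <vector>

#include "envoy/config/subscription.h"

#include "absl/container/flat_hash_set.h"
#include "common/protobuf/protobuf.h"

// Given the full state-of-the-world resource list and the names the caller
// currently knows about, return the names to treat as removed. The caller then
// invokes its delta onConfigUpdate() overload with the same decoded resources
// plus this removal list, as the SRDS hunk above and the CDS hunk below do.
Envoy::Protobuf::RepeatedPtrField<std::string>
removedNames(const std::vector<Envoy::Config::DecodedResourceRef>& resources,
             const absl::flat_hash_set<std::string>& currently_known) {
  absl::flat_hash_set<std::string> still_present;
  for (const auto& resource : resources) {
    still_present.insert(resource.get().name());
  }
  Envoy::Protobuf::RepeatedPtrField<std::string> to_remove;
  for (const auto& name : currently_known) {
    if (still_present.count(name) == 0) {
      *to_remove.Add() = name;
    }
  }
  return to_remove;
}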
- void onConfigUpdate(const Protobuf::RepeatedPtrField& resources, + void onConfigUpdate(const std::vector& resources, const std::string& version_info) override; - void onConfigUpdate( - const Protobuf::RepeatedPtrField& added_resources, - const Protobuf::RepeatedPtrField& removed_resources, - const std::string& version_info) override; + void onConfigUpdate(const std::vector& added_resources, + const Protobuf::RepeatedPtrField& removed_resources, + const std::string& system_version_info) override; void onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason reason, const EnvoyException*) override { ASSERT(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure != reason); DeltaConfigSubscriptionInstance::onConfigUpdateFailed(); } - std::string resourceName(const ProtobufWkt::Any& resource) override { - return MessageUtil::anyConvert(resource) - .name(); - } // Propagate RDS updates to ScopeConfigImpl in workers. void onRdsConfigUpdate(const std::string& scope_name, RdsRouteConfigSubscription& rds_subscription); @@ -187,7 +182,6 @@ class ScopedRdsConfigSubscription Stats::ScopePtr scope_; ScopedRdsStats stats_; const envoy::config::core::v3::ConfigSource rds_config_source_; - ProtobufMessage::ValidationVisitor& validation_visitor_; const std::string stat_prefix_; RouteConfigProviderManager& route_config_provider_manager_; }; diff --git a/source/common/router/vhds.cc b/source/common/router/vhds.cc index 75f8a9570887..47552981f38f 100644 --- a/source/common/router/vhds.cc +++ b/source/common/router/vhds.cc @@ -25,7 +25,9 @@ VhdsSubscription::VhdsSubscription(RouteConfigUpdatePtr& config_update_info, const std::string& stat_prefix, std::unordered_set& route_config_providers, envoy::config::core::v3::ApiVersion resource_api_version) - : Envoy::Config::SubscriptionBase(resource_api_version), + : Envoy::Config::SubscriptionBase( + resource_api_version, + factory_context.messageValidationContext().dynamicValidationVisitor(), "name"), config_update_info_(config_update_info), scope_(factory_context.scope().createScope(stat_prefix + "vhds." + config_update_info_->routeConfigName() + ".")), @@ -45,7 +47,7 @@ VhdsSubscription::VhdsSubscription(RouteConfigUpdatePtr& config_update_info, subscription_ = factory_context.clusterManager().subscriptionFactory().subscriptionFromConfigSource( config_update_info_->routeConfiguration().vhds().config_source(), - Grpc::Common::typeUrl(resource_name), *scope_, *this); + Grpc::Common::typeUrl(resource_name), *scope_, *this, resource_decoder_); } void VhdsSubscription::updateOnDemand(const std::string& with_route_config_name_prefix) { @@ -61,10 +63,25 @@ void VhdsSubscription::onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureRe } void VhdsSubscription::onConfigUpdate( - const Protobuf::RepeatedPtrField& added_resources, + const std::vector& added_resources, const Protobuf::RepeatedPtrField& removed_resources, const std::string& version_info) { - if (config_update_info_->onVhdsUpdate(added_resources, removed_resources, version_info)) { + RouteConfigUpdateReceiver::VirtualHostRefVector added_vhosts; + std::set added_resource_ids; + for (const auto& resource : added_resources) { + added_resource_ids.emplace(resource.get().name()); + std::copy(resource.get().aliases().begin(), resource.get().aliases().end(), + std::inserter(added_resource_ids, added_resource_ids.end())); + // the management server returns empty resources (they contain no virtual hosts in this case) + // for aliases that it couldn't resolve. 
+ if (!resource.get().hasResource()) { + continue; + } + added_vhosts.emplace_back( + dynamic_cast(resource.get().resource())); + } + if (config_update_info_->onVhdsUpdate(added_vhosts, added_resource_ids, removed_resources, + version_info)) { stats_.config_reload_.inc(); ENVOY_LOG(debug, "vhds: loading new configuration: config_name={} hash={}", config_update_info_->routeConfigName(), config_update_info_->configHash()); diff --git a/source/common/router/vhds.h b/source/common/router/vhds.h index 372f5a08989c..956db775c64a 100644 --- a/source/common/router/vhds.h +++ b/source/common/router/vhds.h @@ -8,6 +8,7 @@ #include "envoy/config/core/v3/config_source.pb.h" #include "envoy/config/route/v3/route_components.pb.h" +#include "envoy/config/route/v3/route_components.pb.validate.h" #include "envoy/config/subscription.h" #include "envoy/http/codes.h" #include "envoy/local_info/local_info.h" @@ -59,17 +60,14 @@ class VhdsSubscription : Envoy::Config::SubscriptionBase&, + void onConfigUpdate(const std::vector&, const std::string&) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } - void onConfigUpdate(const Protobuf::RepeatedPtrField&, + void onConfigUpdate(const std::vector&, const Protobuf::RepeatedPtrField&, const std::string&) override; void onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason reason, const EnvoyException* e) override; - std::string resourceName(const ProtobufWkt::Any& resource) override { - return MessageUtil::anyConvert(resource).name(); - } RouteConfigUpdatePtr& config_update_info_; Stats::ScopePtr scope_; diff --git a/source/common/runtime/runtime_impl.cc b/source/common/runtime/runtime_impl.cc index 49eae3c9c641..6eb490d2015b 100644 --- a/source/common/runtime/runtime_impl.cc +++ b/source/common/runtime/runtime_impl.cc @@ -10,8 +10,6 @@ #include "envoy/event/dispatcher.h" #include "envoy/service/discovery/v2/rtds.pb.h" #include "envoy/service/discovery/v3/discovery.pb.h" -#include "envoy/service/runtime/v3/rtds.pb.h" -#include "envoy/service/runtime/v3/rtds.pb.validate.h" #include "envoy/thread_local/thread_local.h" #include "envoy/type/v3/percent.pb.h" #include "envoy/type/v3/percent.pb.validate.h" @@ -531,17 +529,16 @@ RtdsSubscription::RtdsSubscription( LoaderImpl& parent, const envoy::config::bootstrap::v3::RuntimeLayer::RtdsLayer& rtds_layer, Stats::Store& store, ProtobufMessage::ValidationVisitor& validation_visitor) : Envoy::Config::SubscriptionBase( - rtds_layer.rtds_config().resource_api_version()), + rtds_layer.rtds_config().resource_api_version(), validation_visitor, "name"), parent_(parent), config_source_(rtds_layer.rtds_config()), store_(store), resource_name_(rtds_layer.name()), - init_target_("RTDS " + resource_name_, [this]() { start(); }), - validation_visitor_(validation_visitor) {} + init_target_("RTDS " + resource_name_, [this]() { start(); }) {} -void RtdsSubscription::onConfigUpdate(const Protobuf::RepeatedPtrField& resources, +void RtdsSubscription::onConfigUpdate(const std::vector& resources, const std::string&) { validateUpdateSize(resources.size()); - auto runtime = MessageUtil::anyConvertAndValidate( - resources[0], validation_visitor_); + const auto& runtime = + dynamic_cast(resources[0].get().resource()); if (runtime.name() != resource_name_) { throw EnvoyException( fmt::format("Unexpected RTDS runtime (expecting {}): {}", resource_name_, runtime.name())); @@ -553,12 +550,10 @@ void RtdsSubscription::onConfigUpdate(const Protobuf::RepeatedPtrField& resources, + const std::vector& added_resources, const Protobuf::RepeatedPtrField&, 
const std::string&) { - validateUpdateSize(resources.size()); - Protobuf::RepeatedPtrField unwrapped_resource; - *unwrapped_resource.Add() = resources[0].resource(); - onConfigUpdate(unwrapped_resource, resources[0].version()); + validateUpdateSize(added_resources.size()); + onConfigUpdate(added_resources, added_resources[0].get().version()); } void RtdsSubscription::onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason reason, @@ -575,7 +570,7 @@ void RtdsSubscription::start() { // instantiated in the server instance. const auto resource_name = getResourceName(); subscription_ = parent_.cm_->subscriptionFactory().subscriptionFromConfigSource( - config_source_, Grpc::Common::typeUrl(resource_name), store_, *this); + config_source_, Grpc::Common::typeUrl(resource_name), store_, *this, resource_decoder_); subscription_->start({resource_name_}); } diff --git a/source/common/runtime/runtime_impl.h b/source/common/runtime/runtime_impl.h index f744261c5c27..387497ac4363 100644 --- a/source/common/runtime/runtime_impl.h +++ b/source/common/runtime/runtime_impl.h @@ -14,6 +14,7 @@ #include "envoy/runtime/runtime.h" #include "envoy/service/discovery/v3/discovery.pb.h" #include "envoy/service/runtime/v3/rtds.pb.h" +#include "envoy/service/runtime/v3/rtds.pb.validate.h" #include "envoy/stats/stats_macros.h" #include "envoy/stats/store.h" #include "envoy/thread_local/thread_local.h" @@ -209,18 +210,14 @@ struct RtdsSubscription : Envoy::Config::SubscriptionBase& resources, + void onConfigUpdate(const std::vector& resources, + const std::string& version_info) override; + void onConfigUpdate(const std::vector& added_resources, + const Protobuf::RepeatedPtrField& removed_resources, const std::string&) override; - void onConfigUpdate( - const Protobuf::RepeatedPtrField& added_resources, - const Protobuf::RepeatedPtrField& removed_resources, - const std::string&) override; void onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason reason, const EnvoyException* e) override; - std::string resourceName(const ProtobufWkt::Any& resource) override { - return MessageUtil::anyConvert(resource).name(); - } void start(); void validateUpdateSize(uint32_t num_resources); @@ -232,7 +229,6 @@ struct RtdsSubscription : Envoy::Config::SubscriptionBase; diff --git a/source/common/secret/sds_api.cc b/source/common/secret/sds_api.cc index 351928f78ef4..42af9809767e 100644 --- a/source/common/secret/sds_api.cc +++ b/source/common/secret/sds_api.cc @@ -5,7 +5,6 @@ #include "envoy/api/v2/auth/cert.pb.h" #include "envoy/config/core/v3/config_source.pb.h" #include "envoy/extensions/transport_sockets/tls/v3/cert.pb.h" -#include "envoy/extensions/transport_sockets/tls/v3/secret.pb.validate.h" #include "envoy/service/discovery/v3/discovery.pb.h" #include "common/common/assert.h" @@ -21,10 +20,10 @@ SdsApi::SdsApi(envoy::config::core::v3::ConfigSource sds_config, absl::string_vi Init::Manager& init_manager, std::function destructor_cb, Event::Dispatcher& dispatcher, Api::Api& api) : Envoy::Config::SubscriptionBase( - sds_config.resource_api_version()), + sds_config.resource_api_version(), validation_visitor, "name"), init_target_(fmt::format("SdsApi {}", sds_config_name), [this] { initialize(); }), stats_(stats), sds_config_(std::move(sds_config)), sds_config_name_(sds_config_name), - secret_hash_(0), clean_up_(std::move(destructor_cb)), validation_visitor_(validation_visitor), + secret_hash_(0), clean_up_(std::move(destructor_cb)), subscription_factory_(subscription_factory), time_source_(time_source), 
secret_data_{sds_config_name_, "uninitialized", time_source_.systemTime()}, @@ -32,7 +31,7 @@ SdsApi::SdsApi(envoy::config::core::v3::ConfigSource sds_config, absl::string_vi const auto resource_name = getResourceName(); // This has to happen here (rather than in initialize()) as it can throw exceptions. subscription_ = subscription_factory_.subscriptionFromConfigSource( - sds_config_, Grpc::Common::typeUrl(resource_name), stats_, *this); + sds_config_, Grpc::Common::typeUrl(resource_name), stats_, *this, resource_decoder_); // TODO(JimmyCYJ): Implement chained_init_manager, so that multiple init_manager // can be chained together to behave as one init_manager. In that way, we let // two listeners which share same SdsApi to register at separate init managers, and @@ -40,12 +39,11 @@ SdsApi::SdsApi(envoy::config::core::v3::ConfigSource sds_config, absl::string_vi init_manager.add(init_target_); } -void SdsApi::onConfigUpdate(const Protobuf::RepeatedPtrField& resources, +void SdsApi::onConfigUpdate(const std::vector& resources, const std::string& version_info) { validateUpdateSize(resources.size()); - auto secret = - MessageUtil::anyConvertAndValidate( - resources[0], validation_visitor_); + const auto& secret = dynamic_cast( + resources[0].get().resource()); if (secret.name() != sds_config_name_) { throw EnvoyException( @@ -88,13 +86,10 @@ void SdsApi::onConfigUpdate(const Protobuf::RepeatedPtrField& init_target_.ready(); } -void SdsApi::onConfigUpdate( - const Protobuf::RepeatedPtrField& resources, - const Protobuf::RepeatedPtrField&, const std::string&) { - validateUpdateSize(resources.size()); - Protobuf::RepeatedPtrField unwrapped_resource; - *unwrapped_resource.Add() = resources[0].resource(); - onConfigUpdate(unwrapped_resource, resources[0].version()); +void SdsApi::onConfigUpdate(const std::vector& added_resources, + const Protobuf::RepeatedPtrField&, const std::string&) { + validateUpdateSize(added_resources.size()); + onConfigUpdate(added_resources, added_resources[0].get().version()); } void SdsApi::onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason reason, diff --git a/source/common/secret/sds_api.h b/source/common/secret/sds_api.h index d0173f8ae470..04afae8f60ad 100644 --- a/source/common/secret/sds_api.h +++ b/source/common/secret/sds_api.h @@ -8,6 +8,7 @@ #include "envoy/config/subscription_factory.h" #include "envoy/event/dispatcher.h" #include "envoy/extensions/transport_sockets/tls/v3/cert.pb.h" +#include "envoy/extensions/transport_sockets/tls/v3/secret.pb.validate.h" #include "envoy/init/manager.h" #include "envoy/local_info/local_info.h" #include "envoy/runtime/runtime.h" @@ -56,16 +57,13 @@ class SdsApi : public Envoy::Config::SubscriptionBase< Common::CallbackManager<> update_callback_manager_; // Config::SubscriptionCallbacks - void onConfigUpdate(const Protobuf::RepeatedPtrField& resources, + void onConfigUpdate(const std::vector& resources, const std::string& version_info) override; - void onConfigUpdate(const Protobuf::RepeatedPtrField&, - const Protobuf::RepeatedPtrField&, const std::string&) override; + void onConfigUpdate(const std::vector& added_resources, + const Protobuf::RepeatedPtrField& removed_resources, + const std::string& system_version_info) override; void onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason reason, const EnvoyException* e) override; - std::string resourceName(const ProtobufWkt::Any& resource) override { - return MessageUtil::anyConvert(resource) - .name(); - } virtual std::vector getDataSourceFilenames() PURE; 
private: @@ -83,7 +81,6 @@ class SdsApi : public Envoy::Config::SubscriptionBase< uint64_t secret_hash_; uint64_t files_hash_; Cleanup clean_up_; - ProtobufMessage::ValidationVisitor& validation_visitor_; Config::SubscriptionFactory& subscription_factory_; TimeSource& time_source_; SecretData secret_data_; diff --git a/source/common/upstream/BUILD b/source/common/upstream/BUILD index 081a9b43d334..84f57d04edc6 100644 --- a/source/common/upstream/BUILD +++ b/source/common/upstream/BUILD @@ -365,6 +365,7 @@ envoy_cc_library( "//include/envoy/upstream:cluster_factory_interface", "//include/envoy/upstream:locality_lib", "//source/common/config:api_version_lib", + "//source/common/config:decoded_resource_lib", "//source/common/config:metadata_lib", "//source/common/config:subscription_base_interface", "//source/common/config:subscription_factory_lib", diff --git a/source/common/upstream/cds_api_impl.cc b/source/common/upstream/cds_api_impl.cc index 86759a9a3d94..c7748babd1e1 100644 --- a/source/common/upstream/cds_api_impl.cc +++ b/source/common/upstream/cds_api_impl.cc @@ -3,8 +3,6 @@ #include #include "envoy/api/v2/cluster.pb.h" -#include "envoy/config/cluster/v3/cluster.pb.h" -#include "envoy/config/cluster/v3/cluster.pb.validate.h" #include "envoy/config/core/v3/config_source.pb.h" #include "envoy/service/discovery/v3/discovery.pb.h" #include "envoy/stats/scope.h" @@ -30,41 +28,30 @@ CdsApiPtr CdsApiImpl::create(const envoy::config::core::v3::ConfigSource& cds_co CdsApiImpl::CdsApiImpl(const envoy::config::core::v3::ConfigSource& cds_config, ClusterManager& cm, Stats::Scope& scope, ProtobufMessage::ValidationVisitor& validation_visitor) : Envoy::Config::SubscriptionBase( - cds_config.resource_api_version()), - cm_(cm), scope_(scope.createScope("cluster_manager.cds.")), - validation_visitor_(validation_visitor) { + cds_config.resource_api_version(), validation_visitor, "name"), + cm_(cm), scope_(scope.createScope("cluster_manager.cds.")) { const auto resource_name = getResourceName(); subscription_ = cm_.subscriptionFactory().subscriptionFromConfigSource( - cds_config, Grpc::Common::typeUrl(resource_name), *scope_, *this); + cds_config, Grpc::Common::typeUrl(resource_name), *scope_, *this, resource_decoder_); } -void CdsApiImpl::onConfigUpdate(const Protobuf::RepeatedPtrField& resources, +void CdsApiImpl::onConfigUpdate(const std::vector& resources, const std::string& version_info) { ClusterManager::ClusterInfoMap clusters_to_remove = cm_.clusters(); std::vector clusters; - for (const auto& cluster_blob : resources) { - // No validation needed here the overloaded call to onConfigUpdate validates. 
- clusters.push_back(MessageUtil::anyConvert(cluster_blob)); - clusters_to_remove.erase(clusters.back().name()); + for (const auto& resource : resources) { + clusters_to_remove.erase(resource.get().name()); } Protobuf::RepeatedPtrField to_remove_repeated; for (const auto& cluster : clusters_to_remove) { *to_remove_repeated.Add() = cluster.first; } - Protobuf::RepeatedPtrField to_add_repeated; - for (const auto& cluster : clusters) { - envoy::service::discovery::v3::Resource* to_add = to_add_repeated.Add(); - to_add->set_name(cluster.name()); - to_add->set_version(version_info); - to_add->mutable_resource()->PackFrom(cluster); - } - onConfigUpdate(to_add_repeated, to_remove_repeated, version_info); + onConfigUpdate(resources, to_remove_repeated, version_info); } -void CdsApiImpl::onConfigUpdate( - const Protobuf::RepeatedPtrField& added_resources, - const Protobuf::RepeatedPtrField& removed_resources, - const std::string& system_version_info) { +void CdsApiImpl::onConfigUpdate(const std::vector& added_resources, + const Protobuf::RepeatedPtrField& removed_resources, + const std::string& system_version_info) { std::unique_ptr maybe_eds_resume; if (cm_.adsMux()) { const auto type_urls = @@ -83,13 +70,12 @@ void CdsApiImpl::onConfigUpdate( for (const auto& resource : added_resources) { envoy::config::cluster::v3::Cluster cluster; try { - cluster = MessageUtil::anyConvertAndValidate( - resource.resource(), validation_visitor_); + cluster = dynamic_cast(resource.get().resource()); if (!cluster_names.insert(cluster.name()).second) { // NOTE: at this point, the first of these duplicates has already been successfully applied. throw EnvoyException(fmt::format("duplicate cluster {} found", cluster.name())); } - if (cm_.addOrUpdateCluster(cluster, resource.version())) { + if (cm_.addOrUpdateCluster(cluster, resource.get().version())) { any_applied = true; ENVOY_LOG(info, "cds: add/update cluster '{}'", cluster.name()); } else { @@ -132,4 +118,4 @@ void CdsApiImpl::runInitializeCallbackIfAny() { } } // namespace Upstream -} // namespace Envoy \ No newline at end of file +} // namespace Envoy diff --git a/source/common/upstream/cds_api_impl.h b/source/common/upstream/cds_api_impl.h index f2f66340e9b0..71eb8b351652 100644 --- a/source/common/upstream/cds_api_impl.h +++ b/source/common/upstream/cds_api_impl.h @@ -4,6 +4,7 @@ #include "envoy/api/api.h" #include "envoy/config/cluster/v3/cluster.pb.h" +#include "envoy/config/cluster/v3/cluster.pb.validate.h" #include "envoy/config/core/v3/config_source.pb.h" #include "envoy/config/subscription.h" #include "envoy/event/dispatcher.h" @@ -38,17 +39,13 @@ class CdsApiImpl : public CdsApi, private: // Config::SubscriptionCallbacks - void onConfigUpdate(const Protobuf::RepeatedPtrField& resources, + void onConfigUpdate(const std::vector& resources, const std::string& version_info) override; - void onConfigUpdate( - const Protobuf::RepeatedPtrField& added_resources, - const Protobuf::RepeatedPtrField& removed_resources, - const std::string& system_version_info) override; + void onConfigUpdate(const std::vector& added_resources, + const Protobuf::RepeatedPtrField& removed_resources, + const std::string& system_version_info) override; void onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason reason, const EnvoyException* e) override; - std::string resourceName(const ProtobufWkt::Any& resource) override { - return MessageUtil::anyConvert(resource).name(); - } CdsApiImpl(const envoy::config::core::v3::ConfigSource& cds_config, ClusterManager& cm, Stats::Scope& 
scope, ProtobufMessage::ValidationVisitor& validation_visitor); void runInitializeCallbackIfAny(); @@ -58,7 +55,6 @@ class CdsApiImpl : public CdsApi, std::string system_version_info_; std::function initialize_callback_; Stats::ScopePtr scope_; - ProtobufMessage::ValidationVisitor& validation_visitor_; }; } // namespace Upstream diff --git a/source/common/upstream/eds.cc b/source/common/upstream/eds.cc index 03a12914cfa8..5a75a990d0dc 100644 --- a/source/common/upstream/eds.cc +++ b/source/common/upstream/eds.cc @@ -4,13 +4,12 @@ #include "envoy/common/exception.h" #include "envoy/config/cluster/v3/cluster.pb.h" #include "envoy/config/core/v3/config_source.pb.h" -#include "envoy/config/endpoint/v3/endpoint.pb.h" -#include "envoy/config/endpoint/v3/endpoint.pb.validate.h" #include "envoy/service/discovery/v3/discovery.pb.h" #include "common/common/assert.h" #include "common/common/utility.h" #include "common/config/api_version.h" +#include "common/config/decoded_resource_impl.h" #include "common/config/version_converter.h" namespace Envoy { @@ -23,12 +22,12 @@ EdsClusterImpl::EdsClusterImpl( : BaseDynamicClusterImpl(cluster, runtime, factory_context, std::move(stats_scope), added_via_api), Envoy::Config::SubscriptionBase( - cluster.eds_cluster_config().eds_config().resource_api_version()), + cluster.eds_cluster_config().eds_config().resource_api_version(), + factory_context.messageValidationVisitor(), "cluster_name"), local_info_(factory_context.localInfo()), cluster_name_(cluster.eds_cluster_config().service_name().empty() ? cluster.name() - : cluster.eds_cluster_config().service_name()), - validation_visitor_(factory_context.messageValidationVisitor()) { + : cluster.eds_cluster_config().service_name()) { Event::Dispatcher& dispatcher = factory_context.dispatcher(); assignment_timeout_ = dispatcher.createTimer([this]() -> void { onAssignmentTimeout(); }); const auto& eds_config = cluster.eds_cluster_config().eds_config(); @@ -41,7 +40,8 @@ EdsClusterImpl::EdsClusterImpl( const auto resource_name = getResourceName(); subscription_ = factory_context.clusterManager().subscriptionFactory().subscriptionFromConfigSource( - eds_config, Grpc::Common::typeUrl(resource_name), info_->statsScope(), *this); + eds_config, Grpc::Common::typeUrl(resource_name), info_->statsScope(), *this, + resource_decoder_); } void EdsClusterImpl::startPreInit() { subscription_->start({cluster_name_}); } @@ -112,14 +112,14 @@ void EdsClusterImpl::BatchUpdateHelper::batchUpdate(PrioritySet::HostUpdateCb& h parent_.onPreInitComplete(); } -void EdsClusterImpl::onConfigUpdate(const Protobuf::RepeatedPtrField& resources, +void EdsClusterImpl::onConfigUpdate(const std::vector& resources, const std::string&) { if (!validateUpdateSize(resources.size())) { return; } - auto cluster_load_assignment = - MessageUtil::anyConvertAndValidate( - resources[0], validation_visitor_); + envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment = + dynamic_cast( + resources[0].get().resource()); if (cluster_load_assignment.cluster_name() != cluster_name_) { throw EnvoyException(fmt::format("Unexpected EDS cluster (expecting {}): {}", cluster_name_, cluster_load_assignment.cluster_name())); @@ -145,15 +145,13 @@ void EdsClusterImpl::onConfigUpdate(const Protobuf::RepeatedPtrField& resources, - const Protobuf::RepeatedPtrField&, const std::string&) { - if (!validateUpdateSize(resources.size())) { +void EdsClusterImpl::onConfigUpdate(const std::vector& added_resources, + const Protobuf::RepeatedPtrField&, + const std::string&) 
{ + if (!validateUpdateSize(added_resources.size())) { return; } - Protobuf::RepeatedPtrField unwrapped_resource; - *unwrapped_resource.Add() = resources[0].resource(); - onConfigUpdate(unwrapped_resource, resources[0].version()); + onConfigUpdate(added_resources, added_resources[0].get().version()); } bool EdsClusterImpl::validateUpdateSize(int num_resources) { @@ -175,11 +173,13 @@ void EdsClusterImpl::onAssignmentTimeout() { // TODO(vishalpowar) This is not going to work for incremental updates, and we // need to instead change the health status to indicate the assignments are // stale. - Protobuf::RepeatedPtrField resources; envoy::config::endpoint::v3::ClusterLoadAssignment resource; resource.set_cluster_name(cluster_name_); - resources.Add()->PackFrom(resource); - onConfigUpdate(resources, ""); + ProtobufWkt::Any any_resource; + any_resource.PackFrom(resource); + Config::DecodedResourceImpl decoded_resource(resource_decoder_, any_resource, ""); + std::vector resource_refs = {decoded_resource}; + onConfigUpdate(resource_refs, ""); // Stat to track how often we end up with stale assignments. info_->stats().assignment_stale_.inc(); } diff --git a/source/common/upstream/eds.h b/source/common/upstream/eds.h index fa3b09eb8cca..a18ee5696630 100644 --- a/source/common/upstream/eds.h +++ b/source/common/upstream/eds.h @@ -4,6 +4,7 @@ #include "envoy/config/core/v3/base.pb.h" #include "envoy/config/core/v3/config_source.pb.h" #include "envoy/config/endpoint/v3/endpoint.pb.h" +#include "envoy/config/endpoint/v3/endpoint.pb.validate.h" #include "envoy/config/subscription.h" #include "envoy/config/subscription_factory.h" #include "envoy/local_info/local_info.h" @@ -37,16 +38,13 @@ class EdsClusterImpl private: // Config::SubscriptionCallbacks - void onConfigUpdate(const Protobuf::RepeatedPtrField& resources, + void onConfigUpdate(const std::vector& resources, const std::string& version_info) override; - void onConfigUpdate(const Protobuf::RepeatedPtrField&, - const Protobuf::RepeatedPtrField&, const std::string&) override; + void onConfigUpdate(const std::vector& added_resources, + const Protobuf::RepeatedPtrField& removed_resources, + const std::string& system_version_info) override; void onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason reason, const EnvoyException* e) override; - std::string resourceName(const ProtobufWkt::Any& resource) override { - return MessageUtil::anyConvert(resource) - .cluster_name(); - } using LocalityWeightsMap = std::unordered_map; bool updateHostsPerLocality(const uint32_t priority, const uint32_t overprovisioning_factor, @@ -82,7 +80,6 @@ class EdsClusterImpl std::vector locality_weights_map_; HostMap all_hosts_; Event::TimerPtr assignment_timeout_; - ProtobufMessage::ValidationVisitor& validation_visitor_; InitializePhase initialize_phase_; }; diff --git a/source/server/lds_api.cc b/source/server/lds_api.cc index c767970aa173..0ea0b088d9df 100644 --- a/source/server/lds_api.cc +++ b/source/server/lds_api.cc @@ -5,8 +5,6 @@ #include "envoy/admin/v3/config_dump.pb.h" #include "envoy/api/v2/listener.pb.h" #include "envoy/config/core/v3/config_source.pb.h" -#include "envoy/config/listener/v3/listener.pb.h" -#include "envoy/config/listener/v3/listener.pb.validate.h" #include "envoy/config/route/v3/route.pb.h" #include "envoy/service/discovery/v3/discovery.pb.h" #include "envoy/stats/scope.h" @@ -27,20 +25,18 @@ LdsApiImpl::LdsApiImpl(const envoy::config::core::v3::ConfigSource& lds_config, Stats::Scope& scope, ListenerManager& lm, 
ProtobufMessage::ValidationVisitor& validation_visitor) : Envoy::Config::SubscriptionBase( - lds_config.resource_api_version()), + lds_config.resource_api_version(), validation_visitor, "name"), listener_manager_(lm), scope_(scope.createScope("listener_manager.lds.")), cm_(cm), - init_target_("LDS", [this]() { subscription_->start({}); }), - validation_visitor_(validation_visitor) { + init_target_("LDS", [this]() { subscription_->start({}); }) { const auto resource_name = getResourceName(); subscription_ = cm.subscriptionFactory().subscriptionFromConfigSource( - lds_config, Grpc::Common::typeUrl(resource_name), *scope_, *this); + lds_config, Grpc::Common::typeUrl(resource_name), *scope_, *this, resource_decoder_); init_manager.add(init_target_); } -void LdsApiImpl::onConfigUpdate( - const Protobuf::RepeatedPtrField& added_resources, - const Protobuf::RepeatedPtrField& removed_resources, - const std::string& system_version_info) { +void LdsApiImpl::onConfigUpdate(const std::vector& added_resources, + const Protobuf::RepeatedPtrField& removed_resources, + const std::string& system_version_info) { std::unique_ptr maybe_rds_resume; if (cm_.adsMux()) { const auto type_urls = @@ -68,13 +64,13 @@ void LdsApiImpl::onConfigUpdate( for (const auto& resource : added_resources) { envoy::config::listener::v3::Listener listener; try { - listener = MessageUtil::anyConvertAndValidate( - resource.resource(), validation_visitor_); + listener = + dynamic_cast(resource.get().resource()); if (!listener_names.insert(listener.name()).second) { // NOTE: at this point, the first of these duplicates has already been successfully applied. throw EnvoyException(fmt::format("duplicate listener {} found", listener.name())); } - if (listener_manager_.addOrUpdateListener(listener, resource.version(), true)) { + if (listener_manager_.addOrUpdateListener(listener, resource.get().version(), true)) { ENVOY_LOG(info, "lds: add/update listener '{}'", listener.name()); any_applied = true; } else { @@ -84,7 +80,7 @@ void LdsApiImpl::onConfigUpdate( failure_state.push_back(std::make_unique()); auto& state = failure_state.back(); state->set_details(e.what()); - state->mutable_failed_configuration()->PackFrom(resource); + state->mutable_failed_configuration()->PackFrom(resource.get().resource()); absl::StrAppend(&message, listener.name(), ": ", e.what(), "\n"); } } @@ -99,7 +95,7 @@ void LdsApiImpl::onConfigUpdate( } } -void LdsApiImpl::onConfigUpdate(const Protobuf::RepeatedPtrField& resources, +void LdsApiImpl::onConfigUpdate(const std::vector& resources, const std::string& version_info) { // We need to keep track of which listeners need to remove. // Specifically, it's [listeners we currently have] - [listeners found in the response]. @@ -107,27 +103,16 @@ void LdsApiImpl::onConfigUpdate(const Protobuf::RepeatedPtrField to_add_repeated; - for (const auto& listener_blob : resources) { - // Add this resource to our delta added/updated pile... - envoy::service::discovery::v3::Resource* to_add = to_add_repeated.Add(); - // No validation needed here the overloaded call to onConfigUpdate validates. - const std::string listener_name = - MessageUtil::anyConvert(listener_blob).name(); - to_add->set_name(listener_name); - to_add->set_version(version_info); - to_add->mutable_resource()->MergeFrom(listener_blob); - // ...and remove its name from our delta removed pile. - listeners_to_remove.erase(listener_name); + for (const auto& resource : resources) { + // Remove its name from our delta removed pile. 
+ listeners_to_remove.erase(resource.get().name()); } - // Copy our delta removed pile into the desired format. Protobuf::RepeatedPtrField to_remove_repeated; for (const auto& listener : listeners_to_remove) { *to_remove_repeated.Add() = listener; } - onConfigUpdate(to_add_repeated, to_remove_repeated, version_info); + onConfigUpdate(resources, to_remove_repeated, version_info); } void LdsApiImpl::onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason reason, @@ -139,4 +124,4 @@ void LdsApiImpl::onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason r } } // namespace Server -} // namespace Envoy \ No newline at end of file +} // namespace Envoy diff --git a/source/server/lds_api.h b/source/server/lds_api.h index 00a415563668..8f0954c93d5c 100644 --- a/source/server/lds_api.h +++ b/source/server/lds_api.h @@ -4,6 +4,7 @@ #include "envoy/config/core/v3/config_source.pb.h" #include "envoy/config/listener/v3/listener.pb.h" +#include "envoy/config/listener/v3/listener.pb.validate.h" #include "envoy/config/subscription.h" #include "envoy/config/subscription_factory.h" #include "envoy/init/manager.h" @@ -34,17 +35,13 @@ class LdsApiImpl : public LdsApi, private: // Config::SubscriptionCallbacks - void onConfigUpdate(const Protobuf::RepeatedPtrField& resources, + void onConfigUpdate(const std::vector& resources, const std::string& version_info) override; - void onConfigUpdate( - const Protobuf::RepeatedPtrField& added_resources, - const Protobuf::RepeatedPtrField& removed_resources, - const std::string& system_version_info) override; + void onConfigUpdate(const std::vector& added_resources, + const Protobuf::RepeatedPtrField& removed_resources, + const std::string& system_version_info) override; void onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason reason, const EnvoyException* e) override; - std::string resourceName(const ProtobufWkt::Any& resource) override { - return MessageUtil::anyConvert(resource).name(); - } std::unique_ptr subscription_; std::string system_version_info_; @@ -52,7 +49,6 @@ class LdsApiImpl : public LdsApi, Stats::ScopePtr scope_; Upstream::ClusterManager& cm_; Init::TargetImpl init_target_; - ProtobufMessage::ValidationVisitor& validation_visitor_; }; } // namespace Server diff --git a/test/common/config/BUILD b/test/common/config/BUILD index 06cda3b3dfd6..42b59faa4aed 100644 --- a/test/common/config/BUILD +++ b/test/common/config/BUILD @@ -26,6 +26,16 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "decoded_resource_impl_test", + srcs = ["decoded_resource_impl_test.cc"], + deps = [ + "//source/common/config:decoded_resource_lib", + "//test/mocks/config:config_mocks", + "//test/test_common:utility_lib", + ], +) + envoy_cc_test( name = "delta_subscription_impl_test", srcs = ["delta_subscription_impl_test.cc"], @@ -248,6 +258,17 @@ envoy_cc_test_library( ], ) +envoy_cc_test( + name = "opaque_resource_decoder_impl_test", + srcs = ["opaque_resource_decoder_impl_test.cc"], + deps = [ + "//source/common/config:opaque_resource_decoder_lib", + "//source/common/protobuf:message_validator_lib", + "//test/test_common:utility_lib", + "@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto", + ], +) + envoy_cc_test( name = "subscription_factory_impl_test", srcs = ["subscription_factory_impl_test.cc"], diff --git a/test/common/config/config_provider_impl_test.cc b/test/common/config/config_provider_impl_test.cc index 63cdc00669d3..61d45456c485 100644 --- a/test/common/config/config_provider_impl_test.cc +++ b/test/common/config/config_provider_impl_test.cc @@ 
-79,16 +79,17 @@ class DummyConfigSubscription : public ConfigSubscriptionInstance, } // Envoy::Config::SubscriptionCallbacks - void onConfigUpdate(const Protobuf::RepeatedPtrField& resources, + void onConfigUpdate(const std::vector& resources, const std::string& version_info) override { - auto config = TestUtility::anyConvert(resources[0]); + const auto& config = + dynamic_cast(resources[0].get().resource()); if (checkAndApplyConfigUpdate(config, "dummy_config", version_info)) { config_proto_ = config; } ConfigSubscriptionCommonBase::onConfigUpdate(); } - void onConfigUpdate(const Protobuf::RepeatedPtrField&, + void onConfigUpdate(const std::vector&, const Protobuf::RepeatedPtrField&, const std::string&) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } @@ -97,10 +98,7 @@ class DummyConfigSubscription : public ConfigSubscriptionInstance, void onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason, const EnvoyException*) override {} - // Envoy::Config::SubscriptionCallbacks - std::string resourceName(const ProtobufWkt::Any&) override { return ""; } - - const absl::optional& config_proto() const { + const absl::optional& configProto() const { return config_proto_; } @@ -122,10 +120,10 @@ class DummyDynamicConfigProvider : public MutableConfigProviderCommonBase { // Envoy::Config::ConfigProvider const Protobuf::Message* getConfigProto() const override { - if (!subscription_->config_proto().has_value()) { + if (!subscription_->configProto().has_value()) { return nullptr; } - return &subscription_->config_proto().value(); + return &subscription_->configProto().value(); } std::string getConfigVersion() const override { return ""; } @@ -152,7 +150,7 @@ class DummyConfigProviderManager : public ConfigProviderManagerImplBase { auto* dynamic_config = config_dump->mutable_dynamic_dummy_configs()->Add(); dynamic_config->set_version_info(subscription->configInfo().value().last_config_version_); dynamic_config->mutable_dummy_config()->MergeFrom( - static_cast(subscription.get())->config_proto().value()); + static_cast(subscription.get())->configProto().value()); TimestampUtil::systemClockToTimestamp(subscription->lastUpdated(), *dynamic_config->mutable_last_updated()); } @@ -265,11 +263,12 @@ TEST_F(ConfigProviderImplTest, SharedOwnership) { EXPECT_FALSE(provider1->configProtoInfo().has_value()); Protobuf::RepeatedPtrField untyped_dummy_configs; - untyped_dummy_configs.Add()->PackFrom(parseDummyConfigFromYaml("a: a dummy config")); + const auto dummy_config = parseDummyConfigFromYaml("a: a dummy config"); DummyConfigSubscription& subscription = dynamic_cast(*provider1).subscription(); - subscription.onConfigUpdate(untyped_dummy_configs, "1"); + const auto decoded_resources = TestUtility::decodeResources({dummy_config}, "a"); + subscription.onConfigUpdate(decoded_resources.refvec_, "1"); // Check that a newly created provider with the same config source will share // the subscription, config proto and resulting ConfigProvider::Config. @@ -298,7 +297,7 @@ TEST_F(ConfigProviderImplTest, SharedOwnership) { dynamic_cast(*provider3) .subscription() - .onConfigUpdate(untyped_dummy_configs, "provider3"); + .onConfigUpdate(decoded_resources.refvec_, "provider3"); EXPECT_EQ(2UL, static_cast( provider_manager_->dumpConfigs().get()) @@ -364,15 +363,15 @@ TEST_F(ConfigProviderImplTest, DuplicateConfigProto) { auto& subscription = static_cast(typed_provider->subscription()); EXPECT_EQ(subscription.getConfig(), nullptr); // First time issuing a configUpdate(). A new ConfigProvider::Config should be created. 
- Protobuf::RepeatedPtrField untyped_dummy_configs; - untyped_dummy_configs.Add()->PackFrom(parseDummyConfigFromYaml("a: a dynamic dummy config")); - subscription.onConfigUpdate(untyped_dummy_configs, "1"); + const auto dummy_config = parseDummyConfigFromYaml("a: a dynamic dummy config"); + const auto decoded_resources = TestUtility::decodeResources({dummy_config}, "a"); + subscription.onConfigUpdate(decoded_resources.refvec_, "1"); EXPECT_NE(subscription.getConfig(), nullptr); auto config_ptr = subscription.getConfig(); EXPECT_EQ(typed_provider->config().get(), config_ptr.get()); // Second time issuing the configUpdate(), this time with a duplicate proto. A new // ConfigProvider::Config _should not_ be created. - subscription.onConfigUpdate(untyped_dummy_configs, "2"); + subscription.onConfigUpdate(decoded_resources.refvec_, "2"); EXPECT_EQ(config_ptr, subscription.getConfig()); EXPECT_EQ(typed_provider->config().get(), config_ptr.get()); } @@ -449,13 +448,13 @@ TEST_F(ConfigProviderImplTest, ConfigDump) { ConfigProviderManager::NullOptionalArg()); // Static + dynamic config dump. - Protobuf::RepeatedPtrField untyped_dummy_configs; - untyped_dummy_configs.Add()->PackFrom(parseDummyConfigFromYaml("a: a dynamic dummy config")); + const auto dummy_config = parseDummyConfigFromYaml("a: a dynamic dummy config"); timeSystem().setSystemTime(std::chrono::milliseconds(1234567891567)); DummyConfigSubscription& subscription = dynamic_cast(*dynamic_provider).subscription(); - subscription.onConfigUpdate(untyped_dummy_configs, "v1"); + const auto decoded_resources = TestUtility::decodeResources({dummy_config}, "a"); + subscription.onConfigUpdate(decoded_resources.refvec_, "v1"); message_ptr = server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_["dummy"](); const auto& dummy_config_dump3 = @@ -527,7 +526,7 @@ class DeltaDummyConfigSubscription : public DeltaConfigSubscriptionInstance, void start() override {} // Envoy::Config::SubscriptionCallbacks - void onConfigUpdate(const Protobuf::RepeatedPtrField& resources, + void onConfigUpdate(const std::vector& resources, const std::string& version_info) override { if (resources.empty()) { return; @@ -537,8 +536,9 @@ class DeltaDummyConfigSubscription : public DeltaConfigSubscriptionInstance, // config proto set (i.e., this is append only). Real xDS APIs will need to track additions, // updates and removals to the config set and apply the diffs to the underlying config // implementations. - for (const auto& resource_any : resources) { - auto dummy_config = TestUtility::anyConvert(resource_any); + for (const auto& resource : resources) { + const auto& dummy_config = + dynamic_cast(resource.get().resource()); proto_map_[version_info] = dummy_config; // Propagate the new config proto to all worker threads. 
applyConfigUpdate([&dummy_config](ConfigProvider::ConfigConstSharedPtr prev_config) @@ -553,7 +553,7 @@ class DeltaDummyConfigSubscription : public DeltaConfigSubscriptionInstance, ConfigSubscriptionCommonBase::onConfigUpdate(); setLastConfigInfo(absl::optional({absl::nullopt, version_info})); } - void onConfigUpdate(const Protobuf::RepeatedPtrField&, + void onConfigUpdate(const std::vector&, const Protobuf::RepeatedPtrField&, const std::string&) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } @@ -561,10 +561,6 @@ class DeltaDummyConfigSubscription : public DeltaConfigSubscriptionInstance, const EnvoyException*) override { ConfigSubscriptionCommonBase::onConfigUpdateFailed(); } - std::string resourceName(const ProtobufWkt::Any&) override { - return "test.common.config.DummyConfig"; - } - const ProtoMap& protoMap() const { return proto_map_; } private: @@ -689,13 +685,14 @@ TEST_F(DeltaConfigProviderImplTest, MultipleDeltaSubscriptions) { // No config protos have been received via the subscription yet. EXPECT_FALSE(provider1->configProtoInfoVector().has_value()); - Protobuf::RepeatedPtrField untyped_dummy_configs; - untyped_dummy_configs.Add()->PackFrom(parseDummyConfigFromYaml("a: a dummy config")); - untyped_dummy_configs.Add()->PackFrom(parseDummyConfigFromYaml("a: another dummy config")); + const auto dummy_config_0 = parseDummyConfigFromYaml("a: a dummy config"); + const auto dummy_config_1 = parseDummyConfigFromYaml("a: another dummy config"); + const auto decoded_resources = + TestUtility::decodeResources({dummy_config_0, dummy_config_1}, "a"); DeltaDummyConfigSubscription& subscription = dynamic_cast(*provider1).subscription(); - subscription.onConfigUpdate(untyped_dummy_configs, "1"); + subscription.onConfigUpdate(decoded_resources.refvec_, "1"); ConfigProviderPtr provider2 = provider_manager_->createXdsConfigProvider( config_source_proto, server_factory_context_, init_manager_, "dummy_prefix", @@ -716,7 +713,7 @@ TEST_F(DeltaConfigProviderImplTest, MultipleDeltaSubscriptions) { // Issue a second config update to validate that having multiple providers bound to the // subscription causes a single update to the underlying shared config implementation. - subscription.onConfigUpdate(untyped_dummy_configs, "2"); + subscription.onConfigUpdate(decoded_resources.refvec_, "2"); // NOTE: the config implementation is append only and _does not_ track updates/removals to the // config proto set, so the expectation is to double the size of the set. 
EXPECT_EQ(provider1->config().get(), diff --git a/test/common/config/decoded_resource_impl_test.cc b/test/common/config/decoded_resource_impl_test.cc new file mode 100644 index 000000000000..938d29611d33 --- /dev/null +++ b/test/common/config/decoded_resource_impl_test.cc @@ -0,0 +1,84 @@ +#include "common/config/decoded_resource_impl.h" + +#include "test/mocks/config/mocks.h" +#include "test/test_common/utility.h" + +#include "gtest/gtest.h" + +using ::testing::InvokeWithoutArgs; +using ::testing::Return; + +namespace Envoy { +namespace Config { +namespace { + +TEST(DecodedResourceImplTest, All) { + MockOpaqueResourceDecoder resource_decoder; + ProtobufWkt::Any some_opaque_resource; + some_opaque_resource.set_type_url("some_type_url"); + + { + EXPECT_CALL(resource_decoder, decodeResource(ProtoEq(some_opaque_resource))) + .WillOnce(InvokeWithoutArgs( + []() -> ProtobufTypes::MessagePtr { return std::make_unique(); })); + EXPECT_CALL(resource_decoder, resourceName(ProtoEq(ProtobufWkt::Empty()))) + .WillOnce(Return("some_name")); + DecodedResourceImpl decoded_resource(resource_decoder, some_opaque_resource, "foo"); + EXPECT_EQ("some_name", decoded_resource.name()); + EXPECT_TRUE(decoded_resource.aliases().empty()); + EXPECT_EQ("foo", decoded_resource.version()); + EXPECT_THAT(decoded_resource.resource(), ProtoEq(ProtobufWkt::Empty())); + EXPECT_TRUE(decoded_resource.hasResource()); + } + + { + envoy::service::discovery::v3::Resource resource_wrapper; + resource_wrapper.set_name("real_name"); + resource_wrapper.add_aliases("bar"); + resource_wrapper.add_aliases("baz"); + resource_wrapper.mutable_resource()->MergeFrom(some_opaque_resource); + resource_wrapper.set_version("foo"); + EXPECT_CALL(resource_decoder, decodeResource(ProtoEq(some_opaque_resource))) + .WillOnce(InvokeWithoutArgs( + []() -> ProtobufTypes::MessagePtr { return std::make_unique(); })); + EXPECT_CALL(resource_decoder, resourceName(ProtoEq(ProtobufWkt::Empty()))).Times(0); + DecodedResourceImpl decoded_resource(resource_decoder, resource_wrapper); + EXPECT_EQ("real_name", decoded_resource.name()); + EXPECT_EQ((std::vector{"bar", "baz"}), decoded_resource.aliases()); + EXPECT_EQ("foo", decoded_resource.version()); + EXPECT_THAT(decoded_resource.resource(), ProtoEq(ProtobufWkt::Empty())); + EXPECT_TRUE(decoded_resource.hasResource()); + } + + { + envoy::service::discovery::v3::Resource resource_wrapper; + resource_wrapper.set_name("real_name"); + resource_wrapper.set_version("foo"); + resource_wrapper.add_aliases("bar"); + resource_wrapper.add_aliases("baz"); + EXPECT_CALL(resource_decoder, decodeResource(ProtoEq(ProtobufWkt::Any()))) + .WillOnce(InvokeWithoutArgs( + []() -> ProtobufTypes::MessagePtr { return std::make_unique(); })); + EXPECT_CALL(resource_decoder, resourceName(_)).Times(0); + DecodedResourceImpl decoded_resource(resource_decoder, resource_wrapper); + EXPECT_EQ("real_name", decoded_resource.name()); + EXPECT_EQ((std::vector{"bar", "baz"}), decoded_resource.aliases()); + EXPECT_EQ("foo", decoded_resource.version()); + EXPECT_THAT(decoded_resource.resource(), ProtoEq(ProtobufWkt::Empty())); + EXPECT_FALSE(decoded_resource.hasResource()); + } + + { + auto message = std::make_unique(); + DecodedResourceImpl decoded_resource(std::move(message), "real_name", {"bar", "baz"}, "foo"); + EXPECT_EQ("real_name", decoded_resource.name()); + EXPECT_EQ((std::vector{"bar", "baz"}), decoded_resource.aliases()); + EXPECT_EQ("foo", decoded_resource.version()); + EXPECT_THAT(decoded_resource.resource(), 
ProtoEq(ProtobufWkt::Empty())); + EXPECT_TRUE(decoded_resource.hasResource()); + } +} + +} // namespace +} // namespace Config +} // namespace Envoy diff --git a/test/common/config/delta_subscription_impl_test.cc b/test/common/config/delta_subscription_impl_test.cc index 58a7ad0bb0fd..f93f9a742b6e 100644 --- a/test/common/config/delta_subscription_impl_test.cc +++ b/test/common/config/delta_subscription_impl_test.cc @@ -136,6 +136,7 @@ TEST(DeltaSubscriptionImplFixturelessTest, NoGrpcStream) { NiceMock random; Envoy::Config::RateLimitSettings rate_limit_settings; NiceMock callbacks; + NiceMock resource_decoder; auto* async_client = new Grpc::MockAsyncClient(); const Protobuf::MethodDescriptor* method_descriptor = @@ -147,8 +148,8 @@ TEST(DeltaSubscriptionImplFixturelessTest, NoGrpcStream) { local_info); std::unique_ptr subscription = std::make_unique( - xds_context, callbacks, stats, Config::TypeUrl::get().ClusterLoadAssignment, dispatcher, - std::chrono::milliseconds(12345), false); + xds_context, callbacks, resource_decoder, stats, Config::TypeUrl::get().ClusterLoadAssignment, + dispatcher, std::chrono::milliseconds(12345), false); EXPECT_CALL(*async_client, startRaw(_, _, _, _)).WillOnce(Return(nullptr)); diff --git a/test/common/config/delta_subscription_state_test.cc b/test/common/config/delta_subscription_state_test.cc index 474172e3c9a4..554ebe3884df 100644 --- a/test/common/config/delta_subscription_state_test.cc +++ b/test/common/config/delta_subscription_state_test.cc @@ -61,7 +61,7 @@ class DeltaSubscriptionStateTest : public testing::Test { return state_.handleResponse(message); } - NiceMock callbacks_; + NiceMock callbacks_; NiceMock local_info_; NiceMock dispatcher_; // We start out interested in three resources: name1, name2, and name3. 
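Several of the test harnesses above now construct a TestOpaqueResourceDecoderImpl keyed on "cluster_name" and hand it to each subscription. As a rough standalone sketch of what such a decoder is responsible for (simplified stand-in types and names, not Envoy's actual classes or signatures): decode the opaque payload into a typed message once, and report the resource's name from a designated field.

#include <iostream>
#include <memory>
#include <string>

// Stand-in for an opaque wire payload (Envoy uses ProtobufWkt::Any).
struct OpaquePayload {
  std::string type_url;
  std::string serialized_name; // pretend this is the serialized proto body
};

// Stand-in for a typed config proto.
struct ClusterLoadAssignment {
  std::string cluster_name;
};

// The decoder has two jobs: turn the opaque payload into a typed message,
// and extract the resource's name from a designated field.
class OpaqueResourceDecoder {
public:
  virtual ~OpaqueResourceDecoder() = default;
  virtual std::unique_ptr<ClusterLoadAssignment> decodeResource(const OpaquePayload& payload) = 0;
  virtual std::string resourceName(const ClusterLoadAssignment& resource) = 0;
};

class SimpleDecoder : public OpaqueResourceDecoder {
public:
  std::unique_ptr<ClusterLoadAssignment> decodeResource(const OpaquePayload& payload) override {
    // A real implementation would unpack the Any and run proto validation here.
    auto decoded = std::make_unique<ClusterLoadAssignment>();
    decoded->cluster_name = payload.serialized_name;
    return decoded;
  }
  std::string resourceName(const ClusterLoadAssignment& resource) override {
    return resource.cluster_name; // the "cluster_name" field named in the tests above
  }
};

int main() {
  SimpleDecoder decoder;
  const auto decoded = decoder.decodeResource({"example.ClusterLoadAssignment", "foo"});
  std::cout << decoder.resourceName(*decoded) << "\n"; // prints "foo"
}

Centralizing this decode-and-name step in the subscription layer is what allows the per-API resourceName() overrides and validation_visitor_ members to be deleted from CdsApiImpl, EdsClusterImpl, and LdsApiImpl above.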
diff --git a/test/common/config/delta_subscription_test_harness.h b/test/common/config/delta_subscription_test_harness.h index 31439cd84bdb..8e4a33bce05c 100644 --- a/test/common/config/delta_subscription_test_harness.h +++ b/test/common/config/delta_subscription_test_harness.h @@ -4,6 +4,7 @@ #include "envoy/config/core/v3/base.pb.h" #include "envoy/config/endpoint/v3/endpoint.pb.h" +#include "envoy/config/endpoint/v3/endpoint.pb.validate.h" #include "envoy/service/discovery/v3/discovery.pb.h" #include "common/config/grpc_subscription_impl.h" @@ -45,8 +46,8 @@ class DeltaSubscriptionTestHarness : public SubscriptionTestHarness { envoy::config::core::v3::ApiVersion::AUTO, random_, stats_store_, rate_limit_settings_, local_info_); subscription_ = std::make_unique( - xds_context_, callbacks_, stats_, Config::TypeUrl::get().ClusterLoadAssignment, dispatcher_, - init_fetch_timeout, false); + xds_context_, callbacks_, resource_decoder_, stats_, + Config::TypeUrl::get().ClusterLoadAssignment, dispatcher_, init_fetch_timeout, false); EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); } @@ -204,6 +205,8 @@ class DeltaSubscriptionTestHarness : public SubscriptionTestHarness { Event::MockTimer* init_timeout_timer_; envoy::config::core::v3::Node node_; NiceMock callbacks_; + TestUtility::TestOpaqueResourceDecoderImpl + resource_decoder_{"cluster_name"}; std::queue nonce_acks_required_; std::queue nonce_acks_sent_; bool subscription_started_{}; diff --git a/test/common/config/filesystem_subscription_impl_test.cc b/test/common/config/filesystem_subscription_impl_test.cc index 798c29b93821..cee04cea212a 100644 --- a/test/common/config/filesystem_subscription_impl_test.cc +++ b/test/common/config/filesystem_subscription_impl_test.cc @@ -48,8 +48,10 @@ TEST(MiscFilesystemSubscriptionImplTest, BadWatch) { EXPECT_CALL(dispatcher, createFilesystemWatcher_()).WillOnce(Return(watcher)); EXPECT_CALL(*watcher, addWatch(_, _, _)).WillOnce(Throw(EnvoyException("bad path"))); NiceMock callbacks; + NiceMock resource_decoder; EXPECT_THROW_WITH_MESSAGE(FilesystemSubscriptionImpl(dispatcher, "##!@/dev/null", callbacks, - stats, validation_visitor, *api), + resource_decoder, stats, validation_visitor, + *api), EnvoyException, "bad path"); } diff --git a/test/common/config/filesystem_subscription_test_harness.h b/test/common/config/filesystem_subscription_test_harness.h index 42bf2913e4a1..d8d721eb060a 100644 --- a/test/common/config/filesystem_subscription_test_harness.h +++ b/test/common/config/filesystem_subscription_test_harness.h @@ -3,6 +3,7 @@ #include #include "envoy/config/endpoint/v3/endpoint.pb.h" +#include "envoy/config/endpoint/v3/endpoint.pb.validate.h" #include "envoy/service/discovery/v3/discovery.pb.h" #include "common/config/filesystem_subscription_impl.h" @@ -31,7 +32,8 @@ class FilesystemSubscriptionTestHarness : public SubscriptionTestHarness { : path_(TestEnvironment::temporaryPath("eds.json")), api_(Api::createApiForTest(stats_store_, simTime())), dispatcher_(api_->allocateDispatcher("test_thread")), - subscription_(*dispatcher_, path_, callbacks_, stats_, validation_visitor_, *api_) {} + subscription_(*dispatcher_, path_, callbacks_, resource_decoder_, stats_, + validation_visitor_, *api_) {} ~FilesystemSubscriptionTestHarness() override { TestEnvironment::removePath(path_); } @@ -74,7 +76,10 @@ class FilesystemSubscriptionTestHarness : public SubscriptionTestHarness { file_json += "]}"; envoy::service::discovery::v3::DiscoveryResponse response_pb; 
TestUtility::loadFromJson(file_json, response_pb); - EXPECT_CALL(callbacks_, onConfigUpdate(RepeatedProtoEq(response_pb.resources()), version)) + const auto decoded_resources = + TestUtility::decodeResources( + response_pb, "cluster_name"); + EXPECT_CALL(callbacks_, onConfigUpdate(DecodedResourcesEq(decoded_resources.refvec_), version)) .WillOnce(ThrowOnRejectedConfig(accept)); if (accept) { version_ = version; @@ -114,6 +119,8 @@ class FilesystemSubscriptionTestHarness : public SubscriptionTestHarness { Api::ApiPtr api_; Event::DispatcherPtr dispatcher_; NiceMock callbacks_; + TestUtility::TestOpaqueResourceDecoderImpl + resource_decoder_{"cluster_name"}; FilesystemSubscriptionImpl subscription_; bool file_at_start_{false}; }; diff --git a/test/common/config/grpc_mux_impl_test.cc b/test/common/config/grpc_mux_impl_test.cc index 4f6abbc893c2..dc22bfdbb1a5 100644 --- a/test/common/config/grpc_mux_impl_test.cc +++ b/test/common/config/grpc_mux_impl_test.cc @@ -2,6 +2,7 @@ #include "envoy/api/v2/discovery.pb.h" #include "envoy/config/endpoint/v3/endpoint.pb.h" +#include "envoy/config/endpoint/v3/endpoint.pb.validate.h" #include "envoy/service/discovery/v3/discovery.pb.h" #include "common/common/empty_string.h" @@ -99,6 +100,7 @@ class GrpcMuxImplTestBase : public testing::Test { Grpc::MockAsyncStream async_stream_; std::unique_ptr grpc_mux_; NiceMock callbacks_; + NiceMock resource_decoder_; NiceMock local_info_; Stats::TestUtil::TestStore stats_; Envoy::Config::RateLimitSettings rate_limit_settings_; @@ -115,17 +117,17 @@ class GrpcMuxImplTest : public GrpcMuxImplTestBase { TEST_F(GrpcMuxImplTest, MultipleTypeUrlStreams) { setup(); InSequence s; - auto foo_sub = grpc_mux_->addWatch("foo", {"x", "y"}, callbacks_); - auto bar_sub = grpc_mux_->addWatch("bar", {}, callbacks_); + auto foo_sub = grpc_mux_->addWatch("foo", {"x", "y"}, callbacks_, resource_decoder_); + auto bar_sub = grpc_mux_->addWatch("bar", {}, callbacks_, resource_decoder_); EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); expectSendMessage("foo", {"x", "y"}, "", true); expectSendMessage("bar", {}, ""); grpc_mux_->start(); EXPECT_EQ(1, control_plane_connected_state_.value()); expectSendMessage("bar", {"z"}, ""); - auto bar_z_sub = grpc_mux_->addWatch("bar", {"z"}, callbacks_); + auto bar_z_sub = grpc_mux_->addWatch("bar", {"z"}, callbacks_, resource_decoder_); expectSendMessage("bar", {"zz", "z"}, ""); - auto bar_zz_sub = grpc_mux_->addWatch("bar", {"zz"}, callbacks_); + auto bar_zz_sub = grpc_mux_->addWatch("bar", {"zz"}, callbacks_, resource_decoder_); expectSendMessage("bar", {"z"}, ""); expectSendMessage("bar", {}, ""); expectSendMessage("foo", {}, ""); @@ -145,9 +147,9 @@ TEST_F(GrpcMuxImplTest, ResetStream) { })); setup(); - auto foo_sub = grpc_mux_->addWatch("foo", {"x", "y"}, callbacks_); - auto bar_sub = grpc_mux_->addWatch("bar", {}, callbacks_); - auto baz_sub = grpc_mux_->addWatch("baz", {"z"}, callbacks_); + auto foo_sub = grpc_mux_->addWatch("foo", {"x", "y"}, callbacks_, resource_decoder_); + auto bar_sub = grpc_mux_->addWatch("bar", {}, callbacks_, resource_decoder_); + auto baz_sub = grpc_mux_->addWatch("baz", {"z"}, callbacks_, resource_decoder_); EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); expectSendMessage("foo", {"x", "y"}, "", true); expectSendMessage("bar", {}, ""); @@ -176,7 +178,7 @@ TEST_F(GrpcMuxImplTest, ResetStream) { TEST_F(GrpcMuxImplTest, PauseResume) { setup(); InSequence s; - auto foo_sub = grpc_mux_->addWatch("foo", 
{"x", "y"}, callbacks_); + auto foo_sub = grpc_mux_->addWatch("foo", {"x", "y"}, callbacks_, resource_decoder_); grpc_mux_->pause("foo"); EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); grpc_mux_->start(); @@ -184,10 +186,10 @@ TEST_F(GrpcMuxImplTest, PauseResume) { grpc_mux_->resume("foo"); grpc_mux_->pause("bar"); expectSendMessage("foo", {"z", "x", "y"}, ""); - auto foo_z_sub = grpc_mux_->addWatch("foo", {"z"}, callbacks_); + auto foo_z_sub = grpc_mux_->addWatch("foo", {"z"}, callbacks_, resource_decoder_); grpc_mux_->resume("bar"); grpc_mux_->pause("foo"); - auto foo_zz_sub = grpc_mux_->addWatch("foo", {"zz"}, callbacks_); + auto foo_zz_sub = grpc_mux_->addWatch("foo", {"zz"}, callbacks_, resource_decoder_); expectSendMessage("foo", {"zz", "z", "x", "y"}, ""); grpc_mux_->resume("foo"); grpc_mux_->pause("foo"); @@ -199,7 +201,7 @@ TEST_F(GrpcMuxImplTest, TypeUrlMismatch) { auto invalid_response = std::make_unique(); InSequence s; - auto foo_sub = grpc_mux_->addWatch("foo", {"x", "y"}, callbacks_); + auto foo_sub = grpc_mux_->addWatch("foo", {"x", "y"}, callbacks_, resource_decoder_); EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); expectSendMessage("foo", {"x", "y"}, "", true); @@ -234,7 +236,7 @@ TEST_F(GrpcMuxImplTest, RpcErrorMessageTruncated) { setup(); auto invalid_response = std::make_unique(); InSequence s; - auto foo_sub = grpc_mux_->addWatch("foo", {"x", "y"}, callbacks_); + auto foo_sub = grpc_mux_->addWatch("foo", {"x", "y"}, callbacks_, resource_decoder_); EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); expectSendMessage("foo", {"x", "y"}, "", true); @@ -266,7 +268,9 @@ TEST_F(GrpcMuxImplTest, WildcardWatch) { InSequence s; const std::string& type_url = Config::TypeUrl::get().ClusterLoadAssignment; - auto foo_sub = grpc_mux_->addWatch(type_url, {}, callbacks_); + TestUtility::TestOpaqueResourceDecoderImpl + resource_decoder("cluster_name"); + auto foo_sub = grpc_mux_->addWatch(type_url, {}, callbacks_, resource_decoder); EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); expectSendMessage(type_url, {}, "", true); grpc_mux_->start(); @@ -279,15 +283,14 @@ TEST_F(GrpcMuxImplTest, WildcardWatch) { load_assignment.set_cluster_name("x"); response->add_resources()->PackFrom(API_DOWNGRADE(load_assignment)); EXPECT_CALL(callbacks_, onConfigUpdate(_, "1")) - .WillOnce( - Invoke([&load_assignment](const Protobuf::RepeatedPtrField& resources, - const std::string&) { - EXPECT_EQ(1, resources.size()); - envoy::config::endpoint::v3::ClusterLoadAssignment expected_assignment = - MessageUtil::anyConvert( - resources[0]); - EXPECT_TRUE(TestUtility::protoEqual(expected_assignment, load_assignment)); - })); + .WillOnce(Invoke([&load_assignment](const std::vector& resources, + const std::string&) { + EXPECT_EQ(1, resources.size()); + const auto& expected_assignment = + dynamic_cast( + resources[0].get().resource()); + EXPECT_TRUE(TestUtility::protoEqual(expected_assignment, load_assignment)); + })); expectSendMessage(type_url, {}, "1"); grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(response)); } @@ -297,11 +300,13 @@ TEST_F(GrpcMuxImplTest, WildcardWatch) { TEST_F(GrpcMuxImplTest, WatchDemux) { setup(); InSequence s; + TestUtility::TestOpaqueResourceDecoderImpl + resource_decoder("cluster_name"); const std::string& type_url = Config::TypeUrl::get().ClusterLoadAssignment; NiceMock foo_callbacks; - auto foo_sub = grpc_mux_->addWatch(type_url, 
{"x", "y"}, foo_callbacks); + auto foo_sub = grpc_mux_->addWatch(type_url, {"x", "y"}, foo_callbacks, resource_decoder); NiceMock bar_callbacks; - auto bar_sub = grpc_mux_->addWatch(type_url, {"y", "z"}, bar_callbacks); + auto bar_sub = grpc_mux_->addWatch(type_url, {"y", "z"}, bar_callbacks, resource_decoder); EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); // Should dedupe the "x" resource. expectSendMessage(type_url, {"y", "z", "x"}, "", true); @@ -316,15 +321,14 @@ TEST_F(GrpcMuxImplTest, WatchDemux) { response->add_resources()->PackFrom(API_DOWNGRADE(load_assignment)); EXPECT_CALL(bar_callbacks, onConfigUpdate(_, "1")).Times(0); EXPECT_CALL(foo_callbacks, onConfigUpdate(_, "1")) - .WillOnce( - Invoke([&load_assignment](const Protobuf::RepeatedPtrField& resources, - const std::string&) { - EXPECT_EQ(1, resources.size()); - envoy::config::endpoint::v3::ClusterLoadAssignment expected_assignment = - MessageUtil::anyConvert( - resources[0]); - EXPECT_TRUE(TestUtility::protoEqual(expected_assignment, load_assignment)); - })); + .WillOnce(Invoke([&load_assignment](const std::vector& resources, + const std::string&) { + EXPECT_EQ(1, resources.size()); + const auto& expected_assignment = + dynamic_cast( + resources[0].get().resource()); + EXPECT_TRUE(TestUtility::protoEqual(expected_assignment, load_assignment)); + })); expectSendMessage(type_url, {"y", "z", "x"}, "1"); grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(response)); } @@ -343,33 +347,31 @@ TEST_F(GrpcMuxImplTest, WatchDemux) { load_assignment_z.set_cluster_name("z"); response->add_resources()->PackFrom(API_DOWNGRADE(load_assignment_z)); EXPECT_CALL(bar_callbacks, onConfigUpdate(_, "2")) - .WillOnce(Invoke( - [&load_assignment_y, &load_assignment_z]( - const Protobuf::RepeatedPtrField& resources, const std::string&) { - EXPECT_EQ(2, resources.size()); - envoy::config::endpoint::v3::ClusterLoadAssignment expected_assignment = - MessageUtil::anyConvert( - resources[0]); - EXPECT_TRUE(TestUtility::protoEqual(expected_assignment, load_assignment_y)); - expected_assignment = - MessageUtil::anyConvert( - resources[1]); - EXPECT_TRUE(TestUtility::protoEqual(expected_assignment, load_assignment_z)); - })); + .WillOnce(Invoke([&load_assignment_y, &load_assignment_z]( + const std::vector& resources, const std::string&) { + EXPECT_EQ(2, resources.size()); + const auto& expected_assignment = + dynamic_cast( + resources[0].get().resource()); + EXPECT_TRUE(TestUtility::protoEqual(expected_assignment, load_assignment_y)); + const auto& expected_assignment_1 = + dynamic_cast( + resources[1].get().resource()); + EXPECT_TRUE(TestUtility::protoEqual(expected_assignment_1, load_assignment_z)); + })); EXPECT_CALL(foo_callbacks, onConfigUpdate(_, "2")) - .WillOnce(Invoke( - [&load_assignment_x, &load_assignment_y]( - const Protobuf::RepeatedPtrField& resources, const std::string&) { - EXPECT_EQ(2, resources.size()); - envoy::config::endpoint::v3::ClusterLoadAssignment expected_assignment = - MessageUtil::anyConvert( - resources[0]); - EXPECT_TRUE(TestUtility::protoEqual(expected_assignment, load_assignment_x)); - expected_assignment = - MessageUtil::anyConvert( - resources[1]); - EXPECT_TRUE(TestUtility::protoEqual(expected_assignment, load_assignment_y)); - })); + .WillOnce(Invoke([&load_assignment_x, &load_assignment_y]( + const std::vector& resources, const std::string&) { + EXPECT_EQ(2, resources.size()); + const auto& expected_assignment = + dynamic_cast( + resources[0].get().resource()); + 
EXPECT_TRUE(TestUtility::protoEqual(expected_assignment, load_assignment_x)); + const auto& expected_assignment_1 = + dynamic_cast( + resources[1].get().resource()); + EXPECT_TRUE(TestUtility::protoEqual(expected_assignment_1, load_assignment_y)); + })); expectSendMessage(type_url, {"y", "z", "x"}, "2"); grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(response)); } @@ -384,7 +386,7 @@ TEST_F(GrpcMuxImplTest, MultipleWatcherWithEmptyUpdates) { InSequence s; const std::string& type_url = Config::TypeUrl::get().ClusterLoadAssignment; NiceMock foo_callbacks; - auto foo_sub = grpc_mux_->addWatch(type_url, {"x", "y"}, foo_callbacks); + auto foo_sub = grpc_mux_->addWatch(type_url, {"x", "y"}, foo_callbacks, resource_decoder_); EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); expectSendMessage(type_url, {"x", "y"}, "", true); @@ -406,7 +408,7 @@ TEST_F(GrpcMuxImplTest, SingleWatcherWithEmptyUpdates) { setup(); const std::string& type_url = Config::TypeUrl::get().Cluster; NiceMock foo_callbacks; - auto foo_sub = grpc_mux_->addWatch(type_url, {}, foo_callbacks); + auto foo_sub = grpc_mux_->addWatch(type_url, {}, foo_callbacks, resource_decoder_); EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); expectSendMessage(type_url, {}, "", true); @@ -417,8 +419,9 @@ TEST_F(GrpcMuxImplTest, SingleWatcherWithEmptyUpdates) { response->set_version_info("1"); // Validate that onConfigUpdate is called with empty resources. EXPECT_CALL(foo_callbacks, onConfigUpdate(_, "1")) - .WillOnce(Invoke([](const Protobuf::RepeatedPtrField& resources, - const std::string&) { EXPECT_TRUE(resources.empty()); })); + .WillOnce(Invoke([](const std::vector& resources, const std::string&) { + EXPECT_TRUE(resources.empty()); + })); expectSendMessage(type_url, {}, "1"); grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(response)); } @@ -460,7 +463,7 @@ TEST_F(GrpcMuxImplTestWithMockTimeSystem, TooManyRequestsWithDefaultSettings) { } }; - auto foo_sub = grpc_mux_->addWatch("foo", {"x"}, callbacks_); + auto foo_sub = grpc_mux_->addWatch("foo", {"x"}, callbacks_, resource_decoder_); expectSendMessage("foo", {"x"}, "", true); grpc_mux_->start(); @@ -512,7 +515,7 @@ TEST_F(GrpcMuxImplTestWithMockTimeSystem, TooManyRequestsWithEmptyRateLimitSetti } }; - auto foo_sub = grpc_mux_->addWatch("foo", {"x"}, callbacks_); + auto foo_sub = grpc_mux_->addWatch("foo", {"x"}, callbacks_, resource_decoder_); expectSendMessage("foo", {"x"}, "", true); grpc_mux_->start(); @@ -567,7 +570,7 @@ TEST_F(GrpcMuxImplTest, TooManyRequestsWithCustomRateLimitSettings) { } }; - auto foo_sub = grpc_mux_->addWatch("foo", {"x"}, callbacks_); + auto foo_sub = grpc_mux_->addWatch("foo", {"x"}, callbacks_, resource_decoder_); expectSendMessage("foo", {"x"}, "", true); grpc_mux_->start(); @@ -604,7 +607,7 @@ TEST_F(GrpcMuxImplTest, UnwatchedTypeAcceptsEmptyResources) { { // subscribe and unsubscribe to simulate a cluster added and removed expectSendMessage(type_url, {"y"}, "", true); - auto temp_sub = grpc_mux_->addWatch(type_url, {"y"}, callbacks_); + auto temp_sub = grpc_mux_->addWatch(type_url, {"y"}, callbacks_, resource_decoder_); expectSendMessage(type_url, {}, ""); } @@ -624,7 +627,7 @@ TEST_F(GrpcMuxImplTest, UnwatchedTypeAcceptsEmptyResources) { expectSendMessage(type_url, {"x"}, "1", false, "bar"); // simulate a new cluster x is added. add CLA subscription for it. 
- auto sub = grpc_mux_->addWatch(type_url, {"x"}, callbacks_); + auto sub = grpc_mux_->addWatch(type_url, {"x"}, callbacks_, resource_decoder_); expectSendMessage(type_url, {}, "1", false, "bar"); } @@ -640,7 +643,7 @@ TEST_F(GrpcMuxImplTest, UnwatchedTypeRejectsResources) { // subscribe and unsubscribe (by not keeping the return watch) so that the type is known to envoy expectSendMessage(type_url, {"y"}, "", true); expectSendMessage(type_url, {}, ""); - grpc_mux_->addWatch(type_url, {"y"}, callbacks_); + grpc_mux_->addWatch(type_url, {"y"}, callbacks_, resource_decoder_); // simulate the server sending CLA message to notify envoy that the CLA was added, // even though envoy doesn't expect it. Envoy should reject this update. diff --git a/test/common/config/grpc_subscription_test_harness.h b/test/common/config/grpc_subscription_test_harness.h index 1673e9a5a411..b2c19e43f095 100644 --- a/test/common/config/grpc_subscription_test_harness.h +++ b/test/common/config/grpc_subscription_test_harness.h @@ -5,6 +5,7 @@ #include "envoy/api/v2/discovery.pb.h" #include "envoy/config/core/v3/base.pb.h" #include "envoy/config/endpoint/v3/endpoint.pb.h" +#include "envoy/config/endpoint/v3/endpoint.pb.validate.h" #include "envoy/service/discovery/v3/discovery.pb.h" #include "common/common/hash.h" @@ -54,8 +55,8 @@ class GrpcSubscriptionTestHarness : public SubscriptionTestHarness { *method_descriptor_, envoy::config::core::v3::ApiVersion::AUTO, random_, stats_store_, rate_limit_settings_, true); subscription_ = std::make_unique( - mux_, callbacks_, stats_, Config::TypeUrl::get().ClusterLoadAssignment, dispatcher_, - init_fetch_timeout, false); + mux_, callbacks_, resource_decoder_, stats_, Config::TypeUrl::get().ClusterLoadAssignment, + dispatcher_, init_fetch_timeout, false); } ~GrpcSubscriptionTestHarness() override { EXPECT_CALL(async_stream_, sendMessageRaw_(_, false)); } @@ -115,7 +116,10 @@ class GrpcSubscriptionTestHarness : public SubscriptionTestHarness { response->add_resources()->PackFrom(API_DOWNGRADE(*load_assignment)); } } - EXPECT_CALL(callbacks_, onConfigUpdate(RepeatedProtoEq(response->resources()), version)) + const auto decoded_resources = + TestUtility::decodeResources( + *response, "cluster_name"); + EXPECT_CALL(callbacks_, onConfigUpdate(DecodedResourcesEq(decoded_resources.refvec_), version)) .WillOnce(ThrowOnRejectedConfig(accept)); if (accept) { expectSendMessage(last_cluster_names_, version, false); @@ -179,6 +183,8 @@ class GrpcSubscriptionTestHarness : public SubscriptionTestHarness { Event::TimerCb timer_cb_; envoy::config::core::v3::Node node_; NiceMock callbacks_; + TestUtility::TestOpaqueResourceDecoderImpl + resource_decoder_{"cluster_name"}; NiceMock async_stream_; std::shared_ptr mux_; std::unique_ptr subscription_; diff --git a/test/common/config/http_subscription_test_harness.h b/test/common/config/http_subscription_test_harness.h index 0165c06edabd..95623490e21c 100644 --- a/test/common/config/http_subscription_test_harness.h +++ b/test/common/config/http_subscription_test_harness.h @@ -4,6 +4,7 @@ #include "envoy/config/core/v3/base.pb.h" #include "envoy/config/endpoint/v3/endpoint.pb.h" +#include "envoy/config/endpoint/v3/endpoint.pb.validate.h" #include "envoy/http/async_client.h" #include "envoy/service/discovery/v3/discovery.pb.h" @@ -51,7 +52,7 @@ class HttpSubscriptionTestHarness : public SubscriptionTestHarness { local_info_, cm_, "eds_cluster", dispatcher_, random_gen_, std::chrono::milliseconds(1), std::chrono::milliseconds(1000), *method_descriptor_, 
Config::TypeUrl::get().ClusterLoadAssignment, envoy::config::core::v3::ApiVersion::AUTO, - callbacks_, stats_, init_fetch_timeout, validation_visitor_); + callbacks_, resource_decoder_, stats_, init_fetch_timeout, validation_visitor_); } ~HttpSubscriptionTestHarness() override { @@ -139,9 +140,13 @@ class HttpSubscriptionTestHarness : public SubscriptionTestHarness { new Http::TestResponseHeaderMapImpl{{":status", response_code}}}; Http::ResponseMessagePtr message{new Http::ResponseMessageImpl(std::move(response_headers))}; message->body() = std::make_unique(response_json); + const auto decoded_resources = + TestUtility::decodeResources( + response_pb, "cluster_name"); if (modify) { - EXPECT_CALL(callbacks_, onConfigUpdate(RepeatedProtoEq(response_pb.resources()), version)) + EXPECT_CALL(callbacks_, + onConfigUpdate(DecodedResourcesEq(decoded_resources.refvec_), version)) .WillOnce(ThrowOnRejectedConfig(accept)); } if (!accept) { @@ -191,6 +196,8 @@ class HttpSubscriptionTestHarness : public SubscriptionTestHarness { Http::MockAsyncClientRequest http_request_; Http::AsyncClient::Callbacks* http_callbacks_; Config::MockSubscriptionCallbacks callbacks_; + TestUtility::TestOpaqueResourceDecoderImpl + resource_decoder_{"cluster_name"}; std::unique_ptr subscription_; NiceMock local_info_; Event::MockTimer* init_timeout_timer_; diff --git a/test/common/config/new_grpc_mux_impl_test.cc b/test/common/config/new_grpc_mux_impl_test.cc index b1b3b18f2d0a..b0e3f4683fc4 100644 --- a/test/common/config/new_grpc_mux_impl_test.cc +++ b/test/common/config/new_grpc_mux_impl_test.cc @@ -1,6 +1,7 @@ #include #include "envoy/config/endpoint/v3/endpoint.pb.h" +#include "envoy/config/endpoint/v3/endpoint.pb.validate.h" #include "envoy/service/discovery/v3/discovery.pb.h" #include "common/common/empty_string.h" @@ -60,6 +61,8 @@ class NewGrpcMuxImplTestBase : public testing::Test { NiceMock async_stream_; std::unique_ptr grpc_mux_; NiceMock callbacks_; + TestUtility::TestOpaqueResourceDecoderImpl + resource_decoder_{"cluster_name"}; NiceMock local_info_; Stats::TestUtil::TestStore stats_; Envoy::Config::RateLimitSettings rate_limit_settings_; @@ -77,7 +80,7 @@ TEST_F(NewGrpcMuxImplTest, DiscoveryResponseNonexistentSub) { setup(); const std::string& type_url = Config::TypeUrl::get().ClusterLoadAssignment; - auto watch = grpc_mux_->addWatch(type_url, {}, callbacks_); + auto watch = grpc_mux_->addWatch(type_url, {}, callbacks_, resource_decoder_); EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); grpc_mux_->start(); @@ -98,17 +101,13 @@ TEST_F(NewGrpcMuxImplTest, DiscoveryResponseNonexistentSub) { load_assignment.set_cluster_name("x"); response->add_resources()->mutable_resource()->PackFrom(API_DOWNGRADE(load_assignment)); EXPECT_CALL(callbacks_, onConfigUpdate(_, _, "1")) - .WillOnce( - Invoke([&load_assignment]( - const Protobuf::RepeatedPtrField& - added_resources, - const Protobuf::RepeatedPtrField&, const std::string&) { - EXPECT_EQ(1, added_resources.size()); - envoy::config::endpoint::v3::ClusterLoadAssignment expected_assignment = - MessageUtil::anyConvert( - added_resources[0].resource()); - EXPECT_TRUE(TestUtility::protoEqual(expected_assignment, load_assignment)); - })); + .WillOnce(Invoke([&load_assignment](const std::vector& added_resources, + const Protobuf::RepeatedPtrField&, + const std::string&) { + EXPECT_EQ(1, added_resources.size()); + EXPECT_TRUE( + TestUtility::protoEqual(added_resources[0].get().resource(), load_assignment)); + })); 
grpc_mux_->onDiscoveryResponse(std::move(response), control_plane_stats_); } } @@ -119,7 +118,7 @@ TEST_F(NewGrpcMuxImplTest, ConfigUpdateWithAliases) { setup(); const std::string& type_url = Config::TypeUrl::get().VirtualHost; - auto watch = grpc_mux_->addWatch(type_url, {"domain1.test"}, callbacks_); + auto watch = grpc_mux_->addWatch(type_url, {"domain1.test"}, callbacks_, resource_decoder_); EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); grpc_mux_->start(); @@ -154,7 +153,7 @@ TEST_F(NewGrpcMuxImplTest, ConfigUpdateWithNotFoundResponse) { setup(); const std::string& type_url = Config::TypeUrl::get().VirtualHost; - auto watch = grpc_mux_->addWatch(type_url, {"domain1.test"}, callbacks_); + auto watch = grpc_mux_->addWatch(type_url, {"domain1.test"}, callbacks_, resource_decoder_); EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); grpc_mux_->start(); diff --git a/test/common/config/opaque_resource_decoder_impl_test.cc b/test/common/config/opaque_resource_decoder_impl_test.cc new file mode 100644 index 000000000000..9aded46538ba --- /dev/null +++ b/test/common/config/opaque_resource_decoder_impl_test.cc @@ -0,0 +1,106 @@ +#include "envoy/config/endpoint/v3/endpoint.pb.h" +#include "envoy/config/endpoint/v3/endpoint.pb.validate.h" + +#include "common/config/opaque_resource_decoder_impl.h" +#include "common/protobuf/message_validator_impl.h" + +#include "test/test_common/utility.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace Config { +namespace { + +class OpaqueResourceDecoderImplTest : public testing::Test { +public: + std::pair + decodeTypedResource(const envoy::config::endpoint::v3::ClusterLoadAssignment& typed_resource) { + ProtobufWkt::Any opaque_resource; + opaque_resource.PackFrom(typed_resource); + auto decoded_resource = resource_decoder_.decodeResource(opaque_resource); + const std::string name = resource_decoder_.resourceName(*decoded_resource); + return {std::move(decoded_resource), name}; + } + + ProtobufMessage::StrictValidationVisitorImpl validation_visitor_; + OpaqueResourceDecoderImpl resource_decoder_{ + validation_visitor_, "cluster_name"}; +}; + +// Negative test for bad type URL in Any. +TEST_F(OpaqueResourceDecoderImplTest, WrongType) { + ProtobufWkt::Any opaque_resource; + opaque_resource.set_type_url("huh"); + EXPECT_THROW_WITH_REGEX(resource_decoder_.decodeResource(opaque_resource), EnvoyException, + "Unable to unpack"); +} + +// If the Any is empty (no type set), the default instance of the opaque resource decoder type is +// created. +TEST_F(OpaqueResourceDecoderImplTest, Empty) { + ProtobufWkt::Any opaque_resource; + const auto decoded_resource = resource_decoder_.decodeResource(opaque_resource); + EXPECT_THAT(*decoded_resource, ProtoEq(envoy::config::endpoint::v3::ClusterLoadAssignment())); + EXPECT_EQ("", resource_decoder_.resourceName(*decoded_resource)); +} + +// Negative test for protoc-gen-validate constraints. +TEST_F(OpaqueResourceDecoderImplTest, ValidateFail) { + envoy::config::endpoint::v3::ClusterLoadAssignment invalid_resource; + EXPECT_THROW(decodeTypedResource(invalid_resource), ProtoValidationException); +} + +// When validation is skipped, verify that we can ignore unknown fields. 
+TEST_F(OpaqueResourceDecoderImplTest, ValidateIgnored) { + ProtobufMessage::NullValidationVisitorImpl validation_visitor; + OpaqueResourceDecoderImpl resource_decoder{ + validation_visitor, "cluster_name"}; + envoy::config::endpoint::v3::ClusterLoadAssignment strange_resource; + strange_resource.set_cluster_name("fare"); + auto* unknown = strange_resource.GetReflection()->MutableUnknownFields(&strange_resource); + // add a field that doesn't exist in the proto definition: + unknown->AddFixed32(1000, 1); + ProtobufWkt::Any opaque_resource; + opaque_resource.PackFrom(strange_resource); + const auto decoded_resource = resource_decoder.decodeResource(opaque_resource); + EXPECT_THAT(*decoded_resource, ProtoEq(strange_resource)); + EXPECT_EQ("fare", resource_decoder_.resourceName(*decoded_resource)); +} + +// Handling of smuggled deprecated fields during Any conversion. +TEST_F(OpaqueResourceDecoderImplTest, HiddenEnvoyDeprecatedFields) { + // This test is only valid in API-v3, and should be updated for API-v4, as + // the deprecated fields of API-v2 will be removed. + envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment = + TestUtility::parseYaml(R"EOF( + cluster_name: fare + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 1.2.3.4 + port_value: 80 + policy: + overprovisioning_factor: 100 + hidden_envoy_deprecated_disable_overprovisioning: true + )EOF"); + EXPECT_THROW_WITH_REGEX(decodeTypedResource(cluster_load_assignment), ProtoValidationException, + "Illegal use of hidden_envoy_deprecated_ V2 field " + "'envoy.config.endpoint.v3.ClusterLoadAssignment.Policy.hidden_envoy_" + "deprecated_disable_overprovisioning'"); +} + +// Happy path. +TEST_F(OpaqueResourceDecoderImplTest, Success) { + envoy::config::endpoint::v3::ClusterLoadAssignment cluster_resource; + cluster_resource.set_cluster_name("foo"); + const auto result = decodeTypedResource(cluster_resource); + EXPECT_THAT(*result.first, ProtoEq(cluster_resource)); + EXPECT_EQ("foo", result.second); +} + +} // namespace +} // namespace Config +} // namespace Envoy diff --git a/test/common/config/subscription_factory_impl_test.cc b/test/common/config/subscription_factory_impl_test.cc index bc772b6cf073..b383841b5fe3 100644 --- a/test/common/config/subscription_factory_impl_test.cc +++ b/test/common/config/subscription_factory_impl_test.cc @@ -44,13 +44,15 @@ class SubscriptionFactoryTest : public testing::Test { std::unique_ptr subscriptionFromConfigSource(const envoy::config::core::v3::ConfigSource& config) { return subscription_factory_.subscriptionFromConfigSource( - config, Config::TypeUrl::get().ClusterLoadAssignment, stats_store_, callbacks_); + config, Config::TypeUrl::get().ClusterLoadAssignment, stats_store_, callbacks_, + resource_decoder_); } Upstream::MockClusterManager cm_; Event::MockDispatcher dispatcher_; Runtime::MockRandomGenerator random_; MockSubscriptionCallbacks callbacks_; + MockOpaqueResourceDecoder resource_decoder_; Http::MockAsyncClientRequest http_request_; Stats::MockIsolatedStatsStore stats_store_; NiceMock local_info_; @@ -308,7 +310,8 @@ TEST_F(SubscriptionFactoryTest, LogWarningOnDeprecatedApi) { EXPECT_LOG_CONTAINS( "warn", "xDS of version v2 has been deprecated", try { subscription_factory_.subscriptionFromConfigSource( - config, Config::TypeUrl::get().ClusterLoadAssignment, stats_store_, callbacks_); + config, Config::TypeUrl::get().ClusterLoadAssignment, stats_store_, callbacks_, + resource_decoder_); } catch (EnvoyException&){/* expected, we pass an 
empty configuration */}); } diff --git a/test/common/config/watch_map_test.cc b/test/common/config/watch_map_test.cc index 57f0aaba360f..8742979a9653 100644 --- a/test/common/config/watch_map_test.cc +++ b/test/common/config/watch_map_test.cc @@ -2,6 +2,7 @@ #include "envoy/common/exception.h" #include "envoy/config/endpoint/v3/endpoint.pb.h" +#include "envoy/config/endpoint/v3/endpoint.pb.validate.h" #include "envoy/service/discovery/v3/discovery.pb.h" #include "envoy/stats/scope.h" @@ -20,64 +21,51 @@ namespace Envoy { namespace Config { namespace { -class NamedMockSubscriptionCallbacks : public MockSubscriptionCallbacks { -public: - std::string resourceName(const ProtobufWkt::Any& resource) override { - return TestUtility::anyConvert(resource) - .cluster_name(); - } -}; - // expectDeltaAndSotwUpdate() EXPECTs two birds with one function call: we want to cover both SotW // and delta, which, while mechanically different, can behave identically for our testing purposes. // Specifically, as a simplification for these tests, every still-present resource is updated in // every update. Therefore, a resource can never show up in the SotW update but not the delta // update. We can therefore use the same expected_resources for both. void expectDeltaAndSotwUpdate( - NamedMockSubscriptionCallbacks& callbacks, + MockSubscriptionCallbacks& callbacks, const std::vector& expected_resources, const std::vector& expected_removals, const std::string& version) { EXPECT_CALL(callbacks, onConfigUpdate(_, version)) - .WillOnce(Invoke( - [expected_resources](const Protobuf::RepeatedPtrField& gotten_resources, - const std::string&) { - EXPECT_EQ(expected_resources.size(), gotten_resources.size()); - for (size_t i = 0; i < expected_resources.size(); i++) { - envoy::config::endpoint::v3::ClusterLoadAssignment cur_gotten_resource; - gotten_resources[i].UnpackTo(&cur_gotten_resource); - EXPECT_TRUE(TestUtility::protoEqual(cur_gotten_resource, expected_resources[i])); - } - })); + .WillOnce(Invoke([expected_resources](const std::vector& gotten_resources, + const std::string&) { + EXPECT_EQ(expected_resources.size(), gotten_resources.size()); + for (size_t i = 0; i < expected_resources.size(); i++) { + EXPECT_TRUE( + TestUtility::protoEqual(gotten_resources[i].get().resource(), expected_resources[i])); + } + })); EXPECT_CALL(callbacks, onConfigUpdate(_, _, _)) - .WillOnce( - Invoke([expected_resources, expected_removals, version]( - const Protobuf::RepeatedPtrField& - gotten_resources, - const Protobuf::RepeatedPtrField& removed_resources, - const std::string&) { - EXPECT_EQ(expected_resources.size(), gotten_resources.size()); - for (size_t i = 0; i < expected_resources.size(); i++) { - EXPECT_EQ(gotten_resources[i].version(), version); - envoy::config::endpoint::v3::ClusterLoadAssignment cur_gotten_resource; - gotten_resources[i].resource().UnpackTo(&cur_gotten_resource); - EXPECT_TRUE(TestUtility::protoEqual(cur_gotten_resource, expected_resources[i])); - } - EXPECT_EQ(expected_removals.size(), removed_resources.size()); - for (size_t i = 0; i < expected_removals.size(); i++) { - EXPECT_EQ(expected_removals[i], removed_resources[i]); - } - })); + .WillOnce(Invoke([expected_resources, expected_removals, + version](const std::vector& gotten_resources, + const Protobuf::RepeatedPtrField& removed_resources, + const std::string&) { + EXPECT_EQ(expected_resources.size(), gotten_resources.size()); + for (size_t i = 0; i < expected_resources.size(); i++) { + EXPECT_EQ(gotten_resources[i].get().version(), version); + 
EXPECT_TRUE( + TestUtility::protoEqual(gotten_resources[i].get().resource(), expected_resources[i])); + } + EXPECT_EQ(expected_removals.size(), removed_resources.size()); + for (size_t i = 0; i < expected_removals.size(); i++) { + EXPECT_EQ(expected_removals[i], removed_resources[i]); + } + })); } -void expectNoUpdate(NamedMockSubscriptionCallbacks& callbacks, const std::string& version) { +void expectNoUpdate(MockSubscriptionCallbacks& callbacks, const std::string& version) { EXPECT_CALL(callbacks, onConfigUpdate(_, version)).Times(0); EXPECT_CALL(callbacks, onConfigUpdate(_, _, version)).Times(0); } -void expectEmptySotwNoDeltaUpdate(NamedMockSubscriptionCallbacks& callbacks, +void expectEmptySotwNoDeltaUpdate(MockSubscriptionCallbacks& callbacks, const std::string& version) { EXPECT_CALL(callbacks, onConfigUpdate(_, version)) - .WillOnce(Invoke([](const Protobuf::RepeatedPtrField& gotten_resources, + .WillOnce(Invoke([](const std::vector& gotten_resources, const std::string&) { EXPECT_EQ(gotten_resources.size(), 0); })); EXPECT_CALL(callbacks, onConfigUpdate(_, _, version)).Times(0); } @@ -99,7 +87,7 @@ wrapInResource(const Protobuf::RepeatedPtrField& anys, // Similar to expectDeltaAndSotwUpdate(), but making the onConfigUpdate() happen, rather than // EXPECT-ing it. -void doDeltaAndSotwUpdate(SubscriptionCallbacks& watch_map, +void doDeltaAndSotwUpdate(WatchMap& watch_map, const Protobuf::RepeatedPtrField& sotw_resources, const std::vector& removed_names, const std::string& version) { @@ -118,9 +106,11 @@ void doDeltaAndSotwUpdate(SubscriptionCallbacks& watch_map, // resources it doesn't care about. Checks that the watch can later decide it does care about them, // and then receive subsequent updates to them. TEST(WatchMapTest, Basic) { - NamedMockSubscriptionCallbacks callbacks; + MockSubscriptionCallbacks callbacks; + TestUtility::TestOpaqueResourceDecoderImpl + resource_decoder("cluster_name"); WatchMap watch_map; - Watch* watch = watch_map.addWatch(callbacks); + Watch* watch = watch_map.addWatch(callbacks, resource_decoder); { // The watch is interested in Alice and Bob... @@ -182,11 +172,13 @@ TEST(WatchMapTest, Basic) { // NOTE: we need the resource name "dummy" to keep either watch from ever having no names watched, // which is treated as interest in all names. TEST(WatchMapTest, Overlap) { - NamedMockSubscriptionCallbacks callbacks1; - NamedMockSubscriptionCallbacks callbacks2; + MockSubscriptionCallbacks callbacks1; + MockSubscriptionCallbacks callbacks2; + TestUtility::TestOpaqueResourceDecoderImpl + resource_decoder("cluster_name"); WatchMap watch_map; - Watch* watch1 = watch_map.addWatch(callbacks1); - Watch* watch2 = watch_map.addWatch(callbacks2); + Watch* watch1 = watch_map.addWatch(callbacks1, resource_decoder); + Watch* watch2 = watch_map.addWatch(callbacks2, resource_decoder); Protobuf::RepeatedPtrField updated_resources; envoy::config::endpoint::v3::ClusterLoadAssignment alice; @@ -248,11 +240,13 @@ TEST(WatchMapTest, Overlap) { // NOTE: we need the resource name "dummy" to keep either watch from ever having no names watched, // which is treated as interest in all names. 
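One more note before the AddRemoveAdd test: the rewrite of expectDeltaAndSotwUpdate() above reflects the interface change this patch makes everywhere, namely that onConfigUpdate() now receives a vector of already-decoded resource references rather than a RepeatedPtrField of Any, so tests no longer unpack anything themselves. A sketch of how a callback consumes that vector, with the helper name being hypothetical:

    // Sketch only (types as used elsewhere in this diff): a SotW callback that
    // walks the pre-decoded resources; no Any unpacking is needed any more.
    void onConfigUpdate(const std::vector<Config::DecodedResourceRef>& resources,
                        const std::string& version_info) {
      for (const Config::DecodedResourceRef& ref : resources) {
        // ref.get().resource() is the typed, already-validated Protobuf::Message;
        // ref.get().version() carries the per-resource version in the delta variant.
        processTypedResource(ref.get().resource(), version_info); // hypothetical helper
      }
    }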
TEST(WatchMapTest, AddRemoveAdd) { - NamedMockSubscriptionCallbacks callbacks1; - NamedMockSubscriptionCallbacks callbacks2; + MockSubscriptionCallbacks callbacks1; + MockSubscriptionCallbacks callbacks2; + TestUtility::TestOpaqueResourceDecoderImpl + resource_decoder("cluster_name"); WatchMap watch_map; - Watch* watch1 = watch_map.addWatch(callbacks1); - Watch* watch2 = watch_map.addWatch(callbacks2); + Watch* watch1 = watch_map.addWatch(callbacks1, resource_decoder); + Watch* watch2 = watch_map.addWatch(callbacks2, resource_decoder); Protobuf::RepeatedPtrField updated_resources; envoy::config::endpoint::v3::ClusterLoadAssignment alice; @@ -301,9 +295,11 @@ TEST(WatchMapTest, AddRemoveAdd) { // Tests that nothing breaks if an update arrives that we entirely do not care about. TEST(WatchMapTest, UninterestingUpdate) { - NamedMockSubscriptionCallbacks callbacks; + MockSubscriptionCallbacks callbacks; + TestUtility::TestOpaqueResourceDecoderImpl + resource_decoder("cluster_name"); WatchMap watch_map; - Watch* watch = watch_map.addWatch(callbacks); + Watch* watch = watch_map.addWatch(callbacks, resource_decoder); watch_map.updateWatchInterest(watch, {"alice"}); Protobuf::RepeatedPtrField alice_update; @@ -342,11 +338,13 @@ TEST(WatchMapTest, UninterestingUpdate) { // Tests that a watch that specifies no particular resource interest is treated as interested in // everything. TEST(WatchMapTest, WatchingEverything) { - NamedMockSubscriptionCallbacks callbacks1; - NamedMockSubscriptionCallbacks callbacks2; + MockSubscriptionCallbacks callbacks1; + MockSubscriptionCallbacks callbacks2; + TestUtility::TestOpaqueResourceDecoderImpl + resource_decoder("cluster_name"); WatchMap watch_map; - /*Watch* watch1 = */ watch_map.addWatch(callbacks1); - Watch* watch2 = watch_map.addWatch(callbacks2); + /*Watch* watch1 = */ watch_map.addWatch(callbacks1, resource_decoder); + Watch* watch2 = watch_map.addWatch(callbacks2, resource_decoder); // watch1 never specifies any names, and so is treated as interested in everything. watch_map.updateWatchInterest(watch2, {"alice"}); @@ -375,13 +373,15 @@ TEST(WatchMapTest, WatchingEverything) { // onConfigUpdate even if none of the watch's interested resources are among the updated resources. // (Which ensures we deliver empty config updates when a resource is dropped.) 
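Before the DeltaOnConfigUpdate test here and the router, runtime, secret, and cluster tests further down, it is worth sketching the TestUtility::decodeResources pattern they all adopt: the helper appears to pack each typed proto, run it through an opaque decoder, and expose a refvec_ of references that can be handed straight to onConfigUpdate(). The explicit template argument below is an assumption, since template parameters are elided in this extracted diff, and `callbacks` stands in for whichever SubscriptionCallbacks a given test drives:

    // Sketch only: the shared test pattern, not part of the patch itself.
    envoy::config::route::v3::RouteConfiguration route_config;
    route_config.set_name("foo_route_config");
    const auto decoded_resources =
        TestUtility::decodeResources<envoy::config::route::v3::RouteConfiguration>({route_config});
    callbacks.onConfigUpdate(decoded_resources.refvec_, "1");     // state-of-the-world variant
    callbacks.onConfigUpdate(decoded_resources.refvec_, {}, "1"); // delta variant, no removals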
TEST(WatchMapTest, DeltaOnConfigUpdate) { - NamedMockSubscriptionCallbacks callbacks1; - NamedMockSubscriptionCallbacks callbacks2; - NamedMockSubscriptionCallbacks callbacks3; + MockSubscriptionCallbacks callbacks1; + MockSubscriptionCallbacks callbacks2; + MockSubscriptionCallbacks callbacks3; + TestUtility::TestOpaqueResourceDecoderImpl + resource_decoder("cluster_name"); WatchMap watch_map; - Watch* watch1 = watch_map.addWatch(callbacks1); - Watch* watch2 = watch_map.addWatch(callbacks2); - Watch* watch3 = watch_map.addWatch(callbacks3); + Watch* watch1 = watch_map.addWatch(callbacks1, resource_decoder); + Watch* watch2 = watch_map.addWatch(callbacks2, resource_decoder); + Watch* watch3 = watch_map.addWatch(callbacks3, resource_decoder); watch_map.updateWatchInterest(watch1, {"updated"}); watch_map.updateWatchInterest(watch2, {"updated", "removed"}); watch_map.updateWatchInterest(watch3, {"removed"}); @@ -415,10 +415,12 @@ TEST(WatchMapTest, OnConfigUpdateFailed) { // calling on empty map doesn't break watch_map.onConfigUpdateFailed(ConfigUpdateFailureReason::UpdateRejected, nullptr); - NamedMockSubscriptionCallbacks callbacks1; - NamedMockSubscriptionCallbacks callbacks2; - watch_map.addWatch(callbacks1); - watch_map.addWatch(callbacks2); + MockSubscriptionCallbacks callbacks1; + MockSubscriptionCallbacks callbacks2; + TestUtility::TestOpaqueResourceDecoderImpl + resource_decoder("cluster_name"); + watch_map.addWatch(callbacks1, resource_decoder); + watch_map.addWatch(callbacks2, resource_decoder); EXPECT_CALL(callbacks1, onConfigUpdateFailed(ConfigUpdateFailureReason::UpdateRejected, nullptr)); EXPECT_CALL(callbacks2, onConfigUpdateFailed(ConfigUpdateFailureReason::UpdateRejected, nullptr)); @@ -427,9 +429,11 @@ TEST(WatchMapTest, OnConfigUpdateFailed) { // verifies that a watch is updated with the resource name TEST(WatchMapTest, ConvertAliasWatchesToNameWatches) { - NamedMockSubscriptionCallbacks callbacks; + MockSubscriptionCallbacks callbacks; + TestUtility::TestOpaqueResourceDecoderImpl + resource_decoder("cluster_name"); WatchMap watch_map; - Watch* watch = watch_map.addWatch(callbacks); + Watch* watch = watch_map.addWatch(callbacks, resource_decoder); watch_map.updateWatchInterest(watch, {"alias"}); envoy::service::discovery::v3::Resource resource; @@ -448,9 +452,11 @@ TEST(WatchMapTest, ConvertAliasWatchesToNameWatches) { // verifies that if a resource contains an alias the same as its name, and the watch has been set // with that alias, the watch won't be updated TEST(WatchMapTest, ConvertAliasWatchesToNameWatchesAliasIsSameAsName) { - NamedMockSubscriptionCallbacks callbacks; + MockSubscriptionCallbacks callbacks; + TestUtility::TestOpaqueResourceDecoderImpl + resource_decoder("cluster_name"); WatchMap watch_map; - Watch* watch = watch_map.addWatch(callbacks); + Watch* watch = watch_map.addWatch(callbacks, resource_decoder); watch_map.updateWatchInterest(watch, {"name-and-alias"}); envoy::service::discovery::v3::Resource resource; diff --git a/test/common/router/rds_impl_test.cc b/test/common/router/rds_impl_test.cc index d5f87e046635..73342acededd 100644 --- a/test/common/router/rds_impl_test.cc +++ b/test/common/router/rds_impl_test.cc @@ -175,13 +175,15 @@ TEST_F(RdsImplTest, Basic) { )EOF"; auto response1 = TestUtility::parseYaml(response1_json); + const auto decoded_resources = + TestUtility::decodeResources(response1); EXPECT_CALL(init_watcher_, ready()); - rds_callbacks_->onConfigUpdate(response1.resources(), response1.version_info()); + 
rds_callbacks_->onConfigUpdate(decoded_resources.refvec_, response1.version_info()); EXPECT_EQ(nullptr, route(Http::TestRequestHeaderMapImpl{{":authority", "foo"}})); // 2nd request with same response. Based on hash should not reload config. - rds_callbacks_->onConfigUpdate(response1.resources(), response1.version_info()); + rds_callbacks_->onConfigUpdate(decoded_resources.refvec_, response1.version_info()); EXPECT_EQ(nullptr, route(Http::TestRequestHeaderMapImpl{{":authority", "foo"}})); // Load the config and verified shared count. @@ -220,10 +222,12 @@ TEST_F(RdsImplTest, Basic) { )EOF"; auto response2 = TestUtility::parseYaml(response2_json); + const auto decoded_resources_2 = + TestUtility::decodeResources(response2); // Make sure we don't lookup/verify clusters. EXPECT_CALL(server_factory_context_.cluster_manager_, get(Eq("bar"))).Times(0); - rds_callbacks_->onConfigUpdate(response2.resources(), response2.version_info()); + rds_callbacks_->onConfigUpdate(decoded_resources_2.refvec_, response2.version_info()); EXPECT_EQ("foo", route(Http::TestRequestHeaderMapImpl{{":authority", "foo"}, {":path", "/foo"}}) ->routeEntry() ->clusterName()); @@ -253,10 +257,12 @@ TEST_F(RdsImplTest, FailureInvalidConfig) { )EOF"; auto response1 = TestUtility::parseYaml(response1_json); + const auto decoded_resources = + TestUtility::decodeResources(response1); EXPECT_CALL(init_watcher_, ready()); EXPECT_THROW_WITH_MESSAGE( - rds_callbacks_->onConfigUpdate(response1.resources(), response1.version_info()), + rds_callbacks_->onConfigUpdate(decoded_resources.refvec_, response1.version_info()), EnvoyException, "Unexpected RDS configuration (expecting foo_route_config): INVALID_NAME_FOR_route_config"); } @@ -433,9 +439,11 @@ name: foo )EOF"; auto response1 = TestUtility::parseYaml(response1_json); + const auto decoded_resources = + TestUtility::decodeResources(response1); EXPECT_CALL(init_watcher_, ready()); - rds_callbacks_->onConfigUpdate(response1.resources(), response1.version_info()); + rds_callbacks_->onConfigUpdate(decoded_resources.refvec_, response1.version_info()); message_ptr = server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_["routes"](); const auto& route_config_dump3 = @@ -476,8 +484,7 @@ TEST_F(RouteConfigProviderManagerImplTest, Basic) { EXPECT_FALSE(provider_->configInfo().has_value()); - Protobuf::RepeatedPtrField route_configs; - route_configs.Add()->PackFrom(parseRouteConfigurationFromV2Yaml(R"EOF( + const auto route_config = parseRouteConfigurationFromV2Yaml(R"EOF( name: foo_route_config virtual_hosts: - name: bar @@ -485,10 +492,11 @@ name: foo_route_config routes: - match: { prefix: "/" } route: { cluster: baz } -)EOF")); +)EOF"); + const auto decoded_resources = TestUtility::decodeResources({route_config}); server_factory_context_.cluster_manager_.subscription_factory_.callbacks_->onConfigUpdate( - route_configs, "1"); + decoded_resources.refvec_, "1"); RouteConfigProviderSharedPtr provider2 = route_config_provider_manager_->createRdsRouteConfigProvider( @@ -512,7 +520,7 @@ name: foo_route_config rds2, server_factory_context_, "foo_prefix", outer_init_manager_); EXPECT_NE(provider3, provider_); server_factory_context_.cluster_manager_.subscription_factory_.callbacks_->onConfigUpdate( - route_configs, "provider3"); + decoded_resources.refvec_, "provider3"); EXPECT_EQ(2UL, route_config_provider_manager_->dumpRouteConfigs()->dynamic_route_configs().size()); @@ -557,8 +565,7 @@ TEST_F(RouteConfigProviderManagerImplTest, SameProviderOnTwoInitManager) { 
EXPECT_EQ(Init::Manager::State::Initializing, real_init_manager.state()); { - Protobuf::RepeatedPtrField route_configs; - route_configs.Add()->PackFrom(parseRouteConfigurationFromV2Yaml(R"EOF( + const auto route_config = parseRouteConfigurationFromV2Yaml(R"EOF( name: foo_route_config virtual_hosts: - name: bar @@ -566,10 +573,11 @@ name: foo_route_config routes: - match: { prefix: "/" } route: { cluster: baz } -)EOF")); +)EOF"); + const auto decoded_resources = TestUtility::decodeResources({route_config}); server_factory_context_.cluster_manager_.subscription_factory_.callbacks_->onConfigUpdate( - route_configs, "1"); + decoded_resources.refvec_, "1"); EXPECT_TRUE(provider_->configInfo().has_value()); EXPECT_TRUE(provider2->configInfo().has_value()); @@ -577,20 +585,6 @@ name: foo_route_config } } -// Negative test for protoc-gen-validate constraints. -TEST_F(RouteConfigProviderManagerImplTest, ValidateFail) { - setup(); - Protobuf::RepeatedPtrField route_configs; - envoy::config::route::v3::RouteConfiguration route_config; - route_config.set_name("foo_route_config"); - route_config.mutable_virtual_hosts()->Add(); - route_configs.Add()->PackFrom(route_config); - EXPECT_THROW( - server_factory_context_.cluster_manager_.subscription_factory_.callbacks_->onConfigUpdate( - route_configs, ""), - ProtoValidationException); -} - TEST_F(RouteConfigProviderManagerImplTest, OnConfigUpdateEmpty) { setup(); EXPECT_CALL(*server_factory_context_.cluster_manager_.subscription_factory_.subscription_, @@ -605,13 +599,12 @@ TEST_F(RouteConfigProviderManagerImplTest, OnConfigUpdateWrongSize) { EXPECT_CALL(*server_factory_context_.cluster_manager_.subscription_factory_.subscription_, start(_)); outer_init_manager_.initialize(init_watcher_); - Protobuf::RepeatedPtrField route_configs; - route_configs.Add(); - route_configs.Add(); + envoy::config::route::v3::RouteConfiguration route_config; + const auto decoded_resources = TestUtility::decodeResources({route_config, route_config}); EXPECT_CALL(init_watcher_, ready()); EXPECT_THROW_WITH_MESSAGE( server_factory_context_.cluster_manager_.subscription_factory_.callbacks_->onConfigUpdate( - route_configs, ""), + decoded_resources.refvec_, ""), EnvoyException, "Unexpected RDS resource length: 2"); } @@ -664,11 +657,13 @@ version_info: '1' )EOF"; auto response1 = TestUtility::parseYaml(response1_yaml); + const auto decoded_resources = + TestUtility::decodeResources(response1); EXPECT_CALL(init_watcher_, ready()); EXPECT_THROW_WITH_MESSAGE( - rds_callbacks_->onConfigUpdate(response1.resources(), response1.version_info()), + rds_callbacks_->onConfigUpdate(decoded_resources.refvec_, response1.version_info()), EnvoyException, "Only a single wildcard domain is permitted"); message_ptr = diff --git a/test/common/router/scoped_rds_test.cc b/test/common/router/scoped_rds_test.cc index 4aa8b8012d4d..bc0e119d6699 100644 --- a/test/common/router/scoped_rds_test.cc +++ b/test/common/router/scoped_rds_test.cc @@ -6,6 +6,7 @@ #include "envoy/config/core/v3/config_source.pb.h" #include "envoy/config/route/v3/route.pb.h" #include "envoy/config/route/v3/scoped_route.pb.h" +#include "envoy/config/route/v3/scoped_route.pb.validate.h" #include "envoy/config/subscription.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" #include "envoy/init/manager.h" @@ -51,11 +52,6 @@ parseScopedRouteConfigurationFromYaml(const std::string& yaml) { return scoped_route_config; } -void parseScopedRouteConfigurationFromYaml(ProtobufWkt::Any& 
scoped_route_config, - const std::string& yaml) { - scoped_route_config.PackFrom(parseScopedRouteConfigurationFromYaml(yaml)); -} - envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager parseHttpConnectionManagerFromYaml(const std::string& config_yaml) { envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager @@ -125,7 +121,7 @@ class ScopedRdsTest : public ScopedRoutesTestBase { // srds subscription EXPECT_CALL(server_factory_context_.cluster_manager_.subscription_factory_, - subscriptionFromConfigSource(_, _, _, _)) + subscriptionFromConfigSource(_, _, _, _, _)) .Times(AnyNumber()); // rds subscription EXPECT_CALL( @@ -134,11 +130,12 @@ class ScopedRdsTest : public ScopedRoutesTestBase { _, Eq(Grpc::Common::typeUrl( API_NO_BOOST(envoy::api::v2::RouteConfiguration)().GetDescriptor()->full_name())), - _, _)) + _, _, _)) .Times(AnyNumber()) .WillRepeatedly(Invoke([this](const envoy::config::core::v3::ConfigSource&, absl::string_view, Stats::Scope&, - Envoy::Config::SubscriptionCallbacks& callbacks) { + Envoy::Config::SubscriptionCallbacks& callbacks, + Envoy::Config::OpaqueResourceDecoder&) { auto ret = std::make_unique>(); rds_subscription_by_config_subscription_[ret.get()] = &callbacks; EXPECT_CALL(*ret, start(_)) @@ -197,11 +194,11 @@ name: foo_scoped_routes route: {{ cluster: bluh }} )EOF"; for (const std::string& name : route_config_names) { - Protobuf::RepeatedPtrField resources; - resources.Add()->PackFrom( + const auto route_config = TestUtility::parseYaml( - fmt::format(route_config_tmpl, name))); - rds_subscription_by_name_[name]->onConfigUpdate(resources, version); + fmt::format(route_config_tmpl, name)); + const auto decoded_resources = TestUtility::decodeResources({route_config}); + rds_subscription_by_name_[name]->onConfigUpdate(decoded_resources.refvec_, version); } } @@ -224,54 +221,6 @@ name: foo_scoped_routes absl::flat_hash_map rds_subscription_by_name_; }; -TEST_F(ScopedRdsTest, ValidateFail) { - setup(); - - // 'name' validation: value must be > 1 byte. - const std::string config_yaml = R"EOF( -name: -route_configuration_name: foo_routes -key: - fragments: - - string_key: x-foo-key -)EOF"; - Protobuf::RepeatedPtrField resources; - parseScopedRouteConfigurationFromYaml(*resources.Add(), config_yaml); - EXPECT_THROW(srds_subscription_->onConfigUpdate(resources, "1"), ProtoValidationException); - EXPECT_THROW_WITH_REGEX( - srds_subscription_->onConfigUpdate(anyToResource(resources, "1"), {}, "1"), EnvoyException, - "Error adding/updating scoped route\\(s\\): Proto constraint validation failed.*"); - - // 'route_configuration_name' validation: value must be > 1 byte. - const std::string config_yaml2 = R"EOF( -name: foo_scope -route_configuration_name: -key: - fragments: - - string_key: x-foo-key -)EOF"; - Protobuf::RepeatedPtrField resources2; - parseScopedRouteConfigurationFromYaml(*resources2.Add(), config_yaml2); - EXPECT_THROW(srds_subscription_->onConfigUpdate(resources2, "1"), ProtoValidationException); - EXPECT_THROW_WITH_REGEX( - srds_subscription_->onConfigUpdate(anyToResource(resources2, "1"), {}, "1"), EnvoyException, - "Error adding/updating scoped route\\(s\\): Proto constraint validation failed.*"); - - // 'key' validation: must define at least 1 fragment. 
- const std::string config_yaml3 = R"EOF( -name: foo_scope -route_configuration_name: foo_routes -key: -)EOF"; - Protobuf::RepeatedPtrField resources3; - parseScopedRouteConfigurationFromYaml(*resources3.Add(), config_yaml3); - EXPECT_THROW(srds_subscription_->onConfigUpdate(resources3, "1"), ProtoValidationException); - EXPECT_THROW_WITH_REGEX( - srds_subscription_->onConfigUpdate(anyToResource(resources3, "1"), {}, "1"), EnvoyException, - "Error adding/updating scoped route\\(s\\): Proto constraint validation failed .*value is " - "required.*"); -} - // Tests that multiple uniquely named non-conflict resources are allowed in config updates. TEST_F(ScopedRdsTest, MultipleResourcesSotw) { setup(); @@ -283,8 +232,7 @@ route_configuration_name: foo_routes fragments: - string_key: x-foo-key )EOF"; - Protobuf::RepeatedPtrField resources; - parseScopedRouteConfigurationFromYaml(*resources.Add(), config_yaml); + const auto resource = parseScopedRouteConfigurationFromYaml(config_yaml); const std::string config_yaml2 = R"EOF( name: foo_scope2 route_configuration_name: foo_routes @@ -292,10 +240,11 @@ route_configuration_name: foo_routes fragments: - string_key: x-bar-key )EOF"; - parseScopedRouteConfigurationFromYaml(*resources.Add(), config_yaml2); + const auto resource_2 = parseScopedRouteConfigurationFromYaml(config_yaml2); init_watcher_.expectReady().Times(1); // Only the SRDS parent_init_target_. context_init_manager_.initialize(init_watcher_); - EXPECT_NO_THROW(srds_subscription_->onConfigUpdate(resources, "1")); + const auto decoded_resources = TestUtility::decodeResources({resource, resource_2}); + EXPECT_NO_THROW(srds_subscription_->onConfigUpdate(decoded_resources.refvec_, "1")); EXPECT_EQ(1UL, server_factory_context_.scope_.counter("foo.scoped_rds.foo_scoped_routes.config_reload") .value()); @@ -328,8 +277,8 @@ route_configuration_name: foo_routes "foo_routes"); // Delete foo_scope2. - resources.RemoveLast(); - EXPECT_NO_THROW(srds_subscription_->onConfigUpdate(resources, "3")); + const auto decoded_resources_2 = TestUtility::decodeResources({resource}); + EXPECT_NO_THROW(srds_subscription_->onConfigUpdate(decoded_resources_2.refvec_, "3")); EXPECT_EQ(getScopedRouteMap().size(), 1); EXPECT_EQ(getScopedRouteMap().count("foo_scope"), 1); EXPECT_EQ(2UL, @@ -357,8 +306,7 @@ route_configuration_name: foo_routes fragments: - string_key: x-foo-key )EOF"; - Protobuf::RepeatedPtrField resources; - parseScopedRouteConfigurationFromYaml(*resources.Add(), config_yaml); + const auto resource = parseScopedRouteConfigurationFromYaml(config_yaml); const std::string config_yaml2 = R"EOF( name: foo_scope2 route_configuration_name: foo_routes @@ -366,10 +314,11 @@ route_configuration_name: foo_routes fragments: - string_key: x-bar-key )EOF"; - parseScopedRouteConfigurationFromYaml(*resources.Add(), config_yaml2); + const auto resource_2 = parseScopedRouteConfigurationFromYaml(config_yaml2); // Delta API. - EXPECT_NO_THROW(srds_subscription_->onConfigUpdate(anyToResource(resources, "2"), {}, "1")); + const auto decoded_resources = TestUtility::decodeResources({resource, resource_2}); + EXPECT_NO_THROW(srds_subscription_->onConfigUpdate(decoded_resources.refvec_, {}, "1")); context_init_manager_.initialize(init_watcher_); EXPECT_EQ(1UL, server_factory_context_.scope_.counter("foo.scoped_rds.foo_scoped_routes.config_reload") @@ -404,10 +353,10 @@ route_configuration_name: foo_routes "foo_routes"); // Delete foo_scope2. 
- resources.RemoveLast(); Protobuf::RepeatedPtrField deletes; *deletes.Add() = "foo_scope2"; - EXPECT_NO_THROW(srds_subscription_->onConfigUpdate(anyToResource(resources, "4"), deletes, "2")); + const auto decoded_resources_2 = TestUtility::decodeResources({resource}); + EXPECT_NO_THROW(srds_subscription_->onConfigUpdate(decoded_resources_2.refvec_, deletes, "2")); EXPECT_EQ(getScopedRouteMap().size(), 1); EXPECT_EQ(getScopedRouteMap().count("foo_scope"), 1); EXPECT_EQ(2UL, @@ -435,8 +384,7 @@ route_configuration_name: foo_routes fragments: - string_key: x-foo-key )EOF"; - Protobuf::RepeatedPtrField resources; - parseScopedRouteConfigurationFromYaml(*resources.Add(), config_yaml); + const auto resource = parseScopedRouteConfigurationFromYaml(config_yaml); const std::string config_yaml2 = R"EOF( name: foo_scope2 route_configuration_name: foo_routes @@ -444,11 +392,12 @@ route_configuration_name: foo_routes fragments: - string_key: x-foo-key )EOF"; - parseScopedRouteConfigurationFromYaml(*resources.Add(), config_yaml2); + const auto resource_2 = parseScopedRouteConfigurationFromYaml(config_yaml2); init_watcher_.expectReady().Times(0); // The onConfigUpdate will simply throw an exception. context_init_manager_.initialize(init_watcher_); + const auto decoded_resources = TestUtility::decodeResources({resource, resource_2}); EXPECT_THROW_WITH_REGEX( - srds_subscription_->onConfigUpdate(resources, "1"), EnvoyException, + srds_subscription_->onConfigUpdate(decoded_resources.refvec_, "1"), EnvoyException, ".*scope key conflict found, first scope is 'foo_scope', second scope is 'foo_scope2'"); EXPECT_EQ( // Fully rejected. @@ -475,8 +424,7 @@ route_configuration_name: foo_routes fragments: - string_key: x-foo-key )EOF"; - Protobuf::RepeatedPtrField resources; - parseScopedRouteConfigurationFromYaml(*resources.Add(), config_yaml); + const auto resource = parseScopedRouteConfigurationFromYaml(config_yaml); const std::string config_yaml2 = R"EOF( name: foo_scope2 route_configuration_name: foo_routes @@ -484,12 +432,13 @@ route_configuration_name: foo_routes fragments: - string_key: x-foo-key )EOF"; - parseScopedRouteConfigurationFromYaml(*resources.Add(), config_yaml2); + const auto resource_2 = parseScopedRouteConfigurationFromYaml(config_yaml2); init_watcher_.expectReady().Times(1); // Partial success gets the subscription ready. context_init_manager_.initialize(init_watcher_); + const auto decoded_resources = TestUtility::decodeResources({resource, resource_2}); EXPECT_THROW_WITH_REGEX( - srds_subscription_->onConfigUpdate(anyToResource(resources, "2"), {}, "2"), EnvoyException, + srds_subscription_->onConfigUpdate(decoded_resources.refvec_, {}, "2"), EnvoyException, ".*scope key conflict found, first scope is 'foo_scope', second scope is 'foo_scope2'"); EXPECT_EQ( // Partially reject. 
@@ -527,10 +476,10 @@ route_configuration_name: bar_routes fragments: - string_key: x-bar-key )EOF"; - Protobuf::RepeatedPtrField resources; - parseScopedRouteConfigurationFromYaml(*resources.Add(), config_yaml1); - parseScopedRouteConfigurationFromYaml(*resources.Add(), config_yaml2); - EXPECT_NO_THROW(srds_subscription_->onConfigUpdate(resources, "1")); + const auto resource = parseScopedRouteConfigurationFromYaml(config_yaml1); + const auto resource_2 = parseScopedRouteConfigurationFromYaml(config_yaml2); + const auto decoded_resources = TestUtility::decodeResources({resource, resource_2}); + EXPECT_NO_THROW(srds_subscription_->onConfigUpdate(decoded_resources.refvec_, "1")); EXPECT_EQ(1UL, server_factory_context_.scope_.counter("foo.scoped_rds.foo_scoped_routes.config_reload") .value()); @@ -563,12 +512,11 @@ route_configuration_name: foo_routes fragments: - string_key: x-foo-key )EOF"; - resources.Clear(); // Remove foo_scope1 and add a new scope3 reuses the same scope_key. - parseScopedRouteConfigurationFromYaml(*resources.Add(), config_yaml2); - parseScopedRouteConfigurationFromYaml(*resources.Add(), config_yaml3); - EXPECT_NO_THROW(srds_subscription_->onConfigUpdate(resources, "2")); + const auto resource_3 = parseScopedRouteConfigurationFromYaml(config_yaml3); + const auto decoded_resources_2 = TestUtility::decodeResources({resource_2, resource_3}); + EXPECT_NO_THROW(srds_subscription_->onConfigUpdate(decoded_resources_2.refvec_, "2")); EXPECT_EQ(2UL, server_factory_context_.scope_.counter("foo.scoped_rds.foo_scoped_routes.config_reload") .value()); @@ -593,12 +541,11 @@ route_configuration_name: foo_routes fragments: - string_key: x-bar-key )EOF"; - resources.Clear(); - parseScopedRouteConfigurationFromYaml(*resources.Add(), config_yaml2); - parseScopedRouteConfigurationFromYaml(*resources.Add(), config_yaml3); - parseScopedRouteConfigurationFromYaml(*resources.Add(), config_yaml4); + const auto resource_4 = parseScopedRouteConfigurationFromYaml(config_yaml4); + const auto decoded_resources_3 = + TestUtility::decodeResources({resource_2, resource_3, resource_4}); EXPECT_THROW_WITH_REGEX( - srds_subscription_->onConfigUpdate(resources, "3"), EnvoyException, + srds_subscription_->onConfigUpdate(decoded_resources_3.refvec_, "3"), EnvoyException, "scope key conflict found, first scope is 'foo_scope2', second scope is 'foo_scope4'"); EXPECT_EQ(getScopedRouteMap().size(), 2UL); EXPECT_EQ(getScopedRouteMap().count("foo_scope1"), 0); @@ -611,10 +558,8 @@ route_configuration_name: foo_routes "bar_routes"); // Delete foo_scope2, and push a new foo_scope4 with the same scope key but different route-table. 
- resources.Clear(); - parseScopedRouteConfigurationFromYaml(*resources.Add(), config_yaml3); - parseScopedRouteConfigurationFromYaml(*resources.Add(), config_yaml4); - EXPECT_NO_THROW(srds_subscription_->onConfigUpdate(resources, "4")); + const auto decoded_resources_4 = TestUtility::decodeResources({resource_3, resource_4}); + EXPECT_NO_THROW(srds_subscription_->onConfigUpdate(decoded_resources_4.refvec_, "4")); EXPECT_EQ(server_factory_context_.scope_.counter("foo.scoped_rds.foo_scoped_routes.config_reload") .value(), 3UL); @@ -647,10 +592,10 @@ route_configuration_name: foo_routes fragments: - string_key: x-foo-key )EOF"; - Protobuf::RepeatedPtrField resources; - parseScopedRouteConfigurationFromYaml(*resources.Add(), config_yaml); - parseScopedRouteConfigurationFromYaml(*resources.Add(), config_yaml); - EXPECT_THROW_WITH_MESSAGE(srds_subscription_->onConfigUpdate(resources, "1"), EnvoyException, + const auto resource = parseScopedRouteConfigurationFromYaml(config_yaml); + const auto decoded_resources = TestUtility::decodeResources({resource, resource}); + EXPECT_THROW_WITH_MESSAGE(srds_subscription_->onConfigUpdate(decoded_resources.refvec_, "1"), + EnvoyException, "duplicate scoped route configuration 'foo_scope' found"); } @@ -667,11 +612,10 @@ route_configuration_name: foo_routes fragments: - string_key: x-foo-key )EOF"; - Protobuf::RepeatedPtrField resources; - parseScopedRouteConfigurationFromYaml(*resources.Add(), config_yaml); - parseScopedRouteConfigurationFromYaml(*resources.Add(), config_yaml); + const auto resource = parseScopedRouteConfigurationFromYaml(config_yaml); + const auto decoded_resources = TestUtility::decodeResources({resource, resource}); EXPECT_THROW_WITH_MESSAGE( - srds_subscription_->onConfigUpdate(anyToResource(resources, "1"), {}, "1"), EnvoyException, + srds_subscription_->onConfigUpdate(decoded_resources.refvec_, {}, "1"), EnvoyException, "Error adding/updating scoped route(s): duplicate scoped route configuration 'foo_scope' " "found"); EXPECT_EQ( @@ -782,15 +726,16 @@ stat_prefix: foo // Now SRDS kicks off. 
Protobuf::RepeatedPtrField resources; - resources.Add()->PackFrom(parseScopedRouteConfigurationFromYaml(R"EOF( + const auto resource = parseScopedRouteConfigurationFromYaml(R"EOF( name: dynamic-foo route_configuration_name: dynamic-foo-route-config key: fragments: { string_key: "172.30.30.10" } -)EOF")); +)EOF"); timeSystem().setSystemTime(std::chrono::milliseconds(1234567891567)); - srds_subscription_->onConfigUpdate(resources, "1"); + const auto decoded_resources = TestUtility::decodeResources({resource}); + srds_subscription_->onConfigUpdate(decoded_resources.refvec_, "1"); TestUtility::loadFromYaml(R"EOF( inline_scoped_route_configs: @@ -830,8 +775,7 @@ route_configuration_name: dynamic-foo-route-config *message_ptr); EXPECT_THAT(expected_config_dump, ProtoEq(scoped_routes_config_dump3)); - resources.Clear(); - srds_subscription_->onConfigUpdate(resources, "2"); + srds_subscription_->onConfigUpdate({}, "2"); TestUtility::loadFromYaml(R"EOF( inline_scoped_route_configs: - name: foo-scoped-routes diff --git a/test/common/router/vhds_test.cc b/test/common/router/vhds_test.cc index 4048a86be306..79256e2d92ee 100644 --- a/test/common/router/vhds_test.cc +++ b/test/common/router/vhds_test.cc @@ -76,8 +76,8 @@ name: my_route } RouteConfigUpdatePtr makeRouteConfigUpdate(const envoy::config::route::v3::RouteConfiguration& rc) { - RouteConfigUpdatePtr config_update_info = std::make_unique( - factory_context_.timeSource(), factory_context_.messageValidationVisitor()); + RouteConfigUpdatePtr config_update_info = + std::make_unique(factory_context_.timeSource()); config_update_info->onRdsUpdate(rc, "1"); return config_update_info; } @@ -131,9 +131,11 @@ TEST_F(VhdsTest, VhdsAddsVirtualHosts) { auto vhost = buildVirtualHost("vhost1", "vhost.first"); const auto& added_resources = buildAddedResources({vhost}); + const auto decoded_resources = + TestUtility::decodeResources(added_resources); const Protobuf::RepeatedPtrField removed_resources; factory_context_.cluster_manager_.subscription_factory_.callbacks_->onConfigUpdate( - added_resources, removed_resources, "1"); + decoded_resources.refvec_, removed_resources, "1"); EXPECT_EQ(1UL, config_update_info->routeConfiguration().virtual_hosts_size()); EXPECT_TRUE( @@ -189,9 +191,11 @@ name: my_route auto vhost = buildVirtualHost("vhost_vhds1", "vhost.first"); const auto& added_resources = buildAddedResources({vhost}); + const auto decoded_resources = + TestUtility::decodeResources(added_resources); const Protobuf::RepeatedPtrField removed_resources; factory_context_.cluster_manager_.subscription_factory_.callbacks_->onConfigUpdate( - added_resources, removed_resources, "1"); + decoded_resources.refvec_, removed_resources, "1"); EXPECT_EQ(2UL, config_update_info->routeConfiguration().virtual_hosts_size()); config_update_info->onRdsUpdate(updated_route_config, "2"); @@ -208,30 +212,6 @@ name: my_route "vhost_vhds1" == actual_vhost_2.name()); } -// verify vhds validates VirtualHosts in added_resources -TEST_F(VhdsTest, VhdsValidatesAddedVirtualHosts) { - const auto route_config = - TestUtility::parseYaml(default_vhds_config_); - RouteConfigUpdatePtr config_update_info = makeRouteConfigUpdate(route_config); - - VhdsSubscription subscription(config_update_info, factory_context_, context_, providers_); - - auto vhost = TestUtility::parseYaml(R"EOF( - name: invalid_vhost - domains: [] - routes: - - match: { prefix: "/" } - route: { cluster: "my_service" } -)EOF"); - - const auto& added_resources = buildAddedResources({vhost}); - const Protobuf::RepeatedPtrField 
removed_resources; - - EXPECT_THROW(factory_context_.cluster_manager_.subscription_factory_.callbacks_->onConfigUpdate( - added_resources, removed_resources, "1"), - ProtoValidationException); -} - } // namespace } // namespace Router } // namespace Envoy diff --git a/test/common/runtime/runtime_impl_test.cc b/test/common/runtime/runtime_impl_test.cc index 8092dde805df..e45588c7aa40 100644 --- a/test/common/runtime/runtime_impl_test.cc +++ b/test/common/runtime/runtime_impl_test.cc @@ -864,10 +864,11 @@ class RtdsLoaderImplTest : public LoaderImplTest { rtds_layer->mutable_rtds_config(); } EXPECT_CALL(cm_, subscriptionFactory()).Times(layers_.size()); - ON_CALL(cm_.subscription_factory_, subscriptionFromConfigSource(_, _, _, _)) - .WillByDefault(testing::Invoke( - [this](const envoy::config::core::v3::ConfigSource&, absl::string_view, Stats::Scope&, - Config::SubscriptionCallbacks& callbacks) -> Config::SubscriptionPtr { + ON_CALL(cm_.subscription_factory_, subscriptionFromConfigSource(_, _, _, _, _)) + .WillByDefault( + testing::Invoke([this](const envoy::config::core::v3::ConfigSource&, absl::string_view, + Stats::Scope&, Config::SubscriptionCallbacks& callbacks, + Config::OpaqueResourceDecoder&) -> Config::SubscriptionPtr { auto ret = std::make_unique>(); rtds_subscriptions_.push_back(ret.get()); rtds_callbacks_.push_back(&callbacks); @@ -900,17 +901,14 @@ class RtdsLoaderImplTest : public LoaderImplTest { void doOnConfigUpdateVerifyNoThrow(const envoy::service::runtime::v3::Runtime& runtime, uint32_t callback_index = 0) { - Protobuf::RepeatedPtrField resources; - resources.Add()->PackFrom(runtime); - VERBOSE_EXPECT_NO_THROW(rtds_callbacks_[callback_index]->onConfigUpdate(resources, "")); + const auto decoded_resources = TestUtility::decodeResources({runtime}); + VERBOSE_EXPECT_NO_THROW( + rtds_callbacks_[callback_index]->onConfigUpdate(decoded_resources.refvec_, "")); } void doDeltaOnConfigUpdateVerifyNoThrow(const envoy::service::runtime::v3::Runtime& runtime) { - Protobuf::RepeatedPtrField resources; - auto* resource = resources.Add(); - resource->mutable_resource()->PackFrom(runtime); - resource->set_version(""); - VERBOSE_EXPECT_NO_THROW(rtds_callbacks_[0]->onConfigUpdate(resources, {}, "")); + const auto decoded_resources = TestUtility::decodeResources({runtime}); + VERBOSE_EXPECT_NO_THROW(rtds_callbacks_[0]->onConfigUpdate(decoded_resources.refvec_, {}, "")); } std::vector layers_{"some_resource"}; @@ -923,10 +921,8 @@ class RtdsLoaderImplTest : public LoaderImplTest { TEST_F(RtdsLoaderImplTest, UnexpectedSizeEmpty) { setup(); - Protobuf::RepeatedPtrField runtimes; - EXPECT_CALL(rtds_init_callback_, Call()); - EXPECT_THROW_WITH_MESSAGE(rtds_callbacks_[0]->onConfigUpdate(runtimes, ""), EnvoyException, + EXPECT_THROW_WITH_MESSAGE(rtds_callbacks_[0]->onConfigUpdate({}, ""), EnvoyException, "Unexpected RTDS resource length: 0"); EXPECT_EQ(0, store_.counter("runtime.load_error").value()); @@ -939,13 +935,12 @@ TEST_F(RtdsLoaderImplTest, UnexpectedSizeEmpty) { TEST_F(RtdsLoaderImplTest, UnexpectedSizeTooMany) { setup(); - Protobuf::RepeatedPtrField runtimes; - runtimes.Add(); - runtimes.Add(); + const envoy::service::runtime::v3::Runtime runtime; + const auto decoded_resources = TestUtility::decodeResources({runtime, runtime}); EXPECT_CALL(rtds_init_callback_, Call()); - EXPECT_THROW_WITH_MESSAGE(rtds_callbacks_[0]->onConfigUpdate(runtimes, ""), EnvoyException, - "Unexpected RTDS resource length: 2"); + 
EXPECT_THROW_WITH_MESSAGE(rtds_callbacks_[0]->onConfigUpdate(decoded_resources.refvec_, ""), + EnvoyException, "Unexpected RTDS resource length: 2"); EXPECT_EQ(0, store_.counter("runtime.load_error").value()); EXPECT_EQ(1, store_.counter("runtime.load_success").value()); @@ -978,9 +973,9 @@ TEST_F(RtdsLoaderImplTest, WrongResourceName) { foo: bar baz: meh )EOF"); - Protobuf::RepeatedPtrField resources; - resources.Add()->PackFrom(runtime); - EXPECT_THROW_WITH_MESSAGE(rtds_callbacks_[0]->onConfigUpdate(resources, ""), EnvoyException, + const auto decoded_resources = TestUtility::decodeResources({runtime}); + EXPECT_THROW_WITH_MESSAGE(rtds_callbacks_[0]->onConfigUpdate(decoded_resources.refvec_, ""), + EnvoyException, "Unexpected RTDS runtime (expecting some_resource): other_resource"); EXPECT_EQ("whatevs", loader_->snapshot().get("foo").value().get()); diff --git a/test/common/secret/sds_api_test.cc b/test/common/secret/sds_api_test.cc index 8834dc896c98..289fb79d6a66 100644 --- a/test/common/secret/sds_api_test.cc +++ b/test/common/secret/sds_api_test.cc @@ -72,7 +72,7 @@ TEST_F(SdsApiTest, BadConfigSource) { ::testing::InSequence s; NiceMock server; envoy::config::core::v3::ConfigSource config_source; - EXPECT_CALL(subscription_factory_, subscriptionFromConfigSource(_, _, _, _)) + EXPECT_CALL(subscription_factory_, subscriptionFromConfigSource(_, _, _, _, _)) .WillOnce(InvokeWithoutArgs([]() -> Config::SubscriptionPtr { throw EnvoyException("bad config"); return nullptr; @@ -109,11 +109,10 @@ TEST_F(SdsApiTest, DynamicTlsCertificateUpdateSuccess) { )EOF"; envoy::extensions::transport_sockets::tls::v3::Secret typed_secret; TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), typed_secret); - Protobuf::RepeatedPtrField secret_resources; - secret_resources.Add()->PackFrom(typed_secret); + const auto decoded_resources = TestUtility::decodeResources({typed_secret}); EXPECT_CALL(secret_callback, onAddOrUpdateSecret()); - subscription_factory_.callbacks_->onConfigUpdate(secret_resources, ""); + subscription_factory_.callbacks_->onConfigUpdate(decoded_resources.refvec_, ""); Ssl::TlsCertificateConfigImpl tls_config(*sds_api.secret(), nullptr, *api_); const std::string cert_pem = @@ -140,11 +139,10 @@ class PartialMockSds : public SdsApi { server.stats(), init_manager, []() {}, dispatcher, api) {} MOCK_METHOD(void, onConfigUpdate, - (const Protobuf::RepeatedPtrField&, const std::string&)); - void - onConfigUpdate(const Protobuf::RepeatedPtrField& added, - const Protobuf::RepeatedPtrField& removed, - const std::string& version) override { + (const std::vector&, const std::string&)); + void onConfigUpdate(const std::vector& added, + const Protobuf::RepeatedPtrField& removed, + const std::string& version) override { SdsApi::onConfigUpdate(added, removed, version); } void setSecret(const envoy::extensions::transport_sockets::tls::v3::Secret&) override {} @@ -157,16 +155,10 @@ class PartialMockSds : public SdsApi { // Basic test of delta's passthrough call to the state-of-the-world variant, to // increase coverage. 
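The Delta test below builds DecodedResourceImpl objects directly instead of going through TestUtility::decodeResources, which makes the ownership explicit: the impl owns the typed message, while the vector passed to onConfigUpdate() holds non-owning references. A sketch of that construction, with the argument order (message, name, aliases, version) taken from this diff and the vector's template argument assumed:

    // Sketch only, mirroring the SdsApiTest.Delta test below.
    auto secret = std::make_unique<envoy::extensions::transport_sockets::tls::v3::Secret>();
    secret->set_name("secret_1");
    Config::DecodedResourceImpl owned(std::move(secret), "name", {}, "version1");
    // DecodedResourceRef is a non-owning reference; `owned` must outlive the vector.
    std::vector<Config::DecodedResourceRef> refs{owned};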
TEST_F(SdsApiTest, Delta) { - Protobuf::RepeatedPtrField resources; - envoy::extensions::transport_sockets::tls::v3::Secret secret; - secret.set_name("secret_1"); - auto* resource = resources.Add(); - resource->mutable_resource()->PackFrom(secret); - resource->set_name("secret_1"); - resource->set_version("version1"); - - Protobuf::RepeatedPtrField for_matching; - for_matching.Add()->PackFrom(secret); + auto secret = std::make_unique(); + secret->set_name("secret_1"); + Config::DecodedResourceImpl resource(std::move(secret), "name", {}, "version1"); + std::vector resources{resource}; NiceMock server; envoy::config::core::v3::ConfigSource config_source; @@ -175,16 +167,19 @@ TEST_F(SdsApiTest, Delta) { PartialMockSds sds(server, init_manager_, config_source, subscription_factory_, time_system, *dispatcher_, *api_); initialize(); - EXPECT_CALL(sds, onConfigUpdate(RepeatedProtoEq(for_matching), "version1")); + EXPECT_CALL(sds, onConfigUpdate(DecodedResourcesEq(resources), "version1")); subscription_factory_.callbacks_->onConfigUpdate(resources, {}, "ignored"); // An attempt to remove a resource logs an error, but otherwise just carries on (ignoring the // removal attempt). - resource->set_version("version2"); - EXPECT_CALL(sds, onConfigUpdate(RepeatedProtoEq(for_matching), "version2")); + auto secret_again = std::make_unique(); + secret_again->set_name("secret_1"); + Config::DecodedResourceImpl resource_v2(std::move(secret_again), "name", {}, "version2"); + std::vector resources_v2{resource_v2}; + EXPECT_CALL(sds, onConfigUpdate(DecodedResourcesEq(resources_v2), "version2")); Protobuf::RepeatedPtrField removals; *removals.Add() = "route_0"; - subscription_factory_.callbacks_->onConfigUpdate(resources, removals, "ignored"); + subscription_factory_.callbacks_->onConfigUpdate(resources_v2, removals, "ignored"); } // Tests SDS's use of the delta variant of onConfigUpdate(). 
@@ -211,12 +206,11 @@ TEST_F(SdsApiTest, DeltaUpdateSuccess) { )EOF"; envoy::extensions::transport_sockets::tls::v3::Secret typed_secret; TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), typed_secret); - Protobuf::RepeatedPtrField secret_resources; - secret_resources.Add()->mutable_resource()->PackFrom(typed_secret); + const auto decoded_resources = TestUtility::decodeResources({typed_secret}); EXPECT_CALL(secret_callback, onAddOrUpdateSecret()); initialize(); - subscription_factory_.callbacks_->onConfigUpdate(secret_resources, {}, ""); + subscription_factory_.callbacks_->onConfigUpdate(decoded_resources.refvec_, {}, ""); Ssl::TlsCertificateConfigImpl tls_config(*sds_api.secret(), nullptr, *api_); const std::string cert_pem = @@ -256,11 +250,10 @@ TEST_F(SdsApiTest, DynamicCertificateValidationContextUpdateSuccess) { envoy::extensions::transport_sockets::tls::v3::Secret typed_secret; TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), typed_secret); - Protobuf::RepeatedPtrField secret_resources; - secret_resources.Add()->PackFrom(typed_secret); + const auto decoded_resources = TestUtility::decodeResources({typed_secret}); EXPECT_CALL(secret_callback, onAddOrUpdateSecret()); initialize(); - subscription_factory_.callbacks_->onConfigUpdate(secret_resources, ""); + subscription_factory_.callbacks_->onConfigUpdate(decoded_resources.refvec_, ""); Ssl::CertificateValidationContextConfigImpl cvc_config(*sds_api.secret(), *api_); const std::string ca_cert = @@ -320,10 +313,9 @@ TEST_F(SdsApiTest, DefaultCertificateValidationContextTest) { EXPECT_CALL(secret_callback, onAddOrUpdateSecret()); EXPECT_CALL(validation_callback, validateCvc(_)); - Protobuf::RepeatedPtrField secret_resources; - secret_resources.Add()->PackFrom(typed_secret); + const auto decoded_resources = TestUtility::decodeResources({typed_secret}); initialize(); - subscription_factory_.callbacks_->onConfigUpdate(secret_resources, ""); + subscription_factory_.callbacks_->onConfigUpdate(decoded_resources.refvec_, ""); const std::string default_verify_certificate_hash = "0000000000000000000000000000000000000000000000000000000000000000"; @@ -405,12 +397,11 @@ name: "encryption_key" )EOF"; envoy::extensions::transport_sockets::tls::v3::Secret typed_secret; TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), typed_secret); - Protobuf::RepeatedPtrField secret_resources; - secret_resources.Add()->PackFrom(typed_secret); + const auto decoded_resources = TestUtility::decodeResources({typed_secret}); EXPECT_CALL(secret_callback, onAddOrUpdateSecret()); EXPECT_CALL(validation_callback, validateGenericSecret(_)); initialize(); - subscription_factory_.callbacks_->onConfigUpdate(secret_resources, ""); + subscription_factory_.callbacks_->onConfigUpdate(decoded_resources.refvec_, ""); const envoy::extensions::transport_sockets::tls::v3::GenericSecret generic_secret( *sds_api.secret()); @@ -432,10 +423,8 @@ TEST_F(SdsApiTest, EmptyResource) { config_source, "abc.com", subscription_factory_, time_system_, validation_visitor_, server.stats(), init_manager_, []() {}, *dispatcher_, *api_); - Protobuf::RepeatedPtrField secret_resources; - initialize(); - EXPECT_THROW_WITH_MESSAGE(subscription_factory_.callbacks_->onConfigUpdate(secret_resources, ""), + EXPECT_THROW_WITH_MESSAGE(subscription_factory_.callbacks_->onConfigUpdate({}, ""), EnvoyException, "Missing SDS resources for abc.com in onConfigUpdate()"); } @@ -461,13 +450,12 @@ TEST_F(SdsApiTest, SecretUpdateWrongSize) { envoy::extensions::transport_sockets::tls::v3::Secret 
typed_secret; TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), typed_secret); - Protobuf::RepeatedPtrField secret_resources; - secret_resources.Add()->PackFrom(typed_secret); - secret_resources.Add()->PackFrom(typed_secret); + const auto decoded_resources = TestUtility::decodeResources({typed_secret, typed_secret}); initialize(); - EXPECT_THROW_WITH_MESSAGE(subscription_factory_.callbacks_->onConfigUpdate(secret_resources, ""), - EnvoyException, "Unexpected SDS secrets length: 2"); + EXPECT_THROW_WITH_MESSAGE( + subscription_factory_.callbacks_->onConfigUpdate(decoded_resources.refvec_, ""), + EnvoyException, "Unexpected SDS secrets length: 2"); } // Validate that SdsApi throws exception if secret name passed to onConfigUpdate() @@ -492,13 +480,12 @@ TEST_F(SdsApiTest, SecretUpdateWrongSecretName) { envoy::extensions::transport_sockets::tls::v3::Secret typed_secret; TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), typed_secret); - Protobuf::RepeatedPtrField secret_resources; - secret_resources.Add()->PackFrom(typed_secret); + const auto decoded_resources = TestUtility::decodeResources({typed_secret}); initialize(); - EXPECT_THROW_WITH_MESSAGE(subscription_factory_.callbacks_->onConfigUpdate(secret_resources, ""), - EnvoyException, - "Unexpected SDS secret (expecting abc.com): wrong.name.com"); + EXPECT_THROW_WITH_MESSAGE( + subscription_factory_.callbacks_->onConfigUpdate(decoded_resources.refvec_, ""), + EnvoyException, "Unexpected SDS secret (expecting abc.com): wrong.name.com"); } } // namespace diff --git a/test/common/secret/secret_manager_impl_test.cc b/test/common/secret/secret_manager_impl_test.cc index 3790fbdf7c95..f77451f55183 100644 --- a/test/common/secret/secret_manager_impl_test.cc +++ b/test/common/secret/secret_manager_impl_test.cc @@ -368,11 +368,10 @@ name: "abc.com" )EOF"; envoy::extensions::transport_sockets::tls::v3::Secret typed_secret; TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), typed_secret); - Protobuf::RepeatedPtrField secret_resources; - secret_resources.Add()->PackFrom(typed_secret); + const auto decoded_resources = TestUtility::decodeResources({typed_secret}); init_target_handle->initialize(init_watcher); - secret_context.cluster_manager_.subscription_factory_.callbacks_->onConfigUpdate(secret_resources, - ""); + secret_context.cluster_manager_.subscription_factory_.callbacks_->onConfigUpdate( + decoded_resources.refvec_, ""); Ssl::TlsCertificateConfigImpl tls_config(*secret_provider->secret(), nullptr, *api_); const std::string cert_pem = "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_cert.pem"; @@ -419,11 +418,10 @@ name: "encryption_key" )EOF"; envoy::extensions::transport_sockets::tls::v3::Secret typed_secret; TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), typed_secret); - Protobuf::RepeatedPtrField secret_resources; - secret_resources.Add()->PackFrom(typed_secret); + const auto decoded_resources = TestUtility::decodeResources({typed_secret}); init_target_handle->initialize(init_watcher); - secret_context.cluster_manager_.subscription_factory_.callbacks_->onConfigUpdate(secret_resources, - ""); + secret_context.cluster_manager_.subscription_factory_.callbacks_->onConfigUpdate( + decoded_resources.refvec_, ""); const envoy::extensions::transport_sockets::tls::v3::GenericSecret generic_secret( *secret_provider->secret()); @@ -469,11 +467,10 @@ name: "abc.com" )EOF"; envoy::extensions::transport_sockets::tls::v3::Secret typed_secret; 
TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), typed_secret); - Protobuf::RepeatedPtrField secret_resources; - secret_resources.Add()->PackFrom(typed_secret); + const auto decoded_resources = TestUtility::decodeResources({typed_secret}); init_target_handle->initialize(init_watcher); - secret_context.cluster_manager_.subscription_factory_.callbacks_->onConfigUpdate(secret_resources, - "keycert-v1"); + secret_context.cluster_manager_.subscription_factory_.callbacks_->onConfigUpdate( + decoded_resources.refvec_, "keycert-v1"); Ssl::TlsCertificateConfigImpl tls_config(*secret_provider->secret(), nullptr, *api_); EXPECT_EQ("DUMMY_INLINE_BYTES_FOR_CERT_CHAIN", tls_config.certificateChain()); EXPECT_EQ("DUMMY_INLINE_BYTES_FOR_PRIVATE_KEY", tls_config.privateKey()); @@ -511,12 +508,11 @@ name: "abc.com.validation" inline_string: "DUMMY_INLINE_STRING_TRUSTED_CA" )EOF"; TestUtility::loadFromYaml(TestEnvironment::substitute(validation_yaml), typed_secret); - secret_resources.Clear(); - secret_resources.Add()->PackFrom(typed_secret); + const auto decoded_resources_2 = TestUtility::decodeResources({typed_secret}); init_target_handle->initialize(init_watcher); secret_context.cluster_manager_.subscription_factory_.callbacks_->onConfigUpdate( - secret_resources, "validation-context-v1"); + decoded_resources_2.refvec_, "validation-context-v1"); Ssl::CertificateValidationContextConfigImpl cert_validation_context( *context_secret_provider->secret(), *api_); EXPECT_EQ("DUMMY_INLINE_STRING_TRUSTED_CA", cert_validation_context.caCert()); @@ -563,12 +559,11 @@ name: "abc.com.stek" - inline_bytes: "RFVNTVlfSU5MSU5FX0JZVEVT" )EOF"; TestUtility::loadFromYaml(TestEnvironment::substitute(stek_yaml), typed_secret); - secret_resources.Clear(); - secret_resources.Add()->PackFrom(typed_secret); + const auto decoded_resources_3 = TestUtility::decodeResources({typed_secret}); init_target_handle->initialize(init_watcher); secret_context.cluster_manager_.subscription_factory_.callbacks_->onConfigUpdate( - secret_resources, "stek-context-v1"); + decoded_resources_3.refvec_, "stek-context-v1"); EXPECT_EQ(stek_secret_provider->secret()->keys()[1].inline_string(), "DUMMY_INLINE_STRING"); const std::string updated_once_more_config_dump = R"EOF( @@ -625,11 +620,10 @@ name: "signing_key" inline_string: "DUMMY_ECDSA_KEY" )EOF"; TestUtility::loadFromYaml(TestEnvironment::substitute(generic_secret_yaml), typed_secret); - secret_resources.Clear(); - secret_resources.Add()->PackFrom(typed_secret); + const auto decoded_resources_4 = TestUtility::decodeResources({typed_secret}); init_target_handle->initialize(init_watcher); secret_context.cluster_manager_.subscription_factory_.callbacks_->onConfigUpdate( - secret_resources, "signing-key-v1"); + decoded_resources_4.refvec_, "signing-key-v1"); const envoy::extensions::transport_sockets::tls::v3::GenericSecret generic_secret( *generic_secret_provider->secret()); diff --git a/test/common/upstream/cds_api_impl_test.cc b/test/common/upstream/cds_api_impl_test.cc index 61721b797ffd..388fd9ed5ca8 100644 --- a/test/common/upstream/cds_api_impl_test.cc +++ b/test/common/upstream/cds_api_impl_test.cc @@ -71,21 +71,6 @@ class CdsApiImplTest : public testing::Test { NiceMock validation_visitor_; }; -// Negative test for protoc-gen-validate constraints. 
-TEST_F(CdsApiImplTest, ValidateFail) { - InSequence s; - - setup(); - - Protobuf::RepeatedPtrField clusters; - envoy::config::cluster::v3::Cluster cluster; - clusters.Add()->PackFrom(cluster); - - EXPECT_CALL(cm_, clusters()).WillRepeatedly(Return(cluster_map_)); - EXPECT_CALL(initialized_, ready()); - EXPECT_THROW(cds_callbacks_->onConfigUpdate(clusters, ""), EnvoyException); -} - // Regression test against only updating versionInfo() if at least one cluster // is are added/updated even if one or more are removed. TEST_F(CdsApiImplTest, UpdateVersionOnClusterRemove) { @@ -111,7 +96,9 @@ version_info: '0' EXPECT_CALL(initialized_, ready()); EXPECT_EQ("", cds_->versionInfo()); - cds_callbacks_->onConfigUpdate(response1.resources(), response1.version_info()); + const auto decoded_resources = + TestUtility::decodeResources(response1); + cds_callbacks_->onConfigUpdate(decoded_resources.refvec_, response1.version_info()); EXPECT_EQ("0", cds_->versionInfo()); const std::string response2_yaml = R"EOF( @@ -122,7 +109,9 @@ version_info: '1' TestUtility::parseYaml(response2_yaml); EXPECT_CALL(cm_, clusters()).WillOnce(Return(makeClusterMap({"cluster1"}))); EXPECT_CALL(cm_, removeCluster("cluster1")).WillOnce(Return(true)); - cds_callbacks_->onConfigUpdate(response2.resources(), response2.version_info()); + const auto decoded_resources_2 = + TestUtility::decodeResources(response2); + cds_callbacks_->onConfigUpdate(decoded_resources_2.refvec_, response2.version_info()); EXPECT_EQ("1", cds_->versionInfo()); } @@ -132,15 +121,14 @@ TEST_F(CdsApiImplTest, ValidateDuplicateClusters) { setup(); - Protobuf::RepeatedPtrField clusters; envoy::config::cluster::v3::Cluster cluster_1; cluster_1.set_name("duplicate_cluster"); - clusters.Add()->PackFrom(cluster_1); - clusters.Add()->PackFrom(cluster_1); + const auto decoded_resources = TestUtility::decodeResources({cluster_1, cluster_1}); EXPECT_CALL(cm_, clusters()).WillRepeatedly(Return(cluster_map_)); EXPECT_CALL(initialized_, ready()); - EXPECT_THROW_WITH_MESSAGE(cds_callbacks_->onConfigUpdate(clusters, ""), EnvoyException, + EXPECT_THROW_WITH_MESSAGE(cds_callbacks_->onConfigUpdate(decoded_resources.refvec_, ""), + EnvoyException, "Error adding/updating cluster(s) duplicate_cluster: duplicate cluster " "duplicate_cluster found"); } @@ -153,8 +141,7 @@ TEST_F(CdsApiImplTest, EmptyConfigUpdate) { EXPECT_CALL(cm_, clusters()).WillOnce(Return(ClusterManager::ClusterInfoMap{})); EXPECT_CALL(initialized_, ready()); - Protobuf::RepeatedPtrField clusters; - cds_callbacks_->onConfigUpdate(clusters, ""); + cds_callbacks_->onConfigUpdate({}, ""); } TEST_F(CdsApiImplTest, ConfigUpdateWith2ValidClusters) { @@ -166,19 +153,16 @@ TEST_F(CdsApiImplTest, ConfigUpdateWith2ValidClusters) { EXPECT_CALL(cm_, clusters()).WillOnce(Return(ClusterManager::ClusterInfoMap{})); EXPECT_CALL(initialized_, ready()); - Protobuf::RepeatedPtrField clusters; - envoy::config::cluster::v3::Cluster cluster_1; cluster_1.set_name("cluster_1"); - clusters.Add()->PackFrom(cluster_1); expectAdd("cluster_1"); envoy::config::cluster::v3::Cluster cluster_2; cluster_2.set_name("cluster_2"); - clusters.Add()->PackFrom(cluster_2); expectAdd("cluster_2"); - cds_callbacks_->onConfigUpdate(clusters, ""); + const auto decoded_resources = TestUtility::decodeResources({cluster_1, cluster_2}); + cds_callbacks_->onConfigUpdate(decoded_resources.refvec_, ""); } TEST_F(CdsApiImplTest, DeltaConfigUpdate) { @@ -208,7 +192,9 @@ TEST_F(CdsApiImplTest, DeltaConfigUpdate) { resource->set_name("cluster_2"); 
resource->set_version("v1"); } - cds_callbacks_->onConfigUpdate(resources, {}, "v1"); + const auto decoded_resources = + TestUtility::decodeResources(resources); + cds_callbacks_->onConfigUpdate(decoded_resources.refvec_, {}, "v1"); } { @@ -225,7 +211,9 @@ TEST_F(CdsApiImplTest, DeltaConfigUpdate) { Protobuf::RepeatedPtrField removed; *removed.Add() = "cluster_1"; EXPECT_CALL(cm_, removeCluster(StrEq("cluster_1"))).WillOnce(Return(true)); - cds_callbacks_->onConfigUpdate(resources, removed, "v2"); + const auto decoded_resources = + TestUtility::decodeResources(resources); + cds_callbacks_->onConfigUpdate(decoded_resources.refvec_, removed, "v2"); } } @@ -238,25 +226,21 @@ TEST_F(CdsApiImplTest, ConfigUpdateAddsSecondClusterEvenIfFirstThrows) { EXPECT_CALL(cm_, clusters()).WillOnce(Return(ClusterManager::ClusterInfoMap{})); EXPECT_CALL(initialized_, ready()); - Protobuf::RepeatedPtrField clusters; - envoy::config::cluster::v3::Cluster cluster_1; cluster_1.set_name("cluster_1"); - clusters.Add()->PackFrom(cluster_1); expectAddToThrow("cluster_1", "An exception"); envoy::config::cluster::v3::Cluster cluster_2; cluster_2.set_name("cluster_2"); - clusters.Add()->PackFrom(cluster_2); expectAdd("cluster_2"); envoy::config::cluster::v3::Cluster cluster_3; cluster_3.set_name("cluster_3"); - clusters.Add()->PackFrom(cluster_3); expectAddToThrow("cluster_3", "Another exception"); + const auto decoded_resources = TestUtility::decodeResources({cluster_1, cluster_2, cluster_3}); EXPECT_THROW_WITH_MESSAGE( - cds_callbacks_->onConfigUpdate(clusters, ""), EnvoyException, + cds_callbacks_->onConfigUpdate(decoded_resources.refvec_, ""), EnvoyException, "Error adding/updating cluster(s) cluster_1: An exception, cluster_3: Another exception"); } @@ -289,7 +273,9 @@ version_info: '0' expectAdd("cluster2", "0"); EXPECT_CALL(initialized_, ready()); EXPECT_EQ("", cds_->versionInfo()); - cds_callbacks_->onConfigUpdate(response1.resources(), response1.version_info()); + const auto decoded_resources = + TestUtility::decodeResources(response1); + cds_callbacks_->onConfigUpdate(decoded_resources.refvec_, response1.version_info()); EXPECT_EQ("0", cds_->versionInfo()); const std::string response2_yaml = R"EOF( @@ -315,7 +301,9 @@ version_info: '1' expectAdd("cluster1", "1"); expectAdd("cluster3", "1"); EXPECT_CALL(cm_, removeCluster("cluster2")); - cds_callbacks_->onConfigUpdate(response2.resources(), response2.version_info()); + const auto decoded_resources_2 = + TestUtility::decodeResources(response2); + cds_callbacks_->onConfigUpdate(decoded_resources_2.refvec_, response2.version_info()); EXPECT_EQ("1", cds_->versionInfo()); } @@ -347,7 +335,9 @@ version_info: '0' EXPECT_CALL(cm_, clusters()).WillRepeatedly(Return(cluster_map_)); EXPECT_CALL(initialized_, ready()); - EXPECT_THROW(cds_callbacks_->onConfigUpdate(response1.resources(), response1.version_info()), + const auto decoded_resources = + TestUtility::decodeResources(response1); + EXPECT_THROW(cds_callbacks_->onConfigUpdate(decoded_resources.refvec_, response1.version_info()), EnvoyException); EXPECT_EQ("", cds_->versionInfo()); } diff --git a/test/common/upstream/eds_speed_test.cc b/test/common/upstream/eds_speed_test.cc index 1f1c25c4ad9a..fbebc9a7af0c 100644 --- a/test/common/upstream/eds_speed_test.cc +++ b/test/common/upstream/eds_speed_test.cc @@ -59,7 +59,7 @@ class EdsSpeedTest { EXPECT_EQ(initialize_phase, cluster_->initializePhase()); eds_callbacks_ = cm_.subscription_factory_.callbacks_; subscription_ = std::make_unique( - grpc_mux_, *eds_callbacks_, 
subscription_stats_, type_url_, dispatcher_, + grpc_mux_, *eds_callbacks_, resource_decoder_, subscription_stats_, type_url_, dispatcher_, std::chrono::milliseconds(), false); } @@ -142,6 +142,8 @@ class EdsSpeedTest { NiceMock dispatcher_; std::shared_ptr cluster_; Config::SubscriptionCallbacks* eds_callbacks_{}; + Config::OpaqueResourceDecoderImpl + resource_decoder_{validation_visitor_, "cluster_name"}; NiceMock random_; NiceMock runtime_; NiceMock local_info_; diff --git a/test/common/upstream/eds_test.cc b/test/common/upstream/eds_test.cc index 865e05bd9f98..149fc7cc5640 100644 --- a/test/common/upstream/eds_test.cc +++ b/test/common/upstream/eds_test.cc @@ -108,9 +108,9 @@ class EdsTest : public testing::Test { void doOnConfigUpdateVerifyNoThrow( const envoy::config::endpoint::v3::ClusterLoadAssignment& cluster_load_assignment) { - Protobuf::RepeatedPtrField resources; - resources.Add()->PackFrom(cluster_load_assignment); - VERBOSE_EXPECT_NO_THROW(eds_callbacks_->onConfigUpdate(resources, "")); + const auto decoded_resources = + TestUtility::decodeResources({cluster_load_assignment}, "cluster_name"); + VERBOSE_EXPECT_NO_THROW(eds_callbacks_->onConfigUpdate(decoded_resources.refvec_, "")); } bool initialized_{}; @@ -218,43 +218,15 @@ class EdsWithHealthCheckUpdateTest : public EdsTest { envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment_; }; -// Negative test for protoc-gen-validate constraints. -TEST_F(EdsTest, ValidateFail) { - initialize(); - envoy::config::endpoint::v3::ClusterLoadAssignment resource; - Protobuf::RepeatedPtrField resources; - resources.Add()->PackFrom(resource); - EXPECT_THROW(eds_callbacks_->onConfigUpdate(resources, ""), ProtoValidationException); - EXPECT_FALSE(initialized_); -} - -// Validate that onConfigUpdate() can ignore unknown fields. -// this doesn't test the actual functionality, as the ValidationVisitor is mocked out, -// however it is functionally tested in dynamic_validation_integration_test. -TEST_F(EdsTest, ValidateIgnored) { - validation_visitor_.setSkipValidation(true); - initialize(); - envoy::config::endpoint::v3::ClusterLoadAssignment resource; - resource.set_cluster_name("fare"); - auto* unknown = resource.GetReflection()->MutableUnknownFields(&resource); - // add a field that doesn't exist in the proto definition: - unknown->AddFixed32(1000, 1); - - Protobuf::RepeatedPtrField resources; - resources.Add()->PackFrom(resource); - doOnConfigUpdateVerifyNoThrow(resource); - EXPECT_TRUE(initialized_); -} - // Validate that onConfigUpdate() with unexpected cluster names rejects config. 
TEST_F(EdsTest, OnConfigUpdateWrongName) { envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment; cluster_load_assignment.set_cluster_name("wrong name"); - Protobuf::RepeatedPtrField resources; - resources.Add()->PackFrom(cluster_load_assignment); + const auto decoded_resources = + TestUtility::decodeResources({cluster_load_assignment}, "cluster_name"); initialize(); try { - eds_callbacks_->onConfigUpdate(resources, ""); + eds_callbacks_->onConfigUpdate(decoded_resources.refvec_, ""); } catch (const EnvoyException& e) { eds_callbacks_->onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::UpdateRejected, &e); @@ -266,9 +238,8 @@ TEST_F(EdsTest, OnConfigUpdateWrongName) { TEST_F(EdsTest, OnConfigUpdateEmpty) { initialize(); eds_callbacks_->onConfigUpdate({}, ""); - Protobuf::RepeatedPtrField resources; Protobuf::RepeatedPtrField removed_resources; - eds_callbacks_->onConfigUpdate(resources, removed_resources, ""); + eds_callbacks_->onConfigUpdate({}, removed_resources, ""); EXPECT_EQ(2UL, stats_.counter("cluster.name.update_empty").value()); EXPECT_TRUE(initialized_); } @@ -278,11 +249,10 @@ TEST_F(EdsTest, OnConfigUpdateWrongSize) { initialize(); envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment; cluster_load_assignment.set_cluster_name("fare"); - Protobuf::RepeatedPtrField resources; - resources.Add()->PackFrom(cluster_load_assignment); - resources.Add()->PackFrom(cluster_load_assignment); + const auto decoded_resources = TestUtility::decodeResources( + {cluster_load_assignment, cluster_load_assignment}, "cluster_name"); try { - eds_callbacks_->onConfigUpdate(resources, ""); + eds_callbacks_->onConfigUpdate(decoded_resources.refvec_, ""); } catch (const EnvoyException& e) { eds_callbacks_->onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::UpdateRejected, &e); @@ -310,7 +280,10 @@ TEST_F(EdsTest, DeltaOnConfigUpdateSuccess) { auto* resource = resources.Add(); resource->mutable_resource()->PackFrom(cluster_load_assignment); resource->set_version("v1"); - VERBOSE_EXPECT_NO_THROW(eds_callbacks_->onConfigUpdate(resources, {}, "v1")); + const auto decoded_resources = + TestUtility::decodeResources( + resources, "cluster_name"); + VERBOSE_EXPECT_NO_THROW(eds_callbacks_->onConfigUpdate(decoded_resources.refvec_, {}, "v1")); EXPECT_TRUE(initialized_); EXPECT_EQ(1UL, stats_.counter("cluster.name.update_no_rebuild").value()); @@ -1475,9 +1448,10 @@ TEST_F(EdsTest, NoPriorityForLocalCluster) { add_hosts_to_priority(0, 2); add_hosts_to_priority(1, 1); initialize(); - Protobuf::RepeatedPtrField resources; - resources.Add()->PackFrom(cluster_load_assignment); - EXPECT_THROW_WITH_MESSAGE(eds_callbacks_->onConfigUpdate(resources, ""), EnvoyException, + const auto decoded_resources = + TestUtility::decodeResources({cluster_load_assignment}, "cluster_name"); + EXPECT_THROW_WITH_MESSAGE(eds_callbacks_->onConfigUpdate(decoded_resources.refvec_, ""), + EnvoyException, "Unexpected non-zero priority for local cluster 'name'."); // Try an update which only has endpoints with P=0. This should go through. 
@@ -1764,9 +1738,10 @@ TEST_F(EdsTest, MalformedIP) { endpoint->mutable_endpoint()->mutable_address()->mutable_socket_address()->set_port_value(80); initialize(); - Protobuf::RepeatedPtrField resources; - resources.Add()->PackFrom(cluster_load_assignment); - EXPECT_THROW_WITH_MESSAGE(eds_callbacks_->onConfigUpdate(resources, ""), EnvoyException, + const auto decoded_resources = + TestUtility::decodeResources({cluster_load_assignment}, "cluster_name"); + EXPECT_THROW_WITH_MESSAGE(eds_callbacks_->onConfigUpdate(decoded_resources.refvec_, ""), + EnvoyException, "malformed IP address: foo.bar.com. Consider setting resolver_name or " "setting cluster type to 'STRICT_DNS' or 'LOGICAL_DNS'"); } @@ -1873,34 +1848,6 @@ TEST_F(EdsAssignmentTimeoutTest, AssignmentLeaseExpired) { } } -// Validate that onConfigUpdate() verifies that no deprecated fields are used. -TEST_F(EdsTest, DeprecatedFieldsError) { - // This test is only valid in API-v3, and should be updated for API-v4, as - // the deprecated fields of API-v2 will be removed. - envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment = - TestUtility::parseYaml(R"EOF( - cluster_name: fare - endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: - address: 1.2.3.4 - port_value: 80 - policy: - overprovisioning_factor: 100 - hidden_envoy_deprecated_disable_overprovisioning: true - )EOF"); - - initialize(); - Protobuf::RepeatedPtrField resources; - resources.Add()->PackFrom(cluster_load_assignment); - EXPECT_THROW_WITH_REGEX(eds_callbacks_->onConfigUpdate(resources, ""), ProtoValidationException, - "Illegal use of hidden_envoy_deprecated_ V2 field " - "'envoy.config.endpoint.v3.ClusterLoadAssignment.Policy.hidden_envoy_" - "deprecated_disable_overprovisioning'"); -} - } // namespace } // namespace Upstream } // namespace Envoy diff --git a/test/integration/vhds_integration_test.cc b/test/integration/vhds_integration_test.cc index 52306a1907e3..89ecb0d42af1 100644 --- a/test/integration/vhds_integration_test.cc +++ b/test/integration/vhds_integration_test.cc @@ -368,7 +368,7 @@ class VhdsIntegrationTest : public HttpIntegrationTest, resource->set_version("4"); resource->mutable_resource()->PackFrom( API_DOWNGRADE(TestUtility::parseYaml( - virtualHostYaml("vhost_1", "vhost_1, vhost.first")))); + virtualHostYaml("my_route/vhost_1", "vhost_1, vhost.first")))); resource->add_aliases("my_route/vhost.first"); ret.set_nonce("test-nonce-0"); @@ -679,4 +679,4 @@ TEST_P(VhdsIntegrationTest, VhdsOnDemandUpdateHttpConnectionCloses) { } } // namespace -} // namespace Envoy \ No newline at end of file +} // namespace Envoy diff --git a/test/mocks/config/mocks.cc b/test/mocks/config/mocks.cc index 374eec10d071..f578384ab7d2 100644 --- a/test/mocks/config/mocks.cc +++ b/test/mocks/config/mocks.cc @@ -8,15 +8,15 @@ namespace Envoy { namespace Config { MockSubscriptionFactory::MockSubscriptionFactory() { - ON_CALL(*this, subscriptionFromConfigSource(_, _, _, _)) - .WillByDefault(testing::Invoke([this](const envoy::config::core::v3::ConfigSource&, - absl::string_view, Stats::Scope&, - SubscriptionCallbacks& callbacks) -> SubscriptionPtr { - auto ret = std::make_unique>(); - subscription_ = ret.get(); - callbacks_ = &callbacks; - return ret; - })); + ON_CALL(*this, subscriptionFromConfigSource(_, _, _, _, _)) + .WillByDefault(testing::Invoke( + [this](const envoy::config::core::v3::ConfigSource&, absl::string_view, Stats::Scope&, + SubscriptionCallbacks& callbacks, OpaqueResourceDecoder&) -> SubscriptionPtr { + auto ret = 
std::make_unique>(); + subscription_ = ret.get(); + callbacks_ = &callbacks; + return ret; + })); ON_CALL(*this, messageValidationVisitor()) .WillByDefault(testing::ReturnRef(ProtobufMessage::getStrictValidationVisitor())); } @@ -32,13 +32,15 @@ MockGrpcMux::~MockGrpcMux() = default; MockGrpcStreamCallbacks::MockGrpcStreamCallbacks() = default; MockGrpcStreamCallbacks::~MockGrpcStreamCallbacks() = default; -MockSubscriptionCallbacks::MockSubscriptionCallbacks() { - ON_CALL(*this, resourceName(testing::_)) - .WillByDefault(testing::Invoke(TestUtility::xdsResourceName)); -} - +MockSubscriptionCallbacks::MockSubscriptionCallbacks() = default; MockSubscriptionCallbacks::~MockSubscriptionCallbacks() = default; +MockOpaqueResourceDecoder::MockOpaqueResourceDecoder() = default; +MockOpaqueResourceDecoder::~MockOpaqueResourceDecoder() = default; + +MockUntypedConfigUpdateCallbacks::MockUntypedConfigUpdateCallbacks() = default; +MockUntypedConfigUpdateCallbacks::~MockUntypedConfigUpdateCallbacks() = default; + MockTypedFactory::~MockTypedFactory() = default; } // namespace Config } // namespace Envoy diff --git a/test/mocks/config/mocks.h b/test/mocks/config/mocks.h index 99a6a08bc8ef..93dda3bd2574 100644 --- a/test/mocks/config/mocks.h +++ b/test/mocks/config/mocks.h @@ -24,6 +24,30 @@ class MockSubscriptionCallbacks : public SubscriptionCallbacks { MockSubscriptionCallbacks(); ~MockSubscriptionCallbacks() override; + MOCK_METHOD(void, onConfigUpdate, + (const std::vector& resources, const std::string& version_info)); + MOCK_METHOD(void, onConfigUpdate, + (const std::vector& added_resources, + const Protobuf::RepeatedPtrField& removed_resources, + const std::string& system_version_info)); + MOCK_METHOD(void, onConfigUpdateFailed, + (Envoy::Config::ConfigUpdateFailureReason reason, const EnvoyException* e)); +}; + +class MockOpaqueResourceDecoder : public OpaqueResourceDecoder { +public: + MockOpaqueResourceDecoder(); + ~MockOpaqueResourceDecoder() override; + + MOCK_METHOD(ProtobufTypes::MessagePtr, decodeResource, (const ProtobufWkt::Any& resource)); + MOCK_METHOD(std::string, resourceName, (const Protobuf::Message& resource)); +}; + +class MockUntypedConfigUpdateCallbacks : public UntypedConfigUpdateCallbacks { +public: + MockUntypedConfigUpdateCallbacks(); + ~MockUntypedConfigUpdateCallbacks() override; + MOCK_METHOD(void, onConfigUpdate, (const Protobuf::RepeatedPtrField& resources, const std::string& version_info)); @@ -34,7 +58,6 @@ class MockSubscriptionCallbacks : public SubscriptionCallbacks { const std::string& system_version_info)); MOCK_METHOD(void, onConfigUpdateFailed, (Envoy::Config::ConfigUpdateFailureReason reason, const EnvoyException* e)); - MOCK_METHOD(std::string, resourceName, (const ProtobufWkt::Any& resource)); }; class MockSubscription : public Subscription { @@ -50,7 +73,8 @@ class MockSubscriptionFactory : public SubscriptionFactory { MOCK_METHOD(SubscriptionPtr, subscriptionFromConfigSource, (const envoy::config::core::v3::ConfigSource& config, absl::string_view type_url, - Stats::Scope& scope, SubscriptionCallbacks& callbacks)); + Stats::Scope& scope, SubscriptionCallbacks& callbacks, + OpaqueResourceDecoder& resource_decoder)); MOCK_METHOD(ProtobufMessage::ValidationVisitor&, messageValidationVisitor, ()); MockSubscription* subscription_{}; @@ -84,7 +108,7 @@ class MockGrpcMux : public GrpcMux { MOCK_METHOD(GrpcMuxWatchPtr, addWatch, (const std::string& type_url, const std::set& resources, - SubscriptionCallbacks& callbacks)); + SubscriptionCallbacks& callbacks, 
OpaqueResourceDecoder& resource_decoder)); }; class MockGrpcStreamCallbacks diff --git a/test/server/lds_api_test.cc b/test/server/lds_api_test.cc index 513cd9ab5e41..3b15585cc2d4 100644 --- a/test/server/lds_api_test.cc +++ b/test/server/lds_api_test.cc @@ -77,15 +77,14 @@ class LdsApiTest : public testing::Test { EXPECT_CALL(listener_manager_, beginListenerUpdate()); } - void addListener(Protobuf::RepeatedPtrField& listeners, - const std::string& listener_name) { + envoy::config::listener::v3::Listener buildListener(const std::string& listener_name) { envoy::config::listener::v3::Listener listener; listener.set_name(listener_name); auto socket_address = listener.mutable_address()->mutable_socket_address(); socket_address->set_address(listener_name); socket_address->set_port_value(1); listener.add_filter_chains(); - listeners.Add()->PackFrom(listener); + return listener; } std::shared_ptr> grpc_mux_; @@ -103,38 +102,11 @@ class LdsApiTest : public testing::Test { std::list> listeners_; }; -// Negative test for protoc-gen-validate constraints. -TEST_F(LdsApiTest, ValidateFail) { - InSequence s; - - setup(); - - Protobuf::RepeatedPtrField listeners; - envoy::config::listener::v3::Listener listener; - listeners.Add()->PackFrom(listener); - std::vector> existing_listeners; - EXPECT_CALL(listener_manager_, listeners()).WillOnce(Return(existing_listeners)); - EXPECT_CALL(listener_manager_, beginListenerUpdate()); - // Validate that the error state is passed to the listener manager. - EXPECT_CALL(listener_manager_, endListenerUpdate(_)) - .WillOnce(Invoke([](ListenerManager::FailureStates&& state) { - EXPECT_EQ(1, state.size()); - EXPECT_EQ("Proto constraint validation failed (ListenerValidationError.Address: " - "[\"value is required\"]): ", - state[0]->details()); - EXPECT_TRUE(state[0]->has_failed_configuration()); - })); - EXPECT_CALL(init_watcher_, ready()); - - EXPECT_THROW(lds_callbacks_->onConfigUpdate(listeners, ""), EnvoyException); -} - TEST_F(LdsApiTest, MisconfiguredListenerNameIsPresentInException) { InSequence s; setup(); - Protobuf::RepeatedPtrField listeners; std::vector> existing_listeners; // Construct a minimal listener that would pass proto validation. @@ -153,9 +125,9 @@ TEST_F(LdsApiTest, MisconfiguredListenerNameIsPresentInException) { EXPECT_CALL(listener_manager_, endListenerUpdate(_)); EXPECT_CALL(init_watcher_, ready()); - listeners.Add()->PackFrom(listener); + const auto decoded_resources = TestUtility::decodeResources({listener}); EXPECT_THROW_WITH_MESSAGE( - lds_callbacks_->onConfigUpdate(listeners, ""), EnvoyException, + lds_callbacks_->onConfigUpdate(decoded_resources.refvec_, ""), EnvoyException, "Error adding/updating listener(s) invalid-listener: something is wrong\n"); } @@ -164,7 +136,6 @@ TEST_F(LdsApiTest, EmptyListenersUpdate) { setup(); - Protobuf::RepeatedPtrField listeners; std::vector> existing_listeners; EXPECT_CALL(listener_manager_, listeners()).WillOnce(Return(existing_listeners)); @@ -174,7 +145,7 @@ TEST_F(LdsApiTest, EmptyListenersUpdate) { ; EXPECT_CALL(init_watcher_, ready()); - lds_callbacks_->onConfigUpdate(listeners, ""); + lds_callbacks_->onConfigUpdate({}, ""); } TEST_F(LdsApiTest, ListenerCreationContinuesEvenAfterException) { @@ -182,14 +153,13 @@ TEST_F(LdsApiTest, ListenerCreationContinuesEvenAfterException) { setup(); - Protobuf::RepeatedPtrField listeners; std::vector> existing_listeners; // Add 4 listeners - 2 valid and 2 invalid. 
- addListener(listeners, "valid-listener-1"); - addListener(listeners, "invalid-listener-1"); - addListener(listeners, "valid-listener-2"); - addListener(listeners, "invalid-listener-2"); + const auto listener_0 = buildListener("valid-listener-1"); + const auto listener_1 = buildListener("invalid-listener-1"); + const auto listener_2 = buildListener("valid-listener-2"); + const auto listener_3 = buildListener("invalid-listener-2"); EXPECT_CALL(listener_manager_, listeners()).WillOnce(Return(existing_listeners)); @@ -203,7 +173,10 @@ TEST_F(LdsApiTest, ListenerCreationContinuesEvenAfterException) { EXPECT_CALL(init_watcher_, ready()); - EXPECT_THROW_WITH_MESSAGE(lds_callbacks_->onConfigUpdate(listeners, ""), EnvoyException, + const auto decoded_resources = + TestUtility::decodeResources({listener_0, listener_1, listener_2, listener_3}); + EXPECT_THROW_WITH_MESSAGE(lds_callbacks_->onConfigUpdate(decoded_resources.refvec_, ""), + EnvoyException, "Error adding/updating listener(s) invalid-listener-1: something is " "wrong\ninvalid-listener-2: something else is wrong\n"); } @@ -216,9 +189,7 @@ TEST_F(LdsApiTest, ValidateDuplicateListeners) { setup(); - Protobuf::RepeatedPtrField listeners; - addListener(listeners, "duplicate_listener"); - addListener(listeners, "duplicate_listener"); + const auto listener = buildListener("duplicate_listener"); std::vector> existing_listeners; EXPECT_CALL(listener_manager_, listeners()).WillOnce(Return(existing_listeners)); @@ -227,7 +198,9 @@ TEST_F(LdsApiTest, ValidateDuplicateListeners) { EXPECT_CALL(listener_manager_, endListenerUpdate(_)); EXPECT_CALL(init_watcher_, ready()); - EXPECT_THROW_WITH_MESSAGE(lds_callbacks_->onConfigUpdate(listeners, ""), EnvoyException, + const auto decoded_resources = TestUtility::decodeResources({listener, listener}); + EXPECT_THROW_WITH_MESSAGE(lds_callbacks_->onConfigUpdate(decoded_resources.refvec_, ""), + EnvoyException, "Error adding/updating listener(s) duplicate_listener: duplicate " "listener duplicate_listener found\n"); } @@ -264,7 +237,9 @@ TEST_F(LdsApiTest, Basic) { expectAdd("listener2", "0", true); EXPECT_CALL(listener_manager_, endListenerUpdate(_)); EXPECT_CALL(init_watcher_, ready()); - lds_callbacks_->onConfigUpdate(response1.resources(), response1.version_info()); + const auto decoded_resources = + TestUtility::decodeResources(response1); + lds_callbacks_->onConfigUpdate(decoded_resources.refvec_, response1.version_info()); EXPECT_EQ("0", lds_->versionInfo()); @@ -295,7 +270,9 @@ TEST_F(LdsApiTest, Basic) { expectAdd("listener1", "1", false); expectAdd("listener3", "1", true); EXPECT_CALL(listener_manager_, endListenerUpdate(_)); - lds_callbacks_->onConfigUpdate(response2.resources(), response2.version_info()); + const auto decoded_resources_2 = + TestUtility::decodeResources(response2); + lds_callbacks_->onConfigUpdate(decoded_resources_2.refvec_, response2.version_info()); EXPECT_EQ("1", lds_->versionInfo()); } @@ -326,7 +303,9 @@ TEST_F(LdsApiTest, UpdateVersionOnListenerRemove) { expectAdd("listener1", "0", true); EXPECT_CALL(listener_manager_, endListenerUpdate(_)); EXPECT_CALL(init_watcher_, ready()); - lds_callbacks_->onConfigUpdate(response1.resources(), response1.version_info()); + const auto decoded_resources = + TestUtility::decodeResources(response1); + lds_callbacks_->onConfigUpdate(decoded_resources.refvec_, response1.version_info()); EXPECT_EQ("0", lds_->versionInfo()); @@ -342,7 +321,9 @@ TEST_F(LdsApiTest, UpdateVersionOnListenerRemove) { makeListenersAndExpectCall({"listener1"}); 
EXPECT_CALL(listener_manager_, removeListener("listener1")).WillOnce(Return(true)); EXPECT_CALL(listener_manager_, endListenerUpdate(_)); - lds_callbacks_->onConfigUpdate(response2.resources(), response2.version_info()); + const auto decoded_resources_2 = + TestUtility::decodeResources(response2); + lds_callbacks_->onConfigUpdate(decoded_resources_2.refvec_, response2.version_info()); EXPECT_EQ("1", lds_->versionInfo()); } @@ -372,7 +353,9 @@ version_info: '1' expectAdd("listener0", {}, true); EXPECT_CALL(listener_manager_, endListenerUpdate(_)); EXPECT_CALL(init_watcher_, ready()); - lds_callbacks_->onConfigUpdate(response1.resources(), response1.version_info()); + const auto decoded_resources = + TestUtility::decodeResources(response1); + lds_callbacks_->onConfigUpdate(decoded_resources.refvec_, response1.version_info()); std::string response2_basic = R"EOF( version_info: '1' @@ -409,39 +392,10 @@ version_info: '1' // Can't check version here because of bazel sandbox paths for the certs. expectAdd("listener-8080", {}, true); EXPECT_CALL(listener_manager_, endListenerUpdate(_)); - EXPECT_NO_THROW(lds_callbacks_->onConfigUpdate(response2.resources(), response2.version_info())); -} - -// Validate behavior when the config is delivered but it fails PGV validation. -TEST_F(LdsApiTest, FailureInvalidConfig) { - InSequence s; - - setup(); - - // To test the case of valid JSON with invalid config, create a listener with no address. - const std::string response1_json = R"EOF( -{ - "version_info": "1", - "resources": [ - { - "@type": "type.googleapis.com/envoy.api.v2.Listener", - "name": "listener1", - "filter_chains": [ { "filters": null } ] - } - ] -} - )EOF"; - auto response1 = - TestUtility::parseYaml(response1_json); - - std::vector> existing_listeners; - EXPECT_CALL(listener_manager_, listeners()).WillOnce(Return(existing_listeners)); - EXPECT_CALL(listener_manager_, beginListenerUpdate()); - EXPECT_CALL(listener_manager_, endListenerUpdate(_)); - EXPECT_CALL(init_watcher_, ready()); - EXPECT_THROW(lds_callbacks_->onConfigUpdate(response1.resources(), response1.version_info()), - EnvoyException); - EXPECT_EQ("", lds_->versionInfo()); + const auto decoded_resources_2 = + TestUtility::decodeResources(response2); + EXPECT_NO_THROW( + lds_callbacks_->onConfigUpdate(decoded_resources_2.refvec_, response2.version_info())); } // Validate behavior when the config fails delivery at the subscription level. 
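(Illustrative sketch, not part of the patch: the lds_api_test.cc hunks above and below all apply the same migration, so the post-change calling convention is shown here once. The helper names are taken from the hunks themselves; the listener name and version string are made up for the example.)

    // Build a typed Listener, wrap it with the decodeResources() test helper,
    // and hand the resulting reference vector to the LDS subscription callbacks.
    const auto listener = buildListener("example-listener");
    const auto decoded_resources = TestUtility::decodeResources({listener});
    lds_callbacks_->onConfigUpdate(decoded_resources.refvec_, "1");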
@@ -487,7 +441,9 @@ TEST_F(LdsApiTest, ReplacingListenerWithSameAddress) { expectAdd("listener2", "0", true); EXPECT_CALL(listener_manager_, endListenerUpdate(_)); EXPECT_CALL(init_watcher_, ready()); - lds_callbacks_->onConfigUpdate(response1.resources(), response1.version_info()); + const auto decoded_resources = + TestUtility::decodeResources(response1); + lds_callbacks_->onConfigUpdate(decoded_resources.refvec_, response1.version_info()); EXPECT_EQ("0", lds_->versionInfo()); @@ -518,7 +474,9 @@ TEST_F(LdsApiTest, ReplacingListenerWithSameAddress) { expectAdd("listener1", "1", false); expectAdd("listener3", "1", true); EXPECT_CALL(listener_manager_, endListenerUpdate(_)); - lds_callbacks_->onConfigUpdate(response2.resources(), response2.version_info()); + const auto decoded_resources_2 = + TestUtility::decodeResources(response2); + lds_callbacks_->onConfigUpdate(decoded_resources_2.refvec_, response2.version_info()); } } // namespace diff --git a/test/test_common/BUILD b/test/test_common/BUILD index dc4e5634d4e9..d2a790f05c3d 100644 --- a/test/test_common/BUILD +++ b/test/test_common/BUILD @@ -127,6 +127,8 @@ envoy_cc_test_library( "//source/common/common:empty_string", "//source/common/common:thread_lib", "//source/common/common:utility_lib", + "//source/common/config:decoded_resource_lib", + "//source/common/config:opaque_resource_decoder_lib", "//source/common/config:version_converter_lib", "//source/common/filesystem:directory_lib", "//source/common/filesystem:filesystem_lib", diff --git a/test/test_common/utility.h b/test/test_common/utility.h index e4cf7da7a6ee..e85776058e24 100644 --- a/test/test_common/utility.h +++ b/test/test_common/utility.h @@ -19,6 +19,8 @@ #include "common/common/c_smart_ptr.h" #include "common/common/empty_string.h" #include "common/common/thread.h" +#include "common/config/decoded_resource_impl.h" +#include "common/config/opaque_resource_decoder_impl.h" #include "common/config/version_converter.h" #include "common/http/header_map_impl.h" #include "common/protobuf/message_validator_impl.h" @@ -299,6 +301,20 @@ class TestUtility { return differencer.Compare(lhs, rhs); } + /** + * Compare two decoded resources for equality. + * + * @param lhs decoded resource on LHS. + * @param rhs decoded resource on RHS. + * @return bool indicating whether the decoded resources are equal. + */ + static bool decodedResourceEq(const Config::DecodedResource& lhs, + const Config::DecodedResource& rhs) { + return lhs.name() == rhs.name() && lhs.aliases() == rhs.aliases() && + lhs.version() == rhs.version() && lhs.hasResource() == rhs.hasResource() && + (!lhs.hasResource() || protoEqual(lhs.resource(), rhs.resource())); + } + /** * Compare two JSON strings serialized from ProtobufWkt::Struct for equality. 
When two identical * ProtobufWkt::Struct are serialized into JSON strings, the results have the same set of @@ -600,6 +616,58 @@ class TestUtility { return pb_binary_str; } + template + static Config::DecodedResourcesWrapper + decodeResources(std::initializer_list resources, + const std::string& name_field = "name") { + Config::DecodedResourcesWrapper decoded_resources; + for (const auto& resource : resources) { + auto owned_resource = std::make_unique(resource); + decoded_resources.owned_resources_.emplace_back(new Config::DecodedResourceImpl( + std::move(owned_resource), MessageUtil::getStringField(resource, name_field), {}, "")); + decoded_resources.refvec_.emplace_back(*decoded_resources.owned_resources_.back()); + } + return decoded_resources; + } + + template + static Config::DecodedResourcesWrapper + decodeResources(const Protobuf::RepeatedPtrField& resources, + const std::string& version, const std::string& name_field = "name") { + TestOpaqueResourceDecoderImpl resource_decoder(name_field); + return Config::DecodedResourcesWrapper(resource_decoder, resources, version); + } + + template + static Config::DecodedResourcesWrapper + decodeResources(const envoy::service::discovery::v3::DiscoveryResponse& resources, + const std::string& name_field = "name") { + return decodeResources(resources.resources(), resources.version_info(), + name_field); + } + + template + static Config::DecodedResourcesWrapper decodeResources( + const Protobuf::RepeatedPtrField& resources, + const std::string& name_field = "name") { + Config::DecodedResourcesWrapper decoded_resources; + TestOpaqueResourceDecoderImpl resource_decoder(name_field); + for (const auto& resource : resources) { + decoded_resources.owned_resources_.emplace_back( + new Config::DecodedResourceImpl(resource_decoder, resource)); + decoded_resources.refvec_.emplace_back(*decoded_resources.owned_resources_.back()); + } + return decoded_resources; + } + + template + class TestOpaqueResourceDecoderImpl : public Config::OpaqueResourceDecoderImpl { + public: + TestOpaqueResourceDecoderImpl(absl::string_view name_field) + : Config::OpaqueResourceDecoderImpl(ProtobufMessage::getStrictValidationVisitor(), + name_field) {} + }; + /** * Returns the string representation of a envoy::config::core::v3::ApiVersion. * @@ -1015,6 +1083,35 @@ MATCHER_P(RepeatedProtoEq, expected, "") { return equal; } +MATCHER_P(DecodedResourcesEq, expected, "") { + const bool equal = std::equal(arg.begin(), arg.end(), expected.begin(), expected.end(), + TestUtility::decodedResourceEq); + if (!equal) { + const auto format_resources = + [](const std::vector& resources) -> std::string { + std::vector resource_strs; + std::transform( + resources.begin(), resources.end(), std::back_inserter(resource_strs), + [](const Config::DecodedResourceRef& resource) -> std::string { + return fmt::format( + "", resource.get().name(), + absl::StrJoin(resource.get().aliases(), ","), resource.get().version(), + resource.get().hasResource() ? 
resource.get().resource().DebugString() : "(none)"); + }); + return absl::StrJoin(resource_strs, ", "); + }; + *result_listener << "\n" + << TestUtility::addLeftAndRightPadding("Expected resources:") << "\n" + << format_resources(expected) << "\n" + << TestUtility::addLeftAndRightPadding("are not equal to actual resources:") + << "\n" + << format_resources(arg) << "\n" + << TestUtility::addLeftAndRightPadding("") // line full of padding + << "\n"; + } + return equal; +} + MATCHER_P(Percent, rhs, "") { envoy::type::v3::FractionalPercent expected; expected.set_numerator(rhs); From 83841da7110b4ee87f78eff47c0f3555c55d1348 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Wed, 1 Jul 2020 11:25:15 -0400 Subject: [PATCH 502/909] build: exempting a failing test on windows (#11839) Risk Level: n/a Testing: n/a Docs Changes: n/a Release Notes: n/a part of #11841 Signed-off-by: Alyssa Wilk --- test/integration/BUILD | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/integration/BUILD b/test/integration/BUILD index 5b4c57e572d5..fe34c7575262 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -1308,6 +1308,8 @@ envoy_cc_test( envoy_cc_test( name = "cx_limit_integration_test", srcs = ["cx_limit_integration_test.cc"], + # TODO(11841) See if this can be reenabled once the test is deflaked. + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "//include/envoy/network:filter_interface", From a103da9ed80596ba72048d3dab985ce79cd0fe60 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Wed, 1 Jul 2020 12:32:10 -0400 Subject: [PATCH 503/909] test: fixing an integration test flake (#11845) Signed-off-by: Alyssa Wilk --- test/integration/cx_limit_integration_test.cc | 1 - 1 file changed, 1 deletion(-) diff --git a/test/integration/cx_limit_integration_test.cc b/test/integration/cx_limit_integration_test.cc index 6ffb4952ae63..5d70380995aa 100644 --- a/test/integration/cx_limit_integration_test.cc +++ b/test/integration/cx_limit_integration_test.cc @@ -66,7 +66,6 @@ class ConnectionLimitIntegrationTest : public testing::TestWithParamclose(); - ASSERT_TRUE(raw_conns.front()->close()); ASSERT_TRUE(raw_conns.front()->waitForDisconnect()); tcp_clients.emplace_back(makeTcpConnection(lookupPort("listener_0"))); From 6f5bea6ee40b315a16e6a008301ef3e2815e059e Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Wed, 1 Jul 2020 12:33:50 -0400 Subject: [PATCH 504/909] test: removing more infinite timeouts (#11842) Signed-off-by: Alyssa Wilk --- .../tcp_grpc_access_log_integration_test.cc | 4 +- .../redis/redis_cluster_integration_test.cc | 6 +-- .../local_ratelimit_integration_test.cc | 2 +- .../mysql_proxy/mysql_integration_test.cc | 4 +- .../postgres_integration_test.cc | 2 +- .../filters/network/rbac/integration_test.cc | 4 +- .../redis_proxy_integration_test.cc | 22 +++++----- .../network/thrift_proxy/integration_test.cc | 22 +++++----- .../translation_integration_test.cc | 2 +- .../cluster_filter_integration_test.cc | 6 +-- .../filter_manager_integration_test.cc | 8 ++-- test/integration/h1_fuzz.cc | 2 +- test/integration/h2_fuzz.cc | 4 +- .../header_casing_integration_test.cc | 2 +- test/integration/http2_integration_test.cc | 10 ++--- test/integration/integration.cc | 30 +++++++++---- test/integration/integration.h | 8 +++- test/integration/integration_test.cc | 6 +-- .../tcp_conn_pool_integration_test.cc | 6 +-- .../integration/tcp_proxy_integration_test.cc | 42 +++++++++---------- .../tcp_tunneling_integration_test.cc | 20 ++++----- 21 files changed, 114 insertions(+), 98 deletions(-) diff 
--git a/test/extensions/access_loggers/grpc/tcp_grpc_access_log_integration_test.cc b/test/extensions/access_loggers/grpc/tcp_grpc_access_log_integration_test.cc index f7a7322616a8..6dab6bf6e024 100644 --- a/test/extensions/access_loggers/grpc/tcp_grpc_access_log_integration_test.cc +++ b/test/extensions/access_loggers/grpc/tcp_grpc_access_log_integration_test.cc @@ -136,11 +136,11 @@ TEST_P(TcpGrpcAccessLogIntegrationTest, BasicAccessLogFlow) { ASSERT_TRUE(fake_upstream_connection->write("hello")); tcp_client->waitForData("hello"); - tcp_client->write("bar", false); + ASSERT_TRUE(tcp_client->write("bar", false)); ASSERT_TRUE(fake_upstream_connection->write("", true)); tcp_client->waitForHalfClose(); - tcp_client->write("", true); + ASSERT_TRUE(tcp_client->write("", true)); ASSERT_TRUE(fake_upstream_connection->waitForData(3)); ASSERT_TRUE(fake_upstream_connection->waitForHalfClose()); diff --git a/test/extensions/clusters/redis/redis_cluster_integration_test.cc b/test/extensions/clusters/redis/redis_cluster_integration_test.cc index 02a1a1a4ce40..044d767b1ca1 100644 --- a/test/extensions/clusters/redis/redis_cluster_integration_test.cc +++ b/test/extensions/clusters/redis/redis_cluster_integration_test.cc @@ -201,7 +201,7 @@ class RedisClusterIntegrationTest : public testing::TestWithParamclearData(); - redis_client->write(request); + ASSERT_TRUE(redis_client->write(request)); if (fake_upstream_connection.get() == nullptr) { expect_auth_command = (!auth_password.empty()); @@ -471,7 +471,7 @@ TEST_P(RedisClusterIntegrationTest, ClusterSlotRequestAfterRedirection) { std::string proxy_to_server; IntegrationTcpClientPtr redis_client = makeTcpConnection(lookupPort("redis_proxy")); - redis_client->write(request); + ASSERT_TRUE(redis_client->write(request)); FakeRawConnectionPtr fake_upstream_connection_1, fake_upstream_connection_2, fake_upstream_connection_3; @@ -593,7 +593,7 @@ TEST_P(RedisClusterWithRefreshIntegrationTest, ClusterSlotRequestAfterFailure) { std::string proxy_to_server; IntegrationTcpClientPtr redis_client = makeTcpConnection(lookupPort("redis_proxy")); - redis_client->write(request); + ASSERT_TRUE(redis_client->write(request)); FakeRawConnectionPtr fake_upstream_connection_1, fake_upstream_connection_2; diff --git a/test/extensions/filters/network/local_ratelimit/local_ratelimit_integration_test.cc b/test/extensions/filters/network/local_ratelimit/local_ratelimit_integration_test.cc index fcfcf968c9d2..63b684f49d86 100644 --- a/test/extensions/filters/network/local_ratelimit/local_ratelimit_integration_test.cc +++ b/test/extensions/filters/network/local_ratelimit/local_ratelimit_integration_test.cc @@ -35,7 +35,7 @@ name: ratelimit IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("listener_0")); FakeRawConnectionPtr fake_upstream_connection; ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); - tcp_client->write("hello"); + ASSERT_TRUE(tcp_client->write("hello")); ASSERT_TRUE(fake_upstream_connection->waitForData(5)); ASSERT_TRUE(fake_upstream_connection->write("world")); tcp_client->waitForData("world"); diff --git a/test/extensions/filters/network/mysql_proxy/mysql_integration_test.cc b/test/extensions/filters/network/mysql_proxy/mysql_integration_test.cc index e6b79b060eb2..ec528867d375 100644 --- a/test/extensions/filters/network/mysql_proxy/mysql_integration_test.cc +++ b/test/extensions/filters/network/mysql_proxy/mysql_integration_test.cc @@ -83,7 +83,7 @@ TEST_P(MySQLIntegrationTest, MySQLLoginTest) { // Client 
username/password and capabilities std::string login = encodeClientLogin(MYSQL_CLIENT_CAPAB_41VS320, user, CHALLENGE_SEQ_NUM); - tcp_client->write(login); + ASSERT_TRUE(tcp_client->write(login)); ASSERT_TRUE(fake_upstream_connection->waitForData(login.length(), &rcvd_data)); EXPECT_EQ(login, rcvd_data); @@ -130,7 +130,7 @@ TEST_P(MySQLIntegrationTest, MySQLUnitTestMultiClientsLoop) { // Client username/password and capabilities std::string login = encodeClientLogin(MYSQL_CLIENT_CAPAB_41VS320, user, CHALLENGE_SEQ_NUM); - tcp_client->write(login); + ASSERT_TRUE(tcp_client->write(login)); ASSERT_TRUE(fake_upstream_connection->waitForData(login.length(), &rcvd_data)); EXPECT_EQ(login, rcvd_data); diff --git a/test/extensions/filters/network/postgres_proxy/postgres_integration_test.cc b/test/extensions/filters/network/postgres_proxy/postgres_integration_test.cc index 1d549419e7c0..32ad45f0ba24 100644 --- a/test/extensions/filters/network/postgres_proxy/postgres_integration_test.cc +++ b/test/extensions/filters/network/postgres_proxy/postgres_integration_test.cc @@ -52,7 +52,7 @@ TEST_P(PostgresIntegrationTest, Login) { data.writeBEInt(12); // Add 8 bytes of some data. data.add(buf, 8); - tcp_client->write(data.toString()); + ASSERT_TRUE(tcp_client->write(data.toString())); ASSERT_TRUE(fake_upstream_connection->waitForData(data.toString().length(), &rcvd)); data.drain(data.length()); diff --git a/test/extensions/filters/network/rbac/integration_test.cc b/test/extensions/filters/network/rbac/integration_test.cc index d4cc8da97456..22b7407af629 100644 --- a/test/extensions/filters/network/rbac/integration_test.cc +++ b/test/extensions/filters/network/rbac/integration_test.cc @@ -89,7 +89,7 @@ name: rbac any: true )EOF"); IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("listener_0")); - tcp_client->write("hello"); + ASSERT_TRUE(tcp_client->write("hello")); ASSERT_TRUE(tcp_client->connected()); tcp_client->close(); @@ -122,7 +122,7 @@ name: rbac - any: true )EOF"); IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("listener_0")); - tcp_client->write("hello"); + ASSERT_TRUE(tcp_client->write("hello")); tcp_client->waitForDisconnect(); EXPECT_EQ(0U, test_server_->counter("tcp.rbac.allowed")->value()); diff --git a/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc b/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc index f44f10b9d1de..dd0e64375b8c 100644 --- a/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc +++ b/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc @@ -480,7 +480,7 @@ void RedisProxyIntegrationTest::roundtripToUpstreamStep( IntegrationTcpClientPtr& redis_client, FakeRawConnectionPtr& fake_upstream_connection, const std::string& auth_username, const std::string& auth_password) { redis_client->clearData(); - redis_client->write(request); + ASSERT_TRUE(redis_client->write(request)); expectUpstreamRequestResponse(upstream, request, response, fake_upstream_connection, auth_username, auth_password); @@ -539,7 +539,7 @@ void RedisProxyIntegrationTest::proxyResponseStep(const std::string& request, const std::string& proxy_response, IntegrationTcpClientPtr& redis_client) { redis_client->clearData(); - redis_client->write(request); + ASSERT_TRUE(redis_client->write(request)); redis_client->waitForData(proxy_response); // After sending the request to the proxy, the fake redis client should receive proxy_response. 
EXPECT_EQ(proxy_response, redis_client->data()); @@ -560,7 +560,7 @@ void RedisProxyWithRedirectionIntegrationTest::simpleRedirection( bool asking = (redirection_response.find("-ASK") != std::string::npos); std::string proxy_to_server; IntegrationTcpClientPtr redis_client = makeTcpConnection(lookupPort("redis_proxy")); - redis_client->write(request); + ASSERT_TRUE(redis_client->write(request)); FakeRawConnectionPtr fake_upstream_connection_1, fake_upstream_connection_2; @@ -623,7 +623,7 @@ TEST_P(RedisProxyWithCommandStatsIntegrationTest, MGETRequestAndResponse) { // Make MGET request from downstream IntegrationTcpClientPtr redis_client = makeTcpConnection(lookupPort("redis_proxy")); redis_client->clearData(); - redis_client->write(request); + ASSERT_TRUE(redis_client->write(request)); // Make GET request to upstream (MGET is turned into GETs for upstream) FakeUpstreamPtr& upstream = fake_upstreams_[0]; @@ -788,7 +788,7 @@ TEST_P(RedisProxyWithRedirectionIntegrationTest, ConnectionFailureBeforeAskingRe std::string proxy_to_server; IntegrationTcpClientPtr redis_client = makeTcpConnection(lookupPort("redis_proxy")); - redis_client->write(request); + ASSERT_TRUE(redis_client->write(request)); FakeRawConnectionPtr fake_upstream_connection_1, fake_upstream_connection_2; @@ -851,8 +851,8 @@ TEST_P(RedisProxyWithBatchingIntegrationTest, SimpleBatching) { std::string proxy_to_server; IntegrationTcpClientPtr redis_client_1 = makeTcpConnection(lookupPort("redis_proxy")); IntegrationTcpClientPtr redis_client_2 = makeTcpConnection(lookupPort("redis_proxy")); - redis_client_1->write(request); - redis_client_2->write(request); + ASSERT_TRUE(redis_client_1->write(request)); + ASSERT_TRUE(redis_client_2->write(request)); FakeRawConnectionPtr fake_upstream_connection; EXPECT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); @@ -961,7 +961,7 @@ TEST_P(RedisProxyWithMirrorsIntegrationTest, MirroredCatchAllRequest) { const std::string& response = "$3\r\nbar\r\n"; // roundtrip to cluster_0 (catch_all route) IntegrationTcpClientPtr redis_client = makeTcpConnection(lookupPort("redis_proxy")); - redis_client->write(request); + ASSERT_TRUE(redis_client->write(request)); expectUpstreamRequestResponse(fake_upstreams_[0], request, response, fake_upstream_connection[0]); @@ -991,7 +991,7 @@ TEST_P(RedisProxyWithMirrorsIntegrationTest, MirroredWriteOnlyRequest) { // roundtrip to cluster_0 (write_only route) IntegrationTcpClientPtr redis_client = makeTcpConnection(lookupPort("redis_proxy")); - redis_client->write(set_request); + ASSERT_TRUE(redis_client->write(set_request)); expectUpstreamRequestResponse(fake_upstreams_[0], set_request, set_response, fake_upstream_connection[0]); @@ -1018,7 +1018,7 @@ TEST_P(RedisProxyWithMirrorsIntegrationTest, ExcludeReadCommands) { // roundtrip to cluster_0 (write_only route) IntegrationTcpClientPtr redis_client = makeTcpConnection(lookupPort("redis_proxy")); - redis_client->write(get_request); + ASSERT_TRUE(redis_client->write(get_request)); expectUpstreamRequestResponse(fake_upstreams_[0], get_request, get_response, cluster_0_connection); @@ -1044,7 +1044,7 @@ TEST_P(RedisProxyWithMirrorsIntegrationTest, EnabledViaRuntimeFraction) { const std::string& response = "$3\r\nbar\r\n"; // roundtrip to cluster_0 (catch_all route) IntegrationTcpClientPtr redis_client = makeTcpConnection(lookupPort("redis_proxy")); - redis_client->write(request); + ASSERT_TRUE(redis_client->write(request)); expectUpstreamRequestResponse(fake_upstreams_[0], request, response, 
fake_upstream_connection[0]); diff --git a/test/extensions/filters/network/thrift_proxy/integration_test.cc b/test/extensions/filters/network/thrift_proxy/integration_test.cc index 97eecd997219..29dc1790df4a 100644 --- a/test/extensions/filters/network/thrift_proxy/integration_test.cc +++ b/test/extensions/filters/network/thrift_proxy/integration_test.cc @@ -175,7 +175,7 @@ TEST_P(ThriftConnManagerIntegrationTest, Success) { initializeCall(DriverMode::Success); IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("listener_0")); - tcp_client->write(request_bytes_.toString()); + ASSERT_TRUE(tcp_client->write(request_bytes_.toString())); FakeRawConnectionPtr fake_upstream_connection; FakeUpstream* expected_upstream = getExpectedUpstream(false); @@ -202,7 +202,7 @@ TEST_P(ThriftConnManagerIntegrationTest, IDLException) { initializeCall(DriverMode::IDLException); IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("listener_0")); - tcp_client->write(request_bytes_.toString()); + ASSERT_TRUE(tcp_client->write(request_bytes_.toString())); FakeUpstream* expected_upstream = getExpectedUpstream(false); FakeRawConnectionPtr fake_upstream_connection; @@ -229,7 +229,7 @@ TEST_P(ThriftConnManagerIntegrationTest, Exception) { initializeCall(DriverMode::Exception); IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("listener_0")); - tcp_client->write(request_bytes_.toString()); + ASSERT_TRUE(tcp_client->write(request_bytes_.toString())); FakeUpstream* expected_upstream = getExpectedUpstream(false); FakeRawConnectionPtr fake_upstream_connection; @@ -262,7 +262,7 @@ TEST_P(ThriftConnManagerIntegrationTest, EarlyClose) { expected_upstream->set_allow_unexpected_disconnects(true); IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("listener_0")); - tcp_client->write(partial_request); + ASSERT_TRUE(tcp_client->write(partial_request)); tcp_client->close(); FakeRawConnectionPtr fake_upstream_connection; @@ -284,7 +284,7 @@ TEST_P(ThriftConnManagerIntegrationTest, EarlyCloseWithUpstream) { request_bytes_.toString().substr(0, request_bytes_.length() - 5); IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("listener_0")); - tcp_client->write(partial_request); + ASSERT_TRUE(tcp_client->write(partial_request)); FakeUpstream* expected_upstream = getExpectedUpstream(false); FakeRawConnectionPtr fake_upstream_connection; @@ -307,7 +307,7 @@ TEST_P(ThriftConnManagerIntegrationTest, EarlyUpstreamClose) { request_bytes_.toString().substr(0, request_bytes_.length() - 5); IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("listener_0")); - tcp_client->write(request_bytes_.toString()); + ASSERT_TRUE(tcp_client->write(request_bytes_.toString())); FakeUpstream* expected_upstream = getExpectedUpstream(false); FakeRawConnectionPtr fake_upstream_connection; @@ -334,7 +334,7 @@ TEST_P(ThriftConnManagerIntegrationTest, Oneway) { initializeOneway(); IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("listener_0")); - tcp_client->write(request_bytes_.toString()); + ASSERT_TRUE(tcp_client->write(request_bytes_.toString())); FakeUpstream* expected_upstream = getExpectedUpstream(true); FakeRawConnectionPtr fake_upstream_connection; @@ -355,7 +355,7 @@ TEST_P(ThriftConnManagerIntegrationTest, OnewayEarlyClose) { initializeOneway(); IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("listener_0")); - tcp_client->write(request_bytes_.toString()); + ASSERT_TRUE(tcp_client->write(request_bytes_.toString())); 
tcp_client->close(); FakeUpstream* expected_upstream = getExpectedUpstream(true); @@ -380,7 +380,7 @@ TEST_P(ThriftConnManagerIntegrationTest, OnewayEarlyClosePartialRequest) { expected_upstream->set_allow_unexpected_disconnects(true); IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("listener_0")); - tcp_client->write(partial_request); + ASSERT_TRUE(tcp_client->write(partial_request)); tcp_client->close(); FakeRawConnectionPtr fake_upstream_connection; @@ -415,13 +415,13 @@ TEST_P(ThriftTwitterConnManagerIntegrationTest, Success) { // Upgrade request/response happens without an upstream. IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("listener_0")); - tcp_client->write(upgrade_request_bytes.toString()); + ASSERT_TRUE(tcp_client->write(upgrade_request_bytes.toString())); tcp_client->waitForData(upgrade_response_bytes.toString()); EXPECT_TRUE( TestUtility::buffersEqual(Buffer::OwnedImpl(tcp_client->data()), upgrade_response_bytes)); // First real request triggers upstream connection. - tcp_client->write(request_bytes_.toString()); + ASSERT_TRUE(tcp_client->write(request_bytes_.toString())); FakeRawConnectionPtr fake_upstream_connection; FakeUpstream* expected_upstream = getExpectedUpstream(false); ASSERT_TRUE(expected_upstream->waitForRawConnection(fake_upstream_connection)); diff --git a/test/extensions/filters/network/thrift_proxy/translation_integration_test.cc b/test/extensions/filters/network/thrift_proxy/translation_integration_test.cc index 3c88a95348b9..7b07ad7ab623 100644 --- a/test/extensions/filters/network/thrift_proxy/translation_integration_test.cc +++ b/test/extensions/filters/network/thrift_proxy/translation_integration_test.cc @@ -116,7 +116,7 @@ TEST_P(ThriftTranslationIntegrationTest, Translates) { initialize(); IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("listener_0")); - tcp_client->write(downstream_request_bytes_.toString()); + ASSERT_TRUE(tcp_client->write(downstream_request_bytes_.toString())); FakeRawConnectionPtr fake_upstream_connection; ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); diff --git a/test/integration/cluster_filter_integration_test.cc b/test/integration/cluster_filter_integration_test.cc index 61eb7bbc7520..4162bc9273cf 100644 --- a/test/integration/cluster_filter_integration_test.cc +++ b/test/integration/cluster_filter_integration_test.cc @@ -109,19 +109,19 @@ TEST_P(ClusterFilterIntegrationTest, TestClusterFilter) { ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); std::string observed_data; - tcp_client->write("test"); + ASSERT_TRUE(tcp_client->write("test")); ASSERT_TRUE(fake_upstream_connection->waitForData(11, &observed_data)); EXPECT_EQ("please test", observed_data); observed_data.clear(); - tcp_client->write(" everything"); + ASSERT_TRUE(tcp_client->write(" everything")); ASSERT_TRUE(fake_upstream_connection->waitForData(22, &observed_data)); EXPECT_EQ("please test everything", observed_data); ASSERT_TRUE(fake_upstream_connection->write("yes")); tcp_client->waitForData("surely yes"); - tcp_client->write("", true); + ASSERT_TRUE(tcp_client->write("", true)); ASSERT_TRUE(fake_upstream_connection->waitForHalfClose()); ASSERT_TRUE(fake_upstream_connection->write("", true)); ASSERT_TRUE(fake_upstream_connection->waitForDisconnect(true)); diff --git a/test/integration/filter_manager_integration_test.cc b/test/integration/filter_manager_integration_test.cc index 0acb10a78b44..9bed3be54fa0 100644 --- 
a/test/integration/filter_manager_integration_test.cc +++ b/test/integration/filter_manager_integration_test.cc @@ -450,7 +450,7 @@ TEST_P(InjectDataWithEchoFilterIntegrationTest, UsageOfInjectDataMethodsShouldBe initialize(); auto tcp_client = makeTcpConnection(lookupPort("listener_0")); - tcp_client->write("hello"); + ASSERT_TRUE(tcp_client->write("hello")); tcp_client->waitForData("hello"); tcp_client->close(); @@ -468,7 +468,7 @@ TEST_P(InjectDataWithEchoFilterIntegrationTest, FilterChainMismatch) { initialize(); auto tcp_client = makeTcpConnection(lookupPort("listener_0")); - tcp_client->write("hello"); + ASSERT_TRUE(tcp_client->write("hello")); std::string access_log = absl::StrCat("NR ", StreamInfo::ResponseCodeDetails::get().FilterChainNotFound); @@ -499,7 +499,7 @@ TEST_P(InjectDataWithTcpProxyFilterIntegrationTest, UsageOfInjectDataMethodsShou FakeRawConnectionPtr fake_upstream_connection; ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); - tcp_client->write("hello"); + ASSERT_TRUE(tcp_client->write("hello")); std::string observed_data; ASSERT_TRUE(fake_upstream_connection->waitForData(5, &observed_data)); @@ -508,7 +508,7 @@ TEST_P(InjectDataWithTcpProxyFilterIntegrationTest, UsageOfInjectDataMethodsShou ASSERT_TRUE(fake_upstream_connection->write("hi")); tcp_client->waitForData("hi"); - tcp_client->write(" world!", true); + ASSERT_TRUE(tcp_client->write(" world!", true)); observed_data.clear(); ASSERT_TRUE(fake_upstream_connection->waitForData(12, &observed_data)); EXPECT_EQ("hello world!", observed_data); diff --git a/test/integration/h1_fuzz.cc b/test/integration/h1_fuzz.cc index f1abeb9481a1..3fe7886bd970 100644 --- a/test/integration/h1_fuzz.cc +++ b/test/integration/h1_fuzz.cc @@ -30,7 +30,7 @@ void H1FuzzIntegrationTest::replay(const test::integration::CaptureFuzzTestCase& } switch (event.event_selector_case()) { case test::integration::Event::kDownstreamSendBytes: - tcp_client->write(event.downstream_send_bytes(), false, false); + ASSERT_TRUE(tcp_client->write(event.downstream_send_bytes(), false, false)); break; case test::integration::Event::kDownstreamRecvBytes: // TODO(htuch): Should we wait for some data? 
diff --git a/test/integration/h2_fuzz.cc b/test/integration/h2_fuzz.cc index 40b6b2410788..c0eeae08152e 100644 --- a/test/integration/h2_fuzz.cc +++ b/test/integration/h2_fuzz.cc @@ -173,11 +173,11 @@ void H2FuzzIntegrationTest::replay(const test::integration::H2CaptureFuzzTestCas switch (event.event_selector_case()) { case test::integration::Event::kDownstreamSendEvent: { auto downstream_write_func = [&](const Http2Frame& h2_frame) -> void { - tcp_client->write(std::string(h2_frame), false, false); + ASSERT_TRUE(tcp_client->write(std::string(h2_frame), false, false)); }; if (!preamble_sent) { // Start H2 session - send hello string - tcp_client->write(Http2Frame::Preamble, false, false); + ASSERT_TRUE(tcp_client->write(Http2Frame::Preamble, false, false)); preamble_sent = true; } for (auto& frame : event.downstream_send_event().h2_frames()) { diff --git a/test/integration/header_casing_integration_test.cc b/test/integration/header_casing_integration_test.cc index a40e789de191..7700e48ab365 100644 --- a/test/integration/header_casing_integration_test.cc +++ b/test/integration/header_casing_integration_test.cc @@ -50,7 +50,7 @@ TEST_P(HeaderCasingIntegrationTest, VerifyCasedHeaders) { IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("http")); auto request = "GET / HTTP/1.1\r\nhost: host\r\nmy-header: foo\r\n\r\n"; - tcp_client->write(request, false); + ASSERT_TRUE(tcp_client->write(request, false)); Envoy::FakeRawConnectionPtr upstream_connection; ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(upstream_connection)); diff --git a/test/integration/http2_integration_test.cc b/test/integration/http2_integration_test.cc index f571b162a676..94154029892b 100644 --- a/test/integration/http2_integration_test.cc +++ b/test/integration/http2_integration_test.cc @@ -1540,22 +1540,22 @@ Http2Frame Http2FloodMitigationTest::readFrame() { void Http2FloodMitigationTest::sendFame(const Http2Frame& frame) { ASSERT_TRUE(tcp_client_->connected()); - tcp_client_->write(std::string(frame), false, false); + ASSERT_TRUE(tcp_client_->write(std::string(frame), false, false)); } void Http2FloodMitigationTest::startHttp2Session() { - tcp_client_->write(Http2Frame::Preamble, false, false); + ASSERT_TRUE(tcp_client_->write(Http2Frame::Preamble, false, false)); // Send empty initial SETTINGS frame. auto settings = Http2Frame::makeEmptySettingsFrame(); - tcp_client_->write(std::string(settings), false, false); + ASSERT_TRUE(tcp_client_->write(std::string(settings), false, false)); // Read initial SETTINGS frame from the server. readFrame(); // Send an SETTINGS ACK. settings = Http2Frame::makeEmptySettingsFrame(Http2Frame::SettingsFlags::Ack); - tcp_client_->write(std::string(settings), false, false); + ASSERT_TRUE(tcp_client_->write(std::string(settings), false, false)); // read pending SETTINGS and WINDOW_UPDATE frames readFrame(); @@ -1578,7 +1578,7 @@ void Http2FloodMitigationTest::floodServer(const Http2Frame& frame, const std::s // Add early stop if we have sent more than 100M of frames, as it this // point it is obvious something is wrong. 
while (total_bytes_sent < TransmitThreshold && tcp_client_->connected()) { - tcp_client_->write({buf.begin(), buf.end()}, false, false); + ASSERT_TRUE(tcp_client_->write({buf.begin(), buf.end()}, false, false)); total_bytes_sent += buf.size(); } diff --git a/test/integration/integration.cc b/test/integration/integration.cc index e12e12a4f55b..608ea2bc742f 100644 --- a/test/integration/integration.cc +++ b/test/integration/integration.cc @@ -149,10 +149,11 @@ void IntegrationStreamDecoder::onResetStream(Http::StreamResetReason reason, abs } IntegrationTcpClient::IntegrationTcpClient(Event::Dispatcher& dispatcher, + Event::TestTimeSystem& time_system, MockBufferFactory& factory, uint32_t port, Network::Address::IpVersion version, bool enable_half_close) - : payload_reader_(new WaitForPayloadReader(dispatcher)), + : time_system_(time_system), payload_reader_(new WaitForPayloadReader(dispatcher)), callbacks_(new ConnectionCallbacks(*this)) { EXPECT_CALL(factory, create_(_, _, _)) .WillOnce(Invoke([&](std::function below_low, std::function above_high, @@ -219,7 +220,9 @@ void IntegrationTcpClient::waitForHalfClose() { void IntegrationTcpClient::readDisable(bool disabled) { connection_->readDisable(disabled); } -void IntegrationTcpClient::write(const std::string& data, bool end_stream, bool verify) { +AssertionResult IntegrationTcpClient::write(const std::string& data, bool end_stream, bool verify, + std::chrono::milliseconds timeout) { + auto end_time = time_system_.monotonicTime() + timeout; Buffer::OwnedImpl buffer(data); if (verify) { EXPECT_CALL(*client_write_buffer_, move(_)); @@ -233,12 +236,21 @@ void IntegrationTcpClient::write(const std::string& data, bool end_stream, bool connection_->write(buffer, end_stream); do { connection_->dispatcher().run(Event::Dispatcher::RunType::NonBlock); - } while (client_write_buffer_->bytes_written() != bytes_expected && !disconnected_); - if (verify) { - // If we disconnect part way through the write, then we should fail, since write() is always - // expected to succeed. - EXPECT_TRUE(!disconnected_ || client_write_buffer_->bytes_written() == bytes_expected); + if (client_write_buffer_->bytes_written() == bytes_expected || disconnected_) { + break; + } + } while (time_system_.monotonicTime() < end_time); + + if (time_system_.monotonicTime() >= end_time) { + return AssertionFailure() << "Timed out completing write"; + } else if (verify && (disconnected_ || client_write_buffer_->bytes_written() != bytes_expected)) { + return AssertionFailure() + << "Failed to complete write or unexpected disconnect. 
disconnected_: " << disconnected_ + << " bytes_written: " << client_write_buffer_->bytes_written() + << " bytes_expected: " << bytes_expected; } + + return AssertionSuccess(); } void IntegrationTcpClient::ConnectionCallbacks::onEvent(Network::ConnectionEvent event) { @@ -390,8 +402,8 @@ void BaseIntegrationTest::setUpstreamProtocol(FakeHttpConnection::Type protocol) } IntegrationTcpClientPtr BaseIntegrationTest::makeTcpConnection(uint32_t port) { - return std::make_unique(*dispatcher_, *mock_buffer_factory_, port, version_, - enable_half_close_); + return std::make_unique(*dispatcher_, time_system_, *mock_buffer_factory_, + port, version_, enable_half_close_); } void BaseIntegrationTest::registerPort(const std::string& key, uint32_t port) { diff --git a/test/integration/integration.h b/test/integration/integration.h index a0a2699e834e..3ea127299066 100644 --- a/test/integration/integration.h +++ b/test/integration/integration.h @@ -96,7 +96,8 @@ using IntegrationStreamDecoderPtr = std::unique_ptr; */ class IntegrationTcpClient { public: - IntegrationTcpClient(Event::Dispatcher& dispatcher, MockBufferFactory& factory, uint32_t port, + IntegrationTcpClient(Event::Dispatcher& dispatcher, Event::TestTimeSystem& time_system, + MockBufferFactory& factory, uint32_t port, Network::Address::IpVersion version, bool enable_half_close = false); void close(); @@ -106,7 +107,9 @@ class IntegrationTcpClient { void waitForDisconnect(bool ignore_spurious_events = false); void waitForHalfClose(); void readDisable(bool disabled); - void write(const std::string& data, bool end_stream = false, bool verify = true); + ABSL_MUST_USE_RESULT AssertionResult + write(const std::string& data, bool end_stream = false, bool verify = true, + std::chrono::milliseconds timeout = TestUtility::DefaultTimeout); const std::string& data() { return payload_reader_->data(); } bool connected() const { return !disconnected_; } // clear up to the `count` number of bytes of received data @@ -124,6 +127,7 @@ class IntegrationTcpClient { IntegrationTcpClient& parent_; }; + Event::TestTimeSystem& time_system_; std::shared_ptr payload_reader_; std::shared_ptr callbacks_; Network::ClientConnectionPtr connection_; diff --git a/test/integration/integration_test.cc b/test/integration/integration_test.cc index 0e052dd11d5f..3d90d5be067c 100644 --- a/test/integration/integration_test.cc +++ b/test/integration/integration_test.cc @@ -891,7 +891,7 @@ TEST_P(IntegrationTest, TestHeadWithExplicitTE) { initialize(); auto tcp_client = makeTcpConnection(lookupPort("http")); - tcp_client->write("HEAD / HTTP/1.1\r\nHost: host\r\n\r\n"); + ASSERT_TRUE(tcp_client->write("HEAD / HTTP/1.1\r\nHost: host\r\n\r\n")); FakeRawConnectionPtr fake_upstream_connection; ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); std::string data; @@ -1349,7 +1349,7 @@ TEST_P(IntegrationTest, ConnectWithNoBody) { // Send the payload early so we can regression test that body data does not // get proxied until after the response headers are sent. 
IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("http")); - tcp_client->write("CONNECT host.com:80 HTTP/1.1\r\n\r\npayload", false); + ASSERT_TRUE(tcp_client->write("CONNECT host.com:80 HTTP/1.1\r\n\r\npayload", false)); FakeRawConnectionPtr fake_upstream_connection; ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); @@ -1385,7 +1385,7 @@ TEST_P(IntegrationTest, ConnectWithChunkedBody) { initialize(); IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("http")); - tcp_client->write("CONNECT host.com:80 HTTP/1.1\r\n\r\npayload", false); + ASSERT_TRUE(tcp_client->write("CONNECT host.com:80 HTTP/1.1\r\n\r\npayload", false)); FakeRawConnectionPtr fake_upstream_connection; ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); diff --git a/test/integration/tcp_conn_pool_integration_test.cc b/test/integration/tcp_conn_pool_integration_test.cc index fb5757836c91..592747627de2 100644 --- a/test/integration/tcp_conn_pool_integration_test.cc +++ b/test/integration/tcp_conn_pool_integration_test.cc @@ -138,7 +138,7 @@ TEST_P(TcpConnPoolIntegrationTest, SingleRequest) { std::string response("response"); IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("listener_0")); - tcp_client->write(request); + ASSERT_TRUE(tcp_client->write(request)); FakeRawConnectionPtr fake_upstream_connection; ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); @@ -158,7 +158,7 @@ TEST_P(TcpConnPoolIntegrationTest, MultipleRequests) { IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("listener_0")); // send request 1 - tcp_client->write(request1); + ASSERT_TRUE(tcp_client->write(request1)); FakeRawConnectionPtr fake_upstream_connection1; ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection1)); std::string data; @@ -166,7 +166,7 @@ TEST_P(TcpConnPoolIntegrationTest, MultipleRequests) { EXPECT_EQ(request1, data); // send request 2 - tcp_client->write(request2); + ASSERT_TRUE(tcp_client->write(request2)); FakeRawConnectionPtr fake_upstream_connection2; ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection2)); ASSERT_TRUE(fake_upstream_connection2->waitForData(request2.size(), &data)); diff --git a/test/integration/tcp_proxy_integration_test.cc b/test/integration/tcp_proxy_integration_test.cc index 7bc39b386557..50b90974a606 100644 --- a/test/integration/tcp_proxy_integration_test.cc +++ b/test/integration/tcp_proxy_integration_test.cc @@ -56,12 +56,12 @@ TEST_P(TcpProxyIntegrationTest, TcpProxyUpstreamWritesFirst) { tcp_client->waitForData("llo"); tcp_client->waitForData(3); - tcp_client->write("hello"); + ASSERT_TRUE(tcp_client->write("hello")); ASSERT_TRUE(fake_upstream_connection->waitForData(5)); ASSERT_TRUE(fake_upstream_connection->write("", true)); tcp_client->waitForHalfClose(); - tcp_client->write("", true); + ASSERT_TRUE(tcp_client->write("", true)); ASSERT_TRUE(fake_upstream_connection->waitForHalfClose()); ASSERT_TRUE(fake_upstream_connection->waitForDisconnect()); } @@ -71,7 +71,7 @@ TEST_P(TcpProxyIntegrationTest, TcpProxyUpstreamWritesFirst) { TEST_P(TcpProxyIntegrationTest, TcpProxyUpstreamDisconnect) { initialize(); IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); - tcp_client->write("hello"); + ASSERT_TRUE(tcp_client->write("hello")); FakeRawConnectionPtr fake_upstream_connection; ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); 
ASSERT_TRUE(fake_upstream_connection->waitForData(5)); @@ -89,13 +89,13 @@ TEST_P(TcpProxyIntegrationTest, TcpProxyUpstreamDisconnect) { TEST_P(TcpProxyIntegrationTest, TcpProxyDownstreamDisconnect) { initialize(); IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); - tcp_client->write("hello"); + ASSERT_TRUE(tcp_client->write("hello")); FakeRawConnectionPtr fake_upstream_connection; ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); ASSERT_TRUE(fake_upstream_connection->waitForData(5)); ASSERT_TRUE(fake_upstream_connection->write("world")); tcp_client->waitForData("world"); - tcp_client->write("hello", true); + ASSERT_TRUE(tcp_client->write("hello", true)); ASSERT_TRUE(fake_upstream_connection->waitForData(10)); ASSERT_TRUE(fake_upstream_connection->waitForHalfClose()); ASSERT_TRUE(fake_upstream_connection->write("", true)); @@ -109,7 +109,7 @@ TEST_P(TcpProxyIntegrationTest, TcpProxyLargeWrite) { std::string data(1024 * 16, 'a'); IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); - tcp_client->write(data); + ASSERT_TRUE(tcp_client->write(data)); FakeRawConnectionPtr fake_upstream_connection; ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); ASSERT_TRUE(fake_upstream_connection->waitForData(data.size())); @@ -147,7 +147,7 @@ TEST_P(TcpProxyIntegrationTest, TcpProxyDownstreamFlush) { FakeRawConnectionPtr fake_upstream_connection; ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); tcp_client->readDisable(true); - tcp_client->write("", true); + ASSERT_TRUE(tcp_client->write("", true)); // This ensures that readDisable(true) has been run on it's thread // before tcp_client starts writing. @@ -192,7 +192,7 @@ TEST_P(TcpProxyIntegrationTest, TcpProxyUpstreamFlush) { // before tcp_client starts writing. tcp_client->waitForHalfClose(); - tcp_client->write(data, true); + ASSERT_TRUE(tcp_client->write(data, true)); test_server_->waitForGaugeEq("tcp.tcp_stats.upstream_flush_active", 1); ASSERT_TRUE(fake_upstream_connection->readDisable(false)); @@ -223,7 +223,7 @@ TEST_P(TcpProxyIntegrationTest, TcpProxyUpstreamFlushEnvoyExit) { // before tcp_client starts writing. 
tcp_client->waitForHalfClose(); - tcp_client->write(data, true); + ASSERT_TRUE(tcp_client->write(data, true)); test_server_->waitForGaugeEq("tcp.tcp_stats.upstream_flush_active", 1); test_server_.reset(); @@ -274,7 +274,7 @@ TEST_P(TcpProxyIntegrationTest, AccessLog) { ASSERT_TRUE(fake_upstream_connection->write("", true)); tcp_client->waitForHalfClose(); - tcp_client->write("", true); + ASSERT_TRUE(tcp_client->write("", true)); ASSERT_TRUE(fake_upstream_connection->waitForHalfClose()); ASSERT_TRUE(fake_upstream_connection->waitForDisconnect()); @@ -317,13 +317,13 @@ TEST_P(TcpProxyIntegrationTest, ShutdownWithOpenConnections) { }); initialize(); IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); - tcp_client->write("hello"); + ASSERT_TRUE(tcp_client->write("hello")); FakeRawConnectionPtr fake_upstream_connection; ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); ASSERT_TRUE(fake_upstream_connection->waitForData(5)); ASSERT_TRUE(fake_upstream_connection->write("world")); tcp_client->waitForData("world"); - tcp_client->write("hello", false); + ASSERT_TRUE(tcp_client->write("hello", false)); ASSERT_TRUE(fake_upstream_connection->waitForData(10)); test_server_.reset(); ASSERT_TRUE(fake_upstream_connection->waitForHalfClose()); @@ -383,7 +383,7 @@ TEST_P(TcpProxyIntegrationTest, TestIdletimeoutWithLargeOutstandingData) { ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); std::string data(1024 * 16, 'a'); - tcp_client->write(data); + ASSERT_TRUE(tcp_client->write(data)); ASSERT_TRUE(fake_upstream_connection->write(data)); tcp_client->waitForDisconnect(true); @@ -423,13 +423,13 @@ TEST_P(TcpProxyIntegrationTest, TestNoCloseOnHealthFailure) { initialize(); IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); - tcp_client->write("hello"); + ASSERT_TRUE(tcp_client->write("hello")); FakeRawConnectionPtr fake_upstream_connection; ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); ASSERT_TRUE(fake_upstream_connection->waitForData(5)); ASSERT_TRUE(fake_upstream_connection->write("world")); tcp_client->waitForData("world"); - tcp_client->write("hello"); + ASSERT_TRUE(tcp_client->write("hello")); ASSERT_TRUE(fake_upstream_connection->waitForData(10)); ASSERT_TRUE(fake_upstream_health_connection->waitForData(8)); @@ -443,7 +443,7 @@ TEST_P(TcpProxyIntegrationTest, TestNoCloseOnHealthFailure) { ASSERT_TRUE(fake_upstream_health_connection_reconnect->waitForData( FakeRawConnection::waitForInexactMatch("Ping"))); - tcp_client->write("still"); + ASSERT_TRUE(tcp_client->write("still")); ASSERT_TRUE(fake_upstream_connection->waitForData(15)); ASSERT_TRUE(fake_upstream_connection->write("here")); tcp_client->waitForData("here", false); @@ -492,13 +492,13 @@ TEST_P(TcpProxyIntegrationTest, TestCloseOnHealthFailure) { initialize(); IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); - tcp_client->write("hello"); + ASSERT_TRUE(tcp_client->write("hello")); FakeRawConnectionPtr fake_upstream_connection; ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); ASSERT_TRUE(fake_upstream_connection->waitForData(5)); ASSERT_TRUE(fake_upstream_connection->write("world")); tcp_client->waitForData("world"); - tcp_client->write("hello"); + ASSERT_TRUE(tcp_client->write("hello")); ASSERT_TRUE(fake_upstream_connection->waitForData(10)); ASSERT_TRUE(fake_upstream_health_connection->waitForData(8)); @@ -585,13 +585,13 @@ void 
TcpProxyMetadataMatchIntegrationTest::initialize() { // Verifies successful connection. void TcpProxyMetadataMatchIntegrationTest::expectEndpointToMatchRoute() { IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); - tcp_client->write("hello"); + ASSERT_TRUE(tcp_client->write("hello")); FakeRawConnectionPtr fake_upstream_connection; ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); ASSERT_TRUE(fake_upstream_connection->waitForData(5)); ASSERT_TRUE(fake_upstream_connection->write("world")); tcp_client->waitForData("world"); - tcp_client->write("hello", true); + ASSERT_TRUE(tcp_client->write("hello", true)); ASSERT_TRUE(fake_upstream_connection->waitForData(10)); ASSERT_TRUE(fake_upstream_connection->waitForHalfClose()); ASSERT_TRUE(fake_upstream_connection->write("", true)); @@ -604,7 +604,7 @@ void TcpProxyMetadataMatchIntegrationTest::expectEndpointToMatchRoute() { // Verifies connection failure. void TcpProxyMetadataMatchIntegrationTest::expectEndpointNotToMatchRoute() { IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); - tcp_client->write("hello"); + ASSERT_TRUE(tcp_client->write("hello", false, false)); // TODO(yskopets): 'tcp_client->waitForDisconnect(true);' gets stuck indefinitely on Linux builds, // e.g. on 'envoy-linux (bazel compile_time_options)' and 'envoy-linux (bazel release)' diff --git a/test/integration/tcp_tunneling_integration_test.cc b/test/integration/tcp_tunneling_integration_test.cc index b22fd48ca9e2..d621bf9aa9f6 100644 --- a/test/integration/tcp_tunneling_integration_test.cc +++ b/test/integration/tcp_tunneling_integration_test.cc @@ -297,7 +297,7 @@ TEST_P(TcpTunnelingIntegrationTest, Basic) { upstream_request_->encodeHeaders(default_response_headers_, false); // Send some data from downstream to upstream, and make sure it goes through. - tcp_client->write("hello", false); + ASSERT_TRUE(tcp_client->write("hello", false)); ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, 5)); // Send data from upstream to downstream. @@ -306,7 +306,7 @@ TEST_P(TcpTunnelingIntegrationTest, Basic) { // Now send more data and close the TCP client. This should be treated as half close, so the data // should go through. - tcp_client->write("hello", false); + ASSERT_TRUE(tcp_client->write("hello", false)); tcp_client->close(); ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, 5)); ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); @@ -347,7 +347,7 @@ TEST_P(TcpTunnelingIntegrationTest, CloseUpstreamFirst) { upstream_request_->encodeHeaders(default_response_headers_, false); // Send data in both directions. - tcp_client->write("hello", false); + ASSERT_TRUE(tcp_client->write("hello", false)); ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, 5)); // Send data from upstream to downstream with an end stream and make sure the data is received @@ -358,10 +358,10 @@ TEST_P(TcpTunnelingIntegrationTest, CloseUpstreamFirst) { // Attempt to send data upstream. // should go through. 
- tcp_client->write("hello", false); + ASSERT_TRUE(tcp_client->write("hello", false)); ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, 5)); - tcp_client->write("hello", true); + ASSERT_TRUE(tcp_client->write("hello", true)); ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, 5)); ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); } @@ -409,7 +409,7 @@ TEST_P(TcpTunnelingIntegrationTest, TestIdletimeoutWithLargeOutstandingData) { upstream_request_->encodeHeaders(default_response_headers_, false); std::string data(1024 * 16, 'a'); - tcp_client->write(data); + ASSERT_TRUE(tcp_client->write(data)); upstream_request_->encodeData(data, false); tcp_client->waitForDisconnect(true); @@ -431,7 +431,7 @@ TEST_P(TcpTunnelingIntegrationTest, TcpProxyDownstreamFlush) { upstream_request_->encodeHeaders(default_response_headers_, false); tcp_client->readDisable(true); - tcp_client->write("", true); + ASSERT_TRUE(tcp_client->write("", true)); // This ensures that readDisable(true) has been run on its thread // before tcp_client starts writing. @@ -465,7 +465,7 @@ TEST_P(TcpTunnelingIntegrationTest, TcpProxyUpstreamFlush) { // before tcp_client starts writing. tcp_client->waitForHalfClose(); - tcp_client->write(data, true); + ASSERT_TRUE(tcp_client->write(data, true)); // Note that upstream_flush_active will *not* be incremented for the HTTP // tunneling case. The data is already written to the stream, so no drainer @@ -488,7 +488,7 @@ TEST_P(TcpTunnelingIntegrationTest, H2ConnectionReuse) { upstream_request_->encodeHeaders(default_response_headers_, false); // Send data in both directions. - tcp_client1->write("hello1", false); + ASSERT_TRUE(tcp_client1->write("hello1", false)); ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, "hello1")); // Send data from upstream to downstream with an end stream and make sure the data is received @@ -507,7 +507,7 @@ TEST_P(TcpTunnelingIntegrationTest, H2ConnectionReuse) { ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); upstream_request_->encodeHeaders(default_response_headers_, false); - tcp_client2->write("hello2", false); + ASSERT_TRUE(tcp_client2->write("hello2", false)); ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, "hello2")); // Send data from upstream to downstream with an end stream and make sure the data is received From 556974437aef10c1eb00569967f4720251545634 Mon Sep 17 00:00:00 2001 From: antonio Date: Wed, 1 Jul 2020 14:02:27 -0400 Subject: [PATCH 505/909] Switch to -Wc++2a-extensions which is accepted by clang9 and provides equivalent warnings on clang10 and clang11. 
(#11843) Signed-off-by: Antonio Vicente --- bazel/envoy_internal.bzl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bazel/envoy_internal.bzl b/bazel/envoy_internal.bzl index b006898a8b64..6c9d125e199e 100644 --- a/bazel/envoy_internal.bzl +++ b/bazel/envoy_internal.bzl @@ -48,7 +48,7 @@ def envoy_copts(repository, test = False): repository + "//bazel:windows_fastbuild_build": [], repository + "//bazel:windows_dbg_build": [], }) + select({ - repository + "//bazel:clang_build": ["-fno-limit-debug-info", "-Wgnu-conditional-omitted-operand", "-Wc++20-extensions"], + repository + "//bazel:clang_build": ["-fno-limit-debug-info", "-Wgnu-conditional-omitted-operand", "-Wc++2a-extensions"], repository + "//bazel:gcc_build": ["-Wno-maybe-uninitialized"], "//conditions:default": [], }) + select({ From 99471fd8f4ef7406f50cf41cf1cfa22bbdfeacc7 Mon Sep 17 00:00:00 2001 From: htuch Date: Wed, 1 Jul 2020 14:45:22 -0400 Subject: [PATCH 506/909] udpa: plumb udpa:// resource names/locators. (#11810) These are currently set as [#not-implemented-hide:]. There should be enough plumbing here to start implementing the first roadmap steps in #11264, i.e. some simple delta discovery flows with core LDS/CDS/RDS/EDS/SDS. I've punted on VHDS and SRDS until later, as these will require special case mapping of their resource keys into context parameters. Risk level: Low (Unused API changes). Testing: Built docs and proto_format.sh. Part of #11264. Signed-off-by: Harvey Tuch --- api/envoy/config/bootstrap/v3/BUILD | 1 + api/envoy/config/bootstrap/v3/bootstrap.proto | 45 +++++++++++++++++- api/envoy/config/bootstrap/v4alpha/BUILD | 1 + .../config/bootstrap/v4alpha/bootstrap.proto | 47 +++++++++++++++++-- api/envoy/config/cluster/v3/BUILD | 1 + api/envoy/config/cluster/v3/cluster.proto | 17 ++++++- api/envoy/config/cluster/v4alpha/BUILD | 1 + .../config/cluster/v4alpha/cluster.proto | 26 ++++++++-- api/envoy/config/core/v3/BUILD | 1 + api/envoy/config/core/v3/config_source.proto | 23 ++++++++- api/envoy/config/core/v4alpha/BUILD | 1 + .../config/core/v4alpha/config_source.proto | 23 ++++++++- api/envoy/config/listener/v3/BUILD | 1 + api/envoy/config/listener/v3/listener.proto | 8 ++++ api/envoy/config/listener/v4alpha/BUILD | 1 + .../config/listener/v4alpha/listener.proto | 11 +++++ api/envoy/extensions/common/tap/v3/BUILD | 1 + .../extensions/common/tap/v3/common.proto | 10 +++- api/envoy/extensions/common/tap/v4alpha/BUILD | 1 + .../common/tap/v4alpha/common.proto | 12 ++++- .../network/http_connection_manager/v3/BUILD | 1 + .../v3/http_connection_manager.proto | 11 ++++- .../http_connection_manager/v4alpha/BUILD | 1 + .../v4alpha/http_connection_manager.proto | 18 +++++-- .../extensions/transport_sockets/tls/v3/BUILD | 1 + .../transport_sockets/tls/v3/secret.proto | 10 +++- .../transport_sockets/tls/v4alpha/BUILD | 1 + .../tls/v4alpha/secret.proto | 16 +++++-- api/envoy/service/discovery/v3/BUILD | 1 + .../service/discovery/v3/discovery.proto | 39 +++++++++++++-- .../envoy/config/bootstrap/v3/BUILD | 1 + .../envoy/config/bootstrap/v3/bootstrap.proto | 45 +++++++++++++++++- .../envoy/config/bootstrap/v4alpha/BUILD | 1 + .../config/bootstrap/v4alpha/bootstrap.proto | 47 +++++++++++++++++-- .../envoy/config/cluster/v3/BUILD | 1 + .../envoy/config/cluster/v3/cluster.proto | 17 ++++++- .../envoy/config/cluster/v4alpha/BUILD | 1 + .../config/cluster/v4alpha/cluster.proto | 26 ++++++++-- .../envoy/config/core/v3/BUILD | 1 + .../envoy/config/core/v3/config_source.proto | 23 ++++++++- 
.../envoy/config/core/v4alpha/BUILD | 1 + .../config/core/v4alpha/config_source.proto | 23 ++++++++- .../envoy/config/listener/v3/BUILD | 1 + .../envoy/config/listener/v3/listener.proto | 8 ++++ .../envoy/config/listener/v4alpha/BUILD | 1 + .../config/listener/v4alpha/listener.proto | 11 +++++ .../envoy/extensions/common/tap/v3/BUILD | 1 + .../extensions/common/tap/v3/common.proto | 10 +++- .../envoy/extensions/common/tap/v4alpha/BUILD | 1 + .../common/tap/v4alpha/common.proto | 12 ++++- .../network/http_connection_manager/v3/BUILD | 1 + .../v3/http_connection_manager.proto | 11 ++++- .../http_connection_manager/v4alpha/BUILD | 1 + .../v4alpha/http_connection_manager.proto | 18 +++++-- .../extensions/transport_sockets/tls/v3/BUILD | 1 + .../transport_sockets/tls/v3/secret.proto | 10 +++- .../transport_sockets/tls/v4alpha/BUILD | 1 + .../tls/v4alpha/secret.proto | 16 +++++-- .../envoy/service/discovery/v3/BUILD | 1 + .../service/discovery/v3/discovery.proto | 39 +++++++++++++-- tools/proto_format/proto_sync.py | 4 ++ 61 files changed, 602 insertions(+), 64 deletions(-) diff --git a/api/envoy/config/bootstrap/v3/BUILD b/api/envoy/config/bootstrap/v3/BUILD index 645d50d891a2..0a4f9a6e1ede 100644 --- a/api/envoy/config/bootstrap/v3/BUILD +++ b/api/envoy/config/bootstrap/v3/BUILD @@ -16,5 +16,6 @@ api_proto_package( "//envoy/config/trace/v3:pkg", "//envoy/extensions/transport_sockets/tls/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/api/envoy/config/bootstrap/v3/bootstrap.proto b/api/envoy/config/bootstrap/v3/bootstrap.proto index 22337ab514b5..57a455444579 100644 --- a/api/envoy/config/bootstrap/v3/bootstrap.proto +++ b/api/envoy/config/bootstrap/v3/bootstrap.proto @@ -19,7 +19,10 @@ import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/core/v1/resource_locator.proto"; + import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/migrate.proto"; import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -36,7 +39,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // ` for more detail. // Bootstrap :ref:`configuration overview `. -// [#next-free-field: 22] +// [#next-free-field: 24] message Bootstrap { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v2.Bootstrap"; @@ -62,6 +65,7 @@ message Bootstrap { repeated envoy.extensions.transport_sockets.tls.v3.Secret secrets = 3; } + // [#next-free-field: 7] message DynamicResources { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v2.Bootstrap.DynamicResources"; @@ -72,11 +76,19 @@ message Bootstrap { // :ref:`LDS ` configuration source. core.v3.ConfigSource lds_config = 1; + // Resource locator for listener collection. + // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator lds_resources_locator = 5; + // All post-bootstrap :ref:`Cluster ` definitions are // provided by a single :ref:`CDS ` // configuration source. core.v3.ConfigSource cds_config = 2; + // Resource locator for cluster collection. + // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator cds_resources_locator = 6; + // A single :ref:`ADS ` source may be optionally // specified. This must have :ref:`api_type // ` :ref:`GRPC @@ -186,6 +198,30 @@ message Bootstrap { // Specifies optional bootstrap extensions to be instantiated at startup time. 
// Each item contains extension specific configuration. repeated core.v3.TypedExtensionConfig bootstrap_extensions = 21; + + // Configuration sources that will participate in + // *udpa.core.v1.ResourceLocator* authority resolution. The algorithm is as + // follows: + // 1. The authority field is taken from the *udpa.core.v1.ResourceLocator*, call + // this *resource_authority*. + // 2. *resource_authority* is compared against the authorities in any peer + // *ConfigSource*. The peer *ConfigSource* is the configuration source + // message which would have been used unconditionally for resolution + // with opaque resource names. If there is a match with an authority, the + // peer *ConfigSource* message is used. + // 3. *resource_authority* is compared sequentially with the authorities in + // each configuration source in *config_sources*. The first *ConfigSource* + // to match wins. + // 4. As a fallback, if no configuration source matches, then + // *default_config_source* is used. + // 5. If *default_config_source* is not specified, resolution fails. + // [#not-implemented-hide:] + repeated core.v3.ConfigSource config_sources = 22; + + // Default configuration source for *udpa.core.v1.ResourceLocator* if all + // other resolution fails. + // [#not-implemented-hide:] + core.v3.ConfigSource default_config_source = 23; } // Administration interface :ref:`operations documentation @@ -353,7 +389,12 @@ message RuntimeLayer { "envoy.config.bootstrap.v2.RuntimeLayer.RtdsLayer"; // Resource to subscribe to at *rtds_config* for the RTDS layer. - string name = 1; + string name = 1 [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; + + // Resource locator for RTDS layer. This is mutually exclusive to *name*. + // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator rtds_resource_locator = 3 + [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; // RTDS configuration source. core.v3.ConfigSource rtds_config = 2; diff --git a/api/envoy/config/bootstrap/v4alpha/BUILD b/api/envoy/config/bootstrap/v4alpha/BUILD index 2bb0248a4772..0fd53ed1c2b6 100644 --- a/api/envoy/config/bootstrap/v4alpha/BUILD +++ b/api/envoy/config/bootstrap/v4alpha/BUILD @@ -15,5 +15,6 @@ api_proto_package( "//envoy/config/overload/v3:pkg", "//envoy/extensions/transport_sockets/tls/v4alpha:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/api/envoy/config/bootstrap/v4alpha/bootstrap.proto b/api/envoy/config/bootstrap/v4alpha/bootstrap.proto index 328ccae67a5e..b5a4bef5f65e 100644 --- a/api/envoy/config/bootstrap/v4alpha/bootstrap.proto +++ b/api/envoy/config/bootstrap/v4alpha/bootstrap.proto @@ -18,6 +18,8 @@ import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/core/v1/resource_locator.proto"; + import "envoy/annotations/deprecation.proto"; import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; @@ -35,7 +37,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // ` for more detail. // Bootstrap :ref:`configuration overview `. 
-// [#next-free-field: 22] +// [#next-free-field: 24] message Bootstrap { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.Bootstrap"; @@ -61,6 +63,7 @@ message Bootstrap { repeated envoy.extensions.transport_sockets.tls.v4alpha.Secret secrets = 3; } + // [#next-free-field: 7] message DynamicResources { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.Bootstrap.DynamicResources"; @@ -71,11 +74,19 @@ message Bootstrap { // :ref:`LDS ` configuration source. core.v4alpha.ConfigSource lds_config = 1; + // Resource locator for listener collection. + // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator lds_resources_locator = 5; + // All post-bootstrap :ref:`Cluster ` definitions are // provided by a single :ref:`CDS ` // configuration source. core.v4alpha.ConfigSource cds_config = 2; + // Resource locator for cluster collection. + // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator cds_resources_locator = 6; + // A single :ref:`ADS ` source may be optionally // specified. This must have :ref:`api_type // ` :ref:`GRPC @@ -178,6 +189,30 @@ message Bootstrap { // Specifies optional bootstrap extensions to be instantiated at startup time. // Each item contains extension specific configuration. repeated core.v4alpha.TypedExtensionConfig bootstrap_extensions = 21; + + // Configuration sources that will participate in + // *udpa.core.v1.ResourceLocator* authority resolution. The algorithm is as + // follows: + // 1. The authority field is taken from the *udpa.core.v1.ResourceLocator*, call + // this *resource_authority*. + // 2. *resource_authority* is compared against the authorities in any peer + // *ConfigSource*. The peer *ConfigSource* is the configuration source + // message which would have been used unconditionally for resolution + // with opaque resource names. If there is a match with an authority, the + // peer *ConfigSource* message is used. + // 3. *resource_authority* is compared sequentially with the authorities in + // each configuration source in *config_sources*. The first *ConfigSource* + // to match wins. + // 4. As a fallback, if no configuration source matches, then + // *default_config_source* is used. + // 5. If *default_config_source* is not specified, resolution fails. + // [#not-implemented-hide:] + repeated core.v4alpha.ConfigSource config_sources = 22; + + // Default configuration source for *udpa.core.v1.ResourceLocator* if all + // other resolution fails. + // [#not-implemented-hide:] + core.v4alpha.ConfigSource default_config_source = 23; } // Administration interface :ref:`operations documentation @@ -344,8 +379,14 @@ message RuntimeLayer { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.RuntimeLayer.RtdsLayer"; - // Resource to subscribe to at *rtds_config* for the RTDS layer. - string name = 1; + oneof name_specifier { + // Resource to subscribe to at *rtds_config* for the RTDS layer. + string name = 1; + + // Resource locator for RTDS layer. This is mutually exclusive to *name*. + // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator rtds_resource_locator = 3; + } // RTDS configuration source. 
core.v4alpha.ConfigSource rtds_config = 2; diff --git a/api/envoy/config/cluster/v3/BUILD b/api/envoy/config/cluster/v3/BUILD index 2c838d35e6f1..27f31fad4d3d 100644 --- a/api/envoy/config/cluster/v3/BUILD +++ b/api/envoy/config/cluster/v3/BUILD @@ -13,5 +13,6 @@ api_proto_package( "//envoy/config/endpoint/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/api/envoy/config/cluster/v3/cluster.proto b/api/envoy/config/cluster/v3/cluster.proto index 06bbb91afb99..fdaed973a16c 100644 --- a/api/envoy/config/cluster/v3/cluster.proto +++ b/api/envoy/config/cluster/v3/cluster.proto @@ -19,7 +19,11 @@ import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/core/v1/collection_entry.proto"; +import "udpa/core/v1/resource_locator.proto"; + import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/migrate.proto"; import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -32,6 +36,12 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Cluster configuration] +// Cluster list collections. Entries are *Cluster* resources or references. +// [#not-implemented-hide:] +message ClusterCollection { + udpa.core.v1.CollectionEntry entries = 1; +} + // Configuration for a single upstream cluster. // [#next-free-field: 49] message Cluster { @@ -178,7 +188,12 @@ message Cluster { // Optional alternative to cluster name to present to EDS. This does not // have the same restrictions as cluster name, i.e. it may be arbitrary // length. - string service_name = 2; + string service_name = 2 [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; + + // Resource locator for EDS. This is mutually exclusive to *service_name*. + // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator eds_resource_locator = 3 + [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; } // Optionally divide the endpoints in this cluster into subsets defined by diff --git a/api/envoy/config/cluster/v4alpha/BUILD b/api/envoy/config/cluster/v4alpha/BUILD index 3aff84b82faa..196ea73f908a 100644 --- a/api/envoy/config/cluster/v4alpha/BUILD +++ b/api/envoy/config/cluster/v4alpha/BUILD @@ -12,5 +12,6 @@ api_proto_package( "//envoy/config/endpoint/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/api/envoy/config/cluster/v4alpha/cluster.proto b/api/envoy/config/cluster/v4alpha/cluster.proto index 3a347634c5a1..a6e58aef1d4c 100644 --- a/api/envoy/config/cluster/v4alpha/cluster.proto +++ b/api/envoy/config/cluster/v4alpha/cluster.proto @@ -19,6 +19,9 @@ import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/core/v1/collection_entry.proto"; +import "udpa/core/v1/resource_locator.proto"; + import "envoy/annotations/deprecation.proto"; import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; @@ -32,6 +35,15 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // [#protodoc-title: Cluster configuration] +// Cluster list collections. Entries are *Cluster* resources or references. 
+// [#not-implemented-hide:] +message ClusterCollection { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.ClusterCollection"; + + udpa.core.v1.CollectionEntry entries = 1; +} + // Configuration for a single upstream cluster. // [#next-free-field: 49] message Cluster { @@ -175,10 +187,16 @@ message Cluster { // Configuration for the source of EDS updates for this Cluster. core.v4alpha.ConfigSource eds_config = 1; - // Optional alternative to cluster name to present to EDS. This does not - // have the same restrictions as cluster name, i.e. it may be arbitrary - // length. - string service_name = 2; + oneof name_specifier { + // Optional alternative to cluster name to present to EDS. This does not + // have the same restrictions as cluster name, i.e. it may be arbitrary + // length. + string service_name = 2; + + // Resource locator for EDS. This is mutually exclusive to *service_name*. + // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator eds_resource_locator = 3; + } } // Optionally divide the endpoints in this cluster into subsets defined by diff --git a/api/envoy/config/core/v3/BUILD b/api/envoy/config/core/v3/BUILD index e52b984a61c7..60461220c20c 100644 --- a/api/envoy/config/core/v3/BUILD +++ b/api/envoy/config/core/v3/BUILD @@ -11,5 +11,6 @@ api_proto_package( "//envoy/type/matcher/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/api/envoy/config/core/v3/config_source.proto b/api/envoy/config/core/v3/config_source.proto index dc47586233c1..72837bb3bee1 100644 --- a/api/envoy/config/core/v3/config_source.proto +++ b/api/envoy/config/core/v3/config_source.proto @@ -7,6 +7,8 @@ import "envoy/config/core/v3/grpc_service.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/core/v1/authority.proto"; + import "envoy/annotations/deprecation.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -52,13 +54,23 @@ message ApiConfigSource { // the v2 protos is used. REST = 1; - // gRPC v2 API. + // SotW gRPC service. GRPC = 2; // Using the delta xDS gRPC service, i.e. DeltaDiscovery{Request,Response} // rather than Discovery{Request,Response}. Rather than sending Envoy the entire state // with every update, the xDS server only sends what has changed since the last update. DELTA_GRPC = 3; + + // SotW xDS gRPC with ADS. All resources which resolve to this configuration source will be + // multiplexed on a single connection to an ADS endpoint. + // [#not-implemented-hide:] + AGGREGATED_GRPC = 5; + + // Delta xDS gRPC with ADS. All resources which resolve to this configuration source will be + // multiplexed on a single connection to an ADS endpoint. + // [#not-implemented-hide:] + AGGREGATED_DELTA_GRPC = 6; } // API type (gRPC, REST, delta gRPC) @@ -136,10 +148,17 @@ message RateLimitSettings { // ` etc. may either be sourced from the // filesystem or from an xDS API source. Filesystem configs are watched with // inotify for updates. -// [#next-free-field: 7] +// [#next-free-field: 8] message ConfigSource { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.ConfigSource"; + // Authorities that this config source may be used for. An authority specified + // in a *udpa.core.v1.ResourceLocator* is resolved to a *ConfigSource* prior + // to configuration fetch. 
This field provides the association between + // authority name and configuration source. + // [#not-implemented-hide:] + repeated udpa.core.v1.Authority authorities = 7; + oneof config_source_specifier { option (validate.required) = true; diff --git a/api/envoy/config/core/v4alpha/BUILD b/api/envoy/config/core/v4alpha/BUILD index ef6414dadc09..a4aa06ce9b44 100644 --- a/api/envoy/config/core/v4alpha/BUILD +++ b/api/envoy/config/core/v4alpha/BUILD @@ -11,5 +11,6 @@ api_proto_package( "//envoy/type/matcher/v4alpha:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/api/envoy/config/core/v4alpha/config_source.proto b/api/envoy/config/core/v4alpha/config_source.proto index ec3f9dfe0971..72b4f0357439 100644 --- a/api/envoy/config/core/v4alpha/config_source.proto +++ b/api/envoy/config/core/v4alpha/config_source.proto @@ -7,6 +7,8 @@ import "envoy/config/core/v4alpha/grpc_service.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/core/v1/authority.proto"; + import "envoy/annotations/deprecation.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -53,13 +55,23 @@ message ApiConfigSource { // the v2 protos is used. REST = 1; - // gRPC v2 API. + // SotW gRPC service. GRPC = 2; // Using the delta xDS gRPC service, i.e. DeltaDiscovery{Request,Response} // rather than Discovery{Request,Response}. Rather than sending Envoy the entire state // with every update, the xDS server only sends what has changed since the last update. DELTA_GRPC = 3; + + // SotW xDS gRPC with ADS. All resources which resolve to this configuration source will be + // multiplexed on a single connection to an ADS endpoint. + // [#not-implemented-hide:] + AGGREGATED_GRPC = 5; + + // Delta xDS gRPC with ADS. All resources which resolve to this configuration source will be + // multiplexed on a single connection to an ADS endpoint. + // [#not-implemented-hide:] + AGGREGATED_DELTA_GRPC = 6; } // API type (gRPC, REST, delta gRPC) @@ -138,10 +150,17 @@ message RateLimitSettings { // ` etc. may either be sourced from the // filesystem or from an xDS API source. Filesystem configs are watched with // inotify for updates. -// [#next-free-field: 7] +// [#next-free-field: 8] message ConfigSource { option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.ConfigSource"; + // Authorities that this config source may be used for. An authority specified + // in a *udpa.core.v1.ResourceLocator* is resolved to a *ConfigSource* prior + // to configuration fetch. This field provides the association between + // authority name and configuration source. 
+ // [#not-implemented-hide:] + repeated udpa.core.v1.Authority authorities = 7; + oneof config_source_specifier { option (validate.required) = true; diff --git a/api/envoy/config/listener/v3/BUILD b/api/envoy/config/listener/v3/BUILD index 71c151c040bc..25a099645cce 100644 --- a/api/envoy/config/listener/v3/BUILD +++ b/api/envoy/config/listener/v3/BUILD @@ -13,5 +13,6 @@ api_proto_package( "//envoy/config/listener/v2:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/api/envoy/config/listener/v3/listener.proto b/api/envoy/config/listener/v3/listener.proto index 03214150e773..ab0b0ecac7c7 100644 --- a/api/envoy/config/listener/v3/listener.proto +++ b/api/envoy/config/listener/v3/listener.proto @@ -14,6 +14,8 @@ import "google/api/annotations.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/core/v1/collection_entry.proto"; + import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -27,6 +29,12 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Listener configuration] // Listener :ref:`configuration overview ` +// Listener list collections. Entries are *Listener* resources or references. +// [#not-implemented-hide:] +message ListenerCollection { + udpa.core.v1.CollectionEntry entries = 1; +} + // [#next-free-field: 23] message Listener { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Listener"; diff --git a/api/envoy/config/listener/v4alpha/BUILD b/api/envoy/config/listener/v4alpha/BUILD index 1d1761a3e941..cde02c932919 100644 --- a/api/envoy/config/listener/v4alpha/BUILD +++ b/api/envoy/config/listener/v4alpha/BUILD @@ -11,5 +11,6 @@ api_proto_package( "//envoy/config/listener/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/api/envoy/config/listener/v4alpha/listener.proto b/api/envoy/config/listener/v4alpha/listener.proto index b7f32a821443..7c8c92fc4989 100644 --- a/api/envoy/config/listener/v4alpha/listener.proto +++ b/api/envoy/config/listener/v4alpha/listener.proto @@ -14,6 +14,8 @@ import "google/api/annotations.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/core/v1/collection_entry.proto"; + import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -27,6 +29,15 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // [#protodoc-title: Listener configuration] // Listener :ref:`configuration overview ` +// Listener list collections. Entries are *Listener* resources or references. 
+// [#not-implemented-hide:] +message ListenerCollection { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.ListenerCollection"; + + udpa.core.v1.CollectionEntry entries = 1; +} + // [#next-free-field: 23] message Listener { option (udpa.annotations.versioning).previous_message_type = "envoy.config.listener.v3.Listener"; diff --git a/api/envoy/extensions/common/tap/v3/BUILD b/api/envoy/extensions/common/tap/v3/BUILD index 64688f5bb438..eb16b73a2111 100644 --- a/api/envoy/extensions/common/tap/v3/BUILD +++ b/api/envoy/extensions/common/tap/v3/BUILD @@ -10,5 +10,6 @@ api_proto_package( "//envoy/config/core/v3:pkg", "//envoy/config/tap/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/api/envoy/extensions/common/tap/v3/common.proto b/api/envoy/extensions/common/tap/v3/common.proto index 46a25b164d67..68e80dad76b4 100644 --- a/api/envoy/extensions/common/tap/v3/common.proto +++ b/api/envoy/extensions/common/tap/v3/common.proto @@ -5,6 +5,9 @@ package envoy.extensions.common.tap.v3; import "envoy/config/core/v3/config_source.proto"; import "envoy/config/tap/v3/common.proto"; +import "udpa/core/v1/resource_locator.proto"; + +import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -30,7 +33,12 @@ message CommonExtensionConfig { config.core.v3.ConfigSource config_source = 1 [(validate.rules).message = {required: true}]; // Tap config to request from XDS server. - string name = 2 [(validate.rules).string = {min_bytes: 1}]; + string name = 2 [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; + + // Resource locator for TAP. This is mutually exclusive to *name*. + // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator tap_resource_locator = 3 + [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; } oneof config_type { diff --git a/api/envoy/extensions/common/tap/v4alpha/BUILD b/api/envoy/extensions/common/tap/v4alpha/BUILD index a6fffecd9621..351e64d86845 100644 --- a/api/envoy/extensions/common/tap/v4alpha/BUILD +++ b/api/envoy/extensions/common/tap/v4alpha/BUILD @@ -10,5 +10,6 @@ api_proto_package( "//envoy/config/tap/v4alpha:pkg", "//envoy/extensions/common/tap/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/api/envoy/extensions/common/tap/v4alpha/common.proto b/api/envoy/extensions/common/tap/v4alpha/common.proto index f37889b90212..536f13d049c3 100644 --- a/api/envoy/extensions/common/tap/v4alpha/common.proto +++ b/api/envoy/extensions/common/tap/v4alpha/common.proto @@ -5,6 +5,8 @@ package envoy.extensions.common.tap.v4alpha; import "envoy/config/core/v4alpha/config_source.proto"; import "envoy/config/tap/v4alpha/common.proto"; +import "udpa/core/v1/resource_locator.proto"; + import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -30,8 +32,14 @@ message CommonExtensionConfig { config.core.v4alpha.ConfigSource config_source = 1 [(validate.rules).message = {required: true}]; - // Tap config to request from XDS server. - string name = 2 [(validate.rules).string = {min_bytes: 1}]; + oneof name_specifier { + // Tap config to request from XDS server. + string name = 2; + + // Resource locator for TAP. This is mutually exclusive to *name*. 
+ // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator tap_resource_locator = 3; + } } oneof config_type { diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v3/BUILD b/api/envoy/extensions/filters/network/http_connection_manager/v3/BUILD index bd07dbcbb020..283fd11e5f09 100644 --- a/api/envoy/extensions/filters/network/http_connection_manager/v3/BUILD +++ b/api/envoy/extensions/filters/network/http_connection_manager/v3/BUILD @@ -15,5 +15,6 @@ api_proto_package( "//envoy/type/tracing/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto b/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto index 24c417bb133f..f2a80959c33b 100644 --- a/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto +++ b/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto @@ -18,7 +18,10 @@ import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/core/v1/resource_locator.proto"; + import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/migrate.proto"; import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -612,7 +615,13 @@ message Rds { // API. This allows an Envoy configuration with multiple HTTP listeners (and // associated HTTP connection manager filters) to use different route // configurations. - string route_config_name = 2 [(validate.rules).string = {min_bytes: 1}]; + string route_config_name = 2 + [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; + + // Resource locator for RDS. This is mutually exclusive to *route_config_name*. + // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator rds_resource_locator = 3 + [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; } // This message is used to work around the limitations with 'oneof' and repeated fields. 
diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/BUILD b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/BUILD index 57c9eebb5b19..837b7b898f26 100644 --- a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/BUILD +++ b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/BUILD @@ -15,5 +15,6 @@ api_proto_package( "//envoy/type/tracing/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto index 7800832806b3..aaf146e1f568 100644 --- a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto +++ b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto @@ -18,6 +18,8 @@ import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/core/v1/resource_locator.proto"; + import "envoy/annotations/deprecation.proto"; import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; @@ -614,11 +616,17 @@ message Rds { // Configuration source specifier for RDS. config.core.v4alpha.ConfigSource config_source = 1 [(validate.rules).message = {required: true}]; - // The name of the route configuration. This name will be passed to the RDS - // API. This allows an Envoy configuration with multiple HTTP listeners (and - // associated HTTP connection manager filters) to use different route - // configurations. - string route_config_name = 2 [(validate.rules).string = {min_bytes: 1}]; + oneof name_specifier { + // The name of the route configuration. This name will be passed to the RDS + // API. This allows an Envoy configuration with multiple HTTP listeners (and + // associated HTTP connection manager filters) to use different route + // configurations. + string route_config_name = 2; + + // Resource locator for RDS. This is mutually exclusive to *route_config_name*. + // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator rds_resource_locator = 3; + } } // This message is used to work around the limitations with 'oneof' and repeated fields. 
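The new Bootstrap.config_sources / Bootstrap.default_config_source fields and the ConfigSource.authorities list introduced earlier in this commit describe an authority-resolution procedure (the numbered steps 1-5 in the Bootstrap comment). The sketch below restates that procedure as standalone C++ and is illustrative only: the struct and function names are hypothetical, not Envoy APIs, and the feature itself is still marked [#not-implemented-hide:] in this commit. Step 1 (extracting the authority from the udpa.core.v1.ResourceLocator) is assumed to have already happened.

#include <optional>
#include <string>
#include <vector>

// Hypothetical, simplified stand-ins for the proto messages.
struct ConfigSource {
  std::vector<std::string> authorities; // mirrors ConfigSource.authorities
};

struct Bootstrap {
  std::vector<ConfigSource> config_sources;          // mirrors Bootstrap.config_sources
  std::optional<ConfigSource> default_config_source; // mirrors Bootstrap.default_config_source
};

bool hasAuthority(const ConfigSource& source, const std::string& authority) {
  for (const auto& a : source.authorities) {
    if (a == authority) {
      return true;
    }
  }
  return false;
}

// Steps 2-5: prefer the peer ConfigSource (the source that would have been
// used unconditionally for an opaque resource name), then scan
// config_sources in order, then fall back to default_config_source.
// Returning std::nullopt corresponds to "resolution fails".
std::optional<ConfigSource> resolveConfigSource(const std::string& resource_authority,
                                                const ConfigSource& peer,
                                                const Bootstrap& bootstrap) {
  if (hasAuthority(peer, resource_authority)) {
    return peer;
  }
  for (const auto& source : bootstrap.config_sources) {
    if (hasAuthority(source, resource_authority)) {
      return source;
    }
  }
  return bootstrap.default_config_source;
}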
diff --git a/api/envoy/extensions/transport_sockets/tls/v3/BUILD b/api/envoy/extensions/transport_sockets/tls/v3/BUILD index 62b69636c78c..14187bea65a7 100644 --- a/api/envoy/extensions/transport_sockets/tls/v3/BUILD +++ b/api/envoy/extensions/transport_sockets/tls/v3/BUILD @@ -10,5 +10,6 @@ api_proto_package( "//envoy/config/core/v3:pkg", "//envoy/type/matcher/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/api/envoy/extensions/transport_sockets/tls/v3/secret.proto b/api/envoy/extensions/transport_sockets/tls/v3/secret.proto index 2a77ec765c8f..80c68a56f5ce 100644 --- a/api/envoy/extensions/transport_sockets/tls/v3/secret.proto +++ b/api/envoy/extensions/transport_sockets/tls/v3/secret.proto @@ -6,6 +6,9 @@ import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/config_source.proto"; import "envoy/extensions/transport_sockets/tls/v3/common.proto"; +import "udpa/core/v1/resource_locator.proto"; + +import "udpa/annotations/migrate.proto"; import "udpa/annotations/sensitive.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -30,7 +33,12 @@ message SdsSecretConfig { // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. // When both name and config are specified, then secret can be fetched and/or reloaded via // SDS. When only name is specified, then secret will be loaded from static resources. - string name = 1; + string name = 1 [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; + + // Resource locator for SDS. This is mutually exclusive to *name*. + // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator sds_resource_locator = 3 + [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; config.core.v3.ConfigSource sds_config = 2; } diff --git a/api/envoy/extensions/transport_sockets/tls/v4alpha/BUILD b/api/envoy/extensions/transport_sockets/tls/v4alpha/BUILD index d294b69de40c..5471fdfbe0b1 100644 --- a/api/envoy/extensions/transport_sockets/tls/v4alpha/BUILD +++ b/api/envoy/extensions/transport_sockets/tls/v4alpha/BUILD @@ -10,5 +10,6 @@ api_proto_package( "//envoy/extensions/transport_sockets/tls/v3:pkg", "//envoy/type/matcher/v4alpha:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/api/envoy/extensions/transport_sockets/tls/v4alpha/secret.proto b/api/envoy/extensions/transport_sockets/tls/v4alpha/secret.proto index 001c1d2901bd..11306f21415a 100644 --- a/api/envoy/extensions/transport_sockets/tls/v4alpha/secret.proto +++ b/api/envoy/extensions/transport_sockets/tls/v4alpha/secret.proto @@ -6,6 +6,8 @@ import "envoy/config/core/v4alpha/base.proto"; import "envoy/config/core/v4alpha/config_source.proto"; import "envoy/extensions/transport_sockets/tls/v4alpha/common.proto"; +import "udpa/core/v1/resource_locator.proto"; + import "udpa/annotations/sensitive.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -29,10 +31,16 @@ message SdsSecretConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.transport_sockets.tls.v3.SdsSecretConfig"; - // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. - // When both name and config are specified, then secret can be fetched and/or reloaded via - // SDS. When only name is specified, then secret will be loaded from static resources. 
- string name = 1; + oneof name_specifier { + // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. + // When both name and config are specified, then secret can be fetched and/or reloaded via + // SDS. When only name is specified, then secret will be loaded from static resources. + string name = 1; + + // Resource locator for SDS. This is mutually exclusive to *name*. + // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator sds_resource_locator = 3; + } config.core.v4alpha.ConfigSource sds_config = 2; } diff --git a/api/envoy/service/discovery/v3/BUILD b/api/envoy/service/discovery/v3/BUILD index bfe0abc351df..d74aebc3424b 100644 --- a/api/envoy/service/discovery/v3/BUILD +++ b/api/envoy/service/discovery/v3/BUILD @@ -11,5 +11,6 @@ api_proto_package( "//envoy/config/core/v3:pkg", "//envoy/service/discovery/v2:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/api/envoy/service/discovery/v3/discovery.proto b/api/envoy/service/discovery/v3/discovery.proto index b8e31160a88b..40479539213c 100644 --- a/api/envoy/service/discovery/v3/discovery.proto +++ b/api/envoy/service/discovery/v3/discovery.proto @@ -7,6 +7,10 @@ import "envoy/config/core/v3/base.proto"; import "google/protobuf/any.proto"; import "google/rpc/status.proto"; +import "udpa/core/v1/resource_locator.proto"; +import "udpa/core/v1/resource_name.proto"; + +import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -140,7 +144,7 @@ message DiscoveryResponse { // In particular, initial_resource_versions being sent at the "start" of every // gRPC stream actually entails a message for each type_url, each with its own // initial_resource_versions. -// [#next-free-field: 8] +// [#next-free-field: 10] message DeltaDiscoveryRequest { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.DeltaDiscoveryRequest"; @@ -148,7 +152,9 @@ message DeltaDiscoveryRequest { config.core.v3.Node node = 1; // Type of the resource that is being requested, e.g. - // "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment". + // "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment". This does not need to be set if + // resources are only referenced via *udpa_resource_subscribe* and + // *udpa_resources_unsubscribe*. string type_url = 2; // DeltaDiscoveryRequests allow the client to add or remove individual @@ -174,9 +180,22 @@ message DeltaDiscoveryRequest { // A list of Resource names to add to the list of tracked resources. repeated string resource_names_subscribe = 3; + // As with *resource_names_subscribe* but used when subscribing to resources indicated + // by a *udpa.core.v1.ResourceLocator*. The directives in the resource locator + // are ignored and the context parameters are matched with + // *context_param_specifier* specific semantics. + // [#not-implemented-hide:] + repeated udpa.core.v1.ResourceLocator udpa_resources_subscribe = 8; + // A list of Resource names to remove from the list of tracked resources. repeated string resource_names_unsubscribe = 4; + // As with *resource_names_unsubscribe* but used when unsubscribing to resources indicated by a + // *udpa.core.v1.ResourceLocator*. This must match a previously subscribed + // resource locator provided in *udpa_resources_subscribe*. 
+ // [#not-implemented-hide:] + repeated udpa.core.v1.ResourceLocator udpa_resources_unsubscribe = 9; + // Informs the server of the versions of the resources the xDS client knows of, to enable the // client to continue the same logical xDS session even in the face of gRPC stream reconnection. // It will not be populated: [1] in the very first stream of a session, since the client will @@ -199,7 +218,7 @@ message DeltaDiscoveryRequest { google.rpc.Status error_detail = 7; } -// [#next-free-field: 7] +// [#next-free-field: 8] message DeltaDiscoveryResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.DeltaDiscoveryResponse"; @@ -215,22 +234,34 @@ message DeltaDiscoveryResponse { // Type URL for resources. Identifies the xDS API when muxing over ADS. // Must be consistent with the type_url in the Any within 'resources' if 'resources' is non-empty. + // This does not need to be set if *udpa_removed_resources* is used instead of + // *removed_resources*. string type_url = 4; // Resources names of resources that have be deleted and to be removed from the xDS Client. // Removed resources for missing resources can be ignored. repeated string removed_resources = 6; + // As with *removed_resources* but used when a removed resource was named in + // its *Resource*s with a *udpa.core.v1.ResourceName*. + // [#not-implemented-hide:] + repeated udpa.core.v1.ResourceName udpa_removed_resources = 7; + // The nonce provides a way for DeltaDiscoveryRequests to uniquely // reference a DeltaDiscoveryResponse when (N)ACKing. The nonce is required. string nonce = 5; } +// [#next-free-field: 6] message Resource { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Resource"; // The resource's name, to distinguish it from others of the same type of resource. - string name = 3; + string name = 3 [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; + + // Used instead of *name* when a resource with a *udpa.core.v1.ResourceName* is delivered. + udpa.core.v1.ResourceName udpa_resource_name = 5 + [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; // The aliases are a list of other names that this resource can go by. 
repeated string aliases = 4; diff --git a/generated_api_shadow/envoy/config/bootstrap/v3/BUILD b/generated_api_shadow/envoy/config/bootstrap/v3/BUILD index 645d50d891a2..0a4f9a6e1ede 100644 --- a/generated_api_shadow/envoy/config/bootstrap/v3/BUILD +++ b/generated_api_shadow/envoy/config/bootstrap/v3/BUILD @@ -16,5 +16,6 @@ api_proto_package( "//envoy/config/trace/v3:pkg", "//envoy/extensions/transport_sockets/tls/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto b/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto index 224328ef5bd0..26752b16ebdc 100644 --- a/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto +++ b/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto @@ -19,7 +19,10 @@ import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/core/v1/resource_locator.proto"; + import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/migrate.proto"; import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -36,7 +39,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // ` for more detail. // Bootstrap :ref:`configuration overview `. -// [#next-free-field: 22] +// [#next-free-field: 24] message Bootstrap { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v2.Bootstrap"; @@ -62,6 +65,7 @@ message Bootstrap { repeated envoy.extensions.transport_sockets.tls.v3.Secret secrets = 3; } + // [#next-free-field: 7] message DynamicResources { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v2.Bootstrap.DynamicResources"; @@ -72,11 +76,19 @@ message Bootstrap { // :ref:`LDS ` configuration source. core.v3.ConfigSource lds_config = 1; + // Resource locator for listener collection. + // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator lds_resources_locator = 5; + // All post-bootstrap :ref:`Cluster ` definitions are // provided by a single :ref:`CDS ` // configuration source. core.v3.ConfigSource cds_config = 2; + // Resource locator for cluster collection. + // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator cds_resources_locator = 6; + // A single :ref:`ADS ` source may be optionally // specified. This must have :ref:`api_type // ` :ref:`GRPC @@ -185,6 +197,30 @@ message Bootstrap { // Each item contains extension specific configuration. repeated core.v3.TypedExtensionConfig bootstrap_extensions = 21; + // Configuration sources that will participate in + // *udpa.core.v1.ResourceLocator* authority resolution. The algorithm is as + // follows: + // 1. The authority field is taken from the *udpa.core.v1.ResourceLocator*, call + // this *resource_authority*. + // 2. *resource_authority* is compared against the authorities in any peer + // *ConfigSource*. The peer *ConfigSource* is the configuration source + // message which would have been used unconditionally for resolution + // with opaque resource names. If there is a match with an authority, the + // peer *ConfigSource* message is used. + // 3. *resource_authority* is compared sequentially with the authorities in + // each configuration source in *config_sources*. The first *ConfigSource* + // to match wins. + // 4. As a fallback, if no configuration source matches, then + // *default_config_source* is used. + // 5. 
If *default_config_source* is not specified, resolution fails. + // [#not-implemented-hide:] + repeated core.v3.ConfigSource config_sources = 22; + + // Default configuration source for *udpa.core.v1.ResourceLocator* if all + // other resolution fails. + // [#not-implemented-hide:] + core.v3.ConfigSource default_config_source = 23; + Runtime hidden_envoy_deprecated_runtime = 11 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; } @@ -354,7 +390,12 @@ message RuntimeLayer { "envoy.config.bootstrap.v2.RuntimeLayer.RtdsLayer"; // Resource to subscribe to at *rtds_config* for the RTDS layer. - string name = 1; + string name = 1 [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; + + // Resource locator for RTDS layer. This is mutually exclusive to *name*. + // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator rtds_resource_locator = 3 + [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; // RTDS configuration source. core.v3.ConfigSource rtds_config = 2; diff --git a/generated_api_shadow/envoy/config/bootstrap/v4alpha/BUILD b/generated_api_shadow/envoy/config/bootstrap/v4alpha/BUILD index eb87a71ad68e..a0dac9234426 100644 --- a/generated_api_shadow/envoy/config/bootstrap/v4alpha/BUILD +++ b/generated_api_shadow/envoy/config/bootstrap/v4alpha/BUILD @@ -16,5 +16,6 @@ api_proto_package( "//envoy/config/trace/v4alpha:pkg", "//envoy/extensions/transport_sockets/tls/v4alpha:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/generated_api_shadow/envoy/config/bootstrap/v4alpha/bootstrap.proto b/generated_api_shadow/envoy/config/bootstrap/v4alpha/bootstrap.proto index 86bbf02e32f6..f75d169486a5 100644 --- a/generated_api_shadow/envoy/config/bootstrap/v4alpha/bootstrap.proto +++ b/generated_api_shadow/envoy/config/bootstrap/v4alpha/bootstrap.proto @@ -19,6 +19,8 @@ import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/core/v1/resource_locator.proto"; + import "envoy/annotations/deprecation.proto"; import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; @@ -36,7 +38,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // ` for more detail. // Bootstrap :ref:`configuration overview `. -// [#next-free-field: 22] +// [#next-free-field: 24] message Bootstrap { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.Bootstrap"; @@ -62,6 +64,7 @@ message Bootstrap { repeated envoy.extensions.transport_sockets.tls.v4alpha.Secret secrets = 3; } + // [#next-free-field: 7] message DynamicResources { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.Bootstrap.DynamicResources"; @@ -72,11 +75,19 @@ message Bootstrap { // :ref:`LDS ` configuration source. core.v4alpha.ConfigSource lds_config = 1; + // Resource locator for listener collection. + // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator lds_resources_locator = 5; + // All post-bootstrap :ref:`Cluster ` definitions are // provided by a single :ref:`CDS ` // configuration source. core.v4alpha.ConfigSource cds_config = 2; + // Resource locator for cluster collection. + // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator cds_resources_locator = 6; + // A single :ref:`ADS ` source may be optionally // specified. 
This must have :ref:`api_type // ` :ref:`GRPC @@ -186,6 +197,30 @@ message Bootstrap { // Specifies optional bootstrap extensions to be instantiated at startup time. // Each item contains extension specific configuration. repeated core.v4alpha.TypedExtensionConfig bootstrap_extensions = 21; + + // Configuration sources that will participate in + // *udpa.core.v1.ResourceLocator* authority resolution. The algorithm is as + // follows: + // 1. The authority field is taken from the *udpa.core.v1.ResourceLocator*, call + // this *resource_authority*. + // 2. *resource_authority* is compared against the authorities in any peer + // *ConfigSource*. The peer *ConfigSource* is the configuration source + // message which would have been used unconditionally for resolution + // with opaque resource names. If there is a match with an authority, the + // peer *ConfigSource* message is used. + // 3. *resource_authority* is compared sequentially with the authorities in + // each configuration source in *config_sources*. The first *ConfigSource* + // to match wins. + // 4. As a fallback, if no configuration source matches, then + // *default_config_source* is used. + // 5. If *default_config_source* is not specified, resolution fails. + // [#not-implemented-hide:] + repeated core.v4alpha.ConfigSource config_sources = 22; + + // Default configuration source for *udpa.core.v1.ResourceLocator* if all + // other resolution fails. + // [#not-implemented-hide:] + core.v4alpha.ConfigSource default_config_source = 23; } // Administration interface :ref:`operations documentation @@ -352,8 +387,14 @@ message RuntimeLayer { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.RuntimeLayer.RtdsLayer"; - // Resource to subscribe to at *rtds_config* for the RTDS layer. - string name = 1; + oneof name_specifier { + // Resource to subscribe to at *rtds_config* for the RTDS layer. + string name = 1; + + // Resource locator for RTDS layer. This is mutually exclusive to *name*. + // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator rtds_resource_locator = 3; + } // RTDS configuration source. 
core.v4alpha.ConfigSource rtds_config = 2; diff --git a/generated_api_shadow/envoy/config/cluster/v3/BUILD b/generated_api_shadow/envoy/config/cluster/v3/BUILD index 2c229a6ac8cc..7bbe1aa145be 100644 --- a/generated_api_shadow/envoy/config/cluster/v3/BUILD +++ b/generated_api_shadow/envoy/config/cluster/v3/BUILD @@ -14,5 +14,6 @@ api_proto_package( "//envoy/extensions/transport_sockets/tls/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/generated_api_shadow/envoy/config/cluster/v3/cluster.proto b/generated_api_shadow/envoy/config/cluster/v3/cluster.proto index 0e800ce5bf5b..32a6c4e3d7a4 100644 --- a/generated_api_shadow/envoy/config/cluster/v3/cluster.proto +++ b/generated_api_shadow/envoy/config/cluster/v3/cluster.proto @@ -20,7 +20,11 @@ import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/core/v1/collection_entry.proto"; +import "udpa/core/v1/resource_locator.proto"; + import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/migrate.proto"; import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -33,6 +37,12 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Cluster configuration] +// Cluster list collections. Entries are *Cluster* resources or references. +// [#not-implemented-hide:] +message ClusterCollection { + udpa.core.v1.CollectionEntry entries = 1; +} + // Configuration for a single upstream cluster. // [#next-free-field: 49] message Cluster { @@ -178,7 +188,12 @@ message Cluster { // Optional alternative to cluster name to present to EDS. This does not // have the same restrictions as cluster name, i.e. it may be arbitrary // length. - string service_name = 2; + string service_name = 2 [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; + + // Resource locator for EDS. This is mutually exclusive to *service_name*. 
+ // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator eds_resource_locator = 3 + [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; } // Optionally divide the endpoints in this cluster into subsets defined by diff --git a/generated_api_shadow/envoy/config/cluster/v4alpha/BUILD b/generated_api_shadow/envoy/config/cluster/v4alpha/BUILD index 3aff84b82faa..196ea73f908a 100644 --- a/generated_api_shadow/envoy/config/cluster/v4alpha/BUILD +++ b/generated_api_shadow/envoy/config/cluster/v4alpha/BUILD @@ -12,5 +12,6 @@ api_proto_package( "//envoy/config/endpoint/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto b/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto index 3a347634c5a1..a6e58aef1d4c 100644 --- a/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto +++ b/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto @@ -19,6 +19,9 @@ import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/core/v1/collection_entry.proto"; +import "udpa/core/v1/resource_locator.proto"; + import "envoy/annotations/deprecation.proto"; import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; @@ -32,6 +35,15 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // [#protodoc-title: Cluster configuration] +// Cluster list collections. Entries are *Cluster* resources or references. +// [#not-implemented-hide:] +message ClusterCollection { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.ClusterCollection"; + + udpa.core.v1.CollectionEntry entries = 1; +} + // Configuration for a single upstream cluster. // [#next-free-field: 49] message Cluster { @@ -175,10 +187,16 @@ message Cluster { // Configuration for the source of EDS updates for this Cluster. core.v4alpha.ConfigSource eds_config = 1; - // Optional alternative to cluster name to present to EDS. This does not - // have the same restrictions as cluster name, i.e. it may be arbitrary - // length. - string service_name = 2; + oneof name_specifier { + // Optional alternative to cluster name to present to EDS. This does not + // have the same restrictions as cluster name, i.e. it may be arbitrary + // length. + string service_name = 2; + + // Resource locator for EDS. This is mutually exclusive to *service_name*. 
+ // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator eds_resource_locator = 3; + } } // Optionally divide the endpoints in this cluster into subsets defined by diff --git a/generated_api_shadow/envoy/config/core/v3/BUILD b/generated_api_shadow/envoy/config/core/v3/BUILD index e52b984a61c7..60461220c20c 100644 --- a/generated_api_shadow/envoy/config/core/v3/BUILD +++ b/generated_api_shadow/envoy/config/core/v3/BUILD @@ -11,5 +11,6 @@ api_proto_package( "//envoy/type/matcher/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/generated_api_shadow/envoy/config/core/v3/config_source.proto b/generated_api_shadow/envoy/config/core/v3/config_source.proto index beb670796932..9e7b8b777ec7 100644 --- a/generated_api_shadow/envoy/config/core/v3/config_source.proto +++ b/generated_api_shadow/envoy/config/core/v3/config_source.proto @@ -7,6 +7,8 @@ import "envoy/config/core/v3/grpc_service.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/core/v1/authority.proto"; + import "envoy/annotations/deprecation.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -52,13 +54,23 @@ message ApiConfigSource { // the v2 protos is used. REST = 1; - // gRPC v2 API. + // SotW gRPC service. GRPC = 2; // Using the delta xDS gRPC service, i.e. DeltaDiscovery{Request,Response} // rather than Discovery{Request,Response}. Rather than sending Envoy the entire state // with every update, the xDS server only sends what has changed since the last update. DELTA_GRPC = 3; + + // SotW xDS gRPC with ADS. All resources which resolve to this configuration source will be + // multiplexed on a single connection to an ADS endpoint. + // [#not-implemented-hide:] + AGGREGATED_GRPC = 5; + + // Delta xDS gRPC with ADS. All resources which resolve to this configuration source will be + // multiplexed on a single connection to an ADS endpoint. + // [#not-implemented-hide:] + AGGREGATED_DELTA_GRPC = 6; } // API type (gRPC, REST, delta gRPC) @@ -136,10 +148,17 @@ message RateLimitSettings { // ` etc. may either be sourced from the // filesystem or from an xDS API source. Filesystem configs are watched with // inotify for updates. -// [#next-free-field: 7] +// [#next-free-field: 8] message ConfigSource { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.ConfigSource"; + // Authorities that this config source may be used for. An authority specified + // in a *udpa.core.v1.ResourceLocator* is resolved to a *ConfigSource* prior + // to configuration fetch. This field provides the association between + // authority name and configuration source. 
+ // [#not-implemented-hide:] + repeated udpa.core.v1.Authority authorities = 7; + oneof config_source_specifier { option (validate.required) = true; diff --git a/generated_api_shadow/envoy/config/core/v4alpha/BUILD b/generated_api_shadow/envoy/config/core/v4alpha/BUILD index ef6414dadc09..a4aa06ce9b44 100644 --- a/generated_api_shadow/envoy/config/core/v4alpha/BUILD +++ b/generated_api_shadow/envoy/config/core/v4alpha/BUILD @@ -11,5 +11,6 @@ api_proto_package( "//envoy/type/matcher/v4alpha:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/generated_api_shadow/envoy/config/core/v4alpha/config_source.proto b/generated_api_shadow/envoy/config/core/v4alpha/config_source.proto index 503d5c451cc0..b10e0377be65 100644 --- a/generated_api_shadow/envoy/config/core/v4alpha/config_source.proto +++ b/generated_api_shadow/envoy/config/core/v4alpha/config_source.proto @@ -7,6 +7,8 @@ import "envoy/config/core/v4alpha/grpc_service.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/core/v1/authority.proto"; + import "envoy/annotations/deprecation.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -53,13 +55,23 @@ message ApiConfigSource { // the v2 protos is used. REST = 1; - // gRPC v2 API. + // SotW gRPC service. GRPC = 2; // Using the delta xDS gRPC service, i.e. DeltaDiscovery{Request,Response} // rather than Discovery{Request,Response}. Rather than sending Envoy the entire state // with every update, the xDS server only sends what has changed since the last update. DELTA_GRPC = 3; + + // SotW xDS gRPC with ADS. All resources which resolve to this configuration source will be + // multiplexed on a single connection to an ADS endpoint. + // [#not-implemented-hide:] + AGGREGATED_GRPC = 5; + + // Delta xDS gRPC with ADS. All resources which resolve to this configuration source will be + // multiplexed on a single connection to an ADS endpoint. + // [#not-implemented-hide:] + AGGREGATED_DELTA_GRPC = 6; } // API type (gRPC, REST, delta gRPC) @@ -138,10 +150,17 @@ message RateLimitSettings { // ` etc. may either be sourced from the // filesystem or from an xDS API source. Filesystem configs are watched with // inotify for updates. -// [#next-free-field: 7] +// [#next-free-field: 8] message ConfigSource { option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.ConfigSource"; + // Authorities that this config source may be used for. An authority specified + // in a *udpa.core.v1.ResourceLocator* is resolved to a *ConfigSource* prior + // to configuration fetch. This field provides the association between + // authority name and configuration source. 
+ // [#not-implemented-hide:] + repeated udpa.core.v1.Authority authorities = 7; + oneof config_source_specifier { option (validate.required) = true; diff --git a/generated_api_shadow/envoy/config/listener/v3/BUILD b/generated_api_shadow/envoy/config/listener/v3/BUILD index e67314794940..2ae77584b119 100644 --- a/generated_api_shadow/envoy/config/listener/v3/BUILD +++ b/generated_api_shadow/envoy/config/listener/v3/BUILD @@ -14,5 +14,6 @@ api_proto_package( "//envoy/extensions/transport_sockets/tls/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/generated_api_shadow/envoy/config/listener/v3/listener.proto b/generated_api_shadow/envoy/config/listener/v3/listener.proto index b2892906a484..fbf34d16442b 100644 --- a/generated_api_shadow/envoy/config/listener/v3/listener.proto +++ b/generated_api_shadow/envoy/config/listener/v3/listener.proto @@ -14,6 +14,8 @@ import "google/api/annotations.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/core/v1/collection_entry.proto"; + import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -27,6 +29,12 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Listener configuration] // Listener :ref:`configuration overview ` +// Listener list collections. Entries are *Listener* resources or references. +// [#not-implemented-hide:] +message ListenerCollection { + udpa.core.v1.CollectionEntry entries = 1; +} + // [#next-free-field: 23] message Listener { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Listener"; diff --git a/generated_api_shadow/envoy/config/listener/v4alpha/BUILD b/generated_api_shadow/envoy/config/listener/v4alpha/BUILD index 1d1761a3e941..cde02c932919 100644 --- a/generated_api_shadow/envoy/config/listener/v4alpha/BUILD +++ b/generated_api_shadow/envoy/config/listener/v4alpha/BUILD @@ -11,5 +11,6 @@ api_proto_package( "//envoy/config/listener/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/generated_api_shadow/envoy/config/listener/v4alpha/listener.proto b/generated_api_shadow/envoy/config/listener/v4alpha/listener.proto index b7f32a821443..7c8c92fc4989 100644 --- a/generated_api_shadow/envoy/config/listener/v4alpha/listener.proto +++ b/generated_api_shadow/envoy/config/listener/v4alpha/listener.proto @@ -14,6 +14,8 @@ import "google/api/annotations.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/core/v1/collection_entry.proto"; + import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -27,6 +29,15 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // [#protodoc-title: Listener configuration] // Listener :ref:`configuration overview ` +// Listener list collections. Entries are *Listener* resources or references. 
+// [#not-implemented-hide:] +message ListenerCollection { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.ListenerCollection"; + + udpa.core.v1.CollectionEntry entries = 1; +} + // [#next-free-field: 23] message Listener { option (udpa.annotations.versioning).previous_message_type = "envoy.config.listener.v3.Listener"; diff --git a/generated_api_shadow/envoy/extensions/common/tap/v3/BUILD b/generated_api_shadow/envoy/extensions/common/tap/v3/BUILD index 64688f5bb438..eb16b73a2111 100644 --- a/generated_api_shadow/envoy/extensions/common/tap/v3/BUILD +++ b/generated_api_shadow/envoy/extensions/common/tap/v3/BUILD @@ -10,5 +10,6 @@ api_proto_package( "//envoy/config/core/v3:pkg", "//envoy/config/tap/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/generated_api_shadow/envoy/extensions/common/tap/v3/common.proto b/generated_api_shadow/envoy/extensions/common/tap/v3/common.proto index 46a25b164d67..68e80dad76b4 100644 --- a/generated_api_shadow/envoy/extensions/common/tap/v3/common.proto +++ b/generated_api_shadow/envoy/extensions/common/tap/v3/common.proto @@ -5,6 +5,9 @@ package envoy.extensions.common.tap.v3; import "envoy/config/core/v3/config_source.proto"; import "envoy/config/tap/v3/common.proto"; +import "udpa/core/v1/resource_locator.proto"; + +import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -30,7 +33,12 @@ message CommonExtensionConfig { config.core.v3.ConfigSource config_source = 1 [(validate.rules).message = {required: true}]; // Tap config to request from XDS server. - string name = 2 [(validate.rules).string = {min_bytes: 1}]; + string name = 2 [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; + + // Resource locator for TAP. This is mutually exclusive to *name*. + // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator tap_resource_locator = 3 + [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; } oneof config_type { diff --git a/generated_api_shadow/envoy/extensions/common/tap/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/common/tap/v4alpha/BUILD index a6fffecd9621..351e64d86845 100644 --- a/generated_api_shadow/envoy/extensions/common/tap/v4alpha/BUILD +++ b/generated_api_shadow/envoy/extensions/common/tap/v4alpha/BUILD @@ -10,5 +10,6 @@ api_proto_package( "//envoy/config/tap/v4alpha:pkg", "//envoy/extensions/common/tap/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/generated_api_shadow/envoy/extensions/common/tap/v4alpha/common.proto b/generated_api_shadow/envoy/extensions/common/tap/v4alpha/common.proto index f37889b90212..536f13d049c3 100644 --- a/generated_api_shadow/envoy/extensions/common/tap/v4alpha/common.proto +++ b/generated_api_shadow/envoy/extensions/common/tap/v4alpha/common.proto @@ -5,6 +5,8 @@ package envoy.extensions.common.tap.v4alpha; import "envoy/config/core/v4alpha/config_source.proto"; import "envoy/config/tap/v4alpha/common.proto"; +import "udpa/core/v1/resource_locator.proto"; + import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -30,8 +32,14 @@ message CommonExtensionConfig { config.core.v4alpha.ConfigSource config_source = 1 [(validate.rules).message = {required: true}]; - // Tap config to request from XDS server. 
- string name = 2 [(validate.rules).string = {min_bytes: 1}]; + oneof name_specifier { + // Tap config to request from XDS server. + string name = 2; + + // Resource locator for TAP. This is mutually exclusive to *name*. + // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator tap_resource_locator = 3; + } } oneof config_type { diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/BUILD index bd07dbcbb020..283fd11e5f09 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/BUILD +++ b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/BUILD @@ -15,5 +15,6 @@ api_proto_package( "//envoy/type/tracing/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto index 3142a0d8bea2..54e531ceb6a0 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto @@ -18,7 +18,10 @@ import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/core/v1/resource_locator.proto"; + import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/migrate.proto"; import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -617,7 +620,13 @@ message Rds { // API. This allows an Envoy configuration with multiple HTTP listeners (and // associated HTTP connection manager filters) to use different route // configurations. - string route_config_name = 2 [(validate.rules).string = {min_bytes: 1}]; + string route_config_name = 2 + [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; + + // Resource locator for RDS. This is mutually exclusive to *route_config_name*. + // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator rds_resource_locator = 3 + [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; } // This message is used to work around the limitations with 'oneof' and repeated fields. 
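Illustrative aside (not from any of these patches): the *authorities*, *config_sources*, and *default_config_source* fields introduced in the config_source.proto and bootstrap.proto changes above are all [#not-implemented-hide:], so Envoy does not accept them yet. Purely as a sketch of the intended shape, with hypothetical cluster and authority names, bootstrap-level authority resolution might eventually be expressed as:

  config_sources:
  - authorities:
    - name: resources.example.org            # matched against the ResourceLocator authority
    api_config_source:
      api_type: AGGREGATED_GRPC              # new SotW-over-ADS variant added above
      grpc_services:
      - envoy_grpc: {cluster_name: xds_cluster_a}
  default_config_source:                     # fallback when no authority matches
    api_config_source:
      api_type: AGGREGATED_DELTA_GRPC
      grpc_services:
      - envoy_grpc: {cluster_name: xds_fallback_cluster}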
diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/BUILD index 57c9eebb5b19..837b7b898f26 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/BUILD +++ b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/BUILD @@ -15,5 +15,6 @@ api_proto_package( "//envoy/type/tracing/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto index 7800832806b3..aaf146e1f568 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto @@ -18,6 +18,8 @@ import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/core/v1/resource_locator.proto"; + import "envoy/annotations/deprecation.proto"; import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; @@ -614,11 +616,17 @@ message Rds { // Configuration source specifier for RDS. config.core.v4alpha.ConfigSource config_source = 1 [(validate.rules).message = {required: true}]; - // The name of the route configuration. This name will be passed to the RDS - // API. This allows an Envoy configuration with multiple HTTP listeners (and - // associated HTTP connection manager filters) to use different route - // configurations. - string route_config_name = 2 [(validate.rules).string = {min_bytes: 1}]; + oneof name_specifier { + // The name of the route configuration. This name will be passed to the RDS + // API. This allows an Envoy configuration with multiple HTTP listeners (and + // associated HTTP connection manager filters) to use different route + // configurations. + string route_config_name = 2; + + // Resource locator for RDS. This is mutually exclusive to *route_config_name*. + // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator rds_resource_locator = 3; + } } // This message is used to work around the limitations with 'oneof' and repeated fields. 
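Illustrative aside (not from any of these patches): as with RDS, the Cluster.EdsClusterConfig change above only annotates *service_name* for a future *name_specifier* promotion, so today's EDS configuration is unchanged. A minimal fragment with a hypothetical service name:

  clusters:
  - name: backend
    type: EDS
    connect_timeout: 1s
    eds_cluster_config:
      service_name: backend-v1          # existing branch; eds_resource_locator is not implemented yet
      eds_config:
        resource_api_version: V3
        ads: {}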
diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/BUILD b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/BUILD index 62b69636c78c..14187bea65a7 100644 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/BUILD +++ b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/BUILD @@ -10,5 +10,6 @@ api_proto_package( "//envoy/config/core/v3:pkg", "//envoy/type/matcher/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/secret.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/secret.proto index 2a77ec765c8f..80c68a56f5ce 100644 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/secret.proto +++ b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/secret.proto @@ -6,6 +6,9 @@ import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/config_source.proto"; import "envoy/extensions/transport_sockets/tls/v3/common.proto"; +import "udpa/core/v1/resource_locator.proto"; + +import "udpa/annotations/migrate.proto"; import "udpa/annotations/sensitive.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -30,7 +33,12 @@ message SdsSecretConfig { // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. // When both name and config are specified, then secret can be fetched and/or reloaded via // SDS. When only name is specified, then secret will be loaded from static resources. - string name = 1; + string name = 1 [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; + + // Resource locator for SDS. This is mutually exclusive to *name*. + // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator sds_resource_locator = 3 + [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; config.core.v3.ConfigSource sds_config = 2; } diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/BUILD index d294b69de40c..5471fdfbe0b1 100644 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/BUILD +++ b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/BUILD @@ -10,5 +10,6 @@ api_proto_package( "//envoy/extensions/transport_sockets/tls/v3:pkg", "//envoy/type/matcher/v4alpha:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/secret.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/secret.proto index 001c1d2901bd..11306f21415a 100644 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/secret.proto +++ b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/secret.proto @@ -6,6 +6,8 @@ import "envoy/config/core/v4alpha/base.proto"; import "envoy/config/core/v4alpha/config_source.proto"; import "envoy/extensions/transport_sockets/tls/v4alpha/common.proto"; +import "udpa/core/v1/resource_locator.proto"; + import "udpa/annotations/sensitive.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -29,10 +31,16 @@ message SdsSecretConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.transport_sockets.tls.v3.SdsSecretConfig"; - // Name (FQDN, UUID, SPKI, SHA256, etc.) 
by which the secret can be uniquely referred to. - // When both name and config are specified, then secret can be fetched and/or reloaded via - // SDS. When only name is specified, then secret will be loaded from static resources. - string name = 1; + oneof name_specifier { + // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. + // When both name and config are specified, then secret can be fetched and/or reloaded via + // SDS. When only name is specified, then secret will be loaded from static resources. + string name = 1; + + // Resource locator for SDS. This is mutually exclusive to *name*. + // [#not-implemented-hide:] + udpa.core.v1.ResourceLocator sds_resource_locator = 3; + } config.core.v4alpha.ConfigSource sds_config = 2; } diff --git a/generated_api_shadow/envoy/service/discovery/v3/BUILD b/generated_api_shadow/envoy/service/discovery/v3/BUILD index bfe0abc351df..d74aebc3424b 100644 --- a/generated_api_shadow/envoy/service/discovery/v3/BUILD +++ b/generated_api_shadow/envoy/service/discovery/v3/BUILD @@ -11,5 +11,6 @@ api_proto_package( "//envoy/config/core/v3:pkg", "//envoy/service/discovery/v2:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) diff --git a/generated_api_shadow/envoy/service/discovery/v3/discovery.proto b/generated_api_shadow/envoy/service/discovery/v3/discovery.proto index b8e31160a88b..40479539213c 100644 --- a/generated_api_shadow/envoy/service/discovery/v3/discovery.proto +++ b/generated_api_shadow/envoy/service/discovery/v3/discovery.proto @@ -7,6 +7,10 @@ import "envoy/config/core/v3/base.proto"; import "google/protobuf/any.proto"; import "google/rpc/status.proto"; +import "udpa/core/v1/resource_locator.proto"; +import "udpa/core/v1/resource_name.proto"; + +import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -140,7 +144,7 @@ message DiscoveryResponse { // In particular, initial_resource_versions being sent at the "start" of every // gRPC stream actually entails a message for each type_url, each with its own // initial_resource_versions. -// [#next-free-field: 8] +// [#next-free-field: 10] message DeltaDiscoveryRequest { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.DeltaDiscoveryRequest"; @@ -148,7 +152,9 @@ message DeltaDiscoveryRequest { config.core.v3.Node node = 1; // Type of the resource that is being requested, e.g. - // "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment". + // "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment". This does not need to be set if + // resources are only referenced via *udpa_resource_subscribe* and + // *udpa_resources_unsubscribe*. string type_url = 2; // DeltaDiscoveryRequests allow the client to add or remove individual @@ -174,9 +180,22 @@ message DeltaDiscoveryRequest { // A list of Resource names to add to the list of tracked resources. repeated string resource_names_subscribe = 3; + // As with *resource_names_subscribe* but used when subscribing to resources indicated + // by a *udpa.core.v1.ResourceLocator*. The directives in the resource locator + // are ignored and the context parameters are matched with + // *context_param_specifier* specific semantics. + // [#not-implemented-hide:] + repeated udpa.core.v1.ResourceLocator udpa_resources_subscribe = 8; + // A list of Resource names to remove from the list of tracked resources. 
repeated string resource_names_unsubscribe = 4; + // As with *resource_names_unsubscribe* but used when unsubscribing to resources indicated by a + // *udpa.core.v1.ResourceLocator*. This must match a previously subscribed + // resource locator provided in *udpa_resources_subscribe*. + // [#not-implemented-hide:] + repeated udpa.core.v1.ResourceLocator udpa_resources_unsubscribe = 9; + // Informs the server of the versions of the resources the xDS client knows of, to enable the // client to continue the same logical xDS session even in the face of gRPC stream reconnection. // It will not be populated: [1] in the very first stream of a session, since the client will @@ -199,7 +218,7 @@ message DeltaDiscoveryRequest { google.rpc.Status error_detail = 7; } -// [#next-free-field: 7] +// [#next-free-field: 8] message DeltaDiscoveryResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.DeltaDiscoveryResponse"; @@ -215,22 +234,34 @@ message DeltaDiscoveryResponse { // Type URL for resources. Identifies the xDS API when muxing over ADS. // Must be consistent with the type_url in the Any within 'resources' if 'resources' is non-empty. + // This does not need to be set if *udpa_removed_resources* is used instead of + // *removed_resources*. string type_url = 4; // Resources names of resources that have be deleted and to be removed from the xDS Client. // Removed resources for missing resources can be ignored. repeated string removed_resources = 6; + // As with *removed_resources* but used when a removed resource was named in + // its *Resource*s with a *udpa.core.v1.ResourceName*. + // [#not-implemented-hide:] + repeated udpa.core.v1.ResourceName udpa_removed_resources = 7; + // The nonce provides a way for DeltaDiscoveryRequests to uniquely // reference a DeltaDiscoveryResponse when (N)ACKing. The nonce is required. string nonce = 5; } +// [#next-free-field: 6] message Resource { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Resource"; // The resource's name, to distinguish it from others of the same type of resource. - string name = 3; + string name = 3 [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; + + // Used instead of *name* when a resource with a *udpa.core.v1.ResourceName* is delivered. + udpa.core.v1.ResourceName udpa_resource_name = 5 + [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; // The aliases are a list of other names that this resource can go by. repeated string aliases = 4; diff --git a/tools/proto_format/proto_sync.py b/tools/proto_format/proto_sync.py index cf952597e88d..acbb8f091935 100755 --- a/tools/proto_format/proto_sync.py +++ b/tools/proto_format/proto_sync.py @@ -203,6 +203,10 @@ def GetImportDeps(proto_path): if import_path.startswith('udpa/annotations/'): imports.append('@com_github_cncf_udpa//udpa/annotations:pkg') continue + # Special case handling for UDPA core. + if import_path.startswith('udpa/core/v1/'): + imports.append('@com_github_cncf_udpa//udpa/core/v1:pkg') + continue # Explicit remapping for external deps, compute paths for envoy/*. 
if import_path in external_proto_deps.EXTERNAL_PROTO_IMPORT_BAZEL_DEP_MAP: imports.append(external_proto_deps.EXTERNAL_PROTO_IMPORT_BAZEL_DEP_MAP[import_path]) From cf5fb59d35ac429512c0657c64c849afeb041595 Mon Sep 17 00:00:00 2001 From: Manish Date: Thu, 2 Jul 2020 00:39:17 +0530 Subject: [PATCH 507/909] examples/cors: Fixed unable to open file '/var/log/access.log' in front_envoy (#11837) examples/cors: Fixed unable to open file '/var/log/access.log' in front_envoy Signed-off-by: Manish Kumar --- examples/cors/backend/front-envoy.yaml | 2 +- examples/cors/frontend/front-envoy.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/cors/backend/front-envoy.yaml b/examples/cors/backend/front-envoy.yaml index 0343ac098495..0dd81339f578 100644 --- a/examples/cors/backend/front-envoy.yaml +++ b/examples/cors/backend/front-envoy.yaml @@ -15,7 +15,7 @@ static_resources: - name: envoy.access_loggers.file typed_config: "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog - path: "/var/log/access.log" + path: /dev/stdout route_config: name: local_route virtual_hosts: diff --git a/examples/cors/frontend/front-envoy.yaml b/examples/cors/frontend/front-envoy.yaml index 07da44803b6a..e871ebea1e91 100644 --- a/examples/cors/frontend/front-envoy.yaml +++ b/examples/cors/frontend/front-envoy.yaml @@ -15,7 +15,7 @@ static_resources: - name: envoy.access_loggers.file typed_config: "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog - path: "/var/log/access.log" + path: /dev/stdout route_config: name: local_route virtual_hosts: From 6f2390a5f4620bdab1b4a1cdf67389adc06c48cf Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Wed, 1 Jul 2020 14:13:10 -0600 Subject: [PATCH 508/909] test: fix http2_integration_test flake (#11853) Part of https://github.com/envoyproxy/envoy/issues/11801 Signed-off-by: Matt Klein --- test/integration/http2_integration_test.cc | 10 +++++++++- test/integration/integration.cc | 17 +++++++++-------- test/integration/integration.h | 7 +++++-- 3 files changed, 23 insertions(+), 11 deletions(-) diff --git a/test/integration/http2_integration_test.cc b/test/integration/http2_integration_test.cc index 94154029892b..ebbd0e7ca66d 100644 --- a/test/integration/http2_integration_test.cc +++ b/test/integration/http2_integration_test.cc @@ -9,6 +9,7 @@ #include "common/buffer/buffer_impl.h" #include "common/http/header_map_impl.h" +#include "common/network/socket_option_impl.h" #include "test/integration/utility.h" #include "test/mocks/http/mocks.h" @@ -1520,7 +1521,13 @@ void Http2FloodMitigationTest::beginSession() { // set lower outbound frame limits to make tests run faster config_helper_.setOutboundFramesLimits(1000, 100); initialize(); - tcp_client_ = makeTcpConnection(lookupPort("http")); + // Set up a raw connection to easily send requests without reading responses. Also, set a small + // TCP receive buffer to speed up connection backup. + auto options = std::make_shared(); + options->emplace_back(std::make_shared( + envoy::config::core::v3::SocketOption::STATE_PREBIND, + ENVOY_MAKE_SOCKET_OPTION_NAME(SOL_SOCKET, SO_RCVBUF), 1024)); + tcp_client_ = makeTcpConnection(lookupPort("http"), options); startHttp2Session(); } @@ -1643,6 +1650,7 @@ TEST_P(Http2FloodMitigationTest, Data) { // Set large buffer limits so the test is not affected by the flow control. 
config_helper_.setBufferLimits(1024 * 1024 * 1024, 1024 * 1024 * 1024); autonomous_upstream_ = true; + autonomous_allow_incomplete_streams_ = true; beginSession(); fake_upstreams_[0]->set_allow_unexpected_disconnects(true); diff --git a/test/integration/integration.cc b/test/integration/integration.cc index 608ea2bc742f..6079451ac710 100644 --- a/test/integration/integration.cc +++ b/test/integration/integration.cc @@ -148,11 +148,10 @@ void IntegrationStreamDecoder::onResetStream(Http::StreamResetReason reason, abs } } -IntegrationTcpClient::IntegrationTcpClient(Event::Dispatcher& dispatcher, - Event::TestTimeSystem& time_system, - MockBufferFactory& factory, uint32_t port, - Network::Address::IpVersion version, - bool enable_half_close) +IntegrationTcpClient::IntegrationTcpClient( + Event::Dispatcher& dispatcher, Event::TestTimeSystem& time_system, MockBufferFactory& factory, + uint32_t port, Network::Address::IpVersion version, bool enable_half_close, + const Network::ConnectionSocket::OptionsSharedPtr& options) : time_system_(time_system), payload_reader_(new WaitForPayloadReader(dispatcher)), callbacks_(new ConnectionCallbacks(*this)) { EXPECT_CALL(factory, create_(_, _, _)) @@ -166,7 +165,7 @@ IntegrationTcpClient::IntegrationTcpClient(Event::Dispatcher& dispatcher, connection_ = dispatcher.createClientConnection( Network::Utility::resolveUrl( fmt::format("tcp://{}:{}", Network::Test::getLoopbackAddressUrlString(version), port)), - Network::Address::InstanceConstSharedPtr(), Network::Test::createRawBufferSocket(), nullptr); + Network::Address::InstanceConstSharedPtr(), Network::Test::createRawBufferSocket(), options); ON_CALL(*client_write_buffer_, drain(_)) .WillByDefault(testing::Invoke(client_write_buffer_, &MockWatermarkBuffer::baseDrain)); @@ -401,9 +400,11 @@ void BaseIntegrationTest::setUpstreamProtocol(FakeHttpConnection::Type protocol) } } -IntegrationTcpClientPtr BaseIntegrationTest::makeTcpConnection(uint32_t port) { +IntegrationTcpClientPtr +BaseIntegrationTest::makeTcpConnection(uint32_t port, + const Network::ConnectionSocket::OptionsSharedPtr& options) { return std::make_unique(*dispatcher_, time_system_, *mock_buffer_factory_, - port, version_, enable_half_close_); + port, version_, enable_half_close_, options); } void BaseIntegrationTest::registerPort(const std::string& key, uint32_t port) { diff --git a/test/integration/integration.h b/test/integration/integration.h index 3ea127299066..c68efaf0963e 100644 --- a/test/integration/integration.h +++ b/test/integration/integration.h @@ -98,7 +98,8 @@ class IntegrationTcpClient { public: IntegrationTcpClient(Event::Dispatcher& dispatcher, Event::TestTimeSystem& time_system, MockBufferFactory& factory, uint32_t port, - Network::Address::IpVersion version, bool enable_half_close = false); + Network::Address::IpVersion version, bool enable_half_close, + const Network::ConnectionSocket::OptionsSharedPtr& options); void close(); void waitForData(const std::string& data, bool exact_match = true); @@ -190,7 +191,9 @@ class BaseIntegrationTest : protected Logger::Loggable { FakeHttpConnection::Type upstreamProtocol() const { return upstream_protocol_; } - IntegrationTcpClientPtr makeTcpConnection(uint32_t port); + IntegrationTcpClientPtr + makeTcpConnection(uint32_t port, + const Network::ConnectionSocket::OptionsSharedPtr& options = nullptr); // Test-wide port map. 
void registerPort(const std::string& key, uint32_t port); From 3cc670d82f44b8dda710c7e16ad23873c9d7544a Mon Sep 17 00:00:00 2001 From: Sam Flattery <44659644+samflattery@users.noreply.github.com> Date: Wed, 1 Jul 2020 21:13:42 +0100 Subject: [PATCH 509/909] fuzz: skeleton for xDS fuzzer (#11653) * update ads tests to get bootstrap from utility.cc Signed-off-by: Sam Flattery * initial skeleton for fuzzer Signed-off-by: Sam Flattery * working xDS skeleton Signed-off-by: Sam Flattery * add protobuf validation Signed-off-by: Sam Flattery * change proto input and refactor code Signed-off-by: Sam Flattery * fix compilation issues Signed-off-by: Sam Flattery * refactor skeleton with new utility functions Signed-off-by: Sam Flattery * remove num_lds_updates_ to add to verifier class instead Signed-off-by: Sam Flattery * add another corpus entry, disallow delta for now Signed-off-by: Sam Flattery * style fix Signed-off-by: Sam Flattery * initial feedback changes Signed-off-by: Sam Flattery * rest of feedback changes Signed-off-by: Sam Flattery * change case on constant member variables Signed-off-by: Sam Flattery * minor nit fixes Signed-off-by: Sam Flattery * fix typo in removing listener Signed-off-by: Sam Flattery * small cleanups Signed-off-by: Sam Flattery * remove old configs from corpora Signed-off-by: Sam Flattery * fix delta issue by removing sotw_or_delta_ from class Signed-off-by: Sam Flattery * nit fixes Signed-off-by: Sam Flattery * add waiting for ACKs Signed-off-by: Sam Flattery * add waiting for delta requests Signed-off-by: Sam Flattery * remove commented code Signed-off-by: Sam Flattery * fix timeout by not waiting for route acks before listeners added Signed-off-by: Sam Flattery * style fixes Signed-off-by: Sam Flattery * move constant variable to public Signed-off-by: Sam Flattery --- test/integration/ads_integration.h | 1 + test/server/config_validation/BUILD | 33 ++- .../config_validation/xds_corpus/example0 | 31 +++ .../config_validation/xds_corpus/example1 | 13 + .../config_validation/xds_corpus/example2 | 30 ++ .../config_validation/xds_corpus/example3 | 19 ++ test/server/config_validation/xds_fuzz.cc | 260 ++++++++++++++++++ test/server/config_validation/xds_fuzz.h | 67 +++++ test/server/config_validation/xds_fuzz.proto | 53 ++++ .../server/config_validation/xds_fuzz_test.cc | 21 ++ 10 files changed, 527 insertions(+), 1 deletion(-) create mode 100644 test/server/config_validation/xds_corpus/example0 create mode 100644 test/server/config_validation/xds_corpus/example1 create mode 100644 test/server/config_validation/xds_corpus/example2 create mode 100644 test/server/config_validation/xds_corpus/example3 create mode 100644 test/server/config_validation/xds_fuzz.cc create mode 100644 test/server/config_validation/xds_fuzz.h create mode 100644 test/server/config_validation/xds_fuzz.proto create mode 100644 test/server/config_validation/xds_fuzz_test.cc diff --git a/test/integration/ads_integration.h b/test/integration/ads_integration.h index 78cce1913de9..9bac58f602d0 100644 --- a/test/integration/ads_integration.h +++ b/test/integration/ads_integration.h @@ -10,6 +10,7 @@ #include "envoy/config/route/v3/route.pb.h" #include "test/common/grpc/grpc_client_integration.h" +#include "test/config/utility.h" #include "test/integration/http_integration.h" namespace Envoy { diff --git a/test/server/config_validation/BUILD b/test/server/config_validation/BUILD index 086575831d1f..617e54334dfc 100644 --- a/test/server/config_validation/BUILD +++ b/test/server/config_validation/BUILD @@ 
-1,4 +1,4 @@ -load("//bazel:envoy_build_system.bzl", "envoy_cc_fuzz_test", "envoy_cc_test", "envoy_package") +load("//bazel:envoy_build_system.bzl", "envoy_cc_fuzz_test", "envoy_cc_test", "envoy_cc_test_library", "envoy_package", "envoy_proto_library") load("//source/extensions:all_extensions.bzl", "envoy_all_extensions") load("//bazel:repositories.bzl", "PPC_SKIP_TARGETS", "WINDOWS_SKIP_TARGETS") @@ -107,3 +107,34 @@ envoy_cc_fuzz_test( "//conditions:default": envoy_all_extensions(), }), ) + +envoy_proto_library( + name = "xds_fuzz_proto", + srcs = ["xds_fuzz.proto"], +) + +envoy_cc_test_library( + name = "xds_fuzz_lib", + srcs = ["xds_fuzz.cc"], + hdrs = ["xds_fuzz.h"], + deps = [ + ":xds_fuzz_proto_cc_proto", + "//test/integration:http_integration_lib", + "@envoy_api//envoy/admin/v3:pkg_cc_proto", + "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", + "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", + "@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto", + "@envoy_api//envoy/config/listener/v3:pkg_cc_proto", + "@envoy_api//envoy/config/route/v3:pkg_cc_proto", + ], +) + +envoy_cc_fuzz_test( + name = "xds_fuzz_test", + srcs = ["xds_fuzz_test.cc"], + corpus = "xds_corpus", + deps = [ + ":xds_fuzz_lib", + "//source/common/protobuf:utility_lib", + ], +) diff --git a/test/server/config_validation/xds_corpus/example0 b/test/server/config_validation/xds_corpus/example0 new file mode 100644 index 000000000000..5f95c4c9cc56 --- /dev/null +++ b/test/server/config_validation/xds_corpus/example0 @@ -0,0 +1,31 @@ +actions { + add_listener { + listener_num : 0 + route_num : 0 + } +} +actions { + add_route { + route_num : 0 + } +} +actions { + add_listener { + listener_num : 1 + route_num : 1 + } +} +actions { + add_route { + route_num : 1 + } +} +actions { + add_listener { + listener_num : 2 + route_num : 2 + } +} +config { + sotw_or_delta : SOTW +} diff --git a/test/server/config_validation/xds_corpus/example1 b/test/server/config_validation/xds_corpus/example1 new file mode 100644 index 000000000000..9b99995b1e4e --- /dev/null +++ b/test/server/config_validation/xds_corpus/example1 @@ -0,0 +1,13 @@ +actions { + remove_route { + route_num: 1 + } +} +actions { + remove_listener { + listener_num: 1 + } +} +config { + sotw_or_delta: SOTW +} diff --git a/test/server/config_validation/xds_corpus/example2 b/test/server/config_validation/xds_corpus/example2 new file mode 100644 index 000000000000..9b25dd51d01d --- /dev/null +++ b/test/server/config_validation/xds_corpus/example2 @@ -0,0 +1,30 @@ +actions { + add_listener { + listener_num: 1 + route_num: 1 + } +} +actions { + add_route { + route_num: 1 + } +} +actions { + remove_route { + route_num: 1 + } +} +actions { + add_route { + route_num: 1 + } +} +actions { + add_listener { + listener_num: 2 + route_num: 2 + } +} +config { + sotw_or_delta : DELTA +} diff --git a/test/server/config_validation/xds_corpus/example3 b/test/server/config_validation/xds_corpus/example3 new file mode 100644 index 000000000000..e64420e81a2b --- /dev/null +++ b/test/server/config_validation/xds_corpus/example3 @@ -0,0 +1,19 @@ +actions { + add_route { + route_num : 0 + } +} +actions { + add_listener { + listener_num : 0 + route_num : 0 + } +} +actions { + remove_listener { + listener_num : 0 + } +} +config { + sotw_or_delta : SOTW +} diff --git a/test/server/config_validation/xds_fuzz.cc b/test/server/config_validation/xds_fuzz.cc new file mode 100644 index 000000000000..b369b482ddd2 --- /dev/null +++ b/test/server/config_validation/xds_fuzz.cc @@ -0,0 +1,260 @@ +#include 
"test/server/config_validation/xds_fuzz.h" + +#include "envoy/config/bootstrap/v3/bootstrap.pb.h" +#include "envoy/config/cluster/v3/cluster.pb.h" +#include "envoy/config/endpoint/v3/endpoint.pb.h" +#include "envoy/config/listener/v3/listener.pb.h" +#include "envoy/config/route/v3/route.pb.h" + +namespace Envoy { + +// helper functions to build API responses +envoy::config::cluster::v3::Cluster XdsFuzzTest::buildCluster(const std::string& name) { + return ConfigHelper::buildCluster(name, "ROUND_ROBIN", api_version_); +}; + +envoy::config::endpoint::v3::ClusterLoadAssignment +XdsFuzzTest::buildClusterLoadAssignment(const std::string& name) { + return ConfigHelper::buildClusterLoadAssignment( + name, Network::Test::getLoopbackAddressString(ip_version_), + fake_upstreams_[0]->localAddress()->ip()->port(), api_version_); +} + +envoy::config::listener::v3::Listener XdsFuzzTest::buildListener(uint32_t listener_num, + uint32_t route_num) { + std::string name = absl::StrCat("listener_", listener_num % ListenersMax); + std::string route = absl::StrCat("route_config_", route_num % RoutesMax); + return ConfigHelper::buildListener( + name, route, Network::Test::getLoopbackAddressString(ip_version_), "ads_test", api_version_); +} + +envoy::config::route::v3::RouteConfiguration XdsFuzzTest::buildRouteConfig(uint32_t route_num) { + std::string route = absl::StrCat("route_config_", route_num % RoutesMax); + return ConfigHelper::buildRouteConfig(route, "cluster_0", api_version_); +} + +// helper functions to send API responses +void XdsFuzzTest::updateListener( + const std::vector& listeners, + const std::vector& added_or_updated, + const std::vector& removed) { + ENVOY_LOG_MISC(debug, "Sending Listener DiscoveryResponse version {}", version_); + sendDiscoveryResponse(Config::TypeUrl::get().Listener, + listeners, added_or_updated, removed, + std::to_string(version_)); +} + +void XdsFuzzTest::updateRoute( + const std::vector routes, + const std::vector& added_or_updated, + const std::vector& removed) { + ENVOY_LOG_MISC(debug, "Sending Route DiscoveryResponse version {}", version_); + sendDiscoveryResponse( + Config::TypeUrl::get().RouteConfiguration, routes, added_or_updated, removed, + std::to_string(version_)); +} + +XdsFuzzTest::XdsFuzzTest(const test::server::config_validation::XdsTestCase& input, + envoy::config::core::v3::ApiVersion api_version) + : HttpIntegrationTest( + Http::CodecClient::Type::HTTP2, TestEnvironment::getIpVersionsForTest()[0], + ConfigHelper::adsBootstrap(input.config().sotw_or_delta() == + test::server::config_validation::Config::SOTW + ? 
"GRPC" + : "DELTA_GRPC", + api_version)), + actions_(input.actions()), version_(1), api_version_(api_version), + ip_version_(TestEnvironment::getIpVersionsForTest()[0]) { + use_lds_ = false; + create_xds_upstream_ = true; + tls_xds_upstream_ = false; + + if (input.config().sotw_or_delta() == test::server::config_validation::Config::SOTW) { + sotw_or_delta_ = Grpc::SotwOrDelta::Sotw; + } else { + sotw_or_delta_ = Grpc::SotwOrDelta::Delta; + } +} + +/** + * initialize an envoy configured with a fully dynamic bootstrap with ADS over gRPC + */ +void XdsFuzzTest::initialize() { + config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + auto* ads_config = bootstrap.mutable_dynamic_resources()->mutable_ads_config(); + auto* grpc_service = ads_config->add_grpc_services(); + + std::string cluster_name = "ads_cluster"; + grpc_service->mutable_envoy_grpc()->set_cluster_name(cluster_name); + auto* ads_cluster = bootstrap.mutable_static_resources()->add_clusters(); + ads_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]); + ads_cluster->set_name("ads_cluster"); + }); + setUpstreamProtocol(FakeHttpConnection::Type::HTTP2); + HttpIntegrationTest::initialize(); + if (xds_stream_ == nullptr) { + createXdsConnection(); + AssertionResult result = xds_connection_->waitForNewStream(*dispatcher_, xds_stream_); + RELEASE_ASSERT(result, result.message()); + xds_stream_->startGrpcStream(); + } +} + +void XdsFuzzTest::close() { + cleanUpXdsConnection(); + test_server_.reset(); + fake_upstreams_.clear(); +} + +/** + * remove a listener from the list of listeners if it exists + * @param the listener number to be removed + * @return the listener as an optional so that it can be used in a delta request + */ +absl::optional XdsFuzzTest::removeListener(uint32_t listener_num) { + std::string match = absl::StrCat("listener_", listener_num % ListenersMax); + + for (auto it = listeners_.begin(); it != listeners_.end(); ++it) { + if (it->name() == match) { + listeners_.erase(it); + return match; + } + } + return {}; +} + +/** + * remove a route from the list of routes if it exists + * @param the route number to be removed + * @return the route as an optional so that it can be used in a delta request + */ +absl::optional XdsFuzzTest::removeRoute(uint32_t route_num) { + std::string match = absl::StrCat("route_config_", route_num % RoutesMax); + for (auto it = routes_.begin(); it != routes_.end(); ++it) { + if (it->name() == match) { + routes_.erase(it); + return match; + } + } + return {}; +} + +/** + * wait for a specific ACK, ignoring any other ACKs that are made in the meantime + * @param the expected API type url of the ack + * @param the expected version number + * @return AssertionSuccess() if the ack was received, else an AssertionError() + */ +AssertionResult XdsFuzzTest::waitForAck(const std::string& expected_type_url, + const std::string& expected_version) { + if (sotw_or_delta_ == Grpc::SotwOrDelta::Sotw) { + API_NO_BOOST(envoy::api::v2::DiscoveryRequest) discovery_request; + do { + VERIFY_ASSERTION(xds_stream_->waitForGrpcMessage(*dispatcher_, discovery_request)); + ENVOY_LOG_MISC(info, "Received gRPC message with type {} and version {}", + discovery_request.type_url(), expected_version); + } while (expected_type_url != discovery_request.type_url() && + expected_version != discovery_request.version_info()); + } else { + API_NO_BOOST(envoy::api::v2::DeltaDiscoveryRequest) delta_discovery_request; + do { + VERIFY_ASSERTION(xds_stream_->waitForGrpcMessage(*dispatcher_, 
delta_discovery_request)); + ENVOY_LOG_MISC(info, "Received gRPC message with type {}", + delta_discovery_request.type_url()); + } while (expected_type_url != delta_discovery_request.type_url()); + } + version_++; + return AssertionSuccess(); +} + +/** + * run the sequence of actions defined in the fuzzed protobuf + */ +void XdsFuzzTest::replay() { + initialize(); + + // set up cluster + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "", {}, {}, {}, true)); + sendDiscoveryResponse(Config::TypeUrl::get().Cluster, + {buildCluster("cluster_0")}, + {buildCluster("cluster_0")}, {}, "0"); + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, "", + {"cluster_0"}, {"cluster_0"}, {})); + sendDiscoveryResponse( + Config::TypeUrl::get().ClusterLoadAssignment, {buildClusterLoadAssignment("cluster_0")}, + {buildClusterLoadAssignment("cluster_0")}, {}, "0"); + + // the client will not subscribe to the RouteConfiguration type URL until it + // receives a listener, and the ACKS it sends back seem to be an empty type + // URL so just don't check them until a listener is added + bool sent_listener = false; + + for (const auto& action : actions_) { + switch (action.action_selector_case()) { + case test::server::config_validation::Action::kAddListener: { + sent_listener = true; + uint32_t listener_num = action.add_listener().listener_num(); + removeListener(listener_num); + auto listener = buildListener(listener_num, action.add_listener().route_num()); + listeners_.push_back(listener); + + updateListener(listeners_, {listener}, {}); + // use waitForAck instead of compareDiscoveryRequest as the client makes + // additional discoveryRequests at launch that we might not want to + // respond to yet + EXPECT_TRUE(waitForAck(Config::TypeUrl::get().Listener, std::to_string(version_))); + break; + } + case test::server::config_validation::Action::kRemoveListener: { + /* sent_listener = true; */ + auto removed = removeListener(action.remove_listener().listener_num()); + + if (removed) { + updateListener(listeners_, {}, {*removed}); + EXPECT_TRUE(waitForAck(Config::TypeUrl::get().Listener, std::to_string(version_))); + } + + break; + } + case test::server::config_validation::Action::kAddRoute: { + uint32_t route_num = action.add_route().route_num(); + auto removed = removeRoute(route_num); + auto route = buildRouteConfig(route_num); + routes_.push_back(route); + + if (removed) { + // if the route was already in routes_, don't send a duplicate add in delta request + updateRoute(routes_, {}, {}); + } else { + updateRoute(routes_, {route}, {}); + } + + if (sent_listener) { + EXPECT_TRUE( + waitForAck(Config::TypeUrl::get().RouteConfiguration, std::to_string(version_))); + } + break; + } + case test::server::config_validation::Action::kRemoveRoute: { + if (sotw_or_delta_ == Grpc::SotwOrDelta::Sotw) { + // routes cannot be removed in SOTW updates + break; + } + + auto removed = removeRoute(action.remove_route().route_num()); + if (removed) { + updateRoute(routes_, {}, {*removed}); + EXPECT_TRUE( + waitForAck(Config::TypeUrl::get().RouteConfiguration, std::to_string(version_))); + } + break; + } + default: + break; + } + } + + close(); +} + +} // namespace Envoy diff --git a/test/server/config_validation/xds_fuzz.h b/test/server/config_validation/xds_fuzz.h new file mode 100644 index 000000000000..160a18f2e48e --- /dev/null +++ b/test/server/config_validation/xds_fuzz.h @@ -0,0 +1,67 @@ +#pragma once + +#include +#include + +#include "envoy/admin/v3/config_dump.pb.h" 
+#include "envoy/config/cluster/v3/cluster.pb.h" +#include "envoy/config/endpoint/v3/endpoint.pb.h" +#include "envoy/config/listener/v3/listener.pb.h" +#include "envoy/config/route/v3/route.pb.h" + +#include "test/common/grpc/grpc_client_integration.h" +#include "test/config/utility.h" +#include "test/integration/http_integration.h" +#include "test/server/config_validation/xds_fuzz.pb.h" + +#include "absl/types/optional.h" + +namespace Envoy { + +class XdsFuzzTest : public HttpIntegrationTest { +public: + XdsFuzzTest(const test::server::config_validation::XdsTestCase& input, + envoy::config::core::v3::ApiVersion api_version); + + envoy::config::cluster::v3::Cluster buildCluster(const std::string& name); + + envoy::config::endpoint::v3::ClusterLoadAssignment + buildClusterLoadAssignment(const std::string& name); + + envoy::config::listener::v3::Listener buildListener(uint32_t listener_num, uint32_t route_num); + + envoy::config::route::v3::RouteConfiguration buildRouteConfig(uint32_t route_num); + + void updateListener(const std::vector& listeners, + const std::vector& added_or_updated, + const std::vector& removed); + + void + updateRoute(const std::vector routes, + const std::vector& added_or_updated, + const std::vector& removed); + + void initialize() override; + void replay(); + void close(); + + const size_t ListenersMax = 3; + const size_t RoutesMax = 5; + +private: + absl::optional removeListener(uint32_t listener_num); + absl::optional removeRoute(uint32_t route_num); + AssertionResult waitForAck(const std::string& expected_type_url, + const std::string& expected_version); + + Protobuf::RepeatedPtrField actions_; + std::vector routes_; + std::vector listeners_; + + uint64_t version_; + envoy::config::core::v3::ApiVersion api_version_; + + Network::Address::IpVersion ip_version_; +}; + +} // namespace Envoy diff --git a/test/server/config_validation/xds_fuzz.proto b/test/server/config_validation/xds_fuzz.proto new file mode 100644 index 000000000000..087ab0d71372 --- /dev/null +++ b/test/server/config_validation/xds_fuzz.proto @@ -0,0 +1,53 @@ +syntax = "proto3"; + +package test.server.config_validation; + +import "validate/validate.proto"; + +message AddListener { + // generates a new listener listener_x with number listener_num, which can later be removed by + // RemoveListener + // if listener_x had already been added, it will update listener_x's route_config + uint32 listener_num = 1; + // listener_x references route_y, which has number route_num + uint32 route_num = 2; +} + +message AddRoute { + // generates a new route route_y with number route_num which can later be removed by a RemoveRoute + uint32 route_num = 1; +} + +message RemoveListener { + // removes listener_x + uint32 listener_num = 1; +} + +message RemoveRoute { + // removes route_y + uint32 route_num = 1; +} + +message Action { + oneof action_selector { + option (validate.required) = true; + + AddListener add_listener = 1; + AddRoute add_route = 2; + RemoveListener remove_listener = 3; + RemoveRoute remove_route = 4; + } +} + +message Config { + enum SotwOrDelta { + SOTW = 0; + DELTA = 1; + } + SotwOrDelta sotw_or_delta = 1; +} + +message XdsTestCase { + repeated Action actions = 1; + Config config = 2; +} diff --git a/test/server/config_validation/xds_fuzz_test.cc b/test/server/config_validation/xds_fuzz_test.cc new file mode 100644 index 000000000000..87f3e5969023 --- /dev/null +++ b/test/server/config_validation/xds_fuzz_test.cc @@ -0,0 +1,21 @@ +/* #include "common/protobuf/utility.h" */ + +#include 
"test/fuzz/fuzz_runner.h" +#include "test/server/config_validation/xds_fuzz.h" +#include "test/server/config_validation/xds_fuzz.pb.validate.h" + +namespace Envoy { + +DEFINE_PROTO_FUZZER(const test::server::config_validation::XdsTestCase& input) { + RELEASE_ASSERT(!TestEnvironment::getIpVersionsForTest().empty(), ""); + try { + TestUtility::validate(input); + } catch (const ProtoValidationException& e) { + ENVOY_LOG_MISC(debug, "ProtoValidationException: {}", e.what()); + return; + } + XdsFuzzTest test(input, envoy::config::core::v3::ApiVersion::V2); + test.replay(); +} + +} // namespace Envoy From 58291523f107fd703cff09463bb36068e6fe8dc2 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Wed, 1 Jul 2020 19:02:01 -0400 Subject: [PATCH 510/909] test: fixing a typo (#11803) Risk Level: n/a Testing: n/a Docs Changes: n/a Release Notes: n/a Signed-off-by: Alyssa Wilk --- test/integration/http2_integration_test.cc | 32 +++++++++++----------- test/integration/http2_integration_test.h | 2 +- 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/test/integration/http2_integration_test.cc b/test/integration/http2_integration_test.cc index ebbd0e7ca66d..50ec78b68787 100644 --- a/test/integration/http2_integration_test.cc +++ b/test/integration/http2_integration_test.cc @@ -1545,7 +1545,7 @@ Http2Frame Http2FloodMitigationTest::readFrame() { return frame; } -void Http2FloodMitigationTest::sendFame(const Http2Frame& frame) { +void Http2FloodMitigationTest::sendFrame(const Http2Frame& frame) { ASSERT_TRUE(tcp_client_->connected()); ASSERT_TRUE(tcp_client_->write(std::string(frame), false, false)); } @@ -1600,7 +1600,7 @@ void Http2FloodMitigationTest::floodServer(absl::string_view host, absl::string_ const std::string& flood_stat) { uint32_t request_idx = 0; auto request = Http2Frame::makeRequest(request_idx, host, path); - sendFame(request); + sendFrame(request); auto frame = readFrame(); EXPECT_EQ(Http2Frame::Type::Headers, frame.type()); EXPECT_EQ(expected_http_status, frame.responseStatus()); @@ -1608,7 +1608,7 @@ void Http2FloodMitigationTest::floodServer(absl::string_view host, absl::string_ uint64_t total_bytes_sent = 0; while (total_bytes_sent < TransmitThreshold && tcp_client_->connected()) { request = Http2Frame::makeRequest(++request_idx, host, path); - sendFame(request); + sendFrame(request); total_bytes_sent += request.size(); } EXPECT_LE(total_bytes_sent, TransmitThreshold) << "Flood mitigation is broken."; @@ -1669,7 +1669,7 @@ TEST_P(Http2FloodMitigationTest, RST_STREAM) { int i = 0; auto request = Http::Http2::Http2Frame::makeMalformedRequest(i); - sendFame(request); + sendFrame(request); auto response = readFrame(); // Make sure we've got RST_STREAM from the server EXPECT_EQ(Http2Frame::Type::RstStream, response.type()); @@ -1680,7 +1680,7 @@ TEST_P(Http2FloodMitigationTest, RST_STREAM) { uint64_t total_bytes_sent = 0; while (total_bytes_sent < TransmitThreshold && tcp_client_->connected()) { request = Http::Http2::Http2Frame::makeMalformedRequest(++i); - sendFame(request); + sendFrame(request); total_bytes_sent += request.size(); } EXPECT_LE(total_bytes_sent, TransmitThreshold) << "Flood mitigation is broken."; @@ -1717,7 +1717,7 @@ TEST_P(Http2FloodMitigationTest, EmptyHeaders) { uint32_t request_idx = 0; auto request = Http2Frame::makeEmptyHeadersFrame(request_idx); - sendFame(request); + sendFrame(request); tcp_client_->waitForDisconnect(); @@ -1731,11 +1731,11 @@ TEST_P(Http2FloodMitigationTest, EmptyHeadersContinuation) { uint32_t request_idx = 0; auto request = 
Http2Frame::makeEmptyHeadersFrame(request_idx); - sendFame(request); + sendFrame(request); for (int i = 0; i < 2; i++) { request = Http2Frame::makeEmptyContinuationFrame(request_idx); - sendFame(request); + sendFrame(request); } tcp_client_->waitForDisconnect(); @@ -1751,11 +1751,11 @@ TEST_P(Http2FloodMitigationTest, EmptyData) { uint32_t request_idx = 0; auto request = Http2Frame::makePostRequest(request_idx, "host", "/"); - sendFame(request); + sendFrame(request); for (int i = 0; i < 2; i++) { request = Http2Frame::makeEmptyDataFrame(request_idx); - sendFame(request); + sendFrame(request); } tcp_client_->waitForDisconnect(); @@ -1778,7 +1778,7 @@ TEST_P(Http2FloodMitigationTest, PriorityOpenStream) { // Open stream. uint32_t request_idx = 0; auto request = Http2Frame::makeRequest(request_idx, "host", "/"); - sendFame(request); + sendFrame(request); floodServer(Http2Frame::makePriorityFrame(request_idx, request_idx + 1), "http2.inbound_priority_frames_flood"); @@ -1792,7 +1792,7 @@ TEST_P(Http2FloodMitigationTest, PriorityClosedStream) { // Open stream. uint32_t request_idx = 0; auto request = Http2Frame::makeRequest(request_idx, "host", "/"); - sendFame(request); + sendFrame(request); // Reading response marks this stream as closed in nghttp2. auto frame = readFrame(); EXPECT_EQ(Http2Frame::Type::Headers, frame.type()); @@ -1808,7 +1808,7 @@ TEST_P(Http2FloodMitigationTest, WindowUpdate) { // Open stream. uint32_t request_idx = 0; auto request = Http2Frame::makeRequest(request_idx, "host", "/"); - sendFame(request); + sendFrame(request); floodServer(Http2Frame::makeWindowUpdateFrame(request_idx, 1), "http2.inbound_window_update_frames_flood"); @@ -1822,7 +1822,7 @@ TEST_P(Http2FloodMitigationTest, ZerolenHeader) { // Send invalid request. uint32_t request_idx = 0; auto request = Http2Frame::makeMalformedRequestWithZerolenHeader(request_idx, "host", "/"); - sendFame(request); + sendFrame(request); tcp_client_->waitForDisconnect(); @@ -1849,7 +1849,7 @@ TEST_P(Http2FloodMitigationTest, ZerolenHeaderAllowed) { // Send invalid request. uint32_t request_idx = 0; auto request = Http2Frame::makeMalformedRequestWithZerolenHeader(request_idx, "host", "/"); - sendFame(request); + sendFrame(request); // Make sure we've got RST_STREAM from the server. auto response = readFrame(); EXPECT_EQ(Http2Frame::Type::RstStream, response.type()); @@ -1857,7 +1857,7 @@ TEST_P(Http2FloodMitigationTest, ZerolenHeaderAllowed) { // Send valid request using the same connection. 
request_idx++; request = Http2Frame::makeRequest(request_idx, "host", "/"); - sendFame(request); + sendFrame(request); response = readFrame(); EXPECT_EQ(Http2Frame::Type::Headers, response.type()); EXPECT_EQ(Http2Frame::ResponseStatus::Ok, response.responseStatus()); diff --git a/test/integration/http2_integration_test.h b/test/integration/http2_integration_test.h index 88b019b57f1b..11c0477b4c61 100644 --- a/test/integration/http2_integration_test.h +++ b/test/integration/http2_integration_test.h @@ -82,7 +82,7 @@ class Http2FloodMitigationTest : public testing::TestWithParam Date: Wed, 1 Jul 2020 19:03:42 -0400 Subject: [PATCH 511/909] test: speeding up a test from 80s to 3s (#11850) Signed-off-by: Alyssa Wilk --- test/integration/cx_limit_integration_test.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration/cx_limit_integration_test.cc b/test/integration/cx_limit_integration_test.cc index 5d70380995aa..abdc5711fe2e 100644 --- a/test/integration/cx_limit_integration_test.cc +++ b/test/integration/cx_limit_integration_test.cc @@ -43,7 +43,6 @@ class ConnectionLimitIntegrationTest : public testing::TestWithParam tcp_clients; std::vector raw_conns; - tcp_clients.emplace_back(makeTcpConnection(lookupPort("listener_0"))); raw_conns.emplace_back(); ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(raw_conns.back())); @@ -56,7 +55,8 @@ class ConnectionLimitIntegrationTest : public testing::TestWithParamwaitForRawConnection(raw_conns.back())); + ASSERT_FALSE( + fake_upstreams_[0]->waitForRawConnection(raw_conns.back(), std::chrono::milliseconds(500))); tcp_clients.back()->waitForDisconnect(); // Get rid of the client that failed to connect. From 75930e357165634f69814dcb3e42f75a373b88ba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=A5=81=E6=97=A0=E5=BF=A7?= Date: Thu, 2 Jul 2020 07:08:57 +0800 Subject: [PATCH 512/909] lua: Add per filter config for Lua filter (#11235) This allows Lua filter to support per-route configuration. This patch enables the configured Lua filter to have multiple registered codes that can be referenced from each per-route config. Disabling running the global Lua filter for a route is also supported. 
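As a rough sketch of the new usage (the script name below is illustrative, taken from the docs and
integration test added in this patch), a route can reference one of the named scripts registered in
the filter's source_codes map through the new LuaPerRoute message:

  typed_per_filter_config:
    envoy.filters.http.lua:
      "@type": type.googleapis.com/envoy.extensions.filters.http.lua.v3.LuaPerRoute
      name: hello.lua   # a oneof with `disabled: true`, which turns the filter off for the route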
Signed-off-by: wbpcode --- .../extensions/filters/http/lua/v3/BUILD | 1 + .../extensions/filters/http/lua/v3/lua.proto | 35 ++++ .../http/http_filters/lua_filter.rst | 80 +++++++- docs/root/version_history/current.rst | 1 + .../extensions/filters/http/lua/v3/BUILD | 1 + .../extensions/filters/http/lua/v3/lua.proto | 35 ++++ source/extensions/filters/http/lua/BUILD | 2 + source/extensions/filters/http/lua/config.cc | 9 +- source/extensions/filters/http/lua/config.h | 9 +- .../extensions/filters/http/lua/lua_filter.cc | 74 +++++--- .../extensions/filters/http/lua/lua_filter.h | 103 +++++++++-- test/extensions/filters/http/lua/BUILD | 1 + .../filters/http/lua/lua_filter_test.cc | 133 +++++++++++++- .../filters/http/lua/lua_integration_test.cc | 171 +++++++++++++++++- 14 files changed, 596 insertions(+), 59 deletions(-) diff --git a/api/envoy/extensions/filters/http/lua/v3/BUILD b/api/envoy/extensions/filters/http/lua/v3/BUILD index 69390e69786a..8878a585f46d 100644 --- a/api/envoy/extensions/filters/http/lua/v3/BUILD +++ b/api/envoy/extensions/filters/http/lua/v3/BUILD @@ -6,6 +6,7 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ + "//envoy/config/core/v3:pkg", "//envoy/config/filter/http/lua/v2:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], diff --git a/api/envoy/extensions/filters/http/lua/v3/lua.proto b/api/envoy/extensions/filters/http/lua/v3/lua.proto index da6b0c09a0f6..10ac92e83b01 100644 --- a/api/envoy/extensions/filters/http/lua/v3/lua.proto +++ b/api/envoy/extensions/filters/http/lua/v3/lua.proto @@ -2,6 +2,8 @@ syntax = "proto3"; package envoy.extensions.filters.http.lua.v3; +import "envoy/config/core/v3/base.proto"; + import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -24,4 +26,37 @@ message Lua { // be properly escaped. YAML configuration may be easier to read since YAML supports multi-line // strings so complex scripts can be easily expressed inline in the configuration. string inline_code = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Map of named Lua source codes that can be referenced in :ref:` LuaPerRoute + // `. The Lua source codes can be + // loaded from inline string or local files. + // + // Example: + // + // .. code-block:: yaml + // + // source_codes: + // hello.lua: + // inline_string: | + // function envoy_on_response(response_handle) + // -- Do something. + // end + // world.lua: + // filename: /etc/lua/world.lua + // + map source_codes = 2; +} + +message LuaPerRoute { + oneof override { + option (validate.required) = true; + + // Disable the Lua filter for this particular vhost or route. If disabled is specified in + // multiple per-filter-configs, the most specific one will be used. + bool disabled = 1 [(validate.rules).bool = {const: true}]; + + // A name of a Lua source code stored in + // :ref:`Lua.source_codes `. + string name = 2 [(validate.rules).string = {min_len: 1}]; + } } diff --git a/docs/root/configuration/http/http_filters/lua_filter.rst b/docs/root/configuration/http/http_filters/lua_filter.rst index 0bea99a2ba0d..b5b720941eb1 100644 --- a/docs/root/configuration/http/http_filters/lua_filter.rst +++ b/docs/root/configuration/http/http_filters/lua_filter.rst @@ -23,10 +23,6 @@ supported Lua version is mostly 5.1 with some 5.2 features. See the `LuaJIT docu supports more 5.2 features and additional architectures. Envoy can be built with moonjit support by using the following bazel option: ``--//source/extensions/filters/common/lua:moonjit=1``. 
-The filter only supports loading Lua code in-line in the configuration. If local filesystem code -is desired, a trivial in-line script can be used to load the rest of the code from the local -environment. - The design of the filter and Lua support at a high level is as follows: * All Lua environments are :ref:`per worker thread `. This means that @@ -63,6 +59,82 @@ Configuration * :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.http.lua*. +A simple example of configuring Lua HTTP filter that contains only :ref:`inline_code +` is as follow: + +.. code-block:: yaml + + name: envoy.filters.http.lua + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.lua.v3.lua + inline_code: | + -- Called on the request path. + function envoy_on_request(request_handle) + -- Do something. + end + -- Called on the response path. + function envoy_on_response(response_handle) + -- Do something. + end + +By default, Lua script defined in ``inline_code`` will be treated as a ``GLOBAL`` script. Envoy will +execute it for every HTTP request. + +Per-Route Configuration +----------------------- + +The Lua HTTP filter also can be disabled or overridden on a per-route basis by providing a +:ref:`LuaPerRoute ` configuration +on the virtual host, route, or weighted cluster. + +As a concrete example, given the following Lua filter configuration: + +.. code-block:: yaml + + name: envoy.filters.http.lua + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.lua.v3.lua + inline_code: | + function envoy_on_request(request_handle) + -- do something + end + source_codes: + hello.lua: + inline_string: | + function envoy_on_request(request_handle) + request_handle:logInfo("Hello World.") + end + bye.lua: + inline_string: | + function envoy_on_response(response_handle) + response_handle:logInfo("Bye Bye.") + end + +The HTTP Lua filter can be disabled on some virtual host, route, or weighted cluster by the +LuaPerRoute configuration as follow: + +.. code-block:: yaml + + per_filter_config: + envoy.filters.http.lua: + disabled: true + +We can also refer to a Lua script in the filter configuration by specifying a name in LuaPerRoute. +The ``GLOBAL`` Lua script will be overridden by the referenced script: + +.. code-block:: yaml + + per_filter_config: + envoy.filters.http.lua: + name: hello.lua + +.. attention:: + + The name ``GLOBAL`` is reserved for :ref:`Lua.inline_code + `. Therefore, do not use + ``GLOBAL`` as name for other Lua scripts. + + Script examples --------------- diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 5710fa2114fb..01cbe59e6c84 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -110,6 +110,7 @@ New Features interested in; behavior is allowed based on new "envoy.lrs.supports_send_all_clusters" capability in :ref:`client_features` field. * lrs: updated to allow to explicitly set the API version of gRPC service endpoint and message to be used. +* lua: added :ref:`per route config ` for Lua filter. * lua: added tracing to the ``httpCall()`` API. * metrics service: added added :ref:`API version ` to explicitly set the version of gRPC service endpoint and message to be used. * network filters: added a :ref:`postgres proxy filter `. 
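Because the most specific per-filter config is the one that applies (per the resolution logic added
in this patch), a virtual host can disable the Lua filter while an individual route opts back in by
naming a registered script. A minimal sketch, assuming the typed_per_filter_config form; the
virtual host, route, cluster, and script names are illustrative:

  virtual_hosts:
  - name: example_vhost
    domains: ["*"]
    typed_per_filter_config:
      envoy.filters.http.lua:
        "@type": type.googleapis.com/envoy.extensions.filters.http.lua.v3.LuaPerRoute
        disabled: true            # Lua is off for this vhost by default
    routes:
    - match: { prefix: "/with-lua" }
      route: { cluster: some_cluster }
      typed_per_filter_config:
        envoy.filters.http.lua:
          "@type": type.googleapis.com/envoy.extensions.filters.http.lua.v3.LuaPerRoute
          name: hello.lua         # this route runs the named script despite the vhost-level disable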
diff --git a/generated_api_shadow/envoy/extensions/filters/http/lua/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/lua/v3/BUILD index 69390e69786a..8878a585f46d 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/lua/v3/BUILD +++ b/generated_api_shadow/envoy/extensions/filters/http/lua/v3/BUILD @@ -6,6 +6,7 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ + "//envoy/config/core/v3:pkg", "//envoy/config/filter/http/lua/v2:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], diff --git a/generated_api_shadow/envoy/extensions/filters/http/lua/v3/lua.proto b/generated_api_shadow/envoy/extensions/filters/http/lua/v3/lua.proto index da6b0c09a0f6..10ac92e83b01 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/lua/v3/lua.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/lua/v3/lua.proto @@ -2,6 +2,8 @@ syntax = "proto3"; package envoy.extensions.filters.http.lua.v3; +import "envoy/config/core/v3/base.proto"; + import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -24,4 +26,37 @@ message Lua { // be properly escaped. YAML configuration may be easier to read since YAML supports multi-line // strings so complex scripts can be easily expressed inline in the configuration. string inline_code = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Map of named Lua source codes that can be referenced in :ref:` LuaPerRoute + // `. The Lua source codes can be + // loaded from inline string or local files. + // + // Example: + // + // .. code-block:: yaml + // + // source_codes: + // hello.lua: + // inline_string: | + // function envoy_on_response(response_handle) + // -- Do something. + // end + // world.lua: + // filename: /etc/lua/world.lua + // + map source_codes = 2; +} + +message LuaPerRoute { + oneof override { + option (validate.required) = true; + + // Disable the Lua filter for this particular vhost or route. If disabled is specified in + // multiple per-filter-configs, the most specific one will be used. + bool disabled = 1 [(validate.rules).bool = {const: true}]; + + // A name of a Lua source code stored in + // :ref:`Lua.source_codes `. 
+ string name = 2 [(validate.rules).string = {min_len: 1}]; + } } diff --git a/source/extensions/filters/http/lua/BUILD b/source/extensions/filters/http/lua/BUILD index 9df156c1eb32..657e3472a88f 100644 --- a/source/extensions/filters/http/lua/BUILD +++ b/source/extensions/filters/http/lua/BUILD @@ -23,12 +23,14 @@ envoy_cc_library( "//include/envoy/upstream:cluster_manager_interface", "//source/common/buffer:buffer_lib", "//source/common/common:enum_to_int", + "//source/common/config:datasource_lib", "//source/common/crypto:utility_lib", "//source/common/http:message_lib", "//source/extensions/common:utility_lib", "//source/extensions/filters/common/lua:lua_lib", "//source/extensions/filters/common/lua:wrappers_lib", "//source/extensions/filters/http:well_known_names", + "@envoy_api//envoy/extensions/filters/http/lua/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/filters/http/lua/config.cc b/source/extensions/filters/http/lua/config.cc index 9f1990ea346d..0166b9ed921a 100644 --- a/source/extensions/filters/http/lua/config.cc +++ b/source/extensions/filters/http/lua/config.cc @@ -15,12 +15,19 @@ Http::FilterFactoryCb LuaFilterConfig::createFilterFactoryFromProtoTyped( const envoy::extensions::filters::http::lua::v3::Lua& proto_config, const std::string&, Server::Configuration::FactoryContext& context) { FilterConfigConstSharedPtr filter_config(new FilterConfig{ - proto_config.inline_code(), context.threadLocal(), context.clusterManager()}); + proto_config, context.threadLocal(), context.clusterManager(), context.api()}); return [filter_config](Http::FilterChainFactoryCallbacks& callbacks) -> void { callbacks.addStreamFilter(std::make_shared(filter_config)); }; } +Router::RouteSpecificFilterConfigConstSharedPtr +LuaFilterConfig::createRouteSpecificFilterConfigTyped( + const envoy::extensions::filters::http::lua::v3::LuaPerRoute& proto_config, + Server::Configuration::ServerFactoryContext& context, ProtobufMessage::ValidationVisitor&) { + return std::make_shared(proto_config, context.threadLocal(), context.api()); +} + /** * Static registration for the Lua filter. @see RegisterFactory. */ diff --git a/source/extensions/filters/http/lua/config.h b/source/extensions/filters/http/lua/config.h index d13eeb1b757b..b2057c532f2b 100644 --- a/source/extensions/filters/http/lua/config.h +++ b/source/extensions/filters/http/lua/config.h @@ -14,7 +14,9 @@ namespace Lua { /** * Config registration for the Lua filter. @see NamedHttpFilterConfigFactory. */ -class LuaFilterConfig : public Common::FactoryBase { +class LuaFilterConfig + : public Common::FactoryBase { public: LuaFilterConfig() : FactoryBase(HttpFilterNames::get().Lua) {} @@ -22,6 +24,11 @@ class LuaFilterConfig : public Common::FactoryBase(); + lua_state_.registerType(); + lua_state_.registerType(); + lua_state_.registerType(); + lua_state_.registerType(); + lua_state_.registerType(); + lua_state_.registerType(); + lua_state_.registerType(); + lua_state_.registerType(); + lua_state_.registerType(); + lua_state_.registerType(); + lua_state_.registerType(); + + request_function_slot_ = lua_state_.registerGlobal("envoy_on_request"); + if (lua_state_.getGlobalRef(request_function_slot_) == LUA_REFNIL) { + ENVOY_LOG(info, "envoy_on_request() function not found. Lua filter will not hook requests."); + } + + response_function_slot_ = lua_state_.registerGlobal("envoy_on_response"); + if (lua_state_.getGlobalRef(response_function_slot_) == LUA_REFNIL) { + ENVOY_LOG(info, "envoy_on_response() function not found. 
Lua filter will not hook responses."); + } +} + StreamHandleWrapper::StreamHandleWrapper(Filters::Common::Lua::Coroutine& coroutine, Http::HeaderMap& headers, bool end_stream, Filter& filter, FilterCallbacks& callbacks) @@ -569,33 +596,30 @@ int StreamHandleWrapper::luaImportPublicKey(lua_State* state) { return 1; } -FilterConfig::FilterConfig(const std::string& lua_code, ThreadLocal::SlotAllocator& tls, - Upstream::ClusterManager& cluster_manager) - : cluster_manager_(cluster_manager), lua_state_(lua_code, tls) { - lua_state_.registerType(); - lua_state_.registerType(); - lua_state_.registerType(); - lua_state_.registerType(); - lua_state_.registerType(); - lua_state_.registerType(); - lua_state_.registerType(); - lua_state_.registerType(); - lua_state_.registerType(); - lua_state_.registerType(); - lua_state_.registerType(); - lua_state_.registerType(); - - request_function_slot_ = lua_state_.registerGlobal("envoy_on_request"); - if (lua_state_.getGlobalRef(request_function_slot_) == LUA_REFNIL) { - ENVOY_LOG(info, "envoy_on_request() function not found. Lua filter will not hook requests."); +FilterConfig::FilterConfig(const envoy::extensions::filters::http::lua::v3::Lua& proto_config, + ThreadLocal::SlotAllocator& tls, + Upstream::ClusterManager& cluster_manager, Api::Api& api) + : cluster_manager_(cluster_manager) { + auto global_setup_ptr = std::make_unique(proto_config.inline_code(), tls); + if (global_setup_ptr) { + per_lua_code_setups_map_[GLOBAL_SCRIPT_NAME] = std::move(global_setup_ptr); } - response_function_slot_ = lua_state_.registerGlobal("envoy_on_response"); - if (lua_state_.getGlobalRef(response_function_slot_) == LUA_REFNIL) { - ENVOY_LOG(info, "envoy_on_response() function not found. Lua filter will not hook responses."); + for (const auto& source : proto_config.source_codes()) { + const std::string code = Config::DataSource::read(source.second, true, api); + auto per_lua_code_setup_ptr = std::make_unique(code, tls); + if (!per_lua_code_setup_ptr) { + continue; + } + per_lua_code_setups_map_[source.first] = std::move(per_lua_code_setup_ptr); } } +FilterConfigPerRoute::FilterConfigPerRoute( + const envoy::extensions::filters::http::lua::v3::LuaPerRoute& config, + ThreadLocal::SlotAllocator&, Api::Api&) + : disabled_(config.disabled()), name_(config.name()) {} + void Filter::onDestroy() { destroyed_ = true; if (request_stream_wrapper_.get()) { @@ -609,12 +633,14 @@ void Filter::onDestroy() { Http::FilterHeadersStatus Filter::doHeaders(StreamHandleRef& handle, Filters::Common::Lua::CoroutinePtr& coroutine, FilterCallbacks& callbacks, int function_ref, - Http::HeaderMap& headers, bool end_stream) { + PerLuaCodeSetup* setup, Http::HeaderMap& headers, + bool end_stream) { if (function_ref == LUA_REFNIL) { return Http::FilterHeadersStatus::Continue; } + ASSERT(setup); + coroutine = setup->createCoroutine(); - coroutine = config_->createCoroutine(); handle.reset(StreamHandleWrapper::create(coroutine->luaState(), *coroutine, headers, end_stream, *this, callbacks), true); diff --git a/source/extensions/filters/http/lua/lua_filter.h b/source/extensions/filters/http/lua/lua_filter.h index ce0e27cf5ec7..24909a95d649 100644 --- a/source/extensions/filters/http/lua/lua_filter.h +++ b/source/extensions/filters/http/lua/lua_filter.h @@ -1,9 +1,11 @@ #pragma once +#include "envoy/extensions/filters/http/lua/v3/lua.pb.h" #include "envoy/http/filter.h" #include "envoy/upstream/cluster_manager.h" #include "common/crypto/utility.h" +#include "common/http/utility.h" #include 
"extensions/common/utility.h" #include "extensions/filters/common/lua/wrappers.h" @@ -15,6 +17,31 @@ namespace Extensions { namespace HttpFilters { namespace Lua { +constexpr char GLOBAL_SCRIPT_NAME[] = "GLOBAL"; + +class PerLuaCodeSetup : Logger::Loggable { +public: + PerLuaCodeSetup(const std::string& lua_code, ThreadLocal::SlotAllocator& tls); + + Extensions::Filters::Common::Lua::CoroutinePtr createCoroutine() { + return lua_state_.createCoroutine(); + } + + int requestFunctionRef() { return lua_state_.getGlobalRef(request_function_slot_); } + int responseFunctionRef() { return lua_state_.getGlobalRef(response_function_slot_); } + + uint64_t runtimeBytesUsed() { return lua_state_.runtimeBytesUsed(); } + void runtimeGC() { return lua_state_.runtimeGC(); } + +private: + uint64_t request_function_slot_{}; + uint64_t response_function_slot_{}; + + Filters::Common::Lua::ThreadLocalState lua_state_; +}; + +using PerLuaCodeSetupPtr = std::unique_ptr; + /** * Callbacks used by a stream handler to access the filter. */ @@ -299,24 +326,67 @@ class NoopCallbacks : public Http::AsyncClient::Callbacks { */ class FilterConfig : Logger::Loggable { public: - FilterConfig(const std::string& lua_code, ThreadLocal::SlotAllocator& tls, - Upstream::ClusterManager& cluster_manager); - Filters::Common::Lua::CoroutinePtr createCoroutine() { return lua_state_.createCoroutine(); } - int requestFunctionRef() { return lua_state_.getGlobalRef(request_function_slot_); } - int responseFunctionRef() { return lua_state_.getGlobalRef(response_function_slot_); } - uint64_t runtimeBytesUsed() { return lua_state_.runtimeBytesUsed(); } - void runtimeGC() { return lua_state_.runtimeGC(); } + FilterConfig(const envoy::extensions::filters::http::lua::v3::Lua& proto_config, + ThreadLocal::SlotAllocator& tls, Upstream::ClusterManager& cluster_manager, + Api::Api& api); + + PerLuaCodeSetup* perLuaCodeSetup(const std::string& name) const { + const auto iter = per_lua_code_setups_map_.find(name); + if (iter != per_lua_code_setups_map_.end()) { + return iter->second.get(); + } + return nullptr; + } Upstream::ClusterManager& cluster_manager_; private: - Filters::Common::Lua::ThreadLocalState lua_state_; - uint64_t request_function_slot_; - uint64_t response_function_slot_; + absl::flat_hash_map per_lua_code_setups_map_; }; using FilterConfigConstSharedPtr = std::shared_ptr; +/** + * Route configuration for the filter. 
+ */ +class FilterConfigPerRoute : public Router::RouteSpecificFilterConfig { +public: + FilterConfigPerRoute(const envoy::extensions::filters::http::lua::v3::LuaPerRoute& config, + ThreadLocal::SlotAllocator& tls, Api::Api& api); + + bool disabled() const { return disabled_; } + const std::string& name() const { return name_; } + +private: + const bool disabled_; + const std::string name_; +}; + +namespace { + +PerLuaCodeSetup* getPerLuaCodeSetup(const FilterConfig* filter_config, + Http::StreamFilterCallbacks* callbacks) { + const FilterConfigPerRoute* config_per_route = nullptr; + if (callbacks && callbacks->route()) { + config_per_route = Http::Utility::resolveMostSpecificPerFilterConfig( + HttpFilterNames::get().Lua, callbacks->route()); + } + + if (config_per_route != nullptr) { + if (config_per_route->disabled()) { + return nullptr; + } else if (!config_per_route->name().empty()) { + ASSERT(filter_config); + return filter_config->perLuaCodeSetup(config_per_route->name()); + } + return nullptr; + } + ASSERT(filter_config); + return filter_config->perLuaCodeSetup(GLOBAL_SCRIPT_NAME); +} + +} // namespace + // TODO(mattklein123): Filter stats. /** @@ -336,8 +406,10 @@ class Filter : public Http::StreamFilter, Logger::Loggable { // Http::StreamDecoderFilter Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers, bool end_stream) override { - return doHeaders(request_stream_wrapper_, request_coroutine_, decoder_callbacks_, - config_->requestFunctionRef(), headers, end_stream); + PerLuaCodeSetup* setup = getPerLuaCodeSetup(config_.get(), decoder_callbacks_.callbacks_); + const int function_ref = setup ? setup->requestFunctionRef() : LUA_REFNIL; + return doHeaders(request_stream_wrapper_, request_coroutine_, decoder_callbacks_, function_ref, + setup, headers, end_stream); } Http::FilterDataStatus decodeData(Buffer::Instance& data, bool end_stream) override { return doData(request_stream_wrapper_, data, end_stream); @@ -355,8 +427,10 @@ class Filter : public Http::StreamFilter, Logger::Loggable { } Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap& headers, bool end_stream) override { + PerLuaCodeSetup* setup = getPerLuaCodeSetup(config_.get(), decoder_callbacks_.callbacks_); + const int function_ref = setup ? 
setup->responseFunctionRef() : LUA_REFNIL; return doHeaders(response_stream_wrapper_, response_coroutine_, encoder_callbacks_, - config_->responseFunctionRef(), headers, end_stream); + function_ref, setup, headers, end_stream); } Http::FilterDataStatus encodeData(Buffer::Instance& data, bool end_stream) override { return doData(response_stream_wrapper_, data, end_stream); @@ -421,7 +495,8 @@ class Filter : public Http::StreamFilter, Logger::Loggable { Http::FilterHeadersStatus doHeaders(StreamHandleRef& handle, Filters::Common::Lua::CoroutinePtr& coroutine, FilterCallbacks& callbacks, int function_ref, - Http::HeaderMap& headers, bool end_stream); + PerLuaCodeSetup* setup, Http::HeaderMap& headers, + bool end_stream); Http::FilterDataStatus doData(StreamHandleRef& handle, Buffer::Instance& data, bool end_stream); Http::FilterTrailersStatus doTrailers(StreamHandleRef& handle, Http::HeaderMap& trailers); diff --git a/test/extensions/filters/http/lua/BUILD b/test/extensions/filters/http/lua/BUILD index 49889a1c90eb..cbcfc8223c84 100644 --- a/test/extensions/filters/http/lua/BUILD +++ b/test/extensions/filters/http/lua/BUILD @@ -19,6 +19,7 @@ envoy_extension_cc_test( deps = [ "//source/common/stream_info:stream_info_lib", "//source/extensions/filters/http/lua:lua_filter_lib", + "//test/mocks/api:api_mocks", "//test/mocks/http:http_mocks", "//test/mocks/network:network_mocks", "//test/mocks/ssl:ssl_mocks", diff --git a/test/extensions/filters/http/lua/lua_filter_test.cc b/test/extensions/filters/http/lua/lua_filter_test.cc index 6cfa049e31d5..807f9cbefba3 100644 --- a/test/extensions/filters/http/lua/lua_filter_test.cc +++ b/test/extensions/filters/http/lua/lua_filter_test.cc @@ -8,6 +8,7 @@ #include "extensions/filters/http/lua/lua_filter.h" +#include "test/mocks/api/mocks.h" #include "test/mocks/http/mocks.h" #include "test/mocks/network/mocks.h" #include "test/mocks/ssl/mocks.h" @@ -72,11 +73,24 @@ class LuaHttpFilterTest : public testing::Test { ~LuaHttpFilterTest() override { filter_->onDestroy(); } + // Quickly set up a global configuration. In order to avoid extensive modification of existing + // test cases, the existing configuration methods must be compatible. void setup(const std::string& lua_code) { - config_ = std::make_shared(lua_code, tls_, cluster_manager_); + envoy::extensions::filters::http::lua::v3::Lua proto_config; + proto_config.set_inline_code(lua_code); + envoy::extensions::filters::http::lua::v3::LuaPerRoute per_route_proto_config; + setupConfig(proto_config, per_route_proto_config); setupFilter(); } + void setupConfig(envoy::extensions::filters::http::lua::v3::Lua& proto_config, + envoy::extensions::filters::http::lua::v3::LuaPerRoute& per_route_proto_config) { + // Setup filter config for Lua filter. + config_ = std::make_shared(proto_config, tls_, cluster_manager_, api_); + // Setup per route config for Lua filter. 
+ per_route_config_ = std::make_shared(per_route_proto_config, tls_, api_); + } + void setupFilter() { filter_ = std::make_unique(config_); filter_->setDecoderFilterCallbacks(decoder_callbacks_); @@ -96,8 +110,10 @@ class LuaHttpFilterTest : public testing::Test { } NiceMock tls_; + NiceMock api_; Upstream::MockClusterManager cluster_manager_; std::shared_ptr config_; + std::shared_ptr per_route_config_; std::unique_ptr filter_; Http::MockStreamDecoderFilterCallbacks decoder_callbacks_; Http::MockStreamEncoderFilterCallbacks encoder_callbacks_; @@ -183,6 +199,12 @@ class LuaHttpFilterTest : public testing::Test { end end )EOF"}; + + const std::string ADD_HEADERS_SCRIPT{R"EOF( + function envoy_on_request(request_handle) + request_handle:headers():add("hello", "world") + end + )EOF"}; }; // Bad code in initial config. @@ -193,7 +215,12 @@ TEST(LuaHttpFilterConfigTest, BadCode) { NiceMock tls; NiceMock cluster_manager; - EXPECT_THROW_WITH_MESSAGE(FilterConfig(SCRIPT, tls, cluster_manager), + NiceMock api; + + envoy::extensions::filters::http::lua::v3::Lua proto_config; + proto_config.set_inline_code(SCRIPT); + + EXPECT_THROW_WITH_MESSAGE(FilterConfig(proto_config, tls, cluster_manager, api), Filters::Common::Lua::LuaException, "script load error: [string \"...\"]:3: '=' expected near ''"); } @@ -1415,8 +1442,9 @@ TEST_F(LuaHttpFilterTest, ImmediateResponse) { setup(SCRIPT); // Perform a GC and snap bytes currently used by the runtime. - config_->runtimeGC(); - const uint64_t mem_use_at_start = config_->runtimeBytesUsed(); + auto script_config = config_->perLuaCodeSetup(GLOBAL_SCRIPT_NAME); + script_config->runtimeGC(); + const uint64_t mem_use_at_start = script_config->runtimeBytesUsed(); uint64_t num_loops = 2000; #if defined(__has_feature) && (__has_feature(thread_sanitizer)) @@ -1444,8 +1472,8 @@ TEST_F(LuaHttpFilterTest, ImmediateResponse) { // to do a soft comparison here. In my own testing, without a fix for #3570, the memory // usage after is at least 20x higher after 2000 iterations so we just check to see if it's // within 2x. - config_->runtimeGC(); - EXPECT_TRUE(config_->runtimeBytesUsed() < mem_use_at_start * 2); + script_config->runtimeGC(); + EXPECT_TRUE(script_config->runtimeBytesUsed() < mem_use_at_start * 2); } // Respond with bad status. @@ -1923,6 +1951,99 @@ TEST_F(LuaHttpFilterTest, SignatureVerify) { EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true)); } +// Test whether the route configuration can properly disable the Lua filter. 
+TEST_F(LuaHttpFilterTest, LuaFilterDisabled) { + envoy::extensions::filters::http::lua::v3::Lua proto_config; + proto_config.set_inline_code(ADD_HEADERS_SCRIPT); + envoy::extensions::filters::http::lua::v3::LuaPerRoute per_route_proto_config; + per_route_proto_config.set_disabled(true); + + setupConfig(proto_config, per_route_proto_config); + setupFilter(); + + EXPECT_CALL(decoder_callbacks_, clearRouteCache()); + + ON_CALL(decoder_callbacks_.route_->route_entry_, perFilterConfig(HttpFilterNames::get().Lua)) + .WillByDefault(Return(nullptr)); + + Http::TestRequestHeaderMapImpl request_headers_1{{":path", "/"}}; + + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_1, true)); + EXPECT_EQ("world", request_headers_1.get_("hello")); + + ON_CALL(decoder_callbacks_.route_->route_entry_, perFilterConfig(HttpFilterNames::get().Lua)) + .WillByDefault(Return(per_route_config_.get())); + + Http::TestRequestHeaderMapImpl request_headers_2{{":path", "/"}}; + + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_2, true)); + EXPECT_EQ(nullptr, request_headers_2.get(Http::LowerCaseString("hello"))); +} + +// Test whether the route can directly reuse the Lua code in the global configuration. +TEST_F(LuaHttpFilterTest, LuaFilterRefSourceCodes) { + const std::string SCRIPT_FOR_ROUTE_ONE{R"EOF( + function envoy_on_request(request_handle) + request_handle:headers():add("route_info", "This request is routed by ROUTE_ONE"); + end + )EOF"}; + const std::string SCRIPT_FOR_ROUTE_TWO{R"EOF( + function envoy_on_request(request_handle) + request_handle:headers():add("route_info", "This request is routed by ROUTE_TWO"); + end + )EOF"}; + EXPECT_CALL(decoder_callbacks_, clearRouteCache()); + envoy::extensions::filters::http::lua::v3::Lua proto_config; + proto_config.set_inline_code(ADD_HEADERS_SCRIPT); + envoy::config::core::v3::DataSource source1, source2; + source1.set_inline_string(SCRIPT_FOR_ROUTE_ONE); + source2.set_inline_string(SCRIPT_FOR_ROUTE_TWO); + proto_config.mutable_source_codes()->insert({"route_one.lua", source1}); + proto_config.mutable_source_codes()->insert({"route_two.lua", source2}); + + envoy::extensions::filters::http::lua::v3::LuaPerRoute per_route_proto_config; + per_route_proto_config.set_name("route_two.lua"); + + setupConfig(proto_config, per_route_proto_config); + setupFilter(); + + ON_CALL(decoder_callbacks_.route_->route_entry_, perFilterConfig(HttpFilterNames::get().Lua)) + .WillByDefault(Return(per_route_config_.get())); + + Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}}; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true)); + EXPECT_EQ("This request is routed by ROUTE_TWO", request_headers.get_("route_info")); +} + +// Lua filter do nothing when the referenced name does not exist. 
+TEST_F(LuaHttpFilterTest, LuaFilterRefSourceCodeNotExist) { + const std::string SCRIPT_FOR_ROUTE_ONE{R"EOF( + function envoy_on_request(request_handle) + request_handle:headers():add("route_info", "This request is routed by ROUTE_ONE"); + end + )EOF"}; + + envoy::extensions::filters::http::lua::v3::Lua proto_config; + proto_config.set_inline_code(ADD_HEADERS_SCRIPT); + envoy::config::core::v3::DataSource source1; + source1.set_inline_string(SCRIPT_FOR_ROUTE_ONE); + proto_config.mutable_source_codes()->insert({"route_one.lua", source1}); + + envoy::extensions::filters::http::lua::v3::LuaPerRoute per_route_proto_config; + // The global source codes do not contain a script named 'route_two.lua'. + per_route_proto_config.set_name("route_two.lua"); + + setupConfig(proto_config, per_route_proto_config); + setupFilter(); + + ON_CALL(decoder_callbacks_.route_->route_entry_, perFilterConfig(HttpFilterNames::get().Lua)) + .WillByDefault(Return(per_route_config_.get())); + + Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}}; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true)); + EXPECT_EQ(nullptr, request_headers.get(Http::LowerCaseString("hello"))); +} + } // namespace } // namespace Lua } // namespace HttpFilters diff --git a/test/extensions/filters/http/lua/lua_integration_test.cc b/test/extensions/filters/http/lua/lua_integration_test.cc index cfc4b59ec4a2..b1c7bb61b0f8 100644 --- a/test/extensions/filters/http/lua/lua_integration_test.cc +++ b/test/extensions/filters/http/lua/lua_integration_test.cc @@ -29,15 +29,8 @@ class LuaIntegrationTest : public testing::TestWithParamadd_clusters(); - lua_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]); - lua_cluster->set_name("lua_cluster"); - - auto* alt_cluster = bootstrap.mutable_static_resources()->add_clusters(); - alt_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]); - alt_cluster->set_name("alt_cluster"); - }); + // Create static clusters. + createClusters(); config_helper_.addConfigModifier( [domain]( @@ -79,6 +72,31 @@ class LuaIntegrationTest : public testing::TestWithParamadd_clusters(); + lua_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]); + lua_cluster->set_name("lua_cluster"); + + auto* alt_cluster = bootstrap.mutable_static_resources()->add_clusters(); + alt_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]); + alt_cluster->set_name("alt_cluster"); + }); + } + void cleanup() { codec_client_->close(); if (fake_lua_connection_ != nullptr) { @@ -576,5 +594,140 @@ name: lua cleanup(); } +// Test whether LuaPerRoute works properly. Since this test is mainly for configuration, the Lua +// script can be very simple. 
+TEST_P(LuaIntegrationTest, BasicTestOfLuaPerRoute) { + const std::string FILTER_AND_CODE = + R"EOF( +name: lua +typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.lua.v3.Lua + inline_code: | + function envoy_on_request(request_handle) + request_handle:headers():add("code", "code_from_global") + end + source_codes: + hello.lua: + inline_string: | + function envoy_on_request(request_handle) + request_handle:headers():add("code", "code_from_hello") + end + byebye.lua: + inline_string: | + function envoy_on_request(request_handle) + request_handle:headers():add("code", "code_from_byebye") + end +)EOF"; + const std::string INITIAL_ROUTE_CONFIG = + R"EOF( +name: basic_lua_routes +virtual_hosts: +- name: rds_vhost_1 + domains: ["lua.per.route"] + routes: + - match: + prefix: "/lua/per/route/default" + route: + cluster: lua_cluster + - match: + prefix: "/lua/per/route/disabled" + route: + cluster: lua_cluster + typed_per_filter_config: + envoy.filters.http.lua: + "@type": type.googleapis.com/envoy.extensions.filters.http.lua.v3.LuaPerRoute + disabled: true + - match: + prefix: "/lua/per/route/hello" + route: + cluster: lua_cluster + typed_per_filter_config: + envoy.filters.http.lua: + "@type": type.googleapis.com/envoy.extensions.filters.http.lua.v3.LuaPerRoute + name: hello.lua + - match: + prefix: "/lua/per/route/byebye" + route: + cluster: lua_cluster + typed_per_filter_config: + envoy.filters.http.lua: + "@type": type.googleapis.com/envoy.extensions.filters.http.lua.v3.LuaPerRoute + name: byebye.lua + - match: + prefix: "/lua/per/route/nocode" + route: + cluster: lua_cluster + typed_per_filter_config: + envoy.filters.http.lua: + "@type": type.googleapis.com/envoy.extensions.filters.http.lua.v3.LuaPerRoute + name: nocode.lua +)EOF"; + + initializeWithYaml(FILTER_AND_CODE, INITIAL_ROUTE_CONFIG); + + codec_client_ = makeHttpConnection(lookupPort("http")); + + auto check_request = [this](const Http::TestRequestHeaderMapImpl& request_headers, + const std::string& expected_value) { + auto response = codec_client_->makeHeaderOnlyRequest(request_headers); + waitForNextUpstreamRequest(1); + + auto* entry = upstream_request_->headers().get(Http::LowerCaseString("code")); + if (!expected_value.empty()) { + EXPECT_EQ(expected_value, entry->value().getStringView()); + } else { + EXPECT_EQ(nullptr, entry); + } + + upstream_request_->encodeHeaders(default_response_headers_, true); + response->waitForEndStream(); + + EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().getStatusValue()); + }; + + // Lua code defined in 'inline_code' will be executed by default. + Http::TestRequestHeaderMapImpl default_headers{{":method", "GET"}, + {":path", "/lua/per/route/default"}, + {":scheme", "http"}, + {":authority", "lua.per.route"}, + {"x-forwarded-for", "10.0.0.1"}}; + check_request(default_headers, "code_from_global"); + + // Test whether LuaPerRoute can disable the Lua filter. + Http::TestRequestHeaderMapImpl disabled_headers{{":method", "GET"}, + {":path", "/lua/per/route/disabled"}, + {":scheme", "http"}, + {":authority", "lua.per.route"}, + {"x-forwarded-for", "10.0.0.1"}}; + check_request(disabled_headers, ""); + + // Test whether LuaPerRoute can correctly reference Lua code defined in filter config. 
+ Http::TestRequestHeaderMapImpl hello_headers{{":method", "GET"}, + {":path", "/lua/per/route/hello"}, + {":scheme", "http"}, + {":authority", "lua.per.route"}, + {"x-forwarded-for", "10.0.0.1"}}; + + check_request(hello_headers, "code_from_hello"); + + Http::TestRequestHeaderMapImpl byebye_headers{{":method", "GET"}, + {":path", "/lua/per/route/byebye"}, + {":scheme", "http"}, + {":authority", "lua.per.route"}, + {"x-forwarded-for", "10.0.0.1"}}; + check_request(byebye_headers, "code_from_byebye"); + + // When the name referenced by LuaPerRoute does not exist, Lua filter does nothing. + Http::TestRequestHeaderMapImpl nocode_headers{{":method", "GET"}, + {":path", "/lua/per/route/nocode"}, + {":scheme", "http"}, + {":authority", "lua.per.route"}, + {"x-forwarded-for", "10.0.0.1"}}; + + check_request(nocode_headers, ""); + cleanup(); +} + } // namespace } // namespace Envoy From d789ca08b2f25879589fa1d608cbc72915e02b51 Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Wed, 1 Jul 2020 16:12:51 -0700 Subject: [PATCH 513/909] bazelci: fix bazel CI with coverage (#11859) Signed-off-by: Lizan Zhou --- .bazelci/presubmit.yml | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/.bazelci/presubmit.yml b/.bazelci/presubmit.yml index 196fca8c5b4e..ab83156fbc47 100644 --- a/.bazelci/presubmit.yml +++ b/.bazelci/presubmit.yml @@ -9,18 +9,22 @@ tasks: name: "RBE" platform: ubuntu1804 test_targets: - - "//test/..." + - "//test/common/common/..." + - "//test/integration/..." + - "//test/exe/..." test_flags: - - "--config=remote-clang" + - "--config=remote-clang-libc++" - "--config=remote-ci" - "--jobs=75" coverage: name: "Coverage" platform: ubuntu1804 + shell_commands: + - "bazel/setup_clang.sh /usr/lib/llvm-10" test_targets: + - "//test/common/common/..." - "//test/integration/..." - "//test/exe/..." test_flags: - - "--action_env=CC=clang" - - "--action_env=CXX=clang++" - "--config=coverage" + - "--config=clang" From c2f9d9907c34d00f7a7b0b7090f400bb74b51b8a Mon Sep 17 00:00:00 2001 From: "Mark D. Roth" Date: Wed, 1 Jul 2020 16:14:04 -0700 Subject: [PATCH 514/909] Back-port SelfConfigSource.transport_api_version field to v2. (#11824) Signed-off-by: Mark D. Roth --- api/envoy/api/v2/core/config_source.proto | 3 +++ generated_api_shadow/envoy/api/v2/core/config_source.proto | 3 +++ 2 files changed, 6 insertions(+) diff --git a/api/envoy/api/v2/core/config_source.proto b/api/envoy/api/v2/core/config_source.proto index 8bbb961c1a32..7032b2c10d87 100644 --- a/api/envoy/api/v2/core/config_source.proto +++ b/api/envoy/api/v2/core/config_source.proto @@ -106,6 +106,9 @@ message AggregatedConfigSource { // set in :ref:`ConfigSource ` can be used to // specify that other data can be obtained from the same server. message SelfConfigSource { + // API version for xDS transport protocol. This describes the xDS gRPC/REST + // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire. + ApiVersion transport_api_version = 1 [(validate.rules).enum = {defined_only: true}]; } // Rate Limit settings to be applied for discovery requests made by Envoy. 
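For reference, a minimal sketch of how the back-ported field might be used from a v2 route discovery configuration; only `transport_api_version` and the `ApiVersion` enum come from this change, while the surrounding keys (`rds`, `route_config_name`, `config_source`, `self`) are the usual ConfigSource wiring and are illustrative rather than part of this patch:

  # Hypothetical RDS entry inside an HttpConnectionManager config (v2 API).
  # `self` points the subscription back at the same management server the
  # enclosing resource came from; `transport_api_version: V3` requests the
  # v3 xDS transport protocol on the wire.
  rds:
    route_config_name: local_route
    config_source:
      self:
        transport_api_version: V3

Leaving the field unset keeps the proto default (AUTO), so existing self-sourced configurations are unaffected.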
diff --git a/generated_api_shadow/envoy/api/v2/core/config_source.proto b/generated_api_shadow/envoy/api/v2/core/config_source.proto index 8bbb961c1a32..7032b2c10d87 100644 --- a/generated_api_shadow/envoy/api/v2/core/config_source.proto +++ b/generated_api_shadow/envoy/api/v2/core/config_source.proto @@ -106,6 +106,9 @@ message AggregatedConfigSource { // set in :ref:`ConfigSource ` can be used to // specify that other data can be obtained from the same server. message SelfConfigSource { + // API version for xDS transport protocol. This describes the xDS gRPC/REST + // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire. + ApiVersion transport_api_version = 1 [(validate.rules).enum = {defined_only: true}]; } // Rate Limit settings to be applied for discovery requests made by Envoy. From de247a8140f4230726eaa4ea732d6d01bc530911 Mon Sep 17 00:00:00 2001 From: Rei Shimizu Date: Thu, 2 Jul 2020 08:57:59 +0900 Subject: [PATCH 515/909] dynamic_forward_proxy: DNS Cache circuit breaker (#11028) In the current implementation of the DNS cache, circuit breaker is derived from cluster config. It seems unnatural. In this PR, I implemented the DNS cache specific circuit breaker by adding resource manager to that. Additional Description: Risk Level: Mid Testing: Unit / Integration Docs Changes: Added Release Notes: Added Signed-off-by: shikugawa --- .../dynamic_forward_proxy/v3/dns_cache.proto | 14 ++- .../dynamic_forward_proxy_filter.rst | 18 +++ docs/root/version_history/current.rst | 4 + .../dynamic_forward_proxy/v3/dns_cache.proto | 14 ++- include/envoy/common/resource.h | 4 + source/common/runtime/runtime_features.cc | 1 + .../common/upstream/resource_manager_impl.h | 95 ++++++++-------- .../clusters/dynamic_forward_proxy/cluster.cc | 2 +- .../common/dynamic_forward_proxy/BUILD | 18 +++ .../common/dynamic_forward_proxy/dns_cache.h | 38 ++++++- .../dynamic_forward_proxy/dns_cache_impl.cc | 23 +++- .../dynamic_forward_proxy/dns_cache_impl.h | 9 +- .../dns_cache_manager_impl.cc | 10 +- .../dns_cache_manager_impl.h | 13 ++- .../dns_cache_resource_manager.cc | 26 +++++ .../dns_cache_resource_manager.h | 44 ++++++++ .../filters/http/dynamic_forward_proxy/BUILD | 1 + .../http/dynamic_forward_proxy/config.cc | 2 +- .../dynamic_forward_proxy/proxy_filter.cc | 23 +++- .../sni_dynamic_forward_proxy/config.cc | 2 +- .../common/dynamic_forward_proxy/BUILD | 19 +++- .../dns_cache_impl_test.cc | 34 +++++- .../dns_cache_resource_manager_test.cc | 77 +++++++++++++ .../common/dynamic_forward_proxy/mocks.cc | 9 +- .../common/dynamic_forward_proxy/mocks.h | 25 ++++- .../filters/http/dynamic_forward_proxy/BUILD | 3 + .../proxy_filter_integration_test.cc | 104 ++++++++++++++++++ .../proxy_filter_test.cc | 72 ++++++++++++ test/mocks/upstream/mocks.cc | 5 + test/mocks/upstream/mocks.h | 14 +++ 30 files changed, 644 insertions(+), 79 deletions(-) create mode 100644 source/extensions/common/dynamic_forward_proxy/dns_cache_resource_manager.cc create mode 100644 source/extensions/common/dynamic_forward_proxy/dns_cache_resource_manager.h create mode 100644 test/extensions/common/dynamic_forward_proxy/dns_cache_resource_manager_test.cc diff --git a/api/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto b/api/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto index 7c72af35af33..d801e83fa008 100644 --- a/api/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto +++ b/api/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto @@ -18,9 +18,16 @@ option 
(udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Dynamic forward proxy common configuration]
+// Configuration of circuit breakers for resolver.
+message DnsCacheCircuitBreakers {
+ // The maximum number of pending requests that Envoy will allow to the
+ // resolver. If not specified, the default is 1024.
+ google.protobuf.UInt32Value max_pending_requests = 1;
+}
+ // Configuration for the dynamic forward proxy DNS cache. See the :ref:`architecture overview // ` for more information.
-// [#next-free-field: 7]
+// [#next-free-field: 8]
message DnsCacheConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.common.dynamic_forward_proxy.v2alpha.DnsCacheConfig";
@@ -83,4 +90,9 @@ message DnsCacheConfig { // this is used as the cache's DNS refresh rate when DNS requests are failing. If this setting is // not specified, the failure refresh rate defaults to the dns_refresh_rate. config.cluster.v3.Cluster.RefreshRate dns_failure_refresh_rate = 6;
+
+ // The config of circuit breakers for resolver. It provides a configurable threshold.
+ // If `envoy.reloadable_features.enable_dns_cache_circuit_breakers` is enabled,
+ // envoy will use dns cache circuit breakers with default settings even if this value is not set.
+ DnsCacheCircuitBreakers dns_cache_circuit_breaker = 7; }
diff --git a/docs/root/configuration/http/http_filters/dynamic_forward_proxy_filter.rst b/docs/root/configuration/http/http_filters/dynamic_forward_proxy_filter.rst index 54613fdde1a9..d3f3d57dc215 100644
--- a/docs/root/configuration/http/http_filters/dynamic_forward_proxy_filter.rst
+++ b/docs/root/configuration/http/http_filters/dynamic_forward_proxy_filter.rst
@@ -32,6 +32,13 @@ host when forwarding. See the example below within the configured routes. the certificate chain. Additionally, Envoy will automatically perform SAN verification for the resolved host name as well as specify the host name via SNI.
+.. _dns_cache_circuit_breakers:
+
+ The dynamic forward proxy uses circuit breakers built into the DNS cache, configured via
+ :ref:`DNS cache circuit breakers `. By default, this behavior is enabled by the runtime feature `envoy.reloadable_features.enable_dns_cache_circuit_breakers`.
+ If this runtime feature is disabled, the cluster's circuit breakers will be used even if
+ :ref:`DNS cache circuit breakers ` are configured.
+
.. code-block:: yaml admin:
@@ -119,3 +126,14 @@ namespace. host_added, Counter, Number of hosts that have been added to the cache. host_removed, Counter, Number of hosts that have been removed from the cache. num_hosts, Gauge, Number of hosts that are currently in the cache.
+ dns_rq_pending_overflow, Counter, Number of DNS pending request overflows.
+
+The dynamic forward proxy DNS cache circuit breakers output statistics in the dns_cache.<dns_cache_name>.circuit_breakers
+namespace.
+
+.. csv-table::
+ :header: Name, Type, Description
+ :widths: 1, 1, 2
+
+ rq_pending_open, Gauge, Whether the pending requests circuit breaker is closed (0) or open (1)
+ rq_pending_remaining, Gauge, Number of remaining requests until the circuit breaker opens
\ No newline at end of file
diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 01cbe59e6c84..e56dd5a66e50 100644
--- a/docs/root/version_history/current.rst
+++ b/docs/root/version_history/current.rst
@@ -79,6 +79,9 @@ New Features
* config: added :ref:`version_text ` stat that reflects xDS version.
* decompressor: generic :ref:`decompressor ` filter exposed to users. * dynamic forward proxy: added :ref:`SNI based dynamic forward proxy ` support. +* dynamic forward proxy: added configurable :ref:`circuit breakers ` for resolver on DNS cache. + This behavior can be temporarily disabled by the runtime feature `envoy.reloadable_features.enable_dns_cache_circuit_breakers`. + If this runtime feature is disabled, the upstream circuit breakers for the cluster will be used even if the :ref:`DNS Cache circuit breakers ` are configured. * dynamic forward proxy: added :ref:`allow_insecure_cluster_options` to allow disabling of auto_san_validation and auto_sni. * ext_authz filter: added :ref:`v2 deny_at_disable `, :ref:`v3 deny_at_disable `. This allows to force deny for protected path while filter gets disabled, by setting this key to true. * ext_authz filter: added API version field for both :ref:`HTTP ` @@ -158,3 +161,4 @@ Deprecated in :ref:`predicates `. * File access logger fields :ref:`format `, :ref:`json_format ` and :ref:`typed_json_format ` are deprecated in favor of :ref:`log_format `. * A warning is now logged when v2 xDS api is used. This behavior can be temporarily disabled by setting `envoy.reloadable_features.enable_deprecated_v2_api_warning` to `false`. +* Using cluster circuit breakers for DNS Cache is now deprecated in favor of :ref:`DNS cache circuit breakers `. This behavior can be temporarily disabled by setting `envoy.reloadable_features.enable_dns_cache_circuit_breakers` to `false`. \ No newline at end of file diff --git a/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto b/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto index 7c72af35af33..d801e83fa008 100644 --- a/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto +++ b/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto @@ -18,9 +18,16 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Dynamic forward proxy common configuration] +// Configuration of circuit breakers for resolver. +message DnsCacheCircuitBreakers { + // The maximum number of pending requests that Envoy will allow to the + // resolver. If not specified, the default is 1024. + google.protobuf.UInt32Value max_pending_requests = 1; +} + // Configuration for the dynamic forward proxy DNS cache. See the :ref:`architecture overview // ` for more information. -// [#next-free-field: 7] +// [#next-free-field: 8] message DnsCacheConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.common.dynamic_forward_proxy.v2alpha.DnsCacheConfig"; @@ -83,4 +90,9 @@ message DnsCacheConfig { // this is used as the cache's DNS refresh rate when DNS requests are failing. If this setting is // not specified, the failure refresh rate defaults to the dns_refresh_rate. config.cluster.v3.Cluster.RefreshRate dns_failure_refresh_rate = 6; + + // The config of circuit breakers for resolver. It provides a configurable threshold. + // If `envoy.reloadable_features.enable_dns_cache_circuit_breakers` is enabled, + // envoy will use dns cache circuit breakers with default settings even if this value is not set. 
+ DnsCacheCircuitBreakers dns_cache_circuit_breaker = 7; } diff --git a/include/envoy/common/resource.h b/include/envoy/common/resource.h index 6b04afcfdf4b..ef65d35c7115 100644 --- a/include/envoy/common/resource.h +++ b/include/envoy/common/resource.h @@ -2,6 +2,8 @@ #include "envoy/common/pure.h" +#include "absl/types/optional.h" + #pragma once namespace Envoy { @@ -44,4 +46,6 @@ class ResourceLimit { virtual uint64_t count() const PURE; }; +using ResourceLimitOptRef = absl::optional>; + } // namespace Envoy diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc index a8d795eee314..742b7aba5282 100644 --- a/source/common/runtime/runtime_features.cc +++ b/source/common/runtime/runtime_features.cc @@ -65,6 +65,7 @@ constexpr const char* runtime_features[] = { "envoy.reloadable_features.disallow_unbounded_access_logs", "envoy.reloadable_features.early_errors_via_hcm", "envoy.reloadable_features.enable_deprecated_v2_api_warning", + "envoy.reloadable_features.enable_dns_cache_circuit_breakers", "envoy.reloadable_features.ext_authz_http_service_enable_case_sensitive_string_matcher", "envoy.reloadable_features.fix_upgrade_response", "envoy.reloadable_features.fixed_connection_close", diff --git a/source/common/upstream/resource_manager_impl.h b/source/common/upstream/resource_manager_impl.h index c03f8f2d348a..f826d36fa198 100644 --- a/source/common/upstream/resource_manager_impl.h +++ b/source/common/upstream/resource_manager_impl.h @@ -16,6 +16,53 @@ namespace Envoy { namespace Upstream { +struct ManagedResourceImpl : public BasicResourceLimitImpl { + ManagedResourceImpl(uint64_t max, Runtime::Loader& runtime, const std::string& runtime_key, + Stats::Gauge& open_gauge, Stats::Gauge& remaining) + : BasicResourceLimitImpl(max, runtime, runtime_key), open_gauge_(open_gauge), + remaining_(remaining) { + remaining_.set(max); + } + + // Upstream::Resource + bool canCreate() override { return current_ < max(); } + void inc() override { + BasicResourceLimitImpl::inc(); + updateRemaining(); + open_gauge_.set(BasicResourceLimitImpl::canCreate() ? 0 : 1); + } + void decBy(uint64_t amount) override { + BasicResourceLimitImpl::decBy(amount); + updateRemaining(); + open_gauge_.set(BasicResourceLimitImpl::canCreate() ? 0 : 1); + } + + /** + * We set the gauge instead of incrementing and decrementing because, + * though atomics are used, it is possible for the current resource count + * to be greater than the supplied max. + */ + void updateRemaining() { + /** + * We cannot use std::max here because max() and current_ are + * unsigned and subtracting them may overflow. + */ + const uint64_t current_copy = current_; + remaining_.set(max() > current_copy ? max() - current_copy : 0); + } + + /** + * A gauge to notify the live circuit breaker state. The gauge is set to 0 + * to notify that the circuit breaker is not yet triggered. + */ + Stats::Gauge& open_gauge_; + + /** + * The number of resources remaining before the circuit breaker opens. + */ + Stats::Gauge& remaining_; +}; + /** * Implementation of ResourceManager. * NOTE: This implementation makes some assumptions which favor simplicity over correctness. 
@@ -53,54 +100,6 @@ class ResourceManagerImpl : public ResourceManager { ResourceLimit& connectionPools() override { return connection_pools_; } private: - struct ManagedResourceImpl : public BasicResourceLimitImpl { - ManagedResourceImpl(uint64_t max, Runtime::Loader& runtime, const std::string& runtime_key, - Stats::Gauge& open_gauge, Stats::Gauge& remaining) - : BasicResourceLimitImpl(max, runtime, runtime_key), open_gauge_(open_gauge), - remaining_(remaining) { - remaining_.set(max); - } - - ~ManagedResourceImpl() override { ASSERT(count() == 0); } - - void inc() override { - BasicResourceLimitImpl::inc(); - updateRemaining(); - open_gauge_.set(BasicResourceLimitImpl::canCreate() ? 0 : 1); - } - - void decBy(uint64_t amount) override { - BasicResourceLimitImpl::decBy(amount); - updateRemaining(); - open_gauge_.set(BasicResourceLimitImpl::canCreate() ? 0 : 1); - } - - /** - * We set the gauge instead of incrementing and decrementing because, - * though atomics are used, it is possible for the current resource count - * to be greater than the supplied max. - */ - void updateRemaining() { - /** - * We cannot use std::max here because max() and current_ are - * unsigned and subtracting them may overflow. - */ - const uint64_t current_copy = current_; - remaining_.set(max() > current_copy ? max() - current_copy : 0); - } - - /** - * A gauge to notify the live circuit breaker state. The gauge is set to 0 - * to notify that the circuit breaker is not yet triggered. - */ - Stats::Gauge& open_gauge_; - - /** - * The number of resources remaining before the circuit breaker opens. - */ - Stats::Gauge& remaining_; - }; - class RetryBudgetImpl : public ResourceLimit { public: RetryBudgetImpl(absl::optional budget_percent, diff --git a/source/extensions/clusters/dynamic_forward_proxy/cluster.cc b/source/extensions/clusters/dynamic_forward_proxy/cluster.cc index f4ad845a8b63..c5af40dfd401 100644 --- a/source/extensions/clusters/dynamic_forward_proxy/cluster.cc +++ b/source/extensions/clusters/dynamic_forward_proxy/cluster.cc @@ -198,7 +198,7 @@ ClusterFactory::createClusterWithConfig( Stats::ScopePtr&& stats_scope) { Extensions::Common::DynamicForwardProxy::DnsCacheManagerFactoryImpl cache_manager_factory( context.singletonManager(), context.dispatcher(), context.tls(), context.random(), - context.stats()); + context.runtime(), context.stats()); envoy::config::cluster::v3::Cluster cluster_config = cluster; if (cluster_config.has_upstream_http_protocol_options()) { if (!proto_config.allow_insecure_cluster_options() && diff --git a/source/extensions/common/dynamic_forward_proxy/BUILD b/source/extensions/common/dynamic_forward_proxy/BUILD index b4dbdb57eaa2..118b4ab44093 100644 --- a/source/extensions/common/dynamic_forward_proxy/BUILD +++ b/source/extensions/common/dynamic_forward_proxy/BUILD @@ -16,6 +16,7 @@ envoy_cc_library( "//include/envoy/event:dispatcher_interface", "//include/envoy/singleton:manager_interface", "//include/envoy/thread_local:thread_local_interface", + "//include/envoy/upstream:resource_manager_interface", "@envoy_api//envoy/extensions/common/dynamic_forward_proxy/v3:pkg_cc_proto", ], ) @@ -37,6 +38,7 @@ envoy_cc_library( hdrs = ["dns_cache_impl.h"], deps = [ ":dns_cache_interface", + ":dns_cache_resource_manager", "//include/envoy/network:dns_interface", "//include/envoy/thread_local:thread_local_interface", "//source/common/common:cleanup_lib", @@ -46,3 +48,19 @@ envoy_cc_library( "@envoy_api//envoy/extensions/common/dynamic_forward_proxy/v3:pkg_cc_proto", ], ) + 
+envoy_cc_library( + name = "dns_cache_resource_manager", + srcs = ["dns_cache_resource_manager.cc"], + hdrs = ["dns_cache_resource_manager.h"], + deps = [ + ":dns_cache_interface", + "//include/envoy/common:resource_interface", + "//include/envoy/stats:stats_interface", + "//source/common/common:assert_lib", + "//source/common/common:basic_resource_lib", + "//source/common/runtime:runtime_lib", + "//source/common/upstream:resource_manager_lib", + "@envoy_api//envoy/extensions/common/dynamic_forward_proxy/v3:pkg_cc_proto", + ], +) diff --git a/source/extensions/common/dynamic_forward_proxy/dns_cache.h b/source/extensions/common/dynamic_forward_proxy/dns_cache.h index 52941be1083a..2b9d0263fb9f 100644 --- a/source/extensions/common/dynamic_forward_proxy/dns_cache.h +++ b/source/extensions/common/dynamic_forward_proxy/dns_cache.h @@ -4,6 +4,7 @@ #include "envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.pb.h" #include "envoy/singleton/manager.h" #include "envoy/thread_local/thread_local.h" +#include "envoy/upstream/resource_manager.h" namespace Envoy { namespace Extensions { @@ -43,6 +44,32 @@ class DnsHostInfo { using DnsHostInfoSharedPtr = std::shared_ptr; +#define ALL_DNS_CACHE_CIRCUIT_BREAKERS_STATS(OPEN_GAUGE, REMAINING_GAUGE) \ + OPEN_GAUGE(rq_pending_open, Accumulate) \ + REMAINING_GAUGE(rq_pending_remaining, Accumulate) + +struct DnsCacheCircuitBreakersStats { + ALL_DNS_CACHE_CIRCUIT_BREAKERS_STATS(GENERATE_GAUGE_STRUCT, GENERATE_GAUGE_STRUCT) +}; + +/** + * A resource manager of DNS Cache. + */ +class DnsCacheResourceManager { +public: + virtual ~DnsCacheResourceManager() = default; + + /** + * Returns the resource limit of pending requests to DNS. + */ + virtual ResourceLimit& pendingRequests() PURE; + + /** + * Returns the reference of stats for dns cache circuit breakers. + */ + virtual DnsCacheCircuitBreakersStats& stats() PURE; +}; + /** * A cache of DNS hosts. Hosts will re-resolve their addresses or be automatically purged * depending on configured policy. @@ -148,6 +175,15 @@ class DnsCache { * @return all hosts currently stored in the cache. */ virtual absl::flat_hash_map hosts() PURE; + + /** + * Check if a DNS request is allowed given resource limits. + * @param pending_request optional pending request resource limit. If no resource limit is + * provided the internal DNS cache limit is used. + * @return RAII handle for pending request circuit breaker if the request was allowed. 
+ */ + virtual Upstream::ResourceAutoIncDecPtr + canCreateDnsRequest(ResourceLimitOptRef pending_request) PURE; }; using DnsCacheSharedPtr = std::shared_ptr; @@ -176,7 +212,7 @@ using DnsCacheManagerSharedPtr = std::shared_ptr; DnsCacheManagerSharedPtr getCacheManager(Singleton::Manager& manager, Event::Dispatcher& main_thread_dispatcher, ThreadLocal::SlotAllocator& tls, - Runtime::RandomGenerator& random, + Runtime::RandomGenerator& random, Runtime::Loader& loader, Stats::Scope& root_scope); /** diff --git a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc index c24de298df1f..aa2c666c4242 100644 --- a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc +++ b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc @@ -16,13 +16,14 @@ namespace DynamicForwardProxy { DnsCacheImpl::DnsCacheImpl( Event::Dispatcher& main_thread_dispatcher, ThreadLocal::SlotAllocator& tls, - Runtime::RandomGenerator& random, Stats::Scope& root_scope, + Runtime::RandomGenerator& random, Runtime::Loader& loader, Stats::Scope& root_scope, const envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig& config) : main_thread_dispatcher_(main_thread_dispatcher), dns_lookup_family_(Upstream::getDnsLookupFamilyFromEnum(config.dns_lookup_family())), resolver_(main_thread_dispatcher.createDnsResolver({}, false)), tls_slot_(tls.allocateSlot()), scope_(root_scope.createScope(fmt::format("dns_cache.{}.", config.name()))), - stats_{ALL_DNS_CACHE_STATS(POOL_COUNTER(*scope_), POOL_GAUGE(*scope_))}, + stats_(generateDnsCacheStats(*scope_)), + resource_manager_(*scope_, loader, config.name(), config.dns_cache_circuit_breaker()), refresh_interval_(PROTOBUF_GET_MS_OR_DEFAULT(config, dns_refresh_rate, 60000)), failure_backoff_strategy_( Config::Utility::prepareDnsRefreshStrategy< @@ -46,6 +47,10 @@ DnsCacheImpl::~DnsCacheImpl() { } } +DnsCacheStats DnsCacheImpl::generateDnsCacheStats(Stats::Scope& scope) { + return {ALL_DNS_CACHE_STATS(POOL_COUNTER(scope), POOL_GAUGE(scope))}; +} + DnsCacheImpl::LoadDnsCacheEntryResult DnsCacheImpl::loadDnsCacheEntry(absl::string_view host, uint16_t default_port, LoadDnsCacheEntryCallbacks& callbacks) { @@ -72,6 +77,20 @@ DnsCacheImpl::loadDnsCacheEntry(absl::string_view host, uint16_t default_port, } } +Upstream::ResourceAutoIncDecPtr +DnsCacheImpl::canCreateDnsRequest(ResourceLimitOptRef pending_requests) { + const auto has_pending_requests = pending_requests.has_value(); + auto& current_pending_requests = + has_pending_requests ? 
pending_requests->get() : resource_manager_.pendingRequests(); + if (!current_pending_requests.canCreate()) { + if (!has_pending_requests) { + stats_.dns_rq_pending_overflow_.inc(); + } + return nullptr; + } + return std::make_unique(current_pending_requests); +} + absl::flat_hash_map DnsCacheImpl::hosts() { absl::flat_hash_map ret; for (const auto& host : primary_hosts_) { diff --git a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.h b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.h index 1f649c301ff4..35c72e8d8f23 100644 --- a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.h +++ b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.h @@ -2,12 +2,14 @@ #include "envoy/common/backoff_strategy.h" #include "envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.pb.h" +#include "envoy/http/filter.h" #include "envoy/network/dns.h" #include "envoy/thread_local/thread_local.h" #include "common/common/cleanup.h" #include "extensions/common/dynamic_forward_proxy/dns_cache.h" +#include "extensions/common/dynamic_forward_proxy/dns_cache_resource_manager.h" #include "absl/container/flat_hash_map.h" @@ -27,6 +29,7 @@ namespace DynamicForwardProxy { COUNTER(host_address_changed) \ COUNTER(host_overflow) \ COUNTER(host_removed) \ + COUNTER(dns_rq_pending_overflow) \ GAUGE(num_hosts, NeverImport) /** @@ -39,15 +42,18 @@ struct DnsCacheStats { class DnsCacheImpl : public DnsCache, Logger::Loggable { public: DnsCacheImpl(Event::Dispatcher& main_thread_dispatcher, ThreadLocal::SlotAllocator& tls, - Runtime::RandomGenerator& random, Stats::Scope& root_scope, + Runtime::RandomGenerator& random, Runtime::Loader& loader, Stats::Scope& root_scope, const envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig& config); ~DnsCacheImpl() override; + static DnsCacheStats generateDnsCacheStats(Stats::Scope& scope); // DnsCache LoadDnsCacheEntryResult loadDnsCacheEntry(absl::string_view host, uint16_t default_port, LoadDnsCacheEntryCallbacks& callbacks) override; AddUpdateCallbacksHandlePtr addUpdateCallbacks(UpdateCallbacks& callbacks) override; absl::flat_hash_map hosts() override; + Upstream::ResourceAutoIncDecPtr + canCreateDnsRequest(ResourceLimitOptRef pending_requests) override; private: using TlsHostMap = absl::flat_hash_map; @@ -138,6 +144,7 @@ class DnsCacheImpl : public DnsCache, Logger::Loggable update_callbacks_; absl::flat_hash_map primary_hosts_; + DnsCacheResourceManagerImpl resource_manager_; const std::chrono::milliseconds refresh_interval_; const BackOffStrategyPtr failure_backoff_strategy_; const std::chrono::milliseconds host_ttl_; diff --git a/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.cc b/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.cc index f898749d5c83..5c9c5198ef41 100644 --- a/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.cc +++ b/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.cc @@ -27,8 +27,8 @@ DnsCacheSharedPtr DnsCacheManagerImpl::getCache( return existing_cache->second.cache_; } - DnsCacheSharedPtr new_cache = - std::make_shared(main_thread_dispatcher_, tls_, random_, root_scope_, config); + DnsCacheSharedPtr new_cache = std::make_shared( + main_thread_dispatcher_, tls_, random_, loader_, root_scope_, config); caches_.emplace(config.name(), ActiveCache{config, new_cache}); return new_cache; } @@ -36,12 +36,12 @@ DnsCacheSharedPtr DnsCacheManagerImpl::getCache( DnsCacheManagerSharedPtr 
getCacheManager(Singleton::Manager& singleton_manager, Event::Dispatcher& main_thread_dispatcher, ThreadLocal::SlotAllocator& tls, - Runtime::RandomGenerator& random, + Runtime::RandomGenerator& random, Runtime::Loader& loader, Stats::Scope& root_scope) { return singleton_manager.getTyped( SINGLETON_MANAGER_REGISTERED_NAME(dns_cache_manager), - [&main_thread_dispatcher, &tls, &random, &root_scope] { - return std::make_shared(main_thread_dispatcher, tls, random, + [&main_thread_dispatcher, &tls, &random, &loader, &root_scope] { + return std::make_shared(main_thread_dispatcher, tls, random, loader, root_scope); }); } diff --git a/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.h b/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.h index 130ef570bbc2..52ac9fe0d800 100644 --- a/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.h +++ b/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.h @@ -14,9 +14,10 @@ namespace DynamicForwardProxy { class DnsCacheManagerImpl : public DnsCacheManager, public Singleton::Instance { public: DnsCacheManagerImpl(Event::Dispatcher& main_thread_dispatcher, ThreadLocal::SlotAllocator& tls, - Runtime::RandomGenerator& random, Stats::Scope& root_scope) + Runtime::RandomGenerator& random, Runtime::Loader& loader, + Stats::Scope& root_scope) : main_thread_dispatcher_(main_thread_dispatcher), tls_(tls), random_(random), - root_scope_(root_scope) {} + loader_(loader), root_scope_(root_scope) {} // DnsCacheManager DnsCacheSharedPtr getCache( @@ -35,6 +36,7 @@ class DnsCacheManagerImpl : public DnsCacheManager, public Singleton::Instance { Event::Dispatcher& main_thread_dispatcher_; ThreadLocal::SlotAllocator& tls_; Runtime::RandomGenerator& random_; + Runtime::Loader& loader_; Stats::Scope& root_scope_; absl::flat_hash_map caches_; }; @@ -43,12 +45,12 @@ class DnsCacheManagerFactoryImpl : public DnsCacheManagerFactory { public: DnsCacheManagerFactoryImpl(Singleton::Manager& singleton_manager, Event::Dispatcher& dispatcher, ThreadLocal::SlotAllocator& tls, Runtime::RandomGenerator& random, - Stats::Scope& root_scope) + Runtime::Loader& loader, Stats::Scope& root_scope) : singleton_manager_(singleton_manager), dispatcher_(dispatcher), tls_(tls), random_(random), - root_scope_(root_scope) {} + loader_(loader), root_scope_(root_scope) {} DnsCacheManagerSharedPtr get() override { - return getCacheManager(singleton_manager_, dispatcher_, tls_, random_, root_scope_); + return getCacheManager(singleton_manager_, dispatcher_, tls_, random_, loader_, root_scope_); } private: @@ -56,6 +58,7 @@ class DnsCacheManagerFactoryImpl : public DnsCacheManagerFactory { Event::Dispatcher& dispatcher_; ThreadLocal::SlotAllocator& tls_; Runtime::RandomGenerator& random_; + Runtime::Loader& loader_; Stats::Scope& root_scope_; }; diff --git a/source/extensions/common/dynamic_forward_proxy/dns_cache_resource_manager.cc b/source/extensions/common/dynamic_forward_proxy/dns_cache_resource_manager.cc new file mode 100644 index 000000000000..65ce809275ef --- /dev/null +++ b/source/extensions/common/dynamic_forward_proxy/dns_cache_resource_manager.cc @@ -0,0 +1,26 @@ +#include "extensions/common/dynamic_forward_proxy/dns_cache_resource_manager.h" + +namespace Envoy { +namespace Extensions { +namespace Common { +namespace DynamicForwardProxy { + +DnsCacheResourceManagerImpl::DnsCacheResourceManagerImpl( + Stats::Scope& scope, Runtime::Loader& loader, const std::string& config_name, + const 
envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheCircuitBreakers& cb_config) + : cb_stats_(generateDnsCacheCircuitBreakersStats(scope)), + pending_requests_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(cb_config, max_pending_requests, 1024), + loader, fmt::format("dns_cache.{}.circuit_breakers", config_name), + cb_stats_.rq_pending_open_, cb_stats_.rq_pending_remaining_) {} + +DnsCacheCircuitBreakersStats +DnsCacheResourceManagerImpl::generateDnsCacheCircuitBreakersStats(Stats::Scope& scope) { + std::string stat_prefix = "circuit_breakers"; + return {ALL_DNS_CACHE_CIRCUIT_BREAKERS_STATS(POOL_GAUGE_PREFIX(scope, stat_prefix), + POOL_GAUGE_PREFIX(scope, stat_prefix))}; +} + +} // namespace DynamicForwardProxy +} // namespace Common +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/common/dynamic_forward_proxy/dns_cache_resource_manager.h b/source/extensions/common/dynamic_forward_proxy/dns_cache_resource_manager.h new file mode 100644 index 000000000000..0d4762ede616 --- /dev/null +++ b/source/extensions/common/dynamic_forward_proxy/dns_cache_resource_manager.h @@ -0,0 +1,44 @@ +#pragma once + +#include +#include +#include + +#include "envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.pb.h" +#include "envoy/runtime/runtime.h" +#include "envoy/stats/scope.h" +#include "envoy/stats/stats_macros.h" +#include "envoy/upstream/resource_manager.h" + +#include "common/common/assert.h" +#include "common/common/basic_resource_impl.h" +#include "common/upstream/resource_manager_impl.h" + +#include "extensions/common/dynamic_forward_proxy/dns_cache.h" + +namespace Envoy { +namespace Extensions { +namespace Common { +namespace DynamicForwardProxy { + +class DnsCacheResourceManagerImpl : public DnsCacheResourceManager { +public: + DnsCacheResourceManagerImpl( + Stats::Scope& scope, Runtime::Loader& loader, const std::string& config_name, + const envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheCircuitBreakers& + cb_config); + + static DnsCacheCircuitBreakersStats generateDnsCacheCircuitBreakersStats(Stats::Scope& scope); + // Envoy::Upstream::DnsCacheResourceManager + ResourceLimit& pendingRequests() override { return pending_requests_; } + DnsCacheCircuitBreakersStats& stats() override { return cb_stats_; } + +private: + DnsCacheCircuitBreakersStats cb_stats_; + Upstream::ManagedResourceImpl pending_requests_; +}; + +} // namespace DynamicForwardProxy +} // namespace Common +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/filters/http/dynamic_forward_proxy/BUILD b/source/extensions/filters/http/dynamic_forward_proxy/BUILD index fb5f5ade7d12..6925f57c7115 100644 --- a/source/extensions/filters/http/dynamic_forward_proxy/BUILD +++ b/source/extensions/filters/http/dynamic_forward_proxy/BUILD @@ -15,6 +15,7 @@ envoy_cc_library( hdrs = ["proxy_filter.h"], deps = [ "//include/envoy/http:filter_interface", + "//source/common/runtime:runtime_features_lib", "//source/extensions/common/dynamic_forward_proxy:dns_cache_interface", "//source/extensions/filters/http:well_known_names", "//source/extensions/filters/http/common:pass_through_filter_lib", diff --git a/source/extensions/filters/http/dynamic_forward_proxy/config.cc b/source/extensions/filters/http/dynamic_forward_proxy/config.cc index 637711663317..30c984da4840 100644 --- a/source/extensions/filters/http/dynamic_forward_proxy/config.cc +++ b/source/extensions/filters/http/dynamic_forward_proxy/config.cc @@ -16,7 +16,7 @@ Http::FilterFactoryCb 
DynamicForwardProxyFilterFactory::createFilterFactoryFromP const std::string&, Server::Configuration::FactoryContext& context) { Extensions::Common::DynamicForwardProxy::DnsCacheManagerFactoryImpl cache_manager_factory( context.singletonManager(), context.dispatcher(), context.threadLocal(), context.random(), - context.scope()); + context.runtime(), context.scope()); ProxyFilterConfigSharedPtr filter_config(std::make_shared( proto_config, cache_manager_factory, context.clusterManager())); return [filter_config](Http::FilterChainFactoryCallbacks& callbacks) -> void { diff --git a/source/extensions/filters/http/dynamic_forward_proxy/proxy_filter.cc b/source/extensions/filters/http/dynamic_forward_proxy/proxy_filter.cc index 47d08e0dcab8..1ab5569cc7e0 100644 --- a/source/extensions/filters/http/dynamic_forward_proxy/proxy_filter.cc +++ b/source/extensions/filters/http/dynamic_forward_proxy/proxy_filter.cc @@ -3,6 +3,8 @@ #include "envoy/config/core/v3/base.pb.h" #include "envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.pb.h" +#include "common/runtime/runtime_features.h" + #include "extensions/common/dynamic_forward_proxy/dns_cache.h" #include "extensions/filters/http/well_known_names.h" @@ -53,16 +55,25 @@ Http::FilterHeadersStatus ProxyFilter::decodeHeaders(Http::RequestHeaderMap& hea } cluster_info_ = cluster->info(); - auto& resource = cluster_info_->resourceManager(route_entry->priority()).pendingRequests(); - if (!resource.canCreate()) { - ENVOY_STREAM_LOG(debug, "pending request overflow", *decoder_callbacks_); - cluster_info_->stats().upstream_rq_pending_overflow_.inc(); - decoder_callbacks_->sendLocalReply( + const bool should_use_dns_cache_circuit_breakers = + Runtime::runtimeFeatureEnabled("envoy.reloadable_features.enable_dns_cache_circuit_breakers"); + + circuit_breaker_ = config_->cache().canCreateDnsRequest( + !should_use_dns_cache_circuit_breakers + ? 
absl::make_optional(std::reference_wrapper( + cluster_info_->resourceManager(route_entry->priority()).pendingRequests())) + : absl::nullopt); + + if (circuit_breaker_ == nullptr) { + if (!should_use_dns_cache_circuit_breakers) { + cluster_info_->stats().upstream_rq_pending_overflow_.inc(); + } + ENVOY_STREAM_LOG(debug, "pending request overflow", *this->decoder_callbacks_); + this->decoder_callbacks_->sendLocalReply( Http::Code::ServiceUnavailable, ResponseStrings::get().PendingRequestOverflow, nullptr, absl::nullopt, ResponseStrings::get().PendingRequestOverflow); return Http::FilterHeadersStatus::StopIteration; } - circuit_breaker_ = std::make_unique(resource); uint16_t default_port = 80; if (cluster_info_->transportSocketMatcher() diff --git a/source/extensions/filters/network/sni_dynamic_forward_proxy/config.cc b/source/extensions/filters/network/sni_dynamic_forward_proxy/config.cc index a5f9fa9e1819..aaedee18a560 100644 --- a/source/extensions/filters/network/sni_dynamic_forward_proxy/config.cc +++ b/source/extensions/filters/network/sni_dynamic_forward_proxy/config.cc @@ -20,7 +20,7 @@ SniDynamicForwardProxyNetworkFilterConfigFactory::createFilterFactoryFromProtoTy Extensions::Common::DynamicForwardProxy::DnsCacheManagerFactoryImpl cache_manager_factory( context.singletonManager(), context.dispatcher(), context.threadLocal(), context.random(), - context.scope()); + context.runtime(), context.scope()); ProxyFilterConfigSharedPtr filter_config(std::make_shared( proto_config, cache_manager_factory, context.clusterManager())); diff --git a/test/extensions/common/dynamic_forward_proxy/BUILD b/test/extensions/common/dynamic_forward_proxy/BUILD index 5c33d024fde2..3452905e285b 100644 --- a/test/extensions/common/dynamic_forward_proxy/BUILD +++ b/test/extensions/common/dynamic_forward_proxy/BUILD @@ -21,17 +21,34 @@ envoy_cc_test( "//test/mocks/runtime:runtime_mocks", "//test/mocks/thread_local:thread_local_mocks", "//test/test_common:simulated_time_system_lib", + "//test/test_common:test_runtime_lib", "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/common/dynamic_forward_proxy/v3:pkg_cc_proto", ], ) +envoy_cc_test( + name = "dns_cache_resource_manager_test", + srcs = ["dns_cache_resource_manager_test.cc"], + deps = [ + ":mocks", + "//source/common/config:utility_lib", + "//source/extensions/common/dynamic_forward_proxy:dns_cache_impl", + "//source/extensions/common/dynamic_forward_proxy:dns_cache_resource_manager", + "//test/mocks/runtime:runtime_mocks", + "//test/mocks/stats:stats_mocks", + "//test/test_common:utility_lib", + "@envoy_api//envoy/extensions/common/dynamic_forward_proxy/v3:pkg_cc_proto", + ], +) + envoy_cc_mock( name = "mocks", srcs = ["mocks.cc"], hdrs = ["mocks.h"], deps = [ - "//source/extensions/common/dynamic_forward_proxy:dns_cache_interface", + "//source/extensions/common/dynamic_forward_proxy:dns_cache_impl", + "//test/mocks/upstream:upstream_mocks", "@envoy_api//envoy/extensions/common/dynamic_forward_proxy/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc b/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc index d3bf78619891..26db365de145 100644 --- a/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc +++ b/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc @@ -11,6 +11,7 @@ #include "test/mocks/runtime/mocks.h" #include "test/mocks/thread_local/mocks.h" #include "test/test_common/simulated_time_system.h" +#include 
"test/test_common/test_runtime.h" #include "test/test_common/utility.h" using testing::InSequence; @@ -30,7 +31,8 @@ class DnsCacheImplTest : public testing::Test, public Event::TestUsingSimulatedT config_.set_dns_lookup_family(envoy::config::cluster::v3::Cluster::V4_ONLY); EXPECT_CALL(dispatcher_, createDnsResolver(_, _)).WillOnce(Return(resolver_)); - dns_cache_ = std::make_unique(dispatcher_, tls_, random_, store_, config_); + dns_cache_ = + std::make_unique(dispatcher_, tls_, random_, loader_, store_, config_); update_callbacks_handle_ = dns_cache_->addUpdateCallbacks(update_callbacks_); } @@ -59,6 +61,7 @@ class DnsCacheImplTest : public testing::Test, public Event::TestUsingSimulatedT std::shared_ptr resolver_{std::make_shared()}; NiceMock tls_; NiceMock random_; + NiceMock loader_; Stats::IsolatedStoreImpl store_; std::unique_ptr dns_cache_; MockUpdateCallbacks update_callbacks_; @@ -642,13 +645,40 @@ TEST_F(DnsCacheImplTest, MaxHostOverflow) { EXPECT_EQ(1, TestUtility::findCounter(store_, "dns_cache.foo.host_overflow")->value()); } +TEST_F(DnsCacheImplTest, CircuitBreakersNotInvoked) { + initialize(); + + auto raii_ptr = dns_cache_->canCreateDnsRequest(absl::nullopt); + EXPECT_NE(raii_ptr.get(), nullptr); +} + +TEST_F(DnsCacheImplTest, DnsCacheCircuitBreakersOverflow) { + config_.mutable_dns_cache_circuit_breaker()->mutable_max_pending_requests()->set_value(0); + initialize(); + + auto raii_ptr = dns_cache_->canCreateDnsRequest(absl::nullopt); + EXPECT_EQ(raii_ptr.get(), nullptr); + EXPECT_EQ(1, TestUtility::findCounter(store_, "dns_cache.foo.dns_rq_pending_overflow")->value()); +} + +TEST_F(DnsCacheImplTest, ClustersCircuitBreakersOverflow) { + initialize(); + NiceMock pending_requests_; + + EXPECT_CALL(pending_requests_, canCreate()).WillOnce(Return(false)); + auto raii_ptr = dns_cache_->canCreateDnsRequest(pending_requests_); + EXPECT_EQ(raii_ptr.get(), nullptr); + EXPECT_EQ(0, TestUtility::findCounter(store_, "dns_cache.foo.dns_rq_pending_overflow")->value()); +} + // DNS cache manager config tests. 
TEST(DnsCacheManagerImplTest, LoadViaConfig) { NiceMock dispatcher; NiceMock tls; NiceMock random; + NiceMock loader; Stats::IsolatedStoreImpl store; - DnsCacheManagerImpl cache_manager(dispatcher, tls, random, store); + DnsCacheManagerImpl cache_manager(dispatcher, tls, random, loader, store); envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig config1; config1.set_name("foo"); diff --git a/test/extensions/common/dynamic_forward_proxy/dns_cache_resource_manager_test.cc b/test/extensions/common/dynamic_forward_proxy/dns_cache_resource_manager_test.cc new file mode 100644 index 000000000000..04127f486fff --- /dev/null +++ b/test/extensions/common/dynamic_forward_proxy/dns_cache_resource_manager_test.cc @@ -0,0 +1,77 @@ +#include "envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.pb.h" + +#include "common/config/utility.h" + +#include "extensions/common/dynamic_forward_proxy/dns_cache_impl.h" +#include "extensions/common/dynamic_forward_proxy/dns_cache_resource_manager.h" + +#include "test/extensions/common/dynamic_forward_proxy/mocks.h" +#include "test/mocks/runtime/mocks.h" +#include "test/mocks/stats/mocks.h" +#include "test/test_common/utility.h" + +using testing::_; +using testing::NiceMock; + +namespace Envoy { +namespace Extensions { +namespace Common { +namespace DynamicForwardProxy { +namespace { + +class DnsCacheResourceManagerTest : public testing::Test { +public: + DnsCacheResourceManagerTest() { ON_CALL(store_, gauge(_, _)).WillByDefault(ReturnRef(gauge_)); } + + void setupResourceManager(std::string& config_yaml) { + envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheCircuitBreakers cb_config; + TestUtility::loadFromYaml(config_yaml, cb_config); + + resource_manager_ = + std::make_unique(store_, loader_, "dummy", cb_config); + } + + void cleanup() { + auto& pending_requests = resource_manager_->pendingRequests(); + while (pending_requests.count() != 0) { + pending_requests.dec(); + } + } + + std::unique_ptr resource_manager_; + NiceMock store_; + NiceMock gauge_; + NiceMock loader_; +}; + +TEST_F(DnsCacheResourceManagerTest, CheckDnsResource) { + std::string config_yaml = R"EOF( + max_pending_requests: 3 + )EOF"; + setupResourceManager(config_yaml); + + auto& pending_requests = resource_manager_->pendingRequests(); + EXPECT_EQ(3, pending_requests.max()); + EXPECT_EQ(0, pending_requests.count()); + EXPECT_TRUE(pending_requests.canCreate()); + + pending_requests.inc(); + EXPECT_EQ(1, pending_requests.count()); + EXPECT_TRUE(pending_requests.canCreate()); + + pending_requests.inc(); + pending_requests.inc(); + EXPECT_EQ(3, pending_requests.count()); + EXPECT_FALSE(pending_requests.canCreate()); + + pending_requests.dec(); + EXPECT_EQ(2, pending_requests.count()); + EXPECT_TRUE(pending_requests.canCreate()); + + cleanup(); +} +} // namespace +} // namespace DynamicForwardProxy +} // namespace Common +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/test/extensions/common/dynamic_forward_proxy/mocks.cc b/test/extensions/common/dynamic_forward_proxy/mocks.cc index 9fc213794334..ef27a4de5b00 100644 --- a/test/extensions/common/dynamic_forward_proxy/mocks.cc +++ b/test/extensions/common/dynamic_forward_proxy/mocks.cc @@ -10,7 +10,14 @@ namespace Extensions { namespace Common { namespace DynamicForwardProxy { -MockDnsCache::MockDnsCache() = default; +MockDnsCacheResourceManager::MockDnsCacheResourceManager() { + ON_CALL(*this, pendingRequests()).WillByDefault(ReturnRef(pending_requests_)); +} 
+MockDnsCacheResourceManager::~MockDnsCacheResourceManager() = default; + +MockDnsCache::MockDnsCache() { + ON_CALL(*this, canCreateDnsRequest_(_)).WillByDefault(Return(nullptr)); +} MockDnsCache::~MockDnsCache() = default; MockLoadDnsCacheEntryHandle::MockLoadDnsCacheEntryHandle() = default; diff --git a/test/extensions/common/dynamic_forward_proxy/mocks.h b/test/extensions/common/dynamic_forward_proxy/mocks.h index 88b09cf69ef9..1a9e8c77e7c2 100644 --- a/test/extensions/common/dynamic_forward_proxy/mocks.h +++ b/test/extensions/common/dynamic_forward_proxy/mocks.h @@ -2,15 +2,30 @@ #include "envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.pb.h" -#include "extensions/common/dynamic_forward_proxy/dns_cache.h" +#include "extensions/common/dynamic_forward_proxy/dns_cache_impl.h" + +#include "test/mocks/upstream/mocks.h" #include "gmock/gmock.h" +using testing::NiceMock; + namespace Envoy { namespace Extensions { namespace Common { namespace DynamicForwardProxy { +class MockDnsCacheResourceManager : public DnsCacheResourceManager { +public: + MockDnsCacheResourceManager(); + ~MockDnsCacheResourceManager() override; + + MOCK_METHOD(ResourceLimit&, pendingRequests, ()); + MOCK_METHOD(DnsCacheCircuitBreakersStats&, stats, ()); + + NiceMock pending_requests_; +}; + class MockDnsCache : public DnsCache { public: MockDnsCache(); @@ -26,6 +41,11 @@ class MockDnsCache : public DnsCache { MockLoadDnsCacheEntryResult result = loadDnsCacheEntry_(host, default_port, callbacks); return {result.status_, LoadDnsCacheEntryHandlePtr{result.handle_}}; } + Upstream::ResourceAutoIncDecPtr + canCreateDnsRequest(ResourceLimitOptRef pending_requests) override { + Upstream::ResourceAutoIncDec* raii_ptr = canCreateDnsRequest_(pending_requests); + return std::unique_ptr(raii_ptr); + } MOCK_METHOD(MockLoadDnsCacheEntryResult, loadDnsCacheEntry_, (absl::string_view host, uint16_t default_port, LoadDnsCacheEntryCallbacks& callbacks)); @@ -37,6 +57,7 @@ class MockDnsCache : public DnsCache { (UpdateCallbacks & callbacks)); MOCK_METHOD((absl::flat_hash_map), hosts, ()); + MOCK_METHOD(Upstream::ResourceAutoIncDec*, canCreateDnsRequest_, (ResourceLimitOptRef)); }; class MockLoadDnsCacheEntryHandle : public DnsCache::LoadDnsCacheEntryHandle { @@ -55,7 +76,7 @@ class MockDnsCacheManager : public DnsCacheManager { MOCK_METHOD(DnsCacheSharedPtr, getCache, (const envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig& config)); - std::shared_ptr dns_cache_{new MockDnsCache()}; + std::shared_ptr> dns_cache_{new NiceMock()}; }; class MockDnsHostInfo : public DnsHostInfo { diff --git a/test/extensions/filters/http/dynamic_forward_proxy/BUILD b/test/extensions/filters/http/dynamic_forward_proxy/BUILD index f5fcd3868e1b..483d83fcf89b 100644 --- a/test/extensions/filters/http/dynamic_forward_proxy/BUILD +++ b/test/extensions/filters/http/dynamic_forward_proxy/BUILD @@ -16,11 +16,14 @@ envoy_extension_cc_test( srcs = ["proxy_filter_test.cc"], extension_name = "envoy.filters.http.dynamic_forward_proxy", deps = [ + "//source/common/stats:isolated_store_lib", + "//source/extensions/common/dynamic_forward_proxy:dns_cache_impl", "//source/extensions/filters/http:well_known_names", "//source/extensions/filters/http/dynamic_forward_proxy:config", "//test/extensions/common/dynamic_forward_proxy:mocks", "//test/mocks/http:http_mocks", "//test/mocks/upstream:upstream_mocks", + "//test/test_common:test_runtime_lib", "@envoy_api//envoy/extensions/filters/http/dynamic_forward_proxy/v3:pkg_cc_proto", ], ) diff --git 
a/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_integration_test.cc b/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_integration_test.cc index bbe4a2a95fb8..deb95a24c6a7 100644 --- a/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_integration_test.cc +++ b/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_integration_test.cc @@ -103,6 +103,110 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, ProxyFilterIntegrationTest, testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), TestUtility::ipTestParamsToString); +class ProxyFilterCircuitBreakerIntegrationTest : public ProxyFilterIntegrationTest { +public: + ProxyFilterCircuitBreakerIntegrationTest() = default; + + void setup(uint64_t max_hosts = 1024, uint32_t max_pending_requests = 0) { + setUpstreamProtocol(FakeHttpConnection::Type::HTTP1); + + const std::string filter = fmt::format(R"EOF( +name: dynamic_forward_proxy +typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.dynamic_forward_proxy.v3.FilterConfig + dns_cache_config: + name: foo + dns_lookup_family: {} + max_hosts: {} + dns_cache_circuit_breaker: + max_pending_requests: {} +)EOF", + Network::Test::ipVersionToDnsFamily(GetParam()), + max_hosts, max_pending_requests); + config_helper_.addFilter(filter); + + config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + // Switch predefined cluster_0 to CDS filesystem sourcing. + bootstrap.mutable_dynamic_resources()->mutable_cds_config()->set_path(cds_helper_.cds_path()); + bootstrap.mutable_static_resources()->clear_clusters(); + }); + + // Enable dns cache circuit breakers. + config_helper_.addRuntimeOverride("envoy.reloadable_features.enable_dns_cache_circuit_breakers", + "true"); + + // Set validate_clusters to false to allow us to reference a CDS cluster. + config_helper_.addConfigModifier( + [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) { hcm.mutable_route_config()->mutable_validate_clusters()->set_value(false); }); + + // Setup the initial CDS cluster. + cluster_.mutable_connect_timeout()->CopyFrom( + Protobuf::util::TimeUtil::MillisecondsToDuration(100)); + cluster_.set_name("cluster_0"); + cluster_.set_lb_policy(envoy::config::cluster::v3::Cluster::CLUSTER_PROVIDED); + + if (upstream_tls_) { + envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context; + auto* validation_context = + tls_context.mutable_common_tls_context()->mutable_validation_context(); + validation_context->mutable_trusted_ca()->set_filename( + TestEnvironment::runfilesPath("test/config/integration/certs/upstreamcacert.pem")); + cluster_.mutable_transport_socket()->set_name("envoy.transport_sockets.tls"); + cluster_.mutable_transport_socket()->mutable_typed_config()->PackFrom(tls_context); + } + + const std::string cluster_type_config = fmt::format( + R"EOF( + name: envoy.clusters.dynamic_forward_proxy + typed_config: + "@type": type.googleapis.com/envoy.extensions.clusters.dynamic_forward_proxy.v3.ClusterConfig + dns_cache_config: + name: foo + dns_lookup_family: {} + max_hosts: {} + dns_cache_circuit_breaker: + max_pending_requests: {} + )EOF", + Network::Test::ipVersionToDnsFamily(GetParam()), max_hosts, max_pending_requests); + + TestUtility::loadFromYaml(cluster_type_config, *cluster_.mutable_cluster_type()); + + // Load the CDS cluster and wait for it to initialize. 
+ cds_helper_.setCds({cluster_}); + + HttpIntegrationTest::initialize(); + test_server_->waitForCounterEq("cluster_manager.cluster_added", 1); + test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 0); + + fake_upstreams_[0]->set_allow_unexpected_disconnects(true); + } +}; + +INSTANTIATE_TEST_SUITE_P(IpVersions, ProxyFilterCircuitBreakerIntegrationTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); + +TEST_P(ProxyFilterCircuitBreakerIntegrationTest, Basic) { + setup(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + const Http::TestRequestHeaderMapImpl request_headers{ + {":method", "POST"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", + fmt::format("localhost:{}", fake_upstreams_[0]->localAddress()->ip()->port())}}; + + auto response = codec_client_->makeRequestWithBody(request_headers, 1024); + response->waitForEndStream(); + EXPECT_EQ(1, test_server_->gauge("dns_cache.foo.circuit_breakers.rq_pending_open")); + EXPECT_EQ(1, test_server_->counter("dns_cache.foo.dns_rq_pending_overflow")->value()); + + EXPECT_TRUE(response->complete()); + EXPECT_EQ("503", response->headers().Status()->value().getStringView()); +} + // A basic test where we pause a request to lookup localhost, and then do another request which // should hit the TLS cache. TEST_P(ProxyFilterIntegrationTest, RequestWithBody) { diff --git a/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_test.cc b/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_test.cc index 17dd35d8cd3e..f1af413fc7e7 100644 --- a/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_test.cc +++ b/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_test.cc @@ -1,5 +1,6 @@ #include "envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.pb.h" +#include "extensions/common/dynamic_forward_proxy/dns_cache_impl.h" #include "extensions/filters/http/dynamic_forward_proxy/proxy_filter.h" #include "extensions/filters/http/well_known_names.h" @@ -7,6 +8,7 @@ #include "test/mocks/http/mocks.h" #include "test/mocks/upstream/mocks.h" #include "test/mocks/upstream/transport_socket_match.h" +#include "test/test_common/test_runtime.h" using testing::AtLeast; using testing::Eq; @@ -65,14 +67,19 @@ class ProxyFilterTest : public testing::Test, std::unique_ptr filter_; Http::MockStreamDecoderFilterCallbacks callbacks_; Http::TestRequestHeaderMapImpl request_headers_{{":authority", "foo"}}; + NiceMock pending_requests_; }; // Default port 80 if upstream TLS not configured. TEST_F(ProxyFilterTest, HttpDefaultPort) { + Upstream::ResourceAutoIncDec* circuit_breakers_( + new Upstream::ResourceAutoIncDec(pending_requests_)); InSequence s; EXPECT_CALL(callbacks_, route()); EXPECT_CALL(cm_, get(_)); + EXPECT_CALL(*dns_cache_manager_->dns_cache_, canCreateDnsRequest_(_)) + .WillOnce(Return(circuit_breakers_)); EXPECT_CALL(*transport_socket_factory_, implementsSecureTransport()).WillOnce(Return(false)); Extensions::Common::DynamicForwardProxy::MockLoadDnsCacheEntryHandle* handle = new Extensions::Common::DynamicForwardProxy::MockLoadDnsCacheEntryHandle(); @@ -87,10 +94,14 @@ TEST_F(ProxyFilterTest, HttpDefaultPort) { // Default port 443 if upstream TLS is configured. 
TEST_F(ProxyFilterTest, HttpsDefaultPort) { + Upstream::ResourceAutoIncDec* circuit_breakers_( + new Upstream::ResourceAutoIncDec(pending_requests_)); InSequence s; EXPECT_CALL(callbacks_, route()); EXPECT_CALL(cm_, get(_)); + EXPECT_CALL(*dns_cache_manager_->dns_cache_, canCreateDnsRequest_(_)) + .WillOnce(Return(circuit_breakers_)); EXPECT_CALL(*transport_socket_factory_, implementsSecureTransport()).WillOnce(Return(true)); Extensions::Common::DynamicForwardProxy::MockLoadDnsCacheEntryHandle* handle = new Extensions::Common::DynamicForwardProxy::MockLoadDnsCacheEntryHandle(); @@ -105,10 +116,14 @@ TEST_F(ProxyFilterTest, HttpsDefaultPort) { // Cache overflow. TEST_F(ProxyFilterTest, CacheOverflow) { + Upstream::ResourceAutoIncDec* circuit_breakers_( + new Upstream::ResourceAutoIncDec(pending_requests_)); InSequence s; EXPECT_CALL(callbacks_, route()); EXPECT_CALL(cm_, get(_)); + EXPECT_CALL(*dns_cache_manager_->dns_cache_, canCreateDnsRequest_(_)) + .WillOnce(Return(circuit_breakers_)); EXPECT_CALL(*transport_socket_factory_, implementsSecureTransport()).WillOnce(Return(true)); EXPECT_CALL(*dns_cache_manager_->dns_cache_, loadDnsCacheEntry_(Eq("foo"), 443, _)) .WillOnce(Return(MockLoadDnsCacheEntryResult{LoadDnsCacheEntryStatus::Overflow, nullptr})); @@ -124,10 +139,18 @@ TEST_F(ProxyFilterTest, CacheOverflow) { // Circuit breaker overflow TEST_F(ProxyFilterTest, CircuitBreakerOverflow) { + // Disable dns cache circuit breakers because which we expect to be used cluster circuit breakers. + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.enable_dns_cache_circuit_breakers", "false"}}); + Upstream::ResourceAutoIncDec* circuit_breakers_( + new Upstream::ResourceAutoIncDec(pending_requests_)); InSequence s; EXPECT_CALL(callbacks_, route()); EXPECT_CALL(cm_, get(_)); + EXPECT_CALL(*dns_cache_manager_->dns_cache_, canCreateDnsRequest_(_)) + .WillOnce(Return(circuit_breakers_)); EXPECT_CALL(*transport_socket_factory_, implementsSecureTransport()).WillOnce(Return(true)); Extensions::Common::DynamicForwardProxy::MockLoadDnsCacheEntryHandle* handle = new Extensions::Common::DynamicForwardProxy::MockLoadDnsCacheEntryHandle(); @@ -141,6 +164,7 @@ TEST_F(ProxyFilterTest, CircuitBreakerOverflow) { filter2->setDecoderFilterCallbacks(callbacks_); EXPECT_CALL(callbacks_, route()); EXPECT_CALL(cm_, get(_)); + EXPECT_CALL(*dns_cache_manager_->dns_cache_, canCreateDnsRequest_(_)); EXPECT_CALL(callbacks_, sendLocalReply(Http::Code::ServiceUnavailable, Eq("Dynamic forward proxy pending request overflow"), _, _, Eq("Dynamic forward proxy pending request overflow"))); @@ -156,6 +180,46 @@ TEST_F(ProxyFilterTest, CircuitBreakerOverflow) { filter_->onDestroy(); } +// Circuit breaker overflow with DNS Cache resource manager +TEST_F(ProxyFilterTest, CircuitBreakerOverflowWithDnsCacheResourceManager) { + Upstream::ResourceAutoIncDec* circuit_breakers_( + new Upstream::ResourceAutoIncDec(pending_requests_)); + InSequence s; + + EXPECT_CALL(callbacks_, route()); + EXPECT_CALL(cm_, get(_)); + EXPECT_CALL(*dns_cache_manager_->dns_cache_, canCreateDnsRequest_(_)) + .WillOnce(Return(circuit_breakers_)); + EXPECT_CALL(*transport_socket_factory_, implementsSecureTransport()).WillOnce(Return(true)); + Extensions::Common::DynamicForwardProxy::MockLoadDnsCacheEntryHandle* handle = + new Extensions::Common::DynamicForwardProxy::MockLoadDnsCacheEntryHandle(); + EXPECT_CALL(*dns_cache_manager_->dns_cache_, loadDnsCacheEntry_(Eq("foo"), 443, _)) + 
.WillOnce(Return(MockLoadDnsCacheEntryResult{LoadDnsCacheEntryStatus::Loading, handle})); + EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark, + filter_->decodeHeaders(request_headers_, false)); + + // Create a second filter for a 2nd request. + auto filter2 = std::make_unique(filter_config_); + filter2->setDecoderFilterCallbacks(callbacks_); + EXPECT_CALL(callbacks_, route()); + EXPECT_CALL(cm_, get(_)); + EXPECT_CALL(*dns_cache_manager_->dns_cache_, canCreateDnsRequest_(_)); + EXPECT_CALL(callbacks_, sendLocalReply(Http::Code::ServiceUnavailable, + Eq("Dynamic forward proxy pending request overflow"), _, _, + Eq("Dynamic forward proxy pending request overflow"))); + EXPECT_CALL(callbacks_, encodeHeaders_(_, false)); + EXPECT_CALL(callbacks_, encodeData(_, true)); + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter2->decodeHeaders(request_headers_, false)); + + // Cluster circuit breaker overflow counter won't be incremented. + EXPECT_EQ(0, + cm_.thread_local_cluster_.cluster_.info_->stats_.upstream_rq_pending_overflow_.value()); + filter2->onDestroy(); + EXPECT_CALL(*handle, onDestroy()); + filter_->onDestroy(); +} + // No route handling. TEST_F(ProxyFilterTest, NoRoute) { InSequence s; @@ -174,6 +238,8 @@ TEST_F(ProxyFilterTest, NoCluster) { } TEST_F(ProxyFilterTest, HostRewrite) { + Upstream::ResourceAutoIncDec* circuit_breakers_( + new Upstream::ResourceAutoIncDec(pending_requests_)); InSequence s; envoy::extensions::filters::http::dynamic_forward_proxy::v3::PerRouteConfig proto_config; @@ -182,6 +248,8 @@ TEST_F(ProxyFilterTest, HostRewrite) { EXPECT_CALL(callbacks_, route()); EXPECT_CALL(cm_, get(_)); + EXPECT_CALL(*dns_cache_manager_->dns_cache_, canCreateDnsRequest_(_)) + .WillOnce(Return(circuit_breakers_)); EXPECT_CALL(*transport_socket_factory_, implementsSecureTransport()).WillOnce(Return(false)); Extensions::Common::DynamicForwardProxy::MockLoadDnsCacheEntryHandle* handle = new Extensions::Common::DynamicForwardProxy::MockLoadDnsCacheEntryHandle(); @@ -198,6 +266,8 @@ TEST_F(ProxyFilterTest, HostRewrite) { } TEST_F(ProxyFilterTest, HostRewriteViaHeader) { + Upstream::ResourceAutoIncDec* circuit_breakers_( + new Upstream::ResourceAutoIncDec(pending_requests_)); InSequence s; envoy::extensions::filters::http::dynamic_forward_proxy::v3::PerRouteConfig proto_config; @@ -206,6 +276,8 @@ TEST_F(ProxyFilterTest, HostRewriteViaHeader) { EXPECT_CALL(callbacks_, route()); EXPECT_CALL(cm_, get(_)); + EXPECT_CALL(*dns_cache_manager_->dns_cache_, canCreateDnsRequest_(_)) + .WillOnce(Return(circuit_breakers_)); EXPECT_CALL(*transport_socket_factory_, implementsSecureTransport()).WillOnce(Return(false)); Extensions::Common::DynamicForwardProxy::MockLoadDnsCacheEntryHandle* handle = new Extensions::Common::DynamicForwardProxy::MockLoadDnsCacheEntryHandle(); diff --git a/test/mocks/upstream/mocks.cc b/test/mocks/upstream/mocks.cc index e5843f0bc64b..990aa6b1a620 100644 --- a/test/mocks/upstream/mocks.cc +++ b/test/mocks/upstream/mocks.cc @@ -177,5 +177,10 @@ MockRetryHostPredicate::~MockRetryHostPredicate() = default; MockClusterManagerFactory::MockClusterManagerFactory() = default; MockClusterManagerFactory::~MockClusterManagerFactory() = default; +MockBasicResourceLimit::MockBasicResourceLimit() { + ON_CALL(*this, canCreate()).WillByDefault(Return(true)); +} +MockBasicResourceLimit::~MockBasicResourceLimit() = default; + } // namespace Upstream } // namespace Envoy diff --git a/test/mocks/upstream/mocks.h b/test/mocks/upstream/mocks.h index 
ed48b0a5a814..0e78b3fcf46e 100644 --- a/test/mocks/upstream/mocks.h +++ b/test/mocks/upstream/mocks.h @@ -433,5 +433,19 @@ class TestRetryHostPredicateFactory : public RetryHostPredicateFactory { return ProtobufTypes::MessagePtr{new Envoy::ProtobufWkt::Struct()}; } }; + +class MockBasicResourceLimit : public ResourceLimit { +public: + MockBasicResourceLimit(); + ~MockBasicResourceLimit() override; + + MOCK_METHOD(bool, canCreate, ()); + MOCK_METHOD(void, inc, ()); + MOCK_METHOD(void, dec, ()); + MOCK_METHOD(void, decBy, (uint64_t)); + MOCK_METHOD(uint64_t, max, ()); + MOCK_METHOD(uint64_t, count, (), (const)); +}; + } // namespace Upstream } // namespace Envoy From f6b86a58b264b46a57d71a9b3b0989b2969df408 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Wed, 1 Jul 2020 22:57:53 -0400 Subject: [PATCH 516/909] test: one less infinite timeout (#11864) Replacing an infinite timeout in load stats test with a "fast" fail of 5s. Risk Level: n/a Testing: test passes Ideally gets debug info for #11784 Signed-off-by: Alyssa Wilk --- .../load_stats_integration_test.cc | 52 +++++++++++-------- 1 file changed, 29 insertions(+), 23 deletions(-) diff --git a/test/integration/load_stats_integration_test.cc b/test/integration/load_stats_integration_test.cc index 621cfed89441..ef6402e71403 100644 --- a/test/integration/load_stats_integration_test.cc +++ b/test/integration/load_stats_integration_test.cc @@ -238,10 +238,11 @@ class LoadStatsIntegrationTest : public Grpc::VersionedGrpcClientIntegrationPara } } - void + ABSL_MUST_USE_RESULT AssertionResult waitForLoadStatsRequest(const std::vector& expected_locality_stats, uint64_t dropped = 0) { + auto end_time = timeSystem().monotonicTime() + TestUtility::DefaultTimeout; Protobuf::RepeatedPtrField expected_cluster_stats; if (!expected_locality_stats.empty() || dropped != 0) { auto* cluster_stats = expected_cluster_stats.Add(); @@ -288,8 +289,13 @@ class LoadStatsIntegrationTest : public Grpc::VersionedGrpcClientIntegrationPara "StreamLoadStats", apiVersion()), loadstats_stream_->headers().getPathValue()); EXPECT_EQ("application/grpc", loadstats_stream_->headers().getContentTypeValue()); + if (timeSystem().monotonicTime() >= end_time) { + return TestUtility::assertRepeatedPtrFieldEqual(expected_cluster_stats, + loadstats_request.cluster_stats(), true); + } } while (!TestUtility::assertRepeatedPtrFieldEqual(expected_cluster_stats, loadstats_request.cluster_stats(), true)); + return testing::AssertionSuccess(); } void waitForUpstreamResponse(uint32_t endpoint_index, uint32_t response_code = 200) { @@ -388,7 +394,7 @@ TEST_P(LoadStatsIntegrationTest, Success) { initialize(); waitForLoadStatsStream(); - waitForLoadStatsRequest({}); + ASSERT_TRUE(waitForLoadStatsRequest({})); loadstats_stream_->startGrpcStream(); // Simple 50%/50% split between dragon/winter localities. Also include an @@ -402,8 +408,8 @@ TEST_P(LoadStatsIntegrationTest, Success) { } // Verify we do not get empty stats for non-zero priorities. - waitForLoadStatsRequest( - {localityStats("winter", 2, 0, 0, 2), localityStats("dragon", 2, 0, 0, 2)}); + ASSERT_TRUE(waitForLoadStatsRequest( + {localityStats("winter", 2, 0, 0, 2), localityStats("dragon", 2, 0, 0, 2)})); EXPECT_EQ(1, test_server_->counter("load_reporter.requests")->value()); // On slow machines, more than one load stats response may be pushed while we are simulating load. @@ -421,8 +427,8 @@ TEST_P(LoadStatsIntegrationTest, Success) { // No locality for priority=1 since there's no "winter" endpoints. 
// The hosts for dragon were received because membership_total is accurate. - waitForLoadStatsRequest( - {localityStats("winter", 2, 0, 0, 2), localityStats("dragon", 4, 0, 0, 4)}); + ASSERT_TRUE(waitForLoadStatsRequest( + {localityStats("winter", 2, 0, 0, 2), localityStats("dragon", 4, 0, 0, 4)})); EXPECT_EQ(2, test_server_->counter("load_reporter.requests")->value()); EXPECT_LE(3, test_server_->counter("load_reporter.responses")->value()); @@ -437,8 +443,8 @@ TEST_P(LoadStatsIntegrationTest, Success) { sendAndReceiveUpstream(i % 2 + 3); } - waitForLoadStatsRequest( - {localityStats("winter", 2, 0, 0, 2, 1), localityStats("dragon", 2, 0, 0, 2, 1)}); + ASSERT_TRUE(waitForLoadStatsRequest( + {localityStats("winter", 2, 0, 0, 2, 1), localityStats("dragon", 2, 0, 0, 2, 1)})); EXPECT_EQ(3, test_server_->counter("load_reporter.requests")->value()); EXPECT_LE(4, test_server_->counter("load_reporter.responses")->value()); EXPECT_EQ(0, test_server_->counter("load_reporter.errors")->value()); @@ -452,7 +458,7 @@ TEST_P(LoadStatsIntegrationTest, Success) { sendAndReceiveUpstream(1); } - waitForLoadStatsRequest({localityStats("winter", 1, 0, 0, 1)}); + ASSERT_TRUE(waitForLoadStatsRequest({localityStats("winter", 1, 0, 0, 1)})); EXPECT_EQ(4, test_server_->counter("load_reporter.requests")->value()); EXPECT_LE(5, test_server_->counter("load_reporter.responses")->value()); EXPECT_EQ(0, test_server_->counter("load_reporter.errors")->value()); @@ -465,7 +471,7 @@ TEST_P(LoadStatsIntegrationTest, Success) { sendAndReceiveUpstream(1); sendAndReceiveUpstream(1); - waitForLoadStatsRequest({localityStats("winter", 3, 0, 0, 3)}); + ASSERT_TRUE(waitForLoadStatsRequest({localityStats("winter", 3, 0, 0, 3)})); EXPECT_EQ(6, test_server_->counter("load_reporter.requests")->value()); EXPECT_LE(6, test_server_->counter("load_reporter.responses")->value()); @@ -479,7 +485,7 @@ TEST_P(LoadStatsIntegrationTest, Success) { sendAndReceiveUpstream(1); sendAndReceiveUpstream(1); - waitForLoadStatsRequest({localityStats("winter", 2, 0, 0, 2)}); + ASSERT_TRUE(waitForLoadStatsRequest({localityStats("winter", 2, 0, 0, 2)})); EXPECT_EQ(8, test_server_->counter("load_reporter.requests")->value()); EXPECT_LE(7, test_server_->counter("load_reporter.responses")->value()); @@ -496,7 +502,7 @@ TEST_P(LoadStatsIntegrationTest, LocalityWeighted) { initialize(); waitForLoadStatsStream(); - waitForLoadStatsRequest({}); + ASSERT_TRUE(waitForLoadStatsRequest({})); loadstats_stream_->startGrpcStream(); requestLoadStatsResponse({"cluster_0"}); @@ -514,8 +520,8 @@ TEST_P(LoadStatsIntegrationTest, LocalityWeighted) { sendAndReceiveUpstream(0); // Verify we get the expect request distribution. - waitForLoadStatsRequest( - {localityStats("winter", 4, 0, 0, 4), localityStats("dragon", 2, 0, 0, 2)}); + ASSERT_TRUE(waitForLoadStatsRequest( + {localityStats("winter", 4, 0, 0, 4), localityStats("dragon", 2, 0, 0, 2)})); EXPECT_EQ(1, test_server_->counter("load_reporter.requests")->value()); // On slow machines, more than one load stats response may be pushed while we are simulating load. @@ -531,7 +537,7 @@ TEST_P(LoadStatsIntegrationTest, NoLocalLocality) { initialize(); waitForLoadStatsStream(); - waitForLoadStatsRequest({}); + ASSERT_TRUE(waitForLoadStatsRequest({})); loadstats_stream_->startGrpcStream(); // Simple 50%/50% split between dragon/winter localities. 
Also include an @@ -548,8 +554,8 @@ TEST_P(LoadStatsIntegrationTest, NoLocalLocality) { // order of locality stats is different to the Success case, where winter is // the local locality (and hence first in the list as per // HostsPerLocality::get()). - waitForLoadStatsRequest( - {localityStats("dragon", 2, 0, 0, 2), localityStats("winter", 2, 0, 0, 2)}); + ASSERT_TRUE(waitForLoadStatsRequest( + {localityStats("dragon", 2, 0, 0, 2), localityStats("winter", 2, 0, 0, 2)})); EXPECT_EQ(1, test_server_->counter("load_reporter.requests")->value()); // On slow machines, more than one load stats response may be pushed while we are simulating load. @@ -564,7 +570,7 @@ TEST_P(LoadStatsIntegrationTest, Error) { initialize(); waitForLoadStatsStream(); - waitForLoadStatsRequest({}); + ASSERT_TRUE(waitForLoadStatsRequest({})); loadstats_stream_->startGrpcStream(); requestLoadStatsResponse({"cluster_0"}); @@ -576,7 +582,7 @@ TEST_P(LoadStatsIntegrationTest, Error) { // This should count as "success" since non-5xx. sendAndReceiveUpstream(0, 404); - waitForLoadStatsRequest({localityStats("winter", 1, 1, 0, 2)}); + ASSERT_TRUE(waitForLoadStatsRequest({localityStats("winter", 1, 1, 0, 2)})); EXPECT_EQ(1, test_server_->counter("load_reporter.requests")->value()); EXPECT_LE(2, test_server_->counter("load_reporter.responses")->value()); @@ -590,13 +596,13 @@ TEST_P(LoadStatsIntegrationTest, InProgress) { initialize(); waitForLoadStatsStream(); - waitForLoadStatsRequest({}); + ASSERT_TRUE(waitForLoadStatsRequest({})); loadstats_stream_->startGrpcStream(); updateClusterLoadAssignment({{0}}, {}, {}, {}); requestLoadStatsResponse({"cluster_0"}); initiateClientConnection(); - waitForLoadStatsRequest({localityStats("winter", 0, 0, 1, 1)}); + ASSERT_TRUE(waitForLoadStatsRequest({localityStats("winter", 0, 0, 1, 1)})); waitForUpstreamResponse(0, 503); cleanupUpstreamAndDownstream(); @@ -618,7 +624,7 @@ TEST_P(LoadStatsIntegrationTest, Dropped) { initialize(); waitForLoadStatsStream(); - waitForLoadStatsRequest({}); + ASSERT_TRUE(waitForLoadStatsRequest({})); loadstats_stream_->startGrpcStream(); updateClusterLoadAssignment({{0}}, {}, {}, {}); @@ -630,7 +636,7 @@ TEST_P(LoadStatsIntegrationTest, Dropped) { EXPECT_EQ("503", response_->headers().getStatusValue()); cleanupUpstreamAndDownstream(); - waitForLoadStatsRequest({}, 1); + ASSERT_TRUE(waitForLoadStatsRequest({}, 1)); EXPECT_EQ(1, test_server_->counter("load_reporter.requests")->value()); EXPECT_LE(2, test_server_->counter("load_reporter.responses")->value()); From 895c85af7e20e98065f83fb19c630ba39c2d6e77 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Thu, 2 Jul 2020 08:56:53 -0400 Subject: [PATCH 517/909] test: deflaking filter manager integration test (#11866) Risk Level: n/a (test only) Testing: passes Docs Changes: n/a Fixes #11865 Signed-off-by: Alyssa Wilk --- test/integration/filter_manager_integration_test.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration/filter_manager_integration_test.cc b/test/integration/filter_manager_integration_test.cc index 9bed3be54fa0..c601a0f20702 100644 --- a/test/integration/filter_manager_integration_test.cc +++ b/test/integration/filter_manager_integration_test.cc @@ -468,12 +468,12 @@ TEST_P(InjectDataWithEchoFilterIntegrationTest, FilterChainMismatch) { initialize(); auto tcp_client = makeTcpConnection(lookupPort("listener_0")); - ASSERT_TRUE(tcp_client->write("hello")); + ASSERT_TRUE(tcp_client->write("hello", false)); std::string access_log = absl::StrCat("NR ", 
StreamInfo::ResponseCodeDetails::get().FilterChainNotFound); EXPECT_THAT(waitForAccessLog(listener_access_log_name_), testing::HasSubstr(access_log)); - tcp_client->close(); + tcp_client->waitForDisconnect(); } /** From 73fc620a34135a16070083f3c94b93d074f6e59f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Guti=C3=A9rrez=20Segal=C3=A9s?= Date: Thu, 2 Jul 2020 12:08:54 -0400 Subject: [PATCH 518/909] router: add regex substitution to header hashing (#11819) Extend hashing via header to support regex substitution. This is useful when you need to hash on a transformation of a header value (e.g.: extract a part of `:path`). Fixes #11811 Signed-off-by: Raul Gutierrez Segales --- .../config/route/v3/route_components.proto | 4 ++++ .../route/v4alpha/route_components.proto | 4 ++++ docs/root/version_history/current.rst | 1 + .../config/route/v3/route_components.proto | 4 ++++ .../route/v4alpha/route_components.proto | 4 ++++ source/common/http/hash_policy.cc | 24 +++++++++++++++---- test/common/router/config_impl_test.cc | 21 ++++++++++++++++ 7 files changed, 58 insertions(+), 4 deletions(-) diff --git a/api/envoy/config/route/v3/route_components.proto b/api/envoy/config/route/v3/route_components.proto index f0957363f988..4120babdf069 100644 --- a/api/envoy/config/route/v3/route_components.proto +++ b/api/envoy/config/route/v3/route_components.proto @@ -615,6 +615,10 @@ message RouteAction { string header_name = 1 [ (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false} ]; + + // If specified, the request header value will be rewritten and used + // to produce the hash key. + type.matcher.v3.RegexMatchAndSubstitute regex_rewrite = 2; } // Envoy supports two types of cookie affinity: diff --git a/api/envoy/config/route/v4alpha/route_components.proto b/api/envoy/config/route/v4alpha/route_components.proto index 6f7298d29d67..2bc1da848606 100644 --- a/api/envoy/config/route/v4alpha/route_components.proto +++ b/api/envoy/config/route/v4alpha/route_components.proto @@ -608,6 +608,10 @@ message RouteAction { string header_name = 1 [ (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false} ]; + + // If specified, the request header value will be rewritten and used + // to produce the hash key. + type.matcher.v4alpha.RegexMatchAndSubstitute regex_rewrite = 2; } // Envoy supports two types of cookie affinity: diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index e56dd5a66e50..4fe27e834363 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -128,6 +128,7 @@ New Features * request_id: added to :ref:`always_set_request_id_in_response setting ` to set :ref:`x-request-id ` header in response even if tracing is not forced. +* router: add regex substitution support for header based hashing. * router: add support for RESPONSE_FLAGS and RESPONSE_CODE_DETAILS :ref:`header formatters `. * router: allow Rate Limiting Service to be called in case of missing request header for a descriptor if the :ref:`skip_if_absent ` field is set to true. 
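For illustration, a route configuration using the new `regex_rewrite` field might look roughly like the sketch below (cluster name, pattern, and substitution are made up for this example and are not taken from the patch); the header value is rewritten first, and the rewritten string is what gets hashed:

    route:
      cluster: example_cluster
      hash_policy:
      - header:
          header_name: ":path"
          regex_rewrite:
            pattern:
              google_re2: {}
              regex: "^/users/(\\d+)(/.*)?$"
            substitution: "\\1"

Under a config along these lines, "/users/123/cart" and "/users/123/profile" would both hash on "123", so with ring hash or Maglev load balancing they should be sent to the same upstream host.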
diff --git a/generated_api_shadow/envoy/config/route/v3/route_components.proto b/generated_api_shadow/envoy/config/route/v3/route_components.proto index 211d3c63f768..b91cf10620ed 100644 --- a/generated_api_shadow/envoy/config/route/v3/route_components.proto +++ b/generated_api_shadow/envoy/config/route/v3/route_components.proto @@ -626,6 +626,10 @@ message RouteAction { string header_name = 1 [ (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false} ]; + + // If specified, the request header value will be rewritten and used + // to produce the hash key. + type.matcher.v3.RegexMatchAndSubstitute regex_rewrite = 2; } // Envoy supports two types of cookie affinity: diff --git a/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto b/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto index ea664e5d62b6..9cdb879eed97 100644 --- a/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto +++ b/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto @@ -617,6 +617,10 @@ message RouteAction { string header_name = 1 [ (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false} ]; + + // If specified, the request header value will be rewritten and used + // to produce the hash key. + type.matcher.v4alpha.RegexMatchAndSubstitute regex_rewrite = 2; } // Envoy supports two types of cookie affinity: diff --git a/source/common/http/hash_policy.cc b/source/common/http/hash_policy.cc index bb7e4211c658..d00dbb99fed7 100644 --- a/source/common/http/hash_policy.cc +++ b/source/common/http/hash_policy.cc @@ -2,6 +2,8 @@ #include "envoy/config/route/v3/route_components.pb.h" +#include "common/common/matchers.h" +#include "common/common/regex.h" #include "common/http/utility.h" #include "absl/strings/str_cat.h" @@ -21,8 +23,15 @@ class HashMethodImplBase : public HashPolicyImpl::HashMethod { class HeaderHashMethod : public HashMethodImplBase { public: - HeaderHashMethod(const std::string& header_name, bool terminal) - : HashMethodImplBase(terminal), header_name_(header_name) {} + HeaderHashMethod(const envoy::config::route::v3::RouteAction::HashPolicy::Header& header, + bool terminal) + : HashMethodImplBase(terminal), header_name_(header.header_name()) { + if (header.has_regex_rewrite()) { + const auto& rewrite_spec = header.regex_rewrite(); + regex_rewrite_ = Regex::Utility::parseRegex(rewrite_spec.pattern()); + regex_rewrite_substitution_ = rewrite_spec.substitution(); + } + } absl::optional evaluate(const Network::Address::Instance*, const RequestHeaderMap& headers, @@ -32,13 +41,20 @@ class HeaderHashMethod : public HashMethodImplBase { const HeaderEntry* header = headers.get(header_name_); if (header) { - hash = HashUtil::xxHash64(header->value().getStringView()); + if (regex_rewrite_ != nullptr) { + hash = HashUtil::xxHash64(regex_rewrite_->replaceAll(header->value().getStringView(), + regex_rewrite_substitution_)); + } else { + hash = HashUtil::xxHash64(header->value().getStringView()); + } } return hash; } private: const LowerCaseString header_name_; + Regex::CompiledMatcherPtr regex_rewrite_{}; + std::string regex_rewrite_substitution_{}; }; class CookieHashMethod : public HashMethodImplBase { @@ -145,7 +161,7 @@ HashPolicyImpl::HashPolicyImpl( switch (hash_policy->policy_specifier_case()) { case envoy::config::route::v3::RouteAction::HashPolicy::PolicySpecifierCase::kHeader: hash_impls_.emplace_back( - new HeaderHashMethod(hash_policy->header().header_name(), 
hash_policy->terminal())); + new HeaderHashMethod(hash_policy->header(), hash_policy->terminal())); break; case envoy::config::route::v3::RouteAction::HashPolicy::PolicySpecifierCase::kCookie: { absl::optional ttl; diff --git a/test/common/router/config_impl_test.cc b/test/common/router/config_impl_test.cc index 392b7b9b79ee..f116e51bee79 100644 --- a/test/common/router/config_impl_test.cc +++ b/test/common/router/config_impl_test.cc @@ -2205,6 +2205,27 @@ TEST_F(RouterMatcherHashPolicyTest, HashHeaders) { } } +TEST_F(RouterMatcherHashPolicyTest, HashHeadersRegexSubstitution) { + // Apply a regex substitution before hashing. + auto* header = firstRouteHashPolicy()->mutable_header(); + header->set_header_name(":path"); + auto* regex_spec = header->mutable_regex_rewrite(); + regex_spec->set_substitution("\\1"); + auto* pattern = regex_spec->mutable_pattern(); + pattern->mutable_google_re2(); + pattern->set_regex("^/(\\w+)$"); + { + Http::TestRequestHeaderMapImpl headers = genHeaders("www.lyft.com", "/foo", "GET"); + Router::RouteConstSharedPtr route = config().route(headers, 0); + const auto foo_hash_value = 3728699739546630719; + EXPECT_EQ(route->routeEntry() + ->hashPolicy() + ->generateHash(nullptr, headers, add_cookie_nop_, nullptr) + .value(), + foo_hash_value); + } +} + class RouterMatcherCookieHashPolicyTest : public RouterMatcherHashPolicyTest { public: RouterMatcherCookieHashPolicyTest() { From 093e2ffe046313242144d0431f1bb5cf18d82544 Mon Sep 17 00:00:00 2001 From: Snow Pettersen Date: Thu, 2 Jul 2020 12:37:34 -0400 Subject: [PATCH 519/909] tcp_proxy: only allow tunneling if the cluster is configured for HTTP/2 (#11852) Signed-off-by: Snow Pettersen --- source/common/tcp_proxy/tcp_proxy.cc | 36 ++++++++++++++----- source/common/tcp_proxy/tcp_proxy.h | 1 + .../tcp_tunneling_integration_test.cc | 17 +++++++++ 3 files changed, 45 insertions(+), 9 deletions(-) diff --git a/source/common/tcp_proxy/tcp_proxy.cc b/source/common/tcp_proxy/tcp_proxy.cc index b5b489238790..92dd68e4be4a 100644 --- a/source/common/tcp_proxy/tcp_proxy.cc +++ b/source/common/tcp_proxy/tcp_proxy.cc @@ -410,6 +410,16 @@ Network::FilterStatus Filter::initializeUpstreamConnection() { downstreamConnection()->streamInfo().filterState()); } + if (!maybeTunnel(cluster_name)) { + // Either cluster is unknown or there are no healthy hosts. tcpConnPoolForCluster() increments + // cluster->stats().upstream_cx_none_healthy in the latter case. + getStreamInfo().setResponseFlag(StreamInfo::ResponseFlag::NoHealthyUpstream); + onInitFailure(UpstreamFailureReason::NoHealthyUpstream); + } + return Network::FilterStatus::StopIteration; +} + +bool Filter::maybeTunnel(const std::string& cluster_name) { if (!config_->tunnelingConfig()) { Tcp::ConnectionPool::Instance* conn_pool = cluster_manager_.tcpConnPoolForCluster( cluster_name, Upstream::ResourcePriority::Default, this); @@ -428,11 +438,23 @@ Network::FilterStatus Filter::initializeUpstreamConnection() { } // Because we never return open connections to the pool, this either has a handle waiting on // connection completion, or onPoolFailure has been invoked. Either way, stop iteration. - return Network::FilterStatus::StopIteration; + return true; } } else { + auto* cluster = cluster_manager_.get(cluster_name); + if (!cluster) { + return false; + } + // TODO(snowp): Ideally we should prevent this from being configured, but that's tricky to get + // right since whether a cluster is invalid depends on both the tcp_proxy config + cluster + // config. 
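The feature check added just below rejects tunneling when the selected cluster has not been configured for HTTP/2. As a rough sketch (cluster name, hostname, and endpoint values are illustrative, not from this patch), a tcp_proxy and cluster pairing that satisfies the check would look something like:

    # Listener filter (fragment):
    - name: envoy.filters.network.tcp_proxy
      typed_config:
        "@type": type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy
        stat_prefix: tcp_tunneling
        cluster: proxy_cluster
        tunneling_config:
          hostname: upstream.example.com:443

    # Matching cluster (fragment):
    - name: proxy_cluster
      connect_timeout: 1s
      type: STRICT_DNS
      # Without HTTP/2 enabled on the cluster, the new check fails and the
      # downstream connection is closed rather than attempting CONNECT over HTTP/1.1.
      http2_protocol_options: {}
      load_assignment:
        cluster_name: proxy_cluster
        endpoints:
        - lb_endpoints:
          - endpoint:
              address:
                socket_address: { address: proxy.example.com, port_value: 443 }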
+ if ((cluster->info()->features() & Upstream::ClusterInfo::Features::HTTP2) == 0) { + ENVOY_LOG(error, "Attempted to tunnel over HTTP/1.1, this is not supported. Set " + "http2_protocol_options on the cluster."); + return false; + } Http::ConnectionPool::Instance* conn_pool = cluster_manager_.httpConnPoolForCluster( - cluster_name, Upstream::ResourcePriority::Default, Http::Protocol::Http2, this); + cluster_name, Upstream::ResourcePriority::Default, absl::nullopt, this); if (conn_pool) { upstream_ = std::make_unique(*upstream_callbacks_, config_->tunnelingConfig()->hostname()); @@ -443,16 +465,12 @@ Network::FilterStatus Filter::initializeUpstreamConnection() { ASSERT(upstream_handle_.get() == nullptr); upstream_handle_ = std::make_shared(cancellable); } - return Network::FilterStatus::StopIteration; + return true; } } - // Either cluster is unknown or there are no healthy hosts. tcpConnPoolForCluster() increments - // cluster->stats().upstream_cx_none_healthy in the latter case. - getStreamInfo().setResponseFlag(StreamInfo::ResponseFlag::NoHealthyUpstream); - onInitFailure(UpstreamFailureReason::NoHealthyUpstream); - return Network::FilterStatus::StopIteration; -} + return false; +} void Filter::onPoolFailure(ConnectionPool::PoolFailureReason reason, Upstream::HostDescriptionConstSharedPtr host) { upstream_handle_.reset(); diff --git a/source/common/tcp_proxy/tcp_proxy.h b/source/common/tcp_proxy/tcp_proxy.h index 950f8f654dbe..cbdddc080ec4 100644 --- a/source/common/tcp_proxy/tcp_proxy.h +++ b/source/common/tcp_proxy/tcp_proxy.h @@ -353,6 +353,7 @@ class Filter : public Network::ReadFilter, void initialize(Network::ReadFilterCallbacks& callbacks, bool set_connection_stats); Network::FilterStatus initializeUpstreamConnection(); + bool maybeTunnel(const std::string& cluster_name); void onConnectTimeout(); void onDownstreamEvent(Network::ConnectionEvent event); void onUpstreamData(Buffer::Instance& data, bool end_stream); diff --git a/test/integration/tcp_tunneling_integration_test.cc b/test/integration/tcp_tunneling_integration_test.cc index d621bf9aa9f6..01931f131ebd 100644 --- a/test/integration/tcp_tunneling_integration_test.cc +++ b/test/integration/tcp_tunneling_integration_test.cc @@ -315,6 +315,23 @@ TEST_P(TcpTunnelingIntegrationTest, Basic) { upstream_request_->encodeData(0, true); } +// Validates that if the cluster is not configured with HTTP/2 we don't attempt +// to tunnel the data. +TEST_P(TcpTunnelingIntegrationTest, InvalidCluster) { + config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { + bootstrap.mutable_static_resources() + ->mutable_clusters() + ->Mutable(0) + ->clear_http2_protocol_options(); + }); + initialize(); + + // Start a connection and see it close immediately due to the invalid cluster. + IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); + tcp_client->waitForHalfClose(); + tcp_client->close(); +} + TEST_P(TcpTunnelingIntegrationTest, InvalidResponseHeaders) { initialize(); From 2499ba4e9d9a894e9354467a18beea8167b96437 Mon Sep 17 00:00:00 2001 From: Dhi Aurrahman Date: Mon, 6 Jul 2020 19:57:58 +0700 Subject: [PATCH 520/909] ext_authz: Add test for headers_to_add and headers_to_append (#11617) This patch adds integration tests for the case when the authorization server sends back `headers_to_add` and `headers_to_append`. 
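To make the added/appended distinction concrete, a CheckResponse from the authorization server might carry header options along these lines (the key and value strings are illustrative and simply mirror the shape used by the new tests):

    ok_response:
      headers:
      - header: { key: "header1", value: "header1-replaced" }
        append: false   # replaces any existing "header1" value on the request
      - header: { key: "header2", value: "header2-appended" }
        append: true    # combined with an existing "header2" value, separated by a comma

With append set to true, the filter only modifies headers that are already present on the request; appending to a header the client never sent is currently ignored unless the same key is first added with append set to false.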
Signed-off-by: Dhi Aurrahman --- .../filters/http/ext_authz/ext_authz.cc | 23 ++- .../ext_authz/ext_authz_integration_test.cc | 177 +++++++++++++++++- 2 files changed, 186 insertions(+), 14 deletions(-) diff --git a/source/extensions/filters/http/ext_authz/ext_authz.cc b/source/extensions/filters/http/ext_authz/ext_authz.cc index 8159b2b9e196..7e8899e3d972 100644 --- a/source/extensions/filters/http/ext_authz/ext_authz.cc +++ b/source/extensions/filters/http/ext_authz/ext_authz.cc @@ -49,7 +49,7 @@ void Filter::initiateCall(const Http::RequestHeaderMap& headers, context_extensions = maybe_merged_per_route_config.value().takeContextExtensions(); } - // If metadata_context_namespaces is specified, pass matching metadata to the ext_authz service + // If metadata_context_namespaces is specified, pass matching metadata to the ext_authz service. envoy::config::core::v3::Metadata metadata_context; const auto& request_metadata = callbacks_->streamInfo().dynamicMetadata().filter_metadata(); for (const auto& context_key : config_->metadataContextNamespaces()) { @@ -180,8 +180,19 @@ void Filter::onComplete(Filters::Common::ExtAuthz::ResponsePtr&& response) { } for (const auto& header : response->headers_to_append) { const Http::HeaderEntry* header_to_modify = request_headers_->get(header.first); - if (header_to_modify) { + // TODO(dio): Add a flag to allow appending non-existent headers, without setting it first + // (via `headers_to_add`). For example, given: + // 1. Original headers {"original": "true"} + // 2. Response headers from the authorization servers {{"append": "1"}, {"append": "2"}} + // + // Currently it is not possible to add {{"append": "1"}, {"append": "2"}} (the intended + // combined headers: {{"original": "true"}, {"append": "1"}, {"append": "2"}}) to the request + // to upstream server by only sets `headers_to_append`. + if (header_to_modify != nullptr) { ENVOY_STREAM_LOG(trace, "'{}':'{}'", *callbacks_, header.first.get(), header.second); + // The current behavior of appending is by combining entries with the same key, into one + // entry. The value of that combined entry is separated by ",". + // TODO(dio): Consider to use addCopy instead. request_headers_->appendCopy(header.first, header.second); } } @@ -220,13 +231,13 @@ void Filter::onComplete(Filters::Common::ExtAuthz::ResponsePtr&& response) { &callbacks = *callbacks_](Http::HeaderMap& response_headers) -> void { ENVOY_STREAM_LOG(trace, "ext_authz filter added header(s) to the local response:", callbacks); - // First remove all headers requested by the ext_authz filter, - // to ensure that they will override existing headers + // Firstly, remove all headers requested by the ext_authz filter, to ensure that they will + // override existing headers. for (const auto& header : headers) { response_headers.remove(header.first); } - // Then set all of the requested headers, allowing the - // same header to be set multiple times, e.g. `Set-Cookie` + // Then set all of the requested headers, allowing the same header to be set multiple + // times, e.g. `Set-Cookie`. 
for (const auto& header : headers) { ENVOY_STREAM_LOG(trace, " '{}':'{}'", callbacks, header.first.get(), header.second); response_headers.addCopy(header.first, header.second); diff --git a/test/extensions/filters/http/ext_authz/ext_authz_integration_test.cc b/test/extensions/filters/http/ext_authz/ext_authz_integration_test.cc index 83787a393f85..fe37da4eaa30 100644 --- a/test/extensions/filters/http/ext_authz/ext_authz_integration_test.cc +++ b/test/extensions/filters/http/ext_authz/ext_authz_integration_test.cc @@ -15,9 +15,14 @@ #include "gtest/gtest.h" using testing::AssertionResult; +using testing::Not; +using testing::TestWithParam; +using testing::ValuesIn; namespace Envoy { +using Headers = std::vector>; + class ExtAuthzGrpcIntegrationTest : public Grpc::VersionedGrpcClientIntegrationParamTest, public HttpIntegrationTest { public: @@ -63,11 +68,27 @@ class ExtAuthzGrpcIntegrationTest : public Grpc::VersionedGrpcClientIntegrationP } } - void initiateClientConnection(uint64_t request_body_length) { + void initiateClientConnection(uint64_t request_body_length, + const Headers& headers_to_add = Headers{}, + const Headers& headers_to_append = Headers{}) { auto conn = makeClientConnection(lookupPort("http")); codec_client_ = makeHttpConnection(std::move(conn)); Http::TestRequestHeaderMapImpl headers{ {":method", "POST"}, {":path", "/test"}, {":scheme", "http"}, {":authority", "host"}}; + + // Initialize headers to append. If the authorization server returns any matching keys with one + // of value in headers_to_add, the header entry from authorization server replaces the one in + // headers_to_add. + for (const auto& header_to_add : headers_to_add) { + headers.addCopy(header_to_add.first, header_to_add.second); + } + + // Initialize headers to append. If the authorization server returns any matching keys with one + // of value in headers_to_append, it will be appended. + for (const auto& headers_to_append : headers_to_append) { + headers.addCopy(headers_to_append.first, headers_to_append.second); + } + TestUtility::feedBufferWithRandomCharacters(request_body_, request_body_length); response_ = codec_client_->makeRequestWithBody(headers, request_body_.toString()); } @@ -113,7 +134,13 @@ class ExtAuthzGrpcIntegrationTest : public Grpc::VersionedGrpcClientIntegrationP RELEASE_ASSERT(result, result.message()); } - void waitForSuccessfulUpstreamResponse(const std::string& expected_response_code) { + void waitForSuccessfulUpstreamResponse( + const std::string& expected_response_code, const Headers& headers_to_add = Headers{}, + const Headers& headers_to_append = Headers{}, + const Http::TestRequestHeaderMapImpl& new_headers_from_upstream = + Http::TestRequestHeaderMapImpl{}, + const Http::TestRequestHeaderMapImpl& headers_to_append_multiple = + Http::TestRequestHeaderMapImpl{}) { AssertionResult result = fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_); RELEASE_ASSERT(result, result.message()); @@ -124,6 +151,52 @@ class ExtAuthzGrpcIntegrationTest : public Grpc::VersionedGrpcClientIntegrationP upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, false); upstream_request_->encodeData(response_size_, true); + + for (const auto& header_to_add : headers_to_add) { + EXPECT_THAT(upstream_request_->headers(), + Http::HeaderValueOf(header_to_add.first, header_to_add.second)); + // For headers_to_add (with append = false), the original request headers have no "-replaced" + // suffix, but the ones from the authorization server have it. 
+ EXPECT_TRUE(absl::EndsWith(header_to_add.second, "-replaced")); + } + + for (const auto& header_to_append : headers_to_append) { + // The current behavior of appending is using the "appendCopy", which ALWAYS combines entries + // with the same key into one key, and the values are separated by "," (regardless it is an + // inline-header or not). In addition to that, it only applies to the existing headers (the + // header is existed in the original request headers). + EXPECT_THAT( + upstream_request_->headers(), + Http::HeaderValueOf( + header_to_append.first, + // In this test, the keys and values of the original request headers have the same + // string value. Hence for "header2" key, the value is "header2,header2-appended". + absl::StrCat(header_to_append.first, ",", header_to_append.second))); + const auto value = upstream_request_->headers() + .get(Http::LowerCaseString(header_to_append.first)) + ->value() + .getStringView(); + EXPECT_TRUE(absl::EndsWith(value, "-appended")); + const auto values = StringUtil::splitToken(value, ","); + EXPECT_EQ(2, values.size()); + } + + if (!new_headers_from_upstream.empty()) { + // new_headers_from_upstream has append = true. The current implementation ignores to set + // multiple headers that are not present in the original request headers. In order to add + // headers with the same key multiple times, setting response headers with append = false and + // append = true is required. + EXPECT_THAT(new_headers_from_upstream, + Not(Http::IsSubsetOfHeaders(upstream_request_->headers()))); + } + + if (!headers_to_append_multiple.empty()) { + // headers_to_append_multiple has append = false for the first entry of multiple entries, and + // append = true for the rest entries. + EXPECT_THAT(upstream_request_->headers(), + Http::HeaderValueOf("multiple", "multiple-first,multiple-second")); + } + response_->waitForEndStream(); EXPECT_TRUE(upstream_request_->complete()); @@ -134,10 +207,61 @@ class ExtAuthzGrpcIntegrationTest : public Grpc::VersionedGrpcClientIntegrationP EXPECT_EQ(response_size_, response_->body().size()); } - void sendExtAuthzResponse() { + void sendExtAuthzResponse(const Headers& headers_to_add, const Headers& headers_to_append, + const Http::TestRequestHeaderMapImpl& new_headers_from_upstream, + const Http::TestRequestHeaderMapImpl& headers_to_append_multiple) { ext_authz_request_->startGrpcStream(); envoy::service::auth::v3::CheckResponse check_response; check_response.mutable_status()->set_code(Grpc::Status::WellKnownGrpcStatus::Ok); + + for (const auto& header_to_add : headers_to_add) { + auto* entry = check_response.mutable_ok_response()->mutable_headers()->Add(); + entry->mutable_append()->set_value(false); + entry->mutable_header()->set_key(header_to_add.first); + entry->mutable_header()->set_value(header_to_add.second); + } + + for (const auto& header_to_append : headers_to_append) { + auto* entry = check_response.mutable_ok_response()->mutable_headers()->Add(); + entry->mutable_append()->set_value(true); + entry->mutable_header()->set_key(header_to_append.first); + entry->mutable_header()->set_value(header_to_append.second); + } + + // Entries in this headers are not present in the original request headers. + new_headers_from_upstream.iterate( + [](const Http::HeaderEntry& h, void* context) -> Http::HeaderMap::Iterate { + auto* entry = static_cast(context) + ->mutable_ok_response() + ->mutable_headers() + ->Add(); + // Try to append to a non-existent field. 
+ entry->mutable_append()->set_value(true); + entry->mutable_header()->set_key(std::string(h.key().getStringView())); + entry->mutable_header()->set_value(std::string(h.value().getStringView())); + return Http::HeaderMap::Iterate::Continue; + }, + &check_response); + + // Entries in this headers are not present in the original request headers. But we set append = + // true and append = false. + headers_to_append_multiple.iterate( + [](const Http::HeaderEntry& h, void* context) -> Http::HeaderMap::Iterate { + auto* entry = static_cast(context) + ->mutable_ok_response() + ->mutable_headers() + ->Add(); + const auto key = std::string(h.key().getStringView()); + const auto value = std::string(h.value().getStringView()); + + // This scenario makes sure we have set the headers to be appended later. + entry->mutable_append()->set_value(!absl::EndsWith(value, "-first")); + entry->mutable_header()->set_key(key); + entry->mutable_header()->set_value(value); + return Http::HeaderMap::Iterate::Continue; + }, + &check_response); + ext_authz_request_->sendGrpcMessage(check_response); ext_authz_request_->finishGrpcStream(Grpc::Status::Ok); } @@ -183,13 +307,37 @@ class ExtAuthzGrpcIntegrationTest : public Grpc::VersionedGrpcClientIntegrationP void expectCheckRequestWithBody(Http::CodecClient::Type downstream_protocol, uint64_t request_size) { + expectCheckRequestWithBodyWithHeaders(downstream_protocol, request_size, Headers{}, Headers{}, + Http::TestRequestHeaderMapImpl{}, + Http::TestRequestHeaderMapImpl{}); + } + + void expectCheckRequestWithBodyWithHeaders( + Http::CodecClient::Type downstream_protocol, uint64_t request_size, + const Headers& headers_to_add, const Headers& headers_to_append, + const Http::TestRequestHeaderMapImpl& new_headers_from_upstream, + const Http::TestRequestHeaderMapImpl& headers_to_append_multiple) { initializeConfig(); setDownstreamProtocol(downstream_protocol); HttpIntegrationTest::initialize(); - initiateClientConnection(request_size); + initiateClientConnection(request_size, headers_to_add, headers_to_append); waitForExtAuthzRequest(expectedCheckRequest(downstream_protocol)); - sendExtAuthzResponse(); - waitForSuccessfulUpstreamResponse("200"); + + Headers updated_headers_to_add; + for (auto& header_to_add : headers_to_add) { + updated_headers_to_add.push_back( + std::make_pair(header_to_add.first, header_to_add.second + "-replaced")); + } + Headers updated_headers_to_append; + for (const auto& header_to_append : headers_to_append) { + updated_headers_to_append.push_back( + std::make_pair(header_to_append.first, header_to_append.second + "-appended")); + } + sendExtAuthzResponse(updated_headers_to_add, updated_headers_to_append, + new_headers_from_upstream, headers_to_append_multiple); + + waitForSuccessfulUpstreamResponse("200", updated_headers_to_add, updated_headers_to_append, + new_headers_from_upstream, headers_to_append_multiple); cleanup(); } @@ -221,7 +369,7 @@ class ExtAuthzGrpcIntegrationTest : public Grpc::VersionedGrpcClientIntegrationP }; class ExtAuthzHttpIntegrationTest : public HttpIntegrationTest, - public testing::TestWithParam { + public TestWithParam { public: ExtAuthzHttpIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, GetParam()) {} @@ -401,12 +549,25 @@ TEST_P(ExtAuthzGrpcIntegrationTest, HTTP2DownstreamRequestWithLargeBody) { expectCheckRequestWithBody(Http::CodecClient::Type::HTTP2, 2048); } +// Verifies that the original request headers will be added and appended when the authorization +// server returns headers_to_add and 
headers_to_append in OkResponse message. +TEST_P(ExtAuthzGrpcIntegrationTest, SendHeadersToAddAndToAppendToUpstream) { + expectCheckRequestWithBodyWithHeaders( + Http::CodecClient::Type::HTTP1, 4, + /*headers_to_add=*/Headers{{"header1", "header1"}}, + /*headers_to_append=*/Headers{{"header2", "header2"}}, + /*new_headers_from_upstream=*/Http::TestRequestHeaderMapImpl{{"new1", "new1"}}, + /*headers_to_append_multiple=*/ + Http::TestRequestHeaderMapImpl{{"multiple", "multiple-first"}, + {"multiple", "multiple-second"}}); +} + TEST_P(ExtAuthzGrpcIntegrationTest, AllowAtDisable) { expectFilterDisableCheck(false, "200"); } TEST_P(ExtAuthzGrpcIntegrationTest, DenyAtDisable) { expectFilterDisableCheck(true, "403"); } INSTANTIATE_TEST_SUITE_P(IpVersions, ExtAuthzHttpIntegrationTest, - testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + ValuesIn(TestEnvironment::getIpVersionsForTest()), TestUtility::ipTestParamsToString); // Verifies that by default HTTP service uses the case-sensitive string matcher. From f483845b2d383d47c4c8854319a626295d5d4554 Mon Sep 17 00:00:00 2001 From: Sunjay Bhatia Date: Mon, 6 Jul 2020 10:04:40 -0400 Subject: [PATCH 521/909] Windows: untag passing tests (#11741) Risk Level: Low Testing: Enables tests to run on Windows in CI Signed-off-by: Sunjay Bhatia Co-authored-by: William A Rowe Jr --- test/common/network/BUILD | 1 - test/common/router/BUILD | 1 - test/common/upstream/BUILD | 2 -- test/extensions/common/aws/BUILD | 1 - test/extensions/filters/http/cors/BUILD | 1 - test/extensions/filters/http/router/BUILD | 1 - test/extensions/filters/network/rbac/BUILD | 1 - test/extensions/transport_sockets/alts/BUILD | 1 - test/integration/BUILD | 9 +-------- test/server/BUILD | 1 + 10 files changed, 2 insertions(+), 17 deletions(-) diff --git a/test/common/network/BUILD b/test/common/network/BUILD index fd12d7478946..7bfb1d0cbc0a 100644 --- a/test/common/network/BUILD +++ b/test/common/network/BUILD @@ -280,7 +280,6 @@ envoy_cc_test( envoy_cc_test( name = "addr_family_aware_socket_option_impl_test", srcs = ["addr_family_aware_socket_option_impl_test.cc"], - tags = ["fails_on_windows"], deps = [ ":socket_option_test", "//source/common/network:addr_family_aware_socket_option_lib", diff --git a/test/common/router/BUILD b/test/common/router/BUILD index 7d88ae53b96e..2db462fb8e03 100644 --- a/test/common/router/BUILD +++ b/test/common/router/BUILD @@ -289,7 +289,6 @@ envoy_cc_test( name = "router_upstream_log_test", srcs = ["router_upstream_log_test.cc"], external_deps = ["abseil_optional"], - tags = ["fails_on_windows"], deps = [ "//source/common/buffer:buffer_lib", "//source/common/network:utility_lib", diff --git a/test/common/upstream/BUILD b/test/common/upstream/BUILD index fc6bc5d966ef..1679ac8eb89e 100644 --- a/test/common/upstream/BUILD +++ b/test/common/upstream/BUILD @@ -133,7 +133,6 @@ envoy_benchmark_test( envoy_cc_test( name = "health_checker_impl_test", srcs = ["health_checker_impl_test.cc"], - tags = ["fails_on_windows"], deps = [ ":utility_lib", "//source/common/buffer:buffer_lib", @@ -399,7 +398,6 @@ envoy_benchmark_test( name = "load_balancer_benchmark_test", timeout = "long", benchmark_binary = "load_balancer_benchmark", - tags = ["fails_on_windows"], ) envoy_cc_test( diff --git a/test/extensions/common/aws/BUILD b/test/extensions/common/aws/BUILD index eae532ee27f9..2a177200f6e7 100644 --- a/test/extensions/common/aws/BUILD +++ b/test/extensions/common/aws/BUILD @@ -76,7 +76,6 @@ envoy_cc_test( srcs = [ "aws_metadata_fetcher_integration_test.cc", ], - 
tags = ["fails_on_windows"], deps = [ "//source/common/common:fmt_lib", "//source/extensions/common/aws:utility_lib", diff --git a/test/extensions/filters/http/cors/BUILD b/test/extensions/filters/http/cors/BUILD index a91934cb1249..43a70cd13f01 100644 --- a/test/extensions/filters/http/cors/BUILD +++ b/test/extensions/filters/http/cors/BUILD @@ -30,7 +30,6 @@ envoy_extension_cc_test( name = "cors_filter_integration_test", srcs = ["cors_filter_integration_test.cc"], extension_name = "envoy.filters.http.cors", - tags = ["fails_on_windows"], deps = [ "//source/common/buffer:buffer_lib", "//source/common/http:header_map_lib", diff --git a/test/extensions/filters/http/router/BUILD b/test/extensions/filters/http/router/BUILD index 46aaecbb7ae2..5e0dca9e8224 100644 --- a/test/extensions/filters/http/router/BUILD +++ b/test/extensions/filters/http/router/BUILD @@ -30,7 +30,6 @@ envoy_extension_cc_test( "//test/config/integration/certs", ], extension_name = "envoy.filters.http.router", - tags = ["fails_on_windows"], deps = [ "//source/extensions/filters/http/router:config", "//test/integration:http_integration_lib", diff --git a/test/extensions/filters/network/rbac/BUILD b/test/extensions/filters/network/rbac/BUILD index 8d4d479cefb0..440b07896711 100644 --- a/test/extensions/filters/network/rbac/BUILD +++ b/test/extensions/filters/network/rbac/BUILD @@ -41,7 +41,6 @@ envoy_extension_cc_test( name = "integration_test", srcs = ["integration_test.cc"], extension_name = "envoy.filters.network.rbac", - tags = ["fails_on_windows"], deps = [ "//source/extensions/filters/network/echo:config", "//source/extensions/filters/network/rbac:config", diff --git a/test/extensions/transport_sockets/alts/BUILD b/test/extensions/transport_sockets/alts/BUILD index 386c25ace615..d2e0f2998f07 100644 --- a/test/extensions/transport_sockets/alts/BUILD +++ b/test/extensions/transport_sockets/alts/BUILD @@ -78,7 +78,6 @@ envoy_extension_cc_test( "grpc_alts_handshaker_proto", "grpc_alts_transport_security_common_proto", ], - tags = ["fails_on_windows"], deps = [ "//source/common/common:utility_lib", "//source/common/event:dispatcher_includes", diff --git a/test/integration/BUILD b/test/integration/BUILD index fe34c7575262..0dc69ee4c328 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -186,7 +186,6 @@ envoy_cc_test( envoy_cc_test( name = "cluster_filter_integration_test", srcs = ["cluster_filter_integration_test.cc"], - tags = ["fails_on_windows"], deps = [ ":integration_lib", "//include/envoy/network:filter_interface", @@ -200,7 +199,6 @@ envoy_cc_test( envoy_cc_test( name = "custom_cluster_integration_test", srcs = ["custom_cluster_integration_test.cc"], - tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "//source/common/upstream:load_balancer_lib", @@ -376,7 +374,6 @@ envoy_cc_test( data = [ "//test/config/integration/certs", ], - tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "//source/extensions/transport_sockets/tls:context_lib", @@ -393,7 +390,6 @@ envoy_cc_test( srcs = [ "header_casing_integration_test.cc", ], - tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", @@ -748,7 +744,6 @@ envoy_cc_test( # The symbol table cluster memory tests take a while to run specially under tsan. # Shard it to avoid test timeout. 
shard_count = 2, - tags = ["fails_on_windows"], deps = [ ":integration_lib", "//source/common/memory:stats_lib", @@ -947,7 +942,6 @@ envoy_cc_test( data = [ "//test/config/integration/certs", ], - tags = ["fails_on_windows"], deps = [ ":integration_lib", "//source/common/config:api_version_lib", @@ -994,7 +988,6 @@ envoy_cc_test( srcs = [ "tcp_conn_pool_integration_test.cc", ], - tags = ["fails_on_windows"], deps = [ ":integration_lib", "//include/envoy/server:filter_config_interface", @@ -1046,6 +1039,7 @@ envoy_cc_test( name = "dynamic_validation_integration_test", srcs = ["dynamic_validation_integration_test.cc"], data = ["//test/config/integration:server_xds_files"], + # Fails on windows with cr/lf yaml file checkouts tags = ["fails_on_windows"], deps = [ ":http_integration_lib", @@ -1088,7 +1082,6 @@ envoy_cc_test( data = [ "//test/config/integration/certs", ], - tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "//source/common/http:header_map_lib", diff --git a/test/server/BUILD b/test/server/BUILD index 7706a8a85baf..09446f8f0c30 100644 --- a/test/server/BUILD +++ b/test/server/BUILD @@ -343,6 +343,7 @@ envoy_cc_test( ":server_test_data", ":static_validation_test_data", ], + # Fails on windows with cr/lf yaml file checkouts tags = ["fails_on_windows"], deps = [ "//source/common/common:version_lib", From e73738f16566c553447c94d96279eeb5441215ca Mon Sep 17 00:00:00 2001 From: ankatare Date: Mon, 6 Jul 2020 21:12:55 +0530 Subject: [PATCH 522/909] V2 to v3 fragment (#11213) Risk Level:Low Testing: integration and format testing Part of #10843 Signed-off-by: Abhay Narayan Katare --- source/common/protobuf/utility.h | 5 ++- test/common/tcp_proxy/tcp_proxy_test.cc | 28 ++++++------ .../upstream/cluster_manager_impl_test.cc | 19 ++++---- .../upstream/health_checker_impl_test.cc | 44 +++++++++---------- test/common/upstream/utility.h | 8 ++++ .../http_connection_manager/config_test.cc | 40 ++++++++--------- .../network/redis_proxy/config_test.cc | 4 +- .../health_checkers/redis/config_test.cc | 28 ++++++------ .../health_checkers/redis/redis_test.cc | 14 +++--- test/test_common/utility.h | 13 +++--- 10 files changed, 109 insertions(+), 94 deletions(-) diff --git a/source/common/protobuf/utility.h b/source/common/protobuf/utility.h index 38c8560eb057..a605dbc684c6 100644 --- a/source/common/protobuf/utility.h +++ b/source/common/protobuf/utility.h @@ -262,8 +262,9 @@ class MessageUtil { template static void loadFromYamlAndValidate(const std::string& yaml, MessageType& message, - ProtobufMessage::ValidationVisitor& validation_visitor) { - loadFromYaml(yaml, message, validation_visitor); + ProtobufMessage::ValidationVisitor& validation_visitor, + bool avoid_boosting = false) { + loadFromYaml(yaml, message, validation_visitor, !avoid_boosting); validate(message, validation_visitor); } diff --git a/test/common/tcp_proxy/tcp_proxy_test.cc b/test/common/tcp_proxy/tcp_proxy_test.cc index 91a8b897c7ca..d8673ad8b1d2 100644 --- a/test/common/tcp_proxy/tcp_proxy_test.cc +++ b/test/common/tcp_proxy/tcp_proxy_test.cc @@ -53,9 +53,10 @@ using ::Envoy::Network::UpstreamServerName; namespace { Config constructConfigFromYaml(const std::string& yaml, - Server::Configuration::FactoryContext& context) { + Server::Configuration::FactoryContext& context, + bool avoid_boosting = true) { envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy tcp_proxy; - TestUtility::loadFromYamlAndValidate(yaml, tcp_proxy); + TestUtility::loadFromYamlAndValidate(yaml, tcp_proxy, false, avoid_boosting); return 
Config(tcp_proxy, context); } @@ -122,7 +123,7 @@ TEST(ConfigTest, DEPRECATED_FEATURE_TEST(BadConfig)) { )EOF"; NiceMock factory_context; - EXPECT_THROW(constructConfigFromYaml(yaml_string, factory_context), EnvoyException); + EXPECT_THROW(constructConfigFromYaml(yaml_string, factory_context, false), EnvoyException); } TEST(ConfigTest, DEPRECATED_FEATURE_TEST(EmptyRouteConfig)) { @@ -134,7 +135,7 @@ TEST(ConfigTest, DEPRECATED_FEATURE_TEST(EmptyRouteConfig)) { )EOF"; NiceMock factory_context_; - EXPECT_THROW(constructConfigFromYaml(yaml, factory_context_), EnvoyException); + EXPECT_THROW(constructConfigFromYaml(yaml, factory_context_, false), EnvoyException); } TEST(ConfigTest, DEPRECATED_FEATURE_TEST(Routes)) { @@ -185,7 +186,7 @@ TEST(ConfigTest, DEPRECATED_FEATURE_TEST(Routes)) { )EOF"; NiceMock factory_context_; - Config config_obj(constructConfigFromYaml(yaml, factory_context_)); + Config config_obj(constructConfigFromYaml(yaml, factory_context_, false)); { // hit route with destination_ip (10.10.10.10/32) @@ -364,7 +365,7 @@ TEST(ConfigTest, DEPRECATED_FEATURE_TEST(RouteWithTopLevelMetadataMatchConfig)) )EOF"; NiceMock factory_context_; - Config config_obj(constructConfigFromYaml(yaml, factory_context_)); + Config config_obj(constructConfigFromYaml(yaml, factory_context_, false)); ProtobufWkt::Value v1, v2; v1.set_string_value("v1"); @@ -1742,7 +1743,7 @@ class TcpProxyRoutingTest : public testing::Test { public: TcpProxyRoutingTest() = default; - void setup() { + void setup(bool avoid_boosting = true) { const std::string yaml = R"EOF( stat_prefix: name cluster: fallback_cluster @@ -1752,7 +1753,8 @@ class TcpProxyRoutingTest : public testing::Test { cluster: fake_cluster )EOF"; - config_ = std::make_shared(constructConfigFromYaml(yaml, factory_context_)); + config_ = + std::make_shared(constructConfigFromYaml(yaml, factory_context_, avoid_boosting)); } void initializeFilter() { @@ -1772,7 +1774,7 @@ class TcpProxyRoutingTest : public testing::Test { }; TEST_F(TcpProxyRoutingTest, DEPRECATED_FEATURE_TEST(NonRoutableConnection)) { - setup(); + setup(false); const uint32_t total_cx = config_->stats().downstream_cx_total_.value(); const uint32_t non_routable_cx = config_->stats().downstream_cx_no_route_.value(); @@ -1793,7 +1795,7 @@ TEST_F(TcpProxyRoutingTest, DEPRECATED_FEATURE_TEST(NonRoutableConnection)) { } TEST_F(TcpProxyRoutingTest, DEPRECATED_FEATURE_TEST(RoutableConnection)) { - setup(); + setup(false); const uint32_t total_cx = config_->stats().downstream_cx_total_.value(); const uint32_t non_routable_cx = config_->stats().downstream_cx_no_route_.value(); @@ -1815,7 +1817,7 @@ TEST_F(TcpProxyRoutingTest, DEPRECATED_FEATURE_TEST(RoutableConnection)) { // Test that the tcp proxy uses the cluster from FilterState if set TEST_F(TcpProxyRoutingTest, DEPRECATED_FEATURE_TEST(UseClusterFromPerConnectionCluster)) { - setup(); + setup(false); initializeFilter(); connection_.streamInfo().filterState()->setData( @@ -1832,7 +1834,7 @@ TEST_F(TcpProxyRoutingTest, DEPRECATED_FEATURE_TEST(UseClusterFromPerConnectionC // Test that the tcp proxy forwards the requested server name from FilterState if set TEST_F(TcpProxyRoutingTest, DEPRECATED_FEATURE_TEST(UpstreamServerName)) { - setup(); + setup(false); initializeFilter(); connection_.streamInfo().filterState()->setData( @@ -1862,7 +1864,7 @@ TEST_F(TcpProxyRoutingTest, DEPRECATED_FEATURE_TEST(UpstreamServerName)) { // Test that the tcp proxy override ALPN from FilterState if set TEST_F(TcpProxyRoutingTest, 
DEPRECATED_FEATURE_TEST(ApplicationProtocols)) { - setup(); + setup(false); initializeFilter(); connection_.streamInfo().filterState()->setData( diff --git a/test/common/upstream/cluster_manager_impl_test.cc b/test/common/upstream/cluster_manager_impl_test.cc index b997cf90f4a2..386031b45516 100644 --- a/test/common/upstream/cluster_manager_impl_test.cc +++ b/test/common/upstream/cluster_manager_impl_test.cc @@ -20,9 +20,10 @@ namespace Envoy { namespace Upstream { namespace { -envoy::config::bootstrap::v3::Bootstrap parseBootstrapFromV3Yaml(const std::string& yaml) { +envoy::config::bootstrap::v3::Bootstrap parseBootstrapFromV3Yaml(const std::string& yaml, + bool avoid_boosting = true) { envoy::config::bootstrap::v3::Bootstrap bootstrap; - TestUtility::loadFromYaml(yaml, bootstrap, true); + TestUtility::loadFromYaml(yaml, bootstrap, true, avoid_boosting); return bootstrap; } @@ -200,7 +201,7 @@ TEST_F(ClusterManagerImplTest, MultipleProtocolCluster) { checkConfigDump(R"EOF( static_clusters: - cluster: - "@type": type.googleapis.com/envoy.api.v2.Cluster + "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster name: http12_cluster connect_timeout: 0.250s lb_policy: ROUND_ROBIN @@ -442,7 +443,7 @@ TEST_F(ClusterManagerImplTest, OriginalDstLbRestriction2) { port_value: 11001 )EOF"; - EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV3Yaml(yaml)), EnvoyException, + EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV3Yaml(yaml, false)), EnvoyException, "cluster: LB policy hidden_envoy_deprecated_ORIGINAL_DST_LB is not " "valid for Cluster type STATIC. " "'ORIGINAL_DST_LB' is allowed only with cluster type 'ORIGINAL_DST'"); @@ -559,7 +560,7 @@ TEST_F(ClusterManagerImplTest, SubsetLoadBalancerOriginalDstRestriction) { - keys: [ "x" ] )EOF"; - EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV3Yaml(yaml)), EnvoyException, + EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV3Yaml(yaml, false)), EnvoyException, "cluster: LB policy hidden_envoy_deprecated_ORIGINAL_DST_LB cannot be " "combined with lb_subset_config"); } @@ -1226,8 +1227,8 @@ TEST_F(ClusterManagerImplTest, ModifyWarmingCluster) { connect_timeout: 0.25s hosts: - socket_address: - address: "127.0.0.1" - port_value: 11001 + address: "127.0.0.1" + port_value: 11001 last_updated: seconds: 1234567891 nanos: 234000000 @@ -1259,8 +1260,8 @@ TEST_F(ClusterManagerImplTest, ModifyWarmingCluster) { connect_timeout: 0.25s hosts: - socket_address: - address: "127.0.0.1" - port_value: 11002 + address: "127.0.0.1" + port_value: 11002 last_updated: seconds: 1234567891 nanos: 234000000 diff --git a/test/common/upstream/health_checker_impl_test.cc b/test/common/upstream/health_checker_impl_test.cc index 5070c903e184..446fcce9a8f9 100644 --- a/test/common/upstream/health_checker_impl_test.cc +++ b/test/common/upstream/health_checker_impl_test.cc @@ -139,9 +139,9 @@ class HttpHealthCheckerImplTest : public testing::Test, public HealthCheckerTest std::unordered_map; - void allocHealthChecker(const std::string& yaml) { + void allocHealthChecker(const std::string& yaml, bool avoid_boosting = true) { health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, + *cluster_, parseHealthCheckFromV3Yaml(yaml, avoid_boosting), dispatcher_, runtime_, random_, HealthCheckEventLoggerPtr(event_logger_storage_.release())); } @@ -2569,9 +2569,9 @@ class TestProdHttpHealthChecker : public ProdHttpHealthCheckerImpl { class ProdHttpHealthCheckerTest : public testing::Test, public HealthCheckerTestBase { 
public: - void allocHealthChecker(const std::string& yaml) { + void allocHealthChecker(const std::string& yaml, bool avoid_boosting = true) { health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, + *cluster_, parseHealthCheckFromV3Yaml(yaml, avoid_boosting), dispatcher_, runtime_, random_, HealthCheckEventLoggerPtr(event_logger_storage_.release())); } @@ -2646,7 +2646,7 @@ TEST_F(HttpHealthCheckerImplTest, DEPRECATED_FEATURE_TEST(Http1CodecClient)) { use_http2: false )EOF"; - allocHealthChecker(yaml); + allocHealthChecker(yaml, false); addCompletionCallback(); EXPECT_EQ(Http::CodecClient::Type::HTTP1, health_checker_->codecClientType()); } @@ -2666,7 +2666,7 @@ TEST_F(HttpHealthCheckerImplTest, DEPRECATED_FEATURE_TEST(Http2CodecClient)) { use_http2: true )EOF"; - allocHealthChecker(yaml); + allocHealthChecker(yaml, false); addCompletionCallback(); EXPECT_EQ(Http::CodecClient::Type::HTTP2, health_checker_->codecClientType()); } @@ -2755,7 +2755,7 @@ TEST(HttpStatusChecker, Default) { )EOF"; HttpHealthCheckerImpl::HttpStatusChecker http_status_checker( - parseHealthCheckFromV2Yaml(yaml).http_health_check().expected_statuses(), 200); + parseHealthCheckFromV3Yaml(yaml).http_health_check().expected_statuses(), 200); EXPECT_TRUE(http_status_checker.inRange(200)); EXPECT_FALSE(http_status_checker.inRange(204)); @@ -2777,7 +2777,7 @@ TEST(HttpStatusChecker, Single100) { )EOF"; HttpHealthCheckerImpl::HttpStatusChecker http_status_checker( - parseHealthCheckFromV2Yaml(yaml).http_health_check().expected_statuses(), 200); + parseHealthCheckFromV3Yaml(yaml).http_health_check().expected_statuses(), 200); EXPECT_FALSE(http_status_checker.inRange(200)); @@ -2802,7 +2802,7 @@ TEST(HttpStatusChecker, Single599) { )EOF"; HttpHealthCheckerImpl::HttpStatusChecker http_status_checker( - parseHealthCheckFromV2Yaml(yaml).http_health_check().expected_statuses(), 200); + parseHealthCheckFromV3Yaml(yaml).http_health_check().expected_statuses(), 200); EXPECT_FALSE(http_status_checker.inRange(200)); @@ -2829,7 +2829,7 @@ TEST(HttpStatusChecker, Ranges_204_304) { )EOF"; HttpHealthCheckerImpl::HttpStatusChecker http_status_checker( - parseHealthCheckFromV2Yaml(yaml).http_health_check().expected_statuses(), 200); + parseHealthCheckFromV3Yaml(yaml).http_health_check().expected_statuses(), 200); EXPECT_FALSE(http_status_checker.inRange(200)); @@ -2858,7 +2858,7 @@ TEST(HttpStatusChecker, Below100) { EXPECT_THROW_WITH_MESSAGE( HttpHealthCheckerImpl::HttpStatusChecker http_status_checker( - parseHealthCheckFromV2Yaml(yaml).http_health_check().expected_statuses(), 200), + parseHealthCheckFromV3Yaml(yaml).http_health_check().expected_statuses(), 200), EnvoyException, "Invalid http status range: expecting start >= 100, but found start=99"); } @@ -2879,7 +2879,7 @@ TEST(HttpStatusChecker, Above599) { EXPECT_THROW_WITH_MESSAGE( HttpHealthCheckerImpl::HttpStatusChecker http_status_checker( - parseHealthCheckFromV2Yaml(yaml).http_health_check().expected_statuses(), 200), + parseHealthCheckFromV3Yaml(yaml).http_health_check().expected_statuses(), 200), EnvoyException, "Invalid http status range: expecting end <= 600, but found end=601"); } @@ -2900,7 +2900,7 @@ TEST(HttpStatusChecker, InvalidRange) { EXPECT_THROW_WITH_MESSAGE( HttpHealthCheckerImpl::HttpStatusChecker http_status_checker( - parseHealthCheckFromV2Yaml(yaml).http_health_check().expected_statuses(), 200), + parseHealthCheckFromV3Yaml(yaml).http_health_check().expected_statuses(), 200), EnvoyException, "Invalid 
http status range: expecting start < end, but found start=200 and end=200"); } @@ -2922,7 +2922,7 @@ TEST(HttpStatusChecker, InvalidRange2) { EXPECT_THROW_WITH_MESSAGE( HttpHealthCheckerImpl::HttpStatusChecker http_status_checker( - parseHealthCheckFromV2Yaml(yaml).http_health_check().expected_statuses(), 200), + parseHealthCheckFromV3Yaml(yaml).http_health_check().expected_statuses(), 200), EnvoyException, "Invalid http status range: expecting start < end, but found start=201 and end=200"); } @@ -2988,9 +2988,9 @@ TEST(TcpHealthCheckMatcher, match) { class TcpHealthCheckerImplTest : public testing::Test, public HealthCheckerTestBase { public: - void allocHealthChecker(const std::string& yaml) { + void allocHealthChecker(const std::string& yaml, bool avoid_boosting = true) { health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV2Yaml(yaml), dispatcher_, runtime_, random_, + *cluster_, parseHealthCheckFromV3Yaml(yaml, avoid_boosting), dispatcher_, runtime_, random_, HealthCheckEventLoggerPtr(event_logger_storage_.release())); } @@ -5009,7 +5009,7 @@ TEST(HealthCheckProto, Validation) { path: /healthcheck )EOF"; envoy::config::core::v3::HealthCheck health_check_proto; - EXPECT_THROW_WITH_REGEX(TestUtility::validate(parseHealthCheckFromV2Yaml(yaml)), EnvoyException, + EXPECT_THROW_WITH_REGEX(TestUtility::validate(parseHealthCheckFromV3Yaml(yaml)), EnvoyException, "Proto constraint validation failed.*value must be greater than.*"); } { @@ -5025,7 +5025,7 @@ TEST(HealthCheckProto, Validation) { path: /healthcheck )EOF"; envoy::config::core::v3::HealthCheck health_check_proto; - EXPECT_THROW_WITH_REGEX(TestUtility::validate(parseHealthCheckFromV2Yaml(yaml)), EnvoyException, + EXPECT_THROW_WITH_REGEX(TestUtility::validate(parseHealthCheckFromV3Yaml(yaml)), EnvoyException, "Proto constraint validation failed.*value must be greater than.*"); } { @@ -5041,7 +5041,7 @@ TEST(HealthCheckProto, Validation) { path: /healthcheck )EOF"; envoy::config::core::v3::HealthCheck health_check_proto; - EXPECT_THROW_WITH_REGEX(TestUtility::validate(parseHealthCheckFromV2Yaml(yaml)), EnvoyException, + EXPECT_THROW_WITH_REGEX(TestUtility::validate(parseHealthCheckFromV3Yaml(yaml)), EnvoyException, "Proto constraint validation failed.*value must be greater than.*"); } { @@ -5057,7 +5057,7 @@ TEST(HealthCheckProto, Validation) { path: /healthcheck )EOF"; envoy::config::core::v3::HealthCheck health_check_proto; - EXPECT_THROW_WITH_REGEX(TestUtility::validate(parseHealthCheckFromV2Yaml(yaml)), EnvoyException, + EXPECT_THROW_WITH_REGEX(TestUtility::validate(parseHealthCheckFromV3Yaml(yaml)), EnvoyException, "Proto constraint validation failed.*value must be greater than.*"); } { @@ -5071,7 +5071,7 @@ TEST(HealthCheckProto, Validation) { path: /healthcheck )EOF"; envoy::config::core::v3::HealthCheck health_check_proto; - EXPECT_THROW_WITH_REGEX(TestUtility::validate(parseHealthCheckFromV2Yaml(yaml)), EnvoyException, + EXPECT_THROW_WITH_REGEX(TestUtility::validate(parseHealthCheckFromV3Yaml(yaml)), EnvoyException, "Proto constraint validation failed.*value is required.*"); } { @@ -5085,7 +5085,7 @@ TEST(HealthCheckProto, Validation) { path: /healthcheck )EOF"; envoy::config::core::v3::HealthCheck health_check_proto; - EXPECT_THROW_WITH_REGEX(TestUtility::validate(parseHealthCheckFromV2Yaml(yaml)), EnvoyException, + EXPECT_THROW_WITH_REGEX(TestUtility::validate(parseHealthCheckFromV3Yaml(yaml)), EnvoyException, "Proto constraint validation failed.*value is required.*"); } } diff --git 
a/test/common/upstream/utility.h b/test/common/upstream/utility.h index da5d7df44a56..aba493bc22df 100644 --- a/test/common/upstream/utility.h +++ b/test/common/upstream/utility.h @@ -123,6 +123,14 @@ makeLocalityWeights(std::initializer_list locality_weights) { return std::make_shared(locality_weights); } +inline envoy::config::core::v3::HealthCheck +parseHealthCheckFromV3Yaml(const std::string& yaml_string, bool avoid_boosting = true) { + envoy::config::core::v3::HealthCheck health_check; + TestUtility::loadFromYamlAndValidate(yaml_string, health_check, false, avoid_boosting); + return health_check; +} + +// For DEPRECATED TEST CASES inline envoy::config::core::v3::HealthCheck parseHealthCheckFromV2Yaml(const std::string& yaml_string) { envoy::config::core::v3::HealthCheck health_check; diff --git a/test/extensions/filters/network/http_connection_manager/config_test.cc b/test/extensions/filters/network/http_connection_manager/config_test.cc index 6d745c2d8b5d..b53042d80115 100644 --- a/test/extensions/filters/network/http_connection_manager/config_test.cc +++ b/test/extensions/filters/network/http_connection_manager/config_test.cc @@ -40,10 +40,10 @@ namespace NetworkFilters { namespace HttpConnectionManager { envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager -parseHttpConnectionManagerFromV2Yaml(const std::string& yaml) { +parseHttpConnectionManagerFromV2Yaml(const std::string& yaml, bool avoid_boosting = true) { envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager http_connection_manager; - TestUtility::loadFromYamlAndValidate(yaml, http_connection_manager); + TestUtility::loadFromYamlAndValidate(yaml, http_connection_manager, false, avoid_boosting); return http_connection_manager; } @@ -172,8 +172,8 @@ stat_prefix: router - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, - date_provider_, route_config_provider_manager_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string, false), + context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); EXPECT_EQ(128, config.tracingConfig()->max_path_tag_length_); @@ -379,8 +379,8 @@ stat_prefix: router EXPECT_CALL(http_tracer_manager_, getOrCreateHttpTracer(Pointee(ProtoEq(inlined_tracing_config)))) .WillOnce(Return(http_tracer_)); - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, - date_provider_, route_config_provider_manager_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string, false), + context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); // Actual HttpTracer must be obtained from the HttpTracerManager. 
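The same refactor pattern repeats across the touched test files: each YAML parsing helper gains an avoid_boosting argument defaulting to true and forwards it to TestUtility::loadFromYamlAndValidate, and only call sites whose fixtures still rely on deprecated v2 fields pass false to keep exercising the v2-to-v3 upgrade path. A minimal sketch of that helper shape, using a hypothetical ExampleProto type rather than any proto touched in this patch:

// Sketch only: ExampleProto stands in for a concrete config proto type.
// With avoid_boosting = true the YAML must already be valid v3; passing false
// allows deprecated v2 fields to be upgraded while loading.
template <class ExampleProto>
ExampleProto parseFromV3Yaml(const std::string& yaml, bool avoid_boosting = true) {
  ExampleProto message;
  TestUtility::loadFromYamlAndValidate(yaml, message, /*preserve_original_type=*/false,
                                       avoid_boosting);
  return message;
}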
@@ -430,8 +430,8 @@ stat_prefix: router request_headers_for_tags: - foo )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, - date_provider_, route_config_provider_manager_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string, false), + context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); const Tracing::CustomTagMap& custom_tag_map = config.tracingConfig()->custom_tags_; @@ -462,8 +462,8 @@ stat_prefix: router )EOF"; ON_CALL(context_, direction()).WillByDefault(Return(envoy::config::core::v3::OUTBOUND)); - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, - date_provider_, route_config_provider_manager_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string, false), + context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); EXPECT_EQ(Tracing::OperationName::Egress, config.tracingConfig()->operation_name_); } @@ -488,8 +488,8 @@ stat_prefix: router )EOF"; ON_CALL(context_, direction()).WillByDefault(Return(envoy::config::core::v3::INBOUND)); - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, - date_provider_, route_config_provider_manager_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string, false), + context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); EXPECT_EQ(Tracing::OperationName::Ingress, config.tracingConfig()->operation_name_); } @@ -507,8 +507,8 @@ TEST_F(HttpConnectionManagerConfigTest, SamplingDefault) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, - date_provider_, route_config_provider_manager_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string, false), + context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); EXPECT_EQ(100, config.tracingConfig()->client_sampling_.numerator()); @@ -542,8 +542,8 @@ TEST_F(HttpConnectionManagerConfigTest, SamplingConfigured) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, - date_provider_, route_config_provider_manager_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string, false), + context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); EXPECT_EQ(1, config.tracingConfig()->client_sampling_.numerator()); @@ -576,8 +576,8 @@ TEST_F(HttpConnectionManagerConfigTest, FractionalSamplingConfigured) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, - date_provider_, route_config_provider_manager_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string, false), + context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); EXPECT_EQ(0, config.tracingConfig()->client_sampling_.numerator()); @@ -688,8 +688,8 @@ TEST_F(HttpConnectionManagerConfigTest, DEPRECATED_FEATURE_TEST(IdleTimeout)) { - name: envoy.filters.http.router )EOF"; - 
HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, - date_provider_, route_config_provider_manager_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string, false), + context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); EXPECT_EQ(1000, config.idleTimeout().value().count()); } diff --git a/test/extensions/filters/network/redis_proxy/config_test.cc b/test/extensions/filters/network/redis_proxy/config_test.cc index 55b8c50193da..40420f298ddf 100644 --- a/test/extensions/filters/network/redis_proxy/config_test.cc +++ b/test/extensions/filters/network/redis_proxy/config_test.cc @@ -85,7 +85,7 @@ stat_prefix: foo )EOF"; envoy::extensions::filters::network::redis_proxy::v3::RedisProxy proto_config{}; - TestUtility::loadFromYamlAndValidate(yaml, proto_config, true); + TestUtility::loadFromYamlAndValidate(yaml, proto_config, true, false); NiceMock context; RedisProxyFilterConfigFactory factory; Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, context); @@ -114,7 +114,7 @@ stat_prefix: foo )EOF"; envoy::extensions::filters::network::redis_proxy::v3::RedisProxy proto_config{}; - TestUtility::loadFromYamlAndValidate(yaml, proto_config, true); + TestUtility::loadFromYamlAndValidate(yaml, proto_config, true, false); NiceMock context; RedisProxyFilterConfigFactory factory; Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, context); diff --git a/test/extensions/health_checkers/redis/config_test.cc b/test/extensions/health_checkers/redis/config_test.cc index 0e01e8e9a156..cf84215a5173 100644 --- a/test/extensions/health_checkers/redis/config_test.cc +++ b/test/extensions/health_checkers/redis/config_test.cc @@ -36,11 +36,11 @@ TEST(HealthCheckerFactoryTest, DEPRECATED_FEATURE_TEST(CreateRedisDeprecated)) { NiceMock context; RedisHealthCheckerFactory factory; - EXPECT_NE( - nullptr, - dynamic_cast( - factory.createCustomHealthChecker(Upstream::parseHealthCheckFromV2Yaml(yaml), context) - .get())); + EXPECT_NE(nullptr, dynamic_cast( + factory + .createCustomHealthChecker( + Upstream::parseHealthCheckFromV3Yaml(yaml, false), context) + .get())); } TEST(HealthCheckerFactoryTest, CreateRedis) { @@ -64,7 +64,7 @@ TEST(HealthCheckerFactoryTest, CreateRedis) { EXPECT_NE( nullptr, dynamic_cast( - factory.createCustomHealthChecker(Upstream::parseHealthCheckFromV2Yaml(yaml), context) + factory.createCustomHealthChecker(Upstream::parseHealthCheckFromV3Yaml(yaml), context) .get())); } @@ -84,11 +84,11 @@ TEST(HealthCheckerFactoryTest, DEPRECATED_FEATURE_TEST(CreateRedisWithoutKeyDepr NiceMock context; RedisHealthCheckerFactory factory; - EXPECT_NE( - nullptr, - dynamic_cast( - factory.createCustomHealthChecker(Upstream::parseHealthCheckFromV2Yaml(yaml), context) - .get())); + EXPECT_NE(nullptr, dynamic_cast( + factory + .createCustomHealthChecker( + Upstream::parseHealthCheckFromV3Yaml(yaml, false), context) + .get())); } TEST(HealthCheckerFactoryTest, CreateRedisWithoutKey) { @@ -111,7 +111,7 @@ TEST(HealthCheckerFactoryTest, CreateRedisWithoutKey) { EXPECT_NE( nullptr, dynamic_cast( - factory.createCustomHealthChecker(Upstream::parseHealthCheckFromV2Yaml(yaml), context) + factory.createCustomHealthChecker(Upstream::parseHealthCheckFromV3Yaml(yaml), context) .get())); } @@ -136,7 +136,7 @@ TEST(HealthCheckerFactoryTest, CreateRedisWithLogHCFailure) { EXPECT_NE( nullptr, dynamic_cast( - 
factory.createCustomHealthChecker(Upstream::parseHealthCheckFromV2Yaml(yaml), context) + factory.createCustomHealthChecker(Upstream::parseHealthCheckFromV3Yaml(yaml), context) .get())); } @@ -165,7 +165,7 @@ TEST(HealthCheckerFactoryTest, CreateRedisViaUpstreamHealthCheckerFactory) { EXPECT_NE(nullptr, dynamic_cast( Upstream::HealthCheckerFactory::create( - Upstream::parseHealthCheckFromV2Yaml(yaml), cluster, runtime, random, + Upstream::parseHealthCheckFromV3Yaml(yaml), cluster, runtime, random, dispatcher, log_manager, ProtobufMessage::getStrictValidationVisitor(), api) .get())); } diff --git a/test/extensions/health_checkers/redis/redis_test.cc b/test/extensions/health_checkers/redis/redis_test.cc index 7b1b850f0033..3e3386b1bbe0 100644 --- a/test/extensions/health_checkers/redis/redis_test.cc +++ b/test/extensions/health_checkers/redis/redis_test.cc @@ -49,7 +49,7 @@ class RedisHealthCheckerTest "@type": type.googleapis.com/envoy.config.health_checker.redis.v2.Redis )EOF"; - const auto& health_check_config = Upstream::parseHealthCheckFromV2Yaml(yaml); + const auto& health_check_config = Upstream::parseHealthCheckFromV3Yaml(yaml); const auto& redis_config = getRedisHealthCheckConfig( health_check_config, ProtobufMessage::getStrictValidationVisitor()); @@ -73,7 +73,7 @@ class RedisHealthCheckerTest "@type": type.googleapis.com/envoy.config.health_checker.redis.v2.Redis )EOF"; - const auto& health_check_config = Upstream::parseHealthCheckFromV2Yaml(yaml); + const auto& health_check_config = Upstream::parseHealthCheckFromV3Yaml(yaml); const auto& redis_config = getRedisHealthCheckConfig( health_check_config, ProtobufMessage::getStrictValidationVisitor()); @@ -97,7 +97,7 @@ class RedisHealthCheckerTest key: foo )EOF"; - const auto& health_check_config = Upstream::parseHealthCheckFromV2Yaml(yaml); + const auto& health_check_config = Upstream::parseHealthCheckFromV3Yaml(yaml); const auto& redis_config = getRedisHealthCheckConfig( health_check_config, ProtobufMessage::getStrictValidationVisitor()); @@ -106,7 +106,7 @@ class RedisHealthCheckerTest Upstream::HealthCheckEventLoggerPtr(event_logger_), *api_, *this); } - void setupExistsHealthcheckDeprecated() { + void setupExistsHealthcheckDeprecated(bool avoid_boosting = true) { const std::string yaml = R"EOF( timeout: 1s interval: 1s @@ -120,7 +120,7 @@ class RedisHealthCheckerTest key: foo )EOF"; - const auto& health_check_config = Upstream::parseHealthCheckFromV2Yaml(yaml); + const auto& health_check_config = Upstream::parseHealthCheckFromV3Yaml(yaml, avoid_boosting); const auto& redis_config = getRedisHealthCheckConfig( health_check_config, ProtobufMessage::getStrictValidationVisitor()); @@ -144,7 +144,7 @@ class RedisHealthCheckerTest "@type": type.googleapis.com/envoy.config.health_checker.redis.v2.Redis )EOF"; - const auto& health_check_config = Upstream::parseHealthCheckFromV2Yaml(yaml); + const auto& health_check_config = Upstream::parseHealthCheckFromV3Yaml(yaml); const auto& redis_config = getRedisHealthCheckConfig( health_check_config, ProtobufMessage::getStrictValidationVisitor()); @@ -398,7 +398,7 @@ TEST_F(RedisHealthCheckerTest, LogInitialFailure) { TEST_F(RedisHealthCheckerTest, DEPRECATED_FEATURE_TEST(ExistsDeprecated)) { InSequence s; - setupExistsHealthcheckDeprecated(); + setupExistsHealthcheckDeprecated(false); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { Upstream::makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; diff --git a/test/test_common/utility.h b/test/test_common/utility.h index 
e85776058e24..e95302726494 100644 --- a/test/test_common/utility.h +++ b/test/test_common/utility.h @@ -550,8 +550,10 @@ class TestUtility { } static void loadFromYaml(const std::string& yaml, Protobuf::Message& message, - bool preserve_original_type = false) { - MessageUtil::loadFromYaml(yaml, message, ProtobufMessage::getStrictValidationVisitor()); + bool preserve_original_type = false, bool avoid_boosting = false) { + MessageUtil::loadFromYaml(yaml, message, ProtobufMessage::getStrictValidationVisitor(), + !avoid_boosting); + if (!preserve_original_type) { Config::VersionConverter::eraseOriginalTypeInformation(message); } @@ -572,9 +574,10 @@ class TestUtility { template static void loadFromYamlAndValidate(const std::string& yaml, MessageType& message, - bool preserve_original_type = false) { - MessageUtil::loadFromYamlAndValidate(yaml, message, - ProtobufMessage::getStrictValidationVisitor()); + bool preserve_original_type = false, + bool avoid_boosting = false) { + MessageUtil::loadFromYamlAndValidate( + yaml, message, ProtobufMessage::getStrictValidationVisitor(), avoid_boosting); if (!preserve_original_type) { Config::VersionConverter::eraseOriginalTypeInformation(message); } From 91592cd32910676c022bbeb0cdb8bec2b3bf2e14 Mon Sep 17 00:00:00 2001 From: Yuval Kohavi Date: Mon, 6 Jul 2020 11:57:20 -0400 Subject: [PATCH 523/909] fix building tests with gcc (#11827) Add mocks to MockGrpcMuxWatch. follow-up to #11300. Signed-off-by: Yuval Kohavi --- test/mocks/config/mocks.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/test/mocks/config/mocks.h b/test/mocks/config/mocks.h index 93dda3bd2574..92db2b117e6d 100644 --- a/test/mocks/config/mocks.h +++ b/test/mocks/config/mocks.h @@ -96,8 +96,11 @@ class MockGrpcMux : public GrpcMux { MOCK_METHOD(void, start, ()); MOCK_METHOD(void, pause, (const std::string& type_url)); + MOCK_METHOD(void, pause, (const std::vector type_urls)); MOCK_METHOD(void, resume, (const std::string& type_url)); + MOCK_METHOD(void, resume, (const std::vector type_urls)); MOCK_METHOD(bool, paused, (const std::string& type_url), (const)); + MOCK_METHOD(bool, paused, (const std::vector type_urls), (const)); MOCK_METHOD(void, addSubscription, (const std::set& resources, const std::string& type_url, From 4ab8abdd72e9d5058be4410e60bd3ccbcff14dd5 Mon Sep 17 00:00:00 2001 From: antonio Date: Mon, 6 Jul 2020 12:31:07 -0400 Subject: [PATCH 524/909] test: Convert compressor_filter_speed_test and thread_local_store_speed_test to benchmark cc binary and test framework (#11856) Signed-off-by: Antonio Vicente --- test/common/stats/BUILD | 9 +++++++- .../stats/thread_local_store_speed_test.cc | 23 ++++++++----------- .../filters/http/common/compressor/BUILD | 10 ++++++-- .../compressor_filter_speed_test.cc | 2 -- 4 files changed, 26 insertions(+), 18 deletions(-) diff --git a/test/common/stats/BUILD b/test/common/stats/BUILD index fb5db97ba91e..863dfe71a841 100644 --- a/test/common/stats/BUILD +++ b/test/common/stats/BUILD @@ -1,5 +1,7 @@ load( "//bazel:envoy_build_system.bzl", + "envoy_benchmark_test", + "envoy_cc_benchmark_binary", "envoy_cc_fuzz_test", "envoy_cc_test", "envoy_cc_test_binary", @@ -236,7 +238,7 @@ envoy_cc_test( ], ) -envoy_cc_test_binary( +envoy_cc_benchmark_binary( name = "thread_local_store_speed_test", srcs = ["thread_local_store_speed_test.cc"], external_deps = [ @@ -256,6 +258,11 @@ envoy_cc_test_binary( ], ) +envoy_benchmark_test( + name = "thread_local_store_speed_test_benchmark_test", + benchmark_binary = "thread_local_store_speed_test", +) + 
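With the envoy_cc_benchmark_binary / envoy_benchmark_test pair, the benchmark entry point is supplied by the shared runner that the macro links in (an assumption based on the main()/BENCHMARK_MAIN() removals later in this commit), so a converted source file only has to register its benchmarks. A hedged sketch of what such a file reduces to:

#include "benchmark/benchmark.h"

// Illustrative benchmark only; the framework-provided main() discovers and
// runs registered benchmarks, so the file defines no main() of its own.
static void BM_Example(benchmark::State& state) {
  for (auto _ : state) {
    benchmark::DoNotOptimize(state.iterations());
  }
}
BENCHMARK(BM_Example);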
envoy_cc_test( name = "utility_test", srcs = ["utility_test.cc"], diff --git a/test/common/stats/thread_local_store_speed_test.cc b/test/common/stats/thread_local_store_speed_test.cc index 8ad68fd7ba0b..e2e86e0fb603 100644 --- a/test/common/stats/thread_local_store_speed_test.cc +++ b/test/common/stats/thread_local_store_speed_test.cc @@ -40,6 +40,10 @@ class ThreadLocalStorePerf { store_.shutdownThreading(); if (tls_) { tls_->shutdownGlobalThreading(); + tls_->shutdownThread(); + } + if (dispatcher_) { + dispatcher_->run(Event::Dispatcher::RunType::NonBlock); } } @@ -50,8 +54,12 @@ class ThreadLocalStorePerf { } void initThreading() { + if (!Envoy::Event::Libevent::Global::initialized()) { + Envoy::Event::Libevent::Global::initialize(); + } dispatcher_ = api_->allocateDispatcher("test_thread"); tls_ = std::make_unique(); + tls_->registerThread(*dispatcher_, true); store_.initializeThreading(*dispatcher_, *tls_); } @@ -59,10 +67,10 @@ class ThreadLocalStorePerf { Stats::SymbolTablePtr symbol_table_; Event::SimulatedTimeSystem time_system_; Stats::AllocatorImpl heap_alloc_; - Stats::ThreadLocalStoreImpl store_; - Api::ApiPtr api_; Event::DispatcherPtr dispatcher_; std::unique_ptr tls_; + Stats::ThreadLocalStoreImpl store_; + Api::ApiPtr api_; envoy::config::metrics::v3::StatsConfig stats_config_; std::vector> stat_names_; }; @@ -95,14 +103,3 @@ BENCHMARK(BM_StatsWithTls); // TODO(jmarantz): add multi-threaded variant of this test, that aggressively // looks up stats in multiple threads to try to trigger contention issues. - -// Boilerplate main(), which discovers benchmarks in the same file and runs them. -int main(int argc, char** argv) { - benchmark::Initialize(&argc, argv); - - Envoy::Event::Libevent::Global::initialize(); - if (benchmark::ReportUnrecognizedArguments(argc, argv)) { - return 1; - } - benchmark::RunSpecifiedBenchmarks(); -} diff --git a/test/extensions/filters/http/common/compressor/BUILD b/test/extensions/filters/http/common/compressor/BUILD index a29d919d0be6..a6b214dd6b50 100644 --- a/test/extensions/filters/http/common/compressor/BUILD +++ b/test/extensions/filters/http/common/compressor/BUILD @@ -1,7 +1,8 @@ load( "//bazel:envoy_build_system.bzl", + "envoy_benchmark_test", + "envoy_cc_benchmark_binary", "envoy_cc_test", - "envoy_cc_test_binary", "envoy_package", ) @@ -25,7 +26,7 @@ envoy_cc_test( ], ) -envoy_cc_test_binary( +envoy_cc_benchmark_binary( name = "compressor_filter_speed_test", srcs = ["compressor_filter_speed_test.cc"], external_deps = [ @@ -44,3 +45,8 @@ envoy_cc_test_binary( "@envoy_api//envoy/extensions/filters/http/compressor/v3:pkg_cc_proto", ], ) + +envoy_benchmark_test( + name = "compressor_filter_speed_test_benchmark_test", + benchmark_binary = "compressor_filter_speed_test", +) diff --git a/test/extensions/filters/http/common/compressor/compressor_filter_speed_test.cc b/test/extensions/filters/http/common/compressor/compressor_filter_speed_test.cc index cb3c4b26e425..9056ddc0ac3d 100644 --- a/test/extensions/filters/http/common/compressor/compressor_filter_speed_test.cc +++ b/test/extensions/filters/http/common/compressor/compressor_filter_speed_test.cc @@ -291,5 +291,3 @@ BENCHMARK(compressChunks1024)->DenseRange(0, 8, 1)->UseManualTime()->Unit(benchm } // namespace HttpFilters } // namespace Extensions } // namespace Envoy - -BENCHMARK_MAIN(); From 5fb4377bef7847ca33f20c4694fd94def101ea73 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Guti=C3=A9rrez=20Segal=C3=A9s?= Date: Mon, 6 Jul 2020 12:39:31 -0400 Subject: [PATCH 525/909] Fix 
incorrect reference to v2 RouteConfiguration (#11878) Signed-off-by: Raul Gutierrez Segales --- include/envoy/router/route_config_update_receiver.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/envoy/router/route_config_update_receiver.h b/include/envoy/router/route_config_update_receiver.h index 8f14d26ceec8..d18c6d554252 100644 --- a/include/envoy/router/route_config_update_receiver.h +++ b/include/envoy/router/route_config_update_receiver.h @@ -78,7 +78,7 @@ class RouteConfigUpdateReceiver { virtual absl::optional configInfo() const PURE; /** - * @return envoy::api::v2::RouteConfiguration& current RouteConfiguration. + * @return envoy::config::route::v3::RouteConfiguration& current RouteConfiguration. */ virtual const envoy::config::route::v3::RouteConfiguration& routeConfiguration() PURE; From 889e4e4dbaebe1ad6c70d773536fb1a6a5752838 Mon Sep 17 00:00:00 2001 From: Rama Chavali Date: Mon, 6 Jul 2020 22:10:08 +0530 Subject: [PATCH 526/909] fix xds proto doc (#11882) Signed-off-by: Rama Chavali --- api/xds_protocol.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/xds_protocol.rst b/api/xds_protocol.rst index 6b580ad3d446..1b254ad7f1f3 100644 --- a/api/xds_protocol.rst +++ b/api/xds_protocol.rst @@ -77,7 +77,7 @@ API flow For typical HTTP routing scenarios, the core resource types for the client's configuration are `Listener`, `RouteConfiguration`, `Cluster`, and `ClusterLoadAssignment`. Each `Listener` resource may point to a `RouteConfiguration` resource, which may point to one or more `Cluster` resources, -and each Cluster` resource may point to a `ClusterLoadAssignment` resource. +and each `Cluster` resource may point to a `ClusterLoadAssignment` resource. Envoy fetches all `Listener` and `Cluster` resources at startup. It then fetches whatever `RouteConfiguration` and `ClusterLoadAssignment` resources that are required by the `Listener` and From 3aa6259f5cec4aebdbbeea60e744ea238e85c00b Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Mon, 6 Jul 2020 12:40:54 -0400 Subject: [PATCH 527/909] tcp: add an (unused-by-default) connection pool based on a shared-with-HTTP class. (#11756) Risk Level: low (defaulted off until release) Testing: parameterized existing unit, integration tests. Docs Changes: n/a Release Notes: n/a (will add with flip Runtime guard: envoy.reloadable_features.new_tcp_connection_pool (default off) Fixes #11528 (modulo TODO and cleanup) Signed-off-by: Alyssa Wilk --- source/common/conn_pool/conn_pool_base.h | 3 +- source/common/runtime/runtime_features.cc | 2 + source/common/tcp/BUILD | 11 +- source/common/tcp/conn_pool.cc | 69 ++++++ source/common/tcp/conn_pool.h | 212 ++++++++++++++++++ .../common/upstream/cluster_manager_impl.cc | 11 +- test/common/tcp/conn_pool_test.cc | 129 +++++++++-- test/integration/BUILD | 1 + .../integration/tcp_proxy_integration_test.cc | 46 ++-- test/integration/tcp_proxy_integration_test.h | 10 +- 10 files changed, 455 insertions(+), 39 deletions(-) create mode 100644 source/common/tcp/conn_pool.cc create mode 100644 source/common/tcp/conn_pool.h diff --git a/source/common/conn_pool/conn_pool_base.h b/source/common/conn_pool/conn_pool_base.h index d0d3f94b56e8..2488542110e0 100644 --- a/source/common/conn_pool/conn_pool_base.h +++ b/source/common/conn_pool/conn_pool_base.h @@ -165,7 +165,7 @@ class ConnPoolImplBase : protected Logger::Loggable { // Called by derived classes any time a request is completed or destroyed for any reason. 
void onRequestClosed(Envoy::ConnectionPool::ActiveClient& client, bool delay_attaching_request); - const Upstream::HostConstSharedPtr& host() { return host_; } + const Upstream::HostConstSharedPtr& host() const { return host_; } Event::Dispatcher& dispatcher() { return dispatcher_; } Upstream::ResourcePriority priority() const { return priority_; } const Network::ConnectionSocket::OptionsSharedPtr& socketOptions() { return socket_options_; } @@ -181,6 +181,7 @@ class ConnPoolImplBase : protected Logger::Loggable { const Network::ConnectionSocket::OptionsSharedPtr socket_options_; const Network::TransportSocketOptionsSharedPtr transport_socket_options_; +protected: std::list drained_callbacks_; std::list pending_requests_; diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc index 742b7aba5282..25d5f70e6c56 100644 --- a/source/common/runtime/runtime_features.cc +++ b/source/common/runtime/runtime_features.cc @@ -88,6 +88,8 @@ constexpr const char* runtime_features[] = { constexpr const char* disabled_runtime_features[] = { // Sentinel and test flag. "envoy.reloadable_features.test_feature_false", + // TODO(alyssawilk) flip true after the release. + "envoy.reloadable_features.new_tcp_connection_pool", }; RuntimeFeatures::RuntimeFeatures() { diff --git a/source/common/tcp/BUILD b/source/common/tcp/BUILD index e0b1232dfa5b..9a4234e77bb8 100644 --- a/source/common/tcp/BUILD +++ b/source/common/tcp/BUILD @@ -10,8 +10,14 @@ envoy_package() envoy_cc_library( name = "conn_pool_lib", - srcs = ["original_conn_pool.cc"], - hdrs = ["original_conn_pool.h"], + srcs = [ + "conn_pool.cc", + "original_conn_pool.cc", + ], + hdrs = [ + "conn_pool.h", + "original_conn_pool.h", + ], external_deps = ["abseil_optional"], deps = [ "//include/envoy/event:deferred_deletable", @@ -24,6 +30,7 @@ envoy_cc_library( "//include/envoy/upstream:upstream_interface", "//source/common/common:linked_object", "//source/common/common:utility_lib", + "//source/common/http:conn_pool_base_lib", "//source/common/network:filter_lib", "//source/common/network:utility_lib", "//source/common/stats:timespan_lib", diff --git a/source/common/tcp/conn_pool.cc b/source/common/tcp/conn_pool.cc new file mode 100644 index 000000000000..5cbb3093a919 --- /dev/null +++ b/source/common/tcp/conn_pool.cc @@ -0,0 +1,69 @@ +#include "common/tcp/conn_pool.h" + +#include + +#include "envoy/event/dispatcher.h" +#include "envoy/event/timer.h" +#include "envoy/upstream/upstream.h" + +#include "common/stats/timespan_impl.h" +#include "common/upstream/upstream_impl.h" + +namespace Envoy { +namespace Tcp { + +ActiveTcpClient::ActiveTcpClient(ConnPoolImpl& parent, const Upstream::HostConstSharedPtr& host, + uint64_t concurrent_request_limit) + : Envoy::ConnectionPool::ActiveClient(parent, host->cluster().maxRequestsPerConnection(), + concurrent_request_limit), + parent_(parent) { + Upstream::Host::CreateConnectionData data = host->createConnection( + parent_.dispatcher(), parent_.socketOptions(), parent_.transportSocketOptions()); + real_host_description_ = data.host_description_; + connection_ = std::move(data.connection_); + connection_->addConnectionCallbacks(*this); + connection_->detectEarlyCloseWhenReadDisabled(false); + connection_->addReadFilter(std::make_shared(*this)); + connection_->connect(); +} + +ActiveTcpClient::~ActiveTcpClient() { + // Handle the case where deferred delete results in the ActiveClient being destroyed before + // TcpConnectionData. 
Make sure the TcpConnectionData will not refer to this ActiveTcpClient + // and handle clean up normally done in clearCallbacks() + if (tcp_connection_data_) { + ASSERT(state_ == ActiveClient::State::CLOSED); + tcp_connection_data_->release(); + parent_.onRequestClosed(*this, true); + parent_.checkForDrained(); + } + parent_.onConnDestroyed(); +} + +void ActiveTcpClient::clearCallbacks() { + if (state_ == Envoy::ConnectionPool::ActiveClient::State::BUSY || + state_ == Envoy::ConnectionPool::ActiveClient::State::DRAINING) { + parent_.onConnReleased(*this); + } + callbacks_ = nullptr; + tcp_connection_data_ = nullptr; + parent_.onRequestClosed(*this, true); + parent_.checkForDrained(); +} + +void ActiveTcpClient::onEvent(Network::ConnectionEvent event) { + Envoy::ConnectionPool::ActiveClient::onEvent(event); + // Do not pass the Connected event to TCP proxy sessions. + // The tcp proxy filter synthesizes its own Connected event in onPoolReadyBase + // and receiving it twice causes problems. + // TODO(alyssawilk) clean this up in a follow-up. It's confusing. + if (callbacks_ && event != Network::ConnectionEvent::Connected) { + callbacks_->onEvent(event); + // After receiving a disconnect event, the owner of callbacks_ will likely self-destruct. + // Clear the pointer to avoid using it again. + callbacks_ = nullptr; + } +} + +} // namespace Tcp +} // namespace Envoy diff --git a/source/common/tcp/conn_pool.h b/source/common/tcp/conn_pool.h new file mode 100644 index 000000000000..d267ac24ed06 --- /dev/null +++ b/source/common/tcp/conn_pool.h @@ -0,0 +1,212 @@ +#pragma once + +#include +#include + +#include "envoy/event/deferred_deletable.h" +#include "envoy/event/timer.h" +#include "envoy/network/connection.h" +#include "envoy/network/filter.h" +#include "envoy/stats/timespan.h" +#include "envoy/tcp/conn_pool.h" +#include "envoy/upstream/upstream.h" + +#include "common/common/linked_object.h" +#include "common/common/logger.h" +#include "common/http/conn_pool_base.h" +#include "common/network/filter_impl.h" + +namespace Envoy { +namespace Tcp { + +class ConnPoolImpl; + +struct TcpAttachContext : public Envoy::ConnectionPool::AttachContext { + TcpAttachContext(Tcp::ConnectionPool::Callbacks* callbacks) : callbacks_(callbacks) {} + Tcp::ConnectionPool::Callbacks* callbacks_; +}; + +class TcpPendingRequest : public Envoy::ConnectionPool::PendingRequest { +public: + TcpPendingRequest(Envoy::ConnectionPool::ConnPoolImplBase& parent, TcpAttachContext& context) + : Envoy::ConnectionPool::PendingRequest(parent), context_(context) {} + Envoy::ConnectionPool::AttachContext& context() override { return context_; } + + TcpAttachContext context_; +}; + +class ActiveTcpClient : public Envoy::ConnectionPool::ActiveClient { +public: + struct ConnReadFilter : public Network::ReadFilterBaseImpl { + ConnReadFilter(ActiveTcpClient& parent) : parent_(parent) {} + + // Network::ReadFilter + Network::FilterStatus onData(Buffer::Instance& data, bool end_stream) override { + parent_.onUpstreamData(data, end_stream); + return Network::FilterStatus::StopIteration; + } + ActiveTcpClient& parent_; + }; + + // This acts as the bridge between the ActiveTcpClient and an individual TCP connection. 
+ class TcpConnectionData : public Envoy::Tcp::ConnectionPool::ConnectionData { + public: + TcpConnectionData(ActiveTcpClient& parent, Network::ClientConnection& connection) + : parent_(&parent), connection_(connection) { + parent_->tcp_connection_data_ = this; + } + ~TcpConnectionData() override { + // Generally it is the case that TcpConnectionData will be destroyed before the + // ActiveTcpClient. Because ordering on the deferred delete list is not guaranteed in the + // case of a disconnect, make sure parent_ is valid before doing clean-up. + if (parent_) { + parent_->clearCallbacks(); + } + } + + Network::ClientConnection& connection() override { return connection_; } + void setConnectionState(ConnectionPool::ConnectionStatePtr&& state) override { + parent_->connection_state_ = std::move(state); + } + + void addUpstreamCallbacks(ConnectionPool::UpstreamCallbacks& callbacks) override { + parent_->callbacks_ = &callbacks; + } + void release() { parent_ = nullptr; } + + protected: + ConnectionPool::ConnectionState* connectionState() override { + return parent_->connection_state_.get(); + } + + private: + ActiveTcpClient* parent_; + Network::ClientConnection& connection_; + }; + + ActiveTcpClient(ConnPoolImpl& parent, const Upstream::HostConstSharedPtr& host, + uint64_t concurrent_request_limit); + ~ActiveTcpClient() override; + + // Override the default's of Envoy::ConnectionPool::ActiveClient for class-specific functions. + // Network::ConnectionCallbacks + void onEvent(Network::ConnectionEvent event) override; + void onAboveWriteBufferHighWatermark() override { callbacks_->onAboveWriteBufferHighWatermark(); } + void onBelowWriteBufferLowWatermark() override { callbacks_->onBelowWriteBufferLowWatermark(); } + + void close() override { connection_->close(Network::ConnectionCloseType::NoFlush); } + size_t numActiveRequests() const override { return callbacks_ ? 1 : 0; } + bool closingWithIncompleteRequest() const override { return false; } + uint64_t id() const override { return connection_->id(); } + + void onUpstreamData(Buffer::Instance& data, bool end_stream) { + if (callbacks_) { + callbacks_->onUpstreamData(data, end_stream); + } else { + close(); + } + } + void clearCallbacks(); + + ConnPoolImpl& parent_; + Upstream::HostDescriptionConstSharedPtr real_host_description_; + ConnectionPool::UpstreamCallbacks* callbacks_{}; + Network::ClientConnectionPtr connection_; + ConnectionPool::ConnectionStatePtr connection_state_; + TcpConnectionData* tcp_connection_data_{}; +}; + +class ConnPoolImpl : public Envoy::ConnectionPool::ConnPoolImplBase, + public Tcp::ConnectionPool::Instance { +public: + ConnPoolImpl(Event::Dispatcher& dispatcher, Upstream::HostConstSharedPtr host, + Upstream::ResourcePriority priority, + const Network::ConnectionSocket::OptionsSharedPtr& options, + Network::TransportSocketOptionsSharedPtr transport_socket_options) + : Envoy::ConnectionPool::ConnPoolImplBase(host, priority, dispatcher, options, + transport_socket_options), + upstream_ready_cb_(dispatcher.createSchedulableCallback([this]() { + upstream_ready_enabled_ = false; + onUpstreamReady(); + })) {} + ~ConnPoolImpl() override { destructAllConnections(); } + + void addDrainedCallback(DrainedCb cb) override { addDrainedCallbackImpl(cb); } + void drainConnections() override { + drainConnectionsImpl(); + // Legacy behavior for the TCP connection pool marks all connecting clients + // as draining. 
+ for (auto& connecting_client : connecting_clients_) { + if (connecting_client->remaining_requests_ > 1) { + uint64_t old_limit = connecting_client->effectiveConcurrentRequestLimit(); + connecting_client->remaining_requests_ = 1; + if (connecting_client->effectiveConcurrentRequestLimit() < old_limit) { + connecting_request_capacity_ -= + (old_limit - connecting_client->effectiveConcurrentRequestLimit()); + } + } + } + } + + void closeConnections() override { + for (auto* list : {&ready_clients_, &busy_clients_, &connecting_clients_}) { + while (!list->empty()) { + list->front()->close(); + } + } + } + ConnectionPool::Cancellable* newConnection(Tcp::ConnectionPool::Callbacks& callbacks) override { + TcpAttachContext context(&callbacks); + return Envoy::ConnectionPool::ConnPoolImplBase::newStream(context); + } + + ConnectionPool::Cancellable* + newPendingRequest(Envoy::ConnectionPool::AttachContext& context) override { + Envoy::ConnectionPool::PendingRequestPtr pending_request = + std::make_unique(*this, typedContext(context)); + pending_request->moveIntoList(std::move(pending_request), pending_requests_); + return pending_requests_.front().get(); + } + + Upstream::HostDescriptionConstSharedPtr host() const override { + return Envoy::ConnectionPool::ConnPoolImplBase::host(); + } + + Envoy::ConnectionPool::ActiveClientPtr instantiateActiveClient() override { + return std::make_unique(*this, Envoy::ConnectionPool::ConnPoolImplBase::host(), + 1); + } + + void onPoolReady(Envoy::ConnectionPool::ActiveClient& client, + Envoy::ConnectionPool::AttachContext& context) override { + ActiveTcpClient* tcp_client = static_cast(&client); + auto* callbacks = typedContext(context).callbacks_; + std::unique_ptr connection_data = + std::make_unique(*tcp_client, *tcp_client->connection_); + callbacks->onPoolReady(std::move(connection_data), tcp_client->real_host_description_); + } + + void onPoolFailure(const Upstream::HostDescriptionConstSharedPtr& host_description, + absl::string_view, ConnectionPool::PoolFailureReason reason, + Envoy::ConnectionPool::AttachContext& context) override { + auto* callbacks = typedContext(context).callbacks_; + callbacks->onPoolFailure(reason, host_description); + } + + // These two functions exist for testing parity between old and new Tcp Connection Pools. 
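Since ConnPoolImpl implements the existing Tcp::ConnectionPool::Instance contract, callers such as the TCP proxy filter need no changes. A rough sketch of the calling convention the tests below exercise, with pool and callbacks standing in for real objects (illustrative, not part of this patch):

// Sketch only: pool is a Tcp::ConnectionPool::Instance, callbacks is a
// Tcp::ConnectionPool::Callbacks implementation owned by the caller.
Tcp::ConnectionPool::Cancellable* handle = pool.newConnection(callbacks);
if (handle != nullptr) {
  // Connection setup is still in flight; the caller may cancel while pending.
  handle->cancel(ConnectionPool::CancelPolicy::Default);
} else {
  // onPoolReady() or onPoolFailure() already ran inline from newConnection().
}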
+ virtual void onConnReleased(Envoy::ConnectionPool::ActiveClient& client) { + if (client.state_ == Envoy::ConnectionPool::ActiveClient::State::BUSY) { + if (!pending_requests_.empty() && !upstream_ready_enabled_) { + upstream_ready_cb_->scheduleCallbackCurrentIteration(); + } + } + } + virtual void onConnDestroyed() {} + +protected: + Event::SchedulableCallbackPtr upstream_ready_cb_; + bool upstream_ready_enabled_{}; +}; + +} // namespace Tcp +} // namespace Envoy diff --git a/source/common/upstream/cluster_manager_impl.cc b/source/common/upstream/cluster_manager_impl.cc index 423aba6425c8..ee8bf97d1f1a 100644 --- a/source/common/upstream/cluster_manager_impl.cc +++ b/source/common/upstream/cluster_manager_impl.cc @@ -32,6 +32,8 @@ #include "common/network/utility.h" #include "common/protobuf/utility.h" #include "common/router/shadow_writer_impl.h" +#include "common/runtime/runtime_features.h" +#include "common/tcp/conn_pool.h" #include "common/tcp/original_conn_pool.h" #include "common/upstream/cds_api_impl.h" #include "common/upstream/load_balancer_impl.h" @@ -1418,8 +1420,13 @@ Tcp::ConnectionPool::InstancePtr ProdClusterManagerFactory::allocateTcpConnPool( Event::Dispatcher& dispatcher, HostConstSharedPtr host, ResourcePriority priority, const Network::ConnectionSocket::OptionsSharedPtr& options, Network::TransportSocketOptionsSharedPtr transport_socket_options) { - return Tcp::ConnectionPool::InstancePtr{ - new Tcp::OriginalConnPoolImpl(dispatcher, host, priority, options, transport_socket_options)}; + if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.new_tcp_connection_pool")) { + return std::make_unique(dispatcher, host, priority, options, + transport_socket_options); + } else { + return Tcp::ConnectionPool::InstancePtr{new Tcp::OriginalConnPoolImpl( + dispatcher, host, priority, options, transport_socket_options)}; + } } std::pair ProdClusterManagerFactory::clusterFromProto( diff --git a/test/common/tcp/conn_pool_test.cc b/test/common/tcp/conn_pool_test.cc index 3b5511088fbd..70fb4a6bc3fa 100644 --- a/test/common/tcp/conn_pool_test.cc +++ b/test/common/tcp/conn_pool_test.cc @@ -3,6 +3,7 @@ #include "common/event/dispatcher_impl.h" #include "common/network/utility.h" +#include "common/tcp/conn_pool.h" #include "common/tcp/original_conn_pool.h" #include "common/upstream/upstream_impl.h" @@ -121,15 +122,31 @@ class ConnPoolBase : public Tcp::ConnectionPool::Instance { bool test_new_connection_pool_; protected: - class ConnPoolImplForTest : public OriginalConnPoolImpl { + class ConnPoolImplForTest : public ConnPoolImpl { public: ConnPoolImplForTest(Event::MockDispatcher& dispatcher, Upstream::HostSharedPtr host, ConnPoolBase& parent) + : ConnPoolImpl(dispatcher, host, Upstream::ResourcePriority::Default, nullptr, nullptr), + parent_(parent) {} + + void onConnReleased(Envoy::ConnectionPool::ActiveClient& client) override { + ConnPoolImpl::onConnReleased(client); + parent_.onConnReleasedForTest(); + } + + void onConnDestroyed() override { parent_.onConnDestroyedForTest(); } + ConnPoolBase& parent_; + }; + + class OriginalConnPoolImplForTest : public OriginalConnPoolImpl { + public: + OriginalConnPoolImplForTest(Event::MockDispatcher& dispatcher, Upstream::HostSharedPtr host, + ConnPoolBase& parent) : OriginalConnPoolImpl(dispatcher, host, Upstream::ResourcePriority::Default, nullptr, nullptr), parent_(parent) {} - ~ConnPoolImplForTest() override { + ~OriginalConnPoolImplForTest() override { EXPECT_EQ(0U, ready_conns_.size()); EXPECT_EQ(0U, busy_conns_.size()); EXPECT_EQ(0U, 
pending_requests_.size()); @@ -165,16 +182,16 @@ ConnPoolBase::ConnPoolBase(Event::MockDispatcher& dispatcher, Upstream::HostShar bool test_new_connection_pool) : mock_dispatcher_(dispatcher), mock_upstream_ready_cb_(upstream_ready_cb), test_new_connection_pool_(test_new_connection_pool) { - // TODO(alyssarwilk) remove this assert and test the old and the new when it lands. - ASSERT(!test_new_connection_pool_); - if (!test_new_connection_pool_) { + if (test_new_connection_pool_) { conn_pool_ = std::make_unique(dispatcher, host, *this); + } else { + conn_pool_ = std::make_unique(dispatcher, host, *this); } } void ConnPoolBase::expectEnableUpstreamReady(bool run) { if (!test_new_connection_pool_) { - dynamic_cast(conn_pool_.get())->expectEnableUpstreamReady(run); + dynamic_cast(conn_pool_.get())->expectEnableUpstreamReady(run); } else { if (!run) { EXPECT_CALL(*mock_upstream_ready_cb_, scheduleCallbackCurrentIteration()) @@ -195,10 +212,7 @@ class TcpConnPoolImplTest : public testing::TestWithParam { : test_new_connection_pool_(GetParam()), upstream_ready_cb_(new NiceMock(&dispatcher_)), host_(Upstream::makeTestHost(cluster_, "tcp://127.0.0.1:9000")), - conn_pool_(dispatcher_, host_, upstream_ready_cb_, test_new_connection_pool_) { - // TODO(alyssarwilk) remove this assert and test the old and the new when it lands. - ASSERT(!test_new_connection_pool_); - } + conn_pool_(dispatcher_, host_, upstream_ready_cb_, test_new_connection_pool_) {} ~TcpConnPoolImplTest() override { EXPECT_TRUE(TestUtility::gaugesZeroed(cluster_->stats_store_.gauges())) @@ -223,8 +237,10 @@ class TcpConnPoolImplDestructorTest : public testing::TestWithParam { : test_new_connection_pool_(GetParam()), upstream_ready_cb_(new NiceMock(&dispatcher_)) { host_ = Upstream::makeTestHost(cluster_, "tcp://127.0.0.1:9000"); - ASSERT(!test_new_connection_pool_); - if (!test_new_connection_pool_) { + if (test_new_connection_pool_) { + conn_pool_ = std::make_unique( + dispatcher_, host_, Upstream::ResourcePriority::Default, nullptr, nullptr); + } else { conn_pool_ = std::make_unique( dispatcher_, host_, Upstream::ResourcePriority::Default, nullptr, nullptr); } @@ -914,10 +930,18 @@ TEST_P(TcpConnPoolImplTest, DrainWhileConnecting) { EXPECT_NE(nullptr, handle); conn_pool_.addDrainedCallback([&]() -> void { drained.ready(); }); - handle->cancel(ConnectionPool::CancelPolicy::Default); - EXPECT_CALL(*conn_pool_.test_conns_[0].connection_, close(Network::ConnectionCloseType::NoFlush)); - EXPECT_CALL(drained, ready()); - conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + if (test_new_connection_pool_) { + // The shared connection pool removes and closes connecting clients if there are no + // pending requests. + EXPECT_CALL(drained, ready()); + handle->cancel(ConnectionPool::CancelPolicy::Default); + } else { + handle->cancel(ConnectionPool::CancelPolicy::Default); + EXPECT_CALL(*conn_pool_.test_conns_[0].connection_, + close(Network::ConnectionCloseType::NoFlush)); + EXPECT_CALL(drained, ready()); + conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + } EXPECT_CALL(conn_pool_, onConnDestroyedForTest()); dispatcher_.clearDeferredDeleteList(); } @@ -947,6 +971,74 @@ TEST_P(TcpConnPoolImplTest, DrainOnClose) { dispatcher_.clearDeferredDeleteList(); } +/** + * Test connecting_request_capacity logic. 
+ */ +TEST_P(TcpConnPoolImplTest, RequestCapacity) { + if (!test_new_connection_pool_) { + return; + } + cluster_->resetResourceManager(5, 1024, 1024, 1, 1); + cluster_->max_requests_per_connection_ = 100; + + ConnPoolCallbacks callbacks1; + ConnPoolCallbacks callbacks2; + Tcp::ConnectionPool::Cancellable* handle1; + Tcp::ConnectionPool::Cancellable* handle2; + { + // Request 1 should kick off a new connection. + conn_pool_.expectConnCreate(); + handle1 = conn_pool_.newConnection(callbacks1); + EXPECT_NE(nullptr, handle1); + } + { + // Request 2 should kick off a new connection. + conn_pool_.expectConnCreate(); + handle2 = conn_pool_.newConnection(callbacks2); + EXPECT_NE(nullptr, handle2); + } + + // This should set the number of requests remaining to 1 on the active + // connections, and the connecting_request_capacity to 2 as well. + conn_pool_.drainConnections(); + + // Cancel the connections. Because neither used CloseExcess, the two connections should persist. + handle1->cancel(ConnectionPool::CancelPolicy::Default); + handle2->cancel(ConnectionPool::CancelPolicy::Default); + + Tcp::ConnectionPool::Cancellable* handle3; + Tcp::ConnectionPool::Cancellable* handle4; + Tcp::ConnectionPool::Cancellable* handle5; + ConnPoolCallbacks callbacks3; + ConnPoolCallbacks callbacks4; + ConnPoolCallbacks callbacks5; + + { + // The next two requests will use the connections in progress, bringing + // connecting_request_capacity to zero. + handle3 = conn_pool_.newConnection(callbacks3); + EXPECT_NE(nullptr, handle3); + + handle4 = conn_pool_.newConnection(callbacks4); + EXPECT_NE(nullptr, handle4); + } + { + // With connecting_request_capacity zero, a request for a new connection + // will kick off connection #3. + conn_pool_.expectConnCreate(); + handle5 = conn_pool_.newConnection(callbacks5); + EXPECT_NE(nullptr, handle5); + } + + // Clean up remaining connections. + handle3->cancel(ConnectionPool::CancelPolicy::Default); + handle4->cancel(ConnectionPool::CancelPolicy::Default); + handle5->cancel(ConnectionPool::CancelPolicy::Default); + conn_pool_.test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + conn_pool_.test_conns_[1].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + conn_pool_.test_conns_[2].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); +} + /** * Test that pending connections are closed when the connection pool is destroyed. 
*/ @@ -990,9 +1082,8 @@ TEST_P(TcpConnPoolImplDestructorTest, TestReadyConnectionsAreClosed) { EXPECT_CALL(dispatcher_, clearDeferredDeleteList()); conn_pool_.reset(); } - -INSTANTIATE_TEST_SUITE_P(ConnectionPools, TcpConnPoolImplTest, testing::Values(false)); -INSTANTIATE_TEST_SUITE_P(ConnectionPools, TcpConnPoolImplDestructorTest, testing::Values(false)); +INSTANTIATE_TEST_SUITE_P(ConnectionPools, TcpConnPoolImplTest, testing::Bool()); +INSTANTIATE_TEST_SUITE_P(ConnectionPools, TcpConnPoolImplDestructorTest, testing::Bool()); } // namespace Tcp } // namespace Envoy diff --git a/test/integration/BUILD b/test/integration/BUILD index 0dc69ee4c328..0e927155b4c2 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -942,6 +942,7 @@ envoy_cc_test( data = [ "//test/config/integration/certs", ], + shard_count = 2, deps = [ ":integration_lib", "//source/common/config:api_version_lib", diff --git a/test/integration/tcp_proxy_integration_test.cc b/test/integration/tcp_proxy_integration_test.cc index 50b90974a606..ad50cf255793 100644 --- a/test/integration/tcp_proxy_integration_test.cc +++ b/test/integration/tcp_proxy_integration_test.cc @@ -26,14 +26,36 @@ using testing::NiceMock; namespace Envoy { +std::vector getProtocolTestParams() { + std::vector ret; + + for (auto ip_version : TestEnvironment::getIpVersionsForTest()) { + ret.push_back(TcpProxyIntegrationTestParams{ip_version, true}); + ret.push_back(TcpProxyIntegrationTestParams{ip_version, false}); + } + return ret; +} + +std::string +protocolTestParamsToString(const ::testing::TestParamInfo& params) { + return absl::StrCat( + (params.param.version == Network::Address::IpVersion::v4 ? "IPv4_" : "IPv6_"), + (params.param.test_original_version == true ? "OriginalConnPool" : "NewConnPool")); +} + void TcpProxyIntegrationTest::initialize() { + if (GetParam().test_original_version) { + config_helper_.addRuntimeOverride("envoy.reloadable_features.new_tcp_connection_pool", "false"); + } else { + config_helper_.addRuntimeOverride("envoy.reloadable_features.new_tcp_connection_pool", "true"); + } + config_helper_.renameListener("tcp_proxy"); BaseIntegrationTest::initialize(); } -INSTANTIATE_TEST_SUITE_P(IpVersions, TcpProxyIntegrationTest, - testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), - TestUtility::ipTestParamsToString); +INSTANTIATE_TEST_SUITE_P(TcpProxyIntegrationTestParams, TcpProxyIntegrationTest, + testing::ValuesIn(getProtocolTestParams()), protocolTestParamsToString); // Test upstream writing before downstream downstream does. TEST_P(TcpProxyIntegrationTest, TcpProxyUpstreamWritesFirst) { @@ -235,7 +257,7 @@ TEST_P(TcpProxyIntegrationTest, TcpProxyUpstreamFlushEnvoyExit) { TEST_P(TcpProxyIntegrationTest, AccessLog) { std::string access_log_path = TestEnvironment::temporaryPath( - fmt::format("access_log{}.txt", GetParam() == Network::Address::IpVersion::v4 ? "v4" : "v6")); + fmt::format("access_log{}.txt", version_ == Network::Address::IpVersion::v4 ? "v4" : "v6")); config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0); auto* filter_chain = listener->mutable_filter_chains(0); @@ -286,17 +308,17 @@ TEST_P(TcpProxyIntegrationTest, AccessLog) { // Regex matching localhost:port #ifndef GTEST_USES_SIMPLE_RE - const std::string ip_port_regex = (GetParam() == Network::Address::IpVersion::v4) + const std::string ip_port_regex = (version_ == Network::Address::IpVersion::v4) ? 
R"EOF(127\.0\.0\.1:[0-9]+)EOF" : R"EOF(\[::1\]:[0-9]+)EOF"; #else - const std::string ip_port_regex = (GetParam() == Network::Address::IpVersion::v4) + const std::string ip_port_regex = (version_ == Network::Address::IpVersion::v4) ? R"EOF(127\.0\.0\.1:\d+)EOF" : R"EOF(\[::1\]:\d+)EOF"; #endif const std::string ip_regex = - (GetParam() == Network::Address::IpVersion::v4) ? R"EOF(127\.0\.0\.1)EOF" : R"EOF(::1)EOF"; + (version_ == Network::Address::IpVersion::v4) ? R"EOF(127\.0\.0\.1)EOF" : R"EOF(::1)EOF"; // Test that all three addresses were populated correctly. Only check the first line of // log output for simplicity. @@ -616,9 +638,8 @@ void TcpProxyMetadataMatchIntegrationTest::expectEndpointNotToMatchRoute() { tcp_client->close(); } -INSTANTIATE_TEST_SUITE_P(IpVersions, TcpProxyMetadataMatchIntegrationTest, - testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), - TestUtility::ipTestParamsToString); +INSTANTIATE_TEST_SUITE_P(TcpProxyIntegrationTestParams, TcpProxyMetadataMatchIntegrationTest, + testing::ValuesIn(getProtocolTestParams()), protocolTestParamsToString); // Test subset load balancing for a regular cluster when endpoint selector is defined at the top // level. @@ -803,9 +824,8 @@ TEST_P(TcpProxyMetadataMatchIntegrationTest, expectEndpointNotToMatchRoute(); } -INSTANTIATE_TEST_SUITE_P(IpVersions, TcpProxySslIntegrationTest, - testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), - TestUtility::ipTestParamsToString); +INSTANTIATE_TEST_SUITE_P(TcpProxyIntegrationTestParams, TcpProxySslIntegrationTest, + testing::ValuesIn(getProtocolTestParams()), protocolTestParamsToString); void TcpProxySslIntegrationTest::initialize() { config_helper_.addSslConfig(); diff --git a/test/integration/tcp_proxy_integration_test.h b/test/integration/tcp_proxy_integration_test.h index f6b119b86d1d..d4a2248c4e3e 100644 --- a/test/integration/tcp_proxy_integration_test.h +++ b/test/integration/tcp_proxy_integration_test.h @@ -10,10 +10,16 @@ namespace Envoy { -class TcpProxyIntegrationTest : public testing::TestWithParam, +struct TcpProxyIntegrationTestParams { + Network::Address::IpVersion version; + bool test_original_version; +}; + +class TcpProxyIntegrationTest : public testing::TestWithParam, public BaseIntegrationTest { public: - TcpProxyIntegrationTest() : BaseIntegrationTest(GetParam(), ConfigHelper::tcpProxyConfig()) { + TcpProxyIntegrationTest() + : BaseIntegrationTest(GetParam().version, ConfigHelper::tcpProxyConfig()) { enable_half_close_ = true; } From 1d8e26ea4414c35f7eb75e0f22253d603ced5cee Mon Sep 17 00:00:00 2001 From: Joshua Marantz Date: Mon, 6 Jul 2020 14:59:17 -0400 Subject: [PATCH 528/909] stats: remove stale reference to dynamic support from StatNameSet doc, and add reference to new helpers (#11392) Commit Message: StatNameSet originally had support for maintaining a dynamically accumulated map of stat names. This has been removed in favor of StatNameDynamicStorage and helper methods to make counters/gauges/histograms from them. This just updates the doc to reflect that. Additional Description: n/a Risk Level: low -- just a class doc change Testing: none Docs Changes: n/a Release Notes: n/a. 
Signed-off-by: Joshua Marantz --- source/common/stats/symbol_table_impl.h | 34 +++++++++++++++++++++---- tools/spelling/spelling_dictionary.txt | 1 + 2 files changed, 30 insertions(+), 5 deletions(-) diff --git a/source/common/stats/symbol_table_impl.h b/source/common/stats/symbol_table_impl.h index baf862267227..f4104fa407e4 100644 --- a/source/common/stats/symbol_table_impl.h +++ b/source/common/stats/symbol_table_impl.h @@ -767,14 +767,22 @@ class StatNameStorageSet { HashSet hash_set_; }; -// Captures StatNames for lookup by string, keeping two maps: a map of -// 'built-ins' that is expected to be populated during initialization, and a map -// of dynamically discovered names. The latter map is protected by a mutex, and -// can be mutated at runtime. +// Captures StatNames for lookup by string, keeping a map of 'built-ins' that is +// expected to be populated during initialization. // // Ideally, builtins should be added during process initialization, in the // outermost relevant context. And as the builtins map is not mutex protected, -// builtins must *not* be added in the request-path. +// builtins must *not* be added to an existing StatNameSet in the request-path. +// +// It is fine to populate a new StatNameSet when (for example) an xDS +// message reveals a new set of names to be used as stats. The population must +// be completed prior to exposing the new StatNameSet to worker threads. +// +// To create stats using names discovered in the request path, dynamic stat +// names must be used (see StatNameDynamicStorage). Consider using helper +// methods such as Stats::Utility::counterFromElements in common/stats/utility.h +// to simplify the process of allocating and combining stat names and creating +// counters, gauges, and histograms from them. class StatNameSet { public: // This object must be instantiated via SymbolTable::makeSet(), thus constructor is private. @@ -810,6 +818,22 @@ class StatNameSet { /** * Adds a StatName using the pool, but without remembering it in any maps. + * + * For convenience, StatNameSet offers pass-through thread-safe access to + * its mutex-protected pool. This is useful in constructor initializers, when + * StatNames are needed both from compile-time constants, as well as from + * other constructor args, e.g. + * MyClass(const std::vector& strings, Stats::SymbolTable& symbol_table) + * : stat_name_set_(symbol_table), + * known_const_(stat_name_set_.add("known_const")) { // unmapped constants from pool + * stat_name_set_.rememberBuiltins(strings); // mapped builtins. + * } + * This avoids the need to make two different pools; one backing the + * StatNameSet mapped entries, and the other backing the set passed in via the + * constructor. + * + * @param str The string to add as a StatName + * @return The StatName for str. 
*/ StatName add(absl::string_view str) { absl::MutexLock lock(&mutex_); diff --git a/tools/spelling/spelling_dictionary.txt b/tools/spelling/spelling_dictionary.txt index 015a02d29f3c..4a5aeb45e2de 100644 --- a/tools/spelling/spelling_dictionary.txt +++ b/tools/spelling/spelling_dictionary.txt @@ -667,6 +667,7 @@ inflight -ing init initializer +initializers inlined inlining inobservability From e71e4dc4204450175644fa89639f8046872f548f Mon Sep 17 00:00:00 2001 From: phlax Date: Mon, 6 Jul 2020 21:03:56 +0100 Subject: [PATCH 529/909] Add docker docs regarding file/logging (#11857) Signed-off-by: Ryan Northey --- docs/root/start/start.rst | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/docs/root/start/start.rst b/docs/root/start/start.rst index 6c1bb45b33ca..07fea5347580 100644 --- a/docs/root/start/start.rst +++ b/docs/root/start/start.rst @@ -160,6 +160,25 @@ for controlling access to an ``envoy`` socket from outside of the container. If you wish to run the container as the ``root`` user you can set ``ENVOY_UID`` to ``0``. +The ``envoy`` image sends application logs to ``/dev/stdout`` and ``/dev/stderr`` by default, and these +can be viewed in the container log. + +If you send application, admin or access logs to a file output, the ``envoy`` user will require the +necessary permissions to write to this file. This can be achieved by setting the ``ENVOY_UID`` and/or +by making the file writeable by the envoy user. + +For example, to mount a log folder from the host and make it writable, you can: + +.. substitution-code-block:: none + + $ mkdir logs + $ chown 777 logs + $ docker run -d -v `pwd`/logs:/var/log --name envoy -e ENVOY_UID=777 -p 9901:9901 -p 10000:10000 envoy:v1 + +You can then configure ``envoy`` to log to files in ``/var/log`` + +The default ``envoy`` ``uid`` and ``gid`` are ``101``. + Sandboxes --------- From 12c8c79f40f6622109014321f99853e3f239dd63 Mon Sep 17 00:00:00 2001 From: James Adam Buckland Date: Mon, 6 Jul 2020 17:58:27 -0400 Subject: [PATCH 530/909] [ssl] Clarify ssl ownership semantics between SslSocket and SslSocketInfo (#11901) SslSocket no longer holds a SSL*; instead, it relies on the already-kept SslSocketInfo struct and its already-public ssl() accessor. Now SSL* is owned by exactly one class (SslSocketInfo). 
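Distilled into a minimal sketch (the names follow the patch below; construction details and everything else are omitted), the ownership shape after this change is:

    // SslSocketInfo is the sole owner of the SSL object.
    class SslSocketInfo {
    public:
      SSL* ssl() const { return ssl_.get(); } // non-owning accessor
      bssl::UniquePtr<SSL> ssl_;
    };

    // SslSocket only borrows the raw pointer through its shared info object.
    class SslSocket {
    protected:
      SSL* rawSsl() const { return info_->ssl_.get(); }
    private:
      std::shared_ptr<SslSocketInfo> info_;
    };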
Signed-off-by: James Buckland --- .../transport_sockets/tls/ssl_socket.cc | 67 +++++++++---------- .../transport_sockets/tls/ssl_socket.h | 13 ++-- .../proxy_filter_integration_test.cc | 5 +- .../http/router/auto_sni_integration_test.cc | 8 +-- .../transport_sockets/tls/ssl_socket_test.cc | 18 ++--- 5 files changed, 55 insertions(+), 56 deletions(-) diff --git a/source/extensions/transport_sockets/tls/ssl_socket.cc b/source/extensions/transport_sockets/tls/ssl_socket.cc index 1e3082f80653..03f3f8b44e4a 100644 --- a/source/extensions/transport_sockets/tls/ssl_socket.cc +++ b/source/extensions/transport_sockets/tls/ssl_socket.cc @@ -47,14 +47,13 @@ SslSocket::SslSocket(Envoy::Ssl::ContextSharedPtr ctx, InitialState state, : transport_socket_options_(transport_socket_options), ctx_(std::dynamic_pointer_cast(ctx)), state_(SocketState::PreHandshake) { bssl::UniquePtr ssl = ctx_->newSsl(transport_socket_options_.get()); - ssl_ = ssl.get(); info_ = std::make_shared(std::move(ssl), ctx_); if (state == InitialState::Client) { - SSL_set_connect_state(ssl_); + SSL_set_connect_state(rawSsl()); } else { ASSERT(state == InitialState::Server); - SSL_set_accept_state(ssl_); + SSL_set_accept_state(rawSsl()); } } @@ -65,11 +64,11 @@ void SslSocket::setTransportSocketCallbacks(Network::TransportSocketCallbacks& c // Associate this SSL connection with all the certificates (with their potentially different // private key methods). for (auto const& provider : ctx_->getPrivateKeyMethodProviders()) { - provider->registerPrivateKeyMethod(ssl_, *this, callbacks_->connection().dispatcher()); + provider->registerPrivateKeyMethod(rawSsl(), *this, callbacks_->connection().dispatcher()); } BIO* bio = BIO_new_socket(callbacks_->ioHandle().fd(), 0); - SSL_set_bio(ssl_, bio, bio); + SSL_set_bio(rawSsl(), bio, bio); } SslSocket::ReadResult SslSocket::sslReadIntoSlice(Buffer::RawSlice& slice) { @@ -77,7 +76,7 @@ SslSocket::ReadResult SslSocket::sslReadIntoSlice(Buffer::RawSlice& slice) { uint8_t* mem = static_cast(slice.mem_); size_t remaining = slice.len_; while (remaining > 0) { - int rc = SSL_read(ssl_, mem, remaining); + int rc = SSL_read(rawSsl(), mem, remaining); ENVOY_CONN_LOG(trace, "ssl read returns: {}", callbacks_->connection(), rc); if (rc > 0) { ASSERT(static_cast(rc) <= remaining); @@ -124,7 +123,7 @@ Network::IoResult SslSocket::doRead(Buffer::Instance& read_buffer) { } if (result.error_.has_value()) { keep_reading = false; - int err = SSL_get_error(ssl_, result.error_.value()); + int err = SSL_get_error(rawSsl(), result.error_.value()); switch (err) { case SSL_ERROR_WANT_READ: break; @@ -171,11 +170,11 @@ void SslSocket::onPrivateKeyMethodComplete() { PostIoAction SslSocket::doHandshake() { ASSERT(state_ != SocketState::HandshakeComplete && state_ != SocketState::ShutdownSent); - int rc = SSL_do_handshake(ssl_); + int rc = SSL_do_handshake(rawSsl()); if (rc == 1) { ENVOY_CONN_LOG(debug, "handshake complete", callbacks_->connection()); state_ = SocketState::HandshakeComplete; - ctx_->logHandshake(ssl_); + ctx_->logHandshake(rawSsl()); callbacks_->raiseEvent(Network::ConnectionEvent::Connected); // It's possible that we closed during the handshake callback. @@ -183,7 +182,7 @@ PostIoAction SslSocket::doHandshake() { ? 
PostIoAction::KeepOpen : PostIoAction::Close; } else { - int err = SSL_get_error(ssl_, rc); + int err = SSL_get_error(rawSsl(), rc); switch (err) { case SSL_ERROR_WANT_READ: case SSL_ERROR_WANT_WRITE: @@ -255,7 +254,7 @@ Network::IoResult SslSocket::doWrite(Buffer::Instance& write_buffer, bool end_st // it again with the same parameters. This is done by tracking last write size, but not write // data, since linearize() will return the same undrained data anyway. ASSERT(bytes_to_write <= write_buffer.length()); - int rc = SSL_write(ssl_, write_buffer.linearize(bytes_to_write), bytes_to_write); + int rc = SSL_write(rawSsl(), write_buffer.linearize(bytes_to_write), bytes_to_write); ENVOY_CONN_LOG(trace, "ssl write returns: {}", callbacks_->connection(), rc); if (rc > 0) { ASSERT(rc == static_cast(bytes_to_write)); @@ -263,7 +262,7 @@ Network::IoResult SslSocket::doWrite(Buffer::Instance& write_buffer, bool end_st write_buffer.drain(rc); bytes_to_write = std::min(write_buffer.length(), static_cast(16384)); } else { - int err = SSL_get_error(ssl_, rc); + int err = SSL_get_error(rawSsl(), rc); switch (err) { case SSL_ERROR_WANT_WRITE: bytes_to_retry_ = bytes_to_write; @@ -294,7 +293,7 @@ void SslSocket::shutdownSsl() { ASSERT(state_ != SocketState::PreHandshake); if (state_ != SocketState::ShutdownSent && callbacks_->connection().state() != Network::Connection::State::Closed) { - int rc = SSL_shutdown(ssl_); + int rc = SSL_shutdown(rawSsl()); ENVOY_CONN_LOG(debug, "SSL shutdown: rc={}", callbacks_->connection(), rc); drainErrorQueue(); state_ = SocketState::ShutdownSent; @@ -316,7 +315,7 @@ SslSocketInfo::SslSocketInfo(bssl::UniquePtr ssl, ContextImplSharedPtr ctx) } bool SslSocketInfo::peerCertificatePresented() const { - bssl::UniquePtr cert(SSL_get_peer_certificate(ssl_.get())); + bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); return cert != nullptr; } @@ -331,7 +330,7 @@ absl::Span SslSocketInfo::uriSanLocalCertificate() const { } // The cert object is not owned. 
- X509* cert = SSL_get_certificate(ssl_.get()); + X509* cert = SSL_get_certificate(ssl()); if (!cert) { ASSERT(cached_uri_san_local_certificate_.empty()); return cached_uri_san_local_certificate_; @@ -345,7 +344,7 @@ absl::Span SslSocketInfo::dnsSansLocalCertificate() const { return cached_dns_san_local_certificate_; } - X509* cert = SSL_get_certificate(ssl_.get()); + X509* cert = SSL_get_certificate(ssl()); if (!cert) { ASSERT(cached_dns_san_local_certificate_.empty()); return cached_dns_san_local_certificate_; @@ -358,7 +357,7 @@ const std::string& SslSocketInfo::sha256PeerCertificateDigest() const { if (!cached_sha_256_peer_certificate_digest_.empty()) { return cached_sha_256_peer_certificate_digest_; } - bssl::UniquePtr cert(SSL_get_peer_certificate(ssl_.get())); + bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); if (!cert) { ASSERT(cached_sha_256_peer_certificate_digest_.empty()); return cached_sha_256_peer_certificate_digest_; @@ -376,7 +375,7 @@ const std::string& SslSocketInfo::urlEncodedPemEncodedPeerCertificate() const { if (!cached_url_encoded_pem_encoded_peer_certificate_.empty()) { return cached_url_encoded_pem_encoded_peer_certificate_; } - bssl::UniquePtr cert(SSL_get_peer_certificate(ssl_.get())); + bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); if (!cert) { ASSERT(cached_url_encoded_pem_encoded_peer_certificate_.empty()); return cached_url_encoded_pem_encoded_peer_certificate_; @@ -399,7 +398,7 @@ const std::string& SslSocketInfo::urlEncodedPemEncodedPeerCertificateChain() con return cached_url_encoded_pem_encoded_peer_cert_chain_; } - STACK_OF(X509)* cert_chain = SSL_get_peer_full_cert_chain(ssl_.get()); + STACK_OF(X509)* cert_chain = SSL_get_peer_full_cert_chain(ssl()); if (cert_chain == nullptr) { ASSERT(cached_url_encoded_pem_encoded_peer_cert_chain_.empty()); return cached_url_encoded_pem_encoded_peer_cert_chain_; @@ -429,7 +428,7 @@ absl::Span SslSocketInfo::uriSanPeerCertificate() const { return cached_uri_san_peer_certificate_; } - bssl::UniquePtr cert(SSL_get_peer_certificate(ssl_.get())); + bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); if (!cert) { ASSERT(cached_uri_san_peer_certificate_.empty()); return cached_uri_san_peer_certificate_; @@ -443,7 +442,7 @@ absl::Span SslSocketInfo::dnsSansPeerCertificate() const { return cached_dns_san_peer_certificate_; } - bssl::UniquePtr cert(SSL_get_peer_certificate(ssl_.get())); + bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); if (!cert) { ASSERT(cached_dns_san_peer_certificate_.empty()); return cached_dns_san_peer_certificate_; @@ -455,7 +454,7 @@ absl::Span SslSocketInfo::dnsSansPeerCertificate() const { void SslSocket::closeSocket(Network::ConnectionEvent) { // Unregister the SSL connection object from private key method providers. for (auto const& provider : ctx_->getPrivateKeyMethodProviders()) { - provider->unregisterPrivateKeyMethod(ssl_); + provider->unregisterPrivateKeyMethod(rawSsl()); } // Attempt to send a shutdown before closing the socket. 
It's possible this won't go out if @@ -469,12 +468,12 @@ void SslSocket::closeSocket(Network::ConnectionEvent) { std::string SslSocket::protocol() const { const unsigned char* proto; unsigned int proto_len; - SSL_get0_alpn_selected(ssl_, &proto, &proto_len); + SSL_get0_alpn_selected(rawSsl(), &proto, &proto_len); return std::string(reinterpret_cast(proto), proto_len); } uint16_t SslSocketInfo::ciphersuiteId() const { - const SSL_CIPHER* cipher = SSL_get_current_cipher(ssl_.get()); + const SSL_CIPHER* cipher = SSL_get_current_cipher(ssl()); if (cipher == nullptr) { return 0xffff; } @@ -486,7 +485,7 @@ uint16_t SslSocketInfo::ciphersuiteId() const { } std::string SslSocketInfo::ciphersuiteString() const { - const SSL_CIPHER* cipher = SSL_get_current_cipher(ssl_.get()); + const SSL_CIPHER* cipher = SSL_get_current_cipher(ssl()); if (cipher == nullptr) { return {}; } @@ -498,12 +497,12 @@ const std::string& SslSocketInfo::tlsVersion() const { if (!cached_tls_version_.empty()) { return cached_tls_version_; } - cached_tls_version_ = SSL_get_version(ssl_.get()); + cached_tls_version_ = SSL_get_version(ssl()); return cached_tls_version_; } absl::optional SslSocketInfo::x509Extension(absl::string_view extension_name) const { - bssl::UniquePtr cert(SSL_get_peer_certificate(ssl_.get())); + bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); if (!cert) { return absl::nullopt; } @@ -516,7 +515,7 @@ const std::string& SslSocketInfo::serialNumberPeerCertificate() const { if (!cached_serial_number_peer_certificate_.empty()) { return cached_serial_number_peer_certificate_; } - bssl::UniquePtr cert(SSL_get_peer_certificate(ssl_.get())); + bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); if (!cert) { ASSERT(cached_serial_number_peer_certificate_.empty()); return cached_serial_number_peer_certificate_; @@ -529,7 +528,7 @@ const std::string& SslSocketInfo::issuerPeerCertificate() const { if (!cached_issuer_peer_certificate_.empty()) { return cached_issuer_peer_certificate_; } - bssl::UniquePtr cert(SSL_get_peer_certificate(ssl_.get())); + bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); if (!cert) { ASSERT(cached_issuer_peer_certificate_.empty()); return cached_issuer_peer_certificate_; @@ -542,7 +541,7 @@ const std::string& SslSocketInfo::subjectPeerCertificate() const { if (!cached_subject_peer_certificate_.empty()) { return cached_subject_peer_certificate_; } - bssl::UniquePtr cert(SSL_get_peer_certificate(ssl_.get())); + bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); if (!cert) { ASSERT(cached_subject_peer_certificate_.empty()); return cached_subject_peer_certificate_; @@ -555,7 +554,7 @@ const std::string& SslSocketInfo::subjectLocalCertificate() const { if (!cached_subject_local_certificate_.empty()) { return cached_subject_local_certificate_; } - X509* cert = SSL_get_certificate(ssl_.get()); + X509* cert = SSL_get_certificate(ssl()); if (!cert) { ASSERT(cached_subject_local_certificate_.empty()); return cached_subject_local_certificate_; @@ -565,7 +564,7 @@ const std::string& SslSocketInfo::subjectLocalCertificate() const { } absl::optional SslSocketInfo::validFromPeerCertificate() const { - bssl::UniquePtr cert(SSL_get_peer_certificate(ssl_.get())); + bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); if (!cert) { return absl::nullopt; } @@ -573,7 +572,7 @@ absl::optional SslSocketInfo::validFromPeerCertificate() const { } absl::optional SslSocketInfo::expirationPeerCertificate() const { - bssl::UniquePtr cert(SSL_get_peer_certificate(ssl_.get())); + bssl::UniquePtr 
cert(SSL_get_peer_certificate(ssl())); if (!cert) { return absl::nullopt; } @@ -584,7 +583,7 @@ const std::string& SslSocketInfo::sessionId() const { if (!cached_session_id_.empty()) { return cached_session_id_; } - SSL_SESSION* session = SSL_get_session(ssl_.get()); + SSL_SESSION* session = SSL_get_session(ssl()); if (session == nullptr) { ASSERT(cached_session_id_.empty()); return cached_session_id_; diff --git a/source/extensions/transport_sockets/tls/ssl_socket.h b/source/extensions/transport_sockets/tls/ssl_socket.h index 43ee5efdfceb..2a6ee3a056dd 100644 --- a/source/extensions/transport_sockets/tls/ssl_socket.h +++ b/source/extensions/transport_sockets/tls/ssl_socket.h @@ -76,8 +76,7 @@ class SslSocketInfo : public Envoy::Ssl::ConnectionInfo { std::string ciphersuiteString() const override; const std::string& tlsVersion() const override; absl::optional x509Extension(absl::string_view extension_name) const override; - - SSL* rawSslForTest() const { return ssl_.get(); } + SSL* ssl() const { return ssl_.get(); } bssl::UniquePtr ssl_; @@ -98,6 +97,8 @@ class SslSocketInfo : public Envoy::Ssl::ConnectionInfo { mutable SslExtendedSocketInfoImpl extended_socket_info_; }; +using SslSocketInfoConstSharedPtr = std::shared_ptr; + class SslSocket : public Network::TransportSocket, public Envoy::Ssl::PrivateKeyConnectionCallbacks, protected Logger::Loggable { @@ -118,7 +119,10 @@ class SslSocket : public Network::TransportSocket, // Ssl::PrivateKeyConnectionCallbacks void onPrivateKeyMethodComplete() override; - SSL* rawSslForTest() const { return ssl_; } + SSL* rawSslForTest() const { return rawSsl(); } + +protected: + SSL* rawSsl() const { return info_->ssl_.get(); } private: struct ReadResult { @@ -141,8 +145,7 @@ class SslSocket : public Network::TransportSocket, std::string failure_reason_; SocketState state_; - SSL* ssl_; - Ssl::ConnectionInfoConstSharedPtr info_; + SslSocketInfoConstSharedPtr info_; }; class ClientSslSocketFactory : public Network::TransportSocketFactory, diff --git a/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_integration_test.cc b/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_integration_test.cc index deb95a24c6a7..49c7c08f50ac 100644 --- a/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_integration_test.cc +++ b/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_integration_test.cc @@ -340,8 +340,7 @@ TEST_P(ProxyFilterIntegrationTest, UpstreamTls) { const Extensions::TransportSockets::Tls::SslSocketInfo* ssl_socket = dynamic_cast( fake_upstream_connection_->connection().ssl().get()); - EXPECT_STREQ("localhost", - SSL_get_servername(ssl_socket->rawSslForTest(), TLSEXT_NAMETYPE_host_name)); + EXPECT_STREQ("localhost", SSL_get_servername(ssl_socket->ssl(), TLSEXT_NAMETYPE_host_name)); upstream_request_->encodeHeaders(default_response_headers_, true); response->waitForEndStream(); @@ -366,7 +365,7 @@ TEST_P(ProxyFilterIntegrationTest, UpstreamTlsWithIpHost) { const Extensions::TransportSockets::Tls::SslSocketInfo* ssl_socket = dynamic_cast( fake_upstream_connection_->connection().ssl().get()); - EXPECT_STREQ(nullptr, SSL_get_servername(ssl_socket->rawSslForTest(), TLSEXT_NAMETYPE_host_name)); + EXPECT_STREQ(nullptr, SSL_get_servername(ssl_socket->ssl(), TLSEXT_NAMETYPE_host_name)); upstream_request_->encodeHeaders(default_response_headers_, true); response->waitForEndStream(); diff --git a/test/extensions/filters/http/router/auto_sni_integration_test.cc 
b/test/extensions/filters/http/router/auto_sni_integration_test.cc index 5404fcc9b711..10f0d7818e3f 100644 --- a/test/extensions/filters/http/router/auto_sni_integration_test.cc +++ b/test/extensions/filters/http/router/auto_sni_integration_test.cc @@ -79,8 +79,7 @@ TEST_P(AutoSniIntegrationTest, BasicAutoSniTest) { const Extensions::TransportSockets::Tls::SslSocketInfo* ssl_socket = dynamic_cast( fake_upstream_connection_->connection().ssl().get()); - EXPECT_STREQ("localhost", - SSL_get_servername(ssl_socket->rawSslForTest(), TLSEXT_NAMETYPE_host_name)); + EXPECT_STREQ("localhost", SSL_get_servername(ssl_socket->ssl(), TLSEXT_NAMETYPE_host_name)); } TEST_P(AutoSniIntegrationTest, PassingNotDNS) { @@ -97,7 +96,7 @@ TEST_P(AutoSniIntegrationTest, PassingNotDNS) { const Extensions::TransportSockets::Tls::SslSocketInfo* ssl_socket = dynamic_cast( fake_upstream_connection_->connection().ssl().get()); - EXPECT_STREQ(nullptr, SSL_get_servername(ssl_socket->rawSslForTest(), TLSEXT_NAMETYPE_host_name)); + EXPECT_STREQ(nullptr, SSL_get_servername(ssl_socket->ssl(), TLSEXT_NAMETYPE_host_name)); } TEST_P(AutoSniIntegrationTest, PassingHostWithoutPort) { @@ -116,8 +115,7 @@ TEST_P(AutoSniIntegrationTest, PassingHostWithoutPort) { const Extensions::TransportSockets::Tls::SslSocketInfo* ssl_socket = dynamic_cast( fake_upstream_connection_->connection().ssl().get()); - EXPECT_STREQ("example.com", - SSL_get_servername(ssl_socket->rawSslForTest(), TLSEXT_NAMETYPE_host_name)); + EXPECT_STREQ("example.com", SSL_get_servername(ssl_socket->ssl(), TLSEXT_NAMETYPE_host_name)); } } // namespace diff --git a/test/extensions/transport_sockets/tls/ssl_socket_test.cc b/test/extensions/transport_sockets/tls/ssl_socket_test.cc index b8481bc28a97..fc84d1e61e1b 100644 --- a/test/extensions/transport_sockets/tls/ssl_socket_test.cc +++ b/test/extensions/transport_sockets/tls/ssl_socket_test.cc @@ -606,7 +606,7 @@ const std::string testUtilV2(const TestUtilOptionsV2& options) { if (!options.clientSession().empty()) { const SslSocketInfo* ssl_socket = dynamic_cast(client_connection->ssl().get()); - SSL* client_ssl_socket = ssl_socket->rawSslForTest(); + SSL* client_ssl_socket = ssl_socket->ssl(); SSL_CTX* client_ssl_context = SSL_get_SSL_CTX(client_ssl_socket); SSL_SESSION* client_ssl_session = SSL_SESSION_from_bytes(reinterpret_cast(options.clientSession().data()), @@ -649,7 +649,7 @@ const std::string testUtilV2(const TestUtilOptionsV2& options) { EXPECT_EQ(options.expectedClientCertUri(), server_connection->ssl()->uriSanPeerCertificate()); const SslSocketInfo* ssl_socket = dynamic_cast(client_connection->ssl().get()); - SSL* client_ssl_socket = ssl_socket->rawSslForTest(); + SSL* client_ssl_socket = ssl_socket->ssl(); if (!options.expectedProtocolVersion().empty()) { EXPECT_EQ(options.expectedProtocolVersion(), client_connection->ssl()->tlsVersion()); } @@ -664,7 +664,7 @@ const std::string testUtilV2(const TestUtilOptionsV2& options) { absl::optional server_ssl_requested_server_name; const SslSocketInfo* server_ssl_socket = dynamic_cast(server_connection->ssl().get()); - SSL* server_ssl = server_ssl_socket->rawSslForTest(); + SSL* server_ssl = server_ssl_socket->ssl(); auto requested_server_name = SSL_get_servername(server_ssl, TLSEXT_NAMETYPE_host_name); if (requested_server_name != nullptr) { server_ssl_requested_server_name = std::string(requested_server_name); @@ -2511,7 +2511,7 @@ TEST_P(SslSocketTest, ClientAuthMultipleCAs) { const SslSocketInfo* ssl_socket = dynamic_cast(client_connection->ssl().get()); 
SSL_set_cert_cb( - ssl_socket->rawSslForTest(), + ssl_socket->ssl(), [](SSL* ssl, void*) -> int { STACK_OF(X509_NAME)* list = SSL_get_client_CA_list(ssl); EXPECT_NE(nullptr, list); @@ -2624,7 +2624,7 @@ void testTicketSessionResumption(const std::string& server_ctx_yaml1, .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { const SslSocketInfo* ssl_socket = dynamic_cast(client_connection->ssl().get()); - ssl_session = SSL_get1_session(ssl_socket->rawSslForTest()); + ssl_session = SSL_get1_session(ssl_socket->ssl()); EXPECT_TRUE(SSL_SESSION_is_resumable(ssl_session)); if (expected_lifetime_hint) { auto lifetime_hint = SSL_SESSION_get_ticket_lifetime_hint(ssl_session); @@ -2647,7 +2647,7 @@ void testTicketSessionResumption(const std::string& server_ctx_yaml1, client_connection->addConnectionCallbacks(client_connection_callbacks); const SslSocketInfo* ssl_socket = dynamic_cast(client_connection->ssl().get()); - SSL_set_session(ssl_socket->rawSslForTest(), ssl_session); + SSL_set_session(ssl_socket->ssl(), ssl_session); SSL_SESSION_free(ssl_session); client_connection->connect(); @@ -2753,7 +2753,7 @@ void testSupportForStatelessSessionResumption(const std::string& server_ctx_yaml const SslSocketInfo* ssl_socket = dynamic_cast(server_connection->ssl().get()); - SSL* server_ssl_socket = ssl_socket->rawSslForTest(); + SSL* server_ssl_socket = ssl_socket->ssl(); SSL_CTX* server_ssl_context = SSL_get_SSL_CTX(server_ssl_socket); if (expect_support) { EXPECT_EQ(0, (SSL_CTX_get_options(server_ssl_context) & SSL_OP_NO_TICKET)); @@ -3207,7 +3207,7 @@ TEST_P(SslSocketTest, ClientAuthCrossListenerSessionResumption) { .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { const SslSocketInfo* ssl_socket = dynamic_cast(client_connection->ssl().get()); - ssl_session = SSL_get1_session(ssl_socket->rawSslForTest()); + ssl_session = SSL_get1_session(ssl_socket->ssl()); EXPECT_TRUE(SSL_SESSION_is_resumable(ssl_session)); server_connection->close(Network::ConnectionCloseType::NoFlush); client_connection->close(Network::ConnectionCloseType::NoFlush); @@ -3226,7 +3226,7 @@ TEST_P(SslSocketTest, ClientAuthCrossListenerSessionResumption) { client_connection->addConnectionCallbacks(client_connection_callbacks); const SslSocketInfo* ssl_socket = dynamic_cast(client_connection->ssl().get()); - SSL_set_session(ssl_socket->rawSslForTest(), ssl_session); + SSL_set_session(ssl_socket->ssl(), ssl_session); SSL_SESSION_free(ssl_session); client_connection->connect(); From 7d5e12cb31bce4750838fa3f02a1e4e26261c067 Mon Sep 17 00:00:00 2001 From: foreseeable Date: Mon, 6 Jul 2020 22:36:11 +0000 Subject: [PATCH 531/909] test: Break down huge monolith mock header to improve test compilation performance (#11797) Commit Message: breakdown `test/mocks/server/mocks.h` into different mock classes Additional Description: `test/mocks/server/mocks.h` is a wide-used mock header included by various test files. However it's very huge and most test files only used a small portion of it. Splitting it up into different mock classes will be helpful to reduce compilation time. 
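As a hedged illustration of the intended usage (not part of this change, and the test body is invented for the example): a test that only needs the server Instance mock can now include just that header, with its BUILD target depending on the narrow "//test/mocks/server:instance_mocks" rule instead of the "server_mocks" umbrella, for example:

    #include "test/mocks/server/instance.h"

    #include "gmock/gmock.h"
    #include "gtest/gtest.h"

    // Only the Instance mock is pulled in, so this translation unit no longer
    // pays the compile cost of the full mocks.h header.
    TEST(NarrowMockExample, UsesOnlyInstanceMock) {
      testing::NiceMock<Envoy::Server::MockInstance> server;
      // Hand `server` to code under test that takes a Server::Instance&.
      (void)server;
    }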
Risk Level: low Testing: existing tests Docs Changes: N/A Release Notes: no Related Issues: #10917 Signed-off-by: Muge Chen --- test/mocks/server/BUILD | 275 ++++++- test/mocks/server/admin.cc | 3 - test/mocks/server/admin.h | 1 - test/mocks/server/admin_stream.cc | 15 + test/mocks/server/admin_stream.h | 25 + .../server/bootstrap_extension_factory.cc | 12 + .../server/bootstrap_extension_factory.h | 22 + test/mocks/server/config_tracker.h | 1 - test/mocks/server/drain_manager.cc | 21 + test/mocks/server/drain_manager.h | 28 + test/mocks/server/factory_context.cc | 47 ++ test/mocks/server/factory_context.h | 75 ++ .../server/filter_chain_factory_context.cc | 17 + .../server/filter_chain_factory_context.h | 17 + test/mocks/server/guard_dog.cc | 20 + test/mocks/server/guard_dog.h | 23 + .../server/health_checker_factory_context.cc | 30 + .../server/health_checker_factory_context.h | 43 ++ test/mocks/server/hot_restart.cc | 22 + test/mocks/server/hot_restart.h | 38 + test/mocks/server/instance.cc | 81 +++ test/mocks/server/instance.h | 170 +++++ .../server/listener_component_factory.cc | 31 + .../mocks/server/listener_component_factory.h | 47 ++ test/mocks/server/listener_factory_context.cc | 15 + test/mocks/server/listener_factory_context.h | 23 + test/mocks/server/listener_manager.cc | 16 + test/mocks/server/listener_manager.h | 29 + test/mocks/server/main.cc | 24 + test/mocks/server/main.h | 37 + test/mocks/server/mocks.cc | 295 -------- test/mocks/server/mocks.h | 679 +----------------- test/mocks/server/options.cc | 51 ++ test/mocks/server/options.h | 76 ++ test/mocks/server/overload_manager.cc | 19 + test/mocks/server/overload_manager.h | 26 + .../mocks/server/server_lifecycle_notifier.cc | 16 + test/mocks/server/server_lifecycle_notifier.h | 20 + test/mocks/server/tracer_factory.cc | 24 + test/mocks/server/tracer_factory.h | 27 + test/mocks/server/tracer_factory_context.cc | 24 + test/mocks/server/tracer_factory_context.h | 25 + .../transport_socket_factory_context.cc | 26 + .../server/transport_socket_factory_context.h | 44 ++ test/mocks/server/watch_dog.cc | 16 + test/mocks/server/watch_dog.h | 21 + test/mocks/server/worker.cc | 50 ++ test/mocks/server/worker.h | 54 ++ test/mocks/server/worker_factory.cc | 14 + test/mocks/server/worker_factory.h | 23 + 50 files changed, 1763 insertions(+), 975 deletions(-) create mode 100644 test/mocks/server/admin_stream.cc create mode 100644 test/mocks/server/admin_stream.h create mode 100644 test/mocks/server/bootstrap_extension_factory.cc create mode 100644 test/mocks/server/bootstrap_extension_factory.h create mode 100644 test/mocks/server/drain_manager.cc create mode 100644 test/mocks/server/drain_manager.h create mode 100644 test/mocks/server/factory_context.cc create mode 100644 test/mocks/server/factory_context.h create mode 100644 test/mocks/server/filter_chain_factory_context.cc create mode 100644 test/mocks/server/filter_chain_factory_context.h create mode 100644 test/mocks/server/guard_dog.cc create mode 100644 test/mocks/server/guard_dog.h create mode 100644 test/mocks/server/health_checker_factory_context.cc create mode 100644 test/mocks/server/health_checker_factory_context.h create mode 100644 test/mocks/server/hot_restart.cc create mode 100644 test/mocks/server/hot_restart.h create mode 100644 test/mocks/server/instance.cc create mode 100644 test/mocks/server/instance.h create mode 100644 test/mocks/server/listener_component_factory.cc create mode 100644 test/mocks/server/listener_component_factory.h create mode 100644 
test/mocks/server/listener_factory_context.cc create mode 100644 test/mocks/server/listener_factory_context.h create mode 100644 test/mocks/server/listener_manager.cc create mode 100644 test/mocks/server/listener_manager.h create mode 100644 test/mocks/server/main.cc create mode 100644 test/mocks/server/main.h delete mode 100644 test/mocks/server/mocks.cc create mode 100644 test/mocks/server/options.cc create mode 100644 test/mocks/server/options.h create mode 100644 test/mocks/server/overload_manager.cc create mode 100644 test/mocks/server/overload_manager.h create mode 100644 test/mocks/server/server_lifecycle_notifier.cc create mode 100644 test/mocks/server/server_lifecycle_notifier.h create mode 100644 test/mocks/server/tracer_factory.cc create mode 100644 test/mocks/server/tracer_factory.h create mode 100644 test/mocks/server/tracer_factory_context.cc create mode 100644 test/mocks/server/tracer_factory_context.h create mode 100644 test/mocks/server/transport_socket_factory_context.cc create mode 100644 test/mocks/server/transport_socket_factory_context.h create mode 100644 test/mocks/server/watch_dog.cc create mode 100644 test/mocks/server/watch_dog.h create mode 100644 test/mocks/server/worker.cc create mode 100644 test/mocks/server/worker.h create mode 100644 test/mocks/server/worker_factory.cc create mode 100644 test/mocks/server/worker_factory.h diff --git a/test/mocks/server/BUILD b/test/mocks/server/BUILD index a907ca21feb7..35a1ac14b80a 100644 --- a/test/mocks/server/BUILD +++ b/test/mocks/server/BUILD @@ -28,25 +28,138 @@ envoy_cc_mock( ) envoy_cc_mock( - name = "server_mocks", - srcs = ["mocks.cc"], - hdrs = ["mocks.h"], + name = "bootstrap_extension_factory_mocks", + srcs = ["bootstrap_extension_factory.cc"], + hdrs = ["bootstrap_extension_factory.h"], deps = [ - "//include/envoy/secret:secret_manager_interface", - "//include/envoy/server:admin_interface", "//include/envoy/server:bootstrap_extension_config_interface", - "//include/envoy/server:configuration_interface", + ], +) + +envoy_cc_mock( + name = "options_mocks", + srcs = ["options.cc"], + hdrs = ["options.h"], + deps = [ + "//include/envoy/server:options_interface", + "@envoy_api//envoy/admin/v3:pkg_cc_proto", + "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", + ], +) + +envoy_cc_mock( + name = "admin_stream_mocks", + srcs = ["admin_stream.cc"], + hdrs = ["admin_stream.h"], + deps = [ + "//include/envoy/server:admin_interface", + "//test/mocks/http:http_mocks", + ], +) + +envoy_cc_mock( + name = "drain_manager_mocks", + srcs = ["drain_manager.cc"], + hdrs = ["drain_manager.h"], + deps = [ "//include/envoy/server:drain_manager_interface", - "//include/envoy/server:filter_config_interface", + ], +) + +envoy_cc_mock( + name = "watch_dog_mocks", + srcs = ["watch_dog.cc"], + hdrs = ["watch_dog.h"], + deps = [ + "//include/envoy/server:watchdog_interface", + ], +) + +envoy_cc_mock( + name = "guard_dog_mocks", + srcs = ["guard_dog.cc"], + hdrs = ["guard_dog.h"], + deps = [ "//include/envoy/server:guarddog_interface", - "//include/envoy/server:health_checker_config_interface", + "//test/mocks/server:watch_dog_mocks", + ], +) + +envoy_cc_mock( + name = "hot_restart_mocks", + srcs = ["hot_restart.cc"], + hdrs = ["hot_restart.h"], + deps = [ "//include/envoy/server:instance_interface", - "//include/envoy/server:options_interface", - "//include/envoy/server:overload_manager_interface", - "//include/envoy/server:tracer_config_interface", + "//test/mocks/stats:stats_mocks", + ], +) + +envoy_cc_mock( + name = 
"listener_component_factory_mocks", + srcs = ["listener_component_factory.cc"], + hdrs = ["listener_component_factory.h"], + deps = [ + "//include/envoy/server:drain_manager_interface", + "//include/envoy/server:listener_manager_interface", + "//test/mocks/network:network_mocks", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + "@envoy_api//envoy/config/listener/v3:pkg_cc_proto", + ], +) + +envoy_cc_mock( + name = "listener_manager_mocks", + srcs = ["listener_manager.cc"], + hdrs = ["listener_manager.h"], + deps = [ + "//include/envoy/server:listener_manager_interface", + ], +) + +envoy_cc_mock( + name = "server_lifecycle_notifier_mocks", + srcs = ["server_lifecycle_notifier.cc"], + hdrs = ["server_lifecycle_notifier.h"], + deps = [ + "//include/envoy/server:lifecycle_notifier_interface", + ], +) + +envoy_cc_mock( + name = "worker_factory_mocks", + srcs = ["worker_factory.cc"], + hdrs = ["worker_factory.h"], + deps = [ "//include/envoy/server:worker_interface", - "//include/envoy/ssl:context_manager_interface", - "//include/envoy/upstream:health_checker_interface", + "//test/mocks/server:worker_mocks", + ], +) + +envoy_cc_mock( + name = "worker_mocks", + srcs = ["worker.cc"], + hdrs = ["worker.h"], + deps = [ + "//include/envoy/server:worker_interface", + ], +) + +envoy_cc_mock( + name = "overload_manager_mocks", + srcs = ["overload_manager.cc"], + hdrs = ["overload_manager.h"], + deps = [ + "//include/envoy/server:overload_manager_interface", + ], +) + +envoy_cc_mock( + name = "instance_mocks", + srcs = ["instance.cc"], + hdrs = ["instance.h"], + deps = [ + "//include/envoy/server:instance_interface", "//source/common/grpc:context_lib", "//source/common/http:context_lib", "//source/common/secret:secret_manager_impl_lib", @@ -64,14 +177,138 @@ envoy_cc_mock( "//test/mocks/runtime:runtime_mocks", "//test/mocks/secret:secret_mocks", "//test/mocks/server:admin_mocks", - "//test/mocks/server:config_tracker_mocks", + "//test/mocks/server:drain_manager_mocks", + "//test/mocks/server:hot_restart_mocks", + "//test/mocks/server:listener_manager_mocks", + "//test/mocks/server:options_mocks", + "//test/mocks/server:overload_manager_mocks", + "//test/mocks/server:server_lifecycle_notifier_mocks", + "//test/mocks/server:transport_socket_factory_context_mocks", "//test/mocks/thread_local:thread_local_mocks", "//test/mocks/tracing:tracing_mocks", "//test/mocks/upstream:upstream_mocks", - "//test/test_common:test_time_lib", - "@envoy_api//envoy/admin/v3:pkg_cc_proto", - "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", - "@envoy_api//envoy/config/core/v3:pkg_cc_proto", - "@envoy_api//envoy/config/listener/v3:pkg_cc_proto", + ], +) + +envoy_cc_mock( + name = "main_mocks", + srcs = ["main.cc"], + hdrs = ["main.h"], + deps = [ + "//include/envoy/server:configuration_interface", + "//include/envoy/server:overload_manager_interface", + ], +) + +envoy_cc_mock( + name = "factory_context_mocks", + srcs = ["factory_context.cc"], + hdrs = ["factory_context.h"], + deps = [ + "//test/mocks/server:drain_manager_mocks", + "//test/mocks/server:instance_mocks", + "//test/mocks/server:overload_manager_mocks", + "//test/mocks/server:server_lifecycle_notifier_mocks", + ], +) + +envoy_cc_mock( + name = "transport_socket_factory_context_mocks", + srcs = ["transport_socket_factory_context.cc"], + hdrs = ["transport_socket_factory_context.h"], + deps = [ + "//include/envoy/server:tracer_config_interface", + "//source/common/secret:secret_manager_impl_lib", + "//test/mocks/api:api_mocks", + 
"//test/mocks/server:config_tracker_mocks", + "//test/mocks/upstream:upstream_mocks", + ], +) + +envoy_cc_mock( + name = "listener_factory_context_mocks", + srcs = ["listener_factory_context.cc"], + hdrs = ["listener_factory_context.h"], + deps = [ + "//include/envoy/server:listener_manager_interface", + "//test/mocks/server:factory_context_mocks", + ], +) + +envoy_cc_mock( + name = "health_checker_factory_context_mocks", + srcs = ["health_checker_factory_context.cc"], + hdrs = ["health_checker_factory_context.h"], + deps = [ + "//include/envoy/server:health_checker_config_interface", + "//test/mocks/api:api_mocks", + "//test/mocks/event:event_mocks", + "//test/mocks/protobuf:protobuf_mocks", + "//test/mocks/router:router_mocks", + "//test/mocks/runtime:runtime_mocks", + "//test/mocks/upstream:upstream_mocks", + ], +) + +envoy_cc_mock( + name = "filter_chain_factory_context_mocks", + srcs = ["filter_chain_factory_context.cc"], + hdrs = ["filter_chain_factory_context.h"], + deps = [ + "//include/envoy/server:filter_config_interface", + "//test/mocks/server:factory_context_mocks", + ], +) + +envoy_cc_mock( + name = "tracer_factory_mocks", + srcs = ["tracer_factory.cc"], + hdrs = ["tracer_factory.h"], + deps = [ + "//include/envoy/protobuf:message_validator_interface", + "//include/envoy/server:tracer_config_interface", + ], +) + +envoy_cc_mock( + name = "tracer_factory_context_mocks", + srcs = ["tracer_factory_context.cc"], + hdrs = ["tracer_factory_context.h"], + deps = [ + "//include/envoy/server:configuration_interface", + "//test/mocks/server:instance_mocks", + "//test/mocks/server:tracer_factory_mocks", + ], +) + +envoy_cc_mock( + name = "server_mocks", + srcs = [], + hdrs = ["mocks.h"], + deps = [ + "//test/mocks/server:admin_mocks", + "//test/mocks/server:admin_stream_mocks", + "//test/mocks/server:bootstrap_extension_factory_mocks", + "//test/mocks/server:config_tracker_mocks", + "//test/mocks/server:drain_manager_mocks", + "//test/mocks/server:factory_context_mocks", + "//test/mocks/server:filter_chain_factory_context_mocks", + "//test/mocks/server:guard_dog_mocks", + "//test/mocks/server:health_checker_factory_context_mocks", + "//test/mocks/server:hot_restart_mocks", + "//test/mocks/server:instance_mocks", + "//test/mocks/server:listener_component_factory_mocks", + "//test/mocks/server:listener_factory_context_mocks", + "//test/mocks/server:listener_manager_mocks", + "//test/mocks/server:main_mocks", + "//test/mocks/server:options_mocks", + "//test/mocks/server:overload_manager_mocks", + "//test/mocks/server:server_lifecycle_notifier_mocks", + "//test/mocks/server:tracer_factory_context_mocks", + "//test/mocks/server:tracer_factory_mocks", + "//test/mocks/server:transport_socket_factory_context_mocks", + "//test/mocks/server:watch_dog_mocks", + "//test/mocks/server:worker_factory_mocks", + "//test/mocks/server:worker_mocks", ], ) diff --git a/test/mocks/server/admin.cc b/test/mocks/server/admin.cc index 2411ef375f00..435c14f6e973 100644 --- a/test/mocks/server/admin.cc +++ b/test/mocks/server/admin.cc @@ -1,7 +1,5 @@ #include "admin.h" -#include - #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -14,5 +12,4 @@ MockAdmin::MockAdmin() { MockAdmin::~MockAdmin() = default; } // namespace Server - } // namespace Envoy diff --git a/test/mocks/server/admin.h b/test/mocks/server/admin.h index 2a82d23859c6..512e3286bfca 100644 --- a/test/mocks/server/admin.h +++ b/test/mocks/server/admin.h @@ -35,5 +35,4 @@ class MockAdmin : public Admin { ::testing::NiceMock config_tracker_; }; } // 
namespace Server - } // namespace Envoy diff --git a/test/mocks/server/admin_stream.cc b/test/mocks/server/admin_stream.cc new file mode 100644 index 000000000000..9ed778013b18 --- /dev/null +++ b/test/mocks/server/admin_stream.cc @@ -0,0 +1,15 @@ +#include "admin_stream.h" + +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Server { +MockAdminStream::MockAdminStream() = default; + +MockAdminStream::~MockAdminStream() = default; + +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/admin_stream.h b/test/mocks/server/admin_stream.h new file mode 100644 index 000000000000..d7972b4ae27c --- /dev/null +++ b/test/mocks/server/admin_stream.h @@ -0,0 +1,25 @@ +#pragma once + +#include "envoy/server/admin.h" + +#include "test/mocks/http/mocks.h" + +#include "gmock/gmock.h" + +namespace Envoy { +namespace Server { +class MockAdminStream : public AdminStream { +public: + MockAdminStream(); + ~MockAdminStream() override; + + MOCK_METHOD(void, setEndStreamOnComplete, (bool)); + MOCK_METHOD(void, addOnDestroyCallback, (std::function)); + MOCK_METHOD(const Buffer::Instance*, getRequestBody, (), (const)); + MOCK_METHOD(Http::RequestHeaderMap&, getRequestHeaders, (), (const)); + MOCK_METHOD(NiceMock&, getDecoderFilterCallbacks, (), + (const)); + MOCK_METHOD(Http::Http1StreamEncoderOptionsOptRef, http1StreamEncoderOptions, ()); +}; +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/bootstrap_extension_factory.cc b/test/mocks/server/bootstrap_extension_factory.cc new file mode 100644 index 000000000000..80984ea4093d --- /dev/null +++ b/test/mocks/server/bootstrap_extension_factory.cc @@ -0,0 +1,12 @@ +#include "bootstrap_extension_factory.h" + +namespace Envoy { +namespace Server { +namespace Configuration { +MockBootstrapExtensionFactory::MockBootstrapExtensionFactory() = default; + +MockBootstrapExtensionFactory::~MockBootstrapExtensionFactory() = default; + +} // namespace Configuration +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/bootstrap_extension_factory.h b/test/mocks/server/bootstrap_extension_factory.h new file mode 100644 index 000000000000..f6421f788788 --- /dev/null +++ b/test/mocks/server/bootstrap_extension_factory.h @@ -0,0 +1,22 @@ +#pragma once + +#include "envoy/server/bootstrap_extension_config.h" + +#include "gmock/gmock.h" + +namespace Envoy { +namespace Server { +namespace Configuration { +class MockBootstrapExtensionFactory : public BootstrapExtensionFactory { +public: + MockBootstrapExtensionFactory(); + ~MockBootstrapExtensionFactory() override; + + MOCK_METHOD(BootstrapExtensionPtr, createBootstrapExtension, + (const Protobuf::Message&, Configuration::ServerFactoryContext&), (override)); + MOCK_METHOD(ProtobufTypes::MessagePtr, createEmptyConfigProto, (), (override)); + MOCK_METHOD(std::string, name, (), (const, override)); +}; +} // namespace Configuration +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/config_tracker.h b/test/mocks/server/config_tracker.h index a84b87c7feb0..09f516f0e03c 100644 --- a/test/mocks/server/config_tracker.h +++ b/test/mocks/server/config_tracker.h @@ -26,5 +26,4 @@ class MockConfigTracker : public ConfigTracker { std::unordered_map config_tracker_callbacks_; }; } // namespace Server - } // namespace Envoy diff --git a/test/mocks/server/drain_manager.cc b/test/mocks/server/drain_manager.cc new file mode 100644 index 000000000000..15735d57834c --- /dev/null +++ b/test/mocks/server/drain_manager.cc @@ 
-0,0 +1,21 @@ +#include "drain_manager.h" + +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Server { + +using ::testing::_; +using ::testing::SaveArg; + +MockDrainManager::MockDrainManager() { + ON_CALL(*this, startDrainSequence(_)).WillByDefault(SaveArg<0>(&drain_sequence_completion_)); +} + +MockDrainManager::~MockDrainManager() = default; + +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/drain_manager.h b/test/mocks/server/drain_manager.h new file mode 100644 index 000000000000..dc0331b05876 --- /dev/null +++ b/test/mocks/server/drain_manager.h @@ -0,0 +1,28 @@ +#pragma once + +#include +#include +#include +#include + +#include "envoy/server/drain_manager.h" + +#include "gmock/gmock.h" + +namespace Envoy { +namespace Server { +class MockDrainManager : public DrainManager { +public: + MockDrainManager(); + ~MockDrainManager() override; + + // Server::DrainManager + MOCK_METHOD(bool, drainClose, (), (const)); + MOCK_METHOD(bool, draining, (), (const)); + MOCK_METHOD(void, startDrainSequence, (std::function completion)); + MOCK_METHOD(void, startParentShutdownSequence, ()); + + std::function drain_sequence_completion_; +}; +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/factory_context.cc b/test/mocks/server/factory_context.cc new file mode 100644 index 000000000000..974371a1d3b5 --- /dev/null +++ b/test/mocks/server/factory_context.cc @@ -0,0 +1,47 @@ +#include "factory_context.h" + +#include + +#include "common/singleton/manager_impl.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Server { +namespace Configuration { + +using ::testing::ReturnRef; + +MockFactoryContext::MockFactoryContext() + : singleton_manager_(new Singleton::ManagerImpl(Thread::threadFactoryForTest())), + grpc_context_(scope_.symbolTable()), http_context_(scope_.symbolTable()) { + ON_CALL(*this, getServerFactoryContext()).WillByDefault(ReturnRef(server_factory_context_)); + ON_CALL(*this, accessLogManager()).WillByDefault(ReturnRef(access_log_manager_)); + ON_CALL(*this, clusterManager()).WillByDefault(ReturnRef(cluster_manager_)); + ON_CALL(*this, dispatcher()).WillByDefault(ReturnRef(dispatcher_)); + ON_CALL(*this, drainDecision()).WillByDefault(ReturnRef(drain_manager_)); + ON_CALL(*this, initManager()).WillByDefault(ReturnRef(init_manager_)); + ON_CALL(*this, lifecycleNotifier()).WillByDefault(ReturnRef(lifecycle_notifier_)); + ON_CALL(*this, localInfo()).WillByDefault(ReturnRef(local_info_)); + ON_CALL(*this, random()).WillByDefault(ReturnRef(random_)); + ON_CALL(*this, runtime()).WillByDefault(ReturnRef(runtime_loader_)); + ON_CALL(*this, scope()).WillByDefault(ReturnRef(scope_)); + ON_CALL(*this, singletonManager()).WillByDefault(ReturnRef(*singleton_manager_)); + ON_CALL(*this, threadLocal()).WillByDefault(ReturnRef(thread_local_)); + ON_CALL(*this, admin()).WillByDefault(ReturnRef(admin_)); + ON_CALL(*this, listenerScope()).WillByDefault(ReturnRef(listener_scope_)); + ON_CALL(*this, api()).WillByDefault(ReturnRef(api_)); + ON_CALL(*this, timeSource()).WillByDefault(ReturnRef(time_system_)); + ON_CALL(*this, overloadManager()).WillByDefault(ReturnRef(overload_manager_)); + ON_CALL(*this, messageValidationContext()).WillByDefault(ReturnRef(validation_context_)); + ON_CALL(*this, messageValidationVisitor()) + .WillByDefault(ReturnRef(ProtobufMessage::getStrictValidationVisitor())); + ON_CALL(*this, api()).WillByDefault(ReturnRef(api_)); +} + 
+MockFactoryContext::~MockFactoryContext() = default; + +} // namespace Configuration +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/factory_context.h b/test/mocks/server/factory_context.h new file mode 100644 index 000000000000..cd1c70d4ecd6 --- /dev/null +++ b/test/mocks/server/factory_context.h @@ -0,0 +1,75 @@ +#pragma once + +#include "envoy/server/configuration.h" + +#include "extensions/transport_sockets/tls/context_manager_impl.h" + +#include "admin.h" +#include "drain_manager.h" +#include "gmock/gmock.h" +#include "instance.h" +#include "overload_manager.h" +#include "server_lifecycle_notifier.h" + +namespace Envoy { +namespace Server { +namespace Configuration { +class MockFactoryContext : public virtual FactoryContext { +public: + MockFactoryContext(); + ~MockFactoryContext() override; + + MOCK_METHOD(ServerFactoryContext&, getServerFactoryContext, (), (const)); + MOCK_METHOD(TransportSocketFactoryContext&, getTransportSocketFactoryContext, (), (const)); + MOCK_METHOD(AccessLog::AccessLogManager&, accessLogManager, ()); + MOCK_METHOD(Upstream::ClusterManager&, clusterManager, ()); + MOCK_METHOD(Event::Dispatcher&, dispatcher, ()); + MOCK_METHOD(const Network::DrainDecision&, drainDecision, ()); + MOCK_METHOD(bool, healthCheckFailed, ()); + MOCK_METHOD(Init::Manager&, initManager, ()); + MOCK_METHOD(ServerLifecycleNotifier&, lifecycleNotifier, ()); + MOCK_METHOD(Envoy::Runtime::RandomGenerator&, random, ()); + MOCK_METHOD(Envoy::Runtime::Loader&, runtime, ()); + MOCK_METHOD(Stats::Scope&, scope, ()); + MOCK_METHOD(Singleton::Manager&, singletonManager, ()); + MOCK_METHOD(OverloadManager&, overloadManager, ()); + MOCK_METHOD(ThreadLocal::Instance&, threadLocal, ()); + MOCK_METHOD(Server::Admin&, admin, ()); + MOCK_METHOD(Stats::Scope&, listenerScope, ()); + MOCK_METHOD(const LocalInfo::LocalInfo&, localInfo, (), (const)); + MOCK_METHOD(const envoy::config::core::v3::Metadata&, listenerMetadata, (), (const)); + MOCK_METHOD(envoy::config::core::v3::TrafficDirection, direction, (), (const)); + MOCK_METHOD(TimeSource&, timeSource, ()); + Event::TestTimeSystem& timeSystem() { return time_system_; } + Grpc::Context& grpcContext() override { return grpc_context_; } + Http::Context& httpContext() override { return http_context_; } + MOCK_METHOD(ProcessContextOptRef, processContext, ()); + MOCK_METHOD(ProtobufMessage::ValidationContext&, messageValidationContext, ()); + MOCK_METHOD(ProtobufMessage::ValidationVisitor&, messageValidationVisitor, ()); + MOCK_METHOD(Api::Api&, api, ()); + + testing::NiceMock server_factory_context_; + testing::NiceMock access_log_manager_; + testing::NiceMock cluster_manager_; + testing::NiceMock dispatcher_; + testing::NiceMock drain_manager_; + testing::NiceMock init_manager_; + testing::NiceMock lifecycle_notifier_; + testing::NiceMock local_info_; + testing::NiceMock random_; + testing::NiceMock runtime_loader_; + testing::NiceMock scope_; + testing::NiceMock thread_local_; + Singleton::ManagerPtr singleton_manager_; + testing::NiceMock admin_; + Stats::IsolatedStoreImpl listener_scope_; + Event::GlobalTimeSystem time_system_; + testing::NiceMock validation_context_; + testing::NiceMock overload_manager_; + Grpc::ContextImpl grpc_context_; + Http::ContextImpl http_context_; + testing::NiceMock api_; +}; +} // namespace Configuration +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/filter_chain_factory_context.cc b/test/mocks/server/filter_chain_factory_context.cc new file mode 100644 index 
000000000000..674f95f4a558 --- /dev/null +++ b/test/mocks/server/filter_chain_factory_context.cc @@ -0,0 +1,17 @@ +#include "filter_chain_factory_context.h" + +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Server { +namespace Configuration { +MockFilterChainFactoryContext::MockFilterChainFactoryContext() = default; + +MockFilterChainFactoryContext::~MockFilterChainFactoryContext() = default; + +} // namespace Configuration +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/filter_chain_factory_context.h b/test/mocks/server/filter_chain_factory_context.h new file mode 100644 index 000000000000..f09e7a565722 --- /dev/null +++ b/test/mocks/server/filter_chain_factory_context.h @@ -0,0 +1,17 @@ +#pragma once + +#include "envoy/server/filter_config.h" + +#include "factory_context.h" + +namespace Envoy { +namespace Server { +namespace Configuration { +class MockFilterChainFactoryContext : public MockFactoryContext, public FilterChainFactoryContext { +public: + MockFilterChainFactoryContext(); + ~MockFilterChainFactoryContext() override; +}; +} // namespace Configuration +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/guard_dog.cc b/test/mocks/server/guard_dog.cc new file mode 100644 index 000000000000..e5e552c234f5 --- /dev/null +++ b/test/mocks/server/guard_dog.cc @@ -0,0 +1,20 @@ +#include "guard_dog.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Server { + +using ::testing::_; +using ::testing::NiceMock; +using ::testing::Return; + +MockGuardDog::MockGuardDog() : watch_dog_(new NiceMock()) { + ON_CALL(*this, createWatchDog(_, _)).WillByDefault(Return(watch_dog_)); +} + +MockGuardDog::~MockGuardDog() = default; + +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/guard_dog.h b/test/mocks/server/guard_dog.h new file mode 100644 index 000000000000..fed29041db3e --- /dev/null +++ b/test/mocks/server/guard_dog.h @@ -0,0 +1,23 @@ +#pragma once + +#include "envoy/server/guarddog.h" + +#include "gmock/gmock.h" +#include "watch_dog.h" + +namespace Envoy { +namespace Server { +class MockGuardDog : public GuardDog { +public: + MockGuardDog(); + ~MockGuardDog() override; + + // Server::GuardDog + MOCK_METHOD(WatchDogSharedPtr, createWatchDog, + (Thread::ThreadId thread_id, const std::string& thread_name)); + MOCK_METHOD(void, stopWatching, (WatchDogSharedPtr wd)); + + std::shared_ptr watch_dog_; +}; +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/health_checker_factory_context.cc b/test/mocks/server/health_checker_factory_context.cc new file mode 100644 index 000000000000..f6a17d962e2a --- /dev/null +++ b/test/mocks/server/health_checker_factory_context.cc @@ -0,0 +1,30 @@ +#include "health_checker_factory_context.h" + +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Server { +namespace Configuration { + +using ::testing::ReturnRef; + +MockHealthCheckerFactoryContext::MockHealthCheckerFactoryContext() { + event_logger_ = new testing::NiceMock(); + ON_CALL(*this, cluster()).WillByDefault(ReturnRef(cluster_)); + ON_CALL(*this, dispatcher()).WillByDefault(ReturnRef(dispatcher_)); + ON_CALL(*this, random()).WillByDefault(ReturnRef(random_)); + ON_CALL(*this, runtime()).WillByDefault(ReturnRef(runtime_)); + ON_CALL(*this, eventLogger_()).WillByDefault(Return(event_logger_)); + ON_CALL(*this, messageValidationVisitor()) + 
.WillByDefault(ReturnRef(ProtobufMessage::getStrictValidationVisitor())); + ON_CALL(*this, api()).WillByDefault(ReturnRef(api_)); +} + +MockHealthCheckerFactoryContext::~MockHealthCheckerFactoryContext() = default; + +} // namespace Configuration +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/health_checker_factory_context.h b/test/mocks/server/health_checker_factory_context.h new file mode 100644 index 000000000000..1d49abffb481 --- /dev/null +++ b/test/mocks/server/health_checker_factory_context.h @@ -0,0 +1,43 @@ +#pragma once + +#include "envoy/server/health_checker_config.h" + +#include "test/mocks/api/mocks.h" +#include "test/mocks/event/mocks.h" +#include "test/mocks/protobuf/mocks.h" +#include "test/mocks/router/mocks.h" +#include "test/mocks/runtime/mocks.h" +#include "test/mocks/upstream/mocks.h" + +#include "gmock/gmock.h" + +namespace Envoy { +namespace Server { +namespace Configuration { +class MockHealthCheckerFactoryContext : public virtual HealthCheckerFactoryContext { +public: + MockHealthCheckerFactoryContext(); + ~MockHealthCheckerFactoryContext() override; + + MOCK_METHOD(Upstream::Cluster&, cluster, ()); + MOCK_METHOD(Event::Dispatcher&, dispatcher, ()); + MOCK_METHOD(Envoy::Runtime::RandomGenerator&, random, ()); + MOCK_METHOD(Envoy::Runtime::Loader&, runtime, ()); + MOCK_METHOD(Upstream::HealthCheckEventLogger*, eventLogger_, ()); + MOCK_METHOD(ProtobufMessage::ValidationVisitor&, messageValidationVisitor, ()); + MOCK_METHOD(Api::Api&, api, ()); + Upstream::HealthCheckEventLoggerPtr eventLogger() override { + return Upstream::HealthCheckEventLoggerPtr(eventLogger_()); + } + + testing::NiceMock cluster_; + testing::NiceMock dispatcher_; + testing::NiceMock random_; + testing::NiceMock runtime_; + testing::NiceMock* event_logger_{}; + testing::NiceMock api_{}; +}; +} // namespace Configuration + +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/hot_restart.cc b/test/mocks/server/hot_restart.cc new file mode 100644 index 000000000000..8a11dbe8011d --- /dev/null +++ b/test/mocks/server/hot_restart.cc @@ -0,0 +1,22 @@ +#include "hot_restart.h" + +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Server { + +using ::testing::ReturnRef; + +MockHotRestart::MockHotRestart() : stats_allocator_(*symbol_table_) { + ON_CALL(*this, logLock()).WillByDefault(ReturnRef(log_lock_)); + ON_CALL(*this, accessLogLock()).WillByDefault(ReturnRef(access_log_lock_)); + ON_CALL(*this, statsAllocator()).WillByDefault(ReturnRef(stats_allocator_)); +} + +MockHotRestart::~MockHotRestart() = default; + +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/hot_restart.h b/test/mocks/server/hot_restart.h new file mode 100644 index 000000000000..c6edd13d8905 --- /dev/null +++ b/test/mocks/server/hot_restart.h @@ -0,0 +1,38 @@ +#pragma once + +#include "envoy/server/instance.h" + +#include "test/mocks/stats/mocks.h" + +#include "gmock/gmock.h" + +namespace Envoy { +namespace Server { +class MockHotRestart : public HotRestart { +public: + MockHotRestart(); + ~MockHotRestart() override; + + // Server::HotRestart + MOCK_METHOD(void, drainParentListeners, ()); + MOCK_METHOD(int, duplicateParentListenSocket, (const std::string& address)); + MOCK_METHOD(std::unique_ptr, getParentStats, ()); + MOCK_METHOD(void, initialize, (Event::Dispatcher & dispatcher, Server::Instance& server)); + MOCK_METHOD(void, sendParentAdminShutdownRequest, (time_t & original_start_time)); + MOCK_METHOD(void, 
sendParentTerminateRequest, ()); + MOCK_METHOD(ServerStatsFromParent, mergeParentStatsIfAny, (Stats::StoreRoot & stats_store)); + MOCK_METHOD(void, shutdown, ()); + MOCK_METHOD(uint32_t, baseId, ()); + MOCK_METHOD(std::string, version, ()); + MOCK_METHOD(Thread::BasicLockable&, logLock, ()); + MOCK_METHOD(Thread::BasicLockable&, accessLogLock, ()); + MOCK_METHOD(Stats::Allocator&, statsAllocator, ()); + +private: + Stats::TestSymbolTable symbol_table_; + Thread::MutexBasicLockable log_lock_; + Thread::MutexBasicLockable access_log_lock_; + Stats::AllocatorImpl stats_allocator_; +}; +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/instance.cc b/test/mocks/server/instance.cc new file mode 100644 index 000000000000..91c102c45267 --- /dev/null +++ b/test/mocks/server/instance.cc @@ -0,0 +1,81 @@ +#include "instance.h" + +#include "common/singleton/manager_impl.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Server { + +using ::testing::Return; +using ::testing::ReturnRef; + +MockInstance::MockInstance() + : secret_manager_(std::make_unique(admin_.getConfigTracker())), + cluster_manager_(timeSource()), ssl_context_manager_(timeSource()), + singleton_manager_(new Singleton::ManagerImpl(Thread::threadFactoryForTest())), + grpc_context_(stats_store_.symbolTable()), http_context_(stats_store_.symbolTable()), + server_factory_context_( + std::make_shared>()), + transport_socket_factory_context_( + std::make_shared>()) { + ON_CALL(*this, threadLocal()).WillByDefault(ReturnRef(thread_local_)); + ON_CALL(*this, stats()).WillByDefault(ReturnRef(stats_store_)); + ON_CALL(*this, grpcContext()).WillByDefault(ReturnRef(grpc_context_)); + ON_CALL(*this, httpContext()).WillByDefault(ReturnRef(http_context_)); + ON_CALL(*this, dnsResolver()).WillByDefault(Return(dns_resolver_)); + ON_CALL(*this, api()).WillByDefault(ReturnRef(api_)); + ON_CALL(*this, admin()).WillByDefault(ReturnRef(admin_)); + ON_CALL(*this, clusterManager()).WillByDefault(ReturnRef(cluster_manager_)); + ON_CALL(*this, sslContextManager()).WillByDefault(ReturnRef(ssl_context_manager_)); + ON_CALL(*this, accessLogManager()).WillByDefault(ReturnRef(access_log_manager_)); + ON_CALL(*this, runtime()).WillByDefault(ReturnRef(runtime_loader_)); + ON_CALL(*this, dispatcher()).WillByDefault(ReturnRef(dispatcher_)); + ON_CALL(*this, hotRestart()).WillByDefault(ReturnRef(hot_restart_)); + ON_CALL(*this, random()).WillByDefault(ReturnRef(random_)); + ON_CALL(*this, lifecycleNotifier()).WillByDefault(ReturnRef(lifecycle_notifier_)); + ON_CALL(*this, localInfo()).WillByDefault(ReturnRef(local_info_)); + ON_CALL(*this, options()).WillByDefault(ReturnRef(options_)); + ON_CALL(*this, drainManager()).WillByDefault(ReturnRef(drain_manager_)); + ON_CALL(*this, initManager()).WillByDefault(ReturnRef(init_manager_)); + ON_CALL(*this, listenerManager()).WillByDefault(ReturnRef(listener_manager_)); + ON_CALL(*this, mutexTracer()).WillByDefault(Return(nullptr)); + ON_CALL(*this, singletonManager()).WillByDefault(ReturnRef(*singleton_manager_)); + ON_CALL(*this, overloadManager()).WillByDefault(ReturnRef(overload_manager_)); + ON_CALL(*this, messageValidationContext()).WillByDefault(ReturnRef(validation_context_)); + ON_CALL(*this, serverFactoryContext()).WillByDefault(ReturnRef(*server_factory_context_)); + ON_CALL(*this, transportSocketFactoryContext()) + .WillByDefault(ReturnRef(*transport_socket_factory_context_)); +} + +MockInstance::~MockInstance() = default; + +namespace Configuration { + 
+MockServerFactoryContext::MockServerFactoryContext() + : singleton_manager_(new Singleton::ManagerImpl(Thread::threadFactoryForTest())), + grpc_context_(scope_.symbolTable()) { + ON_CALL(*this, clusterManager()).WillByDefault(ReturnRef(cluster_manager_)); + ON_CALL(*this, dispatcher()).WillByDefault(ReturnRef(dispatcher_)); + ON_CALL(*this, drainDecision()).WillByDefault(ReturnRef(drain_manager_)); + ON_CALL(*this, localInfo()).WillByDefault(ReturnRef(local_info_)); + ON_CALL(*this, random()).WillByDefault(ReturnRef(random_)); + ON_CALL(*this, runtime()).WillByDefault(ReturnRef(runtime_loader_)); + ON_CALL(*this, scope()).WillByDefault(ReturnRef(scope_)); + ON_CALL(*this, singletonManager()).WillByDefault(ReturnRef(*singleton_manager_)); + ON_CALL(*this, threadLocal()).WillByDefault(ReturnRef(thread_local_)); + ON_CALL(*this, admin()).WillByDefault(ReturnRef(admin_)); + ON_CALL(*this, api()).WillByDefault(ReturnRef(api_)); + ON_CALL(*this, timeSource()).WillByDefault(ReturnRef(time_system_)); + ON_CALL(*this, messageValidationContext()).WillByDefault(ReturnRef(validation_context_)); + ON_CALL(*this, messageValidationVisitor()) + .WillByDefault(ReturnRef(ProtobufMessage::getStrictValidationVisitor())); + ON_CALL(*this, api()).WillByDefault(ReturnRef(api_)); + ON_CALL(*this, drainManager()).WillByDefault(ReturnRef(drain_manager_)); +} +MockServerFactoryContext::~MockServerFactoryContext() = default; + +} // namespace Configuration +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/instance.h b/test/mocks/server/instance.h new file mode 100644 index 000000000000..67da77794772 --- /dev/null +++ b/test/mocks/server/instance.h @@ -0,0 +1,170 @@ +#pragma once + +#include "envoy/server/instance.h" + +#include "common/grpc/context_impl.h" +#include "common/http/context_impl.h" +#include "common/stats/fake_symbol_table_impl.h" + +#include "extensions/transport_sockets/tls/context_manager_impl.h" + +#include "test/mocks/access_log/mocks.h" +#include "test/mocks/api/mocks.h" +#include "test/mocks/event/mocks.h" +#include "test/mocks/http/mocks.h" +#include "test/mocks/init/mocks.h" +#include "test/mocks/local_info/mocks.h" +#include "test/mocks/network/mocks.h" +#include "test/mocks/protobuf/mocks.h" +#include "test/mocks/runtime/mocks.h" +#include "test/mocks/secret/mocks.h" +#include "test/mocks/stats/mocks.h" +#include "test/mocks/thread_local/mocks.h" +#include "test/mocks/tracing/mocks.h" +#include "test/mocks/upstream/mocks.h" + +#include "admin.h" +#include "drain_manager.h" +#include "gmock/gmock.h" +#include "hot_restart.h" +#include "listener_manager.h" +#include "options.h" +#include "overload_manager.h" +#include "server_lifecycle_notifier.h" +#include "transport_socket_factory_context.h" + +namespace Envoy { +namespace Server { +namespace Configuration { +class MockServerFactoryContext; +} // namespace Configuration + +class MockInstance : public Instance { +public: + MockInstance(); + ~MockInstance() override; + + Secret::SecretManager& secretManager() override { return *(secret_manager_.get()); } + + MOCK_METHOD(Admin&, admin, ()); + MOCK_METHOD(Api::Api&, api, ()); + MOCK_METHOD(Upstream::ClusterManager&, clusterManager, ()); + MOCK_METHOD(Ssl::ContextManager&, sslContextManager, ()); + MOCK_METHOD(Event::Dispatcher&, dispatcher, ()); + MOCK_METHOD(Network::DnsResolverSharedPtr, dnsResolver, ()); + MOCK_METHOD(void, drainListeners, ()); + MOCK_METHOD(DrainManager&, drainManager, ()); + MOCK_METHOD(AccessLog::AccessLogManager&, accessLogManager, ()); + 
MOCK_METHOD(void, failHealthcheck, (bool fail)); + MOCK_METHOD(void, exportStatsToChild, (envoy::HotRestartMessage::Reply::Stats*)); + MOCK_METHOD(bool, healthCheckFailed, ()); + MOCK_METHOD(HotRestart&, hotRestart, ()); + MOCK_METHOD(Init::Manager&, initManager, ()); + MOCK_METHOD(ServerLifecycleNotifier&, lifecycleNotifier, ()); + MOCK_METHOD(ListenerManager&, listenerManager, ()); + MOCK_METHOD(Envoy::MutexTracer*, mutexTracer, ()); + MOCK_METHOD(const Options&, options, ()); + MOCK_METHOD(OverloadManager&, overloadManager, ()); + MOCK_METHOD(Runtime::RandomGenerator&, random, ()); + MOCK_METHOD(Runtime::Loader&, runtime, ()); + MOCK_METHOD(void, shutdown, ()); + MOCK_METHOD(bool, isShutdown, ()); + MOCK_METHOD(void, shutdownAdmin, ()); + MOCK_METHOD(Singleton::Manager&, singletonManager, ()); + MOCK_METHOD(time_t, startTimeCurrentEpoch, ()); + MOCK_METHOD(time_t, startTimeFirstEpoch, ()); + MOCK_METHOD(Stats::Store&, stats, ()); + MOCK_METHOD(Grpc::Context&, grpcContext, ()); + MOCK_METHOD(Http::Context&, httpContext, ()); + MOCK_METHOD(ProcessContextOptRef, processContext, ()); + MOCK_METHOD(ThreadLocal::Instance&, threadLocal, ()); + MOCK_METHOD(const LocalInfo::LocalInfo&, localInfo, (), (const)); + MOCK_METHOD(std::chrono::milliseconds, statsFlushInterval, (), (const)); + MOCK_METHOD(void, flushStats, ()); + MOCK_METHOD(ProtobufMessage::ValidationContext&, messageValidationContext, ()); + MOCK_METHOD(Configuration::ServerFactoryContext&, serverFactoryContext, ()); + MOCK_METHOD(Configuration::TransportSocketFactoryContext&, transportSocketFactoryContext, ()); + + void setDefaultTracingConfig(const envoy::config::trace::v3::Tracing& tracing_config) override { + http_context_.setDefaultTracingConfig(tracing_config); + } + + TimeSource& timeSource() override { return time_system_; } + + NiceMock stats_store_; + testing::NiceMock thread_local_; + std::shared_ptr> dns_resolver_{ + new testing::NiceMock()}; + testing::NiceMock api_; + testing::NiceMock admin_; + Event::GlobalTimeSystem time_system_; + std::unique_ptr secret_manager_; + testing::NiceMock cluster_manager_; + Thread::MutexBasicLockable access_log_lock_; + testing::NiceMock runtime_loader_; + Extensions::TransportSockets::Tls::ContextManagerImpl ssl_context_manager_; + testing::NiceMock dispatcher_; + testing::NiceMock drain_manager_; + testing::NiceMock access_log_manager_; + testing::NiceMock hot_restart_; + testing::NiceMock options_; + testing::NiceMock random_; + testing::NiceMock lifecycle_notifier_; + testing::NiceMock local_info_; + testing::NiceMock init_manager_; + testing::NiceMock listener_manager_; + testing::NiceMock overload_manager_; + Singleton::ManagerPtr singleton_manager_; + Grpc::ContextImpl grpc_context_; + Http::ContextImpl http_context_; + testing::NiceMock validation_context_; + std::shared_ptr> + server_factory_context_; + std::shared_ptr> + transport_socket_factory_context_; +}; + +namespace Configuration { +class MockServerFactoryContext : public virtual ServerFactoryContext { +public: + MockServerFactoryContext(); + ~MockServerFactoryContext() override; + + MOCK_METHOD(Upstream::ClusterManager&, clusterManager, ()); + MOCK_METHOD(Event::Dispatcher&, dispatcher, ()); + MOCK_METHOD(const Network::DrainDecision&, drainDecision, ()); + MOCK_METHOD(const LocalInfo::LocalInfo&, localInfo, (), (const)); + MOCK_METHOD(Envoy::Runtime::RandomGenerator&, random, ()); + MOCK_METHOD(Envoy::Runtime::Loader&, runtime, ()); + MOCK_METHOD(Stats::Scope&, scope, ()); + MOCK_METHOD(Singleton::Manager&, 
singletonManager, ()); + MOCK_METHOD(ThreadLocal::Instance&, threadLocal, ()); + MOCK_METHOD(Server::Admin&, admin, ()); + MOCK_METHOD(TimeSource&, timeSource, ()); + Event::TestTimeSystem& timeSystem() { return time_system_; } + MOCK_METHOD(ProtobufMessage::ValidationContext&, messageValidationContext, ()); + MOCK_METHOD(ProtobufMessage::ValidationVisitor&, messageValidationVisitor, ()); + MOCK_METHOD(Api::Api&, api, ()); + Grpc::Context& grpcContext() override { return grpc_context_; } + MOCK_METHOD(Server::DrainManager&, drainManager, ()); + MOCK_METHOD(Init::Manager&, initManager, ()); + MOCK_METHOD(ServerLifecycleNotifier&, lifecycleNotifier, ()); + + testing::NiceMock cluster_manager_; + testing::NiceMock dispatcher_; + testing::NiceMock drain_manager_; + testing::NiceMock local_info_; + testing::NiceMock random_; + testing::NiceMock runtime_loader_; + testing::NiceMock scope_; + testing::NiceMock thread_local_; + testing::NiceMock validation_context_; + Singleton::ManagerPtr singleton_manager_; + testing::NiceMock admin_; + Event::GlobalTimeSystem time_system_; + testing::NiceMock api_; + Grpc::ContextImpl grpc_context_; +}; +} // namespace Configuration +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/listener_component_factory.cc b/test/mocks/server/listener_component_factory.cc new file mode 100644 index 000000000000..a0b00b7bc756 --- /dev/null +++ b/test/mocks/server/listener_component_factory.cc @@ -0,0 +1,31 @@ +#include "listener_component_factory.h" + +#include "envoy/config/core/v3/base.pb.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Server { + +using ::testing::_; +using ::testing::Invoke; + +MockListenerComponentFactory::MockListenerComponentFactory() + : socket_(std::make_shared>()) { + ON_CALL(*this, createListenSocket(_, _, _, _)) + .WillByDefault(Invoke([&](Network::Address::InstanceConstSharedPtr, Network::Socket::Type, + const Network::Socket::OptionsSharedPtr& options, + const ListenSocketCreationParams&) -> Network::SocketSharedPtr { + if (!Network::Socket::applyOptions(options, *socket_, + envoy::config::core::v3::SocketOption::STATE_PREBIND)) { + throw EnvoyException("MockListenerComponentFactory: Setting socket options failed"); + } + return socket_; + })); +} + +MockListenerComponentFactory::~MockListenerComponentFactory() = default; + +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/listener_component_factory.h b/test/mocks/server/listener_component_factory.h new file mode 100644 index 000000000000..84d73dd4bfd5 --- /dev/null +++ b/test/mocks/server/listener_component_factory.h @@ -0,0 +1,47 @@ +#pragma once + +#include "envoy/config/listener/v3/listener_components.pb.h" +#include "envoy/server/drain_manager.h" +#include "envoy/server/listener_manager.h" + +#include "test/mocks/network/mocks.h" + +#include "gmock/gmock.h" + +namespace Envoy { +namespace Server { +class MockListenerComponentFactory : public ListenerComponentFactory { +public: + MockListenerComponentFactory(); + ~MockListenerComponentFactory() override; + + DrainManagerPtr + createDrainManager(envoy::config::listener::v3::Listener::DrainType drain_type) override { + return DrainManagerPtr{createDrainManager_(drain_type)}; + } + LdsApiPtr createLdsApi(const envoy::config::core::v3::ConfigSource& lds_config) override { + return LdsApiPtr{createLdsApi_(lds_config)}; + } + + MOCK_METHOD(LdsApi*, createLdsApi_, (const envoy::config::core::v3::ConfigSource& lds_config)); + MOCK_METHOD(std::vector, 
createNetworkFilterFactoryList, + (const Protobuf::RepeatedPtrField& filters, + Configuration::FilterChainFactoryContext& filter_chain_factory_context)); + MOCK_METHOD(std::vector, createListenerFilterFactoryList, + (const Protobuf::RepeatedPtrField&, + Configuration::ListenerFactoryContext& context)); + MOCK_METHOD(std::vector, createUdpListenerFilterFactoryList, + (const Protobuf::RepeatedPtrField&, + Configuration::ListenerFactoryContext& context)); + MOCK_METHOD(Network::SocketSharedPtr, createListenSocket, + (Network::Address::InstanceConstSharedPtr address, Network::Socket::Type socket_type, + const Network::Socket::OptionsSharedPtr& options, + const ListenSocketCreationParams& params)); + MOCK_METHOD(DrainManager*, createDrainManager_, + (envoy::config::listener::v3::Listener::DrainType drain_type)); + MOCK_METHOD(uint64_t, nextListenerTag, ()); + + std::shared_ptr socket_; +}; +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/listener_factory_context.cc b/test/mocks/server/listener_factory_context.cc new file mode 100644 index 000000000000..99a7678c1a27 --- /dev/null +++ b/test/mocks/server/listener_factory_context.cc @@ -0,0 +1,15 @@ +#include "listener_factory_context.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Server { +namespace Configuration { +MockListenerFactoryContext::MockListenerFactoryContext() = default; + +MockListenerFactoryContext::~MockListenerFactoryContext() = default; + +} // namespace Configuration +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/listener_factory_context.h b/test/mocks/server/listener_factory_context.h new file mode 100644 index 000000000000..8d322735b577 --- /dev/null +++ b/test/mocks/server/listener_factory_context.h @@ -0,0 +1,23 @@ +#pragma once + +#include "envoy/server/listener_manager.h" + +#include "factory_context.h" +#include "gmock/gmock.h" + +namespace Envoy { +namespace Server { +namespace Configuration { +class MockListenerFactoryContext : public MockFactoryContext, public ListenerFactoryContext { +public: + MockListenerFactoryContext(); + ~MockListenerFactoryContext() override; + + const Network::ListenerConfig& listenerConfig() const override { return listener_config_; } + MOCK_METHOD(const Network::ListenerConfig&, listenerConfig_, (), (const)); + + Network::MockListenerConfig listener_config_; +}; +} // namespace Configuration +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/listener_manager.cc b/test/mocks/server/listener_manager.cc new file mode 100644 index 000000000000..0448ff4e7122 --- /dev/null +++ b/test/mocks/server/listener_manager.cc @@ -0,0 +1,16 @@ +#include "listener_manager.h" + +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Server { + +MockListenerManager::MockListenerManager() = default; + +MockListenerManager::~MockListenerManager() = default; + +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/listener_manager.h b/test/mocks/server/listener_manager.h new file mode 100644 index 000000000000..889dfa1f521f --- /dev/null +++ b/test/mocks/server/listener_manager.h @@ -0,0 +1,29 @@ +#pragma once + +#include "envoy/server/listener_manager.h" + +#include "gmock/gmock.h" + +namespace Envoy { +namespace Server { +class MockListenerManager : public ListenerManager { +public: + MockListenerManager(); + ~MockListenerManager() override; + + MOCK_METHOD(bool, addOrUpdateListener, + (const envoy::config::listener::v3::Listener& 
config, const std::string& version_info, + bool modifiable)); + MOCK_METHOD(void, createLdsApi, (const envoy::config::core::v3::ConfigSource& lds_config)); + MOCK_METHOD(std::vector>, listeners, ()); + MOCK_METHOD(uint64_t, numConnections, (), (const)); + MOCK_METHOD(bool, removeListener, (const std::string& listener_name)); + MOCK_METHOD(void, startWorkers, (GuardDog & guard_dog)); + MOCK_METHOD(void, stopListeners, (StopListenersType listeners_type)); + MOCK_METHOD(void, stopWorkers, ()); + MOCK_METHOD(void, beginListenerUpdate, ()); + MOCK_METHOD(void, endListenerUpdate, (ListenerManager::FailureStates &&)); + MOCK_METHOD(ApiListenerOptRef, apiListener, ()); +}; +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/main.cc b/test/mocks/server/main.cc new file mode 100644 index 000000000000..2cc3e8dfeec4 --- /dev/null +++ b/test/mocks/server/main.cc @@ -0,0 +1,24 @@ +#include "main.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Server { +namespace Configuration { + +using ::testing::Return; + +MockMain::MockMain(int wd_miss, int wd_megamiss, int wd_kill, int wd_multikill) + : wd_miss_(wd_miss), wd_megamiss_(wd_megamiss), wd_kill_(wd_kill), wd_multikill_(wd_multikill) { + ON_CALL(*this, wdMissTimeout()).WillByDefault(Return(wd_miss_)); + ON_CALL(*this, wdMegaMissTimeout()).WillByDefault(Return(wd_megamiss_)); + ON_CALL(*this, wdKillTimeout()).WillByDefault(Return(wd_kill_)); + ON_CALL(*this, wdMultiKillTimeout()).WillByDefault(Return(wd_multikill_)); +} + +MockMain::~MockMain() = default; + +} // namespace Configuration +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/main.h b/test/mocks/server/main.h new file mode 100644 index 000000000000..573cda3df1f0 --- /dev/null +++ b/test/mocks/server/main.h @@ -0,0 +1,37 @@ +#pragma once + +#include +#include +#include +#include + +#include "envoy/server/configuration.h" +#include "envoy/server/overload_manager.h" + +#include "gmock/gmock.h" + +namespace Envoy { +namespace Server { +namespace Configuration { +class MockMain : public Main { +public: + MockMain() : MockMain(0, 0, 0, 0) {} + MockMain(int wd_miss, int wd_megamiss, int wd_kill, int wd_multikill); + ~MockMain() override; + + MOCK_METHOD(Upstream::ClusterManager*, clusterManager, ()); + MOCK_METHOD(std::list&, statsSinks, ()); + MOCK_METHOD(std::chrono::milliseconds, statsFlushInterval, (), (const)); + MOCK_METHOD(std::chrono::milliseconds, wdMissTimeout, (), (const)); + MOCK_METHOD(std::chrono::milliseconds, wdMegaMissTimeout, (), (const)); + MOCK_METHOD(std::chrono::milliseconds, wdKillTimeout, (), (const)); + MOCK_METHOD(std::chrono::milliseconds, wdMultiKillTimeout, (), (const)); + + std::chrono::milliseconds wd_miss_; + std::chrono::milliseconds wd_megamiss_; + std::chrono::milliseconds wd_kill_; + std::chrono::milliseconds wd_multikill_; +}; +} // namespace Configuration +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/mocks.cc b/test/mocks/server/mocks.cc deleted file mode 100644 index e9fc293558c5..000000000000 --- a/test/mocks/server/mocks.cc +++ /dev/null @@ -1,295 +0,0 @@ -#include "mocks.h" - -#include - -#include "envoy/admin/v3/server_info.pb.h" -#include "envoy/config/core/v3/base.pb.h" - -#include "common/singleton/manager_impl.h" - -#include "gmock/gmock.h" -#include "gtest/gtest.h" - -using testing::_; -using testing::Invoke; -using testing::Return; -using testing::ReturnPointee; -using testing::ReturnRef; -using testing::SaveArg; - -namespace Envoy { 
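A second pattern worth noting is the SaveArg<0> default on MockDrainManager::startDrainSequence, added earlier in this patch: the mock captures the completion callback so a test can later invoke it and simulate the drain finishing. A minimal sketch of that flow, assuming gtest/gmock and that the captured member is callable with no arguments; drainCallerUnderTest is a hypothetical stand-in for code under test and is not part of the change.

#include "gmock/gmock.h"
#include "gtest/gtest.h"

#include "test/mocks/server/drain_manager.h"

namespace {

// Hypothetical helper standing in for production code that starts a drain.
void drainCallerUnderTest(Envoy::Server::DrainManager& drain_manager) {
  drain_manager.startDrainSequence([] {});
}

TEST(DrainManagerMockSketch, CapturedCompletionCanBeInvoked) {
  testing::NiceMock<Envoy::Server::MockDrainManager> drain_manager;
  drainCallerUnderTest(drain_manager);
  // The SaveArg<0> default stored the callback passed by the code under test;
  // invoking it simulates the drain sequence completing.
  ASSERT_TRUE(drain_manager.drain_sequence_completion_);
  drain_manager.drain_sequence_completion_();
}

} // namespace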
-namespace Server { - -MockOptions::MockOptions(const std::string& config_path) : config_path_(config_path) { - ON_CALL(*this, concurrency()).WillByDefault(ReturnPointee(&concurrency_)); - ON_CALL(*this, configPath()).WillByDefault(ReturnRef(config_path_)); - ON_CALL(*this, configProto()).WillByDefault(ReturnRef(config_proto_)); - ON_CALL(*this, configYaml()).WillByDefault(ReturnRef(config_yaml_)); - ON_CALL(*this, bootstrapVersion()).WillByDefault(ReturnRef(bootstrap_version_)); - ON_CALL(*this, allowUnknownStaticFields()).WillByDefault(Invoke([this] { - return allow_unknown_static_fields_; - })); - ON_CALL(*this, rejectUnknownDynamicFields()).WillByDefault(Invoke([this] { - return reject_unknown_dynamic_fields_; - })); - ON_CALL(*this, ignoreUnknownDynamicFields()).WillByDefault(Invoke([this] { - return ignore_unknown_dynamic_fields_; - })); - ON_CALL(*this, adminAddressPath()).WillByDefault(ReturnRef(admin_address_path_)); - ON_CALL(*this, serviceClusterName()).WillByDefault(ReturnRef(service_cluster_name_)); - ON_CALL(*this, serviceNodeName()).WillByDefault(ReturnRef(service_node_name_)); - ON_CALL(*this, serviceZone()).WillByDefault(ReturnRef(service_zone_name_)); - ON_CALL(*this, logLevel()).WillByDefault(Return(log_level_)); - ON_CALL(*this, logPath()).WillByDefault(ReturnRef(log_path_)); - ON_CALL(*this, restartEpoch()).WillByDefault(ReturnPointee(&hot_restart_epoch_)); - ON_CALL(*this, hotRestartDisabled()).WillByDefault(ReturnPointee(&hot_restart_disabled_)); - ON_CALL(*this, signalHandlingEnabled()).WillByDefault(ReturnPointee(&signal_handling_enabled_)); - ON_CALL(*this, mutexTracingEnabled()).WillByDefault(ReturnPointee(&mutex_tracing_enabled_)); - ON_CALL(*this, cpusetThreadsEnabled()).WillByDefault(ReturnPointee(&cpuset_threads_enabled_)); - ON_CALL(*this, disabledExtensions()).WillByDefault(ReturnRef(disabled_extensions_)); - ON_CALL(*this, toCommandLineOptions()).WillByDefault(Invoke([] { - return std::make_unique(); - })); -} -MockOptions::~MockOptions() = default; - -MockAdminStream::MockAdminStream() = default; -MockAdminStream::~MockAdminStream() = default; - -MockDrainManager::MockDrainManager() { - ON_CALL(*this, startDrainSequence(_)).WillByDefault(SaveArg<0>(&drain_sequence_completion_)); -} -MockDrainManager::~MockDrainManager() = default; - -MockWatchDog::MockWatchDog() = default; -MockWatchDog::~MockWatchDog() = default; - -MockGuardDog::MockGuardDog() : watch_dog_(new NiceMock()) { - ON_CALL(*this, createWatchDog(_, _)).WillByDefault(Return(watch_dog_)); -} -MockGuardDog::~MockGuardDog() = default; - -MockHotRestart::MockHotRestart() : stats_allocator_(*symbol_table_) { - ON_CALL(*this, logLock()).WillByDefault(ReturnRef(log_lock_)); - ON_CALL(*this, accessLogLock()).WillByDefault(ReturnRef(access_log_lock_)); - ON_CALL(*this, statsAllocator()).WillByDefault(ReturnRef(stats_allocator_)); -} -MockHotRestart::~MockHotRestart() = default; - -MockOverloadManager::MockOverloadManager() { - ON_CALL(*this, getThreadLocalOverloadState()).WillByDefault(ReturnRef(overload_state_)); -} -MockOverloadManager::~MockOverloadManager() = default; - -MockListenerComponentFactory::MockListenerComponentFactory() - : socket_(std::make_shared>()) { - ON_CALL(*this, createListenSocket(_, _, _, _)) - .WillByDefault(Invoke([&](Network::Address::InstanceConstSharedPtr, Network::Socket::Type, - const Network::Socket::OptionsSharedPtr& options, - const ListenSocketCreationParams&) -> Network::SocketSharedPtr { - if (!Network::Socket::applyOptions(options, *socket_, - 
envoy::config::core::v3::SocketOption::STATE_PREBIND)) { - throw EnvoyException("MockListenerComponentFactory: Setting socket options failed"); - } - return socket_; - })); -} -MockListenerComponentFactory::~MockListenerComponentFactory() = default; - -MockServerLifecycleNotifier::MockServerLifecycleNotifier() = default; -MockServerLifecycleNotifier::~MockServerLifecycleNotifier() = default; - -MockListenerManager::MockListenerManager() = default; -MockListenerManager::~MockListenerManager() = default; - -MockWorkerFactory::MockWorkerFactory() = default; -MockWorkerFactory::~MockWorkerFactory() = default; - -MockWorker::MockWorker() { - ON_CALL(*this, addListener(_, _, _)) - .WillByDefault( - Invoke([this](absl::optional overridden_listener, - Network::ListenerConfig& config, AddListenerCompletion completion) -> void { - UNREFERENCED_PARAMETER(overridden_listener); - config.listenSocketFactory().getListenSocket(); - EXPECT_EQ(nullptr, add_listener_completion_); - add_listener_completion_ = completion; - })); - - ON_CALL(*this, removeListener(_, _)) - .WillByDefault( - Invoke([this](Network::ListenerConfig&, std::function completion) -> void { - EXPECT_EQ(nullptr, remove_listener_completion_); - remove_listener_completion_ = completion; - })); - - ON_CALL(*this, stopListener(_, _)) - .WillByDefault(Invoke([](Network::ListenerConfig&, std::function completion) -> void { - if (completion != nullptr) { - completion(); - } - })); - - ON_CALL(*this, removeFilterChains(_, _, _)) - .WillByDefault(Invoke([this](uint64_t, const std::list&, - std::function completion) -> void { - EXPECT_EQ(nullptr, remove_filter_chains_completion_); - remove_filter_chains_completion_ = completion; - })); -} -MockWorker::~MockWorker() = default; - -MockInstance::MockInstance() - : secret_manager_(std::make_unique(admin_.getConfigTracker())), - cluster_manager_(timeSource()), ssl_context_manager_(timeSource()), - singleton_manager_(new Singleton::ManagerImpl(Thread::threadFactoryForTest())), - grpc_context_(stats_store_.symbolTable()), http_context_(stats_store_.symbolTable()), - server_factory_context_( - std::make_shared>()), - transport_socket_factory_context_( - std::make_shared>()) { - ON_CALL(*this, threadLocal()).WillByDefault(ReturnRef(thread_local_)); - ON_CALL(*this, stats()).WillByDefault(ReturnRef(stats_store_)); - ON_CALL(*this, grpcContext()).WillByDefault(ReturnRef(grpc_context_)); - ON_CALL(*this, httpContext()).WillByDefault(ReturnRef(http_context_)); - ON_CALL(*this, dnsResolver()).WillByDefault(Return(dns_resolver_)); - ON_CALL(*this, api()).WillByDefault(ReturnRef(api_)); - ON_CALL(*this, admin()).WillByDefault(ReturnRef(admin_)); - ON_CALL(*this, clusterManager()).WillByDefault(ReturnRef(cluster_manager_)); - ON_CALL(*this, sslContextManager()).WillByDefault(ReturnRef(ssl_context_manager_)); - ON_CALL(*this, accessLogManager()).WillByDefault(ReturnRef(access_log_manager_)); - ON_CALL(*this, runtime()).WillByDefault(ReturnRef(runtime_loader_)); - ON_CALL(*this, dispatcher()).WillByDefault(ReturnRef(dispatcher_)); - ON_CALL(*this, hotRestart()).WillByDefault(ReturnRef(hot_restart_)); - ON_CALL(*this, random()).WillByDefault(ReturnRef(random_)); - ON_CALL(*this, lifecycleNotifier()).WillByDefault(ReturnRef(lifecycle_notifier_)); - ON_CALL(*this, localInfo()).WillByDefault(ReturnRef(local_info_)); - ON_CALL(*this, options()).WillByDefault(ReturnRef(options_)); - ON_CALL(*this, drainManager()).WillByDefault(ReturnRef(drain_manager_)); - ON_CALL(*this, 
initManager()).WillByDefault(ReturnRef(init_manager_)); - ON_CALL(*this, listenerManager()).WillByDefault(ReturnRef(listener_manager_)); - ON_CALL(*this, mutexTracer()).WillByDefault(Return(nullptr)); - ON_CALL(*this, singletonManager()).WillByDefault(ReturnRef(*singleton_manager_)); - ON_CALL(*this, overloadManager()).WillByDefault(ReturnRef(overload_manager_)); - ON_CALL(*this, messageValidationContext()).WillByDefault(ReturnRef(validation_context_)); - ON_CALL(*this, serverFactoryContext()).WillByDefault(ReturnRef(*server_factory_context_)); - ON_CALL(*this, transportSocketFactoryContext()) - .WillByDefault(ReturnRef(*transport_socket_factory_context_)); -} - -MockInstance::~MockInstance() = default; - -namespace Configuration { - -MockMain::MockMain(int wd_miss, int wd_megamiss, int wd_kill, int wd_multikill) - : wd_miss_(wd_miss), wd_megamiss_(wd_megamiss), wd_kill_(wd_kill), wd_multikill_(wd_multikill) { - ON_CALL(*this, wdMissTimeout()).WillByDefault(Return(wd_miss_)); - ON_CALL(*this, wdMegaMissTimeout()).WillByDefault(Return(wd_megamiss_)); - ON_CALL(*this, wdKillTimeout()).WillByDefault(Return(wd_kill_)); - ON_CALL(*this, wdMultiKillTimeout()).WillByDefault(Return(wd_multikill_)); -} - -MockMain::~MockMain() = default; - -MockServerFactoryContext::MockServerFactoryContext() - : singleton_manager_(new Singleton::ManagerImpl(Thread::threadFactoryForTest())), - grpc_context_(scope_.symbolTable()) { - ON_CALL(*this, clusterManager()).WillByDefault(ReturnRef(cluster_manager_)); - ON_CALL(*this, dispatcher()).WillByDefault(ReturnRef(dispatcher_)); - ON_CALL(*this, drainDecision()).WillByDefault(ReturnRef(drain_manager_)); - ON_CALL(*this, localInfo()).WillByDefault(ReturnRef(local_info_)); - ON_CALL(*this, random()).WillByDefault(ReturnRef(random_)); - ON_CALL(*this, runtime()).WillByDefault(ReturnRef(runtime_loader_)); - ON_CALL(*this, scope()).WillByDefault(ReturnRef(scope_)); - ON_CALL(*this, singletonManager()).WillByDefault(ReturnRef(*singleton_manager_)); - ON_CALL(*this, threadLocal()).WillByDefault(ReturnRef(thread_local_)); - ON_CALL(*this, admin()).WillByDefault(ReturnRef(admin_)); - ON_CALL(*this, api()).WillByDefault(ReturnRef(api_)); - ON_CALL(*this, timeSource()).WillByDefault(ReturnRef(time_system_)); - ON_CALL(*this, messageValidationContext()).WillByDefault(ReturnRef(validation_context_)); - ON_CALL(*this, messageValidationVisitor()) - .WillByDefault(ReturnRef(ProtobufMessage::getStrictValidationVisitor())); - ON_CALL(*this, api()).WillByDefault(ReturnRef(api_)); - ON_CALL(*this, drainManager()).WillByDefault(ReturnRef(drain_manager_)); -} -MockServerFactoryContext::~MockServerFactoryContext() = default; - -MockFactoryContext::MockFactoryContext() - : singleton_manager_(new Singleton::ManagerImpl(Thread::threadFactoryForTest())), - grpc_context_(scope_.symbolTable()), http_context_(scope_.symbolTable()) { - ON_CALL(*this, getServerFactoryContext()).WillByDefault(ReturnRef(server_factory_context_)); - ON_CALL(*this, accessLogManager()).WillByDefault(ReturnRef(access_log_manager_)); - ON_CALL(*this, clusterManager()).WillByDefault(ReturnRef(cluster_manager_)); - ON_CALL(*this, dispatcher()).WillByDefault(ReturnRef(dispatcher_)); - ON_CALL(*this, drainDecision()).WillByDefault(ReturnRef(drain_manager_)); - ON_CALL(*this, initManager()).WillByDefault(ReturnRef(init_manager_)); - ON_CALL(*this, lifecycleNotifier()).WillByDefault(ReturnRef(lifecycle_notifier_)); - ON_CALL(*this, localInfo()).WillByDefault(ReturnRef(local_info_)); - ON_CALL(*this, 
random()).WillByDefault(ReturnRef(random_)); - ON_CALL(*this, runtime()).WillByDefault(ReturnRef(runtime_loader_)); - ON_CALL(*this, scope()).WillByDefault(ReturnRef(scope_)); - ON_CALL(*this, singletonManager()).WillByDefault(ReturnRef(*singleton_manager_)); - ON_CALL(*this, threadLocal()).WillByDefault(ReturnRef(thread_local_)); - ON_CALL(*this, admin()).WillByDefault(ReturnRef(admin_)); - ON_CALL(*this, listenerScope()).WillByDefault(ReturnRef(listener_scope_)); - ON_CALL(*this, api()).WillByDefault(ReturnRef(api_)); - ON_CALL(*this, timeSource()).WillByDefault(ReturnRef(time_system_)); - ON_CALL(*this, overloadManager()).WillByDefault(ReturnRef(overload_manager_)); - ON_CALL(*this, messageValidationContext()).WillByDefault(ReturnRef(validation_context_)); - ON_CALL(*this, messageValidationVisitor()) - .WillByDefault(ReturnRef(ProtobufMessage::getStrictValidationVisitor())); - ON_CALL(*this, api()).WillByDefault(ReturnRef(api_)); -} - -MockFactoryContext::~MockFactoryContext() = default; - -MockTransportSocketFactoryContext::MockTransportSocketFactoryContext() - : secret_manager_(std::make_unique(config_tracker_)) { - ON_CALL(*this, clusterManager()).WillByDefault(ReturnRef(cluster_manager_)); - ON_CALL(*this, api()).WillByDefault(ReturnRef(api_)); - ON_CALL(*this, messageValidationVisitor()) - .WillByDefault(ReturnRef(ProtobufMessage::getStrictValidationVisitor())); -} - -MockTransportSocketFactoryContext::~MockTransportSocketFactoryContext() = default; - -MockListenerFactoryContext::MockListenerFactoryContext() = default; -MockListenerFactoryContext::~MockListenerFactoryContext() = default; - -MockHealthCheckerFactoryContext::MockHealthCheckerFactoryContext() { - event_logger_ = new NiceMock(); - ON_CALL(*this, cluster()).WillByDefault(ReturnRef(cluster_)); - ON_CALL(*this, dispatcher()).WillByDefault(ReturnRef(dispatcher_)); - ON_CALL(*this, random()).WillByDefault(ReturnRef(random_)); - ON_CALL(*this, runtime()).WillByDefault(ReturnRef(runtime_)); - ON_CALL(*this, eventLogger_()).WillByDefault(Return(event_logger_)); - ON_CALL(*this, messageValidationVisitor()) - .WillByDefault(ReturnRef(ProtobufMessage::getStrictValidationVisitor())); - ON_CALL(*this, api()).WillByDefault(ReturnRef(api_)); -} - -MockHealthCheckerFactoryContext::~MockHealthCheckerFactoryContext() = default; - -MockFilterChainFactoryContext::MockFilterChainFactoryContext() = default; -MockFilterChainFactoryContext::~MockFilterChainFactoryContext() = default; - -MockTracerFactory::MockTracerFactory(const std::string& name) : name_(name) { - ON_CALL(*this, createEmptyConfigProto()).WillByDefault(Invoke([] { - return std::make_unique(); - })); -} -MockTracerFactory::~MockTracerFactory() = default; - -MockTracerFactoryContext::MockTracerFactoryContext() { - ON_CALL(*this, serverFactoryContext()).WillByDefault(ReturnRef(server_factory_context_)); - ON_CALL(*this, messageValidationVisitor()) - .WillByDefault(ReturnRef(ProtobufMessage::getStrictValidationVisitor())); -} - -MockTracerFactoryContext::~MockTracerFactoryContext() = default; - -MockBootstrapExtensionFactory::MockBootstrapExtensionFactory() = default; -MockBootstrapExtensionFactory::~MockBootstrapExtensionFactory() = default; -} // namespace Configuration -} // namespace Server -} // namespace Envoy diff --git a/test/mocks/server/mocks.h b/test/mocks/server/mocks.h index 1685d0f706d2..d041412d01d1 100644 --- a/test/mocks/server/mocks.h +++ b/test/mocks/server/mocks.h @@ -1,661 +1,28 @@ #pragma once -#include -#include -#include -#include +// 
NOLINT(namespace-envoy) -#include "envoy/common/mutex_tracer.h" -#include "envoy/config/bootstrap/v3/bootstrap.pb.h" -#include "envoy/config/core/v3/base.pb.h" -#include "envoy/config/core/v3/config_source.pb.h" -#include "envoy/config/listener/v3/listener.pb.h" -#include "envoy/config/listener/v3/listener_components.pb.h" -#include "envoy/protobuf/message_validator.h" -#include "envoy/server/admin.h" -#include "envoy/server/bootstrap_extension_config.h" -#include "envoy/server/configuration.h" -#include "envoy/server/drain_manager.h" -#include "envoy/server/filter_config.h" -#include "envoy/server/health_checker_config.h" -#include "envoy/server/instance.h" -#include "envoy/server/options.h" -#include "envoy/server/overload_manager.h" -#include "envoy/server/tracer_config.h" -#include "envoy/server/transport_socket_config.h" -#include "envoy/server/worker.h" -#include "envoy/ssl/context_manager.h" -#include "envoy/stats/scope.h" -#include "envoy/thread/thread.h" - -#include "common/grpc/context_impl.h" -#include "common/http/context_impl.h" -#include "common/secret/secret_manager_impl.h" -#include "common/stats/fake_symbol_table_impl.h" - -#include "extensions/transport_sockets/tls/context_manager_impl.h" - -#include "test/mocks/access_log/mocks.h" -#include "test/mocks/api/mocks.h" -#include "test/mocks/event/mocks.h" -#include "test/mocks/http/mocks.h" -#include "test/mocks/init/mocks.h" -#include "test/mocks/local_info/mocks.h" -#include "test/mocks/network/mocks.h" -#include "test/mocks/protobuf/mocks.h" -#include "test/mocks/router/mocks.h" -#include "test/mocks/runtime/mocks.h" -#include "test/mocks/secret/mocks.h" -#include "test/mocks/stats/mocks.h" -#include "test/mocks/thread_local/mocks.h" -#include "test/mocks/tracing/mocks.h" -#include "test/mocks/upstream/mocks.h" -#include "test/test_common/test_time_system.h" - -#include "absl/strings/string_view.h" #include "admin.h" +#include "admin_stream.h" +#include "bootstrap_extension_factory.h" #include "config_tracker.h" -#include "gmock/gmock.h" -#include "spdlog/spdlog.h" - -namespace Envoy { -namespace Server { - -namespace Configuration { -class MockServerFactoryContext; -class MockTransportSocketFactoryContext; -} // namespace Configuration - -class MockOptions : public Options { -public: - MockOptions() : MockOptions(std::string()) {} - MockOptions(const std::string& config_path); - ~MockOptions() override; - - MOCK_METHOD(uint64_t, baseId, (), (const)); - MOCK_METHOD(bool, useDynamicBaseId, (), (const)); - MOCK_METHOD(const std::string&, baseIdPath, (), (const)); - MOCK_METHOD(uint32_t, concurrency, (), (const)); - MOCK_METHOD(const std::string&, configPath, (), (const)); - MOCK_METHOD(const envoy::config::bootstrap::v3::Bootstrap&, configProto, (), (const)); - MOCK_METHOD(const std::string&, configYaml, (), (const)); - MOCK_METHOD(const absl::optional&, bootstrapVersion, (), (const)); - MOCK_METHOD(bool, allowUnknownStaticFields, (), (const)); - MOCK_METHOD(bool, rejectUnknownDynamicFields, (), (const)); - MOCK_METHOD(bool, ignoreUnknownDynamicFields, (), (const)); - MOCK_METHOD(const std::string&, adminAddressPath, (), (const)); - MOCK_METHOD(Network::Address::IpVersion, localAddressIpVersion, (), (const)); - MOCK_METHOD(std::chrono::seconds, drainTime, (), (const)); - MOCK_METHOD(std::chrono::seconds, parentShutdownTime, (), (const)); - MOCK_METHOD(Server::DrainStrategy, drainStrategy, (), (const)); - MOCK_METHOD(spdlog::level::level_enum, logLevel, (), (const)); - MOCK_METHOD((const std::vector>&), - componentLogLevels, 
(), (const)); - MOCK_METHOD(const std::string&, logFormat, (), (const)); - MOCK_METHOD(bool, logFormatEscaped, (), (const)); - MOCK_METHOD(const std::string&, logPath, (), (const)); - MOCK_METHOD(uint64_t, restartEpoch, (), (const)); - MOCK_METHOD(std::chrono::milliseconds, fileFlushIntervalMsec, (), (const)); - MOCK_METHOD(Mode, mode, (), (const)); - MOCK_METHOD(const std::string&, serviceClusterName, (), (const)); - MOCK_METHOD(const std::string&, serviceNodeName, (), (const)); - MOCK_METHOD(const std::string&, serviceZone, (), (const)); - MOCK_METHOD(bool, hotRestartDisabled, (), (const)); - MOCK_METHOD(bool, signalHandlingEnabled, (), (const)); - MOCK_METHOD(bool, mutexTracingEnabled, (), (const)); - MOCK_METHOD(bool, fakeSymbolTableEnabled, (), (const)); - MOCK_METHOD(bool, cpusetThreadsEnabled, (), (const)); - MOCK_METHOD(const std::vector&, disabledExtensions, (), (const)); - MOCK_METHOD(Server::CommandLineOptionsPtr, toCommandLineOptions, (), (const)); - - std::string config_path_; - envoy::config::bootstrap::v3::Bootstrap config_proto_; - std::string config_yaml_; - absl::optional bootstrap_version_; - bool allow_unknown_static_fields_{}; - bool reject_unknown_dynamic_fields_{}; - bool ignore_unknown_dynamic_fields_{}; - std::string admin_address_path_; - std::string service_cluster_name_; - std::string service_node_name_; - std::string service_zone_name_; - spdlog::level::level_enum log_level_{spdlog::level::trace}; - std::string log_path_; - uint32_t concurrency_{1}; - uint64_t hot_restart_epoch_{}; - bool hot_restart_disabled_{}; - bool signal_handling_enabled_{true}; - bool mutex_tracing_enabled_{}; - bool cpuset_threads_enabled_{}; - std::vector disabled_extensions_; -}; - -class MockAdminStream : public AdminStream { -public: - MockAdminStream(); - ~MockAdminStream() override; - - MOCK_METHOD(void, setEndStreamOnComplete, (bool)); - MOCK_METHOD(void, addOnDestroyCallback, (std::function)); - MOCK_METHOD(const Buffer::Instance*, getRequestBody, (), (const)); - MOCK_METHOD(Http::RequestHeaderMap&, getRequestHeaders, (), (const)); - MOCK_METHOD(NiceMock&, getDecoderFilterCallbacks, (), - (const)); - MOCK_METHOD(Http::Http1StreamEncoderOptionsOptRef, http1StreamEncoderOptions, ()); -}; - -class MockDrainManager : public DrainManager { -public: - MockDrainManager(); - ~MockDrainManager() override; - - // Server::DrainManager - MOCK_METHOD(bool, drainClose, (), (const)); - MOCK_METHOD(bool, draining, (), (const)); - MOCK_METHOD(void, startDrainSequence, (std::function completion)); - MOCK_METHOD(void, startParentShutdownSequence, ()); - - std::function drain_sequence_completion_; -}; - -class MockWatchDog : public WatchDog { -public: - MockWatchDog(); - ~MockWatchDog() override; - - // Server::WatchDog - MOCK_METHOD(void, startWatchdog, (Event::Dispatcher & dispatcher)); - MOCK_METHOD(void, touch, ()); - MOCK_METHOD(Thread::ThreadId, threadId, (), (const)); - MOCK_METHOD(MonotonicTime, lastTouchTime, (), (const)); -}; - -class MockGuardDog : public GuardDog { -public: - MockGuardDog(); - ~MockGuardDog() override; - - // Server::GuardDog - MOCK_METHOD(WatchDogSharedPtr, createWatchDog, - (Thread::ThreadId thread_id, const std::string& thread_name)); - MOCK_METHOD(void, stopWatching, (WatchDogSharedPtr wd)); - - std::shared_ptr watch_dog_; -}; - -class MockHotRestart : public HotRestart { -public: - MockHotRestart(); - ~MockHotRestart() override; - - // Server::HotRestart - MOCK_METHOD(void, drainParentListeners, ()); - MOCK_METHOD(int, duplicateParentListenSocket, (const 
std::string& address)); - MOCK_METHOD(std::unique_ptr, getParentStats, ()); - MOCK_METHOD(void, initialize, (Event::Dispatcher & dispatcher, Server::Instance& server)); - MOCK_METHOD(void, sendParentAdminShutdownRequest, (time_t & original_start_time)); - MOCK_METHOD(void, sendParentTerminateRequest, ()); - MOCK_METHOD(ServerStatsFromParent, mergeParentStatsIfAny, (Stats::StoreRoot & stats_store)); - MOCK_METHOD(void, shutdown, ()); - MOCK_METHOD(uint32_t, baseId, ()); - MOCK_METHOD(std::string, version, ()); - MOCK_METHOD(Thread::BasicLockable&, logLock, ()); - MOCK_METHOD(Thread::BasicLockable&, accessLogLock, ()); - MOCK_METHOD(Stats::Allocator&, statsAllocator, ()); - -private: - Stats::TestSymbolTable symbol_table_; - Thread::MutexBasicLockable log_lock_; - Thread::MutexBasicLockable access_log_lock_; - Stats::AllocatorImpl stats_allocator_; -}; - -class MockListenerComponentFactory : public ListenerComponentFactory { -public: - MockListenerComponentFactory(); - ~MockListenerComponentFactory() override; - - DrainManagerPtr - createDrainManager(envoy::config::listener::v3::Listener::DrainType drain_type) override { - return DrainManagerPtr{createDrainManager_(drain_type)}; - } - LdsApiPtr createLdsApi(const envoy::config::core::v3::ConfigSource& lds_config) override { - return LdsApiPtr{createLdsApi_(lds_config)}; - } - - MOCK_METHOD(LdsApi*, createLdsApi_, (const envoy::config::core::v3::ConfigSource& lds_config)); - MOCK_METHOD(std::vector, createNetworkFilterFactoryList, - (const Protobuf::RepeatedPtrField& filters, - Configuration::FilterChainFactoryContext& filter_chain_factory_context)); - MOCK_METHOD(std::vector, createListenerFilterFactoryList, - (const Protobuf::RepeatedPtrField&, - Configuration::ListenerFactoryContext& context)); - MOCK_METHOD(std::vector, createUdpListenerFilterFactoryList, - (const Protobuf::RepeatedPtrField&, - Configuration::ListenerFactoryContext& context)); - MOCK_METHOD(Network::SocketSharedPtr, createListenSocket, - (Network::Address::InstanceConstSharedPtr address, Network::Socket::Type socket_type, - const Network::Socket::OptionsSharedPtr& options, - const ListenSocketCreationParams& params)); - MOCK_METHOD(DrainManager*, createDrainManager_, - (envoy::config::listener::v3::Listener::DrainType drain_type)); - MOCK_METHOD(uint64_t, nextListenerTag, ()); - - std::shared_ptr socket_; -}; - -class MockListenerManager : public ListenerManager { -public: - MockListenerManager(); - ~MockListenerManager() override; - - MOCK_METHOD(bool, addOrUpdateListener, - (const envoy::config::listener::v3::Listener& config, const std::string& version_info, - bool modifiable)); - MOCK_METHOD(void, createLdsApi, (const envoy::config::core::v3::ConfigSource& lds_config)); - MOCK_METHOD(std::vector>, listeners, ()); - MOCK_METHOD(uint64_t, numConnections, (), (const)); - MOCK_METHOD(bool, removeListener, (const std::string& listener_name)); - MOCK_METHOD(void, startWorkers, (GuardDog & guard_dog)); - MOCK_METHOD(void, stopListeners, (StopListenersType listeners_type)); - MOCK_METHOD(void, stopWorkers, ()); - MOCK_METHOD(void, beginListenerUpdate, ()); - MOCK_METHOD(void, endListenerUpdate, (ListenerManager::FailureStates &&)); - MOCK_METHOD(ApiListenerOptRef, apiListener, ()); -}; - -class MockServerLifecycleNotifier : public ServerLifecycleNotifier { -public: - MockServerLifecycleNotifier(); - ~MockServerLifecycleNotifier() override; - - MOCK_METHOD(ServerLifecycleNotifier::HandlePtr, registerCallback, (Stage, StageCallback)); - 
MOCK_METHOD(ServerLifecycleNotifier::HandlePtr, registerCallback, - (Stage, StageCallbackWithCompletion)); -}; - -class MockWorkerFactory : public WorkerFactory { -public: - MockWorkerFactory(); - ~MockWorkerFactory() override; - - // Server::WorkerFactory - WorkerPtr createWorker(OverloadManager&, const std::string&) override { - return WorkerPtr{createWorker_()}; - } - - MOCK_METHOD(Worker*, createWorker_, ()); -}; - -class MockWorker : public Worker { -public: - MockWorker(); - ~MockWorker() override; - - void callAddCompletion(bool success) { - EXPECT_NE(nullptr, add_listener_completion_); - add_listener_completion_(success); - add_listener_completion_ = nullptr; - } - - void callRemovalCompletion() { - EXPECT_NE(nullptr, remove_listener_completion_); - remove_listener_completion_(); - remove_listener_completion_ = nullptr; - } - - void callDrainFilterChainsComplete() { - EXPECT_NE(nullptr, remove_filter_chains_completion_); - remove_filter_chains_completion_(); - remove_filter_chains_completion_ = nullptr; - } - - // Server::Worker - MOCK_METHOD(void, addListener, - (absl::optional overridden_listener, Network::ListenerConfig& listener, - AddListenerCompletion completion)); - MOCK_METHOD(uint64_t, numConnections, (), (const)); - MOCK_METHOD(void, removeListener, - (Network::ListenerConfig & listener, std::function completion)); - MOCK_METHOD(void, start, (GuardDog & guard_dog)); - MOCK_METHOD(void, initializeStats, (Stats::Scope & scope)); - MOCK_METHOD(void, stop, ()); - MOCK_METHOD(void, stopListener, - (Network::ListenerConfig & listener, std::function completion)); - MOCK_METHOD(void, removeFilterChains, - (uint64_t listener_tag, const std::list& filter_chains, - std::function completion)); - - AddListenerCompletion add_listener_completion_; - std::function remove_listener_completion_; - std::function remove_filter_chains_completion_; -}; - -class MockOverloadManager : public OverloadManager { -public: - MockOverloadManager(); - ~MockOverloadManager() override; - - // OverloadManager - MOCK_METHOD(void, start, ()); - MOCK_METHOD(bool, registerForAction, - (const std::string& action, Event::Dispatcher& dispatcher, - OverloadActionCb callback)); - MOCK_METHOD(ThreadLocalOverloadState&, getThreadLocalOverloadState, ()); - - ThreadLocalOverloadState overload_state_; -}; - -class MockInstance : public Instance { -public: - MockInstance(); - ~MockInstance() override; - - Secret::SecretManager& secretManager() override { return *(secret_manager_.get()); } - - MOCK_METHOD(Admin&, admin, ()); - MOCK_METHOD(Api::Api&, api, ()); - MOCK_METHOD(Upstream::ClusterManager&, clusterManager, ()); - MOCK_METHOD(Ssl::ContextManager&, sslContextManager, ()); - MOCK_METHOD(Event::Dispatcher&, dispatcher, ()); - MOCK_METHOD(Network::DnsResolverSharedPtr, dnsResolver, ()); - MOCK_METHOD(void, drainListeners, ()); - MOCK_METHOD(DrainManager&, drainManager, ()); - MOCK_METHOD(AccessLog::AccessLogManager&, accessLogManager, ()); - MOCK_METHOD(void, failHealthcheck, (bool fail)); - MOCK_METHOD(void, exportStatsToChild, (envoy::HotRestartMessage::Reply::Stats*)); - MOCK_METHOD(bool, healthCheckFailed, ()); - MOCK_METHOD(HotRestart&, hotRestart, ()); - MOCK_METHOD(Init::Manager&, initManager, ()); - MOCK_METHOD(ServerLifecycleNotifier&, lifecycleNotifier, ()); - MOCK_METHOD(ListenerManager&, listenerManager, ()); - MOCK_METHOD(Envoy::MutexTracer*, mutexTracer, ()); - MOCK_METHOD(const Options&, options, ()); - MOCK_METHOD(OverloadManager&, overloadManager, ()); - MOCK_METHOD(Runtime::RandomGenerator&, random, 
()); - MOCK_METHOD(Runtime::Loader&, runtime, ()); - MOCK_METHOD(void, shutdown, ()); - MOCK_METHOD(bool, isShutdown, ()); - MOCK_METHOD(void, shutdownAdmin, ()); - MOCK_METHOD(Singleton::Manager&, singletonManager, ()); - MOCK_METHOD(time_t, startTimeCurrentEpoch, ()); - MOCK_METHOD(time_t, startTimeFirstEpoch, ()); - MOCK_METHOD(Stats::Store&, stats, ()); - MOCK_METHOD(Grpc::Context&, grpcContext, ()); - MOCK_METHOD(Http::Context&, httpContext, ()); - MOCK_METHOD(ProcessContextOptRef, processContext, ()); - MOCK_METHOD(ThreadLocal::Instance&, threadLocal, ()); - MOCK_METHOD(const LocalInfo::LocalInfo&, localInfo, (), (const)); - MOCK_METHOD(std::chrono::milliseconds, statsFlushInterval, (), (const)); - MOCK_METHOD(void, flushStats, ()); - MOCK_METHOD(ProtobufMessage::ValidationContext&, messageValidationContext, ()); - MOCK_METHOD(Configuration::ServerFactoryContext&, serverFactoryContext, ()); - MOCK_METHOD(Configuration::TransportSocketFactoryContext&, transportSocketFactoryContext, ()); - - void setDefaultTracingConfig(const envoy::config::trace::v3::Tracing& tracing_config) override { - http_context_.setDefaultTracingConfig(tracing_config); - } - - TimeSource& timeSource() override { return time_system_; } - - NiceMock stats_store_; - testing::NiceMock thread_local_; - std::shared_ptr> dns_resolver_{ - new testing::NiceMock()}; - testing::NiceMock api_; - testing::NiceMock admin_; - Event::GlobalTimeSystem time_system_; - std::unique_ptr secret_manager_; - testing::NiceMock cluster_manager_; - Thread::MutexBasicLockable access_log_lock_; - testing::NiceMock runtime_loader_; - Extensions::TransportSockets::Tls::ContextManagerImpl ssl_context_manager_; - testing::NiceMock dispatcher_; - testing::NiceMock drain_manager_; - testing::NiceMock access_log_manager_; - testing::NiceMock hot_restart_; - testing::NiceMock options_; - testing::NiceMock random_; - testing::NiceMock lifecycle_notifier_; - testing::NiceMock local_info_; - testing::NiceMock init_manager_; - testing::NiceMock listener_manager_; - testing::NiceMock overload_manager_; - Singleton::ManagerPtr singleton_manager_; - Grpc::ContextImpl grpc_context_; - Http::ContextImpl http_context_; - testing::NiceMock validation_context_; - std::shared_ptr> - server_factory_context_; - std::shared_ptr> - transport_socket_factory_context_; -}; - -namespace Configuration { - -class MockMain : public Main { -public: - MockMain() : MockMain(0, 0, 0, 0) {} - MockMain(int wd_miss, int wd_megamiss, int wd_kill, int wd_multikill); - ~MockMain() override; - - MOCK_METHOD(Upstream::ClusterManager*, clusterManager, ()); - MOCK_METHOD(std::list&, statsSinks, ()); - MOCK_METHOD(std::chrono::milliseconds, statsFlushInterval, (), (const)); - MOCK_METHOD(std::chrono::milliseconds, wdMissTimeout, (), (const)); - MOCK_METHOD(std::chrono::milliseconds, wdMegaMissTimeout, (), (const)); - MOCK_METHOD(std::chrono::milliseconds, wdKillTimeout, (), (const)); - MOCK_METHOD(std::chrono::milliseconds, wdMultiKillTimeout, (), (const)); - - std::chrono::milliseconds wd_miss_; - std::chrono::milliseconds wd_megamiss_; - std::chrono::milliseconds wd_kill_; - std::chrono::milliseconds wd_multikill_; -}; - -class MockServerFactoryContext : public virtual ServerFactoryContext { -public: - MockServerFactoryContext(); - ~MockServerFactoryContext() override; - - MOCK_METHOD(Upstream::ClusterManager&, clusterManager, ()); - MOCK_METHOD(Event::Dispatcher&, dispatcher, ()); - MOCK_METHOD(const Network::DrainDecision&, drainDecision, ()); - MOCK_METHOD(const 
LocalInfo::LocalInfo&, localInfo, (), (const)); - MOCK_METHOD(Envoy::Runtime::RandomGenerator&, random, ()); - MOCK_METHOD(Envoy::Runtime::Loader&, runtime, ()); - MOCK_METHOD(Stats::Scope&, scope, ()); - MOCK_METHOD(Singleton::Manager&, singletonManager, ()); - MOCK_METHOD(ThreadLocal::Instance&, threadLocal, ()); - MOCK_METHOD(Server::Admin&, admin, ()); - MOCK_METHOD(TimeSource&, timeSource, ()); - Event::TestTimeSystem& timeSystem() { return time_system_; } - MOCK_METHOD(ProtobufMessage::ValidationContext&, messageValidationContext, ()); - MOCK_METHOD(ProtobufMessage::ValidationVisitor&, messageValidationVisitor, ()); - MOCK_METHOD(Api::Api&, api, ()); - Grpc::Context& grpcContext() override { return grpc_context_; } - MOCK_METHOD(Server::DrainManager&, drainManager, ()); - MOCK_METHOD(Init::Manager&, initManager, ()); - MOCK_METHOD(ServerLifecycleNotifier&, lifecycleNotifier, ()); - - testing::NiceMock cluster_manager_; - testing::NiceMock dispatcher_; - testing::NiceMock drain_manager_; - testing::NiceMock local_info_; - testing::NiceMock random_; - testing::NiceMock runtime_loader_; - testing::NiceMock scope_; - testing::NiceMock thread_local_; - testing::NiceMock validation_context_; - Singleton::ManagerPtr singleton_manager_; - testing::NiceMock admin_; - Event::GlobalTimeSystem time_system_; - testing::NiceMock api_; - Grpc::ContextImpl grpc_context_; -}; - -class MockFactoryContext : public virtual FactoryContext { -public: - MockFactoryContext(); - ~MockFactoryContext() override; - - MOCK_METHOD(ServerFactoryContext&, getServerFactoryContext, (), (const)); - MOCK_METHOD(TransportSocketFactoryContext&, getTransportSocketFactoryContext, (), (const)); - MOCK_METHOD(AccessLog::AccessLogManager&, accessLogManager, ()); - MOCK_METHOD(Upstream::ClusterManager&, clusterManager, ()); - MOCK_METHOD(Event::Dispatcher&, dispatcher, ()); - MOCK_METHOD(const Network::DrainDecision&, drainDecision, ()); - MOCK_METHOD(bool, healthCheckFailed, ()); - MOCK_METHOD(Init::Manager&, initManager, ()); - MOCK_METHOD(ServerLifecycleNotifier&, lifecycleNotifier, ()); - MOCK_METHOD(Envoy::Runtime::RandomGenerator&, random, ()); - MOCK_METHOD(Envoy::Runtime::Loader&, runtime, ()); - MOCK_METHOD(Stats::Scope&, scope, ()); - MOCK_METHOD(Singleton::Manager&, singletonManager, ()); - MOCK_METHOD(OverloadManager&, overloadManager, ()); - MOCK_METHOD(ThreadLocal::Instance&, threadLocal, ()); - MOCK_METHOD(Server::Admin&, admin, ()); - MOCK_METHOD(Stats::Scope&, listenerScope, ()); - MOCK_METHOD(const LocalInfo::LocalInfo&, localInfo, (), (const)); - MOCK_METHOD(const envoy::config::core::v3::Metadata&, listenerMetadata, (), (const)); - MOCK_METHOD(envoy::config::core::v3::TrafficDirection, direction, (), (const)); - MOCK_METHOD(TimeSource&, timeSource, ()); - Event::TestTimeSystem& timeSystem() { return time_system_; } - Grpc::Context& grpcContext() override { return grpc_context_; } - Http::Context& httpContext() override { return http_context_; } - MOCK_METHOD(ProcessContextOptRef, processContext, ()); - MOCK_METHOD(ProtobufMessage::ValidationContext&, messageValidationContext, ()); - MOCK_METHOD(ProtobufMessage::ValidationVisitor&, messageValidationVisitor, ()); - MOCK_METHOD(Api::Api&, api, ()); - - testing::NiceMock server_factory_context_; - testing::NiceMock access_log_manager_; - testing::NiceMock cluster_manager_; - testing::NiceMock dispatcher_; - testing::NiceMock drain_manager_; - testing::NiceMock init_manager_; - testing::NiceMock lifecycle_notifier_; - testing::NiceMock local_info_; - 
testing::NiceMock random_; - testing::NiceMock runtime_loader_; - testing::NiceMock scope_; - testing::NiceMock thread_local_; - Singleton::ManagerPtr singleton_manager_; - testing::NiceMock admin_; - Stats::IsolatedStoreImpl listener_scope_; - Event::GlobalTimeSystem time_system_; - testing::NiceMock validation_context_; - testing::NiceMock overload_manager_; - Grpc::ContextImpl grpc_context_; - Http::ContextImpl http_context_; - testing::NiceMock api_; -}; - -class MockTransportSocketFactoryContext : public TransportSocketFactoryContext { -public: - MockTransportSocketFactoryContext(); - ~MockTransportSocketFactoryContext() override; - - Secret::SecretManager& secretManager() override { return *(secret_manager_.get()); } - - MOCK_METHOD(Server::Admin&, admin, ()); - MOCK_METHOD(Ssl::ContextManager&, sslContextManager, ()); - MOCK_METHOD(Stats::Scope&, scope, ()); - MOCK_METHOD(Upstream::ClusterManager&, clusterManager, ()); - MOCK_METHOD(const LocalInfo::LocalInfo&, localInfo, (), (const)); - MOCK_METHOD(Event::Dispatcher&, dispatcher, ()); - MOCK_METHOD(Envoy::Runtime::RandomGenerator&, random, ()); - MOCK_METHOD(Stats::Store&, stats, ()); - MOCK_METHOD(Init::Manager&, initManager, ()); - MOCK_METHOD(Singleton::Manager&, singletonManager, ()); - MOCK_METHOD(ThreadLocal::SlotAllocator&, threadLocal, ()); - MOCK_METHOD(ProtobufMessage::ValidationVisitor&, messageValidationVisitor, ()); - MOCK_METHOD(Api::Api&, api, ()); - - testing::NiceMock cluster_manager_; - testing::NiceMock api_; - testing::NiceMock config_tracker_; - std::unique_ptr secret_manager_; -}; - -class MockListenerFactoryContext : public MockFactoryContext, public ListenerFactoryContext { -public: - MockListenerFactoryContext(); - ~MockListenerFactoryContext() override; - - const Network::ListenerConfig& listenerConfig() const override { return listener_config_; } - MOCK_METHOD(const Network::ListenerConfig&, listenerConfig_, (), (const)); - - Network::MockListenerConfig listener_config_; -}; - -class MockHealthCheckerFactoryContext : public virtual HealthCheckerFactoryContext { -public: - MockHealthCheckerFactoryContext(); - ~MockHealthCheckerFactoryContext() override; - - MOCK_METHOD(Upstream::Cluster&, cluster, ()); - MOCK_METHOD(Event::Dispatcher&, dispatcher, ()); - MOCK_METHOD(Envoy::Runtime::RandomGenerator&, random, ()); - MOCK_METHOD(Envoy::Runtime::Loader&, runtime, ()); - MOCK_METHOD(Upstream::HealthCheckEventLogger*, eventLogger_, ()); - MOCK_METHOD(ProtobufMessage::ValidationVisitor&, messageValidationVisitor, ()); - MOCK_METHOD(Api::Api&, api, ()); - Upstream::HealthCheckEventLoggerPtr eventLogger() override { - return Upstream::HealthCheckEventLoggerPtr(eventLogger_()); - } - - testing::NiceMock cluster_; - testing::NiceMock dispatcher_; - testing::NiceMock random_; - testing::NiceMock runtime_; - testing::NiceMock* event_logger_{}; - testing::NiceMock api_{}; -}; - -class MockFilterChainFactoryContext : public MockFactoryContext, public FilterChainFactoryContext { -public: - MockFilterChainFactoryContext(); - ~MockFilterChainFactoryContext() override; -}; - -class MockTracerFactory : public TracerFactory { -public: - explicit MockTracerFactory(const std::string& name); - ~MockTracerFactory() override; - - std::string name() const override { return name_; } - - MOCK_METHOD(ProtobufTypes::MessagePtr, createEmptyConfigProto, ()); - MOCK_METHOD(Tracing::HttpTracerSharedPtr, createHttpTracer, - (const Protobuf::Message& config, TracerFactoryContext& context)); - -private: - std::string name_; -}; - -class 
MockTracerFactoryContext : public TracerFactoryContext { -public: - MockTracerFactoryContext(); - ~MockTracerFactoryContext() override; - - MOCK_METHOD(ServerFactoryContext&, serverFactoryContext, ()); - MOCK_METHOD(ProtobufMessage::ValidationVisitor&, messageValidationVisitor, ()); - - testing::NiceMock server_factory_context_; -}; - -class MockBootstrapExtensionFactory : public BootstrapExtensionFactory { -public: - MockBootstrapExtensionFactory(); - ~MockBootstrapExtensionFactory() override; - - MOCK_METHOD(BootstrapExtensionPtr, createBootstrapExtension, - (const Protobuf::Message&, Configuration::ServerFactoryContext&), (override)); - MOCK_METHOD(ProtobufTypes::MessagePtr, createEmptyConfigProto, (), (override)); - MOCK_METHOD(std::string, name, (), (const, override)); -}; - -} // namespace Configuration -} // namespace Server -} // namespace Envoy +#include "drain_manager.h" +#include "factory_context.h" +#include "filter_chain_factory_context.h" +#include "guard_dog.h" +#include "health_checker_factory_context.h" +#include "hot_restart.h" +#include "instance.h" +#include "listener_component_factory.h" +#include "listener_factory_context.h" +#include "listener_manager.h" +#include "main.h" +#include "options.h" +#include "overload_manager.h" +#include "server_lifecycle_notifier.h" +#include "tracer_factory.h" +#include "tracer_factory_context.h" +#include "transport_socket_factory_context.h" +#include "watch_dog.h" +#include "worker.h" +#include "worker_factory.h" diff --git a/test/mocks/server/options.cc b/test/mocks/server/options.cc new file mode 100644 index 000000000000..c407ab6ef907 --- /dev/null +++ b/test/mocks/server/options.cc @@ -0,0 +1,51 @@ +#include "options.h" + +#include "envoy/admin/v3/server_info.pb.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Server { + +using ::testing::Invoke; +using ::testing::Return; +using ::testing::ReturnPointee; +using ::testing::ReturnRef; + +MockOptions::MockOptions(const std::string& config_path) : config_path_(config_path) { + ON_CALL(*this, concurrency()).WillByDefault(ReturnPointee(&concurrency_)); + ON_CALL(*this, configPath()).WillByDefault(ReturnRef(config_path_)); + ON_CALL(*this, configProto()).WillByDefault(ReturnRef(config_proto_)); + ON_CALL(*this, configYaml()).WillByDefault(ReturnRef(config_yaml_)); + ON_CALL(*this, bootstrapVersion()).WillByDefault(ReturnRef(bootstrap_version_)); + ON_CALL(*this, allowUnknownStaticFields()).WillByDefault(Invoke([this] { + return allow_unknown_static_fields_; + })); + ON_CALL(*this, rejectUnknownDynamicFields()).WillByDefault(Invoke([this] { + return reject_unknown_dynamic_fields_; + })); + ON_CALL(*this, ignoreUnknownDynamicFields()).WillByDefault(Invoke([this] { + return ignore_unknown_dynamic_fields_; + })); + ON_CALL(*this, adminAddressPath()).WillByDefault(ReturnRef(admin_address_path_)); + ON_CALL(*this, serviceClusterName()).WillByDefault(ReturnRef(service_cluster_name_)); + ON_CALL(*this, serviceNodeName()).WillByDefault(ReturnRef(service_node_name_)); + ON_CALL(*this, serviceZone()).WillByDefault(ReturnRef(service_zone_name_)); + ON_CALL(*this, logLevel()).WillByDefault(Return(log_level_)); + ON_CALL(*this, logPath()).WillByDefault(ReturnRef(log_path_)); + ON_CALL(*this, restartEpoch()).WillByDefault(ReturnPointee(&hot_restart_epoch_)); + ON_CALL(*this, hotRestartDisabled()).WillByDefault(ReturnPointee(&hot_restart_disabled_)); + ON_CALL(*this, signalHandlingEnabled()).WillByDefault(ReturnPointee(&signal_handling_enabled_)); + 
ON_CALL(*this, mutexTracingEnabled()).WillByDefault(ReturnPointee(&mutex_tracing_enabled_)); + ON_CALL(*this, cpusetThreadsEnabled()).WillByDefault(ReturnPointee(&cpuset_threads_enabled_)); + ON_CALL(*this, disabledExtensions()).WillByDefault(ReturnRef(disabled_extensions_)); + ON_CALL(*this, toCommandLineOptions()).WillByDefault(Invoke([] { + return std::make_unique(); + })); +} + +MockOptions::~MockOptions() = default; + +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/options.h b/test/mocks/server/options.h new file mode 100644 index 000000000000..31a6112dca35 --- /dev/null +++ b/test/mocks/server/options.h @@ -0,0 +1,76 @@ +#pragma once + +#include "envoy/config/bootstrap/v3/bootstrap.pb.h" +#include "envoy/server/options.h" + +#include "absl/strings/string_view.h" +#include "gmock/gmock.h" +#include "spdlog/spdlog.h" + +namespace Envoy { +namespace Server { +class MockOptions : public Options { +public: + MockOptions() : MockOptions(std::string()) {} + MockOptions(const std::string& config_path); + ~MockOptions() override; + + MOCK_METHOD(uint64_t, baseId, (), (const)); + MOCK_METHOD(bool, useDynamicBaseId, (), (const)); + MOCK_METHOD(const std::string&, baseIdPath, (), (const)); + MOCK_METHOD(uint32_t, concurrency, (), (const)); + MOCK_METHOD(const std::string&, configPath, (), (const)); + MOCK_METHOD(const envoy::config::bootstrap::v3::Bootstrap&, configProto, (), (const)); + MOCK_METHOD(const std::string&, configYaml, (), (const)); + MOCK_METHOD(const absl::optional&, bootstrapVersion, (), (const)); + MOCK_METHOD(bool, allowUnknownStaticFields, (), (const)); + MOCK_METHOD(bool, rejectUnknownDynamicFields, (), (const)); + MOCK_METHOD(bool, ignoreUnknownDynamicFields, (), (const)); + MOCK_METHOD(const std::string&, adminAddressPath, (), (const)); + MOCK_METHOD(Network::Address::IpVersion, localAddressIpVersion, (), (const)); + MOCK_METHOD(std::chrono::seconds, drainTime, (), (const)); + MOCK_METHOD(std::chrono::seconds, parentShutdownTime, (), (const)); + MOCK_METHOD(Server::DrainStrategy, drainStrategy, (), (const)); + MOCK_METHOD(spdlog::level::level_enum, logLevel, (), (const)); + MOCK_METHOD((const std::vector>&), + componentLogLevels, (), (const)); + MOCK_METHOD(const std::string&, logFormat, (), (const)); + MOCK_METHOD(bool, logFormatEscaped, (), (const)); + MOCK_METHOD(const std::string&, logPath, (), (const)); + MOCK_METHOD(uint64_t, restartEpoch, (), (const)); + MOCK_METHOD(std::chrono::milliseconds, fileFlushIntervalMsec, (), (const)); + MOCK_METHOD(Mode, mode, (), (const)); + MOCK_METHOD(const std::string&, serviceClusterName, (), (const)); + MOCK_METHOD(const std::string&, serviceNodeName, (), (const)); + MOCK_METHOD(const std::string&, serviceZone, (), (const)); + MOCK_METHOD(bool, hotRestartDisabled, (), (const)); + MOCK_METHOD(bool, signalHandlingEnabled, (), (const)); + MOCK_METHOD(bool, mutexTracingEnabled, (), (const)); + MOCK_METHOD(bool, fakeSymbolTableEnabled, (), (const)); + MOCK_METHOD(bool, cpusetThreadsEnabled, (), (const)); + MOCK_METHOD(const std::vector&, disabledExtensions, (), (const)); + MOCK_METHOD(Server::CommandLineOptionsPtr, toCommandLineOptions, (), (const)); + + std::string config_path_; + envoy::config::bootstrap::v3::Bootstrap config_proto_; + std::string config_yaml_; + absl::optional bootstrap_version_; + bool allow_unknown_static_fields_{}; + bool reject_unknown_dynamic_fields_{}; + bool ignore_unknown_dynamic_fields_{}; + std::string admin_address_path_; + std::string service_cluster_name_; + std::string 
service_node_name_; + std::string service_zone_name_; + spdlog::level::level_enum log_level_{spdlog::level::trace}; + std::string log_path_; + uint32_t concurrency_{1}; + uint64_t hot_restart_epoch_{}; + bool hot_restart_disabled_{}; + bool signal_handling_enabled_{true}; + bool mutex_tracing_enabled_{}; + bool cpuset_threads_enabled_{}; + std::vector disabled_extensions_; +}; +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/overload_manager.cc b/test/mocks/server/overload_manager.cc new file mode 100644 index 000000000000..d105df80e690 --- /dev/null +++ b/test/mocks/server/overload_manager.cc @@ -0,0 +1,19 @@ +#include "overload_manager.h" + +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Server { + +using ::testing::ReturnRef; +MockOverloadManager::MockOverloadManager() { + ON_CALL(*this, getThreadLocalOverloadState()).WillByDefault(ReturnRef(overload_state_)); +} + +MockOverloadManager::~MockOverloadManager() = default; + +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/overload_manager.h b/test/mocks/server/overload_manager.h new file mode 100644 index 000000000000..8ce63cef1b12 --- /dev/null +++ b/test/mocks/server/overload_manager.h @@ -0,0 +1,26 @@ +#pragma once + +#include + +#include "envoy/server/overload_manager.h" + +#include "gmock/gmock.h" + +namespace Envoy { +namespace Server { +class MockOverloadManager : public OverloadManager { +public: + MockOverloadManager(); + ~MockOverloadManager() override; + + // OverloadManager + MOCK_METHOD(void, start, ()); + MOCK_METHOD(bool, registerForAction, + (const std::string& action, Event::Dispatcher& dispatcher, + OverloadActionCb callback)); + MOCK_METHOD(ThreadLocalOverloadState&, getThreadLocalOverloadState, ()); + + ThreadLocalOverloadState overload_state_; +}; +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/server_lifecycle_notifier.cc b/test/mocks/server/server_lifecycle_notifier.cc new file mode 100644 index 000000000000..1cd6cd9d3b5c --- /dev/null +++ b/test/mocks/server/server_lifecycle_notifier.cc @@ -0,0 +1,16 @@ +#include "server_lifecycle_notifier.h" + +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Server { + +MockServerLifecycleNotifier::MockServerLifecycleNotifier() = default; + +MockServerLifecycleNotifier::~MockServerLifecycleNotifier() = default; + +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/server_lifecycle_notifier.h b/test/mocks/server/server_lifecycle_notifier.h new file mode 100644 index 000000000000..3442a194ac32 --- /dev/null +++ b/test/mocks/server/server_lifecycle_notifier.h @@ -0,0 +1,20 @@ +#pragma once + +#include "envoy/server/lifecycle_notifier.h" + +#include "gmock/gmock.h" +#include "spdlog/spdlog.h" + +namespace Envoy { +namespace Server { +class MockServerLifecycleNotifier : public ServerLifecycleNotifier { +public: + MockServerLifecycleNotifier(); + ~MockServerLifecycleNotifier() override; + + MOCK_METHOD(ServerLifecycleNotifier::HandlePtr, registerCallback, (Stage, StageCallback)); + MOCK_METHOD(ServerLifecycleNotifier::HandlePtr, registerCallback, + (Stage, StageCallbackWithCompletion)); +}; +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/tracer_factory.cc b/test/mocks/server/tracer_factory.cc new file mode 100644 index 000000000000..7dcaa39f7ae1 --- /dev/null +++ b/test/mocks/server/tracer_factory.cc @@ -0,0 +1,24 @@ +#include "tracer_factory.h" + +#include + 
+#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Server { +namespace Configuration { + +using ::testing::Invoke; + +MockTracerFactory::MockTracerFactory(const std::string& name) : name_(name) { + ON_CALL(*this, createEmptyConfigProto()).WillByDefault(Invoke([] { + return std::make_unique(); + })); +} + +MockTracerFactory::~MockTracerFactory() = default; + +} // namespace Configuration +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/tracer_factory.h b/test/mocks/server/tracer_factory.h new file mode 100644 index 000000000000..e342116ee2d5 --- /dev/null +++ b/test/mocks/server/tracer_factory.h @@ -0,0 +1,27 @@ +#pragma once + +#include "envoy/protobuf/message_validator.h" +#include "envoy/server/tracer_config.h" + +#include "gmock/gmock.h" + +namespace Envoy { +namespace Server { +namespace Configuration { +class MockTracerFactory : public TracerFactory { +public: + explicit MockTracerFactory(const std::string& name); + ~MockTracerFactory() override; + + std::string name() const override { return name_; } + + MOCK_METHOD(ProtobufTypes::MessagePtr, createEmptyConfigProto, ()); + MOCK_METHOD(Tracing::HttpTracerSharedPtr, createHttpTracer, + (const Protobuf::Message& config, TracerFactoryContext& context)); + +private: + std::string name_; +}; +} // namespace Configuration +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/tracer_factory_context.cc b/test/mocks/server/tracer_factory_context.cc new file mode 100644 index 000000000000..992a3854179f --- /dev/null +++ b/test/mocks/server/tracer_factory_context.cc @@ -0,0 +1,24 @@ +#include "tracer_factory_context.h" + +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Server { +namespace Configuration { + +using ::testing::ReturnRef; + +MockTracerFactoryContext::MockTracerFactoryContext() { + ON_CALL(*this, serverFactoryContext()).WillByDefault(ReturnRef(server_factory_context_)); + ON_CALL(*this, messageValidationVisitor()) + .WillByDefault(ReturnRef(ProtobufMessage::getStrictValidationVisitor())); +} + +MockTracerFactoryContext::~MockTracerFactoryContext() = default; + +} // namespace Configuration +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/tracer_factory_context.h b/test/mocks/server/tracer_factory_context.h new file mode 100644 index 000000000000..66a0212d0bb7 --- /dev/null +++ b/test/mocks/server/tracer_factory_context.h @@ -0,0 +1,25 @@ +#pragma once + +#include "envoy/server/configuration.h" + +#include "gmock/gmock.h" +#include "instance.h" +#include "tracer_factory.h" + +namespace Envoy { +namespace Server { +namespace Configuration { +class MockTracerFactoryContext : public TracerFactoryContext { +public: + MockTracerFactoryContext(); + ~MockTracerFactoryContext() override; + + MOCK_METHOD(ServerFactoryContext&, serverFactoryContext, ()); + MOCK_METHOD(ProtobufMessage::ValidationVisitor&, messageValidationVisitor, ()); + + testing::NiceMock server_factory_context_; +}; +} // namespace Configuration + +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/transport_socket_factory_context.cc b/test/mocks/server/transport_socket_factory_context.cc new file mode 100644 index 000000000000..0e4e50231a7d --- /dev/null +++ b/test/mocks/server/transport_socket_factory_context.cc @@ -0,0 +1,26 @@ +#include "transport_socket_factory_context.h" + +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Server { +namespace 
Configuration { + +using ::testing::ReturnRef; + +MockTransportSocketFactoryContext::MockTransportSocketFactoryContext() + : secret_manager_(std::make_unique(config_tracker_)) { + ON_CALL(*this, clusterManager()).WillByDefault(ReturnRef(cluster_manager_)); + ON_CALL(*this, api()).WillByDefault(ReturnRef(api_)); + ON_CALL(*this, messageValidationVisitor()) + .WillByDefault(ReturnRef(ProtobufMessage::getStrictValidationVisitor())); +} + +MockTransportSocketFactoryContext::~MockTransportSocketFactoryContext() = default; + +} // namespace Configuration +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/transport_socket_factory_context.h b/test/mocks/server/transport_socket_factory_context.h new file mode 100644 index 000000000000..a86f2b348485 --- /dev/null +++ b/test/mocks/server/transport_socket_factory_context.h @@ -0,0 +1,44 @@ +#pragma once + +#include "envoy/server/transport_socket_config.h" + +#include "common/secret/secret_manager_impl.h" + +#include "test/mocks/api/mocks.h" +#include "test/mocks/upstream/mocks.h" + +#include "config_tracker.h" +#include "gmock/gmock.h" + +namespace Envoy { +namespace Server { +namespace Configuration { +class MockTransportSocketFactoryContext : public TransportSocketFactoryContext { +public: + MockTransportSocketFactoryContext(); + ~MockTransportSocketFactoryContext() override; + + Secret::SecretManager& secretManager() override { return *(secret_manager_.get()); } + + MOCK_METHOD(Server::Admin&, admin, ()); + MOCK_METHOD(Ssl::ContextManager&, sslContextManager, ()); + MOCK_METHOD(Stats::Scope&, scope, ()); + MOCK_METHOD(Upstream::ClusterManager&, clusterManager, ()); + MOCK_METHOD(const LocalInfo::LocalInfo&, localInfo, (), (const)); + MOCK_METHOD(Event::Dispatcher&, dispatcher, ()); + MOCK_METHOD(Envoy::Runtime::RandomGenerator&, random, ()); + MOCK_METHOD(Stats::Store&, stats, ()); + MOCK_METHOD(Init::Manager&, initManager, ()); + MOCK_METHOD(Singleton::Manager&, singletonManager, ()); + MOCK_METHOD(ThreadLocal::SlotAllocator&, threadLocal, ()); + MOCK_METHOD(ProtobufMessage::ValidationVisitor&, messageValidationVisitor, ()); + MOCK_METHOD(Api::Api&, api, ()); + + testing::NiceMock cluster_manager_; + testing::NiceMock api_; + testing::NiceMock config_tracker_; + std::unique_ptr secret_manager_; +}; +} // namespace Configuration +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/watch_dog.cc b/test/mocks/server/watch_dog.cc new file mode 100644 index 000000000000..f07755963fe6 --- /dev/null +++ b/test/mocks/server/watch_dog.cc @@ -0,0 +1,16 @@ +#include "watch_dog.h" + +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Server { + +MockWatchDog::MockWatchDog() = default; + +MockWatchDog::~MockWatchDog() = default; + +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/watch_dog.h b/test/mocks/server/watch_dog.h new file mode 100644 index 000000000000..105761781c36 --- /dev/null +++ b/test/mocks/server/watch_dog.h @@ -0,0 +1,21 @@ +#pragma once + +#include "envoy/server/watchdog.h" + +#include "gmock/gmock.h" + +namespace Envoy { +namespace Server { +class MockWatchDog : public WatchDog { +public: + MockWatchDog(); + ~MockWatchDog() override; + + // Server::WatchDog + MOCK_METHOD(void, startWatchdog, (Event::Dispatcher & dispatcher)); + MOCK_METHOD(void, touch, ()); + MOCK_METHOD(Thread::ThreadId, threadId, (), (const)); + MOCK_METHOD(MonotonicTime, lastTouchTime, (), (const)); +}; +} // namespace Server +} // namespace Envoy diff 
--git a/test/mocks/server/worker.cc b/test/mocks/server/worker.cc new file mode 100644 index 000000000000..a7e981b299b1 --- /dev/null +++ b/test/mocks/server/worker.cc @@ -0,0 +1,50 @@ +#include "worker.h" + +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Server { + +using ::testing::_; +using ::testing::Invoke; + +MockWorker::MockWorker() { + ON_CALL(*this, addListener(_, _, _)) + .WillByDefault( + Invoke([this](absl::optional overridden_listener, + Network::ListenerConfig& config, AddListenerCompletion completion) -> void { + UNREFERENCED_PARAMETER(overridden_listener); + config.listenSocketFactory().getListenSocket(); + EXPECT_EQ(nullptr, add_listener_completion_); + add_listener_completion_ = completion; + })); + + ON_CALL(*this, removeListener(_, _)) + .WillByDefault( + Invoke([this](Network::ListenerConfig&, std::function completion) -> void { + EXPECT_EQ(nullptr, remove_listener_completion_); + remove_listener_completion_ = completion; + })); + + ON_CALL(*this, stopListener(_, _)) + .WillByDefault(Invoke([](Network::ListenerConfig&, std::function completion) -> void { + if (completion != nullptr) { + completion(); + } + })); + + ON_CALL(*this, removeFilterChains(_, _, _)) + .WillByDefault(Invoke([this](uint64_t, const std::list&, + std::function completion) -> void { + EXPECT_EQ(nullptr, remove_filter_chains_completion_); + remove_filter_chains_completion_ = completion; + })); +} + +MockWorker::~MockWorker() = default; + +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/worker.h b/test/mocks/server/worker.h new file mode 100644 index 000000000000..978ab3bbcc21 --- /dev/null +++ b/test/mocks/server/worker.h @@ -0,0 +1,54 @@ +#pragma once + +#include "envoy/server/worker.h" + +#include "absl/strings/string_view.h" +#include "gmock/gmock.h" + +namespace Envoy { +namespace Server { +class MockWorker : public Worker { +public: + MockWorker(); + ~MockWorker() override; + + void callAddCompletion(bool success) { + EXPECT_NE(nullptr, add_listener_completion_); + add_listener_completion_(success); + add_listener_completion_ = nullptr; + } + + void callRemovalCompletion() { + EXPECT_NE(nullptr, remove_listener_completion_); + remove_listener_completion_(); + remove_listener_completion_ = nullptr; + } + + void callDrainFilterChainsComplete() { + EXPECT_NE(nullptr, remove_filter_chains_completion_); + remove_filter_chains_completion_(); + remove_filter_chains_completion_ = nullptr; + } + + // Server::Worker + MOCK_METHOD(void, addListener, + (absl::optional overridden_listener, Network::ListenerConfig& listener, + AddListenerCompletion completion)); + MOCK_METHOD(uint64_t, numConnections, (), (const)); + MOCK_METHOD(void, removeListener, + (Network::ListenerConfig & listener, std::function completion)); + MOCK_METHOD(void, start, (GuardDog & guard_dog)); + MOCK_METHOD(void, initializeStats, (Stats::Scope & scope)); + MOCK_METHOD(void, stop, ()); + MOCK_METHOD(void, stopListener, + (Network::ListenerConfig & listener, std::function completion)); + MOCK_METHOD(void, removeFilterChains, + (uint64_t listener_tag, const std::list& filter_chains, + std::function completion)); + + AddListenerCompletion add_listener_completion_; + std::function remove_listener_completion_; + std::function remove_filter_chains_completion_; +}; +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/worker_factory.cc b/test/mocks/server/worker_factory.cc new file mode 100644 index 000000000000..65844ae8cc21 --- /dev/null +++ 
b/test/mocks/server/worker_factory.cc @@ -0,0 +1,14 @@ +#include "worker_factory.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Server { + +MockWorkerFactory::MockWorkerFactory() = default; + +MockWorkerFactory::~MockWorkerFactory() = default; + +} // namespace Server +} // namespace Envoy diff --git a/test/mocks/server/worker_factory.h b/test/mocks/server/worker_factory.h new file mode 100644 index 000000000000..3c05ed76566c --- /dev/null +++ b/test/mocks/server/worker_factory.h @@ -0,0 +1,23 @@ +#pragma once + +#include "envoy/server/worker.h" + +#include "gmock/gmock.h" +#include "worker.h" + +namespace Envoy { +namespace Server { +class MockWorkerFactory : public WorkerFactory { +public: + MockWorkerFactory(); + ~MockWorkerFactory() override; + + // Server::WorkerFactory + WorkerPtr createWorker(OverloadManager&, const std::string&) override { + return WorkerPtr{createWorker_()}; + } + + MOCK_METHOD(Worker*, createWorker_, ()); +}; +} // namespace Server +} // namespace Envoy From 7093555e1e2a4655093868aac0ff1a6f31678db9 Mon Sep 17 00:00:00 2001 From: htuch Date: Mon, 6 Jul 2020 20:46:59 -0400 Subject: [PATCH 532/909] tools: simple path heuristics for delta proto_sync.py. (#11862) By ignoring .proto files that haven't changed in the local tree, we can skip performing proto format/sync for all paths whenever there is an API change. This should be a significant speed-up for most developers making small API changes. This optimization can be skipped with FORCE_PROTO_FORMAT=yes in the environment. Risk level: Low (tooling only) Testing: Manual changes in my local tree. Speedy. Fixes #11365 Signed-off-by: Harvey Tuch --- tools/proto_format/proto_sync.py | 55 ++++++++++++++++++++++++++++++-- 1 file changed, 52 insertions(+), 3 deletions(-) diff --git a/tools/proto_format/proto_sync.py b/tools/proto_format/proto_sync.py index acbb8f091935..e3b7668913fb 100755 --- a/tools/proto_format/proto_sync.py +++ b/tools/proto_format/proto_sync.py @@ -95,7 +95,7 @@ def GetDestinationPath(src): matches[0])).joinpath(src_path.name.split('.')[0] + ".proto") -def GetAbsDestinationPath(dst_root, src): +def GetAbsRelDestinationPath(dst_root, src): """Obtain absolute path from a proto file path combined with destination root. Creates the parent directory if necessary. @@ -107,7 +107,7 @@ def GetAbsDestinationPath(dst_root, src): rel_dst_path = GetDestinationPath(src) dst = dst_root.joinpath(rel_dst_path) dst.parent.mkdir(0o755, parents=True, exist_ok=True) - return dst + return dst, rel_dst_path def ProtoPrint(src, dst): @@ -330,7 +330,50 @@ def GitStatus(path): return subprocess.check_output(['git', 'status', '--porcelain', str(path)]).decode() +def GitModifiedFiles(path, suffix): + """Obtain a list of modified files since the last commit merged by GitHub. + + Args: + path: path to examine. + suffix: path suffix to filter with. + Return: + A list of strings providing the paths of modified files in the repo. + """ + try: + modified_files = subprocess.check_output( + ['tools/git/modified_since_last_github_commit.sh', 'api', 'proto']).decode().split() + return modified_files + except subprocess.CalledProcessError as e: + if e.returncode == 1: + return [] + raise + + +# If we're not forcing format, i.e. FORCE_PROTO_FORMAT=yes, in the environment, +# then try and see if we can skip reformatting based on some simple path +# heuristics. This saves a ton of time, since proto format and sync is not +# running under Bazel and can't do change detection.
+def ShouldSync(path, api_proto_modified_files, py_tools_modified_files): + if os.getenv('FORCE_PROTO_FORMAT') == 'yes': + return True + # If tools change, safest thing to do is rebuild everything. + if len(py_tools_modified_files) > 0: + return True + # Check to see if the basename of the file has been modified since the last + # GitHub commit. If so, rebuild. This is safe and conservative across package + # migrations in v3 and v4alpha; we could achieve a lower rate of false + # positives if we examined package migration annotations, at the expense of + # complexity. + for p in api_proto_modified_files: + if os.path.basename(p) in path: + return True + # Otherwise we can safely skip syncing. + return False + + def Sync(api_root, mode, labels, shadow): + api_proto_modified_files = GitModifiedFiles('api', 'proto') + py_tools_modified_files = GitModifiedFiles('tools', 'py') with tempfile.TemporaryDirectory() as tmp: dst_dir = pathlib.Path(tmp).joinpath("b") paths = [] @@ -343,7 +386,13 @@ def Sync(api_root, mode, labels, shadow): dst_src_paths = defaultdict(list) for path in paths: if os.stat(path).st_size > 0: - dst_src_paths[GetAbsDestinationPath(dst_dir, path)].append(path) + abs_dst_path, rel_dst_path = GetAbsRelDestinationPath(dst_dir, path) + if ShouldSync(path, api_proto_modified_files, py_tools_modified_files): + dst_src_paths[abs_dst_path].append(path) + else: + print('Skipping sync of %s' % path) + src_path = str(pathlib.Path(api_root, rel_dst_path)) + shutil.copy(src_path, abs_dst_path) with mp.Pool() as p: pkg_deps = p.map(SyncProtoFile, dst_src_paths.items()) SyncBuildFiles(mode, dst_dir) From 4d4c58505c73971f65d947c7eb7b1313d376e359 Mon Sep 17 00:00:00 2001 From: chaoqin-li1123 <55518381+chaoqin-li1123@users.noreply.github.com> Date: Mon, 6 Jul 2020 20:02:57 -0500 Subject: [PATCH 533/909] Improve time complexity of removal of callback handle. (#11751) Improve the time complexity of removal of a callback handle from O(N) to O(1) by storing a list iterator to itself inside the callback_holder. Currently, when a callback handle is removed from the list, the pointer of the handle is used for a linear search to locate the callback holder and delete it. Since list.erase() costs constant time if we provide the iterator, and list iterators don't get invalidated by other list operations, we can store the iterator inside the callback holder when we initialize it and use it for constant-time removal. Risk Level: Low Testing: Pass all tests in /test repo. Fixes #11665 Signed-off-by: chaoqinli --- source/common/common/callback_impl.h | 18 +++++++++--------- test/integration/stats_integration_test.cc | 10 +++++++--- 2 files changed, 16 insertions(+), 12 deletions(-) diff --git a/source/common/common/callback_impl.h b/source/common/common/callback_impl.h index 3e9b554df7e2..d1d3ebdbe496 100644 --- a/source/common/common/callback_impl.h +++ b/source/common/common/callback_impl.h @@ -24,6 +24,9 @@ template class CallbackManager { */ CallbackHandle* add(Callback callback) { callbacks_.emplace_back(*this, callback); + // get the list iterator of added callback handle, which will be used to remove itself from + // callbacks_ list.
+ callbacks_.back().it_ = (--callbacks_.end()); return &callbacks_.back(); } @@ -46,24 +49,21 @@ template class CallbackManager { CallbackHolder(CallbackManager& parent, Callback cb) : parent_(parent), cb_(cb) {} // CallbackHandle - void remove() override { parent_.remove(this); } + void remove() override { parent_.remove(it_); } CallbackManager& parent_; Callback cb_; + + // the iterator of this callback holder inside callbacks_ list + // upon removal, use this iterator to delete callback holder in O(1) + typename std::list::iterator it_; }; /** * Remove a member update callback added via add(). * @param handle supplies the callback handle to remove. */ - void remove(CallbackHandle* handle) { - ASSERT(std::find_if(callbacks_.begin(), callbacks_.end(), - [handle](const CallbackHolder& holder) -> bool { - return handle == &holder; - }) != callbacks_.end()); - callbacks_.remove_if( - [handle](const CallbackHolder& holder) -> bool { return handle == &holder; }); - } + void remove(typename std::list::iterator& it) { callbacks_.erase(it); } std::list callbacks_; }; diff --git a/test/integration/stats_integration_test.cc b/test/integration/stats_integration_test.cc index 8cf76dc86608..f57afd00ed81 100644 --- a/test/integration/stats_integration_test.cc +++ b/test/integration/stats_integration_test.cc @@ -274,6 +274,8 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithFakeSymbolTable) { // 2020/05/20 11223 44491 44600 Add primary clusters tracking to cluster manager. // 2020/06/10 11561 44491 44811 Make upstreams pluggable // 2020/04/23 10661 44425 46000 per-listener connection limits + // 2020/06/29 11751 44715 46000 Improve time complexity of removing callback handle + // in callback manager. // Note: when adjusting this value: EXPECT_MEMORY_EQ is active only in CI // 'release' builds, where we control the platform and tool-chain. So you @@ -287,7 +289,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithFakeSymbolTable) { // If you encounter a failure here, please see // https://github.com/envoyproxy/envoy/blob/master/source/docs/stats.md#stats-memory-tests // for details on how to fix. - EXPECT_MEMORY_EQ(m_per_cluster, 44491); + EXPECT_MEMORY_EQ(m_per_cluster, 44715); EXPECT_MEMORY_LE(m_per_cluster, 46000); // Round up to allow platform variations. } @@ -338,6 +340,8 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithRealSymbolTable) { // 2020/05/20 11223 36603 36800 Add primary clusters tracking to cluster manager. // 2020/06/10 11561 36603 36923 Make upstreams pluggable // 2020/04/23 10661 36537 37000 per-listener connection limits + // 2020/06/29 11751 36827 38000 Improve time complexity of removing callback handle. + // in callback manager. // Note: when adjusting this value: EXPECT_MEMORY_EQ is active only in CI // 'release' builds, where we control the platform and tool-chain. So you @@ -351,8 +355,8 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithRealSymbolTable) { // If you encounter a failure here, please see // https://github.com/envoyproxy/envoy/blob/master/source/docs/stats.md#stats-memory-tests // for details on how to fix. - EXPECT_MEMORY_EQ(m_per_cluster, 36603); - EXPECT_MEMORY_LE(m_per_cluster, 37000); + EXPECT_MEMORY_EQ(m_per_cluster, 36827); + EXPECT_MEMORY_LE(m_per_cluster, 38000); // Round up to allow platform variations. 
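As a reader aid for the O(1) removal technique applied by #11751 above, here is a minimal self-contained sketch of the pattern. It is illustrative only: the class and member names are invented for the example and this is not Envoy's CallbackManager, whose actual change is the callback_impl.h diff shown above.

#include <functional>
#include <iterator>
#include <list>
#include <utility>

// Sketch: each holder caches an iterator to its own position in the list, so
// removal becomes a direct std::list::erase(iterator) in O(1) instead of an
// O(N) pointer-matching scan (the old remove_if approach).
class SketchCallbackManager {
public:
  struct Holder {
    std::function<void()> cb_;
    std::list<Holder>::iterator it_; // valid until this element itself is erased
  };

  Holder* add(std::function<void()> cb) {
    holders_.emplace_back();
    holders_.back().cb_ = std::move(cb);
    holders_.back().it_ = std::prev(holders_.end()); // remember own position
    return &holders_.back();
  }

  // O(1): other std::list insertions/erasures do not invalidate the cached
  // iterator, which is what makes storing it in the holder safe.
  void remove(Holder* holder) { holders_.erase(holder->it_); }

private:
  std::list<Holder> holders_;
};

Usage mirrors the add()/remove() flow of the real CallbackManager: auto* handle = manager.add(cb); ... manager.remove(handle);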
} TEST_P(ClusterMemoryTestRunner, MemoryLargeHostSizeWithStats) { From 1cd568c46b165885e9846c3b1308dc81cd4cc15a Mon Sep 17 00:00:00 2001 From: Rei Shimizu Date: Tue, 7 Jul 2020 10:27:10 +0900 Subject: [PATCH 534/909] re2: bump 2020-07-06 (#11892) Signed-off-by: Shikugawa --- bazel/repository_locations.bzl | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 4ea42e9748a3..f27a09d7eefe 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -432,14 +432,11 @@ DEPENDENCY_REPOSITORIES = dict( use_category = ["dataplane"], cpe = "N/A", ), - # TODO(shikugawa): replace this with release tag after released package which includes - # disable pthread when build with emscripten. We use hash temporary to enable our changes to - # build envoy-wasm library with emscripten. https://github.com/google/re2/pull/263 com_googlesource_code_re2 = dict( - sha256 = "455bcacd2b94fca8897decd81172c5a93e5303ea0e5816b410877c51d6179ffb", - strip_prefix = "re2-2b25567a8ee3b6e97c3cd05d616f296756c52759", - # 2020-06-08 - urls = ["https://github.com/google/re2/archive/2b25567a8ee3b6e97c3cd05d616f296756c52759.tar.gz"], + sha256 = "2e9489a31ae007c81e90e8ec8a15d62d58a9c18d4fd1603f6441ef248556b41f", + strip_prefix = "re2-2020-07-06", + # 2020-07-06 + urls = ["https://github.com/google/re2/archive/2020-07-06.tar.gz"], use_category = ["dataplane"], cpe = "N/A", ), From d087601732f4414b3ab6c39918c6264545a9114f Mon Sep 17 00:00:00 2001 From: Michal Rostecki Date: Tue, 7 Jul 2020 03:38:38 +0200 Subject: [PATCH 535/909] build: Update spdlog to 1.6.1 (#11844) Signed-off-by: Michal Rostecki --- bazel/repository_locations.bzl | 6 +++--- source/extensions/filters/http/lua/lua_filter.cc | 3 +++ 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index f27a09d7eefe..b2501eca906f 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -154,9 +154,9 @@ DEPENDENCY_REPOSITORIES = dict( cpe = "N/A", ), com_github_gabime_spdlog = dict( - sha256 = "afd18f62d1bc466c60bef088e6b637b0284be88c515cedc59ad4554150af6043", - strip_prefix = "spdlog-1.4.0", - urls = ["https://github.com/gabime/spdlog/archive/v1.4.0.tar.gz"], + sha256 = "378a040d91f787aec96d269b0c39189f58a6b852e4cbf9150ccfacbe85ebbbfc", + strip_prefix = "spdlog-1.6.1", + urls = ["https://github.com/gabime/spdlog/archive/v1.6.1.tar.gz"], use_category = ["observability"], cpe = "N/A", ), diff --git a/source/extensions/filters/http/lua/lua_filter.cc b/source/extensions/filters/http/lua/lua_filter.cc index 399c61180a88..4f69c77bc9e1 100644 --- a/source/extensions/filters/http/lua/lua_filter.cc +++ b/source/extensions/filters/http/lua/lua_filter.cc @@ -715,6 +715,9 @@ void Filter::scriptLog(spdlog::level::level_enum level, const char* message) { return; case spdlog::level::off: NOT_IMPLEMENTED_GCOVR_EXCL_LINE; + return; + case spdlog::level::n_levels: + NOT_REACHED_GCOVR_EXCL_LINE; } } From e05e121a3fd8324686bc00dc0c57efe1c4020657 Mon Sep 17 00:00:00 2001 From: jianwen612 <55008549+jianwen612@users.noreply.github.com> Date: Tue, 7 Jul 2020 07:29:24 -0500 Subject: [PATCH 536/909] [fuzz] added fuzzer for network filter "ext_authz" (#11702) Added fuzz test code for network filter ext_authz. Added test input protobuf. Added two simple test cases. 
After running for one minute with libfuzzer: Line coverage for the .cc and .h files of ext_authz is 94.9 % Function coverage for the .cc and .h files of ext_authz is 81.2% Signed-off-by: jianwen --- .../filters/network/ext_authz/BUILD | 25 ++++ ...h-309531f09ce8c0c71f272c7145da9d5528c3e8fc | 23 ++++ ...h-72c994c40b30ff66b72f401055681e9851fea7a2 | 38 ++++++ .../network/ext_authz/ext_authz_fuzz.proto | 41 ++++++ .../network/ext_authz/ext_authz_fuzz_test.cc | 125 ++++++++++++++++++ 5 files changed, 252 insertions(+) create mode 100644 test/extensions/filters/network/ext_authz/ext_authz_corpus/crash-309531f09ce8c0c71f272c7145da9d5528c3e8fc create mode 100644 test/extensions/filters/network/ext_authz/ext_authz_corpus/crash-72c994c40b30ff66b72f401055681e9851fea7a2 create mode 100644 test/extensions/filters/network/ext_authz/ext_authz_fuzz.proto create mode 100644 test/extensions/filters/network/ext_authz/ext_authz_fuzz_test.cc diff --git a/test/extensions/filters/network/ext_authz/BUILD b/test/extensions/filters/network/ext_authz/BUILD index 6004f220d387..20e8b566ebae 100644 --- a/test/extensions/filters/network/ext_authz/BUILD +++ b/test/extensions/filters/network/ext_authz/BUILD @@ -1,6 +1,8 @@ load( "//bazel:envoy_build_system.bzl", + "envoy_cc_fuzz_test", "envoy_package", + "envoy_proto_library", ) load( "//test/extensions:extensions_build_system.bzl", @@ -43,3 +45,26 @@ envoy_extension_cc_test( "@envoy_api//envoy/extensions/filters/network/ext_authz/v3:pkg_cc_proto", ], ) + +envoy_proto_library( + name = "ext_authz_fuzz_proto", + srcs = ["ext_authz_fuzz.proto"], + deps = [ + "@envoy_api//envoy/extensions/filters/network/ext_authz/v3:pkg", + ], +) + +envoy_cc_fuzz_test( + name = "ext_authz_fuzz_test", + srcs = ["ext_authz_fuzz_test.cc"], + corpus = "ext_authz_corpus", + deps = [ + ":ext_authz_fuzz_proto_cc_proto", + "//source/common/buffer:buffer_lib", + "//source/extensions/filters/network/ext_authz", + "//test/extensions/filters/common/ext_authz:ext_authz_mocks", + "//test/mocks/network:network_mocks", + "//test/mocks/runtime:runtime_mocks", + "@envoy_api//envoy/extensions/filters/network/ext_authz/v3:pkg_cc_proto", + ], +) diff --git a/test/extensions/filters/network/ext_authz/ext_authz_corpus/crash-309531f09ce8c0c71f272c7145da9d5528c3e8fc b/test/extensions/filters/network/ext_authz/ext_authz_corpus/crash-309531f09ce8c0c71f272c7145da9d5528c3e8fc new file mode 100644 index 000000000000..840c6e068f8c --- /dev/null +++ b/test/extensions/filters/network/ext_authz/ext_authz_corpus/crash-309531f09ce8c0c71f272c7145da9d5528c3e8fc @@ -0,0 +1,23 @@ +config { + stat_prefix: "\361\261\261\261\361\261\261\261\361\261\261\261\361\261\261\261\361\261\261\261\361\261\261\261\321\261" + failure_mode_allow: true + include_peer_certificate: true +} +actions { + on_data { + result { + check_status_ok { + } + } + data: "123" + } +} +actions { + remote_close { + } +} +actions { + local_close { + } +} + diff --git a/test/extensions/filters/network/ext_authz/ext_authz_corpus/crash-72c994c40b30ff66b72f401055681e9851fea7a2 b/test/extensions/filters/network/ext_authz/ext_authz_corpus/crash-72c994c40b30ff66b72f401055681e9851fea7a2 new file mode 100644 index 000000000000..b20e1a96bece --- /dev/null +++ b/test/extensions/filters/network/ext_authz/ext_authz_corpus/crash-72c994c40b30ff66b72f401055681e9851fea7a2 @@ -0,0 +1,38 @@ +config { + stat_prefix: "envoy.extensions.filters.network.e" + failure_mode_allow: true +} +actions { + on_new_connection { + } +} +actions { + on_data { + data: 
"\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377" + end_stream: true + result { + check_status_denied { + } + } + } +} +actions { + on_data { + data: "CCCCCCCCCCCC" + end_stream: true + result { + check_status_denied { + } + } + } +} +actions { + on_data { + data: "\000\000\000\000" + end_stream: true + result { + check_status_error { + } + } + } +} diff --git a/test/extensions/filters/network/ext_authz/ext_authz_fuzz.proto b/test/extensions/filters/network/ext_authz/ext_authz_fuzz.proto new file mode 100644 index 000000000000..326311003f95 --- /dev/null +++ b/test/extensions/filters/network/ext_authz/ext_authz_fuzz.proto @@ -0,0 +1,41 @@ +syntax = "proto3"; +package envoy.extensions.filters.network.ext_authz; + +import "envoy/extensions/filters/network/ext_authz/v3/ext_authz.proto"; +import "google/protobuf/empty.proto"; +import "validate/validate.proto"; + +message Result { + oneof result_selector { + option (validate.required) = true; + // Authorization check status + google.protobuf.Empty check_status_error = 1; + google.protobuf.Empty check_status_denied = 2; + google.protobuf.Empty check_status_ok = 3; + } +} + +message OnData { + bytes data = 1; + bool end_stream = 2; + // optional: to set the default authorization check result for this and the following onData() + Result result = 3; +} + +message Action { + oneof action_selector { + option (validate.required) = true; + // Call onNewConnection(). + google.protobuf.Empty on_new_connection = 1; + // Call onData(). + OnData on_data = 2; + // Connection close + google.protobuf.Empty remote_close = 3; + google.protobuf.Empty local_close = 4; + } +} +message ExtAuthzTestCase { + envoy.extensions.filters.network.ext_authz.v3.ExtAuthz config = 1 + [(validate.rules).message = {required: true}]; + repeated Action actions = 2; +} \ No newline at end of file diff --git a/test/extensions/filters/network/ext_authz/ext_authz_fuzz_test.cc b/test/extensions/filters/network/ext_authz/ext_authz_fuzz_test.cc new file mode 100644 index 000000000000..b70ca28272b8 --- /dev/null +++ b/test/extensions/filters/network/ext_authz/ext_authz_fuzz_test.cc @@ -0,0 +1,125 @@ +#include "envoy/extensions/filters/network/ext_authz/v3/ext_authz.pb.h" + +#include "common/buffer/buffer_impl.h" +#include "common/network/address_impl.h" + +#include "extensions/filters/network/ext_authz/ext_authz.h" + +#include "test/extensions/filters/common/ext_authz/mocks.h" +#include "test/extensions/filters/network/ext_authz/ext_authz_fuzz.pb.validate.h" +#include "test/fuzz/fuzz_runner.h" +#include "test/mocks/network/mocks.h" +#include "test/mocks/runtime/mocks.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::ReturnRef; +using testing::WithArgs; + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace ExtAuthz { + +Filters::Common::ExtAuthz::ResponsePtr +makeAuthzResponse(Filters::Common::ExtAuthz::CheckStatus status) { + Filters::Common::ExtAuthz::ResponsePtr response = + std::make_unique(); + response->status = status; + return response; +} + +Filters::Common::ExtAuthz::CheckStatus resultCaseToCheckStatus( + envoy::extensions::filters::network::ext_authz::Result::ResultSelectorCase result_case) { + Filters::Common::ExtAuthz::CheckStatus check_status; + switch 
(result_case) { + case envoy::extensions::filters::network::ext_authz::Result::kCheckStatusOk: { + check_status = Filters::Common::ExtAuthz::CheckStatus::OK; + break; + } + case envoy::extensions::filters::network::ext_authz::Result::kCheckStatusError: { + check_status = Filters::Common::ExtAuthz::CheckStatus::Error; + break; + } + case envoy::extensions::filters::network::ext_authz::Result::kCheckStatusDenied: { + check_status = Filters::Common::ExtAuthz::CheckStatus::Denied; + break; + } + default: { + // Unhandled status + PANIC("A check status handle is missing"); + } + } + return check_status; +} + +DEFINE_PROTO_FUZZER(const envoy::extensions::filters::network::ext_authz::ExtAuthzTestCase& input) { + try { + TestUtility::validate(input); + } catch (const ProtoValidationException& e) { + ENVOY_LOG_MISC(debug, "ProtoValidationException: {}", e.what()); + return; + } catch (const ProtobufMessage::DeprecatedProtoFieldException& e) { + ENVOY_LOG_MISC(debug, "DeprecatedProtoFieldException: {}", e.what()); + return; + } catch (const EnvoyException& e) { + ENVOY_LOG_MISC(debug, "EnvoyException during validation: {}", e.what()); + } + + Stats::TestUtil::TestStore stats_store; + Filters::Common::ExtAuthz::MockClient* client = new Filters::Common::ExtAuthz::MockClient(); + envoy::extensions::filters::network::ext_authz::v3::ExtAuthz proto_config = input.config(); + + ConfigSharedPtr config = std::make_shared(proto_config, stats_store); + std::unique_ptr filter = + std::make_unique(config, Filters::Common::ExtAuthz::ClientPtr{client}); + + NiceMock filter_callbacks; + filter->initializeReadFilterCallbacks(filter_callbacks); + static Network::Address::InstanceConstSharedPtr addr = + std::make_shared("/test/test.sock"); + + ON_CALL(filter_callbacks.connection_, remoteAddress()).WillByDefault(ReturnRef(addr)); + ON_CALL(filter_callbacks.connection_, localAddress()).WillByDefault(ReturnRef(addr)); + + for (const auto& action : input.actions()) { + switch (action.action_selector_case()) { + case envoy::extensions::filters::network::ext_authz::Action::kOnData: { + // Optional input field to set default authorization check result for the following "onData()" + if (action.on_data().has_result()) { + ON_CALL(*client, check(_, _, _, _)) + .WillByDefault(WithArgs<0>( + Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { + callbacks.onComplete(makeAuthzResponse( + resultCaseToCheckStatus(action.on_data().result().result_selector_case()))); + }))); + } + Buffer::OwnedImpl buffer(action.on_data().data()); + filter->onData(buffer, action.on_data().end_stream()); + break; + } + case envoy::extensions::filters::network::ext_authz::Action::kOnNewConnection: { + filter->onNewConnection(); + break; + } + case envoy::extensions::filters::network::ext_authz::Action::kRemoteClose: { + filter_callbacks.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); + break; + } + case envoy::extensions::filters::network::ext_authz::Action::kLocalClose: { + filter_callbacks.connection_.raiseEvent(Network::ConnectionEvent::LocalClose); + break; + } + default: { + // Unhandled actions + PANIC("A case is missing for an action"); + } + } + } +} + +} // namespace ExtAuthz +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file From edc6e68192cc72c340adc85aa4ea4f0c625e5240 Mon Sep 17 00:00:00 2001 From: Rama Chavali Date: Tue, 7 Jul 2020 20:11:46 +0530 Subject: [PATCH 537/909] change eds_service_name method to align with method naming style (#11917) 
Signed-off-by: Rama Chavali --- include/envoy/upstream/upstream.h | 2 +- source/common/upstream/load_stats_reporter.cc | 4 ++-- source/common/upstream/upstream_impl.h | 2 +- source/server/admin/admin.cc | 4 ++-- test/common/upstream/upstream_impl_test.cc | 2 +- test/mocks/upstream/cluster_info.cc | 2 +- test/mocks/upstream/cluster_info.h | 2 +- 7 files changed, 9 insertions(+), 9 deletions(-) diff --git a/include/envoy/upstream/upstream.h b/include/envoy/upstream/upstream.h index 139eb8ebb3a3..70c9dd9755c8 100644 --- a/include/envoy/upstream/upstream.h +++ b/include/envoy/upstream/upstream.h @@ -899,7 +899,7 @@ class ClusterInfo { /** * @return eds cluster service_name of the cluster. */ - virtual absl::optional eds_service_name() const PURE; + virtual absl::optional edsServiceName() const PURE; /** * Create network filters on a new upstream connection. diff --git a/source/common/upstream/load_stats_reporter.cc b/source/common/upstream/load_stats_reporter.cc index 2a9404219d37..87e3982e8db9 100644 --- a/source/common/upstream/load_stats_reporter.cc +++ b/source/common/upstream/load_stats_reporter.cc @@ -73,8 +73,8 @@ void LoadStatsReporter::sendLoadStatsRequest() { auto& cluster = it->second.get(); auto* cluster_stats = request_.add_cluster_stats(); cluster_stats->set_cluster_name(cluster_name); - if (cluster.info()->eds_service_name().has_value()) { - cluster_stats->set_cluster_service_name(cluster.info()->eds_service_name().value()); + if (cluster.info()->edsServiceName().has_value()) { + cluster_stats->set_cluster_service_name(cluster.info()->edsServiceName().value()); } for (auto& host_set : cluster.prioritySet().hostSetsPerPriority()) { ENVOY_LOG(trace, "Load report locality count {}", host_set->hostsPerLocality().get().size()); diff --git a/source/common/upstream/upstream_impl.h b/source/common/upstream/upstream_impl.h index e76565fe6598..5cdb994b3f41 100644 --- a/source/common/upstream/upstream_impl.h +++ b/source/common/upstream/upstream_impl.h @@ -599,7 +599,7 @@ class ClusterInfoImpl : public ClusterInfo, protected Logger::Loggable eds_service_name() const override { return eds_service_name_; } + absl::optional edsServiceName() const override { return eds_service_name_; } void createNetworkFilterChain(Network::Connection&) const override; Http::Protocol diff --git a/source/server/admin/admin.cc b/source/server/admin/admin.cc index 20b08bc3100a..dd52e2807ffb 100644 --- a/source/server/admin/admin.cc +++ b/source/server/admin/admin.cc @@ -569,8 +569,8 @@ ProtobufTypes::MessagePtr AdminImpl::dumpEndpointConfigs() const { Upstream::ClusterInfoConstSharedPtr cluster_info = cluster.info(); envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment; - if (cluster_info->eds_service_name().has_value()) { - cluster_load_assignment.set_cluster_name(cluster_info->eds_service_name().value()); + if (cluster_info->edsServiceName().has_value()) { + cluster_load_assignment.set_cluster_name(cluster_info->edsServiceName().value()); } else { cluster_load_assignment.set_cluster_name(cluster_info->name()); } diff --git a/test/common/upstream/upstream_impl_test.cc b/test/common/upstream/upstream_impl_test.cc index 28c658c76b27..bb1521e59e64 100644 --- a/test/common/upstream/upstream_impl_test.cc +++ b/test/common/upstream/upstream_impl_test.cc @@ -2144,7 +2144,7 @@ TEST_F(ClusterInfoImplTest, EdsServiceNamePopulation) { value: 0.3 )EOF"; auto cluster = makeCluster(yaml); - EXPECT_EQ(cluster->info()->eds_service_name(), "service_foo"); + EXPECT_EQ(cluster->info()->edsServiceName(), 
"service_foo"); const std::string unexpected_eds_config_yaml = R"EOF( name: name diff --git a/test/mocks/upstream/cluster_info.cc b/test/mocks/upstream/cluster_info.cc index 3a2c1aa8af8d..168395895ec7 100644 --- a/test/mocks/upstream/cluster_info.cc +++ b/test/mocks/upstream/cluster_info.cc @@ -52,7 +52,7 @@ MockClusterInfo::MockClusterInfo() ON_CALL(*this, connectTimeout()).WillByDefault(Return(std::chrono::milliseconds(1))); ON_CALL(*this, idleTimeout()).WillByDefault(Return(absl::optional())); ON_CALL(*this, name()).WillByDefault(ReturnRef(name_)); - ON_CALL(*this, eds_service_name()).WillByDefault(ReturnPointee(&eds_service_name_)); + ON_CALL(*this, edsServiceName()).WillByDefault(ReturnPointee(&eds_service_name_)); ON_CALL(*this, http1Settings()).WillByDefault(ReturnRef(http1_settings_)); ON_CALL(*this, http2Options()).WillByDefault(ReturnRef(http2_options_)); ON_CALL(*this, commonHttpProtocolOptions()) diff --git a/test/mocks/upstream/cluster_info.h b/test/mocks/upstream/cluster_info.h index 9e210997d337..f8bbe8363a81 100644 --- a/test/mocks/upstream/cluster_info.h +++ b/test/mocks/upstream/cluster_info.h @@ -130,7 +130,7 @@ class MockClusterInfo : public ClusterInfo { MOCK_METHOD(bool, warmHosts, (), (const)); MOCK_METHOD(const absl::optional&, upstreamHttpProtocolOptions, (), (const)); - MOCK_METHOD(absl::optional, eds_service_name, (), (const)); + MOCK_METHOD(absl::optional, edsServiceName, (), (const)); MOCK_METHOD(void, createNetworkFilterChain, (Network::Connection&), (const)); MOCK_METHOD(Http::Protocol, upstreamHttpProtocol, (absl::optional), (const)); From 8c1a2279e3b5908b27411bad3aacad3fc2dea221 Mon Sep 17 00:00:00 2001 From: Joshua Marantz Date: Tue, 7 Jul 2020 12:51:10 -0400 Subject: [PATCH 538/909] stats: remove no-longer-needed stats options. (#11740) --max-stats and --max-obj-name-len have been deprecated for some time; removing them finally. Additional Description: Risk Level: envoy startup scripts that might set these will need to remove these options, as Envoy will no longe accept them. Testing: //test/... Docs Changes: will be added. Release Notes: will be added. 
Fixes: #10111 Signed-off-by: Joshua Marantz --- source/server/options_impl.cc | 6 ------ source/server/options_impl.h | 2 +- test/server/options_impl_test.cc | 9 ++++++++- 3 files changed, 9 insertions(+), 8 deletions(-) diff --git a/source/server/options_impl.cc b/source/server/options_impl.cc index b5608b346634..78dde475bc9c 100644 --- a/source/server/options_impl.cc +++ b/source/server/options_impl.cc @@ -142,12 +142,6 @@ OptionsImpl::OptionsImpl(std::vector args, "One of 'serve' (default; validate configs and then serve " "traffic normally) or 'validate' (validate configs and exit).", false, "serve", "string", cmd); - TCLAP::ValueArg max_stats("", "max-stats", - "Deprecated and unused; please do not specify.", false, 123, - "uint64_t", cmd); - TCLAP::ValueArg max_obj_name_len("", "max-obj-name-len", - "Deprecated and unused; please do not specify.", false, - 123, "uint64_t", cmd); TCLAP::SwitchArg disable_hot_restart("", "disable-hot-restart", "Disable hot restart functionality", cmd, false); TCLAP::SwitchArg enable_mutex_tracing( diff --git a/source/server/options_impl.h b/source/server/options_impl.h index d609986926ec..bb8fd78eaadd 100644 --- a/source/server/options_impl.h +++ b/source/server/options_impl.h @@ -20,7 +20,7 @@ namespace Envoy { class OptionsImpl : public Server::Options, protected Logger::Loggable { public: /** - * Parameters are max_stat_name_len, hot_restart_enabled + * Parameters are hot_restart_enabled */ using HotRestartVersionCb = std::function; diff --git a/test/server/options_impl_test.cc b/test/server/options_impl_test.cc index 83247306bc3e..e2a52fafb781 100644 --- a/test/server/options_impl_test.cc +++ b/test/server/options_impl_test.cc @@ -263,6 +263,7 @@ TEST_F(OptionsImplTest, OptionsAreInSyncWithProto) { Server::CommandLineOptionsPtr command_line_options = options->toCommandLineOptions(); // Failure of this condition indicates that the server_info proto is not in sync with the options. // If an option is added/removed, please update server_info proto as well to keep it in sync. + // Currently the following 7 options are not defined in proto, hence the count differs by 7. // 1. version - default TCLAP argument. // 2. help - default TCLAP argument. @@ -271,7 +272,13 @@ TEST_F(OptionsImplTest, OptionsAreInSyncWithProto) { // 5. use-fake-symbol-table - short-term override for rollout of real symbol-table implementation. // 6. hot restart version - print the hot restart version and exit. // 7. log-format-prefix-with-location - short-term override for rollout of dynamic log format. - EXPECT_EQ(options->count() - 7, command_line_options->GetDescriptor()->field_count()); + const uint32_t options_not_in_proto = 7; + + // There are two deprecated options: "max_stats" and "max_obj_name_len". 
+ const uint32_t deprecated_options = 2; + + EXPECT_EQ(options->count() - options_not_in_proto, + command_line_options->GetDescriptor()->field_count() - deprecated_options); } TEST_F(OptionsImplTest, OptionsFromArgv) { From 0724ca46a35feade705bc7a6e883c5234faf68e1 Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Tue, 7 Jul 2020 10:15:52 -0700 Subject: [PATCH 539/909] coverage: keep trace logs (#11861) Signed-off-by: Lizan Zhou --- .bazelrc | 1 - ci/build_setup.sh | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/.bazelrc b/.bazelrc index 5f2ce2aa0f4f..3079386b6981 100644 --- a/.bazelrc +++ b/.bazelrc @@ -129,7 +129,6 @@ build:coverage --experimental_use_llvm_covmap build:coverage --collect_code_coverage build:coverage --test_tag_filters=-nocoverage build:coverage --instrumentation_filter="//source(?!/common/chromium_url|/extensions/quic_listeners/quiche/platform)[/:],//include[/:]" -coverage:test-coverage --test_arg="--log-path /dev/null" coverage:test-coverage --test_arg="-l trace" coverage:fuzz-coverage --config=asan-fuzzer coverage:fuzz-coverage --run_under=@envoy//bazel/coverage:fuzz_coverage_wrapper.sh diff --git a/ci/build_setup.sh b/ci/build_setup.sh index fcf9ae633fe4..ee60c484ca4e 100755 --- a/ci/build_setup.sh +++ b/ci/build_setup.sh @@ -83,7 +83,7 @@ trap cleanup EXIT export LLVM_ROOT="${LLVM_ROOT:-/opt/llvm}" "$(dirname "$0")"/../bazel/setup_clang.sh "${LLVM_ROOT}" -[[ "${BUILD_REASON}" != "PullRequest" ]] && BAZEL_EXTRA_TEST_OPTIONS+=" --nocache_test_results --test_output=all" +[[ "${BUILD_REASON}" != "PullRequest" ]] && BAZEL_EXTRA_TEST_OPTIONS+=" --nocache_test_results" export BAZEL_QUERY_OPTIONS="${BAZEL_OPTIONS}" export BAZEL_BUILD_OPTIONS="--verbose_failures ${BAZEL_OPTIONS} --action_env=HOME --action_env=PYTHONUSERBASE \ From 7a1f2bca8c6eed217f1e914695ea29985b3f860f Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Tue, 7 Jul 2020 13:54:21 -0400 Subject: [PATCH 540/909] tls: improve wildcard matching (#11921) Patching in 11885 with runtime guards and release notes Risk Level: Medium (changes to cert matching) Testing: new unit test Docs Changes: n/a Release Notes: inline Runtime guard: envoy.reloadable_features.fix_wildcard_matching Co-authored-by: Yann Soubeyrand yann.soubeyrand@camptocamp.com Signed-off-by: Alyssa Wilk alyssar@chromium.org Signed-off-by: Yann Soubeyrand yann.soubeyrand@camptocamp.com Signed-off-by: Alyssa Wilk --- docs/root/version_history/current.rst | 3 +- source/common/runtime/runtime_features.cc | 1 + source/extensions/transport_sockets/tls/BUILD | 1 + .../transport_sockets/tls/context_impl.cc | 8 ++- test/extensions/transport_sockets/tls/BUILD | 1 + .../tls/context_impl_test.cc | 63 +++++++++++++++++++ 6 files changed, 75 insertions(+), 2 deletions(-) diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 4fe27e834363..7e162974f931 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -10,6 +10,7 @@ Incompatible Behavior Changes * client_ssl_auth: the `auth_ip_white_list` stat has been renamed to :ref:`auth_ip_allowlist `. * router: path_redirect now keeps query string by default. This behavior may be reverted by setting runtime feature `envoy.reloadable_features.preserve_query_string_in_path_redirects` to false. +* tls: fixed a bug where wilcard matching for "\*.foo.com" also matched domains of the form "a.b.foo.com". This behavior can be temporarily reverted by setting runtime feature `envoy.reloadable_features.fix_wildcard_matching` to false. 
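To make the corrected wildcard rule concrete, the following is a minimal standalone sketch, not Envoy's implementation; the helper name wildcardDnsMatch and the test values are illustrative assumptions. The key point is that the prefix covered by "*." may not contain a dot, so "a.b.foo.com" no longer matches "*.foo.com".

#include <cassert>
#include <string>

// Illustrative sketch only: a "*." pattern covers exactly one DNS label.
static bool wildcardDnsMatch(const std::string& dns_name, const std::string& pattern) {
  if (pattern.size() > 1 && pattern[0] == '*' && pattern[1] == '.' &&
      dns_name.size() > pattern.size() - 1) {
    const size_t off = dns_name.size() - pattern.size() + 1;
    // The portion replacing '*' must not contain a '.', which is the fix.
    return dns_name.substr(0, off).find('.') == std::string::npos &&
           dns_name.compare(off, pattern.size() - 1, pattern, 1, pattern.size() - 1) == 0;
  }
  return dns_name == pattern;
}

int main() {
  assert(wildcardDnsMatch("a.lyft.com", "*.lyft.com"));    // single label matches
  assert(!wildcardDnsMatch("a.b.lyft.com", "*.lyft.com")); // multi-label rejected by the fix
  assert(!wildcardDnsMatch("alyft.com", "*.lyft.com"));
  assert(wildcardDnsMatch("lyft.com", "lyft.com"));
  return 0;
}

As the patch below shows, setting the runtime guard envoy.reloadable_features.fix_wildcard_matching to false skips the single-label check and restores the legacy multi-label behavior.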
Minor Behavior Changes ---------------------- @@ -162,4 +163,4 @@ Deprecated in :ref:`predicates `. * File access logger fields :ref:`format `, :ref:`json_format ` and :ref:`typed_json_format ` are deprecated in favor of :ref:`log_format `. * A warning is now logged when v2 xDS api is used. This behavior can be temporarily disabled by setting `envoy.reloadable_features.enable_deprecated_v2_api_warning` to `false`. -* Using cluster circuit breakers for DNS Cache is now deprecated in favor of :ref:`DNS cache circuit breakers `. This behavior can be temporarily disabled by setting `envoy.reloadable_features.enable_dns_cache_circuit_breakers` to `false`. \ No newline at end of file +* Using cluster circuit breakers for DNS Cache is now deprecated in favor of :ref:`DNS cache circuit breakers `. This behavior can be temporarily disabled by setting `envoy.reloadable_features.enable_dns_cache_circuit_breakers` to `false`. diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc index 25d5f70e6c56..ab28cf83f59b 100644 --- a/source/common/runtime/runtime_features.cc +++ b/source/common/runtime/runtime_features.cc @@ -68,6 +68,7 @@ constexpr const char* runtime_features[] = { "envoy.reloadable_features.enable_dns_cache_circuit_breakers", "envoy.reloadable_features.ext_authz_http_service_enable_case_sensitive_string_matcher", "envoy.reloadable_features.fix_upgrade_response", + "envoy.reloadable_features.fix_wildcard_matching", "envoy.reloadable_features.fixed_connection_close", "envoy.reloadable_features.http_default_alpn", "envoy.reloadable_features.listener_in_place_filterchain_update", diff --git a/source/extensions/transport_sockets/tls/BUILD b/source/extensions/transport_sockets/tls/BUILD index f885d1c6bfa9..b26ce0cc4d14 100644 --- a/source/extensions/transport_sockets/tls/BUILD +++ b/source/extensions/transport_sockets/tls/BUILD @@ -109,6 +109,7 @@ envoy_cc_library( "//source/common/common:utility_lib", "//source/common/network:address_lib", "//source/common/protobuf:utility_lib", + "//source/common/runtime:runtime_features_lib", "//source/common/stats:symbol_table_lib", "//source/common/stats:utility_lib", "//source/extensions/transport_sockets/tls/private_key:private_key_manager_lib", diff --git a/source/extensions/transport_sockets/tls/context_impl.cc b/source/extensions/transport_sockets/tls/context_impl.cc index ff8021f72558..f42f9077fc42 100644 --- a/source/extensions/transport_sockets/tls/context_impl.cc +++ b/source/extensions/transport_sockets/tls/context_impl.cc @@ -19,6 +19,7 @@ #include "common/common/utility.h" #include "common/network/address_impl.h" #include "common/protobuf/utility.h" +#include "common/runtime/runtime_features.h" #include "common/stats/utility.h" #include "extensions/transport_sockets/tls/utility.h" @@ -710,7 +711,12 @@ bool ContextImpl::dnsNameMatch(const std::string& dns_name, const char* pattern) if (pattern_len > 1 && pattern[0] == '*' && pattern[1] == '.') { if (dns_name.length() > pattern_len - 1) { const size_t off = dns_name.length() - pattern_len + 1; - return dns_name.compare(off, pattern_len - 1, pattern + 1) == 0; + if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.fix_wildcard_matching")) { + return dns_name.substr(0, off).find('.') == std::string::npos && + dns_name.compare(off, pattern_len - 1, pattern + 1) == 0; + } else { + return dns_name.compare(off, pattern_len - 1, pattern + 1) == 0; + } } } diff --git a/test/extensions/transport_sockets/tls/BUILD 
b/test/extensions/transport_sockets/tls/BUILD index 2e2586354fb9..595da8e44483 100644 --- a/test/extensions/transport_sockets/tls/BUILD +++ b/test/extensions/transport_sockets/tls/BUILD @@ -85,6 +85,7 @@ envoy_cc_test( "//test/mocks/ssl:ssl_mocks", "//test/test_common:environment_lib", "//test/test_common:simulated_time_system_lib", + "//test/test_common:test_runtime_lib", "@envoy_api//envoy/admin/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto", "@envoy_api//envoy/type/matcher/v3:pkg_cc_proto", diff --git a/test/extensions/transport_sockets/tls/context_impl_test.cc b/test/extensions/transport_sockets/tls/context_impl_test.cc index 468e22f0bd26..b39d93c5096c 100644 --- a/test/extensions/transport_sockets/tls/context_impl_test.cc +++ b/test/extensions/transport_sockets/tls/context_impl_test.cc @@ -24,6 +24,7 @@ #include "test/mocks/ssl/mocks.h" #include "test/test_common/environment.h" #include "test/test_common/simulated_time_system.h" +#include "test/test_common/test_runtime.h" #include "test/test_common/utility.h" #include "gtest/gtest.h" @@ -48,6 +49,23 @@ class SslContextImplTest : public SslCertsTest { TEST_F(SslContextImplTest, TestDnsNameMatching) { EXPECT_TRUE(ContextImpl::dnsNameMatch("lyft.com", "lyft.com")); EXPECT_TRUE(ContextImpl::dnsNameMatch("a.lyft.com", "*.lyft.com")); + EXPECT_FALSE(ContextImpl::dnsNameMatch("a.b.lyft.com", "*.lyft.com")); + EXPECT_FALSE(ContextImpl::dnsNameMatch("foo.test.com", "*.lyft.com")); + EXPECT_FALSE(ContextImpl::dnsNameMatch("lyft.com", "*.lyft.com")); + EXPECT_FALSE(ContextImpl::dnsNameMatch("alyft.com", "*.lyft.com")); + EXPECT_FALSE(ContextImpl::dnsNameMatch("alyft.com", "*lyft.com")); + EXPECT_FALSE(ContextImpl::dnsNameMatch("lyft.com", "*lyft.com")); + EXPECT_FALSE(ContextImpl::dnsNameMatch("", "*lyft.com")); + EXPECT_FALSE(ContextImpl::dnsNameMatch("lyft.com", "")); +} + +TEST_F(SslContextImplTest, TestDnsNameMatchingLegacy) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.fix_wildcard_matching", "false"}}); + EXPECT_TRUE(ContextImpl::dnsNameMatch("lyft.com", "lyft.com")); + EXPECT_TRUE(ContextImpl::dnsNameMatch("a.lyft.com", "*.lyft.com")); + // Legacy behavior EXPECT_TRUE(ContextImpl::dnsNameMatch("a.b.lyft.com", "*.lyft.com")); EXPECT_FALSE(ContextImpl::dnsNameMatch("foo.test.com", "*.lyft.com")); EXPECT_FALSE(ContextImpl::dnsNameMatch("lyft.com", "*.lyft.com")); @@ -87,6 +105,32 @@ TEST_F(SslContextImplTest, TestMatchSubjectAltNameWildcardDNSMatched) { EXPECT_TRUE(ContextImpl::matchSubjectAltName(cert.get(), subject_alt_name_matchers)); } +TEST_F(SslContextImplTest, TestMultiLevelMatch) { + // san_multiple_dns_cert matches *.example.com + bssl::UniquePtr cert = readCertFromFile(TestEnvironment::substitute( + "{{ test_rundir " + "}}/test/extensions/transport_sockets/tls/test_data/san_multiple_dns_cert.pem")); + envoy::type::matcher::v3::StringMatcher matcher; + matcher.set_exact("foo.api.example.com"); + std::vector subject_alt_name_matchers; + subject_alt_name_matchers.push_back(Matchers::StringMatcherImpl(matcher)); + EXPECT_FALSE(ContextImpl::matchSubjectAltName(cert.get(), subject_alt_name_matchers)); +} + +TEST_F(SslContextImplTest, TestMultiLevelMatchLegacy) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.fix_wildcard_matching", "false"}}); + bssl::UniquePtr cert = readCertFromFile(TestEnvironment::substitute( + "{{ test_rundir " + 
"}}/test/extensions/transport_sockets/tls/test_data/san_multiple_dns_cert.pem")); + envoy::type::matcher::v3::StringMatcher matcher; + matcher.set_exact("foo.api.example.com"); + std::vector subject_alt_name_matchers; + subject_alt_name_matchers.push_back(Matchers::StringMatcherImpl(matcher)); + EXPECT_TRUE(ContextImpl::matchSubjectAltName(cert.get(), subject_alt_name_matchers)); +} + TEST_F(SslContextImplTest, TestVerifySubjectAltNameURIMatched) { bssl::UniquePtr cert = readCertFromFile(TestEnvironment::substitute( "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem")); @@ -95,6 +139,25 @@ TEST_F(SslContextImplTest, TestVerifySubjectAltNameURIMatched) { EXPECT_TRUE(ContextImpl::verifySubjectAltName(cert.get(), verify_subject_alt_name_list)); } +TEST_F(SslContextImplTest, TestVerifySubjectAltMultiDomain) { + bssl::UniquePtr cert = readCertFromFile(TestEnvironment::substitute( + "{{ test_rundir " + "}}/test/extensions/transport_sockets/tls/test_data/san_multiple_dns_cert.pem")); + std::vector verify_subject_alt_name_list = {"https://a.www.example.com"}; + EXPECT_FALSE(ContextImpl::verifySubjectAltName(cert.get(), verify_subject_alt_name_list)); +} + +TEST_F(SslContextImplTest, TestVerifySubjectAltMultiDomainLegacy) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.fix_wildcard_matching", "false"}}); + bssl::UniquePtr cert = readCertFromFile(TestEnvironment::substitute( + "{{ test_rundir " + "}}/test/extensions/transport_sockets/tls/test_data/san_multiple_dns_cert.pem")); + std::vector verify_subject_alt_name_list = {"https://a.www.example.com"}; + EXPECT_TRUE(ContextImpl::verifySubjectAltName(cert.get(), verify_subject_alt_name_list)); +} + TEST_F(SslContextImplTest, TestMatchSubjectAltNameURIMatched) { bssl::UniquePtr cert = readCertFromFile(TestEnvironment::substitute( "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem")); From 5a84467ec13955aba248f0c67cac8488dea85190 Mon Sep 17 00:00:00 2001 From: Kevin Pullin Date: Tue, 7 Jul 2020 11:11:25 -0700 Subject: [PATCH 541/909] Dubbo docs - Fix statistics root value (#11907) Update the dubbo doc to specify the statistics root as `dubbo.` instead of `redis.` (https://github.com/envoyproxy/envoy/blob/5248a4fb7d4c2a3d1fa151f944d3a63f6b7a06cf/source/extensions/filters/network/dubbo_proxy/config.cc#L101) Signed-off-by: Kevin Pullin --- .../listeners/network_filters/dubbo_proxy_filter.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/root/configuration/listeners/network_filters/dubbo_proxy_filter.rst b/docs/root/configuration/listeners/network_filters/dubbo_proxy_filter.rst index 02ae9a74ac0f..fd8c449cc0e6 100644 --- a/docs/root/configuration/listeners/network_filters/dubbo_proxy_filter.rst +++ b/docs/root/configuration/listeners/network_filters/dubbo_proxy_filter.rst @@ -17,7 +17,7 @@ and parameter value for routing. Statistics ---------- -Every configured dubbo proxy filter has statistics rooted at *redis..* with the +Every configured dubbo proxy filter has statistics rooted at *dubbo..* with the following statistics: .. 
csv-table:: From c30ac5ea7abd212748acd789235974b6e5d2524d Mon Sep 17 00:00:00 2001 From: Wayne Zhang Date: Tue, 7 Jul 2020 11:53:02 -0700 Subject: [PATCH 542/909] jwt_authn: fix a header name typo (#11911) Signed-off-by: Wayne Zhang --- .../filters/http/jwt_authn/filter.cc | 2 +- .../filters/http/jwt_authn/filter_test.cc | 27 +++++++++++++++++++ 2 files changed, 28 insertions(+), 1 deletion(-) diff --git a/source/extensions/filters/http/jwt_authn/filter.cc b/source/extensions/filters/http/jwt_authn/filter.cc index 753c7d511a0b..65bc2b9a2896 100644 --- a/source/extensions/filters/http/jwt_authn/filter.cc +++ b/source/extensions/filters/http/jwt_authn/filter.cc @@ -19,7 +19,7 @@ namespace { Http::RegisterCustomInlineHeader access_control_request_method_handle(Http::CustomHeaders::get().AccessControlRequestMethod); Http::RegisterCustomInlineHeader - origin_handle(Http::CustomHeaders::get().AccessControlRequestMethod); + origin_handle(Http::CustomHeaders::get().Origin); bool isCorsPreflightRequest(const Http::RequestHeaderMap& headers) { return headers.getMethodValue() == Http::Headers::get().MethodValues.Options && diff --git a/test/extensions/filters/http/jwt_authn/filter_test.cc b/test/extensions/filters/http/jwt_authn/filter_test.cc index 9881cd25bab2..b3a97f90c92a 100644 --- a/test/extensions/filters/http/jwt_authn/filter_test.cc +++ b/test/extensions/filters/http/jwt_authn/filter_test.cc @@ -110,6 +110,33 @@ TEST_F(FilterTest, CorsPreflight) { EXPECT_EQ(0U, mock_config_->stats().denied_.value()); } +TEST_F(FilterTest, CorsPreflightMssingOrigin) { + auto headers = Http::TestRequestHeaderMapImpl{ + {":method", "OPTIONS"}, + {":path", "/"}, + {":scheme", "http"}, + {":authority", "host"}, + {"access-control-request-method", "GET"}, + }; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false)); + EXPECT_EQ(1U, mock_config_->stats().allowed_.value()); + // Should not be bypassed by cors_preflight since missing origin. + EXPECT_EQ(0U, mock_config_->stats().cors_preflight_bypassed_.value()); + EXPECT_EQ(0U, mock_config_->stats().denied_.value()); +} + +TEST_F(FilterTest, CorsPreflightMssingAccessControlRequestMethod) { + auto headers = Http::TestRequestHeaderMapImpl{ + {":method", "OPTIONS"}, {":path", "/"}, {":scheme", "http"}, {":authority", "host"}, + {"origin", "test-origin"}, + }; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false)); + EXPECT_EQ(1U, mock_config_->stats().allowed_.value()); + // Should not be bypassed by cors_preflight since missing access-control-request-method. + EXPECT_EQ(0U, mock_config_->stats().cors_preflight_bypassed_.value()); + EXPECT_EQ(0U, mock_config_->stats().denied_.value()); +} + // This test verifies the setPayload call is handled correctly TEST_F(FilterTest, TestSetPayloadCall) { setupMockConfig(); From 2309f9bc2d47fde8e0c7db742f56e68beff8031c Mon Sep 17 00:00:00 2001 From: Dhi Aurrahman Date: Wed, 8 Jul 2020 02:18:52 +0700 Subject: [PATCH 543/909] config: Honor transport_api_version for non-ADS xDS services (#11788) This patch makes sure the chosen endpoint is controlled by the specified transport_api_version. For transport_api_version that equals envoy::config::core::v3::ApiVersion::AUTO, the endpoint will be defaulted to envoy::config::core::v3::ApiVersion::V2. Risk Level: Low Testing: Unit. 
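As a rough sketch of the selection rule described above, the snippet below uses simplified stand-ins (the ApiVersion enum and the method table are illustrative, not the actual Envoy types): the caller resolves a discovery method name by transport_api_version, and AUTO currently falls back to the v2 name.

#include <cassert>
#include <map>
#include <string>

// Simplified stand-in for envoy::config::core::v3::ApiVersion.
enum class ApiVersion { AUTO, V2, V3 };

// AUTO defaults to the v2 transport, as described in the patch.
static ApiVersion effectiveVersion(ApiVersion v) {
  return v == ApiVersion::AUTO ? ApiVersion::V2 : v;
}

int main() {
  // Illustrative per-version method table for the SotW RDS endpoint.
  const std::map<ApiVersion, std::string> sotw_methods = {
      {ApiVersion::V2, "envoy.api.v2.RouteDiscoveryService.StreamRoutes"},
      {ApiVersion::V3, "envoy.service.route.v3.RouteDiscoveryService.StreamRoutes"},
  };
  assert(sotw_methods.at(effectiveVersion(ApiVersion::AUTO)) ==
         "envoy.api.v2.RouteDiscoveryService.StreamRoutes");
  assert(sotw_methods.at(effectiveVersion(ApiVersion::V3)) ==
         "envoy.service.route.v3.RouteDiscoveryService.StreamRoutes");
  return 0;
}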
Docs Changes: N/A Release Notes: N/A (no behavior change) Fix #10650 Signed-off-by: Dhi Aurrahman --- .../config/subscription_factory_impl.cc | 13 +- source/common/config/type_to_endpoint.cc | 286 ++++++++++++++---- source/common/config/type_to_endpoint.h | 12 +- test/common/config/type_to_endpoint_test.cc | 84 ++++- 4 files changed, 312 insertions(+), 83 deletions(-) diff --git a/source/common/config/subscription_factory_impl.cc b/source/common/config/subscription_factory_impl.cc index fee65c2a1d4e..d8448f30a6d0 100644 --- a/source/common/config/subscription_factory_impl.cc +++ b/source/common/config/subscription_factory_impl.cc @@ -59,7 +59,8 @@ SubscriptionPtr SubscriptionFactoryImpl::subscriptionFromConfigSource( return std::make_unique( local_info_, cm_, api_config_source.cluster_names()[0], dispatcher_, random_, Utility::apiConfigSourceRefreshDelay(api_config_source), - Utility::apiConfigSourceRequestTimeout(api_config_source), restMethod(type_url), type_url, + Utility::apiConfigSourceRequestTimeout(api_config_source), + restMethod(type_url, api_config_source.transport_api_version()), type_url, api_config_source.transport_api_version(), callbacks, resource_decoder, stats, Utility::configSourceInitialFetchTimeout(config), validation_visitor_); case envoy::config::core::v3::ApiConfigSource::GRPC: @@ -69,8 +70,9 @@ SubscriptionPtr SubscriptionFactoryImpl::subscriptionFromConfigSource( Utility::factoryForGrpcApiConfigSource(cm_.grpcAsyncClientManager(), api_config_source, scope, true) ->create(), - dispatcher_, sotwGrpcMethod(type_url), api_config_source.transport_api_version(), - random_, scope, Utility::parseRateLimitSettings(api_config_source), + dispatcher_, sotwGrpcMethod(type_url, api_config_source.transport_api_version()), + api_config_source.transport_api_version(), random_, scope, + Utility::parseRateLimitSettings(api_config_source), api_config_source.set_node_on_first_message_only()), callbacks, resource_decoder, stats, type_url, dispatcher_, Utility::configSourceInitialFetchTimeout(config), @@ -81,8 +83,9 @@ SubscriptionPtr SubscriptionFactoryImpl::subscriptionFromConfigSource( Config::Utility::factoryForGrpcApiConfigSource(cm_.grpcAsyncClientManager(), api_config_source, scope, true) ->create(), - dispatcher_, deltaGrpcMethod(type_url), api_config_source.transport_api_version(), - random_, scope, Utility::parseRateLimitSettings(api_config_source), local_info_), + dispatcher_, deltaGrpcMethod(type_url, api_config_source.transport_api_version()), + api_config_source.transport_api_version(), random_, scope, + Utility::parseRateLimitSettings(api_config_source), local_info_), callbacks, resource_decoder, stats, type_url, dispatcher_, Utility::configSourceInitialFetchTimeout(config), false); } diff --git a/source/common/config/type_to_endpoint.cc b/source/common/config/type_to_endpoint.cc index 7fd3dd55ec2c..d7434aaa01f8 100644 --- a/source/common/config/type_to_endpoint.cc +++ b/source/common/config/type_to_endpoint.cc @@ -6,96 +6,252 @@ // API_NO_BOOST_FILE +#define SERVICE_VERSION_INFO(v2, v3) \ + createServiceVersionInfoMap(v2, {v2, v3}), createServiceVersionInfoMap(v3, {v2, v3}) + namespace Envoy { namespace Config { namespace { -// service RPC method fully qualified names. -struct Service { - std::string sotw_grpc_method_; - std::string delta_grpc_method_; - std::string rest_method_; +// A service's name, e.g. "envoy.api.v2.RouteDiscoveryService", +// "envoy.service.route.v3.RouteDiscoveryService". 
+using ServiceName = std::string; + +struct ServiceVersionInfo { + // This hold a name for each transport_api_version, for example for + // "envoy.api.v2.RouteDiscoveryService": + // { + // "V2": "envoy.api.v2.RouteDiscoveryService", + // "V3": "envoy.service.route.v3.RouteDiscoveryService" + // } + absl::flat_hash_map names_; +}; + +// A ServiceVersionInfoMap holds a service's transport_api_version and possible names for each +// available transport_api_version. For examples: +// +// Given "envoy.api.v2.RouteDiscoveryService" as the service name: +// { +// "envoy.api.v2.RouteDiscoveryService": { +// "names_": { +// "V2": "envoy.api.v2.RouteDiscoveryService", +// "V3": "envoy.service.route.v3.RouteDiscoveryService" +// } +// } +// } +// +// And for "envoy.service.route.v3.RouteDiscoveryService": +// { +// "envoy.service.route.v3.RouteDiscoveryService": +// "names_": { +// "V2": "envoy.api.v2.RouteDiscoveryService", +// "V3": "envoy.service.route.v3.RouteDiscoveryService" +// } +// } +// } +using ServiceVersionInfoMap = absl::flat_hash_map; + +// This creates a ServiceVersionInfoMap, with service name (For example: +// "envoy.api.v2.RouteDiscoveryService") as the key. +ServiceVersionInfoMap +createServiceVersionInfoMap(absl::string_view service_name, + const std::array& versioned_service_names) { + const auto key = static_cast(service_name); + return ServiceVersionInfoMap{{ + // ServiceName as the key. + key, + + // ServiceVersionInfo as the value. + ServiceVersionInfo{{ + {envoy::config::core::v3::ApiVersion::V2, versioned_service_names[0]}, + {envoy::config::core::v3::ApiVersion::V3, versioned_service_names[1]}, + }}, + }}; +} + +// A resource type URL. For example: "type.googleapis.com/envoy.api.v2.RouteConfiguration". +using TypeUrl = std::string; + +TypeUrl getResourceTypeUrl(absl::string_view service_name) { + const auto* service_desc = Protobuf::DescriptorPool::generated_pool()->FindServiceByName( + static_cast(service_name)); + ASSERT(service_desc != nullptr, fmt::format("{} missing", service_name)); + ASSERT(service_desc->options().HasExtension(envoy::annotations::resource)); + + return Grpc::Common::typeUrl( + service_desc->options().GetExtension(envoy::annotations::resource).type()); +} + +// A method name, e.g. "envoy.api.v2.RouteDiscoveryService.StreamRoutes". +using MethodName = std::string; + +struct VersionedDiscoveryType { + // A map of transport_api_version to discovery service RPC method fully qualified names. e.g. + // { + // "V2": "envoy.api.v2.RouteDiscoveryService.StreamRoutes", + // "V3": "envoy.service.route.v3.RouteDiscoveryService.StreamRoutes" + // } + absl::flat_hash_map methods_; +}; + +// This holds versioned discovery types. +struct VersionedService { + VersionedDiscoveryType sotw_grpc_; + VersionedDiscoveryType delta_grpc_; + VersionedDiscoveryType rest_; }; -// Map from resource type URL to service RPC methods. -using TypeUrlToServiceMap = std::unordered_map; +using TypeUrlToVersionedServiceMap = absl::flat_hash_map; + +// buildTypeUrlToServiceMap() builds a reverse map from a resource type URLs to a versioned service +// (by transport_api_version). 
+// +// The way we build it is by firstly constructing a list of ServiceVersionInfoMap: +// [ +// { +// "envoy.api.v2.RouteDiscoveryService": { +// "names_": { +// "V2": "envoy.api.v2.RouteDiscoveryService", +// "V3": "envoy.service.route.v3.RouteDiscoveryService" +// } +// } +// }, +// { +// "envoy.service.route.v3.RouteDiscoveryService": { +// "names_": { +// "V2": "envoy.api.v2.RouteDiscoveryService", +// "V3": "envoy.service.route.v3.RouteDiscoveryService" +// } +// } +// } +// ... +// ] +// +// Then we convert it into the following map, with the inferred resource type URL as the key: +// +// { +// "type.googleapis.com/envoy.api.v2.RouteConfiguration": { +// "sotw_grpc_": { +// "methods_": { +// "V2": "envoy.api.v2.RouteDiscoveryService.StreamRoutes", +// "V3": "envoy.service.route.v3.RouteDiscoveryService.StreamRoutes" +// } +// }, +// ... +// }, +// "type.googleapis.com/envoy.config.route.v3.RouteConfiguration": { +// "sotw_grpc_": { +// "methods_": { +// "V2": "envoy.api.v2.RouteDiscoveryService.StreamRoutes", +// "V3": "envoy.service.route.v3.RouteDiscoveryService.StreamRoutes" +// } +// }, +// ... +// } +// } +// +TypeUrlToVersionedServiceMap* buildTypeUrlToServiceMap() { + auto* type_url_to_versioned_service_map = new TypeUrlToVersionedServiceMap(); -TypeUrlToServiceMap* buildTypeUrlToServiceMap() { - auto* type_url_to_service_map = new TypeUrlToServiceMap(); // This happens once in the lifetime of Envoy. We build a reverse map from resource type URL to - // service methods. We explicitly enumerate all services, since DescriptorPool doesn't support - // iterating over all descriptors, due its lazy load design, see - // https://www.mail-archive.com/protobuf@googlegroups.com/msg04540.html. - for (const std::string& service_name : { - "envoy.api.v2.RouteDiscoveryService", - "envoy.service.route.v3.RouteDiscoveryService", - "envoy.api.v2.ScopedRoutesDiscoveryService", - "envoy.service.route.v3.ScopedRoutesDiscoveryService", - "envoy.api.v2.VirtualHostDiscoveryService", - "envoy.service.route.v3.VirtualHostDiscoveryService", - "envoy.service.discovery.v2.SecretDiscoveryService", - "envoy.service.secret.v3.SecretDiscoveryService", - "envoy.api.v2.ClusterDiscoveryService", - "envoy.service.cluster.v3.ClusterDiscoveryService", - "envoy.api.v2.EndpointDiscoveryService", - "envoy.service.endpoint.v3.EndpointDiscoveryService", - "envoy.api.v2.ListenerDiscoveryService", - "envoy.service.listener.v3.ListenerDiscoveryService", - "envoy.service.discovery.v2.RuntimeDiscoveryService", - "envoy.service.runtime.v3.RuntimeDiscoveryService", + // service methods (versioned by transport_api_version). We explicitly enumerate all services, + // since DescriptorPool doesn't support iterating over all descriptors, due its lazy load design, + // see https://www.mail-archive.com/protobuf@googlegroups.com/msg04540.html. 
+ for (const ServiceVersionInfoMap& registered : { + SERVICE_VERSION_INFO("envoy.api.v2.RouteDiscoveryService", + "envoy.service.route.v3.RouteDiscoveryService"), + SERVICE_VERSION_INFO("envoy.api.v2.ScopedRoutesDiscoveryService", + "envoy.service.route.v3.ScopedRoutesDiscoveryService"), + SERVICE_VERSION_INFO("envoy.api.v2.ScopedRoutesDiscoveryService", + "envoy.service.route.v3.ScopedRoutesDiscoveryService"), + SERVICE_VERSION_INFO("envoy.api.v2.VirtualHostDiscoveryService", + "envoy.service.route.v3.VirtualHostDiscoveryService"), + SERVICE_VERSION_INFO("envoy.service.discovery.v2.SecretDiscoveryService", + "envoy.service.secret.v3.SecretDiscoveryService"), + SERVICE_VERSION_INFO("envoy.api.v2.ClusterDiscoveryService", + "envoy.service.cluster.v3.ClusterDiscoveryService"), + SERVICE_VERSION_INFO("envoy.api.v2.EndpointDiscoveryService", + "envoy.service.endpoint.v3.EndpointDiscoveryService"), + SERVICE_VERSION_INFO("envoy.api.v2.ListenerDiscoveryService", + "envoy.service.listener.v3.ListenerDiscoveryService"), + SERVICE_VERSION_INFO("envoy.service.discovery.v2.RuntimeDiscoveryService", + "envoy.service.runtime.v3.RuntimeDiscoveryService"), }) { - const auto* service_desc = - Protobuf::DescriptorPool::generated_pool()->FindServiceByName(service_name); - // TODO(htuch): this should become an ASSERT once all v3 descriptors are linked in. - ASSERT(service_desc != nullptr, fmt::format("{} missing", service_name)); - ASSERT(service_desc->options().HasExtension(envoy::annotations::resource)); - const std::string resource_type_url = Grpc::Common::typeUrl( - service_desc->options().GetExtension(envoy::annotations::resource).type()); - Service& service = (*type_url_to_service_map)[resource_type_url]; - // We populate the service methods that are known below, but it's possible that some services - // don't implement all, e.g. VHDS doesn't support SotW or REST. - for (int method_index = 0; method_index < service_desc->method_count(); ++method_index) { - const auto& method_desc = *service_desc->method(method_index); - if (absl::StartsWith(method_desc.name(), "Stream")) { - service.sotw_grpc_method_ = method_desc.full_name(); - } else if (absl::StartsWith(method_desc.name(), "Delta")) { - service.delta_grpc_method_ = method_desc.full_name(); - } else if (absl::StartsWith(method_desc.name(), "Fetch")) { - service.rest_method_ = method_desc.full_name(); - } else { - ASSERT(false, "Unknown xDS service method"); + for (const auto& registered_service : registered) { + const TypeUrl resource_type_url = getResourceTypeUrl(registered_service.first); + VersionedService& service = (*type_url_to_versioned_service_map)[resource_type_url]; + + for (const auto& versioned_service_name : registered_service.second.names_) { + const ServiceName& service_name = versioned_service_name.second; + const auto* service_desc = + Protobuf::DescriptorPool::generated_pool()->FindServiceByName(service_name); + ASSERT(service_desc != nullptr, fmt::format("{} missing", service_name)); + ASSERT(service_desc->options().HasExtension(envoy::annotations::resource)); + + // We populate the service methods that are known below, but it's possible that some + // services don't implement all, e.g. VHDS doesn't support SotW or REST. 
+ for (int method_index = 0; method_index < service_desc->method_count(); ++method_index) { + const auto& method_desc = *service_desc->method(method_index); + const auto transport_api_version = versioned_service_name.first; + if (absl::StartsWith(method_desc.name(), "Stream")) { + service.sotw_grpc_.methods_[transport_api_version] = method_desc.full_name(); + } else if (absl::StartsWith(method_desc.name(), "Delta")) { + service.delta_grpc_.methods_[transport_api_version] = method_desc.full_name(); + } else if (absl::StartsWith(method_desc.name(), "Fetch")) { + service.rest_.methods_[transport_api_version] = method_desc.full_name(); + } else { + ASSERT(false, "Unknown xDS service method"); + } + } } } } - return type_url_to_service_map; + return type_url_to_versioned_service_map; } -TypeUrlToServiceMap& typeUrlToServiceMap() { - static TypeUrlToServiceMap* type_url_to_service_map = buildTypeUrlToServiceMap(); - return *type_url_to_service_map; +TypeUrlToVersionedServiceMap& typeUrlToVersionedServiceMap() { + static TypeUrlToVersionedServiceMap* type_url_to_versioned_service_map = + buildTypeUrlToServiceMap(); + return *type_url_to_versioned_service_map; +} + +envoy::config::core::v3::ApiVersion +effectiveTransportApiVersion(envoy::config::core::v3::ApiVersion transport_api_version) { + // By default (when the transport_api_version is "AUTO"), the effective transport_api_version is + // envoy::config::core::v3::ApiVersion::V2. + if (transport_api_version == envoy::config::core::v3::ApiVersion::AUTO) { + return envoy::config::core::v3::ApiVersion::V2; + } + return transport_api_version; } } // namespace -const Protobuf::MethodDescriptor& deltaGrpcMethod(absl::string_view type_url) { - const auto it = typeUrlToServiceMap().find(static_cast(type_url)); - ASSERT(it != typeUrlToServiceMap().cend()); +const Protobuf::MethodDescriptor& +deltaGrpcMethod(absl::string_view type_url, + envoy::config::core::v3::ApiVersion transport_api_version) { + const auto it = typeUrlToVersionedServiceMap().find(static_cast(type_url)); + ASSERT(it != typeUrlToVersionedServiceMap().cend()); return *Protobuf::DescriptorPool::generated_pool()->FindMethodByName( - it->second.delta_grpc_method_); + it->second.delta_grpc_.methods_[effectiveTransportApiVersion(transport_api_version)]); } -const Protobuf::MethodDescriptor& sotwGrpcMethod(absl::string_view type_url) { - const auto it = typeUrlToServiceMap().find(static_cast(type_url)); - ASSERT(it != typeUrlToServiceMap().cend()); +const Protobuf::MethodDescriptor& +sotwGrpcMethod(absl::string_view type_url, + envoy::config::core::v3::ApiVersion transport_api_version) { + const auto it = typeUrlToVersionedServiceMap().find(static_cast(type_url)); + ASSERT(it != typeUrlToVersionedServiceMap().cend()); return *Protobuf::DescriptorPool::generated_pool()->FindMethodByName( - it->second.sotw_grpc_method_); + it->second.sotw_grpc_.methods_[effectiveTransportApiVersion(transport_api_version)]); } -const Protobuf::MethodDescriptor& restMethod(absl::string_view type_url) { - const auto it = typeUrlToServiceMap().find(static_cast(type_url)); - ASSERT(it != typeUrlToServiceMap().cend()); - return *Protobuf::DescriptorPool::generated_pool()->FindMethodByName(it->second.rest_method_); +const Protobuf::MethodDescriptor& +restMethod(absl::string_view type_url, envoy::config::core::v3::ApiVersion transport_api_version) { + const auto it = typeUrlToVersionedServiceMap().find(static_cast(type_url)); + ASSERT(it != typeUrlToVersionedServiceMap().cend()); + return 
*Protobuf::DescriptorPool::generated_pool()->FindMethodByName( + it->second.rest_.methods_[effectiveTransportApiVersion(transport_api_version)]); } } // namespace Config diff --git a/source/common/config/type_to_endpoint.h b/source/common/config/type_to_endpoint.h index 0f06d2007977..ed9f9e6e5c89 100644 --- a/source/common/config/type_to_endpoint.h +++ b/source/common/config/type_to_endpoint.h @@ -10,12 +10,18 @@ namespace Envoy { namespace Config { // Translates an xDS resource type_url to the name of the delta gRPC service that carries it. -const Protobuf::MethodDescriptor& deltaGrpcMethod(absl::string_view resource_type_url); +const Protobuf::MethodDescriptor& +deltaGrpcMethod(absl::string_view resource_type_url, + envoy::config::core::v3::ApiVersion transport_api_version); // Translates an xDS resource type_url to the name of the state-of-the-world gRPC service that // carries it. -const Protobuf::MethodDescriptor& sotwGrpcMethod(absl::string_view resource_type_url); +const Protobuf::MethodDescriptor& +sotwGrpcMethod(absl::string_view resource_type_url, + envoy::config::core::v3::ApiVersion transport_api_version); // Translates an xDS resource type_url to the name of the REST service that carries it. -const Protobuf::MethodDescriptor& restMethod(absl::string_view resource_type_url); +const Protobuf::MethodDescriptor& +restMethod(absl::string_view resource_type_url, + envoy::config::core::v3::ApiVersion transport_api_version); } // namespace Config } // namespace Envoy diff --git a/test/common/config/type_to_endpoint_test.cc b/test/common/config/type_to_endpoint_test.cc index 353580f17244..f163d832218c 100644 --- a/test/common/config/type_to_endpoint_test.cc +++ b/test/common/config/type_to_endpoint_test.cc @@ -16,23 +16,87 @@ TEST(TypeToEndpoint, All) { // The dummy messages are included for link purposes only. envoy::api::v2::RdsDummy _v2_rds_dummy; envoy::service::route::v3::RdsDummy _v3_rds_dummy; + // Delta gRPC endpoints. EXPECT_EQ("envoy.api.v2.RouteDiscoveryService.DeltaRoutes", - deltaGrpcMethod("type.googleapis.com/envoy.api.v2.RouteConfiguration").full_name()); - EXPECT_EQ( - "envoy.service.route.v3.RouteDiscoveryService.DeltaRoutes", - deltaGrpcMethod("type.googleapis.com/envoy.config.route.v3.RouteConfiguration").full_name()); + deltaGrpcMethod("type.googleapis.com/envoy.api.v2.RouteConfiguration", + envoy::config::core::v3::ApiVersion::AUTO) + .full_name()); + EXPECT_EQ("envoy.api.v2.RouteDiscoveryService.DeltaRoutes", + deltaGrpcMethod("type.googleapis.com/envoy.api.v2.RouteConfiguration", + envoy::config::core::v3::ApiVersion::V2) + .full_name()); + EXPECT_EQ("envoy.service.route.v3.RouteDiscoveryService.DeltaRoutes", + deltaGrpcMethod("type.googleapis.com/envoy.api.v2.RouteConfiguration", + envoy::config::core::v3::ApiVersion::V3) + .full_name()); + + EXPECT_EQ("envoy.api.v2.RouteDiscoveryService.DeltaRoutes", + deltaGrpcMethod("type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + envoy::config::core::v3::ApiVersion::AUTO) + .full_name()); + EXPECT_EQ("envoy.api.v2.RouteDiscoveryService.DeltaRoutes", + deltaGrpcMethod("type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + envoy::config::core::v3::ApiVersion::V2) + .full_name()); + EXPECT_EQ("envoy.service.route.v3.RouteDiscoveryService.DeltaRoutes", + deltaGrpcMethod("type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + envoy::config::core::v3::ApiVersion::V3) + .full_name()); + // SotW gRPC endpoints. 
EXPECT_EQ("envoy.api.v2.RouteDiscoveryService.StreamRoutes", - sotwGrpcMethod("type.googleapis.com/envoy.api.v2.RouteConfiguration").full_name()); - EXPECT_EQ( - "envoy.service.route.v3.RouteDiscoveryService.StreamRoutes", - sotwGrpcMethod("type.googleapis.com/envoy.config.route.v3.RouteConfiguration").full_name()); + sotwGrpcMethod("type.googleapis.com/envoy.api.v2.RouteConfiguration", + envoy::config::core::v3::ApiVersion::AUTO) + .full_name()); + EXPECT_EQ("envoy.api.v2.RouteDiscoveryService.StreamRoutes", + sotwGrpcMethod("type.googleapis.com/envoy.api.v2.RouteConfiguration", + envoy::config::core::v3::ApiVersion::V2) + .full_name()); + EXPECT_EQ("envoy.service.route.v3.RouteDiscoveryService.StreamRoutes", + sotwGrpcMethod("type.googleapis.com/envoy.api.v2.RouteConfiguration", + envoy::config::core::v3::ApiVersion::V3) + .full_name()); + + EXPECT_EQ("envoy.api.v2.RouteDiscoveryService.StreamRoutes", + sotwGrpcMethod("type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + envoy::config::core::v3::ApiVersion::AUTO) + .full_name()); + EXPECT_EQ("envoy.api.v2.RouteDiscoveryService.StreamRoutes", + sotwGrpcMethod("type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + envoy::config::core::v3::ApiVersion::V2) + .full_name()); + EXPECT_EQ("envoy.service.route.v3.RouteDiscoveryService.StreamRoutes", + sotwGrpcMethod("type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + envoy::config::core::v3::ApiVersion::V3) + .full_name()); + // REST endpoints. EXPECT_EQ("envoy.api.v2.RouteDiscoveryService.FetchRoutes", - restMethod("type.googleapis.com/envoy.api.v2.RouteConfiguration").full_name()); + restMethod("type.googleapis.com/envoy.api.v2.RouteConfiguration", + envoy::config::core::v3::ApiVersion::AUTO) + .full_name()); + EXPECT_EQ("envoy.api.v2.RouteDiscoveryService.FetchRoutes", + restMethod("type.googleapis.com/envoy.api.v2.RouteConfiguration", + envoy::config::core::v3::ApiVersion::V2) + .full_name()); + EXPECT_EQ("envoy.service.route.v3.RouteDiscoveryService.FetchRoutes", + restMethod("type.googleapis.com/envoy.api.v2.RouteConfiguration", + envoy::config::core::v3::ApiVersion::V3) + .full_name()); + + EXPECT_EQ("envoy.api.v2.RouteDiscoveryService.FetchRoutes", + restMethod("type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + envoy::config::core::v3::ApiVersion::AUTO) + .full_name()); + EXPECT_EQ("envoy.api.v2.RouteDiscoveryService.FetchRoutes", + restMethod("type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + envoy::config::core::v3::ApiVersion::V2) + .full_name()); EXPECT_EQ("envoy.service.route.v3.RouteDiscoveryService.FetchRoutes", - restMethod("type.googleapis.com/envoy.config.route.v3.RouteConfiguration").full_name()); + restMethod("type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + envoy::config::core::v3::ApiVersion::V3) + .full_name()); } } // namespace From 50ef0945fa2c5da4bff7627c3abf41fdd3b7cffd Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Tue, 7 Jul 2020 15:20:08 -0400 Subject: [PATCH 544/909] release: cutting 1.15 (#11898) Risk Level: n/a Testing: n/a Docs Changes: yes Release Notes: exactly Signed-off-by: Alyssa Wilk --- VERSION | 2 +- docs/root/version_history/current.rst | 62 +++++++++++++-------------- 2 files changed, 32 insertions(+), 32 deletions(-) diff --git a/VERSION b/VERSION index 9a4866bbcede..141f2e805beb 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.15.0-dev +1.15.0 diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 7e162974f931..94ac2469a69d 
100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -1,5 +1,5 @@ -1.15.0 (Pending) -================ +1.15.0 (July 6, 2020) +===================== Incompatible Behavior Changes @@ -9,6 +9,7 @@ Incompatible Behavior Changes * build: official released binary is now built on Ubuntu 18.04, requires glibc >= 2.27. * client_ssl_auth: the `auth_ip_white_list` stat has been renamed to :ref:`auth_ip_allowlist `. +* header to metadata: on_header_missing rules with empty values are now rejected (they were skipped before). * router: path_redirect now keeps query string by default. This behavior may be reverted by setting runtime feature `envoy.reloadable_features.preserve_query_string_in_path_redirects` to false. * tls: fixed a bug where wilcard matching for "\*.foo.com" also matched domains of the form "a.b.foo.com". This behavior can be temporarily reverted by setting runtime feature `envoy.reloadable_features.fix_wildcard_matching` to false. @@ -17,8 +18,7 @@ Minor Behavior Changes *Changes that may cause incompatibilities for some users, but should not for most* * access loggers: applied existing buffer limits to access logs, as well as :ref:`stats ` for logged / dropped logs. This can be reverted temporarily by setting runtime feature `envoy.reloadable_features.disallow_unbounded_access_logs` to false. -* build: run as non-root inside Docker containers. Existing behaviour can be restored by setting the environment variable `ENVOY_UID` to `0`. `ENVOY_UID` and `ENVOY_GID` can be used to set the envoy user's `uid` and `gid` respectively. -* header to metadata: on_header_missing rules with empty values are now rejected (they were skipped before). +* build: runs as non-root inside Docker containers. Existing behaviour can be restored by setting the environment variable `ENVOY_UID` to `0`. `ENVOY_UID` and `ENVOY_GID` can be used to set the envoy user's `uid` and `gid` respectively. * health check: in the health check filter the :ref:`percentage of healthy servers in upstream clusters ` is now interpreted as an integer. * hot restart: added the option :option:`--use-dynamic-base-id` to select an unused base ID at startup and the option :option:`--base-id-path` to write the base id to a file (for reuse with later hot restarts). * http: changed early error path for HTTP/1.1 so that responses consistently flow through the http connection manager, and the http filter chains. This behavior may be temporarily reverted by setting runtime feature `envoy.reloadable_features.early_errors_via_hcm` to false. @@ -30,8 +30,8 @@ Minor Behavior Changes * http: stopped allowing upstream 1xx or 204 responses with Transfer-Encoding or non-zero Content-Length headers. Content-Length of 0 is allowed, but stripped. This behavior can be temporarily reverted by setting `envoy.reloadable_features.strict_1xx_and_204_response_headers` to false. * http: upstream connections will now automatically set ALPN when this value is not explicitly set elsewhere (e.g. on the upstream TLS config). This behavior may be temporarily reverted by setting runtime feature `envoy.reloadable_features.http_default_alpn` to false. * listener: fixed a bug where when a static listener fails to be added to a worker, the listener was not removed from the active listener list. -* router: allow retries of streaming or incomplete requests. This removes stat `rq_retry_skipped_request_not_complete`. -* router: allow retries by default when upstream responds with :ref:`x-envoy-overloaded `. 
+* router: extended to allow retries of streaming or incomplete requests. This removes stat `rq_retry_skipped_request_not_complete`. +* router: extended to allow retries by default when upstream responds with :ref:`x-envoy-overloaded `. Bug Fixes --------- @@ -41,7 +41,7 @@ Bug Fixes limit was pinned to the minimum would skew the new minRTT value if the replies arrived after the start of the new minRTT window. * buffer: fixed CVE-2020-12603 by avoiding fragmentation, and tracking of HTTP/2 data and control frames in the output buffer. -* grpc-json: fix a bug when in trailers only gRPC response (e.g. error) HTTP status code is not being re-written. +* grpc-json: fixed a bug when in trailers only gRPC response (e.g. error) HTTP status code is not being re-written. * http: fixed a bug in the grpc_http1_reverse_bridge filter where header-only requests were forwarded with a non-zero content length. * http: fixed a bug where in some cases slash was moved from path to query string when :ref:`merging of adjacent slashes` is enabled. * http: fixed CVE-2020-12604 by changing :ref:`stream_idle_timeout ` @@ -50,7 +50,7 @@ Bug Fixes * http: fixed several bugs with applying correct connection close behavior across the http connection manager, health checker, and connection pool. This behavior may be temporarily reverted by setting runtime feature `envoy.reloadable_features.fix_connection_close` to false. * listener: fixed CVE-2020-8663 by adding runtime support for :ref:`per-listener limits ` on active/accepted connections. * overload management: fixed CVE-2020-8663 by adding runtime support for :ref:`global limits ` on active/accepted connections. -* prometheus stats: fix the sort order of output lines to comply with the standard. +* prometheus stats: fixed the sort order of output lines to comply with the standard. * udp: the :ref:`reuse_port ` listener option must now be specified for UDP listeners if concurrency is > 1. This previously crashed so is considered a bug fix. @@ -60,31 +60,31 @@ Removed Config or Runtime ------------------------- *Normally occurs at the end of the* :ref:`deprecation period ` -* http: remove legacy connection pool code and their runtime features: `envoy.reloadable_features.new_http1_connection_pool_behavior` and +* http: removed legacy connection pool code and their runtime features: `envoy.reloadable_features.new_http1_connection_pool_behavior` and `envoy.reloadable_features.new_http2_connection_pool_behavior`. New Features ------------ +* access loggers: added file access logger config :ref:`log_format `. * access loggers: added GRPC_STATUS operator on logging format. -* access loggers: extened specifier for FilterStateFormatter to output :ref:`unstructured log string `. -* access loggers: file access logger config added :ref:`log_format `. -* access loggers: gRPC access logger config added added :ref:`API version ` to explicitly set the version of gRPC service endpoint and message to be used. +* access loggers: added gRPC access logger config added :ref:`API version ` to explicitly set the version of gRPC service endpoint and message to be used. +* access loggers: extended specifier for FilterStateFormatter to output :ref:`unstructured log string `. * admin: added support for dumping EDS config at :ref:`/config_dump?include_eds `. -* aggregate cluster: make route :ref:`retry_priority ` predicates work with :ref:`this cluster type `. +* aggregate cluster: made route :ref:`retry_priority ` predicates work with :ref:`this cluster type `. 
* build: official released binary is now built on Ubuntu 18.04, requires glibc >= 2.27. * build: official released binary is now built with Clang 10.0.0. * cluster: added an extension point for configurable :ref:`upstreams `. -* compressor: generic :ref:`compressor ` filter exposed to users. +* compressor: exposed generic :ref:`compressor ` filter to users. * config: added :ref:`identifier ` stat that reflects control plane identifier. * config: added :ref:`version_text ` stat that reflects xDS version. -* decompressor: generic :ref:`decompressor ` filter exposed to users. +* decompressor: exposed generic :ref:`decompressor ` filter to users. * dynamic forward proxy: added :ref:`SNI based dynamic forward proxy ` support. * dynamic forward proxy: added configurable :ref:`circuit breakers ` for resolver on DNS cache. This behavior can be temporarily disabled by the runtime feature `envoy.reloadable_features.enable_dns_cache_circuit_breakers`. If this runtime feature is disabled, the upstream circuit breakers for the cluster will be used even if the :ref:`DNS Cache circuit breakers ` are configured. * dynamic forward proxy: added :ref:`allow_insecure_cluster_options` to allow disabling of auto_san_validation and auto_sni. -* ext_authz filter: added :ref:`v2 deny_at_disable `, :ref:`v3 deny_at_disable `. This allows to force deny for protected path while filter gets disabled, by setting this key to true. +* ext_authz filter: added :ref:`v2 deny_at_disable `, :ref:`v3 deny_at_disable `. This allows force denying protected paths while filter gets disabled, by setting this key to true. * ext_authz filter: added API version field for both :ref:`HTTP ` and :ref:`Network ` filters to explicitly set the version of gRPC service endpoint and message to be used. * ext_authz filter: added :ref:`v3 allowed_upstream_headers_to_append ` to allow appending multiple header entries (returned by the authorization server) with the same key to the original request headers. @@ -92,7 +92,7 @@ New Features are applied to using :ref:`HTTP headers ` to the HTTP fault filter. * fault: added support for specifying grpc_status code in abort faults using :ref:`HTTP header ` or abort fault configuration in HTTP fault filter. -* filter: add `upstram_rq_time` stats to the GPRC stats filter. +* filter: added `upstram_rq_time` stats to the GPRC stats filter. Disabled by default and can be enabled via :ref:`enable_upstream_stats `. * grpc: added support for Google gRPC :ref:`custom channel arguments `. * grpc-json: added support for streaming response using @@ -101,7 +101,7 @@ New Features * gzip filter: added option to set zlib's next output buffer size. * hds: updated to allow to explicitly set the API version of gRPC service endpoint and message to be used. * header to metadata: added support for regex substitutions on header values. -* health checks: allow configuring health check transport sockets by specifying :ref:`transport socket match criteria `. +* health checks: allowed configuring health check transport sockets by specifying :ref:`transport socket match criteria `. * http: added :ref:`local_reply config ` to http_connection_manager to customize :ref:`local reply `. * http: added :ref:`stripping port from host header ` support. * http: added support for proxying CONNECT requests, terminating CONNECT requests, and converting raw TCP streams into HTTP/2 CONNECT requests. See :ref:`upgrade documentation` for details. 
@@ -116,12 +116,12 @@ New Features * lrs: updated to allow to explicitly set the API version of gRPC service endpoint and message to be used. * lua: added :ref:`per route config ` for Lua filter. * lua: added tracing to the ``httpCall()`` API. -* metrics service: added added :ref:`API version ` to explicitly set the version of gRPC service endpoint and message to be used. +* metrics service: added :ref:`API version ` to explicitly set the version of gRPC service endpoint and message to be used. * network filters: added a :ref:`postgres proxy filter `. * network filters: added a :ref:`rocketmq proxy filter `. -* performance: stats symbol table implementation (enabled by default; to disable it, add - `--use-fake-symbol-table 1` to the command-line arguments when starting Envoy). -* ratelimit: add support for use of dynamic metadata :ref:`dynamic_metadata ` as a ratelimit action. +* performance: enabled stats symbol table implementation by default. To disable it, add + `--use-fake-symbol-table 1` to the command-line arguments when starting Envoy. +* ratelimit: added support for use of dynamic metadata :ref:`dynamic_metadata ` as a ratelimit action. * ratelimit: added :ref:`API version ` to explicitly set the version of gRPC service endpoint and message to be used. * ratelimit: support specifying dynamic overrides in rate limit descriptors using :ref:`limit override ` config. * redis: added acl support :ref:`downstream_auth_username ` for downstream client ACL authentication, and :ref:`auth_username ` to configure authentication usernames for upstream Redis 6+ server clusters with ACL enabled. @@ -129,19 +129,19 @@ New Features * request_id: added to :ref:`always_set_request_id_in_response setting ` to set :ref:`x-request-id ` header in response even if tracing is not forced. -* router: add regex substitution support for header based hashing. -* router: add support for RESPONSE_FLAGS and RESPONSE_CODE_DETAILS :ref:`header formatters +* router: added more fine grained internal redirect configs to the :ref:`internal_redirect_policy + ` field. +* router: added regex substitution support for header based hashing. +* router: added support for RESPONSE_FLAGS and RESPONSE_CODE_DETAILS :ref:`header formatters `. * router: allow Rate Limiting Service to be called in case of missing request header for a descriptor if the :ref:`skip_if_absent ` field is set to true. -* router: more fine grained internal redirect configs are added to the :ref:`internal_redirect_policy - ` field. -* runtime: add new gauge :ref:`deprecated_feature_seen_since_process_start ` that gets reset across hot restarts. -* server: add the option :option:`--drain-strategy` to enable different drain strategies for DrainManager::drainClose(). +* runtime: added new gauge :ref:`deprecated_feature_seen_since_process_start ` that gets reset across hot restarts. +* server: added the option :option:`--drain-strategy` to enable different drain strategies for DrainManager::drainClose(). * server: added :ref:`server.envoy_bug_failures ` statistic to count ENVOY_BUG failures. * stats: added the option to :ref:`report counters as deltas ` to the metrics service stats sink. -* tracing: tracing configuration has been made fully dynamic and every HTTP connection manager +* tracing: made tracing configuration fully dynamic and every HTTP connection manager can now have a separate :ref:`tracing provider `. -* udp: :ref:`udp_proxy ` filter has been upgraded to v3 and is no longer considered alpha. 
+* udp: upgraded :ref:`udp_proxy ` filter to v3 and promoted it out of alpha. Deprecated ---------- @@ -157,7 +157,7 @@ Deprecated * The :ref:`internal_redirect_action ` field and :ref:`max_internal_redirects ` field are now deprecated. This changes the implemented default cross scheme redirect behavior. - All cross scheme redirect are disallowed by default. To restore + All cross scheme redirects are disallowed by default. To restore the previous behavior, set allow_cross_scheme_redirect=true and use :ref:`safe_cross_scheme`, in :ref:`predicates `. From ef74d8fc64321e46e07d09e0867ddd154e715fd4 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Tue, 7 Jul 2020 18:25:50 -0400 Subject: [PATCH 545/909] release: kicking off 1.16.0 (#11930) Signed-off-by: Alyssa Wilk --- VERSION | 2 +- docs/root/version_history/current.rst | 145 +-------------- docs/root/version_history/v1.15.0.rst | 166 ++++++++++++++++++ docs/root/version_history/version_history.rst | 1 + 4 files changed, 170 insertions(+), 144 deletions(-) create mode 100644 docs/root/version_history/v1.15.0.rst diff --git a/VERSION b/VERSION index 141f2e805beb..1f0d2f335194 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.15.0 +1.16.0-dev diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 94ac2469a69d..22711b6123dd 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -1,166 +1,25 @@ -1.15.0 (July 6, 2020) -===================== - +1.16.0 (Pending) +================ Incompatible Behavior Changes ----------------------------- *Changes that are expected to cause an incompatibility if applicable; deployment changes are likely required* -* build: official released binary is now built on Ubuntu 18.04, requires glibc >= 2.27. -* client_ssl_auth: the `auth_ip_white_list` stat has been renamed to - :ref:`auth_ip_allowlist `. -* header to metadata: on_header_missing rules with empty values are now rejected (they were skipped before). -* router: path_redirect now keeps query string by default. This behavior may be reverted by setting runtime feature `envoy.reloadable_features.preserve_query_string_in_path_redirects` to false. -* tls: fixed a bug where wilcard matching for "\*.foo.com" also matched domains of the form "a.b.foo.com". This behavior can be temporarily reverted by setting runtime feature `envoy.reloadable_features.fix_wildcard_matching` to false. - Minor Behavior Changes ---------------------- *Changes that may cause incompatibilities for some users, but should not for most* -* access loggers: applied existing buffer limits to access logs, as well as :ref:`stats ` for logged / dropped logs. This can be reverted temporarily by setting runtime feature `envoy.reloadable_features.disallow_unbounded_access_logs` to false. -* build: runs as non-root inside Docker containers. Existing behaviour can be restored by setting the environment variable `ENVOY_UID` to `0`. `ENVOY_UID` and `ENVOY_GID` can be used to set the envoy user's `uid` and `gid` respectively. -* health check: in the health check filter the :ref:`percentage of healthy servers in upstream clusters ` is now interpreted as an integer. -* hot restart: added the option :option:`--use-dynamic-base-id` to select an unused base ID at startup and the option :option:`--base-id-path` to write the base id to a file (for reuse with later hot restarts). -* http: changed early error path for HTTP/1.1 so that responses consistently flow through the http connection manager, and the http filter chains. 
This behavior may be temporarily reverted by setting runtime feature `envoy.reloadable_features.early_errors_via_hcm` to false. -* http: fixed several bugs with applying correct connection close behavior across the http connection manager, health checker, and connection pool. This behavior may be temporarily reverted by setting runtime feature `envoy.reloadable_features.fix_connection_close` to false. -* http: fixed a bug where the upgrade header was not cleared on responses to non-upgrade requests. - Can be reverted temporarily by setting runtime feature `envoy.reloadable_features.fix_upgrade_response` to false. -* http: stopped overwriting `date` response headers. Responses without a `date` header will still have the header properly set. This behavior can be temporarily reverted by setting `envoy.reloadable_features.preserve_upstream_date` to false. -* http: stopped adding a synthetic path to CONNECT requests, meaning unconfigured CONNECT requests will now return 404 instead of 403. This behavior can be temporarily reverted by setting `envoy.reloadable_features.stop_faking_paths` to false. -* http: stopped allowing upstream 1xx or 204 responses with Transfer-Encoding or non-zero Content-Length headers. Content-Length of 0 is allowed, but stripped. This behavior can be temporarily reverted by setting `envoy.reloadable_features.strict_1xx_and_204_response_headers` to false. -* http: upstream connections will now automatically set ALPN when this value is not explicitly set elsewhere (e.g. on the upstream TLS config). This behavior may be temporarily reverted by setting runtime feature `envoy.reloadable_features.http_default_alpn` to false. -* listener: fixed a bug where when a static listener fails to be added to a worker, the listener was not removed from the active listener list. -* router: extended to allow retries of streaming or incomplete requests. This removes stat `rq_retry_skipped_request_not_complete`. -* router: extended to allow retries by default when upstream responds with :ref:`x-envoy-overloaded `. - Bug Fixes --------- *Changes expected to improve the state of the world and are unlikely to have negative effects* -* adaptive concurrency: fixed a minRTT calculation bug where requests started before the concurrency - limit was pinned to the minimum would skew the new minRTT value if the replies arrived after the - start of the new minRTT window. -* buffer: fixed CVE-2020-12603 by avoiding fragmentation, and tracking of HTTP/2 data and control frames in the output buffer. -* grpc-json: fixed a bug when in trailers only gRPC response (e.g. error) HTTP status code is not being re-written. -* http: fixed a bug in the grpc_http1_reverse_bridge filter where header-only requests were forwarded with a non-zero content length. -* http: fixed a bug where in some cases slash was moved from path to query string when :ref:`merging of adjacent slashes` is enabled. -* http: fixed CVE-2020-12604 by changing :ref:`stream_idle_timeout ` - to also defend against an HTTP/2 peer that does not open stream window once an entire response has been buffered to be sent to a downstream client. -* http: fixed CVE-2020-12605 by including request URL in request header size computation, and rejecting partial headers that exceed configured limits. -* http: fixed several bugs with applying correct connection close behavior across the http connection manager, health checker, and connection pool. 
This behavior may be temporarily reverted by setting runtime feature `envoy.reloadable_features.fix_connection_close` to false. -* listener: fixed CVE-2020-8663 by adding runtime support for :ref:`per-listener limits ` on active/accepted connections. -* overload management: fixed CVE-2020-8663 by adding runtime support for :ref:`global limits ` on active/accepted connections. -* prometheus stats: fixed the sort order of output lines to comply with the standard. -* udp: the :ref:`reuse_port ` listener option must now be - specified for UDP listeners if concurrency is > 1. This previously crashed so is considered a - bug fix. -* upstream: fixed a bug where Envoy would panic when receiving a GRPC SERVICE_UNKNOWN status on the health check. - Removed Config or Runtime ------------------------- *Normally occurs at the end of the* :ref:`deprecation period ` -* http: removed legacy connection pool code and their runtime features: `envoy.reloadable_features.new_http1_connection_pool_behavior` and - `envoy.reloadable_features.new_http2_connection_pool_behavior`. - New Features ------------ -* access loggers: added file access logger config :ref:`log_format `. -* access loggers: added GRPC_STATUS operator on logging format. -* access loggers: added gRPC access logger config added :ref:`API version ` to explicitly set the version of gRPC service endpoint and message to be used. -* access loggers: extended specifier for FilterStateFormatter to output :ref:`unstructured log string `. -* admin: added support for dumping EDS config at :ref:`/config_dump?include_eds `. -* aggregate cluster: made route :ref:`retry_priority ` predicates work with :ref:`this cluster type `. -* build: official released binary is now built on Ubuntu 18.04, requires glibc >= 2.27. -* build: official released binary is now built with Clang 10.0.0. -* cluster: added an extension point for configurable :ref:`upstreams `. -* compressor: exposed generic :ref:`compressor ` filter to users. -* config: added :ref:`identifier ` stat that reflects control plane identifier. -* config: added :ref:`version_text ` stat that reflects xDS version. -* decompressor: exposed generic :ref:`decompressor ` filter to users. -* dynamic forward proxy: added :ref:`SNI based dynamic forward proxy ` support. -* dynamic forward proxy: added configurable :ref:`circuit breakers ` for resolver on DNS cache. - This behavior can be temporarily disabled by the runtime feature `envoy.reloadable_features.enable_dns_cache_circuit_breakers`. - If this runtime feature is disabled, the upstream circuit breakers for the cluster will be used even if the :ref:`DNS Cache circuit breakers ` are configured. -* dynamic forward proxy: added :ref:`allow_insecure_cluster_options` to allow disabling of auto_san_validation and auto_sni. -* ext_authz filter: added :ref:`v2 deny_at_disable `, :ref:`v3 deny_at_disable `. This allows force denying protected paths while filter gets disabled, by setting this key to true. -* ext_authz filter: added API version field for both :ref:`HTTP ` - and :ref:`Network ` filters to explicitly set the version of gRPC service endpoint and message to be used. -* ext_authz filter: added :ref:`v3 allowed_upstream_headers_to_append ` to allow appending multiple header entries (returned by the authorization server) with the same key to the original request headers. -* fault: added support for controlling the percentage of requests that abort, delay and response rate limits faults - are applied to using :ref:`HTTP headers ` to the HTTP fault filter. 
-* fault: added support for specifying grpc_status code in abort faults using - :ref:`HTTP header ` or abort fault configuration in HTTP fault filter. -* filter: added `upstram_rq_time` stats to the GPRC stats filter. - Disabled by default and can be enabled via :ref:`enable_upstream_stats `. -* grpc: added support for Google gRPC :ref:`custom channel arguments `. -* grpc-json: added support for streaming response using - `google.api.HttpBody `_. -* grpc-json: send a `x-envoy-original-method` header to grpc services. -* gzip filter: added option to set zlib's next output buffer size. -* hds: updated to allow to explicitly set the API version of gRPC service endpoint and message to be used. -* header to metadata: added support for regex substitutions on header values. -* health checks: allowed configuring health check transport sockets by specifying :ref:`transport socket match criteria `. -* http: added :ref:`local_reply config ` to http_connection_manager to customize :ref:`local reply `. -* http: added :ref:`stripping port from host header ` support. -* http: added support for proxying CONNECT requests, terminating CONNECT requests, and converting raw TCP streams into HTTP/2 CONNECT requests. See :ref:`upgrade documentation` for details. -* listener: added in place filter chain update flow for tcp listener update which doesn't close connections if the corresponding network filter chain is equivalent during the listener update. - Can be disabled by setting runtime feature `envoy.reloadable_features.listener_in_place_filterchain_update` to false. - Also added additional draining filter chain stat for :ref:`listener manager ` to track the number of draining filter chains and the number of in place update attempts. -* logger: added :option:`--log-format-prefix-with-location` command line option to prefix '%v' with file path and line number. -* lrs: added new *envoy_api_field_service.load_stats.v2.LoadStatsResponse.send_all_clusters* field - in LRS response, which allows management servers to avoid explicitly listing all clusters it is - interested in; behavior is allowed based on new "envoy.lrs.supports_send_all_clusters" capability - in :ref:`client_features` field. -* lrs: updated to allow to explicitly set the API version of gRPC service endpoint and message to be used. -* lua: added :ref:`per route config ` for Lua filter. -* lua: added tracing to the ``httpCall()`` API. -* metrics service: added :ref:`API version ` to explicitly set the version of gRPC service endpoint and message to be used. -* network filters: added a :ref:`postgres proxy filter `. -* network filters: added a :ref:`rocketmq proxy filter `. -* performance: enabled stats symbol table implementation by default. To disable it, add - `--use-fake-symbol-table 1` to the command-line arguments when starting Envoy. -* ratelimit: added support for use of dynamic metadata :ref:`dynamic_metadata ` as a ratelimit action. -* ratelimit: added :ref:`API version ` to explicitly set the version of gRPC service endpoint and message to be used. -* ratelimit: support specifying dynamic overrides in rate limit descriptors using :ref:`limit override ` config. -* redis: added acl support :ref:`downstream_auth_username ` for downstream client ACL authentication, and :ref:`auth_username ` to configure authentication usernames for upstream Redis 6+ server clusters with ACL enabled. -* regex: added support for enforcing max program size via runtime and stats to monitor program size for :ref:`Google RE2 `. 
-* request_id: added to :ref:`always_set_request_id_in_response setting ` - to set :ref:`x-request-id ` header in response even if - tracing is not forced. -* router: added more fine grained internal redirect configs to the :ref:`internal_redirect_policy - ` field. -* router: added regex substitution support for header based hashing. -* router: added support for RESPONSE_FLAGS and RESPONSE_CODE_DETAILS :ref:`header formatters - `. -* router: allow Rate Limiting Service to be called in case of missing request header for a descriptor if the :ref:`skip_if_absent ` field is set to true. -* runtime: added new gauge :ref:`deprecated_feature_seen_since_process_start ` that gets reset across hot restarts. -* server: added the option :option:`--drain-strategy` to enable different drain strategies for DrainManager::drainClose(). -* server: added :ref:`server.envoy_bug_failures ` statistic to count ENVOY_BUG failures. -* stats: added the option to :ref:`report counters as deltas ` to the metrics service stats sink. -* tracing: made tracing configuration fully dynamic and every HTTP connection manager - can now have a separate :ref:`tracing provider `. -* udp: upgraded :ref:`udp_proxy ` filter to v3 and promoted it out of alpha. - Deprecated ---------- -* Tracing provider configuration as part of :ref:`bootstrap config ` - has been deprecated in favor of configuration as part of :ref:`HTTP connection manager - `. -* The :ref:`HTTP Gzip filter ` has been deprecated in favor of - :ref:`Compressor `. -* The * :ref:`GoogleRE2.max_program_size` - field is now deprecated. Management servers are expected to validate regexp program sizes - instead of expecting the client to do it. Alternatively, the max program size can be enforced by Envoy via runtime. -* The :ref:`internal_redirect_action ` - field and :ref:`max_internal_redirects ` field - are now deprecated. This changes the implemented default cross scheme redirect behavior. - All cross scheme redirects are disallowed by default. To restore - the previous behavior, set allow_cross_scheme_redirect=true and use - :ref:`safe_cross_scheme`, - in :ref:`predicates `. -* File access logger fields :ref:`format `, :ref:`json_format ` and :ref:`typed_json_format ` are deprecated in favor of :ref:`log_format `. -* A warning is now logged when v2 xDS api is used. This behavior can be temporarily disabled by setting `envoy.reloadable_features.enable_deprecated_v2_api_warning` to `false`. -* Using cluster circuit breakers for DNS Cache is now deprecated in favor of :ref:`DNS cache circuit breakers `. This behavior can be temporarily disabled by setting `envoy.reloadable_features.enable_dns_cache_circuit_breakers` to `false`. diff --git a/docs/root/version_history/v1.15.0.rst b/docs/root/version_history/v1.15.0.rst new file mode 100644 index 000000000000..eb214a33cc40 --- /dev/null +++ b/docs/root/version_history/v1.15.0.rst @@ -0,0 +1,166 @@ +1.15.0 (July 7, 2020) +===================== + + +Incompatible Behavior Changes +----------------------------- +*Changes that are expected to cause an incompatibility if applicable; deployment changes are likely required* + +* build: official released binary is now built on Ubuntu 18.04, requires glibc >= 2.27. +* client_ssl_auth: the `auth_ip_white_list` stat has been renamed to + :ref:`auth_ip_allowlist `. +* header to metadata: on_header_missing rules with empty values are now rejected (they were skipped before). +* router: path_redirect now keeps query string by default. 
This behavior may be reverted by setting runtime feature `envoy.reloadable_features.preserve_query_string_in_path_redirects` to false. +* tls: fixed a bug where wilcard matching for "\*.foo.com" also matched domains of the form "a.b.foo.com". This behavior can be temporarily reverted by setting runtime feature `envoy.reloadable_features.fix_wildcard_matching` to false. + +Minor Behavior Changes +---------------------- +*Changes that may cause incompatibilities for some users, but should not for most* + +* access loggers: applied existing buffer limits to access logs, as well as :ref:`stats ` for logged / dropped logs. This can be reverted temporarily by setting runtime feature `envoy.reloadable_features.disallow_unbounded_access_logs` to false. +* build: runs as non-root inside Docker containers. Existing behaviour can be restored by setting the environment variable `ENVOY_UID` to `0`. `ENVOY_UID` and `ENVOY_GID` can be used to set the envoy user's `uid` and `gid` respectively. +* health check: in the health check filter the :ref:`percentage of healthy servers in upstream clusters ` is now interpreted as an integer. +* hot restart: added the option :option:`--use-dynamic-base-id` to select an unused base ID at startup and the option :option:`--base-id-path` to write the base id to a file (for reuse with later hot restarts). +* http: changed early error path for HTTP/1.1 so that responses consistently flow through the http connection manager, and the http filter chains. This behavior may be temporarily reverted by setting runtime feature `envoy.reloadable_features.early_errors_via_hcm` to false. +* http: fixed several bugs with applying correct connection close behavior across the http connection manager, health checker, and connection pool. This behavior may be temporarily reverted by setting runtime feature `envoy.reloadable_features.fix_connection_close` to false. +* http: fixed a bug where the upgrade header was not cleared on responses to non-upgrade requests. + Can be reverted temporarily by setting runtime feature `envoy.reloadable_features.fix_upgrade_response` to false. +* http: stopped overwriting `date` response headers. Responses without a `date` header will still have the header properly set. This behavior can be temporarily reverted by setting `envoy.reloadable_features.preserve_upstream_date` to false. +* http: stopped adding a synthetic path to CONNECT requests, meaning unconfigured CONNECT requests will now return 404 instead of 403. This behavior can be temporarily reverted by setting `envoy.reloadable_features.stop_faking_paths` to false. +* http: stopped allowing upstream 1xx or 204 responses with Transfer-Encoding or non-zero Content-Length headers. Content-Length of 0 is allowed, but stripped. This behavior can be temporarily reverted by setting `envoy.reloadable_features.strict_1xx_and_204_response_headers` to false. +* http: upstream connections will now automatically set ALPN when this value is not explicitly set elsewhere (e.g. on the upstream TLS config). This behavior may be temporarily reverted by setting runtime feature `envoy.reloadable_features.http_default_alpn` to false. +* listener: fixed a bug where when a static listener fails to be added to a worker, the listener was not removed from the active listener list. +* router: extended to allow retries of streaming or incomplete requests. This removes stat `rq_retry_skipped_request_not_complete`. +* router: extended to allow retries by default when upstream responds with :ref:`x-envoy-overloaded `. 
+ +Bug Fixes +--------- +*Changes expected to improve the state of the world and are unlikely to have negative effects* + +* adaptive concurrency: fixed a minRTT calculation bug where requests started before the concurrency + limit was pinned to the minimum would skew the new minRTT value if the replies arrived after the + start of the new minRTT window. +* buffer: fixed CVE-2020-12603 by avoiding fragmentation, and tracking of HTTP/2 data and control frames in the output buffer. +* grpc-json: fixed a bug when in trailers only gRPC response (e.g. error) HTTP status code is not being re-written. +* http: fixed a bug in the grpc_http1_reverse_bridge filter where header-only requests were forwarded with a non-zero content length. +* http: fixed a bug where in some cases slash was moved from path to query string when :ref:`merging of adjacent slashes` is enabled. +* http: fixed CVE-2020-12604 by changing :ref:`stream_idle_timeout ` + to also defend against an HTTP/2 peer that does not open stream window once an entire response has been buffered to be sent to a downstream client. +* http: fixed CVE-2020-12605 by including request URL in request header size computation, and rejecting partial headers that exceed configured limits. +* http: fixed several bugs with applying correct connection close behavior across the http connection manager, health checker, and connection pool. This behavior may be temporarily reverted by setting runtime feature `envoy.reloadable_features.fix_connection_close` to false. +* listener: fixed CVE-2020-8663 by adding runtime support for :ref:`per-listener limits ` on active/accepted connections. +* overload management: fixed CVE-2020-8663 by adding runtime support for :ref:`global limits ` on active/accepted connections. +* prometheus stats: fixed the sort order of output lines to comply with the standard. +* udp: the :ref:`reuse_port ` listener option must now be + specified for UDP listeners if concurrency is > 1. This previously crashed so is considered a + bug fix. +* upstream: fixed a bug where Envoy would panic when receiving a GRPC SERVICE_UNKNOWN status on the health check. + +Removed Config or Runtime +------------------------- +*Normally occurs at the end of the* :ref:`deprecation period ` + +* http: removed legacy connection pool code and their runtime features: `envoy.reloadable_features.new_http1_connection_pool_behavior` and + `envoy.reloadable_features.new_http2_connection_pool_behavior`. + +New Features +------------ + +* access loggers: added file access logger config :ref:`log_format `. +* access loggers: added GRPC_STATUS operator on logging format. +* access loggers: added gRPC access logger config added :ref:`API version ` to explicitly set the version of gRPC service endpoint and message to be used. +* access loggers: extended specifier for FilterStateFormatter to output :ref:`unstructured log string `. +* admin: added support for dumping EDS config at :ref:`/config_dump?include_eds `. +* aggregate cluster: made route :ref:`retry_priority ` predicates work with :ref:`this cluster type `. +* build: official released binary is now built on Ubuntu 18.04, requires glibc >= 2.27. +* build: official released binary is now built with Clang 10.0.0. +* cluster: added an extension point for configurable :ref:`upstreams `. +* compressor: exposed generic :ref:`compressor ` filter to users. +* config: added :ref:`identifier ` stat that reflects control plane identifier. +* config: added :ref:`version_text ` stat that reflects xDS version. 
+* decompressor: exposed generic :ref:`decompressor ` filter to users. +* dynamic forward proxy: added :ref:`SNI based dynamic forward proxy ` support. +* dynamic forward proxy: added configurable :ref:`circuit breakers ` for resolver on DNS cache. + This behavior can be temporarily disabled by the runtime feature `envoy.reloadable_features.enable_dns_cache_circuit_breakers`. + If this runtime feature is disabled, the upstream circuit breakers for the cluster will be used even if the :ref:`DNS Cache circuit breakers ` are configured. +* dynamic forward proxy: added :ref:`allow_insecure_cluster_options` to allow disabling of auto_san_validation and auto_sni. +* ext_authz filter: added :ref:`v2 deny_at_disable `, :ref:`v3 deny_at_disable `. This allows force denying protected paths while filter gets disabled, by setting this key to true. +* ext_authz filter: added API version field for both :ref:`HTTP ` + and :ref:`Network ` filters to explicitly set the version of gRPC service endpoint and message to be used. +* ext_authz filter: added :ref:`v3 allowed_upstream_headers_to_append ` to allow appending multiple header entries (returned by the authorization server) with the same key to the original request headers. +* fault: added support for controlling the percentage of requests that abort, delay and response rate limits faults + are applied to using :ref:`HTTP headers ` to the HTTP fault filter. +* fault: added support for specifying grpc_status code in abort faults using + :ref:`HTTP header ` or abort fault configuration in HTTP fault filter. +* filter: added `upstram_rq_time` stats to the GPRC stats filter. + Disabled by default and can be enabled via :ref:`enable_upstream_stats `. +* grpc: added support for Google gRPC :ref:`custom channel arguments `. +* grpc-json: added support for streaming response using + `google.api.HttpBody `_. +* grpc-json: send a `x-envoy-original-method` header to grpc services. +* gzip filter: added option to set zlib's next output buffer size. +* hds: updated to allow to explicitly set the API version of gRPC service endpoint and message to be used. +* header to metadata: added support for regex substitutions on header values. +* health checks: allowed configuring health check transport sockets by specifying :ref:`transport socket match criteria `. +* http: added :ref:`local_reply config ` to http_connection_manager to customize :ref:`local reply `. +* http: added :ref:`stripping port from host header ` support. +* http: added support for proxying CONNECT requests, terminating CONNECT requests, and converting raw TCP streams into HTTP/2 CONNECT requests. See :ref:`upgrade documentation` for details. +* listener: added in place filter chain update flow for tcp listener update which doesn't close connections if the corresponding network filter chain is equivalent during the listener update. + Can be disabled by setting runtime feature `envoy.reloadable_features.listener_in_place_filterchain_update` to false. + Also added additional draining filter chain stat for :ref:`listener manager ` to track the number of draining filter chains and the number of in place update attempts. +* logger: added :option:`--log-format-prefix-with-location` command line option to prefix '%v' with file path and line number. 
+* lrs: added new *envoy_api_field_service.load_stats.v2.LoadStatsResponse.send_all_clusters* field + in LRS response, which allows management servers to avoid explicitly listing all clusters it is + interested in; behavior is allowed based on new "envoy.lrs.supports_send_all_clusters" capability + in :ref:`client_features` field. +* lrs: updated to allow to explicitly set the API version of gRPC service endpoint and message to be used. +* lua: added :ref:`per route config ` for Lua filter. +* lua: added tracing to the ``httpCall()`` API. +* metrics service: added :ref:`API version ` to explicitly set the version of gRPC service endpoint and message to be used. +* network filters: added a :ref:`postgres proxy filter `. +* network filters: added a :ref:`rocketmq proxy filter `. +* performance: enabled stats symbol table implementation by default. To disable it, add + `--use-fake-symbol-table 1` to the command-line arguments when starting Envoy. +* ratelimit: added support for use of dynamic metadata :ref:`dynamic_metadata ` as a ratelimit action. +* ratelimit: added :ref:`API version ` to explicitly set the version of gRPC service endpoint and message to be used. +* ratelimit: support specifying dynamic overrides in rate limit descriptors using :ref:`limit override ` config. +* redis: added acl support :ref:`downstream_auth_username ` for downstream client ACL authentication, and :ref:`auth_username ` to configure authentication usernames for upstream Redis 6+ server clusters with ACL enabled. +* regex: added support for enforcing max program size via runtime and stats to monitor program size for :ref:`Google RE2 `. +* request_id: added to :ref:`always_set_request_id_in_response setting ` + to set :ref:`x-request-id ` header in response even if + tracing is not forced. +* router: added more fine grained internal redirect configs to the :ref:`internal_redirect_policy + ` field. +* router: added regex substitution support for header based hashing. +* router: added support for RESPONSE_FLAGS and RESPONSE_CODE_DETAILS :ref:`header formatters + `. +* router: allow Rate Limiting Service to be called in case of missing request header for a descriptor if the :ref:`skip_if_absent ` field is set to true. +* runtime: added new gauge :ref:`deprecated_feature_seen_since_process_start ` that gets reset across hot restarts. +* server: added the option :option:`--drain-strategy` to enable different drain strategies for DrainManager::drainClose(). +* server: added :ref:`server.envoy_bug_failures ` statistic to count ENVOY_BUG failures. +* stats: added the option to :ref:`report counters as deltas ` to the metrics service stats sink. +* tracing: made tracing configuration fully dynamic and every HTTP connection manager + can now have a separate :ref:`tracing provider `. +* udp: upgraded :ref:`udp_proxy ` filter to v3 and promoted it out of alpha. + +Deprecated +---------- + +* Tracing provider configuration as part of :ref:`bootstrap config ` + has been deprecated in favor of configuration as part of :ref:`HTTP connection manager + `. +* The :ref:`HTTP Gzip filter ` has been deprecated in favor of + :ref:`Compressor `. +* The * :ref:`GoogleRE2.max_program_size` + field is now deprecated. Management servers are expected to validate regexp program sizes + instead of expecting the client to do it. Alternatively, the max program size can be enforced by Envoy via runtime. +* The :ref:`internal_redirect_action ` + field and :ref:`max_internal_redirects ` field + are now deprecated. 
This changes the implemented default cross scheme redirect behavior. + All cross scheme redirects are disallowed by default. To restore + the previous behavior, set allow_cross_scheme_redirect=true and use + :ref:`safe_cross_scheme`, + in :ref:`predicates `. +* File access logger fields :ref:`format `, :ref:`json_format ` and :ref:`typed_json_format ` are deprecated in favor of :ref:`log_format `. +* A warning is now logged when v2 xDS api is used. This behavior can be temporarily disabled by setting `envoy.reloadable_features.enable_deprecated_v2_api_warning` to `false`. +* Using cluster circuit breakers for DNS Cache is now deprecated in favor of :ref:`DNS cache circuit breakers `. This behavior can be temporarily disabled by setting `envoy.reloadable_features.enable_dns_cache_circuit_breakers` to `false`. diff --git a/docs/root/version_history/version_history.rst b/docs/root/version_history/version_history.rst index 2d7744bf2310..07db664892d4 100644 --- a/docs/root/version_history/version_history.rst +++ b/docs/root/version_history/version_history.rst @@ -7,6 +7,7 @@ Version history :titlesonly: current + v1.15.0 v1.14.3 v1.14.2 v1.14.1 From 363b104ce88854c2ffcce46185c5cc4d448d4079 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Tue, 7 Jul 2020 18:27:10 -0400 Subject: [PATCH 546/909] threadlocal: avoiding a dynamic cast in opt builds (#11900) requested follow-up PR from #11796 as it's more consistent with our style guide. Risk Level: low Testing: n/a Docs Changes: n/a Release Notes: n/a Signed-off-by: Alyssa Wilk --- include/envoy/runtime/BUILD | 1 + include/envoy/runtime/runtime.h | 5 ++--- include/envoy/thread_local/thread_local.h | 10 +++++++--- source/common/runtime/runtime_impl.h | 4 +--- 4 files changed, 11 insertions(+), 9 deletions(-) diff --git a/include/envoy/runtime/BUILD b/include/envoy/runtime/BUILD index 5fee4e86610e..cb0aff14c0ef 100644 --- a/include/envoy/runtime/BUILD +++ b/include/envoy/runtime/BUILD @@ -14,6 +14,7 @@ envoy_cc_library( external_deps = ["abseil_optional"], deps = [ "//include/envoy/stats:stats_interface", + "//include/envoy/thread_local:thread_local_interface", "//source/common/common:assert_lib", "//source/common/singleton:threadsafe_singleton", "@envoy_api//envoy/type/v3:pkg_cc_proto", diff --git a/include/envoy/runtime/runtime.h b/include/envoy/runtime/runtime.h index 57df07919119..6b7f17191e75 100644 --- a/include/envoy/runtime/runtime.h +++ b/include/envoy/runtime/runtime.h @@ -10,6 +10,7 @@ #include "envoy/common/pure.h" #include "envoy/stats/store.h" +#include "envoy/thread_local/thread_local.h" #include "envoy/type/v3/percent.pb.h" #include "common/common/assert.h" @@ -70,10 +71,8 @@ using RandomGeneratorPtr = std::unique_ptr; /** * A snapshot of runtime data. */ -class Snapshot { +class Snapshot : public ThreadLocal::ThreadLocalObject { public: - virtual ~Snapshot() = default; - struct Entry { std::string raw_string_value_; absl::optional uint_value_; diff --git a/include/envoy/thread_local/thread_local.h b/include/envoy/thread_local/thread_local.h index 41c77d730d19..683617634a20 100644 --- a/include/envoy/thread_local/thread_local.h +++ b/include/envoy/thread_local/thread_local.h @@ -45,10 +45,14 @@ class Slot { /** * This is a helper on top of get() that casts the object stored in the slot to the specified - * type. Since the slot only stores pointers to the base interface, dynamic_cast provides some - * level of protection via RTTI. + * type. 
Since the slot only stores pointers to the base interface, the static_cast operates + * in production for performance, and the dynamic_cast validates correctness in tests and debug + * builds. */ - template T& getTyped() { return *std::dynamic_pointer_cast(get()); } + template T& getTyped() { + ASSERT(std::dynamic_pointer_cast(get()) != nullptr); + return *static_cast(get().get()); + } /** * Run a callback on all registered threads. diff --git a/source/common/runtime/runtime_impl.h b/source/common/runtime/runtime_impl.h index 387497ac4363..22426335eddc 100644 --- a/source/common/runtime/runtime_impl.h +++ b/source/common/runtime/runtime_impl.h @@ -73,9 +73,7 @@ struct RuntimeStats { /** * Implementation of Snapshot whose source is the vector of layers passed to the constructor. */ -class SnapshotImpl : public Snapshot, - public ThreadLocal::ThreadLocalObject, - Logger::Loggable { +class SnapshotImpl : public Snapshot, Logger::Loggable { public: SnapshotImpl(RandomGenerator& generator, RuntimeStats& stats, std::vector&& layers); From 59605339c86228140ab814c20ae793768d8cc069 Mon Sep 17 00:00:00 2001 From: antonio Date: Tue, 7 Jul 2020 18:27:39 -0400 Subject: [PATCH 547/909] connection: Do not reset delayed closed timer if doWrite consumes 0 bytes from the output buffer. (#11833) Commit Message: connection: Do not reset delayed closed timer if doWrite consumes 0 bytes from the output buffer. Additional Description: Only reset the delayed close timer if the write attempt made progress. This works around spurious fd Write events which are delivered to a connection even after it manages to fully drain the output buffer and should be waiting for client FIN or the delay close timer to expire. This is known to happen when listening for level events instead of edge-trigger fd events. Risk Level: medium, changes to timeout behavior. 
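In essence, the change guards the re-arming of the delayed close timer in ConnectionImpl::onWriteReady() on write progress; a simplified sketch of the added condition (the actual change is in the diff below):

    // Re-arm the delayed close timer only when this write attempt drained
    // bytes from the output buffer; spurious write events that make no
    // progress no longer reset it.
    if (result.bytes_processed_ > 0) {
      delayed_close_timer_->enableTimer(delayed_close_timeout_);
    }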
Testing: unit Docs Changes: n/a Release Notes: n/a Fixes #11829 Signed-off-by: Antonio Vicente --- source/common/network/connection_impl.cc | 9 +- test/common/network/connection_impl_test.cc | 107 +++++++++++++++++++- 2 files changed, 110 insertions(+), 6 deletions(-) diff --git a/source/common/network/connection_impl.cc b/source/common/network/connection_impl.cc index 649e8057d6d2..2abbea352b76 100644 --- a/source/common/network/connection_impl.cc +++ b/source/common/network/connection_impl.cc @@ -645,15 +645,18 @@ void ConnectionImpl::onWriteReady() { } else if ((inDelayedClose() && new_buffer_size == 0) || bothSidesHalfClosed()) { ENVOY_CONN_LOG(debug, "write flush complete", *this); if (delayed_close_state_ == DelayedCloseState::CloseAfterFlushAndWait) { - ASSERT(delayed_close_timer_ != nullptr); - delayed_close_timer_->enableTimer(delayed_close_timeout_); + ASSERT(delayed_close_timer_ != nullptr && delayed_close_timer_->enabled()); + if (result.bytes_processed_ > 0) { + delayed_close_timer_->enableTimer(delayed_close_timeout_); + } } else { ASSERT(bothSidesHalfClosed() || delayed_close_state_ == DelayedCloseState::CloseAfterFlush); closeConnectionImmediately(); } } else { ASSERT(result.action_ == PostIoAction::KeepOpen); - if (delayed_close_timer_ != nullptr) { + ASSERT(!delayed_close_timer_ || delayed_close_timer_->enabled()); + if (delayed_close_timer_ != nullptr && result.bytes_processed_ > 0) { delayed_close_timer_->enableTimer(delayed_close_timeout_); } if (result.bytes_processed_ > 0) { diff --git a/test/common/network/connection_impl_test.cc b/test/common/network/connection_impl_test.cc index 72168e2a666d..61c4bb2e6348 100644 --- a/test/common/network/connection_impl_test.cc +++ b/test/common/network/connection_impl_test.cc @@ -1455,6 +1455,11 @@ TEST_P(ConnectionImplTest, DelayedCloseTimerResetWithPendingWriteBufferFlushes) std::make_unique(std::move(io_handle), nullptr, nullptr), std::move(mocks.transport_socket_), stream_info_, true); +#ifndef NDEBUG + // Ignore timer enabled() calls used to check timer state in ASSERTs. + EXPECT_CALL(*mocks.timer_, enabled()).Times(AnyNumber()); +#endif + InSequence s1; // The actual timeout is insignificant, we just need to enable delayed close processing by // setting it to > 0. @@ -1477,18 +1482,114 @@ TEST_P(ConnectionImplTest, DelayedCloseTimerResetWithPendingWriteBufferFlushes) // The write ready event cb (ConnectionImpl::onWriteReady()) will reset the timer to its // original timeout value to avoid triggering while the write buffer is being actively flushed. EXPECT_CALL(*transport_socket, doWrite(BufferStringEqual("data"), _)) - .WillOnce(Invoke([&](Buffer::Instance&, bool) -> IoResult { + .WillOnce(Invoke([&](Buffer::Instance& buffer, bool) -> IoResult { // Partial flush. - return IoResult{PostIoAction::KeepOpen, 1, false}; + uint64_t bytes_drained = 1; + buffer.drain(bytes_drained); + return IoResult{PostIoAction::KeepOpen, bytes_drained, false}; + })); + EXPECT_CALL(*mocks.timer_, enableTimer(timeout, _)).Times(1); + (*mocks.file_ready_cb_)(Event::FileReadyType::Write); + + EXPECT_CALL(*transport_socket, doWrite(BufferStringEqual("ata"), _)) + .WillOnce(Invoke([&](Buffer::Instance& buffer, bool) -> IoResult { + // Flush the entire buffer. 
+ uint64_t bytes_drained = buffer.length(); + buffer.drain(buffer.length()); + return IoResult{PostIoAction::KeepOpen, bytes_drained, false}; })); EXPECT_CALL(*mocks.timer_, enableTimer(timeout, _)).Times(1); (*mocks.file_ready_cb_)(Event::FileReadyType::Write); + // Force the delayed close timeout to trigger so the connection is cleaned up. + mocks.timer_->invokeCallback(); +} + +// Test that the delayed close timer is not reset by spurious fd Write events that either consume 0 +// bytes from the output buffer or are delivered after close(FlushWriteAndDelay). +TEST_P(ConnectionImplTest, IgnoreSpuriousFdWriteEventsDuringFlushWriteAndDelay) { + ConnectionMocks mocks = createConnectionMocks(); + MockTransportSocket* transport_socket = mocks.transport_socket_.get(); + IoHandlePtr io_handle = std::make_unique(0); + auto server_connection = std::make_unique( + *mocks.dispatcher_, + std::make_unique(std::move(io_handle), nullptr, nullptr), + std::move(mocks.transport_socket_), stream_info_, true); + +#ifndef NDEBUG + // Ignore timer enabled() calls used to check timer state in ASSERTs. + EXPECT_CALL(*mocks.timer_, enabled()).Times(AnyNumber()); +#endif + + InSequence s1; + // The actual timeout is insignificant, we just need to enable delayed close processing by + // setting it to > 0. + auto timeout = std::chrono::milliseconds(100); + server_connection->setDelayedCloseTimeout(timeout); + + EXPECT_CALL(*mocks.file_event_, activate(Event::FileReadyType::Write)) + .WillOnce(Invoke(*mocks.file_ready_cb_)); + EXPECT_CALL(*transport_socket, doWrite(BufferStringEqual("data"), _)) + .WillOnce(Invoke([&](Buffer::Instance&, bool) -> IoResult { + // Do not drain the buffer and return 0 bytes processed to simulate backpressure. + return IoResult{PostIoAction::KeepOpen, 0, false}; + })); + Buffer::OwnedImpl data("data"); + server_connection->write(data, false); + + EXPECT_CALL(*mocks.timer_, enableTimer(timeout, _)).Times(1); + server_connection->close(ConnectionCloseType::FlushWriteAndDelay); + + // The write ready event cb (ConnectionImpl::onWriteReady()) will reset the timer to its + // original timeout value to avoid triggering while the write buffer is being actively flushed. EXPECT_CALL(*transport_socket, doWrite(BufferStringEqual("data"), _)) + .WillOnce(Invoke([&](Buffer::Instance& buffer, bool) -> IoResult { + // Partial flush. + uint64_t bytes_drained = 1; + buffer.drain(bytes_drained); + return IoResult{PostIoAction::KeepOpen, bytes_drained, false}; + })); + EXPECT_CALL(*mocks.timer_, enableTimer(timeout, _)).Times(1); + (*mocks.file_ready_cb_)(Event::FileReadyType::Write); + + // Handle a write event and drain 0 bytes from the buffer. Verify that the timer is not reset. + EXPECT_CALL(*transport_socket, doWrite(BufferStringEqual("ata"), _)) + .WillOnce(Invoke([&](Buffer::Instance&, bool) -> IoResult { + // Don't consume any bytes. + return IoResult{PostIoAction::KeepOpen, 0, false}; + })); + EXPECT_CALL(*mocks.timer_, enableTimer(timeout, _)).Times(0); + (*mocks.file_ready_cb_)(Event::FileReadyType::Write); + + // Handle a write event and drain the remainder of the buffer. Verify that the timer is reset. + EXPECT_CALL(*transport_socket, doWrite(BufferStringEqual("ata"), _)) .WillOnce(Invoke([&](Buffer::Instance& buffer, bool) -> IoResult { // Flush the entire buffer. 
+ ASSERT(buffer.length() > 0);
+ uint64_t bytes_drained = buffer.length();
 buffer.drain(buffer.length());
- return IoResult{PostIoAction::KeepOpen, buffer.length(), false};
+ EXPECT_EQ(server_connection->state(), Connection::State::Closing);
+ return IoResult{PostIoAction::KeepOpen, bytes_drained, false};
+ }));
+ EXPECT_CALL(*mocks.timer_, enableTimer(timeout, _)).Times(1);
+ (*mocks.file_ready_cb_)(Event::FileReadyType::Write);
+
+ // Handle a write event after entering the half-closed state. Verify that the timer is not reset
+ // because write consumed 0 bytes from the empty buffer.
+ EXPECT_CALL(*transport_socket, doWrite(BufferStringEqual(""), _))
+ .WillOnce(Invoke([&](Buffer::Instance&, bool) -> IoResult {
+ EXPECT_EQ(server_connection->state(), Connection::State::Closing);
+ return IoResult{PostIoAction::KeepOpen, 0, false};
+ }));
+ EXPECT_CALL(*mocks.timer_, enableTimer(timeout, _)).Times(0);
+ (*mocks.file_ready_cb_)(Event::FileReadyType::Write);
+
+ // Handle a write event that somehow drains bytes from an empty output buffer. Since
+ // some bytes were consumed, the timer is reset.
+ EXPECT_CALL(*transport_socket, doWrite(BufferStringEqual(""), _))
+ .WillOnce(Invoke([&](Buffer::Instance&, bool) -> IoResult {
+ EXPECT_EQ(server_connection->state(), Connection::State::Closing);
+ return IoResult{PostIoAction::KeepOpen, 1, false};
+ }));
+ EXPECT_CALL(*mocks.timer_, enableTimer(timeout, _)).Times(1);
+ (*mocks.file_ready_cb_)(Event::FileReadyType::Write);

From 9ad964db67b4d7bc9492b3cc097ba6f933e797af Mon Sep 17 00:00:00 2001
From: Yifan Yang
Date: Tue, 7 Jul 2020 19:28:51 -0400
Subject: [PATCH 548/909] preliminary PR for porting Envoy to C++17 (#11840)

This is the PR that does the setup work needed to fix some incompatibility issues between the current codebase and C++17. This is a part of the draft PR #11570 that intends to port Envoy to C++17, sans all the changes to build configurations.

Signed-off-by: Yifan Yang <needyyang@google.com>
---
 source/common/formatter/substitution_formatter.cc | 2 +-
 source/common/grpc/google_async_client_impl.h | 2 +-
 source/common/upstream/outlier_detection_impl.h | 2 +-
 source/extensions/common/aws/credentials_provider.h | 1 +
 .../extensions/filters/http/grpc_http1_reverse_bridge/filter.cc | 2 +-
 tools/type_whisperer/api_type_db.h | 1 +
 6 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/source/common/formatter/substitution_formatter.cc b/source/common/formatter/substitution_formatter.cc
index 4400ca3f6510..aedc72ad87e1 100644
--- a/source/common/formatter/substitution_formatter.cc
+++ b/source/common/formatter/substitution_formatter.cc
@@ -424,7 +424,7 @@ class StreamInfoDurationFieldExtractor : public StreamInfoFormatter::FieldExtrac
 }
 private:
- absl::optional extractMillis(const StreamInfo::StreamInfo& stream_info) const {
+ absl::optional extractMillis(const StreamInfo::StreamInfo& stream_info) const {
 const auto time = field_extractor_(stream_info);
 if (time) {
 return std::chrono::duration_cast(time.value()).count();
diff --git a/source/common/grpc/google_async_client_impl.h b/source/common/grpc/google_async_client_impl.h
index f2dc3eded14d..75229bd5905f 100644
--- a/source/common/grpc/google_async_client_impl.h
+++ b/source/common/grpc/google_async_client_impl.h
@@ -255,7 +255,7 @@ class GoogleAsyncStreamImpl : public RawAsyncStream,
 // End-of-stream with no additional message.
PendingMessage() = default;
- const absl::optional buf_;
+ const absl::optional buf_{};
 const bool end_stream_{true};
 };
diff --git a/source/common/upstream/outlier_detection_impl.h b/source/common/upstream/outlier_detection_impl.h
index ff42473f3cc0..c51cb134bfbf 100644
--- a/source/common/upstream/outlier_detection_impl.h
+++ b/source/common/upstream/outlier_detection_impl.h
@@ -41,7 +41,7 @@ class DetectorHostMonitorNullImpl : public DetectorHostMonitor {
 double successRate(SuccessRateMonitorType) const override { return -1; }
 private:
- const absl::optional time_;
+ const absl::optional time_{};
 };
 /**
diff --git a/source/extensions/common/aws/credentials_provider.h b/source/extensions/common/aws/credentials_provider.h
index ed6bb312561c..7e1da55fb182 100644
--- a/source/extensions/common/aws/credentials_provider.h
+++ b/source/extensions/common/aws/credentials_provider.h
@@ -1,5 +1,6 @@
 #pragma once
+#include
 #include
 #include "envoy/common/pure.h"
diff --git a/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.cc b/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.cc
index 972b31275ff6..d13b360ee07f 100644
--- a/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.cc
+++ b/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.cc
@@ -140,7 +140,7 @@ Http::FilterHeadersStatus Filter::encodeHeaders(Http::ResponseHeaderMap& headers
 headers.setGrpcStatus(Envoy::Grpc::Status::WellKnownGrpcStatus::Unknown);
 headers.setStatus(enumToInt(Http::Code::OK));
- if (content_type != nullptr) {
+ if (!content_type.empty()) {
 headers.setContentType(content_type_);
 }
diff --git a/tools/type_whisperer/api_type_db.h b/tools/type_whisperer/api_type_db.h
index d5ee2dc7a324..cec5627588ea 100644
--- a/tools/type_whisperer/api_type_db.h
+++ b/tools/type_whisperer/api_type_db.h
@@ -1,5 +1,6 @@
 #pragma once
+#include
 #include
 #include "absl/strings/string_view.h"

From 11a4667d655d10bfed84c58fce9404d4b37cef45 Mon Sep 17 00:00:00 2001
From: Rei Shimizu
Date: Wed, 8 Jul 2020 14:37:53 +0900
Subject: [PATCH 549/909] dynamic_forward_proxy: cleanup integration test (#11891)

Commit Message: This is a cleanup of the dynamic forward proxy integration test. In particular, it adds a cluster circuit-breaking check, removes a redundant test fixture, and switches to a v3-based config.
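As shown in the diff below, the circuit-breaker coverage now spans both levels; a condensed sketch of the test wiring (the values come from the test's parameters, not from recommended settings):

    // Cluster-level circuit breaker set directly on the CDS cluster used by the test.
    cluster_.mutable_circuit_breakers()
        ->add_thresholds()
        ->mutable_max_pending_requests()
        ->set_value(max_pending_requests);
    // The DNS cache circuit breaker is configured separately via
    // dns_cache_circuit_breaker in the dynamic forward proxy cluster_type config,
    // and can be bypassed by disabling the runtime flag
    // envoy.reloadable_features.enable_dns_cache_circuit_breakers.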
Risk Level: Low Testing: Test only Docs Changes: N/A Release Notes: N/A Signed-off-by: Shikugawa --- .../proxy_filter_integration_test.cc | 180 ++++++++---------- 1 file changed, 76 insertions(+), 104 deletions(-) diff --git a/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_integration_test.cc b/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_integration_test.cc index 49c7c08f50ac..e066cf482805 100644 --- a/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_integration_test.cc +++ b/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_integration_test.cc @@ -18,20 +18,22 @@ class ProxyFilterIntegrationTest : public testing::TestWithParammutable_typed_config()->PackFrom(tls_context); } - const std::string cluster_type_config = - fmt::format(R"EOF( + const std::string cluster_type_config = fmt::format( + R"EOF( name: envoy.clusters.dynamic_forward_proxy typed_config: - "@type": type.googleapis.com/envoy.config.cluster.dynamic_forward_proxy.v2alpha.ClusterConfig + "@type": type.googleapis.com/envoy.extensions.clusters.dynamic_forward_proxy.v3.ClusterConfig dns_cache_config: name: foo dns_lookup_family: {} max_hosts: {} + dns_cache_circuit_breaker: + max_pending_requests: {} )EOF", - Network::Test::ipVersionToDnsFamily(GetParam()), max_hosts); + Network::Test::ipVersionToDnsFamily(GetParam()), max_hosts, max_pending_requests); TestUtility::loadFromYaml(cluster_type_config, *cluster_.mutable_cluster_type()); + cluster_.mutable_circuit_breakers() + ->add_thresholds() + ->mutable_max_pending_requests() + ->set_value(max_pending_requests); // Load the CDS cluster and wait for it to initialize. cds_helper_.setCds({cluster_}); @@ -93,6 +101,11 @@ name: envoy.clusters.dynamic_forward_proxy } } + void disableDnsCacheCircuitBreakers() { + config_helper_.addRuntimeOverride("envoy.reloadable_features.enable_dns_cache_circuit_breakers", + "false"); + } + bool upstream_tls_{}; std::string upstream_cert_name_{"upstreamlocalhost"}; CdsHelper cds_helper_; @@ -103,93 +116,10 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, ProxyFilterIntegrationTest, testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), TestUtility::ipTestParamsToString); -class ProxyFilterCircuitBreakerIntegrationTest : public ProxyFilterIntegrationTest { -public: - ProxyFilterCircuitBreakerIntegrationTest() = default; - - void setup(uint64_t max_hosts = 1024, uint32_t max_pending_requests = 0) { - setUpstreamProtocol(FakeHttpConnection::Type::HTTP1); - - const std::string filter = fmt::format(R"EOF( -name: dynamic_forward_proxy -typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.dynamic_forward_proxy.v3.FilterConfig - dns_cache_config: - name: foo - dns_lookup_family: {} - max_hosts: {} - dns_cache_circuit_breaker: - max_pending_requests: {} -)EOF", - Network::Test::ipVersionToDnsFamily(GetParam()), - max_hosts, max_pending_requests); - config_helper_.addFilter(filter); - - config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { - // Switch predefined cluster_0 to CDS filesystem sourcing. - bootstrap.mutable_dynamic_resources()->mutable_cds_config()->set_path(cds_helper_.cds_path()); - bootstrap.mutable_static_resources()->clear_clusters(); - }); - - // Enable dns cache circuit breakers. - config_helper_.addRuntimeOverride("envoy.reloadable_features.enable_dns_cache_circuit_breakers", - "true"); - - // Set validate_clusters to false to allow us to reference a CDS cluster. 
- config_helper_.addConfigModifier( - [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& - hcm) { hcm.mutable_route_config()->mutable_validate_clusters()->set_value(false); }); - - // Setup the initial CDS cluster. - cluster_.mutable_connect_timeout()->CopyFrom( - Protobuf::util::TimeUtil::MillisecondsToDuration(100)); - cluster_.set_name("cluster_0"); - cluster_.set_lb_policy(envoy::config::cluster::v3::Cluster::CLUSTER_PROVIDED); - - if (upstream_tls_) { - envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context; - auto* validation_context = - tls_context.mutable_common_tls_context()->mutable_validation_context(); - validation_context->mutable_trusted_ca()->set_filename( - TestEnvironment::runfilesPath("test/config/integration/certs/upstreamcacert.pem")); - cluster_.mutable_transport_socket()->set_name("envoy.transport_sockets.tls"); - cluster_.mutable_transport_socket()->mutable_typed_config()->PackFrom(tls_context); - } - - const std::string cluster_type_config = fmt::format( - R"EOF( - name: envoy.clusters.dynamic_forward_proxy - typed_config: - "@type": type.googleapis.com/envoy.extensions.clusters.dynamic_forward_proxy.v3.ClusterConfig - dns_cache_config: - name: foo - dns_lookup_family: {} - max_hosts: {} - dns_cache_circuit_breaker: - max_pending_requests: {} - )EOF", - Network::Test::ipVersionToDnsFamily(GetParam()), max_hosts, max_pending_requests); - - TestUtility::loadFromYaml(cluster_type_config, *cluster_.mutable_cluster_type()); - - // Load the CDS cluster and wait for it to initialize. - cds_helper_.setCds({cluster_}); - - HttpIntegrationTest::initialize(); - test_server_->waitForCounterEq("cluster_manager.cluster_added", 1); - test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 0); - - fake_upstreams_[0]->set_allow_unexpected_disconnects(true); - } -}; - -INSTANTIATE_TEST_SUITE_P(IpVersions, ProxyFilterCircuitBreakerIntegrationTest, - testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), - TestUtility::ipTestParamsToString); - -TEST_P(ProxyFilterCircuitBreakerIntegrationTest, Basic) { +// A basic test where we pause a request to lookup localhost, and then do another request which +// should hit the TLS cache. +TEST_P(ProxyFilterIntegrationTest, RequestWithBody) { setup(); - codec_client_ = makeHttpConnection(lookupPort("http")); const Http::TestRequestHeaderMapImpl request_headers{ {":method", "POST"}, @@ -198,18 +128,21 @@ TEST_P(ProxyFilterCircuitBreakerIntegrationTest, Basic) { {":authority", fmt::format("localhost:{}", fake_upstreams_[0]->localAddress()->ip()->port())}}; - auto response = codec_client_->makeRequestWithBody(request_headers, 1024); - response->waitForEndStream(); - EXPECT_EQ(1, test_server_->gauge("dns_cache.foo.circuit_breakers.rq_pending_open")); - EXPECT_EQ(1, test_server_->counter("dns_cache.foo.dns_rq_pending_overflow")->value()); + auto response = + sendRequestAndWaitForResponse(request_headers, 1024, default_response_headers_, 1024); + checkSimpleRequestSuccess(1024, 1024, response.get()); + EXPECT_EQ(1, test_server_->counter("dns_cache.foo.dns_query_attempt")->value()); + EXPECT_EQ(1, test_server_->counter("dns_cache.foo.host_added")->value()); - EXPECT_TRUE(response->complete()); - EXPECT_EQ("503", response->headers().Status()->value().getStringView()); + // Now send another request. This should hit the DNS cache. 
+ response = sendRequestAndWaitForResponse(request_headers, 512, default_response_headers_, 512); + checkSimpleRequestSuccess(512, 512, response.get()); + EXPECT_EQ(1, test_server_->counter("dns_cache.foo.dns_query_attempt")->value()); + EXPECT_EQ(1, test_server_->counter("dns_cache.foo.host_added")->value()); } -// A basic test where we pause a request to lookup localhost, and then do another request which -// should hit the TLS cache. -TEST_P(ProxyFilterIntegrationTest, RequestWithBody) { +TEST_P(ProxyFilterIntegrationTest, RequestWithBodyWithClusterCircuitBreaker) { + disableDnsCacheCircuitBreakers(); setup(); codec_client_ = makeHttpConnection(lookupPort("http")); const Http::TestRequestHeaderMapImpl request_headers{ @@ -397,5 +330,44 @@ TEST_P(ProxyFilterIntegrationTest, UpstreamTlsInvalidSAN) { EXPECT_EQ(1, test_server_->counter("cluster.cluster_0.ssl.fail_verify_san")->value()); } +TEST_P(ProxyFilterIntegrationTest, DnsCacheCircuitBreakersInvoked) { + setup(1024, 0); + + codec_client_ = makeHttpConnection(lookupPort("http")); + const Http::TestRequestHeaderMapImpl request_headers{ + {":method", "POST"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", + fmt::format("localhost:{}", fake_upstreams_[0]->localAddress()->ip()->port())}}; + + auto response = codec_client_->makeRequestWithBody(request_headers, 1024); + response->waitForEndStream(); + EXPECT_EQ(1, test_server_->counter("dns_cache.foo.dns_rq_pending_overflow")->value()); + + EXPECT_TRUE(response->complete()); + EXPECT_EQ("503", response->headers().Status()->value().getStringView()); +} + +TEST_P(ProxyFilterIntegrationTest, ClusterCircuitBreakersInvoked) { + disableDnsCacheCircuitBreakers(); + setup(1024, 0); + + codec_client_ = makeHttpConnection(lookupPort("http")); + const Http::TestRequestHeaderMapImpl request_headers{ + {":method", "POST"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", + fmt::format("localhost:{}", fake_upstreams_[0]->localAddress()->ip()->port())}}; + + auto response = codec_client_->makeRequestWithBody(request_headers, 1024); + response->waitForEndStream(); + EXPECT_EQ(1, test_server_->counter("cluster.cluster_0.upstream_rq_pending_overflow")->value()); + + EXPECT_TRUE(response->complete()); + EXPECT_EQ("503", response->headers().Status()->value().getStringView()); +} + } // namespace } // namespace Envoy From 5e9fb8a33c4711a08a49142a3b6fa84c129e2ae8 Mon Sep 17 00:00:00 2001 From: foreseeable Date: Wed, 8 Jul 2020 13:37:18 +0000 Subject: [PATCH 550/909] test: remove superfluous test dependencies (#11912) Commit Message: remove superfluous includes Additional Description: The monolith mock library mocks/server/mocks.h is included by several tests but never used. 
Remove them to speed up building phase Risk Level: low Testing: existing tests Docs Changes: N/A Release Notes: no Related Issues: #10917 Signed-off-by: Muge Chen --- test/common/config/BUILD | 3 ++- test/common/config/datasource_test.cc | 3 ++- test/common/network/BUILD | 4 +--- test/common/network/connection_impl_test.cc | 1 - test/common/network/listener_impl_test.cc | 2 +- test/common/network/udp_listener_impl_test.cc | 1 - test/extensions/clusters/aggregate/BUILD | 1 - .../extensions/clusters/aggregate/cluster_integration_test.cc | 1 - test/extensions/common/proxy_protocol/BUILD | 1 - .../common/proxy_protocol/proxy_protocol_regression_test.cc | 1 - test/extensions/filters/http/jwt_authn/BUILD | 1 - test/extensions/filters/http/jwt_authn/filter_test.cc | 1 - test/extensions/filters/network/rocketmq_proxy/BUILD | 2 +- .../filters/network/rocketmq_proxy/route_matcher_test.cc | 4 ++-- test/extensions/filters/udp/dns_filter/BUILD | 1 - .../extensions/filters/udp/dns_filter/dns_filter_fuzz_test.cc | 1 - test/integration/BUILD | 2 -- test/integration/cds_integration_test.cc | 1 - test/integration/sds_dynamic_integration_test.cc | 1 - test/integration/sds_static_integration_test.cc | 1 - test/integration/server.cc | 1 - test/integration/vhds_integration_test.cc | 1 - test/server/BUILD | 3 ++- test/server/connection_handler_test.cc | 3 ++- 24 files changed, 13 insertions(+), 28 deletions(-) diff --git a/test/common/config/BUILD b/test/common/config/BUILD index 42b59faa4aed..a89c45629e94 100644 --- a/test/common/config/BUILD +++ b/test/common/config/BUILD @@ -430,7 +430,8 @@ envoy_cc_test( "//source/common/protobuf:utility_lib", "//source/extensions/common/crypto:utility_lib", "//test/mocks/event:event_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/init:init_mocks", + "//test/mocks/runtime:runtime_mocks", "//test/mocks/upstream:upstream_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", diff --git a/test/common/config/datasource_test.cc b/test/common/config/datasource_test.cc index 1897c1a867ce..4b76c39b59f5 100644 --- a/test/common/config/datasource_test.cc +++ b/test/common/config/datasource_test.cc @@ -6,7 +6,8 @@ #include "common/protobuf/protobuf.h" #include "test/mocks/event/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/init/mocks.h" +#include "test/mocks/runtime/mocks.h" #include "test/mocks/upstream/mocks.h" #include "test/test_common/utility.h" diff --git a/test/common/network/BUILD b/test/common/network/BUILD index 7bfb1d0cbc0a..e8cf67a6b9d1 100644 --- a/test/common/network/BUILD +++ b/test/common/network/BUILD @@ -88,7 +88,6 @@ envoy_cc_test( "//test/mocks/buffer:buffer_mocks", "//test/mocks/event:event_mocks", "//test/mocks/network:network_mocks", - "//test/mocks/server:server_mocks", "//test/mocks/stats:stats_mocks", "//test/test_common:environment_lib", "//test/test_common:network_utility_lib", @@ -189,10 +188,10 @@ envoy_cc_test( "//source/common/network:listener_lib", "//source/common/network:utility_lib", "//source/common/stats:stats_lib", + "//source/common/stream_info:stream_info_lib", "//test/common/network:listener_impl_test_base_lib", "//test/mocks/network:network_mocks", "//test/mocks/runtime:runtime_mocks", - "//test/mocks/server:server_mocks", "//test/test_common:environment_lib", "//test/test_common:network_utility_lib", "//test/test_common:utility_lib", @@ -213,7 +212,6 @@ envoy_cc_test( "//source/common/stats:stats_lib", "//test/common/network:listener_impl_test_base_lib", 
"//test/mocks/network:network_mocks", - "//test/mocks/server:server_mocks", "//test/test_common:environment_lib", "//test/test_common:network_utility_lib", "//test/test_common:threadsafe_singleton_injector_lib", diff --git a/test/common/network/connection_impl_test.cc b/test/common/network/connection_impl_test.cc index 61c4bb2e6348..4922309e427b 100644 --- a/test/common/network/connection_impl_test.cc +++ b/test/common/network/connection_impl_test.cc @@ -19,7 +19,6 @@ #include "test/mocks/buffer/mocks.h" #include "test/mocks/event/mocks.h" #include "test/mocks/network/mocks.h" -#include "test/mocks/server/mocks.h" #include "test/mocks/stats/mocks.h" #include "test/test_common/environment.h" #include "test/test_common/network_utility.h" diff --git a/test/common/network/listener_impl_test.cc b/test/common/network/listener_impl_test.cc index 732ccf2460e3..9f6f515dac05 100644 --- a/test/common/network/listener_impl_test.cc +++ b/test/common/network/listener_impl_test.cc @@ -4,10 +4,10 @@ #include "common/network/address_impl.h" #include "common/network/listener_impl.h" #include "common/network/utility.h" +#include "common/stream_info/stream_info_impl.h" #include "test/common/network/listener_impl_test_base.h" #include "test/mocks/network/mocks.h" -#include "test/mocks/server/mocks.h" #include "test/test_common/environment.h" #include "test/test_common/network_utility.h" #include "test/test_common/test_runtime.h" diff --git a/test/common/network/udp_listener_impl_test.cc b/test/common/network/udp_listener_impl_test.cc index 0d139b887e43..f2cff8e49748 100644 --- a/test/common/network/udp_listener_impl_test.cc +++ b/test/common/network/udp_listener_impl_test.cc @@ -14,7 +14,6 @@ #include "test/common/network/listener_impl_test_base.h" #include "test/mocks/api/mocks.h" #include "test/mocks/network/mocks.h" -#include "test/mocks/server/mocks.h" #include "test/test_common/environment.h" #include "test/test_common/network_utility.h" #include "test/test_common/threadsafe_singleton_injector.h" diff --git a/test/extensions/clusters/aggregate/BUILD b/test/extensions/clusters/aggregate/BUILD index 2e445737fcf9..3001535ca9ad 100644 --- a/test/extensions/clusters/aggregate/BUILD +++ b/test/extensions/clusters/aggregate/BUILD @@ -64,7 +64,6 @@ envoy_extension_cc_test( "//test/integration:http_integration_lib", "//test/integration:integration_lib", "//test/mocks/runtime:runtime_mocks", - "//test/mocks/server:server_mocks", "//test/test_common:network_utility_lib", "//test/test_common:resources_lib", "//test/test_common:utility_lib", diff --git a/test/extensions/clusters/aggregate/cluster_integration_test.cc b/test/extensions/clusters/aggregate/cluster_integration_test.cc index 5172bc8bbc4e..bd6af30b0808 100644 --- a/test/extensions/clusters/aggregate/cluster_integration_test.cc +++ b/test/extensions/clusters/aggregate/cluster_integration_test.cc @@ -9,7 +9,6 @@ #include "test/common/grpc/grpc_client_integration.h" #include "test/integration/http_integration.h" #include "test/integration/utility.h" -#include "test/mocks/server/mocks.h" #include "test/test_common/network_utility.h" #include "test/test_common/resources.h" #include "test/test_common/simulated_time_system.h" diff --git a/test/extensions/common/proxy_protocol/BUILD b/test/extensions/common/proxy_protocol/BUILD index 90a37c8cd60d..414674d84711 100644 --- a/test/extensions/common/proxy_protocol/BUILD +++ b/test/extensions/common/proxy_protocol/BUILD @@ -34,7 +34,6 @@ envoy_cc_test( "//source/server:connection_handler_lib", 
"//test/mocks/buffer:buffer_mocks", "//test/mocks/network:network_mocks", - "//test/mocks/server:server_mocks", "//test/test_common:environment_lib", "//test/test_common:network_utility_lib", "//test/test_common:utility_lib", diff --git a/test/extensions/common/proxy_protocol/proxy_protocol_regression_test.cc b/test/extensions/common/proxy_protocol/proxy_protocol_regression_test.cc index cd0ed34c9f3f..8654352291de 100644 --- a/test/extensions/common/proxy_protocol/proxy_protocol_regression_test.cc +++ b/test/extensions/common/proxy_protocol/proxy_protocol_regression_test.cc @@ -13,7 +13,6 @@ #include "test/mocks/buffer/mocks.h" #include "test/mocks/network/mocks.h" -#include "test/mocks/server/mocks.h" #include "test/test_common/environment.h" #include "test/test_common/network_utility.h" #include "test/test_common/utility.h" diff --git a/test/extensions/filters/http/jwt_authn/BUILD b/test/extensions/filters/http/jwt_authn/BUILD index f19bf5269283..14b745e50b81 100644 --- a/test/extensions/filters/http/jwt_authn/BUILD +++ b/test/extensions/filters/http/jwt_authn/BUILD @@ -47,7 +47,6 @@ envoy_extension_cc_test( deps = [ ":mock_lib", "//source/extensions/filters/http/jwt_authn:filter_lib", - "//test/mocks/server:server_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/extensions/filters/http/jwt_authn/v3:pkg_cc_proto", ], diff --git a/test/extensions/filters/http/jwt_authn/filter_test.cc b/test/extensions/filters/http/jwt_authn/filter_test.cc index b3a97f90c92a..0f8f1ff9c549 100644 --- a/test/extensions/filters/http/jwt_authn/filter_test.cc +++ b/test/extensions/filters/http/jwt_authn/filter_test.cc @@ -4,7 +4,6 @@ #include "extensions/filters/http/well_known_names.h" #include "test/extensions/filters/http/jwt_authn/mock.h" -#include "test/mocks/server/mocks.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" diff --git a/test/extensions/filters/network/rocketmq_proxy/BUILD b/test/extensions/filters/network/rocketmq_proxy/BUILD index c5cf5e5b34f0..36381bf3f83d 100644 --- a/test/extensions/filters/network/rocketmq_proxy/BUILD +++ b/test/extensions/filters/network/rocketmq_proxy/BUILD @@ -131,7 +131,7 @@ envoy_extension_cc_test( extension_name = "envoy.filters.network.rocketmq_proxy", deps = [ "//source/extensions/filters/network/rocketmq_proxy/router:route_matcher", - "//test/mocks/server:server_mocks", + "//test/test_common:utility_lib", "@envoy_api//envoy/extensions/filters/network/rocketmq_proxy/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/filters/network/rocketmq_proxy/route_matcher_test.cc b/test/extensions/filters/network/rocketmq_proxy/route_matcher_test.cc index c908602fc25e..947e67481f9b 100644 --- a/test/extensions/filters/network/rocketmq_proxy/route_matcher_test.cc +++ b/test/extensions/filters/network/rocketmq_proxy/route_matcher_test.cc @@ -6,7 +6,7 @@ #include "extensions/filters/network/rocketmq_proxy/metadata.h" #include "extensions/filters/network/rocketmq_proxy/router/route_matcher.h" -#include "test/mocks/server/mocks.h" +#include "test/test_common/utility.h" #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -71,4 +71,4 @@ name: default_route } // namespace RocketmqProxy } // namespace NetworkFilters } // namespace Extensions -} // namespace Envoy \ No newline at end of file +} // namespace Envoy diff --git a/test/extensions/filters/udp/dns_filter/BUILD b/test/extensions/filters/udp/dns_filter/BUILD index 395367532530..2ac152fc7f1a 100644 --- a/test/extensions/filters/udp/dns_filter/BUILD +++ b/test/extensions/filters/udp/dns_filter/BUILD 
@@ -60,7 +60,6 @@ envoy_cc_fuzz_test( deps = [ "//source/extensions/filters/udp/dns_filter:dns_filter_lib", "//test/fuzz:utility_lib", - "//test/mocks/server:server_mocks", "//test/test_common:environment_lib", ], ) diff --git a/test/extensions/filters/udp/dns_filter/dns_filter_fuzz_test.cc b/test/extensions/filters/udp/dns_filter/dns_filter_fuzz_test.cc index 334147bf3b01..b8dfc48eb504 100644 --- a/test/extensions/filters/udp/dns_filter/dns_filter_fuzz_test.cc +++ b/test/extensions/filters/udp/dns_filter/dns_filter_fuzz_test.cc @@ -5,7 +5,6 @@ #include "test/fuzz/fuzz_runner.h" #include "test/fuzz/utility.h" #include "test/mocks/event/mocks.h" -#include "test/mocks/server/mocks.h" #include "test/test_common/environment.h" #include "gmock/gmock.h" diff --git a/test/integration/BUILD b/test/integration/BUILD index 0e927155b4c2..a4ef52e1ceb6 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -127,7 +127,6 @@ envoy_cc_test( "//source/common/protobuf:utility_lib", "//test/common/grpc:grpc_client_integration_lib", "//test/mocks/runtime:runtime_mocks", - "//test/mocks/server:server_mocks", "//test/test_common:network_utility_lib", "//test/test_common:resources_lib", "//test/test_common:utility_lib", @@ -223,7 +222,6 @@ envoy_cc_test( "//source/common/protobuf:utility_lib", "//test/common/grpc:grpc_client_integration_lib", "//test/mocks/runtime:runtime_mocks", - "//test/mocks/server:server_mocks", "//test/test_common:network_utility_lib", "//test/test_common:resources_lib", "//test/test_common:utility_lib", diff --git a/test/integration/cds_integration_test.cc b/test/integration/cds_integration_test.cc index 9ef423a0f7d1..93bfc5075d70 100644 --- a/test/integration/cds_integration_test.cc +++ b/test/integration/cds_integration_test.cc @@ -10,7 +10,6 @@ #include "test/common/grpc/grpc_client_integration.h" #include "test/integration/http_integration.h" #include "test/integration/utility.h" -#include "test/mocks/server/mocks.h" #include "test/test_common/network_utility.h" #include "test/test_common/resources.h" #include "test/test_common/simulated_time_system.h" diff --git a/test/integration/sds_dynamic_integration_test.cc b/test/integration/sds_dynamic_integration_test.cc index eaf5513374c1..f7aa58316a10 100644 --- a/test/integration/sds_dynamic_integration_test.cc +++ b/test/integration/sds_dynamic_integration_test.cc @@ -23,7 +23,6 @@ #include "test/integration/server.h" #include "test/integration/ssl_utility.h" #include "test/mocks/secret/mocks.h" -#include "test/mocks/server/mocks.h" #include "test/test_common/network_utility.h" #include "test/test_common/resources.h" #include "test/test_common/test_time_system.h" diff --git a/test/integration/sds_static_integration_test.cc b/test/integration/sds_static_integration_test.cc index 38f47be55031..da9e77c6e23f 100644 --- a/test/integration/sds_static_integration_test.cc +++ b/test/integration/sds_static_integration_test.cc @@ -17,7 +17,6 @@ #include "test/integration/server.h" #include "test/integration/ssl_utility.h" #include "test/mocks/secret/mocks.h" -#include "test/mocks/server/mocks.h" #include "test/test_common/network_utility.h" #include "test/test_common/test_time_system.h" #include "test/test_common/utility.h" diff --git a/test/integration/server.cc b/test/integration/server.cc index 4b7d2beecb2e..2903c39d34ba 100644 --- a/test/integration/server.cc +++ b/test/integration/server.cc @@ -20,7 +20,6 @@ #include "test/integration/integration.h" #include "test/integration/utility.h" #include "test/mocks/runtime/mocks.h" 
-#include "test/mocks/server/mocks.h" #include "test/test_common/environment.h" #include "absl/strings/str_replace.h" diff --git a/test/integration/vhds_integration_test.cc b/test/integration/vhds_integration_test.cc index 89ecb0d42af1..879fbcf8b90b 100644 --- a/test/integration/vhds_integration_test.cc +++ b/test/integration/vhds_integration_test.cc @@ -10,7 +10,6 @@ #include "test/common/grpc/grpc_client_integration.h" #include "test/integration/http_integration.h" #include "test/integration/utility.h" -#include "test/mocks/server/mocks.h" #include "test/test_common/network_utility.h" #include "test/test_common/resources.h" #include "test/test_common/simulated_time_system.h" diff --git a/test/server/BUILD b/test/server/BUILD index 09446f8f0c30..ac23c10adf56 100644 --- a/test/server/BUILD +++ b/test/server/BUILD @@ -69,13 +69,14 @@ envoy_cc_test( srcs = ["connection_handler_test.cc"], deps = [ "//source/common/common:utility_lib", + "//source/common/config:utility_lib", "//source/common/network:address_lib", "//source/common/network:connection_balancer_lib", "//source/common/stats:stats_lib", "//source/server:active_raw_udp_listener_config", "//source/server:connection_handler_lib", + "//test/mocks/api:api_mocks", "//test/mocks/network:network_mocks", - "//test/mocks/server:server_mocks", "//test/test_common:network_utility_lib", "//test/test_common:threadsafe_singleton_injector_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", diff --git a/test/server/connection_handler_test.cc b/test/server/connection_handler_test.cc index a7f942f95d42..148874e612e3 100644 --- a/test/server/connection_handler_test.cc +++ b/test/server/connection_handler_test.cc @@ -6,6 +6,7 @@ #include "envoy/stats/scope.h" #include "common/common/utility.h" +#include "common/config/utility.h" #include "common/network/address_impl.h" #include "common/network/connection_balancer_impl.h" #include "common/network/io_socket_handle_impl.h" @@ -14,9 +15,9 @@ #include "server/connection_handler_impl.h" +#include "test/mocks/api/mocks.h" #include "test/mocks/common.h" #include "test/mocks/network/mocks.h" -#include "test/mocks/server/mocks.h" #include "test/test_common/network_utility.h" #include "test/test_common/threadsafe_singleton_injector.h" From 09b96a54129813e333e275ff9317682fc7676faf Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Wed, 8 Jul 2020 10:09:53 -0400 Subject: [PATCH 551/909] docs: updating release instructions (#11938) Risk Level: n/a Testing: n/a Docs Changes: yes Release Notes: meta Signed-off-by: Alyssa Wilk --- GOVERNANCE.md | 28 +++++++++++++++++++++++----- 1 file changed, 23 insertions(+), 5 deletions(-) diff --git a/GOVERNANCE.md b/GOVERNANCE.md index fc59ba470247..c58ace42a9a0 100644 --- a/GOVERNANCE.md +++ b/GOVERNANCE.md @@ -88,18 +88,20 @@ or you can subscribe to the iCal feed [here](https://app.opsgenie.com/webcal/get * Switch the [VERSION](VERSION) from a "dev" variant to a final variant. E.g., "1.6.0-dev" to "1.6.0". * Get a review and merge. +* Wait for tests to pass on [master](https://dev.azure.com/cncf/envoy/_build). * Create a [tagged release](https://github.com/envoyproxy/envoy/releases). The release should start with "v" and be followed by the version number. E.g., "v1.6.0". **This must match the [VERSION](VERSION).** -* Create a branch from the tagged release, e.g. "release/v1.6". It will be used for the +* From the envoy [landing page](https://github.com/envoyproxy/envoy) use the branch drop-down to create a branch + from the tagged release, e.g. "release/v1.6". 
It will be used for the [stable releases](RELEASES.md#stable-releases). * Monitor the AZP tag build to make sure that the final docker images get pushed along with the final docs. The final documentation will end up in the [envoyproxy.github.io repository](https://github.com/envoyproxy/envoyproxy.github.io/tree/master/docs/envoy). * Update the website ([example PR](https://github.com/envoyproxy/envoyproxy.github.io/pull/148)) for the new release. * Craft a witty/uplifting email and send it to all the email aliases including envoy-announce@. -* If possible post on Twitter (either have Matt do it or contact caniszczyk@ on Slack and have the - Envoy account post). +* Make sure we tweet the new release: either have Matt do it or email social@cncf.io and ask them to do an Envoy account + post. * Do a new PR to setup the next version * Update [VERSION](VERSION) to the next development release. E.g., "1.7.0-dev". * `git mv docs/root/version_history/current.rst docs/root/version_history/v1.6.0.rst`, filling in the previous @@ -112,8 +114,24 @@ or you can subscribe to the iCal feed [here](https://app.opsgenie.com/webcal/get 1.7.0 (Pending) =============== -Changes -------- +Incompatible Behavior Changes +----------------------------- +*Changes that are expected to cause an incompatibility if applicable; deployment changes are likely required* + +Minor Behavior Changes +---------------------- +*Changes that may cause incompatibilities for some users, but should not for most* + +Bug Fixes +--------- +*Changes expected to improve the state of the world and are unlikely to have negative effects* + +Removed Config or Runtime +------------------------- +*Normally occurs at the end of the* :ref:`deprecation period ` + +New Features +------------ Deprecated ---------- From 673cab860c76f2ba482e02e5de4639e72528b47c Mon Sep 17 00:00:00 2001 From: Arthur Yan <55563955+arthuryan-k@users.noreply.github.com> Date: Wed, 8 Jul 2020 10:28:03 -0400 Subject: [PATCH 552/909] fuzz: added fuzz test for listener filter original_dst (#11847) Commit Message: Added fuzz test for listener filter original_dst Created original_dst_corpus and populated with testcases (different protocol schemes) Created original_dst_fuzz_test.cc and original_dst_fuzz_test.proto, updated BUILD increased function and line coverage of original_dst.cc to 100%. fuzzer routes correctly through getOriginalDst() and addressFromSockAddr(), covers all valid cases / lines. 
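Note for anyone extending the corpus later (illustrative only, not part of this patch): each seed file is just a text-format OriginalDstTestCase, so a hypothetical extra pipe-address seed would be a single line such as

    address: "unix://tmp/another_server"

Unresolvable or malformed addresses also make useful seeds, since the fuzzer catches the EnvoyException thrown by resolveUrl() and returns early instead of crashing.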
Signed-off-by: Arthur Yan --- .../filters/listener/original_dst/BUILD | 18 ++++ .../original_dst_corpus/invalid_test | 1 + .../original_dst_corpus/ipv4_test | 1 + .../original_dst_corpus/ipv6_test | 1 + .../original_dst_corpus/unix_test | 1 + .../original_dst/original_dst_fuzz_test.cc | 85 +++++++++++++++++++ .../original_dst/original_dst_fuzz_test.proto | 7 ++ 7 files changed, 114 insertions(+) create mode 100644 test/extensions/filters/listener/original_dst/original_dst_corpus/invalid_test create mode 100644 test/extensions/filters/listener/original_dst/original_dst_corpus/ipv4_test create mode 100644 test/extensions/filters/listener/original_dst/original_dst_corpus/ipv6_test create mode 100644 test/extensions/filters/listener/original_dst/original_dst_corpus/unix_test create mode 100644 test/extensions/filters/listener/original_dst/original_dst_fuzz_test.cc create mode 100644 test/extensions/filters/listener/original_dst/original_dst_fuzz_test.proto diff --git a/test/extensions/filters/listener/original_dst/BUILD b/test/extensions/filters/listener/original_dst/BUILD index 4c2ad41b41ce..562775fc6cac 100644 --- a/test/extensions/filters/listener/original_dst/BUILD +++ b/test/extensions/filters/listener/original_dst/BUILD @@ -1,6 +1,8 @@ load( "//bazel:envoy_build_system.bzl", + "envoy_cc_fuzz_test", "envoy_package", + "envoy_proto_library", ) load( "//test/extensions:extensions_build_system.bzl", @@ -20,3 +22,19 @@ envoy_extension_cc_test( "//test/test_common:utility_lib", ], ) + +envoy_proto_library( + name = "original_dst_fuzz_test_proto", + srcs = ["original_dst_fuzz_test.proto"], +) + +envoy_cc_fuzz_test( + name = "original_dst_fuzz_test", + srcs = ["original_dst_fuzz_test.cc"], + corpus = "original_dst_corpus", + deps = [ + ":original_dst_fuzz_test_proto_cc_proto", + "//source/extensions/filters/listener/original_dst:original_dst_lib", + "//test/mocks/network:network_mocks", + ], +) diff --git a/test/extensions/filters/listener/original_dst/original_dst_corpus/invalid_test b/test/extensions/filters/listener/original_dst/original_dst_corpus/invalid_test new file mode 100644 index 000000000000..a015d2d6e09f --- /dev/null +++ b/test/extensions/filters/listener/original_dst/original_dst_corpus/invalid_test @@ -0,0 +1 @@ +address: "hello world" \ No newline at end of file diff --git a/test/extensions/filters/listener/original_dst/original_dst_corpus/ipv4_test b/test/extensions/filters/listener/original_dst/original_dst_corpus/ipv4_test new file mode 100644 index 000000000000..6a87711c09a6 --- /dev/null +++ b/test/extensions/filters/listener/original_dst/original_dst_corpus/ipv4_test @@ -0,0 +1 @@ +address: "tcp://0.0.0.0:0" \ No newline at end of file diff --git a/test/extensions/filters/listener/original_dst/original_dst_corpus/ipv6_test b/test/extensions/filters/listener/original_dst/original_dst_corpus/ipv6_test new file mode 100644 index 000000000000..9d8f333019e0 --- /dev/null +++ b/test/extensions/filters/listener/original_dst/original_dst_corpus/ipv6_test @@ -0,0 +1 @@ +address: "tcp://[a:b:c:d::]:0" \ No newline at end of file diff --git a/test/extensions/filters/listener/original_dst/original_dst_corpus/unix_test b/test/extensions/filters/listener/original_dst/original_dst_corpus/unix_test new file mode 100644 index 000000000000..7df146a6cabd --- /dev/null +++ b/test/extensions/filters/listener/original_dst/original_dst_corpus/unix_test @@ -0,0 +1 @@ +address: "unix://tmp/server" \ No newline at end of file diff --git 
a/test/extensions/filters/listener/original_dst/original_dst_fuzz_test.cc b/test/extensions/filters/listener/original_dst/original_dst_fuzz_test.cc new file mode 100644 index 000000000000..90ac50f389aa --- /dev/null +++ b/test/extensions/filters/listener/original_dst/original_dst_fuzz_test.cc @@ -0,0 +1,85 @@ +#include "common/network/utility.h" + +#include "extensions/filters/listener/original_dst/original_dst.h" + +#include "test/extensions/filters/listener/original_dst/original_dst_fuzz_test.pb.validate.h" +#include "test/fuzz/fuzz_runner.h" +#include "test/mocks/network/mocks.h" + +#include "gmock/gmock.h" + +namespace Envoy { +namespace Extensions { +namespace ListenerFilters { +namespace OriginalDst { + +class FakeConnectionSocket : public Network::MockConnectionSocket { + const Network::Address::InstanceConstSharedPtr& local_address_; + +public: + ~FakeConnectionSocket() override = default; + + FakeConnectionSocket(const Network::Address::InstanceConstSharedPtr& local_address) + : local_address_(local_address) {} + + const Network::Address::InstanceConstSharedPtr& localAddress() const override { + return local_address_; + } + + Network::Address::Type addressType() const override { return local_address_->type(); } + + absl::optional ipVersion() const override { + if (local_address_->type() != Network::Address::Type::Ip) { + return absl::nullopt; + } + + return local_address_->ip()->version(); + } + + Api::SysCallIntResult getSocketOption(int level, int, void* optval, socklen_t*) const override { + switch (level) { + case SOL_IPV6: + static_cast(optval)->ss_family = AF_INET6; + break; + case SOL_IP: + static_cast(optval)->ss_family = AF_INET; + break; + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } + + return Api::SysCallIntResult{0, 0}; + } +}; + +DEFINE_PROTO_FUZZER( + const envoy::extensions::filters::listener::original_dst::v3::OriginalDstTestCase& input) { + + try { + TestUtility::validate(input); + } catch (const ProtoValidationException& e) { + ENVOY_LOG_MISC(debug, "ProtoValidationException: {}", e.what()); + return; + } + + NiceMock callbacks; + Network::Address::InstanceConstSharedPtr address = nullptr; + + try { + address = Network::Utility::resolveUrl(input.address()); + } catch (const EnvoyException& e) { + ENVOY_LOG_MISC(debug, "EnvoyException: {}", e.what()); + return; + } + + FakeConnectionSocket socket(address); + ON_CALL(callbacks, socket()).WillByDefault(testing::ReturnRef(socket)); + + auto filter = std::make_unique(); + filter->onAccept(callbacks); +} + +} // namespace OriginalDst +} // namespace ListenerFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/listener/original_dst/original_dst_fuzz_test.proto b/test/extensions/filters/listener/original_dst/original_dst_fuzz_test.proto new file mode 100644 index 000000000000..f6e5e28e2def --- /dev/null +++ b/test/extensions/filters/listener/original_dst/original_dst_fuzz_test.proto @@ -0,0 +1,7 @@ +syntax = "proto3"; + +package envoy.extensions.filters.listener.original_dst.v3; + +message OriginalDstTestCase { + string address = 2; +} \ No newline at end of file From 2026ec2e3969c473662daeb775b1daecbbdf67e5 Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Wed, 8 Jul 2020 07:54:24 -0700 Subject: [PATCH 553/909] test: shard http2_integration_test (#11939) This should mitigate TSAN timeout. 
Signed-off-by: Lizan Zhou --- test/integration/BUILD | 1 + 1 file changed, 1 insertion(+) diff --git a/test/integration/BUILD b/test/integration/BUILD index a4ef52e1ceb6..7e05463e2354 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -327,6 +327,7 @@ envoy_cc_test( "http2_integration_test.cc", "http2_integration_test.h", ], + shard_count = 4, tags = ["fails_on_windows"], deps = [ ":http_integration_lib", From 0c46dc3a84c517a06c8265cee5c90a7960dabe1e Mon Sep 17 00:00:00 2001 From: Piotr Sikora Date: Wed, 8 Jul 2020 09:46:04 -0700 Subject: [PATCH 554/909] docs: Yuchen Dai to manage stable releases in 2020 Q3. (#11951) While there, update details for v1.15.x releases. Signed-off-by: Piotr Sikora --- RELEASES.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/RELEASES.md b/RELEASES.md index a954e376e473..3ca3f28c376c 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -49,6 +49,8 @@ stable releases and sending announcements about them. This role is rotating on a | Quarter | Release manager | |:-------:|:----------------------------:| | 2020 Q1 | Piotr Sikora ([PiotrSikora]) | +| 2020 Q2 | Piotr Sikora ([PiotrSikora]) | +| 2020 Q3 | Yuchen Dai ([lambdai]) | ## Release schedule @@ -61,7 +63,7 @@ deadline of 3 weeks. | 1.12.0 | 2019/09/30 | 2019/10/31 | +31 days | 2020/10/31 | | 1.13.0 | 2019/12/31 | 2020/01/20 | +20 days | 2021/01/20 | | 1.14.0 | 2020/03/31 | 2020/04/08 | +8 days | 2021/04/08 | -| 1.15.0 | 2020/06/30 | | | | +| 1.15.0 | 2020/06/30 | 2020/07/07 | +7 days | 2021/07/07 | | 1.16.0 | 2020/09/30 | | | | | 1.17.0 | 2020/12/31 | | | | From 2afd9db9b2fa0ecfedadbf805ceac7e74b08303a Mon Sep 17 00:00:00 2001 From: "William A. Rowe Jr" Date: Wed, 8 Jul 2020 12:05:48 -0500 Subject: [PATCH 555/909] Use std::map for computing json object hash (#11875) - Reviewing all unordered_map, we discovered this test was flawed, because it assumed ordering of map elements. This is true of libc++ and msvc, but not consistant with one another, and not true absl::node_hash_map at all, resulting in test failing 50% of the time. - Reviewing the RFC, the hash comparison itself did not follow json. - Note this could be updated with the equivilant absl collection type if and when all std::map's are refactored. Co-authored-by: Sunjay Bhatia Co-authored-by: William A Rowe Jr Signed-off-by: Sunjay Bhatia Signed-off-by: William A Rowe Jr --- include/envoy/json/json_object.h | 9 +++++++-- source/common/json/json_loader.cc | 4 ++-- test/common/json/json_loader_test.cc | 8 +++++++- 3 files changed, 16 insertions(+), 5 deletions(-) diff --git a/include/envoy/json/json_object.h b/include/envoy/json/json_object.h index a5161ccfa91b..7df162540ff5 100644 --- a/include/envoy/json/json_object.h +++ b/include/envoy/json/json_object.h @@ -147,8 +147,13 @@ class Object { virtual double getDouble(const std::string& name, double default_value) const PURE; /** - * @return a hash of the JSON object. This is a hash of each nested element in stable order. - * It does not consider white space that was originally in the parsed JSON. + * @return a hash of the JSON object. + * Per RFC 7159: + * An object is an unordered collection of zero or more name/value + * pairs, where a name is a string and a value is a string, number, + * boolean, null, object, or array. + * Objects with fields in different orders are equivalent and produce the same hash. + * It does not consider white space that was originally in the parsed JSON. 
*/ virtual uint64_t hash() const PURE; diff --git a/source/common/json/json_loader.cc b/source/common/json/json_loader.cc index 0e3a084b5d7c..083f7b64f409 100644 --- a/source/common/json/json_loader.cc +++ b/source/common/json/json_loader.cc @@ -3,10 +3,10 @@ #include #include #include +#include #include #include #include -#include #include #include "common/common/assert.h" @@ -127,7 +127,7 @@ class Field : public Object { bool boolean_value_; double double_value_; int64_t integer_value_; - std::unordered_map object_value_; + std::map object_value_; std::string string_value_; }; diff --git a/test/common/json/json_loader_test.cc b/test/common/json/json_loader_test.cc index 47c4094f7969..1201a3668d10 100644 --- a/test/common/json/json_loader_test.cc +++ b/test/common/json/json_loader_test.cc @@ -246,8 +246,14 @@ TEST_F(JsonLoaderTest, Hash) { ObjectSharedPtr json1 = Factory::loadFromString("{\"value1\": 10.5, \"value2\": -12.3}"); ObjectSharedPtr json2 = Factory::loadFromString("{\"value2\": -12.3, \"value1\": 10.5}"); ObjectSharedPtr json3 = Factory::loadFromString(" { \"value2\": -12.3, \"value1\": 10.5} "); - EXPECT_NE(json1->hash(), json2->hash()); + ObjectSharedPtr json4 = Factory::loadFromString("{\"value1\": 10.5}"); + + // Objects with keys in different orders should be the same + EXPECT_EQ(json1->hash(), json2->hash()); + // Whitespace is ignored EXPECT_EQ(json2->hash(), json3->hash()); + // Ensure different hash is computed for different objects + EXPECT_NE(json1->hash(), json4->hash()); } TEST_F(JsonLoaderTest, Schema) { From 1dff14375fd0a2dfa75bc489ad05ca62282d2aef Mon Sep 17 00:00:00 2001 From: foreseeable Date: Wed, 8 Jul 2020 17:17:05 +0000 Subject: [PATCH 556/909] refactor header inclusion to speed up building (#11952) Signed-off-by: Muge Chen --- test/common/upstream/BUILD | 20 ++++++++++++------- .../upstream/cluster_factory_impl_test.cc | 3 ++- test/common/upstream/eds_speed_test.cc | 3 ++- test/common/upstream/eds_test.cc | 3 ++- test/common/upstream/hds_test.cc | 3 ++- .../upstream/logical_dns_cluster_test.cc | 3 ++- .../upstream/original_dst_cluster_test.cc | 3 ++- .../upstream/transport_socket_matcher_test.cc | 2 +- test/common/upstream/upstream_impl_test.cc | 3 ++- 9 files changed, 28 insertions(+), 15 deletions(-) diff --git a/test/common/upstream/BUILD b/test/common/upstream/BUILD index 1679ac8eb89e..e95118222fe4 100644 --- a/test/common/upstream/BUILD +++ b/test/common/upstream/BUILD @@ -85,7 +85,8 @@ envoy_cc_test( "//test/mocks/local_info:local_info_mocks", "//test/mocks/protobuf:protobuf_mocks", "//test/mocks/runtime:runtime_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:admin_mocks", + "//test/mocks/server:instance_mocks", "//test/mocks/ssl:ssl_mocks", "//test/mocks/upstream:upstream_mocks", "//test/test_common:utility_lib", @@ -114,7 +115,8 @@ envoy_cc_benchmark_binary( "//test/mocks/local_info:local_info_mocks", "//test/mocks/protobuf:protobuf_mocks", "//test/mocks/runtime:runtime_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:admin_mocks", + "//test/mocks/server:instance_mocks", "//test/mocks/ssl:ssl_mocks", "//test/mocks/upstream:upstream_mocks", "//test/test_common:utility_lib", @@ -243,7 +245,8 @@ envoy_cc_test( "//test/mocks/local_info:local_info_mocks", "//test/mocks/network:network_mocks", "//test/mocks/protobuf:protobuf_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:admin_mocks", + "//test/mocks/server:instance_mocks", "//test/mocks/upstream:upstream_mocks", 
"//test/test_common:simulated_time_system_lib", "//test/test_common:utility_lib", @@ -269,7 +272,8 @@ envoy_cc_test( "//test/mocks/network:network_mocks", "//test/mocks/protobuf:protobuf_mocks", "//test/mocks/runtime:runtime_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:admin_mocks", + "//test/mocks/server:instance_mocks", "//test/mocks/ssl:ssl_mocks", "//test/mocks/thread_local:thread_local_mocks", "//test/mocks/upstream:upstream_mocks", @@ -294,7 +298,8 @@ envoy_cc_test( "//test/mocks/network:network_mocks", "//test/mocks/protobuf:protobuf_mocks", "//test/mocks/runtime:runtime_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:admin_mocks", + "//test/mocks/server:instance_mocks", "//test/mocks/ssl:ssl_mocks", "//test/mocks/upstream:upstream_mocks", "//test/test_common:utility_lib", @@ -431,7 +436,7 @@ envoy_cc_test( "//source/server:transport_socket_config_lib", "//test/mocks:common_lib", "//test/mocks/network:network_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:transport_socket_factory_context_mocks", "//test/test_common:registry_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", @@ -558,7 +563,8 @@ envoy_cc_test( "//test/mocks/network:network_mocks", "//test/mocks/protobuf:protobuf_mocks", "//test/mocks/runtime:runtime_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:admin_mocks", + "//test/mocks/server:instance_mocks", "//test/mocks/ssl:ssl_mocks", "//test/mocks/upstream:upstream_mocks", "//test/test_common:registry_lib", diff --git a/test/common/upstream/cluster_factory_impl_test.cc b/test/common/upstream/cluster_factory_impl_test.cc index 467ba917d303..084d5b7a4b96 100644 --- a/test/common/upstream/cluster_factory_impl_test.cc +++ b/test/common/upstream/cluster_factory_impl_test.cc @@ -23,7 +23,8 @@ #include "test/mocks/local_info/mocks.h" #include "test/mocks/network/mocks.h" #include "test/mocks/protobuf/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/admin.h" +#include "test/mocks/server/instance.h" #include "test/mocks/ssl/mocks.h" using testing::NiceMock; diff --git a/test/common/upstream/eds_speed_test.cc b/test/common/upstream/eds_speed_test.cc index fbebc9a7af0c..b1ede181e491 100644 --- a/test/common/upstream/eds_speed_test.cc +++ b/test/common/upstream/eds_speed_test.cc @@ -20,7 +20,8 @@ #include "test/mocks/local_info/mocks.h" #include "test/mocks/protobuf/mocks.h" #include "test/mocks/runtime/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/admin.h" +#include "test/mocks/server/instance.h" #include "test/mocks/ssl/mocks.h" #include "test/mocks/upstream/mocks.h" #include "test/test_common/utility.h" diff --git a/test/common/upstream/eds_test.cc b/test/common/upstream/eds_test.cc index 149fc7cc5640..ef7f38002993 100644 --- a/test/common/upstream/eds_test.cc +++ b/test/common/upstream/eds_test.cc @@ -18,7 +18,8 @@ #include "test/mocks/local_info/mocks.h" #include "test/mocks/protobuf/mocks.h" #include "test/mocks/runtime/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/admin.h" +#include "test/mocks/server/instance.h" #include "test/mocks/ssl/mocks.h" #include "test/mocks/upstream/mocks.h" #include "test/test_common/utility.h" diff --git a/test/common/upstream/hds_test.cc b/test/common/upstream/hds_test.cc index b7a15887d05b..d093ab2a03f1 100644 --- a/test/common/upstream/hds_test.cc +++ b/test/common/upstream/hds_test.cc @@ -16,7 +16,8 @@ #include 
"test/mocks/local_info/mocks.h" #include "test/mocks/network/mocks.h" #include "test/mocks/protobuf/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/admin.h" +#include "test/mocks/server/instance.h" #include "test/mocks/upstream/mocks.h" #include "test/test_common/simulated_time_system.h" #include "test/test_common/utility.h" diff --git a/test/common/upstream/logical_dns_cluster_test.cc b/test/common/upstream/logical_dns_cluster_test.cc index 74154fd82510..a5158e3e3f59 100644 --- a/test/common/upstream/logical_dns_cluster_test.cc +++ b/test/common/upstream/logical_dns_cluster_test.cc @@ -20,7 +20,8 @@ #include "test/mocks/network/mocks.h" #include "test/mocks/protobuf/mocks.h" #include "test/mocks/runtime/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/admin.h" +#include "test/mocks/server/instance.h" #include "test/mocks/ssl/mocks.h" #include "test/mocks/thread_local/mocks.h" #include "test/mocks/upstream/mocks.h" diff --git a/test/common/upstream/original_dst_cluster_test.cc b/test/common/upstream/original_dst_cluster_test.cc index 72fb82425b02..4f25099ba615 100644 --- a/test/common/upstream/original_dst_cluster_test.cc +++ b/test/common/upstream/original_dst_cluster_test.cc @@ -21,7 +21,8 @@ #include "test/mocks/network/mocks.h" #include "test/mocks/protobuf/mocks.h" #include "test/mocks/runtime/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/admin.h" +#include "test/mocks/server/instance.h" #include "test/mocks/ssl/mocks.h" #include "test/mocks/upstream/mocks.h" #include "test/test_common/utility.h" diff --git a/test/common/upstream/transport_socket_matcher_test.cc b/test/common/upstream/transport_socket_matcher_test.cc index a506ab014d7b..cfde130d1d1f 100644 --- a/test/common/upstream/transport_socket_matcher_test.cc +++ b/test/common/upstream/transport_socket_matcher_test.cc @@ -15,7 +15,7 @@ #include "server/transport_socket_config_impl.h" #include "test/mocks/network/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/transport_socket_factory_context.h" #include "test/test_common/registry.h" #include "test/test_common/utility.h" diff --git a/test/common/upstream/upstream_impl_test.cc b/test/common/upstream/upstream_impl_test.cc index bb1521e59e64..15aba3773fed 100644 --- a/test/common/upstream/upstream_impl_test.cc +++ b/test/common/upstream/upstream_impl_test.cc @@ -29,7 +29,8 @@ #include "test/mocks/network/mocks.h" #include "test/mocks/protobuf/mocks.h" #include "test/mocks/runtime/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/admin.h" +#include "test/mocks/server/instance.h" #include "test/mocks/ssl/mocks.h" #include "test/mocks/upstream/mocks.h" #include "test/test_common/registry.h" From 639370ff89023dbe7474e5218fe9eb7d1e11d3d7 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Wed, 8 Jul 2020 14:13:22 -0400 Subject: [PATCH 557/909] tooling: fixing bug tracker (#11937) Fixing the check for preexisting bugs. 
Risk Level: n/a (tooling) Testing: manual Docs Changes: n/a Release Notes: n/a Signed-off-by: Alyssa Wilk --- tools/deprecate_version/deprecate_version.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/tools/deprecate_version/deprecate_version.py b/tools/deprecate_version/deprecate_version.py index 6129ae585f01..9cbde123785e 100644 --- a/tools/deprecate_version/deprecate_version.py +++ b/tools/deprecate_version/deprecate_version.py @@ -90,17 +90,22 @@ def CreateIssues(access_token, runtime_and_pr): login = search_user[0].login if search_user else None title = '%s deprecation' % (runtime_guard) - body = ('%s (%s) introduced a runtime guarded feature. This issue ' - 'tracks source code cleanup.') % (number, change_title) + body = ('Your change %s (%s) introduced a runtime guarded feature. It has been 6 months since ' + 'the new code has been exercised by default, so it\'s time to remove the old code ' + 'path. This issue tracks source code cleanup so we don\'t forget.') % (number, + change_title) print(title) print(body) print(' >> Assigning to %s' % (login or email)) + search_title = '%s in:title' % title # TODO(htuch): Figure out how to do this without legacy and faster. - exists = repo.legacy_search_issues('open', '"%s"' % title) or repo.legacy_search_issues( - 'closed', '"%s"' % title) + exists = repo.legacy_search_issues('open', search_title) or repo.legacy_search_issues( + 'closed', search_title) if exists: + print("Issue with %s already exists" % search_title) + print(exists) print(' >> Issue already exists, not posting!') else: issues.append((title, body, login)) From ee745ac026db7f7f3547caa29bad2364c625c446 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Wed, 8 Jul 2020 14:13:37 -0400 Subject: [PATCH 558/909] http: removing old path for strict header validation (#11954) Risk Level: low Testing: n/a Docs Changes: n/a Release Notes: yes Fixes #11932 Signed-off-by: Alyssa Wilk --- docs/root/version_history/current.rst | 2 ++ source/common/http/http1/codec_impl.cc | 14 +++----- source/common/http/http1/codec_impl.h | 1 - source/common/runtime/runtime_features.cc | 1 - test/common/http/http1/codec_impl_test.cc | 44 ----------------------- 5 files changed, 7 insertions(+), 55 deletions(-) diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 22711b6123dd..3ab0f76bfb13 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -17,6 +17,8 @@ Removed Config or Runtime ------------------------- *Normally occurs at the end of the* :ref:`deprecation period ` +* http: removed legacy header sanitization and the runtime guard `envoy.reloadable_features.strict_header_validation`. 
+ New Features ------------ diff --git a/source/common/http/http1/codec_impl.cc b/source/common/http/http1/codec_impl.cc index 96dd8a939ea7..d622fe69016e 100644 --- a/source/common/http/http1/codec_impl.cc +++ b/source/common/http/http1/codec_impl.cc @@ -455,8 +455,6 @@ ConnectionImpl::ConnectionImpl(Network::Connection& connection, CodecStats& stat : connection_(connection), stats_(stats), header_key_formatter_(std::move(header_key_formatter)), processing_trailers_(false), handling_upgrade_(false), reset_stream_called_(false), deferred_end_stream_headers_(false), - strict_header_validation_( - Runtime::runtimeFeatureEnabled("envoy.reloadable_features.strict_header_validation")), connection_header_sanitization_(Runtime::runtimeFeatureEnabled( "envoy.reloadable_features.connection_header_sanitization")), enable_trailers_(enable_trailers), @@ -615,13 +613,11 @@ void ConnectionImpl::onHeaderValue(const char* data, size_t length) { } absl::string_view header_value{data, length}; - if (strict_header_validation_) { - if (!Http::HeaderUtility::headerValueIsValid(header_value)) { - ENVOY_CONN_LOG(debug, "invalid header value: {}", connection_, header_value); - error_code_ = Http::Code::BadRequest; - sendProtocolError(Http1ResponseCodeDetails::get().InvalidCharacters); - throw CodecProtocolException("http/1.1 protocol error: header value contains invalid chars"); - } + if (!Http::HeaderUtility::headerValueIsValid(header_value)) { + ENVOY_CONN_LOG(debug, "invalid header value: {}", connection_, header_value); + error_code_ = Http::Code::BadRequest; + sendProtocolError(Http1ResponseCodeDetails::get().InvalidCharacters); + throw CodecProtocolException("http/1.1 protocol error: header value contains invalid chars"); } header_parsing_state_ = HeaderParsingState::Value; diff --git a/source/common/http/http1/codec_impl.h b/source/common/http/http1/codec_impl.h index d2f8c47d7c8a..994873701f85 100644 --- a/source/common/http/http1/codec_impl.h +++ b/source/common/http/http1/codec_impl.h @@ -253,7 +253,6 @@ class ConnectionImpl : public virtual Connection, protected Logger::LoggablemergeValues( - {{"envoy.reloadable_features.strict_header_validation", "false"}}); - - initialize(); - - MockRequestDecoder decoder; - EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); - - Buffer::OwnedImpl buffer( - absl::StrCat("GET / HTTP/1.1\r\nHOST: h.com\r\nfoo: ", std::string(1, 3), "\r\n")); - auto status = codec_->dispatch(buffer); - EXPECT_TRUE(status.ok()); -} - // Ensures that requests with invalid HTTP header values are properly rejected -// when the runtime guard is enabled for the feature. TEST_F(Http1ServerConnectionImplTest, HeaderInvalidCharsRejection) { TestScopedRuntime scoped_runtime; // When the runtime-guarded feature is enabled, invalid header values // should result in a rejection. - Runtime::LoaderSingleton::getExisting()->mergeValues( - {{"envoy.reloadable_features.strict_header_validation", "true"}}); initialize(); @@ -1140,27 +1117,6 @@ TEST_F(Http1ServerConnectionImplTest, HeaderInvalidAuthority) { EXPECT_EQ("http.invalid_authority", response_encoder->getStream().responseDetails()); } -// Regression test for http-parser allowing embedded NULs in header values, -// verify we reject them. 
-TEST_F(Http1ServerConnectionImplTest, HeaderEmbeddedNulRejection) { - TestScopedRuntime scoped_runtime; - Runtime::LoaderSingleton::getExisting()->mergeValues( - {{"envoy.reloadable_features.strict_header_validation", "false"}}); - initialize(); - - InSequence sequence; - - MockRequestDecoder decoder; - EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); - - Buffer::OwnedImpl buffer( - absl::StrCat("GET / HTTP/1.1\r\nHOST: h.com\r\nfoo: bar", std::string(1, '\0'), "baz\r\n")); - EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _, _)); - auto status = codec_->dispatch(buffer); - EXPECT_TRUE(isCodecProtocolError(status)); - EXPECT_EQ(status.message(), "http/1.1 protocol error: HPE_INVALID_HEADER_TOKEN"); -} - // Mutate an HTTP GET with embedded NULs, this should always be rejected in some // way (not necessarily with "head value contains NUL" though). TEST_F(Http1ServerConnectionImplTest, HeaderMutateEmbeddedNul) { From b6aec770c968aab3d169e46a82c2057b95bc8888 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Wed, 8 Jul 2020 16:30:45 -0400 Subject: [PATCH 559/909] test: rollback #11741 (#11959) Rolling back flaky test tagging to help the windows build stay green. Signed-off-by: Alyssa Wilk --- test/common/network/BUILD | 1 + test/common/router/BUILD | 1 + test/common/upstream/BUILD | 2 ++ test/extensions/common/aws/BUILD | 1 + test/extensions/filters/http/cors/BUILD | 1 + test/extensions/filters/http/router/BUILD | 1 + test/extensions/filters/network/rbac/BUILD | 1 + test/extensions/transport_sockets/alts/BUILD | 1 + test/integration/BUILD | 9 ++++++++- test/server/BUILD | 1 - 10 files changed, 17 insertions(+), 2 deletions(-) diff --git a/test/common/network/BUILD b/test/common/network/BUILD index e8cf67a6b9d1..2910ffb4d878 100644 --- a/test/common/network/BUILD +++ b/test/common/network/BUILD @@ -278,6 +278,7 @@ envoy_cc_test( envoy_cc_test( name = "addr_family_aware_socket_option_impl_test", srcs = ["addr_family_aware_socket_option_impl_test.cc"], + tags = ["fails_on_windows"], deps = [ ":socket_option_test", "//source/common/network:addr_family_aware_socket_option_lib", diff --git a/test/common/router/BUILD b/test/common/router/BUILD index 2db462fb8e03..7d88ae53b96e 100644 --- a/test/common/router/BUILD +++ b/test/common/router/BUILD @@ -289,6 +289,7 @@ envoy_cc_test( name = "router_upstream_log_test", srcs = ["router_upstream_log_test.cc"], external_deps = ["abseil_optional"], + tags = ["fails_on_windows"], deps = [ "//source/common/buffer:buffer_lib", "//source/common/network:utility_lib", diff --git a/test/common/upstream/BUILD b/test/common/upstream/BUILD index e95118222fe4..7c417471775d 100644 --- a/test/common/upstream/BUILD +++ b/test/common/upstream/BUILD @@ -135,6 +135,7 @@ envoy_benchmark_test( envoy_cc_test( name = "health_checker_impl_test", srcs = ["health_checker_impl_test.cc"], + tags = ["fails_on_windows"], deps = [ ":utility_lib", "//source/common/buffer:buffer_lib", @@ -403,6 +404,7 @@ envoy_benchmark_test( name = "load_balancer_benchmark_test", timeout = "long", benchmark_binary = "load_balancer_benchmark", + tags = ["fails_on_windows"], ) envoy_cc_test( diff --git a/test/extensions/common/aws/BUILD b/test/extensions/common/aws/BUILD index 2a177200f6e7..eae532ee27f9 100644 --- a/test/extensions/common/aws/BUILD +++ b/test/extensions/common/aws/BUILD @@ -76,6 +76,7 @@ envoy_cc_test( srcs = [ "aws_metadata_fetcher_integration_test.cc", ], + tags = ["fails_on_windows"], deps = [ "//source/common/common:fmt_lib", 
"//source/extensions/common/aws:utility_lib", diff --git a/test/extensions/filters/http/cors/BUILD b/test/extensions/filters/http/cors/BUILD index 43a70cd13f01..a91934cb1249 100644 --- a/test/extensions/filters/http/cors/BUILD +++ b/test/extensions/filters/http/cors/BUILD @@ -30,6 +30,7 @@ envoy_extension_cc_test( name = "cors_filter_integration_test", srcs = ["cors_filter_integration_test.cc"], extension_name = "envoy.filters.http.cors", + tags = ["fails_on_windows"], deps = [ "//source/common/buffer:buffer_lib", "//source/common/http:header_map_lib", diff --git a/test/extensions/filters/http/router/BUILD b/test/extensions/filters/http/router/BUILD index 5e0dca9e8224..46aaecbb7ae2 100644 --- a/test/extensions/filters/http/router/BUILD +++ b/test/extensions/filters/http/router/BUILD @@ -30,6 +30,7 @@ envoy_extension_cc_test( "//test/config/integration/certs", ], extension_name = "envoy.filters.http.router", + tags = ["fails_on_windows"], deps = [ "//source/extensions/filters/http/router:config", "//test/integration:http_integration_lib", diff --git a/test/extensions/filters/network/rbac/BUILD b/test/extensions/filters/network/rbac/BUILD index 440b07896711..8d4d479cefb0 100644 --- a/test/extensions/filters/network/rbac/BUILD +++ b/test/extensions/filters/network/rbac/BUILD @@ -41,6 +41,7 @@ envoy_extension_cc_test( name = "integration_test", srcs = ["integration_test.cc"], extension_name = "envoy.filters.network.rbac", + tags = ["fails_on_windows"], deps = [ "//source/extensions/filters/network/echo:config", "//source/extensions/filters/network/rbac:config", diff --git a/test/extensions/transport_sockets/alts/BUILD b/test/extensions/transport_sockets/alts/BUILD index d2e0f2998f07..386c25ace615 100644 --- a/test/extensions/transport_sockets/alts/BUILD +++ b/test/extensions/transport_sockets/alts/BUILD @@ -78,6 +78,7 @@ envoy_extension_cc_test( "grpc_alts_handshaker_proto", "grpc_alts_transport_security_common_proto", ], + tags = ["fails_on_windows"], deps = [ "//source/common/common:utility_lib", "//source/common/event:dispatcher_includes", diff --git a/test/integration/BUILD b/test/integration/BUILD index 7e05463e2354..5fe7aa10a5f1 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -185,6 +185,7 @@ envoy_cc_test( envoy_cc_test( name = "cluster_filter_integration_test", srcs = ["cluster_filter_integration_test.cc"], + tags = ["fails_on_windows"], deps = [ ":integration_lib", "//include/envoy/network:filter_interface", @@ -198,6 +199,7 @@ envoy_cc_test( envoy_cc_test( name = "custom_cluster_integration_test", srcs = ["custom_cluster_integration_test.cc"], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "//source/common/upstream:load_balancer_lib", @@ -373,6 +375,7 @@ envoy_cc_test( data = [ "//test/config/integration/certs", ], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "//source/extensions/transport_sockets/tls:context_lib", @@ -389,6 +392,7 @@ envoy_cc_test( srcs = [ "header_casing_integration_test.cc", ], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", @@ -743,6 +747,7 @@ envoy_cc_test( # The symbol table cluster memory tests take a while to run specially under tsan. # Shard it to avoid test timeout. 
shard_count = 2, + tags = ["fails_on_windows"], deps = [ ":integration_lib", "//source/common/memory:stats_lib", @@ -942,6 +947,7 @@ envoy_cc_test( "//test/config/integration/certs", ], shard_count = 2, + tags = ["fails_on_windows"], deps = [ ":integration_lib", "//source/common/config:api_version_lib", @@ -988,6 +994,7 @@ envoy_cc_test( srcs = [ "tcp_conn_pool_integration_test.cc", ], + tags = ["fails_on_windows"], deps = [ ":integration_lib", "//include/envoy/server:filter_config_interface", @@ -1039,7 +1046,6 @@ envoy_cc_test( name = "dynamic_validation_integration_test", srcs = ["dynamic_validation_integration_test.cc"], data = ["//test/config/integration:server_xds_files"], - # Fails on windows with cr/lf yaml file checkouts tags = ["fails_on_windows"], deps = [ ":http_integration_lib", @@ -1082,6 +1088,7 @@ envoy_cc_test( data = [ "//test/config/integration/certs", ], + tags = ["fails_on_windows"], deps = [ ":http_integration_lib", "//source/common/http:header_map_lib", diff --git a/test/server/BUILD b/test/server/BUILD index ac23c10adf56..64b8c639188c 100644 --- a/test/server/BUILD +++ b/test/server/BUILD @@ -344,7 +344,6 @@ envoy_cc_test( ":server_test_data", ":static_validation_test_data", ], - # Fails on windows with cr/lf yaml file checkouts tags = ["fails_on_windows"], deps = [ "//source/common/common:version_lib", From a3ef3e70855b2a1658c786f6e6e9059993bcbf90 Mon Sep 17 00:00:00 2001 From: Martin Matusiak Date: Thu, 9 Jul 2020 06:43:15 +1000 Subject: [PATCH 560/909] router: consume all retry related headers (#11913) Also remove `x-envoy-retriable-header-names` and `x-envoy-retriable-status-codes` to avoid these headers being propagated to the upstream. Signed-off-by: Martin Matusiak --- docs/root/version_history/current.rst | 2 + source/common/router/BUILD | 1 + source/common/router/retry_state_impl.cc | 9 ++ source/common/runtime/runtime_features.cc | 1 + test/common/router/BUILD | 1 + test/common/router/retry_state_impl_test.cc | 105 ++++++++++++++++++-- 6 files changed, 113 insertions(+), 6 deletions(-) diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 3ab0f76bfb13..680d46f8672c 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -9,6 +9,8 @@ Minor Behavior Changes ---------------------- *Changes that may cause incompatibilities for some users, but should not for most* +* router: now consumes all retry related headers to prevent them from being propagated to the upstream. This behavior may be reverted by setting runtime feature `envoy.reloadable_features.consume_all_retry_headers` to false. 
+ Bug Fixes --------- *Changes expected to improve the state of the world and are unlikely to have negative effects* diff --git a/source/common/router/BUILD b/source/common/router/BUILD index 188242186a73..bd500d294888 100644 --- a/source/common/router/BUILD +++ b/source/common/router/BUILD @@ -252,6 +252,7 @@ envoy_cc_library( "//source/common/http:header_utility_lib", "//source/common/http:headers_lib", "//source/common/http:utility_lib", + "//source/common/runtime:runtime_features_lib", "@envoy_api//envoy/config/route/v3:pkg_cc_proto", ], ) diff --git a/source/common/router/retry_state_impl.cc b/source/common/router/retry_state_impl.cc index 09d3b1016804..54d667af29ce 100644 --- a/source/common/router/retry_state_impl.cc +++ b/source/common/router/retry_state_impl.cc @@ -13,6 +13,7 @@ #include "common/http/codes.h" #include "common/http/headers.h" #include "common/http/utility.h" +#include "common/runtime/runtime_features.h" namespace Envoy { namespace Router { @@ -45,9 +46,17 @@ RetryStateImpl::create(const RetryPolicy& route_policy, Http::RequestHeaderMap& dispatcher, priority)); } + // Consume all retry related headers to avoid them being propagated to the upstream request_headers.removeEnvoyRetryOn(); request_headers.removeEnvoyRetryGrpcOn(); request_headers.removeEnvoyMaxRetries(); + if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.consume_all_retry_headers")) { + request_headers.removeEnvoyHedgeOnPerTryTimeout(); + request_headers.removeEnvoyRetriableHeaderNames(); + request_headers.removeEnvoyRetriableStatusCodes(); + request_headers.removeEnvoyUpstreamRequestPerTryTimeoutMs(); + } + return ret; } diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc index 7c6fe6e4b268..4659441c9251 100644 --- a/source/common/runtime/runtime_features.cc +++ b/source/common/runtime/runtime_features.cc @@ -61,6 +61,7 @@ constexpr const char* runtime_features[] = { // Begin alphabetically sorted section. 
"envoy.reloadable_features.activate_fds_next_event_loop", "envoy.deprecated_features.allow_deprecated_extension_names", + "envoy.reloadable_features.consume_all_retry_headers", "envoy.reloadable_features.disallow_unbounded_access_logs", "envoy.reloadable_features.early_errors_via_hcm", "envoy.reloadable_features.enable_deprecated_v2_api_warning", diff --git a/test/common/router/BUILD b/test/common/router/BUILD index 7d88ae53b96e..0f72563074bb 100644 --- a/test/common/router/BUILD +++ b/test/common/router/BUILD @@ -161,6 +161,7 @@ envoy_cc_test( "//test/mocks/router:router_mocks", "//test/mocks/runtime:runtime_mocks", "//test/mocks/upstream:upstream_mocks", + "//test/test_common:test_runtime_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/config/route/v3:pkg_cc_proto", ], diff --git a/test/common/router/retry_state_impl_test.cc b/test/common/router/retry_state_impl_test.cc index 25044563d5a1..3c5b3d5f37c9 100644 --- a/test/common/router/retry_state_impl_test.cc +++ b/test/common/router/retry_state_impl_test.cc @@ -12,6 +12,7 @@ #include "test/mocks/stats/mocks.h" #include "test/mocks/upstream/mocks.h" #include "test/test_common/printers.h" +#include "test/test_common/test_runtime.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" @@ -760,9 +761,6 @@ TEST_F(RouterRetryStateImplTest, MaxRetriesHeader) { {"x-envoy-retry-grpc-on", "cancelled"}, {"x-envoy-max-retries", "3"}}; setup(request_headers); - EXPECT_FALSE(request_headers.has("x-envoy-retry-on")); - EXPECT_FALSE(request_headers.has("x-envoy-retry-grpc-on")); - EXPECT_FALSE(request_headers.has("x-envoy-max-retries")); EXPECT_TRUE(state_->enabled()); expectTimerCreateAndEnable(); @@ -936,9 +934,6 @@ TEST_F(RouterRetryStateImplTest, ZeroMaxRetriesHeader) { {"x-envoy-retry-grpc-on", "cancelled"}, {"x-envoy-max-retries", "0"}}; setup(request_headers); - EXPECT_FALSE(request_headers.has("x-envoy-retry-on")); - EXPECT_FALSE(request_headers.has("x-envoy-retry-grpc-on")); - EXPECT_FALSE(request_headers.has("x-envoy-max-retries")); EXPECT_TRUE(state_->enabled()); EXPECT_EQ(RetryStatus::NoRetryLimitExceeded, @@ -1127,6 +1122,104 @@ TEST_F(RouterRetryStateImplTest, ParseRetryGrpcOn) { EXPECT_FALSE(result.second); } +TEST_F(RouterRetryStateImplTest, RemoveAllRetryHeaders) { + // Make sure retry related headers are removed when the policy is enabled. + { + Http::TestRequestHeaderMapImpl request_headers{ + {"x-envoy-retry-on", "5xx,retriable-header-names,retriable-status-codes"}, + {"x-envoy-retry-grpc-on", "resource-exhausted"}, + {"x-envoy-retriable-header-names", "X-Upstream-Pushback"}, + {"x-envoy-retriable-status-codes", "418,420"}, + {"x-envoy-max-retries", "7"}, + {"x-envoy-hedge-on-per-try-timeout", "true"}, + {"x-envoy-upstream-rq-per-try-timeout-ms", "2"}, + }; + setup(request_headers); + EXPECT_TRUE(state_->enabled()); + + EXPECT_FALSE(request_headers.has("x-envoy-retry-on")); + EXPECT_FALSE(request_headers.has("x-envoy-retry-grpc-on")); + EXPECT_FALSE(request_headers.has("x-envoy-max-retries")); + EXPECT_FALSE(request_headers.has("x-envoy-retriable-header-names")); + EXPECT_FALSE(request_headers.has("x-envoy-retriable-status-codes")); + EXPECT_FALSE(request_headers.has("x-envoy-hedge-on-per-try-timeout")); + EXPECT_FALSE(request_headers.has("x-envoy-upstream-rq-per-try-timeout-ms")); + } + + // Make sure retry related headers are removed even if the policy is disabled. 
+ { + Http::TestRequestHeaderMapImpl request_headers{ + {"x-envoy-retriable-header-names", "X-Upstream-Pushback"}, + {"x-envoy-retriable-status-codes", "418,420"}, + {"x-envoy-max-retries", "7"}, + {"x-envoy-hedge-on-per-try-timeout", "true"}, + {"x-envoy-upstream-rq-per-try-timeout-ms", "2"}, + }; + setup(request_headers); + EXPECT_EQ(nullptr, state_); + + EXPECT_FALSE(request_headers.has("x-envoy-retry-on")); + EXPECT_FALSE(request_headers.has("x-envoy-retry-grpc-on")); + EXPECT_FALSE(request_headers.has("x-envoy-max-retries")); + EXPECT_FALSE(request_headers.has("x-envoy-retriable-header-names")); + EXPECT_FALSE(request_headers.has("x-envoy-retriable-status-codes")); + EXPECT_FALSE(request_headers.has("x-envoy-hedge-on-per-try-timeout")); + EXPECT_FALSE(request_headers.has("x-envoy-upstream-rq-per-try-timeout-ms")); + } + + // Repeat policy is enabled case with runtime flag disabled. + { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.consume_all_retry_headers", "false"}}); + + Http::TestRequestHeaderMapImpl request_headers{ + {"x-envoy-retry-on", "5xx,retriable-header-names,retriable-status-codes"}, + {"x-envoy-retry-grpc-on", "resource-exhausted"}, + {"x-envoy-retriable-header-names", "X-Upstream-Pushback"}, + {"x-envoy-retriable-status-codes", "418,420"}, + {"x-envoy-max-retries", "7"}, + {"x-envoy-hedge-on-per-try-timeout", "true"}, + {"x-envoy-upstream-rq-per-try-timeout-ms", "2"}, + }; + setup(request_headers); + EXPECT_TRUE(state_->enabled()); + + EXPECT_FALSE(request_headers.has("x-envoy-retry-on")); + EXPECT_FALSE(request_headers.has("x-envoy-retry-grpc-on")); + EXPECT_FALSE(request_headers.has("x-envoy-max-retries")); + EXPECT_TRUE(request_headers.has("x-envoy-retriable-header-names")); + EXPECT_TRUE(request_headers.has("x-envoy-retriable-status-codes")); + EXPECT_TRUE(request_headers.has("x-envoy-hedge-on-per-try-timeout")); + EXPECT_TRUE(request_headers.has("x-envoy-upstream-rq-per-try-timeout-ms")); + } + + // Repeat policy is disabled case with runtime flag disabled. 
+ { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.consume_all_retry_headers", "false"}}); + + Http::TestRequestHeaderMapImpl request_headers{ + {"x-envoy-retriable-header-names", "X-Upstream-Pushback"}, + {"x-envoy-retriable-status-codes", "418,420"}, + {"x-envoy-max-retries", "7"}, + {"x-envoy-hedge-on-per-try-timeout", "true"}, + {"x-envoy-upstream-rq-per-try-timeout-ms", "2"}, + }; + setup(request_headers); + EXPECT_EQ(nullptr, state_); + + EXPECT_FALSE(request_headers.has("x-envoy-retry-on")); + EXPECT_FALSE(request_headers.has("x-envoy-retry-grpc-on")); + EXPECT_FALSE(request_headers.has("x-envoy-max-retries")); + EXPECT_TRUE(request_headers.has("x-envoy-retriable-header-names")); + EXPECT_TRUE(request_headers.has("x-envoy-retriable-status-codes")); + EXPECT_TRUE(request_headers.has("x-envoy-hedge-on-per-try-timeout")); + EXPECT_TRUE(request_headers.has("x-envoy-upstream-rq-per-try-timeout-ms")); + } +} + } // namespace } // namespace Router } // namespace Envoy From d84a57e5ea8a468aebc27b4120ea6c7f40a7ed3c Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Wed, 8 Jul 2020 14:29:05 -0700 Subject: [PATCH 561/909] build: use bazel 3.3.1 (#11918) Risk Level: Low Testing: CI Docs Changes: N/A Release Notes: N/A Signed-off-by: Lizan Zhou --- .bazelrc | 8 ++++++-- .bazelversion | 2 +- bazel/repository_locations.bzl | 14 +++++++------- 3 files changed, 14 insertions(+), 10 deletions(-) diff --git a/.bazelrc b/.bazelrc index 3079386b6981..fb683a2bc9b8 100644 --- a/.bazelrc +++ b/.bazelrc @@ -134,16 +134,18 @@ coverage:fuzz-coverage --config=asan-fuzzer coverage:fuzz-coverage --run_under=@envoy//bazel/coverage:fuzz_coverage_wrapper.sh # Remote execution: https://docs.bazel.build/versions/master/remote-execution.html -build:rbe-toolchain --host_platform=@envoy_build_tools//toolchains:rbe_ubuntu_clang_platform -build:rbe-toolchain --platforms=@envoy_build_tools//toolchains:rbe_ubuntu_clang_platform build:rbe-toolchain --action_env=BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN=1 build:rbe-toolchain-clang --config=rbe-toolchain +build:rbe-toolchain-clang --platforms=@rbe_ubuntu_clang//config:platform +build:rbe-toolchain-clang --host_platform=@rbe_ubuntu_clang//config:platform build:rbe-toolchain-clang --crosstool_top=@rbe_ubuntu_clang//cc:toolchain build:rbe-toolchain-clang --extra_toolchains=@rbe_ubuntu_clang//config:cc-toolchain build:rbe-toolchain-clang --action_env=CC=clang --action_env=CXX=clang++ --action_env=PATH=/usr/sbin:/usr/bin:/sbin:/bin:/opt/llvm/bin build:rbe-toolchain-clang-libc++ --config=rbe-toolchain +build:rbe-toolchain-clang-libc++ --platforms=@rbe_ubuntu_clang_libcxx//config:platform +build:rbe-toolchain-clang-libc++ --host_platform=@rbe_ubuntu_clang_libcxx//config:platform build:rbe-toolchain-clang-libc++ --crosstool_top=@rbe_ubuntu_clang_libcxx//cc:toolchain build:rbe-toolchain-clang-libc++ --extra_toolchains=@rbe_ubuntu_clang_libcxx//config:cc-toolchain build:rbe-toolchain-clang-libc++ --action_env=CC=clang --action_env=CXX=clang++ --action_env=PATH=/usr/sbin:/usr/bin:/sbin:/bin:/opt/llvm/bin @@ -156,6 +158,8 @@ build:rbe-toolchain-msan --linkopt=-Wl,-rpath,/opt/libcxx_msan/lib build:rbe-toolchain-msan --config=clang-msan build:rbe-toolchain-gcc --config=rbe-toolchain +build:rbe-toolchain-gcc --platforms=@rbe_ubuntu_gcc//config:platform +build:rbe-toolchain-gcc --host_platform=@rbe_ubuntu_gcc//config:platform build:rbe-toolchain-gcc --crosstool_top=@rbe_ubuntu_gcc//cc:toolchain build:rbe-toolchain-gcc 
--extra_toolchains=@rbe_ubuntu_gcc//config:cc-toolchain diff --git a/.bazelversion b/.bazelversion index fd2a01863fdd..bea438e9ade7 100644 --- a/.bazelversion +++ b/.bazelversion @@ -1 +1 @@ -3.1.0 +3.3.1 diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index b2501eca906f..3f25e85da263 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -53,11 +53,11 @@ DEPENDENCY_REPOSITORIES = dict( use_category = ["build"], ), bazel_toolchains = dict( - sha256 = "db48eed61552e25d36fe051a65d2a329cc0fb08442627e8f13960c5ab087a44e", - strip_prefix = "bazel-toolchains-3.2.0", + sha256 = "2431088b38fd8e2878db17e3c5babb431de9e5c52b6d8b509d3070fa279a5be2", + strip_prefix = "bazel-toolchains-3.3.1", urls = [ - "https://github.com/bazelbuild/bazel-toolchains/releases/download/3.2.0/bazel-toolchains-3.2.0.tar.gz", - "https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/archive/3.2.0.tar.gz", + "https://github.com/bazelbuild/bazel-toolchains/releases/download/3.3.1/bazel-toolchains-3.3.1.tar.gz", + "https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/archive/3.3.1.tar.gz", ], use_category = ["build"], ), @@ -67,10 +67,10 @@ DEPENDENCY_REPOSITORIES = dict( use_category = ["build"], ), envoy_build_tools = dict( - sha256 = "c4d27c0a5db918e861b7164d69cdffe920daafbe3f597ffdda5a1d10c1abc992", - strip_prefix = "envoy-build-tools-557ee9b44a3d08cf38d9ce6f08adb872c385d6a5", + sha256 = "ff890c70d60e51c7ee80874f85c3905718b7f6929a6c367c850cdd0b9c01d44d", + strip_prefix = "envoy-build-tools-efaecf11d76b86551cf42e2354274ac2acd7042f", # 2020-06-16 - urls = ["https://github.com/envoyproxy/envoy-build-tools/archive/557ee9b44a3d08cf38d9ce6f08adb872c385d6a5.tar.gz"], + urls = ["https://github.com/envoyproxy/envoy-build-tools/archive/efaecf11d76b86551cf42e2354274ac2acd7042f.tar.gz"], use_category = ["build"], ), boringssl = dict( From 17dd2da586eeb9a65a52743b2ca7112e096758ee Mon Sep 17 00:00:00 2001 From: Bill Gallagher Date: Wed, 8 Jul 2020 18:42:34 -0400 Subject: [PATCH 562/909] =?UTF-8?q?fix=20crash=20that=20happens=20when=20R?= =?UTF-8?q?TDS=20is=20configured=20with=20a=20non-existent=20cl=E2=80=A6?= =?UTF-8?q?=20(#11953)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit fix crash that happens when RTDS is configured with a non-existent cluster Risk Level: low Testing: added test Docs Changes: n/a Release Notes: n/a Signed-off-by: Bill Gallagher --- source/common/runtime/runtime_impl.cc | 24 ++++++++++++++---------- source/common/runtime/runtime_impl.h | 3 ++- test/common/runtime/BUILD | 1 + test/common/runtime/runtime_impl_test.cc | 23 +++++++++++++++++++++++ 4 files changed, 40 insertions(+), 11 deletions(-) diff --git a/source/common/runtime/runtime_impl.cc b/source/common/runtime/runtime_impl.cc index 6eb490d2015b..295f9ee23ffa 100644 --- a/source/common/runtime/runtime_impl.cc +++ b/source/common/runtime/runtime_impl.cc @@ -513,7 +513,13 @@ LoaderImpl::LoaderImpl(Event::Dispatcher& dispatcher, ThreadLocal::SlotAllocator loadNewSnapshot(); } -void LoaderImpl::initialize(Upstream::ClusterManager& cm) { cm_ = &cm; } +void LoaderImpl::initialize(Upstream::ClusterManager& cm) { + cm_ = &cm; + + for (const auto& s : subscriptions_) { + s->createSubscription(); + } +} void LoaderImpl::startRtdsSubscriptions(ReadyCallback on_done) { on_rtds_initialized_ = on_done; @@ -534,6 +540,12 @@ RtdsSubscription::RtdsSubscription( resource_name_(rtds_layer.name()), 
init_target_("RTDS " + resource_name_, [this]() { start(); }) {} +void RtdsSubscription::createSubscription() { + const auto resource_name = getResourceName(); + subscription_ = parent_.cm_->subscriptionFactory().subscriptionFromConfigSource( + config_source_, Grpc::Common::typeUrl(resource_name), store_, *this, resource_decoder_); +} + void RtdsSubscription::onConfigUpdate(const std::vector& resources, const std::string&) { validateUpdateSize(resources.size()); @@ -564,15 +576,7 @@ void RtdsSubscription::onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureRe init_target_.ready(); } -void RtdsSubscription::start() { - // We have to delay the subscription creation until init-time, since the - // cluster manager resources are not available in the constructor when - // instantiated in the server instance. - const auto resource_name = getResourceName(); - subscription_ = parent_.cm_->subscriptionFactory().subscriptionFromConfigSource( - config_source_, Grpc::Common::typeUrl(resource_name), store_, *this, resource_decoder_); - subscription_->start({resource_name_}); -} +void RtdsSubscription::start() { subscription_->start({resource_name_}); } void RtdsSubscription::validateUpdateSize(uint32_t num_resources) { if (num_resources != 1) { diff --git a/source/common/runtime/runtime_impl.h b/source/common/runtime/runtime_impl.h index 22426335eddc..5f8297ec1a2a 100644 --- a/source/common/runtime/runtime_impl.h +++ b/source/common/runtime/runtime_impl.h @@ -219,6 +219,7 @@ struct RtdsSubscription : Envoy::Config::SubscriptionBase { friend RtdsSubscription; // Create a new Snapshot - virtual SnapshotImplPtr createNewSnapshot(); + SnapshotImplPtr createNewSnapshot(); // Load a new Snapshot into TLS void loadNewSnapshot(); RuntimeStats generateStats(Stats::Store& store); diff --git a/test/common/runtime/BUILD b/test/common/runtime/BUILD index d6ffd6dfb1a7..8d29521e1206 100644 --- a/test/common/runtime/BUILD +++ b/test/common/runtime/BUILD @@ -55,6 +55,7 @@ envoy_cc_test( "//test/mocks/local_info:local_info_mocks", "//test/mocks/protobuf:protobuf_mocks", "//test/mocks/runtime:runtime_mocks", + "//test/mocks/server:server_mocks", "//test/mocks/thread_local:thread_local_mocks", "//test/mocks/upstream:upstream_mocks", "//test/test_common:environment_lib", diff --git a/test/common/runtime/runtime_impl_test.cc b/test/common/runtime/runtime_impl_test.cc index e45588c7aa40..168123abcf5a 100644 --- a/test/common/runtime/runtime_impl_test.cc +++ b/test/common/runtime/runtime_impl_test.cc @@ -18,6 +18,7 @@ #include "test/mocks/local_info/mocks.h" #include "test/mocks/protobuf/mocks.h" #include "test/mocks/runtime/mocks.h" +#include "test/mocks/server/mocks.h" #include "test/mocks/thread_local/mocks.h" #include "test/mocks/upstream/mocks.h" #include "test/test_common/environment.h" @@ -1112,6 +1113,28 @@ TEST_F(RtdsLoaderImplTest, MultipleRtdsLayers) { EXPECT_EQ(3, store_.gauge("runtime.num_layers", Stats::Gauge::ImportMode::NeverImport).value()); } +TEST_F(RtdsLoaderImplTest, BadConfigSource) { + Upstream::MockClusterManager cm_; + EXPECT_CALL(cm_.subscription_factory_, subscriptionFromConfigSource(_, _, _, _, _)) + .WillOnce(InvokeWithoutArgs([]() -> Config::SubscriptionPtr { + throw EnvoyException("bad config"); + return nullptr; + })); + + envoy::config::bootstrap::v3::LayeredRuntime config; + auto* layer = config.add_layers(); + layer->set_name("some_other_resource"); + auto* rtds_layer = layer->mutable_rtds_layer(); + rtds_layer->set_name("some_resource"); + rtds_layer->mutable_rtds_config(); + + 
EXPECT_CALL(cm_, subscriptionFactory()).Times(1); + LoaderImpl loader(dispatcher_, tls_, config, local_info_, store_, generator_, validation_visitor_, + *api_); + + EXPECT_THROW_WITH_MESSAGE(loader.initialize(cm_), EnvoyException, "bad config"); +} + } // namespace } // namespace Runtime } // namespace Envoy From 773b8ca4bd861957b19f2c1154c1f308dde80022 Mon Sep 17 00:00:00 2001 From: Sam Flattery <44659644+samflattery@users.noreply.github.com> Date: Thu, 9 Jul 2020 00:14:29 +0100 Subject: [PATCH 563/909] [fuzz] fix oss fuzz crashes due to validation (#11919) - add validation that an enum is within range - add validation that a header is well formed Risk Level: low Testing: passes added regression tests Docs Changes: N/A Release Notes: N/A Fixes: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=23879 https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=23881 Signed-off-by: Sam Flattery --- .../config/route/v3/route_components.proto | 4 +- .../route/v4alpha/route_components.proto | 4 +- .../v3/header_to_metadata.proto | 2 +- .../v4alpha/header_to_metadata.proto | 2 +- .../config/route/v3/route_components.proto | 4 +- .../route/v4alpha/route_components.proto | 4 +- .../v3/header_to_metadata.proto | 2 +- .../v4alpha/header_to_metadata.proto | 2 +- ...ized-route_fuzz_test-4803620674732032.fuzz | 485 ++++++++++++++++++ ...testcase-filter_fuzz_test-5082368313655296 | 7 + 10 files changed, 508 insertions(+), 8 deletions(-) create mode 100644 test/common/router/route_corpus/clusterfuzz-testcase-minimized-route_fuzz_test-4803620674732032.fuzz create mode 100644 test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-filter_fuzz_test-5082368313655296 diff --git a/api/envoy/config/route/v3/route_components.proto b/api/envoy/config/route/v3/route_components.proto index 4120babdf069..46e7ae99f54d 100644 --- a/api/envoy/config/route/v3/route_components.proto +++ b/api/envoy/config/route/v3/route_components.proto @@ -275,7 +275,9 @@ message Route { // Specifies a list of HTTP headers that should be removed from each response // to requests matching this route. - repeated string response_headers_to_remove = 11; + repeated string response_headers_to_remove = 11 [(validate.rules).repeated = { + items {string {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} + }]; // Presence of the object defines whether the connection manager's tracing configuration // is overridden by this route specific instance. diff --git a/api/envoy/config/route/v4alpha/route_components.proto b/api/envoy/config/route/v4alpha/route_components.proto index 2bc1da848606..711914d9d1be 100644 --- a/api/envoy/config/route/v4alpha/route_components.proto +++ b/api/envoy/config/route/v4alpha/route_components.proto @@ -274,7 +274,9 @@ message Route { // Specifies a list of HTTP headers that should be removed from each response // to requests matching this route. - repeated string response_headers_to_remove = 11; + repeated string response_headers_to_remove = 11 [(validate.rules).repeated = { + items {string {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} + }]; // Presence of the object defines whether the connection manager's tracing configuration // is overridden by this route specific instance. 
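Note: the `HTTP_HEADER_NAME` / `min_bytes: 1` rules added to `response_headers_to_remove` above reject control-character header names (such as the `"\022"` entries in the regression corpus added later in this patch) before they ever reach route handling. As a rough standalone illustration of what that well-known regex enforces, assuming the RFC 7230 "token" character set (an approximation for exposition only, not Envoy's actual validator)::

    #include <cctype>
    #include <iostream>
    #include <string>

    // Approximation of the RFC 7230 "token" rule behind HTTP_HEADER_NAME:
    // a header name must be non-empty and contain only tchar characters.
    bool isValidHeaderName(const std::string& name) {
      static const std::string extra = "!#$%&'*+-.^_`|~";
      if (name.empty()) {
        return false; // corresponds to the min_bytes: 1 constraint
      }
      for (unsigned char c : name) {
        if (!std::isalnum(c) && extra.find(static_cast<char>(c)) == std::string::npos) {
          return false; // control bytes such as 0x12 ("\022") are rejected here
        }
      }
      return true;
    }

    int main() {
      std::cout << isValidHeaderName("x-forwarded-for") << "\n"; // 1
      std::cout << isValidHeaderName("\x12") << "\n";            // 0
      std::cout << isValidHeaderName("") << "\n";                // 0
      return 0;
    }

With these constraints in place, a malformed header name fails proto validation at config load time instead of being handed to the router.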
diff --git a/api/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto b/api/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto index 07fbba4089f7..189de8e7454f 100644 --- a/api/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto +++ b/api/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto @@ -78,7 +78,7 @@ message Config { [(udpa.annotations.field_migrate).oneof_promotion = "value_type"]; // The value's type — defaults to string. - ValueType type = 4; + ValueType type = 4 [(validate.rules).enum = {defined_only: true}]; // How is the value encoded, default is NONE (not encoded). // The value will be decoded accordingly before storing to metadata. diff --git a/api/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto b/api/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto index c7df11e3fcb6..603d0a002dc8 100644 --- a/api/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto +++ b/api/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto @@ -78,7 +78,7 @@ message Config { } // The value's type — defaults to string. - ValueType type = 4; + ValueType type = 4 [(validate.rules).enum = {defined_only: true}]; // How is the value encoded, default is NONE (not encoded). // The value will be decoded accordingly before storing to metadata. diff --git a/generated_api_shadow/envoy/config/route/v3/route_components.proto b/generated_api_shadow/envoy/config/route/v3/route_components.proto index b91cf10620ed..1e077dee4d11 100644 --- a/generated_api_shadow/envoy/config/route/v3/route_components.proto +++ b/generated_api_shadow/envoy/config/route/v3/route_components.proto @@ -274,7 +274,9 @@ message Route { // Specifies a list of HTTP headers that should be removed from each response // to requests matching this route. - repeated string response_headers_to_remove = 11; + repeated string response_headers_to_remove = 11 [(validate.rules).repeated = { + items {string {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} + }]; // Presence of the object defines whether the connection manager's tracing configuration // is overridden by this route specific instance. diff --git a/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto b/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto index 9cdb879eed97..97fd33e535c7 100644 --- a/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto +++ b/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto @@ -274,7 +274,9 @@ message Route { // Specifies a list of HTTP headers that should be removed from each response // to requests matching this route. - repeated string response_headers_to_remove = 11; + repeated string response_headers_to_remove = 11 [(validate.rules).repeated = { + items {string {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} + }]; // Presence of the object defines whether the connection manager's tracing configuration // is overridden by this route specific instance. 
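Note: for context on the `defined_only: true` rule added to the `header_to_metadata` protos above (and mirrored in the generated_api_shadow copies that follow): proto3 deserialization keeps unrecognized enum integers, so without the rule the filter can be handed a `ValueType` outside the declared range. A minimal standalone sketch of that failure mode, using a hypothetical enum that only mimics the proto and is not the generated code::

    #include <cstdint>
    #include <iostream>

    // Stand-in for a proto3 enum; an unvalidated config can carry any int32.
    enum class ValueType : std::int32_t { STRING = 0, NUMBER = 1, PROTOBUF_VALUE = 2 };

    const char* describe(ValueType t) {
      switch (t) {
      case ValueType::STRING:
        return "string";
      case ValueType::NUMBER:
        return "number";
      case ValueType::PROTOBUF_VALUE:
        return "protobuf value";
      }
      // Only reachable for out-of-range values; unchecked values like this are
      // exactly what the enum rule above now rejects up front.
      return "out of range";
    }

    int main() {
      std::cout << describe(static_cast<ValueType>(42)) << "\n"; // "out of range"
      return 0;
    }

With `defined_only: true`, such a value is rejected during config validation rather than reaching filter code that assumes a defined enumerator.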
diff --git a/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto b/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto index 07fbba4089f7..189de8e7454f 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto @@ -78,7 +78,7 @@ message Config { [(udpa.annotations.field_migrate).oneof_promotion = "value_type"]; // The value's type — defaults to string. - ValueType type = 4; + ValueType type = 4 [(validate.rules).enum = {defined_only: true}]; // How is the value encoded, default is NONE (not encoded). // The value will be decoded accordingly before storing to metadata. diff --git a/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto b/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto index c7df11e3fcb6..603d0a002dc8 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto @@ -78,7 +78,7 @@ message Config { } // The value's type — defaults to string. - ValueType type = 4; + ValueType type = 4 [(validate.rules).enum = {defined_only: true}]; // How is the value encoded, default is NONE (not encoded). // The value will be decoded accordingly before storing to metadata. diff --git a/test/common/router/route_corpus/clusterfuzz-testcase-minimized-route_fuzz_test-4803620674732032.fuzz b/test/common/router/route_corpus/clusterfuzz-testcase-minimized-route_fuzz_test-4803620674732032.fuzz new file mode 100644 index 000000000000..f47ad4226d39 --- /dev/null +++ b/test/common/router/route_corpus/clusterfuzz-testcase-minimized-route_fuzz_test-4803620674732032.fuzz @@ -0,0 +1,485 @@ +config { + virtual_hosts { + name: "/" + domains: "" + domains: "" + domains: "*" + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + enabled { + } + } + } + response_headers_to_remove: "\022" + name: "J" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + enabled { + } + } + } + response_headers_to_remove: "\022" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + enabled { + } + } + } + response_headers_to_remove: "\022" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster: "J" + upgrade_configs { + enabled { + } + } + } + response_headers_to_remove: "\022" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + enabled { + value: true + } + } + } + response_headers_to_remove: "\021" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + enabled { + } + } + } + response_headers_to_remove: "\022" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + enabled { + } + } + } + response_headers_to_remove: "\25537" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + enabled { + } + } + } + response_headers_to_remove: "\022" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + 
upgrade_configs { + enabled { + } + } + } + response_headers_to_remove: "\022" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + enabled { + } + } + } + response_headers_to_remove: "W" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + enabled { + } + } + } + response_headers_to_remove: "\022" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + } + } + response_headers_to_remove: "\022" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + enabled { + } + } + } + response_headers_to_remove: "\022" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + enabled { + } + } + } + response_headers_to_remove: "\020" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + enabled { + value: true + } + } + } + response_headers_to_remove: "\021" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + enabled { + } + } + } + response_headers_to_remove: "\022" + } + routes { + match { + prefix: "" + grpc { + } + } + response_headers_to_remove: "\022" + filter_action { + } + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + enabled { + } + } + } + response_headers_to_remove: "\022" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + enabled { + value: true + } + } + } + response_headers_to_remove: "" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster: "J" + upgrade_configs { + enabled { + } + } + } + response_headers_to_remove: "\022" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + enabled { + } + } + } + response_headers_to_remove: "\022" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + enabled { + } + } + } + response_headers_to_remove: "\022" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + enabled { + value: true + } + } + } + response_headers_to_remove: "\022" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + enabled { + } + } + } + response_headers_to_remove: "\022" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + } + } + response_headers_to_remove: "\021" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + enabled { + } + } + } + response_headers_to_remove: "\001" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + enabled { + } + } + } + response_headers_to_remove: "\0s#" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + enabled { + } + } + } + response_headers_to_remove: "\022" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + enabled { + } + } + } + response_headers_to_remove: "\022" + request_headers_to_remove: "J" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + enabled { + } + } + } + 
response_headers_to_remove: "\022" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + enabled { + } + } + } + response_headers_to_remove: "\022" + } + routes { + match { + prefix: "" + grpc { + } + } + route { + cluster_header: "J" + upgrade_configs { + enabled { + } + } + } + response_headers_to_remove: "\022" + } + } +} diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-filter_fuzz_test-5082368313655296 b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-filter_fuzz_test-5082368313655296 new file mode 100644 index 000000000000..2ac06ba4abef --- /dev/null +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-filter_fuzz_test-5082368313655296 @@ -0,0 +1,7 @@ +config { + name: "envoy.filters.http.header_to_metadata" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.http.header_to_metadata.v3.Config" + value: "\n\033\n\002;;\032\023\022\001;\032\014stanotcci_fi \t \001\n+\n\001;\022\021\022\001;\032\014static_confi\032\023\022\001;\032\014static_confi \t\022\031\n\002m;\032\023\022\001;\032\014stanotcci_fi \t" + } +} From cc64cb93dac1fbcd409ad132ac81ecf7cd0b15aa Mon Sep 17 00:00:00 2001 From: Rei Shimizu Date: Thu, 9 Jul 2020 09:00:12 +0900 Subject: [PATCH 564/909] dynamic_forward_proxy: SNI based Dynamic Forward Proxy Circuit Breakers (#11887) Commit Message: Add DNS Cache circuit breakers to SNI based dynamic forward proxy Additional Description: Risk Level: Low Testing: Unit / Integration Docs Changes: Required Release Notes: Required Signed-off-by: Shikugawa --- .../sni_dynamic_forward_proxy/proxy_filter.cc | 15 +++++++++-- .../sni_dynamic_forward_proxy/proxy_filter.h | 2 ++ .../proxy_filter_integration_test.cc | 26 ++++++++++++++----- .../proxy_filter_test.cc | 24 ++++++++++++++--- 4 files changed, 55 insertions(+), 12 deletions(-) diff --git a/source/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter.cc b/source/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter.cc index 115901dd544e..9b3584de72c4 100644 --- a/source/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter.cc +++ b/source/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter.cc @@ -33,14 +33,23 @@ Network::FilterStatus ProxyFilter::onNewConnection() { return Network::FilterStatus::Continue; } - // TODO(lizan): implement circuit breaker in SNI dynamic forward proxy like it is in HTTP: - // https://github.com/envoyproxy/envoy/blob/master/source/extensions/filters/http/dynamic_forward_proxy/proxy_filter.cc#L65 + circuit_breaker_ = config_->cache().canCreateDnsRequest(absl::nullopt); + + if (circuit_breaker_ == nullptr) { + ENVOY_CONN_LOG(debug, "pending request overflow", read_callbacks_->connection()); + read_callbacks_->connection().close(Network::ConnectionCloseType::NoFlush); + return Network::FilterStatus::StopIteration; + } uint32_t default_port = config_->port(); auto result = config_->cache().loadDnsCacheEntry(sni, default_port, *this); cache_load_handle_ = std::move(result.handle_); + if (cache_load_handle_ == nullptr) { + circuit_breaker_.reset(); + } + switch (result.status_) { case LoadDnsCacheEntryStatus::InCache: { ASSERT(cache_load_handle_ == nullptr); @@ -66,6 +75,8 @@ Network::FilterStatus ProxyFilter::onNewConnection() { void ProxyFilter::onLoadDnsCacheComplete() { ENVOY_CONN_LOG(debug, "load DNS cache complete, continuing", read_callbacks_->connection()); + 
ASSERT(circuit_breaker_ != nullptr); + circuit_breaker_.reset(); read_callbacks_->continueReading(); } diff --git a/source/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter.h b/source/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter.h index e171cb3b0cb5..65cd7235b71e 100644 --- a/source/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter.h +++ b/source/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter.h @@ -40,6 +40,7 @@ class ProxyFilter Logger::Loggable { public: ProxyFilter(ProxyFilterConfigSharedPtr config); + // Network::ReadFilter Network::FilterStatus onData(Buffer::Instance&, bool) override { return Network::FilterStatus::Continue; @@ -54,6 +55,7 @@ class ProxyFilter private: const ProxyFilterConfigSharedPtr config_; + Upstream::ResourceAutoIncDecPtr circuit_breaker_; Extensions::Common::DynamicForwardProxy::DnsCache::LoadDnsCacheEntryHandlePtr cache_load_handle_; Network::ReadFilterCallbacks* read_callbacks_{}; }; diff --git a/test/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter_integration_test.cc b/test/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter_integration_test.cc index 53ff3c2fd6f2..39a57e7781c7 100644 --- a/test/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter_integration_test.cc +++ b/test/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter_integration_test.cc @@ -23,12 +23,12 @@ class SniDynamicProxyFilterIntegrationTest : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, GetParam(), ConfigHelper::tcpProxyConfig()) {} - void setup(uint64_t max_hosts = 1024) { + void setup(uint64_t max_hosts = 1024, uint32_t max_pending_requests = 1024) { setUpstreamProtocol(FakeHttpConnection::Type::HTTP1); config_helper_.addListenerFilter(ConfigHelper::tlsInspectorFilter()); - config_helper_.addConfigModifier([this, max_hosts]( + config_helper_.addConfigModifier([this, max_hosts, max_pending_requests]( envoy::config::bootstrap::v3::Bootstrap& bootstrap) { // Switch predefined cluster_0 to CDS filesystem sourcing. 
bootstrap.mutable_dynamic_resources()->mutable_cds_config()->set_path(cds_helper_.cds_path()); @@ -43,10 +43,12 @@ name: envoy.filters.http.dynamic_forward_proxy name: foo dns_lookup_family: {} max_hosts: {} + dns_cache_circuit_breaker: + max_pending_requests: {} port_value: {} )EOF", Network::Test::ipVersionToDnsFamily(GetParam()), max_hosts, - fake_upstreams_[0]->localAddress()->ip()->port()); + max_pending_requests, fake_upstreams_[0]->localAddress()->ip()->port()); config_helper_.addNetworkFilter(filter); }); @@ -56,8 +58,8 @@ name: envoy.filters.http.dynamic_forward_proxy cluster_.set_name("cluster_0"); cluster_.set_lb_policy(envoy::config::cluster::v3::Cluster::CLUSTER_PROVIDED); - const std::string cluster_type_config = - fmt::format(R"EOF( + const std::string cluster_type_config = fmt::format( + R"EOF( name: envoy.clusters.dynamic_forward_proxy typed_config: "@type": type.googleapis.com/envoy.extensions.clusters.dynamic_forward_proxy.v3.ClusterConfig @@ -65,8 +67,10 @@ name: envoy.clusters.dynamic_forward_proxy name: foo dns_lookup_family: {} max_hosts: {} + dns_cache_circuit_breaker: + max_pending_requests: {} )EOF", - Network::Test::ipVersionToDnsFamily(GetParam()), max_hosts); + Network::Test::ipVersionToDnsFamily(GetParam()), max_hosts, max_pending_requests); TestUtility::loadFromYaml(cluster_type_config, *cluster_.mutable_cluster_type()); @@ -129,5 +133,15 @@ TEST_P(SniDynamicProxyFilterIntegrationTest, UpstreamTls) { response->waitForEndStream(); checkSimpleRequestSuccess(0, 0, response.get()); } + +TEST_P(SniDynamicProxyFilterIntegrationTest, CircuitBreakerInvokedUpstreamTls) { + setup(1024, 0); + + codec_client_ = makeRawHttpConnection( + makeSslClientConnection(Ssl::ClientSslTransportOptions().setSni("localhost")), absl::nullopt); + ASSERT_FALSE(codec_client_->connected()); + EXPECT_EQ(1, test_server_->counter("dns_cache.foo.dns_rq_pending_overflow")->value()); +} + } // namespace } // namespace Envoy diff --git a/test/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter_test.cc b/test/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter_test.cc index 12755253776d..fa28ec75df9b 100644 --- a/test/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter_test.cc +++ b/test/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter_test.cc @@ -40,10 +40,6 @@ class SniDynamicProxyFilterTest // Allow for an otherwise strict mock. ON_CALL(callbacks_, connection()).WillByDefault(ReturnRef(connection_)); EXPECT_CALL(callbacks_, connection()).Times(AtLeast(0)); - - // Configure max pending to 1 so we can test circuit breaking. - // TODO(lizan): implement circuit breaker in SNI dynamic forward proxy - cm_.thread_local_cluster_.cluster_.info_->resetResourceManager(0, 1, 0, 0, 0); } ~SniDynamicProxyFilterTest() override { @@ -62,6 +58,7 @@ class SniDynamicProxyFilterTest std::unique_ptr filter_; Network::MockReadFilterCallbacks callbacks_; NiceMock connection_; + NiceMock pending_requests_; }; // No SNI handling. 
@@ -72,6 +69,10 @@ TEST_F(SniDynamicProxyFilterTest, NoSNI) { TEST_F(SniDynamicProxyFilterTest, LoadDnsCache) { EXPECT_CALL(connection_, requestedServerName()).WillRepeatedly(Return("foo")); + Upstream::ResourceAutoIncDec* circuit_breakers_{ + new Upstream::ResourceAutoIncDec(pending_requests_)}; + EXPECT_CALL(*dns_cache_manager_->dns_cache_, canCreateDnsRequest_(_)) + .WillOnce(Return(circuit_breakers_)); Extensions::Common::DynamicForwardProxy::MockLoadDnsCacheEntryHandle* handle = new Extensions::Common::DynamicForwardProxy::MockLoadDnsCacheEntryHandle(); EXPECT_CALL(*dns_cache_manager_->dns_cache_, loadDnsCacheEntry_(Eq("foo"), 443, _)) @@ -86,6 +87,10 @@ TEST_F(SniDynamicProxyFilterTest, LoadDnsCache) { TEST_F(SniDynamicProxyFilterTest, LoadDnsInCache) { EXPECT_CALL(connection_, requestedServerName()).WillRepeatedly(Return("foo")); + Upstream::ResourceAutoIncDec* circuit_breakers_{ + new Upstream::ResourceAutoIncDec(pending_requests_)}; + EXPECT_CALL(*dns_cache_manager_->dns_cache_, canCreateDnsRequest_(_)) + .WillOnce(Return(circuit_breakers_)); EXPECT_CALL(*dns_cache_manager_->dns_cache_, loadDnsCacheEntry_(Eq("foo"), 443, _)) .WillOnce(Return(MockLoadDnsCacheEntryResult{LoadDnsCacheEntryStatus::InCache, nullptr})); @@ -95,12 +100,23 @@ TEST_F(SniDynamicProxyFilterTest, LoadDnsInCache) { // Cache overflow. TEST_F(SniDynamicProxyFilterTest, CacheOverflow) { EXPECT_CALL(connection_, requestedServerName()).WillRepeatedly(Return("foo")); + Upstream::ResourceAutoIncDec* circuit_breakers_{ + new Upstream::ResourceAutoIncDec(pending_requests_)}; + EXPECT_CALL(*dns_cache_manager_->dns_cache_, canCreateDnsRequest_(_)) + .WillOnce(Return(circuit_breakers_)); EXPECT_CALL(*dns_cache_manager_->dns_cache_, loadDnsCacheEntry_(Eq("foo"), 443, _)) .WillOnce(Return(MockLoadDnsCacheEntryResult{LoadDnsCacheEntryStatus::Overflow, nullptr})); EXPECT_CALL(connection_, close(Network::ConnectionCloseType::NoFlush)); EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onNewConnection()); } +TEST_F(SniDynamicProxyFilterTest, CircuitBreakerInvoked) { + EXPECT_CALL(connection_, requestedServerName()).WillRepeatedly(Return("foo")); + EXPECT_CALL(*dns_cache_manager_->dns_cache_, canCreateDnsRequest_(_)).WillOnce(Return(nullptr)); + EXPECT_CALL(connection_, close(Network::ConnectionCloseType::NoFlush)); + EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onNewConnection()); +} + } // namespace } // namespace SniDynamicForwardProxy From 34f35dc42709c5183a9fa9e4d2dd8828323f80c2 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Wed, 8 Jul 2020 20:17:39 -0400 Subject: [PATCH 565/909] flake (#11958) the disconnect due to match can occur before the write happens. don't validate write success. 
Risk Level: n/a (test only) Testing: yes Docs Changes: no Release Notes: no Signed-off-by: Alyssa Wilk --- test/integration/filter_manager_integration_test.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/filter_manager_integration_test.cc b/test/integration/filter_manager_integration_test.cc index c601a0f20702..0d1c55afa896 100644 --- a/test/integration/filter_manager_integration_test.cc +++ b/test/integration/filter_manager_integration_test.cc @@ -468,7 +468,7 @@ TEST_P(InjectDataWithEchoFilterIntegrationTest, FilterChainMismatch) { initialize(); auto tcp_client = makeTcpConnection(lookupPort("listener_0")); - ASSERT_TRUE(tcp_client->write("hello", false)); + ASSERT_TRUE(tcp_client->write("hello", false, false)); std::string access_log = absl::StrCat("NR ", StreamInfo::ResponseCodeDetails::get().FilterChainNotFound); From 8bdfd88dc7fed27411c33527a6ffd4e8e6bf0d95 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Wed, 8 Jul 2020 20:19:43 -0400 Subject: [PATCH 566/909] http: adding a bit more http2 debug logging (#11956) Adding a bit of debug logging from nghttp2 libraries which works in opt builds. Risk Level: low (debug log only) Testing: new unit tests Docs Changes: added FAQ Release Notes: n/a May help debug #11774 Signed-off-by: Alyssa Wilk --- .../why_is_envoy_sending_http2_resets.rst | 20 +++++++++++++++++++ docs/root/faq/overview.rst | 1 + source/common/http/http2/codec_impl.cc | 10 ++++++++++ source/common/http/http2/codec_impl.h | 1 + test/common/http/http2/BUILD | 1 + test/common/http/http2/codec_impl_test.cc | 7 ++++++- 6 files changed, 39 insertions(+), 1 deletion(-) create mode 100644 docs/root/faq/debugging/why_is_envoy_sending_http2_resets.rst diff --git a/docs/root/faq/debugging/why_is_envoy_sending_http2_resets.rst b/docs/root/faq/debugging/why_is_envoy_sending_http2_resets.rst new file mode 100644 index 000000000000..ab0c41cb8a60 --- /dev/null +++ b/docs/root/faq/debugging/why_is_envoy_sending_http2_resets.rst @@ -0,0 +1,20 @@ +.. _why_is_envoy_sending_http2_resets: + +Why is Envoy sending HTTP/2 resets? +=================================== + +The HTTP/2 reset path is mostly governed by the codec Envoy uses to frame HTTP/2, nghttp2. nghttp2 has +extremely good adherence to the HTTP/2 spec, but as many clients are not exactly as compliant, this +mismatch can cause unexpected resets. Unfortunately, unlike the debugging the +:ref:`internal response path `, Envoy has limited visibility into +the specific reason nghttp2 reset a given stream. + +If you have a reproducible failure case, you can run it against a debug Envoy with "-l trace" to get +detailed nghttp2 error logs, which often indicate which header failed compliance checks. 
Alternately, +if you can afford to run with "-l trace" on a machine encountering the errors, you can look for logs +from the file "source/common/http/http2/codec_impl.cc" of the form +`invalid http2: [nghttp2 error detail]` +for example: +`invalid http2: Invalid HTTP header field was received: frame type: 1, stream: 1, name: [content-length], value: [3]` + + diff --git a/docs/root/faq/overview.rst b/docs/root/faq/overview.rst index a0a160cb2118..d8225f95933e 100644 --- a/docs/root/faq/overview.rst +++ b/docs/root/faq/overview.rst @@ -35,6 +35,7 @@ Debugging :maxdepth: 2 debugging/why_is_envoy_sending_internal_responses + debugging/why_is_envoy_sending_http2_resets debugging/why_is_envoy_404ing_connect_requests debugging/why_is_envoy_sending_413s debugging/why_is_my_route_not_found diff --git a/source/common/http/http2/codec_impl.cc b/source/common/http/http2/codec_impl.cc index 532831198760..4a94bb3aafdf 100644 --- a/source/common/http/http2/codec_impl.cc +++ b/source/common/http/http2/codec_impl.cc @@ -759,6 +759,11 @@ int ConnectionImpl::onFrameSend(const nghttp2_frame* frame) { return 0; } +int ConnectionImpl::onError(absl::string_view error) { + ENVOY_CONN_LOG(debug, "invalid http2: {}", connection_, error); + return 0; +} + int ConnectionImpl::onInvalidFrame(int32_t stream_id, int error_code) { ENVOY_CONN_LOG(debug, "invalid frame: {} on stream {}", connection_, nghttp2_strerror(error_code), stream_id); @@ -1171,6 +1176,11 @@ ConnectionImpl::Http2Callbacks::Http2Callbacks() { ASSERT(frame->hd.length <= len); return static_cast(user_data)->packMetadata(frame->hd.stream_id, buf, len); }); + + nghttp2_session_callbacks_set_error_callback2( + callbacks_, [](nghttp2_session*, int, const char* msg, size_t len, void* user_data) -> int { + return static_cast(user_data)->onError(absl::string_view(msg, len)); + }); } ConnectionImpl::Http2Callbacks::~Http2Callbacks() { nghttp2_session_callbacks_del(callbacks_); } diff --git a/source/common/http/http2/codec_impl.h b/source/common/http/http2/codec_impl.h index 895e8d21c088..cf848599c800 100644 --- a/source/common/http/http2/codec_impl.h +++ b/source/common/http/http2/codec_impl.h @@ -491,6 +491,7 @@ class ConnectionImpl : public virtual Connection, protected Logger::LoggableencodeHeaders(response_headers, false), ClientCodecError); + EXPECT_LOG_CONTAINS( + "debug", + "Invalid HTTP header field was received: frame type: 1, stream: 1, name: [content-length], " + "value: [3]", + EXPECT_THROW(response_encoder_->encodeHeaders(response_headers, false), ClientCodecError)); EXPECT_EQ(1, client_stats_store_.counter("http2.rx_messaging_error").value()); }; From fdf2637cdbc9cd15e4507afe789e74aaf6b39903 Mon Sep 17 00:00:00 2001 From: Utsav Shah Date: Wed, 8 Jul 2020 18:49:55 -0700 Subject: [PATCH 567/909] check_format: use shutil to find binaries (#11946) Commit Message: The `lookPath` function is buggy and crashes for some malformed `$PATH` entries on macOS. We can use `shutil.which()` to find binaries in `$PATH` reliably. `shutil.which()` is available since Python3.3, which is pretty old now (released in 2012). 
Risk Level: N/A Testing: Ran it locally Signed-off-by: Utsav Shah --- tools/code_format/check_format.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/tools/code_format/check_format.py b/tools/code_format/check_format.py index 20387df2fbc6..351414da436b 100755 --- a/tools/code_format/check_format.py +++ b/tools/code_format/check_format.py @@ -218,11 +218,7 @@ def readFile(path): # lookPath searches for the given executable in all directories in PATH # environment variable. If it cannot be found, empty string is returned. def lookPath(executable): - for path_dir in os.environ["PATH"].split(os.pathsep): - executable_path = os.path.expanduser(os.path.join(path_dir, executable)) - if os.path.exists(executable_path): - return executable_path - return "" + return shutil.which(executable) or '' # pathExists checks whether the given path exists. This function assumes that From f9de409eb9ee2983d521c7defa741d1c4964d4cd Mon Sep 17 00:00:00 2001 From: Sam Flattery <44659644+samflattery@users.noreply.github.com> Date: Thu, 9 Jul 2020 16:46:20 +0100 Subject: [PATCH 568/909] [fuzz] fix timeouts in xDS fuzzer (#11924) Commit Message: Fix timeouts in xDS fuzzer Additional Description: xDS fuzzer was timing out frequently on OSS fuzz while waiting for ACKs, so I added more wait calls which fixed the issues added corpus entries that crashed on OSS fuzz Signed-off-by: Sam Flattery --- .../config_validation/xds_corpus/example4 | 60 +++++++++++++++++++ .../config_validation/xds_corpus/example5 | 48 +++++++++++++++ test/server/config_validation/xds_fuzz.cc | 53 ++++++++++------ 3 files changed, 142 insertions(+), 19 deletions(-) create mode 100644 test/server/config_validation/xds_corpus/example4 create mode 100644 test/server/config_validation/xds_corpus/example5 diff --git a/test/server/config_validation/xds_corpus/example4 b/test/server/config_validation/xds_corpus/example4 new file mode 100644 index 000000000000..3e87d8b305f1 --- /dev/null +++ b/test/server/config_validation/xds_corpus/example4 @@ -0,0 +1,60 @@ +actions { + add_route { + } +} +actions { + remove_route { + route_num: 13107200 + } +} +actions { + add_listener { + listener_num: 1073741824 + route_num: 10752 + } +} +actions { + add_listener { + listener_num: 1073741824 + route_num: 1 + } +} +actions { + remove_route { + route_num: 8 + } +} +actions { + add_listener { + listener_num: 1073741824 + route_num: 10752 + } +} +actions { + remove_listener { + listener_num: 4 + } +} +actions { + remove_listener { + listener_num: 8 + } +} +actions { + remove_route { + route_num: 8 + } +} +actions { + remove_route { + route_num: 13107200 + } +} +actions { + add_listener { + listener_num: 8 + } +} +config { + sotw_or_delta: DELTA +} diff --git a/test/server/config_validation/xds_corpus/example5 b/test/server/config_validation/xds_corpus/example5 new file mode 100644 index 000000000000..037df55541f5 --- /dev/null +++ b/test/server/config_validation/xds_corpus/example5 @@ -0,0 +1,48 @@ +actions { + add_route { + route_num: 4261412864 + } +} +actions { + remove_listener { + listener_num: 7012368 + } +} +actions { + remove_route { + route_num: 14849 + } +} +actions { + add_route { + route_num: 4261412864 + } +} +actions { + remove_route { + route_num: 14849 + } +} +actions { + remove_listener { + listener_num: 7012388 + } +} +actions { + add_route { + route_num: 7012388 + } +} +actions { + remove_route { + route_num: 7012388 + } +} +actions { + remove_listener { + listener_num: 7012352 + } +} +config { + sotw_or_delta: DELTA +} diff 
--git a/test/server/config_validation/xds_fuzz.cc b/test/server/config_validation/xds_fuzz.cc index b369b482ddd2..27ab4fa61053 100644 --- a/test/server/config_validation/xds_fuzz.cc +++ b/test/server/config_validation/xds_fuzz.cc @@ -152,8 +152,8 @@ AssertionResult XdsFuzzTest::waitForAck(const std::string& expected_type_url, do { VERIFY_ASSERTION(xds_stream_->waitForGrpcMessage(*dispatcher_, discovery_request)); ENVOY_LOG_MISC(info, "Received gRPC message with type {} and version {}", - discovery_request.type_url(), expected_version); - } while (expected_type_url != discovery_request.type_url() && + discovery_request.type_url(), discovery_request.version_info()); + } while (expected_type_url != discovery_request.type_url() || expected_version != discovery_request.version_info()); } else { API_NO_BOOST(envoy::api::v2::DeltaDiscoveryRequest) delta_discovery_request; @@ -189,12 +189,16 @@ void XdsFuzzTest::replay() { // URL so just don't check them until a listener is added bool sent_listener = false; + uint32_t added = 0; + uint32_t modified = 0; + uint32_t removed = 0; + for (const auto& action : actions_) { switch (action.action_selector_case()) { case test::server::config_validation::Action::kAddListener: { sent_listener = true; uint32_t listener_num = action.add_listener().listener_num(); - removeListener(listener_num); + auto removed_name = removeListener(listener_num); auto listener = buildListener(listener_num, action.add_listener().route_num()); listeners_.push_back(listener); @@ -203,47 +207,58 @@ void XdsFuzzTest::replay() { // additional discoveryRequests at launch that we might not want to // respond to yet EXPECT_TRUE(waitForAck(Config::TypeUrl::get().Listener, std::to_string(version_))); + if (removed_name) { + modified++; + test_server_->waitForCounterGe("listener_manager.listener_modified", modified); + } else { + added++; + test_server_->waitForCounterGe("listener_manager.listener_added", added); + } break; } case test::server::config_validation::Action::kRemoveListener: { - /* sent_listener = true; */ - auto removed = removeListener(action.remove_listener().listener_num()); + auto removed_name = removeListener(action.remove_listener().listener_num()); - if (removed) { - updateListener(listeners_, {}, {*removed}); + if (removed_name) { + removed++; + updateListener(listeners_, {}, {*removed_name}); EXPECT_TRUE(waitForAck(Config::TypeUrl::get().Listener, std::to_string(version_))); + test_server_->waitForCounterGe("listener_manager.listener_removed", removed); } break; } case test::server::config_validation::Action::kAddRoute: { + if (!sent_listener) { + ENVOY_LOG_MISC(info, "Ignoring request to add route_{}", action.add_route().route_num()); + break; + } uint32_t route_num = action.add_route().route_num(); - auto removed = removeRoute(route_num); + auto removed_name = removeRoute(route_num); auto route = buildRouteConfig(route_num); routes_.push_back(route); - if (removed) { + if (removed_name) { // if the route was already in routes_, don't send a duplicate add in delta request updateRoute(routes_, {}, {}); } else { updateRoute(routes_, {route}, {}); } - if (sent_listener) { - EXPECT_TRUE( - waitForAck(Config::TypeUrl::get().RouteConfiguration, std::to_string(version_))); - } + EXPECT_TRUE(waitForAck(Config::TypeUrl::get().RouteConfiguration, std::to_string(version_))); break; } case test::server::config_validation::Action::kRemoveRoute: { - if (sotw_or_delta_ == Grpc::SotwOrDelta::Sotw) { - // routes cannot be removed in SOTW updates - break; - } + // it seems like routes 
cannot be removed - leaving a route out of an SOTW request does not + // remove it and sending a remove message in a delta request is ignored + ENVOY_LOG_MISC(info, "Ignoring request to remove route_{}", + action.remove_route().route_num()); + break; - auto removed = removeRoute(action.remove_route().route_num()); + // TODO(samflattery): remove if it's true that routes cannot be removed + auto removed_name = removeRoute(action.remove_route().route_num()); if (removed) { - updateRoute(routes_, {}, {*removed}); + updateRoute(routes_, {}, {*removed_name}); EXPECT_TRUE( waitForAck(Config::TypeUrl::get().RouteConfiguration, std::to_string(version_))); } From c02fe0c6084830d0d9921dbba3be2d760fbb4fe4 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Thu, 9 Jul 2020 11:59:20 -0400 Subject: [PATCH 569/909] test: deflake coverage take two (#11970) Making the known-slow many header test slightly less slow in the hopes of reducing timeouts. Risk Level: n/a Testing: n/a Docs Changes: n/a Release Notes: n/a Signed-off-by: Alyssa Wilk --- test/integration/http_integration.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration/http_integration.cc b/test/integration/http_integration.cc index abec325faaec..3fb6e2627815 100644 --- a/test/integration/http_integration.cc +++ b/test/integration/http_integration.cc @@ -1096,7 +1096,7 @@ void HttpIntegrationTest::testManyRequestHeaders(std::chrono::milliseconds time) // This test uses an Http::HeaderMapImpl instead of an Http::TestHeaderMapImpl to avoid // time-consuming asserts when using a large number of headers. max_request_headers_kb_ = 96; - max_request_headers_count_ = 20005; + max_request_headers_count_ = 10005; config_helper_.addConfigModifier( [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& @@ -1112,7 +1112,7 @@ void HttpIntegrationTest::testManyRequestHeaders(std::chrono::milliseconds time) {Http::Headers::get().Scheme, "http"}, {Http::Headers::get().Host, "host"}}); - for (int i = 0; i < 20000; i++) { + for (int i = 0; i < 10000; i++) { big_headers->addCopy(Http::LowerCaseString(std::to_string(i)), std::string(0, 'a')); } initialize(); From 356fe40edd67ddb8181442548241664424d3ac05 Mon Sep 17 00:00:00 2001 From: Christoph Pakulski Date: Thu, 9 Jul 2020 11:59:42 -0400 Subject: [PATCH 570/909] tap: added generic body matcher (#11274) Added GenericBodyMatcher to tap filter to parse HTTP requests and responses. The matcher may be configured to look for text string or hex bytes. Risk Level: Low for tap filter. Med - modified buffer::search method to limit search to specified number of bytes. This method is used in other parts of the code. Added unit test cases to make sure that there are no side effects. Testing: Added unit tests. Docs Changes: Yes - updated section with example configs. Release Notes: Yes. 
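Note: on the "Med" risk item above, the new matcher locates each configured pattern with a linear scan of the captured body, which is why `Buffer::search` gained a length limit and why the docs below flag the search as potentially CPU intensive. A standalone sketch of the bounded-window idea, using plain `std::string_view` rather than the actual `Buffer::search` signature (names here are illustrative only)::

    #include <cstddef>
    #include <iostream>
    #include <string>
    #include <string_view>

    // bytes_limit semantics: scan only the first `length_limit` bytes of the
    // captured body; a limit of 0 means "search everything that was captured".
    bool bodyContains(std::string_view body, std::string_view pattern, std::size_t length_limit) {
      if (length_limit != 0 && length_limit < body.size()) {
        body = body.substr(0, length_limit); // restrict the search window
      }
      return body.find(pattern) != std::string_view::npos;
    }

    int main() {
      const std::string body = "prefix...test...suffix";
      std::cout << bodyContains(body, "test", 0) << "\n"; // 1: whole body scanned
      std::cout << bodyContains(body, "test", 8) << "\n"; // 0: outside the first 8 bytes
      return 0;
    }

In the filter itself every configured pattern, text or hex bytes, has to be found inside its window for the predicate to match, so tight `bytes_limit` values keep the per-request cost down.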
Fixes #6107 Signed-off-by: Christoph Pakulski --- api/envoy/config/tap/v3/common.proto | 38 +- api/envoy/config/tap/v4alpha/common.proto | 44 +- .../http/http_filters/tap_filter.rst | 37 ++ docs/root/version_history/current.rst | 1 + .../envoy/config/tap/v3/common.proto | 38 +- .../envoy/config/tap/v4alpha/common.proto | 44 +- include/envoy/buffer/buffer.h | 15 +- source/common/buffer/buffer_impl.cc | 26 +- source/common/buffer/buffer_impl.h | 2 +- source/extensions/common/tap/BUILD | 3 + source/extensions/common/tap/tap.h | 7 + source/extensions/common/tap/tap_matcher.cc | 208 +++++++++ source/extensions/common/tap/tap_matcher.h | 154 ++++++- .../filters/http/tap/tap_config_impl.cc | 12 +- .../filters/http/tap/tap_config_impl.h | 2 +- .../extensions/filters/http/tap/tap_filter.cc | 4 +- test/common/buffer/buffer_fuzz.cc | 3 +- test/common/buffer/buffer_speed_test.cc | 4 +- test/common/buffer/owned_impl_test.cc | 67 ++- test/common/buffer/watermark_buffer_test.cc | 4 +- test/extensions/common/tap/common.h | 3 + .../extensions/common/tap/tap_matcher_test.cc | 419 +++++++++++++++++- .../filters/http/tap/tap_config_impl_test.cc | 11 +- .../http/tap/tap_filter_integration_test.cc | 47 ++ .../filters/http/tap/tap_filter_test.cc | 4 +- .../tap/tap_config_impl_test.cc | 3 +- test/server/admin/admin_test.cc | 8 +- 27 files changed, 1159 insertions(+), 49 deletions(-) diff --git a/api/envoy/config/tap/v3/common.proto b/api/envoy/config/tap/v3/common.proto index 0fea8f88a638..e51aba968d42 100644 --- a/api/envoy/config/tap/v3/common.proto +++ b/api/envoy/config/tap/v3/common.proto @@ -47,7 +47,7 @@ message TapConfig { // Tap match configuration. This is a recursive structure which allows complex nested match // configurations to be built using various logical operators. -// [#next-free-field: 9] +// [#next-free-field: 11] message MatchPredicate { option (udpa.annotations.versioning).previous_message_type = "envoy.service.tap.v2alpha.MatchPredicate"; @@ -89,6 +89,12 @@ message MatchPredicate { // HTTP response trailers match configuration. HttpHeadersMatch http_response_trailers_match = 8; + + // HTTP request generic body match configuration. + HttpGenericBodyMatch http_request_generic_body_match = 9; + + // HTTP response generic body match configuration. + HttpGenericBodyMatch http_response_generic_body_match = 10; } } @@ -101,6 +107,36 @@ message HttpHeadersMatch { repeated route.v3.HeaderMatcher headers = 1; } +// HTTP generic body match configuration. +// List of text strings and hex strings to be located in HTTP body. +// All specified strings must be found in the HTTP body for positive match. +// The search may be limited to specified number of bytes from the body start. +// +// .. attention:: +// +// Searching for patterns in HTTP body is potentially cpu intensive. For each specified pattern, http body is scanned byte by byte to find a match. +// If multiple patterns are specified, the process is repeated for each pattern. If location of a pattern is known, ``bytes_limit`` should be specified +// to scan only part of the http body. +message HttpGenericBodyMatch { + message GenericTextMatch { + oneof rule { + option (validate.required) = true; + + // Text string to be located in HTTP body. + string string_match = 1; + + // Sequence of bytes to be located in HTTP body. + bytes binary_match = 2; + } + } + + // Limits search to specified number of bytes - default zero (no limit - match entire captured buffer). + uint32 bytes_limit = 1; + + // List of patterns to match. 
+ repeated GenericTextMatch patterns = 2 [(validate.rules).repeated = {min_items: 1}]; +} + // Tap output configuration. message OutputConfig { option (udpa.annotations.versioning).previous_message_type = diff --git a/api/envoy/config/tap/v4alpha/common.proto b/api/envoy/config/tap/v4alpha/common.proto index b8e8dac291f3..53cb57e5d459 100644 --- a/api/envoy/config/tap/v4alpha/common.proto +++ b/api/envoy/config/tap/v4alpha/common.proto @@ -46,7 +46,7 @@ message TapConfig { // Tap match configuration. This is a recursive structure which allows complex nested match // configurations to be built using various logical operators. -// [#next-free-field: 9] +// [#next-free-field: 11] message MatchPredicate { option (udpa.annotations.versioning).previous_message_type = "envoy.config.tap.v3.MatchPredicate"; @@ -87,6 +87,12 @@ message MatchPredicate { // HTTP response trailers match configuration. HttpHeadersMatch http_response_trailers_match = 8; + + // HTTP request generic body match configuration. + HttpGenericBodyMatch http_request_generic_body_match = 9; + + // HTTP response generic body match configuration. + HttpGenericBodyMatch http_response_generic_body_match = 10; } } @@ -99,6 +105,42 @@ message HttpHeadersMatch { repeated route.v4alpha.HeaderMatcher headers = 1; } +// HTTP generic body match configuration. +// List of text strings and hex strings to be located in HTTP body. +// All specified strings must be found in the HTTP body for positive match. +// The search may be limited to specified number of bytes from the body start. +// +// .. attention:: +// +// Searching for patterns in HTTP body is potentially cpu intensive. For each specified pattern, http body is scanned byte by byte to find a match. +// If multiple patterns are specified, the process is repeated for each pattern. If location of a pattern is known, ``bytes_limit`` should be specified +// to scan only part of the http body. +message HttpGenericBodyMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.tap.v3.HttpGenericBodyMatch"; + + message GenericTextMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.tap.v3.HttpGenericBodyMatch.GenericTextMatch"; + + oneof rule { + option (validate.required) = true; + + // Text string to be located in HTTP body. + string string_match = 1; + + // Sequence of bytes to be located in HTTP body. + bytes binary_match = 2; + } + } + + // Limits search to specified number of bytes - default zero (no limit - match entire captured buffer). + uint32 bytes_limit = 1; + + // List of patterns to match. + repeated GenericTextMatch patterns = 2 [(validate.rules).repeated = {min_items: 1}]; +} + // Tap output configuration. message OutputConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.tap.v3.OutputConfig"; diff --git a/docs/root/configuration/http/http_filters/tap_filter.rst b/docs/root/configuration/http/http_filters/tap_filter.rst index 7db1c47c4bad..f5ed7c7a32ea 100644 --- a/docs/root/configuration/http/http_filters/tap_filter.rst +++ b/docs/root/configuration/http/http_filters/tap_filter.rst @@ -122,6 +122,43 @@ Another example POST body: The preceding configuration instructs the tap filter to match any HTTP requests. All requests will be tapped and streamed out the admin endpoint. +Another example POST body: + +.. 
code-block:: yaml + + config_id: test_config_id + tap_config: + match_config: + and_match: + rules: + - http_request_headers_match: + headers: + - name: foo + exact_match: bar + - http_request_generic_body_match: + patterns: + - string_match: test + - binary_match: 3q2+7w== + bytes_limit: 128 + - http_response_generic_body_match: + patterns: + - binary_match: vu8= + bytes_limit: 64 + output_config: + sinks: + - streaming_admin: {} + +The preceding configuration instructs the tap filter to match any HTTP requests in which a request +header ``foo: bar`` is present AND request body contains string ``test`` and hex bytes ``deadbeef`` (``3q2+7w==`` in base64 format) +in the first 128 bytes AND response body contains hex bytes ``beef`` (``vu8=`` in base64 format) in the first 64 bytes. If all of these +conditions are met, the request will be tapped and streamed out to the admin endpoint. + +.. attention:: + + Searching for patterns in HTTP body is potentially cpu intensive. For each specified pattern, http body is scanned byte by byte to find a match. + If multiple patterns are specified, the process is repeated for each pattern. If location of a pattern is known, ``bytes_limit`` should be specified + to scan only part of the http body. + Output format ------------- diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 680d46f8672c..4735d14dff02 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -23,6 +23,7 @@ Removed Config or Runtime New Features ------------ +* tap: added :ref:`generic body matcher` to scan http requests and responses for text or hex patterns. Deprecated ---------- diff --git a/generated_api_shadow/envoy/config/tap/v3/common.proto b/generated_api_shadow/envoy/config/tap/v3/common.proto index 0fea8f88a638..e51aba968d42 100644 --- a/generated_api_shadow/envoy/config/tap/v3/common.proto +++ b/generated_api_shadow/envoy/config/tap/v3/common.proto @@ -47,7 +47,7 @@ message TapConfig { // Tap match configuration. This is a recursive structure which allows complex nested match // configurations to be built using various logical operators. -// [#next-free-field: 9] +// [#next-free-field: 11] message MatchPredicate { option (udpa.annotations.versioning).previous_message_type = "envoy.service.tap.v2alpha.MatchPredicate"; @@ -89,6 +89,12 @@ message MatchPredicate { // HTTP response trailers match configuration. HttpHeadersMatch http_response_trailers_match = 8; + + // HTTP request generic body match configuration. + HttpGenericBodyMatch http_request_generic_body_match = 9; + + // HTTP response generic body match configuration. + HttpGenericBodyMatch http_response_generic_body_match = 10; } } @@ -101,6 +107,36 @@ message HttpHeadersMatch { repeated route.v3.HeaderMatcher headers = 1; } +// HTTP generic body match configuration. +// List of text strings and hex strings to be located in HTTP body. +// All specified strings must be found in the HTTP body for positive match. +// The search may be limited to specified number of bytes from the body start. +// +// .. attention:: +// +// Searching for patterns in HTTP body is potentially cpu intensive. For each specified pattern, http body is scanned byte by byte to find a match. +// If multiple patterns are specified, the process is repeated for each pattern. If location of a pattern is known, ``bytes_limit`` should be specified +// to scan only part of the http body. 
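A minimal sketch of how the semantics above can be exercised end to end, assuming the helpers the unit tests in this patch use (TestUtility::loadFromYaml, buildMatcher, onNewStream, onRequestBody, matchStatus) and the Envoy::Extensions::Common::Tap namespace; this is an illustration, not code from the patch:

  // Sketch only. Assumes Envoy test namespaces and the tap matcher helpers
  // introduced by this patch.
  envoy::config::tap::v3::MatchPredicate config;
  TestUtility::loadFromYaml(R"EOF(
  http_request_generic_body_match:
    patterns:
    - string_match: test
    bytes_limit: 128
  )EOF",
                            config);

  std::vector<MatcherPtr> matchers;
  buildMatcher(config, matchers);
  Matcher::MatchStatusVector statuses(matchers.size());
  matchers[0]->onNewStream(statuses);

  Buffer::OwnedImpl body("this is a test body");
  matchers[0]->onRequestBody(body, statuses);
  // "test" appears within the first 128 bytes, so the root matcher now
  // reports matches_ == true and might_change_status_ == false.

If the pattern were absent, or bytes_limit were smaller than the offset where the pattern ends, matches_ would remain false.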
+message HttpGenericBodyMatch { + message GenericTextMatch { + oneof rule { + option (validate.required) = true; + + // Text string to be located in HTTP body. + string string_match = 1; + + // Sequence of bytes to be located in HTTP body. + bytes binary_match = 2; + } + } + + // Limits search to specified number of bytes - default zero (no limit - match entire captured buffer). + uint32 bytes_limit = 1; + + // List of patterns to match. + repeated GenericTextMatch patterns = 2 [(validate.rules).repeated = {min_items: 1}]; +} + // Tap output configuration. message OutputConfig { option (udpa.annotations.versioning).previous_message_type = diff --git a/generated_api_shadow/envoy/config/tap/v4alpha/common.proto b/generated_api_shadow/envoy/config/tap/v4alpha/common.proto index b8e8dac291f3..53cb57e5d459 100644 --- a/generated_api_shadow/envoy/config/tap/v4alpha/common.proto +++ b/generated_api_shadow/envoy/config/tap/v4alpha/common.proto @@ -46,7 +46,7 @@ message TapConfig { // Tap match configuration. This is a recursive structure which allows complex nested match // configurations to be built using various logical operators. -// [#next-free-field: 9] +// [#next-free-field: 11] message MatchPredicate { option (udpa.annotations.versioning).previous_message_type = "envoy.config.tap.v3.MatchPredicate"; @@ -87,6 +87,12 @@ message MatchPredicate { // HTTP response trailers match configuration. HttpHeadersMatch http_response_trailers_match = 8; + + // HTTP request generic body match configuration. + HttpGenericBodyMatch http_request_generic_body_match = 9; + + // HTTP response generic body match configuration. + HttpGenericBodyMatch http_response_generic_body_match = 10; } } @@ -99,6 +105,42 @@ message HttpHeadersMatch { repeated route.v4alpha.HeaderMatcher headers = 1; } +// HTTP generic body match configuration. +// List of text strings and hex strings to be located in HTTP body. +// All specified strings must be found in the HTTP body for positive match. +// The search may be limited to specified number of bytes from the body start. +// +// .. attention:: +// +// Searching for patterns in HTTP body is potentially cpu intensive. For each specified pattern, http body is scanned byte by byte to find a match. +// If multiple patterns are specified, the process is repeated for each pattern. If location of a pattern is known, ``bytes_limit`` should be specified +// to scan only part of the http body. +message HttpGenericBodyMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.tap.v3.HttpGenericBodyMatch"; + + message GenericTextMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.tap.v3.HttpGenericBodyMatch.GenericTextMatch"; + + oneof rule { + option (validate.required) = true; + + // Text string to be located in HTTP body. + string string_match = 1; + + // Sequence of bytes to be located in HTTP body. + bytes binary_match = 2; + } + } + + // Limits search to specified number of bytes - default zero (no limit - match entire captured buffer). + uint32 bytes_limit = 1; + + // List of patterns to match. + repeated GenericTextMatch patterns = 2 [(validate.rules).repeated = {min_items: 1}]; +} + // Tap output configuration. 
message OutputConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.tap.v3.OutputConfig"; diff --git a/include/envoy/buffer/buffer.h b/include/envoy/buffer/buffer.h index 1f78f380f6b6..aca59b31d695 100644 --- a/include/envoy/buffer/buffer.h +++ b/include/envoy/buffer/buffer.h @@ -190,9 +190,22 @@ class Instance { * @param data supplies the data to search for. * @param size supplies the length of the data to search for. * @param start supplies the starting index to search from. + * @param length limits the search to specified number of bytes starting from start index. + * When length value is zero, entire length of data from starting index to the end is searched. * @return the index where the match starts or -1 if there is no match. */ - virtual ssize_t search(const void* data, uint64_t size, size_t start) const PURE; + virtual ssize_t search(const void* data, uint64_t size, size_t start, size_t length) const PURE; + + /** + * Search for an occurrence of data within entire buffer. + * @param data supplies the data to search for. + * @param size supplies the length of the data to search for. + * @param start supplies the starting index to search from. + * @return the index where the match starts or -1 if there is no match. + */ + ssize_t search(const void* data, uint64_t size, size_t start) const { + return search(data, size, start, 0); + } /** * Search for an occurrence of data at the start of a buffer. diff --git a/source/common/buffer/buffer_impl.cc b/source/common/buffer/buffer_impl.cc index 716869fac29b..7503104ea426 100644 --- a/source/common/buffer/buffer_impl.cc +++ b/source/common/buffer/buffer_impl.cc @@ -380,7 +380,7 @@ uint64_t OwnedImpl::reserve(uint64_t length, RawSlice* iovecs, uint64_t num_iove return num_slices_used; } -ssize_t OwnedImpl::search(const void* data, uint64_t size, size_t start) const { +ssize_t OwnedImpl::search(const void* data, uint64_t size, size_t start, size_t length) const { // This implementation uses the same search algorithm as evbuffer_search(), a naive // scan that requires O(M*N) comparisons in the worst case. // TODO(brian-pane): replace this with a more efficient search if it shows up @@ -388,9 +388,17 @@ ssize_t OwnedImpl::search(const void* data, uint64_t size, size_t start) const { if (size == 0) { return (start <= length_) ? start : -1; } + + // length equal to zero means that entire buffer must be searched. + // Adjust the length to buffer length taking the staring index into account. + size_t left_to_search = length; + if (0 == length) { + left_to_search = length_ - start; + } ssize_t offset = 0; const uint8_t* needle = static_cast(data); - for (size_t slice_index = 0; slice_index < slices_.size(); slice_index++) { + for (size_t slice_index = 0; slice_index < slices_.size() && (left_to_search > 0); + slice_index++) { const auto& slice = slices_[slice_index]; uint64_t slice_size = slice->dataSize(); if (slice_size <= start) { @@ -403,20 +411,28 @@ ssize_t OwnedImpl::search(const void* data, uint64_t size, size_t start) const { const uint8_t* haystack_end = haystack + slice_size; haystack += start; while (haystack < haystack_end) { + const size_t slice_search_limit = + std::min(static_cast(haystack_end - haystack), left_to_search); // Search within this slice for the first byte of the needle. 
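As a caller-side illustration of the four-argument search() declared above (a sketch only; the expected return values follow the new OwnedImplTest SearchWithLengthLimit cases later in this patch):

  #include "common/buffer/buffer_impl.h"

  // Sketch only: the fourth argument limits how many bytes are scanned,
  // counting from `start`; zero means "scan to the end of the buffer".
  void searchSketch() {
    Envoy::Buffer::OwnedImpl buffer;
    buffer.add("envoy proxy body");
    buffer.search("proxy", 5, 0, 0);  // returns 6: unlimited search finds the pattern
    buffer.search("proxy", 5, 0, 11); // returns 6: the match ends within the first 11 bytes
    buffer.search("proxy", 5, 0, 8);  // returns -1: the limit cuts the pattern off
  }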
const uint8_t* first_byte_match = - static_cast(memchr(haystack, needle[0], haystack_end - haystack)); + static_cast(memchr(haystack, needle[0], slice_search_limit)); if (first_byte_match == nullptr) { + left_to_search -= slice_search_limit; break; } // After finding a match for the first byte of the needle, check whether the following // bytes in the buffer match the remainder of the needle. Note that the match can span // two or more slices. + left_to_search -= static_cast(first_byte_match - haystack + 1); + // Save the current number of bytes left to search. + // If the pattern is not found, the search will resume from the next byte + // and left_to_search value must be restored. + const size_t saved_left_to_search = left_to_search; size_t i = 1; size_t match_index = slice_index; const uint8_t* match_next = first_byte_match + 1; const uint8_t* match_end = haystack_end; - while (i < size) { + while ((i < size) && (0 < left_to_search)) { if (match_next >= match_end) { // We've hit the end of this slice, so continue checking against the next slice. match_index++; @@ -429,6 +445,7 @@ ssize_t OwnedImpl::search(const void* data, uint64_t size, size_t start) const { match_end = match_next + match_slice->dataSize(); continue; } + left_to_search--; if (*match_next++ != needle[i]) { break; } @@ -440,6 +457,7 @@ ssize_t OwnedImpl::search(const void* data, uint64_t size, size_t start) const { } // If this wasn't a successful match, start scanning again at the next byte. haystack = first_byte_match + 1; + left_to_search = saved_left_to_search; } start = 0; offset += slice_size; diff --git a/source/common/buffer/buffer_impl.h b/source/common/buffer/buffer_impl.h index 90d76da81d39..cc1981eb459b 100644 --- a/source/common/buffer/buffer_impl.h +++ b/source/common/buffer/buffer_impl.h @@ -545,7 +545,7 @@ class OwnedImpl : public LibEventInstance { void move(Instance& rhs, uint64_t length) override; Api::IoCallUint64Result read(Network::IoHandle& io_handle, uint64_t max_length) override; uint64_t reserve(uint64_t length, RawSlice* iovecs, uint64_t num_iovecs) override; - ssize_t search(const void* data, uint64_t size, size_t start) const override; + ssize_t search(const void* data, uint64_t size, size_t start, size_t length) const override; bool startsWith(absl::string_view data) const override; Api::IoCallUint64Result write(Network::IoHandle& io_handle) override; std::string toString() const override; diff --git a/source/extensions/common/tap/BUILD b/source/extensions/common/tap/BUILD index 8795a34b9170..480b2d05b6f3 100644 --- a/source/extensions/common/tap/BUILD +++ b/source/extensions/common/tap/BUILD @@ -27,6 +27,7 @@ envoy_cc_library( ":tap_interface", ":tap_matcher", "//source/common/common:assert_lib", + "//source/common/common:hex_lib", "@envoy_api//envoy/config/tap/v3:pkg_cc_proto", "@envoy_api//envoy/data/tap/v3:pkg_cc_proto", ], @@ -37,6 +38,8 @@ envoy_cc_library( srcs = ["tap_matcher.cc"], hdrs = ["tap_matcher.h"], deps = [ + "//source/common/buffer:buffer_lib", + "//source/common/common:matchers_lib", "//source/common/http:header_utility_lib", "@envoy_api//envoy/config/tap/v3:pkg_cc_proto", ], diff --git a/source/extensions/common/tap/tap.h b/source/extensions/common/tap/tap.h index d91f7739c963..58ba4ba82d6d 100644 --- a/source/extensions/common/tap/tap.h +++ b/source/extensions/common/tap/tap.h @@ -138,6 +138,13 @@ class TapConfig { */ virtual const Matcher& rootMatcher() const PURE; + /** + * Non-const version of rootMatcher method. 
+ */ + Matcher& rootMatcher() { + return const_cast(static_cast(*this).rootMatcher()); + } + /** * Return whether the tap session should run in streaming or buffering mode. */ diff --git a/source/extensions/common/tap/tap_matcher.cc b/source/extensions/common/tap/tap_matcher.cc index dc7894083524..71c270432563 100644 --- a/source/extensions/common/tap/tap_matcher.cc +++ b/source/extensions/common/tap/tap_matcher.cc @@ -50,6 +50,14 @@ void buildMatcher(const envoy::config::tap::v3::MatchPredicate& match_config, new_matcher = std::make_unique( match_config.http_response_trailers_match(), matchers); break; + case envoy::config::tap::v3::MatchPredicate::RuleCase::kHttpRequestGenericBodyMatch: + new_matcher = std::make_unique( + match_config.http_request_generic_body_match(), matchers); + break; + case envoy::config::tap::v3::MatchPredicate::RuleCase::kHttpResponseGenericBodyMatch: + new_matcher = std::make_unique( + match_config.http_response_generic_body_match(), matchers); + break; default: NOT_REACHED_GCOVR_EXCL_LINE; } @@ -121,6 +129,206 @@ void HttpHeaderMatcherBase::matchHeaders(const Http::HeaderMap& headers, statuses[my_index_].might_change_status_ = false; } +// HttpGenericBodyMatcher +// Scans the HTTP body and looks for patterns. +// HTTP body may be passed to the matcher in chunks. The search logic buffers +// only as many bytes as is the length of the longest pattern to be found. +HttpGenericBodyMatcher::HttpGenericBodyMatcher( + const envoy::config::tap::v3::HttpGenericBodyMatch& config, + const std::vector& matchers) + : HttpBodyMatcherBase(matchers) { + patterns_ = std::make_shared>(); + for (const auto& i : config.patterns()) { + switch (i.rule_case()) { + // For binary match 'i' contains sequence of bytes to locate in the body. + case envoy::config::tap::v3::HttpGenericBodyMatch::GenericTextMatch::kBinaryMatch: { + patterns_->push_back(i.binary_match()); + } break; + // For string match 'i' contains exact string to locate in the body. + case envoy::config::tap::v3::HttpGenericBodyMatch::GenericTextMatch::kStringMatch: + patterns_->push_back(i.string_match()); + break; + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } + // overlap_size_ indicates how many bytes from previous data chunk(s) are buffered. + overlap_size_ = std::max(overlap_size_, patterns_->back().length() - 1); + } + limit_ = config.bytes_limit(); +} + +void HttpGenericBodyMatcher::onBody(const Buffer::Instance& data, MatchStatusVector& statuses) { + // Get the context associated with this stream. + HttpGenericBodyMatcherCtx* ctx = + static_cast(statuses[my_index_].ctx_.get()); + + if (statuses[my_index_].might_change_status_ == false) { + // End of search limit has been already reached or all patterns have been found. + // Status is not going to change. + ASSERT(((0 != limit_) && (limit_ == ctx->processed_bytes_)) || (ctx->patterns_index_.empty())); + return; + } + + // Iterate through all patterns to be found and check if they are located across body + // chunks: part of the pattern was in previous body chunk and remaining of the pattern + // is in the current body chunk on in the current body chunk. 
+ bool resize_required = false; + auto body_search_limit = limit_ - ctx->processed_bytes_; + auto it = ctx->patterns_index_.begin(); + while (it != ctx->patterns_index_.end()) { + const auto& pattern = patterns_->at(*it); + if ((!ctx->overlap_.empty() && (locatePatternAcrossChunks(pattern, data, ctx))) || + (-1 != data.search(static_cast(pattern.data()), pattern.length(), 0, + body_search_limit))) { + // Pattern found. Remove it from the list of patterns to be found. + // If the longest pattern has been found, resize of overlap buffer may be + // required. + resize_required = resize_required || (ctx->capacity_ == (pattern.length() - 1)); + it = ctx->patterns_index_.erase(it); + } else { + it++; + } + } + + if (ctx->patterns_index_.empty()) { + // All patterns were found. + statuses[my_index_].matches_ = true; + statuses[my_index_].might_change_status_ = false; + return; + } + + // Check if next body chunks should be searched for patterns. If the search limit + // ends on the current body chunk, there is no need to check next chunks. + if (0 != limit_) { + ctx->processed_bytes_ = std::min(uint64_t(limit_), ctx->processed_bytes_ + data.length()); + if (limit_ == ctx->processed_bytes_) { + // End of search limit has been reached and not all patterns have been found. + statuses[my_index_].matches_ = false; + statuses[my_index_].might_change_status_ = false; + return; + } + } + + // If longest pattern has been located, there is possibility that overlap_ + // buffer size may be reduced. + if (resize_required) { + resizeOverlapBuffer(ctx); + } + + bufferLastBytes(data, ctx); +} + +// Here we handle a situation when a pattern is spread across multiple body buffers. +// overlap_ stores number of bytes from previous body chunks equal to longest pattern yet to be +// found minus one byte (-1). The logic below tries to find the beginning of the pattern in +// overlap_ buffer and the pattern should continue at the beginning of the next buffer. +bool HttpGenericBodyMatcher::locatePatternAcrossChunks(const std::string& pattern, + const Buffer::Instance& data, + const HttpGenericBodyMatcherCtx* ctx) { + // Take the first character from the pattern and locate it in overlap_. + auto pattern_index = 0; + // Start position in overlap_. overlap_ size was calculated based on the longest pattern to be + // found, but search for shorter patterns may start from some offset, not the beginning of the + // buffer. + size_t start_index = (ctx->overlap_.size() > (pattern.size() - 1)) + ? ctx->overlap_.size() - (pattern.size() - 1) + : 0; + auto match_iter = std::find(std::begin(ctx->overlap_) + start_index, std::end(ctx->overlap_), + pattern.at(pattern_index)); + + if (match_iter == std::end(ctx->overlap_)) { + return false; + } + + // Continue checking characters until end of overlap_ buffer. + while (match_iter != std::end(ctx->overlap_)) { + if (pattern[pattern_index] != *match_iter) { + return false; + } + pattern_index++; + match_iter++; + } + + // Now check if the remaining of the pattern matches the beginning of the body + // buffer.i Do it only if there is sufficient number of bytes in the data buffer. + auto pattern_remainder = pattern.substr(pattern_index); + if ((0 != limit_) && (pattern_remainder.length() > (limit_ - ctx->processed_bytes_))) { + // Even if we got match it would be outside the search limit + return false; + } + return ((pattern_remainder.length() <= data.length()) && data.startsWith(pattern_remainder)); +} + +// Method buffers last bytes from the currently processed body in overlap_. 
+// This is required to find patterns which spans across multiple body chunks. +void HttpGenericBodyMatcher::bufferLastBytes(const Buffer::Instance& data, + HttpGenericBodyMatcherCtx* ctx) { + // The matcher buffers the last seen X bytes where X is equal to the length of the + // longest pattern - 1. With the arrival of the new 'data' the following situations + // are possible: + // 1. The new data's length is larger or equal to X. In this case just copy last X bytes + // from the data to overlap_ buffer. + // 2. The new data length is smaller than X and there is enough room in overlap buffer to just + // copy the bytes from data. + // 3. The new data length is smaller than X and there is not enough room in overlap buffer. + if (data.length() >= ctx->capacity_) { + // Case 1: + // Just overwrite the entire overlap_ buffer with new data. + ctx->overlap_.resize(ctx->capacity_); + data.copyOut(data.length() - ctx->capacity_, ctx->capacity_, ctx->overlap_.data()); + } else { + if (data.length() <= (ctx->capacity_ - ctx->overlap_.size())) { + // Case 2. Just add the new data on top of already buffered. + const auto size = ctx->overlap_.size(); + ctx->overlap_.resize(ctx->overlap_.size() + data.length()); + data.copyOut(0, data.length(), ctx->overlap_.data() + size); + } else { + // Case 3. First shift data to make room for new data and then copy + // entire new buffer. + const size_t shift = ctx->overlap_.size() - (ctx->capacity_ - data.length()); + for (size_t i = 0; i < (ctx->overlap_.size() - shift); i++) { + ctx->overlap_[i] = ctx->overlap_[i + shift]; + } + const auto size = ctx->overlap_.size(); + ctx->overlap_.resize(ctx->capacity_); + data.copyOut(0, data.length(), ctx->overlap_.data() + (size - shift)); + } + } +} + +// Method takes list of indexes of patterns not yet located in the http body and returns the +// length of the longest pattern. +// This is used by matcher to buffer as minimum bytes as possible. +size_t HttpGenericBodyMatcher::calcLongestPatternSize(const std::list& indexes) const { + ASSERT(!indexes.empty()); + size_t max_len = 0; + for (const auto& i : indexes) { + max_len = std::max(max_len, patterns_->at(i).length()); + } + return max_len; +} + +// Method checks if it is possible to reduce the size of overlap_ buffer. +void HttpGenericBodyMatcher::resizeOverlapBuffer(HttpGenericBodyMatcherCtx* ctx) { + // Check if we need to resize overlap_ buffer. Since it was initialized to size of the longest + // pattern, it will be shrunk only and memory allocations do not happen. + // Depending on how many bytes were already in the buffer, shift may be required if + // the new size is smaller than number of already buffered bytes. + const size_t max_len = calcLongestPatternSize(ctx->patterns_index_); + if (ctx->capacity_ != (max_len - 1)) { + const size_t new_size = max_len - 1; + const size_t shift = (ctx->overlap_.size() > new_size) ? (ctx->overlap_.size() - new_size) : 0; + // Copy the last new_size bytes to the beginning of the buffer. 
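The bufferLastBytes() and locatePatternAcrossChunks() logic above finds patterns that straddle chunk boundaries without concatenating chunks; reduced to a plain std::string sketch, the underlying idea is roughly the following (illustration only, not code from this patch; it assumes a non-empty pattern):

  #include <algorithm>
  #include <string>
  #include <vector>

  // Illustration only: keep the last (pattern.size() - 1) bytes already seen
  // and prepend them to each new chunk before searching, so a pattern split
  // across chunk boundaries is still found.
  bool foundAcrossChunks(const std::string& pattern, const std::vector<std::string>& chunks) {
    std::string overlap; // holds at most pattern.size() - 1 bytes
    for (const auto& chunk : chunks) {
      const std::string window = overlap + chunk;
      if (window.find(pattern) != std::string::npos) {
        return true;
      }
      const size_t keep = std::min(window.size(), pattern.size() - 1);
      overlap = window.substr(window.size() - keep);
    }
    return false;
  }
  // foundAcrossChunks("envoyproxy", {"...envoy", "proxy..."}) returns true.

The real matcher keeps at most longest-pattern-minus-one bytes and shrinks that window as patterns are found, which is what resizeOverlapBuffer() above is for.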
+ for (size_t i = 0; (i < new_size) && (shift > 0); i++) { + ctx->overlap_[i] = ctx->overlap_[i + shift]; + } + ctx->capacity_ = new_size; + if (shift > 0) { + ctx->overlap_.resize(new_size); + } + } +} + } // namespace Tap } // namespace Common } // namespace Extensions diff --git a/source/extensions/common/tap/tap_matcher.h b/source/extensions/common/tap/tap_matcher.h index 55975cb6a70a..79705e3fe924 100644 --- a/source/extensions/common/tap/tap_matcher.h +++ b/source/extensions/common/tap/tap_matcher.h @@ -2,6 +2,8 @@ #include "envoy/config/tap/v3/common.pb.h" +#include "common/buffer/buffer_impl.h" +#include "common/common/matchers.h" #include "common/http/header_utility.h" namespace Envoy { @@ -12,6 +14,18 @@ namespace Tap { class Matcher; using MatcherPtr = std::unique_ptr; +/** + * Base class for context used by individual matchers. + * The context may be required by matchers which are called multiple times + * and need to carry state between the calls. For example body matchers may + * store information how any bytes of the body have been already processed + * or what what has been already found in the body and what has yet to be found. + */ +class MatcherCtx { +public: + virtual ~MatcherCtx() = default; +}; + /** * Base class for all tap matchers. * @@ -40,6 +54,7 @@ class Matcher { bool matches_{false}; // Does the matcher currently match? bool might_change_status_{true}; // Is it possible for matches_ to change in subsequent updates? + std::unique_ptr ctx_{}; // Context used by matchers to save interim context. }; using MatchStatusVector = std::vector; @@ -103,12 +118,30 @@ class Matcher { virtual void onHttpResponseTrailers(const Http::ResponseTrailerMap& response_trailers, MatchStatusVector& statuses) const PURE; + /** + * Update match status given HTTP request body. + * @param data supplies the request body. + * @param statuses supplies the per-stream-request match status vector which must be the same + * size as the match tree vector (see above). + */ + virtual void onRequestBody(const Buffer::Instance& data, MatchStatusVector& statuses) PURE; + + /** + * Update match status given HTTP response body. + * @param data supplies the response body. + * @param statuses supplies the per-stream-request match status vector which must be the same + * size as the match tree vector (see above). + */ + virtual void onResponseBody(const Buffer::Instance& data, MatchStatusVector& statuses) PURE; + /** * @return whether given currently available information, the matcher matches. * @param statuses supplies the per-stream-request match status vector which must be the same * size as the match tree vector (see above). 
*/ - MatchStatus matchStatus(const MatchStatusVector& statuses) const { return statuses[my_index_]; } + const MatchStatus& matchStatus(const MatchStatusVector& statuses) const { + return statuses[my_index_]; + } protected: const size_t my_index_; @@ -158,6 +191,16 @@ class LogicMatcherBase : public Matcher { m.onHttpResponseTrailers(response_trailers, statuses); }); } + void onRequestBody(const Buffer::Instance& data, MatchStatusVector& statuses) override { + updateLocalStatus(statuses, [&data](Matcher& m, MatchStatusVector& statuses) { + m.onRequestBody(data, statuses); + }); + } + void onResponseBody(const Buffer::Instance& data, MatchStatusVector& statuses) override { + updateLocalStatus(statuses, [&data](Matcher& m, MatchStatusVector& statuses) { + m.onResponseBody(data, statuses); + }); + } protected: using UpdateFunctor = std::function; @@ -212,6 +255,8 @@ class SimpleMatcher : public Matcher { void onHttpRequestTrailers(const Http::RequestTrailerMap&, MatchStatusVector&) const override {} void onHttpResponseHeaders(const Http::ResponseHeaderMap&, MatchStatusVector&) const override {} void onHttpResponseTrailers(const Http::ResponseTrailerMap&, MatchStatusVector&) const override {} + void onRequestBody(const Buffer::Instance&, MatchStatusVector&) override {} + void onResponseBody(const Buffer::Instance&, MatchStatusVector&) override {} }; /** @@ -298,6 +343,113 @@ class HttpResponseTrailersMatcher : public HttpHeaderMatcherBase { } }; +/** + * Base class for body matchers. + */ +class HttpBodyMatcherBase : public SimpleMatcher { +public: + HttpBodyMatcherBase(const std::vector& matchers) : SimpleMatcher(matchers) {} + +protected: + // Limit search to specified number of bytes. + // Value equal to zero means no limit. + uint32_t limit_{}; +}; + +/** + * Context is used by HttpGenericBodyMatcher to: + * - track how many bytes has been processed + * - track patterns which have been found + * - store last several seen bytes of the HTTP body (when pattern starts at the end of previous body + * chunk and continues at the beginning of the next body chunk) + */ +class HttpGenericBodyMatcherCtx : public MatcherCtx { +public: + HttpGenericBodyMatcherCtx(const std::shared_ptr>& patterns, + size_t overlap_size) + : patterns_(patterns) { + // Initialize overlap_ buffer's capacity to fit the longest pattern - 1. + // The length of the longest pattern is known and passed here as overlap_size. + patterns_index_.resize(patterns_->size()); + std::iota(patterns_index_.begin(), patterns_index_.end(), 0); + overlap_.reserve(overlap_size); + capacity_ = overlap_size; + } + ~HttpGenericBodyMatcherCtx() override = default; + + // The context is initialized per each http request. The patterns_ + // shared pointer attaches to matcher's list of patterns, so patterns + // can be referenced without copying data. + const std::shared_ptr> patterns_; + // List stores indexes of patterns in patterns_ shared memory which + // still need to be located in the body. When a pattern is found + // its index is removed from the list. + // When all patterns have been found, the list is empty. + std::list patterns_index_; + // Buffer to store the last bytes from previous body chunk(s). + // It will store only as many bytes as is the length of the longest + // pattern to be found minus 1. + // It is necessary to locate patterns which are spread across 2 or more + // body chunks. + std::vector overlap_; + // capacity_ tells how many bytes should be buffered. 
overlap_'s initial + // capacity is set to the length of the longest pattern - 1. As patterns + // are found, there is a possibility that not as many bytes are required to be buffered. + // It must be tracked outside of vector, because vector::reserve does not + // change capacity when new value is lower than current capacity. + uint32_t capacity_{}; + // processed_bytes_ tracks how many bytes of HTTP body have been processed. + uint32_t processed_bytes_{}; +}; + +class HttpGenericBodyMatcher : public HttpBodyMatcherBase { +public: + HttpGenericBodyMatcher(const envoy::config::tap::v3::HttpGenericBodyMatch& config, + const std::vector& matchers); + +protected: + void onBody(const Buffer::Instance&, MatchStatusVector&); + void onNewStream(MatchStatusVector& statuses) const override { + // Allocate a new context used for the new stream. + statuses[my_index_].ctx_ = + std::make_unique(patterns_, overlap_size_); + statuses[my_index_].matches_ = false; + statuses[my_index_].might_change_status_ = true; + } + bool locatePatternAcrossChunks(const std::string& pattern, const Buffer::Instance& data, + const HttpGenericBodyMatcherCtx* ctx); + void bufferLastBytes(const Buffer::Instance& data, HttpGenericBodyMatcherCtx* ctx); + + size_t calcLongestPatternSize(const std::list& indexes) const; + void resizeOverlapBuffer(HttpGenericBodyMatcherCtx* ctx); + +private: + // The following fields are initialized based on matcher config and are used + // by all HTTP tappers. + // List of strings which body must contain to get match. + std::shared_ptr> patterns_; + // Stores the length of the longest pattern. + size_t overlap_size_{}; +}; + +class HttpRequestGenericBodyMatcher : public HttpGenericBodyMatcher { +public: + using HttpGenericBodyMatcher::HttpGenericBodyMatcher; + + void onRequestBody(const Buffer::Instance& data, MatchStatusVector& statuses) override { + onBody(data, statuses); + } +}; + +class HttpResponseGenericBodyMatcher : public HttpGenericBodyMatcher { +public: + using HttpGenericBodyMatcher::HttpGenericBodyMatcher; + + void onResponseBody(const Buffer::Instance& data, MatchStatusVector& statuses) override { + onBody(data, statuses); + } +}; + } // namespace Tap } // namespace Common } // namespace Extensions diff --git a/source/extensions/filters/http/tap/tap_config_impl.cc b/source/extensions/filters/http/tap/tap_config_impl.cc index 2ea89dc12914..fe602fbfb6e7 100644 --- a/source/extensions/filters/http/tap/tap_config_impl.cc +++ b/source/extensions/filters/http/tap/tap_config_impl.cc @@ -61,7 +61,7 @@ void HttpPerRequestTapperImpl::streamBufferedRequestBody() { void HttpPerRequestTapperImpl::onRequestBody(const Buffer::Instance& data) { onBody(data, buffered_streamed_request_body_, config_->maxBufferedRxBytes(), &envoy::data::tap::v3::HttpStreamedTraceSegment::mutable_request_body_chunk, - &envoy::data::tap::v3::HttpBufferedTrace::mutable_request); + &envoy::data::tap::v3::HttpBufferedTrace::mutable_request, true); } void HttpPerRequestTapperImpl::streamRequestTrailers() { @@ -123,7 +123,7 @@ void HttpPerRequestTapperImpl::streamBufferedResponseBody() { void HttpPerRequestTapperImpl::onResponseBody(const Buffer::Instance& data) { onBody(data, buffered_streamed_response_body_, config_->maxBufferedTxBytes(), &envoy::data::tap::v3::HttpStreamedTraceSegment::mutable_response_body_chunk, - &envoy::data::tap::v3::HttpBufferedTrace::mutable_response); + &envoy::data::tap::v3::HttpBufferedTrace::mutable_response, false); } void HttpPerRequestTapperImpl::onResponseTrailers(const 
Http::ResponseTrailerMap& trailers) { @@ -177,10 +177,12 @@ bool HttpPerRequestTapperImpl::onDestroyLog() { void HttpPerRequestTapperImpl::onBody( const Buffer::Instance& data, Extensions::Common::Tap::TraceWrapperPtr& buffered_streamed_body, uint32_t max_buffered_bytes, MutableBodyChunk mutable_body_chunk, - MutableMessage mutable_message) { - // TODO(mattklein123): Body matching. + MutableMessage mutable_message, bool request) { + // Invoke body matcher. + request ? config_->rootMatcher().onRequestBody(data, statuses_) + : config_->rootMatcher().onResponseBody(data, statuses_); if (config_->streaming()) { - const auto match_status = config_->rootMatcher().matchStatus(statuses_); + const auto& match_status = config_->rootMatcher().matchStatus(statuses_); // Without body matching, we must have already started tracing or have not yet matched. ASSERT(started_streaming_trace_ || !match_status.matches_); diff --git a/source/extensions/filters/http/tap/tap_config_impl.h b/source/extensions/filters/http/tap/tap_config_impl.h index bb8bb0b48c4f..f61f275774c5 100644 --- a/source/extensions/filters/http/tap/tap_config_impl.h +++ b/source/extensions/filters/http/tap/tap_config_impl.h @@ -53,7 +53,7 @@ class HttpPerRequestTapperImpl : public HttpPerRequestTapper, Logger::LoggableonRequestBody(data); } return Http::FilterDataStatus::Continue; @@ -56,7 +56,7 @@ Http::FilterHeadersStatus Filter::encodeHeaders(Http::ResponseHeaderMap& headers } Http::FilterDataStatus Filter::encodeData(Buffer::Instance& data, bool) { - if (tapper_ != nullptr) { + if ((tapper_ != nullptr) && (0 != data.length())) { tapper_->onResponseBody(data); } return Http::FilterDataStatus::Continue; diff --git a/test/common/buffer/buffer_fuzz.cc b/test/common/buffer/buffer_fuzz.cc index 9b80fa2a7c29..9c80f4655b09 100644 --- a/test/common/buffer/buffer_fuzz.cc +++ b/test/common/buffer/buffer_fuzz.cc @@ -159,7 +159,8 @@ class StringBuffer : public Buffer::Instance { return 1; } - ssize_t search(const void* data, uint64_t size, size_t start) const override { + ssize_t search(const void* data, uint64_t size, size_t start, size_t length) const override { + UNREFERENCED_PARAMETER(length); return asStringView().find({static_cast(data), size}, start); } diff --git a/test/common/buffer/buffer_speed_test.cc b/test/common/buffer/buffer_speed_test.cc index d9456072b137..49240c69f356 100644 --- a/test/common/buffer/buffer_speed_test.cc +++ b/test/common/buffer/buffer_speed_test.cc @@ -291,7 +291,7 @@ static void bufferSearch(benchmark::State& state) { Buffer::OwnedImpl buffer(input); ssize_t result = 0; for (auto _ : state) { - result += buffer.search(Pattern.c_str(), Pattern.length(), 0); + result += buffer.search(Pattern.c_str(), Pattern.length(), 0, 0); } benchmark::DoNotOptimize(result); } @@ -314,7 +314,7 @@ static void bufferSearchPartialMatch(benchmark::State& state) { Buffer::OwnedImpl buffer(input); ssize_t result = 0; for (auto _ : state) { - result += buffer.search(Pattern.c_str(), Pattern.length(), 0); + result += buffer.search(Pattern.c_str(), Pattern.length(), 0, 0); } benchmark::DoNotOptimize(result); } diff --git a/test/common/buffer/owned_impl_test.cc b/test/common/buffer/owned_impl_test.cc index d622d6984e43..bb6799aed98e 100644 --- a/test/common/buffer/owned_impl_test.cc +++ b/test/common/buffer/owned_impl_test.cc @@ -825,21 +825,56 @@ TEST_F(OwnedImplTest, Search) { } EXPECT_STREQ("abaaaabaaaaaba", buffer.toString().c_str()); - EXPECT_EQ(-1, buffer.search("c", 1, 0)); - EXPECT_EQ(0, buffer.search("", 0, 0)); - 
EXPECT_EQ(buffer.length(), buffer.search("", 0, buffer.length())); - EXPECT_EQ(-1, buffer.search("", 0, buffer.length() + 1)); - EXPECT_EQ(0, buffer.search("a", 1, 0)); - EXPECT_EQ(1, buffer.search("b", 1, 1)); - EXPECT_EQ(2, buffer.search("a", 1, 1)); - EXPECT_EQ(0, buffer.search("abaa", 4, 0)); - EXPECT_EQ(2, buffer.search("aaaa", 4, 0)); - EXPECT_EQ(2, buffer.search("aaaa", 4, 1)); - EXPECT_EQ(2, buffer.search("aaaa", 4, 2)); - EXPECT_EQ(7, buffer.search("aaaaab", 6, 0)); - EXPECT_EQ(0, buffer.search("abaaaabaaaaaba", 14, 0)); - EXPECT_EQ(12, buffer.search("ba", 2, 10)); - EXPECT_EQ(-1, buffer.search("abaaaabaaaaabaa", 15, 0)); + EXPECT_EQ(-1, buffer.search("c", 1, 0, 0)); + EXPECT_EQ(0, buffer.search("", 0, 0, 0)); + EXPECT_EQ(buffer.length(), buffer.search("", 0, buffer.length(), 0)); + EXPECT_EQ(-1, buffer.search("", 0, buffer.length() + 1, 0)); + EXPECT_EQ(0, buffer.search("a", 1, 0, 0)); + EXPECT_EQ(1, buffer.search("b", 1, 1, 0)); + EXPECT_EQ(2, buffer.search("a", 1, 1, 0)); + EXPECT_EQ(0, buffer.search("abaa", 4, 0, 0)); + EXPECT_EQ(2, buffer.search("aaaa", 4, 0, 0)); + EXPECT_EQ(2, buffer.search("aaaa", 4, 1, 0)); + EXPECT_EQ(2, buffer.search("aaaa", 4, 2, 0)); + EXPECT_EQ(7, buffer.search("aaaaab", 6, 0, 0)); + EXPECT_EQ(0, buffer.search("abaaaabaaaaaba", 14, 0, 0)); + EXPECT_EQ(12, buffer.search("ba", 2, 10, 0)); + EXPECT_EQ(-1, buffer.search("abaaaabaaaaabaa", 15, 0, 0)); +} + +TEST_F(OwnedImplTest, SearchWithLengthLimit) { + // Populate a buffer with a string split across many small slices, to + // exercise edge cases in the search implementation. + static const char* Inputs[] = {"ab", "a", "", "aaa", "b", "a", "aaa", "ab", "a"}; + Buffer::OwnedImpl buffer; + for (const auto& input : Inputs) { + buffer.appendSliceForTest(input); + } + EXPECT_STREQ("abaaaabaaaaaba", buffer.toString().c_str()); + + // The string is there, but the search is limited to 1 byte. + EXPECT_EQ(-1, buffer.search("b", 1, 0, 1)); + // The string is there, but the search is limited to 1 byte. + EXPECT_EQ(-1, buffer.search("ab", 2, 0, 1)); + // The string is there, but spans over 2 slices. The search length is enough + // to find it. + EXPECT_EQ(1, buffer.search("ba", 2, 0, 3)); + EXPECT_EQ(1, buffer.search("ba", 2, 0, 5)); + EXPECT_EQ(1, buffer.search("ba", 2, 1, 2)); + EXPECT_EQ(1, buffer.search("ba", 2, 1, 5)); + // The string spans over 3 slices. test different variations of search length + // and starting position. + EXPECT_EQ(2, buffer.search("aaaab", 5, 2, 5)); + EXPECT_EQ(-1, buffer.search("aaaab", 5, 2, 3)); + EXPECT_EQ(2, buffer.search("aaaab", 5, 2, 6)); + EXPECT_EQ(2, buffer.search("aaaab", 5, 0, 8)); + EXPECT_EQ(-1, buffer.search("aaaab", 5, 0, 6)); + // Test searching for the string which in in the last slice. 
+ EXPECT_EQ(12, buffer.search("ba", 2, 12, 2)); + EXPECT_EQ(12, buffer.search("ba", 2, 11, 3)); + EXPECT_EQ(-1, buffer.search("ba", 2, 11, 2)); + // Test cases when length to search is larger than buffer + EXPECT_EQ(12, buffer.search("ba", 2, 11, 10e6)); } TEST_F(OwnedImplTest, StartsWith) { @@ -966,7 +1001,7 @@ TEST_F(OwnedImplTest, ReserveZeroCommit) { Api::IoCallUint64Result result = buf.read(io_handle, max_length); ASSERT_EQ(result.rc_, static_cast(rc)); ASSERT_EQ(os_sys_calls.close(pipe_fds[1]).rc_, 0); - ASSERT_EQ(previous_length, buf.search(data.data(), rc, previous_length)); + ASSERT_EQ(previous_length, buf.search(data.data(), rc, previous_length, 0)); EXPECT_EQ("bbbbb", buf.toString().substr(0, 5)); expectSlices({{5, 0, 4032}, {1953, 2079, 4032}}, buf); } diff --git a/test/common/buffer/watermark_buffer_test.cc b/test/common/buffer/watermark_buffer_test.cc index 776bd64a8217..db7fe530fcdb 100644 --- a/test/common/buffer/watermark_buffer_test.cc +++ b/test/common/buffer/watermark_buffer_test.cc @@ -295,9 +295,9 @@ TEST_F(WatermarkBufferTest, GetRawSlices) { TEST_F(WatermarkBufferTest, Search) { buffer_.add(TEN_BYTES, 10); - EXPECT_EQ(1, buffer_.search(&TEN_BYTES[1], 2, 0)); + EXPECT_EQ(1, buffer_.search(&TEN_BYTES[1], 2, 0, 0)); - EXPECT_EQ(-1, buffer_.search(&TEN_BYTES[1], 2, 5)); + EXPECT_EQ(-1, buffer_.search(&TEN_BYTES[1], 2, 5, 0)); } TEST_F(WatermarkBufferTest, StartsWith) { diff --git a/test/extensions/common/tap/common.h b/test/extensions/common/tap/common.h index 7e8ca455d094..03b0d0b42840 100644 --- a/test/extensions/common/tap/common.h +++ b/test/extensions/common/tap/common.h @@ -64,6 +64,9 @@ class MockMatcher : public Matcher { MOCK_METHOD(void, onHttpResponseTrailers, (const Http::ResponseTrailerMap& response_trailers, MatchStatusVector& statuses), (const)); + MOCK_METHOD(void, onRequestBody, (const Buffer::Instance& data, MatchStatusVector& statuses)); + MOCK_METHOD(void, onResponseBody, (const Buffer::Instance& data, MatchStatusVector& statuses), + ()); }; } // namespace Tap diff --git a/test/extensions/common/tap/tap_matcher_test.cc b/test/extensions/common/tap/tap_matcher_test.cc index 04fb50227e39..2023f40d4f4d 100644 --- a/test/extensions/common/tap/tap_matcher_test.cc +++ b/test/extensions/common/tap/tap_matcher_test.cc @@ -14,17 +14,43 @@ namespace Common { namespace Tap { namespace { -class TapMatcherTest : public testing::Test { +class TapMatcherTestBase { public: std::vector matchers_; Matcher::MatchStatusVector statuses_; envoy::config::tap::v3::MatchPredicate config_; + + enum class Direction { Request, Response }; +}; + +class TapMatcherTest : public TapMatcherTestBase, public testing::Test { +public: Http::TestRequestHeaderMapImpl request_headers_; Http::TestRequestTrailerMapImpl request_trailers_; Http::TestResponseHeaderMapImpl response_headers_; Http::TestResponseTrailerMapImpl response_trailers_; }; +// Base test class for config parameterized tests. 
+class TapMatcherGenericBodyConfigTest + : public TapMatcherTestBase, + public ::testing::TestWithParam< + std::tuple, size_t>>> { +}; + +class TapMatcherGenericBodyTest + : public TapMatcherTestBase, + public ::testing::TestWithParam< + std::tuple, std::list>, + std::pair>>> { +public: + TapMatcherGenericBodyTest(); + + Buffer::OwnedImpl data_; + std::vector body_parts_; +}; + TEST_F(TapMatcherTest, Any) { const std::string matcher_yaml = R"EOF( @@ -97,6 +123,397 @@ TEST_F(TapMatcherTest, AndMightChangeStatus) { EXPECT_EQ((Matcher::MatchStatus{false, false}), matchers_[0]->matchStatus(statuses_)); } +TapMatcherGenericBodyTest::TapMatcherGenericBodyTest() { + std::string hex; + body_parts_.push_back("This is generic body matcher test for envoy"); // Index 0 + body_parts_.push_back("proxy used to create and assemble http body"); // Index 1 + body_parts_.push_back("env"); // Index 2 + body_parts_.push_back("oyp"); // Index 3 + body_parts_.push_back("roxy"); // Index 4 + body_parts_.push_back("roxy layer 7"); // Index 5 + body_parts_.push_back("blah"); // Index 6 + hex = "xx"; + unsigned char buf[] = {0xde, 0xad}; + memcpy(const_cast(hex.data()), buf, 2); + body_parts_.push_back(hex); // Index 7 + unsigned char buf1[] = {0xbe, 0xef}; + memcpy(const_cast(hex.data()), buf1, 2); + body_parts_.push_back(hex); // Index 8 +} + +// This test initializes matcher with several patterns. The length of the longest +// pattern is used to initialize overlap_ buffer. +// The longest pattern is found first. This should result in less buffering +// required for locating remaining patterns. +TEST_F(TapMatcherGenericBodyTest, ResizeOverlap) { + std::string matcher_yaml = R"EOF( +http_request_generic_body_match: + patterns: + - string_match: generic + - string_match: lay +)EOF"; + TestUtility::loadFromYaml(matcher_yaml, config_); + buildMatcher(config_, matchers_); + EXPECT_EQ(1, matchers_.size()); + statuses_.resize(matchers_.size()); + matchers_[0]->onNewStream(statuses_); + + const auto& ctx = reinterpret_cast(statuses_[0].ctx_.get()); + // 6 is length of "generic" + ASSERT_THAT(ctx->overlap_.capacity(), 6); + // 2 patterns must be located + ASSERT_THAT(ctx->patterns_index_.size(), 2); + + // Process body chunk which produces no match. + // It should fill the overlap_ buffer to full capacity. + data_.add(body_parts_[1].data(), body_parts_[1].length()); + matchers_[0]->onRequestBody(data_, statuses_); + ASSERT_THAT(ctx->overlap_.size(), 6); + ASSERT_THAT(ctx->capacity_, 6); + + // Now pass the chunk which matches "generic" pattern. + data_.drain(data_.length()); + data_.add(body_parts_[0].data(), body_parts_[0].length()); + matchers_[0]->onRequestBody(data_, statuses_); + + // Size of patterns_index_ should drop down to one. + // Capacity of the overlap_ should drop to to 2, as the longest pattern not found yet is 3 chars + // long. Also 2 bytes should have been copied to overlap, so its size is 2. 
+ ASSERT_THAT(ctx->patterns_index_.size(), 1); + ASSERT_THAT(ctx->overlap_.size(), 2); + ASSERT_THAT(ctx->capacity_, 2); +} + +// Test the case when hex string is not even number of characters +TEST_F(TapMatcherGenericBodyTest, WrongConfigTest) { + std::string matcher_yaml = R"EOF( +http_request_generic_body_match: + patterns: + - binary_match: 4rdHFh%2 +)EOF"; + ASSERT_ANY_THROW(TestUtility::loadFromYaml(matcher_yaml, config_)); +} + +INSTANTIATE_TEST_SUITE_P( + TapMatcherGenericBodyTestConfigSuite, TapMatcherGenericBodyConfigTest, + ::testing::Combine( + ::testing::Values(TapMatcherTestBase::Direction::Request, + TapMatcherTestBase::Direction::Response), + ::testing::Values( + // Should match - envoy is in the body + std::make_tuple(std::vector{" - string_match: \"envoy\""}, 5), + std::make_tuple(std::vector{" - string_match: \"envoy\""}, 5)))); + +// Test different configurations against the body. +// Parameterized test passes various configurations +// which are appended to the yaml string. +TEST_P(TapMatcherGenericBodyTest, GenericBodyTest) { + Direction dir = std::get<0>(GetParam()); + std::string matcher_yaml; + if (Direction::Request == dir) { + matcher_yaml = + R"EOF(http_request_generic_body_match: + patterns:)EOF"; + } else { + matcher_yaml = + R"EOF(http_response_generic_body_match: + patterns:)EOF"; + } + + auto text_and_result = std::get<1>(GetParam()); + // Append vector of matchers + for (const auto& i : std::get<0>(text_and_result)) { + matcher_yaml += '\n'; + matcher_yaml += i; + matcher_yaml += '\n'; + } + + TestUtility::loadFromYaml(matcher_yaml, config_); + buildMatcher(config_, matchers_); + EXPECT_EQ(1, matchers_.size()); + statuses_.resize(matchers_.size()); + matchers_[0]->onNewStream(statuses_); + + // Now create data. The data is passed to matcher in several + // steps to simulate that body was not received in one continuous + // chunk. Data for each step is reassembled from body_parts_. + for (const auto& i : std::get<1>(text_and_result)) { + data_.drain(data_.length()); + for (const auto& j : i) { + data_.add(body_parts_[j].data(), body_parts_[j].length()); + } + + if (Direction::Request == dir) { + matchers_[0]->onRequestBody(data_, statuses_); + } else { + matchers_[0]->onResponseBody(data_, statuses_); + } + } + const std::pair& expected = std::get<2>(text_and_result); + EXPECT_EQ((Matcher::MatchStatus{expected.first, expected.second}), + matchers_[0]->matchStatus(statuses_)); +} + +INSTANTIATE_TEST_SUITE_P( + TapMatcherGenericBodyTestSuite, TapMatcherGenericBodyTest, + ::testing::Combine( + ::testing::Values(TapMatcherTestBase::Direction::Request, + TapMatcherTestBase::Direction::Response), + ::testing::Values( + // SEARCHING FOR SINGLE PATTERN - no limit + // Should match - there is a single body chunk and envoy is in the body + std::make_tuple(std::vector{" - string_match: \"envoy\""}, + std::list>{{0}}, std::make_pair(true, false)), + // Should match - single body and `envoyproxy` is there + std::make_tuple(std::vector{" - string_match: \"envoyproxy\""}, + std::list>{{0, 1}}, std::make_pair(true, false)), + // Should match - 2 body chunks. First contains 'envoy' at the end and the second + // chunk contains 'proxy' at the beginning. + std::make_tuple(std::vector{" - string_match: \"envoyproxy\""}, + std::list>{{0}, {1}}, std::make_pair(true, false)), + // Should not match - 2 body chunks. First chunk does not contain 'enwoy' at the end but + // should match 'en' and then bail out. 
+ std::make_tuple(std::vector{" - string_match: \"enwoyproxy\""}, + std::list>{{0}, {1}}, std::make_pair(false, true)), + // Should match - 3 body chunks containing string `envoyproxy` when reassembled. + std::make_tuple(std::vector{" - string_match: \"envoyproxy\""}, + std::list>{{2}, {3}, {4}}, + std::make_pair(true, false)), + // Should match - 3 body chunks containing string ``envoyproxy layer`` when reassembled. + std::make_tuple(std::vector{" - string_match: \"envoyproxy\""}, + std::list>{{2}, {3}, {5}}, + std::make_pair(true, false)), + // Should match - 4 body chunks The last 3 contain string ``envoyproxy layer`` when + // reassembled. + std::make_tuple(std::vector{" - string_match: \"envoyproxy\""}, + std::list>{{6}, {2}, {3}, {5}}, + std::make_pair(true, false)), + // Should match - First few chunks does not match, then 3 reassembled match + // `envoyproxy`. + std::make_tuple(std::vector{" - string_match: \"envoyproxy\""}, + std::list>{{6}, {6}, {6}, {2}, {3}, {5}, {6}}, + std::make_pair(true, false)), + // Should match - chunk #7 contains hex '0xdead (3q0= in base64 format)'. + std::make_tuple(std::vector{" - binary_match: \"3q0=\""}, + std::list>{{6}, {6}, {7}, {6}}, + std::make_pair(true, false)), + // Should match - chunk #7 contains 0xdead and chunk 8 contains 0xbeef + // 0xdeadbeef encoded in base64 format is '3q2+7w=='. + std::make_tuple(std::vector{" - binary_match: \"3q2+7w==\""}, + std::list>{{6}, {6}, {7}, {8}, {6}}, + std::make_pair(true, false)), + // Should NOT match - hex 0xdeed (3u0= in base64 format) is not there + std::make_tuple(std::vector{" - binary_match: \"3u0=\""}, + std::list>{{6}, {6}, {7}, {8}, {6}}, + std::make_pair(false, true)), + + // SEARCHING FOR SINGLE PATTERN - with limit + // Should match - there is a single body chunk and 'This' is within + // search limit. + std::make_tuple(std::vector{" - string_match: \"This\"", + " bytes_limit: 10"}, + std::list>{{0}}, std::make_pair(true, false)), + // Should NOT match - there is a single body chunk and envoy is in the body + // but outside of the limit + std::make_tuple(std::vector{" - string_match: \"envoy\"", + " bytes_limit: 10"}, + std::list>{{0}}, std::make_pair(false, false)), + // Should NOT match - 2 body chunks. First contains 'envoy' at the end and the second + // chunk contains 'proxy' at the beginning. Search is limited to the first 10 bytes + // - 'proxy' in the second chunk should not be found as it is outside of the search + // limit. + std::make_tuple(std::vector{" - string_match: \"proxy\"", + " bytes_limit: 10"}, + std::list>{{0}, {1}}, std::make_pair(false, false)), + // Should match - 2 body chunks. First contains 'envoy' at the end and the second + // chunk contains 'proxy' at the beginning. 'proxy' is located at bytes 44-48 + // so should be found when search limit is 48. + std::make_tuple(std::vector{" - string_match: \"proxy\"", + " bytes_limit: 48"}, + std::list>{{0}, {1}}, std::make_pair(true, false)), + // Should NOT match - 2 body chunks. First contains 'envoy' at the end and the second + // chunk contains 'proxy' at the beginning. 'proxy' is located at bytes 44-48. + // Search limit is 47 bytes, so the last character of 'proxy' is outside of the search + // limit. + std::make_tuple(std::vector{" - string_match: \"proxy\"", + " bytes_limit: 47"}, + std::list>{{0}, {1}}, std::make_pair(false, false)), + // Should match - 2 body chunks. First contains 'envoy' at the end and the second + // chunk contains 'proxy' at the beginning. 'proxy' is located at bytes 44-48. 
+ // Search limit is 46 bytes, which is enough to include 'envoypro' in search. + std::make_tuple(std::vector{" - string_match: \"envoypro\"", + " bytes_limit: 46"}, + std::list>{{0}, {1}}, std::make_pair(true, false)), + // Should NOT match - 2 body chunks. First contains 'envoy' at the end and the second + // chunk contains 'proxy' at the beginning. 'proxy' is located at bytes 44-48. + // Search limit is 45 bytes, so the last character of `envoyproxy` is outside of the + // search limit. + std::make_tuple(std::vector{" - string_match: \"envoypro\"", + " bytes_limit: 45"}, + std::list>{{0}, {1}}, std::make_pair(false, false)), + + // SEARCHING FOR MULTIPLE PATTERNS - no limit + // Should NOT match. None of the patterns is in the body. + std::make_tuple(std::vector{" - string_match: \"balancer\"", + " - string_match: \"error\""}, + std::list>{{0}}, std::make_pair(false, true)), + // Should NOT match. One pattern is in the body but the second is not. + std::make_tuple(std::vector{" - string_match: \"envoy\"", + " - string_match: \"error\""}, + std::list>{{0}}, std::make_pair(false, true)), + // Should match. Both patterns are in the body (concatenated frags 0 and 1). + std::make_tuple(std::vector{" - string_match: \"envoy\"", + " - string_match: \"proxy\""}, + std::list>{{0, 1}}, std::make_pair(true, false)), + // SPELLCHECKER(off) + // Should match. Both patterns should be found. 'envoy' is in the first + // chunk and '0xbeef' (`vu8=` in base64 format) is in the chunk 8. + std::make_tuple(std::vector{" - string_match: \"envoy\"", + " - binary_match: \"vu8=\""}, + std::list>{{0, 1}, {8}, {6}}, + std::make_pair(true, false)), + // Should match. Both patterns should be found. '0xdeadbeef' is spread + // across two chunks - 7 and 8. The second pattern 'envoy' is in chunk 0. + std::make_tuple(std::vector{" - string_match: \"envoy\"", + " - binary_match: \"3q2+7w==\""}, + std::list>{{7}, {8}, {6, 0}}, + std::make_pair(true, false)), + // Should match. One pattern is substring of the other and they both + // are located part in chunk 0 and part in chunk 1. + std::make_tuple(std::vector{" - string_match: \"envoyproxy\"", + " - string_match: \"voypro\""}, + std::list>{{6}, {0}, {1}, {8}, {6}}, + std::make_pair(true, false)), + // Should match. Duplicated pattern which is found in the body. + std::make_tuple(std::vector{" - string_match: \"envoyproxy\"", + " - string_match: \"envoyproxy\""}, + std::list>{{6}, {0}, {1}, {8}, {6}}, + std::make_pair(true, false)), + // Test starting search from some offset for shorter patterns. + // Overlap buffer size will be initialized for longest pattern but + // search for shorter patterns should start from some index in overlap + // buffer. Make sure that the index is enough for the shorter pattern to be found. + std::make_tuple(std::vector{" - string_match: \"assemble\"", + " - string_match: \"envoyp\""}, + std::list>{{0, 1}}, std::make_pair(true, false)), + // SEARCHING FOR MULTIPLE PATTERNS - with limit + // Should NOT match. None of the patterns is in the body. + std::make_tuple(std::vector{" - string_match: \"balancer\"", + " - string_match: \"error\"", + " bytes_limit: 15"}, + std::list>{{0}}, std::make_pair(false, false)), + // Should NOT match. One pattern is in the body but the second is not. + // Search limit is large enough to find the first pattern. + std::make_tuple(std::vector{" - string_match: \"envoy\"", + " - string_match: \"error\"", + " bytes_limit: 35"}, + std::list>{{0}}, std::make_pair(false, false)), + // Should NOT match. 
One pattern is in the body but the second is not. + // Search limit is small so none of the patterns should be found. + std::make_tuple(std::vector{" - string_match: \"envoy\"", + " - string_match: \"error\"", + " bytes_limit: 5"}, + std::list>{{0}}, std::make_pair(false, false)), + // Should NOT match. Both patterns are in the body (concatenated frags 0 and 1). + // Limit includes only the first pattern. + std::make_tuple(std::vector{" - string_match: \"envoy\"", + " - string_match: \"proxy\"", + " bytes_limit: 30"}, + std::list>{{0, 1}}, std::make_pair(false, false)), + // Should match. Both patterns should be found. 'envoy' is in the first + // chunk and '0xbeef (vu8= in base64 format)' is in the chunk 8 and search limit is + // large enough to include 2 patterns + std::make_tuple( + std::vector{" - string_match: \"envoy\"", + " - binary_match: \"vu8=\"", " bytes_limit: 90"}, + std::list>{{0, 1}, {8}, {6}}, std::make_pair(true, false)), + // Should match. Both patterns should be found. '0xdeadbeef (3q2+7w== in base64)' is + // spread across two chunks - 7 and 8. The second pattern 'envoy' is in chunk 0. + std::make_tuple( + std::vector{" - string_match: \"envoy\"", + " - binary_match: \"3q2+7w==\"", " bytes_limit: 85"}, + std::list>{{7}, {8}, {6, 0}}, std::make_pair(true, false)), + // Should match. Search limit ends exactly where '0xdeadbeef (3q2+7w== in base64)' ends. + std::make_tuple( + std::vector{" - string_match: \"envoy\"", + " - binary_match: \"3q2+7w==\"", " bytes_limit: 47"}, + std::list>{{0}, {7}, {8}, {6, 0}}, std::make_pair(true, false)), + // Should NOT match. Search limit ends exactly one byte before end of '0xdeadbeef + // (3q2+7w== in base64)'. + std::make_tuple(std::vector{" - string_match: \"envoy\"", + " - binary_match: \"3q2+7w==\"", + " bytes_limit: 46"}, + std::list>{{0}, {7}, {8}, {6, 0}}, + std::make_pair(false, false)), + // Test the situation when end of the search limit overlaps with end of first chunk. + // Should NOT match. The second pattern should not be found. + std::make_tuple(std::vector{" - string_match: \"envoy\"", + " - binary_match: \"3q2+7w==\"", + " bytes_limit: 43"}, + std::list>{{0}, {7}, {8}, {6, 0}}, + std::make_pair(false, false)), + + // SPELLCHECKER(on) + // Now pass enormously large value. It should work just fine. + std::make_tuple(std::vector{" - string_match: \"envoy\"", + " - binary_match: \"3q2+7w==\"", + " bytes_limit: 50000000"}, + std::list>{{0}, {7}, {8}, {6, 0}}, + std::make_pair(true, false))))); + +// Test takes one long pattern existing on the boundary of two body chunks and generates random +// number of substrings of various lengths. All substrings and original long pattern are added to +// the matcher's config. Next the two body chunks are passed to the matcher. In all cases the +// matcher should report that match was found. +TEST_F(TapMatcherGenericBodyTest, RandomLengthOverlappingPatterns) { + std::string pattern = "envoyproxy"; + + // Loop through fairly large number of tests + for (size_t i = 0; i < 10 * pattern.length(); i++) { + std::string matcher_yaml = R"EOF( +http_request_generic_body_match: + patterns: +)EOF"; + // generate number of substrings which will be derived from pattern + uint32_t num = std::rand() % 10; + for (size_t j = 0; j < num; j++) { + std::string yaml_line = " - string_match: "; + + // Generate random start index. + const uint32_t start = std::rand() % (pattern.length() - 1); + // Generate random length. Minimum 1 character. 
+ const uint32_t len = 1 + std::rand() % (pattern.length() - start - 1); + yaml_line += "\"" + pattern.substr(start, len) + "\"\n"; + matcher_yaml += yaml_line; + } + // Finally add the original pattern, but not in all cases + if (0 == (num % 2)) { + matcher_yaml += " - string_match: " + pattern + "\n"; + } + + // Initialize matcher. + TestUtility::loadFromYaml(matcher_yaml, config_); + buildMatcher(config_, matchers_); + EXPECT_EQ(1, matchers_.size()); + statuses_.resize(matchers_.size()); + matchers_[0]->onNewStream(statuses_); + + EXPECT_EQ((Matcher::MatchStatus{false, true}), matchers_[0]->matchStatus(statuses_)); + + // Use body chunks #0 and #1 + data_.drain(data_.length()); + data_.add(body_parts_[0].data(), body_parts_[0].length()); + matchers_[0]->onRequestBody(data_, statuses_); + data_.drain(data_.length()); + data_.add(body_parts_[1].data(), body_parts_[1].length()); + matchers_[0]->onRequestBody(data_, statuses_); + + // Check the result. All patterns should be found. + EXPECT_EQ((Matcher::MatchStatus{true, false}), matchers_[0]->matchStatus(statuses_)); + + matchers_.clear(); + } +} } // namespace } // namespace Tap } // namespace Common diff --git a/test/extensions/filters/http/tap/tap_config_impl_test.cc b/test/extensions/filters/http/tap/tap_config_impl_test.cc index 2033c1cd240d..4e3763e9520e 100644 --- a/test/extensions/filters/http/tap/tap_config_impl_test.cc +++ b/test/extensions/filters/http/tap/tap_config_impl_test.cc @@ -7,6 +7,7 @@ using testing::_; using testing::Assign; +using testing::ByMove; using testing::InSequence; using testing::Return; using testing::ReturnRef; @@ -24,7 +25,7 @@ class HttpPerRequestTapperImplTest : public testing::Test { HttpPerRequestTapperImplTest() { EXPECT_CALL(*config_, createPerTapSinkHandleManager_(1)).WillOnce(Return(sink_manager_)); EXPECT_CALL(*config_, createMatchStatusVector()) - .WillOnce(Return(TapCommon::Matcher::MatchStatusVector(1))); + .WillOnce(Return(ByMove(TapCommon::Matcher::MatchStatusVector(1)))); EXPECT_CALL(*config_, rootMatcher()).WillRepeatedly(ReturnRef(matcher_)); EXPECT_CALL(matcher_, onNewStream(_)).WillOnce(SaveArgAddress(&statuses_)); tapper_ = std::make_unique(config_, 1); @@ -53,11 +54,13 @@ TEST_F(HttpPerRequestTapperImplTest, BufferedFlowNoTap) { InSequence s; EXPECT_CALL(matcher_, onHttpRequestHeaders(_, _)); tapper_->onRequestHeaders(request_headers_); + EXPECT_CALL(matcher_, onRequestBody(_, _)); tapper_->onRequestBody(Buffer::OwnedImpl("hello")); EXPECT_CALL(matcher_, onHttpRequestTrailers(_, _)); tapper_->onRequestTrailers(request_trailers_); EXPECT_CALL(matcher_, onHttpResponseHeaders(_, _)); tapper_->onResponseHeaders(response_headers_); + EXPECT_CALL(matcher_, onResponseBody(_, _)); tapper_->onResponseBody(Buffer::OwnedImpl("world")); EXPECT_CALL(matcher_, onHttpResponseTrailers(_, _)); tapper_->onResponseTrailers(response_trailers_); @@ -73,11 +76,13 @@ TEST_F(HttpPerRequestTapperImplTest, BufferedFlowTap) { InSequence s; EXPECT_CALL(matcher_, onHttpRequestHeaders(_, _)); tapper_->onRequestHeaders(request_headers_); + EXPECT_CALL(matcher_, onRequestBody(_, _)); tapper_->onRequestBody(Buffer::OwnedImpl("hello")); EXPECT_CALL(matcher_, onHttpRequestTrailers(_, _)); tapper_->onRequestTrailers(request_trailers_); EXPECT_CALL(matcher_, onHttpResponseHeaders(_, _)); tapper_->onResponseHeaders(response_headers_); + EXPECT_CALL(matcher_, onResponseBody(_, _)); tapper_->onResponseBody(Buffer::OwnedImpl("world")); EXPECT_CALL(matcher_, onHttpResponseTrailers(_, _)); 
tapper_->onResponseTrailers(response_trailers_); @@ -116,6 +121,7 @@ TEST_F(HttpPerRequestTapperImplTest, StreamedMatchRequestTrailers) { InSequence s; EXPECT_CALL(matcher_, onHttpRequestHeaders(_, _)); tapper_->onRequestHeaders(request_headers_); + EXPECT_CALL(matcher_, onRequestBody(_, _)); tapper_->onRequestBody(Buffer::OwnedImpl("hello")); EXPECT_CALL(matcher_, onHttpRequestTrailers(_, _)) .WillOnce(Assign(&(*statuses_)[0].matches_, true)); @@ -156,6 +162,7 @@ TEST_F(HttpPerRequestTapperImplTest, StreamedMatchRequestTrailers) { value: f )EOF"))); tapper_->onResponseHeaders(response_headers_); + EXPECT_CALL(matcher_, onResponseBody(_, _)); EXPECT_CALL(*sink_manager_, submitTrace_(TraceEqual( R"EOF( http_streamed_trace_segment: @@ -187,11 +194,13 @@ TEST_F(HttpPerRequestTapperImplTest, StreamedMatchResponseTrailers) { InSequence s; EXPECT_CALL(matcher_, onHttpRequestHeaders(_, _)); tapper_->onRequestHeaders(request_headers_); + EXPECT_CALL(matcher_, onRequestBody(_, _)); tapper_->onRequestBody(Buffer::OwnedImpl("hello")); EXPECT_CALL(matcher_, onHttpRequestTrailers(_, _)); tapper_->onRequestTrailers(request_trailers_); EXPECT_CALL(matcher_, onHttpResponseHeaders(_, _)); tapper_->onResponseHeaders(response_headers_); + EXPECT_CALL(matcher_, onResponseBody(_, _)); tapper_->onResponseBody(Buffer::OwnedImpl("world")); EXPECT_CALL(matcher_, onHttpResponseTrailers(_, _)) .WillOnce(Assign(&(*statuses_)[0].matches_, true)); diff --git a/test/extensions/filters/http/tap/tap_filter_integration_test.cc b/test/extensions/filters/http/tap/tap_filter_integration_test.cc index ff4f315a3236..a68b9d44ad19 100644 --- a/test/extensions/filters/http/tap/tap_filter_integration_test.cc +++ b/test/extensions/filters/http/tap/tap_filter_integration_test.cc @@ -538,5 +538,52 @@ name: tap EXPECT_EQ(1UL, test_server_->counter("http.config_test.tap.rq_tapped")->value()); } +// Verify that body matching works. +TEST_P(TapIntegrationTest, AdminBodyMatching) { + initializeFilter(admin_filter_config_); + + const std::string admin_request_yaml = + R"EOF( +config_id: test_config_id +tap_config: + match_config: + and_match: + rules: + - http_request_generic_body_match: + patterns: + - string_match: request + - http_response_generic_body_match: + patterns: + - string_match: response + output_config: + sinks: + - format: JSON_BODY_AS_STRING + streaming_admin: {} +)EOF"; + + startAdminRequest(admin_request_yaml); + + codec_client_ = makeHttpConnection(makeClientConnection(lookupPort("http"))); + // Should not tap, request and response body do not match. + makeRequest(request_headers_no_tap_, {{"This is test payload"}}, nullptr, + response_headers_no_tap_, {{"This is test payload"}}, nullptr); + // Should not tap, request matches but response body does not match. + makeRequest(request_headers_no_tap_, {{"This is request payload"}}, nullptr, + response_headers_no_tap_, {{"This is test payload"}}, nullptr); + // Should tap, request and response body match. 
+ makeRequest(request_headers_no_tap_, {{"This is request payload"}}, nullptr, + response_headers_no_tap_, {{"This is resp"}, {"onse payload"}}, nullptr); + + envoy::data::tap::v3::TraceWrapper trace; + admin_response_->waitForBodyData(1); + TestUtility::loadFromYaml(admin_response_->body(), trace); + EXPECT_NE(std::string::npos, + trace.http_buffered_trace().request().body().as_string().find("request")); + EXPECT_NE(std::string::npos, + trace.http_buffered_trace().response().body().as_string().find("response")); + + admin_client_->close(); +} + } // namespace } // namespace Envoy diff --git a/test/extensions/filters/http/tap/tap_filter_test.cc b/test/extensions/filters/http/tap/tap_filter_test.cc index 1f305ddbe79e..4da97e1af579 100644 --- a/test/extensions/filters/http/tap/tap_filter_test.cc +++ b/test/extensions/filters/http/tap/tap_filter_test.cc @@ -101,7 +101,7 @@ TEST_F(TapFilterTest, Config) { Http::TestRequestHeaderMapImpl request_headers; EXPECT_CALL(*http_per_request_tapper_, onRequestHeaders(_)); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false)); - Buffer::OwnedImpl request_body; + Buffer::OwnedImpl request_body("hello"); EXPECT_CALL(*http_per_request_tapper_, onRequestBody(_)); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(request_body, false)); Http::TestRequestTrailerMapImpl request_trailers; @@ -113,7 +113,7 @@ TEST_F(TapFilterTest, Config) { filter_->encode100ContinueHeaders(response_headers)); EXPECT_CALL(*http_per_request_tapper_, onResponseHeaders(_)); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, false)); - Buffer::OwnedImpl response_body; + Buffer::OwnedImpl response_body("hello"); EXPECT_CALL(*http_per_request_tapper_, onResponseBody(_)); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_body, false)); Http::TestResponseTrailerMapImpl response_trailers; diff --git a/test/extensions/transport_sockets/tap/tap_config_impl_test.cc b/test/extensions/transport_sockets/tap/tap_config_impl_test.cc index ddf53b8258a5..c8bcefbf623d 100644 --- a/test/extensions/transport_sockets/tap/tap_config_impl_test.cc +++ b/test/extensions/transport_sockets/tap/tap_config_impl_test.cc @@ -7,6 +7,7 @@ #include "test/test_common/simulated_time_system.h" using testing::_; +using testing::ByMove; using testing::InSequence; using testing::Invoke; using testing::Return; @@ -52,7 +53,7 @@ class PerSocketTapperImplTest : public testing::Test { ON_CALL(connection_, id()).WillByDefault(Return(1)); EXPECT_CALL(*config_, createPerTapSinkHandleManager_(1)).WillOnce(Return(sink_manager_)); EXPECT_CALL(*config_, createMatchStatusVector()) - .WillOnce(Return(TapCommon::Matcher::MatchStatusVector(1))); + .WillOnce(Return(ByMove(TapCommon::Matcher::MatchStatusVector(1)))); EXPECT_CALL(*config_, rootMatcher()).WillRepeatedly(ReturnRef(matcher_)); EXPECT_CALL(matcher_, onNewStream(_)) .WillOnce(Invoke([this](TapCommon::Matcher::MatchStatusVector& statuses) { diff --git a/test/server/admin/admin_test.cc b/test/server/admin/admin_test.cc index 443ae80595af..0281af125fb9 100644 --- a/test/server/admin/admin_test.cc +++ b/test/server/admin/admin_test.cc @@ -132,9 +132,9 @@ TEST_P(AdminInstanceTest, EscapeHelpTextWithPunctuation) { EXPECT_EQ(Http::Code::OK, getCallback("/", header_map, response)); const Http::HeaderString& content_type = header_map.ContentType()->value(); EXPECT_THAT(std::string(content_type.getStringView()), testing::HasSubstr("text/html")); - EXPECT_EQ(-1, 
response.search(planets.data(), planets.size(), 0)); + EXPECT_EQ(-1, response.search(planets.data(), planets.size(), 0, 0)); const std::string escaped_planets = "jupiter>saturn>mars"; - EXPECT_NE(-1, response.search(escaped_planets.data(), escaped_planets.size(), 0)); + EXPECT_NE(-1, response.search(escaped_planets.data(), escaped_planets.size(), 0, 0)); } TEST_P(AdminInstanceTest, HelpUsesFormForMutations) { @@ -143,8 +143,8 @@ TEST_P(AdminInstanceTest, HelpUsesFormForMutations) { EXPECT_EQ(Http::Code::OK, getCallback("/", header_map, response)); const std::string logging_action = " Date: Thu, 9 Jul 2020 12:13:30 -0400 Subject: [PATCH 571/909] HCM: Inherit connection level FilterState (#11787) Ensures that the FilterState data set on the connection level FilterState object propagate to the HCM FilterState. This allows data to be set on the connection FilterState to be made visible through the HCM FilterState, for example during access logging. Signed-off-by: Snow Pettersen --- docs/root/version_history/current.rst | 2 ++ source/common/http/conn_manager_impl.cc | 6 +++++- source/common/http/conn_manager_impl.h | 4 ---- source/common/stream_info/filter_state_impl.h | 2 +- source/common/stream_info/stream_info_impl.h | 18 +++++++++--------- source/server/connection_handler_impl.cc | 3 ++- test/common/http/conn_manager_impl_test.cc | 14 ++++++++++++++ 7 files changed, 33 insertions(+), 16 deletions(-) diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 4735d14dff02..69573ce36aa0 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -9,6 +9,8 @@ Minor Behavior Changes ---------------------- *Changes that may cause incompatibilities for some users, but should not for most* +* http: the per-stream FilterState maintained by the HTTP connection manager will now provide read/write access to the downstream connection FilterState. As such, code that relies on interacting with this might + see a change in behavior. * router: now consumes all retry related headers to prevent them from being propagated to the upstream. This behavior may be reverted by setting runtime feature `envoy.reloadable_features.consume_all_retry_headers` to false. Bug Fixes diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index b3e436a8978c..f8419a9d6b05 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -534,7 +534,8 @@ ConnectionManagerImpl::ActiveStream::ActiveStream(ConnectionManagerImpl& connect request_response_timespan_(new Stats::HistogramCompletableTimespanImpl( connection_manager_.stats_.named_.downstream_rq_time_, connection_manager_.timeSource())), stream_info_(connection_manager_.codec_->protocol(), connection_manager_.timeSource(), - connection_manager.filterState()), + connection_manager_.read_callbacks_->connection().streamInfo().filterState(), + StreamInfo::FilterState::LifeSpan::Connection), upstream_options_(std::make_shared()) { ASSERT(!connection_manager.config_.isRoutable() || ((connection_manager.config_.routeConfigProvider() == nullptr && @@ -2472,6 +2473,9 @@ bool ConnectionManagerImpl::ActiveStreamDecoderFilter::recreateStream() { // We don't need to copy over the old parent FilterState from the old StreamInfo if it did not // store any objects with a LifeSpan at or above DownstreamRequest. This is to avoid unnecessary // heap allocation. 
+ // TODO(snowp): In the case where connection level filter state has been set on the connection + // FilterState that we inherit, we'll end up copying this every time even though we could get + // away with just resetting it to the HCM filter_state_. if (parent_.stream_info_.filter_state_->hasDataAtOrAboveLifeSpan( StreamInfo::FilterState::LifeSpan::Request)) { (*parent_.connection_manager_.streams_.begin())->stream_info_.filter_state_ = diff --git a/source/common/http/conn_manager_impl.h b/source/common/http/conn_manager_impl.h index f51240bbc149..81647bda934f 100644 --- a/source/common/http/conn_manager_impl.h +++ b/source/common/http/conn_manager_impl.h @@ -102,9 +102,6 @@ class ConnectionManagerImpl : Logger::Loggable, TimeSource& timeSource() { return time_source_; } - // Return a reference to the shared_ptr so that it can be lazy created on demand. - std::shared_ptr& filterState() { return filter_state_; } - private: struct ActiveStream; @@ -803,7 +800,6 @@ class ConnectionManagerImpl : Logger::Loggable, const Server::OverloadActionState& overload_stop_accepting_requests_ref_; const Server::OverloadActionState& overload_disable_keepalive_ref_; TimeSource& time_source_; - std::shared_ptr filter_state_; }; } // namespace Http diff --git a/source/common/stream_info/filter_state_impl.h b/source/common/stream_info/filter_state_impl.h index 793938e29ead..319026e959ac 100644 --- a/source/common/stream_info/filter_state_impl.h +++ b/source/common/stream_info/filter_state_impl.h @@ -27,7 +27,7 @@ class FilterStateImpl : public FilterState { maybeCreateParent(ParentAccessMode::ReadOnly); } - using LazyCreateAncestor = std::pair; + using LazyCreateAncestor = std::pair; /** * @param ancestor a std::pair storing an ancestor, that can be passed in as a way to lazy * initialize a FilterState that's owned by an object with bigger scope than this. 
This is to diff --git a/source/common/stream_info/stream_info_impl.h b/source/common/stream_info/stream_info_impl.h index bf8af8d73b5b..a384cd401cf3 100644 --- a/source/common/stream_info/stream_info_impl.h +++ b/source/common/stream_info/stream_info_impl.h @@ -18,21 +18,21 @@ namespace Envoy { namespace StreamInfo { struct StreamInfoImpl : public StreamInfo { - StreamInfoImpl(TimeSource& time_source) - : StreamInfoImpl(absl::nullopt, time_source, - std::make_shared(FilterState::LifeSpan::FilterChain)) {} + StreamInfoImpl(TimeSource& time_source, + FilterState::LifeSpan life_span = FilterState::LifeSpan::FilterChain) + : StreamInfoImpl(absl::nullopt, time_source, std::make_shared(life_span)) {} StreamInfoImpl(Http::Protocol protocol, TimeSource& time_source) : StreamInfoImpl(protocol, time_source, std::make_shared(FilterState::LifeSpan::FilterChain)) {} StreamInfoImpl(Http::Protocol protocol, TimeSource& time_source, - FilterStateSharedPtr& parent_filter_state) - : StreamInfoImpl(protocol, time_source, - std::make_shared( - FilterStateImpl::LazyCreateAncestor(parent_filter_state, - FilterState::LifeSpan::Connection), - FilterState::LifeSpan::FilterChain)) {} + FilterStateSharedPtr parent_filter_state, FilterState::LifeSpan life_span) + : StreamInfoImpl( + protocol, time_source, + std::make_shared( + FilterStateImpl::LazyCreateAncestor(std::move(parent_filter_state), life_span), + FilterState::LifeSpan::FilterChain)) {} SystemTime startTime() const override { return start_time_; } diff --git a/source/server/connection_handler_impl.cc b/source/server/connection_handler_impl.cc index 24b694f82659..3d2a38928dad 100644 --- a/source/server/connection_handler_impl.cc +++ b/source/server/connection_handler_impl.cc @@ -378,7 +378,8 @@ void emitLogs(Network::ListenerConfig& config, StreamInfo::StreamInfo& stream_in void ConnectionHandlerImpl::ActiveTcpListener::newConnection( Network::ConnectionSocketPtr&& socket, const envoy::config::core::v3::Metadata& dynamic_metadata) { - auto stream_info = std::make_unique(parent_.dispatcher_.timeSource()); + auto stream_info = std::make_unique( + parent_.dispatcher_.timeSource(), StreamInfo::FilterState::LifeSpan::Connection); stream_info->setDownstreamLocalAddress(socket->localAddress()); stream_info->setDownstreamRemoteAddress(socket->remoteAddress()); stream_info->setDownstreamDirectRemoteAddress(socket->directRemoteAddress()); diff --git a/test/common/http/conn_manager_impl_test.cc b/test/common/http/conn_manager_impl_test.cc index c828c603fe12..2aab9bc4ff0d 100644 --- a/test/common/http/conn_manager_impl_test.cc +++ b/test/common/http/conn_manager_impl_test.cc @@ -6111,6 +6111,10 @@ class SimpleType : public StreamInfo::FilterState::Object { } // namespace TEST_F(HttpConnectionManagerImplTest, ConnectionFilterState) { + filter_callbacks_.connection_.stream_info_.filter_state_->setData( + "connection_provided_data", std::make_shared(555), + StreamInfo::FilterState::StateType::ReadOnly); + setup(false, "envoy-custom-server", false); setupFilterChain(1, 0, /* num_requests = */ 3); @@ -6153,6 +6157,9 @@ TEST_F(HttpConnectionManagerImplTest, ConnectionFilterState) { EXPECT_TRUE( decoder_filters_[1]->callbacks_->streamInfo().filterState()->hasData( "per_downstream_connection")); + EXPECT_TRUE( + decoder_filters_[1]->callbacks_->streamInfo().filterState()->hasData( + "connection_provided_data")); return FilterHeadersStatus::StopIteration; })); EXPECT_CALL(*decoder_filters_[2], decodeHeaders(_, true)) @@ -6166,6 +6173,9 @@ TEST_F(HttpConnectionManagerImplTest, 
ConnectionFilterState) { EXPECT_TRUE( decoder_filters_[2]->callbacks_->streamInfo().filterState()->hasData( "per_downstream_connection")); + EXPECT_TRUE( + decoder_filters_[1]->callbacks_->streamInfo().filterState()->hasData( + "connection_provided_data")); return FilterHeadersStatus::StopIteration; })); } @@ -6179,6 +6189,10 @@ TEST_F(HttpConnectionManagerImplTest, ConnectionFilterState) { conn_manager_->onData(fake_input, false); decoder_filters_[0]->callbacks_->recreateStream(); conn_manager_->onData(fake_input, false); + + // The connection life time data should have been written to the connection filter state. + EXPECT_TRUE(filter_callbacks_.connection_.stream_info_.filter_state_->hasData( + "per_downstream_connection")); } class HttpConnectionManagerImplDeathTest : public HttpConnectionManagerImplTest { From e6da9f158cd43510182726d267812a3bb69daefb Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Thu, 9 Jul 2020 09:24:33 -0700 Subject: [PATCH 572/909] build: optimize builds in RBE (#11966) - schedule wee8 build in larger machines with size=large label - luajit/moonjit make with `-j` - delete empty globs (partly addresses #9492) Signed-off-by: Lizan Zhou --- bazel/dependency_imports.bzl | 8 ++++++++ bazel/external/fmtlib.BUILD | 3 --- bazel/external/spdlog.BUILD | 1 - bazel/external/wee8.BUILD | 2 ++ bazel/foreign_cc/luajit.patch | 4 ++-- bazel/foreign_cc/moonjit.patch | 4 ++-- configs/BUILD | 1 - 7 files changed, 14 insertions(+), 9 deletions(-) diff --git a/bazel/dependency_imports.bzl b/bazel/dependency_imports.bzl index 1bcc3a8f35e5..38be774dccd5 100644 --- a/bazel/dependency_imports.bzl +++ b/bazel/dependency_imports.bzl @@ -1,6 +1,7 @@ load("@rules_foreign_cc//:workspace_definitions.bzl", "rules_foreign_cc_dependencies") load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies") load("@envoy_build_tools//toolchains:rbe_toolchains_config.bzl", "rbe_toolchains_config") +load("@bazel_toolchains//rules/exec_properties:exec_properties.bzl", "create_rbe_exec_properties_dict", "custom_exec_properties") load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies", "go_repository") load("@build_bazel_rules_apple//apple:repositories.bzl", "apple_rules_dependencies") load("@upb//bazel:repository_defs.bzl", upb_bazel_version_repository = "bazel_version_repository") @@ -19,6 +20,13 @@ def envoy_dependency_imports(go_version = GO_VERSION): apple_rules_dependencies() upb_bazel_version_repository(name = "upb_bazel_version") + custom_exec_properties( + name = "envoy_large_machine_exec_property", + constants = { + "LARGE_MACHINE": create_rbe_exec_properties_dict(labels = dict(size = "large")), + }, + ) + go_repository( name = "org_golang_google_grpc", build_file_proto_mode = "disable", diff --git a/bazel/external/fmtlib.BUILD b/bazel/external/fmtlib.BUILD index c4d97a2c9e69..da85ce22ad8f 100644 --- a/bazel/external/fmtlib.BUILD +++ b/bazel/external/fmtlib.BUILD @@ -4,9 +4,6 @@ licenses(["notice"]) # Apache 2 cc_library( name = "fmtlib", - srcs = glob([ - "fmt/*.cc", - ]), hdrs = glob([ "include/fmt/*.h", ]), diff --git a/bazel/external/spdlog.BUILD b/bazel/external/spdlog.BUILD index 4be48da95173..41080ccda63f 100644 --- a/bazel/external/spdlog.BUILD +++ b/bazel/external/spdlog.BUILD @@ -5,7 +5,6 @@ licenses(["notice"]) # Apache 2 cc_library( name = "spdlog", hdrs = glob([ - "include/**/*.cc", "include/**/*.h", ]), defines = ["SPDLOG_FMT_EXTERNAL"], diff --git a/bazel/external/wee8.BUILD b/bazel/external/wee8.BUILD index 341e1ad66c07..b61f95748672 100644 --- 
a/bazel/external/wee8.BUILD +++ b/bazel/external/wee8.BUILD @@ -1,4 +1,5 @@ load("@rules_cc//cc:defs.bzl", "cc_library") +load("@envoy_large_machine_exec_property//:constants.bzl", "LARGE_MACHINE") load(":genrule_cmd.bzl", "genrule_cmd") licenses(["notice"]) # Apache 2 @@ -26,4 +27,5 @@ genrule( "libwee8.a", ], cmd = genrule_cmd("@envoy//bazel/external:wee8.genrule_cmd"), + exec_properties = LARGE_MACHINE, ) diff --git a/bazel/foreign_cc/luajit.patch b/bazel/foreign_cc/luajit.patch index 296d66c85e52..035aa61094e2 100644 --- a/bazel/foreign_cc/luajit.patch +++ b/bazel/foreign_cc/luajit.patch @@ -58,7 +58,7 @@ index 0000000..9c71271 --- /dev/null +++ b/build.py @@ -0,0 +1,56 @@ -+#!/usr/bin/env python ++#!/usr/bin/env python3 + +import argparse +import os @@ -93,7 +93,7 @@ index 0000000..9c71271 + with open("clang-asan-blocklist.txt", "w") as f: + f.write("fun:*\n") + -+ os.system('make V=1 PREFIX="{}" install'.format(args.prefix)) ++ os.system('make -j{} V=1 PREFIX="{}" install'.format(os.cpu_count(), args.prefix)) + +def win_main(): + src_dir = os.path.dirname(os.path.realpath(__file__)) diff --git a/bazel/foreign_cc/moonjit.patch b/bazel/foreign_cc/moonjit.patch index 8bb54f01d803..b4593afdf111 100644 --- a/bazel/foreign_cc/moonjit.patch +++ b/bazel/foreign_cc/moonjit.patch @@ -49,7 +49,7 @@ index 0000000..9c71271 --- /dev/null +++ b/build.py @@ -0,0 +1,56 @@ -+#!/usr/bin/env python ++#!/usr/bin/env python3 + +import argparse +import os @@ -84,7 +84,7 @@ index 0000000..9c71271 + with open("clang-asan-blocklist.txt", "w") as f: + f.write("fun:*\n") + -+ os.system('make V=1 PREFIX="{}" install'.format(args.prefix)) ++ os.system('make -j{} V=1 PREFIX="{}" install'.format(os.cpu_count(), args.prefix)) + +def win_main(): + src_dir = os.path.dirname(os.path.realpath(__file__)) diff --git a/configs/BUILD b/configs/BUILD index f13fcb170f3a..7240a5cb3490 100644 --- a/configs/BUILD +++ b/configs/BUILD @@ -13,7 +13,6 @@ envoy_py_test_binary( srcs = ["configgen.py"], data = glob([ "*.yaml", - "*.json", ]), external_deps = ["jinja2"], ) From 36c3b10f7823b766e87b0703948f2c445c14b01c Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Thu, 9 Jul 2020 12:36:51 -0400 Subject: [PATCH 573/909] http: removing envoy.reloadable_features.reject_unsupported_transfer_encodings (#11957) Removing envoy.reloadable_features.reject_unsupported_transfer_encodings and associated code path. Risk Level: low Testing: n/a Docs Changes: n/a Release Notes: yes Fixes #11935 Signed-off-by: Alyssa Wilk --- docs/root/version_history/current.rst | 1 + source/common/http/http1/codec_impl.cc | 5 +---- source/common/http/http1/codec_impl.h | 1 - source/common/runtime/runtime_features.cc | 1 - 4 files changed, 2 insertions(+), 6 deletions(-) diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 69573ce36aa0..0bfc5674f112 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -22,6 +22,7 @@ Removed Config or Runtime *Normally occurs at the end of the* :ref:`deprecation period ` * http: removed legacy header sanitization and the runtime guard `envoy.reloadable_features.strict_header_validation`. +* http: removed legacy transfer-encoding enforcement and runtime guard `envoy.reloadable_features.reject_unsupported_transfer_encodings`. 
New Features ------------ diff --git a/source/common/http/http1/codec_impl.cc b/source/common/http/http1/codec_impl.cc index d622fe69016e..6085d6a6aea8 100644 --- a/source/common/http/http1/codec_impl.cc +++ b/source/common/http/http1/codec_impl.cc @@ -458,8 +458,6 @@ ConnectionImpl::ConnectionImpl(Network::Connection& connection, CodecStats& stat connection_header_sanitization_(Runtime::runtimeFeatureEnabled( "envoy.reloadable_features.connection_header_sanitization")), enable_trailers_(enable_trailers), - reject_unsupported_transfer_encodings_(Runtime::runtimeFeatureEnabled( - "envoy.reloadable_features.reject_unsupported_transfer_encodings")), strict_1xx_and_204_headers_(Runtime::runtimeFeatureEnabled( "envoy.reloadable_features.strict_1xx_and_204_response_headers")), output_buffer_([&]() -> void { this->onBelowLowWatermark(); }, @@ -689,8 +687,7 @@ int ConnectionImpl::onHeadersCompleteBase() { // CONNECT request has no defined semantics, and may be rejected. if (request_or_response_headers.TransferEncoding()) { const absl::string_view encoding = request_or_response_headers.getTransferEncodingValue(); - if ((reject_unsupported_transfer_encodings_ && - !absl::EqualsIgnoreCase(encoding, Headers::get().TransferEncodingValues.Chunked)) || + if (!absl::EqualsIgnoreCase(encoding, Headers::get().TransferEncodingValues.Chunked) || parser_.method == HTTP_CONNECT) { error_code_ = Http::Code::NotImplemented; sendProtocolError(Http1ResponseCodeDetails::get().InvalidTransferEncoding); diff --git a/source/common/http/http1/codec_impl.h b/source/common/http/http1/codec_impl.h index 994873701f85..f21231d71fa5 100644 --- a/source/common/http/http1/codec_impl.h +++ b/source/common/http/http1/codec_impl.h @@ -255,7 +255,6 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable Date: Thu, 9 Jul 2020 10:02:54 -0700 Subject: [PATCH 574/909] Validating config in docs (#11394) Signed-off-by: Dmitri Dolguikh --- api/CONTRIBUTING.md | 6 ++ docs/README.md | 8 ++- docs/_ext/validating_code_block.py | 62 +++++++++++++++++++ docs/build.sh | 8 ++- docs/conf.py | 8 ++- .../root/configuration/operations/runtime.rst | 3 +- docs/root/configuration/overview/examples.rst | 29 +++++---- tools/config_validation/validate_fragment.py | 22 ++++++- 8 files changed, 126 insertions(+), 20 deletions(-) create mode 100644 docs/_ext/validating_code_block.py diff --git a/api/CONTRIBUTING.md b/api/CONTRIBUTING.md index dc77573c683b..773248f2e2ea 100644 --- a/api/CONTRIBUTING.md +++ b/api/CONTRIBUTING.md @@ -26,6 +26,12 @@ The documentation can be built locally in the root of https://github.com/envoypr docs/build.sh ``` +To skip configuration examples validation: + +``` +SPHINX_SKIP_CONFIG_VALIDATION=true docs/build.sh +``` + Or to use a hermetic Docker container: ``` diff --git a/docs/README.md b/docs/README.md index 119596fec980..b672f51c8a4f 100644 --- a/docs/README.md +++ b/docs/README.md @@ -4,7 +4,13 @@ ./docs/build.sh ``` -The output can be found in `generated/docs`. +The output can be found in `generated/docs`. By default configuration examples are going to be validated during build. 
+To disable validation, set `SPHINX_SKIP_CONFIG_VALIDATION` environment variable to `true`: + +```bash +SPHINX_SKIP_CONFIG_VALIDATION=true docs/build.sh +``` + # How the Envoy website and docs are updated diff --git a/docs/_ext/validating_code_block.py b/docs/_ext/validating_code_block.py new file mode 100644 index 000000000000..6220ae98618b --- /dev/null +++ b/docs/_ext/validating_code_block.py @@ -0,0 +1,62 @@ +from typing import List +from docutils import nodes +from docutils.parsers.rst import Directive +from docutils.parsers.rst import directives +from sphinx.application import Sphinx +from sphinx.util.docutils import SphinxDirective +from sphinx.directives.code import CodeBlock +from sphinx.errors import ExtensionError + +import os +import subprocess + + +class ValidatingCodeBlock(CodeBlock): + """A directive that provides protobuf yaml formatting and validation. + + 'type-name' option is required and expected to contain full Envoy API type. + An ExtensionError is raised on validation failure. + Validation will be skipped if SPHINX_SKIP_CONFIG_VALIDATION environment variable is set. + """ + has_content = True + required_arguments = CodeBlock.required_arguments + optional_arguments = CodeBlock.optional_arguments + final_argument_whitespace = CodeBlock.final_argument_whitespace + option_spec = { + 'type-name': directives.unchanged, + } + option_spec.update(CodeBlock.option_spec) + skip_validation = (os.getenv('SPHINX_SKIP_CONFIG_VALIDATION') or 'false').lower() == 'true' + + def run(self): + source, line = self.state_machine.get_source_and_line(self.lineno) + # built-in directives.unchanged_required option validator produces a confusing error message + if self.options.get('type-name') == None: + raise ExtensionError("Expected type name in: {0} line: {1}".format(source, line)) + + if not ValidatingCodeBlock.skip_validation: + args = [ + 'bazel-bin/tools/config_validation/validate_fragment', + self.options.get('type-name'), '-s', '\n'.join(self.content) + ] + completed = subprocess.run(args, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + encoding='utf-8') + if completed.returncode != 0: + raise ExtensionError( + "Failed config validation for type: '{0}' in: {1} line: {2}:\n {3}".format( + self.options.get('type-name'), source, line, completed.stderr)) + + self.options.pop('type-name', None) + return list(CodeBlock.run(self)) + + +def setup(app): + app.add_directive("validated-code-block", ValidatingCodeBlock) + + return { + 'version': '0.1', + 'parallel_read_safe': True, + 'parallel_write_safe': True, + } diff --git a/docs/build.sh b/docs/build.sh index bc0c302414a1..9ca1bec440eb 100755 --- a/docs/build.sh +++ b/docs/build.sh @@ -1,5 +1,8 @@ #!/usr/bin/env bash +# set SPHINX_SKIP_CONFIG_VALIDATION environment variable to true to skip +# validation of configuration examples + .
tools/shell_utils.sh set -e @@ -126,6 +129,9 @@ cp -f "${CONFIGS_DIR}"/google-vrp/envoy-edge.yaml "${GENERATED_RST_DIR}"/configu rsync -rav $API_DIR/diagrams "${GENERATED_RST_DIR}/api-docs" -rsync -av "${SCRIPT_DIR}"/root/ "${SCRIPT_DIR}"/conf.py "${GENERATED_RST_DIR}" +rsync -av "${SCRIPT_DIR}"/root/ "${SCRIPT_DIR}"/conf.py "${SCRIPT_DIR}"/_ext "${GENERATED_RST_DIR}" + +# To speed up validate_fragment invocations in validating_code_block +bazel build ${BAZEL_BUILD_OPTIONS} //tools/config_validation:validate_fragment sphinx-build -W --keep-going -b html "${GENERATED_RST_DIR}" "${DOCS_OUTPUT_DIR}" diff --git a/docs/conf.py b/docs/conf.py index a2f4d250d939..1eb5725b689b 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -67,7 +67,13 @@ def setup(app): # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. -extensions = ['sphinxcontrib.httpdomain', 'sphinx.ext.extlinks', 'sphinx.ext.ifconfig'] + +sys.path.append(os.path.abspath("./_ext")) + +extensions = [ + 'sphinxcontrib.httpdomain', 'sphinx.ext.extlinks', 'sphinx.ext.ifconfig', + 'validating_code_block' +] extlinks = { 'repo': ('https://github.com/envoyproxy/envoy/blob/{}/%s'.format(blob_sha), ''), 'api': ('https://github.com/envoyproxy/envoy/blob/{}/api/%s'.format(blob_sha), ''), diff --git a/docs/root/configuration/operations/runtime.rst b/docs/root/configuration/operations/runtime.rst index 45b1f7263480..2e72e52bb953 100644 --- a/docs/root/configuration/operations/runtime.rst +++ b/docs/root/configuration/operations/runtime.rst @@ -24,7 +24,8 @@ runtime ` bootstrap configu layering. Runtime settings in later layers override earlier layers. A typical configuration might be: -.. code-block:: yaml +.. validated-code-block:: yaml + :type-name: envoy.config.bootstrap.v3.LayeredRuntime layers: - name: static_layer_0 diff --git a/docs/root/configuration/overview/examples.rst b/docs/root/configuration/overview/examples.rst index a4758cb15104..bc8124c48882 100644 --- a/docs/root/configuration/overview/examples.rst +++ b/docs/root/configuration/overview/examples.rst @@ -9,7 +9,8 @@ Static A minimal fully static bootstrap config is provided below: -.. code-block:: yaml +.. validated-code-block:: yaml + :type-name: envoy.config.bootstrap.v3.Bootstrap admin: access_log_path: /tmp/admin_access.log @@ -61,7 +62,8 @@ discovery ` via an :ref:`EDS` gRPC management server listening on 127.0.0.1:5678 is provided below: -.. code-block:: yaml +.. validated-code-block:: yaml + :type-name: envoy.config.bootstrap.v3.Bootstrap admin: access_log_path: /tmp/admin_access.log @@ -100,8 +102,8 @@ on 127.0.0.1:5678 is provided below: api_config_source: api_type: GRPC grpc_services: - envoy_grpc: - cluster_name: xds_cluster + - envoy_grpc: + cluster_name: xds_cluster - name: xds_cluster connect_timeout: 0.25s type: STATIC @@ -159,7 +161,8 @@ A fully dynamic bootstrap configuration, in which all resources other than those belonging to the management server are discovered via xDS is provided below: -.. code-block:: yaml +.. 
validated-code-block:: yaml + :type-name: envoy.config.bootstrap.v3.Bootstrap admin: access_log_path: /tmp/admin_access.log @@ -171,14 +174,14 @@ below: api_config_source: api_type: GRPC grpc_services: - envoy_grpc: - cluster_name: xds_cluster + - envoy_grpc: + cluster_name: xds_cluster cds_config: api_config_source: api_type: GRPC grpc_services: - envoy_grpc: - cluster_name: xds_cluster + - envoy_grpc: + cluster_name: xds_cluster static_resources: clusters: @@ -226,8 +229,8 @@ The management server could respond to LDS requests with: api_config_source: api_type: GRPC grpc_services: - envoy_grpc: - cluster_name: xds_cluster + - envoy_grpc: + cluster_name: xds_cluster http_filters: - name: envoy.filters.http.router @@ -262,8 +265,8 @@ The management server could respond to CDS requests with: api_config_source: api_type: GRPC grpc_services: - envoy_grpc: - cluster_name: xds_cluster + - envoy_grpc: + cluster_name: xds_cluster The management server could respond to EDS requests with: diff --git a/tools/config_validation/validate_fragment.py b/tools/config_validation/validate_fragment.py index 403b5540418f..faa9951114a8 100644 --- a/tools/config_validation/validate_fragment.py +++ b/tools/config_validation/validate_fragment.py @@ -19,6 +19,8 @@ from bazel_tools.tools.python.runfiles import runfiles +import argparse + def ValidateFragment(type_name, fragment): """Validate a dictionary representing a JSON/YAML fragment against an Envoy API proto3 type. @@ -50,7 +52,21 @@ def ValidateFragment(type_name, fragment): json_format.Parse(json_fragment, msg, descriptor_pool=pool) +def ParseArgs(): + parser = argparse.ArgumentParser( + description='Validate a YAML fragment against an Envoy API proto3 type.') + parser.add_argument( + 'message_type', + help='a string providing the type name, e.g. 
envoy.config.bootstrap.v3.Bootstrap.') + parser.add_argument('fragment_path', nargs='?', help='Path to a YAML configuration fragment.') + parser.add_argument('-s', required=False, help='YAML configuration fragment.') + + return parser.parse_args() + + if __name__ == '__main__': - type_name, yaml_path = sys.argv[1:] - ValidateFragment(type_name, yaml.load(pathlib.Path(yaml_path).read_text(), - Loader=yaml.FullLoader)) + parsed_args = ParseArgs() + message_type = parsed_args.message_type + content = parsed_args.s if (parsed_args.fragment_path is None) else pathlib.Path( + parsed_args.fragment_path).read_text() + ValidateFragment(message_type, yaml.load(content, Loader=yaml.FullLoader)) From e5366cc3d87c0a6d9e8b112b7f01abbcb6241b8f Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Thu, 9 Jul 2020 13:12:23 -0400 Subject: [PATCH 575/909] test: removing more infinite timeouts (#11978) Removing infinite timeouts from tcp client wait-for-n-bytes and tcp client wait for disconnect Signed-off-by: Alyssa Wilk --- test/integration/http2_integration_test.cc | 4 ++-- test/integration/integration.cc | 16 +++++++++----- test/integration/integration.h | 3 ++- .../integration/tcp_proxy_integration_test.cc | 6 ++--- .../tcp_tunneling_integration_test.cc | 4 ++-- test/integration/utility.h | 22 ++++++++++++++++++- 6 files changed, 40 insertions(+), 15 deletions(-) diff --git a/test/integration/http2_integration_test.cc b/test/integration/http2_integration_test.cc index 50ec78b68787..5ea7a7c2c734 100644 --- a/test/integration/http2_integration_test.cc +++ b/test/integration/http2_integration_test.cc @@ -1533,12 +1533,12 @@ void Http2FloodMitigationTest::beginSession() { Http2Frame Http2FloodMitigationTest::readFrame() { Http2Frame frame; - tcp_client_->waitForData(frame.HeaderSize); + EXPECT_TRUE(tcp_client_->waitForData(frame.HeaderSize)); frame.setHeader(tcp_client_->data()); tcp_client_->clearData(frame.HeaderSize); auto len = frame.payloadSize(); if (len) { - tcp_client_->waitForData(len); + EXPECT_TRUE(tcp_client_->waitForData(len)); frame.setPayload(tcp_client_->data()); tcp_client_->clearData(len); } diff --git a/test/integration/integration.cc b/test/integration/integration.cc index 6079451ac710..a738a95b0fa4 100644 --- a/test/integration/integration.cc +++ b/test/integration/integration.cc @@ -189,24 +189,28 @@ void IntegrationTcpClient::waitForData(const std::string& data, bool exact_match connection_->dispatcher().run(Event::Dispatcher::RunType::Block); } -void IntegrationTcpClient::waitForData(size_t length) { +AssertionResult IntegrationTcpClient::waitForData(size_t length, + std::chrono::milliseconds timeout) { if (payload_reader_->data().size() >= length) { - return; + return AssertionSuccess(); } - payload_reader_->setLengthToWaitFor(length); - connection_->dispatcher().run(Event::Dispatcher::RunType::Block); + return payload_reader_->waitForLength(length, timeout); } void IntegrationTcpClient::waitForDisconnect(bool ignore_spurious_events) { + Event::TimerPtr timeout_timer = + connection_->dispatcher().createTimer([this]() -> void { connection_->dispatcher().exit(); }); + timeout_timer->enableTimer(TestUtility::DefaultTimeout); + if (ignore_spurious_events) { - while (!disconnected_) { + while (!disconnected_ && timeout_timer->enabled()) { connection_->dispatcher().run(Event::Dispatcher::RunType::Block); } } else { connection_->dispatcher().run(Event::Dispatcher::RunType::Block); - EXPECT_TRUE(disconnected_); } + EXPECT_TRUE(disconnected_); } void IntegrationTcpClient::waitForHalfClose() { diff 
--git a/test/integration/integration.h b/test/integration/integration.h index c68efaf0963e..dbf30d912bb4 100644 --- a/test/integration/integration.h +++ b/test/integration/integration.h @@ -104,7 +104,8 @@ class IntegrationTcpClient { void close(); void waitForData(const std::string& data, bool exact_match = true); // wait for at least `length` bytes to be received - void waitForData(size_t length); + ABSL_MUST_USE_RESULT AssertionResult + waitForData(size_t length, std::chrono::milliseconds timeout = TestUtility::DefaultTimeout); void waitForDisconnect(bool ignore_spurious_events = false); void waitForHalfClose(); void readDisable(bool disabled); diff --git a/test/integration/tcp_proxy_integration_test.cc b/test/integration/tcp_proxy_integration_test.cc index ad50cf255793..bac7260a9e7e 100644 --- a/test/integration/tcp_proxy_integration_test.cc +++ b/test/integration/tcp_proxy_integration_test.cc @@ -70,13 +70,13 @@ TEST_P(TcpProxyIntegrationTest, TcpProxyUpstreamWritesFirst) { tcp_client->waitForData("ello", false); // Make sure length based wait works for the data already received - tcp_client->waitForData(5); - tcp_client->waitForData(4); + ASSERT_TRUE(tcp_client->waitForData(5)); + ASSERT_TRUE(tcp_client->waitForData(4)); // Drain part of the received message tcp_client->clearData(2); tcp_client->waitForData("llo"); - tcp_client->waitForData(3); + ASSERT_TRUE(tcp_client->waitForData(3)); ASSERT_TRUE(tcp_client->write("hello")); ASSERT_TRUE(fake_upstream_connection->waitForData(5)); diff --git a/test/integration/tcp_tunneling_integration_test.cc b/test/integration/tcp_tunneling_integration_test.cc index 01931f131ebd..83e5be19b533 100644 --- a/test/integration/tcp_tunneling_integration_test.cc +++ b/test/integration/tcp_tunneling_integration_test.cc @@ -302,7 +302,7 @@ TEST_P(TcpTunnelingIntegrationTest, Basic) { // Send data from upstream to downstream. upstream_request_->encodeData(12, false); - tcp_client->waitForData(12); + ASSERT_TRUE(tcp_client->waitForData(12)); // Now send more data and close the TCP client. This should be treated as half close, so the data // should go through. @@ -370,7 +370,7 @@ TEST_P(TcpTunnelingIntegrationTest, CloseUpstreamFirst) { // Send data from upstream to downstream with an end stream and make sure the data is received // before the connection is half-closed. upstream_request_->encodeData(12, true); - tcp_client->waitForData(12); + ASSERT_TRUE(tcp_client->waitForData(12)); tcp_client->waitForHalfClose(); // Attempt to send data upstream. diff --git a/test/integration/utility.h b/test/integration/utility.h index 21235c2e2b42..6ff69ad27a83 100644 --- a/test/integration/utility.h +++ b/test/integration/utility.h @@ -18,6 +18,8 @@ #include "test/test_common/printers.h" #include "test/test_common/test_time.h" +#include "gtest/gtest.h" + namespace Envoy { /** * A buffering response decoder used for testing. 
@@ -197,11 +199,29 @@ class WaitForPayloadReader : public Network::ReadFilterBaseImpl { data_to_wait_for_ = data; exact_match_ = exact_match; } - void setLengthToWaitFor(size_t length) { + + ABSL_MUST_USE_RESULT testing::AssertionResult waitForLength(size_t length, + std::chrono::milliseconds timeout) { ASSERT(!wait_for_length_); length_to_wait_for_ = length; wait_for_length_ = true; + + Event::TimerPtr timeout_timer = + dispatcher_.createTimer([this]() -> void { dispatcher_.exit(); }); + timeout_timer->enableTimer(timeout); + + dispatcher_.run(Event::Dispatcher::RunType::Block); + + if (timeout_timer->enabled()) { + timeout_timer->disableTimer(); + return testing::AssertionSuccess(); + } + + length_to_wait_for_ = 0; + wait_for_length_ = false; + return testing::AssertionFailure() << "Timed out waiting for " << length << " bytes of data\n"; } + const std::string& data() { return data_; } bool readLastByte() { return read_end_stream_; } void clearData(size_t count = std::string::npos) { data_.erase(0, count); } From d67246d199526a73b1e3448850325dd4b12d1d78 Mon Sep 17 00:00:00 2001 From: ankatare Date: Thu, 9 Jul 2020 22:54:11 +0530 Subject: [PATCH 576/909] v2 to v3 fragment changes for parseClusterFromV2Yaml (#11894) v2 to v3 fragment changes for parseClusterFromV2Yaml Risk Level: low Testing: unit, format and integration test Docs Changes: NA Release Notes: Part of #10843 Signed-off-by: Abhay Narayan Katare --- .../upstream/cluster_factory_impl_test.cc | 15 +- .../upstream/cluster_manager_impl_test.cc | 4 +- test/common/upstream/eds_speed_test.cc | 2 +- test/common/upstream/eds_test.cc | 14 +- .../upstream/logical_dns_cluster_test.cc | 93 +++-- .../upstream/original_dst_cluster_test.cc | 26 +- test/common/upstream/upstream_impl_test.cc | 388 ++++++++++++++---- test/common/upstream/utility.h | 5 +- .../clusters/aggregate/cluster_test.cc | 2 +- .../dynamic_forward_proxy/cluster_test.cc | 10 +- .../clusters/redis/redis_cluster_test.cc | 82 ++-- test/test_common/utility.h | 1 - 12 files changed, 449 insertions(+), 193 deletions(-) diff --git a/test/common/upstream/cluster_factory_impl_test.cc b/test/common/upstream/cluster_factory_impl_test.cc index 084d5b7a4b96..8dc52cc0bd90 100644 --- a/test/common/upstream/cluster_factory_impl_test.cc +++ b/test/common/upstream/cluster_factory_impl_test.cc @@ -81,7 +81,6 @@ TEST_F(TestStaticClusterImplTest, CreateWithoutConfig) { connect_timeout: 0.25s lb_policy: ROUND_ROBIN load_assignment: - cluster_name: staticcluster endpoints: - lb_endpoints: - endpoint: @@ -96,7 +95,7 @@ TEST_F(TestStaticClusterImplTest, CreateWithoutConfig) { TestStaticClusterFactory factory; Registry::InjectFactory registered_factory(factory); - const envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + const envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); auto create_result = ClusterFactoryImplBase::create( cluster_config, cm_, stats_, tls_, dns_resolver_, ssl_context_manager_, runtime_, random_, dispatcher_, log_manager_, local_info_, admin_, singleton_manager_, @@ -124,7 +123,6 @@ TEST_F(TestStaticClusterImplTest, CreateWithStructConfig) { connect_timeout: 0.25s lb_policy: ROUND_ROBIN load_assignment: - cluster_name: staticcluster endpoints: - lb_endpoints: - endpoint: @@ -142,7 +140,7 @@ TEST_F(TestStaticClusterImplTest, CreateWithStructConfig) { port_value: 80 )EOF"; - const envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + const envoy::config::cluster::v3::Cluster 
cluster_config = parseClusterFromV3Yaml(yaml); auto create_result = ClusterFactoryImplBase::create( cluster_config, cm_, stats_, tls_, dns_resolver_, ssl_context_manager_, runtime_, random_, dispatcher_, log_manager_, local_info_, admin_, singleton_manager_, @@ -169,7 +167,6 @@ TEST_F(TestStaticClusterImplTest, CreateWithTypedConfig) { connect_timeout: 0.25s lb_policy: ROUND_ROBIN load_assignment: - cluster_name: staticcluster endpoints: - lb_endpoints: - endpoint: @@ -186,7 +183,7 @@ TEST_F(TestStaticClusterImplTest, CreateWithTypedConfig) { port_value: 80 )EOF"; - const envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + const envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); auto create_result = ClusterFactoryImplBase::create( cluster_config, cm_, stats_, tls_, dns_resolver_, ssl_context_manager_, runtime_, random_, dispatcher_, log_manager_, local_info_, admin_, singleton_manager_, @@ -213,7 +210,6 @@ TEST_F(TestStaticClusterImplTest, UnsupportedClusterType) { connect_timeout: 0.25s lb_policy: ROUND_ROBIN load_assignment: - cluster_name: staticcluster endpoints: - lb_endpoints: - endpoint: @@ -230,7 +226,7 @@ TEST_F(TestStaticClusterImplTest, UnsupportedClusterType) { // the factory is not registered, expect to throw EXPECT_THROW_WITH_MESSAGE( { - const envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + const envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); ClusterFactoryImplBase::create( cluster_config, cm_, stats_, tls_, dns_resolver_, ssl_context_manager_, runtime_, random_, dispatcher_, log_manager_, local_info_, admin_, singleton_manager_, @@ -250,7 +246,6 @@ TEST_F(TestStaticClusterImplTest, HostnameWithoutDNS) { consistent_hashing_lb_config: use_hostname_for_hashing: true load_assignment: - cluster_name: staticcluster endpoints: - lb_endpoints: - endpoint: @@ -264,7 +259,7 @@ TEST_F(TestStaticClusterImplTest, HostnameWithoutDNS) { EXPECT_THROW_WITH_MESSAGE( { - const envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + const envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); ClusterFactoryImplBase::create( cluster_config, cm_, stats_, tls_, dns_resolver_, ssl_context_manager_, runtime_, random_, dispatcher_, log_manager_, local_info_, admin_, singleton_manager_, diff --git a/test/common/upstream/cluster_manager_impl_test.cc b/test/common/upstream/cluster_manager_impl_test.cc index 386031b45516..37a078501974 100644 --- a/test/common/upstream/cluster_manager_impl_test.cc +++ b/test/common/upstream/cluster_manager_impl_test.cc @@ -2665,7 +2665,7 @@ TEST_F(ClusterManagerImplTest, MergedUpdatesDestroyedOnUpdate) { common_lb_config: update_merge_window: 3s )EOF"; - EXPECT_TRUE(cluster_manager_->addOrUpdateCluster(parseClusterFromV2Yaml(yaml), "version1")); + EXPECT_TRUE(cluster_manager_->addOrUpdateCluster(parseClusterFromV3Yaml(yaml), "version1")); Cluster& cluster = cluster_manager_->activeClusters().find("new_cluster")->second; HostVectorSharedPtr hosts( @@ -2734,7 +2734,7 @@ TEST_F(ClusterManagerImplTest, MergedUpdatesDestroyedOnUpdate) { .gauge("cluster_manager.warming_clusters", Stats::Gauge::ImportMode::NeverImport) .value()); EXPECT_TRUE( - cluster_manager_->addOrUpdateCluster(parseClusterFromV2Yaml(yaml_updated), "version2")); + cluster_manager_->addOrUpdateCluster(parseClusterFromV3Yaml(yaml_updated), "version2")); EXPECT_EQ(2, factory_.stats_ 
.gauge("cluster_manager.active_clusters", Stats::Gauge::ImportMode::NeverImport) .value()); diff --git a/test/common/upstream/eds_speed_test.cc b/test/common/upstream/eds_speed_test.cc index b1ede181e491..a03af737483e 100644 --- a/test/common/upstream/eds_speed_test.cc +++ b/test/common/upstream/eds_speed_test.cc @@ -48,7 +48,7 @@ class EdsSpeedTest { void resetCluster(const std::string& yaml_config, Cluster::InitializePhase initialize_phase) { local_info_.node_.mutable_locality()->set_zone("us-east-1a"); - eds_cluster_ = parseClusterFromV2Yaml(yaml_config); + eds_cluster_ = parseClusterFromV3Yaml(yaml_config); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", eds_cluster_.alt_stat_name().empty() ? eds_cluster_.name() : eds_cluster_.alt_stat_name())); diff --git a/test/common/upstream/eds_test.cc b/test/common/upstream/eds_test.cc index ef7f38002993..f08269380065 100644 --- a/test/common/upstream/eds_test.cc +++ b/test/common/upstream/eds_test.cc @@ -61,7 +61,7 @@ class EdsTest : public testing::Test { connect_timeout: 0.25s type: EDS lb_policy: ROUND_ROBIN - drain_connections_on_host_removal: true + ignore_health_on_host_removal: true eds_cluster_config: service_name: fare eds_config: @@ -89,7 +89,7 @@ class EdsTest : public testing::Test { void resetCluster(const std::string& yaml_config, Cluster::InitializePhase initialize_phase) { local_info_.node_.mutable_locality()->set_zone("us-east-1a"); - eds_cluster_ = parseClusterFromV2Yaml(yaml_config); + eds_cluster_ = parseClusterFromV3Yaml(yaml_config); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", eds_cluster_.alt_stat_name().empty() ? eds_cluster_.name() : eds_cluster_.alt_stat_name())); @@ -138,8 +138,8 @@ class EdsWithHealthCheckUpdateTest : public EdsTest { // Build the initial cluster with some endpoints. 
void initializeCluster(const std::vector endpoint_ports, - const bool drain_connections_on_host_removal) { - resetCluster(drain_connections_on_host_removal); + const bool ignore_health_on_host_removal) { + resetCluster(ignore_health_on_host_removal); auto health_checker = std::make_shared(); EXPECT_CALL(*health_checker, start()); @@ -173,13 +173,13 @@ class EdsWithHealthCheckUpdateTest : public EdsTest { } } - void resetCluster(const bool drain_connections_on_host_removal) { + void resetCluster(const bool ignore_health_on_host_removal) { const std::string config = R"EOF( name: name connect_timeout: 0.25s type: EDS lb_policy: ROUND_ROBIN - drain_connections_on_host_removal: {} + ignore_health_on_host_removal: {} eds_cluster_config: service_name: fare eds_config: @@ -189,7 +189,7 @@ class EdsWithHealthCheckUpdateTest : public EdsTest { - eds refresh_delay: 1s )EOF"; - EdsTest::resetCluster(fmt::format(config, drain_connections_on_host_removal), + EdsTest::resetCluster(fmt::format(config, ignore_health_on_host_removal), Cluster::InitializePhase::Secondary); } diff --git a/test/common/upstream/logical_dns_cluster_test.cc b/test/common/upstream/logical_dns_cluster_test.cc index a5158e3e3f59..df0aef7ca10e 100644 --- a/test/common/upstream/logical_dns_cluster_test.cc +++ b/test/common/upstream/logical_dns_cluster_test.cc @@ -43,10 +43,11 @@ class LogicalDnsClusterTest : public testing::Test { protected: LogicalDnsClusterTest() : api_(Api::createApiForTest(stats_store_)) {} - void setupFromV2Yaml(const std::string& yaml) { + void setupFromV3Yaml(const std::string& yaml, bool avoid_boosting = true) { resolve_timer_ = new Event::MockTimer(&dispatcher_); NiceMock cm; - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = + parseClusterFromV3Yaml(yaml, avoid_boosting); Envoy::Stats::ScopePtr scope = stats_store_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); @@ -75,7 +76,7 @@ class LogicalDnsClusterTest : public testing::Test { void testBasicSetup(const std::string& config, const std::string& expected_address, uint32_t expected_port, uint32_t expected_hc_port) { expectResolve(Network::DnsLookupFamily::V4Only, expected_address); - setupFromV2Yaml(config); + setupFromV3Yaml(config); EXPECT_CALL(membership_updated_, ready()); EXPECT_CALL(initialized_, ready()); @@ -264,10 +265,14 @@ TEST_P(LogicalDnsParamTest, ImmediateResolve) { lb_policy: round_robin )EOF" + std::get<0>(GetParam()) + R"EOF( - hosts: - - socket_address: - address: foo.bar.com - port_value: 443 + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 443 )EOF"; EXPECT_CALL(membership_updated_, ready()); @@ -280,7 +285,7 @@ TEST_P(LogicalDnsParamTest, ImmediateResolve) { TestUtility::makeDnsResponse(std::get<2>(GetParam()))); return nullptr; })); - setupFromV2Yaml(yaml); + setupFromV3Yaml(yaml); EXPECT_EQ(1UL, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts().size()); EXPECT_EQ(1UL, cluster_->prioritySet().hostSetsPerPriority()[0]->healthyHosts().size()); EXPECT_EQ("foo.bar.com", @@ -302,14 +307,18 @@ TEST_F(LogicalDnsParamTest, FailureRefreshRateBackoffResetsWhenSuccessHappens) { # Since the following expectResolve() requires Network::DnsLookupFamily::V4Only we need to set # dns_lookup_family to V4_ONLY explicitly for v2 .yaml config. 
dns_lookup_family: V4_ONLY - hosts: - - socket_address: - address: foo.bar.com - port_value: 443 + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 443 )EOF"; expectResolve(Network::DnsLookupFamily::V4Only, "foo.bar.com"); - setupFromV2Yaml(yaml); + setupFromV3Yaml(yaml); // Failing response kicks the failure refresh backoff strategy. ON_CALL(random_, random()).WillByDefault(Return(8000)); @@ -342,14 +351,18 @@ TEST_F(LogicalDnsParamTest, TtlAsDnsRefreshRate) { # Since the following expectResolve() requires Network::DnsLookupFamily::V4Only we need to set # dns_lookup_family to V4_ONLY explicitly for v2 .yaml config. dns_lookup_family: V4_ONLY - hosts: - - socket_address: - address: foo.bar.com - port_value: 443 + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 443 )EOF"; expectResolve(Network::DnsLookupFamily::V4Only, "foo.bar.com"); - setupFromV2Yaml(yaml); + setupFromV3Yaml(yaml); // TTL is recorded when the DNS response is successful and not empty EXPECT_CALL(membership_updated_, ready()); @@ -378,17 +391,25 @@ TEST_F(LogicalDnsClusterTest, BadConfig) { dns_refresh_rate: 4s connect_timeout: 0.25s lb_policy: ROUND_ROBIN - hosts: - - socket_address: - address: foo.bar.com - port_value: 443 - - socket_address: - address: foo2.bar.com - port_value: 443 + load_assignment: + cluster_name: name + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 443 + - endpoint: + address: + socket_address: + address: foo2.bar.com + port_value: 443 )EOF"; - EXPECT_THROW_WITH_MESSAGE(setupFromV2Yaml(multiple_hosts_yaml), EnvoyException, - "LOGICAL_DNS clusters must have a single host"); + EXPECT_THROW_WITH_MESSAGE( + setupFromV3Yaml(multiple_hosts_yaml), EnvoyException, + "LOGICAL_DNS clusters must have a single locality_lb_endpoint and a single lb_endpoint"); const std::string multiple_lb_endpoints_yaml = R"EOF( name: name @@ -418,7 +439,7 @@ TEST_F(LogicalDnsClusterTest, BadConfig) { )EOF"; EXPECT_THROW_WITH_MESSAGE( - setupFromV2Yaml(multiple_lb_endpoints_yaml), EnvoyException, + setupFromV3Yaml(multiple_lb_endpoints_yaml), EnvoyException, "LOGICAL_DNS clusters must have a single locality_lb_endpoint and a single lb_endpoint"); const std::string multiple_endpoints_yaml = R"EOF( @@ -451,7 +472,7 @@ TEST_F(LogicalDnsClusterTest, BadConfig) { )EOF"; EXPECT_THROW_WITH_MESSAGE( - setupFromV2Yaml(multiple_endpoints_yaml), EnvoyException, + setupFromV3Yaml(multiple_endpoints_yaml), EnvoyException, "LOGICAL_DNS clusters must have a single locality_lb_endpoint and a single lb_endpoint"); const std::string custom_resolver_yaml = R"EOF( @@ -475,7 +496,7 @@ TEST_F(LogicalDnsClusterTest, BadConfig) { port_value: 8000 )EOF"; - EXPECT_THROW_WITH_MESSAGE(setupFromV2Yaml(custom_resolver_yaml), EnvoyException, + EXPECT_THROW_WITH_MESSAGE(setupFromV3Yaml(custom_resolver_yaml), EnvoyException, "LOGICAL_DNS clusters must NOT have a custom resolver name set"); } @@ -492,10 +513,14 @@ TEST_F(LogicalDnsClusterTest, Basic) { # Since the following expectResolve() requires Network::DnsLookupFamily::V4Only we need to set # dns_lookup_family to V4_ONLY explicitly for v2 .yaml config. 
dns_lookup_family: V4_ONLY - hosts: - - socket_address: - address: foo.bar.com - port_value: 443 + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 443 )EOF"; const std::string basic_yaml_load_assignment = R"EOF( diff --git a/test/common/upstream/original_dst_cluster_test.cc b/test/common/upstream/original_dst_cluster_test.cc index 4f25099ba615..80bb3e2c8910 100644 --- a/test/common/upstream/original_dst_cluster_test.cc +++ b/test/common/upstream/original_dst_cluster_test.cc @@ -70,7 +70,9 @@ class OriginalDstClusterTest : public testing::Test { : cleanup_timer_(new Event::MockTimer(&dispatcher_)), api_(Api::createApiForTest(stats_store_)) {} - void setupFromYaml(const std::string& yaml) { setup(parseClusterFromV2Yaml(yaml)); } + void setupFromYaml(const std::string& yaml, bool avoid_boosting = true) { + setup(parseClusterFromV3Yaml(yaml, avoid_boosting)); + } void setup(const envoy::config::cluster::v3::Cluster& cluster_config) { NiceMock cm; @@ -115,7 +117,7 @@ TEST(OriginalDstClusterConfigTest, GoodConfig) { cleanup_interval: 1s )EOF"; // Help Emacs balance quotation marks: " - EXPECT_TRUE(parseClusterFromV2Yaml(yaml).has_cleanup_interval()); + EXPECT_TRUE(parseClusterFromV3Yaml(yaml).has_cleanup_interval()); } TEST_F(OriginalDstClusterTest, BadConfigWithLoadAssignment) { @@ -123,7 +125,7 @@ TEST_F(OriginalDstClusterTest, BadConfigWithLoadAssignment) { name: name connect_timeout: 0.25s type: ORIGINAL_DST - lb_policy: ORIGINAL_DST_LB + lb_policy: CLUSTER_PROVIDED cleanup_interval: 1s load_assignment: cluster_name: name @@ -155,7 +157,7 @@ TEST_F(OriginalDstClusterTest, BadConfigWithDeprecatedHosts) { )EOF"; EXPECT_THROW_WITH_MESSAGE( - setupFromYaml(yaml), EnvoyException, + setupFromYaml(yaml, false), EnvoyException, "ORIGINAL_DST clusters must have no load assignment or hosts configured"); } @@ -164,7 +166,7 @@ TEST_F(OriginalDstClusterTest, CleanupInterval) { name: name connect_timeout: 1.250s type: ORIGINAL_DST - lb_policy: ORIGINAL_DST_LB + lb_policy: CLUSTER_PROVIDED cleanup_interval: 1s )EOF"; // Help Emacs balance quotation marks: " @@ -182,7 +184,7 @@ TEST_F(OriginalDstClusterTest, NoContext) { name: name, connect_timeout: 0.125s type: ORIGINAL_DST - lb_policy: ORIGINAL_DST_LB + lb_policy: CLUSTER_PROVIDED )EOF"; EXPECT_CALL(initialized_, ready()); @@ -240,7 +242,7 @@ TEST_F(OriginalDstClusterTest, Membership) { name: name connect_timeout: 1.250s type: ORIGINAL_DST - lb_policy: ORIGINAL_DST_LB + lb_policy: CLUSTER_PROVIDED )EOF"; EXPECT_CALL(initialized_, ready()); @@ -331,7 +333,7 @@ TEST_F(OriginalDstClusterTest, Membership2) { name: name connect_timeout: 1.250s type: ORIGINAL_DST - lb_policy: ORIGINAL_DST_LB + lb_policy: CLUSTER_PROVIDED )EOF"; EXPECT_CALL(initialized_, ready()); @@ -419,7 +421,7 @@ TEST_F(OriginalDstClusterTest, Connection) { name: name connect_timeout: 1.250s type: ORIGINAL_DST - lb_policy: ORIGINAL_DST_LB + lb_policy: CLUSTER_PROVIDED )EOF"; EXPECT_CALL(initialized_, ready()); @@ -459,7 +461,7 @@ TEST_F(OriginalDstClusterTest, MultipleClusters) { name: name connect_timeout: 1.250s type: ORIGINAL_DST - lb_policy: ORIGINAL_DST_LB + lb_policy: CLUSTER_PROVIDED )EOF"; EXPECT_CALL(initialized_, ready()); @@ -511,7 +513,7 @@ TEST_F(OriginalDstClusterTest, UseHttpHeaderEnabled) { name: name connect_timeout: 1.250s type: ORIGINAL_DST - lb_policy: ORIGINAL_DST_LB + lb_policy: CLUSTER_PROVIDED original_dst_lb_config: use_http_header: true )EOF"; @@ -584,7 +586,7 @@ 
TEST_F(OriginalDstClusterTest, UseHttpHeaderDisabled) { name: name connect_timeout: 1.250s type: ORIGINAL_DST - lb_policy: ORIGINAL_DST_LB + lb_policy: CLUSTER_PROVIDED )EOF"; EXPECT_CALL(initialized_, ready()); diff --git a/test/common/upstream/upstream_impl_test.cc b/test/common/upstream/upstream_impl_test.cc index 15aba3773fed..c433faedc3d5 100644 --- a/test/common/upstream/upstream_impl_test.cc +++ b/test/common/upstream/upstream_impl_test.cc @@ -161,10 +161,14 @@ TEST_P(StrictDnsParamTest, ImmediateResolve) { )EOF" + std::get<0>(GetParam()) + R"EOF( lb_policy: round_robin - hosts: - - socket_address: - address: foo.bar.com - port_value: 443 + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 443 )EOF"; EXPECT_CALL(initialized, ready()); EXPECT_CALL(*dns_resolver, resolve("foo.bar.com", std::get<1>(GetParam()), _)) @@ -174,7 +178,7 @@ TEST_P(StrictDnsParamTest, ImmediateResolve) { TestUtility::makeDnsResponse(std::get<2>(GetParam()))); return nullptr; })); - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); @@ -208,7 +212,7 @@ TEST_F(StrictDnsClusterImplTest, ZeroHostsIsInializedImmediately) { - lb_endpoints: )EOF"; - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); @@ -232,11 +236,18 @@ TEST_F(StrictDnsClusterImplTest, ZeroHostsHealthChecker) { connect_timeout: 0.25s type: STRICT_DNS lb_policy: ROUND_ROBIN - hosts: [{ socket_address: { address: foo.bar.com, port_value: 443 }}] + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 443 )EOF"; ResolverData resolver(*dns_resolver_, dispatcher_); - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); @@ -292,12 +303,22 @@ TEST_F(StrictDnsClusterImplTest, Basic) { http_protocol_options: header_key_format: proper_case_words: {} - hosts: - - { socket_address: { address: localhost1, port_value: 11001 }} - - { socket_address: { address: localhost2, port_value: 11002 }} + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: localhost1 + port_value: 11001 + - endpoint: + address: + socket_address: + address: localhost2 + port_value: 11002 )EOF"; - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? 
cluster_config.name() : cluster_config.alt_stat_name())); @@ -437,12 +458,19 @@ TEST_F(StrictDnsClusterImplTest, HostRemovalActiveHealthSkipped) { connect_timeout: 0.25s type: STRICT_DNS lb_policy: ROUND_ROBIN - drain_connections_on_host_removal: true - hosts: [{ socket_address: { address: foo.bar.com, port_value: 443 }}] + ignore_health_on_host_removal: true + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 443 )EOF"; ResolverData resolver(*dns_resolver_, dispatcher_); - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); @@ -492,11 +520,18 @@ TEST_F(StrictDnsClusterImplTest, HostRemovalAfterHcFail) { connect_timeout: 0.25s type: STRICT_DNS lb_policy: ROUND_ROBIN - hosts: [{ socket_address: { address: foo.bar.com, port_value: 443 }}] + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 443 )EOF"; ResolverData resolver(*dns_resolver_, dispatcher_); - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); @@ -623,7 +658,7 @@ TEST_F(StrictDnsClusterImplTest, LoadAssignmentBasic) { port_value: 8000 )EOF"; - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); @@ -858,7 +893,7 @@ TEST_F(StrictDnsClusterImplTest, LoadAssignmentBasicMultiplePriorities) { port_value: 8000 )EOF"; - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? 
cluster_config.name() : cluster_config.alt_stat_name())); @@ -959,11 +994,19 @@ TEST_F(StrictDnsClusterImplTest, CustomResolverFails) { connect_timeout: 0.25s type: STRICT_DNS lb_policy: ROUND_ROBIN - drain_connections_on_host_removal: true - hosts: [{ socket_address: { address: foo.bar.com, port_value: 443, resolver_name: customresolver }}] + ignore_health_on_host_removal: true + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 443 + resolver_name: customresolver )EOF"; - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format("cluster.{}.", cluster_config.name())); Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( @@ -988,10 +1031,17 @@ TEST_F(StrictDnsClusterImplTest, FailureRefreshRateBackoffResetsWhenSuccessHappe dns_failure_refresh_rate: base_interval: 7s max_interval: 10s - hosts: [{ socket_address: { address: localhost1, port_value: 11001 }}] + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: localhost1 + port_value: 11001 )EOF"; - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); @@ -1030,10 +1080,17 @@ TEST_F(StrictDnsClusterImplTest, TtlAsDnsRefreshRate) { lb_policy: ROUND_ROBIN dns_refresh_rate: 4s respect_dns_ttl: true - hosts: [{ socket_address: { address: localhost1, port_value: 11001 }}] + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: localhost1 + port_value: 11001 )EOF"; - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); @@ -1098,12 +1155,22 @@ TEST_F(StrictDnsClusterImplTest, Http2UserDefinedSettingsParametersValidation) { http_protocol_options: header_key_format: proper_case_words: {} - hosts: - - { socket_address: { address: localhost1, port_value: 11001 }} - - { socket_address: { address: localhost2, port_value: 11002 }} + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: localhost1 + port_value: 11001 + - endpoint: + address: + socket_address: + address: localhost2 + port_value: 11002 )EOF"; - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? 
cluster_config.name() : cluster_config.alt_stat_name())); @@ -1236,13 +1303,17 @@ TEST_F(StaticClusterImplTest, InitialHosts) { connect_timeout: 0.25s type: STATIC lb_policy: ROUND_ROBIN - hosts: - - socket_address: - address: 10.0.0.1 - port_value: 443 + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 10.0.0.1 + port_value: 443 )EOF"; - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); @@ -1277,7 +1348,7 @@ TEST_F(StaticClusterImplTest, LoadAssignmentEmptyHostname) { port_value: 8000 )EOF"; - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); @@ -1312,7 +1383,7 @@ TEST_F(StaticClusterImplTest, LoadAssignmentNonEmptyHostname) { port_value: 8000 )EOF"; - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); @@ -1347,7 +1418,7 @@ TEST_F(StaticClusterImplTest, LoadAssignmentNonEmptyHostnameWithHealthChecks) { hostname: "foo2" )EOF"; - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); @@ -1400,7 +1471,7 @@ TEST_F(StaticClusterImplTest, LoadAssignmentMultiplePriorities) { port_value: 8000 )EOF"; - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); @@ -1445,7 +1516,7 @@ TEST_F(StaticClusterImplTest, LoadAssignmentLocality) { port_value: 8000 )EOF"; - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); @@ -1491,7 +1562,7 @@ TEST_F(StaticClusterImplTest, LoadAssignmentEdsHealth) { )EOF"; NiceMock cm; - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? 
cluster_config.name() : cluster_config.alt_stat_name())); @@ -1513,10 +1584,17 @@ TEST_F(StaticClusterImplTest, AltStatName) { connect_timeout: 0.25s type: STRICT_DNS lb_policy: ROUND_ROBIN - hosts: [{ socket_address: { address: 10.0.0.1, port_value: 443 }}] + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 10.0.0.1 + port_value: 443 )EOF"; - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); @@ -1536,10 +1614,17 @@ TEST_F(StaticClusterImplTest, RingHash) { connect_timeout: 0.25s type: static lb_policy: ring_hash - hosts: [{ socket_address: { address: 10.0.0.1, port_value: 11001 }}] + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 10.0.0.1 + port_value: 11001 )EOF"; - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); @@ -1560,12 +1645,22 @@ TEST_F(StaticClusterImplTest, OutlierDetector) { connect_timeout: 0.25s type: static lb_policy: random - hosts: - - { socket_address: { address: 10.0.0.1, port_value: 11001 }} - - { socket_address: { address: 10.0.0.1, port_value: 11002 }} + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 10.0.0.1 + port_value: 11001 + - endpoint: + address: + socket_address: + address: 10.0.0.1 + port_value: 11002 )EOF"; - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); @@ -1608,12 +1703,22 @@ TEST_F(StaticClusterImplTest, HealthyStat) { connect_timeout: 0.25s type: static lb_policy: random - hosts: - - { socket_address: { address: 10.0.0.1, port_value: 11001 }} - - { socket_address: { address: 10.0.0.1, port_value: 11002 }} + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 10.0.0.1 + port_value: 11001 + - endpoint: + address: + socket_address: + address: 10.0.0.1 + port_value: 11002 )EOF"; - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? 
cluster_config.name() : cluster_config.alt_stat_name())); @@ -1739,12 +1844,22 @@ TEST_F(StaticClusterImplTest, UrlConfig) { connect_timeout: 0.25s type: static lb_policy: random - hosts: - - { socket_address: { address: 10.0.0.1, port_value: 11001 }} - - { socket_address: { address: 10.0.0.2, port_value: 11002 }} + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 10.0.0.1 + port_value: 11001 + - endpoint: + address: + socket_address: + address: 10.0.0.2 + port_value: 11002 )EOF"; - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); @@ -1795,7 +1910,7 @@ TEST_F(StaticClusterImplTest, UnsupportedLBType) { EXPECT_THROW_WITH_MESSAGE( { - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format("cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() @@ -1818,10 +1933,16 @@ TEST_F(StaticClusterImplTest, MalformedHostIP) { connect_timeout: 0.25s type: STATIC lb_policy: ROUND_ROBIN - hosts: [{ socket_address: { address: foo.bar.com }}] + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com )EOF"; - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); @@ -1849,7 +1970,7 @@ TEST_F(StaticClusterImplTest, NoHostsTest) { - priority: 1 )EOF"; - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format("cluster.{}.", cluster_config.name())); Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( @@ -1918,9 +2039,16 @@ TEST_F(ClusterImplTest, CloseConnectionsOnHostHealthFailure) { type: STRICT_DNS lb_policy: ROUND_ROBIN close_connections_on_host_health_failure: true - hosts: [{ socket_address: { address: foo.bar.com, port_value: 443 }}] + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 443 )EOF"; - envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? 
cluster_config.name() : cluster_config.alt_stat_name())); @@ -2050,8 +2178,9 @@ class ClusterInfoImplTest : public testing::Test { public: ClusterInfoImplTest() : api_(Api::createApiForTest(stats_)) {} - std::unique_ptr makeCluster(const std::string& yaml) { - cluster_config_ = parseClusterFromV2Yaml(yaml); + std::unique_ptr makeCluster(const std::string& yaml, + bool avoid_boosting = true) { + cluster_config_ = parseClusterFromV3Yaml(yaml, avoid_boosting); scope_ = stats_.createScope(fmt::format("cluster.{}.", cluster_config_.alt_stat_name().empty() ? cluster_config_.name() : cluster_config_.alt_stat_name())); @@ -2109,7 +2238,14 @@ TEST_F(ClusterInfoImplTest, Metadata) { connect_timeout: 0.25s type: STRICT_DNS lb_policy: MAGLEV - hosts: [{ socket_address: { address: foo.bar.com, port_value: 443 }}] + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 443 metadata: { filter_metadata: { com.bar.foo: { baz: test_value }, baz: {name: meh } } } common_lb_config: @@ -2139,7 +2275,14 @@ TEST_F(ClusterInfoImplTest, EdsServiceNamePopulation) { lb_policy: MAGLEV eds_cluster_config: service_name: service_foo - hosts: [{ socket_address: { address: foo.bar.com, port_value: 443 }}] + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 443 common_lb_config: healthy_panic_threshold: value: 0.3 @@ -2154,7 +2297,14 @@ TEST_F(ClusterInfoImplTest, EdsServiceNamePopulation) { lb_policy: MAGLEV eds_cluster_config: service_name: service_foo - hosts: [{ socket_address: { address: foo.bar.com, port_value: 443 }}] + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 443 common_lb_config: healthy_panic_threshold: value: 0.3 @@ -2170,7 +2320,14 @@ TEST_F(ClusterInfoImplTest, BrokenTypedMetadata) { connect_timeout: 0.25s type: STRICT_DNS lb_policy: MAGLEV - hosts: [{ socket_address: { address: foo.bar.com, port_value: 443 }}] + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 443 metadata: { filter_metadata: { com.bar.foo: { baz: test_value }, baz: {boom: meh} } } common_lb_config: @@ -2191,12 +2348,22 @@ TEST_F(ClusterInfoImplTest, ExtensionProtocolOptionsForUnknownFilter) { connect_timeout: 0.25s type: STRICT_DNS lb_policy: ROUND_ROBIN - hosts: [{ socket_address: { address: foo.bar.com, port_value: 443 }}] - extension_protocol_options: - no_such_filter: { option: value } + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 443 + typed_extension_protocol_options: + no_such_filter: + "@type": type.googleapis.com/google.protobuf.Struct + value: + option: "value" )EOF"; - EXPECT_THROW_WITH_MESSAGE(makeCluster(yaml), EnvoyException, + EXPECT_THROW_WITH_MESSAGE(makeCluster(yaml, false), EnvoyException, "Didn't find a registered network or http filter implementation for " "name: 'no_such_filter'"); } @@ -2207,7 +2374,14 @@ TEST_F(ClusterInfoImplTest, TypedExtensionProtocolOptionsForUnknownFilter) { connect_timeout: 0.25s type: STRICT_DNS lb_policy: ROUND_ROBIN - hosts: [{ socket_address: { address: foo.bar.com, port_value: 443 }}] + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 443 typed_extension_protocol_options: no_such_filter: "@type": 
type.googleapis.com/google.protobuf.Struct @@ -2218,6 +2392,7 @@ TEST_F(ClusterInfoImplTest, TypedExtensionProtocolOptionsForUnknownFilter) { "name: 'no_such_filter'"); } +// This test case can't be converted for V3 API as it is specific for extension_protocol_options TEST_F(ClusterInfoImplTest, OneofExtensionProtocolOptionsForUnknownFilter) { const std::string yaml = R"EOF( name: name @@ -2232,7 +2407,7 @@ TEST_F(ClusterInfoImplTest, OneofExtensionProtocolOptionsForUnknownFilter) { "@type": type.googleapis.com/google.protobuf.Struct )EOF"; - EXPECT_THROW_WITH_MESSAGE(makeCluster(yaml), EnvoyException, + EXPECT_THROW_WITH_MESSAGE(makeCluster(yaml, false), EnvoyException, "Only one of typed_extension_protocol_options or " "extension_protocol_options can be specified"); } @@ -2289,7 +2464,14 @@ TEST_F(ClusterInfoImplTest, Timeouts) { connect_timeout: 0.25s type: STRICT_DNS lb_policy: MAGLEV - hosts: [{ socket_address: { address: foo.bar.com, port_value: 443 }}] + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 443 metadata: { filter_metadata: { com.bar.foo: { baz: test_value }, baz: {name: meh } } } common_lb_config: @@ -2469,22 +2651,32 @@ TEST_F(ClusterInfoImplTest, ExtensionProtocolOptionsForFilterWithoutOptions) { connect_timeout: 0.25s type: STRICT_DNS lb_policy: ROUND_ROBIN - hosts: [{ socket_address: { address: foo.bar.com, port_value: 443 }}] - extension_protocol_options: - envoy.test.filter: { option: value } + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 443 + typed_extension_protocol_options: + envoy.test.filter: + "@type": type.googleapis.com/google.protobuf.Struct + value: + option: "value" )EOF"; { TestNetworkFilterConfigFactory factory(factoryBase); Registry::InjectFactory registry( factory); - EXPECT_THROW_WITH_MESSAGE(makeCluster(yaml), EnvoyException, + EXPECT_THROW_WITH_MESSAGE(makeCluster(yaml, false), EnvoyException, "filter envoy.test.filter does not support protocol options"); } { TestHttpFilterConfigFactory factory(factoryBase); Registry::InjectFactory registry(factory); - EXPECT_THROW_WITH_MESSAGE(makeCluster(yaml), EnvoyException, + EXPECT_THROW_WITH_MESSAGE(makeCluster(yaml, false), EnvoyException, "filter envoy.test.filter does not support protocol options"); } } @@ -2500,7 +2692,14 @@ TEST_F(ClusterInfoImplTest, TypedExtensionProtocolOptionsForFilterWithoutOptions connect_timeout: 0.25s type: STRICT_DNS lb_policy: ROUND_ROBIN - hosts: [{ socket_address: { address: foo.bar.com, port_value: 443 }}] + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 443 typed_extension_protocol_options: envoy.test.filter: { "@type": type.googleapis.com/google.protobuf.Struct } )EOF"; @@ -2538,9 +2737,19 @@ TEST_F(ClusterInfoImplTest, ExtensionProtocolOptionsForFilterWithOptions) { connect_timeout: 0.25s type: STRICT_DNS lb_policy: ROUND_ROBIN - hosts: [{ socket_address: { address: foo.bar.com, port_value: 443 }}] - extension_protocol_options: - envoy.test.filter: { option: "value" } + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 443 + typed_extension_protocol_options: + envoy.test.filter: + "@type": type.googleapis.com/google.protobuf.Struct + value: + option: "value" )EOF"; const std::string typed_yaml = R"EOF( @@ -2548,7 +2757,14 @@ TEST_F(ClusterInfoImplTest, 
ExtensionProtocolOptionsForFilterWithOptions) { connect_timeout: 0.25s type: STRICT_DNS lb_policy: ROUND_ROBIN - hosts: [{ socket_address: { address: foo.bar.com, port_value: 443 }}] + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 443 typed_extension_protocol_options: envoy.test.filter: "@type": type.googleapis.com/google.protobuf.Struct diff --git a/test/common/upstream/utility.h b/test/common/upstream/utility.h index aba493bc22df..742efed4e9d3 100644 --- a/test/common/upstream/utility.h +++ b/test/common/upstream/utility.h @@ -56,9 +56,10 @@ inline envoy::config::cluster::v3::Cluster parseClusterFromV2Json(const std::str return cluster; } -inline envoy::config::cluster::v3::Cluster parseClusterFromV2Yaml(const std::string& yaml) { +inline envoy::config::cluster::v3::Cluster parseClusterFromV3Yaml(const std::string& yaml, + bool avoid_boosting = true) { envoy::config::cluster::v3::Cluster cluster; - TestUtility::loadFromYaml(yaml, cluster, true); + TestUtility::loadFromYaml(yaml, cluster, true, avoid_boosting); return cluster; } diff --git a/test/extensions/clusters/aggregate/cluster_test.cc b/test/extensions/clusters/aggregate/cluster_test.cc index 916fe3df8b10..2d3848206aae 100644 --- a/test/extensions/clusters/aggregate/cluster_test.cc +++ b/test/extensions/clusters/aggregate/cluster_test.cc @@ -88,7 +88,7 @@ class AggregateClusterTest : public testing::Test { void initialize(const std::string& yaml_config) { envoy::config::cluster::v3::Cluster cluster_config = - Upstream::parseClusterFromV2Yaml(yaml_config); + Upstream::parseClusterFromV3Yaml(yaml_config); envoy::extensions::clusters::aggregate::v3::ClusterConfig config; Config::Utility::translateOpaqueConfig(cluster_config.cluster_type().typed_config(), ProtobufWkt::Struct::default_instance(), diff --git a/test/extensions/clusters/dynamic_forward_proxy/cluster_test.cc b/test/extensions/clusters/dynamic_forward_proxy/cluster_test.cc index 2a90f3ee3878..08570d556bbe 100644 --- a/test/extensions/clusters/dynamic_forward_proxy/cluster_test.cc +++ b/test/extensions/clusters/dynamic_forward_proxy/cluster_test.cc @@ -30,7 +30,7 @@ class ClusterTest : public testing::Test, public: void initialize(const std::string& yaml_config, bool uses_tls) { envoy::config::cluster::v3::Cluster cluster_config = - Upstream::parseClusterFromV2Yaml(yaml_config); + Upstream::parseClusterFromV3Yaml(yaml_config); envoy::extensions::clusters::dynamic_forward_proxy::v3::ClusterConfig config; Config::Utility::translateOpaqueConfig(cluster_config.cluster_type().typed_config(), ProtobufWkt::Struct::default_instance(), @@ -199,9 +199,9 @@ TEST_F(ClusterTest, PopulatedCache) { class ClusterFactoryTest : public testing::Test { protected: - void createCluster(const std::string& yaml_config) { + void createCluster(const std::string& yaml_config, bool avoid_boosting = true) { envoy::config::cluster::v3::Cluster cluster_config = - Upstream::parseClusterFromV2Yaml(yaml_config); + Upstream::parseClusterFromV3Yaml(yaml_config, avoid_boosting); Upstream::ClusterFactoryContextImpl cluster_factory_context( cm_, stats_store_, tls_, nullptr, ssl_context_manager_, runtime_, random_, dispatcher_, log_manager_, local_info_, admin_, singleton_manager_, nullptr, true, validation_visitor_, @@ -250,7 +250,7 @@ connect_timeout: 0.25s )EOF"); EXPECT_THROW_WITH_MESSAGE( - createCluster(yaml_config), EnvoyException, + createCluster(yaml_config, false), EnvoyException, "dynamic_forward_proxy cluster cannot 
configure 'sni' or 'verify_subject_alt_name'"); } @@ -274,7 +274,7 @@ connect_timeout: 0.25s )EOF"); EXPECT_THROW_WITH_MESSAGE( - createCluster(yaml_config), EnvoyException, + createCluster(yaml_config, false), EnvoyException, "dynamic_forward_proxy cluster cannot configure 'sni' or 'verify_subject_alt_name'"); } diff --git a/test/extensions/clusters/redis/redis_cluster_test.cc b/test/extensions/clusters/redis/redis_cluster_test.cc index f630cdbd1a28..7c6a8cadf47d 100644 --- a/test/extensions/clusters/redis/redis_cluster_test.cc +++ b/test/extensions/clusters/redis/redis_cluster_test.cc @@ -41,10 +41,14 @@ const std::string BasicConfig = R"EOF( name: name connect_timeout: 0.25s dns_lookup_family: V4_ONLY - hosts: - - socket_address: - address: foo.bar.com - port_value: 22120 + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 22120 cluster_type: name: envoy.clusters.redis typed_config: @@ -85,10 +89,10 @@ class RedisClusterTest : public testing::Test, return addresses; } - void setupFromV2Yaml(const std::string& yaml) { + void setupFromV3Yaml(const std::string& yaml) { expectRedisSessionCreated(); NiceMock cm; - envoy::config::cluster::v3::Cluster cluster_config = Upstream::parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = Upstream::parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_store_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); @@ -118,7 +122,7 @@ class RedisClusterTest : public testing::Test, void setupFactoryFromV2Yaml(const std::string& yaml) { NiceMock cm; - envoy::config::cluster::v3::Cluster cluster_config = Upstream::parseClusterFromV2Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = Upstream::parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_store_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? 
cluster_config.name() : cluster_config.alt_stat_name())); @@ -453,7 +457,7 @@ class RedisClusterTest : public testing::Test, } void testBasicSetup(const std::string& config, const std::string& expected_discovery_address) { - setupFromV2Yaml(config); + setupFromV3Yaml(config); const std::list resolved_addresses{"127.0.0.1", "127.0.0.2"}; expectResolveDiscovery(Network::DnsLookupFamily::V4Only, expected_discovery_address, resolved_addresses); @@ -615,10 +619,14 @@ TEST_P(RedisDnsParamTest, ImmediateResolveDns) { connect_timeout: 0.25s )EOF" + std::get<0>(GetParam()) + R"EOF( - hosts: - - socket_address: - address: foo.bar.com - port_value: 22120 + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 22120 cluster_type: name: envoy.clusters.redis typed_config: @@ -628,7 +636,7 @@ TEST_P(RedisDnsParamTest, ImmediateResolveDns) { cluster_refresh_timeout: 0.25s )EOF"; - setupFromV2Yaml(config); + setupFromV3Yaml(config); expectRedisResolve(true); EXPECT_CALL(*dns_resolver_, resolve("foo.bar.com", std::get<1>(GetParam()), _)) @@ -652,7 +660,7 @@ TEST_P(RedisDnsParamTest, ImmediateResolveDns) { TEST_F(RedisClusterTest, EmptyDnsResponse) { Event::MockTimer* dns_timer = new NiceMock(&dispatcher_); - setupFromV2Yaml(BasicConfig); + setupFromV3Yaml(BasicConfig); const std::list resolved_addresses{}; EXPECT_CALL(*dns_timer, enableTimer(_, _)); expectResolveDiscovery(Network::DnsLookupFamily::V4Only, "foo.bar.com", resolved_addresses); @@ -676,7 +684,7 @@ TEST_F(RedisClusterTest, EmptyDnsResponse) { TEST_F(RedisClusterTest, FailedDnsResponse) { Event::MockTimer* dns_timer = new NiceMock(&dispatcher_); - setupFromV2Yaml(BasicConfig); + setupFromV3Yaml(BasicConfig); const std::list resolved_addresses{}; EXPECT_CALL(*dns_timer, enableTimer(_, _)); expectResolveDiscovery(Network::DnsLookupFamily::V4Only, "foo.bar.com", resolved_addresses, @@ -733,7 +741,7 @@ TEST_F(RedisClusterTest, Basic) { } TEST_F(RedisClusterTest, RedisResolveFailure) { - setupFromV2Yaml(BasicConfig); + setupFromV3Yaml(BasicConfig); const std::list resolved_addresses{"127.0.0.1", "127.0.0.2"}; expectResolveDiscovery(Network::DnsLookupFamily::V4Only, "foo.bar.com", resolved_addresses); expectRedisResolve(true); @@ -767,10 +775,14 @@ TEST_F(RedisClusterTest, FactoryInitNotRedisClusterTypeFailure) { name: name connect_timeout: 0.25s dns_lookup_family: V4_ONLY - hosts: - - socket_address: - address: foo.bar.com - port_value: 22120 + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 22120 cluster_type: name: envoy.clusters.memcached typed_config: @@ -789,7 +801,7 @@ TEST_F(RedisClusterTest, FactoryInitRedisClusterTypeSuccess) { } TEST_F(RedisClusterTest, RedisErrorResponse) { - setupFromV2Yaml(BasicConfig); + setupFromV3Yaml(BasicConfig); const std::list resolved_addresses{"127.0.0.1", "127.0.0.2"}; expectResolveDiscovery(Network::DnsLookupFamily::V4Only, "foo.bar.com", resolved_addresses); expectRedisResolve(true); @@ -844,7 +856,7 @@ TEST_F(RedisClusterTest, RedisErrorResponse) { } TEST_F(RedisClusterTest, RedisReplicaErrorResponse) { - setupFromV2Yaml(BasicConfig); + setupFromV3Yaml(BasicConfig); const std::list resolved_addresses{"127.0.0.1", "127.0.0.2"}; expectResolveDiscovery(Network::DnsLookupFamily::V4Only, "foo.bar.com", resolved_addresses); expectRedisResolve(true); @@ -881,7 +893,7 @@ TEST_F(RedisClusterTest, RedisReplicaErrorResponse) { } TEST_F(RedisClusterTest, 
DnsDiscoveryResolverBasic) { - setupFromV2Yaml(BasicConfig); + setupFromV3Yaml(BasicConfig); testDnsResolve("foo.bar.com", 22120); } @@ -890,13 +902,19 @@ TEST_F(RedisClusterTest, MultipleDnsDiscovery) { name: name connect_timeout: 0.25s dns_lookup_family: V4_ONLY - hosts: - - socket_address: - address: foo.bar.com - port_value: 22120 - - socket_address: - address: foo1.bar.com - port_value: 22120 + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 22120 + - endpoint: + address: + socket_address: + address: foo1.bar.com + port_value: 22120 cluster_type: name: envoy.clusters.redis typed_config: @@ -906,7 +924,7 @@ TEST_F(RedisClusterTest, MultipleDnsDiscovery) { cluster_refresh_timeout: 0.25s )EOF"; - setupFromV2Yaml(config); + setupFromV3Yaml(config); // Only single in-flight "cluster slots" call. expectRedisResolve(true); @@ -937,7 +955,7 @@ TEST_F(RedisClusterTest, MultipleDnsDiscovery) { } TEST_F(RedisClusterTest, HostRemovalAfterHcFail) { - setupFromV2Yaml(BasicConfig); + setupFromV3Yaml(BasicConfig); auto health_checker = std::make_shared(); EXPECT_CALL(*health_checker, start()); EXPECT_CALL(*health_checker, addHostCheckCompleteCb(_)).Times(2); diff --git a/test/test_common/utility.h b/test/test_common/utility.h index e95302726494..0727b98729c7 100644 --- a/test/test_common/utility.h +++ b/test/test_common/utility.h @@ -553,7 +553,6 @@ class TestUtility { bool preserve_original_type = false, bool avoid_boosting = false) { MessageUtil::loadFromYaml(yaml, message, ProtobufMessage::getStrictValidationVisitor(), !avoid_boosting); - if (!preserve_original_type) { Config::VersionConverter::eraseOriginalTypeInformation(message); } From b1e62a3f3453ee79f29a8f7a545786f7e419fc11 Mon Sep 17 00:00:00 2001 From: Phil Genera Date: Thu, 9 Jul 2020 14:48:06 -0400 Subject: [PATCH 577/909] eds: decrease computational complexity of updates (#11442) Makes BaseDynamicClusterImpl::updateDynamicHostList O(n) rather than O(n^2) Instead of calling .erase() on list iterators as we find them, we swap with the end of the list and erase after iterating over the list. This shows a ~3x improvement in execution time in the included benchmark test. Risk Level: Medium. No reordering happens to the endpoint list. Not runtime guarded. Testing: New benchmark, existing unit tests pass (and cover the affected function). Docs Changes: N/A Release Notes: N/A Relates to #2874 #11362 Signed-off-by: Phil Genera --- bazel/test_for_benchmark_wrapper.sh | 6 +- source/common/upstream/upstream_impl.cc | 58 +++++++------ test/benchmark/BUILD | 2 + test/benchmark/main.cc | 32 ++++++- test/benchmark/main.h | 13 +++ test/common/upstream/eds_speed_test.cc | 111 ++++++++++++++++-------- 6 files changed, 155 insertions(+), 67 deletions(-) create mode 100644 test/benchmark/main.h diff --git a/bazel/test_for_benchmark_wrapper.sh b/bazel/test_for_benchmark_wrapper.sh index 7c1dc7a1def6..37de6d0d0d81 100755 --- a/bazel/test_for_benchmark_wrapper.sh +++ b/bazel/test_for_benchmark_wrapper.sh @@ -1,4 +1,6 @@ #!/bin/bash -# Set the benchmark time to 0 to just verify that the benchmark runs to completion. -"${TEST_SRCDIR}/envoy/$@" --benchmark_min_time=0 +# Set the benchmark time to 0 to just verify that the benchmark runs to +# completion. We're interacting with two different flag parsers, so the order +# of flags and the -- matters. 
+"${TEST_SRCDIR}/envoy/$@" --skip_expensive_benchmarks -- --benchmark_min_time=0 diff --git a/source/common/upstream/upstream_impl.cc b/source/common/upstream/upstream_impl.cc index 7967a7d1ba96..97776daa4901 100644 --- a/source/common/upstream/upstream_impl.cc +++ b/source/common/upstream/upstream_impl.cc @@ -1332,9 +1332,7 @@ bool BaseDynamicClusterImpl::updateDynamicHostList(const HostVector& new_hosts, bool hosts_changed = false; // Go through and see if the list we have is different from what we just got. If it is, we make a - // new host list and raise a change notification. This uses an N^2 search given that this does not - // happen very often and the list sizes should be small (see - // https://github.com/envoyproxy/envoy/issues/2874). We also check for duplicates here. It's + // new host list and raise a change notification. We also check for duplicates here. It's // possible for DNS to return the same address multiple times, and a bad EDS implementation could // do the same thing. @@ -1437,16 +1435,20 @@ bool BaseDynamicClusterImpl::updateDynamicHostList(const HostVector& new_hosts, // Remove hosts from current_priority_hosts that were matched to an existing host in the previous // loop. - for (auto itr = current_priority_hosts.begin(); itr != current_priority_hosts.end();) { - auto existing_itr = existing_hosts_for_current_priority.find((*itr)->address()->asString()); + auto erase_from = + std::remove_if(current_priority_hosts.begin(), current_priority_hosts.end(), + [&existing_hosts_for_current_priority](const HostSharedPtr& p) { + auto existing_itr = + existing_hosts_for_current_priority.find(p->address()->asString()); - if (existing_itr != existing_hosts_for_current_priority.end()) { - existing_hosts_for_current_priority.erase(existing_itr); - itr = current_priority_hosts.erase(itr); - } else { - itr++; - } - } + if (existing_itr != existing_hosts_for_current_priority.end()) { + existing_hosts_for_current_priority.erase(existing_itr); + return true; + } + + return false; + }); + current_priority_hosts.erase(erase_from, current_priority_hosts.end()); // If we saw existing hosts during this iteration from a different priority, then we've moved // a host from another priority into this one, so we should mark the priority as having changed. 
@@ -1464,21 +1466,23 @@ bool BaseDynamicClusterImpl::updateDynamicHostList(const HostVector& new_hosts, const bool dont_remove_healthy_hosts = health_checker_ != nullptr && !info()->drainConnectionsOnHostRemoval(); if (!current_priority_hosts.empty() && dont_remove_healthy_hosts) { - for (auto i = current_priority_hosts.begin(); i != current_priority_hosts.end();) { - if (!((*i)->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC) || - (*i)->healthFlagGet(Host::HealthFlag::FAILED_EDS_HEALTH))) { - if ((*i)->weight() > max_host_weight) { - max_host_weight = (*i)->weight(); - } - - final_hosts.push_back(*i); - updated_hosts[(*i)->address()->asString()] = *i; - (*i)->healthFlagSet(Host::HealthFlag::PENDING_DYNAMIC_REMOVAL); - i = current_priority_hosts.erase(i); - } else { - i++; - } - } + erase_from = + std::remove_if(current_priority_hosts.begin(), current_priority_hosts.end(), + [&updated_hosts, &final_hosts, &max_host_weight](const HostSharedPtr& p) { + if (!(p->healthFlagGet(Host::HealthFlag::FAILED_ACTIVE_HC) || + p->healthFlagGet(Host::HealthFlag::FAILED_EDS_HEALTH))) { + if (p->weight() > max_host_weight) { + max_host_weight = p->weight(); + } + + final_hosts.push_back(p); + updated_hosts[p->address()->asString()] = p; + p->healthFlagSet(Host::HealthFlag::PENDING_DYNAMIC_REMOVAL); + return true; + } + return false; + }); + current_priority_hosts.erase(erase_from, current_priority_hosts.end()); } // At this point we've accounted for all the new hosts as well the hosts that previously diff --git a/test/benchmark/BUILD b/test/benchmark/BUILD index afba86c9dd22..fa01e3b1ce63 100644 --- a/test/benchmark/BUILD +++ b/test/benchmark/BUILD @@ -11,8 +11,10 @@ envoy_package() envoy_cc_test_library( name = "main", srcs = ["main.cc"], + hdrs = ["main.h"], external_deps = [ "benchmark", + "tclap", ], deps = [ "//test/test_common:environment_lib", diff --git a/test/benchmark/main.cc b/test/benchmark/main.cc index 7afdf85e6558..6c23c1031a6c 100644 --- a/test/benchmark/main.cc +++ b/test/benchmark/main.cc @@ -1,16 +1,40 @@ // NOLINT(namespace-envoy) // This is an Envoy driver for benchmarks. +#include "test/benchmark/main.h" + #include "test/test_common/environment.h" #include "benchmark/benchmark.h" +#include "tclap/CmdLine.h" + +static bool skip_expensive_benchmarks = false; -// Boilerplate main(), which discovers benchmarks and runs them. +// Boilerplate main(), which discovers benchmarks and runs them. This uses two +// different flag parsers, so the order of flags matters: flags defined here +// must be passed first, and flags defined in benchmark::Initialize second, +// separated by --. +// TODO(pgenera): convert this to abseil/flags/ when benchmark also adopts abseil. int main(int argc, char** argv) { Envoy::TestEnvironment::initializeTestMain(argv[0]); - benchmark::Initialize(&argc, argv); - if (benchmark::ReportUnrecognizedArguments(argc, argv)) { - return 1; + // NOLINTNEXTLINE(clang-analyzer-optin.cplusplus.VirtualCall) + TCLAP::CmdLine cmd("envoy-benchmark-test", ' ', "0.1"); + TCLAP::SwitchArg skip_switch("s", "skip_expensive_benchmarks", + "skip or minimize expensive benchmarks", cmd, false); + + cmd.setExceptionHandling(false); + try { + cmd.parse(argc, argv); + } catch (const TCLAP::ExitException& e) { + // parse() throws an ExitException with status 0 after printing the output + // for --help and --version. 
+ return 0; } + + skip_expensive_benchmarks = skip_switch.getValue(); + + benchmark::Initialize(&argc, argv); benchmark::RunSpecifiedBenchmarks(); } + +bool Envoy::benchmark::skipExpensiveBenchmarks() { return skip_expensive_benchmarks; } diff --git a/test/benchmark/main.h b/test/benchmark/main.h new file mode 100644 index 000000000000..efb6797a74ef --- /dev/null +++ b/test/benchmark/main.h @@ -0,0 +1,13 @@ +#pragma once + +/** + * Benchmarks can use this to skip or hurry through long-running tests in CI. + */ + +namespace Envoy { +namespace benchmark { + +bool skipExpensiveBenchmarks(); + +} +} // namespace Envoy diff --git a/test/common/upstream/eds_speed_test.cc b/test/common/upstream/eds_speed_test.cc index a03af737483e..c227dfe4f39d 100644 --- a/test/common/upstream/eds_speed_test.cc +++ b/test/common/upstream/eds_speed_test.cc @@ -16,6 +16,7 @@ #include "server/transport_socket_config_impl.h" +#include "test/benchmark/main.h" #include "test/common/upstream/utility.h" #include "test/mocks/local_info/mocks.h" #include "test/mocks/protobuf/mocks.h" @@ -28,12 +29,15 @@ #include "benchmark/benchmark.h" +using ::benchmark::State; +using Envoy::benchmark::skipExpensiveBenchmarks; + namespace Envoy { namespace Upstream { class EdsSpeedTest { public: - EdsSpeedTest(benchmark::State& state, bool v2_config) + EdsSpeedTest(State& state, bool v2_config) : state_(state), v2_config_(v2_config), type_url_(v2_config_ ? "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment" @@ -44,7 +48,26 @@ class EdsSpeedTest { local_info_, std::unique_ptr(async_client_), dispatcher_, *Protobuf::DescriptorPool::generated_pool()->FindMethodByName( "envoy.service.endpoint.v3.EndpointDiscoveryService.StreamEndpoints"), - envoy::config::core::v3::ApiVersion::AUTO, random_, stats_, {}, true)) {} + envoy::config::core::v3::ApiVersion::AUTO, random_, stats_, {}, true)) { + resetCluster(R"EOF( + name: name + connect_timeout: 0.25s + type: EDS + eds_cluster_config: + service_name: fare + eds_config: + api_config_source: + cluster_names: + - eds + refresh_delay: 1s + )EOF", + Envoy::Upstream::Cluster::InitializePhase::Secondary); + + EXPECT_CALL(*cm_.subscription_factory_.subscription_, start(_)); + cluster_->initialize([this] { initialized_ = true; }); + EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(testing::Return(&async_stream_)); + subscription_->start({"fare"}); + } void resetCluster(const std::string& yaml_config, Cluster::InitializePhase initialize_phase) { local_info_.node_.mutable_locality()->set_zone("us-east-1a"); @@ -64,30 +87,14 @@ class EdsSpeedTest { std::chrono::milliseconds(), false); } - void initialize() { - EXPECT_CALL(*cm_.subscription_factory_.subscription_, start(_)); - cluster_->initialize([this] { initialized_ = true; }); - } - // Set up an EDS config with multiple priorities, localities, weights and make sure - // they are loaded and reloaded as expected. - void priorityAndLocalityWeightedHelper(bool ignore_unknown_dynamic_fields, size_t num_hosts) { + // they are loaded as expected. 
+ void priorityAndLocalityWeightedHelper(bool ignore_unknown_dynamic_fields, size_t num_hosts, + bool healthy) { state_.PauseTiming(); + envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment; cluster_load_assignment.set_cluster_name("fare"); - resetCluster(R"EOF( - name: name - connect_timeout: 0.25s - type: EDS - eds_cluster_config: - service_name: fare - eds_config: - api_config_source: - cluster_names: - - eds - refresh_delay: 1s - )EOF", - Envoy::Upstream::Cluster::InitializePhase::Secondary); // Add a whole bunch of hosts in a single place: auto* endpoints = cluster_load_assignment.add_endpoints(); @@ -100,10 +107,14 @@ class EdsSpeedTest { uint32_t port = 1000; for (size_t i = 0; i < num_hosts; ++i) { - auto* socket_address = endpoints->add_lb_endpoints() - ->mutable_endpoint() - ->mutable_address() - ->mutable_socket_address(); + auto* lb_endpoint = endpoints->add_lb_endpoints(); + if (healthy) { + lb_endpoint->set_health_status(envoy::config::core::v3::HEALTHY); + } else { + lb_endpoint->set_health_status(envoy::config::core::v3::UNHEALTHY); + } + auto* socket_address = + lb_endpoint->mutable_endpoint()->mutable_address()->mutable_socket_address(); socket_address->set_address("10.0.1." + std::to_string(i / 60000)); socket_address->set_port_value((port + i) % 60000); } @@ -111,7 +122,6 @@ class EdsSpeedTest { // this is what we're actually testing: validation_visitor_.setSkipValidation(ignore_unknown_dynamic_fields); - initialize(); auto response = std::make_unique(); response->set_type_url(type_url_); auto* resource = response->mutable_resources()->Add(); @@ -122,16 +132,13 @@ class EdsSpeedTest { ""); resource->set_type_url("type.googleapis.com/envoy.api.v2.ClusterLoadAssignment"); } - EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(testing::Return(&async_stream_)); - subscription_->start({"fare"}); state_.ResumeTiming(); grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(response)); - ASSERT(initialized_); ASSERT(cluster_->prioritySet().hostSetsPerPriority()[1]->hostsPerLocality().get()[0].size() == num_hosts); } - benchmark::State& state_; + State& state_; const bool v2_config_; const std::string type_url_; bool initialized_{}; @@ -162,14 +169,50 @@ class EdsSpeedTest { } // namespace Upstream } // namespace Envoy -static void priorityAndLocalityWeighted(benchmark::State& state) { +static void priorityAndLocalityWeighted(State& state) { Envoy::Thread::MutexBasicLockable lock; Envoy::Logger::Context logging_state(spdlog::level::warn, Envoy::Logger::Logger::DEFAULT_LOG_FORMAT, lock, false); for (auto _ : state) { Envoy::Upstream::EdsSpeedTest speed_test(state, state.range(0)); - speed_test.priorityAndLocalityWeightedHelper(state.range(1), state.range(2)); + // if we've been instructed to skip tests, only run once no matter the argument: + uint32_t endpoints = skipExpensiveBenchmarks() ? 1 : state.range(2); + + speed_test.priorityAndLocalityWeightedHelper(state.range(1), endpoints, true); + } +} + +BENCHMARK(priorityAndLocalityWeighted) + ->Ranges({{false, true}, {false, true}, {1, 100000}}) + ->Unit(benchmark::kMillisecond); + +static void duplicateUpdate(State& state) { + Envoy::Thread::MutexBasicLockable lock; + Envoy::Logger::Context logging_state(spdlog::level::warn, + Envoy::Logger::Logger::DEFAULT_LOG_FORMAT, lock, false); + + for (auto _ : state) { + Envoy::Upstream::EdsSpeedTest speed_test(state, false); + uint32_t endpoints = skipExpensiveBenchmarks() ? 
1 : state.range(0); + + speed_test.priorityAndLocalityWeightedHelper(true, endpoints, true); + speed_test.priorityAndLocalityWeightedHelper(true, endpoints, true); + } +} + +BENCHMARK(duplicateUpdate)->Range(1, 100000)->Unit(benchmark::kMillisecond); + +static void healthOnlyUpdate(State& state) { + Envoy::Thread::MutexBasicLockable lock; + Envoy::Logger::Context logging_state(spdlog::level::warn, + Envoy::Logger::Logger::DEFAULT_LOG_FORMAT, lock, false); + for (auto _ : state) { + Envoy::Upstream::EdsSpeedTest speed_test(state, false); + uint32_t endpoints = skipExpensiveBenchmarks() ? 1 : state.range(0); + + speed_test.priorityAndLocalityWeightedHelper(true, endpoints, true); + speed_test.priorityAndLocalityWeightedHelper(true, endpoints, false); } } -BENCHMARK(priorityAndLocalityWeighted)->Ranges({{false, true}, {false, true}, {2000, 100000}}); +BENCHMARK(healthOnlyUpdate)->Range(1, 100000)->Unit(benchmark::kMillisecond); From 3b8c61b5e5eb4dfddf3c4c0cae9e04eafe1ac61a Mon Sep 17 00:00:00 2001 From: Jiangtao Li Date: Thu, 9 Jul 2020 11:50:35 -0700 Subject: [PATCH 578/909] Add checked expr as oneof RBAC policy condition (#11902) Add CheckedExpr as oneof expression in RBAC proto Additional Description: CheckedExpr is type check CEL expression. It is future proof and offers better evaluation performance (not for C++ yet, but for other languages). There will be a separate PR to plumb checked expr to RBAC filter. Risk Level: low Testing: existing tests Signed-off-by: jiangtaoli2016 --- api/bazel/api_build_system.bzl | 11 +++++++++-- api/bazel/external_proto_deps.bzl | 4 ++++ api/envoy/config/rbac/v3/BUILD | 1 + api/envoy/config/rbac/v3/rbac.proto | 12 +++++++++++- api/envoy/config/rbac/v4alpha/BUILD | 1 + api/envoy/config/rbac/v4alpha/rbac.proto | 17 +++++++++++++---- generated_api_shadow/bazel/api_build_system.bzl | 11 +++++++++-- .../bazel/external_proto_deps.bzl | 4 ++++ generated_api_shadow/envoy/config/rbac/v3/BUILD | 1 + .../envoy/config/rbac/v3/rbac.proto | 12 +++++++++++- .../envoy/config/rbac/v4alpha/BUILD | 1 + .../envoy/config/rbac/v4alpha/rbac.proto | 17 +++++++++++++---- 12 files changed, 78 insertions(+), 14 deletions(-) diff --git a/api/bazel/api_build_system.bzl b/api/bazel/api_build_system.bzl index e9119b329d01..c0269d161f80 100644 --- a/api/bazel/api_build_system.bzl +++ b/api/bazel/api_build_system.bzl @@ -80,6 +80,10 @@ def py_proto_library(name, deps = [], plugin = None): if name == "annotations_py_proto": proto_deps = proto_deps + [":http_py_proto"] + # checked.proto depends on syntax.proto, we have to add this dependency manually as well. + if name == "checked_py_proto": + proto_deps = proto_deps + [":syntax_py_proto"] + # py_proto_library does not support plugin as an argument yet at gRPC v1.25.0: # https://github.com/grpc/grpc/blob/v1.25.0/bazel/python_rules.bzl#L72. # plugin should also be passed in here when gRPC version is greater than v1.25.x. @@ -172,13 +176,16 @@ def api_proto_package( if has_services: compilers = ["@io_bazel_rules_go//proto:go_grpc", "@envoy_api//bazel:pgv_plugin_go"] + # Because RBAC proro depends on googleapis syntax.proto and checked.proto, + # which share the same go proto library, it causes duplicative dependencies. + # Thus, we use depset().to_list() to remove duplicated depenencies. 
go_proto_library( name = name + _GO_PROTO_SUFFIX, compilers = compilers, importpath = _GO_IMPORTPATH_PREFIX + native.package_name(), proto = name, visibility = ["//visibility:public"], - deps = [_go_proto_mapping(dep) for dep in deps] + [ + deps = depset([_go_proto_mapping(dep) for dep in deps] + [ "@com_github_golang_protobuf//ptypes:go_default_library", "@com_github_golang_protobuf//ptypes/any:go_default_library", "@com_github_golang_protobuf//ptypes/duration:go_default_library", @@ -188,5 +195,5 @@ def api_proto_package( "@com_envoyproxy_protoc_gen_validate//validate:go_default_library", "@com_google_googleapis//google/api:annotations_go_proto", "@com_google_googleapis//google/rpc:status_go_proto", - ], + ]).to_list(), ) diff --git a/api/bazel/external_proto_deps.bzl b/api/bazel/external_proto_deps.bzl index 514093abef90..659c7a72d73e 100644 --- a/api/bazel/external_proto_deps.bzl +++ b/api/bazel/external_proto_deps.bzl @@ -9,6 +9,7 @@ # external dependencies. Since BUILD files are generated, this is the canonical # place to define this mapping. EXTERNAL_PROTO_IMPORT_BAZEL_DEP_MAP = { + "google/api/expr/v1alpha1/checked.proto": "@com_google_googleapis//google/api/expr/v1alpha1:checked_proto", "google/api/expr/v1alpha1/syntax.proto": "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto", "metrics.proto": "@prometheus_metrics_model//:client_model", "opencensus/proto/trace/v1/trace.proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_proto", @@ -17,6 +18,7 @@ EXTERNAL_PROTO_IMPORT_BAZEL_DEP_MAP = { # This maps from the Bazel proto_library target to the Go language binding target for external dependencies. EXTERNAL_PROTO_GO_BAZEL_DEP_MAP = { + "@com_google_googleapis//google/api/expr/v1alpha1:checked_proto": "@com_google_googleapis//google/api/expr/v1alpha1:expr_go_proto", "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto": "@com_google_googleapis//google/api/expr/v1alpha1:expr_go_proto", "@opencensus_proto//opencensus/proto/trace/v1:trace_proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_proto_go", "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_and_config_proto_go", @@ -24,6 +26,7 @@ EXTERNAL_PROTO_GO_BAZEL_DEP_MAP = { # This maps from the Bazel proto_library target to the C++ language binding target for external dependencies. EXTERNAL_PROTO_CC_BAZEL_DEP_MAP = { + "@com_google_googleapis//google/api/expr/v1alpha1:checked_proto": "@com_google_googleapis//google/api/expr/v1alpha1:checked_cc_proto", "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto": "@com_google_googleapis//google/api/expr/v1alpha1:syntax_cc_proto", "@opencensus_proto//opencensus/proto/trace/v1:trace_proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_proto_cc", "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto_cc", @@ -31,6 +34,7 @@ EXTERNAL_PROTO_CC_BAZEL_DEP_MAP = { # This maps from the Bazel proto_library target to the Python language binding target for external dependencies. 
EXTERNAL_PROTO_PY_BAZEL_DEP_MAP = { + "@com_google_googleapis//google/api/expr/v1alpha1:checked_proto": "@com_google_googleapis//google/api/expr/v1alpha1:checked_py_proto", "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto": "@com_google_googleapis//google/api/expr/v1alpha1:syntax_py_proto", "@opencensus_proto//opencensus/proto/trace/v1:trace_proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_proto_py", "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto_py", diff --git a/api/envoy/config/rbac/v3/BUILD b/api/envoy/config/rbac/v3/BUILD index bef4331a1e65..ce88bd5e6c62 100644 --- a/api/envoy/config/rbac/v3/BUILD +++ b/api/envoy/config/rbac/v3/BUILD @@ -11,6 +11,7 @@ api_proto_package( "//envoy/config/route/v3:pkg", "//envoy/type/matcher/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_google_googleapis//google/api/expr/v1alpha1:checked_proto", "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto", ], ) diff --git a/api/envoy/config/rbac/v3/rbac.proto b/api/envoy/config/rbac/v3/rbac.proto index 040f537d1f5c..10520b1ba38f 100644 --- a/api/envoy/config/rbac/v3/rbac.proto +++ b/api/envoy/config/rbac/v3/rbac.proto @@ -8,8 +8,10 @@ import "envoy/type/matcher/v3/metadata.proto"; import "envoy/type/matcher/v3/path.proto"; import "envoy/type/matcher/v3/string.proto"; +import "google/api/expr/v1alpha1/checked.proto"; import "google/api/expr/v1alpha1/syntax.proto"; +import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -104,7 +106,15 @@ message Policy { // An optional symbolic expression specifying an access control // :ref:`condition `. The condition is combined // with the permissions and the principals as a clause with AND semantics. - google.api.expr.v1alpha1.Expr condition = 3; + // Only be used when checked_condition is not used. + google.api.expr.v1alpha1.Expr condition = 3 + [(udpa.annotations.field_migrate).oneof_promotion = "expression_specifier"]; + + // [#not-implemented-hide:] + // An optional symbolic expression that has been successfully type checked. + // Only be used when condition is not used. + google.api.expr.v1alpha1.CheckedExpr checked_condition = 4 + [(udpa.annotations.field_migrate).oneof_promotion = "expression_specifier"]; } // Permission defines an action (or actions) that a principal can take. 
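For context, the checked_condition field added above is hidden behind [#not-implemented-hide:] until a follow-up PR plumbs it into the RBAC filter. Purely as an illustration of the intended use (the helper below is hypothetical and not part of this patch), a config-loading path could carry an offline type-checked CEL expression in the new field via the generated C++ API:

    #include <cassert>

    #include "envoy/config/rbac/v3/rbac.pb.h"
    #include "google/api/expr/v1alpha1/checked.pb.h"

    // Sketch: build a Policy whose condition is an already type-checked CEL
    // expression (CheckedExpr) rather than the parse-only Expr form.
    envoy::config::rbac::v3::Policy makeCheckedPolicy(
        const google::api::expr::v1alpha1::CheckedExpr& checked) {
      envoy::config::rbac::v3::Policy policy;
      *policy.mutable_checked_condition() = checked;
      // condition and checked_condition are documented as mutually exclusive
      // (and promoted into a oneof in v4alpha), so condition stays unset here.
      assert(!policy.has_condition());
      return policy;
    }

Setting only one of the two fields mirrors the oneof_promotion annotation that eventually turns them into a proper oneof.
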
diff --git a/api/envoy/config/rbac/v4alpha/BUILD b/api/envoy/config/rbac/v4alpha/BUILD index f0707bae6eae..be78d751372e 100644 --- a/api/envoy/config/rbac/v4alpha/BUILD +++ b/api/envoy/config/rbac/v4alpha/BUILD @@ -11,6 +11,7 @@ api_proto_package( "//envoy/config/route/v4alpha:pkg", "//envoy/type/matcher/v4alpha:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_google_googleapis//google/api/expr/v1alpha1:checked_proto", "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto", ], ) diff --git a/api/envoy/config/rbac/v4alpha/rbac.proto b/api/envoy/config/rbac/v4alpha/rbac.proto index 3ca9f7f08a72..11b69b16e679 100644 --- a/api/envoy/config/rbac/v4alpha/rbac.proto +++ b/api/envoy/config/rbac/v4alpha/rbac.proto @@ -8,6 +8,7 @@ import "envoy/type/matcher/v4alpha/metadata.proto"; import "envoy/type/matcher/v4alpha/path.proto"; import "envoy/type/matcher/v4alpha/string.proto"; +import "google/api/expr/v1alpha1/checked.proto"; import "google/api/expr/v1alpha1/syntax.proto"; import "udpa/annotations/status.proto"; @@ -101,10 +102,18 @@ message Policy { // Principal with the `any` field set to true should be used. repeated Principal principals = 2 [(validate.rules).repeated = {min_items: 1}]; - // An optional symbolic expression specifying an access control - // :ref:`condition `. The condition is combined - // with the permissions and the principals as a clause with AND semantics. - google.api.expr.v1alpha1.Expr condition = 3; + oneof expression_specifier { + // An optional symbolic expression specifying an access control + // :ref:`condition `. The condition is combined + // with the permissions and the principals as a clause with AND semantics. + // Only be used when checked_condition is not used. + google.api.expr.v1alpha1.Expr condition = 3; + + // [#not-implemented-hide:] + // An optional symbolic expression that has been successfully type checked. + // Only be used when condition is not used. + google.api.expr.v1alpha1.CheckedExpr checked_condition = 4; + } } // Permission defines an action (or actions) that a principal can take. diff --git a/generated_api_shadow/bazel/api_build_system.bzl b/generated_api_shadow/bazel/api_build_system.bzl index e9119b329d01..c0269d161f80 100644 --- a/generated_api_shadow/bazel/api_build_system.bzl +++ b/generated_api_shadow/bazel/api_build_system.bzl @@ -80,6 +80,10 @@ def py_proto_library(name, deps = [], plugin = None): if name == "annotations_py_proto": proto_deps = proto_deps + [":http_py_proto"] + # checked.proto depends on syntax.proto, we have to add this dependency manually as well. + if name == "checked_py_proto": + proto_deps = proto_deps + [":syntax_py_proto"] + # py_proto_library does not support plugin as an argument yet at gRPC v1.25.0: # https://github.com/grpc/grpc/blob/v1.25.0/bazel/python_rules.bzl#L72. # plugin should also be passed in here when gRPC version is greater than v1.25.x. @@ -172,13 +176,16 @@ def api_proto_package( if has_services: compilers = ["@io_bazel_rules_go//proto:go_grpc", "@envoy_api//bazel:pgv_plugin_go"] + # Because RBAC proro depends on googleapis syntax.proto and checked.proto, + # which share the same go proto library, it causes duplicative dependencies. + # Thus, we use depset().to_list() to remove duplicated depenencies. 
go_proto_library( name = name + _GO_PROTO_SUFFIX, compilers = compilers, importpath = _GO_IMPORTPATH_PREFIX + native.package_name(), proto = name, visibility = ["//visibility:public"], - deps = [_go_proto_mapping(dep) for dep in deps] + [ + deps = depset([_go_proto_mapping(dep) for dep in deps] + [ "@com_github_golang_protobuf//ptypes:go_default_library", "@com_github_golang_protobuf//ptypes/any:go_default_library", "@com_github_golang_protobuf//ptypes/duration:go_default_library", @@ -188,5 +195,5 @@ def api_proto_package( "@com_envoyproxy_protoc_gen_validate//validate:go_default_library", "@com_google_googleapis//google/api:annotations_go_proto", "@com_google_googleapis//google/rpc:status_go_proto", - ], + ]).to_list(), ) diff --git a/generated_api_shadow/bazel/external_proto_deps.bzl b/generated_api_shadow/bazel/external_proto_deps.bzl index 514093abef90..659c7a72d73e 100644 --- a/generated_api_shadow/bazel/external_proto_deps.bzl +++ b/generated_api_shadow/bazel/external_proto_deps.bzl @@ -9,6 +9,7 @@ # external dependencies. Since BUILD files are generated, this is the canonical # place to define this mapping. EXTERNAL_PROTO_IMPORT_BAZEL_DEP_MAP = { + "google/api/expr/v1alpha1/checked.proto": "@com_google_googleapis//google/api/expr/v1alpha1:checked_proto", "google/api/expr/v1alpha1/syntax.proto": "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto", "metrics.proto": "@prometheus_metrics_model//:client_model", "opencensus/proto/trace/v1/trace.proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_proto", @@ -17,6 +18,7 @@ EXTERNAL_PROTO_IMPORT_BAZEL_DEP_MAP = { # This maps from the Bazel proto_library target to the Go language binding target for external dependencies. EXTERNAL_PROTO_GO_BAZEL_DEP_MAP = { + "@com_google_googleapis//google/api/expr/v1alpha1:checked_proto": "@com_google_googleapis//google/api/expr/v1alpha1:expr_go_proto", "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto": "@com_google_googleapis//google/api/expr/v1alpha1:expr_go_proto", "@opencensus_proto//opencensus/proto/trace/v1:trace_proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_proto_go", "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_and_config_proto_go", @@ -24,6 +26,7 @@ EXTERNAL_PROTO_GO_BAZEL_DEP_MAP = { # This maps from the Bazel proto_library target to the C++ language binding target for external dependencies. EXTERNAL_PROTO_CC_BAZEL_DEP_MAP = { + "@com_google_googleapis//google/api/expr/v1alpha1:checked_proto": "@com_google_googleapis//google/api/expr/v1alpha1:checked_cc_proto", "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto": "@com_google_googleapis//google/api/expr/v1alpha1:syntax_cc_proto", "@opencensus_proto//opencensus/proto/trace/v1:trace_proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_proto_cc", "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto_cc", @@ -31,6 +34,7 @@ EXTERNAL_PROTO_CC_BAZEL_DEP_MAP = { # This maps from the Bazel proto_library target to the Python language binding target for external dependencies. 
EXTERNAL_PROTO_PY_BAZEL_DEP_MAP = { + "@com_google_googleapis//google/api/expr/v1alpha1:checked_proto": "@com_google_googleapis//google/api/expr/v1alpha1:checked_py_proto", "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto": "@com_google_googleapis//google/api/expr/v1alpha1:syntax_py_proto", "@opencensus_proto//opencensus/proto/trace/v1:trace_proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_proto_py", "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto_py", diff --git a/generated_api_shadow/envoy/config/rbac/v3/BUILD b/generated_api_shadow/envoy/config/rbac/v3/BUILD index bef4331a1e65..ce88bd5e6c62 100644 --- a/generated_api_shadow/envoy/config/rbac/v3/BUILD +++ b/generated_api_shadow/envoy/config/rbac/v3/BUILD @@ -11,6 +11,7 @@ api_proto_package( "//envoy/config/route/v3:pkg", "//envoy/type/matcher/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_google_googleapis//google/api/expr/v1alpha1:checked_proto", "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto", ], ) diff --git a/generated_api_shadow/envoy/config/rbac/v3/rbac.proto b/generated_api_shadow/envoy/config/rbac/v3/rbac.proto index 040f537d1f5c..10520b1ba38f 100644 --- a/generated_api_shadow/envoy/config/rbac/v3/rbac.proto +++ b/generated_api_shadow/envoy/config/rbac/v3/rbac.proto @@ -8,8 +8,10 @@ import "envoy/type/matcher/v3/metadata.proto"; import "envoy/type/matcher/v3/path.proto"; import "envoy/type/matcher/v3/string.proto"; +import "google/api/expr/v1alpha1/checked.proto"; import "google/api/expr/v1alpha1/syntax.proto"; +import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -104,7 +106,15 @@ message Policy { // An optional symbolic expression specifying an access control // :ref:`condition `. The condition is combined // with the permissions and the principals as a clause with AND semantics. - google.api.expr.v1alpha1.Expr condition = 3; + // Only be used when checked_condition is not used. + google.api.expr.v1alpha1.Expr condition = 3 + [(udpa.annotations.field_migrate).oneof_promotion = "expression_specifier"]; + + // [#not-implemented-hide:] + // An optional symbolic expression that has been successfully type checked. + // Only be used when condition is not used. + google.api.expr.v1alpha1.CheckedExpr checked_condition = 4 + [(udpa.annotations.field_migrate).oneof_promotion = "expression_specifier"]; } // Permission defines an action (or actions) that a principal can take. 
diff --git a/generated_api_shadow/envoy/config/rbac/v4alpha/BUILD b/generated_api_shadow/envoy/config/rbac/v4alpha/BUILD index f0707bae6eae..be78d751372e 100644 --- a/generated_api_shadow/envoy/config/rbac/v4alpha/BUILD +++ b/generated_api_shadow/envoy/config/rbac/v4alpha/BUILD @@ -11,6 +11,7 @@ api_proto_package( "//envoy/config/route/v4alpha:pkg", "//envoy/type/matcher/v4alpha:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_google_googleapis//google/api/expr/v1alpha1:checked_proto", "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto", ], ) diff --git a/generated_api_shadow/envoy/config/rbac/v4alpha/rbac.proto b/generated_api_shadow/envoy/config/rbac/v4alpha/rbac.proto index 8bab830607b6..3d8dae2402ea 100644 --- a/generated_api_shadow/envoy/config/rbac/v4alpha/rbac.proto +++ b/generated_api_shadow/envoy/config/rbac/v4alpha/rbac.proto @@ -8,6 +8,7 @@ import "envoy/type/matcher/v4alpha/metadata.proto"; import "envoy/type/matcher/v4alpha/path.proto"; import "envoy/type/matcher/v4alpha/string.proto"; +import "google/api/expr/v1alpha1/checked.proto"; import "google/api/expr/v1alpha1/syntax.proto"; import "udpa/annotations/status.proto"; @@ -101,10 +102,18 @@ message Policy { // Principal with the `any` field set to true should be used. repeated Principal principals = 2 [(validate.rules).repeated = {min_items: 1}]; - // An optional symbolic expression specifying an access control - // :ref:`condition `. The condition is combined - // with the permissions and the principals as a clause with AND semantics. - google.api.expr.v1alpha1.Expr condition = 3; + oneof expression_specifier { + // An optional symbolic expression specifying an access control + // :ref:`condition `. The condition is combined + // with the permissions and the principals as a clause with AND semantics. + // Only be used when checked_condition is not used. + google.api.expr.v1alpha1.Expr condition = 3; + + // [#not-implemented-hide:] + // An optional symbolic expression that has been successfully type checked. + // Only be used when condition is not used. + google.api.expr.v1alpha1.CheckedExpr checked_condition = 4; + } } // Permission defines an action (or actions) that a principal can take. From ec431a080f59b0ac46cb18111b3508f1d1f47056 Mon Sep 17 00:00:00 2001 From: Mike Schore Date: Fri, 10 Jul 2020 03:13:57 +0800 Subject: [PATCH 579/909] build: minor fix for 32-bit archs (#11726) Fixes an implicit cast error building for 32-bit Android here. 
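The implicit cast comes from byte counts carried as uint64_t flowing into interfaces whose natural width is size_t (32 bits on these targets). A minimal sketch of the shape of the problem and of the fix, not taken from the patch itself:

    #include <cstddef>
    #include <cstdint>

    #include "absl/strings/string_view.h"

    // A length carried as uint64_t cannot be brace-passed to string_view's
    // (const char*, size_t) constructor on a 32-bit target: that is a
    // narrowing conversion, which braced initialization rejects.
    absl::string_view viewFrom64(const char* data, uint64_t len) {
      // return {data, len};                     // fails to compile on 32-bit builds
      return {data, static_cast<size_t>(len)};   // explicit cast silences the narrowing
    }

    // Keeping the length as size_t end to end (what this patch does for
    // StatName sizes) avoids the conversion entirely.
    absl::string_view viewFromSize(const char* data, size_t len) {
      return {data, len};
    }
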
Signed-off-by: Mike Schore --- source/common/stats/symbol_table_impl.cc | 41 ++++++++++++------------ source/common/stats/symbol_table_impl.h | 37 ++++++++++----------- 2 files changed, 39 insertions(+), 39 deletions(-) diff --git a/source/common/stats/symbol_table_impl.cc b/source/common/stats/symbol_table_impl.cc index a36a4a2a9681..bf36b088f44e 100644 --- a/source/common/stats/symbol_table_impl.cc +++ b/source/common/stats/symbol_table_impl.cc @@ -31,7 +31,7 @@ static constexpr uint32_t Low7Bits = 0x7f; static constexpr Symbol FirstValidSymbol = 1; static constexpr uint8_t LiteralStringIndicator = 0; -uint64_t StatName::dataSize() const { +size_t StatName::dataSize() const { if (size_and_data_ == nullptr) { return 0; } @@ -46,9 +46,9 @@ void StatName::debugPrint() { if (size_and_data_ == nullptr) { std::cerr << "Null StatName" << std::endl; } else { - const uint64_t nbytes = dataSize(); + const size_t nbytes = dataSize(); std::cerr << "dataSize=" << nbytes << ":"; - for (uint64_t i = 0; i < nbytes; ++i) { + for (size_t i = 0; i < nbytes; ++i) { std::cerr << " " << static_cast(data()[i]); } const SymbolVec encoding = SymbolTableImpl::Encoding::decodeSymbols(data(), dataSize()); @@ -67,8 +67,8 @@ SymbolTableImpl::Encoding::~Encoding() { ASSERT(mem_block_.capacity() == 0); } -uint64_t SymbolTableImpl::Encoding::encodingSizeBytes(uint64_t number) { - uint64_t num_bytes = 0; +size_t SymbolTableImpl::Encoding::encodingSizeBytes(uint64_t number) { + size_t num_bytes = 0; do { ++num_bytes; number >>= 7; @@ -106,7 +106,7 @@ void SymbolTableImpl::Encoding::addSymbols(const std::vector& symbols) { } } -std::pair SymbolTableImpl::Encoding::decodeNumber(const uint8_t* encoding) { +std::pair SymbolTableImpl::Encoding::decodeNumber(const uint8_t* encoding) { uint64_t number = 0; uint64_t uc = SpilloverMask; const uint8_t* start = encoding; @@ -117,8 +117,7 @@ std::pair SymbolTableImpl::Encoding::decodeNumber(const uint return std::make_pair(number, encoding - start); } -SymbolVec SymbolTableImpl::Encoding::decodeSymbols(const SymbolTable::Storage array, - uint64_t size) { +SymbolVec SymbolTableImpl::Encoding::decodeSymbols(const SymbolTable::Storage array, size_t size) { SymbolVec symbol_vec; symbol_vec.reserve(size); decodeTokens( @@ -128,9 +127,9 @@ SymbolVec SymbolTableImpl::Encoding::decodeSymbols(const SymbolTable::Storage ar } void SymbolTableImpl::Encoding::decodeTokens( - const SymbolTable::Storage array, uint64_t size, - const std::function& symbolTokenFn, - const std::function& stringViewTokenFn) { + const SymbolTable::Storage array, size_t size, + const std::function& symbol_token_fn, + const std::function& string_view_token_fn) { while (size > 0) { if (*array == LiteralStringIndicator) { // To avoid scanning memory to find the literal size during decode, we @@ -138,17 +137,17 @@ void SymbolTableImpl::Encoding::decodeTokens( ASSERT(size > 1); ++array; --size; - std::pair length_consumed = decodeNumber(array); + std::pair length_consumed = decodeNumber(array); uint64_t length = length_consumed.first; array += length_consumed.second; size -= length_consumed.second; ASSERT(size >= length); - stringViewTokenFn(absl::string_view(reinterpret_cast(array), length)); + string_view_token_fn(absl::string_view(reinterpret_cast(array), length)); size -= length; array += length; } else { - std::pair symbol_consumed = decodeNumber(array); - symbolTokenFn(symbol_consumed.first); + std::pair symbol_consumed = decodeNumber(array); + symbol_token_fn(symbol_consumed.first); size -= symbol_consumed.second; 
array += symbol_consumed.second; } @@ -156,7 +155,7 @@ void SymbolTableImpl::Encoding::decodeTokens( } std::vector SymbolTableImpl::decodeStrings(const SymbolTable::Storage array, - uint64_t size) const { + size_t size) const { std::vector strings; Thread::LockGuard lock(lock_); Encoding::decodeTokens( @@ -451,8 +450,8 @@ StatNameStorage::StatNameStorage(absl::string_view name, SymbolTable& table) : StatNameStorageBase(table.encode(name)) {} StatNameStorage::StatNameStorage(StatName src, SymbolTable& table) { - const uint64_t size = src.size(); - MemBlockBuilder storage(size); + const size_t size = src.size(); + MemBlockBuilder storage(size); // Note: MemBlockBuilder takes uint64_t. src.copyToMemBlock(storage); setBytes(storage.release()); table.incRefCount(statName()); @@ -472,11 +471,11 @@ SymbolTable::StoragePtr SymbolTableImpl::makeDynamicStorage(absl::string_view na // payload_bytes is the total number of bytes needed to represent the // characters in name, plus their encoded size, plus the literal indicator. - const uint64_t payload_bytes = SymbolTableImpl::Encoding::totalSizeBytes(name.size()) + 1; + const size_t payload_bytes = SymbolTableImpl::Encoding::totalSizeBytes(name.size()) + 1; // total_bytes includes the payload_bytes, plus the LiteralStringIndicator, and // the length of those. - const uint64_t total_bytes = SymbolTableImpl::Encoding::totalSizeBytes(payload_bytes); + const size_t total_bytes = SymbolTableImpl::Encoding::totalSizeBytes(payload_bytes); MemBlockBuilder mem_block(total_bytes); SymbolTableImpl::Encoding::appendEncoding(payload_bytes, mem_block); @@ -550,7 +549,7 @@ void StatNameStorageSet::free(SymbolTable& symbol_table) { } SymbolTable::StoragePtr SymbolTableImpl::join(const StatNameVec& stat_names) const { - uint64_t num_bytes = 0; + size_t num_bytes = 0; for (StatName stat_name : stat_names) { if (!stat_name.empty()) { num_bytes += stat_name.dataSize(); diff --git a/source/common/stats/symbol_table_impl.h b/source/common/stats/symbol_table_impl.h index f4104fa407e4..7d89b7db8205 100644 --- a/source/common/stats/symbol_table_impl.h +++ b/source/common/stats/symbol_table_impl.h @@ -93,7 +93,7 @@ class SymbolTableImpl : public SymbolTable { /** * Decodes a uint8_t array into a SymbolVec. */ - static SymbolVec decodeSymbols(const SymbolTable::Storage array, uint64_t size); + static SymbolVec decodeSymbols(const SymbolTable::Storage array, size_t size); /** * Decodes a uint8_t array into a sequence of symbols and literal strings. @@ -103,18 +103,18 @@ class SymbolTableImpl : public SymbolTable { * * @param array the StatName encoded as a uint8_t array. * @param size the size of the array in bytes. - * @param symbolTokenFn a function to be called whenever a symbol is encountered in the array. - * @param stringVIewTokeNFn a function to be called whenever a string literal is encountered. + * @param symbol_token_fn a function to be called whenever a symbol is encountered in the array. + * @param string_view_token_fn a function to be called whenever a string literal is encountered. */ - static void decodeTokens(const SymbolTable::Storage array, uint64_t size, - const std::function& symbolTokenFn, - const std::function& stringViewTokenFn); + static void decodeTokens(const SymbolTable::Storage array, size_t size, + const std::function& symbol_token_fn, + const std::function& string_view_token_fn); /** * Returns the number of bytes required to represent StatName as a uint8_t * array, including the encoded size. 
*/ - uint64_t bytesRequired() const { + size_t bytesRequired() const { return data_bytes_required_ + encodingSizeBytes(data_bytes_required_); } @@ -130,13 +130,13 @@ class SymbolTableImpl : public SymbolTable { * @param number A number to encode in a variable length byte-array. * @return The number of bytes it would take to encode the number. */ - static uint64_t encodingSizeBytes(uint64_t number); + static size_t encodingSizeBytes(uint64_t number); /** * @param num_data_bytes The number of bytes in a data-block. * @return The total number of bytes required for the data-block and its encoded size. */ - static uint64_t totalSizeBytes(uint64_t num_data_bytes) { + static size_t totalSizeBytes(size_t num_data_bytes) { return encodingSizeBytes(num_data_bytes) + num_data_bytes; } @@ -167,10 +167,10 @@ class SymbolTableImpl : public SymbolTable { * @param The encoded byte array, written previously by appendEncoding. * @return A pair containing the decoded number, and the number of bytes consumed from encoding. */ - static std::pair decodeNumber(const uint8_t* encoding); + static std::pair decodeNumber(const uint8_t* encoding); private: - uint64_t data_bytes_required_{0}; + size_t data_bytes_required_{0}; MemBlockBuilder mem_block_; }; @@ -229,7 +229,7 @@ class SymbolTableImpl : public SymbolTable { * @param size the size of the array in bytes. * @return std::string the retrieved stat name. */ - std::vector decodeStrings(const Storage array, uint64_t size) const; + std::vector decodeStrings(const Storage array, size_t size) const; /** * Convenience function for encode(), symbolizing one string segment at a time. @@ -403,16 +403,16 @@ class StatName { bool operator!=(const StatName& rhs) const { return !(*this == rhs); } /** - * @return uint64_t the number of bytes in the symbol array, excluding the - * overhead for the size itself. + * @return size_t the number of bytes in the symbol array, excluding the + * overhead for the size itself. */ - uint64_t dataSize() const; + size_t dataSize() const; /** - * @return uint64_t the number of bytes in the symbol array, including the + * @return size_t the number of bytes in the symbol array, including the * overhead for the size itself. */ - uint64_t size() const { return SymbolTableImpl::Encoding::totalSizeBytes(dataSize()); } + size_t size() const { return SymbolTableImpl::Encoding::totalSizeBytes(dataSize()); } /** * Copies the entire StatName representation into a MemBlockBuilder, including @@ -466,7 +466,8 @@ class StatName { * hasher and comparator. */ absl::string_view dataAsStringView() const { - return {reinterpret_cast(data()), dataSize()}; + return {reinterpret_cast(data()), + static_cast(dataSize())}; } const uint8_t* size_and_data_{nullptr}; From 99f1d77243bb3a72d8e745904c8eeb473be968cb Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Thu, 9 Jul 2020 15:42:39 -0400 Subject: [PATCH 580/909] test: attempting to fix raw buffer config coverage (#11980) Removing an unnecessary destructor declaration. 
(https://storage.googleapis.com/envoy-postsubmit/master/coverage/source/extensions/transport_sockets/raw_buffer/config.h.gcov.html) Risk Level: n/a Testing: n/a Docs Changes: n/a Release Notes: n/a Fixes #11979 Signed-off-by: Alyssa Wilk --- source/extensions/transport_sockets/raw_buffer/config.h | 1 - test/per_file_coverage.sh | 1 - 2 files changed, 2 deletions(-) diff --git a/source/extensions/transport_sockets/raw_buffer/config.h b/source/extensions/transport_sockets/raw_buffer/config.h index a7c68d6875a7..b17f9836f555 100644 --- a/source/extensions/transport_sockets/raw_buffer/config.h +++ b/source/extensions/transport_sockets/raw_buffer/config.h @@ -16,7 +16,6 @@ namespace RawBuffer { */ class RawBufferSocketFactory : public virtual Server::Configuration::TransportSocketConfigFactory { public: - ~RawBufferSocketFactory() override = default; std::string name() const override { return TransportSocketNames::get().RawBuffer; } ProtobufTypes::MessagePtr createEmptyConfigProto() override; }; diff --git a/test/per_file_coverage.sh b/test/per_file_coverage.sh index cb7f14b81b51..9afed0555fdd 100755 --- a/test/per_file_coverage.sh +++ b/test/per_file_coverage.sh @@ -54,7 +54,6 @@ declare -a KNOWN_LOW_COVERAGE=( "source/extensions/tracers/opencensus:90.1" "source/extensions/tracers/xray:95.3" "source/extensions/transport_sockets:94.8" -"source/extensions/transport_sockets/raw_buffer:90.9" "source/extensions/transport_sockets/tap:95.6" "source/extensions/transport_sockets/tls:94.2" "source/extensions/transport_sockets/tls/private_key:76.9" From 81b5299dcb6d22925fd013f739075a319c13fbd9 Mon Sep 17 00:00:00 2001 From: Xin Date: Thu, 9 Jul 2020 15:47:35 -0400 Subject: [PATCH 581/909] make grpcmux pause/resume come in pair by returning a RAII obj which resumes requests on destruction. (#11739) * doc: fix SNI FAQ link (#10227) Signed-off-by: Lizan Zhou * make api pause/resume in pair by returning a RAII obj which resumes requests on destruction Signed-off-by: Xin Zhuang * cancel resume_cds_ on shutdown of clustermanagerImpl Signed-off-by: Xin Zhuang * format errors fix Signed-off-by: Xin Zhuang * move the ABSL_MUST_USE_RESULT attr to the beginning to see if gcc will be happy Signed-off-by: Xin Zhuang * pass by reference type_urls into cleanup in scoped rds Signed-off-by: Xin Zhuang * review fixes per Harvey comments. 
Signed-off-by: Xin Zhuang * remove unused parameter, weird it wasnt caught previously Signed-off-by: Xin Zhuang * fix-format using clang 10 Signed-off-by: Xin Zhuang Co-authored-by: Lizan Zhou --- include/envoy/config/BUILD | 1 + include/envoy/config/grpc_mux.h | 27 ++++----- source/common/config/BUILD | 1 + source/common/config/grpc_mux_impl.cc | 47 +++++++-------- source/common/config/grpc_mux_impl.h | 17 +++--- .../common/config/grpc_subscription_impl.cc | 4 +- source/common/config/grpc_subscription_impl.h | 3 +- source/common/config/new_grpc_mux_impl.cc | 24 ++++---- source/common/config/new_grpc_mux_impl.h | 6 +- source/common/router/scoped_rds.cc | 60 ++++++++++--------- source/common/upstream/cds_api_impl.cc | 6 +- .../common/upstream/cluster_manager_impl.cc | 11 ++-- source/common/upstream/cluster_manager_impl.h | 5 ++ source/server/lds_api.cc | 6 +- source/server/server.cc | 7 +-- .../config/delta_subscription_impl_test.cc | 7 +-- test/common/config/grpc_mux_impl_test.cc | 36 ++++++----- test/common/router/scoped_rds_test.cc | 14 ++--- test/mocks/config/mocks.h | 6 +- 19 files changed, 136 insertions(+), 152 deletions(-) diff --git a/include/envoy/config/BUILD b/include/envoy/config/BUILD index 991ddee5d3b5..67ac833c2403 100644 --- a/include/envoy/config/BUILD +++ b/include/envoy/config/BUILD @@ -35,6 +35,7 @@ envoy_cc_library( deps = [ ":subscription_interface", "//include/envoy/stats:stats_macros", + "//source/common/common:cleanup_lib", "//source/common/protobuf", ], ) diff --git a/include/envoy/config/grpc_mux.h b/include/envoy/config/grpc_mux.h index 123026bb6087..6e19534619bb 100644 --- a/include/envoy/config/grpc_mux.h +++ b/include/envoy/config/grpc_mux.h @@ -5,11 +5,13 @@ #include "envoy/config/subscription.h" #include "envoy/stats/stats_macros.h" +#include "common/common/cleanup.h" #include "common/protobuf/protobuf.h" namespace Envoy { namespace Config { +using ScopedResume = std::unique_ptr; /** * All control plane related stats. @see stats_macros.h */ @@ -62,8 +64,11 @@ class GrpcMux { * requests may later be resumed with resume(). * @param type_url type URL corresponding to xDS API, e.g. * type.googleapis.com/envoy.api.v2.Cluster. + * + * @return a ScopedResume object, which when destructed, resumes the paused discovery requests. + * A discovery request will be sent if one would have been sent during the pause. */ - virtual void pause(const std::string& type_url) PURE; + ABSL_MUST_USE_RESULT virtual ScopedResume pause(const std::string& type_url) PURE; /** * Pause discovery requests for given API types. This is useful when we're processing an update @@ -71,23 +76,11 @@ class GrpcMux { * requests may later be resumed with resume(). * @param type_urls type URLs corresponding to xDS API, e.g. * type.googleapis.com/envoy.api.v2.Cluster. + * + * @return a ScopedResume object, which when destructed, resumes the paused discovery requests. + * A discovery request will be sent if one would have been sent during the pause. */ - virtual void pause(const std::vector type_urls) PURE; - - /** - * Resume discovery requests for a given API type. This will send a discovery request if one would - * have been sent during the pause. - * @param type_url type URL corresponding to xDS API e.g. type.googleapis.com/envoy.api.v2.Cluster - */ - virtual void resume(const std::string& type_url) PURE; - - /** - * Resume discovery requests for given API types. This will send a discovery request if one would - * have been sent during the pause. 
- * @param type_urls type URLs corresponding to xDS API e.g. - * type.googleapis.com/envoy.api.v2.Cluster - */ - virtual void resume(const std::vector type_urls) PURE; + ABSL_MUST_USE_RESULT virtual ScopedResume pause(const std::vector type_urls) PURE; /** * Retrieves the current pause state as set by pause()/resume(). diff --git a/source/common/config/BUILD b/source/common/config/BUILD index 50b2cc615c35..d30b0542144f 100644 --- a/source/common/config/BUILD +++ b/source/common/config/BUILD @@ -139,6 +139,7 @@ envoy_cc_library( "//include/envoy/config:grpc_mux_interface", "//include/envoy/config:subscription_interface", "//include/envoy/upstream:cluster_manager_interface", + "//source/common/common:cleanup_lib", "//source/common/common:minimal_logger_lib", "//source/common/common:utility_lib", "//source/common/memory:utils_lib", diff --git a/source/common/config/grpc_mux_impl.cc b/source/common/config/grpc_mux_impl.cc index 87e4a5b86b71..c9eedcfccd93 100644 --- a/source/common/config/grpc_mux_impl.cc +++ b/source/common/config/grpc_mux_impl.cc @@ -99,37 +99,32 @@ GrpcMuxWatchPtr GrpcMuxImpl::addWatch(const std::string& type_url, return watch; } -void GrpcMuxImpl::pause(const std::string& type_url) { - ENVOY_LOG(debug, "Pausing discovery requests for {}", type_url); - ApiState& api_state = api_state_[type_url]; - ASSERT(!api_state.paused_); - ASSERT(!api_state.pending_); - api_state.paused_ = true; +ScopedResume GrpcMuxImpl::pause(const std::string& type_url) { + return pause(std::vector{type_url}); } -void GrpcMuxImpl::pause(const std::vector type_urls) { +ScopedResume GrpcMuxImpl::pause(const std::vector type_urls) { for (const auto& type_url : type_urls) { - pause(type_url); - } -} - -void GrpcMuxImpl::resume(const std::string& type_url) { - ENVOY_LOG(debug, "Resuming discovery requests for {}", type_url); - ApiState& api_state = api_state_[type_url]; - ASSERT(api_state.paused_); - api_state.paused_ = false; - - if (api_state.pending_) { - ASSERT(api_state.subscribed_); - queueDiscoveryRequest(type_url); - api_state.pending_ = false; + ENVOY_LOG(debug, "Pausing discovery requests for {}", type_url); + ApiState& api_state = api_state_[type_url]; + ASSERT(!api_state.paused_); + ASSERT(!api_state.pending_); + api_state.paused_ = true; } -} + return std::make_unique([this, type_urls]() { + for (const auto& type_url : type_urls) { + ENVOY_LOG(debug, "Resuming discovery requests for {}", type_url); + ApiState& api_state = api_state_[type_url]; + ASSERT(api_state.paused_); + api_state.paused_ = false; -void GrpcMuxImpl::resume(const std::vector type_urls) { - for (const auto& type_url : type_urls) { - resume(type_url); - } + if (api_state.pending_) { + ASSERT(api_state.subscribed_); + queueDiscoveryRequest(type_url); + api_state.pending_ = false; + } + } + }); } bool GrpcMuxImpl::paused(const std::string& type_url) const { diff --git a/source/common/config/grpc_mux_impl.h b/source/common/config/grpc_mux_impl.h index bb1d87e97fcd..897eec63db49 100644 --- a/source/common/config/grpc_mux_impl.h +++ b/source/common/config/grpc_mux_impl.h @@ -20,7 +20,6 @@ namespace Envoy { namespace Config { - /** * ADS API implementation that fetches via gRPC. 
*/ @@ -38,10 +37,8 @@ class GrpcMuxImpl : public GrpcMux, void start() override; // GrpcMux - void pause(const std::string& type_url) override; - void pause(const std::vector type_urls) override; - void resume(const std::string& type_url) override; - void resume(const std::vector type_urls) override; + ScopedResume pause(const std::string& type_url) override; + ScopedResume pause(const std::vector type_urls) override; bool paused(const std::string& type_url) const override; bool paused(const std::vector type_urls) const override; @@ -147,10 +144,12 @@ class NullGrpcMuxImpl : public GrpcMux, GrpcStreamCallbacks { public: void start() override {} - void pause(const std::string&) override {} - void pause(const std::vector) override {} - void resume(const std::string&) override {} - void resume(const std::vector) override {} + ScopedResume pause(const std::string&) override { + return std::make_unique([] {}); + } + ScopedResume pause(const std::vector) override { + return std::make_unique([] {}); + } bool paused(const std::string&) const override { return false; } bool paused(const std::vector) const override { return false; } diff --git a/source/common/config/grpc_subscription_impl.cc b/source/common/config/grpc_subscription_impl.cc index 2c2708fcf58d..ef8037f25006 100644 --- a/source/common/config/grpc_subscription_impl.cc +++ b/source/common/config/grpc_subscription_impl.cc @@ -105,9 +105,7 @@ void GrpcSubscriptionImpl::onConfigUpdateFailed(ConfigUpdateFailureReason reason stats_.update_attempt_.inc(); } -void GrpcSubscriptionImpl::pause() { grpc_mux_->pause(type_url_); } - -void GrpcSubscriptionImpl::resume() { grpc_mux_->resume(type_url_); } +ScopedResume GrpcSubscriptionImpl::pause() { return grpc_mux_->pause(type_url_); } void GrpcSubscriptionImpl::disableInitFetchTimeoutTimer() { if (init_fetch_timeout_timer_) { diff --git a/source/common/config/grpc_subscription_impl.h b/source/common/config/grpc_subscription_impl.h index a9a9f7b77f70..b53da3c6e254 100644 --- a/source/common/config/grpc_subscription_impl.h +++ b/source/common/config/grpc_subscription_impl.h @@ -35,8 +35,7 @@ class GrpcSubscriptionImpl : public Subscription, GrpcMuxSharedPtr grpcMux() { return grpc_mux_; } - void pause(); - void resume(); + ScopedResume pause(); private: void disableInitFetchTimeoutTimer(); diff --git a/source/common/config/new_grpc_mux_impl.cc b/source/common/config/new_grpc_mux_impl.cc index 81d00f4ae5cd..5ffafafa397c 100644 --- a/source/common/config/new_grpc_mux_impl.cc +++ b/source/common/config/new_grpc_mux_impl.cc @@ -25,23 +25,21 @@ NewGrpcMuxImpl::NewGrpcMuxImpl(Grpc::RawAsyncClientPtr&& async_client, rate_limit_settings), local_info_(local_info), transport_api_version_(transport_api_version) {} -void NewGrpcMuxImpl::pause(const std::string& type_url) { pausable_ack_queue_.pause(type_url); } - -void NewGrpcMuxImpl::pause(const std::vector type_urls) { - for (const auto& type_url : type_urls) { - pause(type_url); - } -} - -void NewGrpcMuxImpl::resume(const std::string& type_url) { - pausable_ack_queue_.resume(type_url); - trySendDiscoveryRequests(); +ScopedResume NewGrpcMuxImpl::pause(const std::string& type_url) { + return pause(std::vector{type_url}); } -void NewGrpcMuxImpl::resume(const std::vector type_urls) { +ScopedResume NewGrpcMuxImpl::pause(const std::vector type_urls) { for (const auto& type_url : type_urls) { - resume(type_url); + pausable_ack_queue_.pause(type_url); } + + return std::make_unique([this, type_urls]() { + for (const auto& type_url : type_urls) { + 
pausable_ack_queue_.resume(type_url); + trySendDiscoveryRequests(); + } + }); } bool NewGrpcMuxImpl::paused(const std::string& type_url) const { diff --git a/source/common/config/new_grpc_mux_impl.h b/source/common/config/new_grpc_mux_impl.h index 4b948339b735..df394019c235 100644 --- a/source/common/config/new_grpc_mux_impl.h +++ b/source/common/config/new_grpc_mux_impl.h @@ -38,10 +38,8 @@ class NewGrpcMuxImpl SubscriptionCallbacks& callbacks, OpaqueResourceDecoder& resource_decoder) override; - void pause(const std::string& type_url) override; - void pause(const std::vector type_urls) override; - void resume(const std::string& type_url) override; - void resume(const std::vector type_urls) override; + ScopedResume pause(const std::string& type_url) override; + ScopedResume pause(const std::vector type_urls) override; bool paused(const std::string& type_url) const override; bool paused(const std::vector type_urls) const override; diff --git a/source/common/router/scoped_rds.cc b/source/common/router/scoped_rds.cc index ddd1d0073b43..171176aab037 100644 --- a/source/common/router/scoped_rds.cc +++ b/source/common/router/scoped_rds.cc @@ -28,6 +28,7 @@ using Envoy::Config::ConfigProvider; using Envoy::Config::ConfigProviderInstanceType; using Envoy::Config::ConfigProviderManager; using Envoy::Config::ConfigProviderPtr; +using Envoy::Config::ScopedResume; namespace Envoy { namespace Router { @@ -225,40 +226,41 @@ void ScopedRdsConfigSubscription::onConfigUpdate( // NOTE: deletes are done before adds/updates. absl::flat_hash_map to_be_removed_scopes; + // Destruction of resume_rds will lift the floodgate for new RDS subscriptions. + // Note in the case of partial acceptance, accepted RDS subscriptions should be started + // despite of any error. + ScopedResume resume_rds; // If new route config sources come after the local init manager's initialize() been // called, the init manager can't accept new targets. Instead we use a local override which will // start new subscriptions but not wait on them to be ready. - std::unique_ptr noop_init_manager; - // NOTE: This should be defined after noop_init_manager as it depends on the - // noop_init_manager. - std::unique_ptr resume_rds; + std::unique_ptr srds_init_mgr; + // NOTE: This should be defined after srds_init_mgr and resume_rds, as it depends on the + // srds_init_mgr, and we want a single RDS discovery request to be sent to management + // server. + std::unique_ptr srds_initialization_continuation; + ASSERT(localInitManager().state() > Init::Manager::State::Uninitialized); + const auto type_urls = + Envoy::Config::getAllVersionTypeUrls(); + // Pause RDS to not send a burst of RDS requests until we start all the new subscriptions. + // In the case that localInitManager is uninitialized, RDS is already paused + // either by Server init or LDS init. + if (factory_context_.clusterManager().adsMux()) { + resume_rds = factory_context_.clusterManager().adsMux()->pause(type_urls); + } // if local init manager is initialized, the parent init manager may have gone away. if (localInitManager().state() == Init::Manager::State::Initialized) { - const auto type_urls = - Envoy::Config::getAllVersionTypeUrls(); - noop_init_manager = + srds_init_mgr = std::make_unique(fmt::format("SRDS {}:{}", name_, version_info)); - // Pause RDS to not send a burst of RDS requests until we start all the new subscriptions. - // In the case if factory_context_.init_manager() is uninitialized, RDS is already paused - // either by Server init or LDS init. 
- if (factory_context_.clusterManager().adsMux()) { - factory_context_.clusterManager().adsMux()->pause(type_urls); - } - resume_rds = std::make_unique([this, &noop_init_manager, version_info, type_urls] { - // For new RDS subscriptions created after listener warming up, we don't wait for them to - // warm up. - Init::WatcherImpl noop_watcher( - // Note: we just throw it away. - fmt::format("SRDS ConfigUpdate watcher {}:{}", name_, version_info), - []() { /*Do nothing.*/ }); - noop_init_manager->initialize(noop_watcher); - // New RDS subscriptions should have been created, now lift the floodgate. - // Note in the case of partial acceptance, accepted RDS subscriptions should be started - // despite of any error. - if (factory_context_.clusterManager().adsMux()) { - factory_context_.clusterManager().adsMux()->resume(type_urls); - } - }); + srds_initialization_continuation = + std::make_unique([this, &srds_init_mgr, version_info] { + // For new RDS subscriptions created after listener warming up, we don't wait for them to + // warm up. + Init::WatcherImpl noop_watcher( + // Note: we just throw it away. + fmt::format("SRDS ConfigUpdate watcher {}:{}", name_, version_info), + []() { /*Do nothing.*/ }); + srds_init_mgr->initialize(noop_watcher); + }); } std::vector exception_msgs; @@ -268,7 +270,7 @@ void ScopedRdsConfigSubscription::onConfigUpdate( to_be_removed_rds_providers = removeScopes(removed_resources, version_info); bool any_applied = addOrUpdateScopes(added_resources, - (noop_init_manager == nullptr ? localInitManager() : *noop_init_manager), + (srds_init_mgr == nullptr ? localInitManager() : *srds_init_mgr), version_info, exception_msgs) || !to_be_removed_rds_providers.empty(); ConfigSubscriptionCommonBase::onConfigUpdate(); diff --git a/source/common/upstream/cds_api_impl.cc b/source/common/upstream/cds_api_impl.cc index c7748babd1e1..246bb0de1d27 100644 --- a/source/common/upstream/cds_api_impl.cc +++ b/source/common/upstream/cds_api_impl.cc @@ -52,13 +52,11 @@ void CdsApiImpl::onConfigUpdate(const std::vector& r void CdsApiImpl::onConfigUpdate(const std::vector& added_resources, const Protobuf::RepeatedPtrField& removed_resources, const std::string& system_version_info) { - std::unique_ptr maybe_eds_resume; + Config::ScopedResume maybe_resume_eds; if (cm_.adsMux()) { const auto type_urls = Config::getAllVersionTypeUrls(); - cm_.adsMux()->pause(type_urls); - maybe_eds_resume = - std::make_unique([this, type_urls] { cm_.adsMux()->resume(type_urls); }); + maybe_resume_eds = cm_.adsMux()->pause(type_urls); } ENVOY_LOG(info, "cds: add {} cluster(s), remove {} cluster(s)", added_resources.size(), diff --git a/source/common/upstream/cluster_manager_impl.cc b/source/common/upstream/cluster_manager_impl.cc index ee8bf97d1f1a..bd5c7e7b4b0a 100644 --- a/source/common/upstream/cluster_manager_impl.cc +++ b/source/common/upstream/cluster_manager_impl.cc @@ -155,14 +155,12 @@ void ClusterManagerInitHelper::maybeFinishInitialize() { // If the first CDS response doesn't have any primary cluster, ClusterLoadAssignment // should be already paused by CdsApiImpl::onConfigUpdate(). Need to check that to // avoid double pause ClusterLoadAssignment. 
- std::unique_ptr maybe_eds_resume; + Config::ScopedResume maybe_resume_eds; if (cm_.adsMux()) { const auto type_urls = Config::getAllVersionTypeUrls(); if (!cm_.adsMux()->paused(type_urls)) { - cm_.adsMux()->pause(type_urls); - maybe_eds_resume = - std::make_unique([this, type_urls] { cm_.adsMux()->resume(type_urls); }); + maybe_resume_eds = cm_.adsMux()->pause(type_urls); } } initializeSecondaryClusters(); @@ -805,9 +803,10 @@ void ClusterManagerImpl::updateClusterCounts() { const auto type_urls = Config::getAllVersionTypeUrls(); const uint64_t previous_warming = cm_stats_.warming_clusters_.value(); if (previous_warming == 0 && !warming_clusters_.empty()) { - ads_mux_->pause(type_urls); + resume_cds_ = ads_mux_->pause(type_urls); } else if (previous_warming > 0 && warming_clusters_.empty()) { - ads_mux_->resume(type_urls); + ASSERT(resume_cds_ != nullptr); + resume_cds_.reset(); } } cm_stats_.active_clusters_.set(active_clusters_.size()); diff --git a/source/common/upstream/cluster_manager_impl.h b/source/common/upstream/cluster_manager_impl.h index 3998d51edb68..9ad4a5b00fff 100644 --- a/source/common/upstream/cluster_manager_impl.h +++ b/source/common/upstream/cluster_manager_impl.h @@ -238,6 +238,9 @@ class ClusterManagerImpl : public ClusterManager, Logger::Loggablecancel(); + } // Make sure we destroy all potential outgoing connections before this returns. cds_api_.reset(); ads_mux_.reset(); @@ -498,6 +501,8 @@ class ClusterManagerImpl : public ClusterManager, Logger::Loggable local_cluster_name_; diff --git a/source/server/lds_api.cc b/source/server/lds_api.cc index 0ea0b088d9df..fc6ced5853ee 100644 --- a/source/server/lds_api.cc +++ b/source/server/lds_api.cc @@ -37,13 +37,11 @@ LdsApiImpl::LdsApiImpl(const envoy::config::core::v3::ConfigSource& lds_config, void LdsApiImpl::onConfigUpdate(const std::vector& added_resources, const Protobuf::RepeatedPtrField& removed_resources, const std::string& system_version_info) { - std::unique_ptr maybe_rds_resume; + Config::ScopedResume maybe_resume_rds; if (cm_.adsMux()) { const auto type_urls = Config::getAllVersionTypeUrls(); - cm_.adsMux()->pause(type_urls); - maybe_rds_resume = - std::make_unique([this, type_urls] { cm_.adsMux()->resume(type_urls); }); + maybe_resume_rds = cm_.adsMux()->pause(type_urls); } bool any_applied = false; diff --git a/source/server/server.cc b/source/server/server.cc index f17ae5b7a5ef..d3e559cdaaf7 100644 --- a/source/server/server.cc +++ b/source/server/server.cc @@ -624,8 +624,9 @@ RunHelper::RunHelper(Instance& instance, const Options& options, Event::Dispatch // Pause RDS to ensure that we don't send any requests until we've // subscribed to all the RDS resources. The subscriptions happen in the init callbacks, // so we pause RDS until we've completed all the callbacks. + Config::ScopedResume maybe_resume_rds; if (cm.adsMux()) { - cm.adsMux()->pause(type_urls); + maybe_resume_rds = cm.adsMux()->pause(type_urls); } ENVOY_LOG(info, "all clusters initialized. initializing init manager"); @@ -633,9 +634,7 @@ RunHelper::RunHelper(Instance& instance, const Options& options, Event::Dispatch // Now that we're execute all the init callbacks we can resume RDS // as we've subscribed to all the statically defined RDS resources. - if (cm.adsMux()) { - cm.adsMux()->resume(type_urls); - } + // This is done by tearing down the maybe_resume_rds Cleanup object. 
}); } diff --git a/test/common/config/delta_subscription_impl_test.cc b/test/common/config/delta_subscription_impl_test.cc index f93f9a742b6e..6407aef51de1 100644 --- a/test/common/config/delta_subscription_impl_test.cc +++ b/test/common/config/delta_subscription_impl_test.cc @@ -42,7 +42,7 @@ TEST_F(DeltaSubscriptionImplTest, UpdateResourcesCausesRequest) { // can be sent, not just with pausing: rate limiting or a down gRPC stream would also do it). TEST_F(DeltaSubscriptionImplTest, PauseHoldsRequest) { startSubscription({"name1", "name2", "name3"}); - subscription_->pause(); + auto resume_sub = subscription_->pause(); expectSendMessage({"name4"}, {"name1", "name2"}, Grpc::Status::WellKnownGrpcStatus::Ok, "", {}); // If not for the pause, these updates would make the expectSendMessage fail due to too many @@ -52,8 +52,6 @@ TEST_F(DeltaSubscriptionImplTest, PauseHoldsRequest) { subscription_->updateResourceInterest({"name3", "name4"}); subscription_->updateResourceInterest({"name1", "name2", "name3", "name4"}); subscription_->updateResourceInterest({"name3", "name4"}); - - subscription_->resume(); } TEST_F(DeltaSubscriptionImplTest, ResponseCausesAck) { @@ -65,7 +63,7 @@ TEST_F(DeltaSubscriptionImplTest, ResponseCausesAck) { // resume, *all* ACKs that arrived during the pause are sent (in order). TEST_F(DeltaSubscriptionImplTest, PauseQueuesAcks) { startSubscription({"name1", "name2", "name3"}); - subscription_->pause(); + auto resume_sub = subscription_->pause(); // The server gives us our first version of resource name1. // subscription_ now wants to ACK name1 (but can't due to pause). { @@ -118,7 +116,6 @@ TEST_F(DeltaSubscriptionImplTest, PauseQueuesAcks) { nonce_acks_sent_.push(nonce); } })); - subscription_->resume(); // DeltaSubscriptionTestHarness's dtor will check that all ACKs were sent with the correct nonces, // in the correct order. 
} diff --git a/test/common/config/grpc_mux_impl_test.cc b/test/common/config/grpc_mux_impl_test.cc index dc22bfdbb1a5..01bf43a5fe2b 100644 --- a/test/common/config/grpc_mux_impl_test.cc +++ b/test/common/config/grpc_mux_impl_test.cc @@ -178,21 +178,27 @@ TEST_F(GrpcMuxImplTest, ResetStream) { TEST_F(GrpcMuxImplTest, PauseResume) { setup(); InSequence s; - auto foo_sub = grpc_mux_->addWatch("foo", {"x", "y"}, callbacks_, resource_decoder_); - grpc_mux_->pause("foo"); - EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); - grpc_mux_->start(); - expectSendMessage("foo", {"x", "y"}, "", true); - grpc_mux_->resume("foo"); - grpc_mux_->pause("bar"); - expectSendMessage("foo", {"z", "x", "y"}, ""); - auto foo_z_sub = grpc_mux_->addWatch("foo", {"z"}, callbacks_, resource_decoder_); - grpc_mux_->resume("bar"); - grpc_mux_->pause("foo"); - auto foo_zz_sub = grpc_mux_->addWatch("foo", {"zz"}, callbacks_, resource_decoder_); - expectSendMessage("foo", {"zz", "z", "x", "y"}, ""); - grpc_mux_->resume("foo"); - grpc_mux_->pause("foo"); + GrpcMuxWatchPtr foo_sub; + GrpcMuxWatchPtr foo_z_sub; + GrpcMuxWatchPtr foo_zz_sub; + foo_sub = grpc_mux_->addWatch("foo", {"x", "y"}, callbacks_, resource_decoder_); + { + ScopedResume a = grpc_mux_->pause("foo"); + EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); + grpc_mux_->start(); + expectSendMessage("foo", {"x", "y"}, "", true); + } + { + ScopedResume a = grpc_mux_->pause("bar"); + expectSendMessage("foo", {"z", "x", "y"}, ""); + foo_z_sub = grpc_mux_->addWatch("foo", {"z"}, callbacks_, resource_decoder_); + } + { + ScopedResume a = grpc_mux_->pause("foo"); + foo_zz_sub = grpc_mux_->addWatch("foo", {"zz"}, callbacks_, resource_decoder_); + expectSendMessage("foo", {"zz", "z", "x", "y"}, ""); + } + grpc_mux_->pause("foo")->cancel(); } // Validate behavior when type URL mismatches occur. diff --git a/test/common/router/scoped_rds_test.cc b/test/common/router/scoped_rds_test.cc index bc0e119d6699..456b95df3b4a 100644 --- a/test/common/router/scoped_rds_test.cc +++ b/test/common/router/scoped_rds_test.cc @@ -241,7 +241,7 @@ route_configuration_name: foo_routes - string_key: x-bar-key )EOF"; const auto resource_2 = parseScopedRouteConfigurationFromYaml(config_yaml2); - init_watcher_.expectReady().Times(1); // Only the SRDS parent_init_target_. + init_watcher_.expectReady(); // Only the SRDS parent_init_target_. context_init_manager_.initialize(init_watcher_); const auto decoded_resources = TestUtility::decodeResources({resource, resource_2}); EXPECT_NO_THROW(srds_subscription_->onConfigUpdate(decoded_resources.refvec_, "1")); @@ -298,7 +298,7 @@ route_configuration_name: foo_routes // Tests that multiple uniquely named non-conflict resources are allowed in config updates. TEST_F(ScopedRdsTest, MultipleResourcesDelta) { setup(); - init_watcher_.expectReady().Times(1); + init_watcher_.expectReady(); const std::string config_yaml = R"EOF( name: foo_scope route_configuration_name: foo_routes @@ -318,8 +318,8 @@ route_configuration_name: foo_routes // Delta API. 
const auto decoded_resources = TestUtility::decodeResources({resource, resource_2}); - EXPECT_NO_THROW(srds_subscription_->onConfigUpdate(decoded_resources.refvec_, {}, "1")); context_init_manager_.initialize(init_watcher_); + EXPECT_NO_THROW(srds_subscription_->onConfigUpdate(decoded_resources.refvec_, {}, "1")); EXPECT_EQ(1UL, server_factory_context_.scope_.counter("foo.scoped_rds.foo_scoped_routes.config_reload") .value()); @@ -433,7 +433,7 @@ route_configuration_name: foo_routes - string_key: x-foo-key )EOF"; const auto resource_2 = parseScopedRouteConfigurationFromYaml(config_yaml2); - init_watcher_.expectReady().Times(1); // Partial success gets the subscription ready. + init_watcher_.expectReady(); // Partial success gets the subscription ready. context_init_manager_.initialize(init_watcher_); const auto decoded_resources = TestUtility::decodeResources({resource, resource_2}); @@ -479,6 +479,8 @@ route_configuration_name: bar_routes const auto resource = parseScopedRouteConfigurationFromYaml(config_yaml1); const auto resource_2 = parseScopedRouteConfigurationFromYaml(config_yaml2); const auto decoded_resources = TestUtility::decodeResources({resource, resource_2}); + init_watcher_.expectReady(); + context_init_manager_.initialize(init_watcher_); EXPECT_NO_THROW(srds_subscription_->onConfigUpdate(decoded_resources.refvec_, "1")); EXPECT_EQ(1UL, server_factory_context_.scope_.counter("foo.scoped_rds.foo_scoped_routes.config_reload") @@ -492,8 +494,6 @@ route_configuration_name: bar_routes ->getRouteConfig(TestRequestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}) ->name(), ""); - init_watcher_.expectReady().Times(1); - context_init_manager_.initialize(init_watcher_); pushRdsConfig({"foo_routes", "bar_routes"}, "111"); EXPECT_EQ(server_factory_context_.scope_.counter("foo.rds.foo_routes.config_reload").value(), 1UL); @@ -646,7 +646,7 @@ TEST_F(ScopedRdsTest, ConfigUpdateFailure) { // config. TEST_F(ScopedRdsTest, ConfigDump) { setup(); - init_watcher_.expectReady().Times(1); + init_watcher_.expectReady(); context_init_manager_.initialize(init_watcher_); auto message_ptr = server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_["route_scopes"](); diff --git a/test/mocks/config/mocks.h b/test/mocks/config/mocks.h index 92db2b117e6d..f55f41357c9f 100644 --- a/test/mocks/config/mocks.h +++ b/test/mocks/config/mocks.h @@ -95,10 +95,8 @@ class MockGrpcMux : public GrpcMux { ~MockGrpcMux() override; MOCK_METHOD(void, start, ()); - MOCK_METHOD(void, pause, (const std::string& type_url)); - MOCK_METHOD(void, pause, (const std::vector type_urls)); - MOCK_METHOD(void, resume, (const std::string& type_url)); - MOCK_METHOD(void, resume, (const std::vector type_urls)); + MOCK_METHOD(ScopedResume, pause, (const std::string& type_url)); + MOCK_METHOD(ScopedResume, pause, (const std::vector type_urls)); MOCK_METHOD(bool, paused, (const std::string& type_url), (const)); MOCK_METHOD(bool, paused, (const std::vector type_urls), (const)); From 2d69e30c51f2418faf267aaa6c1126fce9948c62 Mon Sep 17 00:00:00 2001 From: Dhi Aurrahman Date: Fri, 10 Jul 2020 02:52:17 +0700 Subject: [PATCH 582/909] http: Use GURL as HTTP URL parser utility (#11670) This replaces http_parser's URL parser with GURL. 
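For reference, a minimal usage sketch (not part of this patch, and assuming an Envoy source tree) of the new GURL-backed Http::Utility::Url interface added below in source/common/http/url_utility.h; the expected values mirror the updated cases in test/common/http/utility_test.cc:

    #include "common/http/url_utility.h"

    #include <iostream>

    // Hypothetical illustration only: exercising the new Url helper the way
    // callers such as the CSRF filter and HTTP/1 codec do.
    int main() {
      Envoy::Http::Utility::Url url;

      // Absolute-form target: scheme, authority and path/query are split out,
      // and the effective port is derived even when not spelled out.
      if (url.initialize("http://www.host.com/path?query=param", /*is_connect=*/false)) {
        std::cout << url.scheme() << "\n";              // "http"
        std::cout << url.hostAndPort() << "\n";         // "www.host.com" (default port elided)
        std::cout << url.pathAndQueryParams() << "\n";  // "/path?query=param"
        std::cout << url.port() << "\n";                // 80
      }

      // CONNECT target: only "host:port" is accepted, with the port required
      // to be in the 1-65535 range; scheme and path stay empty.
      Envoy::Http::Utility::Url connect_url;
      if (connect_url.initialize("host.com:443", /*is_connect=*/true)) {
        std::cout << connect_url.hostAndPort() << "\n";  // "host.com:443"
        std::cout << connect_url.port() << "\n";         // 443
      }
      return 0;
    }
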
Risk Level: Medium Testing: Unit Docs Changes: N/A Release Notes: N/A Signed-off-by: Dhi Aurrahman --- bazel/external/googleurl.patch | 119 +++++++++++++++++ bazel/external/icuuc.BUILD | 59 ++++++++ bazel/repositories.bzl | 14 ++ bazel/repository_locations.bzl | 7 + source/common/http/BUILD | 15 ++- source/common/http/http1/BUILD | 1 + source/common/http/http1/codec_impl.cc | 1 + source/common/http/url_utility.cc | 95 +++++++++++++ source/common/http/url_utility.h | 58 ++++++++ source/common/http/utility.cc | 39 ------ source/common/http/utility.h | 17 --- source/common/router/BUILD | 1 + source/common/router/router.cc | 1 + source/extensions/filters/http/csrf/BUILD | 1 + .../filters/http/csrf/csrf_filter.cc | 3 +- source/extensions/quic_listeners/quiche/BUILD | 1 + .../quic_listeners/quiche/platform/BUILD | 2 +- .../platform/quic_hostname_utils_impl.cc | 3 +- test/common/http/BUILD | 1 + test/common/http/utility_test.cc | 126 +++++++++++------- tools/spelling/spelling_dictionary.txt | 1 + 21 files changed, 454 insertions(+), 111 deletions(-) create mode 100644 bazel/external/googleurl.patch create mode 100644 bazel/external/icuuc.BUILD create mode 100644 source/common/http/url_utility.cc create mode 100644 source/common/http/url_utility.h diff --git a/bazel/external/googleurl.patch b/bazel/external/googleurl.patch new file mode 100644 index 000000000000..72e3991b4ff0 --- /dev/null +++ b/bazel/external/googleurl.patch @@ -0,0 +1,119 @@ +# TODO(dio): Consider to remove this patch when we have the ability to compile the project using +# clang-cl. Tracked in https://github.com/envoyproxy/envoy/issues/11974. + +diff --git a/base/compiler_specific.h b/base/compiler_specific.h +index 2962537..6193b56 100644 +--- a/base/compiler_specific.h ++++ b/base/compiler_specific.h +@@ -7,10 +7,6 @@ + + #include "build/build_config.h" + +-#if defined(COMPILER_MSVC) && !defined(__clang__) +-#error "Only clang-cl is supported on Windows, see https://crbug.com/988071" +-#endif +- + // Annotate a variable indicating it's ok if the variable is not used. + // (Typically used to silence a compiler warning when the assignment + // is important for some other reason.) +@@ -212,7 +208,9 @@ + #endif + #endif + +-#if defined(__clang__) && __has_attribute(uninitialized) ++#if defined(__clang__) ++#if defined(__has_attribute) ++#if __has_attribute(uninitialized) + // Attribute "uninitialized" disables -ftrivial-auto-var-init=pattern for + // the specified variable. + // Library-wide alternative is +@@ -243,6 +241,8 @@ + // E.g. platform, bot, benchmark or test name in patch description or next to + // the attribute. 
+ #define STACK_UNINITIALIZED __attribute__((uninitialized)) ++#endif ++#endif + #else + #define STACK_UNINITIALIZED + #endif +diff --git a/base/strings/BUILD b/base/strings/BUILD +index 7a06170..7c86a5f 100644 +--- a/base/strings/BUILD ++++ b/base/strings/BUILD +@@ -6,23 +6,21 @@ load("//:build_config.bzl", "build_config") + cc_library( + name = "strings", + srcs = [ +- "string16.cc", + "string_piece.cc", + "string_util.cc", + "string_util_constants.cc", + "utf_string_conversion_utils.cc", + "utf_string_conversions.cc", +- ], ++ ] + build_config.strings_srcs, + hdrs = [ + "char_traits.h", + "string16.h", + "string_piece.h", + "string_piece_forward.h", + "string_util.h", +- "string_util_posix.h", + "utf_string_conversion_utils.h", + "utf_string_conversions.h", +- ], ++ ] + build_config.strings_hdrs, + copts = build_config.default_copts, + visibility = ["//visibility:public"], + deps = [ +diff --git a/build_config.bzl b/build_config.bzl +index d5fca65..fc0d7e5 100644 +--- a/build_config/build_config.bzl ++++ b/build_config/build_config.bzl +@@ -1,8 +1,25 @@ +-_default_copts = [ +- "-std=c++14", +- "-fno-strict-aliasing", +-] ++_default_copts = select({ ++ "@envoy//bazel:windows_x86_64": [ ++ "/std:c++14", ++ ], ++ "//conditions:default": [ ++ "-std=c++14", ++ "-fno-strict-aliasing", ++ ], ++}) ++ ++_strings_srcs = select({ ++ "@envoy//bazel:windows_x86_64": [], ++ "//conditions:default": ["string16.cc"], ++}) ++ ++_strings_hdrs = select({ ++ "@envoy//bazel:windows_x86_64": ["string_util_win.h"], ++ "//conditions:default": ["string_util_posix.h"], ++}) + + build_config = struct( + default_copts = _default_copts, ++ strings_srcs = _strings_srcs, ++ strings_hdrs = _strings_hdrs, + ) +diff --git a/url/BUILD b/url/BUILD +index 0126bdc..5d1a171 100644 +--- a/url/BUILD ++++ b/url/BUILD +@@ -43,11 +43,11 @@ cc_library( + "url_util.h", + ], + copts = build_config.default_copts, +- linkopts = ["-licuuc"], + visibility = ["//visibility:public"], + deps = [ + "//base", + "//base/strings", + "//polyfills", ++ "@org_unicode_icuuc//:common", + ], + ) diff --git a/bazel/external/icuuc.BUILD b/bazel/external/icuuc.BUILD new file mode 100644 index 000000000000..e910a64af1aa --- /dev/null +++ b/bazel/external/icuuc.BUILD @@ -0,0 +1,59 @@ +load("@rules_cc//cc:defs.bzl", "cc_library") + +licenses(["notice"]) # Apache 2 + +exports_files([ + "icu4c/LICENSE", + "icu4j/main/shared/licenses/LICENSE", +]) + +icuuc_copts = [ + "-DU_STATIC_IMPLEMENTATION", + "-DU_COMMON_IMPLEMENTATION", + "-DU_HAVE_STD_ATOMICS", +] + select({ + "@envoy//bazel:apple": [ + "-Wno-shorten-64-to-32", + "-Wno-unused-variable", + ], + "@envoy//bazel:windows_x86_64": [ + "/utf-8", + "/DLOCALE_ALLOW_NEUTRAL_NAMES=0", + ], + # TODO(dio): Add "@envoy//bazel:android" when we have it. 
+ # "@envoy//bazel:android": [ + # "-fdata-sections", + # "-DU_HAVE_NL_LANGINFO_CODESET=0", + # "-Wno-deprecated-declarations", + # ], + "//conditions:default": [], +}) + +cc_library( + name = "headers", + hdrs = glob(["icu4c/source/common/unicode/*.h"]), + includes = ["icu4c/source/common"], + visibility = ["//visibility:public"], +) + +cc_library( + name = "common", + hdrs = glob(["icu4c/source/common/unicode/*.h"]), + includes = ["icu4c/source/common"], + visibility = ["//visibility:public"], + deps = [":icuuc"], +) + +cc_library( + name = "icuuc", + srcs = glob([ + "icu4c/source/common/*.c", + "icu4c/source/common/*.cpp", + "icu4c/source/stubdata/*.cpp", + ]), + hdrs = glob(["icu4c/source/common/*.h"]), + copts = icuuc_copts, + tags = ["requires-rtti"], + visibility = ["//visibility:private"], + deps = [":headers"], +) diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index d31cbe33d267..cb7b3bfea5d9 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -197,6 +197,7 @@ def envoy_dependencies(skip_targets = []): _repository_impl("bazel_compdb") _repository_impl("envoy_build_tools") _repository_impl("rules_cc") + _org_unicode_icuuc() # Unconditional, since we use this only for compiler-agnostic fuzzing utils. _org_llvm_releases_compiler_rt() @@ -699,6 +700,8 @@ def _com_googlesource_quiche(): def _com_googlesource_googleurl(): _repository_impl( name = "com_googlesource_googleurl", + patches = ["@envoy//bazel/external:googleurl.patch"], + patch_args = ["-p1"], ) native.bind( name = "googleurl", @@ -858,6 +861,17 @@ filegroup( **_get_location("kafka_python_client") ) +def _org_unicode_icuuc(): + _repository_impl( + name = "org_unicode_icuuc", + build_file = "@envoy//bazel/external:icuuc.BUILD", + # TODO(dio): Consider patching udata when we need to embed some data. 
+ ) + native.bind( + name = "icuuc", + actual = "@org_unicode_icuuc//:common", + ) + def _foreign_cc_dependencies(): _repository_impl("rules_foreign_cc") diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 3f25e85da263..5fabd0cbcb21 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -477,4 +477,11 @@ DEPENDENCY_REPOSITORIES = dict( urls = ["https://github.com/dpkp/kafka-python/archive/2.0.0.tar.gz"], use_category = ["test"], ), + org_unicode_icuuc = dict( + strip_prefix = "icu-release-64-2", + sha256 = "524960ac99d086cdb6988d2a92fc163436fd3c6ec0a84c475c6382fbf989be05", + urls = ["https://github.com/unicode-org/icu/archive/release-64-2.tar.gz"], + use_category = ["dataplane"], + cpe = "cpe:2.3:a:icu-project:international_components_for_unicode", + ), ) diff --git a/source/common/http/BUILD b/source/common/http/BUILD index 782eb8d28f16..76c1f202e29e 100644 --- a/source/common/http/BUILD +++ b/source/common/http/BUILD @@ -338,7 +338,6 @@ envoy_cc_library( hdrs = ["utility.h"], external_deps = [ "abseil_optional", - "http_parser", "nghttp2", ], deps = [ @@ -428,3 +427,17 @@ envoy_cc_library( "//source/common/common:assert_lib", ], ) + +envoy_cc_library( + name = "url_utility_lib", + srcs = ["url_utility.cc"], + hdrs = ["url_utility.h"], + external_deps = [ + "googleurl", + ], + deps = [ + "//source/common/common:assert_lib", + "//source/common/common:empty_string", + "//source/common/common:utility_lib", + ], +) diff --git a/source/common/http/http1/BUILD b/source/common/http/http1/BUILD index 042870491232..278e9adaaae5 100644 --- a/source/common/http/http1/BUILD +++ b/source/common/http/http1/BUILD @@ -49,6 +49,7 @@ envoy_cc_library( "//source/common/http:header_utility_lib", "//source/common/http:headers_lib", "//source/common/http:status_lib", + "//source/common/http:url_utility_lib", "//source/common/http:utility_lib", "//source/common/runtime:runtime_features_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", diff --git a/source/common/http/http1/codec_impl.cc b/source/common/http/http1/codec_impl.cc index 6085d6a6aea8..47d420026bbb 100644 --- a/source/common/http/http1/codec_impl.cc +++ b/source/common/http/http1/codec_impl.cc @@ -16,6 +16,7 @@ #include "common/http/header_utility.h" #include "common/http/headers.h" #include "common/http/http1/header_formatter.h" +#include "common/http/url_utility.h" #include "common/http/utility.h" #include "common/runtime/runtime_features.h" diff --git a/source/common/http/url_utility.cc b/source/common/http/url_utility.cc new file mode 100644 index 000000000000..d2fd43015280 --- /dev/null +++ b/source/common/http/url_utility.cc @@ -0,0 +1,95 @@ +#include "common/http/url_utility.h" + +#include + +#include +#include + +#include "common/common/assert.h" +#include "common/common/empty_string.h" +#include "common/common/utility.h" + +#include "absl/strings/numbers.h" +#include "absl/strings/str_cat.h" + +namespace Envoy { +namespace Http { +namespace Utility { + +bool Url::initialize(absl::string_view absolute_url, bool is_connect) { + // TODO(dio): When we have access to base::StringPiece, probably we can convert absolute_url to + // that instead. + GURL parsed(std::string{absolute_url}); + if (is_connect) { + return initializeForConnect(std::move(parsed)); + } + + // TODO(dio): Check if we need to accommodate to strictly validate only http(s) AND ws(s) schemes. + // Currently, we only accept http(s). 
+ if (!parsed.is_valid() || !parsed.SchemeIsHTTPOrHTTPS()) { + return false; + } + + scheme_ = parsed.scheme(); + + // Only non-default ports will be rendered as part of host_and_port_. For example, + // http://www.host.com:80 has port component (i.e. 80). However, since 80 is a default port for + // http scheme, host_and_port_ will be rendered as www.host.com (without port). The same case with + // https scheme (with port 443) as well. + host_and_port_ = + absl::StrCat(parsed.host(), parsed.has_port() ? ":" : EMPTY_STRING, parsed.port()); + + const int port = parsed.EffectiveIntPort(); + if (port <= 0 || port > std::numeric_limits::max()) { + return false; + } + port_ = static_cast(port); + + // RFC allows the absolute URI to not end in "/", but the absolute path form must start with "/". + path_and_query_params_ = parsed.PathForRequest(); + if (parsed.has_ref()) { + absl::StrAppend(&path_and_query_params_, "#", parsed.ref()); + } + + return true; +} + +bool Url::initializeForConnect(GURL&& url) { + // CONNECT requests can only contain "hostname:port" + // https://github.com/nodejs/http-parser/blob/d9275da4650fd1133ddc96480df32a9efe4b059b/http_parser.c#L2503-L2506. + if (!url.is_valid() || url.IsStandard()) { + return false; + } + + const auto& parsed = url.parsed_for_possibly_invalid_spec(); + // The parsed.scheme contains the URL's hostname (stored by GURL). While host and port have -1 + // as its length. + if (parsed.scheme.len <= 0 || parsed.host.len > 0 || parsed.port.len > 0) { + return false; + } + + host_and_port_ = url.possibly_invalid_spec(); + const auto& parts = StringUtil::splitToken(host_and_port_, ":", /*keep_empty_string=*/true, + /*trim_whitespace=*/false); + if (parts.size() != 2 || static_cast(parsed.scheme.len) != parts.at(0).size() || + !validPortForConnect(parts.at(1))) { + return false; + } + + return true; +} + +bool Url::validPortForConnect(absl::string_view port_string) { + int port; + const bool valid = absl::SimpleAtoi(port_string, &port); + // Only a port value in valid range (1-65535) is allowed. + if (!valid || port <= 0 || port > std::numeric_limits::max()) { + return false; + } + port_ = static_cast(port); + return true; +} + +} // namespace Utility +} // namespace Http +} // namespace Envoy diff --git a/source/common/http/url_utility.h b/source/common/http/url_utility.h new file mode 100644 index 000000000000..fa140c6d5f12 --- /dev/null +++ b/source/common/http/url_utility.h @@ -0,0 +1,58 @@ +#pragma once + +#include + +#include "absl/strings/string_view.h" +#include "url/gurl.h" + +namespace Envoy { +namespace Http { +namespace Utility { + +/** + * Given a fully qualified URL, splits the string_view provided into scheme, host and path with + * query parameters components. + */ +class Url { +public: + /** + * Initializes a URL object from a URL string. + * @param absolute_url URL string to be parsed. + * @param is_connect whether to parse the absolute_url as CONNECT request URL or not. + * @return bool if the initialization is successful. + */ + bool initialize(absl::string_view absolute_url, bool is_connect); + + /** + * @return absl::string_view the scheme of a URL. + */ + absl::string_view scheme() const { return scheme_; } + + /** + * @return absl::string_view the host and port part of a URL. + */ + absl::string_view hostAndPort() const { return host_and_port_; } + + /** + * @return absl::string_view the path and query params part of a URL. 
+ */ + absl::string_view pathAndQueryParams() const { return path_and_query_params_; } + + /** + * @return uint64_t the effective port of a URL. + */ + uint64_t port() const { return port_; } + +private: + bool initializeForConnect(GURL&& url); + bool validPortForConnect(absl::string_view port_string); + + std::string scheme_; + std::string host_and_port_; + std::string path_and_query_params_; + uint16_t port_{0}; +}; + +} // namespace Utility +} // namespace Http +} // namespace Envoy diff --git a/source/common/http/utility.cc b/source/common/http/utility.cc index a48bea5d08ae..da6076f75689 100644 --- a/source/common/http/utility.cc +++ b/source/common/http/utility.cc @@ -1,7 +1,5 @@ #include "common/http/utility.h" -#include - #include #include #include @@ -205,43 +203,6 @@ initializeAndValidateOptions(const envoy::config::core::v3::Http2ProtocolOptions namespace Http { -static const char kDefaultPath[] = "/"; - -bool Utility::Url::initialize(absl::string_view absolute_url, bool is_connect) { - struct http_parser_url u; - http_parser_url_init(&u); - const int result = - http_parser_parse_url(absolute_url.data(), absolute_url.length(), is_connect, &u); - - if (result != 0) { - return false; - } - if ((u.field_set & (1 << UF_HOST)) != (1 << UF_HOST) && - (u.field_set & (1 << UF_SCHEMA)) != (1 << UF_SCHEMA)) { - return false; - } - scheme_ = absl::string_view(absolute_url.data() + u.field_data[UF_SCHEMA].off, - u.field_data[UF_SCHEMA].len); - - uint16_t authority_len = u.field_data[UF_HOST].len; - if ((u.field_set & (1 << UF_PORT)) == (1 << UF_PORT)) { - authority_len = authority_len + u.field_data[UF_PORT].len + 1; - } - host_and_port_ = - absl::string_view(absolute_url.data() + u.field_data[UF_HOST].off, authority_len); - - // RFC allows the absolute-uri to not end in /, but the absolute path form - // must start with - uint64_t path_len = absolute_url.length() - (u.field_data[UF_HOST].off + hostAndPort().length()); - if (path_len > 0) { - uint64_t path_beginning = u.field_data[UF_HOST].off + hostAndPort().length(); - path_and_query_params_ = absl::string_view(absolute_url.data() + path_beginning, path_len); - } else if (!is_connect) { - path_and_query_params_ = absl::string_view(kDefaultPath, 1); - } - return true; -} - void Utility::appendXff(RequestHeaderMap& headers, const Network::Address::Instance& remote_address) { if (remote_address.type() != Network::Address::Type::Ip) { diff --git a/source/common/http/utility.h b/source/common/http/utility.h index e43f8fd977a8..69778024e8a7 100644 --- a/source/common/http/utility.h +++ b/source/common/http/utility.h @@ -122,23 +122,6 @@ initializeAndValidateOptions(const envoy::config::core::v3::Http2ProtocolOptions namespace Http { namespace Utility { -/** - * Given a fully qualified URL, splits the string_view provided into scheme, - * host and path with query parameters components. 
- */ -class Url { -public: - bool initialize(absl::string_view absolute_url, bool is_connect_request); - absl::string_view scheme() { return scheme_; } - absl::string_view hostAndPort() { return host_and_port_; } - absl::string_view pathAndQueryParams() { return path_and_query_params_; } - -private: - absl::string_view scheme_; - absl::string_view host_and_port_; - absl::string_view path_and_query_params_; -}; - class PercentEncoding { public: /** diff --git a/source/common/router/BUILD b/source/common/router/BUILD index bd500d294888..5f9e2b8cbc96 100644 --- a/source/common/router/BUILD +++ b/source/common/router/BUILD @@ -304,6 +304,7 @@ envoy_cc_library( "//source/common/http:header_map_lib", "//source/common/http:headers_lib", "//source/common/http:message_lib", + "//source/common/http:url_utility_lib", "//source/common/http:utility_lib", "//source/common/network:application_protocol_lib", "//source/common/network:transport_socket_options_lib", diff --git a/source/common/router/router.cc b/source/common/router/router.cc index 8a373f263530..a466856315b3 100644 --- a/source/common/router/router.cc +++ b/source/common/router/router.cc @@ -26,6 +26,7 @@ #include "common/http/header_map_impl.h" #include "common/http/headers.h" #include "common/http/message_impl.h" +#include "common/http/url_utility.h" #include "common/http/utility.h" #include "common/network/application_protocol.h" #include "common/network/transport_socket_options_impl.h" diff --git a/source/extensions/filters/http/csrf/BUILD b/source/extensions/filters/http/csrf/BUILD index a9361502dd10..c82dbf9764e2 100644 --- a/source/extensions/filters/http/csrf/BUILD +++ b/source/extensions/filters/http/csrf/BUILD @@ -22,6 +22,7 @@ envoy_cc_library( "//source/common/common:matchers_lib", "//source/common/http:header_map_lib", "//source/common/http:headers_lib", + "//source/common/http:url_utility_lib", "//source/common/http:utility_lib", "//source/extensions/filters/http:well_known_names", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", diff --git a/source/extensions/filters/http/csrf/csrf_filter.cc b/source/extensions/filters/http/csrf/csrf_filter.cc index d852a78e31a5..eb6885936893 100644 --- a/source/extensions/filters/http/csrf/csrf_filter.cc +++ b/source/extensions/filters/http/csrf/csrf_filter.cc @@ -6,6 +6,7 @@ #include "common/common/empty_string.h" #include "common/http/header_map_impl.h" #include "common/http/headers.h" +#include "common/http/url_utility.h" #include "common/http/utility.h" #include "extensions/filters/http/well_known_names.h" @@ -39,7 +40,7 @@ bool isModifyMethod(const Http::RequestHeaderMap& headers) { absl::string_view hostAndPort(const absl::string_view header) { Http::Utility::Url absolute_url; if (!header.empty()) { - if (absolute_url.initialize(header, false)) { + if (absolute_url.initialize(header, /*is_connect=*/false)) { return absolute_url.hostAndPort(); } return header; diff --git a/source/extensions/quic_listeners/quiche/BUILD b/source/extensions/quic_listeners/quiche/BUILD index 3082bdf98eca..d79c1a355e3a 100644 --- a/source/extensions/quic_listeners/quiche/BUILD +++ b/source/extensions/quic_listeners/quiche/BUILD @@ -265,6 +265,7 @@ envoy_cc_library( ":envoy_quic_server_connection_lib", ":envoy_quic_server_session_lib", "//include/envoy/network:listener_interface", + "//source/common/http:utility_lib", "//source/server:connection_handler_lib", "@com_googlesource_quiche//:quic_core_server_lib", "@com_googlesource_quiche//:quic_core_utils_lib", diff --git 
a/source/extensions/quic_listeners/quiche/platform/BUILD b/source/extensions/quic_listeners/quiche/platform/BUILD index 4ef4fbbc8d64..9c9857842e75 100644 --- a/source/extensions/quic_listeners/quiche/platform/BUILD +++ b/source/extensions/quic_listeners/quiche/platform/BUILD @@ -188,7 +188,7 @@ envoy_cc_library( "//source/common/common:assert_lib", "//source/common/filesystem:directory_lib", "//source/common/filesystem:filesystem_lib", - "//source/common/http:utility_lib", + "//source/common/http:url_utility_lib", ], ) diff --git a/source/extensions/quic_listeners/quiche/platform/quic_hostname_utils_impl.cc b/source/extensions/quic_listeners/quiche/platform/quic_hostname_utils_impl.cc index bcbafb56639e..7b26dac94e26 100644 --- a/source/extensions/quic_listeners/quiche/platform/quic_hostname_utils_impl.cc +++ b/source/extensions/quic_listeners/quiche/platform/quic_hostname_utils_impl.cc @@ -1,3 +1,4 @@ + // NOLINT(namespace-envoy) // This file is part of the QUICHE platform implementation, and is not to be @@ -8,7 +9,7 @@ #include -#include "common/http/utility.h" +#include "common/http/url_utility.h" #include "absl/strings/ascii.h" #include "absl/strings/str_cat.h" diff --git a/test/common/http/BUILD b/test/common/http/BUILD index e723a48abeb0..eeee6f0d6a09 100644 --- a/test/common/http/BUILD +++ b/test/common/http/BUILD @@ -363,6 +363,7 @@ envoy_cc_test( deps = [ "//source/common/http:exception_lib", "//source/common/http:header_map_lib", + "//source/common/http:url_utility_lib", "//source/common/http:utility_lib", "//source/common/network:address_lib", "//test/mocks/http:http_mocks", diff --git a/test/common/http/utility_test.cc b/test/common/http/utility_test.cc index 8751acddb024..4a42d7d8fd9e 100644 --- a/test/common/http/utility_test.cc +++ b/test/common/http/utility_test.cc @@ -9,6 +9,7 @@ #include "common/common/fmt.h" #include "common/http/exception.h" #include "common/http/header_map_impl.h" +#include "common/http/url_utility.h" #include "common/http/utility.h" #include "common/network/address_impl.h" @@ -1084,87 +1085,110 @@ TEST(HttpUtility, TestRejectTeHeaderTooLong) { TEST(Url, ParsingFails) { Utility::Url url; - EXPECT_FALSE(url.initialize("", false)); - EXPECT_FALSE(url.initialize("foo", false)); - EXPECT_FALSE(url.initialize("http://", false)); - EXPECT_FALSE(url.initialize("random_scheme://host.com/path", false)); - EXPECT_FALSE(url.initialize("http://www.foo.com", true)); - EXPECT_FALSE(url.initialize("foo.com", true)); + const bool is_connect = true; + EXPECT_FALSE(url.initialize("", !is_connect)); + EXPECT_FALSE(url.initialize("foo", !is_connect)); + EXPECT_FALSE(url.initialize("http://", !is_connect)); + EXPECT_FALSE(url.initialize("random_scheme://host.com/path", !is_connect)); + // Only port value in valid range (1-65535) is allowed. + EXPECT_FALSE(url.initialize("http://host.com:65536/path", !is_connect)); + EXPECT_FALSE(url.initialize("http://host.com:0/path", !is_connect)); + EXPECT_FALSE(url.initialize("http://host.com:-1/path", !is_connect)); + EXPECT_FALSE(url.initialize("http://host.com:port/path", !is_connect)); + + // Test parsing fails for CONNECT request URLs. + EXPECT_FALSE(url.initialize("http://www.foo.com", is_connect)); + EXPECT_FALSE(url.initialize("foo.com", is_connect)); + // Only port value in valid range (1-65535) is allowed. 
+ EXPECT_FALSE(url.initialize("foo.com:65536", is_connect)); + EXPECT_FALSE(url.initialize("foo.com:0", is_connect)); + EXPECT_FALSE(url.initialize("foo.com:-1", is_connect)); + EXPECT_FALSE(url.initialize("foo.com:port", is_connect)); } void validateUrl(absl::string_view raw_url, absl::string_view expected_scheme, - absl::string_view expected_host_port, absl::string_view expected_path) { + absl::string_view expected_host_port, absl::string_view expected_path, + uint16_t expected_port) { Utility::Url url; - ASSERT_TRUE(url.initialize(raw_url, false)) << "Failed to initialize " << raw_url; + ASSERT_TRUE(url.initialize(raw_url, /*is_connect=*/false)) << "Failed to initialize " << raw_url; EXPECT_EQ(url.scheme(), expected_scheme); EXPECT_EQ(url.hostAndPort(), expected_host_port); EXPECT_EQ(url.pathAndQueryParams(), expected_path); -} - -void validateConnectUrl(absl::string_view raw_url, absl::string_view expected_host_port) { - Utility::Url url; - ASSERT_TRUE(url.initialize(raw_url, true)) << "Failed to initialize " << raw_url; - EXPECT_TRUE(url.scheme().empty()); - EXPECT_TRUE(url.pathAndQueryParams().empty()); - EXPECT_EQ(url.hostAndPort(), expected_host_port); + EXPECT_EQ(url.port(), expected_port); } TEST(Url, ParsingTest) { - // Test url with no explicit path (with and without port) - validateUrl("http://www.host.com", "http", "www.host.com", "/"); - validateUrl("http://www.host.com:80", "http", "www.host.com:80", "/"); + // Test url with no explicit path (with and without port). + validateUrl("http://www.host.com", "http", "www.host.com", "/", 80); + validateUrl("http://www.host.com:80", "http", "www.host.com", "/", 80); // Test url with "/" path. - validateUrl("http://www.host.com:80/", "http", "www.host.com:80", "/"); - validateUrl("http://www.host.com/", "http", "www.host.com", "/"); + validateUrl("http://www.host.com:80/", "http", "www.host.com", "/", 80); + validateUrl("http://www.host.com/", "http", "www.host.com", "/", 80); // Test url with "?". - validateUrl("http://www.host.com:80/?", "http", "www.host.com:80", "/?"); - validateUrl("http://www.host.com/?", "http", "www.host.com", "/?"); + validateUrl("http://www.host.com:80/?", "http", "www.host.com", "/?", 80); + validateUrl("http://www.host.com/?", "http", "www.host.com", "/?", 80); // Test url with "?" but without slash. - validateUrl("http://www.host.com:80?", "http", "www.host.com:80", "?"); - validateUrl("http://www.host.com?", "http", "www.host.com", "?"); + validateUrl("http://www.host.com:80?", "http", "www.host.com", "/?", 80); + validateUrl("http://www.host.com?", "http", "www.host.com", "/?", 80); - // Test url with multi-character path - validateUrl("http://www.host.com:80/path", "http", "www.host.com:80", "/path"); - validateUrl("http://www.host.com/path", "http", "www.host.com", "/path"); + // Test url with multi-character path. + validateUrl("http://www.host.com:80/path", "http", "www.host.com", "/path", 80); + validateUrl("http://www.host.com/path", "http", "www.host.com", "/path", 80); - // Test url with multi-character path and ? at the end - validateUrl("http://www.host.com:80/path?", "http", "www.host.com:80", "/path?"); - validateUrl("http://www.host.com/path?", "http", "www.host.com", "/path?"); + // Test url with multi-character path and ? at the end. 
+ validateUrl("http://www.host.com:80/path?", "http", "www.host.com", "/path?", 80); + validateUrl("http://www.host.com/path?", "http", "www.host.com", "/path?", 80); - // Test https scheme - validateUrl("https://www.host.com", "https", "www.host.com", "/"); + // Test https scheme. + validateUrl("https://www.host.com", "https", "www.host.com", "/", 443); - // Test url with query parameter - validateUrl("http://www.host.com:80/?query=param", "http", "www.host.com:80", "/?query=param"); - validateUrl("http://www.host.com/?query=param", "http", "www.host.com", "/?query=param"); + // Test url with query parameter. + validateUrl("http://www.host.com:80/?query=param", "http", "www.host.com", "/?query=param", 80); + validateUrl("http://www.host.com/?query=param", "http", "www.host.com", "/?query=param", 80); - // Test url with query parameter but without slash - validateUrl("http://www.host.com:80?query=param", "http", "www.host.com:80", "?query=param"); - validateUrl("http://www.host.com?query=param", "http", "www.host.com", "?query=param"); + // Test url with query parameter but without slash. It will be normalized. + validateUrl("http://www.host.com:80?query=param", "http", "www.host.com", "/?query=param", 80); + validateUrl("http://www.host.com?query=param", "http", "www.host.com", "/?query=param", 80); - // Test url with multi-character path and query parameter - validateUrl("http://www.host.com:80/path?query=param", "http", "www.host.com:80", - "/path?query=param"); - validateUrl("http://www.host.com/path?query=param", "http", "www.host.com", "/path?query=param"); + // Test url with multi-character path and query parameter. + validateUrl("http://www.host.com:80/path?query=param", "http", "www.host.com", + "/path?query=param", 80); + validateUrl("http://www.host.com/path?query=param", "http", "www.host.com", "/path?query=param", + 80); - // Test url with multi-character path and more than one query parameter - validateUrl("http://www.host.com:80/path?query=param&query2=param2", "http", "www.host.com:80", - "/path?query=param&query2=param2"); + // Test url with multi-character path and more than one query parameter. + validateUrl("http://www.host.com:80/path?query=param&query2=param2", "http", "www.host.com", + "/path?query=param&query2=param2", 80); validateUrl("http://www.host.com/path?query=param&query2=param2", "http", "www.host.com", - "/path?query=param&query2=param2"); + "/path?query=param&query2=param2", 80); + // Test url with multi-character path, more than one query parameter and fragment validateUrl("http://www.host.com:80/path?query=param&query2=param2#fragment", "http", - "www.host.com:80", "/path?query=param&query2=param2#fragment"); + "www.host.com", "/path?query=param&query2=param2#fragment", 80); validateUrl("http://www.host.com/path?query=param&query2=param2#fragment", "http", "www.host.com", - "/path?query=param&query2=param2#fragment"); + "/path?query=param&query2=param2#fragment", 80); + + // Test url with non-default ports. 
+ validateUrl("https://www.host.com:8443", "https", "www.host.com:8443", "/", 8443); + validateUrl("http://www.host.com:8080", "http", "www.host.com:8080", "/", 8080); +} + +void validateConnectUrl(absl::string_view raw_url, absl::string_view expected_host_port, + uint16_t expected_port) { + Utility::Url url; + ASSERT_TRUE(url.initialize(raw_url, /*is_connect=*/true)) << "Failed to initialize " << raw_url; + EXPECT_TRUE(url.scheme().empty()); + EXPECT_TRUE(url.pathAndQueryParams().empty()); + EXPECT_EQ(url.hostAndPort(), expected_host_port); + EXPECT_EQ(url.port(), expected_port); } TEST(Url, ParsingForConnectTest) { - validateConnectUrl("host.com:443", "host.com:443"); - validateConnectUrl("host.com:80", "host.com:80"); + validateConnectUrl("host.com:443", "host.com:443", 443); + validateConnectUrl("host.com:80", "host.com:80", 80); } void validatePercentEncodingEncodeDecode(absl::string_view source, diff --git a/tools/spelling/spelling_dictionary.txt b/tools/spelling/spelling_dictionary.txt index 4a5aeb45e2de..4b8f9a058a24 100644 --- a/tools/spelling/spelling_dictionary.txt +++ b/tools/spelling/spelling_dictionary.txt @@ -344,6 +344,7 @@ WRONGPASS WRR WS WSA +WSS Welford's Wi XDS From 8e006ebc5c51e7264f4e11fa61fb27236403d2e1 Mon Sep 17 00:00:00 2001 From: Martin Matusiak Date: Fri, 10 Jul 2020 09:01:10 +1000 Subject: [PATCH 583/909] docs: fix grammar in outlier detection doc (#11971) Fixes grammar errors and does minor rephrasing to improve how the Outlier detection architecture doc reads. Risk Level: low Testing: N/A Docs Changes: grammar Release Notes: N/A Signed-off-by: Martin Matusiak --- .../intro/arch_overview/upstream/outlier.rst | 64 +++++++++---------- 1 file changed, 32 insertions(+), 32 deletions(-) diff --git a/docs/root/intro/arch_overview/upstream/outlier.rst b/docs/root/intro/arch_overview/upstream/outlier.rst index d8f35540513a..fd9dc7158a74 100644 --- a/docs/root/intro/arch_overview/upstream/outlier.rst +++ b/docs/root/intro/arch_overview/upstream/outlier.rst @@ -10,35 +10,35 @@ such as consecutive failures, temporal success rate, temporal latency, etc. Outl form of *passive* health checking. Envoy also supports :ref:`active health checking `. *Passive* and *active* health checking can be enabled together or independently, and form the basis for an overall upstream health checking solution. -Outlier detection is part of :ref:`cluster configuration ` -and it needs filters to report errors, timeouts, resets. Currently the following filters support +Outlier detection is part of the :ref:`cluster configuration ` +and it needs filters to report errors, timeouts, and resets. Currently, the following filters support outlier detection: :ref:`http router `, :ref:`tcp proxy ` and :ref:`redis proxy `. Detected errors fall into two categories: externally and locally originated errors. Externally generated errors -are transaction specific and occur on the upstream server in response to the received request. For example, HTTP server returning error code 500 or redis server returning payload which cannot be decoded. Those errors are generated on the upstream host after Envoy has successfully connected to it. -Locally originated errors are generated by Envoy in response to an event which interrupted or prevented communication with the upstream host. Examples of locally originated errors are timeout, TCP reset, inability to connect to a specified port, etc. +are transaction specific and occur on the upstream server in response to the received request. 
For example, an HTTP server returning error code 500 or a redis server returning a payload which cannot be decoded. Those errors are generated on the upstream host after Envoy has connected to it successfully. +Locally originated errors are generated by Envoy in response to an event which interrupted or prevented communication with the upstream host. Examples of locally originated errors are timeout, TCP reset, inability to connect to a specified port, etc. -Type of detected errors depends on filter type. :ref:`http router ` filter for example +The type of detected errors depends on the filter type. The :ref:`http router ` filter, for example, detects locally originated errors (timeouts, resets - errors related to connection to upstream host) and because it -also understands HTTP protocol it reports -errors returned by HTTP server (externally generated errors). In such scenario, even when connection to upstream HTTP server is successful, -transaction with the server may fail. -On the contrary, :ref:`tcp proxy ` filter does not understand any protocol above -TCP layer and reports only locally originated errors. - -In default configuration (:ref:`outlier_detection.split_external_local_origin_errors` is *false*) -locally originated errors are not distinguished from externally generated (transaction) errors and all end up -in the same bucket and are compared against +also understands the HTTP protocol it reports +errors returned by the HTTP server (externally generated errors). In such a scenario, even when the connection to the upstream HTTP server is successful, +the transaction with the server may fail. +By contrast, the :ref:`tcp proxy ` filter does not understand any protocol above +the TCP layer and reports only locally originated errors. + +Under the default configuration (:ref:`outlier_detection.split_external_local_origin_errors` is *false*) +locally originated errors are not distinguished from externally generated (transaction) errors, all end up +in the same bucket, and are compared against the :ref:`outlier_detection.consecutive_5xx`, :ref:`outlier_detection.consecutive_gateway_failure` and :ref:`outlier_detection.success_rate_stdev_factor` configuration items. For example, if connection to an upstream HTTP server fails twice because of timeout and -then, after successful connection, the server returns error code 500, the total error count will be 3. +then, after successful connection establishment, the server returns error code 500 then the total error count will be 3. Outlier detection may also be configured to distinguish locally originated errors from externally originated (transaction) errors. -It is done via -:ref:`outlier_detection.split_external_local_origin_errors` configuration item. +It is done via the +:ref:`outlier_detection.split_external_local_origin_errors` configuration item. In that mode locally originated errors are tracked by separate counters than externally originated (transaction) errors and the outlier detector may be configured to react to locally originated errors and ignore externally originated errors @@ -46,7 +46,7 @@ or vice-versa. It is important to understand that a cluster may be shared among several filter chains. If one filter chain ejects a host based on its outlier detection type, other filter chains will be also affected even though their -outlier detection type would not eject that host. +outlier detection type would not have ejected that host. 
Ejection algorithm ------------------ @@ -79,16 +79,16 @@ Envoy supports the following outlier detection types: Consecutive 5xx ^^^^^^^^^^^^^^^ -In default mode (:ref:`outlier_detection.split_external_local_origin_errors` is *false*) this detection type takes into account all generated errors: locally -originated and externally originated (transaction) type of errors. +In the default mode (:ref:`outlier_detection.split_external_local_origin_errors` is *false*) this detection type takes into account all generated errors: locally +originated and externally originated (transaction) errors. Errors generated by non-HTTP filters, like :ref:`tcp proxy ` or :ref:`redis proxy ` are internally mapped to HTTP 5xx codes and treated as such. -In split mode (:ref:`outlier_detection.split_external_local_origin_errors` is *true*) this detection type takes into account only externally originated (transaction) errors ignoring locally originated errors. -If an upstream host is HTTP-server, only 5xx types of error are taken into account (see :ref:`Consecutive Gateway Failure` for exceptions). +In split mode (:ref:`outlier_detection.split_external_local_origin_errors` is *true*) this detection type takes into account only externally originated (transaction) errors, ignoring locally originated errors. +If an upstream host is an HTTP-server, only 5xx types of error are taken into account (see :ref:`Consecutive Gateway Failure` for exceptions). For redis servers, served via :ref:`redis proxy ` only malformed responses from the server are taken into account. -Properly formatted responses, even when they carry operational error (like index not found, access denied) are not taken into account. +Properly formatted responses, even when they carry an operational error (like index not found, access denied) are not taken into account. If an upstream host returns some number of errors which are treated as consecutive 5xx type errors, it will be ejected. The number of consecutive 5xx required for ejection is controlled by @@ -99,8 +99,8 @@ the :ref:`outlier_detection.consecutive_5xx`. +This detection type takes into account a subset of 5xx errors, called "gateway errors" (502, 503 or 504 status code) +and is supported only by the :ref:`http router `. If an upstream host returns some number of consecutive "gateway errors" (502, 503 or 504 status code), it will be ejected. @@ -123,8 +123,8 @@ This detection type is supported by :ref:`http router ` value. Moreover, detection will not be performed for a cluster if the number of hosts @@ -132,8 +132,8 @@ with the minimum required request volume in an interval is less than the :ref:`outlier_detection.success_rate_minimum_hosts` value. -In default configuration mode (:ref:`outlier_detection.split_external_local_origin_errors` is *false*) -this detection type takes into account all type of errors: locally and externally originated. +In the default configuration mode (:ref:`outlier_detection.split_external_local_origin_errors` is *false*) +this detection type takes into account all types of errors: locally and externally originated. The :ref:`outlier_detection.enforcing_local_origin_success` config item is ignored. 
In split mode (:ref:`outlier_detection.split_external_local_origin_errors` is *true*), @@ -150,15 +150,15 @@ to externally originated errors only and :ref:`outlier_detection.enforcing_local Failure Percentage ^^^^^^^^^^^^^^^^^^ -Failure Percentage based outlier ejection functions similarly to the success rate detecion type, in +Failure Percentage based outlier detection functions similarly to success rate detection, in that it relies on success rate data from each host in a cluster. However, rather than compare those values to the mean success rate of the cluster as a whole, they are compared to a flat user-configured threshold. This threshold is configured via the :ref:`outlier_detection.failure_percentage_threshold` field. -The other configuration fields for failure percentage based ejection are similar to the fields for -success rate ejection. Failure percentage based ejection also obeys +The other configuration fields for failure percentage based detection are similar to the fields for +success rate detection. Failure percentage based detection also obeys :ref:`outlier_detection.split_external_local_origin_errors`; the enforcement percentages for externally- and locally-originated errors are controlled by :ref:`outlier_detection.enforcing_failure_percentage` From a6e86a643b6eb537d978f11ff5a8ace16575b849 Mon Sep 17 00:00:00 2001 From: danzh Date: Thu, 9 Jul 2020 23:32:01 -0400 Subject: [PATCH 584/909] quiche: update tar (#11964) Update to commit b2b8ff25f5a565324b93411ca29c3403ccbca969 Risk Level: low Testing: existing tests pass Signed-off-by: Dan Zhang --- bazel/external/quiche.BUILD | 26 +++ bazel/repository_locations.bzl | 6 +- .../quiche/platform/flags_list.h | 159 +++++++++--------- .../quiche/active_quic_listener_test.cc | 6 +- .../quiche/envoy_quic_client_session_test.cc | 6 +- .../quiche/envoy_quic_client_stream_test.cc | 6 +- .../quiche/envoy_quic_dispatcher_test.cc | 6 +- .../quiche/envoy_quic_server_session_test.cc | 7 +- .../quiche/envoy_quic_server_stream_test.cc | 16 +- .../integration/quic_http_integration_test.cc | 30 ++-- 10 files changed, 148 insertions(+), 120 deletions(-) diff --git a/bazel/external/quiche.BUILD b/bazel/external/quiche.BUILD index b9536fa6dbb3..79575bbe6545 100644 --- a/bazel/external/quiche.BUILD +++ b/bazel/external/quiche.BUILD @@ -1824,6 +1824,7 @@ envoy_cc_library( ":quic_core_crypto_encryption_lib", ":quic_core_framer_lib", ":quic_core_idle_network_detector_lib", + ":quic_core_legacy_version_encapsulator_lib", ":quic_core_mtu_discovery_lib", ":quic_core_network_blackhole_detector_lib", ":quic_core_one_block_arena_lib", @@ -2192,6 +2193,7 @@ envoy_cc_library( name = "quic_core_frames_frames_lib", srcs = [ "quiche/quic/core/frames/quic_ack_frame.cc", + "quiche/quic/core/frames/quic_ack_frequency_frame.cc", "quiche/quic/core/frames/quic_blocked_frame.cc", "quiche/quic/core/frames/quic_connection_close_frame.cc", "quiche/quic/core/frames/quic_crypto_frame.cc", @@ -2216,6 +2218,7 @@ envoy_cc_library( ], hdrs = [ "quiche/quic/core/frames/quic_ack_frame.h", + "quiche/quic/core/frames/quic_ack_frequency_frame.h", "quiche/quic/core/frames/quic_blocked_frame.h", "quiche/quic/core/frames/quic_connection_close_frame.h", "quiche/quic/core/frames/quic_crypto_frame.h", @@ -2567,6 +2570,29 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "quic_core_legacy_version_encapsulator_lib", + srcs = [ + "quiche/quic/core/quic_legacy_version_encapsulator.cc", + ], + hdrs = [ + "quiche/quic/core/quic_legacy_version_encapsulator.h", + ], + copts = quiche_copts, + 
repository = "@envoy", + tags = ["nofips"], + deps = [ + ":quic_core_crypto_crypto_handshake_lib", + ":quic_core_crypto_encryption_lib", + ":quic_core_packet_creator_lib", + ":quic_core_packets_lib", + ":quic_core_types_lib", + ":quic_core_utils_lib", + ":quic_platform", + ":quiche_common_platform", + ], +) + envoy_cc_library( name = "quic_core_linux_socket_utils_lib", srcs = select({ diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 5fabd0cbcb21..4c857edb853f 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -411,9 +411,9 @@ DEPENDENCY_REPOSITORIES = dict( cpe = "N/A", ), com_googlesource_quiche = dict( - # Static snapshot of https://quiche.googlesource.com/quiche/+archive/d88a2f7a9ff5f9f6be2f50411b15b091affe04d3.tar.gz - sha256 = "c1c5dc165f0509097fa3917d81988e4ac5f9f3da4c2361ee435dfa7f8f428016", - urls = ["https://storage.googleapis.com/quiche-envoy-integration/d88a2f7a9ff5f9f6be2f50411b15b091affe04d3.tar.gz"], + # Static snapshot of https://quiche.googlesource.com/quiche/+archive/b2b8ff25f5a565324b93411ca29c3403ccbca969.tar.gz + sha256 = "792924bbf27203bb0d1d08c99597a30793ef8f4cfa2df99792aea7200f1b27e3", + urls = ["https://storage.googleapis.com/quiche-envoy-integration/b2b8ff25f5a565324b93411ca29c3403ccbca969.tar.gz"], use_category = ["dataplane"], cpe = "N/A", ), diff --git a/source/extensions/quic_listeners/quiche/platform/flags_list.h b/source/extensions/quic_listeners/quiche/platform/flags_list.h index 9ced2934b27f..776521f42d0d 100644 --- a/source/extensions/quic_listeners/quiche/platform/flags_list.h +++ b/source/extensions/quic_listeners/quiche/platform/flags_list.h @@ -8,6 +8,9 @@ // consumed or referenced directly by other Envoy code. It serves purely as a // porting layer for QUICHE. +// This file is generated by //third_party/quic/tools:quic_flags_list in +// Google3. + #if defined(QUICHE_FLAG) QUICHE_FLAG( @@ -32,9 +35,6 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_ack_delay_alarm_granularity, false, "When true, ensure the ACK delay is never less than the alarm granularity when ACK " "decimation is enabled.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_advance_ack_timeout_update, true, - "If true, update ack timeout upon receiving an retransmittable frame.") - QUICHE_FLAG(bool, quic_reloadable_flag_quic_allow_backend_set_stream_ttl, false, "If true, check backend response header for X-Response-Ttl. If it is provided, the " "stream TTL is set. 
A QUIC stream will be immediately canceled when tries to write " @@ -50,7 +50,7 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_always_send_earliest_ack, false, "If true, SendAllPendingAcks always send the earliest ACK.") QUICHE_FLAG( - bool, quic_reloadable_flag_quic_avoid_leak_writer_buffer, false, + bool, quic_reloadable_flag_quic_avoid_leak_writer_buffer, true, "If true, QUIC will free writer-allocated packet buffer if writer->WritePacket is not called.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr2_add_ack_height_to_queueing_threshold, true, @@ -71,9 +71,6 @@ QUICHE_FLAG( bool, quic_reloadable_flag_quic_bbr2_limit_inflight_hi, false, "When true, the B2HI connection option limits reduction of inflight_hi to (1-Beta)*CWND.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_default_exit_startup_on_loss, true, - "If true, QUIC will enable connection options LRTT+BBQ2 by default.") - QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_donot_inject_bandwidth, true, "If true, do not inject bandwidth in BbrSender::AdjustNetworkParameters.") @@ -95,16 +92,13 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_no_bytes_acked_in_startup_recove QUICHE_FLAG(bool, quic_reloadable_flag_quic_bootstrap_cwnd_by_spdy_priority, true, "If true, bootstrap initial QUIC cwnd by SPDY priorities.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_bw_sampler_app_limited_starting_value, true, - "If true, quic::BandwidthSampler will start in application limited phase.") - QUICHE_FLAG(bool, quic_reloadable_flag_quic_conservative_bursts, false, "If true, set burst token to 2 in cwnd bootstrapping experiment.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_conservative_cwnd_and_pacing_gains, false, "If true, uses conservative cwnd gain and pacing gain when cwnd gets bootstrapped.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_default_enable_5rto_blackhole_detection2, false, +QUICHE_FLAG(bool, quic_reloadable_flag_quic_default_enable_5rto_blackhole_detection2, true, "If true, default-enable 5RTO blachole detection.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_default_on_pto, false, @@ -117,16 +111,25 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_default_to_bbr_v2, false, "If true, use BBRv2 as the default congestion controller. 
Takes precedence over " "--quic_default_to_bbr.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_determine_serialized_packet_fate_early, false, + "If true, determine a serialized packet's fate before the packet gets serialized.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_version_draft_25, false, + "If true, disable QUIC version h3-25.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_version_draft_27, false, + "If true, disable QUIC version h3-27.") + QUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_version_q043, false, "If true, disable QUIC version Q043.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_version_q046, false, "If true, disable QUIC version Q046.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_version_q048, false, +QUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_version_q048, true, "If true, disable QUIC version Q048.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_version_q049, false, +QUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_version_q049, true, "If true, disable QUIC version Q049.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_version_q050, false, @@ -135,16 +138,29 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_version_q050, false, QUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_version_t050, false, "If true, disable QUIC version h3-T050.") +QUICHE_FLAG( + bool, quic_reloadable_flag_quic_dispatcher_legacy_version_encapsulation, false, + "When true, QuicDispatcher supports decapsulation of Legacy Version Encapsulation packets.") + QUICHE_FLAG( bool, quic_reloadable_flag_quic_do_not_accept_stop_waiting, false, "In v44 and above, where STOP_WAITING is never sent, close the connection if it's received.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_donot_change_queued_ack, false, - "If true, do not change ACK in PostProcessAckFrame if an ACK has been queued.") +QUICHE_FLAG( + bool, quic_reloadable_flag_quic_do_not_close_stream_again_on_connection_close, false, + "If true, do not try to close stream again if stream fails to be closed upon connection close.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_donot_reset_ideal_next_packet_send_time, false, "If true, stop resetting ideal_next_packet_send_time_ in pacing sender.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_dont_pad_chlo, false, + "When true, do not pad the QUIC_CRYPTO CHLO message itself. 
Note that the packet " + "containing the CHLO will still be padded.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_dont_send_max_ack_delay_if_default, false, + "When true, QUIC_CRYPTO versions of QUIC will not send the max ACK delay unless it is " + "configured to a non-default value.") + QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_ack_decimation, true, "Default enables QUIC ack decimation and adds a connection option to disable it.") @@ -154,65 +170,55 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_loss_detection_experiment_at_ QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_loss_detection_tuner, false, "If true, allow QUIC loss detection tuning to be enabled by connection option ELDT.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_tls_resumption, false, +QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_tls_resumption_v2, false, "If true, enables support for TLS resumption in QUIC.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_version_draft_25_v3, true, - "If true, enable QUIC version h3-25.") - -QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_version_draft_27, true, - "If true, enable QUIC version h3-27.") - -QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_version_draft_28, false, - "If true, enable QUIC version h3-28.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_version_draft_29, false, + "If true, enable QUIC version h3-29.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_zero_rtt_for_tls, false, "If true, support for IETF QUIC 0-rtt is enabled.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_enabled, false, "") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_extend_idle_time_on_decryptable_packets, true, - "If true, only extend idle time on decryptable packets.") - QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_bbr_cwnd_in_bandwidth_resumption, true, "If true, adjust congestion window when doing bandwidth resumption in BBR.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_checking_should_generate_packet, false, - "If true, check ShouldGeneratePacket for every crypto packet.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_extra_padding_bytes, false, + "If true, consider frame expansion when calculating extra padding bytes to meet " + "minimum plaintext packet size required for header protection.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_last_inflight_packets_sent_time, false, - "If true, clear last_inflight_packets_sent_time_ of a packet number space when there " - "is no bytes in flight.") +QUICHE_FLAG( + bool, quic_reloadable_flag_quic_fix_gquic_stream_type, false, + "If true, do not use QuicUtil::IsBidirectionalStreamId() to determine gQUIC stream type.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_server_pto_timeout, false, - "If true, do not arm PTO on half RTT packets if they are the only ones in flight.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_min_crypto_frame_size, true, + "If true, include MinPlaintextPacketSize when deterine whether removing soft limit for " + "crypto frames.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_willing_and_able_to_write, false, - "If true, check connection level flow control for send control stream and qpack " - "streams in QuicSession::WillingAndAbleToWrite.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_packet_number_length, false, + "If true, take the largest acked packet into account when computing the sent packet " + "number length.") -QUICHE_FLAG( - bool, quic_reloadable_flag_quic_fix_write_pending_crypto_retransmission, false, - "If true, return from 
QuicCryptoStream::WritePendingCryptoRetransmission after partial writes.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_pto_timeout, true, + "If true, use 0 as ack_delay when calculate PTO timeout for INITIAL and HANDSHAKE " + "packet number space.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_server_pto_timeout, true, + "If true, do not arm PTO on half RTT packets if they are the only ones in flight.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_undecryptable_packets, false, + "If true, remove the head of line blocking caused by an unprocessable packet in the " + "undecryptable packets list.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_listener_never_fake_epollout, false, "If true, QuicListener::OnSocketIsWritable will always return false, which means there " "will never be a fake EPOLLOUT event in the next epoll iteration.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_move_amplification_limit, false, - "When true, always check the amplification limit before writing, not just for " - "handshake packets.") - -QUICHE_FLAG(bool, quic_reloadable_flag_quic_negotiate_ack_delay_time, true, - "If true, will negotiate the ACK delay time.") - QUICHE_FLAG(bool, quic_reloadable_flag_quic_no_dup_experiment_id_2, false, "If true, transport connection stats doesn't report duplicated experiments for same " "connection.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_notify_stream_id_manager_when_disconnected, false, - "If true, notify stream ID manager even connection disconnects.") - QUICHE_FLAG(bool, quic_reloadable_flag_quic_only_truncate_long_cids, true, "In IETF QUIC, only truncate long CIDs from the client's Initial, don't modify them.") @@ -230,49 +236,42 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_require_handshake_confirmation, fals "If true, require handshake confirmation for QUIC connections, functionally disabling " "0-rtt handshakes.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_save_user_agent_in_quic_session, false, + "If true, save user agent into in QuicSession.") + QUICHE_FLAG(bool, quic_reloadable_flag_quic_send_timestamps, false, "When the STMP connection option is sent by the client, timestamps in the QUIC ACK " "frame are sent and processed.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_send_two_alt_addresses, true, - "When true, GFE will send two AlternateServerAddress (IPv6+IPv4) instead of one.") - QUICHE_FLAG(bool, quic_reloadable_flag_quic_server_push, true, "If true, enable server push feature on QUIC.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_stop_sending_duplicate_max_streams, false, "If true, session does not send duplicate MAX_STREAMS.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_support_handshake_done_in_t050, false, + "If true, support HANDSHAKE_DONE frame in T050.") + QUICHE_FLAG(bool, quic_reloadable_flag_quic_testonly_default_false, false, "A testonly reloadable flag that will always default to false.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_testonly_default_true, true, "A testonly reloadable flag that will always default to true.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_tls_enforce_valid_sni, false, - "If true, reject IETF QUIC connections with invalid SNI.") - QUICHE_FLAG(bool, quic_reloadable_flag_quic_unified_iw_options, false, "When true, set the initial congestion control window from connection options in " "QuicSentPacketManager rather than TcpCubicSenderBytes.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_update_ack_alarm_in_send_all_pending_acks, false, - "If true, QuicConnection::SendAllPendingAcks will Update 
instead of Set the ack alarm.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_update_packet_size, false, + "If true, update packet size when the first frame gets queued.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_dispatcher_clock_for_read_timestamp, false, +QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_dispatcher_clock_for_read_timestamp, true, "If true, in QuicListener, use QuicDispatcher's clock as the source for packet read " "timestamps.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_header_stage_idle_list2, false, "If true, use header stage idle list for QUIC connections in GFE.") -QUICHE_FLAG( - bool, quic_reloadable_flag_quic_use_idle_network_detector, false, - "If true, use idle network detector to detect handshake timeout and idle network timeout.") - -QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_ip_bandwidth_module, true, - "If true, use IpBandwidthModule for cwnd bootstrapping if it is registered.") - QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_leto_key_exchange, false, "If true, QUIC will attempt to use the Leto key exchange service and only fall back to " "local key exchange if that fails.") @@ -295,28 +294,22 @@ QUICHE_FLAG( bool, quic_restart_flag_quic_allow_loas_multipacket_chlo, false, "If true, inspects QUIC CHLOs for kLOAS and early creates sessions to allow multi-packet CHLOs") +QUICHE_FLAG(bool, quic_restart_flag_quic_dispatcher_track_top_1k_client_ip, true, + "If true, GfeQuicDispatcher will track the top 1000 client IPs.") + QUICHE_FLAG(bool, quic_restart_flag_quic_google_transport_param_omit_old, false, "When true, QUIC+TLS will not send nor parse the old-format Google-specific transport " "parameters.") -QUICHE_FLAG( - bool, quic_restart_flag_quic_google_transport_param_send_new, false, - "When true, QUIC+TLS will send and parse the new-format Google-specific transport parameters.") +QUICHE_FLAG(bool, quic_restart_flag_quic_ignore_cid_first_byte_in_rx_ring_bpf, true, + "If true, ignore CID first byte in BPF for RX_RING.") -QUICHE_FLAG(bool, quic_restart_flag_quic_ignore_cid_first_byte_in_bpf, false, - "If true, ignore CID first byte in BPF for both UDP socket and RX_RING.") +QUICHE_FLAG(bool, quic_restart_flag_quic_memslice_ensure_ownership, true, + "Call gfe2::MemSlice::EnsureReferenceCounted in the constructor of QuicMemSlice.") QUICHE_FLAG(bool, quic_restart_flag_quic_offload_pacing_to_usps2, false, "If true, QUIC offload pacing when using USPS as egress method.") -QUICHE_FLAG(bool, quic_restart_flag_quic_replace_gfe_connection_ids, false, - "When true, GfeQuicDispatcher will replace long connection IDs with 64bit ones before " - "inserting them in the connection map.") - -QUICHE_FLAG(bool, quic_restart_flag_quic_replace_time_wait_list_encryption_level, false, - "Replace the usage of ConnectionData::encryption_level in quic_time_wait_list_manager " - "with a new TimeWaitAction.") - QUICHE_FLAG(bool, quic_restart_flag_quic_rx_ring_use_tpacket_v3, false, "If true, use TPACKET_V3 for QuicRxRing instead of TPACKET_V2.") @@ -341,7 +334,7 @@ QUICHE_FLAG(bool, quic_restart_flag_quic_use_pigeon_socket_to_backend, false, "If true, create a shared pigeon socket for all quic to backend connections and switch " "to use it after successful handshake.") -QUICHE_FLAG(bool, spdy_reloadable_flag_fix_spdy_header_coalescing, false, +QUICHE_FLAG(bool, spdy_reloadable_flag_fix_spdy_header_coalescing, true, "If true, when coalescing multivalued spdy headers, only headers that exist in spdy " "headers block are updated.") @@ -366,8 +359,8 @@ QUICHE_FLAG(bool, 
quic_enforce_single_packet_chlo, true, "If true, enforce that QUIC CHLOs fit in one packet") QUICHE_FLAG(int64_t, quic_time_wait_list_max_connections, 600000, - "Maximum number of connections on the time-wait list. A negative value implies no " - "configured limit.") + "Maximum number of connections on the time-wait list. " + "A negative value implies no configured limit.") QUICHE_FLAG(int64_t, quic_time_wait_list_seconds, 200, "Time period for which a given connection_id should live in " @@ -452,7 +445,7 @@ QUICHE_FLAG(double, quic_ack_aggregation_bandwidth_threshold, 1.0, "bandwidth * this flag), consider the current aggregation completed " "and starts a new one.") -QUICHE_FLAG(int32_t, quic_anti_amplification_factor, 3, +QUICHE_FLAG(int32_t, quic_anti_amplification_factor, 5, "Anti-amplification factor. Before address validation, server will " "send no more than factor times bytes received.") diff --git a/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc b/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc index fb644bd68df9..cc9ef16f2abf 100644 --- a/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc +++ b/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc @@ -84,9 +84,9 @@ class ActiveQuicListenerTest : public QuicMultiVersionTest { return quic::CurrentSupportedVersionsWithQuicCrypto(); } bool use_http3 = GetParam().second == QuicVersionType::Iquic; - SetQuicReloadableFlag(quic_enable_version_draft_28, use_http3); - SetQuicReloadableFlag(quic_enable_version_draft_27, use_http3); - SetQuicReloadableFlag(quic_enable_version_draft_25_v3, use_http3); + SetQuicReloadableFlag(quic_enable_version_draft_29, use_http3); + SetQuicReloadableFlag(quic_disable_version_draft_27, !use_http3); + SetQuicReloadableFlag(quic_disable_version_draft_25, !use_http3); return quic::CurrentSupportedVersions(); }()[0]) {} diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_client_session_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_client_session_test.cc index 5707ae2dbfca..5db43230cd7c 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_client_session_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_client_session_test.cc @@ -95,9 +95,9 @@ class EnvoyQuicClientSessionTest : public testing::TestWithParam { : api_(Api::createApiForTest(time_system_)), dispatcher_(api_->allocateDispatcher("test_thread")), connection_helper_(*dispatcher_), alarm_factory_(*dispatcher_, *connection_helper_.GetClock()), quic_version_([]() { - SetQuicReloadableFlag(quic_enable_version_draft_28, GetParam()); - SetQuicReloadableFlag(quic_enable_version_draft_27, GetParam()); - SetQuicReloadableFlag(quic_enable_version_draft_25_v3, GetParam()); + SetQuicReloadableFlag(quic_enable_version_draft_29, GetParam()); + SetQuicReloadableFlag(quic_disable_version_draft_27, !GetParam()); + SetQuicReloadableFlag(quic_disable_version_draft_25, !GetParam()); return quic::ParsedVersionOfIndex(quic::CurrentSupportedVersions(), 0); }()), peer_addr_(Network::Utility::getAddressWithPort(*Network::Utility::getIpv6LoopbackAddress(), diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_client_stream_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_client_stream_test.cc index 120fc7d83b35..2a32df6319ed 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_client_stream_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_client_stream_test.cc @@ -25,9 +25,9 @@ class EnvoyQuicClientStreamTest : public 
testing::TestWithParam { : api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher("test_thread")), connection_helper_(*dispatcher_), alarm_factory_(*dispatcher_, *connection_helper_.GetClock()), quic_version_([]() { - SetQuicReloadableFlag(quic_enable_version_draft_28, GetParam()); - SetQuicReloadableFlag(quic_enable_version_draft_27, GetParam()); - SetQuicReloadableFlag(quic_enable_version_draft_25_v3, GetParam()); + SetQuicReloadableFlag(quic_enable_version_draft_29, GetParam()); + SetQuicReloadableFlag(quic_disable_version_draft_27, !GetParam()); + SetQuicReloadableFlag(quic_disable_version_draft_25, !GetParam()); return quic::CurrentSupportedVersions()[0]; }()), peer_addr_(Network::Utility::getAddressWithPort(*Network::Utility::getIpv6LoopbackAddress(), diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_dispatcher_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_dispatcher_test.cc index 07f036571678..7036935c5a67 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_dispatcher_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_dispatcher_test.cc @@ -63,9 +63,9 @@ class EnvoyQuicDispatcherTest : public QuicMultiVersionTest, return quic::CurrentSupportedVersionsWithQuicCrypto(); } bool use_http3 = GetParam().second == QuicVersionType::Iquic; - SetQuicReloadableFlag(quic_enable_version_draft_28, use_http3); - SetQuicReloadableFlag(quic_enable_version_draft_27, use_http3); - SetQuicReloadableFlag(quic_enable_version_draft_25_v3, use_http3); + SetQuicReloadableFlag(quic_enable_version_draft_29, use_http3); + SetQuicReloadableFlag(quic_disable_version_draft_27, !use_http3); + SetQuicReloadableFlag(quic_disable_version_draft_25, !use_http3); return quic::CurrentSupportedVersions(); }()), quic_version_(version_manager_.GetSupportedVersions()[0]), diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_server_session_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_server_session_test.cc index df0d61491990..28e04399b8c7 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_server_session_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_server_session_test.cc @@ -103,9 +103,9 @@ class EnvoyQuicServerSessionTest : public testing::TestWithParam { : api_(Api::createApiForTest(time_system_)), dispatcher_(api_->allocateDispatcher("test_thread")), connection_helper_(*dispatcher_), alarm_factory_(*dispatcher_, *connection_helper_.GetClock()), quic_version_([]() { - SetQuicReloadableFlag(quic_enable_version_draft_28, GetParam()); - SetQuicReloadableFlag(quic_enable_version_draft_27, GetParam()); - SetQuicReloadableFlag(quic_enable_version_draft_25_v3, GetParam()); + SetQuicReloadableFlag(quic_enable_version_draft_29, GetParam()); + SetQuicReloadableFlag(quic_disable_version_draft_27, !GetParam()); + SetQuicReloadableFlag(quic_disable_version_draft_25, !GetParam()); return quic::ParsedVersionOfIndex(quic::CurrentSupportedVersions(), 0); }()), listener_stats_({ALL_LISTENER_STATS(POOL_COUNTER(listener_config_.listenerScope()), @@ -143,6 +143,7 @@ class EnvoyQuicServerSessionTest : public testing::TestWithParam { envoy_quic_session_.Initialize(); setQuicConfigWithDefaultValues(envoy_quic_session_.config()); envoy_quic_session_.OnConfigNegotiated(); + quic::test::QuicConnectionPeer::SetAddressValidated(quic_connection_); // Switch to a encryption forward secure crypto stream. 
quic::test::QuicServerSessionBasePeer::SetCryptoStream(&envoy_quic_session_, nullptr); diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_server_stream_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_server_stream_test.cc index 240a80cdf05e..9cfecf56bbe5 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_server_stream_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_server_stream_test.cc @@ -1,5 +1,14 @@ #include +#pragma GCC diagnostic push +// QUICHE allows unused parameters. +#pragma GCC diagnostic ignored "-Wunused-parameter" +// QUICHE uses offsetof(). +#pragma GCC diagnostic ignored "-Winvalid-offsetof" + +#include "quiche/quic/test_tools/quic_connection_peer.h" +#pragma GCC diagnostic pop + #include "common/event/libevent_scheduler.h" #include "common/http/headers.h" @@ -30,9 +39,9 @@ class EnvoyQuicServerStreamTest : public testing::TestWithParam { : api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher("test_thread")), connection_helper_(*dispatcher_), alarm_factory_(*dispatcher_, *connection_helper_.GetClock()), quic_version_([]() { - SetQuicReloadableFlag(quic_enable_version_draft_28, GetParam()); - SetQuicReloadableFlag(quic_enable_version_draft_27, GetParam()); - SetQuicReloadableFlag(quic_enable_version_draft_25_v3, GetParam()); + SetQuicReloadableFlag(quic_enable_version_draft_29, GetParam()); + SetQuicReloadableFlag(quic_disable_version_draft_27, !GetParam()); + SetQuicReloadableFlag(quic_disable_version_draft_25, !GetParam()); return quic::CurrentSupportedVersions()[0]; }()), listener_stats_({ALL_LISTENER_STATS(POOL_COUNTER(listener_config_.listenerScope()), @@ -51,6 +60,7 @@ class EnvoyQuicServerStreamTest : public testing::TestWithParam { response_trailers_{{"trailer-key", "trailer-value"}} { quic_stream_->setRequestDecoder(stream_decoder_); quic_stream_->addCallbacks(stream_callbacks_); + quic::test::QuicConnectionPeer::SetAddressValidated(&quic_connection_); quic_session_.ActivateStream(std::unique_ptr(quic_stream_)); EXPECT_CALL(quic_session_, ShouldYield(_)).WillRepeatedly(testing::Return(false)); EXPECT_CALL(quic_session_, WritevData(_, _, _, _, _, _)) diff --git a/test/extensions/quic_listeners/quiche/integration/quic_http_integration_test.cc b/test/extensions/quic_listeners/quiche/integration/quic_http_integration_test.cc index dd9092be7e89..bbe34b658e7b 100644 --- a/test/extensions/quic_listeners/quiche/integration/quic_http_integration_test.cc +++ b/test/extensions/quic_listeners/quiche/integration/quic_http_integration_test.cc @@ -54,9 +54,9 @@ class QuicHttpIntegrationTest : public HttpIntegrationTest, public QuicMultiVers return quic::CurrentSupportedVersionsWithQuicCrypto(); } bool use_http3 = GetParam().second == QuicVersionType::Iquic; - SetQuicReloadableFlag(quic_enable_version_draft_28, use_http3); - SetQuicReloadableFlag(quic_enable_version_draft_27, use_http3); - SetQuicReloadableFlag(quic_enable_version_draft_25_v3, use_http3); + SetQuicReloadableFlag(quic_enable_version_draft_29, use_http3); + SetQuicReloadableFlag(quic_disable_version_draft_27, !use_http3); + SetQuicReloadableFlag(quic_disable_version_draft_25, !use_http3); return quic::CurrentSupportedVersions(); }()), crypto_config_(std::make_unique()), conn_helper_(*dispatcher_), @@ -77,7 +77,7 @@ class QuicHttpIntegrationTest : public HttpIntegrationTest, public QuicMultiVers // TODO(danzh) Implement retry upon version mismatch and modify test frame work to specify a // different version set on server side to test that. 
auto connection = std::make_unique( - getNextServerDesignatedConnectionId(), server_addr_, conn_helper_, alarm_factory_, + getNextConnectionId(), server_addr_, conn_helper_, alarm_factory_, quic::ParsedQuicVersionVector{supported_versions_[0]}, local_addr, *dispatcher_, nullptr); quic_connection_ = connection.get(); auto session = std::make_unique( @@ -107,14 +107,13 @@ class QuicHttpIntegrationTest : public HttpIntegrationTest, public QuicMultiVers return codec; } - quic::QuicConnectionId getNextServerDesignatedConnectionId() { - quic::QuicCryptoClientConfig::CachedState* cached = crypto_config_.LookupOrCreate(server_id_); - // If the cached state indicates that we should use a server-designated - // connection ID, then return that connection ID. - quic::QuicConnectionId conn_id = cached->has_server_designated_connection_id() - ? cached->GetNextServerDesignatedConnectionId() - : quic::EmptyQuicConnectionId(); - return conn_id.IsEmpty() ? quic::QuicUtils::CreateRandomConnectionId() : conn_id; + quic::QuicConnectionId getNextConnectionId() { + if (designated_connection_ids_.empty()) { + return quic::QuicUtils::CreateRandomConnectionId(); + } + quic::QuicConnectionId cid = designated_connection_ids_.front(); + designated_connection_ids_.pop_front(); + return cid; } void initialize() override { @@ -189,6 +188,7 @@ class QuicHttpIntegrationTest : public HttpIntegrationTest, public QuicMultiVers bool set_reuse_port_{false}; const std::string injected_resource_filename_; AtomicFileUpdater file_updater_; + std::list designated_connection_ids_; }; INSTANTIATE_TEST_SUITE_P(QuicHttpIntegrationTests, QuicHttpIntegrationTest, @@ -290,13 +290,12 @@ TEST_P(QuicHttpIntegrationTest, MultipleQuicListenersWithBPF) { set_reuse_port_ = true; initialize(); std::vector codec_clients; - quic::QuicCryptoClientConfig::CachedState* cached = crypto_config_.LookupOrCreate(server_id_); for (size_t i = 1; i <= concurrency_; ++i) { // The BPF filter looks at the 1st word of connection id in the packet // header. And currently all QUIC versions support 8 bytes connection id. So // create connections with the first 4 bytes of connection id different from each // other so they should be evenly distributed. - cached->add_server_designated_connection_id(quic::test::TestConnectionId(i << 32)); + designated_connection_ids_.push_back(quic::test::TestConnectionId(i << 32)); codec_clients.push_back(makeHttpConnection(lookupPort("http"))); } if (GetParam().first == Network::Address::IpVersion::v4) { @@ -333,13 +332,12 @@ TEST_P(QuicHttpIntegrationTest, MultipleQuicListenersNoBPF) { #undef SO_ATTACH_REUSEPORT_CBPF #endif std::vector codec_clients; - quic::QuicCryptoClientConfig::CachedState* cached = crypto_config_.LookupOrCreate(server_id_); for (size_t i = 1; i <= concurrency_; ++i) { // The BPF filter looks at the 1st byte of connection id in the packet // header. And currently all QUIC versions support 8 bytes connection id. So // create connections with the first 4 bytes of connection id different from each // other so they should be evenly distributed. 
- cached->add_server_designated_connection_id(quic::test::TestConnectionId(i << 32)); + designated_connection_ids_.push_back(quic::test::TestConnectionId(i << 32)); codec_clients.push_back(makeHttpConnection(lookupPort("http"))); } if (GetParam().first == Network::Address::IpVersion::v4) { From 706d9761dd1336ce0b650b34dae649312ec85c0b Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Thu, 9 Jul 2020 21:06:05 -0700 Subject: [PATCH 585/909] ci: use run_under to test tap (#12018) When `ssl_integration_test` are cached, no `pb_text` will be generated locally, testing it with `--run_under` makes it possible to be cached. Signed-off-by: Lizan Zhou --- .devcontainer/Dockerfile | 2 +- .devcontainer/devcontainer.json | 4 +++- bazel/test/BUILD | 3 +++ bazel/test/verify_tap_test.sh | 16 ++++++++++++++++ ci/do_ci.sh | 10 +--------- 5 files changed, 24 insertions(+), 11 deletions(-) create mode 100644 bazel/test/BUILD create mode 100755 bazel/test/verify_tap_test.sh diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 0646398054d9..01a4fd3940ad 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -14,7 +14,7 @@ RUN apt-get -y update \ && groupmod -g 65515 pcap && chgrp pcap /usr/sbin/tcpdump \ # Create a non-root user to use if preferred - see https://aka.ms/vscode-remote/containers/non-root-user. && groupadd --gid $USER_GID $USERNAME \ - && useradd -s /bin/bash --uid $USER_UID --gid $USER_GID -m $USERNAME -d /build \ + && useradd -s /bin/bash --uid $USER_UID --gid $USER_GID -m $USERNAME -G pcap -d /build \ # [Optional] Add sudo support for non-root user && echo $USERNAME ALL=\(root\) NOPASSWD:ALL > /etc/sudoers.d/$USERNAME \ && chmod 0440 /etc/sudoers.d/$USERNAME diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index c4ffcb978205..462b00ee78d0 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -4,6 +4,8 @@ "runArgs": [ "--user=vscode", "--cap-add=SYS_PTRACE", + "--cap-add=NET_RAW", + "--cap-add=NET_ADMIN", "--security-opt=seccomp=unconfined", "--volume=${env:HOME}:${env:HOME}", "--volume=envoy-build:/build", @@ -39,4 +41,4 @@ "webfreak.debug", "ms-python.python" ] -} \ No newline at end of file +} diff --git a/bazel/test/BUILD b/bazel/test/BUILD new file mode 100644 index 000000000000..0a40c2f107a0 --- /dev/null +++ b/bazel/test/BUILD @@ -0,0 +1,3 @@ +licenses(["notice"]) # Apache 2 + +exports_files(["verify_tap_test.sh"]) diff --git a/bazel/test/verify_tap_test.sh b/bazel/test/verify_tap_test.sh new file mode 100755 index 000000000000..4a047e27e907 --- /dev/null +++ b/bazel/test/verify_tap_test.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +set -ex + +# Clear existing tap directory if previous run wasn't in sandbox +rm -rf tap + +mkdir -p tap +TAP_TMP="$(realpath tap)" + +TAP_PATH="${TAP_TMP}/tap" "$@" + +# TODO(htuch): Check for pcap, now CI (with or without RBE) does have +# enough capabilities. +# Verify that some pb_text files have been created. +ls -l "${TAP_TMP}"/tap_*.pb_text > /dev/null diff --git a/ci/do_ci.sh b/ci/do_ci.sh index 7b1221ed82e0..e3b8f503e298 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -169,17 +169,9 @@ elif [[ "$CI_TARGET" == "bazel.asan" ]]; then # works. This requires that we set TAP_PATH. We do this under bazel.asan to # ensure a debug build in CI. echo "Validating integration test traffic tapping..." 
- TAP_TMP=/tmp/tap/ - rm -rf "${TAP_TMP}" - mkdir -p "${TAP_TMP}" bazel_with_collection test ${BAZEL_BUILD_OPTIONS} \ - --strategy=TestRunner=local --test_env=TAP_PATH="${TAP_TMP}/tap" \ - --test_env=PATH="/usr/sbin:${PATH}" \ + --run_under=@envoy//bazel/test:verify_tap_test.sh \ //test/extensions/transport_sockets/tls/integration:ssl_integration_test - # Verify that some pb_text files have been created. We can't check for pcap, - # since tcpdump is not available in general due to CircleCI lack of support - # for privileged Docker executors. - ls -l "${TAP_TMP}"/tap_*.pb_text > /dev/null exit 0 elif [[ "$CI_TARGET" == "bazel.tsan" ]]; then setup_clang_toolchain From d828958b591a6d79f4b5fa608ece9962b7afbe32 Mon Sep 17 00:00:00 2001 From: Dhi Aurrahman Date: Fri, 10 Jul 2020 22:01:44 +0700 Subject: [PATCH 586/909] build: Remove chromium_url (#12023) This removes chromium_url in favor of GURL. Risk Level: Low Testing: Existing tests Docs Changes: N/A Release Notes: N/A Fixes: #6588 Signed-off-by: Dhi Aurrahman --- .bazelrc | 2 +- ci/run_clang_tidy.sh | 8 +- source/common/chromium_url/BUILD | 28 -- source/common/chromium_url/LICENSE | 27 -- source/common/chromium_url/README.md | 16 - source/common/chromium_url/envoy_shim.h | 17 - source/common/chromium_url/url_canon.cc | 16 - source/common/chromium_url/url_canon.h | 186 -------- .../common/chromium_url/url_canon_internal.cc | 295 ------------- .../common/chromium_url/url_canon_internal.h | 204 --------- source/common/chromium_url/url_canon_path.cc | 413 ------------------ .../chromium_url/url_canon_stdstring.cc | 33 -- .../common/chromium_url/url_canon_stdstring.h | 58 --- source/common/chromium_url/url_parse.h | 49 --- .../common/chromium_url/url_parse_internal.h | 18 - source/common/http/BUILD | 6 +- source/common/http/path_utility.cc | 13 +- 17 files changed, 12 insertions(+), 1377 deletions(-) delete mode 100644 source/common/chromium_url/BUILD delete mode 100644 source/common/chromium_url/LICENSE delete mode 100644 source/common/chromium_url/README.md delete mode 100644 source/common/chromium_url/envoy_shim.h delete mode 100644 source/common/chromium_url/url_canon.cc delete mode 100644 source/common/chromium_url/url_canon.h delete mode 100644 source/common/chromium_url/url_canon_internal.cc delete mode 100644 source/common/chromium_url/url_canon_internal.h delete mode 100644 source/common/chromium_url/url_canon_path.cc delete mode 100644 source/common/chromium_url/url_canon_stdstring.cc delete mode 100644 source/common/chromium_url/url_canon_stdstring.h delete mode 100644 source/common/chromium_url/url_parse.h delete mode 100644 source/common/chromium_url/url_parse_internal.h diff --git a/.bazelrc b/.bazelrc index fb683a2bc9b8..5031f41ab8fa 100644 --- a/.bazelrc +++ b/.bazelrc @@ -128,7 +128,7 @@ build:coverage --strategy=CoverageReport=sandboxed,local build:coverage --experimental_use_llvm_covmap build:coverage --collect_code_coverage build:coverage --test_tag_filters=-nocoverage -build:coverage --instrumentation_filter="//source(?!/common/chromium_url|/extensions/quic_listeners/quiche/platform)[/:],//include[/:]" +build:coverage --instrumentation_filter="//source(?!/extensions/quic_listeners/quiche/platform)[/:],//include[/:]" coverage:test-coverage --test_arg="-l trace" coverage:fuzz-coverage --config=asan-fuzzer coverage:fuzz-coverage --run_under=@envoy//bazel/coverage:fuzz_coverage_wrapper.sh diff --git a/ci/run_clang_tidy.sh b/ci/run_clang_tidy.sh index bbce5f8854f1..5b46c82789c8 100755 --- a/ci/run_clang_tidy.sh +++ 
b/ci/run_clang_tidy.sh @@ -50,19 +50,13 @@ function exclude_testdata() { grep -v tools/testdata/check_format/ } -# Do not run clang-tidy against Chromium URL import, this needs to largely -# reflect the upstream structure. -function exclude_chromium_url() { - grep -v source/common/chromium_url/ -} - # Exclude files in third_party which are temporary forks from other OSS projects. function exclude_third_party() { grep -v third_party/ } function filter_excludes() { - exclude_testdata | exclude_chromium_url | exclude_win32_impl | exclude_macos_impl | exclude_third_party + exclude_testdata | exclude_win32_impl | exclude_macos_impl | exclude_third_party } if [[ -z "${DIFF_REF}" && "${BUILD_REASON}" != "PullRequest" ]]; then diff --git a/source/common/chromium_url/BUILD b/source/common/chromium_url/BUILD deleted file mode 100644 index 2d4acb348765..000000000000 --- a/source/common/chromium_url/BUILD +++ /dev/null @@ -1,28 +0,0 @@ -load( - "//bazel:envoy_build_system.bzl", - "envoy_cc_library", - "envoy_package", -) - -licenses(["notice"]) # Apache 2 - -envoy_package() - -envoy_cc_library( - name = "chromium_url", - srcs = [ - "url_canon.cc", - "url_canon_internal.cc", - "url_canon_path.cc", - "url_canon_stdstring.cc", - ], - hdrs = [ - "envoy_shim.h", - "url_canon.h", - "url_canon_internal.h", - "url_canon_stdstring.h", - "url_parse.h", - "url_parse_internal.h", - ], - deps = ["//source/common/common:assert_lib"], -) diff --git a/source/common/chromium_url/LICENSE b/source/common/chromium_url/LICENSE deleted file mode 100644 index a32e00ce6be3..000000000000 --- a/source/common/chromium_url/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2015 The Chromium Authors. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
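For readers unfamiliar with the canonicalizer being deleted here (and re-exported by the GURL dependency this patch switches to), the following standalone sketch shows how the CanonicalizePath() declared in the snapshot's url_canon.h is typically driven. It is illustrative only, not part of the diff: it uses the chromium_url names exactly as declared in the deleted files, assumes the Component(begin, len) constructor and begin/len members from the minified url_parse.h, and assumes GURL exposes the same call shape under the url:: namespace.

// Hypothetical usage sketch of the path canonicalizer removed by this patch;
// GURL's url::CanonicalizePath() is assumed to take the same arguments as the
// chromium_url snapshot shown in this diff.
#include <iostream>
#include <string>

#include "common/chromium_url/url_canon.h"

int main() {
  const std::string input = "/a/./b/../c";

  // Stack-allocated output buffer from url_canon.h; most paths fit without
  // touching the heap.
  chromium_url::RawCanonOutput<1024> output;

  // Component(begin, len) marks the portion of |input| to canonicalize.
  chromium_url::Component in_path(0, static_cast<int>(input.size()));
  chromium_url::Component out_path;

  // Resolves "." and ".." segments and re-escapes characters as needed;
  // |out_path| is set to the range of the canonical path within |output|.
  if (chromium_url::CanonicalizePath(input.data(), in_path, &output, &out_path)) {
    std::cout << std::string(output.data() + out_path.begin, out_path.len)
              << "\n";  // Expected: "/a/c"
  }
  return 0;
}

After this patch, the equivalent call in source/common/http/path_utility.cc goes through the googleurl (GURL) copy of the same code instead of this local snapshot, the intent being that the normalization behaviour originally added for CVE-2019-9901 stays the same (existing tests cover it).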
diff --git a/source/common/chromium_url/README.md b/source/common/chromium_url/README.md deleted file mode 100644 index 32e251c82d4d..000000000000 --- a/source/common/chromium_url/README.md +++ /dev/null @@ -1,16 +0,0 @@ -This is a manually minified variant of -https://chromium.googlesource.com/chromium/src.git/+archive/74.0.3729.15/url.tar.gz, -providing just the parts needed for `url::CanonicalizePath()`. This is intended -to support a security release fix for CVE-2019-9901. Long term we need this to -be moved to absl or QUICHE for upgrades and long-term support. - -Some specific transforms of interest: -* The namespace `url` was changed to `chromium_url`. -* `url_parse.h` is minified to just `Component` and flattened back into the URL - directory. It does not contain any non-Chromium authored code any longer and - so does not have a separate LICENSE. -* `envoy_shim.h` adapts various macros to the Envoy context. -* Anything not reachable from `url::CanonicalizePath()` has been dropped. -* Header include paths have changed as needed. -* BUILD was manually written. -* Various clang-tidy and format fixes. diff --git a/source/common/chromium_url/envoy_shim.h b/source/common/chromium_url/envoy_shim.h deleted file mode 100644 index 2b7443926c1f..000000000000 --- a/source/common/chromium_url/envoy_shim.h +++ /dev/null @@ -1,17 +0,0 @@ -#pragma once - -#include "common/common/assert.h" - -// This is a minimal Envoy adaptation layer for the Chromium URL library. -// NOLINT(namespace-envoy) - -#define DISALLOW_COPY_AND_ASSIGN(TypeName) \ - TypeName(const TypeName&) = delete; \ - TypeName& operator=(const TypeName&) = delete - -#define EXPORT_TEMPLATE_DECLARE(x) -#define EXPORT_TEMPLATE_DEFINE(x) -#define COMPONENT_EXPORT(x) - -#define DCHECK(x) ASSERT(x) -#define NOTREACHED() NOT_REACHED_GCOVR_EXCL_LINE diff --git a/source/common/chromium_url/url_canon.cc b/source/common/chromium_url/url_canon.cc deleted file mode 100644 index b9ad1b829726..000000000000 --- a/source/common/chromium_url/url_canon.cc +++ /dev/null @@ -1,16 +0,0 @@ -// Envoy snapshot of Chromium URL path normalization, see README.md. -// NOLINT(namespace-envoy) - -// Copyright 2017 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "common/chromium_url/url_canon.h" - -#include "common/chromium_url/envoy_shim.h" - -namespace chromium_url { - -template class EXPORT_TEMPLATE_DEFINE(COMPONENT_EXPORT(URL)) CanonOutputT; - -} // namespace chromium_url diff --git a/source/common/chromium_url/url_canon.h b/source/common/chromium_url/url_canon.h deleted file mode 100644 index 0280de643ac8..000000000000 --- a/source/common/chromium_url/url_canon.h +++ /dev/null @@ -1,186 +0,0 @@ -// Envoy snapshot of Chromium URL path normalization, see README.md. -// NOLINT(namespace-envoy) - -// Copyright 2013 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef URL_URL_CANON_H_ -#define URL_URL_CANON_H_ - -#include -#include - -#include "common/chromium_url/envoy_shim.h" -#include "common/chromium_url/url_parse.h" - -namespace chromium_url { - -// Canonicalizer output ------------------------------------------------------- - -// Base class for the canonicalizer output, this maintains a buffer and -// supports simple resizing and append operations on it. 
-// -// It is VERY IMPORTANT that no virtual function calls be made on the common -// code path. We only have two virtual function calls, the destructor and a -// resize function that is called when the existing buffer is not big enough. -// The derived class is then in charge of setting up our buffer which we will -// manage. -template class CanonOutputT { -public: - CanonOutputT() : buffer_(NULL), buffer_len_(0), cur_len_(0) {} - virtual ~CanonOutputT() = default; - - // Implemented to resize the buffer. This function should update the buffer - // pointer to point to the new buffer, and any old data up to |cur_len_| in - // the buffer must be copied over. - // - // The new size |sz| must be larger than buffer_len_. - virtual void Resize(int sz) = 0; - - // Accessor for returning a character at a given position. The input offset - // must be in the valid range. - inline T at(int offset) const { return buffer_[offset]; } - - // Sets the character at the given position. The given position MUST be less - // than the length(). - inline void set(int offset, T ch) { buffer_[offset] = ch; } - - // Returns the number of characters currently in the buffer. - inline int length() const { return cur_len_; } - - // Returns the current capacity of the buffer. The length() is the number of - // characters that have been declared to be written, but the capacity() is - // the number that can be written without reallocation. If the caller must - // write many characters at once, it can make sure there is enough capacity, - // write the data, then use set_size() to declare the new length(). - int capacity() const { return buffer_len_; } - - // Called by the user of this class to get the output. The output will NOT - // be NULL-terminated. Call length() to get the - // length. - const T* data() const { return buffer_; } - T* data() { return buffer_; } - - // Shortens the URL to the new length. Used for "backing up" when processing - // relative paths. This can also be used if an external function writes a lot - // of data to the buffer (when using the "Raw" version below) beyond the end, - // to declare the new length. - // - // This MUST NOT be used to expand the size of the buffer beyond capacity(). - void set_length(int new_len) { cur_len_ = new_len; } - - // This is the most performance critical function, since it is called for - // every character. - void push_back(T ch) { - // In VC2005, putting this common case first speeds up execution - // dramatically because this branch is predicted as taken. - if (cur_len_ < buffer_len_) { - buffer_[cur_len_] = ch; - cur_len_++; - return; - } - - // Grow the buffer to hold at least one more item. Hopefully we won't have - // to do this very often. - if (!Grow(1)) - return; - - // Actually do the insertion. - buffer_[cur_len_] = ch; - cur_len_++; - } - - // Appends the given string to the output. - void Append(const T* str, int str_len) { - if (cur_len_ + str_len > buffer_len_) { - if (!Grow(cur_len_ + str_len - buffer_len_)) - return; - } - for (int i = 0; i < str_len; i++) - buffer_[cur_len_ + i] = str[i]; - cur_len_ += str_len; - } - - void ReserveSizeIfNeeded(int estimated_size) { - // Reserve a bit extra to account for escaped chars. - if (estimated_size > buffer_len_) - Resize(estimated_size + 8); - } - -protected: - // Grows the given buffer so that it can fit at least |min_additional| - // characters. Returns true if the buffer could be resized, false on OOM. 
- bool Grow(int min_additional) { - static const int kMinBufferLen = 16; - int new_len = (buffer_len_ == 0) ? kMinBufferLen : buffer_len_; - do { - if (new_len >= (1 << 30)) // Prevent overflow below. - return false; - new_len *= 2; - } while (new_len < buffer_len_ + min_additional); - Resize(new_len); - return true; - } - - T* buffer_; - int buffer_len_; - - // Used characters in the buffer. - int cur_len_; -}; - -// Simple implementation of the CanonOutput using new[]. This class -// also supports a static buffer so if it is allocated on the stack, most -// URLs can be canonicalized with no heap allocations. -template class RawCanonOutputT : public CanonOutputT { -public: - RawCanonOutputT() : CanonOutputT() { - this->buffer_ = fixed_buffer_; - this->buffer_len_ = fixed_capacity; - } - ~RawCanonOutputT() override { - if (this->buffer_ != fixed_buffer_) - delete[] this->buffer_; - } - - void Resize(int sz) override { - T* new_buf = new T[sz]; - memcpy(new_buf, this->buffer_, sizeof(T) * (this->cur_len_ < sz ? this->cur_len_ : sz)); - if (this->buffer_ != fixed_buffer_) - delete[] this->buffer_; - this->buffer_ = new_buf; - this->buffer_len_ = sz; - } - -protected: - T fixed_buffer_[fixed_capacity]; -}; - -// Explicitly instantiate commonly used instantiations. -extern template class EXPORT_TEMPLATE_DECLARE(COMPONENT_EXPORT(URL)) CanonOutputT; - -// Normally, all canonicalization output is in narrow characters. We support -// the templates so it can also be used internally if a wide buffer is -// required. -using CanonOutput = CanonOutputT; - -template -class RawCanonOutput : public RawCanonOutputT {}; - -// Path. If the input does not begin in a slash (including if the input is -// empty), we'll prepend a slash to the path to make it canonical. -// -// The 8-bit version assumes UTF-8 encoding, but does not verify the validity -// of the UTF-8 (i.e., you can have invalid UTF-8 sequences, invalid -// characters, etc.). Normally, URLs will come in as UTF-16, so this isn't -// an issue. Somebody giving us an 8-bit path is responsible for generating -// the path that the server expects (we'll escape high-bit characters), so -// if something is invalid, it's their problem. -COMPONENT_EXPORT(URL) -bool CanonicalizePath(const char* spec, const Component& path, CanonOutput* output, - Component* out_path); - -} // namespace chromium_url - -#endif // URL_URL_CANON_H_ diff --git a/source/common/chromium_url/url_canon_internal.cc b/source/common/chromium_url/url_canon_internal.cc deleted file mode 100644 index 38c932cad5b4..000000000000 --- a/source/common/chromium_url/url_canon_internal.cc +++ /dev/null @@ -1,295 +0,0 @@ -// Envoy snapshot of Chromium URL path normalization, see README.md. -// NOLINT(namespace-envoy) - -// Copyright 2013 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "common/chromium_url/url_canon_internal.h" - -namespace chromium_url { - -// See the header file for this array's declaration. -const unsigned char kSharedCharTypeTable[0x100] = { - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, // 0x00 - 0x0f - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, // 0x10 - 0x1f - 0, // 0x20 ' ' (escape spaces in queries) - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x21 ! 
- 0, // 0x22 " - 0, // 0x23 # (invalid in query since it marks the ref) - CHAR_QUERY | CHAR_USERINFO, // 0x24 $ - CHAR_QUERY | CHAR_USERINFO, // 0x25 % - CHAR_QUERY | CHAR_USERINFO, // 0x26 & - 0, // 0x27 ' (Try to prevent XSS.) - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x28 ( - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x29 ) - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x2a * - CHAR_QUERY | CHAR_USERINFO, // 0x2b + - CHAR_QUERY | CHAR_USERINFO, // 0x2c , - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x2d - - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_COMPONENT, // 0x2e . - CHAR_QUERY, // 0x2f / - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT | - CHAR_COMPONENT, // 0x30 0 - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT | - CHAR_COMPONENT, // 0x31 1 - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT | - CHAR_COMPONENT, // 0x32 2 - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT | - CHAR_COMPONENT, // 0x33 3 - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT | - CHAR_COMPONENT, // 0x34 4 - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT | - CHAR_COMPONENT, // 0x35 5 - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT | - CHAR_COMPONENT, // 0x36 6 - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_OCT | - CHAR_COMPONENT, // 0x37 7 - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_COMPONENT, // 0x38 8 - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_DEC | CHAR_COMPONENT, // 0x39 9 - CHAR_QUERY, // 0x3a : - CHAR_QUERY, // 0x3b ; - 0, // 0x3c < (Try to prevent certain types of XSS.) - CHAR_QUERY, // 0x3d = - 0, // 0x3e > (Try to prevent certain types of XSS.) - CHAR_QUERY, // 0x3f ? 
- CHAR_QUERY, // 0x40 @ - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x41 A - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x42 B - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x43 C - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x44 D - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x45 E - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x46 F - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x47 G - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x48 H - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x49 I - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x4a J - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x4b K - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x4c L - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x4d M - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x4e N - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x4f O - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x50 P - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x51 Q - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x52 R - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x53 S - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x54 T - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x55 U - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x56 V - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x57 W - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_COMPONENT, // 0x58 X - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x59 Y - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x5a Z - CHAR_QUERY, // 0x5b [ - CHAR_QUERY, // 0x5c '\' - CHAR_QUERY, // 0x5d ] - CHAR_QUERY, // 0x5e ^ - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x5f _ - CHAR_QUERY, // 0x60 ` - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x61 a - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x62 b - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x63 c - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x64 d - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x65 e - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_HEX | CHAR_COMPONENT, // 0x66 f - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x67 g - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x68 h - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x69 i - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x6a j - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x6b k - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x6c l - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x6d m - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x6e n - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x6f o - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x70 p - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x71 q - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x72 r - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x73 s - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x74 t - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x75 u - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x76 v - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x77 w - CHAR_QUERY | CHAR_USERINFO | CHAR_IPV4 | CHAR_COMPONENT, // 0x78 x - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x79 y - CHAR_QUERY | CHAR_USERINFO | CHAR_COMPONENT, // 0x7a z - CHAR_QUERY, // 0x7b { - CHAR_QUERY, // 0x7c | - CHAR_QUERY, // 0x7d } - CHAR_QUERY | 
CHAR_USERINFO | CHAR_COMPONENT, // 0x7e ~ - 0, // 0x7f - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, // 0x80 - 0x8f - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, // 0x90 - 0x9f - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, // 0xa0 - 0xaf - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, // 0xb0 - 0xbf - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, // 0xc0 - 0xcf - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, // 0xd0 - 0xdf - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, // 0xe0 - 0xef - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, // 0xf0 - 0xff -}; - -const char kHexCharLookup[0x10] = { - '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F', -}; - -const char kCharToHexLookup[8] = { - 0, // 0x00 - 0x1f - '0', // 0x20 - 0x3f: digits 0 - 9 are 0x30 - 0x39 - 'A' - 10, // 0x40 - 0x5f: letters A - F are 0x41 - 0x46 - 'a' - 10, // 0x60 - 0x7f: letters a - f are 0x61 - 0x66 - 0, // 0x80 - 0x9F - 0, // 0xA0 - 0xBF - 0, // 0xC0 - 0xDF - 0, // 0xE0 - 0xFF -}; - -} // namespace chromium_url diff --git a/source/common/chromium_url/url_canon_internal.h b/source/common/chromium_url/url_canon_internal.h deleted file mode 100644 index 8c405b49814a..000000000000 --- a/source/common/chromium_url/url_canon_internal.h +++ /dev/null @@ -1,204 +0,0 @@ -// Envoy snapshot of Chromium URL path normalization, see README.md. -// NOLINT(namespace-envoy) - -// Copyright 2013 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef URL_URL_CANON_INTERNAL_H_ -#define URL_URL_CANON_INTERNAL_H_ - -// This file is intended to be included in another C++ file where the character -// types are defined. This allows us to write mostly generic code, but not have -// template bloat because everything is inlined when anybody calls any of our -// functions. - -#include -#include - -#include "common/chromium_url/envoy_shim.h" -#include "common/chromium_url/url_canon.h" - -namespace chromium_url { - -// Character type handling ----------------------------------------------------- - -// Bits that identify different character types. These types identify different -// bits that are set for each 8-bit character in the kSharedCharTypeTable. -enum SharedCharTypes { - // Characters that do not require escaping in queries. Characters that do - // not have this flag will be escaped; see url_canon_query.cc - CHAR_QUERY = 1, - - // Valid in the username/password field. - CHAR_USERINFO = 2, - - // Valid in a IPv4 address (digits plus dot and 'x' for hex). - CHAR_IPV4 = 4, - - // Valid in an ASCII-representation of a hex digit (as in %-escaped). - CHAR_HEX = 8, - - // Valid in an ASCII-representation of a decimal digit. - CHAR_DEC = 16, - - // Valid in an ASCII-representation of an octal digit. - CHAR_OCT = 32, - - // Characters that do not require escaping in encodeURIComponent. Characters - // that do not have this flag will be escaped; see url_util.cc. - CHAR_COMPONENT = 64, -}; - -// This table contains the flags in SharedCharTypes for each 8-bit character. -// Some canonicalization functions have their own specialized lookup table. 
-// For those with simple requirements, we have collected the flags in one -// place so there are fewer lookup tables to load into the CPU cache. -// -// Using an unsigned char type has a small but measurable performance benefit -// over using a 32-bit number. -extern const unsigned char kSharedCharTypeTable[0x100]; - -// More readable wrappers around the character type lookup table. -inline bool IsCharOfType(unsigned char c, SharedCharTypes type) { - return !!(kSharedCharTypeTable[c] & type); -} -inline bool IsQueryChar(unsigned char c) { return IsCharOfType(c, CHAR_QUERY); } -inline bool IsIPv4Char(unsigned char c) { return IsCharOfType(c, CHAR_IPV4); } -inline bool IsHexChar(unsigned char c) { return IsCharOfType(c, CHAR_HEX); } -inline bool IsComponentChar(unsigned char c) { return IsCharOfType(c, CHAR_COMPONENT); } - -// Maps the hex numerical values 0x0 to 0xf to the corresponding ASCII digit -// that will be used to represent it. -COMPONENT_EXPORT(URL) extern const char kHexCharLookup[0x10]; - -// This lookup table allows fast conversion between ASCII hex letters and their -// corresponding numerical value. The 8-bit range is divided up into 8 -// regions of 0x20 characters each. Each of the three character types (numbers, -// uppercase, lowercase) falls into different regions of this range. The table -// contains the amount to subtract from characters in that range to get at -// the corresponding numerical value. -// -// See HexDigitToValue for the lookup. -extern const char kCharToHexLookup[8]; - -// Assumes the input is a valid hex digit! Call IsHexChar before using this. -inline unsigned char HexCharToValue(unsigned char c) { return c - kCharToHexLookup[c / 0x20]; } - -// Indicates if the given character is a dot or dot equivalent, returning the -// number of characters taken by it. This will be one for a literal dot, 3 for -// an escaped dot. If the character is not a dot, this will return 0. -template inline int IsDot(const CHAR* spec, int offset, int end) { - if (spec[offset] == '.') { - return 1; - } else if (spec[offset] == '%' && offset + 3 <= end && spec[offset + 1] == '2' && - (spec[offset + 2] == 'e' || spec[offset + 2] == 'E')) { - // Found "%2e" - return 3; - } - return 0; -} - -// Write a single character, escaped, to the output. This always escapes: it -// does no checking that thee character requires escaping. -// Escaping makes sense only 8 bit chars, so code works in all cases of -// input parameters (8/16bit). -template -inline void AppendEscapedChar(UINCHAR ch, CanonOutputT* output) { - output->push_back('%'); - output->push_back(kHexCharLookup[(ch >> 4) & 0xf]); - output->push_back(kHexCharLookup[ch & 0xf]); -} - -// UTF-8 functions ------------------------------------------------------------ - -// Generic To-UTF-8 converter. This will call the given append method for each -// character that should be appended, with the given output method. Wrappers -// are provided below for escaped and non-escaped versions of this. -// -// The char_value must have already been checked that it's a valid Unicode -// character. 
-template -inline void DoAppendUTF8(unsigned char_value, Output* output) { - if (char_value <= 0x7f) { - Appender(static_cast(char_value), output); - } else if (char_value <= 0x7ff) { - // 110xxxxx 10xxxxxx - Appender(static_cast(0xC0 | (char_value >> 6)), output); - Appender(static_cast(0x80 | (char_value & 0x3f)), output); - } else if (char_value <= 0xffff) { - // 1110xxxx 10xxxxxx 10xxxxxx - Appender(static_cast(0xe0 | (char_value >> 12)), output); - Appender(static_cast(0x80 | ((char_value >> 6) & 0x3f)), output); - Appender(static_cast(0x80 | (char_value & 0x3f)), output); - } else if (char_value <= 0x10FFFF) { // Max Unicode code point. - // 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - Appender(static_cast(0xf0 | (char_value >> 18)), output); - Appender(static_cast(0x80 | ((char_value >> 12) & 0x3f)), output); - Appender(static_cast(0x80 | ((char_value >> 6) & 0x3f)), output); - Appender(static_cast(0x80 | (char_value & 0x3f)), output); - } else { - // Invalid UTF-8 character (>20 bits). - NOTREACHED(); - } -} - -// Helper used by AppendUTF8Value below. We use an unsigned parameter so there -// are no funny sign problems with the input, but then have to convert it to -// a regular char for appending. -inline void AppendCharToOutput(unsigned char ch, CanonOutput* output) { - output->push_back(static_cast(ch)); -} - -// Writes the given character to the output as UTF-8. This does NO checking -// of the validity of the Unicode characters; the caller should ensure that -// the value it is appending is valid to append. -inline void AppendUTF8Value(unsigned char_value, CanonOutput* output) { - DoAppendUTF8(char_value, output); -} - -// Writes the given character to the output as UTF-8, escaping ALL -// characters (even when they are ASCII). This does NO checking of the -// validity of the Unicode characters; the caller should ensure that the value -// it is appending is valid to append. -inline void AppendUTF8EscapedValue(unsigned char_value, CanonOutput* output) { - DoAppendUTF8(char_value, output); -} - -// Given a '%' character at |*begin| in the string |spec|, this will decode -// the escaped value and put it into |*unescaped_value| on success (returns -// true). On failure, this will return false, and will not write into -// |*unescaped_value|. -// -// |*begin| will be updated to point to the last character of the escape -// sequence so that when called with the index of a for loop, the next time -// through it will point to the next character to be considered. On failure, -// |*begin| will be unchanged. -inline bool Is8BitChar(char /*c*/) { - return true; // this case is specialized to avoid a warning -} - -template -inline bool DecodeEscaped(const CHAR* spec, int* begin, int end, unsigned char* unescaped_value) { - if (*begin + 3 > end || !Is8BitChar(spec[*begin + 1]) || !Is8BitChar(spec[*begin + 2])) { - // Invalid escape sequence because there's not enough room, or the - // digits are not ASCII. - return false; - } - - unsigned char first = static_cast(spec[*begin + 1]); - unsigned char second = static_cast(spec[*begin + 2]); - if (!IsHexChar(first) || !IsHexChar(second)) { - // Invalid hex digits, fail. - return false; - } - - // Valid escape sequence. 
- *unescaped_value = (HexCharToValue(first) << 4) + HexCharToValue(second); - *begin += 2; - return true; -} - -} // namespace chromium_url - -#endif // URL_URL_CANON_INTERNAL_H_ diff --git a/source/common/chromium_url/url_canon_path.cc b/source/common/chromium_url/url_canon_path.cc deleted file mode 100644 index 22587c0ab8a1..000000000000 --- a/source/common/chromium_url/url_canon_path.cc +++ /dev/null @@ -1,413 +0,0 @@ -// Envoy snapshot of Chromium URL path normalization, see README.md. -// NOLINT(namespace-envoy) - -// Copyright 2013 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include - -#include "common/chromium_url/url_canon.h" -#include "common/chromium_url/url_canon_internal.h" -#include "common/chromium_url/url_parse_internal.h" - -namespace chromium_url { - -namespace { - -enum CharacterFlags { - // Pass through unchanged, whether escaped or unescaped. This doesn't - // actually set anything so you can't OR it to check, it's just to make the - // table below more clear when neither ESCAPE or UNESCAPE is set. - PASS = 0, - - // This character requires special handling in DoPartialPath. Doing this test - // first allows us to filter out the common cases of regular characters that - // can be directly copied. - SPECIAL = 1, - - // This character must be escaped in the canonical output. Note that all - // escaped chars also have the "special" bit set so that the code that looks - // for this is triggered. Not valid with PASS or ESCAPE - ESCAPE_BIT = 2, - ESCAPE = ESCAPE_BIT | SPECIAL, - - // This character must be unescaped in canonical output. Not valid with - // ESCAPE or PASS. We DON'T set the SPECIAL flag since if we encounter these - // characters unescaped, they should just be copied. - UNESCAPE = 4, - - // This character is disallowed in URLs. Note that the "special" bit is also - // set to trigger handling. - INVALID_BIT = 8, - INVALID = INVALID_BIT | SPECIAL, -}; - -// This table contains one of the above flag values. Note some flags are more -// than one bits because they also turn on the "special" flag. Special is the -// only flag that may be combined with others. -// -// This table is designed to match exactly what IE does with the characters. -// -// Dot is even more special, and the escaped version is handled specially by -// IsDot. Therefore, we don't need the "escape" flag, and even the "unescape" -// bit is never handled (we just need the "special") bit. -const unsigned char kPathCharLookup[0x100] = { - // NULL control chars... - INVALID, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, - ESCAPE, ESCAPE, ESCAPE, ESCAPE, - // control chars... - ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, - ESCAPE, ESCAPE, ESCAPE, ESCAPE, - // ' ' ! " # $ % & ' ( ) * - // + , - . / - ESCAPE, PASS, ESCAPE, ESCAPE, PASS, ESCAPE, PASS, PASS, PASS, PASS, PASS, PASS, PASS, UNESCAPE, - SPECIAL, PASS, - // 0 1 2 3 4 5 6 7 8 9 : - // ; < = > ? 
- UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, - UNESCAPE, PASS, PASS, ESCAPE, PASS, ESCAPE, ESCAPE, - // @ A B C D E F G H I J - // K L M N O - PASS, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, - UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, - // P Q R S T U V W X Y Z - // [ \ ] ^ _ - UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, - UNESCAPE, UNESCAPE, PASS, ESCAPE, PASS, ESCAPE, UNESCAPE, - // ` a b c d e f g h i j - // k l m n o - ESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, - UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, - // p q r s t u v w x y z - // { | } ~ - UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, UNESCAPE, - UNESCAPE, UNESCAPE, ESCAPE, ESCAPE, ESCAPE, UNESCAPE, ESCAPE, - // ...all the high-bit characters are escaped - ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, - ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, - ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, - ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, - ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, - ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, - ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, - ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, - ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, - ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, - ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE, ESCAPE}; - -enum DotDisposition { - // The given dot is just part of a filename and is not special. - NOT_A_DIRECTORY, - - // The given dot is the current directory. - DIRECTORY_CUR, - - // The given dot is the first of a double dot that should take us up one. - DIRECTORY_UP -}; - -// When the path resolver finds a dot, this function is called with the -// character following that dot to see what it is. The return value -// indicates what type this dot is (see above). This code handles the case -// where the dot is at the end of the input. -// -// |*consumed_len| will contain the number of characters in the input that -// express what we found. -// -// If the input is "../foo", |after_dot| = 1, |end| = 6, and -// at the end, |*consumed_len| = 2 for the "./" this function consumed. The -// original dot length should be handled by the caller. -template -DotDisposition ClassifyAfterDot(const CHAR* spec, int after_dot, int end, int* consumed_len) { - if (after_dot == end) { - // Single dot at the end. - *consumed_len = 0; - return DIRECTORY_CUR; - } - if (IsURLSlash(spec[after_dot])) { - // Single dot followed by a slash. - *consumed_len = 1; // Consume the slash - return DIRECTORY_CUR; - } - - int second_dot_len = IsDot(spec, after_dot, end); - if (second_dot_len) { - int after_second_dot = after_dot + second_dot_len; - if (after_second_dot == end) { - // Double dot at the end. 
- *consumed_len = second_dot_len; - return DIRECTORY_UP; - } - if (IsURLSlash(spec[after_second_dot])) { - // Double dot followed by a slash. - *consumed_len = second_dot_len + 1; - return DIRECTORY_UP; - } - } - - // The dots are followed by something else, not a directory. - *consumed_len = 0; - return NOT_A_DIRECTORY; -} - -// Rewinds the output to the previous slash. It is assumed that the output -// ends with a slash and this doesn't count (we call this when we are -// appending directory paths, so the previous path component has and ending -// slash). -// -// This will stop at the first slash (assumed to be at position -// |path_begin_in_output| and not go any higher than that. Some web pages -// do ".." too many times, so we need to handle that brokenness. -// -// It searches for a literal slash rather than including a backslash as well -// because it is run only on the canonical output. -// -// The output is guaranteed to end in a slash when this function completes. -void BackUpToPreviousSlash(int path_begin_in_output, CanonOutput* output) { - DCHECK(output->length() > 0); - - int i = output->length() - 1; - DCHECK(output->at(i) == '/'); - if (i == path_begin_in_output) - return; // We're at the first slash, nothing to do. - - // Now back up (skipping the trailing slash) until we find another slash. - i--; - while (output->at(i) != '/' && i > path_begin_in_output) - i--; - - // Now shrink the output to just include that last slash we found. - output->set_length(i + 1); -} - -// Looks for problematic nested escape sequences and escapes the output as -// needed to ensure they can't be misinterpreted. -// -// Our concern is that in input escape sequence that's invalid because it -// contains nested escape sequences might look valid once those are unescaped. -// For example, "%%300" is not a valid escape sequence, but after unescaping the -// inner "%30" this becomes "%00" which is valid. Leaving this in the output -// string can result in callers re-canonicalizing the string and unescaping this -// sequence, thus resulting in something fundamentally different than the -// original input here. This can cause a variety of problems. -// -// This function is called after we've just unescaped a sequence that's within -// two output characters of a previous '%' that we know didn't begin a valid -// escape sequence in the input string. We look for whether the output is going -// to turn into a valid escape sequence, and if so, convert the initial '%' into -// an escaped "%25" so the output can't be misinterpreted. -// -// |spec| is the input string we're canonicalizing. -// |next_input_index| is the index of the next unprocessed character in |spec|. -// |input_len| is the length of |spec|. -// |last_invalid_percent_index| is the index in |output| of a previously-seen -// '%' character. The caller knows this '%' character isn't followed by a valid -// escape sequence in the input string. -// |output| is the canonicalized output thus far. The caller guarantees this -// ends with a '%' followed by one or two characters, and the '%' is the one -// pointed to by |last_invalid_percent_index|. The last character in the string -// was just unescaped. 
-template -void CheckForNestedEscapes(const CHAR* spec, int next_input_index, int input_len, - int last_invalid_percent_index, CanonOutput* output) { - const int length = output->length(); - const char last_unescaped_char = output->at(length - 1); - - // If |output| currently looks like "%c", we need to try appending the next - // input character to see if this will result in a problematic escape - // sequence. Note that this won't trigger on the first nested escape of a - // two-escape sequence like "%%30%30" -- we'll allow the conversion to - // "%0%30" -- but the second nested escape will be caught by this function - // when it's called again in that case. - const bool append_next_char = last_invalid_percent_index == length - 2; - if (append_next_char) { - // If the input doesn't contain a 7-bit character next, this case won't be a - // problem. - if ((next_input_index == input_len) || (spec[next_input_index] >= 0x80)) - return; - output->push_back(static_cast(spec[next_input_index])); - } - - // Now output ends like "%cc". Try to unescape this. - int begin = last_invalid_percent_index; - unsigned char temp; - if (DecodeEscaped(output->data(), &begin, output->length(), &temp)) { - // New escape sequence found. Overwrite the characters following the '%' - // with "25", and push_back() the one or two characters that were following - // the '%' when we were called. - if (!append_next_char) - output->push_back(output->at(last_invalid_percent_index + 1)); - output->set(last_invalid_percent_index + 1, '2'); - output->set(last_invalid_percent_index + 2, '5'); - output->push_back(last_unescaped_char); - } else if (append_next_char) { - // Not a valid escape sequence, but we still need to undo appending the next - // source character so the caller can process it normally. - output->set_length(length); - } -} - -// Appends the given path to the output. It assumes that if the input path -// starts with a slash, it should be copied to the output. If no path has -// already been appended to the output (the case when not resolving -// relative URLs), the path should begin with a slash. -// -// If there are already path components (this mode is used when appending -// relative paths for resolving), it assumes that the output already has -// a trailing slash and that if the input begins with a slash, it should be -// copied to the output. -// -// We do not collapse multiple slashes in a row to a single slash. It seems -// no web browsers do this, and we don't want incompatibilities, even though -// it would be correct for most systems. -template -bool DoPartialPath(const CHAR* spec, const Component& path, int path_begin_in_output, - CanonOutput* output) { - int end = path.end(); - - // We use this variable to minimize the amount of work done when unescaping -- - // we'll only call CheckForNestedEscapes() when this points at one of the last - // couple of characters in |output|. - int last_invalid_percent_index = INT_MIN; - - bool success = true; - for (int i = path.begin; i < end; i++) { - UCHAR uch = static_cast(spec[i]); - // Chromium UTF8 logic is unneeded, as the missing templated result - // refers only to char const* (single-byte) characters at this time. - // This only trips up MSVC, since linux gcc seems to optimize it away. - // Indention is to avoid gratuitous diffs to origin source - { - unsigned char out_ch = static_cast(uch); - unsigned char flags = kPathCharLookup[out_ch]; - if (flags & SPECIAL) { - // Needs special handling of some sort. 
- int dotlen; - if ((dotlen = IsDot(spec, i, end)) > 0) { - // See if this dot was preceded by a slash in the output. We - // assume that when canonicalizing paths, they will always - // start with a slash and not a dot, so we don't have to - // bounds check the output. - // - // Note that we check this in the case of dots so we don't have to - // special case slashes. Since slashes are much more common than - // dots, this actually increases performance measurably (though - // slightly). - DCHECK(output->length() > path_begin_in_output); - if (output->length() > path_begin_in_output && output->at(output->length() - 1) == '/') { - // Slash followed by a dot, check to see if this is means relative - int consumed_len; - switch (ClassifyAfterDot(spec, i + dotlen, end, &consumed_len)) { - case NOT_A_DIRECTORY: - // Copy the dot to the output, it means nothing special. - output->push_back('.'); - i += dotlen - 1; - break; - case DIRECTORY_CUR: // Current directory, just skip the input. - i += dotlen + consumed_len - 1; - break; - case DIRECTORY_UP: - BackUpToPreviousSlash(path_begin_in_output, output); - i += dotlen + consumed_len - 1; - break; - } - } else { - // This dot is not preceded by a slash, it is just part of some - // file name. - output->push_back('.'); - i += dotlen - 1; - } - - } else if (out_ch == '\\') { - // Convert backslashes to forward slashes - output->push_back('/'); - - } else if (out_ch == '%') { - // Handle escape sequences. - unsigned char unescaped_value; - if (DecodeEscaped(spec, &i, end, &unescaped_value)) { - // Valid escape sequence, see if we keep, reject, or unescape it. - // Note that at this point DecodeEscape() will have advanced |i| to - // the last character of the escape sequence. - char unescaped_flags = kPathCharLookup[unescaped_value]; - - if (unescaped_flags & UNESCAPE) { - // This escaped value shouldn't be escaped. Try to copy it. - output->push_back(unescaped_value); - // If we just unescaped a value within 2 output characters of the - // '%' from a previously-detected invalid escape sequence, we - // might have an input string with problematic nested escape - // sequences; detect and fix them. - if (last_invalid_percent_index >= (output->length() - 3)) { - CheckForNestedEscapes(spec, i + 1, end, last_invalid_percent_index, output); - } - } else { - // Either this is an invalid escaped character, or it's a valid - // escaped character we should keep escaped. In the first case we - // should just copy it exactly and remember the error. In the - // second we also copy exactly in case the server is sensitive to - // changing the case of any hex letters. - output->push_back('%'); - output->push_back(static_cast(spec[i - 1])); - output->push_back(static_cast(spec[i])); - if (unescaped_flags & INVALID_BIT) - success = false; - } - } else { - // Invalid escape sequence. IE7+ rejects any URLs with such - // sequences, while other browsers pass them through unchanged. We - // use the permissive behavior. - // TODO(brettw): Consider testing IE's strict behavior, which would - // allow removing the code to handle nested escapes above. - last_invalid_percent_index = output->length(); - output->push_back('%'); - } - - } else if (flags & INVALID_BIT) { - // For NULLs, etc. fail. - AppendEscapedChar(out_ch, output); - success = false; - - } else if (flags & ESCAPE_BIT) { - // This character should be escaped. - AppendEscapedChar(out_ch, output); - } - } else { - // Nothing special about this character, just append it. 
- output->push_back(out_ch); - } - } - } - return success; -} - -template -bool DoPath(const CHAR* spec, const Component& path, CanonOutput* output, Component* out_path) { - bool success = true; - out_path->begin = output->length(); - if (path.len > 0) { - // Write out an initial slash if the input has none. If we just parse a URL - // and then canonicalize it, it will of course have a slash already. This - // check is for the replacement and relative URL resolving cases of file - // URLs. - if (!IsURLSlash(spec[path.begin])) - output->push_back('/'); - - success = DoPartialPath(spec, path, out_path->begin, output); - } else { - // No input, canonical path is a slash. - output->push_back('/'); - } - out_path->len = output->length() - out_path->begin; - return success; -} - -} // namespace - -bool CanonicalizePath(const char* spec, const Component& path, CanonOutput* output, - Component* out_path) { - return DoPath(spec, path, output, out_path); -} - -} // namespace chromium_url diff --git a/source/common/chromium_url/url_canon_stdstring.cc b/source/common/chromium_url/url_canon_stdstring.cc deleted file mode 100644 index 0c61831e5f1a..000000000000 --- a/source/common/chromium_url/url_canon_stdstring.cc +++ /dev/null @@ -1,33 +0,0 @@ -// Envoy snapshot of Chromium URL path normalization, see README.md. -// NOLINT(namespace-envoy) - -// Copyright 2013 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "common/chromium_url/url_canon_stdstring.h" - -namespace chromium_url { - -StdStringCanonOutput::StdStringCanonOutput(std::string* str) : CanonOutput(), str_(str) { - cur_len_ = static_cast(str_->size()); // Append to existing data. - buffer_ = str_->empty() ? NULL : &(*str_)[0]; - buffer_len_ = static_cast(str_->size()); -} - -StdStringCanonOutput::~StdStringCanonOutput() { - // Nothing to do, we don't own the string. -} - -void StdStringCanonOutput::Complete() { - str_->resize(cur_len_); - buffer_len_ = cur_len_; -} - -void StdStringCanonOutput::Resize(int sz) { - str_->resize(sz); - buffer_ = str_->empty() ? NULL : &(*str_)[0]; - buffer_len_ = sz; -} - -} // namespace chromium_url diff --git a/source/common/chromium_url/url_canon_stdstring.h b/source/common/chromium_url/url_canon_stdstring.h deleted file mode 100644 index e14d6c22e74e..000000000000 --- a/source/common/chromium_url/url_canon_stdstring.h +++ /dev/null @@ -1,58 +0,0 @@ -// Envoy snapshot of Chromium URL path normalization, see README.md. -// NOLINT(namespace-envoy) - -// Copyright 2013 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef URL_URL_CANON_STDSTRING_H_ -#define URL_URL_CANON_STDSTRING_H_ - -// This header file defines a canonicalizer output method class for STL -// strings. Because the canonicalizer tries not to be dependent on the STL, -// we have segregated it here. - -#include - -#include "common/chromium_url/envoy_shim.h" -#include "common/chromium_url/url_canon.h" - -#define DISALLOW_COPY_AND_ASSIGN(TypeName) \ - TypeName(const TypeName&) = delete; \ - TypeName& operator=(const TypeName&) = delete - -namespace chromium_url { - -// Write into a std::string given in the constructor. This object does not own -// the string itself, and the user must ensure that the string stays alive -// throughout the lifetime of this object. 
-// -// The given string will be appended to; any existing data in the string will -// be preserved. -// -// Note that when canonicalization is complete, the string will likely have -// unused space at the end because we make the string very big to start out -// with (by |initial_size|). This ends up being important because resize -// operations are slow, and because the base class needs to write directly -// into the buffer. -// -// Therefore, the user should call Complete() before using the string that -// this class wrote into. -class COMPONENT_EXPORT(URL) StdStringCanonOutput : public CanonOutput { -public: - StdStringCanonOutput(std::string* str); - ~StdStringCanonOutput() override; - - // Must be called after writing has completed but before the string is used. - void Complete(); - - void Resize(int sz) override; - -protected: - std::string* str_; - DISALLOW_COPY_AND_ASSIGN(StdStringCanonOutput); -}; - -} // namespace chromium_url - -#endif // URL_URL_CANON_STDSTRING_H_ diff --git a/source/common/chromium_url/url_parse.h b/source/common/chromium_url/url_parse.h deleted file mode 100644 index b840af60438d..000000000000 --- a/source/common/chromium_url/url_parse.h +++ /dev/null @@ -1,49 +0,0 @@ -// Envoy snapshot of Chromium URL path normalization, see README.md. -// NOLINT(namespace-envoy) - -// Copyright 2013 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef URL_PARSE_H_ -#define URL_PARSE_H_ - -namespace chromium_url { - -// Component ------------------------------------------------------------------ - -// Represents a substring for URL parsing. -struct Component { - Component() : begin(0), len(-1) {} - - // Normal constructor: takes an offset and a length. - Component(int b, int l) : begin(b), len(l) {} - - int end() const { return begin + len; } - - // Returns true if this component is valid, meaning the length is given. Even - // valid components may be empty to record the fact that they exist. - bool is_valid() const { return (len != -1); } - - // Returns true if the given component is specified on false, the component - // is either empty or invalid. - bool is_nonempty() const { return (len > 0); } - - void reset() { - begin = 0; - len = -1; - } - - bool operator==(const Component& other) const { return begin == other.begin && len == other.len; } - - int begin; // Byte offset in the string of this component. - int len; // Will be -1 if the component is unspecified. -}; - -// Helper that returns a component created with the given begin and ending -// points. The ending point is non-inclusive. -inline Component MakeRange(int begin, int end) { return Component(begin, end - begin); } - -} // namespace chromium_url - -#endif // URL_PARSE_H_ diff --git a/source/common/chromium_url/url_parse_internal.h b/source/common/chromium_url/url_parse_internal.h deleted file mode 100644 index 0ca47bc48846..000000000000 --- a/source/common/chromium_url/url_parse_internal.h +++ /dev/null @@ -1,18 +0,0 @@ -// Envoy snapshot of Chromium URL path normalization, see README.md. -// NOLINT(namespace-envoy) - -// Copyright 2013 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef URL_URL_PARSE_INTERNAL_H_ -#define URL_URL_PARSE_INTERNAL_H_ - -namespace chromium_url { - -// We treat slashes and backslashes the same for IE compatibility. 
-inline bool IsURLSlash(char ch) { return ch == '/' || ch == '\\'; }
-
-} // namespace chromium_url
-
-#endif // URL_URL_PARSE_INTERNAL_H_
diff --git a/source/common/http/BUILD b/source/common/http/BUILD
index 76c1f202e29e..54de0782f5f6 100644
--- a/source/common/http/BUILD
+++ b/source/common/http/BUILD
@@ -388,10 +388,12 @@ envoy_cc_library(
     name = "path_utility_lib",
     srcs = ["path_utility.cc"],
     hdrs = ["path_utility.h"],
-    external_deps = ["abseil_optional"],
+    external_deps = [
+        "abseil_optional",
+        "googleurl",
+    ],
     deps = [
         "//include/envoy/http:header_map_interface",
-        "//source/common/chromium_url",
         "//source/common/common:logger_lib",
     ],
 )
diff --git a/source/common/http/path_utility.cc b/source/common/http/path_utility.cc
index f12790b41103..5194a395e79b 100644
--- a/source/common/http/path_utility.cc
+++ b/source/common/http/path_utility.cc
@@ -1,12 +1,12 @@
 #include "common/http/path_utility.h"

-#include "common/chromium_url/url_canon.h"
-#include "common/chromium_url/url_canon_stdstring.h"
 #include "common/common/logger.h"

 #include "absl/strings/str_join.h"
 #include "absl/strings/str_split.h"
 #include "absl/types/optional.h"
+#include "url/url_canon.h"
+#include "url/url_canon_stdstring.h"

 namespace Envoy {
 namespace Http {
@@ -14,11 +14,10 @@ namespace Http {
 namespace {
 absl::optional<std::string> canonicalizePath(absl::string_view original_path) {
   std::string canonical_path;
-  chromium_url::Component in_component(0, original_path.size());
-  chromium_url::Component out_component;
-  chromium_url::StdStringCanonOutput output(&canonical_path);
-  if (!chromium_url::CanonicalizePath(original_path.data(), in_component, &output,
-                                      &out_component)) {
+  url::Component in_component(0, original_path.size());
+  url::Component out_component;
+  url::StdStringCanonOutput output(&canonical_path);
+  if (!CanonicalizePath(original_path.data(), in_component, &output, &out_component)) {
     return absl::nullopt;
   } else {
     output.Complete();

From 07bf5b5c48da92c4474d1ffd73ecedbc1b4ed0c3 Mon Sep 17 00:00:00 2001
From: htuch
Date: Fri, 10 Jul 2020 11:23:36 -0400
Subject: [PATCH 587/909] nghttp2: don't log unless ENVOY_NGHTTP2_TRACE is set. (#12016)

Avoiding nghttp2 debug logs by default is good for two reasons:

1. In regular developer integration test debugging at trace level, it's
   a pain to filter the very spammy nghttp2 traces.
2. Coverage runs use -l trace; it's hard to debug test output without
   generating O(100k) log lines, which results in massive CI log files.

Risk level: Low
Testing: No new tests, happy to add if it makes sense for this tweak.

Signed-off-by: Harvey Tuch
---
 bazel/EXTERNAL_DEPS.md                | 2 ++
 bazel/README.md                       | 8 ++++++++
 docs/root/version_history/current.rst | 2 ++
 source/common/http/http2/nghttp2.cc   | 18 +++++++++++-------
 4 files changed, 23 insertions(+), 7 deletions(-)

diff --git a/bazel/EXTERNAL_DEPS.md b/bazel/EXTERNAL_DEPS.md
index 160f925fdbf3..4f66ef80eac8 100644
--- a/bazel/EXTERNAL_DEPS.md
+++ b/bazel/EXTERNAL_DEPS.md
@@ -110,6 +110,8 @@ dependencies:
   in the `event` target in `bazel/foreign_cc/BUILD` for verbose tracing of
   libevent processing.

+* `nghttp2`: set `ENVOY_NGHTTP2_TRACE` in the environment and run at `-l trace`.
+
 # Distdir - prefetching dependencies

 Usually Bazel downloads all dependencies during build time. But there is a
diff --git a/bazel/README.md b/bazel/README.md
index 8bd7cf3dceb2..b45d55bce471 100644
--- a/bazel/README.md
+++ b/bazel/README.md
@@ -523,6 +523,14 @@ bazel test -c dbg --config=macos-asan //test/...

 Log verbosity is controlled at runtime in all builds.

+To obtain `nghttp2` traces, you can set `ENVOY_NGHTTP2_TRACE` in the environment for enhanced
+logging at `-l trace`. For example, in tests:
+
+```
+bazel test //test/integration:protocol_integration_test --test_output=streamed \
+  --test_arg="-l trace" --test_env="ENVOY_NGHTTP2_TRACE="
+```
+
 ## Disabling optional features

 The following optional features can be disabled on the Bazel build command-line:
diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst
index 0bfc5674f112..e03bc0eb9069 100644
--- a/docs/root/version_history/current.rst
+++ b/docs/root/version_history/current.rst
@@ -11,6 +11,8 @@ Minor Behavior Changes
 * http: the per-stream FilterState maintained by the HTTP connection manager will now provide
   read/write access to the downstream connection FilterState. As such, code that relies on
   interacting with this might see a change in behavior.
+* logging: nghttp2 log messages no longer appear at trace level unless `ENVOY_NGHTTP2_TRACE` is set
+  in the environment.
 * router: now consumes all retry related headers to prevent them from being propagated to the
   upstream. This behavior may be reverted by setting runtime feature
   `envoy.reloadable_features.consume_all_retry_headers` to false.
 Bug Fixes
diff --git a/source/common/http/http2/nghttp2.cc b/source/common/http/http2/nghttp2.cc
index 448ea9bb2404..68e9c620e9c8 100644
--- a/source/common/http/http2/nghttp2.cc
+++ b/source/common/http/http2/nghttp2.cc
@@ -14,15 +14,19 @@ namespace Http {
 namespace Http2 {

 void initializeNghttp2Logging() {
+  // Even when ENVOY_NGHTTP2_TRACE is not set, we install a debug logger to prevent nghttp2
+  // logging directly to stdout at -l trace.
   nghttp2_set_debug_vprintf_callback([](const char* format, va_list args) {
-    char buf[2048];
-    const int n = ::vsnprintf(buf, sizeof(buf), format, args);
-    // nghttp2 inserts new lines, but we also insert a new line in the ENVOY_LOG
-    // below, so avoid double \n.
-    if (n >= 1 && static_cast<size_t>(n) < sizeof(buf) && buf[n - 1] == '\n') {
-      buf[n - 1] = '\0';
+    if (std::getenv("ENVOY_NGHTTP2_TRACE") != nullptr) {
+      char buf[2048];
+      const int n = ::vsnprintf(buf, sizeof(buf), format, args);
+      // nghttp2 inserts new lines, but we also insert a new line in the ENVOY_LOG
+      // below, so avoid double \n.
+      if (n >= 1 && static_cast<size_t>(n) < sizeof(buf) && buf[n - 1] == '\n') {
+        buf[n - 1] = '\0';
+      }
+      ENVOY_LOG_TO_LOGGER(Logger::Registry::getLog(Logger::Id::http2), trace, "nghttp2: {}", buf);
     }
-    ENVOY_LOG_TO_LOGGER(Logger::Registry::getLog(Logger::Id::http2), trace, "nghttp2: {}", buf);
   });
 }

From 1a97d72790ee6fd96b38317949d14444f561659e Mon Sep 17 00:00:00 2001
From: Joshua Marantz
Date: Fri, 10 Jul 2020 11:35:56 -0400
Subject: [PATCH 588/909] stats: Keep well known stat tag names in a StatNameSet held in ThreadLocalStore (#11794)

Commit Message: Most stats are created with a well known set of tag names. This
establishes a StatNameSet for those, reducing the amount of pressure on the
symbol-table for re-symbolizing the same tag names as every stat is created.

Making this change exposed a latent sensitivity in the stats memory tests, where
we allocated slightly different amounts of memory for ipv6 vs ipv4. So this works
around the issue by only doing exact memory tests for ipv6.

Also changes StatNameSet::getBuiltin to a const method, since it modifies nothing.

Additional Description:
Risk Level: low
Testing: //test/..., and stats_integration_test with "-c opt".
Docs Changes: n/a Release Notes: n/a Signed-off-by: Joshua Marantz --- source/common/stats/symbol_table_impl.cc | 2 +- source/common/stats/symbol_table_impl.h | 2 +- source/common/stats/thread_local_store.cc | 14 +++++++-- source/common/stats/thread_local_store.h | 7 +++++ test/common/stats/thread_local_store_test.cc | 4 +-- test/integration/stats_integration_test.cc | 31 ++++++++++++++++---- 6 files changed, 49 insertions(+), 11 deletions(-) diff --git a/source/common/stats/symbol_table_impl.cc b/source/common/stats/symbol_table_impl.cc index bf36b088f44e..5a9a6df7461d 100644 --- a/source/common/stats/symbol_table_impl.cc +++ b/source/common/stats/symbol_table_impl.cc @@ -628,7 +628,7 @@ void StatNameSet::rememberBuiltin(absl::string_view str) { builtin_stat_names_[str] = stat_name; } -StatName StatNameSet::getBuiltin(absl::string_view token, StatName fallback) { +StatName StatNameSet::getBuiltin(absl::string_view token, StatName fallback) const { // If token was recorded as a built-in during initialization, we can // service this request lock-free. const auto iter = builtin_stat_names_.find(token); diff --git a/source/common/stats/symbol_table_impl.h b/source/common/stats/symbol_table_impl.h index 7d89b7db8205..664c5b56dacb 100644 --- a/source/common/stats/symbol_table_impl.h +++ b/source/common/stats/symbol_table_impl.h @@ -815,7 +815,7 @@ class StatNameSet { * * @return the StatName or fallback. */ - StatName getBuiltin(absl::string_view token, StatName fallback); + StatName getBuiltin(absl::string_view token, StatName fallback) const; /** * Adds a StatName using the pool, but without remembering it in any maps. diff --git a/source/common/stats/thread_local_store.cc b/source/common/stats/thread_local_store.cc index e2eeae81f2f4..328163801cb9 100644 --- a/source/common/stats/thread_local_store.cc +++ b/source/common/stats/thread_local_store.cc @@ -28,7 +28,12 @@ ThreadLocalStoreImpl::ThreadLocalStoreImpl(Allocator& alloc) tag_producer_(std::make_unique()), stats_matcher_(std::make_unique()), heap_allocator_(alloc.symbolTable()), null_counter_(alloc.symbolTable()), null_gauge_(alloc.symbolTable()), - null_histogram_(alloc.symbolTable()), null_text_readout_(alloc.symbolTable()) {} + null_histogram_(alloc.symbolTable()), null_text_readout_(alloc.symbolTable()), + well_known_tags_(alloc.symbolTable().makeSet("well_known_tags")) { + for (const auto& desc : Config::TagNames::get().descriptorVec()) { + well_known_tags_->rememberBuiltin(desc.name_); + } +} ThreadLocalStoreImpl::~ThreadLocalStoreImpl() { ASSERT(shutting_down_ || !threading_ever_initialized_); @@ -295,8 +300,13 @@ class StatNameTagHelper { tls.symbolTable().callWithStringView(name, [&tags, &tls, this](absl::string_view name_str) { tag_extracted_name_ = pool_.add(tls.tagProducer().produceTags(name_str, tags)); }); + StatName empty; for (const auto& tag : tags) { - stat_name_tags_.emplace_back(pool_.add(tag.name_), pool_.add(tag.value_)); + StatName tag_name = tls.wellKnownTags().getBuiltin(tag.name_, empty); + if (tag_name.empty()) { + tag_name = pool_.add(tag.name_); + } + stat_name_tags_.emplace_back(tag_name, pool_.add(tag.value_)); } } else { tag_extracted_name_ = name; diff --git a/source/common/stats/thread_local_store.h b/source/common/stats/thread_local_store.h index 83c7b51e2a70..3702a9bd77ee 100644 --- a/source/common/stats/thread_local_store.h +++ b/source/common/stats/thread_local_store.h @@ -263,6 +263,11 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo */ Thread::ThreadSynchronizer& sync() { return 
sync_; } + /** + * @return a set of well known tag names; used to reduce symbol table churn. + */ + const StatNameSet& wellKnownTags() const { return *well_known_tags_; } + private: template using StatRefMap = StatNameHashMap>; @@ -454,6 +459,8 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo Thread::ThreadSynchronizer sync_; std::atomic next_scope_id_{}; + + StatNameSetPtr well_known_tags_; }; } // namespace Stats diff --git a/test/common/stats/thread_local_store_test.cc b/test/common/stats/thread_local_store_test.cc index 8bb02d1004d0..3aa877ca4121 100644 --- a/test/common/stats/thread_local_store_test.cc +++ b/test/common/stats/thread_local_store_test.cc @@ -1113,7 +1113,7 @@ TEST_F(StatsThreadLocalStoreTestNoFixture, MemoryWithoutTlsRealSymbolTable) { TestUtil::MemoryTest memory_test; TestUtil::forEachSampleStat( 100, [this](absl::string_view name) { store_->counterFromString(std::string(name)); }); - EXPECT_MEMORY_EQ(memory_test.consumedBytes(), 689648); // Jan 23, 2020 + EXPECT_MEMORY_EQ(memory_test.consumedBytes(), 688080); // July 2, 2020 EXPECT_MEMORY_LE(memory_test.consumedBytes(), 0.75 * million_); } @@ -1123,7 +1123,7 @@ TEST_F(StatsThreadLocalStoreTestNoFixture, MemoryWithTlsRealSymbolTable) { TestUtil::MemoryTest memory_test; TestUtil::forEachSampleStat( 100, [this](absl::string_view name) { store_->counterFromString(std::string(name)); }); - EXPECT_MEMORY_EQ(memory_test.consumedBytes(), 829232); // Apr 08, 2020 + EXPECT_MEMORY_EQ(memory_test.consumedBytes(), 827664); // July 2, 2020 EXPECT_MEMORY_LE(memory_test.consumedBytes(), 0.9 * million_); } diff --git a/test/integration/stats_integration_test.cc b/test/integration/stats_integration_test.cc index f57afd00ed81..d1e19cb32ab1 100644 --- a/test/integration/stats_integration_test.cc +++ b/test/integration/stats_integration_test.cc @@ -201,9 +201,15 @@ class ClusterMemoryTestHelper : public BaseIntegrationTest { return memory_test.consumedBytes(); } }; + class ClusterMemoryTestRunner : public testing::TestWithParam { protected: + ClusterMemoryTestRunner() + : ip_version_(testing::TestWithParam::GetParam()) {} + Stats::TestUtil::SymbolTableCreatorTestPeer symbol_table_creator_test_peer_; + + Network::Address::IpVersion ip_version_; }; INSTANTIATE_TEST_SUITE_P(IpVersions, ClusterMemoryTestRunner, @@ -269,11 +275,11 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithFakeSymbolTable) { // 2020/04/02 10624 43356 44000 Use 100 clusters rather than 1000 to avoid timeouts // 2020/04/07 10661 43349 44000 fix clang tidy on master // 2020/04/23 10531 44169 44600 http: max stream duration upstream support. + // 2020/04/23 10661 44425 46000 per-listener connection limits // 2020/05/05 10908 44233 44600 router: add InternalRedirectPolicy and predicate // 2020/05/13 10531 44425 44600 Refactor resource manager // 2020/05/20 11223 44491 44600 Add primary clusters tracking to cluster manager. // 2020/06/10 11561 44491 44811 Make upstreams pluggable - // 2020/04/23 10661 44425 46000 per-listener connection limits // 2020/06/29 11751 44715 46000 Improve time complexity of removing callback handle // in callback manager. @@ -289,7 +295,12 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithFakeSymbolTable) { // If you encounter a failure here, please see // https://github.com/envoyproxy/envoy/blob/master/source/docs/stats.md#stats-memory-tests // for details on how to fix. 
- EXPECT_MEMORY_EQ(m_per_cluster, 44715); + // + // We only run the exact test for ipv6 because ipv4 in some cases may allocate a + // different number of bytes. We still run the approximate test. + if (ip_version_ != Network::Address::IpVersion::v6) { + EXPECT_MEMORY_EQ(m_per_cluster, 44715); + } EXPECT_MEMORY_LE(m_per_cluster, 46000); // Round up to allow platform variations. } @@ -335,11 +346,11 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithRealSymbolTable) { // 2020/04/02 10624 35564 36000 Use 100 clusters rather than 1000 to avoid timeouts // 2020/04/07 10661 35557 36000 fix clang tidy on master // 2020/04/23 10531 36281 36800 http: max stream duration upstream support. + // 2020/04/23 10661 36537 37000 per-listener connection limits // 2020/05/05 10908 36345 36800 router: add InternalRedirectPolicy and predicate // 2020/05/13 10531 36537 36800 Refactor resource manager // 2020/05/20 11223 36603 36800 Add primary clusters tracking to cluster manager. // 2020/06/10 11561 36603 36923 Make upstreams pluggable - // 2020/04/23 10661 36537 37000 per-listener connection limits // 2020/06/29 11751 36827 38000 Improve time complexity of removing callback handle. // in callback manager. @@ -355,7 +366,12 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithRealSymbolTable) { // If you encounter a failure here, please see // https://github.com/envoyproxy/envoy/blob/master/source/docs/stats.md#stats-memory-tests // for details on how to fix. - EXPECT_MEMORY_EQ(m_per_cluster, 36827); + // + // We only run the exact test for ipv6 because ipv4 in some cases may allocate a + // different number of bytes. We still run the approximate test. + if (ip_version_ != Network::Address::IpVersion::v6) { + EXPECT_MEMORY_EQ(m_per_cluster, 36827); + } EXPECT_MEMORY_LE(m_per_cluster, 38000); // Round up to allow platform variations. } @@ -397,7 +413,12 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeHostSizeWithStats) { // If you encounter a failure here, please see // https://github.com/envoyproxy/envoy/blob/master/source/docs/stats.md#stats-memory-tests // for details on how to fix. - EXPECT_MEMORY_EQ(m_per_host, 1380); + // + // We only run the exact test for ipv6 because ipv4 in some cases may allocate a + // different number of bytes. We still run the approximate test. + if (ip_version_ != Network::Address::IpVersion::v6) { + EXPECT_MEMORY_EQ(m_per_host, 1380); + } EXPECT_MEMORY_LE(m_per_host, 1800); // Round up to allow platform variations. } From 9e854a0f13c9bc9a052d8a2c4e4f825a4774b295 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Fri, 10 Jul 2020 12:35:59 -0400 Subject: [PATCH 589/909] test: adding hcm config coverage (#12010) Also updating numbers which were out of date. 
Signed-off-by: Alyssa Wilk --- .../network/http_connection_manager/config.cc | 17 +-- .../http_connection_manager/config_test.cc | 128 +++++++++--------- test/per_file_coverage.sh | 4 - 3 files changed, 72 insertions(+), 77 deletions(-) diff --git a/source/extensions/filters/network/http_connection_manager/config.cc b/source/extensions/filters/network/http_connection_manager/config.cc index 1c0958ad6f1d..33047d76d643 100644 --- a/source/extensions/filters/network/http_connection_manager/config.cc +++ b/source/extensions/filters/network/http_connection_manager/config.cc @@ -476,19 +476,16 @@ HttpConnectionManagerConfig::createCodec(Network::Connection& connection, const Buffer::Instance& data, Http::ServerConnectionCallbacks& callbacks) { switch (codec_type_) { - case CodecType::HTTP1: { - Http::Http1::CodecStats& stats = - Http::Http1::CodecStats::atomicGet(http1_codec_stats_, context_.scope()); + case CodecType::HTTP1: return std::make_unique( - connection, stats, callbacks, http1_settings_, maxRequestHeadersKb(), - maxRequestHeadersCount(), headersWithUnderscoresAction()); - } + connection, Http::Http1::CodecStats::atomicGet(http1_codec_stats_, context_.scope()), + callbacks, http1_settings_, maxRequestHeadersKb(), maxRequestHeadersCount(), + headersWithUnderscoresAction()); case CodecType::HTTP2: { - Http::Http2::CodecStats& stats = - Http::Http2::CodecStats::atomicGet(http2_codec_stats_, context_.scope()); return std::make_unique( - connection, callbacks, stats, http2_options_, maxRequestHeadersKb(), - maxRequestHeadersCount(), headersWithUnderscoresAction()); + connection, callbacks, + Http::Http2::CodecStats::atomicGet(http2_codec_stats_, context_.scope()), http2_options_, + maxRequestHeadersKb(), maxRequestHeadersCount(), headersWithUnderscoresAction()); } case CodecType::HTTP3: // Hard code Quiche factory name here to instantiate a QUIC codec implemented. 
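The `createCodec()` hunk above folds the `CodecStats::atomicGet(...)` lookups directly into the
codec constructor arguments; the helper's name suggests it builds the per-codec stats struct the
first time it is needed and hands back the same instance on later calls. Below is a minimal,
self-contained sketch of that "create on first use" pattern; `LazyCodecStats` and
`ExampleCodecStats` are hypothetical names used only for illustration, not Envoy's actual
`CodecStats` implementation.

```
#include <atomic>
#include <memory>
#include <mutex>
#include <string>

// Hypothetical stand-in for a stats struct that should be built at most once.
struct ExampleCodecStats {
  explicit ExampleCodecStats(const std::string& prefix) : prefix_(prefix) {}
  std::string prefix_;
};

// Generic "get or lazily create" helper: the first caller constructs the stats,
// later callers reuse the same instance (double-checked locking).
class LazyCodecStats {
public:
  ExampleCodecStats& get(const std::string& prefix) {
    ExampleCodecStats* stats = ptr_.load(std::memory_order_acquire);
    if (stats == nullptr) {
      std::lock_guard<std::mutex> lock(init_lock_);
      stats = ptr_.load(std::memory_order_relaxed);
      if (stats == nullptr) {
        owned_ = std::make_unique<ExampleCodecStats>(prefix); // e.g. built from a Stats::Scope.
        stats = owned_.get();
        ptr_.store(stats, std::memory_order_release);
      }
    }
    return *stats;
  }

private:
  std::atomic<ExampleCodecStats*> ptr_{nullptr};
  std::mutex init_lock_;
  std::unique_ptr<ExampleCodecStats> owned_;
};
```

With a helper of this shape, constructing the stats inline at the call site (as the refactored
switch above does) is safe to do on every connection: only the first call pays the construction
cost, and every later call returns the already-built struct.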
diff --git a/test/extensions/filters/network/http_connection_manager/config_test.cc b/test/extensions/filters/network/http_connection_manager/config_test.cc index b53042d80115..20731f6e51e5 100644 --- a/test/extensions/filters/network/http_connection_manager/config_test.cc +++ b/test/extensions/filters/network/http_connection_manager/config_test.cc @@ -40,7 +40,7 @@ namespace NetworkFilters { namespace HttpConnectionManager { envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager -parseHttpConnectionManagerFromV2Yaml(const std::string& yaml, bool avoid_boosting = true) { +parseHttpConnectionManagerFromYaml(const std::string& yaml, bool avoid_boosting = true) { envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager http_connection_manager; TestUtility::loadFromYamlAndValidate(yaml, http_connection_manager, false, avoid_boosting); @@ -57,8 +57,8 @@ class HttpConnectionManagerConfigTest : public testing::Test { std::shared_ptr> http_tracer_{ std::make_shared>()}; void createHttpConnectionManagerConfig(const std::string& yaml) { - HttpConnectionManagerConfig(parseHttpConnectionManagerFromV2Yaml(yaml), context_, - date_provider_, route_config_provider_manager_, + HttpConnectionManagerConfig(parseHttpConnectionManagerFromYaml(yaml), context_, date_provider_, + route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); } }; @@ -150,7 +150,9 @@ stat_prefix: router "chain."); } -TEST_F(HttpConnectionManagerConfigTest, MiscConfig) { +// When deprecating v2, remove the old style "operation_name: egress" config +// but retain the rest of the test. +TEST_F(HttpConnectionManagerConfigTest, DEPRECATED_FEATURE_TEST(MiscConfig)) { const std::string yaml_string = R"EOF( codec_type: http1 server_name: foo @@ -166,13 +168,13 @@ stat_prefix: router route: cluster: cluster tracing: - operation_name: ingress + operation_name: egress max_path_tag_length: 128 http_filters: - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string, false), + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string, false), context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); @@ -207,7 +209,7 @@ stat_prefix: router // there is no reason to obtain an actual HttpTracer. EXPECT_CALL(http_tracer_manager_, getOrCreateHttpTracer(_)).Times(0); - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); @@ -245,7 +247,7 @@ stat_prefix: router // there is no reason to obtain an actual HttpTracer. EXPECT_CALL(http_tracer_manager_, getOrCreateHttpTracer(_)).Times(0); - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); @@ -280,7 +282,7 @@ tracing: {} # notice that tracing is enabled // an actual HttpTracer must be obtained from the HttpTracerManager. 
EXPECT_CALL(http_tracer_manager_, getOrCreateHttpTracer(nullptr)).WillOnce(Return(http_tracer_)); - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); @@ -320,7 +322,7 @@ tracing: {} # notice that tracing is enabled EXPECT_CALL(http_tracer_manager_, getOrCreateHttpTracer(Pointee(ProtoEq(tracing_config.http())))) .WillOnce(Return(http_tracer_)); - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); @@ -379,7 +381,7 @@ stat_prefix: router EXPECT_CALL(http_tracer_manager_, getOrCreateHttpTracer(Pointee(ProtoEq(inlined_tracing_config)))) .WillOnce(Return(http_tracer_)); - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string, false), + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string, false), context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); @@ -410,7 +412,7 @@ stat_prefix: router key: com.bar.foo path: [ { key: xx }, { key: yy } ] )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); @@ -430,7 +432,7 @@ stat_prefix: router request_headers_for_tags: - foo )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string, false), + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string, false), context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); @@ -462,7 +464,7 @@ stat_prefix: router )EOF"; ON_CALL(context_, direction()).WillByDefault(Return(envoy::config::core::v3::OUTBOUND)); - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string, false), + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string, false), context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); EXPECT_EQ(Tracing::OperationName::Egress, config.tracingConfig()->operation_name_); @@ -488,7 +490,7 @@ stat_prefix: router )EOF"; ON_CALL(context_, direction()).WillByDefault(Return(envoy::config::core::v3::INBOUND)); - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string, false), + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string, false), context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); EXPECT_EQ(Tracing::OperationName::Ingress, config.tracingConfig()->operation_name_); @@ -507,7 +509,7 @@ TEST_F(HttpConnectionManagerConfigTest, SamplingDefault) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string, false), + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string, false), context_, 
date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); @@ -542,7 +544,7 @@ TEST_F(HttpConnectionManagerConfigTest, SamplingConfigured) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string, false), + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string, false), context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); @@ -576,7 +578,7 @@ TEST_F(HttpConnectionManagerConfigTest, FractionalSamplingConfigured) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string, false), + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string, false), context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); @@ -602,7 +604,7 @@ TEST_F(HttpConnectionManagerConfigTest, UnixSocketInternalAddress) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); Network::Address::PipeInstance unixAddress{"/foo"}; @@ -622,7 +624,7 @@ TEST_F(HttpConnectionManagerConfigTest, MaxRequestHeadersKbDefault) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); EXPECT_EQ(60, config.maxRequestHeadersKb()); @@ -638,7 +640,7 @@ TEST_F(HttpConnectionManagerConfigTest, MaxRequestHeadersKbConfigured) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); EXPECT_EQ(16, config.maxRequestHeadersKb()); @@ -654,7 +656,7 @@ TEST_F(HttpConnectionManagerConfigTest, MaxRequestHeadersKbMaxConfigurable) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); EXPECT_EQ(96, config.maxRequestHeadersKb()); @@ -671,7 +673,7 @@ TEST_F(HttpConnectionManagerConfigTest, DisabledStreamIdleTimeout) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); EXPECT_EQ(0, config.streamIdleTimeout().count()); @@ -688,7 +690,7 @@ TEST_F(HttpConnectionManagerConfigTest, DEPRECATED_FEATURE_TEST(IdleTimeout)) { - name: envoy.filters.http.router )EOF"; - 
HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string, false), + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string, false), context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); EXPECT_EQ(1000, config.idleTimeout().value().count()); @@ -706,7 +708,7 @@ TEST_F(HttpConnectionManagerConfigTest, CommonHttpProtocolIdleTimeout) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); EXPECT_EQ(1000, config.idleTimeout().value().count()); @@ -722,7 +724,7 @@ TEST_F(HttpConnectionManagerConfigTest, CommonHttpProtocolIdleTimeoutDefault) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); EXPECT_EQ(std::chrono::hours(1), config.idleTimeout().value()); @@ -740,7 +742,7 @@ TEST_F(HttpConnectionManagerConfigTest, CommonHttpProtocolIdleTimeoutOff) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); EXPECT_FALSE(config.idleTimeout().has_value()); @@ -756,7 +758,7 @@ TEST_F(HttpConnectionManagerConfigTest, DefaultMaxRequestHeaderCount) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); EXPECT_EQ(100, config.maxRequestHeadersCount()); @@ -774,7 +776,7 @@ TEST_F(HttpConnectionManagerConfigTest, MaxRequestHeaderCountConfigurable) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); EXPECT_EQ(200, config.maxRequestHeadersCount()); @@ -793,7 +795,7 @@ TEST_F(HttpConnectionManagerConfigTest, ServerOverwrite) { EXPECT_CALL(context_.runtime_loader_.snapshot_, featureEnabled(_, An())) .WillOnce(Invoke(&context_.runtime_loader_.snapshot_, &Runtime::MockSnapshot::featureEnabledDefault)); - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); EXPECT_EQ(HttpConnectionManagerConfig::HttpConnectionManagerProto::OVERWRITE, @@ -813,7 +815,7 @@ TEST_F(HttpConnectionManagerConfigTest, ServerAppendIfAbsent) 
{ EXPECT_CALL(context_.runtime_loader_.snapshot_, featureEnabled(_, An())) .WillOnce(Invoke(&context_.runtime_loader_.snapshot_, &Runtime::MockSnapshot::featureEnabledDefault)); - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); EXPECT_EQ(HttpConnectionManagerConfig::HttpConnectionManagerProto::APPEND_IF_ABSENT, @@ -833,7 +835,7 @@ TEST_F(HttpConnectionManagerConfigTest, ServerPassThrough) { EXPECT_CALL(context_.runtime_loader_.snapshot_, featureEnabled(_, An())) .WillOnce(Invoke(&context_.runtime_loader_.snapshot_, &Runtime::MockSnapshot::featureEnabledDefault)); - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); EXPECT_EQ(HttpConnectionManagerConfig::HttpConnectionManagerProto::PASS_THROUGH, @@ -854,7 +856,7 @@ TEST_F(HttpConnectionManagerConfigTest, NormalizePathDefault) { EXPECT_CALL(context_.runtime_loader_.snapshot_, featureEnabled(_, An())) .WillOnce(Invoke(&context_.runtime_loader_.snapshot_, &Runtime::MockSnapshot::featureEnabledDefault)); - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); #ifdef ENVOY_NORMALIZE_PATH_BY_DEFAULT @@ -877,7 +879,7 @@ TEST_F(HttpConnectionManagerConfigTest, NormalizePathRuntime) { EXPECT_CALL(context_.runtime_loader_.snapshot_, featureEnabled("http_connection_manager.normalize_path", An())) .WillOnce(Return(true)); - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); EXPECT_TRUE(config.shouldNormalizePath()); @@ -897,7 +899,7 @@ TEST_F(HttpConnectionManagerConfigTest, NormalizePathTrue) { EXPECT_CALL(context_.runtime_loader_.snapshot_, featureEnabled("http_connection_manager.normalize_path", An())) .Times(0); - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); EXPECT_TRUE(config.shouldNormalizePath()); @@ -917,7 +919,7 @@ TEST_F(HttpConnectionManagerConfigTest, NormalizePathFalse) { EXPECT_CALL(context_.runtime_loader_.snapshot_, featureEnabled("http_connection_manager.normalize_path", An())) .Times(0); - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); EXPECT_FALSE(config.shouldNormalizePath()); @@ -933,7 +935,7 @@ TEST_F(HttpConnectionManagerConfigTest, 
MergeSlashesDefault) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); EXPECT_FALSE(config.shouldMergeSlashes()); @@ -950,7 +952,7 @@ TEST_F(HttpConnectionManagerConfigTest, MergeSlashesTrue) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); EXPECT_TRUE(config.shouldMergeSlashes()); @@ -967,7 +969,7 @@ TEST_F(HttpConnectionManagerConfigTest, MergeSlashesFalse) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); EXPECT_FALSE(config.shouldMergeSlashes()); @@ -983,7 +985,7 @@ TEST_F(HttpConnectionManagerConfigTest, RemovePortDefault) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); EXPECT_FALSE(config.shouldStripMatchingPort()); @@ -1000,7 +1002,7 @@ TEST_F(HttpConnectionManagerConfigTest, RemovePortTrue) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); EXPECT_TRUE(config.shouldStripMatchingPort()); @@ -1017,7 +1019,7 @@ TEST_F(HttpConnectionManagerConfigTest, RemovePortFalse) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); EXPECT_FALSE(config.shouldStripMatchingPort()); @@ -1033,7 +1035,7 @@ TEST_F(HttpConnectionManagerConfigTest, HeadersWithUnderscoresAllowedByDefault) - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); EXPECT_EQ(envoy::config::core::v3::HttpProtocolOptions::ALLOW, @@ -1052,7 +1054,7 @@ TEST_F(HttpConnectionManagerConfigTest, HeadersWithUnderscoresDroppedByConfig) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + 
HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); EXPECT_EQ(envoy::config::core::v3::HttpProtocolOptions::DROP_HEADER, @@ -1071,7 +1073,7 @@ TEST_F(HttpConnectionManagerConfigTest, HeadersWithUnderscoresRequestRejectedByC - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); EXPECT_EQ(envoy::config::core::v3::HttpProtocolOptions::REJECT_REQUEST, @@ -1088,7 +1090,7 @@ TEST_F(HttpConnectionManagerConfigTest, ConfiguredRequestTimeout) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); EXPECT_EQ(53 * 1000, config.requestTimeout().count()); @@ -1104,7 +1106,7 @@ TEST_F(HttpConnectionManagerConfigTest, DisabledRequestTimeout) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); EXPECT_EQ(0, config.requestTimeout().count()); @@ -1119,7 +1121,7 @@ TEST_F(HttpConnectionManagerConfigTest, UnconfiguredRequestTimeout) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); EXPECT_EQ(0, config.requestTimeout().count()); @@ -1146,7 +1148,7 @@ stat_prefix: router - name: envoy.filters.http.router )EOF"; - auto proto_config = parseHttpConnectionManagerFromV2Yaml(yaml_string); + auto proto_config = parseHttpConnectionManagerFromYaml(yaml_string); HttpConnectionManagerFilterConfigFactory factory; // We expect a single slot allocation vs. multiple. 
EXPECT_CALL(context_.thread_local_, allocateSlot()); @@ -1173,7 +1175,7 @@ stat_prefix: my_stat_prefix - {} )EOF"; - EXPECT_THROW(parseHttpConnectionManagerFromV2Yaml(yaml_string), EnvoyException); + EXPECT_THROW(parseHttpConnectionManagerFromYaml(yaml_string), EnvoyException); } TEST_F(HttpConnectionManagerConfigTest, BadAccessLogConfig) { @@ -1201,7 +1203,7 @@ stat_prefix: my_stat_prefix filter: [] )EOF"; - EXPECT_THROW_WITH_REGEX(parseHttpConnectionManagerFromV2Yaml(yaml_string), EnvoyException, + EXPECT_THROW_WITH_REGEX(parseHttpConnectionManagerFromYaml(yaml_string), EnvoyException, "filter: Proto field is not repeating, cannot start list."); } @@ -1231,7 +1233,7 @@ stat_prefix: my_stat_prefix bad_type: {} )EOF"; - EXPECT_THROW_WITH_REGEX(parseHttpConnectionManagerFromV2Yaml(yaml_string), EnvoyException, + EXPECT_THROW_WITH_REGEX(parseHttpConnectionManagerFromYaml(yaml_string), EnvoyException, "bad_type: Cannot find field"); } @@ -1269,7 +1271,7 @@ stat_prefix: my_stat_prefix - not_health_check_filter: {} )EOF"; - EXPECT_THROW_WITH_REGEX(parseHttpConnectionManagerFromV2Yaml(yaml_string), EnvoyException, + EXPECT_THROW_WITH_REGEX(parseHttpConnectionManagerFromYaml(yaml_string), EnvoyException, "bad_type: Cannot find field"); } @@ -1493,7 +1495,7 @@ TEST_F(HttpConnectionManagerConfigTest, AlwaysSetRequestIdInResponseDefault) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); EXPECT_FALSE(config.alwaysSetRequestIdInResponse()); @@ -1509,7 +1511,7 @@ TEST_F(HttpConnectionManagerConfigTest, AlwaysSetRequestIdInResponseConfigured) - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); EXPECT_TRUE(config.alwaysSetRequestIdInResponse()); @@ -1574,7 +1576,7 @@ TEST_F(HttpConnectionManagerConfigTest, CustomRequestIDExtension) { TestRequestIDExtensionFactory factory; Registry::InjectFactory registration(factory); - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); auto request_id_extension = @@ -1609,7 +1611,7 @@ TEST_F(HttpConnectionManagerConfigTest, DefaultRequestIDExtension) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); auto request_id_extension = @@ -1641,7 +1643,7 @@ stat_prefix: router }; TEST_F(FilterChainTest, CreateFilterChain) { - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(basic_config_), context_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(basic_config_), context_, date_provider_, 
route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_); @@ -1653,7 +1655,7 @@ TEST_F(FilterChainTest, CreateFilterChain) { // Tests where upgrades are configured on via the HCM. TEST_F(FilterChainTest, CreateUpgradeFilterChain) { - auto hcm_config = parseHttpConnectionManagerFromV2Yaml(basic_config_); + auto hcm_config = parseHttpConnectionManagerFromYaml(basic_config_); hcm_config.add_upgrade_configs()->set_upgrade_type("websocket"); HttpConnectionManagerConfig config(hcm_config, context_, date_provider_, @@ -1699,7 +1701,7 @@ TEST_F(FilterChainTest, CreateUpgradeFilterChain) { // Tests where upgrades are configured off via the HCM. TEST_F(FilterChainTest, CreateUpgradeFilterChainHCMDisabled) { - auto hcm_config = parseHttpConnectionManagerFromV2Yaml(basic_config_); + auto hcm_config = parseHttpConnectionManagerFromYaml(basic_config_); hcm_config.add_upgrade_configs()->set_upgrade_type("websocket"); hcm_config.mutable_upgrade_configs(0)->mutable_enabled()->set_value(false); @@ -1737,7 +1739,7 @@ TEST_F(FilterChainTest, CreateUpgradeFilterChainHCMDisabled) { } TEST_F(FilterChainTest, CreateCustomUpgradeFilterChain) { - auto hcm_config = parseHttpConnectionManagerFromV2Yaml(basic_config_); + auto hcm_config = parseHttpConnectionManagerFromYaml(basic_config_); auto websocket_config = hcm_config.add_upgrade_configs(); websocket_config->set_upgrade_type("websocket"); @@ -1783,7 +1785,7 @@ TEST_F(FilterChainTest, CreateCustomUpgradeFilterChain) { } TEST_F(FilterChainTest, CreateCustomUpgradeFilterChainWithRouterNotLast) { - auto hcm_config = parseHttpConnectionManagerFromV2Yaml(basic_config_); + auto hcm_config = parseHttpConnectionManagerFromYaml(basic_config_); auto websocket_config = hcm_config.add_upgrade_configs(); websocket_config->set_upgrade_type("websocket"); @@ -1810,7 +1812,7 @@ TEST_F(FilterChainTest, CreateCustomUpgradeFilterChainWithRouterNotLast) { } TEST_F(FilterChainTest, InvalidConfig) { - auto hcm_config = parseHttpConnectionManagerFromV2Yaml(basic_config_); + auto hcm_config = parseHttpConnectionManagerFromYaml(basic_config_); hcm_config.add_upgrade_configs()->set_upgrade_type("WEBSOCKET"); hcm_config.add_upgrade_configs()->set_upgrade_type("websocket"); diff --git a/test/per_file_coverage.sh b/test/per_file_coverage.sh index 9afed0555fdd..b1d3636b666a 100755 --- a/test/per_file_coverage.sh +++ b/test/per_file_coverage.sh @@ -5,7 +5,6 @@ declare -a KNOWN_LOW_COVERAGE=( "source/extensions/common:94.0" "source/extensions/common/crypto:91.5" -"source/extensions/common/tap:95.9" "source/extensions/common/wasm:85.4" "source/extensions/common/wasm/null:77.8" "source/extensions/common/wasm/v8:85.4" @@ -19,9 +18,7 @@ declare -a KNOWN_LOW_COVERAGE=( "source/extensions/filters/http/cache:80.7" "source/extensions/filters/http/cache/simple_http_cache:84.5" "source/extensions/filters/http/dynamic_forward_proxy:91.5" -"source/extensions/filters/http/ext_authz:96.5" "source/extensions/filters/http/grpc_json_transcoder:93.3" -"source/extensions/filters/http/header_to_metadata:95.0" "source/extensions/filters/http/ip_tagging:91.2" "source/extensions/filters/listener:95.6" "source/extensions/filters/listener/http_inspector:93.3" @@ -31,7 +28,6 @@ declare -a KNOWN_LOW_COVERAGE=( "source/extensions/filters/network/direct_response:89.3" "source/extensions/filters/network/dubbo_proxy:96.1" "source/extensions/filters/network/dubbo_proxy/router:95.1" -"source/extensions/filters/network/http_connection_manager:95.9" 
"source/extensions/filters/network/mongo_proxy:94.0" "source/extensions/filters/network/sni_cluster:90.3" "source/extensions/filters/network/sni_dynamic_forward_proxy:89.4" From 7b2698b23de27cef3f8bf6808a025aafc9cc0c84 Mon Sep 17 00:00:00 2001 From: Dhi Aurrahman Date: Fri, 10 Jul 2020 23:50:31 +0700 Subject: [PATCH 590/909] examples, ext_authz: Add opa-service as authorization server (#11869) This shows a simple example of using the Open Policy Agent server as the authorization server. Signed-off-by: Dhi Aurrahman --- docs/root/start/sandboxes/ext_authz.rst | 84 +++++++++++++++++++ .../ext_authz/config/opa-service/policy.rego | 11 +++ examples/ext_authz/config/opa-service/v2.yaml | 72 ++++++++++++++++ examples/ext_authz/docker-compose.yaml | 17 ++++ 4 files changed, 184 insertions(+) create mode 100644 examples/ext_authz/config/opa-service/policy.rego create mode 100644 examples/ext_authz/config/opa-service/v2.yaml diff --git a/docs/root/start/sandboxes/ext_authz.rst b/docs/root/start/sandboxes/ext_authz.rst index 9ff39229dad2..fd890c556299 100644 --- a/docs/root/start/sandboxes/ext_authz.rst +++ b/docs/root/start/sandboxes/ext_authz.rst @@ -111,3 +111,87 @@ An example of successful requests can be observed as follows:: < * Connection #0 to host localhost left intact Hello user1 from behind Envoy! + +We can also employ `Open Policy Agent `_ server +(with `envoy_ext_authz_grpc `_ plugin enabled) +as the authorization server. To run this example:: + + $ pwd + envoy/examples/ext_authz + $ docker-compose pull + $ # Tearing down the currently running setup + $ docker-compose down + $ FRONT_ENVOY_YAML=config/opa-service/v2.yaml docker-compose up --build -d + +And sending a request to the upstream service (via the Front Envoy) gives:: + + $ curl localhost:8000/service --verbose + * Trying ::1... + * TCP_NODELAY set + * Connected to localhost (::1) port 8000 (#0) + > GET /service HTTP/1.1 + > Host: localhost:8000 + > User-Agent: curl/7.64.1 + > Accept: */* + > + < HTTP/1.1 200 OK + < content-type: text/html; charset=utf-8 + < content-length: 28 + < server: envoy + < date: Thu, 02 Jul 2020 06:29:58 GMT + < x-envoy-upstream-service-time: 2 + < + * Connection #0 to host localhost left intact + Hello OPA from behind Envoy! 
+ +From the logs, we can observe the policy decision message from the Open Policy Agent server (for +the above request against the defined policy in ``config/opa-service/policy.rego``):: + + $ docker-compose logs ext_authz-opa-service | grep decision_id -A 30 + ext_authz-opa-service_1 | "decision_id": "8143ca68-42d8-43e6-ade6-d1169bf69110", + ext_authz-opa-service_1 | "input": { + ext_authz-opa-service_1 | "attributes": { + ext_authz-opa-service_1 | "destination": { + ext_authz-opa-service_1 | "address": { + ext_authz-opa-service_1 | "Address": { + ext_authz-opa-service_1 | "SocketAddress": { + ext_authz-opa-service_1 | "PortSpecifier": { + ext_authz-opa-service_1 | "PortValue": 8000 + ext_authz-opa-service_1 | }, + ext_authz-opa-service_1 | "address": "172.28.0.6" + ext_authz-opa-service_1 | } + ext_authz-opa-service_1 | } + ext_authz-opa-service_1 | } + ext_authz-opa-service_1 | }, + ext_authz-opa-service_1 | "metadata_context": {}, + ext_authz-opa-service_1 | "request": { + ext_authz-opa-service_1 | "http": { + ext_authz-opa-service_1 | "headers": { + ext_authz-opa-service_1 | ":authority": "localhost:8000", + ext_authz-opa-service_1 | ":method": "GET", + ext_authz-opa-service_1 | ":path": "/service", + ext_authz-opa-service_1 | "accept": "*/*", + ext_authz-opa-service_1 | "user-agent": "curl/7.64.1", + ext_authz-opa-service_1 | "x-forwarded-proto": "http", + ext_authz-opa-service_1 | "x-request-id": "b77919c0-f1d4-4b06-b444-5a8b32d5daf4" + ext_authz-opa-service_1 | }, + ext_authz-opa-service_1 | "host": "localhost:8000", + ext_authz-opa-service_1 | "id": "16617514055874272263", + ext_authz-opa-service_1 | "method": "GET", + ext_authz-opa-service_1 | "path": "/service", + +Trying to send a request with method other than ``GET`` gives a rejection:: + + $ curl -X POST localhost:8000/service --verbose + * Trying ::1... 
+ * TCP_NODELAY set + * Connected to localhost (::1) port 8000 (#0) + > PUT /service HTTP/1.1 + > Host: localhost:8000 + > User-Agent: curl/7.64.1 + > Accept: */* + > + < HTTP/1.1 403 Forbidden + < date: Thu, 02 Jul 2020 06:46:13 GMT + < server: envoy + < content-length: 0 diff --git a/examples/ext_authz/config/opa-service/policy.rego b/examples/ext_authz/config/opa-service/policy.rego new file mode 100644 index 000000000000..2f9bdf5d2db9 --- /dev/null +++ b/examples/ext_authz/config/opa-service/policy.rego @@ -0,0 +1,11 @@ +package istio.authz + +default allow = false + +allow = response { + input.attributes.request.http.method == "GET" + response := { + "allowed": true, + "headers": {"x-current-user": "OPA"} + } +} diff --git a/examples/ext_authz/config/opa-service/v2.yaml b/examples/ext_authz/config/opa-service/v2.yaml new file mode 100644 index 000000000000..401c6df46832 --- /dev/null +++ b/examples/ext_authz/config/opa-service/v2.yaml @@ -0,0 +1,72 @@ +static_resources: + listeners: + - address: + socket_address: + address: 0.0.0.0 + port_value: 8000 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + codec_type: auto + stat_prefix: ingress_http + route_config: + name: local_route + virtual_hosts: + - name: upstream + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: upstream-service + http_filters: + - name: envoy.filters.http.ext_authz + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz + grpc_service: + envoy_grpc: + cluster_name: ext_authz-opa-service + timeout: 0.250s + transport_api_version: V2 + - name: envoy.filters.http.router + typed_config: {} + + clusters: + - name: upstream-service + connect_timeout: 0.250s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: upstream-service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: upstream-service + port_value: 8080 + + - name: ext_authz-opa-service + connect_timeout: 0.250s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + http2_protocol_options: {} + load_assignment: + cluster_name: ext_authz-opa-service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: ext_authz-opa-service + port_value: 9002 + +admin: + access_log_path: "/dev/null" + address: + socket_address: + address: 0.0.0.0 + port_value: 8001 diff --git a/examples/ext_authz/docker-compose.yaml b/examples/ext_authz/docker-compose.yaml index 66714d1a7dc1..e7fb59a7f2c3 100644 --- a/examples/ext_authz/docker-compose.yaml +++ b/examples/ext_authz/docker-compose.yaml @@ -40,6 +40,23 @@ services: aliases: - ext_authz-grpc-service + ext_authz-opa-service: + image: openpolicyagent/opa:0.21.0-istio + volumes: + - ./config/opa-service/policy.rego:/etc/policy.rego + command: + - run + - --log-level=debug + - --server + - --log-format=json-pretty + - --set=plugins.envoy_ext_authz_grpc.addr=:9002 + - --set=decision_logs.console=true + - /etc/policy.rego + networks: + envoymesh: + aliases: + - ext_authz-opa-service + upstream-service: build: context: ./upstream From b0d0bb87f71d46831d7214d5c4bab6feb545eece Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Fri, 10 Jul 2020 13:01:19 -0400 Subject: [PATCH 591/909] code: moving random out of runtime (#11960) Signed-off-by: Alyssa Wilk --- include/envoy/common/BUILD | 5 + include/envoy/common/random_generator.h | 54 +++++++ 
include/envoy/runtime/runtime.h | 41 ------ include/envoy/server/BUILD | 2 + include/envoy/server/factory_context.h | 3 +- include/envoy/server/health_checker_config.h | 3 +- include/envoy/server/instance.h | 3 +- .../envoy/server/transport_socket_config.h | 4 +- include/envoy/upstream/BUILD | 1 + include/envoy/upstream/cluster_factory.h | 3 +- include/envoy/upstream/cluster_manager.h | 3 +- source/common/access_log/access_log_impl.cc | 10 +- source/common/access_log/access_log_impl.h | 16 +-- source/common/common/BUILD | 17 ++- source/common/common/backoff_strategy.cc | 2 +- source/common/common/backoff_strategy.h | 6 +- source/common/common/random_generator.cc | 135 ++++++++++++++++++ source/common/common/random_generator.h | 19 +++ source/common/config/datasource.cc | 2 +- source/common/config/datasource.h | 3 +- source/common/config/grpc_mux_impl.cc | 2 +- source/common/config/grpc_mux_impl.h | 3 +- source/common/config/grpc_stream.h | 5 +- .../common/config/http_subscription_impl.cc | 2 +- source/common/config/http_subscription_impl.h | 3 +- source/common/config/new_grpc_mux_impl.cc | 2 +- source/common/config/new_grpc_mux_impl.h | 3 +- .../config/subscription_factory_impl.cc | 2 +- .../common/config/subscription_factory_impl.h | 5 +- source/common/config/utility.h | 3 +- source/common/http/async_client_impl.cc | 2 +- source/common/http/async_client_impl.h | 3 +- source/common/http/conn_manager_impl.cc | 2 +- source/common/http/conn_manager_impl.h | 5 +- .../common/http/request_id_extension_impl.cc | 2 +- .../common/http/request_id_extension_impl.h | 3 +- .../http/request_id_extension_uuid_impl.cc | 6 +- .../http/request_id_extension_uuid_impl.h | 5 +- source/common/http/rest_api_fetcher.cc | 2 +- source/common/http/rest_api_fetcher.h | 5 +- source/common/router/retry_state_impl.cc | 13 +- source/common/router/retry_state_impl.h | 7 +- source/common/router/router.cc | 2 +- source/common/router/router.h | 9 +- source/common/runtime/BUILD | 2 +- source/common/runtime/runtime_impl.cc | 130 +---------------- source/common/runtime/runtime_impl.h | 24 +--- source/common/tcp_proxy/tcp_proxy.h | 3 +- source/common/upstream/BUILD | 1 + .../common/upstream/cluster_factory_impl.cc | 2 +- source/common/upstream/cluster_factory_impl.h | 9 +- .../common/upstream/cluster_manager_impl.cc | 2 +- source/common/upstream/cluster_manager_impl.h | 9 +- .../upstream/health_checker_base_impl.cc | 2 +- .../upstream/health_checker_base_impl.h | 5 +- source/common/upstream/health_checker_impl.cc | 14 +- source/common/upstream/health_checker_impl.h | 9 +- .../upstream/health_discovery_service.cc | 6 +- .../upstream/health_discovery_service.h | 9 +- source/common/upstream/load_balancer_impl.cc | 6 +- source/common/upstream/load_balancer_impl.h | 16 +-- source/common/upstream/maglev_lb.cc | 2 +- source/common/upstream/maglev_lb.h | 3 +- source/common/upstream/ring_hash_lb.cc | 2 +- source/common/upstream/ring_hash_lb.h | 2 +- source/common/upstream/subset_lb.cc | 2 +- source/common/upstream/subset_lb.h | 4 +- source/common/upstream/thread_aware_lb_impl.h | 10 +- source/common/upstream/upstream_impl.cc | 4 +- source/exe/main_common.cc | 6 +- source/exe/main_common.h | 4 +- .../extensions/clusters/aggregate/cluster.cc | 2 +- .../extensions/clusters/aggregate/cluster.h | 10 +- .../extensions/clusters/redis/redis_cluster.h | 3 +- .../clusters/redis/redis_cluster_lb.cc | 2 +- .../clusters/redis/redis_cluster_lb.h | 8 +- .../common/dynamic_forward_proxy/BUILD | 1 + .../common/dynamic_forward_proxy/dns_cache.h | 3 +- 
.../dynamic_forward_proxy/dns_cache_impl.cc | 2 +- .../dynamic_forward_proxy/dns_cache_impl.h | 2 +- .../dns_cache_manager_impl.cc | 2 +- .../dns_cache_manager_impl.h | 8 +- .../controller/gradient_controller.cc | 3 +- .../controller/gradient_controller.h | 5 +- .../admission_control/admission_control.cc | 3 +- .../admission_control/admission_control.h | 7 +- .../client_ssl_auth/client_ssl_auth.cc | 4 +- .../network/client_ssl_auth/client_ssl_auth.h | 6 +- .../network/dubbo_proxy/active_message.cc | 6 +- .../network/dubbo_proxy/conn_manager.cc | 2 +- .../network/dubbo_proxy/conn_manager.h | 8 +- .../network/thrift_proxy/conn_manager.cc | 2 +- .../network/thrift_proxy/conn_manager.h | 6 +- .../filters/udp/dns_filter/dns_filter.h | 4 +- .../filters/udp/dns_filter/dns_parser.h | 6 +- .../extensions/health_checkers/redis/redis.cc | 2 +- .../extensions/health_checkers/redis/redis.h | 2 +- .../tracers/xray/localized_sampling.h | 4 +- .../tracers/xray/sampling_strategy.h | 6 +- source/extensions/tracers/xray/tracer.cc | 4 +- source/extensions/tracers/zipkin/tracer.h | 6 +- .../tracers/zipkin/zipkin_tracer_impl.cc | 2 +- .../tracers/zipkin/zipkin_tracer_impl.h | 3 +- .../config_validation/cluster_manager.cc | 2 +- .../config_validation/cluster_manager.h | 4 +- source/server/config_validation/server.h | 5 +- source/server/filter_chain_manager_impl.cc | 4 +- source/server/filter_chain_manager_impl.h | 4 +- source/server/listener_impl.cc | 4 +- source/server/listener_impl.h | 4 +- source/server/server.cc | 2 +- source/server/server.h | 8 +- source/server/transport_socket_config_impl.h | 6 +- .../common/access_log/access_log_impl_test.cc | 10 +- test/common/common/BUILD | 12 ++ test/common/common/backoff_strategy_test.cc | 12 +- test/common/common/random_generator_test.cc | 72 ++++++++++ test/common/config/datasource_test.cc | 2 +- .../config/delta_subscription_impl_test.cc | 2 +- .../config/delta_subscription_test_harness.h | 4 +- test/common/config/grpc_mux_impl_test.cc | 2 +- test/common/config/grpc_stream_test.cc | 2 +- .../config/grpc_subscription_test_harness.h | 2 +- .../config/http_subscription_test_harness.h | 2 +- test/common/config/new_grpc_mux_impl_test.cc | 2 +- .../config/subscription_factory_impl_test.cc | 2 +- test/common/config/utility_test.cc | 2 +- .../grpc_client_integration_test_harness.h | 2 +- test/common/http/async_client_impl_test.cc | 2 +- .../http/conn_manager_impl_fuzz_test.cc | 4 +- test/common/http/conn_manager_impl_test.cc | 2 +- test/common/http/conn_manager_utility_test.cc | 7 +- .../http2/metadata_encoder_decoder_test.cc | 6 +- .../request_id_extension_uuid_impl_test.cc | 18 +-- test/common/protobuf/utility_test.cc | 4 +- test/common/router/retry_state_impl_test.cc | 2 +- test/common/router/router_test.cc | 4 +- .../common/router/router_upstream_log_test.cc | 2 +- test/common/runtime/runtime_impl_test.cc | 61 +------- .../common/secret/secret_manager_impl_test.cc | 14 +- .../common/stats/recent_lookups_speed_test.cc | 5 +- test/common/stream_info/test_util.h | 4 +- test/common/tracing/http_tracer_impl_test.cc | 4 +- .../upstream/cluster_factory_impl_test.cc | 2 +- test/common/upstream/eds_speed_test.cc | 2 +- test/common/upstream/eds_test.cc | 2 +- test/common/upstream/hds_test.cc | 2 +- .../upstream/health_checker_impl_test.cc | 6 +- .../upstream/load_balancer_benchmark.cc | 4 +- .../upstream/load_balancer_impl_test.cc | 4 +- .../upstream/load_balancer_simulation_test.cc | 6 +- .../upstream/logical_dns_cluster_test.cc | 2 +- test/common/upstream/maglev_lb_test.cc | 2 
+- .../upstream/original_dst_cluster_test.cc | 2 +- test/common/upstream/ring_hash_lb_test.cc | 2 +- test/common/upstream/subset_lb_test.cc | 2 +- test/common/upstream/test_cluster_manager.h | 6 +- test/common/upstream/upstream_impl_test.cc | 4 +- test/config_test/config_test.cc | 2 +- test/exe/main_common_test.cc | 9 +- .../clusters/aggregate/cluster_test.cc | 2 +- .../dynamic_forward_proxy/cluster_test.cc | 4 +- .../redis/redis_cluster_integration_test.cc | 4 +- .../clusters/redis/redis_cluster_lb_test.cc | 2 +- .../clusters/redis/redis_cluster_test.cc | 2 +- .../dns_cache_impl_test.cc | 6 +- .../controller/gradient_controller_test.cc | 3 +- .../admission_control_filter_test.cc | 4 +- .../http/admission_control/config_test.cc | 2 +- .../client_ssl_auth/client_ssl_auth_test.cc | 2 +- .../network/dubbo_proxy/conn_manager_test.cc | 2 +- .../network/thrift_proxy/conn_manager_test.cc | 2 +- .../udp/dns_filter/dns_filter_fuzz_test.cc | 3 +- .../dns_filter/dns_filter_integration_test.cc | 2 +- .../filters/udp/dns_filter/dns_filter_test.cc | 2 +- .../udp/dns_filter/dns_filter_test_utils.cc | 4 +- .../health_checkers/redis/config_test.cc | 2 +- .../health_checkers/redis/redis_test.cc | 2 +- .../quiche/active_quic_listener_test.cc | 4 +- .../datadog/datadog_tracer_impl_test.cc | 2 +- .../lightstep/lightstep_tracer_impl_test.cc | 2 +- .../tracers/xray/localized_sampling_test.cc | 48 +++---- test/extensions/tracers/zipkin/tracer_test.cc | 14 +- .../tracers/zipkin/zipkin_tracer_impl_test.cc | 2 +- .../tls/context_impl_test.cc | 2 +- test/integration/http2_integration_test.cc | 3 +- test/integration/server.cc | 9 +- test/integration/server.h | 4 +- test/mocks/BUILD | 1 + test/mocks/common.cc | 10 ++ test/mocks/common.h | 14 ++ test/mocks/runtime/mocks.cc | 4 - test/mocks/runtime/mocks.h | 11 -- test/mocks/server/factory_context.h | 4 +- .../server/health_checker_factory_context.h | 4 +- test/mocks/server/instance.h | 8 +- .../server/transport_socket_factory_context.h | 2 +- .../config_validation/cluster_manager_test.cc | 2 +- test/server/server_fuzz_test.cc | 3 +- test/server/server_test.cc | 6 +- test/test_common/test_runtime.h | 3 +- test/tools/router_check/router.cc | 4 +- 202 files changed, 786 insertions(+), 651 deletions(-) create mode 100644 include/envoy/common/random_generator.h create mode 100644 source/common/common/random_generator.cc create mode 100644 source/common/common/random_generator.h create mode 100644 test/common/common/random_generator_test.cc diff --git a/include/envoy/common/BUILD b/include/envoy/common/BUILD index 30a931976d55..13bce53792c1 100644 --- a/include/envoy/common/BUILD +++ b/include/envoy/common/BUILD @@ -33,6 +33,11 @@ envoy_cc_library( hdrs = ["mutex_tracer.h"], ) +envoy_cc_library( + name = "random_generator_interface", + hdrs = ["random_generator.h"], +) + envoy_cc_library( name = "resource_interface", hdrs = ["resource.h"], diff --git a/include/envoy/common/random_generator.h b/include/envoy/common/random_generator.h new file mode 100644 index 000000000000..90fb1b7c1543 --- /dev/null +++ b/include/envoy/common/random_generator.h @@ -0,0 +1,54 @@ +#pragma once + +#include +#include +#include + +#include "envoy/common/pure.h" + +namespace Envoy { +namespace Random { + +/** + * Random number generator. Implementations should be thread safe. + */ +class RandomGenerator { +public: + virtual ~RandomGenerator() = default; + + using result_type = uint64_t; // NOLINT(readability-identifier-naming) + + /** + * @return uint64_t a new random number. 
+ */ + virtual result_type random() PURE; + + /* + * @return the smallest value that `operator()` may return. The value is + * strictly less than `max()`. + */ + constexpr static result_type min() noexcept { return std::numeric_limits::min(); }; + + /* + * @return the largest value that `operator()` may return. The value is + * strictly greater than `min()`. + */ + constexpr static result_type max() noexcept { return std::numeric_limits::max(); }; + + /* + * @return a value in the closed interval `[min(), max()]`. Has amortized + * constant complexity. + */ + result_type operator()() { return result_type(random()); }; + + /** + * @return std::string containing uuid4 of 36 char length. + * for example, 7c25513b-0466-4558-a64c-12c6704f37ed + */ + virtual std::string uuid() PURE; +}; + +using RandomGeneratorPtr = std::unique_ptr; + +} // namespace Random +} // namespace Envoy diff --git a/include/envoy/runtime/runtime.h b/include/envoy/runtime/runtime.h index 6b7f17191e75..bdd2f67114fd 100644 --- a/include/envoy/runtime/runtime.h +++ b/include/envoy/runtime/runtime.h @@ -27,47 +27,6 @@ class ClusterManager; namespace Runtime { -/** - * Random number generator. Implementations should be thread safe. - */ -class RandomGenerator { -public: - virtual ~RandomGenerator() = default; - - using result_type = uint64_t; // NOLINT(readability-identifier-naming) - - /** - * @return uint64_t a new random number. - */ - virtual result_type random() PURE; - - /* - * @return the smallest value that `operator()` may return. The value is - * strictly less than `max()`. - */ - constexpr static result_type min() noexcept { return std::numeric_limits::min(); }; - - /* - * @return the largest value that `operator()` may return. The value is - * strictly greater than `min()`. - */ - constexpr static result_type max() noexcept { return std::numeric_limits::max(); }; - - /* - * @return a value in the closed interval `[min(), max()]`. Has amortized - * constant complexity. - */ - result_type operator()() { return result_type(random()); }; - - /** - * @return std::string containing uuid4 of 36 char length. - * for example, 7c25513b-0466-4558-a64c-12c6704f37ed - */ - virtual std::string uuid() PURE; -}; - -using RandomGeneratorPtr = std::unique_ptr; - /** * A snapshot of runtime data. 
*/ diff --git a/include/envoy/server/BUILD b/include/envoy/server/BUILD index dd72215ae18e..534270f24e74 100644 --- a/include/envoy/server/BUILD +++ b/include/envoy/server/BUILD @@ -78,6 +78,7 @@ envoy_cc_library( name = "health_checker_config_interface", hdrs = ["health_checker_config.h"], deps = [ + "//include/envoy/common:random_generator_interface", "//include/envoy/config:typed_config_interface", "//include/envoy/upstream:health_checker_interface", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", @@ -257,6 +258,7 @@ envoy_cc_library( "//include/envoy/thread_local:thread_local_interface", "//include/envoy/upstream:cluster_manager_interface", "//source/common/protobuf", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], ) diff --git a/include/envoy/server/factory_context.h b/include/envoy/server/factory_context.h index 56dac952be3e..71248cfd37ca 100644 --- a/include/envoy/server/factory_context.h +++ b/include/envoy/server/factory_context.h @@ -4,6 +4,7 @@ #include #include "envoy/access_log/access_log.h" +#include "envoy/common/random_generator.h" #include "envoy/config/core/v3/base.pb.h" #include "envoy/config/typed_config.h" #include "envoy/grpc/context.h" @@ -65,7 +66,7 @@ class CommonFactoryContext { /** * @return RandomGenerator& the random generator for the server. */ - virtual Envoy::Runtime::RandomGenerator& random() PURE; + virtual Envoy::Random::RandomGenerator& random() PURE; /** * @return Runtime::Loader& the singleton runtime loader for the server. diff --git a/include/envoy/server/health_checker_config.h b/include/envoy/server/health_checker_config.h index 5994e37f231b..5bb6344b8907 100644 --- a/include/envoy/server/health_checker_config.h +++ b/include/envoy/server/health_checker_config.h @@ -1,5 +1,6 @@ #pragma once +#include "envoy/common/random_generator.h" #include "envoy/config/core/v3/health_check.pb.h" #include "envoy/config/typed_config.h" #include "envoy/runtime/runtime.h" @@ -26,7 +27,7 @@ class HealthCheckerFactoryContext { /** * @return RandomGenerator& the random generator for the server. */ - virtual Envoy::Runtime::RandomGenerator& random() PURE; + virtual Envoy::Random::RandomGenerator& random() PURE; /** * @return Event::Dispatcher& the main thread's dispatcher. This dispatcher should be used diff --git a/include/envoy/server/instance.h b/include/envoy/server/instance.h index 1e6335962e71..c2d294ac3cb8 100644 --- a/include/envoy/server/instance.h +++ b/include/envoy/server/instance.h @@ -7,6 +7,7 @@ #include "envoy/access_log/access_log.h" #include "envoy/api/api.h" #include "envoy/common/mutex_tracer.h" +#include "envoy/common/random_generator.h" #include "envoy/config/trace/v3/http_tracer.pb.h" #include "envoy/event/timer.h" #include "envoy/grpc/context.h" @@ -137,7 +138,7 @@ class Instance { /** * @return RandomGenerator& the random generator for the server. */ - virtual Runtime::RandomGenerator& random() PURE; + virtual Random::RandomGenerator& random() PURE; /** * @return Runtime::Loader& the singleton runtime loader for the server. 
diff --git a/include/envoy/server/transport_socket_config.h b/include/envoy/server/transport_socket_config.h index ac3337738017..e08405f9b4ab 100644 --- a/include/envoy/server/transport_socket_config.h +++ b/include/envoy/server/transport_socket_config.h @@ -2,12 +2,12 @@ #include +#include "envoy/config/core/v3/health_check.pb.h" #include "envoy/config/typed_config.h" #include "envoy/event/dispatcher.h" #include "envoy/init/manager.h" #include "envoy/local_info/local_info.h" #include "envoy/network/transport_socket.h" -#include "envoy/runtime/runtime.h" #include "envoy/secret/secret_manager.h" #include "envoy/server/factory_context.h" #include "envoy/singleton/manager.h" @@ -67,7 +67,7 @@ class TransportSocketFactoryContext { /** * @return RandomGenerator& the random generator for the server. */ - virtual Envoy::Runtime::RandomGenerator& random() PURE; + virtual Envoy::Random::RandomGenerator& random() PURE; /** * @return the server-wide stats store. diff --git a/include/envoy/upstream/BUILD b/include/envoy/upstream/BUILD index d31ba0fa6d97..2755da336b56 100644 --- a/include/envoy/upstream/BUILD +++ b/include/envoy/upstream/BUILD @@ -17,6 +17,7 @@ envoy_cc_library( ":thread_local_cluster_interface", ":upstream_interface", "//include/envoy/access_log:access_log_interface", + "//include/envoy/common:random_generator_interface", "//include/envoy/config:grpc_mux_interface", "//include/envoy/config:subscription_factory_interface", "//include/envoy/grpc:async_client_manager_interface", diff --git a/include/envoy/upstream/cluster_factory.h b/include/envoy/upstream/cluster_factory.h index 389a804ba044..68bbff008baf 100644 --- a/include/envoy/upstream/cluster_factory.h +++ b/include/envoy/upstream/cluster_factory.h @@ -10,6 +10,7 @@ #include "envoy/access_log/access_log.h" #include "envoy/api/api.h" +#include "envoy/common/random_generator.h" #include "envoy/config/cluster/v3/cluster.pb.h" #include "envoy/config/typed_config.h" #include "envoy/event/dispatcher.h" @@ -81,7 +82,7 @@ class ClusterFactoryContext { /** * @return RandomGenerator& the random generator for the server. */ - virtual Runtime::RandomGenerator& random() PURE; + virtual Random::RandomGenerator& random() PURE; /** * @return Runtime::Loader& the singleton runtime loader for the server. 
diff --git a/include/envoy/upstream/cluster_manager.h b/include/envoy/upstream/cluster_manager.h index de2fa32b75f1..ed0c3935bb08 100644 --- a/include/envoy/upstream/cluster_manager.h +++ b/include/envoy/upstream/cluster_manager.h @@ -8,6 +8,7 @@ #include "envoy/access_log/access_log.h" #include "envoy/api/api.h" +#include "envoy/common/random_generator.h" #include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/config/cluster/v3/cluster.pb.h" #include "envoy/config/core/v3/address.pb.h" @@ -367,7 +368,7 @@ class ClusterInfoFactory { ClusterManager& cm_; const LocalInfo::LocalInfo& local_info_; Event::Dispatcher& dispatcher_; - Runtime::RandomGenerator& random_; + Random::RandomGenerator& random_; Singleton::Manager& singleton_manager_; ThreadLocal::SlotAllocator& tls_; ProtobufMessage::ValidationVisitor& validation_visitor_; diff --git a/source/common/access_log/access_log_impl.cc b/source/common/access_log/access_log_impl.cc index 9aec83a15fc5..ea3e9a1330c5 100644 --- a/source/common/access_log/access_log_impl.cc +++ b/source/common/access_log/access_log_impl.cc @@ -51,7 +51,7 @@ bool ComparisonFilter::compareAgainstValue(uint64_t lhs) const { } FilterPtr FilterFactory::fromProto(const envoy::config::accesslog::v3::AccessLogFilter& config, - Runtime::Loader& runtime, Runtime::RandomGenerator& random, + Runtime::Loader& runtime, Random::RandomGenerator& random, ProtobufMessage::ValidationVisitor& validation_visitor) { switch (config.filter_specifier_case()) { case envoy::config::accesslog::v3::AccessLogFilter::FilterSpecifierCase::kStatusCodeFilter: @@ -118,7 +118,7 @@ bool DurationFilter::evaluate(const StreamInfo::StreamInfo& info, const Http::Re } RuntimeFilter::RuntimeFilter(const envoy::config::accesslog::v3::RuntimeFilter& config, - Runtime::Loader& runtime, Runtime::RandomGenerator& random) + Runtime::Loader& runtime, Random::RandomGenerator& random) : runtime_(runtime), random_(random), runtime_key_(config.runtime_key()), percent_(config.percent_sampled()), use_independent_randomness_(config.use_independent_randomness()) {} @@ -143,7 +143,7 @@ bool RuntimeFilter::evaluate(const StreamInfo::StreamInfo& stream_info, OperatorFilter::OperatorFilter( const Protobuf::RepeatedPtrField& configs, - Runtime::Loader& runtime, Runtime::RandomGenerator& random, + Runtime::Loader& runtime, Random::RandomGenerator& random, ProtobufMessage::ValidationVisitor& validation_visitor) { for (const auto& config : configs) { filters_.emplace_back(FilterFactory::fromProto(config, runtime, random, validation_visitor)); @@ -151,12 +151,12 @@ OperatorFilter::OperatorFilter( } OrFilter::OrFilter(const envoy::config::accesslog::v3::OrFilter& config, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, + Random::RandomGenerator& random, ProtobufMessage::ValidationVisitor& validation_visitor) : OperatorFilter(config.filters(), runtime, random, validation_visitor) {} AndFilter::AndFilter(const envoy::config::accesslog::v3::AndFilter& config, - Runtime::Loader& runtime, Runtime::RandomGenerator& random, + Runtime::Loader& runtime, Random::RandomGenerator& random, ProtobufMessage::ValidationVisitor& validation_visitor) : OperatorFilter(config.filters(), runtime, random, validation_visitor) {} diff --git a/source/common/access_log/access_log_impl.h b/source/common/access_log/access_log_impl.h index 518687e77967..19264b2238d6 100644 --- a/source/common/access_log/access_log_impl.h +++ b/source/common/access_log/access_log_impl.h @@ -6,6 +6,7 @@ #include #include "envoy/access_log/access_log.h" 
+#include "envoy/common/random_generator.h" #include "envoy/config/accesslog/v3/accesslog.pb.h" #include "envoy/config/typed_config.h" #include "envoy/runtime/runtime.h" @@ -30,7 +31,7 @@ class FilterFactory { * Read a filter definition from proto and instantiate a concrete filter class. */ static FilterPtr fromProto(const envoy::config::accesslog::v3::AccessLogFilter& config, - Runtime::Loader& runtime, Runtime::RandomGenerator& random, + Runtime::Loader& runtime, Random::RandomGenerator& random, ProtobufMessage::ValidationVisitor& validation_visitor); }; @@ -85,7 +86,7 @@ class OperatorFilter : public Filter { public: OperatorFilter( const Protobuf::RepeatedPtrField& configs, - Runtime::Loader& runtime, Runtime::RandomGenerator& random, + Runtime::Loader& runtime, Random::RandomGenerator& random, ProtobufMessage::ValidationVisitor& validation_visitor); protected: @@ -98,7 +99,7 @@ class OperatorFilter : public Filter { class AndFilter : public OperatorFilter { public: AndFilter(const envoy::config::accesslog::v3::AndFilter& config, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, + Random::RandomGenerator& random, ProtobufMessage::ValidationVisitor& validation_visitor); // AccessLog::Filter @@ -113,8 +114,7 @@ class AndFilter : public OperatorFilter { class OrFilter : public OperatorFilter { public: OrFilter(const envoy::config::accesslog::v3::OrFilter& config, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, - ProtobufMessage::ValidationVisitor& validation_visitor); + Random::RandomGenerator& random, ProtobufMessage::ValidationVisitor& validation_visitor); // AccessLog::Filter bool evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap& request_headers, @@ -152,7 +152,7 @@ class TraceableRequestFilter : public Filter { class RuntimeFilter : public Filter { public: RuntimeFilter(const envoy::config::accesslog::v3::RuntimeFilter& config, Runtime::Loader& runtime, - Runtime::RandomGenerator& random); + Random::RandomGenerator& random); // AccessLog::Filter bool evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap& request_headers, @@ -161,7 +161,7 @@ class RuntimeFilter : public Filter { private: Runtime::Loader& runtime_; - Runtime::RandomGenerator& random_; + Random::RandomGenerator& random_; const std::string runtime_key_; const envoy::type::v3::FractionalPercent percent_; const bool use_independent_randomness_; @@ -245,7 +245,7 @@ class ExtensionFilterFactory : public Config::TypedFactory { * @return an instance of extension filter implementation from a config proto. 
*/ virtual FilterPtr createFilter(const envoy::config::accesslog::v3::ExtensionFilter& config, - Runtime::Loader& runtime, Runtime::RandomGenerator& random) PURE; + Runtime::Loader& runtime, Random::RandomGenerator& random) PURE; std::string category() const override { return "envoy.access_logger.extension_filters"; } }; diff --git a/source/common/common/BUILD b/source/common/common/BUILD index 30d27bd3d66f..ac2e6430ef66 100644 --- a/source/common/common/BUILD +++ b/source/common/common/BUILD @@ -38,7 +38,7 @@ envoy_cc_library( deps = [ ":assert_lib", "//include/envoy/common:backoff_strategy_interface", - "//include/envoy/runtime:runtime_interface", + "//include/envoy/common:random_generator_interface", ], ) @@ -235,6 +235,21 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "random_generator_lib", + srcs = [ + "random_generator.cc", + ], + hdrs = [ + "random_generator.h", + ], + external_deps = ["ssl"], + deps = [ + ":assert_lib", + "//include/envoy/common:random_generator_interface", + ], +) + envoy_cc_library( name = "regex_lib", srcs = ["regex.cc"], diff --git a/source/common/common/backoff_strategy.cc b/source/common/common/backoff_strategy.cc index b8241436aa21..c9b5b61b733b 100644 --- a/source/common/common/backoff_strategy.cc +++ b/source/common/common/backoff_strategy.cc @@ -3,7 +3,7 @@ namespace Envoy { JitteredBackOffStrategy::JitteredBackOffStrategy(uint64_t base_interval, uint64_t max_interval, - Runtime::RandomGenerator& random) + Random::RandomGenerator& random) : base_interval_(base_interval), max_interval_(max_interval), next_interval_(base_interval), random_(random) { ASSERT(base_interval_ > 0); diff --git a/source/common/common/backoff_strategy.h b/source/common/common/backoff_strategy.h index be84ec5e865e..2484f3e11b20 100644 --- a/source/common/common/backoff_strategy.h +++ b/source/common/common/backoff_strategy.h @@ -4,7 +4,7 @@ #include #include "envoy/common/backoff_strategy.h" -#include "envoy/runtime/runtime.h" +#include "envoy/common/random_generator.h" #include "common/common/assert.h" @@ -24,7 +24,7 @@ class JitteredBackOffStrategy : public BackOffStrategy { * @param random the random generator. */ JitteredBackOffStrategy(uint64_t base_interval, uint64_t max_interval, - Runtime::RandomGenerator& random); + Random::RandomGenerator& random); // BackOffStrategy methods uint64_t nextBackOffMs() override; @@ -34,7 +34,7 @@ class JitteredBackOffStrategy : public BackOffStrategy { const uint64_t base_interval_; const uint64_t max_interval_{}; uint64_t next_interval_; - Runtime::RandomGenerator& random_; + Random::RandomGenerator& random_; }; /** diff --git a/source/common/common/random_generator.cc b/source/common/common/random_generator.cc new file mode 100644 index 000000000000..69eabc5e6c07 --- /dev/null +++ b/source/common/common/random_generator.cc @@ -0,0 +1,135 @@ +#include "common/common/random_generator.h" + +#include "common/common/assert.h" + +#include "openssl/rand.h" + +namespace Envoy { +namespace Random { + +const size_t RandomGeneratorImpl::UUID_LENGTH = 36; + +uint64_t RandomGeneratorImpl::random() { + // Prefetch 256 * sizeof(uint64_t) bytes of randomness. buffered_idx is initialized to 256, + // i.e. out-of-range value, so the buffer will be filled with randomness on the first call + // to this function. + // + // There is a diminishing return when increasing the prefetch size, as illustrated below in + // a test that generates 1,000,000,000 uint64_t numbers (results on Intel Xeon E5-1650v3). 
+ // + // //test/common/runtime:runtime_impl_test - Random.DISABLED_benchmarkRandom + // + // prefetch | time | improvement + // (uint64_t) | (ms) | (% vs prev) + // --------------------------------- + // 32 | 25,931 | + // 64 | 15,124 | 42% faster + // 128 | 9,653 | 36% faster + // 256 | 6,930 | 28% faster <-- used right now + // 512 | 5,571 | 20% faster + // 1024 | 4,888 | 12% faster + // 2048 | 4,594 | 6% faster + // 4096 | 4,424 | 4% faster + // 8192 | 4,386 | 1% faster + + const size_t prefetch = 256; + static thread_local uint64_t buffered[prefetch]; + static thread_local size_t buffered_idx = prefetch; + + if (buffered_idx >= prefetch) { + int rc = RAND_bytes(reinterpret_cast(buffered), sizeof(buffered)); + ASSERT(rc == 1); + buffered_idx = 0; + } + + // Consume uint64_t from the buffer. + return buffered[buffered_idx++]; +} + +std::string RandomGeneratorImpl::uuid() { + // Prefetch 2048 bytes of randomness. buffered_idx is initialized to sizeof(buffered), + // i.e. out-of-range value, so the buffer will be filled with randomness on the first + // call to this function. + // + // There is a diminishing return when increasing the prefetch size, as illustrated below + // in a test that generates 100,000,000 UUIDs (results on Intel Xeon E5-1650v3). + // + // //test/common/runtime:uuid_util_test - UUIDUtilsTest.DISABLED_benchmark + // + // prefetch | time | improvement + // (bytes) | (ms) | (% vs prev) + // --------------------------------- + // 128 | 16,353 | + // 256 | 11,827 | 28% faster + // 512 | 9,676 | 18% faster + // 1024 | 8,594 | 11% faster + // 2048 | 8,097 | 6% faster <-- used right now + // 4096 | 7,790 | 4% faster + // 8192 | 7,737 | 1% faster + + static thread_local uint8_t buffered[2048]; + static thread_local size_t buffered_idx = sizeof(buffered); + + if (buffered_idx + 16 > sizeof(buffered)) { + int rc = RAND_bytes(buffered, sizeof(buffered)); + ASSERT(rc == 1); + buffered_idx = 0; + } + + // Consume 16 bytes from the buffer. + ASSERT(buffered_idx + 16 <= sizeof(buffered)); + uint8_t* rand = &buffered[buffered_idx]; + buffered_idx += 16; + + // Create UUID from Truly Random or Pseudo-Random Numbers. + // See: https://tools.ietf.org/html/rfc4122#section-4.4 + rand[6] = (rand[6] & 0x0f) | 0x40; // UUID version 4 (random) + rand[8] = (rand[8] & 0x3f) | 0x80; // UUID variant 1 (RFC4122) + + // Convert UUID to a string representation, e.g. a121e9e1-feae-4136-9e0e-6fac343d56c9. 
+ static const char* const hex = "0123456789abcdef"; + char uuid[UUID_LENGTH]; + + for (uint8_t i = 0; i < 4; i++) { + const uint8_t d = rand[i]; + uuid[2 * i] = hex[d >> 4]; + uuid[2 * i + 1] = hex[d & 0x0f]; + } + + uuid[8] = '-'; + + for (uint8_t i = 4; i < 6; i++) { + const uint8_t d = rand[i]; + uuid[2 * i + 1] = hex[d >> 4]; + uuid[2 * i + 2] = hex[d & 0x0f]; + } + + uuid[13] = '-'; + + for (uint8_t i = 6; i < 8; i++) { + const uint8_t d = rand[i]; + uuid[2 * i + 2] = hex[d >> 4]; + uuid[2 * i + 3] = hex[d & 0x0f]; + } + + uuid[18] = '-'; + + for (uint8_t i = 8; i < 10; i++) { + const uint8_t d = rand[i]; + uuid[2 * i + 3] = hex[d >> 4]; + uuid[2 * i + 4] = hex[d & 0x0f]; + } + + uuid[23] = '-'; + + for (uint8_t i = 10; i < 16; i++) { + const uint8_t d = rand[i]; + uuid[2 * i + 4] = hex[d >> 4]; + uuid[2 * i + 5] = hex[d & 0x0f]; + } + + return std::string(uuid, UUID_LENGTH); +} + +} // namespace Random +} // namespace Envoy diff --git a/source/common/common/random_generator.h b/source/common/common/random_generator.h new file mode 100644 index 000000000000..56aa638b54c8 --- /dev/null +++ b/source/common/common/random_generator.h @@ -0,0 +1,19 @@ +#include "envoy/common/random_generator.h" + +namespace Envoy { +namespace Random { +/** + * Implementation of RandomGenerator that uses per-thread RANLUX generators seeded with current + * time. + */ +class RandomGeneratorImpl : public RandomGenerator { +public: + // Random::RandomGenerator + uint64_t random() override; + std::string uuid() override; + + static const size_t UUID_LENGTH; +}; + +} // namespace Random +} // namespace Envoy diff --git a/source/common/config/datasource.cc b/source/common/config/datasource.cc index 7c089ebfe565..d3e286d0b27a 100644 --- a/source/common/config/datasource.cc +++ b/source/common/config/datasource.cc @@ -40,7 +40,7 @@ absl::optional getPath(const envoy::config::core::v3::DataSource& s RemoteAsyncDataProvider::RemoteAsyncDataProvider( Upstream::ClusterManager& cm, Init::Manager& manager, const envoy::config::core::v3::RemoteDataSource& source, Event::Dispatcher& dispatcher, - Runtime::RandomGenerator& random, bool allow_empty, AsyncDataSourceCb&& callback) + Random::RandomGenerator& random, bool allow_empty, AsyncDataSourceCb&& callback) : allow_empty_(allow_empty), callback_(std::move(callback)), fetcher_(std::make_unique(cm, source.http_uri(), source.sha256(), *this)), diff --git a/source/common/config/datasource.h b/source/common/config/datasource.h index 1e35e119518b..4b3ccdb17ffd 100644 --- a/source/common/config/datasource.h +++ b/source/common/config/datasource.h @@ -1,6 +1,7 @@ #pragma once #include "envoy/api/api.h" +#include "envoy/common/random_generator.h" #include "envoy/config/core/v3/base.pb.h" #include "envoy/init/manager.h" #include "envoy/upstream/cluster_manager.h" @@ -63,7 +64,7 @@ class RemoteAsyncDataProvider : public Config::DataFetcher::RemoteDataFetcherCal public: RemoteAsyncDataProvider(Upstream::ClusterManager& cm, Init::Manager& manager, const envoy::config::core::v3::RemoteDataSource& source, - Event::Dispatcher& dispatcher, Runtime::RandomGenerator& random, + Event::Dispatcher& dispatcher, Random::RandomGenerator& random, bool allow_empty, AsyncDataSourceCb&& callback); ~RemoteAsyncDataProvider() override { diff --git a/source/common/config/grpc_mux_impl.cc b/source/common/config/grpc_mux_impl.cc index c9eedcfccd93..abfaf0735859 100644 --- a/source/common/config/grpc_mux_impl.cc +++ b/source/common/config/grpc_mux_impl.cc @@ -19,7 +19,7 @@ GrpcMuxImpl::GrpcMuxImpl(const 
LocalInfo::LocalInfo& local_info, Grpc::RawAsyncClientPtr async_client, Event::Dispatcher& dispatcher, const Protobuf::MethodDescriptor& service_method, envoy::config::core::v3::ApiVersion transport_api_version, - Runtime::RandomGenerator& random, Stats::Scope& scope, + Random::RandomGenerator& random, Stats::Scope& scope, const RateLimitSettings& rate_limit_settings, bool skip_subsequent_node) : grpc_stream_(this, std::move(async_client), service_method, random, dispatcher, scope, rate_limit_settings), diff --git a/source/common/config/grpc_mux_impl.h b/source/common/config/grpc_mux_impl.h index 897eec63db49..a623bed2a08a 100644 --- a/source/common/config/grpc_mux_impl.h +++ b/source/common/config/grpc_mux_impl.h @@ -4,6 +4,7 @@ #include #include "envoy/api/v2/discovery.pb.h" +#include "envoy/common/random_generator.h" #include "envoy/common/time.h" #include "envoy/config/grpc_mux.h" #include "envoy/config/subscription.h" @@ -30,7 +31,7 @@ class GrpcMuxImpl : public GrpcMux, GrpcMuxImpl(const LocalInfo::LocalInfo& local_info, Grpc::RawAsyncClientPtr async_client, Event::Dispatcher& dispatcher, const Protobuf::MethodDescriptor& service_method, envoy::config::core::v3::ApiVersion transport_api_version, - Runtime::RandomGenerator& random, Stats::Scope& scope, + Random::RandomGenerator& random, Stats::Scope& scope, const RateLimitSettings& rate_limit_settings, bool skip_subsequent_node); ~GrpcMuxImpl() override = default; diff --git a/source/common/config/grpc_stream.h b/source/common/config/grpc_stream.h index 7700b96a7dfc..2b4187aac313 100644 --- a/source/common/config/grpc_stream.h +++ b/source/common/config/grpc_stream.h @@ -2,6 +2,7 @@ #include +#include "envoy/common/random_generator.h" #include "envoy/config/grpc_mux.h" #include "envoy/grpc/async_client.h" @@ -21,7 +22,7 @@ class GrpcStream : public Grpc::AsyncStreamCallbacks, public Logger::Loggable { public: GrpcStream(GrpcStreamCallbacks* callbacks, Grpc::RawAsyncClientPtr async_client, - const Protobuf::MethodDescriptor& service_method, Runtime::RandomGenerator& random, + const Protobuf::MethodDescriptor& service_method, Random::RandomGenerator& random, Event::Dispatcher& dispatcher, Stats::Scope& scope, const RateLimitSettings& rate_limit_settings) : callbacks_(callbacks), async_client_(std::move(async_client)), @@ -135,7 +136,7 @@ class GrpcStream : public Grpc::AsyncStreamCallbacks, // Reestablishes the gRPC channel when necessary, with some backoff politeness. 
Event::TimerPtr retry_timer_; - Runtime::RandomGenerator& random_; + Random::RandomGenerator& random_; TimeSource& time_source_; BackOffStrategyPtr backoff_strategy_; diff --git a/source/common/config/http_subscription_impl.cc b/source/common/config/http_subscription_impl.cc index fc5bf4dfb331..8c0d55d5e749 100644 --- a/source/common/config/http_subscription_impl.cc +++ b/source/common/config/http_subscription_impl.cc @@ -23,7 +23,7 @@ namespace Config { HttpSubscriptionImpl::HttpSubscriptionImpl( const LocalInfo::LocalInfo& local_info, Upstream::ClusterManager& cm, const std::string& remote_cluster_name, Event::Dispatcher& dispatcher, - Runtime::RandomGenerator& random, std::chrono::milliseconds refresh_interval, + Random::RandomGenerator& random, std::chrono::milliseconds refresh_interval, std::chrono::milliseconds request_timeout, const Protobuf::MethodDescriptor& service_method, absl::string_view type_url, envoy::config::core::v3::ApiVersion transport_api_version, SubscriptionCallbacks& callbacks, OpaqueResourceDecoder& resource_decoder, diff --git a/source/common/config/http_subscription_impl.h b/source/common/config/http_subscription_impl.h index 9f2c01bda9f6..ec3d2e6ad0de 100644 --- a/source/common/config/http_subscription_impl.h +++ b/source/common/config/http_subscription_impl.h @@ -1,6 +1,7 @@ #pragma once #include "envoy/api/v2/discovery.pb.h" +#include "envoy/common/random_generator.h" #include "envoy/config/subscription.h" #include "envoy/event/dispatcher.h" #include "envoy/service/discovery/v3/discovery.pb.h" @@ -24,7 +25,7 @@ class HttpSubscriptionImpl : public Http::RestApiFetcher, public: HttpSubscriptionImpl(const LocalInfo::LocalInfo& local_info, Upstream::ClusterManager& cm, const std::string& remote_cluster_name, Event::Dispatcher& dispatcher, - Runtime::RandomGenerator& random, std::chrono::milliseconds refresh_interval, + Random::RandomGenerator& random, std::chrono::milliseconds refresh_interval, std::chrono::milliseconds request_timeout, const Protobuf::MethodDescriptor& service_method, absl::string_view type_url, envoy::config::core::v3::ApiVersion transport_api_version, diff --git a/source/common/config/new_grpc_mux_impl.cc b/source/common/config/new_grpc_mux_impl.cc index 5ffafafa397c..5814b605a444 100644 --- a/source/common/config/new_grpc_mux_impl.cc +++ b/source/common/config/new_grpc_mux_impl.cc @@ -18,7 +18,7 @@ NewGrpcMuxImpl::NewGrpcMuxImpl(Grpc::RawAsyncClientPtr&& async_client, Event::Dispatcher& dispatcher, const Protobuf::MethodDescriptor& service_method, envoy::config::core::v3::ApiVersion transport_api_version, - Runtime::RandomGenerator& random, Stats::Scope& scope, + Random::RandomGenerator& random, Stats::Scope& scope, const RateLimitSettings& rate_limit_settings, const LocalInfo::LocalInfo& local_info) : grpc_stream_(this, std::move(async_client), service_method, random, dispatcher, scope, diff --git a/source/common/config/new_grpc_mux_impl.h b/source/common/config/new_grpc_mux_impl.h index df394019c235..5eb226992f78 100644 --- a/source/common/config/new_grpc_mux_impl.h +++ b/source/common/config/new_grpc_mux_impl.h @@ -1,6 +1,7 @@ #pragma once #include "envoy/api/v2/discovery.pb.h" +#include "envoy/common/random_generator.h" #include "envoy/common/token_bucket.h" #include "envoy/config/grpc_mux.h" #include "envoy/config/subscription.h" @@ -30,7 +31,7 @@ class NewGrpcMuxImpl NewGrpcMuxImpl(Grpc::RawAsyncClientPtr&& async_client, Event::Dispatcher& dispatcher, const Protobuf::MethodDescriptor& service_method, envoy::config::core::v3::ApiVersion 
transport_api_version, - Runtime::RandomGenerator& random, Stats::Scope& scope, + Random::RandomGenerator& random, Stats::Scope& scope, const RateLimitSettings& rate_limit_settings, const LocalInfo::LocalInfo& local_info); diff --git a/source/common/config/subscription_factory_impl.cc b/source/common/config/subscription_factory_impl.cc index d8448f30a6d0..505206b5bf73 100644 --- a/source/common/config/subscription_factory_impl.cc +++ b/source/common/config/subscription_factory_impl.cc @@ -16,7 +16,7 @@ namespace Config { SubscriptionFactoryImpl::SubscriptionFactoryImpl( const LocalInfo::LocalInfo& local_info, Event::Dispatcher& dispatcher, - Upstream::ClusterManager& cm, Runtime::RandomGenerator& random, + Upstream::ClusterManager& cm, Random::RandomGenerator& random, ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api, Runtime::Loader& runtime) : local_info_(local_info), dispatcher_(dispatcher), cm_(cm), random_(random), validation_visitor_(validation_visitor), api_(api), runtime_(runtime) {} diff --git a/source/common/config/subscription_factory_impl.h b/source/common/config/subscription_factory_impl.h index 0eadbc61aa76..1241229861d4 100644 --- a/source/common/config/subscription_factory_impl.h +++ b/source/common/config/subscription_factory_impl.h @@ -1,6 +1,7 @@ #pragma once #include "envoy/api/api.h" +#include "envoy/common/random_generator.h" #include "envoy/config/core/v3/config_source.pb.h" #include "envoy/config/subscription.h" #include "envoy/config/subscription_factory.h" @@ -15,7 +16,7 @@ namespace Config { class SubscriptionFactoryImpl : public SubscriptionFactory, Logger::Loggable { public: SubscriptionFactoryImpl(const LocalInfo::LocalInfo& local_info, Event::Dispatcher& dispatcher, - Upstream::ClusterManager& cm, Runtime::RandomGenerator& random, + Upstream::ClusterManager& cm, Random::RandomGenerator& random, ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api, Runtime::Loader& runtime); @@ -29,7 +30,7 @@ class SubscriptionFactoryImpl : public SubscriptionFactory, Logger::Loggable static BackOffStrategyPtr prepareDnsRefreshStrategy(const T& config, uint64_t dns_refresh_rate_ms, - Runtime::RandomGenerator& random) { + Random::RandomGenerator& random) { if (config.has_dns_failure_refresh_rate()) { uint64_t base_interval_ms = PROTOBUF_GET_MS_REQUIRED(config.dns_failure_refresh_rate(), base_interval); diff --git a/source/common/http/async_client_impl.cc b/source/common/http/async_client_impl.cc index ce46de45a374..a989adce30aa 100644 --- a/source/common/http/async_client_impl.cc +++ b/source/common/http/async_client_impl.cc @@ -39,7 +39,7 @@ AsyncClientImpl::AsyncClientImpl(Upstream::ClusterInfoConstSharedPtr cluster, Stats::Store& stats_store, Event::Dispatcher& dispatcher, const LocalInfo::LocalInfo& local_info, Upstream::ClusterManager& cm, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, + Random::RandomGenerator& random, Router::ShadowWriterPtr&& shadow_writer, Http::Context& http_context) : cluster_(cluster), config_("http.async-client.", local_info, stats_store, cm, runtime, random, diff --git a/source/common/http/async_client_impl.h b/source/common/http/async_client_impl.h index 90acf1aadda0..15478553627d 100644 --- a/source/common/http/async_client_impl.h +++ b/source/common/http/async_client_impl.h @@ -9,6 +9,7 @@ #include #include +#include "envoy/common/random_generator.h" #include "envoy/common/scope_tracker.h" #include "envoy/config/core/v3/base.pb.h" #include "envoy/config/route/v3/route_components.pb.h" @@ -47,7 
+48,7 @@ class AsyncClientImpl final : public AsyncClient { AsyncClientImpl(Upstream::ClusterInfoConstSharedPtr cluster, Stats::Store& stats_store, Event::Dispatcher& dispatcher, const LocalInfo::LocalInfo& local_info, Upstream::ClusterManager& cm, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, Router::ShadowWriterPtr&& shadow_writer, + Random::RandomGenerator& random, Router::ShadowWriterPtr&& shadow_writer, Http::Context& http_context); ~AsyncClientImpl() override; diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index f8419a9d6b05..fb9c36d5d730 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -100,7 +100,7 @@ ConnectionManagerImpl::generateListenerStats(const std::string& prefix, Stats::S ConnectionManagerImpl::ConnectionManagerImpl(ConnectionManagerConfig& config, const Network::DrainDecision& drain_close, - Runtime::RandomGenerator& random_generator, + Random::RandomGenerator& random_generator, Http::Context& http_context, Runtime::Loader& runtime, const LocalInfo::LocalInfo& local_info, Upstream::ClusterManager& cluster_manager, diff --git a/source/common/http/conn_manager_impl.h b/source/common/http/conn_manager_impl.h index 81647bda934f..4e8f18814b09 100644 --- a/source/common/http/conn_manager_impl.h +++ b/source/common/http/conn_manager_impl.h @@ -9,6 +9,7 @@ #include #include "envoy/access_log/access_log.h" +#include "envoy/common/random_generator.h" #include "envoy/common/scope_tracker.h" #include "envoy/event/deferred_deletable.h" #include "envoy/http/api_listener.h" @@ -54,7 +55,7 @@ class ConnectionManagerImpl : Logger::Loggable, public Http::ApiListener { public: ConnectionManagerImpl(ConnectionManagerConfig& config, const Network::DrainDecision& drain_close, - Runtime::RandomGenerator& random_generator, Http::Context& http_context, + Random::RandomGenerator& random_generator, Http::Context& http_context, Runtime::Loader& runtime, const LocalInfo::LocalInfo& local_info, Upstream::ClusterManager& cluster_manager, Server::OverloadManager* overload_manager, TimeSource& time_system); @@ -788,7 +789,7 @@ class ConnectionManagerImpl : Logger::Loggable, // A connection duration timer. Armed during handling new connection if enabled in config. 
Event::TimerPtr connection_duration_timer_; Event::TimerPtr drain_timer_; - Runtime::RandomGenerator& random_generator_; + Random::RandomGenerator& random_generator_; Http::Context& http_context_; Runtime::Loader& runtime_; const LocalInfo::LocalInfo& local_info_; diff --git a/source/common/http/request_id_extension_impl.cc b/source/common/http/request_id_extension_impl.cc index ed4022712fc0..f2917959bcd1 100644 --- a/source/common/http/request_id_extension_impl.cc +++ b/source/common/http/request_id_extension_impl.cc @@ -40,7 +40,7 @@ RequestIDExtensionSharedPtr RequestIDExtensionFactory::fromProto( } RequestIDExtensionSharedPtr -RequestIDExtensionFactory::defaultInstance(Envoy::Runtime::RandomGenerator& random) { +RequestIDExtensionFactory::defaultInstance(Envoy::Random::RandomGenerator& random) { return std::make_shared(random); } diff --git a/source/common/http/request_id_extension_impl.h b/source/common/http/request_id_extension_impl.h index b77ad9b8142b..822dff8bc242 100644 --- a/source/common/http/request_id_extension_impl.h +++ b/source/common/http/request_id_extension_impl.h @@ -1,5 +1,6 @@ #pragma once +#include "envoy/common/random_generator.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" #include "envoy/http/request_id_extension.h" #include "envoy/server/request_id_extension_config.h" @@ -16,7 +17,7 @@ class RequestIDExtensionFactory { /** * Return a newly created instance of the default RequestIDExtension implementation. */ - static RequestIDExtensionSharedPtr defaultInstance(Envoy::Runtime::RandomGenerator& random); + static RequestIDExtensionSharedPtr defaultInstance(Envoy::Random::RandomGenerator& random); /** * Return a globally shared instance of the noop RequestIDExtension implementation. diff --git a/source/common/http/request_id_extension_uuid_impl.cc b/source/common/http/request_id_extension_uuid_impl.cc index a7ec65fb6612..7da58d05ac1d 100644 --- a/source/common/http/request_id_extension_uuid_impl.cc +++ b/source/common/http/request_id_extension_uuid_impl.cc @@ -5,8 +5,8 @@ #include "envoy/http/header_map.h" +#include "common/common/random_generator.h" #include "common/common/utility.h" -#include "common/runtime/runtime_impl.h" #include "absl/strings/string_view.h" @@ -55,7 +55,7 @@ TraceStatus UUIDRequestIDExtension::getTraceStatus(const RequestHeaderMap& reque return TraceStatus::NoTrace; } absl::string_view uuid = request_headers.getRequestIdValue(); - if (uuid.length() != Runtime::RandomGeneratorImpl::UUID_LENGTH) { + if (uuid.length() != Random::RandomGeneratorImpl::UUID_LENGTH) { return TraceStatus::NoTrace; } @@ -76,7 +76,7 @@ void UUIDRequestIDExtension::setTraceStatus(RequestHeaderMap& request_headers, T return; } absl::string_view uuid_view = request_headers.getRequestIdValue(); - if (uuid_view.length() != Runtime::RandomGeneratorImpl::UUID_LENGTH) { + if (uuid_view.length() != Random::RandomGeneratorImpl::UUID_LENGTH) { return; } std::string uuid(uuid_view); diff --git a/source/common/http/request_id_extension_uuid_impl.h b/source/common/http/request_id_extension_uuid_impl.h index c3a660e3a2bc..ca5868dc6610 100644 --- a/source/common/http/request_id_extension_uuid_impl.h +++ b/source/common/http/request_id_extension_uuid_impl.h @@ -1,5 +1,6 @@ #pragma once +#include "envoy/common/random_generator.h" #include "envoy/http/request_id_extension.h" #include "common/runtime/runtime_impl.h" @@ -11,7 +12,7 @@ namespace Http { // configured. 
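The UUID_LENGTH guards above and the TRACE_BYTE_POSITION constant declared just below both follow from the canonical 8-4-4-4-12 layout produced by RandomGeneratorImpl::uuid(): the third hex group starts at index 8 + 1 + 4 + 1 = 14, and its first digit is the version nibble, '4' for a version-4 UUID. A small standalone self-check of that arithmetic, using the sample value from the uuid() comment elsewhere in this patch:

#include <cassert>
#include <string>

int main() {
  const std::string uuid = "a121e9e1-feae-4136-9e0e-6fac343d56c9";
  assert(uuid.size() == 36);                                               // UUID_LENGTH
  assert(uuid[8] == '-' && uuid[13] == '-' && uuid[18] == '-' && uuid[23] == '-');
  assert(uuid[14] == '4'); // version nibble; the byte at TRACE_BYTE_POSITION
  return 0;
}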
class UUIDRequestIDExtension : public RequestIDExtension { public: - explicit UUIDRequestIDExtension(Envoy::Runtime::RandomGenerator& random) : random_(random) {} + explicit UUIDRequestIDExtension(Envoy::Random::RandomGenerator& random) : random_(random) {} void set(RequestHeaderMap& request_headers, bool force) override; void setInResponse(ResponseHeaderMap& response_headers, @@ -22,7 +23,7 @@ class UUIDRequestIDExtension : public RequestIDExtension { private: // Reference to the random generator used to generate new request IDs - Envoy::Runtime::RandomGenerator& random_; + Envoy::Random::RandomGenerator& random_; // Byte on this position has predefined value of 4 for UUID4. static const int TRACE_BYTE_POSITION = 14; diff --git a/source/common/http/rest_api_fetcher.cc b/source/common/http/rest_api_fetcher.cc index 612fff3708a3..ef4cca9bcd7b 100644 --- a/source/common/http/rest_api_fetcher.cc +++ b/source/common/http/rest_api_fetcher.cc @@ -13,7 +13,7 @@ namespace Envoy { namespace Http { RestApiFetcher::RestApiFetcher(Upstream::ClusterManager& cm, const std::string& remote_cluster_name, - Event::Dispatcher& dispatcher, Runtime::RandomGenerator& random, + Event::Dispatcher& dispatcher, Random::RandomGenerator& random, std::chrono::milliseconds refresh_interval, std::chrono::milliseconds request_timeout) : remote_cluster_name_(remote_cluster_name), cm_(cm), random_(random), diff --git a/source/common/http/rest_api_fetcher.h b/source/common/http/rest_api_fetcher.h index 7dac6f3ae836..f4b19ab17a65 100644 --- a/source/common/http/rest_api_fetcher.h +++ b/source/common/http/rest_api_fetcher.h @@ -3,6 +3,7 @@ #include #include +#include "envoy/common/random_generator.h" #include "envoy/config/subscription.h" #include "envoy/event/dispatcher.h" #include "envoy/runtime/runtime.h" @@ -18,7 +19,7 @@ namespace Http { class RestApiFetcher : public Http::AsyncClient::Callbacks { protected: RestApiFetcher(Upstream::ClusterManager& cm, const std::string& remote_cluster_name, - Event::Dispatcher& dispatcher, Runtime::RandomGenerator& random, + Event::Dispatcher& dispatcher, Random::RandomGenerator& random, std::chrono::milliseconds refresh_interval, std::chrono::milliseconds request_timeout); ~RestApiFetcher() override; @@ -68,7 +69,7 @@ class RestApiFetcher : public Http::AsyncClient::Callbacks { void onBeforeFinalizeUpstreamSpan(Envoy::Tracing::Span&, const Http::ResponseHeaderMap*) override {} - Runtime::RandomGenerator& random_; + Random::RandomGenerator& random_; const std::chrono::milliseconds refresh_interval_; const std::chrono::milliseconds request_timeout_; Event::TimerPtr refresh_timer_; diff --git a/source/common/router/retry_state_impl.cc b/source/common/router/retry_state_impl.cc index 54d667af29ce..043a726d3464 100644 --- a/source/common/router/retry_state_impl.cc +++ b/source/common/router/retry_state_impl.cc @@ -32,11 +32,12 @@ const uint32_t RetryPolicy::RETRY_ON_GRPC_DEADLINE_EXCEEDED; const uint32_t RetryPolicy::RETRY_ON_GRPC_RESOURCE_EXHAUSTED; const uint32_t RetryPolicy::RETRY_ON_GRPC_UNAVAILABLE; -RetryStatePtr -RetryStateImpl::create(const RetryPolicy& route_policy, Http::RequestHeaderMap& request_headers, - const Upstream::ClusterInfo& cluster, const VirtualCluster* vcluster, - Runtime::Loader& runtime, Runtime::RandomGenerator& random, - Event::Dispatcher& dispatcher, Upstream::ResourcePriority priority) { +RetryStatePtr RetryStateImpl::create(const RetryPolicy& route_policy, + Http::RequestHeaderMap& request_headers, + const Upstream::ClusterInfo& cluster, + const 
VirtualCluster* vcluster, Runtime::Loader& runtime, + Random::RandomGenerator& random, Event::Dispatcher& dispatcher, + Upstream::ResourcePriority priority) { RetryStatePtr ret; // We short circuit here and do not bother with an allocation if there is no chance we will retry. @@ -63,7 +64,7 @@ RetryStateImpl::create(const RetryPolicy& route_policy, Http::RequestHeaderMap& RetryStateImpl::RetryStateImpl(const RetryPolicy& route_policy, Http::RequestHeaderMap& request_headers, const Upstream::ClusterInfo& cluster, const VirtualCluster* vcluster, - Runtime::Loader& runtime, Runtime::RandomGenerator& random, + Runtime::Loader& runtime, Random::RandomGenerator& random, Event::Dispatcher& dispatcher, Upstream::ResourcePriority priority) : cluster_(cluster), vcluster_(vcluster), runtime_(runtime), random_(random), dispatcher_(dispatcher), retry_on_(route_policy.retryOn()), diff --git a/source/common/router/retry_state_impl.h b/source/common/router/retry_state_impl.h index 6fc4f7125095..9b0a19a77911 100644 --- a/source/common/router/retry_state_impl.h +++ b/source/common/router/retry_state_impl.h @@ -3,6 +3,7 @@ #include #include +#include "envoy/common/random_generator.h" #include "envoy/event/timer.h" #include "envoy/http/codec.h" #include "envoy/http/header_map.h" @@ -27,7 +28,7 @@ class RetryStateImpl : public RetryState { static RetryStatePtr create(const RetryPolicy& route_policy, Http::RequestHeaderMap& request_headers, const Upstream::ClusterInfo& cluster, const VirtualCluster* vcluster, - Runtime::Loader& runtime, Runtime::RandomGenerator& random, + Runtime::Loader& runtime, Random::RandomGenerator& random, Event::Dispatcher& dispatcher, Upstream::ResourcePriority priority); ~RetryStateImpl() override; @@ -90,7 +91,7 @@ class RetryStateImpl : public RetryState { private: RetryStateImpl(const RetryPolicy& route_policy, Http::RequestHeaderMap& request_headers, const Upstream::ClusterInfo& cluster, const VirtualCluster* vcluster, - Runtime::Loader& runtime, Runtime::RandomGenerator& random, + Runtime::Loader& runtime, Random::RandomGenerator& random, Event::Dispatcher& dispatcher, Upstream::ResourcePriority priority); void enableBackoffTimer(); @@ -101,7 +102,7 @@ class RetryStateImpl : public RetryState { const Upstream::ClusterInfo& cluster_; const VirtualCluster* vcluster_; Runtime::Loader& runtime_; - Runtime::RandomGenerator& random_; + Random::RandomGenerator& random_; Event::Dispatcher& dispatcher_; uint32_t retry_on_{}; uint32_t retries_remaining_{}; diff --git a/source/common/router/router.cc b/source/common/router/router.cc index a466856315b3..3da9226fcc81 100644 --- a/source/common/router/router.cc +++ b/source/common/router/router.cc @@ -1554,7 +1554,7 @@ uint32_t Filter::numRequestsAwaitingHeaders() { RetryStatePtr ProdFilter::createRetryState(const RetryPolicy& policy, Http::RequestHeaderMap& request_headers, const Upstream::ClusterInfo& cluster, const VirtualCluster* vcluster, - Runtime::Loader& runtime, Runtime::RandomGenerator& random, + Runtime::Loader& runtime, Random::RandomGenerator& random, Event::Dispatcher& dispatcher, Upstream::ResourcePriority priority) { return RetryStateImpl::create(policy, request_headers, cluster, vcluster, runtime, random, dispatcher, priority); diff --git a/source/common/router/router.h b/source/common/router/router.h index 18259f9c2dc4..7ba3e98e29fb 100644 --- a/source/common/router/router.h +++ b/source/common/router/router.h @@ -6,6 +6,7 @@ #include #include +#include "envoy/common/random_generator.h" #include 
"envoy/extensions/filters/http/router/v3/router.pb.h" #include "envoy/http/codec.h" #include "envoy/http/codes.h" @@ -180,7 +181,7 @@ class FilterConfig { public: FilterConfig(const std::string& stat_prefix, const LocalInfo::LocalInfo& local_info, Stats::Scope& scope, Upstream::ClusterManager& cm, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, ShadowWriterPtr&& shadow_writer, + Random::RandomGenerator& random, ShadowWriterPtr&& shadow_writer, bool emit_dynamic_stats, bool start_child_span, bool suppress_envoy_headers, bool respect_expected_rq_timeout, const Protobuf::RepeatedPtrField& strict_check_headers, @@ -225,7 +226,7 @@ class FilterConfig { const LocalInfo::LocalInfo& local_info_; Upstream::ClusterManager& cm_; Runtime::Loader& runtime_; - Runtime::RandomGenerator& random_; + Random::RandomGenerator& random_; FilterStats stats_; const bool emit_dynamic_stats_; const bool start_child_span_; @@ -474,7 +475,7 @@ class Filter : Logger::Loggable, virtual RetryStatePtr createRetryState(const RetryPolicy& policy, Http::RequestHeaderMap& request_headers, const Upstream::ClusterInfo& cluster, const VirtualCluster* vcluster, - Runtime::Loader& runtime, Runtime::RandomGenerator& random, + Runtime::Loader& runtime, Random::RandomGenerator& random, Event::Dispatcher& dispatcher, Upstream::ResourcePriority priority) PURE; std::unique_ptr createConnPool(); @@ -563,7 +564,7 @@ class ProdFilter : public Filter { RetryStatePtr createRetryState(const RetryPolicy& policy, Http::RequestHeaderMap& request_headers, const Upstream::ClusterInfo& cluster, const VirtualCluster* vcluster, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, Event::Dispatcher& dispatcher, + Random::RandomGenerator& random, Event::Dispatcher& dispatcher, Upstream::ResourcePriority priority) override; }; diff --git a/source/common/runtime/BUILD b/source/common/runtime/BUILD index 4e76ab315760..3c3201b69a9e 100644 --- a/source/common/runtime/BUILD +++ b/source/common/runtime/BUILD @@ -47,7 +47,6 @@ envoy_cc_library( hdrs = [ "runtime_impl.h", ], - external_deps = ["ssl"], deps = [ ":runtime_features_lib", ":runtime_protos_lib", @@ -61,6 +60,7 @@ envoy_cc_library( "//include/envoy/upstream:cluster_manager_interface", "//source/common/common:empty_string", "//source/common/common:minimal_logger_lib", + "//source/common/common:random_generator_lib", #FIXME "//source/common/common:thread_lib", "//source/common/common:utility_lib", "//source/common/config:api_version_lib", diff --git a/source/common/runtime/runtime_impl.cc b/source/common/runtime/runtime_impl.cc index 295f9ee23ffa..07181ccc5728 100644 --- a/source/common/runtime/runtime_impl.cc +++ b/source/common/runtime/runtime_impl.cc @@ -1,7 +1,6 @@ #include "common/runtime/runtime_impl.h" #include -#include #include #include @@ -26,135 +25,10 @@ #include "absl/strings/match.h" #include "absl/strings/numbers.h" -#include "openssl/rand.h" namespace Envoy { namespace Runtime { -const size_t RandomGeneratorImpl::UUID_LENGTH = 36; - -uint64_t RandomGeneratorImpl::random() { - // Prefetch 256 * sizeof(uint64_t) bytes of randomness. buffered_idx is initialized to 256, - // i.e. out-of-range value, so the buffer will be filled with randomness on the first call - // to this function. - // - // There is a diminishing return when increasing the prefetch size, as illustrated below in - // a test that generates 1,000,000,000 uint64_t numbers (results on Intel Xeon E5-1650v3). 
- // - // //test/common/runtime:runtime_impl_test - Random.DISABLED_benchmarkRandom - // - // prefetch | time | improvement - // (uint64_t) | (ms) | (% vs prev) - // --------------------------------- - // 32 | 25,931 | - // 64 | 15,124 | 42% faster - // 128 | 9,653 | 36% faster - // 256 | 6,930 | 28% faster <-- used right now - // 512 | 5,571 | 20% faster - // 1024 | 4,888 | 12% faster - // 2048 | 4,594 | 6% faster - // 4096 | 4,424 | 4% faster - // 8192 | 4,386 | 1% faster - - const size_t prefetch = 256; - static thread_local uint64_t buffered[prefetch]; - static thread_local size_t buffered_idx = prefetch; - - if (buffered_idx >= prefetch) { - int rc = RAND_bytes(reinterpret_cast(buffered), sizeof(buffered)); - ASSERT(rc == 1); - buffered_idx = 0; - } - - // Consume uint64_t from the buffer. - return buffered[buffered_idx++]; -} - -std::string RandomGeneratorImpl::uuid() { - // Prefetch 2048 bytes of randomness. buffered_idx is initialized to sizeof(buffered), - // i.e. out-of-range value, so the buffer will be filled with randomness on the first - // call to this function. - // - // There is a diminishing return when increasing the prefetch size, as illustrated below - // in a test that generates 100,000,000 UUIDs (results on Intel Xeon E5-1650v3). - // - // //test/common/runtime:uuid_util_test - UUIDUtilsTest.DISABLED_benchmark - // - // prefetch | time | improvement - // (bytes) | (ms) | (% vs prev) - // --------------------------------- - // 128 | 16,353 | - // 256 | 11,827 | 28% faster - // 512 | 9,676 | 18% faster - // 1024 | 8,594 | 11% faster - // 2048 | 8,097 | 6% faster <-- used right now - // 4096 | 7,790 | 4% faster - // 8192 | 7,737 | 1% faster - - static thread_local uint8_t buffered[2048]; - static thread_local size_t buffered_idx = sizeof(buffered); - - if (buffered_idx + 16 > sizeof(buffered)) { - int rc = RAND_bytes(buffered, sizeof(buffered)); - ASSERT(rc == 1); - buffered_idx = 0; - } - - // Consume 16 bytes from the buffer. - ASSERT(buffered_idx + 16 <= sizeof(buffered)); - uint8_t* rand = &buffered[buffered_idx]; - buffered_idx += 16; - - // Create UUID from Truly Random or Pseudo-Random Numbers. - // See: https://tools.ietf.org/html/rfc4122#section-4.4 - rand[6] = (rand[6] & 0x0f) | 0x40; // UUID version 4 (random) - rand[8] = (rand[8] & 0x3f) | 0x80; // UUID variant 1 (RFC4122) - - // Convert UUID to a string representation, e.g. a121e9e1-feae-4136-9e0e-6fac343d56c9. - static const char* const hex = "0123456789abcdef"; - char uuid[UUID_LENGTH]; - - for (uint8_t i = 0; i < 4; i++) { - const uint8_t d = rand[i]; - uuid[2 * i] = hex[d >> 4]; - uuid[2 * i + 1] = hex[d & 0x0f]; - } - - uuid[8] = '-'; - - for (uint8_t i = 4; i < 6; i++) { - const uint8_t d = rand[i]; - uuid[2 * i + 1] = hex[d >> 4]; - uuid[2 * i + 2] = hex[d & 0x0f]; - } - - uuid[13] = '-'; - - for (uint8_t i = 6; i < 8; i++) { - const uint8_t d = rand[i]; - uuid[2 * i + 2] = hex[d >> 4]; - uuid[2 * i + 3] = hex[d & 0x0f]; - } - - uuid[18] = '-'; - - for (uint8_t i = 8; i < 10; i++) { - const uint8_t d = rand[i]; - uuid[2 * i + 3] = hex[d >> 4]; - uuid[2 * i + 4] = hex[d & 0x0f]; - } - - uuid[23] = '-'; - - for (uint8_t i = 10; i < 16; i++) { - const uint8_t d = rand[i]; - uuid[2 * i + 4] = hex[d >> 4]; - uuid[2 * i + 5] = hex[d & 0x0f]; - } - - return std::string(uuid, UUID_LENGTH); -} - void SnapshotImpl::countDeprecatedFeatureUse() const { stats_.deprecated_feature_use_.inc(); // Similar to the above, but a gauge that isn't imported during a hot restart. 
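The fixed offsets in the uuid() loops above (2 * i, 2 * i + 1, 2 * i + 2, ...) are a hand-unrolled output cursor: each dash shifts the remaining hex pairs one position to the right. A standalone sketch of the same formatting with an explicit cursor, assuming the caller has already applied the version and variant bits to 16 random bytes (formatUuidSketch is a hypothetical name, not Envoy code):

#include <cstdint>
#include <string>

std::string formatUuidSketch(const uint8_t* rand) { // points at 16 prepared random bytes
  static const char* const hex = "0123456789abcdef";
  std::string out;
  out.reserve(36);
  for (int i = 0; i < 16; i++) {
    if (i == 4 || i == 6 || i == 8 || i == 10) {
      out.push_back('-'); // a dash before bytes 4, 6, 8 and 10 gives the 8-4-4-4-12 layout
    }
    out.push_back(hex[rand[i] >> 4]);
    out.push_back(hex[rand[i] & 0x0f]);
  }
  return out; // 32 hex digits + 4 dashes = 36 characters, matching UUID_LENGTH
}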
@@ -283,7 +157,7 @@ const std::vector& SnapshotImpl::getLayers() co return layers_; } -SnapshotImpl::SnapshotImpl(RandomGenerator& generator, RuntimeStats& stats, +SnapshotImpl::SnapshotImpl(Random::RandomGenerator& generator, RuntimeStats& stats, std::vector&& layers) : layers_{std::move(layers)}, generator_{generator}, stats_{stats} { for (const auto& layer : layers_) { @@ -471,7 +345,7 @@ void ProtoLayer::walkProtoValue(const ProtobufWkt::Value& v, const std::string& LoaderImpl::LoaderImpl(Event::Dispatcher& dispatcher, ThreadLocal::SlotAllocator& tls, const envoy::config::bootstrap::v3::LayeredRuntime& config, const LocalInfo::LocalInfo& local_info, Stats::Store& store, - RandomGenerator& generator, + Random::RandomGenerator& generator, ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api) : generator_(generator), stats_(generateStats(store)), tls_(tls.allocateSlot()), config_(config), service_cluster_(local_info.clusterName()), api_(api), diff --git a/source/common/runtime/runtime_impl.h b/source/common/runtime/runtime_impl.h index 5f8297ec1a2a..ff0b4c434411 100644 --- a/source/common/runtime/runtime_impl.h +++ b/source/common/runtime/runtime_impl.h @@ -7,6 +7,7 @@ #include "envoy/api/api.h" #include "envoy/common/exception.h" +#include "envoy/common/random_generator.h" #include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/config/core/v3/config_source.pb.h" #include "envoy/config/subscription.h" @@ -36,19 +37,6 @@ namespace Runtime { using RuntimeSingleton = ThreadSafeSingleton; -/** - * Implementation of RandomGenerator that uses per-thread RANLUX generators seeded with current - * time. - */ -class RandomGeneratorImpl : public RandomGenerator { -public: - // Runtime::RandomGenerator - uint64_t random() override; - std::string uuid() override; - - static const size_t UUID_LENGTH; -}; - /** * All runtime stats. 
@see stats_macros.h */ @@ -75,7 +63,7 @@ struct RuntimeStats { */ class SnapshotImpl : public Snapshot, Logger::Loggable { public: - SnapshotImpl(RandomGenerator& generator, RuntimeStats& stats, + SnapshotImpl(Random::RandomGenerator& generator, RuntimeStats& stats, std::vector&& layers); // Runtime::Snapshot @@ -125,7 +113,7 @@ class SnapshotImpl : public Snapshot, Logger::Loggable { const std::vector layers_; EntryMap values_; - RandomGenerator& generator_; + Random::RandomGenerator& generator_; RuntimeStats& stats_; }; @@ -243,8 +231,8 @@ class LoaderImpl : public Loader, Logger::Loggable { LoaderImpl(Event::Dispatcher& dispatcher, ThreadLocal::SlotAllocator& tls, const envoy::config::bootstrap::v3::LayeredRuntime& config, const LocalInfo::LocalInfo& local_info, Stats::Store& store, - RandomGenerator& generator, ProtobufMessage::ValidationVisitor& validation_visitor, - Api::Api& api); + Random::RandomGenerator& generator, + ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api); // Runtime::Loader void initialize(Upstream::ClusterManager& cm) override; @@ -264,7 +252,7 @@ class LoaderImpl : public Loader, Logger::Loggable { RuntimeStats generateStats(Stats::Store& store); void onRdtsReady(); - RandomGenerator& generator_; + Random::RandomGenerator& generator_; RuntimeStats stats_; AdminLayerPtr admin_layer_; ThreadLocal::SlotPtr tls_; diff --git a/source/common/tcp_proxy/tcp_proxy.h b/source/common/tcp_proxy/tcp_proxy.h index cbdddc080ec4..faa5727df255 100644 --- a/source/common/tcp_proxy/tcp_proxy.h +++ b/source/common/tcp_proxy/tcp_proxy.h @@ -7,6 +7,7 @@ #include #include "envoy/access_log/access_log.h" +#include "envoy/common/random_generator.h" #include "envoy/event/timer.h" #include "envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.pb.h" #include "envoy/network/connection.h" @@ -206,7 +207,7 @@ class Config { ThreadLocal::SlotPtr upstream_drain_manager_slot_; SharedConfigSharedPtr shared_config_; std::unique_ptr cluster_metadata_match_criteria_; - Runtime::RandomGenerator& random_generator_; + Random::RandomGenerator& random_generator_; std::unique_ptr hash_policy_; }; diff --git a/source/common/upstream/BUILD b/source/common/upstream/BUILD index 84f57d04edc6..4182da69370f 100644 --- a/source/common/upstream/BUILD +++ b/source/common/upstream/BUILD @@ -175,6 +175,7 @@ envoy_cc_library( hdrs = ["load_balancer_impl.h"], deps = [ ":edf_scheduler_lib", + "//include/envoy/common:random_generator_interface", "//include/envoy/runtime:runtime_interface", "//include/envoy/stats:stats_interface", "//include/envoy/upstream:load_balancer_interface", diff --git a/source/common/upstream/cluster_factory_impl.cc b/source/common/upstream/cluster_factory_impl.cc index 8233ae6ac6be..2f85ef31188f 100644 --- a/source/common/upstream/cluster_factory_impl.cc +++ b/source/common/upstream/cluster_factory_impl.cc @@ -27,7 +27,7 @@ std::pair ClusterFactoryImplBase:: const envoy::config::cluster::v3::Cluster& cluster, ClusterManager& cluster_manager, Stats::Store& stats, ThreadLocal::Instance& tls, Network::DnsResolverSharedPtr dns_resolver, Ssl::ContextManager& ssl_context_manager, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, Event::Dispatcher& dispatcher, + Random::RandomGenerator& random, Event::Dispatcher& dispatcher, AccessLog::AccessLogManager& log_manager, const LocalInfo::LocalInfo& local_info, Server::Admin& admin, Singleton::Manager& singleton_manager, Outlier::EventLoggerSharedPtr outlier_event_logger, bool added_via_api, diff --git 
a/source/common/upstream/cluster_factory_impl.h b/source/common/upstream/cluster_factory_impl.h index 63af89ead3fe..4e8c6d1a811d 100644 --- a/source/common/upstream/cluster_factory_impl.h +++ b/source/common/upstream/cluster_factory_impl.h @@ -11,6 +11,7 @@ #include #include +#include "envoy/common/random_generator.h" #include "envoy/config/cluster/v3/cluster.pb.h" #include "envoy/config/typed_metadata.h" #include "envoy/event/timer.h" @@ -55,7 +56,7 @@ class ClusterFactoryContextImpl : public ClusterFactoryContext { ThreadLocal::SlotAllocator& tls, Network::DnsResolverSharedPtr dns_resolver, Ssl::ContextManager& ssl_context_manager, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, Event::Dispatcher& dispatcher, + Random::RandomGenerator& random, Event::Dispatcher& dispatcher, AccessLog::AccessLogManager& log_manager, const LocalInfo::LocalInfo& local_info, Server::Admin& admin, Singleton::Manager& singleton_manager, @@ -74,7 +75,7 @@ class ClusterFactoryContextImpl : public ClusterFactoryContext { Network::DnsResolverSharedPtr dnsResolver() override { return dns_resolver_; } Ssl::ContextManager& sslContextManager() override { return ssl_context_manager_; } Runtime::Loader& runtime() override { return runtime_; } - Runtime::RandomGenerator& random() override { return random_; } + Random::RandomGenerator& random() override { return random_; } Event::Dispatcher& dispatcher() override { return dispatcher_; } AccessLog::AccessLogManager& logManager() override { return log_manager_; } const LocalInfo::LocalInfo& localInfo() override { return local_info_; } @@ -94,7 +95,7 @@ class ClusterFactoryContextImpl : public ClusterFactoryContext { Network::DnsResolverSharedPtr dns_resolver_; Ssl::ContextManager& ssl_context_manager_; Runtime::Loader& runtime_; - Runtime::RandomGenerator& random_; + Random::RandomGenerator& random_; Event::Dispatcher& dispatcher_; AccessLog::AccessLogManager& log_manager_; const LocalInfo::LocalInfo& local_info_; @@ -120,7 +121,7 @@ class ClusterFactoryImplBase : public ClusterFactory { create(const envoy::config::cluster::v3::Cluster& cluster, ClusterManager& cluster_manager, Stats::Store& stats, ThreadLocal::Instance& tls, Network::DnsResolverSharedPtr dns_resolver, Ssl::ContextManager& ssl_context_manager, - Runtime::Loader& runtime, Runtime::RandomGenerator& random, Event::Dispatcher& dispatcher, + Runtime::Loader& runtime, Random::RandomGenerator& random, Event::Dispatcher& dispatcher, AccessLog::AccessLogManager& log_manager, const LocalInfo::LocalInfo& local_info, Server::Admin& admin, Singleton::Manager& singleton_manager, Outlier::EventLoggerSharedPtr outlier_event_logger, bool added_via_api, diff --git a/source/common/upstream/cluster_manager_impl.cc b/source/common/upstream/cluster_manager_impl.cc index bd5c7e7b4b0a..758a4aa25025 100644 --- a/source/common/upstream/cluster_manager_impl.cc +++ b/source/common/upstream/cluster_manager_impl.cc @@ -236,7 +236,7 @@ void ClusterManagerInitHelper::setPrimaryClustersInitializedCb( ClusterManagerImpl::ClusterManagerImpl( const envoy::config::bootstrap::v3::Bootstrap& bootstrap, ClusterManagerFactory& factory, Stats::Store& stats, ThreadLocal::Instance& tls, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, const LocalInfo::LocalInfo& local_info, + Random::RandomGenerator& random, const LocalInfo::LocalInfo& local_info, AccessLog::AccessLogManager& log_manager, Event::Dispatcher& main_thread_dispatcher, Server::Admin& admin, ProtobufMessage::ValidationContext& validation_context, Api::Api& api, 
Http::Context& http_context, Grpc::Context& grpc_context) diff --git a/source/common/upstream/cluster_manager_impl.h b/source/common/upstream/cluster_manager_impl.h index 9ad4a5b00fff..2c8bf10e2469 100644 --- a/source/common/upstream/cluster_manager_impl.h +++ b/source/common/upstream/cluster_manager_impl.h @@ -10,6 +10,7 @@ #include #include "envoy/api/api.h" +#include "envoy/common/random_generator.h" #include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/config/cluster/v3/cluster.pb.h" #include "envoy/config/core/v3/address.pb.h" @@ -41,7 +42,7 @@ class ProdClusterManagerFactory : public ClusterManagerFactory { public: ProdClusterManagerFactory( Server::Admin& admin, Runtime::Loader& runtime, Stats::Store& stats, - ThreadLocal::Instance& tls, Runtime::RandomGenerator& random, + ThreadLocal::Instance& tls, Random::RandomGenerator& random, Network::DnsResolverSharedPtr dns_resolver, Ssl::ContextManager& ssl_context_manager, Event::Dispatcher& main_thread_dispatcher, const LocalInfo::LocalInfo& local_info, Secret::SecretManager& secret_manager, ProtobufMessage::ValidationContext& validation_context, @@ -83,7 +84,7 @@ class ProdClusterManagerFactory : public ClusterManagerFactory { Runtime::Loader& runtime_; Stats::Store& stats_; ThreadLocal::Instance& tls_; - Runtime::RandomGenerator& random_; + Random::RandomGenerator& random_; Network::DnsResolverSharedPtr dns_resolver_; Ssl::ContextManager& ssl_context_manager_; const LocalInfo::LocalInfo& local_info_; @@ -192,7 +193,7 @@ class ClusterManagerImpl : public ClusterManager, Logger::LoggableFindMethodByName( diff --git a/source/common/upstream/health_checker_impl.h b/source/common/upstream/health_checker_impl.h index 154a4d6a0d35..dc2fa6c1bbbf 100644 --- a/source/common/upstream/health_checker_impl.h +++ b/source/common/upstream/health_checker_impl.h @@ -2,6 +2,7 @@ #include "envoy/access_log/access_log.h" #include "envoy/api/api.h" +#include "envoy/common/random_generator.h" #include "envoy/config/core/v3/health_check.pb.h" #include "envoy/data/core/v3/health_check_event.pb.h" #include "envoy/grpc/status.h" @@ -39,7 +40,7 @@ class HealthCheckerFactory : public Logger::Loggable */ static HealthCheckerSharedPtr create(const envoy::config::core::v3::HealthCheck& health_check_config, - Upstream::Cluster& cluster, Runtime::Loader& runtime, Runtime::RandomGenerator& random, + Upstream::Cluster& cluster, Runtime::Loader& runtime, Random::RandomGenerator& random, Event::Dispatcher& dispatcher, AccessLog::AccessLogManager& log_manager, ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api); }; @@ -51,7 +52,7 @@ class HttpHealthCheckerImpl : public HealthCheckerImplBase { public: HttpHealthCheckerImpl(const Cluster& cluster, const envoy::config::core::v3::HealthCheck& config, Event::Dispatcher& dispatcher, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, HealthCheckEventLoggerPtr&& event_logger); + Random::RandomGenerator& random, HealthCheckEventLoggerPtr&& event_logger); /** * Utility class checking if given http status matches configured expectations. 
@@ -222,7 +223,7 @@ class TcpHealthCheckerImpl : public HealthCheckerImplBase { public: TcpHealthCheckerImpl(const Cluster& cluster, const envoy::config::core::v3::HealthCheck& config, Event::Dispatcher& dispatcher, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, HealthCheckEventLoggerPtr&& event_logger); + Random::RandomGenerator& random, HealthCheckEventLoggerPtr&& event_logger); private: struct TcpActiveHealthCheckSession; @@ -287,7 +288,7 @@ class GrpcHealthCheckerImpl : public HealthCheckerImplBase { public: GrpcHealthCheckerImpl(const Cluster& cluster, const envoy::config::core::v3::HealthCheck& config, Event::Dispatcher& dispatcher, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, HealthCheckEventLoggerPtr&& event_logger); + Random::RandomGenerator& random, HealthCheckEventLoggerPtr&& event_logger); private: struct GrpcActiveHealthCheckSession : public ActiveHealthCheckSession, diff --git a/source/common/upstream/health_discovery_service.cc b/source/common/upstream/health_discovery_service.cc index a133b93f4c45..821179d84f15 100644 --- a/source/common/upstream/health_discovery_service.cc +++ b/source/common/upstream/health_discovery_service.cc @@ -29,7 +29,7 @@ HdsDelegate::HdsDelegate(Stats::Scope& scope, Grpc::RawAsyncClientPtr async_clie envoy::config::core::v3::ApiVersion transport_api_version, Event::Dispatcher& dispatcher, Runtime::Loader& runtime, Envoy::Stats::Store& stats, Ssl::ContextManager& ssl_context_manager, - Runtime::RandomGenerator& random, ClusterInfoFactory& info_factory, + Random::RandomGenerator& random, ClusterInfoFactory& info_factory, AccessLog::AccessLogManager& access_log_manager, ClusterManager& cm, const LocalInfo::LocalInfo& local_info, Server::Admin& admin, Singleton::Manager& singleton_manager, ThreadLocal::SlotAllocator& tls, @@ -222,7 +222,7 @@ HdsCluster::HdsCluster(Server::Admin& admin, Runtime::Loader& runtime, Ssl::ContextManager& ssl_context_manager, bool added_via_api, ClusterInfoFactory& info_factory, ClusterManager& cm, const LocalInfo::LocalInfo& local_info, Event::Dispatcher& dispatcher, - Runtime::RandomGenerator& random, Singleton::Manager& singleton_manager, + Random::RandomGenerator& random, Singleton::Manager& singleton_manager, ThreadLocal::SlotAllocator& tls, ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api) : runtime_(runtime), cluster_(cluster), bind_config_(bind_config), stats_(stats), @@ -269,7 +269,7 @@ ProdClusterInfoFactory::createClusterInfo(const CreateClusterInfoParams& params) } void HdsCluster::startHealthchecks(AccessLog::AccessLogManager& access_log_manager, - Runtime::Loader& runtime, Runtime::RandomGenerator& random, + Runtime::Loader& runtime, Random::RandomGenerator& random, Event::Dispatcher& dispatcher, Api::Api& api) { for (auto& health_check : cluster_.health_checks()) { health_checkers_.push_back( diff --git a/source/common/upstream/health_discovery_service.h b/source/common/upstream/health_discovery_service.h index 3506e974f368..003ed4bb2dcd 100644 --- a/source/common/upstream/health_discovery_service.h +++ b/source/common/upstream/health_discovery_service.h @@ -1,6 +1,7 @@ #pragma once #include "envoy/api/api.h" +#include "envoy/common/random_generator.h" #include "envoy/config/cluster/v3/cluster.pb.h" #include "envoy/config/core/v3/address.pb.h" #include "envoy/event/dispatcher.h" @@ -46,7 +47,7 @@ class HdsCluster : public Cluster, Logger::Loggable { Ssl::ContextManager& ssl_context_manager, bool added_via_api, ClusterInfoFactory& info_factory, 
ClusterManager& cm, const LocalInfo::LocalInfo& local_info, Event::Dispatcher& dispatcher, - Runtime::RandomGenerator& random, Singleton::Manager& singleton_manager, + Random::RandomGenerator& random, Singleton::Manager& singleton_manager, ThreadLocal::SlotAllocator& tls, ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api); @@ -63,7 +64,7 @@ class HdsCluster : public Cluster, Logger::Loggable { // Creates and starts healthcheckers to its endpoints void startHealthchecks(AccessLog::AccessLogManager& access_log_manager, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, Event::Dispatcher& dispatcher, + Random::RandomGenerator& random, Event::Dispatcher& dispatcher, Api::Api& api); std::vector healthCheckers() { return health_checkers_; }; @@ -119,7 +120,7 @@ class HdsDelegate : Grpc::AsyncStreamCallbacks #include +#include "envoy/common/random_generator.h" #include "envoy/config/cluster/v3/cluster.pb.h" #include "envoy/runtime/runtime.h" #include "envoy/upstream/load_balancer.h" @@ -68,7 +69,7 @@ class LoadBalancerBase : public LoadBalancer { void recalculateLoadInTotalPanic(); LoadBalancerBase(const PrioritySet& priority_set, ClusterStats& stats, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, + Random::RandomGenerator& random, const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config); // Choose host set randomly, based on the healthy_per_priority_load_ and @@ -89,7 +90,7 @@ class LoadBalancerBase : public LoadBalancer { ClusterStats& stats_; Runtime::Loader& runtime_; - Runtime::RandomGenerator& random_; + Random::RandomGenerator& random_; const uint32_t default_healthy_panic_percent_; // The priority-ordered set of hosts to use for load balancing. const PrioritySet& priority_set_; @@ -171,7 +172,7 @@ class ZoneAwareLoadBalancerBase : public LoadBalancerBase { // Both priority_set and local_priority_set if non-null must have at least one host set. 
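The load balancers in the hunks above and below now take Random::RandomGenerator& rather than the Runtime-namespaced type; the simplest use of that generator is uniform host selection of the kind chooseRandomHost() performs for the Redis cluster later in this patch, preferring healthy hosts and falling back to degraded ones before picking an index with random(). A minimal standalone sketch of that pattern (pickRandomHost and the Host stand-in are hypothetical, not Envoy types):

#include <cstdint>
#include <memory>
#include <vector>

struct Host {};
using HostConstSharedPtr = std::shared_ptr<const Host>;

template <class RandomGenerator>
HostConstSharedPtr pickRandomHost(const std::vector<HostConstSharedPtr>& healthy,
                                  const std::vector<HostConstSharedPtr>& degraded,
                                  RandomGenerator& random) {
  const auto& hosts = !healthy.empty() ? healthy : degraded; // prefer healthy hosts first
  if (hosts.empty()) {
    return nullptr;
  }
  return hosts[random.random() % hosts.size()]; // uniform pick via the injected generator
}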
ZoneAwareLoadBalancerBase( const PrioritySet& priority_set, const PrioritySet* local_priority_set, ClusterStats& stats, - Runtime::Loader& runtime, Runtime::RandomGenerator& random, + Runtime::Loader& runtime, Random::RandomGenerator& random, const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config); ~ZoneAwareLoadBalancerBase() override; @@ -351,7 +352,7 @@ class EdfLoadBalancerBase : public ZoneAwareLoadBalancerBase { public: EdfLoadBalancerBase(const PrioritySet& priority_set, const PrioritySet* local_priority_set, ClusterStats& stats, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, + Random::RandomGenerator& random, const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config); // Upstream::LoadBalancerBase @@ -393,7 +394,7 @@ class RoundRobinLoadBalancer : public EdfLoadBalancerBase { public: RoundRobinLoadBalancer(const PrioritySet& priority_set, const PrioritySet* local_priority_set, ClusterStats& stats, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, + Random::RandomGenerator& random, const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config) : EdfLoadBalancerBase(priority_set, local_priority_set, stats, runtime, random, common_config) { @@ -441,7 +442,7 @@ class LeastRequestLoadBalancer : public EdfLoadBalancerBase { public: LeastRequestLoadBalancer( const PrioritySet& priority_set, const PrioritySet* local_priority_set, ClusterStats& stats, - Runtime::Loader& runtime, Runtime::RandomGenerator& random, + Runtime::Loader& runtime, Random::RandomGenerator& random, const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config, const absl::optional least_request_config) @@ -478,8 +479,7 @@ class LeastRequestLoadBalancer : public EdfLoadBalancerBase { class RandomLoadBalancer : public ZoneAwareLoadBalancerBase { public: RandomLoadBalancer(const PrioritySet& priority_set, const PrioritySet* local_priority_set, - ClusterStats& stats, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, + ClusterStats& stats, Runtime::Loader& runtime, Random::RandomGenerator& random, const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config) : ZoneAwareLoadBalancerBase(priority_set, local_priority_set, stats, runtime, random, common_config) {} diff --git a/source/common/upstream/maglev_lb.cc b/source/common/upstream/maglev_lb.cc index c7e454a97ea7..72e97126296d 100644 --- a/source/common/upstream/maglev_lb.cc +++ b/source/common/upstream/maglev_lb.cc @@ -101,7 +101,7 @@ uint64_t MaglevTable::permutation(const TableBuildEntry& entry) { MaglevLoadBalancer::MaglevLoadBalancer( const PrioritySet& priority_set, ClusterStats& stats, Stats::Scope& scope, - Runtime::Loader& runtime, Runtime::RandomGenerator& random, + Runtime::Loader& runtime, Random::RandomGenerator& random, const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config, uint64_t table_size) : ThreadAwareLoadBalancerBase(priority_set, stats, runtime, random, common_config), scope_(scope.createScope("maglev_lb.")), stats_(generateStats(*scope_)), diff --git a/source/common/upstream/maglev_lb.h b/source/common/upstream/maglev_lb.h index 12a71e4fcb2d..24eac7f6e665 100644 --- a/source/common/upstream/maglev_lb.h +++ b/source/common/upstream/maglev_lb.h @@ -1,5 +1,6 @@ #pragma once +#include "envoy/common/random_generator.h" #include "envoy/config/cluster/v3/cluster.pb.h" #include "envoy/stats/scope.h" #include "envoy/stats/stats_macros.h" @@ -70,7 +71,7 @@ class MaglevTable : public ThreadAwareLoadBalancerBase::HashingLoadBalancer, 
class MaglevLoadBalancer : public ThreadAwareLoadBalancerBase { public: MaglevLoadBalancer(const PrioritySet& priority_set, ClusterStats& stats, Stats::Scope& scope, - Runtime::Loader& runtime, Runtime::RandomGenerator& random, + Runtime::Loader& runtime, Random::RandomGenerator& random, const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config, uint64_t table_size = MaglevTable::DefaultTableSize); diff --git a/source/common/upstream/ring_hash_lb.cc b/source/common/upstream/ring_hash_lb.cc index 7953d55a4bae..296f5fbf0613 100644 --- a/source/common/upstream/ring_hash_lb.cc +++ b/source/common/upstream/ring_hash_lb.cc @@ -18,7 +18,7 @@ namespace Upstream { RingHashLoadBalancer::RingHashLoadBalancer( const PrioritySet& priority_set, ClusterStats& stats, Stats::Scope& scope, - Runtime::Loader& runtime, Runtime::RandomGenerator& random, + Runtime::Loader& runtime, Random::RandomGenerator& random, const absl::optional& config, const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config) : ThreadAwareLoadBalancerBase(priority_set, stats, runtime, random, common_config), diff --git a/source/common/upstream/ring_hash_lb.h b/source/common/upstream/ring_hash_lb.h index 9353d34715a7..288f2cfb105c 100644 --- a/source/common/upstream/ring_hash_lb.h +++ b/source/common/upstream/ring_hash_lb.h @@ -42,7 +42,7 @@ class RingHashLoadBalancer : public ThreadAwareLoadBalancerBase, public: RingHashLoadBalancer( const PrioritySet& priority_set, ClusterStats& stats, Stats::Scope& scope, - Runtime::Loader& runtime, Runtime::RandomGenerator& random, + Runtime::Loader& runtime, Random::RandomGenerator& random, const absl::optional& config, const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config); diff --git a/source/common/upstream/subset_lb.cc b/source/common/upstream/subset_lb.cc index 5c1d745f41a8..cce4fe9ffff1 100644 --- a/source/common/upstream/subset_lb.cc +++ b/source/common/upstream/subset_lb.cc @@ -21,7 +21,7 @@ namespace Upstream { SubsetLoadBalancer::SubsetLoadBalancer( LoadBalancerType lb_type, PrioritySet& priority_set, const PrioritySet* local_priority_set, ClusterStats& stats, Stats::Scope& scope, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, const LoadBalancerSubsetInfo& subsets, + Random::RandomGenerator& random, const LoadBalancerSubsetInfo& subsets, const absl::optional& lb_ring_hash_config, const absl::optional& diff --git a/source/common/upstream/subset_lb.h b/source/common/upstream/subset_lb.h index 9d90671d1f94..f1691768f682 100644 --- a/source/common/upstream/subset_lb.h +++ b/source/common/upstream/subset_lb.h @@ -26,7 +26,7 @@ class SubsetLoadBalancer : public LoadBalancer, Logger::Loggable& lb_ring_hash_config, const absl::optional& @@ -231,7 +231,7 @@ class SubsetLoadBalancer : public LoadBalancer, Logger::Loggable; struct LoadBalancerImpl : public LoadBalancer { - LoadBalancerImpl(ClusterStats& stats, Runtime::RandomGenerator& random) + LoadBalancerImpl(ClusterStats& stats, Random::RandomGenerator& random) : stats_(stats), random_(random) {} // Upstream::LoadBalancer HostConstSharedPtr chooseHost(LoadBalancerContext* context) override; ClusterStats& stats_; - Runtime::RandomGenerator& random_; + Random::RandomGenerator& random_; std::shared_ptr> per_priority_state_; std::shared_ptr healthy_per_priority_load_; std::shared_ptr degraded_per_priority_load_; }; struct LoadBalancerFactoryImpl : public LoadBalancerFactory { - LoadBalancerFactoryImpl(ClusterStats& stats, Runtime::RandomGenerator& random) + 
LoadBalancerFactoryImpl(ClusterStats& stats, Random::RandomGenerator& random) : stats_(stats), random_(random) {} // Upstream::LoadBalancerFactory LoadBalancerPtr create() override; ClusterStats& stats_; - Runtime::RandomGenerator& random_; + Random::RandomGenerator& random_; absl::Mutex mutex_; std::shared_ptr> per_priority_state_ ABSL_GUARDED_BY(mutex_); // This is split out of PerPriorityState so LoadBalancerBase::ChoosePriority can be reused. diff --git a/source/common/upstream/upstream_impl.cc b/source/common/upstream/upstream_impl.cc index 97776daa4901..69ef701a5cf1 100644 --- a/source/common/upstream/upstream_impl.cc +++ b/source/common/upstream/upstream_impl.cc @@ -634,7 +634,7 @@ class FactoryContextImpl : public Server::Configuration::CommonFactoryContext { Upstream::ClusterManager& clusterManager() override { return cluster_manager_; } Event::Dispatcher& dispatcher() override { return dispatcher_; } const LocalInfo::LocalInfo& localInfo() const override { return local_info_; } - Envoy::Runtime::RandomGenerator& random() override { return random_; } + Envoy::Random::RandomGenerator& random() override { return random_; } Envoy::Runtime::Loader& runtime() override { return runtime_; } Stats::Scope& scope() override { return stats_scope_; } Singleton::Manager& singletonManager() override { return singleton_manager_; } @@ -653,7 +653,7 @@ class FactoryContextImpl : public Server::Configuration::CommonFactoryContext { Upstream::ClusterManager& cluster_manager_; const LocalInfo::LocalInfo& local_info_; Event::Dispatcher& dispatcher_; - Envoy::Runtime::RandomGenerator& random_; + Envoy::Random::RandomGenerator& random_; Envoy::Runtime::Loader& runtime_; Singleton::Manager& singleton_manager_; ThreadLocal::SlotAllocator& tls_; diff --git a/source/exe/main_common.cc b/source/exe/main_common.cc index 85f1cad8919b..ace645ebb1aa 100644 --- a/source/exe/main_common.cc +++ b/source/exe/main_common.cc @@ -46,7 +46,7 @@ Runtime::LoaderPtr ProdComponentFactory::createRuntime(Server::Instance& server, MainCommonBase::MainCommonBase(const OptionsImpl& options, Event::TimeSystem& time_system, ListenerHooks& listener_hooks, Server::ComponentFactory& component_factory, - std::unique_ptr&& random_generator, + std::unique_ptr&& random_generator, Thread::ThreadFactory& thread_factory, Filesystem::Instance& file_system, std::unique_ptr process_context) @@ -102,7 +102,7 @@ void MainCommonBase::configureComponentLogLevels() { } } -void MainCommonBase::configureHotRestarter(Runtime::RandomGenerator& random_generator) { +void MainCommonBase::configureHotRestarter(Random::RandomGenerator& random_generator) { #ifdef ENVOY_HOT_RESTART if (!options_.hotRestartDisabled()) { uint32_t base_id = options_.baseId(); @@ -188,7 +188,7 @@ void MainCommonBase::adminRequest(absl::string_view path_and_query, absl::string MainCommon::MainCommon(int argc, const char* const* argv) : options_(argc, argv, &MainCommon::hotRestartVersion, spdlog::level::info), base_(options_, real_time_system_, default_listener_hooks_, prod_component_factory_, - std::make_unique(), platform_impl_.threadFactory(), + std::make_unique(), platform_impl_.threadFactory(), platform_impl_.fileSystem(), nullptr) {} std::string MainCommon::hotRestartVersion(bool hot_restart_enabled) { diff --git a/source/exe/main_common.h b/source/exe/main_common.h index 8f55253bf55e..e548efc5c491 100644 --- a/source/exe/main_common.h +++ b/source/exe/main_common.h @@ -38,7 +38,7 @@ class MainCommonBase { // destructed. 
MainCommonBase(const OptionsImpl& options, Event::TimeSystem& time_system, ListenerHooks& listener_hooks, Server::ComponentFactory& component_factory, - std::unique_ptr&& random_generator, + std::unique_ptr&& random_generator, Thread::ThreadFactory& thread_factory, Filesystem::Instance& file_system, std::unique_ptr process_context); @@ -87,7 +87,7 @@ class MainCommonBase { private: void configureComponentLogLevels(); - void configureHotRestarter(Runtime::RandomGenerator& random_generator); + void configureHotRestarter(Random::RandomGenerator& random_generator); }; // TODO(jmarantz): consider removing this class; I think it'd be more useful to diff --git a/source/extensions/clusters/aggregate/cluster.cc b/source/extensions/clusters/aggregate/cluster.cc index 965d06a5b9c5..836ef16627cc 100644 --- a/source/extensions/clusters/aggregate/cluster.cc +++ b/source/extensions/clusters/aggregate/cluster.cc @@ -14,7 +14,7 @@ namespace Aggregate { Cluster::Cluster(const envoy::config::cluster::v3::Cluster& cluster, const envoy::extensions::clusters::aggregate::v3::ClusterConfig& config, Upstream::ClusterManager& cluster_manager, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, + Random::RandomGenerator& random, Server::Configuration::TransportSocketFactoryContextImpl& factory_context, Stats::ScopePtr&& stats_scope, ThreadLocal::SlotAllocator& tls, bool added_via_api) : Upstream::ClusterImplBase(cluster, runtime, factory_context, std::move(stats_scope), diff --git a/source/extensions/clusters/aggregate/cluster.h b/source/extensions/clusters/aggregate/cluster.h index 296a1bf9d6cd..417a8e8de156 100644 --- a/source/extensions/clusters/aggregate/cluster.h +++ b/source/extensions/clusters/aggregate/cluster.h @@ -33,7 +33,7 @@ class Cluster : public Upstream::ClusterImplBase, Upstream::ClusterUpdateCallbac Cluster(const envoy::config::cluster::v3::Cluster& cluster, const envoy::extensions::clusters::aggregate::v3::ClusterConfig& config, Upstream::ClusterManager& cluster_manager, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, + Random::RandomGenerator& random, Server::Configuration::TransportSocketFactoryContextImpl& factory_context, Stats::ScopePtr&& stats_scope, ThreadLocal::SlotAllocator& tls, bool added_via_api); @@ -53,7 +53,7 @@ class Cluster : public Upstream::ClusterImplBase, Upstream::ClusterUpdateCallbac Upstream::ClusterUpdateCallbacksHandlePtr handle_; Upstream::ClusterManager& cluster_manager_; Runtime::Loader& runtime_; - Runtime::RandomGenerator& random_; + Random::RandomGenerator& random_; ThreadLocal::SlotPtr tls_; const std::vector clusters_; @@ -71,7 +71,7 @@ class Cluster : public Upstream::ClusterImplBase, Upstream::ClusterUpdateCallbac class AggregateClusterLoadBalancer : public Upstream::LoadBalancer { public: AggregateClusterLoadBalancer( - Upstream::ClusterStats& stats, Runtime::Loader& runtime, Runtime::RandomGenerator& random, + Upstream::ClusterStats& stats, Runtime::Loader& runtime, Random::RandomGenerator& random, const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config) : stats_(stats), runtime_(runtime), random_(random), common_config_(common_config) {} @@ -84,7 +84,7 @@ class AggregateClusterLoadBalancer : public Upstream::LoadBalancer { class LoadBalancerImpl : public Upstream::LoadBalancerBase { public: LoadBalancerImpl(const PriorityContext& priority_context, Upstream::ClusterStats& stats, - Runtime::Loader& runtime, Runtime::RandomGenerator& random, + Runtime::Loader& runtime, Random::RandomGenerator& random, const 
envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config) : Upstream::LoadBalancerBase(priority_context.priority_set_, stats, runtime, random, common_config), @@ -109,7 +109,7 @@ class AggregateClusterLoadBalancer : public Upstream::LoadBalancer { LoadBalancerImplPtr load_balancer_; Upstream::ClusterStats& stats_; Runtime::Loader& runtime_; - Runtime::RandomGenerator& random_; + Random::RandomGenerator& random_; const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config_; PriorityContextPtr priority_context_; diff --git a/source/extensions/clusters/redis/redis_cluster.h b/source/extensions/clusters/redis/redis_cluster.h index b6b0c1643cea..a3e4574c7032 100644 --- a/source/extensions/clusters/redis/redis_cluster.h +++ b/source/extensions/clusters/redis/redis_cluster.h @@ -13,6 +13,7 @@ #include #include "envoy/api/api.h" +#include "envoy/common/random_generator.h" #include "envoy/config/cluster/redis/redis_cluster.pb.h" #include "envoy/config/cluster/redis/redis_cluster.pb.validate.h" #include "envoy/config/cluster/v3/cluster.pb.h" @@ -269,7 +270,7 @@ class RedisCluster : public Upstream::BaseDynamicClusterImpl { Network::DnsLookupFamily dns_lookup_family_; const envoy::config::endpoint::v3::ClusterLoadAssignment load_assignment_; const LocalInfo::LocalInfo& local_info_; - Runtime::RandomGenerator& random_; + Random::RandomGenerator& random_; RedisDiscoverySession redis_discovery_session_; const ClusterSlotUpdateCallBackSharedPtr lb_factory_; diff --git a/source/extensions/clusters/redis/redis_cluster_lb.cc b/source/extensions/clusters/redis/redis_cluster_lb.cc index 99f76013f60e..54a559fa522b 100644 --- a/source/extensions/clusters/redis/redis_cluster_lb.cc +++ b/source/extensions/clusters/redis/redis_cluster_lb.cc @@ -100,7 +100,7 @@ Upstream::LoadBalancerPtr RedisClusterLoadBalancerFactory::create() { namespace { Upstream::HostConstSharedPtr chooseRandomHost(const Upstream::HostSetImpl& host_set, - Runtime::RandomGenerator& random) { + Random::RandomGenerator& random) { auto hosts = host_set.healthyHosts(); if (hosts.empty()) { hosts = host_set.degradedHosts(); diff --git a/source/extensions/clusters/redis/redis_cluster_lb.h b/source/extensions/clusters/redis/redis_cluster_lb.h index 2c0fedf7394f..0c5142a8290a 100644 --- a/source/extensions/clusters/redis/redis_cluster_lb.h +++ b/source/extensions/clusters/redis/redis_cluster_lb.h @@ -130,7 +130,7 @@ using ClusterSlotUpdateCallBackSharedPtr = std::shared_ptr; DnsCacheManagerSharedPtr getCacheManager(Singleton::Manager& manager, Event::Dispatcher& main_thread_dispatcher, ThreadLocal::SlotAllocator& tls, - Runtime::RandomGenerator& random, Runtime::Loader& loader, + Random::RandomGenerator& random, Runtime::Loader& loader, Stats::Scope& root_scope); /** diff --git a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc index aa2c666c4242..a75fcb337aec 100644 --- a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc +++ b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc @@ -16,7 +16,7 @@ namespace DynamicForwardProxy { DnsCacheImpl::DnsCacheImpl( Event::Dispatcher& main_thread_dispatcher, ThreadLocal::SlotAllocator& tls, - Runtime::RandomGenerator& random, Runtime::Loader& loader, Stats::Scope& root_scope, + Random::RandomGenerator& random, Runtime::Loader& loader, Stats::Scope& root_scope, const envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig& config) : 
main_thread_dispatcher_(main_thread_dispatcher), dns_lookup_family_(Upstream::getDnsLookupFamilyFromEnum(config.dns_lookup_family())), diff --git a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.h b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.h index 35c72e8d8f23..6ba35d5a5f31 100644 --- a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.h +++ b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.h @@ -42,7 +42,7 @@ struct DnsCacheStats { class DnsCacheImpl : public DnsCache, Logger::Loggable { public: DnsCacheImpl(Event::Dispatcher& main_thread_dispatcher, ThreadLocal::SlotAllocator& tls, - Runtime::RandomGenerator& random, Runtime::Loader& loader, Stats::Scope& root_scope, + Random::RandomGenerator& random, Runtime::Loader& loader, Stats::Scope& root_scope, const envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig& config); ~DnsCacheImpl() override; static DnsCacheStats generateDnsCacheStats(Stats::Scope& scope); diff --git a/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.cc b/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.cc index 5c9c5198ef41..ff2eb6add5e8 100644 --- a/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.cc +++ b/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.cc @@ -36,7 +36,7 @@ DnsCacheSharedPtr DnsCacheManagerImpl::getCache( DnsCacheManagerSharedPtr getCacheManager(Singleton::Manager& singleton_manager, Event::Dispatcher& main_thread_dispatcher, ThreadLocal::SlotAllocator& tls, - Runtime::RandomGenerator& random, Runtime::Loader& loader, + Random::RandomGenerator& random, Runtime::Loader& loader, Stats::Scope& root_scope) { return singleton_manager.getTyped( SINGLETON_MANAGER_REGISTERED_NAME(dns_cache_manager), diff --git a/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.h b/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.h index 52ac9fe0d800..15db6a928a0b 100644 --- a/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.h +++ b/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.h @@ -14,7 +14,7 @@ namespace DynamicForwardProxy { class DnsCacheManagerImpl : public DnsCacheManager, public Singleton::Instance { public: DnsCacheManagerImpl(Event::Dispatcher& main_thread_dispatcher, ThreadLocal::SlotAllocator& tls, - Runtime::RandomGenerator& random, Runtime::Loader& loader, + Random::RandomGenerator& random, Runtime::Loader& loader, Stats::Scope& root_scope) : main_thread_dispatcher_(main_thread_dispatcher), tls_(tls), random_(random), loader_(loader), root_scope_(root_scope) {} @@ -35,7 +35,7 @@ class DnsCacheManagerImpl : public DnsCacheManager, public Singleton::Instance { Event::Dispatcher& main_thread_dispatcher_; ThreadLocal::SlotAllocator& tls_; - Runtime::RandomGenerator& random_; + Random::RandomGenerator& random_; Runtime::Loader& loader_; Stats::Scope& root_scope_; absl::flat_hash_map caches_; @@ -44,7 +44,7 @@ class DnsCacheManagerImpl : public DnsCacheManager, public Singleton::Instance { class DnsCacheManagerFactoryImpl : public DnsCacheManagerFactory { public: DnsCacheManagerFactoryImpl(Singleton::Manager& singleton_manager, Event::Dispatcher& dispatcher, - ThreadLocal::SlotAllocator& tls, Runtime::RandomGenerator& random, + ThreadLocal::SlotAllocator& tls, Random::RandomGenerator& random, Runtime::Loader& loader, Stats::Scope& root_scope) : singleton_manager_(singleton_manager), dispatcher_(dispatcher), tls_(tls), 
random_(random), loader_(loader), root_scope_(root_scope) {} @@ -57,7 +57,7 @@ class DnsCacheManagerFactoryImpl : public DnsCacheManagerFactory { Singleton::Manager& singleton_manager_; Event::Dispatcher& dispatcher_; ThreadLocal::SlotAllocator& tls_; - Runtime::RandomGenerator& random_; + Random::RandomGenerator& random_; Runtime::Loader& loader_; Stats::Scope& root_scope_; }; diff --git a/source/extensions/filters/http/adaptive_concurrency/controller/gradient_controller.cc b/source/extensions/filters/http/adaptive_concurrency/controller/gradient_controller.cc index fc30b1ffb423..d8063ec6723f 100644 --- a/source/extensions/filters/http/adaptive_concurrency/controller/gradient_controller.cc +++ b/source/extensions/filters/http/adaptive_concurrency/controller/gradient_controller.cc @@ -3,6 +3,7 @@ #include #include +#include "envoy/common/random_generator.h" #include "envoy/event/dispatcher.h" #include "envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.pb.h" #include "envoy/runtime/runtime.h" @@ -46,7 +47,7 @@ GradientControllerConfig::GradientControllerConfig( GradientController::GradientController(GradientControllerConfig config, Event::Dispatcher& dispatcher, Runtime::Loader&, const std::string& stats_prefix, Stats::Scope& scope, - Runtime::RandomGenerator& random, TimeSource& time_source) + Random::RandomGenerator& random, TimeSource& time_source) : config_(std::move(config)), dispatcher_(dispatcher), scope_(scope), stats_(generateStats(scope_, stats_prefix)), random_(random), time_source_(time_source), deferred_limit_value_(0), num_rq_outstanding_(0), diff --git a/source/extensions/filters/http/adaptive_concurrency/controller/gradient_controller.h b/source/extensions/filters/http/adaptive_concurrency/controller/gradient_controller.h index 46bbe50aa909..176bb52095d6 100644 --- a/source/extensions/filters/http/adaptive_concurrency/controller/gradient_controller.h +++ b/source/extensions/filters/http/adaptive_concurrency/controller/gradient_controller.h @@ -3,6 +3,7 @@ #include #include +#include "envoy/common/random_generator.h" #include "envoy/common/time.h" #include "envoy/event/dispatcher.h" #include "envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.pb.h" @@ -211,7 +212,7 @@ class GradientController : public ConcurrencyController { public: GradientController(GradientControllerConfig config, Event::Dispatcher& dispatcher, Runtime::Loader& runtime, const std::string& stats_prefix, Stats::Scope& scope, - Runtime::RandomGenerator& random, TimeSource& time_source); + Random::RandomGenerator& random, TimeSource& time_source); // ConcurrencyController. RequestForwardingAction forwardingDecision() override; @@ -238,7 +239,7 @@ class GradientController : public ConcurrencyController { Event::Dispatcher& dispatcher_; Stats::Scope& scope_; GradientControllerStats stats_; - Runtime::RandomGenerator& random_; + Random::RandomGenerator& random_; TimeSource& time_source_; // Protects data related to latency sampling and RTT values. 
In addition to protecting the latency diff --git a/source/extensions/filters/http/admission_control/admission_control.cc b/source/extensions/filters/http/admission_control/admission_control.cc index 8886d73596f1..1ea8fdaf71e4 100644 --- a/source/extensions/filters/http/admission_control/admission_control.cc +++ b/source/extensions/filters/http/admission_control/admission_control.cc @@ -5,6 +5,7 @@ #include #include +#include "envoy/common/random_generator.h" #include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.h" #include "envoy/grpc/status.h" #include "envoy/http/codes.h" @@ -32,7 +33,7 @@ static constexpr double defaultAggression = 2.0; AdmissionControlFilterConfig::AdmissionControlFilterConfig( const AdmissionControlProto& proto_config, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, Stats::Scope& scope, ThreadLocal::SlotPtr&& tls, + Random::RandomGenerator& random, Stats::Scope& scope, ThreadLocal::SlotPtr&& tls, std::shared_ptr response_evaluator) : random_(random), scope_(scope), tls_(std::move(tls)), admission_control_feature_(proto_config.enabled(), runtime), diff --git a/source/extensions/filters/http/admission_control/admission_control.h b/source/extensions/filters/http/admission_control/admission_control.h index a962096ae8ce..54d793236e6b 100644 --- a/source/extensions/filters/http/admission_control/admission_control.h +++ b/source/extensions/filters/http/admission_control/admission_control.h @@ -4,6 +4,7 @@ #include #include +#include "envoy/common/random_generator.h" #include "envoy/common/time.h" #include "envoy/extensions/filters/http/admission_control/v3alpha/admission_control.pb.h" #include "envoy/http/codes.h" @@ -49,7 +50,7 @@ using AdmissionControlProto = class AdmissionControlFilterConfig { public: AdmissionControlFilterConfig(const AdmissionControlProto& proto_config, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, Stats::Scope& scope, + Random::RandomGenerator& random, Stats::Scope& scope, ThreadLocal::SlotPtr&& tls, std::shared_ptr response_evaluator); virtual ~AdmissionControlFilterConfig() = default; @@ -58,14 +59,14 @@ class AdmissionControlFilterConfig { return tls_->getTyped(); } - Runtime::RandomGenerator& random() const { return random_; } + Random::RandomGenerator& random() const { return random_; } bool filterEnabled() const { return admission_control_feature_.enabled(); } Stats::Scope& scope() const { return scope_; } double aggression() const; ResponseEvaluator& responseEvaluator() const { return *response_evaluator_; } private: - Runtime::RandomGenerator& random_; + Random::RandomGenerator& random_; Stats::Scope& scope_; const ThreadLocal::SlotPtr tls_; Runtime::FeatureFlag admission_control_feature_; diff --git a/source/extensions/filters/network/client_ssl_auth/client_ssl_auth.cc b/source/extensions/filters/network/client_ssl_auth/client_ssl_auth.cc index fcf1bc346fcf..4892e8107c61 100644 --- a/source/extensions/filters/network/client_ssl_auth/client_ssl_auth.cc +++ b/source/extensions/filters/network/client_ssl_auth/client_ssl_auth.cc @@ -24,7 +24,7 @@ namespace ClientSslAuth { ClientSslAuthConfig::ClientSslAuthConfig( const envoy::extensions::filters::network::client_ssl_auth::v3::ClientSSLAuth& config, ThreadLocal::SlotAllocator& tls, Upstream::ClusterManager& cm, Event::Dispatcher& dispatcher, - Stats::Scope& scope, Runtime::RandomGenerator& random) + Stats::Scope& scope, Random::RandomGenerator& random) : RestApiFetcher( cm, config.auth_api_cluster(), dispatcher, random, 
std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(config, refresh_delay, 60000)), @@ -45,7 +45,7 @@ ClientSslAuthConfig::ClientSslAuthConfig( ClientSslAuthConfigSharedPtr ClientSslAuthConfig::create( const envoy::extensions::filters::network::client_ssl_auth::v3::ClientSSLAuth& config, ThreadLocal::SlotAllocator& tls, Upstream::ClusterManager& cm, Event::Dispatcher& dispatcher, - Stats::Scope& scope, Runtime::RandomGenerator& random) { + Stats::Scope& scope, Random::RandomGenerator& random) { ClientSslAuthConfigSharedPtr new_config( new ClientSslAuthConfig(config, tls, cm, dispatcher, scope, random)); new_config->initialize(); diff --git a/source/extensions/filters/network/client_ssl_auth/client_ssl_auth.h b/source/extensions/filters/network/client_ssl_auth/client_ssl_auth.h index f766a28017da..e5d1bf793706 100644 --- a/source/extensions/filters/network/client_ssl_auth/client_ssl_auth.h +++ b/source/extensions/filters/network/client_ssl_auth/client_ssl_auth.h @@ -5,10 +5,10 @@ #include #include +#include "envoy/common/random_generator.h" #include "envoy/config/subscription.h" #include "envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.pb.h" #include "envoy/network/filter.h" -#include "envoy/runtime/runtime.h" #include "envoy/stats/scope.h" #include "envoy/stats/stats_macros.h" #include "envoy/thread_local/thread_local.h" @@ -77,7 +77,7 @@ class ClientSslAuthConfig : public Http::RestApiFetcher { static ClientSslAuthConfigSharedPtr create(const envoy::extensions::filters::network::client_ssl_auth::v3::ClientSSLAuth& config, ThreadLocal::SlotAllocator& tls, Upstream::ClusterManager& cm, - Event::Dispatcher& dispatcher, Stats::Scope& scope, Runtime::RandomGenerator& random); + Event::Dispatcher& dispatcher, Stats::Scope& scope, Random::RandomGenerator& random); const AllowedPrincipals& allowedPrincipals(); const Network::Address::IpList& ipAllowlist() { return ip_allowlist_; } @@ -87,7 +87,7 @@ class ClientSslAuthConfig : public Http::RestApiFetcher { ClientSslAuthConfig( const envoy::extensions::filters::network::client_ssl_auth::v3::ClientSSLAuth& config, ThreadLocal::SlotAllocator& tls, Upstream::ClusterManager& cm, Event::Dispatcher& dispatcher, - Stats::Scope& scope, Runtime::RandomGenerator& random); + Stats::Scope& scope, Random::RandomGenerator& random); static GlobalStats generateStats(Stats::Scope& scope, const std::string& prefix); diff --git a/source/extensions/filters/network/dubbo_proxy/active_message.cc b/source/extensions/filters/network/dubbo_proxy/active_message.cc index 8af50551f616..b4dcf23f8467 100644 --- a/source/extensions/filters/network/dubbo_proxy/active_message.cc +++ b/source/extensions/filters/network/dubbo_proxy/active_message.cc @@ -185,9 +185,9 @@ void ActiveMessageEncoderFilter::continueEncoding() { // class ActiveMessage ActiveMessage::ActiveMessage(ConnectionManager& parent) : parent_(parent), request_timer_(std::make_unique( - parent_.stats().request_time_ms_, parent.time_system())), - request_id_(-1), stream_id_(parent.random_generator().random()), - stream_info_(parent.time_system()), pending_stream_decoded_(false), + parent_.stats().request_time_ms_, parent.timeSystem())), + request_id_(-1), stream_id_(parent.randomGenerator().random()), + stream_info_(parent.timeSystem()), pending_stream_decoded_(false), local_response_sent_(false) { parent_.stats().request_active_.inc(); stream_info_.setDownstreamLocalAddress(parent_.connection().localAddress()); diff --git a/source/extensions/filters/network/dubbo_proxy/conn_manager.cc 
b/source/extensions/filters/network/dubbo_proxy/conn_manager.cc index 4546ed746348..2970352d5f84 100644 --- a/source/extensions/filters/network/dubbo_proxy/conn_manager.cc +++ b/source/extensions/filters/network/dubbo_proxy/conn_manager.cc @@ -18,7 +18,7 @@ namespace DubboProxy { constexpr uint32_t BufferLimit = UINT32_MAX; -ConnectionManager::ConnectionManager(Config& config, Runtime::RandomGenerator& random_generator, +ConnectionManager::ConnectionManager(Config& config, Random::RandomGenerator& random_generator, TimeSource& time_system) : config_(config), time_system_(time_system), stats_(config_.stats()), random_generator_(random_generator), protocol_(config.createProtocol()), diff --git a/source/extensions/filters/network/dubbo_proxy/conn_manager.h b/source/extensions/filters/network/dubbo_proxy/conn_manager.h index 246df5aebfc7..f09ebfa1ac54 100644 --- a/source/extensions/filters/network/dubbo_proxy/conn_manager.h +++ b/source/extensions/filters/network/dubbo_proxy/conn_manager.h @@ -47,7 +47,7 @@ class ConnectionManager : public Network::ReadFilter, using ConfigSerializationType = envoy::extensions::filters::network::dubbo_proxy::v3::SerializationType; - ConnectionManager(Config& config, Runtime::RandomGenerator& random_generator, + ConnectionManager(Config& config, Random::RandomGenerator& random_generator, TimeSource& time_system); ~ConnectionManager() override = default; @@ -67,8 +67,8 @@ class ConnectionManager : public Network::ReadFilter, DubboFilterStats& stats() const { return stats_; } Network::Connection& connection() const { return read_callbacks_->connection(); } - TimeSource& time_system() const { return time_system_; } - Runtime::RandomGenerator& random_generator() const { return random_generator_; } + TimeSource& timeSystem() const { return time_system_; } + Random::RandomGenerator& randomGenerator() const { return random_generator_; } Config& config() const { return config_; } SerializationType downstreamSerializationType() const { return protocol_->serializer()->type(); } ProtocolType downstreamProtocolType() const { return protocol_->type(); } @@ -94,7 +94,7 @@ class ConnectionManager : public Network::ReadFilter, Config& config_; TimeSource& time_system_; DubboFilterStats& stats_; - Runtime::RandomGenerator& random_generator_; + Random::RandomGenerator& random_generator_; SerializerPtr serializer_; ProtocolPtr protocol_; diff --git a/source/extensions/filters/network/thrift_proxy/conn_manager.cc b/source/extensions/filters/network/thrift_proxy/conn_manager.cc index 8f697c0a606d..c7715986034a 100644 --- a/source/extensions/filters/network/thrift_proxy/conn_manager.cc +++ b/source/extensions/filters/network/thrift_proxy/conn_manager.cc @@ -12,7 +12,7 @@ namespace Extensions { namespace NetworkFilters { namespace ThriftProxy { -ConnectionManager::ConnectionManager(Config& config, Runtime::RandomGenerator& random_generator, +ConnectionManager::ConnectionManager(Config& config, Random::RandomGenerator& random_generator, TimeSource& time_source) : config_(config), stats_(config_.stats()), transport_(config.createTransport()), protocol_(config.createProtocol()), diff --git a/source/extensions/filters/network/thrift_proxy/conn_manager.h b/source/extensions/filters/network/thrift_proxy/conn_manager.h index 7bbf35710ced..52a6e0782b4d 100644 --- a/source/extensions/filters/network/thrift_proxy/conn_manager.h +++ b/source/extensions/filters/network/thrift_proxy/conn_manager.h @@ -1,10 +1,10 @@ #pragma once #include "envoy/common/pure.h" +#include 
"envoy/common/random_generator.h" #include "envoy/event/deferred_deletable.h" #include "envoy/network/connection.h" #include "envoy/network/filter.h" -#include "envoy/runtime/runtime.h" #include "envoy/stats/timespan.h" #include "common/buffer/buffer_impl.h" @@ -60,7 +60,7 @@ class ConnectionManager : public Network::ReadFilter, public DecoderCallbacks, Logger::Loggable { public: - ConnectionManager(Config& config, Runtime::RandomGenerator& random_generator, + ConnectionManager(Config& config, Random::RandomGenerator& random_generator, TimeSource& time_system); ~ConnectionManager() override; @@ -267,7 +267,7 @@ class ConnectionManager : public Network::ReadFilter, DecoderPtr decoder_; std::list rpcs_; Buffer::OwnedImpl request_buffer_; - Runtime::RandomGenerator& random_generator_; + Random::RandomGenerator& random_generator_; bool stopped_{false}; bool half_closed_{false}; TimeSource& time_source_; diff --git a/source/extensions/filters/udp/dns_filter/dns_filter.h b/source/extensions/filters/udp/dns_filter/dns_filter.h index d1ccbd18e207..780f63a32c2a 100644 --- a/source/extensions/filters/udp/dns_filter/dns_filter.h +++ b/source/extensions/filters/udp/dns_filter/dns_filter.h @@ -87,7 +87,7 @@ class DnsFilterEnvoyConfig : public Logger::Loggable { const std::chrono::milliseconds resolverTimeout() const { return resolver_timeout_; } Upstream::ClusterManager& clusterManager() const { return cluster_manager_; } uint64_t retryCount() const { return retry_count_; } - Runtime::RandomGenerator& random() const { return random_; } + Random::RandomGenerator& random() const { return random_; } uint64_t maxPendingLookups() const { return max_pending_lookups_; } private: @@ -114,7 +114,7 @@ class DnsFilterEnvoyConfig : public Logger::Loggable { uint64_t retry_count_; AddressConstPtrVec resolvers_; std::chrono::milliseconds resolver_timeout_; - Runtime::RandomGenerator& random_; + Random::RandomGenerator& random_; uint64_t max_pending_lookups_; }; diff --git a/source/extensions/filters/udp/dns_filter/dns_parser.h b/source/extensions/filters/udp/dns_filter/dns_parser.h index d06e6bb80afa..f32d34e3ede8 100644 --- a/source/extensions/filters/udp/dns_filter/dns_parser.h +++ b/source/extensions/filters/udp/dns_filter/dns_parser.h @@ -2,12 +2,12 @@ #include "envoy/buffer/buffer.h" #include "envoy/common/platform.h" +#include "envoy/common/random_generator.h" #include "envoy/network/address.h" #include "envoy/network/dns.h" #include "envoy/network/listener.h" #include "common/buffer/buffer_impl.h" -#include "common/runtime/runtime_impl.h" #include "common/stats/timespan_impl.h" namespace Envoy { @@ -163,7 +163,7 @@ class DnsMessageParser : public Logger::Loggable { }); DnsMessageParser(bool recurse, TimeSource& timesource, uint64_t retry_count, - Runtime::RandomGenerator& random, Stats::Histogram& latency_histogram) + Random::RandomGenerator& random, Stats::Histogram& latency_histogram) : recursion_available_(recurse), timesource_(timesource), retry_count_(retry_count), query_latency_histogram_(latency_histogram), rng_(random) {} @@ -270,7 +270,7 @@ class DnsMessageParser : public Logger::Loggable { Stats::Histogram& query_latency_histogram_; DnsHeader header_; DnsHeader response_header_; - Runtime::RandomGenerator& rng_; + Random::RandomGenerator& rng_; }; } // namespace DnsFilter diff --git a/source/extensions/health_checkers/redis/redis.cc b/source/extensions/health_checkers/redis/redis.cc index 2f403b9742e5..e0ba8989457e 100644 --- a/source/extensions/health_checkers/redis/redis.cc +++ 
b/source/extensions/health_checkers/redis/redis.cc @@ -14,7 +14,7 @@ namespace RedisHealthChecker { RedisHealthChecker::RedisHealthChecker( const Upstream::Cluster& cluster, const envoy::config::core::v3::HealthCheck& config, const envoy::config::health_checker::redis::v2::Redis& redis_config, - Event::Dispatcher& dispatcher, Runtime::Loader& runtime, Runtime::RandomGenerator& random, + Event::Dispatcher& dispatcher, Runtime::Loader& runtime, Random::RandomGenerator& random, Upstream::HealthCheckEventLoggerPtr&& event_logger, Api::Api& api, Extensions::NetworkFilters::Common::Redis::Client::ClientFactory& client_factory) : HealthCheckerImplBase(cluster, config, dispatcher, runtime, random, std::move(event_logger)), diff --git a/source/extensions/health_checkers/redis/redis.h b/source/extensions/health_checkers/redis/redis.h index 211ff6a9d555..4bc6b44e7db5 100644 --- a/source/extensions/health_checkers/redis/redis.h +++ b/source/extensions/health_checkers/redis/redis.h @@ -28,7 +28,7 @@ class RedisHealthChecker : public Upstream::HealthCheckerImplBase { RedisHealthChecker( const Upstream::Cluster& cluster, const envoy::config::core::v3::HealthCheck& config, const envoy::config::health_checker::redis::v2::Redis& redis_config, - Event::Dispatcher& dispatcher, Runtime::Loader& runtime, Runtime::RandomGenerator& random, + Event::Dispatcher& dispatcher, Runtime::Loader& runtime, Random::RandomGenerator& random, Upstream::HealthCheckEventLoggerPtr&& event_logger, Api::Api& api, Extensions::NetworkFilters::Common::Redis::Client::ClientFactory& client_factory); diff --git a/source/extensions/tracers/xray/localized_sampling.h b/source/extensions/tracers/xray/localized_sampling.h index aefcd795b058..f622d9f8874b 100644 --- a/source/extensions/tracers/xray/localized_sampling.h +++ b/source/extensions/tracers/xray/localized_sampling.h @@ -5,9 +5,9 @@ #include #include "envoy/common/time.h" -#include "envoy/runtime/runtime.h" #include "common/common/logger.h" +#include "common/common/random_generator.h" #include "extensions/tracers/xray/reservoir.h" #include "extensions/tracers/xray/sampling_strategy.h" @@ -137,7 +137,7 @@ class LocalizedSamplingManifest { class LocalizedSamplingStrategy : public SamplingStrategy { public: - LocalizedSamplingStrategy(const std::string& sampling_rules_json, Runtime::RandomGenerator& rng, + LocalizedSamplingStrategy(const std::string& sampling_rules_json, Random::RandomGenerator& rng, TimeSource& time_source) : SamplingStrategy(rng), default_manifest_(LocalizedSamplingManifest::createDefault()), custom_manifest_(sampling_rules_json), time_source_(time_source), diff --git a/source/extensions/tracers/xray/sampling_strategy.h b/source/extensions/tracers/xray/sampling_strategy.h index 2cb488a2c33f..908c28c6414e 100644 --- a/source/extensions/tracers/xray/sampling_strategy.h +++ b/source/extensions/tracers/xray/sampling_strategy.h @@ -3,7 +3,7 @@ #include #include "envoy/common/pure.h" -#include "envoy/runtime/runtime.h" +#include "envoy/common/random_generator.h" #include "common/common/macros.h" @@ -25,7 +25,7 @@ struct SamplingRequest { */ class SamplingStrategy { public: - explicit SamplingStrategy(Runtime::RandomGenerator& rng) : rng_(rng) {} + explicit SamplingStrategy(Random::RandomGenerator& rng) : rng_(rng) {} virtual ~SamplingStrategy() = default; /** @@ -38,7 +38,7 @@ class SamplingStrategy { uint64_t random() const { return rng_.random(); } private: - Runtime::RandomGenerator& rng_; + Random::RandomGenerator& rng_; }; using SamplingStrategyPtr = std::unique_ptr; 
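The SamplingStrategy change above captures the pattern this patch applies everywhere: a component holds a Random::RandomGenerator& supplied by its caller and draws values through random(), instead of depending on the Runtime namespace. The following is a minimal sketch and not Envoy source: the interface is a simplified stand-in for the one declared in "envoy/common/random_generator.h" (only the random() and uuid() calls visible in this diff are modeled), and ProbabilisticSampler is a hypothetical consumer showing how the injected generator is typically used.

#include <cstdint>
#include <string>

namespace Random {

// Simplified stand-in for Envoy's Random::RandomGenerator interface; the real
// declaration lives in "envoy/common/random_generator.h".
class RandomGenerator {
public:
  virtual ~RandomGenerator() = default;
  virtual uint64_t random() = 0;   // uniformly distributed 64-bit value
  virtual std::string uuid() = 0;  // 36-character UUID string
};

} // namespace Random

// Hypothetical consumer: takes the generator by reference so tests can inject
// a mock, and draws a value only when a decision is needed.
class ProbabilisticSampler {
public:
  ProbabilisticSampler(Random::RandomGenerator& rng, uint64_t sample_per_10000)
      : rng_(rng), sample_per_10000_(sample_per_10000) {}

  bool shouldSample() { return rng_.random() % 10000 < sample_per_10000_; }

private:
  Random::RandomGenerator& rng_;
  const uint64_t sample_per_10000_;
};
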
diff --git a/source/extensions/tracers/xray/tracer.cc b/source/extensions/tracers/xray/tracer.cc index d28fd8ff3066..3dfb0c8cf75e 100644 --- a/source/extensions/tracers/xray/tracer.cc +++ b/source/extensions/tracers/xray/tracer.cc @@ -10,9 +10,9 @@ #include "common/common/assert.h" #include "common/common/fmt.h" #include "common/common/hex.h" +#include "common/common/random_generator.h" #include "common/protobuf/message_validator_impl.h" #include "common/protobuf/utility.h" -#include "common/runtime/runtime_impl.h" #include "source/extensions/tracers/xray/daemon.pb.validate.h" @@ -39,7 +39,7 @@ constexpr auto XRaySerializationVersion = "1"; std::string generateTraceId(SystemTime point_in_time) { using std::chrono::seconds; using std::chrono::time_point_cast; - Runtime::RandomGeneratorImpl rng; + Random::RandomGeneratorImpl rng; const auto epoch = time_point_cast(point_in_time).time_since_epoch().count(); std::string out; out.reserve(35); diff --git a/source/extensions/tracers/zipkin/tracer.h b/source/extensions/tracers/zipkin/tracer.h index 74f04bf6c919..109982af7ff0 100644 --- a/source/extensions/tracers/zipkin/tracer.h +++ b/source/extensions/tracers/zipkin/tracer.h @@ -1,8 +1,8 @@ #pragma once #include "envoy/common/pure.h" +#include "envoy/common/random_generator.h" #include "envoy/common/time.h" -#include "envoy/runtime/runtime.h" #include "envoy/tracing/http_tracer.h" #include "extensions/tracers/zipkin/span_context.h" @@ -60,7 +60,7 @@ class Tracer : public TracerInterface { * @param shared_span_context Whether shared span id should be used. */ Tracer(const std::string& service_name, Network::Address::InstanceConstSharedPtr address, - Runtime::RandomGenerator& random_generator, const bool trace_id_128bit, + Random::RandomGenerator& random_generator, const bool trace_id_128bit, const bool shared_span_context, TimeSource& time_source) : service_name_(service_name), address_(address), reporter_(nullptr), random_generator_(random_generator), trace_id_128bit_(trace_id_128bit), @@ -106,7 +106,7 @@ class Tracer : public TracerInterface { const std::string service_name_; Network::Address::InstanceConstSharedPtr address_; ReporterPtr reporter_; - Runtime::RandomGenerator& random_generator_; + Random::RandomGenerator& random_generator_; const bool trace_id_128bit_; const bool shared_span_context_; TimeSource& time_source_; diff --git a/source/extensions/tracers/zipkin/zipkin_tracer_impl.cc b/source/extensions/tracers/zipkin/zipkin_tracer_impl.cc index 25d1e60ee799..238ffe64bc0b 100644 --- a/source/extensions/tracers/zipkin/zipkin_tracer_impl.cc +++ b/source/extensions/tracers/zipkin/zipkin_tracer_impl.cc @@ -68,7 +68,7 @@ Driver::TlsTracer::TlsTracer(TracerPtr&& tracer, Driver& driver) Driver::Driver(const envoy::config::trace::v3::ZipkinConfig& zipkin_config, Upstream::ClusterManager& cluster_manager, Stats::Scope& scope, ThreadLocal::SlotAllocator& tls, Runtime::Loader& runtime, - const LocalInfo::LocalInfo& local_info, Runtime::RandomGenerator& random_generator, + const LocalInfo::LocalInfo& local_info, Random::RandomGenerator& random_generator, TimeSource& time_source) : cm_(cluster_manager), tracer_stats_{ZIPKIN_TRACER_STATS( POOL_COUNTER_PREFIX(scope, "tracing.zipkin."))}, diff --git a/source/extensions/tracers/zipkin/zipkin_tracer_impl.h b/source/extensions/tracers/zipkin/zipkin_tracer_impl.h index def08e83e50c..1624ddf59cc5 100644 --- a/source/extensions/tracers/zipkin/zipkin_tracer_impl.h +++ b/source/extensions/tracers/zipkin/zipkin_tracer_impl.h @@ -1,5 +1,6 @@ #pragma once 
+#include "envoy/common/random_generator.h" #include "envoy/config/trace/v3/zipkin.pb.h" #include "envoy/local_info/local_info.h" #include "envoy/runtime/runtime.h" @@ -99,7 +100,7 @@ class Driver : public Tracing::Driver { Driver(const envoy::config::trace::v3::ZipkinConfig& zipkin_config, Upstream::ClusterManager& cluster_manager, Stats::Scope& scope, ThreadLocal::SlotAllocator& tls, Runtime::Loader& runtime, - const LocalInfo::LocalInfo& localinfo, Runtime::RandomGenerator& random_generator, + const LocalInfo::LocalInfo& localinfo, Random::RandomGenerator& random_generator, TimeSource& time_source); /** diff --git a/source/server/config_validation/cluster_manager.cc b/source/server/config_validation/cluster_manager.cc index 7cdbf0b1df4b..d5f4489c918c 100644 --- a/source/server/config_validation/cluster_manager.cc +++ b/source/server/config_validation/cluster_manager.cc @@ -28,7 +28,7 @@ ValidationClusterManagerFactory::createCds(const envoy::config::core::v3::Config ValidationClusterManager::ValidationClusterManager( const envoy::config::bootstrap::v3::Bootstrap& bootstrap, ClusterManagerFactory& factory, Stats::Store& stats, ThreadLocal::Instance& tls, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, const LocalInfo::LocalInfo& local_info, + Random::RandomGenerator& random, const LocalInfo::LocalInfo& local_info, AccessLog::AccessLogManager& log_manager, Event::Dispatcher& main_thread_dispatcher, Server::Admin& admin, ProtobufMessage::ValidationContext& validation_context, Api::Api& api, Http::Context& http_context, Grpc::Context& grpc_context, Event::TimeSystem& time_system) diff --git a/source/server/config_validation/cluster_manager.h b/source/server/config_validation/cluster_manager.h index e2a8157e34e6..6ce2c46941fe 100644 --- a/source/server/config_validation/cluster_manager.h +++ b/source/server/config_validation/cluster_manager.h @@ -23,7 +23,7 @@ class ValidationClusterManagerFactory : public ProdClusterManagerFactory { explicit ValidationClusterManagerFactory( Server::Admin& admin, Runtime::Loader& runtime, Stats::Store& stats, - ThreadLocal::Instance& tls, Runtime::RandomGenerator& random, + ThreadLocal::Instance& tls, Random::RandomGenerator& random, Network::DnsResolverSharedPtr dns_resolver, Ssl::ContextManager& ssl_context_manager, Event::Dispatcher& main_thread_dispatcher, const LocalInfo::LocalInfo& local_info, Secret::SecretManager& secret_manager, ProtobufMessage::ValidationContext& validation_context, @@ -57,7 +57,7 @@ class ValidationClusterManager : public ClusterManagerImpl { ValidationClusterManager(const envoy::config::bootstrap::v3::Bootstrap& bootstrap, ClusterManagerFactory& factory, Stats::Store& stats, ThreadLocal::Instance& tls, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, const LocalInfo::LocalInfo& local_info, + Random::RandomGenerator& random, const LocalInfo::LocalInfo& local_info, AccessLog::AccessLogManager& log_manager, Event::Dispatcher& dispatcher, Server::Admin& admin, ProtobufMessage::ValidationContext& validation_context, Api::Api& api, diff --git a/source/server/config_validation/server.h b/source/server/config_validation/server.h index 14682097f400..c9108ffaec91 100644 --- a/source/server/config_validation/server.h +++ b/source/server/config_validation/server.h @@ -13,6 +13,7 @@ #include "common/access_log/access_log_manager_impl.h" #include "common/common/assert.h" +#include "common/common/random_generator.h" #include "common/grpc/common.h" #include "common/protobuf/message_validator_impl.h" #include 
"common/router/rds_impl.h" @@ -84,7 +85,7 @@ class ValidationInstance final : Logger::Loggable, ServerLifecycleNotifier& lifecycleNotifier() override { return *this; } ListenerManager& listenerManager() override { return *listener_manager_; } Secret::SecretManager& secretManager() override { return *secret_manager_; } - Runtime::RandomGenerator& random() override { return random_generator_; } + Random::RandomGenerator& random() override { return random_generator_; } Runtime::Loader& runtime() override { return Runtime::LoaderSingleton::get(); } void shutdown() override; bool isShutdown() override { return false; } @@ -193,7 +194,7 @@ class ValidationInstance final : Logger::Loggable, Server::ValidationAdmin admin_; Singleton::ManagerPtr singleton_manager_; std::unique_ptr runtime_singleton_; - Runtime::RandomGeneratorImpl random_generator_; + Random::RandomGeneratorImpl random_generator_; std::unique_ptr ssl_context_manager_; Configuration::MainImpl config_; LocalInfo::LocalInfoPtr local_info_; diff --git a/source/server/filter_chain_manager_impl.cc b/source/server/filter_chain_manager_impl.cc index 7c544ca66521..00f71743fa5b 100644 --- a/source/server/filter_chain_manager_impl.cc +++ b/source/server/filter_chain_manager_impl.cc @@ -87,7 +87,7 @@ const LocalInfo::LocalInfo& PerFilterChainFactoryContextImpl::localInfo() const return parent_context_.localInfo(); } -Envoy::Runtime::RandomGenerator& PerFilterChainFactoryContextImpl::random() { +Envoy::Random::RandomGenerator& PerFilterChainFactoryContextImpl::random() { return parent_context_.random(); } @@ -633,7 +633,7 @@ bool FactoryContextImpl::healthCheckFailed() { return server_.healthCheckFailed( Http::Context& FactoryContextImpl::httpContext() { return server_.httpContext(); } Init::Manager& FactoryContextImpl::initManager() { return server_.initManager(); } const LocalInfo::LocalInfo& FactoryContextImpl::localInfo() const { return server_.localInfo(); } -Envoy::Runtime::RandomGenerator& FactoryContextImpl::random() { return server_.random(); } +Envoy::Random::RandomGenerator& FactoryContextImpl::random() { return server_.random(); } Envoy::Runtime::Loader& FactoryContextImpl::runtime() { return server_.runtime(); } Stats::Scope& FactoryContextImpl::scope() { return global_scope_; } Singleton::Manager& FactoryContextImpl::singletonManager() { return server_.singletonManager(); } diff --git a/source/server/filter_chain_manager_impl.h b/source/server/filter_chain_manager_impl.h index 6857bba4620b..59af0bb78ac5 100644 --- a/source/server/filter_chain_manager_impl.h +++ b/source/server/filter_chain_manager_impl.h @@ -58,7 +58,7 @@ class PerFilterChainFactoryContextImpl : public Configuration::FilterChainFactor Http::Context& httpContext() override; Init::Manager& initManager() override; const LocalInfo::LocalInfo& localInfo() const override; - Envoy::Runtime::RandomGenerator& random() override; + Envoy::Random::RandomGenerator& random() override; Envoy::Runtime::Loader& runtime() override; Stats::Scope& scope() override; Singleton::Manager& singletonManager() override; @@ -131,7 +131,7 @@ class FactoryContextImpl : public Configuration::FactoryContext { Http::Context& httpContext() override; Init::Manager& initManager() override; const LocalInfo::LocalInfo& localInfo() const override; - Envoy::Runtime::RandomGenerator& random() override; + Envoy::Random::RandomGenerator& random() override; Envoy::Runtime::Loader& runtime() override; Stats::Scope& scope() override; Singleton::Manager& singletonManager() override; diff --git 
a/source/server/listener_impl.cc b/source/server/listener_impl.cc index e995732ce8d4..fd98d5526210 100644 --- a/source/server/listener_impl.cc +++ b/source/server/listener_impl.cc @@ -168,7 +168,7 @@ Http::Context& ListenerFactoryContextBaseImpl::httpContext() { return server_.ht const LocalInfo::LocalInfo& ListenerFactoryContextBaseImpl::localInfo() const { return server_.localInfo(); } -Envoy::Runtime::RandomGenerator& ListenerFactoryContextBaseImpl::random() { +Envoy::Random::RandomGenerator& ListenerFactoryContextBaseImpl::random() { return server_.random(); } Envoy::Runtime::Loader& ListenerFactoryContextBaseImpl::runtime() { return server_.runtime(); } @@ -538,7 +538,7 @@ Http::Context& PerListenerFactoryContextImpl::httpContext() { const LocalInfo::LocalInfo& PerListenerFactoryContextImpl::localInfo() const { return listener_factory_context_base_->localInfo(); } -Envoy::Runtime::RandomGenerator& PerListenerFactoryContextImpl::random() { +Envoy::Random::RandomGenerator& PerListenerFactoryContextImpl::random() { return listener_factory_context_base_->random(); } Envoy::Runtime::Loader& PerListenerFactoryContextImpl::runtime() { diff --git a/source/server/listener_impl.h b/source/server/listener_impl.h index cdc0ded801f6..aa1bd2ca1b0d 100644 --- a/source/server/listener_impl.h +++ b/source/server/listener_impl.h @@ -106,7 +106,7 @@ class ListenerFactoryContextBaseImpl final : public Configuration::FactoryContex Http::Context& httpContext() override; Init::Manager& initManager() override; const LocalInfo::LocalInfo& localInfo() const override; - Envoy::Runtime::RandomGenerator& random() override; + Envoy::Random::RandomGenerator& random() override; Envoy::Runtime::Loader& runtime() override; Stats::Scope& scope() override; Singleton::Manager& singletonManager() override; @@ -172,7 +172,7 @@ class PerListenerFactoryContextImpl : public Configuration::ListenerFactoryConte Http::Context& httpContext() override; Init::Manager& initManager() override; const LocalInfo::LocalInfo& localInfo() const override; - Envoy::Runtime::RandomGenerator& random() override; + Envoy::Random::RandomGenerator& random() override; Envoy::Runtime::Loader& runtime() override; Stats::Scope& scope() override; Singleton::Manager& singletonManager() override; diff --git a/source/server/server.cc b/source/server/server.cc index d3e559cdaaf7..e0f7157c54c6 100644 --- a/source/server/server.cc +++ b/source/server/server.cc @@ -57,7 +57,7 @@ InstanceImpl::InstanceImpl( Init::Manager& init_manager, const Options& options, Event::TimeSystem& time_system, Network::Address::InstanceConstSharedPtr local_address, ListenerHooks& hooks, HotRestart& restarter, Stats::StoreRoot& store, Thread::BasicLockable& access_log_lock, - ComponentFactory& component_factory, Runtime::RandomGeneratorPtr&& random_generator, + ComponentFactory& component_factory, Random::RandomGeneratorPtr&& random_generator, ThreadLocal::Instance& tls, Thread::ThreadFactory& thread_factory, Filesystem::Instance& file_system, std::unique_ptr process_context) : init_manager_(init_manager), workers_started_(false), live_(false), shutdown_(false), diff --git a/source/server/server.h b/source/server/server.h index b41fafbca8b3..cfa61a9b2bd9 100644 --- a/source/server/server.h +++ b/source/server/server.h @@ -162,7 +162,7 @@ class ServerFactoryContextImpl : public Configuration::ServerFactoryContext, ProtobufMessage::ValidationContext& messageValidationContext() override { return server_.messageValidationContext(); } - Envoy::Runtime::RandomGenerator& random() 
override { return server_.random(); } + Envoy::Random::RandomGenerator& random() override { return server_.random(); } Envoy::Runtime::Loader& runtime() override { return server_.runtime(); } Stats::Scope& scope() override { return *server_scope_; } Singleton::Manager& singletonManager() override { return server_.singletonManager(); } @@ -209,7 +209,7 @@ class InstanceImpl final : Logger::Loggable, Network::Address::InstanceConstSharedPtr local_address, ListenerHooks& hooks, HotRestart& restarter, Stats::StoreRoot& store, Thread::BasicLockable& access_log_lock, ComponentFactory& component_factory, - Runtime::RandomGeneratorPtr&& random_generator, ThreadLocal::Instance& tls, + Random::RandomGeneratorPtr&& random_generator, ThreadLocal::Instance& tls, Thread::ThreadFactory& thread_factory, Filesystem::Instance& file_system, std::unique_ptr process_context); @@ -235,7 +235,7 @@ class InstanceImpl final : Logger::Loggable, Secret::SecretManager& secretManager() override { return *secret_manager_; } Envoy::MutexTracer* mutexTracer() override { return mutex_tracer_; } OverloadManager& overloadManager() override { return *overload_manager_; } - Runtime::RandomGenerator& random() override { return *random_generator_; } + Random::RandomGenerator& random() override { return *random_generator_; } Runtime::Loader& runtime() override; void shutdown() override; bool isShutdown() final { return shutdown_; } @@ -326,7 +326,7 @@ class InstanceImpl final : Logger::Loggable, std::unique_ptr admin_; Singleton::ManagerPtr singleton_manager_; Network::ConnectionHandlerPtr handler_; - Runtime::RandomGeneratorPtr random_generator_; + Random::RandomGeneratorPtr random_generator_; std::unique_ptr runtime_singleton_; std::unique_ptr ssl_context_manager_; ProdListenerComponentFactory listener_component_factory_; diff --git a/source/server/transport_socket_config_impl.h b/source/server/transport_socket_config_impl.h index 6a7a8ec17613..560b9cf61aed 100644 --- a/source/server/transport_socket_config_impl.h +++ b/source/server/transport_socket_config_impl.h @@ -15,7 +15,7 @@ class TransportSocketFactoryContextImpl : public TransportSocketFactoryContext { TransportSocketFactoryContextImpl( Server::Admin& admin, Ssl::ContextManager& context_manager, Stats::Scope& stats_scope, Upstream::ClusterManager& cm, const LocalInfo::LocalInfo& local_info, - Event::Dispatcher& dispatcher, Envoy::Runtime::RandomGenerator& random, Stats::Store& stats, + Event::Dispatcher& dispatcher, Envoy::Random::RandomGenerator& random, Stats::Store& stats, Singleton::Manager& singleton_manager, ThreadLocal::SlotAllocator& tls, ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api) : admin_(admin), context_manager_(context_manager), stats_scope_(stats_scope), @@ -39,7 +39,7 @@ class TransportSocketFactoryContextImpl : public TransportSocketFactoryContext { Upstream::ClusterManager& clusterManager() override { return cluster_manager_; } const LocalInfo::LocalInfo& localInfo() const override { return local_info_; } Event::Dispatcher& dispatcher() override { return dispatcher_; } - Envoy::Runtime::RandomGenerator& random() override { return random_; } + Envoy::Random::RandomGenerator& random() override { return random_; } Stats::Store& stats() override { return stats_; } Init::Manager& initManager() override { ASSERT(init_manager_ != nullptr); @@ -59,7 +59,7 @@ class TransportSocketFactoryContextImpl : public TransportSocketFactoryContext { Upstream::ClusterManager& cluster_manager_; const LocalInfo::LocalInfo& local_info_; 
Event::Dispatcher& dispatcher_; - Envoy::Runtime::RandomGenerator& random_; + Envoy::Random::RandomGenerator& random_; Stats::Store& stats_; Singleton::Manager& singleton_manager_; ThreadLocal::SlotAllocator& tls_; diff --git a/test/common/access_log/access_log_impl_test.cc b/test/common/access_log/access_log_impl_test.cc index 451c44bf52f8..a5b9c66279d0 100644 --- a/test/common/access_log/access_log_impl_test.cc +++ b/test/common/access_log/access_log_impl_test.cc @@ -278,7 +278,7 @@ name: accesslog path: /dev/null )EOF"; - Runtime::RandomGeneratorImpl random; + Random::RandomGeneratorImpl random; InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); // Value is taken from random generator. @@ -321,7 +321,7 @@ name: accesslog path: /dev/null )EOF"; - Runtime::RandomGeneratorImpl random; + Random::RandomGeneratorImpl random; InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); // Value is taken from random generator. @@ -439,7 +439,7 @@ name: accesslog } TEST_F(AccessLogImplTest, RequestTracing) { - Runtime::RandomGeneratorImpl random; + Random::RandomGeneratorImpl random; const std::string yaml = R"EOF( name: accesslog @@ -1269,7 +1269,7 @@ class TestHeaderFilterFactory : public ExtensionFilterFactory { ~TestHeaderFilterFactory() override = default; FilterPtr createFilter(const envoy::config::accesslog::v3::ExtensionFilter& config, - Runtime::Loader&, Runtime::RandomGenerator&) override { + Runtime::Loader&, Random::RandomGenerator&) override { auto factory_config = Config::Utility::translateToFactoryConfig( config, Envoy::ProtobufMessage::getNullValidationVisitor(), *this); const auto& header_config = @@ -1345,7 +1345,7 @@ class SampleExtensionFilterFactory : public ExtensionFilterFactory { ~SampleExtensionFilterFactory() override = default; FilterPtr createFilter(const envoy::config::accesslog::v3::ExtensionFilter& config, - Runtime::Loader&, Runtime::RandomGenerator&) override { + Runtime::Loader&, Random::RandomGenerator&) override { auto factory_config = Config::Utility::translateToFactoryConfig( config, Envoy::ProtobufMessage::getNullValidationVisitor(), *this); diff --git a/test/common/common/BUILD b/test/common/common/BUILD index dbd4473d555b..8983d0f4483b 100644 --- a/test/common/common/BUILD +++ b/test/common/common/BUILD @@ -154,6 +154,18 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "random_generator_test", + srcs = ["random_generator_test.cc"], + # Fails on windows with cr/lf yaml file checkouts + tags = ["fails_on_windows"], + deps = [ + "//source/common/common:random_generator_lib", + "//test/mocks/runtime:runtime_mocks", + "//test/test_common:environment_lib", + ], +) + envoy_cc_test( name = "utility_test", srcs = ["utility_test.cc"], diff --git a/test/common/common/backoff_strategy_test.cc b/test/common/common/backoff_strategy_test.cc index 5db265e66bfa..20ffe937065c 100644 --- a/test/common/common/backoff_strategy_test.cc +++ b/test/common/common/backoff_strategy_test.cc @@ -1,6 +1,6 @@ #include "common/common/backoff_strategy.h" -#include "test/mocks/runtime/mocks.h" +#include "test/mocks/common.h" #include "gtest/gtest.h" @@ -10,7 +10,7 @@ using testing::Return; namespace Envoy { TEST(BackOffStrategyTest, JitteredBackOffBasicFlow) { - NiceMock random; + NiceMock random; ON_CALL(random, random()).WillByDefault(Return(27)); JitteredBackOffStrategy jittered_back_off(25, 30, random); @@ -19,7 +19,7 @@ TEST(BackOffStrategyTest, JitteredBackOffBasicFlow) { } TEST(BackOffStrategyTest, 
JitteredBackOffBasicReset) { - NiceMock random; + NiceMock random; ON_CALL(random, random()).WillByDefault(Return(27)); JitteredBackOffStrategy jittered_back_off(25, 30, random); @@ -31,7 +31,7 @@ TEST(BackOffStrategyTest, JitteredBackOffBasicReset) { } TEST(BackOffStrategyTest, JitteredBackOffDoesntOverflow) { - NiceMock random; + NiceMock random; ON_CALL(random, random()).WillByDefault(Return(std::numeric_limits::max() - 1)); JitteredBackOffStrategy jittered_back_off(1, std::numeric_limits::max(), random); @@ -42,7 +42,7 @@ TEST(BackOffStrategyTest, JitteredBackOffDoesntOverflow) { } TEST(BackOffStrategyTest, JitteredBackOffWithMaxInterval) { - NiceMock random; + NiceMock random; ON_CALL(random, random()).WillByDefault(Return(9999)); JitteredBackOffStrategy jittered_back_off(5, 100, random); @@ -56,7 +56,7 @@ TEST(BackOffStrategyTest, JitteredBackOffWithMaxInterval) { } TEST(BackOffStrategyTest, JitteredBackOffWithMaxIntervalReset) { - NiceMock random; + NiceMock random; ON_CALL(random, random()).WillByDefault(Return(9999)); JitteredBackOffStrategy jittered_back_off(5, 100, random); diff --git a/test/common/common/random_generator_test.cc b/test/common/common/random_generator_test.cc new file mode 100644 index 000000000000..b2098f987b62 --- /dev/null +++ b/test/common/common/random_generator_test.cc @@ -0,0 +1,72 @@ +#include + +#include "common/common/random_generator.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Random { +namespace { + +TEST(Random, DISABLED_benchmarkRandom) { + Random::RandomGeneratorImpl random; + + for (size_t i = 0; i < 1000000000; ++i) { + random.random(); + } +} + +TEST(Random, SanityCheckOfUniquenessRandom) { + Random::RandomGeneratorImpl random; + std::set results; + const size_t num_of_results = 1000000; + + for (size_t i = 0; i < num_of_results; ++i) { + results.insert(random.random()); + } + + EXPECT_EQ(num_of_results, results.size()); +} + +TEST(Random, SanityCheckOfStdLibRandom) { + Random::RandomGeneratorImpl random; + + static const auto num_of_items = 100; + std::vector v(num_of_items); + std::iota(v.begin(), v.end(), 0); + + static const auto num_of_checks = 10000; + for (size_t i = 0; i < num_of_checks; ++i) { + const auto prev = v; + std::shuffle(v.begin(), v.end(), random); + EXPECT_EQ(v.size(), prev.size()); + EXPECT_NE(v, prev); + EXPECT_FALSE(std::is_sorted(v.begin(), v.end())); + } +} + +TEST(UUID, CheckLengthOfUUID) { + Random::RandomGeneratorImpl random; + + std::string result = random.uuid(); + + size_t expected_length = 36; + EXPECT_EQ(expected_length, result.length()); +} + +TEST(UUID, SanityCheckOfUniqueness) { + std::set uuids; + const size_t num_of_uuids = 100000; + + Random::RandomGeneratorImpl random; + for (size_t i = 0; i < num_of_uuids; ++i) { + uuids.insert(random.uuid()); + } + + EXPECT_EQ(num_of_uuids, uuids.size()); +} + +} // namespace +} // namespace Random +} // namespace Envoy diff --git a/test/common/config/datasource_test.cc b/test/common/config/datasource_test.cc index 4b76c39b59f5..340ae1c5a365 100644 --- a/test/common/config/datasource_test.cc +++ b/test/common/config/datasource_test.cc @@ -30,7 +30,7 @@ class AsyncDataSourceTest : public testing::Test { Init::ExpectableWatcherImpl init_watcher_; Init::TargetHandlePtr init_target_handle_; Api::ApiPtr api_{Api::createApiForTest()}; - NiceMock random_; + NiceMock random_; Event::MockDispatcher dispatcher_; Event::MockTimer* retry_timer_; Event::TimerCb retry_timer_cb_; diff --git 
a/test/common/config/delta_subscription_impl_test.cc b/test/common/config/delta_subscription_impl_test.cc index 6407aef51de1..0368e630d773 100644 --- a/test/common/config/delta_subscription_impl_test.cc +++ b/test/common/config/delta_subscription_impl_test.cc @@ -130,7 +130,7 @@ TEST(DeltaSubscriptionImplFixturelessTest, NoGrpcStream) { EXPECT_CALL(local_info, node()).WillRepeatedly(testing::ReturnRef(node)); NiceMock dispatcher; - NiceMock random; + NiceMock random; Envoy::Config::RateLimitSettings rate_limit_settings; NiceMock callbacks; NiceMock resource_decoder; diff --git a/test/common/config/delta_subscription_test_harness.h b/test/common/config/delta_subscription_test_harness.h index 8e4a33bce05c..d030e7d46288 100644 --- a/test/common/config/delta_subscription_test_harness.h +++ b/test/common/config/delta_subscription_test_harness.h @@ -13,11 +13,11 @@ #include "common/grpc/common.h" #include "test/common/config/subscription_test_harness.h" +#include "test/mocks/common.h" #include "test/mocks/config/mocks.h" #include "test/mocks/event/mocks.h" #include "test/mocks/grpc/mocks.h" #include "test/mocks/local_info/mocks.h" -#include "test/mocks/runtime/mocks.h" #include "test/mocks/stats/mocks.h" #include "gmock/gmock.h" @@ -194,7 +194,7 @@ class DeltaSubscriptionTestHarness : public SubscriptionTestHarness { const Protobuf::MethodDescriptor* method_descriptor_; Grpc::MockAsyncClient* async_client_; Event::MockDispatcher dispatcher_; - NiceMock random_; + NiceMock random_; NiceMock local_info_; Grpc::MockAsyncStream async_stream_; std::shared_ptr xds_context_; diff --git a/test/common/config/grpc_mux_impl_test.cc b/test/common/config/grpc_mux_impl_test.cc index 01bf43a5fe2b..bf18620b8a35 100644 --- a/test/common/config/grpc_mux_impl_test.cc +++ b/test/common/config/grpc_mux_impl_test.cc @@ -95,7 +95,7 @@ class GrpcMuxImplTestBase : public testing::Test { } NiceMock dispatcher_; - NiceMock random_; + NiceMock random_; Grpc::MockAsyncClient* async_client_; Grpc::MockAsyncStream async_stream_; std::unique_ptr grpc_mux_; diff --git a/test/common/config/grpc_stream_test.cc b/test/common/config/grpc_stream_test.cc index 4e5c620cc472..3f28cc9691e7 100644 --- a/test/common/config/grpc_stream_test.cc +++ b/test/common/config/grpc_stream_test.cc @@ -33,7 +33,7 @@ class GrpcStreamTest : public testing::Test { NiceMock dispatcher_; Grpc::MockAsyncStream async_stream_; Stats::TestUtil::TestStore stats_; - NiceMock random_; + NiceMock random_; Envoy::Config::RateLimitSettings rate_limit_settings_; NiceMock callbacks_; std::unique_ptr async_client_owner_; diff --git a/test/common/config/grpc_subscription_test_harness.h b/test/common/config/grpc_subscription_test_harness.h index b2c19e43f095..0cc685486a29 100644 --- a/test/common/config/grpc_subscription_test_harness.h +++ b/test/common/config/grpc_subscription_test_harness.h @@ -178,7 +178,7 @@ class GrpcSubscriptionTestHarness : public SubscriptionTestHarness { Grpc::MockAsyncClient* async_client_; NiceMock cm_; Event::MockDispatcher dispatcher_; - Runtime::MockRandomGenerator random_; + Random::MockRandomGenerator random_; Event::MockTimer* timer_; Event::TimerCb timer_cb_; envoy::config::core::v3::Node node_; diff --git a/test/common/config/http_subscription_test_harness.h b/test/common/config/http_subscription_test_harness.h index 95623490e21c..d0a1dc18fd87 100644 --- a/test/common/config/http_subscription_test_harness.h +++ b/test/common/config/http_subscription_test_harness.h @@ -192,7 +192,7 @@ class HttpSubscriptionTestHarness : public 
SubscriptionTestHarness { Event::MockTimer* timer_; Event::TimerCb timer_cb_; envoy::config::core::v3::Node node_; - Runtime::MockRandomGenerator random_gen_; + Random::MockRandomGenerator random_gen_; Http::MockAsyncClientRequest http_request_; Http::AsyncClient::Callbacks* http_callbacks_; Config::MockSubscriptionCallbacks callbacks_; diff --git a/test/common/config/new_grpc_mux_impl_test.cc b/test/common/config/new_grpc_mux_impl_test.cc index b0e3f4683fc4..86d88dcfd4aa 100644 --- a/test/common/config/new_grpc_mux_impl_test.cc +++ b/test/common/config/new_grpc_mux_impl_test.cc @@ -56,7 +56,7 @@ class NewGrpcMuxImplTestBase : public testing::Test { } NiceMock dispatcher_; - NiceMock random_; + NiceMock random_; Grpc::MockAsyncClient* async_client_; NiceMock async_stream_; std::unique_ptr grpc_mux_; diff --git a/test/common/config/subscription_factory_impl_test.cc b/test/common/config/subscription_factory_impl_test.cc index b383841b5fe3..185f7bb4f13a 100644 --- a/test/common/config/subscription_factory_impl_test.cc +++ b/test/common/config/subscription_factory_impl_test.cc @@ -50,7 +50,7 @@ class SubscriptionFactoryTest : public testing::Test { Upstream::MockClusterManager cm_; Event::MockDispatcher dispatcher_; - Runtime::MockRandomGenerator random_; + Random::MockRandomGenerator random_; MockSubscriptionCallbacks callbacks_; MockOpaqueResourceDecoder resource_decoder_; Http::MockAsyncClientRequest http_request_; diff --git a/test/common/config/utility_test.cc b/test/common/config/utility_test.cc index 00f23e5b6557..23ab3e0b0235 100644 --- a/test/common/config/utility_test.cc +++ b/test/common/config/utility_test.cc @@ -246,7 +246,7 @@ TEST(UtilityTest, FactoryForGrpcApiConfigSource) { } TEST(UtilityTest, PrepareDnsRefreshStrategy) { - NiceMock random; + NiceMock random; { // dns_failure_refresh_rate not set. 
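On the test side the substitution is mechanical: Runtime::MockRandomGenerator becomes Random::MockRandomGenerator, now pulled in through "test/mocks/common.h", and gmock stubbing of random() works exactly as before. A hedged sketch of that pattern follows; pickOne is a hypothetical helper rather than Envoy code, and the concrete values are illustrative.

#include <cstdint>
#include <vector>

#include "test/mocks/common.h"

#include "gmock/gmock.h"
#include "gtest/gtest.h"

namespace Envoy {

using testing::NiceMock;
using testing::Return;

// Hypothetical helper: select an element using the injected generator.
int pickOne(const std::vector<int>& items, Random::RandomGenerator& random) {
  return items[random.random() % items.size()];
}

TEST(RandomInjectionExample, DeterministicSelection) {
  // Pin random() so behavior that depends on it becomes reproducible,
  // mirroring the ON_CALL(...).WillByDefault(Return(...)) usage in the
  // backoff strategy tests earlier in this patch.
  NiceMock<Random::MockRandomGenerator> random;
  ON_CALL(random, random()).WillByDefault(Return(7));

  const std::vector<int> items{10, 20, 30, 40};
  EXPECT_EQ(40, pickOne(items, random)); // 7 % 4 == 3, so the last element.
}

} // namespace Envoy
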
diff --git a/test/common/grpc/grpc_client_integration_test_harness.h b/test/common/grpc/grpc_client_integration_test_harness.h index 3e472fa67b37..7ab7facc976f 100644 --- a/test/common/grpc/grpc_client_integration_test_harness.h +++ b/test/common/grpc/grpc_client_integration_test_harness.h @@ -454,7 +454,7 @@ class GrpcClientIntegrationTest : public GrpcClientIntegrationParamTest { NiceMock local_info_; Runtime::MockLoader runtime_; Extensions::TransportSockets::Tls::ContextManagerImpl context_manager_{test_time_.timeSystem()}; - NiceMock random_; + NiceMock random_; Http::AsyncClientPtr http_async_client_; Http::ConnectionPool::InstancePtr http_conn_pool_; Http::ContextImpl http_context_; diff --git a/test/common/http/async_client_impl_test.cc b/test/common/http/async_client_impl_test.cc index 385fb22dd3bb..80cc5af92695 100644 --- a/test/common/http/async_client_impl_test.cc +++ b/test/common/http/async_client_impl_test.cc @@ -82,7 +82,7 @@ class AsyncClientImplTest : public testing::Test { NiceMock* timer_; NiceMock dispatcher_; NiceMock runtime_; - NiceMock random_; + NiceMock random_; NiceMock local_info_; Http::ContextImpl http_context_; AsyncClientImpl client_; diff --git a/test/common/http/conn_manager_impl_fuzz_test.cc b/test/common/http/conn_manager_impl_fuzz_test.cc index 196b8bdc24d2..c563f72ec381 100644 --- a/test/common/http/conn_manager_impl_fuzz_test.cc +++ b/test/common/http/conn_manager_impl_fuzz_test.cc @@ -198,7 +198,7 @@ class FuzzConfig : public ConnectionManagerConfig { const envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager config_; - NiceMock random_; + NiceMock random_; RequestIDExtensionSharedPtr request_id_extension_; std::list access_logs_; MockServerConnection* codec_{}; @@ -534,7 +534,7 @@ DEFINE_PROTO_FUZZER(const test::common::http::ConnManagerImplTestCase& input) { FuzzConfig config(input.forward_client_cert()); NiceMock drain_close; - NiceMock random; + NiceMock random; Stats::SymbolTablePtr symbol_table(Stats::SymbolTableCreator::makeSymbolTable()); Http::ContextImpl http_context(*symbol_table); NiceMock runtime; diff --git a/test/common/http/conn_manager_impl_test.cc b/test/common/http/conn_manager_impl_test.cc index 2aab9bc4ff0d..a22aa5432296 100644 --- a/test/common/http/conn_manager_impl_test.cc +++ b/test/common/http/conn_manager_impl_test.cc @@ -393,7 +393,7 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan std::chrono::milliseconds request_timeout_{}; std::chrono::milliseconds delayed_close_timeout_{}; absl::optional max_stream_duration_{}; - NiceMock random_; + NiceMock random_; NiceMock local_info_; NiceMock factory_context_; std::shared_ptr ssl_connection_; diff --git a/test/common/http/conn_manager_utility_test.cc b/test/common/http/conn_manager_utility_test.cc index 3cb307cd6b71..97d680b67c93 100644 --- a/test/common/http/conn_manager_utility_test.cc +++ b/test/common/http/conn_manager_utility_test.cc @@ -4,6 +4,7 @@ #include "envoy/http/request_id_extension.h" #include "envoy/type/v3/percent.pb.h" +#include "common/common/random_generator.h" #include "common/http/conn_manager_utility.h" #include "common/http/header_utility.h" #include "common/http/headers.h" @@ -36,7 +37,7 @@ namespace Http { class MockRequestIDExtension : public RequestIDExtension { public: - explicit MockRequestIDExtension(Runtime::RandomGenerator& random) + explicit MockRequestIDExtension(Random::RandomGenerator& random) : real_(RequestIDExtensionFactory::defaultInstance(random)) { ON_CALL(*this, 
set(_, _)) .WillByDefault([this](Http::RequestHeaderMap& request_headers, bool force) { @@ -195,7 +196,7 @@ class ConnectionManagerUtilityTest : public testing::Test { } NiceMock connection_; - NiceMock random_; + NiceMock random_; std::shared_ptr> request_id_extension_; NiceMock config_; NiceMock route_config_; @@ -608,7 +609,7 @@ TEST_F(ConnectionManagerUtilityTest, RequestIdGeneratedWhenItsNotPresent) { } { - Runtime::RandomGeneratorImpl rand; + Random::RandomGeneratorImpl rand; TestRequestHeaderMapImpl headers{{"x-client-trace-id", "trace-id"}}; const std::string uuid = rand.uuid(); EXPECT_CALL(random_, uuid()).WillOnce(Return(uuid)); diff --git a/test/common/http/http2/metadata_encoder_decoder_test.cc b/test/common/http/http2/metadata_encoder_decoder_test.cc index 5cce5b40893f..c038c7a01a65 100644 --- a/test/common/http/http2/metadata_encoder_decoder_test.cc +++ b/test/common/http/http2/metadata_encoder_decoder_test.cc @@ -1,8 +1,8 @@ #include "common/buffer/buffer_impl.h" #include "common/common/logger.h" +#include "common/common/random_generator.h" #include "common/http/http2/metadata_decoder.h" #include "common/http/http2/metadata_encoder.h" -#include "common/runtime/runtime_impl.h" #include "test/test_common/logging.h" @@ -152,7 +152,7 @@ class MetadataEncoderDecoderTest : public testing::Test { // Application data passed to nghttp2. UserData user_data_; - Runtime::RandomGeneratorImpl random_generator_; + Random::RandomGeneratorImpl random_generator_; }; TEST_F(MetadataEncoderDecoderTest, TestMetadataSizeLimit) { @@ -306,7 +306,7 @@ TEST_F(MetadataEncoderDecoderTest, EncodeMetadataMapVectorLarge) { TEST_F(MetadataEncoderDecoderTest, EncodeFuzzedMetadata) { MetadataMapVector metadata_map_vector; for (int i = 0; i < 10; i++) { - Runtime::RandomGeneratorImpl random; + Random::RandomGeneratorImpl random; int value_size_1 = random.random() % (2 * Http::METADATA_MAX_PAYLOAD_SIZE) + 1; int value_size_2 = random.random() % (2 * Http::METADATA_MAX_PAYLOAD_SIZE) + 1; MetadataMap metadata_map = { diff --git a/test/common/http/request_id_extension_uuid_impl_test.cc b/test/common/http/request_id_extension_uuid_impl_test.cc index 7efd54948e31..0b471c3a88cf 100644 --- a/test/common/http/request_id_extension_uuid_impl_test.cc +++ b/test/common/http/request_id_extension_uuid_impl_test.cc @@ -1,9 +1,9 @@ #include +#include "common/common/random_generator.h" #include "common/http/request_id_extension_uuid_impl.h" -#include "common/runtime/runtime_impl.h" -#include "test/mocks/runtime/mocks.h" +#include "test/mocks/common.h" #include "test/test_common/utility.h" #include "gtest/gtest.h" @@ -14,7 +14,7 @@ namespace Envoy { namespace Http { TEST(UUIDRequestIDExtensionTest, SetRequestID) { - testing::StrictMock random; + testing::StrictMock random; UUIDRequestIDExtension uuid_utils(random); TestRequestHeaderMapImpl request_headers; @@ -28,7 +28,7 @@ TEST(UUIDRequestIDExtensionTest, SetRequestID) { } TEST(UUIDRequestIDExtensionTest, EnsureRequestID) { - testing::StrictMock random; + testing::StrictMock random; UUIDRequestIDExtension uuid_utils(random); TestRequestHeaderMapImpl request_headers; @@ -42,7 +42,7 @@ TEST(UUIDRequestIDExtensionTest, EnsureRequestID) { } TEST(UUIDRequestIDExtensionTest, PreserveRequestIDInResponse) { - testing::StrictMock random; + testing::StrictMock random; UUIDRequestIDExtension uuid_utils(random); TestRequestHeaderMapImpl request_headers; TestResponseHeaderMapImpl response_headers; @@ -65,7 +65,7 @@ TEST(UUIDRequestIDExtensionTest, PreserveRequestIDInResponse) { } 
TEST(UUIDRequestIDExtensionTest, ModRequestIDBy) { - Runtime::RandomGeneratorImpl random; + Random::RandomGeneratorImpl random; UUIDRequestIDExtension uuid_utils(random); TestRequestHeaderMapImpl request_headers; @@ -115,7 +115,7 @@ TEST(UUIDRequestIDExtensionTest, ModRequestIDBy) { } TEST(UUIDRequestIDExtensionTest, RequestIDModDistribution) { - Runtime::RandomGeneratorImpl random; + Random::RandomGeneratorImpl random; UUIDRequestIDExtension uuid_utils(random); TestRequestHeaderMapImpl request_headers; @@ -145,7 +145,7 @@ TEST(UUIDRequestIDExtensionTest, RequestIDModDistribution) { } TEST(UUIDRequestIDExtensionTest, DISABLED_benchmark) { - Runtime::RandomGeneratorImpl random; + Random::RandomGeneratorImpl random; for (int i = 0; i < 100000000; ++i) { random.uuid(); @@ -153,7 +153,7 @@ TEST(UUIDRequestIDExtensionTest, DISABLED_benchmark) { } TEST(UUIDRequestIDExtensionTest, SetTraceStatus) { - Runtime::RandomGeneratorImpl random; + Random::RandomGeneratorImpl random; UUIDRequestIDExtension uuid_utils(random); TestRequestHeaderMapImpl request_headers; request_headers.setRequestId(random.uuid()); diff --git a/test/common/protobuf/utility_test.cc b/test/common/protobuf/utility_test.cc index 94f8e6aef158..bf6dbb7b3cbd 100644 --- a/test/common/protobuf/utility_test.cc +++ b/test/common/protobuf/utility_test.cc @@ -1459,9 +1459,9 @@ class DeprecatedFieldsTest : public testing::TestWithParam { Event::MockDispatcher dispatcher_; NiceMock tls_; Stats::TestUtil::TestStore store_; - Runtime::MockRandomGenerator generator_; + Random::MockRandomGenerator generator_; Api::ApiPtr api_; - Runtime::MockRandomGenerator rand_; + Random::MockRandomGenerator rand_; std::unique_ptr loader_; Stats::Counter& runtime_deprecated_feature_use_; Stats::Gauge& deprecated_feature_seen_since_process_start_; diff --git a/test/common/router/retry_state_impl_test.cc b/test/common/router/retry_state_impl_test.cc index 3c5b3d5f37c9..fed1d6cc3fc3 100644 --- a/test/common/router/retry_state_impl_test.cc +++ b/test/common/router/retry_state_impl_test.cc @@ -127,7 +127,7 @@ class RouterRetryStateImplTest : public testing::Test { NiceMock cluster_; TestVirtualCluster virtual_cluster_; NiceMock runtime_; - NiceMock random_; + NiceMock random_; Event::MockDispatcher dispatcher_; Event::MockTimer* retry_timer_{}; RetryStatePtr state_; diff --git a/test/common/router/router_test.cc b/test/common/router/router_test.cc index 169a8d801d29..919df0a67545 100644 --- a/test/common/router/router_test.cc +++ b/test/common/router/router_test.cc @@ -70,7 +70,7 @@ class RouterTestFilter : public Filter { // Filter RetryStatePtr createRetryState(const RetryPolicy&, Http::RequestHeaderMap&, const Upstream::ClusterInfo&, const VirtualCluster*, - Runtime::Loader&, Runtime::RandomGenerator&, Event::Dispatcher&, + Runtime::Loader&, Random::RandomGenerator&, Event::Dispatcher&, Upstream::ResourcePriority) override { EXPECT_EQ(nullptr, retry_state_); retry_state_ = new NiceMock(); @@ -352,7 +352,7 @@ class RouterTestBase : public testing::Test { NiceMock stats_store_; NiceMock cm_; NiceMock runtime_; - NiceMock random_; + NiceMock random_; Envoy::ConnectionPool::MockCancellable cancellable_; Http::ContextImpl http_context_; NiceMock callbacks_; diff --git a/test/common/router/router_upstream_log_test.cc b/test/common/router/router_upstream_log_test.cc index d62043effadb..d0caea49ec29 100644 --- a/test/common/router/router_upstream_log_test.cc +++ b/test/common/router/router_upstream_log_test.cc @@ -63,7 +63,7 @@ class TestFilter : public Filter { // 
Filter RetryStatePtr createRetryState(const RetryPolicy&, Http::RequestHeaderMap&, const Upstream::ClusterInfo&, const VirtualCluster*, - Runtime::Loader&, Runtime::RandomGenerator&, Event::Dispatcher&, + Runtime::Loader&, Random::RandomGenerator&, Event::Dispatcher&, Upstream::ResourcePriority) override { EXPECT_EQ(nullptr, retry_state_); retry_state_ = new NiceMock(); diff --git a/test/common/runtime/runtime_impl_test.cc b/test/common/runtime/runtime_impl_test.cc index 168123abcf5a..3f0706a608bc 100644 --- a/test/common/runtime/runtime_impl_test.cc +++ b/test/common/runtime/runtime_impl_test.cc @@ -12,6 +12,7 @@ #include "common/runtime/runtime_impl.h" #include "test/common/stats/stat_test_utility.h" +#include "test/mocks/common.h" #include "test/mocks/event/mocks.h" #include "test/mocks/filesystem/mocks.h" #include "test/mocks/init/mocks.h" @@ -37,64 +38,6 @@ namespace Envoy { namespace Runtime { namespace { -TEST(Random, DISABLED_benchmarkRandom) { - Runtime::RandomGeneratorImpl random; - - for (size_t i = 0; i < 1000000000; ++i) { - random.random(); - } -} - -TEST(Random, SanityCheckOfUniquenessRandom) { - Runtime::RandomGeneratorImpl random; - std::set results; - const size_t num_of_results = 1000000; - - for (size_t i = 0; i < num_of_results; ++i) { - results.insert(random.random()); - } - - EXPECT_EQ(num_of_results, results.size()); -} - -TEST(Random, SanityCheckOfStdLibRandom) { - Runtime::RandomGeneratorImpl random; - - static const auto num_of_items = 100; - std::vector v(num_of_items); - std::iota(v.begin(), v.end(), 0); - - static const auto num_of_checks = 10000; - for (size_t i = 0; i < num_of_checks; ++i) { - const auto prev = v; - std::shuffle(v.begin(), v.end(), random); - EXPECT_EQ(v.size(), prev.size()); - EXPECT_NE(v, prev); - EXPECT_FALSE(std::is_sorted(v.begin(), v.end())); - } -} - -TEST(UUID, CheckLengthOfUUID) { - RandomGeneratorImpl random; - - std::string result = random.uuid(); - - size_t expected_length = 36; - EXPECT_EQ(expected_length, result.length()); -} - -TEST(UUID, SanityCheckOfUniqueness) { - std::set uuids; - const size_t num_of_uuids = 100000; - - RandomGeneratorImpl random; - for (size_t i = 0; i < num_of_uuids; ++i) { - uuids.insert(random.uuid()); - } - - EXPECT_EQ(num_of_uuids, uuids.size()); -} - class LoaderImplTest : public testing::Test { protected: LoaderImplTest() : api_(Api::createApiForTest(store_)) { local_info_.node_.set_cluster(""); } @@ -115,7 +58,7 @@ class LoaderImplTest : public testing::Test { Event::MockDispatcher dispatcher_; NiceMock tls_; Stats::TestUtil::TestStore store_; - MockRandomGenerator generator_; + Random::MockRandomGenerator generator_; std::unique_ptr loader_; Api::ApiPtr api_; Upstream::MockClusterManager cm_; diff --git a/test/common/secret/secret_manager_impl_test.cc b/test/common/secret/secret_manager_impl_test.cc index f77451f55183..00466f1bddf6 100644 --- a/test/common/secret/secret_manager_impl_test.cc +++ b/test/common/secret/secret_manager_impl_test.cc @@ -257,7 +257,7 @@ TEST_F(SecretManagerImplTest, DeduplicateDynamicTlsCertificateSecretProvider) { NiceMock local_info; NiceMock dispatcher; - NiceMock random; + NiceMock random; Stats::IsolatedStoreImpl stats; NiceMock init_manager; NiceMock init_watcher; @@ -340,7 +340,7 @@ TEST_F(SecretManagerImplTest, SdsDynamicSecretUpdateSuccess) { envoy::config::core::v3::ConfigSource config_source; NiceMock local_info; - NiceMock random; + NiceMock random; Stats::IsolatedStoreImpl stats; NiceMock init_manager; NiceMock init_watcher; @@ -438,7 +438,7 @@ 
TEST_F(SecretManagerImplTest, ConfigDumpHandler) { envoy::config::core::v3::ConfigSource config_source; NiceMock local_info; NiceMock dispatcher; - NiceMock random; + NiceMock random; Stats::IsolatedStoreImpl stats; NiceMock init_manager; NiceMock init_watcher; @@ -692,7 +692,7 @@ TEST_F(SecretManagerImplTest, ConfigDumpHandlerWarmingSecrets) { envoy::config::core::v3::ConfigSource config_source; NiceMock local_info; NiceMock dispatcher; - NiceMock random; + NiceMock random; Stats::IsolatedStoreImpl stats; NiceMock init_manager; NiceMock init_watcher; @@ -825,7 +825,7 @@ TEST_F(SecretManagerImplTest, ConfigDumpHandlerStaticSecrets) { envoy::config::core::v3::ConfigSource config_source; NiceMock local_info; NiceMock dispatcher; - NiceMock random; + NiceMock random; Stats::IsolatedStoreImpl stats; NiceMock init_manager; NiceMock init_watcher; @@ -897,7 +897,7 @@ TEST_F(SecretManagerImplTest, ConfigDumpHandlerStaticValidationContext) { envoy::config::core::v3::ConfigSource config_source; NiceMock local_info; NiceMock dispatcher; - NiceMock random; + NiceMock random; Stats::IsolatedStoreImpl stats; NiceMock init_manager; NiceMock init_watcher; @@ -942,7 +942,7 @@ TEST_F(SecretManagerImplTest, ConfigDumpHandlerStaticSessionTicketsContext) { envoy::config::core::v3::ConfigSource config_source; NiceMock local_info; NiceMock dispatcher; - NiceMock random; + NiceMock random; Stats::IsolatedStoreImpl stats; NiceMock init_manager; NiceMock init_watcher; diff --git a/test/common/stats/recent_lookups_speed_test.cc b/test/common/stats/recent_lookups_speed_test.cc index e3ac80b2ad39..9af07ad4ef93 100644 --- a/test/common/stats/recent_lookups_speed_test.cc +++ b/test/common/stats/recent_lookups_speed_test.cc @@ -20,6 +20,7 @@ // BM_LookupsNoEvictions 45662 ns 45662 ns 15329 // BM_LookupsAllEvictions 83015 ns 83015 ns 8435 +#include "common/common/random_generator.h" #include "common/runtime/runtime_impl.h" #include "common/stats/recent_lookups.h" @@ -30,7 +31,7 @@ class RecentLookupsSpeedTest { public: RecentLookupsSpeedTest(uint64_t lookup_variants, uint64_t capacity) { recent_lookups_.setCapacity(capacity); - Envoy::Runtime::RandomGeneratorImpl random; + Envoy::Random::RandomGeneratorImpl random; lookups_.reserve(lookup_variants); for (size_t i = 0; i < lookup_variants; ++i) { lookups_.push_back(absl::StrCat("lookup #", random.random())); @@ -39,7 +40,7 @@ class RecentLookupsSpeedTest { void test(benchmark::State& state) { for (auto _ : state) { - Envoy::Runtime::RandomGeneratorImpl random; + Envoy::Random::RandomGeneratorImpl random; for (uint64_t i = 0; i < lookups_.size(); ++i) { recent_lookups_.lookup(lookups_[random.random() % lookups_.size()]); } diff --git a/test/common/stream_info/test_util.h b/test/common/stream_info/test_util.h index b141abeb0c2e..5767592c7406 100644 --- a/test/common/stream_info/test_util.h +++ b/test/common/stream_info/test_util.h @@ -4,8 +4,8 @@ #include "envoy/stream_info/stream_info.h" #include "common/common/assert.h" +#include "common/common/random_generator.h" #include "common/http/request_id_extension_impl.h" -#include "common/runtime/runtime_impl.h" #include "common/stream_info/filter_state_impl.h" #include "test/test_common/simulated_time_system.h" @@ -227,7 +227,7 @@ class TestStreamInfo : public StreamInfo::StreamInfo { return upstream_cluster_info_; } - Runtime::RandomGeneratorImpl random_; + Random::RandomGeneratorImpl random_; SystemTime start_time_; MonotonicTime start_time_monotonic_; diff --git a/test/common/tracing/http_tracer_impl_test.cc 
b/test/common/tracing/http_tracer_impl_test.cc index eaf7437d0bc4..6bb18da079f8 100644 --- a/test/common/tracing/http_tracer_impl_test.cc +++ b/test/common/tracing/http_tracer_impl_test.cc @@ -7,12 +7,12 @@ #include "envoy/type/tracing/v3/custom_tag.pb.h" #include "common/common/base64.h" +#include "common/common/random_generator.h" #include "common/http/header_map_impl.h" #include "common/http/headers.h" #include "common/http/message_impl.h" #include "common/http/request_id_extension_impl.h" #include "common/network/utility.h" -#include "common/runtime/runtime_impl.h" #include "common/tracing/http_tracer_impl.h" #include "test/mocks/http/mocks.h" @@ -44,7 +44,7 @@ namespace { TEST(HttpTracerUtilityTest, IsTracing) { NiceMock stream_info; NiceMock stats; - Runtime::RandomGeneratorImpl random; + Random::RandomGeneratorImpl random; std::string not_traceable_guid = random.uuid(); auto rid_extension = Http::RequestIDExtensionFactory::defaultInstance(random); diff --git a/test/common/upstream/cluster_factory_impl_test.cc b/test/common/upstream/cluster_factory_impl_test.cc index 8dc52cc0bd90..bf6032ca4f6f 100644 --- a/test/common/upstream/cluster_factory_impl_test.cc +++ b/test/common/upstream/cluster_factory_impl_test.cc @@ -62,7 +62,7 @@ class ClusterFactoryTestBase { const NiceMock local_info_; NiceMock dispatcher_; NiceMock runtime_; - NiceMock random_; + NiceMock random_; Stats::IsolatedStoreImpl stats_; Singleton::ManagerImpl singleton_manager_{Thread::threadFactoryForTest()}; NiceMock tls_; diff --git a/test/common/upstream/eds_speed_test.cc b/test/common/upstream/eds_speed_test.cc index c227dfe4f39d..6f82390c9514 100644 --- a/test/common/upstream/eds_speed_test.cc +++ b/test/common/upstream/eds_speed_test.cc @@ -152,7 +152,7 @@ class EdsSpeedTest { Config::SubscriptionCallbacks* eds_callbacks_{}; Config::OpaqueResourceDecoderImpl resource_decoder_{validation_visitor_, "cluster_name"}; - NiceMock random_; + NiceMock random_; NiceMock runtime_; NiceMock local_info_; NiceMock admin_; diff --git a/test/common/upstream/eds_test.cc b/test/common/upstream/eds_test.cc index f08269380065..1370b41b180d 100644 --- a/test/common/upstream/eds_test.cc +++ b/test/common/upstream/eds_test.cc @@ -122,7 +122,7 @@ class EdsTest : public testing::Test { NiceMock dispatcher_; std::shared_ptr cluster_; Config::SubscriptionCallbacks* eds_callbacks_{}; - NiceMock random_; + NiceMock random_; NiceMock runtime_; NiceMock local_info_; NiceMock admin_; diff --git a/test/common/upstream/hds_test.cc b/test/common/upstream/hds_test.cc index d093ab2a03f1..7f3d076b5edb 100644 --- a/test/common/upstream/hds_test.cc +++ b/test/common/upstream/hds_test.cc @@ -127,7 +127,7 @@ class HdsTest : public testing::Test { NiceMock validation_visitor_; Api::ApiPtr api_; Extensions::TransportSockets::Tls::ContextManagerImpl ssl_context_manager_; - NiceMock random_; + NiceMock random_; NiceMock log_manager_; NiceMock cm_; NiceMock local_info_; diff --git a/test/common/upstream/health_checker_impl_test.cc b/test/common/upstream/health_checker_impl_test.cc index 446fcce9a8f9..46ea30ccd6d5 100644 --- a/test/common/upstream/health_checker_impl_test.cc +++ b/test/common/upstream/health_checker_impl_test.cc @@ -64,7 +64,7 @@ TEST(HealthCheckerFactoryTest, GrpcHealthCheckHTTP2NotConfiguredException) { EXPECT_CALL(*cluster.info_, features()).WillRepeatedly(Return(0)); Runtime::MockLoader runtime; - Runtime::MockRandomGenerator random; + Random::MockRandomGenerator random; Event::MockDispatcher dispatcher; AccessLog::MockAccessLogManager 
log_manager; NiceMock validation_visitor; @@ -83,7 +83,7 @@ TEST(HealthCheckerFactoryTest, CreateGrpc) { .WillRepeatedly(Return(Upstream::ClusterInfo::Features::HTTP2)); Runtime::MockLoader runtime; - Runtime::MockRandomGenerator random; + Random::MockRandomGenerator random; Event::MockDispatcher dispatcher; AccessLog::MockAccessLogManager log_manager; NiceMock validation_visitor; @@ -104,7 +104,7 @@ class HealthCheckerTestBase { std::unique_ptr event_logger_storage_{ std::make_unique()}; MockHealthCheckEventLogger& event_logger_{*event_logger_storage_}; - NiceMock random_; + NiceMock random_; NiceMock runtime_; }; diff --git a/test/common/upstream/load_balancer_benchmark.cc b/test/common/upstream/load_balancer_benchmark.cc index f8e1177da0f9..a70f4d920a9a 100644 --- a/test/common/upstream/load_balancer_benchmark.cc +++ b/test/common/upstream/load_balancer_benchmark.cc @@ -4,8 +4,8 @@ #include "envoy/config/cluster/v3/cluster.pb.h" +#include "common/common/random_generator.h" #include "common/memory/stats.h" -#include "common/runtime/runtime_impl.h" #include "common/upstream/maglev_lb.h" #include "common/upstream/ring_hash_lb.h" #include "common/upstream/upstream_impl.h" @@ -51,7 +51,7 @@ class BaseTester { Stats::IsolatedStoreImpl stats_store_; ClusterStats stats_{ClusterInfoImpl::generateStats(stats_store_)}; NiceMock runtime_; - Runtime::RandomGeneratorImpl random_; + Random::RandomGeneratorImpl random_; envoy::config::cluster::v3::Cluster::CommonLbConfig common_config_; std::shared_ptr info_{new NiceMock()}; }; diff --git a/test/common/upstream/load_balancer_impl_test.cc b/test/common/upstream/load_balancer_impl_test.cc index ced9ec06ae29..06e398018e95 100644 --- a/test/common/upstream/load_balancer_impl_test.cc +++ b/test/common/upstream/load_balancer_impl_test.cc @@ -39,7 +39,7 @@ class LoadBalancerTestBase : public testing::TestWithParam { Stats::IsolatedStoreImpl stats_store_; ClusterStats stats_; NiceMock runtime_; - NiceMock random_; + NiceMock random_; NiceMock priority_set_; MockHostSet& host_set_ = *priority_set_.getMockHostSet(0); MockHostSet& failover_host_set_ = *priority_set_.getMockHostSet(1); @@ -51,7 +51,7 @@ class LoadBalancerTestBase : public testing::TestWithParam { class TestLb : public LoadBalancerBase { public: TestLb(const PrioritySet& priority_set, ClusterStats& stats, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, + Random::RandomGenerator& random, const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config) : LoadBalancerBase(priority_set, stats, runtime, random, common_config) {} using LoadBalancerBase::chooseHostSet; diff --git a/test/common/upstream/load_balancer_simulation_test.cc b/test/common/upstream/load_balancer_simulation_test.cc index d220d1410731..3d8dd616eec2 100644 --- a/test/common/upstream/load_balancer_simulation_test.cc +++ b/test/common/upstream/load_balancer_simulation_test.cc @@ -8,8 +8,8 @@ #include "envoy/config/endpoint/v3/endpoint_components.pb.h" #include "common/common/fmt.h" +#include "common/common/random_generator.h" #include "common/network/utility.h" -#include "common/runtime/runtime_impl.h" #include "common/upstream/load_balancer_impl.h" #include "common/upstream/upstream_impl.h" @@ -69,7 +69,7 @@ TEST(DISABLED_LeastRequestLoadBalancerWeightTest, Weight) { ClusterStats stats{ClusterInfoImpl::generateStats(stats_store)}; stats.max_host_weight_.set(weight); NiceMock runtime; - Runtime::RandomGeneratorImpl random; + Random::RandomGeneratorImpl random; 
envoy::config::cluster::v3::Cluster::LeastRequestLbConfig least_request_lb_config; envoy::config::cluster::v3::Cluster::CommonLbConfig common_config; LeastRequestLoadBalancer lb_{ @@ -235,7 +235,7 @@ class DISABLED_SimulationTest : public testing::Test { MockHostSet& host_set_ = *priority_set_.getMockHostSet(0); std::shared_ptr info_{new NiceMock()}; NiceMock runtime_; - Runtime::RandomGeneratorImpl random_; + Random::RandomGeneratorImpl random_; Stats::IsolatedStoreImpl stats_store_; ClusterStats stats_; envoy::config::cluster::v3::Cluster::CommonLbConfig common_config_; diff --git a/test/common/upstream/logical_dns_cluster_test.cc b/test/common/upstream/logical_dns_cluster_test.cc index df0aef7ca10e..54404aab2c5f 100644 --- a/test/common/upstream/logical_dns_cluster_test.cc +++ b/test/common/upstream/logical_dns_cluster_test.cc @@ -198,7 +198,7 @@ class LogicalDnsClusterTest : public testing::Test { std::shared_ptr> dns_resolver_{ new NiceMock}; Network::MockActiveDnsQuery active_dns_query_; - NiceMock random_; + NiceMock random_; Network::DnsResolver::ResolveCb dns_callback_; NiceMock tls_; Event::MockTimer* resolve_timer_; diff --git a/test/common/upstream/maglev_lb_test.cc b/test/common/upstream/maglev_lb_test.cc index 3fce26252ac4..25456820dd4d 100644 --- a/test/common/upstream/maglev_lb_test.cc +++ b/test/common/upstream/maglev_lb_test.cc @@ -53,7 +53,7 @@ class MaglevLoadBalancerTest : public testing::Test { ClusterStats stats_; envoy::config::cluster::v3::Cluster::CommonLbConfig common_config_; NiceMock runtime_; - NiceMock random_; + NiceMock random_; std::unique_ptr lb_; }; diff --git a/test/common/upstream/original_dst_cluster_test.cc b/test/common/upstream/original_dst_cluster_test.cc index 80bb3e2c8910..6e920a6e6370 100644 --- a/test/common/upstream/original_dst_cluster_test.cc +++ b/test/common/upstream/original_dst_cluster_test.cc @@ -99,7 +99,7 @@ class OriginalDstClusterTest : public testing::Test { NiceMock runtime_; NiceMock dispatcher_; Event::MockTimer* cleanup_timer_; - NiceMock random_; + NiceMock random_; NiceMock local_info_; NiceMock admin_; Singleton::ManagerImpl singleton_manager_{Thread::threadFactoryForTest()}; diff --git a/test/common/upstream/ring_hash_lb_test.cc b/test/common/upstream/ring_hash_lb_test.cc index fbd4906e01fe..9c8a0c7b2652 100644 --- a/test/common/upstream/ring_hash_lb_test.cc +++ b/test/common/upstream/ring_hash_lb_test.cc @@ -71,7 +71,7 @@ class RingHashLoadBalancerTest : public testing::TestWithParam { absl::optional config_; envoy::config::cluster::v3::Cluster::CommonLbConfig common_config_; NiceMock runtime_; - NiceMock random_; + NiceMock random_; std::unique_ptr lb_; }; diff --git a/test/common/upstream/subset_lb_test.cc b/test/common/upstream/subset_lb_test.cc index 40ae53bddf19..e9acf9f407bc 100644 --- a/test/common/upstream/subset_lb_test.cc +++ b/test/common/upstream/subset_lb_test.cc @@ -463,7 +463,7 @@ class SubsetLoadBalancerTest : public testing::TestWithParam { envoy::config::cluster::v3::Cluster::LeastRequestLbConfig least_request_lb_config_; envoy::config::cluster::v3::Cluster::CommonLbConfig common_config_; NiceMock runtime_; - NiceMock random_; + NiceMock random_; Stats::IsolatedStoreImpl stats_store_; ClusterStats stats_; PrioritySetImpl local_priority_set_; diff --git a/test/common/upstream/test_cluster_manager.h b/test/common/upstream/test_cluster_manager.h index a71d276471fa..e6c51563c668 100644 --- a/test/common/upstream/test_cluster_manager.h +++ b/test/common/upstream/test_cluster_manager.h @@ -126,7 +126,7 @@ 
class TestClusterManagerFactory : public ClusterManagerFactory { std::shared_ptr> dns_resolver_{ new NiceMock}; NiceMock runtime_; - NiceMock random_; + NiceMock random_; NiceMock dispatcher_; Extensions::TransportSockets::Tls::ContextManagerImpl ssl_context_manager_{ dispatcher_.timeSource()}; @@ -160,7 +160,7 @@ class TestClusterManagerImpl : public ClusterManagerImpl { TestClusterManagerImpl(const envoy::config::bootstrap::v3::Bootstrap& bootstrap, ClusterManagerFactory& factory, Stats::Store& stats, ThreadLocal::Instance& tls, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, const LocalInfo::LocalInfo& local_info, + Random::RandomGenerator& random, const LocalInfo::LocalInfo& local_info, AccessLog::AccessLogManager& log_manager, Event::Dispatcher& main_thread_dispatcher, Server::Admin& admin, ProtobufMessage::ValidationContext& validation_context, Api::Api& api, @@ -185,7 +185,7 @@ class MockedUpdatedClusterManagerImpl : public TestClusterManagerImpl { MockedUpdatedClusterManagerImpl( const envoy::config::bootstrap::v3::Bootstrap& bootstrap, ClusterManagerFactory& factory, Stats::Store& stats, ThreadLocal::Instance& tls, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, const LocalInfo::LocalInfo& local_info, + Random::RandomGenerator& random, const LocalInfo::LocalInfo& local_info, AccessLog::AccessLogManager& log_manager, Event::Dispatcher& main_thread_dispatcher, Server::Admin& admin, ProtobufMessage::ValidationContext& validation_context, Api::Api& api, MockLocalClusterUpdate& local_cluster_update, MockLocalHostsRemoved& local_hosts_removed, diff --git a/test/common/upstream/upstream_impl_test.cc b/test/common/upstream/upstream_impl_test.cc index c433faedc3d5..624637edcbee 100644 --- a/test/common/upstream/upstream_impl_test.cc +++ b/test/common/upstream/upstream_impl_test.cc @@ -59,7 +59,7 @@ class UpstreamImplTestBase { NiceMock local_info_; NiceMock dispatcher_; NiceMock runtime_; - NiceMock random_; + NiceMock random_; Stats::TestUtil::TestStore stats_; Singleton::ManagerImpl singleton_manager_{Thread::threadFactoryForTest()}; NiceMock tls_; @@ -2199,7 +2199,7 @@ class ClusterInfoImplTest : public testing::Test { NiceMock runtime_; NiceMock cm_; NiceMock local_info_; - NiceMock random_; + NiceMock random_; NiceMock admin_; Singleton::ManagerImpl singleton_manager_{Thread::threadFactoryForTest()}; NiceMock tls_; diff --git a/test/config_test/config_test.cc b/test/config_test/config_test.cc index 2b25b9df8523..1074186abd65 100644 --- a/test/config_test/config_test.cc +++ b/test/config_test/config_test.cc @@ -146,7 +146,7 @@ class ConfigTest { NiceMock worker_factory_; Server::ListenerManagerImpl listener_manager_{server_, component_factory_, worker_factory_, false}; - Runtime::RandomGeneratorImpl random_; + Random::RandomGeneratorImpl random_; Runtime::SnapshotConstSharedPtr snapshot_{std::make_shared>()}; NiceMock os_sys_calls_; TestThreadsafeSingletonInjector os_calls{&os_sys_calls_}; diff --git a/test/exe/main_common_test.cc b/test/exe/main_common_test.cc index 3dcd35ea111e..2305986ec677 100644 --- a/test/exe/main_common_test.cc +++ b/test/exe/main_common_test.cc @@ -2,6 +2,7 @@ #include "common/common/lock_guard.h" #include "common/common/mutex_tracer_impl.h" +#include "common/common/random_generator.h" #include "common/common/thread.h" #include "common/runtime/runtime_impl.h" @@ -9,7 +10,7 @@ #include "server/options_impl.h" -#include "test/mocks/runtime/mocks.h" +#include "test/mocks/common.h" #include "test/test_common/contention.h" #include 
"test/test_common/environment.h" #include "test/test_common/utility.h" @@ -113,14 +114,14 @@ TEST_P(MainCommonTest, RetryDynamicBaseIdFails) { config_file_, "--base-id-path", base_id_path}); OptionsImpl first_options(first_args, &MainCommon::hotRestartVersion, spdlog::level::info); MainCommonBase first(first_options, real_time_system, default_listener_hooks, - prod_component_factory, std::make_unique(), + prod_component_factory, std::make_unique(), platform.threadFactory(), platform.fileSystem(), nullptr); const std::string base_id_str = TestEnvironment::readFileToStringForTest(base_id_path); uint32_t base_id; ASSERT_TRUE(absl::SimpleAtoi(base_id_str, &base_id)); - auto* mock_rng = new NiceMock(); + auto* mock_rng = new NiceMock(); EXPECT_CALL(*mock_rng, random()).WillRepeatedly(Return(base_id)); const auto second_args = @@ -129,7 +130,7 @@ TEST_P(MainCommonTest, RetryDynamicBaseIdFails) { EXPECT_THROW_WITH_MESSAGE( MainCommonBase(second_options, real_time_system, default_listener_hooks, - prod_component_factory, std::unique_ptr{mock_rng}, + prod_component_factory, std::unique_ptr{mock_rng}, platform.threadFactory(), platform.fileSystem(), nullptr), EnvoyException, "unable to select a dynamic base id"); #endif diff --git a/test/extensions/clusters/aggregate/cluster_test.cc b/test/extensions/clusters/aggregate/cluster_test.cc index 2d3848206aae..748ab076a90f 100644 --- a/test/extensions/clusters/aggregate/cluster_test.cc +++ b/test/extensions/clusters/aggregate/cluster_test.cc @@ -122,7 +122,7 @@ class AggregateClusterTest : public testing::Test { Stats::IsolatedStoreImpl stats_store_; Ssl::MockContextManager ssl_context_manager_; NiceMock cm_; - NiceMock random_; + NiceMock random_; NiceMock tls_; NiceMock runtime_; NiceMock dispatcher_; diff --git a/test/extensions/clusters/dynamic_forward_proxy/cluster_test.cc b/test/extensions/clusters/dynamic_forward_proxy/cluster_test.cc index 08570d556bbe..cf9116a3d56c 100644 --- a/test/extensions/clusters/dynamic_forward_proxy/cluster_test.cc +++ b/test/extensions/clusters/dynamic_forward_proxy/cluster_test.cc @@ -107,7 +107,7 @@ class ClusterTest : public testing::Test, Stats::IsolatedStoreImpl stats_store_; Ssl::MockContextManager ssl_context_manager_; NiceMock cm_; - NiceMock random_; + NiceMock random_; NiceMock tls_; NiceMock runtime_; NiceMock dispatcher_; @@ -216,7 +216,7 @@ class ClusterFactoryTest : public testing::Test { Stats::IsolatedStoreImpl stats_store_; NiceMock ssl_context_manager_; NiceMock cm_; - NiceMock random_; + NiceMock random_; NiceMock tls_; NiceMock runtime_; NiceMock dispatcher_; diff --git a/test/extensions/clusters/redis/redis_cluster_integration_test.cc b/test/extensions/clusters/redis/redis_cluster_integration_test.cc index 044d767b1ca1..33f160e8d43f 100644 --- a/test/extensions/clusters/redis/redis_cluster_integration_test.cc +++ b/test/extensions/clusters/redis/redis_cluster_integration_test.cc @@ -170,7 +170,7 @@ class RedisClusterIntegrationTest : public testing::TestWithParam(&(test_server.server().random())); + mock_rng_ = dynamic_cast(&(test_server.server().random())); // Abort now if we cannot downcast the server's random number generator pointer. ASSERT_TRUE(mock_rng_ != nullptr); // Ensure that fake_upstreams_[0] is the load balancer's host of choice by default. 
@@ -356,7 +356,7 @@ class RedisClusterIntegrationTest : public testing::TestWithParam factory_; std::unique_ptr lb_; std::shared_ptr info_{new NiceMock()}; - NiceMock random_; + NiceMock random_; }; class RedisLoadBalancerContextImplTest : public testing::Test { diff --git a/test/extensions/clusters/redis/redis_cluster_test.cc b/test/extensions/clusters/redis/redis_cluster_test.cc index 7c6a8cadf47d..5ce57a167231 100644 --- a/test/extensions/clusters/redis/redis_cluster_test.cc +++ b/test/extensions/clusters/redis/redis_cluster_test.cc @@ -547,7 +547,7 @@ class RedisClusterTest : public testing::Test, Ssl::MockContextManager ssl_context_manager_; std::shared_ptr> dns_resolver_{ new NiceMock}; - NiceMock random_; + NiceMock random_; NiceMock tls_; Event::MockTimer* resolve_timer_; ReadyWatcher membership_updated_; diff --git a/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc b/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc index 26db365de145..9e99ee4b956f 100644 --- a/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc +++ b/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc @@ -60,7 +60,7 @@ class DnsCacheImplTest : public testing::Test, public Event::TestUsingSimulatedT NiceMock dispatcher_; std::shared_ptr resolver_{std::make_shared()}; NiceMock tls_; - NiceMock random_; + NiceMock random_; NiceMock loader_; Stats::IsolatedStoreImpl store_; std::unique_ptr dns_cache_; @@ -675,7 +675,7 @@ TEST_F(DnsCacheImplTest, ClustersCircuitBreakersOverflow) { TEST(DnsCacheManagerImplTest, LoadViaConfig) { NiceMock dispatcher; NiceMock tls; - NiceMock random; + NiceMock random; NiceMock loader; Stats::IsolatedStoreImpl store; DnsCacheManagerImpl cache_manager(dispatcher, tls, random, loader, store); @@ -710,7 +710,7 @@ TEST(DnsCacheManagerImplTest, LoadViaConfig) { // I spent too much time trying to figure this out. So for the moment I have copied this test body // here. I will spend some more time fixing this, but wanted to land unblocking functionality first. TEST(UtilityTest, PrepareDnsRefreshStrategy) { - NiceMock random; + NiceMock random; { // dns_failure_refresh_rate not set. 
diff --git a/test/extensions/filters/http/adaptive_concurrency/controller/gradient_controller_test.cc b/test/extensions/filters/http/adaptive_concurrency/controller/gradient_controller_test.cc index 5317009fb78b..3134e30c906c 100644 --- a/test/extensions/filters/http/adaptive_concurrency/controller/gradient_controller_test.cc +++ b/test/extensions/filters/http/adaptive_concurrency/controller/gradient_controller_test.cc @@ -11,6 +11,7 @@ #include "extensions/filters/http/adaptive_concurrency/controller/gradient_controller.h" #include "test/common/stats/stat_test_utility.h" +#include "test/mocks/common.h" #include "test/mocks/event/mocks.h" #include "test/mocks/runtime/mocks.h" #include "test/test_common/simulated_time_system.h" @@ -111,7 +112,7 @@ class GradientControllerTest : public testing::Test { NiceMock runtime_; Api::ApiPtr api_; Event::DispatcherPtr dispatcher_; - NiceMock random_; + NiceMock random_; }; TEST_F(GradientControllerConfigTest, BasicTest) { diff --git a/test/extensions/filters/http/admission_control/admission_control_filter_test.cc b/test/extensions/filters/http/admission_control/admission_control_filter_test.cc index 0b188f78c139..5774f5beb7b0 100644 --- a/test/extensions/filters/http/admission_control/admission_control_filter_test.cc +++ b/test/extensions/filters/http/admission_control/admission_control_filter_test.cc @@ -48,7 +48,7 @@ class MockResponseEvaluator : public ResponseEvaluator { class TestConfig : public AdmissionControlFilterConfig { public: TestConfig(const AdmissionControlProto& proto_config, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, Stats::Scope& scope, ThreadLocal::SlotPtr&& tls, + Random::RandomGenerator& random, Stats::Scope& scope, ThreadLocal::SlotPtr&& tls, MockThreadLocalController& controller, std::shared_ptr evaluator) : AdmissionControlFilterConfig(proto_config, runtime, random, scope, std::move(tls), std::move(evaluator)), @@ -104,7 +104,7 @@ class AdmissionControlTest : public testing::Test { NiceMock context_; Stats::IsolatedStoreImpl scope_; Event::SimulatedTimeSystem time_system_; - NiceMock random_; + NiceMock random_; std::shared_ptr filter_; NiceMock decoder_callbacks_; NiceMock controller_; diff --git a/test/extensions/filters/http/admission_control/config_test.cc b/test/extensions/filters/http/admission_control/config_test.cc index aa716a054e1e..49432e4a6605 100644 --- a/test/extensions/filters/http/admission_control/config_test.cc +++ b/test/extensions/filters/http/admission_control/config_test.cc @@ -43,7 +43,7 @@ class AdmissionControlConfigTest : public testing::Test { NiceMock runtime_; NiceMock context_; Stats::IsolatedStoreImpl scope_; - NiceMock random_; + NiceMock random_; }; // Verify the configuration when all fields are set. 
diff --git a/test/extensions/filters/network/client_ssl_auth/client_ssl_auth_test.cc b/test/extensions/filters/network/client_ssl_auth/client_ssl_auth_test.cc index ee92f3dbdb19..cd18c5da8c54 100644 --- a/test/extensions/filters/network/client_ssl_auth/client_ssl_auth_test.cc +++ b/test/extensions/filters/network/client_ssl_auth/client_ssl_auth_test.cc @@ -113,7 +113,7 @@ stat_prefix: vpn Event::MockTimer* interval_timer_; Http::AsyncClient::Callbacks* callbacks_; Stats::TestUtil::TestStore stats_store_; - NiceMock random_; + NiceMock random_; Api::ApiPtr api_; std::shared_ptr ssl_; }; diff --git a/test/extensions/filters/network/dubbo_proxy/conn_manager_test.cc b/test/extensions/filters/network/dubbo_proxy/conn_manager_test.cc index 226a9bc5161b..9bafde1f0e8b 100644 --- a/test/extensions/filters/network/dubbo_proxy/conn_manager_test.cc +++ b/test/extensions/filters/network/dubbo_proxy/conn_manager_test.cc @@ -318,7 +318,7 @@ class ConnectionManagerTest : public testing::Test { Buffer::OwnedImpl buffer_; Buffer::OwnedImpl write_buffer_; NiceMock filter_callbacks_; - NiceMock random_; + NiceMock random_; std::unique_ptr conn_manager_; MockSerializer* custom_serializer_{}; MockProtocol* custom_protocol_{}; diff --git a/test/extensions/filters/network/thrift_proxy/conn_manager_test.cc b/test/extensions/filters/network/thrift_proxy/conn_manager_test.cc index 5de6de271950..e2cd685452cf 100644 --- a/test/extensions/filters/network/thrift_proxy/conn_manager_test.cc +++ b/test/extensions/filters/network/thrift_proxy/conn_manager_test.cc @@ -304,7 +304,7 @@ class ThriftConnectionManagerTest : public testing::Test { Buffer::OwnedImpl buffer_; Buffer::OwnedImpl write_buffer_; NiceMock filter_callbacks_; - NiceMock random_; + NiceMock random_; std::unique_ptr filter_; MockTransport* custom_transport_{}; MockProtocol* custom_protocol_{}; diff --git a/test/extensions/filters/udp/dns_filter/dns_filter_fuzz_test.cc b/test/extensions/filters/udp/dns_filter/dns_filter_fuzz_test.cc index b8dfc48eb504..4eceac1226eb 100644 --- a/test/extensions/filters/udp/dns_filter/dns_filter_fuzz_test.cc +++ b/test/extensions/filters/udp/dns_filter/dns_filter_fuzz_test.cc @@ -4,6 +4,7 @@ #include "test/fuzz/fuzz_runner.h" #include "test/fuzz/utility.h" +#include "test/mocks/common.h" #include "test/mocks/event/mocks.h" #include "test/test_common/environment.h" @@ -20,7 +21,7 @@ DEFINE_FUZZER(const uint8_t* buf, size_t len) { static const auto local = Network::Utility::parseInternetAddressAndPort("127.0.2.1:5353"); static const auto peer = Network::Utility::parseInternetAddressAndPort("127.0.2.1:55088"); - static NiceMock random; + static NiceMock random; static NiceMock histogram; histogram.unit_ = Stats::Histogram::Unit::Milliseconds; static Api::ApiPtr api = Api::createApiForTest(); diff --git a/test/extensions/filters/udp/dns_filter/dns_filter_integration_test.cc b/test/extensions/filters/udp/dns_filter/dns_filter_integration_test.cc index 46d1e8ff070f..26628682a227 100644 --- a/test/extensions/filters/udp/dns_filter/dns_filter_integration_test.cc +++ b/test/extensions/filters/udp/dns_filter/dns_filter_integration_test.cc @@ -183,7 +183,7 @@ name: listener_1 Api::ApiPtr api_; NiceMock histogram_; - NiceMock random_; + NiceMock random_; NiceMock mock_query_buffer_underflow_; NiceMock mock_record_name_overflow_; NiceMock query_parsing_failure_; diff --git a/test/extensions/filters/udp/dns_filter/dns_filter_test.cc b/test/extensions/filters/udp/dns_filter/dns_filter_test.cc index d0d1b151be7d..80914f2f7cbd 100644 --- 
a/test/extensions/filters/udp/dns_filter/dns_filter_test.cc +++ b/test/extensions/filters/udp/dns_filter/dns_filter_test.cc @@ -100,7 +100,7 @@ class DnsFilterTest : public testing::Test, public Event::TestUsingSimulatedTime Network::UdpRecvData udp_response_; NiceMock file_system_; NiceMock histogram_; - NiceMock random_; + NiceMock random_; NiceMock listener_factory_; Stats::IsolatedStoreImpl stats_store_; std::shared_ptr resolver_; diff --git a/test/extensions/filters/udp/dns_filter/dns_filter_test_utils.cc b/test/extensions/filters/udp/dns_filter/dns_filter_test_utils.cc index ca184dbf601b..364ee8b3094c 100644 --- a/test/extensions/filters/udp/dns_filter/dns_filter_test_utils.cc +++ b/test/extensions/filters/udp/dns_filter/dns_filter_test_utils.cc @@ -1,6 +1,6 @@ #include "dns_filter_test_utils.h" -#include "common/runtime/runtime_impl.h" +#include "common/common/random_generator.h" #include "test/test_common/utility.h" @@ -19,7 +19,7 @@ std::string buildQueryFromBytes(const char* bytes, const size_t count) { } std::string buildQueryForDomain(const std::string& name, uint16_t rec_type, uint16_t rec_class) { - Runtime::RandomGeneratorImpl random_; + Random::RandomGeneratorImpl random_; struct DnsMessageParser::DnsHeader query {}; uint16_t id = random_.random() & 0xFFFF; diff --git a/test/extensions/health_checkers/redis/config_test.cc b/test/extensions/health_checkers/redis/config_test.cc index cf84215a5173..84945209284e 100644 --- a/test/extensions/health_checkers/redis/config_test.cc +++ b/test/extensions/health_checkers/redis/config_test.cc @@ -157,7 +157,7 @@ TEST(HealthCheckerFactoryTest, CreateRedisViaUpstreamHealthCheckerFactory) { NiceMock cluster; Runtime::MockLoader runtime; - Runtime::MockRandomGenerator random; + Random::MockRandomGenerator random; Event::MockDispatcher dispatcher; AccessLog::MockAccessLogManager log_manager; NiceMock api; diff --git a/test/extensions/health_checkers/redis/redis_test.cc b/test/extensions/health_checkers/redis/redis_test.cc index 3e3386b1bbe0..225de586128c 100644 --- a/test/extensions/health_checkers/redis/redis_test.cc +++ b/test/extensions/health_checkers/redis/redis_test.cc @@ -206,7 +206,7 @@ class RedisHealthCheckerTest std::shared_ptr cluster_; NiceMock dispatcher_; NiceMock runtime_; - NiceMock random_; + NiceMock random_; Upstream::MockHealthCheckEventLogger* event_logger_{}; Event::MockTimer* timeout_timer_{}; Event::MockTimer* interval_timer_{}; diff --git a/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc b/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc index cc9ef16f2abf..36c8d58429ad 100644 --- a/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc +++ b/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc @@ -257,8 +257,8 @@ class ActiveQuicListenerTest : public QuicMultiVersionTest { NiceMock tls_; Stats::TestUtil::TestStore store_; - Runtime::MockRandomGenerator generator_; - Runtime::MockRandomGenerator rand_; + Random::MockRandomGenerator generator_; + Random::MockRandomGenerator rand_; NiceMock local_info_; Init::MockManager init_manager_; NiceMock validation_visitor_; diff --git a/test/extensions/tracers/datadog/datadog_tracer_impl_test.cc b/test/extensions/tracers/datadog/datadog_tracer_impl_test.cc index 5043808dd78b..97414ee5a7f2 100644 --- a/test/extensions/tracers/datadog/datadog_tracer_impl_test.cc +++ b/test/extensions/tracers/datadog/datadog_tracer_impl_test.cc @@ -84,7 +84,7 @@ class DatadogDriverTest : public testing::Test { NiceMock* timer_; 
Stats::TestUtil::TestStore stats_; NiceMock cm_; - NiceMock random_; + NiceMock random_; NiceMock runtime_; NiceMock local_info_; diff --git a/test/extensions/tracers/lightstep/lightstep_tracer_impl_test.cc b/test/extensions/tracers/lightstep/lightstep_tracer_impl_test.cc index e515d5dad230..caba59fce679 100644 --- a/test/extensions/tracers/lightstep/lightstep_tracer_impl_test.cc +++ b/test/extensions/tracers/lightstep/lightstep_tracer_impl_test.cc @@ -124,7 +124,7 @@ class LightStepDriverTest : public testing::Test { std::unique_ptr driver_; NiceMock* timer_; NiceMock cm_; - NiceMock random_; + NiceMock random_; NiceMock runtime_; NiceMock local_info_; diff --git a/test/extensions/tracers/xray/localized_sampling_test.cc b/test/extensions/tracers/xray/localized_sampling_test.cc index 1d291ab60306..31b753032672 100644 --- a/test/extensions/tracers/xray/localized_sampling_test.cc +++ b/test/extensions/tracers/xray/localized_sampling_test.cc @@ -1,6 +1,6 @@ #include "extensions/tracers/xray/localized_sampling.h" -#include "test/mocks/runtime/mocks.h" +#include "test/mocks/common.h" #include "test/test_common/simulated_time_system.h" #include "gmock/gmock.h" @@ -22,19 +22,19 @@ class LocalizedSamplingStrategyTest : public ::testing::Test { }; TEST_F(LocalizedSamplingStrategyTest, EmptyRules) { - NiceMock random_generator; + NiceMock random_generator; LocalizedSamplingStrategy strategy{"", random_generator, time_system_}; ASSERT_TRUE(strategy.usingDefaultManifest()); } TEST_F(LocalizedSamplingStrategyTest, BadJson) { - NiceMock random_generator; + NiceMock random_generator; LocalizedSamplingStrategy strategy{"{{}", random_generator, time_system_}; ASSERT_TRUE(strategy.usingDefaultManifest()); } TEST_F(LocalizedSamplingStrategyTest, ValidCustomRules) { - NiceMock random_generator; + NiceMock random_generator; constexpr auto rules_json = R"EOF( { "version": 2, @@ -59,7 +59,7 @@ TEST_F(LocalizedSamplingStrategyTest, ValidCustomRules) { } TEST_F(LocalizedSamplingStrategyTest, InvalidRate) { - NiceMock random_generator; + NiceMock random_generator; constexpr auto rules_json = R"EOF( { "version": 2, @@ -84,7 +84,7 @@ TEST_F(LocalizedSamplingStrategyTest, InvalidRate) { } TEST_F(LocalizedSamplingStrategyTest, InvalidFixedTarget) { - NiceMock random_generator; + NiceMock random_generator; constexpr auto rules_json = R"EOF( { "version": 2, @@ -109,7 +109,7 @@ TEST_F(LocalizedSamplingStrategyTest, InvalidFixedTarget) { } TEST_F(LocalizedSamplingStrategyTest, DefaultRuleMissingRate) { - NiceMock random_generator; + NiceMock random_generator; constexpr auto rules_json = R"EOF( { "version": 2, @@ -133,7 +133,7 @@ TEST_F(LocalizedSamplingStrategyTest, DefaultRuleMissingRate) { } TEST_F(LocalizedSamplingStrategyTest, DefaultRuleMissingFixedTarget) { - NiceMock random_generator; + NiceMock random_generator; constexpr auto rules_json = R"EOF( { "version": 2, @@ -157,7 +157,7 @@ TEST_F(LocalizedSamplingStrategyTest, DefaultRuleMissingFixedTarget) { } TEST_F(LocalizedSamplingStrategyTest, WrongVersion) { - NiceMock random_generator; + NiceMock random_generator; constexpr auto wrong_version = R"EOF( { "version": 1, @@ -182,7 +182,7 @@ TEST_F(LocalizedSamplingStrategyTest, WrongVersion) { } TEST_F(LocalizedSamplingStrategyTest, MissingVersion) { - NiceMock random_generator; + NiceMock random_generator; constexpr auto missing_version = R"EOF( { "rules": [ @@ -206,7 +206,7 @@ TEST_F(LocalizedSamplingStrategyTest, MissingVersion) { } TEST_F(LocalizedSamplingStrategyTest, MissingDefaultRules) { - NiceMock 
random_generator; + NiceMock random_generator; constexpr auto rules_json = R"EOF( { "version": 2, @@ -227,7 +227,7 @@ TEST_F(LocalizedSamplingStrategyTest, MissingDefaultRules) { } TEST_F(LocalizedSamplingStrategyTest, CustomRuleHostIsNotString) { - NiceMock random_generator; + NiceMock random_generator; constexpr auto rules_json = R"EOF( { "version": 2, @@ -252,7 +252,7 @@ TEST_F(LocalizedSamplingStrategyTest, CustomRuleHostIsNotString) { } TEST_F(LocalizedSamplingStrategyTest, CustomRuleHttpMethodIsNotString) { - NiceMock random_generator; + NiceMock random_generator; constexpr auto rules_json = R"EOF( { "version": 2, @@ -277,7 +277,7 @@ TEST_F(LocalizedSamplingStrategyTest, CustomRuleHttpMethodIsNotString) { } TEST_F(LocalizedSamplingStrategyTest, CustomRuleUrlPathIsNotString) { - NiceMock random_generator; + NiceMock random_generator; constexpr auto rules_json = R"EOF( { "version": 2, @@ -302,7 +302,7 @@ TEST_F(LocalizedSamplingStrategyTest, CustomRuleUrlPathIsNotString) { } TEST_F(LocalizedSamplingStrategyTest, CustomRuleMissingFixedTarget) { - NiceMock random_generator; + NiceMock random_generator; constexpr auto rules_json = R"EOF( { "version": 2, @@ -326,7 +326,7 @@ TEST_F(LocalizedSamplingStrategyTest, CustomRuleMissingFixedTarget) { } TEST_F(LocalizedSamplingStrategyTest, CustomRuleMissingRate) { - NiceMock random_generator; + NiceMock random_generator; constexpr auto rules_json = R"EOF( { "version": 2, @@ -350,7 +350,7 @@ TEST_F(LocalizedSamplingStrategyTest, CustomRuleMissingRate) { } TEST_F(LocalizedSamplingStrategyTest, CustomRuleArrayElementWithWrongType) { - NiceMock random_generator; + NiceMock random_generator; constexpr auto rules_json = R"EOF( { "version": 2, @@ -375,7 +375,7 @@ TEST_F(LocalizedSamplingStrategyTest, CustomRuleArrayElementWithWrongType) { } TEST_F(LocalizedSamplingStrategyTest, CustomRuleNegativeFixedRate) { - NiceMock random_generator; + NiceMock random_generator; constexpr auto rules_json = R"EOF( { "version": 2, @@ -400,7 +400,7 @@ TEST_F(LocalizedSamplingStrategyTest, CustomRuleNegativeFixedRate) { } TEST_F(LocalizedSamplingStrategyTest, CustomRuleNegativeRate) { - NiceMock random_generator; + NiceMock random_generator; constexpr auto rules_json = R"EOF( { "version": 2, @@ -425,7 +425,7 @@ TEST_F(LocalizedSamplingStrategyTest, CustomRuleNegativeRate) { } TEST_F(LocalizedSamplingStrategyTest, TraceOnlyFromReservoir) { - NiceMock rng; + NiceMock rng; EXPECT_CALL(rng, random()).WillRepeatedly(Return(90)); constexpr auto rules_json = R"EOF( { @@ -459,7 +459,7 @@ TEST_F(LocalizedSamplingStrategyTest, TraceOnlyFromReservoir) { } TEST_F(LocalizedSamplingStrategyTest, TraceFromReservoirAndByRate) { - NiceMock rng; + NiceMock rng; EXPECT_CALL(rng, random()).WillRepeatedly(Return(1)); constexpr auto rules_json = R"EOF( { @@ -492,7 +492,7 @@ TEST_F(LocalizedSamplingStrategyTest, TraceFromReservoirAndByRate) { } TEST_F(LocalizedSamplingStrategyTest, NoMatchingHost) { - NiceMock rng; + NiceMock rng; // this following value doesn't affect the test EXPECT_CALL(rng, random()).WillRepeatedly(Return(50 /*50 percent*/)); // the following rules say: @@ -530,7 +530,7 @@ TEST_F(LocalizedSamplingStrategyTest, NoMatchingHost) { } TEST_F(LocalizedSamplingStrategyTest, NoMatchingHttpMethod) { - NiceMock rng; + NiceMock rng; // this following value doesn't affect the test EXPECT_CALL(rng, random()).WillRepeatedly(Return(50 /*50 percent*/)); // the following rules say: @@ -568,7 +568,7 @@ TEST_F(LocalizedSamplingStrategyTest, NoMatchingHttpMethod) { } 
TEST_F(LocalizedSamplingStrategyTest, NoMatchingPath) { - NiceMock rng; + NiceMock rng; // this following value doesn't affect the test EXPECT_CALL(rng, random()).WillRepeatedly(Return(50 /*50 percent*/)); // the following rules say: diff --git a/test/extensions/tracers/zipkin/tracer_test.cc b/test/extensions/tracers/zipkin/tracer_test.cc index 878af93da699..a085e7cc721b 100644 --- a/test/extensions/tracers/zipkin/tracer_test.cc +++ b/test/extensions/tracers/zipkin/tracer_test.cc @@ -45,7 +45,7 @@ class ZipkinTracerTest : public testing::Test { TEST_F(ZipkinTracerTest, SpanCreation) { Network::Address::InstanceConstSharedPtr addr = Network::Utility::parseInternetAddressAndPort("127.0.0.1:9000"); - NiceMock random_generator; + NiceMock random_generator; Tracer tracer("my_service_name", addr, random_generator, false, true, time_system_); SystemTime timestamp = time_system_.systemTime(); @@ -229,7 +229,7 @@ TEST_F(ZipkinTracerTest, SpanCreation) { TEST_F(ZipkinTracerTest, FinishSpan) { Network::Address::InstanceConstSharedPtr addr = Network::Utility::parseInternetAddressAndPort("127.0.0.1:9000"); - NiceMock random_generator; + NiceMock random_generator; Tracer tracer("my_service_name", addr, random_generator, false, true, time_system_); SystemTime timestamp = time_system_.systemTime(); @@ -312,7 +312,7 @@ TEST_F(ZipkinTracerTest, FinishSpan) { TEST_F(ZipkinTracerTest, FinishNotSampledSpan) { Network::Address::InstanceConstSharedPtr addr = Network::Utility::parseInternetAddressAndPort("127.0.0.1:9000"); - NiceMock random_generator; + NiceMock random_generator; Tracer tracer("my_service_name", addr, random_generator, false, true, time_system_); SystemTime timestamp = time_system_.systemTime(); @@ -340,7 +340,7 @@ TEST_F(ZipkinTracerTest, FinishNotSampledSpan) { TEST_F(ZipkinTracerTest, SpanSampledPropagatedToChild) { Network::Address::InstanceConstSharedPtr addr = Network::Utility::parseInternetAddressAndPort("127.0.0.1:9000"); - NiceMock random_generator; + NiceMock random_generator; Tracer tracer("my_service_name", addr, random_generator, false, true, time_system_); SystemTime timestamp = time_system_.systemTime(); @@ -368,7 +368,7 @@ TEST_F(ZipkinTracerTest, SpanSampledPropagatedToChild) { TEST_F(ZipkinTracerTest, RootSpan128bitTraceId) { Network::Address::InstanceConstSharedPtr addr = Network::Utility::parseInternetAddressAndPort("127.0.0.1:9000"); - NiceMock random_generator; + NiceMock random_generator; Tracer tracer("my_service_name", addr, random_generator, true, true, time_system_); SystemTime timestamp = time_system_.systemTime(); @@ -387,7 +387,7 @@ TEST_F(ZipkinTracerTest, RootSpan128bitTraceId) { TEST_F(ZipkinTracerTest, SharedSpanContext) { Network::Address::InstanceConstSharedPtr addr = Network::Utility::parseInternetAddressAndPort("127.0.0.1:9000"); - NiceMock random_generator; + NiceMock random_generator; const bool shared_span_context = true; Tracer tracer("my_service_name", addr, random_generator, false, shared_span_context, @@ -411,7 +411,7 @@ TEST_F(ZipkinTracerTest, SharedSpanContext) { TEST_F(ZipkinTracerTest, NotSharedSpanContext) { Network::Address::InstanceConstSharedPtr addr = Network::Utility::parseInternetAddressAndPort("127.0.0.1:9000"); - NiceMock random_generator; + NiceMock random_generator; const bool shared_span_context = false; Tracer tracer("my_service_name", addr, random_generator, false, shared_span_context, diff --git a/test/extensions/tracers/zipkin/zipkin_tracer_impl_test.cc b/test/extensions/tracers/zipkin/zipkin_tracer_impl_test.cc index 
4cbc21c3bc77..a873f77c01f2 100644 --- a/test/extensions/tracers/zipkin/zipkin_tracer_impl_test.cc +++ b/test/extensions/tracers/zipkin/zipkin_tracer_impl_test.cc @@ -146,7 +146,7 @@ class ZipkinDriverTest : public testing::Test { NiceMock cm_; NiceMock runtime_; NiceMock local_info_; - NiceMock random_; + NiceMock random_; NiceMock config_; Event::SimulatedTimeSystem test_time_; diff --git a/test/extensions/transport_sockets/tls/context_impl_test.cc b/test/extensions/transport_sockets/tls/context_impl_test.cc index b39d93c5096c..eb9646266a83 100644 --- a/test/extensions/transport_sockets/tls/context_impl_test.cc +++ b/test/extensions/transport_sockets/tls/context_impl_test.cc @@ -647,7 +647,7 @@ TEST_F(SslServerContextImplTicketTest, TicketKeySdsNotReady) { NiceMock local_info; NiceMock dispatcher; - NiceMock random; + NiceMock random; Stats::IsolatedStoreImpl stats; NiceMock cluster_manager; NiceMock init_manager; diff --git a/test/integration/http2_integration_test.cc b/test/integration/http2_integration_test.cc index 5ea7a7c2c734..f663fcc0ef2c 100644 --- a/test/integration/http2_integration_test.cc +++ b/test/integration/http2_integration_test.cc @@ -8,6 +8,7 @@ #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" #include "common/buffer/buffer_impl.h" +#include "common/common/random_generator.h" #include "common/http/header_map_impl.h" #include "common/network/socket_option_impl.h" @@ -262,7 +263,7 @@ TEST_P(Http2MetadataIntegrationTest, ProxyMultipleMetadata) { const int size = 4; std::vector multiple_vecs(size); for (int i = 0; i < size; i++) { - Runtime::RandomGeneratorImpl random; + Random::RandomGeneratorImpl random; int value_size = random.random() % Http::METADATA_MAX_PAYLOAD_SIZE + 1; Http::MetadataMap metadata_map = {{std::string(i, 'a'), std::string(value_size, 'b')}}; Http::MetadataMapPtr metadata_map_ptr = std::make_unique(metadata_map); diff --git a/test/integration/server.cc b/test/integration/server.cc index 2903c39d34ba..1b89ed3b995b 100644 --- a/test/integration/server.cc +++ b/test/integration/server.cc @@ -5,6 +5,7 @@ #include "envoy/http/header_map.h" +#include "common/common/random_generator.h" #include "common/common/thread.h" #include "common/local_info/local_info_impl.h" #include "common/network/utility.h" @@ -176,11 +177,11 @@ void IntegrationTestServer::threadRoutine(const Network::Address::IpVersion vers concurrency, drain_time, drain_strategy)); Thread::MutexBasicLockable lock; - Runtime::RandomGeneratorPtr random_generator; + Random::RandomGeneratorPtr random_generator; if (deterministic) { - random_generator = std::make_unique>(); + random_generator = std::make_unique>(); } else { - random_generator = std::make_unique(); + random_generator = std::make_unique(); } createAndRunEnvoyServer(options, time_system_, Network::Utility::getLocalAddress(version), *this, lock, *this, std::move(random_generator), process_object); @@ -200,7 +201,7 @@ void IntegrationTestServerImpl::createAndRunEnvoyServer( OptionsImpl& options, Event::TimeSystem& time_system, Network::Address::InstanceConstSharedPtr local_address, ListenerHooks& hooks, Thread::BasicLockable& access_log_lock, Server::ComponentFactory& component_factory, - Runtime::RandomGeneratorPtr&& random_generator, ProcessObjectOptRef process_object) { + Random::RandomGeneratorPtr&& random_generator, ProcessObjectOptRef process_object) { { Init::ManagerImpl init_manager{"Server"}; Server::HotRestartNopImpl restarter; diff --git a/test/integration/server.h 
b/test/integration/server.h index d584dd61631b..67e860ea4a4b 100644 --- a/test/integration/server.h +++ b/test/integration/server.h @@ -465,7 +465,7 @@ class IntegrationTestServer : public Logger::Loggable, Network::Address::InstanceConstSharedPtr local_address, ListenerHooks& hooks, Thread::BasicLockable& access_log_lock, Server::ComponentFactory& component_factory, - Runtime::RandomGeneratorPtr&& random_generator, + Random::RandomGeneratorPtr&& random_generator, ProcessObjectOptRef process_object) PURE; // Will be called by subclass on server thread when the server is ready to be accessed. The @@ -528,7 +528,7 @@ class IntegrationTestServerImpl : public IntegrationTestServer { Network::Address::InstanceConstSharedPtr local_address, ListenerHooks& hooks, Thread::BasicLockable& access_log_lock, Server::ComponentFactory& component_factory, - Runtime::RandomGeneratorPtr&& random_generator, + Random::RandomGeneratorPtr&& random_generator, ProcessObjectOptRef process_object) override; // Owned by this class. An owning pointer is not used because the actual allocation is done diff --git a/test/mocks/BUILD b/test/mocks/BUILD index e7f7a132092d..f12ced49f118 100644 --- a/test/mocks/BUILD +++ b/test/mocks/BUILD @@ -14,6 +14,7 @@ envoy_cc_test_library( hdrs = ["common.h"], deps = [ "//include/envoy/common:conn_pool_interface", + "//include/envoy/common:random_generator_interface", "//include/envoy/common:time_interface", "//include/envoy/common:token_bucket_interface", "//source/common/common:minimal_logger_lib", diff --git a/test/mocks/common.cc b/test/mocks/common.cc index bb5fda7a3ff5..ea4012729a8c 100644 --- a/test/mocks/common.cc +++ b/test/mocks/common.cc @@ -1,11 +1,21 @@ #include "test/mocks/common.h" +using testing::Return; + namespace Envoy { namespace ConnectionPool { MockCancellable::MockCancellable() = default; MockCancellable::~MockCancellable() = default; } // namespace ConnectionPool +namespace Random { + +MockRandomGenerator::MockRandomGenerator() { ON_CALL(*this, uuid()).WillByDefault(Return(uuid_)); } + +MockRandomGenerator::~MockRandomGenerator() = default; + +} // namespace Random + ReadyWatcher::ReadyWatcher() = default; ReadyWatcher::~ReadyWatcher() = default; diff --git a/test/mocks/common.h b/test/mocks/common.h index c9bf312e4c31..cf5a915825b2 100644 --- a/test/mocks/common.h +++ b/test/mocks/common.h @@ -3,6 +3,7 @@ #include #include "envoy/common/conn_pool.h" +#include "envoy/common/random_generator.h" #include "envoy/common/scope_tracker.h" #include "envoy/common/time.h" #include "envoy/common/token_bucket.h" @@ -109,4 +110,17 @@ class MockCancellable : public Cancellable { }; } // namespace ConnectionPool +namespace Random { +class MockRandomGenerator : public RandomGenerator { +public: + MockRandomGenerator(); + ~MockRandomGenerator() override; + + MOCK_METHOD(uint64_t, random, ()); + MOCK_METHOD(std::string, uuid, ()); + + const std::string uuid_{"a121e9e1-feae-4136-9e0e-6fac343d56c9"}; +}; +} // namespace Random + } // namespace Envoy diff --git a/test/mocks/runtime/mocks.cc b/test/mocks/runtime/mocks.cc index 2ad66ac70629..3afd1bbbf743 100644 --- a/test/mocks/runtime/mocks.cc +++ b/test/mocks/runtime/mocks.cc @@ -10,10 +10,6 @@ using testing::ReturnArg; namespace Envoy { namespace Runtime { -MockRandomGenerator::MockRandomGenerator() { ON_CALL(*this, uuid()).WillByDefault(Return(uuid_)); } - -MockRandomGenerator::~MockRandomGenerator() = default; - MockSnapshot::MockSnapshot() { ON_CALL(*this, getInteger(_, _)).WillByDefault(ReturnArg<1>()); ON_CALL(*this, 
getDouble(_, _)).WillByDefault(ReturnArg<1>()); diff --git a/test/mocks/runtime/mocks.h b/test/mocks/runtime/mocks.h index af0601436493..8d99b4bc402d 100644 --- a/test/mocks/runtime/mocks.h +++ b/test/mocks/runtime/mocks.h @@ -15,17 +15,6 @@ namespace Envoy { namespace Runtime { -class MockRandomGenerator : public RandomGenerator { -public: - MockRandomGenerator(); - ~MockRandomGenerator() override; - - MOCK_METHOD(uint64_t, random, ()); - MOCK_METHOD(std::string, uuid, ()); - - const std::string uuid_{"a121e9e1-feae-4136-9e0e-6fac343d56c9"}; -}; - class MockSnapshot : public Snapshot { public: MockSnapshot(); diff --git a/test/mocks/server/factory_context.h b/test/mocks/server/factory_context.h index cd1c70d4ecd6..1bdab558962e 100644 --- a/test/mocks/server/factory_context.h +++ b/test/mocks/server/factory_context.h @@ -28,7 +28,7 @@ class MockFactoryContext : public virtual FactoryContext { MOCK_METHOD(bool, healthCheckFailed, ()); MOCK_METHOD(Init::Manager&, initManager, ()); MOCK_METHOD(ServerLifecycleNotifier&, lifecycleNotifier, ()); - MOCK_METHOD(Envoy::Runtime::RandomGenerator&, random, ()); + MOCK_METHOD(Envoy::Random::RandomGenerator&, random, ()); MOCK_METHOD(Envoy::Runtime::Loader&, runtime, ()); MOCK_METHOD(Stats::Scope&, scope, ()); MOCK_METHOD(Singleton::Manager&, singletonManager, ()); @@ -56,7 +56,7 @@ class MockFactoryContext : public virtual FactoryContext { testing::NiceMock init_manager_; testing::NiceMock lifecycle_notifier_; testing::NiceMock local_info_; - testing::NiceMock random_; + testing::NiceMock random_; testing::NiceMock runtime_loader_; testing::NiceMock scope_; testing::NiceMock thread_local_; diff --git a/test/mocks/server/health_checker_factory_context.h b/test/mocks/server/health_checker_factory_context.h index 1d49abffb481..6de7f59dd77c 100644 --- a/test/mocks/server/health_checker_factory_context.h +++ b/test/mocks/server/health_checker_factory_context.h @@ -21,7 +21,7 @@ class MockHealthCheckerFactoryContext : public virtual HealthCheckerFactoryConte MOCK_METHOD(Upstream::Cluster&, cluster, ()); MOCK_METHOD(Event::Dispatcher&, dispatcher, ()); - MOCK_METHOD(Envoy::Runtime::RandomGenerator&, random, ()); + MOCK_METHOD(Envoy::Random::RandomGenerator&, random, ()); MOCK_METHOD(Envoy::Runtime::Loader&, runtime, ()); MOCK_METHOD(Upstream::HealthCheckEventLogger*, eventLogger_, ()); MOCK_METHOD(ProtobufMessage::ValidationVisitor&, messageValidationVisitor, ()); @@ -32,7 +32,7 @@ class MockHealthCheckerFactoryContext : public virtual HealthCheckerFactoryConte testing::NiceMock cluster_; testing::NiceMock dispatcher_; - testing::NiceMock random_; + testing::NiceMock random_; testing::NiceMock runtime_; testing::NiceMock* event_logger_{}; testing::NiceMock api_{}; diff --git a/test/mocks/server/instance.h b/test/mocks/server/instance.h index 67da77794772..c966eeaffe0a 100644 --- a/test/mocks/server/instance.h +++ b/test/mocks/server/instance.h @@ -65,7 +65,7 @@ class MockInstance : public Instance { MOCK_METHOD(Envoy::MutexTracer*, mutexTracer, ()); MOCK_METHOD(const Options&, options, ()); MOCK_METHOD(OverloadManager&, overloadManager, ()); - MOCK_METHOD(Runtime::RandomGenerator&, random, ()); + MOCK_METHOD(Random::RandomGenerator&, random, ()); MOCK_METHOD(Runtime::Loader&, runtime, ()); MOCK_METHOD(void, shutdown, ()); MOCK_METHOD(bool, isShutdown, ()); @@ -108,7 +108,7 @@ class MockInstance : public Instance { testing::NiceMock access_log_manager_; testing::NiceMock hot_restart_; testing::NiceMock options_; - testing::NiceMock random_; + testing::NiceMock 
random_; testing::NiceMock lifecycle_notifier_; testing::NiceMock local_info_; testing::NiceMock init_manager_; @@ -134,7 +134,7 @@ class MockServerFactoryContext : public virtual ServerFactoryContext { MOCK_METHOD(Event::Dispatcher&, dispatcher, ()); MOCK_METHOD(const Network::DrainDecision&, drainDecision, ()); MOCK_METHOD(const LocalInfo::LocalInfo&, localInfo, (), (const)); - MOCK_METHOD(Envoy::Runtime::RandomGenerator&, random, ()); + MOCK_METHOD(Envoy::Random::RandomGenerator&, random, ()); MOCK_METHOD(Envoy::Runtime::Loader&, runtime, ()); MOCK_METHOD(Stats::Scope&, scope, ()); MOCK_METHOD(Singleton::Manager&, singletonManager, ()); @@ -154,7 +154,7 @@ class MockServerFactoryContext : public virtual ServerFactoryContext { testing::NiceMock dispatcher_; testing::NiceMock drain_manager_; testing::NiceMock local_info_; - testing::NiceMock random_; + testing::NiceMock random_; testing::NiceMock runtime_loader_; testing::NiceMock scope_; testing::NiceMock thread_local_; diff --git a/test/mocks/server/transport_socket_factory_context.h b/test/mocks/server/transport_socket_factory_context.h index a86f2b348485..1995fb573d64 100644 --- a/test/mocks/server/transport_socket_factory_context.h +++ b/test/mocks/server/transport_socket_factory_context.h @@ -26,7 +26,7 @@ class MockTransportSocketFactoryContext : public TransportSocketFactoryContext { MOCK_METHOD(Upstream::ClusterManager&, clusterManager, ()); MOCK_METHOD(const LocalInfo::LocalInfo&, localInfo, (), (const)); MOCK_METHOD(Event::Dispatcher&, dispatcher, ()); - MOCK_METHOD(Envoy::Runtime::RandomGenerator&, random, ()); + MOCK_METHOD(Envoy::Random::RandomGenerator&, random, ()); MOCK_METHOD(Stats::Store&, stats, ()); MOCK_METHOD(Init::Manager&, initManager, ()); MOCK_METHOD(Singleton::Manager&, singletonManager, ()); diff --git a/test/server/config_validation/cluster_manager_test.cc b/test/server/config_validation/cluster_manager_test.cc index a1475f197281..55f7ccd98738 100644 --- a/test/server/config_validation/cluster_manager_test.cc +++ b/test/server/config_validation/cluster_manager_test.cc @@ -36,7 +36,7 @@ TEST(ValidationClusterManagerTest, MockedMethods) { Api::ApiPtr api(Api::createApiForTest(stats_store, time_system)); NiceMock runtime; NiceMock tls; - NiceMock random; + NiceMock random; testing::NiceMock secret_manager; auto dns_resolver = std::make_shared>(); Extensions::TransportSockets::Tls::ContextManagerImpl ssl_context_manager{api->timeSource()}; diff --git a/test/server/server_fuzz_test.cc b/test/server/server_fuzz_test.cc index d070444fdac8..b9778fad367f 100644 --- a/test/server/server_fuzz_test.cc +++ b/test/server/server_fuzz_test.cc @@ -3,6 +3,7 @@ #include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/config/core/v3/address.pb.h" +#include "common/common/random_generator.h" #include "common/network/address_impl.h" #include "common/thread_local/thread_local_impl.h" @@ -100,7 +101,7 @@ DEFINE_PROTO_FUZZER(const envoy::config::bootstrap::v3::Bootstrap& input) { server = std::make_unique( init_manager, options, test_time.timeSystem(), std::make_shared("127.0.0.1"), hooks, restart, stats_store, - fakelock, component_factory, std::make_unique(), + fakelock, component_factory, std::make_unique(), thread_local_instance, Thread::threadFactoryForTest(), Filesystem::fileSystemForTest(), nullptr); } catch (const EnvoyException& ex) { diff --git a/test/server/server_test.cc b/test/server/server_test.cc index 8f99b100ab55..556f71160bed 100644 --- a/test/server/server_test.cc +++ b/test/server/server_test.cc @@ 
-187,7 +187,7 @@ class ServerInstanceImplTestBase { *init_manager_, options_, time_system_, std::make_shared("127.0.0.1"), hooks_, restart_, stats_store_, fakelock_, component_factory_, - std::make_unique>(), *thread_local_, + std::make_unique>(), *thread_local_, Thread::threadFactoryForTest(), Filesystem::fileSystemForTest(), std::move(process_context_)); EXPECT_TRUE(server_->api().fileSystem().fileExists(TestEnvironment::nullDevicePath())); @@ -206,7 +206,7 @@ class ServerInstanceImplTestBase { *init_manager_, options_, time_system_, std::make_shared("127.0.0.1"), hooks_, restart_, stats_store_, fakelock_, component_factory_, - std::make_unique>(), *thread_local_, + std::make_unique>(), *thread_local_, Thread::threadFactoryForTest(), Filesystem::fileSystemForTest(), nullptr); EXPECT_TRUE(server_->api().fileSystem().fileExists(TestEnvironment::nullDevicePath())); @@ -990,7 +990,7 @@ TEST_P(ServerInstanceImplTest, NoOptionsPassed) { server_.reset(new InstanceImpl(*init_manager_, options_, time_system_, std::make_shared("127.0.0.1"), hooks_, restart_, stats_store_, fakelock_, component_factory_, - std::make_unique>(), + std::make_unique>(), *thread_local_, Thread::threadFactoryForTest(), Filesystem::fileSystemForTest(), nullptr)), EnvoyException, diff --git a/test/test_common/test_runtime.h b/test/test_common/test_runtime.h index 0532b5529f9f..08e0c441efc3 100644 --- a/test/test_common/test_runtime.h +++ b/test/test_common/test_runtime.h @@ -16,6 +16,7 @@ #include "common/runtime/runtime_impl.h" #include "common/stats/isolated_store_impl.h" +#include "test/mocks/common.h" #include "test/mocks/event/mocks.h" #include "test/mocks/init/mocks.h" #include "test/mocks/local_info/mocks.h" @@ -43,7 +44,7 @@ class TestScopedRuntime { Event::MockDispatcher dispatcher_; testing::NiceMock tls_; Stats::IsolatedStoreImpl store_; - Runtime::MockRandomGenerator generator_; + Random::MockRandomGenerator generator_; Api::ApiPtr api_; testing::NiceMock local_info_; testing::NiceMock validation_visitor_; diff --git a/test/tools/router_check/router.cc b/test/tools/router_check/router.cc index 74520ac641b8..e1aaf0782e3c 100644 --- a/test/tools/router_check/router.cc +++ b/test/tools/router_check/router.cc @@ -9,10 +9,10 @@ #include "envoy/config/route/v3/route.pb.h" #include "envoy/type/v3/percent.pb.h" +#include "common/common/random_generator.h" #include "common/network/utility.h" #include "common/protobuf/message_validator_impl.h" #include "common/protobuf/utility.h" -#include "common/runtime/runtime_impl.h" #include "common/stream_info/stream_info_impl.h" #include "test/test_common/printers.h" @@ -84,7 +84,7 @@ RouterCheckTool RouterCheckTool::create(const std::string& router_config_file, void RouterCheckTool::assignUniqueRouteNames( envoy::config::route::v3::RouteConfiguration& route_config) { - Runtime::RandomGeneratorImpl random; + Random::RandomGeneratorImpl random; for (auto& host : *route_config.mutable_virtual_hosts()) { for (auto& route : *host.mutable_routes()) { route.set_name(random.uuid()); From 8920bcc681e97b0b4a63fc62d2cf969d8eba96e2 Mon Sep 17 00:00:00 2001 From: Dhi Aurrahman Date: Sat, 11 Jul 2020 00:21:25 +0700 Subject: [PATCH 592/909] examples: Update front-proxy image and add listener with upstream TLS context (#11895) This updates the front-proxy docker-compose illustration and adds a listener upstream TLS context configured. 
Signed-off-by: Dhi Aurrahman --- .../_static/docker_compose_front_proxy.svg | 1 + docs/root/_static/docker_compose_v0.1.svg | 4 - docs/root/start/sandboxes/front_proxy.rst | 215 +++++++++++------- examples/front-proxy/docker-compose.yaml | 6 +- examples/front-proxy/front-envoy.yaml | 97 +++++++- examples/front-proxy/service.py | 2 +- 6 files changed, 233 insertions(+), 92 deletions(-) create mode 100644 docs/root/_static/docker_compose_front_proxy.svg delete mode 100644 docs/root/_static/docker_compose_v0.1.svg diff --git a/docs/root/_static/docker_compose_front_proxy.svg b/docs/root/_static/docker_compose_front_proxy.svg new file mode 100644 index 000000000000..12dc03b13253 --- /dev/null +++ b/docs/root/_static/docker_compose_front_proxy.svg @@ -0,0 +1 @@ + diff --git a/docs/root/_static/docker_compose_v0.1.svg b/docs/root/_static/docker_compose_v0.1.svg deleted file mode 100644 index 55236771d500..000000000000 --- a/docs/root/_static/docker_compose_v0.1.svg +++ /dev/null @@ -1,4 +0,0 @@ - - - - diff --git a/docs/root/start/sandboxes/front_proxy.rst b/docs/root/start/sandboxes/front_proxy.rst index d9c359fa37fb..41baf801a509 100644 --- a/docs/root/start/sandboxes/front_proxy.rst +++ b/docs/root/start/sandboxes/front_proxy.rst @@ -3,26 +3,25 @@ Front Proxy =========== -To get a flavor of what Envoy has to offer as a front proxy, we are releasing a -`docker compose `_ sandbox that deploys a front -envoy and a couple of services (simple flask apps) colocated with a running -service Envoy. The three containers will be deployed inside a virtual network -called ``envoymesh``. +To get a flavor of what Envoy has to offer as a front proxy, we are releasing a `docker compose `_ +sandbox that deploys a front Envoy and a couple of services (simple Flask apps) colocated with a +running service Envoy. The three containers will be deployed inside a virtual network called +``envoymesh``. Below you can see a graphic showing the docker compose deployment: -.. image:: /_static/docker_compose_v0.1.svg +.. image:: /_static/docker_compose_front_proxy.svg :width: 100% All incoming requests are routed via the front Envoy, which is acting as a reverse proxy sitting on -the edge of the ``envoymesh`` network. Port ``8000`` is exposed by docker compose -(see :repo:`/examples/front-proxy/docker-compose.yaml`). Moreover, notice -that all traffic routed by the front Envoy to the service containers is actually routed to the -service Envoys (routes setup in :repo:`/examples/front-proxy/front-envoy.yaml`). In turn the service -envoys route the request to the flask app via the loopback address (routes setup in -:repo:`/examples/front-proxy/service-envoy.yaml`). This setup -illustrates the advantage of running service Envoys collocated with your services: all requests are -handled by the service Envoy, and efficiently routed to your services. +the edge of the ``envoymesh`` network. Port ``8080``, ``8443``, and ``8001`` are exposed by docker +compose (see :repo:`/examples/front-proxy/docker-compose.yaml`) to handle ``HTTP``, ``HTTPS`` calls +to the services and requests to ``/admin`` respectively. Moreover, notice that all traffic routed +by the front Envoy to the service containers is actually routed to the service Envoys +(routes setup in :repo:`/examples/front-proxy/front-envoy.yaml`). In turn the service Envoys route +the request to the Flask app via the loopback address (routes setup in :repo:`/examples/front-proxy/service-envoy.yaml`). 
+This setup illustrates the advantage of running service Envoys collocated with your services: all +requests are handled by the service Envoy, and efficiently routed to your services. Running the Sandbox ~~~~~~~~~~~~~~~~~~~ @@ -43,98 +42,139 @@ or ``git clone https://github.com/envoyproxy/envoy.git``:: $ pwd envoy/examples/front-proxy - $ docker-compose pull - $ docker-compose up --build -d + $ docker-compose build --pull + $ docker-compose up -d $ docker-compose ps - Name Command State Ports - -------------------------------------------------------------------------------------------------------------------------- - front-proxy_front-envoy_1 /docker-entrypoint.sh /bin ... Up 10000/tcp, 0.0.0.0:8000->8000/tcp, 0.0.0.0:8001->8001/tcp + Name Command State Ports + ------------------------------------------------------------------------------------------------------------------------------------------------------ + front-proxy_front-envoy_1 /docker-entrypoint.sh /bin ... Up 10000/tcp, 0.0.0.0:8080->8080/tcp, 0.0.0.0:8001->8001/tcp, 0.0.0.0:8443->8443/tcp front-proxy_service1_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 8000/tcp front-proxy_service2_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 8000/tcp **Step 3: Test Envoy's routing capabilities** -You can now send a request to both services via the front-envoy. +You can now send a request to both services via the ``front-envoy``. -For service1:: +For ``service1``:: - $ curl -v localhost:8000/service/1 - * Trying 192.168.99.100... - * Connected to 192.168.99.100 (192.168.99.100) port 8000 (#0) + $ curl -v localhost:8080/service/1 + * Trying ::1... + * TCP_NODELAY set + * Connected to localhost (::1) port 8080 (#0) > GET /service/1 HTTP/1.1 - > Host: 192.168.99.100:8000 - > User-Agent: curl/7.54.0 + > Host: localhost:8080 + > User-Agent: curl/7.64.1 > Accept: */* > < HTTP/1.1 200 OK < content-type: text/html; charset=utf-8 - < content-length: 89 - < x-envoy-upstream-service-time: 1 + < content-length: 92 < server: envoy - < date: Fri, 26 Aug 2018 19:39:19 GMT + < date: Mon, 06 Jul 2020 06:20:00 GMT + < x-envoy-upstream-service-time: 2 < - Hello from behind Envoy (service 1)! hostname: f26027f1ce28 resolvedhostname: 172.19.0.6 - * Connection #0 to host 192.168.99.100 left intact + Hello from behind Envoy (service 1)! hostname: 36418bc3c824 resolvedhostname: 192.168.160.4 -For service2:: +For ``service2``:: - $ curl -v localhost:8000/service/2 - * Trying 192.168.99.100... - * Connected to 192.168.99.100 (192.168.99.100) port 8000 (#0) + $ curl -v localhost:8080/service/2 + * Trying ::1... + * TCP_NODELAY set + * Connected to localhost (::1) port 8080 (#0) > GET /service/2 HTTP/1.1 - > Host: 192.168.99.100:8000 - > User-Agent: curl/7.54.0 + > Host: localhost:8080 + > User-Agent: curl/7.64.1 > Accept: */* > < HTTP/1.1 200 OK < content-type: text/html; charset=utf-8 - < content-length: 89 + < content-length: 92 + < server: envoy + < date: Mon, 06 Jul 2020 06:23:13 GMT < x-envoy-upstream-service-time: 2 + < + Hello from behind Envoy (service 2)! hostname: ea6165ee4fee resolvedhostname: 192.168.160.2 + +Notice that each request, while sent to the front Envoy, was correctly routed to the respective +application. + +We can also use ``HTTPS`` to call services behind the front Envoy. For example, calling ``service1``:: + + $ curl https://localhost:8443/service/1 -k -v + * Trying ::1... 
+ * TCP_NODELAY set + * Connected to localhost (::1) port 8443 (#0) + * ALPN, offering h2 + * ALPN, offering http/1.1 + * successfully set certificate verify locations: + * CAfile: /etc/ssl/cert.pem + CApath: none + * TLSv1.2 (OUT), TLS handshake, Client hello (1): + * TLSv1.2 (IN), TLS handshake, Server hello (2): + * TLSv1.2 (IN), TLS handshake, Certificate (11): + * TLSv1.2 (IN), TLS handshake, Server key exchange (12): + * TLSv1.2 (IN), TLS handshake, Server finished (14): + * TLSv1.2 (OUT), TLS handshake, Client key exchange (16): + * TLSv1.2 (OUT), TLS change cipher, Change cipher spec (1): + * TLSv1.2 (OUT), TLS handshake, Finished (20): + * TLSv1.2 (IN), TLS change cipher, Change cipher spec (1): + * TLSv1.2 (IN), TLS handshake, Finished (20): + * SSL connection using TLSv1.2 / ECDHE-RSA-CHACHA20-POLY1305 + * ALPN, server did not agree to a protocol + * Server certificate: + * subject: CN=front-envoy + * start date: Jul 5 15:18:44 2020 GMT + * expire date: Jul 5 15:18:44 2021 GMT + * issuer: CN=front-envoy + * SSL certificate verify result: self signed certificate (18), continuing anyway. + > GET /service/1 HTTP/1.1 + > Host: localhost:8443 + > User-Agent: curl/7.64.1 + > Accept: */* + > + < HTTP/1.1 200 OK + < content-type: text/html; charset=utf-8 + < content-length: 92 < server: envoy - < date: Fri, 26 Aug 2018 19:39:23 GMT + < date: Mon, 06 Jul 2020 06:17:14 GMT + < x-envoy-upstream-service-time: 3 < - Hello from behind Envoy (service 2)! hostname: 92f4a3737bbc resolvedhostname: 172.19.0.2 - * Connection #0 to host 192.168.99.100 left intact - -Notice that each request, while sent to the front Envoy, was correctly routed -to the respective application. + Hello from behind Envoy (service 1)! hostname: 36418bc3c824 resolvedhostname: 192.168.160.4 **Step 4: Test Envoy's load balancing capabilities** -Now let's scale up our service1 nodes to demonstrate the load balancing abilities -of Envoy.:: +Now let's scale up our ``service1`` nodes to demonstrate the load balancing abilities of Envoy:: $ docker-compose scale service1=3 Creating and starting example_service1_2 ... done Creating and starting example_service1_3 ... done -Now if we send a request to service1 multiple times, the front Envoy will load balance the -requests by doing a round robin of the three service1 machines:: +Now if we send a request to ``service1`` multiple times, the front Envoy will load balance the +requests by doing a round robin of the three ``service1`` machines:: - $ curl -v localhost:8000/service/1 - * Trying 192.168.99.100... - * Connected to 192.168.99.100 (192.168.99.100) port 8000 (#0) + $ curl -v localhost:8080/service/1 + * Trying ::1... + * TCP_NODELAY set + * Connected to localhost (::1) port 8080 (#0) > GET /service/1 HTTP/1.1 - > Host: 192.168.99.100:8000 - > User-Agent: curl/7.43.0 + > Host: localhost:8080 + > User-Agent: curl/7.64.1 > Accept: */* > < HTTP/1.1 200 OK < content-type: text/html; charset=utf-8 - < content-length: 89 - < x-envoy-upstream-service-time: 1 + < content-length: 92 < server: envoy - < date: Fri, 26 Aug 2018 19:40:21 GMT - < x-envoy-protocol-version: HTTP/1.1 + < date: Mon, 06 Jul 2020 06:21:47 GMT + < x-envoy-upstream-service-time: 6 < - Hello from behind Envoy (service 1)! hostname: 85ac151715c6 resolvedhostname: 172.19.0.3 - * Connection #0 to host 192.168.99.100 left intact - $ curl -v localhost:8000/service/1 + Hello from behind Envoy (service 1)! hostname: 3dc787578c23 resolvedhostname: 192.168.160.6 + $ curl -v localhost:8080/service/1 * Trying 192.168.99.100... 
- * Connected to 192.168.99.100 (192.168.99.100) port 8000 (#0) + * Connected to 192.168.99.100 (192.168.99.100) port 8080 (#0) > GET /service/1 HTTP/1.1 - > Host: 192.168.99.100:8000 + > Host: 192.168.99.100:8080 > User-Agent: curl/7.54.0 > Accept: */* > @@ -145,13 +185,12 @@ requests by doing a round robin of the three service1 machines:: < server: envoy < date: Fri, 26 Aug 2018 19:40:22 GMT < - Hello from behind Envoy (service 1)! hostname: 20da22cfc955 resolvedhostname: 172.19.0.5 - * Connection #0 to host 192.168.99.100 left intact - $ curl -v localhost:8000/service/1 + Hello from behind Envoy (service 1)! hostname: 3a93ece62129 resolvedhostname: 192.168.160.5 + $ curl -v localhost:8080/service/1 * Trying 192.168.99.100... - * Connected to 192.168.99.100 (192.168.99.100) port 8000 (#0) + * Connected to 192.168.99.100 (192.168.99.100) port 8080 (#0) > GET /service/1 HTTP/1.1 - > Host: 192.168.99.100:8000 + > Host: 192.168.99.100:8080 > User-Agent: curl/7.43.0 > Accept: */* > @@ -163,8 +202,7 @@ requests by doing a round robin of the three service1 machines:: < date: Fri, 26 Aug 2018 19:40:24 GMT < x-envoy-protocol-version: HTTP/1.1 < - Hello from behind Envoy (service 1)! hostname: f26027f1ce28 resolvedhostname: 172.19.0.6 - * Connection #0 to host 192.168.99.100 left intact + Hello from behind Envoy (service 1)! hostname: 36418bc3c824 resolvedhostname: 192.168.160.4 **Step 5: enter containers and curl services** @@ -174,13 +212,13 @@ can use ``docker-compose exec /bin/bash``. For example we can enter the ``front-envoy`` container, and ``curl`` for services locally:: $ docker-compose exec front-envoy /bin/bash - root@81288499f9d7:/# curl localhost:8000/service/1 + root@81288499f9d7:/# curl localhost:8080/service/1 Hello from behind Envoy (service 1)! hostname: 85ac151715c6 resolvedhostname: 172.19.0.3 - root@81288499f9d7:/# curl localhost:8000/service/1 + root@81288499f9d7:/# curl localhost:8080/service/1 Hello from behind Envoy (service 1)! hostname: 20da22cfc955 resolvedhostname: 172.19.0.5 - root@81288499f9d7:/# curl localhost:8000/service/1 + root@81288499f9d7:/# curl localhost:8080/service/1 Hello from behind Envoy (service 1)! hostname: f26027f1ce28 resolvedhostname: 172.19.0.6 - root@81288499f9d7:/# curl localhost:8000/service/2 + root@81288499f9d7:/# curl localhost:8080/service/2 Hello from behind Envoy (service 2)! hostname: 92f4a3737bbc resolvedhostname: 172.19.0.2 **Step 6: enter containers and curl admin** @@ -188,8 +226,8 @@ enter the ``front-envoy`` container, and ``curl`` for services locally:: When Envoy runs it also attaches an ``admin`` to your desired port. In the example configs the admin is bound to port ``8001``. We can ``curl`` it to gain useful information. For example you can ``curl`` ``/server_info`` to get information about the -envoy version you are running. Additionally you can ``curl`` ``/stats`` to get -statistics. For example inside ``frontenvoy`` we can get:: +Envoy version you are running. Additionally you can ``curl`` ``/stats`` to get +statistics. For example inside ``front-envoy`` we can get:: $ docker-compose exec front-envoy /bin/bash root@e654c2c83277:/# curl localhost:8001/server_info @@ -197,35 +235,45 @@ statistics. For example inside ``frontenvoy`` we can get:: .. 
code-block:: json { - "version": "3ba949a9cb5b0b1cccd61e76159969a49377fd7d/1.10.0-dev/Clean/RELEASE/BoringSSL", + "version": "093e2ffe046313242144d0431f1bb5cf18d82544/1.15.0-dev/Clean/RELEASE/BoringSSL", "state": "LIVE", + "hot_restart_version": "11.104", "command_line_options": { "base_id": "0", - "concurrency": 4, + "use_dynamic_base_id": false, + "base_id_path": "", + "concurrency": 8, "config_path": "/etc/front-envoy.yaml", "config_yaml": "", "allow_unknown_static_fields": false, + "reject_unknown_dynamic_fields": false, + "ignore_unknown_dynamic_fields": false, "admin_address_path": "", "local_address_ip_version": "v4", "log_level": "info", "component_log_level": "", - "log_format": "[%Y-%m-%d %T.%e][%t][%l][%n] %v", + "log_format": "[%Y-%m-%d %T.%e][%t][%l][%n] [%g:%#] %v", + "log_format_escaped": false, "log_path": "", - "hot_restart_version": false, "service_cluster": "front-proxy", "service_node": "", "service_zone": "", + "drain_strategy": "Gradual", "mode": "Serve", "disable_hot_restart": false, "enable_mutex_tracing": false, "restart_epoch": 0, "cpuset_threads": false, + "disabled_extensions": [], + "bootstrap_version": 0, + "hidden_envoy_deprecated_max_stats": "0", + "hidden_envoy_deprecated_max_obj_name_len": "0", "file_flush_interval": "10s", "drain_time": "600s", "parent_shutdown_time": "900s" }, - "uptime_current_epoch": "401s", - "uptime_all_epochs": "401s" + "uptime_current_epoch": "188s", + "uptime_all_epochs": "188s" } .. code-block:: text @@ -250,6 +298,5 @@ statistics. For example inside ``frontenvoy`` we can get:: cluster.service2.upstream_rq_total: 2 ... -Notice that we can get the number of members of upstream clusters, number of requests -fulfilled by them, information about http ingress, and a plethora of other useful -stats. +Notice that we can get the number of members of upstream clusters, number of requests fulfilled by +them, information about http ingress, and a plethora of other useful stats. 
diff --git a/examples/front-proxy/docker-compose.yaml b/examples/front-proxy/docker-compose.yaml index 4e5f4590245b..f8de52edd298 100644 --- a/examples/front-proxy/docker-compose.yaml +++ b/examples/front-proxy/docker-compose.yaml @@ -10,10 +10,12 @@ services: networks: - envoymesh expose: - - "8000" + - "8080" + - "8443" - "8001" ports: - - "8000:8000" + - "8080:8080" + - "8443:8443" - "8001:8001" service1: diff --git a/examples/front-proxy/front-envoy.yaml b/examples/front-proxy/front-envoy.yaml index 2d3c5f1a95ab..c266022e6806 100644 --- a/examples/front-proxy/front-envoy.yaml +++ b/examples/front-proxy/front-envoy.yaml @@ -3,7 +3,7 @@ static_resources: - address: socket_address: address: 0.0.0.0 - port_value: 8000 + port_value: 8080 filter_chains: - filters: - name: envoy.filters.network.http_connection_manager @@ -29,6 +29,101 @@ static_resources: http_filters: - name: envoy.filters.http.router typed_config: {} + + - address: + socket_address: + address: 0.0.0.0 + port_value: 8443 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + codec_type: auto + stat_prefix: ingress_http + route_config: + name: local_route + virtual_hosts: + - name: backend + domains: + - "*" + routes: + - match: + prefix: "/service/1" + route: + cluster: service1 + - match: + prefix: "/service/2" + route: + cluster: service2 + http_filters: + - name: envoy.filters.http.router + typed_config: {} + + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificates: + # The following self-signed certificate pair is generated using: + # $ openssl req -x509 -newkey rsa:2048 -keyout a/front-proxy-key.pem -out a/front-proxy-crt.pem -days 3650 -nodes -subj '/CN=front-envoy' + # + # Instead of feeding it as an inline_string, certificate pair can also be fed to Envoy + # via filename. Reference: https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/core/v3/base.proto#config-core-v3-datasource. + # + # Or in a dynamic configuration scenario, certificate pair can be fetched remotely via + # Secret Discovery Service (SDS). Reference: https://www.envoyproxy.io/docs/envoy/latest/configuration/security/secret. 
+ certificate_chain: + inline_string: | + -----BEGIN CERTIFICATE----- + MIICqDCCAZACCQCquzpHNpqBcDANBgkqhkiG9w0BAQsFADAWMRQwEgYDVQQDDAtm + cm9udC1lbnZveTAeFw0yMDA3MDgwMTMxNDZaFw0zMDA3MDYwMTMxNDZaMBYxFDAS + BgNVBAMMC2Zyb250LWVudm95MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC + AQEAthnYkqVQBX+Wg7aQWyCCb87hBce1hAFhbRM8Y9dQTqxoMXZiA2n8G089hUou + oQpEdJgitXVS6YMFPFUUWfwcqxYAynLK4X5im26Yfa1eO8La8sZUS+4Bjao1gF5/ + VJxSEo2yZ7fFBo8M4E44ZehIIocipCRS+YZehFs6dmHoq/MGvh2eAHIa+O9xssPt + ofFcQMR8rwBHVbKy484O10tNCouX4yUkyQXqCRy6HRu7kSjOjNKSGtjfG+h5M8bh + 10W7ZrsJ1hWhzBulSaMZaUY3vh5ngpws1JATQVSK1Jm/dmMRciwlTK7KfzgxHlSX + 58ENpS7yPTISkEICcLbXkkKGEQIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQCmj6Hg + vwOxWz0xu+6fSfRL6PGJUGq6wghCfUvjfwZ7zppDUqU47fk+yqPIOzuGZMdAqi7N + v1DXkeO4A3hnMD22Rlqt25vfogAaZVToBeQxCPd/ALBLFrvLUFYuSlS3zXSBpQqQ + Ny2IKFYsMllz5RSROONHBjaJOn5OwqenJ91MPmTAG7ujXKN6INSBM0PjX9Jy4Xb9 + zT+I85jRDQHnTFce1WICBDCYidTIvJtdSSokGSuy4/xyxAAc/BpZAfOjBQ4G1QRe + 9XwOi790LyNUYFJVyeOvNJwveloWuPLHb9idmY5YABwikUY6QNcXwyHTbRCkPB2I + m+/R4XnmL4cKQ+5Z + -----END CERTIFICATE----- + private_key: + inline_string: | + -----BEGIN PRIVATE KEY----- + MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC2GdiSpVAFf5aD + tpBbIIJvzuEFx7WEAWFtEzxj11BOrGgxdmIDafwbTz2FSi6hCkR0mCK1dVLpgwU8 + VRRZ/ByrFgDKcsrhfmKbbph9rV47wtryxlRL7gGNqjWAXn9UnFISjbJnt8UGjwzg + Tjhl6EgihyKkJFL5hl6EWzp2Yeir8wa+HZ4Achr473Gyw+2h8VxAxHyvAEdVsrLj + zg7XS00Ki5fjJSTJBeoJHLodG7uRKM6M0pIa2N8b6HkzxuHXRbtmuwnWFaHMG6VJ + oxlpRje+HmeCnCzUkBNBVIrUmb92YxFyLCVMrsp/ODEeVJfnwQ2lLvI9MhKQQgJw + tteSQoYRAgMBAAECggEAeDGdEkYNCGQLe8pvg8Z0ccoSGpeTxpqGrNEKhjfi6NrB + NwyVav10iq4FxEmPd3nobzDPkAftfvWc6hKaCT7vyTkPspCMOsQJ39/ixOk+jqFx + lNa1YxyoZ9IV2DIHR1iaj2Z5gB367PZUoGTgstrbafbaNY9IOSyojCIO935ubbcx + DWwL24XAf51ez6sXnI8V5tXmrFlNXhbhJdH8iIxNyM45HrnlUlOk0lCK4gmLJjy9 + 10IS2H2Wh3M5zsTpihH1JvM56oAH1ahrhMXs/rVFXXkg50yD1KV+HQiEbglYKUxO + eMYtfaY9i2CuLwhDnWp3oxP3HfgQQhD09OEN3e0IlQKBgQDZ/3poG9TiMZSjfKqL + xnCABMXGVQsfFWNC8THoW6RRx5Rqi8q08yJrmhCu32YKvccsOljDQJQQJdQO1g09 + e/adJmCnTrqxNtjPkX9txV23Lp6Ak7emjiQ5ICu7iWxrcO3zf7hmKtj7z+av8sjO + mDI7NkX5vnlE74nztBEjp3eC0wKBgQDV2GeJV028RW3b/QyP3Gwmax2+cKLR9PKR + nJnmO5bxAT0nQ3xuJEAqMIss/Rfb/macWc2N/6CWJCRT6a2vgy6xBW+bqG6RdQMB + xEZXFZl+sSKhXPkc5Wjb4lQ14YWyRPrTjMlwez3k4UolIJhJmwl+D7OkMRrOUERO + EtUvc7odCwKBgBi+nhdZKWXveM7B5N3uzXBKmmRz3MpPdC/yDtcwJ8u8msUpTv4R + JxQNrd0bsIqBli0YBmFLYEMg+BwjAee7vXeDFq+HCTv6XMva2RsNryCO4yD3I359 + XfE6DJzB8ZOUgv4Dvluie3TB2Y6ZQV/p+LGt7G13yG4hvofyJYvlg3RPAoGAcjDg + +OH5zLN2eqah8qBN0CYa9/rFt0AJ19+7/smLTJ7QvQq4g0gwS1couplcCEnNGWiK + 72y1n/ckvvplmPeAE19HveMvR9UoCeV5ej86fACy8V/oVpnaaLBvL2aCMjPLjPP9 + DWeCIZp8MV86cvOrGfngf6kJG2qZTueXl4NAuwkCgYEArKkhlZVXjwBoVvtHYmN2 + o+F6cGMlRJTLhNc391WApsgDZfTZSdeJsBsvvzS/Nc0burrufJg0wYioTlpReSy4 + ohhtprnQQAddfjHP7rh2LGt+irFzhdXXQ1ybGaGM9D764KUNCXLuwdly0vzXU4HU + q5sGxGrC1RECGB5Zwx2S2ZY= + -----END PRIVATE KEY----- + clusters: - name: service1 connect_timeout: 0.25s diff --git a/examples/front-proxy/service.py b/examples/front-proxy/service.py index 30e8d5219b31..1d5d5920a8e3 100644 --- a/examples/front-proxy/service.py +++ b/examples/front-proxy/service.py @@ -38,7 +38,7 @@ def trace(service_number): for header in TRACE_HEADERS_TO_PROPAGATE: if header in request.headers: headers[header] = request.headers[header] - ret = requests.get("http://localhost:9000/trace/2", headers=headers) + requests.get("http://localhost:9000/trace/2", headers=headers) return ('Hello from behind Envoy (service {})! 
hostname: {} resolved' 'hostname: {}\n'.format(os.environ['SERVICE_NAME'], socket.gethostname(), socket.gethostbyname(socket.gethostname()))) From a023627e0f6c1b032598822b29137823b8175689 Mon Sep 17 00:00:00 2001 From: Kevin Baichoo Date: Fri, 10 Jul 2020 13:25:30 -0400 Subject: [PATCH 593/909] Added fuzz test case and initalized the boolean that was causing the issue. (#12014) Commit Message: Fixes a filter related fuzz bug due to an uninitialized bool Additional Description: Initialized the boolean member to false, based on observations of usage in unit tests Signed-off-by: Kevin Baichoo --- .../http/admission_control/admission_control.cc | 2 +- ...stcase-minimized-filter_fuzz_test-6133921480966144 | 11 +++++++++++ 2 files changed, 12 insertions(+), 1 deletion(-) create mode 100644 test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-6133921480966144 diff --git a/source/extensions/filters/http/admission_control/admission_control.cc b/source/extensions/filters/http/admission_control/admission_control.cc index 1ea8fdaf71e4..fe880fefc47b 100644 --- a/source/extensions/filters/http/admission_control/admission_control.cc +++ b/source/extensions/filters/http/admission_control/admission_control.cc @@ -50,7 +50,7 @@ double AdmissionControlFilterConfig::aggression() const { AdmissionControlFilter::AdmissionControlFilter(AdmissionControlFilterConfigSharedPtr config, const std::string& stats_prefix) : config_(std::move(config)), stats_(generateStats(config_->scope(), stats_prefix)), - record_request_(true) {} + expect_grpc_status_in_trailer_(false), record_request_(true) {} Http::FilterHeadersStatus AdmissionControlFilter::decodeHeaders(Http::RequestHeaderMap&, bool) { // TODO(tonya11en): Ensure we document the fact that healthchecks are ignored. 
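
The bug class addressed above is a common one: a primitive member declared without an initializer holds an indeterminate value until some code path assigns it, and reading it before that point is undefined behavior, which is exactly the kind of issue the fuzzer surfaced. The patch fixes it in the constructor initializer list; the sketch below shows the equivalent in-class default-member-initializer form, using an illustrative class name rather than the actual filter definition.

.. code-block:: cpp

   // Minimal sketch, not the actual AdmissionControlFilter definition: give
   // every primitive member a default so no constructor path can leave it
   // indeterminate.
   class ExampleFilter {
   public:
     ExampleFilter() = default;

   private:
     // Mirrors the members touched in the patch above; with in-class
     // initializers the values stay defined even if a future constructor
     // overload forgets to set them explicitly.
     bool expect_grpc_status_in_trailer_{false};
     bool record_request_{true};
   };
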
diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-6133921480966144 b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-6133921480966144 new file mode 100644 index 000000000000..1d3dd81ed0ec --- /dev/null +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-6133921480966144 @@ -0,0 +1,11 @@ +config { + name: "envoy.filters.http.admission_control" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.http.admission_control.v3alpha.AdmissionControl" + value: "\022\000" + } +} +upstream_data { + trailers { + } +} From ca7dc5344765f3e7fc3c1a47927e8c1e3e6cefd3 Mon Sep 17 00:00:00 2001 From: Rei Shimizu Date: Sat, 11 Jul 2020 02:51:48 +0900 Subject: [PATCH 594/909] pgv: update (#12025) Signed-off-by: Shikugawa --- api/bazel/repository_locations.bzl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/api/bazel/repository_locations.bzl b/api/bazel/repository_locations.bzl index 0a0379f7685e..38bdd08091bb 100644 --- a/api/bazel/repository_locations.bzl +++ b/api/bazel/repository_locations.bzl @@ -4,8 +4,8 @@ BAZEL_SKYLIB_SHA256 = "1dde365491125a3db70731e25658dfdd3bc5dbdfd11b840b3e987ecf0 OPENCENSUS_PROTO_GIT_SHA = "be218fb6bd674af7519b1850cdf8410d8cbd48e8" # Dec 20, 2019 OPENCENSUS_PROTO_SHA256 = "e3bbdc94375e86c0edfb2fc5851507e08a3f26ee725ffff7c5c0e73264bdfcde" -PGV_GIT_SHA = "278964a8052f96a2f514add0298098f63fb7f47f" # June 9, 2020 -PGV_SHA256 = "e368733c9fb7f8489591ffaf269170d7658cc0cd1ee322b601512b769446d3c8" +PGV_GIT_SHA = "ef00e9c655af0fbc7fa159ca44647d01794b3251" # July 9, 2020 +PGV_SHA256 = "55fcf809ac85d851fbc488b2e25632e74a150567371225f9b0b2c2eaa4f15a0a" GOOGLEAPIS_GIT_SHA = "82944da21578a53b74e547774cf62ed31a05b841" # Dec 2, 2019 GOOGLEAPIS_SHA = "a45019af4d3290f02eaeb1ce10990166978c807cb33a9692141a076ba46d1405" From 1d161e7ce33f0d39059046e24c16c2a930249644 Mon Sep 17 00:00:00 2001 From: Dmitry Rozhkov Date: Fri, 10 Jul 2020 20:57:43 +0300 Subject: [PATCH 595/909] compressor: always insert Vary headers for compressible resources (#11950) Even if we decide not to compress a response due to incompatible Accept-Encoding value, the Vary header needs to be inserted to let a caching proxy in front of Envoy know that the requested resource still can be served with compression applied. Fixes #11343 Co-authored-by: Gil Pedersen Signed-off-by: Dmitry Rozhkov --- .../http/http_filters/compressor_filter.rst | 7 +++ docs/root/version_history/current.rst | 1 + .../http/common/compressor/compressor.cc | 20 ++++++--- .../compressor/compressor_filter_test.cc | 45 +++++++++++++------ 4 files changed, 55 insertions(+), 18 deletions(-) diff --git a/docs/root/configuration/http/http_filters/compressor_filter.rst b/docs/root/configuration/http/http_filters/compressor_filter.rst index acb02e0f44db..862af5304065 100644 --- a/docs/root/configuration/http/http_filters/compressor_filter.rst +++ b/docs/root/configuration/http/http_filters/compressor_filter.rst @@ -81,6 +81,13 @@ When compression is *applied*: "*content-encoding*" header. - The "*vary: accept-encoding*" header is inserted on every response. +Also the "*vary: accept-encoding*" header may be inserted even if compression is *not* +applied due to incompatible "*accept-encoding*" header in a request. This happens +when the requested resource still can be compressed given compatible "*accept-encoding*". 
+Otherwise, if an uncompressed response is cached by a caching proxy in front of Envoy, +the proxy won't know to fetch a new incoming request with compatible "*accept-encoding*" +from upstream. + .. _compressor-statistics: Statistics diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index e03bc0eb9069..3d664813d44a 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -9,6 +9,7 @@ Minor Behavior Changes ---------------------- *Changes that may cause incompatibilities for some users, but should not for most* +* compressor: always insert `Vary` headers for compressible resources even if it's decided not to compress a response due to incompatible `Accept-Encoding` value. The `Vary` header needs to be inserted to let a caching proxy in front of Envoy know that the requested resource still can be served with compression applied. * http: the per-stream FilterState maintained by the HTTP connection manager will now provide read/write access to the downstream connection FilterState. As such, code that relies on interacting with this might see a change in behavior. * logging: nghttp2 log messages no longer appear at trace level unless `ENVOY_NGHTTP2_TRACE` is set diff --git a/source/extensions/filters/http/common/compressor/compressor.cc b/source/extensions/filters/http/common/compressor/compressor.cc index 482abaf348dd..4e0a1b48ce9b 100644 --- a/source/extensions/filters/http/common/compressor/compressor.cc +++ b/source/extensions/filters/http/common/compressor/compressor.cc @@ -111,13 +111,15 @@ void CompressorFilter::setDecoderFilterCallbacks(Http::StreamDecoderFilterCallba Http::FilterHeadersStatus CompressorFilter::encodeHeaders(Http::ResponseHeaderMap& headers, bool end_stream) { - if (!end_stream && config_->enabled() && isMinimumContentLength(headers) && - isAcceptEncodingAllowed(headers) && isContentTypeAllowed(headers) && - !hasCacheControlNoTransform(headers) && isEtagAllowed(headers) && - isTransferEncodingAllowed(headers) && !headers.getInline(content_encoding_handle.handle())) { + const bool isEnabledAndContentLengthBigEnough = + config_->enabled() && isMinimumContentLength(headers); + const bool isCompressible = isEnabledAndContentLengthBigEnough && isContentTypeAllowed(headers) && + !hasCacheControlNoTransform(headers) && isEtagAllowed(headers) && + !headers.getInline(content_encoding_handle.handle()); + if (!end_stream && isEnabledAndContentLengthBigEnough && isAcceptEncodingAllowed(headers) && + isCompressible && isTransferEncodingAllowed(headers)) { skip_compression_ = false; sanitizeEtagHeader(headers); - insertVaryHeader(headers); headers.removeContentLength(); headers.setInline(content_encoding_handle.handle(), config_->contentEncoding()); config_->stats().compressed_.inc(); @@ -126,6 +128,14 @@ Http::FilterHeadersStatus CompressorFilter::encodeHeaders(Http::ResponseHeaderMa } else { config_->stats().not_compressed_.inc(); } + + // Even if we decided not to compress due to incompatible Accept-Encoding value, + // the Vary header would need to be inserted to let a caching proxy in front of Envoy + // know that the requested resource still can be served with compression applied. 
+ if (isCompressible) { + insertVaryHeader(headers); + } + return Http::FilterHeadersStatus::Continue; } diff --git a/test/extensions/filters/http/common/compressor/compressor_filter_test.cc b/test/extensions/filters/http/common/compressor/compressor_filter_test.cc index 225edcaeaa32..92c8d45190ab 100644 --- a/test/extensions/filters/http/common/compressor/compressor_filter_test.cc +++ b/test/extensions/filters/http/common/compressor/compressor_filter_test.cc @@ -154,7 +154,7 @@ class CompressorFilterTest : public testing::Test { EXPECT_EQ(1U, stats_.counter("test.test.compressed").value()); } - void doResponseNoCompression(Http::TestResponseHeaderMapImpl&& headers) { + void doResponseNoCompression(Http::TestResponseHeaderMapImpl& headers) { NiceMock decoder_callbacks; filter_->setDecoderFilterCallbacks(decoder_callbacks); uint64_t content_length; @@ -201,7 +201,9 @@ TEST_F(CompressorFilterTest, DecodeHeadersWithRuntimeDisabled) { .Times(2) .WillRepeatedly(Return(false)); doRequest({{":method", "get"}, {"accept-encoding", "deflate, test"}}, false); - doResponseNoCompression({{":method", "get"}, {"content-length", "256"}}); + Http::TestResponseHeaderMapImpl headers{{":method", "get"}, {"content-length", "256"}}; + doResponseNoCompression(headers); + EXPECT_FALSE(headers.has("vary")); } // Default config values. @@ -254,8 +256,10 @@ TEST_F(CompressorFilterTest, HasCacheControlNoTransform) { // Verifies that compression is skipped when cache-control header has no-transform value. TEST_F(CompressorFilterTest, HasCacheControlNoTransformNoCompression) { doRequest({{":method", "get"}, {"accept-encoding", "test;q=1, deflate"}}, true); - doResponseNoCompression( - {{":method", "get"}, {"content-length", "256"}, {"cache-control", "no-transform"}}); + Http::TestResponseHeaderMapImpl headers{ + {":method", "get"}, {"content-length", "256"}, {"cache-control", "no-transform"}}; + doResponseNoCompression(headers); + EXPECT_FALSE(headers.has("vary")); } // Verifies that compression is NOT skipped when cache-control header does NOT have no-transform @@ -269,8 +273,10 @@ TEST_F(CompressorFilterTest, HasCacheControlNoTransformCompression) { TEST_F(CompressorFilterTest, NoAcceptEncodingHeader) { doRequest({{":method", "get"}, {}}, true); - doResponseNoCompression({{":method", "get"}, {"content-length", "256"}}); + Http::TestResponseHeaderMapImpl headers{{":method", "get"}, {"content-length", "256"}}; + doResponseNoCompression(headers); EXPECT_EQ(1, stats_.counter("test.test.no_accept_header").value()); + EXPECT_EQ("Accept-Encoding", headers.get_("vary")); } // Verifies isAcceptEncodingAllowed function. @@ -568,7 +574,11 @@ TEST_F(CompressorFilterTest, IsAcceptEncodingAllowed) { // Verifies that compression is skipped when accept-encoding header is not allowed. TEST_F(CompressorFilterTest, AcceptEncodingNoCompression) { doRequest({{":method", "get"}, {"accept-encoding", "test;q=0, deflate"}}, true); - doResponseNoCompression({{":method", "get"}, {"content-length", "256"}}); + Http::TestResponseHeaderMapImpl headers{{":method", "get"}, {"content-length", "256"}}; + doResponseNoCompression(headers); + // Even if compression is disallowed by a client we must let her know the resource is + // compressible. + EXPECT_EQ("Accept-Encoding", headers.get_("vary")); } // Verifies that compression is NOT skipped when accept-encoding header is allowed. @@ -624,7 +634,9 @@ TEST_F(CompressorFilterTest, IsMinimumContentLength) { // Verifies that compression is skipped when content-length header is NOT allowed. 
TEST_F(CompressorFilterTest, ContentLengthNoCompression) { doRequest({{":method", "get"}, {"accept-encoding", "test"}}, true); - doResponseNoCompression({{":method", "get"}, {"content-length", "10"}}); + Http::TestResponseHeaderMapImpl headers{{":method", "get"}, {"content-length", "10"}}; + doResponseNoCompression(headers); + EXPECT_FALSE(headers.has("vary")); } // Verifies that compression is NOT skipped when content-length header is allowed. @@ -754,9 +766,12 @@ TEST_F(CompressorFilterTest, ContentTypeNoCompression) { } )EOF"); doRequest({{":method", "get"}, {"accept-encoding", "test"}}, true); - doResponseNoCompression( - {{":method", "get"}, {"content-length", "256"}, {"content-type", "image/jpeg"}}); + Http::TestResponseHeaderMapImpl headers{ + {":method", "get"}, {"content-length", "256"}, {"content-type", "image/jpeg"}}; + doResponseNoCompression(headers); EXPECT_EQ(1, stats_.counter("test.test.header_not_valid").value()); + // Assert the resource is not compressible. + EXPECT_FALSE(headers.has("vary")); } // Verifies that compression is NOT skipped when content-encoding header is allowed. @@ -847,9 +862,11 @@ TEST_F(CompressorFilterTest, EtagNoCompression) { } )EOF"); doRequest({{":method", "get"}, {"accept-encoding", "test"}}, true); - doResponseNoCompression( - {{":method", "get"}, {"content-length", "256"}, {"etag", R"EOF(W/"686897696a7c876b7e")EOF"}}); + Http::TestResponseHeaderMapImpl headers{ + {":method", "get"}, {"content-length", "256"}, {"etag", R"EOF(W/"686897696a7c876b7e")EOF"}}; + doResponseNoCompression(headers); EXPECT_EQ(1, stats_.counter("test.test.not_compressed_etag").value()); + EXPECT_FALSE(headers.has("vary")); } // Verifies that compression is not skipped when strong etag header is present. @@ -910,8 +927,10 @@ TEST_F(CompressorFilterTest, TransferEncodingChunked) { TEST_F(CompressorFilterTest, AcceptanceTransferEncoding) { doRequest({{":method", "get"}, {"accept-encoding", "test"}}, true); - doResponseNoCompression( - {{":method", "get"}, {"content-length", "256"}, {"transfer-encoding", "chunked, deflate"}}); + Http::TestResponseHeaderMapImpl headers{ + {":method", "get"}, {"content-length", "256"}, {"transfer-encoding", "chunked, deflate"}}; + doResponseNoCompression(headers); + EXPECT_EQ("Accept-Encoding", headers.get_("vary")); } // Content-Encoding: upstream response is already encoded. From f2306b35a73fe1c037c7006889ad31d8e54234cc Mon Sep 17 00:00:00 2001 From: Dmitri Dolguikh Date: Fri, 10 Jul 2020 12:15:49 -0700 Subject: [PATCH 596/909] Tests are now being executed during gcc builds (#10236) Signed-off-by: Dmitri Dolguikh --- bazel/external/quiche.BUILD | 3 +++ ci/do_ci.sh | 6 +++++- source/common/common/macros.h | 3 +++ test/common/stats/stat_test_utility.cc | 14 +++++++------- .../http/decompressor/decompressor_filter_test.cc | 4 ++-- .../local_ratelimit/local_ratelimit_fuzz_test.cc | 2 +- .../quiche/platform/quic_platform_test.cc | 3 ++- test/extensions/quic_listeners/quiche/test_utils.h | 1 + .../dynamic_opentracing_driver_impl_test.cc | 4 ++++ test/mocks/config/mocks.h | 10 +++++----- 10 files changed, 33 insertions(+), 17 deletions(-) diff --git a/bazel/external/quiche.BUILD b/bazel/external/quiche.BUILD index 79575bbe6545..2ec3f85a4e67 100644 --- a/bazel/external/quiche.BUILD +++ b/bazel/external/quiche.BUILD @@ -59,6 +59,7 @@ quiche_copts = select({ # Remove these after upstream fix. 
"-Wno-unused-parameter", "-Wno-unused-function", + "-Wno-return-type", "-Wno-unknown-warning-option", "-Wno-deprecated-copy", "-Wno-ignored-qualifiers", @@ -66,6 +67,8 @@ quiche_copts = select({ "-Wno-inconsistent-missing-override", # quic_inlined_frame.h uses offsetof() to optimize memory usage in frames. "-Wno-invalid-offsetof", + # to suppress errors re: size_t vs. int comparisons + "-Wno-sign-compare", ], }) diff --git a/ci/do_ci.sh b/ci/do_ci.sh index e3b8f503e298..55718a63b00f 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -137,9 +137,13 @@ elif [[ "$CI_TARGET" == "bazel.sizeopt" ]]; then bazel test ${BAZEL_BUILD_OPTIONS} --config=sizeopt ${TEST_TARGETS} exit 0 elif [[ "$CI_TARGET" == "bazel.gcc" ]]; then + BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS} --test_env=HEAPCHECK=" setup_gcc_toolchain echo "bazel fastbuild build..." - bazel_binary_build fastbuild + bazel_binary_build release + + echo "Testing ${TEST_TARGETS}" + bazel_with_collection test ${BAZEL_BUILD_OPTIONS} -c opt ${TEST_TARGETS} exit 0 elif [[ "$CI_TARGET" == "bazel.debug" ]]; then setup_clang_toolchain diff --git a/source/common/common/macros.h b/source/common/common/macros.h index 386f54d384c6..f2b06b84f340 100644 --- a/source/common/common/macros.h +++ b/source/common/common/macros.h @@ -54,4 +54,7 @@ namespace Envoy { #define FALLTHRU #endif +#if (defined(__GNUC__) && !defined(__clang__)) +#define GCC_COMPILER +#endif } // namespace Envoy diff --git a/test/common/stats/stat_test_utility.cc b/test/common/stats/stat_test_utility.cc index cc0f0a8d47a7..55670f7942d5 100644 --- a/test/common/stats/stat_test_utility.cc +++ b/test/common/stats/stat_test_utility.cc @@ -124,14 +124,14 @@ MemoryTest::Mode MemoryTest::mode() { "$ENVOY_MEMORY_TEST_EXACT is set for canonical memory measurements, " "but memory measurement looks broken"); return Mode::Canonical; - } else { - // Different versions of STL and other compiler/architecture differences may - // also impact memory usage, so when not compiling with MEMORY_TEST_EXACT, - // memory comparisons must be given some slack. There have recently emerged - // some memory-allocation differences between development and Envoy CI and - // Bazel CI (which compiles Envoy as a test of Bazel). - return can_measure_memory ? Mode::Approximate : Mode::Disabled; } + + // Different versions of STL and other compiler/architecture differences may + // also impact memory usage, so when not compiling with MEMORY_TEST_EXACT, + // memory comparisons must be given some slack. There have recently emerged + // some memory-allocation differences between development and Envoy CI and + // Bazel CI (which compiles Envoy as a test of Bazel). + return can_measure_memory ? 
Mode::Approximate : Mode::Disabled; #endif } diff --git a/test/extensions/filters/http/decompressor/decompressor_filter_test.cc b/test/extensions/filters/http/decompressor/decompressor_filter_test.cc index 14e82e9a034a..eb1f42426bbd 100644 --- a/test/extensions/filters/http/decompressor/decompressor_filter_test.cc +++ b/test/extensions/filters/http/decompressor/decompressor_filter_test.cc @@ -55,12 +55,12 @@ class DecompressorFilterTest : public testing::TestWithParam { auto request_headers = std::make_unique(headers); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(*request_headers, end_stream)); - return std::move(request_headers); + return request_headers; } else { auto response_headers = std::make_unique(headers); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(*response_headers, end_stream)); - return std::move(response_headers); + return response_headers; } } diff --git a/test/extensions/filters/network/local_ratelimit/local_ratelimit_fuzz_test.cc b/test/extensions/filters/network/local_ratelimit/local_ratelimit_fuzz_test.cc index a632b2612b07..5e597bb552ad 100644 --- a/test/extensions/filters/network/local_ratelimit/local_ratelimit_fuzz_test.cc +++ b/test/extensions/filters/network/local_ratelimit/local_ratelimit_fuzz_test.cc @@ -59,7 +59,7 @@ DEFINE_PROTO_FUZZER( ConfigSharedPtr config = nullptr; try { config = std::make_shared(proto_config, dispatcher, stats_store, runtime); - } catch (EnvoyException e) { + } catch (EnvoyException& e) { ENVOY_LOG_MISC(debug, "EnvoyException in config's constructor: {}", e.what()); return; } diff --git a/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc b/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc index 32bca008f064..d8408586a96e 100644 --- a/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc +++ b/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc @@ -240,9 +240,10 @@ TEST_F(QuicPlatformTest, QuicServerStats) { } TEST_F(QuicPlatformTest, QuicStackTraceTest) { -#ifndef ENVOY_CONFIG_COVERAGE +#if !defined(ENVOY_CONFIG_COVERAGE) && !defined(GCC_COMPILER) // This doesn't work in coverage build because part of the stacktrace will be overwritten by // __llvm_coverage_mapping + // Stack trace under gcc with optimizations on (-c opt) doesn't include the test name EXPECT_THAT(QuicStackTrace(), HasSubstr("QuicStackTraceTest")); #endif } diff --git a/test/extensions/quic_listeners/quiche/test_utils.h b/test/extensions/quic_listeners/quiche/test_utils.h index 3f5862115b12..b9cc942af840 100644 --- a/test/extensions/quic_listeners/quiche/test_utils.h +++ b/test/extensions/quic_listeners/quiche/test_utils.h @@ -191,6 +191,7 @@ std::string testParamsToString( case QuicVersionType::Iquic: return absl::StrCat(ip_version, "_UseHttp3"); } + NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } } // namespace Quic diff --git a/test/extensions/tracers/dynamic_ot/dynamic_opentracing_driver_impl_test.cc b/test/extensions/tracers/dynamic_ot/dynamic_opentracing_driver_impl_test.cc index 19a995d6e537..775270a09b69 100644 --- a/test/extensions/tracers/dynamic_ot/dynamic_opentracing_driver_impl_test.cc +++ b/test/extensions/tracers/dynamic_ot/dynamic_opentracing_driver_impl_test.cc @@ -70,6 +70,9 @@ TEST_F(DynamicOpenTracingDriverTest, InitializeDriver) { } } +// This test fails under gcc, please see https://github.com/envoyproxy/envoy/issues/7647 +// for more details. 
+#ifndef GCC_COMPILER TEST_F(DynamicOpenTracingDriverTest, FlushSpans) { setupValidDriver(); @@ -87,6 +90,7 @@ TEST_F(DynamicOpenTracingDriverTest, FlushSpans) { EXPECT_NE(spans_json, nullptr); EXPECT_EQ(spans_json->asObjectArray().size(), 1); } +#endif } // namespace } // namespace DynamicOt diff --git a/test/mocks/config/mocks.h b/test/mocks/config/mocks.h index f55f41357c9f..0ad8e2f15088 100644 --- a/test/mocks/config/mocks.h +++ b/test/mocks/config/mocks.h @@ -94,11 +94,11 @@ class MockGrpcMux : public GrpcMux { MockGrpcMux(); ~MockGrpcMux() override; - MOCK_METHOD(void, start, ()); - MOCK_METHOD(ScopedResume, pause, (const std::string& type_url)); - MOCK_METHOD(ScopedResume, pause, (const std::vector type_urls)); - MOCK_METHOD(bool, paused, (const std::string& type_url), (const)); - MOCK_METHOD(bool, paused, (const std::vector type_urls), (const)); + MOCK_METHOD(void, start, (), (override)); + MOCK_METHOD(ScopedResume, pause, (const std::string& type_url), (override)); + MOCK_METHOD(ScopedResume, pause, (const std::vector type_urls), (override)); + MOCK_METHOD(bool, paused, (const std::string& type_url), (const, override)); + MOCK_METHOD(bool, paused, (const std::vector type_urls), (const, override)); MOCK_METHOD(void, addSubscription, (const std::set& resources, const std::string& type_url, From 379c4d285299300d5527ef6a28d3bd8452db2d86 Mon Sep 17 00:00:00 2001 From: htuch Date: Fri, 10 Jul 2020 15:33:59 -0400 Subject: [PATCH 597/909] protobuf: optimize unpackTo() for message upgrades. (#12026) While looking at eds_speed_test flamegraphs as part of #10875, it seemed we were doing some redundant work, namely first unpacking to v2 message and then upgrading from v2 to v3. Since v2 and v3 are wire compatible, and upgrade is just a wirecast, we might as well just unpack to v2. This improves eds_speed_test significantly, the penalty for v3 vs. v2 drops from 2.6x to 2x. Part of #11362 #10875. Risk level: Low Testing: Coverage of behavior from existing tests. Signed-off-by: Harvey Tuch --- source/common/config/version_converter.cc | 24 +++++++++++------------ source/common/config/version_converter.h | 9 +++++++++ source/common/protobuf/utility.cc | 18 +++++++++-------- 3 files changed, 31 insertions(+), 20 deletions(-) diff --git a/source/common/config/version_converter.cc b/source/common/config/version_converter.cc index c123a101b50e..db2bd1cfc216 100644 --- a/source/common/config/version_converter.cc +++ b/source/common/config/version_converter.cc @@ -60,10 +60,19 @@ DynamicMessagePtr createForDescriptorWithCast(const Protobuf::Message& message, return dynamic_message; } +} // namespace + +void VersionConverter::upgrade(const Protobuf::Message& prev_message, + Protobuf::Message& next_message) { + wireCast(prev_message, next_message); + // Track original type to support recoverOriginal(). + annotateWithOriginalType(*prev_message.GetDescriptor(), next_message); +} + // This needs to be recursive, since sub-messages are consumed and stored // internally, we later want to recover their original types. 
-void annotateWithOriginalType(const Protobuf::Descriptor& prev_descriptor, - Protobuf::Message& next_message) { +void VersionConverter::annotateWithOriginalType(const Protobuf::Descriptor& prev_descriptor, + Protobuf::Message& upgraded_message) { class TypeAnnotatingProtoVisitor : public ProtobufMessage::ProtoVisitor { public: void onMessage(Protobuf::Message& message, const void* ctxt) override { @@ -103,16 +112,7 @@ void annotateWithOriginalType(const Protobuf::Descriptor& prev_descriptor, } }; TypeAnnotatingProtoVisitor proto_visitor; - ProtobufMessage::traverseMutableMessage(proto_visitor, next_message, &prev_descriptor); -} - -} // namespace - -void VersionConverter::upgrade(const Protobuf::Message& prev_message, - Protobuf::Message& next_message) { - wireCast(prev_message, next_message); - // Track original type to support recoverOriginal(). - annotateWithOriginalType(*prev_message.GetDescriptor(), next_message); + ProtobufMessage::traverseMutableMessage(proto_visitor, upgraded_message, &prev_descriptor); } void VersionConverter::eraseOriginalTypeInformation(Protobuf::Message& message) { diff --git a/source/common/config/version_converter.h b/source/common/config/version_converter.h index 22ecc383d491..db9c76523931 100644 --- a/source/common/config/version_converter.h +++ b/source/common/config/version_converter.h @@ -91,6 +91,15 @@ class VersionConverter { static void prepareMessageForGrpcWire(Protobuf::Message& message, envoy::config::core::v3::ApiVersion api_version); + /** + * Annotate an upgraded message with original message type information. + * + * @param prev_descriptor descriptor for original type. + * @param upgraded_message upgraded message. + */ + static void annotateWithOriginalType(const Protobuf::Descriptor& prev_descriptor, + Protobuf::Message& upgraded_message); + /** * For a message that may have been upgraded, recover the original message. * This is useful for config dump, debug output etc. diff --git a/source/common/protobuf/utility.cc b/source/common/protobuf/utility.cc index f29c39430773..09e2fcc82f81 100644 --- a/source/common/protobuf/utility.cc +++ b/source/common/protobuf/utility.cc @@ -577,16 +577,18 @@ void MessageUtil::unpackTo(const ProtobufWkt::Any& any_message, Protobuf::Messag Config::ApiTypeOracle::getEarlierVersionDescriptor(message.GetDescriptor()->full_name()); // If the earlier version matches, unpack and upgrade. if (earlier_version_desc != nullptr && any_full_name == earlier_version_desc->full_name()) { - Protobuf::DynamicMessageFactory dmf; - auto earlier_message = - ProtobufTypes::MessagePtr(dmf.GetPrototype(earlier_version_desc)->New()); - ASSERT(earlier_message != nullptr); - if (!any_message.UnpackTo(earlier_message.get())) { + // Take the Any message but adjust its type URL, since earlier/later versions are wire + // compatible. 
+ ProtobufWkt::Any any_message_with_fixup; + any_message_with_fixup.MergeFrom(any_message); + any_message_with_fixup.set_type_url("type.googleapis.com/" + + message.GetDescriptor()->full_name()); + if (!any_message_with_fixup.UnpackTo(&message)) { throw EnvoyException(fmt::format("Unable to unpack as {}: {}", - earlier_message->GetDescriptor()->full_name(), - any_message.DebugString())); + earlier_version_desc->full_name(), + any_message_with_fixup.DebugString())); } - Config::VersionConverter::upgrade(*earlier_message, message); + Config::VersionConverter::annotateWithOriginalType(*earlier_version_desc, message); return; } } From 6e9273419b598dda99267405d8079ffdc2b5c78c Mon Sep 17 00:00:00 2001 From: asraa Date: Fri, 10 Jul 2020 16:57:02 -0400 Subject: [PATCH 598/909] [build] [fuzz] Reduce fuzz test binary sizes and build coverage w/o sanitizers (#11903) Commit Message: This PR does a few things to make fuzz coverage reports reliable. (1) Remove unnecessary deps in large fuzz tests, including splitting http filter fuzzer to only link http filters with `envoy_all_http_filters`. (2) Build fuzz coverage reports without any sanitizers (`plain-fuzzer`). - Caveat: Avoid linking libfuzzer with `-fsanitize=fuzzer`, since the libfuzzer implementation is built with a private libc++ implementation whose exception handling conflicts with libc++. I had to build it myself.... (3) Unify fuzz targets and their libfuzzer linked targets. Now, you invoke libfuzzer linked targets just by indicating `--config asan-fuzzer`. Risk Level: Low Testing: Local testing of coverage was successful. Binary size of `filter_fuzz_test` with libfuzzer for coverage builds goes from > 2.2 GB to 1.14 GB. Follow-up: * if coverage without sanitizers is successful, this will be the basis of fuzz coverage CI * just building fuzzer with `--config asan-fuzzer` for the purpose of running the fuzzer (not coverage) is still a reasonable/build-able size Signed-off-by: Asra Ali --- .bazelrc | 2 +- bazel/BUILD | 13 ++++++++ bazel/README.md | 4 +-- bazel/envoy_test.bzl | 29 ++++++++-------- bazel/external/compiler_rt.BUILD | 33 ++++++++++++++++++- bazel/repository_locations.bzl | 2 +- source/extensions/all_extensions.bzl | 7 ++++ .../extensions/filters/http/common/fuzz/BUILD | 4 +-- ...m-da39a3ee5e6b4b0d3255bfef95601890afd80709 | 0 test/fuzz/README.md | 16 ++++----- test/run_envoy_bazel_coverage.sh | 2 +- test/server/BUILD | 3 -- test/server/config_validation/BUILD | 2 -- .../config_validation/config_fuzz_test.cc | 2 -- test/server/server_fuzz_test.cc | 3 -- 15 files changed, 80 insertions(+), 42 deletions(-) create mode 100644 test/extensions/filters/http/common/fuzz/filter_corpus/oom-da39a3ee5e6b4b0d3255bfef95601890afd80709 diff --git a/.bazelrc b/.bazelrc index 5031f41ab8fa..71f457fb5e4a 100644 --- a/.bazelrc +++ b/.bazelrc @@ -130,7 +130,7 @@ build:coverage --collect_code_coverage build:coverage --test_tag_filters=-nocoverage build:coverage --instrumentation_filter="//source(?!/extensions/quic_listeners/quiche/platform)[/:],//include[/:]" coverage:test-coverage --test_arg="-l trace" -coverage:fuzz-coverage --config=asan-fuzzer +coverage:fuzz-coverage --config=plain-fuzzer coverage:fuzz-coverage --run_under=@envoy//bazel/coverage:fuzz_coverage_wrapper.sh # Remote execution: https://docs.bazel.build/versions/master/remote-execution.html diff --git a/bazel/BUILD b/bazel/BUILD index c670adc0620d..982d3fa3ac70 100644 --- a/bazel/BUILD +++ b/bazel/BUILD @@ -341,6 +341,19 @@ config_setting( values = {"define": "logger=android"}, ) 
+config_setting( + name = "libfuzzer_coverage", + define_values = { + "FUZZING_ENGINE": "libfuzzer", + "ENVOY_CONFIG_COVERAGE": "1", + }, +) + +config_setting( + name = "libfuzzer", + values = {"define": "FUZZING_ENGINE=libfuzzer"}, +) + alias( name = "apple", actual = select( diff --git a/bazel/README.md b/bazel/README.md index b45d55bce471..4675f592157f 100644 --- a/bazel/README.md +++ b/bazel/README.md @@ -649,10 +649,10 @@ The latest coverage report for master is available It's also possible to specialize the coverage build to a specified test or test dir. This is useful when doing things like exploring the coverage of a fuzzer over its corpus. This can be done by passing coverage targets as the command-line arguments and using the `VALIDATE_COVERAGE` environment -variable, e.g.: +variable, e.g. for a fuzz test: ``` -VALIDATE_COVERAGE=false test/run_envoy_bazel_coverage.sh //test/common/common:base64_fuzz_test +FUZZ_COVERAGE=true VALIDATE_COVERAGE=false test/run_envoy_bazel_coverage.sh //test/common/common:base64_fuzz_test ``` # Cleaning the build and test artifacts diff --git a/bazel/envoy_test.bzl b/bazel/envoy_test.bzl index 4f603671933b..ace2fe600cb4 100644 --- a/bazel/envoy_test.bzl +++ b/bazel/envoy_test.bzl @@ -110,14 +110,24 @@ def envoy_cc_fuzz_test( cc_test( name = name, copts = fuzz_copts + envoy_copts("@envoy", test = True), - linkopts = _envoy_test_linkopts(), - linkstatic = 1, - args = ["$(locations %s)" % corpus_name], + linkopts = _envoy_test_linkopts() + select({ + "@envoy//bazel:libfuzzer": ["-fsanitize=fuzzer"], + "//conditions:default": [], + }), + linkstatic = envoy_linkstatic(), + args = select({ + "@envoy//bazel:libfuzzer_coverage": ["$(locations %s)" % corpus_name], + "@envoy//bazel:libfuzzer": [], + "//conditions:default": ["$(locations %s)" % corpus_name], + }), data = [corpus_name], # No fuzzing on macOS or Windows deps = select({ "@envoy//bazel:apple": [repository + "//test:dummy_main"], "@envoy//bazel:windows_x86_64": [repository + "//test:dummy_main"], + "@envoy//bazel:libfuzzer": [ + ":" + test_lib_name, + ], "//conditions:default": [ ":" + test_lib_name, repository + "//test/fuzz:main", @@ -140,19 +150,6 @@ def envoy_cc_fuzz_test( deps = [":" + test_lib_name], tags = ["manual"] + tags, ) - cc_test( - name = name + "_with_libfuzzer", - copts = fuzz_copts + envoy_copts("@envoy", test = True), - linkopts = ["-fsanitize=fuzzer"] + _envoy_test_linkopts(), - linkstatic = 1, - args = select({ - "@envoy//bazel:coverage_build": ["$(locations %s)" % corpus_name], - "//conditions:default": [], - }), - data = [corpus_name], - deps = [":" + test_lib_name], - tags = ["manual", "fuzzer"] + tags, - ) # Envoy C++ test targets should be specified with this function. 
def envoy_cc_test( diff --git a/bazel/external/compiler_rt.BUILD b/bazel/external/compiler_rt.BUILD index dbcb1be5134d..666c4eca06a0 100644 --- a/bazel/external/compiler_rt.BUILD +++ b/bazel/external/compiler_rt.BUILD @@ -4,6 +4,37 @@ licenses(["notice"]) # Apache 2 cc_library( name = "fuzzed_data_provider", - hdrs = ["fuzzer/FuzzedDataProvider.h"], + hdrs = ["include/fuzzer/FuzzedDataProvider.h"], + strip_include_prefix = "include", visibility = ["//visibility:public"], ) + +libfuzzer_copts = [ + "-fno-sanitize=address,thread,undefined", + "-fsanitize-coverage=0", + "-O3", +] + +cc_library( + name = "libfuzzer_main", + srcs = ["lib/fuzzer/FuzzerMain.cpp"], + copts = libfuzzer_copts, + visibility = ["//visibility:public"], + deps = [":libfuzzer_no_main"], + alwayslink = True, +) + +cc_library( + name = "libfuzzer_no_main", + srcs = glob( + ["lib/fuzzer/Fuzzer*.cpp"], + exclude = ["lib/fuzzer/FuzzerMain.cpp"], + ), + hdrs = glob([ + "lib/fuzzer/Fuzzer*.h", + "lib/fuzzer/Fuzzer*.def", + ]), + copts = libfuzzer_copts, + visibility = ["//visibility:public"], + alwayslink = True, +) diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 4c857edb853f..40b6ed7d33e7 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -446,7 +446,7 @@ DEPENDENCY_REPOSITORIES = dict( org_llvm_releases_compiler_rt = dict( sha256 = "6a7da64d3a0a7320577b68b9ca4933bdcab676e898b759850e827333c3282c75", # Only allow peeking at fuzzer related files for now. - strip_prefix = "compiler-rt-10.0.0.src/include", + strip_prefix = "compiler-rt-10.0.0.src", urls = ["https://github.com/llvm/llvm-project/releases/download/llvmorg-10.0.0/compiler-rt-10.0.0.src.tar.xz"], use_category = ["test"], ), diff --git a/source/extensions/all_extensions.bzl b/source/extensions/all_extensions.bzl index 73e3da62c5d7..8e151ad42d2d 100644 --- a/source/extensions/all_extensions.bzl +++ b/source/extensions/all_extensions.bzl @@ -31,3 +31,10 @@ def envoy_all_core_extensions(): # These extensions can be removed on a site specific basis. 
return [v for k, v in all_extensions.items() if k in _core_extensions] + +_http_filter_prefix = "envoy.filters.http" + +def envoy_all_http_filters(): + all_extensions = dicts.add(_required_extensions, EXTENSIONS) + + return [v for k, v in all_extensions.items() if k.startswith(_http_filter_prefix)] diff --git a/test/extensions/filters/http/common/fuzz/BUILD b/test/extensions/filters/http/common/fuzz/BUILD index e372825fc6c4..ccad6764604a 100644 --- a/test/extensions/filters/http/common/fuzz/BUILD +++ b/test/extensions/filters/http/common/fuzz/BUILD @@ -7,7 +7,7 @@ load( ) load( "//source/extensions:all_extensions.bzl", - "envoy_all_extensions", + "envoy_all_http_filters", ) licenses(["notice"]) # Apache 2 @@ -63,5 +63,5 @@ envoy_cc_fuzz_test( "//test/config:utility_lib", "@envoy_api//envoy/service/auth/v3:pkg_cc_proto", "@envoy_api//envoy/service/auth/v2alpha:pkg_cc_proto", - ] + envoy_all_extensions(), + ] + envoy_all_http_filters(), ) diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/oom-da39a3ee5e6b4b0d3255bfef95601890afd80709 b/test/extensions/filters/http/common/fuzz/filter_corpus/oom-da39a3ee5e6b4b0d3255bfef95601890afd80709 new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/test/fuzz/README.md b/test/fuzz/README.md index 10849dd15c9b..aa3cf68057a3 100644 --- a/test/fuzz/README.md +++ b/test/fuzz/README.md @@ -45,7 +45,7 @@ The fuzz test will be executed in three environments: and basic sanitizers just on the supplied corpus. 1. Using the libFuzzer fuzzing engine and ASAN when run in the Envoy repository with `bazel run - //test/path/to/some_fuzz_test_with_libfuzzer --config asan-fuzzer`. This is where real fuzzing + //test/path/to/some_fuzz_test --config asan-fuzzer`. This is where real fuzzing takes place locally. The built binary can take libFuzzer command-line flags, including the number of runs and the maximum input length. @@ -66,7 +66,7 @@ The fuzz test will be executed in three environments: 4. Run the `envoy_cc_fuzz_test` target to test against the seed corpus. E.g. `bazel test //test/common/common:base64_fuzz_test`. -5. Run the `*_fuzz_test_with_libfuzzer` target against libFuzzer. E.g. `bazel run +5. Run the `*_fuzz_test` target against libFuzzer. E.g. `bazel run //test/common/common:base64_fuzz_test --config asan-fuzzer`. ## Protobuf fuzz tests @@ -88,15 +88,15 @@ Within the Envoy repository, we have various `*_fuzz_test` targets. When run und these will exercise the corpus as inputs but not actually link and run against any fuzzer (e.g. [`libfuzzer`](https://llvm.org/docs/LibFuzzer.html)). -To get actual fuzzing performed, the `*_fuzz_test_with_libfuzzer` target needs to be built with -`--config asan-fuzzer`. This links the target to the libFuzzer fuzzing engine. This is recommended -when writing new fuzz tests to check if they pick up any low hanging fruit (i.e. what you can find -on your local machine vs. the fuzz cluster). The binary takes the location of the seed corpus +To get actual fuzzing performed, the `*_fuzz_test` target needs to be built with `--config +asan-fuzzer`. This links the target to the libFuzzer fuzzing engine. This is recommended when +writing new fuzz tests to check if they pick up any low hanging fruit (i.e. what you can find on +your local machine vs. the fuzz cluster). The binary takes the location of the seed corpus directory. Fuzzing continues indefinitely until a bug is found or the number of iterations it should perform is specified with `-runs`. 
For example, -`bazel run //test/common/common:base64_fuzz_test_with_libfuzzer --config asan-fuzzer -- -test/common/common/base64_corpus -runs=1000` +`bazel run //test/common/common:base64_fuzz_test --config asan-fuzzer +--test/common/common/base64_corpus -runs=1000` The fuzzer prints information to stderr: diff --git a/test/run_envoy_bazel_coverage.sh b/test/run_envoy_bazel_coverage.sh index 7b34381d7167..270ca9412ec7 100755 --- a/test/run_envoy_bazel_coverage.sh +++ b/test/run_envoy_bazel_coverage.sh @@ -21,7 +21,7 @@ elif [[ -n "${COVERAGE_TARGET}" ]]; then else # For fuzz builds, this overrides to just fuzz targets. COVERAGE_TARGETS=//test/... && [[ ${FUZZ_COVERAGE} == "true" ]] && - COVERAGE_TARGETS="$(bazel query 'attr("tags", "fuzzer", //test/...)')" + COVERAGE_TARGETS="$(bazel query 'attr("tags", "fuzz_target", //test/...)')" fi if [[ "${FUZZ_COVERAGE}" == "true" ]]; then diff --git a/test/server/BUILD b/test/server/BUILD index 64b8c639188c..da24f5a1109a 100644 --- a/test/server/BUILD +++ b/test/server/BUILD @@ -307,13 +307,10 @@ envoy_cc_fuzz_test( corpus = "server_corpus", deps = [ "//source/common/thread_local:thread_local_lib", - "//source/server:proto_descriptors_lib", "//source/server:server_lib", "//test/integration:integration_lib", "//test/mocks/server:server_mocks", - "//test/mocks/stats:stats_mocks", "//test/test_common:environment_lib", - "//test/test_common:test_time_lib", ] + select({ "//bazel:windows_x86_64": envoy_all_extensions(WINDOWS_SKIP_TARGETS), "//bazel:linux_ppc": envoy_all_extensions(PPC_SKIP_TARGETS), diff --git a/test/server/config_validation/BUILD b/test/server/config_validation/BUILD index 617e54334dfc..4c2c1105d9f6 100644 --- a/test/server/config_validation/BUILD +++ b/test/server/config_validation/BUILD @@ -97,10 +97,8 @@ envoy_cc_fuzz_test( deps = [ "//source/common/common:thread_lib", "//source/server/config_validation:server_lib", - "//source/server:proto_descriptors_lib", "//test/integration:integration_lib", "//test/mocks/server:server_mocks", - "//test/test_common:environment_lib", ] + select({ "//bazel:windows_x86_64": envoy_all_extensions(WINDOWS_SKIP_TARGETS), "//bazel:linux_ppc": envoy_all_extensions(PPC_SKIP_TARGETS), diff --git a/test/server/config_validation/config_fuzz_test.cc b/test/server/config_validation/config_fuzz_test.cc index bd40a453634a..33ce725e67d2 100644 --- a/test/server/config_validation/config_fuzz_test.cc +++ b/test/server/config_validation/config_fuzz_test.cc @@ -6,12 +6,10 @@ #include "common/network/address_impl.h" #include "server/config_validation/server.h" -#include "server/proto_descriptors.h" #include "test/fuzz/fuzz_runner.h" #include "test/integration/server.h" #include "test/mocks/server/mocks.h" -#include "test/test_common/environment.h" namespace Envoy { namespace Server { diff --git a/test/server/server_fuzz_test.cc b/test/server/server_fuzz_test.cc index b9778fad367f..1bf26f04b7f2 100644 --- a/test/server/server_fuzz_test.cc +++ b/test/server/server_fuzz_test.cc @@ -8,15 +8,12 @@ #include "common/thread_local/thread_local_impl.h" #include "server/listener_hooks.h" -#include "server/proto_descriptors.h" #include "server/server.h" #include "test/common/runtime/utility.h" #include "test/fuzz/fuzz_runner.h" #include "test/integration/server.h" #include "test/mocks/server/mocks.h" -#include "test/mocks/stats/mocks.h" -#include "test/test_common/environment.h" #include "test/test_common/test_time.h" namespace Envoy { From 46ef88fcd6e06cf36a0ef0b86dbfbe1e4e31d3ff Mon Sep 17 00:00:00 2001 From: yanavlasov Date: 
Fri, 10 Jul 2020 16:57:56 -0400 Subject: [PATCH 599/909] test: fix path quotes (#12029) Signed-off-by: Yan Avlasov --- test/integration/run_envoy_test.sh | 2 +- test/integration/test_utility.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration/run_envoy_test.sh b/test/integration/run_envoy_test.sh index 8009dce7bf56..4c9c21d73d34 100755 --- a/test/integration/run_envoy_test.sh +++ b/test/integration/run_envoy_test.sh @@ -1,6 +1,6 @@ #!/bin/bash -export ENVOY_BIN="${TEST_SRCDIR}"/envoy/test/integration/hotrestart_main +export ENVOY_BIN="${TEST_SRCDIR}/envoy/test/integration/hotrestart_main" source "${TEST_SRCDIR}/envoy/test/integration/test_utility.sh" function expect_fail_with_error() { diff --git a/test/integration/test_utility.sh b/test/integration/test_utility.sh index 5b872f59e0c6..33b3bfa6838a 100644 --- a/test/integration/test_utility.sh +++ b/test/integration/test_utility.sh @@ -111,4 +111,4 @@ wait_for_stat() { echo "$ret" } -[[ -z "${ENVOY_BIN}" ]] && ENVOY_BIN="${TEST_SRCDIR}"/envoy/source/exe/envoy-static +[[ -z "${ENVOY_BIN}" ]] && ENVOY_BIN="${TEST_SRCDIR}/envoy/source/exe/envoy-static" From e712d9d8a95a1af9ad24eb90991db800a4eca705 Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Fri, 10 Jul 2020 15:11:04 -0700 Subject: [PATCH 600/909] prometheus: allow stats to be opt out from automatic namespacing (#11808) Filters can register prefixes that will not have the normal `envoy_` namespace. Signed-off-by: Lizan Zhou --- source/server/admin/prometheus_stats.cc | 45 ++++++++++++++++++---- source/server/admin/prometheus_stats.h | 18 +++++++++ test/server/admin/prometheus_stats_test.cc | 16 ++++++++ 3 files changed, 72 insertions(+), 7 deletions(-) diff --git a/source/server/admin/prometheus_stats.cc b/source/server/admin/prometheus_stats.cc index 8ca8fcd0fc14..dc4eee79e7c3 100644 --- a/source/server/admin/prometheus_stats.cc +++ b/source/server/admin/prometheus_stats.cc @@ -1,6 +1,7 @@ #include "server/admin/prometheus_stats.h" #include "common/common/empty_string.h" +#include "common/common/macros.h" #include "common/stats/histogram_impl.h" #include "absl/strings/str_cat.h" @@ -11,6 +12,9 @@ namespace Server { namespace { const std::regex& promRegex() { CONSTRUCT_ON_FIRST_USE(std::regex, "[^a-zA-Z0-9_]"); } +const std::regex& namespaceRegex() { + CONSTRUCT_ON_FIRST_USE(std::regex, "^[a-zA-Z_][a-zA-Z0-9]*$"); +} /** * Take a string and sanitize it according to Prometheus conventions. @@ -18,12 +22,8 @@ const std::regex& promRegex() { CONSTRUCT_ON_FIRST_USE(std::regex, "[^a-zA-Z0-9_ std::string sanitizeName(const std::string& name) { // The name must match the regex [a-zA-Z_][a-zA-Z0-9_]* as required by // prometheus. Refer to https://prometheus.io/docs/concepts/data_model/. - std::string stats_name = std::regex_replace(name, promRegex(), "_"); - if (stats_name[0] >= '0' && stats_name[0] <= '9') { - return absl::StrCat("_", stats_name); - } else { - return stats_name; - } + // The initial [a-zA-Z_] constraint is always satisfied by the namespace prefix. + return std::regex_replace(name, promRegex(), "_"); } /* @@ -176,6 +176,10 @@ std::string generateHistogramOutput(const Stats::ParentHistogram& histogram, return output; }; +absl::flat_hash_set& prometheusNamespaces() { + MUTABLE_CONSTRUCT_ON_FIRST_USE(absl::flat_hash_set); +} + } // namespace std::string PrometheusStatsFormatter::formattedTags(const std::vector& tags) { @@ -188,10 +192,19 @@ std::string PrometheusStatsFormatter::formattedTags(const std::vector="" pairs. 
*/ static std::string formattedTags(const std::vector& tags); + /** * Format the given metric name, prefixed with "envoy_". */ static std::string metricName(const std::string& extracted_name); + + /** + * Register a prometheus namespace, stats starting with the namespace will not be + * automatically prefixed with envoy namespace. + * This method must be called from the main thread. + * @returns bool if a new namespace is registered, false if the namespace is already + * registered or the namespace is invalid. + */ + static bool registerPrometheusNamespace(absl::string_view prometheus_namespace); + + /** + * Unregister a prometheus namespace registered by `registerPrometheusNamespace` + * This method must be called from the main thread. + * @returns bool if the Prometheus namespace is unregistered. false if the namespace + * wasn't registered. + */ + static bool unregisterPrometheusNamespace(absl::string_view prometheus_namespace); }; } // namespace Server diff --git a/test/server/admin/prometheus_stats_test.cc b/test/server/admin/prometheus_stats_test.cc index 35528290dace..2338c6da64ef 100644 --- a/test/server/admin/prometheus_stats_test.cc +++ b/test/server/admin/prometheus_stats_test.cc @@ -119,6 +119,22 @@ TEST_F(PrometheusStatsFormatterTest, SanitizeMetricNameDigitFirst) { EXPECT_EQ(expected, actual); } +TEST_F(PrometheusStatsFormatterTest, NamespaceRegistry) { + std::string raw = "vulture.eats-liver"; + std::string expected = "vulture_eats_liver"; + + EXPECT_FALSE(PrometheusStatsFormatter::registerPrometheusNamespace("3vulture")); + EXPECT_FALSE(PrometheusStatsFormatter::registerPrometheusNamespace(".vulture")); + + EXPECT_FALSE(PrometheusStatsFormatter::unregisterPrometheusNamespace("vulture")); + EXPECT_TRUE(PrometheusStatsFormatter::registerPrometheusNamespace("vulture")); + EXPECT_FALSE(PrometheusStatsFormatter::registerPrometheusNamespace("vulture")); + EXPECT_EQ(expected, PrometheusStatsFormatter::metricName(raw)); + EXPECT_TRUE(PrometheusStatsFormatter::unregisterPrometheusNamespace("vulture")); + + EXPECT_EQ("envoy_" + expected, PrometheusStatsFormatter::metricName(raw)); +} + TEST_F(PrometheusStatsFormatterTest, FormattedTags) { std::vector tags; Stats::Tag tag1 = {"a.tag-name", "a.tag-value"}; From cb03985d90295ac5d3399d8d8dbecd30166a232b Mon Sep 17 00:00:00 2001 From: Dhi Aurrahman Date: Sat, 11 Jul 2020 06:48:09 +0700 Subject: [PATCH 601/909] http, utility: Add utility for parsing and decoding query string (#11795) This introduces `parseAndDecodeQueryString` method to parse the URL into percent-decoded query params. Especially for fixing the Prometheus handler decoding problem as mentioned in #10926. 
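A minimal usage sketch of the new helper (the path and parameter below are
illustrative only, not taken from this change): query values now arrive
percent-decoded, so handlers can match on the literal characters.

    // params behaves like a map keyed by parameter name.
    const Http::Utility::QueryParams params =
        Http::Utility::parseAndDecodeQueryString("/stats?filter=server%2Eversion");
    const auto it = params.find("filter");
    if (it != params.end()) {
      // it->second is "server.version"; the %2E was decoded before the lookup.
    }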
Risk Level: Low Testing: Unit Fixes #10926 Signed-off-by: Dhi Aurrahman --- source/common/http/utility.cc | 24 +++++-- source/common/http/utility.h | 11 +++- .../filters/http/jwt_authn/extractor.cc | 2 +- source/server/admin/admin.cc | 4 +- source/server/admin/profiling_handler.cc | 4 +- source/server/admin/runtime_handler.cc | 4 +- source/server/admin/stats_handler.cc | 5 +- test/common/http/utility_fuzz_test.cc | 5 +- test/common/http/utility_test.cc | 63 ++++++++++++++++++- 9 files changed, 105 insertions(+), 17 deletions(-) diff --git a/source/common/http/utility.cc b/source/common/http/utility.cc index da6076f75689..bee04a98cabe 100644 --- a/source/common/http/utility.cc +++ b/source/common/http/utility.cc @@ -233,14 +233,26 @@ Utility::QueryParams Utility::parseQueryString(absl::string_view url) { } start++; - return parseParameters(url, start); + return parseParameters(url, start, /*decode_params=*/false); +} + +Utility::QueryParams Utility::parseAndDecodeQueryString(absl::string_view url) { + size_t start = url.find('?'); + if (start == std::string::npos) { + QueryParams params; + return params; + } + + start++; + return parseParameters(url, start, /*decode_params=*/true); } Utility::QueryParams Utility::parseFromBody(absl::string_view body) { - return parseParameters(body, 0); + return parseParameters(body, 0, /*decode_params=*/true); } -Utility::QueryParams Utility::parseParameters(absl::string_view data, size_t start) { +Utility::QueryParams Utility::parseParameters(absl::string_view data, size_t start, + bool decode_params) { QueryParams params; while (start < data.size()) { @@ -252,8 +264,10 @@ Utility::QueryParams Utility::parseParameters(absl::string_view data, size_t sta const size_t equal = param.find('='); if (equal != std::string::npos) { - params.emplace(StringUtil::subspan(data, start, start + equal), - StringUtil::subspan(data, start + equal + 1, end)); + const auto param_name = StringUtil::subspan(data, start, start + equal); + const auto param_value = StringUtil::subspan(data, start + equal + 1, end); + params.emplace(decode_params ? PercentEncoding::decode(param_name) : param_name, + decode_params ? PercentEncoding::decode(param_value) : param_value); } else { params.emplace(StringUtil::subspan(data, start, end), ""); } diff --git a/source/common/http/utility.h b/source/common/http/utility.h index 69778024e8a7..4fef6cc23327 100644 --- a/source/common/http/utility.h +++ b/source/common/http/utility.h @@ -176,6 +176,13 @@ std::string createSslRedirectPath(const RequestHeaderMap& headers); */ QueryParams parseQueryString(absl::string_view url); +/** + * Parse a URL into query parameters. + * @param url supplies the url to parse. + * @return QueryParams the parsed and percent-decoded parameters, if any. + */ +QueryParams parseAndDecodeQueryString(absl::string_view url); + /** * Parse a a request body into query parameters. * @param body supplies the body to parse. @@ -187,9 +194,11 @@ QueryParams parseFromBody(absl::string_view body); * Parse query parameters from a URL or body. * @param data supplies the data to parse. * @param start supplies the offset within the data. + * @param decode_params supplies the flag whether to percent-decode the parsed parameters (both name + * and value). Set to false to keep the parameters encoded. * @return QueryParams the parsed parameters, if any. 
*/ -QueryParams parseParameters(absl::string_view data, size_t start); +QueryParams parseParameters(absl::string_view data, size_t start, bool decode_params); /** * Finds the start of the query string in a path diff --git a/source/extensions/filters/http/jwt_authn/extractor.cc b/source/extensions/filters/http/jwt_authn/extractor.cc index 6e02093c0749..338187e6a139 100644 --- a/source/extensions/filters/http/jwt_authn/extractor.cc +++ b/source/extensions/filters/http/jwt_authn/extractor.cc @@ -208,7 +208,7 @@ ExtractorImpl::extract(const Http::RequestHeaderMap& headers) const { } // Check query parameter locations. - const auto& params = Http::Utility::parseQueryString(headers.getPathValue()); + const auto& params = Http::Utility::parseAndDecodeQueryString(headers.getPathValue()); for (const auto& location_it : param_locations_) { const auto& param_key = location_it.first; const auto& location_spec = location_it.second; diff --git a/source/server/admin/admin.cc b/source/server/admin/admin.cc index dd52e2807ffb..5b3d3b3c4255 100644 --- a/source/server/admin/admin.cc +++ b/source/server/admin/admin.cc @@ -442,7 +442,7 @@ void AdminImpl::writeClustersAsText(Buffer::Instance& response) { Http::Code AdminImpl::handlerClusters(absl::string_view url, Http::ResponseHeaderMap& response_headers, Buffer::Instance& response, AdminStream&) { - Http::Utility::QueryParams query_params = Http::Utility::parseQueryString(url); + Http::Utility::QueryParams query_params = Http::Utility::parseAndDecodeQueryString(url); const auto format_value = Utility::formatParam(query_params); if (format_value.has_value() && format_value.value() == "json") { @@ -622,7 +622,7 @@ ProtobufTypes::MessagePtr AdminImpl::dumpEndpointConfigs() const { Http::Code AdminImpl::handlerConfigDump(absl::string_view url, Http::ResponseHeaderMap& response_headers, Buffer::Instance& response, AdminStream&) const { - Http::Utility::QueryParams query_params = Http::Utility::parseQueryString(url); + Http::Utility::QueryParams query_params = Http::Utility::parseAndDecodeQueryString(url); const auto resource = resourceParam(query_params); const auto mask = maskParam(query_params); const bool include_eds = shouldIncludeEdsInDump(query_params); diff --git a/source/server/admin/profiling_handler.cc b/source/server/admin/profiling_handler.cc index 121daeb9976b..243b8292a0af 100644 --- a/source/server/admin/profiling_handler.cc +++ b/source/server/admin/profiling_handler.cc @@ -11,7 +11,7 @@ ProfilingHandler::ProfilingHandler(const std::string& profile_path) : profile_pa Http::Code ProfilingHandler::handlerCpuProfiler(absl::string_view url, Http::ResponseHeaderMap&, Buffer::Instance& response, AdminStream&) { - Http::Utility::QueryParams query_params = Http::Utility::parseQueryString(url); + Http::Utility::QueryParams query_params = Http::Utility::parseAndDecodeQueryString(url); if (query_params.size() != 1 || query_params.begin()->first != "enable" || (query_params.begin()->second != "y" && query_params.begin()->second != "n")) { response.add("?enable=\n"); @@ -40,7 +40,7 @@ Http::Code ProfilingHandler::handlerHeapProfiler(absl::string_view url, Http::Re return Http::Code::NotImplemented; } - Http::Utility::QueryParams query_params = Http::Utility::parseQueryString(url); + Http::Utility::QueryParams query_params = Http::Utility::parseAndDecodeQueryString(url); if (query_params.size() != 1 || query_params.begin()->first != "enable" || (query_params.begin()->second != "y" && query_params.begin()->second != "n")) { response.add("?enable=\n"); diff --git 
a/source/server/admin/runtime_handler.cc b/source/server/admin/runtime_handler.cc index d2ee6dd84813..869427f694b8 100644 --- a/source/server/admin/runtime_handler.cc +++ b/source/server/admin/runtime_handler.cc @@ -18,7 +18,7 @@ RuntimeHandler::RuntimeHandler(Server::Instance& server) : HandlerContextBase(se Http::Code RuntimeHandler::handlerRuntime(absl::string_view url, Http::ResponseHeaderMap& response_headers, Buffer::Instance& response, AdminStream&) { - const Http::Utility::QueryParams params = Http::Utility::parseQueryString(url); + const Http::Utility::QueryParams params = Http::Utility::parseAndDecodeQueryString(url); response_headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Json); // TODO(jsedgwick): Use proto to structure this output instead of arbitrary JSON. @@ -80,7 +80,7 @@ Http::Code RuntimeHandler::handlerRuntime(absl::string_view url, Http::Code RuntimeHandler::handlerRuntimeModify(absl::string_view url, Http::ResponseHeaderMap&, Buffer::Instance& response, AdminStream& admin_stream) { - Http::Utility::QueryParams params = Http::Utility::parseQueryString(url); + Http::Utility::QueryParams params = Http::Utility::parseAndDecodeQueryString(url); if (params.empty()) { // Check if the params are in the request's body. if (admin_stream.getRequestBody() != nullptr && diff --git a/source/server/admin/stats_handler.cc b/source/server/admin/stats_handler.cc index 3ec4702b2c5a..753774f59dc9 100644 --- a/source/server/admin/stats_handler.cc +++ b/source/server/admin/stats_handler.cc @@ -72,7 +72,7 @@ Http::Code StatsHandler::handlerStats(absl::string_view url, Http::ResponseHeaderMap& response_headers, Buffer::Instance& response, AdminStream& admin_stream) { Http::Code rc = Http::Code::OK; - const Http::Utility::QueryParams params = Http::Utility::parseQueryString(url); + const Http::Utility::QueryParams params = Http::Utility::parseAndDecodeQueryString(url); const bool used_only = params.find("usedonly") != params.end(); absl::optional regex; @@ -140,7 +140,8 @@ Http::Code StatsHandler::handlerStats(absl::string_view url, Http::Code StatsHandler::handlerPrometheusStats(absl::string_view path_and_query, Http::ResponseHeaderMap&, Buffer::Instance& response, AdminStream&) { - const Http::Utility::QueryParams params = Http::Utility::parseQueryString(path_and_query); + const Http::Utility::QueryParams params = + Http::Utility::parseAndDecodeQueryString(path_and_query); const bool used_only = params.find("usedonly") != params.end(); absl::optional regex; if (!Utility::filterParam(params, response, regex)) { diff --git a/test/common/http/utility_fuzz_test.cc b/test/common/http/utility_fuzz_test.cc index 18d5c0c4c388..2b665893f50f 100644 --- a/test/common/http/utility_fuzz_test.cc +++ b/test/common/http/utility_fuzz_test.cc @@ -18,6 +18,7 @@ DEFINE_PROTO_FUZZER(const test::common::http::UtilityTestCase& input) { } switch (input.utility_selector_case()) { case test::common::http::UtilityTestCase::kParseQueryString: { + // TODO(dio): Add the case when using parseAndDecodeQueryString(). Http::Utility::parseQueryString(input.parse_query_string()); break; } @@ -57,7 +58,9 @@ DEFINE_PROTO_FUZZER(const test::common::http::UtilityTestCase& input) { } case test::common::http::UtilityTestCase::kParseParameters: { const auto& parse_parameters = input.parse_parameters(); - Http::Utility::parseParameters(parse_parameters.data(), parse_parameters.start()); + // TODO(dio): Add a case when doing parse_parameters with decode_params flag true. 
+ Http::Utility::parseParameters(parse_parameters.data(), parse_parameters.start(), + /*decode_params*/ false); break; } case test::common::http::UtilityTestCase::kFindQueryString: { diff --git a/test/common/http/utility_test.cc b/test/common/http/utility_test.cc index 4a42d7d8fd9e..47c12303d0b6 100644 --- a/test/common/http/utility_test.cc +++ b/test/common/http/utility_test.cc @@ -29,16 +29,65 @@ namespace Http { TEST(HttpUtility, parseQueryString) { EXPECT_EQ(Utility::QueryParams(), Utility::parseQueryString("/hello")); + EXPECT_EQ(Utility::QueryParams(), Utility::parseAndDecodeQueryString("/hello")); + EXPECT_EQ(Utility::QueryParams(), Utility::parseQueryString("/hello?")); + EXPECT_EQ(Utility::QueryParams(), Utility::parseAndDecodeQueryString("/hello?")); + EXPECT_EQ(Utility::QueryParams({{"hello", ""}}), Utility::parseQueryString("/hello?hello")); + EXPECT_EQ(Utility::QueryParams({{"hello", ""}}), + Utility::parseAndDecodeQueryString("/hello?hello")); + EXPECT_EQ(Utility::QueryParams({{"hello", "world"}}), Utility::parseQueryString("/hello?hello=world")); + EXPECT_EQ(Utility::QueryParams({{"hello", "world"}}), + Utility::parseAndDecodeQueryString("/hello?hello=world")); + EXPECT_EQ(Utility::QueryParams({{"hello", ""}}), Utility::parseQueryString("/hello?hello=")); + EXPECT_EQ(Utility::QueryParams({{"hello", ""}}), + Utility::parseAndDecodeQueryString("/hello?hello=")); + EXPECT_EQ(Utility::QueryParams({{"hello", ""}}), Utility::parseQueryString("/hello?hello=&")); + EXPECT_EQ(Utility::QueryParams({{"hello", ""}}), + Utility::parseAndDecodeQueryString("/hello?hello=&")); + EXPECT_EQ(Utility::QueryParams({{"hello", ""}, {"hello2", "world2"}}), Utility::parseQueryString("/hello?hello=&hello2=world2")); + EXPECT_EQ(Utility::QueryParams({{"hello", ""}, {"hello2", "world2"}}), + Utility::parseAndDecodeQueryString("/hello?hello=&hello2=world2")); + EXPECT_EQ(Utility::QueryParams({{"name", "admin"}, {"level", "trace"}}), Utility::parseQueryString("/logging?name=admin&level=trace")); + EXPECT_EQ(Utility::QueryParams({{"name", "admin"}, {"level", "trace"}}), + Utility::parseAndDecodeQueryString("/logging?name=admin&level=trace")); + + EXPECT_EQ(Utility::QueryParams({{"param_value_has_encoded_ampersand", "a%26b"}}), + Utility::parseQueryString("/hello?param_value_has_encoded_ampersand=a%26b")); + EXPECT_EQ(Utility::QueryParams({{"param_value_has_encoded_ampersand", "a&b"}}), + Utility::parseAndDecodeQueryString("/hello?param_value_has_encoded_ampersand=a%26b")); + + EXPECT_EQ(Utility::QueryParams({{"params_has_encoded_%26", "a%26b"}, {"ok", "1"}}), + Utility::parseQueryString("/hello?params_has_encoded_%26=a%26b&ok=1")); + EXPECT_EQ(Utility::QueryParams({{"params_has_encoded_&", "a&b"}, {"ok", "1"}}), + Utility::parseAndDecodeQueryString("/hello?params_has_encoded_%26=a%26b&ok=1")); + + // A sample of request path with query strings by Prometheus: + // https://github.com/envoyproxy/envoy/issues/10926#issuecomment-651085261. 
+ EXPECT_EQ( + Utility::QueryParams( + {{"filter", + "%28cluster.upstream_%28rq_total%7Crq_time_sum%7Crq_time_count%7Crq_time_" + "bucket%7Crq_xx%7Crq_complete%7Crq_active%7Ccx_active%29%29%7C%28server.version%29"}}), + Utility::parseQueryString( + "/stats?filter=%28cluster.upstream_%28rq_total%7Crq_time_sum%7Crq_time_count%7Crq_time_" + "bucket%7Crq_xx%7Crq_complete%7Crq_active%7Ccx_active%29%29%7C%28server.version%29")); + EXPECT_EQ( + Utility::QueryParams( + {{"filter", "(cluster.upstream_(rq_total|rq_time_sum|rq_time_count|rq_time_bucket|rq_xx|" + "rq_complete|rq_active|cx_active))|(server.version)"}}), + Utility::parseAndDecodeQueryString( + "/stats?filter=%28cluster.upstream_%28rq_total%7Crq_time_sum%7Crq_time_count%7Crq_time_" + "bucket%7Crq_xx%7Crq_complete%7Crq_active%7Ccx_active%29%29%7C%28server.version%29")); } TEST(HttpUtility, getResponseStatus) { @@ -1212,7 +1261,19 @@ TEST(PercentEncoding, EncodeDecode) { validatePercentEncodingEncodeDecode("_-ok-_", "_-ok-_"); } -TEST(PercentEncoding, Trailing) { +TEST(PercentEncoding, Decoding) { + EXPECT_EQ(Utility::PercentEncoding::decode("a%26b"), "a&b"); + EXPECT_EQ(Utility::PercentEncoding::decode("hello%20world"), "hello world"); + EXPECT_EQ(Utility::PercentEncoding::decode("upstream%7Cdownstream"), "upstream|downstream"); + EXPECT_EQ( + Utility::PercentEncoding::decode( + "filter=%28cluster.upstream_%28rq_total%7Crq_time_sum%7Crq_time_count%7Crq_time_bucket%" + "7Crq_xx%7Crq_complete%7Crq_active%7Ccx_active%29%29%7C%28server.version%29"), + "filter=(cluster.upstream_(rq_total|rq_time_sum|rq_time_count|rq_time_bucket|rq_xx|rq_" + "complete|rq_active|cx_active))|(server.version)"); +} + +TEST(PercentEncoding, DecodingWithTrailingInput) { EXPECT_EQ(Utility::PercentEncoding::decode("too%20lar%20"), "too lar "); EXPECT_EQ(Utility::PercentEncoding::decode("too%20larg%e"), "too larg%e"); EXPECT_EQ(Utility::PercentEncoding::decode("too%20large%"), "too large%"); From 84affaad1173a1e48bbee5b3049c305a555eb3b2 Mon Sep 17 00:00:00 2001 From: Jose Ulises Nino Rivera Date: Sat, 11 Jul 2020 18:50:31 -0700 Subject: [PATCH 602/909] decompressor library: Add stats to zlib library (#11782) This adds decompressor error stats to the zlib decompressor library. This introduces a lot of boilerplate but will make it easier to continue to add stats in the future. Additionally, the compressor library can use the same pattern. Risk Level: Low. Testing: Updated. Docs Changes: Added. 
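A condensed sketch of the new construction and error-accounting pattern,
based on the test changes in this patch (namespaces and includes omitted;
the window-bits value and the corrupt input are illustrative assumptions):

    // The decompressor now takes a stats scope and prefix at construction.
    Stats::IsolatedStoreImpl stats_store;
    ZlibDecompressorImpl decompressor{stats_store, "test."};
    decompressor.init(31); // 15-bit window plus the gzip-header flag.

    Buffer::OwnedImpl input;  // pretend this holds a corrupt gzip payload
    Buffer::OwnedImpl output;
    decompressor.decompress(input, output);

    // A negative zlib return code is charged to the matching counter, e.g.
    // stats_store.counterFromString("test.zlib_data_error").value() for Z_DATA_ERROR.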
Signed-off-by: Jose Nino --- .../http/http_filters/decompressor_filter.rst | 3 + .../envoy/compression/decompressor/factory.h | 2 +- .../common/decompressor/factory_base.h | 6 +- .../compression/gzip/decompressor/BUILD | 2 + .../compression/gzip/decompressor/config.cc | 15 +- .../compression/gzip/decompressor/config.h | 10 +- .../decompressor/zlib_decompressor_impl.cc | 50 +++++-- .../decompressor/zlib_decompressor_impl.h | 36 ++++- .../http/decompressor/decompressor_filter.cc | 1 + .../http/decompressor/decompressor_filter.h | 3 +- .../compression/gzip/compressor_fuzz_test.cc | 5 +- .../compression/gzip/decompressor/BUILD | 1 + .../zlib_decompressor_impl_test.cc | 54 ++++++-- .../compressor_filter_integration_test.cc | 4 +- .../decompressor_filter_integration_test.cc | 130 ++++++++++++++++-- .../decompressor/decompressor_filter_test.cc | 84 ++++++++--- .../http/gzip/gzip_filter_integration_test.cc | 4 +- .../filters/http/gzip/gzip_filter_test.cc | 3 +- test/mocks/compression/decompressor/mocks.h | 2 +- 19 files changed, 343 insertions(+), 72 deletions(-) diff --git a/docs/root/configuration/http/http_filters/decompressor_filter.rst b/docs/root/configuration/http/http_filters/decompressor_filter.rst index 898f9d1f0fd6..eb71d1c2df66 100644 --- a/docs/root/configuration/http/http_filters/decompressor_filter.rst +++ b/docs/root/configuration/http/http_filters/decompressor_filter.rst @@ -110,3 +110,6 @@ with the following: not_decompressed, Counter, Number of request/responses not compressed. total_uncompressed_bytes, Counter, The total uncompressed bytes of all the request/responses that were marked for decompression. total_compressed_bytes, Counter, The total compressed bytes of all the request/responses that were marked for decompression. + +Additional stats for the decompressor library are rooted at +.decompressor...decompressor_library. diff --git a/include/envoy/compression/decompressor/factory.h b/include/envoy/compression/decompressor/factory.h index e0a38713b42f..8e3692f56ede 100644 --- a/include/envoy/compression/decompressor/factory.h +++ b/include/envoy/compression/decompressor/factory.h @@ -10,7 +10,7 @@ class DecompressorFactory { public: virtual ~DecompressorFactory() = default; - virtual DecompressorPtr createDecompressor() PURE; + virtual DecompressorPtr createDecompressor(const std::string& stats_prefix) PURE; virtual const std::string& statsPrefix() const PURE; // TODO(junr03): this method assumes that decompressors are used on http messages. 
// A more generic method might be `hint()` which gives the user of the decompressor a hint about diff --git a/source/extensions/compression/common/decompressor/factory_base.h b/source/extensions/compression/common/decompressor/factory_base.h index 98144e02e1b6..7bf3c1571f7f 100644 --- a/source/extensions/compression/common/decompressor/factory_base.h +++ b/source/extensions/compression/common/decompressor/factory_base.h @@ -17,7 +17,8 @@ class DecompressorLibraryFactoryBase Server::Configuration::FactoryContext& context) override { return createDecompressorFactoryFromProtoTyped( MessageUtil::downcastAndValidate(proto_config, - context.messageValidationVisitor())); + context.messageValidationVisitor()), + context); } ProtobufTypes::MessagePtr createEmptyConfigProto() override { @@ -31,7 +32,8 @@ class DecompressorLibraryFactoryBase private: virtual Envoy::Compression::Decompressor::DecompressorFactoryPtr - createDecompressorFactoryFromProtoTyped(const ConfigProto&) PURE; + createDecompressorFactoryFromProtoTyped(const ConfigProto& proto_config, + Server::Configuration::FactoryContext& context) PURE; const std::string name_; }; diff --git a/source/extensions/compression/gzip/decompressor/BUILD b/source/extensions/compression/gzip/decompressor/BUILD index f31199e80811..9c86b64ef61b 100644 --- a/source/extensions/compression/gzip/decompressor/BUILD +++ b/source/extensions/compression/gzip/decompressor/BUILD @@ -16,6 +16,8 @@ envoy_cc_library( external_deps = ["zlib"], deps = [ "//include/envoy/compression/decompressor:decompressor_interface", + "//include/envoy/stats:stats_interface", + "//include/envoy/stats:stats_macros", "//source/common/buffer:buffer_lib", "//source/common/common:assert_lib", "//source/common/common:minimal_logger_lib", diff --git a/source/extensions/compression/gzip/decompressor/config.cc b/source/extensions/compression/gzip/decompressor/config.cc index bf73e3eba697..f2b845de1433 100644 --- a/source/extensions/compression/gzip/decompressor/config.cc +++ b/source/extensions/compression/gzip/decompressor/config.cc @@ -15,21 +15,24 @@ const uint32_t GzipHeaderValue = 16; } // namespace GzipDecompressorFactory::GzipDecompressorFactory( - const envoy::extensions::compression::gzip::decompressor::v3::Gzip& gzip) - : window_bits_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(gzip, window_bits, DefaultWindowBits) | + const envoy::extensions::compression::gzip::decompressor::v3::Gzip& gzip, Stats::Scope& scope) + : scope_(scope), + window_bits_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(gzip, window_bits, DefaultWindowBits) | GzipHeaderValue), chunk_size_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(gzip, chunk_size, DefaultChunkSize)) {} -Envoy::Compression::Decompressor::DecompressorPtr GzipDecompressorFactory::createDecompressor() { - auto decompressor = std::make_unique(chunk_size_); +Envoy::Compression::Decompressor::DecompressorPtr +GzipDecompressorFactory::createDecompressor(const std::string& stats_prefix) { + auto decompressor = std::make_unique(scope_, stats_prefix, chunk_size_); decompressor->init(window_bits_); return decompressor; } Envoy::Compression::Decompressor::DecompressorFactoryPtr GzipDecompressorLibraryFactory::createDecompressorFactoryFromProtoTyped( - const envoy::extensions::compression::gzip::decompressor::v3::Gzip& proto_config) { - return std::make_unique(proto_config); + const envoy::extensions::compression::gzip::decompressor::v3::Gzip& proto_config, + Server::Configuration::FactoryContext& context) { + return std::make_unique(proto_config, context.scope()); } /** diff --git 
a/source/extensions/compression/gzip/decompressor/config.h b/source/extensions/compression/gzip/decompressor/config.h index c2b8ded22562..34c9ca11bf57 100644 --- a/source/extensions/compression/gzip/decompressor/config.h +++ b/source/extensions/compression/gzip/decompressor/config.h @@ -25,16 +25,19 @@ const std::string& gzipExtensionName() { class GzipDecompressorFactory : public Envoy::Compression::Decompressor::DecompressorFactory { public: - GzipDecompressorFactory(const envoy::extensions::compression::gzip::decompressor::v3::Gzip& gzip); + GzipDecompressorFactory(const envoy::extensions::compression::gzip::decompressor::v3::Gzip& gzip, + Stats::Scope& scope); // Envoy::Compression::Decompressor::DecompressorFactory - Envoy::Compression::Decompressor::DecompressorPtr createDecompressor() override; + Envoy::Compression::Decompressor::DecompressorPtr + createDecompressor(const std::string& stats_prefix) override; const std::string& statsPrefix() const override { return gzipStatsPrefix(); } const std::string& contentEncoding() const override { return Http::CustomHeaders::get().ContentEncodingValues.Gzip; } private: + Stats::Scope& scope_; const int32_t window_bits_; const uint32_t chunk_size_; }; @@ -47,7 +50,8 @@ class GzipDecompressorLibraryFactory private: Envoy::Compression::Decompressor::DecompressorFactoryPtr createDecompressorFactoryFromProtoTyped( - const envoy::extensions::compression::gzip::decompressor::v3::Gzip& config) override; + const envoy::extensions::compression::gzip::decompressor::v3::Gzip& proto_config, + Server::Configuration::FactoryContext& context) override; }; DECLARE_FACTORY(GzipDecompressorLibraryFactory); diff --git a/source/extensions/compression/gzip/decompressor/zlib_decompressor_impl.cc b/source/extensions/compression/gzip/decompressor/zlib_decompressor_impl.cc index 4a1ca6251098..9066af8f0426 100644 --- a/source/extensions/compression/gzip/decompressor/zlib_decompressor_impl.cc +++ b/source/extensions/compression/gzip/decompressor/zlib_decompressor_impl.cc @@ -1,5 +1,7 @@ #include "extensions/compression/gzip/decompressor/zlib_decompressor_impl.h" +#include + #include #include "envoy/common/exception.h" @@ -14,13 +16,17 @@ namespace Compression { namespace Gzip { namespace Decompressor { -ZlibDecompressorImpl::ZlibDecompressorImpl() : ZlibDecompressorImpl(4096) {} +ZlibDecompressorImpl::ZlibDecompressorImpl(Stats::Scope& scope, const std::string& stats_prefix) + : ZlibDecompressorImpl(scope, stats_prefix, 4096) {} -ZlibDecompressorImpl::ZlibDecompressorImpl(uint64_t chunk_size) - : Zlib::Base(chunk_size, [](z_stream* z) { - inflateEnd(z); - delete z; - }) { +ZlibDecompressorImpl::ZlibDecompressorImpl(Stats::Scope& scope, const std::string& stats_prefix, + uint64_t chunk_size) + : Zlib::Base(chunk_size, + [](z_stream* z) { + inflateEnd(z); + delete z; + }), + stats_(generateStats(stats_prefix, scope)) { zstream_ptr_->zalloc = Z_NULL; zstream_ptr_->zfree = Z_NULL; zstream_ptr_->opaque = Z_NULL; @@ -67,16 +73,40 @@ bool ZlibDecompressorImpl::inflateNext() { if (result < 0) { decompression_error_ = result; - ENVOY_LOG( - trace, - "zlib decompression error: {}. Error codes are defined in https://www.zlib.net/manual.html", - result); + ENVOY_LOG(trace, + "zlib decompression error: {}, msg: {}. 
Error codes are defined in " + "https://www.zlib.net/manual.html", + result, zstream_ptr_->msg); + chargeErrorStats(result); return false; } return true; } +void ZlibDecompressorImpl::chargeErrorStats(const int result) { + switch (result) { + case Z_ERRNO: + stats_.zlib_errno_.inc(); + break; + case Z_STREAM_ERROR: + stats_.zlib_stream_error_.inc(); + break; + case Z_DATA_ERROR: + stats_.zlib_data_error_.inc(); + break; + case Z_MEM_ERROR: + stats_.zlib_mem_error_.inc(); + break; + case Z_BUF_ERROR: + stats_.zlib_buf_error_.inc(); + break; + case Z_VERSION_ERROR: + stats_.zlib_version_error_.inc(); + break; + } +} + } // namespace Decompressor } // namespace Gzip } // namespace Compression diff --git a/source/extensions/compression/gzip/decompressor/zlib_decompressor_impl.h b/source/extensions/compression/gzip/decompressor/zlib_decompressor_impl.h index a4f27adb5658..ec20b8c8dbca 100644 --- a/source/extensions/compression/gzip/decompressor/zlib_decompressor_impl.h +++ b/source/extensions/compression/gzip/decompressor/zlib_decompressor_impl.h @@ -1,6 +1,8 @@ #pragma once #include "envoy/compression/decompressor/decompressor.h" +#include "envoy/stats/scope.h" +#include "envoy/stats/stats_macros.h" #include "common/common/logger.h" @@ -14,6 +16,24 @@ namespace Compression { namespace Gzip { namespace Decompressor { +/** + * All zlib decompressor stats. @see stats_macros.h + */ +#define ALL_ZLIB_DECOMPRESSOR_STATS(COUNTER) \ + COUNTER(zlib_errno) \ + COUNTER(zlib_stream_error) \ + COUNTER(zlib_data_error) \ + COUNTER(zlib_mem_error) \ + COUNTER(zlib_buf_error) \ + COUNTER(zlib_version_error) + +/** + * Struct definition for zlib decompressor stats. @see stats_macros.h + */ +struct ZlibDecompressorStats { + ALL_ZLIB_DECOMPRESSOR_STATS(GENERATE_COUNTER_STRUCT) +}; + /** * Implementation of decompressor's interface. */ @@ -21,7 +41,7 @@ class ZlibDecompressorImpl : public Zlib::Base, public Envoy::Compression::Decompressor::Decompressor, public Logger::Loggable { public: - ZlibDecompressorImpl(); + ZlibDecompressorImpl(Stats::Scope& scope, const std::string& stats_prefix); /** * Constructor that allows setting the size of decompressor's output buffer. It @@ -31,7 +51,7 @@ class ZlibDecompressorImpl : public Zlib::Base, * 256K bytes. @see http://zlib.net/zlib_how.html * @param chunk_size amount of memory reserved for the decompressor output. */ - ZlibDecompressorImpl(uint64_t chunk_size); + ZlibDecompressorImpl(Stats::Scope& scope, const std::string& stats_prefix, uint64_t chunk_size); /** * Init must be called in order to initialize the decompressor. Once decompressor is initialized, @@ -49,7 +69,19 @@ class ZlibDecompressorImpl : public Zlib::Base, int decompression_error_{0}; private: + // TODO: clean up friend class. This is here to allow coverage of chargeErrorStats as it isn't + // completely straightforward + // to cause zlib's inflate function to return all the error codes necessary to hit all the cases + // in the switch statement. 
+ friend class ZlibDecompressorStatsTest; + static ZlibDecompressorStats generateStats(const std::string& prefix, Stats::Scope& scope) { + return ZlibDecompressorStats{ALL_ZLIB_DECOMPRESSOR_STATS(POOL_COUNTER_PREFIX(scope, prefix))}; + } + bool inflateNext(); + void chargeErrorStats(const int result); + + const ZlibDecompressorStats stats_; }; } // namespace Decompressor diff --git a/source/extensions/filters/http/decompressor/decompressor_filter.cc b/source/extensions/filters/http/decompressor/decompressor_filter.cc index ae36a25457cd..1c5efe245dac 100644 --- a/source/extensions/filters/http/decompressor/decompressor_filter.cc +++ b/source/extensions/filters/http/decompressor/decompressor_filter.cc @@ -27,6 +27,7 @@ DecompressorFilterConfig::DecompressorFilterConfig( : stats_prefix_(fmt::format("{}decompressor.{}.{}", stats_prefix, proto_config.decompressor_library().name(), decompressor_factory->statsPrefix())), + decompressor_stats_prefix_(stats_prefix_ + "decompressor_library"), decompressor_factory_(std::move(decompressor_factory)), request_direction_config_(proto_config.request_direction_config(), stats_prefix_, scope, runtime), diff --git a/source/extensions/filters/http/decompressor/decompressor_filter.h b/source/extensions/filters/http/decompressor/decompressor_filter.h index ec6df0b35539..9dabae66f0aa 100644 --- a/source/extensions/filters/http/decompressor/decompressor_filter.h +++ b/source/extensions/filters/http/decompressor/decompressor_filter.h @@ -95,7 +95,7 @@ class DecompressorFilterConfig { Compression::Decompressor::DecompressorFactoryPtr decompressor_factory); Compression::Decompressor::DecompressorPtr makeDecompressor() { - return decompressor_factory_->createDecompressor(); + return decompressor_factory_->createDecompressor(decompressor_stats_prefix_); } const std::string& contentEncoding() { return decompressor_factory_->contentEncoding(); } const RequestDirectionConfig& requestDirectionConfig() { return request_direction_config_; } @@ -103,6 +103,7 @@ class DecompressorFilterConfig { private: const std::string stats_prefix_; + const std::string decompressor_stats_prefix_; const Compression::Decompressor::DecompressorFactoryPtr decompressor_factory_; const RequestDirectionConfig request_direction_config_; const ResponseDirectionConfig response_direction_config_; diff --git a/test/extensions/compression/gzip/compressor_fuzz_test.cc b/test/extensions/compression/gzip/compressor_fuzz_test.cc index da76007c8989..bdaa5283e53a 100644 --- a/test/extensions/compression/gzip/compressor_fuzz_test.cc +++ b/test/extensions/compression/gzip/compressor_fuzz_test.cc @@ -1,5 +1,6 @@ #include "common/buffer/buffer_impl.h" #include "common/common/assert.h" +#include "common/stats/isolated_store_impl.h" #include "extensions/compression/gzip/compressor/zlib_compressor_impl.h" #include "extensions/compression/gzip/decompressor/zlib_decompressor_impl.h" @@ -19,9 +20,11 @@ namespace Fuzz { // trip compress-decompress pair; the decompressor itself is not fuzzed beyond // whatever the compressor emits, as it exists only as a test utility today. DEFINE_FUZZER(const uint8_t* buf, size_t len) { + FuzzedDataProvider provider(buf, len); ZlibCompressorImpl compressor; - Decompressor::ZlibDecompressorImpl decompressor; + Stats::IsolatedStoreImpl stats_store; + Decompressor::ZlibDecompressorImpl decompressor{stats_store, "test"}; // Select target compression level. We can't use ConsumeEnum() since the range // is non-contiguous. 
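As an aside for reviewers, here is a minimal usage sketch of the stats-enabled decompressor introduced above; the isolated store, the "decompressor_library." prefix, and the input bytes are illustrative assumptions rather than part of this change:

// The constructor now requires a Stats::Scope and a stats prefix.
Envoy::Stats::IsolatedStoreImpl store;
Envoy::Extensions::Compression::Gzip::Decompressor::ZlibDecompressorImpl
    decompressor{store, "decompressor_library."};
decompressor.init(31); // gzip window bits

// Data that is not a valid gzip stream makes inflate() return Z_DATA_ERROR,
// which chargeErrorStats() maps to the zlib_data_error counter.
Envoy::Buffer::OwnedImpl bad_input;
Envoy::Buffer::OwnedImpl output;
bad_input.add("not a gzip stream");
decompressor.decompress(bad_input, output);
ASSERT(store.counterFromString("decompressor_library.zlib_data_error").value() >= 1);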
diff --git a/test/extensions/compression/gzip/decompressor/BUILD b/test/extensions/compression/gzip/decompressor/BUILD index 19520c24b545..bc732fc1a7c3 100644 --- a/test/extensions/compression/gzip/decompressor/BUILD +++ b/test/extensions/compression/gzip/decompressor/BUILD @@ -18,6 +18,7 @@ envoy_extension_cc_test( deps = [ "//source/common/common:assert_lib", "//source/common/common:hex_lib", + "//source/common/stats:isolated_store_lib", "//source/extensions/compression/gzip/compressor:compressor_lib", "//source/extensions/compression/gzip/decompressor:zlib_decompressor_impl_lib", "//test/test_common:utility_lib", diff --git a/test/extensions/compression/gzip/decompressor/zlib_decompressor_impl_test.cc b/test/extensions/compression/gzip/decompressor/zlib_decompressor_impl_test.cc index 87cd0ecf5b1a..782fd9af090e 100644 --- a/test/extensions/compression/gzip/decompressor/zlib_decompressor_impl_test.cc +++ b/test/extensions/compression/gzip/decompressor/zlib_decompressor_impl_test.cc @@ -1,5 +1,6 @@ #include "common/buffer/buffer_impl.h" #include "common/common/hex.h" +#include "common/stats/isolated_store_impl.h" #include "extensions/compression/gzip/compressor/zlib_compressor_impl.h" #include "extensions/compression/gzip/decompressor/zlib_decompressor_impl.h" @@ -13,7 +14,6 @@ namespace Extensions { namespace Compression { namespace Gzip { namespace Decompressor { -namespace { class ZlibDecompressorImplTest : public testing::Test { protected: @@ -46,7 +46,8 @@ class ZlibDecompressorImplTest : public testing::Test { drainBuffer(buffer); ASSERT_EQ(0, buffer.length()); - ZlibDecompressorImpl decompressor; + Stats::IsolatedStoreImpl stats_store{}; + ZlibDecompressorImpl decompressor{stats_store, "test."}; decompressor.init(window_bits); decompressor.decompress(accumulation_buffer, buffer); @@ -66,17 +67,20 @@ class ZlibDecompressorImplTest : public testing::Test { class ZlibDecompressorImplFailureTest : public ZlibDecompressorImplTest { protected: static void decompressorBadInitTestHelper(int64_t window_bits) { - ZlibDecompressorImpl decompressor; + Stats::IsolatedStoreImpl stats_store{}; + ZlibDecompressorImpl decompressor{stats_store, "test."}; decompressor.init(window_bits); } static void uninitializedDecompressorTestHelper() { Buffer::OwnedImpl input_buffer; Buffer::OwnedImpl output_buffer; - ZlibDecompressorImpl decompressor; + Stats::IsolatedStoreImpl stats_store{}; + ZlibDecompressorImpl decompressor{stats_store, "test."}; TestUtility::feedBufferWithRandomCharacters(input_buffer, 100); decompressor.decompress(input_buffer, output_buffer); ASSERT_TRUE(decompressor.decompression_error_ < 0); + ASSERT_EQ(stats_store.counterFromString("test.zlib_stream_error").value(), 1); } }; @@ -105,7 +109,8 @@ TEST_F(ZlibDecompressorImplTest, CallingChecksum) { compressor.compress(compressor_buffer, Envoy::Compression::Compressor::State::Flush); ASSERT_TRUE(compressor.checksum() > 0); - ZlibDecompressorImpl decompressor; + Stats::IsolatedStoreImpl stats_store{}; + ZlibDecompressorImpl decompressor{stats_store, "test."}; decompressor.init(gzip_window_bits); EXPECT_EQ(0, decompressor.checksum()); @@ -150,7 +155,8 @@ TEST_F(ZlibDecompressorImplTest, CompressAndDecompress) { drainBuffer(buffer); ASSERT_EQ(0, buffer.length()); - ZlibDecompressorImpl decompressor; + Stats::IsolatedStoreImpl stats_store{}; + ZlibDecompressorImpl decompressor{stats_store, "test."}; decompressor.init(gzip_window_bits); decompressor.decompress(accumulation_buffer, buffer); @@ -180,12 +186,14 @@ TEST_F(ZlibDecompressorImplTest, 
FailedDecompression) { accumulation_buffer.add(buffer); drainBuffer(buffer); } - ZlibDecompressorImpl decompressor; + Stats::IsolatedStoreImpl stats_store{}; + ZlibDecompressorImpl decompressor{stats_store, "test."}; decompressor.init(gzip_window_bits); decompressor.decompress(accumulation_buffer, buffer); ASSERT_TRUE(decompressor.decompression_error_ < 0); + ASSERT_EQ(stats_store.counterFromString("test.zlib_data_error").value(), 17); } // Exercises decompression with a very small output buffer. @@ -218,7 +226,8 @@ TEST_F(ZlibDecompressorImplTest, DecompressWithSmallOutputBuffer) { drainBuffer(buffer); ASSERT_EQ(0, buffer.length()); - ZlibDecompressorImpl decompressor(16); + Stats::IsolatedStoreImpl stats_store{}; + ZlibDecompressorImpl decompressor{stats_store, "test.", 16}; decompressor.init(gzip_window_bits); decompressor.decompress(accumulation_buffer, buffer); @@ -284,7 +293,8 @@ TEST_F(ZlibDecompressorImplTest, CompressDecompressOfMultipleSlices) { compressor.compress(buffer, Envoy::Compression::Compressor::State::Flush); accumulation_buffer.add(buffer); - ZlibDecompressorImpl decompressor; + Stats::IsolatedStoreImpl stats_store{}; + ZlibDecompressorImpl decompressor{stats_store, "test."}; decompressor.init(gzip_window_bits); drainBuffer(buffer); @@ -298,7 +308,31 @@ TEST_F(ZlibDecompressorImplTest, CompressDecompressOfMultipleSlices) { EXPECT_EQ(original_text, decompressed_text); } -} // namespace +class ZlibDecompressorStatsTest : public testing::Test { +protected: + void chargeErrorStats(const int result) { decompressor_.chargeErrorStats(result); } + + Stats::IsolatedStoreImpl stats_store_{}; + ZlibDecompressorImpl decompressor_{stats_store_, "test."}; +}; + +TEST_F(ZlibDecompressorStatsTest, ChargeErrorStats) { + decompressor_.init(31); + + chargeErrorStats(Z_ERRNO); + ASSERT_EQ(stats_store_.counterFromString("test.zlib_errno").value(), 1); + chargeErrorStats(Z_STREAM_ERROR); + ASSERT_EQ(stats_store_.counterFromString("test.zlib_stream_error").value(), 1); + chargeErrorStats(Z_DATA_ERROR); + ASSERT_EQ(stats_store_.counterFromString("test.zlib_data_error").value(), 1); + chargeErrorStats(Z_MEM_ERROR); + ASSERT_EQ(stats_store_.counterFromString("test.zlib_mem_error").value(), 1); + chargeErrorStats(Z_BUF_ERROR); + ASSERT_EQ(stats_store_.counterFromString("test.zlib_buf_error").value(), 1); + chargeErrorStats(Z_VERSION_ERROR); + ASSERT_EQ(stats_store_.counterFromString("test.zlib_version_error").value(), 1); +} + } // namespace Decompressor } // namespace Gzip } // namespace Compression diff --git a/test/extensions/filters/http/compressor/compressor_filter_integration_test.cc b/test/extensions/filters/http/compressor/compressor_filter_integration_test.cc index 2c112f07eb6a..0ded467631a2 100644 --- a/test/extensions/filters/http/compressor/compressor_filter_integration_test.cc +++ b/test/extensions/filters/http/compressor/compressor_filter_integration_test.cc @@ -97,7 +97,9 @@ class CompressorIntegrationTest : public testing::TestWithParamcompress(request_data1, Envoy::Compression::Compressor::State::Flush); auto compressed_request_length = request_data1.length(); codec_client_->sendData(*request_encoder, request_data1, false); // Send second data chunk upstream and finish the request stream. 
- Buffer::OwnedImpl request_data2(std::string(16384, 'a')); + Buffer::OwnedImpl request_data2; + TestUtility::feedBufferWithRandomCharacters(request_data2, 16384); uncompressed_request_length += request_data2.length(); request_compressor_->compress(request_data2, Envoy::Compression::Compressor::State::Finish); compressed_request_length += request_data2.length(); @@ -91,13 +93,12 @@ TEST_P(DecompressorIntegrationTest, BidirectionalDecompression) { // Assert that the total bytes received upstream equal the sum of the uncompressed byte buffers // sent. EXPECT_TRUE(upstream_request_->complete()); - TestUtility::headerMapEqualIgnoreOrder( - Http::TestRequestHeaderMapImpl{{":method", "POST"}, - {":scheme", "http"}, - {":path", "/test/long/url"}, - {":authority", "host"}, - {"accept-encoding", "wroong"}}, - upstream_request_->headers()); + EXPECT_EQ("chunked", upstream_request_->headers().TransferEncoding()->value().getStringView()); + EXPECT_EQ("gzip", upstream_request_->headers() + .get(Http::LowerCaseString("accept-encoding")) + ->value() + .getStringView()); + EXPECT_EQ(nullptr, upstream_request_->headers().get(Http::LowerCaseString("content-encoding"))); EXPECT_EQ(uncompressed_request_length, upstream_request_->bodyLength()); // Verify stats @@ -117,14 +118,16 @@ TEST_P(DecompressorIntegrationTest, BidirectionalDecompression) { Http::TestResponseHeaderMapImpl{{":status", "200"}, {"content-encoding", "gzip"}}, false); // Send first data chunk downstream. - Buffer::OwnedImpl response_data1(std::string(4096, 'a')); + Buffer::OwnedImpl response_data1; + TestUtility::feedBufferWithRandomCharacters(response_data1, 4096); auto uncompressed_response_length = response_data1.length(); response_compressor_->compress(response_data1, Envoy::Compression::Compressor::State::Flush); auto compressed_response_length = response_data1.length(); upstream_request_->encodeData(response_data1, false); // Send second data chunk downstream and finish the response stream. - Buffer::OwnedImpl response_data2(std::string(8192, 'a')); + Buffer::OwnedImpl response_data2; + TestUtility::feedBufferWithRandomCharacters(response_data2, 8192); uncompressed_response_length += response_data2.length(); response_compressor_->compress(response_data2, Envoy::Compression::Compressor::State::Flush); compressed_response_length += response_data2.length(); @@ -136,8 +139,7 @@ TEST_P(DecompressorIntegrationTest, BidirectionalDecompression) { // Assert that the total bytes received downstream equal the sum of the uncompressed byte buffers // sent. EXPECT_TRUE(response->complete()); - TestUtility::headerMapEqualIgnoreOrder(Http::TestRequestHeaderMapImpl{{":status", "200"}}, - response->headers()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); EXPECT_EQ(uncompressed_response_length, response->body().length()); // Verify stats @@ -153,4 +155,106 @@ TEST_P(DecompressorIntegrationTest, BidirectionalDecompression) { uncompressed_response_length); } +/** + * Exercises gzip decompression bidirectionally with configuration using incompatible window bits + * resulting in an error. 
+ */ +TEST_P(DecompressorIntegrationTest, BidirectionalDecompressionError) { + const std::string bad_config{R"EOF( + name: default_decompressor + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.decompressor.v3.Decompressor + decompressor_library: + name: testlib + typed_config: + "@type": type.googleapis.com/envoy.extensions.compression.gzip.decompressor.v3.Gzip + window_bits: 10 + )EOF"}; + // Use gzip for decompression. + initializeFilter(bad_config); + + // Enable request decompression by setting the Content-Encoding header to gzip. + auto encoder_decoder = + codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{":method", "POST"}, + {":scheme", "http"}, + {":path", "/test/long/url"}, + {":authority", "host"}, + {"content-encoding", "gzip"}}); + auto request_encoder = &encoder_decoder.first; + auto response = std::move(encoder_decoder.second); + + // Send first data chunk upstream. + Buffer::OwnedImpl request_data1; + TestUtility::feedBufferWithRandomCharacters(request_data1, 8192); + request_compressor_->compress(request_data1, Envoy::Compression::Compressor::State::Flush); + auto compressed_request_length = request_data1.length(); + codec_client_->sendData(*request_encoder, request_data1, false); + + // Send second data chunk upstream and finish the request stream. + Buffer::OwnedImpl request_data2; + TestUtility::feedBufferWithRandomCharacters(request_data2, 16384); + request_compressor_->compress(request_data2, Envoy::Compression::Compressor::State::Finish); + compressed_request_length += request_data2.length(); + codec_client_->sendData(*request_encoder, request_data2, true); + + // Wait for frames to arrive upstream. + ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); + + EXPECT_TRUE(upstream_request_->complete()); + EXPECT_EQ("chunked", upstream_request_->headers().TransferEncoding()->value().getStringView()); + EXPECT_EQ("gzip", upstream_request_->headers() + .get(Http::LowerCaseString("accept-encoding")) + ->value() + .getStringView()); + EXPECT_EQ(nullptr, upstream_request_->headers().get(Http::LowerCaseString("content-encoding"))); + + // Verify stats. While the stream was decompressed, there should be a decompression failure. + test_server_->waitForCounterEq("http.config_test.decompressor.testlib.gzip.request.decompressed", + 1); + test_server_->waitForCounterEq( + "http.config_test.decompressor.testlib.gzip.request.not_decompressed", 0); + test_server_->waitForCounterEq( + "http.config_test.decompressor.testlib.gzip.request.total_compressed_bytes", + compressed_request_length); + test_server_->waitForCounterEq( + "http.config_test.decompressor.testlib.gzip.decompressor_library.zlib_data_error", 2); + + // Enable response decompression by setting the Content-Encoding header to gzip. + upstream_request_->encodeHeaders( + Http::TestResponseHeaderMapImpl{{":status", "200"}, {"content-encoding", "gzip"}}, false); + + // Send first data chunk downstream. + Buffer::OwnedImpl response_data1; + TestUtility::feedBufferWithRandomCharacters(response_data1, 4096); + response_compressor_->compress(response_data1, Envoy::Compression::Compressor::State::Flush); + auto compressed_response_length = response_data1.length(); + upstream_request_->encodeData(response_data1, false); + + // Send second data chunk downstream and finish the response stream. 
+ Buffer::OwnedImpl response_data2; + TestUtility::feedBufferWithRandomCharacters(response_data2, 8192); + response_compressor_->compress(response_data2, Envoy::Compression::Compressor::State::Flush); + compressed_response_length += response_data2.length(); + upstream_request_->encodeData(response_data2, true); + + // Wait for frames to arrive downstream. + response->waitForEndStream(); + + EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().Status()->value().getStringView()); + + // Verify stats. While the stream was decompressed, there should be a decompression failure. + test_server_->waitForCounterEq("http.config_test.decompressor.testlib.gzip.response.decompressed", + 1); + test_server_->waitForCounterEq( + "http.config_test.decompressor.testlib.gzip.response.not_decompressed", 0); + test_server_->waitForCounterEq( + "http.config_test.decompressor.testlib.gzip.response.total_compressed_bytes", + compressed_response_length); + test_server_->waitForCounterGe( + "http.config_test.decompressor.testlib.gzip.decompressor_library.zlib_data_error", 3); +} + } // namespace Envoy diff --git a/test/extensions/filters/http/decompressor/decompressor_filter_test.cc b/test/extensions/filters/http/decompressor/decompressor_filter_test.cc index eb1f42426bbd..b903a9e8b7ee 100644 --- a/test/extensions/filters/http/decompressor/decompressor_filter_test.cc +++ b/test/extensions/filters/http/decompressor/decompressor_filter_test.cc @@ -102,7 +102,7 @@ class DecompressorFilterTest : public testing::TestWithParam { // Keep the decompressor to set expectations about it auto decompressor = std::make_unique(); auto* decompressor_ptr = decompressor.get(); - EXPECT_CALL(*decompressor_factory_, createDecompressor()) + EXPECT_CALL(*decompressor_factory_, createDecompressor(_)) .WillOnce(Return(ByMove(std::move(decompressor)))); std::unique_ptr headers_after_filter = @@ -233,12 +233,12 @@ TEST_P(DecompressorFilterTest, DecompressionDisabled) { runtime_key: does_not_exist )EOF"); - EXPECT_CALL(*decompressor_factory_, createDecompressor()).Times(0); + EXPECT_CALL(*decompressor_factory_, createDecompressor(_)).Times(0); Http::TestRequestHeaderMapImpl headers_before_filter{{"content-encoding", "mock"}, {"content-length", "256"}}; std::unique_ptr headers_after_filter = doHeaders(headers_before_filter, false /* end_stream */); - TestUtility::headerMapEqualIgnoreOrder(headers_before_filter, *headers_after_filter); + EXPECT_THAT(headers_after_filter, HeaderMapEqualIgnoreOrder(&headers_before_filter)); expectNoDecompression(); } @@ -259,10 +259,15 @@ TEST_P(DecompressorFilterTest, RequestDecompressionDisabled) { {"content-length", "256"}}; if (isRequestDirection()) { - EXPECT_CALL(*decompressor_factory_, createDecompressor()).Times(0); + EXPECT_CALL(*decompressor_factory_, createDecompressor(_)).Times(0); std::unique_ptr headers_after_filter = doHeaders(headers_before_filter, false /* end_stream */); - TestUtility::headerMapEqualIgnoreOrder(headers_before_filter, *headers_after_filter); + + // The request direction adds Accept-Encoding by default. Other than this header, the rest of + // the headers should be the same before and after the filter. 
+ headers_after_filter->remove(Http::LowerCaseString("accept-encoding")); + EXPECT_THAT(headers_after_filter, HeaderMapEqualIgnoreOrder(&headers_before_filter)); + expectNoDecompression(); } else { decompressionActive(headers_before_filter, absl::nullopt /* expected_content_encoding*/, @@ -291,71 +296,102 @@ TEST_P(DecompressorFilterTest, ResponseDecompressionDisabled) { decompressionActive(headers_before_filter, absl::nullopt /* expected_content_encoding*/, absl::nullopt /* expected_accept_encoding */); } else { - EXPECT_CALL(*decompressor_factory_, createDecompressor()).Times(0); + EXPECT_CALL(*decompressor_factory_, createDecompressor(_)).Times(0); std::unique_ptr headers_after_filter = doHeaders(headers_before_filter, false /* end_stream */); - TestUtility::headerMapEqualIgnoreOrder(headers_before_filter, *headers_after_filter); + + EXPECT_THAT(headers_after_filter, HeaderMapEqualIgnoreOrder(&headers_before_filter)); + expectNoDecompression(); } } TEST_P(DecompressorFilterTest, NoDecompressionHeadersOnly) { - EXPECT_CALL(*decompressor_factory_, createDecompressor()).Times(0); + EXPECT_CALL(*decompressor_factory_, createDecompressor(_)).Times(0); Http::TestRequestHeaderMapImpl headers_before_filter; std::unique_ptr headers_after_filter = doHeaders(headers_before_filter, true /* end_stream */); - TestUtility::headerMapEqualIgnoreOrder(headers_before_filter, *headers_after_filter); + EXPECT_THAT(headers_after_filter, HeaderMapEqualIgnoreOrder(&headers_before_filter)); } TEST_P(DecompressorFilterTest, NoDecompressionContentEncodingAbsent) { - EXPECT_CALL(*decompressor_factory_, createDecompressor()).Times(0); + EXPECT_CALL(*decompressor_factory_, createDecompressor(_)).Times(0); Http::TestRequestHeaderMapImpl headers_before_filter{{"content-length", "256"}}; std::unique_ptr headers_after_filter = doHeaders(headers_before_filter, false /* end_stream */); - TestUtility::headerMapEqualIgnoreOrder(headers_before_filter, *headers_after_filter); + + if (isRequestDirection()) { + ASSERT_EQ(headers_after_filter->get(Http::LowerCaseString("accept-encoding")) + ->value() + .getStringView(), + "mock"); + // The request direction adds Accept-Encoding by default. Other than this header, the rest of + // the headers should be the same before and after the filter. + headers_after_filter->remove(Http::LowerCaseString("accept-encoding")); + } + EXPECT_THAT(headers_after_filter, HeaderMapEqualIgnoreOrder(&headers_before_filter)); expectNoDecompression(); } TEST_P(DecompressorFilterTest, NoDecompressionContentEncodingDoesNotMatch) { - EXPECT_CALL(*decompressor_factory_, createDecompressor()).Times(0); + EXPECT_CALL(*decompressor_factory_, createDecompressor(_)).Times(0); Http::TestRequestHeaderMapImpl headers_before_filter{{"content-encoding", "not-matching"}, {"content-length", "256"}}; std::unique_ptr headers_after_filter = doHeaders(headers_before_filter, false /* end_stream */); - TestUtility::headerMapEqualIgnoreOrder(headers_before_filter, *headers_after_filter); expectNoDecompression(); } TEST_P(DecompressorFilterTest, NoDecompressionContentEncodingNotCurrent) { - EXPECT_CALL(*decompressor_factory_, createDecompressor()).Times(0); + EXPECT_CALL(*decompressor_factory_, createDecompressor(_)).Times(0); // The decompressor's content scheme is not the first value in the comma-delimited list in the // Content-Encoding header. Therefore, compression will not occur. 
Http::TestRequestHeaderMapImpl headers_before_filter{{"content-encoding", "gzip,mock"}, {"content-length", "256"}}; std::unique_ptr headers_after_filter = doHeaders(headers_before_filter, false /* end_stream */); - TestUtility::headerMapEqualIgnoreOrder(headers_before_filter, *headers_after_filter); + + if (isRequestDirection()) { + ASSERT_EQ(headers_after_filter->get(Http::LowerCaseString("accept-encoding")) + ->value() + .getStringView(), + "mock"); + // The request direction adds Accept-Encoding by default. Other than this header, the rest of + // the headers should be the same before and after the filter. + headers_after_filter->remove(Http::LowerCaseString("accept-encoding")); + } + EXPECT_THAT(headers_after_filter, HeaderMapEqualIgnoreOrder(&headers_before_filter)); expectNoDecompression(); } TEST_P(DecompressorFilterTest, NoResponseDecompressionNoTransformPresent) { - EXPECT_CALL(*decompressor_factory_, createDecompressor()).Times(0); + EXPECT_CALL(*decompressor_factory_, createDecompressor(_)).Times(0); Http::TestRequestHeaderMapImpl headers_before_filter{ {"cache-control", Http::CustomHeaders::get().CacheControlValues.NoTransform}, {"content-encoding", "mock"}, {"content-length", "256"}}; std::unique_ptr headers_after_filter = doHeaders(headers_before_filter, false /* end_stream */); - TestUtility::headerMapEqualIgnoreOrder(headers_before_filter, *headers_after_filter); + + if (isRequestDirection()) { + ASSERT_EQ(headers_after_filter->get(Http::LowerCaseString("accept-encoding")) + ->value() + .getStringView(), + "mock"); + // The request direction adds Accept-Encoding by default. Other than this header, the rest of + // the headers should be the same before and after the filter. + headers_after_filter->remove(Http::LowerCaseString("accept-encoding")); + } + EXPECT_THAT(headers_after_filter, HeaderMapEqualIgnoreOrder(&headers_before_filter)); expectNoDecompression(); } TEST_P(DecompressorFilterTest, NoResponseDecompressionNoTransformPresentInList) { - EXPECT_CALL(*decompressor_factory_, createDecompressor()).Times(0); + EXPECT_CALL(*decompressor_factory_, createDecompressor(_)).Times(0); Http::TestRequestHeaderMapImpl headers_before_filter{ {"cache-control", fmt::format("{}, {}", Http::CustomHeaders::get().CacheControlValues.NoCache, Http::CustomHeaders::get().CacheControlValues.NoTransform)}, @@ -363,7 +399,17 @@ TEST_P(DecompressorFilterTest, NoResponseDecompressionNoTransformPresentInList) {"content-length", "256"}}; std::unique_ptr headers_after_filter = doHeaders(headers_before_filter, false /* end_stream */); - TestUtility::headerMapEqualIgnoreOrder(headers_before_filter, *headers_after_filter); + + if (isRequestDirection()) { + ASSERT_EQ(headers_after_filter->get(Http::LowerCaseString("accept-encoding")) + ->value() + .getStringView(), + "mock"); + // The request direction adds Accept-Encoding by default. Other than this header, the rest of + // the headers should be the same before and after the filter. 
+ headers_after_filter->remove(Http::LowerCaseString("accept-encoding")); + } + EXPECT_THAT(headers_after_filter, HeaderMapEqualIgnoreOrder(&headers_before_filter)); expectNoDecompression(); } diff --git a/test/extensions/filters/http/gzip/gzip_filter_integration_test.cc b/test/extensions/filters/http/gzip/gzip_filter_integration_test.cc index d2b874fde742..8996e2aa0684 100644 --- a/test/extensions/filters/http/gzip/gzip_filter_integration_test.cc +++ b/test/extensions/filters/http/gzip/gzip_filter_integration_test.cc @@ -103,7 +103,9 @@ class GzipIntegrationTest : public testing::TestWithParam config_; std::unique_ptr filter_; Buffer::OwnedImpl data_; - Compression::Gzip::Decompressor::ZlibDecompressorImpl decompressor_; + Stats::IsolatedStoreImpl stats_store_; + Compression::Gzip::Decompressor::ZlibDecompressorImpl decompressor_{stats_store_, "test"}; Buffer::OwnedImpl decompressed_data_; std::string expected_str_; Stats::TestUtil::TestStore stats_; diff --git a/test/mocks/compression/decompressor/mocks.h b/test/mocks/compression/decompressor/mocks.h index 07ce0f6fa701..5910ab9336a8 100644 --- a/test/mocks/compression/decompressor/mocks.h +++ b/test/mocks/compression/decompressor/mocks.h @@ -25,7 +25,7 @@ class MockDecompressorFactory : public DecompressorFactory { ~MockDecompressorFactory() override; // Decompressor::DecompressorFactory - MOCK_METHOD(DecompressorPtr, createDecompressor, ()); + MOCK_METHOD(DecompressorPtr, createDecompressor, (const std::string&)); MOCK_METHOD(const std::string&, statsPrefix, (), (const)); MOCK_METHOD(const std::string&, contentEncoding, (), (const)); From 1784ba1829a5d57a449432c0669760fac2639f82 Mon Sep 17 00:00:00 2001 From: "Adi (Suissa) Peleg" Date: Mon, 13 Jul 2020 09:35:33 -0400 Subject: [PATCH 603/909] http1 204 response with body fails codec assertion (#11922) Removing the codec assert checking that a 204 response must be called with end_stream set to true. The spec states that a 204 response cannot contain a body, and Envoy removes the body from the response. Risk Level: Low Testing: Added integration test, and 2 fuzz test examples added Fixes oss-fuzz bug: 23853 Signed-off-by: Adi Suissa-Peleg --- source/common/http/http1/codec_impl.cc | 3 -- .../http/codec_impl_corpus/response_204_A | 41 +++++++++++++++++++ .../http/codec_impl_corpus/response_204_B | 37 +++++++++++++++++ test/integration/integration_test.cc | 23 +++++++++++ 4 files changed, 101 insertions(+), 3 deletions(-) create mode 100644 test/common/http/codec_impl_corpus/response_204_A create mode 100644 test/common/http/codec_impl_corpus/response_204_B diff --git a/source/common/http/http1/codec_impl.cc b/source/common/http/http1/codec_impl.cc index 47d420026bbb..ad47f9fe564e 100644 --- a/source/common/http/http1/codec_impl.cc +++ b/source/common/http/http1/codec_impl.cc @@ -174,9 +174,6 @@ void StreamEncoderImpl::encodeHeadersBase(const RequestOrResponseHeaderMap& head // For 1xx and 204 responses, do not send the chunked encoding header or enable chunked // encoding: https://tools.ietf.org/html/rfc7230#section-3.3.1 chunk_encoding_ = false; - - // Assert 1xx (may have content) OR 204 and end stream. - ASSERT(*status < 200 || end_stream); } else { // For responses to connect requests, do not send the chunked encoding header: // https://tools.ietf.org/html/rfc7231#section-4.3.6. 
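For context before the new fuzz corpus entries, a hedged sketch of the encoder behavior after the assert removal (simplified, not a verbatim copy of codec_impl.cc):

// 1xx and 204 responses never enable chunked encoding
// (https://tools.ietf.org/html/rfc7230#section-3.3.1).
if (*status < 200 || *status == 204) {
  chunk_encoding_ = false;
  // The former debug-only ASSERT(*status < 200 || end_stream) is gone: a
  // misbehaving upstream may still send body bytes with a 204, and Envoy
  // drops them instead of aborting debug builds.
}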
diff --git a/test/common/http/codec_impl_corpus/response_204_A b/test/common/http/codec_impl_corpus/response_204_A new file mode 100644 index 000000000000..61d1fc05b28a --- /dev/null +++ b/test/common/http/codec_impl_corpus/response_204_A @@ -0,0 +1,41 @@ +actions { + new_stream { + request_headers { + headers { + key: ":path" + value: "/" + } + actions { +} +actions { + mutate { + offset: 255 + value: 255 + } +} +actions { +} +actions { +} headers { + key: ":method" + value: "GET" + } + } + } +} +actions { + client_drain { + } +} +actions { + stream_action { + response { + headers { + headers { + key: ":status" + value: "204" + } + } + } + } +} diff --git a/test/common/http/codec_impl_corpus/response_204_B b/test/common/http/codec_impl_corpus/response_204_B new file mode 100644 index 000000000000..985ed79951a3 --- /dev/null +++ b/test/common/http/codec_impl_corpus/response_204_B @@ -0,0 +1,37 @@ +actions { + new_stream { + request_headers { + headers { + key: ":path" + value: "/" + } + headers { + key: ":method" + value: "GET" + } + } + } +} +actions { + client_drain { + } +} +actions { + stream_action { + response { + headers { + headers { + key: ":status" + value: "204" + } + } + } + } +} +actions { + stream_action { + response { + data: 64 + } + } +} diff --git a/test/integration/integration_test.cc b/test/integration/integration_test.cc index 3d90d5be067c..e89400b690c8 100644 --- a/test/integration/integration_test.cc +++ b/test/integration/integration_test.cc @@ -1404,6 +1404,29 @@ TEST_P(IntegrationTest, ConnectWithChunkedBody) { ASSERT_TRUE(fake_upstream_connection->waitForDisconnect()); } +// Verifies that a 204 response returns without a body +TEST_P(IntegrationTest, Response204WithBody) { + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + + Http::TestRequestHeaderMapImpl request_headers{ + {":method", "GET"}, {":path", "/test/long/url"}, {":scheme", "http"}, {":authority", "host"}}; + + auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); + waitForNextUpstreamRequest(); + // Create a response with a body + upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "204"}}, false); + upstream_request_->encodeData(512, true); + ASSERT_TRUE(fake_upstream_connection_->close()); + + response->waitForEndStream(); + + EXPECT_TRUE(response->complete()); + EXPECT_THAT(response->headers(), HttpStatusIs("204")); + // The body should be removed + EXPECT_EQ(0, response->body().size()); +} + TEST_P(IntegrationTest, QuitQuitQuit) { initialize(); test_server_->useAdminInterfaceToQuit(true); From 7a83dbbdd668dd1b90c2b532c18f8c4f5f609a39 Mon Sep 17 00:00:00 2001 From: Elisha Ziskind Date: Mon, 13 Jul 2020 11:02:02 -0400 Subject: [PATCH 604/909] test: improve test coverage for extensions/resource_monitors/fixed_heap (#12030) Add unit test to improve coverage of extensions/resource_monitors/fixed_heap Risk Level: low Testing: unit test Fixes #12006 Signed-off-by: Elisha Ziskind --- .../fixed_heap/fixed_heap_monitor_test.cc | 15 +++++++++++++++ test/per_file_coverage.sh | 1 - 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/test/extensions/resource_monitors/fixed_heap/fixed_heap_monitor_test.cc b/test/extensions/resource_monitors/fixed_heap/fixed_heap_monitor_test.cc index 6b71e7bb5ced..905f083e15aa 100644 --- a/test/extensions/resource_monitors/fixed_heap/fixed_heap_monitor_test.cc +++ b/test/extensions/resource_monitors/fixed_heap/fixed_heap_monitor_test.cc @@ -53,6 +53,21 @@ TEST(FixedHeapMonitorTest, 
ComputesCorrectUsage) { EXPECT_EQ(resource.pressure(), 0.7); } +TEST(FixedHeapMonitorTest, ComputeUsageWithRealMemoryStats) { + envoy::config::resource_monitor::fixed_heap::v2alpha::FixedHeapConfig config; + uint64_t max_heap = 1024 * 1024 * 1024; + config.set_max_heap_size_bytes(max_heap); + auto stats_reader = std::make_unique(); + const double expected_usage = + (stats_reader->reservedHeapBytes() - stats_reader->unmappedHeapBytes()) / + static_cast(max_heap); + std::unique_ptr monitor(new FixedHeapMonitor(config, std::move(stats_reader))); + + ResourcePressure resource; + monitor->updateResourceUsage(resource); + EXPECT_NEAR(resource.pressure(), expected_usage, 0.0005); +} + } // namespace } // namespace FixedHeapMonitor } // namespace ResourceMonitors diff --git a/test/per_file_coverage.sh b/test/per_file_coverage.sh index b1d3636b666a..521d5c6760ff 100755 --- a/test/per_file_coverage.sh +++ b/test/per_file_coverage.sh @@ -40,7 +40,6 @@ declare -a KNOWN_LOW_COVERAGE=( "source/extensions/health_checkers/redis:95.9" "source/extensions/quic_listeners:84.8" "source/extensions/quic_listeners/quiche:84.8" -"source/extensions/resource_monitors/fixed_heap:90.9" "source/extensions/retry:95.5" "source/extensions/retry/host:85.7" "source/extensions/retry/host/omit_canary_hosts:64.3" From 58fc9d2c89af17c5e92db25969cf80ea05ff3845 Mon Sep 17 00:00:00 2001 From: danzh Date: Mon, 13 Jul 2020 14:55:41 -0400 Subject: [PATCH 605/909] quiche: retain filter chain via ProofSource::Details (#11886) Implement quic::ProofSource::Details to retain filter chain and pass it around during CHLO processing. Replace the current way of filter chain retrieval upon receiving first packet on a new connection with this new approach. Use the filter chain to initialize L4 filters when encryption level become forward secure. Risk Level: low, not in use Testing: re-use exiting proof source test. Simplify active_quic_listener_test.cc and envoy_quic_dispatcher_test.cc because of the behavior change. 
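In rough terms, the new flow looks like the sketch below (names follow the diff in this patch; treat it as an illustration, not the exact source):

// Once the handshake reaches forward-secure encryption, the session uses the
// filter chain captured in EnvoyQuicProofSourceDetails during CHLO processing
// to build the L4 network filters, then signals Connected.
void EnvoyQuicServerSession::SetDefaultEncryptionLevel(quic::EncryptionLevel level) {
  quic::QuicServerSessionBase::SetDefaultEncryptionLevel(level);
  if (level != quic::ENCRYPTION_FORWARD_SECURE) {
    return;
  }
  maybeCreateNetworkFilters();
  raiseConnectionEvent(Network::ConnectionEvent::Connected);
}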
Part of: #2557 Signed-off-by: Dan Zhang --- source/extensions/quic_listeners/quiche/BUILD | 13 + .../quiche/active_quic_listener.cc | 19 +- .../quiche/active_quic_listener.h | 7 - .../quiche/envoy_quic_crypto_server_stream.cc | 48 +++ .../quiche/envoy_quic_crypto_server_stream.h | 89 +++++ .../quiche/envoy_quic_dispatcher.cc | 5 +- .../quiche/envoy_quic_fake_proof_source.h | 45 ++- .../quiche/envoy_quic_proof_source.cc | 28 +- .../quiche/envoy_quic_proof_source.h | 27 +- .../quiche/envoy_quic_server_connection.cc | 26 +- .../quiche/envoy_quic_server_connection.h | 10 +- .../quiche/envoy_quic_server_session.cc | 49 ++- .../quiche/envoy_quic_server_session.h | 6 +- .../quiche/quic_transport_socket_factory.h | 2 + test/extensions/quic_listeners/quiche/BUILD | 1 + .../quiche/active_quic_listener_test.cc | 38 +-- .../quiche/envoy_quic_dispatcher_test.cc | 319 ++++++------------ .../quiche/envoy_quic_proof_source_test.cc | 25 +- .../quiche/envoy_quic_server_session_test.cc | 124 ++++--- .../quiche/envoy_quic_server_stream_test.cc | 3 +- .../quic_listeners/quiche/test_proof_source.h | 9 +- 21 files changed, 496 insertions(+), 397 deletions(-) create mode 100644 source/extensions/quic_listeners/quiche/envoy_quic_crypto_server_stream.cc create mode 100644 source/extensions/quic_listeners/quiche/envoy_quic_crypto_server_stream.h diff --git a/source/extensions/quic_listeners/quiche/BUILD b/source/extensions/quic_listeners/quiche/BUILD index d79c1a355e3a..eb2e1922e182 100644 --- a/source/extensions/quic_listeners/quiche/BUILD +++ b/source/extensions/quic_listeners/quiche/BUILD @@ -85,6 +85,7 @@ envoy_cc_library( ":quic_transport_socket_factory_lib", "//include/envoy/ssl:tls_certificate_config_interface", "//source/extensions/transport_sockets:well_known_names", + "//source/server:connection_handler_lib", "@com_googlesource_quiche//:quic_core_crypto_certificate_view_lib", ], ) @@ -171,6 +172,7 @@ envoy_cc_library( ], tags = ["nofips"], deps = [ + ":envoy_quic_crypto_server_stream_lib", ":envoy_quic_stream_lib", ":envoy_quic_utils_lib", ":quic_filter_manager_connection_lib", @@ -344,3 +346,14 @@ envoy_cc_extension( "@envoy_api//envoy/extensions/transport_sockets/quic/v3:pkg_cc_proto", ], ) + +envoy_cc_library( + name = "envoy_quic_crypto_server_stream_lib", + srcs = ["envoy_quic_crypto_server_stream.cc"], + hdrs = ["envoy_quic_crypto_server_stream.h"], + tags = ["nofips"], + deps = [ + ":envoy_quic_proof_source_lib", + "@com_googlesource_quiche//:quic_core_http_spdy_session_lib", + ], +) diff --git a/source/extensions/quic_listeners/quiche/active_quic_listener.cc b/source/extensions/quic_listeners/quiche/active_quic_listener.cc index 280d778e31e3..c691e39a5551 100644 --- a/source/extensions/quic_listeners/quiche/active_quic_listener.cc +++ b/source/extensions/quic_listeners/quiche/active_quic_listener.cc @@ -35,20 +35,6 @@ ActiveQuicListener::ActiveQuicListener(Event::Dispatcher& dispatcher, const quic::QuicConfig& quic_config, Network::Socket::OptionsSharedPtr options, const envoy::config::core::v3::RuntimeFeatureFlag& enabled) - : ActiveQuicListener(dispatcher, parent, listen_socket, listener_config, quic_config, - std::move(options), - std::make_unique( - listen_socket, listener_config.filterChainManager()), - enabled) {} - -ActiveQuicListener::ActiveQuicListener(Event::Dispatcher& dispatcher, - Network::ConnectionHandler& parent, - Network::SocketSharedPtr listen_socket, - Network::ListenerConfig& listener_config, - const quic::QuicConfig& quic_config, - Network::Socket::OptionsSharedPtr options, - 
std::unique_ptr proof_source, - const envoy::config::core::v3::RuntimeFeatureFlag& enabled) : Server::ConnectionHandlerImpl::ActiveListenerImplBase(parent, &listener_config), dispatcher_(dispatcher), version_manager_(quic::CurrentSupportedVersions()), listen_socket_(*listen_socket), enabled_(enabled, Runtime::LoaderSingleton::get()) { @@ -67,7 +53,10 @@ ActiveQuicListener::ActiveQuicListener(Event::Dispatcher& dispatcher, random->RandBytes(random_seed_, sizeof(random_seed_)); crypto_config_ = std::make_unique( quiche::QuicheStringPiece(reinterpret_cast(random_seed_), sizeof(random_seed_)), - quic::QuicRandom::GetInstance(), std::move(proof_source), quic::KeyExchangeSource::Default()); + quic::QuicRandom::GetInstance(), + std::make_unique(listen_socket_, listener_config.filterChainManager(), + stats_), + quic::KeyExchangeSource::Default()); auto connection_helper = std::make_unique(dispatcher_); crypto_config_->AddDefaultConfig(random, connection_helper->GetClock(), quic::QuicCryptoServerConfig::ConfigOptions()); diff --git a/source/extensions/quic_listeners/quiche/active_quic_listener.h b/source/extensions/quic_listeners/quiche/active_quic_listener.h index a9c87d4b4d66..8d0d5c9dd46e 100644 --- a/source/extensions/quic_listeners/quiche/active_quic_listener.h +++ b/source/extensions/quic_listeners/quiche/active_quic_listener.h @@ -36,13 +36,6 @@ class ActiveQuicListener : public Network::UdpListenerCallbacks, Network::Socket::OptionsSharedPtr options, const envoy::config::core::v3::RuntimeFeatureFlag& enabled); - ActiveQuicListener(Event::Dispatcher& dispatcher, Network::ConnectionHandler& parent, - Network::SocketSharedPtr listen_socket, - Network::ListenerConfig& listener_config, const quic::QuicConfig& quic_config, - Network::Socket::OptionsSharedPtr options, - std::unique_ptr proof_source, - const envoy::config::core::v3::RuntimeFeatureFlag& enabled); - ~ActiveQuicListener() override; void onListenerShutdown(); diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_crypto_server_stream.cc b/source/extensions/quic_listeners/quiche/envoy_quic_crypto_server_stream.cc new file mode 100644 index 000000000000..fb52d075c374 --- /dev/null +++ b/source/extensions/quic_listeners/quiche/envoy_quic_crypto_server_stream.cc @@ -0,0 +1,48 @@ +#include "extensions/quic_listeners/quiche/envoy_quic_crypto_server_stream.h" + +namespace Envoy { +namespace Quic { + +void EnvoyQuicCryptoServerStream::EnvoyProcessClientHelloResultCallback::Run( + quic::QuicErrorCode error, const std::string& error_details, + std::unique_ptr message, + std::unique_ptr diversification_nonce, + std::unique_ptr proof_source_details) { + if (parent_ == nullptr) { + return; + } + + if (proof_source_details != nullptr) { + // Retain a copy of the proof source details after getting filter chain. 
+ parent_->details_ = std::make_unique( + static_cast(*proof_source_details)); + } + parent_->done_cb_wrapper_ = nullptr; + parent_ = nullptr; + done_cb_->Run(error, error_details, std::move(message), std::move(diversification_nonce), + std::move(proof_source_details)); +} + +EnvoyQuicCryptoServerStream::~EnvoyQuicCryptoServerStream() { + if (done_cb_wrapper_ != nullptr) { + done_cb_wrapper_->cancel(); + } +} + +void EnvoyQuicCryptoServerStream::ProcessClientHello( + quic::QuicReferenceCountedPointer result, + std::unique_ptr proof_source_details, + std::unique_ptr done_cb) { + auto done_cb_wrapper = + std::make_unique(this, std::move(done_cb)); + ASSERT(done_cb_wrapper_ == nullptr); + done_cb_wrapper_ = done_cb_wrapper.get(); + // Old QUICHE code might call GetProof() earlier and pass in proof source instance here. But this + // is no longer the case, so proof_source_details should always be null. + ASSERT(proof_source_details == nullptr); + quic::QuicCryptoServerStream::ProcessClientHello(result, std::move(proof_source_details), + std::move(done_cb_wrapper)); +} + +} // namespace Quic +} // namespace Envoy diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_crypto_server_stream.h b/source/extensions/quic_listeners/quiche/envoy_quic_crypto_server_stream.h new file mode 100644 index 000000000000..faaa6254bdf8 --- /dev/null +++ b/source/extensions/quic_listeners/quiche/envoy_quic_crypto_server_stream.h @@ -0,0 +1,89 @@ +#pragma once + +#pragma GCC diagnostic push +// QUICHE allows unused parameters. +#pragma GCC diagnostic ignored "-Wunused-parameter" +// QUICHE uses offsetof(). +#pragma GCC diagnostic ignored "-Winvalid-offsetof" + +#include "quiche/quic/core/quic_crypto_server_stream.h" +#include "quiche/quic/core/tls_server_handshaker.h" + +#pragma GCC diagnostic pop + +#include "extensions/quic_listeners/quiche/envoy_quic_proof_source.h" + +#include + +namespace Envoy { +namespace Quic { + +class EnvoyCryptoServerStream : protected Logger::Loggable { +public: + virtual ~EnvoyCryptoServerStream() = default; + virtual const EnvoyQuicProofSourceDetails* proofSourceDetails() const = 0; +}; + +// A dedicated stream to do QUIC crypto handshake. +class EnvoyQuicCryptoServerStream : public quic::QuicCryptoServerStream, + public EnvoyCryptoServerStream { +public: + // A wrapper to retain proof source details which has filter chain. + class EnvoyProcessClientHelloResultCallback : public quic::ProcessClientHelloResultCallback { + public: + EnvoyProcessClientHelloResultCallback( + EnvoyQuicCryptoServerStream* parent, + std::unique_ptr done_cb) + : parent_(parent), done_cb_(std::move(done_cb)) {} + + // quic::ProcessClientHelloResultCallback + void Run(quic::QuicErrorCode error, const std::string& error_details, + std::unique_ptr message, + std::unique_ptr diversification_nonce, + std::unique_ptr proof_source_details) override; + + void cancel() { parent_ = nullptr; } + + private: + EnvoyQuicCryptoServerStream* parent_; + std::unique_ptr done_cb_; + }; + + EnvoyQuicCryptoServerStream(const quic::QuicCryptoServerConfig* crypto_config, + quic::QuicCompressedCertsCache* compressed_certs_cache, + quic::QuicSession* session, + quic::QuicCryptoServerStreamBase::Helper* helper) + : quic::QuicCryptoServerStream(crypto_config, compressed_certs_cache, session, helper) {} + + ~EnvoyQuicCryptoServerStream() override; + + // quic::QuicCryptoServerStream + // Override to retain ProofSource::Details. 
+ void ProcessClientHello( + quic::QuicReferenceCountedPointer result, + std::unique_ptr proof_source_details, + std::unique_ptr done_cb) override; + // EnvoyCryptoServerStream + const EnvoyQuicProofSourceDetails* proofSourceDetails() const override { return details_.get(); } + +private: + EnvoyProcessClientHelloResultCallback* done_cb_wrapper_{nullptr}; + std::unique_ptr details_; +}; + +// A dedicated stream to do TLS1.3 handshake. +class EnvoyQuicTlsServerHandshaker : public quic::TlsServerHandshaker, + public EnvoyCryptoServerStream { +public: + EnvoyQuicTlsServerHandshaker(quic::QuicSession* session, + const quic::QuicCryptoServerConfig& crypto_config) + : quic::TlsServerHandshaker(session, crypto_config) {} + + // EnvoyCryptoServerStream + const EnvoyQuicProofSourceDetails* proofSourceDetails() const override { + return dynamic_cast(proof_source_details()); + } +}; + +} // namespace Quic +} // namespace Envoy diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_dispatcher.cc b/source/extensions/quic_listeners/quiche/envoy_quic_dispatcher.cc index ab999d5b204d..08564b722580 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_dispatcher.cc +++ b/source/extensions/quic_listeners/quiche/envoy_quic_dispatcher.cc @@ -52,12 +52,11 @@ std::unique_ptr EnvoyQuicDispatcher::CreateQuicSession( quiche::QuicheStringPiece /*alpn*/, const quic::ParsedQuicVersion& version) { auto quic_connection = std::make_unique( server_connection_id, peer_address, *helper(), *alarm_factory(), writer(), - /*owns_writer=*/false, quic::ParsedQuicVersionVector{version}, listener_config_, - listener_stats_, listen_socket_); + /*owns_writer=*/false, quic::ParsedQuicVersionVector{version}, listen_socket_); auto quic_session = std::make_unique( config(), quic::ParsedQuicVersionVector{version}, std::move(quic_connection), this, session_helper(), crypto_config(), compressed_certs_cache(), dispatcher_, - listener_config_.perConnectionBufferLimitBytes()); + listener_config_.perConnectionBufferLimitBytes(), listener_config_); quic_session->Initialize(); // Filter chain can't be retrieved here as self address is unknown at this // point. diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_fake_proof_source.h b/source/extensions/quic_listeners/quiche/envoy_quic_fake_proof_source.h index cddf10b7799c..f4a2a9466f42 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_fake_proof_source.h +++ b/source/extensions/quic_listeners/quiche/envoy_quic_fake_proof_source.h @@ -24,6 +24,20 @@ namespace Envoy { namespace Quic { +// A ProofSource::Detail implementation which retains filter chain. +class EnvoyQuicProofSourceDetails : public quic::ProofSource::Details { +public: + explicit EnvoyQuicProofSourceDetails(const Network::FilterChain& filter_chain) + : filter_chain_(filter_chain) {} + EnvoyQuicProofSourceDetails(const EnvoyQuicProofSourceDetails& other) + : filter_chain_(other.filter_chain_) {} + + const Network::FilterChain& filterChain() const { return filter_chain_; } + +private: + const Network::FilterChain& filter_chain_; +}; + // A fake implementation of quic::ProofSource which uses RSA cipher suite to sign in GetProof(). // TODO(danzh) Rename it to EnvoyQuicProofSource once it's fully implemented. 
class EnvoyQuicFakeProofSource : public quic::ProofSource { @@ -41,36 +55,39 @@ class EnvoyQuicFakeProofSource : public quic::ProofSource { quic::QuicReferenceCountedPointer chain = GetCertChain(server_address, client_address, hostname); quic::QuicCryptoProof proof; - bool success = false; // TODO(danzh) Get the signature algorithm from leaf cert. - auto signature_callback = std::make_unique(success, proof.signature); + auto signature_callback = std::make_unique(std::move(callback), chain); ComputeTlsSignature(server_address, client_address, hostname, SSL_SIGN_RSA_PSS_RSAE_SHA256, server_config, std::move(signature_callback)); - ASSERT(success); - proof.leaf_cert_scts = "Fake timestamp"; - callback->Run(true, chain, proof, nullptr /* details */); } TicketCrypter* GetTicketCrypter() override { return nullptr; } private: - // Used by GetProof() to get fake signature. - class FakeSignatureCallback : public quic::ProofSource::SignatureCallback { + // Used by GetProof() to get signature. + class SignatureCallback : public quic::ProofSource::SignatureCallback { public: // TODO(danzh) Pass in Details to retain the certs chain, and quic::ProofSource::Callback to be // triggered in Run(). - FakeSignatureCallback(bool& success, std::string& signature) - : success_(success), signature_(signature) {} + SignatureCallback(std::unique_ptr callback, + quic::QuicReferenceCountedPointer chain) + : callback_(std::move(callback)), chain_(chain) {} // quic::ProofSource::SignatureCallback - void Run(bool ok, std::string signature, std::unique_ptr
/*details*/) override { - success_ = ok; - signature_ = signature; + void Run(bool ok, std::string signature, std::unique_ptr
details) override { + quic::QuicCryptoProof proof; + if (!ok) { + callback_->Run(false, chain_, proof, nullptr); + return; + } + proof.signature = signature; + proof.leaf_cert_scts = "Fake timestamp"; + callback_->Run(true, chain_, proof, std::move(details)); } private: - bool& success_; - std::string& signature_; + std::unique_ptr callback_; + quic::QuicReferenceCountedPointer chain_; }; }; diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_proof_source.cc b/source/extensions/quic_listeners/quiche/envoy_quic_proof_source.cc index ffb567a4dbf3..66fe7017436d 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_proof_source.cc +++ b/source/extensions/quic_listeners/quiche/envoy_quic_proof_source.cc @@ -18,8 +18,10 @@ quic::QuicReferenceCountedPointer EnvoyQuicProofSource::GetCertChain(const quic::QuicSocketAddress& server_address, const quic::QuicSocketAddress& client_address, const std::string& hostname) { + CertConfigWithFilterChain res = + getTlsCertConfigAndFilterChain(server_address, client_address, hostname); absl::optional> cert_config_ref = - getTlsCertConfig(server_address, client_address, hostname); + res.cert_config_; if (!cert_config_ref.has_value()) { ENVOY_LOG(warn, "No matching filter chain found for handshake."); return nullptr; @@ -42,8 +44,10 @@ void EnvoyQuicProofSource::ComputeTlsSignature( const quic::QuicSocketAddress& server_address, const quic::QuicSocketAddress& client_address, const std::string& hostname, uint16_t signature_algorithm, quiche::QuicheStringPiece in, std::unique_ptr callback) { + CertConfigWithFilterChain res = + getTlsCertConfigAndFilterChain(server_address, client_address, hostname); absl::optional> cert_config_ref = - getTlsCertConfig(server_address, client_address, hostname); + res.cert_config_; if (!cert_config_ref.has_value()) { ENVOY_LOG(warn, "No matching filter chain found for handshake."); callback->Run(false, "", nullptr); @@ -60,19 +64,20 @@ void EnvoyQuicProofSource::ComputeTlsSignature( std::string sig = pem_key->Sign(in, signature_algorithm); bool success = !sig.empty(); - callback->Run(success, sig, nullptr); + ASSERT(res.filter_chain_.has_value()); + callback->Run(success, sig, + std::make_unique(res.filter_chain_.value().get())); } -absl::optional> -EnvoyQuicProofSource::getTlsCertConfig(const quic::QuicSocketAddress& server_address, - const quic::QuicSocketAddress& client_address, - const std::string& hostname) { +EnvoyQuicProofSource::CertConfigWithFilterChain +EnvoyQuicProofSource::getTlsCertConfigAndFilterChain(const quic::QuicSocketAddress& server_address, + const quic::QuicSocketAddress& client_address, + const std::string& hostname) { ENVOY_LOG(trace, "Getting cert chain for {}", hostname); Network::ConnectionSocketImpl connection_socket( - std::make_unique(listen_socket_->ioHandle()), + std::make_unique(listen_socket_.ioHandle()), quicAddressToEnvoyAddressInstance(server_address), quicAddressToEnvoyAddressInstance(client_address)); - connection_socket.setDetectedTransportProtocol( Extensions::TransportSockets::TransportProtocolNames::get().Quic); connection_socket.setRequestedServerName(hostname); @@ -81,7 +86,8 @@ EnvoyQuicProofSource::getTlsCertConfig(const quic::QuicSocketAddress& server_add filter_chain_manager_.findFilterChain(connection_socket); if (filter_chain == nullptr) { ENVOY_LOG(warn, "No matching filter chain found for handshake."); - return absl::nullopt; + listener_stats_.no_filter_chain_match_.inc(); + return {absl::nullopt, absl::nullopt}; } const Network::TransportSocketFactory& 
transport_socket_factory = filter_chain->transportSocketFactory(); @@ -93,7 +99,7 @@ EnvoyQuicProofSource::getTlsCertConfig(const quic::QuicSocketAddress& server_add // Only return the first TLS cert config. // TODO(danzh) Choose based on supported cipher suites in TLS1.3 CHLO and prefer EC // certs if supported. - return {tls_cert_configs[0].get()}; + return {tls_cert_configs[0].get(), *filter_chain}; } } // namespace Quic diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_proof_source.h b/source/extensions/quic_listeners/quiche/envoy_quic_proof_source.h index 4204f4b13634..4dab673687d8 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_proof_source.h +++ b/source/extensions/quic_listeners/quiche/envoy_quic_proof_source.h @@ -1,3 +1,7 @@ +#pragma once + +#include "server/connection_handler_impl.h" + #include "extensions/quic_listeners/quiche/envoy_quic_fake_proof_source.h" #include "extensions/quic_listeners/quiche/quic_transport_socket_factory.h" @@ -7,9 +11,11 @@ namespace Quic { class EnvoyQuicProofSource : public EnvoyQuicFakeProofSource, protected Logger::Loggable { public: - EnvoyQuicProofSource(Network::SocketSharedPtr listen_socket, - Network::FilterChainManager& filter_chain_manager) - : listen_socket_(std::move(listen_socket)), filter_chain_manager_(filter_chain_manager) {} + EnvoyQuicProofSource(Network::Socket& listen_socket, + Network::FilterChainManager& filter_chain_manager, + Server::ListenerStats& listener_stats) + : listen_socket_(listen_socket), filter_chain_manager_(filter_chain_manager), + listener_stats_(listener_stats) {} ~EnvoyQuicProofSource() override = default; @@ -23,12 +29,19 @@ class EnvoyQuicProofSource : public EnvoyQuicFakeProofSource, std::unique_ptr callback) override; private: - absl::optional> - getTlsCertConfig(const quic::QuicSocketAddress& server_address, - const quic::QuicSocketAddress& client_address, const std::string& hostname); + struct CertConfigWithFilterChain { + absl::optional> cert_config_; + absl::optional> filter_chain_; + }; + + CertConfigWithFilterChain + getTlsCertConfigAndFilterChain(const quic::QuicSocketAddress& server_address, + const quic::QuicSocketAddress& client_address, + const std::string& hostname); - Network::SocketSharedPtr listen_socket_; + Network::Socket& listen_socket_; Network::FilterChainManager& filter_chain_manager_; + Server::ListenerStats& listener_stats_; }; } // namespace Quic diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_server_connection.cc b/source/extensions/quic_listeners/quiche/envoy_quic_server_connection.cc index c8a18a45acfb..b8fa94221f05 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_server_connection.cc +++ b/source/extensions/quic_listeners/quiche/envoy_quic_server_connection.cc @@ -13,17 +13,14 @@ EnvoyQuicServerConnection::EnvoyQuicServerConnection( const quic::QuicConnectionId& server_connection_id, quic::QuicSocketAddress initial_peer_address, quic::QuicConnectionHelperInterface& helper, quic::QuicAlarmFactory& alarm_factory, quic::QuicPacketWriter* writer, bool owns_writer, - const quic::ParsedQuicVersionVector& supported_versions, - Network::ListenerConfig& listener_config, Server::ListenerStats& listener_stats, - Network::Socket& listen_socket) + const quic::ParsedQuicVersionVector& supported_versions, Network::Socket& listen_socket) : EnvoyQuicConnection(server_connection_id, initial_peer_address, helper, alarm_factory, writer, owns_writer, quic::Perspective::IS_SERVER, supported_versions, std::make_unique( // Wraps the real 
IoHandle instance so that if the connection socket // gets closed, the real IoHandle won't be affected. std::make_unique(listen_socket.ioHandle()), - nullptr, quicAddressToEnvoyAddressInstance(initial_peer_address))), - listener_config_(listener_config), listener_stats_(listener_stats) {} + nullptr, quicAddressToEnvoyAddressInstance(initial_peer_address))) {} bool EnvoyQuicServerConnection::OnPacketHeader(const quic::QuicPacketHeader& header) { if (!EnvoyQuicConnection::OnPacketHeader(header)) { @@ -33,27 +30,10 @@ bool EnvoyQuicServerConnection::OnPacketHeader(const quic::QuicPacketHeader& hea return true; } ASSERT(self_address().IsInitialized()); - // Self address should be initialized by now. It's time to install filters. + // Self address should be initialized by now. connectionSocket()->setLocalAddress(quicAddressToEnvoyAddressInstance(self_address())); connectionSocket()->setDetectedTransportProtocol( Extensions::TransportSockets::TransportProtocolNames::get().Quic); - ASSERT(filter_chain_ == nullptr); - filter_chain_ = listener_config_.filterChainManager().findFilterChain(*connectionSocket()); - if (filter_chain_ == nullptr) { - listener_stats_.no_filter_chain_match_.inc(); - CloseConnection(quic::QUIC_CRYPTO_INTERNAL_ERROR, - "closing connection: no matching filter chain found for handshake", - quic::ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET); - return false; - } - const bool empty_filter_chain = !listener_config_.filterChainFactory().createNetworkFilterChain( - envoyConnection(), filter_chain_->networkFilterFactories()); - if (empty_filter_chain) { - // TODO(danzh) check empty filter chain at config load time instead of here. - CloseConnection(quic::QUIC_CRYPTO_INTERNAL_ERROR, "closing connection: filter chain is empty", - quic::ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET); - return false; - } return true; } diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_server_connection.h b/source/extensions/quic_listeners/quiche/envoy_quic_server_connection.h index ad4614710750..7b7fac05e925 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_server_connection.h +++ b/source/extensions/quic_listeners/quiche/envoy_quic_server_connection.h @@ -15,19 +15,11 @@ class EnvoyQuicServerConnection : public EnvoyQuicConnection { quic::QuicAlarmFactory& alarm_factory, quic::QuicPacketWriter* writer, bool owns_writer, const quic::ParsedQuicVersionVector& supported_versions, - Network::ListenerConfig& listener_config, - Server::ListenerStats& listener_stats, Network::Socket& listen_socket); + Network::Socket& listen_socket); // EnvoyQuicConnection // Overridden to set connection_socket_ with initialized self address and retrieve filter chain. bool OnPacketHeader(const quic::QuicPacketHeader& header) override; - -private: - Network::ListenerConfig& listener_config_; - Server::ListenerStats& listener_stats_; - // Latched to the corresponding quic FilterChain after connection_socket_ is - // initialized. 
- const Network::FilterChain* filter_chain_{nullptr}; }; } // namespace Quic diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_server_session.cc b/source/extensions/quic_listeners/quiche/envoy_quic_server_session.cc index 9c621ad3d690..bc708dea4866 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_server_session.cc +++ b/source/extensions/quic_listeners/quiche/envoy_quic_server_session.cc @@ -1,15 +1,10 @@ #include "extensions/quic_listeners/quiche/envoy_quic_server_session.h" -#pragma GCC diagnostic push -// QUICHE allows unused parameters. -#pragma GCC diagnostic ignored "-Wunused-parameter" -// QUICHE uses offsetof(). -#pragma GCC diagnostic ignored "-Winvalid-offsetof" - -#include "quiche/quic/core/quic_crypto_server_stream.h" -#pragma GCC diagnostic pop +#include #include "common/common/assert.h" + +#include "extensions/quic_listeners/quiche/envoy_quic_crypto_server_stream.h" #include "extensions/quic_listeners/quiche/envoy_quic_server_stream.h" namespace Envoy { @@ -20,11 +15,11 @@ EnvoyQuicServerSession::EnvoyQuicServerSession( std::unique_ptr connection, quic::QuicSession::Visitor* visitor, quic::QuicCryptoServerStream::Helper* helper, const quic::QuicCryptoServerConfig* crypto_config, quic::QuicCompressedCertsCache* compressed_certs_cache, Event::Dispatcher& dispatcher, - uint32_t send_buffer_limit) + uint32_t send_buffer_limit, Network::ListenerConfig& listener_config) : quic::QuicServerSessionBase(config, supported_versions, connection.get(), visitor, helper, crypto_config, compressed_certs_cache), QuicFilterManagerConnectionImpl(*connection, dispatcher, send_buffer_limit), - quic_connection_(std::move(connection)) {} + quic_connection_(std::move(connection)), listener_config_(listener_config) {} EnvoyQuicServerSession::~EnvoyQuicServerSession() { ASSERT(!quic_connection_->connected()); @@ -39,8 +34,17 @@ std::unique_ptr EnvoyQuicServerSession::CreateQuicCryptoServerStream( const quic::QuicCryptoServerConfig* crypto_config, quic::QuicCompressedCertsCache* compressed_certs_cache) { - return quic::CreateCryptoServerStream(crypto_config, compressed_certs_cache, this, - stream_helper()); + switch (connection()->version().handshake_protocol) { + case quic::PROTOCOL_QUIC_CRYPTO: + return std::make_unique(crypto_config, compressed_certs_cache, + this, stream_helper()); + case quic::PROTOCOL_TLS1_3: + return std::make_unique(this, *crypto_config); + case quic::PROTOCOL_UNSUPPORTED: + PANIC(fmt::format("Unknown handshake protocol: {}", + static_cast(connection()->version().handshake_protocol))); + } + return nullptr; } quic::QuicSpdyStream* EnvoyQuicServerSession::CreateIncomingStream(quic::QuicStreamId id) { @@ -103,18 +107,33 @@ void EnvoyQuicServerSession::OnCanWrite() { void EnvoyQuicServerSession::SetDefaultEncryptionLevel(quic::EncryptionLevel level) { quic::QuicServerSessionBase::SetDefaultEncryptionLevel(level); - if (level == quic::ENCRYPTION_FORWARD_SECURE) { - // This is only reached once, when handshake is done. - raiseConnectionEvent(Network::ConnectionEvent::Connected); + if (level != quic::ENCRYPTION_FORWARD_SECURE) { + return; } + maybeCreateNetworkFilters(); + // This is only reached once, when handshake is done. 
+ raiseConnectionEvent(Network::ConnectionEvent::Connected); } bool EnvoyQuicServerSession::hasDataToWrite() { return HasDataToWrite(); } void EnvoyQuicServerSession::OnOneRttKeysAvailable() { quic::QuicServerSessionBase::OnOneRttKeysAvailable(); + maybeCreateNetworkFilters(); raiseConnectionEvent(Network::ConnectionEvent::Connected); } +void EnvoyQuicServerSession::maybeCreateNetworkFilters() { + const EnvoyQuicProofSourceDetails* proof_source_details = + dynamic_cast(GetCryptoStream())->proofSourceDetails(); + ASSERT(proof_source_details != nullptr, + "ProofSource didn't provide ProofSource::Details. No filter chain will be installed."); + + const bool has_filter_initialized = + listener_config_.filterChainFactory().createNetworkFilterChain( + *this, proof_source_details->filterChain().networkFilterFactories()); + ASSERT(has_filter_initialized); +} + } // namespace Quic } // namespace Envoy diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_server_session.h b/source/extensions/quic_listeners/quiche/envoy_quic_server_session.h index c0cbc334d8e3..a50e6fbe8f44 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_server_session.h +++ b/source/extensions/quic_listeners/quiche/envoy_quic_server_session.h @@ -15,6 +15,7 @@ #include "extensions/quic_listeners/quiche/quic_filter_manager_connection_impl.h" #include "extensions/quic_listeners/quiche/envoy_quic_server_stream.h" +#include "extensions/quic_listeners/quiche/envoy_quic_crypto_server_stream.h" namespace Envoy { namespace Quic { @@ -33,7 +34,8 @@ class EnvoyQuicServerSession : public quic::QuicServerSessionBase, quic::QuicCryptoServerStreamBase::Helper* helper, const quic::QuicCryptoServerConfig* crypto_config, quic::QuicCompressedCertsCache* compressed_certs_cache, - Event::Dispatcher& dispatcher, uint32_t send_buffer_limit); + Event::Dispatcher& dispatcher, uint32_t send_buffer_limit, + Network::ListenerConfig& listener_config); ~EnvoyQuicServerSession() override; @@ -74,8 +76,10 @@ class EnvoyQuicServerSession : public quic::QuicServerSessionBase, private: void setUpRequestDecoder(EnvoyQuicServerStream& stream); + void maybeCreateNetworkFilters(); std::unique_ptr quic_connection_; + Network::ListenerConfig& listener_config_; // These callbacks are owned by network filters and quic session should out live // them. 
Http::ServerConnectionCallbacks* http_connection_callbacks_{nullptr}; diff --git a/source/extensions/quic_listeners/quiche/quic_transport_socket_factory.h b/source/extensions/quic_listeners/quiche/quic_transport_socket_factory.h index 009af3008368..2ada9e2de17b 100644 --- a/source/extensions/quic_listeners/quiche/quic_transport_socket_factory.h +++ b/source/extensions/quic_listeners/quiche/quic_transport_socket_factory.h @@ -1,3 +1,5 @@ +#pragma once + #include "envoy/network/transport_socket.h" #include "envoy/server/transport_socket_config.h" #include "envoy/ssl/context_config.h" diff --git a/test/extensions/quic_listeners/quiche/BUILD b/test/extensions/quic_listeners/quiche/BUILD index 37eb776630cf..482bae22257e 100644 --- a/test/extensions/quic_listeners/quiche/BUILD +++ b/test/extensions/quic_listeners/quiche/BUILD @@ -221,6 +221,7 @@ envoy_cc_test_library( tags = ["nofips"], deps = [ "//source/extensions/quic_listeners/quiche:envoy_quic_fake_proof_source_lib", + "//test/mocks/network:network_mocks", "@com_googlesource_quiche//:quic_test_tools_test_certificates_lib", ], ) diff --git a/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc b/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc index 36c8d58429ad..13c38062a376 100644 --- a/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc +++ b/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc @@ -119,7 +119,9 @@ class ActiveQuicListenerTest : public QuicMultiVersionTest { quic::QuicCryptoServerConfig& crypto_config = ActiveQuicListenerPeer::cryptoConfig(*quic_listener_); quic::test::QuicCryptoServerConfigPeer crypto_config_peer(&crypto_config); - crypto_config_peer.ResetProofSource(std::make_unique()); + auto proof_source = std::make_unique(); + filter_chain_ = &proof_source->filterChain(); + crypto_config_peer.ResetProofSource(std::move(proof_source)); simulated_time_system_.advanceTimeWait(std::chrono::milliseconds(100)); } @@ -133,10 +135,10 @@ class ActiveQuicListenerTest : public QuicMultiVersionTest { return config_factory.createActiveUdpListenerFactory(*config_proto, /*concurrency=*/1); } - void configureMocks(int connection_count) { - EXPECT_CALL(listener_config_, filterChainManager()) - .Times(connection_count) - .WillRepeatedly(ReturnRef(filter_chain_manager_)); + void maybeConfigureMocks(int connection_count) { + if (quic_version_.UsesTls()) { + return; + } EXPECT_CALL(listener_config_, filterChainFactory()).Times(connection_count); EXPECT_CALL(listener_config_.filter_chain_factory_, createNetworkFilterChain(_, _)) .Times(connection_count) @@ -146,10 +148,8 @@ class ActiveQuicListenerTest : public QuicMultiVersionTest { Server::Configuration::FilterChainUtility::buildFilterChain(connection, filter_factories); return true; })); - if (!quic_version_.UsesTls()) { - EXPECT_CALL(network_connection_callbacks_, onEvent(Network::ConnectionEvent::Connected)) - .Times(connection_count); - } + EXPECT_CALL(network_connection_callbacks_, onEvent(Network::ConnectionEvent::Connected)) + .Times(connection_count); EXPECT_CALL(network_connection_callbacks_, onEvent(Network::ConnectionEvent::LocalClose)) .Times(connection_count); @@ -166,16 +166,11 @@ class ActiveQuicListenerTest : public QuicMultiVersionTest { EXPECT_CALL(*read_filter, onNewConnection()) .WillOnce(Return(Network::FilterStatus::StopIteration)); read_filters_.push_back(std::move(read_filter)); - - filter_chains_.emplace_back(); - EXPECT_CALL(filter_chains_.back(), networkFilterFactories()) - 
.WillOnce(ReturnRef(filter_factories_.back())); - // A Sequence must be used to allow multiple EXPECT_CALL().WillOnce() // calls for the same object. - EXPECT_CALL(filter_chain_manager_, findFilterChain(_)) + EXPECT_CALL(*filter_chain_, networkFilterFactories()) .InSequence(seq) - .WillOnce(Return(&filter_chains_.back())); + .WillOnce(ReturnRef(filter_factories_.back())); } } @@ -269,7 +264,7 @@ class ActiveQuicListenerTest : public QuicMultiVersionTest { // The following two containers must guarantee pointer stability as addresses // of elements are saved in expectations before new elements are added. std::list> filter_factories_; - std::list filter_chains_; + const Network::MockFilterChain* filter_chain_; quic::ParsedQuicVersion quic_version_; }; @@ -282,7 +277,6 @@ TEST_P(ActiveQuicListenerTest, FailSocketOptionUponCreation) { .WillOnce(Return(false)); auto options = std::make_shared>(); options->emplace_back(std::move(option)); - EXPECT_CALL(listener_config_, filterChainManager()).WillOnce(ReturnRef(filter_chain_manager_)); EXPECT_THROW_WITH_REGEX( std::make_unique( *dispatcher_, connection_handler_, listen_socket_, listener_config_, quic_config_, @@ -295,7 +289,7 @@ TEST_P(ActiveQuicListenerTest, FailSocketOptionUponCreation) { TEST_P(ActiveQuicListenerTest, ReceiveCHLO) { quic::QuicBufferedPacketStore* const buffered_packets = quic::test::QuicDispatcherPeer::GetBufferedPackets(quic_dispatcher_); - configureMocks(/* connection_count = */ 1); + maybeConfigureMocks(/* connection_count = */ 1); sendCHLO(quic::test::TestConnectionId(1)); dispatcher_->run(Event::Dispatcher::RunType::NonBlock); EXPECT_FALSE(buffered_packets->HasChlosBuffered()); @@ -306,7 +300,7 @@ TEST_P(ActiveQuicListenerTest, ReceiveCHLO) { TEST_P(ActiveQuicListenerTest, ProcessBufferedChlos) { quic::QuicBufferedPacketStore* const buffered_packets = quic::test::QuicDispatcherPeer::GetBufferedPackets(quic_dispatcher_); - configureMocks(ActiveQuicListener::kNumSessionsToCreatePerLoop + 2); + maybeConfigureMocks(ActiveQuicListener::kNumSessionsToCreatePerLoop + 2); // Generate one more CHLO than can be processed immediately. 
for (size_t i = 1; i <= ActiveQuicListener::kNumSessionsToCreatePerLoop + 1; ++i) { @@ -346,7 +340,7 @@ TEST_P(ActiveQuicListenerTest, QuicProcessingDisabledAndEnabled) { EXPECT_TRUE(quic_dispatcher_->session_map().empty()); EXPECT_FALSE(ActiveQuicListenerPeer::enabled(*quic_listener_)); Runtime::LoaderSingleton::getExisting()->mergeValues({{"quic.enabled", " true"}}); - configureMocks(/* connection_count = */ 1); + maybeConfigureMocks(/* connection_count = */ 1); sendCHLO(quic::test::TestConnectionId(1)); dispatcher_->run(Event::Dispatcher::RunType::NonBlock); EXPECT_FALSE(quic_dispatcher_->session_map().empty()); @@ -370,7 +364,7 @@ INSTANTIATE_TEST_SUITE_P(ActiveQuicListenerEmptyFlagConfigTests, TEST_P(ActiveQuicListenerEmptyFlagConfigTest, ReceiveFullQuicCHLO) { quic::QuicBufferedPacketStore* const buffered_packets = quic::test::QuicDispatcherPeer::GetBufferedPackets(quic_dispatcher_); - configureMocks(/* connection_count = */ 1); + maybeConfigureMocks(/* connection_count = */ 1); sendCHLO(quic::test::TestConnectionId(1)); dispatcher_->run(Event::Dispatcher::RunType::NonBlock); EXPECT_FALSE(buffered_packets->HasChlosBuffered()); diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_dispatcher_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_dispatcher_test.cc index 7036935c5a67..c3ab38f57ff5 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_dispatcher_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_dispatcher_test.cc @@ -26,6 +26,7 @@ #include "extensions/quic_listeners/quiche/platform/envoy_quic_clock.h" #include "extensions/quic_listeners/quiche/envoy_quic_utils.h" #include "extensions/quic_listeners/quiche/envoy_quic_dispatcher.h" +#include "extensions/quic_listeners/quiche/envoy_quic_server_session.h" #include "test/extensions/quic_listeners/quiche/test_proof_source.h" #include "test/extensions/quic_listeners/quiche/test_utils.h" #include "extensions/quic_listeners/quiche/envoy_quic_alarm_factory.h" @@ -55,9 +56,10 @@ class EnvoyQuicDispatcherTest : public QuicMultiVersionTest, listen_socket_(std::make_unique>>( Network::Test::getCanonicalLoopbackAddress(version_), nullptr, /*bind*/ true)), - connection_helper_(*dispatcher_), + connection_helper_(*dispatcher_), proof_source_(new TestProofSource()), crypto_config_(quic::QuicCryptoServerConfig::TESTING, quic::QuicRandom::GetInstance(), - std::make_unique(), quic::KeyExchangeSource::Default()), + std::unique_ptr(proof_source_), + quic::KeyExchangeSource::Default()), version_manager_([]() { if (GetParam().second == QuicVersionType::GquicQuicCrypto) { return quic::CurrentSupportedVersionsWithQuicCrypto(); @@ -106,19 +108,104 @@ class EnvoyQuicDispatcherTest : public QuicMultiVersionTest, dispatcher_->run(Event::Dispatcher::RunType::NonBlock); } - std::unique_ptr - createChloReceivedPacket(quic::QuicSocketAddress client_address) { + void processValidChloPacketAndCheckStatus(bool should_buffer) { + quic::QuicSocketAddress peer_addr(version_ == Network::Address::IpVersion::v4 + ? quic::QuicIpAddress::Loopback4() + : quic::QuicIpAddress::Loopback6(), + 54321); + quic::QuicBufferedPacketStore* buffered_packets = + quic::test::QuicDispatcherPeer::GetBufferedPackets(&envoy_quic_dispatcher_); + if (!should_buffer) { + // Set QuicDispatcher::new_sessions_allowed_per_event_loop_ to + // |kNumSessionsToCreatePerLoopForTests| so that received CHLOs can be + // processed immediately. 
+ envoy_quic_dispatcher_.ProcessBufferedChlos(kNumSessionsToCreatePerLoopForTests); + EXPECT_FALSE(buffered_packets->HasChlosBuffered()); + EXPECT_FALSE(buffered_packets->HasBufferedPackets(connection_id_)); + } + + // Create a Quic Crypto or TLS1.3 CHLO packet. EnvoyQuicClock clock(*dispatcher_); Buffer::OwnedImpl payload = generateChloPacketToSend( quic_version_, quic_config_, crypto_config_, connection_id_, clock, - envoyAddressInstanceToQuicSocketAddress(listen_socket_->localAddress()), client_address, + envoyAddressInstanceToQuicSocketAddress(listen_socket_->localAddress()), peer_addr, "test.example.org"); Buffer::RawSliceVector slice = payload.getRawSlices(); ASSERT(slice.size() == 1); auto encrypted_packet = std::make_unique( static_cast(slice[0].mem_), slice[0].len_); - return std::unique_ptr( - quic::test::ConstructReceivedPacket(*encrypted_packet, clock.Now())); + std::unique_ptr received_packet = + std::unique_ptr( + quic::test::ConstructReceivedPacket(*encrypted_packet, clock.Now())); + + envoy_quic_dispatcher_.ProcessPacket( + envoyAddressInstanceToQuicSocketAddress(listen_socket_->localAddress()), peer_addr, + *received_packet); + + if (should_buffer) { + // Incoming CHLO packet is buffered, because ProcessPacket() is called before + // ProcessBufferedChlos(). + EXPECT_TRUE(buffered_packets->HasChlosBuffered()); + EXPECT_TRUE(buffered_packets->HasBufferedPackets(connection_id_)); + + // Process the buffered CHLO now. + envoy_quic_dispatcher_.ProcessBufferedChlos(kNumSessionsToCreatePerLoopForTests); + } + + EXPECT_FALSE(buffered_packets->HasChlosBuffered()); + EXPECT_FALSE(buffered_packets->HasBufferedPackets(connection_id_)); + + // A new QUIC connection is created and its filter installed based on self and peer address. + EXPECT_EQ(1u, envoy_quic_dispatcher_.session_map().size()); + quic::QuicSession* session = + envoy_quic_dispatcher_.session_map().find(connection_id_)->second.get(); + ASSERT(session != nullptr); + EXPECT_TRUE(session->IsEncryptionEstablished()); + EXPECT_EQ(1u, connection_handler_.numConnections()); + auto envoy_connection = static_cast(session); + EXPECT_EQ("test.example.org", envoy_connection->requestedServerName()); + EXPECT_EQ(peer_addr, + envoyAddressInstanceToQuicSocketAddress(envoy_connection->remoteAddress())); + ASSERT(envoy_connection->localAddress() != nullptr); + EXPECT_EQ(*listen_socket_->localAddress(), *envoy_connection->localAddress()); + } + + void processValidChloPacketAndInitializeFilters(bool should_buffer) { + Network::MockFilterChainManager filter_chain_manager; + std::shared_ptr read_filter(new Network::MockReadFilter()); + Network::MockConnectionCallbacks network_connection_callbacks; + testing::StrictMock read_total; + testing::StrictMock read_current; + testing::StrictMock write_total; + testing::StrictMock write_current; + + std::vector filter_factory( + {[&](Network::FilterManager& filter_manager) { + filter_manager.addReadFilter(read_filter); + read_filter->callbacks_->connection().addConnectionCallbacks( + network_connection_callbacks); + read_filter->callbacks_->connection().setConnectionStats( + {read_total, read_current, write_total, write_current, nullptr, nullptr}); + }}); + EXPECT_CALL(proof_source_->filterChain(), networkFilterFactories()) + .WillOnce(ReturnRef(filter_factory)); + EXPECT_CALL(listener_config_, filterChainFactory()); + EXPECT_CALL(listener_config_.filter_chain_factory_, createNetworkFilterChain(_, _)) + .WillOnce(Invoke([](Network::Connection& connection, + const std::vector& filter_factories) { + 
EXPECT_EQ(1u, filter_factories.size()); + Server::Configuration::FilterChainUtility::buildFilterChain(connection, filter_factories); + return true; + })); + EXPECT_CALL(*read_filter, onNewConnection()) + // Stop iteration to avoid calling getRead/WriteBuffer(). + .WillOnce(Return(Network::FilterStatus::StopIteration)); + EXPECT_CALL(network_connection_callbacks, onEvent(Network::ConnectionEvent::Connected)); + + processValidChloPacketAndCheckStatus(should_buffer); + EXPECT_CALL(network_connection_callbacks, onEvent(Network::ConnectionEvent::LocalClose)); + // Shutdown() to close the connection. + envoy_quic_dispatcher_.Shutdown(); } bool quicVersionUsesTls() { return quic_version_.UsesTls(); } @@ -130,6 +217,7 @@ class EnvoyQuicDispatcherTest : public QuicMultiVersionTest, Event::DispatcherPtr dispatcher_; Network::SocketPtr listen_socket_; EnvoyQuicConnectionHelper connection_helper_; + TestProofSource* proof_source_; quic::QuicCryptoServerConfig crypto_config_; quic::QuicConfig quic_config_; quic::QuicVersionManager version_manager_; @@ -146,216 +234,25 @@ INSTANTIATE_TEST_SUITE_P(EnvoyQuicDispatcherTests, EnvoyQuicDispatcherTest, testing::ValuesIn(generateTestParam()), testParamsToString); TEST_P(EnvoyQuicDispatcherTest, CreateNewConnectionUponCHLO) { - quic::QuicSocketAddress peer_addr(version_ == Network::Address::IpVersion::v4 - ? quic::QuicIpAddress::Loopback4() - : quic::QuicIpAddress::Loopback6(), - 54321); - Network::MockFilterChain filter_chain; - Network::MockFilterChainManager filter_chain_manager; - EXPECT_CALL(listener_config_, filterChainManager()).WillOnce(ReturnRef(filter_chain_manager)); - EXPECT_CALL(filter_chain_manager, findFilterChain(_)) - .WillOnce(Invoke([&](const Network::ConnectionSocket& socket) { - EXPECT_EQ(*listen_socket_->localAddress(), *socket.localAddress()); - EXPECT_EQ(Extensions::TransportSockets::TransportProtocolNames::get().Quic, - socket.detectedTransportProtocol()); - EXPECT_EQ(peer_addr, envoyAddressInstanceToQuicSocketAddress(socket.remoteAddress())); - return &filter_chain; - })); - std::shared_ptr read_filter(new Network::MockReadFilter()); - Network::MockConnectionCallbacks network_connection_callbacks; - testing::StrictMock read_total; - testing::StrictMock read_current; - testing::StrictMock write_total; - testing::StrictMock write_current; - - std::vector filter_factory( - {[&](Network::FilterManager& filter_manager) { - filter_manager.addReadFilter(read_filter); - read_filter->callbacks_->connection().addConnectionCallbacks(network_connection_callbacks); - read_filter->callbacks_->connection().setConnectionStats( - {read_total, read_current, write_total, write_current, nullptr, nullptr}); - }}); - EXPECT_CALL(filter_chain, networkFilterFactories()).WillOnce(ReturnRef(filter_factory)); - EXPECT_CALL(listener_config_, filterChainFactory()); - EXPECT_CALL(listener_config_.filter_chain_factory_, createNetworkFilterChain(_, _)) - .WillOnce(Invoke([](Network::Connection& connection, - const std::vector& filter_factories) { - EXPECT_EQ(1u, filter_factories.size()); - Server::Configuration::FilterChainUtility::buildFilterChain(connection, filter_factories); - return true; - })); - EXPECT_CALL(*read_filter, onNewConnection()) - // Stop iteration to avoid calling getRead/WriteBuffer(). - .WillOnce(Return(Network::FilterStatus::StopIteration)); - if (!quicVersionUsesTls()) { + if (quicVersionUsesTls()) { // QUICHE doesn't support 0-RTT TLS1.3 handshake yet. 
- EXPECT_CALL(network_connection_callbacks, onEvent(Network::ConnectionEvent::Connected)); + processValidChloPacketAndCheckStatus(false); + // Shutdown() to close the connection. + envoy_quic_dispatcher_.Shutdown(); + return; } - - quic::QuicBufferedPacketStore* buffered_packets = - quic::test::QuicDispatcherPeer::GetBufferedPackets(&envoy_quic_dispatcher_); - EXPECT_FALSE(buffered_packets->HasChlosBuffered()); - EXPECT_FALSE(buffered_packets->HasBufferedPackets(connection_id_)); - - // Set QuicDispatcher::new_sessions_allowed_per_event_loop_ to - // |kNumSessionsToCreatePerLoopForTests| so that received CHLOs can be - // processed immediately. - envoy_quic_dispatcher_.ProcessBufferedChlos(kNumSessionsToCreatePerLoopForTests); - - std::unique_ptr received_packet = createChloReceivedPacket(peer_addr); - envoy_quic_dispatcher_.ProcessPacket( - envoyAddressInstanceToQuicSocketAddress(listen_socket_->localAddress()), peer_addr, - *received_packet); - - EXPECT_FALSE(buffered_packets->HasChlosBuffered()); - EXPECT_FALSE(buffered_packets->HasBufferedPackets(connection_id_)); - - // A new QUIC connection is created and its filter installed based on self and peer address. - EXPECT_EQ(1u, envoy_quic_dispatcher_.session_map().size()); - EXPECT_TRUE( - envoy_quic_dispatcher_.session_map().find(connection_id_)->second->IsEncryptionEstablished()); - EXPECT_EQ(1u, connection_handler_.numConnections()); - EXPECT_EQ("test.example.org", read_filter->callbacks_->connection().requestedServerName()); - EXPECT_EQ(peer_addr, envoyAddressInstanceToQuicSocketAddress( - read_filter->callbacks_->connection().remoteAddress())); - EXPECT_EQ(*listen_socket_->localAddress(), *read_filter->callbacks_->connection().localAddress()); - EXPECT_CALL(network_connection_callbacks, onEvent(Network::ConnectionEvent::LocalClose)); - // Shutdown() to close the connection. - envoy_quic_dispatcher_.Shutdown(); + processValidChloPacketAndInitializeFilters(false); } TEST_P(EnvoyQuicDispatcherTest, CreateNewConnectionUponBufferedCHLO) { - quic::QuicSocketAddress peer_addr(version_ == Network::Address::IpVersion::v4 - ? 
quic::QuicIpAddress::Loopback4() - : quic::QuicIpAddress::Loopback6(), - 54321); - Network::MockFilterChain filter_chain; - Network::MockFilterChainManager filter_chain_manager; - EXPECT_CALL(listener_config_, filterChainManager()).WillOnce(ReturnRef(filter_chain_manager)); - EXPECT_CALL(filter_chain_manager, findFilterChain(_)) - .WillOnce(Invoke([&](const Network::ConnectionSocket& socket) { - EXPECT_EQ(*listen_socket_->localAddress(), *socket.localAddress()); - EXPECT_EQ(Extensions::TransportSockets::TransportProtocolNames::get().Quic, - socket.detectedTransportProtocol()); - EXPECT_EQ(peer_addr, envoyAddressInstanceToQuicSocketAddress(socket.remoteAddress())); - return &filter_chain; - })); - std::shared_ptr read_filter(new Network::MockReadFilter()); - Network::MockConnectionCallbacks network_connection_callbacks; - testing::StrictMock read_total; - testing::StrictMock read_current; - testing::StrictMock write_total; - testing::StrictMock write_current; - - std::vector filter_factory( - {[&](Network::FilterManager& filter_manager) { - filter_manager.addReadFilter(read_filter); - read_filter->callbacks_->connection().addConnectionCallbacks(network_connection_callbacks); - read_filter->callbacks_->connection().setConnectionStats( - {read_total, read_current, write_total, write_current, nullptr, nullptr}); - }}); - EXPECT_CALL(filter_chain, networkFilterFactories()).WillOnce(ReturnRef(filter_factory)); - EXPECT_CALL(listener_config_, filterChainFactory()); - EXPECT_CALL(listener_config_.filter_chain_factory_, createNetworkFilterChain(_, _)) - .WillOnce(Invoke([](Network::Connection& connection, - const std::vector& filter_factories) { - EXPECT_EQ(1u, filter_factories.size()); - Server::Configuration::FilterChainUtility::buildFilterChain(connection, filter_factories); - return true; - })); - EXPECT_CALL(*read_filter, onNewConnection()) - // Stop iteration to avoid calling getRead/WriteBuffer(). - .WillOnce(Return(Network::FilterStatus::StopIteration)); - if (!quicVersionUsesTls()) { - EXPECT_CALL(network_connection_callbacks, onEvent(Network::ConnectionEvent::Connected)); + if (quicVersionUsesTls()) { + // QUICHE doesn't support 0-RTT TLS1.3 handshake yet. + processValidChloPacketAndCheckStatus(true); + // Shutdown() to close the connection. + envoy_quic_dispatcher_.Shutdown(); + return; } - quic::QuicBufferedPacketStore* buffered_packets = - quic::test::QuicDispatcherPeer::GetBufferedPackets(&envoy_quic_dispatcher_); - EXPECT_FALSE(buffered_packets->HasChlosBuffered()); - EXPECT_FALSE(buffered_packets->HasBufferedPackets(connection_id_)); - - // Incoming CHLO packet is buffered, because ProcessPacket() is called before - // ProcessBufferedChlos(). - std::unique_ptr received_packet = createChloReceivedPacket(peer_addr); - envoy_quic_dispatcher_.ProcessPacket( - envoyAddressInstanceToQuicSocketAddress(listen_socket_->localAddress()), peer_addr, - *received_packet); - EXPECT_TRUE(buffered_packets->HasChlosBuffered()); - EXPECT_TRUE(buffered_packets->HasBufferedPackets(connection_id_)); - - // Process buffered CHLO. - envoy_quic_dispatcher_.ProcessBufferedChlos(kNumSessionsToCreatePerLoopForTests); - EXPECT_FALSE(buffered_packets->HasChlosBuffered()); - EXPECT_FALSE(buffered_packets->HasBufferedPackets(connection_id_)); - - // A new QUIC connection is created and its filter installed based on self and peer address. 
- EXPECT_EQ(1u, envoy_quic_dispatcher_.session_map().size()); - EXPECT_TRUE( - envoy_quic_dispatcher_.session_map().find(connection_id_)->second->IsEncryptionEstablished()); - EXPECT_EQ(1u, connection_handler_.numConnections()); - EXPECT_EQ("test.example.org", read_filter->callbacks_->connection().requestedServerName()); - EXPECT_EQ(peer_addr, envoyAddressInstanceToQuicSocketAddress( - read_filter->callbacks_->connection().remoteAddress())); - EXPECT_EQ(*listen_socket_->localAddress(), *read_filter->callbacks_->connection().localAddress()); - EXPECT_CALL(network_connection_callbacks, onEvent(Network::ConnectionEvent::LocalClose)); - // Shutdown() to close the connection. - envoy_quic_dispatcher_.Shutdown(); -} - -TEST_P(EnvoyQuicDispatcherTest, CloseConnectionDueToMissingFilterChain) { - quic::QuicSocketAddress peer_addr(version_ == Network::Address::IpVersion::v4 - ? quic::QuicIpAddress::Loopback4() - : quic::QuicIpAddress::Loopback6(), - 54321); - Network::MockFilterChainManager filter_chain_manager; - EXPECT_CALL(listener_config_, filterChainManager()).WillOnce(ReturnRef(filter_chain_manager)); - EXPECT_CALL(filter_chain_manager, findFilterChain(_)) - .WillOnce(Invoke([&](const Network::ConnectionSocket& socket) { - EXPECT_EQ(*listen_socket_->localAddress(), *socket.localAddress()); - EXPECT_EQ(peer_addr, envoyAddressInstanceToQuicSocketAddress(socket.remoteAddress())); - return nullptr; - })); - std::unique_ptr received_packet = createChloReceivedPacket(peer_addr); - envoy_quic_dispatcher_.ProcessBufferedChlos(kNumSessionsToCreatePerLoopForTests); - envoy_quic_dispatcher_.ProcessPacket( - envoyAddressInstanceToQuicSocketAddress(listen_socket_->localAddress()), peer_addr, - *received_packet); - EXPECT_EQ(0u, envoy_quic_dispatcher_.session_map().size()); - EXPECT_EQ(0u, connection_handler_.numConnections()); - EXPECT_TRUE(quic::test::QuicDispatcherPeer::GetTimeWaitListManager(&envoy_quic_dispatcher_) - ->IsConnectionIdInTimeWait(connection_id_)); - EXPECT_EQ(1u, listener_stats_.downstream_cx_total_.value()); - EXPECT_EQ(0u, listener_stats_.downstream_cx_active_.value()); - EXPECT_EQ(1u, listener_stats_.no_filter_chain_match_.value()); -} - -TEST_P(EnvoyQuicDispatcherTest, CloseConnectionDueToEmptyFilterChain) { - quic::QuicSocketAddress peer_addr(version_ == Network::Address::IpVersion::v4 - ? quic::QuicIpAddress::Loopback4() - : quic::QuicIpAddress::Loopback6(), - 54321); - Network::MockFilterChain filter_chain; - Network::MockFilterChainManager filter_chain_manager; - EXPECT_CALL(listener_config_, filterChainManager()).WillOnce(ReturnRef(filter_chain_manager)); - EXPECT_CALL(filter_chain_manager, findFilterChain(_)) - .WillOnce(Invoke([&](const Network::ConnectionSocket& socket) { - EXPECT_EQ(*listen_socket_->localAddress(), *socket.localAddress()); - EXPECT_EQ(peer_addr, envoyAddressInstanceToQuicSocketAddress(socket.remoteAddress())); - return &filter_chain; - })); - // Empty filter_factory should cause connection close. 
- std::vector filter_factory; - EXPECT_CALL(filter_chain, networkFilterFactories()).WillOnce(ReturnRef(filter_factory)); - - std::unique_ptr received_packet = createChloReceivedPacket(peer_addr); - envoy_quic_dispatcher_.ProcessBufferedChlos(kNumSessionsToCreatePerLoopForTests); - envoy_quic_dispatcher_.ProcessPacket( - envoyAddressInstanceToQuicSocketAddress(listen_socket_->localAddress()), peer_addr, - *received_packet); - EXPECT_EQ(0u, envoy_quic_dispatcher_.session_map().size()); - EXPECT_EQ(0u, connection_handler_.numConnections()); - EXPECT_TRUE(quic::test::QuicDispatcherPeer::GetTimeWaitListManager(&envoy_quic_dispatcher_) - ->IsConnectionIdInTimeWait(connection_id_)); + processValidChloPacketAndInitializeFilters(true); } } // namespace Quic diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_proof_source_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_proof_source_test.cc index 965b090fbf13..e61e34eac270 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_proof_source_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_proof_source_test.cc @@ -23,9 +23,10 @@ namespace Quic { class TestGetProofCallback : public quic::ProofSource::Callback { public: - TestGetProofCallback(bool& called, std::string leaf_cert_scts, const absl::string_view cert) + TestGetProofCallback(bool& called, std::string leaf_cert_scts, const absl::string_view cert, + Network::FilterChain& filter_chain) : called_(called), expected_leaf_certs_scts_(std::move(leaf_cert_scts)), - expected_leaf_cert_(cert) {} + expected_leaf_cert_(cert), expected_filter_chain_(filter_chain) {} // quic::ProofSource::Callback void Run(bool ok, const quic::QuicReferenceCountedPointer& chain, @@ -35,7 +36,8 @@ class TestGetProofCallback : public quic::ProofSource::Callback { EXPECT_EQ(expected_leaf_certs_scts_, proof.leaf_cert_scts); EXPECT_EQ(2, chain->certs.size()); EXPECT_EQ(expected_leaf_cert_, chain->certs[0]); - EXPECT_EQ(nullptr, details); + EXPECT_EQ(&expected_filter_chain_, + &static_cast(details.get())->filterChain()); called_ = true; } @@ -43,6 +45,7 @@ class TestGetProofCallback : public quic::ProofSource::Callback { bool& called_; std::string expected_leaf_certs_scts_; absl::string_view expected_leaf_cert_; + Network::FilterChain& expected_filter_chain_; }; class EnvoyQuicProofSourceTest : public ::testing::Test { @@ -50,8 +53,10 @@ class EnvoyQuicProofSourceTest : public ::testing::Test { EnvoyQuicProofSourceTest() : server_address_(quic::QuicIpAddress::Loopback4(), 12345), client_address_(quic::QuicIpAddress::Loopback4(), 54321), - listen_socket_(std::make_shared()), - proof_source_(listen_socket_, filter_chain_manager_) {} + listener_stats_({ALL_LISTENER_STATS(POOL_COUNTER(listener_config_.listenerScope()), + POOL_GAUGE(listener_config_.listenerScope()), + POOL_HISTOGRAM(listener_config_.listenerScope()))}), + proof_source_(listen_socket_, filter_chain_manager_, listener_stats_) {} protected: std::string hostname_{"www.fake.com"}; @@ -64,16 +69,18 @@ class EnvoyQuicProofSourceTest : public ::testing::Test { std::string pkey_{quic::test::kTestCertificatePrivateKeyPem}; Network::MockFilterChain filter_chain_; Network::MockFilterChainManager filter_chain_manager_; - std::shared_ptr listen_socket_; + Network::MockListenSocket listen_socket_; + testing::NiceMock listener_config_; + Server::ListenerStats listener_stats_; EnvoyQuicProofSource proof_source_; EnvoyQuicFakeProofVerifier proof_verifier_; }; TEST_F(EnvoyQuicProofSourceTest, TestGetProof) { bool called = false; - auto callback 
= std::make_unique(called, "Fake timestamp", - quic::test::kTestCertificate); - EXPECT_CALL(*listen_socket_, ioHandle()).Times(2); + auto callback = std::make_unique( + called, "Fake timestamp", quic::test::kTestCertificate, filter_chain_); + EXPECT_CALL(listen_socket_, ioHandle()).Times(2); EXPECT_CALL(filter_chain_manager_, findFilterChain(_)) .WillRepeatedly(Invoke([&](const Network::ConnectionSocket& connection_socket) { EXPECT_EQ(*quicAddressToEnvoyAddressInstance(server_address_), diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_server_session_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_server_session_test.cc index 28e04399b8c7..6ddae3c80624 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_server_session_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_server_session_test.cc @@ -57,12 +57,11 @@ class TestEnvoyQuicServerConnection : public EnvoyQuicServerConnection { quic::QuicAlarmFactory& alarm_factory, quic::QuicPacketWriter& writer, const quic::ParsedQuicVersionVector& supported_versions, - Network::ListenerConfig& listener_config, - Server::ListenerStats& stats, Network::Socket& listen_socket) + Network::Socket& listen_socket) : EnvoyQuicServerConnection(quic::test::TestConnectionId(), quic::QuicSocketAddress(quic::QuicIpAddress::Loopback4(), 12345), helper, alarm_factory, &writer, /*owns_writer=*/false, - supported_versions, listener_config, stats, listen_socket) {} + supported_versions, listen_socket) {} Network::Connection::ConnectionStats& connectionStats() const { return EnvoyQuicConnection::connectionStats(); @@ -84,17 +83,60 @@ class TestEnvoyQuicServerSession : public EnvoyQuicServerSession { } }; -class TestQuicCryptoServerStream : public quic::QuicCryptoServerStream { +class ProofSourceDetailsSetter { public: + virtual ~ProofSourceDetailsSetter() = default; + + virtual void setProofSourceDetails(std::unique_ptr details) = 0; +}; + +class TestQuicCryptoServerStream : public EnvoyQuicCryptoServerStream, + public ProofSourceDetailsSetter { +public: + ~TestQuicCryptoServerStream() override = default; + explicit TestQuicCryptoServerStream(const quic::QuicCryptoServerConfig* crypto_config, quic::QuicCompressedCertsCache* compressed_certs_cache, quic::QuicSession* session, quic::QuicCryptoServerStreamBase::Helper* helper) - : quic::QuicCryptoServerStream(crypto_config, compressed_certs_cache, session, helper) {} + : EnvoyQuicCryptoServerStream(crypto_config, compressed_certs_cache, session, helper) {} + + bool encryption_established() const override { return true; } + + const EnvoyQuicProofSourceDetails* proofSourceDetails() const override { return details_.get(); } + + void setProofSourceDetails(std::unique_ptr details) override { + details_ = std::move(details); + } + +private: + std::unique_ptr details_; +}; + +class TestEnvoyQuicTlsServerHandshaker : public EnvoyQuicTlsServerHandshaker, + public ProofSourceDetailsSetter { +public: + ~TestEnvoyQuicTlsServerHandshaker() override = default; - using quic::QuicCryptoServerStream::QuicCryptoServerStream; + TestEnvoyQuicTlsServerHandshaker(quic::QuicSession* session, + const quic::QuicCryptoServerConfig& crypto_config) + : EnvoyQuicTlsServerHandshaker(session, crypto_config), + params_(new quic::QuicCryptoNegotiatedParameters) { + params_->cipher_suite = 1; + } bool encryption_established() const override { return true; } + const EnvoyQuicProofSourceDetails* proofSourceDetails() const override { return details_.get(); } + void setProofSourceDetails(std::unique_ptr details) 
override { + details_ = std::move(details); + } + const quic::QuicCryptoNegotiatedParameters& crypto_negotiated_params() const override { + return *params_; + } + +private: + std::unique_ptr details_; + quic::QuicReferenceCountedPointer params_; }; class EnvoyQuicServerSessionTest : public testing::TestWithParam { @@ -108,19 +150,16 @@ class EnvoyQuicServerSessionTest : public testing::TestWithParam { SetQuicReloadableFlag(quic_disable_version_draft_25, !GetParam()); return quic::ParsedVersionOfIndex(quic::CurrentSupportedVersions(), 0); }()), - listener_stats_({ALL_LISTENER_STATS(POOL_COUNTER(listener_config_.listenerScope()), - POOL_GAUGE(listener_config_.listenerScope()), - POOL_HISTOGRAM(listener_config_.listenerScope()))}), quic_connection_(new TestEnvoyQuicServerConnection( - connection_helper_, alarm_factory_, writer_, quic_version_, listener_config_, - listener_stats_, *listener_config_.socket_)), + connection_helper_, alarm_factory_, writer_, quic_version_, *listener_config_.socket_)), crypto_config_(quic::QuicCryptoServerConfig::TESTING, quic::QuicRandom::GetInstance(), std::make_unique(), quic::KeyExchangeSource::Default()), envoy_quic_session_(quic_config_, quic_version_, std::unique_ptr(quic_connection_), /*visitor=*/nullptr, &crypto_stream_helper_, &crypto_config_, &compressed_certs_cache_, *dispatcher_, - /*send_buffer_limit*/ quic::kDefaultFlowControlSendWindow * 1.5), + /*send_buffer_limit*/ quic::kDefaultFlowControlSendWindow * 1.5, + listener_config_), read_filter_(new Network::MockReadFilter()) { EXPECT_EQ(time_system_.systemTime(), envoy_quic_session_.streamInfo().startTime()); @@ -143,14 +182,23 @@ class EnvoyQuicServerSessionTest : public testing::TestWithParam { envoy_quic_session_.Initialize(); setQuicConfigWithDefaultValues(envoy_quic_session_.config()); envoy_quic_session_.OnConfigNegotiated(); + quic::test::QuicConfigPeer::SetNegotiated(envoy_quic_session_.config(), true); quic::test::QuicConnectionPeer::SetAddressValidated(quic_connection_); - // Switch to a encryption forward secure crypto stream. 
quic::test::QuicServerSessionBasePeer::SetCryptoStream(&envoy_quic_session_, nullptr); - quic::test::QuicServerSessionBasePeer::SetCryptoStream( - &envoy_quic_session_, - new TestQuicCryptoServerStream(&crypto_config_, &compressed_certs_cache_, - &envoy_quic_session_, &crypto_stream_helper_)); + quic::QuicCryptoServerStreamBase* crypto_stream = nullptr; + if (quic_version_[0].handshake_protocol == quic::PROTOCOL_QUIC_CRYPTO) { + auto test_crypto_stream = new TestQuicCryptoServerStream( + &crypto_config_, &compressed_certs_cache_, &envoy_quic_session_, &crypto_stream_helper_); + crypto_stream = test_crypto_stream; + crypto_stream_ = test_crypto_stream; + } else { + auto test_crypto_stream = + new TestEnvoyQuicTlsServerHandshaker(&envoy_quic_session_, crypto_config_); + crypto_stream = test_crypto_stream; + crypto_stream_ = test_crypto_stream; + } + quic::test::QuicServerSessionBasePeer::SetCryptoStream(&envoy_quic_session_, crypto_stream); quic_connection_->SetDefaultEncryptionLevel(quic::ENCRYPTION_FORWARD_SECURE); quic_connection_->SetEncrypter( quic::ENCRYPTION_FORWARD_SECURE, @@ -213,11 +261,11 @@ class EnvoyQuicServerSessionTest : public testing::TestWithParam { quic::ParsedQuicVersionVector quic_version_; testing::NiceMock writer_; testing::NiceMock listener_config_; - Server::ListenerStats listener_stats_; TestEnvoyQuicServerConnection* quic_connection_; quic::QuicConfig quic_config_; quic::QuicCryptoServerConfig crypto_config_; testing::NiceMock crypto_stream_helper_; + ProofSourceDetailsSetter* crypto_stream_; TestEnvoyQuicServerSession envoy_quic_session_; quic::QuicCompressedCertsCache compressed_certs_cache_{100}; std::shared_ptr read_filter_; @@ -718,31 +766,9 @@ TEST_P(EnvoyQuicServerSessionTest, GoAway) { } TEST_P(EnvoyQuicServerSessionTest, InitializeFilterChain) { - std::string packet_content("random payload"); - auto encrypted_packet = - std::unique_ptr(quic::test::ConstructEncryptedPacket( - quic_connection_->connection_id(), quic::EmptyQuicConnectionId(), /*version_flag=*/true, - /*reset_flag*/ false, /*packet_number=*/1, packet_content)); - - quic::QuicSocketAddress self_address( - envoyAddressInstanceToQuicSocketAddress(listener_config_.socket_->localAddress())); - auto packet = std::unique_ptr( - quic::test::ConstructReceivedPacket(*encrypted_packet, connection_helper_.GetClock()->Now())); - - // Receiving above packet should trigger filter chain retrieval. 
- Network::MockFilterChainManager filter_chain_manager; - EXPECT_CALL(listener_config_, filterChainManager()).WillOnce(ReturnRef(filter_chain_manager)); Network::MockFilterChain filter_chain; - EXPECT_CALL(filter_chain_manager, findFilterChain(_)) - .WillOnce(Invoke([&](const Network::ConnectionSocket& socket) { - EXPECT_EQ(*quicAddressToEnvoyAddressInstance(quic_connection_->peer_address()), - *socket.remoteAddress()); - EXPECT_EQ(*quicAddressToEnvoyAddressInstance(self_address), *socket.localAddress()); - EXPECT_EQ(listener_config_.socket_->ioHandle().fd(), socket.ioHandle().fd()); - EXPECT_EQ(Extensions::TransportSockets::TransportProtocolNames::get().Quic, - socket.detectedTransportProtocol()); - return &filter_chain; - })); + crypto_stream_->setProofSourceDetails( + std::make_unique(filter_chain)); std::vector filter_factory{[this]( Network::FilterManager& filter_manager) { filter_manager.addReadFilter(read_filter_); @@ -761,11 +787,15 @@ TEST_P(EnvoyQuicServerSessionTest, InitializeFilterChain) { Server::Configuration::FilterChainUtility::buildFilterChain(connection, filter_factories); return true; })); - // Connection should be closed because this packet has invalid payload. - EXPECT_CALL(*quic_connection_, SendConnectionClosePacket(_, _)); - EXPECT_CALL(network_connection_callbacks_, onEvent(Network::ConnectionEvent::LocalClose)); - quic_connection_->ProcessUdpPacket(self_address, quic_connection_->peer_address(), *packet); - EXPECT_FALSE(quic_connection_->connected()); + EXPECT_CALL(network_connection_callbacks_, onEvent(Network::ConnectionEvent::Connected)); + if (!quic_version_[0].UsesTls()) { + envoy_quic_session_.SetDefaultEncryptionLevel(quic::ENCRYPTION_FORWARD_SECURE); + } else { + if (quic::VersionUsesHttp3(quic_version_[0].transport_version)) { + EXPECT_CALL(*quic_connection_, SendControlFrame(_)); + } + envoy_quic_session_.OnOneRttKeysAvailable(); + } EXPECT_EQ(nullptr, envoy_quic_session_.socketOptions()); EXPECT_TRUE(quic_connection_->connectionSocket()->ioHandle().isOpen()); EXPECT_TRUE(quic_connection_->connectionSocket()->ioHandle().close().ok()); diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_server_stream_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_server_stream_test.cc index 9cfecf56bbe5..6468f95fe9fa 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_server_stream_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_server_stream_test.cc @@ -50,8 +50,7 @@ class EnvoyQuicServerStreamTest : public testing::TestWithParam { quic_connection_(quic::test::TestConnectionId(), quic::QuicSocketAddress(quic::QuicIpAddress::Any6(), 12345), connection_helper_, alarm_factory_, &writer_, - /*owns_writer=*/false, {quic_version_}, listener_config_, listener_stats_, - *listener_config_.socket_), + /*owns_writer=*/false, {quic_version_}, *listener_config_.socket_), quic_session_(quic_config_, {quic_version_}, &quic_connection_, *dispatcher_, quic_config_.GetInitialStreamFlowControlWindowToSend() * 2), stream_id_(VersionUsesHttp3(quic_version_.transport_version) ? 
4u : 5u), diff --git a/test/extensions/quic_listeners/quiche/test_proof_source.h b/test/extensions/quic_listeners/quiche/test_proof_source.h index 3df1aec6b23a..ad8bae60a540 100644 --- a/test/extensions/quic_listeners/quiche/test_proof_source.h +++ b/test/extensions/quic_listeners/quiche/test_proof_source.h @@ -13,6 +13,8 @@ #endif #include + +#include "test/mocks/network/mocks.h" #include "extensions/quic_listeners/quiche/envoy_quic_fake_proof_source.h" namespace Envoy { @@ -35,13 +37,18 @@ class TestProofSource : public Quic::EnvoyQuicFakeProofSource { const std::string& /*hostname*/, uint16_t /*signature_algorithm*/, quiche::QuicheStringPiece in, std::unique_ptr callback) override { - callback->Run(true, absl::StrCat("Fake signature for { ", in, " }"), nullptr); + callback->Run(true, absl::StrCat("Fake signature for { ", in, " }"), + std::make_unique(filter_chain_)); } + const Network::MockFilterChain& filterChain() const { return filter_chain_; } + private: quic::QuicReferenceCountedPointer cert_chain_{ new quic::ProofSource::Chain( std::vector{std::string(quic::test::kTestCertificate)})}; + + Network::MockFilterChain filter_chain_; }; } // namespace Quic From 1302b2acf969d4380698e14e3308b2b59eaef19e Mon Sep 17 00:00:00 2001 From: Tengpeng <66138273+tengpengli@users.noreply.github.com> Date: Mon, 13 Jul 2020 15:36:06 -0400 Subject: [PATCH 606/909] Fuzz: add fuzz test for functions in common/stats/utility.cc (#12060) Commit Message: Add fuzz test for counterFromElements() and counterFromStatNames() in common/stats/utility.cc Additional Description: The changes in this PR are copied from PR #11644 due to the DCO check failure Risk Level: Low Testing: Add fuzz test for counterFromElements() and counterFromStatNames() Docs Changes: N/A Release Notes: N/A Signed-off-by: tengpeng --- test/common/stats/utility_fuzz_test.cc | 64 +++++++++++++++++++++++++- 1 file changed, 62 insertions(+), 2 deletions(-) diff --git a/test/common/stats/utility_fuzz_test.cc b/test/common/stats/utility_fuzz_test.cc index bcf2bca6e4f9..7d34966a7ada 100644 --- a/test/common/stats/utility_fuzz_test.cc +++ b/test/common/stats/utility_fuzz_test.cc @@ -1,3 +1,8 @@ +#include +#include + +#include "common/stats/isolated_store_impl.h" +#include "common/stats/symbol_table_creator.h" #include "common/stats/utility.h" #include "test/fuzz/fuzz_runner.h" @@ -8,8 +13,63 @@ namespace Envoy { namespace Fuzz { DEFINE_FUZZER(const uint8_t* buf, size_t len) { - const absl::string_view string_buffer(reinterpret_cast(buf), len); - Stats::Utility::sanitizeStatsName(string_buffer); + + Stats::Utility::sanitizeStatsName(absl::string_view(reinterpret_cast(buf), len)); + + if (len < 4) { + return; + } + + // Create a greater scope vector to store the string to prevent the string memory from being free + std::list string_list; + auto make_string = [&string_list](absl::string_view str) -> absl::string_view { + string_list.push_back(std::string(str)); + return string_list.back(); + }; + + // generate a random number as the maximum length of the stat name + const size_t max_len = *reinterpret_cast(buf) % (len - 3); + FuzzedDataProvider provider(buf, len); + + // model common/stats/utility_test.cc, initialize those objects to create random elements as + // input + Stats::SymbolTablePtr symbol_table; + if (provider.ConsumeBool()) { + symbol_table = std::make_unique(); + } else { + symbol_table = std::make_unique(); + } + std::unique_ptr store = + std::make_unique(*symbol_table); + Stats::StatNamePool pool(*symbol_table); + Stats::ScopePtr scope = 
store->createScope(provider.ConsumeRandomLengthString(max_len)); + Stats::ElementVec ele_vec; + Stats::StatNameVec sn_vec; + Stats::StatNameTagVector tags; + Stats::StatName key, val; + + if (provider.remaining_bytes() == 0) { + Stats::Utility::counterFromStatNames(*scope, {}); + Stats::Utility::counterFromElements(*scope, {}); + } else { + // add random length string in each loop + while (provider.remaining_bytes() > 3) { + if (provider.ConsumeBool()) { + absl::string_view str = make_string( + provider.ConsumeRandomLengthString(std::min(max_len, provider.remaining_bytes()))); + ele_vec.push_back(Stats::DynamicName(str)); + sn_vec.push_back(pool.add(str)); + } else { + key = pool.add( + provider.ConsumeRandomLengthString(std::min(max_len, provider.remaining_bytes() / 2))); + val = pool.add( + provider.ConsumeRandomLengthString(std::min(max_len, provider.remaining_bytes()))); + tags.push_back({key, val}); + } + Stats::Utility::counterFromStatNames(*scope, sn_vec, tags); + Stats::Utility::counterFromElements(*scope, ele_vec, tags); + } + } } } // namespace Fuzz From 602fd97605cb52ef3c102482e6afc1e2da829692 Mon Sep 17 00:00:00 2001 From: Garrett Date: Mon, 13 Jul 2020 18:21:32 -0400 Subject: [PATCH 607/909] Clarify Xcode requirements for building on macOS (#11977) Signed-off-by: Garrett Heel --- bazel/README.md | 2 +- ci/README.md | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/bazel/README.md b/bazel/README.md index 4675f592157f..a7e3f0165f24 100644 --- a/bazel/README.md +++ b/bazel/README.md @@ -92,7 +92,7 @@ for how to update or override dependencies. ``` _notes_: `coreutils` is used for `realpath`, `gmd5sum` and `gsha256sum` - Xcode is also required to build Envoy on macOS. + The full version of Xcode (not just Command Line Tools) is also required to build Envoy on macOS. Envoy compiles and passes tests with the version of clang installed by Xcode 11.1: Apple clang version 11.0.0 (clang-1100.0.33.8). diff --git a/ci/README.md b/ci/README.md index a137ea7e6129..4e11ef0327d7 100644 --- a/ci/README.md +++ b/ci/README.md @@ -176,8 +176,7 @@ The macOS CI build is part of the [CircleCI](https://circleci.com/gh/envoyproxy/ Dependencies are installed by the `ci/mac_ci_setup.sh` script, via [Homebrew](https://brew.sh), which is pre-installed on the CircleCI macOS image. The dependencies are cached are re-installed on every build. The `ci/mac_ci_steps.sh` script executes the specific commands that -build and test Envoy. If Envoy cannot be built (`error: /Library/Developer/CommandLineTools/usr/bin/libtool: no output file specified (specify with -o output)`), -ensure that Xcode is installed. +build and test Envoy. Note that the full version of Xcode (not just Command Line Tools) is required. 
# Coverity Scan Build Flow From 5f7ffee2d4cd1bedbf014c47e2921c6f6b7a908c Mon Sep 17 00:00:00 2001 From: Bibby Date: Mon, 13 Jul 2020 19:45:28 -0400 Subject: [PATCH 608/909] [redis] Add the auth_username_ to health checks (#11759) Signed-off-by: bibby --- .../extensions/health_checkers/redis/redis.cc | 4 +- .../extensions/health_checkers/redis/redis.h | 1 + .../health_checkers/redis/redis_test.cc | 174 +++++++++++++++++- 3 files changed, 177 insertions(+), 2 deletions(-) diff --git a/source/extensions/health_checkers/redis/redis.cc b/source/extensions/health_checkers/redis/redis.cc index e0ba8989457e..7eb66b5f8b0c 100644 --- a/source/extensions/health_checkers/redis/redis.cc +++ b/source/extensions/health_checkers/redis/redis.cc @@ -19,6 +19,8 @@ RedisHealthChecker::RedisHealthChecker( Extensions::NetworkFilters::Common::Redis::Client::ClientFactory& client_factory) : HealthCheckerImplBase(cluster, config, dispatcher, runtime, random, std::move(event_logger)), client_factory_(client_factory), key_(redis_config.key()), + auth_username_( + NetworkFilters::RedisProxy::ProtocolOptionsConfigImpl::authUsername(cluster.info(), api)), auth_password_(NetworkFilters::RedisProxy::ProtocolOptionsConfigImpl::authPassword( cluster.info(), api)) { if (!key_.empty()) { @@ -65,7 +67,7 @@ void RedisHealthChecker::RedisActiveHealthCheckSession::onInterval() { if (!client_) { client_ = parent_.client_factory_.create( host_, parent_.dispatcher_, *this, redis_command_stats_, - parent_.cluster_.info()->statsScope(), "", parent_.auth_password_); + parent_.cluster_.info()->statsScope(), parent_.auth_username_, parent_.auth_password_); client_->addConnectionCallbacks(*this); } diff --git a/source/extensions/health_checkers/redis/redis.h b/source/extensions/health_checkers/redis/redis.h index 4bc6b44e7db5..73088832f18c 100644 --- a/source/extensions/health_checkers/redis/redis.h +++ b/source/extensions/health_checkers/redis/redis.h @@ -125,6 +125,7 @@ class RedisHealthChecker : public Upstream::HealthCheckerImplBase { Extensions::NetworkFilters::Common::Redis::Client::ClientFactory& client_factory_; Type type_; const std::string key_; + const std::string auth_username_; const std::string auth_password_; }; diff --git a/test/extensions/health_checkers/redis/redis_test.cc b/test/extensions/health_checkers/redis/redis_test.cc index 225de586128c..a16fad088aa8 100644 --- a/test/extensions/health_checkers/redis/redis_test.cc +++ b/test/extensions/health_checkers/redis/redis_test.cc @@ -58,6 +58,42 @@ class RedisHealthCheckerTest Upstream::HealthCheckEventLoggerPtr(event_logger_), *api_, *this); } + void setupWithAuth() { + const std::string yaml = R"EOF( + timeout: 1s + interval: 1s + no_traffic_interval: 5s + interval_jitter: 1s + unhealthy_threshold: 1 + healthy_threshold: 1 + custom_health_check: + name: redis + typed_config: + "@type": type.googleapis.com/envoy.config.health_checker.redis.v2.Redis + )EOF"; + + const auto& health_check_config = Upstream::parseHealthCheckFromV2Yaml(yaml); + const auto& redis_config = getRedisHealthCheckConfig( + health_check_config, ProtobufMessage::getStrictValidationVisitor()); + + std::string auth_yaml = R"EOF( + auth_username: { inline_string: "test user" } + auth_password: { inline_string: "test password" } + )EOF"; + envoy::extensions::filters::network::redis_proxy::v3::RedisProtocolOptions proto_config{}; + TestUtility::loadFromYaml(auth_yaml, proto_config); + + Upstream::ProtocolOptionsConfigConstSharedPtr options = std::make_shared< + const 
Envoy::Extensions::NetworkFilters::RedisProxy::ProtocolOptionsConfigImpl>( + proto_config); + + EXPECT_CALL(*cluster_->info_, extensionProtocolOptions(_)).WillRepeatedly(Return(options)); + + health_checker_ = std::make_shared( + *cluster_, health_check_config, redis_config, dispatcher_, runtime_, random_, + Upstream::HealthCheckEventLoggerPtr(event_logger_), *api_, *this); + } + void setupAlwaysLogHealthCheckFailures() { const std::string yaml = R"EOF( timeout: 1s @@ -106,6 +142,43 @@ class RedisHealthCheckerTest Upstream::HealthCheckEventLoggerPtr(event_logger_), *api_, *this); } + void setupExistsHealthcheckWithAuth() { + const std::string yaml = R"EOF( + timeout: 1s + interval: 1s + no_traffic_interval: 5s + interval_jitter: 1s + unhealthy_threshold: 1 + healthy_threshold: 1 + custom_health_check: + name: redis + typed_config: + "@type": type.googleapis.com/envoy.config.health_checker.redis.v2.Redis + key: foo + )EOF"; + + const auto& health_check_config = Upstream::parseHealthCheckFromV2Yaml(yaml); + const auto& redis_config = getRedisHealthCheckConfig( + health_check_config, ProtobufMessage::getStrictValidationVisitor()); + + std::string auth_yaml = R"EOF( + auth_username: { inline_string: "test user" } + auth_password: { inline_string: "test password" } + )EOF"; + envoy::extensions::filters::network::redis_proxy::v3::RedisProtocolOptions proto_config{}; + TestUtility::loadFromYaml(auth_yaml, proto_config); + + Upstream::ProtocolOptionsConfigConstSharedPtr options = std::make_shared< + const Envoy::Extensions::NetworkFilters::RedisProxy::ProtocolOptionsConfigImpl>( + proto_config); + + EXPECT_CALL(*cluster_->info_, extensionProtocolOptions(_)).WillRepeatedly(Return(options)); + + health_checker_ = std::make_shared( + *cluster_, health_check_config, redis_config, dispatcher_, runtime_, random_, + Upstream::HealthCheckEventLoggerPtr(event_logger_), *api_, *this); + } + void setupExistsHealthcheckDeprecated(bool avoid_boosting = true) { const std::string yaml = R"EOF( timeout: 1s @@ -157,7 +230,9 @@ class RedisHealthCheckerTest create(Upstream::HostConstSharedPtr, Event::Dispatcher&, const Extensions::NetworkFilters::Common::Redis::Client::Config&, const Extensions::NetworkFilters::Common::Redis::RedisCommandStatsSharedPtr&, - Stats::Scope&, const std::string&, const std::string&) override { + Stats::Scope&, const std::string& username, const std::string& password) override { + EXPECT_EQ(auth_username_, username); + EXPECT_EQ(auth_password_, password); return Extensions::NetworkFilters::Common::Redis::Client::ClientPtr{create_()}; } @@ -215,8 +290,105 @@ class RedisHealthCheckerTest Extensions::NetworkFilters::Common::Redis::Client::ClientCallbacks* pool_callbacks_{}; std::shared_ptr health_checker_; Api::ApiPtr api_; + std::string auth_username_; + std::string auth_password_; }; +TEST_F(RedisHealthCheckerTest, PingWithAuth) { + InSequence s; + + auth_username_ = "test user"; + auth_password_ = "test password"; + + setupWithAuth(); + + cluster_->prioritySet().getMockHostSet(0)->hosts_ = { + Upstream::makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + + expectSessionCreate(); + expectClientCreate(); + expectPingRequestCreate(); + health_checker_->start(); + + client_->runHighWatermarkCallbacks(); + client_->runLowWatermarkCallbacks(); + + // Success + EXPECT_CALL(*timeout_timer_, disableTimer()); + EXPECT_CALL(*interval_timer_, enableTimer(_, _)); + NetworkFilters::Common::Redis::RespValuePtr response( + new NetworkFilters::Common::Redis::RespValue()); + 
response->type(NetworkFilters::Common::Redis::RespType::SimpleString); + response->asString() = "PONG"; + pool_callbacks_->onResponse(std::move(response)); + + expectPingRequestCreate(); + interval_timer_->invokeCallback(); + + // Failure, invalid auth + EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(*timeout_timer_, disableTimer()); + EXPECT_CALL(*interval_timer_, enableTimer(_, _)); + response = std::make_unique(); + response->type(NetworkFilters::Common::Redis::RespType::Error); + response->asString() = "WRONGPASS invalid username-password pair"; + pool_callbacks_->onResponse(std::move(response)); + + EXPECT_CALL(*client_, close()); + + EXPECT_EQ(2UL, cluster_->info_->stats_store_.counter("health_check.attempt").value()); + EXPECT_EQ(1UL, cluster_->info_->stats_store_.counter("health_check.success").value()); + EXPECT_EQ(1UL, cluster_->info_->stats_store_.counter("health_check.failure").value()); + EXPECT_EQ(0UL, cluster_->info_->stats_store_.counter("health_check.network_failure").value()); +} + +TEST_F(RedisHealthCheckerTest, ExistsWithAuth) { + InSequence s; + + auth_username_ = "test user"; + auth_password_ = "test password"; + + setupExistsHealthcheckWithAuth(); + + cluster_->prioritySet().getMockHostSet(0)->hosts_ = { + Upstream::makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + + expectSessionCreate(); + expectClientCreate(); + expectExistsRequestCreate(); + health_checker_->start(); + + client_->runHighWatermarkCallbacks(); + client_->runLowWatermarkCallbacks(); + + // Success + EXPECT_CALL(*timeout_timer_, disableTimer()); + EXPECT_CALL(*interval_timer_, enableTimer(_, _)); + NetworkFilters::Common::Redis::RespValuePtr response( + new NetworkFilters::Common::Redis::RespValue()); + response->type(NetworkFilters::Common::Redis::RespType::Integer); + response->asInteger() = 0; + pool_callbacks_->onResponse(std::move(response)); + + expectExistsRequestCreate(); + interval_timer_->invokeCallback(); + + // Failure, invalid auth + EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); + EXPECT_CALL(*timeout_timer_, disableTimer()); + EXPECT_CALL(*interval_timer_, enableTimer(_, _)); + response = std::make_unique(); + response->type(NetworkFilters::Common::Redis::RespType::Error); + response->asString() = "WRONGPASS invalid username-password pair"; + pool_callbacks_->onResponse(std::move(response)); + + EXPECT_CALL(*client_, close()); + + EXPECT_EQ(2UL, cluster_->info_->stats_store_.counter("health_check.attempt").value()); + EXPECT_EQ(1UL, cluster_->info_->stats_store_.counter("health_check.success").value()); + EXPECT_EQ(1UL, cluster_->info_->stats_store_.counter("health_check.failure").value()); +} + TEST_F(RedisHealthCheckerTest, PingAndVariousFailures) { InSequence s; setup(); From 47f89b494d86cc0832198d4025b91a2cbda296dc Mon Sep 17 00:00:00 2001 From: James Peach Date: Tue, 14 Jul 2020 09:54:33 +1000 Subject: [PATCH 609/909] lua: release assert state allocations (#12065) Since `lua_open` can fail on memory allocation, it should be checked with `RELEASE_ASSERT` so that the NULL Lua state is never dereferenced. 
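For readers unfamiliar with the distinction the commit relies on: a debug-only assertion is compiled out of optimized builds, so it cannot guard an allocation that may legitimately fail in production, whereas a release assertion keeps the check (and the abort) in every build type. Below is a minimal, self-contained sketch of that difference; the SKETCH_* macro names are hypothetical and are not Envoy's actual macro implementations.

```
#include <cstdio>
#include <cstdlib>

// Always enabled: the check (and abort) survives optimized builds, so a failed
// allocation can never be silently dereferenced later.
#define SKETCH_RELEASE_ASSERT(cond, msg)                                                 \
  do {                                                                                   \
    if (!(cond)) {                                                                       \
      std::fprintf(stderr, "assertion failed: %s\n", msg);                               \
      std::abort();                                                                      \
    }                                                                                    \
  } while (0)

// Debug-only: disappears when NDEBUG is defined, which is why it is the wrong
// guard for an allocation such as lua_open() returning nullptr under memory pressure.
#ifdef NDEBUG
#define SKETCH_ASSERT(cond, msg) static_cast<void>(0)
#else
#define SKETCH_ASSERT(cond, msg) SKETCH_RELEASE_ASSERT(cond, msg)
#endif

int main() {
  void* state = std::malloc(16); // stand-in for lua_open()
  SKETCH_RELEASE_ASSERT(state != nullptr, "unable to create new Lua state object");
  std::free(state);
  return 0;
}
```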
Signed-off-by: James Peach --- source/extensions/filters/common/lua/lua.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/extensions/filters/common/lua/lua.cc b/source/extensions/filters/common/lua/lua.cc index 3f8b5cd7ec01..02a45f817ec5 100644 --- a/source/extensions/filters/common/lua/lua.cc +++ b/source/extensions/filters/common/lua/lua.cc @@ -51,7 +51,7 @@ ThreadLocalState::ThreadLocalState(const std::string& code, ThreadLocal::SlotAll // First verify that the supplied code can be parsed. CSmartPtr state(lua_open()); - ASSERT(state.get() != nullptr, "unable to create new lua state object"); + RELEASE_ASSERT(state.get() != nullptr, "unable to create new Lua state object"); luaL_openlibs(state.get()); if (0 != luaL_dostring(state.get(), code.c_str())) { @@ -92,7 +92,7 @@ CoroutinePtr ThreadLocalState::createCoroutine() { } ThreadLocalState::LuaThreadLocal::LuaThreadLocal(const std::string& code) : state_(lua_open()) { - ASSERT(state_.get() != nullptr, "unable to create new lua state object"); + RELEASE_ASSERT(state_.get() != nullptr, "unable to create new Lua state object"); luaL_openlibs(state_.get()); int rc = luaL_dostring(state_.get(), code.c_str()); ASSERT(rc == 0); From 7f6e1e05b4e860822c3fe76b7ed86d7946e9135f Mon Sep 17 00:00:00 2001 From: Keith Smiley Date: Mon, 13 Jul 2020 17:03:03 -0700 Subject: [PATCH 610/909] android: Disable getifaddrs call for old NDK versions (#11863) `getifaddrs` and `freeifaddrs` were added in Android NDK API version 24. This patch skips that logic, as was being done for WIN32, if that's your target version. Signed-off-by: Keith Smiley --- include/envoy/common/platform.h | 12 ++++++++++++ source/common/network/utility.cc | 2 +- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/include/envoy/common/platform.h b/include/envoy/common/platform.h index 80870d24240d..e3df7066df85 100644 --- a/include/envoy/common/platform.h +++ b/include/envoy/common/platform.h @@ -235,3 +235,15 @@ struct mmsghdr { unsigned int msg_len; }; #endif + +#define SUPPORTS_GETIFADDRS +#ifdef WIN32 +#undef SUPPORTS_GETIFADDRS +#endif + +// https://android.googlesource.com/platform/prebuilts/ndk/+/dev/platform/sysroot/usr/include/ifaddrs.h +#ifdef __ANDROID_API__ +#if __ANDROID_API__ < 24 +#undef SUPPORTS_GETIFADDRS +#endif // __ANDROID_API__ < 24 +#endif // ifdef __ANDROID_API__ diff --git a/source/common/network/utility.cc b/source/common/network/utility.cc index 2cdb3e614de4..348e7957a7b7 100644 --- a/source/common/network/utility.cc +++ b/source/common/network/utility.cc @@ -196,7 +196,7 @@ void Utility::throwWithMalformedIp(absl::string_view ip_address) { // need to be updated in the future. Discussion can be found at Github issue #939. Address::InstanceConstSharedPtr Utility::getLocalAddress(const Address::IpVersion version) { Address::InstanceConstSharedPtr ret; -#ifndef WIN32 +#ifdef SUPPORTS_GETIFADDRS struct ifaddrs* ifaddr; struct ifaddrs* ifa; From 3400266a6a3ea9febb6c26cfcd240566ddb18c1f Mon Sep 17 00:00:00 2001 From: antonio Date: Mon, 13 Jul 2020 20:03:54 -0400 Subject: [PATCH 611/909] test: Use benchmark build and test target framework for lc_trie_speed_test (#11854) * Use benchmark build and test target framework for lc_trie_speed_test Signed-off-by: Antonio Vicente * rename benchmark functions to make clang-tidy happy. 
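For context on the refactor that follows: the diff below deletes the hand-written main() and moves the file-scope inputs into locally constructed structs, so the benchmarks can run under the framework's standard entry point, and the lowerCamelCase function names satisfy clang-tidy. A minimal, self-contained sketch of that shape (hypothetical names, not the lc_trie code itself):

```
#include <vector>

#include "benchmark/benchmark.h"

// Inputs are built where they are used instead of in a custom main(), so the
// framework-provided entry point can discover and run the benchmark directly.
struct SketchInputs {
  SketchInputs() : values(1024, 7) {}
  std::vector<int> values;
};

static void sketchSum(benchmark::State& state) {
  SketchInputs inputs;
  for (auto _ : state) {
    long sum = 0;
    for (int v : inputs.values) {
      sum += v;
    }
    benchmark::DoNotOptimize(sum);
  }
}
BENCHMARK(sketchSum);

BENCHMARK_MAIN();
```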
Signed-off-by: Antonio Vicente --- test/common/network/BUILD | 8 +- test/common/network/lc_trie_speed_test.cc | 161 ++++++++++++---------- 2 files changed, 91 insertions(+), 78 deletions(-) diff --git a/test/common/network/BUILD b/test/common/network/BUILD index 2910ffb4d878..1ee7cb294c14 100644 --- a/test/common/network/BUILD +++ b/test/common/network/BUILD @@ -4,7 +4,6 @@ load( "envoy_cc_benchmark_binary", "envoy_cc_fuzz_test", "envoy_cc_test", - "envoy_cc_test_binary", "envoy_cc_test_library", "envoy_package", ) @@ -310,7 +309,7 @@ envoy_cc_fuzz_test( ], ) -envoy_cc_test_binary( +envoy_cc_benchmark_binary( name = "lc_trie_speed_test", srcs = ["lc_trie_speed_test.cc"], external_deps = [ @@ -322,6 +321,11 @@ envoy_cc_test_binary( ], ) +envoy_benchmark_test( + name = "lc_trie_speed_test_benchmark_test", + benchmark_binary = "lc_trie_speed_test", +) + envoy_cc_test( name = "io_socket_handle_impl_test", srcs = ["io_socket_handle_impl_test.cc"], diff --git a/test/common/network/lc_trie_speed_test.cc b/test/common/network/lc_trie_speed_test.cc index 632754af8cde..24d52fe0fcaa 100644 --- a/test/common/network/lc_trie_speed_test.cc +++ b/test/common/network/lc_trie_speed_test.cc @@ -5,136 +5,145 @@ namespace { -std::vector addresses; - -std::vector>> tag_data; - -std::vector>> - tag_data_nested_prefixes; - -std::vector>> - tag_data_minimal; - -std::unique_ptr> lc_trie; +struct AddressInputs { + AddressInputs() { + // Random test addresses from RFC 5737 netblocks + static const std::string test_addresses[] = { + "192.0.2.225", "198.51.100.55", "198.51.100.105", "192.0.2.150", "203.0.113.162", + "203.0.113.110", "203.0.113.99", "198.51.100.23", "198.51.100.24", "203.0.113.12"}; + for (const auto& address : test_addresses) { + addresses_.push_back(Envoy::Network::Utility::parseInternetAddress(address)); + } + } -std::unique_ptr> lc_trie_nested_prefixes; + std::vector addresses_; +}; + +struct CidrInputs { + CidrInputs() { + // Construct three sets of prefixes: one consisting of 1,024 addresses in an + // RFC 5737 netblock, another consisting of those same addresses plus + // 0.0.0.0/0 (to exercise the LC Trie's support for nested prefixes), + // and finally a set containing only 0.0.0.0/0. 
+ for (int i = 0; i < 32; i++) { + for (int j = 0; j < 32; j++) { + tag_data_.emplace_back( + std::pair>( + {"tag_1", + {Envoy::Network::Address::CidrRange::create( + fmt::format("192.0.{}.{}/32", i, j))}})); + } + } + tag_data_nested_prefixes_ = tag_data_; + tag_data_nested_prefixes_.emplace_back( + std::pair>( + {"tag_0", {Envoy::Network::Address::CidrRange::create("0.0.0.0/0")}})); + tag_data_minimal_.emplace_back( + std::pair>( + {"tag_1", {Envoy::Network::Address::CidrRange::create("0.0.0.0/0")}})); + } -std::unique_ptr> lc_trie_minimal; + std::vector>> tag_data_; + std::vector>> + tag_data_nested_prefixes_; + std::vector>> + tag_data_minimal_; +}; } // namespace namespace Envoy { -static void BM_LcTrieConstruct(benchmark::State& state) { +static void lcTrieConstruct(benchmark::State& state) { + CidrInputs inputs; + std::unique_ptr> trie; for (auto _ : state) { - trie = std::make_unique>(tag_data); + trie = std::make_unique>(inputs.tag_data_); } benchmark::DoNotOptimize(trie); } -BENCHMARK(BM_LcTrieConstruct); +BENCHMARK(lcTrieConstruct); + +static void lcTrieConstructNested(benchmark::State& state) { + CidrInputs inputs; -static void BM_LcTrieConstructNested(benchmark::State& state) { std::unique_ptr> trie; for (auto _ : state) { - trie = std::make_unique>(tag_data_nested_prefixes); + trie = std::make_unique>( + inputs.tag_data_nested_prefixes_); } benchmark::DoNotOptimize(trie); } -BENCHMARK(BM_LcTrieConstructNested); +BENCHMARK(lcTrieConstructNested); -static void BM_LcTrieConstructMinimal(benchmark::State& state) { +static void lcTrieConstructMinimal(benchmark::State& state) { + CidrInputs inputs; std::unique_ptr> trie; for (auto _ : state) { - trie = std::make_unique>(tag_data_minimal); + trie = std::make_unique>(inputs.tag_data_minimal_); } benchmark::DoNotOptimize(trie); } -BENCHMARK(BM_LcTrieConstructMinimal); +BENCHMARK(lcTrieConstructMinimal); + +static void lcTrieLookup(benchmark::State& state) { + CidrInputs cidr_inputs; + AddressInputs address_inputs; + std::unique_ptr> lc_trie = + std::make_unique>(cidr_inputs.tag_data_); -static void BM_LcTrieLookup(benchmark::State& state) { static size_t i = 0; size_t output_tags = 0; for (auto _ : state) { i++; - i %= addresses.size(); - output_tags += lc_trie->getData(addresses[i]).size(); + i %= address_inputs.addresses_.size(); + output_tags += lc_trie->getData(address_inputs.addresses_[i]).size(); } benchmark::DoNotOptimize(output_tags); } -BENCHMARK(BM_LcTrieLookup); +BENCHMARK(lcTrieLookup); + +static void lcTrieLookupWithNestedPrefixes(benchmark::State& state) { + CidrInputs cidr_inputs; + AddressInputs address_inputs; + std::unique_ptr> lc_trie_nested_prefixes = + std::make_unique>( + cidr_inputs.tag_data_nested_prefixes_); -static void BM_LcTrieLookupWithNestedPrefixes(benchmark::State& state) { static size_t i = 0; size_t output_tags = 0; for (auto _ : state) { i++; - i %= addresses.size(); - output_tags += lc_trie_nested_prefixes->getData(addresses[i]).size(); + i %= address_inputs.addresses_.size(); + output_tags += lc_trie_nested_prefixes->getData(address_inputs.addresses_[i]).size(); } benchmark::DoNotOptimize(output_tags); } -BENCHMARK(BM_LcTrieLookupWithNestedPrefixes); +BENCHMARK(lcTrieLookupWithNestedPrefixes); + +static void lcTrieLookupMinimal(benchmark::State& state) { + CidrInputs cidr_inputs; + AddressInputs address_inputs; + std::unique_ptr> lc_trie_minimal = + std::make_unique>(cidr_inputs.tag_data_minimal_); -static void BM_LcTrieLookupMinimal(benchmark::State& state) { static size_t i = 0; size_t 
output_tags = 0; for (auto _ : state) { i++; - i %= addresses.size(); - output_tags += lc_trie_minimal->getData(addresses[i]).size(); + i %= address_inputs.addresses_.size(); + output_tags += lc_trie_minimal->getData(address_inputs.addresses_[i]).size(); } benchmark::DoNotOptimize(output_tags); } -BENCHMARK(BM_LcTrieLookupMinimal); +BENCHMARK(lcTrieLookupMinimal); } // namespace Envoy - -// Boilerplate main(), which discovers benchmarks in the same file and runs them. -int main(int argc, char** argv) { - - // Random test addresses from RFC 5737 netblocks - static const std::string test_addresses[] = { - "192.0.2.225", "198.51.100.55", "198.51.100.105", "192.0.2.150", "203.0.113.162", - "203.0.113.110", "203.0.113.99", "198.51.100.23", "198.51.100.24", "203.0.113.12"}; - for (const auto& address : test_addresses) { - addresses.push_back(Envoy::Network::Utility::parseInternetAddress(address)); - } - - // Construct three sets of prefixes: one consisting of 1,024 addresses in an - // RFC 5737 netblock, another consisting of those same addresses plus - // 0.0.0.0/0 (to exercise the LC Trie's support for nested prefixes), - // and finally a set containing only 0.0.0.0/0. - for (int i = 0; i < 32; i++) { - for (int j = 0; j < 32; j++) { - tag_data.emplace_back(std::pair>( - {"tag_1", - {Envoy::Network::Address::CidrRange::create(fmt::format("192.0.{}.{}/32", i, j))}})); - } - } - tag_data_nested_prefixes = tag_data; - tag_data_nested_prefixes.emplace_back( - std::pair>( - {"tag_0", {Envoy::Network::Address::CidrRange::create("0.0.0.0/0")}})); - tag_data_minimal.emplace_back( - std::pair>( - {"tag_1", {Envoy::Network::Address::CidrRange::create("0.0.0.0/0")}})); - - lc_trie = std::make_unique>(tag_data); - lc_trie_nested_prefixes = - std::make_unique>(tag_data_nested_prefixes); - lc_trie_minimal = std::make_unique>(tag_data_minimal); - - benchmark::Initialize(&argc, argv); - if (benchmark::ReportUnrecognizedArguments(argc, argv)) { - return 1; - } - benchmark::RunSpecifiedBenchmarks(); -} From 3afa3b50eacfa39fa5b3518b05b03689dc56ef42 Mon Sep 17 00:00:00 2001 From: Clara Date: Mon, 13 Jul 2020 20:04:39 -0400 Subject: [PATCH 612/909] Ratelimit: Add default value to dynamic metadata action (#11873) Modified dynamic_metadata action to now accept an optional default value for instances where no value is queried from the dynamic metadata. Signed-off-by: Clara Andrew-Wani --- .../config/route/v3/route_components.proto | 4 ++ .../route/v4alpha/route_components.proto | 4 ++ .../config/route/v3/route_components.proto | 4 ++ .../route/v4alpha/route_components.proto | 4 ++ source/common/router/router_ratelimit.cc | 15 +++-- source/common/router/router_ratelimit.h | 1 + test/common/router/router_ratelimit_test.cc | 65 +++++++++++++++++++ 7 files changed, 92 insertions(+), 5 deletions(-) diff --git a/api/envoy/config/route/v3/route_components.proto b/api/envoy/config/route/v3/route_components.proto index 46e7ae99f54d..e4ad52e66220 100644 --- a/api/envoy/config/route/v3/route_components.proto +++ b/api/envoy/config/route/v3/route_components.proto @@ -1490,6 +1490,10 @@ message RateLimit { // Metadata struct that defines the key and path to retrieve the string value. A match will // only happen if the value in the dynamic metadata is of type string. type.metadata.v3.MetadataKey metadata_key = 2 [(validate.rules).message = {required: true}]; + + // An optional value to use if *metadata_key* is empty. If not set and + // no value is present under the metadata_key then no descriptor is generated. 
+ string default_value = 3; } oneof action_specifier { diff --git a/api/envoy/config/route/v4alpha/route_components.proto b/api/envoy/config/route/v4alpha/route_components.proto index 711914d9d1be..01b138c7a7a6 100644 --- a/api/envoy/config/route/v4alpha/route_components.proto +++ b/api/envoy/config/route/v4alpha/route_components.proto @@ -1472,6 +1472,10 @@ message RateLimit { // Metadata struct that defines the key and path to retrieve the string value. A match will // only happen if the value in the dynamic metadata is of type string. type.metadata.v3.MetadataKey metadata_key = 2 [(validate.rules).message = {required: true}]; + + // An optional value to use if *metadata_key* is empty. If not set and + // no value is present under the metadata_key then no descriptor is generated. + string default_value = 3; } oneof action_specifier { diff --git a/generated_api_shadow/envoy/config/route/v3/route_components.proto b/generated_api_shadow/envoy/config/route/v3/route_components.proto index 1e077dee4d11..ee95088a439f 100644 --- a/generated_api_shadow/envoy/config/route/v3/route_components.proto +++ b/generated_api_shadow/envoy/config/route/v3/route_components.proto @@ -1502,6 +1502,10 @@ message RateLimit { // Metadata struct that defines the key and path to retrieve the string value. A match will // only happen if the value in the dynamic metadata is of type string. type.metadata.v3.MetadataKey metadata_key = 2 [(validate.rules).message = {required: true}]; + + // An optional value to use if *metadata_key* is empty. If not set and + // no value is present under the metadata_key then no descriptor is generated. + string default_value = 3; } oneof action_specifier { diff --git a/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto b/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto index 97fd33e535c7..7292f6258fce 100644 --- a/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto +++ b/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto @@ -1500,6 +1500,10 @@ message RateLimit { // Metadata struct that defines the key and path to retrieve the string value. A match will // only happen if the value in the dynamic metadata is of type string. type.metadata.v3.MetadataKey metadata_key = 2 [(validate.rules).message = {required: true}]; + + // An optional value to use if *metadata_key* is empty. If not set and + // no value is present under the metadata_key then no descriptor is generated. 
+ string default_value = 3; } oneof action_specifier { diff --git a/source/common/router/router_ratelimit.cc b/source/common/router/router_ratelimit.cc index 9e7f9dab3630..e4840d3417ed 100644 --- a/source/common/router/router_ratelimit.cc +++ b/source/common/router/router_ratelimit.cc @@ -103,7 +103,8 @@ bool GenericKeyAction::populateDescriptor(const Router::RouteEntry&, DynamicMetaDataAction::DynamicMetaDataAction( const envoy::config::route::v3::RateLimit::Action::DynamicMetaData& action) - : metadata_key_(action.metadata_key()), descriptor_key_(action.descriptor_key()) {} + : metadata_key_(action.metadata_key()), descriptor_key_(action.descriptor_key()), + default_value_(action.default_value()) {} bool DynamicMetaDataAction::populateDescriptor( const Router::RouteEntry&, RateLimit::Descriptor& descriptor, const std::string&, @@ -111,12 +112,16 @@ bool DynamicMetaDataAction::populateDescriptor( const envoy::config::core::v3::Metadata* dynamic_metadata) const { const ProtobufWkt::Value& metadata_value = Envoy::Config::Metadata::metadataValue(dynamic_metadata, metadata_key_); - if (metadata_value.kind_case() != ProtobufWkt::Value::kStringValue) { - return false; + + if (!metadata_value.string_value().empty()) { + descriptor.entries_.push_back({descriptor_key_, metadata_value.string_value()}); + return true; + } else if (metadata_value.string_value().empty() && !default_value_.empty()) { + descriptor.entries_.push_back({descriptor_key_, default_value_}); + return true; } - descriptor.entries_.push_back({descriptor_key_, metadata_value.string_value()}); - return !metadata_value.string_value().empty(); + return false; } HeaderValueMatchAction::HeaderValueMatchAction( diff --git a/source/common/router/router_ratelimit.h b/source/common/router/router_ratelimit.h index 5343c2aab43c..02af468b9f98 100644 --- a/source/common/router/router_ratelimit.h +++ b/source/common/router/router_ratelimit.h @@ -125,6 +125,7 @@ class DynamicMetaDataAction : public RateLimitAction { private: const Envoy::Config::MetadataKey metadata_key_; const std::string descriptor_key_; + const std::string default_value_; }; /** diff --git a/test/common/router/router_ratelimit_test.cc b/test/common/router/router_ratelimit_test.cc index ca2ab96779ee..ee3d16403792 100644 --- a/test/common/router/router_ratelimit_test.cc +++ b/test/common/router/router_ratelimit_test.cc @@ -453,6 +453,7 @@ TEST_F(RateLimitPolicyEntryTest, DynamicMetaDataMatch) { actions: - dynamic_metadata: descriptor_key: fake_key + default_value: fake_value metadata_key: key: 'envoy.xxx' path: @@ -479,6 +480,39 @@ TEST_F(RateLimitPolicyEntryTest, DynamicMetaDataMatch) { testing::ContainerEq(descriptors_)); } +// Tests that the default_value is used in the descriptor when the metadata_key is empty. 
+TEST_F(RateLimitPolicyEntryTest, DynamicMetaDataNoMatchWithDefaultValue) { + const std::string yaml = R"EOF( +actions: +- dynamic_metadata: + descriptor_key: fake_key + default_value: fake_value + metadata_key: + key: 'envoy.xxx' + path: + - key: test + - key: prop + )EOF"; + + setupTest(yaml); + + std::string metadata_yaml = R"EOF( +filter_metadata: + envoy.xxx: + another_key: + prop: foo + )EOF"; + + envoy::config::core::v3::Metadata metadata; + TestUtility::loadFromYaml(metadata_yaml, metadata); + + rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header_, default_remote_address_, + &metadata); + + EXPECT_THAT(std::vector({{{{"fake_key", "fake_value"}}}}), + testing::ContainerEq(descriptors_)); +} + TEST_F(RateLimitPolicyEntryTest, DynamicMetaDataNoMatch) { const std::string yaml = R"EOF( actions: @@ -538,6 +572,37 @@ TEST_F(RateLimitPolicyEntryTest, DynamicMetaDataEmptyValue) { EXPECT_TRUE(descriptors_.empty()); } +// Tests that no descriptor is generated when both the metadata_key and default_value are empty. +TEST_F(RateLimitPolicyEntryTest, DynamicMetaDataAndDefaultValueEmpty) { + const std::string yaml = R"EOF( +actions: +- dynamic_metadata: + descriptor_key: fake_key + default_value: "" + metadata_key: + key: 'envoy.xxx' + path: + - key: test + - key: prop + )EOF"; + + setupTest(yaml); + + std::string metadata_yaml = R"EOF( +filter_metadata: + envoy.xxx: + another_key: + prop: "" + )EOF"; + + envoy::config::core::v3::Metadata metadata; + TestUtility::loadFromYaml(metadata_yaml, metadata); + + rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header_, default_remote_address_, + &metadata); + + EXPECT_TRUE(descriptors_.empty()); +} TEST_F(RateLimitPolicyEntryTest, DynamicMetaDataNonStringMatch) { const std::string yaml = R"EOF( From c9a31345807b912b4dda24d1778c5d4ba2f8f6a2 Mon Sep 17 00:00:00 2001 From: Shriram Rajagopalan Date: Mon, 13 Jul 2020 20:05:32 -0400 Subject: [PATCH 613/909] Update validation in Inline DNS table (#11897) Signed-off-by: Shriram Rajagopalan --- api/envoy/data/dns/v3/dns_table.proto | 4 ++-- api/envoy/data/dns/v4alpha/dns_table.proto | 4 ++-- generated_api_shadow/envoy/data/dns/v3/dns_table.proto | 4 ++-- generated_api_shadow/envoy/data/dns/v4alpha/dns_table.proto | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/api/envoy/data/dns/v3/dns_table.proto b/api/envoy/data/dns/v3/dns_table.proto index 5615c96e2891..fd68847b892f 100644 --- a/api/envoy/data/dns/v3/dns_table.proto +++ b/api/envoy/data/dns/v3/dns_table.proto @@ -59,13 +59,13 @@ message DnsTable { "envoy.data.dns.v2alpha.DnsTable.DnsVirtualDomain"; // A domain name for which Envoy will respond to query requests - string name = 1 [(validate.rules).string = {min_len: 2 well_known_regex: HTTP_HEADER_NAME}]; + string name = 1 [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}]; // The configuration containing the method to determine the address of this endpoint DnsEndpoint endpoint = 2; // Sets the TTL in DNS answers from Envoy returned to the client. 
The default TTL is 300s - google.protobuf.Duration answer_ttl = 3 [(validate.rules).duration = {gte {seconds: 60}}]; + google.protobuf.Duration answer_ttl = 3 [(validate.rules).duration = {gte {seconds: 30}}]; } // Control how many times Envoy makes an attempt to forward a query to an external DNS server diff --git a/api/envoy/data/dns/v4alpha/dns_table.proto b/api/envoy/data/dns/v4alpha/dns_table.proto index f7050bedc1c1..22fe377281dd 100644 --- a/api/envoy/data/dns/v4alpha/dns_table.proto +++ b/api/envoy/data/dns/v4alpha/dns_table.proto @@ -59,13 +59,13 @@ message DnsTable { "envoy.data.dns.v3.DnsTable.DnsVirtualDomain"; // A domain name for which Envoy will respond to query requests - string name = 1 [(validate.rules).string = {min_len: 2 well_known_regex: HTTP_HEADER_NAME}]; + string name = 1 [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}]; // The configuration containing the method to determine the address of this endpoint DnsEndpoint endpoint = 2; // Sets the TTL in DNS answers from Envoy returned to the client. The default TTL is 300s - google.protobuf.Duration answer_ttl = 3 [(validate.rules).duration = {gte {seconds: 60}}]; + google.protobuf.Duration answer_ttl = 3 [(validate.rules).duration = {gte {seconds: 30}}]; } // Control how many times Envoy makes an attempt to forward a query to an external DNS server diff --git a/generated_api_shadow/envoy/data/dns/v3/dns_table.proto b/generated_api_shadow/envoy/data/dns/v3/dns_table.proto index 5615c96e2891..fd68847b892f 100644 --- a/generated_api_shadow/envoy/data/dns/v3/dns_table.proto +++ b/generated_api_shadow/envoy/data/dns/v3/dns_table.proto @@ -59,13 +59,13 @@ message DnsTable { "envoy.data.dns.v2alpha.DnsTable.DnsVirtualDomain"; // A domain name for which Envoy will respond to query requests - string name = 1 [(validate.rules).string = {min_len: 2 well_known_regex: HTTP_HEADER_NAME}]; + string name = 1 [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}]; // The configuration containing the method to determine the address of this endpoint DnsEndpoint endpoint = 2; // Sets the TTL in DNS answers from Envoy returned to the client. The default TTL is 300s - google.protobuf.Duration answer_ttl = 3 [(validate.rules).duration = {gte {seconds: 60}}]; + google.protobuf.Duration answer_ttl = 3 [(validate.rules).duration = {gte {seconds: 30}}]; } // Control how many times Envoy makes an attempt to forward a query to an external DNS server diff --git a/generated_api_shadow/envoy/data/dns/v4alpha/dns_table.proto b/generated_api_shadow/envoy/data/dns/v4alpha/dns_table.proto index f7050bedc1c1..22fe377281dd 100644 --- a/generated_api_shadow/envoy/data/dns/v4alpha/dns_table.proto +++ b/generated_api_shadow/envoy/data/dns/v4alpha/dns_table.proto @@ -59,13 +59,13 @@ message DnsTable { "envoy.data.dns.v3.DnsTable.DnsVirtualDomain"; // A domain name for which Envoy will respond to query requests - string name = 1 [(validate.rules).string = {min_len: 2 well_known_regex: HTTP_HEADER_NAME}]; + string name = 1 [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}]; // The configuration containing the method to determine the address of this endpoint DnsEndpoint endpoint = 2; // Sets the TTL in DNS answers from Envoy returned to the client. 
The default TTL is 300s - google.protobuf.Duration answer_ttl = 3 [(validate.rules).duration = {gte {seconds: 60}}]; + google.protobuf.Duration answer_ttl = 3 [(validate.rules).duration = {gte {seconds: 30}}]; } // Control how many times Envoy makes an attempt to forward a query to an external DNS server From 6f8b62e5dd4f1408f4e4b199743d33c6cdb5255d Mon Sep 17 00:00:00 2001 From: Otto van der Schaaf Date: Tue, 14 Jul 2020 02:06:46 +0200 Subject: [PATCH 614/909] Add pragma once to random_generator.h (#12057) Signed-off-by: Otto van der Schaaf --- source/common/common/random_generator.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/source/common/common/random_generator.h b/source/common/common/random_generator.h index 56aa638b54c8..47c5da6b1f9d 100644 --- a/source/common/common/random_generator.h +++ b/source/common/common/random_generator.h @@ -1,3 +1,5 @@ +#pragma once + #include "envoy/common/random_generator.h" namespace Envoy { From 9e4bf70c18f1827fbdc2de691d1e5759231c282c Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Mon, 13 Jul 2020 20:07:06 -0400 Subject: [PATCH 615/909] docs: updating rotation calendar links (#12059) Signed-off-by: Alyssa Wilk --- GOVERNANCE.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/GOVERNANCE.md b/GOVERNANCE.md index c58ace42a9a0..ce60a6e82ca7 100644 --- a/GOVERNANCE.md +++ b/GOVERNANCE.md @@ -65,8 +65,8 @@ questions and do all reviews, but it is their responsibility to make sure that everything is being actively covered by someone. * The on-call rotation is tracked at Opsgenie. The calendar is visible -[here](https://calendar.google.com/calendar/embed?src=ms6efr2erlvum9aolnvg1688cd3mu85e%40import.calendar.google.com&ctz=America%2FNew_York) -or you can subscribe to the iCal feed [here](https://app.opsgenie.com/webcal/getRecentSchedule?webcalToken=75f2990470ca21de1033ecf4586bea1e40bae32bf3c39e2289f6186da1904ee0&scheduleId=a3505963-c064-4c97-8865-947dfcb06060) +[here](https://calendar.google.com/calendar/embed?src=d6glc0l5rc3v235q9l2j29dgovh3dn48%40import.calendar.google.com&ctz=America%2FNew_York) +or you can subscribe to the iCal feed [here](webcal://kubernetes.app.opsgenie.com/webapi/webcal/getRecentSchedule?webcalToken=39dd1a892faa8d0d689f889b9d09ae787355ddff894396546726a5a02bac5b26&scheduleId=a3505963-c064-4c97-8865-947dfcb06060) ## Cutting a release From ba0a600eb20121eaf1dcebbafcc1a8afbcb0b8fc Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Mon, 13 Jul 2020 20:07:53 -0400 Subject: [PATCH 616/909] coverage: update numbers (#12054) Signed-off-by: Alyssa Wilk --- .../host/omit_canary_hosts/config_test.cc | 11 ++++ .../host/omit_host_metadata/config_test.cc | 2 + .../retry/host/previous_hosts/config_test.cc | 11 ++++ test/per_file_coverage.sh | 56 +++++++++---------- 4 files changed, 51 insertions(+), 29 deletions(-) diff --git a/test/extensions/retry/host/omit_canary_hosts/config_test.cc b/test/extensions/retry/host/omit_canary_hosts/config_test.cc index 4794aefa32a1..20f94af2320a 100644 --- a/test/extensions/retry/host/omit_canary_hosts/config_test.cc +++ b/test/extensions/retry/host/omit_canary_hosts/config_test.cc @@ -36,6 +36,17 @@ TEST(OmitCanaryHostsRetryPredicateTest, PredicateTest) { ASSERT_TRUE(predicate->shouldSelectAnotherHost(*host2)); } +TEST(OmitCanaryHostsRetryPredicateTest, EmptyConfig) { + auto factory = Registry::FactoryRegistry::getFactory( + RetryHostPredicateValues::get().OmitCanaryHostsPredicate); + + ASSERT_NE(nullptr, factory); + + ProtobufTypes::MessagePtr config = factory->createEmptyConfigProto(); 
+ EXPECT_TRUE(dynamic_cast( + config.get())); +} + } // namespace } // namespace Host } // namespace Retry diff --git a/test/extensions/retry/host/omit_host_metadata/config_test.cc b/test/extensions/retry/host/omit_host_metadata/config_test.cc index 69a2b47e8e3d..f69394bac9d7 100644 --- a/test/extensions/retry/host/omit_host_metadata/config_test.cc +++ b/test/extensions/retry/host/omit_host_metadata/config_test.cc @@ -82,6 +82,8 @@ TEST(OmitHostsRetryPredicateTest, PredicateTest) { )EOF")))); ASSERT_FALSE(predicate->shouldSelectAnotherHost(*host)); + + predicate->onHostAttempted(host); } } // namespace } // namespace Host diff --git a/test/extensions/retry/host/previous_hosts/config_test.cc b/test/extensions/retry/host/previous_hosts/config_test.cc index 5e83f445f63a..84ccaad32a69 100644 --- a/test/extensions/retry/host/previous_hosts/config_test.cc +++ b/test/extensions/retry/host/previous_hosts/config_test.cc @@ -48,6 +48,17 @@ TEST(PreviousHostsRetryPredicateConfigTest, PredicateTest) { ASSERT_TRUE(predicate->shouldSelectAnotherHost(*host2)); } +TEST(PreviousHostsRetryPredicateConfigTest, EmptyConfig) { + auto factory = Registry::FactoryRegistry::getFactory( + RetryHostPredicateValues::get().PreviousHostsPredicate); + + ASSERT_NE(nullptr, factory); + + ProtobufTypes::MessagePtr config = factory->createEmptyConfigProto(); + EXPECT_TRUE(dynamic_cast( + config.get())); +} + } // namespace } // namespace Host } // namespace Retry diff --git a/test/per_file_coverage.sh b/test/per_file_coverage.sh index 521d5c6760ff..e06bf489cb07 100755 --- a/test/per_file_coverage.sh +++ b/test/per_file_coverage.sh @@ -3,37 +3,37 @@ # directory:coverage_percent # for existing extensions with low coverage. declare -a KNOWN_LOW_COVERAGE=( -"source/extensions/common:94.0" +"source/extensions/common:94.4" "source/extensions/common/crypto:91.5" "source/extensions/common/wasm:85.4" -"source/extensions/common/wasm/null:77.8" "source/extensions/common/wasm/v8:85.4" -"source/extensions/filters/common:94.6" -"source/extensions/filters/common/expr:92.2" -"source/extensions/filters/common/fault:95.8" -"source/extensions/filters/common/lua:95.9" -"source/extensions/filters/common/rbac:87.2" -"source/extensions/filters/http/aws_lambda:96.4" -"source/extensions/filters/http/aws_request_signing:93.3" +"source/extensions/common/wasm/null:77.8" +"source/extensions/filters/network/sni_cluster:90.3" +"source/extensions/filters/network/thrift_proxy/router:96.0" +"source/extensions/filters/network/sni_dynamic_forward_proxy:90.9" +"source/extensions/filters/network/dubbo_proxy:96.1" +"source/extensions/filters/network/dubbo_proxy/router:95.1" +"source/extensions/filters/network/direct_response:89.3" +"source/extensions/filters/network/mongo_proxy:94.0" +"source/extensions/filters/network/common:96.1" +"source/extensions/filters/network/common/redis:96.2" +"source/extensions/filters/http/dynamic_forward_proxy:92.1" "source/extensions/filters/http/cache:80.7" "source/extensions/filters/http/cache/simple_http_cache:84.5" -"source/extensions/filters/http/dynamic_forward_proxy:91.5" -"source/extensions/filters/http/grpc_json_transcoder:93.3" "source/extensions/filters/http/ip_tagging:91.2" -"source/extensions/filters/listener:95.6" -"source/extensions/filters/listener/http_inspector:93.3" +"source/extensions/filters/http/grpc_json_transcoder:93.3" +"source/extensions/filters/http/aws_lambda:96.4" +"source/extensions/filters/http/aws_request_signing:93.3" +"source/extensions/filters/listener:96.0" 
"source/extensions/filters/listener/tls_inspector:92.4" -"source/extensions/filters/network/common:96.0" -"source/extensions/filters/network/common/redis:96.2" -"source/extensions/filters/network/direct_response:89.3" -"source/extensions/filters/network/dubbo_proxy:96.1" -"source/extensions/filters/network/dubbo_proxy/router:95.1" -"source/extensions/filters/network/mongo_proxy:94.0" -"source/extensions/filters/network/sni_cluster:90.3" -"source/extensions/filters/network/sni_dynamic_forward_proxy:89.4" -"source/extensions/filters/network/thrift_proxy/router:96.0" -"source/extensions/filters/udp:91.0" -"source/extensions/filters/udp/dns_filter:88.5" +"source/extensions/filters/listener/http_inspector:93.3" +"source/extensions/filters/udp:91.1" +"source/extensions/filters/udp/dns_filter:89.2" +"source/extensions/filters/common:94.7" +"source/extensions/filters/common/expr:92.2" +"source/extensions/filters/common/rbac:87.2" +"source/extensions/filters/common/fault:95.8" +"source/extensions/filters/common/lua:95.9" "source/extensions/grpc_credentials:92.0" "source/extensions/grpc_credentials/aws_iam:86.8" "source/extensions/health_checkers:95.9" @@ -42,13 +42,11 @@ declare -a KNOWN_LOW_COVERAGE=( "source/extensions/quic_listeners/quiche:84.8" "source/extensions/retry:95.5" "source/extensions/retry/host:85.7" -"source/extensions/retry/host/omit_canary_hosts:64.3" -"source/extensions/retry/host/previous_hosts:82.4" "source/extensions/stat_sinks/statsd:85.2" -"source/extensions/tracers:96.3" -"source/extensions/tracers/opencensus:90.1" +"source/extensions/tracers:96.5" +"source/extensions/tracers/opencensus:92.4" "source/extensions/tracers/xray:95.3" -"source/extensions/transport_sockets:94.8" +"source/extensions/transport_sockets:94.9" "source/extensions/transport_sockets/tap:95.6" "source/extensions/transport_sockets/tls:94.2" "source/extensions/transport_sockets/tls/private_key:76.9" From d7140cbfad9418c8bbbdbc2a321bca2492856ce0 Mon Sep 17 00:00:00 2001 From: Ruslan Nigmatullin Date: Mon, 13 Jul 2020 17:09:18 -0700 Subject: [PATCH 617/909] grpc json transcoder: Support `response_body` (#10892) Signed-off-by: Ruslan Nigmatullin --- docs/root/version_history/current.rst | 1 + .../grpc_json_transcoder/http_body_utils.cc | 53 ++++++++- .../grpc_json_transcoder/http_body_utils.h | 3 + .../json_transcoder_filter.cc | 68 +++++++---- .../json_transcoder_filter.h | 5 + .../http_body_utils_test.cc | 112 +++++++++++++++--- .../json_transcoder_filter_test.cc | 61 ++++++++++ test/proto/bookstore.proto | 41 +++++++ 8 files changed, 307 insertions(+), 37 deletions(-) diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 3d664813d44a..91b93b797ffa 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -29,6 +29,7 @@ Removed Config or Runtime New Features ------------ +* grpc-json: support specifying `response_body` field in for `google.api.HttpBody` message. * tap: added :ref:`generic body matcher` to scan http requests and responses for text or hex patterns. 
Deprecated diff --git a/source/extensions/filters/http/grpc_json_transcoder/http_body_utils.cc b/source/extensions/filters/http/grpc_json_transcoder/http_body_utils.cc index e516a7f2d567..5e55b57482c8 100644 --- a/source/extensions/filters/http/grpc_json_transcoder/http_body_utils.cc +++ b/source/extensions/filters/http/grpc_json_transcoder/http_body_utils.cc @@ -2,23 +2,70 @@ #include "google/api/httpbody.pb.h" +using Envoy::Protobuf::io::CodedInputStream; using Envoy::Protobuf::io::CodedOutputStream; using Envoy::Protobuf::io::StringOutputStream; +using Envoy::Protobuf::io::ZeroCopyInputStream; namespace Envoy { namespace Extensions { namespace HttpFilters { namespace GrpcJsonTranscoder { +namespace { + +// Embedded messages are treated the same way as strings (wire type 2). +constexpr uint32_t ProtobufLengthDelimitedField = 2; + +bool parseMessageByFieldPath(CodedInputStream* input, + absl::Span field_path, + Protobuf::Message* message) { + if (field_path.empty()) { + return message->MergeFromCodedStream(input); + } + + const uint32_t expected_tag = (field_path.front()->number() << 3) | ProtobufLengthDelimitedField; + for (;;) { + const uint32_t tag = input->ReadTag(); + if (tag == expected_tag) { + uint32_t length = 0; + if (!input->ReadVarint32(&length)) { + return false; + } + auto limit = input->IncrementRecursionDepthAndPushLimit(length); + if (!parseMessageByFieldPath(input, field_path.subspan(1), message)) { + return false; + } + if (!input->DecrementRecursionDepthAndPopLimit(limit.first)) { + return false; + } + } else if (tag == 0) { + return true; + } else { + if (!Protobuf::internal::WireFormatLite::SkipField(input, tag)) { + return false; + } + } + } +} +} // namespace + +bool HttpBodyUtils::parseMessageByFieldPath(ZeroCopyInputStream* stream, + const std::vector& field_path, + Protobuf::Message* message) { + CodedInputStream input(stream); + input.SetRecursionLimit(field_path.size()); + + return GrpcJsonTranscoder::parseMessageByFieldPath(&input, absl::MakeConstSpan(field_path), + message); +} + void HttpBodyUtils::appendHttpBodyEnvelope( Buffer::Instance& output, const std::vector& request_body_field_path, std::string content_type, uint64_t content_length) { // Manually encode the protobuf envelope for the body. // See https://developers.google.com/protocol-buffers/docs/encoding#embedded for wire format. - // Embedded messages are treated the same way as strings (wire type 2). 
- constexpr uint32_t ProtobufLengthDelimitedField = 2; - std::string proto_envelope; { // For memory safety, the StringOutputStream needs to be destroyed before diff --git a/source/extensions/filters/http/grpc_json_transcoder/http_body_utils.h b/source/extensions/filters/http/grpc_json_transcoder/http_body_utils.h index dc2af9c3859b..629af665a069 100644 --- a/source/extensions/filters/http/grpc_json_transcoder/http_body_utils.h +++ b/source/extensions/filters/http/grpc_json_transcoder/http_body_utils.h @@ -13,6 +13,9 @@ namespace GrpcJsonTranscoder { class HttpBodyUtils { public: + static bool parseMessageByFieldPath(Protobuf::io::ZeroCopyInputStream* stream, + const std::vector& field_path, + Protobuf::Message* message); static void appendHttpBodyEnvelope(Buffer::Instance& output, const std::vector& request_body_field_path, diff --git a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc index 3f04c64f1b35..15b65e026d6d 100644 --- a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc +++ b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc @@ -213,37 +213,59 @@ void JsonTranscoderConfig::addBuiltinSymbolDescriptor(const std::string& symbol_ addFileDescriptor(file_proto); } +Status JsonTranscoderConfig::resolveField(const Protobuf::Descriptor* descriptor, + const std::string& field_path_str, + std::vector* field_path, + bool* is_http_body) { + const Protobuf::Type* message_type = + type_helper_->Info()->GetTypeByTypeUrl(Grpc::Common::typeUrl(descriptor->full_name())); + if (message_type == nullptr) { + return ProtobufUtil::Status(Code::NOT_FOUND, + "Could not resolve type: " + descriptor->full_name()); + } + + Status status = type_helper_->ResolveFieldPath( + *message_type, field_path_str == "*" ? "" : field_path_str, field_path); + if (!status.ok()) { + return status; + } + + if (field_path->empty()) { + *is_http_body = descriptor->full_name() == google::api::HttpBody::descriptor()->full_name(); + } else { + const Protobuf::Type* body_type = + type_helper_->Info()->GetTypeByTypeUrl(field_path->back()->type_url()); + *is_http_body = body_type != nullptr && + body_type->name() == google::api::HttpBody::descriptor()->full_name(); + } + return Status::OK; +} + Status JsonTranscoderConfig::createMethodInfo(const Protobuf::MethodDescriptor* descriptor, const HttpRule& http_rule, MethodInfoSharedPtr& method_info) { method_info = std::make_shared(); method_info->descriptor_ = descriptor; - method_info->response_type_is_http_body_ = - descriptor->output_type()->full_name() == google::api::HttpBody::descriptor()->full_name(); - const Protobuf::Type* request_type = type_helper_->Info()->GetTypeByTypeUrl( - Grpc::Common::typeUrl(descriptor->input_type()->full_name())); - if (request_type == nullptr) { - return ProtobufUtil::Status(Code::NOT_FOUND, - "Could not resolve type: " + descriptor->input_type()->full_name()); + Status status = + resolveField(descriptor->input_type(), http_rule.body(), + &method_info->request_body_field_path, &method_info->request_type_is_http_body_); + if (!status.ok()) { + return status; } - Status status = - type_helper_->ResolveFieldPath(*request_type, http_rule.body() == "*" ? 
"" : http_rule.body(), - &method_info->request_body_field_path); + status = resolveField(descriptor->output_type(), http_rule.response_body(), + &method_info->response_body_field_path, + &method_info->response_type_is_http_body_); if (!status.ok()) { return status; } - if (method_info->request_body_field_path.empty()) { - method_info->request_type_is_http_body_ = - descriptor->input_type()->full_name() == google::api::HttpBody::descriptor()->full_name(); - } else { - const Protobuf::Type* body_type = type_helper_->Info()->GetTypeByTypeUrl( - method_info->request_body_field_path.back()->type_url()); - method_info->request_type_is_http_body_ = - body_type != nullptr && - body_type->name() == google::api::HttpBody::descriptor()->full_name(); + if (!method_info->response_body_field_path.empty() && !method_info->response_type_is_http_body_) { + // TODO(euroelessar): Implement https://github.com/envoyproxy/envoy/issues/11136. + return Status(Code::UNIMPLEMENTED, + "Setting \"response_body\" is not supported yet for non-HttpBody fields: " + + descriptor->full_name()); } return Status::OK; @@ -681,8 +703,14 @@ bool JsonTranscoderFilter::buildResponseFromHttpBodyOutput( google::api::HttpBody http_body; for (auto& frame : frames) { if (frame.length_ > 0) { + http_body.Clear(); Buffer::ZeroCopyInputStreamImpl stream(std::move(frame.data_)); - http_body.ParseFromZeroCopyStream(&stream); + if (!HttpBodyUtils::parseMessageByFieldPath(&stream, method_->response_body_field_path, + &http_body)) { + // TODO(euroelessar): Return error to client. + encoder_callbacks_->resetStream(); + return true; + } const auto& body = http_body.data(); data.add(body); diff --git a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.h b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.h index 31180556ac54..a0fabc85bfdd 100644 --- a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.h +++ b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.h @@ -44,6 +44,7 @@ struct VariableBinding { struct MethodInfo { const Protobuf::MethodDescriptor* descriptor_ = nullptr; std::vector request_body_field_path; + std::vector response_body_field_path; bool request_type_is_http_body_ = false; bool response_type_is_http_body_ = false; }; @@ -112,6 +113,10 @@ class JsonTranscoderConfig : public Logger::Loggable { private: void addFileDescriptor(const Protobuf::FileDescriptorProto& file); void addBuiltinSymbolDescriptor(const std::string& symbol_name); + ProtobufUtil::Status resolveField(const Protobuf::Descriptor* descriptor, + const std::string& field_path_str, + std::vector* field_path, + bool* is_http_body); ProtobufUtil::Status createMethodInfo(const Protobuf::MethodDescriptor* descriptor, const google::api::HttpRule& http_rule, MethodInfoSharedPtr& method_info); diff --git a/test/extensions/filters/http/grpc_json_transcoder/http_body_utils_test.cc b/test/extensions/filters/http/grpc_json_transcoder/http_body_utils_test.cc index 1446c4d8874d..e1a3ed763c61 100644 --- a/test/extensions/filters/http/grpc_json_transcoder/http_body_utils_test.cc +++ b/test/extensions/filters/http/grpc_json_transcoder/http_body_utils_test.cc @@ -18,10 +18,7 @@ class HttpBodyUtilsTest : public testing::Test { public: HttpBodyUtilsTest() = default; - template - void basicTest(const std::string& content, const std::string& content_type, - const std::vector& body_field_path, - std::function get_http_body) { + void setBodyFieldPath(const std::vector& 
body_field_path) { for (int field_number : body_field_path) { Protobuf::Field field; field.set_number(field_number); @@ -30,20 +27,53 @@ class HttpBodyUtilsTest : public testing::Test { for (auto& field : raw_body_field_path_) { body_field_path_.push_back(&field); } + } - Buffer::InstancePtr message_buffer = std::make_unique(); - HttpBodyUtils::appendHttpBodyEnvelope(*message_buffer, body_field_path_, content_type, - content.length()); - message_buffer->add(content); + template + void basicTest(const std::string& content, const std::string& content_type, + const std::vector& body_field_path, + std::function get_http_body) { + setBodyFieldPath(body_field_path); - Buffer::ZeroCopyInputStreamImpl stream(std::move(message_buffer)); + // Parse using concrete message type. + { + Buffer::InstancePtr message_buffer = std::make_unique(); + HttpBodyUtils::appendHttpBodyEnvelope(*message_buffer, body_field_path_, content_type, + content.length()); + message_buffer->add(content); - Message message; - message.ParseFromZeroCopyStream(&stream); + Buffer::ZeroCopyInputStreamImpl stream(std::move(message_buffer)); + + Message message; + message.ParseFromZeroCopyStream(&stream); + + google::api::HttpBody http_body = get_http_body(std::move(message)); + EXPECT_EQ(http_body.content_type(), content_type); + EXPECT_EQ(http_body.data(), content); + } - google::api::HttpBody http_body = get_http_body(std::move(message)); - EXPECT_EQ(http_body.content_type(), content_type); - EXPECT_EQ(http_body.data(), content); + // Parse message dynamically by field path. + { + Buffer::InstancePtr message_buffer = std::make_unique(); + HttpBodyUtils::appendHttpBodyEnvelope(*message_buffer, body_field_path_, content_type, + content.length()); + message_buffer->add(content); + + google::api::HttpBody http_body; + Buffer::ZeroCopyInputStreamImpl stream(std::move(message_buffer)); + EXPECT_TRUE(HttpBodyUtils::parseMessageByFieldPath(&stream, body_field_path_, &http_body)); + EXPECT_EQ(http_body.content_type(), content_type); + EXPECT_EQ(http_body.data(), content); + } + } + + void testInvalidMessage(const std::string& content, const std::vector& body_field_path) { + setBodyFieldPath(body_field_path); + Buffer::InstancePtr message_buffer = std::make_unique(); + message_buffer->add(content); + google::api::HttpBody http_body; + Buffer::ZeroCopyInputStreamImpl stream(std::move(message_buffer)); + EXPECT_FALSE(HttpBodyUtils::parseMessageByFieldPath(&stream, body_field_path_, &http_body)); } std::vector raw_body_field_path_; @@ -77,6 +107,60 @@ TEST_F(HttpBodyUtilsTest, NestedFieldsList) { [](bookstore::DeepNestedBody message) { return message.nested().nested().nested().body(); }); } +TEST_F(HttpBodyUtilsTest, SkipUnknownFields) { + bookstore::DeepNestedBody message; + auto* body = message.mutable_nested()->mutable_nested()->mutable_nested()->mutable_body(); + body->set_content_type("text/nested"); + body->set_data("abcd"); + message.mutable_extra()->set_field("test"); + message.mutable_nested()->mutable_extra()->set_field(123); + + Buffer::InstancePtr message_buffer = std::make_unique(); + std::string serialized_message; + EXPECT_TRUE(message.SerializeToString(&serialized_message)); + message_buffer->add(serialized_message); + setBodyFieldPath({1, 1000000, 100000000, 500000000}); + + google::api::HttpBody http_body; + Buffer::ZeroCopyInputStreamImpl stream(std::move(message_buffer)); + EXPECT_TRUE(HttpBodyUtils::parseMessageByFieldPath(&stream, body_field_path_, &http_body)); + EXPECT_EQ(http_body.content_type(), "text/nested"); + 
EXPECT_EQ(http_body.data(), "abcd"); +} + +TEST_F(HttpBodyUtilsTest, FailInvalidLength) { + std::string message; + // First field tag. + message += static_cast((1 << 3) | 2); + // Invalid length. + message += '\x02'; + // Second field tag. + message += static_cast((2 << 3) | 2); + // Invalid length. + message += '\x80'; + testInvalidMessage(message, {1, 2}); +} + +TEST_F(HttpBodyUtilsTest, FailSkipField) { + std::string message; + // Field tag. + message += static_cast((2 << 3) | 2); + // Invalid length. + message += '\x80'; + testInvalidMessage(message, {1}); +} + +TEST_F(HttpBodyUtilsTest, FailShortMessage) { + std::string message; + // Field tag. + message += static_cast((1 << 3) | 2); + // Length less then remaining message size. + message += '\x02'; + // Invalid tag. + message += '\x00'; + testInvalidMessage(message, {1, 2}); +} + } // namespace } // namespace GrpcJsonTranscoder } // namespace HttpFilters diff --git a/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc b/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc index bb72aee46454..19f5ef8f9b39 100644 --- a/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc +++ b/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc @@ -165,6 +165,33 @@ TEST_F(GrpcJsonTranscoderConfigTest, NonProto) { EnvoyException, "transcoding_filter: Unable to parse proto descriptor"); } +TEST_F(GrpcJsonTranscoderConfigTest, JsonResponseBody) { + EXPECT_THROW_WITH_REGEX( + JsonTranscoderConfig config( + getProtoConfig(TestEnvironment::runfilesPath("test/proto/bookstore.descriptor"), + "bookstore.ServiceWithResponseBody"), + *api_), + EnvoyException, "Setting \"response_body\" is not supported yet for non-HttpBody fields"); +} + +TEST_F(GrpcJsonTranscoderConfigTest, InvalidRequestBodyPath) { + EXPECT_THROW_WITH_REGEX( + JsonTranscoderConfig config( + getProtoConfig(TestEnvironment::runfilesPath("test/proto/bookstore.descriptor"), + "bookstore.ServiceWithInvalidRequestBodyPath"), + *api_), + EnvoyException, "Could not find field"); +} + +TEST_F(GrpcJsonTranscoderConfigTest, InvalidResponseBodyPath) { + EXPECT_THROW_WITH_REGEX( + JsonTranscoderConfig config( + getProtoConfig(TestEnvironment::runfilesPath("test/proto/bookstore.descriptor"), + "bookstore.ServiceWithInvalidResponseBodyPath"), + *api_), + EnvoyException, "Could not find field"); +} + TEST_F(GrpcJsonTranscoderConfigTest, NonBinaryProto) { envoy::extensions::filters::http::grpc_json_transcoder::v3::GrpcJsonTranscoder proto_config; proto_config.set_proto_descriptor_bin("This is invalid proto"); @@ -705,6 +732,40 @@ TEST_F(GrpcJsonTranscoderFilterTest, TranscodingUnaryWithHttpBodyAsOutput) { EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers)); } +TEST_F(GrpcJsonTranscoderFilterTest, TranscodingUnaryWithInvalidHttpBodyAsOutput) { + Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, + {":path", "/echoResponseBodyPath"}}; + + EXPECT_CALL(decoder_callbacks_, clearRouteCache()); + + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false)); + EXPECT_EQ("application/grpc", request_headers.get_("content-type")); + EXPECT_EQ("/echoResponseBodyPath", request_headers.get_("x-envoy-original-path")); + EXPECT_EQ("GET", request_headers.get_("x-envoy-original-method")); + EXPECT_EQ("/bookstore.Bookstore/EchoResponseBodyPath", request_headers.get_(":path")); + EXPECT_EQ("trailers", request_headers.get_("te")); + 
+ Http::TestResponseHeaderMapImpl response_headers{{"content-type", "application/grpc"}, + {":status", "200"}}; + + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_.encodeHeaders(response_headers, false)); + EXPECT_EQ("application/json", response_headers.get_("content-type")); + + google::api::HttpBody response; + response.set_content_type("text/html"); + response.set_data("

<h1>Hello, world!</h1>
"); + + Buffer::OwnedImpl response_data; + // Some invalid message. + response_data.add("\x10\x80"); + Grpc::Common::prependGrpcFrameHeader(response_data); + + EXPECT_CALL(encoder_callbacks_, resetStream()); + EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, + filter_.encodeData(response_data, false)); +} + TEST_F(GrpcJsonTranscoderFilterTest, TranscodingUnaryWithHttpBodyAsOutputAndSplitTwoEncodeData) { Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, {":path", "/index"}}; diff --git a/test/proto/bookstore.proto b/test/proto/bookstore.proto index c4ecf02e7502..62e697e219ee 100644 --- a/test/proto/bookstore.proto +++ b/test/proto/bookstore.proto @@ -114,6 +114,12 @@ service Bookstore { body: "nested.content" }; } + rpc EchoResponseBodyPath(google.protobuf.Empty) returns (EchoBodyRequest) { + option (google.api.http) = { + get: "/echoResponseBodyPath" + response_body: "nested.content" + }; + } rpc EchoStruct(EchoStructReqResp) returns (EchoStructReqResp) { option (google.api.http) = { post: "/echoStruct" @@ -130,6 +136,33 @@ service Bookstore { } } +service ServiceWithResponseBody { + rpc EchoStruct(EchoStructReqResp) returns (EchoStructReqResp) { + option (google.api.http) = { + get: "/echoStruct" + response_body: "content" + }; + } +} + +service ServiceWithInvalidRequestBodyPath { + rpc EchoStruct(EchoStructReqResp) returns (EchoStructReqResp) { + option (google.api.http) = { + get: "/echoStruct" + body: "unknown.field" + }; + } +} + +service ServiceWithInvalidResponseBodyPath { + rpc EchoStruct(EchoStructReqResp) returns (EchoStructReqResp) { + option (google.api.http) = { + get: "/echoStruct" + response_body: "unknown.field" + }; + } +} + // A shelf resource. message Shelf { // A unique shelf id. @@ -256,9 +289,17 @@ message DeepNestedBody { } Nested nested = 100000000; } + message Extra { + int32 field = 1; + } Nested nested = 1000000; + Extra extra = 50; + } + message Extra { + string field = 1; } Nested nested = 1; + Extra extra = 2; } // gRPC server is using BigBook, but envoy transcoder filter is using From 0c6523c0925d260de41d89002044d0d6271550d0 Mon Sep 17 00:00:00 2001 From: Alex Konradi Date: Mon, 13 Jul 2020 20:15:10 -0400 Subject: [PATCH 618/909] server: make overload manager non-optional (#11777) The only user of a null OverloadManager was the admin console. Switch that to using a no-op OverloadManager subclass so that the OM can be provided via reference in all cases. This makes the HCM initialization simpler since it doesn't need to worry about the null pointer. 
Signed-off-by: Alex Konradi --- include/envoy/server/overload_manager.h | 32 ++--------------- source/common/http/conn_manager_impl.cc | 14 +++----- source/common/http/conn_manager_impl.h | 2 +- .../network/http_connection_manager/config.cc | 8 ++--- source/server/admin/admin.cc | 9 +++-- source/server/admin/admin.h | 36 +++++++++++++++++++ source/server/overload_manager_impl.cc | 25 +++++++++++-- source/server/server.cc | 35 +++++++++--------- test/common/http/BUILD | 1 + .../http/conn_manager_impl_fuzz_test.cc | 4 ++- test/common/http/conn_manager_impl_test.cc | 19 +++++----- test/mocks/server/overload_manager.cc | 6 ++++ test/mocks/server/overload_manager.h | 13 ++++++- 13 files changed, 127 insertions(+), 77 deletions(-) diff --git a/include/envoy/server/overload_manager.h b/include/envoy/server/overload_manager.h index 010ac8ee9468..e10812add8fd 100644 --- a/include/envoy/server/overload_manager.h +++ b/include/envoy/server/overload_manager.h @@ -33,25 +33,8 @@ using OverloadActionCb = std::function; */ class ThreadLocalOverloadState : public ThreadLocal::ThreadLocalObject { public: - const OverloadActionState& getState(const std::string& action) { - auto it = actions_.find(action); - if (it == actions_.end()) { - it = actions_.insert(std::make_pair(action, OverloadActionState::Inactive)).first; - } - return it->second; - } - - void setState(const std::string& action, OverloadActionState state) { - auto it = actions_.find(action); - if (it == actions_.end()) { - actions_[action] = state; - } else { - it->second = state; - } - } - -private: - std::unordered_map actions_; + // Get a thread-local reference to the value for the given action key. + virtual const OverloadActionState& getState(const std::string& action) PURE; }; /** @@ -106,17 +89,6 @@ class OverloadManager { * an alternative to registering a callback for overload action state changes. */ virtual ThreadLocalOverloadState& getThreadLocalOverloadState() PURE; - - /** - * Convenience method to get a statically allocated reference to the inactive overload - * action state. Useful for code that needs to initialize a reference either to an - * entry in the ThreadLocalOverloadState map (if overload behavior is enabled) or to - * some other static memory location set to the inactive state (if overload behavior - * is disabled). - */ - static const OverloadActionState& getInactiveState() { - CONSTRUCT_ON_FIRST_USE(OverloadActionState, OverloadActionState::Inactive); - } }; } // namespace Server diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index fb9c36d5d730..4a76e84be282 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -104,7 +104,7 @@ ConnectionManagerImpl::ConnectionManagerImpl(ConnectionManagerConfig& config, Http::Context& http_context, Runtime::Loader& runtime, const LocalInfo::LocalInfo& local_info, Upstream::ClusterManager& cluster_manager, - Server::OverloadManager* overload_manager, + Server::OverloadManager& overload_manager, TimeSource& time_source) : config_(config), stats_(config_.stats()), conn_length_(new Stats::HistogramCompletableTimespanImpl( @@ -113,14 +113,10 @@ ConnectionManagerImpl::ConnectionManagerImpl(ConnectionManagerConfig& config, random_generator_(random_generator), http_context_(http_context), runtime_(runtime), local_info_(local_info), cluster_manager_(cluster_manager), listener_stats_(config_.listenerStats()), - overload_stop_accepting_requests_ref_( - overload_manager ? 
overload_manager->getThreadLocalOverloadState().getState( - Server::OverloadActionNames::get().StopAcceptingRequests) - : Server::OverloadManager::getInactiveState()), - overload_disable_keepalive_ref_( - overload_manager ? overload_manager->getThreadLocalOverloadState().getState( - Server::OverloadActionNames::get().DisableHttpKeepAlive) - : Server::OverloadManager::getInactiveState()), + overload_stop_accepting_requests_ref_(overload_manager.getThreadLocalOverloadState().getState( + Server::OverloadActionNames::get().StopAcceptingRequests)), + overload_disable_keepalive_ref_(overload_manager.getThreadLocalOverloadState().getState( + Server::OverloadActionNames::get().DisableHttpKeepAlive)), time_source_(time_source) {} const ResponseHeaderMap& ConnectionManagerImpl::continueHeader() { diff --git a/source/common/http/conn_manager_impl.h b/source/common/http/conn_manager_impl.h index 4e8f18814b09..04d3896a3ff8 100644 --- a/source/common/http/conn_manager_impl.h +++ b/source/common/http/conn_manager_impl.h @@ -58,7 +58,7 @@ class ConnectionManagerImpl : Logger::Loggable, Random::RandomGenerator& random_generator, Http::Context& http_context, Runtime::Loader& runtime, const LocalInfo::LocalInfo& local_info, Upstream::ClusterManager& cluster_manager, - Server::OverloadManager* overload_manager, TimeSource& time_system); + Server::OverloadManager& overload_manager, TimeSource& time_system); ~ConnectionManagerImpl() override; static ConnectionManagerStats generateStats(const std::string& prefix, Stats::Scope& scope); diff --git a/source/extensions/filters/network/http_connection_manager/config.cc b/source/extensions/filters/network/http_connection_manager/config.cc index 33047d76d643..3f8abde5ae24 100644 --- a/source/extensions/filters/network/http_connection_manager/config.cc +++ b/source/extensions/filters/network/http_connection_manager/config.cc @@ -144,8 +144,8 @@ HttpConnectionManagerFilterConfigFactory::createFilterFactoryFromProtoTyped( return [singletons, filter_config, &context](Network::FilterManager& filter_manager) -> void { filter_manager.addReadFilter(Network::ReadFilterSharedPtr{new Http::ConnectionManagerImpl( *filter_config, context.drainDecision(), context.random(), context.httpContext(), - context.runtime(), context.localInfo(), context.clusterManager(), - &context.overloadManager(), context.dispatcher().timeSource())}); + context.runtime(), context.localInfo(), context.clusterManager(), context.overloadManager(), + context.dispatcher().timeSource())}); }; } @@ -588,8 +588,8 @@ HttpConnectionManagerFactory::createHttpConnectionManagerFactoryFromProto( return [singletons, filter_config, &context, &read_callbacks]() -> Http::ApiListenerPtr { auto conn_manager = std::make_unique( *filter_config, context.drainDecision(), context.random(), context.httpContext(), - context.runtime(), context.localInfo(), context.clusterManager(), - &context.overloadManager(), context.dispatcher().timeSource()); + context.runtime(), context.localInfo(), context.clusterManager(), context.overloadManager(), + context.dispatcher().timeSource()); // This factory creates a new ConnectionManagerImpl in the absence of its usual environment as // an L4 filter, so this factory needs to take a few actions. 
diff --git a/source/server/admin/admin.cc b/source/server/admin/admin.cc index 5b3d3b3c4255..227f0a92277e 100644 --- a/source/server/admin/admin.cc +++ b/source/server/admin/admin.cc @@ -660,6 +660,7 @@ void AdminImpl::startHttpListener(const std::string& access_log_path, access_logs_.emplace_back(new Extensions::AccessLoggers::File::FileAccessLog( access_log_path, {}, Formatter::SubstitutionFormatUtils::defaultSubstitutionFormatter(), server_.accessLogManager())); + null_overload_manager_.start(); socket_ = std::make_shared(address, socket_options, true); socket_factory_ = std::make_shared(socket_); listener_ = std::make_unique(*this, std::move(listener_scope)); @@ -679,6 +680,7 @@ AdminImpl::AdminImpl(const std::string& profile_path, Server::Instance& server) request_id_extension_(Http::RequestIDExtensionFactory::defaultInstance(server_.random())), profile_path_(profile_path), stats_(Http::ConnectionManagerImpl::generateStats("http.admin.", server_.stats())), + null_overload_manager_(server_.threadLocal()), tracing_stats_( Http::ConnectionManagerImpl::generateTracingStats("http.admin.", no_op_store_)), route_config_provider_(server.timeSource()), @@ -760,11 +762,12 @@ Http::ServerConnectionPtr AdminImpl::createCodec(Network::Connection& connection bool AdminImpl::createNetworkFilterChain(Network::Connection& connection, const std::vector&) { - // Don't pass in the overload manager so that the admin interface is accessible even when - // the envoy is overloaded. + // Pass in the null overload manager so that the admin interface is accessible even when Envoy is + // overloaded. connection.addReadFilter(Network::ReadFilterSharedPtr{new Http::ConnectionManagerImpl( *this, server_.drainManager(), server_.random(), server_.httpContext(), server_.runtime(), - server_.localInfo(), server_.clusterManager(), nullptr, server_.timeSource())}); + server_.localInfo(), server_.clusterManager(), null_overload_manager_, + server_.timeSource())}); return true; } diff --git a/source/server/admin/admin.h b/source/server/admin/admin.h index 9c035d123e80..278aa30c342b 100644 --- a/source/server/admin/admin.h +++ b/source/server/admin/admin.h @@ -20,9 +20,11 @@ #include "envoy/server/admin.h" #include "envoy/server/instance.h" #include "envoy/server/listener_manager.h" +#include "envoy/server/overload_manager.h" #include "envoy/upstream/outlier_detection.h" #include "envoy/upstream/resource_manager.h" +#include "common/common/assert.h" #include "common/common/basic_resource_impl.h" #include "common/common/empty_string.h" #include "common/common/logger.h" @@ -245,6 +247,39 @@ class AdminImpl : public Admin, TimeSource& time_source_; }; + /** + * Implementation of OverloadManager that is never overloaded. Using this instead of the real + * OverloadManager keeps the admin interface accessible even when the proxy is overloaded. 
+ */ + struct NullOverloadManager : public OverloadManager { + struct NullThreadLocalOverloadState : public ThreadLocalOverloadState { + const OverloadActionState& getState(const std::string&) override { return inactive_; } + + const OverloadActionState inactive_ = OverloadActionState::Inactive; + }; + + NullOverloadManager(ThreadLocal::SlotAllocator& slot_allocator) + : tls_(slot_allocator.allocateSlot()) {} + + void start() override { + tls_->set([](Event::Dispatcher&) -> ThreadLocal::ThreadLocalObjectSharedPtr { + return std::make_shared(); + }); + } + + ThreadLocalOverloadState& getThreadLocalOverloadState() override { + return tls_->getTyped(); + } + + bool registerForAction(const std::string&, Event::Dispatcher&, OverloadActionCb) override { + // This method shouldn't be called by the admin listener + NOT_REACHED_GCOVR_EXCL_LINE; + return false; + } + + ThreadLocal::SlotPtr tls_; + }; + /** * Helper methods for the /clusters url handler. */ @@ -389,6 +424,7 @@ class AdminImpl : public Admin, std::list access_logs_; const std::string profile_path_; Http::ConnectionManagerStats stats_; + NullOverloadManager null_overload_manager_; // Note: this is here to essentially blackhole the tracing stats since they aren't used in the // Admin case. Stats::IsolatedStoreImpl no_op_store_; diff --git a/source/server/overload_manager_impl.cc b/source/server/overload_manager_impl.cc index ef56fc8d7fae..40156ed9b179 100644 --- a/source/server/overload_manager_impl.cc +++ b/source/server/overload_manager_impl.cc @@ -35,6 +35,25 @@ class ThresholdTriggerImpl : public OverloadAction::Trigger { absl::optional value_; }; +/** + * Thread-local copy of the state of each configured overload action. + */ +class ThreadLocalOverloadStateImpl : public ThreadLocalOverloadState { +public: + const OverloadActionState& getState(const std::string& action) override { + auto it = actions_.find(action); + if (it == actions_.end()) { + it = actions_.insert(std::make_pair(action, OverloadActionState::Inactive)).first; + } + return it->second; + } + + void setState(const std::string& action, OverloadActionState state) { actions_[action] = state; } + +private: + std::unordered_map actions_; +}; + Stats::Counter& makeCounter(Stats::Scope& scope, absl::string_view a, absl::string_view b) { Stats::StatNameManagedStorage stat_name(absl::StrCat("overload.", a, ".", b), scope.symbolTable()); @@ -148,7 +167,7 @@ void OverloadManagerImpl::start() { started_ = true; tls_->set([](Event::Dispatcher&) -> ThreadLocal::ThreadLocalObjectSharedPtr { - return std::make_shared(); + return std::make_shared(); }); if (resources_.empty()) { @@ -191,7 +210,7 @@ bool OverloadManagerImpl::registerForAction(const std::string& action, } ThreadLocalOverloadState& OverloadManagerImpl::getThreadLocalOverloadState() { - return tls_->getTyped(); + return tls_->getTyped(); } void OverloadManagerImpl::updateResourcePressure(const std::string& resource, double pressure) { @@ -208,7 +227,7 @@ void OverloadManagerImpl::updateResourcePressure(const std::string& resource, do ENVOY_LOG(info, "Overload action {} became {}", action, is_active ? 
"active" : "inactive"); tls_->runOnAllThreads([this, action, state] { - tls_->getTyped().setState(action, state); + tls_->getTyped().setState(action, state); }); auto callback_range = action_to_callbacks_.equal_range(action); std::for_each(callback_range.first, callback_range.second, diff --git a/source/server/server.cc b/source/server/server.cc index e0f7157c54c6..80c48c279c06 100644 --- a/source/server/server.cc +++ b/source/server/server.cc @@ -382,23 +382,6 @@ void InstanceImpl::initialize(const Options& options, // Learn original_start_time_ if our parent is still around to inform us of it. restarter_.sendParentAdminShutdownRequest(original_start_time_); admin_ = std::make_unique(initial_config.admin().profilePath(), *this); - if (initial_config.admin().address()) { - if (initial_config.admin().accessLogPath().empty()) { - throw EnvoyException("An admin access log path is required for a listening server."); - } - ENVOY_LOG(info, "admin address: {}", initial_config.admin().address()->asString()); - admin_->startHttpListener(initial_config.admin().accessLogPath(), options.adminAddressPath(), - initial_config.admin().address(), - initial_config.admin().socketOptions(), - stats_store_.createScope("listener.admin.")); - } else { - ENVOY_LOG(warn, "No admin address given, so no admin HTTP server started."); - } - config_tracker_entry_ = - admin_->getConfigTracker().add("bootstrap", [this] { return dumpBootstrapConfig(); }); - if (initial_config.admin().address()) { - admin_->addListenerToHandler(handler_.get()); - } loadServerFlags(initial_config.flagsPath()); @@ -428,6 +411,24 @@ void InstanceImpl::initialize(const Options& options, dispatcher_->initializeStats(stats_store_, "server."); } + if (initial_config.admin().address()) { + if (initial_config.admin().accessLogPath().empty()) { + throw EnvoyException("An admin access log path is required for a listening server."); + } + ENVOY_LOG(info, "admin address: {}", initial_config.admin().address()->asString()); + admin_->startHttpListener(initial_config.admin().accessLogPath(), options.adminAddressPath(), + initial_config.admin().address(), + initial_config.admin().socketOptions(), + stats_store_.createScope("listener.admin.")); + } else { + ENVOY_LOG(warn, "No admin address given, so no admin HTTP server started."); + } + config_tracker_entry_ = + admin_->getConfigTracker().add("bootstrap", [this] { return dumpBootstrapConfig(); }); + if (initial_config.admin().address()) { + admin_->addListenerToHandler(handler_.get()); + } + // The broad order of initialization from this point on is the following: // 1. Statically provisioned configuration (bootstrap) are loaded. // 2. Cluster manager is created and all primary clusters (i.e. 
with endpoint assignments diff --git a/test/common/http/BUILD b/test/common/http/BUILD index eeee6f0d6a09..ec2987c48403 100644 --- a/test/common/http/BUILD +++ b/test/common/http/BUILD @@ -182,6 +182,7 @@ envoy_cc_fuzz_test( "//test/mocks/network:network_mocks", "//test/mocks/router:router_mocks", "//test/mocks/runtime:runtime_mocks", + "//test/mocks/server:server_mocks", "//test/mocks/ssl:ssl_mocks", "//test/mocks/tracing:tracing_mocks", "//test/mocks/upstream:upstream_mocks", diff --git a/test/common/http/conn_manager_impl_fuzz_test.cc b/test/common/http/conn_manager_impl_fuzz_test.cc index c563f72ec381..7c85051528ab 100644 --- a/test/common/http/conn_manager_impl_fuzz_test.cc +++ b/test/common/http/conn_manager_impl_fuzz_test.cc @@ -34,6 +34,7 @@ #include "test/mocks/network/mocks.h" #include "test/mocks/router/mocks.h" #include "test/mocks/runtime/mocks.h" +#include "test/mocks/server/mocks.h" #include "test/mocks/ssl/mocks.h" #include "test/mocks/tracing/mocks.h" #include "test/mocks/upstream/mocks.h" @@ -541,6 +542,7 @@ DEFINE_PROTO_FUZZER(const test::common::http::ConnManagerImplTestCase& input) { NiceMock local_info; NiceMock cluster_manager; NiceMock filter_callbacks; + NiceMock overload_manager; auto ssl_connection = std::make_shared(); bool connection_alive = true; @@ -554,7 +556,7 @@ DEFINE_PROTO_FUZZER(const test::common::http::ConnManagerImplTestCase& input) { std::make_shared("0.0.0.0"); ConnectionManagerImpl conn_manager(config, drain_close, random, http_context, runtime, local_info, - cluster_manager, nullptr, config.time_system_); + cluster_manager, overload_manager, config.time_system_); conn_manager.initializeReadFilterCallbacks(filter_callbacks); std::vector streams; diff --git a/test/common/http/conn_manager_impl_test.cc b/test/common/http/conn_manager_impl_test.cc index a22aa5432296..efd7222b9123 100644 --- a/test/common/http/conn_manager_impl_test.cc +++ b/test/common/http/conn_manager_impl_test.cc @@ -133,7 +133,7 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan std::make_shared("0.0.0.0"); conn_manager_ = std::make_unique( *this, drain_close_, random_, http_context_, runtime_, local_info_, cluster_manager_, - &overload_manager_, test_time_.timeSystem()); + overload_manager_, test_time_.timeSystem()); conn_manager_->initializeReadFilterCallbacks(filter_callbacks_); if (tracing) { @@ -5601,11 +5601,12 @@ TEST(HttpConnectionManagerTracingStatsTest, verifyTracingStats) { } TEST_F(HttpConnectionManagerImplTest, NoNewStreamWhenOverloaded) { - setup(false, ""); + Server::OverloadActionState stop_accepting_requests = Server::OverloadActionState::Active; + ON_CALL(overload_manager_.overload_state_, + getState(Server::OverloadActionNames::get().StopAcceptingRequests)) + .WillByDefault(ReturnRef(stop_accepting_requests)); - overload_manager_.overload_state_.setState( - Server::OverloadActionNames::get().StopAcceptingRequests, - Server::OverloadActionState::Active); + setup(false, ""); EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance&) -> Http::Status { RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); @@ -5631,10 +5632,12 @@ TEST_F(HttpConnectionManagerImplTest, NoNewStreamWhenOverloaded) { } TEST_F(HttpConnectionManagerImplTest, DisableKeepAliveWhenOverloaded) { - setup(false, ""); + Server::OverloadActionState disable_http_keep_alive = Server::OverloadActionState::Active; + ON_CALL(overload_manager_.overload_state_, + getState(Server::OverloadActionNames::get().DisableHttpKeepAlive)) + 
.WillByDefault(ReturnRef(disable_http_keep_alive)); - overload_manager_.overload_state_.setState( - Server::OverloadActionNames::get().DisableHttpKeepAlive, Server::OverloadActionState::Active); + setup(false, ""); std::shared_ptr filter(new NiceMock()); EXPECT_CALL(filter_factory_, createFilterChain(_)) diff --git a/test/mocks/server/overload_manager.cc b/test/mocks/server/overload_manager.cc index d105df80e690..d0fd9b545ec6 100644 --- a/test/mocks/server/overload_manager.cc +++ b/test/mocks/server/overload_manager.cc @@ -9,6 +9,12 @@ namespace Envoy { namespace Server { using ::testing::ReturnRef; + +MockThreadLocalOverloadState::MockThreadLocalOverloadState() + : disabled_state_(OverloadActionState::Inactive) { + ON_CALL(*this, getState).WillByDefault(ReturnRef(disabled_state_)); +} + MockOverloadManager::MockOverloadManager() { ON_CALL(*this, getThreadLocalOverloadState()).WillByDefault(ReturnRef(overload_state_)); } diff --git a/test/mocks/server/overload_manager.h b/test/mocks/server/overload_manager.h index 8ce63cef1b12..86c194d5586d 100644 --- a/test/mocks/server/overload_manager.h +++ b/test/mocks/server/overload_manager.h @@ -8,6 +8,16 @@ namespace Envoy { namespace Server { + +class MockThreadLocalOverloadState : public ThreadLocalOverloadState { +public: + MockThreadLocalOverloadState(); + MOCK_METHOD(const OverloadActionState&, getState, (const std::string&), (override)); + +private: + const OverloadActionState disabled_state_; +}; + class MockOverloadManager : public OverloadManager { public: MockOverloadManager(); @@ -20,7 +30,8 @@ class MockOverloadManager : public OverloadManager { OverloadActionCb callback)); MOCK_METHOD(ThreadLocalOverloadState&, getThreadLocalOverloadState, ()); - ThreadLocalOverloadState overload_state_; + testing::NiceMock overload_state_; }; + } // namespace Server } // namespace Envoy From f481e9de7b4754611a7613d8bd9a4a8f146812ec Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Mon, 13 Jul 2020 23:53:26 -0700 Subject: [PATCH 619/909] ci: temproray disable vhds_integration_test in TSAN (#12067) - fix coverage number missed by #12054 Signed-off-by: Lizan Zhou --- .bazelrc | 2 ++ test/integration/BUILD | 6 +++++- test/per_file_coverage.sh | 1 + 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/.bazelrc b/.bazelrc index 71f457fb5e4a..b88d5079faee 100644 --- a/.bazelrc +++ b/.bazelrc @@ -81,6 +81,8 @@ build:clang-tsan --define ENVOY_CONFIG_TSAN=1 build:clang-tsan --copt -fsanitize=thread build:clang-tsan --linkopt -fsanitize=thread build:clang-tsan --linkopt -fuse-ld=lld +build:clang-tsan --build_tag_filters=-no_san,-no_tsan +build:clang-tsan --test_tag_filters=-no_san,-no_tsan # Needed due to https://github.com/libevent/libevent/issues/777 build:clang-tsan --copt -DEVENT__DISABLE_DEBUG_MODE diff --git a/test/integration/BUILD b/test/integration/BUILD index 5fe7aa10a5f1..a8ed5cb5dbcc 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -217,7 +217,11 @@ envoy_cc_test( data = [ "//test/config/integration/certs", ], - tags = ["fails_on_windows"], + tags = [ + "fails_on_windows", + # https://github.com/envoyproxy/envoy/issues/9784 + "no_tsan", + ], deps = [ ":http_integration_lib", "//source/common/config:protobuf_link_hacks", diff --git a/test/per_file_coverage.sh b/test/per_file_coverage.sh index e06bf489cb07..8dd9531fc065 100755 --- a/test/per_file_coverage.sh +++ b/test/per_file_coverage.sh @@ -42,6 +42,7 @@ declare -a KNOWN_LOW_COVERAGE=( "source/extensions/quic_listeners/quiche:84.8" 
"source/extensions/retry:95.5" "source/extensions/retry/host:85.7" +"source/extensions/retry/host/omit_canary_hosts:92.9" "source/extensions/stat_sinks/statsd:85.2" "source/extensions/tracers:96.5" "source/extensions/tracers/opencensus:92.4" From d27fd11d4bdf2d74df2c2575b3bbd77d5a443ede Mon Sep 17 00:00:00 2001 From: Alan Chiu Date: Tue, 14 Jul 2020 08:17:07 -0700 Subject: [PATCH 620/909] android: add SUPPORTS_PTHREAD_GETNAME_NP definition (#12011) Signed-off-by: Alan Chiu --- include/envoy/common/platform.h | 17 +++++++++++++++++ source/common/common/posix/thread_impl.cc | 4 ++-- 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/include/envoy/common/platform.h b/include/envoy/common/platform.h index e3df7066df85..c3398829f8ab 100644 --- a/include/envoy/common/platform.h +++ b/include/envoy/common/platform.h @@ -247,3 +247,20 @@ struct mmsghdr { #undef SUPPORTS_GETIFADDRS #endif // __ANDROID_API__ < 24 #endif // ifdef __ANDROID_API__ + +#ifdef __linux__ +#define SUPPORTS_PTHREAD_GETNAME_NP 1 +#endif + +// https://android.googlesource.com/platform/bionic/+/master/docs/status.md +// ``pthread_getname_np`` is introduced in API 26 +#ifdef __ANDROID_API__ +#if __ANDROID_API__ > 26 +#define SUPPORTS_PTHREAD_GETNAME_NP 1 +#endif // __ANDROID_API__ > 26 +#endif // ifdef __ANDROID_API__ + +// Ensure `SUPPORTS_PTHREAD_GETNAME_NP` is set +#ifndef SUPPORTS_PTHREAD_GETNAME_NP +#define SUPPORTS_PTHREAD_GETNAME_NP 0 +#endif diff --git a/source/common/common/posix/thread_impl.cc b/source/common/common/posix/thread_impl.cc index 359af8245ed9..c85bbfce57ec 100644 --- a/source/common/common/posix/thread_impl.cc +++ b/source/common/common/posix/thread_impl.cc @@ -52,7 +52,7 @@ class ThreadImplPosix : public Thread { this); RELEASE_ASSERT(rc == 0, ""); -#ifdef __linux__ +#if SUPPORTS_PTHREAD_GETNAME_NP // If the name was not specified, get it from the OS. If the name was // specified, write it into the thread, and assert that the OS sees it the // same way. @@ -93,7 +93,7 @@ class ThreadImplPosix : public Thread { } private: -#ifdef __linux__ +#if SUPPORTS_PTHREAD_GETNAME_NP // Attempts to get the name from the operating system, returning true and // updating 'name' if successful. Note that during normal operation this // may fail, if the thread exits prior to the system call. 
From 8a21fc79914db909d98b326b2c622aed3d94b6f8 Mon Sep 17 00:00:00 2001 From: Rei Shimizu Date: Wed, 15 Jul 2020 00:17:40 +0900 Subject: [PATCH 621/909] absl: update to enable wasm32 build (#12042) Signed-off-by: Shikugawa --- bazel/repository_locations.bzl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 40b6ed7d33e7..2bb2ada34ae2 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -95,10 +95,10 @@ DEPENDENCY_REPOSITORIES = dict( cpe = "N/A", ), com_google_absl = dict( - sha256 = "cd477bfd0d19f803f85d118c7943b7908930310d261752730afa981118fee230", - strip_prefix = "abseil-cpp-ca9856cabc23d771bcce634677650eb6fc4363ae", - # 2020-04-30 - urls = ["https://github.com/abseil/abseil-cpp/archive/ca9856cabc23d771bcce634677650eb6fc4363ae.tar.gz"], + sha256 = "ec8ef47335310cc3382bdc0d0cc1097a001e67dc83fcba807845aa5696e7e1e4", + strip_prefix = "abseil-cpp-302b250e1d917ede77b5ff00a6fd9f28430f1563", + # 2020-07-13 + urls = ["https://github.com/abseil/abseil-cpp/archive/302b250e1d917ede77b5ff00a6fd9f28430f1563.tar.gz"], use_category = ["dataplane", "controlplane"], cpe = "N/A", ), From 6faaf40ee50f2f65ecddb276bb8d2b2b92ea3216 Mon Sep 17 00:00:00 2001 From: foreseeable Date: Tue, 14 Jul 2020 16:24:54 +0000 Subject: [PATCH 622/909] resolve dependencies (#12052) Commit Message: refactor header inclusion to speed up building Additional Description: Risk Level: low Testing: exsiting tests Docs Changes: N/A Release Notes: no Related Issues: #10917 Signed-off-by: Muge Chen --- test/config_test/BUILD | 5 ++++- test/config_test/config_test.cc | 5 ++++- test/extensions/access_loggers/file/BUILD | 2 +- test/extensions/access_loggers/file/config_test.cc | 2 +- test/extensions/access_loggers/grpc/BUILD | 2 +- test/extensions/access_loggers/grpc/http_config_test.cc | 2 +- test/extensions/clusters/aggregate/BUILD | 5 +++-- test/extensions/clusters/aggregate/cluster_test.cc | 3 ++- test/extensions/clusters/aggregate/cluster_update_test.cc | 2 +- test/extensions/clusters/dynamic_forward_proxy/BUILD | 3 ++- .../clusters/dynamic_forward_proxy/cluster_test.cc | 3 ++- test/extensions/clusters/redis/BUILD | 3 ++- test/extensions/clusters/redis/redis_cluster_test.cc | 3 ++- test/extensions/filters/listener/http_inspector/BUILD | 2 +- .../listener/http_inspector/http_inspector_config_test.cc | 2 +- test/extensions/filters/listener/original_src/BUILD | 2 +- .../original_src/original_src_config_factory_test.cc | 2 +- test/extensions/filters/listener/proxy_protocol/BUILD | 2 +- .../filters/listener/proxy_protocol/proxy_protocol_test.cc | 2 +- test/extensions/filters/udp/dns_filter/BUILD | 3 ++- test/extensions/filters/udp/dns_filter/dns_filter_test.cc | 3 ++- test/extensions/health_checkers/redis/BUILD | 3 ++- test/extensions/health_checkers/redis/config_test.cc | 3 ++- test/extensions/quic_listeners/quiche/BUILD | 2 +- .../quic_listeners/quiche/active_quic_listener_test.cc | 3 ++- test/extensions/stats_sinks/dog_statsd/BUILD | 2 +- test/extensions/stats_sinks/dog_statsd/config_test.cc | 2 +- test/extensions/stats_sinks/hystrix/BUILD | 6 ++++-- test/extensions/stats_sinks/hystrix/config_test.cc | 2 +- test/extensions/stats_sinks/hystrix/hystrix_test.cc | 4 +++- test/extensions/stats_sinks/statsd/BUILD | 2 +- test/extensions/stats_sinks/statsd/config_test.cc | 2 +- test/tools/router_check/BUILD | 2 +- test/tools/router_check/coverage.h | 2 -- test/tools/router_check/router.h | 2 +- 35 files 
changed, 57 insertions(+), 38 deletions(-) diff --git a/test/config_test/BUILD b/test/config_test/BUILD index 154dc3ec4c1e..304e92005a49 100644 --- a/test/config_test/BUILD +++ b/test/config_test/BUILD @@ -40,7 +40,10 @@ envoy_cc_test_library( "//source/server:configuration_lib", "//source/server/config_validation:server_lib", "//test/integration:integration_lib", - "//test/mocks/server:server_mocks", + "//test/mocks/server:instance_mocks", + "//test/mocks/server:worker_factory_mocks", + "//test/mocks/server:listener_component_factory_mocks", + "//test/mocks/server:worker_mocks", "//test/mocks/ssl:ssl_mocks", "//test/test_common:threadsafe_singleton_injector_lib", "//test/test_common:simulated_time_system_lib", diff --git a/test/config_test/config_test.cc b/test/config_test/config_test.cc index 1074186abd65..47f7e88037ba 100644 --- a/test/config_test/config_test.cc +++ b/test/config_test/config_test.cc @@ -15,7 +15,10 @@ #include "server/options_impl.h" #include "test/integration/server.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/instance.h" +#include "test/mocks/server/listener_component_factory.h" +#include "test/mocks/server/worker.h" +#include "test/mocks/server/worker_factory.h" #include "test/mocks/ssl/mocks.h" #include "test/test_common/simulated_time_system.h" #include "test/test_common/threadsafe_singleton_injector.h" diff --git a/test/extensions/access_loggers/file/BUILD b/test/extensions/access_loggers/file/BUILD index 76361877e881..4fe61e776483 100644 --- a/test/extensions/access_loggers/file/BUILD +++ b/test/extensions/access_loggers/file/BUILD @@ -17,7 +17,7 @@ envoy_extension_cc_test( extension_name = "envoy.access_loggers.file", deps = [ "//source/extensions/access_loggers/file:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/test_common:environment_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/config/accesslog/v3:pkg_cc_proto", diff --git a/test/extensions/access_loggers/file/config_test.cc b/test/extensions/access_loggers/file/config_test.cc index f0c7e6dd9359..9d7215e3f713 100644 --- a/test/extensions/access_loggers/file/config_test.cc +++ b/test/extensions/access_loggers/file/config_test.cc @@ -9,7 +9,7 @@ #include "extensions/access_loggers/file/file_access_log_impl.h" #include "extensions/access_loggers/well_known_names.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" diff --git a/test/extensions/access_loggers/grpc/BUILD b/test/extensions/access_loggers/grpc/BUILD index 219520a97056..b573bc6082f5 100644 --- a/test/extensions/access_loggers/grpc/BUILD +++ b/test/extensions/access_loggers/grpc/BUILD @@ -68,7 +68,7 @@ envoy_extension_cc_test( extension_name = "envoy.access_loggers.http_grpc", deps = [ "//source/extensions/access_loggers/grpc:http_config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/access_loggers/grpc/v3:pkg_cc_proto", ], diff --git a/test/extensions/access_loggers/grpc/http_config_test.cc b/test/extensions/access_loggers/grpc/http_config_test.cc index 1c6be1e2dec4..37ba5220244f 100644 --- a/test/extensions/access_loggers/grpc/http_config_test.cc +++ b/test/extensions/access_loggers/grpc/http_config_test.cc @@ -7,7 +7,7 @@ #include "extensions/access_loggers/grpc/http_grpc_access_log_impl.h" #include 
"extensions/access_loggers/well_known_names.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/clusters/aggregate/BUILD b/test/extensions/clusters/aggregate/BUILD index 3001535ca9ad..4e75753f133b 100644 --- a/test/extensions/clusters/aggregate/BUILD +++ b/test/extensions/clusters/aggregate/BUILD @@ -20,7 +20,8 @@ envoy_extension_cc_test( "//source/extensions/transport_sockets/raw_buffer:config", "//test/common/upstream:utility_lib", "//test/mocks/protobuf:protobuf_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:admin_mocks", + "//test/mocks/server:instance_mocks", "//test/mocks/ssl:ssl_mocks", "//test/test_common:environment_lib", "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", @@ -40,7 +41,7 @@ envoy_extension_cc_test( "//test/common/upstream:test_cluster_manager", "//test/common/upstream:utility_lib", "//test/mocks/protobuf:protobuf_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:admin_mocks", "//test/mocks/ssl:ssl_mocks", "//test/test_common:environment_lib", "//test/test_common:simulated_time_system_lib", diff --git a/test/extensions/clusters/aggregate/cluster_test.cc b/test/extensions/clusters/aggregate/cluster_test.cc index 748ab076a90f..b2c4174fbd65 100644 --- a/test/extensions/clusters/aggregate/cluster_test.cc +++ b/test/extensions/clusters/aggregate/cluster_test.cc @@ -8,7 +8,8 @@ #include "test/common/upstream/utility.h" #include "test/mocks/protobuf/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/admin.h" +#include "test/mocks/server/instance.h" #include "test/mocks/ssl/mocks.h" #include "test/test_common/environment.h" diff --git a/test/extensions/clusters/aggregate/cluster_update_test.cc b/test/extensions/clusters/aggregate/cluster_update_test.cc index e7cbbcb4311d..cf80d8599b7f 100644 --- a/test/extensions/clusters/aggregate/cluster_update_test.cc +++ b/test/extensions/clusters/aggregate/cluster_update_test.cc @@ -11,7 +11,7 @@ #include "test/common/upstream/test_cluster_manager.h" #include "test/common/upstream/utility.h" #include "test/mocks/protobuf/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/admin.h" #include "test/mocks/ssl/mocks.h" #include "test/test_common/environment.h" #include "test/test_common/simulated_time_system.h" diff --git a/test/extensions/clusters/dynamic_forward_proxy/BUILD b/test/extensions/clusters/dynamic_forward_proxy/BUILD index baaf5068a1b5..126bb1aa7840 100644 --- a/test/extensions/clusters/dynamic_forward_proxy/BUILD +++ b/test/extensions/clusters/dynamic_forward_proxy/BUILD @@ -23,7 +23,8 @@ envoy_extension_cc_test( "//test/common/upstream:utility_lib", "//test/extensions/common/dynamic_forward_proxy:mocks", "//test/mocks/protobuf:protobuf_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:admin_mocks", + "//test/mocks/server:instance_mocks", "//test/mocks/ssl:ssl_mocks", "//test/test_common:environment_lib", "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", diff --git a/test/extensions/clusters/dynamic_forward_proxy/cluster_test.cc b/test/extensions/clusters/dynamic_forward_proxy/cluster_test.cc index cf9116a3d56c..725e62590c85 100644 --- a/test/extensions/clusters/dynamic_forward_proxy/cluster_test.cc +++ b/test/extensions/clusters/dynamic_forward_proxy/cluster_test.cc @@ -10,7 +10,8 @@ #include "test/common/upstream/utility.h" #include "test/extensions/common/dynamic_forward_proxy/mocks.h" #include 
"test/mocks/protobuf/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/admin.h" +#include "test/mocks/server/instance.h" #include "test/mocks/ssl/mocks.h" #include "test/test_common/environment.h" diff --git a/test/extensions/clusters/redis/BUILD b/test/extensions/clusters/redis/BUILD index 1ae33bf15b0c..6c88308d93d6 100644 --- a/test/extensions/clusters/redis/BUILD +++ b/test/extensions/clusters/redis/BUILD @@ -36,7 +36,8 @@ envoy_extension_cc_test( "//test/mocks/network:network_mocks", "//test/mocks/protobuf:protobuf_mocks", "//test/mocks/runtime:runtime_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:admin_mocks", + "//test/mocks/server:instance_mocks", "//test/mocks/ssl:ssl_mocks", "//test/mocks/thread_local:thread_local_mocks", "//test/mocks/upstream:upstream_mocks", diff --git a/test/extensions/clusters/redis/redis_cluster_test.cc b/test/extensions/clusters/redis/redis_cluster_test.cc index 5ce57a167231..284dab85ea05 100644 --- a/test/extensions/clusters/redis/redis_cluster_test.cc +++ b/test/extensions/clusters/redis/redis_cluster_test.cc @@ -21,7 +21,8 @@ #include "test/extensions/filters/network/common/redis/mocks.h" #include "test/mocks/local_info/mocks.h" #include "test/mocks/protobuf/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/admin.h" +#include "test/mocks/server/instance.h" #include "test/mocks/ssl/mocks.h" using testing::_; diff --git a/test/extensions/filters/listener/http_inspector/BUILD b/test/extensions/filters/listener/http_inspector/BUILD index defcc0d8c7da..8530e24434d9 100644 --- a/test/extensions/filters/listener/http_inspector/BUILD +++ b/test/extensions/filters/listener/http_inspector/BUILD @@ -37,7 +37,7 @@ envoy_extension_cc_test( "//source/extensions/filters/listener/http_inspector:http_inspector_lib", "//test/mocks/api:api_mocks", "//test/mocks/network:network_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:listener_factory_context_mocks", "//test/mocks/stats:stats_mocks", "//test/test_common:threadsafe_singleton_injector_lib", ], diff --git a/test/extensions/filters/listener/http_inspector/http_inspector_config_test.cc b/test/extensions/filters/listener/http_inspector/http_inspector_config_test.cc index c2037ed8ca67..4fba684513d2 100644 --- a/test/extensions/filters/listener/http_inspector/http_inspector_config_test.cc +++ b/test/extensions/filters/listener/http_inspector/http_inspector_config_test.cc @@ -1,7 +1,7 @@ #include "extensions/filters/listener/http_inspector/http_inspector.h" #include "extensions/filters/listener/well_known_names.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/listener_factory_context.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/listener/original_src/BUILD b/test/extensions/filters/listener/original_src/BUILD index 235c5263793d..c3cf3365ad0a 100644 --- a/test/extensions/filters/listener/original_src/BUILD +++ b/test/extensions/filters/listener/original_src/BUILD @@ -32,7 +32,7 @@ envoy_extension_cc_test( "//source/extensions/filters/listener/original_src:config", "//source/extensions/filters/listener/original_src:config_lib", "//source/extensions/filters/listener/original_src:original_src_lib", - "//test/mocks/server:server_mocks", + "//test/mocks/server:listener_factory_context_mocks", ], ) diff --git a/test/extensions/filters/listener/original_src/original_src_config_factory_test.cc b/test/extensions/filters/listener/original_src/original_src_config_factory_test.cc 
index dca16ca9e455..eac36949aa96 100644 --- a/test/extensions/filters/listener/original_src/original_src_config_factory_test.cc +++ b/test/extensions/filters/listener/original_src/original_src_config_factory_test.cc @@ -2,7 +2,7 @@ #include "extensions/filters/listener/original_src/original_src.h" #include "extensions/filters/listener/original_src/original_src_config_factory.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/listener_factory_context.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/listener/proxy_protocol/BUILD b/test/extensions/filters/listener/proxy_protocol/BUILD index 1ff78bc0b820..f37389795778 100644 --- a/test/extensions/filters/listener/proxy_protocol/BUILD +++ b/test/extensions/filters/listener/proxy_protocol/BUILD @@ -30,7 +30,7 @@ envoy_extension_cc_test( "//test/mocks/api:api_mocks", "//test/mocks/buffer:buffer_mocks", "//test/mocks/network:network_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:listener_factory_context_mocks", "//test/test_common:environment_lib", "//test/test_common:network_utility_lib", "//test/test_common:threadsafe_singleton_injector_lib", diff --git a/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc b/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc index 05a65551e6de..b14e18c7ade6 100644 --- a/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc +++ b/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc @@ -23,7 +23,7 @@ #include "test/mocks/api/mocks.h" #include "test/mocks/buffer/mocks.h" #include "test/mocks/network/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/listener_factory_context.h" #include "test/test_common/environment.h" #include "test/test_common/network_utility.h" #include "test/test_common/printers.h" diff --git a/test/extensions/filters/udp/dns_filter/BUILD b/test/extensions/filters/udp/dns_filter/BUILD index 2ac152fc7f1a..31583a527a63 100644 --- a/test/extensions/filters/udp/dns_filter/BUILD +++ b/test/extensions/filters/udp/dns_filter/BUILD @@ -32,7 +32,8 @@ envoy_extension_cc_test( deps = [ ":dns_filter_test_lib", "//source/extensions/filters/udp/dns_filter:dns_filter_lib", - "//test/mocks/server:server_mocks", + "//test/mocks/server:instance_mocks", + "//test/mocks/server:listener_factory_context_mocks", "//test/mocks/upstream:upstream_mocks", "//test/test_common:environment_lib", "@envoy_api//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg_cc_proto", diff --git a/test/extensions/filters/udp/dns_filter/dns_filter_test.cc b/test/extensions/filters/udp/dns_filter/dns_filter_test.cc index 80914f2f7cbd..9da9c52ced95 100644 --- a/test/extensions/filters/udp/dns_filter/dns_filter_test.cc +++ b/test/extensions/filters/udp/dns_filter/dns_filter_test.cc @@ -4,7 +4,8 @@ #include "common/common/logger.h" #include "test/mocks/event/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/instance.h" +#include "test/mocks/server/listener_factory_context.h" #include "test/test_common/environment.h" #include "test/test_common/simulated_time_system.h" diff --git a/test/extensions/health_checkers/redis/BUILD b/test/extensions/health_checkers/redis/BUILD index aa4f80e21330..e03d6efc0a52 100644 --- a/test/extensions/health_checkers/redis/BUILD +++ b/test/extensions/health_checkers/redis/BUILD @@ -39,9 +39,10 @@ envoy_extension_cc_test( "//source/common/upstream:health_checker_lib", 
"//source/extensions/health_checkers/redis:config", "//test/common/upstream:utility_lib", + "//test/mocks/access_log:access_log_mocks", "//test/mocks/network:network_mocks", "//test/mocks/runtime:runtime_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:health_checker_factory_context_mocks", "//test/mocks/upstream:upstream_mocks", "@envoy_api//envoy/extensions/filters/network/redis_proxy/v3:pkg_cc_proto", ], diff --git a/test/extensions/health_checkers/redis/config_test.cc b/test/extensions/health_checkers/redis/config_test.cc index 84945209284e..cd18ecc1644c 100644 --- a/test/extensions/health_checkers/redis/config_test.cc +++ b/test/extensions/health_checkers/redis/config_test.cc @@ -6,9 +6,10 @@ #include "extensions/health_checkers/redis/config.h" #include "test/common/upstream/utility.h" +#include "test/mocks/access_log/mocks.h" #include "test/mocks/network/mocks.h" #include "test/mocks/runtime/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/health_checker_factory_context.h" #include "test/mocks/upstream/mocks.h" namespace Envoy { diff --git a/test/extensions/quic_listeners/quiche/BUILD b/test/extensions/quic_listeners/quiche/BUILD index 482bae22257e..e14bc1f36fef 100644 --- a/test/extensions/quic_listeners/quiche/BUILD +++ b/test/extensions/quic_listeners/quiche/BUILD @@ -179,7 +179,7 @@ envoy_cc_test( "//source/extensions/quic_listeners/quiche:envoy_quic_utils_lib", "//source/server:configuration_lib", "//test/mocks/network:network_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:instance_mocks", "//test/test_common:network_utility_lib", "//test/test_common:simulated_time_system_lib", "@com_googlesource_quiche//:quic_test_tools_crypto_server_config_peer_lib", diff --git a/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc b/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc index 13c38062a376..8c1e7e222790 100644 --- a/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc +++ b/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc @@ -32,7 +32,8 @@ #include "test/test_common/simulated_time_system.h" #include "test/test_common/environment.h" #include "test/mocks/network/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/instance.h" + #include "test/mocks/runtime/mocks.h" #include "test/test_common/utility.h" #include "test/test_common/network_utility.h" diff --git a/test/extensions/stats_sinks/dog_statsd/BUILD b/test/extensions/stats_sinks/dog_statsd/BUILD index 8c909f5d54f0..a9a0057336fa 100644 --- a/test/extensions/stats_sinks/dog_statsd/BUILD +++ b/test/extensions/stats_sinks/dog_statsd/BUILD @@ -19,7 +19,7 @@ envoy_extension_cc_test( "//include/envoy/registry", "//source/common/protobuf:utility_lib", "//source/extensions/stat_sinks/dog_statsd:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:instance_mocks", "//test/test_common:environment_lib", "//test/test_common:network_utility_lib", "//test/test_common:utility_lib", diff --git a/test/extensions/stats_sinks/dog_statsd/config_test.cc b/test/extensions/stats_sinks/dog_statsd/config_test.cc index 8108dd6618d2..e3a20a067ca7 100644 --- a/test/extensions/stats_sinks/dog_statsd/config_test.cc +++ b/test/extensions/stats_sinks/dog_statsd/config_test.cc @@ -9,7 +9,7 @@ #include "extensions/stat_sinks/dog_statsd/config.h" #include "extensions/stat_sinks/well_known_names.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/instance.h" #include 
"test/test_common/environment.h" #include "test/test_common/network_utility.h" #include "test/test_common/utility.h" diff --git a/test/extensions/stats_sinks/hystrix/BUILD b/test/extensions/stats_sinks/hystrix/BUILD index 093fa1f1d516..127f263d8d42 100644 --- a/test/extensions/stats_sinks/hystrix/BUILD +++ b/test/extensions/stats_sinks/hystrix/BUILD @@ -19,7 +19,7 @@ envoy_extension_cc_test( "//include/envoy/registry", "//source/common/protobuf:utility_lib", "//source/extensions/stat_sinks/hystrix:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:instance_mocks", "//test/test_common:environment_lib", "//test/test_common:network_utility_lib", "//test/test_common:utility_lib", @@ -34,7 +34,9 @@ envoy_extension_cc_test( deps = [ "//source/common/stats:stats_lib", "//source/extensions/stat_sinks/hystrix:hystrix_lib", - "//test/mocks/server:server_mocks", + "//test/mocks/server:admin_mocks", + "//test/mocks/server:admin_stream_mocks", + "//test/mocks/server:instance_mocks", "//test/mocks/stats:stats_mocks", "//test/mocks/upstream:upstream_mocks", ], diff --git a/test/extensions/stats_sinks/hystrix/config_test.cc b/test/extensions/stats_sinks/hystrix/config_test.cc index e99723421022..79bf2c4bd21e 100644 --- a/test/extensions/stats_sinks/hystrix/config_test.cc +++ b/test/extensions/stats_sinks/hystrix/config_test.cc @@ -7,7 +7,7 @@ #include "extensions/stat_sinks/hystrix/hystrix.h" #include "extensions/stat_sinks/well_known_names.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/instance.h" #include "test/test_common/environment.h" #include "test/test_common/network_utility.h" #include "test/test_common/utility.h" diff --git a/test/extensions/stats_sinks/hystrix/hystrix_test.cc b/test/extensions/stats_sinks/hystrix/hystrix_test.cc index a09fed9ffb12..f7e9671a2956 100644 --- a/test/extensions/stats_sinks/hystrix/hystrix_test.cc +++ b/test/extensions/stats_sinks/hystrix/hystrix_test.cc @@ -5,7 +5,9 @@ #include "extensions/stat_sinks/hystrix/hystrix.h" #include "test/mocks/network/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/admin.h" +#include "test/mocks/server/admin_stream.h" +#include "test/mocks/server/instance.h" #include "test/mocks/stats/mocks.h" #include "test/mocks/upstream/mocks.h" diff --git a/test/extensions/stats_sinks/statsd/BUILD b/test/extensions/stats_sinks/statsd/BUILD index 0b21cee0f5c3..b1c56f0466d8 100644 --- a/test/extensions/stats_sinks/statsd/BUILD +++ b/test/extensions/stats_sinks/statsd/BUILD @@ -19,7 +19,7 @@ envoy_extension_cc_test( "//include/envoy/registry", "//source/common/protobuf:utility_lib", "//source/extensions/stat_sinks/statsd:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:instance_mocks", "//test/test_common:environment_lib", "//test/test_common:network_utility_lib", "//test/test_common:utility_lib", diff --git a/test/extensions/stats_sinks/statsd/config_test.cc b/test/extensions/stats_sinks/statsd/config_test.cc index 44c89aa5e579..48e2a575b087 100644 --- a/test/extensions/stats_sinks/statsd/config_test.cc +++ b/test/extensions/stats_sinks/statsd/config_test.cc @@ -10,7 +10,7 @@ #include "extensions/stat_sinks/statsd/config.h" #include "extensions/stat_sinks/well_known_names.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/instance.h" #include "test/test_common/environment.h" #include "test/test_common/network_utility.h" #include "test/test_common/utility.h" diff --git a/test/tools/router_check/BUILD b/test/tools/router_check/BUILD index 
610e09311027..992fb394d424 100644 --- a/test/tools/router_check/BUILD +++ b/test/tools/router_check/BUILD @@ -35,7 +35,7 @@ envoy_cc_test_library( "//source/common/router:config_lib", "//source/common/stats:stats_lib", "//source/exe:platform_impl_lib", - "//test/mocks/server:server_mocks", + "//test/mocks/server:instance_mocks", "//test/test_common:printers_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", diff --git a/test/tools/router_check/coverage.h b/test/tools/router_check/coverage.h index 13019eec1af5..051ea3a50f7c 100644 --- a/test/tools/router_check/coverage.h +++ b/test/tools/router_check/coverage.h @@ -3,8 +3,6 @@ #include "envoy/config/route/v3/route.pb.h" #include "envoy/router/router.h" -#include "test/mocks/server/mocks.h" - namespace Envoy { class RouteCoverage : Logger::Loggable { public: diff --git a/test/tools/router_check/router.h b/test/tools/router_check/router.h index 19d2f86d746e..04da7a40af7d 100644 --- a/test/tools/router_check/router.h +++ b/test/tools/router_check/router.h @@ -14,7 +14,7 @@ #include "common/router/config_impl.h" #include "common/stats/fake_symbol_table_impl.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/instance.h" #include "test/test_common/global.h" #include "test/test_common/printers.h" #include "test/test_common/utility.h" From 28dd4d3bb5774904ddb3ce523ca607e52e19cecd Mon Sep 17 00:00:00 2001 From: Wayne Zhang Date: Tue, 14 Jul 2020 12:41:44 -0700 Subject: [PATCH 623/909] Update jwt_verify_lib to 2020-07-09 with added PS alg support (#12031) Signed-off-by: Wayne Zhang --- bazel/repository_locations.bzl | 8 ++++---- .../http/http_filters/jwt_authn_filter.rst | 11 +++++++++-- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 2bb2ada34ae2..d2dd2feb2a76 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -278,10 +278,10 @@ DEPENDENCY_REPOSITORIES = dict( cpe = "N/A", ), com_github_google_jwt_verify = dict( - sha256 = "d2e28897c297bd04429e43a1b485f7350acc23cbfee6365b8a3634c17840b2f6", - strip_prefix = "jwt_verify_lib-f44cf49d185ad0694b472da78071b4d67313fb86", - # 2020-06-03 - urls = ["https://github.com/google/jwt_verify_lib/archive/f44cf49d185ad0694b472da78071b4d67313fb86.tar.gz"], + sha256 = "f1fde4f3ebb3b2d841332c7a02a4b50e0529a19709934c63bc6208d1bbe28fb1", + strip_prefix = "jwt_verify_lib-7276a339af8426724b744216f619c99152f8c141", + # 2020-07-09 + urls = ["https://github.com/google/jwt_verify_lib/archive/7276a339af8426724b744216f619c99152f8c141.tar.gz"], use_category = ["dataplane"], cpe = "N/A", ), diff --git a/docs/root/configuration/http/http_filters/jwt_authn_filter.rst b/docs/root/configuration/http/http_filters/jwt_authn_filter.rst index 27f2d739a330..d084f1c9f44d 100644 --- a/docs/root/configuration/http/http_filters/jwt_authn_filter.rst +++ b/docs/root/configuration/http/http_filters/jwt_authn_filter.rst @@ -7,8 +7,15 @@ This HTTP filter can be used to verify JSON Web Token (JWT). It will verify its JWKS is needed to verify JWT signatures. They can be specified in the filter config or can be fetched remotely from a JWKS server. -.. attention:: - EdDSA, ES256, ES384, ES512, HS256, HS384, HS512, RS256, RS384 and RS512 are supported for the JWT alg. +Following are supported JWT alg: + +.. 
code-block:: + + ES256, ES384, ES512, + HS256, HS384, HS512, + RS256, RS384, RS512, + PS256, PS384, PS512, + EdDSA Configuration ------------- From 964da242e69936c1aee60b67128727d77e7bbf64 Mon Sep 17 00:00:00 2001 From: foreseeable Date: Tue, 14 Jul 2020 20:09:21 +0000 Subject: [PATCH 624/909] test: refactor header inclusion to speed up building (for test/integration/...) (#12053) Commit Message: refactor header inclusion to speed up building Additional Description: Risk Level: low Testing: exsiting tests Docs Changes: N/A Release Notes: no Related Issues: #10917 Signed-off-by: Muge Chen --- test/extensions/transport_sockets/alts/BUILD | 1 + test/integration/BUILD | 5 +++-- test/integration/integration.h | 4 +++- test/integration/ssl_utility.cc | 2 +- test/integration/xfcc_integration_test.h | 2 +- 5 files changed, 9 insertions(+), 5 deletions(-) diff --git a/test/extensions/transport_sockets/alts/BUILD b/test/extensions/transport_sockets/alts/BUILD index 386c25ace615..edfa523ba12f 100644 --- a/test/extensions/transport_sockets/alts/BUILD +++ b/test/extensions/transport_sockets/alts/BUILD @@ -89,6 +89,7 @@ envoy_extension_cc_test( "//test/integration:http_integration_lib", "//test/mocks/runtime:runtime_mocks", "//test/mocks/secret:secret_mocks", + "//test/mocks/server:server_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/config/transport_socket/alts/v2alpha:pkg_cc_proto", ], diff --git a/test/integration/BUILD b/test/integration/BUILD index a8ed5cb5dbcc..2efec82ce37b 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -624,6 +624,7 @@ envoy_cc_test_library( "//source/extensions/transport_sockets/raw_buffer:config", "//source/extensions/transport_sockets/tap:config", "//source/extensions/transport_sockets/tls:config", + "//source/extensions/transport_sockets/tls:context_lib", "//source/server:connection_handler_lib", "//source/server:drain_manager_lib", "//source/server:hot_restart_nop_lib", @@ -635,7 +636,7 @@ envoy_cc_test_library( "//test/common/upstream:utility_lib", "//test/config:utility_lib", "//test/mocks/buffer:buffer_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:transport_socket_factory_context_mocks", "//test/mocks/stats:stats_mocks", "//test/mocks/upstream:upstream_mocks", "//test/test_common:environment_lib", @@ -1097,7 +1098,7 @@ envoy_cc_test( ":http_integration_lib", "//source/common/http:header_map_lib", "//source/extensions/transport_sockets/tls:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:transport_socket_factory_context_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", diff --git a/test/integration/integration.h b/test/integration/integration.h index dbf30d912bb4..0d0081711f93 100644 --- a/test/integration/integration.h +++ b/test/integration/integration.h @@ -13,13 +13,15 @@ #include "common/config/version_converter.h" #include "common/http/codec_client.h" +#include "extensions/transport_sockets/tls/context_manager_impl.h" + #include "test/common/grpc/grpc_client_integration.h" #include "test/config/utility.h" #include "test/integration/fake_upstream.h" #include "test/integration/server.h" #include "test/integration/utility.h" #include "test/mocks/buffer/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/transport_socket_factory_context.h" #include "test/test_common/environment.h" #include "test/test_common/network_utility.h" #include 
"test/test_common/printers.h" diff --git a/test/integration/ssl_utility.cc b/test/integration/ssl_utility.cc index 14c1a0bf85c8..aab3dd5d4adf 100644 --- a/test/integration/ssl_utility.cc +++ b/test/integration/ssl_utility.cc @@ -12,7 +12,7 @@ #include "test/config/utility.h" #include "test/integration/server.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/transport_socket_factory_context.h" #include "test/test_common/environment.h" #include "test/test_common/network_utility.h" diff --git a/test/integration/xfcc_integration_test.h b/test/integration/xfcc_integration_test.h index 538a1bc86d97..6e4997f8b0dc 100644 --- a/test/integration/xfcc_integration_test.h +++ b/test/integration/xfcc_integration_test.h @@ -8,7 +8,7 @@ #include "test/config/integration/certs/clientcert_hash.h" #include "test/integration/http_integration.h" #include "test/integration/server.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/transport_socket_factory_context.h" #include "absl/strings/ascii.h" #include "absl/strings/str_replace.h" From c342493b3e64732dcbcb0a1687a3328b209f314a Mon Sep 17 00:00:00 2001 From: foreseeable Date: Tue, 14 Jul 2020 20:10:08 +0000 Subject: [PATCH 625/909] test: refactor header inclusion to speed up building (for test/extensions/filters/network/...) (#12051) Commit Message: refactor header inclusion to speed up building Additional Description: Risk Level: low Testing: exsiting tests Docs Changes: N/A Release Notes: no Related Issues: #10917 Signed-off-by: Muge Chen --- .../extensions/filters/network/client_ssl_auth/BUILD | 2 +- .../filters/network/client_ssl_auth/config_test.cc | 2 +- test/extensions/filters/network/dubbo_proxy/BUILD | 10 +++++----- .../filters/network/dubbo_proxy/config_test.cc | 2 +- .../filters/network/dubbo_proxy/conn_manager_test.cc | 2 +- .../network/dubbo_proxy/route_matcher_test.cc | 2 +- .../network/dubbo_proxy/router_filter_config_test.cc | 2 +- .../filters/network/dubbo_proxy/router_test.cc | 2 +- test/extensions/filters/network/ext_authz/BUILD | 2 +- .../filters/network/ext_authz/config_test.cc | 2 +- .../filters/network/http_connection_manager/BUILD | 2 +- .../network/http_connection_manager/config_test.cc | 2 +- test/extensions/filters/network/kafka/broker/BUILD | 2 +- .../filters/network/kafka/broker/config_unit_test.cc | 2 +- test/extensions/filters/network/mongo_proxy/BUILD | 2 +- .../filters/network/mongo_proxy/config_test.cc | 2 +- test/extensions/filters/network/ratelimit/BUILD | 3 ++- .../filters/network/ratelimit/config_test.cc | 3 ++- test/extensions/filters/network/rbac/BUILD | 2 +- test/extensions/filters/network/rbac/config_test.cc | 2 +- test/extensions/filters/network/redis_proxy/BUILD | 2 +- .../filters/network/redis_proxy/config_test.cc | 2 +- test/extensions/filters/network/rocketmq_proxy/BUILD | 12 ++++++------ .../network/rocketmq_proxy/active_message_test.cc | 2 +- .../filters/network/rocketmq_proxy/codec_test.cc | 4 +++- .../filters/network/rocketmq_proxy/config_test.cc | 3 ++- .../network/rocketmq_proxy/conn_manager_test.cc | 3 ++- .../filters/network/rocketmq_proxy/mocks.h | 1 - .../filters/network/rocketmq_proxy/router_test.cc | 2 +- .../filters/network/rocketmq_proxy/utility.h | 2 -- test/extensions/filters/network/sni_cluster/BUILD | 2 +- .../filters/network/sni_cluster/sni_cluster_test.cc | 2 +- test/extensions/filters/network/tcp_proxy/BUILD | 2 +- .../filters/network/tcp_proxy/config_test.cc | 2 +- test/extensions/filters/network/thrift_proxy/BUILD | 8 ++++---- 
.../filters/network/thrift_proxy/config_test.cc | 2 +- .../network/thrift_proxy/conn_manager_test.cc | 2 +- .../network/thrift_proxy/filters/ratelimit/BUILD | 2 +- .../thrift_proxy/filters/ratelimit/config_test.cc | 2 +- .../network/thrift_proxy/router_ratelimit_test.cc | 2 +- .../filters/network/thrift_proxy/router_test.cc | 2 +- .../extensions/filters/network/zookeeper_proxy/BUILD | 2 +- .../filters/network/zookeeper_proxy/config_test.cc | 2 +- 43 files changed, 59 insertions(+), 56 deletions(-) diff --git a/test/extensions/filters/network/client_ssl_auth/BUILD b/test/extensions/filters/network/client_ssl_auth/BUILD index b24854cbce09..a168a2a0b3b0 100644 --- a/test/extensions/filters/network/client_ssl_auth/BUILD +++ b/test/extensions/filters/network/client_ssl_auth/BUILD @@ -36,7 +36,7 @@ envoy_extension_cc_test( deps = [ "//source/common/protobuf:utility_lib", "//source/extensions/filters/network/client_ssl_auth:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "@envoy_api//envoy/extensions/filters/network/client_ssl_auth/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/filters/network/client_ssl_auth/config_test.cc b/test/extensions/filters/network/client_ssl_auth/config_test.cc index 7426428f7593..0acd58fa1bbc 100644 --- a/test/extensions/filters/network/client_ssl_auth/config_test.cc +++ b/test/extensions/filters/network/client_ssl_auth/config_test.cc @@ -7,7 +7,7 @@ #include "extensions/filters/network/client_ssl_auth/config.h" #include "extensions/filters/network/well_known_names.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/network/dubbo_proxy/BUILD b/test/extensions/filters/network/dubbo_proxy/BUILD index 20410865388d..1a2b5e0aa879 100644 --- a/test/extensions/filters/network/dubbo_proxy/BUILD +++ b/test/extensions/filters/network/dubbo_proxy/BUILD @@ -89,7 +89,7 @@ envoy_extension_cc_test( ":utility_lib", "//source/extensions/filters/network/dubbo_proxy:config", "//source/extensions/filters/network/dubbo_proxy/filters:filter_config_interface", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/test_common:registry_lib", "@envoy_api//envoy/extensions/filters/network/dubbo_proxy/v3:pkg_cc_proto", ], @@ -112,7 +112,7 @@ envoy_extension_cc_test( deps = [ "//source/extensions/filters/network/dubbo_proxy:metadata_lib", "//source/extensions/filters/network/dubbo_proxy/router:route_matcher", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "@envoy_api//envoy/extensions/filters/network/dubbo_proxy/v3:pkg_cc_proto", ], ) @@ -128,7 +128,7 @@ envoy_extension_cc_test( "//source/extensions/filters/network/dubbo_proxy:dubbo_protocol_impl_lib", "//source/extensions/filters/network/dubbo_proxy:metadata_lib", "//source/extensions/filters/network/dubbo_proxy/router:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/test_common:registry_lib", ], ) @@ -156,7 +156,7 @@ envoy_extension_cc_test( ":mocks_lib", "//source/extensions/filters/network/dubbo_proxy/filters:well_known_names", "//source/extensions/filters/network/dubbo_proxy/router:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "@envoy_api//envoy/extensions/filters/network/dubbo_proxy/router/v3:pkg_cc_proto", ], ) @@ -185,7 +185,7 @@ envoy_extension_cc_test( 
"//source/extensions/filters/network/dubbo_proxy:dubbo_hessian2_serializer_impl_lib", "//source/extensions/filters/network/dubbo_proxy:dubbo_protocol_impl_lib", "//test/common/stats:stat_test_utility_lib", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "@envoy_api//envoy/extensions/filters/network/dubbo_proxy/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/filters/network/dubbo_proxy/config_test.cc b/test/extensions/filters/network/dubbo_proxy/config_test.cc index 559d5995fe17..c72283f56a3d 100644 --- a/test/extensions/filters/network/dubbo_proxy/config_test.cc +++ b/test/extensions/filters/network/dubbo_proxy/config_test.cc @@ -5,7 +5,7 @@ #include "extensions/filters/network/dubbo_proxy/filters/filter_config.h" #include "test/extensions/filters/network/dubbo_proxy/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/test_common/registry.h" #include "gmock/gmock.h" diff --git a/test/extensions/filters/network/dubbo_proxy/conn_manager_test.cc b/test/extensions/filters/network/dubbo_proxy/conn_manager_test.cc index 9bafde1f0e8b..f97573ff5da9 100644 --- a/test/extensions/filters/network/dubbo_proxy/conn_manager_test.cc +++ b/test/extensions/filters/network/dubbo_proxy/conn_manager_test.cc @@ -14,7 +14,7 @@ #include "test/extensions/filters/network/dubbo_proxy/mocks.h" #include "test/extensions/filters/network/dubbo_proxy/utility.h" #include "test/mocks/network/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/test_common/printers.h" #include "gmock/gmock.h" diff --git a/test/extensions/filters/network/dubbo_proxy/route_matcher_test.cc b/test/extensions/filters/network/dubbo_proxy/route_matcher_test.cc index 07592bcad833..7aab9ba07553 100644 --- a/test/extensions/filters/network/dubbo_proxy/route_matcher_test.cc +++ b/test/extensions/filters/network/dubbo_proxy/route_matcher_test.cc @@ -8,7 +8,7 @@ #include "extensions/filters/network/dubbo_proxy/router/route_matcher.h" #include "extensions/filters/network/dubbo_proxy/serializer_impl.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/network/dubbo_proxy/router_filter_config_test.cc b/test/extensions/filters/network/dubbo_proxy/router_filter_config_test.cc index 1ef5e61dacda..6f9c65ffb429 100644 --- a/test/extensions/filters/network/dubbo_proxy/router_filter_config_test.cc +++ b/test/extensions/filters/network/dubbo_proxy/router_filter_config_test.cc @@ -5,7 +5,7 @@ #include "extensions/filters/network/dubbo_proxy/router/config.h" #include "test/extensions/filters/network/dubbo_proxy/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/network/dubbo_proxy/router_test.cc b/test/extensions/filters/network/dubbo_proxy/router_test.cc index 26405ea3a6b1..7367a3341697 100644 --- a/test/extensions/filters/network/dubbo_proxy/router_test.cc +++ b/test/extensions/filters/network/dubbo_proxy/router_test.cc @@ -9,7 +9,7 @@ #include "test/extensions/filters/network/dubbo_proxy/mocks.h" #include "test/mocks/network/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/test_common/printers.h" #include "test/test_common/registry.h" diff --git a/test/extensions/filters/network/ext_authz/BUILD 
b/test/extensions/filters/network/ext_authz/BUILD index 20e8b566ebae..f8b80bc75c63 100644 --- a/test/extensions/filters/network/ext_authz/BUILD +++ b/test/extensions/filters/network/ext_authz/BUILD @@ -40,7 +40,7 @@ envoy_extension_cc_test( extension_name = "envoy.filters.network.ext_authz", deps = [ "//source/extensions/filters/network/ext_authz:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/network/ext_authz/v3:pkg_cc_proto", ], diff --git a/test/extensions/filters/network/ext_authz/config_test.cc b/test/extensions/filters/network/ext_authz/config_test.cc index bdee7d9a7a69..d1093ccdb483 100644 --- a/test/extensions/filters/network/ext_authz/config_test.cc +++ b/test/extensions/filters/network/ext_authz/config_test.cc @@ -5,7 +5,7 @@ #include "extensions/filters/network/ext_authz/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" diff --git a/test/extensions/filters/network/http_connection_manager/BUILD b/test/extensions/filters/network/http_connection_manager/BUILD index 1725e4dd9de3..4f264b13a029 100644 --- a/test/extensions/filters/network/http_connection_manager/BUILD +++ b/test/extensions/filters/network/http_connection_manager/BUILD @@ -31,7 +31,7 @@ envoy_extension_cc_test( "//source/extensions/filters/network/http_connection_manager:config", "//test/integration/filters:encoder_decoder_buffer_filter_lib", "//test/mocks/network:network_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/test_common:registry_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", diff --git a/test/extensions/filters/network/http_connection_manager/config_test.cc b/test/extensions/filters/network/http_connection_manager/config_test.cc index 20731f6e51e5..d1d32fdd634e 100644 --- a/test/extensions/filters/network/http_connection_manager/config_test.cc +++ b/test/extensions/filters/network/http_connection_manager/config_test.cc @@ -18,7 +18,7 @@ #include "test/mocks/config/mocks.h" #include "test/mocks/http/mocks.h" #include "test/mocks/network/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/test_common/printers.h" #include "test/test_common/registry.h" #include "test/test_common/utility.h" diff --git a/test/extensions/filters/network/kafka/broker/BUILD b/test/extensions/filters/network/kafka/broker/BUILD index da35cc9ea7c4..c765de8405cf 100644 --- a/test/extensions/filters/network/kafka/broker/BUILD +++ b/test/extensions/filters/network/kafka/broker/BUILD @@ -17,7 +17,7 @@ envoy_extension_cc_test( extension_name = "envoy.filters.network.kafka_broker", deps = [ "//source/extensions/filters/network/kafka:kafka_broker_config_lib", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", ], ) diff --git a/test/extensions/filters/network/kafka/broker/config_unit_test.cc b/test/extensions/filters/network/kafka/broker/config_unit_test.cc index d316bb65d5d9..e464acad0b96 100644 --- a/test/extensions/filters/network/kafka/broker/config_unit_test.cc +++ b/test/extensions/filters/network/kafka/broker/config_unit_test.cc @@ -1,6 +1,6 @@ #include "extensions/filters/network/kafka/broker/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "gmock/gmock.h" 
#include "gtest/gtest.h" diff --git a/test/extensions/filters/network/mongo_proxy/BUILD b/test/extensions/filters/network/mongo_proxy/BUILD index 07c6cc33cca0..b8ed6e190b3b 100644 --- a/test/extensions/filters/network/mongo_proxy/BUILD +++ b/test/extensions/filters/network/mongo_proxy/BUILD @@ -71,7 +71,7 @@ envoy_extension_cc_test( extension_name = "envoy.filters.network.mongo_proxy", deps = [ "//source/extensions/filters/network/mongo_proxy:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/extensions/filters/network/mongo_proxy/v3:pkg_cc_proto", "@envoy_api//envoy/type/v3:pkg_cc_proto", diff --git a/test/extensions/filters/network/mongo_proxy/config_test.cc b/test/extensions/filters/network/mongo_proxy/config_test.cc index bab4fcc44661..35f46fad6cbc 100644 --- a/test/extensions/filters/network/mongo_proxy/config_test.cc +++ b/test/extensions/filters/network/mongo_proxy/config_test.cc @@ -6,7 +6,7 @@ #include "extensions/filters/network/mongo_proxy/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" diff --git a/test/extensions/filters/network/ratelimit/BUILD b/test/extensions/filters/network/ratelimit/BUILD index 38c2eb7a8512..99a0f31eb85b 100644 --- a/test/extensions/filters/network/ratelimit/BUILD +++ b/test/extensions/filters/network/ratelimit/BUILD @@ -35,7 +35,8 @@ envoy_extension_cc_test( extension_name = "envoy.filters.network.ratelimit", deps = [ "//source/extensions/filters/network/ratelimit:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", + "//test/mocks/server:instance_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/network/ratelimit/v3:pkg_cc_proto", diff --git a/test/extensions/filters/network/ratelimit/config_test.cc b/test/extensions/filters/network/ratelimit/config_test.cc index d8bbeff1bd7e..ab6b01d753c8 100644 --- a/test/extensions/filters/network/ratelimit/config_test.cc +++ b/test/extensions/filters/network/ratelimit/config_test.cc @@ -4,7 +4,8 @@ #include "extensions/filters/network/ratelimit/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" +#include "test/mocks/server/instance.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" diff --git a/test/extensions/filters/network/rbac/BUILD b/test/extensions/filters/network/rbac/BUILD index 8d4d479cefb0..d4db04fee972 100644 --- a/test/extensions/filters/network/rbac/BUILD +++ b/test/extensions/filters/network/rbac/BUILD @@ -17,7 +17,7 @@ envoy_extension_cc_test( extension_name = "envoy.filters.network.rbac", deps = [ "//source/extensions/filters/network/rbac:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "@envoy_api//envoy/config/rbac/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/network/rbac/v3:pkg_cc_proto", ], diff --git a/test/extensions/filters/network/rbac/config_test.cc b/test/extensions/filters/network/rbac/config_test.cc index 7da1addf69a2..ace7d65fb7b1 100644 --- a/test/extensions/filters/network/rbac/config_test.cc +++ b/test/extensions/filters/network/rbac/config_test.cc @@ -4,7 +4,7 @@ #include "extensions/filters/network/rbac/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "fmt/printf.h" #include "gmock/gmock.h" 
diff --git a/test/extensions/filters/network/redis_proxy/BUILD b/test/extensions/filters/network/redis_proxy/BUILD index 749e93663fea..3f83e1eeda4b 100644 --- a/test/extensions/filters/network/redis_proxy/BUILD +++ b/test/extensions/filters/network/redis_proxy/BUILD @@ -98,7 +98,7 @@ envoy_extension_cc_test( deps = [ "//source/common/protobuf:utility_lib", "//source/extensions/filters/network/redis_proxy:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/test_common:test_runtime_lib", "@envoy_api//envoy/extensions/filters/network/redis_proxy/v3:pkg_cc_proto", ], diff --git a/test/extensions/filters/network/redis_proxy/config_test.cc b/test/extensions/filters/network/redis_proxy/config_test.cc index 40420f298ddf..a9043af8cd6e 100644 --- a/test/extensions/filters/network/redis_proxy/config_test.cc +++ b/test/extensions/filters/network/redis_proxy/config_test.cc @@ -5,7 +5,7 @@ #include "extensions/filters/network/redis_proxy/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/test_common/test_runtime.h" #include "gmock/gmock.h" diff --git a/test/extensions/filters/network/rocketmq_proxy/BUILD b/test/extensions/filters/network/rocketmq_proxy/BUILD index 36381bf3f83d..f01055ab6742 100644 --- a/test/extensions/filters/network/rocketmq_proxy/BUILD +++ b/test/extensions/filters/network/rocketmq_proxy/BUILD @@ -20,7 +20,6 @@ envoy_cc_mock( deps = [ "//source/extensions/filters/network/rocketmq_proxy:config", "//source/extensions/filters/network/rocketmq_proxy/router:router_lib", - "//test/mocks/server:server_mocks", "//test/mocks/upstream:upstream_mocks", ], ) @@ -31,7 +30,6 @@ envoy_cc_test_library( hdrs = ["utility.h"], deps = [ "//source/extensions/filters/network/rocketmq_proxy:config", - "//test/mocks/server:server_mocks", ], ) @@ -54,7 +52,7 @@ envoy_extension_cc_test( ":mocks_lib", ":utility_lib", "//source/extensions/filters/network/rocketmq_proxy:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/test_common:utility_lib", ], ) @@ -79,7 +77,8 @@ envoy_extension_cc_test( "//test/common/stats:stat_test_utility_lib", "//test/common/upstream:utility_lib", "//test/mocks/network:network_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", + "//test/mocks/server:instance_mocks", "//test/mocks/stream_info:stream_info_mocks", "//test/test_common:utility_lib", ], @@ -93,7 +92,7 @@ envoy_extension_cc_test( ":utility_lib", "//source/extensions/filters/network/rocketmq_proxy:config", "//test/mocks/network:network_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/mocks/stream_info:stream_info_mocks", "//test/test_common:utility_lib", ], @@ -106,7 +105,8 @@ envoy_extension_cc_test( deps = [ "//source/extensions/filters/network/rocketmq_proxy:config", "//test/mocks/local_info:local_info_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", + "//test/mocks/server:instance_mocks", "//test/test_common:registry_lib", "@envoy_api//envoy/extensions/filters/network/rocketmq_proxy/v3:pkg_cc_proto", ], diff --git a/test/extensions/filters/network/rocketmq_proxy/active_message_test.cc b/test/extensions/filters/network/rocketmq_proxy/active_message_test.cc index 8b87e034692b..4715637df2b5 100644 --- a/test/extensions/filters/network/rocketmq_proxy/active_message_test.cc +++ 
b/test/extensions/filters/network/rocketmq_proxy/active_message_test.cc @@ -6,7 +6,7 @@ #include "test/extensions/filters/network/rocketmq_proxy/utility.h" #include "test/mocks/network/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/network/rocketmq_proxy/codec_test.cc b/test/extensions/filters/network/rocketmq_proxy/codec_test.cc index 08d8dd5021a1..902584b5b3fc 100644 --- a/test/extensions/filters/network/rocketmq_proxy/codec_test.cc +++ b/test/extensions/filters/network/rocketmq_proxy/codec_test.cc @@ -1,3 +1,5 @@ +#include "common/common/empty_string.h" +#include "common/common/enum_to_int.h" #include "common/network/address_impl.h" #include "common/protobuf/utility.h" @@ -794,4 +796,4 @@ TEST_F(RocketmqCodecTest, DecodeTopicFailure) { } // namespace RocketmqProxy } // namespace NetworkFilters } // namespace Extensions -} // namespace Envoy \ No newline at end of file +} // namespace Envoy diff --git a/test/extensions/filters/network/rocketmq_proxy/config_test.cc b/test/extensions/filters/network/rocketmq_proxy/config_test.cc index af4d5ef745e4..aa56bbe0a29c 100644 --- a/test/extensions/filters/network/rocketmq_proxy/config_test.cc +++ b/test/extensions/filters/network/rocketmq_proxy/config_test.cc @@ -4,7 +4,8 @@ #include "extensions/filters/network/rocketmq_proxy/config.h" #include "test/mocks/local_info/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" +#include "test/mocks/server/instance.h" #include "test/test_common/registry.h" #include "gmock/gmock.h" diff --git a/test/extensions/filters/network/rocketmq_proxy/conn_manager_test.cc b/test/extensions/filters/network/rocketmq_proxy/conn_manager_test.cc index 46f3af3adef3..84af7bdd9758 100644 --- a/test/extensions/filters/network/rocketmq_proxy/conn_manager_test.cc +++ b/test/extensions/filters/network/rocketmq_proxy/conn_manager_test.cc @@ -9,7 +9,8 @@ #include "test/extensions/filters/network/rocketmq_proxy/utility.h" #include "test/mocks/network/connection.h" #include "test/mocks/network/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" +#include "test/mocks/server/instance.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/network/rocketmq_proxy/mocks.h b/test/extensions/filters/network/rocketmq_proxy/mocks.h index a6cc6a05dd4c..2cef60f964f0 100644 --- a/test/extensions/filters/network/rocketmq_proxy/mocks.h +++ b/test/extensions/filters/network/rocketmq_proxy/mocks.h @@ -3,7 +3,6 @@ #include "extensions/filters/network/rocketmq_proxy/active_message.h" #include "extensions/filters/network/rocketmq_proxy/conn_manager.h" -#include "test/mocks/server/mocks.h" #include "test/mocks/upstream/mocks.h" #include "gmock/gmock.h" diff --git a/test/extensions/filters/network/rocketmq_proxy/router_test.cc b/test/extensions/filters/network/rocketmq_proxy/router_test.cc index a80d837d1b10..95d74a527dc7 100644 --- a/test/extensions/filters/network/rocketmq_proxy/router_test.cc +++ b/test/extensions/filters/network/rocketmq_proxy/router_test.cc @@ -5,7 +5,7 @@ #include "test/extensions/filters/network/rocketmq_proxy/mocks.h" #include "test/extensions/filters/network/rocketmq_proxy/utility.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/network/rocketmq_proxy/utility.h 
b/test/extensions/filters/network/rocketmq_proxy/utility.h index 1dc57d5f2a76..ad3809fd07cc 100644 --- a/test/extensions/filters/network/rocketmq_proxy/utility.h +++ b/test/extensions/filters/network/rocketmq_proxy/utility.h @@ -3,8 +3,6 @@ #include "extensions/filters/network/rocketmq_proxy/config.h" #include "extensions/filters/network/rocketmq_proxy/conn_manager.h" -#include "test/mocks/server/mocks.h" - #include "gtest/gtest.h" namespace Envoy { diff --git a/test/extensions/filters/network/sni_cluster/BUILD b/test/extensions/filters/network/sni_cluster/BUILD index 3bc852b873d7..b0024fecf442 100644 --- a/test/extensions/filters/network/sni_cluster/BUILD +++ b/test/extensions/filters/network/sni_cluster/BUILD @@ -19,7 +19,7 @@ envoy_extension_cc_test( "//source/extensions/filters/network/sni_cluster", "//source/extensions/filters/network/sni_cluster:config", "//test/mocks/network:network_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/mocks/stream_info:stream_info_mocks", ], ) diff --git a/test/extensions/filters/network/sni_cluster/sni_cluster_test.cc b/test/extensions/filters/network/sni_cluster/sni_cluster_test.cc index a047869ee054..61c9a79ee681 100644 --- a/test/extensions/filters/network/sni_cluster/sni_cluster_test.cc +++ b/test/extensions/filters/network/sni_cluster/sni_cluster_test.cc @@ -4,7 +4,7 @@ #include "extensions/filters/network/sni_cluster/sni_cluster.h" #include "test/mocks/network/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/mocks/stream_info/mocks.h" #include "gmock/gmock.h" diff --git a/test/extensions/filters/network/tcp_proxy/BUILD b/test/extensions/filters/network/tcp_proxy/BUILD index 73c4717e8ef3..ad332adc27ac 100644 --- a/test/extensions/filters/network/tcp_proxy/BUILD +++ b/test/extensions/filters/network/tcp_proxy/BUILD @@ -17,7 +17,7 @@ envoy_extension_cc_test( extension_name = "envoy.filters.network.tcp_proxy", deps = [ "//source/extensions/filters/network/tcp_proxy:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/extensions/filters/network/tcp_proxy/v3:pkg_cc_proto", ], diff --git a/test/extensions/filters/network/tcp_proxy/config_test.cc b/test/extensions/filters/network/tcp_proxy/config_test.cc index d1a3c2d6f1a2..3f2296dfc5d7 100644 --- a/test/extensions/filters/network/tcp_proxy/config_test.cc +++ b/test/extensions/filters/network/tcp_proxy/config_test.cc @@ -5,7 +5,7 @@ #include "extensions/filters/network/tcp_proxy/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" diff --git a/test/extensions/filters/network/thrift_proxy/BUILD b/test/extensions/filters/network/thrift_proxy/BUILD index 9428df5d84e7..5311f28d5f3c 100644 --- a/test/extensions/filters/network/thrift_proxy/BUILD +++ b/test/extensions/filters/network/thrift_proxy/BUILD @@ -209,7 +209,7 @@ envoy_extension_cc_test( ":mocks", "//source/extensions/filters/network/thrift_proxy:config", "//source/extensions/filters/network/thrift_proxy/router:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/test_common:registry_lib", "@envoy_api//envoy/extensions/filters/network/thrift_proxy/v3:pkg_cc_proto", ], @@ -228,7 +228,7 @@ envoy_extension_cc_test( "//source/extensions/filters/network/thrift_proxy/router:config", 
"//source/extensions/filters/network/thrift_proxy/router:router_interface", "//test/mocks/network:network_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/mocks/upstream:upstream_mocks", "//test/test_common:printers_lib", "@envoy_api//envoy/extensions/filters/network/thrift_proxy/v3:pkg_cc_proto", @@ -271,7 +271,7 @@ envoy_extension_cc_test( "//source/extensions/filters/network/thrift_proxy/router:config", "//source/extensions/filters/network/thrift_proxy/router:router_lib", "//test/mocks/network:network_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/mocks/upstream:upstream_mocks", "//test/test_common:printers_lib", "//test/test_common:registry_lib", @@ -291,7 +291,7 @@ envoy_extension_cc_test( "//source/extensions/filters/network/thrift_proxy/router:config", "//source/extensions/filters/network/thrift_proxy/router:router_ratelimit_lib", "//test/mocks/ratelimit:ratelimit_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/test_common:printers_lib", "@envoy_api//envoy/config/route/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/network/thrift_proxy/v3:pkg_cc_proto", diff --git a/test/extensions/filters/network/thrift_proxy/config_test.cc b/test/extensions/filters/network/thrift_proxy/config_test.cc index 8a37946c2383..b4afd50866e0 100644 --- a/test/extensions/filters/network/thrift_proxy/config_test.cc +++ b/test/extensions/filters/network/thrift_proxy/config_test.cc @@ -5,7 +5,7 @@ #include "extensions/filters/network/thrift_proxy/filters/factory_base.h" #include "test/extensions/filters/network/thrift_proxy/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/test_common/registry.h" #include "gmock/gmock.h" diff --git a/test/extensions/filters/network/thrift_proxy/conn_manager_test.cc b/test/extensions/filters/network/thrift_proxy/conn_manager_test.cc index e2cd685452cf..dd1810d17412 100644 --- a/test/extensions/filters/network/thrift_proxy/conn_manager_test.cc +++ b/test/extensions/filters/network/thrift_proxy/conn_manager_test.cc @@ -16,7 +16,7 @@ #include "test/extensions/filters/network/thrift_proxy/mocks.h" #include "test/extensions/filters/network/thrift_proxy/utility.h" #include "test/mocks/network/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/mocks/upstream/mocks.h" #include "test/test_common/printers.h" diff --git a/test/extensions/filters/network/thrift_proxy/filters/ratelimit/BUILD b/test/extensions/filters/network/thrift_proxy/filters/ratelimit/BUILD index 69b40d35e0e7..bff8a2ae1a21 100644 --- a/test/extensions/filters/network/thrift_proxy/filters/ratelimit/BUILD +++ b/test/extensions/filters/network/thrift_proxy/filters/ratelimit/BUILD @@ -38,7 +38,7 @@ envoy_extension_cc_test( deps = [ "//source/extensions/filters/network/thrift_proxy/filters/ratelimit:config", "//test/extensions/filters/network/thrift_proxy:mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3:pkg_cc_proto", ], diff --git a/test/extensions/filters/network/thrift_proxy/filters/ratelimit/config_test.cc b/test/extensions/filters/network/thrift_proxy/filters/ratelimit/config_test.cc index b18981195ab9..98d1e8eebef6 100644 --- 
a/test/extensions/filters/network/thrift_proxy/filters/ratelimit/config_test.cc +++ b/test/extensions/filters/network/thrift_proxy/filters/ratelimit/config_test.cc @@ -5,7 +5,7 @@ #include "extensions/filters/network/thrift_proxy/filters/ratelimit/config.h" #include "test/extensions/filters/network/thrift_proxy/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/network/thrift_proxy/router_ratelimit_test.cc b/test/extensions/filters/network/thrift_proxy/router_ratelimit_test.cc index 458412b7d0e0..18d6ddaaa5db 100644 --- a/test/extensions/filters/network/thrift_proxy/router_ratelimit_test.cc +++ b/test/extensions/filters/network/thrift_proxy/router_ratelimit_test.cc @@ -14,7 +14,7 @@ #include "test/extensions/filters/network/thrift_proxy/mocks.h" #include "test/mocks/ratelimit/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/network/thrift_proxy/router_test.cc b/test/extensions/filters/network/thrift_proxy/router_test.cc index ff7e57a5e14e..a8c9192bb258 100644 --- a/test/extensions/filters/network/thrift_proxy/router_test.cc +++ b/test/extensions/filters/network/thrift_proxy/router_test.cc @@ -13,7 +13,7 @@ #include "test/extensions/filters/network/thrift_proxy/mocks.h" #include "test/extensions/filters/network/thrift_proxy/utility.h" #include "test/mocks/network/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/mocks/upstream/mocks.h" #include "test/test_common/printers.h" #include "test/test_common/registry.h" diff --git a/test/extensions/filters/network/zookeeper_proxy/BUILD b/test/extensions/filters/network/zookeeper_proxy/BUILD index ec24601e81b9..33767ff1f06c 100644 --- a/test/extensions/filters/network/zookeeper_proxy/BUILD +++ b/test/extensions/filters/network/zookeeper_proxy/BUILD @@ -32,7 +32,7 @@ envoy_extension_cc_test( extension_name = "envoy.filters.network.zookeeper_proxy", deps = [ "//source/extensions/filters/network/zookeeper_proxy:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/extensions/filters/network/zookeeper_proxy/v3:pkg_cc_proto", ], diff --git a/test/extensions/filters/network/zookeeper_proxy/config_test.cc b/test/extensions/filters/network/zookeeper_proxy/config_test.cc index 274287b07d6e..34dc5ca0a651 100644 --- a/test/extensions/filters/network/zookeeper_proxy/config_test.cc +++ b/test/extensions/filters/network/zookeeper_proxy/config_test.cc @@ -3,7 +3,7 @@ #include "extensions/filters/network/zookeeper_proxy/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" From 69454ff5cf8c6c8b83d33ffbe0357051e2986cbd Mon Sep 17 00:00:00 2001 From: foreseeable Date: Tue, 14 Jul 2020 20:11:15 +0000 Subject: [PATCH 626/909] test: refactor header inclusion to speed up building (for test/extensions/transport_sockets/...) 
(#12050) Commit Message: refactor header inclusion to speed up building Additional Description: Risk Level: low Testing: exsiting tests Docs Changes: N/A Release Notes: no Related Issues: #10917 Signed-off-by: Muge Chen --- test/extensions/transport_sockets/alts/BUILD | 3 ++- .../transport_sockets/alts/alts_integration_test.cc | 3 ++- test/extensions/transport_sockets/alts/config_test.cc | 2 +- test/extensions/transport_sockets/tls/BUILD | 6 +++++- test/extensions/transport_sockets/tls/context_impl_test.cc | 4 +++- test/extensions/transport_sockets/tls/ssl_certs_test.h | 2 +- test/extensions/transport_sockets/tls/ssl_socket_test.cc | 4 +++- 7 files changed, 17 insertions(+), 7 deletions(-) diff --git a/test/extensions/transport_sockets/alts/BUILD b/test/extensions/transport_sockets/alts/BUILD index edfa523ba12f..489c29f78273 100644 --- a/test/extensions/transport_sockets/alts/BUILD +++ b/test/extensions/transport_sockets/alts/BUILD @@ -17,10 +17,11 @@ envoy_extension_cc_test( srcs = ["config_test.cc"], extension_name = "envoy.transport_sockets.alts", deps = [ + "//source/common/singleton:manager_impl_lib", "//source/extensions/transport_sockets/alts:config", "//test/mocks/event:event_mocks", "//test/mocks/network:network_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:transport_socket_factory_context_mocks", ], ) diff --git a/test/extensions/transport_sockets/alts/alts_integration_test.cc b/test/extensions/transport_sockets/alts/alts_integration_test.cc index eca7bbf6f7a0..609d70b73b3b 100644 --- a/test/extensions/transport_sockets/alts/alts_integration_test.cc +++ b/test/extensions/transport_sockets/alts/alts_integration_test.cc @@ -21,7 +21,8 @@ #include "test/integration/integration.h" #include "test/integration/server.h" #include "test/integration/utility.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/transport_socket_factory_context.h" + #include "test/test_common/network_utility.h" #include "test/test_common/utility.h" diff --git a/test/extensions/transport_sockets/alts/config_test.cc b/test/extensions/transport_sockets/alts/config_test.cc index 6e1ae044c469..a3c233ad7822 100644 --- a/test/extensions/transport_sockets/alts/config_test.cc +++ b/test/extensions/transport_sockets/alts/config_test.cc @@ -3,7 +3,7 @@ #include "extensions/transport_sockets/alts/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/transport_socket_factory_context.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/transport_sockets/tls/BUILD b/test/extensions/transport_sockets/tls/BUILD index 595da8e44483..4a7a2cd9481e 100644 --- a/test/extensions/transport_sockets/tls/BUILD +++ b/test/extensions/transport_sockets/tls/BUILD @@ -46,6 +46,8 @@ envoy_cc_test( "//source/extensions/transport_sockets/tls/private_key:private_key_manager_lib", "//test/extensions/transport_sockets/tls/test_data:cert_infos", "//test/mocks/buffer:buffer_mocks", + "//test/mocks/init:init_mocks", + "//test/mocks/local_info:local_info_mocks", "//test/mocks/network:network_mocks", "//test/mocks/runtime:runtime_mocks", "//test/mocks/server:server_mocks", @@ -79,9 +81,11 @@ envoy_cc_test( "//source/extensions/transport_sockets/tls:context_config_lib", "//source/extensions/transport_sockets/tls:context_lib", "//test/extensions/transport_sockets/tls/test_data:cert_infos", + "//test/mocks/init:init_mocks", + "//test/mocks/local_info:local_info_mocks", "//test/mocks/runtime:runtime_mocks", "//test/mocks/secret:secret_mocks", - 
"//test/mocks/server:server_mocks", + "//test/mocks/server:transport_socket_factory_context_mocks", "//test/mocks/ssl:ssl_mocks", "//test/test_common:environment_lib", "//test/test_common:simulated_time_system_lib", diff --git a/test/extensions/transport_sockets/tls/context_impl_test.cc b/test/extensions/transport_sockets/tls/context_impl_test.cc index eb9646266a83..c53c6a03cfc1 100644 --- a/test/extensions/transport_sockets/tls/context_impl_test.cc +++ b/test/extensions/transport_sockets/tls/context_impl_test.cc @@ -19,8 +19,10 @@ #include "test/extensions/transport_sockets/tls/test_data/no_san_cert_info.h" #include "test/extensions/transport_sockets/tls/test_data/san_dns3_cert_info.h" #include "test/extensions/transport_sockets/tls/test_data/san_ip_cert_info.h" +#include "test/mocks/init/mocks.h" +#include "test/mocks/local_info/mocks.h" #include "test/mocks/secret/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/transport_socket_factory_context.h" #include "test/mocks/ssl/mocks.h" #include "test/test_common/environment.h" #include "test/test_common/simulated_time_system.h" diff --git a/test/extensions/transport_sockets/tls/ssl_certs_test.h b/test/extensions/transport_sockets/tls/ssl_certs_test.h index d6d450ca748d..843273acfcfa 100644 --- a/test/extensions/transport_sockets/tls/ssl_certs_test.h +++ b/test/extensions/transport_sockets/tls/ssl_certs_test.h @@ -1,6 +1,6 @@ #pragma once -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/transport_socket_factory_context.h" #include "test/test_common/environment.h" #include "test/test_common/simulated_time_system.h" diff --git a/test/extensions/transport_sockets/tls/ssl_socket_test.cc b/test/extensions/transport_sockets/tls/ssl_socket_test.cc index fc84d1e61e1b..29705379547a 100644 --- a/test/extensions/transport_sockets/tls/ssl_socket_test.cc +++ b/test/extensions/transport_sockets/tls/ssl_socket_test.cc @@ -32,9 +32,11 @@ #include "test/extensions/transport_sockets/tls/test_data/selfsigned_ecdsa_p256_cert_info.h" #include "test/extensions/transport_sockets/tls/test_private_key_method_provider.h" #include "test/mocks/buffer/mocks.h" +#include "test/mocks/init/mocks.h" +#include "test/mocks/local_info/mocks.h" #include "test/mocks/network/mocks.h" #include "test/mocks/secret/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/transport_socket_factory_context.h" #include "test/mocks/ssl/mocks.h" #include "test/mocks/stats/mocks.h" #include "test/test_common/environment.h" From cba4261b7df8521af9c111aecd4006cba35f1506 Mon Sep 17 00:00:00 2001 From: foreseeable Date: Tue, 14 Jul 2020 20:12:45 +0000 Subject: [PATCH 627/909] test: refactor header inclusion to speed up building (for test/extensions/filters/http) (#12047) Commit Message: refactor header inclusion to speed up building Additional Description: Risk Level: low Testing: exsiting tests Docs Changes: N/A Release Notes: no Related Issues: #10917 Signed-off-by: Muge Chen --- test/extensions/filters/http/admission_control/BUILD | 4 ++-- .../admission_control/admission_control_filter_test.cc | 2 +- .../filters/http/admission_control/config_test.cc | 2 +- test/extensions/filters/http/aws_lambda/BUILD | 3 ++- test/extensions/filters/http/aws_lambda/config_test.cc | 3 ++- test/extensions/filters/http/aws_request_signing/BUILD | 2 +- .../filters/http/aws_request_signing/config_test.cc | 2 +- test/extensions/filters/http/buffer/BUILD | 3 ++- test/extensions/filters/http/buffer/config_test.cc | 3 ++- 
test/extensions/filters/http/cache/BUILD | 4 ++-- test/extensions/filters/http/cache/cache_filter_test.cc | 2 +- test/extensions/filters/http/cache/config_test.cc | 2 +- test/extensions/filters/http/common/BUILD | 3 ++- test/extensions/filters/http/common/fuzz/BUILD | 2 +- test/extensions/filters/http/common/fuzz/uber_filter.h | 2 +- test/extensions/filters/http/common/jwks_fetcher_test.cc | 1 + test/extensions/filters/http/common/mock.h | 2 +- test/extensions/filters/http/compressor/BUILD | 2 +- test/extensions/filters/http/compressor/config_test.cc | 2 +- test/extensions/filters/http/decompressor/BUILD | 2 +- .../decompressor/decompressor_filter_integration_test.cc | 2 +- test/extensions/filters/http/dynamo/BUILD | 2 +- test/extensions/filters/http/dynamo/config_test.cc | 2 +- test/extensions/filters/http/ext_authz/BUILD | 2 +- test/extensions/filters/http/ext_authz/config_test.cc | 2 +- test/extensions/filters/http/fault/BUILD | 2 +- test/extensions/filters/http/fault/config_test.cc | 2 +- test/extensions/filters/http/grpc_http1_bridge/BUILD | 2 +- .../filters/http/grpc_http1_bridge/config_test.cc | 2 +- .../filters/http/grpc_http1_reverse_bridge/BUILD | 3 ++- .../http/grpc_http1_reverse_bridge/config_test.cc | 3 ++- test/extensions/filters/http/grpc_json_transcoder/BUILD | 2 +- .../filters/http/grpc_json_transcoder/config_test.cc | 2 +- test/extensions/filters/http/grpc_stats/BUILD | 2 +- test/extensions/filters/http/grpc_stats/config_test.cc | 2 +- test/extensions/filters/http/grpc_web/BUILD | 2 +- test/extensions/filters/http/grpc_web/config_test.cc | 2 +- test/extensions/filters/http/gzip/BUILD | 2 +- test/extensions/filters/http/gzip/gzip_filter_test.cc | 2 +- test/extensions/filters/http/header_to_metadata/BUILD | 3 ++- .../filters/http/header_to_metadata/config_test.cc | 3 ++- test/extensions/filters/http/health_check/BUILD | 4 ++-- test/extensions/filters/http/health_check/config_test.cc | 2 +- .../filters/http/health_check/health_check_test.cc | 2 +- test/extensions/filters/http/jwt_authn/BUILD | 9 +++++---- .../filters/http/jwt_authn/all_verifier_test.cc | 2 +- .../filters/http/jwt_authn/authenticator_test.cc | 2 +- .../filters/http/jwt_authn/filter_config_test.cc | 3 ++- .../filters/http/jwt_authn/filter_factory_test.cc | 2 +- .../filters/http/jwt_authn/provider_verifier_test.cc | 2 +- test/extensions/filters/http/lua/BUILD | 2 +- test/extensions/filters/http/lua/config_test.cc | 2 +- test/extensions/filters/http/original_src/BUILD | 2 +- .../original_src/original_src_config_factory_test.cc | 2 +- test/extensions/filters/http/ratelimit/BUILD | 3 ++- test/extensions/filters/http/ratelimit/config_test.cc | 3 ++- test/extensions/filters/http/rbac/BUILD | 3 ++- test/extensions/filters/http/rbac/config_test.cc | 3 ++- test/extensions/filters/http/router/BUILD | 2 +- test/extensions/filters/http/router/config_test.cc | 2 +- test/extensions/filters/http/squash/BUILD | 4 ++-- test/extensions/filters/http/squash/config_test.cc | 2 +- .../extensions/filters/http/squash/squash_filter_test.cc | 2 +- test/extensions/filters/http/tap/BUILD | 2 +- test/extensions/filters/http/tap/tap_filter_test.cc | 2 +- 65 files changed, 87 insertions(+), 71 deletions(-) diff --git a/test/extensions/filters/http/admission_control/BUILD b/test/extensions/filters/http/admission_control/BUILD index 301e4dd2f884..fea174f10b1c 100644 --- a/test/extensions/filters/http/admission_control/BUILD +++ b/test/extensions/filters/http/admission_control/BUILD @@ -21,7 +21,7 @@ envoy_extension_cc_test( 
"//source/common/http:headers_lib", "//source/extensions/filters/http/admission_control:admission_control_filter_lib", "//test/mocks/http:http_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/mocks/thread_local:thread_local_mocks", "//test/test_common:simulated_time_system_lib", "//test/test_common:utility_lib", @@ -38,7 +38,7 @@ envoy_extension_cc_test( "//source/common/http:headers_lib", "//source/extensions/filters/http/admission_control:admission_control_filter_lib", "//test/mocks/http:http_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/mocks/thread_local:thread_local_mocks", "//test/test_common:simulated_time_system_lib", "//test/test_common:utility_lib", diff --git a/test/extensions/filters/http/admission_control/admission_control_filter_test.cc b/test/extensions/filters/http/admission_control/admission_control_filter_test.cc index 5774f5beb7b0..d8ba63e72382 100644 --- a/test/extensions/filters/http/admission_control/admission_control_filter_test.cc +++ b/test/extensions/filters/http/admission_control/admission_control_filter_test.cc @@ -12,7 +12,7 @@ #include "extensions/filters/http/admission_control/thread_local_controller.h" #include "test/mocks/runtime/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/mocks/thread_local/mocks.h" #include "test/test_common/simulated_time_system.h" #include "test/test_common/utility.h" diff --git a/test/extensions/filters/http/admission_control/config_test.cc b/test/extensions/filters/http/admission_control/config_test.cc index 49432e4a6605..cd7b6b212f1e 100644 --- a/test/extensions/filters/http/admission_control/config_test.cc +++ b/test/extensions/filters/http/admission_control/config_test.cc @@ -9,7 +9,7 @@ #include "extensions/filters/http/admission_control/evaluators/success_criteria_evaluator.h" #include "test/mocks/runtime/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/mocks/thread_local/mocks.h" #include "test/test_common/simulated_time_system.h" #include "test/test_common/utility.h" diff --git a/test/extensions/filters/http/aws_lambda/BUILD b/test/extensions/filters/http/aws_lambda/BUILD index 4d14f2477630..5c35774f4438 100644 --- a/test/extensions/filters/http/aws_lambda/BUILD +++ b/test/extensions/filters/http/aws_lambda/BUILD @@ -54,7 +54,8 @@ envoy_extension_cc_test( extension_name = "envoy.filters.http.aws_lambda", deps = [ "//source/extensions/filters/http/aws_lambda:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", + "//test/mocks/server:instance_mocks", "@envoy_api//envoy/extensions/filters/http/aws_lambda/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/filters/http/aws_lambda/config_test.cc b/test/extensions/filters/http/aws_lambda/config_test.cc index 178fe1ff7c6a..752f763292b6 100644 --- a/test/extensions/filters/http/aws_lambda/config_test.cc +++ b/test/extensions/filters/http/aws_lambda/config_test.cc @@ -4,7 +4,8 @@ #include "extensions/filters/http/aws_lambda/aws_lambda_filter.h" #include "extensions/filters/http/aws_lambda/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" +#include "test/mocks/server/instance.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" diff --git a/test/extensions/filters/http/aws_request_signing/BUILD b/test/extensions/filters/http/aws_request_signing/BUILD index 
7496366a2b72..25301010ded5 100644 --- a/test/extensions/filters/http/aws_request_signing/BUILD +++ b/test/extensions/filters/http/aws_request_signing/BUILD @@ -28,7 +28,7 @@ envoy_extension_cc_test( extension_name = "envoy.filters.http.aws_request_signing", deps = [ "//source/extensions/filters/http/aws_request_signing:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "@envoy_api//envoy/extensions/filters/http/aws_request_signing/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/filters/http/aws_request_signing/config_test.cc b/test/extensions/filters/http/aws_request_signing/config_test.cc index 424524b999ef..7d86e759b77e 100644 --- a/test/extensions/filters/http/aws_request_signing/config_test.cc +++ b/test/extensions/filters/http/aws_request_signing/config_test.cc @@ -3,7 +3,7 @@ #include "extensions/filters/http/aws_request_signing/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" diff --git a/test/extensions/filters/http/buffer/BUILD b/test/extensions/filters/http/buffer/BUILD index 900ab38a6747..d04528928c19 100644 --- a/test/extensions/filters/http/buffer/BUILD +++ b/test/extensions/filters/http/buffer/BUILD @@ -52,7 +52,8 @@ envoy_extension_cc_test( extension_name = "envoy.filters.http.buffer", deps = [ "//source/extensions/filters/http/buffer:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", + "//test/mocks/server:instance_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/extensions/filters/http/buffer/v3:pkg_cc_proto", ], diff --git a/test/extensions/filters/http/buffer/config_test.cc b/test/extensions/filters/http/buffer/config_test.cc index a3b7b9e8142d..268fe142931b 100644 --- a/test/extensions/filters/http/buffer/config_test.cc +++ b/test/extensions/filters/http/buffer/config_test.cc @@ -4,7 +4,8 @@ #include "extensions/filters/http/buffer/buffer_filter.h" #include "extensions/filters/http/buffer/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" +#include "test/mocks/server/instance.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/http/cache/BUILD b/test/extensions/filters/http/cache/BUILD index b43da2f3d190..eb924a70e9bb 100644 --- a/test/extensions/filters/http/cache/BUILD +++ b/test/extensions/filters/http/cache/BUILD @@ -40,7 +40,7 @@ envoy_extension_cc_test( deps = [ "//source/extensions/filters/http/cache:cache_filter_lib", "//source/extensions/filters/http/cache/simple_http_cache:simple_http_cache_lib", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/test_common:simulated_time_system_lib", "//test/test_common:utility_lib", ], @@ -64,7 +64,7 @@ envoy_extension_cc_test( "//source/extensions/filters/http/cache:config", "//source/extensions/filters/http/cache/simple_http_cache:simple_http_cache_lib", "//test/mocks/http:http_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/test_common:utility_lib", ], ) diff --git a/test/extensions/filters/http/cache/cache_filter_test.cc b/test/extensions/filters/http/cache/cache_filter_test.cc index 99d79a1e478a..67bcd808f7ca 100644 --- a/test/extensions/filters/http/cache/cache_filter_test.cc +++ b/test/extensions/filters/http/cache/cache_filter_test.cc @@ -1,7 +1,7 @@ #include "extensions/filters/http/cache/cache_filter.h" #include 
"extensions/filters/http/cache/simple_http_cache/simple_http_cache.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/test_common/simulated_time_system.h" #include "test/test_common/utility.h" diff --git a/test/extensions/filters/http/cache/config_test.cc b/test/extensions/filters/http/cache/config_test.cc index c314897c33a4..2b05de007256 100644 --- a/test/extensions/filters/http/cache/config_test.cc +++ b/test/extensions/filters/http/cache/config_test.cc @@ -3,7 +3,7 @@ #include "extensions/filters/http/cache/cache_filter.h" #include "extensions/filters/http/cache/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/test_common/utility.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/http/common/BUILD b/test/extensions/filters/http/common/BUILD index 9994b2f0f218..9c5b60eb9789 100644 --- a/test/extensions/filters/http/common/BUILD +++ b/test/extensions/filters/http/common/BUILD @@ -23,7 +23,7 @@ envoy_cc_test_library( ], deps = [ "//source/extensions/filters/http/common:jwks_fetcher_lib", - "//test/mocks/server:server_mocks", + "//test/mocks/upstream:upstream_mocks", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], ) @@ -46,6 +46,7 @@ envoy_extension_cc_test( "//source/extensions/filters/http/common:jwks_fetcher_lib", "//test/extensions/filters/http/common:mock_lib", "//test/mocks/http:http_mocks", + "//test/mocks/server:factory_context_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], diff --git a/test/extensions/filters/http/common/fuzz/BUILD b/test/extensions/filters/http/common/fuzz/BUILD index ccad6764604a..cd99336234b8 100644 --- a/test/extensions/filters/http/common/fuzz/BUILD +++ b/test/extensions/filters/http/common/fuzz/BUILD @@ -40,7 +40,7 @@ envoy_cc_test_library( "//test/fuzz:utility_lib", "//test/mocks/buffer:buffer_mocks", "//test/mocks/http:http_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/proto:bookstore_proto_cc_proto", "@envoy_api//envoy/extensions/filters/http/grpc_json_transcoder/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/http/jwt_authn/v3:pkg_cc_proto", diff --git a/test/extensions/filters/http/common/fuzz/uber_filter.h b/test/extensions/filters/http/common/fuzz/uber_filter.h index 772b94320b51..07aa3322de79 100644 --- a/test/extensions/filters/http/common/fuzz/uber_filter.h +++ b/test/extensions/filters/http/common/fuzz/uber_filter.h @@ -1,7 +1,7 @@ #include "test/fuzz/utility.h" #include "test/mocks/buffer/mocks.h" #include "test/mocks/http/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/mocks/stream_info/mocks.h" namespace Envoy { diff --git a/test/extensions/filters/http/common/jwks_fetcher_test.cc b/test/extensions/filters/http/common/jwks_fetcher_test.cc index 6cfcd8f14af4..79a04018e8cb 100644 --- a/test/extensions/filters/http/common/jwks_fetcher_test.cc +++ b/test/extensions/filters/http/common/jwks_fetcher_test.cc @@ -10,6 +10,7 @@ #include "test/extensions/filters/http/common/mock.h" #include "test/mocks/http/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/test_common/utility.h" using envoy::config::core::v3::HttpUri; diff --git a/test/extensions/filters/http/common/mock.h b/test/extensions/filters/http/common/mock.h index 5f8ef9900430..804c00b0eb66 100644 --- a/test/extensions/filters/http/common/mock.h +++ 
b/test/extensions/filters/http/common/mock.h @@ -4,7 +4,7 @@ #include "extensions/filters/http/common/jwks_fetcher.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/upstream/mocks.h" #include "gmock/gmock.h" diff --git a/test/extensions/filters/http/compressor/BUILD b/test/extensions/filters/http/compressor/BUILD index a76be193b710..6c9a99ca3db9 100644 --- a/test/extensions/filters/http/compressor/BUILD +++ b/test/extensions/filters/http/compressor/BUILD @@ -58,7 +58,7 @@ envoy_extension_cc_test( ":mock_config_cc_proto", "//source/extensions/filters/http/compressor:config", "//test/mocks/runtime:runtime_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/test_common:utility_lib", ], ) diff --git a/test/extensions/filters/http/compressor/config_test.cc b/test/extensions/filters/http/compressor/config_test.cc index c3f865108f81..cea48bc00cff 100644 --- a/test/extensions/filters/http/compressor/config_test.cc +++ b/test/extensions/filters/http/compressor/config_test.cc @@ -1,6 +1,6 @@ #include "extensions/filters/http/compressor/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/http/decompressor/BUILD b/test/extensions/filters/http/decompressor/BUILD index da72c1dc7a9e..cb78711b4020 100644 --- a/test/extensions/filters/http/decompressor/BUILD +++ b/test/extensions/filters/http/decompressor/BUILD @@ -42,7 +42,7 @@ envoy_extension_cc_test( "//source/extensions/compression/gzip/decompressor:config", "//source/extensions/filters/http/decompressor:config", "//test/integration:http_integration_lib", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/test_common:simulated_time_system_lib", "//test/test_common:utility_lib", ], diff --git a/test/extensions/filters/http/decompressor/decompressor_filter_integration_test.cc b/test/extensions/filters/http/decompressor/decompressor_filter_integration_test.cc index 29600b318018..c924bb4a5fc2 100644 --- a/test/extensions/filters/http/decompressor/decompressor_filter_integration_test.cc +++ b/test/extensions/filters/http/decompressor/decompressor_filter_integration_test.cc @@ -3,7 +3,7 @@ #include "extensions/compression/gzip/compressor/config.h" #include "test/integration/http_integration.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/test_common/simulated_time_system.h" #include "test/test_common/utility.h" diff --git a/test/extensions/filters/http/dynamo/BUILD b/test/extensions/filters/http/dynamo/BUILD index e1761811ec40..1fb51d2ffff2 100644 --- a/test/extensions/filters/http/dynamo/BUILD +++ b/test/extensions/filters/http/dynamo/BUILD @@ -56,7 +56,7 @@ envoy_extension_cc_test( extension_name = "envoy.filters.http.dynamo", deps = [ "//source/extensions/filters/http/dynamo:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "@envoy_api//envoy/extensions/filters/http/dynamo/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/filters/http/dynamo/config_test.cc b/test/extensions/filters/http/dynamo/config_test.cc index 54d5dbc6f143..e1ed56fbcb8a 100644 --- a/test/extensions/filters/http/dynamo/config_test.cc +++ b/test/extensions/filters/http/dynamo/config_test.cc @@ -3,7 +3,7 @@ #include "extensions/filters/http/dynamo/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "gmock/gmock.h" #include 
"gtest/gtest.h" diff --git a/test/extensions/filters/http/ext_authz/BUILD b/test/extensions/filters/http/ext_authz/BUILD index f1524fddc23b..d6e0ff429e5a 100644 --- a/test/extensions/filters/http/ext_authz/BUILD +++ b/test/extensions/filters/http/ext_authz/BUILD @@ -47,7 +47,7 @@ envoy_extension_cc_test( extension_name = "envoy.filters.http.ext_authz", deps = [ "//source/extensions/filters/http/ext_authz:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/http/ext_authz/v3:pkg_cc_proto", ], diff --git a/test/extensions/filters/http/ext_authz/config_test.cc b/test/extensions/filters/http/ext_authz/config_test.cc index 2081bc00bd11..7a3f011032fe 100644 --- a/test/extensions/filters/http/ext_authz/config_test.cc +++ b/test/extensions/filters/http/ext_authz/config_test.cc @@ -5,7 +5,7 @@ #include "extensions/filters/http/ext_authz/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" diff --git a/test/extensions/filters/http/fault/BUILD b/test/extensions/filters/http/fault/BUILD index 7938aad25d2c..f545e439b5c8 100644 --- a/test/extensions/filters/http/fault/BUILD +++ b/test/extensions/filters/http/fault/BUILD @@ -44,7 +44,7 @@ envoy_extension_cc_test( deps = [ ":utility_lib", "//source/extensions/filters/http/fault:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "@envoy_api//envoy/extensions/filters/http/fault/v3:pkg_cc_proto", "@envoy_api//envoy/type/v3:pkg_cc_proto", ], diff --git a/test/extensions/filters/http/fault/config_test.cc b/test/extensions/filters/http/fault/config_test.cc index cc70285a578d..080e5a3e41de 100644 --- a/test/extensions/filters/http/fault/config_test.cc +++ b/test/extensions/filters/http/fault/config_test.cc @@ -5,7 +5,7 @@ #include "extensions/filters/http/fault/config.h" #include "test/extensions/filters/http/fault/utility.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/http/grpc_http1_bridge/BUILD b/test/extensions/filters/http/grpc_http1_bridge/BUILD index 946a6c189c3f..d8ee636ac045 100644 --- a/test/extensions/filters/http/grpc_http1_bridge/BUILD +++ b/test/extensions/filters/http/grpc_http1_bridge/BUILD @@ -33,6 +33,6 @@ envoy_extension_cc_test( extension_name = "envoy.filters.http.grpc_http1_bridge", deps = [ "//source/extensions/filters/http/grpc_http1_bridge:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", ], ) diff --git a/test/extensions/filters/http/grpc_http1_bridge/config_test.cc b/test/extensions/filters/http/grpc_http1_bridge/config_test.cc index c7894d2390ae..8c890afcaf7e 100644 --- a/test/extensions/filters/http/grpc_http1_bridge/config_test.cc +++ b/test/extensions/filters/http/grpc_http1_bridge/config_test.cc @@ -1,6 +1,6 @@ #include "extensions/filters/http/grpc_http1_bridge/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/http/grpc_http1_reverse_bridge/BUILD b/test/extensions/filters/http/grpc_http1_reverse_bridge/BUILD index b50e0b1295dd..71291b0623e7 100644 --- a/test/extensions/filters/http/grpc_http1_reverse_bridge/BUILD +++ 
b/test/extensions/filters/http/grpc_http1_reverse_bridge/BUILD @@ -47,7 +47,8 @@ envoy_extension_cc_test( extension_name = "envoy.filters.http.grpc_http1_reverse_bridge", deps = [ "//source/extensions/filters/http/grpc_http1_reverse_bridge:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", + "//test/mocks/server:instance_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3:pkg_cc_proto", ], diff --git a/test/extensions/filters/http/grpc_http1_reverse_bridge/config_test.cc b/test/extensions/filters/http/grpc_http1_reverse_bridge/config_test.cc index ea47b673187f..33acd2fef256 100644 --- a/test/extensions/filters/http/grpc_http1_reverse_bridge/config_test.cc +++ b/test/extensions/filters/http/grpc_http1_reverse_bridge/config_test.cc @@ -3,7 +3,8 @@ #include "extensions/filters/http/grpc_http1_reverse_bridge/config.h" #include "extensions/filters/http/grpc_http1_reverse_bridge/filter.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" +#include "test/mocks/server/instance.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/http/grpc_json_transcoder/BUILD b/test/extensions/filters/http/grpc_json_transcoder/BUILD index 6f2a2deac803..51068ae50e99 100644 --- a/test/extensions/filters/http/grpc_json_transcoder/BUILD +++ b/test/extensions/filters/http/grpc_json_transcoder/BUILD @@ -78,7 +78,7 @@ envoy_extension_cc_test( extension_name = "envoy.filters.http.grpc_json_transcoder", deps = [ "//source/extensions/filters/http/grpc_json_transcoder:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "@envoy_api//envoy/extensions/filters/http/grpc_json_transcoder/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/filters/http/grpc_json_transcoder/config_test.cc b/test/extensions/filters/http/grpc_json_transcoder/config_test.cc index 601b627aecd6..164649e3228a 100644 --- a/test/extensions/filters/http/grpc_json_transcoder/config_test.cc +++ b/test/extensions/filters/http/grpc_json_transcoder/config_test.cc @@ -3,7 +3,7 @@ #include "extensions/filters/http/grpc_json_transcoder/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/http/grpc_stats/BUILD b/test/extensions/filters/http/grpc_stats/BUILD index 03f148c20036..df0dd7b0f877 100644 --- a/test/extensions/filters/http/grpc_stats/BUILD +++ b/test/extensions/filters/http/grpc_stats/BUILD @@ -20,7 +20,7 @@ envoy_extension_cc_test( "//source/extensions/filters/http/grpc_stats:config", "//test/common/buffer:utility_lib", "//test/common/stream_info:test_util", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/test_common:logging_lib", "@envoy_api//envoy/extensions/filters/http/grpc_stats/v3:pkg_cc_proto", ], diff --git a/test/extensions/filters/http/grpc_stats/config_test.cc b/test/extensions/filters/http/grpc_stats/config_test.cc index 3b303fe0a8ea..6c4d71292800 100644 --- a/test/extensions/filters/http/grpc_stats/config_test.cc +++ b/test/extensions/filters/http/grpc_stats/config_test.cc @@ -7,7 +7,7 @@ #include "test/common/buffer/utility.h" #include "test/common/stream_info/test_util.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/test_common/logging.h" #include "gmock/gmock.h" diff --git 
a/test/extensions/filters/http/grpc_web/BUILD b/test/extensions/filters/http/grpc_web/BUILD index 5cf7788382b4..cf5187b68455 100644 --- a/test/extensions/filters/http/grpc_web/BUILD +++ b/test/extensions/filters/http/grpc_web/BUILD @@ -31,7 +31,7 @@ envoy_extension_cc_test( extension_name = "envoy.filters.http.grpc_web", deps = [ "//source/extensions/filters/http/grpc_web:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", ], ) diff --git a/test/extensions/filters/http/grpc_web/config_test.cc b/test/extensions/filters/http/grpc_web/config_test.cc index 76d1e435d4ab..0ad9da56c86f 100644 --- a/test/extensions/filters/http/grpc_web/config_test.cc +++ b/test/extensions/filters/http/grpc_web/config_test.cc @@ -1,6 +1,6 @@ #include "extensions/filters/http/grpc_web/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/http/gzip/BUILD b/test/extensions/filters/http/gzip/BUILD index 2126067c5bee..28f6d0d14792 100644 --- a/test/extensions/filters/http/gzip/BUILD +++ b/test/extensions/filters/http/gzip/BUILD @@ -23,7 +23,7 @@ envoy_extension_cc_test( "//source/extensions/filters/http/gzip:gzip_filter_lib", "//test/mocks/http:http_mocks", "//test/mocks/runtime:runtime_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/test_common:logging_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/extensions/filters/http/gzip/v3:pkg_cc_proto", diff --git a/test/extensions/filters/http/gzip/gzip_filter_test.cc b/test/extensions/filters/http/gzip/gzip_filter_test.cc index faac3763cb7b..7f92d1c06e46 100644 --- a/test/extensions/filters/http/gzip/gzip_filter_test.cc +++ b/test/extensions/filters/http/gzip/gzip_filter_test.cc @@ -12,7 +12,7 @@ #include "test/mocks/http/mocks.h" #include "test/mocks/runtime/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/mocks/stats/mocks.h" #include "test/test_common/logging.h" #include "test/test_common/utility.h" diff --git a/test/extensions/filters/http/header_to_metadata/BUILD b/test/extensions/filters/http/header_to_metadata/BUILD index 80fd87fb9605..9b976fe7f772 100644 --- a/test/extensions/filters/http/header_to_metadata/BUILD +++ b/test/extensions/filters/http/header_to_metadata/BUILD @@ -29,7 +29,8 @@ envoy_extension_cc_test( extension_name = "envoy.filters.http.header_to_metadata", deps = [ "//source/extensions/filters/http/header_to_metadata:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", + "//test/mocks/server:instance_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/extensions/filters/http/header_to_metadata/v3:pkg_cc_proto", ], diff --git a/test/extensions/filters/http/header_to_metadata/config_test.cc b/test/extensions/filters/http/header_to_metadata/config_test.cc index a9e108371dfb..f10d78f63604 100644 --- a/test/extensions/filters/http/header_to_metadata/config_test.cc +++ b/test/extensions/filters/http/header_to_metadata/config_test.cc @@ -6,7 +6,8 @@ #include "extensions/filters/http/header_to_metadata/config.h" #include "extensions/filters/http/header_to_metadata/header_to_metadata_filter.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" +#include "test/mocks/server/instance.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" diff --git 
a/test/extensions/filters/http/health_check/BUILD b/test/extensions/filters/http/health_check/BUILD index 04852922e7fb..30beabc500f0 100644 --- a/test/extensions/filters/http/health_check/BUILD +++ b/test/extensions/filters/http/health_check/BUILD @@ -19,7 +19,7 @@ envoy_extension_cc_test( "//source/common/buffer:buffer_lib", "//source/common/http:header_utility_lib", "//source/extensions/filters/http/health_check:health_check_lib", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/config/route/v3:pkg_cc_proto", ], @@ -31,7 +31,7 @@ envoy_extension_cc_test( extension_name = "envoy.filters.http.health_check", deps = [ "//source/extensions/filters/http/health_check:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/config/route/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/http/health_check/v3:pkg_cc_proto", diff --git a/test/extensions/filters/http/health_check/config_test.cc b/test/extensions/filters/http/health_check/config_test.cc index 1a66473ba05a..dcf4e37fb0c8 100644 --- a/test/extensions/filters/http/health_check/config_test.cc +++ b/test/extensions/filters/http/health_check/config_test.cc @@ -6,7 +6,7 @@ #include "extensions/filters/http/health_check/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" diff --git a/test/extensions/filters/http/health_check/health_check_test.cc b/test/extensions/filters/http/health_check/health_check_test.cc index 16393b403359..c8f4da639e5e 100644 --- a/test/extensions/filters/http/health_check/health_check_test.cc +++ b/test/extensions/filters/http/health_check/health_check_test.cc @@ -9,7 +9,7 @@ #include "extensions/filters/http/health_check/health_check.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/mocks/upstream/cluster_info.h" #include "test/test_common/printers.h" #include "test/test_common/utility.h" diff --git a/test/extensions/filters/http/jwt_authn/BUILD b/test/extensions/filters/http/jwt_authn/BUILD index 14b745e50b81..dd23ce92ae54 100644 --- a/test/extensions/filters/http/jwt_authn/BUILD +++ b/test/extensions/filters/http/jwt_authn/BUILD @@ -61,7 +61,8 @@ envoy_extension_cc_test( "//source/common/stream_info:filter_state_lib", "//source/extensions/filters/http/jwt_authn:config", "//test/extensions/filters/http/jwt_authn:test_common_lib", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", + "//test/mocks/server:instance_mocks", "@envoy_api//envoy/extensions/filters/http/jwt_authn/v3:pkg_cc_proto", ], ) @@ -73,7 +74,7 @@ envoy_extension_cc_test( deps = [ "//source/extensions/filters/http/jwt_authn:config", "//test/extensions/filters/http/jwt_authn:test_common_lib", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "@envoy_api//envoy/extensions/filters/http/jwt_authn/v3:pkg_cc_proto", ], ) @@ -105,7 +106,7 @@ envoy_extension_cc_test( "//source/extensions/filters/http/jwt_authn:matchers_lib", "//test/extensions/filters/http/common:mock_lib", "//test/extensions/filters/http/jwt_authn:test_common_lib", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", 
"@envoy_api//envoy/extensions/filters/http/jwt_authn/v3:pkg_cc_proto", @@ -169,7 +170,7 @@ envoy_extension_cc_test( ":test_common_lib", "//source/extensions/filters/http/jwt_authn:filter_config_interface", "//source/extensions/filters/http/jwt_authn:matchers_lib", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/extensions/filters/http/jwt_authn/v3:pkg_cc_proto", ], diff --git a/test/extensions/filters/http/jwt_authn/all_verifier_test.cc b/test/extensions/filters/http/jwt_authn/all_verifier_test.cc index 148550c1a2eb..9f9d1685e239 100644 --- a/test/extensions/filters/http/jwt_authn/all_verifier_test.cc +++ b/test/extensions/filters/http/jwt_authn/all_verifier_test.cc @@ -5,7 +5,7 @@ #include "test/extensions/filters/http/jwt_authn/mock.h" #include "test/extensions/filters/http/jwt_authn/test_common.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/test_common/utility.h" #include "absl/strings/string_view.h" diff --git a/test/extensions/filters/http/jwt_authn/authenticator_test.cc b/test/extensions/filters/http/jwt_authn/authenticator_test.cc index 349fe3045ceb..40850b1bd31f 100644 --- a/test/extensions/filters/http/jwt_authn/authenticator_test.cc +++ b/test/extensions/filters/http/jwt_authn/authenticator_test.cc @@ -11,7 +11,7 @@ #include "test/extensions/filters/http/common/mock.h" #include "test/extensions/filters/http/jwt_authn/mock.h" #include "test/extensions/filters/http/jwt_authn/test_common.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/test_common/utility.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/http/jwt_authn/filter_config_test.cc b/test/extensions/filters/http/jwt_authn/filter_config_test.cc index 662b8a97a87e..45da3fea600f 100644 --- a/test/extensions/filters/http/jwt_authn/filter_config_test.cc +++ b/test/extensions/filters/http/jwt_authn/filter_config_test.cc @@ -6,7 +6,8 @@ #include "extensions/filters/http/jwt_authn/filter_config.h" #include "test/extensions/filters/http/jwt_authn/test_common.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" +#include "test/mocks/server/instance.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/http/jwt_authn/filter_factory_test.cc b/test/extensions/filters/http/jwt_authn/filter_factory_test.cc index 3095be69e32c..ea89ea616225 100644 --- a/test/extensions/filters/http/jwt_authn/filter_factory_test.cc +++ b/test/extensions/filters/http/jwt_authn/filter_factory_test.cc @@ -4,7 +4,7 @@ #include "extensions/filters/http/jwt_authn/filter_factory.h" #include "test/extensions/filters/http/jwt_authn/test_common.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/http/jwt_authn/provider_verifier_test.cc b/test/extensions/filters/http/jwt_authn/provider_verifier_test.cc index ef6f24de8785..effc6ae0f7b2 100644 --- a/test/extensions/filters/http/jwt_authn/provider_verifier_test.cc +++ b/test/extensions/filters/http/jwt_authn/provider_verifier_test.cc @@ -5,7 +5,7 @@ #include "test/extensions/filters/http/jwt_authn/mock.h" #include "test/extensions/filters/http/jwt_authn/test_common.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/mocks/upstream/mocks.h" #include 
"test/test_common/utility.h" diff --git a/test/extensions/filters/http/lua/BUILD b/test/extensions/filters/http/lua/BUILD index cbcfc8223c84..dcb41ab264e7 100644 --- a/test/extensions/filters/http/lua/BUILD +++ b/test/extensions/filters/http/lua/BUILD @@ -67,7 +67,7 @@ envoy_extension_cc_test( tags = ["skip_on_windows"], deps = [ "//source/extensions/filters/http/lua:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/extensions/filters/http/lua/v3:pkg_cc_proto", ], diff --git a/test/extensions/filters/http/lua/config_test.cc b/test/extensions/filters/http/lua/config_test.cc index 5addb9ca2693..c01772b2a222 100644 --- a/test/extensions/filters/http/lua/config_test.cc +++ b/test/extensions/filters/http/lua/config_test.cc @@ -5,7 +5,7 @@ #include "extensions/filters/http/lua/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" diff --git a/test/extensions/filters/http/original_src/BUILD b/test/extensions/filters/http/original_src/BUILD index 411bbe6d25a7..86ba61d9d6bf 100644 --- a/test/extensions/filters/http/original_src/BUILD +++ b/test/extensions/filters/http/original_src/BUILD @@ -29,7 +29,7 @@ envoy_extension_cc_test( "//source/extensions/filters/http/original_src:config", "//source/extensions/filters/http/original_src:config_lib", "//source/extensions/filters/http/original_src:original_src_lib", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "@envoy_api//envoy/extensions/filters/http/original_src/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/filters/http/original_src/original_src_config_factory_test.cc b/test/extensions/filters/http/original_src/original_src_config_factory_test.cc index 7f52f2e566e5..4967371847dd 100644 --- a/test/extensions/filters/http/original_src/original_src_config_factory_test.cc +++ b/test/extensions/filters/http/original_src/original_src_config_factory_test.cc @@ -5,7 +5,7 @@ #include "extensions/filters/http/original_src/original_src.h" #include "extensions/filters/http/original_src/original_src_config_factory.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/http/ratelimit/BUILD b/test/extensions/filters/http/ratelimit/BUILD index bfa69da7e296..3f3a1e47b70f 100644 --- a/test/extensions/filters/http/ratelimit/BUILD +++ b/test/extensions/filters/http/ratelimit/BUILD @@ -40,7 +40,8 @@ envoy_extension_cc_test( extension_name = "envoy.filters.http.ratelimit", deps = [ "//source/extensions/filters/http/ratelimit:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", + "//test/mocks/server:instance_mocks", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/http/ratelimit/v3:pkg_cc_proto", ], diff --git a/test/extensions/filters/http/ratelimit/config_test.cc b/test/extensions/filters/http/ratelimit/config_test.cc index fa0a8cf20769..a611082d4383 100644 --- a/test/extensions/filters/http/ratelimit/config_test.cc +++ b/test/extensions/filters/http/ratelimit/config_test.cc @@ -4,7 +4,8 @@ #include "extensions/filters/http/ratelimit/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" +#include "test/mocks/server/instance.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git 
a/test/extensions/filters/http/rbac/BUILD b/test/extensions/filters/http/rbac/BUILD index 3ce6b5fd175f..f08fffcd9683 100644 --- a/test/extensions/filters/http/rbac/BUILD +++ b/test/extensions/filters/http/rbac/BUILD @@ -18,7 +18,8 @@ envoy_extension_cc_test( extension_name = "envoy.filters.http.rbac", deps = [ "//source/extensions/filters/http/rbac:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", + "//test/mocks/server:instance_mocks", "@envoy_api//envoy/config/rbac/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/http/rbac/v3:pkg_cc_proto", ], diff --git a/test/extensions/filters/http/rbac/config_test.cc b/test/extensions/filters/http/rbac/config_test.cc index 7c296bc5f718..318a6e2f1033 100644 --- a/test/extensions/filters/http/rbac/config_test.cc +++ b/test/extensions/filters/http/rbac/config_test.cc @@ -5,7 +5,8 @@ #include "extensions/filters/common/rbac/engine.h" #include "extensions/filters/http/rbac/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" +#include "test/mocks/server/instance.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/http/router/BUILD b/test/extensions/filters/http/router/BUILD index 46aaecbb7ae2..1410160daf46 100644 --- a/test/extensions/filters/http/router/BUILD +++ b/test/extensions/filters/http/router/BUILD @@ -17,7 +17,7 @@ envoy_extension_cc_test( extension_name = "envoy.filters.http.router", deps = [ "//source/extensions/filters/http/router:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/extensions/filters/http/router/v3:pkg_cc_proto", ], diff --git a/test/extensions/filters/http/router/config_test.cc b/test/extensions/filters/http/router/config_test.cc index 68ebadaa6ef9..34aa098a76ec 100644 --- a/test/extensions/filters/http/router/config_test.cc +++ b/test/extensions/filters/http/router/config_test.cc @@ -6,7 +6,7 @@ #include "extensions/filters/http/router/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" diff --git a/test/extensions/filters/http/squash/BUILD b/test/extensions/filters/http/squash/BUILD index f98a5695d5ad..9ddeb07480f0 100644 --- a/test/extensions/filters/http/squash/BUILD +++ b/test/extensions/filters/http/squash/BUILD @@ -21,7 +21,7 @@ envoy_extension_cc_test( "//source/common/stats:stats_lib", "//source/extensions/filters/http/squash:squash_filter_lib", "//test/mocks/http:http_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/mocks/upstream:upstream_mocks", "//test/test_common:environment_lib", "@envoy_api//envoy/extensions/filters/http/squash/v3:pkg_cc_proto", @@ -46,7 +46,7 @@ envoy_extension_cc_test( extension_name = "envoy.filters.http.squash", deps = [ "//source/extensions/filters/http/squash:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/extensions/filters/http/squash/v3:pkg_cc_proto", ], diff --git a/test/extensions/filters/http/squash/config_test.cc b/test/extensions/filters/http/squash/config_test.cc index 62e7a18826e5..f7f9dda19b04 100644 --- a/test/extensions/filters/http/squash/config_test.cc +++ b/test/extensions/filters/http/squash/config_test.cc @@ -3,7 +3,7 @@ #include "extensions/filters/http/squash/config.h" -#include 
"test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" diff --git a/test/extensions/filters/http/squash/squash_filter_test.cc b/test/extensions/filters/http/squash/squash_filter_test.cc index d2cb53bf52b6..84d6ac9cb41c 100644 --- a/test/extensions/filters/http/squash/squash_filter_test.cc +++ b/test/extensions/filters/http/squash/squash_filter_test.cc @@ -9,7 +9,7 @@ #include "extensions/filters/http/squash/squash_filter.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/mocks/upstream/mocks.h" #include "test/test_common/environment.h" #include "test/test_common/utility.h" diff --git a/test/extensions/filters/http/tap/BUILD b/test/extensions/filters/http/tap/BUILD index 414ce2effd3e..a6b1a6967278 100644 --- a/test/extensions/filters/http/tap/BUILD +++ b/test/extensions/filters/http/tap/BUILD @@ -29,7 +29,7 @@ envoy_extension_cc_test( "//source/extensions/filters/http/tap:config", "//source/extensions/filters/http/tap:tap_config_interface", "//test/mocks/http:http_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/mocks/stream_info:stream_info_mocks", "//test/test_common:utility_lib", ], diff --git a/test/extensions/filters/http/tap/tap_filter_test.cc b/test/extensions/filters/http/tap/tap_filter_test.cc index 4da97e1af579..c4d229fd192d 100644 --- a/test/extensions/filters/http/tap/tap_filter_test.cc +++ b/test/extensions/filters/http/tap/tap_filter_test.cc @@ -3,7 +3,7 @@ #include "test/extensions/filters/http/tap/common.h" #include "test/mocks/http/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/mocks/stream_info/mocks.h" #include "test/test_common/utility.h" From 74369e16d21206b83ddb1635eebaac507f7df214 Mon Sep 17 00:00:00 2001 From: ankatare Date: Wed, 15 Jul 2020 01:44:41 +0530 Subject: [PATCH 628/909] v2 to v3 changes for function constructConfigFromV2Yaml (#12072) v2 to v3 changes for function constructConfigFromV2Yaml under file "test/common/tcp_proxy/tcp_proxy_test.cc" Additional Description: This PR is duplicate of PR #11923 Part of #10843 Signed-off-by: Abhay Narayan Katare --- test/common/tcp_proxy/tcp_proxy_test.cc | 33 +++++++++++++------------ 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/test/common/tcp_proxy/tcp_proxy_test.cc b/test/common/tcp_proxy/tcp_proxy_test.cc index d8673ad8b1d2..4db82a3fa5f3 100644 --- a/test/common/tcp_proxy/tcp_proxy_test.cc +++ b/test/common/tcp_proxy/tcp_proxy_test.cc @@ -60,10 +60,11 @@ Config constructConfigFromYaml(const std::string& yaml, return Config(tcp_proxy, context); } -Config constructConfigFromV2Yaml(const std::string& yaml, - Server::Configuration::FactoryContext& context) { +Config constructConfigFromV3Yaml(const std::string& yaml, + Server::Configuration::FactoryContext& context, + bool avoid_boosting = true) { envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy tcp_proxy; - TestUtility::loadFromYamlAndValidate(yaml, tcp_proxy); + TestUtility::loadFromYamlAndValidate(yaml, tcp_proxy, false, avoid_boosting); return Config(tcp_proxy, context); } @@ -76,7 +77,7 @@ cluster: foo )EOF"; NiceMock factory_context; - Config config_obj(constructConfigFromV2Yaml(yaml, factory_context)); + Config config_obj(constructConfigFromV3Yaml(yaml, factory_context)); EXPECT_EQ(std::chrono::hours(1), config_obj.sharedConfig()->idleTimeout().value()); } @@ -88,7 +89,7 @@ idle_timeout: 
0s )EOF"; NiceMock factory_context; - Config config_obj(constructConfigFromV2Yaml(yaml, factory_context)); + Config config_obj(constructConfigFromV3Yaml(yaml, factory_context)); EXPECT_FALSE(config_obj.sharedConfig()->idleTimeout().has_value()); } @@ -100,7 +101,7 @@ idle_timeout: 1s )EOF"; NiceMock factory_context; - Config config_obj(constructConfigFromV2Yaml(yaml, factory_context)); + Config config_obj(constructConfigFromV3Yaml(yaml, factory_context)); EXPECT_EQ(std::chrono::seconds(1), config_obj.sharedConfig()->idleTimeout().value()); } @@ -403,7 +404,7 @@ TEST(ConfigTest, WeightedClusterWithZeroWeightConfig) { )EOF"; NiceMock factory_context; - EXPECT_THROW(constructConfigFromV2Yaml(yaml, factory_context), EnvoyException); + EXPECT_THROW(constructConfigFromV3Yaml(yaml, factory_context), EnvoyException); } // Tests that it is possible to define a list of weighted clusters. @@ -419,7 +420,7 @@ TEST(ConfigTest, WeightedClustersConfig) { )EOF"; NiceMock factory_context; - Config config_obj(constructConfigFromV2Yaml(yaml, factory_context)); + Config config_obj(constructConfigFromV3Yaml(yaml, factory_context)); NiceMock connection; EXPECT_CALL(factory_context.random_, random()).WillOnce(Return(0)); @@ -453,7 +454,7 @@ TEST(ConfigTest, WeightedClustersWithMetadataMatchConfig) { )EOF"; NiceMock factory_context; - Config config_obj(constructConfigFromV2Yaml(yaml, factory_context)); + Config config_obj(constructConfigFromV3Yaml(yaml, factory_context)); { ProtobufWkt::Value v1, v2; @@ -540,7 +541,7 @@ TEST(ConfigTest, WeightedClustersWithMetadataMatchAndTopLevelMetadataMatchConfig )EOF"; NiceMock factory_context; - Config config_obj(constructConfigFromV2Yaml(yaml, factory_context)); + Config config_obj(constructConfigFromV3Yaml(yaml, factory_context)); ProtobufWkt::Value v00, v01, v04; v00.set_string_value("v00"); @@ -631,7 +632,7 @@ TEST(ConfigTest, WeightedClustersWithTopLevelMetadataMatchConfig) { )EOF"; NiceMock factory_context; - Config config_obj(constructConfigFromV2Yaml(yaml, factory_context)); + Config config_obj(constructConfigFromV3Yaml(yaml, factory_context)); ProtobufWkt::Value v1, v2; v1.set_string_value("v1"); @@ -670,7 +671,7 @@ TEST(ConfigTest, TopLevelMetadataMatchConfig) { )EOF"; NiceMock factory_context; - Config config_obj(constructConfigFromV2Yaml(yaml, factory_context)); + Config config_obj(constructConfigFromV3Yaml(yaml, factory_context)); ProtobufWkt::Value v1, v2; v1.set_string_value("v1"); @@ -703,7 +704,7 @@ TEST(ConfigTest, ClusterWithTopLevelMetadataMatchConfig) { )EOF"; NiceMock factory_context; - Config config_obj(constructConfigFromV2Yaml(yaml, factory_context)); + Config config_obj(constructConfigFromV3Yaml(yaml, factory_context)); ProtobufWkt::Value v1, v2; v1.set_string_value("v1"); @@ -742,7 +743,7 @@ TEST(ConfigTest, PerConnectionClusterWithTopLevelMetadataMatchConfig) { )EOF"; NiceMock factory_context; - Config config_obj(constructConfigFromV2Yaml(yaml, factory_context)); + Config config_obj(constructConfigFromV3Yaml(yaml, factory_context)); ProtobufWkt::Value v1, v2; v1.set_string_value("v1"); @@ -781,7 +782,7 @@ TEST(ConfigTest, HashWithSourceIpConfig) { )EOF"; NiceMock factory_context; - Config config_obj(constructConfigFromV2Yaml(yaml, factory_context)); + Config config_obj(constructConfigFromV3Yaml(yaml, factory_context)); EXPECT_NE(nullptr, config_obj.hashPolicy()); } @@ -792,7 +793,7 @@ TEST(ConfigTest, HashWithSourceIpDefaultConfig) { )EOF"; NiceMock factory_context; - Config config_obj(constructConfigFromV2Yaml(yaml, factory_context)); + Config 
config_obj(constructConfigFromV3Yaml(yaml, factory_context)); EXPECT_EQ(nullptr, config_obj.hashPolicy()); } From 54f6f0e7acf4e34cab9c2ef85cc7aa4058ee821e Mon Sep 17 00:00:00 2001 From: foreseeable Date: Tue, 14 Jul 2020 20:15:20 +0000 Subject: [PATCH 629/909] test: refactor header inclusion to speed up building (for test/extensions/[upstream|common|tracer]/...) (#12049) Commit Message: refactor header inclusion to speed up building Additional Description: Risk Level: low Testing: exsiting tests Docs Changes: N/A Release Notes: no Related Issues: #10917 Signed-off-by: Muge Chen --- test/extensions/common/tap/BUILD | 3 ++- test/extensions/common/tap/admin_test.cc | 3 ++- test/extensions/tracers/datadog/config_test.cc | 3 ++- test/extensions/tracers/dynamic_ot/config_test.cc | 3 ++- test/extensions/tracers/lightstep/config_test.cc | 3 ++- test/extensions/tracers/opencensus/config_test.cc | 3 ++- test/extensions/tracers/xray/BUILD | 7 +++++-- test/extensions/tracers/xray/config_test.cc | 4 +++- test/extensions/tracers/xray/tracer_test.cc | 2 +- test/extensions/tracers/xray/xray_tracer_impl_test.cc | 4 +++- test/extensions/tracers/zipkin/BUILD | 3 ++- test/extensions/tracers/zipkin/config_test.cc | 3 ++- test/extensions/upstreams/http/tcp/BUILD | 3 ++- .../extensions/upstreams/http/tcp/upstream_request_test.cc | 3 ++- 14 files changed, 32 insertions(+), 15 deletions(-) diff --git a/test/extensions/common/tap/BUILD b/test/extensions/common/tap/BUILD index 5483de65de26..9775f2873b05 100644 --- a/test/extensions/common/tap/BUILD +++ b/test/extensions/common/tap/BUILD @@ -25,7 +25,8 @@ envoy_cc_test( srcs = ["admin_test.cc"], deps = [ "//source/extensions/common/tap:admin", - "//test/mocks/server:server_mocks", + "//test/mocks/server:admin_mocks", + "//test/mocks/server:admin_stream_mocks", "@envoy_api//envoy/config/tap/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/common/tap/admin_test.cc b/test/extensions/common/tap/admin_test.cc index 0ee2bc2af042..2e3d940eadc5 100644 --- a/test/extensions/common/tap/admin_test.cc +++ b/test/extensions/common/tap/admin_test.cc @@ -2,7 +2,8 @@ #include "extensions/common/tap/admin.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/admin.h" +#include "test/mocks/server/admin_stream.h" #include "gtest/gtest.h" diff --git a/test/extensions/tracers/datadog/config_test.cc b/test/extensions/tracers/datadog/config_test.cc index 87d259c0f4e7..52a44719367c 100644 --- a/test/extensions/tracers/datadog/config_test.cc +++ b/test/extensions/tracers/datadog/config_test.cc @@ -4,7 +4,8 @@ #include "extensions/tracers/datadog/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/tracer_factory.h" +#include "test/mocks/server/tracer_factory_context.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/tracers/dynamic_ot/config_test.cc b/test/extensions/tracers/dynamic_ot/config_test.cc index 90c77529f568..a655b23e5f6c 100644 --- a/test/extensions/tracers/dynamic_ot/config_test.cc +++ b/test/extensions/tracers/dynamic_ot/config_test.cc @@ -4,7 +4,8 @@ #include "extensions/tracers/dynamic_ot/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/tracer_factory.h" +#include "test/mocks/server/tracer_factory_context.h" #include "test/test_common/environment.h" #include "fmt/printf.h" diff --git a/test/extensions/tracers/lightstep/config_test.cc b/test/extensions/tracers/lightstep/config_test.cc index ec09bf27c6c9..e56ff7b0c507 100644 --- 
a/test/extensions/tracers/lightstep/config_test.cc +++ b/test/extensions/tracers/lightstep/config_test.cc @@ -4,7 +4,8 @@ #include "extensions/tracers/lightstep/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/tracer_factory.h" +#include "test/mocks/server/tracer_factory_context.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/tracers/opencensus/config_test.cc b/test/extensions/tracers/opencensus/config_test.cc index 0f5baa929bdc..c27186c9b675 100644 --- a/test/extensions/tracers/opencensus/config_test.cc +++ b/test/extensions/tracers/opencensus/config_test.cc @@ -5,7 +5,8 @@ #include "extensions/tracers/opencensus/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/tracer_factory.h" +#include "test/mocks/server/tracer_factory_context.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/tracers/xray/BUILD b/test/extensions/tracers/xray/BUILD index 6ebfdf8071d0..bf4e36dcd7be 100644 --- a/test/extensions/tracers/xray/BUILD +++ b/test/extensions/tracers/xray/BUILD @@ -27,7 +27,8 @@ envoy_extension_cc_test( "//test/mocks/http:http_mocks", "//test/mocks/local_info:local_info_mocks", "//test/mocks/runtime:runtime_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:instance_mocks", + "//test/mocks/server:tracer_factory_context_mocks", "//test/mocks/stats:stats_mocks", "//test/mocks/thread_local:thread_local_mocks", "//test/mocks/tracing:tracing_mocks", @@ -43,7 +44,9 @@ envoy_extension_cc_test( extension_name = "envoy.tracers.xray", deps = [ "//source/extensions/tracers/xray:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:instance_mocks", + "//test/mocks/server:tracer_factory_context_mocks", + "//test/mocks/server:tracer_factory_mocks", "//test/test_common:environment_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/config/trace/v3:pkg_cc_proto", diff --git a/test/extensions/tracers/xray/config_test.cc b/test/extensions/tracers/xray/config_test.cc index b71092f60eda..ff39c0dbeaf8 100644 --- a/test/extensions/tracers/xray/config_test.cc +++ b/test/extensions/tracers/xray/config_test.cc @@ -5,7 +5,9 @@ #include "extensions/tracers/xray/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/instance.h" +#include "test/mocks/server/tracer_factory.h" +#include "test/mocks/server/tracer_factory_context.h" #include "test/test_common/environment.h" #include "gmock/gmock.h" diff --git a/test/extensions/tracers/xray/tracer_test.cc b/test/extensions/tracers/xray/tracer_test.cc index fcd448ba4df0..c82e73056a5f 100644 --- a/test/extensions/tracers/xray/tracer_test.cc +++ b/test/extensions/tracers/xray/tracer_test.cc @@ -10,7 +10,7 @@ #include "extensions/tracers/xray/tracer.h" #include "extensions/tracers/xray/xray_configuration.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/instance.h" #include "test/mocks/tracing/mocks.h" #include "test/test_common/environment.h" #include "test/test_common/network_utility.h" diff --git a/test/extensions/tracers/xray/xray_tracer_impl_test.cc b/test/extensions/tracers/xray/xray_tracer_impl_test.cc index 81f9532cb4a8..0019ef66544e 100644 --- a/test/extensions/tracers/xray/xray_tracer_impl_test.cc +++ b/test/extensions/tracers/xray/xray_tracer_impl_test.cc @@ -5,7 +5,9 @@ #include "extensions/tracers/xray/xray_tracer_impl.h" #include "test/mocks/runtime/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/instance.h" +#include 
"test/mocks/server/tracer_factory.h" +#include "test/mocks/server/tracer_factory_context.h" #include "test/mocks/thread_local/mocks.h" #include "test/mocks/tracing/mocks.h" #include "test/test_common/utility.h" diff --git a/test/extensions/tracers/zipkin/BUILD b/test/extensions/tracers/zipkin/BUILD index 08da01bfa49b..efa94415b031 100644 --- a/test/extensions/tracers/zipkin/BUILD +++ b/test/extensions/tracers/zipkin/BUILD @@ -52,7 +52,8 @@ envoy_extension_cc_test( extension_name = "envoy.tracers.zipkin", deps = [ "//source/extensions/tracers/zipkin:config", - "//test/mocks/server:server_mocks", + "//test/mocks/server:tracer_factory_context_mocks", + "//test/mocks/server:tracer_factory_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/config/trace/v3:pkg_cc_proto", ], diff --git a/test/extensions/tracers/zipkin/config_test.cc b/test/extensions/tracers/zipkin/config_test.cc index 352a923ffe23..0f62b8f7fd7f 100644 --- a/test/extensions/tracers/zipkin/config_test.cc +++ b/test/extensions/tracers/zipkin/config_test.cc @@ -5,7 +5,8 @@ #include "extensions/tracers/zipkin/config.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/tracer_factory.h" +#include "test/mocks/server/tracer_factory_context.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/upstreams/http/tcp/BUILD b/test/extensions/upstreams/http/tcp/BUILD index 39a5a8d33a89..057aeff867b2 100644 --- a/test/extensions/upstreams/http/tcp/BUILD +++ b/test/extensions/upstreams/http/tcp/BUILD @@ -22,7 +22,8 @@ envoy_cc_test( "//test/mocks/http:http_mocks", "//test/mocks/network:network_mocks", "//test/mocks/router:router_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", + "//test/mocks/server:instance_mocks", "//test/mocks/upstream:upstream_mocks", "//test/test_common:environment_lib", "//test/test_common:simulated_time_system_lib", diff --git a/test/extensions/upstreams/http/tcp/upstream_request_test.cc b/test/extensions/upstreams/http/tcp/upstream_request_test.cc index 3f5fc02692df..114648c00b00 100644 --- a/test/extensions/upstreams/http/tcp/upstream_request_test.cc +++ b/test/extensions/upstreams/http/tcp/upstream_request_test.cc @@ -10,7 +10,8 @@ #include "test/mocks/common.h" #include "test/mocks/http/mocks.h" #include "test/mocks/router/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" +#include "test/mocks/server/instance.h" #include "test/mocks/tcp/mocks.h" #include "test/test_common/utility.h" From 7d2ed85169b994334bfede2dbe7f4a9f9343787a Mon Sep 17 00:00:00 2001 From: foreseeable Date: Tue, 14 Jul 2020 20:15:54 +0000 Subject: [PATCH 630/909] test: refactor header inclusion to speed up building (for test/server/...) (#12045) Commit Message: refactor header inclusion to speed up building (for test/server/...) 
Additional Description: Risk Level: low Testing: existing tests Docs Changes: N/A Release Notes: no Related Issues: #10917 Signed-off-by: Muge Chen --- test/server/BUILD | 41 ++++++++++++++----- test/server/admin/BUILD | 4 +- test/server/admin/admin_filter_test.cc | 2 +- test/server/admin/admin_instance.h | 2 +- test/server/api_listener_test.cc | 5 ++- test/server/config_validation/BUILD | 5 ++- .../config_validation/config_fuzz_test.cc | 3 +- test/server/config_validation/server_test.cc | 3 +- test/server/configuration_impl_test.cc | 2 +- test/server/drain_manager_impl_test.cc | 2 +- test/server/filter_chain_benchmark_test.cc | 2 +- test/server/filter_chain_manager_impl_test.cc | 3 +- test/server/guarddog_impl_test.cc | 2 +- test/server/hot_restart_impl_test.cc | 2 +- test/server/hot_restarting_parent_test.cc | 3 +- test/server/lds_api_test.cc | 15 ++++--- test/server/listener_manager_impl_test.h | 7 +++- test/server/server_fuzz_test.cc | 5 ++- test/server/server_test.cc | 6 ++- test/server/worker_impl_test.cc | 4 +- 20 files changed, 81 insertions(+), 37 deletions(-) diff --git a/test/server/BUILD b/test/server/BUILD index da24f5a1109a..9868dc06062e 100644 --- a/test/server/BUILD +++ b/test/server/BUILD @@ -22,7 +22,10 @@ envoy_cc_test( ":utility_lib", "//source/server:listener_manager_lib", "//test/mocks/network:network_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:instance_mocks", + "//test/mocks/server:listener_component_factory_mocks", + "//test/mocks/server:worker_factory_mocks", + "//test/mocks/server:worker_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/config/listener/v3:pkg_cc_proto", ], @@ -55,7 +58,7 @@ envoy_cc_test( "//test/common/upstream:utility_lib", "//test/mocks:common_lib", "//test/mocks/network:network_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:instance_mocks", "//test/test_common:environment_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", @@ -89,7 +92,7 @@ envoy_cc_test( srcs = ["drain_manager_impl_test.cc"], deps = [ "//source/server:drain_manager_lib", - "//test/mocks/server:server_mocks", + "//test/mocks/server:instance_mocks", "@envoy_api//envoy/config/listener/v3:pkg_cc_proto", ], ) @@ -130,7 +133,7 @@ envoy_cc_test( "//source/common/stats:stats_lib", "//source/server:guarddog_lib", "//test/mocks:common_lib", - "//test/mocks/server:server_mocks", + "//test/mocks/server:main_mocks", "//test/mocks/stats:stats_mocks", "//test/test_common:simulated_time_system_lib", "//test/test_common:utility_lib", @@ -188,8 +191,10 @@ envoy_cc_test( "//source/common/protobuf:utility_lib", "//source/server:lds_api_lib", "//test/mocks/config:config_mocks", + "//test/mocks/init:init_mocks", "//test/mocks/protobuf:protobuf_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:listener_manager_mocks", + "//test/mocks/upstream:upstream_mocks", "//test/test_common:environment_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", @@ -207,7 +212,12 @@ envoy_cc_test_library( "//source/server:listener_manager_lib", "//test/mocks/init:init_mocks", "//test/mocks/network:network_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:drain_manager_mocks", + "//test/mocks/server:guard_dog_mocks", + "//test/mocks/server:instance_mocks", + "//test/mocks/server:listener_component_factory_mocks", + "//test/mocks/server:worker_factory_mocks", + "//test/mocks/server:worker_mocks", "//test/test_common:environment_lib", 
"//test/test_common:simulated_time_system_lib", "//test/test_common:test_runtime_lib", @@ -291,7 +301,8 @@ envoy_cc_test( "//source/server:filter_chain_manager_lib", "//source/server:listener_manager_lib", "//test/mocks/network:network_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:drain_manager_mocks", + "//test/mocks/server:factory_context_mocks", "//test/test_common:environment_lib", "//test/test_common:registry_lib", "//test/test_common:simulated_time_system_lib", @@ -309,7 +320,9 @@ envoy_cc_fuzz_test( "//source/common/thread_local:thread_local_lib", "//source/server:server_lib", "//test/integration:integration_lib", - "//test/mocks/server:server_mocks", + "//test/mocks/server:options_mocks", + "//test/mocks/server:hot_restart_mocks", + "//test/mocks/stats:stats_mocks", "//test/test_common:environment_lib", ] + select({ "//bazel:windows_x86_64": envoy_all_extensions(WINDOWS_SKIP_TARGETS), @@ -358,7 +371,11 @@ envoy_cc_test( "//test/common/config:dummy_config_proto_cc_proto", "//test/common/stats:stat_test_utility_lib", "//test/integration:integration_lib", - "//test/mocks/server:server_mocks", + "//test/mocks/server:bootstrap_extension_factory_mocks", + "//test/mocks/server:hot_restart_mocks", + "//test/mocks/server:instance_mocks", + "//test/mocks/server:options_mocks", + "//test/mocks/server:overload_manager_mocks", "//test/mocks/stats:stats_mocks", "//test/test_common:registry_lib", "//test/test_common:simulated_time_system_lib", @@ -398,7 +415,9 @@ envoy_cc_test( "//source/common/event:dispatcher_lib", "//source/server:worker_lib", "//test/mocks/network:network_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:guard_dog_mocks", + "//test/mocks/server:instance_mocks", + "//test/mocks/server:overload_manager_mocks", "//test/mocks/thread_local:thread_local_mocks", "//test/test_common:utility_lib", ], @@ -416,7 +435,7 @@ envoy_cc_benchmark_binary( "//source/server:filter_chain_manager_lib", "//test/test_common:environment_lib", "//test/mocks/network:network_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", # tranport socket config registration "//source/extensions/transport_sockets/tls:config", ], diff --git a/test/server/admin/BUILD b/test/server/admin/BUILD index 75e3fc35f8e5..b2af4c15f6f0 100644 --- a/test/server/admin/BUILD +++ b/test/server/admin/BUILD @@ -16,7 +16,7 @@ envoy_cc_test_library( deps = [ "//source/server/admin:admin_lib", "//test/mocks/runtime:runtime_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:instance_mocks", "//test/test_common:environment_lib", "//test/test_common:network_utility_lib", ], @@ -54,7 +54,7 @@ envoy_cc_test( srcs = ["admin_filter_test.cc"], deps = [ "//source/server/admin:admin_filter_lib", - "//test/mocks/server:server_mocks", + "//test/mocks/server:instance_mocks", "//test/test_common:environment_lib", ], ) diff --git a/test/server/admin/admin_filter_test.cc b/test/server/admin/admin_filter_test.cc index 07b5f9b6c98c..524bafedb475 100644 --- a/test/server/admin/admin_filter_test.cc +++ b/test/server/admin/admin_filter_test.cc @@ -1,6 +1,6 @@ #include "server/admin/admin_filter.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/instance.h" #include "test/test_common/environment.h" #include "gmock/gmock.h" diff --git a/test/server/admin/admin_instance.h b/test/server/admin/admin_instance.h index b6231ee856a9..7092d973c00e 100644 --- a/test/server/admin/admin_instance.h +++ b/test/server/admin/admin_instance.h @@ -4,7 +4,7 
@@ #include "test/mocks/http/mocks.h" #include "test/mocks/runtime/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/instance.h" #include "test/test_common/environment.h" #include "test/test_common/network_utility.h" diff --git a/test/server/api_listener_test.cc b/test/server/api_listener_test.cc index f229823c59c3..aec3e64d2bf7 100644 --- a/test/server/api_listener_test.cc +++ b/test/server/api_listener_test.cc @@ -6,7 +6,10 @@ #include "server/listener_manager_impl.h" #include "test/mocks/network/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/instance.h" +#include "test/mocks/server/listener_component_factory.h" +#include "test/mocks/server/worker.h" +#include "test/mocks/server/worker_factory.h" #include "test/server/utility.h" #include "test/test_common/utility.h" diff --git a/test/server/config_validation/BUILD b/test/server/config_validation/BUILD index 4c2c1105d9f6..d3358e63dfbc 100644 --- a/test/server/config_validation/BUILD +++ b/test/server/config_validation/BUILD @@ -68,7 +68,7 @@ envoy_cc_test( "//source/extensions/transport_sockets/tls:config", "//source/server/config_validation:server_lib", "//test/integration:integration_lib", - "//test/mocks/server:server_mocks", + "//test/mocks/server:options_mocks", "//test/mocks/stats:stats_mocks", "//test/test_common:environment_lib", "//test/test_common:registry_lib", @@ -98,7 +98,8 @@ envoy_cc_fuzz_test( "//source/common/common:thread_lib", "//source/server/config_validation:server_lib", "//test/integration:integration_lib", - "//test/mocks/server:server_mocks", + "//test/mocks/server:options_mocks", + "//test/test_common:environment_lib", ] + select({ "//bazel:windows_x86_64": envoy_all_extensions(WINDOWS_SKIP_TARGETS), "//bazel:linux_ppc": envoy_all_extensions(PPC_SKIP_TARGETS), diff --git a/test/server/config_validation/config_fuzz_test.cc b/test/server/config_validation/config_fuzz_test.cc index 33ce725e67d2..107bb2eeb415 100644 --- a/test/server/config_validation/config_fuzz_test.cc +++ b/test/server/config_validation/config_fuzz_test.cc @@ -9,7 +9,8 @@ #include "test/fuzz/fuzz_runner.h" #include "test/integration/server.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/options.h" +#include "test/test_common/environment.h" namespace Envoy { namespace Server { diff --git a/test/server/config_validation/server_test.cc b/test/server/config_validation/server_test.cc index 4bd903a65fb1..c1e6de23ec48 100644 --- a/test/server/config_validation/server_test.cc +++ b/test/server/config_validation/server_test.cc @@ -5,10 +5,11 @@ #include "server/config_validation/server.h" #include "test/integration/server.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/options.h" #include "test/mocks/stats/mocks.h" #include "test/test_common/environment.h" #include "test/test_common/registry.h" +#include "test/test_common/test_time.h" namespace Envoy { namespace Server { diff --git a/test/server/configuration_impl_test.cc b/test/server/configuration_impl_test.cc index 6806d8d2a639..cd7f2ce5c7e4 100644 --- a/test/server/configuration_impl_test.cc +++ b/test/server/configuration_impl_test.cc @@ -18,7 +18,7 @@ #include "test/common/upstream/utility.h" #include "test/mocks/common.h" #include "test/mocks/network/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/instance.h" #include "test/test_common/environment.h" #include "test/test_common/utility.h" diff --git a/test/server/drain_manager_impl_test.cc 
b/test/server/drain_manager_impl_test.cc index 9afeba1b7955..160080b34b1b 100644 --- a/test/server/drain_manager_impl_test.cc +++ b/test/server/drain_manager_impl_test.cc @@ -4,7 +4,7 @@ #include "server/drain_manager_impl.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/instance.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/server/filter_chain_benchmark_test.cc b/test/server/filter_chain_benchmark_test.cc index 819932e968f9..bda0e1d81b60 100644 --- a/test/server/filter_chain_benchmark_test.cc +++ b/test/server/filter_chain_benchmark_test.cc @@ -12,7 +12,7 @@ #include "extensions/transport_sockets/well_known_names.h" #include "test/mocks/network/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/test_common/environment.h" #include "test/test_common/utility.h" diff --git a/test/server/filter_chain_manager_impl_test.cc b/test/server/filter_chain_manager_impl_test.cc index 4779dc32576e..4b78f2a70d88 100644 --- a/test/server/filter_chain_manager_impl_test.cc +++ b/test/server/filter_chain_manager_impl_test.cc @@ -25,7 +25,8 @@ #include "extensions/transport_sockets/tls/ssl_socket.h" #include "test/mocks/network/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/drain_manager.h" +#include "test/mocks/server/factory_context.h" #include "test/server/utility.h" #include "test/test_common/environment.h" #include "test/test_common/registry.h" diff --git a/test/server/guarddog_impl_test.cc b/test/server/guarddog_impl_test.cc index 067ee8403aaf..2ab5e94332e7 100644 --- a/test/server/guarddog_impl_test.cc +++ b/test/server/guarddog_impl_test.cc @@ -11,7 +11,7 @@ #include "server/guarddog_impl.h" #include "test/mocks/common.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/main.h" #include "test/mocks/stats/mocks.h" #include "test/test_common/simulated_time_system.h" #include "test/test_common/test_time.h" diff --git a/test/server/hot_restart_impl_test.cc b/test/server/hot_restart_impl_test.cc index 8237c2ba9aaf..1d127c2572c4 100644 --- a/test/server/hot_restart_impl_test.cc +++ b/test/server/hot_restart_impl_test.cc @@ -8,7 +8,7 @@ #include "test/mocks/api/hot_restart.h" #include "test/mocks/api/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/hot_restart.h" #include "test/test_common/logging.h" #include "test/test_common/threadsafe_singleton_injector.h" diff --git a/test/server/hot_restarting_parent_test.cc b/test/server/hot_restarting_parent_test.cc index e178bb9b2ff3..80ce667bb50d 100644 --- a/test/server/hot_restarting_parent_test.cc +++ b/test/server/hot_restarting_parent_test.cc @@ -4,7 +4,8 @@ #include "server/hot_restarting_parent.h" #include "test/mocks/network/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/instance.h" +#include "test/mocks/server/listener_manager.h" #include "gtest/gtest.h" diff --git a/test/server/lds_api_test.cc b/test/server/lds_api_test.cc index 3b15585cc2d4..f4c5aee7b72c 100644 --- a/test/server/lds_api_test.cc +++ b/test/server/lds_api_test.cc @@ -9,18 +9,21 @@ #include "server/lds_api.h" #include "test/mocks/config/mocks.h" +#include "test/mocks/init/mocks.h" #include "test/mocks/protobuf/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/listener_manager.h" +#include "test/mocks/upstream/mocks.h" #include "test/test_common/environment.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" -using testing::_; -using 
testing::InSequence; -using testing::Invoke; -using testing::Return; -using testing::Throw; +using ::testing::_; +using ::testing::InSequence; +using ::testing::Invoke; +using ::testing::NiceMock; +using ::testing::Return; +using ::testing::Throw; namespace Envoy { namespace Server { diff --git a/test/server/listener_manager_impl_test.h b/test/server/listener_manager_impl_test.h index d1f3256fd4a4..747859f669a1 100644 --- a/test/server/listener_manager_impl_test.h +++ b/test/server/listener_manager_impl_test.h @@ -12,7 +12,12 @@ #include "server/listener_manager_impl.h" #include "test/mocks/network/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/drain_manager.h" +#include "test/mocks/server/guard_dog.h" +#include "test/mocks/server/instance.h" +#include "test/mocks/server/listener_component_factory.h" +#include "test/mocks/server/worker.h" +#include "test/mocks/server/worker_factory.h" #include "test/test_common/environment.h" #include "test/test_common/simulated_time_system.h" #include "test/test_common/test_runtime.h" diff --git a/test/server/server_fuzz_test.cc b/test/server/server_fuzz_test.cc index 1bf26f04b7f2..4859db0e97cf 100644 --- a/test/server/server_fuzz_test.cc +++ b/test/server/server_fuzz_test.cc @@ -13,7 +13,10 @@ #include "test/common/runtime/utility.h" #include "test/fuzz/fuzz_runner.h" #include "test/integration/server.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/hot_restart.h" +#include "test/mocks/server/options.h" +#include "test/mocks/stats/mocks.h" +#include "test/test_common/environment.h" #include "test/test_common/test_time.h" namespace Envoy { diff --git a/test/server/server_test.cc b/test/server/server_test.cc index 556f71160bed..6a9522e80bf5 100644 --- a/test/server/server_test.cc +++ b/test/server/server_test.cc @@ -18,7 +18,11 @@ #include "test/common/config/dummy_config.pb.h" #include "test/common/stats/stat_test_utility.h" #include "test/integration/server.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/bootstrap_extension_factory.h" +#include "test/mocks/server/hot_restart.h" +#include "test/mocks/server/instance.h" +#include "test/mocks/server/options.h" +#include "test/mocks/server/overload_manager.h" #include "test/mocks/stats/mocks.h" #include "test/test_common/environment.h" #include "test/test_common/registry.h" diff --git a/test/server/worker_impl_test.cc b/test/server/worker_impl_test.cc index 4a7c560c33cb..6fd4291f2d9a 100644 --- a/test/server/worker_impl_test.cc +++ b/test/server/worker_impl_test.cc @@ -6,7 +6,9 @@ #include "server/worker_impl.h" #include "test/mocks/network/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/guard_dog.h" +#include "test/mocks/server/instance.h" +#include "test/mocks/server/overload_manager.h" #include "test/mocks/thread_local/mocks.h" #include "test/test_common/utility.h" From 7abb0e0bbed4f6b6304403b93762614ad385f80d Mon Sep 17 00:00:00 2001 From: Christoph Pakulski Date: Tue, 14 Jul 2020 16:17:28 -0400 Subject: [PATCH 631/909] Validation admin returns true for addHandler and removeHandler. (#12033) config validation: ValidationAdmin should pretend that addHandler and removeHandler complete successfully Risk Level: Low. Testing: Manual testing. Docs Changes: No. Release Notes: No. 
Fixes #10154 Signed-off-by: Christoph Pakulski --- source/server/config_validation/admin.cc | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/source/server/config_validation/admin.cc b/source/server/config_validation/admin.cc index 44368513e137..dec3bb773c70 100644 --- a/source/server/config_validation/admin.cc +++ b/source/server/config_validation/admin.cc @@ -3,11 +3,12 @@ namespace Envoy { namespace Server { +// Pretend that handler was added successfully. bool ValidationAdmin::addHandler(const std::string&, const std::string&, HandlerCb, bool, bool) { - return false; + return true; } -bool ValidationAdmin::removeHandler(const std::string&) { return false; } +bool ValidationAdmin::removeHandler(const std::string&) { return true; } const Network::Socket& ValidationAdmin::socket() { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } From d8b573851f0927d7f63563a7df1f0bce2b7d9931 Mon Sep 17 00:00:00 2001 From: Michael Rebello Date: Tue, 14 Jul 2020 17:41:10 -0700 Subject: [PATCH 632/909] android: fix pthread compiler conditionals (#12081) android: fix pthread compiler conditionals Risk Level: Low Testing: Local builds / CI in Envoy Mobile Docs Changes: None Signed-off-by: Michael Rebello --- include/envoy/common/platform.h | 24 ++++++++++------------- source/common/common/posix/thread_impl.cc | 4 ++-- 2 files changed, 12 insertions(+), 16 deletions(-) diff --git a/include/envoy/common/platform.h b/include/envoy/common/platform.h index c3398829f8ab..d5606ca32ee4 100644 --- a/include/envoy/common/platform.h +++ b/include/envoy/common/platform.h @@ -248,19 +248,15 @@ struct mmsghdr { #endif // __ANDROID_API__ < 24 #endif // ifdef __ANDROID_API__ -#ifdef __linux__ -#define SUPPORTS_PTHREAD_GETNAME_NP 1 -#endif - // https://android.googlesource.com/platform/bionic/+/master/docs/status.md // ``pthread_getname_np`` is introduced in API 26 -#ifdef __ANDROID_API__ -#if __ANDROID_API__ > 26 -#define SUPPORTS_PTHREAD_GETNAME_NP 1 -#endif // __ANDROID_API__ > 26 -#endif // ifdef __ANDROID_API__ - -// Ensure `SUPPORTS_PTHREAD_GETNAME_NP` is set -#ifndef SUPPORTS_PTHREAD_GETNAME_NP -#define SUPPORTS_PTHREAD_GETNAME_NP 0 -#endif +#define SUPPORTS_PTHREAD_NAMING 0 +#if defined(__ANDROID_API__) +#if __ANDROID_API__ >= 26 +#undef SUPPORTS_PTHREAD_NAMING +#define SUPPORTS_PTHREAD_NAMING 1 +#endif // __ANDROID_API__ >= 26 +#elif defined(__linux__) +#undef SUPPORTS_PTHREAD_NAMING +#define SUPPORTS_PTHREAD_NAMING 1 +#endif // defined(__ANDROID_API__) diff --git a/source/common/common/posix/thread_impl.cc b/source/common/common/posix/thread_impl.cc index c85bbfce57ec..71cbf2b02eb6 100644 --- a/source/common/common/posix/thread_impl.cc +++ b/source/common/common/posix/thread_impl.cc @@ -52,7 +52,7 @@ class ThreadImplPosix : public Thread { this); RELEASE_ASSERT(rc == 0, ""); -#if SUPPORTS_PTHREAD_GETNAME_NP +#if SUPPORTS_PTHREAD_NAMING // If the name was not specified, get it from the OS. If the name was // specified, write it into the thread, and assert that the OS sees it the // same way. @@ -93,7 +93,7 @@ class ThreadImplPosix : public Thread { } private: -#if SUPPORTS_PTHREAD_GETNAME_NP +#if SUPPORTS_PTHREAD_NAMING // Attempts to get the name from the operating system, returning true and // updating 'name' if successful. Note that during normal operation this // may fail, if the thread exits prior to the system call. 
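(For illustration only, not part of the patch above: the following self-contained C++ sketch shows how a feature-detection macro like SUPPORTS_PTHREAD_NAMING is typically defined and then consumed around the non-portable pthread_setname_np / pthread_getname_np calls. The thread name and the simplified guard are assumptions made for this example, not code from the Envoy tree.)

#ifndef _GNU_SOURCE
#define _GNU_SOURCE // glibc requires this for pthread_{set,get}name_np.
#endif
#include <pthread.h>

#include <cstdio>

// Mirror of the layered conditional above: Android needs API level 26+,
// plain Linux always supports naming, everything else defaults to 0.
#if defined(__ANDROID_API__)
#if __ANDROID_API__ >= 26
#define SUPPORTS_PTHREAD_NAMING 1
#endif
#elif defined(__linux__)
#define SUPPORTS_PTHREAD_NAMING 1
#endif
#ifndef SUPPORTS_PTHREAD_NAMING
#define SUPPORTS_PTHREAD_NAMING 0
#endif

int main() {
#if SUPPORTS_PTHREAD_NAMING
  // Thread names are limited to 15 characters plus the terminating NUL.
  pthread_setname_np(pthread_self(), "demo-worker");
  char name[16] = {0};
  if (pthread_getname_np(pthread_self(), name, sizeof(name)) == 0) {
    std::printf("thread name: %s\n", name);
  }
#else
  std::printf("thread naming not supported on this platform\n");
#endif
  return 0;
}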
From 2e6aed49f9b4ee1cbb2daaebfc27d4f2514cfa80 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafa=C5=82=20Augustyniak?= Date: Tue, 14 Jul 2020 19:39:47 -0700 Subject: [PATCH 633/909] fault: fix decrementing of active_faults gauge (#12083) Signed-off-by: Rafal Augustyniak --- docs/root/version_history/current.rst | 2 ++ .../filters/http/fault/fault_filter.cc | 2 +- .../fault/fault_filter_integration_test.cc | 22 +++++++++++++++++++ 3 files changed, 25 insertions(+), 1 deletion(-) diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 91b93b797ffa..44086e92fc00 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -20,6 +20,8 @@ Bug Fixes --------- *Changes expected to improve the state of the world and are unlikely to have negative effects* +* fault: fixed an issue with `active_faults` gauge not being decremented for when abort faults were injected. + Removed Config or Runtime ------------------------- *Normally occurs at the end of the* :ref:`deprecation period ` diff --git a/source/extensions/filters/http/fault/fault_filter.cc b/source/extensions/filters/http/fault/fault_filter.cc index ad40112619e2..245f44b98a56 100644 --- a/source/extensions/filters/http/fault/fault_filter.cc +++ b/source/extensions/filters/http/fault/fault_filter.cc @@ -430,10 +430,10 @@ void FaultFilter::postDelayInjection(const Http::RequestHeaderMap& request_heade void FaultFilter::abortWithStatus(Http::Code http_status_code, absl::optional grpc_status) { + recordAbortsInjectedStats(); decoder_callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::FaultInjected); decoder_callbacks_->sendLocalReply(http_status_code, "fault filter abort", nullptr, grpc_status, RcDetails::get().FaultAbort); - recordAbortsInjectedStats(); } bool FaultFilter::matchesTargetUpstreamCluster() { diff --git a/test/extensions/filters/http/fault/fault_filter_integration_test.cc b/test/extensions/filters/http/fault/fault_filter_integration_test.cc index b6ad8536ece7..a8cd15e97381 100644 --- a/test/extensions/filters/http/fault/fault_filter_integration_test.cc +++ b/test/extensions/filters/http/fault/fault_filter_integration_test.cc @@ -83,6 +83,7 @@ name: fault EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.aborts_injected")->value()); EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.delays_injected")->value()); EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.response_rl_injected")->value()); + EXPECT_EQ(0UL, test_server_->gauge("http.config_test.fault.active_faults")->value()); } // Response rate limited with no trailers. @@ -92,6 +93,10 @@ TEST_P(FaultIntegrationTestAllProtocols, ResponseRateLimitNoTrailers) { IntegrationStreamDecoderPtr decoder = codec_client_->makeHeaderOnlyRequest(default_request_headers_); waitForNextUpstreamRequest(); + + // Active faults gauge is incremented. 
+ EXPECT_EQ(1UL, test_server_->gauge("http.config_test.fault.active_faults")->value()); + upstream_request_->encodeHeaders(default_response_headers_, false); Buffer::OwnedImpl data(std::string(127, 'a')); upstream_request_->encodeData(data, true); @@ -107,6 +112,7 @@ TEST_P(FaultIntegrationTestAllProtocols, ResponseRateLimitNoTrailers) { EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.aborts_injected")->value()); EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.delays_injected")->value()); EXPECT_EQ(1UL, test_server_->counter("http.config_test.fault.response_rl_injected")->value()); + EXPECT_EQ(0UL, test_server_->gauge("http.config_test.fault.active_faults")->value()); } // Request delay and response rate limited via header configuration. @@ -125,6 +131,8 @@ TEST_P(FaultIntegrationTestAllProtocols, HeaderFaultConfig) { // At least 200ms of simulated time should have elapsed before we got the upstream request. EXPECT_LE(std::chrono::milliseconds(200), simTime().monotonicTime() - current_time); + // Active faults gauge is incremented. + EXPECT_EQ(1UL, test_server_->gauge("http.config_test.fault.active_faults")->value()); // Verify response body throttling. upstream_request_->encodeHeaders(default_response_headers_, false); @@ -142,6 +150,7 @@ TEST_P(FaultIntegrationTestAllProtocols, HeaderFaultConfig) { EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.aborts_injected")->value()); EXPECT_EQ(1UL, test_server_->counter("http.config_test.fault.delays_injected")->value()); EXPECT_EQ(1UL, test_server_->counter("http.config_test.fault.response_rl_injected")->value()); + EXPECT_EQ(0UL, test_server_->gauge("http.config_test.fault.active_faults")->value()); } // Request abort controlled via header configuration. @@ -163,6 +172,7 @@ TEST_P(FaultIntegrationTestAllProtocols, HeaderFaultAbortConfig) { EXPECT_EQ(1UL, test_server_->counter("http.config_test.fault.aborts_injected")->value()); EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.delays_injected")->value()); EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.response_rl_injected")->value()); + EXPECT_EQ(0UL, test_server_->gauge("http.config_test.fault.active_faults")->value()); } // Request faults controlled via header configuration. @@ -188,6 +198,7 @@ TEST_P(FaultIntegrationTestAllProtocols, HeaderFaultsConfig0PercentageHeaders) { EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.aborts_injected")->value()); EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.delays_injected")->value()); EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.response_rl_injected")->value()); + EXPECT_EQ(0UL, test_server_->gauge("http.config_test.fault.active_faults")->value()); } // Request faults controlled via header configuration. @@ -211,6 +222,7 @@ TEST_P(FaultIntegrationTestAllProtocols, HeaderFaultsConfig100PercentageHeaders) EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.aborts_injected")->value()); EXPECT_EQ(1UL, test_server_->counter("http.config_test.fault.delays_injected")->value()); EXPECT_EQ(1UL, test_server_->counter("http.config_test.fault.response_rl_injected")->value()); + EXPECT_EQ(0UL, test_server_->gauge("http.config_test.fault.active_faults")->value()); } // Header configuration with no headers, so no fault injection. 
@@ -223,6 +235,7 @@ TEST_P(FaultIntegrationTestAllProtocols, HeaderFaultConfigNoHeaders) { EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.aborts_injected")->value()); EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.delays_injected")->value()); EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.response_rl_injected")->value()); + EXPECT_EQ(0UL, test_server_->gauge("http.config_test.fault.active_faults")->value()); } // Request abort with grpc status, controlled via header configuration. @@ -251,6 +264,7 @@ TEST_P(FaultIntegrationTestAllProtocols, HeaderFaultAbortGrpcConfig) { EXPECT_EQ(1UL, test_server_->counter("http.config_test.fault.aborts_injected")->value()); EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.delays_injected")->value()); EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.response_rl_injected")->value()); + EXPECT_EQ(0UL, test_server_->gauge("http.config_test.fault.active_faults")->value()); } // Request abort with grpc status, controlled via header configuration. @@ -273,6 +287,7 @@ TEST_P(FaultIntegrationTestAllProtocols, HeaderFaultAbortGrpcConfig0PercentageHe EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.aborts_injected")->value()); EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.delays_injected")->value()); EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.response_rl_injected")->value()); + EXPECT_EQ(0UL, test_server_->gauge("http.config_test.fault.active_faults")->value()); } // Request abort with grpc status, controlled via configuration. @@ -300,6 +315,7 @@ TEST_P(FaultIntegrationTestAllProtocols, FaultAbortGrpcConfig) { EXPECT_EQ(1UL, test_server_->counter("http.config_test.fault.aborts_injected")->value()); EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.delays_injected")->value()); EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.response_rl_injected")->value()); + EXPECT_EQ(0UL, test_server_->gauge("http.config_test.fault.active_faults")->value()); } // Fault integration tests that run with HTTP/2 only, used for fully testing trailers. @@ -316,6 +332,10 @@ TEST_P(FaultIntegrationTestHttp2, ResponseRateLimitTrailersBodyFlushed) { IntegrationStreamDecoderPtr decoder = codec_client_->makeHeaderOnlyRequest(default_request_headers_); waitForNextUpstreamRequest(); + + // Active fault gauge is incremented. + EXPECT_EQ(1UL, test_server_->gauge("http.config_test.fault.active_faults")->value()); + upstream_request_->encodeHeaders(default_response_headers_, false); Buffer::OwnedImpl data(std::string(127, 'a')); upstream_request_->encodeData(data, false); @@ -336,6 +356,7 @@ TEST_P(FaultIntegrationTestHttp2, ResponseRateLimitTrailersBodyFlushed) { EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.aborts_injected")->value()); EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.delays_injected")->value()); EXPECT_EQ(1UL, test_server_->counter("http.config_test.fault.response_rl_injected")->value()); + EXPECT_EQ(0UL, test_server_->gauge("http.config_test.fault.active_faults")->value()); } // Rate limiting with trailers received before the body has been flushed. 
@@ -363,6 +384,7 @@ TEST_P(FaultIntegrationTestHttp2, ResponseRateLimitTrailersBodyNotFlushed) { EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.aborts_injected")->value()); EXPECT_EQ(0UL, test_server_->counter("http.config_test.fault.delays_injected")->value()); EXPECT_EQ(1UL, test_server_->counter("http.config_test.fault.response_rl_injected")->value()); + EXPECT_EQ(0UL, test_server_->gauge("http.config_test.fault.active_faults")->value()); } } // namespace From d683687d7adc54ed6cb71306e93c280705cbf818 Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Tue, 14 Jul 2020 22:05:43 -0700 Subject: [PATCH 634/909] build: update gperftools to 2.8 (#12087) Signed-off-by: Lizan Zhou --- bazel/repositories.bzl | 1 - bazel/repository_locations.bzl | 9 +++------ 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index cb7b3bfea5d9..a83b8ad416bd 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -814,7 +814,6 @@ def _com_github_gperftools_gperftools(): http_archive( name = "com_github_gperftools_gperftools", build_file_content = BUILD_ALL_CONTENT, - patch_cmds = ["./autogen.sh"], **location ) diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index d2dd2feb2a76..414bd0dca05c 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -168,12 +168,9 @@ DEPENDENCY_REPOSITORIES = dict( use_category = ["test"], ), com_github_gperftools_gperftools = dict( - # TODO(cmluciano): Bump to release 2.8 - # The currently used version is specifically chosen to fix ppc64le builds that require inclusion - # of asm/ptrace.h, and also s390x builds that require special handling of mmap syscall. - sha256 = "97f0bc2b389c29305f5d1d8cc4d95e9212c33b55827ae65476fc761d78e3ec5d", - strip_prefix = "gperftools-gperftools-2.7.90", - urls = ["https://github.com/gperftools/gperftools/archive/gperftools-2.7.90.tar.gz"], + sha256 = "240deacdd628b6459671b83eb0c4db8e97baadf659f25b92e9a078d536bd513e", + strip_prefix = "gperftools-2.8", + urls = ["https://github.com/gperftools/gperftools/releases/download/gperftools-2.8/gperftools-2.8.tar.gz"], use_category = ["test"], ), com_github_grpc_grpc = dict( From 4cd3510237030b7a1fb1a485e6fd71c63817aa17 Mon Sep 17 00:00:00 2001 From: jianwen612 <55008549+jianwen612@users.noreply.github.com> Date: Wed, 15 Jul 2020 07:22:29 -0500 Subject: [PATCH 635/909] [ext_authz_fuzzer] cleaned up the try-catch logic (#12055) Fixed the style in ext_authz fuzzer protobuf file: Added a new line. Cleaned up the try-catch logic in the fuzz test. 
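(Illustration, not part of the patch: the exception types removed from the catch list derive from EnvoyException, so a single catch of the base class, together with the added return, is enough to skip invalid inputs. The sketch below uses invented class names to show the same base-class catch pattern.)

#include <iostream>
#include <stdexcept>

// Stand-ins for a base exception type and a more specific validation error.
struct BaseException : public std::runtime_error {
  using std::runtime_error::runtime_error;
};
struct ValidationException : public BaseException {
  using BaseException::BaseException;
};

void validate(bool ok) {
  if (!ok) {
    throw ValidationException("invalid input");
  }
}

int main() {
  try {
    validate(false);
  } catch (const BaseException& e) {
    // One catch of the common base type handles every derived exception,
    // so per-type catch clauses become unnecessary.
    std::cout << "rejected: " << e.what() << "\n";
    return 0;
  }
  return 0;
}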
Signed-off-by: jianwen --- .../filters/network/ext_authz/ext_authz_fuzz.proto | 2 +- .../filters/network/ext_authz/ext_authz_fuzz_test.cc | 7 +------ 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/test/extensions/filters/network/ext_authz/ext_authz_fuzz.proto b/test/extensions/filters/network/ext_authz/ext_authz_fuzz.proto index 326311003f95..a590752fcbd5 100644 --- a/test/extensions/filters/network/ext_authz/ext_authz_fuzz.proto +++ b/test/extensions/filters/network/ext_authz/ext_authz_fuzz.proto @@ -38,4 +38,4 @@ message ExtAuthzTestCase { envoy.extensions.filters.network.ext_authz.v3.ExtAuthz config = 1 [(validate.rules).message = {required: true}]; repeated Action actions = 2; -} \ No newline at end of file +} diff --git a/test/extensions/filters/network/ext_authz/ext_authz_fuzz_test.cc b/test/extensions/filters/network/ext_authz/ext_authz_fuzz_test.cc index b70ca28272b8..c2e816c748d5 100644 --- a/test/extensions/filters/network/ext_authz/ext_authz_fuzz_test.cc +++ b/test/extensions/filters/network/ext_authz/ext_authz_fuzz_test.cc @@ -57,14 +57,9 @@ Filters::Common::ExtAuthz::CheckStatus resultCaseToCheckStatus( DEFINE_PROTO_FUZZER(const envoy::extensions::filters::network::ext_authz::ExtAuthzTestCase& input) { try { TestUtility::validate(input); - } catch (const ProtoValidationException& e) { - ENVOY_LOG_MISC(debug, "ProtoValidationException: {}", e.what()); - return; - } catch (const ProtobufMessage::DeprecatedProtoFieldException& e) { - ENVOY_LOG_MISC(debug, "DeprecatedProtoFieldException: {}", e.what()); - return; } catch (const EnvoyException& e) { ENVOY_LOG_MISC(debug, "EnvoyException during validation: {}", e.what()); + return; } Stats::TestUtil::TestStore stats_store; From 9f7d44850310999ecbd0cfbe8fadb70db6f258f9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gast=C3=B3n=20Kleiman?= Date: Wed, 15 Jul 2020 05:22:58 -0700 Subject: [PATCH 636/909] Introduce Least Request LB active request bias config (#11252) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add support for making Least Requests LB behave like Round Robin in weighted hosts case Signed-off-by: Gastón Kleiman * Address feedback Signed-off-by: Gastón Kleiman * Perf/logging improvements Signed-off-by: Gastón Kleiman * Address feedback and cleanup BUILD file Signed-off-by: Gastón Kleiman * Make active requests exponent configurable via CDS/runtime Signed-off-by: Gastón Kleiman * Address feedback Signed-off-by: Gastón Kleiman * Validate log message Signed-off-by: Gastón Kleiman * Update cluster memory test golden values Signed-off-by: Gastón Kleiman * Fix method name Signed-off-by: Gastón Kleiman * Explicitly initialize active_request_bias_ Signed-off-by: Gastón Kleiman * Try to make clang-tidy happy Signed-off-by: Gastón Kleiman * Use unique_ptr instead of optional Signed-off-by: Gastón Kleiman * Update stats integration test Signed-off-by: Gastón Kleiman * Check whether memory footprint is reduced without LB changes Signed-off-by: Gastón Kleiman * Use plain double for active request bias Use a plain double instead of a runtime double to store the per-cluster active request bias. Note: The goal of this commit is to evaluate the memory overhead of this approach. A commit with te Least Requests LB changes might follow if we deem the memory overhead of this approach acceptable. 
Signed-off-by: Gastón Kleiman * Revert back to approved implementation using RuntimeDouble Signed-off-by: Gastón Kleiman * Add extra fields to CDS cluster proto to check memory usage Signed-off-by: Gastón Kleiman * Revert "Add extra fields to CDS cluster proto to check memory usage" This reverts commit a6a285dcee9e0fe618286d3dfbfab98c957dd9c7. Signed-off-by: Gastón Kleiman * Add changelog entry Signed-off-by: Gastón Kleiman --- api/envoy/config/cluster/v3/cluster.proto | 25 ++++++ .../config/cluster/v4alpha/cluster.proto | 25 ++++++ .../load_balancing/load_balancers.rst | 24 ++++-- docs/root/version_history/current.rst | 1 + .../envoy/config/cluster/v3/cluster.proto | 25 ++++++ .../config/cluster/v4alpha/cluster.proto | 25 ++++++ source/common/runtime/BUILD | 1 + source/common/runtime/runtime_protos.h | 2 + source/common/upstream/BUILD | 1 + source/common/upstream/load_balancer_impl.h | 74 +++++++++++++--- test/common/upstream/BUILD | 2 + .../upstream/load_balancer_impl_test.cc | 85 +++++++++++++++++++ test/integration/stats_integration_test.cc | 6 +- 13 files changed, 277 insertions(+), 19 deletions(-) diff --git a/api/envoy/config/cluster/v3/cluster.proto b/api/envoy/config/cluster/v3/cluster.proto index fdaed973a16c..6123bd59c14e 100644 --- a/api/envoy/config/cluster/v3/cluster.proto +++ b/api/envoy/config/cluster/v3/cluster.proto @@ -334,6 +334,31 @@ message Cluster { // The number of random healthy hosts from which the host with the fewest active requests will // be chosen. Defaults to 2 so that we perform two-choice selection if the field is not set. google.protobuf.UInt32Value choice_count = 1 [(validate.rules).uint32 = {gte: 2}]; + + // The following formula is used to calculate the dynamic weights when hosts have different load + // balancing weights: + // + // `weight = load_balancing_weight / (active_requests + 1)^active_request_bias` + // + // The larger the active request bias is, the more aggressively active requests will lower the + // effective weight when all host weights are not equal. + // + // `active_request_bias` must be greater than or equal to 0.0. + // + // When `active_request_bias == 0.0` the Least Request Load Balancer doesn't consider the number + // of active requests at the time it picks a host and behaves like the Round Robin Load + // Balancer. + // + // When `active_request_bias > 0.0` the Least Request Load Balancer scales the load balancing + // weight by the number of active requests at the time it does a pick. + // + // The value is cached for performance reasons and refreshed whenever one of the Load Balancer's + // host sets changes, e.g., whenever there is a host membership update or a host load balancing + // weight change. + // + // .. note:: + // This setting only takes effect if all host weights are not equal. + core.v3.RuntimeDouble active_request_bias = 2; } // Specific configuration for the :ref:`RingHash` diff --git a/api/envoy/config/cluster/v4alpha/cluster.proto b/api/envoy/config/cluster/v4alpha/cluster.proto index a6e58aef1d4c..6c1302d28941 100644 --- a/api/envoy/config/cluster/v4alpha/cluster.proto +++ b/api/envoy/config/cluster/v4alpha/cluster.proto @@ -337,6 +337,31 @@ message Cluster { // The number of random healthy hosts from which the host with the fewest active requests will // be chosen. Defaults to 2 so that we perform two-choice selection if the field is not set. 
google.protobuf.UInt32Value choice_count = 1 [(validate.rules).uint32 = {gte: 2}]; + + // The following formula is used to calculate the dynamic weights when hosts have different load + // balancing weights: + // + // `weight = load_balancing_weight / (active_requests + 1)^active_request_bias` + // + // The larger the active request bias is, the more aggressively active requests will lower the + // effective weight when all host weights are not equal. + // + // `active_request_bias` must be greater than or equal to 0.0. + // + // When `active_request_bias == 0.0` the Least Request Load Balancer doesn't consider the number + // of active requests at the time it picks a host and behaves like the Round Robin Load + // Balancer. + // + // When `active_request_bias > 0.0` the Least Request Load Balancer scales the load balancing + // weight by the number of active requests at the time it does a pick. + // + // The value is cached for performance reasons and refreshed whenever one of the Load Balancer's + // host sets changes, e.g., whenever there is a host membership update or a host load balancing + // weight change. + // + // .. note:: + // This setting only takes effect if all host weights are not equal. + core.v4alpha.RuntimeDouble active_request_bias = 2; } // Specific configuration for the :ref:`RingHash` diff --git a/docs/root/intro/arch_overview/upstream/load_balancing/load_balancers.rst b/docs/root/intro/arch_overview/upstream/load_balancing/load_balancers.rst index 5336dccb14c3..38b6c6fae88a 100644 --- a/docs/root/intro/arch_overview/upstream/load_balancing/load_balancers.rst +++ b/docs/root/intro/arch_overview/upstream/load_balancing/load_balancers.rst @@ -41,11 +41,25 @@ same or different weights. less than or equal to all of the other hosts. * *all weights not equal*: If two or more hosts in the cluster have different load balancing weights, the load balancer shifts into a mode where it uses a weighted round robin schedule in - which weights are dynamically adjusted based on the host's request load at the time of selection - (weight is divided by the current active request count. For example, a host with weight 2 and an - active request count of 4 will have a synthetic weight of 2 / 4 = 0.5). This algorithm provides - good balance at steady state but may not adapt to load imbalance as quickly. Additionally, unlike - P2C, a host will never truly drain, though it will receive fewer requests over time. + which weights are dynamically adjusted based on the host's request load at the time of selection. + + In this case the weights are calculated at the time a host is picked using the following formula: + + `weight = load_balancing_weight / (active_requests + 1)^active_request_bias`. + + :ref:`active_request_bias` + can be configured via runtime and defaults to 1.0. It must be greater than or equal to 0.0. + + The larger the active request bias is, the more aggressively active requests will lower the + effective weight. + + If `active_request_bias` is set to 0.0, the least request load balancer behaves like the round + robin load balancer and ignores the active request count at the time of picking. + + For example, if active_request_bias is 1.0, a host with weight 2 and an active request count of 4 + will have an effective weight of 2 / (4 + 1)^1 = 0.4. This algorithm provides good balance at + steady state but may not adapt to load imbalance as quickly. Additionally, unlike P2C, a host will + never truly drain, though it will receive fewer requests over time. .. 
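(Illustration, not Envoy code: a minimal standalone sketch of the effective-weight formula above; the function and variable names are invented. It evaluates a host with load balancing weight 2 and 4 active requests at several bias values.)

#include <cmath>
#include <cstdint>
#include <iostream>

// weight = load_balancing_weight / (active_requests + 1)^active_request_bias
double effectiveWeight(double lb_weight, uint32_t active_requests, double bias) {
  if (bias == 0.0) {
    return lb_weight; // active requests ignored: behaves like round robin
  }
  return lb_weight / std::pow(active_requests + 1.0, bias);
}

int main() {
  std::cout << effectiveWeight(2, 4, 0.0) << "\n"; // 2    (round-robin behaviour)
  std::cout << effectiveWeight(2, 4, 1.0) << "\n"; // 0.4  (the worked example above)
  std::cout << effectiveWeight(2, 4, 2.0) << "\n"; // 0.08 (larger bias penalizes busy hosts more)
  return 0;
}

As the LeastRequestLoadBalancer::refresh override later in this patch shows, a negative bias supplied via runtime is rejected with a warning and the default of 1.0 is used instead.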
_arch_overview_load_balancing_types_ring_hash: diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 44086e92fc00..7ac7174e12b2 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -32,6 +32,7 @@ Removed Config or Runtime New Features ------------ * grpc-json: support specifying `response_body` field in for `google.api.HttpBody` message. +* load balancer: added a :ref:`configuration` option to specify the active request bias used by the least request load balancer. * tap: added :ref:`generic body matcher` to scan http requests and responses for text or hex patterns. Deprecated diff --git a/generated_api_shadow/envoy/config/cluster/v3/cluster.proto b/generated_api_shadow/envoy/config/cluster/v3/cluster.proto index 32a6c4e3d7a4..cf6b9cb652b3 100644 --- a/generated_api_shadow/envoy/config/cluster/v3/cluster.proto +++ b/generated_api_shadow/envoy/config/cluster/v3/cluster.proto @@ -334,6 +334,31 @@ message Cluster { // The number of random healthy hosts from which the host with the fewest active requests will // be chosen. Defaults to 2 so that we perform two-choice selection if the field is not set. google.protobuf.UInt32Value choice_count = 1 [(validate.rules).uint32 = {gte: 2}]; + + // The following formula is used to calculate the dynamic weights when hosts have different load + // balancing weights: + // + // `weight = load_balancing_weight / (active_requests + 1)^active_request_bias` + // + // The larger the active request bias is, the more aggressively active requests will lower the + // effective weight when all host weights are not equal. + // + // `active_request_bias` must be greater than or equal to 0.0. + // + // When `active_request_bias == 0.0` the Least Request Load Balancer doesn't consider the number + // of active requests at the time it picks a host and behaves like the Round Robin Load + // Balancer. + // + // When `active_request_bias > 0.0` the Least Request Load Balancer scales the load balancing + // weight by the number of active requests at the time it does a pick. + // + // The value is cached for performance reasons and refreshed whenever one of the Load Balancer's + // host sets changes, e.g., whenever there is a host membership update or a host load balancing + // weight change. + // + // .. note:: + // This setting only takes effect if all host weights are not equal. + core.v3.RuntimeDouble active_request_bias = 2; } // Specific configuration for the :ref:`RingHash` diff --git a/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto b/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto index a6e58aef1d4c..6c1302d28941 100644 --- a/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto +++ b/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto @@ -337,6 +337,31 @@ message Cluster { // The number of random healthy hosts from which the host with the fewest active requests will // be chosen. Defaults to 2 so that we perform two-choice selection if the field is not set. google.protobuf.UInt32Value choice_count = 1 [(validate.rules).uint32 = {gte: 2}]; + + // The following formula is used to calculate the dynamic weights when hosts have different load + // balancing weights: + // + // `weight = load_balancing_weight / (active_requests + 1)^active_request_bias` + // + // The larger the active request bias is, the more aggressively active requests will lower the + // effective weight when all host weights are not equal. 
+ // + // `active_request_bias` must be greater than or equal to 0.0. + // + // When `active_request_bias == 0.0` the Least Request Load Balancer doesn't consider the number + // of active requests at the time it picks a host and behaves like the Round Robin Load + // Balancer. + // + // When `active_request_bias > 0.0` the Least Request Load Balancer scales the load balancing + // weight by the number of active requests at the time it does a pick. + // + // The value is cached for performance reasons and refreshed whenever one of the Load Balancer's + // host sets changes, e.g., whenever there is a host membership update or a host load balancing + // weight change. + // + // .. note:: + // This setting only takes effect if all host weights are not equal. + core.v4alpha.RuntimeDouble active_request_bias = 2; } // Specific configuration for the :ref:`RingHash` diff --git a/source/common/runtime/BUILD b/source/common/runtime/BUILD index 3c3201b69a9e..61b63093da25 100644 --- a/source/common/runtime/BUILD +++ b/source/common/runtime/BUILD @@ -34,6 +34,7 @@ envoy_cc_library( ], deps = [ "//include/envoy/runtime:runtime_interface", + "//source/common/protobuf:utility_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/type/v3:pkg_cc_proto", ], diff --git a/source/common/runtime/runtime_protos.h b/source/common/runtime/runtime_protos.h index 06b0e5816d5a..855b145121db 100644 --- a/source/common/runtime/runtime_protos.h +++ b/source/common/runtime/runtime_protos.h @@ -35,6 +35,8 @@ class Double { : runtime_key_(double_proto.runtime_key()), default_value_(double_proto.default_value()), runtime_(runtime) {} + const std::string& runtimeKey() const { return runtime_key_; } + double value() const { return runtime_.snapshot().getDouble(runtime_key_, default_value_); } private: diff --git a/source/common/upstream/BUILD b/source/common/upstream/BUILD index 4182da69370f..14eb0236b542 100644 --- a/source/common/upstream/BUILD +++ b/source/common/upstream/BUILD @@ -182,6 +182,7 @@ envoy_cc_library( "//include/envoy/upstream:upstream_interface", "//source/common/common:assert_lib", "//source/common/protobuf:utility_lib", + "//source/common/runtime:runtime_protos_lib", "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", ], ) diff --git a/source/common/upstream/load_balancer_impl.h b/source/common/upstream/load_balancer_impl.h index acf15027ab30..2ec2f605cbd7 100644 --- a/source/common/upstream/load_balancer_impl.h +++ b/source/common/upstream/load_balancer_impl.h @@ -1,6 +1,8 @@ #pragma once +#include #include +#include #include #include #include @@ -12,6 +14,7 @@ #include "envoy/upstream/upstream.h" #include "common/protobuf/utility.h" +#include "common/runtime/runtime_protos.h" #include "common/upstream/edf_scheduler.h" namespace Envoy { @@ -368,6 +371,8 @@ class EdfLoadBalancerBase : public ZoneAwareLoadBalancerBase { void initialize(); + virtual void refresh(uint32_t priority); + // Seed to allow us to desynchronize load balancers across a fleet. 
If we don't // do this, multiple Envoys that receive an update at the same time (or even // multiple load balancers on the same host) will send requests to @@ -376,7 +381,6 @@ class EdfLoadBalancerBase : public ZoneAwareLoadBalancerBase { const uint64_t seed_; private: - void refresh(uint32_t priority); virtual void refreshHostSource(const HostsSource& source) PURE; virtual double hostWeight(const Host& host) PURE; virtual HostConstSharedPtr unweightedHostPick(const HostVector& hosts_to_use, @@ -438,7 +442,8 @@ class RoundRobinLoadBalancer : public EdfLoadBalancerBase { * The benefit of the Maglev table is at the expense of resolution, memory usage is capped. * Additionally, the Maglev table can be shared amongst all threads. */ -class LeastRequestLoadBalancer : public EdfLoadBalancerBase { +class LeastRequestLoadBalancer : public EdfLoadBalancerBase, + Logger::Loggable { public: LeastRequestLoadBalancer( const PrioritySet& priority_set, const PrioritySet* local_priority_set, ClusterStats& stats, @@ -451,26 +456,71 @@ class LeastRequestLoadBalancer : public EdfLoadBalancerBase { choice_count_( least_request_config.has_value() ? PROTOBUF_GET_WRAPPED_OR_DEFAULT(least_request_config.value(), choice_count, 2) - : 2) { + : 2), + active_request_bias_runtime_( + least_request_config.has_value() && least_request_config->has_active_request_bias() + ? std::make_unique(least_request_config->active_request_bias(), + runtime) + : nullptr) { initialize(); } +protected: + void refresh(uint32_t priority) override { + active_request_bias_ = + active_request_bias_runtime_ != nullptr ? active_request_bias_runtime_->value() : 1.0; + + if (active_request_bias_ < 0.0) { + ENVOY_LOG(warn, "upstream: invalid active request bias supplied (runtime key {}), using 1.0", + active_request_bias_runtime_->runtimeKey()); + active_request_bias_ = 1.0; + } + + EdfLoadBalancerBase::refresh(priority); + } + private: void refreshHostSource(const HostsSource&) override {} double hostWeight(const Host& host) override { - // Here we scale host weight by the number of active requests at the time we do the pick. We - // always add 1 to avoid division by 0. It might be possible to do better by picking two hosts - // off of the schedule, and selecting the one with fewer active requests at the time of - // selection. - // TODO(mattklein123): @htuch brings up the point that how we are scaling weight here might not - // be the only/best way of doing this. Essentially, it makes weight and active requests equally - // important. Are they equally important in practice? There is no right answer here and we might - // want to iterate on this as we gain more experience. - return static_cast(host.weight()) / (host.stats().rq_active_.value() + 1); + // This method is called to calculate the dynamic weight as following when all load balancing + // weights are not equal: + // + // `weight = load_balancing_weight / (active_requests + 1)^active_request_bias` + // + // `active_request_bias` can be configured via runtime and its value is cached in + // `active_request_bias_` to avoid having to do a runtime lookup each time a host weight is + // calculated. + // + // When `active_request_bias == 0.0` we behave like `RoundRobinLoadBalancer` and return the + // host weight without considering the number of active requests at the time we do the pick. + // + // When `active_request_bias > 0.0` we scale the host weight by the number of active + // requests at the time we do the pick. We always add 1 to avoid division by 0. 
+ // + // It might be possible to do better by picking two hosts off of the schedule, and selecting the + // one with fewer active requests at the time of selection. + if (active_request_bias_ == 0.0) { + return host.weight(); + } + + if (active_request_bias_ == 1.0) { + return static_cast(host.weight()) / (host.stats().rq_active_.value() + 1); + } + + return static_cast(host.weight()) / + std::pow(host.stats().rq_active_.value() + 1, active_request_bias_); } HostConstSharedPtr unweightedHostPick(const HostVector& hosts_to_use, const HostsSource& source) override; + const uint32_t choice_count_; + + // The exponent used to calculate host weights can be configured via runtime. We cache it for + // performance reasons and refresh it in `LeastRequestLoadBalancer::refresh(uint32_t priority)` + // whenever a `HostSet` is updated. + double active_request_bias_{}; + + const std::unique_ptr active_request_bias_runtime_; }; /** diff --git a/test/common/upstream/BUILD b/test/common/upstream/BUILD index 7c417471775d..32f280f2d6fb 100644 --- a/test/common/upstream/BUILD +++ b/test/common/upstream/BUILD @@ -194,6 +194,8 @@ envoy_cc_test( "//source/common/upstream:upstream_lib", "//test/mocks/runtime:runtime_mocks", "//test/mocks/upstream:upstream_mocks", + "//test/test_common:logging_lib", + "//test/test_common:test_runtime_lib", "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", ], ) diff --git a/test/common/upstream/load_balancer_impl_test.cc b/test/common/upstream/load_balancer_impl_test.cc index 06e398018e95..ed89540e2968 100644 --- a/test/common/upstream/load_balancer_impl_test.cc +++ b/test/common/upstream/load_balancer_impl_test.cc @@ -13,6 +13,8 @@ #include "test/common/upstream/utility.h" #include "test/mocks/runtime/mocks.h" #include "test/mocks/upstream/mocks.h" +#include "test/test_common/logging.h" +#include "test/test_common/test_runtime.h" #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -1532,6 +1534,89 @@ TEST_P(LeastRequestLoadBalancerTest, WeightImbalance) { EXPECT_EQ(hostSet().healthy_hosts_[0], lb_.chooseHost(nullptr)); } +// Validate that the load balancer defaults to an active request bias value of 1.0 if the runtime +// value is invalid (less than 0.0). +TEST_P(LeastRequestLoadBalancerTest, WeightImbalanceWithInvalidActiveRequestBias) { + envoy::config::cluster::v3::Cluster::LeastRequestLbConfig lr_lb_config; + lr_lb_config.mutable_active_request_bias()->set_runtime_key("ar_bias"); + lr_lb_config.mutable_active_request_bias()->set_default_value(1.0); + LeastRequestLoadBalancer lb_2{priority_set_, nullptr, stats_, runtime_, + random_, common_config_, lr_lb_config}; + + EXPECT_CALL(runtime_.snapshot_, getDouble("ar_bias", 1.0)).WillRepeatedly(Return(-1.0)); + + hostSet().healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", 1), + makeTestHost(info_, "tcp://127.0.0.1:81", 2)}; + + hostSet().hosts_ = hostSet().healthy_hosts_; + + // Trigger callbacks. The added/removed lists are not relevant. + EXPECT_LOG_CONTAINS( + "warn", "upstream: invalid active request bias supplied (runtime key ar_bias), using 1.0", + hostSet().runCallbacks({}, {})); + + EXPECT_CALL(random_, random()).WillRepeatedly(Return(0)); + + // We should see 2:1 ratio for hosts[1] to hosts[0]. 
+ EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + + // Bringing hosts[1] to an active request should yield a 1:1 ratio. + hostSet().healthy_hosts_[1]->stats().rq_active_.set(1); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + + // Settings hosts[0] to an active request and hosts[1] to no active requests should yield a 4:1 + // ratio. + hostSet().healthy_hosts_[0]->stats().rq_active_.set(1); + hostSet().healthy_hosts_[1]->stats().rq_active_.set(0); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); +} + +TEST_P(LeastRequestLoadBalancerTest, WeightImbalanceWithCustomActiveRequestBias) { + // Create a load balancer with a custom active request bias. + envoy::config::cluster::v3::Cluster::LeastRequestLbConfig lr_lb_config; + lr_lb_config.mutable_active_request_bias()->set_runtime_key("ar_bias"); + lr_lb_config.mutable_active_request_bias()->set_default_value(1.0); + LeastRequestLoadBalancer lb_2{priority_set_, nullptr, stats_, runtime_, + random_, common_config_, lr_lb_config}; + + EXPECT_CALL(runtime_.snapshot_, getDouble("ar_bias", 1.0)).WillRepeatedly(Return(0.0)); + + hostSet().healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", 1), + makeTestHost(info_, "tcp://127.0.0.1:81", 2)}; + + hostSet().hosts_ = hostSet().healthy_hosts_; + hostSet().runCallbacks({}, {}); // Trigger callbacks. The added/removed lists are not relevant. + + EXPECT_CALL(random_, random()).WillRepeatedly(Return(0)); + + // We should see 2:1 ratio for hosts[1] to hosts[0], regardless of the active request count. 
+ hostSet().healthy_hosts_[1]->stats().rq_active_.set(1); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[0], lb_2.chooseHost(nullptr)); + EXPECT_EQ(hostSet().healthy_hosts_[1], lb_2.chooseHost(nullptr)); +} + TEST_P(LeastRequestLoadBalancerTest, WeightImbalanceCallbacks) { hostSet().healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", 1), makeTestHost(info_, "tcp://127.0.0.1:81", 2)}; diff --git a/test/integration/stats_integration_test.cc b/test/integration/stats_integration_test.cc index d1e19cb32ab1..904a7f94504d 100644 --- a/test/integration/stats_integration_test.cc +++ b/test/integration/stats_integration_test.cc @@ -282,6 +282,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithFakeSymbolTable) { // 2020/06/10 11561 44491 44811 Make upstreams pluggable // 2020/06/29 11751 44715 46000 Improve time complexity of removing callback handle // in callback manager. + // 2020/07/07 11252 44971 46000 Introduce Least Request LB active request bias config // Note: when adjusting this value: EXPECT_MEMORY_EQ is active only in CI // 'release' builds, where we control the platform and tool-chain. So you @@ -299,7 +300,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithFakeSymbolTable) { // We only run the exact test for ipv6 because ipv4 in some cases may allocate a // different number of bytes. We still run the approximate test. if (ip_version_ != Network::Address::IpVersion::v6) { - EXPECT_MEMORY_EQ(m_per_cluster, 44715); + EXPECT_MEMORY_EQ(m_per_cluster, 44971); } EXPECT_MEMORY_LE(m_per_cluster, 46000); // Round up to allow platform variations. } @@ -353,6 +354,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithRealSymbolTable) { // 2020/06/10 11561 36603 36923 Make upstreams pluggable // 2020/06/29 11751 36827 38000 Improve time complexity of removing callback handle. // in callback manager. + // 2020/07/07 11252 37083 38000 Introduce Least Request LB active request bias config // Note: when adjusting this value: EXPECT_MEMORY_EQ is active only in CI // 'release' builds, where we control the platform and tool-chain. So you @@ -370,7 +372,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithRealSymbolTable) { // We only run the exact test for ipv6 because ipv4 in some cases may allocate a // different number of bytes. We still run the approximate test. if (ip_version_ != Network::Address::IpVersion::v6) { - EXPECT_MEMORY_EQ(m_per_cluster, 36827); + EXPECT_MEMORY_EQ(m_per_cluster, 37083); } EXPECT_MEMORY_LE(m_per_cluster, 38000); // Round up to allow platform variations. } From dee8b8d03cbf6975c70449b6a23cf00ef9af5e1c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=A5=81=E6=97=A0=E5=BF=A7?= Date: Thu, 16 Jul 2020 00:37:55 +0800 Subject: [PATCH 637/909] =?UTF-8?q?Lua:=20Change=20the=20TLS=20callback=20?= =?UTF-8?q?function=20type=20of=20ThreadLocalState=20to=20Upd=E2=80=A6=20(?= =?UTF-8?q?#11944)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Change the type of ThreadLocalState's TLS callback to UpdateCb. 
Through this method, we can avoid capturing `this` (the ThreadLocalState instance) in the callback function, and avoid memory safety problems caused by the inconsistency between the lifetime of the ThreadLocalState instance and the lifetime of the callback function. Signed-off-by: wbpcode --- source/extensions/filters/common/lua/lua.cc | 5 +- source/extensions/filters/common/lua/lua.h | 7 ++- test/extensions/filters/common/lua/BUILD | 1 + .../extensions/filters/common/lua/lua_test.cc | 51 +++++++++++++++++++ 4 files changed, 60 insertions(+), 4 deletions(-) diff --git a/source/extensions/filters/common/lua/lua.cc b/source/extensions/filters/common/lua/lua.cc index 02a45f817ec5..c907fef9fd6a 100644 --- a/source/extensions/filters/common/lua/lua.cc +++ b/source/extensions/filters/common/lua/lua.cc @@ -71,8 +71,8 @@ int ThreadLocalState::getGlobalRef(uint64_t slot) { } uint64_t ThreadLocalState::registerGlobal(const std::string& global) { - tls_slot_->runOnAllThreads([this, global]() { - LuaThreadLocal& tls = tls_slot_->getTyped(); + tls_slot_->runOnAllThreads([global](ThreadLocal::ThreadLocalObjectSharedPtr previous) { + LuaThreadLocal& tls = *std::dynamic_pointer_cast(previous); lua_getglobal(tls.state_.get(), global.c_str()); if (lua_isfunction(tls.state_.get(), -1)) { tls.global_slots_.push_back(luaL_ref(tls.state_.get(), LUA_REGISTRYINDEX)); @@ -81,6 +81,7 @@ uint64_t ThreadLocalState::registerGlobal(const std::string& global) { lua_pop(tls.state_.get(), 1); tls.global_slots_.push_back(LUA_REFNIL); } + return previous; }); return current_global_slot_++; diff --git a/source/extensions/filters/common/lua/lua.h b/source/extensions/filters/common/lua/lua.h index b9bb7caa157e..7071b375303f 100644 --- a/source/extensions/filters/common/lua/lua.h +++ b/source/extensions/filters/common/lua/lua.h @@ -386,8 +386,11 @@ class ThreadLocalState : Logger::Loggable { * all threaded workers.
*/ template void registerType() { - tls_slot_->runOnAllThreads( - [this]() { T::registerType(tls_slot_->getTyped().state_.get()); }); + tls_slot_->runOnAllThreads([](ThreadLocal::ThreadLocalObjectSharedPtr previous) { + LuaThreadLocal& tls = *std::dynamic_pointer_cast(previous); + T::registerType(tls.state_.get()); + return previous; + }); } /** diff --git a/test/extensions/filters/common/lua/BUILD b/test/extensions/filters/common/lua/BUILD index b6d7bfecd6d5..88d42f01aab0 100644 --- a/test/extensions/filters/common/lua/BUILD +++ b/test/extensions/filters/common/lua/BUILD @@ -14,6 +14,7 @@ envoy_cc_test( srcs = ["lua_test.cc"], tags = ["skip_on_windows"], deps = [ + "//source/common/thread_local:thread_local_lib", "//source/extensions/filters/common/lua:lua_lib", "//test/mocks:common_lib", "//test/mocks/thread_local:thread_local_mocks", diff --git a/test/extensions/filters/common/lua/lua_test.cc b/test/extensions/filters/common/lua/lua_test.cc index b5770a0b20d7..5f4462e7d3c4 100644 --- a/test/extensions/filters/common/lua/lua_test.cc +++ b/test/extensions/filters/common/lua/lua_test.cc @@ -1,5 +1,7 @@ #include +#include "common/thread_local/thread_local_impl.h" + #include "extensions/filters/common/lua/lua.h" #include "test/mocks/common.h" @@ -157,6 +159,55 @@ TEST_F(LuaTest, MarkDead) { lua_gc(cr1->luaState(), LUA_GCCOLLECT, 0); } +class ThreadSafeTest : public testing::Test { +public: + ThreadSafeTest() + : api_(Api::createApiForTest()), main_dispatcher_(api_->allocateDispatcher("main")), + worker_dispatcher_(api_->allocateDispatcher("worker")) {} + + // Use real dispatchers to verify that callback functions can be executed correctly. + Api::ApiPtr api_; + Event::DispatcherPtr main_dispatcher_; + Event::DispatcherPtr worker_dispatcher_; + ThreadLocal::InstanceImpl tls_; + + std::unique_ptr state_; +}; + +// Test whether ThreadLocalState can be safely released. +TEST_F(ThreadSafeTest, StateDestructedBeforeWorkerRun) { + const std::string SCRIPT{R"EOF( + function HelloWorld() + print("Hello World!") + end + )EOF"}; + + tls_.registerThread(*main_dispatcher_, true); + EXPECT_EQ(main_dispatcher_.get(), &tls_.dispatcher()); + tls_.registerThread(*worker_dispatcher_, false); + + // Some callback functions waiting to be executed will be added to the dispatcher of the Worker + // thread. The callback functions in the main thread will be executed directly. + state_ = std::make_unique(SCRIPT, tls_); + state_->registerType(); + + main_dispatcher_->run(Event::Dispatcher::RunType::Block); + + // Destroy state_. + state_.reset(nullptr); + + // Start a new worker thread to execute the callback functions in the worker dispatcher. + Thread::ThreadPtr thread = Thread::threadFactoryForTest().createThread([this]() { + worker_dispatcher_->run(Event::Dispatcher::RunType::Block); + // Verify we have the expected dispatcher for the new worker thread. + EXPECT_EQ(worker_dispatcher_.get(), &tls_.dispatcher()); + }); + thread->join(); + + tls_.shutdownGlobalThreading(); + tls_.shutdownThread(); +} + } // namespace } // namespace Lua } // namespace Common From 7feaadb004dc3d577fb1c0f4f383094a0732ee36 Mon Sep 17 00:00:00 2001 From: James Adam Buckland Date: Wed, 15 Jul 2020 12:52:11 -0400 Subject: [PATCH 638/909] Revert "pgv: update (#12025)" (#12100) This reverts commit ca7dc5344765f3e7fc3c1a47927e8c1e3e6cefd3. This PGV upgrade is breaking my local build. 
Risk Level: Low Testing: N/a Signed-off-by: James Buckland --- api/bazel/repository_locations.bzl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/api/bazel/repository_locations.bzl b/api/bazel/repository_locations.bzl index 38bdd08091bb..0a0379f7685e 100644 --- a/api/bazel/repository_locations.bzl +++ b/api/bazel/repository_locations.bzl @@ -4,8 +4,8 @@ BAZEL_SKYLIB_SHA256 = "1dde365491125a3db70731e25658dfdd3bc5dbdfd11b840b3e987ecf0 OPENCENSUS_PROTO_GIT_SHA = "be218fb6bd674af7519b1850cdf8410d8cbd48e8" # Dec 20, 2019 OPENCENSUS_PROTO_SHA256 = "e3bbdc94375e86c0edfb2fc5851507e08a3f26ee725ffff7c5c0e73264bdfcde" -PGV_GIT_SHA = "ef00e9c655af0fbc7fa159ca44647d01794b3251" # July 9, 2020 -PGV_SHA256 = "55fcf809ac85d851fbc488b2e25632e74a150567371225f9b0b2c2eaa4f15a0a" +PGV_GIT_SHA = "278964a8052f96a2f514add0298098f63fb7f47f" # June 9, 2020 +PGV_SHA256 = "e368733c9fb7f8489591ffaf269170d7658cc0cd1ee322b601512b769446d3c8" GOOGLEAPIS_GIT_SHA = "82944da21578a53b74e547774cf62ed31a05b841" # Dec 2, 2019 GOOGLEAPIS_SHA = "a45019af4d3290f02eaeb1ce10990166978c807cb33a9692141a076ba46d1405" From 23b6068a7057cb5c3b1bf990c4d4be7efda128c7 Mon Sep 17 00:00:00 2001 From: ankatare Date: Wed, 15 Jul 2020 22:28:58 +0530 Subject: [PATCH 639/909] V2 v3 fragment change for router directory (#11973) V2 to V3 changes related to test/commom/router/ directory. Risk Level: Low Testing: unit, integration and format testing Docs Changes: NA Relates to #10843 Signed-off-by: Abhay Narayan Katare --- test/common/router/header_formatter_test.cc | 21 +++++++++++---------- test/common/router/rds_impl_test.cc | 18 +++++++++--------- test/common/router/router_ratelimit_test.cc | 15 ++++++++------- 3 files changed, 28 insertions(+), 26 deletions(-) diff --git a/test/common/router/header_formatter_test.cc b/test/common/router/header_formatter_test.cc index 7f5f47b9dba6..f0e7ecd10f89 100644 --- a/test/common/router/header_formatter_test.cc +++ b/test/common/router/header_formatter_test.cc @@ -34,9 +34,10 @@ namespace Envoy { namespace Router { namespace { -static envoy::config::route::v3::Route parseRouteFromV2Yaml(const std::string& yaml) { +static envoy::config::route::v3::Route parseRouteFromV3Yaml(const std::string& yaml, + bool avoid_boosting = true) { envoy::config::route::v3::Route route; - TestUtility::loadFromYaml(yaml, route); + TestUtility::loadFromYaml(yaml, route, false, avoid_boosting); return route; } @@ -931,7 +932,7 @@ match: { prefix: "/new_endpoint" } )EOF"; HeaderParserPtr req_header_parser = - HeaderParser::configure(parseRouteFromV2Yaml(ymal).request_headers_to_add()); + HeaderParser::configure(parseRouteFromV3Yaml(ymal).request_headers_to_add()); Http::TestRequestHeaderMapImpl header_map{{":method", "POST"}}; NiceMock stream_info; req_header_parser->evaluateHeaders(header_map, stream_info); @@ -953,7 +954,7 @@ match: { prefix: "/new_endpoint" } )EOF"; HeaderParserPtr req_header_parser = - HeaderParser::configure(parseRouteFromV2Yaml(ymal).request_headers_to_add()); + HeaderParser::configure(parseRouteFromV3Yaml(ymal).request_headers_to_add()); Http::TestRequestHeaderMapImpl header_map{{":method", "POST"}}; std::shared_ptr> host( new NiceMock()); @@ -979,7 +980,7 @@ match: { prefix: "/new_endpoint" } )EOF"; HeaderParserPtr req_header_parser = - HeaderParser::configure(parseRouteFromV2Yaml(ymal).request_headers_to_add()); + HeaderParser::configure(parseRouteFromV3Yaml(ymal).request_headers_to_add()); Http::TestRequestHeaderMapImpl header_map{{":method", "POST"}}; NiceMock stream_info; 
req_header_parser->evaluateHeaders(header_map, stream_info); @@ -1023,7 +1024,7 @@ match: { prefix: "/new_endpoint" } request_headers_to_remove: ["x-nope"] )EOF"; - const auto route = parseRouteFromV2Yaml(yaml); + const auto route = parseRouteFromV3Yaml(yaml); HeaderParserPtr req_header_parser = HeaderParser::configure(route.request_headers_to_add(), route.request_headers_to_remove()); Http::TestRequestHeaderMapImpl header_map{ @@ -1118,7 +1119,7 @@ match: { prefix: "/new_endpoint" } )EOF"; // Disable append mode. - envoy::config::route::v3::Route route = parseRouteFromV2Yaml(ymal); + envoy::config::route::v3::Route route = parseRouteFromV3Yaml(ymal); route.mutable_request_headers_to_add(0)->mutable_append()->set_value(false); route.mutable_request_headers_to_add(1)->mutable_append()->set_value(false); route.mutable_request_headers_to_add(2)->mutable_append()->set_value(false); @@ -1211,7 +1212,7 @@ match: { prefix: "/new_endpoint" } response_headers_to_remove: ["x-nope"] )EOF"; - const auto route = parseRouteFromV2Yaml(yaml); + const auto route = parseRouteFromV3Yaml(yaml); HeaderParserPtr resp_header_parser = HeaderParser::configure(route.response_headers_to_add(), route.response_headers_to_remove()); Http::TestRequestHeaderMapImpl header_map{ @@ -1262,7 +1263,7 @@ match: { prefix: "/new_endpoint" } request_headers_to_remove: ["x-foo-header"] )EOF"; - const auto route = parseRouteFromV2Yaml(yaml); + const auto route = parseRouteFromV3Yaml(yaml); HeaderParserPtr req_header_parser = HeaderParser::configure(route.request_headers_to_add(), route.request_headers_to_remove()); Http::TestRequestHeaderMapImpl header_map{{"x-foo-header", "foo"}}; @@ -1284,7 +1285,7 @@ match: { prefix: "/new_endpoint" } response_headers_to_remove: ["x-foo-header"] )EOF"; - const auto route = parseRouteFromV2Yaml(yaml); + const auto route = parseRouteFromV3Yaml(yaml); HeaderParserPtr resp_header_parser = HeaderParser::configure(route.response_headers_to_add(), route.response_headers_to_remove()); Http::TestResponseHeaderMapImpl header_map{{"x-foo-header", "foo"}}; diff --git a/test/common/router/rds_impl_test.cc b/test/common/router/rds_impl_test.cc index 73342acededd..2116ae2a90b9 100644 --- a/test/common/router/rds_impl_test.cc +++ b/test/common/router/rds_impl_test.cc @@ -358,9 +358,9 @@ class RouteConfigProviderManagerImplTest : public RdsTestBase { }; envoy::config::route::v3::RouteConfiguration -parseRouteConfigurationFromV2Yaml(const std::string& yaml) { +parseRouteConfigurationFromV3Yaml(const std::string& yaml, bool avoid_boosting = true) { envoy::config::route::v3::RouteConfiguration route_config; - TestUtility::loadFromYaml(yaml, route_config, true); + TestUtility::loadFromYaml(yaml, route_config, true, avoid_boosting); return route_config; } @@ -394,7 +394,7 @@ name: foo // Only static route. 
RouteConfigProviderPtr static_config = route_config_provider_manager_->createStaticRouteConfigProvider( - parseRouteConfigurationFromV2Yaml(config_yaml), server_factory_context_, + parseRouteConfigurationFromV3Yaml(config_yaml), server_factory_context_, validation_visitor_); message_ptr = server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_["routes"](); @@ -403,7 +403,7 @@ name: foo TestUtility::loadFromYaml(R"EOF( static_route_configs: - route_config: - "@type": type.googleapis.com/envoy.api.v2.RouteConfiguration + "@type": type.googleapis.com/envoy.config.route.v3.RouteConfiguration name: foo virtual_hosts: - name: bar @@ -430,7 +430,7 @@ name: foo "version_info": "1", "resources": [ { - "@type": "type.googleapis.com/envoy.api.v2.RouteConfiguration", + "@type": "type.googleapis.com/envoy.config.route.v3.RouteConfiguration", "name": "foo_route_config", "virtual_hosts": null } @@ -451,7 +451,7 @@ name: foo TestUtility::loadFromYaml(R"EOF( static_route_configs: - route_config: - "@type": type.googleapis.com/envoy.api.v2.RouteConfiguration + "@type": type.googleapis.com/envoy.config.route.v3.RouteConfiguration name: foo virtual_hosts: - name: bar @@ -465,7 +465,7 @@ name: foo dynamic_route_configs: - version_info: "1" route_config: - "@type": type.googleapis.com/envoy.api.v2.RouteConfiguration + "@type": type.googleapis.com/envoy.config.route.v3.RouteConfiguration name: foo_route_config virtual_hosts: last_updated: @@ -484,7 +484,7 @@ TEST_F(RouteConfigProviderManagerImplTest, Basic) { EXPECT_FALSE(provider_->configInfo().has_value()); - const auto route_config = parseRouteConfigurationFromV2Yaml(R"EOF( + const auto route_config = parseRouteConfigurationFromV3Yaml(R"EOF( name: foo_route_config virtual_hosts: - name: bar @@ -565,7 +565,7 @@ TEST_F(RouteConfigProviderManagerImplTest, SameProviderOnTwoInitManager) { EXPECT_EQ(Init::Manager::State::Initializing, real_init_manager.state()); { - const auto route_config = parseRouteConfigurationFromV2Yaml(R"EOF( + const auto route_config = parseRouteConfigurationFromV3Yaml(R"EOF( name: foo_route_config virtual_hosts: - name: bar diff --git a/test/common/router/router_ratelimit_test.cc b/test/common/router/router_ratelimit_test.cc index ee3d16403792..2d0ff4988ae2 100644 --- a/test/common/router/router_ratelimit_test.cc +++ b/test/common/router/router_ratelimit_test.cc @@ -28,15 +28,16 @@ namespace Envoy { namespace Router { namespace { -envoy::config::route::v3::RateLimit parseRateLimitFromV2Yaml(const std::string& yaml_string) { +envoy::config::route::v3::RateLimit parseRateLimitFromV3Yaml(const std::string& yaml_string, + bool avoid_boosting = true) { envoy::config::route::v3::RateLimit rate_limit; - TestUtility::loadFromYaml(yaml_string, rate_limit); + TestUtility::loadFromYaml(yaml_string, rate_limit, false, avoid_boosting); TestUtility::validate(rate_limit); return rate_limit; } TEST(BadRateLimitConfiguration, MissingActions) { - EXPECT_THROW_WITH_REGEX(parseRateLimitFromV2Yaml("{}"), EnvoyException, + EXPECT_THROW_WITH_REGEX(parseRateLimitFromV3Yaml("{}"), EnvoyException, "value must contain at least"); } @@ -46,7 +47,7 @@ TEST(BadRateLimitConfiguration, ActionsMissingRequiredFields) { - request_headers: {} )EOF"; - EXPECT_THROW_WITH_REGEX(parseRateLimitFromV2Yaml(yaml_one), EnvoyException, + EXPECT_THROW_WITH_REGEX(parseRateLimitFromV3Yaml(yaml_one), EnvoyException, "value length must be at least"); const std::string yaml_two = R"EOF( @@ -55,7 +56,7 @@ TEST(BadRateLimitConfiguration, ActionsMissingRequiredFields) { 
header_name: test )EOF"; - EXPECT_THROW_WITH_REGEX(parseRateLimitFromV2Yaml(yaml_two), EnvoyException, + EXPECT_THROW_WITH_REGEX(parseRateLimitFromV3Yaml(yaml_two), EnvoyException, "value length must be at least"); const std::string yaml_three = R"EOF( @@ -64,7 +65,7 @@ TEST(BadRateLimitConfiguration, ActionsMissingRequiredFields) { descriptor_key: test )EOF"; - EXPECT_THROW_WITH_REGEX(parseRateLimitFromV2Yaml(yaml_three), EnvoyException, + EXPECT_THROW_WITH_REGEX(parseRateLimitFromV3Yaml(yaml_three), EnvoyException, "value length must be at least"); } @@ -268,7 +269,7 @@ TEST_F(RateLimitConfiguration, Stages) { class RateLimitPolicyEntryTest : public testing::Test { public: void setupTest(const std::string& yaml) { - rate_limit_entry_ = std::make_unique(parseRateLimitFromV2Yaml(yaml)); + rate_limit_entry_ = std::make_unique(parseRateLimitFromV3Yaml(yaml)); descriptors_.clear(); }

From c469c11a81ca6229f99cf0061c321d2b3dfb4ef2 Mon Sep 17 00:00:00 2001 From: Sam Flattery <44659644+samflattery@users.noreply.github.com> Date: Wed, 15 Jul 2020 18:55:01 +0100 Subject: [PATCH 640/909] fuzz: xDS fuzzer refactor (#12024)

Commit Message: Refactor the xDS fuzzer to break up the replay function and avoid repeated string construction.
Additional Description: Move the code for each action case into its own function to increase readability. Make sure the server is properly initialized per #11871 by immediately sending the route that the first added listener refers to, since the server is only initialized once it has an active listener. This change broke the previous fix for the timeout on xds_corpus/example4, so I've temporarily removed it while I work on a more complex fix. The timeout is caused by `waitForCounterGe("listener_manager.listeners_modified", num_modified_)`, as it seems that changing an active listener to warming does not increment this counter, so I'll probably factor this change into the verifier to avoid too much duplication and complexity in the main fuzzer;
the verifier is pretty much ready apart from the above change so I'll make a new PR once this is merged and I've figured out that fix Risk Level: Low Signed-off-by: Sam Flattery --- .../config_validation/xds_corpus/example4 | 60 ------ test/server/config_validation/xds_fuzz.cc | 194 ++++++++++-------- test/server/config_validation/xds_fuzz.h | 18 +- 3 files changed, 127 insertions(+), 145 deletions(-) delete mode 100644 test/server/config_validation/xds_corpus/example4 diff --git a/test/server/config_validation/xds_corpus/example4 b/test/server/config_validation/xds_corpus/example4 deleted file mode 100644 index 3e87d8b305f1..000000000000 --- a/test/server/config_validation/xds_corpus/example4 +++ /dev/null @@ -1,60 +0,0 @@ -actions { - add_route { - } -} -actions { - remove_route { - route_num: 13107200 - } -} -actions { - add_listener { - listener_num: 1073741824 - route_num: 10752 - } -} -actions { - add_listener { - listener_num: 1073741824 - route_num: 1 - } -} -actions { - remove_route { - route_num: 8 - } -} -actions { - add_listener { - listener_num: 1073741824 - route_num: 10752 - } -} -actions { - remove_listener { - listener_num: 4 - } -} -actions { - remove_listener { - listener_num: 8 - } -} -actions { - remove_route { - route_num: 8 - } -} -actions { - remove_route { - route_num: 13107200 - } -} -actions { - add_listener { - listener_num: 8 - } -} -config { - sotw_or_delta: DELTA -} diff --git a/test/server/config_validation/xds_fuzz.cc b/test/server/config_validation/xds_fuzz.cc index 27ab4fa61053..6ff1c7cff283 100644 --- a/test/server/config_validation/xds_fuzz.cc +++ b/test/server/config_validation/xds_fuzz.cc @@ -20,17 +20,15 @@ XdsFuzzTest::buildClusterLoadAssignment(const std::string& name) { fake_upstreams_[0]->localAddress()->ip()->port(), api_version_); } -envoy::config::listener::v3::Listener XdsFuzzTest::buildListener(uint32_t listener_num, - uint32_t route_num) { - std::string name = absl::StrCat("listener_", listener_num % ListenersMax); - std::string route = absl::StrCat("route_config_", route_num % RoutesMax); - return ConfigHelper::buildListener( - name, route, Network::Test::getLoopbackAddressString(ip_version_), "ads_test", api_version_); +envoy::config::listener::v3::Listener XdsFuzzTest::buildListener(std::string listener_name, + std::string route_name) { + return ConfigHelper::buildListener(listener_name, route_name, + Network::Test::getLoopbackAddressString(ip_version_), + "ads_test", api_version_); } -envoy::config::route::v3::RouteConfiguration XdsFuzzTest::buildRouteConfig(uint32_t route_num) { - std::string route = absl::StrCat("route_config_", route_num % RoutesMax); - return ConfigHelper::buildRouteConfig(route, "cluster_0", api_version_); +envoy::config::route::v3::RouteConfiguration XdsFuzzTest::buildRouteConfig(std::string route_name) { + return ConfigHelper::buildRouteConfig(route_name, "cluster_0", api_version_); } // helper functions to send API responses @@ -38,7 +36,7 @@ void XdsFuzzTest::updateListener( const std::vector& listeners, const std::vector& added_or_updated, const std::vector& removed) { - ENVOY_LOG_MISC(debug, "Sending Listener DiscoveryResponse version {}", version_); + ENVOY_LOG_MISC(info, "Sending Listener DiscoveryResponse version {}", version_); sendDiscoveryResponse(Config::TypeUrl::get().Listener, listeners, added_or_updated, removed, std::to_string(version_)); @@ -48,7 +46,7 @@ void XdsFuzzTest::updateRoute( const std::vector routes, const std::vector& added_or_updated, const std::vector& removed) { - 
ENVOY_LOG_MISC(debug, "Sending Route DiscoveryResponse version {}", version_); + ENVOY_LOG_MISC(info, "Sending Route DiscoveryResponse version {}", version_); sendDiscoveryResponse( Config::TypeUrl::get().RouteConfiguration, routes, added_or_updated, removed, std::to_string(version_)); @@ -64,7 +62,8 @@ XdsFuzzTest::XdsFuzzTest(const test::server::config_validation::XdsTestCase& inp : "DELTA_GRPC", api_version)), actions_(input.actions()), version_(1), api_version_(api_version), - ip_version_(TestEnvironment::getIpVersionsForTest()[0]) { + ip_version_(TestEnvironment::getIpVersionsForTest()[0]), num_added_(0), num_modified_(0), + num_removed_(0) { use_lds_ = false; create_xds_upstream_ = true; tls_xds_upstream_ = false; @@ -111,16 +110,14 @@ void XdsFuzzTest::close() { * @param the listener number to be removed * @return the listener as an optional so that it can be used in a delta request */ -absl::optional XdsFuzzTest::removeListener(uint32_t listener_num) { - std::string match = absl::StrCat("listener_", listener_num % ListenersMax); - +bool XdsFuzzTest::eraseListener(std::string listener_name) { for (auto it = listeners_.begin(); it != listeners_.end(); ++it) { - if (it->name() == match) { + if (it->name() == listener_name) { listeners_.erase(it); - return match; + return true; } } - return {}; + return false; } /** @@ -128,15 +125,87 @@ absl::optional XdsFuzzTest::removeListener(uint32_t listener_num) { * @param the route number to be removed * @return the route as an optional so that it can be used in a delta request */ -absl::optional XdsFuzzTest::removeRoute(uint32_t route_num) { - std::string match = absl::StrCat("route_config_", route_num % RoutesMax); +bool XdsFuzzTest::eraseRoute(std::string route_name) { for (auto it = routes_.begin(); it != routes_.end(); ++it) { - if (it->name() == match) { + if (it->name() == route_name) { routes_.erase(it); - return match; + return true; } } - return {}; + return false; +} + +/** + * send an xDS response to add a listener and update state accordingly + */ +void XdsFuzzTest::addListener(std::string listener_name, std::string route_name) { + ENVOY_LOG_MISC(info, "Adding {} with reference to {}", listener_name, route_name); + bool removed = eraseListener(listener_name); + auto listener = buildListener(listener_name, route_name); + listeners_.push_back(listener); + + updateListener(listeners_, {listener}, {}); + // use waitForAck instead of compareDiscoveryRequest as the client makes + // additional discoveryRequests at launch that we might not want to + // respond to yet + EXPECT_TRUE(waitForAck(Config::TypeUrl::get().Listener, std::to_string(version_))); + if (removed) { + num_modified_++; + test_server_->waitForCounterGe("listener_manager.listener_modified", num_modified_); + } else { + num_added_++; + test_server_->waitForCounterGe("listener_manager.listener_added", num_added_); + } +} + +/** + * send an xDS response to remove a listener and update state accordingly + */ +void XdsFuzzTest::removeListener(std::string listener_name) { + ENVOY_LOG_MISC(info, "Removing {}", listener_name); + bool removed = eraseListener(listener_name); + + if (removed) { + num_removed_++; + updateListener(listeners_, {}, {listener_name}); + EXPECT_TRUE(waitForAck(Config::TypeUrl::get().Listener, std::to_string(version_))); + test_server_->waitForCounterGe("listener_manager.listener_removed", num_removed_); + } +} + +/** + * send an xDS response to add a route and update state accordingly + */ +void XdsFuzzTest::addRoute(std::string route_name) { + 
ENVOY_LOG_MISC(info, "Adding {}", route_name); + bool removed = eraseRoute(route_name); + auto route = buildRouteConfig(route_name); + routes_.push_back(route); + + if (removed) { + // if the route was already in routes_, don't send a duplicate add in delta request + updateRoute(routes_, {}, {}); + } else { + updateRoute(routes_, {route}, {}); + } + + EXPECT_TRUE(waitForAck(Config::TypeUrl::get().RouteConfiguration, std::to_string(version_))); +} + +/** + * this is a no-op for now because it seems like routes cannot be removed - leaving a route out of + * a SOTW request does not remove it and sending a remove message in a delta request is ignored + */ +void XdsFuzzTest::removeRoute(std::string route_name) { + ENVOY_LOG_MISC(info, "Ignoring request to remove {}", route_name); + return; + + // TODO(samflattery): remove if it's true that routes cannot be removed + auto removed = eraseRoute(route_name); + if (removed) { + updateRoute(routes_, {}, {route_name}); + EXPECT_TRUE(waitForAck(Config::TypeUrl::get().RouteConfiguration, std::to_string(version_))); + } } /** @@ -189,83 +258,46 @@ void XdsFuzzTest::replay() { // URL so just don't check them until a listener is added bool sent_listener = false; - uint32_t added = 0; - uint32_t modified = 0; - uint32_t removed = 0; - for (const auto& action : actions_) { switch (action.action_selector_case()) { case test::server::config_validation::Action::kAddListener: { - sent_listener = true; - uint32_t listener_num = action.add_listener().listener_num(); - auto removed_name = removeListener(listener_num); - auto listener = buildListener(listener_num, action.add_listener().route_num()); - listeners_.push_back(listener); - - updateListener(listeners_, {listener}, {}); - // use waitForAck instead of compareDiscoveryRequest as the client makes - // additional discoveryRequests at launch that we might not want to - // respond to yet - EXPECT_TRUE(waitForAck(Config::TypeUrl::get().Listener, std::to_string(version_))); - if (removed_name) { - modified++; - test_server_->waitForCounterGe("listener_manager.listener_modified", modified); - } else { - added++; - test_server_->waitForCounterGe("listener_manager.listener_added", added); + std::string listener_name = + absl::StrCat("listener_", action.add_listener().listener_num() % ListenersMax); + std::string route_name = + absl::StrCat("route_config_", action.add_listener().route_num() % RoutesMax); + addListener(listener_name, route_name); + if (!sent_listener) { + addRoute(route_name); + test_server_->waitForCounterEq("listener_manager.listener_create_success", 1); } + sent_listener = true; break; } case test::server::config_validation::Action::kRemoveListener: { - auto removed_name = removeListener(action.remove_listener().listener_num()); - - if (removed_name) { - removed++; - updateListener(listeners_, {}, {*removed_name}); - EXPECT_TRUE(waitForAck(Config::TypeUrl::get().Listener, std::to_string(version_))); - test_server_->waitForCounterGe("listener_manager.listener_removed", removed); - } - + std::string listener_name = + absl::StrCat("listener_", action.remove_listener().listener_num() % ListenersMax); + removeListener(listener_name); break; } case test::server::config_validation::Action::kAddRoute: { if (!sent_listener) { - ENVOY_LOG_MISC(info, "Ignoring request to add route_{}", action.add_route().route_num()); + ENVOY_LOG_MISC(info, "Ignoring request to add route_{}", + action.add_route().route_num() % RoutesMax); break; } - uint32_t route_num = action.add_route().route_num(); - auto removed_name = 
removeRoute(route_num); - auto route = buildRouteConfig(route_num); - routes_.push_back(route); - - if (removed_name) { - // if the route was already in routes_, don't send a duplicate add in delta request - updateRoute(routes_, {}, {}); - } else { - updateRoute(routes_, {route}, {}); - } - - EXPECT_TRUE(waitForAck(Config::TypeUrl::get().RouteConfiguration, std::to_string(version_))); + std::string route_name = + absl::StrCat("route_config_", action.add_route().route_num() % RoutesMax); + addRoute(route_name); break; } case test::server::config_validation::Action::kRemoveRoute: { - // it seems like routes cannot be removed - leaving a route out of an SOTW request does not - // remove it and sending a remove message in a delta request is ignored - ENVOY_LOG_MISC(info, "Ignoring request to remove route_{}", - action.remove_route().route_num()); - break; - - // TODO(samflattery): remove if it's true that routes cannot be removed - auto removed_name = removeRoute(action.remove_route().route_num()); - if (removed) { - updateRoute(routes_, {}, {*removed_name}); - EXPECT_TRUE( - waitForAck(Config::TypeUrl::get().RouteConfiguration, std::to_string(version_))); - } + std::string route_name = + absl::StrCat("route_config_", action.remove_route().route_num() % RoutesMax); + removeRoute(route_name); break; } default: - break; + NOT_REACHED_GCOVR_EXCL_LINE; } } diff --git a/test/server/config_validation/xds_fuzz.h b/test/server/config_validation/xds_fuzz.h index 160a18f2e48e..d4a6999c20a7 100644 --- a/test/server/config_validation/xds_fuzz.h +++ b/test/server/config_validation/xds_fuzz.h @@ -28,9 +28,10 @@ class XdsFuzzTest : public HttpIntegrationTest { envoy::config::endpoint::v3::ClusterLoadAssignment buildClusterLoadAssignment(const std::string& name); - envoy::config::listener::v3::Listener buildListener(uint32_t listener_num, uint32_t route_num); + envoy::config::listener::v3::Listener buildListener(std::string listener_name, + std::string route_name); - envoy::config::route::v3::RouteConfiguration buildRouteConfig(uint32_t route_num); + envoy::config::route::v3::RouteConfiguration buildRouteConfig(std::string route_name); void updateListener(const std::vector& listeners, const std::vector& added_or_updated, @@ -49,8 +50,13 @@ class XdsFuzzTest : public HttpIntegrationTest { const size_t RoutesMax = 5; private: - absl::optional removeListener(uint32_t listener_num); - absl::optional removeRoute(uint32_t route_num); + void addListener(std::string listener_name, std::string route_name); + void removeListener(std::string listener_name); + void addRoute(std::string route_name); + void removeRoute(std::string route_name); + + bool eraseListener(std::string listener_name); + bool eraseRoute(std::string route_num); AssertionResult waitForAck(const std::string& expected_type_url, const std::string& expected_version); @@ -62,6 +68,10 @@ class XdsFuzzTest : public HttpIntegrationTest { envoy::config::core::v3::ApiVersion api_version_; Network::Address::IpVersion ip_version_; + + uint32_t num_added_; + uint32_t num_modified_; + uint32_t num_removed_; }; } // namespace Envoy From 8fd337d214421f9dd17c21c85c6881b3f2ff93f2 Mon Sep 17 00:00:00 2001 From: asraa Date: Wed, 15 Jul 2020 15:54:54 -0400 Subject: [PATCH 641/909] [runtime] Add runtime disabler for tests (#12105) Commit Message: Add a flag --runtime-feature-disable-for-tests=envoy.reloadable_features.test_feature_true for use in tests that flips an enabled by default flag to disabled for the duration of the test. 
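(Illustrative sketch, not part of the original patch: the snippet below only mirrors the intent of the tests added later in this commit, and the test name used here is hypothetical. It shows how a test binary built with the new flag in its args can observe a default-enabled feature as disabled.)

  #include "common/runtime/runtime_features.h"

  #include "gmock/gmock.h"

  namespace Envoy {
  namespace Runtime {

  // Only meaningful when the test binary is invoked with
  // --runtime-feature-disable-for-tests=envoy.reloadable_features.test_feature_true,
  // e.g. via the args of an envoy_cc_test target (see the BUILD change below).
  TEST(RuntimeFlagDisableSketch, DefaultTrueFeatureReadsFalse) {
    EXPECT_FALSE(Runtime::runtimeFeatureEnabled("envoy.reloadable_features.test_feature_true"));
  }

  } // namespace Runtime
  } // namespace Envoy

The actual coverage added by this patch lives in runtime_flag_override_test.cc and runtime_flag_override_noop_test.cc below.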
Risk Level: Low, test-only Testing: Added a test in the referenced test in CONTRIBUTING.md Docs Changes: Reference added Signed-off-by: Asra Ali --- CONTRIBUTING.md | 2 +- source/common/runtime/runtime_features.cc | 4 +-- test/common/runtime/BUILD | 14 ++++++++ .../runtime_flag_override_noop_test.cc | 24 +++++++++++++ .../runtime/runtime_flag_override_test.cc | 7 ++++ test/common/runtime/utility.h | 11 ++++-- test/test_runner.cc | 36 ++++++++++++++----- 7 files changed, 85 insertions(+), 13 deletions(-) create mode 100644 test/common/runtime/runtime_flag_override_noop_test.cc diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 8a56feda06b7..ce51f2d59f30 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -198,7 +198,7 @@ There are four suggested options for testing new runtime features: GetParam() as outlined in (1). 3. Set up integration tests with custom runtime defaults as documented in the [integration test README](https://github.com/envoyproxy/envoy/blob/master/test/integration/README.md) -4. Run a given unit test with the new runtime value explicitly set true as done +4. Run a given unit test with the new runtime value explicitly set true or false as done for [runtime_flag_override_test](https://github.com/envoyproxy/envoy/blob/master/test/common/runtime/BUILD) Runtime code is held to the same standard as regular Envoy code, so both the old diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc index dca9335cf9ef..9ad2ee1617a4 100644 --- a/source/common/runtime/runtime_features.cc +++ b/source/common/runtime/runtime_features.cc @@ -86,10 +86,10 @@ constexpr const char* runtime_features[] = { // When features are added here, there should be a tracking bug assigned to the // code owner to flip the default after sufficient testing. constexpr const char* disabled_runtime_features[] = { - // Sentinel and test flag. - "envoy.reloadable_features.test_feature_false", // TODO(alyssawilk) flip true after the release. "envoy.reloadable_features.new_tcp_connection_pool", + // Sentinel and test flag. + "envoy.reloadable_features.test_feature_false", }; RuntimeFeatures::RuntimeFeatures() { diff --git a/test/common/runtime/BUILD b/test/common/runtime/BUILD index 8d29521e1206..878ebe377ce1 100644 --- a/test/common/runtime/BUILD +++ b/test/common/runtime/BUILD @@ -72,6 +72,20 @@ envoy_cc_test( srcs = ["runtime_flag_override_test.cc"], args = [ "--runtime-feature-override-for-tests=envoy.reloadable_features.test_feature_false", + "--runtime-feature-disable-for-tests=envoy.reloadable_features.test_feature_true", + ], + coverage = False, + deps = [ + "//source/common/runtime:runtime_lib", + ], +) + +envoy_cc_test( + name = "runtime_flag_override_noop_test", + srcs = ["runtime_flag_override_noop_test.cc"], + args = [ + "--runtime-feature-override-for-tests=envoy.reloadable_features.test_feature_true", + "--runtime-feature-disable-for-tests=envoy.reloadable_features.test_feature_false", ], coverage = False, deps = [ diff --git a/test/common/runtime/runtime_flag_override_noop_test.cc b/test/common/runtime/runtime_flag_override_noop_test.cc new file mode 100644 index 000000000000..ab19fac7293b --- /dev/null +++ b/test/common/runtime/runtime_flag_override_noop_test.cc @@ -0,0 +1,24 @@ +#include "common/runtime/runtime_features.h" + +#include "gmock/gmock.h" + +namespace Envoy { +namespace Runtime { + +// Features not in runtime_features.cc are false by default (and this particular one is verified to +// be false in runtime_impl_test.cc). 
However, in the envoy_cc_test declaration, the flag is set +// "--runtime-feature-override-for-tests=envoy.reloadable_features.test_feature_false" +// to override the return value of runtimeFeatureEnabled to true. +TEST(RuntimeFlagOverrideNoopTest, OverridesNoop) { + EXPECT_FALSE(Runtime::runtimeFeatureEnabled("envoy.reloadable_features.test_feature_false")); +} + +// For features in runtime_features.cc that are true by default, this flag +// "--runtime-feature-override-for-tests=envoy.reloadable_features.test_feature_false" is set in the +// envoy_cc_test declaration to override the return value of runtimeFeatureEnabled to false. +TEST(RuntimeFlagOverrideNoopTest, OverrideDisableFeatureNoop) { + EXPECT_TRUE(Runtime::runtimeFeatureEnabled("envoy.reloadable_features.test_feature_true")); +} + +} // namespace Runtime +} // namespace Envoy diff --git a/test/common/runtime/runtime_flag_override_test.cc b/test/common/runtime/runtime_flag_override_test.cc index 37a1ab644473..6d90407e3857 100644 --- a/test/common/runtime/runtime_flag_override_test.cc +++ b/test/common/runtime/runtime_flag_override_test.cc @@ -13,5 +13,12 @@ TEST(RuntimeFlagOverrideTest, OverridesWork) { EXPECT_TRUE(Runtime::runtimeFeatureEnabled("envoy.reloadable_features.test_feature_false")); } +// For features in runtime_features.cc that are true by default, this flag +// "--runtime-feature-override-for-tests=envoy.reloadable_features.test_feature_false" is set in the +// envoy_cc_test declaration to override the return value of runtimeFeatureEnabled to false. +TEST(RuntimeFlagOverrideTest, OverrideDisableFeatureWork) { + EXPECT_FALSE(Runtime::runtimeFeatureEnabled("envoy.reloadable_features.test_feature_true")); +} + } // namespace Runtime } // namespace Envoy diff --git a/test/common/runtime/utility.h b/test/common/runtime/utility.h index e442e8940e00..ba1bb9f1b398 100644 --- a/test/common/runtime/utility.h +++ b/test/common/runtime/utility.h @@ -8,14 +8,21 @@ namespace Runtime { class RuntimeFeaturesPeer { public: - static bool addFeature(const std::string& feature) { + static bool enableFeature(const std::string& feature) { + // Remove from disabled features and add to enabled features. + const_cast(&Runtime::RuntimeFeaturesDefaults::get()) + ->disabled_features_.erase(feature); return const_cast(&Runtime::RuntimeFeaturesDefaults::get()) ->enabled_features_.insert(feature) .second; } - static void removeFeature(const std::string& feature) { + static bool disableFeature(const std::string& feature) { + // Remove from enabled features and add to disabled features. const_cast(&Runtime::RuntimeFeaturesDefaults::get()) ->enabled_features_.erase(feature); + return const_cast(&Runtime::RuntimeFeaturesDefaults::get()) + ->disabled_features_.insert(feature) + .second; } }; diff --git a/test/test_runner.cc b/test/test_runner.cc index 5eedad5ae23f..c90555899f1a 100644 --- a/test/test_runner.cc +++ b/test/test_runner.cc @@ -44,12 +44,15 @@ std::string findAndRemove(const std::regex& pattern, int& argc, char**& argv) { // This class is created iff a test is run with the special runtime override flag. class RuntimeManagingListener : public ::testing::EmptyTestEventListener { public: - RuntimeManagingListener(std::string& runtime_override) : runtime_override_(runtime_override) {} + RuntimeManagingListener(std::string& runtime_override, bool disable = false) + : runtime_override_(runtime_override), disable_(disable) {} // On each test start, edit RuntimeFeaturesDefaults with our custom runtime defaults.
void OnTestStart(const ::testing::TestInfo&) override { if (!runtime_override_.empty()) { - if (!Runtime::RuntimeFeaturesPeer::addFeature(runtime_override_)) { + bool reset = disable_ ? Runtime::RuntimeFeaturesPeer::disableFeature(runtime_override_) + : Runtime::RuntimeFeaturesPeer::enableFeature(runtime_override_); + if (!reset) { // If the entry was already in the hash map, don't remove it OnTestEnd. runtime_override_.clear(); } @@ -59,10 +62,14 @@ class RuntimeManagingListener : public ::testing::EmptyTestEventListener { // As each test ends, clean up the RuntimeFeaturesDefaults state. void OnTestEnd(const ::testing::TestInfo&) override { if (!runtime_override_.empty()) { - Runtime::RuntimeFeaturesPeer::removeFeature(runtime_override_); + disable_ ? Runtime::RuntimeFeaturesPeer::enableFeature(runtime_override_) + : Runtime::RuntimeFeaturesPeer::disableFeature(runtime_override_); } } std::string runtime_override_; + // This marks whether the runtime feature was enabled by default and needs to be overridden to + // false. + bool disable_; }; } // namespace @@ -94,15 +101,28 @@ int TestRunner::RunTests(int argc, char** argv) { // Before letting TestEnvironment latch argv and argc, remove any runtime override flag. // This allows doing test overrides of Envoy runtime features without adding // test flags to the Envoy production command line. - const std::regex PATTERN{"--runtime-feature-override-for-tests=(.*)", std::regex::optimize}; - std::string runtime_override = findAndRemove(PATTERN, argc, argv); - if (!runtime_override.empty()) { + const std::regex ENABLE_PATTERN{"--runtime-feature-override-for-tests=(.*)", + std::regex::optimize}; + std::string runtime_override_enable = findAndRemove(ENABLE_PATTERN, argc, argv); + if (!runtime_override_enable.empty()) { ENVOY_LOG_TO_LOGGER(Logger::Registry::getLog(Logger::Id::testing), info, - "Running with runtime feature override {}", runtime_override); + "Running with runtime feature override enable {}", runtime_override_enable); // Set up a listener which will create a global runtime and set the feature // to true for the duration of each test instance. ::testing::TestEventListeners& listeners = ::testing::UnitTest::GetInstance()->listeners(); - listeners.Append(new RuntimeManagingListener(runtime_override)); + listeners.Append(new RuntimeManagingListener(runtime_override_enable)); + } + const std::regex DISABLE_PATTERN{"--runtime-feature-disable-for-tests=(.*)", + std::regex::optimize}; + std::string runtime_override_disable = findAndRemove(DISABLE_PATTERN, argc, argv); + if (!runtime_override_disable.empty()) { + ENVOY_LOG_TO_LOGGER(Logger::Registry::getLog(Logger::Id::testing), info, + "Running with runtime feature override disable {}", + runtime_override_disable); + // Set up a listener which will create a global runtime and set the feature + // to false for the duration of each test instance. 
+ ::testing::TestEventListeners& listeners = ::testing::UnitTest::GetInstance()->listeners(); + listeners.Append(new RuntimeManagingListener(runtime_override_disable, true)); } #ifdef ENVOY_CONFIG_COVERAGE From 0e6cff307d02673a78348fc7ae9881739226dcc7 Mon Sep 17 00:00:00 2001 From: Naoya Yoshizawa Date: Thu, 16 Jul 2020 08:32:25 +0900 Subject: [PATCH 642/909] aws-signing: add es and glacier for payloads special treatment (#12020) Signed-off-by: azihsoyn --- source/extensions/common/aws/signer_impl.cc | 3 +- source/extensions/common/aws/signer_impl.h | 17 ++++- .../extensions/common/aws/signer_impl_test.cc | 71 ++++++++----------- 3 files changed, 47 insertions(+), 44 deletions(-) diff --git a/source/extensions/common/aws/signer_impl.cc b/source/extensions/common/aws/signer_impl.cc index 157ad46aa4b3..86730647966b 100644 --- a/source/extensions/common/aws/signer_impl.cc +++ b/source/extensions/common/aws/signer_impl.cc @@ -24,8 +24,7 @@ void SignerImpl::sign(Http::RequestMessage& message, bool sign_body) { } void SignerImpl::sign(Http::RequestHeaderMap& headers) { - // S3 payloads require special treatment. - if (service_name_ == "s3") { + if (require_content_hash_) { headers.setReference(SignatureHeaders::get().ContentSha256, SignatureConstants::get().UnsignedPayload); sign(headers, SignatureConstants::get().UnsignedPayload); diff --git a/source/extensions/common/aws/signer_impl.h b/source/extensions/common/aws/signer_impl.h index f925b6046b9d..78908874e042 100644 --- a/source/extensions/common/aws/signer_impl.h +++ b/source/extensions/common/aws/signer_impl.h @@ -47,8 +47,19 @@ class SignerImpl : public Signer, public Logger::Loggable { public: SignerImpl(absl::string_view service_name, absl::string_view region, const CredentialsProviderSharedPtr& credentials_provider, TimeSource& time_source) - : service_name_(service_name), region_(region), credentials_provider_(credentials_provider), - time_source_(time_source), long_date_formatter_(SignatureConstants::get().LongDateFormat), + : service_name_(service_name), region_(region), + + // S3, Glacier, ES payloads require special treatment. + // S3: + // https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html. + // ES: + // https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-request-signing.html. + // Glacier: + // https://docs.aws.amazon.com/amazonglacier/latest/dev/amazon-glacier-signing-requests.html. 
+ require_content_hash_{service_name_ == "s3" || service_name_ == "glacier" || + service_name_ == "es"}, + credentials_provider_(credentials_provider), time_source_(time_source), + long_date_formatter_(SignatureConstants::get().LongDateFormat), short_date_formatter_(SignatureConstants::get().ShortDateFormat) {} void sign(Http::RequestMessage& message, bool sign_body = false) override; @@ -74,6 +85,8 @@ class SignerImpl : public Signer, public Logger::Loggable { const std::string service_name_; const std::string region_; + + const bool require_content_hash_; CredentialsProviderSharedPtr credentials_provider_; TimeSource& time_source_; DateFormatter long_date_formatter_; diff --git a/test/extensions/common/aws/signer_impl_test.cc b/test/extensions/common/aws/signer_impl_test.cc index 2bae6a72b25e..857399749fb1 100644 --- a/test/extensions/common/aws/signer_impl_test.cc +++ b/test/extensions/common/aws/signer_impl_test.cc @@ -41,6 +41,27 @@ class SignerImplTest : public testing::Test { message_->body() = std::make_unique(body); } + void expectSignHeaders(absl::string_view service_name, absl::string_view signature, + absl::string_view payload) { + auto* credentials_provider = new NiceMock(); + EXPECT_CALL(*credentials_provider, getCredentials()).WillOnce(Return(credentials_)); + Http::TestRequestHeaderMapImpl headers{}; + headers.setMethod("GET"); + headers.setPath("/"); + headers.addCopy(Http::LowerCaseString("host"), "www.example.com"); + + SignerImpl signer(service_name, "region", CredentialsProviderSharedPtr{credentials_provider}, + time_system_); + signer.sign(headers); + + EXPECT_EQ(fmt::format("AWS4-HMAC-SHA256 Credential=akid/20180102/region/{}/aws4_request, " + "SignedHeaders=host;x-amz-content-sha256;x-amz-date, " + "Signature={}", + service_name, signature), + headers.get(Http::CustomHeaders::get().Authorization)->value().getStringView()); + EXPECT_EQ(payload, headers.get(SignatureHeaders::get().ContentSha256)->value().getStringView()); + } + NiceMock* credentials_provider_; Event::SimulatedTimeSystem time_system_; Http::RequestMessagePtr message_; @@ -169,46 +190,16 @@ TEST_F(SignerImplTest, SignHostHeader) { message_->headers().get(Http::CustomHeaders::get().Authorization)->value().getStringView()); } -// Verify signing headers for S3 -TEST_F(SignerImplTest, SignHeadersS3) { - auto* credentials_provider = new NiceMock(); - EXPECT_CALL(*credentials_provider, getCredentials()).WillOnce(Return(credentials_)); - Http::TestRequestHeaderMapImpl headers{}; - headers.setMethod("GET"); - headers.setPath("/"); - headers.addCopy(Http::LowerCaseString("host"), "www.example.com"); - - SignerImpl signer("s3", "region", CredentialsProviderSharedPtr{credentials_provider}, - time_system_); - signer.sign(headers); - - EXPECT_EQ("AWS4-HMAC-SHA256 Credential=akid/20180102/region/s3/aws4_request, " - "SignedHeaders=host;x-amz-content-sha256;x-amz-date, " - "Signature=d97cae067345792b78d2bad746f25c729b9eb4701127e13a7c80398f8216a167", - headers.get(Http::CustomHeaders::get().Authorization)->value().getStringView()); - EXPECT_EQ(SignatureConstants::get().UnsignedPayload, - headers.get(SignatureHeaders::get().ContentSha256)->value().getStringView()); -} - -// Verify signing headers for non S3 -TEST_F(SignerImplTest, SignHeadersNonS3) { - auto* credentials_provider = new NiceMock(); - EXPECT_CALL(*credentials_provider, getCredentials()).WillOnce(Return(credentials_)); - Http::TestRequestHeaderMapImpl headers{}; - headers.setMethod("GET"); - headers.setPath("/"); - 
headers.addCopy(Http::LowerCaseString("host"), "www.example.com"); - - SignerImpl signer("service", "region", CredentialsProviderSharedPtr{credentials_provider}, - time_system_); - signer.sign(headers); - - EXPECT_EQ("AWS4-HMAC-SHA256 Credential=akid/20180102/region/service/aws4_request, " - "SignedHeaders=host;x-amz-content-sha256;x-amz-date, " - "Signature=d9fd9be575a254c924d843964b063d770181d938ae818f5b603ef0575a5ce2cd", - headers.get(Http::CustomHeaders::get().Authorization)->value().getStringView()); - EXPECT_EQ(SignatureConstants::get().HashedEmptyString, - headers.get(SignatureHeaders::get().ContentSha256)->value().getStringView()); +// Verify signing headers for services. +TEST_F(SignerImplTest, SignHeadersByService) { + expectSignHeaders("s3", "d97cae067345792b78d2bad746f25c729b9eb4701127e13a7c80398f8216a167", + SignatureConstants::get().UnsignedPayload); + expectSignHeaders("service", "d9fd9be575a254c924d843964b063d770181d938ae818f5b603ef0575a5ce2cd", + SignatureConstants::get().HashedEmptyString); + expectSignHeaders("es", "0fd9c974bb2ad16c8d8a314dca4f6db151d32cbd04748d9c018afee2a685a02e", + SignatureConstants::get().UnsignedPayload); + expectSignHeaders("glacier", "8d1f241d77c64cda57b042cd312180f16e98dbd7a96e5545681430f8dbde45a0", + SignatureConstants::get().UnsignedPayload); } } // namespace From 9ead261f63cf2d1833f2282d98c7071516970945 Mon Sep 17 00:00:00 2001 From: yugantrana Date: Wed, 15 Jul 2020 19:35:46 -0400 Subject: [PATCH 643/909] quiche: adds udp_gro support (#11686) * Changes to add supportsUdpGro flag under ioHandle. Also adds other necessary ENVOY_UDP_GRO flags Signed-off-by: Yugant * Code changes to enable UDP_GRO Variables Introduced - supportsUdpGro : flag under IO Handle to indicate that UDP_GRO is enabled. - ENVOY_UDP_GRO_MORE : set as 1 when the platform is linux version >=5.0 - GRO_UDP : Identifier to hold UDP_GRO - gso_size_ : Under RecvMsgPerPacketInfo to hold gso_size. Code Changes: - IOHandle/recvmsg - Modified function to extract gso_size, and set it under output.msg_[0].gso_size - readFromSocket - Segment the read slice into gso_size'd sub-slices and pass it along to the server Other Minor Changes: - Verison checking Signed-off-by: Yugant * Ran code_format/check_format.py to fix formatting errors Signed-off-by: Yugant * Pedantic Spelling Changes Signed-off-by: Yugant * Added GRO/GSO to spelling_dictionary Signed-off-by: Yugant * Test for UDP-GRO, changes to code, and minor tweaks o Added UDPGroBasic test case under udp_listenter_impl_test - Test mocks a recvmsg syscall delivering 3 messages of same size stacked together in a single payload - Expects the three packets to be segmented as per gso. o Replaced memcpy under udp_gro with Buffer::OwnedImpl::copyOut Signed-off-by: Yugant * Minor NITs. Added sizeof(gso_size) to cmsg_space_ Signed-off-by: Yugant * Formatting Fixes Signed-off-by: Yugant * Spellings Signed-off-by: Yugant * - Added Mock Class to mock supportsUdpGro in UdpEcho Test - Modified UdpGroBasicTest to cover more cases - Fixed failing tests under test/server - Fixed TODOs - Added comments to test Signed-off-by: Yugant * Pedantic Spelling Check Signed-off-by: Yugant * Fixed some comments Signed-off-by: Yugant * Check Format Changes as suggested by the failing format build on github Signed-off-by: Yugant * Fixed Failing CIs Signed-off-by: Yugant * Fixes for macOS failing CI. Removed unnecessary includes. Signed-off-by: Yugant * Fixed CI failures on Windows and macOS. 
- Removed SOL_UDP and GRO_UDP definitions - Following similar implementation as SO_RXQ_OVFL Signed-off-by: Yugant * Addressed Bin's Review Comments Signed-off-by: Yugant * Addressed Bin's latest comments - Added setsockopt test to verify supportsUdpGro at runtime. - Changed log level for Unrecognized gso warning. Signed-off-by: Yugant * Addressed Dan's Review Comments - Replaced Buffer::OwnedImpl::copyOut with Buffer::OwnedImpl::move - Merged validateGroRecvCallbackParams into validateRecvCallbackParams - Changed EXPECT_CALL for mocked supportsUdpGro syscalls to ON_CALL - Corrected kernel's setting of destination address - Added SO_RXQ_OVFL control message in the UdpGro mock recvmsg syscall - Minor NITs and spelling corrections Signed-off-by: Yugant * Fixed Minor Spelling and Format Issues. Signed-off-by: Yugant * Ran ci/check_and_fix_format.sh locally Signed-off-by: Yugant * Fixed Failing CI Tests Signed-off-by: Yugant * Fixed Failing CIs Signed-off-by: Yugant * Minor NITs for udp_listener_impl_test Signed-off-by: Yugant * Addressed Dan's new comments on the PR - Modified passPayloadtoProcessor function to move the buffer commit calls closer to reserve calls - Added checks for supportsUdpGro around buildUdpGroOptions in test implementations - Modified validateRecvCallbackParams to also include num_packets_per_recv as arg - Added a few comments and other minor NITs Signed-off-by: Yugant * - Added a common receiveMessage function, and moved common code into it - Added supportsUdpGro check around buildUdpGroOptions - Minor changes to tests Signed-off-by: Yugant * Fixed Minor Test Failures Signed-off-by: Yugant * NIT change to rerun CIs Signed-off-by: Yugant * Revert NIT to rerun CIs Signed-off-by: Yugant * Minor Change Signed-off-by: Yugant * Fixed macOS failure, std::min requires same type Signed-off-by: Yugant * Removed unused function definition Signed-off-by: Yugant * Fixed clang_tidy issues! Signed-off-by: Yugant * Minor NITs Signed-off-by: Yugant * Addressed Dan's Comments. Minor NITs Signed-off-by: Yugant * - Remove slice parameter from the arguments for the receiveMessage function - Using buffer->length() to iterate over the buffer during gro_segmentation. Signed-off-by: Yugant * NIT: Rerun CIs Signed-off-by: Yugant * NIT: Rerun CIs Signed-off-by: Yugant * NIT: Rerun CIs Signed-off-by: Yugant * Addressed Harvey's comments. Moved receiveMessage to anonymous namespace. 
Signed-off-by: Yugant * NIT: Rerun CIs Signed-off-by: Yugant * NIT: Rerun CIs Signed-off-by: Yugant * NIT: Rerun CIs Signed-off-by: Yugant * Minor NIT, Updated test comment Signed-off-by: Yugant Co-authored-by: Yugant --- include/envoy/api/os_sys_calls.h | 5 + include/envoy/common/platform.h | 1 + include/envoy/network/io_handle.h | 7 + source/common/api/posix/os_sys_calls_impl.cc | 22 +++ source/common/api/posix/os_sys_calls_impl.h | 1 + source/common/api/win32/os_sys_calls_impl.cc | 5 + source/common/api/win32/os_sys_calls_impl.h | 1 + .../common/network/io_socket_handle_impl.cc | 15 +- source/common/network/io_socket_handle_impl.h | 9 +- .../common/network/socket_option_factory.cc | 7 + source/common/network/socket_option_factory.h | 1 + source/common/network/socket_option_impl.h | 6 + source/common/network/utility.cc | 89 ++++++++-- .../quiche/quic_io_handle_wrapper.h | 1 + source/server/listener_impl.cc | 6 + test/common/network/udp_listener_impl_test.cc | 164 ++++++++++++++++-- .../udp/udp_proxy/udp_proxy_filter_test.cc | 2 + .../quiche/quic_io_handle_wrapper_test.cc | 3 + test/mocks/api/mocks.h | 1 + test/mocks/network/io_handle.h | 1 + .../listener_manager_impl_quic_only_test.cc | 17 +- tools/spelling/spelling_dictionary.txt | 3 + 22 files changed, 331 insertions(+), 36 deletions(-) diff --git a/include/envoy/api/os_sys_calls.h b/include/envoy/api/os_sys_calls.h index 9fab9c1cd01b..28dd3d305652 100644 --- a/include/envoy/api/os_sys_calls.h +++ b/include/envoy/api/os_sys_calls.h @@ -62,6 +62,11 @@ class OsSysCalls { */ virtual bool supportsMmsg() const PURE; + /** + * return true if the OS supports UDP GRO. + */ + virtual bool supportsUdpGro() const PURE; + /** * Release all resources allocated for fd. * @return zero on success, -1 returned otherwise. diff --git a/include/envoy/common/platform.h b/include/envoy/common/platform.h index d5606ca32ee4..30da6aa87039 100644 --- a/include/envoy/common/platform.h +++ b/include/envoy/common/platform.h @@ -150,6 +150,7 @@ struct msghdr { #include #include #include +#include // for UDP_GRO #include #include // for mode_t #include diff --git a/include/envoy/network/io_handle.h b/include/envoy/network/io_handle.h index bd569c56179e..f5d18b532332 100644 --- a/include/envoy/network/io_handle.h +++ b/include/envoy/network/io_handle.h @@ -85,6 +85,8 @@ class IoHandle { Address::InstanceConstSharedPtr peer_address_; // The payload length of this packet. unsigned int msg_len_{0}; + // The gso_size, if specified in the transport header + unsigned int gso_size_{0}; }; /** @@ -141,6 +143,11 @@ class IoHandle { */ virtual bool supportsMmsg() const PURE; + /** + * return true if the platform supports udp_gro + */ + virtual bool supportsUdpGro() const PURE; + /** * Bind to address. The handle should have been created with a call to socket() * @param address address to bind to. 
diff --git a/source/common/api/posix/os_sys_calls_impl.cc b/source/common/api/posix/os_sys_calls_impl.cc index 2c00e6998772..e1366dbbf942 100644 --- a/source/common/api/posix/os_sys_calls_impl.cc +++ b/source/common/api/posix/os_sys_calls_impl.cc @@ -73,6 +73,28 @@ bool OsSysCallsImpl::supportsMmsg() const { #endif } +bool OsSysCallsImpl::supportsUdpGro() const { +#if !defined(__linux__) + return false; +#else +#ifndef UDP_GRO + return false; +#else + static const bool is_supported = [] { + int fd = ::socket(AF_INET, SOCK_DGRAM | SOCK_NONBLOCK, IPPROTO_UDP); + if (fd < 0) { + return false; + } + int val = 1; + bool result = (0 == ::setsockopt(fd, IPPROTO_UDP, UDP_GRO, &val, sizeof(val))); + ::close(fd); + return result; + }(); + return is_supported; +#endif +#endif +} + SysCallIntResult OsSysCallsImpl::ftruncate(int fd, off_t length) { const int rc = ::ftruncate(fd, length); return {rc, rc != -1 ? 0 : errno}; diff --git a/source/common/api/posix/os_sys_calls_impl.h b/source/common/api/posix/os_sys_calls_impl.h index fc63bbc07ca4..a35b15113806 100644 --- a/source/common/api/posix/os_sys_calls_impl.h +++ b/source/common/api/posix/os_sys_calls_impl.h @@ -22,6 +22,7 @@ class OsSysCallsImpl : public OsSysCalls { SysCallIntResult recvmmsg(os_fd_t sockfd, struct mmsghdr* msgvec, unsigned int vlen, int flags, struct timespec* timeout) override; bool supportsMmsg() const override; + bool supportsUdpGro() const override; SysCallIntResult close(os_fd_t fd) override; SysCallIntResult ftruncate(int fd, off_t length) override; SysCallPtrResult mmap(void* addr, size_t length, int prot, int flags, int fd, diff --git a/source/common/api/win32/os_sys_calls_impl.cc b/source/common/api/win32/os_sys_calls_impl.cc index 49a05b9fda2a..22bd2d60d72b 100644 --- a/source/common/api/win32/os_sys_calls_impl.cc +++ b/source/common/api/win32/os_sys_calls_impl.cc @@ -170,6 +170,11 @@ bool OsSysCallsImpl::supportsMmsg() const { return false; } +bool OsSysCallsImpl::supportsUdpGro() const { + // Windows doesn't support it. + return false; +} + SysCallIntResult OsSysCallsImpl::ftruncate(int fd, off_t length) { const int rc = ::_chsize_s(fd, length); return {rc, rc == 0 ? 
0 : errno}; diff --git a/source/common/api/win32/os_sys_calls_impl.h b/source/common/api/win32/os_sys_calls_impl.h index 1f6b56608b2b..d82e156de6b9 100644 --- a/source/common/api/win32/os_sys_calls_impl.h +++ b/source/common/api/win32/os_sys_calls_impl.h @@ -22,6 +22,7 @@ class OsSysCallsImpl : public OsSysCalls { SysCallIntResult recvmmsg(os_fd_t sockfd, struct mmsghdr* msgvec, unsigned int vlen, int flags, struct timespec* timeout) override; bool supportsMmsg() const override; + bool supportsUdpGro() const override; SysCallIntResult close(os_fd_t fd) override; SysCallIntResult ftruncate(int fd, off_t length) override; SysCallPtrResult mmap(void* addr, size_t length, int prot, int flags, int fd, diff --git a/source/common/network/io_socket_handle_impl.cc b/source/common/network/io_socket_handle_impl.cc index bb30a4a3b204..12f3a04f00b7 100644 --- a/source/common/network/io_socket_handle_impl.cc +++ b/source/common/network/io_socket_handle_impl.cc @@ -245,11 +245,13 @@ Api::IoCallUint64Result IoSocketHandleImpl::recvmsg(Buffer::RawSlice* slices, RELEASE_ASSERT(hdr.msg_namelen > 0, fmt::format("Unable to get remote address from recvmsg() for fd: {}", fd_)); output.msg_[0].peer_address_ = getAddressFromSockAddrOrDie(peer_addr, hdr.msg_namelen, fd_); + output.msg_[0].gso_size_ = 0; if (hdr.msg_controllen > 0) { - // Get overflow, local address from control message. + // Get overflow, local address and gso_size from control message. for (struct cmsghdr* cmsg = CMSG_FIRSTHDR(&hdr); cmsg != nullptr; cmsg = CMSG_NXTHDR(&hdr, cmsg)) { + if (output.msg_[0].local_address_ == nullptr) { Address::InstanceConstSharedPtr addr = maybeGetDstAddressFromHeader(*cmsg, self_port, fd_); if (addr != nullptr) { @@ -262,10 +264,17 @@ Api::IoCallUint64Result IoSocketHandleImpl::recvmsg(Buffer::RawSlice* slices, absl::optional maybe_dropped = maybeGetPacketsDroppedFromHeader(*cmsg); if (maybe_dropped) { *output.dropped_packets_ = *maybe_dropped; + continue; } } +#ifdef UDP_GRO + if (cmsg->cmsg_level == SOL_UDP && cmsg->cmsg_type == UDP_GRO) { + output.msg_[0].gso_size_ = *reinterpret_cast(CMSG_DATA(cmsg)); + } +#endif } } + return sysCallResultToIoCallResult(result); } @@ -366,6 +375,10 @@ bool IoSocketHandleImpl::supportsMmsg() const { return Api::OsSysCallsSingleton::get().supportsMmsg(); } +bool IoSocketHandleImpl::supportsUdpGro() const { + return Api::OsSysCallsSingleton::get().supportsUdpGro(); +} + Api::SysCallIntResult IoSocketHandleImpl::bind(Address::InstanceConstSharedPtr address) { return Api::OsSysCallsSingleton::get().bind(fd_, address->sockAddr(), address->sockAddrLen()); } diff --git a/source/common/network/io_socket_handle_impl.h b/source/common/network/io_socket_handle_impl.h index 03c9c28ca08d..305fc1765bf6 100644 --- a/source/common/network/io_socket_handle_impl.h +++ b/source/common/network/io_socket_handle_impl.h @@ -44,6 +44,7 @@ class IoSocketHandleImpl : public IoHandle, protected Logger::Loggable SocketOptionFactory::buildReusePortOptions() { return options; } +std::unique_ptr SocketOptionFactory::buildUdpGroOptions() { + std::unique_ptr options = std::make_unique(); + options->push_back(std::make_shared( + envoy::config::core::v3::SocketOption::STATE_BOUND, ENVOY_SOCKET_UDP_GRO, 1)); + return options; +} + } // namespace Network } // namespace Envoy diff --git a/source/common/network/socket_option_factory.h b/source/common/network/socket_option_factory.h index e93885e844b9..1da9f3c1780e 100644 --- a/source/common/network/socket_option_factory.h +++ 
b/source/common/network/socket_option_factory.h @@ -32,6 +32,7 @@ class SocketOptionFactory : Logger::Loggable { static std::unique_ptr buildIpPacketInfoOptions(); static std::unique_ptr buildRxQueueOverFlowOptions(); static std::unique_ptr buildReusePortOptions(); + static std::unique_ptr buildUdpGroOptions(); }; } // namespace Network } // namespace Envoy diff --git a/source/common/network/socket_option_impl.h b/source/common/network/socket_option_impl.h index 95338adf6f9d..69b4989b20a4 100644 --- a/source/common/network/socket_option_impl.h +++ b/source/common/network/socket_option_impl.h @@ -53,6 +53,12 @@ namespace Network { #define ENVOY_SOCKET_SO_REUSEPORT Network::SocketOptionName() #endif +#ifdef UDP_GRO +#define ENVOY_SOCKET_UDP_GRO ENVOY_MAKE_SOCKET_OPTION_NAME(SOL_UDP, UDP_GRO) +#else +#define ENVOY_SOCKET_UDP_GRO Network::SocketOptionName() +#endif + #ifdef TCP_KEEPCNT #define ENVOY_SOCKET_TCP_KEEPCNT ENVOY_MAKE_SOCKET_OPTION_NAME(IPPROTO_TCP, TCP_KEEPCNT) #else diff --git a/source/common/network/utility.cc b/source/common/network/utility.cc index 348e7957a7b7..ebb83c43bb25 100644 --- a/source/common/network/utility.cc +++ b/source/common/network/utility.cc @@ -7,6 +7,7 @@ #include #include +#include "envoy/buffer/buffer.h" #include "envoy/common/exception.h" #include "envoy/common/platform.h" #include "envoy/config/core/v3/address.pb.h" @@ -96,6 +97,28 @@ uint32_t portFromUrl(const std::string& url, absl::string_view scheme, } } +Api::IoCallUint64Result receiveMessage(uint64_t max_packet_size, Buffer::InstancePtr& buffer, + IoHandle::RecvMsgOutput& output, IoHandle& handle, + const Address::Instance& local_address) { + + Buffer::RawSlice slice; + const uint64_t num_slices = buffer->reserve(max_packet_size, &slice, 1); + ASSERT(num_slices == 1u); + + Api::IoCallUint64Result result = + handle.recvmsg(&slice, num_slices, local_address.ip()->port(), output); + + if (!result.ok()) { + return result; + } + + // Adjust memory length and commit slice to buffer + slice.len_ = std::min(slice.len_, static_cast(result.rc_)); + buffer->commit(&slice, 1); + + return result; +} + } // namespace std::string Utility::hostFromTcpUrl(const std::string& url) { @@ -520,14 +543,10 @@ Api::IoCallUint64Result Utility::writeToSocket(IoHandle& handle, Buffer::RawSlic return send_result; } -void passPayloadToProcessor(uint64_t bytes_read, Buffer::RawSlice& slice, - Buffer::InstancePtr buffer, Address::InstanceConstSharedPtr peer_addess, +void passPayloadToProcessor(uint64_t bytes_read, Buffer::InstancePtr buffer, + Address::InstanceConstSharedPtr peer_addess, Address::InstanceConstSharedPtr local_address, UdpPacketProcessor& udp_packet_processor, MonotonicTime receive_time) { - // Adjust used memory length. 
- slice.len_ = std::min(slice.len_, static_cast(bytes_read)); - buffer->commit(&slice, 1); - RELEASE_ASSERT( peer_addess != nullptr, fmt::format("Unable to get remote address on the socket bount to local address: {} ", @@ -547,6 +566,44 @@ Api::IoCallUint64Result Utility::readFromSocket(IoHandle& handle, UdpPacketProcessor& udp_packet_processor, MonotonicTime receive_time, uint32_t* packets_dropped) { + + if (handle.supportsUdpGro()) { + Buffer::InstancePtr buffer = std::make_unique(); + IoHandle::RecvMsgOutput output(1, packets_dropped); + + // TODO(yugant): Avoid allocating 24k for each read by getting memory from UdpPacketProcessor + const uint64_t max_packet_size_with_gro = 16 * udp_packet_processor.maxPacketSize(); + + Api::IoCallUint64Result result = + receiveMessage(max_packet_size_with_gro, buffer, output, handle, local_address); + + if (!result.ok()) { + return result; + } + + const uint64_t gso_size = output.msg_[0].gso_size_; + ENVOY_LOG_MISC(trace, "recvmsg bytes {} with gso_size as {}", result.rc_, gso_size); + + // Skip gso segmentation and proceed as a single payload. + if (gso_size == 0u) { + passPayloadToProcessor(result.rc_, std::move(buffer), std::move(output.msg_[0].peer_address_), + std::move(output.msg_[0].local_address_), udp_packet_processor, + receive_time); + return result; + } + + // Segment the buffer read by the recvmsg syscall into gso_sized sub buffers. + while (buffer->length() > 0) { + const uint64_t bytes_to_copy = std::min(buffer->length(), gso_size); + Buffer::InstancePtr sub_buffer = std::make_unique(); + sub_buffer->move(*buffer, bytes_to_copy); + passPayloadToProcessor(bytes_to_copy, std::move(sub_buffer), output.msg_[0].peer_address_, + output.msg_[0].local_address_, udp_packet_processor, receive_time); + } + + return result; + } + if (handle.supportsMmsg()) { const uint32_t num_packets_per_mmsg_call = 16u; const uint32_t num_slices_per_packet = 1u; @@ -574,20 +631,22 @@ Api::IoCallUint64Result Utility::readFromSocket(IoHandle& handle, ASSERT(msg_len <= slice->len_); ENVOY_LOG_MISC(debug, "Receive a packet with {} bytes from {}", msg_len, output.msg_[i].peer_address_->asString()); - passPayloadToProcessor(msg_len, *slice, std::move(buffers[i]), output.msg_[i].peer_address_, + + // Adjust used memory length and commit slice to buffer + slice->len_ = std::min(slice->len_, static_cast(msg_len)); + buffers[i]->commit(slice, 1); + + passPayloadToProcessor(msg_len, std::move(buffers[i]), output.msg_[i].peer_address_, output.msg_[i].local_address_, udp_packet_processor, receive_time); } return result; } Buffer::InstancePtr buffer = std::make_unique(); - Buffer::RawSlice slice; - const uint64_t num_slices = buffer->reserve(udp_packet_processor.maxPacketSize(), &slice, 1); - ASSERT(num_slices == 1u); - IoHandle::RecvMsgOutput output(1, packets_dropped); + Api::IoCallUint64Result result = - handle.recvmsg(&slice, num_slices, local_address.ip()->port(), output); + receiveMessage(udp_packet_processor.maxPacketSize(), buffer, output, handle, local_address); if (!result.ok()) { return result; @@ -595,9 +654,9 @@ Api::IoCallUint64Result Utility::readFromSocket(IoHandle& handle, ENVOY_LOG_MISC(trace, "recvmsg bytes {}", result.rc_); - passPayloadToProcessor( - result.rc_, slice, std::move(buffer), std::move(output.msg_[0].peer_address_), - std::move(output.msg_[0].local_address_), udp_packet_processor, receive_time); + passPayloadToProcessor(result.rc_, std::move(buffer), std::move(output.msg_[0].peer_address_), + std::move(output.msg_[0].local_address_), 
udp_packet_processor, + receive_time); return result; } diff --git a/source/extensions/quic_listeners/quiche/quic_io_handle_wrapper.h b/source/extensions/quic_listeners/quiche/quic_io_handle_wrapper.h index f80d73afa92f..84420816b917 100644 --- a/source/extensions/quic_listeners/quiche/quic_io_handle_wrapper.h +++ b/source/extensions/quic_listeners/quiche/quic_io_handle_wrapper.h @@ -63,6 +63,7 @@ class QuicIoHandleWrapper : public Network::IoHandle { return io_handle_.recvmmsg(slices, self_port, output); } bool supportsMmsg() const override { return io_handle_.supportsMmsg(); } + bool supportsUdpGro() const override { return io_handle_.supportsUdpGro(); } Api::SysCallIntResult bind(Network::Address::InstanceConstSharedPtr address) override { return io_handle_.bind(address); } diff --git a/source/server/listener_impl.cc b/source/server/listener_impl.cc index fd98d5526210..2e46bca84d12 100644 --- a/source/server/listener_impl.cc +++ b/source/server/listener_impl.cc @@ -11,6 +11,7 @@ #include "envoy/stats/scope.h" #include "common/access_log/access_log_impl.h" +#include "common/api/os_sys_calls_impl.h" #include "common/common/assert.h" #include "common/config/utility.h" #include "common/network/connection_balancer_impl.h" @@ -389,6 +390,11 @@ void ListenerImpl::buildListenSocketOptions(Network::Socket::Type socket_type) { addListenSocketOptions(Network::SocketOptionFactory::buildIpPacketInfoOptions()); // Needed to return receive buffer overflown indicator. addListenSocketOptions(Network::SocketOptionFactory::buildRxQueueOverFlowOptions()); + // TODO(yugant) : Add a config option for UDP_GRO + if (Api::OsSysCallsSingleton::get().supportsUdpGro()) { + // Needed to receive gso_size option + addListenSocketOptions(Network::SocketOptionFactory::buildUdpGroOptions()); + } } } diff --git a/test/common/network/udp_listener_impl_test.cc b/test/common/network/udp_listener_impl_test.cc index f2cff8e49748..20c83106dce6 100644 --- a/test/common/network/udp_listener_impl_test.cc +++ b/test/common/network/udp_listener_impl_test.cc @@ -3,8 +3,10 @@ #include #include +#include "envoy/api/os_sys_calls.h" #include "envoy/config/core/v3/base.pb.h" +#include "common/api/os_sys_calls_impl.h" #include "common/network/address_impl.h" #include "common/network/socket_option_factory.h" #include "common/network/socket_option_impl.h" @@ -31,17 +33,33 @@ namespace Envoy { namespace Network { namespace { +// UdpGro is only supported on Linux versions >= 5.0. Also, the +// underlying platform only performs the payload concatenation when +// packets are sent from a network namespace different to that of +// the client. Currently, the testing framework does not support +// this behavior. +// This helper allows to intercept the supportsUdpGro syscall and +// toggle the gro behavior as per individual test requirements. +class MockSupportsUdpGro : public Api::OsSysCallsImpl { +public: + MOCK_METHOD(bool, supportsUdpGro, (), (const)); +}; + class UdpListenerImplTest : public ListenerImplTestBase { public: UdpListenerImplTest() : server_socket_(createServerSocket(true)), send_to_addr_(getServerLoopbackAddress()) { time_system_.advanceTimeWait(std::chrono::milliseconds(100)); + ON_CALL(udp_gro_syscall_, supportsUdpGro()).WillByDefault(Return(false)); } void SetUp() override { // Set listening socket options. 
server_socket_->addOptions(SocketOptionFactory::buildIpPacketInfoOptions()); server_socket_->addOptions(SocketOptionFactory::buildRxQueueOverFlowOptions()); + if (Api::OsSysCallsSingleton::get().supportsUdpGro()) { + server_socket_->addOptions(SocketOptionFactory::buildUdpGroOptions()); + } listener_ = std::make_unique( dispatcherImpl(), server_socket_, listener_callbacks_, dispatcherImpl().timeSource()); @@ -69,7 +87,7 @@ class UdpListenerImplTest : public ListenerImplTestBase { } // Validates receive data, source/destination address and received time. - void validateRecvCallbackParams(const UdpRecvData& data) { + void validateRecvCallbackParams(const UdpRecvData& data, size_t num_packet_per_recv) { ASSERT_NE(data.addresses_.local_, nullptr); ASSERT_NE(data.addresses_.peer_, nullptr); @@ -82,10 +100,6 @@ class UdpListenerImplTest : public ListenerImplTestBase { EXPECT_EQ(*data.addresses_.local_, *send_to_addr_); - size_t num_packet_per_recv = 1u; - if (Api::OsSysCallsSingleton::get().supportsMmsg()) { - num_packet_per_recv = 16u; - } EXPECT_EQ(time_system_.monotonicTime(), data.receive_time_ + std::chrono::milliseconds( @@ -101,6 +115,8 @@ class UdpListenerImplTest : public ListenerImplTestBase { MockUdpListenerCallbacks listener_callbacks_; std::unique_ptr listener_; size_t num_packets_received_by_listener_{0}; + NiceMock udp_gro_syscall_; + TestThreadsafeSingletonInjector os_calls{&udp_gro_syscall_}; }; INSTANTIATE_TEST_SUITE_P(IpVersions, UdpListenerImplTest, @@ -143,13 +159,11 @@ TEST_P(UdpListenerImplTest, UseActualDstUdp) { EXPECT_CALL(listener_callbacks_, onReadReady()); EXPECT_CALL(listener_callbacks_, onData(_)) .WillOnce(Invoke([&](const UdpRecvData& data) -> void { - validateRecvCallbackParams(data); - + validateRecvCallbackParams(data, Api::OsSysCallsSingleton::get().supportsMmsg() ? 16u : 1u); EXPECT_EQ(data.buffer_->toString(), first); })) .WillOnce(Invoke([&](const UdpRecvData& data) -> void { - validateRecvCallbackParams(data); - + validateRecvCallbackParams(data, Api::OsSysCallsSingleton::get().supportsMmsg() ? 16u : 1u); EXPECT_EQ(data.buffer_->toString(), second); dispatcher_->exit(); @@ -184,7 +198,7 @@ TEST_P(UdpListenerImplTest, UdpEcho) { EXPECT_CALL(listener_callbacks_, onReadReady()); EXPECT_CALL(listener_callbacks_, onData(_)) .WillOnce(Invoke([&](const UdpRecvData& data) -> void { - validateRecvCallbackParams(data); + validateRecvCallbackParams(data, Api::OsSysCallsSingleton::get().supportsMmsg() ? 16u : 1u); test_peer_address = data.addresses_.peer_; @@ -194,7 +208,7 @@ TEST_P(UdpListenerImplTest, UdpEcho) { server_received_data.push_back(data_str); })) .WillRepeatedly(Invoke([&](const UdpRecvData& data) -> void { - validateRecvCallbackParams(data); + validateRecvCallbackParams(data, Api::OsSysCallsSingleton::get().supportsMmsg() ? 16u : 1u); const std::string data_str = data.buffer_->toString(); EXPECT_EQ(data_str, client_data[num_packets_received_by_listener_ - 1]); @@ -270,7 +284,7 @@ TEST_P(UdpListenerImplTest, UdpListenerEnableDisable) { .Times(2) .WillOnce(Return()) .WillOnce(Invoke([&](const UdpRecvData& data) -> void { - validateRecvCallbackParams(data); + validateRecvCallbackParams(data, Api::OsSysCallsSingleton::get().supportsMmsg() ? 16u : 1u); EXPECT_EQ(data.buffer_->toString(), second); @@ -313,6 +327,7 @@ TEST_P(UdpListenerImplTest, UdpListenerRecvMsgError) { // Inject mocked OsSysCalls implementation to mock a read failure. 
Api::MockOsSysCalls os_sys_calls; TestThreadsafeSingletonInjector<Api::OsSysCallsImpl> os_calls(&os_sys_calls); + EXPECT_CALL(os_sys_calls, supportsUdpGro()); EXPECT_CALL(os_sys_calls, supportsMmsg()); EXPECT_CALL(os_sys_calls, recvmsg(_, _, _)) .WillOnce(Return(Api::SysCallSizeResult{-1, SOCKET_ERROR_NOT_SUP})); @@ -399,6 +414,131 @@ TEST_P(UdpListenerImplTest, SendDataError) { EXPECT_DEATH(listener_->send(send_data), "Invalid argument passed in"); } +/** + * Test that multiple stacked packets of the same size are properly segmented + * when UDP GRO is enabled on the platform. + */ +#ifdef UDP_GRO +TEST_P(UdpListenerImplTest, UdpGroBasic) { + // We send 4 packets (3 of equal length and 1 as a trail), which are concatenated together by + // kernel supporting udp gro. Verify the concatenated packet is transformed back into individual + // packets + absl::FixedArray<std::string> client_data({"Equal!!!", "Length!!", "Messages", "trail"}); + + for (const auto& i : client_data) { + client_.write(i, *send_to_addr_); + } + + // The concatenated payload received from kernel supporting udp_gro + std::string stacked_message = absl::StrJoin(client_data, ""); + + // Mock OsSysCalls to mimic kernel behavior for packet concatenation + // based on udp_gro. supportsUdpGro should return true and recvmsg should + // return the concatenated payload with the gso_size set appropriately. + Api::MockOsSysCalls os_sys_calls; + TestThreadsafeSingletonInjector<Api::OsSysCallsImpl> os_calls(&os_sys_calls); + EXPECT_CALL(os_sys_calls, supportsUdpGro).WillRepeatedly(Return(true)); + EXPECT_CALL(os_sys_calls, supportsMmsg).Times(0); + + EXPECT_CALL(os_sys_calls, recvmsg(_, _, _)) + .WillOnce(Invoke([&](os_fd_t, msghdr* msg, int) { + // Set msg_name and msg_namelen + if (client_.localAddress()->ip()->version() == Address::IpVersion::v4) { + sockaddr_storage ss; + auto ipv4_addr = reinterpret_cast<sockaddr_in*>(&ss); + memset(ipv4_addr, 0, sizeof(sockaddr_in)); + ipv4_addr->sin_family = AF_INET; + ipv4_addr->sin_addr.s_addr = htonl(INADDR_LOOPBACK); + ipv4_addr->sin_port = client_.localAddress()->ip()->port(); + msg->msg_namelen = sizeof(sockaddr_in); + *reinterpret_cast<sockaddr_in*>(msg->msg_name) = *ipv4_addr; + } else if (client_.localAddress()->ip()->version() == Address::IpVersion::v6) { + sockaddr_storage ss; + auto ipv6_addr = reinterpret_cast<sockaddr_in6*>(&ss); + memset(ipv6_addr, 0, sizeof(sockaddr_in6)); + ipv6_addr->sin6_family = AF_INET6; + ipv6_addr->sin6_addr = in6addr_loopback; + ipv6_addr->sin6_port = client_.localAddress()->ip()->port(); + *reinterpret_cast<sockaddr_in6*>(msg->msg_name) = *ipv6_addr; + msg->msg_namelen = sizeof(sockaddr_in6); + } + + // Set msg_iovec + EXPECT_EQ(msg->msg_iovlen, 1); + memcpy(msg->msg_iov[0].iov_base, stacked_message.data(), stacked_message.length()); + msg->msg_iov[0].iov_len = stacked_message.length(); + + // Set control headers + memset(msg->msg_control, 0, msg->msg_controllen); + cmsghdr* cmsg = CMSG_FIRSTHDR(msg); + if (send_to_addr_->ip()->version() == Address::IpVersion::v4) { + cmsg->cmsg_level = IPPROTO_IP; +#ifndef IP_RECVDSTADDR + cmsg->cmsg_type = IP_PKTINFO; + cmsg->cmsg_len = CMSG_LEN(sizeof(in_pktinfo)); + reinterpret_cast<in_pktinfo*>(CMSG_DATA(cmsg))->ipi_addr.s_addr = + send_to_addr_->ip()->ipv4()->address(); +#else + cmsg->cmsg_type = IP_RECVDSTADDR; + cmsg->cmsg_len = CMSG_LEN(sizeof(in_addr)); + *reinterpret_cast(CMSG_DATA(cmsg)) = send_to_addr_->ip()->ipv4()->address(); +#endif + } else if (send_to_addr_->ip()->version() == Address::IpVersion::v6) { + cmsg->cmsg_len = CMSG_LEN(sizeof(in6_pktinfo)); + cmsg->cmsg_level = IPPROTO_IPV6; + cmsg->cmsg_type = IPV6_PKTINFO; +
auto pktinfo = reinterpret_cast(CMSG_DATA(cmsg)); + pktinfo->ipi6_ifindex = 0; + *(reinterpret_cast(pktinfo->ipi6_addr.s6_addr)) = + send_to_addr_->ip()->ipv6()->address(); + } + + // Set gso_size + cmsg = CMSG_NXTHDR(msg, cmsg); + cmsg->cmsg_level = SOL_UDP; + cmsg->cmsg_type = UDP_GRO; + cmsg->cmsg_len = CMSG_LEN(sizeof(uint16_t)); + const uint16_t gso_size = 8; + *reinterpret_cast(CMSG_DATA(cmsg)) = gso_size; + +#ifdef SO_RXQ_OVFL + // Set SO_RXQ_OVFL + cmsg = CMSG_NXTHDR(msg, cmsg); + EXPECT_NE(cmsg, nullptr); + cmsg->cmsg_level = SOL_SOCKET; + cmsg->cmsg_type = SO_RXQ_OVFL; + cmsg->cmsg_len = CMSG_LEN(sizeof(uint32_t)); + const uint32_t overflow = 0; + *reinterpret_cast(CMSG_DATA(cmsg)) = overflow; +#endif + return Api::SysCallSizeResult{static_cast(stacked_message.length()), 0}; + })) + .WillRepeatedly(Return(Api::SysCallSizeResult{-1, EAGAIN})); + + EXPECT_CALL(listener_callbacks_, onReadReady()); + EXPECT_CALL(listener_callbacks_, onData(_)) + .WillOnce(Invoke([&](const UdpRecvData& data) -> void { + validateRecvCallbackParams(data, client_data.size()); + + const std::string data_str = data.buffer_->toString(); + EXPECT_EQ(data_str, client_data[num_packets_received_by_listener_ - 1]); + })) + .WillRepeatedly(Invoke([&](const UdpRecvData& data) -> void { + validateRecvCallbackParams(data, client_data.size()); + + const std::string data_str = data.buffer_->toString(); + EXPECT_EQ(data_str, client_data[num_packets_received_by_listener_ - 1]); + })); + + EXPECT_CALL(listener_callbacks_, onWriteReady(_)).WillOnce(Invoke([&](const Socket& socket) { + EXPECT_EQ(socket.ioHandle().fd(), server_socket_->ioHandle().fd()); + dispatcher_->exit(); + })); + + dispatcher_->run(Event::Dispatcher::RunType::Block); +} +#endif + } // namespace } // namespace Network } // namespace Envoy diff --git a/test/extensions/filters/udp/udp_proxy/udp_proxy_filter_test.cc b/test/extensions/filters/udp/udp_proxy/udp_proxy_filter_test.cc index 0e4d98458bf6..8a29983cf32d 100644 --- a/test/extensions/filters/udp/udp_proxy/udp_proxy_filter_test.cc +++ b/test/extensions/filters/udp/udp_proxy/udp_proxy_filter_test.cc @@ -66,6 +66,7 @@ class UdpProxyFilterTest : public testing::Test { int send_sys_errno = 0) { EXPECT_CALL(*idle_timer_, enableTimer(parent_.config_->sessionTimeout(), nullptr)); + EXPECT_CALL(*io_handle_, supportsUdpGro()); EXPECT_CALL(*io_handle_, supportsMmsg()); // Return the datagram. EXPECT_CALL(*io_handle_, recvmsg(_, 1, _, _)) @@ -97,6 +98,7 @@ class UdpProxyFilterTest : public testing::Test { } })); // Return an EAGAIN result. 
+ EXPECT_CALL(*io_handle_, supportsUdpGro()); EXPECT_CALL(*io_handle_, supportsMmsg()); EXPECT_CALL(*io_handle_, recvmsg(_, 1, _, _)) .WillOnce(Return(ByMove(Api::IoCallUint64Result( diff --git a/test/extensions/quic_listeners/quiche/quic_io_handle_wrapper_test.cc b/test/extensions/quic_listeners/quiche/quic_io_handle_wrapper_test.cc index 4357d75edeae..5027ada97d11 100644 --- a/test/extensions/quic_listeners/quiche/quic_io_handle_wrapper_test.cc +++ b/test/extensions/quic_listeners/quiche/quic_io_handle_wrapper_test.cc @@ -113,6 +113,9 @@ TEST_F(QuicIoHandleWrapperTest, DelegateIoHandleCalls) { EXPECT_DEBUG_DEATH(wrapper_->recvmmsg(slices, /*self_port=*/12345, output2), "recvmmsg is called after close"); + EXPECT_CALL(os_sys_calls_, supportsUdpGro()); + wrapper_->supportsUdpGro(); + EXPECT_CALL(os_sys_calls_, supportsMmsg()); wrapper_->supportsMmsg(); } diff --git a/test/mocks/api/mocks.h b/test/mocks/api/mocks.h index cd2ef8dadf30..22fd91168e39 100644 --- a/test/mocks/api/mocks.h +++ b/test/mocks/api/mocks.h @@ -90,6 +90,7 @@ class MockOsSysCalls : public OsSysCallsImpl { MOCK_METHOD(SysCallIntResult, listen, (os_fd_t sockfd, int backlog)); MOCK_METHOD(SysCallSizeResult, write, (os_fd_t sockfd, const void* buffer, size_t length)); MOCK_METHOD(bool, supportsMmsg, (), (const)); + MOCK_METHOD(bool, supportsUdpGro, (), (const)); // Map from (sockfd,level,optname) to boolean socket option. using SockOptKey = std::tuple; diff --git a/test/mocks/network/io_handle.h b/test/mocks/network/io_handle.h index 4724b2fcea10..0b27d262bebc 100644 --- a/test/mocks/network/io_handle.h +++ b/test/mocks/network/io_handle.h @@ -29,6 +29,7 @@ class MockIoHandle : public IoHandle { MOCK_METHOD(Api::IoCallUint64Result, recvmmsg, (RawSliceArrays & slices, uint32_t self_port, RecvMsgOutput& output)); MOCK_METHOD(bool, supportsMmsg, (), (const)); + MOCK_METHOD(bool, supportsUdpGro, (), (const)); MOCK_METHOD(Api::SysCallIntResult, bind, (Address::InstanceConstSharedPtr address)); MOCK_METHOD(Api::SysCallIntResult, listen, (int backlog)); MOCK_METHOD(Api::SysCallIntResult, connect, (Address::InstanceConstSharedPtr address)); diff --git a/test/server/listener_manager_impl_quic_only_test.cc b/test/server/listener_manager_impl_quic_only_test.cc index b54079590eef..a4465dcdb937 100644 --- a/test/server/listener_manager_impl_quic_only_test.cc +++ b/test/server/listener_manager_impl_quic_only_test.cc @@ -50,10 +50,12 @@ reuse_port: true envoy::config::listener::v3::Listener listener_proto = parseListenerFromV2Yaml(yaml); EXPECT_CALL(server_.random_, uuid()); expectCreateListenSocket(envoy::config::core::v3::SocketOption::STATE_PREBIND, -#ifdef SO_RXQ_OVFL - /* expected_num_options */ 3, // SO_REUSEPORT is on as configured +#ifdef SO_RXQ_OVFL // SO_REUSEPORT is on as configured + /* expected_num_options */ + Api::OsSysCallsSingleton::get().supportsUdpGro() ? 4 : 3, #else - /* expected_num_options */ 2, + /* expected_num_options */ + Api::OsSysCallsSingleton::get().supportsUdpGro() ? 
3 : 2, #endif /* expected_creation_params */ {true, false}); @@ -67,11 +69,18 @@ reuse_port: true /* expected_value */ 1, /* expected_num_calls */ 1); #endif - expectSetsockopt(/* expected_sockopt_level */ SOL_SOCKET, /* expected_sockopt_name */ SO_REUSEPORT, /* expected_value */ 1, /* expected_num_calls */ 1); +#ifdef UDP_GRO + if (Api::OsSysCallsSingleton::get().supportsUdpGro()) { + expectSetsockopt(/* expected_sockopt_level */ SOL_UDP, + /* expected_sockopt_name */ UDP_GRO, + /* expected_value */ 1, + /* expected_num_calls */ 1); + } +#endif manager_->addOrUpdateListener(listener_proto, "", true); EXPECT_EQ(1u, manager_->listeners().size()); diff --git a/tools/spelling/spelling_dictionary.txt b/tools/spelling/spelling_dictionary.txt index 4b8f9a058a24..6170e93bec86 100644 --- a/tools/spelling/spelling_dictionary.txt +++ b/tools/spelling/spelling_dictionary.txt @@ -122,6 +122,8 @@ GETting GLB GOAWAY GRPC +GRO +GSO GSS GTEST GURL @@ -776,6 +778,7 @@ mutexes mux muxed mysql +namelen nameserver namespace namespaces From 42a4b9fae616adebc9bba11fd5f77db8baaba02d Mon Sep 17 00:00:00 2001 From: Yifan Yang Date: Wed, 15 Jul 2020 19:38:09 -0400 Subject: [PATCH 644/909] Subject: Porting Envoy to C++17 #11570 (#11941) Signed-off-by: Yifan Yang --- .bazelrc | 3 +++ bazel/envoy_internal.bzl | 5 +++-- bazel/repository_locations.bzl | 8 ++++---- ci/do_ci.sh | 1 + source/extensions/filters/http/cache/http_cache_utils.cc | 2 +- test/common/json/json_loader_test.cc | 2 +- .../filters/network/kafka/serialization_test.cc | 3 ++- tools/clang_tools/support/clang_tools.bzl | 1 - 8 files changed, 15 insertions(+), 10 deletions(-) diff --git a/.bazelrc b/.bazelrc index b88d5079faee..bf32a37e6b02 100644 --- a/.bazelrc +++ b/.bazelrc @@ -22,6 +22,7 @@ build --enable_platform_specific_config # Enable position independent code, this option is not supported on Windows and default on on macOS. build:linux --copt=-fPIC +build:linux --cxxopt=-std=c++17 # We already have absl in the build, define absl=1 to tell googletest to use absl for backtrace. 
build --define absl=1 @@ -65,6 +66,7 @@ build:clang-asan --config=asan build:clang-asan --linkopt -fuse-ld=lld # macOS ASAN/UBSAN +build:macos --cxxopt=-std=c++17 build:macos-asan --config=asan # Workaround, see https://github.com/bazelbuild/bazel/issues/6932 build:macos-asan --copt -Wno-macro-redefined @@ -265,6 +267,7 @@ build:windows --define manual_stamp=manual_stamp build:windows --copt="-DCARES_STATICLIB" build:windows --copt="-DNGHTTP2_STATICLIB" build:windows --copt="-DCURL_STATICLIB" +build:windows --cxxopt="/std:c++17" # Required to work around build defects on Windows MSVC cl # Unguarded gcc pragmas in quiche are not recognized by MSVC diff --git a/bazel/envoy_internal.bzl b/bazel/envoy_internal.bzl index 6c9d125e199e..07dad501e5a4 100644 --- a/bazel/envoy_internal.bzl +++ b/bazel/envoy_internal.bzl @@ -14,7 +14,6 @@ def envoy_copts(repository, test = False): "-Wformat", "-Wformat-security", "-Wvla", - "-std=c++14", ] # Windows options for cleanest service compilation; @@ -25,7 +24,6 @@ def envoy_copts(repository, test = False): msvc_options = [ "-WX", "-Zc:__cplusplus", - "-std:c++14", "-DWIN32", "-D_WIN32_WINNT=0x0A00", # _WIN32_WINNT_WIN10 "-DNTDDI_VERSION=0x0A000000", # NTDDI_WIN10 @@ -34,6 +32,9 @@ def envoy_copts(repository, test = False): "-DNOMCX", "-DNOIME", "-DNOCRYPT", + # this is to silence the incorrect MSVC compiler warning when trying to convert between + # std::optional data types while conversions between primitive types are producing no error + "-wd4244", ] return select({ diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 414bd0dca05c..ebe3b0177ac9 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -422,10 +422,10 @@ DEPENDENCY_REPOSITORIES = dict( cpe = "N/A", ), com_google_cel_cpp = dict( - sha256 = "1b283f93619b130504880d2f400bd449de9ab6be94ef26ecd2bb96921f48dd6c", - strip_prefix = "cel-cpp-50196761917300bbd47b59bd162e84817b67b7ab", - # 2020-06-08 - urls = ["https://github.com/google/cel-cpp/archive/50196761917300bbd47b59bd162e84817b67b7ab.tar.gz"], + sha256 = "cad7d01139947d78e413d112cb8f7431fbb33cf66b0adf9c280824803fc2a72e", + strip_prefix = "cel-cpp-b9453a09b28a1531c4917e8792b3ea61f6b1a447", + # 2020-07-14 + urls = ["https://github.com/google/cel-cpp/archive/b9453a09b28a1531c4917e8792b3ea61f6b1a447.tar.gz"], use_category = ["dataplane"], cpe = "N/A", ), diff --git a/ci/do_ci.sh b/ci/do_ci.sh index 55718a63b00f..d13c7be545bd 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -220,6 +220,7 @@ elif [[ "$CI_TARGET" == "bazel.compile_time_options" ]]; then --define quiche=enabled \ --define path_normalization_by_default=true \ --define deprecated_features=disabled \ + --define --cxxopt=-std=c++14 \ " ENVOY_STDLIB="${ENVOY_STDLIB:-libstdc++}" setup_clang_toolchain diff --git a/source/extensions/filters/http/cache/http_cache_utils.cc b/source/extensions/filters/http/cache/http_cache_utils.cc index 43dd624d8563..b93ed5f2f3a7 100644 --- a/source/extensions/filters/http/cache/http_cache_utils.cc +++ b/source/extensions/filters/http/cache/http_cache_utils.cc @@ -89,7 +89,7 @@ SystemTime::duration HttpCacheUtils::eatLeadingDuration(absl::string_view& s) { if (digits_length == 0) { return SystemTime::duration::zero(); } - const absl::string_view digits(s.begin(), digits_length); + const absl::string_view digits(s.data(), digits_length); s.remove_prefix(digits_length); uint64_t num; return absl::SimpleAtoi(digits, &num) ? 
std::chrono::seconds(num) : SystemTime::duration::max(); diff --git a/test/common/json/json_loader_test.cc b/test/common/json/json_loader_test.cc index 1201a3668d10..884caf5b0d0c 100644 --- a/test/common/json/json_loader_test.cc +++ b/test/common/json/json_loader_test.cc @@ -198,7 +198,7 @@ TEST_F(JsonLoaderTest, Basic) { { ObjectSharedPtr json = Factory::loadFromString("{}"); - EXPECT_THROW(json->getObjectArray("hello").empty(), Exception); + EXPECT_THROW((void)json->getObjectArray("hello").empty(), Exception); } { diff --git a/test/extensions/filters/network/kafka/serialization_test.cc b/test/extensions/filters/network/kafka/serialization_test.cc index 3d8a5af14e53..2cc656926d64 100644 --- a/test/extensions/filters/network/kafka/serialization_test.cc +++ b/test/extensions/filters/network/kafka/serialization_test.cc @@ -323,7 +323,8 @@ TEST(NullableBytesDeserializer, ShouldDeserialize) { } TEST(NullableBytesDeserializer, ShouldDeserializeEmptyBytes) { - const NullableBytes value{{}}; + // gcc refuses to initialize optional with empty vector with value{{}} + const NullableBytes value = {{}}; serializeThenDeserializeAndCheckEquality(value); } diff --git a/tools/clang_tools/support/clang_tools.bzl b/tools/clang_tools/support/clang_tools.bzl index 949903aaff05..398b80330b66 100644 --- a/tools/clang_tools/support/clang_tools.bzl +++ b/tools/clang_tools/support/clang_tools.bzl @@ -1,7 +1,6 @@ load("@rules_cc//cc:defs.bzl", "cc_binary", "cc_library", "cc_test") _clang_tools_copts = [ - "-std=c++14", "-fno-exceptions", "-fno-rtti", ] From 2966597391b9c7743dab1186f214229ca95e0243 Mon Sep 17 00:00:00 2001 From: "Drew S. Ortega" Date: Wed, 15 Jul 2020 16:39:33 -0700 Subject: [PATCH 645/909] hds: validate messages on receive to catch a missing field exception (#11969) Add an integration test for health check discovery service to make sure on receiving of a message it checks for missing fields before processing. Add outline for checking for exception in hc discovery service, using MessageUtil validate. Signed-off-by: Drew S. 
Ortega --- source/common/upstream/BUILD | 2 + .../upstream/health_discovery_service.cc | 19 +++- test/common/upstream/hds_test.cc | 86 +++++++++++++++++++ 3 files changed, 105 insertions(+), 2 deletions(-) diff --git a/source/common/upstream/BUILD b/source/common/upstream/BUILD index 14eb0236b542..50effb27094a 100644 --- a/source/common/upstream/BUILD +++ b/source/common/upstream/BUILD @@ -223,6 +223,8 @@ envoy_cc_library( "//source/common/config:version_converter_lib", "//source/common/grpc:async_client_lib", "//source/common/network:resolver_lib", + "//source/common/protobuf:message_validator_lib", + "//source/common/protobuf:utility_lib", "//source/extensions/transport_sockets:well_known_names", "//source/server:transport_socket_config_lib", "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", diff --git a/source/common/upstream/health_discovery_service.cc b/source/common/upstream/health_discovery_service.cc index 821179d84f15..a4b084d708ca 100644 --- a/source/common/upstream/health_discovery_service.cc +++ b/source/common/upstream/health_discovery_service.cc @@ -6,10 +6,13 @@ #include "envoy/config/core/v3/health_check.pb.h" #include "envoy/config/endpoint/v3/endpoint_components.pb.h" #include "envoy/service/health/v3/hds.pb.h" +#include "envoy/service/health/v3/hds.pb.validate.h" #include "envoy/stats/scope.h" #include "common/config/version_converter.h" +#include "common/protobuf/message_validator_impl.h" #include "common/protobuf/protobuf.h" +#include "common/protobuf/utility.h" #include "common/upstream/upstream_impl.h" namespace Envoy { @@ -176,6 +179,7 @@ void HdsDelegate::processMessage( store_stats_, ssl_context_manager_, false, info_factory_, cm_, local_info_, dispatcher_, random_, singleton_manager_, tls_, validation_visitor_, api_)); + hds_clusters_.back()->initialize([] {}); hds_clusters_.back()->startHealthchecks(access_log_manager_, runtime_, random_, dispatcher_, api_); @@ -189,13 +193,25 @@ void HdsDelegate::onReceiveMessage( stats_.requests_.inc(); ENVOY_LOG(debug, "New health check response message {} ", message->DebugString()); + // Validate message fields + try { + MessageUtil::validate(*message, validation_visitor_); + } catch (const ProtoValidationException& ex) { + // Increment error count + stats_.errors_.inc(); + ENVOY_LOG(warn, "Unable to validate health check specifier: {}", ex.what()); + + // Do not continue processing message + return; + } + // Reset hds_clusters_.clear(); // Set response auto server_response_ms = PROTOBUF_GET_MS_OR_DEFAULT(*message, interval, 1000); - // Process the HealthCheckSpecifier message + // Process the HealthCheckSpecifier message. 
processMessage(std::move(message)); if (server_response_ms_ != server_response_ms) { @@ -242,7 +258,6 @@ HdsCluster::HdsCluster(Server::Admin& admin, Runtime::Loader& runtime, envoy::config::endpoint::v3::Endpoint::HealthCheckConfig().default_instance(), 0, envoy::config::core::v3::UNKNOWN)); } - initialize([] {}); } ClusterSharedPtr HdsCluster::create() { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } diff --git a/test/common/upstream/hds_test.cc b/test/common/upstream/hds_test.cc index 7f3d076b5edb..3f1ffeff0355 100644 --- a/test/common/upstream/hds_test.cc +++ b/test/common/upstream/hds_test.cc @@ -45,6 +45,7 @@ class HdsDelegateFriend { std::unique_ptr&& message) { hd.processMessage(std::move(message)); }; + HdsDelegateStats getStats(HdsDelegate& hd) { return hd.stats_; }; }; class HdsTest : public testing::Test { @@ -227,6 +228,91 @@ TEST_F(HdsTest, TestProcessMessageHealthChecks) { EXPECT_EQ(hds_delegate_->hdsClusters()[1]->healthCheckers().size(), 3); } +// Test if processMessage exits gracefully upon receiving a malformed message +TEST_F(HdsTest, TestProcessMessageMissingFields) { + EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); + EXPECT_CALL(async_stream_, sendMessageRaw_(_, _)); + createHdsDelegate(); + + // Create Message + message.reset(createSimpleMessage()); + // remove healthy threshold field to create an error + message->mutable_cluster_health_checks(0)->mutable_health_checks(0)->clear_healthy_threshold(); + + // call onReceiveMessage function for testing. Should increment stat_ errors upon + // getting a bad message + hds_delegate_->onReceiveMessage(std::move(message)); + + // Ensure that we never enabled the response timer that would start health checks, + // since this config was invalid. + EXPECT_FALSE(server_response_timer_->enabled_); + + // ensure that no partial information was stored in hds_clusters_ + EXPECT_TRUE(hds_delegate_->hdsClusters().empty()); + + // Check Correctness by verifying one request and one error has been generated in stat_ + EXPECT_EQ(hds_delegate_friend_.getStats(*hds_delegate_).errors_.value(), 1); + EXPECT_EQ(hds_delegate_friend_.getStats(*hds_delegate_).requests_.value(), 1); +} + +// Test if processMessage exits gracefully upon receiving a malformed message +// There was a previous valid config, so we go back to that. 
+TEST_F(HdsTest, TestProcessMessageMissingFieldsWithFallback) { + EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); + EXPECT_CALL(async_stream_, sendMessageRaw_(_, _)); + createHdsDelegate(); + + // Create Message + message.reset(createSimpleMessage()); + + Network::MockClientConnection* connection_ = new NiceMock(); + EXPECT_CALL(dispatcher_, createClientConnection_(_, _, _, _)).WillRepeatedly(Return(connection_)); + EXPECT_CALL(*server_response_timer_, enableTimer(_, _)).Times(2); + EXPECT_CALL(async_stream_, sendMessageRaw_(_, false)); + EXPECT_CALL(test_factory_, createClusterInfo(_)).WillOnce(Return(cluster_info_)); + EXPECT_CALL(*connection_, setBufferLimits(_)); + EXPECT_CALL(dispatcher_, deferredDelete_(_)); + // Process message + hds_delegate_->onReceiveMessage(std::move(message)); + connection_->raiseEvent(Network::ConnectionEvent::Connected); + + // Create a invalid message + message.reset(createSimpleMessage()); + + // set this address to be distinguishable from the previous message in sendResponse() + message->mutable_cluster_health_checks(0) + ->mutable_locality_endpoints(0) + ->mutable_endpoints(0) + ->mutable_address() + ->mutable_socket_address() + ->set_address("9.9.9.9"); + + // remove healthy threshold field to create an error + message->mutable_cluster_health_checks(0)->mutable_health_checks(0)->clear_healthy_threshold(); + + // Pass invalid message through. Should increment stat_ errors upon + // getting a bad message. + hds_delegate_->onReceiveMessage(std::move(message)); + + // Ensure that the timer is enabled since there was a previous valid specifier. + EXPECT_TRUE(server_response_timer_->enabled_); + + // read the response and check that it is pinging the old + // address 127.0.0.0 instead of the new 9.9.9.9 + auto response = hds_delegate_->sendResponse(); + EXPECT_EQ(response.endpoint_health_response() + .endpoints_health(0) + .endpoint() + .address() + .socket_address() + .address(), + "127.0.0.0"); + + // Check Correctness by verifying one request and one error has been generated in stat_ + EXPECT_EQ(hds_delegate_friend_.getStats(*hds_delegate_).errors_.value(), 1); + EXPECT_EQ(hds_delegate_friend_.getStats(*hds_delegate_).requests_.value(), 2); +} + // Tests OnReceiveMessage given a minimal HealthCheckSpecifier message TEST_F(HdsTest, TestMinimalOnReceiveMessage) { EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); From 270c0dc133be33084fea4f3ed7a3bc3f177e44ed Mon Sep 17 00:00:00 2001 From: Dmitri Dolguikh Date: Thu, 16 Jul 2020 10:31:53 -0700 Subject: [PATCH 646/909] Updated envoy-build-ubuntu image tag (#12117) Signed-off-by: Dmitri Dolguikh --- .bazelrc | 2 +- .circleci/config.yml | 2 +- .devcontainer/Dockerfile | 2 +- bazel/repository_locations.bzl | 8 ++++---- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.bazelrc b/.bazelrc index bf32a37e6b02..17cd6008e399 100644 --- a/.bazelrc +++ b/.bazelrc @@ -207,7 +207,7 @@ build:remote-msvc-cl --config=rbe-toolchain-msvc-cl # Docker sandbox # NOTE: Update this from https://github.com/envoyproxy/envoy-build-tools/blob/master/toolchains/rbe_toolchains_config.bzl#L8 -build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu:f21773ab398a879f976936f72c78c9dd3718ca1e +build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu:736b8db2e1f0b55edb50719d2f8ddf383f46030b build:docker-sandbox --spawn_strategy=docker build:docker-sandbox --strategy=Javac=docker build:docker-sandbox 
--strategy=Closure=docker diff --git a/.circleci/config.yml b/.circleci/config.yml index 3bdaab466ba6..822d995b2d3c 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -5,7 +5,7 @@ executors: description: "A regular build executor based on ubuntu image" docker: # NOTE: Update this from https://github.com/envoyproxy/envoy-build-tools/blob/master/toolchains/rbe_toolchains_config.bzl#L8 - - image: envoyproxy/envoy-build-ubuntu:f21773ab398a879f976936f72c78c9dd3718ca1e + - image: envoyproxy/envoy-build-ubuntu:736b8db2e1f0b55edb50719d2f8ddf383f46030b resource_class: xlarge working_directory: /source diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 01a4fd3940ad..33ca454d55ad 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -1,4 +1,4 @@ -FROM gcr.io/envoy-ci/envoy-build:f21773ab398a879f976936f72c78c9dd3718ca1e +FROM gcr.io/envoy-ci/envoy-build:736b8db2e1f0b55edb50719d2f8ddf383f46030b ARG USERNAME=vscode ARG USER_UID=501 diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index ebe3b0177ac9..eee90bc45c46 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -67,10 +67,10 @@ DEPENDENCY_REPOSITORIES = dict( use_category = ["build"], ), envoy_build_tools = dict( - sha256 = "ff890c70d60e51c7ee80874f85c3905718b7f6929a6c367c850cdd0b9c01d44d", - strip_prefix = "envoy-build-tools-efaecf11d76b86551cf42e2354274ac2acd7042f", - # 2020-06-16 - urls = ["https://github.com/envoyproxy/envoy-build-tools/archive/efaecf11d76b86551cf42e2354274ac2acd7042f.tar.gz"], + sha256 = "dd5cc89bb69544659b20b88b28e642da0174739b68c82f029617b9749d61ab1d", + strip_prefix = "envoy-build-tools-289a5ca65aefd5a76f18f103d1425cfec5591417", + # 2020-07-15 + urls = ["https://github.com/envoyproxy/envoy-build-tools/archive/289a5ca65aefd5a76f18f103d1425cfec5591417.tar.gz"], use_category = ["build"], ), boringssl = dict( From d1fd1c60afdce415fb4388dd85d93f6c77511338 Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Thu, 16 Jul 2020 11:32:41 -0600 Subject: [PATCH 647/909] runtime: cleanup strict_authority_validation (#12119) Fixes https://github.com/envoyproxy/envoy/issues/11934 Signed-off-by: Matt Klein --- docs/root/version_history/current.rst | 1 + source/common/http/header_utility.cc | 3 +-- source/common/runtime/runtime_features.cc | 1 - 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 7ac7174e12b2..6df03dbda0e3 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -28,6 +28,7 @@ Removed Config or Runtime * http: removed legacy header sanitization and the runtime guard `envoy.reloadable_features.strict_header_validation`. * http: removed legacy transfer-encoding enforcement and runtime guard `envoy.reloadable_features.reject_unsupported_transfer_encodings`. +* http: removed configurable strict host validation and runtime guard `envoy.reloadable_features.strict_authority_validation`. New Features ------------ diff --git a/source/common/http/header_utility.cc b/source/common/http/header_utility.cc index b180ad3ead3b..b6119cc79729 100644 --- a/source/common/http/header_utility.cc +++ b/source/common/http/header_utility.cc @@ -223,8 +223,7 @@ void HeaderUtility::stripPortFromHost(RequestHeaderMap& headers, uint32_t listen absl::optional> HeaderUtility::requestHeadersValid(const RequestHeaderMap& headers) { // Make sure the host is valid. 
- if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.strict_authority_validation") && - headers.Host() && !HeaderUtility::authorityIsValid(headers.Host()->value().getStringView())) { + if (headers.Host() && !HeaderUtility::authorityIsValid(headers.Host()->value().getStringView())) { return SharedResponseCodeDetails::get().InvalidAuthority; } return absl::nullopt; diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc index 9ad2ee1617a4..8c1864ff69a2 100644 --- a/source/common/runtime/runtime_features.cc +++ b/source/common/runtime/runtime_features.cc @@ -56,7 +56,6 @@ constexpr const char* runtime_features[] = { "envoy.reloadable_features.http1_flood_protection", "envoy.reloadable_features.test_feature_true", "envoy.reloadable_features.connection_header_sanitization", - "envoy.reloadable_features.strict_authority_validation", // Begin alphabetically sorted section. "envoy.reloadable_features.activate_fds_next_event_loop", "envoy.deprecated_features.allow_deprecated_extension_names", From ff44d731ecc8d744b971b31aec816ad0b56bad42 Mon Sep 17 00:00:00 2001 From: Dhi Aurrahman Date: Fri, 17 Jul 2020 00:36:19 +0700 Subject: [PATCH 648/909] ext_authz: Allow to emit dynamic metadata (#11820) This patch enables ext_authz backed with gRPC service to emit metadata. The authorization can set the dynamic metadata (an opaque google.protobuf.Struct) as part of the `CheckResponse` when it is successful (i.e. when `http_response` is `OkHttpResponse`). Signed-off-by: Dhi Aurrahman --- api/envoy/service/auth/v3/external_auth.proto | 11 +++- .../advanced/well_known_dynamic_metadata.rst | 6 ++- .../http/http_filters/ext_authz_filter.rst | 15 ++++++ .../network_filters/ext_authz_filter.rst | 12 ++++- docs/root/version_history/current.rst | 3 +- .../envoy/service/auth/v3/external_auth.proto | 11 +++- .../filters/common/ext_authz/ext_authz.h | 4 ++ .../common/ext_authz/ext_authz_grpc_impl.cc | 1 + .../common/ext_authz/ext_authz_http_impl.cc | 8 +-- .../filters/http/ext_authz/ext_authz.cc | 6 +++ .../filters/network/ext_authz/BUILD | 1 + .../filters/network/ext_authz/config.cc | 2 +- .../filters/network/ext_authz/ext_authz.cc | 8 +++ .../filters/http/ext_authz/ext_authz_test.cc | 51 ++++++++++++++++++ .../filters/network/ext_authz/BUILD | 1 + .../network/ext_authz/ext_authz_test.cc | 54 +++++++++++-------- 16 files changed, 164 insertions(+), 30 deletions(-) diff --git a/api/envoy/service/auth/v3/external_auth.proto b/api/envoy/service/auth/v3/external_auth.proto index b93b61a3bde9..e2ee274fdfdb 100644 --- a/api/envoy/service/auth/v3/external_auth.proto +++ b/api/envoy/service/auth/v3/external_auth.proto @@ -6,6 +6,7 @@ import "envoy/config/core/v3/base.proto"; import "envoy/service/auth/v3/attribute_context.proto"; import "envoy/type/v3/http_status.proto"; +import "google/protobuf/struct.proto"; import "google/rpc/status.proto"; import "udpa/annotations/status.proto"; @@ -57,7 +58,7 @@ message DeniedHttpResponse { string body = 3; } -// HTTP attributes for an ok response. +// HTTP attributes for an OK response. message OkHttpResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.service.auth.v2.OkHttpResponse"; @@ -69,6 +70,14 @@ message OkHttpResponse { // by Leaving `append` as false, the filter will either add a new header, or override an existing // one if there is a match. 
repeated config.core.v3.HeaderValueOption headers = 2; + + // Optional response metadata that will be emitted as dynamic metadata to be consumed by the next + // filter. This metadata lives in a namespace specified by the canonical name of extension filter + // that requires it: + // + // - :ref:`envoy.filters.http.ext_authz ` for HTTP filter. + // - :ref:`envoy.filters.network.ext_authz ` for network filter. + google.protobuf.Struct dynamic_metadata = 3; } // Intended for gRPC and Network Authorization servers `only`. diff --git a/docs/root/configuration/advanced/well_known_dynamic_metadata.rst b/docs/root/configuration/advanced/well_known_dynamic_metadata.rst index 78bca6614d93..1eb74e6c920c 100644 --- a/docs/root/configuration/advanced/well_known_dynamic_metadata.rst +++ b/docs/root/configuration/advanced/well_known_dynamic_metadata.rst @@ -13,6 +13,8 @@ by looking at the operational metadata emitted by the MongoDB filter. The following Envoy filters emit dynamic metadata that other filters can leverage. +* :ref:`External Authorization Filter ` +* :ref:`External Authorization Network Filter ` * :ref:`Mongo Proxy Filter ` * :ref:`MySQL Proxy Filter ` * :ref:`Role Based Access Control (RBAC) Filter ` @@ -21,4 +23,6 @@ The following Envoy filters emit dynamic metadata that other filters can leverag The following Envoy filters can be configured to consume dynamic metadata emitted by other filters. -* :ref:`RateLimit Filter limit override ` \ No newline at end of file +* :ref:`External Authorization Filter via the metadata context namespaces + ` +* :ref:`RateLimit Filter limit override ` diff --git a/docs/root/configuration/http/http_filters/ext_authz_filter.rst b/docs/root/configuration/http/http_filters/ext_authz_filter.rst index 5da29d891ee4..ba7bfe0b8751 100644 --- a/docs/root/configuration/http/http_filters/ext_authz_filter.rst +++ b/docs/root/configuration/http/http_filters/ext_authz_filter.rst @@ -138,6 +138,21 @@ The HTTP filter outputs statistics in the *cluster..ext_au failure_mode_allowed, Counter, "Total requests that were error(s) but were allowed through because of failure_mode_allow set to true." +Dynamic Metadata +---------------- +.. _config_http_filters_ext_authz_dynamic_metadata: + +.. note:: + + The External Authorization filter emits dynamic metadata only when it is configured to use + gRPC service as the authorization server. + +The External Authorization filter emits dynamic metadata as an opaque ``google.protobuf.Struct`` +*only* when the gRPC authorization server returns an :ref:`OK +` :ref:`CheckResponse +` with a filled :ref:`dynamic_metadata +` field. + Runtime ------- The fraction of requests for which the filter is enabled can be configured via the :ref:`runtime_key diff --git a/docs/root/configuration/listeners/network_filters/ext_authz_filter.rst b/docs/root/configuration/listeners/network_filters/ext_authz_filter.rst index 9df08d8f4c2b..83dffae8a7a1 100644 --- a/docs/root/configuration/listeners/network_filters/ext_authz_filter.rst +++ b/docs/root/configuration/listeners/network_filters/ext_authz_filter.rst @@ -65,9 +65,19 @@ The network filter outputs statistics in the *config.ext_authz.* namespace. total, Counter, Total responses from the filter. error, Counter, Total errors contacting the external service. - denied, Counter, Total responses from the authorizations service that were to deny the traffic. + denied, Counter, Total responses from the authorizations service that were to deny the traffic. 
failure_mode_allowed, Counter, "Total requests that were error(s) but were allowed through because of failure_mode_allow set to true." ok, Counter, Total responses from the authorization service that were to allow the traffic. cx_closed, Counter, Total connections that were closed. active, Gauge, Total currently active requests in transit to the authorization service. + +Dynamic Metadata +---------------- +.. _config_network_filters_ext_authz_dynamic_metadata: + +The External Authorization filter emits dynamic metadata as an opaque ``google.protobuf.Struct`` +*only* when the gRPC authorization server returns an :ref:`OK +` :ref:`CheckResponse +` with a filled :ref:`dynamic_metadata +` field. diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 6df03dbda0e3..b49649b892d3 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -32,10 +32,11 @@ Removed Config or Runtime New Features ------------ + +* ext_authz filter: added support for emitting dynamic metadata for both :ref:`HTTP ` and :ref:`network ` filters. * grpc-json: support specifying `response_body` field in for `google.api.HttpBody` message. * load balancer: added a :ref:`configuration` option to specify the active request bias used by the least request load balancer. * tap: added :ref:`generic body matcher` to scan http requests and responses for text or hex patterns. Deprecated ---------- - diff --git a/generated_api_shadow/envoy/service/auth/v3/external_auth.proto b/generated_api_shadow/envoy/service/auth/v3/external_auth.proto index b93b61a3bde9..e2ee274fdfdb 100644 --- a/generated_api_shadow/envoy/service/auth/v3/external_auth.proto +++ b/generated_api_shadow/envoy/service/auth/v3/external_auth.proto @@ -6,6 +6,7 @@ import "envoy/config/core/v3/base.proto"; import "envoy/service/auth/v3/attribute_context.proto"; import "envoy/type/v3/http_status.proto"; +import "google/protobuf/struct.proto"; import "google/rpc/status.proto"; import "udpa/annotations/status.proto"; @@ -57,7 +58,7 @@ message DeniedHttpResponse { string body = 3; } -// HTTP attributes for an ok response. +// HTTP attributes for an OK response. message OkHttpResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.service.auth.v2.OkHttpResponse"; @@ -69,6 +70,14 @@ message OkHttpResponse { // by Leaving `append` as false, the filter will either add a new header, or override an existing // one if there is a match. repeated config.core.v3.HeaderValueOption headers = 2; + + // Optional response metadata that will be emitted as dynamic metadata to be consumed by the next + // filter. This metadata lives in a namespace specified by the canonical name of extension filter + // that requires it: + // + // - :ref:`envoy.filters.http.ext_authz ` for HTTP filter. + // - :ref:`envoy.filters.network.ext_authz ` for network filter. + google.protobuf.Struct dynamic_metadata = 3; } // Intended for gRPC and Network Authorization servers `only`. diff --git a/source/extensions/filters/common/ext_authz/ext_authz.h b/source/extensions/filters/common/ext_authz/ext_authz.h index a46d6e3b7191..ba34d2e8a9fc 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz.h +++ b/source/extensions/filters/common/ext_authz/ext_authz.h @@ -62,6 +62,10 @@ struct Response { std::string body; // Optional http status used only on denied response. 
Http::Code status_code{}; + + // A set of metadata returned by the authorization server, that will be emitted as filter's + // dynamic metadata that other filters can leverage. + ProtobufWkt::Struct dynamic_metadata; }; using ResponsePtr = std::unique_ptr; diff --git a/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc b/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc index ae5ce707fb37..c82435a23edc 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc +++ b/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc @@ -56,6 +56,7 @@ void GrpcClientImpl::onSuccess(std::unique_ptrstatus = CheckStatus::OK; if (response->has_ok_response()) { + authz_response->dynamic_metadata = response->ok_response().dynamic_metadata(); toAuthzResponseHeader(authz_response, response->ok_response().headers()); } } else { diff --git a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc index 01894546ab0f..5505298fd881 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc +++ b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc @@ -33,7 +33,8 @@ const Http::HeaderMap& lengthZeroHeader() { const Response& errorResponse() { CONSTRUCT_ON_FIRST_USE(Response, Response{CheckStatus::Error, Http::HeaderVector{}, Http::HeaderVector{}, - Http::HeaderVector{}, EMPTY_STRING, Http::Code::Forbidden}); + Http::HeaderVector{}, EMPTY_STRING, Http::Code::Forbidden, + ProtobufWkt::Struct{}}); } // SuccessResponse used for creating either DENIED or OK authorization responses. @@ -317,7 +318,8 @@ ResponsePtr RawHttpClientImpl::toResponse(Http::ResponseMessagePtr message) { SuccessResponse ok{message->headers(), config_->upstreamHeaderMatchers(), config_->upstreamHeaderToAppendMatchers(), Response{CheckStatus::OK, Http::HeaderVector{}, Http::HeaderVector{}, - Http::HeaderVector{}, EMPTY_STRING, Http::Code::OK}}; + Http::HeaderVector{}, EMPTY_STRING, Http::Code::OK, + ProtobufWkt::Struct{}}}; return std::move(ok.response_); } @@ -326,7 +328,7 @@ ResponsePtr RawHttpClientImpl::toResponse(Http::ResponseMessagePtr message) { config_->upstreamHeaderToAppendMatchers(), Response{CheckStatus::Denied, Http::HeaderVector{}, Http::HeaderVector{}, Http::HeaderVector{}, message->bodyAsString(), - static_cast(status_code)}}; + static_cast(status_code), ProtobufWkt::Struct{}}}; return std::move(denied.response_); } diff --git a/source/extensions/filters/http/ext_authz/ext_authz.cc b/source/extensions/filters/http/ext_authz/ext_authz.cc index 7e8899e3d972..176c168d6e65 100644 --- a/source/extensions/filters/http/ext_authz/ext_authz.cc +++ b/source/extensions/filters/http/ext_authz/ext_authz.cc @@ -196,6 +196,12 @@ void Filter::onComplete(Filters::Common::ExtAuthz::ResponsePtr&& response) { request_headers_->appendCopy(header.first, header.second); } } + + if (!response->dynamic_metadata.fields().empty()) { + callbacks_->streamInfo().setDynamicMetadata(HttpFilterNames::get().ExtAuthorization, + response->dynamic_metadata); + } + if (cluster_) { config_->incCounter(cluster_->statsScope(), config_->ext_authz_ok_); } diff --git a/source/extensions/filters/network/ext_authz/BUILD b/source/extensions/filters/network/ext_authz/BUILD index e321d1b1221d..1a7277d7ac4d 100644 --- a/source/extensions/filters/network/ext_authz/BUILD +++ b/source/extensions/filters/network/ext_authz/BUILD @@ -26,6 +26,7 @@ envoy_cc_library( "//source/common/tracing:http_tracer_lib", 
"//source/extensions/filters/common/ext_authz:ext_authz_grpc_lib", "//source/extensions/filters/common/ext_authz:ext_authz_interface", + "//source/extensions/filters/network:well_known_names", "@envoy_api//envoy/extensions/filters/network/ext_authz/v3:pkg_cc_proto", "@envoy_api//envoy/service/auth/v3:pkg_cc_proto", ], diff --git a/source/extensions/filters/network/ext_authz/config.cc b/source/extensions/filters/network/ext_authz/config.cc index f42e2957512b..8bfdf1f81f7d 100644 --- a/source/extensions/filters/network/ext_authz/config.cc +++ b/source/extensions/filters/network/ext_authz/config.cc @@ -23,7 +23,7 @@ namespace ExtAuthz { Network::FilterFactoryCb ExtAuthzConfigFactory::createFilterFactoryFromProtoTyped( const envoy::extensions::filters::network::ext_authz::v3::ExtAuthz& proto_config, Server::Configuration::FactoryContext& context) { - ConfigSharedPtr ext_authz_config(new Config(proto_config, context.scope())); + ConfigSharedPtr ext_authz_config = std::make_shared(proto_config, context.scope()); const uint32_t timeout_ms = PROTOBUF_GET_MS_OR_DEFAULT(proto_config.grpc_service(), timeout, 200); return [grpc_service = proto_config.grpc_service(), &context, ext_authz_config, diff --git a/source/extensions/filters/network/ext_authz/ext_authz.cc b/source/extensions/filters/network/ext_authz/ext_authz.cc index af9117884608..97feb62e0d22 100644 --- a/source/extensions/filters/network/ext_authz/ext_authz.cc +++ b/source/extensions/filters/network/ext_authz/ext_authz.cc @@ -8,6 +8,8 @@ #include "common/common/assert.h" #include "common/tracing/http_tracer_impl.h" +#include "extensions/filters/network/well_known_names.h" + namespace Envoy { namespace Extensions { namespace NetworkFilters { @@ -90,6 +92,12 @@ void Filter::onComplete(Filters::Common::ExtAuthz::ResponsePtr&& response) { // Status is Error and yet we are configured to allow traffic. Click a counter. config_->stats().failure_mode_allowed_.inc(); } + + if (!response->dynamic_metadata.fields().empty()) { + filter_callbacks_->connection().streamInfo().setDynamicMetadata( + NetworkFilterNames::get().ExtAuthorization, response->dynamic_metadata); + } + // We can get completion inline, so only call continue if that isn't happening. if (!calling_check_) { filter_callbacks_->continueReading(); diff --git a/test/extensions/filters/http/ext_authz/ext_authz_test.cc b/test/extensions/filters/http/ext_authz/ext_authz_test.cc index a075909cd4ca..3948822584f8 100644 --- a/test/extensions/filters/http/ext_authz/ext_authz_test.cc +++ b/test/extensions/filters/http/ext_authz/ext_authz_test.cc @@ -1126,6 +1126,8 @@ TEST_P(HttpFilterTestParam, OkResponse) { Filters::Common::ExtAuthz::Response response{}; response.status = Filters::Common::ExtAuthz::CheckStatus::OK; + // Send an OK response Without setting the dynamic metadata field. + EXPECT_CALL(filter_callbacks_.stream_info_, setDynamicMetadata(_, _)).Times(0); request_callbacks_->onComplete(std::make_unique(response)); EXPECT_EQ( 1U, filter_callbacks_.clusterInfo()->statsScope().counterFromString("ext_authz.ok").value()); @@ -1445,6 +1447,55 @@ TEST_P(HttpFilterTestParam, OverrideEncodingHeaders) { filter_callbacks_.clusterInfo()->statsScope().counterFromString("upstream_rq_403").value()); } +// Verify that when returning an OK response with dynamic_metadata field set, the filter emits +// dynamic metadata. 
+TEST_F(HttpFilterTest, EmitDynamicMetadata) { + InSequence s; + + initialize(R"EOF( + grpc_service: + envoy_grpc: + cluster_name: "ext_authz_server" + )EOF"); + + prepareCheck(); + + EXPECT_CALL(*client_, check(_, _, testing::A(), _)) + .WillOnce( + WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { + request_callbacks_ = &callbacks; + }))); + EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark, + filter_->decodeHeaders(request_headers_, false)); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false)); + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_)); + + Filters::Common::ExtAuthz::Response response{}; + response.status = Filters::Common::ExtAuthz::CheckStatus::OK; + response.headers_to_set = Http::HeaderVector{{Http::LowerCaseString{"foo"}, "bar"}}; + + auto* fields = response.dynamic_metadata.mutable_fields(); + (*fields)["foo"] = ValueUtil::stringValue("ok"); + (*fields)["bar"] = ValueUtil::numberValue(1); + + EXPECT_CALL(filter_callbacks_.stream_info_, setDynamicMetadata(_, _)) + .WillOnce(Invoke([&response](const std::string& ns, + const ProtobufWkt::Struct& returned_dynamic_metadata) { + EXPECT_EQ(ns, HttpFilterNames::get().ExtAuthorization); + EXPECT_TRUE(TestUtility::protoEqual(returned_dynamic_metadata, response.dynamic_metadata)); + })); + + EXPECT_CALL(filter_callbacks_, continueDecoding()); + EXPECT_CALL(filter_callbacks_.stream_info_, + setResponseFlag(Envoy::StreamInfo::ResponseFlag::UnauthorizedExternalService)) + .Times(0); + request_callbacks_->onComplete(std::make_unique(response)); + + EXPECT_EQ( + 1U, filter_callbacks_.clusterInfo()->statsScope().counterFromString("ext_authz.ok").value()); + EXPECT_EQ(1U, config_->stats().ok_.value()); +} + // Test that when a connection awaiting a authorization response is canceled then the // authorization call is closed. 
TEST_P(HttpFilterTestParam, ResetDuringCall) { diff --git a/test/extensions/filters/network/ext_authz/BUILD b/test/extensions/filters/network/ext_authz/BUILD index f8b80bc75c63..df951f392f32 100644 --- a/test/extensions/filters/network/ext_authz/BUILD +++ b/test/extensions/filters/network/ext_authz/BUILD @@ -24,6 +24,7 @@ envoy_extension_cc_test( "//source/common/network:address_lib", "//source/common/protobuf:utility_lib", "//source/common/stats:stats_lib", + "//source/extensions/filters/network:well_known_names", "//source/extensions/filters/network/ext_authz", "//test/extensions/filters/common/ext_authz:ext_authz_mocks", "//test/mocks/network:network_mocks", diff --git a/test/extensions/filters/network/ext_authz/ext_authz_test.cc b/test/extensions/filters/network/ext_authz/ext_authz_test.cc index 47a208dc8c3b..b977c88d0176 100644 --- a/test/extensions/filters/network/ext_authz/ext_authz_test.cc +++ b/test/extensions/filters/network/ext_authz/ext_authz_test.cc @@ -12,6 +12,7 @@ #include "common/protobuf/utility.h" #include "extensions/filters/network/ext_authz/ext_authz.h" +#include "extensions/filters/network/well_known_names.h" #include "test/extensions/filters/common/ext_authz/mocks.h" #include "test/mocks/network/mocks.h" @@ -37,19 +38,11 @@ namespace ExtAuthz { class ExtAuthzFilterTest : public testing::Test { public: - ExtAuthzFilterTest() { - std::string json = R"EOF( - { - "grpc_service": { - "envoy_grpc": { "cluster_name": "ext_authz_server" } - }, - "failure_mode_allow": true, - "stat_prefix": "name" - } - )EOF"; + ExtAuthzFilterTest() { initialize(); } + void initialize() { envoy::extensions::filters::network::ext_authz::v3::ExtAuthz proto_config{}; - TestUtility::loadFromJson(json, proto_config); + TestUtility::loadFromYaml(default_yaml_string_, proto_config); config_ = std::make_shared(proto_config, stats_store_); client_ = new Filters::Common::ExtAuthz::MockClient(); filter_ = std::make_unique(config_, Filters::Common::ExtAuthz::ClientPtr{client_}); @@ -82,18 +75,24 @@ class ExtAuthzFilterTest : public testing::Test { NiceMock filter_callbacks_; Network::Address::InstanceConstSharedPtr addr_; Filters::Common::ExtAuthz::RequestCallbacks* request_callbacks_{}; + const std::string default_yaml_string_ = R"EOF( +grpc_service: + envoy_grpc: + cluster_name: ext_authz_server + +failure_mode_allow: true +stat_prefix: name + )EOF"; }; TEST_F(ExtAuthzFilterTest, BadExtAuthzConfig) { - std::string json_string = R"EOF( - { - "stat_prefix": "my_stat_prefix", - "grpc_service": {} - } + std::string yaml_string = R"EOF( +grpc_service: {} +stat_prefix: name )EOF"; envoy::extensions::filters::network::ext_authz::v3::ExtAuthz proto_config{}; - TestUtility::loadFromJson(json_string, proto_config); + TestUtility::loadFromYaml(yaml_string, proto_config); EXPECT_THROW( TestUtility::downcastAndValidate< @@ -102,8 +101,6 @@ TEST_F(ExtAuthzFilterTest, BadExtAuthzConfig) { } TEST_F(ExtAuthzFilterTest, OKWithOnData) { - InSequence s; - EXPECT_CALL(filter_callbacks_.connection_, remoteAddress()).WillOnce(ReturnRef(addr_)); EXPECT_CALL(filter_callbacks_.connection_, localAddress()).WillOnce(ReturnRef(addr_)); EXPECT_CALL(*client_, check(_, _, testing::A(), _)) @@ -126,8 +123,23 @@ TEST_F(ExtAuthzFilterTest, OKWithOnData) { 1U, stats_store_.gauge("ext_authz.name.active", Stats::Gauge::ImportMode::Accumulate).value()); + Filters::Common::ExtAuthz::Response response{}; + response.status = Filters::Common::ExtAuthz::CheckStatus::OK; + response.headers_to_set = 
Http::HeaderVector{{Http::LowerCaseString{"foo"}, "bar"}}; + + auto* fields = response.dynamic_metadata.mutable_fields(); + (*fields)["foo"] = ValueUtil::stringValue("ok"); + (*fields)["bar"] = ValueUtil::numberValue(1); + + EXPECT_CALL(filter_callbacks_.connection_.stream_info_, setDynamicMetadata(_, _)) + .WillOnce(Invoke([&response](const std::string& ns, + const ProtobufWkt::Struct& returned_dynamic_metadata) { + EXPECT_EQ(ns, NetworkFilterNames::get().ExtAuthorization); + EXPECT_TRUE(TestUtility::protoEqual(returned_dynamic_metadata, response.dynamic_metadata)); + })); + EXPECT_CALL(filter_callbacks_, continueReading()); - request_callbacks_->onComplete(makeAuthzResponse(Filters::Common::ExtAuthz::CheckStatus::OK)); + request_callbacks_->onComplete(std::make_unique(response)); EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false)); @@ -314,7 +326,7 @@ TEST_F(ExtAuthzFilterTest, ImmediateOK) { WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { callbacks.onComplete(makeAuthzResponse(Filters::Common::ExtAuthz::CheckStatus::OK)); }))); - + EXPECT_CALL(filter_callbacks_.connection_.stream_info_, setDynamicMetadata(_, _)).Times(0); EXPECT_EQ(Network::FilterStatus::Continue, filter_->onNewConnection()); Buffer::OwnedImpl data("hello"); EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false)); From c9f80653a427bf1624971cdbf1a27f87fd3422f1 Mon Sep 17 00:00:00 2001 From: Arthur Yan <55563955+arthuryan-k@users.noreply.github.com> Date: Thu, 16 Jul 2020 13:40:31 -0400 Subject: [PATCH 649/909] fuzz: rewrote fuzz tests for listener filters original_dst and original_src (#12078) Rewrote fuzz tests for listener filters original_dst and original_src to utilize generic uber_filter Signed-off-by: Arthur Yan --- test/extensions/filters/listener/common/BUILD | 27 ++++++++ .../listener/common/listener_filter_fuzzer.cc | 30 +++++++++ .../listener/common/listener_filter_fuzzer.h | 32 ++++++++++ .../common/listener_filter_fuzzer.proto | 12 ++++ .../filters/listener/original_dst/BUILD | 9 +-- .../original_dst_corpus/invalid_test | 5 +- .../original_dst_corpus/ipv4_test | 5 +- .../original_dst_corpus/ipv6_test | 5 +- .../original_dst_corpus/unix_test | 5 +- .../original_dst/original_dst_fuzz_test.cc | 63 ++----------------- .../original_dst/original_dst_fuzz_test.proto | 7 --- .../filters/listener/original_src/BUILD | 4 +- .../original_src_corpus/ipv4_test | 8 ++- .../original_src_corpus/unix_test | 7 ++- .../original_src/original_src_fuzz_test.cc | 20 ++---- .../original_src/original_src_fuzz_test.proto | 5 +- test/mocks/network/BUILD | 8 +++ test/mocks/network/fakes.h | 62 ++++++++++++++++++ 18 files changed, 217 insertions(+), 97 deletions(-) create mode 100644 test/extensions/filters/listener/common/BUILD create mode 100644 test/extensions/filters/listener/common/listener_filter_fuzzer.cc create mode 100644 test/extensions/filters/listener/common/listener_filter_fuzzer.h create mode 100644 test/extensions/filters/listener/common/listener_filter_fuzzer.proto delete mode 100644 test/extensions/filters/listener/original_dst/original_dst_fuzz_test.proto create mode 100644 test/mocks/network/fakes.h diff --git a/test/extensions/filters/listener/common/BUILD b/test/extensions/filters/listener/common/BUILD new file mode 100644 index 000000000000..e306f3a43382 --- /dev/null +++ b/test/extensions/filters/listener/common/BUILD @@ -0,0 +1,27 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_test_library", + "envoy_package", + 
"envoy_proto_library", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_proto_library( + name = "listener_filter_fuzzer_proto", + srcs = ["listener_filter_fuzzer.proto"], +) + +envoy_cc_test_library( + name = "listener_filter_fuzzer_lib", + srcs = ["listener_filter_fuzzer.cc"], + hdrs = ["listener_filter_fuzzer.h"], + deps = [ + ":listener_filter_fuzzer_proto_cc_proto", + "//include/envoy/network:filter_interface", + "//test/mocks/network:network_fakes", + "//test/mocks/network:network_mocks", + ], +) diff --git a/test/extensions/filters/listener/common/listener_filter_fuzzer.cc b/test/extensions/filters/listener/common/listener_filter_fuzzer.cc new file mode 100644 index 000000000000..6ae3d9f689d8 --- /dev/null +++ b/test/extensions/filters/listener/common/listener_filter_fuzzer.cc @@ -0,0 +1,30 @@ +#include "test/extensions/filters/listener/common/listener_filter_fuzzer.h" + +#include "common/network/utility.h" + +namespace Envoy { +namespace Extensions { +namespace ListenerFilters { + +void ListenerFilterFuzzer::fuzz( + Network::ListenerFilter& filter, + const test::extensions::filters::listener::FilterFuzzTestCase& input) { + try { + fuzzerSetup(input); + } catch (const EnvoyException& e) { + ENVOY_LOG_MISC(debug, "EnvoyException: {}", e.what()); + return; + } + + filter.onAccept(cb_); +} + +void ListenerFilterFuzzer::socketSetup( + const test::extensions::filters::listener::FilterFuzzTestCase& input) { + socket_.setLocalAddress(Network::Utility::resolveUrl(input.sock().local_address())); + socket_.setRemoteAddress(Network::Utility::resolveUrl(input.sock().remote_address())); +} + +} // namespace ListenerFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/listener/common/listener_filter_fuzzer.h b/test/extensions/filters/listener/common/listener_filter_fuzzer.h new file mode 100644 index 000000000000..d32eda68dbd7 --- /dev/null +++ b/test/extensions/filters/listener/common/listener_filter_fuzzer.h @@ -0,0 +1,32 @@ +#include "envoy/network/filter.h" + +#include "test/extensions/filters/listener/common/listener_filter_fuzzer.pb.validate.h" +#include "test/mocks/network/fakes.h" +#include "test/mocks/network/mocks.h" + +#include "gmock/gmock.h" + +namespace Envoy { +namespace Extensions { +namespace ListenerFilters { + +class ListenerFilterFuzzer { +public: + void fuzz(Network::ListenerFilter& filter, + const test::extensions::filters::listener::FilterFuzzTestCase& input); + +private: + void fuzzerSetup(const test::extensions::filters::listener::FilterFuzzTestCase& input) { + ON_CALL(cb_, socket()).WillByDefault(testing::ReturnRef(socket_)); + socketSetup(input); + } + + void socketSetup(const test::extensions::filters::listener::FilterFuzzTestCase& input); + + NiceMock cb_; + Network::FakeConnectionSocket socket_; +}; + +} // namespace ListenerFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/listener/common/listener_filter_fuzzer.proto b/test/extensions/filters/listener/common/listener_filter_fuzzer.proto new file mode 100644 index 000000000000..916c645d41ba --- /dev/null +++ b/test/extensions/filters/listener/common/listener_filter_fuzzer.proto @@ -0,0 +1,12 @@ +syntax = "proto3"; + +package test.extensions.filters.listener; + +message Socket { + string local_address = 1; + string remote_address = 2; +} + +message FilterFuzzTestCase { + Socket sock = 1; +} \ No newline at end of file diff --git a/test/extensions/filters/listener/original_dst/BUILD 
b/test/extensions/filters/listener/original_dst/BUILD index 562775fc6cac..50c3b5931b3a 100644 --- a/test/extensions/filters/listener/original_dst/BUILD +++ b/test/extensions/filters/listener/original_dst/BUILD @@ -2,7 +2,6 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_fuzz_test", "envoy_package", - "envoy_proto_library", ) load( "//test/extensions:extensions_build_system.bzl", @@ -23,18 +22,12 @@ envoy_extension_cc_test( ], ) -envoy_proto_library( - name = "original_dst_fuzz_test_proto", - srcs = ["original_dst_fuzz_test.proto"], -) - envoy_cc_fuzz_test( name = "original_dst_fuzz_test", srcs = ["original_dst_fuzz_test.cc"], corpus = "original_dst_corpus", deps = [ - ":original_dst_fuzz_test_proto_cc_proto", "//source/extensions/filters/listener/original_dst:original_dst_lib", - "//test/mocks/network:network_mocks", + "//test/extensions/filters/listener/common:listener_filter_fuzzer_lib", ], ) diff --git a/test/extensions/filters/listener/original_dst/original_dst_corpus/invalid_test b/test/extensions/filters/listener/original_dst/original_dst_corpus/invalid_test index a015d2d6e09f..7c650514ebbb 100644 --- a/test/extensions/filters/listener/original_dst/original_dst_corpus/invalid_test +++ b/test/extensions/filters/listener/original_dst/original_dst_corpus/invalid_test @@ -1 +1,4 @@ -address: "hello world" \ No newline at end of file +sock { + local_address: "hello world" + remote_address: "tcp://0.0.0.0:0" +} \ No newline at end of file diff --git a/test/extensions/filters/listener/original_dst/original_dst_corpus/ipv4_test b/test/extensions/filters/listener/original_dst/original_dst_corpus/ipv4_test index 6a87711c09a6..20cdd6796db9 100644 --- a/test/extensions/filters/listener/original_dst/original_dst_corpus/ipv4_test +++ b/test/extensions/filters/listener/original_dst/original_dst_corpus/ipv4_test @@ -1 +1,4 @@ -address: "tcp://0.0.0.0:0" \ No newline at end of file +sock { + local_address: "tcp://0.0.0.0:0" + remote_address: "tcp://0.0.0.0:0" +} \ No newline at end of file diff --git a/test/extensions/filters/listener/original_dst/original_dst_corpus/ipv6_test b/test/extensions/filters/listener/original_dst/original_dst_corpus/ipv6_test index 9d8f333019e0..bda8f2989203 100644 --- a/test/extensions/filters/listener/original_dst/original_dst_corpus/ipv6_test +++ b/test/extensions/filters/listener/original_dst/original_dst_corpus/ipv6_test @@ -1 +1,4 @@ -address: "tcp://[a:b:c:d::]:0" \ No newline at end of file +sock { + local_address: "tcp://[a:b:c:d::]:0" + remote_address: "tcp://0.0.0.0:0" +} \ No newline at end of file diff --git a/test/extensions/filters/listener/original_dst/original_dst_corpus/unix_test b/test/extensions/filters/listener/original_dst/original_dst_corpus/unix_test index 7df146a6cabd..3936a8a1c0b1 100644 --- a/test/extensions/filters/listener/original_dst/original_dst_corpus/unix_test +++ b/test/extensions/filters/listener/original_dst/original_dst_corpus/unix_test @@ -1 +1,4 @@ -address: "unix://tmp/server" \ No newline at end of file +sock { + local_address: "unix://tmp/server" + remote_address: "tcp://0.0.0.0:0" +} \ No newline at end of file diff --git a/test/extensions/filters/listener/original_dst/original_dst_fuzz_test.cc b/test/extensions/filters/listener/original_dst/original_dst_fuzz_test.cc index 90ac50f389aa..db982456e34f 100644 --- a/test/extensions/filters/listener/original_dst/original_dst_fuzz_test.cc +++ b/test/extensions/filters/listener/original_dst/original_dst_fuzz_test.cc @@ -1,59 +1,15 @@ -#include "common/network/utility.h" - #include 
"extensions/filters/listener/original_dst/original_dst.h" -#include "test/extensions/filters/listener/original_dst/original_dst_fuzz_test.pb.validate.h" +#include "test/extensions/filters/listener/common/listener_filter_fuzzer.h" +#include "test/extensions/filters/listener/common/listener_filter_fuzzer.pb.validate.h" #include "test/fuzz/fuzz_runner.h" -#include "test/mocks/network/mocks.h" - -#include "gmock/gmock.h" namespace Envoy { namespace Extensions { namespace ListenerFilters { namespace OriginalDst { -class FakeConnectionSocket : public Network::MockConnectionSocket { - const Network::Address::InstanceConstSharedPtr& local_address_; - -public: - ~FakeConnectionSocket() override = default; - - FakeConnectionSocket(const Network::Address::InstanceConstSharedPtr& local_address) - : local_address_(local_address) {} - - const Network::Address::InstanceConstSharedPtr& localAddress() const override { - return local_address_; - } - - Network::Address::Type addressType() const override { return local_address_->type(); } - - absl::optional ipVersion() const override { - if (local_address_->type() != Network::Address::Type::Ip) { - return absl::nullopt; - } - - return local_address_->ip()->version(); - } - - Api::SysCallIntResult getSocketOption(int level, int, void* optval, socklen_t*) const override { - switch (level) { - case SOL_IPV6: - static_cast(optval)->ss_family = AF_INET6; - break; - case SOL_IP: - static_cast(optval)->ss_family = AF_INET; - break; - default: - NOT_REACHED_GCOVR_EXCL_LINE; - } - - return Api::SysCallIntResult{0, 0}; - } -}; - -DEFINE_PROTO_FUZZER( - const envoy::extensions::filters::listener::original_dst::v3::OriginalDstTestCase& input) { +DEFINE_PROTO_FUZZER(const test::extensions::filters::listener::FilterFuzzTestCase& input) { try { TestUtility::validate(input); @@ -62,21 +18,14 @@ DEFINE_PROTO_FUZZER( return; } - NiceMock callbacks; - Network::Address::InstanceConstSharedPtr address = nullptr; + auto filter = std::make_unique(); try { - address = Network::Utility::resolveUrl(input.address()); + ListenerFilterFuzzer fuzzer; + fuzzer.fuzz(*filter, input); } catch (const EnvoyException& e) { ENVOY_LOG_MISC(debug, "EnvoyException: {}", e.what()); - return; } - - FakeConnectionSocket socket(address); - ON_CALL(callbacks, socket()).WillByDefault(testing::ReturnRef(socket)); - - auto filter = std::make_unique(); - filter->onAccept(callbacks); } } // namespace OriginalDst diff --git a/test/extensions/filters/listener/original_dst/original_dst_fuzz_test.proto b/test/extensions/filters/listener/original_dst/original_dst_fuzz_test.proto deleted file mode 100644 index f6e5e28e2def..000000000000 --- a/test/extensions/filters/listener/original_dst/original_dst_fuzz_test.proto +++ /dev/null @@ -1,7 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.listener.original_dst.v3; - -message OriginalDstTestCase { - string address = 2; -} \ No newline at end of file diff --git a/test/extensions/filters/listener/original_src/BUILD b/test/extensions/filters/listener/original_src/BUILD index c3cf3365ad0a..e4ab73504612 100644 --- a/test/extensions/filters/listener/original_src/BUILD +++ b/test/extensions/filters/listener/original_src/BUILD @@ -56,6 +56,7 @@ envoy_proto_library( name = "original_src_fuzz_test_proto", srcs = ["original_src_fuzz_test.proto"], deps = [ + "//test/extensions/filters/listener/common:listener_filter_fuzzer_proto", "@envoy_api//envoy/extensions/filters/listener/original_src/v3:pkg", ], ) @@ -67,7 +68,6 @@ envoy_cc_fuzz_test( deps = [ 
":original_src_fuzz_test_proto_cc_proto", "//source/extensions/filters/listener/original_src:original_src_lib", - "//test/mocks/network:network_mocks", - "@envoy_api//envoy/extensions/filters/listener/original_src/v3:pkg_cc_proto", + "//test/extensions/filters/listener/common:listener_filter_fuzzer_lib", ], ) diff --git a/test/extensions/filters/listener/original_src/original_src_corpus/ipv4_test b/test/extensions/filters/listener/original_src/original_src_corpus/ipv4_test index bd2aeb926935..7d439bab6cad 100644 --- a/test/extensions/filters/listener/original_src/original_src_corpus/ipv4_test +++ b/test/extensions/filters/listener/original_src/original_src_corpus/ipv4_test @@ -3,5 +3,9 @@ config { mark: 0 } -address: "tcp://1.2.3.4:0" - +data { + sock { + local_address: "tcp://0.0.0.0:0" + remote_address: "tcp://1.2.3.4:0" + } +} diff --git a/test/extensions/filters/listener/original_src/original_src_corpus/unix_test b/test/extensions/filters/listener/original_src/original_src_corpus/unix_test index 841e6094d5ca..ecb14359bd52 100644 --- a/test/extensions/filters/listener/original_src/original_src_corpus/unix_test +++ b/test/extensions/filters/listener/original_src/original_src_corpus/unix_test @@ -3,4 +3,9 @@ config { mark: 0 } -address: "unix://domain.socket" \ No newline at end of file +data { + sock { + local_address: "tcp://0.0.0.0:0" + remote_address: "unix://domain.socket" + } +} \ No newline at end of file diff --git a/test/extensions/filters/listener/original_src/original_src_fuzz_test.cc b/test/extensions/filters/listener/original_src/original_src_fuzz_test.cc index 970303b9da67..3423b71560fc 100644 --- a/test/extensions/filters/listener/original_src/original_src_fuzz_test.cc +++ b/test/extensions/filters/listener/original_src/original_src_fuzz_test.cc @@ -1,14 +1,8 @@ -#include "envoy/extensions/filters/listener/original_src/v3/original_src.pb.h" - -#include "common/network/utility.h" - #include "extensions/filters/listener/original_src/original_src.h" +#include "test/extensions/filters/listener/common/listener_filter_fuzzer.h" #include "test/extensions/filters/listener/original_src/original_src_fuzz_test.pb.validate.h" #include "test/fuzz/fuzz_runner.h" -#include "test/mocks/network/mocks.h" - -#include "gmock/gmock.h" namespace Envoy { namespace Extensions { @@ -25,18 +19,16 @@ DEFINE_PROTO_FUZZER( return; } - NiceMock callbacks_; + Config config(input.config()); + auto filter = std::make_unique(config); + try { - callbacks_.socket_.remote_address_ = Network::Utility::resolveUrl(input.address()); + ListenerFilterFuzzer fuzzer; + fuzzer.fuzz(*filter, input.data()); } catch (const EnvoyException& e) { ENVOY_LOG_MISC(debug, "EnvoyException: {}", e.what()); return; } - - Config config(input.config()); - auto filter = std::make_unique(config); - - filter->onAccept(callbacks_); } } // namespace OriginalSrc diff --git a/test/extensions/filters/listener/original_src/original_src_fuzz_test.proto b/test/extensions/filters/listener/original_src/original_src_fuzz_test.proto index 187aa9114d3d..6934664f7fe0 100644 --- a/test/extensions/filters/listener/original_src/original_src_fuzz_test.proto +++ b/test/extensions/filters/listener/original_src/original_src_fuzz_test.proto @@ -3,11 +3,12 @@ syntax = "proto3"; package envoy.extensions.filters.listener.original_src; import "envoy/extensions/filters/listener/original_src/v3/original_src.proto"; -import "google/protobuf/empty.proto"; +import "test/extensions/filters/listener/common/listener_filter_fuzzer.proto"; import 
"validate/validate.proto"; message OriginalSrcTestCase { envoy.extensions.filters.listener.original_src.v3.OriginalSrc config = 1 [(validate.rules).message.required = true]; - string address = 2; + test.extensions.filters.listener.FilterFuzzTestCase data = 2 + [(validate.rules).message.required = true]; } \ No newline at end of file diff --git a/test/mocks/network/BUILD b/test/mocks/network/BUILD index 020e4b6db404..5f16adc6206f 100644 --- a/test/mocks/network/BUILD +++ b/test/mocks/network/BUILD @@ -63,3 +63,11 @@ envoy_cc_mock( "//source/common/network:utility_lib", ], ) + +envoy_cc_mock( + name = "network_fakes", + hdrs = ["fakes.h"], + deps = [ + ":network_mocks", + ], +) diff --git a/test/mocks/network/fakes.h b/test/mocks/network/fakes.h new file mode 100644 index 000000000000..ec69dce0ec0d --- /dev/null +++ b/test/mocks/network/fakes.h @@ -0,0 +1,62 @@ +#include "common/network/utility.h" + +#include "test/mocks/network/mocks.h" + +#include "gmock/gmock.h" + +namespace Envoy { +namespace Network { + +class FakeConnectionSocket : public MockConnectionSocket { +public: + ~FakeConnectionSocket() override = default; + + FakeConnectionSocket() : local_address_(nullptr), remote_address_(nullptr) {} + + FakeConnectionSocket(const Address::InstanceConstSharedPtr& local_address, + const Address::InstanceConstSharedPtr& remote_address) + : local_address_(local_address), remote_address_(remote_address) {} + + void setLocalAddress(const Address::InstanceConstSharedPtr& local_address) override { + local_address_ = local_address; + } + + void setRemoteAddress(const Address::InstanceConstSharedPtr& remote_address) override { + remote_address_ = remote_address; + } + + const Address::InstanceConstSharedPtr& localAddress() const override { return local_address_; } + + const Address::InstanceConstSharedPtr& remoteAddress() const override { return remote_address_; } + + Address::Type addressType() const override { return local_address_->type(); } + + absl::optional ipVersion() const override { + if (local_address_->type() != Address::Type::Ip) { + return absl::nullopt; + } + + return local_address_->ip()->version(); + } + + Api::SysCallIntResult getSocketOption(int level, int, void* optval, socklen_t*) const override { + switch (level) { + case SOL_IPV6: + static_cast(optval)->ss_family = AF_INET6; + break; + case SOL_IP: + static_cast(optval)->ss_family = AF_INET; + break; + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } + + return Api::SysCallIntResult{0, 0}; + } + + Address::InstanceConstSharedPtr local_address_; + Address::InstanceConstSharedPtr remote_address_; +}; + +} // namespace Network +} // namespace Envoy From 1ea93f6e371e6f03223081e3001022e59e1badc4 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Thu, 16 Jul 2020 13:56:52 -0400 Subject: [PATCH 650/909] test: small coverage improvements (#12130) Risk Level: n/a Testing: new unit tests Docs Changes: n/a Release Notes: n/a Signed-off-by: Alyssa Wilk --- source/extensions/transport_sockets/tap/config.h | 1 - test/extensions/retry/host/omit_canary_hosts/config_test.cc | 1 + test/per_file_coverage.sh | 1 - 3 files changed, 1 insertion(+), 2 deletions(-) diff --git a/source/extensions/transport_sockets/tap/config.h b/source/extensions/transport_sockets/tap/config.h index 8068779ada01..ac41dd19c9fc 100644 --- a/source/extensions/transport_sockets/tap/config.h +++ b/source/extensions/transport_sockets/tap/config.h @@ -15,7 +15,6 @@ namespace Tap { */ class TapSocketConfigFactory : public virtual Server::Configuration::TransportSocketConfigFactory { 
public: - ~TapSocketConfigFactory() override = default; std::string name() const override { return TransportSocketNames::get().Tap; } ProtobufTypes::MessagePtr createEmptyConfigProto() override; }; diff --git a/test/extensions/retry/host/omit_canary_hosts/config_test.cc b/test/extensions/retry/host/omit_canary_hosts/config_test.cc index 20f94af2320a..df042d83e9f5 100644 --- a/test/extensions/retry/host/omit_canary_hosts/config_test.cc +++ b/test/extensions/retry/host/omit_canary_hosts/config_test.cc @@ -34,6 +34,7 @@ TEST(OmitCanaryHostsRetryPredicateTest, PredicateTest) { ASSERT_FALSE(predicate->shouldSelectAnotherHost(*host1)); ASSERT_TRUE(predicate->shouldSelectAnotherHost(*host2)); + predicate->onHostAttempted(host1); } TEST(OmitCanaryHostsRetryPredicateTest, EmptyConfig) { diff --git a/test/per_file_coverage.sh b/test/per_file_coverage.sh index 8dd9531fc065..e06bf489cb07 100755 --- a/test/per_file_coverage.sh +++ b/test/per_file_coverage.sh @@ -42,7 +42,6 @@ declare -a KNOWN_LOW_COVERAGE=( "source/extensions/quic_listeners/quiche:84.8" "source/extensions/retry:95.5" "source/extensions/retry/host:85.7" -"source/extensions/retry/host/omit_canary_hosts:92.9" "source/extensions/stat_sinks/statsd:85.2" "source/extensions/tracers:96.5" "source/extensions/tracers/opencensus:92.4" From 15d212c30f3d37df697efc814dcf9918f1463d44 Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Thu, 16 Jul 2020 11:12:02 -0700 Subject: [PATCH 651/909] ci: toplevel download on macOS (#12115) Signed-off-by: Lizan Zhou --- .azure-pipelines/pipelines.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.azure-pipelines/pipelines.yml b/.azure-pipelines/pipelines.yml index 582d2e221699..b1c14670cfab 100644 --- a/.azure-pipelines/pipelines.yml +++ b/.azure-pipelines/pipelines.yml @@ -149,6 +149,7 @@ jobs: - script: ./ci/mac_ci_steps.sh displayName: "Run Mac CI" env: + BAZEL_BUILD_EXTRA_OPTIONS: --remote_download_toplevel BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) From 9af3796721b47432cd8bdea484a4c07ddda901ec Mon Sep 17 00:00:00 2001 From: ankatare Date: Fri, 17 Jul 2020 00:15:35 +0530 Subject: [PATCH 652/909] V2 v3 changes for parse foo from v2 json (#12113) V2 to V3 changes for parseFoofromV2json : Risk Level: NA Testing: unit and format Docs Changes: NA Part of #10843 Signed-off-by: Abhay Narayan Katare --- .../upstream/cluster_manager_impl_test.cc | 164 +++++++++++------- test/common/upstream/utility.h | 24 ++- test/server/configuration_impl_test.cc | 46 +++-- test/test_common/utility.h | 5 +- 4 files changed, 148 insertions(+), 91 deletions(-) diff --git a/test/common/upstream/cluster_manager_impl_test.cc b/test/common/upstream/cluster_manager_impl_test.cc index 37a078501974..a5fb36b1f98e 100644 --- a/test/common/upstream/cluster_manager_impl_test.cc +++ b/test/common/upstream/cluster_manager_impl_test.cc @@ -231,7 +231,7 @@ TEST_F(ClusterManagerImplTest, OutlierEventLog) { )EOF"; EXPECT_CALL(log_manager_, createAccessLog("foo")); - create(parseBootstrapFromV2Json(json)); + create(parseBootstrapFromV3Json(json)); } TEST_F(ClusterManagerImplTest, NoSdsConfig) { @@ -262,7 +262,7 @@ TEST_F(ClusterManagerImplTest, UnknownClusterType) { } )EOF"; - EXPECT_THROW_WITH_REGEX(create(parseBootstrapFromV2Json(json)), EnvoyException, + EXPECT_THROW_WITH_REGEX(create(parseBootstrapFromV3Json(json)), EnvoyException, "invalid value \"foo\" for type TYPE_ENUM"); } @@ -280,7 +280,7 @@ 
TEST_F(ClusterManagerImplTest, LocalClusterNotDefined) { )EOF", clustersJson({defaultStaticClusterJson("cluster_1"), defaultStaticClusterJson("cluster_2")})); - EXPECT_THROW(create(parseBootstrapFromV2Json(json)), EnvoyException); + EXPECT_THROW(create(parseBootstrapFromV3Json(json)), EnvoyException); } TEST_F(ClusterManagerImplTest, BadClusterManagerConfig) { @@ -298,7 +298,7 @@ TEST_F(ClusterManagerImplTest, BadClusterManagerConfig) { } )EOF"; - EXPECT_THROW_WITH_REGEX(create(parseBootstrapFromV2Json(json)), EnvoyException, + EXPECT_THROW_WITH_REGEX(create(parseBootstrapFromV3Json(json)), EnvoyException, "fake_property: Cannot find field"); } @@ -317,7 +317,7 @@ TEST_F(ClusterManagerImplTest, LocalClusterDefined) { clustersJson({defaultStaticClusterJson("cluster_1"), defaultStaticClusterJson("cluster_2"), defaultStaticClusterJson("new_cluster")})); - create(parseBootstrapFromV2Json(json)); + create(parseBootstrapFromV3Json(json)); checkStats(3 /*added*/, 0 /*modified*/, 0 /*removed*/, 3 /*active*/, 0 /*warming*/); factory_.tls_.shutdownThread(); @@ -327,7 +327,7 @@ TEST_F(ClusterManagerImplTest, DuplicateCluster) { const std::string json = fmt::sprintf( "{\"static_resources\":{%s}}", clustersJson({defaultStaticClusterJson("cluster_1"), defaultStaticClusterJson("cluster_1")})); - const auto config = parseBootstrapFromV2Json(json); + const auto config = parseBootstrapFromV3Json(json); EXPECT_THROW(create(config), EnvoyException); } @@ -697,7 +697,7 @@ TEST_F(ClusterManagerImplTest, ClusterProvidedLbNoLb) { cluster1->info_->lb_type_ = LoadBalancerType::ClusterProvided; EXPECT_CALL(factory_, clusterFromProto_(_, _, _, _)) .WillOnce(Return(std::make_pair(cluster1, nullptr))); - EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV2Json(json)), EnvoyException, + EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV3Json(json)), EnvoyException, "cluster manager: cluster provided LB specified but cluster " "'cluster_0' did not provide one. Check cluster documentation."); } @@ -711,7 +711,7 @@ TEST_F(ClusterManagerImplTest, ClusterProvidedLbNotConfigured) { cluster1->info_->name_ = "cluster_0"; EXPECT_CALL(factory_, clusterFromProto_(_, _, _, _)) .WillOnce(Return(std::make_pair(cluster1, new MockThreadAwareLoadBalancer()))); - EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV2Json(json)), EnvoyException, + EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV3Json(json)), EnvoyException, "cluster manager: cluster provided LB not specified but cluster " "'cluster_0' provided one. 
Check cluster documentation."); } @@ -731,7 +731,7 @@ class ClusterManagerImplThreadAwareLbTest : public ClusterManagerImplTest { EXPECT_CALL(factory_, clusterFromProto_(_, _, _, _)) .WillOnce(Return(std::make_pair(cluster1, nullptr))); ON_CALL(*cluster1, initializePhase()).WillByDefault(Return(Cluster::InitializePhase::Primary)); - create(parseBootstrapFromV2Json(json)); + create(parseBootstrapFromV3Json(json)); EXPECT_EQ(nullptr, cluster_manager_->get("cluster_0")->loadBalancer().chooseHost(nullptr)); @@ -834,7 +834,7 @@ TEST_F(ClusterManagerImplTest, UnknownCluster) { const std::string json = fmt::sprintf("{\"static_resources\":{%s}}", clustersJson({defaultStaticClusterJson("cluster_1")})); - create(parseBootstrapFromV2Json(json)); + create(parseBootstrapFromV3Json(json)); EXPECT_EQ(nullptr, cluster_manager_->get("hello")); EXPECT_EQ(nullptr, cluster_manager_->httpConnPoolForCluster("hello", ResourcePriority::Default, Http::Protocol::Http2, nullptr)); @@ -890,7 +890,7 @@ TEST_F(ClusterManagerImplTest, ShutdownOrder) { const std::string json = fmt::sprintf("{\"static_resources\":{%s}}", clustersJson({defaultStaticClusterJson("cluster_1")})); - create(parseBootstrapFromV2Json(json)); + create(parseBootstrapFromV3Json(json)); Cluster& cluster = cluster_manager_->activeClusters().begin()->second; EXPECT_EQ("cluster_1", cluster.info()->name()); EXPECT_EQ(cluster.info(), cluster_manager_->get("cluster_1")->info()); @@ -918,7 +918,7 @@ TEST_F(ClusterManagerImplTest, InitializeOrder) { "dynamic_resources": { "cds_config": { "api_config_source": { - "api_type": "UNSUPPORTED_REST_LEGACY", + "api_type": "0", "refresh_delay": "30s", "cluster_names": ["cds_cluster"] } @@ -958,7 +958,7 @@ TEST_F(ClusterManagerImplTest, InitializeOrder) { EXPECT_CALL(*cds_cluster, initialize(_)); EXPECT_CALL(*cluster1, initialize(_)); - create(parseBootstrapFromV2Json(json)); + create(parseBootstrapFromV3Json(json)); ReadyWatcher initialized; cluster_manager_->setInitializedCb([&]() -> void { initialized.ready(); }); @@ -1000,78 +1000,102 @@ TEST_F(ClusterManagerImplTest, InitializeOrder) { version_info: version3 static_clusters: - cluster: - "@type": type.googleapis.com/envoy.api.v2.Cluster + "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster name: "cds_cluster" type: "STATIC" connect_timeout: 0.25s - hosts: - - socket_address: - address: "127.0.0.1" - port_value: 11001 + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 11001 last_updated: seconds: 1234567891 nanos: 234000000 - cluster: - "@type": type.googleapis.com/envoy.api.v2.Cluster + "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster name: "fake_cluster" type: "STATIC" connect_timeout: 0.25s - hosts: - - socket_address: - address: "127.0.0.1" - port_value: 11001 + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 11001 last_updated: seconds: 1234567891 nanos: 234000000 - cluster: - "@type": type.googleapis.com/envoy.api.v2.Cluster + "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster name: "fake_cluster2" type: "STATIC" connect_timeout: 0.25s - hosts: - - socket_address: - address: "127.0.0.1" - port_value: 11001 + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 11001 last_updated: seconds: 1234567891 nanos: 234000000 dynamic_active_clusters: - version_info: "version1" cluster: - "@type": 
type.googleapis.com/envoy.api.v2.Cluster + "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster name: "cluster3" type: "STATIC" connect_timeout: 0.25s - hosts: - - socket_address: - address: "127.0.0.1" - port_value: 11001 + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 11001 last_updated: seconds: 1234567891 nanos: 234000000 - version_info: "version2" cluster: - "@type": type.googleapis.com/envoy.api.v2.Cluster + "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster name: "cluster4" type: "STATIC" connect_timeout: 0.25s - hosts: - - socket_address: - address: "127.0.0.1" - port_value: 11001 + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 11001 last_updated: seconds: 1234567891 nanos: 234000000 - version_info: "version3" cluster: - "@type": type.googleapis.com/envoy.api.v2.Cluster + "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster name: "cluster5" type: "STATIC" connect_timeout: 0.25s - hosts: - - socket_address: - address: "127.0.0.1" - port_value: 11001 + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 11001 last_updated: seconds: 1234567891 nanos: 234000000 @@ -1120,7 +1144,7 @@ TEST_F(ClusterManagerImplTest, DynamicRemoveWithLocalCluster) { ON_CALL(*foo, initializePhase()).WillByDefault(Return(Cluster::InitializePhase::Primary)); EXPECT_CALL(*foo, initialize(_)); - create(parseBootstrapFromV2Json(json)); + create(parseBootstrapFromV3Json(json)); foo->initialize_callback_(); // Now add a dynamic cluster. This cluster will have a member update callback from the local @@ -1178,14 +1202,18 @@ TEST_F(ClusterManagerImplTest, RemoveWarmingCluster) { dynamic_warming_clusters: - version_info: "version3" cluster: - "@type": type.googleapis.com/envoy.api.v2.Cluster + "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster name: "fake_cluster" type: STATIC connect_timeout: 0.25s - hosts: - - socket_address: - address: "127.0.0.1" - port_value: 11001 + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 11001 last_updated: seconds: 1234567891 nanos: 234000000 @@ -1221,14 +1249,18 @@ TEST_F(ClusterManagerImplTest, ModifyWarmingCluster) { dynamic_warming_clusters: - version_info: "version3" cluster: - "@type": type.googleapis.com/envoy.api.v2.Cluster + "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster name: "fake_cluster" type: STATIC connect_timeout: 0.25s - hosts: - - socket_address: - address: "127.0.0.1" - port_value: 11001 + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 11001 last_updated: seconds: 1234567891 nanos: 234000000 @@ -1242,7 +1274,7 @@ TEST_F(ClusterManagerImplTest, ModifyWarmingCluster) { EXPECT_CALL(*cluster2, initializePhase()).Times(0); EXPECT_CALL(*cluster2, initialize(_)); EXPECT_TRUE(cluster_manager_->addOrUpdateCluster( - parseClusterFromV2Json(fmt::sprintf(kDefaultStaticClusterTmpl, "fake_cluster", + parseClusterFromV3Json(fmt::sprintf(kDefaultStaticClusterTmpl, "fake_cluster", R"EOF( "socket_address": { "address": "127.0.0.1", @@ -1254,14 +1286,18 @@ TEST_F(ClusterManagerImplTest, ModifyWarmingCluster) { dynamic_warming_clusters: - version_info: "version3" cluster: - "@type": type.googleapis.com/envoy.api.v2.Cluster + "@type": 
type.googleapis.com/envoy.config.cluster.v3.Cluster name: "fake_cluster" type: STATIC connect_timeout: 0.25s - hosts: - - socket_address: - address: "127.0.0.1" - port_value: 11002 + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 11002 last_updated: seconds: 1234567891 nanos: 234000000 @@ -1404,7 +1440,7 @@ TEST_F(ClusterManagerImplTest, AddOrUpdateClusterStaticExists) { ON_CALL(*cluster1, initializePhase()).WillByDefault(Return(Cluster::InitializePhase::Primary)); EXPECT_CALL(*cluster1, initialize(_)); - create(parseBootstrapFromV2Json(json)); + create(parseBootstrapFromV3Json(json)); ReadyWatcher initialized; cluster_manager_->setInitializedCb([&]() -> void { initialized.ready(); }); @@ -1433,7 +1469,7 @@ TEST_F(ClusterManagerImplTest, HostsPostedToTlsCluster) { ON_CALL(*cluster1, initializePhase()).WillByDefault(Return(Cluster::InitializePhase::Primary)); EXPECT_CALL(*cluster1, initialize(_)); - create(parseBootstrapFromV2Json(json)); + create(parseBootstrapFromV3Json(json)); ReadyWatcher initialized; cluster_manager_->setInitializedCb([&]() -> void { initialized.ready(); }); @@ -1501,7 +1537,7 @@ TEST_F(ClusterManagerImplTest, CloseHttpConnectionsOnHealthFailure) { // Test inline init. initialize_callback(); })); - create(parseBootstrapFromV2Json(json)); + create(parseBootstrapFromV3Json(json)); EXPECT_CALL(factory_, allocateConnPool_(_, _, _)).WillOnce(Return(cp1)); cluster_manager_->httpConnPoolForCluster("some_cluster", ResourcePriority::Default, @@ -1564,7 +1600,7 @@ TEST_F(ClusterManagerImplTest, CloseTcpConnectionPoolsOnHealthFailure) { // Test inline init. initialize_callback(); })); - create(parseBootstrapFromV2Json(json)); + create(parseBootstrapFromV3Json(json)); EXPECT_CALL(factory_, allocateTcpConnPool_(_)).WillOnce(Return(cp1)); cluster_manager_->tcpConnPoolForCluster("some_cluster", ResourcePriority::Default, nullptr); diff --git a/test/common/upstream/utility.h b/test/common/upstream/utility.h index 742efed4e9d3..ef3dd210adf3 100644 --- a/test/common/upstream/utility.h +++ b/test/common/upstream/utility.h @@ -27,12 +27,21 @@ constexpr static const char* kDefaultStaticClusterTmpl = R"EOF( "connect_timeout": "0.250s", "type": "static", "lb_policy": "round_robin", - "hosts": [ + "load_assignment": { + "endpoints": [ { - %s, + "lb_endpoints": [ + { + "endpoint": { + "address": { + %s, } + } + } + ] } ] } + } )EOF"; inline std::string defaultStaticClusterJson(const std::string& name) { @@ -44,15 +53,16 @@ inline std::string defaultStaticClusterJson(const std::string& name) { } inline envoy::config::bootstrap::v3::Bootstrap -parseBootstrapFromV2Json(const std::string& json_string) { +parseBootstrapFromV3Json(const std::string& json_string, bool avoid_boosting = true) { envoy::config::bootstrap::v3::Bootstrap bootstrap; - TestUtility::loadFromJson(json_string, bootstrap, true); + TestUtility::loadFromJson(json_string, bootstrap, true, avoid_boosting); return bootstrap; } -inline envoy::config::cluster::v3::Cluster parseClusterFromV2Json(const std::string& json_string) { +inline envoy::config::cluster::v3::Cluster parseClusterFromV3Json(const std::string& json_string, + bool avoid_boosting = true) { envoy::config::cluster::v3::Cluster cluster; - TestUtility::loadFromJson(json_string, cluster, true); + TestUtility::loadFromJson(json_string, cluster, true, avoid_boosting); return cluster; } @@ -64,7 +74,7 @@ inline envoy::config::cluster::v3::Cluster parseClusterFromV3Yaml(const std::str } inline 
envoy::config::cluster::v3::Cluster defaultStaticCluster(const std::string& name) { - return parseClusterFromV2Json(defaultStaticClusterJson(name)); + return parseClusterFromV3Json(defaultStaticClusterJson(name)); } inline HostSharedPtr makeTestHost(ClusterInfoConstSharedPtr cluster, const std::string& hostname, diff --git a/test/server/configuration_impl_test.cc b/test/server/configuration_impl_test.cc index cd7f2ce5c7e4..5576179da9b8 100644 --- a/test/server/configuration_impl_test.cc +++ b/test/server/configuration_impl_test.cc @@ -102,7 +102,7 @@ TEST_F(ConfigurationImplTest, CustomStatsFlushInterval) { } )EOF"; - auto bootstrap = Upstream::parseBootstrapFromV2Json(json); + auto bootstrap = Upstream::parseBootstrapFromV3Json(json); MainImpl config; config.initialize(bootstrap, server_, cluster_manager_factory_); @@ -122,14 +122,24 @@ TEST_F(ConfigurationImplTest, SetUpstreamClusterPerConnectionBufferLimit) { "connect_timeout": "0.01s", "per_connection_buffer_limit_bytes": 8192, "lb_policy": "round_robin", - "hosts": [ - { - "socket_address" : { - "address": "127.0.0.1", - "port_value": 9999 + "load_assignment": { + "endpoints": [ + { + "lb_endpoints": [ + { + "endpoint": { + "address": { + "socket_address": { + "address": "127.0.0.1", + "port_value": 9999 + } } } - ] + } + ] + } + ] + } } ] }, @@ -145,7 +155,7 @@ TEST_F(ConfigurationImplTest, SetUpstreamClusterPerConnectionBufferLimit) { } )EOF"; - auto bootstrap = Upstream::parseBootstrapFromV2Json(json); + auto bootstrap = Upstream::parseBootstrapFromV3Json(json); MainImpl config; config.initialize(bootstrap, server_, cluster_manager_factory_); @@ -189,7 +199,7 @@ TEST_F(ConfigurationImplTest, NullTracerSetWhenTracingConfigurationAbsent) { } )EOF"; - auto bootstrap = Upstream::parseBootstrapFromV2Json(json); + auto bootstrap = Upstream::parseBootstrapFromV3Json(json); server_.local_info_.node_.set_cluster(""); MainImpl config; @@ -229,7 +239,7 @@ TEST_F(ConfigurationImplTest, NullTracerSetWhenHttpKeyAbsentFromTracerConfigurat } )EOF"; - auto bootstrap = Upstream::parseBootstrapFromV2Json(json); + auto bootstrap = Upstream::parseBootstrapFromV3Json(json); server_.local_info_.node_.set_cluster(""); MainImpl config; @@ -281,7 +291,7 @@ TEST_F(ConfigurationImplTest, ConfigurationFailsWhenInvalidTracerSpecified) { } )EOF"; - auto bootstrap = Upstream::parseBootstrapFromV2Json(json); + auto bootstrap = Upstream::parseBootstrapFromV3Json(json); MainImpl config; EXPECT_THROW_WITH_MESSAGE(config.initialize(bootstrap, server_, cluster_manager_factory_), EnvoyException, @@ -307,7 +317,7 @@ TEST_F(ConfigurationImplTest, ProtoSpecifiedStatsSink) { } )EOF"; - auto bootstrap = Upstream::parseBootstrapFromV2Json(json); + auto bootstrap = Upstream::parseBootstrapFromV3Json(json); auto& sink = *bootstrap.mutable_stats_sinks()->Add(); sink.set_name(Extensions::StatSinks::StatsSinkNames::get().Statsd); @@ -338,7 +348,7 @@ TEST_F(ConfigurationImplTest, StatsSinkWithInvalidName) { } )EOF"; - auto bootstrap = Upstream::parseBootstrapFromV2Json(json); + auto bootstrap = Upstream::parseBootstrapFromV3Json(json); envoy::config::metrics::v3::StatsSink& sink = *bootstrap.mutable_stats_sinks()->Add(); sink.set_name("envoy.invalid"); @@ -368,7 +378,7 @@ TEST_F(ConfigurationImplTest, StatsSinkWithNoName) { } )EOF"; - auto bootstrap = Upstream::parseBootstrapFromV2Json(json); + auto bootstrap = Upstream::parseBootstrapFromV3Json(json); bootstrap.mutable_stats_sinks()->Add(); @@ -397,7 +407,7 @@ TEST_F(ConfigurationImplTest, StatsSinkWithNoType) { } )EOF"; - auto 
bootstrap = Upstream::parseBootstrapFromV2Json(json); + auto bootstrap = Upstream::parseBootstrapFromV3Json(json); auto& sink = *bootstrap.mutable_stats_sinks()->Add(); udpa::type::v1::TypedStruct typed_struct; @@ -530,7 +540,7 @@ TEST_F(ConfigurationImplTest, AdminSocketOptions) { } )EOF"; - auto bootstrap = Upstream::parseBootstrapFromV2Json(json); + auto bootstrap = Upstream::parseBootstrapFromV3Json(json); InitialImpl config(bootstrap); Network::MockListenSocket socket_mock; @@ -616,7 +626,7 @@ TEST_F(ConfigurationImplTest, ExceedLoadBalancerHostWeightsLimit) { } )EOF"; - auto bootstrap = Upstream::parseBootstrapFromV2Json(json); + auto bootstrap = Upstream::parseBootstrapFromV3Json(json); MainImpl config; EXPECT_THROW_WITH_MESSAGE( @@ -722,7 +732,7 @@ TEST_F(ConfigurationImplTest, ExceedLoadBalancerLocalityWeightsLimit) { } )EOF"; - auto bootstrap = Upstream::parseBootstrapFromV2Json(json); + auto bootstrap = Upstream::parseBootstrapFromV3Json(json); MainImpl config; EXPECT_THROW_WITH_MESSAGE( diff --git a/test/test_common/utility.h b/test/test_common/utility.h index 0727b98729c7..aa01d2f6f35b 100644 --- a/test/test_common/utility.h +++ b/test/test_common/utility.h @@ -538,8 +538,9 @@ class TestUtility { // Strict variants of Protobuf::MessageUtil static void loadFromJson(const std::string& json, Protobuf::Message& message, - bool preserve_original_type = false) { - MessageUtil::loadFromJson(json, message, ProtobufMessage::getStrictValidationVisitor()); + bool preserve_original_type = false, bool avoid_boosting = false) { + MessageUtil::loadFromJson(json, message, ProtobufMessage::getStrictValidationVisitor(), + !avoid_boosting); if (!preserve_original_type) { Config::VersionConverter::eraseOriginalTypeInformation(message); } From 63f0d12a7396757989580f7005b8e7c63c91cc5d Mon Sep 17 00:00:00 2001 From: Kevin Baichoo Date: Thu, 16 Jul 2020 15:52:29 -0400 Subject: [PATCH 653/909] Extended registery test coverage with 2 additional testcases. (#11965) Commit Message: Added additional test coverage to registry.h. Additional Description: Risk Level: Low Testing: Ran the additional unit tests added. 
Docs Changes: N/A Release Notes: N/A Signed-off-by: Kevin Baichoo --- include/envoy/registry/registry.h | 3 +- test/common/config/registry_test.cc | 56 +++++++++++++++++++++++++++++ 2 files changed, 58 insertions(+), 1 deletion(-) diff --git a/include/envoy/registry/registry.h b/include/envoy/registry/registry.h index d48a48dd0b66..a14baea43a69 100644 --- a/include/envoy/registry/registry.h +++ b/include/envoy/registry/registry.h @@ -528,7 +528,8 @@ template class RegisterFactory { if (!instance_.name().empty()) { FactoryRegistry::registerFactory(instance_, instance_.name()); } else { - ASSERT(deprecated_names.size() != 0); + ASSERT(deprecated_names.size() != 0, + "Attempted to register a factory without a name or deprecated name"); } for (auto deprecated_name : deprecated_names) { diff --git a/test/common/config/registry_test.cc b/test/common/config/registry_test.cc index ada81ac0bfd7..be6ea155d8bd 100644 --- a/test/common/config/registry_test.cc +++ b/test/common/config/registry_test.cc @@ -15,6 +15,8 @@ namespace Envoy { namespace Config { namespace { +using ::testing::Optional; + class InternalFactory : public Config::UntypedFactory { public: ~InternalFactory() override = default; @@ -103,6 +105,18 @@ TEST(RegistryTest, DEPRECATED_FEATURE_TEST(WithDeprecatedFactoryPublished)) { ->name()); } +class NoNamePublishedFactory : public PublishedFactory { +public: + std::string name() const override { return ""; } +}; + +TEST(RegistryTest, DEPRECATED_FEATURE_TEST(AssertsIfNoDeprecatedNameGiven)) { + // Expects an assert to raise if we register a factory that has an empty name + // and no associated deprecated names. + EXPECT_DEBUG_DEATH((Registry::RegisterFactory({})), + "Attempted to register a factory without a name or deprecated name"); +} + class TestVersionedFactory : public PublishedFactory { public: std::string name() const override { return "testing.published.versioned"; } @@ -182,6 +196,48 @@ TEST(RegistryTest, TestDoubleRegistrationByName) { "Double registration for name: 'testing.published.test'"); } +class PublishedFactoryWithNameAndCategory : public PublishedFactory { +public: + std::string category() const override { return "testing.published.additional.category"; } + std::string name() const override { + return "testing.published.versioned.instead_name_and_category"; + } +}; + +TEST(RegistryTest, DEPRECATED_FEATURE_TEST(VersionedWithDeprecatedNamesFactoryAndNewCategory)) { + PublishedFactoryWithNameAndCategory test; + + // Check the category is not registered + ASSERT_FALSE(Registry::FactoryCategoryRegistry::isRegistered(test.category())); + + auto factory = Registry::RegisterFactory( + FACTORY_VERSION(0, 0, 1, {{"build.kind", "private"}}), + {"testing.published.versioned.deprecated_name_and_category"}); + + // Check the category now registered + ASSERT_TRUE(Registry::FactoryCategoryRegistry::isRegistered(test.category())); + + const auto& factories = Envoy::Registry::FactoryCategoryRegistry::registeredFactories(); + + auto version = + factories.find("testing.published.additional.category") + ->second->getFactoryVersion("testing.published.versioned.instead_name_and_category"); + + ASSERT_TRUE(version.has_value()); + EXPECT_EQ(0, version.value().version().major_number()); + EXPECT_EQ(0, version.value().version().minor_number()); + EXPECT_EQ(1, version.value().version().patch()); + EXPECT_EQ(1, version.value().metadata().fields().size()); + EXPECT_EQ("private", version.value().metadata().fields().at("build.kind").string_value()); + + // Get the version using deprecated name and 
check that it matches the + // version obtained through the new name. + auto deprecated_version = + factories.find("testing.published.additional.category") + ->second->getFactoryVersion("testing.published.versioned.deprecated_name_and_category"); + EXPECT_THAT(deprecated_version, Optional(ProtoEq(version.value()))); +} + } // namespace } // namespace Config } // namespace Envoy From 7b24ef49641b327a335e0986a14853c7e4aa8058 Mon Sep 17 00:00:00 2001 From: Yosry Ahmed Date: Thu, 16 Jul 2020 12:54:41 -0700 Subject: [PATCH 654/909] Docs: corrected outdated command (#12110) Corrected the command to build an envoy docker image from source. Additional Description: N/A Risk Level: N/A Testing: N/A Docs Changes: Building an Envoy Docker image - Updated step 2 Release Notes: N/A Signed-off-by: Yosry Ahmed --- docs/root/install/sandboxes/local_docker_build.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/root/install/sandboxes/local_docker_build.rst b/docs/root/install/sandboxes/local_docker_build.rst index d5dda8c03191..fe275ec8aef7 100644 --- a/docs/root/install/sandboxes/local_docker_build.rst +++ b/docs/root/install/sandboxes/local_docker_build.rst @@ -26,7 +26,7 @@ of the software used to build it.:: $ pwd src/envoy/ - $ docker build -f ci/Dockerfile-envoy-image -t envoy . + $ docker build -f ci/Dockerfile-envoy -t envoy . Now you can use this ``envoy`` image to build the any of the sandboxes if you change the ``FROM`` line in any Dockerfile. From faec5927767b19c7a8c1859586bcadbc4009c5a2 Mon Sep 17 00:00:00 2001 From: Michael Rebello Date: Thu, 16 Jul 2020 13:02:15 -0700 Subject: [PATCH 655/909] test: remove dead integration function (#12122) Risk Level: Low Testing: N/A Docs Changes: N/A Signed-off-by: Michael Rebello --- test/integration/integration.cc | 9 --------- test/integration/integration.h | 8 -------- 2 files changed, 17 deletions(-) diff --git a/test/integration/integration.cc b/test/integration/integration.cc index a738a95b0fa4..309548595313 100644 --- a/test/integration/integration.cc +++ b/test/integration/integration.cc @@ -539,15 +539,6 @@ void BaseIntegrationTest::sendRawHttpAndWaitForResponse(int port, const char* ra connection->run(); } -IntegrationTestServerPtr BaseIntegrationTest::createIntegrationTestServer( - const std::string& bootstrap_path, - std::function on_server_ready_function, - std::function on_server_init_function, Event::TestTimeSystem& time_system) { - return IntegrationTestServer::create(bootstrap_path, version_, on_server_ready_function, - on_server_init_function, deterministic_, time_system, *api_, - defer_listener_finalization_); -} - void BaseIntegrationTest::useListenerAccessLog(absl::string_view format) { listener_access_log_name_ = TestEnvironment::temporaryPath(TestUtility::uniqueFilename()); ASSERT_TRUE(config_helper_.setListenerAccessLog(listener_access_log_name_, format)); diff --git a/test/integration/integration.h b/test/integration/integration.h index 0d0081711f93..0ec9133736cb 100644 --- a/test/integration/integration.h +++ b/test/integration/integration.h @@ -396,14 +396,6 @@ class BaseIntegrationTest : protected Logger::Loggable { } protected: - // Create the envoy server in another thread and start it. - // Will not return until that server is listening. 
- virtual IntegrationTestServerPtr - createIntegrationTestServer(const std::string& bootstrap_path, - std::function on_server_ready_function, - std::function on_server_init_function, - Event::TestTimeSystem& time_system); - bool initialized() const { return initialized_; } std::unique_ptr upstream_stats_store_; From acb884768f257c32af4a17cc5b00b4fb274e30af Mon Sep 17 00:00:00 2001 From: tomocy <36136133+tomocy@users.noreply.github.com> Date: Fri, 17 Jul 2020 05:12:52 +0900 Subject: [PATCH 656/909] use type alias (#12126) Commit Message: format: use type alias Additional Description: N/A Risk Level: Low Testing: N/A Docs Changes: N/A Release Notes: N/A Part of #11634 Signed-off-by: tomocy --- include/envoy/event/dispatcher.h | 4 ++++ source/common/event/dispatcher_impl.h | 2 +- source/common/stats/thread_local_store.cc | 6 ++++-- source/common/stats/thread_local_store.h | 8 +++++++- .../common/thread_local/thread_local_impl.cc | 18 +++++++++--------- source/common/thread_local/thread_local_impl.h | 13 +++++++++---- source/exe/main_common.h | 4 ++-- source/extensions/filters/common/lua/lua.h | 2 ++ test/common/stats/stat_test_utility.cc | 7 +++++-- .../stats/thread_local_store_speed_test.cc | 2 +- test/common/stats/thread_local_store_test.cc | 10 +++++----- test/extensions/filters/common/lua/lua_test.cc | 2 +- .../filters/common/lua/lua_wrappers.h | 2 +- test/server/admin/stats_handler_test.cc | 2 +- test/server/server_test.cc | 2 +- 15 files changed, 53 insertions(+), 31 deletions(-) diff --git a/include/envoy/event/dispatcher.h b/include/envoy/event/dispatcher.h index 1cbd3e212f1d..eca836980102 100644 --- a/include/envoy/event/dispatcher.h +++ b/include/envoy/event/dispatcher.h @@ -41,11 +41,15 @@ struct DispatcherStats { ALL_DISPATCHER_STATS(GENERATE_HISTOGRAM_STRUCT) }; +using DispatcherStatsPtr = std::unique_ptr; + /** * Callback invoked when a dispatcher post() runs. */ using PostCb = std::function; +using PostCbSharedPtr = std::shared_ptr; + /** * Abstract event dispatching loop. 
*/ diff --git a/source/common/event/dispatcher_impl.h b/source/common/event/dispatcher_impl.h index 104791708d58..143ff4eb065c 100644 --- a/source/common/event/dispatcher_impl.h +++ b/source/common/event/dispatcher_impl.h @@ -105,7 +105,7 @@ class DispatcherImpl : Logger::Loggable, const std::string name_; Api::Api& api_; std::string stats_prefix_; - std::unique_ptr stats_; + DispatcherStatsPtr stats_; Thread::ThreadId run_tid_; Buffer::WatermarkFactoryPtr buffer_factory_; LibeventScheduler base_scheduler_; diff --git a/source/common/stats/thread_local_store.cc b/source/common/stats/thread_local_store.cc index 328163801cb9..2b7574e42484 100644 --- a/source/common/stats/thread_local_store.cc +++ b/source/common/stats/thread_local_store.cc @@ -401,8 +401,10 @@ StatType& ThreadLocalStoreImpl::ScopeImpl::safeMakeStat( } template -absl::optional> -ThreadLocalStoreImpl::ScopeImpl::findStatLockHeld( +using StatTypeOptConstRef = absl::optional>; + +template +StatTypeOptConstRef ThreadLocalStoreImpl::ScopeImpl::findStatLockHeld( StatName name, StatNameHashMap>& central_cache_map) const { auto iter = central_cache_map.find(name); if (iter == central_cache_map.end()) { diff --git a/source/common/stats/thread_local_store.h b/source/common/stats/thread_local_store.h index 3702a9bd77ee..3496c0790e15 100644 --- a/source/common/stats/thread_local_store.h +++ b/source/common/stats/thread_local_store.h @@ -4,6 +4,7 @@ #include #include #include +#include #include #include "envoy/stats/tag.h" @@ -380,6 +381,9 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo MakeStatFn make_stat, StatRefMap* tls_cache, StatNameHashSet* tls_rejected_stats, StatType& null_stat); + template + using StatTypeOptConstRef = absl::optional>; + /** * Looks up an existing stat, populating the local cache if necessary. Does * not check the TLS or rejects, and does not create a stat if it does not @@ -390,7 +394,7 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo * @return a reference to the stat, if it exists. 
*/ template - absl::optional> + StatTypeOptConstRef findStatLockHeld(StatName name, StatNameHashMap>& central_cache_map) const; @@ -463,5 +467,7 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo StatNameSetPtr well_known_tags_; }; +using ThreadLocalStoreImplPtr = std::unique_ptr; + } // namespace Stats } // namespace Envoy diff --git a/source/common/thread_local/thread_local_impl.cc b/source/common/thread_local/thread_local_impl.cc index 8bfb093befae..d4d02f8b2f5f 100644 --- a/source/common/thread_local/thread_local_impl.cc +++ b/source/common/thread_local/thread_local_impl.cc @@ -26,7 +26,7 @@ SlotPtr InstanceImpl::allocateSlot() { ASSERT(!shutdown_); if (free_slot_indexes_.empty()) { - std::unique_ptr slot(new SlotImpl(*this, slots_.size())); + SlotImplPtr slot(new SlotImpl(*this, slots_.size())); auto wrapper = std::make_unique(*this, std::move(slot)); slots_.push_back(wrapper->slot_.get()); return wrapper; @@ -34,7 +34,7 @@ SlotPtr InstanceImpl::allocateSlot() { const uint32_t idx = free_slot_indexes_.front(); free_slot_indexes_.pop_front(); ASSERT(idx < slots_.size()); - std::unique_ptr slot(new SlotImpl(*this, idx)); + SlotImplPtr slot(new SlotImpl(*this, idx)); slots_[idx] = slot.get(); return std::make_unique(*this, std::move(slot)); } @@ -56,7 +56,7 @@ ThreadLocalObjectSharedPtr InstanceImpl::SlotImpl::get() { return thread_local_data_.data_[index_]; } -InstanceImpl::Bookkeeper::Bookkeeper(InstanceImpl& parent, std::unique_ptr&& slot) +InstanceImpl::Bookkeeper::Bookkeeper(InstanceImpl& parent, SlotImplPtr&& slot) : parent_(parent), slot_(std::move(slot)), ref_count_(/*not used.*/ nullptr, [slot = slot_.get(), &parent = this->parent_](uint32_t* /* not used */) { @@ -117,7 +117,7 @@ void InstanceImpl::registerThread(Event::Dispatcher& dispatcher, bool main_threa // Puts the slot into a deferred delete container, the slot will be destructed when its out-going // callback reference count goes to 0. -void InstanceImpl::recycle(std::unique_ptr&& slot) { +void InstanceImpl::recycle(SlotImplPtr&& slot) { ASSERT(std::this_thread::get_id() == main_thread_id_); ASSERT(slot != nullptr); auto* slot_addr = slot.get(); @@ -194,11 +194,11 @@ void InstanceImpl::runOnAllThreads(Event::PostCb cb, Event::PostCb all_threads_c // for programming simplicity here. cb(); - std::shared_ptr cb_guard(new Event::PostCb(cb), - [this, all_threads_complete_cb](Event::PostCb* cb) { - main_thread_dispatcher_->post(all_threads_complete_cb); - delete cb; - }); + Event::PostCbSharedPtr cb_guard(new Event::PostCb(cb), + [this, all_threads_complete_cb](Event::PostCb* cb) { + main_thread_dispatcher_->post(all_threads_complete_cb); + delete cb; + }); for (Event::Dispatcher& dispatcher : registered_threads_) { dispatcher.post([cb_guard]() -> void { (*cb_guard)(); }); diff --git a/source/common/thread_local/thread_local_impl.h b/source/common/thread_local/thread_local_impl.h index b451c4eb236a..71153107fb3d 100644 --- a/source/common/thread_local/thread_local_impl.h +++ b/source/common/thread_local/thread_local_impl.h @@ -3,6 +3,7 @@ #include #include #include +#include #include #include "envoy/thread_local/thread_local.h" @@ -50,10 +51,12 @@ class InstanceImpl : Logger::Loggable, public NonCopyable, pub const uint64_t index_; }; + using SlotImplPtr = std::unique_ptr; + // A Wrapper of SlotImpl which on destruction returns the SlotImpl to the deferred delete queue // (detaches it). 
struct Bookkeeper : public Slot { - Bookkeeper(InstanceImpl& parent, std::unique_ptr&& slot); + Bookkeeper(InstanceImpl& parent, SlotImplPtr&& slot); ~Bookkeeper() override { parent_.recycle(std::move(slot_)); } // ThreadLocal::Slot @@ -66,7 +69,7 @@ class InstanceImpl : Logger::Loggable, public NonCopyable, pub void set(InitializeCb cb) override; InstanceImpl& parent_; - std::unique_ptr slot_; + SlotImplPtr slot_; std::shared_ptr ref_count_; }; @@ -75,7 +78,7 @@ class InstanceImpl : Logger::Loggable, public NonCopyable, pub std::vector data_; }; - void recycle(std::unique_ptr&& slot); + void recycle(SlotImplPtr&& slot); // Cleanup the deferred deletes queue. void scheduleCleanup(SlotImpl* slot); @@ -89,7 +92,7 @@ class InstanceImpl : Logger::Loggable, public NonCopyable, pub // A indexed container for Slots that has to be deferred to delete due to out-going callbacks // pointing to the Slot. To let the ref_count_ deleter find the SlotImpl by address, the container // is defined as a map of SlotImpl address to the unique_ptr. - absl::flat_hash_map> deferred_deletes_; + absl::flat_hash_map deferred_deletes_; std::vector slots_; // A list of index of freed slots. @@ -104,5 +107,7 @@ class InstanceImpl : Logger::Loggable, public NonCopyable, pub friend class ThreadLocalInstanceImplTest; }; +using InstanceImplPtr = std::unique_ptr; + } // namespace ThreadLocal } // namespace Envoy diff --git a/source/exe/main_common.h b/source/exe/main_common.h index e548efc5c491..91ea197def3c 100644 --- a/source/exe/main_common.h +++ b/source/exe/main_common.h @@ -78,9 +78,9 @@ class MainCommonBase { Stats::SymbolTablePtr symbol_table_; Stats::AllocatorImpl stats_allocator_; - std::unique_ptr tls_; + ThreadLocal::InstanceImplPtr tls_; std::unique_ptr restarter_; - std::unique_ptr stats_store_; + Stats::ThreadLocalStoreImplPtr stats_store_; std::unique_ptr logging_context_; std::unique_ptr init_manager_{std::make_unique("Server")}; std::unique_ptr server_; diff --git a/source/extensions/filters/common/lua/lua.h b/source/extensions/filters/common/lua/lua.h index 7071b375303f..726b6c149e16 100644 --- a/source/extensions/filters/common/lua/lua.h +++ b/source/extensions/filters/common/lua/lua.h @@ -420,6 +420,8 @@ class ThreadLocalState : Logger::Loggable { uint64_t current_global_slot_{}; }; +using ThreadLocalStatePtr = std::unique_ptr; + /** * An exception specific to Lua errors. 
*/ diff --git a/test/common/stats/stat_test_utility.cc b/test/common/stats/stat_test_utility.cc index 55670f7942d5..7cdbc08ab4dc 100644 --- a/test/common/stats/stat_test_utility.cc +++ b/test/common/stats/stat_test_utility.cc @@ -203,9 +203,12 @@ Histogram& TestStore::histogramFromStatNameWithTags(const StatName& stat_name, } template -static absl::optional> +using StatTypeOptConstRef = absl::optional>; + +template +static StatTypeOptConstRef findByString(const std::string& name, const absl::flat_hash_map& map) { - absl::optional> ret; + StatTypeOptConstRef ret; auto iter = map.find(name); if (iter != map.end()) { ret = *iter->second; diff --git a/test/common/stats/thread_local_store_speed_test.cc b/test/common/stats/thread_local_store_speed_test.cc index e2e86e0fb603..6e2c62ace9ef 100644 --- a/test/common/stats/thread_local_store_speed_test.cc +++ b/test/common/stats/thread_local_store_speed_test.cc @@ -68,7 +68,7 @@ class ThreadLocalStorePerf { Event::SimulatedTimeSystem time_system_; Stats::AllocatorImpl heap_alloc_; Event::DispatcherPtr dispatcher_; - std::unique_ptr tls_; + ThreadLocal::InstanceImplPtr tls_; Stats::ThreadLocalStoreImpl store_; Api::ApiPtr api_; envoy::config::metrics::v3::StatsConfig stats_config_; diff --git a/test/common/stats/thread_local_store_test.cc b/test/common/stats/thread_local_store_test.cc index 3aa877ca4121..a7906ff2d7ee 100644 --- a/test/common/stats/thread_local_store_test.cc +++ b/test/common/stats/thread_local_store_test.cc @@ -58,7 +58,7 @@ class StatsThreadLocalStoreTest : public testing::Test { NiceMock tls_; AllocatorImpl alloc_; MockSink sink_; - std::unique_ptr store_; + ThreadLocalStoreImplPtr store_; }; class HistogramWrapper { @@ -176,7 +176,7 @@ class HistogramTest : public testing::Test { NiceMock tls_; AllocatorImpl alloc_; MockSink sink_; - std::unique_ptr store_; + ThreadLocalStoreImplPtr store_; InSequence s; std::vector h1_cumulative_values_, h2_cumulative_values_, h1_interval_values_, h2_interval_values_; @@ -587,7 +587,7 @@ class ThreadLocalStoreNoMocksTestBase : public testing::Test { SymbolTablePtr symbol_table_; AllocatorImpl alloc_; - std::unique_ptr store_; + ThreadLocalStoreImplPtr store_; StatNamePool pool_; }; @@ -1080,7 +1080,7 @@ class StatsThreadLocalStoreTestNoFixture : public testing::Test { MockSink sink_; SymbolTablePtr symbol_table_; std::unique_ptr alloc_; - std::unique_ptr store_; + ThreadLocalStoreImplPtr store_; NiceMock main_thread_dispatcher_; NiceMock tls_; TestUtil::SymbolTableCreatorTestPeer symbol_table_creator_test_peer_; @@ -1514,7 +1514,7 @@ class ClusterShutdownCleanupStarvationTest : public ThreadLocalStoreNoMocksTestB Event::DispatcherPtr main_dispatcher_; std::vector thread_dispatchers_; Thread::ThreadFactory& thread_factory_; - std::unique_ptr tls_; + ThreadLocal::InstanceImplPtr tls_; Thread::ThreadPtr main_thread_; std::vector threads_; StatNamePool pool_; diff --git a/test/extensions/filters/common/lua/lua_test.cc b/test/extensions/filters/common/lua/lua_test.cc index 5f4462e7d3c4..27c9e35f8cfe 100644 --- a/test/extensions/filters/common/lua/lua_test.cc +++ b/test/extensions/filters/common/lua/lua_test.cc @@ -48,7 +48,7 @@ class LuaTest : public testing::Test { } NiceMock tls_; - std::unique_ptr state_; + ThreadLocalStatePtr state_; std::function yield_callback_; ReadyWatcher on_yield_; }; diff --git a/test/extensions/filters/common/lua/lua_wrappers.h b/test/extensions/filters/common/lua/lua_wrappers.h index 4791f9e5109a..4b2e7f1f8b0a 100644 --- a/test/extensions/filters/common/lua/lua_wrappers.h 
+++ b/test/extensions/filters/common/lua/lua_wrappers.h @@ -41,7 +41,7 @@ template class LuaWrappersTestBase : public testing::Test { MOCK_METHOD(void, testPrint, (const std::string&)); NiceMock tls_; - std::unique_ptr state_; + ThreadLocalStatePtr state_; std::function yield_callback_; CoroutinePtr coroutine_; }; diff --git a/test/server/admin/stats_handler_test.cc b/test/server/admin/stats_handler_test.cc index ce80844b635e..623438013b97 100644 --- a/test/server/admin/stats_handler_test.cc +++ b/test/server/admin/stats_handler_test.cc @@ -39,7 +39,7 @@ class AdminStatsTest : public testing::TestWithParam tls_; Stats::AllocatorImpl alloc_; Stats::MockSink sink_; - std::unique_ptr store_; + Stats::ThreadLocalStoreImplPtr store_; }; INSTANTIATE_TEST_SUITE_P(IpVersions, AdminStatsTest, diff --git a/test/server/server_test.cc b/test/server/server_test.cc index 6a9522e80bf5..b849689ef63f 100644 --- a/test/server/server_test.cc +++ b/test/server/server_test.cc @@ -258,7 +258,7 @@ class ServerInstanceImplTestBase { testing::NiceMock options_; DefaultListenerHooks hooks_; testing::NiceMock restart_; - std::unique_ptr thread_local_; + ThreadLocal::InstanceImplPtr thread_local_; Stats::TestIsolatedStoreImpl stats_store_; Thread::MutexBasicLockable fakelock_; TestComponentFactory component_factory_; From 88dcb292817946510bb87f8f379a954962cece79 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Thu, 16 Jul 2020 17:01:04 -0400 Subject: [PATCH 657/909] http: stream error on invalid messaging (#11748) This unifies HTTP/1.1 and HTTP/2 stream error on invalid messaging. Previously HTTP/1.1 defaulted permissive and HTTP/2 defaulted to strict. This defaults both to strict, resetting connections on invalid requests. This will have a major latency impact if downstream is sending a mix of valid and invalid requests over HTTP/1.1. Additional Description: This change is runtime guarded per default behavioral change rules. It can also be reverted by setting the default to permissive (for prior HTTP/1 behavior) then overriding HTTP/2 to strict (for prior HTTP/2 behavior). This works in conjunction with #11714, as the HTTP connection manager enforces the strictness, so the responses need to be sent via the HTTP connection manager to have strictness applied correctly.
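For illustration, a minimal sketch of an HCM config excerpt that restores the previous defaults once this lands (HTTP/1.x permissive, HTTP/2 strict). The HTTP/2 field name matches the protocol.proto diff below; the HCM-level field name is an assumption, since only its leading comment appears in this excerpt, so check the generated API docs for the exact name:

  http_connection_manager:
    stat_prefix: ingress_http
    # Assumed HCM-level knob (see the http_connection_manager.proto diff below):
    # true keeps the old permissive HTTP/1.x handling, resetting only the
    # offending stream instead of the whole connection.
    stream_error_on_invalid_http_message: true
    http2_protocol_options:
      # Per-codec override added in protocol.proto below; explicitly setting it
      # to false keeps the old strict HTTP/2 handling, terminating the connection.
      override_stream_error_on_invalid_http_message: false
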
Risk Level: High (HCM changes) Testing: new unit tests, updated integration tests Docs Changes: n/a Release Notes: inline Runtime guard: envoy.reloadable_features.hcm_stream_error_on_invalid_message Fixes #9846 Signed-off-by: Alyssa Wilk --- api/envoy/config/core/v3/protocol.proto | 21 +++++- api/envoy/config/core/v4alpha/protocol.proto | 11 +++- .../v3/http_connection_manager.proto | 19 +++++- .../v4alpha/http_connection_manager.proto | 19 +++++- .../best_practices/level_two.rst | 16 ++--- docs/root/version_history/current.rst | 1 + .../envoy/config/core/v3/protocol.proto | 21 +++++- .../envoy/config/core/v4alpha/protocol.proto | 21 +++++- .../v3/http_connection_manager.proto | 19 +++++- .../v4alpha/http_connection_manager.proto | 19 +++++- source/common/http/BUILD | 1 + source/common/http/conn_manager_config.h | 6 ++ source/common/http/conn_manager_impl.cc | 8 +++ source/common/http/http2/codec_impl.cc | 2 +- source/common/http/utility.cc | 20 ++++++ source/common/http/utility.h | 4 ++ source/common/runtime/runtime_features.cc | 1 + .../network/http_connection_manager/config.cc | 6 +- .../network/http_connection_manager/config.h | 4 ++ source/server/admin/admin.h | 1 + test/common/http/BUILD | 1 + .../http/conn_manager_impl_fuzz_test.cc | 4 ++ test/common/http/conn_manager_impl_test.cc | 6 +- test/common/http/conn_manager_utility_test.cc | 1 + test/common/http/http2/codec_impl_test.cc | 3 +- test/common/http/utility_test.cc | 65 +++++++++++++++++++ test/integration/http2_integration_test.cc | 8 ++- test/integration/integration_test.cc | 14 ++++ test/integration/protocol_integration_test.cc | 14 ++-- test/integration/stats_integration_test.cc | 6 +- test/mocks/runtime/mocks.cc | 4 ++ 31 files changed, 313 insertions(+), 33 deletions(-) diff --git a/api/envoy/config/core/v3/protocol.proto b/api/envoy/config/core/v3/protocol.proto index 339601feab3d..0ab6289e9659 100644 --- a/api/envoy/config/core/v3/protocol.proto +++ b/api/envoy/config/core/v3/protocol.proto @@ -159,7 +159,7 @@ message Http1ProtocolOptions { bool enable_trailers = 5; } -// [#next-free-field: 14] +// [#next-free-field: 15] message Http2ProtocolOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.Http2ProtocolOptions"; @@ -280,8 +280,25 @@ message Http2ProtocolOptions { // the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. However, // when this option is enabled, only the offending stream is terminated. // + // This is overridden by HCM :ref:`stream_error_on_invalid_http_messaging + // ` + // iff present. + // + // This is deprecated in favor of :ref:`override_stream_error_on_invalid_http_message + // ` + // + // See `RFC7540, sec. 8.1 `_ for details. + bool stream_error_on_invalid_http_messaging = 12 [deprecated = true]; + + // Allows invalid HTTP messaging and headers. When this option is disabled (default), then + // the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. However, + // when this option is enabled, only the offending stream is terminated. + // + // This overrides any HCM :ref:`stream_error_on_invalid_http_messaging + // ` + // // See `RFC7540, sec. 8.1 `_ for details. 
- bool stream_error_on_invalid_http_messaging = 12; + google.protobuf.BoolValue override_stream_error_on_invalid_http_message = 14; // [#not-implemented-hide:] // Specifies SETTINGS frame parameters to be sent to the peer, with two exceptions: diff --git a/api/envoy/config/core/v4alpha/protocol.proto b/api/envoy/config/core/v4alpha/protocol.proto index 2ec9244124bd..955c29335a3f 100644 --- a/api/envoy/config/core/v4alpha/protocol.proto +++ b/api/envoy/config/core/v4alpha/protocol.proto @@ -159,7 +159,7 @@ message Http1ProtocolOptions { bool enable_trailers = 5; } -// [#next-free-field: 14] +// [#next-free-field: 15] message Http2ProtocolOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.Http2ProtocolOptions"; @@ -180,6 +180,10 @@ message Http2ProtocolOptions { google.protobuf.UInt32Value value = 2 [(validate.rules).message = {required: true}]; } + reserved 12; + + reserved "stream_error_on_invalid_http_messaging"; + // `Maximum table size `_ // (in octets) that the encoder is permitted to use for the dynamic HPACK table. Valid values // range from 0 to 4294967295 (2^32 - 1) and defaults to 4096. 0 effectively disables header @@ -280,8 +284,11 @@ message Http2ProtocolOptions { // the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. However, // when this option is enabled, only the offending stream is terminated. // + // This overrides any HCM :ref:`stream_error_on_invalid_http_messaging + // ` + // // See `RFC7540, sec. 8.1 `_ for details. - bool stream_error_on_invalid_http_messaging = 12; + google.protobuf.BoolValue override_stream_error_on_invalid_http_message = 14; // [#not-implemented-hide:] // Specifies SETTINGS frame parameters to be sent to the peer, with two exceptions: diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto b/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto index f2a80959c33b..a23fcc99e07c 100644 --- a/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto +++ b/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto @@ -36,7 +36,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // HTTP connection manager :ref:`configuration overview `. // [#extension: envoy.filters.network.http_connection_manager] -// [#next-free-field: 40] +// [#next-free-field: 41] message HttpConnectionManager { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager"; @@ -540,6 +540,23 @@ message HttpConnectionManager { // route with :ref:`domains` match set to `example`. Defaults to `false`. Note that port removal is not part // of `HTTP spec `_ and is provided for convenience. bool strip_matching_host_port = 39; + + // Governs Envoy's behavior when receiving invalid HTTP from downstream. + // If this option is false (default), Envoy will err on the conservative side handling HTTP + // errors, terminating both HTTP/1.1 and HTTP/2 connections when receiving an invalid request. 
+ // If this option is set to true, Envoy will be more permissive, only resetting the invalid + // stream in the case of HTTP/2 and leaving the connection open where possible (if the entire + // request is read for HTTP/1.1) + // In general this should be true for deployments receiving trusted traffic (L2 Envoys, + // company-internal mesh) and false when receiving untrusted traffic (edge deployments). + // + // If different behaviors for invalid_http_message for HTTP/1 and HTTP/2 are + // desired, one *must* use the new HTTP/2 option + // :ref:`override_stream_error_on_invalid_http_message + // ` + // *not* the deprecated but similarly named :ref:`stream_error_on_invalid_http_messaging + // ` + google.protobuf.BoolValue stream_error_on_invalid_http_message = 40; } // The configuration to customize local reply returned by Envoy. diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto index aaf146e1f568..bdf3618ba328 100644 --- a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto +++ b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto @@ -35,7 +35,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // HTTP connection manager :ref:`configuration overview `. // [#extension: envoy.filters.network.http_connection_manager] -// [#next-free-field: 40] +// [#next-free-field: 41] message HttpConnectionManager { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager"; @@ -539,6 +539,23 @@ message HttpConnectionManager { // route with :ref:`domains` match set to `example`. Defaults to `false`. Note that port removal is not part // of `HTTP spec `_ and is provided for convenience. bool strip_matching_host_port = 39; + + // Governs Envoy's behavior when receiving invalid HTTP from downstream. + // If this option is false (default), Envoy will err on the conservative side handling HTTP + // errors, terminating both HTTP/1.1 and HTTP/2 connections when receiving an invalid request. + // If this option is set to true, Envoy will be more permissive, only resetting the invalid + // stream in the case of HTTP/2 and leaving the connection open where possible (if the entire + // request is read for HTTP/1.1) + // In general this should be true for deployments receiving trusted traffic (L2 Envoys, + // company-internal mesh) and false when receiving untrusted traffic (edge deployments). + // + // If different behaviors for invalid_http_message for HTTP/1 and HTTP/2 are + // desired, one *must* use the new HTTP/2 option + // :ref:`override_stream_error_on_invalid_http_message + // ` + // *not* the deprecated but similarly named :ref:`stream_error_on_invalid_http_messaging + // ` + google.protobuf.BoolValue stream_error_on_invalid_http_message = 40; } // The configuration to customize local reply returned by Envoy. 
diff --git a/docs/root/configuration/best_practices/level_two.rst b/docs/root/configuration/best_practices/level_two.rst index c38dae7cdb68..44c52ace8a8e 100644 --- a/docs/root/configuration/best_practices/level_two.rst +++ b/docs/root/configuration/best_practices/level_two.rst @@ -5,14 +5,14 @@ Configuring Envoy as a level two proxy Envoy is a production-ready proxy, however, the default settings that are tailored for the edge use case may need to be adjusted when using Envoy in a multi-level deployment as a -"level two" HTTP/2 proxy. +"level two" proxy. .. image:: /_static/multilevel_deployment.svg **In summary, if you run level two Envoy version 1.11.1 or greater which terminates -HTTP/2, we strongly advise you to change the HTTP/2 configuration of your level +HTTP/2, we strongly advise you to change the HttpConnectionManager configuration of your level two Envoy, by setting its downstream** -:ref:`validation of HTTP/2 messaging option ` +:ref:`validation of HTTP messaging option ` **to true.** If there is an invalid HTTP/2 request and this option is not set, the Envoy in @@ -29,9 +29,7 @@ user has insight into what traffic will bypass level one checks, they could spray “bad” traffic across the level one fleet, causing serious disruption to other users’ traffic. -Please note that the -:ref:`validation of HTTP/2 messaging option ` -is planned to be deprecated and replaced with mandatory configuration in the HttpConnectionManager, to ensure -that what is now an easily overlooked option would need to be configured, ideally -appropriately for the given Envoy deployment. Please refer to the -https://github.com/envoyproxy/envoy/issues/9285 for more information. +This configuration option also has implications for invalid HTTP/1.1 though slightly less +severe ones. For Envoy L1s, invalid HTTP/1 requests will also result in connection +reset. If the option is set to true, and the request is completely read, the connection +will persist and can be reused for a subsequent request. diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index b49649b892d3..eb3465f99eeb 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -10,6 +10,7 @@ Minor Behavior Changes *Changes that may cause incompatibilities for some users, but should not for most* * compressor: always insert `Vary` headers for compressible resources even if it's decided not to compress a response due to incompatible `Accept-Encoding` value. The `Vary` header needs to be inserted to let a caching proxy in front of Envoy know that the requested resource still can be served with compression applied. +* http: added HCM level configuration of :ref:`error handling on invalid messaging ` which substantially changes Envoy's behavior when encountering invalid HTTP/1.1 defaulting to closing the connection instead of allowing reuse. This can temporarily be reverted by setting `envoy.reloadable_features.hcm_stream_error_on_invalid_message` to false, or permanently reverted by setting the :ref:`HCM option ` to true to restore prior HTTP/1.1 behavior and setting the *new* HTTP/2 configuration :ref:`override_stream_error_on_invalid_http_message ` to false to retain prior HTTP/2 behavior. * http: the per-stream FilterState maintained by the HTTP connection manager will now provide read/write access to the downstream connection FilterState. As such, code that relies on interacting with this might see a change in behavior.
* logging: nghttp2 log messages no longer appear at trace level unless `ENVOY_NGHTTP2_TRACE` is set diff --git a/generated_api_shadow/envoy/config/core/v3/protocol.proto b/generated_api_shadow/envoy/config/core/v3/protocol.proto index 339601feab3d..0ab6289e9659 100644 --- a/generated_api_shadow/envoy/config/core/v3/protocol.proto +++ b/generated_api_shadow/envoy/config/core/v3/protocol.proto @@ -159,7 +159,7 @@ message Http1ProtocolOptions { bool enable_trailers = 5; } -// [#next-free-field: 14] +// [#next-free-field: 15] message Http2ProtocolOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.Http2ProtocolOptions"; @@ -280,8 +280,25 @@ message Http2ProtocolOptions { // the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. However, // when this option is enabled, only the offending stream is terminated. // + // This is overridden by HCM :ref:`stream_error_on_invalid_http_messaging + // ` + // iff present. + // + // This is deprecated in favor of :ref:`override_stream_error_on_invalid_http_message + // ` + // + // See `RFC7540, sec. 8.1 `_ for details. + bool stream_error_on_invalid_http_messaging = 12 [deprecated = true]; + + // Allows invalid HTTP messaging and headers. When this option is disabled (default), then + // the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. However, + // when this option is enabled, only the offending stream is terminated. + // + // This overrides any HCM :ref:`stream_error_on_invalid_http_messaging + // ` + // // See `RFC7540, sec. 8.1 `_ for details. - bool stream_error_on_invalid_http_messaging = 12; + google.protobuf.BoolValue override_stream_error_on_invalid_http_message = 14; // [#not-implemented-hide:] // Specifies SETTINGS frame parameters to be sent to the peer, with two exceptions: diff --git a/generated_api_shadow/envoy/config/core/v4alpha/protocol.proto b/generated_api_shadow/envoy/config/core/v4alpha/protocol.proto index 2ec9244124bd..cc1b99d0a048 100644 --- a/generated_api_shadow/envoy/config/core/v4alpha/protocol.proto +++ b/generated_api_shadow/envoy/config/core/v4alpha/protocol.proto @@ -159,7 +159,7 @@ message Http1ProtocolOptions { bool enable_trailers = 5; } -// [#next-free-field: 14] +// [#next-free-field: 15] message Http2ProtocolOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.Http2ProtocolOptions"; @@ -280,8 +280,25 @@ message Http2ProtocolOptions { // the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. However, // when this option is enabled, only the offending stream is terminated. // + // This is overridden by HCM :ref:`stream_error_on_invalid_http_messaging + // ` + // iff present. + // + // This is deprecated in favor of :ref:`override_stream_error_on_invalid_http_message + // ` + // + // See `RFC7540, sec. 8.1 `_ for details. + bool hidden_envoy_deprecated_stream_error_on_invalid_http_messaging = 12 [deprecated = true]; + + // Allows invalid HTTP messaging and headers. When this option is disabled (default), then + // the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. However, + // when this option is enabled, only the offending stream is terminated. + // + // This overrides any HCM :ref:`stream_error_on_invalid_http_messaging + // ` + // // See `RFC7540, sec. 8.1 `_ for details. 
- bool stream_error_on_invalid_http_messaging = 12; + google.protobuf.BoolValue override_stream_error_on_invalid_http_message = 14; // [#not-implemented-hide:] // Specifies SETTINGS frame parameters to be sent to the peer, with two exceptions: diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto index 54e531ceb6a0..322212670988 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto @@ -36,7 +36,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // HTTP connection manager :ref:`configuration overview `. // [#extension: envoy.filters.network.http_connection_manager] -// [#next-free-field: 40] +// [#next-free-field: 41] message HttpConnectionManager { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager"; @@ -543,6 +543,23 @@ message HttpConnectionManager { // of `HTTP spec `_ and is provided for convenience. bool strip_matching_host_port = 39; + // Governs Envoy's behavior when receiving invalid HTTP from downstream. + // If this option is false (default), Envoy will err on the conservative side handling HTTP + // errors, terminating both HTTP/1.1 and HTTP/2 connections when receiving an invalid request. + // If this option is set to true, Envoy will be more permissive, only resetting the invalid + // stream in the case of HTTP/2 and leaving the connection open where possible (if the entire + // request is read for HTTP/1.1) + // In general this should be true for deployments receiving trusted traffic (L2 Envoys, + // company-internal mesh) and false when receiving untrusted traffic (edge deployments). + // + // If different behaviors for invalid_http_message for HTTP/1 and HTTP/2 are + // desired, one *must* use the new HTTP/2 option + // :ref:`override_stream_error_on_invalid_http_message + // ` + // *not* the deprecated but similarly named :ref:`stream_error_on_invalid_http_messaging + // ` + google.protobuf.BoolValue stream_error_on_invalid_http_message = 40; + google.protobuf.Duration hidden_envoy_deprecated_idle_timeout = 11 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; } diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto index aaf146e1f568..bdf3618ba328 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto @@ -35,7 +35,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // HTTP connection manager :ref:`configuration overview `. 
// [#extension: envoy.filters.network.http_connection_manager] -// [#next-free-field: 40] +// [#next-free-field: 41] message HttpConnectionManager { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager"; @@ -539,6 +539,23 @@ message HttpConnectionManager { // route with :ref:`domains` match set to `example`. Defaults to `false`. Note that port removal is not part // of `HTTP spec `_ and is provided for convenience. bool strip_matching_host_port = 39; + + // Governs Envoy's behavior when receiving invalid HTTP from downstream. + // If this option is false (default), Envoy will err on the conservative side handling HTTP + // errors, terminating both HTTP/1.1 and HTTP/2 connections when receiving an invalid request. + // If this option is set to true, Envoy will be more permissive, only resetting the invalid + // stream in the case of HTTP/2 and leaving the connection open where possible (if the entire + // request is read for HTTP/1.1) + // In general this should be true for deployments receiving trusted traffic (L2 Envoys, + // company-internal mesh) and false when receiving untrusted traffic (edge deployments). + // + // If different behaviors for invalid_http_message for HTTP/1 and HTTP/2 are + // desired, one *must* use the new HTTP/2 option + // :ref:`override_stream_error_on_invalid_http_message + // ` + // *not* the deprecated but similarly named :ref:`stream_error_on_invalid_http_messaging + // ` + google.protobuf.BoolValue stream_error_on_invalid_http_message = 40; } // The configuration to customize local reply returned by Envoy. diff --git a/source/common/http/BUILD b/source/common/http/BUILD index 54de0782f5f6..5a07a56b9f00 100644 --- a/source/common/http/BUILD +++ b/source/common/http/BUILD @@ -358,6 +358,7 @@ envoy_cc_library( "//source/common/json:json_loader_lib", "//source/common/network:utility_lib", "//source/common/protobuf:utility_lib", + "//source/common/runtime:runtime_features_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], ) diff --git a/source/common/http/conn_manager_config.h b/source/common/http/conn_manager_config.h index faf691b6fc20..b67afc95a64c 100644 --- a/source/common/http/conn_manager_config.h +++ b/source/common/http/conn_manager_config.h @@ -409,6 +409,12 @@ class ConnectionManagerConfig { */ virtual bool proxy100Continue() const PURE; + /** + * @return bool supplies if the HttpConnectionManager should handle invalid HTTP with a stream + * error or connection error. + */ + virtual bool streamErrorOnInvalidHttpMessaging() const PURE; + /** * @return supplies the http1 settings. */ diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index 4a76e84be282..df958ed68530 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -1521,6 +1521,14 @@ void ConnectionManagerImpl::ActiveStream::sendLocalReply( // a no-op. createFilterChain(); + // The BadRequest error code indicates there has been a messaging error. 
+ if (Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.hcm_stream_error_on_invalid_message") && + !connection_manager_.config_.streamErrorOnInvalidHttpMessaging() && + code == Http::Code::BadRequest && connection_manager_.codec_->protocol() < Protocol::Http2) { + state_.saw_connection_close_ = true; + } + stream_info_.setResponseCodeDetails(details); Utility::sendLocalReply( state_.destroyed_, diff --git a/source/common/http/http2/codec_impl.cc b/source/common/http/http2/codec_impl.cc index 4a94bb3aafdf..b25b8fa4bef5 100644 --- a/source/common/http/http2/codec_impl.cc +++ b/source/common/http/http2/codec_impl.cc @@ -492,7 +492,7 @@ ConnectionImpl::ConnectionImpl(Network::Connection& connection, CodecStats& stat max_headers_count_(max_headers_count), per_stream_buffer_limit_(http2_options.initial_stream_window_size().value()), stream_error_on_invalid_http_messaging_( - http2_options.stream_error_on_invalid_http_messaging()), + http2_options.override_stream_error_on_invalid_http_message().value()), flood_detected_(false), max_outbound_frames_(http2_options.max_outbound_frames().value()), frame_buffer_releasor_([this]() { releaseOutboundFrame(); }), max_outbound_control_frames_(http2_options.max_outbound_control_frames().value()), diff --git a/source/common/http/utility.cc b/source/common/http/utility.cc index bee04a98cabe..12006e143fa2 100644 --- a/source/common/http/utility.cc +++ b/source/common/http/utility.cc @@ -21,6 +21,7 @@ #include "common/http/message_impl.h" #include "common/network/utility.h" #include "common/protobuf/utility.h" +#include "common/runtime/runtime_features.h" #include "absl/strings/match.h" #include "absl/strings/numbers.h" @@ -141,12 +142,31 @@ const uint32_t OptionsLimits::DEFAULT_MAX_CONSECUTIVE_INBOUND_FRAMES_WITH_EMPTY_ const uint32_t OptionsLimits::DEFAULT_MAX_INBOUND_PRIORITY_FRAMES_PER_STREAM; const uint32_t OptionsLimits::DEFAULT_MAX_INBOUND_WINDOW_UPDATE_FRAMES_PER_DATA_FRAME_SENT; +envoy::config::core::v3::Http2ProtocolOptions +initializeAndValidateOptions(const envoy::config::core::v3::Http2ProtocolOptions& options, + bool hcm_stream_error_set, + const Protobuf::BoolValue& hcm_stream_error) { + auto ret = initializeAndValidateOptions(options); + if (Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.hcm_stream_error_on_invalid_message") && + !options.has_override_stream_error_on_invalid_http_message() && hcm_stream_error_set) { + ret.mutable_override_stream_error_on_invalid_http_message()->set_value( + hcm_stream_error.value()); + } + return ret; +} + envoy::config::core::v3::Http2ProtocolOptions initializeAndValidateOptions(const envoy::config::core::v3::Http2ProtocolOptions& options) { envoy::config::core::v3::Http2ProtocolOptions options_clone(options); // This will throw an exception when a custom parameter and a named parameter collide. 
validateCustomSettingsParameters(options); + if (!options.has_override_stream_error_on_invalid_http_message()) { + options_clone.mutable_override_stream_error_on_invalid_http_message()->set_value( + options.stream_error_on_invalid_http_messaging()); + } + if (!options_clone.has_hpack_table_size()) { options_clone.mutable_hpack_table_size()->set_value(OptionsLimits::DEFAULT_HPACK_TABLE_SIZE); } diff --git a/source/common/http/utility.h b/source/common/http/utility.h index 4fef6cc23327..492193c4e2ff 100644 --- a/source/common/http/utility.h +++ b/source/common/http/utility.h @@ -116,6 +116,10 @@ struct OptionsLimits { envoy::config::core::v3::Http2ProtocolOptions initializeAndValidateOptions(const envoy::config::core::v3::Http2ProtocolOptions& options); +envoy::config::core::v3::Http2ProtocolOptions +initializeAndValidateOptions(const envoy::config::core::v3::Http2ProtocolOptions& options, + bool hcm_stream_error_set, + const Protobuf::BoolValue& hcm_stream_error); } // namespace Utility } // namespace Http2 diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc index 8c1864ff69a2..ba4817e19a53 100644 --- a/source/common/runtime/runtime_features.cc +++ b/source/common/runtime/runtime_features.cc @@ -73,6 +73,7 @@ constexpr const char* runtime_features[] = { "envoy.reloadable_features.preserve_query_string_in_path_redirects", "envoy.reloadable_features.preserve_upstream_date", "envoy.reloadable_features.stop_faking_paths", + "envoy.reloadable_features.hcm_stream_error_on_invalid_message", "envoy.reloadable_features.strict_1xx_and_204_response_headers", }; diff --git a/source/extensions/filters/network/http_connection_manager/config.cc b/source/extensions/filters/network/http_connection_manager/config.cc index 3f8abde5ae24..190db7f475b4 100644 --- a/source/extensions/filters/network/http_connection_manager/config.cc +++ b/source/extensions/filters/network/http_connection_manager/config.cc @@ -178,7 +178,9 @@ HttpConnectionManagerConfig::HttpConnectionManagerConfig( skip_xff_append_(config.skip_xff_append()), via_(config.via()), route_config_provider_manager_(route_config_provider_manager), scoped_routes_config_provider_manager_(scoped_routes_config_provider_manager), - http2_options_(Http2::Utility::initializeAndValidateOptions(config.http2_protocol_options())), + http2_options_(Http2::Utility::initializeAndValidateOptions( + config.http2_protocol_options(), config.has_stream_error_on_invalid_http_message(), + config.stream_error_on_invalid_http_message())), http1_settings_(Http::Utility::parseHttp1Settings(config.http_protocol_options())), max_request_headers_kb_(PROTOBUF_GET_WRAPPED_OR_DEFAULT( config, max_request_headers_kb, Http::DEFAULT_MAX_REQUEST_HEADERS_KB)), @@ -202,6 +204,8 @@ HttpConnectionManagerConfig::HttpConnectionManagerConfig( listener_stats_(Http::ConnectionManagerImpl::generateListenerStats(stats_prefix_, context_.listenerScope())), proxy_100_continue_(config.proxy_100_continue()), + stream_error_on_invalid_http_messaging_( + PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, stream_error_on_invalid_http_message, false)), delayed_close_timeout_(PROTOBUF_GET_MS_OR_DEFAULT(config, delayed_close_timeout, 1000)), #ifdef ENVOY_NORMALIZE_PATH_BY_DEFAULT normalize_path_(PROTOBUF_GET_WRAPPED_OR_DEFAULT( diff --git a/source/extensions/filters/network/http_connection_manager/config.h b/source/extensions/filters/network/http_connection_manager/config.h index f10ee8ccee48..b522fad49b66 100644 --- 
a/source/extensions/filters/network/http_connection_manager/config.h +++ b/source/extensions/filters/network/http_connection_manager/config.h @@ -157,6 +157,9 @@ class HttpConnectionManagerConfig : Logger::Loggable, const absl::optional& userAgent() override { return user_agent_; } Http::ConnectionManagerListenerStats& listenerStats() override { return listener_stats_; } bool proxy100Continue() const override { return proxy_100_continue_; } + bool streamErrorOnInvalidHttpMessaging() const override { + return stream_error_on_invalid_http_messaging_; + } const Http::Http1Settings& http1Settings() const override { return http1_settings_; } bool shouldNormalizePath() const override { return normalize_path_; } bool shouldMergeSlashes() const override { return merge_slashes_; } @@ -228,6 +231,7 @@ class HttpConnectionManagerConfig : Logger::Loggable, Http::DateProvider& date_provider_; Http::ConnectionManagerListenerStats listener_stats_; const bool proxy_100_continue_; + const bool stream_error_on_invalid_http_messaging_; std::chrono::milliseconds delayed_close_timeout_; const bool normalize_path_; const bool merge_slashes_; diff --git a/source/server/admin/admin.h b/source/server/admin/admin.h index 278aa30c342b..a1e32e34c74b 100644 --- a/source/server/admin/admin.h +++ b/source/server/admin/admin.h @@ -170,6 +170,7 @@ class AdminImpl : public Admin, const Http::TracingConnectionManagerConfig* tracingConfig() override { return nullptr; } Http::ConnectionManagerListenerStats& listenerStats() override { return listener_->stats_; } bool proxy100Continue() const override { return false; } + bool streamErrorOnInvalidHttpMessaging() const override { return false; } const Http::Http1Settings& http1Settings() const override { return http1_settings_; } bool shouldNormalizePath() const override { return true; } bool shouldMergeSlashes() const override { return true; } diff --git a/test/common/http/BUILD b/test/common/http/BUILD index ec2987c48403..13e7911ee08f 100644 --- a/test/common/http/BUILD +++ b/test/common/http/BUILD @@ -369,6 +369,7 @@ envoy_cc_test( "//source/common/network:address_lib", "//test/mocks/http:http_mocks", "//test/mocks/upstream:upstream_mocks", + "//test/test_common:test_runtime_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], diff --git a/test/common/http/conn_manager_impl_fuzz_test.cc b/test/common/http/conn_manager_impl_fuzz_test.cc index 7c85051528ab..ef957d113c00 100644 --- a/test/common/http/conn_manager_impl_fuzz_test.cc +++ b/test/common/http/conn_manager_impl_fuzz_test.cc @@ -187,6 +187,9 @@ class FuzzConfig : public ConnectionManagerConfig { const TracingConnectionManagerConfig* tracingConfig() override { return tracing_config_.get(); } ConnectionManagerListenerStats& listenerStats() override { return listener_stats_; } bool proxy100Continue() const override { return proxy_100_continue_; } + bool streamErrorOnInvalidHttpMessaging() const override { + return stream_error_on_invalid_http_messaging_; + } const Http::Http1Settings& http1Settings() const override { return http1_settings_; } bool shouldNormalizePath() const override { return false; } bool shouldMergeSlashes() const override { return false; } @@ -234,6 +237,7 @@ class FuzzConfig : public ConnectionManagerConfig { Tracing::HttpTracerSharedPtr http_tracer_{std::make_shared>()}; TracingConnectionManagerConfigPtr tracing_config_; bool proxy_100_continue_{true}; + bool stream_error_on_invalid_http_messaging_ = false; bool preserve_external_request_id_{false}; 
Http::Http1Settings http1_settings_; Http::DefaultInternalAddressConfig internal_address_config_; diff --git a/test/common/http/conn_manager_impl_test.cc b/test/common/http/conn_manager_impl_test.cc index efd7222b9123..5fc4624d37d4 100644 --- a/test/common/http/conn_manager_impl_test.cc +++ b/test/common/http/conn_manager_impl_test.cc @@ -95,7 +95,7 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan POOL_HISTOGRAM(fake_stats_))}, "", fake_stats_), tracing_stats_{CONN_MAN_TRACING_STATS(POOL_COUNTER(fake_stats_))}, - listener_stats_{CONN_MAN_LISTENER_STATS(POOL_COUNTER(fake_listener_stats_))}, + listener_stats_({CONN_MAN_LISTENER_STATS(POOL_COUNTER(fake_listener_stats_))}), request_id_extension_(RequestIDExtensionFactory::defaultInstance(random_)), local_reply_(LocalReply::Factory::createDefault()) { @@ -348,6 +348,9 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan const TracingConnectionManagerConfig* tracingConfig() override { return tracing_config_.get(); } ConnectionManagerListenerStats& listenerStats() override { return listener_stats_; } bool proxy100Continue() const override { return proxy_100_continue_; } + bool streamErrorOnInvalidHttpMessaging() const override { + return stream_error_on_invalid_http_messaging_; + } const Http::Http1Settings& http1Settings() const override { return http1_settings_; } bool shouldNormalizePath() const override { return normalize_path_; } bool shouldMergeSlashes() const override { return merge_slashes_; } @@ -410,6 +413,7 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan Stats::IsolatedStoreImpl fake_listener_stats_; ConnectionManagerListenerStats listener_stats_; bool proxy_100_continue_ = false; + bool stream_error_on_invalid_http_messaging_ = false; bool preserve_external_request_id_ = false; Http::Http1Settings http1_settings_; bool normalize_path_ = false; diff --git a/test/common/http/conn_manager_utility_test.cc b/test/common/http/conn_manager_utility_test.cc index 97d680b67c93..4b4348710844 100644 --- a/test/common/http/conn_manager_utility_test.cc +++ b/test/common/http/conn_manager_utility_test.cc @@ -135,6 +135,7 @@ class MockConnectionManagerConfig : public ConnectionManagerConfig { MOCK_METHOD(Tracing::HttpTracerSharedPtr, tracer, ()); MOCK_METHOD(ConnectionManagerListenerStats&, listenerStats, ()); MOCK_METHOD(bool, proxy100Continue, (), (const)); + MOCK_METHOD(bool, streamErrorOnInvalidHttpMessaging, (), (const)); MOCK_METHOD(const Http::Http1Settings&, http1Settings, (), (const)); MOCK_METHOD(bool, shouldNormalizePath, (), (const)); MOCK_METHOD(bool, shouldMergeSlashes, (), (const)); diff --git a/test/common/http/http2/codec_impl_test.cc b/test/common/http/http2/codec_impl_test.cc index 55bc37182288..1deb3c412284 100644 --- a/test/common/http/http2/codec_impl_test.cc +++ b/test/common/http/http2/codec_impl_test.cc @@ -182,7 +182,8 @@ class Http2CodecImplTestFixture { (tp.has_value()) ? 
::testing::get(*tp) : CommonUtility::OptionsLimits::DEFAULT_INITIAL_CONNECTION_WINDOW_SIZE); options.set_allow_metadata(allow_metadata_); - options.set_stream_error_on_invalid_http_messaging(stream_error_on_invalid_http_messaging_); + options.mutable_override_stream_error_on_invalid_http_message()->set_value( + stream_error_on_invalid_http_messaging_); options.mutable_max_outbound_frames()->set_value(max_outbound_frames_); options.mutable_max_outbound_control_frames()->set_value(max_outbound_control_frames_); options.mutable_max_consecutive_inbound_frames_with_empty_payload()->set_value( diff --git a/test/common/http/utility_test.cc b/test/common/http/utility_test.cc index 47c12303d0b6..0c1dd6c1277c 100644 --- a/test/common/http/utility_test.cc +++ b/test/common/http/utility_test.cc @@ -15,6 +15,7 @@ #include "test/mocks/http/mocks.h" #include "test/test_common/printers.h" +#include "test/test_common/test_runtime.h" #include "test/test_common/utility.h" #include "gtest/gtest.h" @@ -355,6 +356,70 @@ initial_connection_window_size: 65535 } } +TEST(HttpUtility, ValidateStreamErrors) { + // Both false, the result should be false. + envoy::config::core::v3::Http2ProtocolOptions http2_options; + EXPECT_FALSE(Envoy::Http2::Utility::initializeAndValidateOptions(http2_options) + .override_stream_error_on_invalid_http_message() + .value()); + + // If the new value is not present, the legacy value is respected. + http2_options.set_stream_error_on_invalid_http_messaging(true); + EXPECT_TRUE(Envoy::Http2::Utility::initializeAndValidateOptions(http2_options) + .override_stream_error_on_invalid_http_message() + .value()); + + // If the new value is present, it is used. + http2_options.mutable_override_stream_error_on_invalid_http_message()->set_value(true); + http2_options.set_stream_error_on_invalid_http_messaging(false); + EXPECT_TRUE(Envoy::Http2::Utility::initializeAndValidateOptions(http2_options) + .override_stream_error_on_invalid_http_message() + .value()); + + // Invert values - the new value should still be used. + http2_options.mutable_override_stream_error_on_invalid_http_message()->set_value(false); + http2_options.set_stream_error_on_invalid_http_messaging(true); + EXPECT_FALSE(Envoy::Http2::Utility::initializeAndValidateOptions(http2_options) + .override_stream_error_on_invalid_http_message() + .value()); +} + +TEST(HttpUtility, ValidateStreamErrorsWithHcm) { + envoy::config::core::v3::Http2ProtocolOptions http2_options; + http2_options.set_stream_error_on_invalid_http_messaging(true); + EXPECT_TRUE(Envoy::Http2::Utility::initializeAndValidateOptions(http2_options) + .override_stream_error_on_invalid_http_message() + .value()); + + // If the HCM value is present it will take precedence over the old value. + Protobuf::BoolValue hcm_value; + hcm_value.set_value(false); + EXPECT_FALSE(Envoy::Http2::Utility::initializeAndValidateOptions(http2_options, true, hcm_value) + .override_stream_error_on_invalid_http_message() + .value()); + // The HCM value will be ignored if initializeAndValidateOptions is told it is not present. + EXPECT_TRUE(Envoy::Http2::Utility::initializeAndValidateOptions(http2_options, false, hcm_value) + .override_stream_error_on_invalid_http_message() + .value()); + + // The override_stream_error_on_invalid_http_message takes precedence over the + // global one. 
+ http2_options.mutable_override_stream_error_on_invalid_http_message()->set_value(true); + EXPECT_TRUE(Envoy::Http2::Utility::initializeAndValidateOptions(http2_options, true, hcm_value) + .override_stream_error_on_invalid_http_message() + .value()); + + { + // With runtime flipped, override is ignored. + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.hcm_stream_error_on_invalid_message", "false"}}); + EXPECT_TRUE(Envoy::Http2::Utility::initializeAndValidateOptions(http2_options, true, hcm_value) + .override_stream_error_on_invalid_http_message() + .value()); + } +} + TEST(HttpUtility, getLastAddressFromXFF) { { const std::string first_address = "192.0.2.10"; diff --git a/test/integration/http2_integration_test.cc b/test/integration/http2_integration_test.cc index f663fcc0ef2c..73e0e615aa7a 100644 --- a/test/integration/http2_integration_test.cc +++ b/test/integration/http2_integration_test.cc @@ -1664,7 +1664,9 @@ TEST_P(Http2FloodMitigationTest, RST_STREAM) { config_helper_.addConfigModifier( [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& hcm) -> void { - hcm.mutable_http2_protocol_options()->set_stream_error_on_invalid_http_messaging(true); + hcm.mutable_http2_protocol_options() + ->mutable_override_stream_error_on_invalid_http_message() + ->set_value(true); }); beginSession(); @@ -1841,7 +1843,9 @@ TEST_P(Http2FloodMitigationTest, ZerolenHeaderAllowed) { config_helper_.addConfigModifier( [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& hcm) -> void { - hcm.mutable_http2_protocol_options()->set_stream_error_on_invalid_http_messaging(true); + hcm.mutable_http2_protocol_options() + ->mutable_override_stream_error_on_invalid_http_message() + ->set_value(true); }); autonomous_upstream_ = true; beginSession(); diff --git a/test/integration/integration_test.cc b/test/integration/integration_test.cc index e89400b690c8..37dce6d1a0e9 100644 --- a/test/integration/integration_test.cc +++ b/test/integration/integration_test.cc @@ -717,6 +717,10 @@ TEST_P(IntegrationTest, PipelineWithTrailers) { // an inline sendLocalReply to make sure the "kick" works under the call stack // of dispatch as well as when a response is proxied from upstream. TEST_P(IntegrationTest, PipelineInline) { + // When deprecating this flag, set hcm.mutable_stream_error_on_invalid_http_message true. + config_helper_.addRuntimeOverride("envoy.reloadable_features.hcm_stream_error_on_invalid_message", + "false"); + autonomous_upstream_ = true; initialize(); std::string response; @@ -1223,6 +1227,11 @@ TEST_P(UpstreamEndpointIntegrationTest, TestUpstreamEndpointAddress) { // Send continuous pipelined requests while not reading responses, to check // HTTP/1.1 response flood protection. TEST_P(IntegrationTest, TestFlood) { + config_helper_.addConfigModifier( + [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) -> void { + hcm.mutable_stream_error_on_invalid_http_message()->set_value(true); + }); initialize(); // Set up a raw connection to easily send requests without reading responses. @@ -1300,6 +1309,11 @@ TEST_P(IntegrationTest, TestFloodUpstreamErrors) { // Make sure flood protection doesn't kick in with many requests sent serially. 
TEST_P(IntegrationTest, TestManyBadRequests) { + config_helper_.addConfigModifier( + [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) -> void { + hcm.mutable_stream_error_on_invalid_http_message()->set_value(true); + }); initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); diff --git a/test/integration/protocol_integration_test.cc b/test/integration/protocol_integration_test.cc index 074181c73699..e41cd9eabe90 100644 --- a/test/integration/protocol_integration_test.cc +++ b/test/integration/protocol_integration_test.cc @@ -1114,7 +1114,9 @@ TEST_P(DownstreamProtocolIntegrationTest, InvalidContentLengthAllowed) { config_helper_.addConfigModifier( [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& hcm) -> void { - hcm.mutable_http2_protocol_options()->set_stream_error_on_invalid_http_messaging(true); + hcm.mutable_http2_protocol_options() + ->mutable_override_stream_error_on_invalid_http_message() + ->set_value(true); }); initialize(); @@ -1170,7 +1172,9 @@ TEST_P(DownstreamProtocolIntegrationTest, MultipleContentLengthsAllowed) { config_helper_.addConfigModifier( [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& hcm) -> void { - hcm.mutable_http2_protocol_options()->set_stream_error_on_invalid_http_messaging(true); + hcm.mutable_http2_protocol_options() + ->mutable_override_stream_error_on_invalid_http_message() + ->set_value(true); }); initialize(); @@ -1958,7 +1962,7 @@ TEST_P(DownstreamProtocolIntegrationTest, ConnectIsBlocked) { } } -// Make sure that with stream_error_on_invalid_http_messaging true, CONNECT +// Make sure that with override_stream_error_on_invalid_http_message true, CONNECT // results in stream teardown not connection teardown. TEST_P(DownstreamProtocolIntegrationTest, ConnectStreamRejection) { if (downstreamProtocol() == Http::CodecClient::Type::HTTP1) { @@ -1967,7 +1971,9 @@ TEST_P(DownstreamProtocolIntegrationTest, ConnectStreamRejection) { config_helper_.addConfigModifier( [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& hcm) -> void { - hcm.mutable_http2_protocol_options()->set_stream_error_on_invalid_http_messaging(true); + hcm.mutable_http2_protocol_options() + ->mutable_override_stream_error_on_invalid_http_message() + ->set_value(true); }); initialize(); diff --git a/test/integration/stats_integration_test.cc b/test/integration/stats_integration_test.cc index 904a7f94504d..9b339b8e4dc1 100644 --- a/test/integration/stats_integration_test.cc +++ b/test/integration/stats_integration_test.cc @@ -283,6 +283,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithFakeSymbolTable) { // 2020/06/29 11751 44715 46000 Improve time complexity of removing callback handle // in callback manager. // 2020/07/07 11252 44971 46000 Introduce Least Request LB active request bias config + // 2020/07/15 11748 45003 46000 Stream error on invalid messaging // Note: when adjusting this value: EXPECT_MEMORY_EQ is active only in CI // 'release' builds, where we control the platform and tool-chain. So you @@ -300,7 +301,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithFakeSymbolTable) { // We only run the exact test for ipv6 because ipv4 in some cases may allocate a // different number of bytes. We still run the approximate test. 
if (ip_version_ != Network::Address::IpVersion::v6) { - EXPECT_MEMORY_EQ(m_per_cluster, 44971); + EXPECT_MEMORY_EQ(m_per_cluster, 45003); } EXPECT_MEMORY_LE(m_per_cluster, 46000); // Round up to allow platform variations. } @@ -355,6 +356,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithRealSymbolTable) { // 2020/06/29 11751 36827 38000 Improve time complexity of removing callback handle. // in callback manager. // 2020/07/07 11252 37083 38000 Introduce Least Request LB active request bias config + // 2020/07/15 11748 37115 38000 Stream error on invalid messaging // Note: when adjusting this value: EXPECT_MEMORY_EQ is active only in CI // 'release' builds, where we control the platform and tool-chain. So you @@ -372,7 +374,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithRealSymbolTable) { // We only run the exact test for ipv6 because ipv4 in some cases may allocate a // different number of bytes. We still run the approximate test. if (ip_version_ != Network::Address::IpVersion::v6) { - EXPECT_MEMORY_EQ(m_per_cluster, 37083); + EXPECT_MEMORY_EQ(m_per_cluster, 37115); } EXPECT_MEMORY_LE(m_per_cluster, 38000); // Round up to allow platform variations. } diff --git a/test/mocks/runtime/mocks.cc b/test/mocks/runtime/mocks.cc index 3afd1bbbf743..d2f22d414c8f 100644 --- a/test/mocks/runtime/mocks.cc +++ b/test/mocks/runtime/mocks.cc @@ -4,6 +4,7 @@ #include "gtest/gtest.h" using testing::_; +using testing::NiceMock; using testing::Return; using testing::ReturnArg; @@ -20,6 +21,9 @@ MockSnapshot::MockSnapshot() { MockSnapshot::~MockSnapshot() = default; MockLoader::MockLoader() { + ON_CALL(*this, threadsafeSnapshot()).WillByDefault(testing::Invoke([]() { + return std::make_shared>(); + })); ON_CALL(*this, snapshot()).WillByDefault(ReturnRef(snapshot_)); ON_CALL(*this, getRootScope()).WillByDefault(ReturnRef(store_)); } From b7a4788703009d9ce6254798a5b5606b501403c2 Mon Sep 17 00:00:00 2001 From: antonio Date: Thu, 16 Jul 2020 17:32:19 -0400 Subject: [PATCH 658/909] test: Randomize the order in which simulated timers scheduled for the same monotonic time execute. (#11962) The order in which real timers scheduled for the exact same wall time execute is non-deterministic. Simulated timer behavior should be as similar as possible to that of real timers.
Signed-off-by: Antonio Vicente --- test/test_common/BUILD | 1 + test/test_common/simulated_time_system.cc | 97 +++++++------------ test/test_common/simulated_time_system.h | 45 ++++++--- .../test_common/simulated_time_system_test.cc | 53 +++++++++- 4 files changed, 123 insertions(+), 73 deletions(-) diff --git a/test/test_common/BUILD b/test/test_common/BUILD index d2a790f05c3d..71693951076f 100644 --- a/test/test_common/BUILD +++ b/test/test_common/BUILD @@ -268,6 +268,7 @@ envoy_cc_test_library( deps = [ ":only_one_thread_lib", ":test_time_system_interface", + ":utility_lib", "//source/common/event:event_impl_base_lib", "//source/common/event:real_time_system_lib", "//source/common/event:timer_lib", diff --git a/test/test_common/simulated_time_system.cc b/test/test_common/simulated_time_system.cc index ecc707b699d6..d50560a1e3af 100644 --- a/test/test_common/simulated_time_system.cc +++ b/test/test_common/simulated_time_system.cc @@ -52,8 +52,7 @@ class SimulatedTimeSystemHelper::Alarm : public Timer { public: Alarm(SimulatedTimeSystemHelper& time_system, CallbackScheduler& cb_scheduler, TimerCb cb) : cb_(cb_scheduler.createSchedulableCallback([this, cb] { runAlarm(cb); })), - time_system_(time_system), index_(time_system.nextIndex()), armed_(false), pending_(false) { - } + time_system_(time_system), armed_(false), pending_(false) {} ~Alarm() override; @@ -72,10 +71,6 @@ class SimulatedTimeSystemHelper::Alarm : public Timer { void disableTimerLockHeld() ABSL_EXCLUSIVE_LOCKS_REQUIRED(time_system_.mutex_); - void setTimeLockHeld(MonotonicTime time) ABSL_EXCLUSIVE_LOCKS_REQUIRED(time_system_.mutex_) { - time_ = time; - } - /** * Activates the timer so it will be run the next time the libevent loop is run, * typically via Dispatcher::run(). @@ -98,17 +93,9 @@ class SimulatedTimeSystemHelper::Alarm : public Timer { cb_->scheduleCallbackCurrentIteration(); } - MonotonicTime time() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(time_system_.mutex_) { - ASSERT(armed_); - return time_; - } - SimulatedTimeSystemHelper& timeSystem() { return time_system_; } - uint64_t index() const { return index_; } private: - friend SimulatedTimeSystemHelper::CompareAlarms; - void runAlarm(TimerCb cb) { { absl::MutexLock lock(&time_system_.mutex_); @@ -122,26 +109,10 @@ class SimulatedTimeSystemHelper::Alarm : public Timer { SchedulableCallbackPtr cb_; SimulatedTimeSystemHelper& time_system_; - MonotonicTime time_ ABSL_GUARDED_BY(time_system_.mutex_); - const uint64_t index_; bool armed_ ABSL_GUARDED_BY(time_system_.mutex_); bool pending_ ABSL_GUARDED_BY(time_system_.mutex_); }; -// Compare two alarms, based on wakeup time and insertion order. Returns true if -// a comes before b. -bool SimulatedTimeSystemHelper::CompareAlarms::operator()(const Alarm* a, const Alarm* b) const - ABSL_EXCLUSIVE_LOCKS_REQUIRED(a->time_system_.mutex_, b->time_system_.mutex_) { - if (a != b) { - if (a->time() < b->time()) { - return true; - } else if (a->time() == b->time() && a->index() < b->index()) { - return true; - } - } - return false; -}; - // Each timer is maintained and ordered by a common TimeSystem, but is // associated with a scheduler. 
The scheduler creates the timers with a libevent // context, so that the timer callbacks can be executed via Dispatcher::run() in @@ -173,7 +144,7 @@ void SimulatedTimeSystemHelper::Alarm::Alarm::disableTimer() { void SimulatedTimeSystemHelper::Alarm::Alarm::disableTimerLockHeld() { if (armed_) { - time_system_.removeAlarmLockHeld(this); + time_system_.removeAlarmLockHeld(*this); armed_ = false; } if (pending_) { @@ -200,7 +171,7 @@ void SimulatedTimeSystemHelper::Alarm::Alarm::enableHRTimer( if (duration.count() == 0) { activateLockHeld(); } else { - time_system_.addAlarmLockHeld(this, duration); + time_system_.addAlarmLockHeld(*this, duration); } } @@ -216,7 +187,7 @@ static int instance_count = 0; // will march forward only by calling.advanceTimeAsync(). SimulatedTimeSystemHelper::SimulatedTimeSystemHelper() : monotonic_time_(MonotonicTime(std::chrono::seconds(0))), - system_time_(real_time_source_.systemTime()), index_(0), pending_alarms_(0) { + system_time_(real_time_source_.systemTime()), pending_alarms_(0) { ++instance_count; ASSERT(instance_count <= 1); } @@ -291,8 +262,8 @@ Thread::CondVar::WaitStatus SimulatedTimeSystemHelper::waitFor(Thread::MutexBasi MonotonicTime next_wakeup = end_time; if (!alarms_.empty()) { // If there's another alarm pending, sleep forward to it. - Alarm* alarm = (*alarms_.begin()); - next_wakeup = std::min(alarmTimeLockHeld(alarm), next_wakeup); + const AlarmRegistration& alarm_registration = *alarms_.begin(); + next_wakeup = std::min(alarm_registration.time_, next_wakeup); } setMonotonicTimeLockHeld(next_wakeup); waitForNoPendingLockHeld(); @@ -308,34 +279,40 @@ Thread::CondVar::WaitStatus SimulatedTimeSystemHelper::waitFor(Thread::MutexBasi return Thread::CondVar::WaitStatus::Timeout; } -MonotonicTime -SimulatedTimeSystemHelper::alarmTimeLockHeld(Alarm* alarm) ABSL_NO_THREAD_SAFETY_ANALYSIS { +void SimulatedTimeSystemHelper::alarmActivateLockHeld(Alarm& alarm) ABSL_NO_THREAD_SAFETY_ANALYSIS { // We disable thread-safety analysis as the compiler can't detect that - // alarm_->timeSystem() == this, so we must be holding the right mutex. - ASSERT(&(alarm->timeSystem()) == this); - return alarm->time(); + // alarm_.timeSystem() == this, so we must be holding the right mutex. + ASSERT(&(alarm.timeSystem()) == this); + alarm.activateLockHeld(); } -void SimulatedTimeSystemHelper::alarmActivateLockHeld(Alarm* alarm) ABSL_NO_THREAD_SAFETY_ANALYSIS { - // We disable thread-safety analysis as the compiler can't detect that - // alarm_->timeSystem() == this, so we must be holding the right mutex. - ASSERT(&(alarm->timeSystem()) == this); - alarm->activateLockHeld(); +void SimulatedTimeSystemHelper::addAlarmLockHeld( + Alarm& alarm, const std::chrono::microseconds& duration) ABSL_NO_THREAD_SAFETY_ANALYSIS { + ASSERT(&(alarm.timeSystem()) == this); + ASSERT(alarms_.size() == alarm_registrations_map_.size()); + ASSERT(alarm_registrations_map_.find(&alarm) == alarm_registrations_map_.end()); + + auto insert_result = alarms_.insert({monotonic_time_ + duration, random_source_.random(), alarm}); + ASSERT(insert_result.second); + alarm_registrations_map_.emplace(&alarm, insert_result.first); + + // Sanity check that the parallel data structures used for alarm registration have the same number + // of entries. 
+ ASSERT(alarms_.size() == alarm_registrations_map_.size()); } -int64_t SimulatedTimeSystemHelper::nextIndex() { - absl::MutexLock lock(&mutex_); - return index_++; -} +void SimulatedTimeSystemHelper::removeAlarmLockHeld(Alarm& alarm) { + ASSERT(alarms_.size() == alarm_registrations_map_.size()); -void SimulatedTimeSystemHelper::addAlarmLockHeld( - Alarm* alarm, const std::chrono::microseconds& duration) ABSL_NO_THREAD_SAFETY_ANALYSIS { - ASSERT(&(alarm->timeSystem()) == this); - alarm->setTimeLockHeld(monotonic_time_ + duration); - alarms_.insert(alarm); -} + auto it = alarm_registrations_map_.find(&alarm); + ASSERT(it != alarm_registrations_map_.end()); + alarms_.erase(it->second); + alarm_registrations_map_.erase(it); -void SimulatedTimeSystemHelper::removeAlarmLockHeld(Alarm* alarm) { alarms_.erase(alarm); } + // Sanity check that the parallel data structures used for alarm registration have the same number + // of entries. + ASSERT(alarms_.size() == alarm_registrations_map_.size()); +} SchedulerPtr SimulatedTimeSystemHelper::createScheduler(Scheduler& /*base_scheduler*/, CallbackScheduler& cb_scheduler) { @@ -354,9 +331,8 @@ void SimulatedTimeSystemHelper::setMonotonicTimeLockHeld(const MonotonicTime& mo // or removed during the call to activate() so it would not be correct to // range-iterate over the set. while (!alarms_.empty()) { - AlarmSet::iterator pos = alarms_.begin(); - Alarm* alarm = *pos; - MonotonicTime alarm_time = alarmTimeLockHeld(alarm); + const AlarmRegistration& alarm_registration = *alarms_.begin(); + MonotonicTime alarm_time = alarm_registration.time_; if (alarm_time > monotonic_time) { break; } @@ -364,7 +340,8 @@ void SimulatedTimeSystemHelper::setMonotonicTimeLockHeld(const MonotonicTime& mo system_time_ += std::chrono::duration_cast(alarm_time - monotonic_time_); monotonic_time_ = alarm_time; - alarms_.erase(pos); + Alarm& alarm = alarm_registration.alarm_; + removeAlarmLockHeld(alarm); alarmActivateLockHeld(alarm); } system_time_ += diff --git a/test/test_common/simulated_time_system.h b/test/test_common/simulated_time_system.h index 521beaf3f28b..3b4c938d0e9e 100644 --- a/test/test_common/simulated_time_system.h +++ b/test/test_common/simulated_time_system.h @@ -8,6 +8,9 @@ #include "test/test_common/only_one_thread.h" #include "test/test_common/test_time_system.h" +#include "test/test_common/utility.h" + +#include "absl/container/flat_hash_map.h" namespace Envoy { namespace Event { @@ -64,10 +67,31 @@ class SimulatedTimeSystemHelper : public TestTimeSystem { class SimulatedScheduler; class Alarm; friend class Alarm; // Needed to reference mutex for thread annotations. - struct CompareAlarms { - bool operator()(const Alarm* a, const Alarm* b) const; + struct AlarmRegistration { + AlarmRegistration(MonotonicTime time, uint64_t randomness, Alarm& alarm) + : time_(time), randomness_(randomness), alarm_(alarm) {} + + MonotonicTime time_; + // Random tie-breaker for alarms scheduled for the same monotonic time used to mimic + // non-deterministic execution of real alarms scheduled for the same wall time. 
+ uint64_t randomness_; + Alarm& alarm_; + + friend bool operator<(const AlarmRegistration& lhs, const AlarmRegistration& rhs) { + if (lhs.time_ != rhs.time_) { + return lhs.time_ < rhs.time_; + } + if (lhs.randomness_ != rhs.randomness_) { + return lhs.randomness_ < rhs.randomness_; + } + // Out of paranoia, use pointer comparison on the alarms as a final tie-breaker but also + // ASSERT that this branch isn't hit in debug modes since in practice the randomness_ + // associated with two registrations should never be equal. + ASSERT(false, "Alarm registration randomness_ for two alarms should never be equal."); + return &lhs.alarm_ < &rhs.alarm_; + } }; - using AlarmSet = std::set; + using AlarmSet = std::set; /** * Sets the time forward monotonically. If the supplied argument moves @@ -80,17 +104,12 @@ class SimulatedTimeSystemHelper : public TestTimeSystem { void setMonotonicTimeLockHeld(const MonotonicTime& monotonic_time) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_); - MonotonicTime alarmTimeLockHeld(Alarm* alarm) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_); - void alarmActivateLockHeld(Alarm* alarm) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_); - - // The simulation keeps a unique ID for each alarm to act as a deterministic - // tie-breaker for alarm-ordering. - int64_t nextIndex(); + void alarmActivateLockHeld(Alarm& alarm) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_); // Adds/removes an alarm. - void addAlarmLockHeld(Alarm*, const std::chrono::microseconds& duration) + void addAlarmLockHeld(Alarm&, const std::chrono::microseconds& duration) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_); - void removeAlarmLockHeld(Alarm*) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_); + void removeAlarmLockHeld(Alarm&) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_); // Keeps track of how many alarms have been activated but not yet called, // which helps waitFor() determine when to give up and declare a timeout. @@ -105,8 +124,10 @@ class SimulatedTimeSystemHelper : public TestTimeSystem { RealTimeSource real_time_source_; // Used to initialize monotonic_time_ and system_time_; MonotonicTime monotonic_time_ ABSL_GUARDED_BY(mutex_); SystemTime system_time_ ABSL_GUARDED_BY(mutex_); + TestRandomGenerator random_source_ ABSL_GUARDED_BY(mutex_); AlarmSet alarms_ ABSL_GUARDED_BY(mutex_); - uint64_t index_ ABSL_GUARDED_BY(mutex_); + absl::flat_hash_map + alarm_registrations_map_ ABSL_GUARDED_BY(mutex_); mutable absl::Mutex mutex_; uint32_t pending_alarms_ ABSL_GUARDED_BY(mutex_); Thread::OnlyOneThread only_one_thread_; diff --git a/test/test_common/simulated_time_system_test.cc b/test/test_common/simulated_time_system_test.cc index 5eba67a83bb8..9fd5314c9b92 100644 --- a/test/test_common/simulated_time_system_test.cc +++ b/test/test_common/simulated_time_system_test.cc @@ -76,7 +76,7 @@ TEST_F(SimulatedTimeSystemTest, AdvanceTimeAsync) { EXPECT_EQ(start_system_time_ + std::chrono::milliseconds(5), time_system_.systemTime()); } -TEST_F(SimulatedTimeSystemTest, TimerOrdering) { +TEST_F(SimulatedTimeSystemTest, TimerTotalOrdering) { trackPrepareCalls(); addTask(0, '0'); @@ -90,6 +90,57 @@ TEST_F(SimulatedTimeSystemTest, TimerOrdering) { EXPECT_EQ("p012", output_); } +TEST_F(SimulatedTimeSystemTest, TimerPartialOrdering) { + trackPrepareCalls(); + + std::set outputs; + for (int i = 0; i < 100; ++i) { + addTask(0, '0'); + addTask(1, '1'); + addTask(1, '2'); + addTask(3, '3'); + EXPECT_EQ(4, timers_.size()); + + advanceMsAndLoop(5); + + outputs.insert(output_); + + // Cleanup before the next iteration. 
+ output_.clear(); + timers_.clear(); + } + + // Execution order of timers 1 and 2 is non-deterministic because the two timers were scheduled + // for the same time. Verify that both orderings were observed. + EXPECT_THAT(outputs, testing::ElementsAre("p0123", "p0213")); +} + +TEST_F(SimulatedTimeSystemTest, TimerPartialOrdering2) { + trackPrepareCalls(); + + std::set<std::string> outputs; + for (int i = 0; i < 100; ++i) { + addTask(0, '0'); + addTask(15, '1'); + advanceMsAndLoop(10); + + // Timer 1 has 5ms remaining, so timer 2 ends up scheduled at the same monotonic time as 1. + addTask(5, '2'); + addTask(6, '3'); + advanceMsAndLoop(10); + + outputs.insert(output_); + + // Cleanup before the next iteration. + output_.clear(); + timers_.clear(); + } + + // Execution order of timers 1 and 2 is non-deterministic because the two timers were scheduled + // for the same time. Verify that both orderings were observed. + EXPECT_THAT(outputs, testing::ElementsAre("p0p123", "p0p213")); +} + // Timers that are scheduled to execute but are disabled first do not trigger. TEST_F(SimulatedTimeSystemTest, TimerOrderAndDisableTimer) { trackPrepareCalls(); From 568b75960dcf0650cf600d5e1181480ee4f519e0 Mon Sep 17 00:00:00 2001 From: asraa Date: Thu, 16 Jul 2020 19:22:10 -0400 Subject: [PATCH 659/909] [http] Initial codec splitting with test parametrization (#10591) This just introduces plumbing and clones the codecs into legacy versions. There are no changes to the codecs at this point (the files are just duplicated and the namespace Legacy is used). Users can set which codecs are used at runtime via the feature new_codec_behavior. This will be the case for roughly 4-6 weeks while the no-exception changes (see the linked issue) are pushed to the new codecs. Format scripts check that the h/1 header and source, h/2 header and source, and h/2 codec tests are kept in sync to enforce that devs who make changes to one codec port them to the old one. Devs will need to update the diff if they are making a change that exists in one and not the other. The format script will tell them what needs to change in the checked-in diff. 
Signed-off-by: Asra Ali --- bazel/BUILD | 5 + bazel/envoy_build_system.bzl | 2 + bazel/envoy_select.bzl | 7 + ci/do_ci.sh | 5 + docs/root/version_history/current.rst | 1 + source/common/http/BUILD | 7 + source/common/http/codec_client.cc | 32 +- source/common/http/conn_manager_utility.cc | 26 +- source/common/http/http1/BUILD | 60 +- source/common/http/http1/codec_impl_legacy.cc | 1246 ++++++++++ source/common/http/http1/codec_impl_legacy.h | 607 +++++ source/common/http/http2/BUILD | 74 +- source/common/http/http2/codec_impl.cc | 10 +- source/common/http/http2/codec_impl.h | 11 +- source/common/http/http2/codec_impl_legacy.cc | 1473 +++++++++++ source/common/http/http2/codec_impl_legacy.h | 602 +++++ source/common/runtime/runtime_features.cc | 1 + .../network/http_connection_manager/BUILD | 2 + .../network/http_connection_manager/config.cc | 37 +- test/common/http/codec_impl_fuzz_test.cc | 24 +- test/common/http/http1/BUILD | 1 + test/common/http/http1/codec_impl_test.cc | 360 +-- test/common/http/http2/BUILD | 62 +- .../http/http2/codec_impl_legacy_test.cc | 2163 +++++++++++++++++ test/common/http/http2/codec_impl_test.cc | 152 +- test/common/http/http2/codec_impl_test_util.h | 109 +- test/common/http/http2/frame_replay_test.cc | 18 +- .../http/http2/request_header_fuzz_test.cc | 2 +- .../http/http2/response_header_fuzz_test.cc | 2 +- test/common/stats/stat_test_utility.h | 5 + test/config/utility.cc | 4 + test/config/utility.h | 3 + .../http_connection_manager/config_test.cc | 60 + test/integration/BUILD | 7 + .../api_version_integration_test.cc | 8 +- test/integration/fake_upstream.cc | 38 +- test/integration/integration.cc | 5 + test/integration/integration.h | 1 + tools/code_format/check_format.py | 51 + .../codec_diffs/http1_codec_impl_cc | 35 + .../codec_diffs/http1_codec_impl_h | 130 + .../codec_diffs/http2_codec_impl_cc | 34 + .../codec_diffs/http2_codec_impl_h | 77 + 43 files changed, 7201 insertions(+), 358 deletions(-) create mode 100644 source/common/http/http1/codec_impl_legacy.cc create mode 100644 source/common/http/http1/codec_impl_legacy.h create mode 100644 source/common/http/http2/codec_impl_legacy.cc create mode 100644 source/common/http/http2/codec_impl_legacy.h create mode 100644 test/common/http/http2/codec_impl_legacy_test.cc create mode 100644 tools/code_format/codec_diffs/http1_codec_impl_cc create mode 100644 tools/code_format/codec_diffs/http1_codec_impl_h create mode 100644 tools/code_format/codec_diffs/http2_codec_impl_cc create mode 100644 tools/code_format/codec_diffs/http2_codec_impl_h diff --git a/bazel/BUILD b/bazel/BUILD index 982d3fa3ac70..97d9d79fb6be 100644 --- a/bazel/BUILD +++ b/bazel/BUILD @@ -199,6 +199,11 @@ config_setting( values = {"define": "path_normalization_by_default=true"}, ) +config_setting( + name = "enable_legacy_codecs_in_integration_tests", + values = {"define": "use_legacy_codecs_in_integration_tests=true"}, +) + cc_proto_library( name = "grpc_health_proto", deps = ["@com_github_grpc_grpc//src/proto/grpc/health/v1:_health_proto_only"], diff --git a/bazel/envoy_build_system.bzl b/bazel/envoy_build_system.bzl index 70ef3df4f1d2..0f062cbfe8d8 100644 --- a/bazel/envoy_build_system.bzl +++ b/bazel/envoy_build_system.bzl @@ -18,6 +18,7 @@ load( _envoy_select_boringssl = "envoy_select_boringssl", _envoy_select_google_grpc = "envoy_select_google_grpc", _envoy_select_hot_restart = "envoy_select_hot_restart", + _envoy_select_legacy_codecs_in_integration_tests = "envoy_select_legacy_codecs_in_integration_tests", ) load( ":envoy_test.bzl", @@ 
-168,6 +169,7 @@ def envoy_google_grpc_external_deps(): envoy_select_boringssl = _envoy_select_boringssl envoy_select_google_grpc = _envoy_select_google_grpc envoy_select_hot_restart = _envoy_select_hot_restart +envoy_select_legacy_codecs_in_integration_tests = _envoy_select_legacy_codecs_in_integration_tests # Binary wrappers (from envoy_binary.bzl) envoy_cc_binary = _envoy_cc_binary diff --git a/bazel/envoy_select.bzl b/bazel/envoy_select.bzl index f2167f29bec4..ba7704ceb02f 100644 --- a/bazel/envoy_select.bzl +++ b/bazel/envoy_select.bzl @@ -31,3 +31,10 @@ def envoy_select_hot_restart(xs, repository = ""): repository + "//bazel:disable_hot_restart_or_apple": [], "//conditions:default": xs, }) + +# Select the given values if use legacy codecs in test is on in the current build. +def envoy_select_legacy_codecs_in_integration_tests(xs, repository = ""): + return select({ + repository + "//bazel:enable_legacy_codecs_in_integration_tests": xs, + "//conditions:default": [], + }) diff --git a/ci/do_ci.sh b/ci/do_ci.sh index d13c7be545bd..a8063dd4d7bc 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -220,6 +220,7 @@ elif [[ "$CI_TARGET" == "bazel.compile_time_options" ]]; then --define quiche=enabled \ --define path_normalization_by_default=true \ --define deprecated_features=disabled \ + --define use_legacy_codecs_in_integration_tests=true \ --define --cxxopt=-std=c++14 \ " ENVOY_STDLIB="${ENVOY_STDLIB:-libstdc++}" @@ -237,6 +238,10 @@ elif [[ "$CI_TARGET" == "bazel.compile_time_options" ]]; then echo "Building and testing ${TEST_TARGETS}" bazel test ${BAZEL_BUILD_OPTIONS} ${COMPILE_TIME_OPTIONS} -c dbg ${TEST_TARGETS} --test_tag_filters=-nofips --build_tests_only + # Legacy codecs "--define legacy_codecs_in_integration_tests=true" should also be tested in + # integration tests with asan. + bazel test ${BAZEL_BUILD_OPTIONS} ${COMPILE_TIME_OPTIONS} -c dbg @envoy//test/integration/... --config=clang-asan --build_tests_only + # "--define log_debug_assert_in_release=enabled" must be tested with a release build, so run only # these tests under "-c opt" to save time in CI. bazel test ${BAZEL_BUILD_OPTIONS} ${COMPILE_TIME_OPTIONS} -c opt @envoy//test/common/common:assert_test @envoy//test/server:server_test diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index eb3465f99eeb..42346d258da0 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -36,6 +36,7 @@ New Features * ext_authz filter: added support for emitting dynamic metadata for both :ref:`HTTP ` and :ref:`network ` filters. * grpc-json: support specifying `response_body` field in for `google.api.HttpBody` message. +* http: introduced new HTTP/1 and HTTP/2 codec implementations that will remove the use of exceptions for control flow due to high risk factors and instead use error statuses. The old behavior is deprecated, but can be used during the removal period by setting the runtime feature `envoy.reloadable_features.new_codec_behavior` to false. The removal period will be one month. * load balancer: added a :ref:`configuration` option to specify the active request bias used by the least request load balancer. * tap: added :ref:`generic body matcher` to scan http requests and responses for text or hex patterns. 
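A minimal usage sketch for the two switches above (the test target pattern is illustrative, and the layered_runtime override is Envoy's standard runtime mechanism rather than something added by this patch): the legacy codecs can be exercised in integration tests with the new bazel define, and the old codec behavior can be selected at runtime by turning the feature flag off.

    # Run the integration tests against the legacy (exception-based) codecs, mirroring the
    # CI invocation added to ci/do_ci.sh above; the target pattern here is illustrative.
    bazel test --define use_legacy_codecs_in_integration_tests=true //test/integration/...

    # To fall back to the legacy codecs at runtime, set the feature flag to false, for example
    # through a static_layer entry in the bootstrap layered_runtime:
    #   envoy.reloadable_features.new_codec_behavior: false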
diff --git a/source/common/http/BUILD b/source/common/http/BUILD index 5a07a56b9f00..041c3508d650 100644 --- a/source/common/http/BUILD +++ b/source/common/http/BUILD @@ -57,16 +57,21 @@ envoy_cc_library( "//include/envoy/http:codec_interface", "//include/envoy/network:connection_interface", "//include/envoy/network:filter_interface", + "//include/envoy/runtime:runtime_interface", "//source/common/common:assert_lib", "//source/common/common:enum_to_int", "//source/common/common:linked_object", "//source/common/common:minimal_logger_lib", "//source/common/config:utility_lib", + "//source/common/http/http1:codec_legacy_lib", "//source/common/http/http1:codec_lib", + "//source/common/http/http2:codec_legacy_lib", "//source/common/http/http2:codec_lib", "//source/common/http/http3:quic_codec_factory_lib", "//source/common/http/http3:well_known_names", "//source/common/network:filter_lib", + "//source/common/runtime:runtime_features_lib", + "//source/common/runtime:runtime_lib", ], ) @@ -207,7 +212,9 @@ envoy_cc_library( "//source/common/common:scope_tracker", "//source/common/common:utility_lib", "//source/common/config:utility_lib", + "//source/common/http/http1:codec_legacy_lib", "//source/common/http/http1:codec_lib", + "//source/common/http/http2:codec_legacy_lib", "//source/common/http/http2:codec_lib", "//source/common/http/http3:quic_codec_factory_lib", "//source/common/http/http3:well_known_names", diff --git a/source/common/http/codec_client.cc b/source/common/http/codec_client.cc index 557b5757414a..e3fbc23ef921 100644 --- a/source/common/http/codec_client.cc +++ b/source/common/http/codec_client.cc @@ -9,11 +9,15 @@ #include "common/config/utility.h" #include "common/http/exception.h" #include "common/http/http1/codec_impl.h" +#include "common/http/http1/codec_impl_legacy.h" #include "common/http/http2/codec_impl.h" +#include "common/http/http2/codec_impl_legacy.h" #include "common/http/http3/quic_codec_factory.h" #include "common/http/http3/well_known_names.h" #include "common/http/status.h" #include "common/http/utility.h" +#include "common/runtime/runtime_features.h" +#include "common/runtime/runtime_impl.h" namespace Envoy { namespace Http { @@ -150,16 +154,29 @@ CodecClientProd::CodecClientProd(Type type, Network::ClientConnectionPtr&& conne switch (type) { case Type::HTTP1: { - codec_ = std::make_unique( - *connection_, host->cluster().http1CodecStats(), *this, host->cluster().http1Settings(), - host->cluster().maxResponseHeadersCount()); + if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.new_codec_behavior")) { + codec_ = std::make_unique( + *connection_, host->cluster().http1CodecStats(), *this, host->cluster().http1Settings(), + host->cluster().maxResponseHeadersCount()); + } else { + codec_ = std::make_unique( + *connection_, host->cluster().http1CodecStats(), *this, host->cluster().http1Settings(), + host->cluster().maxResponseHeadersCount()); + } break; } case Type::HTTP2: { - codec_ = std::make_unique( - *connection_, *this, host->cluster().http2CodecStats(), host->cluster().http2Options(), - Http::DEFAULT_MAX_REQUEST_HEADERS_KB, host->cluster().maxResponseHeadersCount(), - Http2::ProdNghttp2SessionFactory::get()); + if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.new_codec_behavior")) { + codec_ = std::make_unique( + *connection_, *this, host->cluster().http2CodecStats(), host->cluster().http2Options(), + Http::DEFAULT_MAX_REQUEST_HEADERS_KB, host->cluster().maxResponseHeadersCount(), + Http2::ProdNghttp2SessionFactory::get()); + } else { + 
codec_ = std::make_unique( + *connection_, *this, host->cluster().http2CodecStats(), host->cluster().http2Options(), + Http::DEFAULT_MAX_REQUEST_HEADERS_KB, host->cluster().maxResponseHeadersCount(), + Http2::ProdNghttp2SessionFactory::get()); + } break; } case Type::HTTP3: { @@ -167,6 +184,7 @@ CodecClientProd::CodecClientProd(Type type, Network::ClientConnectionPtr&& conne Config::Utility::getAndCheckFactoryByName( Http::QuicCodecNames::get().Quiche) .createQuicClientConnection(*connection_, *this)); + break; } } } diff --git a/source/common/http/conn_manager_utility.cc b/source/common/http/conn_manager_utility.cc index 65b5af861cc1..c8ce01993cfa 100644 --- a/source/common/http/conn_manager_utility.cc +++ b/source/common/http/conn_manager_utility.cc @@ -11,7 +11,9 @@ #include "common/http/header_utility.h" #include "common/http/headers.h" #include "common/http/http1/codec_impl.h" +#include "common/http/http1/codec_impl_legacy.h" #include "common/http/http2/codec_impl.h" +#include "common/http/http2/codec_impl_legacy.h" #include "common/http/path_utility.h" #include "common/http/utility.h" #include "common/network/utility.h" @@ -51,14 +53,26 @@ ServerConnectionPtr ConnectionManagerUtility::autoCreateCodec( headers_with_underscores_action) { if (determineNextProtocol(connection, data) == Utility::AlpnNames::get().Http2) { Http2::CodecStats& stats = Http2::CodecStats::atomicGet(http2_codec_stats, scope); - return std::make_unique( - connection, callbacks, stats, http2_options, max_request_headers_kb, - max_request_headers_count, headers_with_underscores_action); + if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.new_codec_behavior")) { + return std::make_unique( + connection, callbacks, stats, http2_options, max_request_headers_kb, + max_request_headers_count, headers_with_underscores_action); + } else { + return std::make_unique( + connection, callbacks, stats, http2_options, max_request_headers_kb, + max_request_headers_count, headers_with_underscores_action); + } } else { Http1::CodecStats& stats = Http1::CodecStats::atomicGet(http1_codec_stats, scope); - return std::make_unique( - connection, stats, callbacks, http1_settings, max_request_headers_kb, - max_request_headers_count, headers_with_underscores_action); + if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.new_codec_behavior")) { + return std::make_unique( + connection, stats, callbacks, http1_settings, max_request_headers_kb, + max_request_headers_count, headers_with_underscores_action); + } else { + return std::make_unique( + connection, stats, callbacks, http1_settings, max_request_headers_kb, + max_request_headers_count, headers_with_underscores_action); + } } } diff --git a/source/common/http/http1/BUILD b/source/common/http/http1/BUILD index 278e9adaaae5..9451c4e29ae3 100644 --- a/source/common/http/http1/BUILD +++ b/source/common/http/http1/BUILD @@ -24,36 +24,46 @@ envoy_cc_library( ], ) +CODEC_LIB_DEPS = [ + ":codec_stats_lib", + ":header_formatter_lib", + "//include/envoy/buffer:buffer_interface", + "//include/envoy/http:codec_interface", + "//include/envoy/http:header_map_interface", + "//include/envoy/network:connection_interface", + "//source/common/buffer:buffer_lib", + "//source/common/buffer:watermark_buffer_lib", + "//source/common/common:assert_lib", + "//source/common/common:statusor_lib", + "//source/common/common:utility_lib", + "//source/common/grpc:common_lib", + "//source/common/http:codec_helper_lib", + "//source/common/http:codes_lib", + "//source/common/http:exception_lib", + 
"//source/common/http:header_map_lib", + "//source/common/http:header_utility_lib", + "//source/common/http:headers_lib", + "//source/common/http:status_lib", + "//source/common/http:url_utility_lib", + "//source/common/http:utility_lib", + "//source/common/runtime:runtime_features_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", +] + envoy_cc_library( name = "codec_lib", srcs = ["codec_impl.cc"], hdrs = ["codec_impl.h"], external_deps = ["http_parser"], - deps = [ - ":codec_stats_lib", - ":header_formatter_lib", - "//include/envoy/buffer:buffer_interface", - "//include/envoy/http:codec_interface", - "//include/envoy/http:header_map_interface", - "//include/envoy/network:connection_interface", - "//source/common/buffer:buffer_lib", - "//source/common/buffer:watermark_buffer_lib", - "//source/common/common:assert_lib", - "//source/common/common:statusor_lib", - "//source/common/common:utility_lib", - "//source/common/grpc:common_lib", - "//source/common/http:codec_helper_lib", - "//source/common/http:codes_lib", - "//source/common/http:exception_lib", - "//source/common/http:header_map_lib", - "//source/common/http:header_utility_lib", - "//source/common/http:headers_lib", - "//source/common/http:status_lib", - "//source/common/http:url_utility_lib", - "//source/common/http:utility_lib", - "//source/common/runtime:runtime_features_lib", - "@envoy_api//envoy/config/core/v3:pkg_cc_proto", - ], + deps = CODEC_LIB_DEPS, +) + +envoy_cc_library( + name = "codec_legacy_lib", + srcs = ["codec_impl_legacy.cc"], + hdrs = ["codec_impl_legacy.h"], + external_deps = ["http_parser"], + deps = CODEC_LIB_DEPS, ) envoy_cc_library( diff --git a/source/common/http/http1/codec_impl_legacy.cc b/source/common/http/http1/codec_impl_legacy.cc new file mode 100644 index 000000000000..d41d0a41401e --- /dev/null +++ b/source/common/http/http1/codec_impl_legacy.cc @@ -0,0 +1,1246 @@ +#include "common/http/http1/codec_impl_legacy.h" + +#include +#include +#include + +#include "envoy/buffer/buffer.h" +#include "envoy/http/codec.h" +#include "envoy/http/header_map.h" +#include "envoy/network/connection.h" + +#include "common/common/enum_to_int.h" +#include "common/common/utility.h" +#include "common/grpc/common.h" +#include "common/http/exception.h" +#include "common/http/header_utility.h" +#include "common/http/headers.h" +#include "common/http/http1/header_formatter.h" +#include "common/http/url_utility.h" +#include "common/http/utility.h" +#include "common/runtime/runtime_features.h" + +#include "absl/container/fixed_array.h" +#include "absl/strings/ascii.h" + +namespace Envoy { +namespace Http { +namespace Legacy { +namespace Http1 { +namespace { + +struct Http1ResponseCodeDetailValues { + const absl::string_view TooManyHeaders = "http1.too_many_headers"; + const absl::string_view HeadersTooLarge = "http1.headers_too_large"; + const absl::string_view HttpCodecError = "http1.codec_error"; + const absl::string_view InvalidCharacters = "http1.invalid_characters"; + const absl::string_view ConnectionHeaderSanitization = "http1.connection_header_rejected"; + const absl::string_view InvalidUrl = "http1.invalid_url"; + const absl::string_view InvalidTransferEncoding = "http1.invalid_transfer_encoding"; + const absl::string_view BodyDisallowed = "http1.body_disallowed"; + const absl::string_view TransferEncodingNotAllowed = "http1.transfer_encoding_not_allowed"; + const absl::string_view ContentLengthNotAllowed = "http1.content_length_not_allowed"; +}; + +struct Http1HeaderTypesValues { + const absl::string_view 
Headers = "headers"; + const absl::string_view Trailers = "trailers"; +}; + +using Http1ResponseCodeDetails = ConstSingleton; +using Http1HeaderTypes = ConstSingleton; +using Http::Http1::CodecStats; +using Http::Http1::HeaderKeyFormatter; +using Http::Http1::HeaderKeyFormatterPtr; +using Http::Http1::ProperCaseHeaderKeyFormatter; + +const StringUtil::CaseUnorderedSet& caseUnorderdSetContainingUpgradeAndHttp2Settings() { + CONSTRUCT_ON_FIRST_USE(StringUtil::CaseUnorderedSet, + Http::Headers::get().ConnectionValues.Upgrade, + Http::Headers::get().ConnectionValues.Http2Settings); +} + +HeaderKeyFormatterPtr formatter(const Http::Http1Settings& settings) { + if (settings.header_key_format_ == Http1Settings::HeaderKeyFormat::ProperCase) { + return std::make_unique(); + } + + return nullptr; +} + +} // namespace + +const std::string StreamEncoderImpl::CRLF = "\r\n"; +// Last chunk as defined here https://tools.ietf.org/html/rfc7230#section-4.1 +const std::string StreamEncoderImpl::LAST_CHUNK = "0\r\n"; + +StreamEncoderImpl::StreamEncoderImpl(ConnectionImpl& connection, + HeaderKeyFormatter* header_key_formatter) + : connection_(connection), disable_chunk_encoding_(false), chunk_encoding_(true), + is_response_to_head_request_(false), is_response_to_connect_request_(false), + header_key_formatter_(header_key_formatter) { + if (connection_.connection().aboveHighWatermark()) { + runHighWatermarkCallbacks(); + } +} + +void StreamEncoderImpl::encodeHeader(const char* key, uint32_t key_size, const char* value, + uint32_t value_size) { + + ASSERT(key_size > 0); + + connection_.copyToBuffer(key, key_size); + connection_.addCharToBuffer(':'); + connection_.addCharToBuffer(' '); + connection_.copyToBuffer(value, value_size); + connection_.addToBuffer(CRLF); +} +void StreamEncoderImpl::encodeHeader(absl::string_view key, absl::string_view value) { + this->encodeHeader(key.data(), key.size(), value.data(), value.size()); +} + +void StreamEncoderImpl::encodeFormattedHeader(absl::string_view key, absl::string_view value) { + if (header_key_formatter_ != nullptr) { + encodeHeader(header_key_formatter_->format(key), value); + } else { + encodeHeader(key, value); + } +} + +void ResponseEncoderImpl::encode100ContinueHeaders(const ResponseHeaderMap& headers) { + ASSERT(headers.Status()->value() == "100"); + encodeHeaders(headers, false); +} + +void StreamEncoderImpl::encodeHeadersBase(const RequestOrResponseHeaderMap& headers, + absl::optional status, bool end_stream) { + bool saw_content_length = false; + headers.iterate( + [](const HeaderEntry& header, void* context) -> HeaderMap::Iterate { + absl::string_view key_to_use = header.key().getStringView(); + uint32_t key_size_to_use = header.key().size(); + // Translate :authority -> host so that upper layers do not need to deal with this. + if (key_size_to_use > 1 && key_to_use[0] == ':' && key_to_use[1] == 'a') { + key_to_use = absl::string_view(Headers::get().HostLegacy.get()); + key_size_to_use = Headers::get().HostLegacy.get().size(); + } + + // Skip all headers starting with ':' that make it here. + if (key_to_use[0] == ':') { + return HeaderMap::Iterate::Continue; + } + + static_cast(context)->encodeFormattedHeader( + key_to_use, header.value().getStringView()); + + return HeaderMap::Iterate::Continue; + }, + this); + + if (headers.ContentLength()) { + saw_content_length = true; + } + + ASSERT(!headers.TransferEncoding()); + + // Assume we are chunk encoding unless we are passed a content length or this is a header only + // response. 
Upper layers generally should strip transfer-encoding since it only applies to + // HTTP/1.1. The codec will infer it based on the type of response. + // for streaming (e.g. SSE stream sent to hystrix dashboard), we do not want + // chunk transfer encoding but we don't have a content-length so disable_chunk_encoding_ is + // consulted before enabling chunk encoding. + // + // Note that for HEAD requests Envoy does best-effort guessing when there is no + // content-length. If a client makes a HEAD request for an upstream resource + // with no bytes but the upstream response doesn't include "Content-length: 0", + // Envoy will incorrectly assume a subsequent response to GET will be chunk encoded. + if (saw_content_length || disable_chunk_encoding_) { + chunk_encoding_ = false; + } else { + if (status && *status == 100) { + // Make sure we don't serialize chunk information with 100-Continue headers. + chunk_encoding_ = false; + } else if (end_stream && !is_response_to_head_request_) { + // If this is a headers-only stream, append an explicit "Content-Length: 0" unless it's a + // response to a HEAD request. + // For 204s and 1xx where content length is disallowed, don't append the content length but + // also don't chunk encode. + if (!status || (*status >= 200 && *status != 204)) { + encodeFormattedHeader(Headers::get().ContentLength.get(), "0"); + } + chunk_encoding_ = false; + } else if (connection_.protocol() == Protocol::Http10) { + chunk_encoding_ = false; + } else if (status && (*status < 200 || *status == 204) && + connection_.strict1xxAnd204Headers()) { + // TODO(zuercher): when the "envoy.reloadable_features.strict_1xx_and_204_response_headers" + // feature flag is removed, this block can be coalesced with the 100 Continue logic above. + + // For 1xx and 204 responses, do not send the chunked encoding header or enable chunked + // encoding: https://tools.ietf.org/html/rfc7230#section-3.3.1 + chunk_encoding_ = false; + } else { + // For responses to connect requests, do not send the chunked encoding header: + // https://tools.ietf.org/html/rfc7231#section-4.3.6. + if (!is_response_to_connect_request_) { + encodeFormattedHeader(Headers::get().TransferEncoding.get(), + Headers::get().TransferEncodingValues.Chunked); + } + // We do not apply chunk encoding for HTTP upgrades, including CONNECT style upgrades. + // If there is a body in a response on the upgrade path, the chunks will be + // passed through via maybeDirectDispatch so we need to avoid appending + // extra chunk boundaries. + // + // When sending a response to a HEAD request Envoy may send an informational + // "Transfer-Encoding: chunked" header, but should not send a chunk encoded body. + chunk_encoding_ = !Utility::isUpgrade(headers) && !is_response_to_head_request_ && + !is_response_to_connect_request_; + } + } + + connection_.addToBuffer(CRLF); + + if (end_stream) { + endEncode(); + } else { + connection_.flushOutput(); + } +} + +void StreamEncoderImpl::encodeData(Buffer::Instance& data, bool end_stream) { + // end_stream may be indicated with a zero length data buffer. If that is the case, so not + // actually write the zero length buffer out. 
+ if (data.length() > 0) { + if (chunk_encoding_) { + connection_.buffer().add(absl::StrCat(absl::Hex(data.length()), CRLF)); + } + + connection_.buffer().move(data); + + if (chunk_encoding_) { + connection_.buffer().add(CRLF); + } + } + + if (end_stream) { + endEncode(); + } else { + connection_.flushOutput(); + } +} + +void StreamEncoderImpl::encodeTrailersBase(const HeaderMap& trailers) { + if (!connection_.enableTrailers()) { + return endEncode(); + } + // Trailers only matter if it is a chunk transfer encoding + // https://tools.ietf.org/html/rfc7230#section-4.4 + if (chunk_encoding_) { + // Finalize the body + connection_.buffer().add(LAST_CHUNK); + + trailers.iterate( + [](const HeaderEntry& header, void* context) -> HeaderMap::Iterate { + static_cast(context)->encodeFormattedHeader( + header.key().getStringView(), header.value().getStringView()); + return HeaderMap::Iterate::Continue; + }, + this); + + connection_.flushOutput(); + connection_.buffer().add(CRLF); + } + + connection_.flushOutput(); + connection_.onEncodeComplete(); +} + +void StreamEncoderImpl::encodeMetadata(const MetadataMapVector&) { + connection_.stats().metadata_not_supported_error_.inc(); +} + +void StreamEncoderImpl::endEncode() { + if (chunk_encoding_) { + connection_.buffer().add(LAST_CHUNK); + connection_.buffer().add(CRLF); + } + + connection_.flushOutput(true); + connection_.onEncodeComplete(); +} + +void ServerConnectionImpl::maybeAddSentinelBufferFragment(Buffer::WatermarkBuffer& output_buffer) { + if (!flood_protection_) { + return; + } + // It's messy and complicated to try to tag the final write of an HTTP response for response + // tracking for flood protection. Instead, write an empty buffer fragment after the response, + // to allow for tracking. + // When the response is written out, the fragment will be deleted and the counter will be updated + // by ServerConnectionImpl::releaseOutboundResponse() + auto fragment = + Buffer::OwnedBufferFragmentImpl::create(absl::string_view("", 0), response_buffer_releasor_); + output_buffer.addBufferFragment(*fragment.release()); + ASSERT(outbound_responses_ < max_outbound_responses_); + outbound_responses_++; +} + +void ServerConnectionImpl::doFloodProtectionChecks() const { + if (!flood_protection_) { + return; + } + // Before processing another request, make sure that we are below the response flood protection + // threshold. 
+ if (outbound_responses_ >= max_outbound_responses_) { + ENVOY_CONN_LOG(trace, "error accepting request: too many pending responses queued", + connection_); + stats_.response_flood_.inc(); + throw FrameFloodException("Too many responses queued."); + } +} + +void ConnectionImpl::flushOutput(bool end_encode) { + if (end_encode) { + // If this is an HTTP response in ServerConnectionImpl, track outbound responses for flood + // protection + maybeAddSentinelBufferFragment(output_buffer_); + } + connection().write(output_buffer_, false); + ASSERT(0UL == output_buffer_.length()); +} + +void ConnectionImpl::addToBuffer(absl::string_view data) { output_buffer_.add(data); } + +void ConnectionImpl::addCharToBuffer(char c) { output_buffer_.add(&c, 1); } + +void ConnectionImpl::addIntToBuffer(uint64_t i) { output_buffer_.add(absl::StrCat(i)); } + +void ConnectionImpl::copyToBuffer(const char* data, uint64_t length) { + output_buffer_.add(data, length); +} + +void StreamEncoderImpl::resetStream(StreamResetReason reason) { + connection_.onResetStreamBase(reason); +} + +void StreamEncoderImpl::readDisable(bool disable) { + if (disable) { + ++read_disable_calls_; + } else { + ASSERT(read_disable_calls_ != 0); + if (read_disable_calls_ != 0) { + --read_disable_calls_; + } + } + connection_.readDisable(disable); +} + +uint32_t StreamEncoderImpl::bufferLimit() { return connection_.bufferLimit(); } + +const Network::Address::InstanceConstSharedPtr& StreamEncoderImpl::connectionLocalAddress() { + return connection_.connection().localAddress(); +} + +static const char RESPONSE_PREFIX[] = "HTTP/1.1 "; +static const char HTTP_10_RESPONSE_PREFIX[] = "HTTP/1.0 "; + +void ResponseEncoderImpl::encodeHeaders(const ResponseHeaderMap& headers, bool end_stream) { + started_response_ = true; + + // The contract is that client codecs must ensure that :status is present. + ASSERT(headers.Status() != nullptr); + uint64_t numeric_status = Utility::getResponseStatus(headers); + + if (connection_.protocol() == Protocol::Http10 && connection_.supportsHttp10()) { + connection_.copyToBuffer(HTTP_10_RESPONSE_PREFIX, sizeof(HTTP_10_RESPONSE_PREFIX) - 1); + } else { + connection_.copyToBuffer(RESPONSE_PREFIX, sizeof(RESPONSE_PREFIX) - 1); + } + connection_.addIntToBuffer(numeric_status); + connection_.addCharToBuffer(' '); + + const char* status_string = CodeUtility::toString(static_cast(numeric_status)); + uint32_t status_string_len = strlen(status_string); + connection_.copyToBuffer(status_string, status_string_len); + + connection_.addCharToBuffer('\r'); + connection_.addCharToBuffer('\n'); + + if (numeric_status >= 300) { + // Don't do special CONNECT logic if the CONNECT was rejected. + is_response_to_connect_request_ = false; + } + + encodeHeadersBase(headers, absl::make_optional(numeric_status), end_stream); +} + +static const char REQUEST_POSTFIX[] = " HTTP/1.1\r\n"; + +void RequestEncoderImpl::encodeHeaders(const RequestHeaderMap& headers, bool end_stream) { + const HeaderEntry* method = headers.Method(); + const HeaderEntry* path = headers.Path(); + const HeaderEntry* host = headers.Host(); + bool is_connect = HeaderUtility::isConnect(headers); + + if (!method || (!path && !is_connect)) { + // TODO(#10878): This exception does not occur during dispatch and would not be triggered under + // normal circumstances since inputs would fail parsing at ingress. Replace with proper error + // handling when exceptions are removed. Include missing host header for CONNECT. 
+ throw CodecClientException(":method and :path must be specified"); + } + if (method->value() == Headers::get().MethodValues.Head) { + head_request_ = true; + } else if (method->value() == Headers::get().MethodValues.Connect) { + disableChunkEncoding(); + connect_request_ = true; + } + if (Utility::isUpgrade(headers)) { + upgrade_request_ = true; + } + + connection_.copyToBuffer(method->value().getStringView().data(), method->value().size()); + connection_.addCharToBuffer(' '); + if (is_connect) { + connection_.copyToBuffer(host->value().getStringView().data(), host->value().size()); + } else { + connection_.copyToBuffer(path->value().getStringView().data(), path->value().size()); + } + connection_.copyToBuffer(REQUEST_POSTFIX, sizeof(REQUEST_POSTFIX) - 1); + + encodeHeadersBase(headers, absl::nullopt, end_stream); +} + +http_parser_settings ConnectionImpl::settings_{ + [](http_parser* parser) -> int { + static_cast(parser->data)->onMessageBeginBase(); + return 0; + }, + [](http_parser* parser, const char* at, size_t length) -> int { + static_cast(parser->data)->onUrl(at, length); + return 0; + }, + nullptr, // on_status + [](http_parser* parser, const char* at, size_t length) -> int { + static_cast(parser->data)->onHeaderField(at, length); + return 0; + }, + [](http_parser* parser, const char* at, size_t length) -> int { + static_cast(parser->data)->onHeaderValue(at, length); + return 0; + }, + [](http_parser* parser) -> int { + return static_cast(parser->data)->onHeadersCompleteBase(); + }, + [](http_parser* parser, const char* at, size_t length) -> int { + static_cast(parser->data)->bufferBody(at, length); + return 0; + }, + [](http_parser* parser) -> int { + static_cast(parser->data)->onMessageCompleteBase(); + return 0; + }, + [](http_parser* parser) -> int { + // A 0-byte chunk header is used to signal the end of the chunked body. + // When this function is called, http-parser holds the size of the chunk in + // parser->content_length. 
See + // https://github.com/nodejs/http-parser/blob/v2.9.3/http_parser.h#L336 + const bool is_final_chunk = (parser->content_length == 0); + static_cast(parser->data)->onChunkHeader(is_final_chunk); + return 0; + }, + nullptr // on_chunk_complete +}; + +ConnectionImpl::ConnectionImpl(Network::Connection& connection, CodecStats& stats, + http_parser_type type, uint32_t max_headers_kb, + const uint32_t max_headers_count, + HeaderKeyFormatterPtr&& header_key_formatter, bool enable_trailers) + : connection_(connection), stats_(stats), + header_key_formatter_(std::move(header_key_formatter)), processing_trailers_(false), + handling_upgrade_(false), reset_stream_called_(false), deferred_end_stream_headers_(false), + connection_header_sanitization_(Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.connection_header_sanitization")), + enable_trailers_(enable_trailers), + strict_1xx_and_204_headers_(Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.strict_1xx_and_204_response_headers")), + output_buffer_([&]() -> void { this->onBelowLowWatermark(); }, + [&]() -> void { this->onAboveHighWatermark(); }, + []() -> void { /* TODO(adisuissa): Handle overflow watermark */ }), + max_headers_kb_(max_headers_kb), max_headers_count_(max_headers_count) { + output_buffer_.setWatermarks(connection.bufferLimit()); + http_parser_init(&parser_, type); + parser_.data = this; +} + +void ConnectionImpl::completeLastHeader() { + ENVOY_CONN_LOG(trace, "completed header: key={} value={}", connection_, + current_header_field_.getStringView(), current_header_value_.getStringView()); + + checkHeaderNameForUnderscores(); + auto& headers_or_trailers = headersOrTrailers(); + if (!current_header_field_.empty()) { + current_header_field_.inlineTransform([](char c) { return absl::ascii_tolower(c); }); + // Strip trailing whitespace of the current header value if any. Leading whitespace was trimmed + // in ConnectionImpl::onHeaderValue. http_parser does not strip leading or trailing whitespace + // as the spec requires: https://tools.ietf.org/html/rfc7230#section-3.2.4 + current_header_value_.rtrim(); + headers_or_trailers.addViaMove(std::move(current_header_field_), + std::move(current_header_value_)); + } + + // Check if the number of headers exceeds the limit. + if (headers_or_trailers.size() > max_headers_count_) { + error_code_ = Http::Code::RequestHeaderFieldsTooLarge; + sendProtocolError(Http1ResponseCodeDetails::get().TooManyHeaders); + const absl::string_view header_type = + processing_trailers_ ? Http1HeaderTypes::get().Trailers : Http1HeaderTypes::get().Headers; + throw CodecProtocolException(absl::StrCat(header_type, " size exceeds limit")); + } + + header_parsing_state_ = HeaderParsingState::Field; + ASSERT(current_header_field_.empty()); + ASSERT(current_header_value_.empty()); +} + +uint32_t ConnectionImpl::getHeadersSize() { + return current_header_field_.size() + current_header_value_.size() + + headersOrTrailers().byteSize(); +} + +void ConnectionImpl::checkMaxHeadersSize() { + const uint32_t total = getHeadersSize(); + if (total > (max_headers_kb_ * 1024)) { + const absl::string_view header_type = + processing_trailers_ ? 
Http1HeaderTypes::get().Trailers : Http1HeaderTypes::get().Headers; + error_code_ = Http::Code::RequestHeaderFieldsTooLarge; + sendProtocolError(Http1ResponseCodeDetails::get().HeadersTooLarge); + throw CodecProtocolException(absl::StrCat(header_type, " size exceeds limit")); + } +} + +bool ConnectionImpl::maybeDirectDispatch(Buffer::Instance& data) { + if (!handling_upgrade_) { + // Only direct dispatch for Upgrade requests. + return false; + } + + ENVOY_CONN_LOG(trace, "direct-dispatched {} bytes", connection_, data.length()); + onBody(data); + data.drain(data.length()); + return true; +} + +Http::Status ConnectionImpl::dispatch(Buffer::Instance& data) { + // TODO(#10878): Remove this wrapper when exception removal is complete. innerDispatch may either + // throw an exception or return an error status. The utility wrapper catches exceptions and + // converts them to error statuses. + return Utility::exceptionToStatus( + [&](Buffer::Instance& data) -> Http::Status { return innerDispatch(data); }, data); +} + +Http::Status ConnectionImpl::innerDispatch(Buffer::Instance& data) { + ENVOY_CONN_LOG(trace, "parsing {} bytes", connection_, data.length()); + ASSERT(buffered_body_.length() == 0); + + if (maybeDirectDispatch(data)) { + return Http::okStatus(); + } + + // Always unpause before dispatch. + http_parser_pause(&parser_, 0); + + ssize_t total_parsed = 0; + if (data.length() > 0) { + for (const Buffer::RawSlice& slice : data.getRawSlices()) { + total_parsed += dispatchSlice(static_cast(slice.mem_), slice.len_); + if (HTTP_PARSER_ERRNO(&parser_) != HPE_OK) { + // Parse errors trigger an exception in dispatchSlice so we are guaranteed to be paused at + // this point. + ASSERT(HTTP_PARSER_ERRNO(&parser_) == HPE_PAUSED); + break; + } + } + dispatchBufferedBody(); + } else { + dispatchSlice(nullptr, 0); + } + ASSERT(buffered_body_.length() == 0); + + ENVOY_CONN_LOG(trace, "parsed {} bytes", connection_, total_parsed); + data.drain(total_parsed); + + // If an upgrade has been handled and there is body data or early upgrade + // payload to send on, send it on. + maybeDirectDispatch(data); + return Http::okStatus(); +} + +size_t ConnectionImpl::dispatchSlice(const char* slice, size_t len) { + ssize_t rc = http_parser_execute(&parser_, &settings_, slice, len); + if (HTTP_PARSER_ERRNO(&parser_) != HPE_OK && HTTP_PARSER_ERRNO(&parser_) != HPE_PAUSED) { + sendProtocolError(Http1ResponseCodeDetails::get().HttpCodecError); + throw CodecProtocolException("http/1.1 protocol error: " + + std::string(http_errno_name(HTTP_PARSER_ERRNO(&parser_)))); + } + + return rc; +} + +void ConnectionImpl::onHeaderField(const char* data, size_t length) { + // We previously already finished up the headers, these headers are + // now trailers. + if (header_parsing_state_ == HeaderParsingState::Done) { + if (!enable_trailers_) { + // Ignore trailers. + return; + } + processing_trailers_ = true; + header_parsing_state_ = HeaderParsingState::Field; + allocTrailers(); + } + if (header_parsing_state_ == HeaderParsingState::Value) { + completeLastHeader(); + } + + current_header_field_.append(data, length); + + checkMaxHeadersSize(); +} + +void ConnectionImpl::onHeaderValue(const char* data, size_t length) { + if (header_parsing_state_ == HeaderParsingState::Done && !enable_trailers_) { + // Ignore trailers. 
+ return; + } + + absl::string_view header_value{data, length}; + if (!Http::HeaderUtility::headerValueIsValid(header_value)) { + ENVOY_CONN_LOG(debug, "invalid header value: {}", connection_, header_value); + error_code_ = Http::Code::BadRequest; + sendProtocolError(Http1ResponseCodeDetails::get().InvalidCharacters); + throw CodecProtocolException("http/1.1 protocol error: header value contains invalid chars"); + } + + header_parsing_state_ = HeaderParsingState::Value; + if (current_header_value_.empty()) { + // Strip leading whitespace if the current header value input contains the first bytes of the + // encoded header value. Trailing whitespace is stripped once the full header value is known in + // ConnectionImpl::completeLastHeader. http_parser does not strip leading or trailing whitespace + // as the spec requires: https://tools.ietf.org/html/rfc7230#section-3.2.4 . + header_value = StringUtil::ltrim(header_value); + } + current_header_value_.append(header_value.data(), header_value.length()); + + checkMaxHeadersSize(); +} + +int ConnectionImpl::onHeadersCompleteBase() { + ASSERT(!processing_trailers_); + ENVOY_CONN_LOG(trace, "onHeadersCompleteBase", connection_); + completeLastHeader(); + + if (!(parser_.http_major == 1 && parser_.http_minor == 1)) { + // This is not necessarily true, but it's good enough since higher layers only care if this is + // HTTP/1.1 or not. + protocol_ = Protocol::Http10; + } + RequestOrResponseHeaderMap& request_or_response_headers = requestOrResponseHeaders(); + if (Utility::isUpgrade(request_or_response_headers) && upgradeAllowed()) { + // Ignore h2c upgrade requests until we support them. + // See https://github.com/envoyproxy/envoy/issues/7161 for details. + if (absl::EqualsIgnoreCase(request_or_response_headers.getUpgradeValue(), + Http::Headers::get().UpgradeValues.H2c)) { + ENVOY_CONN_LOG(trace, "removing unsupported h2c upgrade headers.", connection_); + request_or_response_headers.removeUpgrade(); + if (request_or_response_headers.Connection()) { + const auto& tokens_to_remove = caseUnorderdSetContainingUpgradeAndHttp2Settings(); + std::string new_value = StringUtil::removeTokens( + request_or_response_headers.getConnectionValue(), ",", tokens_to_remove, ","); + if (new_value.empty()) { + request_or_response_headers.removeConnection(); + } else { + request_or_response_headers.setConnection(new_value); + } + } + request_or_response_headers.remove(Headers::get().Http2Settings); + } else { + ENVOY_CONN_LOG(trace, "codec entering upgrade mode.", connection_); + handling_upgrade_ = true; + } + } + if (parser_.method == HTTP_CONNECT) { + if (request_or_response_headers.ContentLength()) { + if (request_or_response_headers.getContentLengthValue() == "0") { + request_or_response_headers.removeContentLength(); + } else { + // Per https://tools.ietf.org/html/rfc7231#section-4.3.6 a payload with a + // CONNECT request has no defined semantics, and may be rejected. + error_code_ = Http::Code::BadRequest; + sendProtocolError(Http1ResponseCodeDetails::get().BodyDisallowed); + throw CodecProtocolException("http/1.1 protocol error: unsupported content length"); + } + } + ENVOY_CONN_LOG(trace, "codec entering upgrade mode for CONNECT request.", connection_); + handling_upgrade_ = true; + } + + // Per https://tools.ietf.org/html/rfc7230#section-3.3.1 Envoy should reject + // transfer-codings it does not understand. 
+ // Per https://tools.ietf.org/html/rfc7231#section-4.3.6 a payload with a + // CONNECT request has no defined semantics, and may be rejected. + if (request_or_response_headers.TransferEncoding()) { + const absl::string_view encoding = request_or_response_headers.getTransferEncodingValue(); + if (!absl::EqualsIgnoreCase(encoding, Headers::get().TransferEncodingValues.Chunked) || + parser_.method == HTTP_CONNECT) { + error_code_ = Http::Code::NotImplemented; + sendProtocolError(Http1ResponseCodeDetails::get().InvalidTransferEncoding); + throw CodecProtocolException("http/1.1 protocol error: unsupported transfer encoding"); + } + } + + int rc = onHeadersComplete(); + header_parsing_state_ = HeaderParsingState::Done; + + // Returning 2 informs http_parser to not expect a body or further data on this connection. + return handling_upgrade_ ? 2 : rc; +} + +void ConnectionImpl::bufferBody(const char* data, size_t length) { + buffered_body_.add(data, length); +} + +void ConnectionImpl::dispatchBufferedBody() { + ASSERT(HTTP_PARSER_ERRNO(&parser_) == HPE_OK || HTTP_PARSER_ERRNO(&parser_) == HPE_PAUSED); + if (buffered_body_.length() > 0) { + onBody(buffered_body_); + buffered_body_.drain(buffered_body_.length()); + } +} + +void ConnectionImpl::onChunkHeader(bool is_final_chunk) { + if (is_final_chunk) { + // Dispatch body before parsing trailers, so body ends up dispatched even if an error is found + // while processing trailers. + dispatchBufferedBody(); + } +} + +void ConnectionImpl::onMessageCompleteBase() { + ENVOY_CONN_LOG(trace, "message complete", connection_); + + dispatchBufferedBody(); + + if (handling_upgrade_) { + // If this is an upgrade request, swallow the onMessageComplete. The + // upgrade payload will be treated as stream body. + ASSERT(!deferred_end_stream_headers_); + ENVOY_CONN_LOG(trace, "Pausing parser due to upgrade.", connection_); + http_parser_pause(&parser_, 1); + return; + } + + // If true, this indicates we were processing trailers and must + // move the last header into current_header_map_ + if (header_parsing_state_ == HeaderParsingState::Value) { + completeLastHeader(); + } + + onMessageComplete(); +} + +void ConnectionImpl::onMessageBeginBase() { + ENVOY_CONN_LOG(trace, "message begin", connection_); + // Make sure that if HTTP/1.0 and HTTP/1.1 requests share a connection Envoy correctly sets + // protocol for each request. 
Envoy defaults to 1.1 but sets the protocol to 1.0 where applicable + // in onHeadersCompleteBase + protocol_ = Protocol::Http11; + processing_trailers_ = false; + header_parsing_state_ = HeaderParsingState::Field; + allocHeaders(); + onMessageBegin(); +} + +void ConnectionImpl::onResetStreamBase(StreamResetReason reason) { + ASSERT(!reset_stream_called_); + reset_stream_called_ = true; + onResetStream(reason); +} + +ServerConnectionImpl::ServerConnectionImpl( + Network::Connection& connection, CodecStats& stats, ServerConnectionCallbacks& callbacks, + const Http1Settings& settings, uint32_t max_request_headers_kb, + const uint32_t max_request_headers_count, + envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action) + : ConnectionImpl(connection, stats, HTTP_REQUEST, max_request_headers_kb, + max_request_headers_count, formatter(settings), settings.enable_trailers_), + callbacks_(callbacks), codec_settings_(settings), + response_buffer_releasor_([this](const Buffer::OwnedBufferFragmentImpl* fragment) { + releaseOutboundResponse(fragment); + }), + // Pipelining is generally not well supported on the internet and has a series of dangerous + // overflow bugs. As such we are disabling it for now, and removing this temporary override if + // no one objects. If you use this integer to restore prior behavior, contact the + // maintainer team as it will otherwise be removed entirely soon. + max_outbound_responses_( + Runtime::getInteger("envoy.do_not_use_going_away_max_http2_outbound_responses", 2)), + flood_protection_( + Runtime::runtimeFeatureEnabled("envoy.reloadable_features.http1_flood_protection")), + headers_with_underscores_action_(headers_with_underscores_action) {} + +uint32_t ServerConnectionImpl::getHeadersSize() { + // Add in the the size of the request URL if processing request headers. + const uint32_t url_size = (!processing_trailers_ && active_request_.has_value()) + ? active_request_.value().request_url_.size() + : 0; + return url_size + ConnectionImpl::getHeadersSize(); +} + +void ServerConnectionImpl::onEncodeComplete() { + if (active_request_.value().remote_complete_) { + // Only do this if remote is complete. If we are replying before the request is complete the + // only logical thing to do is for higher level code to reset() / close the connection so we + // leave the request around so that it can fire reset callbacks. + active_request_.reset(); + } +} + +void ServerConnectionImpl::handlePath(RequestHeaderMap& headers, unsigned int method) { + HeaderString path(Headers::get().Path); + + bool is_connect = (method == HTTP_CONNECT); + + // The url is relative or a wildcard when the method is OPTIONS. Nothing to do here. + auto& active_request = active_request_.value(); + if (!is_connect && !active_request.request_url_.getStringView().empty() && + (active_request.request_url_.getStringView()[0] == '/' || + ((method == HTTP_OPTIONS) && active_request.request_url_.getStringView()[0] == '*'))) { + headers.addViaMove(std::move(path), std::move(active_request.request_url_)); + return; + } + + // If absolute_urls and/or connect are not going be handled, copy the url and return. + // This forces the behavior to be backwards compatible with the old codec behavior. + // CONNECT "urls" are actually host:port so look like absolute URLs to the above checks. + // Absolute URLS in CONNECT requests will be rejected below by the URL class validation. 
+ if (!codec_settings_.allow_absolute_url_ && !is_connect) { + headers.addViaMove(std::move(path), std::move(active_request.request_url_)); + return; + } + + Utility::Url absolute_url; + if (!absolute_url.initialize(active_request.request_url_.getStringView(), is_connect)) { + sendProtocolError(Http1ResponseCodeDetails::get().InvalidUrl); + throw CodecProtocolException("http/1.1 protocol error: invalid url in request line"); + } + // RFC7230#5.7 + // When a proxy receives a request with an absolute-form of + // request-target, the proxy MUST ignore the received Host header field + // (if any) and instead replace it with the host information of the + // request-target. A proxy that forwards such a request MUST generate a + // new Host field-value based on the received request-target rather than + // forward the received Host field-value. + headers.setHost(absolute_url.hostAndPort()); + + if (!absolute_url.pathAndQueryParams().empty()) { + headers.setPath(absolute_url.pathAndQueryParams()); + } + active_request.request_url_.clear(); +} + +int ServerConnectionImpl::onHeadersComplete() { + // Handle the case where response happens prior to request complete. It's up to upper layer code + // to disconnect the connection but we shouldn't fire any more events since it doesn't make + // sense. + if (active_request_.has_value()) { + auto& active_request = active_request_.value(); + auto& headers = absl::get(headers_or_trailers_); + ENVOY_CONN_LOG(trace, "Server: onHeadersComplete size={}", connection_, headers->size()); + const char* method_string = http_method_str(static_cast(parser_.method)); + + if (!handling_upgrade_ && connection_header_sanitization_ && headers->Connection()) { + // If we fail to sanitize the request, return a 400 to the client + if (!Utility::sanitizeConnectionHeader(*headers)) { + absl::string_view header_value = headers->getConnectionValue(); + ENVOY_CONN_LOG(debug, "Invalid nominated headers in Connection: {}", connection_, + header_value); + error_code_ = Http::Code::BadRequest; + sendProtocolError(Http1ResponseCodeDetails::get().ConnectionHeaderSanitization); + throw CodecProtocolException("Invalid nominated headers in Connection."); + } + } + + // Inform the response encoder about any HEAD method, so it can set content + // length and transfer encoding headers correctly. + active_request.response_encoder_.setIsResponseToHeadRequest(parser_.method == HTTP_HEAD); + active_request.response_encoder_.setIsResponseToConnectRequest(parser_.method == HTTP_CONNECT); + + handlePath(*headers, parser_.method); + ASSERT(active_request.request_url_.empty()); + + headers->setMethod(method_string); + + // Make sure the host is valid. + auto details = HeaderUtility::requestHeadersValid(*headers); + if (details.has_value()) { + sendProtocolError(details.value().get()); + throw CodecProtocolException( + "http/1.1 protocol error: request headers failed spec compliance checks"); + } + + // Determine here whether we have a body or not. This uses the new RFC semantics where the + // presence of content-length or chunked transfer-encoding indicates a body vs. a particular + // method. If there is no body, we defer raising decodeHeaders() until the parser is flushed + // with message complete. This allows upper layers to behave like HTTP/2 and prevents a proxy + // scenario where the higher layers stream through and implicitly switch to chunked transfer + // encoding because end stream with zero body length has not yet been indicated. 
+ if (parser_.flags & F_CHUNKED || + (parser_.content_length > 0 && parser_.content_length != ULLONG_MAX) || handling_upgrade_) { + active_request.request_decoder_->decodeHeaders(std::move(headers), false); + + // If the connection has been closed (or is closing) after decoding headers, pause the parser + // so we return control to the caller. + if (connection_.state() != Network::Connection::State::Open) { + http_parser_pause(&parser_, 1); + } + } else { + deferred_end_stream_headers_ = true; + } + } + + return 0; +} + +void ServerConnectionImpl::onMessageBegin() { + if (!resetStreamCalled()) { + ASSERT(!active_request_.has_value()); + active_request_.emplace(*this, header_key_formatter_.get()); + auto& active_request = active_request_.value(); + active_request.request_decoder_ = &callbacks_.newStream(active_request.response_encoder_); + + // Check for pipelined request flood as we prepare to accept a new request. + // Parse errors that happen prior to onMessageBegin result in stream termination, it is not + // possible to overflow output buffers with early parse errors. + doFloodProtectionChecks(); + } +} + +void ServerConnectionImpl::onUrl(const char* data, size_t length) { + if (active_request_.has_value()) { + active_request_.value().request_url_.append(data, length); + + checkMaxHeadersSize(); + } +} + +void ServerConnectionImpl::onBody(Buffer::Instance& data) { + ASSERT(!deferred_end_stream_headers_); + if (active_request_.has_value()) { + ENVOY_CONN_LOG(trace, "body size={}", connection_, data.length()); + active_request_.value().request_decoder_->decodeData(data, false); + } +} + +void ServerConnectionImpl::onMessageComplete() { + ASSERT(!handling_upgrade_); + if (active_request_.has_value()) { + auto& active_request = active_request_.value(); + + if (active_request.request_decoder_) { + active_request.response_encoder_.readDisable(true); + } + active_request.remote_complete_ = true; + if (deferred_end_stream_headers_) { + active_request.request_decoder_->decodeHeaders( + std::move(absl::get(headers_or_trailers_)), true); + deferred_end_stream_headers_ = false; + } else if (processing_trailers_) { + active_request.request_decoder_->decodeTrailers( + std::move(absl::get(headers_or_trailers_))); + } else { + Buffer::OwnedImpl buffer; + active_request.request_decoder_->decodeData(buffer, true); + } + + // Reset to ensure no information from one requests persists to the next. + headers_or_trailers_.emplace(nullptr); + } + + // Always pause the parser so that the calling code can process 1 request at a time and apply + // back pressure. However this means that the calling code needs to detect if there is more data + // in the buffer and dispatch it again. + http_parser_pause(&parser_, 1); +} + +void ServerConnectionImpl::onResetStream(StreamResetReason reason) { + active_request_.value().response_encoder_.runResetCallbacks(reason); + active_request_.reset(); +} + +void ServerConnectionImpl::sendProtocolErrorOld(absl::string_view details) { + if (active_request_.has_value()) { + active_request_.value().response_encoder_.setDetails(details); + } + // We do this here because we may get a protocol error before we have a logical stream. Higher + // layers can only operate on streams, so there is no coherent way to allow them to send an error + // "out of band." On one hand this is kind of a hack but on the other hand it normalizes HTTP/1.1 + // to look more like HTTP/2 to higher layers. 
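+  // Only write the raw error response below when no response has been started on this connection;
+  // otherwise higher layers are expected to reset or close the connection.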
+  if (!active_request_.has_value() ||
+      !active_request_.value().response_encoder_.startedResponse()) {
+    Buffer::OwnedImpl bad_request_response(
+        absl::StrCat("HTTP/1.1 ", error_code_, " ", CodeUtility::toString(error_code_),
+                     "\r\ncontent-length: 0\r\nconnection: close\r\n\r\n"));
+
+    connection_.write(bad_request_response, false);
+  }
+}
+
+void ServerConnectionImpl::sendProtocolError(absl::string_view details) {
+  if (!Runtime::runtimeFeatureEnabled("envoy.reloadable_features.early_errors_via_hcm")) {
+    sendProtocolErrorOld(details);
+    return;
+  }
+  // We do this here because we may get a protocol error before we have a logical stream.
+  if (!active_request_.has_value()) {
+    onMessageBeginBase();
+  }
+  ASSERT(active_request_.has_value());
+
+  active_request_.value().response_encoder_.setDetails(details);
+  if (!active_request_.value().response_encoder_.startedResponse()) {
+    // Note that the correctness of is_grpc_request and is_head_request is best-effort.
+    // If headers have not been fully parsed they may not be inferred correctly.
+    bool is_grpc_request = false;
+    if (absl::holds_alternative<RequestHeaderMapPtr>(headers_or_trailers_) &&
+        absl::get<RequestHeaderMapPtr>(headers_or_trailers_) != nullptr) {
+      is_grpc_request =
+          Grpc::Common::isGrpcRequestHeaders(*absl::get<RequestHeaderMapPtr>(headers_or_trailers_));
+    }
+    const bool is_head_request = parser_.method == HTTP_HEAD;
+    active_request_->request_decoder_->sendLocalReply(is_grpc_request, error_code_,
+                                                      CodeUtility::toString(error_code_), nullptr,
+                                                      is_head_request, absl::nullopt, details);
+    return;
+  }
+}
+
+void ServerConnectionImpl::onAboveHighWatermark() {
+  if (active_request_.has_value()) {
+    active_request_.value().response_encoder_.runHighWatermarkCallbacks();
+  }
+}
+void ServerConnectionImpl::onBelowLowWatermark() {
+  if (active_request_.has_value()) {
+    active_request_.value().response_encoder_.runLowWatermarkCallbacks();
+  }
+}
+
+void ServerConnectionImpl::releaseOutboundResponse(
+    const Buffer::OwnedBufferFragmentImpl* fragment) {
+  ASSERT(outbound_responses_ >= 1);
+  --outbound_responses_;
+  delete fragment;
+}
+
+void ServerConnectionImpl::checkHeaderNameForUnderscores() {
+  if (headers_with_underscores_action_ != envoy::config::core::v3::HttpProtocolOptions::ALLOW &&
+      Http::HeaderUtility::headerNameContainsUnderscore(current_header_field_.getStringView())) {
+    if (headers_with_underscores_action_ ==
+        envoy::config::core::v3::HttpProtocolOptions::DROP_HEADER) {
+      ENVOY_CONN_LOG(debug, "Dropping header with invalid characters in its name: {}", connection_,
+                     current_header_field_.getStringView());
+      stats_.dropped_headers_with_underscores_.inc();
+      current_header_field_.clear();
+      current_header_value_.clear();
+    } else {
+      ENVOY_CONN_LOG(debug, "Rejecting request due to header name with underscores: {}",
+                     connection_, current_header_field_.getStringView());
+      error_code_ = Http::Code::BadRequest;
+      sendProtocolError(Http1ResponseCodeDetails::get().InvalidCharacters);
+      stats_.requests_rejected_with_underscores_in_headers_.inc();
+      throw CodecProtocolException("http/1.1 protocol error: header name contains underscores");
+    }
+  }
+}
+
+ClientConnectionImpl::ClientConnectionImpl(Network::Connection& connection, CodecStats& stats,
+                                           ConnectionCallbacks&, const Http1Settings& settings,
+                                           const uint32_t max_response_headers_count)
+    : ConnectionImpl(connection, stats, HTTP_RESPONSE, MAX_RESPONSE_HEADERS_KB,
+                     max_response_headers_count, formatter(settings), settings.enable_trailers_) {}
+
+bool ClientConnectionImpl::cannotHaveBody() {
+  if (pending_response_.has_value()
&& pending_response_.value().encoder_.headRequest()) { + ASSERT(!pending_response_done_); + return true; + } else if (parser_.status_code == 204 || parser_.status_code == 304 || + (parser_.status_code >= 200 && parser_.content_length == 0)) { + return true; + } else { + return false; + } +} + +RequestEncoder& ClientConnectionImpl::newStream(ResponseDecoder& response_decoder) { + if (resetStreamCalled()) { + throw CodecClientException("cannot create new streams after calling reset"); + } + + // If reads were disabled due to flow control, we expect reads to always be enabled again before + // reusing this connection. This is done when the response is received. + ASSERT(connection_.readEnabled()); + + ASSERT(!pending_response_.has_value()); + ASSERT(pending_response_done_); + pending_response_.emplace(*this, header_key_formatter_.get(), &response_decoder); + pending_response_done_ = false; + return pending_response_.value().encoder_; +} + +int ClientConnectionImpl::onHeadersComplete() { + // Handle the case where the client is closing a kept alive connection (by sending a 408 + // with a 'Connection: close' header). In this case we just let response flush out followed + // by the remote close. + if (!pending_response_.has_value() && !resetStreamCalled()) { + throw PrematureResponseException(static_cast(parser_.status_code)); + } else if (pending_response_.has_value()) { + ASSERT(!pending_response_done_); + auto& headers = absl::get(headers_or_trailers_); + ENVOY_CONN_LOG(trace, "Client: onHeadersComplete size={}", connection_, headers->size()); + headers->setStatus(parser_.status_code); + + if (parser_.status_code >= 200 && parser_.status_code < 300 && + pending_response_.value().encoder_.connectRequest()) { + ENVOY_CONN_LOG(trace, "codec entering upgrade mode for CONNECT response.", connection_); + handling_upgrade_ = true; + + // For responses to connect requests, do not accept the chunked + // encoding header: https://tools.ietf.org/html/rfc7231#section-4.3.6 + if (headers->TransferEncoding() && + absl::EqualsIgnoreCase(headers->TransferEncoding()->value().getStringView(), + Headers::get().TransferEncodingValues.Chunked)) { + sendProtocolError(Http1ResponseCodeDetails::get().InvalidTransferEncoding); + throw CodecProtocolException("http/1.1 protocol error: unsupported transfer encoding"); + } + } + + if (strict_1xx_and_204_headers_ && (parser_.status_code < 200 || parser_.status_code == 204)) { + if (headers->TransferEncoding()) { + sendProtocolError(Http1ResponseCodeDetails::get().TransferEncodingNotAllowed); + throw CodecProtocolException( + "http/1.1 protocol error: transfer encoding not allowed in 1xx or 204"); + } + + if (headers->ContentLength()) { + // Report a protocol error for non-zero Content-Length, but paper over zero Content-Length. + if (headers->ContentLength()->value().getStringView() != "0") { + sendProtocolError(Http1ResponseCodeDetails::get().ContentLengthNotAllowed); + throw CodecProtocolException( + "http/1.1 protocol error: content length not allowed in 1xx or 204"); + } + + headers->removeContentLength(); + } + } + + if (parser_.status_code == 100) { + // http-parser treats 100 continue headers as their own complete response. + // Swallow the spurious onMessageComplete and continue processing. 
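+      // The flag set below is cleared again in onMessageComplete() once the spurious completion
+      // for the 100 response has been swallowed.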
+ ignore_message_complete_for_100_continue_ = true; + pending_response_.value().decoder_->decode100ContinueHeaders(std::move(headers)); + + // Reset to ensure no information from the continue headers is used for the response headers + // in case the callee does not move the headers out. + headers_or_trailers_.emplace(nullptr); + } else if (cannotHaveBody() && !handling_upgrade_) { + deferred_end_stream_headers_ = true; + } else { + pending_response_.value().decoder_->decodeHeaders(std::move(headers), false); + } + } + + // Here we deal with cases where the response cannot have a body, but http_parser does not deal + // with it for us. + return cannotHaveBody() ? 1 : 0; +} + +bool ClientConnectionImpl::upgradeAllowed() const { + if (pending_response_.has_value()) { + return pending_response_->encoder_.upgradeRequest(); + } + return false; +} + +void ClientConnectionImpl::onBody(Buffer::Instance& data) { + ASSERT(!deferred_end_stream_headers_); + if (pending_response_.has_value()) { + ASSERT(!pending_response_done_); + pending_response_.value().decoder_->decodeData(data, false); + } +} + +void ClientConnectionImpl::onMessageComplete() { + ENVOY_CONN_LOG(trace, "message complete", connection_); + if (ignore_message_complete_for_100_continue_) { + ignore_message_complete_for_100_continue_ = false; + return; + } + if (pending_response_.has_value()) { + ASSERT(!pending_response_done_); + // After calling decodeData() with end stream set to true, we should no longer be able to reset. + PendingResponse& response = pending_response_.value(); + // Encoder is used as part of decode* calls later in this function so pending_response_ can not + // be reset just yet. Preserve the state in pending_response_done_ instead. + pending_response_done_ = true; + + if (deferred_end_stream_headers_) { + response.decoder_->decodeHeaders( + std::move(absl::get(headers_or_trailers_)), true); + deferred_end_stream_headers_ = false; + } else if (processing_trailers_) { + response.decoder_->decodeTrailers( + std::move(absl::get(headers_or_trailers_))); + } else { + Buffer::OwnedImpl buffer; + response.decoder_->decodeData(buffer, true); + } + + // Reset to ensure no information from one requests persists to the next. + pending_response_.reset(); + headers_or_trailers_.emplace(nullptr); + } +} + +void ClientConnectionImpl::onResetStream(StreamResetReason reason) { + // Only raise reset if we did not already dispatch a complete response. + if (pending_response_.has_value() && !pending_response_done_) { + pending_response_.value().encoder_.runResetCallbacks(reason); + pending_response_done_ = true; + pending_response_.reset(); + } +} + +void ClientConnectionImpl::sendProtocolError(absl::string_view details) { + if (pending_response_.has_value()) { + ASSERT(!pending_response_done_); + pending_response_.value().encoder_.setDetails(details); + } +} + +void ClientConnectionImpl::onAboveHighWatermark() { + // This should never happen without an active stream/request. + pending_response_.value().encoder_.runHighWatermarkCallbacks(); +} + +void ClientConnectionImpl::onBelowLowWatermark() { + // This can get called without an active stream/request when the response completion causes us to + // close the connection, but in doing so go below low watermark. 
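+  // Only notify the encoder while a response is still pending; once the response has completed
+  // there is no stream left to notify.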
+ if (pending_response_.has_value() && !pending_response_done_) { + pending_response_.value().encoder_.runLowWatermarkCallbacks(); + } +} + +} // namespace Http1 +} // namespace Legacy +} // namespace Http +} // namespace Envoy diff --git a/source/common/http/http1/codec_impl_legacy.h b/source/common/http/http1/codec_impl_legacy.h new file mode 100644 index 000000000000..f5e9811ede87 --- /dev/null +++ b/source/common/http/http1/codec_impl_legacy.h @@ -0,0 +1,607 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include + +#include "envoy/config/core/v3/protocol.pb.h" +#include "envoy/http/codec.h" +#include "envoy/network/connection.h" + +#include "common/buffer/watermark_buffer.h" +#include "common/common/assert.h" +#include "common/common/statusor.h" +#include "common/http/codec_helper.h" +#include "common/http/codes.h" +#include "common/http/header_map_impl.h" +#include "common/http/http1/codec_stats.h" +#include "common/http/http1/header_formatter.h" +#include "common/http/status.h" + +namespace Envoy { +namespace Http { +namespace Legacy { +namespace Http1 { + +class ConnectionImpl; + +/** + * Base class for HTTP/1.1 request and response encoders. + */ +class StreamEncoderImpl : public virtual StreamEncoder, + public Stream, + Logger::Loggable, + public StreamCallbackHelper, + public Http1StreamEncoderOptions { +public: + ~StreamEncoderImpl() override { + // When the stream goes away, undo any read blocks to resume reading. + while (read_disable_calls_ != 0) { + StreamEncoderImpl::readDisable(false); + } + } + // Http::StreamEncoder + void encodeData(Buffer::Instance& data, bool end_stream) override; + void encodeMetadata(const MetadataMapVector&) override; + Stream& getStream() override { return *this; } + Http1StreamEncoderOptionsOptRef http1StreamEncoderOptions() override { return *this; } + + // Http::Http1StreamEncoderOptions + void disableChunkEncoding() override { disable_chunk_encoding_ = true; } + + // Http::Stream + void addCallbacks(StreamCallbacks& callbacks) override { addCallbacksHelper(callbacks); } + void removeCallbacks(StreamCallbacks& callbacks) override { removeCallbacksHelper(callbacks); } + // After this is called, for the HTTP/1 codec, the connection should be closed, i.e. no further + // progress may be made with the codec. + void resetStream(StreamResetReason reason) override; + void readDisable(bool disable) override; + uint32_t bufferLimit() override; + absl::string_view responseDetails() override { return details_; } + const Network::Address::InstanceConstSharedPtr& connectionLocalAddress() override; + void setFlushTimeout(std::chrono::milliseconds) override { + // HTTP/1 has one stream per connection, thus any data encoded is immediately written to the + // connection, invoking any watermarks as necessary. There is no internal buffering that would + // require a flush timeout not already covered by other timeouts. 
+ } + + void setIsResponseToHeadRequest(bool value) { is_response_to_head_request_ = value; } + void setIsResponseToConnectRequest(bool value) { is_response_to_connect_request_ = value; } + void setDetails(absl::string_view details) { details_ = details; } + + void clearReadDisableCallsForTests() { read_disable_calls_ = 0; } + +protected: + StreamEncoderImpl(ConnectionImpl& connection, + Http::Http1::HeaderKeyFormatter* header_key_formatter); + void encodeHeadersBase(const RequestOrResponseHeaderMap& headers, absl::optional status, + bool end_stream); + void encodeTrailersBase(const HeaderMap& headers); + + static const std::string CRLF; + static const std::string LAST_CHUNK; + + ConnectionImpl& connection_; + uint32_t read_disable_calls_{}; + bool disable_chunk_encoding_ : 1; + bool chunk_encoding_ : 1; + bool is_response_to_head_request_ : 1; + bool is_response_to_connect_request_ : 1; + +private: + /** + * Called to encode an individual header. + * @param key supplies the header to encode. + * @param key_size supplies the byte size of the key. + * @param value supplies the value to encode. + * @param value_size supplies the byte size of the value. + */ + void encodeHeader(const char* key, uint32_t key_size, const char* value, uint32_t value_size); + + /** + * Called to encode an individual header. + * @param key supplies the header to encode as a string_view. + * @param value supplies the value to encode as a string_view. + */ + void encodeHeader(absl::string_view key, absl::string_view value); + + /** + * Called to finalize a stream encode. + */ + void endEncode(); + + void encodeFormattedHeader(absl::string_view key, absl::string_view value); + + const Http::Http1::HeaderKeyFormatter* const header_key_formatter_; + absl::string_view details_; +}; + +/** + * HTTP/1.1 response encoder. + */ +class ResponseEncoderImpl : public StreamEncoderImpl, public ResponseEncoder { +public: + ResponseEncoderImpl(ConnectionImpl& connection, + Http::Http1::HeaderKeyFormatter* header_key_formatter) + : StreamEncoderImpl(connection, header_key_formatter) {} + + bool startedResponse() { return started_response_; } + + // Http::ResponseEncoder + void encode100ContinueHeaders(const ResponseHeaderMap& headers) override; + void encodeHeaders(const ResponseHeaderMap& headers, bool end_stream) override; + void encodeTrailers(const ResponseTrailerMap& trailers) override { encodeTrailersBase(trailers); } + +private: + bool started_response_{}; +}; + +/** + * HTTP/1.1 request encoder. + */ +class RequestEncoderImpl : public StreamEncoderImpl, public RequestEncoder { +public: + RequestEncoderImpl(ConnectionImpl& connection, + Http::Http1::HeaderKeyFormatter* header_key_formatter) + : StreamEncoderImpl(connection, header_key_formatter) {} + bool upgradeRequest() const { return upgrade_request_; } + bool headRequest() const { return head_request_; } + bool connectRequest() const { return connect_request_; } + + // Http::RequestEncoder + void encodeHeaders(const RequestHeaderMap& headers, bool end_stream) override; + void encodeTrailers(const RequestTrailerMap& trailers) override { encodeTrailersBase(trailers); } + +private: + bool upgrade_request_{}; + bool head_request_{}; + bool connect_request_{}; +}; + +/** + * Base class for HTTP/1.1 client and server connections. + * Handles the callbacks of http_parser with its own base routine and then + * virtual dispatches to its subclasses. 
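+ * The base routines own the http_parser state and output buffering; ServerConnectionImpl and
+ * ClientConnectionImpl map parsed messages onto request and response streams.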
+ */ +class ConnectionImpl : public virtual Connection, protected Logger::Loggable { +public: + /** + * @return Network::Connection& the backing network connection. + */ + Network::Connection& connection() { return connection_; } + + /** + * Called when the active encoder has completed encoding the outbound half of the stream. + */ + virtual void onEncodeComplete() PURE; + + /** + * Called when resetStream() has been called on an active stream. In HTTP/1.1 the only + * valid operation after this point is for the connection to get blown away, but we will not + * fire any more callbacks in case some stack has to unwind. + */ + void onResetStreamBase(StreamResetReason reason); + + /** + * Flush all pending output from encoding. + */ + void flushOutput(bool end_encode = false); + + void addToBuffer(absl::string_view data); + void addCharToBuffer(char c); + void addIntToBuffer(uint64_t i); + Buffer::WatermarkBuffer& buffer() { return output_buffer_; } + uint64_t bufferRemainingSize(); + void copyToBuffer(const char* data, uint64_t length); + void reserveBuffer(uint64_t size); + void readDisable(bool disable) { + if (connection_.state() == Network::Connection::State::Open) { + connection_.readDisable(disable); + } + } + uint32_t bufferLimit() { return connection_.bufferLimit(); } + virtual bool supportsHttp10() { return false; } + bool maybeDirectDispatch(Buffer::Instance& data); + virtual void maybeAddSentinelBufferFragment(Buffer::WatermarkBuffer&) {} + Http::Http1::CodecStats& stats() { return stats_; } + bool enableTrailers() const { return enable_trailers_; } + + // Http::Connection + Http::Status dispatch(Buffer::Instance& data) override; + void goAway() override {} // Called during connection manager drain flow + Protocol protocol() override { return protocol_; } + void shutdownNotice() override {} // Called during connection manager drain flow + bool wantsToWrite() override { return false; } + void onUnderlyingConnectionAboveWriteBufferHighWatermark() override { onAboveHighWatermark(); } + void onUnderlyingConnectionBelowWriteBufferLowWatermark() override { onBelowLowWatermark(); } + + bool strict1xxAnd204Headers() { return strict_1xx_and_204_headers_; } + +protected: + ConnectionImpl(Network::Connection& connection, Http::Http1::CodecStats& stats, + http_parser_type type, uint32_t max_headers_kb, const uint32_t max_headers_count, + Http::Http1::HeaderKeyFormatterPtr&& header_key_formatter, bool enable_trailers); + + bool resetStreamCalled() { return reset_stream_called_; } + void onMessageBeginBase(); + + /** + * Get memory used to represent HTTP headers or trailers currently being parsed. + * Computed by adding the partial header field and value that is currently being parsed and the + * estimated header size for previous header lines provided by HeaderMap::byteSize(). + */ + virtual uint32_t getHeadersSize(); + + /** + * Called from onUrl, onHeaderField and onHeaderValue to verify that the headers do not exceed the + * configured max header size limit. Throws a CodecProtocolException if headers exceed the size + * limit. 
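+   * On the server side the accumulated size also includes the request URL being parsed, via the
+   * getHeadersSize() override.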
+ */ + void checkMaxHeadersSize(); + + Network::Connection& connection_; + Http::Http1::CodecStats& stats_; + http_parser parser_; + Http::Code error_code_{Http::Code::BadRequest}; + const Http::Http1::HeaderKeyFormatterPtr header_key_formatter_; + HeaderString current_header_field_; + HeaderString current_header_value_; + bool processing_trailers_ : 1; + bool handling_upgrade_ : 1; + bool reset_stream_called_ : 1; + // Deferred end stream headers indicate that we are not going to raise headers until the full + // HTTP/1 message has been flushed from the parser. This allows raising an HTTP/2 style headers + // block with end stream set to true with no further protocol data remaining. + bool deferred_end_stream_headers_ : 1; + const bool connection_header_sanitization_ : 1; + const bool enable_trailers_ : 1; + const bool strict_1xx_and_204_headers_ : 1; + +private: + enum class HeaderParsingState { Field, Value, Done }; + + virtual HeaderMap& headersOrTrailers() PURE; + virtual RequestOrResponseHeaderMap& requestOrResponseHeaders() PURE; + virtual void allocHeaders() PURE; + virtual void allocTrailers() PURE; + + /** + * Called in order to complete an in progress header decode. + */ + void completeLastHeader(); + + /** + * Check if header name contains underscore character. + * Underscore character is allowed in header names by the RFC-7230 and this check is implemented + * as a security measure due to systems that treat '_' and '-' as interchangeable. + * The ServerConnectionImpl may drop header or reject request based on the + * `common_http_protocol_options.headers_with_underscores_action` configuration option in the + * HttpConnectionManager. + */ + virtual bool shouldDropHeaderWithUnderscoresInNames(absl::string_view /* header_name */) const { + return false; + } + + /** + * An inner dispatch call that executes the dispatching logic. While exception removal is in + * migration (#10878), this function may either throw an exception or return an error status. + * Exceptions are caught and translated to their corresponding statuses in the outer level + * dispatch. + * TODO(#10878): Remove this when exception removal is complete. + */ + Http::Status innerDispatch(Buffer::Instance& data); + + /** + * Dispatch a memory span. + * @param slice supplies the start address. + * @len supplies the length of the span. + */ + size_t dispatchSlice(const char* slice, size_t len); + + /** + * Called by the http_parser when body data is received. + * @param data supplies the start address. + * @param length supplies the length. + */ + void bufferBody(const char* data, size_t length); + + /** + * Push the accumulated body through the filter pipeline. + */ + void dispatchBufferedBody(); + + /** + * Called when a request/response is beginning. A base routine happens first then a virtual + * dispatch is invoked. + */ + virtual void onMessageBegin() PURE; + + /** + * Called when URL data is received. + * @param data supplies the start address. + * @param length supplies the length. + */ + virtual void onUrl(const char* data, size_t length) PURE; + + /** + * Called when header field data is received. + * @param data supplies the start address. + * @param length supplies the length. + */ + void onHeaderField(const char* data, size_t length); + + /** + * Called when header value data is received. + * @param data supplies the start address. + * @param length supplies the length. + */ + void onHeaderValue(const char* data, size_t length); + + /** + * Called when headers are complete. 
A base routine happens first then a virtual dispatch is + * invoked. Note that this only applies to headers and NOT trailers. End of + * trailers are signaled via onMessageCompleteBase(). + * @return 0 if no error, 1 if there should be no body. + */ + int onHeadersCompleteBase(); + virtual int onHeadersComplete() PURE; + + /** + * Called to see if upgrade transition is allowed. + */ + virtual bool upgradeAllowed() const PURE; + + /** + * Called with body data is available for processing when either: + * - There is an accumulated partial body after the parser is done processing bytes read from the + * socket + * - The parser encounters the last byte of the body + * - The codec does a direct dispatch from the read buffer + * For performance reasons there is at most one call to onBody per call to HTTP/1 + * ConnectionImpl::dispatch call. + * @param data supplies the body data + */ + virtual void onBody(Buffer::Instance& data) PURE; + + /** + * Called when the request/response is complete. + */ + void onMessageCompleteBase(); + virtual void onMessageComplete() PURE; + + /** + * Called when accepting a chunk header. + */ + void onChunkHeader(bool is_final_chunk); + + /** + * @see onResetStreamBase(). + */ + virtual void onResetStream(StreamResetReason reason) PURE; + + /** + * Send a protocol error response to remote. + */ + virtual void sendProtocolError(absl::string_view details) PURE; + + /** + * Called when output_buffer_ or the underlying connection go from below a low watermark to over + * a high watermark. + */ + virtual void onAboveHighWatermark() PURE; + + /** + * Called when output_buffer_ or the underlying connection go from above a high watermark to + * below a low watermark. + */ + virtual void onBelowLowWatermark() PURE; + + /** + * Check if header name contains underscore character. + * The ServerConnectionImpl may drop header or reject request based on configuration. + */ + virtual void checkHeaderNameForUnderscores() {} + + static http_parser_settings settings_; + + HeaderParsingState header_parsing_state_{HeaderParsingState::Field}; + // Used to accumulate the HTTP message body during the current dispatch call. The accumulated body + // is pushed through the filter pipeline either at the end of the current dispatch call, or when + // the last byte of the body is processed (whichever happens first). + Buffer::OwnedImpl buffered_body_; + Buffer::WatermarkBuffer output_buffer_; + Protocol protocol_{Protocol::Http11}; + const uint32_t max_headers_kb_; + const uint32_t max_headers_count_; +}; + +/** + * Implementation of Http::ServerConnection for HTTP/1.1. + */ +class ServerConnectionImpl : public ServerConnection, public ConnectionImpl { +public: + ServerConnectionImpl(Network::Connection& connection, Http::Http1::CodecStats& stats, + ServerConnectionCallbacks& callbacks, const Http1Settings& settings, + uint32_t max_request_headers_kb, const uint32_t max_request_headers_count, + envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action); + bool supportsHttp10() override { return codec_settings_.accept_http_10_; } + +protected: + /** + * An active HTTP/1.1 request. 
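+   * HTTP/1.1 serializes requests on a connection, so at most one ActiveRequest exists at a time
+   * (stored in active_request_ below).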
+ */ + struct ActiveRequest { + ActiveRequest(ConnectionImpl& connection, Http::Http1::HeaderKeyFormatter* header_key_formatter) + : response_encoder_(connection, header_key_formatter) {} + + HeaderString request_url_; + RequestDecoder* request_decoder_{}; + ResponseEncoderImpl response_encoder_; + bool remote_complete_{}; + }; + absl::optional& activeRequest() { return active_request_; } + // ConnectionImpl + void onMessageComplete() override; + // Add the size of the request_url to the reported header size when processing request headers. + uint32_t getHeadersSize() override; + +private: + /** + * Manipulate the request's first line, parsing the url and converting to a relative path if + * necessary. Compute Host / :authority headers based on 7230#5.7 and 7230#6 + * + * @param is_connect true if the request has the CONNECT method + * @param headers the request's headers + * @throws CodecProtocolException on an invalid url in the request line + */ + void handlePath(RequestHeaderMap& headers, unsigned int method); + + // ConnectionImpl + void onEncodeComplete() override; + void onMessageBegin() override; + void onUrl(const char* data, size_t length) override; + int onHeadersComplete() override; + // If upgrade behavior is not allowed, the HCM will have sanitized the headers out. + bool upgradeAllowed() const override { return true; } + void onBody(Buffer::Instance& data) override; + void onResetStream(StreamResetReason reason) override; + void sendProtocolError(absl::string_view details) override; + void onAboveHighWatermark() override; + void onBelowLowWatermark() override; + HeaderMap& headersOrTrailers() override { + if (absl::holds_alternative(headers_or_trailers_)) { + return *absl::get(headers_or_trailers_); + } else { + return *absl::get(headers_or_trailers_); + } + } + RequestOrResponseHeaderMap& requestOrResponseHeaders() override { + return *absl::get(headers_or_trailers_); + } + void allocHeaders() override { + ASSERT(nullptr == absl::get(headers_or_trailers_)); + ASSERT(!processing_trailers_); + headers_or_trailers_.emplace(RequestHeaderMapImpl::create()); + } + void allocTrailers() override { + ASSERT(processing_trailers_); + if (!absl::holds_alternative(headers_or_trailers_)) { + headers_or_trailers_.emplace(RequestTrailerMapImpl::create()); + } + } + + void sendProtocolErrorOld(absl::string_view details); + + void releaseOutboundResponse(const Buffer::OwnedBufferFragmentImpl* fragment); + void maybeAddSentinelBufferFragment(Buffer::WatermarkBuffer& output_buffer) override; + void doFloodProtectionChecks() const; + void checkHeaderNameForUnderscores() override; + + ServerConnectionCallbacks& callbacks_; + absl::optional active_request_; + Http1Settings codec_settings_; + const Buffer::OwnedBufferFragmentImpl::Releasor response_buffer_releasor_; + uint32_t outbound_responses_{}; + // This defaults to 2, which functionally disables pipelining. If any users + // of Envoy wish to enable pipelining (which is dangerous and ill supported) + // we could make this configurable. + uint32_t max_outbound_responses_{}; + bool flood_protection_{}; + // TODO(mattklein123): This should be a member of ActiveRequest but this change needs dedicated + // thought as some of the reset and no header code paths make this difficult. Headers are + // populated on message begin. Trailers are populated on the first parsed trailer field (if + // trailers are enabled). The variant is reset to null headers on message complete for assertion + // purposes. 
+ absl::variant headers_or_trailers_; + // The action to take when a request header name contains underscore characters. + const envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action_; +}; + +/** + * Implementation of Http::ClientConnection for HTTP/1.1. + */ +class ClientConnectionImpl : public ClientConnection, public ConnectionImpl { +public: + ClientConnectionImpl(Network::Connection& connection, Http::Http1::CodecStats& stats, + ConnectionCallbacks& callbacks, const Http1Settings& settings, + const uint32_t max_response_headers_count); + + // Http::ClientConnection + RequestEncoder& newStream(ResponseDecoder& response_decoder) override; + +private: + struct PendingResponse { + PendingResponse(ConnectionImpl& connection, + Http::Http1::HeaderKeyFormatter* header_key_formatter, ResponseDecoder* decoder) + : encoder_(connection, header_key_formatter), decoder_(decoder) {} + + RequestEncoderImpl encoder_; + ResponseDecoder* decoder_; + }; + + bool cannotHaveBody(); + + // ConnectionImpl + void onEncodeComplete() override {} + void onMessageBegin() override {} + void onUrl(const char*, size_t) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } + int onHeadersComplete() override; + bool upgradeAllowed() const override; + void onBody(Buffer::Instance& data) override; + void onMessageComplete() override; + void onResetStream(StreamResetReason reason) override; + void sendProtocolError(absl::string_view details) override; + void onAboveHighWatermark() override; + void onBelowLowWatermark() override; + HeaderMap& headersOrTrailers() override { + if (absl::holds_alternative(headers_or_trailers_)) { + return *absl::get(headers_or_trailers_); + } else { + return *absl::get(headers_or_trailers_); + } + } + RequestOrResponseHeaderMap& requestOrResponseHeaders() override { + return *absl::get(headers_or_trailers_); + } + void allocHeaders() override { + ASSERT(nullptr == absl::get(headers_or_trailers_)); + ASSERT(!processing_trailers_); + headers_or_trailers_.emplace(ResponseHeaderMapImpl::create()); + } + void allocTrailers() override { + ASSERT(processing_trailers_); + if (!absl::holds_alternative(headers_or_trailers_)) { + headers_or_trailers_.emplace(ResponseTrailerMapImpl::create()); + } + } + + absl::optional pending_response_; + // TODO(mattklein123): The following bool tracks whether a pending response is complete before + // dispatching callbacks. This is needed so that pending_response_ stays valid during callbacks + // in order to access the stream, but to avoid invoking callbacks that shouldn't be called once + // the response is complete. The existence of this variable is hard to reason about and it should + // be combined with pending_response_ somehow in a follow up cleanup. + bool pending_response_done_{true}; + // Set true between receiving 100-Continue headers and receiving the spurious onMessageComplete. + bool ignore_message_complete_for_100_continue_{}; + // TODO(mattklein123): This should be a member of PendingResponse but this change needs dedicated + // thought as some of the reset and no header code paths make this difficult. Headers are + // populated on message begin. Trailers are populated when the switch to trailer processing is + // detected while parsing the first trailer field (if trailers are enabled). The variant is reset + // to null headers on message complete for assertion purposes. + absl::variant headers_or_trailers_; + + // The default limit of 80 KiB is the vanilla http_parser behaviour. 
+ static constexpr uint32_t MAX_RESPONSE_HEADERS_KB = 80; +}; + +} // namespace Http1 +} // namespace Legacy +} // namespace Http +} // namespace Envoy diff --git a/source/common/http/http2/BUILD b/source/common/http/http2/BUILD index dd0333ffa847..d2e4ed011311 100644 --- a/source/common/http/http2/BUILD +++ b/source/common/http/http2/BUILD @@ -18,6 +18,36 @@ envoy_cc_library( ], ) +CODEC_LIB_DEPS = [ + ":codec_stats_lib", + ":metadata_decoder_lib", + ":metadata_encoder_lib", + "//include/envoy/event:deferred_deletable", + "//include/envoy/event:dispatcher_interface", + "//include/envoy/http:codec_interface", + "//include/envoy/http:codes_interface", + "//include/envoy/http:header_map_interface", + "//include/envoy/network:connection_interface", + "//include/envoy/stats:stats_interface", + "//source/common/buffer:buffer_lib", + "//source/common/buffer:watermark_buffer_lib", + "//source/common/common:assert_lib", + "//source/common/common:enum_to_int", + "//source/common/common:linked_object", + "//source/common/common:minimal_logger_lib", + "//source/common/common:utility_lib", + "//source/common/http:codec_helper_lib", + "//source/common/http:codes_lib", + "//source/common/http:exception_lib", + "//source/common/http:header_map_lib", + "//source/common/http:header_utility_lib", + "//source/common/http:headers_lib", + "//source/common/http:status_lib", + "//source/common/http:utility_lib", + "//source/common/runtime:runtime_features_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", +] + envoy_cc_library( name = "codec_lib", srcs = ["codec_impl.cc"], @@ -28,35 +58,23 @@ envoy_cc_library( "abseil_inlined_vector", "abseil_algorithm", ], - deps = [ - ":codec_stats_lib", - ":metadata_decoder_lib", - ":metadata_encoder_lib", - "//include/envoy/event:deferred_deletable", - "//include/envoy/event:dispatcher_interface", - "//include/envoy/http:codec_interface", - "//include/envoy/http:codes_interface", - "//include/envoy/http:header_map_interface", - "//include/envoy/network:connection_interface", - "//include/envoy/stats:stats_interface", - "//source/common/buffer:buffer_lib", - "//source/common/buffer:watermark_buffer_lib", - "//source/common/common:assert_lib", - "//source/common/common:enum_to_int", - "//source/common/common:linked_object", - "//source/common/common:minimal_logger_lib", - "//source/common/common:utility_lib", - "//source/common/http:codec_helper_lib", - "//source/common/http:codes_lib", - "//source/common/http:exception_lib", - "//source/common/http:header_map_lib", - "//source/common/http:header_utility_lib", - "//source/common/http:headers_lib", - "//source/common/http:status_lib", - "//source/common/http:utility_lib", - "//source/common/runtime:runtime_features_lib", - "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + deps = CODEC_LIB_DEPS, +) + +envoy_cc_library( + name = "codec_legacy_lib", + srcs = ["codec_impl_legacy.cc"], + hdrs = [ + "codec_impl.h", + "codec_impl_legacy.h", + ], + external_deps = [ + "nghttp2", + "abseil_optional", + "abseil_inlined_vector", + "abseil_algorithm", ], + deps = CODEC_LIB_DEPS, ) # Separate library for some nghttp2 setup stuff to avoid having tests take a diff --git a/source/common/http/http2/codec_impl.cc b/source/common/http/http2/codec_impl.cc index b25b8fa4bef5..9c56c034cbd5 100644 --- a/source/common/http/http2/codec_impl.cc +++ b/source/common/http/http2/codec_impl.cc @@ -87,7 +87,7 @@ void ProdNghttp2SessionFactory::init(nghttp2_session*, ConnectionImpl* connectio * Helper to remove const during a cast. 
nghttp2 takes non-const pointers for headers even though * it copies them. */ -template static T* remove_const(const void* object) { +template static T* removeConst(const void* object) { return const_cast(reinterpret_cast(object)); } @@ -120,8 +120,8 @@ static void insertHeader(std::vector& headers, const HeaderEntry& he } const absl::string_view header_key = header.key().getStringView(); const absl::string_view header_value = header.value().getStringView(); - headers.push_back({remove_const(header_key.data()), - remove_const(header_value.data()), header_key.size(), + headers.push_back({removeConst(header_key.data()), + removeConst(header_value.data()), header_key.size(), header_value.size(), flags}); } @@ -244,7 +244,7 @@ void ConnectionImpl::StreamImpl::readDisable(bool disable) { } else { ASSERT(read_disable_count_ > 0); --read_disable_count_; - if (!buffers_overrun()) { + if (!buffersOverrun()) { nghttp2_session_consume(parent_.session_, stream_id_, unconsumed_bytes_); unconsumed_bytes_ = 0; parent_.sendPendingFrames(); @@ -560,7 +560,7 @@ int ConnectionImpl::onData(int32_t stream_id, const uint8_t* data, size_t len) { stream->pending_recv_data_.add(data, len); // Update the window to the peer unless some consumer of this stream's data has hit a flow control // limit and disabled reads on this stream - if (!stream->buffers_overrun()) { + if (!stream->buffersOverrun()) { nghttp2_session_consume(session_, stream_id, len); } else { stream->unconsumed_bytes_ += len; diff --git a/source/common/http/http2/codec_impl.h b/source/common/http/http2/codec_impl.h index cf848599c800..8bd5d8b3d1fd 100644 --- a/source/common/http/http2/codec_impl.h +++ b/source/common/http/http2/codec_impl.h @@ -54,14 +54,16 @@ class ConnectionImpl; // Abstract nghttp2_session factory. Used to enable injection of factories for testing. class Nghttp2SessionFactory { public: + using ConnectionImplType = ConnectionImpl; virtual ~Nghttp2SessionFactory() = default; // Returns a new nghttp2_session to be used with |connection|. virtual nghttp2_session* create(const nghttp2_session_callbacks* callbacks, - ConnectionImpl* connection, const nghttp2_option* options) PURE; + ConnectionImplType* connection, + const nghttp2_option* options) PURE; // Initializes the |session|. 
- virtual void init(nghttp2_session* session, ConnectionImpl* connection, + virtual void init(nghttp2_session* session, ConnectionImplType* connection, const envoy::config::core::v3::Http2ProtocolOptions& options) PURE; }; @@ -256,7 +258,7 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable 0; } + bool buffersOverrun() const { return read_disable_count_ > 0; } ConnectionImpl& parent_; int32_t stream_id_{-1}; @@ -518,12 +520,13 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable +#include +#include + +#include "envoy/event/dispatcher.h" +#include "envoy/http/codes.h" +#include "envoy/http/header_map.h" +#include "envoy/network/connection.h" + +#include "common/common/assert.h" +#include "common/common/cleanup.h" +#include "common/common/enum_to_int.h" +#include "common/common/fmt.h" +#include "common/common/utility.h" +#include "common/http/codes.h" +#include "common/http/exception.h" +#include "common/http/header_utility.h" +#include "common/http/headers.h" +#include "common/http/http2/codec_stats.h" +#include "common/http/utility.h" + +#include "absl/container/fixed_array.h" + +namespace Envoy { +namespace Http { +namespace Legacy { +namespace Http2 { + +class Http2ResponseCodeDetailValues { + // Invalid HTTP header field was received and stream is going to be + // closed. + const absl::string_view ng_http2_err_http_header_ = "http2.invalid.header.field"; + + // Violation in HTTP messaging rule. + const absl::string_view ng_http2_err_http_messaging_ = "http2.violation.of.messaging.rule"; + + // none of the above + const absl::string_view ng_http2_err_unknown_ = "http2.unknown.nghttp2.error"; + +public: + const absl::string_view errorDetails(int error_code) const { + switch (error_code) { + case NGHTTP2_ERR_HTTP_HEADER: + return ng_http2_err_http_header_; + case NGHTTP2_ERR_HTTP_MESSAGING: + return ng_http2_err_http_messaging_; + default: + return ng_http2_err_unknown_; + } + } +}; + +using Http2ResponseCodeDetails = ConstSingleton; +using Http::Http2::CodecStats; +using Http::Http2::MetadataDecoder; +using Http::Http2::MetadataEncoder; + +bool Utility::reconstituteCrumbledCookies(const HeaderString& key, const HeaderString& value, + HeaderString& cookies) { + if (key != Headers::get().Cookie.get().c_str()) { + return false; + } + + if (!cookies.empty()) { + cookies.append("; ", 2); + } + + const absl::string_view value_view = value.getStringView(); + cookies.append(value_view.data(), value_view.size()); + return true; +} + +ConnectionImpl::Http2Callbacks ConnectionImpl::http2_callbacks_; + +nghttp2_session* ProdNghttp2SessionFactory::create(const nghttp2_session_callbacks* callbacks, + ConnectionImpl* connection, + const nghttp2_option* options) { + nghttp2_session* session; + nghttp2_session_client_new2(&session, callbacks, connection, options); + return session; +} + +void ProdNghttp2SessionFactory::init(nghttp2_session*, ConnectionImpl* connection, + const envoy::config::core::v3::Http2ProtocolOptions& options) { + connection->sendSettings(options, true); +} + +/** + * Helper to remove const during a cast. nghttp2 takes non-const pointers for headers even though + * it copies them. 
+ */ +template static T* removeConst(const void* object) { + return const_cast(reinterpret_cast(object)); +} + +ConnectionImpl::StreamImpl::StreamImpl(ConnectionImpl& parent, uint32_t buffer_limit) + : parent_(parent), local_end_stream_sent_(false), remote_end_stream_(false), + data_deferred_(false), waiting_for_non_informational_headers_(false), + pending_receive_buffer_high_watermark_called_(false), + pending_send_buffer_high_watermark_called_(false), reset_due_to_messaging_error_(false) { + parent_.stats_.streams_active_.inc(); + if (buffer_limit > 0) { + setWriteBufferWatermarks(buffer_limit / 2, buffer_limit); + } +} + +ConnectionImpl::StreamImpl::~StreamImpl() { ASSERT(stream_idle_timer_ == nullptr); } + +void ConnectionImpl::StreamImpl::destroy() { + disarmStreamIdleTimer(); + parent_.stats_.streams_active_.dec(); + parent_.stats_.pending_send_bytes_.sub(pending_send_data_.length()); +} + +static void insertHeader(std::vector& headers, const HeaderEntry& header) { + uint8_t flags = 0; + if (header.key().isReference()) { + flags |= NGHTTP2_NV_FLAG_NO_COPY_NAME; + } + if (header.value().isReference()) { + flags |= NGHTTP2_NV_FLAG_NO_COPY_VALUE; + } + const absl::string_view header_key = header.key().getStringView(); + const absl::string_view header_value = header.value().getStringView(); + headers.push_back({removeConst(header_key.data()), + removeConst(header_value.data()), header_key.size(), + header_value.size(), flags}); +} + +void ConnectionImpl::StreamImpl::buildHeaders(std::vector& final_headers, + const HeaderMap& headers) { + final_headers.reserve(headers.size()); + headers.iterate( + [](const HeaderEntry& header, void* context) -> HeaderMap::Iterate { + std::vector* final_headers = static_cast*>(context); + insertHeader(*final_headers, header); + return HeaderMap::Iterate::Continue; + }, + &final_headers); +} + +void ConnectionImpl::ServerStreamImpl::encode100ContinueHeaders(const ResponseHeaderMap& headers) { + ASSERT(headers.Status()->value() == "100"); + encodeHeaders(headers, false); +} + +void ConnectionImpl::StreamImpl::encodeHeadersBase(const std::vector& final_headers, + bool end_stream) { + nghttp2_data_provider provider; + if (!end_stream) { + provider.source.ptr = this; + provider.read_callback = [](nghttp2_session*, int32_t, uint8_t*, size_t length, + uint32_t* data_flags, nghttp2_data_source* source, + void*) -> ssize_t { + return static_cast(source->ptr)->onDataSourceRead(length, data_flags); + }; + } + + local_end_stream_ = end_stream; + submitHeaders(final_headers, end_stream ? nullptr : &provider); + parent_.sendPendingFrames(); +} + +void ConnectionImpl::ClientStreamImpl::encodeHeaders(const RequestHeaderMap& headers, + bool end_stream) { + // This must exist outside of the scope of isUpgrade as the underlying memory is + // needed until encodeHeadersBase has been called. + std::vector final_headers; + Http::RequestHeaderMapPtr modified_headers; + if (Http::Utility::isUpgrade(headers)) { + modified_headers = createHeaderMap(headers); + upgrade_type_ = std::string(headers.getUpgradeValue()); + Http::Utility::transformUpgradeRequestFromH1toH2(*modified_headers); + buildHeaders(final_headers, *modified_headers); + } else if (headers.Method() && headers.Method()->value() == "CONNECT") { + // If this is not an upgrade style connect (above branch) it is a bytestream + // connect and should have :path and :protocol set accordingly + // As HTTP/1.1 does not require a path for CONNECT, we may have to add one + // if shifting codecs. 
For now, default to "/" - this can be made + // configurable if necessary. + // https://tools.ietf.org/html/draft-kinnear-httpbis-http2-transport-02 + modified_headers = createHeaderMap(headers); + modified_headers->setProtocol(Headers::get().ProtocolValues.Bytestream); + if (!headers.Path()) { + modified_headers->setPath("/"); + } + buildHeaders(final_headers, *modified_headers); + } else { + buildHeaders(final_headers, headers); + } + encodeHeadersBase(final_headers, end_stream); +} + +void ConnectionImpl::ServerStreamImpl::encodeHeaders(const ResponseHeaderMap& headers, + bool end_stream) { + // The contract is that client codecs must ensure that :status is present. + ASSERT(headers.Status() != nullptr); + + // This must exist outside of the scope of isUpgrade as the underlying memory is + // needed until encodeHeadersBase has been called. + std::vector final_headers; + Http::ResponseHeaderMapPtr modified_headers; + if (Http::Utility::isUpgrade(headers)) { + modified_headers = createHeaderMap(headers); + Http::Utility::transformUpgradeResponseFromH1toH2(*modified_headers); + buildHeaders(final_headers, *modified_headers); + } else { + buildHeaders(final_headers, headers); + } + encodeHeadersBase(final_headers, end_stream); +} + +void ConnectionImpl::StreamImpl::encodeTrailersBase(const HeaderMap& trailers) { + ASSERT(!local_end_stream_); + local_end_stream_ = true; + if (pending_send_data_.length() > 0) { + // In this case we want trailers to come after we release all pending body data that is + // waiting on window updates. We need to save the trailers so that we can emit them later. + ASSERT(!pending_trailers_to_encode_); + pending_trailers_to_encode_ = cloneTrailers(trailers); + createPendingFlushTimer(); + } else { + submitTrailers(trailers); + parent_.sendPendingFrames(); + } +} + +void ConnectionImpl::StreamImpl::encodeMetadata(const MetadataMapVector& metadata_map_vector) { + ASSERT(parent_.allow_metadata_); + MetadataEncoder& metadata_encoder = getMetadataEncoder(); + if (!metadata_encoder.createPayload(metadata_map_vector)) { + return; + } + for (uint8_t flags : metadata_encoder.payloadFrameFlagBytes()) { + submitMetadata(flags); + } + parent_.sendPendingFrames(); +} + +void ConnectionImpl::StreamImpl::readDisable(bool disable) { + ENVOY_CONN_LOG(debug, "Stream {} {}, unconsumed_bytes {} read_disable_count {}", + parent_.connection_, stream_id_, (disable ? 
"disabled" : "enabled"), + unconsumed_bytes_, read_disable_count_); + if (disable) { + ++read_disable_count_; + } else { + ASSERT(read_disable_count_ > 0); + --read_disable_count_; + if (!buffersOverrun()) { + nghttp2_session_consume(parent_.session_, stream_id_, unconsumed_bytes_); + unconsumed_bytes_ = 0; + parent_.sendPendingFrames(); + } + } +} + +void ConnectionImpl::StreamImpl::pendingRecvBufferHighWatermark() { + ENVOY_CONN_LOG(debug, "recv buffer over limit ", parent_.connection_); + ASSERT(!pending_receive_buffer_high_watermark_called_); + pending_receive_buffer_high_watermark_called_ = true; + readDisable(true); +} + +void ConnectionImpl::StreamImpl::pendingRecvBufferLowWatermark() { + ENVOY_CONN_LOG(debug, "recv buffer under limit ", parent_.connection_); + ASSERT(pending_receive_buffer_high_watermark_called_); + pending_receive_buffer_high_watermark_called_ = false; + readDisable(false); +} + +void ConnectionImpl::ClientStreamImpl::decodeHeaders(bool allow_waiting_for_informational_headers) { + auto& headers = absl::get(headers_or_trailers_); + if (allow_waiting_for_informational_headers && + CodeUtility::is1xx(Http::Utility::getResponseStatus(*headers))) { + waiting_for_non_informational_headers_ = true; + } + + if (!upgrade_type_.empty() && headers->Status()) { + Http::Utility::transformUpgradeResponseFromH2toH1(*headers, upgrade_type_); + } + + if (headers->Status()->value() == "100") { + ASSERT(!remote_end_stream_); + response_decoder_.decode100ContinueHeaders(std::move(headers)); + } else { + response_decoder_.decodeHeaders(std::move(headers), remote_end_stream_); + } +} + +void ConnectionImpl::ClientStreamImpl::decodeTrailers() { + response_decoder_.decodeTrailers( + std::move(absl::get(headers_or_trailers_))); +} + +void ConnectionImpl::ServerStreamImpl::decodeHeaders(bool allow_waiting_for_informational_headers) { + ASSERT(!allow_waiting_for_informational_headers); + auto& headers = absl::get(headers_or_trailers_); + if (Http::Utility::isH2UpgradeRequest(*headers)) { + Http::Utility::transformUpgradeRequestFromH2toH1(*headers); + } + request_decoder_->decodeHeaders(std::move(headers), remote_end_stream_); +} + +void ConnectionImpl::ServerStreamImpl::decodeTrailers() { + request_decoder_->decodeTrailers( + std::move(absl::get(headers_or_trailers_))); +} + +void ConnectionImpl::StreamImpl::pendingSendBufferHighWatermark() { + ENVOY_CONN_LOG(debug, "send buffer over limit ", parent_.connection_); + ASSERT(!pending_send_buffer_high_watermark_called_); + pending_send_buffer_high_watermark_called_ = true; + runHighWatermarkCallbacks(); +} + +void ConnectionImpl::StreamImpl::pendingSendBufferLowWatermark() { + ENVOY_CONN_LOG(debug, "send buffer under limit ", parent_.connection_); + ASSERT(pending_send_buffer_high_watermark_called_); + pending_send_buffer_high_watermark_called_ = false; + runLowWatermarkCallbacks(); +} + +void ConnectionImpl::StreamImpl::saveHeader(HeaderString&& name, HeaderString&& value) { + if (!Utility::reconstituteCrumbledCookies(name, value, cookies_)) { + headers().addViaMove(std::move(name), std::move(value)); + } +} + +void ConnectionImpl::StreamImpl::submitTrailers(const HeaderMap& trailers) { + std::vector final_headers; + buildHeaders(final_headers, trailers); + int rc = nghttp2_submit_trailer(parent_.session_, stream_id_, final_headers.data(), + final_headers.size()); + ASSERT(rc == 0); +} + +void ConnectionImpl::StreamImpl::submitMetadata(uint8_t flags) { + ASSERT(stream_id_ > 0); + const int result = + nghttp2_submit_extension(parent_.session_, 
METADATA_FRAME_TYPE, flags, stream_id_, nullptr); + ASSERT(result == 0); +} + +ssize_t ConnectionImpl::StreamImpl::onDataSourceRead(uint64_t length, uint32_t* data_flags) { + if (pending_send_data_.length() == 0 && !local_end_stream_) { + ASSERT(!data_deferred_); + data_deferred_ = true; + return NGHTTP2_ERR_DEFERRED; + } else { + *data_flags |= NGHTTP2_DATA_FLAG_NO_COPY; + if (local_end_stream_ && pending_send_data_.length() <= length) { + *data_flags |= NGHTTP2_DATA_FLAG_EOF; + if (pending_trailers_to_encode_) { + // We need to tell the library to not set end stream so that we can emit the trailers. + *data_flags |= NGHTTP2_DATA_FLAG_NO_END_STREAM; + submitTrailers(*pending_trailers_to_encode_); + pending_trailers_to_encode_.reset(); + } + } + + return std::min(length, pending_send_data_.length()); + } +} + +int ConnectionImpl::StreamImpl::onDataSourceSend(const uint8_t* framehd, size_t length) { + // In this callback we are writing out a raw DATA frame without copying. nghttp2 assumes that we + // "just know" that the frame header is 9 bytes. + // https://nghttp2.org/documentation/types.html#c.nghttp2_send_data_callback + static const uint64_t FRAME_HEADER_SIZE = 9; + + parent_.outbound_data_frames_++; + + Buffer::OwnedImpl output; + if (!parent_.addOutboundFrameFragment(output, framehd, FRAME_HEADER_SIZE)) { + ENVOY_CONN_LOG(debug, "error sending data frame: Too many frames in the outbound queue", + parent_.connection_); + return NGHTTP2_ERR_FLOODED; + } + + parent_.stats_.pending_send_bytes_.sub(length); + output.move(pending_send_data_, length); + parent_.connection_.write(output, false); + return 0; +} + +void ConnectionImpl::ClientStreamImpl::submitHeaders(const std::vector& final_headers, + nghttp2_data_provider* provider) { + ASSERT(stream_id_ == -1); + stream_id_ = nghttp2_submit_request(parent_.session_, nullptr, final_headers.data(), + final_headers.size(), provider, base()); + ASSERT(stream_id_ > 0); +} + +void ConnectionImpl::ServerStreamImpl::submitHeaders(const std::vector& final_headers, + nghttp2_data_provider* provider) { + ASSERT(stream_id_ != -1); + int rc = nghttp2_submit_response(parent_.session_, stream_id_, final_headers.data(), + final_headers.size(), provider); + ASSERT(rc == 0); +} + +void ConnectionImpl::ServerStreamImpl::createPendingFlushTimer() { + ASSERT(stream_idle_timer_ == nullptr); + if (stream_idle_timeout_.count() > 0) { + stream_idle_timer_ = + parent_.connection_.dispatcher().createTimer([this] { onPendingFlushTimer(); }); + stream_idle_timer_->enableTimer(stream_idle_timeout_); + } +} + +void ConnectionImpl::StreamImpl::onPendingFlushTimer() { + ENVOY_CONN_LOG(debug, "pending stream flush timeout", parent_.connection_); + stream_idle_timer_.reset(); + parent_.stats_.tx_flush_timeout_.inc(); + ASSERT(local_end_stream_ && !local_end_stream_sent_); + // This will emit a reset frame for this stream and close the stream locally. No reset callbacks + // will be run because higher layers think the stream is already finished. 
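+  // Reset locally rather than waiting any longer for the peer to consume the pending data.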
+ resetStreamWorker(StreamResetReason::LocalReset); + parent_.sendPendingFrames(); +} + +void ConnectionImpl::StreamImpl::encodeData(Buffer::Instance& data, bool end_stream) { + ASSERT(!local_end_stream_); + local_end_stream_ = end_stream; + parent_.stats_.pending_send_bytes_.add(data.length()); + pending_send_data_.move(data); + if (data_deferred_) { + int rc = nghttp2_session_resume_data(parent_.session_, stream_id_); + ASSERT(rc == 0); + + data_deferred_ = false; + } + + parent_.sendPendingFrames(); + if (local_end_stream_ && pending_send_data_.length() > 0) { + createPendingFlushTimer(); + } +} + +void ConnectionImpl::StreamImpl::resetStream(StreamResetReason reason) { + // Higher layers expect calling resetStream() to immediately raise reset callbacks. + runResetCallbacks(reason); + + // If we submit a reset, nghttp2 will cancel outbound frames that have not yet been sent. + // We want these frames to go out so we defer the reset until we send all of the frames that + // end the local stream. + if (local_end_stream_ && !local_end_stream_sent_) { + parent_.pending_deferred_reset_ = true; + deferred_reset_ = reason; + ENVOY_CONN_LOG(trace, "deferred reset stream", parent_.connection_); + } else { + resetStreamWorker(reason); + } + + // We must still call sendPendingFrames() in both the deferred and not deferred path. This forces + // the cleanup logic to run which will reset the stream in all cases if all data frames could not + // be sent. + parent_.sendPendingFrames(); +} + +void ConnectionImpl::StreamImpl::resetStreamWorker(StreamResetReason reason) { + int rc = nghttp2_submit_rst_stream(parent_.session_, NGHTTP2_FLAG_NONE, stream_id_, + reason == StreamResetReason::LocalRefusedStreamReset + ? NGHTTP2_REFUSED_STREAM + : NGHTTP2_NO_ERROR); + ASSERT(rc == 0); +} + +MetadataEncoder& ConnectionImpl::StreamImpl::getMetadataEncoder() { + if (metadata_encoder_ == nullptr) { + metadata_encoder_ = std::make_unique(); + } + return *metadata_encoder_; +} + +MetadataDecoder& ConnectionImpl::StreamImpl::getMetadataDecoder() { + if (metadata_decoder_ == nullptr) { + auto cb = [this](MetadataMapPtr&& metadata_map_ptr) { + this->onMetadataDecoded(std::move(metadata_map_ptr)); + }; + metadata_decoder_ = std::make_unique(cb); + } + return *metadata_decoder_; +} + +void ConnectionImpl::StreamImpl::onMetadataDecoded(MetadataMapPtr&& metadata_map_ptr) { + decoder().decodeMetadata(std::move(metadata_map_ptr)); +} + +ConnectionImpl::ConnectionImpl(Network::Connection& connection, CodecStats& stats, + const envoy::config::core::v3::Http2ProtocolOptions& http2_options, + const uint32_t max_headers_kb, const uint32_t max_headers_count) + : stats_(stats), connection_(connection), max_headers_kb_(max_headers_kb), + max_headers_count_(max_headers_count), + per_stream_buffer_limit_(http2_options.initial_stream_window_size().value()), + stream_error_on_invalid_http_messaging_( + http2_options.stream_error_on_invalid_http_messaging()), + flood_detected_(false), max_outbound_frames_(http2_options.max_outbound_frames().value()), + frame_buffer_releasor_([this]() { releaseOutboundFrame(); }), + max_outbound_control_frames_(http2_options.max_outbound_control_frames().value()), + control_frame_buffer_releasor_([this]() { releaseOutboundControlFrame(); }), + max_consecutive_inbound_frames_with_empty_payload_( + http2_options.max_consecutive_inbound_frames_with_empty_payload().value()), + max_inbound_priority_frames_per_stream_( + http2_options.max_inbound_priority_frames_per_stream().value()), + 
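// [Editor's illustrative sketch, not part of the patch.] resetStream() above
// raises reset callbacks right away but may postpone the actual RST_STREAM:
// if local end-of-stream has been requested but its frames have not yet been
// written, the reset is recorded as deferred and replayed after those frames
// go out, so short-circuited local replies still reach the peer. A minimal
// model of that choice (hypothetical names):
#include <optional>

enum class ResetReason { LocalReset, LocalRefusedStreamReset };

struct StreamResetModel {
  bool local_end_stream{false};
  bool local_end_stream_sent{false};
  std::optional<ResetReason> deferred_reset;
  bool rst_submitted{false};

  void resetStream(ResetReason reason) {
    if (local_end_stream && !local_end_stream_sent) {
      deferred_reset = reason; // flush the stream-ending frames first
    } else {
      submitReset(reason);
    }
  }
  // Called once pending frames have been written (or could not be).
  void onFramesFlushed() {
    if (deferred_reset) {
      submitReset(*deferred_reset);
      deferred_reset.reset();
    }
  }
  void submitReset(ResetReason) { rst_submitted = true; }
};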
max_inbound_window_update_frames_per_data_frame_sent_( + http2_options.max_inbound_window_update_frames_per_data_frame_sent().value()), + dispatching_(false), raised_goaway_(false), pending_deferred_reset_(false) {} + +ConnectionImpl::~ConnectionImpl() { + for (const auto& stream : active_streams_) { + stream->destroy(); + } + nghttp2_session_del(session_); +} + +Http::Status ConnectionImpl::dispatch(Buffer::Instance& data) { + // TODO(#10878): Remove this wrapper when exception removal is complete. innerDispatch may either + // throw an exception or return an error status. The utility wrapper catches exceptions and + // converts them to error statuses. + return Http::Utility::exceptionToStatus( + [&](Buffer::Instance& data) -> Http::Status { return innerDispatch(data); }, data); +} + +Http::Status ConnectionImpl::innerDispatch(Buffer::Instance& data) { + ENVOY_CONN_LOG(trace, "dispatching {} bytes", connection_, data.length()); + // Make sure that dispatching_ is set to false after dispatching, even when + // ConnectionImpl::dispatch returns early or throws an exception (consider removing if there is a + // single return after exception removal (#10878)). + Cleanup cleanup([this]() { dispatching_ = false; }); + for (const Buffer::RawSlice& slice : data.getRawSlices()) { + dispatching_ = true; + ssize_t rc = + nghttp2_session_mem_recv(session_, static_cast(slice.mem_), slice.len_); + if (rc == NGHTTP2_ERR_FLOODED || flood_detected_) { + throw FrameFloodException( + "Flooding was detected in this HTTP/2 session, and it must be closed"); + } + if (rc != static_cast(slice.len_)) { + throw CodecProtocolException(fmt::format("{}", nghttp2_strerror(rc))); + } + + dispatching_ = false; + } + + ENVOY_CONN_LOG(trace, "dispatched {} bytes", connection_, data.length()); + data.drain(data.length()); + + // Decoding incoming frames can generate outbound frames so flush pending. + sendPendingFrames(); + return Http::okStatus(); +} + +ConnectionImpl::StreamImpl* ConnectionImpl::getStream(int32_t stream_id) { + return static_cast(nghttp2_session_get_stream_user_data(session_, stream_id)); +} + +int ConnectionImpl::onData(int32_t stream_id, const uint8_t* data, size_t len) { + StreamImpl* stream = getStream(stream_id); + // If this results in buffering too much data, the watermark buffer will call + // pendingRecvBufferHighWatermark, resulting in ++read_disable_count_ + stream->pending_recv_data_.add(data, len); + // Update the window to the peer unless some consumer of this stream's data has hit a flow control + // limit and disabled reads on this stream + if (!stream->buffersOverrun()) { + nghttp2_session_consume(session_, stream_id, len); + } else { + stream->unconsumed_bytes_ += len; + } + return 0; +} + +void ConnectionImpl::goAway() { + int rc = nghttp2_submit_goaway(session_, NGHTTP2_FLAG_NONE, + nghttp2_session_get_last_proc_stream_id(session_), + NGHTTP2_NO_ERROR, nullptr, 0); + ASSERT(rc == 0); + + sendPendingFrames(); +} + +void ConnectionImpl::shutdownNotice() { + int rc = nghttp2_submit_shutdown_notice(session_); + ASSERT(rc == 0); + + sendPendingFrames(); +} + +int ConnectionImpl::onBeforeFrameReceived(const nghttp2_frame_hd* hd) { + ENVOY_CONN_LOG(trace, "about to recv frame type={}, flags={}", connection_, + static_cast(hd->type), static_cast(hd->flags)); + + // Track all the frames without padding here, since this is the only callback we receive + // for some of them (e.g. CONTINUATION frame, frames sent on closed streams, etc.). 
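// [Editor's illustrative sketch, not part of the patch.] dispatch() above is a
// thin wrapper that funnels innerDispatch() through an exception-to-status
// translator while exception removal (#10878) is in progress: flood and codec
// protocol exceptions become error statuses at the codec boundary. The
// pattern, reduced to standard-library pieces (this is not Envoy's actual
// Http::Utility::exceptionToStatus signature):
#include <functional>
#include <stdexcept>
#include <string>

struct Status {
  bool ok{true};
  std::string message;
};

Status exceptionToStatus(const std::function<Status()>& dispatch_fn) {
  try {
    return dispatch_fn(); // may throw while exceptions are still in use
  } catch (const std::runtime_error& e) {
    return Status{false, e.what()}; // codec/flood errors become error statuses
  }
}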
+ // HEADERS frame is tracked in onBeginHeaders(), DATA frame is tracked in onFrameReceived(). + if (hd->type != NGHTTP2_HEADERS && hd->type != NGHTTP2_DATA) { + if (!trackInboundFrames(hd, 0)) { + return NGHTTP2_ERR_FLOODED; + } + } + + return 0; +} + +ABSL_MUST_USE_RESULT +enum GoAwayErrorCode ngHttp2ErrorCodeToErrorCode(uint32_t code) noexcept { + switch (code) { + case NGHTTP2_NO_ERROR: + return GoAwayErrorCode::NoError; + default: + return GoAwayErrorCode::Other; + } +} + +int ConnectionImpl::onFrameReceived(const nghttp2_frame* frame) { + ENVOY_CONN_LOG(trace, "recv frame type={}", connection_, static_cast(frame->hd.type)); + + // onFrameReceived() is called with a complete HEADERS frame assembled from all the HEADERS + // and CONTINUATION frames, but we track them separately: HEADERS frames in onBeginHeaders() + // and CONTINUATION frames in onBeforeFrameReceived(). + ASSERT(frame->hd.type != NGHTTP2_CONTINUATION); + + if (frame->hd.type == NGHTTP2_DATA) { + if (!trackInboundFrames(&frame->hd, frame->data.padlen)) { + return NGHTTP2_ERR_FLOODED; + } + } + + // Only raise GOAWAY once, since we don't currently expose stream information. Shutdown + // notifications are the same as a normal GOAWAY. + // TODO: handle multiple GOAWAY frames. + if (frame->hd.type == NGHTTP2_GOAWAY && !raised_goaway_) { + ASSERT(frame->hd.stream_id == 0); + raised_goaway_ = true; + callbacks().onGoAway(ngHttp2ErrorCodeToErrorCode(frame->goaway.error_code)); + return 0; + } + + if (frame->hd.type == NGHTTP2_SETTINGS && frame->hd.flags == NGHTTP2_FLAG_NONE) { + onSettingsForTest(frame->settings); + } + + StreamImpl* stream = getStream(frame->hd.stream_id); + if (!stream) { + return 0; + } + + switch (frame->hd.type) { + case NGHTTP2_HEADERS: { + stream->remote_end_stream_ = frame->hd.flags & NGHTTP2_FLAG_END_STREAM; + if (!stream->cookies_.empty()) { + HeaderString key(Headers::get().Cookie); + stream->headers().addViaMove(std::move(key), std::move(stream->cookies_)); + } + + switch (frame->headers.cat) { + case NGHTTP2_HCAT_RESPONSE: + case NGHTTP2_HCAT_REQUEST: { + stream->decodeHeaders(frame->headers.cat == NGHTTP2_HCAT_RESPONSE); + break; + } + + case NGHTTP2_HCAT_HEADERS: { + // It's possible that we are waiting to send a deferred reset, so only raise headers/trailers + // if local is not complete. + if (!stream->deferred_reset_) { + if (!stream->waiting_for_non_informational_headers_) { + if (!stream->remote_end_stream_) { + // This indicates we have received more headers frames than Envoy + // supports. Even if this is valid HTTP (something like 103 early hints) fail here + // rather than trying to push unexpected headers through the Envoy pipeline as that + // will likely result in Envoy crashing. + // It would be cleaner to reset the stream rather than reset the/ entire connection but + // it's also slightly more dangerous so currently we err on the side of safety. + stats_.too_many_header_frames_.inc(); + throw CodecProtocolException("Unexpected 'trailers' with no end stream."); + } else { + stream->decodeTrailers(); + } + } else { + ASSERT(!nghttp2_session_check_server_session(session_)); + stream->waiting_for_non_informational_headers_ = false; + + // Even if we have :status 100 in the client case in a response, when + // we received a 1xx to start out with, nghttp2 message checking + // guarantees proper flow here. + stream->decodeHeaders(false); + } + } + + break; + } + + default: + // We do not currently support push. 
+ NOT_IMPLEMENTED_GCOVR_EXCL_LINE; + } + + break; + } + case NGHTTP2_DATA: { + stream->remote_end_stream_ = frame->hd.flags & NGHTTP2_FLAG_END_STREAM; + + // It's possible that we are waiting to send a deferred reset, so only raise data if local + // is not complete. + if (!stream->deferred_reset_) { + stream->decoder().decodeData(stream->pending_recv_data_, stream->remote_end_stream_); + } + + stream->pending_recv_data_.drain(stream->pending_recv_data_.length()); + break; + } + case NGHTTP2_RST_STREAM: { + ENVOY_CONN_LOG(trace, "remote reset: {}", connection_, frame->rst_stream.error_code); + stats_.rx_reset_.inc(); + break; + } + } + + return 0; +} + +int ConnectionImpl::onFrameSend(const nghttp2_frame* frame) { + // The nghttp2 library does not cleanly give us a way to determine whether we received invalid + // data from our peer. Sometimes it raises the invalid frame callback, and sometimes it does not. + // In all cases however it will attempt to send a GOAWAY frame with an error status. If we see + // an outgoing frame of this type, we will return an error code so that we can abort execution. + ENVOY_CONN_LOG(trace, "sent frame type={}", connection_, static_cast(frame->hd.type)); + switch (frame->hd.type) { + case NGHTTP2_GOAWAY: { + ENVOY_CONN_LOG(debug, "sent goaway code={}", connection_, frame->goaway.error_code); + if (frame->goaway.error_code != NGHTTP2_NO_ERROR) { + // TODO(mattklein123): Returning this error code abandons standard nghttp2 frame accounting. + // As such, it is not reliable to call sendPendingFrames() again after this and we assume + // that the connection is going to get torn down immediately. One byproduct of this is that + // we need to cancel all pending flush stream timeouts since they can race with connection + // teardown. As part of the work to remove exceptions we should aim to clean up all of this + // error handling logic and only handle this type of case at the end of dispatch. + for (auto& stream : active_streams_) { + stream->disarmStreamIdleTimer(); + } + return NGHTTP2_ERR_CALLBACK_FAILURE; + } + break; + } + + case NGHTTP2_RST_STREAM: { + ENVOY_CONN_LOG(debug, "sent reset code={}", connection_, frame->rst_stream.error_code); + stats_.tx_reset_.inc(); + break; + } + + case NGHTTP2_HEADERS: + case NGHTTP2_DATA: { + StreamImpl* stream = getStream(frame->hd.stream_id); + stream->local_end_stream_sent_ = frame->hd.flags & NGHTTP2_FLAG_END_STREAM; + break; + } + } + + return 0; +} + +int ConnectionImpl::onError(absl::string_view error) { + ENVOY_CONN_LOG(debug, "invalid http2: {}", connection_, error); + return 0; +} + +int ConnectionImpl::onInvalidFrame(int32_t stream_id, int error_code) { + ENVOY_CONN_LOG(debug, "invalid frame: {} on stream {}", connection_, nghttp2_strerror(error_code), + stream_id); + + // Set details of error_code in the stream whenever we have one. + StreamImpl* stream = getStream(stream_id); + if (stream != nullptr) { + stream->setDetails(Http2ResponseCodeDetails::get().errorDetails(error_code)); + } + + if (error_code == NGHTTP2_ERR_HTTP_HEADER || error_code == NGHTTP2_ERR_HTTP_MESSAGING) { + stats_.rx_messaging_error_.inc(); + + if (stream_error_on_invalid_http_messaging_) { + // The stream is about to be closed due to an invalid header or messaging. Don't kill the + // entire connection if one stream has bad headers or messaging. + if (stream != nullptr) { + // See comment below in onStreamClose() for why we do this. 
+ stream->reset_due_to_messaging_error_ = true; + } + return 0; + } + } + + // Cause dispatch to return with an error code. + return NGHTTP2_ERR_CALLBACK_FAILURE; +} + +int ConnectionImpl::onBeforeFrameSend(const nghttp2_frame* frame) { + ENVOY_CONN_LOG(trace, "about to send frame type={}, flags={}", connection_, + static_cast(frame->hd.type), static_cast(frame->hd.flags)); + ASSERT(!is_outbound_flood_monitored_control_frame_); + // Flag flood monitored outbound control frames. + is_outbound_flood_monitored_control_frame_ = + ((frame->hd.type == NGHTTP2_PING || frame->hd.type == NGHTTP2_SETTINGS) && + frame->hd.flags & NGHTTP2_FLAG_ACK) || + frame->hd.type == NGHTTP2_RST_STREAM; + return 0; +} + +void ConnectionImpl::incrementOutboundFrameCount(bool is_outbound_flood_monitored_control_frame) { + ++outbound_frames_; + if (is_outbound_flood_monitored_control_frame) { + ++outbound_control_frames_; + } + checkOutboundQueueLimits(); +} + +bool ConnectionImpl::addOutboundFrameFragment(Buffer::OwnedImpl& output, const uint8_t* data, + size_t length) { + // Reset the outbound frame type (set in the onBeforeFrameSend callback) since the + // onBeforeFrameSend callback is not called for DATA frames. + bool is_outbound_flood_monitored_control_frame = false; + std::swap(is_outbound_flood_monitored_control_frame, is_outbound_flood_monitored_control_frame_); + try { + incrementOutboundFrameCount(is_outbound_flood_monitored_control_frame); + } catch (const FrameFloodException&) { + return false; + } + + output.add(data, length); + output.addDrainTracker(is_outbound_flood_monitored_control_frame ? control_frame_buffer_releasor_ + : frame_buffer_releasor_); + return true; +} + +void ConnectionImpl::releaseOutboundFrame() { + ASSERT(outbound_frames_ >= 1); + --outbound_frames_; +} + +void ConnectionImpl::releaseOutboundControlFrame() { + ASSERT(outbound_control_frames_ >= 1); + --outbound_control_frames_; + releaseOutboundFrame(); +} + +ssize_t ConnectionImpl::onSend(const uint8_t* data, size_t length) { + ENVOY_CONN_LOG(trace, "send data: bytes={}", connection_, length); + Buffer::OwnedImpl buffer; + if (!addOutboundFrameFragment(buffer, data, length)) { + ENVOY_CONN_LOG(debug, "error sending frame: Too many frames in the outbound queue.", + connection_); + return NGHTTP2_ERR_FLOODED; + } + + // While the buffer is transient the fragment it contains will be moved into the + // write_buffer_ of the underlying connection_ by the write method below. + // This creates lifetime dependency between the write_buffer_ of the underlying connection + // and the codec object. Specifically the write_buffer_ MUST be either fully drained or + // deleted before the codec object is deleted. This is presently guaranteed by the + // destruction order of the Network::ConnectionImpl object where write_buffer_ is + // destroyed before the filter_manager_ which owns the codec through Http::ConnectionManagerImpl. + connection_.write(buffer, false); + return length; +} + +int ConnectionImpl::onStreamClose(int32_t stream_id, uint32_t error_code) { + StreamImpl* stream = getStream(stream_id); + if (stream) { + ENVOY_CONN_LOG(debug, "stream closed: {}", connection_, error_code); + if (!stream->remote_end_stream_ || !stream->local_end_stream_) { + StreamResetReason reason; + if (stream->reset_due_to_messaging_error_) { + // Unfortunately, the nghttp2 API makes it incredibly difficult to clearly understand + // the flow of resets. I.e., did the reset originate locally? Was it remote? 
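// [Editor's illustrative sketch, not part of the patch.] The outbound flood
// protection above counts every frame queued toward the transport socket
// (with a separate counter for flood-monitored control frames) and attaches a
// "releasor" to the buffered bytes so the count drops only when those bytes
// are actually drained; if too many frames pile up, the connection is torn
// down. A compact standalone model of that accounting (hypothetical names
// and limits):
#include <cstdint>
#include <functional>
#include <stdexcept>

class OutboundFrameAccounting {
public:
  OutboundFrameAccounting(uint32_t max_frames, uint32_t max_control_frames)
      : max_frames_(max_frames), max_control_frames_(max_control_frames) {}

  // Returns the callback to run once the frame's bytes leave the write buffer.
  std::function<void()> onFrameQueued(bool is_control_frame) {
    ++frames_;
    if (is_control_frame) {
      ++control_frames_;
    }
    if (frames_ > max_frames_ || control_frames_ > max_control_frames_) {
      throw std::runtime_error("Too many frames in the outbound queue.");
    }
    return [this, is_control_frame]() {
      --frames_;
      if (is_control_frame) {
        --control_frames_;
      }
    };
  }

private:
  const uint32_t max_frames_;
  const uint32_t max_control_frames_;
  uint32_t frames_{0};
  uint32_t control_frames_{0};
};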
Here, + // we attempt to track cases in which we sent a reset locally due to an invalid frame + // received from the remote. We only do that in two cases currently (HTTP messaging layer + // errors from https://tools.ietf.org/html/rfc7540#section-8 which nghttp2 is very strict + // about). In other cases we treat invalid frames as a protocol error and just kill + // the connection. + reason = StreamResetReason::LocalReset; + } else { + reason = error_code == NGHTTP2_REFUSED_STREAM ? StreamResetReason::RemoteRefusedStreamReset + : StreamResetReason::RemoteReset; + } + + stream->runResetCallbacks(reason); + } + + stream->destroy(); + connection_.dispatcher().deferredDelete(stream->removeFromList(active_streams_)); + // Any unconsumed data must be consumed before the stream is deleted. + // nghttp2 does not appear to track this internally, and any stream deleted + // with outstanding window will contribute to a slow connection-window leak. + nghttp2_session_consume(session_, stream_id, stream->unconsumed_bytes_); + stream->unconsumed_bytes_ = 0; + nghttp2_session_set_stream_user_data(session_, stream->stream_id_, nullptr); + } + + return 0; +} + +int ConnectionImpl::onMetadataReceived(int32_t stream_id, const uint8_t* data, size_t len) { + ENVOY_CONN_LOG(trace, "recv {} bytes METADATA", connection_, len); + + StreamImpl* stream = getStream(stream_id); + if (!stream) { + return 0; + } + + bool success = stream->getMetadataDecoder().receiveMetadata(data, len); + return success ? 0 : NGHTTP2_ERR_CALLBACK_FAILURE; +} + +int ConnectionImpl::onMetadataFrameComplete(int32_t stream_id, bool end_metadata) { + ENVOY_CONN_LOG(trace, "recv METADATA frame on stream {}, end_metadata: {}", connection_, + stream_id, end_metadata); + + StreamImpl* stream = getStream(stream_id); + if (stream == nullptr) { + return 0; + } + + bool result = stream->getMetadataDecoder().onMetadataFrameComplete(end_metadata); + return result ? 0 : NGHTTP2_ERR_CALLBACK_FAILURE; +} + +ssize_t ConnectionImpl::packMetadata(int32_t stream_id, uint8_t* buf, size_t len) { + ENVOY_CONN_LOG(trace, "pack METADATA frame on stream {}", connection_, stream_id); + + StreamImpl* stream = getStream(stream_id); + if (stream == nullptr) { + return 0; + } + + MetadataEncoder& encoder = stream->getMetadataEncoder(); + return encoder.packNextFramePayload(buf, len); +} + +int ConnectionImpl::saveHeader(const nghttp2_frame* frame, HeaderString&& name, + HeaderString&& value) { + StreamImpl* stream = getStream(frame->hd.stream_id); + if (!stream) { + // We have seen 1 or 2 crashes where we get a headers callback but there is no associated + // stream data. I honestly am not sure how this can happen. However, from reading the nghttp2 + // code it looks possible that inflate_header_block() can safely inflate headers for an already + // closed stream, but will still call the headers callback. Since that seems possible, we should + // ignore this case here. + // TODO(mattklein123): Figure out a test case that can hit this. + stats_.headers_cb_no_stream_.inc(); + return 0; + } + + auto should_return = checkHeaderNameForUnderscores(name.getStringView()); + if (should_return) { + name.clear(); + value.clear(); + return should_return.value(); + } + + stream->saveHeader(std::move(name), std::move(value)); + + if (stream->headers().byteSize() > max_headers_kb_ * 1024 || + stream->headers().size() > max_headers_count_) { + // This will cause the library to reset/close the stream. 
+ stats_.header_overflow_.inc(); + return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE; + } else { + return 0; + } +} + +void ConnectionImpl::sendPendingFrames() { + if (dispatching_ || connection_.state() == Network::Connection::State::Closed) { + return; + } + + const int rc = nghttp2_session_send(session_); + if (rc != 0) { + ASSERT(rc == NGHTTP2_ERR_CALLBACK_FAILURE); + // For errors caused by the pending outbound frame flood the FrameFloodException has + // to be thrown. However the nghttp2 library returns only the generic error code for + // all failure types. Check queue limits and throw FrameFloodException if they were + // exceeded. + if (outbound_frames_ > max_outbound_frames_ || + outbound_control_frames_ > max_outbound_control_frames_) { + throw FrameFloodException("Too many frames in the outbound queue."); + } + + throw CodecProtocolException(std::string(nghttp2_strerror(rc))); + } + + // See ConnectionImpl::StreamImpl::resetStream() for why we do this. This is an uncommon event, + // so iterating through every stream to find the ones that have a deferred reset is not a big + // deal. Furthermore, queueing a reset frame does not actually invoke the close stream callback. + // This is only done when the reset frame is sent. Thus, it's safe to work directly with the + // stream map. + // NOTE: The way we handle deferred reset is essentially best effort. If we intend to do a + // deferred reset, we try to finish the stream, including writing any pending data frames. + // If we cannot do this (potentially due to not enough window), we just reset the stream. + // In general this behavior occurs only when we are trying to send immediate error messages + // to short circuit requests. In the best effort case, we complete the stream before + // resetting. In other cases, we just do the reset now which will blow away pending data + // frames and release any memory associated with the stream. + if (pending_deferred_reset_) { + pending_deferred_reset_ = false; + for (auto& stream : active_streams_) { + if (stream->deferred_reset_) { + stream->resetStreamWorker(stream->deferred_reset_.value()); + } + } + sendPendingFrames(); + } +} + +void ConnectionImpl::sendSettings( + const envoy::config::core::v3::Http2ProtocolOptions& http2_options, bool disable_push) { + absl::InlinedVector settings; + auto insertParameter = [&settings](const nghttp2_settings_entry& entry) mutable -> bool { + const auto it = std::find_if(settings.cbegin(), settings.cend(), + [&entry](const nghttp2_settings_entry& existing) { + return entry.settings_id == existing.settings_id; + }); + if (it != settings.end()) { + return false; + } + settings.push_back(entry); + return true; + }; + + // Universally disable receiving push promise frames as we don't currently support + // them. nghttp2 will fail the connection if the other side still sends them. + // TODO(mattklein123): Remove this when we correctly proxy push promise. + // NOTE: This is a special case with respect to custom parameter overrides in that server push is + // not supported and therefore not end user configurable. + if (disable_push) { + settings.push_back( + {static_cast(NGHTTP2_SETTINGS_ENABLE_PUSH), disable_push ? 
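// [Editor's illustrative sketch, not part of the patch.] saveHeader() above
// rejects a stream whose accumulated header block grows past the configured
// size or count limits by returning a "temporal" callback failure, which makes
// nghttp2 reset just that stream rather than the whole connection. The check
// itself, as a tiny standalone helper (hypothetical name):
#include <cstddef>
#include <cstdint>

bool headerMapWithinLimits(size_t accumulated_bytes, size_t header_count,
                           uint32_t max_headers_kb, uint32_t max_headers_count) {
  return accumulated_bytes <= static_cast<size_t>(max_headers_kb) * 1024 &&
         header_count <= max_headers_count;
}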
0U : 1U}); + } + + for (const auto& it : http2_options.custom_settings_parameters()) { + ASSERT(it.identifier().value() <= std::numeric_limits::max()); + const bool result = + insertParameter({static_cast(it.identifier().value()), it.value().value()}); + ASSERT(result); + ENVOY_CONN_LOG(debug, "adding custom settings parameter with id {:#x} to {}", connection_, + it.identifier().value(), it.value().value()); + } + + // Insert named parameters. + settings.insert( + settings.end(), + {{NGHTTP2_SETTINGS_HEADER_TABLE_SIZE, http2_options.hpack_table_size().value()}, + {NGHTTP2_SETTINGS_ENABLE_CONNECT_PROTOCOL, http2_options.allow_connect()}, + {NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS, http2_options.max_concurrent_streams().value()}, + {NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE, http2_options.initial_stream_window_size().value()}}); + if (!settings.empty()) { + int rc = nghttp2_submit_settings(session_, NGHTTP2_FLAG_NONE, settings.data(), settings.size()); + ASSERT(rc == 0); + } else { + // nghttp2_submit_settings need to be called at least once + int rc = nghttp2_submit_settings(session_, NGHTTP2_FLAG_NONE, nullptr, 0); + ASSERT(rc == 0); + } + + const uint32_t initial_connection_window_size = + http2_options.initial_connection_window_size().value(); + // Increase connection window size up to our default size. + if (initial_connection_window_size != NGHTTP2_INITIAL_CONNECTION_WINDOW_SIZE) { + ENVOY_CONN_LOG(debug, "updating connection-level initial window size to {}", connection_, + initial_connection_window_size); + int rc = nghttp2_submit_window_update(session_, NGHTTP2_FLAG_NONE, 0, + initial_connection_window_size - + NGHTTP2_INITIAL_CONNECTION_WINDOW_SIZE); + ASSERT(rc == 0); + } +} + +ConnectionImpl::Http2Callbacks::Http2Callbacks() { + nghttp2_session_callbacks_new(&callbacks_); + nghttp2_session_callbacks_set_send_callback( + callbacks_, + [](nghttp2_session*, const uint8_t* data, size_t length, int, void* user_data) -> ssize_t { + return static_cast(user_data)->onSend(data, length); + }); + + nghttp2_session_callbacks_set_send_data_callback( + callbacks_, + [](nghttp2_session*, nghttp2_frame* frame, const uint8_t* framehd, size_t length, + nghttp2_data_source* source, void*) -> int { + ASSERT(frame->data.padlen == 0); + return static_cast(source->ptr)->onDataSourceSend(framehd, length); + }); + + nghttp2_session_callbacks_set_on_begin_headers_callback( + callbacks_, [](nghttp2_session*, const nghttp2_frame* frame, void* user_data) -> int { + return static_cast(user_data)->onBeginHeaders(frame); + }); + + nghttp2_session_callbacks_set_on_header_callback( + callbacks_, + [](nghttp2_session*, const nghttp2_frame* frame, const uint8_t* raw_name, size_t name_length, + const uint8_t* raw_value, size_t value_length, uint8_t, void* user_data) -> int { + // TODO PERF: Can reference count here to avoid copies. 
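// [Editor's illustrative sketch, not part of the patch.] sendSettings() above
// assembles the SETTINGS payload with an insertParameter() helper that refuses
// to add a second entry for a settings identifier that is already present, so
// custom overrides cannot silently duplicate an identifier. The same idea with
// standard containers (hypothetical types; the real code uses
// nghttp2_settings_entry and an inlined vector):
#include <algorithm>
#include <cstdint>
#include <vector>

struct SettingsEntry {
  int32_t id;
  uint32_t value;
};

bool insertParameter(std::vector<SettingsEntry>& settings, SettingsEntry entry) {
  const bool exists = std::any_of(
      settings.cbegin(), settings.cend(),
      [&entry](const SettingsEntry& existing) { return existing.id == entry.id; });
  if (exists) {
    return false; // keep the first occurrence and report the collision
  }
  settings.push_back(entry);
  return true;
}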
+ HeaderString name; + name.setCopy(reinterpret_cast(raw_name), name_length); + HeaderString value; + value.setCopy(reinterpret_cast(raw_value), value_length); + return static_cast(user_data)->onHeader(frame, std::move(name), + std::move(value)); + }); + + nghttp2_session_callbacks_set_on_data_chunk_recv_callback( + callbacks_, + [](nghttp2_session*, uint8_t, int32_t stream_id, const uint8_t* data, size_t len, + void* user_data) -> int { + return static_cast(user_data)->onData(stream_id, data, len); + }); + + nghttp2_session_callbacks_set_on_begin_frame_callback( + callbacks_, [](nghttp2_session*, const nghttp2_frame_hd* hd, void* user_data) -> int { + return static_cast(user_data)->onBeforeFrameReceived(hd); + }); + + nghttp2_session_callbacks_set_on_frame_recv_callback( + callbacks_, [](nghttp2_session*, const nghttp2_frame* frame, void* user_data) -> int { + return static_cast(user_data)->onFrameReceived(frame); + }); + + nghttp2_session_callbacks_set_on_stream_close_callback( + callbacks_, + [](nghttp2_session*, int32_t stream_id, uint32_t error_code, void* user_data) -> int { + return static_cast(user_data)->onStreamClose(stream_id, error_code); + }); + + nghttp2_session_callbacks_set_on_frame_send_callback( + callbacks_, [](nghttp2_session*, const nghttp2_frame* frame, void* user_data) -> int { + return static_cast(user_data)->onFrameSend(frame); + }); + + nghttp2_session_callbacks_set_before_frame_send_callback( + callbacks_, [](nghttp2_session*, const nghttp2_frame* frame, void* user_data) -> int { + return static_cast(user_data)->onBeforeFrameSend(frame); + }); + + nghttp2_session_callbacks_set_on_frame_not_send_callback( + callbacks_, [](nghttp2_session*, const nghttp2_frame*, int, void*) -> int { + // We used to always return failure here but it looks now this can get called if the other + // side sends GOAWAY and we are trying to send a SETTINGS ACK. Just ignore this for now. 
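// [Editor's illustrative sketch, not part of the patch.] Every lambda
// registered in Http2Callbacks above follows the same shape: a capture-less
// lambda (so it can decay to a C function pointer) receives the opaque
// user_data associated with the session and casts it back to the owning C++
// object before forwarding the call. Reduced to its essence with hypothetical
// stand-in types rather than the nghttp2 API:
#include <cstddef>
#include <cstdint>

// Stand-in for a C-style callback slot such as nghttp2's.
using OnDataCallback = int (*)(int32_t stream_id, const uint8_t* data, size_t len,
                               void* user_data);

struct FakeSession {
  OnDataCallback on_data{nullptr};
  void* user_data{nullptr};
};

class Codec {
public:
  int onData(int32_t /*stream_id*/, const uint8_t* /*data*/, size_t len) {
    bytes_seen_ += len;
    return 0;
  }
  uint64_t bytes_seen_{0};
};

void registerCallbacks(FakeSession& session, Codec& codec) {
  session.user_data = &codec;
  session.on_data = [](int32_t stream_id, const uint8_t* data, size_t len,
                       void* user_data) -> int {
    // Recover the C++ object from the opaque pointer and forward the call.
    return static_cast<Codec*>(user_data)->onData(stream_id, data, len);
  };
}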
+ return 0; + }); + + nghttp2_session_callbacks_set_on_invalid_frame_recv_callback( + callbacks_, + [](nghttp2_session*, const nghttp2_frame* frame, int error_code, void* user_data) -> int { + return static_cast(user_data)->onInvalidFrame(frame->hd.stream_id, + error_code); + }); + + nghttp2_session_callbacks_set_on_extension_chunk_recv_callback( + callbacks_, + [](nghttp2_session*, const nghttp2_frame_hd* hd, const uint8_t* data, size_t len, + void* user_data) -> int { + ASSERT(hd->length >= len); + return static_cast(user_data)->onMetadataReceived(hd->stream_id, data, + len); + }); + + nghttp2_session_callbacks_set_unpack_extension_callback( + callbacks_, [](nghttp2_session*, void**, const nghttp2_frame_hd* hd, void* user_data) -> int { + return static_cast(user_data)->onMetadataFrameComplete( + hd->stream_id, hd->flags == END_METADATA_FLAG); + }); + + nghttp2_session_callbacks_set_pack_extension_callback( + callbacks_, + [](nghttp2_session*, uint8_t* buf, size_t len, const nghttp2_frame* frame, + void* user_data) -> ssize_t { + ASSERT(frame->hd.length <= len); + return static_cast(user_data)->packMetadata(frame->hd.stream_id, buf, len); + }); + + nghttp2_session_callbacks_set_error_callback2( + callbacks_, [](nghttp2_session*, int, const char* msg, size_t len, void* user_data) -> int { + return static_cast(user_data)->onError(absl::string_view(msg, len)); + }); +} + +ConnectionImpl::Http2Callbacks::~Http2Callbacks() { nghttp2_session_callbacks_del(callbacks_); } + +ConnectionImpl::Http2Options::Http2Options( + const envoy::config::core::v3::Http2ProtocolOptions& http2_options) { + nghttp2_option_new(&options_); + // Currently we do not do anything with stream priority. Setting the following option prevents + // nghttp2 from keeping around closed streams for use during stream priority dependency graph + // calculations. This saves a tremendous amount of memory in cases where there are a large + // number of kept alive HTTP/2 connections. + nghttp2_option_set_no_closed_streams(options_, 1); + nghttp2_option_set_no_auto_window_update(options_, 1); + + // The max send header block length is configured to an arbitrarily high number so as to never + // trigger the check within nghttp2, as we check request headers length in + // codec_impl::saveHeader. + nghttp2_option_set_max_send_header_block_length(options_, 0x2000000); + + if (http2_options.hpack_table_size().value() != NGHTTP2_DEFAULT_HEADER_TABLE_SIZE) { + nghttp2_option_set_max_deflate_dynamic_table_size(options_, + http2_options.hpack_table_size().value()); + } + + if (http2_options.allow_metadata()) { + nghttp2_option_set_user_recv_extension_type(options_, METADATA_FRAME_TYPE); + } + + // nghttp2 v1.39.2 lowered the internal flood protection limit from 10K to 1K of ACK frames. + // This new limit may cause the internal nghttp2 mitigation to trigger more often (as it + // requires just 9K of incoming bytes for smallest 9 byte SETTINGS frame), bypassing the same + // mitigation and its associated behavior in the envoy HTTP/2 codec. Since envoy does not rely + // on this mitigation, set back to the old 10K number to avoid any changes in the HTTP/2 codec + // behavior. 
+ nghttp2_option_set_max_outbound_ack(options_, 10000); +} + +ConnectionImpl::Http2Options::~Http2Options() { nghttp2_option_del(options_); } + +ConnectionImpl::ClientHttp2Options::ClientHttp2Options( + const envoy::config::core::v3::Http2ProtocolOptions& http2_options) + : Http2Options(http2_options) { + // Temporarily disable initial max streams limit/protection, since we might want to create + // more than 100 streams before receiving the HTTP/2 SETTINGS frame from the server. + // + // TODO(PiotrSikora): remove this once multiple upstream connections or queuing are implemented. + nghttp2_option_set_peer_max_concurrent_streams( + options_, ::Envoy::Http2::Utility::OptionsLimits::DEFAULT_MAX_CONCURRENT_STREAMS); +} + +ClientConnectionImpl::ClientConnectionImpl( + Network::Connection& connection, Http::ConnectionCallbacks& callbacks, CodecStats& stats, + const envoy::config::core::v3::Http2ProtocolOptions& http2_options, + const uint32_t max_response_headers_kb, const uint32_t max_response_headers_count, + Nghttp2SessionFactory& http2_session_factory) + : ConnectionImpl(connection, stats, http2_options, max_response_headers_kb, + max_response_headers_count), + callbacks_(callbacks) { + ClientHttp2Options client_http2_options(http2_options); + session_ = http2_session_factory.create(http2_callbacks_.callbacks(), base(), + client_http2_options.options()); + http2_session_factory.init(session_, base(), http2_options); + allow_metadata_ = http2_options.allow_metadata(); +} + +RequestEncoder& ClientConnectionImpl::newStream(ResponseDecoder& decoder) { + ClientStreamImplPtr stream(new ClientStreamImpl(*this, per_stream_buffer_limit_, decoder)); + // If the connection is currently above the high watermark, make sure to inform the new stream. + // The connection can not pass this on automatically as it has no awareness that a new stream is + // created. + if (connection_.aboveHighWatermark()) { + stream->runHighWatermarkCallbacks(); + } + ClientStreamImpl& stream_ref = *stream; + stream->moveIntoList(std::move(stream), active_streams_); + return stream_ref; +} + +int ClientConnectionImpl::onBeginHeaders(const nghttp2_frame* frame) { + // The client code explicitly does not currently support push promise. + RELEASE_ASSERT(frame->hd.type == NGHTTP2_HEADERS, ""); + RELEASE_ASSERT(frame->headers.cat == NGHTTP2_HCAT_RESPONSE || + frame->headers.cat == NGHTTP2_HCAT_HEADERS, + ""); + if (frame->headers.cat == NGHTTP2_HCAT_HEADERS) { + StreamImpl* stream = getStream(frame->hd.stream_id); + stream->allocTrailers(); + } + + return 0; +} + +int ClientConnectionImpl::onHeader(const nghttp2_frame* frame, HeaderString&& name, + HeaderString&& value) { + // The client code explicitly does not currently support push promise. 
+ ASSERT(frame->hd.type == NGHTTP2_HEADERS); + ASSERT(frame->headers.cat == NGHTTP2_HCAT_RESPONSE || frame->headers.cat == NGHTTP2_HCAT_HEADERS); + return saveHeader(frame, std::move(name), std::move(value)); +} + +ServerConnectionImpl::ServerConnectionImpl( + Network::Connection& connection, Http::ServerConnectionCallbacks& callbacks, CodecStats& stats, + const envoy::config::core::v3::Http2ProtocolOptions& http2_options, + const uint32_t max_request_headers_kb, const uint32_t max_request_headers_count, + envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action) + : ConnectionImpl(connection, stats, http2_options, max_request_headers_kb, + max_request_headers_count), + callbacks_(callbacks), headers_with_underscores_action_(headers_with_underscores_action) { + Http2Options h2_options(http2_options); + + nghttp2_session_server_new2(&session_, http2_callbacks_.callbacks(), base(), + h2_options.options()); + sendSettings(http2_options, false); + allow_metadata_ = http2_options.allow_metadata(); +} + +int ServerConnectionImpl::onBeginHeaders(const nghttp2_frame* frame) { + // For a server connection, we should never get push promise frames. + ASSERT(frame->hd.type == NGHTTP2_HEADERS); + + if (!trackInboundFrames(&frame->hd, frame->headers.padlen)) { + return NGHTTP2_ERR_FLOODED; + } + + if (frame->headers.cat != NGHTTP2_HCAT_REQUEST) { + stats_.trailers_.inc(); + ASSERT(frame->headers.cat == NGHTTP2_HCAT_HEADERS); + + StreamImpl* stream = getStream(frame->hd.stream_id); + stream->allocTrailers(); + return 0; + } + + ServerStreamImplPtr stream(new ServerStreamImpl(*this, per_stream_buffer_limit_)); + if (connection_.aboveHighWatermark()) { + stream->runHighWatermarkCallbacks(); + } + stream->request_decoder_ = &callbacks_.newStream(*stream); + stream->stream_id_ = frame->hd.stream_id; + stream->moveIntoList(std::move(stream), active_streams_); + nghttp2_session_set_stream_user_data(session_, frame->hd.stream_id, + active_streams_.front().get()); + return 0; +} + +int ServerConnectionImpl::onHeader(const nghttp2_frame* frame, HeaderString&& name, + HeaderString&& value) { + // For a server connection, we should never get push promise frames. + ASSERT(frame->hd.type == NGHTTP2_HEADERS); + ASSERT(frame->headers.cat == NGHTTP2_HCAT_REQUEST || frame->headers.cat == NGHTTP2_HCAT_HEADERS); + return saveHeader(frame, std::move(name), std::move(value)); +} + +bool ServerConnectionImpl::trackInboundFrames(const nghttp2_frame_hd* hd, uint32_t padding_length) { + ENVOY_CONN_LOG(trace, "track inbound frame type={} flags={} length={} padding_length={}", + connection_, static_cast(hd->type), static_cast(hd->flags), + static_cast(hd->length), padding_length); + switch (hd->type) { + case NGHTTP2_HEADERS: + case NGHTTP2_CONTINUATION: + // Track new streams. + if (hd->flags & NGHTTP2_FLAG_END_HEADERS) { + inbound_streams_++; + } + FALLTHRU; + case NGHTTP2_DATA: + // Track frames with an empty payload and no end stream flag. 
+ if (hd->length - padding_length == 0 && !(hd->flags & NGHTTP2_FLAG_END_STREAM)) { + ENVOY_CONN_LOG(trace, "frame with an empty payload and no end stream flag.", connection_); + consecutive_inbound_frames_with_empty_payload_++; + } else { + consecutive_inbound_frames_with_empty_payload_ = 0; + } + break; + case NGHTTP2_PRIORITY: + inbound_priority_frames_++; + break; + case NGHTTP2_WINDOW_UPDATE: + inbound_window_update_frames_++; + break; + default: + break; + } + + if (!checkInboundFrameLimits()) { + // NGHTTP2_ERR_FLOODED is overridden within nghttp2 library and it doesn't propagate + // all the way to nghttp2_session_mem_recv() where we need it. + flood_detected_ = true; + return false; + } + + return true; +} + +bool ServerConnectionImpl::checkInboundFrameLimits() { + ASSERT(dispatching_downstream_data_); + + if (consecutive_inbound_frames_with_empty_payload_ > + max_consecutive_inbound_frames_with_empty_payload_) { + ENVOY_CONN_LOG(trace, + "error reading frame: Too many consecutive frames with an empty payload " + "received in this HTTP/2 session.", + connection_); + stats_.inbound_empty_frames_flood_.inc(); + return false; + } + + if (inbound_priority_frames_ > max_inbound_priority_frames_per_stream_ * (1 + inbound_streams_)) { + ENVOY_CONN_LOG(trace, + "error reading frame: Too many PRIORITY frames received in this HTTP/2 session.", + connection_); + stats_.inbound_priority_frames_flood_.inc(); + return false; + } + + if (inbound_window_update_frames_ > + 1 + 2 * (inbound_streams_ + + max_inbound_window_update_frames_per_data_frame_sent_ * outbound_data_frames_)) { + ENVOY_CONN_LOG( + trace, + "error reading frame: Too many WINDOW_UPDATE frames received in this HTTP/2 session.", + connection_); + stats_.inbound_window_update_frames_flood_.inc(); + return false; + } + + return true; +} + +void ServerConnectionImpl::checkOutboundQueueLimits() { + if (outbound_frames_ > max_outbound_frames_ && dispatching_downstream_data_) { + stats_.outbound_flood_.inc(); + throw FrameFloodException("Too many frames in the outbound queue."); + } + if (outbound_control_frames_ > max_outbound_control_frames_ && dispatching_downstream_data_) { + stats_.outbound_control_flood_.inc(); + throw FrameFloodException("Too many control frames in the outbound queue."); + } +} + +Http::Status ServerConnectionImpl::dispatch(Buffer::Instance& data) { + // TODO(#10878): Remove this wrapper when exception removal is complete. innerDispatch may either + // throw an exception or return an error status. The utility wrapper catches exceptions and + // converts them to error statuses. + return Http::Utility::exceptionToStatus( + [&](Buffer::Instance& data) -> Http::Status { return innerDispatch(data); }, data); +} + +Http::Status ServerConnectionImpl::innerDispatch(Buffer::Instance& data) { + ASSERT(!dispatching_downstream_data_); + dispatching_downstream_data_ = true; + + // Make sure the dispatching_downstream_data_ is set to false even + // when ConnectionImpl::dispatch throws an exception. + Cleanup cleanup([this]() { dispatching_downstream_data_ = false; }); + + // Make sure downstream outbound queue was not flooded by the upstream frames. 
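// [Editor's illustrative sketch, not part of the patch.]
// checkInboundFrameLimits() above applies three proportional limits:
// consecutive empty-payload frames against a fixed cap, PRIORITY frames
// against a per-stream budget, and WINDOW_UPDATE frames against a budget that
// scales with the number of streams and with the DATA frames we have actually
// sent. The formulas, written out as a standalone predicate (hypothetical
// name):
#include <cstdint>

bool inboundFrameCountsWithinLimits(
    uint32_t consecutive_empty_frames, uint32_t max_consecutive_empty_frames,
    uint64_t priority_frames, uint32_t max_priority_frames_per_stream,
    uint64_t window_update_frames, uint32_t max_window_updates_per_data_frame_sent,
    uint32_t inbound_streams, uint64_t outbound_data_frames) {
  if (consecutive_empty_frames > max_consecutive_empty_frames) {
    return false;
  }
  if (priority_frames >
      static_cast<uint64_t>(max_priority_frames_per_stream) * (1 + inbound_streams)) {
    return false;
  }
  if (window_update_frames >
      1 + 2 * (inbound_streams +
               static_cast<uint64_t>(max_window_updates_per_data_frame_sent) *
                   outbound_data_frames)) {
    return false;
  }
  return true;
}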
+ checkOutboundQueueLimits(); + + return ConnectionImpl::innerDispatch(data); +} + +absl::optional +ServerConnectionImpl::checkHeaderNameForUnderscores(absl::string_view header_name) { + if (headers_with_underscores_action_ != envoy::config::core::v3::HttpProtocolOptions::ALLOW && + Http::HeaderUtility::headerNameContainsUnderscore(header_name)) { + if (headers_with_underscores_action_ == + envoy::config::core::v3::HttpProtocolOptions::DROP_HEADER) { + ENVOY_CONN_LOG(debug, "Dropping header with invalid characters in its name: {}", connection_, + header_name); + stats_.dropped_headers_with_underscores_.inc(); + return 0; + } + ENVOY_CONN_LOG(debug, "Rejecting request due to header name with underscores: {}", connection_, + header_name); + stats_.requests_rejected_with_underscores_in_headers_.inc(); + return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE; + } + return absl::nullopt; +} + +} // namespace Http2 +} // namespace Legacy +} // namespace Http +} // namespace Envoy diff --git a/source/common/http/http2/codec_impl_legacy.h b/source/common/http/http2/codec_impl_legacy.h new file mode 100644 index 000000000000..ebb40b18d8a7 --- /dev/null +++ b/source/common/http/http2/codec_impl_legacy.h @@ -0,0 +1,602 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "envoy/config/core/v3/protocol.pb.h" +#include "envoy/event/deferred_deletable.h" +#include "envoy/http/codec.h" +#include "envoy/network/connection.h" + +#include "common/buffer/buffer_impl.h" +#include "common/buffer/watermark_buffer.h" +#include "common/common/linked_object.h" +#include "common/common/logger.h" +#include "common/common/thread.h" +#include "common/http/codec_helper.h" +#include "common/http/header_map_impl.h" +#include "common/http/http2/codec_stats.h" +#include "common/http/http2/metadata_decoder.h" +#include "common/http/http2/metadata_encoder.h" +#include "common/http/status.h" +#include "common/http/utility.h" + +#include "absl/types/optional.h" +#include "nghttp2/nghttp2.h" + +namespace Envoy { +namespace Http { +namespace Legacy { +namespace Http2 { + +// This is not the full client magic, but it's the smallest size that should be able to +// differentiate between HTTP/1 and HTTP/2. +const std::string CLIENT_MAGIC_PREFIX = "PRI * HTTP/2"; + +class Utility { +public: + /** + * Deal with https://tools.ietf.org/html/rfc7540#section-8.1.2.5 + * @param key supplies the incoming header key. + * @param value supplies the incoming header value. + * @param cookies supplies the header string to fill if this is a cookie header that needs to be + * rebuilt. + */ + static bool reconstituteCrumbledCookies(const HeaderString& key, const HeaderString& value, + HeaderString& cookies); +}; + +class ConnectionImpl; + +// Abstract nghttp2_session factory. Used to enable injection of factories for testing. +class Nghttp2SessionFactory { +public: + using ConnectionImplType = ConnectionImpl; + virtual ~Nghttp2SessionFactory() = default; + + // Returns a new nghttp2_session to be used with |connection|. + virtual nghttp2_session* create(const nghttp2_session_callbacks* callbacks, + ConnectionImplType* connection, + const nghttp2_option* options) PURE; + + // Initializes the |session|. 
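// [Editor's illustrative sketch, not part of the patch.] The
// Utility::reconstituteCrumbledCookies() declared above addresses RFC 7540
// section 8.1.2.5: HTTP/2 peers may split the Cookie header into many small
// "crumbs", which must be concatenated back together with "; " before being
// handed to code that expects a single Cookie header. A simplified standalone
// version using std::string (the real helper works on HeaderString and returns
// whether the header was consumed as a cookie crumb):
#include <string>

bool reconstituteCrumbledCookies(const std::string& name, const std::string& value,
                                 std::string& cookies) {
  if (name != "cookie") {
    return false; // not a cookie crumb; the caller stores the header normally
  }
  if (!cookies.empty()) {
    cookies.append("; ");
  }
  cookies.append(value);
  return true;
}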
+ virtual void init(nghttp2_session* session, ConnectionImplType* connection, + const envoy::config::core::v3::Http2ProtocolOptions& options) PURE; +}; + +class ProdNghttp2SessionFactory : public Nghttp2SessionFactory { +public: + nghttp2_session* create(const nghttp2_session_callbacks* callbacks, ConnectionImpl* connection, + const nghttp2_option* options) override; + + void init(nghttp2_session* session, ConnectionImpl* connection, + const envoy::config::core::v3::Http2ProtocolOptions& options) override; + + // Returns a global factory instance. Note that this is possible because no internal state is + // maintained; the thread safety of create() and init()'s side effects is guaranteed by Envoy's + // worker based threading model. + static ProdNghttp2SessionFactory& get() { + static ProdNghttp2SessionFactory* instance = new ProdNghttp2SessionFactory(); + return *instance; + } +}; + +/** + * Base class for HTTP/2 client and server codecs. + */ +class ConnectionImpl : public virtual Connection, protected Logger::Loggable { +public: + ConnectionImpl(Network::Connection& connection, Http::Http2::CodecStats& stats, + const envoy::config::core::v3::Http2ProtocolOptions& http2_options, + const uint32_t max_headers_kb, const uint32_t max_headers_count); + + ~ConnectionImpl() override; + + // Http::Connection + // NOTE: the `dispatch` method is also overridden in the ServerConnectionImpl class + Http::Status dispatch(Buffer::Instance& data) override; + void goAway() override; + Protocol protocol() override { return Protocol::Http2; } + void shutdownNotice() override; + bool wantsToWrite() override { return nghttp2_session_want_write(session_); } + // Propagate network connection watermark events to each stream on the connection. + void onUnderlyingConnectionAboveWriteBufferHighWatermark() override { + for (auto& stream : active_streams_) { + stream->runHighWatermarkCallbacks(); + } + } + void onUnderlyingConnectionBelowWriteBufferLowWatermark() override { + for (auto& stream : active_streams_) { + stream->runLowWatermarkCallbacks(); + } + } + + /** + * An inner dispatch call that executes the dispatching logic. While exception removal is in + * migration (#10878), this function may either throw an exception or return an error status. + * Exceptions are caught and translated to their corresponding statuses in the outer level + * dispatch. + * This needs to be virtual so that ServerConnectionImpl can override. + * TODO(#10878): Remove this when exception removal is complete. + */ + virtual Http::Status innerDispatch(Buffer::Instance& data); + +protected: + friend class ProdNghttp2SessionFactory; + + /** + * Wrapper for static nghttp2 callback dispatchers. + */ + class Http2Callbacks { + public: + Http2Callbacks(); + ~Http2Callbacks(); + + const nghttp2_session_callbacks* callbacks() { return callbacks_; } + + private: + nghttp2_session_callbacks* callbacks_; + }; + + /** + * Wrapper for static nghttp2 session options. + */ + class Http2Options { + public: + Http2Options(const envoy::config::core::v3::Http2ProtocolOptions& http2_options); + ~Http2Options(); + + const nghttp2_option* options() { return options_; } + + protected: + nghttp2_option* options_; + }; + + class ClientHttp2Options : public Http2Options { + public: + ClientHttp2Options(const envoy::config::core::v3::Http2ProtocolOptions& http2_options); + }; + + /** + * Base class for client and server side streams. 
+ */ + struct StreamImpl : public virtual StreamEncoder, + public Stream, + public LinkedObject, + public Event::DeferredDeletable, + public StreamCallbackHelper { + + StreamImpl(ConnectionImpl& parent, uint32_t buffer_limit); + ~StreamImpl() override; + // TODO(mattklein123): Optimally this would be done in the destructor but there are currently + // deferred delete lifetime issues that need sorting out if the destructor of the stream is + // going to be able to refer to the parent connection. + void destroy(); + void disarmStreamIdleTimer() { + if (stream_idle_timer_ != nullptr) { + // To ease testing and the destructor assertion. + stream_idle_timer_->disableTimer(); + stream_idle_timer_.reset(); + } + } + + StreamImpl* base() { return this; } + ssize_t onDataSourceRead(uint64_t length, uint32_t* data_flags); + int onDataSourceSend(const uint8_t* framehd, size_t length); + void resetStreamWorker(StreamResetReason reason); + static void buildHeaders(std::vector& final_headers, const HeaderMap& headers); + void saveHeader(HeaderString&& name, HeaderString&& value); + void encodeHeadersBase(const std::vector& final_headers, bool end_stream); + virtual void submitHeaders(const std::vector& final_headers, + nghttp2_data_provider* provider) PURE; + void encodeTrailersBase(const HeaderMap& headers); + void submitTrailers(const HeaderMap& trailers); + void submitMetadata(uint8_t flags); + virtual StreamDecoder& decoder() PURE; + virtual HeaderMap& headers() PURE; + virtual void allocTrailers() PURE; + virtual HeaderMapPtr cloneTrailers(const HeaderMap& trailers) PURE; + virtual void createPendingFlushTimer() PURE; + void onPendingFlushTimer(); + + // Http::StreamEncoder + void encodeData(Buffer::Instance& data, bool end_stream) override; + Stream& getStream() override { return *this; } + void encodeMetadata(const MetadataMapVector& metadata_map_vector) override; + Http1StreamEncoderOptionsOptRef http1StreamEncoderOptions() override { return absl::nullopt; } + + // Http::Stream + void addCallbacks(StreamCallbacks& callbacks) override { addCallbacksHelper(callbacks); } + void removeCallbacks(StreamCallbacks& callbacks) override { removeCallbacksHelper(callbacks); } + void resetStream(StreamResetReason reason) override; + void readDisable(bool disable) override; + uint32_t bufferLimit() override { return pending_recv_data_.highWatermark(); } + const Network::Address::InstanceConstSharedPtr& connectionLocalAddress() override { + return parent_.connection_.localAddress(); + } + absl::string_view responseDetails() override { return details_; } + void setFlushTimeout(std::chrono::milliseconds timeout) override { + stream_idle_timeout_ = timeout; + } + + // This code assumes that details is a static string, so that we + // can avoid copying it. + void setDetails(absl::string_view details) { + // It is probably a mistake to call setDetails() twice, so + // assert that details_ is empty. + ASSERT(details_.empty()); + + details_ = details; + } + + void setWriteBufferWatermarks(uint32_t low_watermark, uint32_t high_watermark) { + pending_recv_data_.setWatermarks(low_watermark, high_watermark); + pending_send_data_.setWatermarks(low_watermark, high_watermark); + } + + // If the receive buffer encounters watermark callbacks, enable/disable reads on this stream. + void pendingRecvBufferHighWatermark(); + void pendingRecvBufferLowWatermark(); + + // If the send buffer encounters watermark callbacks, propagate this information to the streams. 
+ // The router and connection manager will propagate them on as appropriate. + void pendingSendBufferHighWatermark(); + void pendingSendBufferLowWatermark(); + + // Does any necessary WebSocket/Upgrade conversion, then passes the headers + // to the decoder_. + virtual void decodeHeaders(bool allow_waiting_for_informational_headers) PURE; + virtual void decodeTrailers() PURE; + + // Get MetadataEncoder for this stream. + Http::Http2::MetadataEncoder& getMetadataEncoder(); + // Get MetadataDecoder for this stream. + Http::Http2::MetadataDecoder& getMetadataDecoder(); + // Callback function for MetadataDecoder. + void onMetadataDecoded(MetadataMapPtr&& metadata_map_ptr); + + bool buffersOverrun() const { return read_disable_count_ > 0; } + + ConnectionImpl& parent_; + int32_t stream_id_{-1}; + uint32_t unconsumed_bytes_{0}; + uint32_t read_disable_count_{0}; + Buffer::WatermarkBuffer pending_recv_data_{ + [this]() -> void { this->pendingRecvBufferLowWatermark(); }, + [this]() -> void { this->pendingRecvBufferHighWatermark(); }, + []() -> void { /* TODO(adisuissa): Handle overflow watermark */ }}; + Buffer::WatermarkBuffer pending_send_data_{ + [this]() -> void { this->pendingSendBufferLowWatermark(); }, + [this]() -> void { this->pendingSendBufferHighWatermark(); }, + []() -> void { /* TODO(adisuissa): Handle overflow watermark */ }}; + HeaderMapPtr pending_trailers_to_encode_; + std::unique_ptr metadata_decoder_; + std::unique_ptr metadata_encoder_; + absl::optional deferred_reset_; + HeaderString cookies_; + bool local_end_stream_sent_ : 1; + bool remote_end_stream_ : 1; + bool data_deferred_ : 1; + bool waiting_for_non_informational_headers_ : 1; + bool pending_receive_buffer_high_watermark_called_ : 1; + bool pending_send_buffer_high_watermark_called_ : 1; + bool reset_due_to_messaging_error_ : 1; + absl::string_view details_; + // See HttpConnectionManager.stream_idle_timeout. + std::chrono::milliseconds stream_idle_timeout_{}; + Event::TimerPtr stream_idle_timer_; + }; + + using StreamImplPtr = std::unique_ptr; + + /** + * Client side stream (request). + */ + struct ClientStreamImpl : public StreamImpl, public RequestEncoder { + ClientStreamImpl(ConnectionImpl& parent, uint32_t buffer_limit, + ResponseDecoder& response_decoder) + : StreamImpl(parent, buffer_limit), response_decoder_(response_decoder), + headers_or_trailers_(ResponseHeaderMapImpl::create()) {} + + // StreamImpl + void submitHeaders(const std::vector& final_headers, + nghttp2_data_provider* provider) override; + StreamDecoder& decoder() override { return response_decoder_; } + void decodeHeaders(bool allow_waiting_for_informational_headers) override; + void decodeTrailers() override; + HeaderMap& headers() override { + if (absl::holds_alternative(headers_or_trailers_)) { + return *absl::get(headers_or_trailers_); + } else { + return *absl::get(headers_or_trailers_); + } + } + void allocTrailers() override { + // If we are waiting for informational headers, make a new response header map, otherwise + // we are about to receive trailers. The codec makes sure this is the only valid sequence. 
+ if (waiting_for_non_informational_headers_) { + headers_or_trailers_.emplace(ResponseHeaderMapImpl::create()); + } else { + headers_or_trailers_.emplace(ResponseTrailerMapImpl::create()); + } + } + HeaderMapPtr cloneTrailers(const HeaderMap& trailers) override { + return createHeaderMap(trailers); + } + void createPendingFlushTimer() override { + // Client streams do not create a flush timer because we currently assume that any failure + // to flush would be covered by a request/stream/etc. timeout. + } + + // RequestEncoder + void encodeHeaders(const RequestHeaderMap& headers, bool end_stream) override; + void encodeTrailers(const RequestTrailerMap& trailers) override { + encodeTrailersBase(trailers); + } + + ResponseDecoder& response_decoder_; + absl::variant headers_or_trailers_; + std::string upgrade_type_; + }; + + using ClientStreamImplPtr = std::unique_ptr; + + /** + * Server side stream (response). + */ + struct ServerStreamImpl : public StreamImpl, public ResponseEncoder { + ServerStreamImpl(ConnectionImpl& parent, uint32_t buffer_limit) + : StreamImpl(parent, buffer_limit), headers_or_trailers_(RequestHeaderMapImpl::create()) {} + + // StreamImpl + void submitHeaders(const std::vector& final_headers, + nghttp2_data_provider* provider) override; + StreamDecoder& decoder() override { return *request_decoder_; } + void decodeHeaders(bool allow_waiting_for_informational_headers) override; + void decodeTrailers() override; + HeaderMap& headers() override { + if (absl::holds_alternative(headers_or_trailers_)) { + return *absl::get(headers_or_trailers_); + } else { + return *absl::get(headers_or_trailers_); + } + } + void allocTrailers() override { + headers_or_trailers_.emplace(RequestTrailerMapImpl::create()); + } + HeaderMapPtr cloneTrailers(const HeaderMap& trailers) override { + return createHeaderMap(trailers); + } + void createPendingFlushTimer() override; + + // ResponseEncoder + void encode100ContinueHeaders(const ResponseHeaderMap& headers) override; + void encodeHeaders(const ResponseHeaderMap& headers, bool end_stream) override; + void encodeTrailers(const ResponseTrailerMap& trailers) override { + encodeTrailersBase(trailers); + } + + RequestDecoder* request_decoder_{}; + absl::variant headers_or_trailers_; + }; + + using ServerStreamImplPtr = std::unique_ptr; + + ConnectionImpl* base() { return this; } + // NOTE: Always use non debug nullptr checks against the return value of this function. There are + // edge cases (such as for METADATA frames) where nghttp2 will issue a callback for a stream_id + // that is not associated with an existing stream. + StreamImpl* getStream(int32_t stream_id); + int saveHeader(const nghttp2_frame* frame, HeaderString&& name, HeaderString&& value); + void sendPendingFrames(); + void sendSettings(const envoy::config::core::v3::Http2ProtocolOptions& http2_options, + bool disable_push); + // Callback triggered when the peer's SETTINGS frame is received. + // NOTE: This is only used for tests. + virtual void onSettingsForTest(const nghttp2_settings&) {} + + /** + * Check if header name contains underscore character. + * Underscore character is allowed in header names by the RFC-7230 and this check is implemented + * as a security measure due to systems that treat '_' and '-' as interchangeable. + * The ServerConnectionImpl may drop header or reject request based on the + * `common_http_protocol_options.headers_with_underscores_action` configuration option in the + * HttpConnectionManager. 
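// [Editor's illustrative sketch, not part of the patch.] Both stream structs
// above keep the in-flight header block in a single variant member,
// headers_or_trailers_: it starts out holding a header map and is re-seated as
// a trailer map by allocTrailers() once trailers begin, so the header
// callbacks can keep writing into "the current map" without caring which one
// it is. The same shape with std::variant and plain maps (the real code uses
// absl::variant and Envoy's typed header map classes):
#include <map>
#include <memory>
#include <string>
#include <variant>

struct Headers : std::map<std::string, std::string> {};
struct Trailers : std::map<std::string, std::string> {};

struct StreamHeaderState {
  std::variant<std::unique_ptr<Headers>, std::unique_ptr<Trailers>> headers_or_trailers_{
      std::make_unique<Headers>()};

  // Called when a trailers HEADERS block begins: re-seat the variant.
  void allocTrailers() {
    headers_or_trailers_.emplace<std::unique_ptr<Trailers>>(std::make_unique<Trailers>());
  }

  // Header callbacks write into whichever map is currently active.
  std::map<std::string, std::string>& current() {
    if (std::holds_alternative<std::unique_ptr<Headers>>(headers_or_trailers_)) {
      return *std::get<std::unique_ptr<Headers>>(headers_or_trailers_);
    }
    return *std::get<std::unique_ptr<Trailers>>(headers_or_trailers_);
  }
};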
+ */ + virtual absl::optional checkHeaderNameForUnderscores(absl::string_view /* header_name */) { + return absl::nullopt; + } + + static Http2Callbacks http2_callbacks_; + + std::list active_streams_; + nghttp2_session* session_{}; + Http::Http2::CodecStats& stats_; + Network::Connection& connection_; + const uint32_t max_headers_kb_; + const uint32_t max_headers_count_; + uint32_t per_stream_buffer_limit_; + bool allow_metadata_; + const bool stream_error_on_invalid_http_messaging_; + bool flood_detected_; + + // Set if the type of frame that is about to be sent is PING or SETTINGS with the ACK flag set, or + // RST_STREAM. + bool is_outbound_flood_monitored_control_frame_ = 0; + // This counter keeps track of the number of outbound frames of all types (these that were + // buffered in the underlying connection but not yet written into the socket). If this counter + // exceeds the `max_outbound_frames_' value the connection is terminated. + uint32_t outbound_frames_ = 0; + // Maximum number of outbound frames. Initialized from corresponding http2_protocol_options. + // Default value is 10000. + const uint32_t max_outbound_frames_; + const std::function frame_buffer_releasor_; + // This counter keeps track of the number of outbound frames of types PING, SETTINGS and + // RST_STREAM (these that were buffered in the underlying connection but not yet written into the + // socket). If this counter exceeds the `max_outbound_control_frames_' value the connection is + // terminated. + uint32_t outbound_control_frames_ = 0; + // Maximum number of outbound frames of types PING, SETTINGS and RST_STREAM. Initialized from + // corresponding http2_protocol_options. Default value is 1000. + const uint32_t max_outbound_control_frames_; + const std::function control_frame_buffer_releasor_; + // This counter keeps track of the number of consecutive inbound frames of types HEADERS, + // CONTINUATION and DATA with an empty payload and no end stream flag. If this counter exceeds + // the `max_consecutive_inbound_frames_with_empty_payload_` value the connection is terminated. + uint32_t consecutive_inbound_frames_with_empty_payload_ = 0; + // Maximum number of consecutive inbound frames of types HEADERS, CONTINUATION and DATA without + // a payload. Initialized from corresponding http2_protocol_options. Default value is 1. + const uint32_t max_consecutive_inbound_frames_with_empty_payload_; + + // This counter keeps track of the number of inbound streams. + uint32_t inbound_streams_ = 0; + // This counter keeps track of the number of inbound PRIORITY frames. If this counter exceeds + // the value calculated using this formula: + // + // max_inbound_priority_frames_per_stream_ * (1 + inbound_streams_) + // + // the connection is terminated. + uint64_t inbound_priority_frames_ = 0; + // Maximum number of inbound PRIORITY frames per stream. Initialized from corresponding + // http2_protocol_options. Default value is 100. + const uint32_t max_inbound_priority_frames_per_stream_; + + // This counter keeps track of the number of inbound WINDOW_UPDATE frames. If this counter exceeds + // the value calculated using this formula: + // + // 1 + 2 * (inbound_streams_ + + // max_inbound_window_update_frames_per_data_frame_sent_ * outbound_data_frames_) + // + // the connection is terminated. + uint64_t inbound_window_update_frames_ = 0; + // This counter keeps track of the number of outbound DATA frames. 
+ uint64_t outbound_data_frames_ = 0; + // Maximum number of inbound WINDOW_UPDATE frames per outbound DATA frame sent. Initialized + // from corresponding http2_protocol_options. Default value is 10. + const uint32_t max_inbound_window_update_frames_per_data_frame_sent_; + + // For the flood mitigation to work the onSend callback must be called once for each outbound + // frame. This is what the nghttp2 library is doing, however this is not documented. The + // Http2FloodMitigationTest.* tests in test/integration/http2_integration_test.cc will break if + // this changes in the future. Also it is important that onSend does not do partial writes, as the + // nghttp2 library will keep calling this callback to write the rest of the frame. + ssize_t onSend(const uint8_t* data, size_t length); + +private: + virtual ConnectionCallbacks& callbacks() PURE; + virtual int onBeginHeaders(const nghttp2_frame* frame) PURE; + int onData(int32_t stream_id, const uint8_t* data, size_t len); + int onBeforeFrameReceived(const nghttp2_frame_hd* hd); + int onFrameReceived(const nghttp2_frame* frame); + int onBeforeFrameSend(const nghttp2_frame* frame); + int onFrameSend(const nghttp2_frame* frame); + int onError(absl::string_view error); + virtual int onHeader(const nghttp2_frame* frame, HeaderString&& name, HeaderString&& value) PURE; + int onInvalidFrame(int32_t stream_id, int error_code); + int onStreamClose(int32_t stream_id, uint32_t error_code); + int onMetadataReceived(int32_t stream_id, const uint8_t* data, size_t len); + int onMetadataFrameComplete(int32_t stream_id, bool end_metadata); + ssize_t packMetadata(int32_t stream_id, uint8_t* buf, size_t len); + // Adds buffer fragment for a new outbound frame to the supplied Buffer::OwnedImpl. + // Returns true on success or false if outbound queue limits were exceeded. + bool addOutboundFrameFragment(Buffer::OwnedImpl& output, const uint8_t* data, size_t length); + virtual void checkOutboundQueueLimits() PURE; + void incrementOutboundFrameCount(bool is_outbound_flood_monitored_control_frame); + virtual bool trackInboundFrames(const nghttp2_frame_hd* hd, uint32_t padding_length) PURE; + virtual bool checkInboundFrameLimits() PURE; + void releaseOutboundFrame(); + void releaseOutboundControlFrame(); + + bool dispatching_ : 1; + bool raised_goaway_ : 1; + bool pending_deferred_reset_ : 1; +}; + +/** + * HTTP/2 client connection codec. + */ +class ClientConnectionImpl : public ClientConnection, public ConnectionImpl { +public: + using SessionFactory = Nghttp2SessionFactory; + ClientConnectionImpl(Network::Connection& connection, ConnectionCallbacks& callbacks, + Http::Http2::CodecStats& stats, + const envoy::config::core::v3::Http2ProtocolOptions& http2_options, + const uint32_t max_response_headers_kb, + const uint32_t max_response_headers_count, + SessionFactory& http2_session_factory); + + // Http::ClientConnection + RequestEncoder& newStream(ResponseDecoder& response_decoder) override; + +private: + // ConnectionImpl + ConnectionCallbacks& callbacks() override { return callbacks_; } + int onBeginHeaders(const nghttp2_frame* frame) override; + int onHeader(const nghttp2_frame* frame, HeaderString&& name, HeaderString&& value) override; + + // Presently client connections only perform accounting of outbound frames and do not + // terminate connections when queue limits are exceeded. The primary reason is the complexity of + // the clean-up of upstream connections. 
The clean-up of upstream connection causes RST_STREAM + // messages to be sent on corresponding downstream connections. This may actually trigger flood + // mitigation on the downstream connections, which causes an exception to be thrown in the middle + // of the clean-up loop, leaving resources in a half cleaned up state. + // TODO(yanavlasov): add flood mitigation for upstream connections as well. + void checkOutboundQueueLimits() override {} + bool trackInboundFrames(const nghttp2_frame_hd*, uint32_t) override { return true; } + bool checkInboundFrameLimits() override { return true; } + + Http::ConnectionCallbacks& callbacks_; +}; + +/** + * HTTP/2 server connection codec. + */ +class ServerConnectionImpl : public ServerConnection, public ConnectionImpl { +public: + ServerConnectionImpl(Network::Connection& connection, ServerConnectionCallbacks& callbacks, + Http::Http2::CodecStats& stats, + const envoy::config::core::v3::Http2ProtocolOptions& http2_options, + const uint32_t max_request_headers_kb, + const uint32_t max_request_headers_count, + envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action); + +private: + // ConnectionImpl + ConnectionCallbacks& callbacks() override { return callbacks_; } + int onBeginHeaders(const nghttp2_frame* frame) override; + int onHeader(const nghttp2_frame* frame, HeaderString&& name, HeaderString&& value) override; + void checkOutboundQueueLimits() override; + bool trackInboundFrames(const nghttp2_frame_hd* hd, uint32_t padding_length) override; + bool checkInboundFrameLimits() override; + absl::optional checkHeaderNameForUnderscores(absl::string_view header_name) override; + + // Http::Connection + // The reason for overriding the dispatch method is to do flood mitigation only when + // processing data from downstream client. Doing flood mitigation when processing upstream + // responses makes clean-up tricky, which needs to be improved (see comments for the + // ClientConnectionImpl::checkOutboundQueueLimits method). The dispatch method on the + // ServerConnectionImpl objects is called only when processing data from the downstream client in + // the ConnectionManagerImpl::onData method. + Http::Status dispatch(Buffer::Instance& data) override; + Http::Status innerDispatch(Buffer::Instance& data) override; + + ServerConnectionCallbacks& callbacks_; + + // This flag indicates that downstream data is being dispatched and turns on flood mitigation + // in the checkMaxOutbound*Framed methods. + bool dispatching_downstream_data_{false}; + + // The action to take when a request header name contains underscore characters. 
+ envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action_; +}; + +} // namespace Http2 +} // namespace Legacy +} // namespace Http +} // namespace Envoy diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc index ba4817e19a53..35b990cd39b7 100644 --- a/source/common/runtime/runtime_features.cc +++ b/source/common/runtime/runtime_features.cc @@ -70,6 +70,7 @@ constexpr const char* runtime_features[] = { "envoy.reloadable_features.fixed_connection_close", "envoy.reloadable_features.http_default_alpn", "envoy.reloadable_features.listener_in_place_filterchain_update", + "envoy.reloadable_features.new_codec_behavior", "envoy.reloadable_features.preserve_query_string_in_path_redirects", "envoy.reloadable_features.preserve_upstream_date", "envoy.reloadable_features.stop_faking_paths", diff --git a/source/extensions/filters/network/http_connection_manager/BUILD b/source/extensions/filters/network/http_connection_manager/BUILD index ec576a8abac9..7ab7817d80fd 100644 --- a/source/extensions/filters/network/http_connection_manager/BUILD +++ b/source/extensions/filters/network/http_connection_manager/BUILD @@ -35,7 +35,9 @@ envoy_cc_extension( "//source/common/http:default_server_string_lib", "//source/common/http:request_id_extension_lib", "//source/common/http:utility_lib", + "//source/common/http/http1:codec_legacy_lib", "//source/common/http/http1:codec_lib", + "//source/common/http/http2:codec_legacy_lib", "//source/common/http/http2:codec_lib", "//source/common/json:json_loader_lib", "//source/common/local_reply:local_reply_lib", diff --git a/source/extensions/filters/network/http_connection_manager/config.cc b/source/extensions/filters/network/http_connection_manager/config.cc index 190db7f475b4..e1d374526c58 100644 --- a/source/extensions/filters/network/http_connection_manager/config.cc +++ b/source/extensions/filters/network/http_connection_manager/config.cc @@ -20,7 +20,9 @@ #include "common/http/conn_manager_utility.h" #include "common/http/default_server_string.h" #include "common/http/http1/codec_impl.h" +#include "common/http/http1/codec_impl_legacy.h" #include "common/http/http2/codec_impl.h" +#include "common/http/http2/codec_impl_legacy.h" #include "common/http/http3/quic_codec_factory.h" #include "common/http/http3/well_known_names.h" #include "common/http/request_id_extension_impl.h" @@ -480,16 +482,33 @@ HttpConnectionManagerConfig::createCodec(Network::Connection& connection, const Buffer::Instance& data, Http::ServerConnectionCallbacks& callbacks) { switch (codec_type_) { - case CodecType::HTTP1: - return std::make_unique( - connection, Http::Http1::CodecStats::atomicGet(http1_codec_stats_, context_.scope()), - callbacks, http1_settings_, maxRequestHeadersKb(), maxRequestHeadersCount(), - headersWithUnderscoresAction()); + case CodecType::HTTP1: { + if (context_.runtime().snapshot().runtimeFeatureEnabled( + "envoy.reloadable_features.new_codec_behavior")) { + return std::make_unique( + connection, Http::Http1::CodecStats::atomicGet(http1_codec_stats_, context_.scope()), + callbacks, http1_settings_, maxRequestHeadersKb(), maxRequestHeadersCount(), + headersWithUnderscoresAction()); + } else { + return std::make_unique( + connection, Http::Http1::CodecStats::atomicGet(http1_codec_stats_, context_.scope()), + callbacks, http1_settings_, maxRequestHeadersKb(), maxRequestHeadersCount(), + headersWithUnderscoresAction()); + } + } case CodecType::HTTP2: { - return std::make_unique( - 
connection, callbacks, - Http::Http2::CodecStats::atomicGet(http2_codec_stats_, context_.scope()), http2_options_, - maxRequestHeadersKb(), maxRequestHeadersCount(), headersWithUnderscoresAction()); + if (context_.runtime().snapshot().runtimeFeatureEnabled( + "envoy.reloadable_features.new_codec_behavior")) { + return std::make_unique( + connection, callbacks, + Http::Http2::CodecStats::atomicGet(http2_codec_stats_, context_.scope()), http2_options_, + maxRequestHeadersKb(), maxRequestHeadersCount(), headersWithUnderscoresAction()); + } else { + return std::make_unique( + connection, callbacks, + Http::Http2::CodecStats::atomicGet(http2_codec_stats_, context_.scope()), http2_options_, + maxRequestHeadersKb(), maxRequestHeadersCount(), headersWithUnderscoresAction()); + } } case CodecType::HTTP3: // Hard code Quiche factory name here to instantiate a QUIC codec implemented. diff --git a/test/common/http/codec_impl_fuzz_test.cc b/test/common/http/codec_impl_fuzz_test.cc index 16e4eee2c960..492d2889aa8f 100644 --- a/test/common/http/codec_impl_fuzz_test.cc +++ b/test/common/http/codec_impl_fuzz_test.cc @@ -462,32 +462,34 @@ void codecFuzz(const test::common::http::CodecImplFuzzTestCase& input, HttpVersi const envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction headers_with_underscores_action = envoy::config::core::v3::HttpProtocolOptions::ALLOW; + Http1::CodecStats::AtomicPtr http1_stats; + Http2::CodecStats::AtomicPtr http2_stats; ClientConnectionPtr client; ServerConnectionPtr server; const bool http2 = http_version == HttpVersion::Http2; - Http1::CodecStats::AtomicPtr stats; if (http2) { - client = std::make_unique( - client_connection, client_callbacks, stats_store, client_http2_options, - max_request_headers_kb, max_response_headers_count, + client = std::make_unique( + client_connection, client_callbacks, Http2::CodecStats::atomicGet(http2_stats, stats_store), + client_http2_options, max_request_headers_kb, max_response_headers_count, Http2::ProdNghttp2SessionFactory::get()); } else { client = std::make_unique( - client_connection, Http1::CodecStats::atomicGet(stats, stats_store), client_callbacks, + client_connection, Http1::CodecStats::atomicGet(http1_stats, stats_store), client_callbacks, client_http1settings, max_response_headers_count); } if (http2) { const envoy::config::core::v3::Http2ProtocolOptions server_http2_options{ fromHttp2Settings(input.h2_settings().server())}; - server = std::make_unique( - server_connection, server_callbacks, stats_store, server_http2_options, - max_request_headers_kb, max_request_headers_count, headers_with_underscores_action); + server = std::make_unique( + server_connection, server_callbacks, Http2::CodecStats::atomicGet(http2_stats, stats_store), + server_http2_options, max_request_headers_kb, max_request_headers_count, + headers_with_underscores_action); } else { const Http1Settings server_http1settings{fromHttp1Settings(input.h1_settings().server())}; server = std::make_unique( - server_connection, Http1::CodecStats::atomicGet(stats, stats_store), server_callbacks, + server_connection, Http1::CodecStats::atomicGet(http1_stats, stats_store), server_callbacks, server_http1settings, max_request_headers_kb, max_request_headers_count, headers_with_underscores_action); } @@ -643,8 +645,8 @@ void codecFuzz(const test::common::http::CodecImplFuzzTestCase& input, HttpVersi } } if (!codec_error && http2) { - dynamic_cast(*client).goAway(); - dynamic_cast(*server).goAway(); + dynamic_cast(*client).goAway(); + 
dynamic_cast(*server).goAway(); } } diff --git a/test/common/http/http1/BUILD b/test/common/http/http1/BUILD index 715e6dbf0c23..dbcdcd4d4c8b 100644 --- a/test/common/http/http1/BUILD +++ b/test/common/http/http1/BUILD @@ -26,6 +26,7 @@ envoy_cc_test( "//source/common/event:dispatcher_lib", "//source/common/http:exception_lib", "//source/common/http:header_map_lib", + "//source/common/http/http1:codec_legacy_lib", "//source/common/http/http1:codec_lib", "//test/common/stats:stat_test_utility_lib", "//test/mocks/buffer:buffer_mocks", diff --git a/test/common/http/http1/codec_impl_test.cc b/test/common/http/http1/codec_impl_test.cc index fade286cfb44..77565550dc26 100644 --- a/test/common/http/http1/codec_impl_test.cc +++ b/test/common/http/http1/codec_impl_test.cc @@ -9,6 +9,7 @@ #include "common/http/exception.h" #include "common/http/header_map_impl.h" #include "common/http/http1/codec_impl.h" +#include "common/http/http1/codec_impl_legacy.h" #include "common/runtime/runtime_impl.h" #include "test/common/stats/stat_test_utility.h" @@ -33,7 +34,6 @@ using testing::StrictMock; namespace Envoy { namespace Http { -namespace Http1 { namespace { std::string createHeaderFragment(int num_headers) { // Create a header field with num_headers headers. @@ -55,7 +55,7 @@ Buffer::OwnedImpl createBufferWithNByteSlices(absl::string_view input, size_t ma } } // namespace -class Http1CodecTestBase : public testing::Test { +class Http1CodecTestBase { protected: Http::Http1::CodecStats& http1CodecStats() { return Http::Http1::CodecStats::atomicGet(http1_codec_stats_, store_); @@ -65,12 +65,19 @@ class Http1CodecTestBase : public testing::Test { Http::Http1::CodecStats::AtomicPtr http1_codec_stats_; }; -class Http1ServerConnectionImplTest : public Http1CodecTestBase { +class Http1ServerConnectionImplTest : public Http1CodecTestBase, + public testing::TestWithParam { public: void initialize() { - codec_ = std::make_unique( - connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, - max_request_headers_count_, headers_with_underscores_action_); + if (GetParam()) { + codec_ = std::make_unique( + connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, + max_request_headers_count_, headers_with_underscores_action_); + } else { + codec_ = std::make_unique( + connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, + max_request_headers_count_, headers_with_underscores_action_); + } } NiceMock connection_; @@ -128,9 +135,15 @@ void Http1ServerConnectionImplTest::expect400(Protocol p, bool allow_absolute_ur if (allow_absolute_url) { codec_settings_.allow_absolute_url_ = allow_absolute_url; - codec_ = std::make_unique( - connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, - max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); + if (GetParam()) { + codec_ = std::make_unique( + connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, + max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); + } else { + codec_ = std::make_unique( + connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, + max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); + } } MockRequestDecoder decoder; @@ -158,9 +171,15 @@ void Http1ServerConnectionImplTest::expectHeadersTest(Protocol p, bool allow_abs // Make a new 'codec' with the right settings if (allow_absolute_url) { 
codec_settings_.allow_absolute_url_ = allow_absolute_url; - codec_ = std::make_unique( - connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, - max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); + if (GetParam()) { + codec_ = std::make_unique( + connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, + max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); + } else { + codec_ = std::make_unique( + connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, + max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); + } } MockRequestDecoder decoder; @@ -179,9 +198,15 @@ void Http1ServerConnectionImplTest::expectTrailersTest(bool enable_trailers) { // Make a new 'codec' with the right settings if (enable_trailers) { codec_settings_.enable_trailers_ = enable_trailers; - codec_ = std::make_unique( - connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, - max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); + if (GetParam()) { + codec_ = std::make_unique( + connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, + max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); + } else { + codec_ = std::make_unique( + connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, + max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); + } } InSequence sequence; @@ -215,9 +240,15 @@ void Http1ServerConnectionImplTest::testTrailersExceedLimit(std::string trailer_ initialize(); // Make a new 'codec' with the right settings codec_settings_.enable_trailers_ = enable_trailers; - codec_ = std::make_unique( - connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, - max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); + if (GetParam()) { + codec_ = std::make_unique( + connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, + max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); + } else { + codec_ = std::make_unique( + connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, + max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); + } std::string exception_reason; NiceMock decoder; EXPECT_CALL(callbacks_, newStream(_, _)) @@ -295,7 +326,12 @@ void Http1ServerConnectionImplTest::testRequestHeadersAccepted(std::string heade EXPECT_TRUE(status.ok()); } -TEST_F(Http1ServerConnectionImplTest, EmptyHeader) { +INSTANTIATE_TEST_SUITE_P(Codecs, Http1ServerConnectionImplTest, testing::Bool(), + [](const testing::TestParamInfo& param) { + return param.param ? 
"New" : "Legacy"; + }); + +TEST_P(Http1ServerConnectionImplTest, EmptyHeader) { initialize(); InSequence sequence; @@ -319,7 +355,7 @@ TEST_F(Http1ServerConnectionImplTest, EmptyHeader) { // We support the identity encoding, but because it does not end in chunked encoding we reject it // per RFC 7230 Section 3.3.3 -TEST_F(Http1ServerConnectionImplTest, IdentityEncodingNoChunked) { +TEST_P(Http1ServerConnectionImplTest, IdentityEncodingNoChunked) { initialize(); InSequence sequence; @@ -334,7 +370,7 @@ TEST_F(Http1ServerConnectionImplTest, IdentityEncodingNoChunked) { EXPECT_EQ(status.message(), "http/1.1 protocol error: unsupported transfer encoding"); } -TEST_F(Http1ServerConnectionImplTest, UnsupportedEncoding) { +TEST_P(Http1ServerConnectionImplTest, UnsupportedEncoding) { initialize(); InSequence sequence; @@ -350,7 +386,7 @@ TEST_F(Http1ServerConnectionImplTest, UnsupportedEncoding) { } // Verify that data in the two body chunks is merged before the call to decodeData. -TEST_F(Http1ServerConnectionImplTest, ChunkedBody) { +TEST_P(Http1ServerConnectionImplTest, ChunkedBody) { initialize(); InSequence sequence; @@ -381,7 +417,7 @@ TEST_F(Http1ServerConnectionImplTest, ChunkedBody) { // Verify dispatch behavior when dispatching an incomplete chunk, and resumption of the parse via a // second dispatch. -TEST_F(Http1ServerConnectionImplTest, ChunkedBodySplitOverTwoDispatches) { +TEST_P(Http1ServerConnectionImplTest, ChunkedBodySplitOverTwoDispatches) { initialize(); InSequence sequence; @@ -419,7 +455,7 @@ TEST_F(Http1ServerConnectionImplTest, ChunkedBodySplitOverTwoDispatches) { // Verify that headers and chunked body are processed correctly and data is merged before the // decodeData call even if delivered in a buffer that holds 1 byte per slice. -TEST_F(Http1ServerConnectionImplTest, ChunkedBodyFragmentedBuffer) { +TEST_P(Http1ServerConnectionImplTest, ChunkedBodyFragmentedBuffer) { initialize(); InSequence sequence; @@ -448,7 +484,7 @@ TEST_F(Http1ServerConnectionImplTest, ChunkedBodyFragmentedBuffer) { EXPECT_EQ(0U, buffer.length()); } -TEST_F(Http1ServerConnectionImplTest, ChunkedBodyCase) { +TEST_P(Http1ServerConnectionImplTest, ChunkedBodyCase) { initialize(); InSequence sequence; @@ -475,7 +511,7 @@ TEST_F(Http1ServerConnectionImplTest, ChunkedBodyCase) { // Verify that body dispatch does not happen after detecting a parse error processing a chunk // header. -TEST_F(Http1ServerConnectionImplTest, InvalidChunkHeader) { +TEST_P(Http1ServerConnectionImplTest, InvalidChunkHeader) { initialize(); InSequence sequence; @@ -501,7 +537,7 @@ TEST_F(Http1ServerConnectionImplTest, InvalidChunkHeader) { EXPECT_EQ(status.message(), "http/1.1 protocol error: HPE_INVALID_CHUNK_SIZE"); } -TEST_F(Http1ServerConnectionImplTest, IdentityAndChunkedBody) { +TEST_P(Http1ServerConnectionImplTest, IdentityAndChunkedBody) { initialize(); InSequence sequence; @@ -518,7 +554,7 @@ TEST_F(Http1ServerConnectionImplTest, IdentityAndChunkedBody) { EXPECT_EQ(status.message(), "http/1.1 protocol error: unsupported transfer encoding"); } -TEST_F(Http1ServerConnectionImplTest, HostWithLWS) { +TEST_P(Http1ServerConnectionImplTest, HostWithLWS) { initialize(); TestRequestHeaderMapImpl expected_headers{ @@ -539,7 +575,7 @@ TEST_F(Http1ServerConnectionImplTest, HostWithLWS) { // Regression test for https://github.com/envoyproxy/envoy/issues/10270. Linear whitespace at the // beginning and end of a header value should be stripped. Whitespace in the middle should be // preserved. 
-TEST_F(Http1ServerConnectionImplTest, InnerLWSIsPreserved) { +TEST_P(Http1ServerConnectionImplTest, InnerLWSIsPreserved) { initialize(); // Header with many spaces surrounded by non-whitespace characters to ensure that dispatching is @@ -572,7 +608,7 @@ TEST_F(Http1ServerConnectionImplTest, InnerLWSIsPreserved) { } } -TEST_F(Http1ServerConnectionImplTest, Http10) { +TEST_P(Http1ServerConnectionImplTest, Http10) { initialize(); InSequence sequence; @@ -590,7 +626,7 @@ TEST_F(Http1ServerConnectionImplTest, Http10) { EXPECT_EQ(Protocol::Http10, codec_->protocol()); } -TEST_F(Http1ServerConnectionImplTest, Http10AbsoluteNoOp) { +TEST_P(Http1ServerConnectionImplTest, Http10AbsoluteNoOp) { initialize(); TestRequestHeaderMapImpl expected_headers{{":path", "/"}, {":method", "GET"}}; @@ -598,7 +634,7 @@ TEST_F(Http1ServerConnectionImplTest, Http10AbsoluteNoOp) { expectHeadersTest(Protocol::Http10, true, buffer, expected_headers); } -TEST_F(Http1ServerConnectionImplTest, Http10Absolute) { +TEST_P(Http1ServerConnectionImplTest, Http10Absolute) { initialize(); TestRequestHeaderMapImpl expected_headers{ @@ -607,7 +643,7 @@ TEST_F(Http1ServerConnectionImplTest, Http10Absolute) { expectHeadersTest(Protocol::Http10, true, buffer, expected_headers); } -TEST_F(Http1ServerConnectionImplTest, Http10MultipleResponses) { +TEST_P(Http1ServerConnectionImplTest, Http10MultipleResponses) { initialize(); MockRequestDecoder decoder; @@ -653,7 +689,7 @@ TEST_F(Http1ServerConnectionImplTest, Http10MultipleResponses) { } } -TEST_F(Http1ServerConnectionImplTest, Http11AbsolutePath1) { +TEST_P(Http1ServerConnectionImplTest, Http11AbsolutePath1) { initialize(); TestRequestHeaderMapImpl expected_headers{ @@ -662,7 +698,7 @@ TEST_F(Http1ServerConnectionImplTest, Http11AbsolutePath1) { expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); } -TEST_F(Http1ServerConnectionImplTest, Http11AbsolutePath2) { +TEST_P(Http1ServerConnectionImplTest, Http11AbsolutePath2) { initialize(); TestRequestHeaderMapImpl expected_headers{ @@ -671,7 +707,7 @@ TEST_F(Http1ServerConnectionImplTest, Http11AbsolutePath2) { expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); } -TEST_F(Http1ServerConnectionImplTest, Http11AbsolutePathWithPort) { +TEST_P(Http1ServerConnectionImplTest, Http11AbsolutePathWithPort) { initialize(); TestRequestHeaderMapImpl expected_headers{ @@ -681,7 +717,7 @@ TEST_F(Http1ServerConnectionImplTest, Http11AbsolutePathWithPort) { expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); } -TEST_F(Http1ServerConnectionImplTest, Http11AbsoluteEnabledNoOp) { +TEST_P(Http1ServerConnectionImplTest, Http11AbsoluteEnabledNoOp) { initialize(); TestRequestHeaderMapImpl expected_headers{ @@ -690,7 +726,7 @@ TEST_F(Http1ServerConnectionImplTest, Http11AbsoluteEnabledNoOp) { expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); } -TEST_F(Http1ServerConnectionImplTest, Http11InvalidRequest) { +TEST_P(Http1ServerConnectionImplTest, Http11InvalidRequest) { initialize(); // Invalid because www.somewhere.com is not an absolute path nor an absolute url @@ -698,7 +734,7 @@ TEST_F(Http1ServerConnectionImplTest, Http11InvalidRequest) { expect400(Protocol::Http11, true, buffer, "http1.codec_error"); } -TEST_F(Http1ServerConnectionImplTest, Http11InvalidTrailerPost) { +TEST_P(Http1ServerConnectionImplTest, Http11InvalidTrailerPost) { initialize(); MockRequestDecoder decoder; @@ -723,7 +759,7 @@ TEST_F(Http1ServerConnectionImplTest, Http11InvalidTrailerPost) { 
EXPECT_TRUE(isCodecProtocolError(status)); } -TEST_F(Http1ServerConnectionImplTest, Http11AbsolutePathNoSlash) { +TEST_P(Http1ServerConnectionImplTest, Http11AbsolutePathNoSlash) { initialize(); TestRequestHeaderMapImpl expected_headers{ @@ -732,21 +768,21 @@ TEST_F(Http1ServerConnectionImplTest, Http11AbsolutePathNoSlash) { expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); } -TEST_F(Http1ServerConnectionImplTest, Http11AbsolutePathBad) { +TEST_P(Http1ServerConnectionImplTest, Http11AbsolutePathBad) { initialize(); Buffer::OwnedImpl buffer("GET * HTTP/1.1\r\nHost: bah\r\n\r\n"); expect400(Protocol::Http11, true, buffer, "http1.invalid_url"); } -TEST_F(Http1ServerConnectionImplTest, Http11AbsolutePortTooLarge) { +TEST_P(Http1ServerConnectionImplTest, Http11AbsolutePortTooLarge) { initialize(); Buffer::OwnedImpl buffer("GET http://foobar.com:1000000 HTTP/1.1\r\nHost: bah\r\n\r\n"); expect400(Protocol::Http11, true, buffer); } -TEST_F(Http1ServerConnectionImplTest, SketchyConnectionHeader) { +TEST_P(Http1ServerConnectionImplTest, SketchyConnectionHeader) { initialize(); Buffer::OwnedImpl buffer( @@ -754,7 +790,7 @@ TEST_F(Http1ServerConnectionImplTest, SketchyConnectionHeader) { expect400(Protocol::Http11, true, buffer, "http1.connection_header_rejected"); } -TEST_F(Http1ServerConnectionImplTest, Http11RelativeOnly) { +TEST_P(Http1ServerConnectionImplTest, Http11RelativeOnly) { initialize(); TestRequestHeaderMapImpl expected_headers{ @@ -763,7 +799,7 @@ TEST_F(Http1ServerConnectionImplTest, Http11RelativeOnly) { expectHeadersTest(Protocol::Http11, false, buffer, expected_headers); } -TEST_F(Http1ServerConnectionImplTest, Http11Options) { +TEST_P(Http1ServerConnectionImplTest, Http11Options) { initialize(); TestRequestHeaderMapImpl expected_headers{ @@ -772,7 +808,7 @@ TEST_F(Http1ServerConnectionImplTest, Http11Options) { expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); } -TEST_F(Http1ServerConnectionImplTest, SimpleGet) { +TEST_P(Http1ServerConnectionImplTest, SimpleGet) { initialize(); InSequence sequence; @@ -789,7 +825,7 @@ TEST_F(Http1ServerConnectionImplTest, SimpleGet) { EXPECT_EQ(0U, buffer.length()); } -TEST_F(Http1ServerConnectionImplTest, BadRequestNoStreamLegacy) { +TEST_P(Http1ServerConnectionImplTest, BadRequestNoStreamLegacy) { TestScopedRuntime scoped_runtime; Runtime::LoaderSingleton::getExisting()->mergeValues( {{"envoy.reloadable_features.early_errors_via_hcm", "false"}}); @@ -809,7 +845,7 @@ TEST_F(Http1ServerConnectionImplTest, BadRequestNoStreamLegacy) { // Test that if the stream is not created at the time an error is detected, it // is created as part of sending the protocol error. -TEST_F(Http1ServerConnectionImplTest, BadRequestNoStream) { +TEST_P(Http1ServerConnectionImplTest, BadRequestNoStream) { initialize(); MockRequestDecoder decoder; @@ -828,7 +864,7 @@ TEST_F(Http1ServerConnectionImplTest, BadRequestNoStream) { } // Make sure that if the first line is parsed, that sendLocalReply tracks HEAD requests correctly. -TEST_F(Http1ServerConnectionImplTest, BadHeadRequest) { +TEST_P(Http1ServerConnectionImplTest, BadHeadRequest) { initialize(); MockRequestDecoder decoder; @@ -848,7 +884,7 @@ TEST_F(Http1ServerConnectionImplTest, BadHeadRequest) { } // Make sure that if gRPC headers are parsed, they are tracked by sendLocalReply. 
-TEST_F(Http1ServerConnectionImplTest, BadGrpcRequest) { +TEST_P(Http1ServerConnectionImplTest, BadGrpcRequest) { initialize(); MockRequestDecoder decoder; @@ -869,7 +905,7 @@ TEST_F(Http1ServerConnectionImplTest, BadGrpcRequest) { // This behavior was observed during CVE-2019-18801 and helped to limit the // scope of affected Envoy configurations. -TEST_F(Http1ServerConnectionImplTest, RejectInvalidMethod) { +TEST_P(Http1ServerConnectionImplTest, RejectInvalidMethod) { initialize(); MockRequestDecoder decoder; @@ -881,7 +917,7 @@ TEST_F(Http1ServerConnectionImplTest, RejectInvalidMethod) { EXPECT_TRUE(isCodecProtocolError(status)); } -TEST_F(Http1ServerConnectionImplTest, BadRequestStartedStream) { +TEST_P(Http1ServerConnectionImplTest, BadRequestStartedStream) { initialize(); MockRequestDecoder decoder; @@ -897,7 +933,7 @@ TEST_F(Http1ServerConnectionImplTest, BadRequestStartedStream) { EXPECT_TRUE(isCodecProtocolError(status)); } -TEST_F(Http1ServerConnectionImplTest, FloodProtection) { +TEST_P(Http1ServerConnectionImplTest, FloodProtection) { initialize(); NiceMock decoder; @@ -948,7 +984,7 @@ TEST_F(Http1ServerConnectionImplTest, FloodProtection) { } } -TEST_F(Http1ServerConnectionImplTest, FloodProtectionOff) { +TEST_P(Http1ServerConnectionImplTest, FloodProtectionOff) { TestScopedRuntime scoped_runtime; Runtime::LoaderSingleton::getExisting()->mergeValues( {{"envoy.reloadable_features.http1_flood_protection", "false"}}); @@ -984,7 +1020,7 @@ TEST_F(Http1ServerConnectionImplTest, FloodProtectionOff) { } } -TEST_F(Http1ServerConnectionImplTest, HostHeaderTranslation) { +TEST_P(Http1ServerConnectionImplTest, HostHeaderTranslation) { initialize(); InSequence sequence; @@ -1003,7 +1039,8 @@ TEST_F(Http1ServerConnectionImplTest, HostHeaderTranslation) { } // Ensures that requests with invalid HTTP header values are properly rejected -TEST_F(Http1ServerConnectionImplTest, HeaderInvalidCharsRejection) { +// when the runtime guard is enabled for the feature. +TEST_P(Http1ServerConnectionImplTest, HeaderInvalidCharsRejection) { TestScopedRuntime scoped_runtime; // When the runtime-guarded feature is enabled, invalid header values // should result in a rejection. @@ -1028,7 +1065,7 @@ TEST_F(Http1ServerConnectionImplTest, HeaderInvalidCharsRejection) { // Ensures that request headers with names containing the underscore character are allowed // when the option is set to allow. -TEST_F(Http1ServerConnectionImplTest, HeaderNameWithUnderscoreAllowed) { +TEST_P(Http1ServerConnectionImplTest, HeaderNameWithUnderscoreAllowed) { headers_with_underscores_action_ = envoy::config::core::v3::HttpProtocolOptions::ALLOW; initialize(); @@ -1052,7 +1089,7 @@ TEST_F(Http1ServerConnectionImplTest, HeaderNameWithUnderscoreAllowed) { // Ensures that request headers with names containing the underscore character are dropped // when the option is set to drop headers. -TEST_F(Http1ServerConnectionImplTest, HeaderNameWithUnderscoreAreDropped) { +TEST_P(Http1ServerConnectionImplTest, HeaderNameWithUnderscoreAreDropped) { headers_with_underscores_action_ = envoy::config::core::v3::HttpProtocolOptions::DROP_HEADER; initialize(); @@ -1075,7 +1112,7 @@ TEST_F(Http1ServerConnectionImplTest, HeaderNameWithUnderscoreAreDropped) { // Ensures that request with header names containing the underscore character are rejected // when the option is set to reject request. 
-TEST_F(Http1ServerConnectionImplTest, HeaderNameWithUnderscoreCauseRequestRejected) { +TEST_P(Http1ServerConnectionImplTest, HeaderNameWithUnderscoreCauseRequestRejected) { headers_with_underscores_action_ = envoy::config::core::v3::HttpProtocolOptions::REJECT_REQUEST; initialize(); @@ -1096,7 +1133,7 @@ TEST_F(Http1ServerConnectionImplTest, HeaderNameWithUnderscoreCauseRequestReject EXPECT_EQ(1, store_.counter("http1.requests_rejected_with_underscores_in_headers").value()); } -TEST_F(Http1ServerConnectionImplTest, HeaderInvalidAuthority) { +TEST_P(Http1ServerConnectionImplTest, HeaderInvalidAuthority) { TestScopedRuntime scoped_runtime; initialize(); @@ -1119,7 +1156,7 @@ TEST_F(Http1ServerConnectionImplTest, HeaderInvalidAuthority) { // Mutate an HTTP GET with embedded NULs, this should always be rejected in some // way (not necessarily with "head value contains NUL" though). -TEST_F(Http1ServerConnectionImplTest, HeaderMutateEmbeddedNul) { +TEST_P(Http1ServerConnectionImplTest, HeaderMutateEmbeddedNul) { const std::string example_input = "GET / HTTP/1.1\r\nHOST: h.com\r\nfoo: barbaz\r\n"; for (size_t n = 1; n < example_input.size(); ++n) { @@ -1143,7 +1180,7 @@ TEST_F(Http1ServerConnectionImplTest, HeaderMutateEmbeddedNul) { // Mutate an HTTP GET with CR or LF. These can cause an error status or maybe // result in a valid decodeHeaders(). In any case, the validHeaderString() // ASSERTs should validate we never have any embedded CR or LF. -TEST_F(Http1ServerConnectionImplTest, HeaderMutateEmbeddedCRLF) { +TEST_P(Http1ServerConnectionImplTest, HeaderMutateEmbeddedCRLF) { const std::string example_input = "GET / HTTP/1.1\r\nHOST: h.com\r\nfoo: barbaz\r\n"; for (const char c : {'\r', '\n'}) { @@ -1163,7 +1200,7 @@ TEST_F(Http1ServerConnectionImplTest, HeaderMutateEmbeddedCRLF) { } } -TEST_F(Http1ServerConnectionImplTest, CloseDuringHeadersComplete) { +TEST_P(Http1ServerConnectionImplTest, CloseDuringHeadersComplete) { initialize(); InSequence sequence; @@ -1185,7 +1222,7 @@ TEST_F(Http1ServerConnectionImplTest, CloseDuringHeadersComplete) { EXPECT_NE(0U, buffer.length()); } -TEST_F(Http1ServerConnectionImplTest, PostWithContentLength) { +TEST_P(Http1ServerConnectionImplTest, PostWithContentLength) { initialize(); InSequence sequence; @@ -1211,7 +1248,7 @@ TEST_F(Http1ServerConnectionImplTest, PostWithContentLength) { // Verify that headers and body with content length are processed correctly and data is merged // before the decodeData call even if delivered in a buffer that holds 1 byte per slice. -TEST_F(Http1ServerConnectionImplTest, PostWithContentLengthFragmentedBuffer) { +TEST_P(Http1ServerConnectionImplTest, PostWithContentLengthFragmentedBuffer) { initialize(); InSequence sequence; @@ -1236,7 +1273,7 @@ TEST_F(Http1ServerConnectionImplTest, PostWithContentLengthFragmentedBuffer) { EXPECT_EQ(0U, buffer.length()); } -TEST_F(Http1ServerConnectionImplTest, HeaderOnlyResponse) { +TEST_P(Http1ServerConnectionImplTest, HeaderOnlyResponse) { initialize(); NiceMock decoder; @@ -1262,7 +1299,7 @@ TEST_F(Http1ServerConnectionImplTest, HeaderOnlyResponse) { // As with Http1ClientConnectionImplTest.LargeHeaderRequestEncode but validate // the response encoder instead of request encoder. 
-TEST_F(Http1ServerConnectionImplTest, LargeHeaderResponseEncode) { +TEST_P(Http1ServerConnectionImplTest, LargeHeaderResponseEncode) { initialize(); NiceMock decoder; @@ -1288,7 +1325,7 @@ TEST_F(Http1ServerConnectionImplTest, LargeHeaderResponseEncode) { output); } -TEST_F(Http1ServerConnectionImplTest, HeaderOnlyResponseTrainProperHeaders) { +TEST_P(Http1ServerConnectionImplTest, HeaderOnlyResponseTrainProperHeaders) { codec_settings_.header_key_format_ = Http1Settings::HeaderKeyFormat::ProperCase; initialize(); @@ -1315,7 +1352,7 @@ TEST_F(Http1ServerConnectionImplTest, HeaderOnlyResponseTrainProperHeaders) { output); } -TEST_F(Http1ServerConnectionImplTest, HeaderOnlyResponseWith204) { +TEST_P(Http1ServerConnectionImplTest, HeaderOnlyResponseWith204) { initialize(); NiceMock decoder; @@ -1339,7 +1376,7 @@ TEST_F(Http1ServerConnectionImplTest, HeaderOnlyResponseWith204) { EXPECT_EQ("HTTP/1.1 204 No Content\r\n\r\n", output); } -TEST_F(Http1ServerConnectionImplTest, HeaderOnlyResponseWith100Then200) { +TEST_P(Http1ServerConnectionImplTest, HeaderOnlyResponseWith100Then200) { initialize(); NiceMock decoder; @@ -1370,7 +1407,7 @@ TEST_F(Http1ServerConnectionImplTest, HeaderOnlyResponseWith100Then200) { EXPECT_EQ("HTTP/1.1 200 OK\r\ncontent-length: 0\r\n\r\n", output); } -TEST_F(Http1ServerConnectionImplTest, MetadataTest) { +TEST_P(Http1ServerConnectionImplTest, MetadataTest) { initialize(); NiceMock decoder; @@ -1393,7 +1430,7 @@ TEST_F(Http1ServerConnectionImplTest, MetadataTest) { EXPECT_EQ(1, store_.counter("http1.metadata_not_supported_error").value()); } -TEST_F(Http1ServerConnectionImplTest, ChunkedResponse) { +TEST_P(Http1ServerConnectionImplTest, ChunkedResponse) { initialize(); NiceMock decoder; @@ -1429,7 +1466,7 @@ TEST_F(Http1ServerConnectionImplTest, ChunkedResponse) { output); } -TEST_F(Http1ServerConnectionImplTest, ChunkedResponseWithTrailers) { +TEST_P(Http1ServerConnectionImplTest, ChunkedResponseWithTrailers) { codec_settings_.enable_trailers_ = true; initialize(); NiceMock decoder; @@ -1462,7 +1499,7 @@ TEST_F(Http1ServerConnectionImplTest, ChunkedResponseWithTrailers) { output); } -TEST_F(Http1ServerConnectionImplTest, ContentLengthResponse) { +TEST_P(Http1ServerConnectionImplTest, ContentLengthResponse) { initialize(); NiceMock decoder; @@ -1489,7 +1526,7 @@ TEST_F(Http1ServerConnectionImplTest, ContentLengthResponse) { EXPECT_EQ("HTTP/1.1 200 OK\r\ncontent-length: 11\r\n\r\nHello World", output); } -TEST_F(Http1ServerConnectionImplTest, HeadRequestResponse) { +TEST_P(Http1ServerConnectionImplTest, HeadRequestResponse) { initialize(); NiceMock decoder; @@ -1513,7 +1550,7 @@ TEST_F(Http1ServerConnectionImplTest, HeadRequestResponse) { EXPECT_EQ("HTTP/1.1 200 OK\r\ncontent-length: 5\r\n\r\n", output); } -TEST_F(Http1ServerConnectionImplTest, HeadChunkedRequestResponse) { +TEST_P(Http1ServerConnectionImplTest, HeadChunkedRequestResponse) { initialize(); NiceMock decoder; @@ -1537,7 +1574,7 @@ TEST_F(Http1ServerConnectionImplTest, HeadChunkedRequestResponse) { EXPECT_EQ("HTTP/1.1 200 OK\r\ntransfer-encoding: chunked\r\n\r\n", output); } -TEST_F(Http1ServerConnectionImplTest, DoubleRequest) { +TEST_P(Http1ServerConnectionImplTest, DoubleRequest) { initialize(); NiceMock decoder; @@ -1563,11 +1600,11 @@ TEST_F(Http1ServerConnectionImplTest, DoubleRequest) { EXPECT_EQ(0U, buffer.length()); } -TEST_F(Http1ServerConnectionImplTest, RequestWithTrailersDropped) { expectTrailersTest(false); } +TEST_P(Http1ServerConnectionImplTest, RequestWithTrailersDropped) { 
expectTrailersTest(false); } -TEST_F(Http1ServerConnectionImplTest, RequestWithTrailersKept) { expectTrailersTest(true); } +TEST_P(Http1ServerConnectionImplTest, RequestWithTrailersKept) { expectTrailersTest(true); } -TEST_F(Http1ServerConnectionImplTest, IgnoreUpgradeH2c) { +TEST_P(Http1ServerConnectionImplTest, IgnoreUpgradeH2c) { initialize(); TestRequestHeaderMapImpl expected_headers{ @@ -1578,7 +1615,7 @@ TEST_F(Http1ServerConnectionImplTest, IgnoreUpgradeH2c) { expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); } -TEST_F(Http1ServerConnectionImplTest, IgnoreUpgradeH2cClose) { +TEST_P(Http1ServerConnectionImplTest, IgnoreUpgradeH2cClose) { initialize(); TestRequestHeaderMapImpl expected_headers{{":authority", "www.somewhere.com"}, @@ -1591,7 +1628,7 @@ TEST_F(Http1ServerConnectionImplTest, IgnoreUpgradeH2cClose) { expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); } -TEST_F(Http1ServerConnectionImplTest, IgnoreUpgradeH2cCloseEtc) { +TEST_P(Http1ServerConnectionImplTest, IgnoreUpgradeH2cCloseEtc) { initialize(); TestRequestHeaderMapImpl expected_headers{{":authority", "www.somewhere.com"}, @@ -1604,7 +1641,7 @@ TEST_F(Http1ServerConnectionImplTest, IgnoreUpgradeH2cCloseEtc) { expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); } -TEST_F(Http1ServerConnectionImplTest, UpgradeRequest) { +TEST_P(Http1ServerConnectionImplTest, UpgradeRequest) { initialize(); InSequence sequence; @@ -1628,7 +1665,7 @@ TEST_F(Http1ServerConnectionImplTest, UpgradeRequest) { EXPECT_TRUE(status.ok()); } -TEST_F(Http1ServerConnectionImplTest, UpgradeRequestWithEarlyData) { +TEST_P(Http1ServerConnectionImplTest, UpgradeRequestWithEarlyData) { initialize(); InSequence sequence; @@ -1644,7 +1681,7 @@ TEST_F(Http1ServerConnectionImplTest, UpgradeRequestWithEarlyData) { EXPECT_TRUE(status.ok()); } -TEST_F(Http1ServerConnectionImplTest, UpgradeRequestWithTEChunked) { +TEST_P(Http1ServerConnectionImplTest, UpgradeRequestWithTEChunked) { initialize(); InSequence sequence; @@ -1662,7 +1699,7 @@ TEST_F(Http1ServerConnectionImplTest, UpgradeRequestWithTEChunked) { EXPECT_TRUE(status.ok()); } -TEST_F(Http1ServerConnectionImplTest, UpgradeRequestWithNoBody) { +TEST_P(Http1ServerConnectionImplTest, UpgradeRequestWithNoBody) { initialize(); InSequence sequence; @@ -1681,7 +1718,7 @@ TEST_F(Http1ServerConnectionImplTest, UpgradeRequestWithNoBody) { } // Test that 101 upgrade responses do not contain content-length or transfer-encoding headers. -TEST_F(Http1ServerConnectionImplTest, UpgradeRequestResponseHeaders) { +TEST_P(Http1ServerConnectionImplTest, UpgradeRequestResponseHeaders) { initialize(); NiceMock decoder; @@ -1705,7 +1742,7 @@ TEST_F(Http1ServerConnectionImplTest, UpgradeRequestResponseHeaders) { EXPECT_EQ("HTTP/1.1 101 Switching Protocols\r\n\r\n", output); } -TEST_F(Http1ServerConnectionImplTest, ConnectRequestNoContentLength) { +TEST_P(Http1ServerConnectionImplTest, ConnectRequestNoContentLength) { initialize(); InSequence sequence; @@ -1729,7 +1766,7 @@ TEST_F(Http1ServerConnectionImplTest, ConnectRequestNoContentLength) { // We use the absolute URL parsing code for CONNECT requests, but it does not // actually allow absolute URLs. 
-TEST_F(Http1ServerConnectionImplTest, ConnectRequestAbsoluteURLNotallowed) { +TEST_P(Http1ServerConnectionImplTest, ConnectRequestAbsoluteURLNotallowed) { initialize(); InSequence sequence; @@ -1742,7 +1779,7 @@ TEST_F(Http1ServerConnectionImplTest, ConnectRequestAbsoluteURLNotallowed) { EXPECT_TRUE(isCodecProtocolError(status)); } -TEST_F(Http1ServerConnectionImplTest, ConnectRequestWithEarlyData) { +TEST_P(Http1ServerConnectionImplTest, ConnectRequestWithEarlyData) { initialize(); InSequence sequence; @@ -1757,7 +1794,7 @@ TEST_F(Http1ServerConnectionImplTest, ConnectRequestWithEarlyData) { EXPECT_TRUE(status.ok()); } -TEST_F(Http1ServerConnectionImplTest, ConnectRequestWithTEChunked) { +TEST_P(Http1ServerConnectionImplTest, ConnectRequestWithTEChunked) { initialize(); InSequence sequence; @@ -1774,7 +1811,7 @@ TEST_F(Http1ServerConnectionImplTest, ConnectRequestWithTEChunked) { EXPECT_EQ(status.message(), "http/1.1 protocol error: unsupported transfer encoding"); } -TEST_F(Http1ServerConnectionImplTest, ConnectRequestWithNonZeroContentLength) { +TEST_P(Http1ServerConnectionImplTest, ConnectRequestWithNonZeroContentLength) { initialize(); InSequence sequence; @@ -1790,7 +1827,7 @@ TEST_F(Http1ServerConnectionImplTest, ConnectRequestWithNonZeroContentLength) { EXPECT_EQ(status.message(), "http/1.1 protocol error: unsupported content length"); } -TEST_F(Http1ServerConnectionImplTest, ConnectRequestWithZeroContentLength) { +TEST_P(Http1ServerConnectionImplTest, ConnectRequestWithZeroContentLength) { initialize(); InSequence sequence; @@ -1807,7 +1844,7 @@ TEST_F(Http1ServerConnectionImplTest, ConnectRequestWithZeroContentLength) { EXPECT_TRUE(status.ok()); } -TEST_F(Http1ServerConnectionImplTest, WatermarkTest) { +TEST_P(Http1ServerConnectionImplTest, WatermarkTest) { EXPECT_CALL(connection_, bufferLimit()).WillOnce(Return(10)); initialize(); @@ -1841,24 +1878,43 @@ TEST_F(Http1ServerConnectionImplTest, WatermarkTest) { ->onUnderlyingConnectionBelowWriteBufferLowWatermark(); } -class Http1ClientConnectionImplTest : public Http1CodecTestBase { +class Http1ClientConnectionImplTest : public Http1CodecTestBase, + public testing::TestWithParam { public: void initialize() { - codec_ = std::make_unique(connection_, http1CodecStats(), callbacks_, - codec_settings_, max_response_headers_count_); + if (GetParam()) { + codec_ = std::make_unique( + connection_, http1CodecStats(), callbacks_, codec_settings_, max_response_headers_count_); + } else { + codec_ = std::make_unique( + connection_, http1CodecStats(), callbacks_, codec_settings_, max_response_headers_count_); + } + } + + void readDisableOnRequestEncoder(RequestEncoder* request_encoder, bool disable) { + if (GetParam()) { + dynamic_cast(request_encoder)->readDisable(disable); + } else { + dynamic_cast(request_encoder)->readDisable(disable); + } } NiceMock connection_; NiceMock callbacks_; NiceMock codec_settings_; - std::unique_ptr codec_; + Http::ClientConnectionPtr codec_; protected: Stats::TestUtil::TestStore store_; uint32_t max_response_headers_count_{Http::DEFAULT_MAX_HEADERS_COUNT}; }; -TEST_F(Http1ClientConnectionImplTest, SimpleGet) { +INSTANTIATE_TEST_SUITE_P(Codecs, Http1ClientConnectionImplTest, testing::Bool(), + [](const testing::TestParamInfo& param) { + return param.param ? 
"New" : "Legacy"; + }); + +TEST_P(Http1ClientConnectionImplTest, SimpleGet) { initialize(); MockResponseDecoder response_decoder; @@ -1872,7 +1928,7 @@ TEST_F(Http1ClientConnectionImplTest, SimpleGet) { EXPECT_EQ("GET / HTTP/1.1\r\ncontent-length: 0\r\n\r\n", output); } -TEST_F(Http1ClientConnectionImplTest, SimpleGetWithHeaderCasing) { +TEST_P(Http1ClientConnectionImplTest, SimpleGetWithHeaderCasing) { codec_settings_.header_key_format_ = Http1Settings::HeaderKeyFormat::ProperCase; initialize(); @@ -1888,7 +1944,7 @@ TEST_F(Http1ClientConnectionImplTest, SimpleGetWithHeaderCasing) { EXPECT_EQ("GET / HTTP/1.1\r\nMy-Custom-Header: hey\r\nContent-Length: 0\r\n\r\n", output); } -TEST_F(Http1ClientConnectionImplTest, HostHeaderTranslate) { +TEST_P(Http1ClientConnectionImplTest, HostHeaderTranslate) { initialize(); MockResponseDecoder response_decoder; @@ -1902,7 +1958,7 @@ TEST_F(Http1ClientConnectionImplTest, HostHeaderTranslate) { EXPECT_EQ("GET / HTTP/1.1\r\nhost: host\r\ncontent-length: 0\r\n\r\n", output); } -TEST_F(Http1ClientConnectionImplTest, Reset) { +TEST_P(Http1ClientConnectionImplTest, Reset) { initialize(); MockResponseDecoder response_decoder; @@ -1916,16 +1972,15 @@ TEST_F(Http1ClientConnectionImplTest, Reset) { // Verify that we correctly enable reads on the connection when the final response is // received. -TEST_F(Http1ClientConnectionImplTest, FlowControlReadDisabledReenable) { +TEST_P(Http1ClientConnectionImplTest, FlowControlReadDisabledReenable) { initialize(); MockResponseDecoder response_decoder; - Http::RequestEncoder* request_encoder = &codec_->newStream(response_decoder); + auto* request_encoder = &codec_->newStream(response_decoder); // Manually read disable. EXPECT_CALL(connection_, readDisable(true)).Times(2); - RequestEncoderImpl* encoder = dynamic_cast(request_encoder); - encoder->readDisable(true); - encoder->readDisable(true); + readDisableOnRequestEncoder(request_encoder, true); + readDisableOnRequestEncoder(request_encoder, true); std::string output; ON_CALL(connection_, write(_, _)).WillByDefault(AddBufferToString(&output)); @@ -1946,7 +2001,7 @@ TEST_F(Http1ClientConnectionImplTest, FlowControlReadDisabledReenable) { EXPECT_TRUE(status.ok()); } -TEST_F(Http1ClientConnectionImplTest, PrematureResponse) { +TEST_P(Http1ClientConnectionImplTest, PrematureResponse) { initialize(); Buffer::OwnedImpl response("HTTP/1.1 408 Request Timeout\r\nConnection: Close\r\n\r\n"); @@ -1954,7 +2009,7 @@ TEST_F(Http1ClientConnectionImplTest, PrematureResponse) { EXPECT_TRUE(isPrematureResponseError(status)); } -TEST_F(Http1ClientConnectionImplTest, EmptyBodyResponse503) { +TEST_P(Http1ClientConnectionImplTest, EmptyBodyResponse503) { initialize(); NiceMock response_decoder; @@ -1968,7 +2023,7 @@ TEST_F(Http1ClientConnectionImplTest, EmptyBodyResponse503) { EXPECT_TRUE(status.ok()); } -TEST_F(Http1ClientConnectionImplTest, EmptyBodyResponse200) { +TEST_P(Http1ClientConnectionImplTest, EmptyBodyResponse200) { initialize(); NiceMock response_decoder; @@ -1982,7 +2037,7 @@ TEST_F(Http1ClientConnectionImplTest, EmptyBodyResponse200) { EXPECT_TRUE(status.ok()); } -TEST_F(Http1ClientConnectionImplTest, HeadRequest) { +TEST_P(Http1ClientConnectionImplTest, HeadRequest) { initialize(); NiceMock response_decoder; @@ -1996,7 +2051,7 @@ TEST_F(Http1ClientConnectionImplTest, HeadRequest) { EXPECT_TRUE(status.ok()); } -TEST_F(Http1ClientConnectionImplTest, 204Response) { +TEST_P(Http1ClientConnectionImplTest, 204Response) { initialize(); NiceMock response_decoder; @@ -2011,7 +2066,7 @@ 
TEST_F(Http1ClientConnectionImplTest, 204Response) { } // 204 No Content with Content-Length is barred by RFC 7230, Section 3.3.2. -TEST_F(Http1ClientConnectionImplTest, 204ResponseContentLengthNotAllowed) { +TEST_P(Http1ClientConnectionImplTest, 204ResponseContentLengthNotAllowed) { // By default, content-length is barred. { initialize(); @@ -2047,7 +2102,7 @@ TEST_F(Http1ClientConnectionImplTest, 204ResponseContentLengthNotAllowed) { // 204 No Content with Content-Length: 0 is technically barred by RFC 7230, Section 3.3.2, but we // allow it. -TEST_F(Http1ClientConnectionImplTest, 204ResponseWithContentLength0) { +TEST_P(Http1ClientConnectionImplTest, 204ResponseWithContentLength0) { { initialize(); @@ -2081,7 +2136,7 @@ TEST_F(Http1ClientConnectionImplTest, 204ResponseWithContentLength0) { } // 204 No Content with Transfer-Encoding headers is barred by RFC 7230, Section 3.3.1. -TEST_F(Http1ClientConnectionImplTest, 204ResponseTransferEncodingNotAllowed) { +TEST_P(Http1ClientConnectionImplTest, 204ResponseTransferEncodingNotAllowed) { // By default, transfer-encoding is barred. { initialize(); @@ -2115,7 +2170,7 @@ TEST_F(Http1ClientConnectionImplTest, 204ResponseTransferEncodingNotAllowed) { } } -TEST_F(Http1ClientConnectionImplTest, 100Response) { +TEST_P(Http1ClientConnectionImplTest, 100Response) { initialize(); NiceMock response_decoder; @@ -2136,7 +2191,7 @@ TEST_F(Http1ClientConnectionImplTest, 100Response) { } // 101 Switching Protocol with Transfer-Encoding headers is barred by RFC 7230, Section 3.3.1. -TEST_F(Http1ClientConnectionImplTest, 101ResponseTransferEncodingNotAllowed) { +TEST_P(Http1ClientConnectionImplTest, 101ResponseTransferEncodingNotAllowed) { // By default, transfer-encoding is barred. { initialize(); @@ -2172,7 +2227,7 @@ TEST_F(Http1ClientConnectionImplTest, 101ResponseTransferEncodingNotAllowed) { } } -TEST_F(Http1ClientConnectionImplTest, BadEncodeParams) { +TEST_P(Http1ClientConnectionImplTest, BadEncodeParams) { initialize(); NiceMock response_decoder; @@ -2185,7 +2240,7 @@ TEST_F(Http1ClientConnectionImplTest, BadEncodeParams) { CodecClientException); } -TEST_F(Http1ClientConnectionImplTest, NoContentLengthResponse) { +TEST_P(Http1ClientConnectionImplTest, NoContentLengthResponse) { initialize(); NiceMock response_decoder; @@ -2207,7 +2262,7 @@ TEST_F(Http1ClientConnectionImplTest, NoContentLengthResponse) { EXPECT_TRUE(status.ok()); } -TEST_F(Http1ClientConnectionImplTest, ResponseWithTrailers) { +TEST_P(Http1ClientConnectionImplTest, ResponseWithTrailers) { initialize(); NiceMock response_decoder; @@ -2222,7 +2277,7 @@ TEST_F(Http1ClientConnectionImplTest, ResponseWithTrailers) { EXPECT_TRUE(status.ok()); } -TEST_F(Http1ClientConnectionImplTest, GiantPath) { +TEST_P(Http1ClientConnectionImplTest, GiantPath) { initialize(); NiceMock response_decoder; @@ -2237,7 +2292,7 @@ TEST_F(Http1ClientConnectionImplTest, GiantPath) { EXPECT_TRUE(status.ok()); } -TEST_F(Http1ClientConnectionImplTest, PrematureUpgradeResponse) { +TEST_P(Http1ClientConnectionImplTest, PrematureUpgradeResponse) { initialize(); // make sure upgradeAllowed doesn't cause crashes if run with no pending response. 
@@ -2247,7 +2302,7 @@ TEST_F(Http1ClientConnectionImplTest, PrematureUpgradeResponse) { EXPECT_TRUE(isPrematureResponseError(status)); } -TEST_F(Http1ClientConnectionImplTest, UpgradeResponse) { +TEST_P(Http1ClientConnectionImplTest, UpgradeResponse) { initialize(); InSequence s; @@ -2283,7 +2338,7 @@ TEST_F(Http1ClientConnectionImplTest, UpgradeResponse) { // Same data as above, but make sure directDispatch immediately hands off any // outstanding data. -TEST_F(Http1ClientConnectionImplTest, UpgradeResponseWithEarlyData) { +TEST_P(Http1ClientConnectionImplTest, UpgradeResponseWithEarlyData) { initialize(); InSequence s; @@ -2307,7 +2362,7 @@ TEST_F(Http1ClientConnectionImplTest, UpgradeResponseWithEarlyData) { EXPECT_TRUE(status.ok()); } -TEST_F(Http1ClientConnectionImplTest, ConnectResponse) { +TEST_P(Http1ClientConnectionImplTest, ConnectResponse) { initialize(); InSequence s; @@ -2338,7 +2393,7 @@ TEST_F(Http1ClientConnectionImplTest, ConnectResponse) { // Same data as above, but make sure directDispatch immediately hands off any // outstanding data. -TEST_F(Http1ClientConnectionImplTest, ConnectResponseWithEarlyData) { +TEST_P(Http1ClientConnectionImplTest, ConnectResponseWithEarlyData) { initialize(); InSequence s; @@ -2357,7 +2412,7 @@ TEST_F(Http1ClientConnectionImplTest, ConnectResponseWithEarlyData) { EXPECT_TRUE(status.ok()); } -TEST_F(Http1ClientConnectionImplTest, ConnectRejected) { +TEST_P(Http1ClientConnectionImplTest, ConnectRejected) { initialize(); InSequence s; @@ -2375,7 +2430,7 @@ TEST_F(Http1ClientConnectionImplTest, ConnectRejected) { EXPECT_TRUE(status.ok()); } -TEST_F(Http1ClientConnectionImplTest, WatermarkTest) { +TEST_P(Http1ClientConnectionImplTest, WatermarkTest) { EXPECT_CALL(connection_, bufferLimit()).WillOnce(Return(10)); initialize(); @@ -2410,7 +2465,7 @@ TEST_F(Http1ClientConnectionImplTest, WatermarkTest) { // caller attempts to close the connection. This causes the network connection to attempt to write // pending data, even in the no flush scenario, which can cause us to go below low watermark // which then raises callbacks for a stream that no longer exists. -TEST_F(Http1ClientConnectionImplTest, HighwatermarkMultipleResponses) { +TEST_P(Http1ClientConnectionImplTest, HighwatermarkMultipleResponses) { initialize(); InSequence s; @@ -2444,7 +2499,7 @@ TEST_F(Http1ClientConnectionImplTest, HighwatermarkMultipleResponses) { // Regression test for https://github.com/envoyproxy/envoy/issues/10655. Make sure we correctly // handle going below low watermark when closing the connection during a completion callback. -TEST_F(Http1ClientConnectionImplTest, LowWatermarkDuringClose) { +TEST_P(Http1ClientConnectionImplTest, LowWatermarkDuringClose) { initialize(); InSequence s; @@ -2474,43 +2529,43 @@ TEST_F(Http1ClientConnectionImplTest, LowWatermarkDuringClose) { EXPECT_TRUE(status.ok()); } -TEST_F(Http1ServerConnectionImplTest, LargeTrailersRejected) { +TEST_P(Http1ServerConnectionImplTest, LargeTrailersRejected) { // Default limit of 60 KiB std::string long_string = "big: " + std::string(60 * 1024, 'q') + "\r\n\r\n\r\n"; testTrailersExceedLimit(long_string, true); } -TEST_F(Http1ServerConnectionImplTest, LargeTrailerFieldRejected) { +TEST_P(Http1ServerConnectionImplTest, LargeTrailerFieldRejected) { // Construct partial headers with a long field name that exceeds the default limit of 60KiB. 
std::string long_string = "bigfield" + std::string(60 * 1024, 'q'); testTrailersExceedLimit(long_string, true); } // Tests that the default limit for the number of request headers is 100. -TEST_F(Http1ServerConnectionImplTest, ManyTrailersRejected) { +TEST_P(Http1ServerConnectionImplTest, ManyTrailersRejected) { // Send a request with 101 headers. testTrailersExceedLimit(createHeaderFragment(101) + "\r\n\r\n", true); } -TEST_F(Http1ServerConnectionImplTest, LargeTrailersRejectedIgnored) { +TEST_P(Http1ServerConnectionImplTest, LargeTrailersRejectedIgnored) { // Default limit of 60 KiB std::string long_string = "big: " + std::string(60 * 1024, 'q') + "\r\n\r\n\r\n"; testTrailersExceedLimit(long_string, false); } -TEST_F(Http1ServerConnectionImplTest, LargeTrailerFieldRejectedIgnored) { +TEST_P(Http1ServerConnectionImplTest, LargeTrailerFieldRejectedIgnored) { // Default limit of 60 KiB std::string long_string = "bigfield" + std::string(60 * 1024, 'q') + ": value\r\n\r\n\r\n"; testTrailersExceedLimit(long_string, false); } // Tests that the default limit for the number of request headers is 100. -TEST_F(Http1ServerConnectionImplTest, ManyTrailersIgnored) { +TEST_P(Http1ServerConnectionImplTest, ManyTrailersIgnored) { // Send a request with 101 headers. testTrailersExceedLimit(createHeaderFragment(101) + "\r\n\r\n", false); } -TEST_F(Http1ServerConnectionImplTest, LargeRequestUrlRejected) { +TEST_P(Http1ServerConnectionImplTest, LargeRequestUrlRejected) { initialize(); std::string exception_reason; @@ -2532,19 +2587,19 @@ TEST_F(Http1ServerConnectionImplTest, LargeRequestUrlRejected) { EXPECT_EQ("http1.headers_too_large", response_encoder->getStream().responseDetails()); } -TEST_F(Http1ServerConnectionImplTest, LargeRequestHeadersRejected) { +TEST_P(Http1ServerConnectionImplTest, LargeRequestHeadersRejected) { // Default limit of 60 KiB std::string long_string = "big: " + std::string(60 * 1024, 'q') + "\r\n"; testRequestHeadersExceedLimit(long_string, ""); } // Tests that the default limit for the number of request headers is 100. -TEST_F(Http1ServerConnectionImplTest, ManyRequestHeadersRejected) { +TEST_P(Http1ServerConnectionImplTest, ManyRequestHeadersRejected) { // Send a request with 101 headers. testRequestHeadersExceedLimit(createHeaderFragment(101), "http1.too_many_headers"); } -TEST_F(Http1ServerConnectionImplTest, LargeRequestHeadersSplitRejected) { +TEST_P(Http1ServerConnectionImplTest, LargeRequestHeadersSplitRejected) { // Default limit of 60 KiB initialize(); @@ -2575,7 +2630,7 @@ TEST_F(Http1ServerConnectionImplTest, LargeRequestHeadersSplitRejected) { // Tests that the 101th request header causes overflow with the default max number of request // headers. -TEST_F(Http1ServerConnectionImplTest, ManyRequestHeadersSplitRejected) { +TEST_P(Http1ServerConnectionImplTest, ManyRequestHeadersSplitRejected) { // Default limit of 100. 
initialize(); @@ -2602,27 +2657,27 @@ TEST_F(Http1ServerConnectionImplTest, ManyRequestHeadersSplitRejected) { EXPECT_EQ(status.message(), "headers size exceeds limit"); } -TEST_F(Http1ServerConnectionImplTest, LargeRequestHeadersAccepted) { +TEST_P(Http1ServerConnectionImplTest, LargeRequestHeadersAccepted) { max_request_headers_kb_ = 65; std::string long_string = "big: " + std::string(64 * 1024, 'q') + "\r\n"; testRequestHeadersAccepted(long_string); } -TEST_F(Http1ServerConnectionImplTest, LargeRequestHeadersAcceptedMaxConfigurable) { +TEST_P(Http1ServerConnectionImplTest, LargeRequestHeadersAcceptedMaxConfigurable) { max_request_headers_kb_ = 96; std::string long_string = "big: " + std::string(95 * 1024, 'q') + "\r\n"; testRequestHeadersAccepted(long_string); } // Tests that the number of request headers is configurable. -TEST_F(Http1ServerConnectionImplTest, ManyRequestHeadersAccepted) { +TEST_P(Http1ServerConnectionImplTest, ManyRequestHeadersAccepted) { max_request_headers_count_ = 150; // Create a request with 150 headers. testRequestHeadersAccepted(createHeaderFragment(150)); } // Tests that incomplete response headers of 80 kB header value fails. -TEST_F(Http1ClientConnectionImplTest, ResponseHeadersWithLargeValueRejected) { +TEST_P(Http1ClientConnectionImplTest, ResponseHeadersWithLargeValueRejected) { initialize(); NiceMock response_decoder; @@ -2641,7 +2696,7 @@ TEST_F(Http1ClientConnectionImplTest, ResponseHeadersWithLargeValueRejected) { } // Tests that incomplete response headers with a 80 kB header field fails. -TEST_F(Http1ClientConnectionImplTest, ResponseHeadersWithLargeFieldRejected) { +TEST_P(Http1ClientConnectionImplTest, ResponseHeadersWithLargeFieldRejected) { initialize(); NiceMock decoder; @@ -2661,7 +2716,7 @@ TEST_F(Http1ClientConnectionImplTest, ResponseHeadersWithLargeFieldRejected) { } // Tests that the size of response headers for HTTP/1 must be under 80 kB. -TEST_F(Http1ClientConnectionImplTest, LargeResponseHeadersAccepted) { +TEST_P(Http1ClientConnectionImplTest, LargeResponseHeadersAccepted) { initialize(); NiceMock response_decoder; @@ -2679,7 +2734,7 @@ TEST_F(Http1ClientConnectionImplTest, LargeResponseHeadersAccepted) { // Regression test for CVE-2019-18801. Large method headers should not trigger // ASSERTs or ASAN, which they previously did. -TEST_F(Http1ClientConnectionImplTest, LargeMethodRequestEncode) { +TEST_P(Http1ClientConnectionImplTest, LargeMethodRequestEncode) { initialize(); NiceMock response_decoder; @@ -2697,7 +2752,7 @@ TEST_F(Http1ClientConnectionImplTest, LargeMethodRequestEncode) { // in CVE-2019-18801, but the related code does explicit size calculations on // both path and method (these are the two distinguished headers). So, // belt-and-braces. -TEST_F(Http1ClientConnectionImplTest, LargePathRequestEncode) { +TEST_P(Http1ClientConnectionImplTest, LargePathRequestEncode) { initialize(); NiceMock response_decoder; @@ -2713,7 +2768,7 @@ TEST_F(Http1ClientConnectionImplTest, LargePathRequestEncode) { // As with LargeMethodEncode, but for an arbitrary header. This was not an issue // in CVE-2019-18801. -TEST_F(Http1ClientConnectionImplTest, LargeHeaderRequestEncode) { +TEST_P(Http1ClientConnectionImplTest, LargeHeaderRequestEncode) { initialize(); NiceMock response_decoder; @@ -2730,7 +2785,7 @@ TEST_F(Http1ClientConnectionImplTest, LargeHeaderRequestEncode) { } // Exception called when the number of response headers exceeds the default value of 100. 
-TEST_F(Http1ClientConnectionImplTest, ManyResponseHeadersRejected) { +TEST_P(Http1ClientConnectionImplTest, ManyResponseHeadersRejected) { initialize(); NiceMock response_decoder; @@ -2748,7 +2803,7 @@ TEST_F(Http1ClientConnectionImplTest, ManyResponseHeadersRejected) { } // Tests that the number of response headers is configurable. -TEST_F(Http1ClientConnectionImplTest, ManyResponseHeadersAccepted) { +TEST_P(Http1ClientConnectionImplTest, ManyResponseHeadersAccepted) { max_response_headers_count_ = 152; initialize(); @@ -2765,6 +2820,5 @@ TEST_F(Http1ClientConnectionImplTest, ManyResponseHeadersAccepted) { status = codec_->dispatch(buffer); } -} // namespace Http1 } // namespace Http } // namespace Envoy diff --git a/test/common/http/http2/BUILD b/test/common/http/http2/BUILD index 82627e08201d..9d64120d5dd7 100644 --- a/test/common/http/http2/BUILD +++ b/test/common/http/http2/BUILD @@ -10,35 +10,50 @@ licenses(["notice"]) # Apache 2 envoy_package() +CODEC_TEST_DEPS = [ + ":codec_impl_test_util", + "//source/common/event:dispatcher_lib", + "//source/common/http:exception_lib", + "//source/common/http:header_map_lib", + "//source/common/http:header_utility_lib", + "//source/common/http/http2:codec_legacy_lib", + "//source/common/http/http2:codec_lib", + "//source/common/runtime:runtime_lib", + "//source/common/stats:stats_lib", + "//test/common/http:common_lib", + "//test/common/http/http2:http2_frame", + "//test/common/stats:stat_test_utility_lib", + "//test/mocks/http:http_mocks", + "//test/mocks/init:init_mocks", + "//test/mocks/local_info:local_info_mocks", + "//test/mocks/network:network_mocks", + "//test/mocks/protobuf:protobuf_mocks", + "//test/mocks/thread_local:thread_local_mocks", + "//test/mocks/upstream:transport_socket_match_mocks", + "//test/mocks/upstream:upstream_mocks", + "//test/test_common:logging_lib", + "//test/test_common:registry_lib", + "//test/test_common:test_runtime_lib", + "//test/test_common:utility_lib", +] + envoy_cc_test( name = "codec_impl_test", srcs = ["codec_impl_test.cc"], shard_count = 5, tags = ["fails_on_windows"], - deps = [ - ":codec_impl_test_util", - "//source/common/event:dispatcher_lib", - "//source/common/http:exception_lib", - "//source/common/http:header_map_lib", - "//source/common/http:header_utility_lib", - "//source/common/http/http2:codec_lib", - "//source/common/stats:stats_lib", - "//test/common/http:common_lib", - "//test/common/http/http2:http2_frame", - "//test/common/stats:stat_test_utility_lib", - "//test/mocks/http:http_mocks", - "//test/mocks/init:init_mocks", - "//test/mocks/local_info:local_info_mocks", - "//test/mocks/network:network_mocks", - "//test/mocks/protobuf:protobuf_mocks", - "//test/mocks/thread_local:thread_local_mocks", - "//test/mocks/upstream:transport_socket_match_mocks", - "//test/mocks/upstream:upstream_mocks", - "//test/test_common:logging_lib", - "//test/test_common:registry_lib", - "//test/test_common:test_runtime_lib", - "//test/test_common:utility_lib", + deps = CODEC_TEST_DEPS, +) + +envoy_cc_test( + name = "codec_impl_legacy_test", + srcs = ["codec_impl_test.cc"], + args = [ + "--runtime-feature-disable-for-tests=envoy.reloadable_features.new_codec_behavior", ], + shard_count = 5, + tags = ["fails_on_windows"], + deps = CODEC_TEST_DEPS, ) envoy_cc_test_library( @@ -46,6 +61,7 @@ envoy_cc_test_library( hdrs = ["codec_impl_test_util.h"], external_deps = ["abseil_optional"], deps = [ + "//source/common/http/http2:codec_legacy_lib", "//source/common/http/http2:codec_lib", ], ) diff --git 
a/test/common/http/http2/codec_impl_legacy_test.cc b/test/common/http/http2/codec_impl_legacy_test.cc
new file mode 100644
index 000000000000..fef22ab9c492
--- /dev/null
+++ b/test/common/http/http2/codec_impl_legacy_test.cc
@@ -0,0 +1,2163 @@
+#include 
+#include 
+
+#include "envoy/http/codec.h"
+#include "envoy/stats/scope.h"
+
+#include "common/http/exception.h"
+#include "common/http/header_map_impl.h"
+#include "common/http/http2/codec_impl_legacy.h"
+
+#include "test/common/http/common.h"
+#include "test/common/http/http2/http2_frame.h"
+#include "test/common/stats/stat_test_utility.h"
+#include "test/mocks/http/mocks.h"
+#include "test/mocks/init/mocks.h"
+#include "test/mocks/local_info/mocks.h"
+#include "test/mocks/network/mocks.h"
+#include "test/mocks/protobuf/mocks.h"
+#include "test/mocks/thread_local/mocks.h"
+#include "test/test_common/logging.h"
+#include "test/test_common/printers.h"
+#include "test/test_common/registry.h"
+#include "test/test_common/test_runtime.h"
+#include "test/test_common/utility.h"
+
+#include "codec_impl_legacy_test_util.h"
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+using testing::_;
+using testing::AnyNumber;
+using testing::AtLeast;
+using testing::InSequence;
+using testing::Invoke;
+using testing::InvokeWithoutArgs;
+using testing::NiceMock;
+using testing::Return;
+
+namespace Envoy {
+namespace Http {
+namespace Legacy {
+namespace Http2 {
+
+using Http2SettingsTuple = ::testing::tuple<uint32_t, uint32_t, uint32_t, uint32_t>;
+using Http2SettingsTestParam = ::testing::tuple<Http2SettingsTuple, Http2SettingsTuple>;
+using Http::Http2::Http2Frame;
+namespace CommonUtility = ::Envoy::Http2::Utility;
+
+class Http2CodecImplTestFixture {
+public:
+  // The Http::Connection::dispatch method does not throw (any more). However, unit tests in this
+  // file use codecs for sending test data through mock network connections to the codec under
+  // test. It is infeasible to plumb error codes returned by the dispatch() method of the codecs
+  // under test through mock connections and the sending codec. As a result, an error returned by
+  // the dispatch method of the codec under test, invoked by the ConnectionWrapper, is thrown as
+  // an exception. Note that the exception goes only through the mock network connection and
+  // sending codec, i.e. it is thrown only through the test harness code. Specific exception types
+  // distinguish errors returned when processing requests or responses.
+  // TODO(yanavlasov): modify the code to verify test expectations at the point of calling the
+  //                   codec under test through the ON_CALL expectations in the
+  //                   setupDefaultConnectionMocks() method. This will make the exceptions below
+  //                   unnecessary.
+ struct ClientCodecError : public std::runtime_error { + ClientCodecError(Http::Status&& status) + : std::runtime_error(std::string(status.message())), status_(std::move(status)) {} + const char* what() const noexcept override { return status_.message().data(); } + const Http::Status status_; + }; + + struct ServerCodecError : public std::runtime_error { + ServerCodecError(Http::Status&& status) + : std::runtime_error(std::string(status.message())), status_(std::move(status)) {} + const char* what() const noexcept override { return status_.message().data(); } + const Http::Status status_; + }; + + struct ConnectionWrapper { + Http::Status dispatch(const Buffer::Instance& data, ConnectionImpl& connection) { + Http::Status status = Http::okStatus(); + buffer_.add(data); + if (!dispatching_) { + while (buffer_.length() > 0) { + dispatching_ = true; + status = connection.dispatch(buffer_); + if (!status.ok()) { + // Exit early if we hit an error status. + return status; + } + dispatching_ = false; + } + } + return status; + } + + bool dispatching_{}; + Buffer::OwnedImpl buffer_; + }; + + enum SettingsTupleIndex { + HpackTableSize = 0, + MaxConcurrentStreams, + InitialStreamWindowSize, + InitialConnectionWindowSize + }; + + Http2CodecImplTestFixture() = default; + Http2CodecImplTestFixture(Http2SettingsTuple client_settings, Http2SettingsTuple server_settings) + : client_settings_(client_settings), server_settings_(server_settings) { + // Make sure we explicitly test for stream flush timer creation. + EXPECT_CALL(client_connection_.dispatcher_, createTimer_(_)).Times(0); + EXPECT_CALL(server_connection_.dispatcher_, createTimer_(_)).Times(0); + } + virtual ~Http2CodecImplTestFixture() { + client_connection_.dispatcher_.clearDeferredDeleteList(); + if (client_ != nullptr) { + client_.reset(); + EXPECT_EQ(0, TestUtility::findGauge(client_stats_store_, "http2.streams_active")->value()); + EXPECT_EQ(0, + TestUtility::findGauge(client_stats_store_, "http2.pending_send_bytes")->value()); + } + server_connection_.dispatcher_.clearDeferredDeleteList(); + if (server_ != nullptr) { + server_.reset(); + EXPECT_EQ(0, TestUtility::findGauge(server_stats_store_, "http2.streams_active")->value()); + EXPECT_EQ(0, + TestUtility::findGauge(server_stats_store_, "http2.pending_send_bytes")->value()); + } + } + + virtual void initialize() { + http2OptionsFromTuple(client_http2_options_, client_settings_); + http2OptionsFromTuple(server_http2_options_, server_settings_); + client_ = std::make_unique( + client_connection_, client_callbacks_, client_stats_store_, client_http2_options_, + max_request_headers_kb_, max_response_headers_count_, ProdNghttp2SessionFactory::get()); + server_ = std::make_unique( + server_connection_, server_callbacks_, server_stats_store_, server_http2_options_, + max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_); + + request_encoder_ = &client_->newStream(response_decoder_); + setupDefaultConnectionMocks(); + + EXPECT_CALL(server_callbacks_, newStream(_, _)) + .WillRepeatedly(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& { + response_encoder_ = &encoder; + encoder.getStream().addCallbacks(server_stream_callbacks_); + encoder.getStream().setFlushTimeout(std::chrono::milliseconds(30000)); + return request_decoder_; + })); + } + + void setupDefaultConnectionMocks() { + ON_CALL(client_connection_, write(_, _)) + .WillByDefault(Invoke([&](Buffer::Instance& data, bool) -> void { + if (corrupt_metadata_frame_) { + 
corruptMetadataFramePayload(data);
+          }
+          auto status = server_wrapper_.dispatch(data, *server_);
+          if (!status.ok()) {
+            throw ServerCodecError(std::move(status));
+          }
+        }));
+    ON_CALL(server_connection_, write(_, _))
+        .WillByDefault(Invoke([&](Buffer::Instance& data, bool) -> void {
+          auto status = client_wrapper_.dispatch(data, *client_);
+          if (!status.ok()) {
+            throw ClientCodecError(std::move(status));
+          }
+        }));
+  }
+
+  void http2OptionsFromTuple(envoy::config::core::v3::Http2ProtocolOptions& options,
+                             const absl::optional<Http2SettingsTuple>& tp) {
+    options.mutable_hpack_table_size()->set_value(
+        (tp.has_value()) ? ::testing::get<SettingsTupleIndex::HpackTableSize>(*tp)
+                         : CommonUtility::OptionsLimits::DEFAULT_HPACK_TABLE_SIZE);
+    options.mutable_max_concurrent_streams()->set_value(
+        (tp.has_value()) ? ::testing::get<SettingsTupleIndex::MaxConcurrentStreams>(*tp)
+                         : CommonUtility::OptionsLimits::DEFAULT_MAX_CONCURRENT_STREAMS);
+    options.mutable_initial_stream_window_size()->set_value(
+        (tp.has_value()) ? ::testing::get<SettingsTupleIndex::InitialStreamWindowSize>(*tp)
+                         : CommonUtility::OptionsLimits::DEFAULT_INITIAL_STREAM_WINDOW_SIZE);
+    options.mutable_initial_connection_window_size()->set_value(
+        (tp.has_value()) ? ::testing::get<SettingsTupleIndex::InitialConnectionWindowSize>(*tp)
+                         : CommonUtility::OptionsLimits::DEFAULT_INITIAL_CONNECTION_WINDOW_SIZE);
+    options.set_allow_metadata(allow_metadata_);
+    options.set_stream_error_on_invalid_http_messaging(stream_error_on_invalid_http_messaging_);
+    options.mutable_max_outbound_frames()->set_value(max_outbound_frames_);
+    options.mutable_max_outbound_control_frames()->set_value(max_outbound_control_frames_);
+    options.mutable_max_consecutive_inbound_frames_with_empty_payload()->set_value(
+        max_consecutive_inbound_frames_with_empty_payload_);
+    options.mutable_max_inbound_priority_frames_per_stream()->set_value(
+        max_inbound_priority_frames_per_stream_);
+    options.mutable_max_inbound_window_update_frames_per_data_frame_sent()->set_value(
+        max_inbound_window_update_frames_per_data_frame_sent_);
+  }
+
+  // corruptMetadataFramePayload assumes data contains at least 10 bytes of the beginning of a
+  // frame.
+ void corruptMetadataFramePayload(Buffer::Instance& data) { + const size_t length = data.length(); + const size_t corrupt_start = 10; + if (length < corrupt_start || length > METADATA_MAX_PAYLOAD_SIZE) { + ENVOY_LOG_MISC(error, "data size too big or too small"); + return; + } + corruptAtOffset(data, corrupt_start, 0xff); + } + + void corruptAtOffset(Buffer::Instance& data, size_t index, char new_value) { + if (data.length() == 0) { + return; + } + reinterpret_cast(data.linearize(data.length()))[index % data.length()] = new_value; + } + + void expectDetailsRequest(const absl::string_view details) { + EXPECT_EQ(details, request_encoder_->getStream().responseDetails()); + } + + void expectDetailsResponse(const absl::string_view details) { + EXPECT_EQ(details, response_encoder_->getStream().responseDetails()); + } + + absl::optional client_settings_; + absl::optional server_settings_; + bool allow_metadata_ = false; + bool stream_error_on_invalid_http_messaging_ = false; + Stats::TestUtil::TestStore client_stats_store_; + envoy::config::core::v3::Http2ProtocolOptions client_http2_options_; + NiceMock client_connection_; + MockConnectionCallbacks client_callbacks_; + std::unique_ptr client_; + ConnectionWrapper client_wrapper_; + Stats::TestUtil::TestStore server_stats_store_; + envoy::config::core::v3::Http2ProtocolOptions server_http2_options_; + NiceMock server_connection_; + MockServerConnectionCallbacks server_callbacks_; + std::unique_ptr server_; + ConnectionWrapper server_wrapper_; + MockResponseDecoder response_decoder_; + RequestEncoder* request_encoder_; + MockRequestDecoder request_decoder_; + ResponseEncoder* response_encoder_{}; + MockStreamCallbacks server_stream_callbacks_; + // Corrupt a metadata frame payload. + bool corrupt_metadata_frame_ = false; + + uint32_t max_request_headers_kb_ = Http::DEFAULT_MAX_REQUEST_HEADERS_KB; + uint32_t max_request_headers_count_ = Http::DEFAULT_MAX_HEADERS_COUNT; + uint32_t max_response_headers_count_ = Http::DEFAULT_MAX_HEADERS_COUNT; + uint32_t max_outbound_frames_ = CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES; + uint32_t max_outbound_control_frames_ = + CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_CONTROL_FRAMES; + uint32_t max_consecutive_inbound_frames_with_empty_payload_ = + CommonUtility::OptionsLimits::DEFAULT_MAX_CONSECUTIVE_INBOUND_FRAMES_WITH_EMPTY_PAYLOAD; + uint32_t max_inbound_priority_frames_per_stream_ = + CommonUtility::OptionsLimits::DEFAULT_MAX_INBOUND_PRIORITY_FRAMES_PER_STREAM; + uint32_t max_inbound_window_update_frames_per_data_frame_sent_ = + CommonUtility::OptionsLimits::DEFAULT_MAX_INBOUND_WINDOW_UPDATE_FRAMES_PER_DATA_FRAME_SENT; + envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action_{envoy::config::core::v3::HttpProtocolOptions::ALLOW}; +}; + +class Http2CodecImplTest : public ::testing::TestWithParam, + protected Http2CodecImplTestFixture { +public: + Http2CodecImplTest() + : Http2CodecImplTestFixture(::testing::get<0>(GetParam()), ::testing::get<1>(GetParam())) {} + +protected: + void priorityFlood() { + initialize(); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers, "POST"); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, false)); + request_encoder_->encodeHeaders(request_headers, false); + + nghttp2_priority_spec spec = {0, 10, 0}; + // HTTP/2 codec adds 1 to the number of active streams when computing PRIORITY frames limit + constexpr uint32_t max_allowed = + 2 * 
CommonUtility::OptionsLimits::DEFAULT_MAX_INBOUND_PRIORITY_FRAMES_PER_STREAM; + for (uint32_t i = 0; i < max_allowed + 1; ++i) { + EXPECT_EQ(0, nghttp2_submit_priority(client_->session(), NGHTTP2_FLAG_NONE, 1, &spec)); + } + } + + void windowUpdateFlood() { + initialize(); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); + request_encoder_->encodeHeaders(request_headers, true); + + // Send one DATA frame back + EXPECT_CALL(response_decoder_, decodeHeaders_(_, false)); + EXPECT_CALL(response_decoder_, decodeData(_, false)); + TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + response_encoder_->encodeHeaders(response_headers, false); + Buffer::OwnedImpl data("0"); + EXPECT_NO_THROW(response_encoder_->encodeData(data, false)); + + // See the limit formula in the + // `Envoy::Http::Http2::ServerConnectionImpl::checkInboundFrameLimits()' method. + constexpr uint32_t max_allowed = + 1 + 2 * (CommonUtility::OptionsLimits:: + DEFAULT_MAX_INBOUND_WINDOW_UPDATE_FRAMES_PER_DATA_FRAME_SENT + + 1); + for (uint32_t i = 0; i < max_allowed + 1; ++i) { + EXPECT_EQ(0, nghttp2_submit_window_update(client_->session(), NGHTTP2_FLAG_NONE, 1, 1)); + } + } + + void emptyDataFlood(Buffer::OwnedImpl& data) { + initialize(); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers, "POST"); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, false)); + request_encoder_->encodeHeaders(request_headers, false); + + // HTTP/2 codec does not send empty DATA frames with no END_STREAM flag. + // To make this work, send raw bytes representing empty DATA frames bypassing client codec. + Http2Frame emptyDataFrame = Http2Frame::makeEmptyDataFrame(0); + constexpr uint32_t max_allowed = + CommonUtility::OptionsLimits::DEFAULT_MAX_CONSECUTIVE_INBOUND_FRAMES_WITH_EMPTY_PAYLOAD; + for (uint32_t i = 0; i < max_allowed + 1; ++i) { + data.add(emptyDataFrame.data(), emptyDataFrame.size()); + } + } +}; + +TEST_P(Http2CodecImplTest, ShutdownNotice) { + initialize(); + EXPECT_EQ(absl::nullopt, request_encoder_->http1StreamEncoderOptions()); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); + request_encoder_->encodeHeaders(request_headers, true); + + EXPECT_CALL(client_callbacks_, onGoAway(_)); + server_->shutdownNotice(); + server_->goAway(); + + TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + EXPECT_CALL(response_decoder_, decodeHeaders_(_, true)); + response_encoder_->encodeHeaders(response_headers, true); +} + +TEST_P(Http2CodecImplTest, ContinueHeaders) { + initialize(); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); + request_encoder_->encodeHeaders(request_headers, true); + + TestResponseHeaderMapImpl continue_headers{{":status", "100"}}; + EXPECT_CALL(response_decoder_, decode100ContinueHeaders_(_)); + response_encoder_->encode100ContinueHeaders(continue_headers); + + TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + EXPECT_CALL(response_decoder_, decodeHeaders_(_, true)); + response_encoder_->encodeHeaders(response_headers, true); +}; + +TEST_P(Http2CodecImplTest, InvalidContinueWithFin) { + initialize(); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + 
EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); + request_encoder_->encodeHeaders(request_headers, true); + + TestResponseHeaderMapImpl continue_headers{{":status", "100"}}; + EXPECT_THROW(response_encoder_->encodeHeaders(continue_headers, true), ClientCodecError); + EXPECT_EQ(1, client_stats_store_.counter("http2.rx_messaging_error").value()); +} + +TEST_P(Http2CodecImplTest, InvalidContinueWithFinAllowed) { + stream_error_on_invalid_http_messaging_ = true; + initialize(); + + MockStreamCallbacks request_callbacks; + request_encoder_->getStream().addCallbacks(request_callbacks); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); + request_encoder_->encodeHeaders(request_headers, true); + + // Buffer client data to avoid mock recursion causing lifetime issues. + ON_CALL(server_connection_, write(_, _)) + .WillByDefault( + Invoke([&](Buffer::Instance& data, bool) -> void { client_wrapper_.buffer_.add(data); })); + + TestResponseHeaderMapImpl continue_headers{{":status", "100"}}; + response_encoder_->encodeHeaders(continue_headers, true); + + // Flush pending data. + EXPECT_CALL(request_callbacks, onResetStream(StreamResetReason::LocalReset, _)); + setupDefaultConnectionMocks(); + auto status = client_wrapper_.dispatch(Buffer::OwnedImpl(), *client_); + EXPECT_TRUE(status.ok()); + + EXPECT_EQ(1, client_stats_store_.counter("http2.rx_messaging_error").value()); + expectDetailsRequest("http2.violation.of.messaging.rule"); +} + +TEST_P(Http2CodecImplTest, InvalidRepeatContinue) { + initialize(); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); + request_encoder_->encodeHeaders(request_headers, true); + + TestResponseHeaderMapImpl continue_headers{{":status", "100"}}; + EXPECT_CALL(response_decoder_, decode100ContinueHeaders_(_)); + response_encoder_->encode100ContinueHeaders(continue_headers); + + EXPECT_THROW(response_encoder_->encodeHeaders(continue_headers, true), ClientCodecError); + EXPECT_EQ(1, client_stats_store_.counter("http2.rx_messaging_error").value()); +}; + +TEST_P(Http2CodecImplTest, InvalidRepeatContinueAllowed) { + stream_error_on_invalid_http_messaging_ = true; + initialize(); + + MockStreamCallbacks request_callbacks; + request_encoder_->getStream().addCallbacks(request_callbacks); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); + request_encoder_->encodeHeaders(request_headers, true); + + TestResponseHeaderMapImpl continue_headers{{":status", "100"}}; + EXPECT_CALL(response_decoder_, decode100ContinueHeaders_(_)); + response_encoder_->encode100ContinueHeaders(continue_headers); + + // Buffer client data to avoid mock recursion causing lifetime issues. + ON_CALL(server_connection_, write(_, _)) + .WillByDefault( + Invoke([&](Buffer::Instance& data, bool) -> void { client_wrapper_.buffer_.add(data); })); + + response_encoder_->encodeHeaders(continue_headers, true); + + // Flush pending data. 
+ EXPECT_CALL(request_callbacks, onResetStream(StreamResetReason::LocalReset, _)); + setupDefaultConnectionMocks(); + auto status = client_wrapper_.dispatch(Buffer::OwnedImpl(), *client_); + EXPECT_TRUE(status.ok()); + + EXPECT_EQ(1, client_stats_store_.counter("http2.rx_messaging_error").value()); + expectDetailsRequest("http2.violation.of.messaging.rule"); +}; + +TEST_P(Http2CodecImplTest, Invalid103) { + initialize(); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); + request_encoder_->encodeHeaders(request_headers, true); + + TestResponseHeaderMapImpl continue_headers{{":status", "100"}}; + EXPECT_CALL(response_decoder_, decode100ContinueHeaders_(_)); + response_encoder_->encode100ContinueHeaders(continue_headers); + + TestResponseHeaderMapImpl early_hint_headers{{":status", "103"}}; + EXPECT_CALL(response_decoder_, decodeHeaders_(_, false)); + response_encoder_->encodeHeaders(early_hint_headers, false); + + EXPECT_THROW_WITH_MESSAGE(response_encoder_->encodeHeaders(early_hint_headers, false), + ClientCodecError, "Unexpected 'trailers' with no end stream."); + EXPECT_EQ(1, client_stats_store_.counter("http2.too_many_header_frames").value()); +} + +TEST_P(Http2CodecImplTest, Invalid204WithContentLength) { + initialize(); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); + request_encoder_->encodeHeaders(request_headers, true); + + TestResponseHeaderMapImpl response_headers{{":status", "204"}, {"content-length", "3"}}; + // What follows is a hack to get headers that should span into continuation frames. The default + // maximum frame size is 16K. We will add 3,000 headers that will take us above this size and + // not easily compress with HPACK. (I confirmed this generates 26,468 bytes of header data + // which should contain a continuation.) + for (unsigned i = 1; i < 3000; i++) { + response_headers.addCopy(std::to_string(i), std::to_string(i)); + } + + EXPECT_LOG_CONTAINS( + "debug", + "Invalid HTTP header field was received: frame type: 1, stream: 1, name: [content-length], " + "value: [3]", + EXPECT_THROW(response_encoder_->encodeHeaders(response_headers, false), ClientCodecError)); + EXPECT_EQ(1, client_stats_store_.counter("http2.rx_messaging_error").value()); +}; + +TEST_P(Http2CodecImplTest, Invalid204WithContentLengthAllowed) { + stream_error_on_invalid_http_messaging_ = true; + initialize(); + + MockStreamCallbacks request_callbacks; + request_encoder_->getStream().addCallbacks(request_callbacks); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); + request_encoder_->encodeHeaders(request_headers, true); + + // Buffer client data to avoid mock recursion causing lifetime issues. + ON_CALL(server_connection_, write(_, _)) + .WillByDefault( + Invoke([&](Buffer::Instance& data, bool) -> void { client_wrapper_.buffer_.add(data); })); + + TestResponseHeaderMapImpl response_headers{{":status", "204"}, {"content-length", "3"}}; + // What follows is a hack to get headers that should span into continuation frames. The default + // maximum frame size is 16K. We will add 3,000 headers that will take us above this size and + // not easily compress with HPACK. (I confirmed this generates 26,468 bytes of header data + // which should contain a continuation.) 
+ for (int i = 1; i < 3000; i++) { + response_headers.addCopy(std::to_string(i), std::to_string(i)); + } + + response_encoder_->encodeHeaders(response_headers, false); + + // Flush pending data. + EXPECT_CALL(request_callbacks, onResetStream(StreamResetReason::LocalReset, _)); + EXPECT_CALL(server_stream_callbacks_, onResetStream(StreamResetReason::RemoteReset, _)); + setupDefaultConnectionMocks(); + auto status = client_wrapper_.dispatch(Buffer::OwnedImpl(), *client_); + EXPECT_TRUE(status.ok()); + + EXPECT_EQ(1, client_stats_store_.counter("http2.rx_messaging_error").value()); + expectDetailsRequest("http2.invalid.header.field"); +}; + +TEST_P(Http2CodecImplTest, RefusedStreamReset) { + initialize(); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, false)); + request_encoder_->encodeHeaders(request_headers, false); + + MockStreamCallbacks callbacks; + request_encoder_->getStream().addCallbacks(callbacks); + EXPECT_CALL(server_stream_callbacks_, + onResetStream(StreamResetReason::LocalRefusedStreamReset, _)); + EXPECT_CALL(callbacks, onResetStream(StreamResetReason::RemoteRefusedStreamReset, _)); + response_encoder_->getStream().resetStream(StreamResetReason::LocalRefusedStreamReset); +} + +TEST_P(Http2CodecImplTest, InvalidHeadersFrame) { + initialize(); + + EXPECT_THROW(request_encoder_->encodeHeaders(TestRequestHeaderMapImpl{}, true), ServerCodecError); + EXPECT_EQ(1, server_stats_store_.counter("http2.rx_messaging_error").value()); +} + +TEST_P(Http2CodecImplTest, InvalidHeadersFrameAllowed) { + stream_error_on_invalid_http_messaging_ = true; + initialize(); + + MockStreamCallbacks request_callbacks; + request_encoder_->getStream().addCallbacks(request_callbacks); + + ON_CALL(client_connection_, write(_, _)) + .WillByDefault( + Invoke([&](Buffer::Instance& data, bool) -> void { server_wrapper_.buffer_.add(data); })); + + request_encoder_->encodeHeaders(TestRequestHeaderMapImpl{}, true); + EXPECT_CALL(server_stream_callbacks_, onResetStream(StreamResetReason::LocalReset, _)); + EXPECT_CALL(request_callbacks, onResetStream(StreamResetReason::RemoteReset, _)); + auto status = server_wrapper_.dispatch(Buffer::OwnedImpl(), *server_); + EXPECT_TRUE(status.ok()); + expectDetailsResponse("http2.violation.of.messaging.rule"); +} + +TEST_P(Http2CodecImplTest, TrailingHeaders) { + initialize(); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, false)); + request_encoder_->encodeHeaders(request_headers, false); + EXPECT_CALL(request_decoder_, decodeData(_, false)); + Buffer::OwnedImpl hello("hello"); + request_encoder_->encodeData(hello, false); + EXPECT_CALL(request_decoder_, decodeTrailers_(_)); + request_encoder_->encodeTrailers(TestRequestTrailerMapImpl{{"trailing", "header"}}); + + TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + EXPECT_CALL(response_decoder_, decodeHeaders_(_, false)); + response_encoder_->encodeHeaders(response_headers, false); + EXPECT_CALL(response_decoder_, decodeData(_, false)); + Buffer::OwnedImpl world("world"); + response_encoder_->encodeData(world, false); + EXPECT_CALL(response_decoder_, decodeTrailers_(_)); + response_encoder_->encodeTrailers(TestResponseTrailerMapImpl{{"trailing", "header"}}); +} + +TEST_P(Http2CodecImplTest, TrailingHeadersLargeClientBody) { + initialize(); + + // Buffer server data so we can make sure we don't get any window 
updates. + ON_CALL(client_connection_, write(_, _)) + .WillByDefault( + Invoke([&](Buffer::Instance& data, bool) -> void { server_wrapper_.buffer_.add(data); })); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, false)); + request_encoder_->encodeHeaders(request_headers, false); + EXPECT_CALL(request_decoder_, decodeData(_, false)).Times(AtLeast(1)); + Buffer::OwnedImpl body(std::string(1024 * 1024, 'a')); + request_encoder_->encodeData(body, false); + request_encoder_->encodeTrailers(TestRequestTrailerMapImpl{{"trailing", "header"}}); + + // Flush pending data. + setupDefaultConnectionMocks(); + EXPECT_CALL(request_decoder_, decodeTrailers_(_)); + auto status = server_wrapper_.dispatch(Buffer::OwnedImpl(), *server_); + EXPECT_TRUE(status.ok()); + + TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + EXPECT_CALL(response_decoder_, decodeHeaders_(_, false)); + response_encoder_->encodeHeaders(response_headers, false); + EXPECT_CALL(response_decoder_, decodeData(_, false)); + Buffer::OwnedImpl world("world"); + response_encoder_->encodeData(world, false); + EXPECT_CALL(response_decoder_, decodeTrailers_(_)); + response_encoder_->encodeTrailers(TestResponseTrailerMapImpl{{"trailing", "header"}}); +} + +TEST_P(Http2CodecImplTest, SmallMetadataVecTest) { + allow_metadata_ = true; + initialize(); + + // Generates a valid stream_id by sending a request header. + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); + request_encoder_->encodeHeaders(request_headers, true); + + MetadataMapVector metadata_map_vector; + const int size = 10; + for (int i = 0; i < size; i++) { + MetadataMap metadata_map = { + {"header_key1", "header_value1"}, + {"header_key2", "header_value2"}, + {"header_key3", "header_value3"}, + {"header_key4", "header_value4"}, + }; + MetadataMapPtr metadata_map_ptr = std::make_unique(metadata_map); + metadata_map_vector.push_back(std::move(metadata_map_ptr)); + } + + EXPECT_CALL(request_decoder_, decodeMetadata_(_)).Times(size); + request_encoder_->encodeMetadata(metadata_map_vector); + + EXPECT_CALL(response_decoder_, decodeMetadata_(_)).Times(size); + response_encoder_->encodeMetadata(metadata_map_vector); +} + +TEST_P(Http2CodecImplTest, LargeMetadataVecTest) { + allow_metadata_ = true; + initialize(); + + // Generates a valid stream_id by sending a request header. + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); + request_encoder_->encodeHeaders(request_headers, true); + + MetadataMapVector metadata_map_vector; + const int size = 10; + for (int i = 0; i < size; i++) { + MetadataMap metadata_map = { + {"header_key1", std::string(50 * 1024, 'a')}, + }; + MetadataMapPtr metadata_map_ptr = std::make_unique(metadata_map); + metadata_map_vector.push_back(std::move(metadata_map_ptr)); + } + + EXPECT_CALL(request_decoder_, decodeMetadata_(_)).Times(size); + request_encoder_->encodeMetadata(metadata_map_vector); + + EXPECT_CALL(response_decoder_, decodeMetadata_(_)).Times(size); + response_encoder_->encodeMetadata(metadata_map_vector); +} + +TEST_P(Http2CodecImplTest, BadMetadataVecReceivedTest) { + allow_metadata_ = true; + initialize(); + + // Generates a valid stream_id by sending a request header. 
+ TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); + request_encoder_->encodeHeaders(request_headers, true); + + MetadataMap metadata_map = { + {"header_key1", "header_value1"}, + {"header_key2", "header_value2"}, + {"header_key3", "header_value3"}, + {"header_key4", "header_value4"}, + }; + MetadataMapPtr metadata_map_ptr = std::make_unique(metadata_map); + MetadataMapVector metadata_map_vector; + metadata_map_vector.push_back(std::move(metadata_map_ptr)); + + corrupt_metadata_frame_ = true; + EXPECT_THROW_WITH_MESSAGE(request_encoder_->encodeMetadata(metadata_map_vector), ServerCodecError, + "The user callback function failed"); +} + +// Encode response metadata while dispatching request data from the client, so +// that nghttp2 can't fill the metadata frames' payloads until dispatching +// is finished. +TEST_P(Http2CodecImplTest, EncodeMetadataWhileDispatchingTest) { + allow_metadata_ = true; + initialize(); + + MetadataMapVector metadata_map_vector; + const int size = 10; + for (int i = 0; i < size; i++) { + MetadataMap metadata_map = { + {"header_key1", "header_value1"}, + {"header_key2", "header_value2"}, + {"header_key3", "header_value3"}, + {"header_key4", "header_value4"}, + }; + MetadataMapPtr metadata_map_ptr = std::make_unique(metadata_map); + metadata_map_vector.push_back(std::move(metadata_map_ptr)); + } + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)).WillOnce(InvokeWithoutArgs([&]() -> void { + response_encoder_->encodeMetadata(metadata_map_vector); + })); + EXPECT_CALL(response_decoder_, decodeMetadata_(_)).Times(size); + request_encoder_->encodeHeaders(request_headers, true); +} +class Http2CodecImplDeferredResetTest : public Http2CodecImplTest {}; + +TEST_P(Http2CodecImplDeferredResetTest, DeferredResetClient) { + initialize(); + + InSequence s; + + MockStreamCallbacks client_stream_callbacks; + request_encoder_->getStream().addCallbacks(client_stream_callbacks); + + // Do a request, but pause server dispatch so we don't send window updates. This will result in a + // deferred reset, followed by a pending frames flush which will cause the stream to actually + // be reset immediately since we are outside of dispatch context. + ON_CALL(client_connection_, write(_, _)) + .WillByDefault( + Invoke([&](Buffer::Instance& data, bool) -> void { server_wrapper_.buffer_.add(data); })); + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + request_encoder_->encodeHeaders(request_headers, false); + Buffer::OwnedImpl body(std::string(1024 * 1024, 'a')); + EXPECT_CALL(client_stream_callbacks, onAboveWriteBufferHighWatermark()).Times(AnyNumber()); + request_encoder_->encodeData(body, true); + EXPECT_CALL(client_stream_callbacks, onResetStream(StreamResetReason::LocalReset, _)); + request_encoder_->getStream().resetStream(StreamResetReason::LocalReset); + + // Dispatch server. We expect to see some data. + EXPECT_CALL(response_decoder_, decodeHeaders_(_, _)).Times(0); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, false)).WillOnce(InvokeWithoutArgs([&]() -> void { + // Start a response inside the headers callback. This should not result in the client + // seeing any headers as the stream should already be reset on the other side, even though + // we don't know about it yet. 
+ TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + response_encoder_->encodeHeaders(response_headers, false); + })); + EXPECT_CALL(request_decoder_, decodeData(_, false)).Times(AtLeast(1)); + EXPECT_CALL(server_stream_callbacks_, onResetStream(StreamResetReason::RemoteReset, _)); + + setupDefaultConnectionMocks(); + auto status = server_wrapper_.dispatch(Buffer::OwnedImpl(), *server_); + EXPECT_TRUE(status.ok()); +} + +TEST_P(Http2CodecImplDeferredResetTest, DeferredResetServer) { + initialize(); + + InSequence s; + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, false)); + request_encoder_->encodeHeaders(request_headers, false); + + // In this case we do the same thing as DeferredResetClient but on the server side. + ON_CALL(server_connection_, write(_, _)) + .WillByDefault( + Invoke([&](Buffer::Instance& data, bool) -> void { client_wrapper_.buffer_.add(data); })); + TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + response_encoder_->encodeHeaders(response_headers, false); + Buffer::OwnedImpl body(std::string(1024 * 1024, 'a')); + EXPECT_CALL(server_stream_callbacks_, onAboveWriteBufferHighWatermark()).Times(AnyNumber()); + auto flush_timer = new Event::MockTimer(&server_connection_.dispatcher_); + EXPECT_CALL(*flush_timer, enableTimer(std::chrono::milliseconds(30000), _)); + response_encoder_->encodeData(body, true); + EXPECT_CALL(server_stream_callbacks_, onResetStream(StreamResetReason::LocalReset, _)); + EXPECT_CALL(*flush_timer, disableTimer()); + response_encoder_->getStream().resetStream(StreamResetReason::LocalReset); + + MockStreamCallbacks client_stream_callbacks; + request_encoder_->getStream().addCallbacks(client_stream_callbacks); + EXPECT_CALL(response_decoder_, decodeHeaders_(_, false)); + EXPECT_CALL(response_decoder_, decodeData(_, false)).Times(AtLeast(1)); + EXPECT_CALL(client_stream_callbacks, onResetStream(StreamResetReason::RemoteReset, _)); + setupDefaultConnectionMocks(); + auto status = client_wrapper_.dispatch(Buffer::OwnedImpl(), *client_); + EXPECT_TRUE(status.ok()); +} + +class Http2CodecImplFlowControlTest : public Http2CodecImplTest {}; + +// Back up the pending_sent_data_ buffer in the client connection and make sure the watermarks fire +// as expected. +// +// This also tests the readDisable logic in StreamImpl, verifying that h2 bytes are consumed +// when the stream has readDisable(true) called. +TEST_P(Http2CodecImplFlowControlTest, TestFlowControlInPendingSendData) { + initialize(); + MockStreamCallbacks callbacks; + request_encoder_->getStream().addCallbacks(callbacks); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + TestRequestHeaderMapImpl expected_headers; + HttpTestUtility::addDefaultHeaders(expected_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(HeaderMapEqual(&expected_headers), false)); + request_encoder_->encodeHeaders(request_headers, false); + + // Force the server stream to be read disabled. This will cause it to stop sending window + // updates to the client. 
+ server_->getStream(1)->readDisable(true); + EXPECT_EQ(1, TestUtility::findGauge(client_stats_store_, "http2.streams_active")->value()); + EXPECT_EQ(1, TestUtility::findGauge(server_stats_store_, "http2.streams_active")->value()); + + uint32_t initial_stream_window = + nghttp2_session_get_stream_effective_local_window_size(client_->session(), 1); + // If this limit is changed, this test will fail due to the initial large writes being divided + // into more than 4 frames. Fast fail here with this explanatory comment. + ASSERT_EQ(65535, initial_stream_window); + // Make sure the limits were configured properly in test set up. + EXPECT_EQ(initial_stream_window, server_->getStream(1)->bufferLimit()); + EXPECT_EQ(initial_stream_window, client_->getStream(1)->bufferLimit()); + + // One large write gets broken into smaller frames. + EXPECT_CALL(request_decoder_, decodeData(_, false)).Times(AnyNumber()); + Buffer::OwnedImpl long_data(std::string(initial_stream_window, 'a')); + request_encoder_->encodeData(long_data, false); + + // Verify that the window is full. The client will not send more data to the server for this + // stream. + EXPECT_EQ(0, nghttp2_session_get_stream_local_window_size(server_->session(), 1)); + EXPECT_EQ(0, nghttp2_session_get_stream_remote_window_size(client_->session(), 1)); + EXPECT_EQ(initial_stream_window, server_->getStream(1)->unconsumed_bytes_); + + // Now that the flow control window is full, further data causes the send buffer to back up. + Buffer::OwnedImpl more_long_data(std::string(initial_stream_window, 'a')); + request_encoder_->encodeData(more_long_data, false); + EXPECT_EQ(initial_stream_window, client_->getStream(1)->pending_send_data_.length()); + EXPECT_EQ(initial_stream_window, + TestUtility::findGauge(client_stats_store_, "http2.pending_send_bytes")->value()); + EXPECT_EQ(initial_stream_window, server_->getStream(1)->unconsumed_bytes_); + + // If we go over the limit, the stream callbacks should fire. + EXPECT_CALL(callbacks, onAboveWriteBufferHighWatermark()); + Buffer::OwnedImpl last_byte("!"); + request_encoder_->encodeData(last_byte, false); + EXPECT_EQ(initial_stream_window + 1, client_->getStream(1)->pending_send_data_.length()); + EXPECT_EQ(initial_stream_window + 1, + TestUtility::findGauge(client_stats_store_, "http2.pending_send_bytes")->value()); + + // Now create a second stream on the connection. + MockResponseDecoder response_decoder2; + RequestEncoder* request_encoder2 = &client_->newStream(response_decoder_); + StreamEncoder* response_encoder2; + MockStreamCallbacks server_stream_callbacks2; + MockRequestDecoder request_decoder2; + // When the server stream is created it should check the status of the + // underlying connection. Pretend it is overrun. + EXPECT_CALL(server_connection_, aboveHighWatermark()).WillOnce(Return(true)); + EXPECT_CALL(server_stream_callbacks2, onAboveWriteBufferHighWatermark()); + EXPECT_CALL(server_callbacks_, newStream(_, _)) + .WillOnce(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& { + response_encoder2 = &encoder; + encoder.getStream().addCallbacks(server_stream_callbacks2); + return request_decoder2; + })); + EXPECT_CALL(request_decoder2, decodeHeaders_(_, false)); + request_encoder2->encodeHeaders(request_headers, false); + + // Add the stream callbacks belatedly. On creation the stream should have + // been noticed that the connection was backed up. Any new subscriber to + // stream callbacks should get a callback when they addCallbacks. 
+ MockStreamCallbacks callbacks2; + EXPECT_CALL(callbacks2, onAboveWriteBufferHighWatermark()); + request_encoder_->getStream().addCallbacks(callbacks2); + + // Add a third callback to make testing removal mid-watermark call below more interesting. + MockStreamCallbacks callbacks3; + EXPECT_CALL(callbacks3, onAboveWriteBufferHighWatermark()); + request_encoder_->getStream().addCallbacks(callbacks3); + + // Now unblock the server's stream. This will cause the bytes to be consumed, flow control + // updates to be sent, and the client to flush all queued data. + // For bonus corner case coverage, remove callback2 in the middle of runLowWatermarkCallbacks() + // and ensure it is not called. + EXPECT_CALL(callbacks, onBelowWriteBufferLowWatermark()).WillOnce(Invoke([&]() -> void { + request_encoder_->getStream().removeCallbacks(callbacks2); + })); + EXPECT_CALL(callbacks2, onBelowWriteBufferLowWatermark()).Times(0); + EXPECT_CALL(callbacks3, onBelowWriteBufferLowWatermark()); + server_->getStream(1)->readDisable(false); + EXPECT_EQ(0, client_->getStream(1)->pending_send_data_.length()); + EXPECT_EQ(0, TestUtility::findGauge(client_stats_store_, "http2.pending_send_bytes")->value()); + // The extra 1 byte sent won't trigger another window update, so the final window should be the + // initial window minus the last 1 byte flush from the client to server. + EXPECT_EQ(initial_stream_window - 1, + nghttp2_session_get_stream_local_window_size(server_->session(), 1)); + EXPECT_EQ(initial_stream_window - 1, + nghttp2_session_get_stream_remote_window_size(client_->session(), 1)); +} + +// Set up the same asTestFlowControlInPendingSendData, but tears the stream down with an early reset +// once the flow control window is full up. +TEST_P(Http2CodecImplFlowControlTest, EarlyResetRestoresWindow) { + initialize(); + MockStreamCallbacks callbacks; + request_encoder_->getStream().addCallbacks(callbacks); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + TestRequestHeaderMapImpl expected_headers; + HttpTestUtility::addDefaultHeaders(expected_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(HeaderMapEqual(&expected_headers), false)); + request_encoder_->encodeHeaders(request_headers, false); + + // Force the server stream to be read disabled. This will cause it to stop sending window + // updates to the client. + server_->getStream(1)->readDisable(true); + + uint32_t initial_stream_window = + nghttp2_session_get_stream_effective_local_window_size(client_->session(), 1); + uint32_t initial_connection_window = nghttp2_session_get_remote_window_size(client_->session()); + // If this limit is changed, this test will fail due to the initial large writes being divided + // into more than 4 frames. Fast fail here with this explanatory comment. + ASSERT_EQ(65535, initial_stream_window); + // One large write may get broken into smaller frames. + EXPECT_CALL(request_decoder_, decodeData(_, false)).Times(AnyNumber()); + Buffer::OwnedImpl long_data(std::string(initial_stream_window, 'a')); + // The one giant write will cause the buffer to go over the limit, then drain and go back under + // the limit. + request_encoder_->encodeData(long_data, false); + + // Verify that the window is full. The client will not send more data to the server for this + // stream. 
+ EXPECT_EQ(0, nghttp2_session_get_stream_local_window_size(server_->session(), 1)); + EXPECT_EQ(0, nghttp2_session_get_stream_remote_window_size(client_->session(), 1)); + EXPECT_EQ(initial_stream_window, server_->getStream(1)->unconsumed_bytes_); + EXPECT_GT(initial_connection_window, nghttp2_session_get_remote_window_size(client_->session())); + + EXPECT_CALL(server_stream_callbacks_, + onResetStream(StreamResetReason::LocalRefusedStreamReset, _)); + EXPECT_CALL(callbacks, onAboveWriteBufferHighWatermark()).Times(0); + EXPECT_CALL(callbacks, onBelowWriteBufferLowWatermark()).Times(0); + EXPECT_CALL(server_stream_callbacks_, onAboveWriteBufferHighWatermark()).Times(0); + EXPECT_CALL(server_stream_callbacks_, onBelowWriteBufferLowWatermark()).Times(0); + EXPECT_CALL(callbacks, onResetStream(StreamResetReason::RemoteRefusedStreamReset, _)) + .WillOnce(Invoke([&](StreamResetReason, absl::string_view) -> void { + // Test the case where the reset callbacks cause the socket to fill up, + // causing the underlying connection to back up. Given the stream is + // being destroyed the watermark callbacks should not fire (mocks for Times(0) + // above) + client_->onUnderlyingConnectionAboveWriteBufferHighWatermark(); + client_->onUnderlyingConnectionBelowWriteBufferLowWatermark(); + server_->onUnderlyingConnectionAboveWriteBufferHighWatermark(); + server_->onUnderlyingConnectionBelowWriteBufferLowWatermark(); + })); + response_encoder_->getStream().resetStream(StreamResetReason::LocalRefusedStreamReset); + + // Regression test that the window is consumed even if the stream is destroyed early. + EXPECT_EQ(initial_connection_window, nghttp2_session_get_remote_window_size(client_->session())); +} + +// Test the HTTP2 pending_recv_data_ buffer going over and under watermark limits. +TEST_P(Http2CodecImplFlowControlTest, FlowControlPendingRecvData) { + initialize(); + MockStreamCallbacks callbacks; + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + TestRequestHeaderMapImpl expected_headers; + HttpTestUtility::addDefaultHeaders(expected_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(HeaderMapEqual(&expected_headers), false)); + request_encoder_->encodeHeaders(request_headers, false); + + // Set artificially small watermarks to make the recv buffer easy to overrun. In production, + // the recv buffer can be overrun by a client which negotiates a larger + // SETTINGS_MAX_FRAME_SIZE but there's no current easy way to tweak that in + // envoy (without sending raw HTTP/2 frames) so we lower the buffer limit instead. + server_->getStream(1)->setWriteBufferWatermarks(10, 20); + + EXPECT_CALL(request_decoder_, decodeData(_, false)); + Buffer::OwnedImpl data(std::string(40, 'a')); + request_encoder_->encodeData(data, false); +} + +// Verify that we create and disable the stream flush timer when trailers follow a stream that +// does not have enough window. 
+TEST_P(Http2CodecImplFlowControlTest, TrailingHeadersLargeServerBody) { + initialize(); + + InSequence s; + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); + request_encoder_->encodeHeaders(request_headers, true); + + ON_CALL(client_connection_, write(_, _)) + .WillByDefault( + Invoke([&](Buffer::Instance& data, bool) -> void { server_wrapper_.buffer_.add(data); })); + TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + EXPECT_CALL(response_decoder_, decodeHeaders_(_, false)); + response_encoder_->encodeHeaders(response_headers, false); + EXPECT_CALL(server_stream_callbacks_, onAboveWriteBufferHighWatermark()); + EXPECT_CALL(response_decoder_, decodeData(_, false)).Times(AtLeast(1)); + auto flush_timer = new Event::MockTimer(&server_connection_.dispatcher_); + EXPECT_CALL(*flush_timer, enableTimer(std::chrono::milliseconds(30000), _)); + Buffer::OwnedImpl body(std::string(1024 * 1024, 'a')); + response_encoder_->encodeData(body, false); + response_encoder_->encodeTrailers(TestResponseTrailerMapImpl{{"trailing", "header"}}); + + // Send window updates from the client. + setupDefaultConnectionMocks(); + EXPECT_CALL(response_decoder_, decodeData(_, false)).Times(AtLeast(1)); + EXPECT_CALL(response_decoder_, decodeTrailers_(_)); + EXPECT_CALL(*flush_timer, disableTimer()); + auto status = server_wrapper_.dispatch(Buffer::OwnedImpl(), *server_); + EXPECT_TRUE(status.ok()); + EXPECT_EQ(0, server_stats_store_.counter("http2.tx_flush_timeout").value()); +} + +// Verify that we create and handle the stream flush timeout when trailers follow a stream that +// does not have enough window. +TEST_P(Http2CodecImplFlowControlTest, TrailingHeadersLargeServerBodyFlushTimeout) { + initialize(); + + InSequence s; + MockStreamCallbacks client_stream_callbacks; + request_encoder_->getStream().addCallbacks(client_stream_callbacks); + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); + request_encoder_->encodeHeaders(request_headers, true); + + ON_CALL(client_connection_, write(_, _)) + .WillByDefault( + Invoke([&](Buffer::Instance& data, bool) -> void { server_wrapper_.buffer_.add(data); })); + TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + EXPECT_CALL(response_decoder_, decodeHeaders_(_, false)); + response_encoder_->encodeHeaders(response_headers, false); + EXPECT_CALL(server_stream_callbacks_, onAboveWriteBufferHighWatermark()); + EXPECT_CALL(response_decoder_, decodeData(_, false)).Times(AtLeast(1)); + auto flush_timer = new Event::MockTimer(&server_connection_.dispatcher_); + EXPECT_CALL(*flush_timer, enableTimer(std::chrono::milliseconds(30000), _)); + Buffer::OwnedImpl body(std::string(1024 * 1024, 'a')); + response_encoder_->encodeData(body, false); + response_encoder_->encodeTrailers(TestResponseTrailerMapImpl{{"trailing", "header"}}); + + // Invoke a stream flush timeout. Make sure we don't get a reset locally for higher layers but + // we do get a reset on the client. 
+ EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(0); + EXPECT_CALL(client_stream_callbacks, onResetStream(StreamResetReason::RemoteReset, _)); + flush_timer->invokeCallback(); + EXPECT_EQ(1, server_stats_store_.counter("http2.tx_flush_timeout").value()); +} + +// Verify that we create and handle the stream flush timeout when there is a large body that +// does not have enough window. +TEST_P(Http2CodecImplFlowControlTest, LargeServerBodyFlushTimeout) { + initialize(); + + InSequence s; + MockStreamCallbacks client_stream_callbacks; + request_encoder_->getStream().addCallbacks(client_stream_callbacks); + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); + request_encoder_->encodeHeaders(request_headers, true); + + ON_CALL(client_connection_, write(_, _)) + .WillByDefault( + Invoke([&](Buffer::Instance& data, bool) -> void { server_wrapper_.buffer_.add(data); })); + TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + EXPECT_CALL(response_decoder_, decodeHeaders_(_, false)); + response_encoder_->encodeHeaders(response_headers, false); + EXPECT_CALL(response_decoder_, decodeData(_, false)).Times(AtLeast(1)); + auto flush_timer = new Event::MockTimer(&server_connection_.dispatcher_); + EXPECT_CALL(*flush_timer, enableTimer(std::chrono::milliseconds(30000), _)); + Buffer::OwnedImpl body(std::string(1024 * 1024, 'a')); + response_encoder_->encodeData(body, true); + + // Invoke a stream flush timeout. Make sure we don't get a reset locally for higher layers but + // we do get a reset on the client. + EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(0); + EXPECT_CALL(client_stream_callbacks, onResetStream(StreamResetReason::RemoteReset, _)); + flush_timer->invokeCallback(); + EXPECT_EQ(1, server_stats_store_.counter("http2.tx_flush_timeout").value()); +} + +// Verify that when an incoming protocol error races with a stream flush timeout we correctly +// disable the flush timeout and do not attempt to reset the stream. +TEST_P(Http2CodecImplFlowControlTest, LargeServerBodyFlushTimeoutAfterGoaway) { + initialize(); + + InSequence s; + MockStreamCallbacks client_stream_callbacks; + request_encoder_->getStream().addCallbacks(client_stream_callbacks); + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); + request_encoder_->encodeHeaders(request_headers, true); + + ON_CALL(client_connection_, write(_, _)) + .WillByDefault( + Invoke([&](Buffer::Instance& data, bool) -> void { server_wrapper_.buffer_.add(data); })); + TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + EXPECT_CALL(response_decoder_, decodeHeaders_(_, false)); + response_encoder_->encodeHeaders(response_headers, false); + EXPECT_CALL(response_decoder_, decodeData(_, false)).Times(AtLeast(1)); + auto flush_timer = new Event::MockTimer(&server_connection_.dispatcher_); + EXPECT_CALL(*flush_timer, enableTimer(std::chrono::milliseconds(30000), _)); + Buffer::OwnedImpl body(std::string(1024 * 1024, 'a')); + response_encoder_->encodeData(body, true); + + // Force a protocol error. 
+ Buffer::OwnedImpl garbage_data("this should cause a protocol error"); + EXPECT_CALL(client_callbacks_, onGoAway(_)); + EXPECT_CALL(*flush_timer, disableTimer()); + EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(0); + auto status = server_wrapper_.dispatch(garbage_data, *server_); + EXPECT_FALSE(status.ok()); + EXPECT_EQ(0, server_stats_store_.counter("http2.tx_flush_timeout").value()); +} + +TEST_P(Http2CodecImplTest, WatermarkUnderEndStream) { + initialize(); + MockStreamCallbacks callbacks; + request_encoder_->getStream().addCallbacks(callbacks); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, false)); + request_encoder_->encodeHeaders(request_headers, false); + + // The 'true' on encodeData will set local_end_stream_ on the client but not + // the server. Verify that client watermark callbacks will not be called, but + // server callbacks may be called by simulating connection overflow on both + // ends. + EXPECT_CALL(callbacks, onAboveWriteBufferHighWatermark()).Times(0); + EXPECT_CALL(callbacks, onBelowWriteBufferLowWatermark()).Times(0); + EXPECT_CALL(server_stream_callbacks_, onAboveWriteBufferHighWatermark()); + EXPECT_CALL(server_stream_callbacks_, onBelowWriteBufferLowWatermark()); + EXPECT_CALL(request_decoder_, decodeData(_, true)).WillOnce(InvokeWithoutArgs([&]() -> void { + client_->onUnderlyingConnectionAboveWriteBufferHighWatermark(); + client_->onUnderlyingConnectionBelowWriteBufferLowWatermark(); + server_->onUnderlyingConnectionAboveWriteBufferHighWatermark(); + server_->onUnderlyingConnectionBelowWriteBufferLowWatermark(); + })); + Buffer::OwnedImpl hello("hello"); + request_encoder_->encodeData(hello, true); + + // The 'true' on encodeData will set local_end_stream_ on the server. Verify + // that neither client nor server watermark callbacks will be called again. + EXPECT_CALL(callbacks, onAboveWriteBufferHighWatermark()).Times(0); + EXPECT_CALL(callbacks, onBelowWriteBufferLowWatermark()).Times(0); + EXPECT_CALL(server_stream_callbacks_, onAboveWriteBufferHighWatermark()).Times(0); + EXPECT_CALL(server_stream_callbacks_, onBelowWriteBufferLowWatermark()).Times(0); + TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + EXPECT_CALL(response_decoder_, decodeHeaders_(HeaderMapEqual(&response_headers), true)) + .WillOnce(InvokeWithoutArgs([&]() -> void { + client_->onUnderlyingConnectionAboveWriteBufferHighWatermark(); + client_->onUnderlyingConnectionBelowWriteBufferLowWatermark(); + server_->onUnderlyingConnectionAboveWriteBufferHighWatermark(); + server_->onUnderlyingConnectionBelowWriteBufferLowWatermark(); + })); + response_encoder_->encodeHeaders(response_headers, true); +} + +class Http2CodecImplStreamLimitTest : public Http2CodecImplTest {}; + +// Regression test for issue #3076. +// +// TODO(PiotrSikora): add tests that exercise both scenarios: before and after receiving +// the HTTP/2 SETTINGS frame. 
+TEST_P(Http2CodecImplStreamLimitTest, MaxClientStreams) {
+  http2OptionsFromTuple(client_http2_options_, ::testing::get<0>(GetParam()));
+  http2OptionsFromTuple(server_http2_options_, ::testing::get<1>(GetParam()));
+  client_ = std::make_unique<TestClientConnectionImpl>(
+      client_connection_, client_callbacks_, client_stats_store_, client_http2_options_,
+      max_request_headers_kb_, max_response_headers_count_, ProdNghttp2SessionFactory::get());
+  server_ = std::make_unique<TestServerConnectionImpl>(
+      server_connection_, server_callbacks_, server_stats_store_, server_http2_options_,
+      max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_);
+
+  for (int i = 0; i < 101; ++i) {
+    request_encoder_ = &client_->newStream(response_decoder_);
+    setupDefaultConnectionMocks();
+    EXPECT_CALL(server_callbacks_, newStream(_, _))
+        .WillOnce(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& {
+          response_encoder_ = &encoder;
+          encoder.getStream().addCallbacks(server_stream_callbacks_);
+          return request_decoder_;
+        }));
+
+    TestRequestHeaderMapImpl request_headers;
+    HttpTestUtility::addDefaultHeaders(request_headers);
+    EXPECT_CALL(request_decoder_, decodeHeaders_(_, true));
+    request_encoder_->encodeHeaders(request_headers, true);
+  }
+}
+
+#define HTTP2SETTINGS_SMALL_WINDOW_COMBINE                                                        \
+  ::testing::Combine(                                                                             \
+      ::testing::Values(CommonUtility::OptionsLimits::DEFAULT_HPACK_TABLE_SIZE),                  \
+      ::testing::Values(CommonUtility::OptionsLimits::DEFAULT_MAX_CONCURRENT_STREAMS),            \
+      ::testing::Values(CommonUtility::OptionsLimits::MIN_INITIAL_STREAM_WINDOW_SIZE),            \
+      ::testing::Values(CommonUtility::OptionsLimits::MIN_INITIAL_CONNECTION_WINDOW_SIZE))
+
+// Deferred reset tests use only small windows so that we can test certain conditions.
+INSTANTIATE_TEST_SUITE_P(Http2CodecImplDeferredResetTest, Http2CodecImplDeferredResetTest,
+                         ::testing::Combine(HTTP2SETTINGS_SMALL_WINDOW_COMBINE,
+                                            HTTP2SETTINGS_SMALL_WINDOW_COMBINE));
+
+// Flow control tests use only small windows so that we can test certain conditions.
+INSTANTIATE_TEST_SUITE_P(Http2CodecImplFlowControlTest, Http2CodecImplFlowControlTest,
+                         ::testing::Combine(HTTP2SETTINGS_SMALL_WINDOW_COMBINE,
+                                            HTTP2SETTINGS_SMALL_WINDOW_COMBINE));
+
+// We separate default/edge cases here to avoid combinatorial explosion.
+#define HTTP2SETTINGS_DEFAULT_COMBINE                                                             \
+  ::testing::Combine(                                                                             \
+      ::testing::Values(CommonUtility::OptionsLimits::DEFAULT_HPACK_TABLE_SIZE),                  \
+      ::testing::Values(CommonUtility::OptionsLimits::DEFAULT_MAX_CONCURRENT_STREAMS),            \
+      ::testing::Values(CommonUtility::OptionsLimits::DEFAULT_INITIAL_STREAM_WINDOW_SIZE),        \
+      ::testing::Values(CommonUtility::OptionsLimits::DEFAULT_INITIAL_CONNECTION_WINDOW_SIZE))
+
+// Stream limit test only uses the default values because not all combinations of
+// edge settings allow for the number of streams needed by the test.
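+// (For example, the minimum max_concurrent_streams edge value would cap the connection well
+// below the 101 streams the test opens, so only the default settings combination is used.)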
+INSTANTIATE_TEST_SUITE_P(Http2CodecImplStreamLimitTest, Http2CodecImplStreamLimitTest, + ::testing::Combine(HTTP2SETTINGS_DEFAULT_COMBINE, + HTTP2SETTINGS_DEFAULT_COMBINE)); + +INSTANTIATE_TEST_SUITE_P(Http2CodecImplTestDefaultSettings, Http2CodecImplTest, + ::testing::Combine(HTTP2SETTINGS_DEFAULT_COMBINE, + HTTP2SETTINGS_DEFAULT_COMBINE)); + +#define HTTP2SETTINGS_EDGE_COMBINE \ + ::testing::Combine( \ + ::testing::Values(CommonUtility::OptionsLimits::MIN_HPACK_TABLE_SIZE, \ + CommonUtility::OptionsLimits::MAX_HPACK_TABLE_SIZE), \ + ::testing::Values(CommonUtility::OptionsLimits::MIN_MAX_CONCURRENT_STREAMS, \ + CommonUtility::OptionsLimits::MAX_MAX_CONCURRENT_STREAMS), \ + ::testing::Values(CommonUtility::OptionsLimits::MIN_INITIAL_STREAM_WINDOW_SIZE, \ + CommonUtility::OptionsLimits::MAX_INITIAL_STREAM_WINDOW_SIZE), \ + ::testing::Values(CommonUtility::OptionsLimits::MIN_INITIAL_CONNECTION_WINDOW_SIZE, \ + CommonUtility::OptionsLimits::MAX_INITIAL_CONNECTION_WINDOW_SIZE)) + +// Make sure we have coverage for high and low values for various combinations and permutations +// of HTTP settings in at least one test fixture. +// Use with caution as any test using this runs 255 times. +using Http2CodecImplTestAll = Http2CodecImplTest; + +INSTANTIATE_TEST_SUITE_P(Http2CodecImplTestDefaultSettings, Http2CodecImplTestAll, + ::testing::Combine(HTTP2SETTINGS_DEFAULT_COMBINE, + HTTP2SETTINGS_DEFAULT_COMBINE)); +INSTANTIATE_TEST_SUITE_P(Http2CodecImplTestEdgeSettings, Http2CodecImplTestAll, + ::testing::Combine(HTTP2SETTINGS_EDGE_COMBINE, + HTTP2SETTINGS_EDGE_COMBINE)); + +TEST(Http2CodecUtility, reconstituteCrumbledCookies) { + { + HeaderString key; + HeaderString value; + HeaderString cookies; + EXPECT_FALSE(Utility::reconstituteCrumbledCookies(key, value, cookies)); + EXPECT_TRUE(cookies.empty()); + } + + { + HeaderString key(Headers::get().ContentLength); + HeaderString value; + value.setInteger(5); + HeaderString cookies; + EXPECT_FALSE(Utility::reconstituteCrumbledCookies(key, value, cookies)); + EXPECT_TRUE(cookies.empty()); + } + + { + HeaderString key(Headers::get().Cookie); + HeaderString value; + value.setCopy("a=b", 3); + HeaderString cookies; + EXPECT_TRUE(Utility::reconstituteCrumbledCookies(key, value, cookies)); + EXPECT_EQ(cookies, "a=b"); + + HeaderString key2(Headers::get().Cookie); + HeaderString value2; + value2.setCopy("c=d", 3); + EXPECT_TRUE(Utility::reconstituteCrumbledCookies(key2, value2, cookies)); + EXPECT_EQ(cookies, "a=b; c=d"); + } +} + +MATCHER_P(HasValue, m, "") { + if (!arg.has_value()) { + *result_listener << "does not contain a value"; + return false; + } + const auto& value = arg.value(); + return ExplainMatchResult(m, value, result_listener); +}; + +class Http2CustomSettingsTestBase : public Http2CodecImplTestFixture { +public: + struct SettingsParameter { + uint16_t identifier; + uint32_t value; + }; + + Http2CustomSettingsTestBase(Http2SettingsTuple client_settings, + Http2SettingsTuple server_settings, bool validate_client) + : Http2CodecImplTestFixture(client_settings, server_settings), + validate_client_(validate_client) {} + + ~Http2CustomSettingsTestBase() override = default; + + // Sets the custom settings parameters specified by |parameters| in the |options| proto. 
+  void setHttp2CustomSettingsParameters(envoy::config::core::v3::Http2ProtocolOptions& options,
+                                        std::vector<SettingsParameter> parameters) {
+    for (const auto& parameter : parameters) {
+      envoy::config::core::v3::Http2ProtocolOptions::SettingsParameter* custom_param =
+          options.mutable_custom_settings_parameters()->Add();
+      custom_param->mutable_identifier()->set_value(parameter.identifier);
+      custom_param->mutable_value()->set_value(parameter.value);
+    }
+  }
+
+  // Returns the Http2ProtocolOptions proto which specifies the settings parameters to be sent to
+  // the endpoint being validated.
+  envoy::config::core::v3::Http2ProtocolOptions& getCustomOptions() {
+    return validate_client_ ? server_http2_options_ : client_http2_options_;
+  }
+
+  // Returns the endpoint being validated.
+  const TestCodecSettingsProvider& getSettingsProvider() {
+    if (validate_client_) {
+      return *client_;
+    }
+    return *server_;
+  }
+
+  // Returns the settings tuple which specifies a subset of the settings parameters to be sent to
+  // the endpoint being validated.
+  const Http2SettingsTuple& getSettingsTuple() {
+    ASSERT(client_settings_.has_value() && server_settings_.has_value());
+    return validate_client_ ? *server_settings_ : *client_settings_;
+  }
+
+protected:
+  bool validate_client_{false};
+};
+
+class Http2CustomSettingsTest
+    : public Http2CustomSettingsTestBase,
+      public ::testing::TestWithParam<
+          ::testing::tuple<Http2SettingsTuple, Http2SettingsTuple, bool>> {
+public:
+  Http2CustomSettingsTest()
+      : Http2CustomSettingsTestBase(::testing::get<0>(GetParam()), ::testing::get<1>(GetParam()),
+                                    ::testing::get<2>(GetParam())) {}
+};
+INSTANTIATE_TEST_SUITE_P(Http2CodecImplTestEdgeSettings, Http2CustomSettingsTest,
+                         ::testing::Combine(HTTP2SETTINGS_DEFAULT_COMBINE,
+                                            HTTP2SETTINGS_DEFAULT_COMBINE, ::testing::Bool()));
+
+// Validates that custom parameters (those which are not explicitly named in the
+// envoy::config::core::v3::Http2ProtocolOptions proto) are properly sent and processed by
+// client and server connections.
+TEST_P(Http2CustomSettingsTest, UserDefinedSettings) {
+  std::vector<SettingsParameter> custom_parameters{{0x10, 10}, {0x11, 20}};
+  setHttp2CustomSettingsParameters(getCustomOptions(), custom_parameters);
+  initialize();
+  TestRequestHeaderMapImpl request_headers;
+  HttpTestUtility::addDefaultHeaders(request_headers);
+  EXPECT_CALL(request_decoder_, decodeHeaders_(_, _));
+  request_encoder_->encodeHeaders(request_headers, false);
+  uint32_t hpack_table_size = ::testing::get<0>(getSettingsTuple());
+  if (hpack_table_size != NGHTTP2_DEFAULT_HEADER_TABLE_SIZE) {
+    EXPECT_THAT(
+        getSettingsProvider().getRemoteSettingsParameterValue(NGHTTP2_SETTINGS_HEADER_TABLE_SIZE),
+        HasValue(hpack_table_size));
+  }
+  uint32_t max_concurrent_streams = ::testing::get<1>(getSettingsTuple());
+  if (max_concurrent_streams != NGHTTP2_INITIAL_MAX_CONCURRENT_STREAMS) {
+    EXPECT_THAT(getSettingsProvider().getRemoteSettingsParameterValue(
+                    NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS),
+                HasValue(max_concurrent_streams));
+  }
+  uint32_t initial_stream_window_size = ::testing::get<2>(getSettingsTuple());
+  if (initial_stream_window_size != NGHTTP2_INITIAL_WINDOW_SIZE) {
+    EXPECT_THAT(
+        getSettingsProvider().getRemoteSettingsParameterValue(NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE),
+        HasValue(initial_stream_window_size));
+  }
+  // Validate that custom parameters are received by the endpoint (client or server) under
+  // test.
+ for (const auto& parameter : custom_parameters) { + EXPECT_THAT(getSettingsProvider().getRemoteSettingsParameterValue(parameter.identifier), + HasValue(parameter.value)); + } +} + +// Tests request headers whose size is larger than the default limit of 60K. +TEST_P(Http2CodecImplTest, LargeRequestHeadersInvokeResetStream) { + initialize(); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + std::string long_string = std::string(63 * 1024, 'q'); + request_headers.addCopy("big", long_string); + EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(1); + request_encoder_->encodeHeaders(request_headers, false); +} + +// Large request headers are accepted when max limit configured. +TEST_P(Http2CodecImplTest, LargeRequestHeadersAccepted) { + max_request_headers_kb_ = 64; + initialize(); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + std::string long_string = std::string(63 * 1024, 'q'); + request_headers.addCopy("big", long_string); + + EXPECT_CALL(request_decoder_, decodeHeaders_(_, _)); + EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(0); + request_encoder_->encodeHeaders(request_headers, false); +} + +// Tests request headers with name containing underscore are dropped when the option is set to drop +// header. +TEST_P(Http2CodecImplTest, HeaderNameWithUnderscoreAreDropped) { + headers_with_underscores_action_ = envoy::config::core::v3::HttpProtocolOptions::DROP_HEADER; + initialize(); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + TestRequestHeaderMapImpl expected_headers(request_headers); + request_headers.addCopy("bad_header", "something"); + EXPECT_CALL(request_decoder_, decodeHeaders_(HeaderMapEqual(&expected_headers), _)); + request_encoder_->encodeHeaders(request_headers, false); + EXPECT_EQ(1, server_stats_store_.counter("http2.dropped_headers_with_underscores").value()); +} + +// Tests that request with header names containing underscore are rejected when the option is set to +// reject request. +TEST_P(Http2CodecImplTest, HeaderNameWithUnderscoreAreRejectedByDefault) { + headers_with_underscores_action_ = envoy::config::core::v3::HttpProtocolOptions::REJECT_REQUEST; + initialize(); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + request_headers.addCopy("bad_header", "something"); + EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(1); + request_encoder_->encodeHeaders(request_headers, false); + EXPECT_EQ( + 1, + server_stats_store_.counter("http2.requests_rejected_with_underscores_in_headers").value()); +} + +// Tests request headers with name containing underscore are allowed when the option is set to +// allow. 
+TEST_P(Http2CodecImplTest, HeaderNameWithUnderscoreAllowed) { + headers_with_underscores_action_ = envoy::config::core::v3::HttpProtocolOptions::ALLOW; + initialize(); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + request_headers.addCopy("bad_header", "something"); + TestRequestHeaderMapImpl expected_headers(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(HeaderMapEqual(&expected_headers), _)); + EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(0); + request_encoder_->encodeHeaders(request_headers, false); + EXPECT_EQ(0, server_stats_store_.counter("http2.dropped_headers_with_underscores").value()); +} + +// This is the HTTP/2 variant of the HTTP/1 regression test for CVE-2019-18801. +// Large method headers should not trigger ASSERTs or ASAN. The underlying issue +// in CVE-2019-18801 only affected the HTTP/1 encoder, but we include a test +// here for belt-and-braces. This also demonstrates that the HTTP/2 codec will +// accept arbitrary :method headers, unlike the HTTP/1 codec (see +// Http1ServerConnectionImplTest.RejectInvalidMethod for comparison). +TEST_P(Http2CodecImplTest, LargeMethodRequestEncode) { + max_request_headers_kb_ = 80; + initialize(); + + const std::string long_method = std::string(79 * 1024, 'a'); + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + request_headers.setReferenceKey(Headers::get().Method, long_method); + EXPECT_CALL(request_decoder_, decodeHeaders_(HeaderMapEqual(&request_headers), false)); + EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(0); + request_encoder_->encodeHeaders(request_headers, false); +} + +// Tests stream reset when the number of request headers exceeds the default maximum of 100. +TEST_P(Http2CodecImplTest, ManyRequestHeadersInvokeResetStream) { + initialize(); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + for (int i = 0; i < 100; i++) { + request_headers.addCopy(std::to_string(i), ""); + } + EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(1); + request_encoder_->encodeHeaders(request_headers, false); +} + +// Tests that max number of request headers is configurable. +TEST_P(Http2CodecImplTest, ManyRequestHeadersAccepted) { + max_request_headers_count_ = 150; + initialize(); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + for (int i = 0; i < 145; i++) { + request_headers.addCopy(std::to_string(i), ""); + } + EXPECT_CALL(request_decoder_, decodeHeaders_(_, _)); + EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(0); + request_encoder_->encodeHeaders(request_headers, false); +} + +// Tests that max number of response headers is configurable. 
+TEST_P(Http2CodecImplTest, ManyResponseHeadersAccepted) { + max_response_headers_count_ = 110; + initialize(); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, false)); + request_encoder_->encodeHeaders(request_headers, false); + + TestResponseHeaderMapImpl response_headers{{":status", "200"}, {"compression", "test"}}; + for (int i = 0; i < 105; i++) { + response_headers.addCopy(std::to_string(i), ""); + } + EXPECT_CALL(response_decoder_, decodeHeaders_(_, true)); + response_encoder_->encodeHeaders(response_headers, true); +} + +TEST_P(Http2CodecImplTest, LargeRequestHeadersAtLimitAccepted) { + uint32_t codec_limit_kb = 64; + max_request_headers_kb_ = codec_limit_kb; + initialize(); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + std::string key = "big"; + uint32_t head_room = 77; + uint32_t long_string_length = + codec_limit_kb * 1024 - request_headers.byteSize() - key.length() - head_room; + std::string long_string = std::string(long_string_length, 'q'); + request_headers.addCopy(key, long_string); + + // The amount of data sent to the codec is not equivalent to the size of the + // request headers that Envoy computes, as the codec limits based on the + // entire http2 frame. The exact head room needed (76) was found through iteration. + ASSERT_EQ(request_headers.byteSize() + head_room, codec_limit_kb * 1024); + + EXPECT_CALL(request_decoder_, decodeHeaders_(_, _)); + request_encoder_->encodeHeaders(request_headers, true); +} + +TEST_P(Http2CodecImplTest, LargeRequestHeadersOverDefaultCodecLibraryLimit) { + max_request_headers_kb_ = 66; + initialize(); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + std::string long_string = std::string(65 * 1024, 'q'); + request_headers.addCopy("big", long_string); + + EXPECT_CALL(request_decoder_, decodeHeaders_(_, _)).Times(1); + EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(0); + request_encoder_->encodeHeaders(request_headers, true); +} + +TEST_P(Http2CodecImplTest, LargeRequestHeadersExceedPerHeaderLimit) { + // The name-value pair max is set by NGHTTP2_HD_MAX_NV in lib/nghttp2_hd.h to 64KB, and + // creates a per-request header limit for us in h2. Note that the nghttp2 + // calculated byte size will differ from envoy due to H2 compression and frames. 
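+  // (The single 80 KiB value added below exceeds that 64 KiB per-name-value limit, so the
+  // request headers are never delivered to the server's request decoder.)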
+ + max_request_headers_kb_ = 81; + initialize(); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + std::string long_string = std::string(80 * 1024, 'q'); + request_headers.addCopy("big", long_string); + + EXPECT_CALL(request_decoder_, decodeHeaders_(_, _)).Times(0); + EXPECT_CALL(client_callbacks_, onGoAway(_)); + server_->shutdownNotice(); + server_->goAway(); + request_encoder_->encodeHeaders(request_headers, true); +} + +TEST_P(Http2CodecImplTest, ManyLargeRequestHeadersUnderPerHeaderLimit) { + max_request_headers_kb_ = 81; + initialize(); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + std::string long_string = std::string(1024, 'q'); + for (int i = 0; i < 80; i++) { + request_headers.addCopy(std::to_string(i), long_string); + } + + EXPECT_CALL(request_decoder_, decodeHeaders_(_, _)).Times(1); + EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(0); + request_encoder_->encodeHeaders(request_headers, true); +} + +TEST_P(Http2CodecImplTest, LargeRequestHeadersAtMaxConfigurable) { + // Raising the limit past this triggers some unexpected nghttp2 error. + // Further debugging required to increase past ~96 KiB. + max_request_headers_kb_ = 96; + initialize(); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + std::string long_string = std::string(1024, 'q'); + for (int i = 0; i < 95; i++) { + request_headers.addCopy(std::to_string(i), long_string); + } + + EXPECT_CALL(request_decoder_, decodeHeaders_(_, _)).Times(1); + EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(0); + request_encoder_->encodeHeaders(request_headers, true); +} + +// Note this is Http2CodecImplTestAll not Http2CodecImplTest, to test +// compression with min and max HPACK table size. +TEST_P(Http2CodecImplTestAll, TestCodecHeaderCompression) { + initialize(); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); + request_encoder_->encodeHeaders(request_headers, true); + + TestResponseHeaderMapImpl response_headers{{":status", "200"}, {"compression", "test"}}; + EXPECT_CALL(response_decoder_, decodeHeaders_(_, true)); + response_encoder_->encodeHeaders(response_headers, true); + + // Sanity check to verify that state of encoders and decoders matches. 
+ EXPECT_EQ(nghttp2_session_get_hd_deflate_dynamic_table_size(server_->session()), + nghttp2_session_get_hd_inflate_dynamic_table_size(client_->session())); + EXPECT_EQ(nghttp2_session_get_hd_deflate_dynamic_table_size(client_->session()), + nghttp2_session_get_hd_inflate_dynamic_table_size(server_->session())); + + // Verify that headers are compressed only when both client and server advertise table size + // > 0: + if (client_http2_options_.hpack_table_size().value() && + server_http2_options_.hpack_table_size().value()) { + EXPECT_NE(0, nghttp2_session_get_hd_deflate_dynamic_table_size(client_->session())); + EXPECT_NE(0, nghttp2_session_get_hd_deflate_dynamic_table_size(server_->session())); + } else { + EXPECT_EQ(0, nghttp2_session_get_hd_deflate_dynamic_table_size(client_->session())); + EXPECT_EQ(0, nghttp2_session_get_hd_deflate_dynamic_table_size(server_->session())); + } +} + +// Verify that codec detects PING flood +TEST_P(Http2CodecImplTest, PingFlood) { + initialize(); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, false)); + request_encoder_->encodeHeaders(request_headers, false); + + // Send one frame above the outbound control queue size limit + for (uint32_t i = 0; i < CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_CONTROL_FRAMES + 1; + ++i) { + EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr)); + } + + int ack_count = 0; + Buffer::OwnedImpl buffer; + ON_CALL(server_connection_, write(_, _)) + .WillByDefault(Invoke([&buffer, &ack_count](Buffer::Instance& frame, bool) { + ++ack_count; + buffer.move(frame); + })); + + EXPECT_THROW(client_->sendPendingFrames(), ServerCodecError); + EXPECT_EQ(ack_count, CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_CONTROL_FRAMES); + EXPECT_EQ(1, server_stats_store_.counter("http2.outbound_control_flood").value()); +} + +// Verify that codec allows PING flood when mitigation is disabled +TEST_P(Http2CodecImplTest, PingFloodMitigationDisabled) { + max_outbound_control_frames_ = 2147483647; + initialize(); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, false)); + request_encoder_->encodeHeaders(request_headers, false); + + // Send one frame above the outbound control queue size limit + for (uint32_t i = 0; i < CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_CONTROL_FRAMES + 1; + ++i) { + EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr)); + } + + EXPECT_CALL(server_connection_, write(_, _)) + .Times(CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_CONTROL_FRAMES + 1); + EXPECT_NO_THROW(client_->sendPendingFrames()); +} + +// Verify that outbound control frame counter decreases when send buffer is drained +TEST_P(Http2CodecImplTest, PingFloodCounterReset) { + // Ping frames are 17 bytes each so 237 full frames and a partial frame fit in the current min + // size for buffer slices. Setting the limit to 2x+1 the number that fits in a single slice allows + // the logic below that verifies drain and overflow thresholds. 
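+  // (Note: 475 = 2 * 237 + 1, i.e. one ping more than two slices' worth of full frames; after
+  // draining half of the send buffer below, kMaxOutboundControlFrames / 2 more pings fit without
+  // tripping the flood check.)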
+ static const int kMaxOutboundControlFrames = 475; + max_outbound_control_frames_ = kMaxOutboundControlFrames; + initialize(); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, false)); + request_encoder_->encodeHeaders(request_headers, false); + + for (int i = 0; i < kMaxOutboundControlFrames; ++i) { + EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr)); + } + + int ack_count = 0; + Buffer::OwnedImpl buffer; + ON_CALL(server_connection_, write(_, _)) + .WillByDefault(Invoke([&buffer, &ack_count](Buffer::Instance& frame, bool) { + ++ack_count; + buffer.move(frame); + })); + + // We should be 1 frame under the control frame flood mitigation threshold. + EXPECT_NO_THROW(client_->sendPendingFrames()); + EXPECT_EQ(ack_count, kMaxOutboundControlFrames); + + // Drain floor(kMaxOutboundFrames / 2) slices from the send buffer + buffer.drain(buffer.length() / 2); + + // Send floor(kMaxOutboundFrames / 2) more pings. + for (int i = 0; i < kMaxOutboundControlFrames / 2; ++i) { + EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr)); + } + // The number of outbound frames should be half of max so the connection should not be + // terminated. + EXPECT_NO_THROW(client_->sendPendingFrames()); + EXPECT_EQ(ack_count, kMaxOutboundControlFrames + kMaxOutboundControlFrames / 2); + + // 1 more ping frame should overflow the outbound frame limit. + EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr)); + EXPECT_THROW(client_->sendPendingFrames(), ServerCodecError); +} + +// Verify that codec detects flood of outbound HEADER frames +TEST_P(Http2CodecImplTest, ResponseHeadersFlood) { + initialize(); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, false)); + request_encoder_->encodeHeaders(request_headers, false); + + int frame_count = 0; + Buffer::OwnedImpl buffer; + ON_CALL(server_connection_, write(_, _)) + .WillByDefault(Invoke([&buffer, &frame_count](Buffer::Instance& frame, bool) { + ++frame_count; + buffer.move(frame); + })); + + TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + for (uint32_t i = 0; i < CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES + 1; ++i) { + EXPECT_NO_THROW(response_encoder_->encodeHeaders(response_headers, false)); + } + // Presently flood mitigation is done only when processing downstream data + // So we need to send stream from downstream client to trigger mitigation + EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr)); + EXPECT_THROW(client_->sendPendingFrames(), ServerCodecError); + + EXPECT_EQ(frame_count, CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES + 1); + EXPECT_EQ(1, server_stats_store_.counter("http2.outbound_flood").value()); +} + +// Verify that codec detects flood of outbound DATA frames +TEST_P(Http2CodecImplTest, ResponseDataFlood) { + initialize(); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, false)); + request_encoder_->encodeHeaders(request_headers, false); + + int frame_count = 0; + Buffer::OwnedImpl buffer; + ON_CALL(server_connection_, write(_, _)) + .WillByDefault(Invoke([&buffer, &frame_count](Buffer::Instance& frame, bool) { + ++frame_count; + buffer.move(frame); + })); + + TestResponseHeaderMapImpl 
response_headers{{":status", "200"}}; + response_encoder_->encodeHeaders(response_headers, false); + // Account for the single HEADERS frame above + for (uint32_t i = 0; i < CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES; ++i) { + Buffer::OwnedImpl data("0"); + EXPECT_NO_THROW(response_encoder_->encodeData(data, false)); + } + // Presently flood mitigation is done only when processing downstream data + // So we need to send stream from downstream client to trigger mitigation + EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr)); + EXPECT_THROW(client_->sendPendingFrames(), ServerCodecError); + + EXPECT_EQ(frame_count, CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES + 1); + EXPECT_EQ(1, server_stats_store_.counter("http2.outbound_flood").value()); +} + +// Verify that codec allows outbound DATA flood when mitigation is disabled +TEST_P(Http2CodecImplTest, ResponseDataFloodMitigationDisabled) { + max_outbound_control_frames_ = 2147483647; + initialize(); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, false)); + request_encoder_->encodeHeaders(request_headers, false); + + // +2 is to account for HEADERS and PING ACK, that is used to trigger mitigation + EXPECT_CALL(server_connection_, write(_, _)) + .Times(CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES + 2); + EXPECT_CALL(response_decoder_, decodeHeaders_(_, false)).Times(1); + EXPECT_CALL(response_decoder_, decodeData(_, false)) + .Times(CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES); + TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + response_encoder_->encodeHeaders(response_headers, false); + // Account for the single HEADERS frame above + for (uint32_t i = 0; i < CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES; ++i) { + Buffer::OwnedImpl data("0"); + EXPECT_NO_THROW(response_encoder_->encodeData(data, false)); + } + // Presently flood mitigation is done only when processing downstream data + // So we need to send stream from downstream client to trigger mitigation + EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr)); + EXPECT_NO_THROW(client_->sendPendingFrames()); +} + +// Verify that outbound frame counter decreases when send buffer is drained +TEST_P(Http2CodecImplTest, ResponseDataFloodCounterReset) { + static const int kMaxOutboundFrames = 100; + max_outbound_frames_ = kMaxOutboundFrames; + initialize(); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, false)); + request_encoder_->encodeHeaders(request_headers, false); + + int frame_count = 0; + Buffer::OwnedImpl buffer; + ON_CALL(server_connection_, write(_, _)) + .WillByDefault(Invoke([&buffer, &frame_count](Buffer::Instance& frame, bool) { + ++frame_count; + buffer.move(frame); + })); + + TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + response_encoder_->encodeHeaders(response_headers, false); + // Account for the single HEADERS frame above + for (uint32_t i = 0; i < kMaxOutboundFrames - 1; ++i) { + Buffer::OwnedImpl data("0"); + EXPECT_NO_THROW(response_encoder_->encodeData(data, false)); + } + + EXPECT_EQ(frame_count, kMaxOutboundFrames); + // Drain kMaxOutboundFrames / 2 slices from the send buffer + buffer.drain(buffer.length() / 2); + + for (uint32_t i = 0; i < kMaxOutboundFrames / 2 + 1; ++i) { + Buffer::OwnedImpl data("0"); 
+ EXPECT_NO_THROW(response_encoder_->encodeData(data, false)); + } + + // Presently flood mitigation is done only when processing downstream data + // So we need to send a frame from downstream client to trigger mitigation + EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr)); + EXPECT_THROW(client_->sendPendingFrames(), ServerCodecError); +} + +// Verify that control frames are added to the counter of outbound frames of all types. +TEST_P(Http2CodecImplTest, PingStacksWithDataFlood) { + initialize(); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, false)); + request_encoder_->encodeHeaders(request_headers, false); + + int frame_count = 0; + Buffer::OwnedImpl buffer; + ON_CALL(server_connection_, write(_, _)) + .WillByDefault(Invoke([&buffer, &frame_count](Buffer::Instance& frame, bool) { + ++frame_count; + buffer.move(frame); + })); + + TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + response_encoder_->encodeHeaders(response_headers, false); + // Account for the single HEADERS frame above + for (uint32_t i = 0; i < CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES - 1; ++i) { + Buffer::OwnedImpl data("0"); + EXPECT_NO_THROW(response_encoder_->encodeData(data, false)); + } + // Send one PING frame above the outbound queue size limit + EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr)); + EXPECT_THROW(client_->sendPendingFrames(), ServerCodecError); + + EXPECT_EQ(frame_count, CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES); + EXPECT_EQ(1, server_stats_store_.counter("http2.outbound_flood").value()); +} + +TEST_P(Http2CodecImplTest, PriorityFlood) { + priorityFlood(); + EXPECT_THROW(client_->sendPendingFrames(), ServerCodecError); +} + +TEST_P(Http2CodecImplTest, PriorityFloodOverride) { + max_inbound_priority_frames_per_stream_ = 2147483647; + + priorityFlood(); + EXPECT_NO_THROW(client_->sendPendingFrames()); +} + +TEST_P(Http2CodecImplTest, WindowUpdateFlood) { + windowUpdateFlood(); + EXPECT_THROW(client_->sendPendingFrames(), ServerCodecError); +} + +TEST_P(Http2CodecImplTest, WindowUpdateFloodOverride) { + max_inbound_window_update_frames_per_data_frame_sent_ = 2147483647; + windowUpdateFlood(); + EXPECT_NO_THROW(client_->sendPendingFrames()); +} + +TEST_P(Http2CodecImplTest, EmptyDataFlood) { + Buffer::OwnedImpl data; + emptyDataFlood(data); + EXPECT_CALL(request_decoder_, decodeData(_, false)); + auto status = server_wrapper_.dispatch(data, *server_); + EXPECT_FALSE(status.ok()); + EXPECT_TRUE(isBufferFloodError(status)); +} + +TEST_P(Http2CodecImplTest, EmptyDataFloodOverride) { + max_consecutive_inbound_frames_with_empty_payload_ = 2147483647; + Buffer::OwnedImpl data; + emptyDataFlood(data); + EXPECT_CALL(request_decoder_, decodeData(_, false)) + .Times( + CommonUtility::OptionsLimits::DEFAULT_MAX_CONSECUTIVE_INBOUND_FRAMES_WITH_EMPTY_PAYLOAD + + 1); + auto status = server_wrapper_.dispatch(data, *server_); + EXPECT_TRUE(status.ok()); +} + +// CONNECT without upgrade type gets tagged with "bytestream" +TEST_P(Http2CodecImplTest, ConnectTest) { + client_http2_options_.set_allow_connect(true); + server_http2_options_.set_allow_connect(true); + initialize(); + MockStreamCallbacks callbacks; + request_encoder_->getStream().addCallbacks(callbacks); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + 
request_headers.setReferenceKey(Headers::get().Method, Http::Headers::get().MethodValues.Connect); + TestRequestHeaderMapImpl expected_headers; + HttpTestUtility::addDefaultHeaders(expected_headers); + expected_headers.setReferenceKey(Headers::get().Method, + Http::Headers::get().MethodValues.Connect); + expected_headers.setReferenceKey(Headers::get().Protocol, "bytestream"); + EXPECT_CALL(request_decoder_, decodeHeaders_(HeaderMapEqual(&expected_headers), false)); + request_encoder_->encodeHeaders(request_headers, false); +} + +class TestNghttp2SessionFactory; + +// Test client for H/2 METADATA frame edge cases. +class MetadataTestClientConnectionImpl : public TestClientConnectionImpl { +public: + MetadataTestClientConnectionImpl( + Network::Connection& connection, Http::ConnectionCallbacks& callbacks, Stats::Scope& scope, + const envoy::config::core::v3::Http2ProtocolOptions& http2_options, + uint32_t max_request_headers_kb, uint32_t max_request_headers_count, + Nghttp2SessionFactory& http2_session_factory) + : TestClientConnectionImpl(connection, callbacks, scope, http2_options, + max_request_headers_kb, max_request_headers_count, + http2_session_factory) {} + + // Overrides TestClientConnectionImpl::submitMetadata(). + bool submitMetadata(const MetadataMapVector& metadata_map_vector, int32_t stream_id) override { + // Creates metadata payload. + encoder_.createPayload(metadata_map_vector); + for (uint8_t flags : encoder_.payloadFrameFlagBytes()) { + int result = nghttp2_submit_extension(session(), ::Envoy::Http::METADATA_FRAME_TYPE, flags, + stream_id, nullptr); + if (result != 0) { + return false; + } + } + // Triggers nghttp2 to populate the payloads of the METADATA frames. + int result = nghttp2_session_send(session()); + return result == 0; + } + +protected: + friend class TestNghttp2SessionFactory; + + Http::Http2::MetadataEncoder encoder_; +}; + +class TestNghttp2SessionFactory : public Nghttp2SessionFactory { +public: + ~TestNghttp2SessionFactory() override { + nghttp2_session_callbacks_del(callbacks_); + nghttp2_option_del(options_); + } + + nghttp2_session* create(const nghttp2_session_callbacks*, ConnectionImpl* connection, + const nghttp2_option*) override { + // Only need to provide callbacks required to send METADATA frames. + nghttp2_session_callbacks_new(&callbacks_); + nghttp2_session_callbacks_set_pack_extension_callback( + callbacks_, + [](nghttp2_session*, uint8_t* data, size_t length, const nghttp2_frame*, + void* user_data) -> ssize_t { + // Double cast required due to multiple inheritance. + return static_cast( + static_cast(user_data)) + ->encoder_.packNextFramePayload(data, length); + }); + nghttp2_session_callbacks_set_send_callback( + callbacks_, + [](nghttp2_session*, const uint8_t* data, size_t length, int, void* user_data) -> ssize_t { + // Cast down to MetadataTestClientConnectionImpl to leverage friendship. 
+          return static_cast<MetadataTestClientConnectionImpl*>(
+                     static_cast<ConnectionImpl*>(user_data))
+              ->onSend(data, length);
+        });
+    nghttp2_option_new(&options_);
+    nghttp2_option_set_user_recv_extension_type(options_, METADATA_FRAME_TYPE);
+    nghttp2_session* session;
+    nghttp2_session_client_new2(&session, callbacks_, connection, options_);
+    return session;
+  }
+
+  void init(nghttp2_session*, ConnectionImpl*,
+            const envoy::config::core::v3::Http2ProtocolOptions&) override {}
+
+private:
+  nghttp2_session_callbacks* callbacks_;
+  nghttp2_option* options_;
+};
+
+class Http2CodecMetadataTest : public Http2CodecImplTestFixture, public ::testing::Test {
+public:
+  Http2CodecMetadataTest() = default;
+
+protected:
+  void initialize() override {
+    allow_metadata_ = true;
+    http2OptionsFromTuple(client_http2_options_, client_settings_);
+    http2OptionsFromTuple(server_http2_options_, server_settings_);
+    client_ = std::make_unique<MetadataTestClientConnectionImpl>(
+        client_connection_, client_callbacks_, client_stats_store_, client_http2_options_,
+        max_request_headers_kb_, max_response_headers_count_, http2_session_factory_);
+    server_ = std::make_unique<TestServerConnectionImpl>(
+        server_connection_, server_callbacks_, server_stats_store_, server_http2_options_,
+        max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_);
+    ON_CALL(client_connection_, write(_, _))
+        .WillByDefault(Invoke([&](Buffer::Instance& data, bool) -> void {
+          ASSERT_TRUE(server_wrapper_.dispatch(data, *server_).ok());
+        }));
+    ON_CALL(server_connection_, write(_, _))
+        .WillByDefault(Invoke([&](Buffer::Instance& data, bool) -> void {
+          ASSERT_TRUE(client_wrapper_.dispatch(data, *client_).ok());
+        }));
+  }
+
+private:
+  TestNghttp2SessionFactory http2_session_factory_;
+};
+
+// Validates noop handling of METADATA frames without a known stream ID.
+// This is required per RFC 7540, section 5.1.1, which states that stream ID = 0 can be used for
+// "connection control" messages, and per the H2 METADATA spec (source/docs/h2_metadata.md), which
+// states that these frames can be received prior to the headers.
+TEST_F(Http2CodecMetadataTest, UnknownStreamId) {
+  initialize();
+  MetadataMap metadata_map = {{"key", "value"}};
+  MetadataMapVector metadata_vector;
+  metadata_vector.emplace_back(std::make_unique<MetadataMap>(metadata_map));
+  // SETTINGS are required as part of the preface.
+  ASSERT_EQ(nghttp2_submit_settings(client_->session(), NGHTTP2_FLAG_NONE, nullptr, 0), 0);
+  // Validate both the ID = 0 special case and a non-zero ID not already bound to a stream (any
+  // ID > 0 for this test).
+ EXPECT_TRUE(client_->submitMetadata(metadata_vector, 0)); + EXPECT_TRUE(client_->submitMetadata(metadata_vector, 1000)); +} + +} // namespace Http2 +} // namespace Legacy +} // namespace Http +} // namespace Envoy diff --git a/test/common/http/http2/codec_impl_test.cc b/test/common/http/http2/codec_impl_test.cc index 1deb3c412284..fa1c92346e73 100644 --- a/test/common/http/http2/codec_impl_test.cc +++ b/test/common/http/http2/codec_impl_test.cc @@ -7,6 +7,7 @@ #include "common/http/exception.h" #include "common/http/header_map_impl.h" #include "common/http/http2/codec_impl.h" +#include "common/runtime/runtime_features.h" #include "test/common/http/common.h" #include "test/common/http/http2/http2_frame.h" @@ -73,7 +74,7 @@ class Http2CodecImplTestFixture { }; struct ConnectionWrapper { - Http::Status dispatch(const Buffer::Instance& data, ConnectionImpl& connection) { + Http::Status dispatch(const Buffer::Instance& data, Connection& connection) { Http::Status status = Http::okStatus(); buffer_.add(data); if (!dispatching_) { @@ -128,13 +129,23 @@ class Http2CodecImplTestFixture { virtual void initialize() { http2OptionsFromTuple(client_http2_options_, client_settings_); http2OptionsFromTuple(server_http2_options_, server_settings_); - client_ = std::make_unique( - client_connection_, client_callbacks_, client_stats_store_, client_http2_options_, - max_request_headers_kb_, max_response_headers_count_, ProdNghttp2SessionFactory::get()); - server_ = std::make_unique( - server_connection_, server_callbacks_, server_stats_store_, server_http2_options_, - max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_); - + if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.new_codec_behavior")) { + client_ = std::make_unique( + client_connection_, client_callbacks_, client_stats_store_, client_http2_options_, + max_request_headers_kb_, max_response_headers_count_, + ProdNghttp2SessionFactoryNew::get()); + server_ = std::make_unique( + server_connection_, server_callbacks_, server_stats_store_, server_http2_options_, + max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_); + } else { + client_ = std::make_unique( + client_connection_, client_callbacks_, client_stats_store_, client_http2_options_, + max_request_headers_kb_, max_response_headers_count_, + ProdNghttp2SessionFactoryLegacy::get()); + server_ = std::make_unique( + server_connection_, server_callbacks_, server_stats_store_, server_http2_options_, + max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_); + } request_encoder_ = &client_->newStream(response_decoder_); setupDefaultConnectionMocks(); @@ -229,13 +240,13 @@ class Http2CodecImplTestFixture { envoy::config::core::v3::Http2ProtocolOptions client_http2_options_; NiceMock client_connection_; MockConnectionCallbacks client_callbacks_; - std::unique_ptr client_; + std::unique_ptr client_; ConnectionWrapper client_wrapper_; Stats::TestUtil::TestStore server_stats_store_; envoy::config::core::v3::Http2ProtocolOptions server_http2_options_; NiceMock server_connection_; MockServerConnectionCallbacks server_callbacks_; - std::unique_ptr server_; + std::unique_ptr server_; ConnectionWrapper server_wrapper_; MockResponseDecoder response_decoder_; RequestEncoder* request_encoder_; @@ -871,21 +882,21 @@ TEST_P(Http2CodecImplFlowControlTest, TestFlowControlInPendingSendData) { // stream. 
EXPECT_EQ(0, nghttp2_session_get_stream_local_window_size(server_->session(), 1)); EXPECT_EQ(0, nghttp2_session_get_stream_remote_window_size(client_->session(), 1)); - EXPECT_EQ(initial_stream_window, server_->getStream(1)->unconsumed_bytes_); + EXPECT_EQ(initial_stream_window, server_->getStreamUnconsumedBytes(1)); // Now that the flow control window is full, further data causes the send buffer to back up. Buffer::OwnedImpl more_long_data(std::string(initial_stream_window, 'a')); request_encoder_->encodeData(more_long_data, false); - EXPECT_EQ(initial_stream_window, client_->getStream(1)->pending_send_data_.length()); + EXPECT_EQ(initial_stream_window, client_->getStreamPendingSendDataLength(1)); EXPECT_EQ(initial_stream_window, TestUtility::findGauge(client_stats_store_, "http2.pending_send_bytes")->value()); - EXPECT_EQ(initial_stream_window, server_->getStream(1)->unconsumed_bytes_); + EXPECT_EQ(initial_stream_window, server_->getStreamUnconsumedBytes(1)); // If we go over the limit, the stream callbacks should fire. EXPECT_CALL(callbacks, onAboveWriteBufferHighWatermark()); Buffer::OwnedImpl last_byte("!"); request_encoder_->encodeData(last_byte, false); - EXPECT_EQ(initial_stream_window + 1, client_->getStream(1)->pending_send_data_.length()); + EXPECT_EQ(initial_stream_window + 1, client_->getStreamPendingSendDataLength(1)); EXPECT_EQ(initial_stream_window + 1, TestUtility::findGauge(client_stats_store_, "http2.pending_send_bytes")->value()); @@ -930,7 +941,7 @@ TEST_P(Http2CodecImplFlowControlTest, TestFlowControlInPendingSendData) { EXPECT_CALL(callbacks2, onBelowWriteBufferLowWatermark()).Times(0); EXPECT_CALL(callbacks3, onBelowWriteBufferLowWatermark()); server_->getStream(1)->readDisable(false); - EXPECT_EQ(0, client_->getStream(1)->pending_send_data_.length()); + EXPECT_EQ(0, client_->getStreamPendingSendDataLength(1)); EXPECT_EQ(0, TestUtility::findGauge(client_stats_store_, "http2.pending_send_bytes")->value()); // The extra 1 byte sent won't trigger another window update, so the final window should be the // initial window minus the last 1 byte flush from the client to server. @@ -975,7 +986,7 @@ TEST_P(Http2CodecImplFlowControlTest, EarlyResetRestoresWindow) { // stream. EXPECT_EQ(0, nghttp2_session_get_stream_local_window_size(server_->session(), 1)); EXPECT_EQ(0, nghttp2_session_get_stream_remote_window_size(client_->session(), 1)); - EXPECT_EQ(initial_stream_window, server_->getStream(1)->unconsumed_bytes_); + EXPECT_EQ(initial_stream_window, server_->getStreamUnconsumedBytes(1)); EXPECT_GT(initial_connection_window, nghttp2_session_get_remote_window_size(client_->session())); EXPECT_CALL(server_stream_callbacks_, @@ -1017,7 +1028,7 @@ TEST_P(Http2CodecImplFlowControlTest, FlowControlPendingRecvData) { // the recv buffer can be overrun by a client which negotiates a larger // SETTINGS_MAX_FRAME_SIZE but there's no current easy way to tweak that in // envoy (without sending raw HTTP/2 frames) so we lower the buffer limit instead. 
- server_->getStream(1)->setWriteBufferWatermarks(10, 20); + server_->setStreamWriteBufferWatermarks(1, 10, 20); EXPECT_CALL(request_decoder_, decodeData(_, false)); Buffer::OwnedImpl data(std::string(40, 'a')); @@ -1215,13 +1226,23 @@ class Http2CodecImplStreamLimitTest : public Http2CodecImplTest {}; TEST_P(Http2CodecImplStreamLimitTest, MaxClientStreams) { http2OptionsFromTuple(client_http2_options_, ::testing::get<0>(GetParam())); http2OptionsFromTuple(server_http2_options_, ::testing::get<1>(GetParam())); - client_ = std::make_unique( - client_connection_, client_callbacks_, client_stats_store_, client_http2_options_, - max_request_headers_kb_, max_response_headers_count_, ProdNghttp2SessionFactory::get()); - server_ = std::make_unique( - server_connection_, server_callbacks_, server_stats_store_, server_http2_options_, - max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_); + if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.new_codec_behavior")) { + client_ = std::make_unique( + client_connection_, client_callbacks_, client_stats_store_, client_http2_options_, + max_request_headers_kb_, max_response_headers_count_, ProdNghttp2SessionFactoryNew::get()); + server_ = std::make_unique( + server_connection_, server_callbacks_, server_stats_store_, server_http2_options_, + max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_); + } else { + client_ = std::make_unique( + client_connection_, client_callbacks_, client_stats_store_, client_http2_options_, + max_request_headers_kb_, max_response_headers_count_, + ProdNghttp2SessionFactoryLegacy::get()); + server_ = std::make_unique( + server_connection_, server_callbacks_, server_stats_store_, server_http2_options_, + max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_); + } for (int i = 0; i < 101; ++i) { request_encoder_ = &client_->newStream(response_decoder_); setupDefaultConnectionMocks(); @@ -2031,50 +2052,64 @@ TEST_P(Http2CodecImplTest, ConnectTest) { request_encoder_->encodeHeaders(request_headers, false); } -class TestNghttp2SessionFactory; +template class TestNghttp2SessionFactory; // Test client for H/2 METADATA frame edge cases. -class MetadataTestClientConnectionImpl : public TestClientConnectionImpl { +template +class MetadataTestClientConnectionImpl : public TestClientConnectionImplType { public: MetadataTestClientConnectionImpl( Network::Connection& connection, Http::ConnectionCallbacks& callbacks, Stats::Scope& scope, const envoy::config::core::v3::Http2ProtocolOptions& http2_options, uint32_t max_request_headers_kb, uint32_t max_request_headers_count, - Nghttp2SessionFactory& http2_session_factory) - : TestClientConnectionImpl(connection, callbacks, scope, http2_options, - max_request_headers_kb, max_request_headers_count, - http2_session_factory) {} + typename TestClientConnectionImplType::SessionFactory& http2_session_factory) + : TestClientConnectionImplType(connection, callbacks, scope, http2_options, + max_request_headers_kb, max_request_headers_count, + http2_session_factory) {} // Overrides TestClientConnectionImpl::submitMetadata(). bool submitMetadata(const MetadataMapVector& metadata_map_vector, int32_t stream_id) override { // Creates metadata payload. 
encoder_.createPayload(metadata_map_vector); for (uint8_t flags : encoder_.payloadFrameFlagBytes()) { - int result = nghttp2_submit_extension(session(), ::Envoy::Http::METADATA_FRAME_TYPE, flags, - stream_id, nullptr); + int result = + nghttp2_submit_extension(TestClientConnectionImplType::session(), + ::Envoy::Http::METADATA_FRAME_TYPE, flags, stream_id, nullptr); if (result != 0) { return false; } } // Triggers nghttp2 to populate the payloads of the METADATA frames. - int result = nghttp2_session_send(session()); + int result = nghttp2_session_send(TestClientConnectionImplType::session()); return result == 0; } protected: - friend class TestNghttp2SessionFactory; + template friend class TestNghttp2SessionFactory; MetadataEncoder encoder_; }; -class TestNghttp2SessionFactory : public Nghttp2SessionFactory { +using MetadataTestClientConnectionImplNew = + MetadataTestClientConnectionImpl; +using MetadataTestClientConnectionImplLegacy = + MetadataTestClientConnectionImpl; + +struct Nghttp2SessionFactoryDeleter { + virtual ~Nghttp2SessionFactoryDeleter() = default; +}; + +template +class TestNghttp2SessionFactory : public Nghttp2SessionFactoryType, + public Nghttp2SessionFactoryDeleter { public: ~TestNghttp2SessionFactory() override { nghttp2_session_callbacks_del(callbacks_); nghttp2_option_del(options_); } - nghttp2_session* create(const nghttp2_session_callbacks*, ConnectionImpl* connection, + nghttp2_session* create(const nghttp2_session_callbacks*, + typename Nghttp2SessionFactoryType::ConnectionImplType* connection, const nghttp2_option*) override { // Only need to provide callbacks required to send METADATA frames. nghttp2_session_callbacks_new(&callbacks_); @@ -2083,16 +2118,18 @@ class TestNghttp2SessionFactory : public Nghttp2SessionFactory { [](nghttp2_session*, uint8_t* data, size_t length, const nghttp2_frame*, void* user_data) -> ssize_t { // Double cast required due to multiple inheritance. - return static_cast( - static_cast(user_data)) + return static_cast*>( + static_cast( + user_data)) ->encoder_.packNextFramePayload(data, length); }); nghttp2_session_callbacks_set_send_callback( callbacks_, [](nghttp2_session*, const uint8_t* data, size_t length, int, void* user_data) -> ssize_t { // Cast down to MetadataTestClientConnectionImpl to leverage friendship. 
- return static_cast( - static_cast(user_data)) + return static_cast*>( + static_cast( + user_data)) ->onSend(data, length); }); nghttp2_option_new(&options_); @@ -2102,7 +2139,7 @@ class TestNghttp2SessionFactory : public Nghttp2SessionFactory { return session; } - void init(nghttp2_session*, ConnectionImpl*, + void init(nghttp2_session*, typename Nghttp2SessionFactoryType::ConnectionImplType*, const envoy::config::core::v3::Http2ProtocolOptions&) override {} private: @@ -2110,6 +2147,12 @@ class TestNghttp2SessionFactory : public Nghttp2SessionFactory { nghttp2_option* options_; }; +using TestNghttp2SessionFactoryNew = + TestNghttp2SessionFactory; +using TestNghttp2SessionFactoryLegacy = + TestNghttp2SessionFactory; + class Http2CodecMetadataTest : public Http2CodecImplTestFixture, public ::testing::Test { public: Http2CodecMetadataTest() = default; @@ -2119,12 +2162,27 @@ class Http2CodecMetadataTest : public Http2CodecImplTestFixture, public ::testin allow_metadata_ = true; http2OptionsFromTuple(client_http2_options_, client_settings_); http2OptionsFromTuple(server_http2_options_, server_settings_); - client_ = std::make_unique( - client_connection_, client_callbacks_, client_stats_store_, client_http2_options_, - max_request_headers_kb_, max_response_headers_count_, http2_session_factory_); - server_ = std::make_unique( - server_connection_, server_callbacks_, server_stats_store_, server_http2_options_, - max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_); + if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.new_codec_behavior")) { + std::unique_ptr session_factory = + std::make_unique(); + client_ = std::make_unique( + client_connection_, client_callbacks_, client_stats_store_, client_http2_options_, + max_request_headers_kb_, max_response_headers_count_, *session_factory); + server_ = std::make_unique( + server_connection_, server_callbacks_, server_stats_store_, server_http2_options_, + max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_); + http2_session_factory_ = std::move(session_factory); + } else { + std::unique_ptr session_factory = + std::make_unique(); + client_ = std::make_unique( + client_connection_, client_callbacks_, client_stats_store_, client_http2_options_, + max_request_headers_kb_, max_response_headers_count_, *session_factory); + server_ = std::make_unique( + server_connection_, server_callbacks_, server_stats_store_, server_http2_options_, + max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_); + http2_session_factory_ = std::move(session_factory); + } ON_CALL(client_connection_, write(_, _)) .WillByDefault(Invoke([&](Buffer::Instance& data, bool) -> void { ASSERT_TRUE(server_wrapper_.dispatch(data, *server_).ok()); @@ -2136,7 +2194,7 @@ class Http2CodecMetadataTest : public Http2CodecImplTestFixture, public ::testin } private: - TestNghttp2SessionFactory http2_session_factory_; + std::unique_ptr http2_session_factory_; }; // Validates noop handling of METADATA frames without a known stream ID. 
diff --git a/test/common/http/http2/codec_impl_test_util.h b/test/common/http/http2/codec_impl_test_util.h index 1eb8bd581a9e..2ba9f545a20c 100644 --- a/test/common/http/http2/codec_impl_test_util.h +++ b/test/common/http/http2/codec_impl_test_util.h @@ -3,6 +3,7 @@ #include "envoy/http/codec.h" #include "common/http/http2/codec_impl.h" +#include "common/http/http2/codec_impl_legacy.h" #include "common/http/utility.h" namespace Envoy { @@ -32,7 +33,7 @@ class TestCodecSettingsProvider { return it->second; } -protected: + // protected: // Stores SETTINGS parameters contained in |settings_frame| to make them available via // getRemoteSettingsParameterValue(). void onSettingsFrame(const nghttp2_settings& settings_frame) { @@ -57,9 +58,23 @@ class TestCodecSettingsProvider { std::unordered_map settings_; }; -class TestServerConnectionImpl : public TestCodecStatsProvider, - public ServerConnectionImpl, - public TestCodecSettingsProvider { +struct ServerCodecFacade : public virtual Connection { + virtual nghttp2_session* session() PURE; + virtual Http::Stream* getStream(int32_t stream_id) PURE; + virtual uint32_t getStreamUnconsumedBytes(int32_t stream_id) PURE; + virtual void setStreamWriteBufferWatermarks(int32_t stream_id, uint32_t low_watermark, + uint32_t high_watermark) PURE; +}; + +class TestServerConnection : public TestCodecStatsProvider, + public TestCodecSettingsProvider, + public ServerCodecFacade { +public: + TestServerConnection(Stats::Scope& scope) : TestCodecStatsProvider(scope) {} +}; + +template +class TestServerConnectionImpl : public TestServerConnection, public CodecImplType { public: TestServerConnectionImpl( Network::Connection& connection, ServerConnectionCallbacks& callbacks, Stats::Scope& scope, @@ -67,50 +82,94 @@ class TestServerConnectionImpl : public TestCodecStatsProvider, uint32_t max_request_headers_kb, uint32_t max_request_headers_count, envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction headers_with_underscores_action) - : TestCodecStatsProvider(scope), - ServerConnectionImpl(connection, callbacks, http2CodecStats(), http2_options, - max_request_headers_kb, max_request_headers_count, - headers_with_underscores_action) {} - nghttp2_session* session() { return session_; } - using ServerConnectionImpl::getStream; + : TestServerConnection(scope), + CodecImplType(connection, callbacks, http2CodecStats(), http2_options, + max_request_headers_kb, max_request_headers_count, + headers_with_underscores_action) {} + + // ServerCodecFacade + nghttp2_session* session() override { return CodecImplType::session_; } + Http::Stream* getStream(int32_t stream_id) override { + return CodecImplType::getStream(stream_id); + } + uint32_t getStreamUnconsumedBytes(int32_t stream_id) override { + return CodecImplType::getStream(stream_id)->unconsumed_bytes_; + } + void setStreamWriteBufferWatermarks(int32_t stream_id, uint32_t low_watermark, + uint32_t high_watermark) override { + CodecImplType::getStream(stream_id)->setWriteBufferWatermarks(low_watermark, high_watermark); + } protected: // Overrides ServerConnectionImpl::onSettingsForTest(). 
void onSettingsForTest(const nghttp2_settings& settings) override { onSettingsFrame(settings); } }; -class TestClientConnectionImpl : public TestCodecStatsProvider, - public ClientConnectionImpl, - public TestCodecSettingsProvider { +using TestServerConnectionImplLegacy = + TestServerConnectionImpl; +using TestServerConnectionImplNew = + TestServerConnectionImpl; + +struct ClientCodecFacade : public ClientConnection { + virtual nghttp2_session* session() PURE; + virtual Http::Stream* getStream(int32_t stream_id) PURE; + virtual uint64_t getStreamPendingSendDataLength(int32_t stream_id) PURE; + virtual void sendPendingFrames() PURE; + virtual bool submitMetadata(const MetadataMapVector& mm_vector, int32_t stream_id) PURE; +}; + +class TestClientConnection : public TestCodecStatsProvider, + public TestCodecSettingsProvider, + public ClientCodecFacade { +public: + TestClientConnection(Stats::Scope& scope) : TestCodecStatsProvider(scope) {} +}; + +template +class TestClientConnectionImpl : public TestClientConnection, public CodecImplType { public: TestClientConnectionImpl(Network::Connection& connection, Http::ConnectionCallbacks& callbacks, Stats::Scope& scope, const envoy::config::core::v3::Http2ProtocolOptions& http2_options, uint32_t max_request_headers_kb, uint32_t max_request_headers_count, - Nghttp2SessionFactory& http2_session_factory) - : TestCodecStatsProvider(scope), - ClientConnectionImpl(connection, callbacks, http2CodecStats(), http2_options, - max_request_headers_kb, max_request_headers_count, - http2_session_factory) {} - - nghttp2_session* session() { return session_; } - + typename CodecImplType::SessionFactory& http2_session_factory) + : TestClientConnection(scope), + CodecImplType(connection, callbacks, http2CodecStats(), http2_options, + max_request_headers_kb, max_request_headers_count, http2_session_factory) {} + + // ClientCodecFacade + RequestEncoder& newStream(ResponseDecoder& response_decoder) override { + return CodecImplType::newStream(response_decoder); + } + nghttp2_session* session() override { return CodecImplType::session_; } + Http::Stream* getStream(int32_t stream_id) override { + return CodecImplType::getStream(stream_id); + } + uint64_t getStreamPendingSendDataLength(int32_t stream_id) override { + return CodecImplType::getStream(stream_id)->pending_send_data_.length(); + } + void sendPendingFrames() override { CodecImplType::sendPendingFrames(); } // Submits an H/2 METADATA frame to the peer. // Returns true on success, false otherwise. - virtual bool submitMetadata(const MetadataMapVector& mm_vector, int32_t stream_id) { + bool submitMetadata(const MetadataMapVector& mm_vector, int32_t stream_id) override { UNREFERENCED_PARAMETER(mm_vector); UNREFERENCED_PARAMETER(stream_id); return false; } - using ClientConnectionImpl::getStream; - using ConnectionImpl::sendPendingFrames; - protected: // Overrides ClientConnectionImpl::onSettingsForTest(). 
void onSettingsForTest(const nghttp2_settings& settings) override { onSettingsFrame(settings); } }; +using TestClientConnectionImplLegacy = + TestClientConnectionImpl; +using TestClientConnectionImplNew = + TestClientConnectionImpl; + +using ProdNghttp2SessionFactoryLegacy = Envoy::Http::Legacy::Http2::ProdNghttp2SessionFactory; +using ProdNghttp2SessionFactoryNew = Envoy::Http::Http2::ProdNghttp2SessionFactory; + } // namespace Http2 } // namespace Http } // namespace Envoy diff --git a/test/common/http/http2/frame_replay_test.cc b/test/common/http/http2/frame_replay_test.cc index c88458e10c7e..aadda98c8b3d 100644 --- a/test/common/http/http2/frame_replay_test.cc +++ b/test/common/http/http2/frame_replay_test.cc @@ -26,7 +26,7 @@ class RequestFrameCommentTest : public ::testing::Test {}; class ResponseFrameCommentTest : public ::testing::Test {}; // Creates and sets up a stream to reply to. -void setupStream(ClientCodecFrameInjector& codec, TestClientConnectionImpl& connection) { +void setupStream(ClientCodecFrameInjector& codec, TestClientConnectionImplNew& connection) { codec.request_encoder_ = &connection.newStream(codec.response_decoder_); codec.request_encoder_->getStream().addCallbacks(codec.client_stream_callbacks_); // Setup a single stream to inject frames as a reply to. @@ -56,7 +56,7 @@ TEST_F(RequestFrameCommentTest, SimpleExampleHuffman) { // Validate HEADERS decode. ServerCodecFrameInjector codec; - TestServerConnectionImpl connection( + TestServerConnectionImplNew connection( codec.server_connection_, codec.server_callbacks_, codec.stats_store_, codec.options_, Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, envoy::config::core::v3::HttpProtocolOptions::ALLOW); @@ -89,7 +89,7 @@ TEST_F(ResponseFrameCommentTest, SimpleExampleHuffman) { // Validate HEADERS decode. ClientCodecFrameInjector codec; - TestClientConnectionImpl connection( + TestClientConnectionImplNew connection( codec.client_connection_, codec.client_callbacks_, codec.stats_store_, codec.options_, Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, ProdNghttp2SessionFactory::get()); @@ -134,7 +134,7 @@ TEST_F(RequestFrameCommentTest, SimpleExamplePlain) { // Validate HEADERS decode. ServerCodecFrameInjector codec; - TestServerConnectionImpl connection( + TestServerConnectionImplNew connection( codec.server_connection_, codec.server_callbacks_, codec.stats_store_, codec.options_, Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, envoy::config::core::v3::HttpProtocolOptions::ALLOW); @@ -169,7 +169,7 @@ TEST_F(ResponseFrameCommentTest, SimpleExamplePlain) { // Validate HEADERS decode. ClientCodecFrameInjector codec; - TestClientConnectionImpl connection( + TestClientConnectionImplNew connection( codec.client_connection_, codec.client_callbacks_, codec.stats_store_, codec.options_, Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, ProdNghttp2SessionFactory::get()); @@ -199,7 +199,7 @@ TEST_F(RequestFrameCommentTest, SingleByteNulCrLfInHeaderFrame) { header.frame()[offset] = c; // Play the frames back. 
ServerCodecFrameInjector codec; - TestServerConnectionImpl connection( + TestServerConnectionImplNew connection( codec.server_connection_, codec.server_callbacks_, codec.stats_store_, codec.options_, Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, envoy::config::core::v3::HttpProtocolOptions::ALLOW); @@ -232,7 +232,7 @@ TEST_F(ResponseFrameCommentTest, SingleByteNulCrLfInHeaderFrame) { header.frame()[offset] = c; // Play the frames back. ClientCodecFrameInjector codec; - TestClientConnectionImpl connection( + TestClientConnectionImplNew connection( codec.client_connection_, codec.client_callbacks_, codec.stats_store_, codec.options_, Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, ProdNghttp2SessionFactory::get()); @@ -267,7 +267,7 @@ TEST_F(RequestFrameCommentTest, SingleByteNulCrLfInHeaderField) { header.frame()[offset] = c; // Play the frames back. ServerCodecFrameInjector codec; - TestServerConnectionImpl connection( + TestServerConnectionImplNew connection( codec.server_connection_, codec.server_callbacks_, codec.stats_store_, codec.options_, Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, envoy::config::core::v3::HttpProtocolOptions::ALLOW); @@ -305,7 +305,7 @@ TEST_F(ResponseFrameCommentTest, SingleByteNulCrLfInHeaderField) { header.frame()[offset] = c; // Play the frames back. ClientCodecFrameInjector codec; - TestClientConnectionImpl connection( + TestClientConnectionImplNew connection( codec.client_connection_, codec.client_callbacks_, codec.stats_store_, codec.options_, Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, ProdNghttp2SessionFactory::get()); diff --git a/test/common/http/http2/request_header_fuzz_test.cc b/test/common/http/http2/request_header_fuzz_test.cc index 5dc75d58ebbb..d925ed1bb002 100644 --- a/test/common/http/http2/request_header_fuzz_test.cc +++ b/test/common/http/http2/request_header_fuzz_test.cc @@ -14,7 +14,7 @@ namespace { void Replay(const Frame& frame, ServerCodecFrameInjector& codec) { // Create the server connection containing the nghttp2 session. - TestServerConnectionImpl connection( + TestServerConnectionImplNew connection( codec.server_connection_, codec.server_callbacks_, codec.stats_store_, codec.options_, Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, envoy::config::core::v3::HttpProtocolOptions::ALLOW); diff --git a/test/common/http/http2/response_header_fuzz_test.cc b/test/common/http/http2/response_header_fuzz_test.cc index 8b1a5d3d0797..e73b88ab954d 100644 --- a/test/common/http/http2/response_header_fuzz_test.cc +++ b/test/common/http/http2/response_header_fuzz_test.cc @@ -15,7 +15,7 @@ namespace { void Replay(const Frame& frame, ClientCodecFrameInjector& codec) { // Create the client connection containing the nghttp2 session. 
- TestClientConnectionImpl connection( + TestClientConnectionImplNew connection( codec.client_connection_, codec.client_callbacks_, codec.stats_store_, codec.options_, Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, ProdNghttp2SessionFactory::get()); diff --git a/test/common/stats/stat_test_utility.h b/test/common/stats/stat_test_utility.h index 6b46a0f05aea..b1df35b6d189 100644 --- a/test/common/stats/stat_test_utility.h +++ b/test/common/stats/stat_test_utility.h @@ -91,6 +91,11 @@ class MemoryTest { class TestStore : public IsolatedStoreImpl { public: TestStore() = default; + ~TestStore() { + counter_map_.clear(); + gauge_map_.clear(); + histogram_map_.clear(); + } // Constructs a store using a symbol table, allowing for explicit sharing. explicit TestStore(SymbolTable& symbol_table) : IsolatedStoreImpl(symbol_table) {} diff --git a/test/config/utility.cc b/test/config/utility.cc index ea96c7ddb142..f57e7af4286c 100644 --- a/test/config/utility.cc +++ b/test/config/utility.cc @@ -586,6 +586,10 @@ void ConfigHelper::addRuntimeOverride(const std::string& key, const std::string& (*static_layer->mutable_fields())[std::string(key)] = ValueUtil::stringValue(std::string(value)); } +void ConfigHelper::setLegacyCodecs() { + addRuntimeOverride("envoy.reloadable_features.new_codec_behavior", "false"); +} + void ConfigHelper::finalize(const std::vector& ports) { RELEASE_ASSERT(!finalized_, ""); diff --git a/test/config/utility.h b/test/config/utility.h index 39bcb00a4454..b4217d4f31f5 100644 --- a/test/config/utility.h +++ b/test/config/utility.h @@ -229,6 +229,9 @@ class ConfigHelper { const envoy::extensions::filters::network::http_connection_manager::v3::LocalReplyConfig& config); + // Set legacy codecs to use for upstream and downstream codecs. 
+ void setLegacyCodecs(); + private: static bool shouldBoost(envoy::config::core::v3::ApiVersion api_version) { return api_version == envoy::config::core::v3::ApiVersion::V2; diff --git a/test/extensions/filters/network/http_connection_manager/config_test.cc b/test/extensions/filters/network/http_connection_manager/config_test.cc index d1d32fdd634e..5e7648ba1ce4 100644 --- a/test/extensions/filters/network/http_connection_manager/config_test.cc +++ b/test/extensions/filters/network/http_connection_manager/config_test.cc @@ -1619,6 +1619,66 @@ TEST_F(HttpConnectionManagerConfigTest, DefaultRequestIDExtension) { ASSERT_NE(nullptr, request_id_extension); } +TEST_F(HttpConnectionManagerConfigTest, LegacyH1Codecs) { + const std::string yaml_string = R"EOF( +codec_type: http1 +server_name: foo +stat_prefix: router +route_config: + virtual_hosts: + - name: service + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: cluster +http_filters: +- name: envoy.filters.http.router + )EOF"; + + envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager + proto_config; + TestUtility::loadFromYaml(yaml_string, proto_config); + NiceMock filter_callbacks; + EXPECT_CALL(context_.runtime_loader_.snapshot_, runtimeFeatureEnabled(_)).WillOnce(Return(false)); + auto http_connection_manager_factory = + HttpConnectionManagerFactory::createHttpConnectionManagerFactoryFromProto( + proto_config, context_, filter_callbacks); + http_connection_manager_factory(); +} + +TEST_F(HttpConnectionManagerConfigTest, LegacyH2Codecs) { + const std::string yaml_string = R"EOF( +codec_type: http2 +server_name: foo +stat_prefix: router +route_config: + virtual_hosts: + - name: service + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: cluster +http_filters: +- name: envoy.filters.http.router + )EOF"; + + envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager + proto_config; + TestUtility::loadFromYaml(yaml_string, proto_config); + NiceMock filter_callbacks; + EXPECT_CALL(context_.runtime_loader_.snapshot_, runtimeFeatureEnabled(_)).WillOnce(Return(false)); + auto http_connection_manager_factory = + HttpConnectionManagerFactory::createHttpConnectionManagerFactoryFromProto( + proto_config, context_, filter_callbacks); + http_connection_manager_factory(); +} + class FilterChainTest : public HttpConnectionManagerConfigTest { public: const std::string basic_config_ = R"EOF( diff --git a/test/integration/BUILD b/test/integration/BUILD index 2efec82ce37b..d16bfcbe114b 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -8,6 +8,7 @@ load( "envoy_package", "envoy_proto_library", "envoy_select_hot_restart", + "envoy_select_legacy_codecs_in_integration_tests", "envoy_sh_test", ) @@ -576,6 +577,10 @@ envoy_cc_test_library( "ssl_utility.h", "utility.h", ], + copts = envoy_select_legacy_codecs_in_integration_tests( + ["-DENVOY_USE_LEGACY_CODECS_IN_INTEGRATION_TESTS"], + "@envoy", + ), data = ["//test/common/runtime:filesystem_test_data"], deps = [ ":server_stats_interface", @@ -607,7 +612,9 @@ envoy_cc_test_library( "//source/common/http:codec_client_lib", "//source/common/http:header_map_lib", "//source/common/http:headers_lib", + "//source/common/http/http1:codec_legacy_lib", "//source/common/http/http1:codec_lib", + "//source/common/http/http2:codec_legacy_lib", "//source/common/http/http2:codec_lib", "//source/common/local_info:local_info_lib", "//source/common/network:filter_lib", diff --git 
a/test/integration/api_version_integration_test.cc b/test/integration/api_version_integration_test.cc index c8bf5164b028..952c095a820e 100644 --- a/test/integration/api_version_integration_test.cc +++ b/test/integration/api_version_integration_test.cc @@ -316,9 +316,11 @@ TEST_P(ApiVersionIntegrationTest, Eds) { TEST_P(ApiVersionIntegrationTest, Rtds) { config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { - auto* admin_layer = bootstrap.mutable_layered_runtime()->add_layers(); - admin_layer->set_name("admin layer"); - admin_layer->mutable_admin_layer(); + if (bootstrap.mutable_layered_runtime()->layers_size() == 0) { + auto* admin_layer = bootstrap.mutable_layered_runtime()->add_layers(); + admin_layer->set_name("admin layer"); + admin_layer->mutable_admin_layer(); + } auto* rtds_layer = bootstrap.mutable_layered_runtime()->add_layers(); rtds_layer->set_name("rtds_layer"); setupConfigSource(*rtds_layer->mutable_rtds_layer()->mutable_rtds_config()); diff --git a/test/integration/fake_upstream.cc b/test/integration/fake_upstream.cc index 4763c9bbfe05..a6c94d91a0c4 100644 --- a/test/integration/fake_upstream.cc +++ b/test/integration/fake_upstream.cc @@ -12,7 +12,9 @@ #include "common/common/fmt.h" #include "common/http/header_map_impl.h" #include "common/http/http1/codec_impl.h" +#include "common/http/http1/codec_impl_legacy.h" #include "common/http/http2/codec_impl.h" +#include "common/http/http2/codec_impl_legacy.h" #include "common/network/address_impl.h" #include "common/network/listen_socket_impl.h" #include "common/network/raw_buffer_socket.h" @@ -249,6 +251,29 @@ class TestHttp1ServerConnectionImpl : public Http::Http1::ServerConnectionImpl { } }; +namespace Legacy { +class TestHttp1ServerConnectionImpl : public Http::Legacy::Http1::ServerConnectionImpl { +public: + using Http::Legacy::Http1::ServerConnectionImpl::ServerConnectionImpl; + + void onMessageComplete() override { + ServerConnectionImpl::onMessageComplete(); + + if (activeRequest().has_value() && activeRequest().value().request_decoder_) { + // Undo the read disable from the base class - we have many tests which + // waitForDisconnect after a full request has been read which will not + // receive the disconnect if reading is disabled. 
+ activeRequest().value().response_encoder_.readDisable(false); + } + } + ~TestHttp1ServerConnectionImpl() override { + if (activeRequest().has_value()) { + activeRequest().value().response_encoder_.clearReadDisableCallsForTests(); + } + } +}; +} // namespace Legacy + FakeHttpConnection::FakeHttpConnection( FakeUpstream& fake_upstream, SharedConnectionWrapper& shared_connection, Type type, Event::TestTimeSystem& time_system, uint32_t max_request_headers_kb, @@ -261,9 +286,15 @@ FakeHttpConnection::FakeHttpConnection( // For the purpose of testing, we always have the upstream encode the trailers if any http1_settings.enable_trailers_ = true; Http::Http1::CodecStats& stats = fake_upstream.http1CodecStats(); +#ifdef ENVOY_USE_LEGACY_CODECS_IN_INTEGRATION_TESTS codec_ = std::make_unique( shared_connection_.connection(), stats, *this, http1_settings, max_request_headers_kb, max_request_headers_count, headers_with_underscores_action); +#else + codec_ = std::make_unique( + shared_connection_.connection(), stats, *this, http1_settings, max_request_headers_kb, + max_request_headers_count, headers_with_underscores_action); +#endif } else { envoy::config::core::v3::Http2ProtocolOptions http2_options = ::Envoy::Http2::Utility::initializeAndValidateOptions( @@ -271,12 +302,17 @@ FakeHttpConnection::FakeHttpConnection( http2_options.set_allow_connect(true); http2_options.set_allow_metadata(true); Http::Http2::CodecStats& stats = fake_upstream.http2CodecStats(); +#ifdef ENVOY_USE_LEGACY_CODECS_IN_INTEGRATION_TESTS codec_ = std::make_unique( shared_connection_.connection(), *this, stats, http2_options, max_request_headers_kb, max_request_headers_count, headers_with_underscores_action); +#else + codec_ = std::make_unique( + shared_connection_.connection(), *this, stats, http2_options, max_request_headers_kb, + max_request_headers_count, headers_with_underscores_action); +#endif ASSERT(type == Type::HTTP2); } - shared_connection_.connection().addReadFilter( Network::ReadFilterSharedPtr{new ReadFilter(*this)}); } diff --git a/test/integration/integration.cc b/test/integration/integration.cc index 309548595313..2e4b846870e4 100644 --- a/test/integration/integration.cc +++ b/test/integration/integration.cc @@ -286,6 +286,11 @@ BaseIntegrationTest::BaseIntegrationTest(const InstanceConstSharedPtrFn& upstrea return new Buffer::WatermarkBuffer(below_low, above_high, above_overflow); })); ON_CALL(factory_context_, api()).WillByDefault(ReturnRef(*api_)); + // In ENVOY_USE_LEGACY_CODECS_IN_INTEGRATION_TESTS mode, set runtime config to use legacy codecs. +#ifdef ENVOY_USE_LEGACY_CODECS_IN_INTEGRATION_TESTS + ENVOY_LOG_MISC(debug, "Using legacy codecs"); + setLegacyCodecs(); +#endif } BaseIntegrationTest::BaseIntegrationTest(Network::Address::IpVersion version, diff --git a/test/integration/integration.h b/test/integration/integration.h index 0ec9133736cb..d345b5051095 100644 --- a/test/integration/integration.h +++ b/test/integration/integration.h @@ -191,6 +191,7 @@ class BaseIntegrationTest : protected Logger::Loggable { void skipPortUsageValidation() { config_helper_.skipPortUsageValidation(); } // Make test more deterministic by using a fixed RNG value. 
void setDeterministic() { deterministic_ = true; } + void setLegacyCodecs() { config_helper_.setLegacyCodecs(); } FakeHttpConnection::Type upstreamProtocol() const { return upstream_protocol_; } diff --git a/tools/code_format/check_format.py b/tools/code_format/check_format.py index 351414da436b..ea5214dda67f 100755 --- a/tools/code_format/check_format.py +++ b/tools/code_format/check_format.py @@ -2,6 +2,7 @@ import argparse import common +import difflib import functools import multiprocessing import os @@ -84,6 +85,21 @@ "./source/server/admin/prometheus_stats.cc", "./tools/clang_tools/api_booster/main.cc", "./tools/clang_tools/api_booster/proto_cxx_utils.cc", "./source/common/common/version.cc") +# These triples (file1, file2, diff) represent two files, file1 and file2, that should maintain +# the given diff. This is meant to keep these two files in sync. +CODEC_DIFFS = (("./source/common/http/http1/codec_impl.h", + "./source/common/http/http1/codec_impl_legacy.h", + "./tools/code_format/codec_diffs/http1_codec_impl_h"), + ("./source/common/http/http1/codec_impl.cc", + "./source/common/http/http1/codec_impl_legacy.cc", + "./tools/code_format/codec_diffs/http1_codec_impl_cc"), + ("./source/common/http/http2/codec_impl.h", + "./source/common/http/http2/codec_impl_legacy.h", + "./tools/code_format/codec_diffs/http2_codec_impl_h"), + ("./source/common/http/http2/codec_impl.cc", + "./source/common/http/http2/codec_impl_legacy.cc", + "./tools/code_format/codec_diffs/http2_codec_impl_cc")) + # Only one C++ file should instantiate grpc_init GRPC_INIT_ALLOWLIST = ("./source/common/grpc/google_grpc_context.cc") @@ -532,6 +548,38 @@ def fixSourceLine(line, line_number): return line +def codecDiffHelper(file1, file2, diff): + f1 = readLines(file1) + f2 = readLines(file2) + + # Create diff between two files + code_diff = list(difflib.unified_diff(f1, f2, lineterm="")) + # Compare with golden diff. + golden_diff = readLines(diff) + # It is fairly ugly to diff a diff, so return a warning to sync codec changes + # and/or update golden_diff. + if code_diff != golden_diff: + error_message = "Codecs are not synced: %s does not match %s. Update codec implementations to sync and/or update the diff manually to:\n%s" % ( + file1, file2, '\n'.join(code_diff)) + # The following lines will write the computed diff to the golden diff file if it does not match. + # Do not uncomment unless you know the change is safe! + # new_diff = pathlib.Path(diff) + #new_diff.open('w') + # new_diff.write_text('\n'.join(code_diff), encoding='utf-8') + return error_message + + +def checkCodecDiffs(error_messages): + try: + for triple in CODEC_DIFFS: + codec_diff = codecDiffHelper(*triple) + if codec_diff is not None: + error_messages.append(codec_diff) + return error_messages + except IOError: # for check format tests + return error_messages + + # We want to look for a call to condvar.waitFor, but there's no strong pattern # to the variable name of the condvar. If we just look for ".waitFor" we'll also # pick up time_system_.waitFor(...), and we don't want to return true for that @@ -1048,6 +1096,9 @@ def ownedDirectories(error_messages): error_messages = [] owned_directories = ownedDirectories(error_messages) + # Check codec synchronization once per run. 
+ checkCodecDiffs(error_messages) + if os.path.isfile(target_path): error_messages += checkFormat("./" + target_path) else: diff --git a/tools/code_format/codec_diffs/http1_codec_impl_cc b/tools/code_format/codec_diffs/http1_codec_impl_cc new file mode 100644 index 000000000000..b9ea3f3ae002 --- /dev/null +++ b/tools/code_format/codec_diffs/http1_codec_impl_cc @@ -0,0 +1,35 @@ +--- ++++ +@@ -1,4 +1,4 @@ +-#include "common/http/http1/codec_impl.h" ++#include "common/http/http1/codec_impl_legacy.h" + + #include + #include +@@ -25,6 +25,7 @@ + + namespace Envoy { + namespace Http { ++namespace Legacy { + namespace Http1 { + namespace { + +@@ -48,6 +49,10 @@ + + using Http1ResponseCodeDetails = ConstSingleton; + using Http1HeaderTypes = ConstSingleton; ++using Http::Http1::CodecStats; ++using Http::Http1::HeaderKeyFormatter; ++using Http::Http1::HeaderKeyFormatterPtr; ++using Http::Http1::ProperCaseHeaderKeyFormatter; + + const StringUtil::CaseUnorderedSet& caseUnorderdSetContainingUpgradeAndHttp2Settings() { + CONSTRUCT_ON_FIRST_USE(StringUtil::CaseUnorderedSet, +@@ -1236,6 +1241,7 @@ + } + + } // namespace Http1 ++} // namespace Legacy + } // namespace Http + } // namespace Envoy + \ No newline at end of file diff --git a/tools/code_format/codec_diffs/http1_codec_impl_h b/tools/code_format/codec_diffs/http1_codec_impl_h new file mode 100644 index 000000000000..e81d2c838d5e --- /dev/null +++ b/tools/code_format/codec_diffs/http1_codec_impl_h @@ -0,0 +1,130 @@ +--- ++++ +@@ -24,6 +24,7 @@ + + namespace Envoy { + namespace Http { ++namespace Legacy { + namespace Http1 { + + class ConnectionImpl; +@@ -75,7 +76,8 @@ + void clearReadDisableCallsForTests() { read_disable_calls_ = 0; } + + protected: +- StreamEncoderImpl(ConnectionImpl& connection, HeaderKeyFormatter* header_key_formatter); ++ StreamEncoderImpl(ConnectionImpl& connection, ++ Http::Http1::HeaderKeyFormatter* header_key_formatter); + void encodeHeadersBase(const RequestOrResponseHeaderMap& headers, absl::optional status, + bool end_stream); + void encodeTrailersBase(const HeaderMap& headers); +@@ -114,7 +116,7 @@ + + void encodeFormattedHeader(absl::string_view key, absl::string_view value); + +- const HeaderKeyFormatter* const header_key_formatter_; ++ const Http::Http1::HeaderKeyFormatter* const header_key_formatter_; + absl::string_view details_; + }; + +@@ -123,7 +125,8 @@ + */ + class ResponseEncoderImpl : public StreamEncoderImpl, public ResponseEncoder { + public: +- ResponseEncoderImpl(ConnectionImpl& connection, HeaderKeyFormatter* header_key_formatter) ++ ResponseEncoderImpl(ConnectionImpl& connection, ++ Http::Http1::HeaderKeyFormatter* header_key_formatter) + : StreamEncoderImpl(connection, header_key_formatter) {} + + bool startedResponse() { return started_response_; } +@@ -142,7 +145,8 @@ + */ + class RequestEncoderImpl : public StreamEncoderImpl, public RequestEncoder { + public: +- RequestEncoderImpl(ConnectionImpl& connection, HeaderKeyFormatter* header_key_formatter) ++ RequestEncoderImpl(ConnectionImpl& connection, ++ Http::Http1::HeaderKeyFormatter* header_key_formatter) + : StreamEncoderImpl(connection, header_key_formatter) {} + bool upgradeRequest() const { return upgrade_request_; } + bool headRequest() const { return head_request_; } +@@ -203,7 +207,7 @@ + virtual bool supportsHttp10() { return false; } + bool maybeDirectDispatch(Buffer::Instance& data); + virtual void maybeAddSentinelBufferFragment(Buffer::WatermarkBuffer&) {} +- CodecStats& stats() { return stats_; } ++ Http::Http1::CodecStats& stats() { 
return stats_; } + bool enableTrailers() const { return enable_trailers_; } + + // Http::Connection +@@ -218,9 +222,9 @@ + bool strict1xxAnd204Headers() { return strict_1xx_and_204_headers_; } + + protected: +- ConnectionImpl(Network::Connection& connection, CodecStats& stats, http_parser_type type, +- uint32_t max_headers_kb, const uint32_t max_headers_count, +- HeaderKeyFormatterPtr&& header_key_formatter, bool enable_trailers); ++ ConnectionImpl(Network::Connection& connection, Http::Http1::CodecStats& stats, ++ http_parser_type type, uint32_t max_headers_kb, const uint32_t max_headers_count, ++ Http::Http1::HeaderKeyFormatterPtr&& header_key_formatter, bool enable_trailers); + + bool resetStreamCalled() { return reset_stream_called_; } + void onMessageBeginBase(); +@@ -240,10 +244,10 @@ + void checkMaxHeadersSize(); + + Network::Connection& connection_; +- CodecStats& stats_; ++ Http::Http1::CodecStats& stats_; + http_parser parser_; + Http::Code error_code_{Http::Code::BadRequest}; +- const HeaderKeyFormatterPtr header_key_formatter_; ++ const Http::Http1::HeaderKeyFormatterPtr header_key_formatter_; + HeaderString current_header_field_; + HeaderString current_header_value_; + bool processing_trailers_ : 1; +@@ -420,7 +424,7 @@ + */ + class ServerConnectionImpl : public ServerConnection, public ConnectionImpl { + public: +- ServerConnectionImpl(Network::Connection& connection, CodecStats& stats, ++ ServerConnectionImpl(Network::Connection& connection, Http::Http1::CodecStats& stats, + ServerConnectionCallbacks& callbacks, const Http1Settings& settings, + uint32_t max_request_headers_kb, const uint32_t max_request_headers_count, + envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction +@@ -432,7 +436,7 @@ + * An active HTTP/1.1 request. 
+ */ + struct ActiveRequest { +- ActiveRequest(ConnectionImpl& connection, HeaderKeyFormatter* header_key_formatter) ++ ActiveRequest(ConnectionImpl& connection, Http::Http1::HeaderKeyFormatter* header_key_formatter) + : response_encoder_(connection, header_key_formatter) {} + + HeaderString request_url_; +@@ -524,7 +528,7 @@ + */ + class ClientConnectionImpl : public ClientConnection, public ConnectionImpl { + public: +- ClientConnectionImpl(Network::Connection& connection, CodecStats& stats, ++ ClientConnectionImpl(Network::Connection& connection, Http::Http1::CodecStats& stats, + ConnectionCallbacks& callbacks, const Http1Settings& settings, + const uint32_t max_response_headers_count); + +@@ -533,8 +537,8 @@ + + private: + struct PendingResponse { +- PendingResponse(ConnectionImpl& connection, HeaderKeyFormatter* header_key_formatter, +- ResponseDecoder* decoder) ++ PendingResponse(ConnectionImpl& connection, ++ Http::Http1::HeaderKeyFormatter* header_key_formatter, ResponseDecoder* decoder) + : encoder_(connection, header_key_formatter), decoder_(decoder) {} + + RequestEncoderImpl encoder_; +@@ -598,6 +602,7 @@ + }; + + } // namespace Http1 ++} // namespace Legacy + } // namespace Http + } // namespace Envoy + \ No newline at end of file diff --git a/tools/code_format/codec_diffs/http2_codec_impl_cc b/tools/code_format/codec_diffs/http2_codec_impl_cc new file mode 100644 index 000000000000..46123d9ef031 --- /dev/null +++ b/tools/code_format/codec_diffs/http2_codec_impl_cc @@ -0,0 +1,34 @@ +--- ++++ +@@ -1,4 +1,4 @@ +-#include "common/http/http2/codec_impl.h" ++#include "common/http/http2/codec_impl_legacy.h" + + #include + #include +@@ -25,6 +25,7 @@ + + namespace Envoy { + namespace Http { ++namespace Legacy { + namespace Http2 { + + class Http2ResponseCodeDetailValues { +@@ -52,6 +53,9 @@ + }; + + using Http2ResponseCodeDetails = ConstSingleton; ++using Http::Http2::CodecStats; ++using Http::Http2::MetadataDecoder; ++using Http::Http2::MetadataEncoder; + + bool Utility::reconstituteCrumbledCookies(const HeaderString& key, const HeaderString& value, + HeaderString& cookies) { +@@ -1464,6 +1468,7 @@ + } + + } // namespace Http2 ++} // namespace Legacy + } // namespace Http + } // namespace Envoy + \ No newline at end of file diff --git a/tools/code_format/codec_diffs/http2_codec_impl_h b/tools/code_format/codec_diffs/http2_codec_impl_h new file mode 100644 index 000000000000..70c306568061 --- /dev/null +++ b/tools/code_format/codec_diffs/http2_codec_impl_h @@ -0,0 +1,77 @@ +--- ++++ +@@ -30,6 +30,7 @@ + + namespace Envoy { + namespace Http { ++namespace Legacy { + namespace Http2 { + + // This is not the full client magic, but it's the smallest size that should be able to +@@ -89,7 +90,7 @@ + */ + class ConnectionImpl : public virtual Connection, protected Logger::Loggable { + public: +- ConnectionImpl(Network::Connection& connection, CodecStats& stats, ++ ConnectionImpl(Network::Connection& connection, Http::Http2::CodecStats& stats, + const envoy::config::core::v3::Http2ProtocolOptions& http2_options, + const uint32_t max_headers_kb, const uint32_t max_headers_count); + +@@ -252,9 +253,9 @@ + virtual void decodeTrailers() PURE; + + // Get MetadataEncoder for this stream. +- MetadataEncoder& getMetadataEncoder(); ++ Http::Http2::MetadataEncoder& getMetadataEncoder(); + // Get MetadataDecoder for this stream. +- MetadataDecoder& getMetadataDecoder(); ++ Http::Http2::MetadataDecoder& getMetadataDecoder(); + // Callback function for MetadataDecoder. 
+ void onMetadataDecoded(MetadataMapPtr&& metadata_map_ptr); + +@@ -273,8 +274,8 @@ + [this]() -> void { this->pendingSendBufferHighWatermark(); }, + []() -> void { /* TODO(adisuissa): Handle overflow watermark */ }}; + HeaderMapPtr pending_trailers_to_encode_; +- std::unique_ptr metadata_decoder_; +- std::unique_ptr metadata_encoder_; ++ std::unique_ptr metadata_decoder_; ++ std::unique_ptr metadata_encoder_; + absl::optional deferred_reset_; + HeaderString cookies_; + bool local_end_stream_sent_ : 1; +@@ -414,7 +415,7 @@ + + std::list active_streams_; + nghttp2_session* session_{}; +- CodecStats& stats_; ++ Http::Http2::CodecStats& stats_; + Network::Connection& connection_; + const uint32_t max_headers_kb_; + const uint32_t max_headers_count_; +@@ -522,7 +523,7 @@ + public: + using SessionFactory = Nghttp2SessionFactory; + ClientConnectionImpl(Network::Connection& connection, ConnectionCallbacks& callbacks, +- CodecStats& stats, ++ Http::Http2::CodecStats& stats, + const envoy::config::core::v3::Http2ProtocolOptions& http2_options, + const uint32_t max_response_headers_kb, + const uint32_t max_response_headers_count, +@@ -557,7 +558,7 @@ + class ServerConnectionImpl : public ServerConnection, public ConnectionImpl { + public: + ServerConnectionImpl(Network::Connection& connection, ServerConnectionCallbacks& callbacks, +- CodecStats& stats, ++ Http::Http2::CodecStats& stats, + const envoy::config::core::v3::Http2ProtocolOptions& http2_options, + const uint32_t max_request_headers_kb, + const uint32_t max_request_headers_count, +@@ -596,6 +597,7 @@ + }; + + } // namespace Http2 ++} // namespace Legacy + } // namespace Http + } // namespace Envoy + \ No newline at end of file From 7498c33e269217beaafe348d079893f80efabd18 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafa=C5=82=20Augustyniak?= Date: Thu, 16 Jul 2020 17:20:08 -0700 Subject: [PATCH 660/909] Fix typo (#12139) Signed-off-by: Rafal Augustyniak --- .../http_connection_manager/v3/http_connection_manager.proto | 2 +- .../v4alpha/http_connection_manager.proto | 2 +- .../http_connection_manager/v3/http_connection_manager.proto | 2 +- .../v4alpha/http_connection_manager.proto | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto b/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto index a23fcc99e07c..4788afef2434 100644 --- a/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto +++ b/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto @@ -613,7 +613,7 @@ message ResponseMapper { google.protobuf.UInt32Value status_code = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}]; // The new local reply body text if specified. It will be used in the `%LOCAL_REPLY_BODY%` - // command operator in the `body_foramt`. + // command operator in the `body_format`. config.core.v3.DataSource body = 3; // A per mapper `body_format` to override the :ref:`body_format `. 
diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto index bdf3618ba328..705f5e5fdcc6 100644 --- a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto +++ b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto @@ -618,7 +618,7 @@ message ResponseMapper { google.protobuf.UInt32Value status_code = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}]; // The new local reply body text if specified. It will be used in the `%LOCAL_REPLY_BODY%` - // command operator in the `body_foramt`. + // command operator in the `body_format`. config.core.v4alpha.DataSource body = 3; // A per mapper `body_format` to override the :ref:`body_format `. diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto index 322212670988..6d505f748222 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto @@ -618,7 +618,7 @@ message ResponseMapper { google.protobuf.UInt32Value status_code = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}]; // The new local reply body text if specified. It will be used in the `%LOCAL_REPLY_BODY%` - // command operator in the `body_foramt`. + // command operator in the `body_format`. config.core.v3.DataSource body = 3; // A per mapper `body_format` to override the :ref:`body_format `. diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto index bdf3618ba328..705f5e5fdcc6 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto @@ -618,7 +618,7 @@ message ResponseMapper { google.protobuf.UInt32Value status_code = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}]; // The new local reply body text if specified. It will be used in the `%LOCAL_REPLY_BODY%` - // command operator in the `body_foramt`. + // command operator in the `body_format`. config.core.v4alpha.DataSource body = 3; // A per mapper `body_format` to override the :ref:`body_format `. From 6f17ae351ce006ea121abca3e1877c320f5530ca Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Thu, 16 Jul 2020 19:35:12 -0700 Subject: [PATCH 661/909] Revert "[http] Initial codec splitting with test parametrization (#10591)" (#12142) This reverts commit 568b75960dcf0650cf600d5e1181480ee4f519e0. 
Signed-off-by: Lizan Zhou --- bazel/BUILD | 5 - bazel/envoy_build_system.bzl | 2 - bazel/envoy_select.bzl | 7 - ci/do_ci.sh | 5 - docs/root/version_history/current.rst | 1 - source/common/http/BUILD | 7 - source/common/http/codec_client.cc | 32 +- source/common/http/conn_manager_utility.cc | 26 +- source/common/http/http1/BUILD | 60 +- source/common/http/http1/codec_impl_legacy.cc | 1246 ---------- source/common/http/http1/codec_impl_legacy.h | 607 ----- source/common/http/http2/BUILD | 74 +- source/common/http/http2/codec_impl.cc | 10 +- source/common/http/http2/codec_impl.h | 11 +- source/common/http/http2/codec_impl_legacy.cc | 1473 ----------- source/common/http/http2/codec_impl_legacy.h | 602 ----- source/common/runtime/runtime_features.cc | 1 - .../network/http_connection_manager/BUILD | 2 - .../network/http_connection_manager/config.cc | 37 +- test/common/http/codec_impl_fuzz_test.cc | 24 +- test/common/http/http1/BUILD | 1 - test/common/http/http1/codec_impl_test.cc | 360 ++- test/common/http/http2/BUILD | 62 +- .../http/http2/codec_impl_legacy_test.cc | 2163 ----------------- test/common/http/http2/codec_impl_test.cc | 152 +- test/common/http/http2/codec_impl_test_util.h | 109 +- test/common/http/http2/frame_replay_test.cc | 18 +- .../http/http2/request_header_fuzz_test.cc | 2 +- .../http/http2/response_header_fuzz_test.cc | 2 +- test/common/stats/stat_test_utility.h | 5 - test/config/utility.cc | 4 - test/config/utility.h | 3 - .../http_connection_manager/config_test.cc | 60 - test/integration/BUILD | 7 - .../api_version_integration_test.cc | 8 +- test/integration/fake_upstream.cc | 38 +- test/integration/integration.cc | 5 - test/integration/integration.h | 1 - tools/code_format/check_format.py | 51 - .../codec_diffs/http1_codec_impl_cc | 35 - .../codec_diffs/http1_codec_impl_h | 130 - .../codec_diffs/http2_codec_impl_cc | 34 - .../codec_diffs/http2_codec_impl_h | 77 - 43 files changed, 358 insertions(+), 7201 deletions(-) delete mode 100644 source/common/http/http1/codec_impl_legacy.cc delete mode 100644 source/common/http/http1/codec_impl_legacy.h delete mode 100644 source/common/http/http2/codec_impl_legacy.cc delete mode 100644 source/common/http/http2/codec_impl_legacy.h delete mode 100644 test/common/http/http2/codec_impl_legacy_test.cc delete mode 100644 tools/code_format/codec_diffs/http1_codec_impl_cc delete mode 100644 tools/code_format/codec_diffs/http1_codec_impl_h delete mode 100644 tools/code_format/codec_diffs/http2_codec_impl_cc delete mode 100644 tools/code_format/codec_diffs/http2_codec_impl_h diff --git a/bazel/BUILD b/bazel/BUILD index 97d9d79fb6be..982d3fa3ac70 100644 --- a/bazel/BUILD +++ b/bazel/BUILD @@ -199,11 +199,6 @@ config_setting( values = {"define": "path_normalization_by_default=true"}, ) -config_setting( - name = "enable_legacy_codecs_in_integration_tests", - values = {"define": "use_legacy_codecs_in_integration_tests=true"}, -) - cc_proto_library( name = "grpc_health_proto", deps = ["@com_github_grpc_grpc//src/proto/grpc/health/v1:_health_proto_only"], diff --git a/bazel/envoy_build_system.bzl b/bazel/envoy_build_system.bzl index 0f062cbfe8d8..70ef3df4f1d2 100644 --- a/bazel/envoy_build_system.bzl +++ b/bazel/envoy_build_system.bzl @@ -18,7 +18,6 @@ load( _envoy_select_boringssl = "envoy_select_boringssl", _envoy_select_google_grpc = "envoy_select_google_grpc", _envoy_select_hot_restart = "envoy_select_hot_restart", - _envoy_select_legacy_codecs_in_integration_tests = "envoy_select_legacy_codecs_in_integration_tests", ) load( ":envoy_test.bzl", 
@@ -169,7 +168,6 @@ def envoy_google_grpc_external_deps(): envoy_select_boringssl = _envoy_select_boringssl envoy_select_google_grpc = _envoy_select_google_grpc envoy_select_hot_restart = _envoy_select_hot_restart -envoy_select_legacy_codecs_in_integration_tests = _envoy_select_legacy_codecs_in_integration_tests # Binary wrappers (from envoy_binary.bzl) envoy_cc_binary = _envoy_cc_binary diff --git a/bazel/envoy_select.bzl b/bazel/envoy_select.bzl index ba7704ceb02f..f2167f29bec4 100644 --- a/bazel/envoy_select.bzl +++ b/bazel/envoy_select.bzl @@ -31,10 +31,3 @@ def envoy_select_hot_restart(xs, repository = ""): repository + "//bazel:disable_hot_restart_or_apple": [], "//conditions:default": xs, }) - -# Select the given values if use legacy codecs in test is on in the current build. -def envoy_select_legacy_codecs_in_integration_tests(xs, repository = ""): - return select({ - repository + "//bazel:enable_legacy_codecs_in_integration_tests": xs, - "//conditions:default": [], - }) diff --git a/ci/do_ci.sh b/ci/do_ci.sh index a8063dd4d7bc..d13c7be545bd 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -220,7 +220,6 @@ elif [[ "$CI_TARGET" == "bazel.compile_time_options" ]]; then --define quiche=enabled \ --define path_normalization_by_default=true \ --define deprecated_features=disabled \ - --define use_legacy_codecs_in_integration_tests=true \ --define --cxxopt=-std=c++14 \ " ENVOY_STDLIB="${ENVOY_STDLIB:-libstdc++}" @@ -238,10 +237,6 @@ elif [[ "$CI_TARGET" == "bazel.compile_time_options" ]]; then echo "Building and testing ${TEST_TARGETS}" bazel test ${BAZEL_BUILD_OPTIONS} ${COMPILE_TIME_OPTIONS} -c dbg ${TEST_TARGETS} --test_tag_filters=-nofips --build_tests_only - # Legacy codecs "--define legacy_codecs_in_integration_tests=true" should also be tested in - # integration tests with asan. - bazel test ${BAZEL_BUILD_OPTIONS} ${COMPILE_TIME_OPTIONS} -c dbg @envoy//test/integration/... --config=clang-asan --build_tests_only - # "--define log_debug_assert_in_release=enabled" must be tested with a release build, so run only # these tests under "-c opt" to save time in CI. bazel test ${BAZEL_BUILD_OPTIONS} ${COMPILE_TIME_OPTIONS} -c opt @envoy//test/common/common:assert_test @envoy//test/server:server_test diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 42346d258da0..eb3465f99eeb 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -36,7 +36,6 @@ New Features * ext_authz filter: added support for emitting dynamic metadata for both :ref:`HTTP ` and :ref:`network ` filters. * grpc-json: support specifying `response_body` field in for `google.api.HttpBody` message. -* http: introduced new HTTP/1 and HTTP/2 codec implementations that will remove the use of exceptions for control flow due to high risk factors and instead use error statuses. The old behavior is deprecated, but can be used during the removal period by setting the runtime feature `envoy.reloadable_features.new_codec_behavior` to false. The removal period will be one month. * load balancer: added a :ref:`configuration` option to specify the active request bias used by the least request load balancer. * tap: added :ref:`generic body matcher` to scan http requests and responses for text or hex patterns. 
diff --git a/source/common/http/BUILD b/source/common/http/BUILD index 041c3508d650..5a07a56b9f00 100644 --- a/source/common/http/BUILD +++ b/source/common/http/BUILD @@ -57,21 +57,16 @@ envoy_cc_library( "//include/envoy/http:codec_interface", "//include/envoy/network:connection_interface", "//include/envoy/network:filter_interface", - "//include/envoy/runtime:runtime_interface", "//source/common/common:assert_lib", "//source/common/common:enum_to_int", "//source/common/common:linked_object", "//source/common/common:minimal_logger_lib", "//source/common/config:utility_lib", - "//source/common/http/http1:codec_legacy_lib", "//source/common/http/http1:codec_lib", - "//source/common/http/http2:codec_legacy_lib", "//source/common/http/http2:codec_lib", "//source/common/http/http3:quic_codec_factory_lib", "//source/common/http/http3:well_known_names", "//source/common/network:filter_lib", - "//source/common/runtime:runtime_features_lib", - "//source/common/runtime:runtime_lib", ], ) @@ -212,9 +207,7 @@ envoy_cc_library( "//source/common/common:scope_tracker", "//source/common/common:utility_lib", "//source/common/config:utility_lib", - "//source/common/http/http1:codec_legacy_lib", "//source/common/http/http1:codec_lib", - "//source/common/http/http2:codec_legacy_lib", "//source/common/http/http2:codec_lib", "//source/common/http/http3:quic_codec_factory_lib", "//source/common/http/http3:well_known_names", diff --git a/source/common/http/codec_client.cc b/source/common/http/codec_client.cc index e3fbc23ef921..557b5757414a 100644 --- a/source/common/http/codec_client.cc +++ b/source/common/http/codec_client.cc @@ -9,15 +9,11 @@ #include "common/config/utility.h" #include "common/http/exception.h" #include "common/http/http1/codec_impl.h" -#include "common/http/http1/codec_impl_legacy.h" #include "common/http/http2/codec_impl.h" -#include "common/http/http2/codec_impl_legacy.h" #include "common/http/http3/quic_codec_factory.h" #include "common/http/http3/well_known_names.h" #include "common/http/status.h" #include "common/http/utility.h" -#include "common/runtime/runtime_features.h" -#include "common/runtime/runtime_impl.h" namespace Envoy { namespace Http { @@ -154,29 +150,16 @@ CodecClientProd::CodecClientProd(Type type, Network::ClientConnectionPtr&& conne switch (type) { case Type::HTTP1: { - if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.new_codec_behavior")) { - codec_ = std::make_unique( - *connection_, host->cluster().http1CodecStats(), *this, host->cluster().http1Settings(), - host->cluster().maxResponseHeadersCount()); - } else { - codec_ = std::make_unique( - *connection_, host->cluster().http1CodecStats(), *this, host->cluster().http1Settings(), - host->cluster().maxResponseHeadersCount()); - } + codec_ = std::make_unique( + *connection_, host->cluster().http1CodecStats(), *this, host->cluster().http1Settings(), + host->cluster().maxResponseHeadersCount()); break; } case Type::HTTP2: { - if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.new_codec_behavior")) { - codec_ = std::make_unique( - *connection_, *this, host->cluster().http2CodecStats(), host->cluster().http2Options(), - Http::DEFAULT_MAX_REQUEST_HEADERS_KB, host->cluster().maxResponseHeadersCount(), - Http2::ProdNghttp2SessionFactory::get()); - } else { - codec_ = std::make_unique( - *connection_, *this, host->cluster().http2CodecStats(), host->cluster().http2Options(), - Http::DEFAULT_MAX_REQUEST_HEADERS_KB, host->cluster().maxResponseHeadersCount(), - Http2::ProdNghttp2SessionFactory::get()); - } 
+ codec_ = std::make_unique( + *connection_, *this, host->cluster().http2CodecStats(), host->cluster().http2Options(), + Http::DEFAULT_MAX_REQUEST_HEADERS_KB, host->cluster().maxResponseHeadersCount(), + Http2::ProdNghttp2SessionFactory::get()); break; } case Type::HTTP3: { @@ -184,7 +167,6 @@ CodecClientProd::CodecClientProd(Type type, Network::ClientConnectionPtr&& conne Config::Utility::getAndCheckFactoryByName( Http::QuicCodecNames::get().Quiche) .createQuicClientConnection(*connection_, *this)); - break; } } } diff --git a/source/common/http/conn_manager_utility.cc b/source/common/http/conn_manager_utility.cc index c8ce01993cfa..65b5af861cc1 100644 --- a/source/common/http/conn_manager_utility.cc +++ b/source/common/http/conn_manager_utility.cc @@ -11,9 +11,7 @@ #include "common/http/header_utility.h" #include "common/http/headers.h" #include "common/http/http1/codec_impl.h" -#include "common/http/http1/codec_impl_legacy.h" #include "common/http/http2/codec_impl.h" -#include "common/http/http2/codec_impl_legacy.h" #include "common/http/path_utility.h" #include "common/http/utility.h" #include "common/network/utility.h" @@ -53,26 +51,14 @@ ServerConnectionPtr ConnectionManagerUtility::autoCreateCodec( headers_with_underscores_action) { if (determineNextProtocol(connection, data) == Utility::AlpnNames::get().Http2) { Http2::CodecStats& stats = Http2::CodecStats::atomicGet(http2_codec_stats, scope); - if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.new_codec_behavior")) { - return std::make_unique( - connection, callbacks, stats, http2_options, max_request_headers_kb, - max_request_headers_count, headers_with_underscores_action); - } else { - return std::make_unique( - connection, callbacks, stats, http2_options, max_request_headers_kb, - max_request_headers_count, headers_with_underscores_action); - } + return std::make_unique( + connection, callbacks, stats, http2_options, max_request_headers_kb, + max_request_headers_count, headers_with_underscores_action); } else { Http1::CodecStats& stats = Http1::CodecStats::atomicGet(http1_codec_stats, scope); - if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.new_codec_behavior")) { - return std::make_unique( - connection, stats, callbacks, http1_settings, max_request_headers_kb, - max_request_headers_count, headers_with_underscores_action); - } else { - return std::make_unique( - connection, stats, callbacks, http1_settings, max_request_headers_kb, - max_request_headers_count, headers_with_underscores_action); - } + return std::make_unique( + connection, stats, callbacks, http1_settings, max_request_headers_kb, + max_request_headers_count, headers_with_underscores_action); } } diff --git a/source/common/http/http1/BUILD b/source/common/http/http1/BUILD index 9451c4e29ae3..278e9adaaae5 100644 --- a/source/common/http/http1/BUILD +++ b/source/common/http/http1/BUILD @@ -24,46 +24,36 @@ envoy_cc_library( ], ) -CODEC_LIB_DEPS = [ - ":codec_stats_lib", - ":header_formatter_lib", - "//include/envoy/buffer:buffer_interface", - "//include/envoy/http:codec_interface", - "//include/envoy/http:header_map_interface", - "//include/envoy/network:connection_interface", - "//source/common/buffer:buffer_lib", - "//source/common/buffer:watermark_buffer_lib", - "//source/common/common:assert_lib", - "//source/common/common:statusor_lib", - "//source/common/common:utility_lib", - "//source/common/grpc:common_lib", - "//source/common/http:codec_helper_lib", - "//source/common/http:codes_lib", - "//source/common/http:exception_lib", - 
"//source/common/http:header_map_lib", - "//source/common/http:header_utility_lib", - "//source/common/http:headers_lib", - "//source/common/http:status_lib", - "//source/common/http:url_utility_lib", - "//source/common/http:utility_lib", - "//source/common/runtime:runtime_features_lib", - "@envoy_api//envoy/config/core/v3:pkg_cc_proto", -] - envoy_cc_library( name = "codec_lib", srcs = ["codec_impl.cc"], hdrs = ["codec_impl.h"], external_deps = ["http_parser"], - deps = CODEC_LIB_DEPS, -) - -envoy_cc_library( - name = "codec_legacy_lib", - srcs = ["codec_impl_legacy.cc"], - hdrs = ["codec_impl_legacy.h"], - external_deps = ["http_parser"], - deps = CODEC_LIB_DEPS, + deps = [ + ":codec_stats_lib", + ":header_formatter_lib", + "//include/envoy/buffer:buffer_interface", + "//include/envoy/http:codec_interface", + "//include/envoy/http:header_map_interface", + "//include/envoy/network:connection_interface", + "//source/common/buffer:buffer_lib", + "//source/common/buffer:watermark_buffer_lib", + "//source/common/common:assert_lib", + "//source/common/common:statusor_lib", + "//source/common/common:utility_lib", + "//source/common/grpc:common_lib", + "//source/common/http:codec_helper_lib", + "//source/common/http:codes_lib", + "//source/common/http:exception_lib", + "//source/common/http:header_map_lib", + "//source/common/http:header_utility_lib", + "//source/common/http:headers_lib", + "//source/common/http:status_lib", + "//source/common/http:url_utility_lib", + "//source/common/http:utility_lib", + "//source/common/runtime:runtime_features_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + ], ) envoy_cc_library( diff --git a/source/common/http/http1/codec_impl_legacy.cc b/source/common/http/http1/codec_impl_legacy.cc deleted file mode 100644 index d41d0a41401e..000000000000 --- a/source/common/http/http1/codec_impl_legacy.cc +++ /dev/null @@ -1,1246 +0,0 @@ -#include "common/http/http1/codec_impl_legacy.h" - -#include -#include -#include - -#include "envoy/buffer/buffer.h" -#include "envoy/http/codec.h" -#include "envoy/http/header_map.h" -#include "envoy/network/connection.h" - -#include "common/common/enum_to_int.h" -#include "common/common/utility.h" -#include "common/grpc/common.h" -#include "common/http/exception.h" -#include "common/http/header_utility.h" -#include "common/http/headers.h" -#include "common/http/http1/header_formatter.h" -#include "common/http/url_utility.h" -#include "common/http/utility.h" -#include "common/runtime/runtime_features.h" - -#include "absl/container/fixed_array.h" -#include "absl/strings/ascii.h" - -namespace Envoy { -namespace Http { -namespace Legacy { -namespace Http1 { -namespace { - -struct Http1ResponseCodeDetailValues { - const absl::string_view TooManyHeaders = "http1.too_many_headers"; - const absl::string_view HeadersTooLarge = "http1.headers_too_large"; - const absl::string_view HttpCodecError = "http1.codec_error"; - const absl::string_view InvalidCharacters = "http1.invalid_characters"; - const absl::string_view ConnectionHeaderSanitization = "http1.connection_header_rejected"; - const absl::string_view InvalidUrl = "http1.invalid_url"; - const absl::string_view InvalidTransferEncoding = "http1.invalid_transfer_encoding"; - const absl::string_view BodyDisallowed = "http1.body_disallowed"; - const absl::string_view TransferEncodingNotAllowed = "http1.transfer_encoding_not_allowed"; - const absl::string_view ContentLengthNotAllowed = "http1.content_length_not_allowed"; -}; - -struct Http1HeaderTypesValues { - const absl::string_view 
Headers = "headers"; - const absl::string_view Trailers = "trailers"; -}; - -using Http1ResponseCodeDetails = ConstSingleton; -using Http1HeaderTypes = ConstSingleton; -using Http::Http1::CodecStats; -using Http::Http1::HeaderKeyFormatter; -using Http::Http1::HeaderKeyFormatterPtr; -using Http::Http1::ProperCaseHeaderKeyFormatter; - -const StringUtil::CaseUnorderedSet& caseUnorderdSetContainingUpgradeAndHttp2Settings() { - CONSTRUCT_ON_FIRST_USE(StringUtil::CaseUnorderedSet, - Http::Headers::get().ConnectionValues.Upgrade, - Http::Headers::get().ConnectionValues.Http2Settings); -} - -HeaderKeyFormatterPtr formatter(const Http::Http1Settings& settings) { - if (settings.header_key_format_ == Http1Settings::HeaderKeyFormat::ProperCase) { - return std::make_unique(); - } - - return nullptr; -} - -} // namespace - -const std::string StreamEncoderImpl::CRLF = "\r\n"; -// Last chunk as defined here https://tools.ietf.org/html/rfc7230#section-4.1 -const std::string StreamEncoderImpl::LAST_CHUNK = "0\r\n"; - -StreamEncoderImpl::StreamEncoderImpl(ConnectionImpl& connection, - HeaderKeyFormatter* header_key_formatter) - : connection_(connection), disable_chunk_encoding_(false), chunk_encoding_(true), - is_response_to_head_request_(false), is_response_to_connect_request_(false), - header_key_formatter_(header_key_formatter) { - if (connection_.connection().aboveHighWatermark()) { - runHighWatermarkCallbacks(); - } -} - -void StreamEncoderImpl::encodeHeader(const char* key, uint32_t key_size, const char* value, - uint32_t value_size) { - - ASSERT(key_size > 0); - - connection_.copyToBuffer(key, key_size); - connection_.addCharToBuffer(':'); - connection_.addCharToBuffer(' '); - connection_.copyToBuffer(value, value_size); - connection_.addToBuffer(CRLF); -} -void StreamEncoderImpl::encodeHeader(absl::string_view key, absl::string_view value) { - this->encodeHeader(key.data(), key.size(), value.data(), value.size()); -} - -void StreamEncoderImpl::encodeFormattedHeader(absl::string_view key, absl::string_view value) { - if (header_key_formatter_ != nullptr) { - encodeHeader(header_key_formatter_->format(key), value); - } else { - encodeHeader(key, value); - } -} - -void ResponseEncoderImpl::encode100ContinueHeaders(const ResponseHeaderMap& headers) { - ASSERT(headers.Status()->value() == "100"); - encodeHeaders(headers, false); -} - -void StreamEncoderImpl::encodeHeadersBase(const RequestOrResponseHeaderMap& headers, - absl::optional status, bool end_stream) { - bool saw_content_length = false; - headers.iterate( - [](const HeaderEntry& header, void* context) -> HeaderMap::Iterate { - absl::string_view key_to_use = header.key().getStringView(); - uint32_t key_size_to_use = header.key().size(); - // Translate :authority -> host so that upper layers do not need to deal with this. - if (key_size_to_use > 1 && key_to_use[0] == ':' && key_to_use[1] == 'a') { - key_to_use = absl::string_view(Headers::get().HostLegacy.get()); - key_size_to_use = Headers::get().HostLegacy.get().size(); - } - - // Skip all headers starting with ':' that make it here. - if (key_to_use[0] == ':') { - return HeaderMap::Iterate::Continue; - } - - static_cast(context)->encodeFormattedHeader( - key_to_use, header.value().getStringView()); - - return HeaderMap::Iterate::Continue; - }, - this); - - if (headers.ContentLength()) { - saw_content_length = true; - } - - ASSERT(!headers.TransferEncoding()); - - // Assume we are chunk encoding unless we are passed a content length or this is a header only - // response. 
Upper layers generally should strip transfer-encoding since it only applies to - // HTTP/1.1. The codec will infer it based on the type of response. - // for streaming (e.g. SSE stream sent to hystrix dashboard), we do not want - // chunk transfer encoding but we don't have a content-length so disable_chunk_encoding_ is - // consulted before enabling chunk encoding. - // - // Note that for HEAD requests Envoy does best-effort guessing when there is no - // content-length. If a client makes a HEAD request for an upstream resource - // with no bytes but the upstream response doesn't include "Content-length: 0", - // Envoy will incorrectly assume a subsequent response to GET will be chunk encoded. - if (saw_content_length || disable_chunk_encoding_) { - chunk_encoding_ = false; - } else { - if (status && *status == 100) { - // Make sure we don't serialize chunk information with 100-Continue headers. - chunk_encoding_ = false; - } else if (end_stream && !is_response_to_head_request_) { - // If this is a headers-only stream, append an explicit "Content-Length: 0" unless it's a - // response to a HEAD request. - // For 204s and 1xx where content length is disallowed, don't append the content length but - // also don't chunk encode. - if (!status || (*status >= 200 && *status != 204)) { - encodeFormattedHeader(Headers::get().ContentLength.get(), "0"); - } - chunk_encoding_ = false; - } else if (connection_.protocol() == Protocol::Http10) { - chunk_encoding_ = false; - } else if (status && (*status < 200 || *status == 204) && - connection_.strict1xxAnd204Headers()) { - // TODO(zuercher): when the "envoy.reloadable_features.strict_1xx_and_204_response_headers" - // feature flag is removed, this block can be coalesced with the 100 Continue logic above. - - // For 1xx and 204 responses, do not send the chunked encoding header or enable chunked - // encoding: https://tools.ietf.org/html/rfc7230#section-3.3.1 - chunk_encoding_ = false; - } else { - // For responses to connect requests, do not send the chunked encoding header: - // https://tools.ietf.org/html/rfc7231#section-4.3.6. - if (!is_response_to_connect_request_) { - encodeFormattedHeader(Headers::get().TransferEncoding.get(), - Headers::get().TransferEncodingValues.Chunked); - } - // We do not apply chunk encoding for HTTP upgrades, including CONNECT style upgrades. - // If there is a body in a response on the upgrade path, the chunks will be - // passed through via maybeDirectDispatch so we need to avoid appending - // extra chunk boundaries. - // - // When sending a response to a HEAD request Envoy may send an informational - // "Transfer-Encoding: chunked" header, but should not send a chunk encoded body. - chunk_encoding_ = !Utility::isUpgrade(headers) && !is_response_to_head_request_ && - !is_response_to_connect_request_; - } - } - - connection_.addToBuffer(CRLF); - - if (end_stream) { - endEncode(); - } else { - connection_.flushOutput(); - } -} - -void StreamEncoderImpl::encodeData(Buffer::Instance& data, bool end_stream) { - // end_stream may be indicated with a zero length data buffer. If that is the case, so not - // actually write the zero length buffer out. 
- if (data.length() > 0) { - if (chunk_encoding_) { - connection_.buffer().add(absl::StrCat(absl::Hex(data.length()), CRLF)); - } - - connection_.buffer().move(data); - - if (chunk_encoding_) { - connection_.buffer().add(CRLF); - } - } - - if (end_stream) { - endEncode(); - } else { - connection_.flushOutput(); - } -} - -void StreamEncoderImpl::encodeTrailersBase(const HeaderMap& trailers) { - if (!connection_.enableTrailers()) { - return endEncode(); - } - // Trailers only matter if it is a chunk transfer encoding - // https://tools.ietf.org/html/rfc7230#section-4.4 - if (chunk_encoding_) { - // Finalize the body - connection_.buffer().add(LAST_CHUNK); - - trailers.iterate( - [](const HeaderEntry& header, void* context) -> HeaderMap::Iterate { - static_cast(context)->encodeFormattedHeader( - header.key().getStringView(), header.value().getStringView()); - return HeaderMap::Iterate::Continue; - }, - this); - - connection_.flushOutput(); - connection_.buffer().add(CRLF); - } - - connection_.flushOutput(); - connection_.onEncodeComplete(); -} - -void StreamEncoderImpl::encodeMetadata(const MetadataMapVector&) { - connection_.stats().metadata_not_supported_error_.inc(); -} - -void StreamEncoderImpl::endEncode() { - if (chunk_encoding_) { - connection_.buffer().add(LAST_CHUNK); - connection_.buffer().add(CRLF); - } - - connection_.flushOutput(true); - connection_.onEncodeComplete(); -} - -void ServerConnectionImpl::maybeAddSentinelBufferFragment(Buffer::WatermarkBuffer& output_buffer) { - if (!flood_protection_) { - return; - } - // It's messy and complicated to try to tag the final write of an HTTP response for response - // tracking for flood protection. Instead, write an empty buffer fragment after the response, - // to allow for tracking. - // When the response is written out, the fragment will be deleted and the counter will be updated - // by ServerConnectionImpl::releaseOutboundResponse() - auto fragment = - Buffer::OwnedBufferFragmentImpl::create(absl::string_view("", 0), response_buffer_releasor_); - output_buffer.addBufferFragment(*fragment.release()); - ASSERT(outbound_responses_ < max_outbound_responses_); - outbound_responses_++; -} - -void ServerConnectionImpl::doFloodProtectionChecks() const { - if (!flood_protection_) { - return; - } - // Before processing another request, make sure that we are below the response flood protection - // threshold. 
- if (outbound_responses_ >= max_outbound_responses_) { - ENVOY_CONN_LOG(trace, "error accepting request: too many pending responses queued", - connection_); - stats_.response_flood_.inc(); - throw FrameFloodException("Too many responses queued."); - } -} - -void ConnectionImpl::flushOutput(bool end_encode) { - if (end_encode) { - // If this is an HTTP response in ServerConnectionImpl, track outbound responses for flood - // protection - maybeAddSentinelBufferFragment(output_buffer_); - } - connection().write(output_buffer_, false); - ASSERT(0UL == output_buffer_.length()); -} - -void ConnectionImpl::addToBuffer(absl::string_view data) { output_buffer_.add(data); } - -void ConnectionImpl::addCharToBuffer(char c) { output_buffer_.add(&c, 1); } - -void ConnectionImpl::addIntToBuffer(uint64_t i) { output_buffer_.add(absl::StrCat(i)); } - -void ConnectionImpl::copyToBuffer(const char* data, uint64_t length) { - output_buffer_.add(data, length); -} - -void StreamEncoderImpl::resetStream(StreamResetReason reason) { - connection_.onResetStreamBase(reason); -} - -void StreamEncoderImpl::readDisable(bool disable) { - if (disable) { - ++read_disable_calls_; - } else { - ASSERT(read_disable_calls_ != 0); - if (read_disable_calls_ != 0) { - --read_disable_calls_; - } - } - connection_.readDisable(disable); -} - -uint32_t StreamEncoderImpl::bufferLimit() { return connection_.bufferLimit(); } - -const Network::Address::InstanceConstSharedPtr& StreamEncoderImpl::connectionLocalAddress() { - return connection_.connection().localAddress(); -} - -static const char RESPONSE_PREFIX[] = "HTTP/1.1 "; -static const char HTTP_10_RESPONSE_PREFIX[] = "HTTP/1.0 "; - -void ResponseEncoderImpl::encodeHeaders(const ResponseHeaderMap& headers, bool end_stream) { - started_response_ = true; - - // The contract is that client codecs must ensure that :status is present. - ASSERT(headers.Status() != nullptr); - uint64_t numeric_status = Utility::getResponseStatus(headers); - - if (connection_.protocol() == Protocol::Http10 && connection_.supportsHttp10()) { - connection_.copyToBuffer(HTTP_10_RESPONSE_PREFIX, sizeof(HTTP_10_RESPONSE_PREFIX) - 1); - } else { - connection_.copyToBuffer(RESPONSE_PREFIX, sizeof(RESPONSE_PREFIX) - 1); - } - connection_.addIntToBuffer(numeric_status); - connection_.addCharToBuffer(' '); - - const char* status_string = CodeUtility::toString(static_cast(numeric_status)); - uint32_t status_string_len = strlen(status_string); - connection_.copyToBuffer(status_string, status_string_len); - - connection_.addCharToBuffer('\r'); - connection_.addCharToBuffer('\n'); - - if (numeric_status >= 300) { - // Don't do special CONNECT logic if the CONNECT was rejected. - is_response_to_connect_request_ = false; - } - - encodeHeadersBase(headers, absl::make_optional(numeric_status), end_stream); -} - -static const char REQUEST_POSTFIX[] = " HTTP/1.1\r\n"; - -void RequestEncoderImpl::encodeHeaders(const RequestHeaderMap& headers, bool end_stream) { - const HeaderEntry* method = headers.Method(); - const HeaderEntry* path = headers.Path(); - const HeaderEntry* host = headers.Host(); - bool is_connect = HeaderUtility::isConnect(headers); - - if (!method || (!path && !is_connect)) { - // TODO(#10878): This exception does not occur during dispatch and would not be triggered under - // normal circumstances since inputs would fail parsing at ingress. Replace with proper error - // handling when exceptions are removed. Include missing host header for CONNECT. 
- throw CodecClientException(":method and :path must be specified"); - } - if (method->value() == Headers::get().MethodValues.Head) { - head_request_ = true; - } else if (method->value() == Headers::get().MethodValues.Connect) { - disableChunkEncoding(); - connect_request_ = true; - } - if (Utility::isUpgrade(headers)) { - upgrade_request_ = true; - } - - connection_.copyToBuffer(method->value().getStringView().data(), method->value().size()); - connection_.addCharToBuffer(' '); - if (is_connect) { - connection_.copyToBuffer(host->value().getStringView().data(), host->value().size()); - } else { - connection_.copyToBuffer(path->value().getStringView().data(), path->value().size()); - } - connection_.copyToBuffer(REQUEST_POSTFIX, sizeof(REQUEST_POSTFIX) - 1); - - encodeHeadersBase(headers, absl::nullopt, end_stream); -} - -http_parser_settings ConnectionImpl::settings_{ - [](http_parser* parser) -> int { - static_cast(parser->data)->onMessageBeginBase(); - return 0; - }, - [](http_parser* parser, const char* at, size_t length) -> int { - static_cast(parser->data)->onUrl(at, length); - return 0; - }, - nullptr, // on_status - [](http_parser* parser, const char* at, size_t length) -> int { - static_cast(parser->data)->onHeaderField(at, length); - return 0; - }, - [](http_parser* parser, const char* at, size_t length) -> int { - static_cast(parser->data)->onHeaderValue(at, length); - return 0; - }, - [](http_parser* parser) -> int { - return static_cast(parser->data)->onHeadersCompleteBase(); - }, - [](http_parser* parser, const char* at, size_t length) -> int { - static_cast(parser->data)->bufferBody(at, length); - return 0; - }, - [](http_parser* parser) -> int { - static_cast(parser->data)->onMessageCompleteBase(); - return 0; - }, - [](http_parser* parser) -> int { - // A 0-byte chunk header is used to signal the end of the chunked body. - // When this function is called, http-parser holds the size of the chunk in - // parser->content_length. 
See - // https://github.com/nodejs/http-parser/blob/v2.9.3/http_parser.h#L336 - const bool is_final_chunk = (parser->content_length == 0); - static_cast(parser->data)->onChunkHeader(is_final_chunk); - return 0; - }, - nullptr // on_chunk_complete -}; - -ConnectionImpl::ConnectionImpl(Network::Connection& connection, CodecStats& stats, - http_parser_type type, uint32_t max_headers_kb, - const uint32_t max_headers_count, - HeaderKeyFormatterPtr&& header_key_formatter, bool enable_trailers) - : connection_(connection), stats_(stats), - header_key_formatter_(std::move(header_key_formatter)), processing_trailers_(false), - handling_upgrade_(false), reset_stream_called_(false), deferred_end_stream_headers_(false), - connection_header_sanitization_(Runtime::runtimeFeatureEnabled( - "envoy.reloadable_features.connection_header_sanitization")), - enable_trailers_(enable_trailers), - strict_1xx_and_204_headers_(Runtime::runtimeFeatureEnabled( - "envoy.reloadable_features.strict_1xx_and_204_response_headers")), - output_buffer_([&]() -> void { this->onBelowLowWatermark(); }, - [&]() -> void { this->onAboveHighWatermark(); }, - []() -> void { /* TODO(adisuissa): Handle overflow watermark */ }), - max_headers_kb_(max_headers_kb), max_headers_count_(max_headers_count) { - output_buffer_.setWatermarks(connection.bufferLimit()); - http_parser_init(&parser_, type); - parser_.data = this; -} - -void ConnectionImpl::completeLastHeader() { - ENVOY_CONN_LOG(trace, "completed header: key={} value={}", connection_, - current_header_field_.getStringView(), current_header_value_.getStringView()); - - checkHeaderNameForUnderscores(); - auto& headers_or_trailers = headersOrTrailers(); - if (!current_header_field_.empty()) { - current_header_field_.inlineTransform([](char c) { return absl::ascii_tolower(c); }); - // Strip trailing whitespace of the current header value if any. Leading whitespace was trimmed - // in ConnectionImpl::onHeaderValue. http_parser does not strip leading or trailing whitespace - // as the spec requires: https://tools.ietf.org/html/rfc7230#section-3.2.4 - current_header_value_.rtrim(); - headers_or_trailers.addViaMove(std::move(current_header_field_), - std::move(current_header_value_)); - } - - // Check if the number of headers exceeds the limit. - if (headers_or_trailers.size() > max_headers_count_) { - error_code_ = Http::Code::RequestHeaderFieldsTooLarge; - sendProtocolError(Http1ResponseCodeDetails::get().TooManyHeaders); - const absl::string_view header_type = - processing_trailers_ ? Http1HeaderTypes::get().Trailers : Http1HeaderTypes::get().Headers; - throw CodecProtocolException(absl::StrCat(header_type, " size exceeds limit")); - } - - header_parsing_state_ = HeaderParsingState::Field; - ASSERT(current_header_field_.empty()); - ASSERT(current_header_value_.empty()); -} - -uint32_t ConnectionImpl::getHeadersSize() { - return current_header_field_.size() + current_header_value_.size() + - headersOrTrailers().byteSize(); -} - -void ConnectionImpl::checkMaxHeadersSize() { - const uint32_t total = getHeadersSize(); - if (total > (max_headers_kb_ * 1024)) { - const absl::string_view header_type = - processing_trailers_ ? 
Http1HeaderTypes::get().Trailers : Http1HeaderTypes::get().Headers; - error_code_ = Http::Code::RequestHeaderFieldsTooLarge; - sendProtocolError(Http1ResponseCodeDetails::get().HeadersTooLarge); - throw CodecProtocolException(absl::StrCat(header_type, " size exceeds limit")); - } -} - -bool ConnectionImpl::maybeDirectDispatch(Buffer::Instance& data) { - if (!handling_upgrade_) { - // Only direct dispatch for Upgrade requests. - return false; - } - - ENVOY_CONN_LOG(trace, "direct-dispatched {} bytes", connection_, data.length()); - onBody(data); - data.drain(data.length()); - return true; -} - -Http::Status ConnectionImpl::dispatch(Buffer::Instance& data) { - // TODO(#10878): Remove this wrapper when exception removal is complete. innerDispatch may either - // throw an exception or return an error status. The utility wrapper catches exceptions and - // converts them to error statuses. - return Utility::exceptionToStatus( - [&](Buffer::Instance& data) -> Http::Status { return innerDispatch(data); }, data); -} - -Http::Status ConnectionImpl::innerDispatch(Buffer::Instance& data) { - ENVOY_CONN_LOG(trace, "parsing {} bytes", connection_, data.length()); - ASSERT(buffered_body_.length() == 0); - - if (maybeDirectDispatch(data)) { - return Http::okStatus(); - } - - // Always unpause before dispatch. - http_parser_pause(&parser_, 0); - - ssize_t total_parsed = 0; - if (data.length() > 0) { - for (const Buffer::RawSlice& slice : data.getRawSlices()) { - total_parsed += dispatchSlice(static_cast(slice.mem_), slice.len_); - if (HTTP_PARSER_ERRNO(&parser_) != HPE_OK) { - // Parse errors trigger an exception in dispatchSlice so we are guaranteed to be paused at - // this point. - ASSERT(HTTP_PARSER_ERRNO(&parser_) == HPE_PAUSED); - break; - } - } - dispatchBufferedBody(); - } else { - dispatchSlice(nullptr, 0); - } - ASSERT(buffered_body_.length() == 0); - - ENVOY_CONN_LOG(trace, "parsed {} bytes", connection_, total_parsed); - data.drain(total_parsed); - - // If an upgrade has been handled and there is body data or early upgrade - // payload to send on, send it on. - maybeDirectDispatch(data); - return Http::okStatus(); -} - -size_t ConnectionImpl::dispatchSlice(const char* slice, size_t len) { - ssize_t rc = http_parser_execute(&parser_, &settings_, slice, len); - if (HTTP_PARSER_ERRNO(&parser_) != HPE_OK && HTTP_PARSER_ERRNO(&parser_) != HPE_PAUSED) { - sendProtocolError(Http1ResponseCodeDetails::get().HttpCodecError); - throw CodecProtocolException("http/1.1 protocol error: " + - std::string(http_errno_name(HTTP_PARSER_ERRNO(&parser_)))); - } - - return rc; -} - -void ConnectionImpl::onHeaderField(const char* data, size_t length) { - // We previously already finished up the headers, these headers are - // now trailers. - if (header_parsing_state_ == HeaderParsingState::Done) { - if (!enable_trailers_) { - // Ignore trailers. - return; - } - processing_trailers_ = true; - header_parsing_state_ = HeaderParsingState::Field; - allocTrailers(); - } - if (header_parsing_state_ == HeaderParsingState::Value) { - completeLastHeader(); - } - - current_header_field_.append(data, length); - - checkMaxHeadersSize(); -} - -void ConnectionImpl::onHeaderValue(const char* data, size_t length) { - if (header_parsing_state_ == HeaderParsingState::Done && !enable_trailers_) { - // Ignore trailers. 
- return; - } - - absl::string_view header_value{data, length}; - if (!Http::HeaderUtility::headerValueIsValid(header_value)) { - ENVOY_CONN_LOG(debug, "invalid header value: {}", connection_, header_value); - error_code_ = Http::Code::BadRequest; - sendProtocolError(Http1ResponseCodeDetails::get().InvalidCharacters); - throw CodecProtocolException("http/1.1 protocol error: header value contains invalid chars"); - } - - header_parsing_state_ = HeaderParsingState::Value; - if (current_header_value_.empty()) { - // Strip leading whitespace if the current header value input contains the first bytes of the - // encoded header value. Trailing whitespace is stripped once the full header value is known in - // ConnectionImpl::completeLastHeader. http_parser does not strip leading or trailing whitespace - // as the spec requires: https://tools.ietf.org/html/rfc7230#section-3.2.4 . - header_value = StringUtil::ltrim(header_value); - } - current_header_value_.append(header_value.data(), header_value.length()); - - checkMaxHeadersSize(); -} - -int ConnectionImpl::onHeadersCompleteBase() { - ASSERT(!processing_trailers_); - ENVOY_CONN_LOG(trace, "onHeadersCompleteBase", connection_); - completeLastHeader(); - - if (!(parser_.http_major == 1 && parser_.http_minor == 1)) { - // This is not necessarily true, but it's good enough since higher layers only care if this is - // HTTP/1.1 or not. - protocol_ = Protocol::Http10; - } - RequestOrResponseHeaderMap& request_or_response_headers = requestOrResponseHeaders(); - if (Utility::isUpgrade(request_or_response_headers) && upgradeAllowed()) { - // Ignore h2c upgrade requests until we support them. - // See https://github.com/envoyproxy/envoy/issues/7161 for details. - if (absl::EqualsIgnoreCase(request_or_response_headers.getUpgradeValue(), - Http::Headers::get().UpgradeValues.H2c)) { - ENVOY_CONN_LOG(trace, "removing unsupported h2c upgrade headers.", connection_); - request_or_response_headers.removeUpgrade(); - if (request_or_response_headers.Connection()) { - const auto& tokens_to_remove = caseUnorderdSetContainingUpgradeAndHttp2Settings(); - std::string new_value = StringUtil::removeTokens( - request_or_response_headers.getConnectionValue(), ",", tokens_to_remove, ","); - if (new_value.empty()) { - request_or_response_headers.removeConnection(); - } else { - request_or_response_headers.setConnection(new_value); - } - } - request_or_response_headers.remove(Headers::get().Http2Settings); - } else { - ENVOY_CONN_LOG(trace, "codec entering upgrade mode.", connection_); - handling_upgrade_ = true; - } - } - if (parser_.method == HTTP_CONNECT) { - if (request_or_response_headers.ContentLength()) { - if (request_or_response_headers.getContentLengthValue() == "0") { - request_or_response_headers.removeContentLength(); - } else { - // Per https://tools.ietf.org/html/rfc7231#section-4.3.6 a payload with a - // CONNECT request has no defined semantics, and may be rejected. - error_code_ = Http::Code::BadRequest; - sendProtocolError(Http1ResponseCodeDetails::get().BodyDisallowed); - throw CodecProtocolException("http/1.1 protocol error: unsupported content length"); - } - } - ENVOY_CONN_LOG(trace, "codec entering upgrade mode for CONNECT request.", connection_); - handling_upgrade_ = true; - } - - // Per https://tools.ietf.org/html/rfc7230#section-3.3.1 Envoy should reject - // transfer-codings it does not understand. 
- // Per https://tools.ietf.org/html/rfc7231#section-4.3.6 a payload with a - // CONNECT request has no defined semantics, and may be rejected. - if (request_or_response_headers.TransferEncoding()) { - const absl::string_view encoding = request_or_response_headers.getTransferEncodingValue(); - if (!absl::EqualsIgnoreCase(encoding, Headers::get().TransferEncodingValues.Chunked) || - parser_.method == HTTP_CONNECT) { - error_code_ = Http::Code::NotImplemented; - sendProtocolError(Http1ResponseCodeDetails::get().InvalidTransferEncoding); - throw CodecProtocolException("http/1.1 protocol error: unsupported transfer encoding"); - } - } - - int rc = onHeadersComplete(); - header_parsing_state_ = HeaderParsingState::Done; - - // Returning 2 informs http_parser to not expect a body or further data on this connection. - return handling_upgrade_ ? 2 : rc; -} - -void ConnectionImpl::bufferBody(const char* data, size_t length) { - buffered_body_.add(data, length); -} - -void ConnectionImpl::dispatchBufferedBody() { - ASSERT(HTTP_PARSER_ERRNO(&parser_) == HPE_OK || HTTP_PARSER_ERRNO(&parser_) == HPE_PAUSED); - if (buffered_body_.length() > 0) { - onBody(buffered_body_); - buffered_body_.drain(buffered_body_.length()); - } -} - -void ConnectionImpl::onChunkHeader(bool is_final_chunk) { - if (is_final_chunk) { - // Dispatch body before parsing trailers, so body ends up dispatched even if an error is found - // while processing trailers. - dispatchBufferedBody(); - } -} - -void ConnectionImpl::onMessageCompleteBase() { - ENVOY_CONN_LOG(trace, "message complete", connection_); - - dispatchBufferedBody(); - - if (handling_upgrade_) { - // If this is an upgrade request, swallow the onMessageComplete. The - // upgrade payload will be treated as stream body. - ASSERT(!deferred_end_stream_headers_); - ENVOY_CONN_LOG(trace, "Pausing parser due to upgrade.", connection_); - http_parser_pause(&parser_, 1); - return; - } - - // If true, this indicates we were processing trailers and must - // move the last header into current_header_map_ - if (header_parsing_state_ == HeaderParsingState::Value) { - completeLastHeader(); - } - - onMessageComplete(); -} - -void ConnectionImpl::onMessageBeginBase() { - ENVOY_CONN_LOG(trace, "message begin", connection_); - // Make sure that if HTTP/1.0 and HTTP/1.1 requests share a connection Envoy correctly sets - // protocol for each request. 
Envoy defaults to 1.1 but sets the protocol to 1.0 where applicable - // in onHeadersCompleteBase - protocol_ = Protocol::Http11; - processing_trailers_ = false; - header_parsing_state_ = HeaderParsingState::Field; - allocHeaders(); - onMessageBegin(); -} - -void ConnectionImpl::onResetStreamBase(StreamResetReason reason) { - ASSERT(!reset_stream_called_); - reset_stream_called_ = true; - onResetStream(reason); -} - -ServerConnectionImpl::ServerConnectionImpl( - Network::Connection& connection, CodecStats& stats, ServerConnectionCallbacks& callbacks, - const Http1Settings& settings, uint32_t max_request_headers_kb, - const uint32_t max_request_headers_count, - envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction - headers_with_underscores_action) - : ConnectionImpl(connection, stats, HTTP_REQUEST, max_request_headers_kb, - max_request_headers_count, formatter(settings), settings.enable_trailers_), - callbacks_(callbacks), codec_settings_(settings), - response_buffer_releasor_([this](const Buffer::OwnedBufferFragmentImpl* fragment) { - releaseOutboundResponse(fragment); - }), - // Pipelining is generally not well supported on the internet and has a series of dangerous - // overflow bugs. As such we are disabling it for now, and removing this temporary override if - // no one objects. If you use this integer to restore prior behavior, contact the - // maintainer team as it will otherwise be removed entirely soon. - max_outbound_responses_( - Runtime::getInteger("envoy.do_not_use_going_away_max_http2_outbound_responses", 2)), - flood_protection_( - Runtime::runtimeFeatureEnabled("envoy.reloadable_features.http1_flood_protection")), - headers_with_underscores_action_(headers_with_underscores_action) {} - -uint32_t ServerConnectionImpl::getHeadersSize() { - // Add in the the size of the request URL if processing request headers. - const uint32_t url_size = (!processing_trailers_ && active_request_.has_value()) - ? active_request_.value().request_url_.size() - : 0; - return url_size + ConnectionImpl::getHeadersSize(); -} - -void ServerConnectionImpl::onEncodeComplete() { - if (active_request_.value().remote_complete_) { - // Only do this if remote is complete. If we are replying before the request is complete the - // only logical thing to do is for higher level code to reset() / close the connection so we - // leave the request around so that it can fire reset callbacks. - active_request_.reset(); - } -} - -void ServerConnectionImpl::handlePath(RequestHeaderMap& headers, unsigned int method) { - HeaderString path(Headers::get().Path); - - bool is_connect = (method == HTTP_CONNECT); - - // The url is relative or a wildcard when the method is OPTIONS. Nothing to do here. - auto& active_request = active_request_.value(); - if (!is_connect && !active_request.request_url_.getStringView().empty() && - (active_request.request_url_.getStringView()[0] == '/' || - ((method == HTTP_OPTIONS) && active_request.request_url_.getStringView()[0] == '*'))) { - headers.addViaMove(std::move(path), std::move(active_request.request_url_)); - return; - } - - // If absolute_urls and/or connect are not going be handled, copy the url and return. - // This forces the behavior to be backwards compatible with the old codec behavior. - // CONNECT "urls" are actually host:port so look like absolute URLs to the above checks. - // Absolute URLS in CONNECT requests will be rejected below by the URL class validation. 
- if (!codec_settings_.allow_absolute_url_ && !is_connect) { - headers.addViaMove(std::move(path), std::move(active_request.request_url_)); - return; - } - - Utility::Url absolute_url; - if (!absolute_url.initialize(active_request.request_url_.getStringView(), is_connect)) { - sendProtocolError(Http1ResponseCodeDetails::get().InvalidUrl); - throw CodecProtocolException("http/1.1 protocol error: invalid url in request line"); - } - // RFC7230#5.7 - // When a proxy receives a request with an absolute-form of - // request-target, the proxy MUST ignore the received Host header field - // (if any) and instead replace it with the host information of the - // request-target. A proxy that forwards such a request MUST generate a - // new Host field-value based on the received request-target rather than - // forward the received Host field-value. - headers.setHost(absolute_url.hostAndPort()); - - if (!absolute_url.pathAndQueryParams().empty()) { - headers.setPath(absolute_url.pathAndQueryParams()); - } - active_request.request_url_.clear(); -} - -int ServerConnectionImpl::onHeadersComplete() { - // Handle the case where response happens prior to request complete. It's up to upper layer code - // to disconnect the connection but we shouldn't fire any more events since it doesn't make - // sense. - if (active_request_.has_value()) { - auto& active_request = active_request_.value(); - auto& headers = absl::get(headers_or_trailers_); - ENVOY_CONN_LOG(trace, "Server: onHeadersComplete size={}", connection_, headers->size()); - const char* method_string = http_method_str(static_cast(parser_.method)); - - if (!handling_upgrade_ && connection_header_sanitization_ && headers->Connection()) { - // If we fail to sanitize the request, return a 400 to the client - if (!Utility::sanitizeConnectionHeader(*headers)) { - absl::string_view header_value = headers->getConnectionValue(); - ENVOY_CONN_LOG(debug, "Invalid nominated headers in Connection: {}", connection_, - header_value); - error_code_ = Http::Code::BadRequest; - sendProtocolError(Http1ResponseCodeDetails::get().ConnectionHeaderSanitization); - throw CodecProtocolException("Invalid nominated headers in Connection."); - } - } - - // Inform the response encoder about any HEAD method, so it can set content - // length and transfer encoding headers correctly. - active_request.response_encoder_.setIsResponseToHeadRequest(parser_.method == HTTP_HEAD); - active_request.response_encoder_.setIsResponseToConnectRequest(parser_.method == HTTP_CONNECT); - - handlePath(*headers, parser_.method); - ASSERT(active_request.request_url_.empty()); - - headers->setMethod(method_string); - - // Make sure the host is valid. - auto details = HeaderUtility::requestHeadersValid(*headers); - if (details.has_value()) { - sendProtocolError(details.value().get()); - throw CodecProtocolException( - "http/1.1 protocol error: request headers failed spec compliance checks"); - } - - // Determine here whether we have a body or not. This uses the new RFC semantics where the - // presence of content-length or chunked transfer-encoding indicates a body vs. a particular - // method. If there is no body, we defer raising decodeHeaders() until the parser is flushed - // with message complete. This allows upper layers to behave like HTTP/2 and prevents a proxy - // scenario where the higher layers stream through and implicitly switch to chunked transfer - // encoding because end stream with zero body length has not yet been indicated. 
- if (parser_.flags & F_CHUNKED || - (parser_.content_length > 0 && parser_.content_length != ULLONG_MAX) || handling_upgrade_) { - active_request.request_decoder_->decodeHeaders(std::move(headers), false); - - // If the connection has been closed (or is closing) after decoding headers, pause the parser - // so we return control to the caller. - if (connection_.state() != Network::Connection::State::Open) { - http_parser_pause(&parser_, 1); - } - } else { - deferred_end_stream_headers_ = true; - } - } - - return 0; -} - -void ServerConnectionImpl::onMessageBegin() { - if (!resetStreamCalled()) { - ASSERT(!active_request_.has_value()); - active_request_.emplace(*this, header_key_formatter_.get()); - auto& active_request = active_request_.value(); - active_request.request_decoder_ = &callbacks_.newStream(active_request.response_encoder_); - - // Check for pipelined request flood as we prepare to accept a new request. - // Parse errors that happen prior to onMessageBegin result in stream termination, it is not - // possible to overflow output buffers with early parse errors. - doFloodProtectionChecks(); - } -} - -void ServerConnectionImpl::onUrl(const char* data, size_t length) { - if (active_request_.has_value()) { - active_request_.value().request_url_.append(data, length); - - checkMaxHeadersSize(); - } -} - -void ServerConnectionImpl::onBody(Buffer::Instance& data) { - ASSERT(!deferred_end_stream_headers_); - if (active_request_.has_value()) { - ENVOY_CONN_LOG(trace, "body size={}", connection_, data.length()); - active_request_.value().request_decoder_->decodeData(data, false); - } -} - -void ServerConnectionImpl::onMessageComplete() { - ASSERT(!handling_upgrade_); - if (active_request_.has_value()) { - auto& active_request = active_request_.value(); - - if (active_request.request_decoder_) { - active_request.response_encoder_.readDisable(true); - } - active_request.remote_complete_ = true; - if (deferred_end_stream_headers_) { - active_request.request_decoder_->decodeHeaders( - std::move(absl::get(headers_or_trailers_)), true); - deferred_end_stream_headers_ = false; - } else if (processing_trailers_) { - active_request.request_decoder_->decodeTrailers( - std::move(absl::get(headers_or_trailers_))); - } else { - Buffer::OwnedImpl buffer; - active_request.request_decoder_->decodeData(buffer, true); - } - - // Reset to ensure no information from one requests persists to the next. - headers_or_trailers_.emplace(nullptr); - } - - // Always pause the parser so that the calling code can process 1 request at a time and apply - // back pressure. However this means that the calling code needs to detect if there is more data - // in the buffer and dispatch it again. - http_parser_pause(&parser_, 1); -} - -void ServerConnectionImpl::onResetStream(StreamResetReason reason) { - active_request_.value().response_encoder_.runResetCallbacks(reason); - active_request_.reset(); -} - -void ServerConnectionImpl::sendProtocolErrorOld(absl::string_view details) { - if (active_request_.has_value()) { - active_request_.value().response_encoder_.setDetails(details); - } - // We do this here because we may get a protocol error before we have a logical stream. Higher - // layers can only operate on streams, so there is no coherent way to allow them to send an error - // "out of band." On one hand this is kind of a hack but on the other hand it normalizes HTTP/1.1 - // to look more like HTTP/2 to higher layers. 
- if (!active_request_.has_value() || - !active_request_.value().response_encoder_.startedResponse()) { - Buffer::OwnedImpl bad_request_response( - absl::StrCat("HTTP/1.1 ", error_code_, " ", CodeUtility::toString(error_code_), - "\r\ncontent-length: 0\r\nconnection: close\r\n\r\n")); - - connection_.write(bad_request_response, false); - } -} - -void ServerConnectionImpl::sendProtocolError(absl::string_view details) { - if (!Runtime::runtimeFeatureEnabled("envoy.reloadable_features.early_errors_via_hcm")) { - sendProtocolErrorOld(details); - return; - } - // We do this here because we may get a protocol error before we have a logical stream. - if (!active_request_.has_value()) { - onMessageBeginBase(); - } - ASSERT(active_request_.has_value()); - - active_request_.value().response_encoder_.setDetails(details); - if (!active_request_.value().response_encoder_.startedResponse()) { - // Note that the correctness of is_grpc_request and is_head_request is best-effort. - // If headers have not been fully parsed they may not be inferred correctly. - bool is_grpc_request = false; - if (absl::holds_alternative(headers_or_trailers_) && - absl::get(headers_or_trailers_) != nullptr) { - is_grpc_request = - Grpc::Common::isGrpcRequestHeaders(*absl::get(headers_or_trailers_)); - } - const bool is_head_request = parser_.method == HTTP_HEAD; - active_request_->request_decoder_->sendLocalReply(is_grpc_request, error_code_, - CodeUtility::toString(error_code_), nullptr, - is_head_request, absl::nullopt, details); - return; - } -} - -void ServerConnectionImpl::onAboveHighWatermark() { - if (active_request_.has_value()) { - active_request_.value().response_encoder_.runHighWatermarkCallbacks(); - } -} -void ServerConnectionImpl::onBelowLowWatermark() { - if (active_request_.has_value()) { - active_request_.value().response_encoder_.runLowWatermarkCallbacks(); - } -} - -void ServerConnectionImpl::releaseOutboundResponse( - const Buffer::OwnedBufferFragmentImpl* fragment) { - ASSERT(outbound_responses_ >= 1); - --outbound_responses_; - delete fragment; -} - -void ServerConnectionImpl::checkHeaderNameForUnderscores() { - if (headers_with_underscores_action_ != envoy::config::core::v3::HttpProtocolOptions::ALLOW && - Http::HeaderUtility::headerNameContainsUnderscore(current_header_field_.getStringView())) { - if (headers_with_underscores_action_ == - envoy::config::core::v3::HttpProtocolOptions::DROP_HEADER) { - ENVOY_CONN_LOG(debug, "Dropping header with invalid characters in its name: {}", connection_, - current_header_field_.getStringView()); - stats_.dropped_headers_with_underscores_.inc(); - current_header_field_.clear(); - current_header_value_.clear(); - } else { - ENVOY_CONN_LOG(debug, "Rejecting request due to header name with underscores: {}", - connection_, current_header_field_.getStringView()); - error_code_ = Http::Code::BadRequest; - sendProtocolError(Http1ResponseCodeDetails::get().InvalidCharacters); - stats_.requests_rejected_with_underscores_in_headers_.inc(); - throw CodecProtocolException("http/1.1 protocol error: header name contains underscores"); - } - } -} - -ClientConnectionImpl::ClientConnectionImpl(Network::Connection& connection, CodecStats& stats, - ConnectionCallbacks&, const Http1Settings& settings, - const uint32_t max_response_headers_count) - : ConnectionImpl(connection, stats, HTTP_RESPONSE, MAX_RESPONSE_HEADERS_KB, - max_response_headers_count, formatter(settings), settings.enable_trailers_) {} - -bool ClientConnectionImpl::cannotHaveBody() { - if (pending_response_.has_value() 
&& pending_response_.value().encoder_.headRequest()) { - ASSERT(!pending_response_done_); - return true; - } else if (parser_.status_code == 204 || parser_.status_code == 304 || - (parser_.status_code >= 200 && parser_.content_length == 0)) { - return true; - } else { - return false; - } -} - -RequestEncoder& ClientConnectionImpl::newStream(ResponseDecoder& response_decoder) { - if (resetStreamCalled()) { - throw CodecClientException("cannot create new streams after calling reset"); - } - - // If reads were disabled due to flow control, we expect reads to always be enabled again before - // reusing this connection. This is done when the response is received. - ASSERT(connection_.readEnabled()); - - ASSERT(!pending_response_.has_value()); - ASSERT(pending_response_done_); - pending_response_.emplace(*this, header_key_formatter_.get(), &response_decoder); - pending_response_done_ = false; - return pending_response_.value().encoder_; -} - -int ClientConnectionImpl::onHeadersComplete() { - // Handle the case where the client is closing a kept alive connection (by sending a 408 - // with a 'Connection: close' header). In this case we just let response flush out followed - // by the remote close. - if (!pending_response_.has_value() && !resetStreamCalled()) { - throw PrematureResponseException(static_cast(parser_.status_code)); - } else if (pending_response_.has_value()) { - ASSERT(!pending_response_done_); - auto& headers = absl::get(headers_or_trailers_); - ENVOY_CONN_LOG(trace, "Client: onHeadersComplete size={}", connection_, headers->size()); - headers->setStatus(parser_.status_code); - - if (parser_.status_code >= 200 && parser_.status_code < 300 && - pending_response_.value().encoder_.connectRequest()) { - ENVOY_CONN_LOG(trace, "codec entering upgrade mode for CONNECT response.", connection_); - handling_upgrade_ = true; - - // For responses to connect requests, do not accept the chunked - // encoding header: https://tools.ietf.org/html/rfc7231#section-4.3.6 - if (headers->TransferEncoding() && - absl::EqualsIgnoreCase(headers->TransferEncoding()->value().getStringView(), - Headers::get().TransferEncodingValues.Chunked)) { - sendProtocolError(Http1ResponseCodeDetails::get().InvalidTransferEncoding); - throw CodecProtocolException("http/1.1 protocol error: unsupported transfer encoding"); - } - } - - if (strict_1xx_and_204_headers_ && (parser_.status_code < 200 || parser_.status_code == 204)) { - if (headers->TransferEncoding()) { - sendProtocolError(Http1ResponseCodeDetails::get().TransferEncodingNotAllowed); - throw CodecProtocolException( - "http/1.1 protocol error: transfer encoding not allowed in 1xx or 204"); - } - - if (headers->ContentLength()) { - // Report a protocol error for non-zero Content-Length, but paper over zero Content-Length. - if (headers->ContentLength()->value().getStringView() != "0") { - sendProtocolError(Http1ResponseCodeDetails::get().ContentLengthNotAllowed); - throw CodecProtocolException( - "http/1.1 protocol error: content length not allowed in 1xx or 204"); - } - - headers->removeContentLength(); - } - } - - if (parser_.status_code == 100) { - // http-parser treats 100 continue headers as their own complete response. - // Swallow the spurious onMessageComplete and continue processing. 
- ignore_message_complete_for_100_continue_ = true; - pending_response_.value().decoder_->decode100ContinueHeaders(std::move(headers)); - - // Reset to ensure no information from the continue headers is used for the response headers - // in case the callee does not move the headers out. - headers_or_trailers_.emplace(nullptr); - } else if (cannotHaveBody() && !handling_upgrade_) { - deferred_end_stream_headers_ = true; - } else { - pending_response_.value().decoder_->decodeHeaders(std::move(headers), false); - } - } - - // Here we deal with cases where the response cannot have a body, but http_parser does not deal - // with it for us. - return cannotHaveBody() ? 1 : 0; -} - -bool ClientConnectionImpl::upgradeAllowed() const { - if (pending_response_.has_value()) { - return pending_response_->encoder_.upgradeRequest(); - } - return false; -} - -void ClientConnectionImpl::onBody(Buffer::Instance& data) { - ASSERT(!deferred_end_stream_headers_); - if (pending_response_.has_value()) { - ASSERT(!pending_response_done_); - pending_response_.value().decoder_->decodeData(data, false); - } -} - -void ClientConnectionImpl::onMessageComplete() { - ENVOY_CONN_LOG(trace, "message complete", connection_); - if (ignore_message_complete_for_100_continue_) { - ignore_message_complete_for_100_continue_ = false; - return; - } - if (pending_response_.has_value()) { - ASSERT(!pending_response_done_); - // After calling decodeData() with end stream set to true, we should no longer be able to reset. - PendingResponse& response = pending_response_.value(); - // Encoder is used as part of decode* calls later in this function so pending_response_ can not - // be reset just yet. Preserve the state in pending_response_done_ instead. - pending_response_done_ = true; - - if (deferred_end_stream_headers_) { - response.decoder_->decodeHeaders( - std::move(absl::get(headers_or_trailers_)), true); - deferred_end_stream_headers_ = false; - } else if (processing_trailers_) { - response.decoder_->decodeTrailers( - std::move(absl::get(headers_or_trailers_))); - } else { - Buffer::OwnedImpl buffer; - response.decoder_->decodeData(buffer, true); - } - - // Reset to ensure no information from one requests persists to the next. - pending_response_.reset(); - headers_or_trailers_.emplace(nullptr); - } -} - -void ClientConnectionImpl::onResetStream(StreamResetReason reason) { - // Only raise reset if we did not already dispatch a complete response. - if (pending_response_.has_value() && !pending_response_done_) { - pending_response_.value().encoder_.runResetCallbacks(reason); - pending_response_done_ = true; - pending_response_.reset(); - } -} - -void ClientConnectionImpl::sendProtocolError(absl::string_view details) { - if (pending_response_.has_value()) { - ASSERT(!pending_response_done_); - pending_response_.value().encoder_.setDetails(details); - } -} - -void ClientConnectionImpl::onAboveHighWatermark() { - // This should never happen without an active stream/request. - pending_response_.value().encoder_.runHighWatermarkCallbacks(); -} - -void ClientConnectionImpl::onBelowLowWatermark() { - // This can get called without an active stream/request when the response completion causes us to - // close the connection, but in doing so go below low watermark. 
- if (pending_response_.has_value() && !pending_response_done_) { - pending_response_.value().encoder_.runLowWatermarkCallbacks(); - } -} - -} // namespace Http1 -} // namespace Legacy -} // namespace Http -} // namespace Envoy diff --git a/source/common/http/http1/codec_impl_legacy.h b/source/common/http/http1/codec_impl_legacy.h deleted file mode 100644 index f5e9811ede87..000000000000 --- a/source/common/http/http1/codec_impl_legacy.h +++ /dev/null @@ -1,607 +0,0 @@ -#pragma once - -#include - -#include -#include -#include -#include -#include - -#include "envoy/config/core/v3/protocol.pb.h" -#include "envoy/http/codec.h" -#include "envoy/network/connection.h" - -#include "common/buffer/watermark_buffer.h" -#include "common/common/assert.h" -#include "common/common/statusor.h" -#include "common/http/codec_helper.h" -#include "common/http/codes.h" -#include "common/http/header_map_impl.h" -#include "common/http/http1/codec_stats.h" -#include "common/http/http1/header_formatter.h" -#include "common/http/status.h" - -namespace Envoy { -namespace Http { -namespace Legacy { -namespace Http1 { - -class ConnectionImpl; - -/** - * Base class for HTTP/1.1 request and response encoders. - */ -class StreamEncoderImpl : public virtual StreamEncoder, - public Stream, - Logger::Loggable, - public StreamCallbackHelper, - public Http1StreamEncoderOptions { -public: - ~StreamEncoderImpl() override { - // When the stream goes away, undo any read blocks to resume reading. - while (read_disable_calls_ != 0) { - StreamEncoderImpl::readDisable(false); - } - } - // Http::StreamEncoder - void encodeData(Buffer::Instance& data, bool end_stream) override; - void encodeMetadata(const MetadataMapVector&) override; - Stream& getStream() override { return *this; } - Http1StreamEncoderOptionsOptRef http1StreamEncoderOptions() override { return *this; } - - // Http::Http1StreamEncoderOptions - void disableChunkEncoding() override { disable_chunk_encoding_ = true; } - - // Http::Stream - void addCallbacks(StreamCallbacks& callbacks) override { addCallbacksHelper(callbacks); } - void removeCallbacks(StreamCallbacks& callbacks) override { removeCallbacksHelper(callbacks); } - // After this is called, for the HTTP/1 codec, the connection should be closed, i.e. no further - // progress may be made with the codec. - void resetStream(StreamResetReason reason) override; - void readDisable(bool disable) override; - uint32_t bufferLimit() override; - absl::string_view responseDetails() override { return details_; } - const Network::Address::InstanceConstSharedPtr& connectionLocalAddress() override; - void setFlushTimeout(std::chrono::milliseconds) override { - // HTTP/1 has one stream per connection, thus any data encoded is immediately written to the - // connection, invoking any watermarks as necessary. There is no internal buffering that would - // require a flush timeout not already covered by other timeouts. 
- } - - void setIsResponseToHeadRequest(bool value) { is_response_to_head_request_ = value; } - void setIsResponseToConnectRequest(bool value) { is_response_to_connect_request_ = value; } - void setDetails(absl::string_view details) { details_ = details; } - - void clearReadDisableCallsForTests() { read_disable_calls_ = 0; } - -protected: - StreamEncoderImpl(ConnectionImpl& connection, - Http::Http1::HeaderKeyFormatter* header_key_formatter); - void encodeHeadersBase(const RequestOrResponseHeaderMap& headers, absl::optional status, - bool end_stream); - void encodeTrailersBase(const HeaderMap& headers); - - static const std::string CRLF; - static const std::string LAST_CHUNK; - - ConnectionImpl& connection_; - uint32_t read_disable_calls_{}; - bool disable_chunk_encoding_ : 1; - bool chunk_encoding_ : 1; - bool is_response_to_head_request_ : 1; - bool is_response_to_connect_request_ : 1; - -private: - /** - * Called to encode an individual header. - * @param key supplies the header to encode. - * @param key_size supplies the byte size of the key. - * @param value supplies the value to encode. - * @param value_size supplies the byte size of the value. - */ - void encodeHeader(const char* key, uint32_t key_size, const char* value, uint32_t value_size); - - /** - * Called to encode an individual header. - * @param key supplies the header to encode as a string_view. - * @param value supplies the value to encode as a string_view. - */ - void encodeHeader(absl::string_view key, absl::string_view value); - - /** - * Called to finalize a stream encode. - */ - void endEncode(); - - void encodeFormattedHeader(absl::string_view key, absl::string_view value); - - const Http::Http1::HeaderKeyFormatter* const header_key_formatter_; - absl::string_view details_; -}; - -/** - * HTTP/1.1 response encoder. - */ -class ResponseEncoderImpl : public StreamEncoderImpl, public ResponseEncoder { -public: - ResponseEncoderImpl(ConnectionImpl& connection, - Http::Http1::HeaderKeyFormatter* header_key_formatter) - : StreamEncoderImpl(connection, header_key_formatter) {} - - bool startedResponse() { return started_response_; } - - // Http::ResponseEncoder - void encode100ContinueHeaders(const ResponseHeaderMap& headers) override; - void encodeHeaders(const ResponseHeaderMap& headers, bool end_stream) override; - void encodeTrailers(const ResponseTrailerMap& trailers) override { encodeTrailersBase(trailers); } - -private: - bool started_response_{}; -}; - -/** - * HTTP/1.1 request encoder. - */ -class RequestEncoderImpl : public StreamEncoderImpl, public RequestEncoder { -public: - RequestEncoderImpl(ConnectionImpl& connection, - Http::Http1::HeaderKeyFormatter* header_key_formatter) - : StreamEncoderImpl(connection, header_key_formatter) {} - bool upgradeRequest() const { return upgrade_request_; } - bool headRequest() const { return head_request_; } - bool connectRequest() const { return connect_request_; } - - // Http::RequestEncoder - void encodeHeaders(const RequestHeaderMap& headers, bool end_stream) override; - void encodeTrailers(const RequestTrailerMap& trailers) override { encodeTrailersBase(trailers); } - -private: - bool upgrade_request_{}; - bool head_request_{}; - bool connect_request_{}; -}; - -/** - * Base class for HTTP/1.1 client and server connections. - * Handles the callbacks of http_parser with its own base routine and then - * virtual dispatches to its subclasses. 
- */ -class ConnectionImpl : public virtual Connection, protected Logger::Loggable { -public: - /** - * @return Network::Connection& the backing network connection. - */ - Network::Connection& connection() { return connection_; } - - /** - * Called when the active encoder has completed encoding the outbound half of the stream. - */ - virtual void onEncodeComplete() PURE; - - /** - * Called when resetStream() has been called on an active stream. In HTTP/1.1 the only - * valid operation after this point is for the connection to get blown away, but we will not - * fire any more callbacks in case some stack has to unwind. - */ - void onResetStreamBase(StreamResetReason reason); - - /** - * Flush all pending output from encoding. - */ - void flushOutput(bool end_encode = false); - - void addToBuffer(absl::string_view data); - void addCharToBuffer(char c); - void addIntToBuffer(uint64_t i); - Buffer::WatermarkBuffer& buffer() { return output_buffer_; } - uint64_t bufferRemainingSize(); - void copyToBuffer(const char* data, uint64_t length); - void reserveBuffer(uint64_t size); - void readDisable(bool disable) { - if (connection_.state() == Network::Connection::State::Open) { - connection_.readDisable(disable); - } - } - uint32_t bufferLimit() { return connection_.bufferLimit(); } - virtual bool supportsHttp10() { return false; } - bool maybeDirectDispatch(Buffer::Instance& data); - virtual void maybeAddSentinelBufferFragment(Buffer::WatermarkBuffer&) {} - Http::Http1::CodecStats& stats() { return stats_; } - bool enableTrailers() const { return enable_trailers_; } - - // Http::Connection - Http::Status dispatch(Buffer::Instance& data) override; - void goAway() override {} // Called during connection manager drain flow - Protocol protocol() override { return protocol_; } - void shutdownNotice() override {} // Called during connection manager drain flow - bool wantsToWrite() override { return false; } - void onUnderlyingConnectionAboveWriteBufferHighWatermark() override { onAboveHighWatermark(); } - void onUnderlyingConnectionBelowWriteBufferLowWatermark() override { onBelowLowWatermark(); } - - bool strict1xxAnd204Headers() { return strict_1xx_and_204_headers_; } - -protected: - ConnectionImpl(Network::Connection& connection, Http::Http1::CodecStats& stats, - http_parser_type type, uint32_t max_headers_kb, const uint32_t max_headers_count, - Http::Http1::HeaderKeyFormatterPtr&& header_key_formatter, bool enable_trailers); - - bool resetStreamCalled() { return reset_stream_called_; } - void onMessageBeginBase(); - - /** - * Get memory used to represent HTTP headers or trailers currently being parsed. - * Computed by adding the partial header field and value that is currently being parsed and the - * estimated header size for previous header lines provided by HeaderMap::byteSize(). - */ - virtual uint32_t getHeadersSize(); - - /** - * Called from onUrl, onHeaderField and onHeaderValue to verify that the headers do not exceed the - * configured max header size limit. Throws a CodecProtocolException if headers exceed the size - * limit. 
- */ - void checkMaxHeadersSize(); - - Network::Connection& connection_; - Http::Http1::CodecStats& stats_; - http_parser parser_; - Http::Code error_code_{Http::Code::BadRequest}; - const Http::Http1::HeaderKeyFormatterPtr header_key_formatter_; - HeaderString current_header_field_; - HeaderString current_header_value_; - bool processing_trailers_ : 1; - bool handling_upgrade_ : 1; - bool reset_stream_called_ : 1; - // Deferred end stream headers indicate that we are not going to raise headers until the full - // HTTP/1 message has been flushed from the parser. This allows raising an HTTP/2 style headers - // block with end stream set to true with no further protocol data remaining. - bool deferred_end_stream_headers_ : 1; - const bool connection_header_sanitization_ : 1; - const bool enable_trailers_ : 1; - const bool strict_1xx_and_204_headers_ : 1; - -private: - enum class HeaderParsingState { Field, Value, Done }; - - virtual HeaderMap& headersOrTrailers() PURE; - virtual RequestOrResponseHeaderMap& requestOrResponseHeaders() PURE; - virtual void allocHeaders() PURE; - virtual void allocTrailers() PURE; - - /** - * Called in order to complete an in progress header decode. - */ - void completeLastHeader(); - - /** - * Check if header name contains underscore character. - * Underscore character is allowed in header names by the RFC-7230 and this check is implemented - * as a security measure due to systems that treat '_' and '-' as interchangeable. - * The ServerConnectionImpl may drop header or reject request based on the - * `common_http_protocol_options.headers_with_underscores_action` configuration option in the - * HttpConnectionManager. - */ - virtual bool shouldDropHeaderWithUnderscoresInNames(absl::string_view /* header_name */) const { - return false; - } - - /** - * An inner dispatch call that executes the dispatching logic. While exception removal is in - * migration (#10878), this function may either throw an exception or return an error status. - * Exceptions are caught and translated to their corresponding statuses in the outer level - * dispatch. - * TODO(#10878): Remove this when exception removal is complete. - */ - Http::Status innerDispatch(Buffer::Instance& data); - - /** - * Dispatch a memory span. - * @param slice supplies the start address. - * @len supplies the length of the span. - */ - size_t dispatchSlice(const char* slice, size_t len); - - /** - * Called by the http_parser when body data is received. - * @param data supplies the start address. - * @param length supplies the length. - */ - void bufferBody(const char* data, size_t length); - - /** - * Push the accumulated body through the filter pipeline. - */ - void dispatchBufferedBody(); - - /** - * Called when a request/response is beginning. A base routine happens first then a virtual - * dispatch is invoked. - */ - virtual void onMessageBegin() PURE; - - /** - * Called when URL data is received. - * @param data supplies the start address. - * @param length supplies the length. - */ - virtual void onUrl(const char* data, size_t length) PURE; - - /** - * Called when header field data is received. - * @param data supplies the start address. - * @param length supplies the length. - */ - void onHeaderField(const char* data, size_t length); - - /** - * Called when header value data is received. - * @param data supplies the start address. - * @param length supplies the length. - */ - void onHeaderValue(const char* data, size_t length); - - /** - * Called when headers are complete. 
A base routine happens first then a virtual dispatch is - * invoked. Note that this only applies to headers and NOT trailers. End of - * trailers are signaled via onMessageCompleteBase(). - * @return 0 if no error, 1 if there should be no body. - */ - int onHeadersCompleteBase(); - virtual int onHeadersComplete() PURE; - - /** - * Called to see if upgrade transition is allowed. - */ - virtual bool upgradeAllowed() const PURE; - - /** - * Called with body data is available for processing when either: - * - There is an accumulated partial body after the parser is done processing bytes read from the - * socket - * - The parser encounters the last byte of the body - * - The codec does a direct dispatch from the read buffer - * For performance reasons there is at most one call to onBody per call to HTTP/1 - * ConnectionImpl::dispatch call. - * @param data supplies the body data - */ - virtual void onBody(Buffer::Instance& data) PURE; - - /** - * Called when the request/response is complete. - */ - void onMessageCompleteBase(); - virtual void onMessageComplete() PURE; - - /** - * Called when accepting a chunk header. - */ - void onChunkHeader(bool is_final_chunk); - - /** - * @see onResetStreamBase(). - */ - virtual void onResetStream(StreamResetReason reason) PURE; - - /** - * Send a protocol error response to remote. - */ - virtual void sendProtocolError(absl::string_view details) PURE; - - /** - * Called when output_buffer_ or the underlying connection go from below a low watermark to over - * a high watermark. - */ - virtual void onAboveHighWatermark() PURE; - - /** - * Called when output_buffer_ or the underlying connection go from above a high watermark to - * below a low watermark. - */ - virtual void onBelowLowWatermark() PURE; - - /** - * Check if header name contains underscore character. - * The ServerConnectionImpl may drop header or reject request based on configuration. - */ - virtual void checkHeaderNameForUnderscores() {} - - static http_parser_settings settings_; - - HeaderParsingState header_parsing_state_{HeaderParsingState::Field}; - // Used to accumulate the HTTP message body during the current dispatch call. The accumulated body - // is pushed through the filter pipeline either at the end of the current dispatch call, or when - // the last byte of the body is processed (whichever happens first). - Buffer::OwnedImpl buffered_body_; - Buffer::WatermarkBuffer output_buffer_; - Protocol protocol_{Protocol::Http11}; - const uint32_t max_headers_kb_; - const uint32_t max_headers_count_; -}; - -/** - * Implementation of Http::ServerConnection for HTTP/1.1. - */ -class ServerConnectionImpl : public ServerConnection, public ConnectionImpl { -public: - ServerConnectionImpl(Network::Connection& connection, Http::Http1::CodecStats& stats, - ServerConnectionCallbacks& callbacks, const Http1Settings& settings, - uint32_t max_request_headers_kb, const uint32_t max_request_headers_count, - envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction - headers_with_underscores_action); - bool supportsHttp10() override { return codec_settings_.accept_http_10_; } - -protected: - /** - * An active HTTP/1.1 request. 
- */ - struct ActiveRequest { - ActiveRequest(ConnectionImpl& connection, Http::Http1::HeaderKeyFormatter* header_key_formatter) - : response_encoder_(connection, header_key_formatter) {} - - HeaderString request_url_; - RequestDecoder* request_decoder_{}; - ResponseEncoderImpl response_encoder_; - bool remote_complete_{}; - }; - absl::optional& activeRequest() { return active_request_; } - // ConnectionImpl - void onMessageComplete() override; - // Add the size of the request_url to the reported header size when processing request headers. - uint32_t getHeadersSize() override; - -private: - /** - * Manipulate the request's first line, parsing the url and converting to a relative path if - * necessary. Compute Host / :authority headers based on 7230#5.7 and 7230#6 - * - * @param is_connect true if the request has the CONNECT method - * @param headers the request's headers - * @throws CodecProtocolException on an invalid url in the request line - */ - void handlePath(RequestHeaderMap& headers, unsigned int method); - - // ConnectionImpl - void onEncodeComplete() override; - void onMessageBegin() override; - void onUrl(const char* data, size_t length) override; - int onHeadersComplete() override; - // If upgrade behavior is not allowed, the HCM will have sanitized the headers out. - bool upgradeAllowed() const override { return true; } - void onBody(Buffer::Instance& data) override; - void onResetStream(StreamResetReason reason) override; - void sendProtocolError(absl::string_view details) override; - void onAboveHighWatermark() override; - void onBelowLowWatermark() override; - HeaderMap& headersOrTrailers() override { - if (absl::holds_alternative(headers_or_trailers_)) { - return *absl::get(headers_or_trailers_); - } else { - return *absl::get(headers_or_trailers_); - } - } - RequestOrResponseHeaderMap& requestOrResponseHeaders() override { - return *absl::get(headers_or_trailers_); - } - void allocHeaders() override { - ASSERT(nullptr == absl::get(headers_or_trailers_)); - ASSERT(!processing_trailers_); - headers_or_trailers_.emplace(RequestHeaderMapImpl::create()); - } - void allocTrailers() override { - ASSERT(processing_trailers_); - if (!absl::holds_alternative(headers_or_trailers_)) { - headers_or_trailers_.emplace(RequestTrailerMapImpl::create()); - } - } - - void sendProtocolErrorOld(absl::string_view details); - - void releaseOutboundResponse(const Buffer::OwnedBufferFragmentImpl* fragment); - void maybeAddSentinelBufferFragment(Buffer::WatermarkBuffer& output_buffer) override; - void doFloodProtectionChecks() const; - void checkHeaderNameForUnderscores() override; - - ServerConnectionCallbacks& callbacks_; - absl::optional active_request_; - Http1Settings codec_settings_; - const Buffer::OwnedBufferFragmentImpl::Releasor response_buffer_releasor_; - uint32_t outbound_responses_{}; - // This defaults to 2, which functionally disables pipelining. If any users - // of Envoy wish to enable pipelining (which is dangerous and ill supported) - // we could make this configurable. - uint32_t max_outbound_responses_{}; - bool flood_protection_{}; - // TODO(mattklein123): This should be a member of ActiveRequest but this change needs dedicated - // thought as some of the reset and no header code paths make this difficult. Headers are - // populated on message begin. Trailers are populated on the first parsed trailer field (if - // trailers are enabled). The variant is reset to null headers on message complete for assertion - // purposes. 
- absl::variant headers_or_trailers_; - // The action to take when a request header name contains underscore characters. - const envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction - headers_with_underscores_action_; -}; - -/** - * Implementation of Http::ClientConnection for HTTP/1.1. - */ -class ClientConnectionImpl : public ClientConnection, public ConnectionImpl { -public: - ClientConnectionImpl(Network::Connection& connection, Http::Http1::CodecStats& stats, - ConnectionCallbacks& callbacks, const Http1Settings& settings, - const uint32_t max_response_headers_count); - - // Http::ClientConnection - RequestEncoder& newStream(ResponseDecoder& response_decoder) override; - -private: - struct PendingResponse { - PendingResponse(ConnectionImpl& connection, - Http::Http1::HeaderKeyFormatter* header_key_formatter, ResponseDecoder* decoder) - : encoder_(connection, header_key_formatter), decoder_(decoder) {} - - RequestEncoderImpl encoder_; - ResponseDecoder* decoder_; - }; - - bool cannotHaveBody(); - - // ConnectionImpl - void onEncodeComplete() override {} - void onMessageBegin() override {} - void onUrl(const char*, size_t) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } - int onHeadersComplete() override; - bool upgradeAllowed() const override; - void onBody(Buffer::Instance& data) override; - void onMessageComplete() override; - void onResetStream(StreamResetReason reason) override; - void sendProtocolError(absl::string_view details) override; - void onAboveHighWatermark() override; - void onBelowLowWatermark() override; - HeaderMap& headersOrTrailers() override { - if (absl::holds_alternative(headers_or_trailers_)) { - return *absl::get(headers_or_trailers_); - } else { - return *absl::get(headers_or_trailers_); - } - } - RequestOrResponseHeaderMap& requestOrResponseHeaders() override { - return *absl::get(headers_or_trailers_); - } - void allocHeaders() override { - ASSERT(nullptr == absl::get(headers_or_trailers_)); - ASSERT(!processing_trailers_); - headers_or_trailers_.emplace(ResponseHeaderMapImpl::create()); - } - void allocTrailers() override { - ASSERT(processing_trailers_); - if (!absl::holds_alternative(headers_or_trailers_)) { - headers_or_trailers_.emplace(ResponseTrailerMapImpl::create()); - } - } - - absl::optional pending_response_; - // TODO(mattklein123): The following bool tracks whether a pending response is complete before - // dispatching callbacks. This is needed so that pending_response_ stays valid during callbacks - // in order to access the stream, but to avoid invoking callbacks that shouldn't be called once - // the response is complete. The existence of this variable is hard to reason about and it should - // be combined with pending_response_ somehow in a follow up cleanup. - bool pending_response_done_{true}; - // Set true between receiving 100-Continue headers and receiving the spurious onMessageComplete. - bool ignore_message_complete_for_100_continue_{}; - // TODO(mattklein123): This should be a member of PendingResponse but this change needs dedicated - // thought as some of the reset and no header code paths make this difficult. Headers are - // populated on message begin. Trailers are populated when the switch to trailer processing is - // detected while parsing the first trailer field (if trailers are enabled). The variant is reset - // to null headers on message complete for assertion purposes. - absl::variant headers_or_trailers_; - - // The default limit of 80 KiB is the vanilla http_parser behaviour. 
- static constexpr uint32_t MAX_RESPONSE_HEADERS_KB = 80; -}; - -} // namespace Http1 -} // namespace Legacy -} // namespace Http -} // namespace Envoy diff --git a/source/common/http/http2/BUILD b/source/common/http/http2/BUILD index d2e4ed011311..dd0333ffa847 100644 --- a/source/common/http/http2/BUILD +++ b/source/common/http/http2/BUILD @@ -18,36 +18,6 @@ envoy_cc_library( ], ) -CODEC_LIB_DEPS = [ - ":codec_stats_lib", - ":metadata_decoder_lib", - ":metadata_encoder_lib", - "//include/envoy/event:deferred_deletable", - "//include/envoy/event:dispatcher_interface", - "//include/envoy/http:codec_interface", - "//include/envoy/http:codes_interface", - "//include/envoy/http:header_map_interface", - "//include/envoy/network:connection_interface", - "//include/envoy/stats:stats_interface", - "//source/common/buffer:buffer_lib", - "//source/common/buffer:watermark_buffer_lib", - "//source/common/common:assert_lib", - "//source/common/common:enum_to_int", - "//source/common/common:linked_object", - "//source/common/common:minimal_logger_lib", - "//source/common/common:utility_lib", - "//source/common/http:codec_helper_lib", - "//source/common/http:codes_lib", - "//source/common/http:exception_lib", - "//source/common/http:header_map_lib", - "//source/common/http:header_utility_lib", - "//source/common/http:headers_lib", - "//source/common/http:status_lib", - "//source/common/http:utility_lib", - "//source/common/runtime:runtime_features_lib", - "@envoy_api//envoy/config/core/v3:pkg_cc_proto", -] - envoy_cc_library( name = "codec_lib", srcs = ["codec_impl.cc"], @@ -58,23 +28,35 @@ envoy_cc_library( "abseil_inlined_vector", "abseil_algorithm", ], - deps = CODEC_LIB_DEPS, -) - -envoy_cc_library( - name = "codec_legacy_lib", - srcs = ["codec_impl_legacy.cc"], - hdrs = [ - "codec_impl.h", - "codec_impl_legacy.h", - ], - external_deps = [ - "nghttp2", - "abseil_optional", - "abseil_inlined_vector", - "abseil_algorithm", + deps = [ + ":codec_stats_lib", + ":metadata_decoder_lib", + ":metadata_encoder_lib", + "//include/envoy/event:deferred_deletable", + "//include/envoy/event:dispatcher_interface", + "//include/envoy/http:codec_interface", + "//include/envoy/http:codes_interface", + "//include/envoy/http:header_map_interface", + "//include/envoy/network:connection_interface", + "//include/envoy/stats:stats_interface", + "//source/common/buffer:buffer_lib", + "//source/common/buffer:watermark_buffer_lib", + "//source/common/common:assert_lib", + "//source/common/common:enum_to_int", + "//source/common/common:linked_object", + "//source/common/common:minimal_logger_lib", + "//source/common/common:utility_lib", + "//source/common/http:codec_helper_lib", + "//source/common/http:codes_lib", + "//source/common/http:exception_lib", + "//source/common/http:header_map_lib", + "//source/common/http:header_utility_lib", + "//source/common/http:headers_lib", + "//source/common/http:status_lib", + "//source/common/http:utility_lib", + "//source/common/runtime:runtime_features_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], - deps = CODEC_LIB_DEPS, ) # Separate library for some nghttp2 setup stuff to avoid having tests take a diff --git a/source/common/http/http2/codec_impl.cc b/source/common/http/http2/codec_impl.cc index 9c56c034cbd5..b25b8fa4bef5 100644 --- a/source/common/http/http2/codec_impl.cc +++ b/source/common/http/http2/codec_impl.cc @@ -87,7 +87,7 @@ void ProdNghttp2SessionFactory::init(nghttp2_session*, ConnectionImpl* connectio * Helper to remove const during a cast. 
nghttp2 takes non-const pointers for headers even though
 * it copies them.
 */
-template <typename T> static T* removeConst(const void* object) {
+template <typename T> static T* remove_const(const void* object) {
   return const_cast<T*>(reinterpret_cast<const T*>(object));
 }
 
@@ -120,8 +120,8 @@ static void insertHeader(std::vector<nghttp2_nv>& headers, const HeaderEntry& he
   }
   const absl::string_view header_key = header.key().getStringView();
   const absl::string_view header_value = header.value().getStringView();
-  headers.push_back({removeConst<uint8_t>(header_key.data()),
-                     removeConst<uint8_t>(header_value.data()), header_key.size(),
+  headers.push_back({remove_const<uint8_t>(header_key.data()),
+                     remove_const<uint8_t>(header_value.data()), header_key.size(),
                      header_value.size(), flags});
 }
 
@@ -244,7 +244,7 @@ void ConnectionImpl::StreamImpl::readDisable(bool disable) {
   } else {
     ASSERT(read_disable_count_ > 0);
     --read_disable_count_;
-    if (!buffersOverrun()) {
+    if (!buffers_overrun()) {
       nghttp2_session_consume(parent_.session_, stream_id_, unconsumed_bytes_);
       unconsumed_bytes_ = 0;
       parent_.sendPendingFrames();
@@ -560,7 +560,7 @@ int ConnectionImpl::onData(int32_t stream_id, const uint8_t* data, size_t len) {
   stream->pending_recv_data_.add(data, len);
   // Update the window to the peer unless some consumer of this stream's data has hit a flow control
   // limit and disabled reads on this stream
-  if (!stream->buffersOverrun()) {
+  if (!stream->buffers_overrun()) {
     nghttp2_session_consume(session_, stream_id, len);
   } else {
     stream->unconsumed_bytes_ += len;
diff --git a/source/common/http/http2/codec_impl.h b/source/common/http/http2/codec_impl.h
index 8bd5d8b3d1fd..cf848599c800 100644
--- a/source/common/http/http2/codec_impl.h
+++ b/source/common/http/http2/codec_impl.h
@@ -54,16 +54,14 @@ class ConnectionImpl;
 
 // Abstract nghttp2_session factory. Used to enable injection of factories for testing.
 class Nghttp2SessionFactory {
 public:
-  using ConnectionImplType = ConnectionImpl;
   virtual ~Nghttp2SessionFactory() = default;
 
   // Returns a new nghttp2_session to be used with |connection|.
   virtual nghttp2_session* create(const nghttp2_session_callbacks* callbacks,
-                                  ConnectionImplType* connection,
-                                  const nghttp2_option* options) PURE;
+                                  ConnectionImpl* connection, const nghttp2_option* options) PURE;
 
   // Initializes the |session|.
- virtual void init(nghttp2_session* session, ConnectionImplType* connection, + virtual void init(nghttp2_session* session, ConnectionImpl* connection, const envoy::config::core::v3::Http2ProtocolOptions& options) PURE; }; @@ -258,7 +256,7 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable 0; } + bool buffers_overrun() const { return read_disable_count_ > 0; } ConnectionImpl& parent_; int32_t stream_id_{-1}; @@ -520,13 +518,12 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable -#include -#include - -#include "envoy/event/dispatcher.h" -#include "envoy/http/codes.h" -#include "envoy/http/header_map.h" -#include "envoy/network/connection.h" - -#include "common/common/assert.h" -#include "common/common/cleanup.h" -#include "common/common/enum_to_int.h" -#include "common/common/fmt.h" -#include "common/common/utility.h" -#include "common/http/codes.h" -#include "common/http/exception.h" -#include "common/http/header_utility.h" -#include "common/http/headers.h" -#include "common/http/http2/codec_stats.h" -#include "common/http/utility.h" - -#include "absl/container/fixed_array.h" - -namespace Envoy { -namespace Http { -namespace Legacy { -namespace Http2 { - -class Http2ResponseCodeDetailValues { - // Invalid HTTP header field was received and stream is going to be - // closed. - const absl::string_view ng_http2_err_http_header_ = "http2.invalid.header.field"; - - // Violation in HTTP messaging rule. - const absl::string_view ng_http2_err_http_messaging_ = "http2.violation.of.messaging.rule"; - - // none of the above - const absl::string_view ng_http2_err_unknown_ = "http2.unknown.nghttp2.error"; - -public: - const absl::string_view errorDetails(int error_code) const { - switch (error_code) { - case NGHTTP2_ERR_HTTP_HEADER: - return ng_http2_err_http_header_; - case NGHTTP2_ERR_HTTP_MESSAGING: - return ng_http2_err_http_messaging_; - default: - return ng_http2_err_unknown_; - } - } -}; - -using Http2ResponseCodeDetails = ConstSingleton; -using Http::Http2::CodecStats; -using Http::Http2::MetadataDecoder; -using Http::Http2::MetadataEncoder; - -bool Utility::reconstituteCrumbledCookies(const HeaderString& key, const HeaderString& value, - HeaderString& cookies) { - if (key != Headers::get().Cookie.get().c_str()) { - return false; - } - - if (!cookies.empty()) { - cookies.append("; ", 2); - } - - const absl::string_view value_view = value.getStringView(); - cookies.append(value_view.data(), value_view.size()); - return true; -} - -ConnectionImpl::Http2Callbacks ConnectionImpl::http2_callbacks_; - -nghttp2_session* ProdNghttp2SessionFactory::create(const nghttp2_session_callbacks* callbacks, - ConnectionImpl* connection, - const nghttp2_option* options) { - nghttp2_session* session; - nghttp2_session_client_new2(&session, callbacks, connection, options); - return session; -} - -void ProdNghttp2SessionFactory::init(nghttp2_session*, ConnectionImpl* connection, - const envoy::config::core::v3::Http2ProtocolOptions& options) { - connection->sendSettings(options, true); -} - -/** - * Helper to remove const during a cast. nghttp2 takes non-const pointers for headers even though - * it copies them. 
- */ -template static T* removeConst(const void* object) { - return const_cast(reinterpret_cast(object)); -} - -ConnectionImpl::StreamImpl::StreamImpl(ConnectionImpl& parent, uint32_t buffer_limit) - : parent_(parent), local_end_stream_sent_(false), remote_end_stream_(false), - data_deferred_(false), waiting_for_non_informational_headers_(false), - pending_receive_buffer_high_watermark_called_(false), - pending_send_buffer_high_watermark_called_(false), reset_due_to_messaging_error_(false) { - parent_.stats_.streams_active_.inc(); - if (buffer_limit > 0) { - setWriteBufferWatermarks(buffer_limit / 2, buffer_limit); - } -} - -ConnectionImpl::StreamImpl::~StreamImpl() { ASSERT(stream_idle_timer_ == nullptr); } - -void ConnectionImpl::StreamImpl::destroy() { - disarmStreamIdleTimer(); - parent_.stats_.streams_active_.dec(); - parent_.stats_.pending_send_bytes_.sub(pending_send_data_.length()); -} - -static void insertHeader(std::vector& headers, const HeaderEntry& header) { - uint8_t flags = 0; - if (header.key().isReference()) { - flags |= NGHTTP2_NV_FLAG_NO_COPY_NAME; - } - if (header.value().isReference()) { - flags |= NGHTTP2_NV_FLAG_NO_COPY_VALUE; - } - const absl::string_view header_key = header.key().getStringView(); - const absl::string_view header_value = header.value().getStringView(); - headers.push_back({removeConst(header_key.data()), - removeConst(header_value.data()), header_key.size(), - header_value.size(), flags}); -} - -void ConnectionImpl::StreamImpl::buildHeaders(std::vector& final_headers, - const HeaderMap& headers) { - final_headers.reserve(headers.size()); - headers.iterate( - [](const HeaderEntry& header, void* context) -> HeaderMap::Iterate { - std::vector* final_headers = static_cast*>(context); - insertHeader(*final_headers, header); - return HeaderMap::Iterate::Continue; - }, - &final_headers); -} - -void ConnectionImpl::ServerStreamImpl::encode100ContinueHeaders(const ResponseHeaderMap& headers) { - ASSERT(headers.Status()->value() == "100"); - encodeHeaders(headers, false); -} - -void ConnectionImpl::StreamImpl::encodeHeadersBase(const std::vector& final_headers, - bool end_stream) { - nghttp2_data_provider provider; - if (!end_stream) { - provider.source.ptr = this; - provider.read_callback = [](nghttp2_session*, int32_t, uint8_t*, size_t length, - uint32_t* data_flags, nghttp2_data_source* source, - void*) -> ssize_t { - return static_cast(source->ptr)->onDataSourceRead(length, data_flags); - }; - } - - local_end_stream_ = end_stream; - submitHeaders(final_headers, end_stream ? nullptr : &provider); - parent_.sendPendingFrames(); -} - -void ConnectionImpl::ClientStreamImpl::encodeHeaders(const RequestHeaderMap& headers, - bool end_stream) { - // This must exist outside of the scope of isUpgrade as the underlying memory is - // needed until encodeHeadersBase has been called. - std::vector final_headers; - Http::RequestHeaderMapPtr modified_headers; - if (Http::Utility::isUpgrade(headers)) { - modified_headers = createHeaderMap(headers); - upgrade_type_ = std::string(headers.getUpgradeValue()); - Http::Utility::transformUpgradeRequestFromH1toH2(*modified_headers); - buildHeaders(final_headers, *modified_headers); - } else if (headers.Method() && headers.Method()->value() == "CONNECT") { - // If this is not an upgrade style connect (above branch) it is a bytestream - // connect and should have :path and :protocol set accordingly - // As HTTP/1.1 does not require a path for CONNECT, we may have to add one - // if shifting codecs. 
For now, default to "/" - this can be made - // configurable if necessary. - // https://tools.ietf.org/html/draft-kinnear-httpbis-http2-transport-02 - modified_headers = createHeaderMap(headers); - modified_headers->setProtocol(Headers::get().ProtocolValues.Bytestream); - if (!headers.Path()) { - modified_headers->setPath("/"); - } - buildHeaders(final_headers, *modified_headers); - } else { - buildHeaders(final_headers, headers); - } - encodeHeadersBase(final_headers, end_stream); -} - -void ConnectionImpl::ServerStreamImpl::encodeHeaders(const ResponseHeaderMap& headers, - bool end_stream) { - // The contract is that client codecs must ensure that :status is present. - ASSERT(headers.Status() != nullptr); - - // This must exist outside of the scope of isUpgrade as the underlying memory is - // needed until encodeHeadersBase has been called. - std::vector final_headers; - Http::ResponseHeaderMapPtr modified_headers; - if (Http::Utility::isUpgrade(headers)) { - modified_headers = createHeaderMap(headers); - Http::Utility::transformUpgradeResponseFromH1toH2(*modified_headers); - buildHeaders(final_headers, *modified_headers); - } else { - buildHeaders(final_headers, headers); - } - encodeHeadersBase(final_headers, end_stream); -} - -void ConnectionImpl::StreamImpl::encodeTrailersBase(const HeaderMap& trailers) { - ASSERT(!local_end_stream_); - local_end_stream_ = true; - if (pending_send_data_.length() > 0) { - // In this case we want trailers to come after we release all pending body data that is - // waiting on window updates. We need to save the trailers so that we can emit them later. - ASSERT(!pending_trailers_to_encode_); - pending_trailers_to_encode_ = cloneTrailers(trailers); - createPendingFlushTimer(); - } else { - submitTrailers(trailers); - parent_.sendPendingFrames(); - } -} - -void ConnectionImpl::StreamImpl::encodeMetadata(const MetadataMapVector& metadata_map_vector) { - ASSERT(parent_.allow_metadata_); - MetadataEncoder& metadata_encoder = getMetadataEncoder(); - if (!metadata_encoder.createPayload(metadata_map_vector)) { - return; - } - for (uint8_t flags : metadata_encoder.payloadFrameFlagBytes()) { - submitMetadata(flags); - } - parent_.sendPendingFrames(); -} - -void ConnectionImpl::StreamImpl::readDisable(bool disable) { - ENVOY_CONN_LOG(debug, "Stream {} {}, unconsumed_bytes {} read_disable_count {}", - parent_.connection_, stream_id_, (disable ? 
"disabled" : "enabled"), - unconsumed_bytes_, read_disable_count_); - if (disable) { - ++read_disable_count_; - } else { - ASSERT(read_disable_count_ > 0); - --read_disable_count_; - if (!buffersOverrun()) { - nghttp2_session_consume(parent_.session_, stream_id_, unconsumed_bytes_); - unconsumed_bytes_ = 0; - parent_.sendPendingFrames(); - } - } -} - -void ConnectionImpl::StreamImpl::pendingRecvBufferHighWatermark() { - ENVOY_CONN_LOG(debug, "recv buffer over limit ", parent_.connection_); - ASSERT(!pending_receive_buffer_high_watermark_called_); - pending_receive_buffer_high_watermark_called_ = true; - readDisable(true); -} - -void ConnectionImpl::StreamImpl::pendingRecvBufferLowWatermark() { - ENVOY_CONN_LOG(debug, "recv buffer under limit ", parent_.connection_); - ASSERT(pending_receive_buffer_high_watermark_called_); - pending_receive_buffer_high_watermark_called_ = false; - readDisable(false); -} - -void ConnectionImpl::ClientStreamImpl::decodeHeaders(bool allow_waiting_for_informational_headers) { - auto& headers = absl::get(headers_or_trailers_); - if (allow_waiting_for_informational_headers && - CodeUtility::is1xx(Http::Utility::getResponseStatus(*headers))) { - waiting_for_non_informational_headers_ = true; - } - - if (!upgrade_type_.empty() && headers->Status()) { - Http::Utility::transformUpgradeResponseFromH2toH1(*headers, upgrade_type_); - } - - if (headers->Status()->value() == "100") { - ASSERT(!remote_end_stream_); - response_decoder_.decode100ContinueHeaders(std::move(headers)); - } else { - response_decoder_.decodeHeaders(std::move(headers), remote_end_stream_); - } -} - -void ConnectionImpl::ClientStreamImpl::decodeTrailers() { - response_decoder_.decodeTrailers( - std::move(absl::get(headers_or_trailers_))); -} - -void ConnectionImpl::ServerStreamImpl::decodeHeaders(bool allow_waiting_for_informational_headers) { - ASSERT(!allow_waiting_for_informational_headers); - auto& headers = absl::get(headers_or_trailers_); - if (Http::Utility::isH2UpgradeRequest(*headers)) { - Http::Utility::transformUpgradeRequestFromH2toH1(*headers); - } - request_decoder_->decodeHeaders(std::move(headers), remote_end_stream_); -} - -void ConnectionImpl::ServerStreamImpl::decodeTrailers() { - request_decoder_->decodeTrailers( - std::move(absl::get(headers_or_trailers_))); -} - -void ConnectionImpl::StreamImpl::pendingSendBufferHighWatermark() { - ENVOY_CONN_LOG(debug, "send buffer over limit ", parent_.connection_); - ASSERT(!pending_send_buffer_high_watermark_called_); - pending_send_buffer_high_watermark_called_ = true; - runHighWatermarkCallbacks(); -} - -void ConnectionImpl::StreamImpl::pendingSendBufferLowWatermark() { - ENVOY_CONN_LOG(debug, "send buffer under limit ", parent_.connection_); - ASSERT(pending_send_buffer_high_watermark_called_); - pending_send_buffer_high_watermark_called_ = false; - runLowWatermarkCallbacks(); -} - -void ConnectionImpl::StreamImpl::saveHeader(HeaderString&& name, HeaderString&& value) { - if (!Utility::reconstituteCrumbledCookies(name, value, cookies_)) { - headers().addViaMove(std::move(name), std::move(value)); - } -} - -void ConnectionImpl::StreamImpl::submitTrailers(const HeaderMap& trailers) { - std::vector final_headers; - buildHeaders(final_headers, trailers); - int rc = nghttp2_submit_trailer(parent_.session_, stream_id_, final_headers.data(), - final_headers.size()); - ASSERT(rc == 0); -} - -void ConnectionImpl::StreamImpl::submitMetadata(uint8_t flags) { - ASSERT(stream_id_ > 0); - const int result = - nghttp2_submit_extension(parent_.session_, 
METADATA_FRAME_TYPE, flags, stream_id_, nullptr); - ASSERT(result == 0); -} - -ssize_t ConnectionImpl::StreamImpl::onDataSourceRead(uint64_t length, uint32_t* data_flags) { - if (pending_send_data_.length() == 0 && !local_end_stream_) { - ASSERT(!data_deferred_); - data_deferred_ = true; - return NGHTTP2_ERR_DEFERRED; - } else { - *data_flags |= NGHTTP2_DATA_FLAG_NO_COPY; - if (local_end_stream_ && pending_send_data_.length() <= length) { - *data_flags |= NGHTTP2_DATA_FLAG_EOF; - if (pending_trailers_to_encode_) { - // We need to tell the library to not set end stream so that we can emit the trailers. - *data_flags |= NGHTTP2_DATA_FLAG_NO_END_STREAM; - submitTrailers(*pending_trailers_to_encode_); - pending_trailers_to_encode_.reset(); - } - } - - return std::min(length, pending_send_data_.length()); - } -} - -int ConnectionImpl::StreamImpl::onDataSourceSend(const uint8_t* framehd, size_t length) { - // In this callback we are writing out a raw DATA frame without copying. nghttp2 assumes that we - // "just know" that the frame header is 9 bytes. - // https://nghttp2.org/documentation/types.html#c.nghttp2_send_data_callback - static const uint64_t FRAME_HEADER_SIZE = 9; - - parent_.outbound_data_frames_++; - - Buffer::OwnedImpl output; - if (!parent_.addOutboundFrameFragment(output, framehd, FRAME_HEADER_SIZE)) { - ENVOY_CONN_LOG(debug, "error sending data frame: Too many frames in the outbound queue", - parent_.connection_); - return NGHTTP2_ERR_FLOODED; - } - - parent_.stats_.pending_send_bytes_.sub(length); - output.move(pending_send_data_, length); - parent_.connection_.write(output, false); - return 0; -} - -void ConnectionImpl::ClientStreamImpl::submitHeaders(const std::vector& final_headers, - nghttp2_data_provider* provider) { - ASSERT(stream_id_ == -1); - stream_id_ = nghttp2_submit_request(parent_.session_, nullptr, final_headers.data(), - final_headers.size(), provider, base()); - ASSERT(stream_id_ > 0); -} - -void ConnectionImpl::ServerStreamImpl::submitHeaders(const std::vector& final_headers, - nghttp2_data_provider* provider) { - ASSERT(stream_id_ != -1); - int rc = nghttp2_submit_response(parent_.session_, stream_id_, final_headers.data(), - final_headers.size(), provider); - ASSERT(rc == 0); -} - -void ConnectionImpl::ServerStreamImpl::createPendingFlushTimer() { - ASSERT(stream_idle_timer_ == nullptr); - if (stream_idle_timeout_.count() > 0) { - stream_idle_timer_ = - parent_.connection_.dispatcher().createTimer([this] { onPendingFlushTimer(); }); - stream_idle_timer_->enableTimer(stream_idle_timeout_); - } -} - -void ConnectionImpl::StreamImpl::onPendingFlushTimer() { - ENVOY_CONN_LOG(debug, "pending stream flush timeout", parent_.connection_); - stream_idle_timer_.reset(); - parent_.stats_.tx_flush_timeout_.inc(); - ASSERT(local_end_stream_ && !local_end_stream_sent_); - // This will emit a reset frame for this stream and close the stream locally. No reset callbacks - // will be run because higher layers think the stream is already finished. 
- resetStreamWorker(StreamResetReason::LocalReset); - parent_.sendPendingFrames(); -} - -void ConnectionImpl::StreamImpl::encodeData(Buffer::Instance& data, bool end_stream) { - ASSERT(!local_end_stream_); - local_end_stream_ = end_stream; - parent_.stats_.pending_send_bytes_.add(data.length()); - pending_send_data_.move(data); - if (data_deferred_) { - int rc = nghttp2_session_resume_data(parent_.session_, stream_id_); - ASSERT(rc == 0); - - data_deferred_ = false; - } - - parent_.sendPendingFrames(); - if (local_end_stream_ && pending_send_data_.length() > 0) { - createPendingFlushTimer(); - } -} - -void ConnectionImpl::StreamImpl::resetStream(StreamResetReason reason) { - // Higher layers expect calling resetStream() to immediately raise reset callbacks. - runResetCallbacks(reason); - - // If we submit a reset, nghttp2 will cancel outbound frames that have not yet been sent. - // We want these frames to go out so we defer the reset until we send all of the frames that - // end the local stream. - if (local_end_stream_ && !local_end_stream_sent_) { - parent_.pending_deferred_reset_ = true; - deferred_reset_ = reason; - ENVOY_CONN_LOG(trace, "deferred reset stream", parent_.connection_); - } else { - resetStreamWorker(reason); - } - - // We must still call sendPendingFrames() in both the deferred and not deferred path. This forces - // the cleanup logic to run which will reset the stream in all cases if all data frames could not - // be sent. - parent_.sendPendingFrames(); -} - -void ConnectionImpl::StreamImpl::resetStreamWorker(StreamResetReason reason) { - int rc = nghttp2_submit_rst_stream(parent_.session_, NGHTTP2_FLAG_NONE, stream_id_, - reason == StreamResetReason::LocalRefusedStreamReset - ? NGHTTP2_REFUSED_STREAM - : NGHTTP2_NO_ERROR); - ASSERT(rc == 0); -} - -MetadataEncoder& ConnectionImpl::StreamImpl::getMetadataEncoder() { - if (metadata_encoder_ == nullptr) { - metadata_encoder_ = std::make_unique(); - } - return *metadata_encoder_; -} - -MetadataDecoder& ConnectionImpl::StreamImpl::getMetadataDecoder() { - if (metadata_decoder_ == nullptr) { - auto cb = [this](MetadataMapPtr&& metadata_map_ptr) { - this->onMetadataDecoded(std::move(metadata_map_ptr)); - }; - metadata_decoder_ = std::make_unique(cb); - } - return *metadata_decoder_; -} - -void ConnectionImpl::StreamImpl::onMetadataDecoded(MetadataMapPtr&& metadata_map_ptr) { - decoder().decodeMetadata(std::move(metadata_map_ptr)); -} - -ConnectionImpl::ConnectionImpl(Network::Connection& connection, CodecStats& stats, - const envoy::config::core::v3::Http2ProtocolOptions& http2_options, - const uint32_t max_headers_kb, const uint32_t max_headers_count) - : stats_(stats), connection_(connection), max_headers_kb_(max_headers_kb), - max_headers_count_(max_headers_count), - per_stream_buffer_limit_(http2_options.initial_stream_window_size().value()), - stream_error_on_invalid_http_messaging_( - http2_options.stream_error_on_invalid_http_messaging()), - flood_detected_(false), max_outbound_frames_(http2_options.max_outbound_frames().value()), - frame_buffer_releasor_([this]() { releaseOutboundFrame(); }), - max_outbound_control_frames_(http2_options.max_outbound_control_frames().value()), - control_frame_buffer_releasor_([this]() { releaseOutboundControlFrame(); }), - max_consecutive_inbound_frames_with_empty_payload_( - http2_options.max_consecutive_inbound_frames_with_empty_payload().value()), - max_inbound_priority_frames_per_stream_( - http2_options.max_inbound_priority_frames_per_stream().value()), - 
max_inbound_window_update_frames_per_data_frame_sent_( - http2_options.max_inbound_window_update_frames_per_data_frame_sent().value()), - dispatching_(false), raised_goaway_(false), pending_deferred_reset_(false) {} - -ConnectionImpl::~ConnectionImpl() { - for (const auto& stream : active_streams_) { - stream->destroy(); - } - nghttp2_session_del(session_); -} - -Http::Status ConnectionImpl::dispatch(Buffer::Instance& data) { - // TODO(#10878): Remove this wrapper when exception removal is complete. innerDispatch may either - // throw an exception or return an error status. The utility wrapper catches exceptions and - // converts them to error statuses. - return Http::Utility::exceptionToStatus( - [&](Buffer::Instance& data) -> Http::Status { return innerDispatch(data); }, data); -} - -Http::Status ConnectionImpl::innerDispatch(Buffer::Instance& data) { - ENVOY_CONN_LOG(trace, "dispatching {} bytes", connection_, data.length()); - // Make sure that dispatching_ is set to false after dispatching, even when - // ConnectionImpl::dispatch returns early or throws an exception (consider removing if there is a - // single return after exception removal (#10878)). - Cleanup cleanup([this]() { dispatching_ = false; }); - for (const Buffer::RawSlice& slice : data.getRawSlices()) { - dispatching_ = true; - ssize_t rc = - nghttp2_session_mem_recv(session_, static_cast(slice.mem_), slice.len_); - if (rc == NGHTTP2_ERR_FLOODED || flood_detected_) { - throw FrameFloodException( - "Flooding was detected in this HTTP/2 session, and it must be closed"); - } - if (rc != static_cast(slice.len_)) { - throw CodecProtocolException(fmt::format("{}", nghttp2_strerror(rc))); - } - - dispatching_ = false; - } - - ENVOY_CONN_LOG(trace, "dispatched {} bytes", connection_, data.length()); - data.drain(data.length()); - - // Decoding incoming frames can generate outbound frames so flush pending. - sendPendingFrames(); - return Http::okStatus(); -} - -ConnectionImpl::StreamImpl* ConnectionImpl::getStream(int32_t stream_id) { - return static_cast(nghttp2_session_get_stream_user_data(session_, stream_id)); -} - -int ConnectionImpl::onData(int32_t stream_id, const uint8_t* data, size_t len) { - StreamImpl* stream = getStream(stream_id); - // If this results in buffering too much data, the watermark buffer will call - // pendingRecvBufferHighWatermark, resulting in ++read_disable_count_ - stream->pending_recv_data_.add(data, len); - // Update the window to the peer unless some consumer of this stream's data has hit a flow control - // limit and disabled reads on this stream - if (!stream->buffersOverrun()) { - nghttp2_session_consume(session_, stream_id, len); - } else { - stream->unconsumed_bytes_ += len; - } - return 0; -} - -void ConnectionImpl::goAway() { - int rc = nghttp2_submit_goaway(session_, NGHTTP2_FLAG_NONE, - nghttp2_session_get_last_proc_stream_id(session_), - NGHTTP2_NO_ERROR, nullptr, 0); - ASSERT(rc == 0); - - sendPendingFrames(); -} - -void ConnectionImpl::shutdownNotice() { - int rc = nghttp2_submit_shutdown_notice(session_); - ASSERT(rc == 0); - - sendPendingFrames(); -} - -int ConnectionImpl::onBeforeFrameReceived(const nghttp2_frame_hd* hd) { - ENVOY_CONN_LOG(trace, "about to recv frame type={}, flags={}", connection_, - static_cast(hd->type), static_cast(hd->flags)); - - // Track all the frames without padding here, since this is the only callback we receive - // for some of them (e.g. CONTINUATION frame, frames sent on closed streams, etc.). 
- // HEADERS frame is tracked in onBeginHeaders(), DATA frame is tracked in onFrameReceived(). - if (hd->type != NGHTTP2_HEADERS && hd->type != NGHTTP2_DATA) { - if (!trackInboundFrames(hd, 0)) { - return NGHTTP2_ERR_FLOODED; - } - } - - return 0; -} - -ABSL_MUST_USE_RESULT -enum GoAwayErrorCode ngHttp2ErrorCodeToErrorCode(uint32_t code) noexcept { - switch (code) { - case NGHTTP2_NO_ERROR: - return GoAwayErrorCode::NoError; - default: - return GoAwayErrorCode::Other; - } -} - -int ConnectionImpl::onFrameReceived(const nghttp2_frame* frame) { - ENVOY_CONN_LOG(trace, "recv frame type={}", connection_, static_cast(frame->hd.type)); - - // onFrameReceived() is called with a complete HEADERS frame assembled from all the HEADERS - // and CONTINUATION frames, but we track them separately: HEADERS frames in onBeginHeaders() - // and CONTINUATION frames in onBeforeFrameReceived(). - ASSERT(frame->hd.type != NGHTTP2_CONTINUATION); - - if (frame->hd.type == NGHTTP2_DATA) { - if (!trackInboundFrames(&frame->hd, frame->data.padlen)) { - return NGHTTP2_ERR_FLOODED; - } - } - - // Only raise GOAWAY once, since we don't currently expose stream information. Shutdown - // notifications are the same as a normal GOAWAY. - // TODO: handle multiple GOAWAY frames. - if (frame->hd.type == NGHTTP2_GOAWAY && !raised_goaway_) { - ASSERT(frame->hd.stream_id == 0); - raised_goaway_ = true; - callbacks().onGoAway(ngHttp2ErrorCodeToErrorCode(frame->goaway.error_code)); - return 0; - } - - if (frame->hd.type == NGHTTP2_SETTINGS && frame->hd.flags == NGHTTP2_FLAG_NONE) { - onSettingsForTest(frame->settings); - } - - StreamImpl* stream = getStream(frame->hd.stream_id); - if (!stream) { - return 0; - } - - switch (frame->hd.type) { - case NGHTTP2_HEADERS: { - stream->remote_end_stream_ = frame->hd.flags & NGHTTP2_FLAG_END_STREAM; - if (!stream->cookies_.empty()) { - HeaderString key(Headers::get().Cookie); - stream->headers().addViaMove(std::move(key), std::move(stream->cookies_)); - } - - switch (frame->headers.cat) { - case NGHTTP2_HCAT_RESPONSE: - case NGHTTP2_HCAT_REQUEST: { - stream->decodeHeaders(frame->headers.cat == NGHTTP2_HCAT_RESPONSE); - break; - } - - case NGHTTP2_HCAT_HEADERS: { - // It's possible that we are waiting to send a deferred reset, so only raise headers/trailers - // if local is not complete. - if (!stream->deferred_reset_) { - if (!stream->waiting_for_non_informational_headers_) { - if (!stream->remote_end_stream_) { - // This indicates we have received more headers frames than Envoy - // supports. Even if this is valid HTTP (something like 103 early hints) fail here - // rather than trying to push unexpected headers through the Envoy pipeline as that - // will likely result in Envoy crashing. - // It would be cleaner to reset the stream rather than reset the/ entire connection but - // it's also slightly more dangerous so currently we err on the side of safety. - stats_.too_many_header_frames_.inc(); - throw CodecProtocolException("Unexpected 'trailers' with no end stream."); - } else { - stream->decodeTrailers(); - } - } else { - ASSERT(!nghttp2_session_check_server_session(session_)); - stream->waiting_for_non_informational_headers_ = false; - - // Even if we have :status 100 in the client case in a response, when - // we received a 1xx to start out with, nghttp2 message checking - // guarantees proper flow here. - stream->decodeHeaders(false); - } - } - - break; - } - - default: - // We do not currently support push. 
- NOT_IMPLEMENTED_GCOVR_EXCL_LINE; - } - - break; - } - case NGHTTP2_DATA: { - stream->remote_end_stream_ = frame->hd.flags & NGHTTP2_FLAG_END_STREAM; - - // It's possible that we are waiting to send a deferred reset, so only raise data if local - // is not complete. - if (!stream->deferred_reset_) { - stream->decoder().decodeData(stream->pending_recv_data_, stream->remote_end_stream_); - } - - stream->pending_recv_data_.drain(stream->pending_recv_data_.length()); - break; - } - case NGHTTP2_RST_STREAM: { - ENVOY_CONN_LOG(trace, "remote reset: {}", connection_, frame->rst_stream.error_code); - stats_.rx_reset_.inc(); - break; - } - } - - return 0; -} - -int ConnectionImpl::onFrameSend(const nghttp2_frame* frame) { - // The nghttp2 library does not cleanly give us a way to determine whether we received invalid - // data from our peer. Sometimes it raises the invalid frame callback, and sometimes it does not. - // In all cases however it will attempt to send a GOAWAY frame with an error status. If we see - // an outgoing frame of this type, we will return an error code so that we can abort execution. - ENVOY_CONN_LOG(trace, "sent frame type={}", connection_, static_cast(frame->hd.type)); - switch (frame->hd.type) { - case NGHTTP2_GOAWAY: { - ENVOY_CONN_LOG(debug, "sent goaway code={}", connection_, frame->goaway.error_code); - if (frame->goaway.error_code != NGHTTP2_NO_ERROR) { - // TODO(mattklein123): Returning this error code abandons standard nghttp2 frame accounting. - // As such, it is not reliable to call sendPendingFrames() again after this and we assume - // that the connection is going to get torn down immediately. One byproduct of this is that - // we need to cancel all pending flush stream timeouts since they can race with connection - // teardown. As part of the work to remove exceptions we should aim to clean up all of this - // error handling logic and only handle this type of case at the end of dispatch. - for (auto& stream : active_streams_) { - stream->disarmStreamIdleTimer(); - } - return NGHTTP2_ERR_CALLBACK_FAILURE; - } - break; - } - - case NGHTTP2_RST_STREAM: { - ENVOY_CONN_LOG(debug, "sent reset code={}", connection_, frame->rst_stream.error_code); - stats_.tx_reset_.inc(); - break; - } - - case NGHTTP2_HEADERS: - case NGHTTP2_DATA: { - StreamImpl* stream = getStream(frame->hd.stream_id); - stream->local_end_stream_sent_ = frame->hd.flags & NGHTTP2_FLAG_END_STREAM; - break; - } - } - - return 0; -} - -int ConnectionImpl::onError(absl::string_view error) { - ENVOY_CONN_LOG(debug, "invalid http2: {}", connection_, error); - return 0; -} - -int ConnectionImpl::onInvalidFrame(int32_t stream_id, int error_code) { - ENVOY_CONN_LOG(debug, "invalid frame: {} on stream {}", connection_, nghttp2_strerror(error_code), - stream_id); - - // Set details of error_code in the stream whenever we have one. - StreamImpl* stream = getStream(stream_id); - if (stream != nullptr) { - stream->setDetails(Http2ResponseCodeDetails::get().errorDetails(error_code)); - } - - if (error_code == NGHTTP2_ERR_HTTP_HEADER || error_code == NGHTTP2_ERR_HTTP_MESSAGING) { - stats_.rx_messaging_error_.inc(); - - if (stream_error_on_invalid_http_messaging_) { - // The stream is about to be closed due to an invalid header or messaging. Don't kill the - // entire connection if one stream has bad headers or messaging. - if (stream != nullptr) { - // See comment below in onStreamClose() for why we do this. 
- stream->reset_due_to_messaging_error_ = true; - } - return 0; - } - } - - // Cause dispatch to return with an error code. - return NGHTTP2_ERR_CALLBACK_FAILURE; -} - -int ConnectionImpl::onBeforeFrameSend(const nghttp2_frame* frame) { - ENVOY_CONN_LOG(trace, "about to send frame type={}, flags={}", connection_, - static_cast(frame->hd.type), static_cast(frame->hd.flags)); - ASSERT(!is_outbound_flood_monitored_control_frame_); - // Flag flood monitored outbound control frames. - is_outbound_flood_monitored_control_frame_ = - ((frame->hd.type == NGHTTP2_PING || frame->hd.type == NGHTTP2_SETTINGS) && - frame->hd.flags & NGHTTP2_FLAG_ACK) || - frame->hd.type == NGHTTP2_RST_STREAM; - return 0; -} - -void ConnectionImpl::incrementOutboundFrameCount(bool is_outbound_flood_monitored_control_frame) { - ++outbound_frames_; - if (is_outbound_flood_monitored_control_frame) { - ++outbound_control_frames_; - } - checkOutboundQueueLimits(); -} - -bool ConnectionImpl::addOutboundFrameFragment(Buffer::OwnedImpl& output, const uint8_t* data, - size_t length) { - // Reset the outbound frame type (set in the onBeforeFrameSend callback) since the - // onBeforeFrameSend callback is not called for DATA frames. - bool is_outbound_flood_monitored_control_frame = false; - std::swap(is_outbound_flood_monitored_control_frame, is_outbound_flood_monitored_control_frame_); - try { - incrementOutboundFrameCount(is_outbound_flood_monitored_control_frame); - } catch (const FrameFloodException&) { - return false; - } - - output.add(data, length); - output.addDrainTracker(is_outbound_flood_monitored_control_frame ? control_frame_buffer_releasor_ - : frame_buffer_releasor_); - return true; -} - -void ConnectionImpl::releaseOutboundFrame() { - ASSERT(outbound_frames_ >= 1); - --outbound_frames_; -} - -void ConnectionImpl::releaseOutboundControlFrame() { - ASSERT(outbound_control_frames_ >= 1); - --outbound_control_frames_; - releaseOutboundFrame(); -} - -ssize_t ConnectionImpl::onSend(const uint8_t* data, size_t length) { - ENVOY_CONN_LOG(trace, "send data: bytes={}", connection_, length); - Buffer::OwnedImpl buffer; - if (!addOutboundFrameFragment(buffer, data, length)) { - ENVOY_CONN_LOG(debug, "error sending frame: Too many frames in the outbound queue.", - connection_); - return NGHTTP2_ERR_FLOODED; - } - - // While the buffer is transient the fragment it contains will be moved into the - // write_buffer_ of the underlying connection_ by the write method below. - // This creates lifetime dependency between the write_buffer_ of the underlying connection - // and the codec object. Specifically the write_buffer_ MUST be either fully drained or - // deleted before the codec object is deleted. This is presently guaranteed by the - // destruction order of the Network::ConnectionImpl object where write_buffer_ is - // destroyed before the filter_manager_ which owns the codec through Http::ConnectionManagerImpl. - connection_.write(buffer, false); - return length; -} - -int ConnectionImpl::onStreamClose(int32_t stream_id, uint32_t error_code) { - StreamImpl* stream = getStream(stream_id); - if (stream) { - ENVOY_CONN_LOG(debug, "stream closed: {}", connection_, error_code); - if (!stream->remote_end_stream_ || !stream->local_end_stream_) { - StreamResetReason reason; - if (stream->reset_due_to_messaging_error_) { - // Unfortunately, the nghttp2 API makes it incredibly difficult to clearly understand - // the flow of resets. I.e., did the reset originate locally? Was it remote? 
Here, - // we attempt to track cases in which we sent a reset locally due to an invalid frame - // received from the remote. We only do that in two cases currently (HTTP messaging layer - // errors from https://tools.ietf.org/html/rfc7540#section-8 which nghttp2 is very strict - // about). In other cases we treat invalid frames as a protocol error and just kill - // the connection. - reason = StreamResetReason::LocalReset; - } else { - reason = error_code == NGHTTP2_REFUSED_STREAM ? StreamResetReason::RemoteRefusedStreamReset - : StreamResetReason::RemoteReset; - } - - stream->runResetCallbacks(reason); - } - - stream->destroy(); - connection_.dispatcher().deferredDelete(stream->removeFromList(active_streams_)); - // Any unconsumed data must be consumed before the stream is deleted. - // nghttp2 does not appear to track this internally, and any stream deleted - // with outstanding window will contribute to a slow connection-window leak. - nghttp2_session_consume(session_, stream_id, stream->unconsumed_bytes_); - stream->unconsumed_bytes_ = 0; - nghttp2_session_set_stream_user_data(session_, stream->stream_id_, nullptr); - } - - return 0; -} - -int ConnectionImpl::onMetadataReceived(int32_t stream_id, const uint8_t* data, size_t len) { - ENVOY_CONN_LOG(trace, "recv {} bytes METADATA", connection_, len); - - StreamImpl* stream = getStream(stream_id); - if (!stream) { - return 0; - } - - bool success = stream->getMetadataDecoder().receiveMetadata(data, len); - return success ? 0 : NGHTTP2_ERR_CALLBACK_FAILURE; -} - -int ConnectionImpl::onMetadataFrameComplete(int32_t stream_id, bool end_metadata) { - ENVOY_CONN_LOG(trace, "recv METADATA frame on stream {}, end_metadata: {}", connection_, - stream_id, end_metadata); - - StreamImpl* stream = getStream(stream_id); - if (stream == nullptr) { - return 0; - } - - bool result = stream->getMetadataDecoder().onMetadataFrameComplete(end_metadata); - return result ? 0 : NGHTTP2_ERR_CALLBACK_FAILURE; -} - -ssize_t ConnectionImpl::packMetadata(int32_t stream_id, uint8_t* buf, size_t len) { - ENVOY_CONN_LOG(trace, "pack METADATA frame on stream {}", connection_, stream_id); - - StreamImpl* stream = getStream(stream_id); - if (stream == nullptr) { - return 0; - } - - MetadataEncoder& encoder = stream->getMetadataEncoder(); - return encoder.packNextFramePayload(buf, len); -} - -int ConnectionImpl::saveHeader(const nghttp2_frame* frame, HeaderString&& name, - HeaderString&& value) { - StreamImpl* stream = getStream(frame->hd.stream_id); - if (!stream) { - // We have seen 1 or 2 crashes where we get a headers callback but there is no associated - // stream data. I honestly am not sure how this can happen. However, from reading the nghttp2 - // code it looks possible that inflate_header_block() can safely inflate headers for an already - // closed stream, but will still call the headers callback. Since that seems possible, we should - // ignore this case here. - // TODO(mattklein123): Figure out a test case that can hit this. - stats_.headers_cb_no_stream_.inc(); - return 0; - } - - auto should_return = checkHeaderNameForUnderscores(name.getStringView()); - if (should_return) { - name.clear(); - value.clear(); - return should_return.value(); - } - - stream->saveHeader(std::move(name), std::move(value)); - - if (stream->headers().byteSize() > max_headers_kb_ * 1024 || - stream->headers().size() > max_headers_count_) { - // This will cause the library to reset/close the stream. 
- stats_.header_overflow_.inc(); - return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE; - } else { - return 0; - } -} - -void ConnectionImpl::sendPendingFrames() { - if (dispatching_ || connection_.state() == Network::Connection::State::Closed) { - return; - } - - const int rc = nghttp2_session_send(session_); - if (rc != 0) { - ASSERT(rc == NGHTTP2_ERR_CALLBACK_FAILURE); - // For errors caused by the pending outbound frame flood the FrameFloodException has - // to be thrown. However the nghttp2 library returns only the generic error code for - // all failure types. Check queue limits and throw FrameFloodException if they were - // exceeded. - if (outbound_frames_ > max_outbound_frames_ || - outbound_control_frames_ > max_outbound_control_frames_) { - throw FrameFloodException("Too many frames in the outbound queue."); - } - - throw CodecProtocolException(std::string(nghttp2_strerror(rc))); - } - - // See ConnectionImpl::StreamImpl::resetStream() for why we do this. This is an uncommon event, - // so iterating through every stream to find the ones that have a deferred reset is not a big - // deal. Furthermore, queueing a reset frame does not actually invoke the close stream callback. - // This is only done when the reset frame is sent. Thus, it's safe to work directly with the - // stream map. - // NOTE: The way we handle deferred reset is essentially best effort. If we intend to do a - // deferred reset, we try to finish the stream, including writing any pending data frames. - // If we cannot do this (potentially due to not enough window), we just reset the stream. - // In general this behavior occurs only when we are trying to send immediate error messages - // to short circuit requests. In the best effort case, we complete the stream before - // resetting. In other cases, we just do the reset now which will blow away pending data - // frames and release any memory associated with the stream. - if (pending_deferred_reset_) { - pending_deferred_reset_ = false; - for (auto& stream : active_streams_) { - if (stream->deferred_reset_) { - stream->resetStreamWorker(stream->deferred_reset_.value()); - } - } - sendPendingFrames(); - } -} - -void ConnectionImpl::sendSettings( - const envoy::config::core::v3::Http2ProtocolOptions& http2_options, bool disable_push) { - absl::InlinedVector settings; - auto insertParameter = [&settings](const nghttp2_settings_entry& entry) mutable -> bool { - const auto it = std::find_if(settings.cbegin(), settings.cend(), - [&entry](const nghttp2_settings_entry& existing) { - return entry.settings_id == existing.settings_id; - }); - if (it != settings.end()) { - return false; - } - settings.push_back(entry); - return true; - }; - - // Universally disable receiving push promise frames as we don't currently support - // them. nghttp2 will fail the connection if the other side still sends them. - // TODO(mattklein123): Remove this when we correctly proxy push promise. - // NOTE: This is a special case with respect to custom parameter overrides in that server push is - // not supported and therefore not end user configurable. - if (disable_push) { - settings.push_back( - {static_cast(NGHTTP2_SETTINGS_ENABLE_PUSH), disable_push ? 
0U : 1U}); - } - - for (const auto& it : http2_options.custom_settings_parameters()) { - ASSERT(it.identifier().value() <= std::numeric_limits::max()); - const bool result = - insertParameter({static_cast(it.identifier().value()), it.value().value()}); - ASSERT(result); - ENVOY_CONN_LOG(debug, "adding custom settings parameter with id {:#x} to {}", connection_, - it.identifier().value(), it.value().value()); - } - - // Insert named parameters. - settings.insert( - settings.end(), - {{NGHTTP2_SETTINGS_HEADER_TABLE_SIZE, http2_options.hpack_table_size().value()}, - {NGHTTP2_SETTINGS_ENABLE_CONNECT_PROTOCOL, http2_options.allow_connect()}, - {NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS, http2_options.max_concurrent_streams().value()}, - {NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE, http2_options.initial_stream_window_size().value()}}); - if (!settings.empty()) { - int rc = nghttp2_submit_settings(session_, NGHTTP2_FLAG_NONE, settings.data(), settings.size()); - ASSERT(rc == 0); - } else { - // nghttp2_submit_settings need to be called at least once - int rc = nghttp2_submit_settings(session_, NGHTTP2_FLAG_NONE, nullptr, 0); - ASSERT(rc == 0); - } - - const uint32_t initial_connection_window_size = - http2_options.initial_connection_window_size().value(); - // Increase connection window size up to our default size. - if (initial_connection_window_size != NGHTTP2_INITIAL_CONNECTION_WINDOW_SIZE) { - ENVOY_CONN_LOG(debug, "updating connection-level initial window size to {}", connection_, - initial_connection_window_size); - int rc = nghttp2_submit_window_update(session_, NGHTTP2_FLAG_NONE, 0, - initial_connection_window_size - - NGHTTP2_INITIAL_CONNECTION_WINDOW_SIZE); - ASSERT(rc == 0); - } -} - -ConnectionImpl::Http2Callbacks::Http2Callbacks() { - nghttp2_session_callbacks_new(&callbacks_); - nghttp2_session_callbacks_set_send_callback( - callbacks_, - [](nghttp2_session*, const uint8_t* data, size_t length, int, void* user_data) -> ssize_t { - return static_cast(user_data)->onSend(data, length); - }); - - nghttp2_session_callbacks_set_send_data_callback( - callbacks_, - [](nghttp2_session*, nghttp2_frame* frame, const uint8_t* framehd, size_t length, - nghttp2_data_source* source, void*) -> int { - ASSERT(frame->data.padlen == 0); - return static_cast(source->ptr)->onDataSourceSend(framehd, length); - }); - - nghttp2_session_callbacks_set_on_begin_headers_callback( - callbacks_, [](nghttp2_session*, const nghttp2_frame* frame, void* user_data) -> int { - return static_cast(user_data)->onBeginHeaders(frame); - }); - - nghttp2_session_callbacks_set_on_header_callback( - callbacks_, - [](nghttp2_session*, const nghttp2_frame* frame, const uint8_t* raw_name, size_t name_length, - const uint8_t* raw_value, size_t value_length, uint8_t, void* user_data) -> int { - // TODO PERF: Can reference count here to avoid copies. 
- HeaderString name; - name.setCopy(reinterpret_cast(raw_name), name_length); - HeaderString value; - value.setCopy(reinterpret_cast(raw_value), value_length); - return static_cast(user_data)->onHeader(frame, std::move(name), - std::move(value)); - }); - - nghttp2_session_callbacks_set_on_data_chunk_recv_callback( - callbacks_, - [](nghttp2_session*, uint8_t, int32_t stream_id, const uint8_t* data, size_t len, - void* user_data) -> int { - return static_cast(user_data)->onData(stream_id, data, len); - }); - - nghttp2_session_callbacks_set_on_begin_frame_callback( - callbacks_, [](nghttp2_session*, const nghttp2_frame_hd* hd, void* user_data) -> int { - return static_cast(user_data)->onBeforeFrameReceived(hd); - }); - - nghttp2_session_callbacks_set_on_frame_recv_callback( - callbacks_, [](nghttp2_session*, const nghttp2_frame* frame, void* user_data) -> int { - return static_cast(user_data)->onFrameReceived(frame); - }); - - nghttp2_session_callbacks_set_on_stream_close_callback( - callbacks_, - [](nghttp2_session*, int32_t stream_id, uint32_t error_code, void* user_data) -> int { - return static_cast(user_data)->onStreamClose(stream_id, error_code); - }); - - nghttp2_session_callbacks_set_on_frame_send_callback( - callbacks_, [](nghttp2_session*, const nghttp2_frame* frame, void* user_data) -> int { - return static_cast(user_data)->onFrameSend(frame); - }); - - nghttp2_session_callbacks_set_before_frame_send_callback( - callbacks_, [](nghttp2_session*, const nghttp2_frame* frame, void* user_data) -> int { - return static_cast(user_data)->onBeforeFrameSend(frame); - }); - - nghttp2_session_callbacks_set_on_frame_not_send_callback( - callbacks_, [](nghttp2_session*, const nghttp2_frame*, int, void*) -> int { - // We used to always return failure here but it looks now this can get called if the other - // side sends GOAWAY and we are trying to send a SETTINGS ACK. Just ignore this for now. 
- return 0; - }); - - nghttp2_session_callbacks_set_on_invalid_frame_recv_callback( - callbacks_, - [](nghttp2_session*, const nghttp2_frame* frame, int error_code, void* user_data) -> int { - return static_cast(user_data)->onInvalidFrame(frame->hd.stream_id, - error_code); - }); - - nghttp2_session_callbacks_set_on_extension_chunk_recv_callback( - callbacks_, - [](nghttp2_session*, const nghttp2_frame_hd* hd, const uint8_t* data, size_t len, - void* user_data) -> int { - ASSERT(hd->length >= len); - return static_cast(user_data)->onMetadataReceived(hd->stream_id, data, - len); - }); - - nghttp2_session_callbacks_set_unpack_extension_callback( - callbacks_, [](nghttp2_session*, void**, const nghttp2_frame_hd* hd, void* user_data) -> int { - return static_cast(user_data)->onMetadataFrameComplete( - hd->stream_id, hd->flags == END_METADATA_FLAG); - }); - - nghttp2_session_callbacks_set_pack_extension_callback( - callbacks_, - [](nghttp2_session*, uint8_t* buf, size_t len, const nghttp2_frame* frame, - void* user_data) -> ssize_t { - ASSERT(frame->hd.length <= len); - return static_cast(user_data)->packMetadata(frame->hd.stream_id, buf, len); - }); - - nghttp2_session_callbacks_set_error_callback2( - callbacks_, [](nghttp2_session*, int, const char* msg, size_t len, void* user_data) -> int { - return static_cast(user_data)->onError(absl::string_view(msg, len)); - }); -} - -ConnectionImpl::Http2Callbacks::~Http2Callbacks() { nghttp2_session_callbacks_del(callbacks_); } - -ConnectionImpl::Http2Options::Http2Options( - const envoy::config::core::v3::Http2ProtocolOptions& http2_options) { - nghttp2_option_new(&options_); - // Currently we do not do anything with stream priority. Setting the following option prevents - // nghttp2 from keeping around closed streams for use during stream priority dependency graph - // calculations. This saves a tremendous amount of memory in cases where there are a large - // number of kept alive HTTP/2 connections. - nghttp2_option_set_no_closed_streams(options_, 1); - nghttp2_option_set_no_auto_window_update(options_, 1); - - // The max send header block length is configured to an arbitrarily high number so as to never - // trigger the check within nghttp2, as we check request headers length in - // codec_impl::saveHeader. - nghttp2_option_set_max_send_header_block_length(options_, 0x2000000); - - if (http2_options.hpack_table_size().value() != NGHTTP2_DEFAULT_HEADER_TABLE_SIZE) { - nghttp2_option_set_max_deflate_dynamic_table_size(options_, - http2_options.hpack_table_size().value()); - } - - if (http2_options.allow_metadata()) { - nghttp2_option_set_user_recv_extension_type(options_, METADATA_FRAME_TYPE); - } - - // nghttp2 v1.39.2 lowered the internal flood protection limit from 10K to 1K of ACK frames. - // This new limit may cause the internal nghttp2 mitigation to trigger more often (as it - // requires just 9K of incoming bytes for smallest 9 byte SETTINGS frame), bypassing the same - // mitigation and its associated behavior in the envoy HTTP/2 codec. Since envoy does not rely - // on this mitigation, set back to the old 10K number to avoid any changes in the HTTP/2 codec - // behavior. 
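  // Rough arithmetic behind the comment above: the smallest SETTINGS frame is 9 bytes, so
  // nghttp2's lowered 1K ACK limit can be reached with about 1000 * 9 = 9000 bytes of input,
  // whereas the restored 10K limit needs roughly 10000 * 9 = 90000 bytes, keeping Envoy's own
  // flood mitigation in front of the library's internal one.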
- nghttp2_option_set_max_outbound_ack(options_, 10000); -} - -ConnectionImpl::Http2Options::~Http2Options() { nghttp2_option_del(options_); } - -ConnectionImpl::ClientHttp2Options::ClientHttp2Options( - const envoy::config::core::v3::Http2ProtocolOptions& http2_options) - : Http2Options(http2_options) { - // Temporarily disable initial max streams limit/protection, since we might want to create - // more than 100 streams before receiving the HTTP/2 SETTINGS frame from the server. - // - // TODO(PiotrSikora): remove this once multiple upstream connections or queuing are implemented. - nghttp2_option_set_peer_max_concurrent_streams( - options_, ::Envoy::Http2::Utility::OptionsLimits::DEFAULT_MAX_CONCURRENT_STREAMS); -} - -ClientConnectionImpl::ClientConnectionImpl( - Network::Connection& connection, Http::ConnectionCallbacks& callbacks, CodecStats& stats, - const envoy::config::core::v3::Http2ProtocolOptions& http2_options, - const uint32_t max_response_headers_kb, const uint32_t max_response_headers_count, - Nghttp2SessionFactory& http2_session_factory) - : ConnectionImpl(connection, stats, http2_options, max_response_headers_kb, - max_response_headers_count), - callbacks_(callbacks) { - ClientHttp2Options client_http2_options(http2_options); - session_ = http2_session_factory.create(http2_callbacks_.callbacks(), base(), - client_http2_options.options()); - http2_session_factory.init(session_, base(), http2_options); - allow_metadata_ = http2_options.allow_metadata(); -} - -RequestEncoder& ClientConnectionImpl::newStream(ResponseDecoder& decoder) { - ClientStreamImplPtr stream(new ClientStreamImpl(*this, per_stream_buffer_limit_, decoder)); - // If the connection is currently above the high watermark, make sure to inform the new stream. - // The connection can not pass this on automatically as it has no awareness that a new stream is - // created. - if (connection_.aboveHighWatermark()) { - stream->runHighWatermarkCallbacks(); - } - ClientStreamImpl& stream_ref = *stream; - stream->moveIntoList(std::move(stream), active_streams_); - return stream_ref; -} - -int ClientConnectionImpl::onBeginHeaders(const nghttp2_frame* frame) { - // The client code explicitly does not currently support push promise. - RELEASE_ASSERT(frame->hd.type == NGHTTP2_HEADERS, ""); - RELEASE_ASSERT(frame->headers.cat == NGHTTP2_HCAT_RESPONSE || - frame->headers.cat == NGHTTP2_HCAT_HEADERS, - ""); - if (frame->headers.cat == NGHTTP2_HCAT_HEADERS) { - StreamImpl* stream = getStream(frame->hd.stream_id); - stream->allocTrailers(); - } - - return 0; -} - -int ClientConnectionImpl::onHeader(const nghttp2_frame* frame, HeaderString&& name, - HeaderString&& value) { - // The client code explicitly does not currently support push promise. 
- ASSERT(frame->hd.type == NGHTTP2_HEADERS); - ASSERT(frame->headers.cat == NGHTTP2_HCAT_RESPONSE || frame->headers.cat == NGHTTP2_HCAT_HEADERS); - return saveHeader(frame, std::move(name), std::move(value)); -} - -ServerConnectionImpl::ServerConnectionImpl( - Network::Connection& connection, Http::ServerConnectionCallbacks& callbacks, CodecStats& stats, - const envoy::config::core::v3::Http2ProtocolOptions& http2_options, - const uint32_t max_request_headers_kb, const uint32_t max_request_headers_count, - envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction - headers_with_underscores_action) - : ConnectionImpl(connection, stats, http2_options, max_request_headers_kb, - max_request_headers_count), - callbacks_(callbacks), headers_with_underscores_action_(headers_with_underscores_action) { - Http2Options h2_options(http2_options); - - nghttp2_session_server_new2(&session_, http2_callbacks_.callbacks(), base(), - h2_options.options()); - sendSettings(http2_options, false); - allow_metadata_ = http2_options.allow_metadata(); -} - -int ServerConnectionImpl::onBeginHeaders(const nghttp2_frame* frame) { - // For a server connection, we should never get push promise frames. - ASSERT(frame->hd.type == NGHTTP2_HEADERS); - - if (!trackInboundFrames(&frame->hd, frame->headers.padlen)) { - return NGHTTP2_ERR_FLOODED; - } - - if (frame->headers.cat != NGHTTP2_HCAT_REQUEST) { - stats_.trailers_.inc(); - ASSERT(frame->headers.cat == NGHTTP2_HCAT_HEADERS); - - StreamImpl* stream = getStream(frame->hd.stream_id); - stream->allocTrailers(); - return 0; - } - - ServerStreamImplPtr stream(new ServerStreamImpl(*this, per_stream_buffer_limit_)); - if (connection_.aboveHighWatermark()) { - stream->runHighWatermarkCallbacks(); - } - stream->request_decoder_ = &callbacks_.newStream(*stream); - stream->stream_id_ = frame->hd.stream_id; - stream->moveIntoList(std::move(stream), active_streams_); - nghttp2_session_set_stream_user_data(session_, frame->hd.stream_id, - active_streams_.front().get()); - return 0; -} - -int ServerConnectionImpl::onHeader(const nghttp2_frame* frame, HeaderString&& name, - HeaderString&& value) { - // For a server connection, we should never get push promise frames. - ASSERT(frame->hd.type == NGHTTP2_HEADERS); - ASSERT(frame->headers.cat == NGHTTP2_HCAT_REQUEST || frame->headers.cat == NGHTTP2_HCAT_HEADERS); - return saveHeader(frame, std::move(name), std::move(value)); -} - -bool ServerConnectionImpl::trackInboundFrames(const nghttp2_frame_hd* hd, uint32_t padding_length) { - ENVOY_CONN_LOG(trace, "track inbound frame type={} flags={} length={} padding_length={}", - connection_, static_cast(hd->type), static_cast(hd->flags), - static_cast(hd->length), padding_length); - switch (hd->type) { - case NGHTTP2_HEADERS: - case NGHTTP2_CONTINUATION: - // Track new streams. - if (hd->flags & NGHTTP2_FLAG_END_HEADERS) { - inbound_streams_++; - } - FALLTHRU; - case NGHTTP2_DATA: - // Track frames with an empty payload and no end stream flag. 
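  // Illustrative sketch, assuming the default limit of 1 consecutive empty frame: the first
  // zero-length DATA frame without END_STREAM only raises the counter to 1, but a second one
  // in a row pushes it past the limit, so checkInboundFrameLimits() fails,
  // inbound_empty_frames_flood is incremented and the connection is terminated.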
- if (hd->length - padding_length == 0 && !(hd->flags & NGHTTP2_FLAG_END_STREAM)) { - ENVOY_CONN_LOG(trace, "frame with an empty payload and no end stream flag.", connection_); - consecutive_inbound_frames_with_empty_payload_++; - } else { - consecutive_inbound_frames_with_empty_payload_ = 0; - } - break; - case NGHTTP2_PRIORITY: - inbound_priority_frames_++; - break; - case NGHTTP2_WINDOW_UPDATE: - inbound_window_update_frames_++; - break; - default: - break; - } - - if (!checkInboundFrameLimits()) { - // NGHTTP2_ERR_FLOODED is overridden within nghttp2 library and it doesn't propagate - // all the way to nghttp2_session_mem_recv() where we need it. - flood_detected_ = true; - return false; - } - - return true; -} - -bool ServerConnectionImpl::checkInboundFrameLimits() { - ASSERT(dispatching_downstream_data_); - - if (consecutive_inbound_frames_with_empty_payload_ > - max_consecutive_inbound_frames_with_empty_payload_) { - ENVOY_CONN_LOG(trace, - "error reading frame: Too many consecutive frames with an empty payload " - "received in this HTTP/2 session.", - connection_); - stats_.inbound_empty_frames_flood_.inc(); - return false; - } - - if (inbound_priority_frames_ > max_inbound_priority_frames_per_stream_ * (1 + inbound_streams_)) { - ENVOY_CONN_LOG(trace, - "error reading frame: Too many PRIORITY frames received in this HTTP/2 session.", - connection_); - stats_.inbound_priority_frames_flood_.inc(); - return false; - } - - if (inbound_window_update_frames_ > - 1 + 2 * (inbound_streams_ + - max_inbound_window_update_frames_per_data_frame_sent_ * outbound_data_frames_)) { - ENVOY_CONN_LOG( - trace, - "error reading frame: Too many WINDOW_UPDATE frames received in this HTTP/2 session.", - connection_); - stats_.inbound_window_update_frames_flood_.inc(); - return false; - } - - return true; -} - -void ServerConnectionImpl::checkOutboundQueueLimits() { - if (outbound_frames_ > max_outbound_frames_ && dispatching_downstream_data_) { - stats_.outbound_flood_.inc(); - throw FrameFloodException("Too many frames in the outbound queue."); - } - if (outbound_control_frames_ > max_outbound_control_frames_ && dispatching_downstream_data_) { - stats_.outbound_control_flood_.inc(); - throw FrameFloodException("Too many control frames in the outbound queue."); - } -} - -Http::Status ServerConnectionImpl::dispatch(Buffer::Instance& data) { - // TODO(#10878): Remove this wrapper when exception removal is complete. innerDispatch may either - // throw an exception or return an error status. The utility wrapper catches exceptions and - // converts them to error statuses. - return Http::Utility::exceptionToStatus( - [&](Buffer::Instance& data) -> Http::Status { return innerDispatch(data); }, data); -} - -Http::Status ServerConnectionImpl::innerDispatch(Buffer::Instance& data) { - ASSERT(!dispatching_downstream_data_); - dispatching_downstream_data_ = true; - - // Make sure the dispatching_downstream_data_ is set to false even - // when ConnectionImpl::dispatch throws an exception. - Cleanup cleanup([this]() { dispatching_downstream_data_ = false; }); - - // Make sure downstream outbound queue was not flooded by the upstream frames. 
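  // For scale, with the defaults noted below (10000 total outbound frames, 1000 outbound
  // PING/SETTINGS/RST_STREAM frames), exceeding either bound while downstream data is being
  // dispatched increments outbound_flood or outbound_control_flood and tears the connection
  // down via FrameFloodException.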
- checkOutboundQueueLimits(); - - return ConnectionImpl::innerDispatch(data); -} - -absl::optional -ServerConnectionImpl::checkHeaderNameForUnderscores(absl::string_view header_name) { - if (headers_with_underscores_action_ != envoy::config::core::v3::HttpProtocolOptions::ALLOW && - Http::HeaderUtility::headerNameContainsUnderscore(header_name)) { - if (headers_with_underscores_action_ == - envoy::config::core::v3::HttpProtocolOptions::DROP_HEADER) { - ENVOY_CONN_LOG(debug, "Dropping header with invalid characters in its name: {}", connection_, - header_name); - stats_.dropped_headers_with_underscores_.inc(); - return 0; - } - ENVOY_CONN_LOG(debug, "Rejecting request due to header name with underscores: {}", connection_, - header_name); - stats_.requests_rejected_with_underscores_in_headers_.inc(); - return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE; - } - return absl::nullopt; -} - -} // namespace Http2 -} // namespace Legacy -} // namespace Http -} // namespace Envoy diff --git a/source/common/http/http2/codec_impl_legacy.h b/source/common/http/http2/codec_impl_legacy.h deleted file mode 100644 index ebb40b18d8a7..000000000000 --- a/source/common/http/http2/codec_impl_legacy.h +++ /dev/null @@ -1,602 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include -#include - -#include "envoy/config/core/v3/protocol.pb.h" -#include "envoy/event/deferred_deletable.h" -#include "envoy/http/codec.h" -#include "envoy/network/connection.h" - -#include "common/buffer/buffer_impl.h" -#include "common/buffer/watermark_buffer.h" -#include "common/common/linked_object.h" -#include "common/common/logger.h" -#include "common/common/thread.h" -#include "common/http/codec_helper.h" -#include "common/http/header_map_impl.h" -#include "common/http/http2/codec_stats.h" -#include "common/http/http2/metadata_decoder.h" -#include "common/http/http2/metadata_encoder.h" -#include "common/http/status.h" -#include "common/http/utility.h" - -#include "absl/types/optional.h" -#include "nghttp2/nghttp2.h" - -namespace Envoy { -namespace Http { -namespace Legacy { -namespace Http2 { - -// This is not the full client magic, but it's the smallest size that should be able to -// differentiate between HTTP/1 and HTTP/2. -const std::string CLIENT_MAGIC_PREFIX = "PRI * HTTP/2"; - -class Utility { -public: - /** - * Deal with https://tools.ietf.org/html/rfc7540#section-8.1.2.5 - * @param key supplies the incoming header key. - * @param value supplies the incoming header value. - * @param cookies supplies the header string to fill if this is a cookie header that needs to be - * rebuilt. - */ - static bool reconstituteCrumbledCookies(const HeaderString& key, const HeaderString& value, - HeaderString& cookies); -}; - -class ConnectionImpl; - -// Abstract nghttp2_session factory. Used to enable injection of factories for testing. -class Nghttp2SessionFactory { -public: - using ConnectionImplType = ConnectionImpl; - virtual ~Nghttp2SessionFactory() = default; - - // Returns a new nghttp2_session to be used with |connection|. - virtual nghttp2_session* create(const nghttp2_session_callbacks* callbacks, - ConnectionImplType* connection, - const nghttp2_option* options) PURE; - - // Initializes the |session|. 
- virtual void init(nghttp2_session* session, ConnectionImplType* connection, - const envoy::config::core::v3::Http2ProtocolOptions& options) PURE; -}; - -class ProdNghttp2SessionFactory : public Nghttp2SessionFactory { -public: - nghttp2_session* create(const nghttp2_session_callbacks* callbacks, ConnectionImpl* connection, - const nghttp2_option* options) override; - - void init(nghttp2_session* session, ConnectionImpl* connection, - const envoy::config::core::v3::Http2ProtocolOptions& options) override; - - // Returns a global factory instance. Note that this is possible because no internal state is - // maintained; the thread safety of create() and init()'s side effects is guaranteed by Envoy's - // worker based threading model. - static ProdNghttp2SessionFactory& get() { - static ProdNghttp2SessionFactory* instance = new ProdNghttp2SessionFactory(); - return *instance; - } -}; - -/** - * Base class for HTTP/2 client and server codecs. - */ -class ConnectionImpl : public virtual Connection, protected Logger::Loggable { -public: - ConnectionImpl(Network::Connection& connection, Http::Http2::CodecStats& stats, - const envoy::config::core::v3::Http2ProtocolOptions& http2_options, - const uint32_t max_headers_kb, const uint32_t max_headers_count); - - ~ConnectionImpl() override; - - // Http::Connection - // NOTE: the `dispatch` method is also overridden in the ServerConnectionImpl class - Http::Status dispatch(Buffer::Instance& data) override; - void goAway() override; - Protocol protocol() override { return Protocol::Http2; } - void shutdownNotice() override; - bool wantsToWrite() override { return nghttp2_session_want_write(session_); } - // Propagate network connection watermark events to each stream on the connection. - void onUnderlyingConnectionAboveWriteBufferHighWatermark() override { - for (auto& stream : active_streams_) { - stream->runHighWatermarkCallbacks(); - } - } - void onUnderlyingConnectionBelowWriteBufferLowWatermark() override { - for (auto& stream : active_streams_) { - stream->runLowWatermarkCallbacks(); - } - } - - /** - * An inner dispatch call that executes the dispatching logic. While exception removal is in - * migration (#10878), this function may either throw an exception or return an error status. - * Exceptions are caught and translated to their corresponding statuses in the outer level - * dispatch. - * This needs to be virtual so that ServerConnectionImpl can override. - * TODO(#10878): Remove this when exception removal is complete. - */ - virtual Http::Status innerDispatch(Buffer::Instance& data); - -protected: - friend class ProdNghttp2SessionFactory; - - /** - * Wrapper for static nghttp2 callback dispatchers. - */ - class Http2Callbacks { - public: - Http2Callbacks(); - ~Http2Callbacks(); - - const nghttp2_session_callbacks* callbacks() { return callbacks_; } - - private: - nghttp2_session_callbacks* callbacks_; - }; - - /** - * Wrapper for static nghttp2 session options. - */ - class Http2Options { - public: - Http2Options(const envoy::config::core::v3::Http2ProtocolOptions& http2_options); - ~Http2Options(); - - const nghttp2_option* options() { return options_; } - - protected: - nghttp2_option* options_; - }; - - class ClientHttp2Options : public Http2Options { - public: - ClientHttp2Options(const envoy::config::core::v3::Http2ProtocolOptions& http2_options); - }; - - /** - * Base class for client and server side streams. 
- */ - struct StreamImpl : public virtual StreamEncoder, - public Stream, - public LinkedObject, - public Event::DeferredDeletable, - public StreamCallbackHelper { - - StreamImpl(ConnectionImpl& parent, uint32_t buffer_limit); - ~StreamImpl() override; - // TODO(mattklein123): Optimally this would be done in the destructor but there are currently - // deferred delete lifetime issues that need sorting out if the destructor of the stream is - // going to be able to refer to the parent connection. - void destroy(); - void disarmStreamIdleTimer() { - if (stream_idle_timer_ != nullptr) { - // To ease testing and the destructor assertion. - stream_idle_timer_->disableTimer(); - stream_idle_timer_.reset(); - } - } - - StreamImpl* base() { return this; } - ssize_t onDataSourceRead(uint64_t length, uint32_t* data_flags); - int onDataSourceSend(const uint8_t* framehd, size_t length); - void resetStreamWorker(StreamResetReason reason); - static void buildHeaders(std::vector& final_headers, const HeaderMap& headers); - void saveHeader(HeaderString&& name, HeaderString&& value); - void encodeHeadersBase(const std::vector& final_headers, bool end_stream); - virtual void submitHeaders(const std::vector& final_headers, - nghttp2_data_provider* provider) PURE; - void encodeTrailersBase(const HeaderMap& headers); - void submitTrailers(const HeaderMap& trailers); - void submitMetadata(uint8_t flags); - virtual StreamDecoder& decoder() PURE; - virtual HeaderMap& headers() PURE; - virtual void allocTrailers() PURE; - virtual HeaderMapPtr cloneTrailers(const HeaderMap& trailers) PURE; - virtual void createPendingFlushTimer() PURE; - void onPendingFlushTimer(); - - // Http::StreamEncoder - void encodeData(Buffer::Instance& data, bool end_stream) override; - Stream& getStream() override { return *this; } - void encodeMetadata(const MetadataMapVector& metadata_map_vector) override; - Http1StreamEncoderOptionsOptRef http1StreamEncoderOptions() override { return absl::nullopt; } - - // Http::Stream - void addCallbacks(StreamCallbacks& callbacks) override { addCallbacksHelper(callbacks); } - void removeCallbacks(StreamCallbacks& callbacks) override { removeCallbacksHelper(callbacks); } - void resetStream(StreamResetReason reason) override; - void readDisable(bool disable) override; - uint32_t bufferLimit() override { return pending_recv_data_.highWatermark(); } - const Network::Address::InstanceConstSharedPtr& connectionLocalAddress() override { - return parent_.connection_.localAddress(); - } - absl::string_view responseDetails() override { return details_; } - void setFlushTimeout(std::chrono::milliseconds timeout) override { - stream_idle_timeout_ = timeout; - } - - // This code assumes that details is a static string, so that we - // can avoid copying it. - void setDetails(absl::string_view details) { - // It is probably a mistake to call setDetails() twice, so - // assert that details_ is empty. - ASSERT(details_.empty()); - - details_ = details; - } - - void setWriteBufferWatermarks(uint32_t low_watermark, uint32_t high_watermark) { - pending_recv_data_.setWatermarks(low_watermark, high_watermark); - pending_send_data_.setWatermarks(low_watermark, high_watermark); - } - - // If the receive buffer encounters watermark callbacks, enable/disable reads on this stream. - void pendingRecvBufferHighWatermark(); - void pendingRecvBufferLowWatermark(); - - // If the send buffer encounters watermark callbacks, propagate this information to the streams. 
- // The router and connection manager will propagate them on as appropriate. - void pendingSendBufferHighWatermark(); - void pendingSendBufferLowWatermark(); - - // Does any necessary WebSocket/Upgrade conversion, then passes the headers - // to the decoder_. - virtual void decodeHeaders(bool allow_waiting_for_informational_headers) PURE; - virtual void decodeTrailers() PURE; - - // Get MetadataEncoder for this stream. - Http::Http2::MetadataEncoder& getMetadataEncoder(); - // Get MetadataDecoder for this stream. - Http::Http2::MetadataDecoder& getMetadataDecoder(); - // Callback function for MetadataDecoder. - void onMetadataDecoded(MetadataMapPtr&& metadata_map_ptr); - - bool buffersOverrun() const { return read_disable_count_ > 0; } - - ConnectionImpl& parent_; - int32_t stream_id_{-1}; - uint32_t unconsumed_bytes_{0}; - uint32_t read_disable_count_{0}; - Buffer::WatermarkBuffer pending_recv_data_{ - [this]() -> void { this->pendingRecvBufferLowWatermark(); }, - [this]() -> void { this->pendingRecvBufferHighWatermark(); }, - []() -> void { /* TODO(adisuissa): Handle overflow watermark */ }}; - Buffer::WatermarkBuffer pending_send_data_{ - [this]() -> void { this->pendingSendBufferLowWatermark(); }, - [this]() -> void { this->pendingSendBufferHighWatermark(); }, - []() -> void { /* TODO(adisuissa): Handle overflow watermark */ }}; - HeaderMapPtr pending_trailers_to_encode_; - std::unique_ptr metadata_decoder_; - std::unique_ptr metadata_encoder_; - absl::optional deferred_reset_; - HeaderString cookies_; - bool local_end_stream_sent_ : 1; - bool remote_end_stream_ : 1; - bool data_deferred_ : 1; - bool waiting_for_non_informational_headers_ : 1; - bool pending_receive_buffer_high_watermark_called_ : 1; - bool pending_send_buffer_high_watermark_called_ : 1; - bool reset_due_to_messaging_error_ : 1; - absl::string_view details_; - // See HttpConnectionManager.stream_idle_timeout. - std::chrono::milliseconds stream_idle_timeout_{}; - Event::TimerPtr stream_idle_timer_; - }; - - using StreamImplPtr = std::unique_ptr; - - /** - * Client side stream (request). - */ - struct ClientStreamImpl : public StreamImpl, public RequestEncoder { - ClientStreamImpl(ConnectionImpl& parent, uint32_t buffer_limit, - ResponseDecoder& response_decoder) - : StreamImpl(parent, buffer_limit), response_decoder_(response_decoder), - headers_or_trailers_(ResponseHeaderMapImpl::create()) {} - - // StreamImpl - void submitHeaders(const std::vector& final_headers, - nghttp2_data_provider* provider) override; - StreamDecoder& decoder() override { return response_decoder_; } - void decodeHeaders(bool allow_waiting_for_informational_headers) override; - void decodeTrailers() override; - HeaderMap& headers() override { - if (absl::holds_alternative(headers_or_trailers_)) { - return *absl::get(headers_or_trailers_); - } else { - return *absl::get(headers_or_trailers_); - } - } - void allocTrailers() override { - // If we are waiting for informational headers, make a new response header map, otherwise - // we are about to receive trailers. The codec makes sure this is the only valid sequence. 
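    // For example, after a 100 Continue response the stream is still waiting for the final
    // response headers, so the next HEADERS frame gets a fresh response header map; once the
    // non-informational headers have arrived, a later HEADERS frame can only carry trailers.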
- if (waiting_for_non_informational_headers_) { - headers_or_trailers_.emplace(ResponseHeaderMapImpl::create()); - } else { - headers_or_trailers_.emplace(ResponseTrailerMapImpl::create()); - } - } - HeaderMapPtr cloneTrailers(const HeaderMap& trailers) override { - return createHeaderMap(trailers); - } - void createPendingFlushTimer() override { - // Client streams do not create a flush timer because we currently assume that any failure - // to flush would be covered by a request/stream/etc. timeout. - } - - // RequestEncoder - void encodeHeaders(const RequestHeaderMap& headers, bool end_stream) override; - void encodeTrailers(const RequestTrailerMap& trailers) override { - encodeTrailersBase(trailers); - } - - ResponseDecoder& response_decoder_; - absl::variant headers_or_trailers_; - std::string upgrade_type_; - }; - - using ClientStreamImplPtr = std::unique_ptr; - - /** - * Server side stream (response). - */ - struct ServerStreamImpl : public StreamImpl, public ResponseEncoder { - ServerStreamImpl(ConnectionImpl& parent, uint32_t buffer_limit) - : StreamImpl(parent, buffer_limit), headers_or_trailers_(RequestHeaderMapImpl::create()) {} - - // StreamImpl - void submitHeaders(const std::vector& final_headers, - nghttp2_data_provider* provider) override; - StreamDecoder& decoder() override { return *request_decoder_; } - void decodeHeaders(bool allow_waiting_for_informational_headers) override; - void decodeTrailers() override; - HeaderMap& headers() override { - if (absl::holds_alternative(headers_or_trailers_)) { - return *absl::get(headers_or_trailers_); - } else { - return *absl::get(headers_or_trailers_); - } - } - void allocTrailers() override { - headers_or_trailers_.emplace(RequestTrailerMapImpl::create()); - } - HeaderMapPtr cloneTrailers(const HeaderMap& trailers) override { - return createHeaderMap(trailers); - } - void createPendingFlushTimer() override; - - // ResponseEncoder - void encode100ContinueHeaders(const ResponseHeaderMap& headers) override; - void encodeHeaders(const ResponseHeaderMap& headers, bool end_stream) override; - void encodeTrailers(const ResponseTrailerMap& trailers) override { - encodeTrailersBase(trailers); - } - - RequestDecoder* request_decoder_{}; - absl::variant headers_or_trailers_; - }; - - using ServerStreamImplPtr = std::unique_ptr; - - ConnectionImpl* base() { return this; } - // NOTE: Always use non debug nullptr checks against the return value of this function. There are - // edge cases (such as for METADATA frames) where nghttp2 will issue a callback for a stream_id - // that is not associated with an existing stream. - StreamImpl* getStream(int32_t stream_id); - int saveHeader(const nghttp2_frame* frame, HeaderString&& name, HeaderString&& value); - void sendPendingFrames(); - void sendSettings(const envoy::config::core::v3::Http2ProtocolOptions& http2_options, - bool disable_push); - // Callback triggered when the peer's SETTINGS frame is received. - // NOTE: This is only used for tests. - virtual void onSettingsForTest(const nghttp2_settings&) {} - - /** - * Check if header name contains underscore character. - * Underscore character is allowed in header names by the RFC-7230 and this check is implemented - * as a security measure due to systems that treat '_' and '-' as interchangeable. - * The ServerConnectionImpl may drop header or reject request based on the - * `common_http_protocol_options.headers_with_underscores_action` configuration option in the - * HttpConnectionManager. 
- */ - virtual absl::optional checkHeaderNameForUnderscores(absl::string_view /* header_name */) { - return absl::nullopt; - } - - static Http2Callbacks http2_callbacks_; - - std::list active_streams_; - nghttp2_session* session_{}; - Http::Http2::CodecStats& stats_; - Network::Connection& connection_; - const uint32_t max_headers_kb_; - const uint32_t max_headers_count_; - uint32_t per_stream_buffer_limit_; - bool allow_metadata_; - const bool stream_error_on_invalid_http_messaging_; - bool flood_detected_; - - // Set if the type of frame that is about to be sent is PING or SETTINGS with the ACK flag set, or - // RST_STREAM. - bool is_outbound_flood_monitored_control_frame_ = 0; - // This counter keeps track of the number of outbound frames of all types (these that were - // buffered in the underlying connection but not yet written into the socket). If this counter - // exceeds the `max_outbound_frames_' value the connection is terminated. - uint32_t outbound_frames_ = 0; - // Maximum number of outbound frames. Initialized from corresponding http2_protocol_options. - // Default value is 10000. - const uint32_t max_outbound_frames_; - const std::function frame_buffer_releasor_; - // This counter keeps track of the number of outbound frames of types PING, SETTINGS and - // RST_STREAM (these that were buffered in the underlying connection but not yet written into the - // socket). If this counter exceeds the `max_outbound_control_frames_' value the connection is - // terminated. - uint32_t outbound_control_frames_ = 0; - // Maximum number of outbound frames of types PING, SETTINGS and RST_STREAM. Initialized from - // corresponding http2_protocol_options. Default value is 1000. - const uint32_t max_outbound_control_frames_; - const std::function control_frame_buffer_releasor_; - // This counter keeps track of the number of consecutive inbound frames of types HEADERS, - // CONTINUATION and DATA with an empty payload and no end stream flag. If this counter exceeds - // the `max_consecutive_inbound_frames_with_empty_payload_` value the connection is terminated. - uint32_t consecutive_inbound_frames_with_empty_payload_ = 0; - // Maximum number of consecutive inbound frames of types HEADERS, CONTINUATION and DATA without - // a payload. Initialized from corresponding http2_protocol_options. Default value is 1. - const uint32_t max_consecutive_inbound_frames_with_empty_payload_; - - // This counter keeps track of the number of inbound streams. - uint32_t inbound_streams_ = 0; - // This counter keeps track of the number of inbound PRIORITY frames. If this counter exceeds - // the value calculated using this formula: - // - // max_inbound_priority_frames_per_stream_ * (1 + inbound_streams_) - // - // the connection is terminated. - uint64_t inbound_priority_frames_ = 0; - // Maximum number of inbound PRIORITY frames per stream. Initialized from corresponding - // http2_protocol_options. Default value is 100. - const uint32_t max_inbound_priority_frames_per_stream_; - - // This counter keeps track of the number of inbound WINDOW_UPDATE frames. If this counter exceeds - // the value calculated using this formula: - // - // 1 + 2 * (inbound_streams_ + - // max_inbound_window_update_frames_per_data_frame_sent_ * outbound_data_frames_) - // - // the connection is terminated. - uint64_t inbound_window_update_frames_ = 0; - // This counter keeps track of the number of outbound DATA frames. 
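  // Worked example of the two formulas above with their defaults: a connection that has seen 3
  // inbound streams may receive at most 100 * (1 + 3) = 400 PRIORITY frames, and one with 2
  // inbound streams and 100 outbound DATA frames may receive at most
  // 1 + 2 * (2 + 10 * 100) = 2005 WINDOW_UPDATE frames before it is terminated.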
- uint64_t outbound_data_frames_ = 0; - // Maximum number of inbound WINDOW_UPDATE frames per outbound DATA frame sent. Initialized - // from corresponding http2_protocol_options. Default value is 10. - const uint32_t max_inbound_window_update_frames_per_data_frame_sent_; - - // For the flood mitigation to work the onSend callback must be called once for each outbound - // frame. This is what the nghttp2 library is doing, however this is not documented. The - // Http2FloodMitigationTest.* tests in test/integration/http2_integration_test.cc will break if - // this changes in the future. Also it is important that onSend does not do partial writes, as the - // nghttp2 library will keep calling this callback to write the rest of the frame. - ssize_t onSend(const uint8_t* data, size_t length); - -private: - virtual ConnectionCallbacks& callbacks() PURE; - virtual int onBeginHeaders(const nghttp2_frame* frame) PURE; - int onData(int32_t stream_id, const uint8_t* data, size_t len); - int onBeforeFrameReceived(const nghttp2_frame_hd* hd); - int onFrameReceived(const nghttp2_frame* frame); - int onBeforeFrameSend(const nghttp2_frame* frame); - int onFrameSend(const nghttp2_frame* frame); - int onError(absl::string_view error); - virtual int onHeader(const nghttp2_frame* frame, HeaderString&& name, HeaderString&& value) PURE; - int onInvalidFrame(int32_t stream_id, int error_code); - int onStreamClose(int32_t stream_id, uint32_t error_code); - int onMetadataReceived(int32_t stream_id, const uint8_t* data, size_t len); - int onMetadataFrameComplete(int32_t stream_id, bool end_metadata); - ssize_t packMetadata(int32_t stream_id, uint8_t* buf, size_t len); - // Adds buffer fragment for a new outbound frame to the supplied Buffer::OwnedImpl. - // Returns true on success or false if outbound queue limits were exceeded. - bool addOutboundFrameFragment(Buffer::OwnedImpl& output, const uint8_t* data, size_t length); - virtual void checkOutboundQueueLimits() PURE; - void incrementOutboundFrameCount(bool is_outbound_flood_monitored_control_frame); - virtual bool trackInboundFrames(const nghttp2_frame_hd* hd, uint32_t padding_length) PURE; - virtual bool checkInboundFrameLimits() PURE; - void releaseOutboundFrame(); - void releaseOutboundControlFrame(); - - bool dispatching_ : 1; - bool raised_goaway_ : 1; - bool pending_deferred_reset_ : 1; -}; - -/** - * HTTP/2 client connection codec. - */ -class ClientConnectionImpl : public ClientConnection, public ConnectionImpl { -public: - using SessionFactory = Nghttp2SessionFactory; - ClientConnectionImpl(Network::Connection& connection, ConnectionCallbacks& callbacks, - Http::Http2::CodecStats& stats, - const envoy::config::core::v3::Http2ProtocolOptions& http2_options, - const uint32_t max_response_headers_kb, - const uint32_t max_response_headers_count, - SessionFactory& http2_session_factory); - - // Http::ClientConnection - RequestEncoder& newStream(ResponseDecoder& response_decoder) override; - -private: - // ConnectionImpl - ConnectionCallbacks& callbacks() override { return callbacks_; } - int onBeginHeaders(const nghttp2_frame* frame) override; - int onHeader(const nghttp2_frame* frame, HeaderString&& name, HeaderString&& value) override; - - // Presently client connections only perform accounting of outbound frames and do not - // terminate connections when queue limits are exceeded. The primary reason is the complexity of - // the clean-up of upstream connections. 
The clean-up of upstream connection causes RST_STREAM - // messages to be sent on corresponding downstream connections. This may actually trigger flood - // mitigation on the downstream connections, which causes an exception to be thrown in the middle - // of the clean-up loop, leaving resources in a half cleaned up state. - // TODO(yanavlasov): add flood mitigation for upstream connections as well. - void checkOutboundQueueLimits() override {} - bool trackInboundFrames(const nghttp2_frame_hd*, uint32_t) override { return true; } - bool checkInboundFrameLimits() override { return true; } - - Http::ConnectionCallbacks& callbacks_; -}; - -/** - * HTTP/2 server connection codec. - */ -class ServerConnectionImpl : public ServerConnection, public ConnectionImpl { -public: - ServerConnectionImpl(Network::Connection& connection, ServerConnectionCallbacks& callbacks, - Http::Http2::CodecStats& stats, - const envoy::config::core::v3::Http2ProtocolOptions& http2_options, - const uint32_t max_request_headers_kb, - const uint32_t max_request_headers_count, - envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction - headers_with_underscores_action); - -private: - // ConnectionImpl - ConnectionCallbacks& callbacks() override { return callbacks_; } - int onBeginHeaders(const nghttp2_frame* frame) override; - int onHeader(const nghttp2_frame* frame, HeaderString&& name, HeaderString&& value) override; - void checkOutboundQueueLimits() override; - bool trackInboundFrames(const nghttp2_frame_hd* hd, uint32_t padding_length) override; - bool checkInboundFrameLimits() override; - absl::optional checkHeaderNameForUnderscores(absl::string_view header_name) override; - - // Http::Connection - // The reason for overriding the dispatch method is to do flood mitigation only when - // processing data from downstream client. Doing flood mitigation when processing upstream - // responses makes clean-up tricky, which needs to be improved (see comments for the - // ClientConnectionImpl::checkOutboundQueueLimits method). The dispatch method on the - // ServerConnectionImpl objects is called only when processing data from the downstream client in - // the ConnectionManagerImpl::onData method. - Http::Status dispatch(Buffer::Instance& data) override; - Http::Status innerDispatch(Buffer::Instance& data) override; - - ServerConnectionCallbacks& callbacks_; - - // This flag indicates that downstream data is being dispatched and turns on flood mitigation - // in the checkMaxOutbound*Framed methods. - bool dispatching_downstream_data_{false}; - - // The action to take when a request header name contains underscore characters. 
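  // Sketch of the behaviour implemented in checkHeaderNameForUnderscores(): with ALLOW a header
  // such as x_custom_header (a made-up name) passes through untouched; with DROP_HEADER it is
  // dropped, dropped_headers_with_underscores is incremented and processing continues; with any
  // other action the callback returns NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE, the request is
  // rejected and requests_rejected_with_underscores_in_headers is incremented.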
- envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction - headers_with_underscores_action_; -}; - -} // namespace Http2 -} // namespace Legacy -} // namespace Http -} // namespace Envoy diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc index 35b990cd39b7..ba4817e19a53 100644 --- a/source/common/runtime/runtime_features.cc +++ b/source/common/runtime/runtime_features.cc @@ -70,7 +70,6 @@ constexpr const char* runtime_features[] = { "envoy.reloadable_features.fixed_connection_close", "envoy.reloadable_features.http_default_alpn", "envoy.reloadable_features.listener_in_place_filterchain_update", - "envoy.reloadable_features.new_codec_behavior", "envoy.reloadable_features.preserve_query_string_in_path_redirects", "envoy.reloadable_features.preserve_upstream_date", "envoy.reloadable_features.stop_faking_paths", diff --git a/source/extensions/filters/network/http_connection_manager/BUILD b/source/extensions/filters/network/http_connection_manager/BUILD index 7ab7817d80fd..ec576a8abac9 100644 --- a/source/extensions/filters/network/http_connection_manager/BUILD +++ b/source/extensions/filters/network/http_connection_manager/BUILD @@ -35,9 +35,7 @@ envoy_cc_extension( "//source/common/http:default_server_string_lib", "//source/common/http:request_id_extension_lib", "//source/common/http:utility_lib", - "//source/common/http/http1:codec_legacy_lib", "//source/common/http/http1:codec_lib", - "//source/common/http/http2:codec_legacy_lib", "//source/common/http/http2:codec_lib", "//source/common/json:json_loader_lib", "//source/common/local_reply:local_reply_lib", diff --git a/source/extensions/filters/network/http_connection_manager/config.cc b/source/extensions/filters/network/http_connection_manager/config.cc index e1d374526c58..190db7f475b4 100644 --- a/source/extensions/filters/network/http_connection_manager/config.cc +++ b/source/extensions/filters/network/http_connection_manager/config.cc @@ -20,9 +20,7 @@ #include "common/http/conn_manager_utility.h" #include "common/http/default_server_string.h" #include "common/http/http1/codec_impl.h" -#include "common/http/http1/codec_impl_legacy.h" #include "common/http/http2/codec_impl.h" -#include "common/http/http2/codec_impl_legacy.h" #include "common/http/http3/quic_codec_factory.h" #include "common/http/http3/well_known_names.h" #include "common/http/request_id_extension_impl.h" @@ -482,33 +480,16 @@ HttpConnectionManagerConfig::createCodec(Network::Connection& connection, const Buffer::Instance& data, Http::ServerConnectionCallbacks& callbacks) { switch (codec_type_) { - case CodecType::HTTP1: { - if (context_.runtime().snapshot().runtimeFeatureEnabled( - "envoy.reloadable_features.new_codec_behavior")) { - return std::make_unique( - connection, Http::Http1::CodecStats::atomicGet(http1_codec_stats_, context_.scope()), - callbacks, http1_settings_, maxRequestHeadersKb(), maxRequestHeadersCount(), - headersWithUnderscoresAction()); - } else { - return std::make_unique( - connection, Http::Http1::CodecStats::atomicGet(http1_codec_stats_, context_.scope()), - callbacks, http1_settings_, maxRequestHeadersKb(), maxRequestHeadersCount(), - headersWithUnderscoresAction()); - } - } + case CodecType::HTTP1: + return std::make_unique( + connection, Http::Http1::CodecStats::atomicGet(http1_codec_stats_, context_.scope()), + callbacks, http1_settings_, maxRequestHeadersKb(), maxRequestHeadersCount(), + headersWithUnderscoresAction()); case CodecType::HTTP2: { - if 
(context_.runtime().snapshot().runtimeFeatureEnabled( - "envoy.reloadable_features.new_codec_behavior")) { - return std::make_unique( - connection, callbacks, - Http::Http2::CodecStats::atomicGet(http2_codec_stats_, context_.scope()), http2_options_, - maxRequestHeadersKb(), maxRequestHeadersCount(), headersWithUnderscoresAction()); - } else { - return std::make_unique( - connection, callbacks, - Http::Http2::CodecStats::atomicGet(http2_codec_stats_, context_.scope()), http2_options_, - maxRequestHeadersKb(), maxRequestHeadersCount(), headersWithUnderscoresAction()); - } + return std::make_unique( + connection, callbacks, + Http::Http2::CodecStats::atomicGet(http2_codec_stats_, context_.scope()), http2_options_, + maxRequestHeadersKb(), maxRequestHeadersCount(), headersWithUnderscoresAction()); } case CodecType::HTTP3: // Hard code Quiche factory name here to instantiate a QUIC codec implemented. diff --git a/test/common/http/codec_impl_fuzz_test.cc b/test/common/http/codec_impl_fuzz_test.cc index 492d2889aa8f..16e4eee2c960 100644 --- a/test/common/http/codec_impl_fuzz_test.cc +++ b/test/common/http/codec_impl_fuzz_test.cc @@ -462,34 +462,32 @@ void codecFuzz(const test::common::http::CodecImplFuzzTestCase& input, HttpVersi const envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction headers_with_underscores_action = envoy::config::core::v3::HttpProtocolOptions::ALLOW; - Http1::CodecStats::AtomicPtr http1_stats; - Http2::CodecStats::AtomicPtr http2_stats; ClientConnectionPtr client; ServerConnectionPtr server; const bool http2 = http_version == HttpVersion::Http2; + Http1::CodecStats::AtomicPtr stats; if (http2) { - client = std::make_unique( - client_connection, client_callbacks, Http2::CodecStats::atomicGet(http2_stats, stats_store), - client_http2_options, max_request_headers_kb, max_response_headers_count, + client = std::make_unique( + client_connection, client_callbacks, stats_store, client_http2_options, + max_request_headers_kb, max_response_headers_count, Http2::ProdNghttp2SessionFactory::get()); } else { client = std::make_unique( - client_connection, Http1::CodecStats::atomicGet(http1_stats, stats_store), client_callbacks, + client_connection, Http1::CodecStats::atomicGet(stats, stats_store), client_callbacks, client_http1settings, max_response_headers_count); } if (http2) { const envoy::config::core::v3::Http2ProtocolOptions server_http2_options{ fromHttp2Settings(input.h2_settings().server())}; - server = std::make_unique( - server_connection, server_callbacks, Http2::CodecStats::atomicGet(http2_stats, stats_store), - server_http2_options, max_request_headers_kb, max_request_headers_count, - headers_with_underscores_action); + server = std::make_unique( + server_connection, server_callbacks, stats_store, server_http2_options, + max_request_headers_kb, max_request_headers_count, headers_with_underscores_action); } else { const Http1Settings server_http1settings{fromHttp1Settings(input.h1_settings().server())}; server = std::make_unique( - server_connection, Http1::CodecStats::atomicGet(http1_stats, stats_store), server_callbacks, + server_connection, Http1::CodecStats::atomicGet(stats, stats_store), server_callbacks, server_http1settings, max_request_headers_kb, max_request_headers_count, headers_with_underscores_action); } @@ -645,8 +643,8 @@ void codecFuzz(const test::common::http::CodecImplFuzzTestCase& input, HttpVersi } } if (!codec_error && http2) { - dynamic_cast(*client).goAway(); - dynamic_cast(*server).goAway(); + dynamic_cast(*client).goAway(); + 
dynamic_cast(*server).goAway(); } } diff --git a/test/common/http/http1/BUILD b/test/common/http/http1/BUILD index dbcdcd4d4c8b..715e6dbf0c23 100644 --- a/test/common/http/http1/BUILD +++ b/test/common/http/http1/BUILD @@ -26,7 +26,6 @@ envoy_cc_test( "//source/common/event:dispatcher_lib", "//source/common/http:exception_lib", "//source/common/http:header_map_lib", - "//source/common/http/http1:codec_legacy_lib", "//source/common/http/http1:codec_lib", "//test/common/stats:stat_test_utility_lib", "//test/mocks/buffer:buffer_mocks", diff --git a/test/common/http/http1/codec_impl_test.cc b/test/common/http/http1/codec_impl_test.cc index 77565550dc26..fade286cfb44 100644 --- a/test/common/http/http1/codec_impl_test.cc +++ b/test/common/http/http1/codec_impl_test.cc @@ -9,7 +9,6 @@ #include "common/http/exception.h" #include "common/http/header_map_impl.h" #include "common/http/http1/codec_impl.h" -#include "common/http/http1/codec_impl_legacy.h" #include "common/runtime/runtime_impl.h" #include "test/common/stats/stat_test_utility.h" @@ -34,6 +33,7 @@ using testing::StrictMock; namespace Envoy { namespace Http { +namespace Http1 { namespace { std::string createHeaderFragment(int num_headers) { // Create a header field with num_headers headers. @@ -55,7 +55,7 @@ Buffer::OwnedImpl createBufferWithNByteSlices(absl::string_view input, size_t ma } } // namespace -class Http1CodecTestBase { +class Http1CodecTestBase : public testing::Test { protected: Http::Http1::CodecStats& http1CodecStats() { return Http::Http1::CodecStats::atomicGet(http1_codec_stats_, store_); @@ -65,19 +65,12 @@ class Http1CodecTestBase { Http::Http1::CodecStats::AtomicPtr http1_codec_stats_; }; -class Http1ServerConnectionImplTest : public Http1CodecTestBase, - public testing::TestWithParam { +class Http1ServerConnectionImplTest : public Http1CodecTestBase { public: void initialize() { - if (GetParam()) { - codec_ = std::make_unique( - connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, - max_request_headers_count_, headers_with_underscores_action_); - } else { - codec_ = std::make_unique( - connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, - max_request_headers_count_, headers_with_underscores_action_); - } + codec_ = std::make_unique( + connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, + max_request_headers_count_, headers_with_underscores_action_); } NiceMock connection_; @@ -135,15 +128,9 @@ void Http1ServerConnectionImplTest::expect400(Protocol p, bool allow_absolute_ur if (allow_absolute_url) { codec_settings_.allow_absolute_url_ = allow_absolute_url; - if (GetParam()) { - codec_ = std::make_unique( - connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, - max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); - } else { - codec_ = std::make_unique( - connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, - max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); - } + codec_ = std::make_unique( + connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, + max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); } MockRequestDecoder decoder; @@ -171,15 +158,9 @@ void Http1ServerConnectionImplTest::expectHeadersTest(Protocol p, bool allow_abs // Make a new 'codec' with the right settings if (allow_absolute_url) { codec_settings_.allow_absolute_url_ = 
allow_absolute_url; - if (GetParam()) { - codec_ = std::make_unique( - connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, - max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); - } else { - codec_ = std::make_unique( - connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, - max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); - } + codec_ = std::make_unique( + connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, + max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); } MockRequestDecoder decoder; @@ -198,15 +179,9 @@ void Http1ServerConnectionImplTest::expectTrailersTest(bool enable_trailers) { // Make a new 'codec' with the right settings if (enable_trailers) { codec_settings_.enable_trailers_ = enable_trailers; - if (GetParam()) { - codec_ = std::make_unique( - connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, - max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); - } else { - codec_ = std::make_unique( - connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, - max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); - } + codec_ = std::make_unique( + connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, + max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); } InSequence sequence; @@ -240,15 +215,9 @@ void Http1ServerConnectionImplTest::testTrailersExceedLimit(std::string trailer_ initialize(); // Make a new 'codec' with the right settings codec_settings_.enable_trailers_ = enable_trailers; - if (GetParam()) { - codec_ = std::make_unique( - connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, - max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); - } else { - codec_ = std::make_unique( - connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, - max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); - } + codec_ = std::make_unique( + connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, + max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); std::string exception_reason; NiceMock decoder; EXPECT_CALL(callbacks_, newStream(_, _)) @@ -326,12 +295,7 @@ void Http1ServerConnectionImplTest::testRequestHeadersAccepted(std::string heade EXPECT_TRUE(status.ok()); } -INSTANTIATE_TEST_SUITE_P(Codecs, Http1ServerConnectionImplTest, testing::Bool(), - [](const testing::TestParamInfo& param) { - return param.param ? 
"New" : "Legacy"; - }); - -TEST_P(Http1ServerConnectionImplTest, EmptyHeader) { +TEST_F(Http1ServerConnectionImplTest, EmptyHeader) { initialize(); InSequence sequence; @@ -355,7 +319,7 @@ TEST_P(Http1ServerConnectionImplTest, EmptyHeader) { // We support the identity encoding, but because it does not end in chunked encoding we reject it // per RFC 7230 Section 3.3.3 -TEST_P(Http1ServerConnectionImplTest, IdentityEncodingNoChunked) { +TEST_F(Http1ServerConnectionImplTest, IdentityEncodingNoChunked) { initialize(); InSequence sequence; @@ -370,7 +334,7 @@ TEST_P(Http1ServerConnectionImplTest, IdentityEncodingNoChunked) { EXPECT_EQ(status.message(), "http/1.1 protocol error: unsupported transfer encoding"); } -TEST_P(Http1ServerConnectionImplTest, UnsupportedEncoding) { +TEST_F(Http1ServerConnectionImplTest, UnsupportedEncoding) { initialize(); InSequence sequence; @@ -386,7 +350,7 @@ TEST_P(Http1ServerConnectionImplTest, UnsupportedEncoding) { } // Verify that data in the two body chunks is merged before the call to decodeData. -TEST_P(Http1ServerConnectionImplTest, ChunkedBody) { +TEST_F(Http1ServerConnectionImplTest, ChunkedBody) { initialize(); InSequence sequence; @@ -417,7 +381,7 @@ TEST_P(Http1ServerConnectionImplTest, ChunkedBody) { // Verify dispatch behavior when dispatching an incomplete chunk, and resumption of the parse via a // second dispatch. -TEST_P(Http1ServerConnectionImplTest, ChunkedBodySplitOverTwoDispatches) { +TEST_F(Http1ServerConnectionImplTest, ChunkedBodySplitOverTwoDispatches) { initialize(); InSequence sequence; @@ -455,7 +419,7 @@ TEST_P(Http1ServerConnectionImplTest, ChunkedBodySplitOverTwoDispatches) { // Verify that headers and chunked body are processed correctly and data is merged before the // decodeData call even if delivered in a buffer that holds 1 byte per slice. -TEST_P(Http1ServerConnectionImplTest, ChunkedBodyFragmentedBuffer) { +TEST_F(Http1ServerConnectionImplTest, ChunkedBodyFragmentedBuffer) { initialize(); InSequence sequence; @@ -484,7 +448,7 @@ TEST_P(Http1ServerConnectionImplTest, ChunkedBodyFragmentedBuffer) { EXPECT_EQ(0U, buffer.length()); } -TEST_P(Http1ServerConnectionImplTest, ChunkedBodyCase) { +TEST_F(Http1ServerConnectionImplTest, ChunkedBodyCase) { initialize(); InSequence sequence; @@ -511,7 +475,7 @@ TEST_P(Http1ServerConnectionImplTest, ChunkedBodyCase) { // Verify that body dispatch does not happen after detecting a parse error processing a chunk // header. -TEST_P(Http1ServerConnectionImplTest, InvalidChunkHeader) { +TEST_F(Http1ServerConnectionImplTest, InvalidChunkHeader) { initialize(); InSequence sequence; @@ -537,7 +501,7 @@ TEST_P(Http1ServerConnectionImplTest, InvalidChunkHeader) { EXPECT_EQ(status.message(), "http/1.1 protocol error: HPE_INVALID_CHUNK_SIZE"); } -TEST_P(Http1ServerConnectionImplTest, IdentityAndChunkedBody) { +TEST_F(Http1ServerConnectionImplTest, IdentityAndChunkedBody) { initialize(); InSequence sequence; @@ -554,7 +518,7 @@ TEST_P(Http1ServerConnectionImplTest, IdentityAndChunkedBody) { EXPECT_EQ(status.message(), "http/1.1 protocol error: unsupported transfer encoding"); } -TEST_P(Http1ServerConnectionImplTest, HostWithLWS) { +TEST_F(Http1ServerConnectionImplTest, HostWithLWS) { initialize(); TestRequestHeaderMapImpl expected_headers{ @@ -575,7 +539,7 @@ TEST_P(Http1ServerConnectionImplTest, HostWithLWS) { // Regression test for https://github.com/envoyproxy/envoy/issues/10270. Linear whitespace at the // beginning and end of a header value should be stripped. 
Whitespace in the middle should be // preserved. -TEST_P(Http1ServerConnectionImplTest, InnerLWSIsPreserved) { +TEST_F(Http1ServerConnectionImplTest, InnerLWSIsPreserved) { initialize(); // Header with many spaces surrounded by non-whitespace characters to ensure that dispatching is @@ -608,7 +572,7 @@ TEST_P(Http1ServerConnectionImplTest, InnerLWSIsPreserved) { } } -TEST_P(Http1ServerConnectionImplTest, Http10) { +TEST_F(Http1ServerConnectionImplTest, Http10) { initialize(); InSequence sequence; @@ -626,7 +590,7 @@ TEST_P(Http1ServerConnectionImplTest, Http10) { EXPECT_EQ(Protocol::Http10, codec_->protocol()); } -TEST_P(Http1ServerConnectionImplTest, Http10AbsoluteNoOp) { +TEST_F(Http1ServerConnectionImplTest, Http10AbsoluteNoOp) { initialize(); TestRequestHeaderMapImpl expected_headers{{":path", "/"}, {":method", "GET"}}; @@ -634,7 +598,7 @@ TEST_P(Http1ServerConnectionImplTest, Http10AbsoluteNoOp) { expectHeadersTest(Protocol::Http10, true, buffer, expected_headers); } -TEST_P(Http1ServerConnectionImplTest, Http10Absolute) { +TEST_F(Http1ServerConnectionImplTest, Http10Absolute) { initialize(); TestRequestHeaderMapImpl expected_headers{ @@ -643,7 +607,7 @@ TEST_P(Http1ServerConnectionImplTest, Http10Absolute) { expectHeadersTest(Protocol::Http10, true, buffer, expected_headers); } -TEST_P(Http1ServerConnectionImplTest, Http10MultipleResponses) { +TEST_F(Http1ServerConnectionImplTest, Http10MultipleResponses) { initialize(); MockRequestDecoder decoder; @@ -689,7 +653,7 @@ TEST_P(Http1ServerConnectionImplTest, Http10MultipleResponses) { } } -TEST_P(Http1ServerConnectionImplTest, Http11AbsolutePath1) { +TEST_F(Http1ServerConnectionImplTest, Http11AbsolutePath1) { initialize(); TestRequestHeaderMapImpl expected_headers{ @@ -698,7 +662,7 @@ TEST_P(Http1ServerConnectionImplTest, Http11AbsolutePath1) { expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); } -TEST_P(Http1ServerConnectionImplTest, Http11AbsolutePath2) { +TEST_F(Http1ServerConnectionImplTest, Http11AbsolutePath2) { initialize(); TestRequestHeaderMapImpl expected_headers{ @@ -707,7 +671,7 @@ TEST_P(Http1ServerConnectionImplTest, Http11AbsolutePath2) { expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); } -TEST_P(Http1ServerConnectionImplTest, Http11AbsolutePathWithPort) { +TEST_F(Http1ServerConnectionImplTest, Http11AbsolutePathWithPort) { initialize(); TestRequestHeaderMapImpl expected_headers{ @@ -717,7 +681,7 @@ TEST_P(Http1ServerConnectionImplTest, Http11AbsolutePathWithPort) { expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); } -TEST_P(Http1ServerConnectionImplTest, Http11AbsoluteEnabledNoOp) { +TEST_F(Http1ServerConnectionImplTest, Http11AbsoluteEnabledNoOp) { initialize(); TestRequestHeaderMapImpl expected_headers{ @@ -726,7 +690,7 @@ TEST_P(Http1ServerConnectionImplTest, Http11AbsoluteEnabledNoOp) { expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); } -TEST_P(Http1ServerConnectionImplTest, Http11InvalidRequest) { +TEST_F(Http1ServerConnectionImplTest, Http11InvalidRequest) { initialize(); // Invalid because www.somewhere.com is not an absolute path nor an absolute url @@ -734,7 +698,7 @@ TEST_P(Http1ServerConnectionImplTest, Http11InvalidRequest) { expect400(Protocol::Http11, true, buffer, "http1.codec_error"); } -TEST_P(Http1ServerConnectionImplTest, Http11InvalidTrailerPost) { +TEST_F(Http1ServerConnectionImplTest, Http11InvalidTrailerPost) { initialize(); MockRequestDecoder decoder; @@ -759,7 +723,7 @@ TEST_P(Http1ServerConnectionImplTest, 
Http11InvalidTrailerPost) { EXPECT_TRUE(isCodecProtocolError(status)); } -TEST_P(Http1ServerConnectionImplTest, Http11AbsolutePathNoSlash) { +TEST_F(Http1ServerConnectionImplTest, Http11AbsolutePathNoSlash) { initialize(); TestRequestHeaderMapImpl expected_headers{ @@ -768,21 +732,21 @@ TEST_P(Http1ServerConnectionImplTest, Http11AbsolutePathNoSlash) { expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); } -TEST_P(Http1ServerConnectionImplTest, Http11AbsolutePathBad) { +TEST_F(Http1ServerConnectionImplTest, Http11AbsolutePathBad) { initialize(); Buffer::OwnedImpl buffer("GET * HTTP/1.1\r\nHost: bah\r\n\r\n"); expect400(Protocol::Http11, true, buffer, "http1.invalid_url"); } -TEST_P(Http1ServerConnectionImplTest, Http11AbsolutePortTooLarge) { +TEST_F(Http1ServerConnectionImplTest, Http11AbsolutePortTooLarge) { initialize(); Buffer::OwnedImpl buffer("GET http://foobar.com:1000000 HTTP/1.1\r\nHost: bah\r\n\r\n"); expect400(Protocol::Http11, true, buffer); } -TEST_P(Http1ServerConnectionImplTest, SketchyConnectionHeader) { +TEST_F(Http1ServerConnectionImplTest, SketchyConnectionHeader) { initialize(); Buffer::OwnedImpl buffer( @@ -790,7 +754,7 @@ TEST_P(Http1ServerConnectionImplTest, SketchyConnectionHeader) { expect400(Protocol::Http11, true, buffer, "http1.connection_header_rejected"); } -TEST_P(Http1ServerConnectionImplTest, Http11RelativeOnly) { +TEST_F(Http1ServerConnectionImplTest, Http11RelativeOnly) { initialize(); TestRequestHeaderMapImpl expected_headers{ @@ -799,7 +763,7 @@ TEST_P(Http1ServerConnectionImplTest, Http11RelativeOnly) { expectHeadersTest(Protocol::Http11, false, buffer, expected_headers); } -TEST_P(Http1ServerConnectionImplTest, Http11Options) { +TEST_F(Http1ServerConnectionImplTest, Http11Options) { initialize(); TestRequestHeaderMapImpl expected_headers{ @@ -808,7 +772,7 @@ TEST_P(Http1ServerConnectionImplTest, Http11Options) { expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); } -TEST_P(Http1ServerConnectionImplTest, SimpleGet) { +TEST_F(Http1ServerConnectionImplTest, SimpleGet) { initialize(); InSequence sequence; @@ -825,7 +789,7 @@ TEST_P(Http1ServerConnectionImplTest, SimpleGet) { EXPECT_EQ(0U, buffer.length()); } -TEST_P(Http1ServerConnectionImplTest, BadRequestNoStreamLegacy) { +TEST_F(Http1ServerConnectionImplTest, BadRequestNoStreamLegacy) { TestScopedRuntime scoped_runtime; Runtime::LoaderSingleton::getExisting()->mergeValues( {{"envoy.reloadable_features.early_errors_via_hcm", "false"}}); @@ -845,7 +809,7 @@ TEST_P(Http1ServerConnectionImplTest, BadRequestNoStreamLegacy) { // Test that if the stream is not created at the time an error is detected, it // is created as part of sending the protocol error. -TEST_P(Http1ServerConnectionImplTest, BadRequestNoStream) { +TEST_F(Http1ServerConnectionImplTest, BadRequestNoStream) { initialize(); MockRequestDecoder decoder; @@ -864,7 +828,7 @@ TEST_P(Http1ServerConnectionImplTest, BadRequestNoStream) { } // Make sure that if the first line is parsed, that sendLocalReply tracks HEAD requests correctly. -TEST_P(Http1ServerConnectionImplTest, BadHeadRequest) { +TEST_F(Http1ServerConnectionImplTest, BadHeadRequest) { initialize(); MockRequestDecoder decoder; @@ -884,7 +848,7 @@ TEST_P(Http1ServerConnectionImplTest, BadHeadRequest) { } // Make sure that if gRPC headers are parsed, they are tracked by sendLocalReply. 
-TEST_P(Http1ServerConnectionImplTest, BadGrpcRequest) { +TEST_F(Http1ServerConnectionImplTest, BadGrpcRequest) { initialize(); MockRequestDecoder decoder; @@ -905,7 +869,7 @@ TEST_P(Http1ServerConnectionImplTest, BadGrpcRequest) { // This behavior was observed during CVE-2019-18801 and helped to limit the // scope of affected Envoy configurations. -TEST_P(Http1ServerConnectionImplTest, RejectInvalidMethod) { +TEST_F(Http1ServerConnectionImplTest, RejectInvalidMethod) { initialize(); MockRequestDecoder decoder; @@ -917,7 +881,7 @@ TEST_P(Http1ServerConnectionImplTest, RejectInvalidMethod) { EXPECT_TRUE(isCodecProtocolError(status)); } -TEST_P(Http1ServerConnectionImplTest, BadRequestStartedStream) { +TEST_F(Http1ServerConnectionImplTest, BadRequestStartedStream) { initialize(); MockRequestDecoder decoder; @@ -933,7 +897,7 @@ TEST_P(Http1ServerConnectionImplTest, BadRequestStartedStream) { EXPECT_TRUE(isCodecProtocolError(status)); } -TEST_P(Http1ServerConnectionImplTest, FloodProtection) { +TEST_F(Http1ServerConnectionImplTest, FloodProtection) { initialize(); NiceMock decoder; @@ -984,7 +948,7 @@ TEST_P(Http1ServerConnectionImplTest, FloodProtection) { } } -TEST_P(Http1ServerConnectionImplTest, FloodProtectionOff) { +TEST_F(Http1ServerConnectionImplTest, FloodProtectionOff) { TestScopedRuntime scoped_runtime; Runtime::LoaderSingleton::getExisting()->mergeValues( {{"envoy.reloadable_features.http1_flood_protection", "false"}}); @@ -1020,7 +984,7 @@ TEST_P(Http1ServerConnectionImplTest, FloodProtectionOff) { } } -TEST_P(Http1ServerConnectionImplTest, HostHeaderTranslation) { +TEST_F(Http1ServerConnectionImplTest, HostHeaderTranslation) { initialize(); InSequence sequence; @@ -1039,8 +1003,7 @@ TEST_P(Http1ServerConnectionImplTest, HostHeaderTranslation) { } // Ensures that requests with invalid HTTP header values are properly rejected -// when the runtime guard is enabled for the feature. -TEST_P(Http1ServerConnectionImplTest, HeaderInvalidCharsRejection) { +TEST_F(Http1ServerConnectionImplTest, HeaderInvalidCharsRejection) { TestScopedRuntime scoped_runtime; // When the runtime-guarded feature is enabled, invalid header values // should result in a rejection. @@ -1065,7 +1028,7 @@ TEST_P(Http1ServerConnectionImplTest, HeaderInvalidCharsRejection) { // Ensures that request headers with names containing the underscore character are allowed // when the option is set to allow. -TEST_P(Http1ServerConnectionImplTest, HeaderNameWithUnderscoreAllowed) { +TEST_F(Http1ServerConnectionImplTest, HeaderNameWithUnderscoreAllowed) { headers_with_underscores_action_ = envoy::config::core::v3::HttpProtocolOptions::ALLOW; initialize(); @@ -1089,7 +1052,7 @@ TEST_P(Http1ServerConnectionImplTest, HeaderNameWithUnderscoreAllowed) { // Ensures that request headers with names containing the underscore character are dropped // when the option is set to drop headers. -TEST_P(Http1ServerConnectionImplTest, HeaderNameWithUnderscoreAreDropped) { +TEST_F(Http1ServerConnectionImplTest, HeaderNameWithUnderscoreAreDropped) { headers_with_underscores_action_ = envoy::config::core::v3::HttpProtocolOptions::DROP_HEADER; initialize(); @@ -1112,7 +1075,7 @@ TEST_P(Http1ServerConnectionImplTest, HeaderNameWithUnderscoreAreDropped) { // Ensures that request with header names containing the underscore character are rejected // when the option is set to reject request. 
-TEST_P(Http1ServerConnectionImplTest, HeaderNameWithUnderscoreCauseRequestRejected) { +TEST_F(Http1ServerConnectionImplTest, HeaderNameWithUnderscoreCauseRequestRejected) { headers_with_underscores_action_ = envoy::config::core::v3::HttpProtocolOptions::REJECT_REQUEST; initialize(); @@ -1133,7 +1096,7 @@ TEST_P(Http1ServerConnectionImplTest, HeaderNameWithUnderscoreCauseRequestReject EXPECT_EQ(1, store_.counter("http1.requests_rejected_with_underscores_in_headers").value()); } -TEST_P(Http1ServerConnectionImplTest, HeaderInvalidAuthority) { +TEST_F(Http1ServerConnectionImplTest, HeaderInvalidAuthority) { TestScopedRuntime scoped_runtime; initialize(); @@ -1156,7 +1119,7 @@ TEST_P(Http1ServerConnectionImplTest, HeaderInvalidAuthority) { // Mutate an HTTP GET with embedded NULs, this should always be rejected in some // way (not necessarily with "head value contains NUL" though). -TEST_P(Http1ServerConnectionImplTest, HeaderMutateEmbeddedNul) { +TEST_F(Http1ServerConnectionImplTest, HeaderMutateEmbeddedNul) { const std::string example_input = "GET / HTTP/1.1\r\nHOST: h.com\r\nfoo: barbaz\r\n"; for (size_t n = 1; n < example_input.size(); ++n) { @@ -1180,7 +1143,7 @@ TEST_P(Http1ServerConnectionImplTest, HeaderMutateEmbeddedNul) { // Mutate an HTTP GET with CR or LF. These can cause an error status or maybe // result in a valid decodeHeaders(). In any case, the validHeaderString() // ASSERTs should validate we never have any embedded CR or LF. -TEST_P(Http1ServerConnectionImplTest, HeaderMutateEmbeddedCRLF) { +TEST_F(Http1ServerConnectionImplTest, HeaderMutateEmbeddedCRLF) { const std::string example_input = "GET / HTTP/1.1\r\nHOST: h.com\r\nfoo: barbaz\r\n"; for (const char c : {'\r', '\n'}) { @@ -1200,7 +1163,7 @@ TEST_P(Http1ServerConnectionImplTest, HeaderMutateEmbeddedCRLF) { } } -TEST_P(Http1ServerConnectionImplTest, CloseDuringHeadersComplete) { +TEST_F(Http1ServerConnectionImplTest, CloseDuringHeadersComplete) { initialize(); InSequence sequence; @@ -1222,7 +1185,7 @@ TEST_P(Http1ServerConnectionImplTest, CloseDuringHeadersComplete) { EXPECT_NE(0U, buffer.length()); } -TEST_P(Http1ServerConnectionImplTest, PostWithContentLength) { +TEST_F(Http1ServerConnectionImplTest, PostWithContentLength) { initialize(); InSequence sequence; @@ -1248,7 +1211,7 @@ TEST_P(Http1ServerConnectionImplTest, PostWithContentLength) { // Verify that headers and body with content length are processed correctly and data is merged // before the decodeData call even if delivered in a buffer that holds 1 byte per slice. -TEST_P(Http1ServerConnectionImplTest, PostWithContentLengthFragmentedBuffer) { +TEST_F(Http1ServerConnectionImplTest, PostWithContentLengthFragmentedBuffer) { initialize(); InSequence sequence; @@ -1273,7 +1236,7 @@ TEST_P(Http1ServerConnectionImplTest, PostWithContentLengthFragmentedBuffer) { EXPECT_EQ(0U, buffer.length()); } -TEST_P(Http1ServerConnectionImplTest, HeaderOnlyResponse) { +TEST_F(Http1ServerConnectionImplTest, HeaderOnlyResponse) { initialize(); NiceMock decoder; @@ -1299,7 +1262,7 @@ TEST_P(Http1ServerConnectionImplTest, HeaderOnlyResponse) { // As with Http1ClientConnectionImplTest.LargeHeaderRequestEncode but validate // the response encoder instead of request encoder. 
-TEST_P(Http1ServerConnectionImplTest, LargeHeaderResponseEncode) { +TEST_F(Http1ServerConnectionImplTest, LargeHeaderResponseEncode) { initialize(); NiceMock decoder; @@ -1325,7 +1288,7 @@ TEST_P(Http1ServerConnectionImplTest, LargeHeaderResponseEncode) { output); } -TEST_P(Http1ServerConnectionImplTest, HeaderOnlyResponseTrainProperHeaders) { +TEST_F(Http1ServerConnectionImplTest, HeaderOnlyResponseTrainProperHeaders) { codec_settings_.header_key_format_ = Http1Settings::HeaderKeyFormat::ProperCase; initialize(); @@ -1352,7 +1315,7 @@ TEST_P(Http1ServerConnectionImplTest, HeaderOnlyResponseTrainProperHeaders) { output); } -TEST_P(Http1ServerConnectionImplTest, HeaderOnlyResponseWith204) { +TEST_F(Http1ServerConnectionImplTest, HeaderOnlyResponseWith204) { initialize(); NiceMock decoder; @@ -1376,7 +1339,7 @@ TEST_P(Http1ServerConnectionImplTest, HeaderOnlyResponseWith204) { EXPECT_EQ("HTTP/1.1 204 No Content\r\n\r\n", output); } -TEST_P(Http1ServerConnectionImplTest, HeaderOnlyResponseWith100Then200) { +TEST_F(Http1ServerConnectionImplTest, HeaderOnlyResponseWith100Then200) { initialize(); NiceMock decoder; @@ -1407,7 +1370,7 @@ TEST_P(Http1ServerConnectionImplTest, HeaderOnlyResponseWith100Then200) { EXPECT_EQ("HTTP/1.1 200 OK\r\ncontent-length: 0\r\n\r\n", output); } -TEST_P(Http1ServerConnectionImplTest, MetadataTest) { +TEST_F(Http1ServerConnectionImplTest, MetadataTest) { initialize(); NiceMock decoder; @@ -1430,7 +1393,7 @@ TEST_P(Http1ServerConnectionImplTest, MetadataTest) { EXPECT_EQ(1, store_.counter("http1.metadata_not_supported_error").value()); } -TEST_P(Http1ServerConnectionImplTest, ChunkedResponse) { +TEST_F(Http1ServerConnectionImplTest, ChunkedResponse) { initialize(); NiceMock decoder; @@ -1466,7 +1429,7 @@ TEST_P(Http1ServerConnectionImplTest, ChunkedResponse) { output); } -TEST_P(Http1ServerConnectionImplTest, ChunkedResponseWithTrailers) { +TEST_F(Http1ServerConnectionImplTest, ChunkedResponseWithTrailers) { codec_settings_.enable_trailers_ = true; initialize(); NiceMock decoder; @@ -1499,7 +1462,7 @@ TEST_P(Http1ServerConnectionImplTest, ChunkedResponseWithTrailers) { output); } -TEST_P(Http1ServerConnectionImplTest, ContentLengthResponse) { +TEST_F(Http1ServerConnectionImplTest, ContentLengthResponse) { initialize(); NiceMock decoder; @@ -1526,7 +1489,7 @@ TEST_P(Http1ServerConnectionImplTest, ContentLengthResponse) { EXPECT_EQ("HTTP/1.1 200 OK\r\ncontent-length: 11\r\n\r\nHello World", output); } -TEST_P(Http1ServerConnectionImplTest, HeadRequestResponse) { +TEST_F(Http1ServerConnectionImplTest, HeadRequestResponse) { initialize(); NiceMock decoder; @@ -1550,7 +1513,7 @@ TEST_P(Http1ServerConnectionImplTest, HeadRequestResponse) { EXPECT_EQ("HTTP/1.1 200 OK\r\ncontent-length: 5\r\n\r\n", output); } -TEST_P(Http1ServerConnectionImplTest, HeadChunkedRequestResponse) { +TEST_F(Http1ServerConnectionImplTest, HeadChunkedRequestResponse) { initialize(); NiceMock decoder; @@ -1574,7 +1537,7 @@ TEST_P(Http1ServerConnectionImplTest, HeadChunkedRequestResponse) { EXPECT_EQ("HTTP/1.1 200 OK\r\ntransfer-encoding: chunked\r\n\r\n", output); } -TEST_P(Http1ServerConnectionImplTest, DoubleRequest) { +TEST_F(Http1ServerConnectionImplTest, DoubleRequest) { initialize(); NiceMock decoder; @@ -1600,11 +1563,11 @@ TEST_P(Http1ServerConnectionImplTest, DoubleRequest) { EXPECT_EQ(0U, buffer.length()); } -TEST_P(Http1ServerConnectionImplTest, RequestWithTrailersDropped) { expectTrailersTest(false); } +TEST_F(Http1ServerConnectionImplTest, RequestWithTrailersDropped) { 
expectTrailersTest(false); } -TEST_P(Http1ServerConnectionImplTest, RequestWithTrailersKept) { expectTrailersTest(true); } +TEST_F(Http1ServerConnectionImplTest, RequestWithTrailersKept) { expectTrailersTest(true); } -TEST_P(Http1ServerConnectionImplTest, IgnoreUpgradeH2c) { +TEST_F(Http1ServerConnectionImplTest, IgnoreUpgradeH2c) { initialize(); TestRequestHeaderMapImpl expected_headers{ @@ -1615,7 +1578,7 @@ TEST_P(Http1ServerConnectionImplTest, IgnoreUpgradeH2c) { expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); } -TEST_P(Http1ServerConnectionImplTest, IgnoreUpgradeH2cClose) { +TEST_F(Http1ServerConnectionImplTest, IgnoreUpgradeH2cClose) { initialize(); TestRequestHeaderMapImpl expected_headers{{":authority", "www.somewhere.com"}, @@ -1628,7 +1591,7 @@ TEST_P(Http1ServerConnectionImplTest, IgnoreUpgradeH2cClose) { expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); } -TEST_P(Http1ServerConnectionImplTest, IgnoreUpgradeH2cCloseEtc) { +TEST_F(Http1ServerConnectionImplTest, IgnoreUpgradeH2cCloseEtc) { initialize(); TestRequestHeaderMapImpl expected_headers{{":authority", "www.somewhere.com"}, @@ -1641,7 +1604,7 @@ TEST_P(Http1ServerConnectionImplTest, IgnoreUpgradeH2cCloseEtc) { expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); } -TEST_P(Http1ServerConnectionImplTest, UpgradeRequest) { +TEST_F(Http1ServerConnectionImplTest, UpgradeRequest) { initialize(); InSequence sequence; @@ -1665,7 +1628,7 @@ TEST_P(Http1ServerConnectionImplTest, UpgradeRequest) { EXPECT_TRUE(status.ok()); } -TEST_P(Http1ServerConnectionImplTest, UpgradeRequestWithEarlyData) { +TEST_F(Http1ServerConnectionImplTest, UpgradeRequestWithEarlyData) { initialize(); InSequence sequence; @@ -1681,7 +1644,7 @@ TEST_P(Http1ServerConnectionImplTest, UpgradeRequestWithEarlyData) { EXPECT_TRUE(status.ok()); } -TEST_P(Http1ServerConnectionImplTest, UpgradeRequestWithTEChunked) { +TEST_F(Http1ServerConnectionImplTest, UpgradeRequestWithTEChunked) { initialize(); InSequence sequence; @@ -1699,7 +1662,7 @@ TEST_P(Http1ServerConnectionImplTest, UpgradeRequestWithTEChunked) { EXPECT_TRUE(status.ok()); } -TEST_P(Http1ServerConnectionImplTest, UpgradeRequestWithNoBody) { +TEST_F(Http1ServerConnectionImplTest, UpgradeRequestWithNoBody) { initialize(); InSequence sequence; @@ -1718,7 +1681,7 @@ TEST_P(Http1ServerConnectionImplTest, UpgradeRequestWithNoBody) { } // Test that 101 upgrade responses do not contain content-length or transfer-encoding headers. -TEST_P(Http1ServerConnectionImplTest, UpgradeRequestResponseHeaders) { +TEST_F(Http1ServerConnectionImplTest, UpgradeRequestResponseHeaders) { initialize(); NiceMock decoder; @@ -1742,7 +1705,7 @@ TEST_P(Http1ServerConnectionImplTest, UpgradeRequestResponseHeaders) { EXPECT_EQ("HTTP/1.1 101 Switching Protocols\r\n\r\n", output); } -TEST_P(Http1ServerConnectionImplTest, ConnectRequestNoContentLength) { +TEST_F(Http1ServerConnectionImplTest, ConnectRequestNoContentLength) { initialize(); InSequence sequence; @@ -1766,7 +1729,7 @@ TEST_P(Http1ServerConnectionImplTest, ConnectRequestNoContentLength) { // We use the absolute URL parsing code for CONNECT requests, but it does not // actually allow absolute URLs. 
-TEST_P(Http1ServerConnectionImplTest, ConnectRequestAbsoluteURLNotallowed) { +TEST_F(Http1ServerConnectionImplTest, ConnectRequestAbsoluteURLNotallowed) { initialize(); InSequence sequence; @@ -1779,7 +1742,7 @@ TEST_P(Http1ServerConnectionImplTest, ConnectRequestAbsoluteURLNotallowed) { EXPECT_TRUE(isCodecProtocolError(status)); } -TEST_P(Http1ServerConnectionImplTest, ConnectRequestWithEarlyData) { +TEST_F(Http1ServerConnectionImplTest, ConnectRequestWithEarlyData) { initialize(); InSequence sequence; @@ -1794,7 +1757,7 @@ TEST_P(Http1ServerConnectionImplTest, ConnectRequestWithEarlyData) { EXPECT_TRUE(status.ok()); } -TEST_P(Http1ServerConnectionImplTest, ConnectRequestWithTEChunked) { +TEST_F(Http1ServerConnectionImplTest, ConnectRequestWithTEChunked) { initialize(); InSequence sequence; @@ -1811,7 +1774,7 @@ TEST_P(Http1ServerConnectionImplTest, ConnectRequestWithTEChunked) { EXPECT_EQ(status.message(), "http/1.1 protocol error: unsupported transfer encoding"); } -TEST_P(Http1ServerConnectionImplTest, ConnectRequestWithNonZeroContentLength) { +TEST_F(Http1ServerConnectionImplTest, ConnectRequestWithNonZeroContentLength) { initialize(); InSequence sequence; @@ -1827,7 +1790,7 @@ TEST_P(Http1ServerConnectionImplTest, ConnectRequestWithNonZeroContentLength) { EXPECT_EQ(status.message(), "http/1.1 protocol error: unsupported content length"); } -TEST_P(Http1ServerConnectionImplTest, ConnectRequestWithZeroContentLength) { +TEST_F(Http1ServerConnectionImplTest, ConnectRequestWithZeroContentLength) { initialize(); InSequence sequence; @@ -1844,7 +1807,7 @@ TEST_P(Http1ServerConnectionImplTest, ConnectRequestWithZeroContentLength) { EXPECT_TRUE(status.ok()); } -TEST_P(Http1ServerConnectionImplTest, WatermarkTest) { +TEST_F(Http1ServerConnectionImplTest, WatermarkTest) { EXPECT_CALL(connection_, bufferLimit()).WillOnce(Return(10)); initialize(); @@ -1878,43 +1841,24 @@ TEST_P(Http1ServerConnectionImplTest, WatermarkTest) { ->onUnderlyingConnectionBelowWriteBufferLowWatermark(); } -class Http1ClientConnectionImplTest : public Http1CodecTestBase, - public testing::TestWithParam { +class Http1ClientConnectionImplTest : public Http1CodecTestBase { public: void initialize() { - if (GetParam()) { - codec_ = std::make_unique( - connection_, http1CodecStats(), callbacks_, codec_settings_, max_response_headers_count_); - } else { - codec_ = std::make_unique( - connection_, http1CodecStats(), callbacks_, codec_settings_, max_response_headers_count_); - } - } - - void readDisableOnRequestEncoder(RequestEncoder* request_encoder, bool disable) { - if (GetParam()) { - dynamic_cast(request_encoder)->readDisable(disable); - } else { - dynamic_cast(request_encoder)->readDisable(disable); - } + codec_ = std::make_unique(connection_, http1CodecStats(), callbacks_, + codec_settings_, max_response_headers_count_); } NiceMock connection_; NiceMock callbacks_; NiceMock codec_settings_; - Http::ClientConnectionPtr codec_; + std::unique_ptr codec_; protected: Stats::TestUtil::TestStore store_; uint32_t max_response_headers_count_{Http::DEFAULT_MAX_HEADERS_COUNT}; }; -INSTANTIATE_TEST_SUITE_P(Codecs, Http1ClientConnectionImplTest, testing::Bool(), - [](const testing::TestParamInfo& param) { - return param.param ? 
"New" : "Legacy"; - }); - -TEST_P(Http1ClientConnectionImplTest, SimpleGet) { +TEST_F(Http1ClientConnectionImplTest, SimpleGet) { initialize(); MockResponseDecoder response_decoder; @@ -1928,7 +1872,7 @@ TEST_P(Http1ClientConnectionImplTest, SimpleGet) { EXPECT_EQ("GET / HTTP/1.1\r\ncontent-length: 0\r\n\r\n", output); } -TEST_P(Http1ClientConnectionImplTest, SimpleGetWithHeaderCasing) { +TEST_F(Http1ClientConnectionImplTest, SimpleGetWithHeaderCasing) { codec_settings_.header_key_format_ = Http1Settings::HeaderKeyFormat::ProperCase; initialize(); @@ -1944,7 +1888,7 @@ TEST_P(Http1ClientConnectionImplTest, SimpleGetWithHeaderCasing) { EXPECT_EQ("GET / HTTP/1.1\r\nMy-Custom-Header: hey\r\nContent-Length: 0\r\n\r\n", output); } -TEST_P(Http1ClientConnectionImplTest, HostHeaderTranslate) { +TEST_F(Http1ClientConnectionImplTest, HostHeaderTranslate) { initialize(); MockResponseDecoder response_decoder; @@ -1958,7 +1902,7 @@ TEST_P(Http1ClientConnectionImplTest, HostHeaderTranslate) { EXPECT_EQ("GET / HTTP/1.1\r\nhost: host\r\ncontent-length: 0\r\n\r\n", output); } -TEST_P(Http1ClientConnectionImplTest, Reset) { +TEST_F(Http1ClientConnectionImplTest, Reset) { initialize(); MockResponseDecoder response_decoder; @@ -1972,15 +1916,16 @@ TEST_P(Http1ClientConnectionImplTest, Reset) { // Verify that we correctly enable reads on the connection when the final response is // received. -TEST_P(Http1ClientConnectionImplTest, FlowControlReadDisabledReenable) { +TEST_F(Http1ClientConnectionImplTest, FlowControlReadDisabledReenable) { initialize(); MockResponseDecoder response_decoder; - auto* request_encoder = &codec_->newStream(response_decoder); + Http::RequestEncoder* request_encoder = &codec_->newStream(response_decoder); // Manually read disable. EXPECT_CALL(connection_, readDisable(true)).Times(2); - readDisableOnRequestEncoder(request_encoder, true); - readDisableOnRequestEncoder(request_encoder, true); + RequestEncoderImpl* encoder = dynamic_cast(request_encoder); + encoder->readDisable(true); + encoder->readDisable(true); std::string output; ON_CALL(connection_, write(_, _)).WillByDefault(AddBufferToString(&output)); @@ -2001,7 +1946,7 @@ TEST_P(Http1ClientConnectionImplTest, FlowControlReadDisabledReenable) { EXPECT_TRUE(status.ok()); } -TEST_P(Http1ClientConnectionImplTest, PrematureResponse) { +TEST_F(Http1ClientConnectionImplTest, PrematureResponse) { initialize(); Buffer::OwnedImpl response("HTTP/1.1 408 Request Timeout\r\nConnection: Close\r\n\r\n"); @@ -2009,7 +1954,7 @@ TEST_P(Http1ClientConnectionImplTest, PrematureResponse) { EXPECT_TRUE(isPrematureResponseError(status)); } -TEST_P(Http1ClientConnectionImplTest, EmptyBodyResponse503) { +TEST_F(Http1ClientConnectionImplTest, EmptyBodyResponse503) { initialize(); NiceMock response_decoder; @@ -2023,7 +1968,7 @@ TEST_P(Http1ClientConnectionImplTest, EmptyBodyResponse503) { EXPECT_TRUE(status.ok()); } -TEST_P(Http1ClientConnectionImplTest, EmptyBodyResponse200) { +TEST_F(Http1ClientConnectionImplTest, EmptyBodyResponse200) { initialize(); NiceMock response_decoder; @@ -2037,7 +1982,7 @@ TEST_P(Http1ClientConnectionImplTest, EmptyBodyResponse200) { EXPECT_TRUE(status.ok()); } -TEST_P(Http1ClientConnectionImplTest, HeadRequest) { +TEST_F(Http1ClientConnectionImplTest, HeadRequest) { initialize(); NiceMock response_decoder; @@ -2051,7 +1996,7 @@ TEST_P(Http1ClientConnectionImplTest, HeadRequest) { EXPECT_TRUE(status.ok()); } -TEST_P(Http1ClientConnectionImplTest, 204Response) { +TEST_F(Http1ClientConnectionImplTest, 204Response) { initialize(); 
NiceMock response_decoder; @@ -2066,7 +2011,7 @@ TEST_P(Http1ClientConnectionImplTest, 204Response) { } // 204 No Content with Content-Length is barred by RFC 7230, Section 3.3.2. -TEST_P(Http1ClientConnectionImplTest, 204ResponseContentLengthNotAllowed) { +TEST_F(Http1ClientConnectionImplTest, 204ResponseContentLengthNotAllowed) { // By default, content-length is barred. { initialize(); @@ -2102,7 +2047,7 @@ TEST_P(Http1ClientConnectionImplTest, 204ResponseContentLengthNotAllowed) { // 204 No Content with Content-Length: 0 is technically barred by RFC 7230, Section 3.3.2, but we // allow it. -TEST_P(Http1ClientConnectionImplTest, 204ResponseWithContentLength0) { +TEST_F(Http1ClientConnectionImplTest, 204ResponseWithContentLength0) { { initialize(); @@ -2136,7 +2081,7 @@ TEST_P(Http1ClientConnectionImplTest, 204ResponseWithContentLength0) { } // 204 No Content with Transfer-Encoding headers is barred by RFC 7230, Section 3.3.1. -TEST_P(Http1ClientConnectionImplTest, 204ResponseTransferEncodingNotAllowed) { +TEST_F(Http1ClientConnectionImplTest, 204ResponseTransferEncodingNotAllowed) { // By default, transfer-encoding is barred. { initialize(); @@ -2170,7 +2115,7 @@ TEST_P(Http1ClientConnectionImplTest, 204ResponseTransferEncodingNotAllowed) { } } -TEST_P(Http1ClientConnectionImplTest, 100Response) { +TEST_F(Http1ClientConnectionImplTest, 100Response) { initialize(); NiceMock response_decoder; @@ -2191,7 +2136,7 @@ TEST_P(Http1ClientConnectionImplTest, 100Response) { } // 101 Switching Protocol with Transfer-Encoding headers is barred by RFC 7230, Section 3.3.1. -TEST_P(Http1ClientConnectionImplTest, 101ResponseTransferEncodingNotAllowed) { +TEST_F(Http1ClientConnectionImplTest, 101ResponseTransferEncodingNotAllowed) { // By default, transfer-encoding is barred. { initialize(); @@ -2227,7 +2172,7 @@ TEST_P(Http1ClientConnectionImplTest, 101ResponseTransferEncodingNotAllowed) { } } -TEST_P(Http1ClientConnectionImplTest, BadEncodeParams) { +TEST_F(Http1ClientConnectionImplTest, BadEncodeParams) { initialize(); NiceMock response_decoder; @@ -2240,7 +2185,7 @@ TEST_P(Http1ClientConnectionImplTest, BadEncodeParams) { CodecClientException); } -TEST_P(Http1ClientConnectionImplTest, NoContentLengthResponse) { +TEST_F(Http1ClientConnectionImplTest, NoContentLengthResponse) { initialize(); NiceMock response_decoder; @@ -2262,7 +2207,7 @@ TEST_P(Http1ClientConnectionImplTest, NoContentLengthResponse) { EXPECT_TRUE(status.ok()); } -TEST_P(Http1ClientConnectionImplTest, ResponseWithTrailers) { +TEST_F(Http1ClientConnectionImplTest, ResponseWithTrailers) { initialize(); NiceMock response_decoder; @@ -2277,7 +2222,7 @@ TEST_P(Http1ClientConnectionImplTest, ResponseWithTrailers) { EXPECT_TRUE(status.ok()); } -TEST_P(Http1ClientConnectionImplTest, GiantPath) { +TEST_F(Http1ClientConnectionImplTest, GiantPath) { initialize(); NiceMock response_decoder; @@ -2292,7 +2237,7 @@ TEST_P(Http1ClientConnectionImplTest, GiantPath) { EXPECT_TRUE(status.ok()); } -TEST_P(Http1ClientConnectionImplTest, PrematureUpgradeResponse) { +TEST_F(Http1ClientConnectionImplTest, PrematureUpgradeResponse) { initialize(); // make sure upgradeAllowed doesn't cause crashes if run with no pending response. 
@@ -2302,7 +2247,7 @@ TEST_P(Http1ClientConnectionImplTest, PrematureUpgradeResponse) { EXPECT_TRUE(isPrematureResponseError(status)); } -TEST_P(Http1ClientConnectionImplTest, UpgradeResponse) { +TEST_F(Http1ClientConnectionImplTest, UpgradeResponse) { initialize(); InSequence s; @@ -2338,7 +2283,7 @@ TEST_P(Http1ClientConnectionImplTest, UpgradeResponse) { // Same data as above, but make sure directDispatch immediately hands off any // outstanding data. -TEST_P(Http1ClientConnectionImplTest, UpgradeResponseWithEarlyData) { +TEST_F(Http1ClientConnectionImplTest, UpgradeResponseWithEarlyData) { initialize(); InSequence s; @@ -2362,7 +2307,7 @@ TEST_P(Http1ClientConnectionImplTest, UpgradeResponseWithEarlyData) { EXPECT_TRUE(status.ok()); } -TEST_P(Http1ClientConnectionImplTest, ConnectResponse) { +TEST_F(Http1ClientConnectionImplTest, ConnectResponse) { initialize(); InSequence s; @@ -2393,7 +2338,7 @@ TEST_P(Http1ClientConnectionImplTest, ConnectResponse) { // Same data as above, but make sure directDispatch immediately hands off any // outstanding data. -TEST_P(Http1ClientConnectionImplTest, ConnectResponseWithEarlyData) { +TEST_F(Http1ClientConnectionImplTest, ConnectResponseWithEarlyData) { initialize(); InSequence s; @@ -2412,7 +2357,7 @@ TEST_P(Http1ClientConnectionImplTest, ConnectResponseWithEarlyData) { EXPECT_TRUE(status.ok()); } -TEST_P(Http1ClientConnectionImplTest, ConnectRejected) { +TEST_F(Http1ClientConnectionImplTest, ConnectRejected) { initialize(); InSequence s; @@ -2430,7 +2375,7 @@ TEST_P(Http1ClientConnectionImplTest, ConnectRejected) { EXPECT_TRUE(status.ok()); } -TEST_P(Http1ClientConnectionImplTest, WatermarkTest) { +TEST_F(Http1ClientConnectionImplTest, WatermarkTest) { EXPECT_CALL(connection_, bufferLimit()).WillOnce(Return(10)); initialize(); @@ -2465,7 +2410,7 @@ TEST_P(Http1ClientConnectionImplTest, WatermarkTest) { // caller attempts to close the connection. This causes the network connection to attempt to write // pending data, even in the no flush scenario, which can cause us to go below low watermark // which then raises callbacks for a stream that no longer exists. -TEST_P(Http1ClientConnectionImplTest, HighwatermarkMultipleResponses) { +TEST_F(Http1ClientConnectionImplTest, HighwatermarkMultipleResponses) { initialize(); InSequence s; @@ -2499,7 +2444,7 @@ TEST_P(Http1ClientConnectionImplTest, HighwatermarkMultipleResponses) { // Regression test for https://github.com/envoyproxy/envoy/issues/10655. Make sure we correctly // handle going below low watermark when closing the connection during a completion callback. -TEST_P(Http1ClientConnectionImplTest, LowWatermarkDuringClose) { +TEST_F(Http1ClientConnectionImplTest, LowWatermarkDuringClose) { initialize(); InSequence s; @@ -2529,43 +2474,43 @@ TEST_P(Http1ClientConnectionImplTest, LowWatermarkDuringClose) { EXPECT_TRUE(status.ok()); } -TEST_P(Http1ServerConnectionImplTest, LargeTrailersRejected) { +TEST_F(Http1ServerConnectionImplTest, LargeTrailersRejected) { // Default limit of 60 KiB std::string long_string = "big: " + std::string(60 * 1024, 'q') + "\r\n\r\n\r\n"; testTrailersExceedLimit(long_string, true); } -TEST_P(Http1ServerConnectionImplTest, LargeTrailerFieldRejected) { +TEST_F(Http1ServerConnectionImplTest, LargeTrailerFieldRejected) { // Construct partial headers with a long field name that exceeds the default limit of 60KiB. 
std::string long_string = "bigfield" + std::string(60 * 1024, 'q'); testTrailersExceedLimit(long_string, true); } // Tests that the default limit for the number of request headers is 100. -TEST_P(Http1ServerConnectionImplTest, ManyTrailersRejected) { +TEST_F(Http1ServerConnectionImplTest, ManyTrailersRejected) { // Send a request with 101 headers. testTrailersExceedLimit(createHeaderFragment(101) + "\r\n\r\n", true); } -TEST_P(Http1ServerConnectionImplTest, LargeTrailersRejectedIgnored) { +TEST_F(Http1ServerConnectionImplTest, LargeTrailersRejectedIgnored) { // Default limit of 60 KiB std::string long_string = "big: " + std::string(60 * 1024, 'q') + "\r\n\r\n\r\n"; testTrailersExceedLimit(long_string, false); } -TEST_P(Http1ServerConnectionImplTest, LargeTrailerFieldRejectedIgnored) { +TEST_F(Http1ServerConnectionImplTest, LargeTrailerFieldRejectedIgnored) { // Default limit of 60 KiB std::string long_string = "bigfield" + std::string(60 * 1024, 'q') + ": value\r\n\r\n\r\n"; testTrailersExceedLimit(long_string, false); } // Tests that the default limit for the number of request headers is 100. -TEST_P(Http1ServerConnectionImplTest, ManyTrailersIgnored) { +TEST_F(Http1ServerConnectionImplTest, ManyTrailersIgnored) { // Send a request with 101 headers. testTrailersExceedLimit(createHeaderFragment(101) + "\r\n\r\n", false); } -TEST_P(Http1ServerConnectionImplTest, LargeRequestUrlRejected) { +TEST_F(Http1ServerConnectionImplTest, LargeRequestUrlRejected) { initialize(); std::string exception_reason; @@ -2587,19 +2532,19 @@ TEST_P(Http1ServerConnectionImplTest, LargeRequestUrlRejected) { EXPECT_EQ("http1.headers_too_large", response_encoder->getStream().responseDetails()); } -TEST_P(Http1ServerConnectionImplTest, LargeRequestHeadersRejected) { +TEST_F(Http1ServerConnectionImplTest, LargeRequestHeadersRejected) { // Default limit of 60 KiB std::string long_string = "big: " + std::string(60 * 1024, 'q') + "\r\n"; testRequestHeadersExceedLimit(long_string, ""); } // Tests that the default limit for the number of request headers is 100. -TEST_P(Http1ServerConnectionImplTest, ManyRequestHeadersRejected) { +TEST_F(Http1ServerConnectionImplTest, ManyRequestHeadersRejected) { // Send a request with 101 headers. testRequestHeadersExceedLimit(createHeaderFragment(101), "http1.too_many_headers"); } -TEST_P(Http1ServerConnectionImplTest, LargeRequestHeadersSplitRejected) { +TEST_F(Http1ServerConnectionImplTest, LargeRequestHeadersSplitRejected) { // Default limit of 60 KiB initialize(); @@ -2630,7 +2575,7 @@ TEST_P(Http1ServerConnectionImplTest, LargeRequestHeadersSplitRejected) { // Tests that the 101th request header causes overflow with the default max number of request // headers. -TEST_P(Http1ServerConnectionImplTest, ManyRequestHeadersSplitRejected) { +TEST_F(Http1ServerConnectionImplTest, ManyRequestHeadersSplitRejected) { // Default limit of 100. 
initialize(); @@ -2657,27 +2602,27 @@ TEST_P(Http1ServerConnectionImplTest, ManyRequestHeadersSplitRejected) { EXPECT_EQ(status.message(), "headers size exceeds limit"); } -TEST_P(Http1ServerConnectionImplTest, LargeRequestHeadersAccepted) { +TEST_F(Http1ServerConnectionImplTest, LargeRequestHeadersAccepted) { max_request_headers_kb_ = 65; std::string long_string = "big: " + std::string(64 * 1024, 'q') + "\r\n"; testRequestHeadersAccepted(long_string); } -TEST_P(Http1ServerConnectionImplTest, LargeRequestHeadersAcceptedMaxConfigurable) { +TEST_F(Http1ServerConnectionImplTest, LargeRequestHeadersAcceptedMaxConfigurable) { max_request_headers_kb_ = 96; std::string long_string = "big: " + std::string(95 * 1024, 'q') + "\r\n"; testRequestHeadersAccepted(long_string); } // Tests that the number of request headers is configurable. -TEST_P(Http1ServerConnectionImplTest, ManyRequestHeadersAccepted) { +TEST_F(Http1ServerConnectionImplTest, ManyRequestHeadersAccepted) { max_request_headers_count_ = 150; // Create a request with 150 headers. testRequestHeadersAccepted(createHeaderFragment(150)); } // Tests that incomplete response headers of 80 kB header value fails. -TEST_P(Http1ClientConnectionImplTest, ResponseHeadersWithLargeValueRejected) { +TEST_F(Http1ClientConnectionImplTest, ResponseHeadersWithLargeValueRejected) { initialize(); NiceMock response_decoder; @@ -2696,7 +2641,7 @@ TEST_P(Http1ClientConnectionImplTest, ResponseHeadersWithLargeValueRejected) { } // Tests that incomplete response headers with a 80 kB header field fails. -TEST_P(Http1ClientConnectionImplTest, ResponseHeadersWithLargeFieldRejected) { +TEST_F(Http1ClientConnectionImplTest, ResponseHeadersWithLargeFieldRejected) { initialize(); NiceMock decoder; @@ -2716,7 +2661,7 @@ TEST_P(Http1ClientConnectionImplTest, ResponseHeadersWithLargeFieldRejected) { } // Tests that the size of response headers for HTTP/1 must be under 80 kB. -TEST_P(Http1ClientConnectionImplTest, LargeResponseHeadersAccepted) { +TEST_F(Http1ClientConnectionImplTest, LargeResponseHeadersAccepted) { initialize(); NiceMock response_decoder; @@ -2734,7 +2679,7 @@ TEST_P(Http1ClientConnectionImplTest, LargeResponseHeadersAccepted) { // Regression test for CVE-2019-18801. Large method headers should not trigger // ASSERTs or ASAN, which they previously did. -TEST_P(Http1ClientConnectionImplTest, LargeMethodRequestEncode) { +TEST_F(Http1ClientConnectionImplTest, LargeMethodRequestEncode) { initialize(); NiceMock response_decoder; @@ -2752,7 +2697,7 @@ TEST_P(Http1ClientConnectionImplTest, LargeMethodRequestEncode) { // in CVE-2019-18801, but the related code does explicit size calculations on // both path and method (these are the two distinguished headers). So, // belt-and-braces. -TEST_P(Http1ClientConnectionImplTest, LargePathRequestEncode) { +TEST_F(Http1ClientConnectionImplTest, LargePathRequestEncode) { initialize(); NiceMock response_decoder; @@ -2768,7 +2713,7 @@ TEST_P(Http1ClientConnectionImplTest, LargePathRequestEncode) { // As with LargeMethodEncode, but for an arbitrary header. This was not an issue // in CVE-2019-18801. -TEST_P(Http1ClientConnectionImplTest, LargeHeaderRequestEncode) { +TEST_F(Http1ClientConnectionImplTest, LargeHeaderRequestEncode) { initialize(); NiceMock response_decoder; @@ -2785,7 +2730,7 @@ TEST_P(Http1ClientConnectionImplTest, LargeHeaderRequestEncode) { } // Exception called when the number of response headers exceeds the default value of 100. 
-TEST_P(Http1ClientConnectionImplTest, ManyResponseHeadersRejected) { +TEST_F(Http1ClientConnectionImplTest, ManyResponseHeadersRejected) { initialize(); NiceMock response_decoder; @@ -2803,7 +2748,7 @@ TEST_P(Http1ClientConnectionImplTest, ManyResponseHeadersRejected) { } // Tests that the number of response headers is configurable. -TEST_P(Http1ClientConnectionImplTest, ManyResponseHeadersAccepted) { +TEST_F(Http1ClientConnectionImplTest, ManyResponseHeadersAccepted) { max_response_headers_count_ = 152; initialize(); @@ -2820,5 +2765,6 @@ TEST_P(Http1ClientConnectionImplTest, ManyResponseHeadersAccepted) { status = codec_->dispatch(buffer); } +} // namespace Http1 } // namespace Http } // namespace Envoy diff --git a/test/common/http/http2/BUILD b/test/common/http/http2/BUILD index 9d64120d5dd7..82627e08201d 100644 --- a/test/common/http/http2/BUILD +++ b/test/common/http/http2/BUILD @@ -10,50 +10,35 @@ licenses(["notice"]) # Apache 2 envoy_package() -CODEC_TEST_DEPS = [ - ":codec_impl_test_util", - "//source/common/event:dispatcher_lib", - "//source/common/http:exception_lib", - "//source/common/http:header_map_lib", - "//source/common/http:header_utility_lib", - "//source/common/http/http2:codec_legacy_lib", - "//source/common/http/http2:codec_lib", - "//source/common/runtime:runtime_lib", - "//source/common/stats:stats_lib", - "//test/common/http:common_lib", - "//test/common/http/http2:http2_frame", - "//test/common/stats:stat_test_utility_lib", - "//test/mocks/http:http_mocks", - "//test/mocks/init:init_mocks", - "//test/mocks/local_info:local_info_mocks", - "//test/mocks/network:network_mocks", - "//test/mocks/protobuf:protobuf_mocks", - "//test/mocks/thread_local:thread_local_mocks", - "//test/mocks/upstream:transport_socket_match_mocks", - "//test/mocks/upstream:upstream_mocks", - "//test/test_common:logging_lib", - "//test/test_common:registry_lib", - "//test/test_common:test_runtime_lib", - "//test/test_common:utility_lib", -] - envoy_cc_test( name = "codec_impl_test", srcs = ["codec_impl_test.cc"], shard_count = 5, tags = ["fails_on_windows"], - deps = CODEC_TEST_DEPS, -) - -envoy_cc_test( - name = "codec_impl_legacy_test", - srcs = ["codec_impl_test.cc"], - args = [ - "--runtime-feature-disable-for-tests=envoy.reloadable_features.new_codec_behavior", + deps = [ + ":codec_impl_test_util", + "//source/common/event:dispatcher_lib", + "//source/common/http:exception_lib", + "//source/common/http:header_map_lib", + "//source/common/http:header_utility_lib", + "//source/common/http/http2:codec_lib", + "//source/common/stats:stats_lib", + "//test/common/http:common_lib", + "//test/common/http/http2:http2_frame", + "//test/common/stats:stat_test_utility_lib", + "//test/mocks/http:http_mocks", + "//test/mocks/init:init_mocks", + "//test/mocks/local_info:local_info_mocks", + "//test/mocks/network:network_mocks", + "//test/mocks/protobuf:protobuf_mocks", + "//test/mocks/thread_local:thread_local_mocks", + "//test/mocks/upstream:transport_socket_match_mocks", + "//test/mocks/upstream:upstream_mocks", + "//test/test_common:logging_lib", + "//test/test_common:registry_lib", + "//test/test_common:test_runtime_lib", + "//test/test_common:utility_lib", ], - shard_count = 5, - tags = ["fails_on_windows"], - deps = CODEC_TEST_DEPS, ) envoy_cc_test_library( @@ -61,7 +46,6 @@ envoy_cc_test_library( hdrs = ["codec_impl_test_util.h"], external_deps = ["abseil_optional"], deps = [ - "//source/common/http/http2:codec_legacy_lib", "//source/common/http/http2:codec_lib", ], ) diff --git 
a/test/common/http/http2/codec_impl_legacy_test.cc b/test/common/http/http2/codec_impl_legacy_test.cc deleted file mode 100644 index fef22ab9c492..000000000000 --- a/test/common/http/http2/codec_impl_legacy_test.cc +++ /dev/null @@ -1,2163 +0,0 @@ -#include -#include - -#include "envoy/http/codec.h" -#include "envoy/stats/scope.h" - -#include "common/http/exception.h" -#include "common/http/header_map_impl.h" -#include "common/http/http2/codec_impl_legacy.h" - -#include "test/common/http/common.h" -#include "test/common/http/http2/http2_frame.h" -#include "test/common/stats/stat_test_utility.h" -#include "test/mocks/http/mocks.h" -#include "test/mocks/init/mocks.h" -#include "test/mocks/local_info/mocks.h" -#include "test/mocks/network/mocks.h" -#include "test/mocks/protobuf/mocks.h" -#include "test/mocks/thread_local/mocks.h" -#include "test/test_common/logging.h" -#include "test/test_common/printers.h" -#include "test/test_common/registry.h" -#include "test/test_common/test_runtime.h" -#include "test/test_common/utility.h" - -#include "codec_impl_legacy_test_util.h" -#include "gmock/gmock.h" -#include "gtest/gtest.h" - -using testing::_; -using testing::AnyNumber; -using testing::AtLeast; -using testing::InSequence; -using testing::Invoke; -using testing::InvokeWithoutArgs; -using testing::NiceMock; -using testing::Return; - -namespace Envoy { -namespace Http { -namespace Legacy { -namespace Http2 { - -using Http2SettingsTuple = ::testing::tuple; -using Http2SettingsTestParam = ::testing::tuple; -using Http::Http2::Http2Frame; -namespace CommonUtility = ::Envoy::Http2::Utility; - -class Http2CodecImplTestFixture { -public: - // The Http::Connection::dispatch method does not throw (any more). However unit tests in this - // file use codecs for sending test data through mock network connections to the codec under test. - // It is infeasible to plumb error codes returned by the dispatch() method of the codecs under - // test, through mock connections and sending codec. As a result error returned by the dispatch - // method of the codec under test invoked by the ConnectionWrapper is thrown as an exception. Note - // that exception goes only through the mock network connection and sending codec, i.e. it is - // thrown only through the test harness code. Specific exception types are to distinguish error - // codes returned when processing requests or responses. - // TODO(yanavlasov): modify the code to verify test expectations at the point of calling codec - // under test through the ON_CALL expectations in the - // setupDefaultConnectionMocks() method. This will make the exceptions below - // unnecessary. 
- struct ClientCodecError : public std::runtime_error { - ClientCodecError(Http::Status&& status) - : std::runtime_error(std::string(status.message())), status_(std::move(status)) {} - const char* what() const noexcept override { return status_.message().data(); } - const Http::Status status_; - }; - - struct ServerCodecError : public std::runtime_error { - ServerCodecError(Http::Status&& status) - : std::runtime_error(std::string(status.message())), status_(std::move(status)) {} - const char* what() const noexcept override { return status_.message().data(); } - const Http::Status status_; - }; - - struct ConnectionWrapper { - Http::Status dispatch(const Buffer::Instance& data, ConnectionImpl& connection) { - Http::Status status = Http::okStatus(); - buffer_.add(data); - if (!dispatching_) { - while (buffer_.length() > 0) { - dispatching_ = true; - status = connection.dispatch(buffer_); - if (!status.ok()) { - // Exit early if we hit an error status. - return status; - } - dispatching_ = false; - } - } - return status; - } - - bool dispatching_{}; - Buffer::OwnedImpl buffer_; - }; - - enum SettingsTupleIndex { - HpackTableSize = 0, - MaxConcurrentStreams, - InitialStreamWindowSize, - InitialConnectionWindowSize - }; - - Http2CodecImplTestFixture() = default; - Http2CodecImplTestFixture(Http2SettingsTuple client_settings, Http2SettingsTuple server_settings) - : client_settings_(client_settings), server_settings_(server_settings) { - // Make sure we explicitly test for stream flush timer creation. - EXPECT_CALL(client_connection_.dispatcher_, createTimer_(_)).Times(0); - EXPECT_CALL(server_connection_.dispatcher_, createTimer_(_)).Times(0); - } - virtual ~Http2CodecImplTestFixture() { - client_connection_.dispatcher_.clearDeferredDeleteList(); - if (client_ != nullptr) { - client_.reset(); - EXPECT_EQ(0, TestUtility::findGauge(client_stats_store_, "http2.streams_active")->value()); - EXPECT_EQ(0, - TestUtility::findGauge(client_stats_store_, "http2.pending_send_bytes")->value()); - } - server_connection_.dispatcher_.clearDeferredDeleteList(); - if (server_ != nullptr) { - server_.reset(); - EXPECT_EQ(0, TestUtility::findGauge(server_stats_store_, "http2.streams_active")->value()); - EXPECT_EQ(0, - TestUtility::findGauge(server_stats_store_, "http2.pending_send_bytes")->value()); - } - } - - virtual void initialize() { - http2OptionsFromTuple(client_http2_options_, client_settings_); - http2OptionsFromTuple(server_http2_options_, server_settings_); - client_ = std::make_unique( - client_connection_, client_callbacks_, client_stats_store_, client_http2_options_, - max_request_headers_kb_, max_response_headers_count_, ProdNghttp2SessionFactory::get()); - server_ = std::make_unique( - server_connection_, server_callbacks_, server_stats_store_, server_http2_options_, - max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_); - - request_encoder_ = &client_->newStream(response_decoder_); - setupDefaultConnectionMocks(); - - EXPECT_CALL(server_callbacks_, newStream(_, _)) - .WillRepeatedly(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& { - response_encoder_ = &encoder; - encoder.getStream().addCallbacks(server_stream_callbacks_); - encoder.getStream().setFlushTimeout(std::chrono::milliseconds(30000)); - return request_decoder_; - })); - } - - void setupDefaultConnectionMocks() { - ON_CALL(client_connection_, write(_, _)) - .WillByDefault(Invoke([&](Buffer::Instance& data, bool) -> void { - if (corrupt_metadata_frame_) { - 
corruptMetadataFramePayload(data); - } - auto status = server_wrapper_.dispatch(data, *server_); - if (!status.ok()) { - throw ServerCodecError(std::move(status)); - } - })); - ON_CALL(server_connection_, write(_, _)) - .WillByDefault(Invoke([&](Buffer::Instance& data, bool) -> void { - auto status = client_wrapper_.dispatch(data, *client_); - if (!status.ok()) { - throw ClientCodecError(std::move(status)); - } - })); - } - - void http2OptionsFromTuple(envoy::config::core::v3::Http2ProtocolOptions& options, - const absl::optional& tp) { - options.mutable_hpack_table_size()->set_value( - (tp.has_value()) ? ::testing::get(*tp) - : CommonUtility::OptionsLimits::DEFAULT_HPACK_TABLE_SIZE); - options.mutable_max_concurrent_streams()->set_value( - (tp.has_value()) ? ::testing::get(*tp) - : CommonUtility::OptionsLimits::DEFAULT_MAX_CONCURRENT_STREAMS); - options.mutable_initial_stream_window_size()->set_value( - (tp.has_value()) ? ::testing::get(*tp) - : CommonUtility::OptionsLimits::DEFAULT_INITIAL_STREAM_WINDOW_SIZE); - options.mutable_initial_connection_window_size()->set_value( - (tp.has_value()) ? ::testing::get(*tp) - : CommonUtility::OptionsLimits::DEFAULT_INITIAL_CONNECTION_WINDOW_SIZE); - options.set_allow_metadata(allow_metadata_); - options.set_stream_error_on_invalid_http_messaging(stream_error_on_invalid_http_messaging_); - options.mutable_max_outbound_frames()->set_value(max_outbound_frames_); - options.mutable_max_outbound_control_frames()->set_value(max_outbound_control_frames_); - options.mutable_max_consecutive_inbound_frames_with_empty_payload()->set_value( - max_consecutive_inbound_frames_with_empty_payload_); - options.mutable_max_inbound_priority_frames_per_stream()->set_value( - max_inbound_priority_frames_per_stream_); - options.mutable_max_inbound_window_update_frames_per_data_frame_sent()->set_value( - max_inbound_window_update_frames_per_data_frame_sent_); - } - - // corruptMetadataFramePayload assumes data contains at least 10 bytes of the beginning of a - // frame. 
- void corruptMetadataFramePayload(Buffer::Instance& data) { - const size_t length = data.length(); - const size_t corrupt_start = 10; - if (length < corrupt_start || length > METADATA_MAX_PAYLOAD_SIZE) { - ENVOY_LOG_MISC(error, "data size too big or too small"); - return; - } - corruptAtOffset(data, corrupt_start, 0xff); - } - - void corruptAtOffset(Buffer::Instance& data, size_t index, char new_value) { - if (data.length() == 0) { - return; - } - reinterpret_cast(data.linearize(data.length()))[index % data.length()] = new_value; - } - - void expectDetailsRequest(const absl::string_view details) { - EXPECT_EQ(details, request_encoder_->getStream().responseDetails()); - } - - void expectDetailsResponse(const absl::string_view details) { - EXPECT_EQ(details, response_encoder_->getStream().responseDetails()); - } - - absl::optional client_settings_; - absl::optional server_settings_; - bool allow_metadata_ = false; - bool stream_error_on_invalid_http_messaging_ = false; - Stats::TestUtil::TestStore client_stats_store_; - envoy::config::core::v3::Http2ProtocolOptions client_http2_options_; - NiceMock client_connection_; - MockConnectionCallbacks client_callbacks_; - std::unique_ptr client_; - ConnectionWrapper client_wrapper_; - Stats::TestUtil::TestStore server_stats_store_; - envoy::config::core::v3::Http2ProtocolOptions server_http2_options_; - NiceMock server_connection_; - MockServerConnectionCallbacks server_callbacks_; - std::unique_ptr server_; - ConnectionWrapper server_wrapper_; - MockResponseDecoder response_decoder_; - RequestEncoder* request_encoder_; - MockRequestDecoder request_decoder_; - ResponseEncoder* response_encoder_{}; - MockStreamCallbacks server_stream_callbacks_; - // Corrupt a metadata frame payload. - bool corrupt_metadata_frame_ = false; - - uint32_t max_request_headers_kb_ = Http::DEFAULT_MAX_REQUEST_HEADERS_KB; - uint32_t max_request_headers_count_ = Http::DEFAULT_MAX_HEADERS_COUNT; - uint32_t max_response_headers_count_ = Http::DEFAULT_MAX_HEADERS_COUNT; - uint32_t max_outbound_frames_ = CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES; - uint32_t max_outbound_control_frames_ = - CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_CONTROL_FRAMES; - uint32_t max_consecutive_inbound_frames_with_empty_payload_ = - CommonUtility::OptionsLimits::DEFAULT_MAX_CONSECUTIVE_INBOUND_FRAMES_WITH_EMPTY_PAYLOAD; - uint32_t max_inbound_priority_frames_per_stream_ = - CommonUtility::OptionsLimits::DEFAULT_MAX_INBOUND_PRIORITY_FRAMES_PER_STREAM; - uint32_t max_inbound_window_update_frames_per_data_frame_sent_ = - CommonUtility::OptionsLimits::DEFAULT_MAX_INBOUND_WINDOW_UPDATE_FRAMES_PER_DATA_FRAME_SENT; - envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction - headers_with_underscores_action_{envoy::config::core::v3::HttpProtocolOptions::ALLOW}; -}; - -class Http2CodecImplTest : public ::testing::TestWithParam, - protected Http2CodecImplTestFixture { -public: - Http2CodecImplTest() - : Http2CodecImplTestFixture(::testing::get<0>(GetParam()), ::testing::get<1>(GetParam())) {} - -protected: - void priorityFlood() { - initialize(); - - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers, "POST"); - EXPECT_CALL(request_decoder_, decodeHeaders_(_, false)); - request_encoder_->encodeHeaders(request_headers, false); - - nghttp2_priority_spec spec = {0, 10, 0}; - // HTTP/2 codec adds 1 to the number of active streams when computing PRIORITY frames limit - constexpr uint32_t max_allowed = - 2 * 
CommonUtility::OptionsLimits::DEFAULT_MAX_INBOUND_PRIORITY_FRAMES_PER_STREAM; - for (uint32_t i = 0; i < max_allowed + 1; ++i) { - EXPECT_EQ(0, nghttp2_submit_priority(client_->session(), NGHTTP2_FLAG_NONE, 1, &spec)); - } - } - - void windowUpdateFlood() { - initialize(); - - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); - request_encoder_->encodeHeaders(request_headers, true); - - // Send one DATA frame back - EXPECT_CALL(response_decoder_, decodeHeaders_(_, false)); - EXPECT_CALL(response_decoder_, decodeData(_, false)); - TestResponseHeaderMapImpl response_headers{{":status", "200"}}; - response_encoder_->encodeHeaders(response_headers, false); - Buffer::OwnedImpl data("0"); - EXPECT_NO_THROW(response_encoder_->encodeData(data, false)); - - // See the limit formula in the - // `Envoy::Http::Http2::ServerConnectionImpl::checkInboundFrameLimits()' method. - constexpr uint32_t max_allowed = - 1 + 2 * (CommonUtility::OptionsLimits:: - DEFAULT_MAX_INBOUND_WINDOW_UPDATE_FRAMES_PER_DATA_FRAME_SENT + - 1); - for (uint32_t i = 0; i < max_allowed + 1; ++i) { - EXPECT_EQ(0, nghttp2_submit_window_update(client_->session(), NGHTTP2_FLAG_NONE, 1, 1)); - } - } - - void emptyDataFlood(Buffer::OwnedImpl& data) { - initialize(); - - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers, "POST"); - EXPECT_CALL(request_decoder_, decodeHeaders_(_, false)); - request_encoder_->encodeHeaders(request_headers, false); - - // HTTP/2 codec does not send empty DATA frames with no END_STREAM flag. - // To make this work, send raw bytes representing empty DATA frames bypassing client codec. - Http2Frame emptyDataFrame = Http2Frame::makeEmptyDataFrame(0); - constexpr uint32_t max_allowed = - CommonUtility::OptionsLimits::DEFAULT_MAX_CONSECUTIVE_INBOUND_FRAMES_WITH_EMPTY_PAYLOAD; - for (uint32_t i = 0; i < max_allowed + 1; ++i) { - data.add(emptyDataFrame.data(), emptyDataFrame.size()); - } - } -}; - -TEST_P(Http2CodecImplTest, ShutdownNotice) { - initialize(); - EXPECT_EQ(absl::nullopt, request_encoder_->http1StreamEncoderOptions()); - - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); - request_encoder_->encodeHeaders(request_headers, true); - - EXPECT_CALL(client_callbacks_, onGoAway(_)); - server_->shutdownNotice(); - server_->goAway(); - - TestResponseHeaderMapImpl response_headers{{":status", "200"}}; - EXPECT_CALL(response_decoder_, decodeHeaders_(_, true)); - response_encoder_->encodeHeaders(response_headers, true); -} - -TEST_P(Http2CodecImplTest, ContinueHeaders) { - initialize(); - - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); - request_encoder_->encodeHeaders(request_headers, true); - - TestResponseHeaderMapImpl continue_headers{{":status", "100"}}; - EXPECT_CALL(response_decoder_, decode100ContinueHeaders_(_)); - response_encoder_->encode100ContinueHeaders(continue_headers); - - TestResponseHeaderMapImpl response_headers{{":status", "200"}}; - EXPECT_CALL(response_decoder_, decodeHeaders_(_, true)); - response_encoder_->encodeHeaders(response_headers, true); -}; - -TEST_P(Http2CodecImplTest, InvalidContinueWithFin) { - initialize(); - - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - 
EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); - request_encoder_->encodeHeaders(request_headers, true); - - TestResponseHeaderMapImpl continue_headers{{":status", "100"}}; - EXPECT_THROW(response_encoder_->encodeHeaders(continue_headers, true), ClientCodecError); - EXPECT_EQ(1, client_stats_store_.counter("http2.rx_messaging_error").value()); -} - -TEST_P(Http2CodecImplTest, InvalidContinueWithFinAllowed) { - stream_error_on_invalid_http_messaging_ = true; - initialize(); - - MockStreamCallbacks request_callbacks; - request_encoder_->getStream().addCallbacks(request_callbacks); - - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); - request_encoder_->encodeHeaders(request_headers, true); - - // Buffer client data to avoid mock recursion causing lifetime issues. - ON_CALL(server_connection_, write(_, _)) - .WillByDefault( - Invoke([&](Buffer::Instance& data, bool) -> void { client_wrapper_.buffer_.add(data); })); - - TestResponseHeaderMapImpl continue_headers{{":status", "100"}}; - response_encoder_->encodeHeaders(continue_headers, true); - - // Flush pending data. - EXPECT_CALL(request_callbacks, onResetStream(StreamResetReason::LocalReset, _)); - setupDefaultConnectionMocks(); - auto status = client_wrapper_.dispatch(Buffer::OwnedImpl(), *client_); - EXPECT_TRUE(status.ok()); - - EXPECT_EQ(1, client_stats_store_.counter("http2.rx_messaging_error").value()); - expectDetailsRequest("http2.violation.of.messaging.rule"); -} - -TEST_P(Http2CodecImplTest, InvalidRepeatContinue) { - initialize(); - - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); - request_encoder_->encodeHeaders(request_headers, true); - - TestResponseHeaderMapImpl continue_headers{{":status", "100"}}; - EXPECT_CALL(response_decoder_, decode100ContinueHeaders_(_)); - response_encoder_->encode100ContinueHeaders(continue_headers); - - EXPECT_THROW(response_encoder_->encodeHeaders(continue_headers, true), ClientCodecError); - EXPECT_EQ(1, client_stats_store_.counter("http2.rx_messaging_error").value()); -}; - -TEST_P(Http2CodecImplTest, InvalidRepeatContinueAllowed) { - stream_error_on_invalid_http_messaging_ = true; - initialize(); - - MockStreamCallbacks request_callbacks; - request_encoder_->getStream().addCallbacks(request_callbacks); - - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); - request_encoder_->encodeHeaders(request_headers, true); - - TestResponseHeaderMapImpl continue_headers{{":status", "100"}}; - EXPECT_CALL(response_decoder_, decode100ContinueHeaders_(_)); - response_encoder_->encode100ContinueHeaders(continue_headers); - - // Buffer client data to avoid mock recursion causing lifetime issues. - ON_CALL(server_connection_, write(_, _)) - .WillByDefault( - Invoke([&](Buffer::Instance& data, bool) -> void { client_wrapper_.buffer_.add(data); })); - - response_encoder_->encodeHeaders(continue_headers, true); - - // Flush pending data. 
- EXPECT_CALL(request_callbacks, onResetStream(StreamResetReason::LocalReset, _)); - setupDefaultConnectionMocks(); - auto status = client_wrapper_.dispatch(Buffer::OwnedImpl(), *client_); - EXPECT_TRUE(status.ok()); - - EXPECT_EQ(1, client_stats_store_.counter("http2.rx_messaging_error").value()); - expectDetailsRequest("http2.violation.of.messaging.rule"); -}; - -TEST_P(Http2CodecImplTest, Invalid103) { - initialize(); - - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); - request_encoder_->encodeHeaders(request_headers, true); - - TestResponseHeaderMapImpl continue_headers{{":status", "100"}}; - EXPECT_CALL(response_decoder_, decode100ContinueHeaders_(_)); - response_encoder_->encode100ContinueHeaders(continue_headers); - - TestResponseHeaderMapImpl early_hint_headers{{":status", "103"}}; - EXPECT_CALL(response_decoder_, decodeHeaders_(_, false)); - response_encoder_->encodeHeaders(early_hint_headers, false); - - EXPECT_THROW_WITH_MESSAGE(response_encoder_->encodeHeaders(early_hint_headers, false), - ClientCodecError, "Unexpected 'trailers' with no end stream."); - EXPECT_EQ(1, client_stats_store_.counter("http2.too_many_header_frames").value()); -} - -TEST_P(Http2CodecImplTest, Invalid204WithContentLength) { - initialize(); - - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); - request_encoder_->encodeHeaders(request_headers, true); - - TestResponseHeaderMapImpl response_headers{{":status", "204"}, {"content-length", "3"}}; - // What follows is a hack to get headers that should span into continuation frames. The default - // maximum frame size is 16K. We will add 3,000 headers that will take us above this size and - // not easily compress with HPACK. (I confirmed this generates 26,468 bytes of header data - // which should contain a continuation.) - for (unsigned i = 1; i < 3000; i++) { - response_headers.addCopy(std::to_string(i), std::to_string(i)); - } - - EXPECT_LOG_CONTAINS( - "debug", - "Invalid HTTP header field was received: frame type: 1, stream: 1, name: [content-length], " - "value: [3]", - EXPECT_THROW(response_encoder_->encodeHeaders(response_headers, false), ClientCodecError)); - EXPECT_EQ(1, client_stats_store_.counter("http2.rx_messaging_error").value()); -}; - -TEST_P(Http2CodecImplTest, Invalid204WithContentLengthAllowed) { - stream_error_on_invalid_http_messaging_ = true; - initialize(); - - MockStreamCallbacks request_callbacks; - request_encoder_->getStream().addCallbacks(request_callbacks); - - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); - request_encoder_->encodeHeaders(request_headers, true); - - // Buffer client data to avoid mock recursion causing lifetime issues. - ON_CALL(server_connection_, write(_, _)) - .WillByDefault( - Invoke([&](Buffer::Instance& data, bool) -> void { client_wrapper_.buffer_.add(data); })); - - TestResponseHeaderMapImpl response_headers{{":status", "204"}, {"content-length", "3"}}; - // What follows is a hack to get headers that should span into continuation frames. The default - // maximum frame size is 16K. We will add 3,000 headers that will take us above this size and - // not easily compress with HPACK. (I confirmed this generates 26,468 bytes of header data - // which should contain a continuation.) 
- for (int i = 1; i < 3000; i++) { - response_headers.addCopy(std::to_string(i), std::to_string(i)); - } - - response_encoder_->encodeHeaders(response_headers, false); - - // Flush pending data. - EXPECT_CALL(request_callbacks, onResetStream(StreamResetReason::LocalReset, _)); - EXPECT_CALL(server_stream_callbacks_, onResetStream(StreamResetReason::RemoteReset, _)); - setupDefaultConnectionMocks(); - auto status = client_wrapper_.dispatch(Buffer::OwnedImpl(), *client_); - EXPECT_TRUE(status.ok()); - - EXPECT_EQ(1, client_stats_store_.counter("http2.rx_messaging_error").value()); - expectDetailsRequest("http2.invalid.header.field"); -}; - -TEST_P(Http2CodecImplTest, RefusedStreamReset) { - initialize(); - - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - EXPECT_CALL(request_decoder_, decodeHeaders_(_, false)); - request_encoder_->encodeHeaders(request_headers, false); - - MockStreamCallbacks callbacks; - request_encoder_->getStream().addCallbacks(callbacks); - EXPECT_CALL(server_stream_callbacks_, - onResetStream(StreamResetReason::LocalRefusedStreamReset, _)); - EXPECT_CALL(callbacks, onResetStream(StreamResetReason::RemoteRefusedStreamReset, _)); - response_encoder_->getStream().resetStream(StreamResetReason::LocalRefusedStreamReset); -} - -TEST_P(Http2CodecImplTest, InvalidHeadersFrame) { - initialize(); - - EXPECT_THROW(request_encoder_->encodeHeaders(TestRequestHeaderMapImpl{}, true), ServerCodecError); - EXPECT_EQ(1, server_stats_store_.counter("http2.rx_messaging_error").value()); -} - -TEST_P(Http2CodecImplTest, InvalidHeadersFrameAllowed) { - stream_error_on_invalid_http_messaging_ = true; - initialize(); - - MockStreamCallbacks request_callbacks; - request_encoder_->getStream().addCallbacks(request_callbacks); - - ON_CALL(client_connection_, write(_, _)) - .WillByDefault( - Invoke([&](Buffer::Instance& data, bool) -> void { server_wrapper_.buffer_.add(data); })); - - request_encoder_->encodeHeaders(TestRequestHeaderMapImpl{}, true); - EXPECT_CALL(server_stream_callbacks_, onResetStream(StreamResetReason::LocalReset, _)); - EXPECT_CALL(request_callbacks, onResetStream(StreamResetReason::RemoteReset, _)); - auto status = server_wrapper_.dispatch(Buffer::OwnedImpl(), *server_); - EXPECT_TRUE(status.ok()); - expectDetailsResponse("http2.violation.of.messaging.rule"); -} - -TEST_P(Http2CodecImplTest, TrailingHeaders) { - initialize(); - - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - EXPECT_CALL(request_decoder_, decodeHeaders_(_, false)); - request_encoder_->encodeHeaders(request_headers, false); - EXPECT_CALL(request_decoder_, decodeData(_, false)); - Buffer::OwnedImpl hello("hello"); - request_encoder_->encodeData(hello, false); - EXPECT_CALL(request_decoder_, decodeTrailers_(_)); - request_encoder_->encodeTrailers(TestRequestTrailerMapImpl{{"trailing", "header"}}); - - TestResponseHeaderMapImpl response_headers{{":status", "200"}}; - EXPECT_CALL(response_decoder_, decodeHeaders_(_, false)); - response_encoder_->encodeHeaders(response_headers, false); - EXPECT_CALL(response_decoder_, decodeData(_, false)); - Buffer::OwnedImpl world("world"); - response_encoder_->encodeData(world, false); - EXPECT_CALL(response_decoder_, decodeTrailers_(_)); - response_encoder_->encodeTrailers(TestResponseTrailerMapImpl{{"trailing", "header"}}); -} - -TEST_P(Http2CodecImplTest, TrailingHeadersLargeClientBody) { - initialize(); - - // Buffer server data so we can make sure we don't get any window 
updates. - ON_CALL(client_connection_, write(_, _)) - .WillByDefault( - Invoke([&](Buffer::Instance& data, bool) -> void { server_wrapper_.buffer_.add(data); })); - - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - EXPECT_CALL(request_decoder_, decodeHeaders_(_, false)); - request_encoder_->encodeHeaders(request_headers, false); - EXPECT_CALL(request_decoder_, decodeData(_, false)).Times(AtLeast(1)); - Buffer::OwnedImpl body(std::string(1024 * 1024, 'a')); - request_encoder_->encodeData(body, false); - request_encoder_->encodeTrailers(TestRequestTrailerMapImpl{{"trailing", "header"}}); - - // Flush pending data. - setupDefaultConnectionMocks(); - EXPECT_CALL(request_decoder_, decodeTrailers_(_)); - auto status = server_wrapper_.dispatch(Buffer::OwnedImpl(), *server_); - EXPECT_TRUE(status.ok()); - - TestResponseHeaderMapImpl response_headers{{":status", "200"}}; - EXPECT_CALL(response_decoder_, decodeHeaders_(_, false)); - response_encoder_->encodeHeaders(response_headers, false); - EXPECT_CALL(response_decoder_, decodeData(_, false)); - Buffer::OwnedImpl world("world"); - response_encoder_->encodeData(world, false); - EXPECT_CALL(response_decoder_, decodeTrailers_(_)); - response_encoder_->encodeTrailers(TestResponseTrailerMapImpl{{"trailing", "header"}}); -} - -TEST_P(Http2CodecImplTest, SmallMetadataVecTest) { - allow_metadata_ = true; - initialize(); - - // Generates a valid stream_id by sending a request header. - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); - request_encoder_->encodeHeaders(request_headers, true); - - MetadataMapVector metadata_map_vector; - const int size = 10; - for (int i = 0; i < size; i++) { - MetadataMap metadata_map = { - {"header_key1", "header_value1"}, - {"header_key2", "header_value2"}, - {"header_key3", "header_value3"}, - {"header_key4", "header_value4"}, - }; - MetadataMapPtr metadata_map_ptr = std::make_unique(metadata_map); - metadata_map_vector.push_back(std::move(metadata_map_ptr)); - } - - EXPECT_CALL(request_decoder_, decodeMetadata_(_)).Times(size); - request_encoder_->encodeMetadata(metadata_map_vector); - - EXPECT_CALL(response_decoder_, decodeMetadata_(_)).Times(size); - response_encoder_->encodeMetadata(metadata_map_vector); -} - -TEST_P(Http2CodecImplTest, LargeMetadataVecTest) { - allow_metadata_ = true; - initialize(); - - // Generates a valid stream_id by sending a request header. - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); - request_encoder_->encodeHeaders(request_headers, true); - - MetadataMapVector metadata_map_vector; - const int size = 10; - for (int i = 0; i < size; i++) { - MetadataMap metadata_map = { - {"header_key1", std::string(50 * 1024, 'a')}, - }; - MetadataMapPtr metadata_map_ptr = std::make_unique(metadata_map); - metadata_map_vector.push_back(std::move(metadata_map_ptr)); - } - - EXPECT_CALL(request_decoder_, decodeMetadata_(_)).Times(size); - request_encoder_->encodeMetadata(metadata_map_vector); - - EXPECT_CALL(response_decoder_, decodeMetadata_(_)).Times(size); - response_encoder_->encodeMetadata(metadata_map_vector); -} - -TEST_P(Http2CodecImplTest, BadMetadataVecReceivedTest) { - allow_metadata_ = true; - initialize(); - - // Generates a valid stream_id by sending a request header. 
- TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); - request_encoder_->encodeHeaders(request_headers, true); - - MetadataMap metadata_map = { - {"header_key1", "header_value1"}, - {"header_key2", "header_value2"}, - {"header_key3", "header_value3"}, - {"header_key4", "header_value4"}, - }; - MetadataMapPtr metadata_map_ptr = std::make_unique(metadata_map); - MetadataMapVector metadata_map_vector; - metadata_map_vector.push_back(std::move(metadata_map_ptr)); - - corrupt_metadata_frame_ = true; - EXPECT_THROW_WITH_MESSAGE(request_encoder_->encodeMetadata(metadata_map_vector), ServerCodecError, - "The user callback function failed"); -} - -// Encode response metadata while dispatching request data from the client, so -// that nghttp2 can't fill the metadata frames' payloads until dispatching -// is finished. -TEST_P(Http2CodecImplTest, EncodeMetadataWhileDispatchingTest) { - allow_metadata_ = true; - initialize(); - - MetadataMapVector metadata_map_vector; - const int size = 10; - for (int i = 0; i < size; i++) { - MetadataMap metadata_map = { - {"header_key1", "header_value1"}, - {"header_key2", "header_value2"}, - {"header_key3", "header_value3"}, - {"header_key4", "header_value4"}, - }; - MetadataMapPtr metadata_map_ptr = std::make_unique(metadata_map); - metadata_map_vector.push_back(std::move(metadata_map_ptr)); - } - - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)).WillOnce(InvokeWithoutArgs([&]() -> void { - response_encoder_->encodeMetadata(metadata_map_vector); - })); - EXPECT_CALL(response_decoder_, decodeMetadata_(_)).Times(size); - request_encoder_->encodeHeaders(request_headers, true); -} -class Http2CodecImplDeferredResetTest : public Http2CodecImplTest {}; - -TEST_P(Http2CodecImplDeferredResetTest, DeferredResetClient) { - initialize(); - - InSequence s; - - MockStreamCallbacks client_stream_callbacks; - request_encoder_->getStream().addCallbacks(client_stream_callbacks); - - // Do a request, but pause server dispatch so we don't send window updates. This will result in a - // deferred reset, followed by a pending frames flush which will cause the stream to actually - // be reset immediately since we are outside of dispatch context. - ON_CALL(client_connection_, write(_, _)) - .WillByDefault( - Invoke([&](Buffer::Instance& data, bool) -> void { server_wrapper_.buffer_.add(data); })); - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - request_encoder_->encodeHeaders(request_headers, false); - Buffer::OwnedImpl body(std::string(1024 * 1024, 'a')); - EXPECT_CALL(client_stream_callbacks, onAboveWriteBufferHighWatermark()).Times(AnyNumber()); - request_encoder_->encodeData(body, true); - EXPECT_CALL(client_stream_callbacks, onResetStream(StreamResetReason::LocalReset, _)); - request_encoder_->getStream().resetStream(StreamResetReason::LocalReset); - - // Dispatch server. We expect to see some data. - EXPECT_CALL(response_decoder_, decodeHeaders_(_, _)).Times(0); - EXPECT_CALL(request_decoder_, decodeHeaders_(_, false)).WillOnce(InvokeWithoutArgs([&]() -> void { - // Start a response inside the headers callback. This should not result in the client - // seeing any headers as the stream should already be reset on the other side, even though - // we don't know about it yet. 
- TestResponseHeaderMapImpl response_headers{{":status", "200"}}; - response_encoder_->encodeHeaders(response_headers, false); - })); - EXPECT_CALL(request_decoder_, decodeData(_, false)).Times(AtLeast(1)); - EXPECT_CALL(server_stream_callbacks_, onResetStream(StreamResetReason::RemoteReset, _)); - - setupDefaultConnectionMocks(); - auto status = server_wrapper_.dispatch(Buffer::OwnedImpl(), *server_); - EXPECT_TRUE(status.ok()); -} - -TEST_P(Http2CodecImplDeferredResetTest, DeferredResetServer) { - initialize(); - - InSequence s; - - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - EXPECT_CALL(request_decoder_, decodeHeaders_(_, false)); - request_encoder_->encodeHeaders(request_headers, false); - - // In this case we do the same thing as DeferredResetClient but on the server side. - ON_CALL(server_connection_, write(_, _)) - .WillByDefault( - Invoke([&](Buffer::Instance& data, bool) -> void { client_wrapper_.buffer_.add(data); })); - TestResponseHeaderMapImpl response_headers{{":status", "200"}}; - response_encoder_->encodeHeaders(response_headers, false); - Buffer::OwnedImpl body(std::string(1024 * 1024, 'a')); - EXPECT_CALL(server_stream_callbacks_, onAboveWriteBufferHighWatermark()).Times(AnyNumber()); - auto flush_timer = new Event::MockTimer(&server_connection_.dispatcher_); - EXPECT_CALL(*flush_timer, enableTimer(std::chrono::milliseconds(30000), _)); - response_encoder_->encodeData(body, true); - EXPECT_CALL(server_stream_callbacks_, onResetStream(StreamResetReason::LocalReset, _)); - EXPECT_CALL(*flush_timer, disableTimer()); - response_encoder_->getStream().resetStream(StreamResetReason::LocalReset); - - MockStreamCallbacks client_stream_callbacks; - request_encoder_->getStream().addCallbacks(client_stream_callbacks); - EXPECT_CALL(response_decoder_, decodeHeaders_(_, false)); - EXPECT_CALL(response_decoder_, decodeData(_, false)).Times(AtLeast(1)); - EXPECT_CALL(client_stream_callbacks, onResetStream(StreamResetReason::RemoteReset, _)); - setupDefaultConnectionMocks(); - auto status = client_wrapper_.dispatch(Buffer::OwnedImpl(), *client_); - EXPECT_TRUE(status.ok()); -} - -class Http2CodecImplFlowControlTest : public Http2CodecImplTest {}; - -// Back up the pending_sent_data_ buffer in the client connection and make sure the watermarks fire -// as expected. -// -// This also tests the readDisable logic in StreamImpl, verifying that h2 bytes are consumed -// when the stream has readDisable(true) called. -TEST_P(Http2CodecImplFlowControlTest, TestFlowControlInPendingSendData) { - initialize(); - MockStreamCallbacks callbacks; - request_encoder_->getStream().addCallbacks(callbacks); - - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - TestRequestHeaderMapImpl expected_headers; - HttpTestUtility::addDefaultHeaders(expected_headers); - EXPECT_CALL(request_decoder_, decodeHeaders_(HeaderMapEqual(&expected_headers), false)); - request_encoder_->encodeHeaders(request_headers, false); - - // Force the server stream to be read disabled. This will cause it to stop sending window - // updates to the client. 
- server_->getStream(1)->readDisable(true); - EXPECT_EQ(1, TestUtility::findGauge(client_stats_store_, "http2.streams_active")->value()); - EXPECT_EQ(1, TestUtility::findGauge(server_stats_store_, "http2.streams_active")->value()); - - uint32_t initial_stream_window = - nghttp2_session_get_stream_effective_local_window_size(client_->session(), 1); - // If this limit is changed, this test will fail due to the initial large writes being divided - // into more than 4 frames. Fast fail here with this explanatory comment. - ASSERT_EQ(65535, initial_stream_window); - // Make sure the limits were configured properly in test set up. - EXPECT_EQ(initial_stream_window, server_->getStream(1)->bufferLimit()); - EXPECT_EQ(initial_stream_window, client_->getStream(1)->bufferLimit()); - - // One large write gets broken into smaller frames. - EXPECT_CALL(request_decoder_, decodeData(_, false)).Times(AnyNumber()); - Buffer::OwnedImpl long_data(std::string(initial_stream_window, 'a')); - request_encoder_->encodeData(long_data, false); - - // Verify that the window is full. The client will not send more data to the server for this - // stream. - EXPECT_EQ(0, nghttp2_session_get_stream_local_window_size(server_->session(), 1)); - EXPECT_EQ(0, nghttp2_session_get_stream_remote_window_size(client_->session(), 1)); - EXPECT_EQ(initial_stream_window, server_->getStream(1)->unconsumed_bytes_); - - // Now that the flow control window is full, further data causes the send buffer to back up. - Buffer::OwnedImpl more_long_data(std::string(initial_stream_window, 'a')); - request_encoder_->encodeData(more_long_data, false); - EXPECT_EQ(initial_stream_window, client_->getStream(1)->pending_send_data_.length()); - EXPECT_EQ(initial_stream_window, - TestUtility::findGauge(client_stats_store_, "http2.pending_send_bytes")->value()); - EXPECT_EQ(initial_stream_window, server_->getStream(1)->unconsumed_bytes_); - - // If we go over the limit, the stream callbacks should fire. - EXPECT_CALL(callbacks, onAboveWriteBufferHighWatermark()); - Buffer::OwnedImpl last_byte("!"); - request_encoder_->encodeData(last_byte, false); - EXPECT_EQ(initial_stream_window + 1, client_->getStream(1)->pending_send_data_.length()); - EXPECT_EQ(initial_stream_window + 1, - TestUtility::findGauge(client_stats_store_, "http2.pending_send_bytes")->value()); - - // Now create a second stream on the connection. - MockResponseDecoder response_decoder2; - RequestEncoder* request_encoder2 = &client_->newStream(response_decoder_); - StreamEncoder* response_encoder2; - MockStreamCallbacks server_stream_callbacks2; - MockRequestDecoder request_decoder2; - // When the server stream is created it should check the status of the - // underlying connection. Pretend it is overrun. - EXPECT_CALL(server_connection_, aboveHighWatermark()).WillOnce(Return(true)); - EXPECT_CALL(server_stream_callbacks2, onAboveWriteBufferHighWatermark()); - EXPECT_CALL(server_callbacks_, newStream(_, _)) - .WillOnce(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& { - response_encoder2 = &encoder; - encoder.getStream().addCallbacks(server_stream_callbacks2); - return request_decoder2; - })); - EXPECT_CALL(request_decoder2, decodeHeaders_(_, false)); - request_encoder2->encodeHeaders(request_headers, false); - - // Add the stream callbacks belatedly. On creation the stream should have - // been noticed that the connection was backed up. Any new subscriber to - // stream callbacks should get a callback when they addCallbacks. 
- MockStreamCallbacks callbacks2; - EXPECT_CALL(callbacks2, onAboveWriteBufferHighWatermark()); - request_encoder_->getStream().addCallbacks(callbacks2); - - // Add a third callback to make testing removal mid-watermark call below more interesting. - MockStreamCallbacks callbacks3; - EXPECT_CALL(callbacks3, onAboveWriteBufferHighWatermark()); - request_encoder_->getStream().addCallbacks(callbacks3); - - // Now unblock the server's stream. This will cause the bytes to be consumed, flow control - // updates to be sent, and the client to flush all queued data. - // For bonus corner case coverage, remove callback2 in the middle of runLowWatermarkCallbacks() - // and ensure it is not called. - EXPECT_CALL(callbacks, onBelowWriteBufferLowWatermark()).WillOnce(Invoke([&]() -> void { - request_encoder_->getStream().removeCallbacks(callbacks2); - })); - EXPECT_CALL(callbacks2, onBelowWriteBufferLowWatermark()).Times(0); - EXPECT_CALL(callbacks3, onBelowWriteBufferLowWatermark()); - server_->getStream(1)->readDisable(false); - EXPECT_EQ(0, client_->getStream(1)->pending_send_data_.length()); - EXPECT_EQ(0, TestUtility::findGauge(client_stats_store_, "http2.pending_send_bytes")->value()); - // The extra 1 byte sent won't trigger another window update, so the final window should be the - // initial window minus the last 1 byte flush from the client to server. - EXPECT_EQ(initial_stream_window - 1, - nghttp2_session_get_stream_local_window_size(server_->session(), 1)); - EXPECT_EQ(initial_stream_window - 1, - nghttp2_session_get_stream_remote_window_size(client_->session(), 1)); -} - -// Set up the same asTestFlowControlInPendingSendData, but tears the stream down with an early reset -// once the flow control window is full up. -TEST_P(Http2CodecImplFlowControlTest, EarlyResetRestoresWindow) { - initialize(); - MockStreamCallbacks callbacks; - request_encoder_->getStream().addCallbacks(callbacks); - - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - TestRequestHeaderMapImpl expected_headers; - HttpTestUtility::addDefaultHeaders(expected_headers); - EXPECT_CALL(request_decoder_, decodeHeaders_(HeaderMapEqual(&expected_headers), false)); - request_encoder_->encodeHeaders(request_headers, false); - - // Force the server stream to be read disabled. This will cause it to stop sending window - // updates to the client. - server_->getStream(1)->readDisable(true); - - uint32_t initial_stream_window = - nghttp2_session_get_stream_effective_local_window_size(client_->session(), 1); - uint32_t initial_connection_window = nghttp2_session_get_remote_window_size(client_->session()); - // If this limit is changed, this test will fail due to the initial large writes being divided - // into more than 4 frames. Fast fail here with this explanatory comment. - ASSERT_EQ(65535, initial_stream_window); - // One large write may get broken into smaller frames. - EXPECT_CALL(request_decoder_, decodeData(_, false)).Times(AnyNumber()); - Buffer::OwnedImpl long_data(std::string(initial_stream_window, 'a')); - // The one giant write will cause the buffer to go over the limit, then drain and go back under - // the limit. - request_encoder_->encodeData(long_data, false); - - // Verify that the window is full. The client will not send more data to the server for this - // stream. 
- EXPECT_EQ(0, nghttp2_session_get_stream_local_window_size(server_->session(), 1)); - EXPECT_EQ(0, nghttp2_session_get_stream_remote_window_size(client_->session(), 1)); - EXPECT_EQ(initial_stream_window, server_->getStream(1)->unconsumed_bytes_); - EXPECT_GT(initial_connection_window, nghttp2_session_get_remote_window_size(client_->session())); - - EXPECT_CALL(server_stream_callbacks_, - onResetStream(StreamResetReason::LocalRefusedStreamReset, _)); - EXPECT_CALL(callbacks, onAboveWriteBufferHighWatermark()).Times(0); - EXPECT_CALL(callbacks, onBelowWriteBufferLowWatermark()).Times(0); - EXPECT_CALL(server_stream_callbacks_, onAboveWriteBufferHighWatermark()).Times(0); - EXPECT_CALL(server_stream_callbacks_, onBelowWriteBufferLowWatermark()).Times(0); - EXPECT_CALL(callbacks, onResetStream(StreamResetReason::RemoteRefusedStreamReset, _)) - .WillOnce(Invoke([&](StreamResetReason, absl::string_view) -> void { - // Test the case where the reset callbacks cause the socket to fill up, - // causing the underlying connection to back up. Given the stream is - // being destroyed the watermark callbacks should not fire (mocks for Times(0) - // above) - client_->onUnderlyingConnectionAboveWriteBufferHighWatermark(); - client_->onUnderlyingConnectionBelowWriteBufferLowWatermark(); - server_->onUnderlyingConnectionAboveWriteBufferHighWatermark(); - server_->onUnderlyingConnectionBelowWriteBufferLowWatermark(); - })); - response_encoder_->getStream().resetStream(StreamResetReason::LocalRefusedStreamReset); - - // Regression test that the window is consumed even if the stream is destroyed early. - EXPECT_EQ(initial_connection_window, nghttp2_session_get_remote_window_size(client_->session())); -} - -// Test the HTTP2 pending_recv_data_ buffer going over and under watermark limits. -TEST_P(Http2CodecImplFlowControlTest, FlowControlPendingRecvData) { - initialize(); - MockStreamCallbacks callbacks; - - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - TestRequestHeaderMapImpl expected_headers; - HttpTestUtility::addDefaultHeaders(expected_headers); - EXPECT_CALL(request_decoder_, decodeHeaders_(HeaderMapEqual(&expected_headers), false)); - request_encoder_->encodeHeaders(request_headers, false); - - // Set artificially small watermarks to make the recv buffer easy to overrun. In production, - // the recv buffer can be overrun by a client which negotiates a larger - // SETTINGS_MAX_FRAME_SIZE but there's no current easy way to tweak that in - // envoy (without sending raw HTTP/2 frames) so we lower the buffer limit instead. - server_->getStream(1)->setWriteBufferWatermarks(10, 20); - - EXPECT_CALL(request_decoder_, decodeData(_, false)); - Buffer::OwnedImpl data(std::string(40, 'a')); - request_encoder_->encodeData(data, false); -} - -// Verify that we create and disable the stream flush timer when trailers follow a stream that -// does not have enough window. 
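The flush-timer tests that follow hinge on one condition: the stream is locally complete (end_stream or trailers already encoded) while bytes are still pinned behind the peer's flow-control window. A rough standalone model of that decision, with illustrative names only:

    #include <cstdint>
    #include <iostream>

    struct ToyStream {
      uint64_t pending_send_bytes{0};
      bool local_end_stream{false};
      bool flush_timer_armed{false};
    };

    void onEncodeComplete(ToyStream& s) {
      // Arm the timer only when the stream has nothing more to produce but its
      // output is stuck waiting for WINDOW_UPDATEs; if the timer fires, the codec
      // resets the stream and bumps a tx_flush_timeout style counter.
      if (s.local_end_stream && s.pending_send_bytes > 0) {
        s.flush_timer_armed = true; // the tests in this file use a 30000ms timeout
      }
    }

    int main() {
      ToyStream s{1024 * 1024, true, false};
      onEncodeComplete(s);
      std::cout << std::boolalpha << s.flush_timer_armed << "\n"; // true
    }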
-TEST_P(Http2CodecImplFlowControlTest, TrailingHeadersLargeServerBody) { - initialize(); - - InSequence s; - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); - request_encoder_->encodeHeaders(request_headers, true); - - ON_CALL(client_connection_, write(_, _)) - .WillByDefault( - Invoke([&](Buffer::Instance& data, bool) -> void { server_wrapper_.buffer_.add(data); })); - TestResponseHeaderMapImpl response_headers{{":status", "200"}}; - EXPECT_CALL(response_decoder_, decodeHeaders_(_, false)); - response_encoder_->encodeHeaders(response_headers, false); - EXPECT_CALL(server_stream_callbacks_, onAboveWriteBufferHighWatermark()); - EXPECT_CALL(response_decoder_, decodeData(_, false)).Times(AtLeast(1)); - auto flush_timer = new Event::MockTimer(&server_connection_.dispatcher_); - EXPECT_CALL(*flush_timer, enableTimer(std::chrono::milliseconds(30000), _)); - Buffer::OwnedImpl body(std::string(1024 * 1024, 'a')); - response_encoder_->encodeData(body, false); - response_encoder_->encodeTrailers(TestResponseTrailerMapImpl{{"trailing", "header"}}); - - // Send window updates from the client. - setupDefaultConnectionMocks(); - EXPECT_CALL(response_decoder_, decodeData(_, false)).Times(AtLeast(1)); - EXPECT_CALL(response_decoder_, decodeTrailers_(_)); - EXPECT_CALL(*flush_timer, disableTimer()); - auto status = server_wrapper_.dispatch(Buffer::OwnedImpl(), *server_); - EXPECT_TRUE(status.ok()); - EXPECT_EQ(0, server_stats_store_.counter("http2.tx_flush_timeout").value()); -} - -// Verify that we create and handle the stream flush timeout when trailers follow a stream that -// does not have enough window. -TEST_P(Http2CodecImplFlowControlTest, TrailingHeadersLargeServerBodyFlushTimeout) { - initialize(); - - InSequence s; - MockStreamCallbacks client_stream_callbacks; - request_encoder_->getStream().addCallbacks(client_stream_callbacks); - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); - request_encoder_->encodeHeaders(request_headers, true); - - ON_CALL(client_connection_, write(_, _)) - .WillByDefault( - Invoke([&](Buffer::Instance& data, bool) -> void { server_wrapper_.buffer_.add(data); })); - TestResponseHeaderMapImpl response_headers{{":status", "200"}}; - EXPECT_CALL(response_decoder_, decodeHeaders_(_, false)); - response_encoder_->encodeHeaders(response_headers, false); - EXPECT_CALL(server_stream_callbacks_, onAboveWriteBufferHighWatermark()); - EXPECT_CALL(response_decoder_, decodeData(_, false)).Times(AtLeast(1)); - auto flush_timer = new Event::MockTimer(&server_connection_.dispatcher_); - EXPECT_CALL(*flush_timer, enableTimer(std::chrono::milliseconds(30000), _)); - Buffer::OwnedImpl body(std::string(1024 * 1024, 'a')); - response_encoder_->encodeData(body, false); - response_encoder_->encodeTrailers(TestResponseTrailerMapImpl{{"trailing", "header"}}); - - // Invoke a stream flush timeout. Make sure we don't get a reset locally for higher layers but - // we do get a reset on the client. 
- EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(0); - EXPECT_CALL(client_stream_callbacks, onResetStream(StreamResetReason::RemoteReset, _)); - flush_timer->invokeCallback(); - EXPECT_EQ(1, server_stats_store_.counter("http2.tx_flush_timeout").value()); -} - -// Verify that we create and handle the stream flush timeout when there is a large body that -// does not have enough window. -TEST_P(Http2CodecImplFlowControlTest, LargeServerBodyFlushTimeout) { - initialize(); - - InSequence s; - MockStreamCallbacks client_stream_callbacks; - request_encoder_->getStream().addCallbacks(client_stream_callbacks); - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); - request_encoder_->encodeHeaders(request_headers, true); - - ON_CALL(client_connection_, write(_, _)) - .WillByDefault( - Invoke([&](Buffer::Instance& data, bool) -> void { server_wrapper_.buffer_.add(data); })); - TestResponseHeaderMapImpl response_headers{{":status", "200"}}; - EXPECT_CALL(response_decoder_, decodeHeaders_(_, false)); - response_encoder_->encodeHeaders(response_headers, false); - EXPECT_CALL(response_decoder_, decodeData(_, false)).Times(AtLeast(1)); - auto flush_timer = new Event::MockTimer(&server_connection_.dispatcher_); - EXPECT_CALL(*flush_timer, enableTimer(std::chrono::milliseconds(30000), _)); - Buffer::OwnedImpl body(std::string(1024 * 1024, 'a')); - response_encoder_->encodeData(body, true); - - // Invoke a stream flush timeout. Make sure we don't get a reset locally for higher layers but - // we do get a reset on the client. - EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(0); - EXPECT_CALL(client_stream_callbacks, onResetStream(StreamResetReason::RemoteReset, _)); - flush_timer->invokeCallback(); - EXPECT_EQ(1, server_stats_store_.counter("http2.tx_flush_timeout").value()); -} - -// Verify that when an incoming protocol error races with a stream flush timeout we correctly -// disable the flush timeout and do not attempt to reset the stream. -TEST_P(Http2CodecImplFlowControlTest, LargeServerBodyFlushTimeoutAfterGoaway) { - initialize(); - - InSequence s; - MockStreamCallbacks client_stream_callbacks; - request_encoder_->getStream().addCallbacks(client_stream_callbacks); - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); - request_encoder_->encodeHeaders(request_headers, true); - - ON_CALL(client_connection_, write(_, _)) - .WillByDefault( - Invoke([&](Buffer::Instance& data, bool) -> void { server_wrapper_.buffer_.add(data); })); - TestResponseHeaderMapImpl response_headers{{":status", "200"}}; - EXPECT_CALL(response_decoder_, decodeHeaders_(_, false)); - response_encoder_->encodeHeaders(response_headers, false); - EXPECT_CALL(response_decoder_, decodeData(_, false)).Times(AtLeast(1)); - auto flush_timer = new Event::MockTimer(&server_connection_.dispatcher_); - EXPECT_CALL(*flush_timer, enableTimer(std::chrono::milliseconds(30000), _)); - Buffer::OwnedImpl body(std::string(1024 * 1024, 'a')); - response_encoder_->encodeData(body, true); - - // Force a protocol error. 
- Buffer::OwnedImpl garbage_data("this should cause a protocol error"); - EXPECT_CALL(client_callbacks_, onGoAway(_)); - EXPECT_CALL(*flush_timer, disableTimer()); - EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(0); - auto status = server_wrapper_.dispatch(garbage_data, *server_); - EXPECT_FALSE(status.ok()); - EXPECT_EQ(0, server_stats_store_.counter("http2.tx_flush_timeout").value()); -} - -TEST_P(Http2CodecImplTest, WatermarkUnderEndStream) { - initialize(); - MockStreamCallbacks callbacks; - request_encoder_->getStream().addCallbacks(callbacks); - - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - EXPECT_CALL(request_decoder_, decodeHeaders_(_, false)); - request_encoder_->encodeHeaders(request_headers, false); - - // The 'true' on encodeData will set local_end_stream_ on the client but not - // the server. Verify that client watermark callbacks will not be called, but - // server callbacks may be called by simulating connection overflow on both - // ends. - EXPECT_CALL(callbacks, onAboveWriteBufferHighWatermark()).Times(0); - EXPECT_CALL(callbacks, onBelowWriteBufferLowWatermark()).Times(0); - EXPECT_CALL(server_stream_callbacks_, onAboveWriteBufferHighWatermark()); - EXPECT_CALL(server_stream_callbacks_, onBelowWriteBufferLowWatermark()); - EXPECT_CALL(request_decoder_, decodeData(_, true)).WillOnce(InvokeWithoutArgs([&]() -> void { - client_->onUnderlyingConnectionAboveWriteBufferHighWatermark(); - client_->onUnderlyingConnectionBelowWriteBufferLowWatermark(); - server_->onUnderlyingConnectionAboveWriteBufferHighWatermark(); - server_->onUnderlyingConnectionBelowWriteBufferLowWatermark(); - })); - Buffer::OwnedImpl hello("hello"); - request_encoder_->encodeData(hello, true); - - // The 'true' on encodeData will set local_end_stream_ on the server. Verify - // that neither client nor server watermark callbacks will be called again. - EXPECT_CALL(callbacks, onAboveWriteBufferHighWatermark()).Times(0); - EXPECT_CALL(callbacks, onBelowWriteBufferLowWatermark()).Times(0); - EXPECT_CALL(server_stream_callbacks_, onAboveWriteBufferHighWatermark()).Times(0); - EXPECT_CALL(server_stream_callbacks_, onBelowWriteBufferLowWatermark()).Times(0); - TestResponseHeaderMapImpl response_headers{{":status", "200"}}; - EXPECT_CALL(response_decoder_, decodeHeaders_(HeaderMapEqual(&response_headers), true)) - .WillOnce(InvokeWithoutArgs([&]() -> void { - client_->onUnderlyingConnectionAboveWriteBufferHighWatermark(); - client_->onUnderlyingConnectionBelowWriteBufferLowWatermark(); - server_->onUnderlyingConnectionAboveWriteBufferHighWatermark(); - server_->onUnderlyingConnectionBelowWriteBufferLowWatermark(); - })); - response_encoder_->encodeHeaders(response_headers, true); -} - -class Http2CodecImplStreamLimitTest : public Http2CodecImplTest {}; - -// Regression test for issue #3076. -// -// TODO(PiotrSikora): add tests that exercise both scenarios: before and after receiving -// the HTTP/2 SETTINGS frame. 
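The TODO above matters because SETTINGS_MAX_CONCURRENT_STREAMS imposes no limit until the peer's SETTINGS frame is processed (RFC 7540 gives it an unbounded initial value), which is why the test below can open 101 streams back to back. A hedged sketch of that admission check, names illustrative:

    #include <cstdint>
    #include <iostream>
    #include <optional>

    // Until a SETTINGS frame announces a limit, there is nothing to enforce.
    bool canOpenStream(uint32_t active_streams, std::optional<uint32_t> peer_max_concurrent) {
      return !peer_max_concurrent.has_value() || active_streams < *peer_max_concurrent;
    }

    int main() {
      std::cout << canOpenStream(100, std::nullopt) << "\n"; // 1: no SETTINGS received yet
      std::cout << canOpenStream(100, 100) << "\n";          // 0: limit announced and reached
    }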
-TEST_P(Http2CodecImplStreamLimitTest, MaxClientStreams) {
-  http2OptionsFromTuple(client_http2_options_, ::testing::get<0>(GetParam()));
-  http2OptionsFromTuple(server_http2_options_, ::testing::get<1>(GetParam()));
-  client_ = std::make_unique<TestClientConnectionImpl>(
-      client_connection_, client_callbacks_, client_stats_store_, client_http2_options_,
-      max_request_headers_kb_, max_response_headers_count_, ProdNghttp2SessionFactory::get());
-  server_ = std::make_unique<TestServerConnectionImpl>(
-      server_connection_, server_callbacks_, server_stats_store_, server_http2_options_,
-      max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_);
-
-  for (int i = 0; i < 101; ++i) {
-    request_encoder_ = &client_->newStream(response_decoder_);
-    setupDefaultConnectionMocks();
-    EXPECT_CALL(server_callbacks_, newStream(_, _))
-        .WillOnce(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& {
-          response_encoder_ = &encoder;
-          encoder.getStream().addCallbacks(server_stream_callbacks_);
-          return request_decoder_;
-        }));
-
-    TestRequestHeaderMapImpl request_headers;
-    HttpTestUtility::addDefaultHeaders(request_headers);
-    EXPECT_CALL(request_decoder_, decodeHeaders_(_, true));
-    request_encoder_->encodeHeaders(request_headers, true);
-  }
-}
-
-#define HTTP2SETTINGS_SMALL_WINDOW_COMBINE                                                        \
-  ::testing::Combine(                                                                             \
-      ::testing::Values(CommonUtility::OptionsLimits::DEFAULT_HPACK_TABLE_SIZE),                  \
-      ::testing::Values(CommonUtility::OptionsLimits::DEFAULT_MAX_CONCURRENT_STREAMS),            \
-      ::testing::Values(CommonUtility::OptionsLimits::MIN_INITIAL_STREAM_WINDOW_SIZE),            \
-      ::testing::Values(CommonUtility::OptionsLimits::MIN_INITIAL_CONNECTION_WINDOW_SIZE))
-
-// Deferred reset tests use only small windows so that we can test certain conditions.
-INSTANTIATE_TEST_SUITE_P(Http2CodecImplDeferredResetTest, Http2CodecImplDeferredResetTest,
-                         ::testing::Combine(HTTP2SETTINGS_SMALL_WINDOW_COMBINE,
-                                            HTTP2SETTINGS_SMALL_WINDOW_COMBINE));
-
-// Flow control tests only use only small windows so that we can test certain conditions.
-INSTANTIATE_TEST_SUITE_P(Http2CodecImplFlowControlTest, Http2CodecImplFlowControlTest,
-                         ::testing::Combine(HTTP2SETTINGS_SMALL_WINDOW_COMBINE,
-                                            HTTP2SETTINGS_SMALL_WINDOW_COMBINE));
-
-// we separate default/edge cases here to avoid combinatorial explosion
-#define HTTP2SETTINGS_DEFAULT_COMBINE                                                             \
-  ::testing::Combine(                                                                             \
-      ::testing::Values(CommonUtility::OptionsLimits::DEFAULT_HPACK_TABLE_SIZE),                  \
-      ::testing::Values(CommonUtility::OptionsLimits::DEFAULT_MAX_CONCURRENT_STREAMS),            \
-      ::testing::Values(CommonUtility::OptionsLimits::DEFAULT_INITIAL_STREAM_WINDOW_SIZE),        \
-      ::testing::Values(CommonUtility::OptionsLimits::DEFAULT_INITIAL_CONNECTION_WINDOW_SIZE))
-
-// Stream limit test only uses the default values because not all combinations of
-// edge settings allow for the number of streams needed by the test.
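For a sense of scale, the Combine() macros here multiply quickly: the edge-settings variant below picks two values for each of the four settings on each side of the connection. A quick standalone count of that blow-up (this is what the "runs 255 times" warning below is about):

    #include <cstdint>
    #include <iostream>

    int main() {
      constexpr uint64_t per_side = 2 * 2 * 2 * 2;         // 2 values for each of 4 settings
      constexpr uint64_t both_sides = per_side * per_side; // client settings x server settings
      std::cout << per_side << " tuples per side, " << both_sides << " combinations\n"; // 16, 256
    }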
-INSTANTIATE_TEST_SUITE_P(Http2CodecImplStreamLimitTest, Http2CodecImplStreamLimitTest, - ::testing::Combine(HTTP2SETTINGS_DEFAULT_COMBINE, - HTTP2SETTINGS_DEFAULT_COMBINE)); - -INSTANTIATE_TEST_SUITE_P(Http2CodecImplTestDefaultSettings, Http2CodecImplTest, - ::testing::Combine(HTTP2SETTINGS_DEFAULT_COMBINE, - HTTP2SETTINGS_DEFAULT_COMBINE)); - -#define HTTP2SETTINGS_EDGE_COMBINE \ - ::testing::Combine( \ - ::testing::Values(CommonUtility::OptionsLimits::MIN_HPACK_TABLE_SIZE, \ - CommonUtility::OptionsLimits::MAX_HPACK_TABLE_SIZE), \ - ::testing::Values(CommonUtility::OptionsLimits::MIN_MAX_CONCURRENT_STREAMS, \ - CommonUtility::OptionsLimits::MAX_MAX_CONCURRENT_STREAMS), \ - ::testing::Values(CommonUtility::OptionsLimits::MIN_INITIAL_STREAM_WINDOW_SIZE, \ - CommonUtility::OptionsLimits::MAX_INITIAL_STREAM_WINDOW_SIZE), \ - ::testing::Values(CommonUtility::OptionsLimits::MIN_INITIAL_CONNECTION_WINDOW_SIZE, \ - CommonUtility::OptionsLimits::MAX_INITIAL_CONNECTION_WINDOW_SIZE)) - -// Make sure we have coverage for high and low values for various combinations and permutations -// of HTTP settings in at least one test fixture. -// Use with caution as any test using this runs 255 times. -using Http2CodecImplTestAll = Http2CodecImplTest; - -INSTANTIATE_TEST_SUITE_P(Http2CodecImplTestDefaultSettings, Http2CodecImplTestAll, - ::testing::Combine(HTTP2SETTINGS_DEFAULT_COMBINE, - HTTP2SETTINGS_DEFAULT_COMBINE)); -INSTANTIATE_TEST_SUITE_P(Http2CodecImplTestEdgeSettings, Http2CodecImplTestAll, - ::testing::Combine(HTTP2SETTINGS_EDGE_COMBINE, - HTTP2SETTINGS_EDGE_COMBINE)); - -TEST(Http2CodecUtility, reconstituteCrumbledCookies) { - { - HeaderString key; - HeaderString value; - HeaderString cookies; - EXPECT_FALSE(Utility::reconstituteCrumbledCookies(key, value, cookies)); - EXPECT_TRUE(cookies.empty()); - } - - { - HeaderString key(Headers::get().ContentLength); - HeaderString value; - value.setInteger(5); - HeaderString cookies; - EXPECT_FALSE(Utility::reconstituteCrumbledCookies(key, value, cookies)); - EXPECT_TRUE(cookies.empty()); - } - - { - HeaderString key(Headers::get().Cookie); - HeaderString value; - value.setCopy("a=b", 3); - HeaderString cookies; - EXPECT_TRUE(Utility::reconstituteCrumbledCookies(key, value, cookies)); - EXPECT_EQ(cookies, "a=b"); - - HeaderString key2(Headers::get().Cookie); - HeaderString value2; - value2.setCopy("c=d", 3); - EXPECT_TRUE(Utility::reconstituteCrumbledCookies(key2, value2, cookies)); - EXPECT_EQ(cookies, "a=b; c=d"); - } -} - -MATCHER_P(HasValue, m, "") { - if (!arg.has_value()) { - *result_listener << "does not contain a value"; - return false; - } - const auto& value = arg.value(); - return ExplainMatchResult(m, value, result_listener); -}; - -class Http2CustomSettingsTestBase : public Http2CodecImplTestFixture { -public: - struct SettingsParameter { - uint16_t identifier; - uint32_t value; - }; - - Http2CustomSettingsTestBase(Http2SettingsTuple client_settings, - Http2SettingsTuple server_settings, bool validate_client) - : Http2CodecImplTestFixture(client_settings, server_settings), - validate_client_(validate_client) {} - - ~Http2CustomSettingsTestBase() override = default; - - // Sets the custom settings parameters specified by |parameters| in the |options| proto. 
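On the wire, every SETTINGS parameter is just a 16-bit identifier and a 32-bit value; RFC 7540 defines identifiers 0x1 through 0x6, and a peer must ignore identifiers it does not understand, which is what makes the custom parameters used in these tests (0x10, 0x11) safe to send. A small standalone illustration:

    #include <cstdint>
    #include <iostream>
    #include <vector>

    struct SettingsParameter {
      uint16_t identifier; // 0x1-0x6 are defined by RFC 7540; anything else is "custom"
      uint32_t value;
    };

    int main() {
      const std::vector<SettingsParameter> custom{{0x10, 10}, {0x11, 20}};
      for (const auto& p : custom) {
        std::cout << "0x" << std::hex << p.identifier << std::dec << " = " << p.value << "\n";
      }
    }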
- void setHttp2CustomSettingsParameters(envoy::config::core::v3::Http2ProtocolOptions& options, - std::vector parameters) { - for (const auto& parameter : parameters) { - envoy::config::core::v3::Http2ProtocolOptions::SettingsParameter* custom_param = - options.mutable_custom_settings_parameters()->Add(); - custom_param->mutable_identifier()->set_value(parameter.identifier); - custom_param->mutable_value()->set_value(parameter.value); - } - } - - // Returns the Http2ProtocolOptions proto which specifies the settings parameters to be sent to - // the endpoint being validated. - envoy::config::core::v3::Http2ProtocolOptions& getCustomOptions() { - return validate_client_ ? server_http2_options_ : client_http2_options_; - } - - // Returns the endpoint being validated. - const TestCodecSettingsProvider& getSettingsProvider() { - if (validate_client_) { - return *client_; - } - return *server_; - } - - // Returns the settings tuple which specifies a subset of the settings parameters to be sent to - // the endpoint being validated. - const Http2SettingsTuple& getSettingsTuple() { - ASSERT(client_settings_.has_value() && server_settings_.has_value()); - return validate_client_ ? *server_settings_ : *client_settings_; - } - -protected: - bool validate_client_{false}; -}; - -class Http2CustomSettingsTest - : public Http2CustomSettingsTestBase, - public ::testing::TestWithParam< - ::testing::tuple> { -public: - Http2CustomSettingsTest() - : Http2CustomSettingsTestBase(::testing::get<0>(GetParam()), ::testing::get<1>(GetParam()), - ::testing::get<2>(GetParam())) {} -}; -INSTANTIATE_TEST_SUITE_P(Http2CodecImplTestEdgeSettings, Http2CustomSettingsTest, - ::testing::Combine(HTTP2SETTINGS_DEFAULT_COMBINE, - HTTP2SETTINGS_DEFAULT_COMBINE, ::testing::Bool())); - -// Validates that custom parameters (those which are not explicitly named in the -// envoy::config::core::v3::Http2ProtocolOptions proto) are properly sent and processed by -// client and server connections. -TEST_P(Http2CustomSettingsTest, UserDefinedSettings) { - std::vector custom_parameters{{0x10, 10}, {0x11, 20}}; - setHttp2CustomSettingsParameters(getCustomOptions(), custom_parameters); - initialize(); - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - EXPECT_CALL(request_decoder_, decodeHeaders_(_, _)); - request_encoder_->encodeHeaders(request_headers, false); - uint32_t hpack_table_size = - ::testing::get(getSettingsTuple()); - if (hpack_table_size != NGHTTP2_DEFAULT_HEADER_TABLE_SIZE) { - EXPECT_THAT( - getSettingsProvider().getRemoteSettingsParameterValue(NGHTTP2_SETTINGS_HEADER_TABLE_SIZE), - HasValue(hpack_table_size)); - } - uint32_t max_concurrent_streams = - ::testing::get(getSettingsTuple()); - if (max_concurrent_streams != NGHTTP2_INITIAL_MAX_CONCURRENT_STREAMS) { - EXPECT_THAT(getSettingsProvider().getRemoteSettingsParameterValue( - NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS), - HasValue(max_concurrent_streams)); - } - uint32_t initial_stream_window_size = - ::testing::get(getSettingsTuple()); - if (max_concurrent_streams != NGHTTP2_INITIAL_WINDOW_SIZE) { - EXPECT_THAT( - getSettingsProvider().getRemoteSettingsParameterValue(NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE), - HasValue(initial_stream_window_size)); - } - // Validate that custom parameters are received by the endpoint (client or server) under - // test. 
- for (const auto& parameter : custom_parameters) { - EXPECT_THAT(getSettingsProvider().getRemoteSettingsParameterValue(parameter.identifier), - HasValue(parameter.value)); - } -} - -// Tests request headers whose size is larger than the default limit of 60K. -TEST_P(Http2CodecImplTest, LargeRequestHeadersInvokeResetStream) { - initialize(); - - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - std::string long_string = std::string(63 * 1024, 'q'); - request_headers.addCopy("big", long_string); - EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(1); - request_encoder_->encodeHeaders(request_headers, false); -} - -// Large request headers are accepted when max limit configured. -TEST_P(Http2CodecImplTest, LargeRequestHeadersAccepted) { - max_request_headers_kb_ = 64; - initialize(); - - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - std::string long_string = std::string(63 * 1024, 'q'); - request_headers.addCopy("big", long_string); - - EXPECT_CALL(request_decoder_, decodeHeaders_(_, _)); - EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(0); - request_encoder_->encodeHeaders(request_headers, false); -} - -// Tests request headers with name containing underscore are dropped when the option is set to drop -// header. -TEST_P(Http2CodecImplTest, HeaderNameWithUnderscoreAreDropped) { - headers_with_underscores_action_ = envoy::config::core::v3::HttpProtocolOptions::DROP_HEADER; - initialize(); - - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - TestRequestHeaderMapImpl expected_headers(request_headers); - request_headers.addCopy("bad_header", "something"); - EXPECT_CALL(request_decoder_, decodeHeaders_(HeaderMapEqual(&expected_headers), _)); - request_encoder_->encodeHeaders(request_headers, false); - EXPECT_EQ(1, server_stats_store_.counter("http2.dropped_headers_with_underscores").value()); -} - -// Tests that request with header names containing underscore are rejected when the option is set to -// reject request. -TEST_P(Http2CodecImplTest, HeaderNameWithUnderscoreAreRejectedByDefault) { - headers_with_underscores_action_ = envoy::config::core::v3::HttpProtocolOptions::REJECT_REQUEST; - initialize(); - - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - request_headers.addCopy("bad_header", "something"); - EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(1); - request_encoder_->encodeHeaders(request_headers, false); - EXPECT_EQ( - 1, - server_stats_store_.counter("http2.requests_rejected_with_underscores_in_headers").value()); -} - -// Tests request headers with name containing underscore are allowed when the option is set to -// allow. 
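The three underscore tests around here exercise one decision under different configuration: let the header through, silently drop it, or reject the whole request. A rough standalone sketch of that switch, with illustrative names:

    #include <iostream>
    #include <string>

    enum class UnderscoreAction { Allow, DropHeader, RejectRequest };

    // Returns whether the header is kept; sets reject_request when the whole
    // request should be reset instead (the REJECT_REQUEST behavior).
    bool keepHeader(const std::string& name, UnderscoreAction action, bool& reject_request) {
      if (name.find('_') == std::string::npos) {
        return true; // no underscore, always kept
      }
      switch (action) {
      case UnderscoreAction::Allow:
        return true;
      case UnderscoreAction::DropHeader:
        return false; // header dropped, request continues
      case UnderscoreAction::RejectRequest:
        reject_request = true;
        return false;
      }
      return true;
    }

    int main() {
      bool reject = false;
      const bool kept = keepHeader("bad_header", UnderscoreAction::RejectRequest, reject);
      std::cout << std::boolalpha << "kept=" << kept << " reject_request=" << reject << "\n";
    }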
-TEST_P(Http2CodecImplTest, HeaderNameWithUnderscoreAllowed) { - headers_with_underscores_action_ = envoy::config::core::v3::HttpProtocolOptions::ALLOW; - initialize(); - - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - request_headers.addCopy("bad_header", "something"); - TestRequestHeaderMapImpl expected_headers(request_headers); - EXPECT_CALL(request_decoder_, decodeHeaders_(HeaderMapEqual(&expected_headers), _)); - EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(0); - request_encoder_->encodeHeaders(request_headers, false); - EXPECT_EQ(0, server_stats_store_.counter("http2.dropped_headers_with_underscores").value()); -} - -// This is the HTTP/2 variant of the HTTP/1 regression test for CVE-2019-18801. -// Large method headers should not trigger ASSERTs or ASAN. The underlying issue -// in CVE-2019-18801 only affected the HTTP/1 encoder, but we include a test -// here for belt-and-braces. This also demonstrates that the HTTP/2 codec will -// accept arbitrary :method headers, unlike the HTTP/1 codec (see -// Http1ServerConnectionImplTest.RejectInvalidMethod for comparison). -TEST_P(Http2CodecImplTest, LargeMethodRequestEncode) { - max_request_headers_kb_ = 80; - initialize(); - - const std::string long_method = std::string(79 * 1024, 'a'); - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - request_headers.setReferenceKey(Headers::get().Method, long_method); - EXPECT_CALL(request_decoder_, decodeHeaders_(HeaderMapEqual(&request_headers), false)); - EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(0); - request_encoder_->encodeHeaders(request_headers, false); -} - -// Tests stream reset when the number of request headers exceeds the default maximum of 100. -TEST_P(Http2CodecImplTest, ManyRequestHeadersInvokeResetStream) { - initialize(); - - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - for (int i = 0; i < 100; i++) { - request_headers.addCopy(std::to_string(i), ""); - } - EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(1); - request_encoder_->encodeHeaders(request_headers, false); -} - -// Tests that max number of request headers is configurable. -TEST_P(Http2CodecImplTest, ManyRequestHeadersAccepted) { - max_request_headers_count_ = 150; - initialize(); - - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - for (int i = 0; i < 145; i++) { - request_headers.addCopy(std::to_string(i), ""); - } - EXPECT_CALL(request_decoder_, decodeHeaders_(_, _)); - EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(0); - request_encoder_->encodeHeaders(request_headers, false); -} - -// Tests that max number of response headers is configurable. 
-TEST_P(Http2CodecImplTest, ManyResponseHeadersAccepted) { - max_response_headers_count_ = 110; - initialize(); - - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - EXPECT_CALL(request_decoder_, decodeHeaders_(_, false)); - request_encoder_->encodeHeaders(request_headers, false); - - TestResponseHeaderMapImpl response_headers{{":status", "200"}, {"compression", "test"}}; - for (int i = 0; i < 105; i++) { - response_headers.addCopy(std::to_string(i), ""); - } - EXPECT_CALL(response_decoder_, decodeHeaders_(_, true)); - response_encoder_->encodeHeaders(response_headers, true); -} - -TEST_P(Http2CodecImplTest, LargeRequestHeadersAtLimitAccepted) { - uint32_t codec_limit_kb = 64; - max_request_headers_kb_ = codec_limit_kb; - initialize(); - - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - std::string key = "big"; - uint32_t head_room = 77; - uint32_t long_string_length = - codec_limit_kb * 1024 - request_headers.byteSize() - key.length() - head_room; - std::string long_string = std::string(long_string_length, 'q'); - request_headers.addCopy(key, long_string); - - // The amount of data sent to the codec is not equivalent to the size of the - // request headers that Envoy computes, as the codec limits based on the - // entire http2 frame. The exact head room needed (76) was found through iteration. - ASSERT_EQ(request_headers.byteSize() + head_room, codec_limit_kb * 1024); - - EXPECT_CALL(request_decoder_, decodeHeaders_(_, _)); - request_encoder_->encodeHeaders(request_headers, true); -} - -TEST_P(Http2CodecImplTest, LargeRequestHeadersOverDefaultCodecLibraryLimit) { - max_request_headers_kb_ = 66; - initialize(); - - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - std::string long_string = std::string(65 * 1024, 'q'); - request_headers.addCopy("big", long_string); - - EXPECT_CALL(request_decoder_, decodeHeaders_(_, _)).Times(1); - EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(0); - request_encoder_->encodeHeaders(request_headers, true); -} - -TEST_P(Http2CodecImplTest, LargeRequestHeadersExceedPerHeaderLimit) { - // The name-value pair max is set by NGHTTP2_HD_MAX_NV in lib/nghttp2_hd.h to 64KB, and - // creates a per-request header limit for us in h2. Note that the nghttp2 - // calculated byte size will differ from envoy due to H2 compression and frames. 
- - max_request_headers_kb_ = 81; - initialize(); - - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - std::string long_string = std::string(80 * 1024, 'q'); - request_headers.addCopy("big", long_string); - - EXPECT_CALL(request_decoder_, decodeHeaders_(_, _)).Times(0); - EXPECT_CALL(client_callbacks_, onGoAway(_)); - server_->shutdownNotice(); - server_->goAway(); - request_encoder_->encodeHeaders(request_headers, true); -} - -TEST_P(Http2CodecImplTest, ManyLargeRequestHeadersUnderPerHeaderLimit) { - max_request_headers_kb_ = 81; - initialize(); - - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - std::string long_string = std::string(1024, 'q'); - for (int i = 0; i < 80; i++) { - request_headers.addCopy(std::to_string(i), long_string); - } - - EXPECT_CALL(request_decoder_, decodeHeaders_(_, _)).Times(1); - EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(0); - request_encoder_->encodeHeaders(request_headers, true); -} - -TEST_P(Http2CodecImplTest, LargeRequestHeadersAtMaxConfigurable) { - // Raising the limit past this triggers some unexpected nghttp2 error. - // Further debugging required to increase past ~96 KiB. - max_request_headers_kb_ = 96; - initialize(); - - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - std::string long_string = std::string(1024, 'q'); - for (int i = 0; i < 95; i++) { - request_headers.addCopy(std::to_string(i), long_string); - } - - EXPECT_CALL(request_decoder_, decodeHeaders_(_, _)).Times(1); - EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(0); - request_encoder_->encodeHeaders(request_headers, true); -} - -// Note this is Http2CodecImplTestAll not Http2CodecImplTest, to test -// compression with min and max HPACK table size. -TEST_P(Http2CodecImplTestAll, TestCodecHeaderCompression) { - initialize(); - - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); - request_encoder_->encodeHeaders(request_headers, true); - - TestResponseHeaderMapImpl response_headers{{":status", "200"}, {"compression", "test"}}; - EXPECT_CALL(response_decoder_, decodeHeaders_(_, true)); - response_encoder_->encodeHeaders(response_headers, true); - - // Sanity check to verify that state of encoders and decoders matches. 
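// (Each sender's deflate, i.e. encoding, table size should match its peer's inflate, i.e.
// decoding, table size; the two EXPECT_EQ checks below compare exactly those pairs.)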
- EXPECT_EQ(nghttp2_session_get_hd_deflate_dynamic_table_size(server_->session()), - nghttp2_session_get_hd_inflate_dynamic_table_size(client_->session())); - EXPECT_EQ(nghttp2_session_get_hd_deflate_dynamic_table_size(client_->session()), - nghttp2_session_get_hd_inflate_dynamic_table_size(server_->session())); - - // Verify that headers are compressed only when both client and server advertise table size - // > 0: - if (client_http2_options_.hpack_table_size().value() && - server_http2_options_.hpack_table_size().value()) { - EXPECT_NE(0, nghttp2_session_get_hd_deflate_dynamic_table_size(client_->session())); - EXPECT_NE(0, nghttp2_session_get_hd_deflate_dynamic_table_size(server_->session())); - } else { - EXPECT_EQ(0, nghttp2_session_get_hd_deflate_dynamic_table_size(client_->session())); - EXPECT_EQ(0, nghttp2_session_get_hd_deflate_dynamic_table_size(server_->session())); - } -} - -// Verify that codec detects PING flood -TEST_P(Http2CodecImplTest, PingFlood) { - initialize(); - - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - EXPECT_CALL(request_decoder_, decodeHeaders_(_, false)); - request_encoder_->encodeHeaders(request_headers, false); - - // Send one frame above the outbound control queue size limit - for (uint32_t i = 0; i < CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_CONTROL_FRAMES + 1; - ++i) { - EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr)); - } - - int ack_count = 0; - Buffer::OwnedImpl buffer; - ON_CALL(server_connection_, write(_, _)) - .WillByDefault(Invoke([&buffer, &ack_count](Buffer::Instance& frame, bool) { - ++ack_count; - buffer.move(frame); - })); - - EXPECT_THROW(client_->sendPendingFrames(), ServerCodecError); - EXPECT_EQ(ack_count, CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_CONTROL_FRAMES); - EXPECT_EQ(1, server_stats_store_.counter("http2.outbound_control_flood").value()); -} - -// Verify that codec allows PING flood when mitigation is disabled -TEST_P(Http2CodecImplTest, PingFloodMitigationDisabled) { - max_outbound_control_frames_ = 2147483647; - initialize(); - - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - EXPECT_CALL(request_decoder_, decodeHeaders_(_, false)); - request_encoder_->encodeHeaders(request_headers, false); - - // Send one frame above the outbound control queue size limit - for (uint32_t i = 0; i < CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_CONTROL_FRAMES + 1; - ++i) { - EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr)); - } - - EXPECT_CALL(server_connection_, write(_, _)) - .Times(CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_CONTROL_FRAMES + 1); - EXPECT_NO_THROW(client_->sendPendingFrames()); -} - -// Verify that outbound control frame counter decreases when send buffer is drained -TEST_P(Http2CodecImplTest, PingFloodCounterReset) { - // Ping frames are 17 bytes each so 237 full frames and a partial frame fit in the current min - // size for buffer slices. Setting the limit to 2x+1 the number that fits in a single slice allows - // the logic below that verifies drain and overflow thresholds. 
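// (Assuming a minimum slice really does hold 237 full PING frames, the 2x+1 limit works out to
// 2 * 237 + 1 = 475, which is the constant used below.)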
- static const int kMaxOutboundControlFrames = 475; - max_outbound_control_frames_ = kMaxOutboundControlFrames; - initialize(); - - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - EXPECT_CALL(request_decoder_, decodeHeaders_(_, false)); - request_encoder_->encodeHeaders(request_headers, false); - - for (int i = 0; i < kMaxOutboundControlFrames; ++i) { - EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr)); - } - - int ack_count = 0; - Buffer::OwnedImpl buffer; - ON_CALL(server_connection_, write(_, _)) - .WillByDefault(Invoke([&buffer, &ack_count](Buffer::Instance& frame, bool) { - ++ack_count; - buffer.move(frame); - })); - - // We should be 1 frame under the control frame flood mitigation threshold. - EXPECT_NO_THROW(client_->sendPendingFrames()); - EXPECT_EQ(ack_count, kMaxOutboundControlFrames); - - // Drain floor(kMaxOutboundFrames / 2) slices from the send buffer - buffer.drain(buffer.length() / 2); - - // Send floor(kMaxOutboundFrames / 2) more pings. - for (int i = 0; i < kMaxOutboundControlFrames / 2; ++i) { - EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr)); - } - // The number of outbound frames should be half of max so the connection should not be - // terminated. - EXPECT_NO_THROW(client_->sendPendingFrames()); - EXPECT_EQ(ack_count, kMaxOutboundControlFrames + kMaxOutboundControlFrames / 2); - - // 1 more ping frame should overflow the outbound frame limit. - EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr)); - EXPECT_THROW(client_->sendPendingFrames(), ServerCodecError); -} - -// Verify that codec detects flood of outbound HEADER frames -TEST_P(Http2CodecImplTest, ResponseHeadersFlood) { - initialize(); - - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - EXPECT_CALL(request_decoder_, decodeHeaders_(_, false)); - request_encoder_->encodeHeaders(request_headers, false); - - int frame_count = 0; - Buffer::OwnedImpl buffer; - ON_CALL(server_connection_, write(_, _)) - .WillByDefault(Invoke([&buffer, &frame_count](Buffer::Instance& frame, bool) { - ++frame_count; - buffer.move(frame); - })); - - TestResponseHeaderMapImpl response_headers{{":status", "200"}}; - for (uint32_t i = 0; i < CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES + 1; ++i) { - EXPECT_NO_THROW(response_encoder_->encodeHeaders(response_headers, false)); - } - // Presently flood mitigation is done only when processing downstream data - // So we need to send stream from downstream client to trigger mitigation - EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr)); - EXPECT_THROW(client_->sendPendingFrames(), ServerCodecError); - - EXPECT_EQ(frame_count, CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES + 1); - EXPECT_EQ(1, server_stats_store_.counter("http2.outbound_flood").value()); -} - -// Verify that codec detects flood of outbound DATA frames -TEST_P(Http2CodecImplTest, ResponseDataFlood) { - initialize(); - - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - EXPECT_CALL(request_decoder_, decodeHeaders_(_, false)); - request_encoder_->encodeHeaders(request_headers, false); - - int frame_count = 0; - Buffer::OwnedImpl buffer; - ON_CALL(server_connection_, write(_, _)) - .WillByDefault(Invoke([&buffer, &frame_count](Buffer::Instance& frame, bool) { - ++frame_count; - buffer.move(frame); - })); - - TestResponseHeaderMapImpl 
response_headers{{":status", "200"}}; - response_encoder_->encodeHeaders(response_headers, false); - // Account for the single HEADERS frame above - for (uint32_t i = 0; i < CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES; ++i) { - Buffer::OwnedImpl data("0"); - EXPECT_NO_THROW(response_encoder_->encodeData(data, false)); - } - // Presently flood mitigation is done only when processing downstream data - // So we need to send stream from downstream client to trigger mitigation - EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr)); - EXPECT_THROW(client_->sendPendingFrames(), ServerCodecError); - - EXPECT_EQ(frame_count, CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES + 1); - EXPECT_EQ(1, server_stats_store_.counter("http2.outbound_flood").value()); -} - -// Verify that codec allows outbound DATA flood when mitigation is disabled -TEST_P(Http2CodecImplTest, ResponseDataFloodMitigationDisabled) { - max_outbound_control_frames_ = 2147483647; - initialize(); - - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - EXPECT_CALL(request_decoder_, decodeHeaders_(_, false)); - request_encoder_->encodeHeaders(request_headers, false); - - // +2 is to account for HEADERS and PING ACK, that is used to trigger mitigation - EXPECT_CALL(server_connection_, write(_, _)) - .Times(CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES + 2); - EXPECT_CALL(response_decoder_, decodeHeaders_(_, false)).Times(1); - EXPECT_CALL(response_decoder_, decodeData(_, false)) - .Times(CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES); - TestResponseHeaderMapImpl response_headers{{":status", "200"}}; - response_encoder_->encodeHeaders(response_headers, false); - // Account for the single HEADERS frame above - for (uint32_t i = 0; i < CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES; ++i) { - Buffer::OwnedImpl data("0"); - EXPECT_NO_THROW(response_encoder_->encodeData(data, false)); - } - // Presently flood mitigation is done only when processing downstream data - // So we need to send stream from downstream client to trigger mitigation - EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr)); - EXPECT_NO_THROW(client_->sendPendingFrames()); -} - -// Verify that outbound frame counter decreases when send buffer is drained -TEST_P(Http2CodecImplTest, ResponseDataFloodCounterReset) { - static const int kMaxOutboundFrames = 100; - max_outbound_frames_ = kMaxOutboundFrames; - initialize(); - - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - EXPECT_CALL(request_decoder_, decodeHeaders_(_, false)); - request_encoder_->encodeHeaders(request_headers, false); - - int frame_count = 0; - Buffer::OwnedImpl buffer; - ON_CALL(server_connection_, write(_, _)) - .WillByDefault(Invoke([&buffer, &frame_count](Buffer::Instance& frame, bool) { - ++frame_count; - buffer.move(frame); - })); - - TestResponseHeaderMapImpl response_headers{{":status", "200"}}; - response_encoder_->encodeHeaders(response_headers, false); - // Account for the single HEADERS frame above - for (uint32_t i = 0; i < kMaxOutboundFrames - 1; ++i) { - Buffer::OwnedImpl data("0"); - EXPECT_NO_THROW(response_encoder_->encodeData(data, false)); - } - - EXPECT_EQ(frame_count, kMaxOutboundFrames); - // Drain kMaxOutboundFrames / 2 slices from the send buffer - buffer.drain(buffer.length() / 2); - - for (uint32_t i = 0; i < kMaxOutboundFrames / 2 + 1; ++i) { - Buffer::OwnedImpl data("0"); 
- EXPECT_NO_THROW(response_encoder_->encodeData(data, false)); - } - - // Presently flood mitigation is done only when processing downstream data - // So we need to send a frame from downstream client to trigger mitigation - EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr)); - EXPECT_THROW(client_->sendPendingFrames(), ServerCodecError); -} - -// Verify that control frames are added to the counter of outbound frames of all types. -TEST_P(Http2CodecImplTest, PingStacksWithDataFlood) { - initialize(); - - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - EXPECT_CALL(request_decoder_, decodeHeaders_(_, false)); - request_encoder_->encodeHeaders(request_headers, false); - - int frame_count = 0; - Buffer::OwnedImpl buffer; - ON_CALL(server_connection_, write(_, _)) - .WillByDefault(Invoke([&buffer, &frame_count](Buffer::Instance& frame, bool) { - ++frame_count; - buffer.move(frame); - })); - - TestResponseHeaderMapImpl response_headers{{":status", "200"}}; - response_encoder_->encodeHeaders(response_headers, false); - // Account for the single HEADERS frame above - for (uint32_t i = 0; i < CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES - 1; ++i) { - Buffer::OwnedImpl data("0"); - EXPECT_NO_THROW(response_encoder_->encodeData(data, false)); - } - // Send one PING frame above the outbound queue size limit - EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr)); - EXPECT_THROW(client_->sendPendingFrames(), ServerCodecError); - - EXPECT_EQ(frame_count, CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES); - EXPECT_EQ(1, server_stats_store_.counter("http2.outbound_flood").value()); -} - -TEST_P(Http2CodecImplTest, PriorityFlood) { - priorityFlood(); - EXPECT_THROW(client_->sendPendingFrames(), ServerCodecError); -} - -TEST_P(Http2CodecImplTest, PriorityFloodOverride) { - max_inbound_priority_frames_per_stream_ = 2147483647; - - priorityFlood(); - EXPECT_NO_THROW(client_->sendPendingFrames()); -} - -TEST_P(Http2CodecImplTest, WindowUpdateFlood) { - windowUpdateFlood(); - EXPECT_THROW(client_->sendPendingFrames(), ServerCodecError); -} - -TEST_P(Http2CodecImplTest, WindowUpdateFloodOverride) { - max_inbound_window_update_frames_per_data_frame_sent_ = 2147483647; - windowUpdateFlood(); - EXPECT_NO_THROW(client_->sendPendingFrames()); -} - -TEST_P(Http2CodecImplTest, EmptyDataFlood) { - Buffer::OwnedImpl data; - emptyDataFlood(data); - EXPECT_CALL(request_decoder_, decodeData(_, false)); - auto status = server_wrapper_.dispatch(data, *server_); - EXPECT_FALSE(status.ok()); - EXPECT_TRUE(isBufferFloodError(status)); -} - -TEST_P(Http2CodecImplTest, EmptyDataFloodOverride) { - max_consecutive_inbound_frames_with_empty_payload_ = 2147483647; - Buffer::OwnedImpl data; - emptyDataFlood(data); - EXPECT_CALL(request_decoder_, decodeData(_, false)) - .Times( - CommonUtility::OptionsLimits::DEFAULT_MAX_CONSECUTIVE_INBOUND_FRAMES_WITH_EMPTY_PAYLOAD + - 1); - auto status = server_wrapper_.dispatch(data, *server_); - EXPECT_TRUE(status.ok()); -} - -// CONNECT without upgrade type gets tagged with "bytestream" -TEST_P(Http2CodecImplTest, ConnectTest) { - client_http2_options_.set_allow_connect(true); - server_http2_options_.set_allow_connect(true); - initialize(); - MockStreamCallbacks callbacks; - request_encoder_->getStream().addCallbacks(callbacks); - - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - 
request_headers.setReferenceKey(Headers::get().Method, Http::Headers::get().MethodValues.Connect); - TestRequestHeaderMapImpl expected_headers; - HttpTestUtility::addDefaultHeaders(expected_headers); - expected_headers.setReferenceKey(Headers::get().Method, - Http::Headers::get().MethodValues.Connect); - expected_headers.setReferenceKey(Headers::get().Protocol, "bytestream"); - EXPECT_CALL(request_decoder_, decodeHeaders_(HeaderMapEqual(&expected_headers), false)); - request_encoder_->encodeHeaders(request_headers, false); -} - -class TestNghttp2SessionFactory; - -// Test client for H/2 METADATA frame edge cases. -class MetadataTestClientConnectionImpl : public TestClientConnectionImpl { -public: - MetadataTestClientConnectionImpl( - Network::Connection& connection, Http::ConnectionCallbacks& callbacks, Stats::Scope& scope, - const envoy::config::core::v3::Http2ProtocolOptions& http2_options, - uint32_t max_request_headers_kb, uint32_t max_request_headers_count, - Nghttp2SessionFactory& http2_session_factory) - : TestClientConnectionImpl(connection, callbacks, scope, http2_options, - max_request_headers_kb, max_request_headers_count, - http2_session_factory) {} - - // Overrides TestClientConnectionImpl::submitMetadata(). - bool submitMetadata(const MetadataMapVector& metadata_map_vector, int32_t stream_id) override { - // Creates metadata payload. - encoder_.createPayload(metadata_map_vector); - for (uint8_t flags : encoder_.payloadFrameFlagBytes()) { - int result = nghttp2_submit_extension(session(), ::Envoy::Http::METADATA_FRAME_TYPE, flags, - stream_id, nullptr); - if (result != 0) { - return false; - } - } - // Triggers nghttp2 to populate the payloads of the METADATA frames. - int result = nghttp2_session_send(session()); - return result == 0; - } - -protected: - friend class TestNghttp2SessionFactory; - - Http::Http2::MetadataEncoder encoder_; -}; - -class TestNghttp2SessionFactory : public Nghttp2SessionFactory { -public: - ~TestNghttp2SessionFactory() override { - nghttp2_session_callbacks_del(callbacks_); - nghttp2_option_del(options_); - } - - nghttp2_session* create(const nghttp2_session_callbacks*, ConnectionImpl* connection, - const nghttp2_option*) override { - // Only need to provide callbacks required to send METADATA frames. - nghttp2_session_callbacks_new(&callbacks_); - nghttp2_session_callbacks_set_pack_extension_callback( - callbacks_, - [](nghttp2_session*, uint8_t* data, size_t length, const nghttp2_frame*, - void* user_data) -> ssize_t { - // Double cast required due to multiple inheritance. - return static_cast( - static_cast(user_data)) - ->encoder_.packNextFramePayload(data, length); - }); - nghttp2_session_callbacks_set_send_callback( - callbacks_, - [](nghttp2_session*, const uint8_t* data, size_t length, int, void* user_data) -> ssize_t { - // Cast down to MetadataTestClientConnectionImpl to leverage friendship. 
- return static_cast( - static_cast(user_data)) - ->onSend(data, length); - }); - nghttp2_option_new(&options_); - nghttp2_option_set_user_recv_extension_type(options_, METADATA_FRAME_TYPE); - nghttp2_session* session; - nghttp2_session_client_new2(&session, callbacks_, connection, options_); - return session; - } - - void init(nghttp2_session*, ConnectionImpl*, - const envoy::config::core::v3::Http2ProtocolOptions&) override {} - -private: - nghttp2_session_callbacks* callbacks_; - nghttp2_option* options_; -}; - -class Http2CodecMetadataTest : public Http2CodecImplTestFixture, public ::testing::Test { -public: - Http2CodecMetadataTest() = default; - -protected: - void initialize() override { - allow_metadata_ = true; - http2OptionsFromTuple(client_http2_options_, client_settings_); - http2OptionsFromTuple(server_http2_options_, server_settings_); - client_ = std::make_unique( - client_connection_, client_callbacks_, client_stats_store_, client_http2_options_, - max_request_headers_kb_, max_response_headers_count_, http2_session_factory_); - server_ = std::make_unique( - server_connection_, server_callbacks_, server_stats_store_, server_http2_options_, - max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_); - ON_CALL(client_connection_, write(_, _)) - .WillByDefault(Invoke([&](Buffer::Instance& data, bool) -> void { - ASSERT_TRUE(server_wrapper_.dispatch(data, *server_).ok()); - })); - ON_CALL(server_connection_, write(_, _)) - .WillByDefault(Invoke([&](Buffer::Instance& data, bool) -> void { - ASSERT_TRUE(client_wrapper_.dispatch(data, *client_).ok()); - })); - } - -private: - TestNghttp2SessionFactory http2_session_factory_; -}; - -// Validates noop handling of METADATA frames without a known stream ID. -// This is required per RFC 7540, section 5.1.1, which states that stream ID = 0 can be used for -// "connection control" messages, and per the H2 METADATA spec (source/docs/h2_metadata.md), which -// states that these frames can be received prior to the headers. -TEST_F(Http2CodecMetadataTest, UnknownStreamId) { - initialize(); - MetadataMap metadata_map = {{"key", "value"}}; - MetadataMapVector metadata_vector; - metadata_vector.emplace_back(std::make_unique(metadata_map)); - // SETTINGS are required as part of the preface. - ASSERT_EQ(nghttp2_submit_settings(client_->session(), NGHTTP2_FLAG_NONE, nullptr, 0), 0); - // Validate both the ID = 0 special case and a non-zero ID not already bound to a stream (any ID > - // 0 for this test). 
- EXPECT_TRUE(client_->submitMetadata(metadata_vector, 0)); - EXPECT_TRUE(client_->submitMetadata(metadata_vector, 1000)); -} - -} // namespace Http2 -} // namespace Legacy -} // namespace Http -} // namespace Envoy diff --git a/test/common/http/http2/codec_impl_test.cc b/test/common/http/http2/codec_impl_test.cc index fa1c92346e73..1deb3c412284 100644 --- a/test/common/http/http2/codec_impl_test.cc +++ b/test/common/http/http2/codec_impl_test.cc @@ -7,7 +7,6 @@ #include "common/http/exception.h" #include "common/http/header_map_impl.h" #include "common/http/http2/codec_impl.h" -#include "common/runtime/runtime_features.h" #include "test/common/http/common.h" #include "test/common/http/http2/http2_frame.h" @@ -74,7 +73,7 @@ class Http2CodecImplTestFixture { }; struct ConnectionWrapper { - Http::Status dispatch(const Buffer::Instance& data, Connection& connection) { + Http::Status dispatch(const Buffer::Instance& data, ConnectionImpl& connection) { Http::Status status = Http::okStatus(); buffer_.add(data); if (!dispatching_) { @@ -129,23 +128,13 @@ class Http2CodecImplTestFixture { virtual void initialize() { http2OptionsFromTuple(client_http2_options_, client_settings_); http2OptionsFromTuple(server_http2_options_, server_settings_); - if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.new_codec_behavior")) { - client_ = std::make_unique( - client_connection_, client_callbacks_, client_stats_store_, client_http2_options_, - max_request_headers_kb_, max_response_headers_count_, - ProdNghttp2SessionFactoryNew::get()); - server_ = std::make_unique( - server_connection_, server_callbacks_, server_stats_store_, server_http2_options_, - max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_); - } else { - client_ = std::make_unique( - client_connection_, client_callbacks_, client_stats_store_, client_http2_options_, - max_request_headers_kb_, max_response_headers_count_, - ProdNghttp2SessionFactoryLegacy::get()); - server_ = std::make_unique( - server_connection_, server_callbacks_, server_stats_store_, server_http2_options_, - max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_); - } + client_ = std::make_unique( + client_connection_, client_callbacks_, client_stats_store_, client_http2_options_, + max_request_headers_kb_, max_response_headers_count_, ProdNghttp2SessionFactory::get()); + server_ = std::make_unique( + server_connection_, server_callbacks_, server_stats_store_, server_http2_options_, + max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_); + request_encoder_ = &client_->newStream(response_decoder_); setupDefaultConnectionMocks(); @@ -240,13 +229,13 @@ class Http2CodecImplTestFixture { envoy::config::core::v3::Http2ProtocolOptions client_http2_options_; NiceMock client_connection_; MockConnectionCallbacks client_callbacks_; - std::unique_ptr client_; + std::unique_ptr client_; ConnectionWrapper client_wrapper_; Stats::TestUtil::TestStore server_stats_store_; envoy::config::core::v3::Http2ProtocolOptions server_http2_options_; NiceMock server_connection_; MockServerConnectionCallbacks server_callbacks_; - std::unique_ptr server_; + std::unique_ptr server_; ConnectionWrapper server_wrapper_; MockResponseDecoder response_decoder_; RequestEncoder* request_encoder_; @@ -882,21 +871,21 @@ TEST_P(Http2CodecImplFlowControlTest, TestFlowControlInPendingSendData) { // stream. 
EXPECT_EQ(0, nghttp2_session_get_stream_local_window_size(server_->session(), 1)); EXPECT_EQ(0, nghttp2_session_get_stream_remote_window_size(client_->session(), 1)); - EXPECT_EQ(initial_stream_window, server_->getStreamUnconsumedBytes(1)); + EXPECT_EQ(initial_stream_window, server_->getStream(1)->unconsumed_bytes_); // Now that the flow control window is full, further data causes the send buffer to back up. Buffer::OwnedImpl more_long_data(std::string(initial_stream_window, 'a')); request_encoder_->encodeData(more_long_data, false); - EXPECT_EQ(initial_stream_window, client_->getStreamPendingSendDataLength(1)); + EXPECT_EQ(initial_stream_window, client_->getStream(1)->pending_send_data_.length()); EXPECT_EQ(initial_stream_window, TestUtility::findGauge(client_stats_store_, "http2.pending_send_bytes")->value()); - EXPECT_EQ(initial_stream_window, server_->getStreamUnconsumedBytes(1)); + EXPECT_EQ(initial_stream_window, server_->getStream(1)->unconsumed_bytes_); // If we go over the limit, the stream callbacks should fire. EXPECT_CALL(callbacks, onAboveWriteBufferHighWatermark()); Buffer::OwnedImpl last_byte("!"); request_encoder_->encodeData(last_byte, false); - EXPECT_EQ(initial_stream_window + 1, client_->getStreamPendingSendDataLength(1)); + EXPECT_EQ(initial_stream_window + 1, client_->getStream(1)->pending_send_data_.length()); EXPECT_EQ(initial_stream_window + 1, TestUtility::findGauge(client_stats_store_, "http2.pending_send_bytes")->value()); @@ -941,7 +930,7 @@ TEST_P(Http2CodecImplFlowControlTest, TestFlowControlInPendingSendData) { EXPECT_CALL(callbacks2, onBelowWriteBufferLowWatermark()).Times(0); EXPECT_CALL(callbacks3, onBelowWriteBufferLowWatermark()); server_->getStream(1)->readDisable(false); - EXPECT_EQ(0, client_->getStreamPendingSendDataLength(1)); + EXPECT_EQ(0, client_->getStream(1)->pending_send_data_.length()); EXPECT_EQ(0, TestUtility::findGauge(client_stats_store_, "http2.pending_send_bytes")->value()); // The extra 1 byte sent won't trigger another window update, so the final window should be the // initial window minus the last 1 byte flush from the client to server. @@ -986,7 +975,7 @@ TEST_P(Http2CodecImplFlowControlTest, EarlyResetRestoresWindow) { // stream. EXPECT_EQ(0, nghttp2_session_get_stream_local_window_size(server_->session(), 1)); EXPECT_EQ(0, nghttp2_session_get_stream_remote_window_size(client_->session(), 1)); - EXPECT_EQ(initial_stream_window, server_->getStreamUnconsumedBytes(1)); + EXPECT_EQ(initial_stream_window, server_->getStream(1)->unconsumed_bytes_); EXPECT_GT(initial_connection_window, nghttp2_session_get_remote_window_size(client_->session())); EXPECT_CALL(server_stream_callbacks_, @@ -1028,7 +1017,7 @@ TEST_P(Http2CodecImplFlowControlTest, FlowControlPendingRecvData) { // the recv buffer can be overrun by a client which negotiates a larger // SETTINGS_MAX_FRAME_SIZE but there's no current easy way to tweak that in // envoy (without sending raw HTTP/2 frames) so we lower the buffer limit instead. 
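// (With the watermarks lowered to 10/20 bytes below, the 40-byte body that follows is enough
// to cross the high watermark without crafting oversized frames.)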
- server_->setStreamWriteBufferWatermarks(1, 10, 20); + server_->getStream(1)->setWriteBufferWatermarks(10, 20); EXPECT_CALL(request_decoder_, decodeData(_, false)); Buffer::OwnedImpl data(std::string(40, 'a')); @@ -1226,23 +1215,13 @@ class Http2CodecImplStreamLimitTest : public Http2CodecImplTest {}; TEST_P(Http2CodecImplStreamLimitTest, MaxClientStreams) { http2OptionsFromTuple(client_http2_options_, ::testing::get<0>(GetParam())); http2OptionsFromTuple(server_http2_options_, ::testing::get<1>(GetParam())); - if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.new_codec_behavior")) { - client_ = std::make_unique( - client_connection_, client_callbacks_, client_stats_store_, client_http2_options_, - max_request_headers_kb_, max_response_headers_count_, ProdNghttp2SessionFactoryNew::get()); - server_ = std::make_unique( - server_connection_, server_callbacks_, server_stats_store_, server_http2_options_, - max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_); + client_ = std::make_unique( + client_connection_, client_callbacks_, client_stats_store_, client_http2_options_, + max_request_headers_kb_, max_response_headers_count_, ProdNghttp2SessionFactory::get()); + server_ = std::make_unique( + server_connection_, server_callbacks_, server_stats_store_, server_http2_options_, + max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_); - } else { - client_ = std::make_unique( - client_connection_, client_callbacks_, client_stats_store_, client_http2_options_, - max_request_headers_kb_, max_response_headers_count_, - ProdNghttp2SessionFactoryLegacy::get()); - server_ = std::make_unique( - server_connection_, server_callbacks_, server_stats_store_, server_http2_options_, - max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_); - } for (int i = 0; i < 101; ++i) { request_encoder_ = &client_->newStream(response_decoder_); setupDefaultConnectionMocks(); @@ -2052,64 +2031,50 @@ TEST_P(Http2CodecImplTest, ConnectTest) { request_encoder_->encodeHeaders(request_headers, false); } -template class TestNghttp2SessionFactory; +class TestNghttp2SessionFactory; // Test client for H/2 METADATA frame edge cases. -template -class MetadataTestClientConnectionImpl : public TestClientConnectionImplType { +class MetadataTestClientConnectionImpl : public TestClientConnectionImpl { public: MetadataTestClientConnectionImpl( Network::Connection& connection, Http::ConnectionCallbacks& callbacks, Stats::Scope& scope, const envoy::config::core::v3::Http2ProtocolOptions& http2_options, uint32_t max_request_headers_kb, uint32_t max_request_headers_count, - typename TestClientConnectionImplType::SessionFactory& http2_session_factory) - : TestClientConnectionImplType(connection, callbacks, scope, http2_options, - max_request_headers_kb, max_request_headers_count, - http2_session_factory) {} + Nghttp2SessionFactory& http2_session_factory) + : TestClientConnectionImpl(connection, callbacks, scope, http2_options, + max_request_headers_kb, max_request_headers_count, + http2_session_factory) {} // Overrides TestClientConnectionImpl::submitMetadata(). bool submitMetadata(const MetadataMapVector& metadata_map_vector, int32_t stream_id) override { // Creates metadata payload. 
encoder_.createPayload(metadata_map_vector); for (uint8_t flags : encoder_.payloadFrameFlagBytes()) { - int result = - nghttp2_submit_extension(TestClientConnectionImplType::session(), - ::Envoy::Http::METADATA_FRAME_TYPE, flags, stream_id, nullptr); + int result = nghttp2_submit_extension(session(), ::Envoy::Http::METADATA_FRAME_TYPE, flags, + stream_id, nullptr); if (result != 0) { return false; } } // Triggers nghttp2 to populate the payloads of the METADATA frames. - int result = nghttp2_session_send(TestClientConnectionImplType::session()); + int result = nghttp2_session_send(session()); return result == 0; } protected: - template friend class TestNghttp2SessionFactory; + friend class TestNghttp2SessionFactory; MetadataEncoder encoder_; }; -using MetadataTestClientConnectionImplNew = - MetadataTestClientConnectionImpl; -using MetadataTestClientConnectionImplLegacy = - MetadataTestClientConnectionImpl; - -struct Nghttp2SessionFactoryDeleter { - virtual ~Nghttp2SessionFactoryDeleter() = default; -}; - -template -class TestNghttp2SessionFactory : public Nghttp2SessionFactoryType, - public Nghttp2SessionFactoryDeleter { +class TestNghttp2SessionFactory : public Nghttp2SessionFactory { public: ~TestNghttp2SessionFactory() override { nghttp2_session_callbacks_del(callbacks_); nghttp2_option_del(options_); } - nghttp2_session* create(const nghttp2_session_callbacks*, - typename Nghttp2SessionFactoryType::ConnectionImplType* connection, + nghttp2_session* create(const nghttp2_session_callbacks*, ConnectionImpl* connection, const nghttp2_option*) override { // Only need to provide callbacks required to send METADATA frames. nghttp2_session_callbacks_new(&callbacks_); @@ -2118,18 +2083,16 @@ class TestNghttp2SessionFactory : public Nghttp2SessionFactoryType, [](nghttp2_session*, uint8_t* data, size_t length, const nghttp2_frame*, void* user_data) -> ssize_t { // Double cast required due to multiple inheritance. - return static_cast*>( - static_cast( - user_data)) + return static_cast( + static_cast(user_data)) ->encoder_.packNextFramePayload(data, length); }); nghttp2_session_callbacks_set_send_callback( callbacks_, [](nghttp2_session*, const uint8_t* data, size_t length, int, void* user_data) -> ssize_t { // Cast down to MetadataTestClientConnectionImpl to leverage friendship. 
- return static_cast*>( - static_cast( - user_data)) + return static_cast( + static_cast(user_data)) ->onSend(data, length); }); nghttp2_option_new(&options_); @@ -2139,7 +2102,7 @@ class TestNghttp2SessionFactory : public Nghttp2SessionFactoryType, return session; } - void init(nghttp2_session*, typename Nghttp2SessionFactoryType::ConnectionImplType*, + void init(nghttp2_session*, ConnectionImpl*, const envoy::config::core::v3::Http2ProtocolOptions&) override {} private: @@ -2147,12 +2110,6 @@ class TestNghttp2SessionFactory : public Nghttp2SessionFactoryType, nghttp2_option* options_; }; -using TestNghttp2SessionFactoryNew = - TestNghttp2SessionFactory; -using TestNghttp2SessionFactoryLegacy = - TestNghttp2SessionFactory; - class Http2CodecMetadataTest : public Http2CodecImplTestFixture, public ::testing::Test { public: Http2CodecMetadataTest() = default; @@ -2162,27 +2119,12 @@ class Http2CodecMetadataTest : public Http2CodecImplTestFixture, public ::testin allow_metadata_ = true; http2OptionsFromTuple(client_http2_options_, client_settings_); http2OptionsFromTuple(server_http2_options_, server_settings_); - if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.new_codec_behavior")) { - std::unique_ptr session_factory = - std::make_unique(); - client_ = std::make_unique( - client_connection_, client_callbacks_, client_stats_store_, client_http2_options_, - max_request_headers_kb_, max_response_headers_count_, *session_factory); - server_ = std::make_unique( - server_connection_, server_callbacks_, server_stats_store_, server_http2_options_, - max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_); - http2_session_factory_ = std::move(session_factory); - } else { - std::unique_ptr session_factory = - std::make_unique(); - client_ = std::make_unique( - client_connection_, client_callbacks_, client_stats_store_, client_http2_options_, - max_request_headers_kb_, max_response_headers_count_, *session_factory); - server_ = std::make_unique( - server_connection_, server_callbacks_, server_stats_store_, server_http2_options_, - max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_); - http2_session_factory_ = std::move(session_factory); - } + client_ = std::make_unique( + client_connection_, client_callbacks_, client_stats_store_, client_http2_options_, + max_request_headers_kb_, max_response_headers_count_, http2_session_factory_); + server_ = std::make_unique( + server_connection_, server_callbacks_, server_stats_store_, server_http2_options_, + max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_); ON_CALL(client_connection_, write(_, _)) .WillByDefault(Invoke([&](Buffer::Instance& data, bool) -> void { ASSERT_TRUE(server_wrapper_.dispatch(data, *server_).ok()); @@ -2194,7 +2136,7 @@ class Http2CodecMetadataTest : public Http2CodecImplTestFixture, public ::testin } private: - std::unique_ptr http2_session_factory_; + TestNghttp2SessionFactory http2_session_factory_; }; // Validates noop handling of METADATA frames without a known stream ID. 
diff --git a/test/common/http/http2/codec_impl_test_util.h b/test/common/http/http2/codec_impl_test_util.h index 2ba9f545a20c..1eb8bd581a9e 100644 --- a/test/common/http/http2/codec_impl_test_util.h +++ b/test/common/http/http2/codec_impl_test_util.h @@ -3,7 +3,6 @@ #include "envoy/http/codec.h" #include "common/http/http2/codec_impl.h" -#include "common/http/http2/codec_impl_legacy.h" #include "common/http/utility.h" namespace Envoy { @@ -33,7 +32,7 @@ class TestCodecSettingsProvider { return it->second; } - // protected: +protected: // Stores SETTINGS parameters contained in |settings_frame| to make them available via // getRemoteSettingsParameterValue(). void onSettingsFrame(const nghttp2_settings& settings_frame) { @@ -58,23 +57,9 @@ class TestCodecSettingsProvider { std::unordered_map settings_; }; -struct ServerCodecFacade : public virtual Connection { - virtual nghttp2_session* session() PURE; - virtual Http::Stream* getStream(int32_t stream_id) PURE; - virtual uint32_t getStreamUnconsumedBytes(int32_t stream_id) PURE; - virtual void setStreamWriteBufferWatermarks(int32_t stream_id, uint32_t low_watermark, - uint32_t high_watermark) PURE; -}; - -class TestServerConnection : public TestCodecStatsProvider, - public TestCodecSettingsProvider, - public ServerCodecFacade { -public: - TestServerConnection(Stats::Scope& scope) : TestCodecStatsProvider(scope) {} -}; - -template -class TestServerConnectionImpl : public TestServerConnection, public CodecImplType { +class TestServerConnectionImpl : public TestCodecStatsProvider, + public ServerConnectionImpl, + public TestCodecSettingsProvider { public: TestServerConnectionImpl( Network::Connection& connection, ServerConnectionCallbacks& callbacks, Stats::Scope& scope, @@ -82,94 +67,50 @@ class TestServerConnectionImpl : public TestServerConnection, public CodecImplTy uint32_t max_request_headers_kb, uint32_t max_request_headers_count, envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction headers_with_underscores_action) - : TestServerConnection(scope), - CodecImplType(connection, callbacks, http2CodecStats(), http2_options, - max_request_headers_kb, max_request_headers_count, - headers_with_underscores_action) {} - - // ServerCodecFacade - nghttp2_session* session() override { return CodecImplType::session_; } - Http::Stream* getStream(int32_t stream_id) override { - return CodecImplType::getStream(stream_id); - } - uint32_t getStreamUnconsumedBytes(int32_t stream_id) override { - return CodecImplType::getStream(stream_id)->unconsumed_bytes_; - } - void setStreamWriteBufferWatermarks(int32_t stream_id, uint32_t low_watermark, - uint32_t high_watermark) override { - CodecImplType::getStream(stream_id)->setWriteBufferWatermarks(low_watermark, high_watermark); - } + : TestCodecStatsProvider(scope), + ServerConnectionImpl(connection, callbacks, http2CodecStats(), http2_options, + max_request_headers_kb, max_request_headers_count, + headers_with_underscores_action) {} + nghttp2_session* session() { return session_; } + using ServerConnectionImpl::getStream; protected: // Overrides ServerConnectionImpl::onSettingsForTest(). 
void onSettingsForTest(const nghttp2_settings& settings) override { onSettingsFrame(settings); } }; -using TestServerConnectionImplLegacy = - TestServerConnectionImpl; -using TestServerConnectionImplNew = - TestServerConnectionImpl; - -struct ClientCodecFacade : public ClientConnection { - virtual nghttp2_session* session() PURE; - virtual Http::Stream* getStream(int32_t stream_id) PURE; - virtual uint64_t getStreamPendingSendDataLength(int32_t stream_id) PURE; - virtual void sendPendingFrames() PURE; - virtual bool submitMetadata(const MetadataMapVector& mm_vector, int32_t stream_id) PURE; -}; - -class TestClientConnection : public TestCodecStatsProvider, - public TestCodecSettingsProvider, - public ClientCodecFacade { -public: - TestClientConnection(Stats::Scope& scope) : TestCodecStatsProvider(scope) {} -}; - -template -class TestClientConnectionImpl : public TestClientConnection, public CodecImplType { +class TestClientConnectionImpl : public TestCodecStatsProvider, + public ClientConnectionImpl, + public TestCodecSettingsProvider { public: TestClientConnectionImpl(Network::Connection& connection, Http::ConnectionCallbacks& callbacks, Stats::Scope& scope, const envoy::config::core::v3::Http2ProtocolOptions& http2_options, uint32_t max_request_headers_kb, uint32_t max_request_headers_count, - typename CodecImplType::SessionFactory& http2_session_factory) - : TestClientConnection(scope), - CodecImplType(connection, callbacks, http2CodecStats(), http2_options, - max_request_headers_kb, max_request_headers_count, http2_session_factory) {} - - // ClientCodecFacade - RequestEncoder& newStream(ResponseDecoder& response_decoder) override { - return CodecImplType::newStream(response_decoder); - } - nghttp2_session* session() override { return CodecImplType::session_; } - Http::Stream* getStream(int32_t stream_id) override { - return CodecImplType::getStream(stream_id); - } - uint64_t getStreamPendingSendDataLength(int32_t stream_id) override { - return CodecImplType::getStream(stream_id)->pending_send_data_.length(); - } - void sendPendingFrames() override { CodecImplType::sendPendingFrames(); } + Nghttp2SessionFactory& http2_session_factory) + : TestCodecStatsProvider(scope), + ClientConnectionImpl(connection, callbacks, http2CodecStats(), http2_options, + max_request_headers_kb, max_request_headers_count, + http2_session_factory) {} + + nghttp2_session* session() { return session_; } + // Submits an H/2 METADATA frame to the peer. // Returns true on success, false otherwise. - bool submitMetadata(const MetadataMapVector& mm_vector, int32_t stream_id) override { + virtual bool submitMetadata(const MetadataMapVector& mm_vector, int32_t stream_id) { UNREFERENCED_PARAMETER(mm_vector); UNREFERENCED_PARAMETER(stream_id); return false; } + using ClientConnectionImpl::getStream; + using ConnectionImpl::sendPendingFrames; + protected: // Overrides ClientConnectionImpl::onSettingsForTest(). 
void onSettingsForTest(const nghttp2_settings& settings) override { onSettingsFrame(settings); } }; -using TestClientConnectionImplLegacy = - TestClientConnectionImpl; -using TestClientConnectionImplNew = - TestClientConnectionImpl; - -using ProdNghttp2SessionFactoryLegacy = Envoy::Http::Legacy::Http2::ProdNghttp2SessionFactory; -using ProdNghttp2SessionFactoryNew = Envoy::Http::Http2::ProdNghttp2SessionFactory; - } // namespace Http2 } // namespace Http } // namespace Envoy diff --git a/test/common/http/http2/frame_replay_test.cc b/test/common/http/http2/frame_replay_test.cc index aadda98c8b3d..c88458e10c7e 100644 --- a/test/common/http/http2/frame_replay_test.cc +++ b/test/common/http/http2/frame_replay_test.cc @@ -26,7 +26,7 @@ class RequestFrameCommentTest : public ::testing::Test {}; class ResponseFrameCommentTest : public ::testing::Test {}; // Creates and sets up a stream to reply to. -void setupStream(ClientCodecFrameInjector& codec, TestClientConnectionImplNew& connection) { +void setupStream(ClientCodecFrameInjector& codec, TestClientConnectionImpl& connection) { codec.request_encoder_ = &connection.newStream(codec.response_decoder_); codec.request_encoder_->getStream().addCallbacks(codec.client_stream_callbacks_); // Setup a single stream to inject frames as a reply to. @@ -56,7 +56,7 @@ TEST_F(RequestFrameCommentTest, SimpleExampleHuffman) { // Validate HEADERS decode. ServerCodecFrameInjector codec; - TestServerConnectionImplNew connection( + TestServerConnectionImpl connection( codec.server_connection_, codec.server_callbacks_, codec.stats_store_, codec.options_, Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, envoy::config::core::v3::HttpProtocolOptions::ALLOW); @@ -89,7 +89,7 @@ TEST_F(ResponseFrameCommentTest, SimpleExampleHuffman) { // Validate HEADERS decode. ClientCodecFrameInjector codec; - TestClientConnectionImplNew connection( + TestClientConnectionImpl connection( codec.client_connection_, codec.client_callbacks_, codec.stats_store_, codec.options_, Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, ProdNghttp2SessionFactory::get()); @@ -134,7 +134,7 @@ TEST_F(RequestFrameCommentTest, SimpleExamplePlain) { // Validate HEADERS decode. ServerCodecFrameInjector codec; - TestServerConnectionImplNew connection( + TestServerConnectionImpl connection( codec.server_connection_, codec.server_callbacks_, codec.stats_store_, codec.options_, Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, envoy::config::core::v3::HttpProtocolOptions::ALLOW); @@ -169,7 +169,7 @@ TEST_F(ResponseFrameCommentTest, SimpleExamplePlain) { // Validate HEADERS decode. ClientCodecFrameInjector codec; - TestClientConnectionImplNew connection( + TestClientConnectionImpl connection( codec.client_connection_, codec.client_callbacks_, codec.stats_store_, codec.options_, Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, ProdNghttp2SessionFactory::get()); @@ -199,7 +199,7 @@ TEST_F(RequestFrameCommentTest, SingleByteNulCrLfInHeaderFrame) { header.frame()[offset] = c; // Play the frames back. 
ServerCodecFrameInjector codec; - TestServerConnectionImplNew connection( + TestServerConnectionImpl connection( codec.server_connection_, codec.server_callbacks_, codec.stats_store_, codec.options_, Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, envoy::config::core::v3::HttpProtocolOptions::ALLOW); @@ -232,7 +232,7 @@ TEST_F(ResponseFrameCommentTest, SingleByteNulCrLfInHeaderFrame) { header.frame()[offset] = c; // Play the frames back. ClientCodecFrameInjector codec; - TestClientConnectionImplNew connection( + TestClientConnectionImpl connection( codec.client_connection_, codec.client_callbacks_, codec.stats_store_, codec.options_, Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, ProdNghttp2SessionFactory::get()); @@ -267,7 +267,7 @@ TEST_F(RequestFrameCommentTest, SingleByteNulCrLfInHeaderField) { header.frame()[offset] = c; // Play the frames back. ServerCodecFrameInjector codec; - TestServerConnectionImplNew connection( + TestServerConnectionImpl connection( codec.server_connection_, codec.server_callbacks_, codec.stats_store_, codec.options_, Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, envoy::config::core::v3::HttpProtocolOptions::ALLOW); @@ -305,7 +305,7 @@ TEST_F(ResponseFrameCommentTest, SingleByteNulCrLfInHeaderField) { header.frame()[offset] = c; // Play the frames back. ClientCodecFrameInjector codec; - TestClientConnectionImplNew connection( + TestClientConnectionImpl connection( codec.client_connection_, codec.client_callbacks_, codec.stats_store_, codec.options_, Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, ProdNghttp2SessionFactory::get()); diff --git a/test/common/http/http2/request_header_fuzz_test.cc b/test/common/http/http2/request_header_fuzz_test.cc index d925ed1bb002..5dc75d58ebbb 100644 --- a/test/common/http/http2/request_header_fuzz_test.cc +++ b/test/common/http/http2/request_header_fuzz_test.cc @@ -14,7 +14,7 @@ namespace { void Replay(const Frame& frame, ServerCodecFrameInjector& codec) { // Create the server connection containing the nghttp2 session. - TestServerConnectionImplNew connection( + TestServerConnectionImpl connection( codec.server_connection_, codec.server_callbacks_, codec.stats_store_, codec.options_, Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, envoy::config::core::v3::HttpProtocolOptions::ALLOW); diff --git a/test/common/http/http2/response_header_fuzz_test.cc b/test/common/http/http2/response_header_fuzz_test.cc index e73b88ab954d..8b1a5d3d0797 100644 --- a/test/common/http/http2/response_header_fuzz_test.cc +++ b/test/common/http/http2/response_header_fuzz_test.cc @@ -15,7 +15,7 @@ namespace { void Replay(const Frame& frame, ClientCodecFrameInjector& codec) { // Create the client connection containing the nghttp2 session. 
- TestClientConnectionImplNew connection( + TestClientConnectionImpl connection( codec.client_connection_, codec.client_callbacks_, codec.stats_store_, codec.options_, Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, ProdNghttp2SessionFactory::get()); diff --git a/test/common/stats/stat_test_utility.h b/test/common/stats/stat_test_utility.h index b1df35b6d189..6b46a0f05aea 100644 --- a/test/common/stats/stat_test_utility.h +++ b/test/common/stats/stat_test_utility.h @@ -91,11 +91,6 @@ class MemoryTest { class TestStore : public IsolatedStoreImpl { public: TestStore() = default; - ~TestStore() { - counter_map_.clear(); - gauge_map_.clear(); - histogram_map_.clear(); - } // Constructs a store using a symbol table, allowing for explicit sharing. explicit TestStore(SymbolTable& symbol_table) : IsolatedStoreImpl(symbol_table) {} diff --git a/test/config/utility.cc b/test/config/utility.cc index f57e7af4286c..ea96c7ddb142 100644 --- a/test/config/utility.cc +++ b/test/config/utility.cc @@ -586,10 +586,6 @@ void ConfigHelper::addRuntimeOverride(const std::string& key, const std::string& (*static_layer->mutable_fields())[std::string(key)] = ValueUtil::stringValue(std::string(value)); } -void ConfigHelper::setLegacyCodecs() { - addRuntimeOverride("envoy.reloadable_features.new_codec_behavior", "false"); -} - void ConfigHelper::finalize(const std::vector& ports) { RELEASE_ASSERT(!finalized_, ""); diff --git a/test/config/utility.h b/test/config/utility.h index b4217d4f31f5..39bcb00a4454 100644 --- a/test/config/utility.h +++ b/test/config/utility.h @@ -229,9 +229,6 @@ class ConfigHelper { const envoy::extensions::filters::network::http_connection_manager::v3::LocalReplyConfig& config); - // Set legacy codecs to use for upstream and downstream codecs. 
- void setLegacyCodecs(); - private: static bool shouldBoost(envoy::config::core::v3::ApiVersion api_version) { return api_version == envoy::config::core::v3::ApiVersion::V2; diff --git a/test/extensions/filters/network/http_connection_manager/config_test.cc b/test/extensions/filters/network/http_connection_manager/config_test.cc index 5e7648ba1ce4..d1d32fdd634e 100644 --- a/test/extensions/filters/network/http_connection_manager/config_test.cc +++ b/test/extensions/filters/network/http_connection_manager/config_test.cc @@ -1619,66 +1619,6 @@ TEST_F(HttpConnectionManagerConfigTest, DefaultRequestIDExtension) { ASSERT_NE(nullptr, request_id_extension); } -TEST_F(HttpConnectionManagerConfigTest, LegacyH1Codecs) { - const std::string yaml_string = R"EOF( -codec_type: http1 -server_name: foo -stat_prefix: router -route_config: - virtual_hosts: - - name: service - domains: - - "*" - routes: - - match: - prefix: "/" - route: - cluster: cluster -http_filters: -- name: envoy.filters.http.router - )EOF"; - - envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager - proto_config; - TestUtility::loadFromYaml(yaml_string, proto_config); - NiceMock filter_callbacks; - EXPECT_CALL(context_.runtime_loader_.snapshot_, runtimeFeatureEnabled(_)).WillOnce(Return(false)); - auto http_connection_manager_factory = - HttpConnectionManagerFactory::createHttpConnectionManagerFactoryFromProto( - proto_config, context_, filter_callbacks); - http_connection_manager_factory(); -} - -TEST_F(HttpConnectionManagerConfigTest, LegacyH2Codecs) { - const std::string yaml_string = R"EOF( -codec_type: http2 -server_name: foo -stat_prefix: router -route_config: - virtual_hosts: - - name: service - domains: - - "*" - routes: - - match: - prefix: "/" - route: - cluster: cluster -http_filters: -- name: envoy.filters.http.router - )EOF"; - - envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager - proto_config; - TestUtility::loadFromYaml(yaml_string, proto_config); - NiceMock filter_callbacks; - EXPECT_CALL(context_.runtime_loader_.snapshot_, runtimeFeatureEnabled(_)).WillOnce(Return(false)); - auto http_connection_manager_factory = - HttpConnectionManagerFactory::createHttpConnectionManagerFactoryFromProto( - proto_config, context_, filter_callbacks); - http_connection_manager_factory(); -} - class FilterChainTest : public HttpConnectionManagerConfigTest { public: const std::string basic_config_ = R"EOF( diff --git a/test/integration/BUILD b/test/integration/BUILD index d16bfcbe114b..2efec82ce37b 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -8,7 +8,6 @@ load( "envoy_package", "envoy_proto_library", "envoy_select_hot_restart", - "envoy_select_legacy_codecs_in_integration_tests", "envoy_sh_test", ) @@ -577,10 +576,6 @@ envoy_cc_test_library( "ssl_utility.h", "utility.h", ], - copts = envoy_select_legacy_codecs_in_integration_tests( - ["-DENVOY_USE_LEGACY_CODECS_IN_INTEGRATION_TESTS"], - "@envoy", - ), data = ["//test/common/runtime:filesystem_test_data"], deps = [ ":server_stats_interface", @@ -612,9 +607,7 @@ envoy_cc_test_library( "//source/common/http:codec_client_lib", "//source/common/http:header_map_lib", "//source/common/http:headers_lib", - "//source/common/http/http1:codec_legacy_lib", "//source/common/http/http1:codec_lib", - "//source/common/http/http2:codec_legacy_lib", "//source/common/http/http2:codec_lib", "//source/common/local_info:local_info_lib", "//source/common/network:filter_lib", diff --git 
a/test/integration/api_version_integration_test.cc b/test/integration/api_version_integration_test.cc index 952c095a820e..c8bf5164b028 100644 --- a/test/integration/api_version_integration_test.cc +++ b/test/integration/api_version_integration_test.cc @@ -316,11 +316,9 @@ TEST_P(ApiVersionIntegrationTest, Eds) { TEST_P(ApiVersionIntegrationTest, Rtds) { config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { - if (bootstrap.mutable_layered_runtime()->layers_size() == 0) { - auto* admin_layer = bootstrap.mutable_layered_runtime()->add_layers(); - admin_layer->set_name("admin layer"); - admin_layer->mutable_admin_layer(); - } + auto* admin_layer = bootstrap.mutable_layered_runtime()->add_layers(); + admin_layer->set_name("admin layer"); + admin_layer->mutable_admin_layer(); auto* rtds_layer = bootstrap.mutable_layered_runtime()->add_layers(); rtds_layer->set_name("rtds_layer"); setupConfigSource(*rtds_layer->mutable_rtds_layer()->mutable_rtds_config()); diff --git a/test/integration/fake_upstream.cc b/test/integration/fake_upstream.cc index a6c94d91a0c4..4763c9bbfe05 100644 --- a/test/integration/fake_upstream.cc +++ b/test/integration/fake_upstream.cc @@ -12,9 +12,7 @@ #include "common/common/fmt.h" #include "common/http/header_map_impl.h" #include "common/http/http1/codec_impl.h" -#include "common/http/http1/codec_impl_legacy.h" #include "common/http/http2/codec_impl.h" -#include "common/http/http2/codec_impl_legacy.h" #include "common/network/address_impl.h" #include "common/network/listen_socket_impl.h" #include "common/network/raw_buffer_socket.h" @@ -251,29 +249,6 @@ class TestHttp1ServerConnectionImpl : public Http::Http1::ServerConnectionImpl { } }; -namespace Legacy { -class TestHttp1ServerConnectionImpl : public Http::Legacy::Http1::ServerConnectionImpl { -public: - using Http::Legacy::Http1::ServerConnectionImpl::ServerConnectionImpl; - - void onMessageComplete() override { - ServerConnectionImpl::onMessageComplete(); - - if (activeRequest().has_value() && activeRequest().value().request_decoder_) { - // Undo the read disable from the base class - we have many tests which - // waitForDisconnect after a full request has been read which will not - // receive the disconnect if reading is disabled. 
- activeRequest().value().response_encoder_.readDisable(false); - } - } - ~TestHttp1ServerConnectionImpl() override { - if (activeRequest().has_value()) { - activeRequest().value().response_encoder_.clearReadDisableCallsForTests(); - } - } -}; -} // namespace Legacy - FakeHttpConnection::FakeHttpConnection( FakeUpstream& fake_upstream, SharedConnectionWrapper& shared_connection, Type type, Event::TestTimeSystem& time_system, uint32_t max_request_headers_kb, @@ -286,15 +261,9 @@ FakeHttpConnection::FakeHttpConnection( // For the purpose of testing, we always have the upstream encode the trailers if any http1_settings.enable_trailers_ = true; Http::Http1::CodecStats& stats = fake_upstream.http1CodecStats(); -#ifdef ENVOY_USE_LEGACY_CODECS_IN_INTEGRATION_TESTS codec_ = std::make_unique( shared_connection_.connection(), stats, *this, http1_settings, max_request_headers_kb, max_request_headers_count, headers_with_underscores_action); -#else - codec_ = std::make_unique( - shared_connection_.connection(), stats, *this, http1_settings, max_request_headers_kb, - max_request_headers_count, headers_with_underscores_action); -#endif } else { envoy::config::core::v3::Http2ProtocolOptions http2_options = ::Envoy::Http2::Utility::initializeAndValidateOptions( @@ -302,17 +271,12 @@ FakeHttpConnection::FakeHttpConnection( http2_options.set_allow_connect(true); http2_options.set_allow_metadata(true); Http::Http2::CodecStats& stats = fake_upstream.http2CodecStats(); -#ifdef ENVOY_USE_LEGACY_CODECS_IN_INTEGRATION_TESTS codec_ = std::make_unique( shared_connection_.connection(), *this, stats, http2_options, max_request_headers_kb, max_request_headers_count, headers_with_underscores_action); -#else - codec_ = std::make_unique( - shared_connection_.connection(), *this, stats, http2_options, max_request_headers_kb, - max_request_headers_count, headers_with_underscores_action); -#endif ASSERT(type == Type::HTTP2); } + shared_connection_.connection().addReadFilter( Network::ReadFilterSharedPtr{new ReadFilter(*this)}); } diff --git a/test/integration/integration.cc b/test/integration/integration.cc index 2e4b846870e4..309548595313 100644 --- a/test/integration/integration.cc +++ b/test/integration/integration.cc @@ -286,11 +286,6 @@ BaseIntegrationTest::BaseIntegrationTest(const InstanceConstSharedPtrFn& upstrea return new Buffer::WatermarkBuffer(below_low, above_high, above_overflow); })); ON_CALL(factory_context_, api()).WillByDefault(ReturnRef(*api_)); - // In ENVOY_USE_LEGACY_CODECS_IN_INTEGRATION_TESTS mode, set runtime config to use legacy codecs. -#ifdef ENVOY_USE_LEGACY_CODECS_IN__INTEGRATION_TESTS - ENVOY_LOG_MISC(debug, "Using legacy codecs"); - setLegacyCodecs(); -#endif } BaseIntegrationTest::BaseIntegrationTest(Network::Address::IpVersion version, diff --git a/test/integration/integration.h b/test/integration/integration.h index d345b5051095..0ec9133736cb 100644 --- a/test/integration/integration.h +++ b/test/integration/integration.h @@ -191,7 +191,6 @@ class BaseIntegrationTest : protected Logger::Loggable { void skipPortUsageValidation() { config_helper_.skipPortUsageValidation(); } // Make test more deterministic by using a fixed RNG value. 
void setDeterministic() { deterministic_ = true; } - void setLegacyCodecs() { config_helper_.setLegacyCodecs(); } FakeHttpConnection::Type upstreamProtocol() const { return upstream_protocol_; } diff --git a/tools/code_format/check_format.py b/tools/code_format/check_format.py index ea5214dda67f..351414da436b 100755 --- a/tools/code_format/check_format.py +++ b/tools/code_format/check_format.py @@ -2,7 +2,6 @@ import argparse import common -import difflib import functools import multiprocessing import os @@ -85,21 +84,6 @@ "./source/server/admin/prometheus_stats.cc", "./tools/clang_tools/api_booster/main.cc", "./tools/clang_tools/api_booster/proto_cxx_utils.cc", "./source/common/common/version.cc") -# These triples (file1, file2, diff) represent two files, file1 and file2 that should maintain -# the diff diff. This is meant to keep these two files in sync. -CODEC_DIFFS = (("./source/common/http/http1/codec_impl.h", - "./source/common/http/http1/codec_impl_legacy.h", - "./tools/code_format/codec_diffs/http1_codec_impl_h"), - ("./source/common/http/http1/codec_impl.cc", - "./source/common/http/http1/codec_impl_legacy.cc", - "./tools/code_format/codec_diffs/http1_codec_impl_cc"), - ("./source/common/http/http2/codec_impl.h", - "./source/common/http/http2/codec_impl_legacy.h", - "./tools/code_format/codec_diffs/http2_codec_impl_h"), - ("./source/common/http/http2/codec_impl.cc", - "./source/common/http/http2/codec_impl_legacy.cc", - "./tools/code_format/codec_diffs/http2_codec_impl_cc")) - # Only one C++ file should instantiate grpc_init GRPC_INIT_ALLOWLIST = ("./source/common/grpc/google_grpc_context.cc") @@ -548,38 +532,6 @@ def fixSourceLine(line, line_number): return line -def codecDiffHelper(file1, file2, diff): - f1 = readLines(file1) - f2 = readLines(file2) - - # Create diff between two files - code_diff = list(difflib.unified_diff(f1, f2, lineterm="")) - # Compare with golden diff. - golden_diff = readLines(diff) - # It is fairly ugly to diff a diff, so return a warning to sync codec changes - # and/or update golden_diff. - if code_diff != golden_diff: - error_message = "Codecs are not synced: %s does not match %s. Update codec implementations to sync and/or update the diff manually to:\n%s" % ( - file1, file2, '\n'.join(code_diff)) - # The following line will write the diff to the file diff if it does not match. - # Do not uncomment unless you know the change is safe! - # new_diff = pathlib.Path(diff) - #new_diff.open('w') - # new_diff.write_text('\n'.join(code_diff), encoding='utf-8') - return error_message - - -def checkCodecDiffs(error_messages): - try: - for triple in CODEC_DIFFS: - codec_diff = codecDiffHelper(*triple) - if codec_diff != None: - error_messages.append(codecDiffHelper(*triple)) - return error_messages - except IOError: # for check format tests - return error_messages - - # We want to look for a call to condvar.waitFor, but there's no strong pattern # to the variable name of the condvar. If we just look for ".waitFor" we'll also # pick up time_system_.waitFor(...), and we don't want to return true for that @@ -1096,9 +1048,6 @@ def ownedDirectories(error_messages): error_messages = [] owned_directories = ownedDirectories(error_messages) - # Check codec synchronization once per run. 
- checkCodecDiffs(error_messages) - if os.path.isfile(target_path): error_messages += checkFormat("./" + target_path) else: diff --git a/tools/code_format/codec_diffs/http1_codec_impl_cc b/tools/code_format/codec_diffs/http1_codec_impl_cc deleted file mode 100644 index b9ea3f3ae002..000000000000 --- a/tools/code_format/codec_diffs/http1_codec_impl_cc +++ /dev/null @@ -1,35 +0,0 @@ ---- -+++ -@@ -1,4 +1,4 @@ --#include "common/http/http1/codec_impl.h" -+#include "common/http/http1/codec_impl_legacy.h" - - #include - #include -@@ -25,6 +25,7 @@ - - namespace Envoy { - namespace Http { -+namespace Legacy { - namespace Http1 { - namespace { - -@@ -48,6 +49,10 @@ - - using Http1ResponseCodeDetails = ConstSingleton; - using Http1HeaderTypes = ConstSingleton; -+using Http::Http1::CodecStats; -+using Http::Http1::HeaderKeyFormatter; -+using Http::Http1::HeaderKeyFormatterPtr; -+using Http::Http1::ProperCaseHeaderKeyFormatter; - - const StringUtil::CaseUnorderedSet& caseUnorderdSetContainingUpgradeAndHttp2Settings() { - CONSTRUCT_ON_FIRST_USE(StringUtil::CaseUnorderedSet, -@@ -1236,6 +1241,7 @@ - } - - } // namespace Http1 -+} // namespace Legacy - } // namespace Http - } // namespace Envoy - \ No newline at end of file diff --git a/tools/code_format/codec_diffs/http1_codec_impl_h b/tools/code_format/codec_diffs/http1_codec_impl_h deleted file mode 100644 index e81d2c838d5e..000000000000 --- a/tools/code_format/codec_diffs/http1_codec_impl_h +++ /dev/null @@ -1,130 +0,0 @@ ---- -+++ -@@ -24,6 +24,7 @@ - - namespace Envoy { - namespace Http { -+namespace Legacy { - namespace Http1 { - - class ConnectionImpl; -@@ -75,7 +76,8 @@ - void clearReadDisableCallsForTests() { read_disable_calls_ = 0; } - - protected: -- StreamEncoderImpl(ConnectionImpl& connection, HeaderKeyFormatter* header_key_formatter); -+ StreamEncoderImpl(ConnectionImpl& connection, -+ Http::Http1::HeaderKeyFormatter* header_key_formatter); - void encodeHeadersBase(const RequestOrResponseHeaderMap& headers, absl::optional status, - bool end_stream); - void encodeTrailersBase(const HeaderMap& headers); -@@ -114,7 +116,7 @@ - - void encodeFormattedHeader(absl::string_view key, absl::string_view value); - -- const HeaderKeyFormatter* const header_key_formatter_; -+ const Http::Http1::HeaderKeyFormatter* const header_key_formatter_; - absl::string_view details_; - }; - -@@ -123,7 +125,8 @@ - */ - class ResponseEncoderImpl : public StreamEncoderImpl, public ResponseEncoder { - public: -- ResponseEncoderImpl(ConnectionImpl& connection, HeaderKeyFormatter* header_key_formatter) -+ ResponseEncoderImpl(ConnectionImpl& connection, -+ Http::Http1::HeaderKeyFormatter* header_key_formatter) - : StreamEncoderImpl(connection, header_key_formatter) {} - - bool startedResponse() { return started_response_; } -@@ -142,7 +145,8 @@ - */ - class RequestEncoderImpl : public StreamEncoderImpl, public RequestEncoder { - public: -- RequestEncoderImpl(ConnectionImpl& connection, HeaderKeyFormatter* header_key_formatter) -+ RequestEncoderImpl(ConnectionImpl& connection, -+ Http::Http1::HeaderKeyFormatter* header_key_formatter) - : StreamEncoderImpl(connection, header_key_formatter) {} - bool upgradeRequest() const { return upgrade_request_; } - bool headRequest() const { return head_request_; } -@@ -203,7 +207,7 @@ - virtual bool supportsHttp10() { return false; } - bool maybeDirectDispatch(Buffer::Instance& data); - virtual void maybeAddSentinelBufferFragment(Buffer::WatermarkBuffer&) {} -- CodecStats& stats() { return stats_; } -+ Http::Http1::CodecStats& 
stats() { return stats_; } - bool enableTrailers() const { return enable_trailers_; } - - // Http::Connection -@@ -218,9 +222,9 @@ - bool strict1xxAnd204Headers() { return strict_1xx_and_204_headers_; } - - protected: -- ConnectionImpl(Network::Connection& connection, CodecStats& stats, http_parser_type type, -- uint32_t max_headers_kb, const uint32_t max_headers_count, -- HeaderKeyFormatterPtr&& header_key_formatter, bool enable_trailers); -+ ConnectionImpl(Network::Connection& connection, Http::Http1::CodecStats& stats, -+ http_parser_type type, uint32_t max_headers_kb, const uint32_t max_headers_count, -+ Http::Http1::HeaderKeyFormatterPtr&& header_key_formatter, bool enable_trailers); - - bool resetStreamCalled() { return reset_stream_called_; } - void onMessageBeginBase(); -@@ -240,10 +244,10 @@ - void checkMaxHeadersSize(); - - Network::Connection& connection_; -- CodecStats& stats_; -+ Http::Http1::CodecStats& stats_; - http_parser parser_; - Http::Code error_code_{Http::Code::BadRequest}; -- const HeaderKeyFormatterPtr header_key_formatter_; -+ const Http::Http1::HeaderKeyFormatterPtr header_key_formatter_; - HeaderString current_header_field_; - HeaderString current_header_value_; - bool processing_trailers_ : 1; -@@ -420,7 +424,7 @@ - */ - class ServerConnectionImpl : public ServerConnection, public ConnectionImpl { - public: -- ServerConnectionImpl(Network::Connection& connection, CodecStats& stats, -+ ServerConnectionImpl(Network::Connection& connection, Http::Http1::CodecStats& stats, - ServerConnectionCallbacks& callbacks, const Http1Settings& settings, - uint32_t max_request_headers_kb, const uint32_t max_request_headers_count, - envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction -@@ -432,7 +436,7 @@ - * An active HTTP/1.1 request. 
- */ - struct ActiveRequest { -- ActiveRequest(ConnectionImpl& connection, HeaderKeyFormatter* header_key_formatter) -+ ActiveRequest(ConnectionImpl& connection, Http::Http1::HeaderKeyFormatter* header_key_formatter) - : response_encoder_(connection, header_key_formatter) {} - - HeaderString request_url_; -@@ -524,7 +528,7 @@ - */ - class ClientConnectionImpl : public ClientConnection, public ConnectionImpl { - public: -- ClientConnectionImpl(Network::Connection& connection, CodecStats& stats, -+ ClientConnectionImpl(Network::Connection& connection, Http::Http1::CodecStats& stats, - ConnectionCallbacks& callbacks, const Http1Settings& settings, - const uint32_t max_response_headers_count); - -@@ -533,8 +537,8 @@ - - private: - struct PendingResponse { -- PendingResponse(ConnectionImpl& connection, HeaderKeyFormatter* header_key_formatter, -- ResponseDecoder* decoder) -+ PendingResponse(ConnectionImpl& connection, -+ Http::Http1::HeaderKeyFormatter* header_key_formatter, ResponseDecoder* decoder) - : encoder_(connection, header_key_formatter), decoder_(decoder) {} - - RequestEncoderImpl encoder_; -@@ -598,6 +602,7 @@ - }; - - } // namespace Http1 -+} // namespace Legacy - } // namespace Http - } // namespace Envoy - \ No newline at end of file diff --git a/tools/code_format/codec_diffs/http2_codec_impl_cc b/tools/code_format/codec_diffs/http2_codec_impl_cc deleted file mode 100644 index 46123d9ef031..000000000000 --- a/tools/code_format/codec_diffs/http2_codec_impl_cc +++ /dev/null @@ -1,34 +0,0 @@ ---- -+++ -@@ -1,4 +1,4 @@ --#include "common/http/http2/codec_impl.h" -+#include "common/http/http2/codec_impl_legacy.h" - - #include - #include -@@ -25,6 +25,7 @@ - - namespace Envoy { - namespace Http { -+namespace Legacy { - namespace Http2 { - - class Http2ResponseCodeDetailValues { -@@ -52,6 +53,9 @@ - }; - - using Http2ResponseCodeDetails = ConstSingleton; -+using Http::Http2::CodecStats; -+using Http::Http2::MetadataDecoder; -+using Http::Http2::MetadataEncoder; - - bool Utility::reconstituteCrumbledCookies(const HeaderString& key, const HeaderString& value, - HeaderString& cookies) { -@@ -1464,6 +1468,7 @@ - } - - } // namespace Http2 -+} // namespace Legacy - } // namespace Http - } // namespace Envoy - \ No newline at end of file diff --git a/tools/code_format/codec_diffs/http2_codec_impl_h b/tools/code_format/codec_diffs/http2_codec_impl_h deleted file mode 100644 index 70c306568061..000000000000 --- a/tools/code_format/codec_diffs/http2_codec_impl_h +++ /dev/null @@ -1,77 +0,0 @@ ---- -+++ -@@ -30,6 +30,7 @@ - - namespace Envoy { - namespace Http { -+namespace Legacy { - namespace Http2 { - - // This is not the full client magic, but it's the smallest size that should be able to -@@ -89,7 +90,7 @@ - */ - class ConnectionImpl : public virtual Connection, protected Logger::Loggable { - public: -- ConnectionImpl(Network::Connection& connection, CodecStats& stats, -+ ConnectionImpl(Network::Connection& connection, Http::Http2::CodecStats& stats, - const envoy::config::core::v3::Http2ProtocolOptions& http2_options, - const uint32_t max_headers_kb, const uint32_t max_headers_count); - -@@ -252,9 +253,9 @@ - virtual void decodeTrailers() PURE; - - // Get MetadataEncoder for this stream. -- MetadataEncoder& getMetadataEncoder(); -+ Http::Http2::MetadataEncoder& getMetadataEncoder(); - // Get MetadataDecoder for this stream. -- MetadataDecoder& getMetadataDecoder(); -+ Http::Http2::MetadataDecoder& getMetadataDecoder(); - // Callback function for MetadataDecoder. 
- void onMetadataDecoded(MetadataMapPtr&& metadata_map_ptr); - -@@ -273,8 +274,8 @@ - [this]() -> void { this->pendingSendBufferHighWatermark(); }, - []() -> void { /* TODO(adisuissa): Handle overflow watermark */ }}; - HeaderMapPtr pending_trailers_to_encode_; -- std::unique_ptr metadata_decoder_; -- std::unique_ptr metadata_encoder_; -+ std::unique_ptr metadata_decoder_; -+ std::unique_ptr metadata_encoder_; - absl::optional deferred_reset_; - HeaderString cookies_; - bool local_end_stream_sent_ : 1; -@@ -414,7 +415,7 @@ - - std::list active_streams_; - nghttp2_session* session_{}; -- CodecStats& stats_; -+ Http::Http2::CodecStats& stats_; - Network::Connection& connection_; - const uint32_t max_headers_kb_; - const uint32_t max_headers_count_; -@@ -522,7 +523,7 @@ - public: - using SessionFactory = Nghttp2SessionFactory; - ClientConnectionImpl(Network::Connection& connection, ConnectionCallbacks& callbacks, -- CodecStats& stats, -+ Http::Http2::CodecStats& stats, - const envoy::config::core::v3::Http2ProtocolOptions& http2_options, - const uint32_t max_response_headers_kb, - const uint32_t max_response_headers_count, -@@ -557,7 +558,7 @@ - class ServerConnectionImpl : public ServerConnection, public ConnectionImpl { - public: - ServerConnectionImpl(Network::Connection& connection, ServerConnectionCallbacks& callbacks, -- CodecStats& stats, -+ Http::Http2::CodecStats& stats, - const envoy::config::core::v3::Http2ProtocolOptions& http2_options, - const uint32_t max_request_headers_kb, - const uint32_t max_request_headers_count, -@@ -596,6 +597,7 @@ - }; - - } // namespace Http2 -+} // namespace Legacy - } // namespace Http - } // namespace Envoy - \ No newline at end of file From 06fd1d1cdd024993e039d42bd54600b807802022 Mon Sep 17 00:00:00 2001 From: Dmitri Dolguikh Date: Thu, 16 Jul 2020 22:24:30 -0700 Subject: [PATCH 662/909] Switch to a tsan-instrumented libc++ for tsan tests (#12134) This fixes https://github.com/envoyproxy/envoy/issues/9784 and re-enables vhds_integration_test Risk Level: Low, but will most likely increase memory usage Signed-off-by: Dmitri Dolguikh --- .bazelrc | 8 ++++++++ ci/do_ci.sh | 2 +- source/common/filesystem/posix/filesystem_impl.cc | 2 -- test/integration/BUILD | 2 -- 4 files changed, 9 insertions(+), 5 deletions(-) diff --git a/.bazelrc b/.bazelrc index 17cd6008e399..5d29db5979fa 100644 --- a/.bazelrc +++ b/.bazelrc @@ -161,6 +161,10 @@ build:rbe-toolchain-msan --linkopt=-L/opt/libcxx_msan/lib build:rbe-toolchain-msan --linkopt=-Wl,-rpath,/opt/libcxx_msan/lib build:rbe-toolchain-msan --config=clang-msan +build:rbe-toolchain-tsan --linkopt=-L/opt/libcxx_tsan/lib +build:rbe-toolchain-tsan --linkopt=-Wl,-rpath,/opt/libcxx_tsan/lib +build:rbe-toolchain-tsan --config=clang-tsan + build:rbe-toolchain-gcc --config=rbe-toolchain build:rbe-toolchain-gcc --platforms=@rbe_ubuntu_gcc//config:platform build:rbe-toolchain-gcc --host_platform=@rbe_ubuntu_gcc//config:platform @@ -229,6 +233,10 @@ build:docker-msan --config=docker-sandbox build:docker-msan --config=rbe-toolchain-clang-libc++ build:docker-msan --config=rbe-toolchain-msan +build:docker-tsan --config=docker-sandbox +build:docker-tsan --config=rbe-toolchain-clang-libc++ +build:docker-tsan --config=rbe-toolchain-tsan + # CI configurations build:remote-ci --remote_cache=grpcs://remotebuildexecution.googleapis.com build:remote-ci --remote_executor=grpcs://remotebuildexecution.googleapis.com diff --git a/ci/do_ci.sh b/ci/do_ci.sh index d13c7be545bd..384433e2ccf1 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh 
@@ -181,7 +181,7 @@ elif [[ "$CI_TARGET" == "bazel.tsan" ]]; then setup_clang_toolchain echo "bazel TSAN debug build with tests" echo "Building and testing envoy tests ${TEST_TARGETS}" - bazel_with_collection test ${BAZEL_BUILD_OPTIONS} -c dbg --config=clang-tsan --build_tests_only ${TEST_TARGETS} + bazel_with_collection test --config=rbe-toolchain-tsan ${BAZEL_BUILD_OPTIONS} -c dbg --build_tests_only ${TEST_TARGETS} if [ "${ENVOY_BUILD_FILTER_EXAMPLE}" == "1" ]; then echo "Building and testing envoy-filter-example tests..." pushd "${ENVOY_FILTER_EXAMPLE_SRCDIR}" diff --git a/source/common/filesystem/posix/filesystem_impl.cc b/source/common/filesystem/posix/filesystem_impl.cc index 580e980273a9..e24814d0ca70 100644 --- a/source/common/filesystem/posix/filesystem_impl.cc +++ b/source/common/filesystem/posix/filesystem_impl.cc @@ -96,8 +96,6 @@ std::string InstanceImplPosix::fileReadToEnd(const std::string& path) { throw EnvoyException(absl::StrCat("Invalid path: ", path)); } - std::ios::sync_with_stdio(false); - std::ifstream file(path); if (file.fail()) { throw EnvoyException(absl::StrCat("unable to read file: ", path)); diff --git a/test/integration/BUILD b/test/integration/BUILD index 2efec82ce37b..b08549380079 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -219,8 +219,6 @@ envoy_cc_test( ], tags = [ "fails_on_windows", - # https://github.com/envoyproxy/envoy/issues/9784 - "no_tsan", ], deps = [ ":http_integration_lib", From 23df4fcf4ec14eeb905708bf3b771af1f1371f76 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafa=C5=82=20Augustyniak?= Date: Fri, 17 Jul 2020 09:28:55 -0700 Subject: [PATCH 663/909] http: add headers via local reply mapper (#12093) Signed-off-by: Rafal Augustyniak --- .../v3/http_connection_manager.proto | 6 +++ .../v4alpha/http_connection_manager.proto | 6 +++ .../http/http_conn_man/local_reply.rst | 7 ++- docs/root/version_history/current.rst | 1 + .../v3/http_connection_manager.proto | 6 +++ .../v4alpha/http_connection_manager.proto | 6 +++ source/common/local_reply/BUILD | 1 + source/common/local_reply/local_reply.cc | 7 +++ test/common/local_reply/local_reply_test.cc | 44 +++++++++++++++++++ .../local_reply_integration_test.cc | 14 +++++- 10 files changed, 96 insertions(+), 2 deletions(-) diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto b/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto index 4788afef2434..87e629f4f441 100644 --- a/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto +++ b/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto @@ -605,6 +605,7 @@ message LocalReplyConfig { } // The configuration to filter and change local response. +// [#next-free-field: 6] message ResponseMapper { // Filter to determine if this mapper should apply. config.accesslog.v3.AccessLogFilter filter = 1 [(validate.rules).message = {required: true}]; @@ -619,6 +620,11 @@ message ResponseMapper { // A per mapper `body_format` to override the :ref:`body_format `. // It will be used when this mapper is matched. config.core.v3.SubstitutionFormatString body_format_override = 4; + + // HTTP headers to add to a local reply. This allows the response mapper to append, to add + // or to override headers of any local reply before it is sent to a downstream client. 
+ repeated config.core.v3.HeaderValueOption headers_to_add = 5 + [(validate.rules).repeated = {max_items: 1000}]; } message Rds { diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto index 705f5e5fdcc6..ac31bf1ecd62 100644 --- a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto +++ b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto @@ -607,6 +607,7 @@ message LocalReplyConfig { } // The configuration to filter and change local response. +// [#next-free-field: 6] message ResponseMapper { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.network.http_connection_manager.v3.ResponseMapper"; @@ -624,6 +625,11 @@ message ResponseMapper { // A per mapper `body_format` to override the :ref:`body_format `. // It will be used when this mapper is matched. config.core.v4alpha.SubstitutionFormatString body_format_override = 4; + + // HTTP headers to add to a local reply. This allows the response mapper to append, to add + // or to override headers of any local reply before it is sent to a downstream client. + repeated config.core.v4alpha.HeaderValueOption headers_to_add = 5 + [(validate.rules).repeated = {max_items: 1000}]; } message Rds { diff --git a/docs/root/configuration/http/http_conn_man/local_reply.rst b/docs/root/configuration/http/http_conn_man/local_reply.rst index c2f9d59e18d6..5b87d9e3ef5c 100644 --- a/docs/root/configuration/http/http_conn_man/local_reply.rst +++ b/docs/root/configuration/http/http_conn_man/local_reply.rst @@ -15,7 +15,7 @@ Features: Local reply content modification -------------------------------- -The local response content returned by Envoy can be customized. A list of :ref:`mappers ` can be specified. Each mapper must have a :ref:`filter `. It may have following rewrite rules; a :ref:`status_code ` rule to rewrite response code, a :ref:`body ` rule to rewrite the local reply body and a :ref:`body_format_override ` to specify the response body format. Envoy checks each `mapper` according to the specified order until the first one is matched. If a `mapper` is matched, all its rewrite rules will apply. +The local response content returned by Envoy can be customized. A list of :ref:`mappers ` can be specified. Each mapper must have a :ref:`filter `. It may have following rewrite rules; a :ref:`status_code ` rule to rewrite response code, a :ref:`headers_to_add ` rule to add/override/append response HTTP headers, a :ref:`body ` rule to rewrite the local reply body and a :ref:`body_format_override ` to specify the response body format. Envoy checks each `mapper` according to the specified order until the first one is matched. If a `mapper` is matched, all its rewrite rules will apply. 
Example of a LocalReplyConfig @@ -29,6 +29,11 @@ Example of a LocalReplyConfig value: default_value: 400 runtime_key: key_b + headers_to_add: + - header: + key: "foo" + value: "bar" + append: false status_code: 401 body: inline_string: "not allowed" diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index eb3465f99eeb..13b638ba9ad1 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -10,6 +10,7 @@ Minor Behavior Changes *Changes that may cause incompatibilities for some users, but should not for most* * compressor: always insert `Vary` headers for compressible resources even if it's decided not to compress a response due to incompatible `Accept-Encoding` value. The `Vary` header needs to be inserted to let a caching proxy in front of Envoy know that the requested resource still can be served with compression applied. +* http: added :ref:`headers_to_add ` to :ref:`local reply mapper ` to allow its users to add/append/override response HTTP headers to local replies. * http: added HCM level configuration of :ref:`error handling on invalid messaging ` which substantially changes Envoy's behavior when encountering invalid HTTP/1.1 defaulting to closing the connection instead of allowing reuse. This can temporarily be reverted by setting `envoy.reloadable_features.hcm_stream_error_on_invalid_message` to false, or permanently reverted by setting the :ref:`HCM option ` to true to restore prior HTTP/1.1 beavior and setting the *new* HTTP/2 configuration :ref:`override_stream_error_on_invalid_http_message ` to false to retain prior HTTP/2 behavior. * http: the per-stream FilterState maintained by the HTTP connection manager will now provide read/write access to the downstream connection FilterState. As such, code that relies on interacting with this might see a change in behavior. diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto index 6d505f748222..a25759c85fc7 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto @@ -610,6 +610,7 @@ message LocalReplyConfig { } // The configuration to filter and change local response. +// [#next-free-field: 6] message ResponseMapper { // Filter to determine if this mapper should apply. config.accesslog.v3.AccessLogFilter filter = 1 [(validate.rules).message = {required: true}]; @@ -624,6 +625,11 @@ message ResponseMapper { // A per mapper `body_format` to override the :ref:`body_format `. // It will be used when this mapper is matched. config.core.v3.SubstitutionFormatString body_format_override = 4; + + // HTTP headers to add to a local reply. This allows the response mapper to append, to add + // or to override headers of any local reply before it is sent to a downstream client. 
+ repeated config.core.v3.HeaderValueOption headers_to_add = 5 + [(validate.rules).repeated = {max_items: 1000}]; } message Rds { diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto index 705f5e5fdcc6..ac31bf1ecd62 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto @@ -607,6 +607,7 @@ message LocalReplyConfig { } // The configuration to filter and change local response. +// [#next-free-field: 6] message ResponseMapper { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.network.http_connection_manager.v3.ResponseMapper"; @@ -624,6 +625,11 @@ message ResponseMapper { // A per mapper `body_format` to override the :ref:`body_format `. // It will be used when this mapper is matched. config.core.v4alpha.SubstitutionFormatString body_format_override = 4; + + // HTTP headers to add to a local reply. This allows the response mapper to append, to add + // or to override headers of any local reply before it is sent to a downstream client. + repeated config.core.v4alpha.HeaderValueOption headers_to_add = 5 + [(validate.rules).repeated = {max_items: 1000}]; } message Rds { diff --git a/source/common/local_reply/BUILD b/source/common/local_reply/BUILD index 6de00f364e0c..16995a49ea86 100644 --- a/source/common/local_reply/BUILD +++ b/source/common/local_reply/BUILD @@ -23,6 +23,7 @@ envoy_cc_library( "//source/common/formatter:substitution_format_string_lib", "//source/common/formatter:substitution_formatter_lib", "//source/common/http:header_map_lib", + "//source/common/router:header_parser_lib", "//source/common/stream_info:stream_info_lib", "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", ], diff --git a/source/common/local_reply/local_reply.cc b/source/common/local_reply/local_reply.cc index 9574de79c6fd..d4549dc1a135 100644 --- a/source/common/local_reply/local_reply.cc +++ b/source/common/local_reply/local_reply.cc @@ -9,6 +9,7 @@ #include "common/formatter/substitution_format_string.h" #include "common/formatter/substitution_formatter.h" #include "common/http/header_map_impl.h" +#include "common/router/header_parser.h" namespace Envoy { namespace LocalReply { @@ -43,6 +44,7 @@ class BodyFormatter { }; using BodyFormatterPtr = std::unique_ptr; +using HeaderParserPtr = std::unique_ptr; class ResponseMapper { public: @@ -63,6 +65,8 @@ class ResponseMapper { if (config.has_body_format_override()) { body_formatter_ = std::make_unique(config.body_format_override()); } + + header_parser_ = Envoy::Router::HeaderParser::configure(config.headers_to_add()); } bool matchAndRewrite(const Http::RequestHeaderMap& request_headers, @@ -79,6 +83,8 @@ class ResponseMapper { body = body_.value(); } + header_parser_->evaluateHeaders(response_headers, stream_info); + if (status_code_.has_value() && code != status_code_.value()) { code = status_code_.value(); response_headers.setStatus(std::to_string(enumToInt(code))); @@ -95,6 +101,7 @@ class ResponseMapper { const AccessLog::FilterPtr filter_; absl::optional status_code_; absl::optional body_; + HeaderParserPtr header_parser_; BodyFormatterPtr body_formatter_; }; diff --git 
a/test/common/local_reply/local_reply_test.cc b/test/common/local_reply/local_reply_test.cc index 2bf9149d0a94..0807a12982cd 100644 --- a/test/common/local_reply/local_reply_test.cc +++ b/test/common/local_reply/local_reply_test.cc @@ -291,5 +291,49 @@ TEST_F(LocalReplyTest, TestMapperFormat) { EXPECT_EQ(content_type_, "text/plain"); } +TEST_F(LocalReplyTest, TestHeaderAddition) { + // Default text formatter without any mappers + const std::string yaml = R"( + mappers: + - filter: + status_code_filter: + comparison: + op: GE + value: + default_value: 0 + runtime_key: key_b + headers_to_add: + - header: + key: foo-1 + value: bar1 + append: true + - header: + key: foo-2 + value: override-bar2 + append: false + - header: + key: foo-3 + value: append-bar3 + append: true +)"; + TestUtility::loadFromYaml(yaml, config_); + auto local = Factory::create(config_, context_); + + response_headers_.addCopy("foo-2", "bar2"); + response_headers_.addCopy("foo-3", "bar3"); + local->rewrite(nullptr, response_headers_, stream_info_, code_, body_, content_type_); + EXPECT_EQ(code_, TestInitCode); + EXPECT_EQ(stream_info_.response_code_, static_cast(TestInitCode)); + EXPECT_EQ(content_type_, "text/plain"); + + EXPECT_EQ(response_headers_.get_("foo-1"), "bar1"); + EXPECT_EQ(response_headers_.get_("foo-2"), "override-bar2"); + std::vector out; + Http::HeaderUtility::getAllOfHeader(response_headers_, "foo-3", out); + ASSERT_EQ(out.size(), 2); + ASSERT_EQ(out[0], "bar3"); + ASSERT_EQ(out[1], "append-bar3"); +} + } // namespace LocalReply } // namespace Envoy diff --git a/test/integration/local_reply_integration_test.cc b/test/integration/local_reply_integration_test.cc index 472aaf8220be..dacd7fcad033 100644 --- a/test/integration/local_reply_integration_test.cc +++ b/test/integration/local_reply_integration_test.cc @@ -28,6 +28,11 @@ TEST_P(LocalReplyIntegrationTest, MapStatusCodeAndFormatToJson) { name: test-header exact_match: exact-match-value status_code: 550 + headers_to_add: + - header: + key: foo + value: bar + append: false body_format: json_format: level: TRACE @@ -74,6 +79,7 @@ TEST_P(LocalReplyIntegrationTest, MapStatusCodeAndFormatToJson) { EXPECT_EQ("application/json", response->headers().ContentType()->value().getStringView()); EXPECT_EQ("150", response->headers().ContentLength()->value().getStringView()); EXPECT_EQ("550", response->headers().Status()->value().getStringView()); + EXPECT_EQ("bar", response->headers().get(Http::LowerCaseString("foo"))->value().getStringView()); // Check if returned json is same as expected EXPECT_TRUE(TestUtility::jsonStringEqual(response->body(), expected_body)); } @@ -131,7 +137,7 @@ TEST_P(LocalReplyIntegrationTest, MapStatusCodeAndFormatToJson4Grpc) { expected_grpc_message)); } -// Matched second filter has code and body rewrite and its format +// Matched second filter has code, headers and body rewrite and its format TEST_P(LocalReplyIntegrationTest, MapStatusCodeAndFormatToJsonForFirstMatchingFilter) { const std::string yaml = R"EOF( mappers: @@ -147,6 +153,11 @@ TEST_P(LocalReplyIntegrationTest, MapStatusCodeAndFormatToJsonForFirstMatchingFi name: test-header exact_match: exact-match-value status_code: 551 + headers_to_add: + - header: + key: foo + value: bar + append: false body: inline_string: "customized body text" body_format_override: @@ -199,6 +210,7 @@ TEST_P(LocalReplyIntegrationTest, MapStatusCodeAndFormatToJsonForFirstMatchingFi EXPECT_EQ("text/plain", response->headers().ContentType()->value().getStringView()); EXPECT_EQ("24", 
response->headers().ContentLength()->value().getStringView()); EXPECT_EQ("551", response->headers().Status()->value().getStringView()); + EXPECT_EQ("bar", response->headers().get(Http::LowerCaseString("foo"))->value().getStringView()); // Check if returned json is same as expected EXPECT_EQ(response->body(), expected_body); } From 7d8b9acbbd27e1d1aac966def59a9474d53c125d Mon Sep 17 00:00:00 2001 From: Michael Rebello Date: Fri, 17 Jul 2020 09:32:03 -0700 Subject: [PATCH 664/909] network: catch SIGPIPEs via SO_NOSIGPIPE on Darwin (#12039) Signed-off-by: Michael Rebello Co-authored-by: Jose Nino --- .../common/network/socket_option_factory.cc | 8 ++ source/common/network/socket_option_factory.h | 1 + source/common/network/socket_option_impl.h | 6 ++ source/common/upstream/BUILD | 1 + source/common/upstream/upstream_impl.cc | 7 ++ source/server/BUILD | 1 + source/server/listener_impl.cc | 6 ++ .../upstream/cluster_manager_impl_test.cc | 82 ++++++++++++++++--- 8 files changed, 99 insertions(+), 13 deletions(-) diff --git a/source/common/network/socket_option_factory.cc b/source/common/network/socket_option_factory.cc index 8655d5ac971a..e6ed92c56e73 100644 --- a/source/common/network/socket_option_factory.cc +++ b/source/common/network/socket_option_factory.cc @@ -61,6 +61,14 @@ std::unique_ptr SocketOptionFactory::buildSocketMarkOptions(uin return options; } +std::unique_ptr SocketOptionFactory::buildSocketNoSigpipeOptions() { + // Provide additional handling for `SIGPIPE` at the socket layer by converting it to `EPIPE`. + std::unique_ptr options = std::make_unique(); + options->push_back(std::make_shared( + envoy::config::core::v3::SocketOption::STATE_PREBIND, ENVOY_SOCKET_SO_NOSIGPIPE, 1)); + return options; +} + std::unique_ptr SocketOptionFactory::buildLiteralOptions( const Protobuf::RepeatedPtrField& socket_options) { auto options = std::make_unique(); diff --git a/source/common/network/socket_option_factory.h b/source/common/network/socket_option_factory.h index 1da9f3c1780e..72c67bfd8996 100644 --- a/source/common/network/socket_option_factory.h +++ b/source/common/network/socket_option_factory.h @@ -26,6 +26,7 @@ class SocketOptionFactory : Logger::Loggable { static std::unique_ptr buildIpFreebindOptions(); static std::unique_ptr buildIpTransparentOptions(); static std::unique_ptr buildSocketMarkOptions(uint32_t mark); + static std::unique_ptr buildSocketNoSigpipeOptions(); static std::unique_ptr buildTcpFastOpenOptions(uint32_t queue_length); static std::unique_ptr buildLiteralOptions( const Protobuf::RepeatedPtrField& socket_options); diff --git a/source/common/network/socket_option_impl.h b/source/common/network/socket_option_impl.h index 69b4989b20a4..ce6ebb926579 100644 --- a/source/common/network/socket_option_impl.h +++ b/source/common/network/socket_option_impl.h @@ -47,6 +47,12 @@ namespace Network { #define ENVOY_SOCKET_SO_MARK Network::SocketOptionName() #endif +#ifdef SO_NOSIGPIPE +#define ENVOY_SOCKET_SO_NOSIGPIPE ENVOY_MAKE_SOCKET_OPTION_NAME(SOL_SOCKET, SO_NOSIGPIPE) +#else +#define ENVOY_SOCKET_SO_NOSIGPIPE Network::SocketOptionName() +#endif + #ifdef SO_REUSEPORT #define ENVOY_SOCKET_SO_REUSEPORT ENVOY_MAKE_SOCKET_OPTION_NAME(SOL_SOCKET, SO_REUSEPORT) #else diff --git a/source/common/upstream/BUILD b/source/common/upstream/BUILD index 50effb27094a..2d0fb940cf00 100644 --- a/source/common/upstream/BUILD +++ b/source/common/upstream/BUILD @@ -440,6 +440,7 @@ envoy_cc_library( "//source/common/network:address_lib", "//source/common/network:resolver_lib", 
"//source/common/network:socket_option_factory_lib", + "//source/common/network:socket_option_lib", "//source/common/network:utility_lib", "//source/common/protobuf", "//source/common/protobuf:utility_lib", diff --git a/source/common/upstream/upstream_impl.cc b/source/common/upstream/upstream_impl.cc index 69ef701a5cf1..269745bd765a 100644 --- a/source/common/upstream/upstream_impl.cc +++ b/source/common/upstream/upstream_impl.cc @@ -37,6 +37,7 @@ #include "common/network/address_impl.h" #include "common/network/resolver_impl.h" #include "common/network/socket_option_factory.h" +#include "common/network/socket_option_impl.h" #include "common/protobuf/protobuf.h" #include "common/protobuf/utility.h" #include "common/router/config_utility.h" @@ -102,6 +103,12 @@ parseClusterSocketOptions(const envoy::config::cluster::v3::Cluster& config, const envoy::config::core::v3::BindConfig bind_config) { Network::ConnectionSocket::OptionsSharedPtr cluster_options = std::make_shared(); + // The process-wide `signal()` handling may fail to handle SIGPIPE if overridden + // in the process (i.e., on a mobile client). Some OSes support handling it at the socket layer: + if (ENVOY_SOCKET_SO_NOSIGPIPE.hasValue()) { + Network::Socket::appendOptions(cluster_options, + Network::SocketOptionFactory::buildSocketNoSigpipeOptions()); + } // Cluster IP_FREEBIND settings, when set, will override the cluster manager wide settings. if ((bind_config.freebind().value() && !config.upstream_bind_config().has_freebind()) || config.upstream_bind_config().freebind().value()) { diff --git a/source/server/BUILD b/source/server/BUILD index 16e4ddfb8d63..33266f13f3f8 100644 --- a/source/server/BUILD +++ b/source/server/BUILD @@ -47,6 +47,7 @@ envoy_cc_library( "//source/common/config:utility_lib", "//source/common/network:resolver_lib", "//source/common/network:socket_option_factory_lib", + "//source/common/network:socket_option_lib", "//source/common/network:utility_lib", "//source/common/protobuf:utility_lib", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", diff --git a/source/server/listener_impl.cc b/source/server/listener_impl.cc index 2e46bca84d12..bd6c81b6c62e 100644 --- a/source/server/listener_impl.cc +++ b/source/server/listener_impl.cc @@ -17,6 +17,7 @@ #include "common/network/connection_balancer_impl.h" #include "common/network/resolver_impl.h" #include "common/network/socket_option_factory.h" +#include "common/network/socket_option_impl.h" #include "common/network/utility.h" #include "common/protobuf/utility.h" #include "common/runtime/runtime_features.h" @@ -372,6 +373,11 @@ void ListenerImpl::buildUdpListenerFactory(Network::Socket::Type socket_type, } void ListenerImpl::buildListenSocketOptions(Network::Socket::Type socket_type) { + // The process-wide `signal()` handling may fail to handle SIGPIPE if overridden + // in the process (i.e., on a mobile client). 
Some OSes support handling it at the socket layer: + if (ENVOY_SOCKET_SO_NOSIGPIPE.hasValue()) { + addListenSocketOptions(Network::SocketOptionFactory::buildSocketNoSigpipeOptions()); + } if (PROTOBUF_GET_WRAPPED_OR_DEFAULT(config_, transparent, false)) { addListenSocketOptions(Network::SocketOptionFactory::buildIpTransparentOptions()); } diff --git a/test/common/upstream/cluster_manager_impl_test.cc b/test/common/upstream/cluster_manager_impl_test.cc index a5fb36b1f98e..ae2c282fdd4f 100644 --- a/test/common/upstream/cluster_manager_impl_test.cc +++ b/test/common/upstream/cluster_manager_impl_test.cc @@ -67,7 +67,7 @@ class ClusterManagerImplTest : public testing::Test { address: socket_address: address: 127.0.0.1 - port_value: 11002 + port_value: 11002 )EOF"; const std::string merge_window_enabled = R"EOF( common_lb_config: @@ -3125,6 +3125,8 @@ TEST_F(ClusterManagerInitHelperTest, RemoveClusterWithinInitLoop) { init_helper_.startInitializingSecondaryClusters(); } +using NameVals = std::vector>; + // Validate that when options are set in the ClusterManager and/or Cluster, we see the socket option // propagated to setsockopt(). This is as close to an end-to-end test as we have for this feature, // due to the complexity of creating an integration test involving the network stack. We only test @@ -3137,8 +3139,7 @@ class SockoptsTest : public ClusterManagerImplTest { void TearDown() override { factory_.tls_.shutdownThread(); } // TODO(tschroed): Extend this to support socket state as well. - void expectSetsockopts(const std::vector>& names_vals) { - + void expectSetsockopts(const NameVals& names_vals) { NiceMock os_sys_calls; TestThreadsafeSingletonInjector os_calls(&os_sys_calls); NiceMock socket; @@ -3181,8 +3182,15 @@ class SockoptsTest : public ClusterManagerImplTest { } void expectSetsockoptFreebind() { - std::vector> names_vals{ - {ENVOY_SOCKET_IP_FREEBIND, 1}}; + NameVals names_vals{{ENVOY_SOCKET_IP_FREEBIND, 1}}; + if (ENVOY_SOCKET_SO_NOSIGPIPE.hasValue()) { + names_vals.emplace_back(std::make_pair(ENVOY_SOCKET_SO_NOSIGPIPE, 1)); + } + expectSetsockopts(names_vals); + } + + void expectOnlyNoSigpipeOptions() { + NameVals names_vals{{std::make_pair(ENVOY_SOCKET_SO_NOSIGPIPE, 1)}}; expectSetsockopts(names_vals); } @@ -3222,7 +3230,11 @@ TEST_F(SockoptsTest, SockoptsUnset) { port_value: 11001 )EOF"; initialize(yaml); - expectNoSocketOptions(); + if (ENVOY_SOCKET_SO_NOSIGPIPE.hasValue()) { + expectOnlyNoSigpipeOptions(); + } else { + expectNoSocketOptions(); + } } TEST_F(SockoptsTest, FreebindClusterOnly) { @@ -3325,8 +3337,11 @@ TEST_F(SockoptsTest, SockoptsClusterOnly) { )EOF"; initialize(yaml); - std::vector> names_vals{ - {ENVOY_MAKE_SOCKET_OPTION_NAME(1, 2), 3}, {ENVOY_MAKE_SOCKET_OPTION_NAME(4, 5), 6}}; + NameVals names_vals{{ENVOY_MAKE_SOCKET_OPTION_NAME(1, 2), 3}, + {ENVOY_MAKE_SOCKET_OPTION_NAME(4, 5), 6}}; + if (ENVOY_SOCKET_SO_NOSIGPIPE.hasValue()) { + names_vals.emplace_back(std::make_pair(ENVOY_SOCKET_SO_NOSIGPIPE, 1)); + } expectSetsockopts(names_vals); } @@ -3354,8 +3369,11 @@ TEST_F(SockoptsTest, SockoptsClusterManagerOnly) { { level: 4, name: 5, int_value: 6, state: STATE_PREBIND }] )EOF"; initialize(yaml); - std::vector> names_vals{ - {ENVOY_MAKE_SOCKET_OPTION_NAME(1, 2), 3}, {ENVOY_MAKE_SOCKET_OPTION_NAME(4, 5), 6}}; + NameVals names_vals{{ENVOY_MAKE_SOCKET_OPTION_NAME(1, 2), 3}, + {ENVOY_MAKE_SOCKET_OPTION_NAME(4, 5), 6}}; + if (ENVOY_SOCKET_SO_NOSIGPIPE.hasValue()) { + names_vals.emplace_back(std::make_pair(ENVOY_SOCKET_SO_NOSIGPIPE, 1)); + } 
expectSetsockopts(names_vals); } @@ -3385,8 +3403,11 @@ TEST_F(SockoptsTest, SockoptsClusterOverride) { socket_options: [{ level: 7, name: 8, int_value: 9, state: STATE_PREBIND }] )EOF"; initialize(yaml); - std::vector> names_vals{ - {ENVOY_MAKE_SOCKET_OPTION_NAME(1, 2), 3}, {ENVOY_MAKE_SOCKET_OPTION_NAME(4, 5), 6}}; + NameVals names_vals{{ENVOY_MAKE_SOCKET_OPTION_NAME(1, 2), 3}, + {ENVOY_MAKE_SOCKET_OPTION_NAME(4, 5), 6}}; + if (ENVOY_SOCKET_SO_NOSIGPIPE.hasValue()) { + names_vals.emplace_back(std::make_pair(ENVOY_SOCKET_SO_NOSIGPIPE, 1)); + } expectSetsockopts(names_vals); } @@ -3435,6 +3456,14 @@ class TcpKeepaliveTest : public ClusterManagerImplTest { options, socket, envoy::config::core::v3::SocketOption::STATE_PREBIND))); return connection_; })); + if (ENVOY_SOCKET_SO_NOSIGPIPE.hasValue()) { + EXPECT_CALL(socket, setSocketOption(ENVOY_SOCKET_SO_NOSIGPIPE.level(), + ENVOY_SOCKET_SO_NOSIGPIPE.option(), _, sizeof(int))) + .WillOnce(Invoke([](int, int, const void* optval, socklen_t) -> Api::SysCallIntResult { + EXPECT_EQ(1, *static_cast(optval)); + return {0, 0}; + })); + } EXPECT_CALL(socket, setSocketOption(ENVOY_SOCKET_SO_KEEPALIVE.level(), ENVOY_SOCKET_SO_KEEPALIVE.option(), _, sizeof(int))) .WillOnce(Invoke([](int, int, const void* optval, socklen_t) -> Api::SysCallIntResult { @@ -3472,6 +3501,29 @@ class TcpKeepaliveTest : public ClusterManagerImplTest { EXPECT_EQ(connection_, conn_data.connection_.get()); } + void expectOnlyNoSigpipeOptions() { + NiceMock socket; + EXPECT_CALL(factory_.tls_.dispatcher_, createClientConnection_(_, _, _, _)) + .WillOnce(Invoke([this, &socket](Network::Address::InstanceConstSharedPtr, + Network::Address::InstanceConstSharedPtr, + Network::TransportSocketPtr&, + const Network::ConnectionSocket::OptionsSharedPtr& options) + -> Network::ClientConnection* { + EXPECT_NE(nullptr, options.get()); + EXPECT_TRUE((Network::Socket::applyOptions( + options, socket, envoy::config::core::v3::SocketOption::STATE_PREBIND))); + return connection_; + })); + EXPECT_CALL(socket, setSocketOption(ENVOY_SOCKET_SO_NOSIGPIPE.level(), + ENVOY_SOCKET_SO_NOSIGPIPE.option(), _, sizeof(int))) + .WillOnce(Invoke([](int, int, const void* optval, socklen_t) -> Api::SysCallIntResult { + EXPECT_EQ(1, *static_cast(optval)); + return {0, 0}; + })); + auto conn_data = cluster_manager_->tcpConnForCluster("TcpKeepaliveCluster", nullptr); + EXPECT_EQ(connection_, conn_data.connection_.get()); + } + void expectNoSocketOptions() { EXPECT_CALL(factory_.tls_.dispatcher_, createClientConnection_(_, _, _, _)) .WillOnce( @@ -3508,7 +3560,11 @@ TEST_F(TcpKeepaliveTest, TcpKeepaliveUnset) { port_value: 11001 )EOF"; initialize(yaml); - expectNoSocketOptions(); + if (ENVOY_SOCKET_SO_NOSIGPIPE.hasValue()) { + expectOnlyNoSigpipeOptions(); + } else { + expectNoSocketOptions(); + } } TEST_F(TcpKeepaliveTest, TcpKeepaliveCluster) { From f84dd5c40e4d225d3ab1068671098d94bd61aadc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Guti=C3=A9rrez=20Segal=C3=A9s?= Date: Fri, 17 Jul 2020 12:33:50 -0400 Subject: [PATCH 665/909] csrf: fix issues with host/origin header parsing (#12133) After #11670, the CSRF filter started failing for us. This change fixes 3 issues that were uncovered after moving to gURL for parsing URLs: 1) the hostAndPort() utility method, in the CSRF filter, was returning a string view of a stack variable. 2) the Origin header always includes the scheme, so let's ensure this is illustrated in tests (which were missing this and passing due to relaxed checks). 
3) the Url::initialize method expects an absolute URL, something that the CSRF filter wasn't complying with. Signed-off-by: Raul Gutierrez Segales --- docs/root/version_history/current.rst | 1 + .../filters/http/csrf/csrf_filter.cc | 40 +++--- .../http/csrf/csrf_filter_integration_test.cc | 20 +-- .../filters/http/csrf/csrf_filter_test.cc | 114 +++++++++++++++--- 4 files changed, 131 insertions(+), 44 deletions(-) diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 13b638ba9ad1..9229315345d0 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -22,6 +22,7 @@ Bug Fixes --------- *Changes expected to improve the state of the world and are unlikely to have negative effects* +* csrf: fixed issues with regards to origin and host header parsing. * fault: fixed an issue with `active_faults` gauge not being decremented for when abort faults were injected. Removed Config or Runtime diff --git a/source/extensions/filters/http/csrf/csrf_filter.cc b/source/extensions/filters/http/csrf/csrf_filter.cc index eb6885936893..bb7db21b36eb 100644 --- a/source/extensions/filters/http/csrf/csrf_filter.cc +++ b/source/extensions/filters/http/csrf/csrf_filter.cc @@ -37,27 +37,39 @@ bool isModifyMethod(const Http::RequestHeaderMap& headers) { method_type == method_values.Delete || method_type == method_values.Patch); } -absl::string_view hostAndPort(const absl::string_view header) { - Http::Utility::Url absolute_url; - if (!header.empty()) { - if (absolute_url.initialize(header, /*is_connect=*/false)) { - return absolute_url.hostAndPort(); +std::string hostAndPort(const absl::string_view absolute_url) { + Http::Utility::Url url; + if (!absolute_url.empty()) { + if (url.initialize(absolute_url, /*is_connect=*/false)) { + return std::string(url.hostAndPort()); } - return header; + return std::string(absolute_url); } return EMPTY_STRING; } -absl::string_view sourceOriginValue(const Http::RequestHeaderMap& headers) { - const absl::string_view origin = hostAndPort(headers.getInlineValue(origin_handle.handle())); - if (origin != EMPTY_STRING) { +// Note: per https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin, +// the Origin header must include the scheme (and hostAndPort expects +// an absolute URL). +std::string sourceOriginValue(const Http::RequestHeaderMap& headers) { + const auto origin = hostAndPort(headers.getInlineValue(origin_handle.handle())); + if (!origin.empty()) { return origin; } return hostAndPort(headers.getInlineValue(referer_handle.handle())); } -absl::string_view targetOriginValue(const Http::RequestHeaderMap& headers) { - return hostAndPort(headers.getHostValue()); +std::string targetOriginValue(const Http::RequestHeaderMap& headers) { + const auto host_value = headers.getHostValue(); + + // Don't even bother if there's not Host header. + if (host_value.empty()) { + return EMPTY_STRING; + } + + const auto absolute_url = fmt::format( + "{}://{}", headers.Scheme() != nullptr ? 
headers.getSchemeValue() : "http", host_value); + return hostAndPort(absolute_url); } static CsrfStats generateStats(const std::string& prefix, Stats::Scope& scope) { @@ -91,8 +103,8 @@ Http::FilterHeadersStatus CsrfFilter::decodeHeaders(Http::RequestHeaderMap& head } bool is_valid = true; - const absl::string_view source_origin = sourceOriginValue(headers); - if (source_origin == EMPTY_STRING) { + const auto source_origin = sourceOriginValue(headers); + if (source_origin.empty()) { is_valid = false; config_->stats().missing_source_origin_.inc(); } @@ -128,7 +140,7 @@ void CsrfFilter::determinePolicy() { } bool CsrfFilter::isValid(const absl::string_view source_origin, Http::RequestHeaderMap& headers) { - const absl::string_view target_origin = targetOriginValue(headers); + const auto target_origin = targetOriginValue(headers); if (source_origin == target_origin) { return true; } diff --git a/test/extensions/filters/http/csrf/csrf_filter_integration_test.cc b/test/extensions/filters/http/csrf/csrf_filter_integration_test.cc index 91b43cad095c..6500bf77b61a 100644 --- a/test/extensions/filters/http/csrf/csrf_filter_integration_test.cc +++ b/test/extensions/filters/http/csrf/csrf_filter_integration_test.cc @@ -84,7 +84,7 @@ TEST_P(CsrfFilterIntegrationTest, TestCsrfSuccess) { {":method", "PUT"}, {":path", "/"}, {":scheme", "http"}, - {"origin", "localhost"}, + {"origin", "http://localhost"}, {"host", "localhost"}, }}; const auto& response = sendRequestAndWaitForResponse(headers); @@ -98,7 +98,7 @@ TEST_P(CsrfFilterIntegrationTest, TestCsrfDisabled) { {":method", "PUT"}, {":path", "/"}, {":scheme", "http"}, - {"origin", "cross-origin"}, + {"origin", "http://cross-origin"}, {"host", "test-origin"}, }}; const auto& response = sendRequestAndWaitForResponse(headers); @@ -112,7 +112,7 @@ TEST_P(CsrfFilterIntegrationTest, TestNonMutationMethod) { {":method", "GET"}, {":path", "/"}, {":scheme", "http"}, - {"origin", "cross-origin"}, + {"origin", "http://cross-origin"}, {"host", "test-origin"}, }}; const auto& response = sendRequestAndWaitForResponse(headers); @@ -126,7 +126,7 @@ TEST_P(CsrfFilterIntegrationTest, TestOriginMismatch) { {":method", "PUT"}, {":path", "/"}, {":scheme", "http"}, - {"origin", "cross-origin"}, + {"origin", "http://cross-origin"}, {"host", "test-origin"}, }}; const auto& response = sendRequest(headers); @@ -140,7 +140,7 @@ TEST_P(CsrfFilterIntegrationTest, TestEnforcesPost) { {":method", "POST"}, {":path", "/"}, {":scheme", "http"}, - {"origin", "cross-origin"}, + {"origin", "http://cross-origin"}, {"host", "test-origin"}, }}; const auto& response = sendRequest(headers); @@ -154,7 +154,7 @@ TEST_P(CsrfFilterIntegrationTest, TestEnforcesDelete) { {":method", "DELETE"}, {":path", "/"}, {":scheme", "http"}, - {"origin", "cross-origin"}, + {"origin", "http://cross-origin"}, {"host", "test-origin"}, }}; const auto& response = sendRequest(headers); @@ -168,7 +168,7 @@ TEST_P(CsrfFilterIntegrationTest, TestEnforcesPatch) { {":method", "PATCH"}, {":path", "/"}, {":scheme", "http"}, - {"origin", "cross-origin"}, + {"origin", "http://cross-origin"}, {"host", "test-origin"}, }}; const auto& response = sendRequest(headers); @@ -181,7 +181,7 @@ TEST_P(CsrfFilterIntegrationTest, TestRefererFallback) { Http::TestRequestHeaderMapImpl headers = {{":method", "DELETE"}, {":path", "/"}, {":scheme", "http"}, - {"referer", "test-origin"}, + {"referer", "http://test-origin"}, {"host", "test-origin"}}; const auto& response = sendRequestAndWaitForResponse(headers); EXPECT_TRUE(response->complete()); 
@@ -203,7 +203,7 @@ TEST_P(CsrfFilterIntegrationTest, TestShadowOnlyMode) { {":method", "PUT"}, {":path", "/"}, {":scheme", "http"}, - {"origin", "cross-origin"}, + {"origin", "http://cross-origin"}, {"host", "localhost"}, }}; const auto& response = sendRequestAndWaitForResponse(headers); @@ -217,7 +217,7 @@ TEST_P(CsrfFilterIntegrationTest, TestFilterAndShadowEnabled) { {":method", "PUT"}, {":path", "/"}, {":scheme", "http"}, - {"origin", "cross-origin"}, + {"origin", "http://cross-origin"}, {"host", "localhost"}, }}; const auto& response = sendRequest(headers); diff --git a/test/extensions/filters/http/csrf/csrf_filter_test.cc b/test/extensions/filters/http/csrf/csrf_filter_test.cc index 634a01401ea9..dbac2d629e2e 100644 --- a/test/extensions/filters/http/csrf/csrf_filter_test.cc +++ b/test/extensions/filters/http/csrf/csrf_filter_test.cc @@ -124,7 +124,8 @@ TEST_F(CsrfFilterTest, RequestWithoutOrigin) { } TEST_F(CsrfFilterTest, RequestWithoutDestination) { - Http::TestRequestHeaderMapImpl request_headers{{":method", "PUT"}, {"origin", "localhost"}}; + Http::TestRequestHeaderMapImpl request_headers{{":method", "PUT"}, + {"origin", "http://localhost"}}; EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_.decodeHeaders(request_headers, false)); @@ -138,7 +139,31 @@ TEST_F(CsrfFilterTest, RequestWithoutDestination) { TEST_F(CsrfFilterTest, RequestWithInvalidOrigin) { Http::TestRequestHeaderMapImpl request_headers{ - {":method", "PUT"}, {"origin", "cross-origin"}, {":authority", "localhost"}}; + {":method", "PUT"}, {"origin", "http://cross-origin"}, {":authority", "localhost"}}; + + Http::TestResponseHeaderMapImpl response_headers{ + {":status", "403"}, + {"content-length", "14"}, + {"content-type", "text/plain"}, + }; + EXPECT_CALL(decoder_callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), false)); + + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_.decodeHeaders(request_headers, false)); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false)); + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_)); + + EXPECT_EQ(0U, config_->stats().missing_source_origin_.value()); + EXPECT_EQ(1U, config_->stats().request_invalid_.value()); + EXPECT_EQ(0U, config_->stats().request_valid_.value()); + EXPECT_EQ("csrf_origin_mismatch", decoder_callbacks_.details_); +} + +TEST_F(CsrfFilterTest, RequestWithInvalidOriginDifferentNonStandardPorts) { + Http::TestRequestHeaderMapImpl request_headers{{":method", "PUT"}, + {"origin", "http://localhost:90"}, + {":authority", "localhost:91"}, + {":scheme", "http"}}; Http::TestResponseHeaderMapImpl response_headers{ {":status", "403"}, @@ -159,8 +184,42 @@ TEST_F(CsrfFilterTest, RequestWithInvalidOrigin) { } TEST_F(CsrfFilterTest, RequestWithValidOrigin) { - Http::TestRequestHeaderMapImpl request_headers{ - {":method", "PUT"}, {"origin", "localhost"}, {"host", "localhost"}}; + Http::TestRequestHeaderMapImpl request_headers{{":method", "PUT"}, + {"origin", "http://localhost"}, + {"host", "localhost"}, + {":scheme", "http"}}; + + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false)); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false)); + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_)); + + EXPECT_EQ(0U, config_->stats().missing_source_origin_.value()); + EXPECT_EQ(0U, config_->stats().request_invalid_.value()); + EXPECT_EQ(1U, 
config_->stats().request_valid_.value()); +} + +TEST_F(CsrfFilterTest, RequestWithValidOriginNonStandardPort) { + Http::TestRequestHeaderMapImpl request_headers{{":method", "PUT"}, + {"origin", "http://localhost:88"}, + {"host", "localhost:88"}, + {":scheme", "http"}}; + + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false)); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false)); + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(request_trailers_)); + + EXPECT_EQ(0U, config_->stats().missing_source_origin_.value()); + EXPECT_EQ(0U, config_->stats().request_invalid_.value()); + EXPECT_EQ(1U, config_->stats().request_valid_.value()); +} + +// This works because gURL drops the port for hostAndPort() when they are standard +// ports (e.g.: 80 & 443). +TEST_F(CsrfFilterTest, RequestWithValidOriginHttpVsHttps) { + Http::TestRequestHeaderMapImpl request_headers{{":method", "PUT"}, + {"origin", "https://localhost"}, + {"host", "localhost"}, + {":scheme", "http"}}; EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false)); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false)); @@ -173,7 +232,7 @@ TEST_F(CsrfFilterTest, RequestWithValidOrigin) { TEST_F(CsrfFilterTest, RequestWithInvalidOriginCsrfDisabledShadowDisabled) { Http::TestRequestHeaderMapImpl request_headers{ - {":method", "PUT"}, {"origin", "cross-origin"}, {"host", "localhost"}}; + {":method", "PUT"}, {"origin", "http://cross-origin"}, {"host", "localhost"}}; setFilterEnabled(false); @@ -188,8 +247,10 @@ TEST_F(CsrfFilterTest, RequestWithInvalidOriginCsrfDisabledShadowDisabled) { } TEST_F(CsrfFilterTest, RequestWithInvalidOriginCsrfDisabledShadowEnabled) { - Http::TestRequestHeaderMapImpl request_headers{ - {":method", "PUT"}, {"origin", "cross-origin"}, {"host", "localhost"}}; + Http::TestRequestHeaderMapImpl request_headers{{":method", "PUT"}, + {"origin", "http://cross-origin"}, + {"host", "localhost"}, + {":scheme", "http"}}; setFilterEnabled(false); setShadowEnabled(true); @@ -204,8 +265,10 @@ TEST_F(CsrfFilterTest, RequestWithInvalidOriginCsrfDisabledShadowEnabled) { } TEST_F(CsrfFilterTest, RequestWithValidOriginCsrfDisabledShadowEnabled) { - Http::TestRequestHeaderMapImpl request_headers{ - {":method", "PUT"}, {"origin", "localhost"}, {"host", "localhost"}}; + Http::TestRequestHeaderMapImpl request_headers{{":method", "PUT"}, + {"origin", "http://localhost"}, + {"host", "localhost"}, + {":scheme", "http"}}; setFilterEnabled(false); setShadowEnabled(true); @@ -220,8 +283,10 @@ TEST_F(CsrfFilterTest, RequestWithValidOriginCsrfDisabledShadowEnabled) { } TEST_F(CsrfFilterTest, RequestWithInvalidOriginCsrfEnabledShadowEnabled) { - Http::TestRequestHeaderMapImpl request_headers{ - {":method", "PUT"}, {"origin", "cross-origin"}, {"host", "localhost"}}; + Http::TestRequestHeaderMapImpl request_headers{{":method", "PUT"}, + {"origin", "http://cross-origin"}, + {"host", "localhost"}, + {":scheme", "http"}}; setShadowEnabled(true); @@ -243,8 +308,10 @@ TEST_F(CsrfFilterTest, RequestWithInvalidOriginCsrfEnabledShadowEnabled) { } TEST_F(CsrfFilterTest, RequestWithValidOriginCsrfEnabledShadowEnabled) { - Http::TestRequestHeaderMapImpl request_headers{ - {":method", "PUT"}, {"origin", "localhost"}, {"host", "localhost"}}; + Http::TestRequestHeaderMapImpl request_headers{{":method", "PUT"}, + {"origin", "http://localhost"}, + {"host", "localhost"}, + {":scheme", "http"}}; setShadowEnabled(true); @@ 
-295,8 +362,10 @@ TEST_F(CsrfFilterTest, EmptyRouteEntry) { } TEST_F(CsrfFilterTest, NoCsrfEntry) { - Http::TestRequestHeaderMapImpl request_headers{ - {":method", "PUT"}, {"origin", "cross-origin"}, {"host", "localhost"}}; + Http::TestRequestHeaderMapImpl request_headers{{":method", "PUT"}, + {"origin", "http://cross-origin"}, + {"host", "localhost"}, + {":scheme", "http"}}; setRoutePolicy(nullptr); setVirtualHostPolicy(nullptr); @@ -311,7 +380,8 @@ TEST_F(CsrfFilterTest, NoCsrfEntry) { } TEST_F(CsrfFilterTest, NoRouteCsrfEntry) { - Http::TestRequestHeaderMapImpl request_headers{{":method", "POST"}, {"origin", "localhost"}}; + Http::TestRequestHeaderMapImpl request_headers{{":method", "POST"}, + {"origin", "http://localhost"}}; setRoutePolicy(nullptr); @@ -326,7 +396,8 @@ TEST_F(CsrfFilterTest, NoRouteCsrfEntry) { } TEST_F(CsrfFilterTest, NoVHostCsrfEntry) { - Http::TestRequestHeaderMapImpl request_headers{{":method", "DELETE"}, {"origin", "localhost"}}; + Http::TestRequestHeaderMapImpl request_headers{{":method", "DELETE"}, + {"origin", "http://localhost"}}; setVirtualHostPolicy(nullptr); @@ -341,7 +412,8 @@ TEST_F(CsrfFilterTest, NoVHostCsrfEntry) { } TEST_F(CsrfFilterTest, RequestFromAdditionalExactOrigin) { - Http::TestRequestHeaderMapImpl request_headers{{":method", "PUT"}, {"origin", "additionalhost"}}; + Http::TestRequestHeaderMapImpl request_headers{{":method", "PUT"}, + {"origin", "http://additionalhost"}}; EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false)); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false)); @@ -353,7 +425,8 @@ TEST_F(CsrfFilterTest, RequestFromAdditionalExactOrigin) { } TEST_F(CsrfFilterTest, RequestFromAdditionalRegexOrigin) { - Http::TestRequestHeaderMapImpl request_headers{{":method", "PUT"}, {"origin", "www-1.allow.com"}}; + Http::TestRequestHeaderMapImpl request_headers{{":method", "PUT"}, + {"origin", "http://www-1.allow.com"}}; EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false)); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data_, false)); @@ -365,7 +438,8 @@ TEST_F(CsrfFilterTest, RequestFromAdditionalRegexOrigin) { } TEST_F(CsrfFilterTest, RequestFromInvalidAdditionalRegexOrigin) { - Http::TestRequestHeaderMapImpl request_headers{{":method", "PUT"}, {"origin", "www.allow.com"}}; + Http::TestRequestHeaderMapImpl request_headers{{":method", "PUT"}, + {"origin", "http://www.allow.com"}}; EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_.decodeHeaders(request_headers, false)); From bfab9edadb1937bc1eac3e50f978175b3ddcdebc Mon Sep 17 00:00:00 2001 From: Alex Konradi Date: Fri, 17 Jul 2020 12:36:34 -0400 Subject: [PATCH 666/909] http: use std::function for headermap iteration (#12103) The existing cast-to-void*-then-back method for passing in a context is type-unsafe and can make for hard-to-locate errors. By using a std::function instead of a function pointer, the caller can get compilation error instead of runtime errors, and doesn't have to do any sort of bundling dance with std::pair or a custom struct to pass multiple context items in. 
Signed-off-by: Alex Konradi --- include/envoy/http/header_map.h | 11 +- .../common/grpc/google_async_client_impl.cc | 13 +- source/common/http/header_list_view.cc | 14 +- source/common/http/header_map_impl.cc | 60 +++---- source/common/http/header_map_impl.h | 12 +- source/common/http/header_utility.cc | 37 ++-- source/common/http/http1/codec_impl.cc | 44 +++-- source/common/http/http2/codec_impl.cc | 11 +- source/common/http/utility.cc | 73 ++++---- source/extensions/common/aws/utility.cc | 59 +++---- .../common/ext_authz/check_request_utils.cc | 21 +-- .../common/ext_authz/ext_authz_http_impl.cc | 41 +++-- .../http/aws_lambda/aws_lambda_filter.cc | 33 ++-- .../filters/http/grpc_web/grpc_web_filter.cc | 17 +- .../extensions/filters/http/lua/lua_filter.cc | 20 +-- .../extensions/filters/http/lua/wrappers.cc | 11 +- .../filters/http/tap/tap_config_impl.cc | 45 +++-- .../thrift_proxy/header_transport_impl.cc | 13 +- .../thrift_proxy/twitter_protocol_impl.cc | 57 +++---- .../grpc_credentials/aws_iam/config.cc | 21 +-- .../quic_listeners/quiche/envoy_quic_utils.cc | 13 +- .../common/ot/opentracing_driver_impl.cc | 27 +-- test/common/http/header_map_impl_fuzz_test.cc | 24 ++- .../common/http/header_map_impl_speed_test.cc | 28 +-- test/common/http/header_map_impl_test.cc | 160 +++++------------- test/common/http/header_utility_test.cc | 13 +- test/common/router/header_formatter_test.cc | 23 ++- test/common/router/router_test.cc | 15 +- .../aws_lambda_filter_integration_test.cc | 24 +-- .../http/aws_lambda/aws_lambda_filter_test.cc | 32 ++-- .../ext_authz/ext_authz_integration_test.cc | 20 +-- .../grpc_json_transcoder_integration_test.cc | 6 +- .../ratelimit/ratelimit_integration_test.cc | 44 ++--- .../aws_iam/aws_iam_grpc_credentials_test.cc | 30 +++- .../tracers/zipkin/zipkin_tracer_impl_test.cc | 7 +- test/fuzz/utility.h | 14 +- test/integration/http_integration.cc | 18 +- test/mocks/http/mocks.h | 43 +++-- test/test_common/printers.cc | 13 +- test/test_common/utility.cc | 32 ++-- test/test_common/utility.h | 8 +- 41 files changed, 499 insertions(+), 708 deletions(-) diff --git a/include/envoy/http/header_map.h b/include/envoy/http/header_map.h index 82a79d4fa6e5..90466744ce42 100644 --- a/include/envoy/http/header_map.h +++ b/include/envoy/http/header_map.h @@ -519,24 +519,21 @@ class HeaderMap { /** * Callback when calling iterate() over a const header map. * @param header supplies the header entry. - * @param context supplies the context passed to iterate(). - * @return Iterate::Continue to continue iteration. + * @return Iterate::Continue to continue iteration, or Iterate::Break to stop; */ - using ConstIterateCb = Iterate (*)(const HeaderEntry&, void*); + using ConstIterateCb = std::function; /** * Iterate over a constant header map. * @param cb supplies the iteration callback. - * @param context supplies the context that will be passed to the callback. */ - virtual void iterate(ConstIterateCb cb, void* context) const PURE; + virtual void iterate(ConstIterateCb cb) const PURE; /** * Iterate over a constant header map in reverse order. * @param cb supplies the iteration callback. - * @param context supplies the context that will be passed to the callback. */ - virtual void iterateReverse(ConstIterateCb cb, void* context) const PURE; + virtual void iterateReverse(ConstIterateCb cb) const PURE; /** * Clears the headers in the map. 
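For readers following the callback change in this patch, a minimal self-contained sketch of the calling convention before and after. HeaderMap, HeaderEntry, and Iterate below are simplified stand-ins for the Envoy types, not the real classes; the point is only the shape of the API, where a function pointer plus a void* context becomes a capturing std::function, so a mismatched context type fails to compile instead of misbehaving at runtime.

#include <functional>
#include <iostream>
#include <string>
#include <vector>

// Simplified stand-ins for the Envoy types; only the calling convention matters here.
struct HeaderEntry {
  std::string key;
  std::string value;
};

struct HeaderMap {
  enum class Iterate { Continue, Break };
  // After this patch: a std::function, so callers capture their context directly.
  using ConstIterateCb = std::function<Iterate(const HeaderEntry&)>;

  void iterate(const ConstIterateCb& cb) const {
    for (const auto& h : headers_) {
      if (cb(h) == Iterate::Break) {
        break;
      }
    }
  }

  std::vector<HeaderEntry> headers_;
};

int main() {
  const HeaderMap headers{{{"content-type", "application/grpc"}, {"x-debug", "1"}}};

  // Before this patch, this would have been a plain function taking
  // (const HeaderEntry&, void*), with &keys smuggled through the void* and
  // cast back inside the callback. The capturing lambda removes the cast.
  std::vector<std::string> keys;
  headers.iterate([&keys](const HeaderEntry& h) {
    keys.push_back(h.key);
    return HeaderMap::Iterate::Continue;
  });

  for (const auto& k : keys) {
    std::cout << k << "\n";
  }
  return 0;
}

The same shape recurs throughout the diffs that follow: each static_cast of a context pointer inside a callback is replaced by a lambda that captures the object the void* used to smuggle through, and helpers such as collectAllHeaders and fillHeaderList become small factories that return a ConstIterateCb closed over their destination.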
diff --git a/source/common/grpc/google_async_client_impl.cc b/source/common/grpc/google_async_client_impl.cc index d22903da723e..37f5e858a495 100644 --- a/source/common/grpc/google_async_client_impl.cc +++ b/source/common/grpc/google_async_client_impl.cc @@ -173,14 +173,11 @@ void GoogleAsyncStreamImpl::initialize(bool /*buffer_body_for_retry*/) { // copy headers here. auto initial_metadata = Http::RequestHeaderMapImpl::create(); callbacks_.onCreateInitialMetadata(*initial_metadata); - initial_metadata->iterate( - [](const Http::HeaderEntry& header, void* ctxt) { - auto* client_context = static_cast(ctxt); - client_context->AddMetadata(std::string(header.key().getStringView()), - std::string(header.value().getStringView())); - return Http::HeaderMap::Iterate::Continue; - }, - &ctxt_); + initial_metadata->iterate([this](const Http::HeaderEntry& header) { + ctxt_.AddMetadata(std::string(header.key().getStringView()), + std::string(header.value().getStringView())); + return Http::HeaderMap::Iterate::Continue; + }); // Invoke stub call. rw_ = parent_.stub_->PrepareCall(&ctxt_, "/" + service_full_name_ + "/" + method_name_, &parent_.tls_.completionQueue()); diff --git a/source/common/http/header_list_view.cc b/source/common/http/header_list_view.cc index a29bc84bf86f..adfb3f0657fa 100644 --- a/source/common/http/header_list_view.cc +++ b/source/common/http/header_list_view.cc @@ -4,15 +4,11 @@ namespace Envoy { namespace Http { HeaderListView::HeaderListView(const HeaderMap& header_map) { - header_map.iterate( - [](const Http::HeaderEntry& header, void* context) -> HeaderMap::Iterate { - auto* context_ptr = static_cast(context); - context_ptr->keys_.emplace_back(std::reference_wrapper(header.key())); - context_ptr->values_.emplace_back( - std::reference_wrapper(header.value())); - return HeaderMap::Iterate::Continue; - }, - this); + header_map.iterate([this](const Http::HeaderEntry& header) -> HeaderMap::Iterate { + keys_.emplace_back(std::reference_wrapper(header.key())); + values_.emplace_back(std::reference_wrapper(header.value())); + return HeaderMap::Iterate::Continue; + }); } } // namespace Http diff --git a/source/common/http/header_map_impl.cc b/source/common/http/header_map_impl.cc index 8803997b706d..35572390820f 100644 --- a/source/common/http/header_map_impl.cc +++ b/source/common/http/header_map_impl.cc @@ -264,28 +264,27 @@ void HeaderMapImpl::subtractSize(uint64_t size) { } void HeaderMapImpl::copyFrom(HeaderMap& lhs, const HeaderMap& header_map) { - header_map.iterate( - [](const HeaderEntry& header, void* context) -> HeaderMap::Iterate { - // TODO(mattklein123) PERF: Avoid copying here if not necessary. - HeaderString key_string; - key_string.setCopy(header.key().getStringView()); - HeaderString value_string; - value_string.setCopy(header.value().getStringView()); - - static_cast(context)->addViaMove(std::move(key_string), - std::move(value_string)); - return HeaderMap::Iterate::Continue; - }, - &lhs); + header_map.iterate([&lhs](const HeaderEntry& header) -> HeaderMap::Iterate { + // TODO(mattklein123) PERF: Avoid copying here if not necessary. + HeaderString key_string; + key_string.setCopy(header.key().getStringView()); + HeaderString value_string; + value_string.setCopy(header.value().getStringView()); + + lhs.addViaMove(std::move(key_string), std::move(value_string)); + return HeaderMap::Iterate::Continue; + }); } namespace { // This is currently only used in tests and is not optimized for performance. 
-HeaderMap::Iterate collectAllHeaders(const HeaderEntry& header, void* headers) { - static_cast>*>(headers)->push_back( - std::make_pair(header.key().getStringView(), header.value().getStringView())); - return HeaderMap::Iterate::Continue; +HeaderMap::ConstIterateCb +collectAllHeaders(std::vector>* dest) { + return [dest](const HeaderEntry& header) -> HeaderMap::Iterate { + dest->push_back(std::make_pair(header.key().getStringView(), header.value().getStringView())); + return HeaderMap::Iterate::Continue; + }; }; } // namespace @@ -298,7 +297,7 @@ bool HeaderMapImpl::operator==(const HeaderMap& rhs) const { std::vector> rhs_headers; rhs_headers.reserve(rhs.size()); - rhs.iterate(collectAllHeaders, &rhs_headers); + rhs.iterate(collectAllHeaders(&rhs_headers)); auto i = headers_.begin(); auto j = rhs_headers.begin(); @@ -462,17 +461,17 @@ HeaderEntry* HeaderMapImpl::getExisting(const LowerCaseString& key) { return nullptr; } -void HeaderMapImpl::iterate(HeaderMap::ConstIterateCb cb, void* context) const { +void HeaderMapImpl::iterate(HeaderMap::ConstIterateCb cb) const { for (const HeaderEntryImpl& header : headers_) { - if (cb(header, context) == HeaderMap::Iterate::Break) { + if (cb(header) == HeaderMap::Iterate::Break) { break; } } } -void HeaderMapImpl::iterateReverse(HeaderMap::ConstIterateCb cb, void* context) const { +void HeaderMapImpl::iterateReverse(HeaderMap::ConstIterateCb cb) const { for (auto it = headers_.rbegin(); it != headers_.rend(); it++) { - if (cb(*it, context) == HeaderMap::Iterate::Break) { + if (cb(*it) == HeaderMap::Iterate::Break) { break; } } @@ -527,17 +526,12 @@ size_t HeaderMapImpl::removePrefix(const LowerCaseString& prefix) { } void HeaderMapImpl::dumpState(std::ostream& os, int indent_level) const { - using IterateData = std::pair; - const char* spaces = spacesForLevel(indent_level); - IterateData iterate_data = std::make_pair(&os, spaces); - iterate( - [](const HeaderEntry& header, void* context) -> HeaderMap::Iterate { - auto* data = static_cast(context); - *data->first << data->second << "'" << header.key().getStringView() << "', '" - << header.value().getStringView() << "'\n"; - return HeaderMap::Iterate::Continue; - }, - &iterate_data); + iterate([&os, + spaces = spacesForLevel(indent_level)](const HeaderEntry& header) -> HeaderMap::Iterate { + os << spaces << "'" << header.key().getStringView() << "', '" << header.value().getStringView() + << "'\n"; + return HeaderMap::Iterate::Continue; + }); } HeaderMapImpl::HeaderEntryImpl& HeaderMapImpl::maybeCreateInline(HeaderEntryImpl** entry, diff --git a/source/common/http/header_map_impl.h b/source/common/http/header_map_impl.h index 693762a21aeb..f9dba6b56ab5 100644 --- a/source/common/http/header_map_impl.h +++ b/source/common/http/header_map_impl.h @@ -86,8 +86,8 @@ class HeaderMapImpl : NonCopyable { void setCopy(const LowerCaseString& key, absl::string_view value); uint64_t byteSize() const; const HeaderEntry* get(const LowerCaseString& key) const; - void iterate(HeaderMap::ConstIterateCb cb, void* context) const; - void iterateReverse(HeaderMap::ConstIterateCb cb, void* context) const; + void iterate(HeaderMap::ConstIterateCb cb) const; + void iterateReverse(HeaderMap::ConstIterateCb cb) const; void clear(); size_t remove(const LowerCaseString& key); size_t removePrefix(const LowerCaseString& key); @@ -298,11 +298,9 @@ template class TypedHeaderMapImpl : public HeaderMapImpl, publ const HeaderEntry* get(const LowerCaseString& key) const override { return HeaderMapImpl::get(key); } - void 
iterate(HeaderMap::ConstIterateCb cb, void* context) const override { - HeaderMapImpl::iterate(cb, context); - } - void iterateReverse(HeaderMap::ConstIterateCb cb, void* context) const override { - HeaderMapImpl::iterateReverse(cb, context); + void iterate(HeaderMap::ConstIterateCb cb) const override { HeaderMapImpl::iterate(cb); } + void iterateReverse(HeaderMap::ConstIterateCb cb) const override { + HeaderMapImpl::iterateReverse(cb); } void clear() override { HeaderMapImpl::clear(); } size_t remove(const LowerCaseString& key) override { return HeaderMapImpl::remove(key); } diff --git a/source/common/http/header_utility.cc b/source/common/http/header_utility.cc index b6119cc79729..ebe6d26e597e 100644 --- a/source/common/http/header_utility.cc +++ b/source/common/http/header_utility.cc @@ -78,18 +78,13 @@ HeaderUtility::HeaderData::HeaderData(const envoy::config::route::v3::HeaderMatc void HeaderUtility::getAllOfHeader(const HeaderMap& headers, absl::string_view key, std::vector& out) { - auto args = std::make_pair(LowerCaseString(std::string(key)), &out); - - headers.iterate( - [](const HeaderEntry& header, void* context) -> HeaderMap::Iterate { - auto key_ret = - static_cast*>*>(context); - if (header.key() == key_ret->first.get().c_str()) { - key_ret->second->emplace_back(header.value().getStringView()); - } - return HeaderMap::Iterate::Continue; - }, - &args); + headers.iterate([key = LowerCaseString(std::string(key)), + &out](const HeaderEntry& header) -> HeaderMap::Iterate { + if (header.key() == key.get().c_str()) { + out.emplace_back(header.value().getStringView()); + } + return HeaderMap::Iterate::Continue; + }); } bool HeaderUtility::matchHeaders(const HeaderMap& request_headers, @@ -170,16 +165,14 @@ bool HeaderUtility::isConnectResponse(const RequestHeaderMapPtr& request_headers } void HeaderUtility::addHeaders(HeaderMap& headers, const HeaderMap& headers_to_add) { - headers_to_add.iterate( - [](const HeaderEntry& header, void* context) -> HeaderMap::Iterate { - HeaderString k; - k.setCopy(header.key().getStringView()); - HeaderString v; - v.setCopy(header.value().getStringView()); - static_cast(context)->addViaMove(std::move(k), std::move(v)); - return HeaderMap::Iterate::Continue; - }, - &headers); + headers_to_add.iterate([&headers](const HeaderEntry& header) -> HeaderMap::Iterate { + HeaderString k; + k.setCopy(header.key().getStringView()); + HeaderString v; + v.setCopy(header.value().getStringView()); + headers.addViaMove(std::move(k), std::move(v)); + return HeaderMap::Iterate::Continue; + }); } bool HeaderUtility::isEnvoyInternalRequest(const RequestHeaderMap& headers) { diff --git a/source/common/http/http1/codec_impl.cc b/source/common/http/http1/codec_impl.cc index ad47f9fe564e..eb71d3fa70c1 100644 --- a/source/common/http/http1/codec_impl.cc +++ b/source/common/http/http1/codec_impl.cc @@ -110,27 +110,24 @@ void ResponseEncoderImpl::encode100ContinueHeaders(const ResponseHeaderMap& head void StreamEncoderImpl::encodeHeadersBase(const RequestOrResponseHeaderMap& headers, absl::optional status, bool end_stream) { bool saw_content_length = false; - headers.iterate( - [](const HeaderEntry& header, void* context) -> HeaderMap::Iterate { - absl::string_view key_to_use = header.key().getStringView(); - uint32_t key_size_to_use = header.key().size(); - // Translate :authority -> host so that upper layers do not need to deal with this. 
- if (key_size_to_use > 1 && key_to_use[0] == ':' && key_to_use[1] == 'a') { - key_to_use = absl::string_view(Headers::get().HostLegacy.get()); - key_size_to_use = Headers::get().HostLegacy.get().size(); - } + headers.iterate([this](const HeaderEntry& header) -> HeaderMap::Iterate { + absl::string_view key_to_use = header.key().getStringView(); + uint32_t key_size_to_use = header.key().size(); + // Translate :authority -> host so that upper layers do not need to deal with this. + if (key_size_to_use > 1 && key_to_use[0] == ':' && key_to_use[1] == 'a') { + key_to_use = absl::string_view(Headers::get().HostLegacy.get()); + key_size_to_use = Headers::get().HostLegacy.get().size(); + } - // Skip all headers starting with ':' that make it here. - if (key_to_use[0] == ':') { - return HeaderMap::Iterate::Continue; - } + // Skip all headers starting with ':' that make it here. + if (key_to_use[0] == ':') { + return HeaderMap::Iterate::Continue; + } - static_cast(context)->encodeFormattedHeader( - key_to_use, header.value().getStringView()); + encodeFormattedHeader(key_to_use, header.value().getStringView()); - return HeaderMap::Iterate::Continue; - }, - this); + return HeaderMap::Iterate::Continue; + }); if (headers.ContentLength()) { saw_content_length = true; @@ -234,13 +231,10 @@ void StreamEncoderImpl::encodeTrailersBase(const HeaderMap& trailers) { // Finalize the body connection_.buffer().add(LAST_CHUNK); - trailers.iterate( - [](const HeaderEntry& header, void* context) -> HeaderMap::Iterate { - static_cast(context)->encodeFormattedHeader( - header.key().getStringView(), header.value().getStringView()); - return HeaderMap::Iterate::Continue; - }, - this); + trailers.iterate([this](const HeaderEntry& header) -> HeaderMap::Iterate { + encodeFormattedHeader(header.key().getStringView(), header.value().getStringView()); + return HeaderMap::Iterate::Continue; + }); connection_.flushOutput(); connection_.buffer().add(CRLF); diff --git a/source/common/http/http2/codec_impl.cc b/source/common/http/http2/codec_impl.cc index b25b8fa4bef5..35c2ee68186e 100644 --- a/source/common/http/http2/codec_impl.cc +++ b/source/common/http/http2/codec_impl.cc @@ -128,13 +128,10 @@ static void insertHeader(std::vector& headers, const HeaderEntry& he void ConnectionImpl::StreamImpl::buildHeaders(std::vector& final_headers, const HeaderMap& headers) { final_headers.reserve(headers.size()); - headers.iterate( - [](const HeaderEntry& header, void* context) -> HeaderMap::Iterate { - std::vector* final_headers = static_cast*>(context); - insertHeader(*final_headers, header); - return HeaderMap::Iterate::Continue; - }, - &final_headers); + headers.iterate([&final_headers](const HeaderEntry& header) -> HeaderMap::Iterate { + insertHeader(final_headers, header); + return HeaderMap::Iterate::Continue; + }); } void ConnectionImpl::ServerStreamImpl::encode100ContinueHeaders(const ResponseHeaderMap& headers) { diff --git a/source/common/http/utility.cc b/source/common/http/utility.cc index 12006e143fa2..cac031e3b2a0 100644 --- a/source/common/http/utility.cc +++ b/source/common/http/utility.cc @@ -310,50 +310,41 @@ absl::string_view Utility::findQueryStringStart(const HeaderString& path) { std::string Utility::parseCookieValue(const HeaderMap& headers, const std::string& key) { - struct State { - std::string key_; - std::string ret_; - }; - - State state; - state.key_ = key; - - headers.iterateReverse( - [](const HeaderEntry& header, void* context) -> HeaderMap::Iterate { - // Find the cookie headers in the request (typically, 
there's only one). - if (header.key() == Http::Headers::get().Cookie.get()) { - - // Split the cookie header into individual cookies. - for (const auto s : StringUtil::splitToken(header.value().getStringView(), ";")) { - // Find the key part of the cookie (i.e. the name of the cookie). - size_t first_non_space = s.find_first_not_of(" "); - size_t equals_index = s.find('='); - if (equals_index == absl::string_view::npos) { - // The cookie is malformed if it does not have an `=`. Continue - // checking other cookies in this header. - continue; - } - const absl::string_view k = s.substr(first_non_space, equals_index - first_non_space); - State* state = static_cast(context); - // If the key matches, parse the value from the rest of the cookie string. - if (k == state->key_) { - absl::string_view v = s.substr(equals_index + 1, s.size() - 1); - - // Cookie values may be wrapped in double quotes. - // https://tools.ietf.org/html/rfc6265#section-4.1.1 - if (v.size() >= 2 && v.back() == '"' && v[0] == '"') { - v = v.substr(1, v.size() - 2); - } - state->ret_ = std::string{v}; - return HeaderMap::Iterate::Break; - } + std::string ret; + + headers.iterateReverse([&key, &ret](const HeaderEntry& header) -> HeaderMap::Iterate { + // Find the cookie headers in the request (typically, there's only one). + if (header.key() == Http::Headers::get().Cookie.get()) { + + // Split the cookie header into individual cookies. + for (const auto s : StringUtil::splitToken(header.value().getStringView(), ";")) { + // Find the key part of the cookie (i.e. the name of the cookie). + size_t first_non_space = s.find_first_not_of(" "); + size_t equals_index = s.find('='); + if (equals_index == absl::string_view::npos) { + // The cookie is malformed if it does not have an `=`. Continue + // checking other cookies in this header. + continue; + } + const absl::string_view k = s.substr(first_non_space, equals_index - first_non_space); + // If the key matches, parse the value from the rest of the cookie string. + if (k == key) { + absl::string_view v = s.substr(equals_index + 1, s.size() - 1); + + // Cookie values may be wrapped in double quotes. 
+ // https://tools.ietf.org/html/rfc6265#section-4.1.1 + if (v.size() >= 2 && v.back() == '"' && v[0] == '"') { + v = v.substr(1, v.size() - 2); } + ret = std::string{v}; + return HeaderMap::Iterate::Break; } - return HeaderMap::Iterate::Continue; - }, - &state); + } + } + return HeaderMap::Iterate::Continue; + }); - return state.ret_; + return ret; } std::string Utility::makeSetCookieValue(const std::string& key, const std::string& value, diff --git a/source/extensions/common/aws/utility.cc b/source/extensions/common/aws/utility.cc index f13012860e70..794051e0d67a 100644 --- a/source/extensions/common/aws/utility.cc +++ b/source/extensions/common/aws/utility.cc @@ -14,37 +14,34 @@ namespace Aws { std::map Utility::canonicalizeHeaders(const Http::RequestHeaderMap& headers) { std::map out; - headers.iterate( - [](const Http::HeaderEntry& entry, void* context) -> Http::HeaderMap::Iterate { - auto* map = static_cast*>(context); - // Skip empty headers - if (entry.key().empty() || entry.value().empty()) { - return Http::HeaderMap::Iterate::Continue; - } - // Pseudo-headers should not be canonicalized - if (!entry.key().getStringView().empty() && entry.key().getStringView()[0] == ':') { - return Http::HeaderMap::Iterate::Continue; - } - // Skip headers that are likely to mutate, when crossing proxies - const auto key = entry.key().getStringView(); - if (key == Http::Headers::get().ForwardedFor.get() || - key == Http::Headers::get().ForwardedProto.get() || key == "x-amzn-trace-id") { - return Http::HeaderMap::Iterate::Continue; - } - - std::string value(entry.value().getStringView()); - // Remove leading, trailing, and deduplicate repeated ascii spaces - absl::RemoveExtraAsciiWhitespace(&value); - const auto iter = map->find(std::string(entry.key().getStringView())); - // If the entry already exists, append the new value to the end - if (iter != map->end()) { - iter->second += fmt::format(",{}", value); - } else { - map->emplace(std::string(entry.key().getStringView()), value); - } - return Http::HeaderMap::Iterate::Continue; - }, - &out); + headers.iterate([&out](const Http::HeaderEntry& entry) -> Http::HeaderMap::Iterate { + // Skip empty headers + if (entry.key().empty() || entry.value().empty()) { + return Http::HeaderMap::Iterate::Continue; + } + // Pseudo-headers should not be canonicalized + if (!entry.key().getStringView().empty() && entry.key().getStringView()[0] == ':') { + return Http::HeaderMap::Iterate::Continue; + } + // Skip headers that are likely to mutate, when crossing proxies + const auto key = entry.key().getStringView(); + if (key == Http::Headers::get().ForwardedFor.get() || + key == Http::Headers::get().ForwardedProto.get() || key == "x-amzn-trace-id") { + return Http::HeaderMap::Iterate::Continue; + } + + std::string value(entry.value().getStringView()); + // Remove leading, trailing, and deduplicate repeated ascii spaces + absl::RemoveExtraAsciiWhitespace(&value); + const auto iter = out.find(std::string(entry.key().getStringView())); + // If the entry already exists, append the new value to the end + if (iter != out.end()) { + iter->second += fmt::format(",{}", value); + } else { + out.emplace(std::string(entry.key().getStringView()), value); + } + return Http::HeaderMap::Iterate::Continue; + }); // The AWS SDK has a quirk where it removes "default ports" (80, 443) from the host headers // Additionally, we canonicalize the :authority header as "host" // TODO(lavignes): This may need to be tweaked to canonicalize :authority for HTTP/2 requests diff --git 
a/source/extensions/filters/common/ext_authz/check_request_utils.cc b/source/extensions/filters/common/ext_authz/check_request_utils.cc index 7906ad3607f6..55847df1a946 100644 --- a/source/extensions/filters/common/ext_authz/check_request_utils.cc +++ b/source/extensions/filters/common/ext_authz/check_request_utils.cc @@ -115,18 +115,15 @@ void CheckRequestUtils::setHttpRequest( } // Fill in the headers. - auto mutable_headers = httpreq.mutable_headers(); - headers.iterate( - [](const Envoy::Http::HeaderEntry& e, void* ctx) { - // Skip any client EnvoyAuthPartialBody header, which could interfere with internal use. - if (e.key().getStringView() != Http::Headers::get().EnvoyAuthPartialBody.get()) { - auto* mutable_headers = static_cast*>(ctx); - (*mutable_headers)[std::string(e.key().getStringView())] = - std::string(e.value().getStringView()); - } - return Envoy::Http::HeaderMap::Iterate::Continue; - }, - mutable_headers); + auto* mutable_headers = httpreq.mutable_headers(); + headers.iterate([mutable_headers](const Envoy::Http::HeaderEntry& e) { + // Skip any client EnvoyAuthPartialBody header, which could interfere with internal use. + if (e.key().getStringView() != Http::Headers::get().EnvoyAuthPartialBody.get()) { + (*mutable_headers)[std::string(e.key().getStringView())] = + std::string(e.value().getStringView()); + } + return Envoy::Http::HeaderMap::Iterate::Continue; + }); // Set request body. if (max_request_bytes > 0 && decoding_buffer != nullptr) { diff --git a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc index 5505298fd881..fe7207bdfbd3 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc +++ b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc @@ -43,28 +43,25 @@ struct SuccessResponse { const MatcherSharedPtr& append_matchers, Response&& response) : headers_(headers), matchers_(matchers), append_matchers_(append_matchers), response_(std::make_unique(response)) { - headers_.iterate( - [](const Http::HeaderEntry& header, void* ctx) -> Http::HeaderMap::Iterate { - auto* context = static_cast(ctx); - // UpstreamHeaderMatcher - if (context->matchers_->matches(header.key().getStringView())) { - context->response_->headers_to_set.emplace_back( - Http::LowerCaseString{std::string(header.key().getStringView())}, - std::string(header.value().getStringView())); - } - if (context->append_matchers_->matches(header.key().getStringView())) { - // If there is an existing matching key in the current headers, the new entry will be - // appended with the same key. For example, given {"key": "value1"} headers, if there is - // a matching "key" from the authorization response headers {"key": "value2"}, the - // request to upstream server will have two entries for "key": {"key": "value1", "key": - // "value2"}. 
- context->response_->headers_to_add.emplace_back( - Http::LowerCaseString{std::string(header.key().getStringView())}, - std::string(header.value().getStringView())); - } - return Http::HeaderMap::Iterate::Continue; - }, - this); + headers_.iterate([this](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate { + // UpstreamHeaderMatcher + if (matchers_->matches(header.key().getStringView())) { + response_->headers_to_set.emplace_back( + Http::LowerCaseString{std::string(header.key().getStringView())}, + std::string(header.value().getStringView())); + } + if (append_matchers_->matches(header.key().getStringView())) { + // If there is an existing matching key in the current headers, the new entry will be + // appended with the same key. For example, given {"key": "value1"} headers, if there is + // a matching "key" from the authorization response headers {"key": "value2"}, the + // request to upstream server will have two entries for "key": {"key": "value1", "key": + // "value2"}. + response_->headers_to_add.emplace_back( + Http::LowerCaseString{std::string(header.key().getStringView())}, + std::string(header.value().getStringView())); + } + return Http::HeaderMap::Iterate::Continue; + }); } const Http::HeaderMap& headers_; diff --git a/source/extensions/filters/http/aws_lambda/aws_lambda_filter.cc b/source/extensions/filters/http/aws_lambda/aws_lambda_filter.cc index cd702fcba3c9..e6b93b3f90e2 100644 --- a/source/extensions/filters/http/aws_lambda/aws_lambda_filter.cc +++ b/source/extensions/filters/http/aws_lambda/aws_lambda_filter.cc @@ -275,24 +275,21 @@ void Filter::jsonizeRequest(Http::RequestHeaderMap const& headers, const Buffer: } // Wrap the headers - headers.iterate( - [](const Http::HeaderEntry& entry, void* ctx) -> Http::HeaderMap::Iterate { - auto* req = static_cast(ctx); - // ignore H2 pseudo-headers - if (absl::StartsWith(entry.key().getStringView(), ":")) { - return Http::HeaderMap::Iterate::Continue; - } - std::string name = std::string(entry.key().getStringView()); - auto it = req->mutable_headers()->find(name); - if (it == req->headers().end()) { - req->mutable_headers()->insert({name, std::string(entry.value().getStringView())}); - } else { - // Coalesce headers with multiple values - it->second += fmt::format(",{}", entry.value().getStringView()); - } - return Http::HeaderMap::Iterate::Continue; - }, - &json_req); + headers.iterate([&json_req](const Http::HeaderEntry& entry) -> Http::HeaderMap::Iterate { + // ignore H2 pseudo-headers + if (absl::StartsWith(entry.key().getStringView(), ":")) { + return Http::HeaderMap::Iterate::Continue; + } + std::string name = std::string(entry.key().getStringView()); + auto it = json_req.mutable_headers()->find(name); + if (it == json_req.headers().end()) { + json_req.mutable_headers()->insert({name, std::string(entry.value().getStringView())}); + } else { + // Coalesce headers with multiple values + it->second += fmt::format(",{}", entry.value().getStringView()); + } + return Http::HeaderMap::Iterate::Continue; + }); // Wrap the Query String if (headers.Path()) { diff --git a/source/extensions/filters/http/grpc_web/grpc_web_filter.cc b/source/extensions/filters/http/grpc_web/grpc_web_filter.cc index 8cecc0a4003b..7595839881c4 100644 --- a/source/extensions/filters/http/grpc_web/grpc_web_filter.cc +++ b/source/extensions/filters/http/grpc_web/grpc_web_filter.cc @@ -203,16 +203,13 @@ Http::FilterTrailersStatus GrpcWebFilter::encodeTrailers(Http::ResponseTrailerMa // Trailers are expected to come all in once, and will be encoded 
into one single trailers frame. // Trailers in the trailers frame are separated by CRLFs. Buffer::OwnedImpl temp; - trailers.iterate( - [](const Http::HeaderEntry& header, void* context) -> Http::HeaderMap::Iterate { - Buffer::Instance* temp = static_cast(context); - temp->add(header.key().getStringView().data(), header.key().size()); - temp->add(":"); - temp->add(header.value().getStringView().data(), header.value().size()); - temp->add("\r\n"); - return Http::HeaderMap::Iterate::Continue; - }, - &temp); + trailers.iterate([&temp](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate { + temp.add(header.key().getStringView().data(), header.key().size()); + temp.add(":"); + temp.add(header.value().getStringView().data(), header.value().size()); + temp.add("\r\n"); + return Http::HeaderMap::Iterate::Continue; + }); // Clear out the trailers so they don't get added since it is now in the body trailers.clear(); diff --git a/source/extensions/filters/http/lua/lua_filter.cc b/source/extensions/filters/http/lua/lua_filter.cc index 4f69c77bc9e1..4d09421d5e3e 100644 --- a/source/extensions/filters/http/lua/lua_filter.cc +++ b/source/extensions/filters/http/lua/lua_filter.cc @@ -330,17 +330,15 @@ void StreamHandleWrapper::onSuccess(const Http::AsyncClient::Request&, // We need to build a table with the headers as return param 1. The body will be return param 2. lua_newtable(coroutine_.luaState()); - response->headers().iterate( - [](const Http::HeaderEntry& header, void* context) -> Http::HeaderMap::Iterate { - lua_State* state = static_cast(context); - lua_pushlstring(state, header.key().getStringView().data(), - header.key().getStringView().length()); - lua_pushlstring(state, header.value().getStringView().data(), - header.value().getStringView().length()); - lua_settable(state, -3); - return Http::HeaderMap::Iterate::Continue; - }, - coroutine_.luaState()); + response->headers().iterate([lua_State = coroutine_.luaState()]( + const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate { + lua_pushlstring(lua_State, header.key().getStringView().data(), + header.key().getStringView().length()); + lua_pushlstring(lua_State, header.value().getStringView().data(), + header.value().getStringView().length()); + lua_settable(lua_State, -3); + return Http::HeaderMap::Iterate::Continue; + }); // TODO(mattklein123): Avoid double copy here. 
if (response->body() != nullptr) { diff --git a/source/extensions/filters/http/lua/wrappers.cc b/source/extensions/filters/http/lua/wrappers.cc index a772f3c1edfe..4a24fafaa6a6 100644 --- a/source/extensions/filters/http/lua/wrappers.cc +++ b/source/extensions/filters/http/lua/wrappers.cc @@ -11,13 +11,10 @@ namespace Lua { HeaderMapIterator::HeaderMapIterator(HeaderMapWrapper& parent) : parent_(parent) { entries_.reserve(parent_.headers_.size()); - parent_.headers_.iterate( - [](const Http::HeaderEntry& header, void* context) -> Http::HeaderMap::Iterate { - HeaderMapIterator* iterator = static_cast(context); - iterator->entries_.push_back(&header); - return Http::HeaderMap::Iterate::Continue; - }, - this); + parent_.headers_.iterate([this](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate { + entries_.push_back(&header); + return Http::HeaderMap::Iterate::Continue; + }); } int HeaderMapIterator::luaPairsIterator(lua_State* state) { diff --git a/source/extensions/filters/http/tap/tap_config_impl.cc b/source/extensions/filters/http/tap/tap_config_impl.cc index fe602fbfb6e7..dfe31d083ab2 100644 --- a/source/extensions/filters/http/tap/tap_config_impl.cc +++ b/source/extensions/filters/http/tap/tap_config_impl.cc @@ -15,13 +15,14 @@ namespace TapFilter { namespace TapCommon = Extensions::Common::Tap; namespace { -Http::HeaderMap::Iterate fillHeaderList(const Http::HeaderEntry& header, void* context) { - Protobuf::RepeatedPtrField& header_list = - *reinterpret_cast*>(context); - auto& new_header = *header_list.Add(); - new_header.set_key(std::string(header.key().getStringView())); - new_header.set_value(std::string(header.value().getStringView())); - return Http::HeaderMap::Iterate::Continue; +Http::HeaderMap::ConstIterateCb +fillHeaderList(Protobuf::RepeatedPtrField* output) { + return [output](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate { + auto& new_header = *output->Add(); + new_header.set_key(std::string(header.key().getStringView())); + new_header.set_value(std::string(header.value().getStringView())); + return Http::HeaderMap::Iterate::Continue; + }; } } // namespace @@ -35,9 +36,8 @@ HttpPerRequestTapperPtr HttpTapConfigImpl::createPerRequestTapper(uint64_t strea void HttpPerRequestTapperImpl::streamRequestHeaders() { TapCommon::TraceWrapperPtr trace = makeTraceSegment(); - request_headers_->iterate( - fillHeaderList, - trace->mutable_http_streamed_trace_segment()->mutable_request_headers()->mutable_headers()); + request_headers_->iterate(fillHeaderList( + trace->mutable_http_streamed_trace_segment()->mutable_request_headers()->mutable_headers())); sink_handle_->submitTrace(std::move(trace)); } @@ -67,9 +67,9 @@ void HttpPerRequestTapperImpl::onRequestBody(const Buffer::Instance& data) { void HttpPerRequestTapperImpl::streamRequestTrailers() { if (request_trailers_ != nullptr) { TapCommon::TraceWrapperPtr trace = makeTraceSegment(); - request_trailers_->iterate(fillHeaderList, trace->mutable_http_streamed_trace_segment() - ->mutable_request_trailers() - ->mutable_headers()); + request_trailers_->iterate(fillHeaderList(trace->mutable_http_streamed_trace_segment() + ->mutable_request_trailers() + ->mutable_headers())); sink_handle_->submitTrace(std::move(trace)); } } @@ -91,9 +91,8 @@ void HttpPerRequestTapperImpl::onRequestTrailers(const Http::RequestTrailerMap& void HttpPerRequestTapperImpl::streamResponseHeaders() { TapCommon::TraceWrapperPtr trace = makeTraceSegment(); - response_headers_->iterate( - fillHeaderList, - 
trace->mutable_http_streamed_trace_segment()->mutable_response_headers()->mutable_headers()); + response_headers_->iterate(fillHeaderList( + trace->mutable_http_streamed_trace_segment()->mutable_response_headers()->mutable_headers())); sink_handle_->submitTrace(std::move(trace)); } @@ -141,9 +140,9 @@ void HttpPerRequestTapperImpl::onResponseTrailers(const Http::ResponseTrailerMap } TapCommon::TraceWrapperPtr trace = makeTraceSegment(); - trailers.iterate(fillHeaderList, trace->mutable_http_streamed_trace_segment() - ->mutable_response_trailers() - ->mutable_headers()); + trailers.iterate(fillHeaderList(trace->mutable_http_streamed_trace_segment() + ->mutable_response_trailers() + ->mutable_headers())); sink_handle_->submitTrace(std::move(trace)); } } @@ -156,16 +155,16 @@ bool HttpPerRequestTapperImpl::onDestroyLog() { makeBufferedFullTraceIfNeeded(); auto& http_trace = *buffered_full_trace_->mutable_http_buffered_trace(); if (request_headers_ != nullptr) { - request_headers_->iterate(fillHeaderList, http_trace.mutable_request()->mutable_headers()); + request_headers_->iterate(fillHeaderList(http_trace.mutable_request()->mutable_headers())); } if (request_trailers_ != nullptr) { - request_trailers_->iterate(fillHeaderList, http_trace.mutable_request()->mutable_trailers()); + request_trailers_->iterate(fillHeaderList(http_trace.mutable_request()->mutable_trailers())); } if (response_headers_ != nullptr) { - response_headers_->iterate(fillHeaderList, http_trace.mutable_response()->mutable_headers()); + response_headers_->iterate(fillHeaderList(http_trace.mutable_response()->mutable_headers())); } if (response_trailers_ != nullptr) { - response_trailers_->iterate(fillHeaderList, http_trace.mutable_response()->mutable_trailers()); + response_trailers_->iterate(fillHeaderList(http_trace.mutable_response()->mutable_trailers())); } ENVOY_LOG(debug, "submitting buffered trace sink"); diff --git a/source/extensions/filters/network/thrift_proxy/header_transport_impl.cc b/source/extensions/filters/network/thrift_proxy/header_transport_impl.cc index 26885a842de4..08903d539c49 100644 --- a/source/extensions/filters/network/thrift_proxy/header_transport_impl.cc +++ b/source/extensions/filters/network/thrift_proxy/header_transport_impl.cc @@ -205,14 +205,11 @@ void HeaderTransportImpl::encodeFrame(Buffer::Instance& buffer, const MessageMet // Num headers BufferHelper::writeVarIntI32(header_buffer, static_cast(headers.size())); - headers.iterate( - [](const Http::HeaderEntry& header, void* context) -> Http::HeaderMap::Iterate { - Buffer::Instance* hb = static_cast(context); - writeVarString(*hb, header.key().getStringView()); - writeVarString(*hb, header.value().getStringView()); - return Http::HeaderMap::Iterate::Continue; - }, - &header_buffer); + headers.iterate([&header_buffer](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate { + writeVarString(header_buffer, header.key().getStringView()); + writeVarString(header_buffer, header.value().getStringView()); + return Http::HeaderMap::Iterate::Continue; + }); } uint64_t header_size = header_buffer.length(); diff --git a/source/extensions/filters/network/thrift_proxy/twitter_protocol_impl.cc b/source/extensions/filters/network/thrift_proxy/twitter_protocol_impl.cc index 37a2961f891e..068646839ed2 100644 --- a/source/extensions/filters/network/thrift_proxy/twitter_protocol_impl.cc +++ b/source/extensions/filters/network/thrift_proxy/twitter_protocol_impl.cc @@ -384,28 +384,24 @@ class RequestHeader { sampled_ = metadata.sampled().value(); } - 
metadata.headers().iterate( - [](const Http::HeaderEntry& header, void* cb) -> Http::HeaderMap::Iterate { - absl::string_view key = header.key().getStringView(); - if (key.empty()) { - return Http::HeaderMap::Iterate::Continue; - } - - RequestHeader& rh = *static_cast(cb); - if (key == Headers::get().ClientId.get()) { - rh.client_id_ = ClientId(std::string(header.value().getStringView())); - } else if (key == Headers::get().Dest.get()) { - rh.dest_ = std::string(header.value().getStringView()); - } else if (key.find(":d:") == 0 && key.size() > 3) { - rh.delegations_.emplace_back(std::string(key.substr(3)), - std::string(header.value().getStringView())); - } else if (key[0] != ':') { - rh.contexts_.emplace_back(std::string(key), - std::string(header.value().getStringView())); - } - return Http::HeaderMap::Iterate::Continue; - }, - this); + metadata.headers().iterate([this](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate { + absl::string_view key = header.key().getStringView(); + if (key.empty()) { + return Http::HeaderMap::Iterate::Continue; + } + + if (key == Headers::get().ClientId.get()) { + client_id_ = ClientId(std::string(header.value().getStringView())); + } else if (key == Headers::get().Dest.get()) { + dest_ = std::string(header.value().getStringView()); + } else if (key.find(":d:") == 0 && key.size() > 3) { + delegations_.emplace_back(std::string(key.substr(3)), + std::string(header.value().getStringView())); + } else if (key[0] != ':') { + contexts_.emplace_back(std::string(key), std::string(header.value().getStringView())); + } + return Http::HeaderMap::Iterate::Continue; + }); } void write(Buffer::Instance& buffer) { @@ -575,16 +571,13 @@ class ResponseHeader { } } ResponseHeader(const MessageMetadata& metadata) : spans_(metadata.spans()) { - metadata.headers().iterate( - [](const Http::HeaderEntry& header, void* cb) -> Http::HeaderMap::Iterate { - absl::string_view key = header.key().getStringView(); - if (!key.empty() && key[0] != ':') { - static_cast*>(cb)->emplace_back( - std::string(key), std::string(header.value().getStringView())); - } - return Http::HeaderMap::Iterate::Continue; - }, - &contexts_); + metadata.headers().iterate([this](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate { + absl::string_view key = header.key().getStringView(); + if (!key.empty() && key[0] != ':') { + contexts_.emplace_back(std::string(key), std::string(header.value().getStringView())); + } + return Http::HeaderMap::Iterate::Continue; + }); } void write(Buffer::Instance& buffer) { diff --git a/source/extensions/grpc_credentials/aws_iam/config.cc b/source/extensions/grpc_credentials/aws_iam/config.cc index 5f60eab1464f..fed537a7c674 100644 --- a/source/extensions/grpc_credentials/aws_iam/config.cc +++ b/source/extensions/grpc_credentials/aws_iam/config.cc @@ -129,18 +129,15 @@ AwsIamHeaderAuthenticator::buildMessageToSign(absl::string_view service_url, void AwsIamHeaderAuthenticator::signedHeadersToMetadata( const Http::HeaderMap& headers, std::multimap& metadata) { - headers.iterate( - [](const Http::HeaderEntry& entry, void* context) -> Http::HeaderMap::Iterate { - auto* md = static_cast*>(context); - const auto& key = entry.key().getStringView(); - // Skip pseudo-headers - if (key.empty() || key[0] == ':') { - return Http::HeaderMap::Iterate::Continue; - } - md->emplace(key, entry.value().getStringView()); - return Http::HeaderMap::Iterate::Continue; - }, - &metadata); + headers.iterate([&metadata](const Http::HeaderEntry& entry) -> Http::HeaderMap::Iterate { + 
const auto& key = entry.key().getStringView(); + // Skip pseudo-headers + if (key.empty() || key[0] == ':') { + return Http::HeaderMap::Iterate::Continue; + } + metadata.emplace(key, entry.value().getStringView()); + return Http::HeaderMap::Iterate::Continue; + }); } REGISTER_FACTORY(AwsIamGrpcCredentialsFactory, Grpc::GoogleGrpcCredentialsFactory); diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_utils.cc b/source/extensions/quic_listeners/quiche/envoy_quic_utils.cc index 611cf7b7b721..aefb6a860e5e 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_utils.cc +++ b/source/extensions/quic_listeners/quiche/envoy_quic_utils.cc @@ -47,14 +47,11 @@ quic::QuicSocketAddress envoyAddressInstanceToQuicSocketAddress( spdy::SpdyHeaderBlock envoyHeadersToSpdyHeaderBlock(const Http::HeaderMap& headers) { spdy::SpdyHeaderBlock header_block; - headers.iterate( - [](const Http::HeaderEntry& header, void* context) -> Http::HeaderMap::Iterate { - auto spdy_headers = static_cast(context); - // The key-value pairs are copied. - spdy_headers->insert({header.key().getStringView(), header.value().getStringView()}); - return Http::HeaderMap::Iterate::Continue; - }, - &header_block); + headers.iterate([&header_block](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate { + // The key-value pairs are copied. + header_block.insert({header.key().getStringView(), header.value().getStringView()}); + return Http::HeaderMap::Iterate::Continue; + }); return header_block; } diff --git a/source/extensions/tracers/common/ot/opentracing_driver_impl.cc b/source/extensions/tracers/common/ot/opentracing_driver_impl.cc index 710d94615d59..080742af2ddd 100644 --- a/source/extensions/tracers/common/ot/opentracing_driver_impl.cc +++ b/source/extensions/tracers/common/ot/opentracing_driver_impl.cc @@ -58,25 +58,26 @@ class OpenTracingHTTPHeadersReader : public opentracing::HTTPHeadersReader { } opentracing::expected ForeachKey(OpenTracingCb f) const override { - request_headers_.iterate(headerMapCallback, static_cast(&f)); + request_headers_.iterate(headerMapCallback(f)); return {}; } private: const Http::RequestHeaderMap& request_headers_; - static Http::HeaderMap::Iterate headerMapCallback(const Http::HeaderEntry& header, - void* context) { - auto* callback = static_cast(context); - opentracing::string_view key{header.key().getStringView().data(), - header.key().getStringView().length()}; - opentracing::string_view value{header.value().getStringView().data(), - header.value().getStringView().length()}; - if ((*callback)(key, value)) { - return Http::HeaderMap::Iterate::Continue; - } else { - return Http::HeaderMap::Iterate::Break; - } + static Http::HeaderMap::ConstIterateCb headerMapCallback(OpenTracingCb callback) { + return [callback = + std::move(callback)](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate { + opentracing::string_view key{header.key().getStringView().data(), + header.key().getStringView().length()}; + opentracing::string_view value{header.value().getStringView().data(), + header.value().getStringView().length()}; + if (callback(key, value)) { + return Http::HeaderMap::Iterate::Continue; + } else { + return Http::HeaderMap::Iterate::Break; + } + }; } }; } // namespace diff --git a/test/common/http/header_map_impl_fuzz_test.cc b/test/common/http/header_map_impl_fuzz_test.cc index 7c3d8e6a4296..97e327ce57f1 100644 --- a/test/common/http/header_map_impl_fuzz_test.cc +++ b/test/common/http/header_map_impl_fuzz_test.cc @@ -172,20 +172,16 @@ DEFINE_PROTO_FUZZER(const 
test::common::http::HeaderMapImplFuzzTestCase& input) // Exercise some read-only accessors. header_map->size(); header_map->byteSize(); - header_map->iterate( - [](const Http::HeaderEntry& header, void * /*context*/) -> Http::HeaderMap::Iterate { - header.key(); - header.value(); - return Http::HeaderMap::Iterate::Continue; - }, - nullptr); - header_map->iterateReverse( - [](const Http::HeaderEntry& header, void * /*context*/) -> Http::HeaderMap::Iterate { - header.key(); - header.value(); - return Http::HeaderMap::Iterate::Continue; - }, - nullptr); + header_map->iterate([](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate { + header.key(); + header.value(); + return Http::HeaderMap::Iterate::Continue; + }); + header_map->iterateReverse([](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate { + header.key(); + header.value(); + return Http::HeaderMap::Iterate::Continue; + }); } } diff --git a/test/common/http/header_map_impl_speed_test.cc b/test/common/http/header_map_impl_speed_test.cc index 6f45f39825cd..32020e841c90 100644 --- a/test/common/http/header_map_impl_speed_test.cc +++ b/test/common/http/header_map_impl_speed_test.cc @@ -20,7 +20,7 @@ static void addDummyHeaders(HeaderMap& headers, size_t num_headers) { static void headerMapImplCreate(benchmark::State& state) { // Make sure first time construction is not counted. Http::ResponseHeaderMapImpl::create(); - for (auto _ : state) { + for (auto _ : state) { // NOLINT auto headers = Http::ResponseHeaderMapImpl::create(); benchmark::DoNotOptimize(headers->size()); } @@ -39,7 +39,7 @@ static void headerMapImplSetReference(benchmark::State& state) { const std::string value("01234567890123456789"); auto headers = Http::ResponseHeaderMapImpl::create(); addDummyHeaders(*headers, state.range(0)); - for (auto _ : state) { + for (auto _ : state) { // NOLINT headers->setReference(key, value); } benchmark::DoNotOptimize(headers->size()); @@ -61,7 +61,7 @@ static void headerMapImplGet(benchmark::State& state) { addDummyHeaders(*headers, state.range(0)); headers->setReference(key, value); size_t successes = 0; - for (auto _ : state) { + for (auto _ : state) { // NOLINT successes += (headers->get(key) != nullptr); } benchmark::DoNotOptimize(successes); @@ -78,7 +78,7 @@ static void headerMapImplGetInline(benchmark::State& state) { addDummyHeaders(*headers, state.range(0)); headers->setReferenceConnection(value); size_t size = 0; - for (auto _ : state) { + for (auto _ : state) { // NOLINT size += headers->Connection()->value().size(); } benchmark::DoNotOptimize(size); @@ -93,7 +93,7 @@ static void headerMapImplSetInlineMacro(benchmark::State& state) { const std::string value("01234567890123456789"); auto headers = Http::ResponseHeaderMapImpl::create(); addDummyHeaders(*headers, state.range(0)); - for (auto _ : state) { + for (auto _ : state) { // NOLINT headers->setReferenceConnection(value); } benchmark::DoNotOptimize(headers->size()); @@ -108,7 +108,7 @@ static void headerMapImplSetInlineInteger(benchmark::State& state) { uint64_t value = 12345; auto headers = Http::ResponseHeaderMapImpl::create(); addDummyHeaders(*headers, state.range(0)); - for (auto _ : state) { + for (auto _ : state) { // NOLINT headers->setConnection(value); } benchmark::DoNotOptimize(headers->size()); @@ -120,7 +120,7 @@ static void headerMapImplGetByteSize(benchmark::State& state) { auto headers = Http::ResponseHeaderMapImpl::create(); addDummyHeaders(*headers, state.range(0)); uint64_t size = 0; - for (auto _ : state) { + for (auto _ : state) { // 
NOLINT size += headers->byteSize(); } benchmark::DoNotOptimize(size); @@ -132,12 +132,12 @@ static void headerMapImplIterate(benchmark::State& state) { auto headers = Http::ResponseHeaderMapImpl::create(); size_t num_callbacks = 0; addDummyHeaders(*headers, state.range(0)); - auto counting_callback = [](const HeaderEntry&, void* context) -> HeaderMap::Iterate { - (*static_cast(context))++; + auto counting_callback = [&num_callbacks](const HeaderEntry&) -> HeaderMap::Iterate { + num_callbacks++; return HeaderMap::Iterate::Continue; }; - for (auto _ : state) { - headers->iterate(counting_callback, &num_callbacks); + for (auto _ : state) { // NOLINT + headers->iterate(counting_callback); } benchmark::DoNotOptimize(num_callbacks); } @@ -153,7 +153,7 @@ static void headerMapImplRemove(benchmark::State& state) { const std::string value("01234567890123456789"); auto headers = Http::ResponseHeaderMapImpl::create(); addDummyHeaders(*headers, state.range(0)); - for (auto _ : state) { + for (auto _ : state) { // NOLINT headers->addReference(key, value); headers->remove(key); } @@ -172,7 +172,7 @@ static void headerMapImplRemoveInline(benchmark::State& state) { const std::string value("01234567890123456789"); auto headers = Http::ResponseHeaderMapImpl::create(); addDummyHeaders(*headers, state.range(0)); - for (auto _ : state) { + for (auto _ : state) { // NOLINT headers->addReference(key, value); headers->remove(key); } @@ -197,7 +197,7 @@ static void headerMapImplPopulate(benchmark::State& state) { {LowerCaseString("set-cookie"), "_cookie1=12345678; path = /; secure"}, {LowerCaseString("set-cookie"), "_cookie2=12345678; path = /; secure"}, }; - for (auto _ : state) { + for (auto _ : state) { // NOLINT auto headers = Http::ResponseHeaderMapImpl::create(); for (const auto& key_value : headers_to_add) { headers->addReference(key_value.first, key_value.second); diff --git a/test/common/http/header_map_impl_test.cc b/test/common/http/header_map_impl_test.cc index 6e0eac4b19dc..6f5552156b40 100644 --- a/test/common/http/header_map_impl_test.cc +++ b/test/common/http/header_map_impl_test.cc @@ -561,6 +561,17 @@ TEST(HeaderMapImplTest, RemoveRegex) { EXPECT_EQ(nullptr, headers.ContentLength()); } +class HeaderAndValueCb + : public testing::MockFunction { +public: + HeaderMap::ConstIterateCb asIterateCb() { + return [this](const Http::HeaderEntry& header) -> HeaderMap::Iterate { + Call(std::string(header.key().getStringView()), std::string(header.value().getStringView())); + return HeaderMap::Iterate::Continue; + }; + } +}; + TEST(HeaderMapImplTest, SetRemovesAllValues) { TestRequestHeaderMapImpl headers; @@ -577,10 +588,8 @@ TEST(HeaderMapImplTest, SetRemovesAllValues) { headers.addReference(key1, ref_value3); headers.addReference(key1, ref_value4); - using MockCb = testing::MockFunction; - { - MockCb cb; + HeaderAndValueCb cb; InSequence seq; EXPECT_CALL(cb, Call("hello", "world")); @@ -588,31 +597,19 @@ TEST(HeaderMapImplTest, SetRemovesAllValues) { EXPECT_CALL(cb, Call("hello", "globe")); EXPECT_CALL(cb, Call("hello", "earth")); - headers.iterate( - [](const Http::HeaderEntry& header, void* cb_v) -> HeaderMap::Iterate { - static_cast(cb_v)->Call(std::string(header.key().getStringView()), - std::string(header.value().getStringView())); - return HeaderMap::Iterate::Continue; - }, - &cb); + headers.iterate(cb.asIterateCb()); } headers.setReference(key1, ref_value5); // set moves key to end { - MockCb cb; + HeaderAndValueCb cb; InSequence seq; EXPECT_CALL(cb, Call("olleh", "planet")); EXPECT_CALL(cb, 
Call("hello", "blue marble")); - headers.iterate( - [](const Http::HeaderEntry& header, void* cb_v) -> HeaderMap::Iterate { - static_cast(cb_v)->Call(std::string(header.key().getStringView()), - std::string(header.value().getStringView())); - return HeaderMap::Iterate::Continue; - }, - &cb); + headers.iterate(cb.asIterateCb()); } } @@ -714,19 +711,12 @@ TEST(HeaderMapImplTest, SetCopy) { headers.setCopy(foo, "override-monde"); EXPECT_EQ(headers.size(), 2); - using MockCb = testing::MockFunction; - MockCb cb; + HeaderAndValueCb cb; InSequence seq; EXPECT_CALL(cb, Call("hello", "override-monde")); EXPECT_CALL(cb, Call("hello", "monde2")); - headers.iterate( - [](const Http::HeaderEntry& header, void* cb_v) -> HeaderMap::Iterate { - static_cast(cb_v)->Call(std::string(header.key().getStringView()), - std::string(header.value().getStringView())); - return HeaderMap::Iterate::Continue; - }, - &cb); + headers.iterate(cb.asIterateCb()); // Test setting an empty string and then overriding. EXPECT_EQ(2UL, headers.remove(foo)); @@ -847,20 +837,13 @@ TEST(HeaderMapImplTest, Iterate) { LowerCaseString foo_key("foo"); headers.setReferenceKey(foo_key, "bar"); // set moves key to end - using MockCb = testing::MockFunction; - MockCb cb; + HeaderAndValueCb cb; InSequence seq; EXPECT_CALL(cb, Call("hello", "world")); EXPECT_CALL(cb, Call("world", "hello")); EXPECT_CALL(cb, Call("foo", "bar")); - headers.iterate( - [](const Http::HeaderEntry& header, void* cb_v) -> HeaderMap::Iterate { - static_cast(cb_v)->Call(std::string(header.key().getStringView()), - std::string(header.value().getStringView())); - return HeaderMap::Iterate::Continue; - }, - &cb); + headers.iterate(cb.asIterateCb()); } TEST(HeaderMapImplTest, IterateReverse) { @@ -870,24 +853,20 @@ TEST(HeaderMapImplTest, IterateReverse) { LowerCaseString world_key("world"); headers.setReferenceKey(world_key, "hello"); - using MockCb = testing::MockFunction; - MockCb cb; + HeaderAndValueCb cb; InSequence seq; EXPECT_CALL(cb, Call("world", "hello")); EXPECT_CALL(cb, Call("foo", "bar")); // no "hello" - headers.iterateReverse( - [](const Http::HeaderEntry& header, void* cb_v) -> HeaderMap::Iterate { - static_cast(cb_v)->Call(std::string(header.key().getStringView()), - std::string(header.value().getStringView())); - if (header.key().getStringView() != "foo") { - return HeaderMap::Iterate::Continue; - } else { - return HeaderMap::Iterate::Break; - } - }, - &cb); + headers.iterateReverse([&cb](const Http::HeaderEntry& header) -> HeaderMap::Iterate { + cb.Call(std::string(header.key().getStringView()), std::string(header.value().getStringView())); + if (header.key().getStringView() != "foo") { + return HeaderMap::Iterate::Continue; + } else { + return HeaderMap::Iterate::Break; + } + }); } TEST(HeaderMapImplTest, Get) { @@ -1002,8 +981,7 @@ TEST(TestHeaderMapImplDeathTest, TestHeaderLengthChecks) { } TEST(HeaderMapImplTest, PseudoHeaderOrder) { - using MockCb = testing::MockFunction; - MockCb cb; + HeaderAndValueCb cb; { LowerCaseString foo("hello"); @@ -1029,13 +1007,7 @@ TEST(HeaderMapImplTest, PseudoHeaderOrder) { EXPECT_CALL(cb, Call("hello", "world")); EXPECT_CALL(cb, Call("content-type", "text/html")); - headers.iterate( - [](const Http::HeaderEntry& header, void* cb_v) -> HeaderMap::Iterate { - static_cast(cb_v)->Call(std::string(header.key().getStringView()), - std::string(header.value().getStringView())); - return HeaderMap::Iterate::Continue; - }, - &cb); + headers.iterate(cb.asIterateCb()); // Removal of the header before which pseudo-headers are 
inserted EXPECT_EQ(1UL, headers.remove(foo)); @@ -1045,13 +1017,7 @@ TEST(HeaderMapImplTest, PseudoHeaderOrder) { EXPECT_CALL(cb, Call(":method", "PUT")); EXPECT_CALL(cb, Call("content-type", "text/html")); - headers.iterate( - [](const Http::HeaderEntry& header, void* cb_v) -> HeaderMap::Iterate { - static_cast(cb_v)->Call(std::string(header.key().getStringView()), - std::string(header.value().getStringView())); - return HeaderMap::Iterate::Continue; - }, - &cb); + headers.iterate(cb.asIterateCb()); // Next pseudo-header goes after other pseudo-headers, but before normal headers headers.setReferenceKey(Headers::get().Path, "/test"); @@ -1062,13 +1028,7 @@ TEST(HeaderMapImplTest, PseudoHeaderOrder) { EXPECT_CALL(cb, Call(":path", "/test")); EXPECT_CALL(cb, Call("content-type", "text/html")); - headers.iterate( - [](const Http::HeaderEntry& header, void* cb_v) -> HeaderMap::Iterate { - static_cast(cb_v)->Call(std::string(header.key().getStringView()), - std::string(header.value().getStringView())); - return HeaderMap::Iterate::Continue; - }, - &cb); + headers.iterate(cb.asIterateCb()); // Removing the last normal header EXPECT_EQ(1UL, headers.remove(Headers::get().ContentType)); @@ -1078,13 +1038,7 @@ TEST(HeaderMapImplTest, PseudoHeaderOrder) { EXPECT_CALL(cb, Call(":method", "PUT")); EXPECT_CALL(cb, Call(":path", "/test")); - headers.iterate( - [](const Http::HeaderEntry& header, void* cb_v) -> HeaderMap::Iterate { - static_cast(cb_v)->Call(std::string(header.key().getStringView()), - std::string(header.value().getStringView())); - return HeaderMap::Iterate::Continue; - }, - &cb); + headers.iterate(cb.asIterateCb()); // Adding a new pseudo-header after removing the last normal header headers.setReferenceKey(Headers::get().Host, "host"); @@ -1095,13 +1049,7 @@ TEST(HeaderMapImplTest, PseudoHeaderOrder) { EXPECT_CALL(cb, Call(":path", "/test")); EXPECT_CALL(cb, Call(":authority", "host")); - headers.iterate( - [](const Http::HeaderEntry& header, void* cb_v) -> HeaderMap::Iterate { - static_cast(cb_v)->Call(std::string(header.key().getStringView()), - std::string(header.value().getStringView())); - return HeaderMap::Iterate::Continue; - }, - &cb); + headers.iterate(cb.asIterateCb()); // Adding the first normal header headers.setReferenceKey(Headers::get().ContentType, "text/html"); @@ -1113,13 +1061,7 @@ TEST(HeaderMapImplTest, PseudoHeaderOrder) { EXPECT_CALL(cb, Call(":authority", "host")); EXPECT_CALL(cb, Call("content-type", "text/html")); - headers.iterate( - [](const Http::HeaderEntry& header, void* cb_v) -> HeaderMap::Iterate { - static_cast(cb_v)->Call(std::string(header.key().getStringView()), - std::string(header.value().getStringView())); - return HeaderMap::Iterate::Continue; - }, - &cb); + headers.iterate(cb.asIterateCb()); // Removing all pseudo-headers EXPECT_EQ(1UL, headers.remove(Headers::get().Path)); @@ -1130,13 +1072,7 @@ TEST(HeaderMapImplTest, PseudoHeaderOrder) { EXPECT_CALL(cb, Call("content-type", "text/html")); - headers.iterate( - [](const Http::HeaderEntry& header, void* cb_v) -> HeaderMap::Iterate { - static_cast(cb_v)->Call(std::string(header.key().getStringView()), - std::string(header.value().getStringView())); - return HeaderMap::Iterate::Continue; - }, - &cb); + headers.iterate(cb.asIterateCb()); // Removing all headers EXPECT_EQ(1UL, headers.remove(Headers::get().ContentType)); @@ -1150,13 +1086,7 @@ TEST(HeaderMapImplTest, PseudoHeaderOrder) { EXPECT_CALL(cb, Call(":status", "200")); - headers.iterate( - [](const Http::HeaderEntry& header, void* cb_v) -> 
HeaderMap::Iterate { - static_cast(cb_v)->Call(std::string(header.key().getStringView()), - std::string(header.value().getStringView())); - return HeaderMap::Iterate::Continue; - }, - &cb); + headers.iterate(cb.asIterateCb()); } // Starting with a normal header @@ -1174,13 +1104,7 @@ TEST(HeaderMapImplTest, PseudoHeaderOrder) { EXPECT_CALL(cb, Call("content-type", "text/plain")); EXPECT_CALL(cb, Call("hello", "world")); - headers.iterate( - [](const Http::HeaderEntry& header, void* cb_v) -> HeaderMap::Iterate { - static_cast(cb_v)->Call(std::string(header.key().getStringView()), - std::string(header.value().getStringView())); - return HeaderMap::Iterate::Continue; - }, - &cb); + headers.iterate(cb.asIterateCb()); } // Starting with a pseudo-header @@ -1198,13 +1122,7 @@ TEST(HeaderMapImplTest, PseudoHeaderOrder) { EXPECT_CALL(cb, Call("content-type", "text/plain")); EXPECT_CALL(cb, Call("hello", "world")); - headers.iterate( - [](const Http::HeaderEntry& header, void* cb_v) -> HeaderMap::Iterate { - static_cast(cb_v)->Call(std::string(header.key().getStringView()), - std::string(header.value().getStringView())); - return HeaderMap::Iterate::Continue; - }, - &cb); + headers.iterate(cb.asIterateCb()); } } diff --git a/test/common/http/header_utility_test.cc b/test/common/http/header_utility_test.cc index e28ec4fd17b9..6f557b1f017d 100644 --- a/test/common/http/header_utility_test.cc +++ b/test/common/http/header_utility_test.cc @@ -547,14 +547,11 @@ TEST(HeaderAddTest, HeaderAdd) { HeaderUtility::addHeaders(headers, headers_to_add); - headers_to_add.iterate( - [](const Http::HeaderEntry& entry, void* context) -> Http::HeaderMap::Iterate { - TestRequestHeaderMapImpl* headers = static_cast(context); - Http::LowerCaseString lower_key{std::string(entry.key().getStringView())}; - EXPECT_EQ(entry.value().getStringView(), headers->get(lower_key)->value().getStringView()); - return Http::HeaderMap::Iterate::Continue; - }, - &headers); + headers_to_add.iterate([&headers](const Http::HeaderEntry& entry) -> Http::HeaderMap::Iterate { + Http::LowerCaseString lower_key{std::string(entry.key().getStringView())}; + EXPECT_EQ(entry.value().getStringView(), headers.get(lower_key)->value().getStringView()); + return Http::HeaderMap::Iterate::Continue; + }); } TEST(HeaderIsValidTest, HeaderNameContainsUnderscore) { diff --git a/test/common/router/header_formatter_test.cc b/test/common/router/header_formatter_test.cc index f0e7ecd10f89..99be7eb35476 100644 --- a/test/common/router/header_formatter_test.cc +++ b/test/common/router/header_formatter_test.cc @@ -1148,19 +1148,16 @@ match: { prefix: "/new_endpoint" } using CountMap = absl::flat_hash_map; CountMap counts; - header_map.iterate( - [](const Http::HeaderEntry& header, void* cb_v) -> Http::HeaderMap::Iterate { - CountMap* m = static_cast(cb_v); - absl::string_view key = header.key().getStringView(); - CountMap::iterator i = m->find(key); - if (i == m->end()) { - m->insert({std::string(key), 1}); - } else { - i->second++; - } - return Http::HeaderMap::Iterate::Continue; - }, - &counts); + header_map.iterate([&counts](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate { + absl::string_view key = header.key().getStringView(); + CountMap::iterator i = counts.find(key); + if (i == counts.end()) { + counts.insert({std::string(key), 1}); + } else { + i->second++; + } + return Http::HeaderMap::Iterate::Continue; + }); EXPECT_EQ(1, counts["static-header"]); EXPECT_EQ(1, counts["x-client-ip"]); diff --git a/test/common/router/router_test.cc 
b/test/common/router/router_test.cc index 919df0a67545..ddc452e108bf 100644 --- a/test/common/router/router_test.cc +++ b/test/common/router/router_test.cc @@ -768,15 +768,12 @@ TEST_F(RouterTest, AddMultipleCookies) { EXPECT_CALL(cb, Call("foo=\"" + foo_c + "\"; Max-Age=1337; Path=/path; HttpOnly")); EXPECT_CALL(cb, Call("choco=\"" + choco_c + "\"; Max-Age=15; HttpOnly")); - headers.iterate( - [](const Http::HeaderEntry& header, void* context) -> Http::HeaderMap::Iterate { - if (header.key() == Http::Headers::get().SetCookie.get()) { - static_cast*>(context)->Call( - std::string(header.value().getStringView())); - } - return Http::HeaderMap::Iterate::Continue; - }, - &cb); + headers.iterate([&cb](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate { + if (header.key() == Http::Headers::get().SetCookie.get()) { + cb.Call(std::string(header.value().getStringView())); + } + return Http::HeaderMap::Iterate::Continue; + }); })); expectResponseTimerCreate(); diff --git a/test/extensions/filters/http/aws_lambda/aws_lambda_filter_integration_test.cc b/test/extensions/filters/http/aws_lambda/aws_lambda_filter_integration_test.cc index 0b2da9a6a379..c752f4ca651b 100644 --- a/test/extensions/filters/http/aws_lambda/aws_lambda_filter_integration_test.cc +++ b/test/extensions/filters/http/aws_lambda/aws_lambda_filter_integration_test.cc @@ -123,30 +123,22 @@ class AwsLambdaFilterIntegrationTest : public testing::TestWithParam(ctx); + [actual_headers = &response->headers()](const Http::HeaderEntry& expected_entry) { const auto* actual_entry = actual_headers->get( Http::LowerCaseString(std::string(expected_entry.key().getStringView()))); EXPECT_EQ(actual_entry->value().getStringView(), expected_entry.value().getStringView()); return Http::HeaderMap::Iterate::Continue; - }, - // Because headers() returns a pointer to const we have to cast it - // away to match the callback signature. This is safe because we do - // not call any non-const functions on the headers in the callback. 
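(Illustrative aside, not part of this patch: the comment being deleted just above exists only because the old HeaderMap::iterate() took a capture-less callback plus a void* context, which forced callers to thread state through an untyped pointer and, as in this test, const_cast the header map. With the lambda-based ConstIterateCb adopted throughout this series, the same traversal can be written as a small self-contained helper. The allValues() name below is an assumption for illustration only.)

#include <string>
#include <vector>

#include "envoy/http/header_map.h"

// Collect every header value by capturing local state in the callback instead of
// passing a void* context; returning Iterate::Continue walks the whole map.
std::vector<std::string> allValues(const Envoy::Http::HeaderMap& headers) {
  std::vector<std::string> values;
  headers.iterate([&values](const Envoy::Http::HeaderEntry& header)
                      -> Envoy::Http::HeaderMap::Iterate {
    values.push_back(std::string(header.value().getStringView()));
    return Envoy::Http::HeaderMap::Iterate::Continue;
  });
  return values;
}
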
- const_cast(&response->headers())); + }); // verify cookies if we have any if (!expected_response_cookies.empty()) { std::vector actual_cookies; - response->headers().iterate( - [](const Http::HeaderEntry& entry, void* ctx) { - auto* list = static_cast*>(ctx); - if (entry.key().getStringView() == Http::Headers::get().SetCookie.get()) { - list->emplace_back(entry.value().getStringView()); - } - return Http::HeaderMap::Iterate::Continue; - }, - &actual_cookies); + response->headers().iterate([&actual_cookies](const Http::HeaderEntry& entry) { + if (entry.key().getStringView() == Http::Headers::get().SetCookie.get()) { + actual_cookies.emplace_back(entry.value().getStringView()); + } + return Http::HeaderMap::Iterate::Continue; + }); EXPECT_EQ(expected_response_cookies, actual_cookies); } diff --git a/test/extensions/filters/http/aws_lambda/aws_lambda_filter_test.cc b/test/extensions/filters/http/aws_lambda/aws_lambda_filter_test.cc index 0144f67269c4..ab0cf4c2c900 100644 --- a/test/extensions/filters/http/aws_lambda/aws_lambda_filter_test.cc +++ b/test/extensions/filters/http/aws_lambda/aws_lambda_filter_test.cc @@ -194,16 +194,13 @@ TEST_F(AwsLambdaFilterTest, DecodeHeadersInvocationModeSetsHeader) { EXPECT_EQ(Http::FilterHeadersStatus::Continue, header_result); std::string invocation_header_value; - headers.iterate( - [](const Http::HeaderEntry& entry, void* ctx) { - auto* out = static_cast(ctx); - if (entry.key().getStringView() == "x-amz-invocation-type") { - out->append(std::string(entry.value().getStringView())); - return Http::HeaderMap::Iterate::Break; - } - return Http::HeaderMap::Iterate::Continue; - }, - &invocation_header_value); + headers.iterate([&invocation_header_value](const Http::HeaderEntry& entry) { + if (entry.key().getStringView() == "x-amz-invocation-type") { + invocation_header_value.append(std::string(entry.value().getStringView())); + return Http::HeaderMap::Iterate::Break; + } + return Http::HeaderMap::Iterate::Continue; + }); EXPECT_EQ("RequestResponse", invocation_header_value); } @@ -527,15 +524,12 @@ TEST_F(AwsLambdaFilterTest, EncodeDataJsonModeTransformToHttp) { EXPECT_EQ("awesome value", custom_header->value().getStringView()); std::vector cookies; - headers.iterate( - [](const Http::HeaderEntry& entry, void* ctx) { - auto* list = static_cast*>(ctx); - if (entry.key().getStringView() == Http::Headers::get().SetCookie.get()) { - list->emplace_back(entry.value().getStringView()); - } - return Http::HeaderMap::Iterate::Continue; - }, - &cookies); + headers.iterate([&cookies](const Http::HeaderEntry& entry) { + if (entry.key().getStringView() == Http::Headers::get().SetCookie.get()) { + cookies.emplace_back(entry.value().getStringView()); + } + return Http::HeaderMap::Iterate::Continue; + }); EXPECT_THAT(cookies, ElementsAre("session-id=42; Secure; HttpOnly", "user=joe")); } diff --git a/test/extensions/filters/http/ext_authz/ext_authz_integration_test.cc b/test/extensions/filters/http/ext_authz/ext_authz_integration_test.cc index fe37da4eaa30..e1d4d2e059ac 100644 --- a/test/extensions/filters/http/ext_authz/ext_authz_integration_test.cc +++ b/test/extensions/filters/http/ext_authz/ext_authz_integration_test.cc @@ -230,27 +230,20 @@ class ExtAuthzGrpcIntegrationTest : public Grpc::VersionedGrpcClientIntegrationP // Entries in this headers are not present in the original request headers. 
new_headers_from_upstream.iterate( - [](const Http::HeaderEntry& h, void* context) -> Http::HeaderMap::Iterate { - auto* entry = static_cast(context) - ->mutable_ok_response() - ->mutable_headers() - ->Add(); + [&check_response](const Http::HeaderEntry& h) -> Http::HeaderMap::Iterate { + auto* entry = check_response.mutable_ok_response()->mutable_headers()->Add(); // Try to append to a non-existent field. entry->mutable_append()->set_value(true); entry->mutable_header()->set_key(std::string(h.key().getStringView())); entry->mutable_header()->set_value(std::string(h.value().getStringView())); return Http::HeaderMap::Iterate::Continue; - }, - &check_response); + }); // Entries in this headers are not present in the original request headers. But we set append = // true and append = false. headers_to_append_multiple.iterate( - [](const Http::HeaderEntry& h, void* context) -> Http::HeaderMap::Iterate { - auto* entry = static_cast(context) - ->mutable_ok_response() - ->mutable_headers() - ->Add(); + [&check_response](const Http::HeaderEntry& h) -> Http::HeaderMap::Iterate { + auto* entry = check_response.mutable_ok_response()->mutable_headers()->Add(); const auto key = std::string(h.key().getStringView()); const auto value = std::string(h.value().getStringView()); @@ -259,8 +252,7 @@ class ExtAuthzGrpcIntegrationTest : public Grpc::VersionedGrpcClientIntegrationP entry->mutable_header()->set_key(key); entry->mutable_header()->set_value(value); return Http::HeaderMap::Iterate::Continue; - }, - &check_response); + }); ext_authz_request_->sendGrpcMessage(check_response); ext_authz_request_->finishGrpcStream(Grpc::Status::Ok); diff --git a/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc b/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc index fc130bd47e6d..c0384a71dc94 100644 --- a/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc +++ b/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc @@ -130,8 +130,7 @@ class GrpcJsonTranscoderIntegrationTest } response_headers.iterate( - [](const Http::HeaderEntry& entry, void* context) -> Http::HeaderMap::Iterate { - auto* response = static_cast(context); + [response = response.get()](const Http::HeaderEntry& entry) -> Http::HeaderMap::Iterate { Http::LowerCaseString lower_key{std::string(entry.key().getStringView())}; if (entry.value() == UnexpectedHeaderValue) { EXPECT_FALSE(response->headers().get(lower_key)); @@ -140,8 +139,7 @@ class GrpcJsonTranscoderIntegrationTest response->headers().get(lower_key)->value().getStringView()); } return Http::HeaderMap::Iterate::Continue; - }, - response.get()); + }); if (!response_body.empty()) { if (full_response) { EXPECT_EQ(response_body, response->body()); diff --git a/test/extensions/filters/http/ratelimit/ratelimit_integration_test.cc b/test/extensions/filters/http/ratelimit/ratelimit_integration_test.cc index db2c5849c651..60b7812718e1 100644 --- a/test/extensions/filters/http/ratelimit/ratelimit_integration_test.cc +++ b/test/extensions/filters/http/ratelimit/ratelimit_integration_test.cc @@ -135,25 +135,19 @@ class RatelimitIntegrationTest : public Grpc::VersionedGrpcClientIntegrationPara response_msg.set_overall_code(code); response_headers_to_add.iterate( - [](const Http::HeaderEntry& h, void* context) -> Http::HeaderMap::Iterate { - auto header = static_cast(context) - ->mutable_response_headers_to_add() - ->Add(); + [&response_msg](const 
Http::HeaderEntry& h) -> Http::HeaderMap::Iterate { + auto header = response_msg.mutable_response_headers_to_add()->Add(); header->set_key(std::string(h.key().getStringView())); header->set_value(std::string(h.value().getStringView())); return Http::HeaderMap::Iterate::Continue; - }, - &response_msg); + }); request_headers_to_add.iterate( - [](const Http::HeaderEntry& h, void* context) -> Http::HeaderMap::Iterate { - auto header = static_cast(context) - ->mutable_request_headers_to_add() - ->Add(); + [&response_msg](const Http::HeaderEntry& h) -> Http::HeaderMap::Iterate { + auto header = response_msg.mutable_request_headers_to_add()->Add(); header->set_key(std::string(h.key().getStringView())); header->set_value(std::string(h.value().getStringView())); return Http::HeaderMap::Iterate::Continue; - }, - &response_msg); + }); ratelimit_request_->sendGrpcMessage(response_msg); ratelimit_request_->finishGrpcStream(Grpc::Status::Ok); } @@ -223,22 +217,18 @@ TEST_P(RatelimitIntegrationTest, OkWithHeaders) { waitForSuccessfulUpstreamResponse(); ratelimit_response_headers.iterate( - [](const Http::HeaderEntry& entry, void* context) -> Http::HeaderMap::Iterate { - IntegrationStreamDecoder* response = static_cast(context); + [response = response_.get()](const Http::HeaderEntry& entry) -> Http::HeaderMap::Iterate { Http::LowerCaseString lower_key{std::string(entry.key().getStringView())}; EXPECT_EQ(entry.value(), response->headers().get(lower_key)->value().getStringView()); return Http::HeaderMap::Iterate::Continue; - }, - response_.get()); + }); - request_headers_to_add.iterate( - [](const Http::HeaderEntry& entry, void* context) -> Http::HeaderMap::Iterate { - FakeStream* upstream = static_cast(context); - Http::LowerCaseString lower_key{std::string(entry.key().getStringView())}; - EXPECT_EQ(entry.value(), upstream->headers().get(lower_key)->value().getStringView()); - return Http::HeaderMap::Iterate::Continue; - }, - upstream_request_.get()); + request_headers_to_add.iterate([upstream = upstream_request_.get()]( + const Http::HeaderEntry& entry) -> Http::HeaderMap::Iterate { + Http::LowerCaseString lower_key{std::string(entry.key().getStringView())}; + EXPECT_EQ(entry.value(), upstream->headers().get(lower_key)->value().getStringView()); + return Http::HeaderMap::Iterate::Continue; + }); cleanup(); @@ -270,13 +260,11 @@ TEST_P(RatelimitIntegrationTest, OverLimitWithHeaders) { waitForFailedUpstreamResponse(429); ratelimit_response_headers.iterate( - [](const Http::HeaderEntry& entry, void* context) -> Http::HeaderMap::Iterate { - IntegrationStreamDecoder* response = static_cast(context); + [response = response_.get()](const Http::HeaderEntry& entry) -> Http::HeaderMap::Iterate { Http::LowerCaseString lower_key{std::string(entry.key().getStringView())}; EXPECT_EQ(entry.value(), response->headers().get(lower_key)->value().getStringView()); return Http::HeaderMap::Iterate::Continue; - }, - response_.get()); + }); cleanup(); diff --git a/test/extensions/grpc_credentials/aws_iam/aws_iam_grpc_credentials_test.cc b/test/extensions/grpc_credentials/aws_iam/aws_iam_grpc_credentials_test.cc index 689f37b33945..40d09b78e23b 100644 --- a/test/extensions/grpc_credentials/aws_iam/aws_iam_grpc_credentials_test.cc +++ b/test/extensions/grpc_credentials/aws_iam/aws_iam_grpc_credentials_test.cc @@ -58,30 +58,40 @@ class GrpcAwsIamClientIntegrationTest : public GrpcSslClientIntegrationTest { TestEnvironment::runfilesPath("test/config/integration/certs/upstreamcacert.pem")); std::string config_yaml; - if 
(region_in_env_) { + switch (region_location_) { + case RegionLocation::InEnvironment: TestEnvironment::setEnvVar("AWS_REGION", region_name_, 1); + ABSL_FALLTHROUGH_INTENDED; + case RegionLocation::NotProvided: config_yaml = fmt::format(R"EOF( "@type": type.googleapis.com/envoy.config.grpc_credential.v2alpha.AwsIamConfig service_name: {} )EOF", service_name_); - } else { + break; + case RegionLocation::InConfig: config_yaml = fmt::format(R"EOF( "@type": type.googleapis.com/envoy.config.grpc_credential.v2alpha.AwsIamConfig service_name: {} region: {} )EOF", service_name_, region_name_); + break; } auto* plugin_config = google_grpc->add_call_credentials()->mutable_from_plugin(); plugin_config->set_name(credentials_factory_name_); - envoy::config::grpc_credential::v3::AwsIamConfig metadata_config; Envoy::TestUtility::loadFromYaml(config_yaml, *plugin_config->mutable_typed_config()); return config; } - bool region_in_env_{}; + enum class RegionLocation { + NotProvided, + InEnvironment, + InConfig, + }; + + RegionLocation region_location_ = RegionLocation::NotProvided; std::string service_name_{}; std::string region_name_{}; std::string credentials_factory_name_{}; @@ -94,6 +104,7 @@ TEST_P(GrpcAwsIamClientIntegrationTest, AwsIamGrpcAuth_ConfigRegion) { SKIP_IF_GRPC_CLIENT(ClientType::EnvoyGrpc); service_name_ = "test_service"; region_name_ = "test_region_static"; + region_location_ = RegionLocation::InConfig; credentials_factory_name_ = Extensions::GrpcCredentials::GrpcCredentialsNames::get().AwsIam; initialize(); auto request = createRequest(empty_metadata_); @@ -105,7 +116,7 @@ TEST_P(GrpcAwsIamClientIntegrationTest, AwsIamGrpcAuth_EnvRegion) { SKIP_IF_GRPC_CLIENT(ClientType::EnvoyGrpc); service_name_ = "test_service"; region_name_ = "test_region_env"; - region_in_env_ = true; + region_location_ = RegionLocation::InEnvironment; credentials_factory_name_ = Extensions::GrpcCredentials::GrpcCredentialsNames::get().AwsIam; initialize(); auto request = createRequest(empty_metadata_); @@ -113,6 +124,15 @@ TEST_P(GrpcAwsIamClientIntegrationTest, AwsIamGrpcAuth_EnvRegion) { dispatcher_helper_.runDispatcher(); } +TEST_P(GrpcAwsIamClientIntegrationTest, AwsIamGrpcAuth_NoRegion) { + SKIP_IF_GRPC_CLIENT(ClientType::EnvoyGrpc); + service_name_ = "test_service"; + region_name_ = "test_region_env"; + region_location_ = RegionLocation::NotProvided; + credentials_factory_name_ = Extensions::GrpcCredentials::GrpcCredentialsNames::get().AwsIam; + EXPECT_THROW_WITH_REGEX(initialize();, EnvoyException, "AWS region"); +} + } // namespace } // namespace Grpc } // namespace Envoy diff --git a/test/extensions/tracers/zipkin/zipkin_tracer_impl_test.cc b/test/extensions/tracers/zipkin/zipkin_tracer_impl_test.cc index a873f77c01f2..12041fdd5860 100644 --- a/test/extensions/tracers/zipkin/zipkin_tracer_impl_test.cc +++ b/test/extensions/tracers/zipkin/zipkin_tracer_impl_test.cc @@ -865,11 +865,10 @@ TEST_F(ZipkinDriverTest, DuplicatedHeader) { span->setSampled(true); span->injectContext(request_headers_); request_headers_.iterate( - [](const Http::HeaderEntry& header, void* cb) -> Http::HeaderMap::Iterate { - EXPECT_FALSE(static_cast(cb)->operator()(header.key().getStringView())); + [&dup_callback](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate { + dup_callback(header.key().getStringView()); return Http::HeaderMap::Iterate::Continue; - }, - &dup_callback); + }); } } // namespace diff --git a/test/fuzz/utility.h b/test/fuzz/utility.h index 8347e09f2e77..171674a7ff04 100644 --- a/test/fuzz/utility.h +++ 
b/test/fuzz/utility.h @@ -116,14 +116,12 @@ inline Http::MetadataMapVector fromMetadata(const test::fuzz::Metadata& metadata // Convert from HeaderMap to test proto Headers. inline test::fuzz::Headers toHeaders(const Http::HeaderMap& headers) { test::fuzz::Headers fuzz_headers; - headers.iterate( - [](const Http::HeaderEntry& header, void* ctxt) -> Http::HeaderMap::Iterate { - auto* fuzz_header = static_cast(ctxt)->add_headers(); - fuzz_header->set_key(std::string(header.key().getStringView())); - fuzz_header->set_value(std::string(header.value().getStringView())); - return Http::HeaderMap::Iterate::Continue; - }, - &fuzz_headers); + headers.iterate([&fuzz_headers](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate { + auto* fuzz_header = fuzz_headers.add_headers(); + fuzz_header->set_key(std::string(header.key().getStringView())); + fuzz_header->set_value(std::string(header.value().getStringView())); + return Http::HeaderMap::Iterate::Continue; + }); return fuzz_headers; } diff --git a/test/integration/http_integration.cc b/test/integration/http_integration.cc index 3fb6e2627815..02ae0e96a690 100644 --- a/test/integration/http_integration.cc +++ b/test/integration/http_integration.cc @@ -342,16 +342,14 @@ void HttpIntegrationTest::verifyResponse(IntegrationStreamDecoderPtr response, const std::string& expected_body) { EXPECT_TRUE(response->complete()); EXPECT_EQ(response_code, response->headers().getStatusValue()); - expected_headers.iterate( - [](const Http::HeaderEntry& header, void* context) -> Http::HeaderMap::Iterate { - auto response_headers = static_cast(context); - const Http::HeaderEntry* entry = - response_headers->get(Http::LowerCaseString{std::string(header.key().getStringView())}); - EXPECT_NE(entry, nullptr); - EXPECT_EQ(header.value().getStringView(), entry->value().getStringView()); - return Http::HeaderMap::Iterate::Continue; - }, - const_cast(static_cast(&response->headers()))); + expected_headers.iterate([response_headers = &response->headers()]( + const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate { + const Http::HeaderEntry* entry = + response_headers->get(Http::LowerCaseString{std::string(header.key().getStringView())}); + EXPECT_NE(entry, nullptr); + EXPECT_EQ(header.value().getStringView(), entry->value().getStringView()); + return Http::HeaderMap::Iterate::Continue; + }); EXPECT_EQ(response->body(), expected_body); } diff --git a/test/mocks/http/mocks.h b/test/mocks/http/mocks.h index c196501f7c1f..afbad757fcd7 100644 --- a/test/mocks/http/mocks.h +++ b/test/mocks/http/mocks.h @@ -422,21 +422,18 @@ class HeaderValueOfMatcherImpl : public testing::MatcherInterface { testing::Matcher matcher) : key_(std::move(key)), matcher_(std::move(matcher)) {} + // NOLINTNEXTLINE(readability-identifier-naming) bool MatchAndExplain(HeaderMapT headers, testing::MatchResultListener* listener) const override { // Get all headers with matching keys. 
std::vector values; - std::pair*> context = - std::make_pair(key_.get(), &values); Envoy::Http::HeaderMap::ConstIterateCb get_headers_cb = - [](const Envoy::Http::HeaderEntry& header, void* context) { - auto* typed_context = - static_cast*>*>(context); - if (header.key().getStringView() == typed_context->first) { - typed_context->second->push_back(header.value().getStringView()); + [key = key_.get(), &values](const Envoy::Http::HeaderEntry& header) { + if (header.key().getStringView() == key) { + values.push_back(header.value().getStringView()); } return Envoy::Http::HeaderMap::Iterate::Continue; }; - headers.iterate(get_headers_cb, &context); + headers.iterate(get_headers_cb); if (values.empty()) { *listener << "which has no '" << key_.get() << "' header"; @@ -506,6 +503,14 @@ MATCHER_P(HttpStatusIs, expected_code, "") { return true; } +inline HeaderMap::ConstIterateCb +saveHeaders(std::vector>* output) { + return [output](const HeaderEntry& header) { + output->push_back(std::make_pair(header.key().getStringView(), header.value().getStringView())); + return HeaderMap::Iterate::Continue; + }; +} + template class IsSubsetOfHeadersMatcherImpl : public testing::MatcherInterface { public: @@ -518,17 +523,14 @@ class IsSubsetOfHeadersMatcherImpl : public testing::MatcherInterface>*>(headers) - ->push_back(std::make_pair(header.key().getStringView(), header.value().getStringView())); - return HeaderMap::Iterate::Continue; - }; std::vector> arg_headers_vec; - headers.iterate(get_headers_cb, &arg_headers_vec); + headers.iterate(saveHeaders(&arg_headers_vec)); + std::vector> expected_headers_vec; - expected_headers_.iterate(get_headers_cb, &expected_headers_vec); + expected_headers_.iterate(saveHeaders(&expected_headers_vec)); return ExplainMatchResult(testing::IsSubsetOf(expected_headers_vec), arg_headers_vec, listener); } @@ -573,17 +575,14 @@ class IsSupersetOfHeadersMatcherImpl : public testing::MatcherInterface>*>(headers) - ->push_back(std::make_pair(header.key().getStringView(), header.value().getStringView())); - return HeaderMap::Iterate::Continue; - }; std::vector> arg_headers_vec; - headers.iterate(get_headers_cb, &arg_headers_vec); + headers.iterate(saveHeaders(&arg_headers_vec)); + std::vector> expected_headers_vec; - expected_headers_.iterate(get_headers_cb, &expected_headers_vec); + expected_headers_.iterate(saveHeaders(&expected_headers_vec)); return ExplainMatchResult(testing::IsSupersetOf(expected_headers_vec), arg_headers_vec, listener); diff --git a/test/test_common/printers.cc b/test/test_common/printers.cc index c6573e80a585..8a2ece63ad13 100644 --- a/test/test_common/printers.cc +++ b/test/test_common/printers.cc @@ -8,15 +8,12 @@ namespace Envoy { namespace Http { +// NOLINTNEXTLINE(readability-identifier-naming) void PrintTo(const HeaderMapImpl& headers, std::ostream* os) { - headers.iterate( - [](const HeaderEntry& header, void* context) -> HeaderMap::Iterate { - std::ostream* os = static_cast(context); - *os << "{'" << header.key().getStringView() << "','" << header.value().getStringView() - << "'}"; - return HeaderMap::Iterate::Continue; - }, - os); + headers.iterate([os](const HeaderEntry& header) -> HeaderMap::Iterate { + *os << "{'" << header.key().getStringView() << "','" << header.value().getStringView() << "'}"; + return HeaderMap::Iterate::Continue; + }); } void PrintTo(const HeaderMapPtr& headers, std::ostream* os) { diff --git a/test/test_common/utility.cc b/test/test_common/utility.cc index 36e266db8e7c..b9a1adbf06e9 100644 --- a/test/test_common/utility.cc +++ 
b/test/test_common/utility.cc @@ -68,26 +68,18 @@ bool TestUtility::headerMapEqualIgnoreOrder(const Http::HeaderMap& lhs, return false; } - struct State { - const Http::HeaderMap& lhs; - bool equal; - }; - - State state{lhs, true}; - rhs.iterate( - [](const Http::HeaderEntry& header, void* context) -> Http::HeaderMap::Iterate { - State* state = static_cast(context); - const Http::HeaderEntry* entry = - state->lhs.get(Http::LowerCaseString(std::string(header.key().getStringView()))); - if (entry == nullptr || (entry->value() != header.value().getStringView())) { - state->equal = false; - return Http::HeaderMap::Iterate::Break; - } - return Http::HeaderMap::Iterate::Continue; - }, - &state); - - return state.equal; + bool equal = true; + rhs.iterate([&lhs, &equal](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate { + const Http::HeaderEntry* entry = + lhs.get(Http::LowerCaseString(std::string(header.key().getStringView()))); + if (entry == nullptr || (entry->value() != header.value().getStringView())) { + equal = false; + return Http::HeaderMap::Iterate::Break; + } + return Http::HeaderMap::Iterate::Continue; + }); + + return equal; } bool TestUtility::buffersEqual(const Buffer::Instance& lhs, const Buffer::Instance& rhs) { diff --git a/test/test_common/utility.h b/test/test_common/utility.h index aa01d2f6f35b..804f34b82219 100644 --- a/test/test_common/utility.h +++ b/test/test_common/utility.h @@ -907,11 +907,9 @@ template class TestHeaderMapImplBase : public Inte const HeaderEntry* get(const LowerCaseString& key) const override { return header_map_->get(key); } - void iterate(HeaderMap::ConstIterateCb cb, void* context) const override { - header_map_->iterate(cb, context); - } - void iterateReverse(HeaderMap::ConstIterateCb cb, void* context) const override { - header_map_->iterateReverse(cb, context); + void iterate(HeaderMap::ConstIterateCb cb) const override { header_map_->iterate(cb); } + void iterateReverse(HeaderMap::ConstIterateCb cb) const override { + header_map_->iterateReverse(cb); } void clear() override { header_map_->clear(); From 13da8f415c372e7fde3b0105f58f57348e84acb5 Mon Sep 17 00:00:00 2001 From: Florin Coras Date: Fri, 17 Jul 2020 11:08:53 -0700 Subject: [PATCH 667/909] network: add socket interface factory and config option (#11630) Add bootstrap config option that allows startup injection of a custom SocketInterface. 
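As a usage sketch (not part of this change), the new field pairs with the existing bootstrap_extensions list: the extension entry instantiates the socket interface factory at startup, and default_socket_interface then selects that factory by its registered name. Below, the v3 Bootstrap message is built in C++, reusing the default interface's own factory name from this patch purely as a stand-in for a custom one:

#include "envoy/config/bootstrap/v3/bootstrap.pb.h"
#include "envoy/extensions/network/socket_interface/v3/default_socket_interface.pb.h"

envoy::config::bootstrap::v3::Bootstrap makeBootstrap() {
  envoy::config::bootstrap::v3::Bootstrap bootstrap;

  // Register the socket interface as a bootstrap extension so its factory runs at startup.
  auto* extension = bootstrap.add_bootstrap_extensions();
  extension->set_name("envoy.extensions.network.socket_interface.default_socket_interface");
  extension->mutable_typed_config()->PackFrom(
      envoy::extensions::network::socket_interface::v3::DefaultSocketInterface());

  // Name the factory whose SocketInterface should replace the process-wide default.
  bootstrap.set_default_socket_interface(
      "envoy.extensions.network.socket_interface.default_socket_interface");
  return bootstrap;
}
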
Risk Level: Low Testing: n/a Docs Changes: n/a Release Notes: n/a Signed-off-by: Florin Coras --- api/BUILD | 1 + api/envoy/config/bootstrap/v3/bootstrap.proto | 6 +- .../config/bootstrap/v4alpha/bootstrap.proto | 6 +- .../network/socket_interface/v3/BUILD | 9 +++ .../v3/default_socket_interface.proto | 17 +++++ api/versioning/BUILD | 1 + .../common_messages/common_messages.rst | 1 + .../envoy/config/bootstrap/v3/bootstrap.proto | 6 +- .../config/bootstrap/v4alpha/bootstrap.proto | 6 +- .../network/socket_interface/v3/BUILD | 9 +++ .../v3/default_socket_interface.proto | 17 +++++ include/envoy/network/BUILD | 1 + include/envoy/network/socket.h | 2 + source/common/network/BUILD | 16 ++++- source/common/network/address_impl.cc | 2 +- source/common/network/socket_interface.h | 56 ++++++++++++++++ .../common/network/socket_interface_impl.cc | 14 ++++ source/common/network/socket_interface_impl.h | 17 +++-- source/server/BUILD | 1 + source/server/server.cc | 31 ++++++--- test/integration/BUILD | 11 +++ .../socket_interface_integration_test.cc | 67 +++++++++++++++++++ 22 files changed, 276 insertions(+), 21 deletions(-) create mode 100644 api/envoy/extensions/network/socket_interface/v3/BUILD create mode 100644 api/envoy/extensions/network/socket_interface/v3/default_socket_interface.proto create mode 100644 generated_api_shadow/envoy/extensions/network/socket_interface/v3/BUILD create mode 100644 generated_api_shadow/envoy/extensions/network/socket_interface/v3/default_socket_interface.proto create mode 100644 source/common/network/socket_interface.h create mode 100644 test/integration/socket_interface_integration_test.cc diff --git a/api/BUILD b/api/BUILD index 3bbdd21a64fa..9d4f802dfe5f 100644 --- a/api/BUILD +++ b/api/BUILD @@ -227,6 +227,7 @@ proto_library( "//envoy/extensions/internal_redirect/allow_listed_routes/v3:pkg", "//envoy/extensions/internal_redirect/previous_routes/v3:pkg", "//envoy/extensions/internal_redirect/safe_cross_scheme/v3:pkg", + "//envoy/extensions/network/socket_interface/v3:pkg", "//envoy/extensions/retry/host/omit_host_metadata/v3:pkg", "//envoy/extensions/retry/priority/previous_priorities/v3:pkg", "//envoy/extensions/transport_sockets/alts/v3:pkg", diff --git a/api/envoy/config/bootstrap/v3/bootstrap.proto b/api/envoy/config/bootstrap/v3/bootstrap.proto index 57a455444579..27c59bfc8cc9 100644 --- a/api/envoy/config/bootstrap/v3/bootstrap.proto +++ b/api/envoy/config/bootstrap/v3/bootstrap.proto @@ -39,7 +39,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // ` for more detail. // Bootstrap :ref:`configuration overview `. -// [#next-free-field: 24] +// [#next-free-field: 25] message Bootstrap { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v2.Bootstrap"; @@ -222,6 +222,10 @@ message Bootstrap { // other resolution fails. // [#not-implemented-hide:] core.v3.ConfigSource default_config_source = 23; + + // Optional overriding of default socket interface. 
The value must be the name of one of the + // socket interface factories initialized through a bootstrap extension + string default_socket_interface = 24; } // Administration interface :ref:`operations documentation diff --git a/api/envoy/config/bootstrap/v4alpha/bootstrap.proto b/api/envoy/config/bootstrap/v4alpha/bootstrap.proto index b5a4bef5f65e..41de2a875d2e 100644 --- a/api/envoy/config/bootstrap/v4alpha/bootstrap.proto +++ b/api/envoy/config/bootstrap/v4alpha/bootstrap.proto @@ -37,7 +37,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // ` for more detail. // Bootstrap :ref:`configuration overview `. -// [#next-free-field: 24] +// [#next-free-field: 25] message Bootstrap { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.Bootstrap"; @@ -213,6 +213,10 @@ message Bootstrap { // other resolution fails. // [#not-implemented-hide:] core.v4alpha.ConfigSource default_config_source = 23; + + // Optional overriding of default socket interface. The value must be the name of one of the + // socket interface factories initialized through a bootstrap extension + string default_socket_interface = 24; } // Administration interface :ref:`operations documentation diff --git a/api/envoy/extensions/network/socket_interface/v3/BUILD b/api/envoy/extensions/network/socket_interface/v3/BUILD new file mode 100644 index 000000000000..ef3541ebcb1d --- /dev/null +++ b/api/envoy/extensions/network/socket_interface/v3/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/api/envoy/extensions/network/socket_interface/v3/default_socket_interface.proto b/api/envoy/extensions/network/socket_interface/v3/default_socket_interface.proto new file mode 100644 index 000000000000..d2c747ec49fb --- /dev/null +++ b/api/envoy/extensions/network/socket_interface/v3/default_socket_interface.proto @@ -0,0 +1,17 @@ +syntax = "proto3"; + +package envoy.extensions.network.socket_interface.v3; + +import "udpa/annotations/status.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.network.socket_interface.v3"; +option java_outer_classname = "DefaultSocketInterfaceProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Default Socket Interface configuration] + +// Configuration for default socket interface that relies on OS dependent syscall to create +// sockets. 
+message DefaultSocketInterface { +} diff --git a/api/versioning/BUILD b/api/versioning/BUILD index 1d91b1724b1c..e00a0fbbb55d 100644 --- a/api/versioning/BUILD +++ b/api/versioning/BUILD @@ -110,6 +110,7 @@ proto_library( "//envoy/extensions/internal_redirect/allow_listed_routes/v3:pkg", "//envoy/extensions/internal_redirect/previous_routes/v3:pkg", "//envoy/extensions/internal_redirect/safe_cross_scheme/v3:pkg", + "//envoy/extensions/network/socket_interface/v3:pkg", "//envoy/extensions/retry/host/omit_host_metadata/v3:pkg", "//envoy/extensions/retry/priority/previous_priorities/v3:pkg", "//envoy/extensions/transport_sockets/alts/v3:pkg", diff --git a/docs/root/api-v3/common_messages/common_messages.rst b/docs/root/api-v3/common_messages/common_messages.rst index 59e40f63b7fb..ceff6d6681ee 100644 --- a/docs/root/api-v3/common_messages/common_messages.rst +++ b/docs/root/api-v3/common_messages/common_messages.rst @@ -20,3 +20,4 @@ Common messages ../config/core/v3/substitution_format_string.proto ../extensions/common/ratelimit/v3/ratelimit.proto ../extensions/filters/common/fault/v3/fault.proto + ../extensions/network/socket_interface/v3/default_socket_interface.proto diff --git a/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto b/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto index 26752b16ebdc..53b587c8bc0b 100644 --- a/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto +++ b/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto @@ -39,7 +39,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // ` for more detail. // Bootstrap :ref:`configuration overview `. -// [#next-free-field: 24] +// [#next-free-field: 25] message Bootstrap { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v2.Bootstrap"; @@ -221,6 +221,10 @@ message Bootstrap { // [#not-implemented-hide:] core.v3.ConfigSource default_config_source = 23; + // Optional overriding of default socket interface. The value must be the name of one of the + // socket interface factories initialized through a bootstrap extension + string default_socket_interface = 24; + Runtime hidden_envoy_deprecated_runtime = 11 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; } diff --git a/generated_api_shadow/envoy/config/bootstrap/v4alpha/bootstrap.proto b/generated_api_shadow/envoy/config/bootstrap/v4alpha/bootstrap.proto index f75d169486a5..6690c6cd0c30 100644 --- a/generated_api_shadow/envoy/config/bootstrap/v4alpha/bootstrap.proto +++ b/generated_api_shadow/envoy/config/bootstrap/v4alpha/bootstrap.proto @@ -38,7 +38,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // ` for more detail. // Bootstrap :ref:`configuration overview `. -// [#next-free-field: 24] +// [#next-free-field: 25] message Bootstrap { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.Bootstrap"; @@ -221,6 +221,10 @@ message Bootstrap { // other resolution fails. // [#not-implemented-hide:] core.v4alpha.ConfigSource default_config_source = 23; + + // Optional overriding of default socket interface. 
The value must be the name of one of the + // socket interface factories initialized through a bootstrap extension + string default_socket_interface = 24; } // Administration interface :ref:`operations documentation diff --git a/generated_api_shadow/envoy/extensions/network/socket_interface/v3/BUILD b/generated_api_shadow/envoy/extensions/network/socket_interface/v3/BUILD new file mode 100644 index 000000000000..ef3541ebcb1d --- /dev/null +++ b/generated_api_shadow/envoy/extensions/network/socket_interface/v3/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/generated_api_shadow/envoy/extensions/network/socket_interface/v3/default_socket_interface.proto b/generated_api_shadow/envoy/extensions/network/socket_interface/v3/default_socket_interface.proto new file mode 100644 index 000000000000..d2c747ec49fb --- /dev/null +++ b/generated_api_shadow/envoy/extensions/network/socket_interface/v3/default_socket_interface.proto @@ -0,0 +1,17 @@ +syntax = "proto3"; + +package envoy.extensions.network.socket_interface.v3; + +import "udpa/annotations/status.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.network.socket_interface.v3"; +option java_outer_classname = "DefaultSocketInterfaceProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Default Socket Interface configuration] + +// Configuration for default socket interface that relies on OS dependent syscall to create +// sockets. +message DefaultSocketInterface { +} diff --git a/include/envoy/network/BUILD b/include/envoy/network/BUILD index f4a342d26bb8..a9f4dc7bd739 100644 --- a/include/envoy/network/BUILD +++ b/include/envoy/network/BUILD @@ -96,6 +96,7 @@ envoy_cc_library( deps = [ ":address_interface", ":io_handle_interface", + "//include/envoy/config:typed_config_interface", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], ) diff --git a/include/envoy/network/socket.h b/include/envoy/network/socket.h index 0951f0a04617..ff558f7760b9 100644 --- a/include/envoy/network/socket.h +++ b/include/envoy/network/socket.h @@ -271,5 +271,7 @@ class SocketInterface { virtual bool ipFamilySupported(int domain) PURE; }; +using SocketInterfacePtr = std::unique_ptr; + } // namespace Network } // namespace Envoy \ No newline at end of file diff --git a/source/common/network/BUILD b/source/common/network/BUILD index e97330223d37..95d412a0a71a 100644 --- a/source/common/network/BUILD +++ b/source/common/network/BUILD @@ -17,10 +17,10 @@ envoy_cc_library( hdrs = [ "address_impl.h", "io_socket_handle_impl.h", - "socket_interface_impl.h", ], deps = [ ":io_socket_error_lib", + ":socket_interface_lib", "//include/envoy/buffer:buffer_interface", "//include/envoy/network:address_interface", "//include/envoy/network:io_handle_interface", @@ -172,6 +172,17 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "socket_interface_lib", + hdrs = ["socket_interface.h"], + deps = [ + "//include/envoy/config:typed_config_interface", + "//include/envoy/network:socket_interface", + "//include/envoy/registry", + "//include/envoy/server:bootstrap_extension_config_interface", + ], +) + envoy_cc_library( name = "socket_lib", srcs = [ @@ -184,10 +195,11 @@ envoy_cc_library( ], deps = [ ":address_lib", + ":socket_interface_lib", 
"//include/envoy/network:socket_interface", "//source/common/common:assert_lib", "//source/common/common:utility_lib", - "//source/common/singleton:threadsafe_singleton", + "@envoy_api//envoy/extensions/network/socket_interface/v3:pkg_cc_proto", ], ) diff --git a/source/common/network/address_impl.cc b/source/common/network/address_impl.cc index 1f3dcbb3fb74..5b4d57e55bd4 100644 --- a/source/common/network/address_impl.cc +++ b/source/common/network/address_impl.cc @@ -10,7 +10,7 @@ #include "common/common/assert.h" #include "common/common/fmt.h" #include "common/common/utility.h" -#include "common/network/socket_interface_impl.h" +#include "common/network/socket_interface.h" namespace Envoy { namespace Network { diff --git a/source/common/network/socket_interface.h b/source/common/network/socket_interface.h new file mode 100644 index 000000000000..66d717857500 --- /dev/null +++ b/source/common/network/socket_interface.h @@ -0,0 +1,56 @@ +#pragma once + +#include "envoy/config/typed_config.h" +#include "envoy/network/socket.h" +#include "envoy/registry/registry.h" +#include "envoy/server/bootstrap_extension_config.h" + +#include "common/singleton/threadsafe_singleton.h" + +#include "absl/container/flat_hash_map.h" + +namespace Envoy { +namespace Network { + +// Wrapper for SocketInterface instances returned by createBootstrapExtension() which must be +// implemented by all factories that derive SocketInterfaceBase +class SocketInterfaceExtension : public Server::BootstrapExtension { +public: + SocketInterfaceExtension(SocketInterface& sock_interface) : sock_interface_(sock_interface) {} + SocketInterface& socketInterface() { return sock_interface_; } + +private: + SocketInterface& sock_interface_; +}; + +// Class to be derived by all SocketInterface implementations. +// +// It acts both as a SocketInterface and as a BootstrapExtensionFactory. The latter is used, on the +// one hand, to configure and initialize the interface and, on the other, for SocketInterface lookup +// by leveraging the FactoryRegistry. As required for all bootstrap extensions, all derived classes +// should register via the REGISTER_FACTORY() macro as BootstrapExtensionFactory. +// +// SocketInterface instances can be retrieved using the factory name, i.e., string returned by +// name() function implemented by all classes that derive SocketInterfaceBase, via +// Network::socketInterface(). When instantiating addresses, address resolvers should +// set the socket interface field to the name of the socket interface implementation that should +// be used to create sockets for said addresses. 
+class SocketInterfaceBase : public SocketInterface, + public Server::Configuration::BootstrapExtensionFactory {}; + +/** + * Lookup SocketInterface instance by name + * @param name Name of the socket interface to be looked up + * @return Pointer to @ref SocketInterface instance that registered using the name of nullptr + */ +static inline const SocketInterface* socketInterface(std::string name) { + auto factory = + Registry::FactoryRegistry::getFactory(name); + return dynamic_cast(factory); +} + +using SocketInterfaceSingleton = InjectableSingleton; +using SocketInterfaceLoader = ScopedInjectableLoader; + +} // namespace Network +} // namespace Envoy \ No newline at end of file diff --git a/source/common/network/socket_interface_impl.cc b/source/common/network/socket_interface_impl.cc index acdc8214c55d..ba0b4a9b3fc5 100644 --- a/source/common/network/socket_interface_impl.cc +++ b/source/common/network/socket_interface_impl.cc @@ -1,6 +1,7 @@ #include "common/network/socket_interface_impl.h" #include "envoy/common/exception.h" +#include "envoy/extensions/network/socket_interface/v3/default_socket_interface.pb.h" #include "envoy/network/socket.h" #include "common/api/os_sys_calls_impl.h" @@ -81,6 +82,19 @@ bool SocketInterfaceImpl::ipFamilySupported(int domain) { return SOCKET_VALID(result.rc_); } +Server::BootstrapExtensionPtr +SocketInterfaceImpl::createBootstrapExtension(const Protobuf::Message&, + Server::Configuration::ServerFactoryContext&) { + return std::make_unique(*this); +} + +ProtobufTypes::MessagePtr SocketInterfaceImpl::createEmptyConfigProto() { + return std::make_unique< + envoy::extensions::network::socket_interface::v3::DefaultSocketInterface>(); +} + +REGISTER_FACTORY(SocketInterfaceImpl, Server::Configuration::BootstrapExtensionFactory); + static SocketInterfaceLoader* socket_interface_ = new SocketInterfaceLoader(std::make_unique()); diff --git a/source/common/network/socket_interface_impl.h b/source/common/network/socket_interface_impl.h index 88798559844a..034aea25feaf 100644 --- a/source/common/network/socket_interface_impl.h +++ b/source/common/network/socket_interface_impl.h @@ -3,22 +3,31 @@ #include "envoy/network/address.h" #include "envoy/network/socket.h" -#include "common/singleton/threadsafe_singleton.h" +#include "common/network/socket_interface.h" namespace Envoy { namespace Network { -class SocketInterfaceImpl : public SocketInterface { +class SocketInterfaceImpl : public SocketInterfaceBase { public: + // SocketInterface IoHandlePtr socket(Socket::Type socket_type, Address::Type addr_type, Address::IpVersion version) override; IoHandlePtr socket(Socket::Type socket_type, const Address::InstanceConstSharedPtr addr) override; IoHandlePtr socket(os_fd_t fd) override; bool ipFamilySupported(int domain) override; + + // Server::Configuration::BootstrapExtensionFactory + Server::BootstrapExtensionPtr + createBootstrapExtension(const Protobuf::Message& config, + Server::Configuration::ServerFactoryContext& context) override; + ProtobufTypes::MessagePtr createEmptyConfigProto() override; + std::string name() const override { + return "envoy.extensions.network.socket_interface.default_socket_interface"; + }; }; -using SocketInterfaceSingleton = InjectableSingleton; -using SocketInterfaceLoader = ScopedInjectableLoader; +DECLARE_FACTORY(SocketInterfaceImpl); } // namespace Network } // namespace Envoy \ No newline at end of file diff --git a/source/server/BUILD b/source/server/BUILD index 33266f13f3f8..10ed76a2d077 100644 --- a/source/server/BUILD +++ 
b/source/server/BUILD @@ -46,6 +46,7 @@ envoy_cc_library( "//source/common/config:runtime_utility_lib", "//source/common/config:utility_lib", "//source/common/network:resolver_lib", + "//source/common/network:socket_interface_lib", "//source/common/network:socket_option_factory_lib", "//source/common/network:socket_option_lib", "//source/common/network:utility_lib", diff --git a/source/server/server.cc b/source/server/server.cc index 80c48c279c06..b91177ef38bd 100644 --- a/source/server/server.cc +++ b/source/server/server.cc @@ -35,6 +35,8 @@ #include "common/memory/stats.h" #include "common/network/address_impl.h" #include "common/network/listener_impl.h" +#include "common/network/socket_interface.h" +#include "common/network/socket_interface_impl.h" #include "common/protobuf/utility.h" #include "common/router/rds_impl.h" #include "common/runtime/runtime_impl.h" @@ -395,6 +397,25 @@ void InstanceImpl::initialize(const Options& options, heap_shrinker_ = std::make_unique(*dispatcher_, *overload_manager_, stats_store_); + for (const auto& bootstrap_extension : bootstrap_.bootstrap_extensions()) { + auto& factory = Config::Utility::getAndCheckFactory( + bootstrap_extension); + auto config = Config::Utility::translateAnyToFactoryConfig( + bootstrap_extension.typed_config(), messageValidationContext().staticValidationVisitor(), + factory); + bootstrap_extensions_.push_back( + factory.createBootstrapExtension(*config, serverFactoryContext())); + } + + if (!bootstrap_.default_socket_interface().empty()) { + auto& sock_name = bootstrap_.default_socket_interface(); + auto sock = const_cast(Network::socketInterface(sock_name)); + if (sock != nullptr) { + Network::SocketInterfaceSingleton::clear(); + Network::SocketInterfaceSingleton::initialize(sock); + } + } + // Workers get created first so they register for thread local updates. listener_manager_ = std::make_unique( *this, listener_component_factory_, worker_factory_, bootstrap_.enable_dispatcher_stats()); @@ -493,16 +514,6 @@ void InstanceImpl::initialize(const Options& options, // GuardDog (deadlock detection) object and thread setup before workers are // started and before our own run() loop runs. 
guard_dog_ = std::make_unique(stats_store_, config_, *api_); - - for (const auto& bootstrap_extension : bootstrap_.bootstrap_extensions()) { - auto& factory = Config::Utility::getAndCheckFactory( - bootstrap_extension); - auto config = Config::Utility::translateAnyToFactoryConfig( - bootstrap_extension.typed_config(), messageValidationContext().staticValidationVisitor(), - factory); - bootstrap_extensions_.push_back( - factory.createBootstrapExtension(*config, serverFactoryContext())); - } } void InstanceImpl::onClusterManagerPrimaryInitializationComplete() { diff --git a/test/integration/BUILD b/test/integration/BUILD index b08549380079..40bcd665e111 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -744,6 +744,17 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "socket_interface_integration_test", + srcs = ["socket_interface_integration_test.cc"], + tags = ["fails_on_windows"], + deps = [ + ":http_integration_lib", + "//source/common/network:socket_interface_lib", + "//source/extensions/filters/network/echo:config", + ], +) + envoy_cc_test( name = "stats_integration_test", srcs = ["stats_integration_test.cc"], diff --git a/test/integration/socket_interface_integration_test.cc b/test/integration/socket_interface_integration_test.cc new file mode 100644 index 000000000000..c2b798c9801e --- /dev/null +++ b/test/integration/socket_interface_integration_test.cc @@ -0,0 +1,67 @@ +#include "common/network/socket_interface.h" + +#include "test/integration/integration.h" +#include "test/test_common/environment.h" +#include "test/test_common/utility.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace { + +class SocketInterfaceIntegrationTest : public BaseIntegrationTest, + public testing::TestWithParam { +public: + SocketInterfaceIntegrationTest() : BaseIntegrationTest(GetParam(), config()) { + use_lds_ = false; + }; + + static std::string config() { + // At least one empty filter chain needs to be specified. 
+ return absl::StrCat(echoConfig(), R"EOF( bootstrap_extensions: - name: envoy.extensions.network.socket_interface.default_socket_interface typed_config: "@type": type.googleapis.com/envoy.extensions.network.socket_interface.v3.DefaultSocketInterface default_socket_interface: "envoy.extensions.network.socket_interface.default_socket_interface" )EOF"); } static std::string echoConfig() { return absl::StrCat(ConfigHelper::baseConfig(), R"EOF( filter_chains: filters: name: ratelimit typed_config: "@type": type.googleapis.com/envoy.config.filter.network.rate_limit.v2.RateLimit domain: foo stats_prefix: name descriptors: [{"key": "foo", "value": "bar"}] filters: name: envoy.filters.network.echo )EOF"); } }; INSTANTIATE_TEST_SUITE_P(IpVersions, SocketInterfaceIntegrationTest, testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), TestUtility::ipTestParamsToString); TEST_P(SocketInterfaceIntegrationTest, Basic) { BaseIntegrationTest::initialize(); const Network::SocketInterface* factory = Network::socketInterface( "envoy.extensions.network.socket_interface.default_socket_interface"); ASSERT_TRUE(Network::SocketInterfaceSingleton::getExisting() == factory); std::string response; auto connection = createConnectionDriver( lookupPort("listener_0"), "hello", [&response](Network::ClientConnection& conn, const Buffer::Instance& data) -> void { response.append(data.toString()); conn.close(Network::ConnectionCloseType::FlushWrite); }); connection->run(); EXPECT_EQ("hello", response); } } // namespace } // namespace Envoy \ No newline at end of file From bb53b8af4c7bf436ab7eeaaf7f2dcf9d4039e49d Mon Sep 17 00:00:00 2001 From: Pengyuan Bian Date: Fri, 17 Jul 2020 14:11:16 -0700 Subject: [PATCH 668/909] Make opencensus Stackdriver exporter respect initial_metadata option (#11831) Currently the OpenCensus tracer uses the grpcService proto to configure its tracing client stub. It uses GoogleGrpcUtils to construct a channel and creates the client stub with it. However, the channel created there does not apply the initial_metadata from the grpcService configuration. This change fills in the OpenCensus prepare_client_context option in the exporter so that it respects the initial metadata.
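For illustration, a minimal tracing configuration sketch of where the forwarded metadata comes from (field layout follows the OpenCensusConfig and GrpcService protos; the target URI and the metadata key/value below are placeholders, not part of this change):

  stackdriver_exporter_enabled: true
  stackdriver_grpc_service:
    google_grpc:
      target_uri: cloudtrace.googleapis.com
      stat_prefix: stackdriver
    initial_metadata:
    - key: x-goog-user-project
      value: my-gcp-project

Each key/value pair is added to the grpc::ClientContext of every export RPC via the exporter's prepare_client_context hook.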
Risk level: Low Signed-off-by: Pengyuan Bian --- bazel/repository_locations.bzl | 8 ++++---- .../tracers/opencensus/opencensus_tracer_impl.cc | 10 ++++++++++ test/extensions/tracers/opencensus/config_test.cc | 3 +++ test/per_file_coverage.sh | 4 ++-- 4 files changed, 19 insertions(+), 6 deletions(-) diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index eee90bc45c46..5c098ce04975 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -385,10 +385,10 @@ DEPENDENCY_REPOSITORIES = dict( use_category = ["other"], ), io_opencensus_cpp = dict( - sha256 = "193ffb4e13bd7886757fd22b61b7f7a400634412ad8e7e1071e73f57bedd7fc6", - strip_prefix = "opencensus-cpp-04ed0211931f12b03c1a76b3907248ca4db7bc90", - # 2020-03-24 - urls = ["https://github.com/census-instrumentation/opencensus-cpp/archive/04ed0211931f12b03c1a76b3907248ca4db7bc90.tar.gz"], + sha256 = "12ff300fa804f97bd07e2ff071d969e09d5f3d7bbffeac438c725fa52a51a212", + strip_prefix = "opencensus-cpp-7877337633466358ed680f9b26967da5b310d7aa", + # 2020-06-01 + urls = ["https://github.com/census-instrumentation/opencensus-cpp/archive/7877337633466358ed680f9b26967da5b310d7aa.tar.gz"], use_category = ["observability"], cpe = "N/A", ), diff --git a/source/extensions/tracers/opencensus/opencensus_tracer_impl.cc b/source/extensions/tracers/opencensus/opencensus_tracer_impl.cc index 53e82591350f..c836bcc49ba6 100644 --- a/source/extensions/tracers/opencensus/opencensus_tracer_impl.cc +++ b/source/extensions/tracers/opencensus/opencensus_tracer_impl.cc @@ -281,7 +281,17 @@ Driver::Driver(const envoy::config::trace::v3::OpenCensusConfig& oc_config, stackdriver_service.mutable_google_grpc()->set_target_uri(GoogleStackdriverTraceAddress); } auto channel = Envoy::Grpc::GoogleGrpcUtils::createChannel(stackdriver_service, api); + // TODO(bianpengyuan): add tests for trace_service_stub and initial_metadata options with mock + // stubs. 
opts.trace_service_stub = ::google::devtools::cloudtrace::v2::TraceService::NewStub(channel); + const auto& initial_metadata = stackdriver_service.initial_metadata(); + if (!initial_metadata.empty()) { + opts.prepare_client_context = [initial_metadata](grpc::ClientContext* ctx) { + for (const auto& metadata : initial_metadata) { + ctx->AddMetadata(metadata.key(), metadata.value()); + } + }; + } #else throw EnvoyException("Opencensus tracer: cannot handle stackdriver google grpc service, " "google grpc is not built in."); diff --git a/test/extensions/tracers/opencensus/config_test.cc b/test/extensions/tracers/opencensus/config_test.cc index c27186c9b675..227ed0d353c8 100644 --- a/test/extensions/tracers/opencensus/config_test.cc +++ b/test/extensions/tracers/opencensus/config_test.cc @@ -323,6 +323,9 @@ TEST(OpenCensusTracerConfigTest, OpenCensusHttpTracerStackdriverGrpc) { google_grpc: target_uri: 127.0.0.1:55678 stat_prefix: test + initial_metadata: + - key: foo + value: bar )EOF"; envoy::config::trace::v3::Tracing configuration; diff --git a/test/per_file_coverage.sh b/test/per_file_coverage.sh index e06bf489cb07..b71a45f126df 100755 --- a/test/per_file_coverage.sh +++ b/test/per_file_coverage.sh @@ -43,8 +43,8 @@ declare -a KNOWN_LOW_COVERAGE=( "source/extensions/retry:95.5" "source/extensions/retry/host:85.7" "source/extensions/stat_sinks/statsd:85.2" -"source/extensions/tracers:96.5" -"source/extensions/tracers/opencensus:92.4" +"source/extensions/tracers:96.3" +"source/extensions/tracers/opencensus:91.2" "source/extensions/tracers/xray:95.3" "source/extensions/transport_sockets:94.9" "source/extensions/transport_sockets/tap:95.6" From 9a5de98bf26280a3b57882413dfb9084c5dbb2a6 Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Fri, 17 Jul 2020 14:13:10 -0700 Subject: [PATCH 669/909] refactor version to source/common/version (#12141) Pulling out version to a separate package Risk Level: Low Signed-off-by: Lizan Zhou --- api/envoy/api/v2/core/base.proto | 2 +- api/envoy/config/core/v3/base.proto | 2 +- api/envoy/config/core/v4alpha/base.proto | 2 +- .../envoy/api/v2/core/base.proto | 2 +- .../envoy/config/core/v3/base.proto | 2 +- .../envoy/config/core/v4alpha/base.proto | 2 +- source/common/common/BUILD | 66 ---------------- source/common/signal/signal_action.cc | 2 +- source/common/version/BUILD | 75 +++++++++++++++++++ .../generate_version_linkstamp.sh | 0 source/common/{common => version}/version.cc | 4 +- source/common/{common => version}/version.h | 2 +- .../{common => version}/version_linkstamp.cc | 0 source/extensions/tracers/datadog/BUILD | 2 +- .../tracers/datadog/datadog_tracer_impl.cc | 2 +- source/server/BUILD | 6 +- source/server/admin/BUILD | 2 +- source/server/admin/server_info_handler.cc | 2 +- source/server/backtrace.h | 2 +- source/server/config_validation/BUILD | 2 +- source/server/config_validation/server.cc | 2 +- source/server/options_impl.cc | 2 +- source/server/server.cc | 2 +- test/common/common/BUILD | 2 +- test/common/common/version_test.cc | 2 +- .../http_grpc_access_log_integration_test.cc | 2 +- .../tcp_grpc_access_log_integration_test.cc | 2 +- .../metrics_service_integration_test.cc | 2 +- test/integration/BUILD | 2 +- test/integration/ads_integration_test.cc | 2 +- test/server/BUILD | 2 +- test/server/server_test.cc | 2 +- tools/code_format/check_format.py | 2 +- tools/testdata/check_format/header_order.cc | 4 +- .../check_format/header_order.cc.gold | 4 +- 35 files changed, 111 insertions(+), 102 deletions(-) create mode 100644 source/common/version/BUILD 
rename source/common/{common => version}/generate_version_linkstamp.sh (100%) rename source/common/{common => version}/version.cc (97%) rename source/common/{common => version}/version.h (96%) rename source/common/{common => version}/version_linkstamp.cc (100%) diff --git a/api/envoy/api/v2/core/base.proto b/api/envoy/api/v2/core/base.proto index b7145d77efd3..39846bc658a8 100644 --- a/api/envoy/api/v2/core/base.proto +++ b/api/envoy/api/v2/core/base.proto @@ -93,7 +93,7 @@ message BuildVersion { type.SemanticVersion version = 1; // Free-form build information. - // Envoy defines several well known keys in the source/common/common/version.h file + // Envoy defines several well known keys in the source/common/version/version.h file google.protobuf.Struct metadata = 2; } diff --git a/api/envoy/config/core/v3/base.proto b/api/envoy/config/core/v3/base.proto index 6175e585d708..4509c1662567 100644 --- a/api/envoy/config/core/v3/base.proto +++ b/api/envoy/config/core/v3/base.proto @@ -95,7 +95,7 @@ message BuildVersion { type.v3.SemanticVersion version = 1; // Free-form build information. - // Envoy defines several well known keys in the source/common/common/version.h file + // Envoy defines several well known keys in the source/common/version/version.h file google.protobuf.Struct metadata = 2; } diff --git a/api/envoy/config/core/v4alpha/base.proto b/api/envoy/config/core/v4alpha/base.proto index 29364d51b5b8..d7b5fd5836ff 100644 --- a/api/envoy/config/core/v4alpha/base.proto +++ b/api/envoy/config/core/v4alpha/base.proto @@ -94,7 +94,7 @@ message BuildVersion { type.v3.SemanticVersion version = 1; // Free-form build information. - // Envoy defines several well known keys in the source/common/common/version.h file + // Envoy defines several well known keys in the source/common/version/version.h file google.protobuf.Struct metadata = 2; } diff --git a/generated_api_shadow/envoy/api/v2/core/base.proto b/generated_api_shadow/envoy/api/v2/core/base.proto index b7145d77efd3..39846bc658a8 100644 --- a/generated_api_shadow/envoy/api/v2/core/base.proto +++ b/generated_api_shadow/envoy/api/v2/core/base.proto @@ -93,7 +93,7 @@ message BuildVersion { type.SemanticVersion version = 1; // Free-form build information. - // Envoy defines several well known keys in the source/common/common/version.h file + // Envoy defines several well known keys in the source/common/version/version.h file google.protobuf.Struct metadata = 2; } diff --git a/generated_api_shadow/envoy/config/core/v3/base.proto b/generated_api_shadow/envoy/config/core/v3/base.proto index af93ab8e9a09..9b2db0d99fda 100644 --- a/generated_api_shadow/envoy/config/core/v3/base.proto +++ b/generated_api_shadow/envoy/config/core/v3/base.proto @@ -95,7 +95,7 @@ message BuildVersion { type.v3.SemanticVersion version = 1; // Free-form build information. - // Envoy defines several well known keys in the source/common/common/version.h file + // Envoy defines several well known keys in the source/common/version/version.h file google.protobuf.Struct metadata = 2; } diff --git a/generated_api_shadow/envoy/config/core/v4alpha/base.proto b/generated_api_shadow/envoy/config/core/v4alpha/base.proto index 29364d51b5b8..d7b5fd5836ff 100644 --- a/generated_api_shadow/envoy/config/core/v4alpha/base.proto +++ b/generated_api_shadow/envoy/config/core/v4alpha/base.proto @@ -94,7 +94,7 @@ message BuildVersion { type.v3.SemanticVersion version = 1; // Free-form build information. 
- // Envoy defines several well known keys in the source/common/common/version.h file + // Envoy defines several well known keys in the source/common/version/version.h file google.protobuf.Struct metadata = 2; } diff --git a/source/common/common/BUILD b/source/common/common/BUILD index ac2e6430ef66..05758f4fe50e 100644 --- a/source/common/common/BUILD +++ b/source/common/common/BUILD @@ -7,7 +7,6 @@ load( "envoy_cc_win32_library", "envoy_include_prefix", "envoy_package", - "envoy_select_boringssl", ) licenses(["notice"]) # Apache 2 @@ -358,71 +357,6 @@ envoy_cc_library( ], ) -genrule( - name = "generate_version_number", - srcs = ["//:VERSION"], - outs = ["version_number.h"], - cmd = """echo "#define BUILD_VERSION_NUMBER \\"$$(cat $<)\\"" >$@""", -) - -genrule( - name = "generate_version_linkstamp", - outs = ["lib/version_linkstamp.h"], - cmd = select({ - # Only iOS builds typically follow this logic, OS/X is built as a normal binary - "//bazel:apple": "$(location :generate_version_linkstamp.sh) Library >> $@", - "//conditions:default": "$(location :generate_version_linkstamp.sh) >> $@", - }), - # Undocumented attr to depend on workspace status files. - # https://github.com/bazelbuild/bazel/issues/4942 - # Used here because generate_version_linkstamp.sh depends on the workspace status files. - stamp = 1, - tools = [":generate_version_linkstamp.sh"], -) - -genrule( - name = "generate_version_linkstamp_empty", - outs = ["empty/version_linkstamp.h"], - cmd = """>$@""", -) - -envoy_cc_library( - name = "version_includes", - hdrs = [ - "version.h", - ":generate_version_number", - ], - deps = [ - "//source/common/singleton:const_singleton", - "@envoy_api//envoy/config/core/v3:pkg_cc_proto", - ], -) - -envoy_cc_library( - name = "version_lib", - srcs = ["version.cc"], - hdrs = select({ - "//bazel:manual_stamp": [":generate_version_linkstamp"], - # By default the header file is empty. - # This is done so that the definitions linked via the linkstamp rule don't cause collisions. 
- "//conditions:default": [":generate_version_linkstamp_empty"], - }), - copts = envoy_select_boringssl( - ["-DENVOY_SSL_VERSION=\\\"BoringSSL-FIPS\\\""], - ["-DENVOY_SSL_VERSION=\\\"BoringSSL\\\""], - ), - linkstamp = "version_linkstamp.cc", - strip_include_prefix = select({ - "//bazel:manual_stamp": "lib", - "//conditions:default": "empty", - }), - deps = [ - ":version_includes", - "//source/common/common:macros", - "//source/common/protobuf:utility_lib", - ], -) - envoy_cc_library( name = "callback_impl_lib", hdrs = ["callback_impl.h"], diff --git a/source/common/signal/signal_action.cc b/source/common/signal/signal_action.cc index 1b9e5cf78fa6..11797843cd17 100644 --- a/source/common/signal/signal_action.cc +++ b/source/common/signal/signal_action.cc @@ -5,7 +5,7 @@ #include #include "common/common/assert.h" -#include "common/common/version.h" +#include "common/version/version.h" namespace Envoy { diff --git a/source/common/version/BUILD b/source/common/version/BUILD new file mode 100644 index 000000000000..0b05efd9a4f4 --- /dev/null +++ b/source/common/version/BUILD @@ -0,0 +1,75 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", + "envoy_select_boringssl", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +genrule( + name = "generate_version_number", + srcs = ["//:VERSION"], + outs = ["version_number.h"], + cmd = """echo "#define BUILD_VERSION_NUMBER \\"$$(cat $<)\\"" >$@""", +) + +genrule( + name = "generate_version_linkstamp", + outs = ["lib/version_linkstamp.h"], + cmd = select({ + # Only iOS builds typically follow this logic, OS/X is built as a normal binary + "//bazel:apple": "$(location :generate_version_linkstamp.sh) Library >> $@", + "//conditions:default": "$(location :generate_version_linkstamp.sh) >> $@", + }), + # Undocumented attr to depend on workspace status files. + # https://github.com/bazelbuild/bazel/issues/4942 + # Used here because generate_version_linkstamp.sh depends on the workspace status files. + stamp = 1, + tools = [":generate_version_linkstamp.sh"], +) + +genrule( + name = "generate_version_linkstamp_empty", + outs = ["empty/version_linkstamp.h"], + cmd = """>$@""", +) + +envoy_cc_library( + name = "version_includes", + hdrs = [ + "version.h", + ":generate_version_number", + ], + deps = [ + "//source/common/singleton:const_singleton", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + ], +) + +envoy_cc_library( + name = "version_lib", + srcs = ["version.cc"], + hdrs = select({ + "//bazel:manual_stamp": [":generate_version_linkstamp"], + # By default the header file is empty. + # This is done so that the definitions linked via the linkstamp rule don't cause collisions. 
+ "//conditions:default": [":generate_version_linkstamp_empty"], + }), + copts = envoy_select_boringssl( + ["-DENVOY_SSL_VERSION=\\\"BoringSSL-FIPS\\\""], + ["-DENVOY_SSL_VERSION=\\\"BoringSSL\\\""], + ), + linkstamp = "version_linkstamp.cc", + strip_include_prefix = select({ + "//bazel:manual_stamp": "lib", + "//conditions:default": "empty", + }), + deps = [ + ":version_includes", + "//source/common/common:macros", + "//source/common/protobuf:utility_lib", + ], +) diff --git a/source/common/common/generate_version_linkstamp.sh b/source/common/version/generate_version_linkstamp.sh similarity index 100% rename from source/common/common/generate_version_linkstamp.sh rename to source/common/version/generate_version_linkstamp.sh diff --git a/source/common/common/version.cc b/source/common/version/version.cc similarity index 97% rename from source/common/common/version.cc rename to source/common/version/version.cc index 1e930e61a3ec..7bc95c5c8c38 100644 --- a/source/common/common/version.cc +++ b/source/common/version/version.cc @@ -1,4 +1,4 @@ -#include "common/common/version.h" +#include "common/version/version.h" #include #include @@ -6,8 +6,8 @@ #include "common/common/fmt.h" #include "common/common/macros.h" -#include "common/common/version_linkstamp.h" #include "common/protobuf/utility.h" +#include "common/version/version_linkstamp.h" #include "absl/strings/numbers.h" #include "absl/strings/str_split.h" diff --git a/source/common/common/version.h b/source/common/version/version.h similarity index 96% rename from source/common/common/version.h rename to source/common/version/version.h index 6189f8801af2..a5720105ef85 100644 --- a/source/common/common/version.h +++ b/source/common/version/version.h @@ -4,8 +4,8 @@ #include "envoy/config/core/v3/base.pb.h" -#include "common/common/version_number.h" #include "common/singleton/const_singleton.h" +#include "common/version/version_number.h" namespace Envoy { diff --git a/source/common/common/version_linkstamp.cc b/source/common/version/version_linkstamp.cc similarity index 100% rename from source/common/common/version_linkstamp.cc rename to source/common/version/version_linkstamp.cc diff --git a/source/extensions/tracers/datadog/BUILD b/source/extensions/tracers/datadog/BUILD index bd35a9a25431..d09597b5eaf9 100644 --- a/source/extensions/tracers/datadog/BUILD +++ b/source/extensions/tracers/datadog/BUILD @@ -21,11 +21,11 @@ envoy_cc_library( ], external_deps = ["dd_opentracing_cpp"], deps = [ - "//source/common/common:version_lib", "//source/common/config:utility_lib", "//source/common/http:async_client_utility_lib", "//source/common/tracing:http_tracer_lib", "//source/common/upstream:cluster_update_tracker_lib", + "//source/common/version:version_lib", "//source/extensions/tracers:well_known_names", "//source/extensions/tracers/common/ot:opentracing_driver_lib", "@envoy_api//envoy/config/trace/v3:pkg_cc_proto", diff --git a/source/extensions/tracers/datadog/datadog_tracer_impl.cc b/source/extensions/tracers/datadog/datadog_tracer_impl.cc index 1f56b1718528..a2be59579ca8 100644 --- a/source/extensions/tracers/datadog/datadog_tracer_impl.cc +++ b/source/extensions/tracers/datadog/datadog_tracer_impl.cc @@ -5,11 +5,11 @@ #include "common/common/enum_to_int.h" #include "common/common/fmt.h" #include "common/common/utility.h" -#include "common/common/version.h" #include "common/config/utility.h" #include "common/http/message_impl.h" #include "common/http/utility.h" #include "common/tracing/http_tracer_impl.h" +#include "common/version/version.h" 
#include "extensions/tracers/well_known_names.h" diff --git a/source/server/BUILD b/source/server/BUILD index 10ed76a2d077..fb1efa139ec4 100644 --- a/source/server/BUILD +++ b/source/server/BUILD @@ -21,7 +21,7 @@ envoy_cc_library( tags = ["backtrace"], deps = [ "//source/common/common:minimal_logger_lib", - "//source/common/common:version_lib", + "//source/common/version:version_lib", ], ) @@ -234,9 +234,9 @@ envoy_cc_library( "//source/common/api:os_sys_calls_lib", "//source/common/common:logger_lib", "//source/common/common:macros", - "//source/common/common:version_lib", "//source/common/protobuf:utility_lib", "//source/common/stats:stats_lib", + "//source/common/version:version_lib", "@envoy_api//envoy/config/bootstrap/v2:pkg_cc_proto", ], ) @@ -420,7 +420,6 @@ envoy_cc_library( "//source/common/common:logger_lib", "//source/common/common:mutex_tracer_lib", "//source/common/common:utility_lib", - "//source/common/common:version_lib", "//source/common/config:utility_lib", "//source/common/grpc:async_client_manager_lib", "//source/common/grpc:context_lib", @@ -439,6 +438,7 @@ envoy_cc_library( "//source/common/stats:thread_local_store_lib", "//source/common/upstream:cluster_manager_lib", "//source/common/upstream:health_discovery_service_lib", + "//source/common/version:version_lib", "//source/server:overload_manager_lib", "//source/server/admin:admin_lib", "@envoy_api//envoy/admin/v3:pkg_cc_proto", diff --git a/source/server/admin/BUILD b/source/server/admin/BUILD index 2fb1cd56b1ac..370d803b0fab 100644 --- a/source/server/admin/BUILD +++ b/source/server/admin/BUILD @@ -220,10 +220,10 @@ envoy_cc_library( "//include/envoy/server:admin_interface", "//include/envoy/server:instance_interface", "//source/common/buffer:buffer_lib", - "//source/common/common:version_includes", "//source/common/http:codes_lib", "//source/common/http:header_map_lib", "//source/common/memory:stats_lib", + "//source/common/version:version_includes", "@envoy_api//envoy/admin/v3:pkg_cc_proto", ], ) diff --git a/source/server/admin/server_info_handler.cc b/source/server/admin/server_info_handler.cc index 4c4c8322b3df..d668dac83992 100644 --- a/source/server/admin/server_info_handler.cc +++ b/source/server/admin/server_info_handler.cc @@ -2,8 +2,8 @@ #include "envoy/admin/v3/memory.pb.h" -#include "common/common/version.h" #include "common/memory/stats.h" +#include "common/version/version.h" #include "server/admin/utils.h" diff --git a/source/server/backtrace.h b/source/server/backtrace.h index 966d9017baf2..fd391a691c81 100644 --- a/source/server/backtrace.h +++ b/source/server/backtrace.h @@ -3,7 +3,7 @@ #include #include "common/common/logger.h" -#include "common/common/version.h" +#include "common/version/version.h" #include "absl/debugging/stacktrace.h" #include "absl/debugging/symbolize.h" diff --git a/source/server/config_validation/BUILD b/source/server/config_validation/BUILD index 713ea38dad22..f56d6069dd57 100644 --- a/source/server/config_validation/BUILD +++ b/source/server/config_validation/BUILD @@ -97,7 +97,6 @@ envoy_cc_library( "//source/common/access_log:access_log_manager_lib", "//source/common/common:assert_lib", "//source/common/common:utility_lib", - "//source/common/common:version_lib", "//source/common/config:utility_lib", "//source/common/grpc:common_lib", "//source/common/local_info:local_info_lib", @@ -106,6 +105,7 @@ envoy_cc_library( "//source/common/runtime:runtime_lib", "//source/common/stats:stats_lib", "//source/common/thread_local:thread_local_lib", + 
"//source/common/version:version_lib", "//source/server:configuration_lib", "//source/server:server_lib", "//source/server/admin:admin_lib", diff --git a/source/server/config_validation/server.cc b/source/server/config_validation/server.cc index b418e5fe867a..4df691f6b96e 100644 --- a/source/server/config_validation/server.cc +++ b/source/server/config_validation/server.cc @@ -5,12 +5,12 @@ #include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "common/common/utility.h" -#include "common/common/version.h" #include "common/config/utility.h" #include "common/event/real_time_system.h" #include "common/local_info/local_info_impl.h" #include "common/protobuf/utility.h" #include "common/singleton/manager_impl.h" +#include "common/version/version.h" #include "server/ssl_context_manager.h" diff --git a/source/server/options_impl.cc b/source/server/options_impl.cc index 78dde475bc9c..51734175acec 100644 --- a/source/server/options_impl.cc +++ b/source/server/options_impl.cc @@ -10,8 +10,8 @@ #include "common/common/fmt.h" #include "common/common/logger.h" #include "common/common/macros.h" -#include "common/common/version.h" #include "common/protobuf/utility.h" +#include "common/version/version.h" #include "server/options_impl_platform.h" diff --git a/source/server/server.cc b/source/server/server.cc index b91177ef38bd..cf3957118eed 100644 --- a/source/server/server.cc +++ b/source/server/server.cc @@ -27,7 +27,6 @@ #include "common/common/enum_to_int.h" #include "common/common/mutex_tracer_impl.h" #include "common/common/utility.h" -#include "common/common/version.h" #include "common/config/utility.h" #include "common/config/version_converter.h" #include "common/http/codes.h" @@ -44,6 +43,7 @@ #include "common/stats/thread_local_store.h" #include "common/stats/timespan_impl.h" #include "common/upstream/cluster_manager_impl.h" +#include "common/version/version.h" #include "server/admin/utils.h" #include "server/configuration_impl.h" diff --git a/test/common/common/BUILD b/test/common/common/BUILD index 8983d0f4483b..8c4ca910649b 100644 --- a/test/common/common/BUILD +++ b/test/common/common/BUILD @@ -295,7 +295,7 @@ envoy_cc_test( "abseil_strings", ], deps = [ - "//source/common/common:version_lib", + "//source/common/version:version_lib", ], ) diff --git a/test/common/common/version_test.cc b/test/common/common/version_test.cc index 8dee39254f26..5177f5ac1661 100644 --- a/test/common/common/version_test.cc +++ b/test/common/common/version_test.cc @@ -1,4 +1,4 @@ -#include "common/common/version.h" +#include "common/version/version.h" #include "absl/strings/str_cat.h" #include "gmock/gmock.h" diff --git a/test/extensions/access_loggers/grpc/http_grpc_access_log_integration_test.cc b/test/extensions/access_loggers/grpc/http_grpc_access_log_integration_test.cc index 148921270f23..98096bb2386e 100644 --- a/test/extensions/access_loggers/grpc/http_grpc_access_log_integration_test.cc +++ b/test/extensions/access_loggers/grpc/http_grpc_access_log_integration_test.cc @@ -4,9 +4,9 @@ #include "envoy/service/accesslog/v3/als.pb.h" #include "common/buffer/zero_copy_input_stream_impl.h" -#include "common/common/version.h" #include "common/grpc/codec.h" #include "common/grpc/common.h" +#include "common/version/version.h" #include "test/common/grpc/grpc_client_integration.h" #include "test/integration/http_integration.h" diff --git a/test/extensions/access_loggers/grpc/tcp_grpc_access_log_integration_test.cc b/test/extensions/access_loggers/grpc/tcp_grpc_access_log_integration_test.cc index 
6dab6bf6e024..e79fb234eaa1 100644 --- a/test/extensions/access_loggers/grpc/tcp_grpc_access_log_integration_test.cc +++ b/test/extensions/access_loggers/grpc/tcp_grpc_access_log_integration_test.cc @@ -5,9 +5,9 @@ #include "envoy/service/accesslog/v3/als.pb.h" #include "common/buffer/zero_copy_input_stream_impl.h" -#include "common/common/version.h" #include "common/grpc/codec.h" #include "common/grpc/common.h" +#include "common/version/version.h" #include "test/common/grpc/grpc_client_integration.h" #include "test/integration/http_integration.h" diff --git a/test/extensions/stats_sinks/metrics_service/metrics_service_integration_test.cc b/test/extensions/stats_sinks/metrics_service/metrics_service_integration_test.cc index 0e227d6469dd..a15de2201aed 100644 --- a/test/extensions/stats_sinks/metrics_service/metrics_service_integration_test.cc +++ b/test/extensions/stats_sinks/metrics_service/metrics_service_integration_test.cc @@ -2,10 +2,10 @@ #include "envoy/config/metrics/v3/metrics_service.pb.h" #include "envoy/service/metrics/v3/metrics_service.pb.h" -#include "common/common/version.h" #include "common/grpc/codec.h" #include "common/grpc/common.h" #include "common/stats/histogram_impl.h" +#include "common/version/version.h" #include "test/common/grpc/grpc_client_integration.h" #include "test/integration/http_integration.h" diff --git a/test/integration/BUILD b/test/integration/BUILD index 40bcd665e111..95c57fe9522d 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -29,10 +29,10 @@ envoy_cc_test_library( ], deps = [ ":http_integration_lib", - "//source/common/common:version_lib", "//source/common/config:protobuf_link_hacks", "//source/common/config:version_converter_lib", "//source/common/protobuf:utility_lib", + "//source/common/version:version_lib", "//source/extensions/filters/network/redis_proxy:config", "//test/common/grpc:grpc_client_integration_lib", "//test/test_common:network_utility_lib", diff --git a/test/integration/ads_integration_test.cc b/test/integration/ads_integration_test.cc index c62af3e4d01e..71917d02b0a3 100644 --- a/test/integration/ads_integration_test.cc +++ b/test/integration/ads_integration_test.cc @@ -5,11 +5,11 @@ #include "envoy/config/route/v3/route.pb.h" #include "envoy/grpc/status.h" -#include "common/common/version.h" #include "common/config/protobuf_link_hacks.h" #include "common/config/version_converter.h" #include "common/protobuf/protobuf.h" #include "common/protobuf/utility.h" +#include "common/version/version.h" #include "test/common/grpc/grpc_client_integration.h" #include "test/integration/ads_integration.h" diff --git a/test/server/BUILD b/test/server/BUILD index 9868dc06062e..51d24f7de755 100644 --- a/test/server/BUILD +++ b/test/server/BUILD @@ -356,7 +356,7 @@ envoy_cc_test( ], tags = ["fails_on_windows"], deps = [ - "//source/common/common:version_lib", + "//source/common/version:version_lib", "//source/extensions/access_loggers/file:config", "//source/extensions/filters/http/buffer:config", "//source/extensions/filters/http/grpc_http1_bridge:config", diff --git a/test/server/server_test.cc b/test/server/server_test.cc index b849689ef63f..5671a71b2bf8 100644 --- a/test/server/server_test.cc +++ b/test/server/server_test.cc @@ -5,12 +5,12 @@ #include "envoy/server/bootstrap_extension_config.h" #include "common/common/assert.h" -#include "common/common/version.h" #include "common/network/address_impl.h" #include "common/network/listen_socket_impl.h" #include "common/network/socket_option_impl.h" #include 
"common/protobuf/protobuf.h" #include "common/thread_local/thread_local_impl.h" +#include "common/version/version.h" #include "server/process_context_impl.h" #include "server/server.h" diff --git a/tools/code_format/check_format.py b/tools/code_format/check_format.py index 351414da436b..e75ebef1c074 100755 --- a/tools/code_format/check_format.py +++ b/tools/code_format/check_format.py @@ -82,7 +82,7 @@ "./source/server/admin/utils.cc", "./source/server/admin/stats_handler.h", "./source/server/admin/stats_handler.cc", "./source/server/admin/prometheus_stats.h", "./source/server/admin/prometheus_stats.cc", "./tools/clang_tools/api_booster/main.cc", - "./tools/clang_tools/api_booster/proto_cxx_utils.cc", "./source/common/common/version.cc") + "./tools/clang_tools/api_booster/proto_cxx_utils.cc", "./source/common/version/version.cc") # Only one C++ file should instantiate grpc_init GRPC_INIT_ALLOWLIST = ("./source/common/grpc/google_grpc_context.cc") diff --git a/tools/testdata/check_format/header_order.cc b/tools/testdata/check_format/header_order.cc index 76cc4fb07fb0..d387bb3415d5 100644 --- a/tools/testdata/check_format/header_order.cc +++ b/tools/testdata/check_format/header_order.cc @@ -2,7 +2,7 @@ #include "common/api/api_impl.h" #include "common/api/os_sys_calls_impl.h" #include "common/common/utility.h" -#include "common/common/version.h" +#include "common/version/version.h" #include "common/config/resources.h" #include "common/config/utility.h" #include "common/local_info/local_info_impl.h" @@ -37,4 +37,4 @@ namespace Envoy { // Something awesome goes here. -} // namespace Envoy \ No newline at end of file +} // namespace Envoy diff --git a/tools/testdata/check_format/header_order.cc.gold b/tools/testdata/check_format/header_order.cc.gold index bb098bb0ac98..5f6d6433a91b 100644 --- a/tools/testdata/check_format/header_order.cc.gold +++ b/tools/testdata/check_format/header_order.cc.gold @@ -18,7 +18,6 @@ #include "common/api/api_impl.h" #include "common/api/os_sys_calls_impl.h" #include "common/common/utility.h" -#include "common/common/version.h" #include "common/config/resources.h" #include "common/config/utility.h" #include "common/local_info/local_info_impl.h" @@ -30,6 +29,7 @@ #include "common/singleton/manager_impl.h" #include "common/stats/thread_local_store.h" #include "common/upstream/cluster_manager_impl.h" +#include "common/version/version.h" #include "server/configuration_impl.h" #include "server/connection_handler_impl.h" @@ -42,4 +42,4 @@ namespace Envoy { // Something awesome goes here. 
-} // namespace Envoy \ No newline at end of file +} // namespace Envoy From 5d8588affe4dbc4145548ca9078280f742104cb9 Mon Sep 17 00:00:00 2001 From: Rei Shimizu Date: Sat, 18 Jul 2020 06:18:20 +0900 Subject: [PATCH 670/909] format: add api-shadow starlark files checker (#12147) #12129 (review), to ensure the consistency between api/bazel/*.bzl and generated_api_shadow/bazel/*.bzl Risk Level: Low Signed-off-by: Shikugawa --- tools/code_format/check_format.py | 26 +++++++++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) diff --git a/tools/code_format/check_format.py b/tools/code_format/check_format.py index e75ebef1c074..b647b827fc3c 100755 --- a/tools/code_format/check_format.py +++ b/tools/code_format/check_format.py @@ -898,6 +898,21 @@ def checkOwners(dir_name, owned_directories, error_messages): error_messages.append("New directory %s appears to not have owners in CODEOWNERS" % dir_name) +def checkApiShadowStarlarkFiles(api_shadow_root, file_path, error_messages): + command = "diff -u " + command += file_path + " " + api_shadow_starlark_path = api_shadow_root + re.sub(r"\./api/", '', file_path) + command += api_shadow_starlark_path + + error_message = executeCommand(command, "invalid .bzl in generated_api_shadow", file_path) + if operation_type == "check": + error_messages += error_message + elif operation_type == "fix" and len(error_message) != 0: + shutil.copy(file_path, api_shadow_starlark_path) + + return error_messages + + def checkFormatVisitor(arg, dir_name, names): """Run checkFormat in parallel for the given files. @@ -914,7 +929,7 @@ def checkFormatVisitor(arg, dir_name, names): # python lists are passed as references, this is used to collect the list of # async results (futures) from running checkFormat and passing them back to # the caller. - pool, result_list, owned_directories, error_messages = arg + pool, result_list, owned_directories, api_shadow_root, error_messages = arg # Sanity check CODEOWNERS. This doesn't need to be done in a multi-threaded # manner as it is a small and limited list. @@ -927,6 +942,10 @@ def checkFormatVisitor(arg, dir_name, names): checkOwners(dir_name[len(source_prefix):], owned_directories, error_messages) for file_name in names: + if dir_name.startswith("./api") and isSkylarkFile(file_name): + result = pool.apply_async(checkApiShadowStarlarkFiles, + args=(api_shadow_root, dir_name + "/" + file_name, error_messages)) + result_list.append(result) result = pool.apply_async(checkFormatReturnTraceOnError, args=(dir_name + "/" + file_name,)) result_list.append(result) @@ -993,6 +1012,7 @@ def checkErrorMessages(error_messages): operation_type = args.operation_type target_path = args.target_path + api_shadow_root = args.api_shadow_prefix envoy_build_rule_check = not args.skip_envoy_build_rule_check namespace_check = args.namespace_check namespace_check_excluded_paths = args.namespace_check_excluded_paths + [ @@ -1058,8 +1078,8 @@ def PooledCheckFormat(path_predicate): # For each file in target_path, start a new task in the pool and collect the # results (results is passed by reference, and is used as an output). for root, _, files in os.walk(target_path): - checkFormatVisitor((pool, results, owned_directories, error_messages), root, - [f for f in files if path_predicate(f)]) + checkFormatVisitor((pool, results, owned_directories, api_shadow_root, error_messages), + root, [f for f in files if path_predicate(f)]) # Close the pool to new tasks, wait for all of the running tasks to finish, # then collect the error messages. 
From 57b4a1ec65c233362f31dfcd37584e93bde19b25 Mon Sep 17 00:00:00 2001 From: yanavlasov Date: Fri, 17 Jul 2020 17:25:50 -0400 Subject: [PATCH 671/909] Remove unit test parameterization for the TapMatcherGenericBodyConfigTest suite (#12143) Remove unit test parameterization for the TapMatcherGenericBodyConfigTest suite. It does not actually use test parameters. Risk Level: Low, test only Testing: Unit Tests Docs Changes: N/A Release Notes: N/A Signed-off-by: Yan Avlasov --- test/extensions/common/tap/tap_matcher_test.cc | 17 +---------------- 1 file changed, 1 insertion(+), 16 deletions(-) diff --git a/test/extensions/common/tap/tap_matcher_test.cc b/test/extensions/common/tap/tap_matcher_test.cc index 2023f40d4f4d..9898c7b4ee89 100644 --- a/test/extensions/common/tap/tap_matcher_test.cc +++ b/test/extensions/common/tap/tap_matcher_test.cc @@ -31,12 +31,7 @@ class TapMatcherTest : public TapMatcherTestBase, public testing::Test { Http::TestResponseTrailerMapImpl response_trailers_; }; -// Base test class for config parameterized tests. -class TapMatcherGenericBodyConfigTest - : public TapMatcherTestBase, - public ::testing::TestWithParam< - std::tuple, size_t>>> { -}; +class TapMatcherGenericBodyConfigTest : public TapMatcherTestBase, public ::testing::Test {}; class TapMatcherGenericBodyTest : public TapMatcherTestBase, @@ -194,16 +189,6 @@ TEST_F(TapMatcherGenericBodyTest, WrongConfigTest) { ASSERT_ANY_THROW(TestUtility::loadFromYaml(matcher_yaml, config_)); } -INSTANTIATE_TEST_SUITE_P( - TapMatcherGenericBodyTestConfigSuite, TapMatcherGenericBodyConfigTest, - ::testing::Combine( - ::testing::Values(TapMatcherTestBase::Direction::Request, - TapMatcherTestBase::Direction::Response), - ::testing::Values( - // Should match - envoy is in the body - std::make_tuple(std::vector{" - string_match: \"envoy\""}, 5), - std::make_tuple(std::vector{" - string_match: \"envoy\""}, 5)))); - // Test different configurations against the body. // Parameterized test passes various configurations // which are appended to the yaml string. 
From bc02b7748a69fd3854f74b9e5bec236b5499f7fd Mon Sep 17 00:00:00 2001 From: tomocy <36136133+tomocy@users.noreply.github.com> Date: Sat, 18 Jul 2020 06:28:10 +0900 Subject: [PATCH 672/909] format: use type alias (#12146) Commit Message: format: use type alias Additional Description: N/A Risk Level: Low Testing: N/A Docs Changes: N/A Release Notes: N/A Part of #11634 Signed-off-by: tomocy --- .../envoy/router/route_config_provider_manager.h | 4 ++++ source/common/router/rds_impl.cc | 2 +- source/common/router/rds_impl.h | 7 ++++++- source/common/router/scoped_rds.cc | 6 +++--- source/common/router/scoped_rds.h | 16 +++++++++++----- source/common/router/vhds.h | 2 +- source/common/secret/sds_api.h | 2 +- source/common/upstream/cds_api_impl.h | 2 +- source/common/upstream/eds.h | 6 +++++- .../network/http_connection_manager/config.cc | 4 ++-- .../network/http_connection_manager/config.h | 5 ++--- source/server/lds_api.h | 2 +- .../config/subscription_factory_impl_test.cc | 2 +- test/common/router/rds_impl_test.cc | 6 +++--- test/common/router/scoped_rds_test.cc | 4 ++-- test/common/upstream/eds_speed_test.cc | 2 +- test/common/upstream/eds_test.cc | 2 +- 17 files changed, 46 insertions(+), 28 deletions(-) diff --git a/include/envoy/router/route_config_provider_manager.h b/include/envoy/router/route_config_provider_manager.h index f266407a3651..67a184f2ba8e 100644 --- a/include/envoy/router/route_config_provider_manager.h +++ b/include/envoy/router/route_config_provider_manager.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include "envoy/config/route/v3/route.pb.h" @@ -55,5 +56,8 @@ class RouteConfigProviderManager { ProtobufMessage::ValidationVisitor& validator) PURE; }; +using RouteConfigProviderManagerPtr = std::unique_ptr; +using RouteConfigProviderManagerSharedPtr = std::shared_ptr; + } // namespace Router } // namespace Envoy diff --git a/source/common/router/rds_impl.cc b/source/common/router/rds_impl.cc index 953da8a9cdc7..414480e208b8 100644 --- a/source/common/router/rds_impl.cc +++ b/source/common/router/rds_impl.cc @@ -335,7 +335,7 @@ Router::RouteConfigProviderSharedPtr RouteConfigProviderManagerImpl::createRdsRo RdsRouteConfigSubscriptionSharedPtr subscription(new RdsRouteConfigSubscription( rds, manager_identifier, factory_context, stat_prefix, *this)); init_manager.add(subscription->parent_init_target_); - std::shared_ptr new_provider{ + RdsRouteConfigProviderImplSharedPtr new_provider{ new RdsRouteConfigProviderImpl(std::move(subscription), factory_context)}; dynamic_route_config_providers_.insert( {manager_identifier, std::weak_ptr(new_provider)}); diff --git a/source/common/router/rds_impl.h b/source/common/router/rds_impl.h index c547d1b5a8f7..5cf1a235c554 100644 --- a/source/common/router/rds_impl.h +++ b/source/common/router/rds_impl.h @@ -2,6 +2,7 @@ #include #include +#include #include #include #include @@ -149,7 +150,7 @@ class RdsRouteConfigSubscription bool validateUpdateSize(int num_resources); - std::unique_ptr subscription_; + Envoy::Config::SubscriptionPtr subscription_; const std::string route_config_name_; Server::Configuration::ServerFactoryContext& factory_context_; @@ -227,6 +228,8 @@ class RdsRouteConfigProviderImpl : public RouteConfigProvider, friend class RouteConfigProviderManagerImpl; }; +using RdsRouteConfigProviderImplSharedPtr = std::shared_ptr; + class RouteConfigProviderManagerImpl : public RouteConfigProviderManager, public Singleton::Instance { public: @@ -258,5 +261,7 @@ class RouteConfigProviderManagerImpl : public 
RouteConfigProviderManager, friend class StaticRouteConfigProviderImpl; }; +using RouteConfigProviderManagerImplPtr = std::unique_ptr; + } // namespace Router } // namespace Envoy diff --git a/source/common/router/scoped_rds.cc b/source/common/router/scoped_rds.cc index 171176aab037..e54ad6ae530a 100644 --- a/source/common/router/scoped_rds.cc +++ b/source/common/router/scoped_rds.cc @@ -190,10 +190,10 @@ bool ScopedRdsConfigSubscription::addOrUpdateScopes( return any_applied; } -std::list> +std::list ScopedRdsConfigSubscription::removeScopes( const Protobuf::RepeatedPtrField& scope_names, const std::string& version_info) { - std::list> + std::list to_be_removed_rds_providers; for (const auto& scope_name : scope_names) { auto iter = scoped_route_map_.find(scope_name); @@ -266,7 +266,7 @@ void ScopedRdsConfigSubscription::onConfigUpdate( std::vector exception_msgs; // Do not delete RDS config providers just yet, in case the to be deleted RDS subscriptions could // be reused by some to be added scopes. - std::list> + std::list to_be_removed_rds_providers = removeScopes(removed_resources, version_info); bool any_applied = addOrUpdateScopes(added_resources, diff --git a/source/common/router/scoped_rds.h b/source/common/router/scoped_rds.h index 3a9fa29e47a8..390f524be192 100644 --- a/source/common/router/scoped_rds.h +++ b/source/common/router/scoped_rds.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include "envoy/common/callback.h" @@ -123,12 +124,14 @@ class ScopedRdsConfigSubscription ScopedRdsConfigSubscription& parent_; std::string scope_name_; - std::shared_ptr route_provider_; + RdsRouteConfigProviderImplSharedPtr route_provider_; // This handle_ is owned by the route config provider's RDS subscription, when the helper // destructs, the handle is deleted as well. Common::CallbackHandle* rds_update_callback_handle_; }; + using RdsRouteConfigProviderHelperPtr = std::unique_ptr; + // Adds or updates scopes, create a new RDS provider for each resource, if an exception is thrown // during updating, the exception message is collected via the exception messages vector. // Returns true if any scope updated, false otherwise. @@ -138,7 +141,7 @@ class ScopedRdsConfigSubscription // Removes given scopes from the managed set of scopes. // Returns a list of to be removed helpers which is temporally held in the onConfigUpdate method, // to make sure new scopes sharing the same RDS source configs could reuse the subscriptions. - std::list> + std::list removeScopes(const Protobuf::RepeatedPtrField& scope_names, const std::string& version_info); @@ -169,14 +172,13 @@ class ScopedRdsConfigSubscription ScopedRouteMap scoped_route_map_; // RdsRouteConfigProvider by scope name. - absl::flat_hash_map> - route_provider_by_scope_; + absl::flat_hash_map route_provider_by_scope_; // A map of (hash, scope-name), used to detect the key conflict between scopes. absl::flat_hash_map scope_name_by_hash_; // For creating RDS subscriptions. 
Server::Configuration::ServerFactoryContext& factory_context_; const std::string name_; - std::unique_ptr subscription_; + Envoy::Config::SubscriptionPtr subscription_; const envoy::extensions::filters::network::http_connection_manager::v3::ScopedRoutes:: ScopeKeyBuilder scope_key_builder_; Stats::ScopePtr scope_; @@ -240,6 +242,10 @@ class ScopedRoutesConfigProviderManager : public Envoy::Config::ConfigProviderMa RouteConfigProviderManager& route_config_provider_manager_; }; +using ScopedRoutesConfigProviderManagerPtr = std::unique_ptr; +using ScopedRoutesConfigProviderManagerSharedPtr = + std::shared_ptr; + // The optional argument passed to the ConfigProviderManager::create*() functions. class ScopedRoutesConfigProviderManagerOptArg : public Envoy::Config::ConfigProviderManager::OptionalArg { diff --git a/source/common/router/vhds.h b/source/common/router/vhds.h index 956db775c64a..dc8bd87ded74 100644 --- a/source/common/router/vhds.h +++ b/source/common/router/vhds.h @@ -72,7 +72,7 @@ class VhdsSubscription : Envoy::Config::SubscriptionBase subscription_; + Envoy::Config::SubscriptionPtr subscription_; Init::TargetImpl init_target_; std::unordered_set& route_config_providers_; }; diff --git a/source/common/secret/sds_api.h b/source/common/secret/sds_api.h index 04afae8f60ad..e06467104a9d 100644 --- a/source/common/secret/sds_api.h +++ b/source/common/secret/sds_api.h @@ -75,7 +75,7 @@ class SdsApi : public Envoy::Config::SubscriptionBase< Stats::Store& stats_; const envoy::config::core::v3::ConfigSource sds_config_; - std::unique_ptr subscription_; + Config::SubscriptionPtr subscription_; const std::string sds_config_name_; uint64_t secret_hash_; diff --git a/source/common/upstream/cds_api_impl.h b/source/common/upstream/cds_api_impl.h index 71eb8b351652..970c12a4ba16 100644 --- a/source/common/upstream/cds_api_impl.h +++ b/source/common/upstream/cds_api_impl.h @@ -51,7 +51,7 @@ class CdsApiImpl : public CdsApi, void runInitializeCallbackIfAny(); ClusterManager& cm_; - std::unique_ptr subscription_; + Config::SubscriptionPtr subscription_; std::string system_version_info_; std::function initialize_callback_; Stats::ScopePtr scope_; diff --git a/source/common/upstream/eds.h b/source/common/upstream/eds.h index a18ee5696630..b1eab5a10972 100644 --- a/source/common/upstream/eds.h +++ b/source/common/upstream/eds.h @@ -1,5 +1,7 @@ #pragma once +#include + #include "envoy/config/cluster/v3/cluster.pb.h" #include "envoy/config/core/v3/base.pb.h" #include "envoy/config/core/v3/config_source.pb.h" @@ -74,7 +76,7 @@ class EdsClusterImpl const envoy::config::endpoint::v3::ClusterLoadAssignment& cluster_load_assignment_; }; - std::unique_ptr subscription_; + Config::SubscriptionPtr subscription_; const LocalInfo::LocalInfo& local_info_; const std::string cluster_name_; std::vector locality_weights_map_; @@ -83,6 +85,8 @@ class EdsClusterImpl InitializePhase initialize_phase_; }; +using EdsClusterImplSharedPtr = std::shared_ptr; + class EdsClusterFactory : public ClusterFactoryImplBase { public: EdsClusterFactory() : ClusterFactoryImplBase(Extensions::Clusters::ClusterTypes::get().Eds) {} diff --git a/source/extensions/filters/network/http_connection_manager/config.cc b/source/extensions/filters/network/http_connection_manager/config.cc index 190db7f475b4..5c830c1ecca4 100644 --- a/source/extensions/filters/network/http_connection_manager/config.cc +++ b/source/extensions/filters/network/http_connection_manager/config.cc @@ -89,13 +89,13 @@ Utility::Singletons 
Utility::createSingletons(Server::Configuration::FactoryCont context.threadLocal()); }); - std::shared_ptr route_config_provider_manager = + Router::RouteConfigProviderManagerSharedPtr route_config_provider_manager = context.singletonManager().getTyped( SINGLETON_MANAGER_REGISTERED_NAME(route_config_provider_manager), [&context] { return std::make_shared(context.admin()); }); - std::shared_ptr scoped_routes_config_provider_manager = + Router::ScopedRoutesConfigProviderManagerSharedPtr scoped_routes_config_provider_manager = context.singletonManager().getTyped( SINGLETON_MANAGER_REGISTERED_NAME(scoped_routes_config_provider_manager), [&context, route_config_provider_manager] { diff --git a/source/extensions/filters/network/http_connection_manager/config.h b/source/extensions/filters/network/http_connection_manager/config.h index b522fad49b66..d2fef63dedb1 100644 --- a/source/extensions/filters/network/http_connection_manager/config.h +++ b/source/extensions/filters/network/http_connection_manager/config.h @@ -264,9 +264,8 @@ class Utility { public: struct Singletons { std::shared_ptr date_provider_; - std::shared_ptr route_config_provider_manager_; - std::shared_ptr - scoped_routes_config_provider_manager_; + Router::RouteConfigProviderManagerSharedPtr route_config_provider_manager_; + Router::ScopedRoutesConfigProviderManagerSharedPtr scoped_routes_config_provider_manager_; Tracing::HttpTracerManagerSharedPtr http_tracer_manager_; }; diff --git a/source/server/lds_api.h b/source/server/lds_api.h index 8f0954c93d5c..0ace5e7b937c 100644 --- a/source/server/lds_api.h +++ b/source/server/lds_api.h @@ -43,7 +43,7 @@ class LdsApiImpl : public LdsApi, void onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason reason, const EnvoyException* e) override; - std::unique_ptr subscription_; + Config::SubscriptionPtr subscription_; std::string system_version_info_; ListenerManager& listener_manager_; Stats::ScopePtr scope_; diff --git a/test/common/config/subscription_factory_impl_test.cc b/test/common/config/subscription_factory_impl_test.cc index 185f7bb4f13a..5d975a0f1ba3 100644 --- a/test/common/config/subscription_factory_impl_test.cc +++ b/test/common/config/subscription_factory_impl_test.cc @@ -41,7 +41,7 @@ class SubscriptionFactoryTest : public testing::Test { subscription_factory_(local_info_, dispatcher_, cm_, random_, validation_visitor_, *api_, runtime_) {} - std::unique_ptr + SubscriptionPtr subscriptionFromConfigSource(const envoy::config::core::v3::ConfigSource& config) { return subscription_factory_.subscriptionFromConfigSource( config, Config::TypeUrl::get().ClusterLoadAssignment, stats_store_, callbacks_, diff --git a/test/common/router/rds_impl_test.cc b/test/common/router/rds_impl_test.cc index 2116ae2a90b9..25a20e50935e 100644 --- a/test/common/router/rds_impl_test.cc +++ b/test/common/router/rds_impl_test.cc @@ -120,7 +120,7 @@ stat_prefix: foo } NiceMock server_; - std::unique_ptr route_config_provider_manager_; + RouteConfigProviderManagerImplPtr route_config_provider_manager_; RouteConfigProviderSharedPtr rds_; }; @@ -290,7 +290,7 @@ class RdsRouteConfigSubscriptionTest : public RdsTestBase { server_factory_context_.thread_local_.shutdownThread(); } - std::unique_ptr route_config_provider_manager_; + RouteConfigProviderManagerImplPtr route_config_provider_manager_; }; // Verifies that maybeCreateInitManager() creates a noop init manager if the main init manager is in @@ -353,7 +353,7 @@ class RouteConfigProviderManagerImplTest : public RdsTestBase { } 
envoy::extensions::filters::network::http_connection_manager::v3::Rds rds_; - std::unique_ptr route_config_provider_manager_; + RouteConfigProviderManagerImplPtr route_config_provider_manager_; RouteConfigProviderSharedPtr provider_; }; diff --git a/test/common/router/scoped_rds_test.cc b/test/common/router/scoped_rds_test.cc index 456b95df3b4a..b3383ec0ad9d 100644 --- a/test/common/router/scoped_rds_test.cc +++ b/test/common/router/scoped_rds_test.cc @@ -101,8 +101,8 @@ class ScopedRoutesTestBase : public testing::Test { NiceMock validation_context_; // server_factory_context_ is used by rds NiceMock server_factory_context_; - std::unique_ptr route_config_provider_manager_; - std::unique_ptr config_provider_manager_; + RouteConfigProviderManagerPtr route_config_provider_manager_; + ScopedRoutesConfigProviderManagerPtr config_provider_manager_; Event::SimulatedTimeSystem time_system_; }; diff --git a/test/common/upstream/eds_speed_test.cc b/test/common/upstream/eds_speed_test.cc index 6f82390c9514..4ed25db80da9 100644 --- a/test/common/upstream/eds_speed_test.cc +++ b/test/common/upstream/eds_speed_test.cc @@ -148,7 +148,7 @@ class EdsSpeedTest { envoy::config::cluster::v3::Cluster eds_cluster_; NiceMock cm_; NiceMock dispatcher_; - std::shared_ptr cluster_; + EdsClusterImplSharedPtr cluster_; Config::SubscriptionCallbacks* eds_callbacks_{}; Config::OpaqueResourceDecoderImpl resource_decoder_{validation_visitor_, "cluster_name"}; diff --git a/test/common/upstream/eds_test.cc b/test/common/upstream/eds_test.cc index 1370b41b180d..144d29ad78a0 100644 --- a/test/common/upstream/eds_test.cc +++ b/test/common/upstream/eds_test.cc @@ -120,7 +120,7 @@ class EdsTest : public testing::Test { envoy::config::cluster::v3::Cluster eds_cluster_; NiceMock cm_; NiceMock dispatcher_; - std::shared_ptr cluster_; + EdsClusterImplSharedPtr cluster_; Config::SubscriptionCallbacks* eds_callbacks_{}; NiceMock random_; NiceMock runtime_; From de4c2244ed85f5aa2845ebeb8d259b9006635161 Mon Sep 17 00:00:00 2001 From: Benjamin Peterson Date: Fri, 17 Jul 2020 16:07:19 -0700 Subject: [PATCH 673/909] docs: fix link. (#12136) Signed-off-by: Benjamin Peterson --- .../http/http_filters/grpc_json_transcoder_filter.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/root/configuration/http/http_filters/grpc_json_transcoder_filter.rst b/docs/root/configuration/http/http_filters/grpc_json_transcoder_filter.rst index 1eb2fe082718..a8c796b5bcb4 100644 --- a/docs/root/configuration/http/http_filters/grpc_json_transcoder_filter.rst +++ b/docs/root/configuration/http/http_filters/grpc_json_transcoder_filter.rst @@ -85,7 +85,7 @@ as its output message type. The implementation needs to set Multiple `google.api.HttpBody `_ can be send by the gRPC server in the server streaming case. In this case, HTTP response header `Content-Type` will use the `content-type` from the first -`google.api.HttpBody `. +`google.api.HttpBody `_. 
 Headers
 --------

From e6601239f64d2ee79fb501aa6eb7de3deee2cae7 Mon Sep 17 00:00:00 2001
From: tomocy <36136133+tomocy@users.noreply.github.com>
Date: Sat, 18 Jul 2020 08:07:51 +0900
Subject: [PATCH 674/909] format: use type alias (#12104)

Signed-off-by: tomocy
---
 include/envoy/filesystem/filesystem.h | 2 ++
 include/envoy/thread/thread.h | 2 ++
 source/exe/platform_impl.h | 4 ++--
 3 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/include/envoy/filesystem/filesystem.h b/include/envoy/filesystem/filesystem.h
index 033d418683f2..09764415b6a9 100644
--- a/include/envoy/filesystem/filesystem.h
+++ b/include/envoy/filesystem/filesystem.h
@@ -130,6 +130,8 @@ class Instance {
   virtual bool illegalPath(const std::string& path) PURE;
 };
 
+using InstancePtr = std::unique_ptr<Instance>;
+
 enum class FileType { Regular, Directory, Other };
 
 struct DirectoryEntry {
diff --git a/include/envoy/thread/thread.h b/include/envoy/thread/thread.h
index 8633c03e1ebe..bcc6864d1466 100644
--- a/include/envoy/thread/thread.h
+++ b/include/envoy/thread/thread.h
@@ -81,6 +81,8 @@ class ThreadFactory {
   virtual ThreadId currentThreadId() PURE;
 };
 
+using ThreadFactoryPtr = std::unique_ptr<ThreadFactory>;
+
 /**
  * Like the C++11 "basic lockable concept" but a pure virtual interface vs. a template, and
  * with thread annotations.
diff --git a/source/exe/platform_impl.h b/source/exe/platform_impl.h
index 4c05dff22584..6e1ec22ce872 100644
--- a/source/exe/platform_impl.h
+++ b/source/exe/platform_impl.h
@@ -13,8 +13,8 @@ class PlatformImpl {
   Filesystem::Instance& fileSystem() { return *file_system_; }
 
 private:
-  std::unique_ptr<Thread::ThreadFactory> thread_factory_;
-  std::unique_ptr<Filesystem::Instance> file_system_;
+  Thread::ThreadFactoryPtr thread_factory_;
+  Filesystem::InstancePtr file_system_;
 };
 
 } // namespace Envoy

From 3890abeba0bad5a3d2bbaac9af1e6ac67ad66605 Mon Sep 17 00:00:00 2001
From: asraa
Date: Fri, 17 Jul 2020 19:24:39 -0400
Subject: [PATCH 675/909] [http] introduce legacy codec with no diff checker (#12149)

This just introduces plumbing and clones codecs for legacy versions. There are
no changes to the codecs at this point (the files are just duplicated and the
namespace Legacy is used). Users can set which codecs are used at runtime via
the runtime feature new_codec_behavior. This will be the case for O(4-6 weeks)
while the changes for the new no-exception codecs (see the linked issue) are
pushed to the new codecs.
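The duplicated classes live under a Legacy namespace but keep the same interfaces, so
each call site only needs a runtime check to decide which copy to construct. A minimal
standalone sketch of that selection pattern, assuming a featureEnabled callback as a
stand-in for Runtime::runtimeFeatureEnabled and placeholder Codec types (none of the
names below are the real Envoy classes):

    #include <functional>
    #include <iostream>
    #include <memory>
    #include <string>

    struct Codec {
      virtual ~Codec() = default;
      virtual void dispatch(const std::string& data) = 0;
    };

    struct LegacyCodec : Codec {
      void dispatch(const std::string& data) override {
        std::cout << "legacy codec parsed " << data.size() << " bytes\n";
      }
    };

    struct NewCodec : Codec {
      void dispatch(const std::string& data) override {
        std::cout << "new codec parsed " << data.size() << " bytes\n";
      }
    };

    // The runtime flag defaults to true; setting it to false selects the legacy copy
    // during the removal period.
    std::unique_ptr<Codec>
    makeCodec(const std::function<bool(const std::string&)>& featureEnabled) {
      if (featureEnabled("envoy.reloadable_features.new_codec_behavior")) {
        return std::make_unique<NewCodec>();
      }
      return std::make_unique<LegacyCodec>();
    }

    int main() {
      // Flag forced off here, so the legacy implementation is constructed.
      auto codec = makeCodec([](const std::string&) { return false; });
      codec->dispatch("GET / HTTP/1.1\r\n\r\n");
    }

The codec_client.cc and conn_manager_utility.cc hunks below make the same check once per
codec type (HTTP/1 and HTTP/2) and fall back to the Legacy-namespace implementations when
the flag is false.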
Signed-off-by: Asra Ali --- bazel/BUILD | 5 + bazel/envoy_build_system.bzl | 2 + bazel/envoy_select.bzl | 7 + ci/do_ci.sh | 5 + docs/root/version_history/current.rst | 1 + source/common/http/BUILD | 7 + source/common/http/codec_client.cc | 32 +- source/common/http/conn_manager_utility.cc | 26 +- source/common/http/http1/BUILD | 60 +- source/common/http/http1/codec_impl_legacy.cc | 1240 ++++++++++++++ source/common/http/http1/codec_impl_legacy.h | 607 +++++++ source/common/http/http2/BUILD | 74 +- source/common/http/http2/codec_impl.cc | 10 +- source/common/http/http2/codec_impl.h | 11 +- source/common/http/http2/codec_impl_legacy.cc | 1470 +++++++++++++++++ source/common/http/http2/codec_impl_legacy.h | 602 +++++++ source/common/runtime/runtime_features.cc | 1 + .../network/http_connection_manager/BUILD | 2 + .../network/http_connection_manager/config.cc | 37 +- test/common/http/codec_impl_fuzz_test.cc | 24 +- test/common/http/http1/BUILD | 1 + test/common/http/http1/codec_impl_test.cc | 360 ++-- test/common/http/http2/BUILD | 62 +- test/common/http/http2/codec_impl_test.cc | 152 +- test/common/http/http2/codec_impl_test_util.h | 109 +- test/common/http/http2/frame_replay_test.cc | 18 +- .../http/http2/request_header_fuzz_test.cc | 2 +- .../http/http2/response_header_fuzz_test.cc | 2 +- test/config/utility.cc | 4 + test/config/utility.h | 3 + .../http_connection_manager/config_test.cc | 60 + test/integration/BUILD | 7 + .../api_version_integration_test.cc | 8 +- test/integration/fake_upstream.cc | 38 +- test/integration/integration.cc | 5 + test/integration/integration.h | 1 + 36 files changed, 4697 insertions(+), 358 deletions(-) create mode 100644 source/common/http/http1/codec_impl_legacy.cc create mode 100644 source/common/http/http1/codec_impl_legacy.h create mode 100644 source/common/http/http2/codec_impl_legacy.cc create mode 100644 source/common/http/http2/codec_impl_legacy.h diff --git a/bazel/BUILD b/bazel/BUILD index 982d3fa3ac70..97d9d79fb6be 100644 --- a/bazel/BUILD +++ b/bazel/BUILD @@ -199,6 +199,11 @@ config_setting( values = {"define": "path_normalization_by_default=true"}, ) +config_setting( + name = "enable_legacy_codecs_in_integration_tests", + values = {"define": "use_legacy_codecs_in_integration_tests=true"}, +) + cc_proto_library( name = "grpc_health_proto", deps = ["@com_github_grpc_grpc//src/proto/grpc/health/v1:_health_proto_only"], diff --git a/bazel/envoy_build_system.bzl b/bazel/envoy_build_system.bzl index 70ef3df4f1d2..0f062cbfe8d8 100644 --- a/bazel/envoy_build_system.bzl +++ b/bazel/envoy_build_system.bzl @@ -18,6 +18,7 @@ load( _envoy_select_boringssl = "envoy_select_boringssl", _envoy_select_google_grpc = "envoy_select_google_grpc", _envoy_select_hot_restart = "envoy_select_hot_restart", + _envoy_select_legacy_codecs_in_integration_tests = "envoy_select_legacy_codecs_in_integration_tests", ) load( ":envoy_test.bzl", @@ -168,6 +169,7 @@ def envoy_google_grpc_external_deps(): envoy_select_boringssl = _envoy_select_boringssl envoy_select_google_grpc = _envoy_select_google_grpc envoy_select_hot_restart = _envoy_select_hot_restart +envoy_select_legacy_codecs_in_integration_tests = _envoy_select_legacy_codecs_in_integration_tests # Binary wrappers (from envoy_binary.bzl) envoy_cc_binary = _envoy_cc_binary diff --git a/bazel/envoy_select.bzl b/bazel/envoy_select.bzl index f2167f29bec4..ba7704ceb02f 100644 --- a/bazel/envoy_select.bzl +++ b/bazel/envoy_select.bzl @@ -31,3 +31,10 @@ def envoy_select_hot_restart(xs, repository = ""): repository + 
"//bazel:disable_hot_restart_or_apple": [], "//conditions:default": xs, }) + +# Select the given values if use legacy codecs in test is on in the current build. +def envoy_select_legacy_codecs_in_integration_tests(xs, repository = ""): + return select({ + repository + "//bazel:enable_legacy_codecs_in_integration_tests": xs, + "//conditions:default": [], + }) diff --git a/ci/do_ci.sh b/ci/do_ci.sh index 384433e2ccf1..a2f4959adf71 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -220,6 +220,7 @@ elif [[ "$CI_TARGET" == "bazel.compile_time_options" ]]; then --define quiche=enabled \ --define path_normalization_by_default=true \ --define deprecated_features=disabled \ + --define use_legacy_codecs_in_integration_tests=true \ --define --cxxopt=-std=c++14 \ " ENVOY_STDLIB="${ENVOY_STDLIB:-libstdc++}" @@ -237,6 +238,10 @@ elif [[ "$CI_TARGET" == "bazel.compile_time_options" ]]; then echo "Building and testing ${TEST_TARGETS}" bazel test ${BAZEL_BUILD_OPTIONS} ${COMPILE_TIME_OPTIONS} -c dbg ${TEST_TARGETS} --test_tag_filters=-nofips --build_tests_only + # Legacy codecs "--define legacy_codecs_in_integration_tests=true" should also be tested in + # integration tests with asan. + bazel test ${BAZEL_BUILD_OPTIONS} ${COMPILE_TIME_OPTIONS} -c dbg @envoy//test/integration/... --config=clang-asan --build_tests_only + # "--define log_debug_assert_in_release=enabled" must be tested with a release build, so run only # these tests under "-c opt" to save time in CI. bazel test ${BAZEL_BUILD_OPTIONS} ${COMPILE_TIME_OPTIONS} -c opt @envoy//test/common/common:assert_test @envoy//test/server:server_test diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 9229315345d0..be4d1f06972f 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -38,6 +38,7 @@ New Features * ext_authz filter: added support for emitting dynamic metadata for both :ref:`HTTP ` and :ref:`network ` filters. * grpc-json: support specifying `response_body` field in for `google.api.HttpBody` message. +* http: introduced new HTTP/1 and HTTP/2 codec implementations that will remove the use of exceptions for control flow due to high risk factors and instead use error statuses. The old behavior is deprecated, but can be used during the removal period by setting the runtime feature `envoy.reloadable_features.new_codec_behavior` to false. The removal period will be one month. * load balancer: added a :ref:`configuration` option to specify the active request bias used by the least request load balancer. * tap: added :ref:`generic body matcher` to scan http requests and responses for text or hex patterns. 
diff --git a/source/common/http/BUILD b/source/common/http/BUILD index 5a07a56b9f00..041c3508d650 100644 --- a/source/common/http/BUILD +++ b/source/common/http/BUILD @@ -57,16 +57,21 @@ envoy_cc_library( "//include/envoy/http:codec_interface", "//include/envoy/network:connection_interface", "//include/envoy/network:filter_interface", + "//include/envoy/runtime:runtime_interface", "//source/common/common:assert_lib", "//source/common/common:enum_to_int", "//source/common/common:linked_object", "//source/common/common:minimal_logger_lib", "//source/common/config:utility_lib", + "//source/common/http/http1:codec_legacy_lib", "//source/common/http/http1:codec_lib", + "//source/common/http/http2:codec_legacy_lib", "//source/common/http/http2:codec_lib", "//source/common/http/http3:quic_codec_factory_lib", "//source/common/http/http3:well_known_names", "//source/common/network:filter_lib", + "//source/common/runtime:runtime_features_lib", + "//source/common/runtime:runtime_lib", ], ) @@ -207,7 +212,9 @@ envoy_cc_library( "//source/common/common:scope_tracker", "//source/common/common:utility_lib", "//source/common/config:utility_lib", + "//source/common/http/http1:codec_legacy_lib", "//source/common/http/http1:codec_lib", + "//source/common/http/http2:codec_legacy_lib", "//source/common/http/http2:codec_lib", "//source/common/http/http3:quic_codec_factory_lib", "//source/common/http/http3:well_known_names", diff --git a/source/common/http/codec_client.cc b/source/common/http/codec_client.cc index 557b5757414a..e3fbc23ef921 100644 --- a/source/common/http/codec_client.cc +++ b/source/common/http/codec_client.cc @@ -9,11 +9,15 @@ #include "common/config/utility.h" #include "common/http/exception.h" #include "common/http/http1/codec_impl.h" +#include "common/http/http1/codec_impl_legacy.h" #include "common/http/http2/codec_impl.h" +#include "common/http/http2/codec_impl_legacy.h" #include "common/http/http3/quic_codec_factory.h" #include "common/http/http3/well_known_names.h" #include "common/http/status.h" #include "common/http/utility.h" +#include "common/runtime/runtime_features.h" +#include "common/runtime/runtime_impl.h" namespace Envoy { namespace Http { @@ -150,16 +154,29 @@ CodecClientProd::CodecClientProd(Type type, Network::ClientConnectionPtr&& conne switch (type) { case Type::HTTP1: { - codec_ = std::make_unique( - *connection_, host->cluster().http1CodecStats(), *this, host->cluster().http1Settings(), - host->cluster().maxResponseHeadersCount()); + if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.new_codec_behavior")) { + codec_ = std::make_unique( + *connection_, host->cluster().http1CodecStats(), *this, host->cluster().http1Settings(), + host->cluster().maxResponseHeadersCount()); + } else { + codec_ = std::make_unique( + *connection_, host->cluster().http1CodecStats(), *this, host->cluster().http1Settings(), + host->cluster().maxResponseHeadersCount()); + } break; } case Type::HTTP2: { - codec_ = std::make_unique( - *connection_, *this, host->cluster().http2CodecStats(), host->cluster().http2Options(), - Http::DEFAULT_MAX_REQUEST_HEADERS_KB, host->cluster().maxResponseHeadersCount(), - Http2::ProdNghttp2SessionFactory::get()); + if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.new_codec_behavior")) { + codec_ = std::make_unique( + *connection_, *this, host->cluster().http2CodecStats(), host->cluster().http2Options(), + Http::DEFAULT_MAX_REQUEST_HEADERS_KB, host->cluster().maxResponseHeadersCount(), + Http2::ProdNghttp2SessionFactory::get()); + } else { + 
codec_ = std::make_unique( + *connection_, *this, host->cluster().http2CodecStats(), host->cluster().http2Options(), + Http::DEFAULT_MAX_REQUEST_HEADERS_KB, host->cluster().maxResponseHeadersCount(), + Http2::ProdNghttp2SessionFactory::get()); + } break; } case Type::HTTP3: { @@ -167,6 +184,7 @@ CodecClientProd::CodecClientProd(Type type, Network::ClientConnectionPtr&& conne Config::Utility::getAndCheckFactoryByName( Http::QuicCodecNames::get().Quiche) .createQuicClientConnection(*connection_, *this)); + break; } } } diff --git a/source/common/http/conn_manager_utility.cc b/source/common/http/conn_manager_utility.cc index 65b5af861cc1..c8ce01993cfa 100644 --- a/source/common/http/conn_manager_utility.cc +++ b/source/common/http/conn_manager_utility.cc @@ -11,7 +11,9 @@ #include "common/http/header_utility.h" #include "common/http/headers.h" #include "common/http/http1/codec_impl.h" +#include "common/http/http1/codec_impl_legacy.h" #include "common/http/http2/codec_impl.h" +#include "common/http/http2/codec_impl_legacy.h" #include "common/http/path_utility.h" #include "common/http/utility.h" #include "common/network/utility.h" @@ -51,14 +53,26 @@ ServerConnectionPtr ConnectionManagerUtility::autoCreateCodec( headers_with_underscores_action) { if (determineNextProtocol(connection, data) == Utility::AlpnNames::get().Http2) { Http2::CodecStats& stats = Http2::CodecStats::atomicGet(http2_codec_stats, scope); - return std::make_unique( - connection, callbacks, stats, http2_options, max_request_headers_kb, - max_request_headers_count, headers_with_underscores_action); + if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.new_codec_behavior")) { + return std::make_unique( + connection, callbacks, stats, http2_options, max_request_headers_kb, + max_request_headers_count, headers_with_underscores_action); + } else { + return std::make_unique( + connection, callbacks, stats, http2_options, max_request_headers_kb, + max_request_headers_count, headers_with_underscores_action); + } } else { Http1::CodecStats& stats = Http1::CodecStats::atomicGet(http1_codec_stats, scope); - return std::make_unique( - connection, stats, callbacks, http1_settings, max_request_headers_kb, - max_request_headers_count, headers_with_underscores_action); + if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.new_codec_behavior")) { + return std::make_unique( + connection, stats, callbacks, http1_settings, max_request_headers_kb, + max_request_headers_count, headers_with_underscores_action); + } else { + return std::make_unique( + connection, stats, callbacks, http1_settings, max_request_headers_kb, + max_request_headers_count, headers_with_underscores_action); + } } } diff --git a/source/common/http/http1/BUILD b/source/common/http/http1/BUILD index 278e9adaaae5..9451c4e29ae3 100644 --- a/source/common/http/http1/BUILD +++ b/source/common/http/http1/BUILD @@ -24,36 +24,46 @@ envoy_cc_library( ], ) +CODEC_LIB_DEPS = [ + ":codec_stats_lib", + ":header_formatter_lib", + "//include/envoy/buffer:buffer_interface", + "//include/envoy/http:codec_interface", + "//include/envoy/http:header_map_interface", + "//include/envoy/network:connection_interface", + "//source/common/buffer:buffer_lib", + "//source/common/buffer:watermark_buffer_lib", + "//source/common/common:assert_lib", + "//source/common/common:statusor_lib", + "//source/common/common:utility_lib", + "//source/common/grpc:common_lib", + "//source/common/http:codec_helper_lib", + "//source/common/http:codes_lib", + "//source/common/http:exception_lib", + 
"//source/common/http:header_map_lib", + "//source/common/http:header_utility_lib", + "//source/common/http:headers_lib", + "//source/common/http:status_lib", + "//source/common/http:url_utility_lib", + "//source/common/http:utility_lib", + "//source/common/runtime:runtime_features_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", +] + envoy_cc_library( name = "codec_lib", srcs = ["codec_impl.cc"], hdrs = ["codec_impl.h"], external_deps = ["http_parser"], - deps = [ - ":codec_stats_lib", - ":header_formatter_lib", - "//include/envoy/buffer:buffer_interface", - "//include/envoy/http:codec_interface", - "//include/envoy/http:header_map_interface", - "//include/envoy/network:connection_interface", - "//source/common/buffer:buffer_lib", - "//source/common/buffer:watermark_buffer_lib", - "//source/common/common:assert_lib", - "//source/common/common:statusor_lib", - "//source/common/common:utility_lib", - "//source/common/grpc:common_lib", - "//source/common/http:codec_helper_lib", - "//source/common/http:codes_lib", - "//source/common/http:exception_lib", - "//source/common/http:header_map_lib", - "//source/common/http:header_utility_lib", - "//source/common/http:headers_lib", - "//source/common/http:status_lib", - "//source/common/http:url_utility_lib", - "//source/common/http:utility_lib", - "//source/common/runtime:runtime_features_lib", - "@envoy_api//envoy/config/core/v3:pkg_cc_proto", - ], + deps = CODEC_LIB_DEPS, +) + +envoy_cc_library( + name = "codec_legacy_lib", + srcs = ["codec_impl_legacy.cc"], + hdrs = ["codec_impl_legacy.h"], + external_deps = ["http_parser"], + deps = CODEC_LIB_DEPS, ) envoy_cc_library( diff --git a/source/common/http/http1/codec_impl_legacy.cc b/source/common/http/http1/codec_impl_legacy.cc new file mode 100644 index 000000000000..9fa3cd43eb04 --- /dev/null +++ b/source/common/http/http1/codec_impl_legacy.cc @@ -0,0 +1,1240 @@ +#include "common/http/http1/codec_impl_legacy.h" + +#include +#include +#include + +#include "envoy/buffer/buffer.h" +#include "envoy/http/codec.h" +#include "envoy/http/header_map.h" +#include "envoy/network/connection.h" + +#include "common/common/enum_to_int.h" +#include "common/common/utility.h" +#include "common/grpc/common.h" +#include "common/http/exception.h" +#include "common/http/header_utility.h" +#include "common/http/headers.h" +#include "common/http/http1/header_formatter.h" +#include "common/http/url_utility.h" +#include "common/http/utility.h" +#include "common/runtime/runtime_features.h" + +#include "absl/container/fixed_array.h" +#include "absl/strings/ascii.h" + +namespace Envoy { +namespace Http { +namespace Legacy { +namespace Http1 { +namespace { + +struct Http1ResponseCodeDetailValues { + const absl::string_view TooManyHeaders = "http1.too_many_headers"; + const absl::string_view HeadersTooLarge = "http1.headers_too_large"; + const absl::string_view HttpCodecError = "http1.codec_error"; + const absl::string_view InvalidCharacters = "http1.invalid_characters"; + const absl::string_view ConnectionHeaderSanitization = "http1.connection_header_rejected"; + const absl::string_view InvalidUrl = "http1.invalid_url"; + const absl::string_view InvalidTransferEncoding = "http1.invalid_transfer_encoding"; + const absl::string_view BodyDisallowed = "http1.body_disallowed"; + const absl::string_view TransferEncodingNotAllowed = "http1.transfer_encoding_not_allowed"; + const absl::string_view ContentLengthNotAllowed = "http1.content_length_not_allowed"; +}; + +struct Http1HeaderTypesValues { + const absl::string_view 
Headers = "headers"; + const absl::string_view Trailers = "trailers"; +}; + +using Http1ResponseCodeDetails = ConstSingleton; +using Http1HeaderTypes = ConstSingleton; +using Http::Http1::CodecStats; +using Http::Http1::HeaderKeyFormatter; +using Http::Http1::HeaderKeyFormatterPtr; +using Http::Http1::ProperCaseHeaderKeyFormatter; + +const StringUtil::CaseUnorderedSet& caseUnorderdSetContainingUpgradeAndHttp2Settings() { + CONSTRUCT_ON_FIRST_USE(StringUtil::CaseUnorderedSet, + Http::Headers::get().ConnectionValues.Upgrade, + Http::Headers::get().ConnectionValues.Http2Settings); +} + +HeaderKeyFormatterPtr formatter(const Http::Http1Settings& settings) { + if (settings.header_key_format_ == Http1Settings::HeaderKeyFormat::ProperCase) { + return std::make_unique(); + } + + return nullptr; +} + +} // namespace + +const std::string StreamEncoderImpl::CRLF = "\r\n"; +// Last chunk as defined here https://tools.ietf.org/html/rfc7230#section-4.1 +const std::string StreamEncoderImpl::LAST_CHUNK = "0\r\n"; + +StreamEncoderImpl::StreamEncoderImpl(ConnectionImpl& connection, + HeaderKeyFormatter* header_key_formatter) + : connection_(connection), disable_chunk_encoding_(false), chunk_encoding_(true), + is_response_to_head_request_(false), is_response_to_connect_request_(false), + header_key_formatter_(header_key_formatter) { + if (connection_.connection().aboveHighWatermark()) { + runHighWatermarkCallbacks(); + } +} + +void StreamEncoderImpl::encodeHeader(const char* key, uint32_t key_size, const char* value, + uint32_t value_size) { + + ASSERT(key_size > 0); + + connection_.copyToBuffer(key, key_size); + connection_.addCharToBuffer(':'); + connection_.addCharToBuffer(' '); + connection_.copyToBuffer(value, value_size); + connection_.addToBuffer(CRLF); +} +void StreamEncoderImpl::encodeHeader(absl::string_view key, absl::string_view value) { + this->encodeHeader(key.data(), key.size(), value.data(), value.size()); +} + +void StreamEncoderImpl::encodeFormattedHeader(absl::string_view key, absl::string_view value) { + if (header_key_formatter_ != nullptr) { + encodeHeader(header_key_formatter_->format(key), value); + } else { + encodeHeader(key, value); + } +} + +void ResponseEncoderImpl::encode100ContinueHeaders(const ResponseHeaderMap& headers) { + ASSERT(headers.Status()->value() == "100"); + encodeHeaders(headers, false); +} + +void StreamEncoderImpl::encodeHeadersBase(const RequestOrResponseHeaderMap& headers, + absl::optional status, bool end_stream) { + bool saw_content_length = false; + headers.iterate([this](const HeaderEntry& header) -> HeaderMap::Iterate { + absl::string_view key_to_use = header.key().getStringView(); + uint32_t key_size_to_use = header.key().size(); + // Translate :authority -> host so that upper layers do not need to deal with this. + if (key_size_to_use > 1 && key_to_use[0] == ':' && key_to_use[1] == 'a') { + key_to_use = absl::string_view(Headers::get().HostLegacy.get()); + key_size_to_use = Headers::get().HostLegacy.get().size(); + } + + // Skip all headers starting with ':' that make it here. + if (key_to_use[0] == ':') { + return HeaderMap::Iterate::Continue; + } + + encodeFormattedHeader(key_to_use, header.value().getStringView()); + + return HeaderMap::Iterate::Continue; + }); + + if (headers.ContentLength()) { + saw_content_length = true; + } + + ASSERT(!headers.TransferEncoding()); + + // Assume we are chunk encoding unless we are passed a content length or this is a header only + // response. 
Upper layers generally should strip transfer-encoding since it only applies to + // HTTP/1.1. The codec will infer it based on the type of response. + // for streaming (e.g. SSE stream sent to hystrix dashboard), we do not want + // chunk transfer encoding but we don't have a content-length so disable_chunk_encoding_ is + // consulted before enabling chunk encoding. + // + // Note that for HEAD requests Envoy does best-effort guessing when there is no + // content-length. If a client makes a HEAD request for an upstream resource + // with no bytes but the upstream response doesn't include "Content-length: 0", + // Envoy will incorrectly assume a subsequent response to GET will be chunk encoded. + if (saw_content_length || disable_chunk_encoding_) { + chunk_encoding_ = false; + } else { + if (status && *status == 100) { + // Make sure we don't serialize chunk information with 100-Continue headers. + chunk_encoding_ = false; + } else if (end_stream && !is_response_to_head_request_) { + // If this is a headers-only stream, append an explicit "Content-Length: 0" unless it's a + // response to a HEAD request. + // For 204s and 1xx where content length is disallowed, don't append the content length but + // also don't chunk encode. + if (!status || (*status >= 200 && *status != 204)) { + encodeFormattedHeader(Headers::get().ContentLength.get(), "0"); + } + chunk_encoding_ = false; + } else if (connection_.protocol() == Protocol::Http10) { + chunk_encoding_ = false; + } else if (status && (*status < 200 || *status == 204) && + connection_.strict1xxAnd204Headers()) { + // TODO(zuercher): when the "envoy.reloadable_features.strict_1xx_and_204_response_headers" + // feature flag is removed, this block can be coalesced with the 100 Continue logic above. + + // For 1xx and 204 responses, do not send the chunked encoding header or enable chunked + // encoding: https://tools.ietf.org/html/rfc7230#section-3.3.1 + chunk_encoding_ = false; + } else { + // For responses to connect requests, do not send the chunked encoding header: + // https://tools.ietf.org/html/rfc7231#section-4.3.6. + if (!is_response_to_connect_request_) { + encodeFormattedHeader(Headers::get().TransferEncoding.get(), + Headers::get().TransferEncodingValues.Chunked); + } + // We do not apply chunk encoding for HTTP upgrades, including CONNECT style upgrades. + // If there is a body in a response on the upgrade path, the chunks will be + // passed through via maybeDirectDispatch so we need to avoid appending + // extra chunk boundaries. + // + // When sending a response to a HEAD request Envoy may send an informational + // "Transfer-Encoding: chunked" header, but should not send a chunk encoded body. + chunk_encoding_ = !Utility::isUpgrade(headers) && !is_response_to_head_request_ && + !is_response_to_connect_request_; + } + } + + connection_.addToBuffer(CRLF); + + if (end_stream) { + endEncode(); + } else { + connection_.flushOutput(); + } +} + +void StreamEncoderImpl::encodeData(Buffer::Instance& data, bool end_stream) { + // end_stream may be indicated with a zero length data buffer. If that is the case, so not + // actually write the zero length buffer out. 
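For reference, the chunk framing emitted by this path and by endEncode() below is the plain
RFC 7230 format: the payload size in hex, CRLF, the payload, CRLF, with a lone "0" chunk
terminating the body at end of stream. A small standalone illustration (frameChunk and
finalChunk are made-up helper names, not part of this change):

    #include <iostream>
    #include <sstream>
    #include <string>

    // Frame one payload as an HTTP/1.1 chunk: <size in hex> CRLF <payload> CRLF.
    std::string frameChunk(const std::string& payload) {
      std::ostringstream out;
      out << std::hex << payload.size() << "\r\n" << payload << "\r\n";
      return out.str();
    }

    // Terminate the chunked body; trailers, if any, would be emitted between the
    // "0" line and the final blank line.
    std::string finalChunk() { return "0\r\n\r\n"; }

    int main() {
      std::cout << frameChunk("hello") << finalChunk(); // "5\r\nhello\r\n0\r\n\r\n"
    }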
+ if (data.length() > 0) { + if (chunk_encoding_) { + connection_.buffer().add(absl::StrCat(absl::Hex(data.length()), CRLF)); + } + + connection_.buffer().move(data); + + if (chunk_encoding_) { + connection_.buffer().add(CRLF); + } + } + + if (end_stream) { + endEncode(); + } else { + connection_.flushOutput(); + } +} + +void StreamEncoderImpl::encodeTrailersBase(const HeaderMap& trailers) { + if (!connection_.enableTrailers()) { + return endEncode(); + } + // Trailers only matter if it is a chunk transfer encoding + // https://tools.ietf.org/html/rfc7230#section-4.4 + if (chunk_encoding_) { + // Finalize the body + connection_.buffer().add(LAST_CHUNK); + + trailers.iterate([this](const HeaderEntry& header) -> HeaderMap::Iterate { + encodeFormattedHeader(header.key().getStringView(), header.value().getStringView()); + return HeaderMap::Iterate::Continue; + }); + + connection_.flushOutput(); + connection_.buffer().add(CRLF); + } + + connection_.flushOutput(); + connection_.onEncodeComplete(); +} + +void StreamEncoderImpl::encodeMetadata(const MetadataMapVector&) { + connection_.stats().metadata_not_supported_error_.inc(); +} + +void StreamEncoderImpl::endEncode() { + if (chunk_encoding_) { + connection_.buffer().add(LAST_CHUNK); + connection_.buffer().add(CRLF); + } + + connection_.flushOutput(true); + connection_.onEncodeComplete(); +} + +void ServerConnectionImpl::maybeAddSentinelBufferFragment(Buffer::WatermarkBuffer& output_buffer) { + if (!flood_protection_) { + return; + } + // It's messy and complicated to try to tag the final write of an HTTP response for response + // tracking for flood protection. Instead, write an empty buffer fragment after the response, + // to allow for tracking. + // When the response is written out, the fragment will be deleted and the counter will be updated + // by ServerConnectionImpl::releaseOutboundResponse() + auto fragment = + Buffer::OwnedBufferFragmentImpl::create(absl::string_view("", 0), response_buffer_releasor_); + output_buffer.addBufferFragment(*fragment.release()); + ASSERT(outbound_responses_ < max_outbound_responses_); + outbound_responses_++; +} + +void ServerConnectionImpl::doFloodProtectionChecks() const { + if (!flood_protection_) { + return; + } + // Before processing another request, make sure that we are below the response flood protection + // threshold. 
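The bookkeeping behind this check is small: every response serialized into the output buffer
bumps a counter via a sentinel buffer fragment, the fragment's release callback decrements it
once the bytes are written, and new pipelined requests are refused while the counter sits at
the limit. A standalone model of that idea (OutboundTracker is an invented name, not the class
in this file):

    #include <cstdint>
    #include <iostream>

    // Simplified model: count responses queued for write, refuse new requests past a limit.
    class OutboundTracker {
    public:
      explicit OutboundTracker(uint32_t limit) : limit_(limit) {}

      // A response was fully serialized into the connection's output buffer.
      void onResponseQueued() { ++outbound_; }

      // The sentinel fragment for that response was released after the write.
      void onResponseWritten() { --outbound_; }

      // Checked before accepting the next pipelined request.
      bool canAcceptNewRequest() const { return outbound_ < limit_; }

    private:
      const uint32_t limit_;
      uint32_t outbound_{0};
    };

    int main() {
      OutboundTracker tracker(2);
      tracker.onResponseQueued();
      tracker.onResponseQueued();
      std::cout << std::boolalpha << tracker.canAcceptNewRequest() << "\n"; // false, at the limit
      tracker.onResponseWritten();
      std::cout << tracker.canAcceptNewRequest() << "\n"; // true again
    }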
+ if (outbound_responses_ >= max_outbound_responses_) { + ENVOY_CONN_LOG(trace, "error accepting request: too many pending responses queued", + connection_); + stats_.response_flood_.inc(); + throw FrameFloodException("Too many responses queued."); + } +} + +void ConnectionImpl::flushOutput(bool end_encode) { + if (end_encode) { + // If this is an HTTP response in ServerConnectionImpl, track outbound responses for flood + // protection + maybeAddSentinelBufferFragment(output_buffer_); + } + connection().write(output_buffer_, false); + ASSERT(0UL == output_buffer_.length()); +} + +void ConnectionImpl::addToBuffer(absl::string_view data) { output_buffer_.add(data); } + +void ConnectionImpl::addCharToBuffer(char c) { output_buffer_.add(&c, 1); } + +void ConnectionImpl::addIntToBuffer(uint64_t i) { output_buffer_.add(absl::StrCat(i)); } + +void ConnectionImpl::copyToBuffer(const char* data, uint64_t length) { + output_buffer_.add(data, length); +} + +void StreamEncoderImpl::resetStream(StreamResetReason reason) { + connection_.onResetStreamBase(reason); +} + +void StreamEncoderImpl::readDisable(bool disable) { + if (disable) { + ++read_disable_calls_; + } else { + ASSERT(read_disable_calls_ != 0); + if (read_disable_calls_ != 0) { + --read_disable_calls_; + } + } + connection_.readDisable(disable); +} + +uint32_t StreamEncoderImpl::bufferLimit() { return connection_.bufferLimit(); } + +const Network::Address::InstanceConstSharedPtr& StreamEncoderImpl::connectionLocalAddress() { + return connection_.connection().localAddress(); +} + +static const char RESPONSE_PREFIX[] = "HTTP/1.1 "; +static const char HTTP_10_RESPONSE_PREFIX[] = "HTTP/1.0 "; + +void ResponseEncoderImpl::encodeHeaders(const ResponseHeaderMap& headers, bool end_stream) { + started_response_ = true; + + // The contract is that client codecs must ensure that :status is present. + ASSERT(headers.Status() != nullptr); + uint64_t numeric_status = Utility::getResponseStatus(headers); + + if (connection_.protocol() == Protocol::Http10 && connection_.supportsHttp10()) { + connection_.copyToBuffer(HTTP_10_RESPONSE_PREFIX, sizeof(HTTP_10_RESPONSE_PREFIX) - 1); + } else { + connection_.copyToBuffer(RESPONSE_PREFIX, sizeof(RESPONSE_PREFIX) - 1); + } + connection_.addIntToBuffer(numeric_status); + connection_.addCharToBuffer(' '); + + const char* status_string = CodeUtility::toString(static_cast(numeric_status)); + uint32_t status_string_len = strlen(status_string); + connection_.copyToBuffer(status_string, status_string_len); + + connection_.addCharToBuffer('\r'); + connection_.addCharToBuffer('\n'); + + if (numeric_status >= 300) { + // Don't do special CONNECT logic if the CONNECT was rejected. + is_response_to_connect_request_ = false; + } + + encodeHeadersBase(headers, absl::make_optional(numeric_status), end_stream); +} + +static const char REQUEST_POSTFIX[] = " HTTP/1.1\r\n"; + +void RequestEncoderImpl::encodeHeaders(const RequestHeaderMap& headers, bool end_stream) { + const HeaderEntry* method = headers.Method(); + const HeaderEntry* path = headers.Path(); + const HeaderEntry* host = headers.Host(); + bool is_connect = HeaderUtility::isConnect(headers); + + if (!method || (!path && !is_connect)) { + // TODO(#10878): This exception does not occur during dispatch and would not be triggered under + // normal circumstances since inputs would fail parsing at ingress. Replace with proper error + // handling when exceptions are removed. Include missing host header for CONNECT. 
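The serialization later in this function reduces to: method, a space, the request target, and
the " HTTP/1.1\r\n" postfix, with CONNECT using the authority (host:port) instead of a path.
A tiny standalone restatement (requestLine is an invented helper, not this codec's API):

    #include <iostream>
    #include <string>

    // Build the HTTP/1.1 request line. For CONNECT the target is the authority
    // (host:port); for everything else it is the path (origin form).
    std::string requestLine(const std::string& method, const std::string& path,
                            const std::string& host) {
      const bool is_connect = method == "CONNECT";
      return method + " " + (is_connect ? host : path) + " HTTP/1.1\r\n";
    }

    int main() {
      std::cout << requestLine("GET", "/healthz", "example.com");  // "GET /healthz HTTP/1.1\r\n"
      std::cout << requestLine("CONNECT", "", "example.com:443");  // "CONNECT example.com:443 HTTP/1.1\r\n"
    }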
+ throw CodecClientException(":method and :path must be specified"); + } + if (method->value() == Headers::get().MethodValues.Head) { + head_request_ = true; + } else if (method->value() == Headers::get().MethodValues.Connect) { + disableChunkEncoding(); + connect_request_ = true; + } + if (Utility::isUpgrade(headers)) { + upgrade_request_ = true; + } + + connection_.copyToBuffer(method->value().getStringView().data(), method->value().size()); + connection_.addCharToBuffer(' '); + if (is_connect) { + connection_.copyToBuffer(host->value().getStringView().data(), host->value().size()); + } else { + connection_.copyToBuffer(path->value().getStringView().data(), path->value().size()); + } + connection_.copyToBuffer(REQUEST_POSTFIX, sizeof(REQUEST_POSTFIX) - 1); + + encodeHeadersBase(headers, absl::nullopt, end_stream); +} + +http_parser_settings ConnectionImpl::settings_{ + [](http_parser* parser) -> int { + static_cast(parser->data)->onMessageBeginBase(); + return 0; + }, + [](http_parser* parser, const char* at, size_t length) -> int { + static_cast(parser->data)->onUrl(at, length); + return 0; + }, + nullptr, // on_status + [](http_parser* parser, const char* at, size_t length) -> int { + static_cast(parser->data)->onHeaderField(at, length); + return 0; + }, + [](http_parser* parser, const char* at, size_t length) -> int { + static_cast(parser->data)->onHeaderValue(at, length); + return 0; + }, + [](http_parser* parser) -> int { + return static_cast(parser->data)->onHeadersCompleteBase(); + }, + [](http_parser* parser, const char* at, size_t length) -> int { + static_cast(parser->data)->bufferBody(at, length); + return 0; + }, + [](http_parser* parser) -> int { + static_cast(parser->data)->onMessageCompleteBase(); + return 0; + }, + [](http_parser* parser) -> int { + // A 0-byte chunk header is used to signal the end of the chunked body. + // When this function is called, http-parser holds the size of the chunk in + // parser->content_length. 
See + // https://github.com/nodejs/http-parser/blob/v2.9.3/http_parser.h#L336 + const bool is_final_chunk = (parser->content_length == 0); + static_cast(parser->data)->onChunkHeader(is_final_chunk); + return 0; + }, + nullptr // on_chunk_complete +}; + +ConnectionImpl::ConnectionImpl(Network::Connection& connection, CodecStats& stats, + http_parser_type type, uint32_t max_headers_kb, + const uint32_t max_headers_count, + HeaderKeyFormatterPtr&& header_key_formatter, bool enable_trailers) + : connection_(connection), stats_(stats), + header_key_formatter_(std::move(header_key_formatter)), processing_trailers_(false), + handling_upgrade_(false), reset_stream_called_(false), deferred_end_stream_headers_(false), + connection_header_sanitization_(Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.connection_header_sanitization")), + enable_trailers_(enable_trailers), + strict_1xx_and_204_headers_(Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.strict_1xx_and_204_response_headers")), + output_buffer_([&]() -> void { this->onBelowLowWatermark(); }, + [&]() -> void { this->onAboveHighWatermark(); }, + []() -> void { /* TODO(adisuissa): Handle overflow watermark */ }), + max_headers_kb_(max_headers_kb), max_headers_count_(max_headers_count) { + output_buffer_.setWatermarks(connection.bufferLimit()); + http_parser_init(&parser_, type); + parser_.data = this; +} + +void ConnectionImpl::completeLastHeader() { + ENVOY_CONN_LOG(trace, "completed header: key={} value={}", connection_, + current_header_field_.getStringView(), current_header_value_.getStringView()); + + checkHeaderNameForUnderscores(); + auto& headers_or_trailers = headersOrTrailers(); + if (!current_header_field_.empty()) { + current_header_field_.inlineTransform([](char c) { return absl::ascii_tolower(c); }); + // Strip trailing whitespace of the current header value if any. Leading whitespace was trimmed + // in ConnectionImpl::onHeaderValue. http_parser does not strip leading or trailing whitespace + // as the spec requires: https://tools.ietf.org/html/rfc7230#section-3.2.4 + current_header_value_.rtrim(); + headers_or_trailers.addViaMove(std::move(current_header_field_), + std::move(current_header_value_)); + } + + // Check if the number of headers exceeds the limit. + if (headers_or_trailers.size() > max_headers_count_) { + error_code_ = Http::Code::RequestHeaderFieldsTooLarge; + sendProtocolError(Http1ResponseCodeDetails::get().TooManyHeaders); + const absl::string_view header_type = + processing_trailers_ ? Http1HeaderTypes::get().Trailers : Http1HeaderTypes::get().Headers; + throw CodecProtocolException(absl::StrCat(header_type, " size exceeds limit")); + } + + header_parsing_state_ = HeaderParsingState::Field; + ASSERT(current_header_field_.empty()); + ASSERT(current_header_value_.empty()); +} + +uint32_t ConnectionImpl::getHeadersSize() { + return current_header_field_.size() + current_header_value_.size() + + headersOrTrailers().byteSize(); +} + +void ConnectionImpl::checkMaxHeadersSize() { + const uint32_t total = getHeadersSize(); + if (total > (max_headers_kb_ * 1024)) { + const absl::string_view header_type = + processing_trailers_ ? 
Http1HeaderTypes::get().Trailers : Http1HeaderTypes::get().Headers; + error_code_ = Http::Code::RequestHeaderFieldsTooLarge; + sendProtocolError(Http1ResponseCodeDetails::get().HeadersTooLarge); + throw CodecProtocolException(absl::StrCat(header_type, " size exceeds limit")); + } +} + +bool ConnectionImpl::maybeDirectDispatch(Buffer::Instance& data) { + if (!handling_upgrade_) { + // Only direct dispatch for Upgrade requests. + return false; + } + + ENVOY_CONN_LOG(trace, "direct-dispatched {} bytes", connection_, data.length()); + onBody(data); + data.drain(data.length()); + return true; +} + +Http::Status ConnectionImpl::dispatch(Buffer::Instance& data) { + // TODO(#10878): Remove this wrapper when exception removal is complete. innerDispatch may either + // throw an exception or return an error status. The utility wrapper catches exceptions and + // converts them to error statuses. + return Utility::exceptionToStatus( + [&](Buffer::Instance& data) -> Http::Status { return innerDispatch(data); }, data); +} + +Http::Status ConnectionImpl::innerDispatch(Buffer::Instance& data) { + ENVOY_CONN_LOG(trace, "parsing {} bytes", connection_, data.length()); + ASSERT(buffered_body_.length() == 0); + + if (maybeDirectDispatch(data)) { + return Http::okStatus(); + } + + // Always unpause before dispatch. + http_parser_pause(&parser_, 0); + + ssize_t total_parsed = 0; + if (data.length() > 0) { + for (const Buffer::RawSlice& slice : data.getRawSlices()) { + total_parsed += dispatchSlice(static_cast(slice.mem_), slice.len_); + if (HTTP_PARSER_ERRNO(&parser_) != HPE_OK) { + // Parse errors trigger an exception in dispatchSlice so we are guaranteed to be paused at + // this point. + ASSERT(HTTP_PARSER_ERRNO(&parser_) == HPE_PAUSED); + break; + } + } + dispatchBufferedBody(); + } else { + dispatchSlice(nullptr, 0); + } + ASSERT(buffered_body_.length() == 0); + + ENVOY_CONN_LOG(trace, "parsed {} bytes", connection_, total_parsed); + data.drain(total_parsed); + + // If an upgrade has been handled and there is body data or early upgrade + // payload to send on, send it on. + maybeDirectDispatch(data); + return Http::okStatus(); +} + +size_t ConnectionImpl::dispatchSlice(const char* slice, size_t len) { + ssize_t rc = http_parser_execute(&parser_, &settings_, slice, len); + if (HTTP_PARSER_ERRNO(&parser_) != HPE_OK && HTTP_PARSER_ERRNO(&parser_) != HPE_PAUSED) { + sendProtocolError(Http1ResponseCodeDetails::get().HttpCodecError); + throw CodecProtocolException("http/1.1 protocol error: " + + std::string(http_errno_name(HTTP_PARSER_ERRNO(&parser_)))); + } + + return rc; +} + +void ConnectionImpl::onHeaderField(const char* data, size_t length) { + // We previously already finished up the headers, these headers are + // now trailers. + if (header_parsing_state_ == HeaderParsingState::Done) { + if (!enable_trailers_) { + // Ignore trailers. + return; + } + processing_trailers_ = true; + header_parsing_state_ = HeaderParsingState::Field; + allocTrailers(); + } + if (header_parsing_state_ == HeaderParsingState::Value) { + completeLastHeader(); + } + + current_header_field_.append(data, length); + + checkMaxHeadersSize(); +} + +void ConnectionImpl::onHeaderValue(const char* data, size_t length) { + if (header_parsing_state_ == HeaderParsingState::Done && !enable_trailers_) { + // Ignore trailers. 
+ return; + } + + absl::string_view header_value{data, length}; + if (!Http::HeaderUtility::headerValueIsValid(header_value)) { + ENVOY_CONN_LOG(debug, "invalid header value: {}", connection_, header_value); + error_code_ = Http::Code::BadRequest; + sendProtocolError(Http1ResponseCodeDetails::get().InvalidCharacters); + throw CodecProtocolException("http/1.1 protocol error: header value contains invalid chars"); + } + + header_parsing_state_ = HeaderParsingState::Value; + if (current_header_value_.empty()) { + // Strip leading whitespace if the current header value input contains the first bytes of the + // encoded header value. Trailing whitespace is stripped once the full header value is known in + // ConnectionImpl::completeLastHeader. http_parser does not strip leading or trailing whitespace + // as the spec requires: https://tools.ietf.org/html/rfc7230#section-3.2.4 . + header_value = StringUtil::ltrim(header_value); + } + current_header_value_.append(header_value.data(), header_value.length()); + + checkMaxHeadersSize(); +} + +int ConnectionImpl::onHeadersCompleteBase() { + ASSERT(!processing_trailers_); + ENVOY_CONN_LOG(trace, "onHeadersCompleteBase", connection_); + completeLastHeader(); + + if (!(parser_.http_major == 1 && parser_.http_minor == 1)) { + // This is not necessarily true, but it's good enough since higher layers only care if this is + // HTTP/1.1 or not. + protocol_ = Protocol::Http10; + } + RequestOrResponseHeaderMap& request_or_response_headers = requestOrResponseHeaders(); + if (Utility::isUpgrade(request_or_response_headers) && upgradeAllowed()) { + // Ignore h2c upgrade requests until we support them. + // See https://github.com/envoyproxy/envoy/issues/7161 for details. + if (absl::EqualsIgnoreCase(request_or_response_headers.getUpgradeValue(), + Http::Headers::get().UpgradeValues.H2c)) { + ENVOY_CONN_LOG(trace, "removing unsupported h2c upgrade headers.", connection_); + request_or_response_headers.removeUpgrade(); + if (request_or_response_headers.Connection()) { + const auto& tokens_to_remove = caseUnorderdSetContainingUpgradeAndHttp2Settings(); + std::string new_value = StringUtil::removeTokens( + request_or_response_headers.getConnectionValue(), ",", tokens_to_remove, ","); + if (new_value.empty()) { + request_or_response_headers.removeConnection(); + } else { + request_or_response_headers.setConnection(new_value); + } + } + request_or_response_headers.remove(Headers::get().Http2Settings); + } else { + ENVOY_CONN_LOG(trace, "codec entering upgrade mode.", connection_); + handling_upgrade_ = true; + } + } + if (parser_.method == HTTP_CONNECT) { + if (request_or_response_headers.ContentLength()) { + if (request_or_response_headers.getContentLengthValue() == "0") { + request_or_response_headers.removeContentLength(); + } else { + // Per https://tools.ietf.org/html/rfc7231#section-4.3.6 a payload with a + // CONNECT request has no defined semantics, and may be rejected. + error_code_ = Http::Code::BadRequest; + sendProtocolError(Http1ResponseCodeDetails::get().BodyDisallowed); + throw CodecProtocolException("http/1.1 protocol error: unsupported content length"); + } + } + ENVOY_CONN_LOG(trace, "codec entering upgrade mode for CONNECT request.", connection_); + handling_upgrade_ = true; + } + + // Per https://tools.ietf.org/html/rfc7230#section-3.3.1 Envoy should reject + // transfer-codings it does not understand. 
+ // Per https://tools.ietf.org/html/rfc7231#section-4.3.6 a payload with a + // CONNECT request has no defined semantics, and may be rejected. + if (request_or_response_headers.TransferEncoding()) { + const absl::string_view encoding = request_or_response_headers.getTransferEncodingValue(); + if (!absl::EqualsIgnoreCase(encoding, Headers::get().TransferEncodingValues.Chunked) || + parser_.method == HTTP_CONNECT) { + error_code_ = Http::Code::NotImplemented; + sendProtocolError(Http1ResponseCodeDetails::get().InvalidTransferEncoding); + throw CodecProtocolException("http/1.1 protocol error: unsupported transfer encoding"); + } + } + + int rc = onHeadersComplete(); + header_parsing_state_ = HeaderParsingState::Done; + + // Returning 2 informs http_parser to not expect a body or further data on this connection. + return handling_upgrade_ ? 2 : rc; +} + +void ConnectionImpl::bufferBody(const char* data, size_t length) { + buffered_body_.add(data, length); +} + +void ConnectionImpl::dispatchBufferedBody() { + ASSERT(HTTP_PARSER_ERRNO(&parser_) == HPE_OK || HTTP_PARSER_ERRNO(&parser_) == HPE_PAUSED); + if (buffered_body_.length() > 0) { + onBody(buffered_body_); + buffered_body_.drain(buffered_body_.length()); + } +} + +void ConnectionImpl::onChunkHeader(bool is_final_chunk) { + if (is_final_chunk) { + // Dispatch body before parsing trailers, so body ends up dispatched even if an error is found + // while processing trailers. + dispatchBufferedBody(); + } +} + +void ConnectionImpl::onMessageCompleteBase() { + ENVOY_CONN_LOG(trace, "message complete", connection_); + + dispatchBufferedBody(); + + if (handling_upgrade_) { + // If this is an upgrade request, swallow the onMessageComplete. The + // upgrade payload will be treated as stream body. + ASSERT(!deferred_end_stream_headers_); + ENVOY_CONN_LOG(trace, "Pausing parser due to upgrade.", connection_); + http_parser_pause(&parser_, 1); + return; + } + + // If true, this indicates we were processing trailers and must + // move the last header into current_header_map_ + if (header_parsing_state_ == HeaderParsingState::Value) { + completeLastHeader(); + } + + onMessageComplete(); +} + +void ConnectionImpl::onMessageBeginBase() { + ENVOY_CONN_LOG(trace, "message begin", connection_); + // Make sure that if HTTP/1.0 and HTTP/1.1 requests share a connection Envoy correctly sets + // protocol for each request. 
Envoy defaults to 1.1 but sets the protocol to 1.0 where applicable + // in onHeadersCompleteBase + protocol_ = Protocol::Http11; + processing_trailers_ = false; + header_parsing_state_ = HeaderParsingState::Field; + allocHeaders(); + onMessageBegin(); +} + +void ConnectionImpl::onResetStreamBase(StreamResetReason reason) { + ASSERT(!reset_stream_called_); + reset_stream_called_ = true; + onResetStream(reason); +} + +ServerConnectionImpl::ServerConnectionImpl( + Network::Connection& connection, CodecStats& stats, ServerConnectionCallbacks& callbacks, + const Http1Settings& settings, uint32_t max_request_headers_kb, + const uint32_t max_request_headers_count, + envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action) + : ConnectionImpl(connection, stats, HTTP_REQUEST, max_request_headers_kb, + max_request_headers_count, formatter(settings), settings.enable_trailers_), + callbacks_(callbacks), codec_settings_(settings), + response_buffer_releasor_([this](const Buffer::OwnedBufferFragmentImpl* fragment) { + releaseOutboundResponse(fragment); + }), + // Pipelining is generally not well supported on the internet and has a series of dangerous + // overflow bugs. As such we are disabling it for now, and removing this temporary override if + // no one objects. If you use this integer to restore prior behavior, contact the + // maintainer team as it will otherwise be removed entirely soon. + max_outbound_responses_( + Runtime::getInteger("envoy.do_not_use_going_away_max_http2_outbound_responses", 2)), + flood_protection_( + Runtime::runtimeFeatureEnabled("envoy.reloadable_features.http1_flood_protection")), + headers_with_underscores_action_(headers_with_underscores_action) {} + +uint32_t ServerConnectionImpl::getHeadersSize() { + // Add in the the size of the request URL if processing request headers. + const uint32_t url_size = (!processing_trailers_ && active_request_.has_value()) + ? active_request_.value().request_url_.size() + : 0; + return url_size + ConnectionImpl::getHeadersSize(); +} + +void ServerConnectionImpl::onEncodeComplete() { + if (active_request_.value().remote_complete_) { + // Only do this if remote is complete. If we are replying before the request is complete the + // only logical thing to do is for higher level code to reset() / close the connection so we + // leave the request around so that it can fire reset callbacks. + active_request_.reset(); + } +} + +void ServerConnectionImpl::handlePath(RequestHeaderMap& headers, unsigned int method) { + HeaderString path(Headers::get().Path); + + bool is_connect = (method == HTTP_CONNECT); + + // The url is relative or a wildcard when the method is OPTIONS. Nothing to do here. + auto& active_request = active_request_.value(); + if (!is_connect && !active_request.request_url_.getStringView().empty() && + (active_request.request_url_.getStringView()[0] == '/' || + ((method == HTTP_OPTIONS) && active_request.request_url_.getStringView()[0] == '*'))) { + headers.addViaMove(std::move(path), std::move(active_request.request_url_)); + return; + } + + // If absolute_urls and/or connect are not going be handled, copy the url and return. + // This forces the behavior to be backwards compatible with the old codec behavior. + // CONNECT "urls" are actually host:port so look like absolute URLs to the above checks. + // Absolute URLS in CONNECT requests will be rejected below by the URL class validation. 
+ if (!codec_settings_.allow_absolute_url_ && !is_connect) { + headers.addViaMove(std::move(path), std::move(active_request.request_url_)); + return; + } + + Utility::Url absolute_url; + if (!absolute_url.initialize(active_request.request_url_.getStringView(), is_connect)) { + sendProtocolError(Http1ResponseCodeDetails::get().InvalidUrl); + throw CodecProtocolException("http/1.1 protocol error: invalid url in request line"); + } + // RFC7230#5.7 + // When a proxy receives a request with an absolute-form of + // request-target, the proxy MUST ignore the received Host header field + // (if any) and instead replace it with the host information of the + // request-target. A proxy that forwards such a request MUST generate a + // new Host field-value based on the received request-target rather than + // forward the received Host field-value. + headers.setHost(absolute_url.hostAndPort()); + + if (!absolute_url.pathAndQueryParams().empty()) { + headers.setPath(absolute_url.pathAndQueryParams()); + } + active_request.request_url_.clear(); +} + +int ServerConnectionImpl::onHeadersComplete() { + // Handle the case where response happens prior to request complete. It's up to upper layer code + // to disconnect the connection but we shouldn't fire any more events since it doesn't make + // sense. + if (active_request_.has_value()) { + auto& active_request = active_request_.value(); + auto& headers = absl::get(headers_or_trailers_); + ENVOY_CONN_LOG(trace, "Server: onHeadersComplete size={}", connection_, headers->size()); + const char* method_string = http_method_str(static_cast(parser_.method)); + + if (!handling_upgrade_ && connection_header_sanitization_ && headers->Connection()) { + // If we fail to sanitize the request, return a 400 to the client + if (!Utility::sanitizeConnectionHeader(*headers)) { + absl::string_view header_value = headers->getConnectionValue(); + ENVOY_CONN_LOG(debug, "Invalid nominated headers in Connection: {}", connection_, + header_value); + error_code_ = Http::Code::BadRequest; + sendProtocolError(Http1ResponseCodeDetails::get().ConnectionHeaderSanitization); + throw CodecProtocolException("Invalid nominated headers in Connection."); + } + } + + // Inform the response encoder about any HEAD method, so it can set content + // length and transfer encoding headers correctly. + active_request.response_encoder_.setIsResponseToHeadRequest(parser_.method == HTTP_HEAD); + active_request.response_encoder_.setIsResponseToConnectRequest(parser_.method == HTTP_CONNECT); + + handlePath(*headers, parser_.method); + ASSERT(active_request.request_url_.empty()); + + headers->setMethod(method_string); + + // Make sure the host is valid. + auto details = HeaderUtility::requestHeadersValid(*headers); + if (details.has_value()) { + sendProtocolError(details.value().get()); + throw CodecProtocolException( + "http/1.1 protocol error: request headers failed spec compliance checks"); + } + + // Determine here whether we have a body or not. This uses the new RFC semantics where the + // presence of content-length or chunked transfer-encoding indicates a body vs. a particular + // method. If there is no body, we defer raising decodeHeaders() until the parser is flushed + // with message complete. This allows upper layers to behave like HTTP/2 and prevents a proxy + // scenario where the higher layers stream through and implicitly switch to chunked transfer + // encoding because end stream with zero body length has not yet been indicated. 
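Stated on its own, the rule applied by the check below is: a body is expected only when the
request is chunked, advertises a nonzero known content length, or is being handled as an
upgrade/CONNECT; otherwise decodeHeaders() is deferred so it can be delivered with end_stream
set. A standalone restatement (hasBody and its parameters are illustrative names only):

    #include <cstdint>
    #include <iostream>
    #include <limits>

    // Chunked transfer-encoding, a known nonzero content length, or an in-progress
    // upgrade/CONNECT means body bytes will follow the headers.
    bool hasBody(bool chunked, uint64_t content_length, bool handling_upgrade) {
      // Mirror http_parser's convention of "unset" being the maximum value.
      const bool length_known =
          content_length > 0 && content_length != std::numeric_limits<uint64_t>::max();
      return chunked || length_known || handling_upgrade;
    }

    int main() {
      std::cout << std::boolalpha
                << hasBody(false, std::numeric_limits<uint64_t>::max(), false) << "\n" // false
                << hasBody(true, std::numeric_limits<uint64_t>::max(), false) << "\n"  // true
                << hasBody(false, 128, false) << "\n";                                 // true
    }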
+ if (parser_.flags & F_CHUNKED || + (parser_.content_length > 0 && parser_.content_length != ULLONG_MAX) || handling_upgrade_) { + active_request.request_decoder_->decodeHeaders(std::move(headers), false); + + // If the connection has been closed (or is closing) after decoding headers, pause the parser + // so we return control to the caller. + if (connection_.state() != Network::Connection::State::Open) { + http_parser_pause(&parser_, 1); + } + } else { + deferred_end_stream_headers_ = true; + } + } + + return 0; +} + +void ServerConnectionImpl::onMessageBegin() { + if (!resetStreamCalled()) { + ASSERT(!active_request_.has_value()); + active_request_.emplace(*this, header_key_formatter_.get()); + auto& active_request = active_request_.value(); + active_request.request_decoder_ = &callbacks_.newStream(active_request.response_encoder_); + + // Check for pipelined request flood as we prepare to accept a new request. + // Parse errors that happen prior to onMessageBegin result in stream termination, it is not + // possible to overflow output buffers with early parse errors. + doFloodProtectionChecks(); + } +} + +void ServerConnectionImpl::onUrl(const char* data, size_t length) { + if (active_request_.has_value()) { + active_request_.value().request_url_.append(data, length); + + checkMaxHeadersSize(); + } +} + +void ServerConnectionImpl::onBody(Buffer::Instance& data) { + ASSERT(!deferred_end_stream_headers_); + if (active_request_.has_value()) { + ENVOY_CONN_LOG(trace, "body size={}", connection_, data.length()); + active_request_.value().request_decoder_->decodeData(data, false); + } +} + +void ServerConnectionImpl::onMessageComplete() { + ASSERT(!handling_upgrade_); + if (active_request_.has_value()) { + auto& active_request = active_request_.value(); + + if (active_request.request_decoder_) { + active_request.response_encoder_.readDisable(true); + } + active_request.remote_complete_ = true; + if (deferred_end_stream_headers_) { + active_request.request_decoder_->decodeHeaders( + std::move(absl::get(headers_or_trailers_)), true); + deferred_end_stream_headers_ = false; + } else if (processing_trailers_) { + active_request.request_decoder_->decodeTrailers( + std::move(absl::get(headers_or_trailers_))); + } else { + Buffer::OwnedImpl buffer; + active_request.request_decoder_->decodeData(buffer, true); + } + + // Reset to ensure no information from one requests persists to the next. + headers_or_trailers_.emplace(nullptr); + } + + // Always pause the parser so that the calling code can process 1 request at a time and apply + // back pressure. However this means that the calling code needs to detect if there is more data + // in the buffer and dispatch it again. + http_parser_pause(&parser_, 1); +} + +void ServerConnectionImpl::onResetStream(StreamResetReason reason) { + active_request_.value().response_encoder_.runResetCallbacks(reason); + active_request_.reset(); +} + +void ServerConnectionImpl::sendProtocolErrorOld(absl::string_view details) { + if (active_request_.has_value()) { + active_request_.value().response_encoder_.setDetails(details); + } + // We do this here because we may get a protocol error before we have a logical stream. Higher + // layers can only operate on streams, so there is no coherent way to allow them to send an error + // "out of band." On one hand this is kind of a hack but on the other hand it normalizes HTTP/1.1 + // to look more like HTTP/2 to higher layers. 
+ if (!active_request_.has_value() || + !active_request_.value().response_encoder_.startedResponse()) { + Buffer::OwnedImpl bad_request_response( + absl::StrCat("HTTP/1.1 ", error_code_, " ", CodeUtility::toString(error_code_), + "\r\ncontent-length: 0\r\nconnection: close\r\n\r\n")); + + connection_.write(bad_request_response, false); + } +} + +void ServerConnectionImpl::sendProtocolError(absl::string_view details) { + if (!Runtime::runtimeFeatureEnabled("envoy.reloadable_features.early_errors_via_hcm")) { + sendProtocolErrorOld(details); + return; + } + // We do this here because we may get a protocol error before we have a logical stream. + if (!active_request_.has_value()) { + onMessageBeginBase(); + } + ASSERT(active_request_.has_value()); + + active_request_.value().response_encoder_.setDetails(details); + if (!active_request_.value().response_encoder_.startedResponse()) { + // Note that the correctness of is_grpc_request and is_head_request is best-effort. + // If headers have not been fully parsed they may not be inferred correctly. + bool is_grpc_request = false; + if (absl::holds_alternative(headers_or_trailers_) && + absl::get(headers_or_trailers_) != nullptr) { + is_grpc_request = + Grpc::Common::isGrpcRequestHeaders(*absl::get(headers_or_trailers_)); + } + const bool is_head_request = parser_.method == HTTP_HEAD; + active_request_->request_decoder_->sendLocalReply(is_grpc_request, error_code_, + CodeUtility::toString(error_code_), nullptr, + is_head_request, absl::nullopt, details); + return; + } +} + +void ServerConnectionImpl::onAboveHighWatermark() { + if (active_request_.has_value()) { + active_request_.value().response_encoder_.runHighWatermarkCallbacks(); + } +} +void ServerConnectionImpl::onBelowLowWatermark() { + if (active_request_.has_value()) { + active_request_.value().response_encoder_.runLowWatermarkCallbacks(); + } +} + +void ServerConnectionImpl::releaseOutboundResponse( + const Buffer::OwnedBufferFragmentImpl* fragment) { + ASSERT(outbound_responses_ >= 1); + --outbound_responses_; + delete fragment; +} + +void ServerConnectionImpl::checkHeaderNameForUnderscores() { + if (headers_with_underscores_action_ != envoy::config::core::v3::HttpProtocolOptions::ALLOW && + Http::HeaderUtility::headerNameContainsUnderscore(current_header_field_.getStringView())) { + if (headers_with_underscores_action_ == + envoy::config::core::v3::HttpProtocolOptions::DROP_HEADER) { + ENVOY_CONN_LOG(debug, "Dropping header with invalid characters in its name: {}", connection_, + current_header_field_.getStringView()); + stats_.dropped_headers_with_underscores_.inc(); + current_header_field_.clear(); + current_header_value_.clear(); + } else { + ENVOY_CONN_LOG(debug, "Rejecting request due to header name with underscores: {}", + connection_, current_header_field_.getStringView()); + error_code_ = Http::Code::BadRequest; + sendProtocolError(Http1ResponseCodeDetails::get().InvalidCharacters); + stats_.requests_rejected_with_underscores_in_headers_.inc(); + throw CodecProtocolException("http/1.1 protocol error: header name contains underscores"); + } + } +} + +ClientConnectionImpl::ClientConnectionImpl(Network::Connection& connection, CodecStats& stats, + ConnectionCallbacks&, const Http1Settings& settings, + const uint32_t max_response_headers_count) + : ConnectionImpl(connection, stats, HTTP_RESPONSE, MAX_RESPONSE_HEADERS_KB, + max_response_headers_count, formatter(settings), settings.enable_trailers_) {} + +bool ClientConnectionImpl::cannotHaveBody() { + if (pending_response_.has_value() 
&& pending_response_.value().encoder_.headRequest()) { + ASSERT(!pending_response_done_); + return true; + } else if (parser_.status_code == 204 || parser_.status_code == 304 || + (parser_.status_code >= 200 && parser_.content_length == 0)) { + return true; + } else { + return false; + } +} + +RequestEncoder& ClientConnectionImpl::newStream(ResponseDecoder& response_decoder) { + if (resetStreamCalled()) { + throw CodecClientException("cannot create new streams after calling reset"); + } + + // If reads were disabled due to flow control, we expect reads to always be enabled again before + // reusing this connection. This is done when the response is received. + ASSERT(connection_.readEnabled()); + + ASSERT(!pending_response_.has_value()); + ASSERT(pending_response_done_); + pending_response_.emplace(*this, header_key_formatter_.get(), &response_decoder); + pending_response_done_ = false; + return pending_response_.value().encoder_; +} + +int ClientConnectionImpl::onHeadersComplete() { + // Handle the case where the client is closing a kept alive connection (by sending a 408 + // with a 'Connection: close' header). In this case we just let response flush out followed + // by the remote close. + if (!pending_response_.has_value() && !resetStreamCalled()) { + throw PrematureResponseException(static_cast(parser_.status_code)); + } else if (pending_response_.has_value()) { + ASSERT(!pending_response_done_); + auto& headers = absl::get(headers_or_trailers_); + ENVOY_CONN_LOG(trace, "Client: onHeadersComplete size={}", connection_, headers->size()); + headers->setStatus(parser_.status_code); + + if (parser_.status_code >= 200 && parser_.status_code < 300 && + pending_response_.value().encoder_.connectRequest()) { + ENVOY_CONN_LOG(trace, "codec entering upgrade mode for CONNECT response.", connection_); + handling_upgrade_ = true; + + // For responses to connect requests, do not accept the chunked + // encoding header: https://tools.ietf.org/html/rfc7231#section-4.3.6 + if (headers->TransferEncoding() && + absl::EqualsIgnoreCase(headers->TransferEncoding()->value().getStringView(), + Headers::get().TransferEncodingValues.Chunked)) { + sendProtocolError(Http1ResponseCodeDetails::get().InvalidTransferEncoding); + throw CodecProtocolException("http/1.1 protocol error: unsupported transfer encoding"); + } + } + + if (strict_1xx_and_204_headers_ && (parser_.status_code < 200 || parser_.status_code == 204)) { + if (headers->TransferEncoding()) { + sendProtocolError(Http1ResponseCodeDetails::get().TransferEncodingNotAllowed); + throw CodecProtocolException( + "http/1.1 protocol error: transfer encoding not allowed in 1xx or 204"); + } + + if (headers->ContentLength()) { + // Report a protocol error for non-zero Content-Length, but paper over zero Content-Length. + if (headers->ContentLength()->value().getStringView() != "0") { + sendProtocolError(Http1ResponseCodeDetails::get().ContentLengthNotAllowed); + throw CodecProtocolException( + "http/1.1 protocol error: content length not allowed in 1xx or 204"); + } + + headers->removeContentLength(); + } + } + + if (parser_.status_code == 100) { + // http-parser treats 100 continue headers as their own complete response. + // Swallow the spurious onMessageComplete and continue processing. 
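// A minimal, standalone sketch of the 1xx/204 rule enforced above, assuming a
// hypothetical checkNoBodyStatusHeaders() helper and lower-cased header keys (neither is
// part of this patch): responses that cannot carry a body must not advertise
// Transfer-Encoding, and any Content-Length other than "0" is a protocol error.
#include <map>
#include <stdexcept>
#include <string>

void checkNoBodyStatusHeaders(int status, const std::map<std::string, std::string>& headers) {
  const bool no_body_status = status < 200 || status == 204;
  if (!no_body_status) {
    return;
  }
  if (headers.count("transfer-encoding") != 0) {
    throw std::runtime_error("transfer encoding not allowed in 1xx or 204");
  }
  const auto it = headers.find("content-length");
  if (it != headers.end() && it->second != "0") {
    throw std::runtime_error("content length not allowed in 1xx or 204");
  }
}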
+ ignore_message_complete_for_100_continue_ = true; + pending_response_.value().decoder_->decode100ContinueHeaders(std::move(headers)); + + // Reset to ensure no information from the continue headers is used for the response headers + // in case the callee does not move the headers out. + headers_or_trailers_.emplace(nullptr); + } else if (cannotHaveBody() && !handling_upgrade_) { + deferred_end_stream_headers_ = true; + } else { + pending_response_.value().decoder_->decodeHeaders(std::move(headers), false); + } + } + + // Here we deal with cases where the response cannot have a body, but http_parser does not deal + // with it for us. + return cannotHaveBody() ? 1 : 0; +} + +bool ClientConnectionImpl::upgradeAllowed() const { + if (pending_response_.has_value()) { + return pending_response_->encoder_.upgradeRequest(); + } + return false; +} + +void ClientConnectionImpl::onBody(Buffer::Instance& data) { + ASSERT(!deferred_end_stream_headers_); + if (pending_response_.has_value()) { + ASSERT(!pending_response_done_); + pending_response_.value().decoder_->decodeData(data, false); + } +} + +void ClientConnectionImpl::onMessageComplete() { + ENVOY_CONN_LOG(trace, "message complete", connection_); + if (ignore_message_complete_for_100_continue_) { + ignore_message_complete_for_100_continue_ = false; + return; + } + if (pending_response_.has_value()) { + ASSERT(!pending_response_done_); + // After calling decodeData() with end stream set to true, we should no longer be able to reset. + PendingResponse& response = pending_response_.value(); + // Encoder is used as part of decode* calls later in this function so pending_response_ can not + // be reset just yet. Preserve the state in pending_response_done_ instead. + pending_response_done_ = true; + + if (deferred_end_stream_headers_) { + response.decoder_->decodeHeaders( + std::move(absl::get(headers_or_trailers_)), true); + deferred_end_stream_headers_ = false; + } else if (processing_trailers_) { + response.decoder_->decodeTrailers( + std::move(absl::get(headers_or_trailers_))); + } else { + Buffer::OwnedImpl buffer; + response.decoder_->decodeData(buffer, true); + } + + // Reset to ensure no information from one requests persists to the next. + pending_response_.reset(); + headers_or_trailers_.emplace(nullptr); + } +} + +void ClientConnectionImpl::onResetStream(StreamResetReason reason) { + // Only raise reset if we did not already dispatch a complete response. + if (pending_response_.has_value() && !pending_response_done_) { + pending_response_.value().encoder_.runResetCallbacks(reason); + pending_response_done_ = true; + pending_response_.reset(); + } +} + +void ClientConnectionImpl::sendProtocolError(absl::string_view details) { + if (pending_response_.has_value()) { + ASSERT(!pending_response_done_); + pending_response_.value().encoder_.setDetails(details); + } +} + +void ClientConnectionImpl::onAboveHighWatermark() { + // This should never happen without an active stream/request. + pending_response_.value().encoder_.runHighWatermarkCallbacks(); +} + +void ClientConnectionImpl::onBelowLowWatermark() { + // This can get called without an active stream/request when the response completion causes us to + // close the connection, but in doing so go below low watermark. 
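// A minimal sketch of the deferred end-stream headers flow used in onHeadersComplete()
// and onMessageComplete() above, with illustrative SketchDecoder/SketchHeaders types
// (not Envoy APIs): when a response cannot have a body, its headers are held back until
// the parser signals message-complete so they can be delivered once with end_stream set.
#include <memory>
#include <utility>

struct SketchHeaders {};
struct SketchDecoder {
  void decodeHeaders(std::unique_ptr<SketchHeaders>, bool /*end_stream*/) {}
};

struct SketchCodec {
  void onHeadersComplete(bool cannot_have_body) {
    if (cannot_have_body) {
      deferred_end_stream_headers_ = true; // Hold the headers until onMessageComplete().
    } else {
      decoder_.decodeHeaders(std::move(headers_), /*end_stream=*/false);
    }
  }
  void onMessageComplete() {
    if (deferred_end_stream_headers_) {
      decoder_.decodeHeaders(std::move(headers_), /*end_stream=*/true);
      deferred_end_stream_headers_ = false;
    }
  }
  SketchDecoder decoder_;
  std::unique_ptr<SketchHeaders> headers_{std::make_unique<SketchHeaders>()};
  bool deferred_end_stream_headers_{false};
};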
+ if (pending_response_.has_value() && !pending_response_done_) { + pending_response_.value().encoder_.runLowWatermarkCallbacks(); + } +} + +} // namespace Http1 +} // namespace Legacy +} // namespace Http +} // namespace Envoy diff --git a/source/common/http/http1/codec_impl_legacy.h b/source/common/http/http1/codec_impl_legacy.h new file mode 100644 index 000000000000..f5e9811ede87 --- /dev/null +++ b/source/common/http/http1/codec_impl_legacy.h @@ -0,0 +1,607 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include + +#include "envoy/config/core/v3/protocol.pb.h" +#include "envoy/http/codec.h" +#include "envoy/network/connection.h" + +#include "common/buffer/watermark_buffer.h" +#include "common/common/assert.h" +#include "common/common/statusor.h" +#include "common/http/codec_helper.h" +#include "common/http/codes.h" +#include "common/http/header_map_impl.h" +#include "common/http/http1/codec_stats.h" +#include "common/http/http1/header_formatter.h" +#include "common/http/status.h" + +namespace Envoy { +namespace Http { +namespace Legacy { +namespace Http1 { + +class ConnectionImpl; + +/** + * Base class for HTTP/1.1 request and response encoders. + */ +class StreamEncoderImpl : public virtual StreamEncoder, + public Stream, + Logger::Loggable, + public StreamCallbackHelper, + public Http1StreamEncoderOptions { +public: + ~StreamEncoderImpl() override { + // When the stream goes away, undo any read blocks to resume reading. + while (read_disable_calls_ != 0) { + StreamEncoderImpl::readDisable(false); + } + } + // Http::StreamEncoder + void encodeData(Buffer::Instance& data, bool end_stream) override; + void encodeMetadata(const MetadataMapVector&) override; + Stream& getStream() override { return *this; } + Http1StreamEncoderOptionsOptRef http1StreamEncoderOptions() override { return *this; } + + // Http::Http1StreamEncoderOptions + void disableChunkEncoding() override { disable_chunk_encoding_ = true; } + + // Http::Stream + void addCallbacks(StreamCallbacks& callbacks) override { addCallbacksHelper(callbacks); } + void removeCallbacks(StreamCallbacks& callbacks) override { removeCallbacksHelper(callbacks); } + // After this is called, for the HTTP/1 codec, the connection should be closed, i.e. no further + // progress may be made with the codec. + void resetStream(StreamResetReason reason) override; + void readDisable(bool disable) override; + uint32_t bufferLimit() override; + absl::string_view responseDetails() override { return details_; } + const Network::Address::InstanceConstSharedPtr& connectionLocalAddress() override; + void setFlushTimeout(std::chrono::milliseconds) override { + // HTTP/1 has one stream per connection, thus any data encoded is immediately written to the + // connection, invoking any watermarks as necessary. There is no internal buffering that would + // require a flush timeout not already covered by other timeouts. 
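// A minimal sketch of the read-disable balancing performed in ~StreamEncoderImpl()
// above, using an illustrative SketchConnection type (not an Envoy API): every
// readDisable(true) issued for flow control must be undone before the stream goes away
// so that the underlying connection resumes reading.
struct SketchConnection {
  void readDisable(bool disable) { read_disable_count_ += disable ? 1 : -1; }
  int read_disable_count_{0};
};

struct SketchStream {
  ~SketchStream() {
    // When the stream goes away, undo any outstanding read blocks.
    while (read_disable_calls_ != 0) {
      readDisable(false);
    }
  }
  void readDisable(bool disable) {
    if (disable) {
      ++read_disable_calls_;
    } else {
      --read_disable_calls_;
    }
    connection_.readDisable(disable);
  }
  SketchConnection connection_;
  unsigned int read_disable_calls_{0};
};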
+ } + + void setIsResponseToHeadRequest(bool value) { is_response_to_head_request_ = value; } + void setIsResponseToConnectRequest(bool value) { is_response_to_connect_request_ = value; } + void setDetails(absl::string_view details) { details_ = details; } + + void clearReadDisableCallsForTests() { read_disable_calls_ = 0; } + +protected: + StreamEncoderImpl(ConnectionImpl& connection, + Http::Http1::HeaderKeyFormatter* header_key_formatter); + void encodeHeadersBase(const RequestOrResponseHeaderMap& headers, absl::optional status, + bool end_stream); + void encodeTrailersBase(const HeaderMap& headers); + + static const std::string CRLF; + static const std::string LAST_CHUNK; + + ConnectionImpl& connection_; + uint32_t read_disable_calls_{}; + bool disable_chunk_encoding_ : 1; + bool chunk_encoding_ : 1; + bool is_response_to_head_request_ : 1; + bool is_response_to_connect_request_ : 1; + +private: + /** + * Called to encode an individual header. + * @param key supplies the header to encode. + * @param key_size supplies the byte size of the key. + * @param value supplies the value to encode. + * @param value_size supplies the byte size of the value. + */ + void encodeHeader(const char* key, uint32_t key_size, const char* value, uint32_t value_size); + + /** + * Called to encode an individual header. + * @param key supplies the header to encode as a string_view. + * @param value supplies the value to encode as a string_view. + */ + void encodeHeader(absl::string_view key, absl::string_view value); + + /** + * Called to finalize a stream encode. + */ + void endEncode(); + + void encodeFormattedHeader(absl::string_view key, absl::string_view value); + + const Http::Http1::HeaderKeyFormatter* const header_key_formatter_; + absl::string_view details_; +}; + +/** + * HTTP/1.1 response encoder. + */ +class ResponseEncoderImpl : public StreamEncoderImpl, public ResponseEncoder { +public: + ResponseEncoderImpl(ConnectionImpl& connection, + Http::Http1::HeaderKeyFormatter* header_key_formatter) + : StreamEncoderImpl(connection, header_key_formatter) {} + + bool startedResponse() { return started_response_; } + + // Http::ResponseEncoder + void encode100ContinueHeaders(const ResponseHeaderMap& headers) override; + void encodeHeaders(const ResponseHeaderMap& headers, bool end_stream) override; + void encodeTrailers(const ResponseTrailerMap& trailers) override { encodeTrailersBase(trailers); } + +private: + bool started_response_{}; +}; + +/** + * HTTP/1.1 request encoder. + */ +class RequestEncoderImpl : public StreamEncoderImpl, public RequestEncoder { +public: + RequestEncoderImpl(ConnectionImpl& connection, + Http::Http1::HeaderKeyFormatter* header_key_formatter) + : StreamEncoderImpl(connection, header_key_formatter) {} + bool upgradeRequest() const { return upgrade_request_; } + bool headRequest() const { return head_request_; } + bool connectRequest() const { return connect_request_; } + + // Http::RequestEncoder + void encodeHeaders(const RequestHeaderMap& headers, bool end_stream) override; + void encodeTrailers(const RequestTrailerMap& trailers) override { encodeTrailersBase(trailers); } + +private: + bool upgrade_request_{}; + bool head_request_{}; + bool connect_request_{}; +}; + +/** + * Base class for HTTP/1.1 client and server connections. + * Handles the callbacks of http_parser with its own base routine and then + * virtual dispatches to its subclasses. 
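// A minimal sketch of the "base routine, then virtual dispatch" callback wiring the
// comment above describes, assuming the nodejs/http-parser header is available
// (ConnSketch and its members are illustrative, not Envoy types): captureless lambdas
// recover the connection object from parser->data, run shared bookkeeping, and then
// call the virtual hook that server/client subclasses override.
#include <cstddef>

#include "http_parser.h"

class ConnSketch {
public:
  ConnSketch() {
    settings_.on_message_begin = [](http_parser* parser) -> int {
      static_cast<ConnSketch*>(parser->data)->onMessageBeginBase();
      return 0;
    };
    http_parser_init(&parser_, HTTP_BOTH);
    parser_.data = this;
  }
  virtual ~ConnSketch() = default;

  void dispatch(const char* data, size_t length) {
    http_parser_execute(&parser_, &settings_, data, length);
  }

private:
  void onMessageBeginBase() {
    // Shared bookkeeping happens here before the virtual dispatch.
    onMessageBegin();
  }
  virtual void onMessageBegin() {} // Overridden by server/client variants.

  http_parser parser_;
  http_parser_settings settings_{};
};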
+ */ +class ConnectionImpl : public virtual Connection, protected Logger::Loggable { +public: + /** + * @return Network::Connection& the backing network connection. + */ + Network::Connection& connection() { return connection_; } + + /** + * Called when the active encoder has completed encoding the outbound half of the stream. + */ + virtual void onEncodeComplete() PURE; + + /** + * Called when resetStream() has been called on an active stream. In HTTP/1.1 the only + * valid operation after this point is for the connection to get blown away, but we will not + * fire any more callbacks in case some stack has to unwind. + */ + void onResetStreamBase(StreamResetReason reason); + + /** + * Flush all pending output from encoding. + */ + void flushOutput(bool end_encode = false); + + void addToBuffer(absl::string_view data); + void addCharToBuffer(char c); + void addIntToBuffer(uint64_t i); + Buffer::WatermarkBuffer& buffer() { return output_buffer_; } + uint64_t bufferRemainingSize(); + void copyToBuffer(const char* data, uint64_t length); + void reserveBuffer(uint64_t size); + void readDisable(bool disable) { + if (connection_.state() == Network::Connection::State::Open) { + connection_.readDisable(disable); + } + } + uint32_t bufferLimit() { return connection_.bufferLimit(); } + virtual bool supportsHttp10() { return false; } + bool maybeDirectDispatch(Buffer::Instance& data); + virtual void maybeAddSentinelBufferFragment(Buffer::WatermarkBuffer&) {} + Http::Http1::CodecStats& stats() { return stats_; } + bool enableTrailers() const { return enable_trailers_; } + + // Http::Connection + Http::Status dispatch(Buffer::Instance& data) override; + void goAway() override {} // Called during connection manager drain flow + Protocol protocol() override { return protocol_; } + void shutdownNotice() override {} // Called during connection manager drain flow + bool wantsToWrite() override { return false; } + void onUnderlyingConnectionAboveWriteBufferHighWatermark() override { onAboveHighWatermark(); } + void onUnderlyingConnectionBelowWriteBufferLowWatermark() override { onBelowLowWatermark(); } + + bool strict1xxAnd204Headers() { return strict_1xx_and_204_headers_; } + +protected: + ConnectionImpl(Network::Connection& connection, Http::Http1::CodecStats& stats, + http_parser_type type, uint32_t max_headers_kb, const uint32_t max_headers_count, + Http::Http1::HeaderKeyFormatterPtr&& header_key_formatter, bool enable_trailers); + + bool resetStreamCalled() { return reset_stream_called_; } + void onMessageBeginBase(); + + /** + * Get memory used to represent HTTP headers or trailers currently being parsed. + * Computed by adding the partial header field and value that is currently being parsed and the + * estimated header size for previous header lines provided by HeaderMap::byteSize(). + */ + virtual uint32_t getHeadersSize(); + + /** + * Called from onUrl, onHeaderField and onHeaderValue to verify that the headers do not exceed the + * configured max header size limit. Throws a CodecProtocolException if headers exceed the size + * limit. 
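// A minimal sketch of the header-size accounting described above (all names here are
// illustrative): the partial field and value currently being parsed are added to the
// bytes already attributed to completed header lines, and the total is compared against
// the configured limit in KiB.
#include <cstdint>
#include <stdexcept>
#include <string>

struct HeaderSizeSketch {
  uint32_t currentSize() const {
    return static_cast<uint32_t>(current_field_.size() + current_value_.size() + parsed_bytes_);
  }
  void checkLimit(uint32_t max_headers_kb) const {
    if (currentSize() > max_headers_kb * 1024u) {
      throw std::runtime_error("headers size exceeds configured limit");
    }
  }
  std::string current_field_; // Header name currently being parsed.
  std::string current_value_; // Header value currently being parsed.
  uint64_t parsed_bytes_{0};  // Analogous to HeaderMap::byteSize() for completed lines.
};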
+ */ + void checkMaxHeadersSize(); + + Network::Connection& connection_; + Http::Http1::CodecStats& stats_; + http_parser parser_; + Http::Code error_code_{Http::Code::BadRequest}; + const Http::Http1::HeaderKeyFormatterPtr header_key_formatter_; + HeaderString current_header_field_; + HeaderString current_header_value_; + bool processing_trailers_ : 1; + bool handling_upgrade_ : 1; + bool reset_stream_called_ : 1; + // Deferred end stream headers indicate that we are not going to raise headers until the full + // HTTP/1 message has been flushed from the parser. This allows raising an HTTP/2 style headers + // block with end stream set to true with no further protocol data remaining. + bool deferred_end_stream_headers_ : 1; + const bool connection_header_sanitization_ : 1; + const bool enable_trailers_ : 1; + const bool strict_1xx_and_204_headers_ : 1; + +private: + enum class HeaderParsingState { Field, Value, Done }; + + virtual HeaderMap& headersOrTrailers() PURE; + virtual RequestOrResponseHeaderMap& requestOrResponseHeaders() PURE; + virtual void allocHeaders() PURE; + virtual void allocTrailers() PURE; + + /** + * Called in order to complete an in progress header decode. + */ + void completeLastHeader(); + + /** + * Check if header name contains underscore character. + * Underscore character is allowed in header names by the RFC-7230 and this check is implemented + * as a security measure due to systems that treat '_' and '-' as interchangeable. + * The ServerConnectionImpl may drop header or reject request based on the + * `common_http_protocol_options.headers_with_underscores_action` configuration option in the + * HttpConnectionManager. + */ + virtual bool shouldDropHeaderWithUnderscoresInNames(absl::string_view /* header_name */) const { + return false; + } + + /** + * An inner dispatch call that executes the dispatching logic. While exception removal is in + * migration (#10878), this function may either throw an exception or return an error status. + * Exceptions are caught and translated to their corresponding statuses in the outer level + * dispatch. + * TODO(#10878): Remove this when exception removal is complete. + */ + Http::Status innerDispatch(Buffer::Instance& data); + + /** + * Dispatch a memory span. + * @param slice supplies the start address. + * @len supplies the length of the span. + */ + size_t dispatchSlice(const char* slice, size_t len); + + /** + * Called by the http_parser when body data is received. + * @param data supplies the start address. + * @param length supplies the length. + */ + void bufferBody(const char* data, size_t length); + + /** + * Push the accumulated body through the filter pipeline. + */ + void dispatchBufferedBody(); + + /** + * Called when a request/response is beginning. A base routine happens first then a virtual + * dispatch is invoked. + */ + virtual void onMessageBegin() PURE; + + /** + * Called when URL data is received. + * @param data supplies the start address. + * @param length supplies the length. + */ + virtual void onUrl(const char* data, size_t length) PURE; + + /** + * Called when header field data is received. + * @param data supplies the start address. + * @param length supplies the length. + */ + void onHeaderField(const char* data, size_t length); + + /** + * Called when header value data is received. + * @param data supplies the start address. + * @param length supplies the length. + */ + void onHeaderValue(const char* data, size_t length); + + /** + * Called when headers are complete. 
A base routine happens first then a virtual dispatch is + * invoked. Note that this only applies to headers and NOT trailers. End of + * trailers are signaled via onMessageCompleteBase(). + * @return 0 if no error, 1 if there should be no body. + */ + int onHeadersCompleteBase(); + virtual int onHeadersComplete() PURE; + + /** + * Called to see if upgrade transition is allowed. + */ + virtual bool upgradeAllowed() const PURE; + + /** + * Called with body data is available for processing when either: + * - There is an accumulated partial body after the parser is done processing bytes read from the + * socket + * - The parser encounters the last byte of the body + * - The codec does a direct dispatch from the read buffer + * For performance reasons there is at most one call to onBody per call to HTTP/1 + * ConnectionImpl::dispatch call. + * @param data supplies the body data + */ + virtual void onBody(Buffer::Instance& data) PURE; + + /** + * Called when the request/response is complete. + */ + void onMessageCompleteBase(); + virtual void onMessageComplete() PURE; + + /** + * Called when accepting a chunk header. + */ + void onChunkHeader(bool is_final_chunk); + + /** + * @see onResetStreamBase(). + */ + virtual void onResetStream(StreamResetReason reason) PURE; + + /** + * Send a protocol error response to remote. + */ + virtual void sendProtocolError(absl::string_view details) PURE; + + /** + * Called when output_buffer_ or the underlying connection go from below a low watermark to over + * a high watermark. + */ + virtual void onAboveHighWatermark() PURE; + + /** + * Called when output_buffer_ or the underlying connection go from above a high watermark to + * below a low watermark. + */ + virtual void onBelowLowWatermark() PURE; + + /** + * Check if header name contains underscore character. + * The ServerConnectionImpl may drop header or reject request based on configuration. + */ + virtual void checkHeaderNameForUnderscores() {} + + static http_parser_settings settings_; + + HeaderParsingState header_parsing_state_{HeaderParsingState::Field}; + // Used to accumulate the HTTP message body during the current dispatch call. The accumulated body + // is pushed through the filter pipeline either at the end of the current dispatch call, or when + // the last byte of the body is processed (whichever happens first). + Buffer::OwnedImpl buffered_body_; + Buffer::WatermarkBuffer output_buffer_; + Protocol protocol_{Protocol::Http11}; + const uint32_t max_headers_kb_; + const uint32_t max_headers_count_; +}; + +/** + * Implementation of Http::ServerConnection for HTTP/1.1. + */ +class ServerConnectionImpl : public ServerConnection, public ConnectionImpl { +public: + ServerConnectionImpl(Network::Connection& connection, Http::Http1::CodecStats& stats, + ServerConnectionCallbacks& callbacks, const Http1Settings& settings, + uint32_t max_request_headers_kb, const uint32_t max_request_headers_count, + envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action); + bool supportsHttp10() override { return codec_settings_.accept_http_10_; } + +protected: + /** + * An active HTTP/1.1 request. 
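// A minimal sketch of the buffered-body behaviour noted above (illustrative names, not
// Envoy APIs): body bytes handed over by parser callbacks are accumulated and pushed
// through the pipeline at most once per dispatch, either when the last byte of the body
// is seen or when the dispatch call finishes.
#include <cstddef>
#include <string>

struct BodySketch {
  void onBodyBytes(const char* data, size_t length) { buffered_body_.append(data, length); }
  void dispatchBufferedBody() {
    if (!buffered_body_.empty()) {
      deliver(buffered_body_); // Single onBody()-style delivery per dispatch.
      buffered_body_.clear();
    }
  }
  void deliver(const std::string&) {}
  std::string buffered_body_;
};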
+ */ + struct ActiveRequest { + ActiveRequest(ConnectionImpl& connection, Http::Http1::HeaderKeyFormatter* header_key_formatter) + : response_encoder_(connection, header_key_formatter) {} + + HeaderString request_url_; + RequestDecoder* request_decoder_{}; + ResponseEncoderImpl response_encoder_; + bool remote_complete_{}; + }; + absl::optional& activeRequest() { return active_request_; } + // ConnectionImpl + void onMessageComplete() override; + // Add the size of the request_url to the reported header size when processing request headers. + uint32_t getHeadersSize() override; + +private: + /** + * Manipulate the request's first line, parsing the url and converting to a relative path if + * necessary. Compute Host / :authority headers based on 7230#5.7 and 7230#6 + * + * @param is_connect true if the request has the CONNECT method + * @param headers the request's headers + * @throws CodecProtocolException on an invalid url in the request line + */ + void handlePath(RequestHeaderMap& headers, unsigned int method); + + // ConnectionImpl + void onEncodeComplete() override; + void onMessageBegin() override; + void onUrl(const char* data, size_t length) override; + int onHeadersComplete() override; + // If upgrade behavior is not allowed, the HCM will have sanitized the headers out. + bool upgradeAllowed() const override { return true; } + void onBody(Buffer::Instance& data) override; + void onResetStream(StreamResetReason reason) override; + void sendProtocolError(absl::string_view details) override; + void onAboveHighWatermark() override; + void onBelowLowWatermark() override; + HeaderMap& headersOrTrailers() override { + if (absl::holds_alternative(headers_or_trailers_)) { + return *absl::get(headers_or_trailers_); + } else { + return *absl::get(headers_or_trailers_); + } + } + RequestOrResponseHeaderMap& requestOrResponseHeaders() override { + return *absl::get(headers_or_trailers_); + } + void allocHeaders() override { + ASSERT(nullptr == absl::get(headers_or_trailers_)); + ASSERT(!processing_trailers_); + headers_or_trailers_.emplace(RequestHeaderMapImpl::create()); + } + void allocTrailers() override { + ASSERT(processing_trailers_); + if (!absl::holds_alternative(headers_or_trailers_)) { + headers_or_trailers_.emplace(RequestTrailerMapImpl::create()); + } + } + + void sendProtocolErrorOld(absl::string_view details); + + void releaseOutboundResponse(const Buffer::OwnedBufferFragmentImpl* fragment); + void maybeAddSentinelBufferFragment(Buffer::WatermarkBuffer& output_buffer) override; + void doFloodProtectionChecks() const; + void checkHeaderNameForUnderscores() override; + + ServerConnectionCallbacks& callbacks_; + absl::optional active_request_; + Http1Settings codec_settings_; + const Buffer::OwnedBufferFragmentImpl::Releasor response_buffer_releasor_; + uint32_t outbound_responses_{}; + // This defaults to 2, which functionally disables pipelining. If any users + // of Envoy wish to enable pipelining (which is dangerous and ill supported) + // we could make this configurable. + uint32_t max_outbound_responses_{}; + bool flood_protection_{}; + // TODO(mattklein123): This should be a member of ActiveRequest but this change needs dedicated + // thought as some of the reset and no header code paths make this difficult. Headers are + // populated on message begin. Trailers are populated on the first parsed trailer field (if + // trailers are enabled). The variant is reset to null headers on message complete for assertion + // purposes. 
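// A minimal sketch of the headers-or-trailers variant lifecycle described in the TODO
// above, with illustrative ReqHeaders/ReqTrailers types and assuming Abseil is
// available: the storage starts out holding request headers, switches to trailers when
// trailer parsing begins, and is reset to a null map on message complete so stale state
// cannot leak into the next request.
#include <memory>

#include "absl/types/variant.h"

struct ReqHeaders {};
struct ReqTrailers {};
using HeadersPtr = std::unique_ptr<ReqHeaders>;
using TrailersPtr = std::unique_ptr<ReqTrailers>;

struct VariantSketch {
  void onMessageBegin() {
    headers_or_trailers_.emplace<HeadersPtr>(std::make_unique<ReqHeaders>());
  }
  void onFirstTrailerField() {
    if (!absl::holds_alternative<TrailersPtr>(headers_or_trailers_)) {
      headers_or_trailers_.emplace<TrailersPtr>(std::make_unique<ReqTrailers>());
    }
  }
  void onMessageComplete() { headers_or_trailers_.emplace<HeadersPtr>(nullptr); }

  absl::variant<HeadersPtr, TrailersPtr> headers_or_trailers_;
};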
+ absl::variant headers_or_trailers_; + // The action to take when a request header name contains underscore characters. + const envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action_; +}; + +/** + * Implementation of Http::ClientConnection for HTTP/1.1. + */ +class ClientConnectionImpl : public ClientConnection, public ConnectionImpl { +public: + ClientConnectionImpl(Network::Connection& connection, Http::Http1::CodecStats& stats, + ConnectionCallbacks& callbacks, const Http1Settings& settings, + const uint32_t max_response_headers_count); + + // Http::ClientConnection + RequestEncoder& newStream(ResponseDecoder& response_decoder) override; + +private: + struct PendingResponse { + PendingResponse(ConnectionImpl& connection, + Http::Http1::HeaderKeyFormatter* header_key_formatter, ResponseDecoder* decoder) + : encoder_(connection, header_key_formatter), decoder_(decoder) {} + + RequestEncoderImpl encoder_; + ResponseDecoder* decoder_; + }; + + bool cannotHaveBody(); + + // ConnectionImpl + void onEncodeComplete() override {} + void onMessageBegin() override {} + void onUrl(const char*, size_t) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } + int onHeadersComplete() override; + bool upgradeAllowed() const override; + void onBody(Buffer::Instance& data) override; + void onMessageComplete() override; + void onResetStream(StreamResetReason reason) override; + void sendProtocolError(absl::string_view details) override; + void onAboveHighWatermark() override; + void onBelowLowWatermark() override; + HeaderMap& headersOrTrailers() override { + if (absl::holds_alternative(headers_or_trailers_)) { + return *absl::get(headers_or_trailers_); + } else { + return *absl::get(headers_or_trailers_); + } + } + RequestOrResponseHeaderMap& requestOrResponseHeaders() override { + return *absl::get(headers_or_trailers_); + } + void allocHeaders() override { + ASSERT(nullptr == absl::get(headers_or_trailers_)); + ASSERT(!processing_trailers_); + headers_or_trailers_.emplace(ResponseHeaderMapImpl::create()); + } + void allocTrailers() override { + ASSERT(processing_trailers_); + if (!absl::holds_alternative(headers_or_trailers_)) { + headers_or_trailers_.emplace(ResponseTrailerMapImpl::create()); + } + } + + absl::optional pending_response_; + // TODO(mattklein123): The following bool tracks whether a pending response is complete before + // dispatching callbacks. This is needed so that pending_response_ stays valid during callbacks + // in order to access the stream, but to avoid invoking callbacks that shouldn't be called once + // the response is complete. The existence of this variable is hard to reason about and it should + // be combined with pending_response_ somehow in a follow up cleanup. + bool pending_response_done_{true}; + // Set true between receiving 100-Continue headers and receiving the spurious onMessageComplete. + bool ignore_message_complete_for_100_continue_{}; + // TODO(mattklein123): This should be a member of PendingResponse but this change needs dedicated + // thought as some of the reset and no header code paths make this difficult. Headers are + // populated on message begin. Trailers are populated when the switch to trailer processing is + // detected while parsing the first trailer field (if trailers are enabled). The variant is reset + // to null headers on message complete for assertion purposes. + absl::variant headers_or_trailers_; + + // The default limit of 80 KiB is the vanilla http_parser behaviour. 
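// A minimal sketch of the pending_response_ / pending_response_done_ interaction
// described in the TODO above (illustrative types, assuming Abseil is available): the
// optional must stay populated while decode callbacks run, because they may still reach
// back into the encoder, so completion is recorded in a separate flag first and the
// optional is only reset once the callbacks return.
#include <functional>

#include "absl/types/optional.h"

struct ResponseSketch {
  std::function<void()> on_complete = [] {};
};

struct ClientCodecSketch {
  void onMessageComplete() {
    if (!pending_response_.has_value() || pending_response_done_) {
      return;
    }
    pending_response_done_ = true;    // No further resets or callbacks for this response...
    pending_response_->on_complete(); // ...but the state stays alive for this callback.
    pending_response_.reset();        // Safe to drop once the callbacks have returned.
  }

  absl::optional<ResponseSketch> pending_response_;
  bool pending_response_done_{true};
};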
+ static constexpr uint32_t MAX_RESPONSE_HEADERS_KB = 80; +}; + +} // namespace Http1 +} // namespace Legacy +} // namespace Http +} // namespace Envoy diff --git a/source/common/http/http2/BUILD b/source/common/http/http2/BUILD index dd0333ffa847..d2e4ed011311 100644 --- a/source/common/http/http2/BUILD +++ b/source/common/http/http2/BUILD @@ -18,6 +18,36 @@ envoy_cc_library( ], ) +CODEC_LIB_DEPS = [ + ":codec_stats_lib", + ":metadata_decoder_lib", + ":metadata_encoder_lib", + "//include/envoy/event:deferred_deletable", + "//include/envoy/event:dispatcher_interface", + "//include/envoy/http:codec_interface", + "//include/envoy/http:codes_interface", + "//include/envoy/http:header_map_interface", + "//include/envoy/network:connection_interface", + "//include/envoy/stats:stats_interface", + "//source/common/buffer:buffer_lib", + "//source/common/buffer:watermark_buffer_lib", + "//source/common/common:assert_lib", + "//source/common/common:enum_to_int", + "//source/common/common:linked_object", + "//source/common/common:minimal_logger_lib", + "//source/common/common:utility_lib", + "//source/common/http:codec_helper_lib", + "//source/common/http:codes_lib", + "//source/common/http:exception_lib", + "//source/common/http:header_map_lib", + "//source/common/http:header_utility_lib", + "//source/common/http:headers_lib", + "//source/common/http:status_lib", + "//source/common/http:utility_lib", + "//source/common/runtime:runtime_features_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", +] + envoy_cc_library( name = "codec_lib", srcs = ["codec_impl.cc"], @@ -28,35 +58,23 @@ envoy_cc_library( "abseil_inlined_vector", "abseil_algorithm", ], - deps = [ - ":codec_stats_lib", - ":metadata_decoder_lib", - ":metadata_encoder_lib", - "//include/envoy/event:deferred_deletable", - "//include/envoy/event:dispatcher_interface", - "//include/envoy/http:codec_interface", - "//include/envoy/http:codes_interface", - "//include/envoy/http:header_map_interface", - "//include/envoy/network:connection_interface", - "//include/envoy/stats:stats_interface", - "//source/common/buffer:buffer_lib", - "//source/common/buffer:watermark_buffer_lib", - "//source/common/common:assert_lib", - "//source/common/common:enum_to_int", - "//source/common/common:linked_object", - "//source/common/common:minimal_logger_lib", - "//source/common/common:utility_lib", - "//source/common/http:codec_helper_lib", - "//source/common/http:codes_lib", - "//source/common/http:exception_lib", - "//source/common/http:header_map_lib", - "//source/common/http:header_utility_lib", - "//source/common/http:headers_lib", - "//source/common/http:status_lib", - "//source/common/http:utility_lib", - "//source/common/runtime:runtime_features_lib", - "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + deps = CODEC_LIB_DEPS, +) + +envoy_cc_library( + name = "codec_legacy_lib", + srcs = ["codec_impl_legacy.cc"], + hdrs = [ + "codec_impl.h", + "codec_impl_legacy.h", + ], + external_deps = [ + "nghttp2", + "abseil_optional", + "abseil_inlined_vector", + "abseil_algorithm", ], + deps = CODEC_LIB_DEPS, ) # Separate library for some nghttp2 setup stuff to avoid having tests take a diff --git a/source/common/http/http2/codec_impl.cc b/source/common/http/http2/codec_impl.cc index 35c2ee68186e..d365e8e73d8a 100644 --- a/source/common/http/http2/codec_impl.cc +++ b/source/common/http/http2/codec_impl.cc @@ -87,7 +87,7 @@ void ProdNghttp2SessionFactory::init(nghttp2_session*, ConnectionImpl* connectio * Helper to remove const during a cast. 
nghttp2 takes non-const pointers for headers even though * it copies them. */ -template static T* remove_const(const void* object) { +template static T* removeConst(const void* object) { return const_cast(reinterpret_cast(object)); } @@ -120,8 +120,8 @@ static void insertHeader(std::vector& headers, const HeaderEntry& he } const absl::string_view header_key = header.key().getStringView(); const absl::string_view header_value = header.value().getStringView(); - headers.push_back({remove_const(header_key.data()), - remove_const(header_value.data()), header_key.size(), + headers.push_back({removeConst(header_key.data()), + removeConst(header_value.data()), header_key.size(), header_value.size(), flags}); } @@ -241,7 +241,7 @@ void ConnectionImpl::StreamImpl::readDisable(bool disable) { } else { ASSERT(read_disable_count_ > 0); --read_disable_count_; - if (!buffers_overrun()) { + if (!buffersOverrun()) { nghttp2_session_consume(parent_.session_, stream_id_, unconsumed_bytes_); unconsumed_bytes_ = 0; parent_.sendPendingFrames(); @@ -557,7 +557,7 @@ int ConnectionImpl::onData(int32_t stream_id, const uint8_t* data, size_t len) { stream->pending_recv_data_.add(data, len); // Update the window to the peer unless some consumer of this stream's data has hit a flow control // limit and disabled reads on this stream - if (!stream->buffers_overrun()) { + if (!stream->buffersOverrun()) { nghttp2_session_consume(session_, stream_id, len); } else { stream->unconsumed_bytes_ += len; diff --git a/source/common/http/http2/codec_impl.h b/source/common/http/http2/codec_impl.h index cf848599c800..8bd5d8b3d1fd 100644 --- a/source/common/http/http2/codec_impl.h +++ b/source/common/http/http2/codec_impl.h @@ -54,14 +54,16 @@ class ConnectionImpl; // Abstract nghttp2_session factory. Used to enable injection of factories for testing. class Nghttp2SessionFactory { public: + using ConnectionImplType = ConnectionImpl; virtual ~Nghttp2SessionFactory() = default; // Returns a new nghttp2_session to be used with |connection|. virtual nghttp2_session* create(const nghttp2_session_callbacks* callbacks, - ConnectionImpl* connection, const nghttp2_option* options) PURE; + ConnectionImplType* connection, + const nghttp2_option* options) PURE; // Initializes the |session|. 
- virtual void init(nghttp2_session* session, ConnectionImpl* connection, + virtual void init(nghttp2_session* session, ConnectionImplType* connection, const envoy::config::core::v3::Http2ProtocolOptions& options) PURE; }; @@ -256,7 +258,7 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable 0; } + bool buffersOverrun() const { return read_disable_count_ > 0; } ConnectionImpl& parent_; int32_t stream_id_{-1}; @@ -518,12 +520,13 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable +#include +#include + +#include "envoy/event/dispatcher.h" +#include "envoy/http/codes.h" +#include "envoy/http/header_map.h" +#include "envoy/network/connection.h" + +#include "common/common/assert.h" +#include "common/common/cleanup.h" +#include "common/common/enum_to_int.h" +#include "common/common/fmt.h" +#include "common/common/utility.h" +#include "common/http/codes.h" +#include "common/http/exception.h" +#include "common/http/header_utility.h" +#include "common/http/headers.h" +#include "common/http/http2/codec_stats.h" +#include "common/http/utility.h" + +#include "absl/container/fixed_array.h" + +namespace Envoy { +namespace Http { +namespace Legacy { +namespace Http2 { + +class Http2ResponseCodeDetailValues { + // Invalid HTTP header field was received and stream is going to be + // closed. + const absl::string_view ng_http2_err_http_header_ = "http2.invalid.header.field"; + + // Violation in HTTP messaging rule. + const absl::string_view ng_http2_err_http_messaging_ = "http2.violation.of.messaging.rule"; + + // none of the above + const absl::string_view ng_http2_err_unknown_ = "http2.unknown.nghttp2.error"; + +public: + const absl::string_view errorDetails(int error_code) const { + switch (error_code) { + case NGHTTP2_ERR_HTTP_HEADER: + return ng_http2_err_http_header_; + case NGHTTP2_ERR_HTTP_MESSAGING: + return ng_http2_err_http_messaging_; + default: + return ng_http2_err_unknown_; + } + } +}; + +using Http2ResponseCodeDetails = ConstSingleton; +using Http::Http2::CodecStats; +using Http::Http2::MetadataDecoder; +using Http::Http2::MetadataEncoder; + +bool Utility::reconstituteCrumbledCookies(const HeaderString& key, const HeaderString& value, + HeaderString& cookies) { + if (key != Headers::get().Cookie.get().c_str()) { + return false; + } + + if (!cookies.empty()) { + cookies.append("; ", 2); + } + + const absl::string_view value_view = value.getStringView(); + cookies.append(value_view.data(), value_view.size()); + return true; +} + +ConnectionImpl::Http2Callbacks ConnectionImpl::http2_callbacks_; + +nghttp2_session* ProdNghttp2SessionFactory::create(const nghttp2_session_callbacks* callbacks, + ConnectionImpl* connection, + const nghttp2_option* options) { + nghttp2_session* session; + nghttp2_session_client_new2(&session, callbacks, connection, options); + return session; +} + +void ProdNghttp2SessionFactory::init(nghttp2_session*, ConnectionImpl* connection, + const envoy::config::core::v3::Http2ProtocolOptions& options) { + connection->sendSettings(options, true); +} + +/** + * Helper to remove const during a cast. nghttp2 takes non-const pointers for headers even though + * it copies them. 
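// A minimal std::string-based sketch of the cookie-crumb reassembly performed by
// reconstituteCrumbledCookies() above (the helper name appendCookieCrumb is
// illustrative): HTTP/2 allows the Cookie header to arrive as multiple crumbs, which
// are joined back into a single value with "; " separators before being surfaced as one
// header.
#include <string>

bool appendCookieCrumb(const std::string& key, const std::string& value, std::string& cookies) {
  if (key != "cookie") {
    return false; // Not a cookie crumb; let the caller store it as a normal header.
  }
  if (!cookies.empty()) {
    cookies.append("; ");
  }
  cookies.append(value);
  return true;
}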
+ */ +template static T* removeConst(const void* object) { + return const_cast(reinterpret_cast(object)); +} + +ConnectionImpl::StreamImpl::StreamImpl(ConnectionImpl& parent, uint32_t buffer_limit) + : parent_(parent), local_end_stream_sent_(false), remote_end_stream_(false), + data_deferred_(false), waiting_for_non_informational_headers_(false), + pending_receive_buffer_high_watermark_called_(false), + pending_send_buffer_high_watermark_called_(false), reset_due_to_messaging_error_(false) { + parent_.stats_.streams_active_.inc(); + if (buffer_limit > 0) { + setWriteBufferWatermarks(buffer_limit / 2, buffer_limit); + } +} + +ConnectionImpl::StreamImpl::~StreamImpl() { ASSERT(stream_idle_timer_ == nullptr); } + +void ConnectionImpl::StreamImpl::destroy() { + disarmStreamIdleTimer(); + parent_.stats_.streams_active_.dec(); + parent_.stats_.pending_send_bytes_.sub(pending_send_data_.length()); +} + +static void insertHeader(std::vector& headers, const HeaderEntry& header) { + uint8_t flags = 0; + if (header.key().isReference()) { + flags |= NGHTTP2_NV_FLAG_NO_COPY_NAME; + } + if (header.value().isReference()) { + flags |= NGHTTP2_NV_FLAG_NO_COPY_VALUE; + } + const absl::string_view header_key = header.key().getStringView(); + const absl::string_view header_value = header.value().getStringView(); + headers.push_back({removeConst(header_key.data()), + removeConst(header_value.data()), header_key.size(), + header_value.size(), flags}); +} + +void ConnectionImpl::StreamImpl::buildHeaders(std::vector& final_headers, + const HeaderMap& headers) { + final_headers.reserve(headers.size()); + headers.iterate([&final_headers](const HeaderEntry& header) -> HeaderMap::Iterate { + insertHeader(final_headers, header); + return HeaderMap::Iterate::Continue; + }); +} + +void ConnectionImpl::ServerStreamImpl::encode100ContinueHeaders(const ResponseHeaderMap& headers) { + ASSERT(headers.Status()->value() == "100"); + encodeHeaders(headers, false); +} + +void ConnectionImpl::StreamImpl::encodeHeadersBase(const std::vector& final_headers, + bool end_stream) { + nghttp2_data_provider provider; + if (!end_stream) { + provider.source.ptr = this; + provider.read_callback = [](nghttp2_session*, int32_t, uint8_t*, size_t length, + uint32_t* data_flags, nghttp2_data_source* source, + void*) -> ssize_t { + return static_cast(source->ptr)->onDataSourceRead(length, data_flags); + }; + } + + local_end_stream_ = end_stream; + submitHeaders(final_headers, end_stream ? nullptr : &provider); + parent_.sendPendingFrames(); +} + +void ConnectionImpl::ClientStreamImpl::encodeHeaders(const RequestHeaderMap& headers, + bool end_stream) { + // This must exist outside of the scope of isUpgrade as the underlying memory is + // needed until encodeHeadersBase has been called. + std::vector final_headers; + Http::RequestHeaderMapPtr modified_headers; + if (Http::Utility::isUpgrade(headers)) { + modified_headers = createHeaderMap(headers); + upgrade_type_ = std::string(headers.getUpgradeValue()); + Http::Utility::transformUpgradeRequestFromH1toH2(*modified_headers); + buildHeaders(final_headers, *modified_headers); + } else if (headers.Method() && headers.Method()->value() == "CONNECT") { + // If this is not an upgrade style connect (above branch) it is a bytestream + // connect and should have :path and :protocol set accordingly + // As HTTP/1.1 does not require a path for CONNECT, we may have to add one + // if shifting codecs. For now, default to "/" - this can be made + // configurable if necessary. 
+ // https://tools.ietf.org/html/draft-kinnear-httpbis-http2-transport-02 + modified_headers = createHeaderMap(headers); + modified_headers->setProtocol(Headers::get().ProtocolValues.Bytestream); + if (!headers.Path()) { + modified_headers->setPath("/"); + } + buildHeaders(final_headers, *modified_headers); + } else { + buildHeaders(final_headers, headers); + } + encodeHeadersBase(final_headers, end_stream); +} + +void ConnectionImpl::ServerStreamImpl::encodeHeaders(const ResponseHeaderMap& headers, + bool end_stream) { + // The contract is that client codecs must ensure that :status is present. + ASSERT(headers.Status() != nullptr); + + // This must exist outside of the scope of isUpgrade as the underlying memory is + // needed until encodeHeadersBase has been called. + std::vector final_headers; + Http::ResponseHeaderMapPtr modified_headers; + if (Http::Utility::isUpgrade(headers)) { + modified_headers = createHeaderMap(headers); + Http::Utility::transformUpgradeResponseFromH1toH2(*modified_headers); + buildHeaders(final_headers, *modified_headers); + } else { + buildHeaders(final_headers, headers); + } + encodeHeadersBase(final_headers, end_stream); +} + +void ConnectionImpl::StreamImpl::encodeTrailersBase(const HeaderMap& trailers) { + ASSERT(!local_end_stream_); + local_end_stream_ = true; + if (pending_send_data_.length() > 0) { + // In this case we want trailers to come after we release all pending body data that is + // waiting on window updates. We need to save the trailers so that we can emit them later. + ASSERT(!pending_trailers_to_encode_); + pending_trailers_to_encode_ = cloneTrailers(trailers); + createPendingFlushTimer(); + } else { + submitTrailers(trailers); + parent_.sendPendingFrames(); + } +} + +void ConnectionImpl::StreamImpl::encodeMetadata(const MetadataMapVector& metadata_map_vector) { + ASSERT(parent_.allow_metadata_); + MetadataEncoder& metadata_encoder = getMetadataEncoder(); + if (!metadata_encoder.createPayload(metadata_map_vector)) { + return; + } + for (uint8_t flags : metadata_encoder.payloadFrameFlagBytes()) { + submitMetadata(flags); + } + parent_.sendPendingFrames(); +} + +void ConnectionImpl::StreamImpl::readDisable(bool disable) { + ENVOY_CONN_LOG(debug, "Stream {} {}, unconsumed_bytes {} read_disable_count {}", + parent_.connection_, stream_id_, (disable ? 
"disabled" : "enabled"), + unconsumed_bytes_, read_disable_count_); + if (disable) { + ++read_disable_count_; + } else { + ASSERT(read_disable_count_ > 0); + --read_disable_count_; + if (!buffersOverrun()) { + nghttp2_session_consume(parent_.session_, stream_id_, unconsumed_bytes_); + unconsumed_bytes_ = 0; + parent_.sendPendingFrames(); + } + } +} + +void ConnectionImpl::StreamImpl::pendingRecvBufferHighWatermark() { + ENVOY_CONN_LOG(debug, "recv buffer over limit ", parent_.connection_); + ASSERT(!pending_receive_buffer_high_watermark_called_); + pending_receive_buffer_high_watermark_called_ = true; + readDisable(true); +} + +void ConnectionImpl::StreamImpl::pendingRecvBufferLowWatermark() { + ENVOY_CONN_LOG(debug, "recv buffer under limit ", parent_.connection_); + ASSERT(pending_receive_buffer_high_watermark_called_); + pending_receive_buffer_high_watermark_called_ = false; + readDisable(false); +} + +void ConnectionImpl::ClientStreamImpl::decodeHeaders(bool allow_waiting_for_informational_headers) { + auto& headers = absl::get(headers_or_trailers_); + if (allow_waiting_for_informational_headers && + CodeUtility::is1xx(Http::Utility::getResponseStatus(*headers))) { + waiting_for_non_informational_headers_ = true; + } + + if (!upgrade_type_.empty() && headers->Status()) { + Http::Utility::transformUpgradeResponseFromH2toH1(*headers, upgrade_type_); + } + + if (headers->Status()->value() == "100") { + ASSERT(!remote_end_stream_); + response_decoder_.decode100ContinueHeaders(std::move(headers)); + } else { + response_decoder_.decodeHeaders(std::move(headers), remote_end_stream_); + } +} + +void ConnectionImpl::ClientStreamImpl::decodeTrailers() { + response_decoder_.decodeTrailers( + std::move(absl::get(headers_or_trailers_))); +} + +void ConnectionImpl::ServerStreamImpl::decodeHeaders(bool allow_waiting_for_informational_headers) { + ASSERT(!allow_waiting_for_informational_headers); + auto& headers = absl::get(headers_or_trailers_); + if (Http::Utility::isH2UpgradeRequest(*headers)) { + Http::Utility::transformUpgradeRequestFromH2toH1(*headers); + } + request_decoder_->decodeHeaders(std::move(headers), remote_end_stream_); +} + +void ConnectionImpl::ServerStreamImpl::decodeTrailers() { + request_decoder_->decodeTrailers( + std::move(absl::get(headers_or_trailers_))); +} + +void ConnectionImpl::StreamImpl::pendingSendBufferHighWatermark() { + ENVOY_CONN_LOG(debug, "send buffer over limit ", parent_.connection_); + ASSERT(!pending_send_buffer_high_watermark_called_); + pending_send_buffer_high_watermark_called_ = true; + runHighWatermarkCallbacks(); +} + +void ConnectionImpl::StreamImpl::pendingSendBufferLowWatermark() { + ENVOY_CONN_LOG(debug, "send buffer under limit ", parent_.connection_); + ASSERT(pending_send_buffer_high_watermark_called_); + pending_send_buffer_high_watermark_called_ = false; + runLowWatermarkCallbacks(); +} + +void ConnectionImpl::StreamImpl::saveHeader(HeaderString&& name, HeaderString&& value) { + if (!Utility::reconstituteCrumbledCookies(name, value, cookies_)) { + headers().addViaMove(std::move(name), std::move(value)); + } +} + +void ConnectionImpl::StreamImpl::submitTrailers(const HeaderMap& trailers) { + std::vector final_headers; + buildHeaders(final_headers, trailers); + int rc = nghttp2_submit_trailer(parent_.session_, stream_id_, final_headers.data(), + final_headers.size()); + ASSERT(rc == 0); +} + +void ConnectionImpl::StreamImpl::submitMetadata(uint8_t flags) { + ASSERT(stream_id_ > 0); + const int result = + nghttp2_submit_extension(parent_.session_, 
METADATA_FRAME_TYPE, flags, stream_id_, nullptr); + ASSERT(result == 0); +} + +ssize_t ConnectionImpl::StreamImpl::onDataSourceRead(uint64_t length, uint32_t* data_flags) { + if (pending_send_data_.length() == 0 && !local_end_stream_) { + ASSERT(!data_deferred_); + data_deferred_ = true; + return NGHTTP2_ERR_DEFERRED; + } else { + *data_flags |= NGHTTP2_DATA_FLAG_NO_COPY; + if (local_end_stream_ && pending_send_data_.length() <= length) { + *data_flags |= NGHTTP2_DATA_FLAG_EOF; + if (pending_trailers_to_encode_) { + // We need to tell the library to not set end stream so that we can emit the trailers. + *data_flags |= NGHTTP2_DATA_FLAG_NO_END_STREAM; + submitTrailers(*pending_trailers_to_encode_); + pending_trailers_to_encode_.reset(); + } + } + + return std::min(length, pending_send_data_.length()); + } +} + +int ConnectionImpl::StreamImpl::onDataSourceSend(const uint8_t* framehd, size_t length) { + // In this callback we are writing out a raw DATA frame without copying. nghttp2 assumes that we + // "just know" that the frame header is 9 bytes. + // https://nghttp2.org/documentation/types.html#c.nghttp2_send_data_callback + static const uint64_t FRAME_HEADER_SIZE = 9; + + parent_.outbound_data_frames_++; + + Buffer::OwnedImpl output; + if (!parent_.addOutboundFrameFragment(output, framehd, FRAME_HEADER_SIZE)) { + ENVOY_CONN_LOG(debug, "error sending data frame: Too many frames in the outbound queue", + parent_.connection_); + return NGHTTP2_ERR_FLOODED; + } + + parent_.stats_.pending_send_bytes_.sub(length); + output.move(pending_send_data_, length); + parent_.connection_.write(output, false); + return 0; +} + +void ConnectionImpl::ClientStreamImpl::submitHeaders(const std::vector& final_headers, + nghttp2_data_provider* provider) { + ASSERT(stream_id_ == -1); + stream_id_ = nghttp2_submit_request(parent_.session_, nullptr, final_headers.data(), + final_headers.size(), provider, base()); + ASSERT(stream_id_ > 0); +} + +void ConnectionImpl::ServerStreamImpl::submitHeaders(const std::vector& final_headers, + nghttp2_data_provider* provider) { + ASSERT(stream_id_ != -1); + int rc = nghttp2_submit_response(parent_.session_, stream_id_, final_headers.data(), + final_headers.size(), provider); + ASSERT(rc == 0); +} + +void ConnectionImpl::ServerStreamImpl::createPendingFlushTimer() { + ASSERT(stream_idle_timer_ == nullptr); + if (stream_idle_timeout_.count() > 0) { + stream_idle_timer_ = + parent_.connection_.dispatcher().createTimer([this] { onPendingFlushTimer(); }); + stream_idle_timer_->enableTimer(stream_idle_timeout_); + } +} + +void ConnectionImpl::StreamImpl::onPendingFlushTimer() { + ENVOY_CONN_LOG(debug, "pending stream flush timeout", parent_.connection_); + stream_idle_timer_.reset(); + parent_.stats_.tx_flush_timeout_.inc(); + ASSERT(local_end_stream_ && !local_end_stream_sent_); + // This will emit a reset frame for this stream and close the stream locally. No reset callbacks + // will be run because higher layers think the stream is already finished. 
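// A minimal sketch of the nghttp2 read-callback contract used by onDataSourceRead()
// above (SketchStream and onRead are illustrative): with no buffered data the callback
// defers the stream, to be resumed later via nghttp2_session_resume_data(); when the
// local end is finishing, EOF is signalled, plus NO_END_STREAM if trailers still have
// to be submitted so that they, not the final DATA frame, carry END_STREAM.
#include <algorithm>
#include <cstdint>

#include <nghttp2/nghttp2.h>

struct SketchStream {
  uint64_t pending_bytes{0};
  bool local_end_stream{false};
  bool has_pending_trailers{false};
};

ssize_t onRead(SketchStream& stream, size_t length, uint32_t* data_flags) {
  if (stream.pending_bytes == 0 && !stream.local_end_stream) {
    return NGHTTP2_ERR_DEFERRED; // Nothing to send yet; resume the stream later.
  }
  *data_flags |= NGHTTP2_DATA_FLAG_NO_COPY;
  if (stream.local_end_stream && stream.pending_bytes <= length) {
    *data_flags |= NGHTTP2_DATA_FLAG_EOF;
    if (stream.has_pending_trailers) {
      *data_flags |= NGHTTP2_DATA_FLAG_NO_END_STREAM; // Trailers will carry END_STREAM.
    }
  }
  return static_cast<ssize_t>(std::min<uint64_t>(length, stream.pending_bytes));
}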
+ resetStreamWorker(StreamResetReason::LocalReset); + parent_.sendPendingFrames(); +} + +void ConnectionImpl::StreamImpl::encodeData(Buffer::Instance& data, bool end_stream) { + ASSERT(!local_end_stream_); + local_end_stream_ = end_stream; + parent_.stats_.pending_send_bytes_.add(data.length()); + pending_send_data_.move(data); + if (data_deferred_) { + int rc = nghttp2_session_resume_data(parent_.session_, stream_id_); + ASSERT(rc == 0); + + data_deferred_ = false; + } + + parent_.sendPendingFrames(); + if (local_end_stream_ && pending_send_data_.length() > 0) { + createPendingFlushTimer(); + } +} + +void ConnectionImpl::StreamImpl::resetStream(StreamResetReason reason) { + // Higher layers expect calling resetStream() to immediately raise reset callbacks. + runResetCallbacks(reason); + + // If we submit a reset, nghttp2 will cancel outbound frames that have not yet been sent. + // We want these frames to go out so we defer the reset until we send all of the frames that + // end the local stream. + if (local_end_stream_ && !local_end_stream_sent_) { + parent_.pending_deferred_reset_ = true; + deferred_reset_ = reason; + ENVOY_CONN_LOG(trace, "deferred reset stream", parent_.connection_); + } else { + resetStreamWorker(reason); + } + + // We must still call sendPendingFrames() in both the deferred and not deferred path. This forces + // the cleanup logic to run which will reset the stream in all cases if all data frames could not + // be sent. + parent_.sendPendingFrames(); +} + +void ConnectionImpl::StreamImpl::resetStreamWorker(StreamResetReason reason) { + int rc = nghttp2_submit_rst_stream(parent_.session_, NGHTTP2_FLAG_NONE, stream_id_, + reason == StreamResetReason::LocalRefusedStreamReset + ? NGHTTP2_REFUSED_STREAM + : NGHTTP2_NO_ERROR); + ASSERT(rc == 0); +} + +MetadataEncoder& ConnectionImpl::StreamImpl::getMetadataEncoder() { + if (metadata_encoder_ == nullptr) { + metadata_encoder_ = std::make_unique(); + } + return *metadata_encoder_; +} + +MetadataDecoder& ConnectionImpl::StreamImpl::getMetadataDecoder() { + if (metadata_decoder_ == nullptr) { + auto cb = [this](MetadataMapPtr&& metadata_map_ptr) { + this->onMetadataDecoded(std::move(metadata_map_ptr)); + }; + metadata_decoder_ = std::make_unique(cb); + } + return *metadata_decoder_; +} + +void ConnectionImpl::StreamImpl::onMetadataDecoded(MetadataMapPtr&& metadata_map_ptr) { + decoder().decodeMetadata(std::move(metadata_map_ptr)); +} + +ConnectionImpl::ConnectionImpl(Network::Connection& connection, CodecStats& stats, + const envoy::config::core::v3::Http2ProtocolOptions& http2_options, + const uint32_t max_headers_kb, const uint32_t max_headers_count) + : stats_(stats), connection_(connection), max_headers_kb_(max_headers_kb), + max_headers_count_(max_headers_count), + per_stream_buffer_limit_(http2_options.initial_stream_window_size().value()), + stream_error_on_invalid_http_messaging_( + http2_options.override_stream_error_on_invalid_http_message().value()), + flood_detected_(false), max_outbound_frames_(http2_options.max_outbound_frames().value()), + frame_buffer_releasor_([this]() { releaseOutboundFrame(); }), + max_outbound_control_frames_(http2_options.max_outbound_control_frames().value()), + control_frame_buffer_releasor_([this]() { releaseOutboundControlFrame(); }), + max_consecutive_inbound_frames_with_empty_payload_( + http2_options.max_consecutive_inbound_frames_with_empty_payload().value()), + max_inbound_priority_frames_per_stream_( + http2_options.max_inbound_priority_frames_per_stream().value()), + 
max_inbound_window_update_frames_per_data_frame_sent_( + http2_options.max_inbound_window_update_frames_per_data_frame_sent().value()), + dispatching_(false), raised_goaway_(false), pending_deferred_reset_(false) {} + +ConnectionImpl::~ConnectionImpl() { + for (const auto& stream : active_streams_) { + stream->destroy(); + } + nghttp2_session_del(session_); +} + +Http::Status ConnectionImpl::dispatch(Buffer::Instance& data) { + // TODO(#10878): Remove this wrapper when exception removal is complete. innerDispatch may either + // throw an exception or return an error status. The utility wrapper catches exceptions and + // converts them to error statuses. + return Http::Utility::exceptionToStatus( + [&](Buffer::Instance& data) -> Http::Status { return innerDispatch(data); }, data); +} + +Http::Status ConnectionImpl::innerDispatch(Buffer::Instance& data) { + ENVOY_CONN_LOG(trace, "dispatching {} bytes", connection_, data.length()); + // Make sure that dispatching_ is set to false after dispatching, even when + // ConnectionImpl::dispatch returns early or throws an exception (consider removing if there is a + // single return after exception removal (#10878)). + Cleanup cleanup([this]() { dispatching_ = false; }); + for (const Buffer::RawSlice& slice : data.getRawSlices()) { + dispatching_ = true; + ssize_t rc = + nghttp2_session_mem_recv(session_, static_cast(slice.mem_), slice.len_); + if (rc == NGHTTP2_ERR_FLOODED || flood_detected_) { + throw FrameFloodException( + "Flooding was detected in this HTTP/2 session, and it must be closed"); + } + if (rc != static_cast(slice.len_)) { + throw CodecProtocolException(fmt::format("{}", nghttp2_strerror(rc))); + } + + dispatching_ = false; + } + + ENVOY_CONN_LOG(trace, "dispatched {} bytes", connection_, data.length()); + data.drain(data.length()); + + // Decoding incoming frames can generate outbound frames so flush pending. + sendPendingFrames(); + return Http::okStatus(); +} + +ConnectionImpl::StreamImpl* ConnectionImpl::getStream(int32_t stream_id) { + return static_cast(nghttp2_session_get_stream_user_data(session_, stream_id)); +} + +int ConnectionImpl::onData(int32_t stream_id, const uint8_t* data, size_t len) { + StreamImpl* stream = getStream(stream_id); + // If this results in buffering too much data, the watermark buffer will call + // pendingRecvBufferHighWatermark, resulting in ++read_disable_count_ + stream->pending_recv_data_.add(data, len); + // Update the window to the peer unless some consumer of this stream's data has hit a flow control + // limit and disabled reads on this stream + if (!stream->buffersOverrun()) { + nghttp2_session_consume(session_, stream_id, len); + } else { + stream->unconsumed_bytes_ += len; + } + return 0; +} + +void ConnectionImpl::goAway() { + int rc = nghttp2_submit_goaway(session_, NGHTTP2_FLAG_NONE, + nghttp2_session_get_last_proc_stream_id(session_), + NGHTTP2_NO_ERROR, nullptr, 0); + ASSERT(rc == 0); + + sendPendingFrames(); +} + +void ConnectionImpl::shutdownNotice() { + int rc = nghttp2_submit_shutdown_notice(session_); + ASSERT(rc == 0); + + sendPendingFrames(); +} + +int ConnectionImpl::onBeforeFrameReceived(const nghttp2_frame_hd* hd) { + ENVOY_CONN_LOG(trace, "about to recv frame type={}, flags={}", connection_, + static_cast(hd->type), static_cast(hd->flags)); + + // Track all the frames without padding here, since this is the only callback we receive + // for some of them (e.g. CONTINUATION frame, frames sent on closed streams, etc.). 
+ // HEADERS frame is tracked in onBeginHeaders(), DATA frame is tracked in onFrameReceived(). + if (hd->type != NGHTTP2_HEADERS && hd->type != NGHTTP2_DATA) { + if (!trackInboundFrames(hd, 0)) { + return NGHTTP2_ERR_FLOODED; + } + } + + return 0; +} + +ABSL_MUST_USE_RESULT +enum GoAwayErrorCode ngHttp2ErrorCodeToErrorCode(uint32_t code) noexcept { + switch (code) { + case NGHTTP2_NO_ERROR: + return GoAwayErrorCode::NoError; + default: + return GoAwayErrorCode::Other; + } +} + +int ConnectionImpl::onFrameReceived(const nghttp2_frame* frame) { + ENVOY_CONN_LOG(trace, "recv frame type={}", connection_, static_cast(frame->hd.type)); + + // onFrameReceived() is called with a complete HEADERS frame assembled from all the HEADERS + // and CONTINUATION frames, but we track them separately: HEADERS frames in onBeginHeaders() + // and CONTINUATION frames in onBeforeFrameReceived(). + ASSERT(frame->hd.type != NGHTTP2_CONTINUATION); + + if (frame->hd.type == NGHTTP2_DATA) { + if (!trackInboundFrames(&frame->hd, frame->data.padlen)) { + return NGHTTP2_ERR_FLOODED; + } + } + + // Only raise GOAWAY once, since we don't currently expose stream information. Shutdown + // notifications are the same as a normal GOAWAY. + // TODO: handle multiple GOAWAY frames. + if (frame->hd.type == NGHTTP2_GOAWAY && !raised_goaway_) { + ASSERT(frame->hd.stream_id == 0); + raised_goaway_ = true; + callbacks().onGoAway(ngHttp2ErrorCodeToErrorCode(frame->goaway.error_code)); + return 0; + } + + if (frame->hd.type == NGHTTP2_SETTINGS && frame->hd.flags == NGHTTP2_FLAG_NONE) { + onSettingsForTest(frame->settings); + } + + StreamImpl* stream = getStream(frame->hd.stream_id); + if (!stream) { + return 0; + } + + switch (frame->hd.type) { + case NGHTTP2_HEADERS: { + stream->remote_end_stream_ = frame->hd.flags & NGHTTP2_FLAG_END_STREAM; + if (!stream->cookies_.empty()) { + HeaderString key(Headers::get().Cookie); + stream->headers().addViaMove(std::move(key), std::move(stream->cookies_)); + } + + switch (frame->headers.cat) { + case NGHTTP2_HCAT_RESPONSE: + case NGHTTP2_HCAT_REQUEST: { + stream->decodeHeaders(frame->headers.cat == NGHTTP2_HCAT_RESPONSE); + break; + } + + case NGHTTP2_HCAT_HEADERS: { + // It's possible that we are waiting to send a deferred reset, so only raise headers/trailers + // if local is not complete. + if (!stream->deferred_reset_) { + if (!stream->waiting_for_non_informational_headers_) { + if (!stream->remote_end_stream_) { + // This indicates we have received more headers frames than Envoy + // supports. Even if this is valid HTTP (something like 103 early hints) fail here + // rather than trying to push unexpected headers through the Envoy pipeline as that + // will likely result in Envoy crashing. + // It would be cleaner to reset the stream rather than reset the/ entire connection but + // it's also slightly more dangerous so currently we err on the side of safety. + stats_.too_many_header_frames_.inc(); + throw CodecProtocolException("Unexpected 'trailers' with no end stream."); + } else { + stream->decodeTrailers(); + } + } else { + ASSERT(!nghttp2_session_check_server_session(session_)); + stream->waiting_for_non_informational_headers_ = false; + + // Even if we have :status 100 in the client case in a response, when + // we received a 1xx to start out with, nghttp2 message checking + // guarantees proper flow here. + stream->decodeHeaders(false); + } + } + + break; + } + + default: + // We do not currently support push. 
+ NOT_IMPLEMENTED_GCOVR_EXCL_LINE; + } + + break; + } + case NGHTTP2_DATA: { + stream->remote_end_stream_ = frame->hd.flags & NGHTTP2_FLAG_END_STREAM; + + // It's possible that we are waiting to send a deferred reset, so only raise data if local + // is not complete. + if (!stream->deferred_reset_) { + stream->decoder().decodeData(stream->pending_recv_data_, stream->remote_end_stream_); + } + + stream->pending_recv_data_.drain(stream->pending_recv_data_.length()); + break; + } + case NGHTTP2_RST_STREAM: { + ENVOY_CONN_LOG(trace, "remote reset: {}", connection_, frame->rst_stream.error_code); + stats_.rx_reset_.inc(); + break; + } + } + + return 0; +} + +int ConnectionImpl::onFrameSend(const nghttp2_frame* frame) { + // The nghttp2 library does not cleanly give us a way to determine whether we received invalid + // data from our peer. Sometimes it raises the invalid frame callback, and sometimes it does not. + // In all cases however it will attempt to send a GOAWAY frame with an error status. If we see + // an outgoing frame of this type, we will return an error code so that we can abort execution. + ENVOY_CONN_LOG(trace, "sent frame type={}", connection_, static_cast(frame->hd.type)); + switch (frame->hd.type) { + case NGHTTP2_GOAWAY: { + ENVOY_CONN_LOG(debug, "sent goaway code={}", connection_, frame->goaway.error_code); + if (frame->goaway.error_code != NGHTTP2_NO_ERROR) { + // TODO(mattklein123): Returning this error code abandons standard nghttp2 frame accounting. + // As such, it is not reliable to call sendPendingFrames() again after this and we assume + // that the connection is going to get torn down immediately. One byproduct of this is that + // we need to cancel all pending flush stream timeouts since they can race with connection + // teardown. As part of the work to remove exceptions we should aim to clean up all of this + // error handling logic and only handle this type of case at the end of dispatch. + for (auto& stream : active_streams_) { + stream->disarmStreamIdleTimer(); + } + return NGHTTP2_ERR_CALLBACK_FAILURE; + } + break; + } + + case NGHTTP2_RST_STREAM: { + ENVOY_CONN_LOG(debug, "sent reset code={}", connection_, frame->rst_stream.error_code); + stats_.tx_reset_.inc(); + break; + } + + case NGHTTP2_HEADERS: + case NGHTTP2_DATA: { + StreamImpl* stream = getStream(frame->hd.stream_id); + stream->local_end_stream_sent_ = frame->hd.flags & NGHTTP2_FLAG_END_STREAM; + break; + } + } + + return 0; +} + +int ConnectionImpl::onError(absl::string_view error) { + ENVOY_CONN_LOG(debug, "invalid http2: {}", connection_, error); + return 0; +} + +int ConnectionImpl::onInvalidFrame(int32_t stream_id, int error_code) { + ENVOY_CONN_LOG(debug, "invalid frame: {} on stream {}", connection_, nghttp2_strerror(error_code), + stream_id); + + // Set details of error_code in the stream whenever we have one. + StreamImpl* stream = getStream(stream_id); + if (stream != nullptr) { + stream->setDetails(Http2ResponseCodeDetails::get().errorDetails(error_code)); + } + + if (error_code == NGHTTP2_ERR_HTTP_HEADER || error_code == NGHTTP2_ERR_HTTP_MESSAGING) { + stats_.rx_messaging_error_.inc(); + + if (stream_error_on_invalid_http_messaging_) { + // The stream is about to be closed due to an invalid header or messaging. Don't kill the + // entire connection if one stream has bad headers or messaging. + if (stream != nullptr) { + // See comment below in onStreamClose() for why we do this. 
+ stream->reset_due_to_messaging_error_ = true; + } + return 0; + } + } + + // Cause dispatch to return with an error code. + return NGHTTP2_ERR_CALLBACK_FAILURE; +} + +int ConnectionImpl::onBeforeFrameSend(const nghttp2_frame* frame) { + ENVOY_CONN_LOG(trace, "about to send frame type={}, flags={}", connection_, + static_cast(frame->hd.type), static_cast(frame->hd.flags)); + ASSERT(!is_outbound_flood_monitored_control_frame_); + // Flag flood monitored outbound control frames. + is_outbound_flood_monitored_control_frame_ = + ((frame->hd.type == NGHTTP2_PING || frame->hd.type == NGHTTP2_SETTINGS) && + frame->hd.flags & NGHTTP2_FLAG_ACK) || + frame->hd.type == NGHTTP2_RST_STREAM; + return 0; +} + +void ConnectionImpl::incrementOutboundFrameCount(bool is_outbound_flood_monitored_control_frame) { + ++outbound_frames_; + if (is_outbound_flood_monitored_control_frame) { + ++outbound_control_frames_; + } + checkOutboundQueueLimits(); +} + +bool ConnectionImpl::addOutboundFrameFragment(Buffer::OwnedImpl& output, const uint8_t* data, + size_t length) { + // Reset the outbound frame type (set in the onBeforeFrameSend callback) since the + // onBeforeFrameSend callback is not called for DATA frames. + bool is_outbound_flood_monitored_control_frame = false; + std::swap(is_outbound_flood_monitored_control_frame, is_outbound_flood_monitored_control_frame_); + try { + incrementOutboundFrameCount(is_outbound_flood_monitored_control_frame); + } catch (const FrameFloodException&) { + return false; + } + + output.add(data, length); + output.addDrainTracker(is_outbound_flood_monitored_control_frame ? control_frame_buffer_releasor_ + : frame_buffer_releasor_); + return true; +} + +void ConnectionImpl::releaseOutboundFrame() { + ASSERT(outbound_frames_ >= 1); + --outbound_frames_; +} + +void ConnectionImpl::releaseOutboundControlFrame() { + ASSERT(outbound_control_frames_ >= 1); + --outbound_control_frames_; + releaseOutboundFrame(); +} + +ssize_t ConnectionImpl::onSend(const uint8_t* data, size_t length) { + ENVOY_CONN_LOG(trace, "send data: bytes={}", connection_, length); + Buffer::OwnedImpl buffer; + if (!addOutboundFrameFragment(buffer, data, length)) { + ENVOY_CONN_LOG(debug, "error sending frame: Too many frames in the outbound queue.", + connection_); + return NGHTTP2_ERR_FLOODED; + } + + // While the buffer is transient the fragment it contains will be moved into the + // write_buffer_ of the underlying connection_ by the write method below. + // This creates lifetime dependency between the write_buffer_ of the underlying connection + // and the codec object. Specifically the write_buffer_ MUST be either fully drained or + // deleted before the codec object is deleted. This is presently guaranteed by the + // destruction order of the Network::ConnectionImpl object where write_buffer_ is + // destroyed before the filter_manager_ which owns the codec through Http::ConnectionManagerImpl. + connection_.write(buffer, false); + return length; +} + +int ConnectionImpl::onStreamClose(int32_t stream_id, uint32_t error_code) { + StreamImpl* stream = getStream(stream_id); + if (stream) { + ENVOY_CONN_LOG(debug, "stream closed: {}", connection_, error_code); + if (!stream->remote_end_stream_ || !stream->local_end_stream_) { + StreamResetReason reason; + if (stream->reset_due_to_messaging_error_) { + // Unfortunately, the nghttp2 API makes it incredibly difficult to clearly understand + // the flow of resets. I.e., did the reset originate locally? Was it remote? 
Here, + // we attempt to track cases in which we sent a reset locally due to an invalid frame + // received from the remote. We only do that in two cases currently (HTTP messaging layer + // errors from https://tools.ietf.org/html/rfc7540#section-8 which nghttp2 is very strict + // about). In other cases we treat invalid frames as a protocol error and just kill + // the connection. + reason = StreamResetReason::LocalReset; + } else { + reason = error_code == NGHTTP2_REFUSED_STREAM ? StreamResetReason::RemoteRefusedStreamReset + : StreamResetReason::RemoteReset; + } + + stream->runResetCallbacks(reason); + } + + stream->destroy(); + connection_.dispatcher().deferredDelete(stream->removeFromList(active_streams_)); + // Any unconsumed data must be consumed before the stream is deleted. + // nghttp2 does not appear to track this internally, and any stream deleted + // with outstanding window will contribute to a slow connection-window leak. + nghttp2_session_consume(session_, stream_id, stream->unconsumed_bytes_); + stream->unconsumed_bytes_ = 0; + nghttp2_session_set_stream_user_data(session_, stream->stream_id_, nullptr); + } + + return 0; +} + +int ConnectionImpl::onMetadataReceived(int32_t stream_id, const uint8_t* data, size_t len) { + ENVOY_CONN_LOG(trace, "recv {} bytes METADATA", connection_, len); + + StreamImpl* stream = getStream(stream_id); + if (!stream) { + return 0; + } + + bool success = stream->getMetadataDecoder().receiveMetadata(data, len); + return success ? 0 : NGHTTP2_ERR_CALLBACK_FAILURE; +} + +int ConnectionImpl::onMetadataFrameComplete(int32_t stream_id, bool end_metadata) { + ENVOY_CONN_LOG(trace, "recv METADATA frame on stream {}, end_metadata: {}", connection_, + stream_id, end_metadata); + + StreamImpl* stream = getStream(stream_id); + if (stream == nullptr) { + return 0; + } + + bool result = stream->getMetadataDecoder().onMetadataFrameComplete(end_metadata); + return result ? 0 : NGHTTP2_ERR_CALLBACK_FAILURE; +} + +ssize_t ConnectionImpl::packMetadata(int32_t stream_id, uint8_t* buf, size_t len) { + ENVOY_CONN_LOG(trace, "pack METADATA frame on stream {}", connection_, stream_id); + + StreamImpl* stream = getStream(stream_id); + if (stream == nullptr) { + return 0; + } + + MetadataEncoder& encoder = stream->getMetadataEncoder(); + return encoder.packNextFramePayload(buf, len); +} + +int ConnectionImpl::saveHeader(const nghttp2_frame* frame, HeaderString&& name, + HeaderString&& value) { + StreamImpl* stream = getStream(frame->hd.stream_id); + if (!stream) { + // We have seen 1 or 2 crashes where we get a headers callback but there is no associated + // stream data. I honestly am not sure how this can happen. However, from reading the nghttp2 + // code it looks possible that inflate_header_block() can safely inflate headers for an already + // closed stream, but will still call the headers callback. Since that seems possible, we should + // ignore this case here. + // TODO(mattklein123): Figure out a test case that can hit this. + stats_.headers_cb_no_stream_.inc(); + return 0; + } + + auto should_return = checkHeaderNameForUnderscores(name.getStringView()); + if (should_return) { + name.clear(); + value.clear(); + return should_return.value(); + } + + stream->saveHeader(std::move(name), std::move(value)); + + if (stream->headers().byteSize() > max_headers_kb_ * 1024 || + stream->headers().size() > max_headers_count_) { + // This will cause the library to reset/close the stream. 
+ stats_.header_overflow_.inc(); + return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE; + } else { + return 0; + } +} + +void ConnectionImpl::sendPendingFrames() { + if (dispatching_ || connection_.state() == Network::Connection::State::Closed) { + return; + } + + const int rc = nghttp2_session_send(session_); + if (rc != 0) { + ASSERT(rc == NGHTTP2_ERR_CALLBACK_FAILURE); + // For errors caused by the pending outbound frame flood the FrameFloodException has + // to be thrown. However the nghttp2 library returns only the generic error code for + // all failure types. Check queue limits and throw FrameFloodException if they were + // exceeded. + if (outbound_frames_ > max_outbound_frames_ || + outbound_control_frames_ > max_outbound_control_frames_) { + throw FrameFloodException("Too many frames in the outbound queue."); + } + + throw CodecProtocolException(std::string(nghttp2_strerror(rc))); + } + + // See ConnectionImpl::StreamImpl::resetStream() for why we do this. This is an uncommon event, + // so iterating through every stream to find the ones that have a deferred reset is not a big + // deal. Furthermore, queueing a reset frame does not actually invoke the close stream callback. + // This is only done when the reset frame is sent. Thus, it's safe to work directly with the + // stream map. + // NOTE: The way we handle deferred reset is essentially best effort. If we intend to do a + // deferred reset, we try to finish the stream, including writing any pending data frames. + // If we cannot do this (potentially due to not enough window), we just reset the stream. + // In general this behavior occurs only when we are trying to send immediate error messages + // to short circuit requests. In the best effort case, we complete the stream before + // resetting. In other cases, we just do the reset now which will blow away pending data + // frames and release any memory associated with the stream. + if (pending_deferred_reset_) { + pending_deferred_reset_ = false; + for (auto& stream : active_streams_) { + if (stream->deferred_reset_) { + stream->resetStreamWorker(stream->deferred_reset_.value()); + } + } + sendPendingFrames(); + } +} + +void ConnectionImpl::sendSettings( + const envoy::config::core::v3::Http2ProtocolOptions& http2_options, bool disable_push) { + absl::InlinedVector settings; + auto insertParameter = [&settings](const nghttp2_settings_entry& entry) mutable -> bool { + const auto it = std::find_if(settings.cbegin(), settings.cend(), + [&entry](const nghttp2_settings_entry& existing) { + return entry.settings_id == existing.settings_id; + }); + if (it != settings.end()) { + return false; + } + settings.push_back(entry); + return true; + }; + + // Universally disable receiving push promise frames as we don't currently support + // them. nghttp2 will fail the connection if the other side still sends them. + // TODO(mattklein123): Remove this when we correctly proxy push promise. + // NOTE: This is a special case with respect to custom parameter overrides in that server push is + // not supported and therefore not end user configurable. + if (disable_push) { + settings.push_back( + {static_cast(NGHTTP2_SETTINGS_ENABLE_PUSH), disable_push ? 
0U : 1U});
+  }
+
+  for (const auto& it : http2_options.custom_settings_parameters()) {
+    ASSERT(it.identifier().value() <= std::numeric_limits<uint16_t>::max());
+    const bool result =
+        insertParameter({static_cast<int32_t>(it.identifier().value()), it.value().value()});
+    ASSERT(result);
+    ENVOY_CONN_LOG(debug, "adding custom settings parameter with id {:#x} to {}", connection_,
+                   it.identifier().value(), it.value().value());
+  }
+
+  // Insert named parameters.
+  settings.insert(
+      settings.end(),
+      {{NGHTTP2_SETTINGS_HEADER_TABLE_SIZE, http2_options.hpack_table_size().value()},
+       {NGHTTP2_SETTINGS_ENABLE_CONNECT_PROTOCOL, http2_options.allow_connect()},
+       {NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS, http2_options.max_concurrent_streams().value()},
+       {NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE, http2_options.initial_stream_window_size().value()}});
+  if (!settings.empty()) {
+    int rc = nghttp2_submit_settings(session_, NGHTTP2_FLAG_NONE, settings.data(), settings.size());
+    ASSERT(rc == 0);
+  } else {
+    // nghttp2_submit_settings need to be called at least once
+    int rc = nghttp2_submit_settings(session_, NGHTTP2_FLAG_NONE, nullptr, 0);
+    ASSERT(rc == 0);
+  }
+
+  const uint32_t initial_connection_window_size =
+      http2_options.initial_connection_window_size().value();
+  // Increase connection window size up to our default size.
+  if (initial_connection_window_size != NGHTTP2_INITIAL_CONNECTION_WINDOW_SIZE) {
+    ENVOY_CONN_LOG(debug, "updating connection-level initial window size to {}", connection_,
+                   initial_connection_window_size);
+    int rc = nghttp2_submit_window_update(session_, NGHTTP2_FLAG_NONE, 0,
+                                          initial_connection_window_size -
+                                              NGHTTP2_INITIAL_CONNECTION_WINDOW_SIZE);
+    ASSERT(rc == 0);
+  }
+}
+
+ConnectionImpl::Http2Callbacks::Http2Callbacks() {
+  nghttp2_session_callbacks_new(&callbacks_);
+  nghttp2_session_callbacks_set_send_callback(
+      callbacks_,
+      [](nghttp2_session*, const uint8_t* data, size_t length, int, void* user_data) -> ssize_t {
+        return static_cast<ConnectionImpl*>(user_data)->onSend(data, length);
+      });
+
+  nghttp2_session_callbacks_set_send_data_callback(
+      callbacks_,
+      [](nghttp2_session*, nghttp2_frame* frame, const uint8_t* framehd, size_t length,
+         nghttp2_data_source* source, void*) -> int {
+        ASSERT(frame->data.padlen == 0);
+        return static_cast<StreamImpl*>(source->ptr)->onDataSourceSend(framehd, length);
+      });
+
+  nghttp2_session_callbacks_set_on_begin_headers_callback(
+      callbacks_, [](nghttp2_session*, const nghttp2_frame* frame, void* user_data) -> int {
+        return static_cast<ConnectionImpl*>(user_data)->onBeginHeaders(frame);
+      });
+
+  nghttp2_session_callbacks_set_on_header_callback(
+      callbacks_,
+      [](nghttp2_session*, const nghttp2_frame* frame, const uint8_t* raw_name, size_t name_length,
+         const uint8_t* raw_value, size_t value_length, uint8_t, void* user_data) -> int {
+        // TODO PERF: Can reference count here to avoid copies.
+        HeaderString name;
+        name.setCopy(reinterpret_cast<const char*>(raw_name), name_length);
+        HeaderString value;
+        value.setCopy(reinterpret_cast<const char*>(raw_value), value_length);
+        return static_cast<ConnectionImpl*>(user_data)->onHeader(frame, std::move(name),
+                                                                 std::move(value));
+      });
+
+  nghttp2_session_callbacks_set_on_data_chunk_recv_callback(
+      callbacks_,
+      [](nghttp2_session*, uint8_t, int32_t stream_id, const uint8_t* data, size_t len,
+         void* user_data) -> int {
+        return static_cast<ConnectionImpl*>(user_data)->onData(stream_id, data, len);
+      });
+
+  nghttp2_session_callbacks_set_on_begin_frame_callback(
+      callbacks_, [](nghttp2_session*, const nghttp2_frame_hd* hd, void* user_data) -> int {
+        return static_cast<ConnectionImpl*>(user_data)->onBeforeFrameReceived(hd);
+      });
+
+  nghttp2_session_callbacks_set_on_frame_recv_callback(
+      callbacks_, [](nghttp2_session*, const nghttp2_frame* frame, void* user_data) -> int {
+        return static_cast<ConnectionImpl*>(user_data)->onFrameReceived(frame);
+      });
+
+  nghttp2_session_callbacks_set_on_stream_close_callback(
+      callbacks_,
+      [](nghttp2_session*, int32_t stream_id, uint32_t error_code, void* user_data) -> int {
+        return static_cast<ConnectionImpl*>(user_data)->onStreamClose(stream_id, error_code);
+      });
+
+  nghttp2_session_callbacks_set_on_frame_send_callback(
+      callbacks_, [](nghttp2_session*, const nghttp2_frame* frame, void* user_data) -> int {
+        return static_cast<ConnectionImpl*>(user_data)->onFrameSend(frame);
+      });
+
+  nghttp2_session_callbacks_set_before_frame_send_callback(
+      callbacks_, [](nghttp2_session*, const nghttp2_frame* frame, void* user_data) -> int {
+        return static_cast<ConnectionImpl*>(user_data)->onBeforeFrameSend(frame);
+      });
+
+  nghttp2_session_callbacks_set_on_frame_not_send_callback(
+      callbacks_, [](nghttp2_session*, const nghttp2_frame*, int, void*) -> int {
+        // We used to always return failure here but it looks now this can get called if the other
+        // side sends GOAWAY and we are trying to send a SETTINGS ACK. Just ignore this for now.
+        return 0;
+      });
+
+  nghttp2_session_callbacks_set_on_invalid_frame_recv_callback(
+      callbacks_,
+      [](nghttp2_session*, const nghttp2_frame* frame, int error_code, void* user_data) -> int {
+        return static_cast<ConnectionImpl*>(user_data)->onInvalidFrame(frame->hd.stream_id,
+                                                                       error_code);
+      });
+
+  nghttp2_session_callbacks_set_on_extension_chunk_recv_callback(
+      callbacks_,
+      [](nghttp2_session*, const nghttp2_frame_hd* hd, const uint8_t* data, size_t len,
+         void* user_data) -> int {
+        ASSERT(hd->length >= len);
+        return static_cast<ConnectionImpl*>(user_data)->onMetadataReceived(hd->stream_id, data,
+                                                                           len);
+      });
+
+  nghttp2_session_callbacks_set_unpack_extension_callback(
+      callbacks_, [](nghttp2_session*, void**, const nghttp2_frame_hd* hd, void* user_data) -> int {
+        return static_cast<ConnectionImpl*>(user_data)->onMetadataFrameComplete(
+            hd->stream_id, hd->flags == END_METADATA_FLAG);
+      });
+
+  nghttp2_session_callbacks_set_pack_extension_callback(
+      callbacks_,
+      [](nghttp2_session*, uint8_t* buf, size_t len, const nghttp2_frame* frame,
+         void* user_data) -> ssize_t {
+        ASSERT(frame->hd.length <= len);
+        return static_cast<ConnectionImpl*>(user_data)->packMetadata(frame->hd.stream_id, buf, len);
+      });
+
+  nghttp2_session_callbacks_set_error_callback2(
+      callbacks_, [](nghttp2_session*, int, const char* msg, size_t len, void* user_data) -> int {
+        return static_cast<ConnectionImpl*>(user_data)->onError(absl::string_view(msg, len));
+      });
+}
+
+ConnectionImpl::Http2Callbacks::~Http2Callbacks() { nghttp2_session_callbacks_del(callbacks_); }
+
+ConnectionImpl::Http2Options::Http2Options(
+    const envoy::config::core::v3::Http2ProtocolOptions& http2_options) {
+  nghttp2_option_new(&options_);
+  // Currently we do not do anything with stream priority. Setting the following option prevents
+  // nghttp2 from keeping around closed streams for use during stream priority dependency graph
+  // calculations. This saves a tremendous amount of memory in cases where there are a large
+  // number of kept alive HTTP/2 connections.
+  nghttp2_option_set_no_closed_streams(options_, 1);
+  nghttp2_option_set_no_auto_window_update(options_, 1);
+
+  // The max send header block length is configured to an arbitrarily high number so as to never
+  // trigger the check within nghttp2, as we check request headers length in
+  // codec_impl::saveHeader.
+  nghttp2_option_set_max_send_header_block_length(options_, 0x2000000);
+
+  if (http2_options.hpack_table_size().value() != NGHTTP2_DEFAULT_HEADER_TABLE_SIZE) {
+    nghttp2_option_set_max_deflate_dynamic_table_size(options_,
+                                                      http2_options.hpack_table_size().value());
+  }
+
+  if (http2_options.allow_metadata()) {
+    nghttp2_option_set_user_recv_extension_type(options_, METADATA_FRAME_TYPE);
+  }
+
+  // nghttp2 v1.39.2 lowered the internal flood protection limit from 10K to 1K of ACK frames.
+  // This new limit may cause the internal nghttp2 mitigation to trigger more often (as it
+  // requires just 9K of incoming bytes for smallest 9 byte SETTINGS frame), bypassing the same
+  // mitigation and its associated behavior in the envoy HTTP/2 codec. Since envoy does not rely
+  // on this mitigation, set back to the old 10K number to avoid any changes in the HTTP/2 codec
+  // behavior.
+ nghttp2_option_set_max_outbound_ack(options_, 10000); +} + +ConnectionImpl::Http2Options::~Http2Options() { nghttp2_option_del(options_); } + +ConnectionImpl::ClientHttp2Options::ClientHttp2Options( + const envoy::config::core::v3::Http2ProtocolOptions& http2_options) + : Http2Options(http2_options) { + // Temporarily disable initial max streams limit/protection, since we might want to create + // more than 100 streams before receiving the HTTP/2 SETTINGS frame from the server. + // + // TODO(PiotrSikora): remove this once multiple upstream connections or queuing are implemented. + nghttp2_option_set_peer_max_concurrent_streams( + options_, ::Envoy::Http2::Utility::OptionsLimits::DEFAULT_MAX_CONCURRENT_STREAMS); +} + +ClientConnectionImpl::ClientConnectionImpl( + Network::Connection& connection, Http::ConnectionCallbacks& callbacks, CodecStats& stats, + const envoy::config::core::v3::Http2ProtocolOptions& http2_options, + const uint32_t max_response_headers_kb, const uint32_t max_response_headers_count, + Nghttp2SessionFactory& http2_session_factory) + : ConnectionImpl(connection, stats, http2_options, max_response_headers_kb, + max_response_headers_count), + callbacks_(callbacks) { + ClientHttp2Options client_http2_options(http2_options); + session_ = http2_session_factory.create(http2_callbacks_.callbacks(), base(), + client_http2_options.options()); + http2_session_factory.init(session_, base(), http2_options); + allow_metadata_ = http2_options.allow_metadata(); +} + +RequestEncoder& ClientConnectionImpl::newStream(ResponseDecoder& decoder) { + ClientStreamImplPtr stream(new ClientStreamImpl(*this, per_stream_buffer_limit_, decoder)); + // If the connection is currently above the high watermark, make sure to inform the new stream. + // The connection can not pass this on automatically as it has no awareness that a new stream is + // created. + if (connection_.aboveHighWatermark()) { + stream->runHighWatermarkCallbacks(); + } + ClientStreamImpl& stream_ref = *stream; + stream->moveIntoList(std::move(stream), active_streams_); + return stream_ref; +} + +int ClientConnectionImpl::onBeginHeaders(const nghttp2_frame* frame) { + // The client code explicitly does not currently support push promise. + RELEASE_ASSERT(frame->hd.type == NGHTTP2_HEADERS, ""); + RELEASE_ASSERT(frame->headers.cat == NGHTTP2_HCAT_RESPONSE || + frame->headers.cat == NGHTTP2_HCAT_HEADERS, + ""); + if (frame->headers.cat == NGHTTP2_HCAT_HEADERS) { + StreamImpl* stream = getStream(frame->hd.stream_id); + stream->allocTrailers(); + } + + return 0; +} + +int ClientConnectionImpl::onHeader(const nghttp2_frame* frame, HeaderString&& name, + HeaderString&& value) { + // The client code explicitly does not currently support push promise. 
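The ClientConnectionImpl constructor above obtains its nghttp2_session from the injected Nghttp2SessionFactory (create() followed by init()) instead of calling nghttp2 directly, which is what makes session setup replaceable in tests. Below is a minimal sketch of such a replacement; TestNghttp2SessionFactory and its init_called_ flag are hypothetical names used only for illustration, and the sketch simply delegates to ProdNghttp2SessionFactory so a real session is still produced.

#include "common/http/http2/codec_impl_legacy.h"

namespace Envoy {
namespace Http {
namespace Legacy {
namespace Http2 {

// Hypothetical test-only factory (not part of this patch): delegates to the
// production factory so the codec still gets a real nghttp2 session, but
// records that init() ran so a test can assert on it.
class TestNghttp2SessionFactory : public Nghttp2SessionFactory {
public:
  nghttp2_session* create(const nghttp2_session_callbacks* callbacks,
                          ConnectionImplType* connection, const nghttp2_option* options) override {
    return ProdNghttp2SessionFactory::get().create(callbacks, connection, options);
  }

  void init(nghttp2_session* session, ConnectionImplType* connection,
            const envoy::config::core::v3::Http2ProtocolOptions& options) override {
    init_called_ = true;
    ProdNghttp2SessionFactory::get().init(session, connection, options);
  }

  bool init_called_{false};
};

} // namespace Http2
} // namespace Legacy
} // namespace Http
} // namespace Envoy

An instance of such a factory would be passed as the http2_session_factory argument of ClientConnectionImpl, in the spot where ProdNghttp2SessionFactory::get() would otherwise be used.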
+ ASSERT(frame->hd.type == NGHTTP2_HEADERS); + ASSERT(frame->headers.cat == NGHTTP2_HCAT_RESPONSE || frame->headers.cat == NGHTTP2_HCAT_HEADERS); + return saveHeader(frame, std::move(name), std::move(value)); +} + +ServerConnectionImpl::ServerConnectionImpl( + Network::Connection& connection, Http::ServerConnectionCallbacks& callbacks, CodecStats& stats, + const envoy::config::core::v3::Http2ProtocolOptions& http2_options, + const uint32_t max_request_headers_kb, const uint32_t max_request_headers_count, + envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action) + : ConnectionImpl(connection, stats, http2_options, max_request_headers_kb, + max_request_headers_count), + callbacks_(callbacks), headers_with_underscores_action_(headers_with_underscores_action) { + Http2Options h2_options(http2_options); + + nghttp2_session_server_new2(&session_, http2_callbacks_.callbacks(), base(), + h2_options.options()); + sendSettings(http2_options, false); + allow_metadata_ = http2_options.allow_metadata(); +} + +int ServerConnectionImpl::onBeginHeaders(const nghttp2_frame* frame) { + // For a server connection, we should never get push promise frames. + ASSERT(frame->hd.type == NGHTTP2_HEADERS); + + if (!trackInboundFrames(&frame->hd, frame->headers.padlen)) { + return NGHTTP2_ERR_FLOODED; + } + + if (frame->headers.cat != NGHTTP2_HCAT_REQUEST) { + stats_.trailers_.inc(); + ASSERT(frame->headers.cat == NGHTTP2_HCAT_HEADERS); + + StreamImpl* stream = getStream(frame->hd.stream_id); + stream->allocTrailers(); + return 0; + } + + ServerStreamImplPtr stream(new ServerStreamImpl(*this, per_stream_buffer_limit_)); + if (connection_.aboveHighWatermark()) { + stream->runHighWatermarkCallbacks(); + } + stream->request_decoder_ = &callbacks_.newStream(*stream); + stream->stream_id_ = frame->hd.stream_id; + stream->moveIntoList(std::move(stream), active_streams_); + nghttp2_session_set_stream_user_data(session_, frame->hd.stream_id, + active_streams_.front().get()); + return 0; +} + +int ServerConnectionImpl::onHeader(const nghttp2_frame* frame, HeaderString&& name, + HeaderString&& value) { + // For a server connection, we should never get push promise frames. + ASSERT(frame->hd.type == NGHTTP2_HEADERS); + ASSERT(frame->headers.cat == NGHTTP2_HCAT_REQUEST || frame->headers.cat == NGHTTP2_HCAT_HEADERS); + return saveHeader(frame, std::move(name), std::move(value)); +} + +bool ServerConnectionImpl::trackInboundFrames(const nghttp2_frame_hd* hd, uint32_t padding_length) { + ENVOY_CONN_LOG(trace, "track inbound frame type={} flags={} length={} padding_length={}", + connection_, static_cast(hd->type), static_cast(hd->flags), + static_cast(hd->length), padding_length); + switch (hd->type) { + case NGHTTP2_HEADERS: + case NGHTTP2_CONTINUATION: + // Track new streams. + if (hd->flags & NGHTTP2_FLAG_END_HEADERS) { + inbound_streams_++; + } + FALLTHRU; + case NGHTTP2_DATA: + // Track frames with an empty payload and no end stream flag. 
+ if (hd->length - padding_length == 0 && !(hd->flags & NGHTTP2_FLAG_END_STREAM)) { + ENVOY_CONN_LOG(trace, "frame with an empty payload and no end stream flag.", connection_); + consecutive_inbound_frames_with_empty_payload_++; + } else { + consecutive_inbound_frames_with_empty_payload_ = 0; + } + break; + case NGHTTP2_PRIORITY: + inbound_priority_frames_++; + break; + case NGHTTP2_WINDOW_UPDATE: + inbound_window_update_frames_++; + break; + default: + break; + } + + if (!checkInboundFrameLimits()) { + // NGHTTP2_ERR_FLOODED is overridden within nghttp2 library and it doesn't propagate + // all the way to nghttp2_session_mem_recv() where we need it. + flood_detected_ = true; + return false; + } + + return true; +} + +bool ServerConnectionImpl::checkInboundFrameLimits() { + ASSERT(dispatching_downstream_data_); + + if (consecutive_inbound_frames_with_empty_payload_ > + max_consecutive_inbound_frames_with_empty_payload_) { + ENVOY_CONN_LOG(trace, + "error reading frame: Too many consecutive frames with an empty payload " + "received in this HTTP/2 session.", + connection_); + stats_.inbound_empty_frames_flood_.inc(); + return false; + } + + if (inbound_priority_frames_ > max_inbound_priority_frames_per_stream_ * (1 + inbound_streams_)) { + ENVOY_CONN_LOG(trace, + "error reading frame: Too many PRIORITY frames received in this HTTP/2 session.", + connection_); + stats_.inbound_priority_frames_flood_.inc(); + return false; + } + + if (inbound_window_update_frames_ > + 1 + 2 * (inbound_streams_ + + max_inbound_window_update_frames_per_data_frame_sent_ * outbound_data_frames_)) { + ENVOY_CONN_LOG( + trace, + "error reading frame: Too many WINDOW_UPDATE frames received in this HTTP/2 session.", + connection_); + stats_.inbound_window_update_frames_flood_.inc(); + return false; + } + + return true; +} + +void ServerConnectionImpl::checkOutboundQueueLimits() { + if (outbound_frames_ > max_outbound_frames_ && dispatching_downstream_data_) { + stats_.outbound_flood_.inc(); + throw FrameFloodException("Too many frames in the outbound queue."); + } + if (outbound_control_frames_ > max_outbound_control_frames_ && dispatching_downstream_data_) { + stats_.outbound_control_flood_.inc(); + throw FrameFloodException("Too many control frames in the outbound queue."); + } +} + +Http::Status ServerConnectionImpl::dispatch(Buffer::Instance& data) { + // TODO(#10878): Remove this wrapper when exception removal is complete. innerDispatch may either + // throw an exception or return an error status. The utility wrapper catches exceptions and + // converts them to error statuses. + return Http::Utility::exceptionToStatus( + [&](Buffer::Instance& data) -> Http::Status { return innerDispatch(data); }, data); +} + +Http::Status ServerConnectionImpl::innerDispatch(Buffer::Instance& data) { + ASSERT(!dispatching_downstream_data_); + dispatching_downstream_data_ = true; + + // Make sure the dispatching_downstream_data_ is set to false even + // when ConnectionImpl::dispatch throws an exception. + Cleanup cleanup([this]() { dispatching_downstream_data_ = false; }); + + // Make sure downstream outbound queue was not flooded by the upstream frames. 
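The three checks in checkInboundFrameLimits() above are plain counter arithmetic, so the thresholds are easy to restate in isolation. The following standalone sketch mirrors those formulas using the default limits documented later in codec_impl_legacy.h (1 consecutive empty frame, 100 PRIORITY frames per stream, 10 WINDOW_UPDATE frames per outbound DATA frame); the struct and function names are illustrative only and not part of this patch.

#include <cstdint>

// Illustrative restatement of the inbound flood checks above, using the
// default limits documented in codec_impl_legacy.h. All names are hypothetical.
struct InboundFrameCounters {
  uint32_t consecutive_empty_frames{0};
  uint64_t priority_frames{0};
  uint64_t window_update_frames{0};
  uint32_t inbound_streams{0};
  uint64_t outbound_data_frames{0};
};

bool inboundLimitsExceeded(const InboundFrameCounters& c) {
  constexpr uint32_t kMaxConsecutiveEmptyFrames = 1;     // default
  constexpr uint32_t kMaxPriorityFramesPerStream = 100;  // default
  constexpr uint32_t kMaxWindowUpdatePerDataFrame = 10;  // default

  const bool empty_flood = c.consecutive_empty_frames > kMaxConsecutiveEmptyFrames;
  // PRIORITY frames may grow with the number of streams seen so far.
  const bool priority_flood =
      c.priority_frames > kMaxPriorityFramesPerStream * (1 + c.inbound_streams);
  // WINDOW_UPDATE frames may grow with streams and with DATA frames we have sent.
  const bool window_update_flood =
      c.window_update_frames >
      1 + 2 * (c.inbound_streams + kMaxWindowUpdatePerDataFrame * c.outbound_data_frames);
  return empty_flood || priority_flood || window_update_flood;
}

For example, with no streams accepted and no DATA frames sent, the WINDOW_UPDATE bound evaluates to 1 + 2 * (0 + 10 * 0) = 1, so a second inbound WINDOW_UPDATE frame already trips the check.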
+ checkOutboundQueueLimits(); + + return ConnectionImpl::innerDispatch(data); +} + +absl::optional +ServerConnectionImpl::checkHeaderNameForUnderscores(absl::string_view header_name) { + if (headers_with_underscores_action_ != envoy::config::core::v3::HttpProtocolOptions::ALLOW && + Http::HeaderUtility::headerNameContainsUnderscore(header_name)) { + if (headers_with_underscores_action_ == + envoy::config::core::v3::HttpProtocolOptions::DROP_HEADER) { + ENVOY_CONN_LOG(debug, "Dropping header with invalid characters in its name: {}", connection_, + header_name); + stats_.dropped_headers_with_underscores_.inc(); + return 0; + } + ENVOY_CONN_LOG(debug, "Rejecting request due to header name with underscores: {}", connection_, + header_name); + stats_.requests_rejected_with_underscores_in_headers_.inc(); + return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE; + } + return absl::nullopt; +} + +} // namespace Http2 +} // namespace Legacy +} // namespace Http +} // namespace Envoy diff --git a/source/common/http/http2/codec_impl_legacy.h b/source/common/http/http2/codec_impl_legacy.h new file mode 100644 index 000000000000..ebb40b18d8a7 --- /dev/null +++ b/source/common/http/http2/codec_impl_legacy.h @@ -0,0 +1,602 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "envoy/config/core/v3/protocol.pb.h" +#include "envoy/event/deferred_deletable.h" +#include "envoy/http/codec.h" +#include "envoy/network/connection.h" + +#include "common/buffer/buffer_impl.h" +#include "common/buffer/watermark_buffer.h" +#include "common/common/linked_object.h" +#include "common/common/logger.h" +#include "common/common/thread.h" +#include "common/http/codec_helper.h" +#include "common/http/header_map_impl.h" +#include "common/http/http2/codec_stats.h" +#include "common/http/http2/metadata_decoder.h" +#include "common/http/http2/metadata_encoder.h" +#include "common/http/status.h" +#include "common/http/utility.h" + +#include "absl/types/optional.h" +#include "nghttp2/nghttp2.h" + +namespace Envoy { +namespace Http { +namespace Legacy { +namespace Http2 { + +// This is not the full client magic, but it's the smallest size that should be able to +// differentiate between HTTP/1 and HTTP/2. +const std::string CLIENT_MAGIC_PREFIX = "PRI * HTTP/2"; + +class Utility { +public: + /** + * Deal with https://tools.ietf.org/html/rfc7540#section-8.1.2.5 + * @param key supplies the incoming header key. + * @param value supplies the incoming header value. + * @param cookies supplies the header string to fill if this is a cookie header that needs to be + * rebuilt. + */ + static bool reconstituteCrumbledCookies(const HeaderString& key, const HeaderString& value, + HeaderString& cookies); +}; + +class ConnectionImpl; + +// Abstract nghttp2_session factory. Used to enable injection of factories for testing. +class Nghttp2SessionFactory { +public: + using ConnectionImplType = ConnectionImpl; + virtual ~Nghttp2SessionFactory() = default; + + // Returns a new nghttp2_session to be used with |connection|. + virtual nghttp2_session* create(const nghttp2_session_callbacks* callbacks, + ConnectionImplType* connection, + const nghttp2_option* options) PURE; + + // Initializes the |session|. 
+ virtual void init(nghttp2_session* session, ConnectionImplType* connection, + const envoy::config::core::v3::Http2ProtocolOptions& options) PURE; +}; + +class ProdNghttp2SessionFactory : public Nghttp2SessionFactory { +public: + nghttp2_session* create(const nghttp2_session_callbacks* callbacks, ConnectionImpl* connection, + const nghttp2_option* options) override; + + void init(nghttp2_session* session, ConnectionImpl* connection, + const envoy::config::core::v3::Http2ProtocolOptions& options) override; + + // Returns a global factory instance. Note that this is possible because no internal state is + // maintained; the thread safety of create() and init()'s side effects is guaranteed by Envoy's + // worker based threading model. + static ProdNghttp2SessionFactory& get() { + static ProdNghttp2SessionFactory* instance = new ProdNghttp2SessionFactory(); + return *instance; + } +}; + +/** + * Base class for HTTP/2 client and server codecs. + */ +class ConnectionImpl : public virtual Connection, protected Logger::Loggable { +public: + ConnectionImpl(Network::Connection& connection, Http::Http2::CodecStats& stats, + const envoy::config::core::v3::Http2ProtocolOptions& http2_options, + const uint32_t max_headers_kb, const uint32_t max_headers_count); + + ~ConnectionImpl() override; + + // Http::Connection + // NOTE: the `dispatch` method is also overridden in the ServerConnectionImpl class + Http::Status dispatch(Buffer::Instance& data) override; + void goAway() override; + Protocol protocol() override { return Protocol::Http2; } + void shutdownNotice() override; + bool wantsToWrite() override { return nghttp2_session_want_write(session_); } + // Propagate network connection watermark events to each stream on the connection. + void onUnderlyingConnectionAboveWriteBufferHighWatermark() override { + for (auto& stream : active_streams_) { + stream->runHighWatermarkCallbacks(); + } + } + void onUnderlyingConnectionBelowWriteBufferLowWatermark() override { + for (auto& stream : active_streams_) { + stream->runLowWatermarkCallbacks(); + } + } + + /** + * An inner dispatch call that executes the dispatching logic. While exception removal is in + * migration (#10878), this function may either throw an exception or return an error status. + * Exceptions are caught and translated to their corresponding statuses in the outer level + * dispatch. + * This needs to be virtual so that ServerConnectionImpl can override. + * TODO(#10878): Remove this when exception removal is complete. + */ + virtual Http::Status innerDispatch(Buffer::Instance& data); + +protected: + friend class ProdNghttp2SessionFactory; + + /** + * Wrapper for static nghttp2 callback dispatchers. + */ + class Http2Callbacks { + public: + Http2Callbacks(); + ~Http2Callbacks(); + + const nghttp2_session_callbacks* callbacks() { return callbacks_; } + + private: + nghttp2_session_callbacks* callbacks_; + }; + + /** + * Wrapper for static nghttp2 session options. + */ + class Http2Options { + public: + Http2Options(const envoy::config::core::v3::Http2ProtocolOptions& http2_options); + ~Http2Options(); + + const nghttp2_option* options() { return options_; } + + protected: + nghttp2_option* options_; + }; + + class ClientHttp2Options : public Http2Options { + public: + ClientHttp2Options(const envoy::config::core::v3::Http2ProtocolOptions& http2_options); + }; + + /** + * Base class for client and server side streams. 
+ */ + struct StreamImpl : public virtual StreamEncoder, + public Stream, + public LinkedObject, + public Event::DeferredDeletable, + public StreamCallbackHelper { + + StreamImpl(ConnectionImpl& parent, uint32_t buffer_limit); + ~StreamImpl() override; + // TODO(mattklein123): Optimally this would be done in the destructor but there are currently + // deferred delete lifetime issues that need sorting out if the destructor of the stream is + // going to be able to refer to the parent connection. + void destroy(); + void disarmStreamIdleTimer() { + if (stream_idle_timer_ != nullptr) { + // To ease testing and the destructor assertion. + stream_idle_timer_->disableTimer(); + stream_idle_timer_.reset(); + } + } + + StreamImpl* base() { return this; } + ssize_t onDataSourceRead(uint64_t length, uint32_t* data_flags); + int onDataSourceSend(const uint8_t* framehd, size_t length); + void resetStreamWorker(StreamResetReason reason); + static void buildHeaders(std::vector& final_headers, const HeaderMap& headers); + void saveHeader(HeaderString&& name, HeaderString&& value); + void encodeHeadersBase(const std::vector& final_headers, bool end_stream); + virtual void submitHeaders(const std::vector& final_headers, + nghttp2_data_provider* provider) PURE; + void encodeTrailersBase(const HeaderMap& headers); + void submitTrailers(const HeaderMap& trailers); + void submitMetadata(uint8_t flags); + virtual StreamDecoder& decoder() PURE; + virtual HeaderMap& headers() PURE; + virtual void allocTrailers() PURE; + virtual HeaderMapPtr cloneTrailers(const HeaderMap& trailers) PURE; + virtual void createPendingFlushTimer() PURE; + void onPendingFlushTimer(); + + // Http::StreamEncoder + void encodeData(Buffer::Instance& data, bool end_stream) override; + Stream& getStream() override { return *this; } + void encodeMetadata(const MetadataMapVector& metadata_map_vector) override; + Http1StreamEncoderOptionsOptRef http1StreamEncoderOptions() override { return absl::nullopt; } + + // Http::Stream + void addCallbacks(StreamCallbacks& callbacks) override { addCallbacksHelper(callbacks); } + void removeCallbacks(StreamCallbacks& callbacks) override { removeCallbacksHelper(callbacks); } + void resetStream(StreamResetReason reason) override; + void readDisable(bool disable) override; + uint32_t bufferLimit() override { return pending_recv_data_.highWatermark(); } + const Network::Address::InstanceConstSharedPtr& connectionLocalAddress() override { + return parent_.connection_.localAddress(); + } + absl::string_view responseDetails() override { return details_; } + void setFlushTimeout(std::chrono::milliseconds timeout) override { + stream_idle_timeout_ = timeout; + } + + // This code assumes that details is a static string, so that we + // can avoid copying it. + void setDetails(absl::string_view details) { + // It is probably a mistake to call setDetails() twice, so + // assert that details_ is empty. + ASSERT(details_.empty()); + + details_ = details; + } + + void setWriteBufferWatermarks(uint32_t low_watermark, uint32_t high_watermark) { + pending_recv_data_.setWatermarks(low_watermark, high_watermark); + pending_send_data_.setWatermarks(low_watermark, high_watermark); + } + + // If the receive buffer encounters watermark callbacks, enable/disable reads on this stream. + void pendingRecvBufferHighWatermark(); + void pendingRecvBufferLowWatermark(); + + // If the send buffer encounters watermark callbacks, propagate this information to the streams. 
+ // The router and connection manager will propagate them on as appropriate. + void pendingSendBufferHighWatermark(); + void pendingSendBufferLowWatermark(); + + // Does any necessary WebSocket/Upgrade conversion, then passes the headers + // to the decoder_. + virtual void decodeHeaders(bool allow_waiting_for_informational_headers) PURE; + virtual void decodeTrailers() PURE; + + // Get MetadataEncoder for this stream. + Http::Http2::MetadataEncoder& getMetadataEncoder(); + // Get MetadataDecoder for this stream. + Http::Http2::MetadataDecoder& getMetadataDecoder(); + // Callback function for MetadataDecoder. + void onMetadataDecoded(MetadataMapPtr&& metadata_map_ptr); + + bool buffersOverrun() const { return read_disable_count_ > 0; } + + ConnectionImpl& parent_; + int32_t stream_id_{-1}; + uint32_t unconsumed_bytes_{0}; + uint32_t read_disable_count_{0}; + Buffer::WatermarkBuffer pending_recv_data_{ + [this]() -> void { this->pendingRecvBufferLowWatermark(); }, + [this]() -> void { this->pendingRecvBufferHighWatermark(); }, + []() -> void { /* TODO(adisuissa): Handle overflow watermark */ }}; + Buffer::WatermarkBuffer pending_send_data_{ + [this]() -> void { this->pendingSendBufferLowWatermark(); }, + [this]() -> void { this->pendingSendBufferHighWatermark(); }, + []() -> void { /* TODO(adisuissa): Handle overflow watermark */ }}; + HeaderMapPtr pending_trailers_to_encode_; + std::unique_ptr metadata_decoder_; + std::unique_ptr metadata_encoder_; + absl::optional deferred_reset_; + HeaderString cookies_; + bool local_end_stream_sent_ : 1; + bool remote_end_stream_ : 1; + bool data_deferred_ : 1; + bool waiting_for_non_informational_headers_ : 1; + bool pending_receive_buffer_high_watermark_called_ : 1; + bool pending_send_buffer_high_watermark_called_ : 1; + bool reset_due_to_messaging_error_ : 1; + absl::string_view details_; + // See HttpConnectionManager.stream_idle_timeout. + std::chrono::milliseconds stream_idle_timeout_{}; + Event::TimerPtr stream_idle_timer_; + }; + + using StreamImplPtr = std::unique_ptr; + + /** + * Client side stream (request). + */ + struct ClientStreamImpl : public StreamImpl, public RequestEncoder { + ClientStreamImpl(ConnectionImpl& parent, uint32_t buffer_limit, + ResponseDecoder& response_decoder) + : StreamImpl(parent, buffer_limit), response_decoder_(response_decoder), + headers_or_trailers_(ResponseHeaderMapImpl::create()) {} + + // StreamImpl + void submitHeaders(const std::vector& final_headers, + nghttp2_data_provider* provider) override; + StreamDecoder& decoder() override { return response_decoder_; } + void decodeHeaders(bool allow_waiting_for_informational_headers) override; + void decodeTrailers() override; + HeaderMap& headers() override { + if (absl::holds_alternative(headers_or_trailers_)) { + return *absl::get(headers_or_trailers_); + } else { + return *absl::get(headers_or_trailers_); + } + } + void allocTrailers() override { + // If we are waiting for informational headers, make a new response header map, otherwise + // we are about to receive trailers. The codec makes sure this is the only valid sequence. 
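The pending_recv_data_ and pending_send_data_ members declared above are watermark buffers whose high/low callbacks drive read_disable_count_ and therefore buffersOverrun(). The sketch below isolates just that mechanism, using the same constructor shape (below-low, above-high, and unused overflow callbacks) and the two-argument setWatermarks() call seen in setWriteBufferWatermarks(); the byte thresholds and the main() wrapper are illustrative, and building it assumes an Envoy source tree for the include.

#include <cstdint>
#include <string>

#include "common/buffer/watermark_buffer.h"

// Illustrative sketch (not from this patch) of the receive-side flow control
// pattern used by StreamImpl above: crossing the high watermark bumps a
// read-disable count, draining below the low watermark releases it.
int main() {
  uint32_t read_disable_count = 0;
  Envoy::Buffer::WatermarkBuffer pending_recv_data(
      [&read_disable_count]() { --read_disable_count; }, // below low watermark
      [&read_disable_count]() { ++read_disable_count; }, // above high watermark
      []() { /* overflow watermark unused, as in the patch */ });
  pending_recv_data.setWatermarks(16, 32); // illustrative limits in bytes

  const std::string chunk(40, 'a');
  pending_recv_data.add(chunk.data(), chunk.size());    // crosses the high watermark
  const bool buffers_overrun = read_disable_count > 0;  // mirrors buffersOverrun()

  pending_recv_data.drain(pending_recv_data.length());  // drops below the low watermark
  return buffers_overrun && read_disable_count == 0 ? 0 : 1;
}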
+ if (waiting_for_non_informational_headers_) { + headers_or_trailers_.emplace(ResponseHeaderMapImpl::create()); + } else { + headers_or_trailers_.emplace(ResponseTrailerMapImpl::create()); + } + } + HeaderMapPtr cloneTrailers(const HeaderMap& trailers) override { + return createHeaderMap(trailers); + } + void createPendingFlushTimer() override { + // Client streams do not create a flush timer because we currently assume that any failure + // to flush would be covered by a request/stream/etc. timeout. + } + + // RequestEncoder + void encodeHeaders(const RequestHeaderMap& headers, bool end_stream) override; + void encodeTrailers(const RequestTrailerMap& trailers) override { + encodeTrailersBase(trailers); + } + + ResponseDecoder& response_decoder_; + absl::variant headers_or_trailers_; + std::string upgrade_type_; + }; + + using ClientStreamImplPtr = std::unique_ptr; + + /** + * Server side stream (response). + */ + struct ServerStreamImpl : public StreamImpl, public ResponseEncoder { + ServerStreamImpl(ConnectionImpl& parent, uint32_t buffer_limit) + : StreamImpl(parent, buffer_limit), headers_or_trailers_(RequestHeaderMapImpl::create()) {} + + // StreamImpl + void submitHeaders(const std::vector& final_headers, + nghttp2_data_provider* provider) override; + StreamDecoder& decoder() override { return *request_decoder_; } + void decodeHeaders(bool allow_waiting_for_informational_headers) override; + void decodeTrailers() override; + HeaderMap& headers() override { + if (absl::holds_alternative(headers_or_trailers_)) { + return *absl::get(headers_or_trailers_); + } else { + return *absl::get(headers_or_trailers_); + } + } + void allocTrailers() override { + headers_or_trailers_.emplace(RequestTrailerMapImpl::create()); + } + HeaderMapPtr cloneTrailers(const HeaderMap& trailers) override { + return createHeaderMap(trailers); + } + void createPendingFlushTimer() override; + + // ResponseEncoder + void encode100ContinueHeaders(const ResponseHeaderMap& headers) override; + void encodeHeaders(const ResponseHeaderMap& headers, bool end_stream) override; + void encodeTrailers(const ResponseTrailerMap& trailers) override { + encodeTrailersBase(trailers); + } + + RequestDecoder* request_decoder_{}; + absl::variant headers_or_trailers_; + }; + + using ServerStreamImplPtr = std::unique_ptr; + + ConnectionImpl* base() { return this; } + // NOTE: Always use non debug nullptr checks against the return value of this function. There are + // edge cases (such as for METADATA frames) where nghttp2 will issue a callback for a stream_id + // that is not associated with an existing stream. + StreamImpl* getStream(int32_t stream_id); + int saveHeader(const nghttp2_frame* frame, HeaderString&& name, HeaderString&& value); + void sendPendingFrames(); + void sendSettings(const envoy::config::core::v3::Http2ProtocolOptions& http2_options, + bool disable_push); + // Callback triggered when the peer's SETTINGS frame is received. + // NOTE: This is only used for tests. + virtual void onSettingsForTest(const nghttp2_settings&) {} + + /** + * Check if header name contains underscore character. + * Underscore character is allowed in header names by the RFC-7230 and this check is implemented + * as a security measure due to systems that treat '_' and '-' as interchangeable. + * The ServerConnectionImpl may drop header or reject request based on the + * `common_http_protocol_options.headers_with_underscores_action` configuration option in the + * HttpConnectionManager. 
+ */ + virtual absl::optional checkHeaderNameForUnderscores(absl::string_view /* header_name */) { + return absl::nullopt; + } + + static Http2Callbacks http2_callbacks_; + + std::list active_streams_; + nghttp2_session* session_{}; + Http::Http2::CodecStats& stats_; + Network::Connection& connection_; + const uint32_t max_headers_kb_; + const uint32_t max_headers_count_; + uint32_t per_stream_buffer_limit_; + bool allow_metadata_; + const bool stream_error_on_invalid_http_messaging_; + bool flood_detected_; + + // Set if the type of frame that is about to be sent is PING or SETTINGS with the ACK flag set, or + // RST_STREAM. + bool is_outbound_flood_monitored_control_frame_ = 0; + // This counter keeps track of the number of outbound frames of all types (these that were + // buffered in the underlying connection but not yet written into the socket). If this counter + // exceeds the `max_outbound_frames_' value the connection is terminated. + uint32_t outbound_frames_ = 0; + // Maximum number of outbound frames. Initialized from corresponding http2_protocol_options. + // Default value is 10000. + const uint32_t max_outbound_frames_; + const std::function frame_buffer_releasor_; + // This counter keeps track of the number of outbound frames of types PING, SETTINGS and + // RST_STREAM (these that were buffered in the underlying connection but not yet written into the + // socket). If this counter exceeds the `max_outbound_control_frames_' value the connection is + // terminated. + uint32_t outbound_control_frames_ = 0; + // Maximum number of outbound frames of types PING, SETTINGS and RST_STREAM. Initialized from + // corresponding http2_protocol_options. Default value is 1000. + const uint32_t max_outbound_control_frames_; + const std::function control_frame_buffer_releasor_; + // This counter keeps track of the number of consecutive inbound frames of types HEADERS, + // CONTINUATION and DATA with an empty payload and no end stream flag. If this counter exceeds + // the `max_consecutive_inbound_frames_with_empty_payload_` value the connection is terminated. + uint32_t consecutive_inbound_frames_with_empty_payload_ = 0; + // Maximum number of consecutive inbound frames of types HEADERS, CONTINUATION and DATA without + // a payload. Initialized from corresponding http2_protocol_options. Default value is 1. + const uint32_t max_consecutive_inbound_frames_with_empty_payload_; + + // This counter keeps track of the number of inbound streams. + uint32_t inbound_streams_ = 0; + // This counter keeps track of the number of inbound PRIORITY frames. If this counter exceeds + // the value calculated using this formula: + // + // max_inbound_priority_frames_per_stream_ * (1 + inbound_streams_) + // + // the connection is terminated. + uint64_t inbound_priority_frames_ = 0; + // Maximum number of inbound PRIORITY frames per stream. Initialized from corresponding + // http2_protocol_options. Default value is 100. + const uint32_t max_inbound_priority_frames_per_stream_; + + // This counter keeps track of the number of inbound WINDOW_UPDATE frames. If this counter exceeds + // the value calculated using this formula: + // + // 1 + 2 * (inbound_streams_ + + // max_inbound_window_update_frames_per_data_frame_sent_ * outbound_data_frames_) + // + // the connection is terminated. + uint64_t inbound_window_update_frames_ = 0; + // This counter keeps track of the number of outbound DATA frames. 
+ uint64_t outbound_data_frames_ = 0; + // Maximum number of inbound WINDOW_UPDATE frames per outbound DATA frame sent. Initialized + // from corresponding http2_protocol_options. Default value is 10. + const uint32_t max_inbound_window_update_frames_per_data_frame_sent_; + + // For the flood mitigation to work the onSend callback must be called once for each outbound + // frame. This is what the nghttp2 library is doing, however this is not documented. The + // Http2FloodMitigationTest.* tests in test/integration/http2_integration_test.cc will break if + // this changes in the future. Also it is important that onSend does not do partial writes, as the + // nghttp2 library will keep calling this callback to write the rest of the frame. + ssize_t onSend(const uint8_t* data, size_t length); + +private: + virtual ConnectionCallbacks& callbacks() PURE; + virtual int onBeginHeaders(const nghttp2_frame* frame) PURE; + int onData(int32_t stream_id, const uint8_t* data, size_t len); + int onBeforeFrameReceived(const nghttp2_frame_hd* hd); + int onFrameReceived(const nghttp2_frame* frame); + int onBeforeFrameSend(const nghttp2_frame* frame); + int onFrameSend(const nghttp2_frame* frame); + int onError(absl::string_view error); + virtual int onHeader(const nghttp2_frame* frame, HeaderString&& name, HeaderString&& value) PURE; + int onInvalidFrame(int32_t stream_id, int error_code); + int onStreamClose(int32_t stream_id, uint32_t error_code); + int onMetadataReceived(int32_t stream_id, const uint8_t* data, size_t len); + int onMetadataFrameComplete(int32_t stream_id, bool end_metadata); + ssize_t packMetadata(int32_t stream_id, uint8_t* buf, size_t len); + // Adds buffer fragment for a new outbound frame to the supplied Buffer::OwnedImpl. + // Returns true on success or false if outbound queue limits were exceeded. + bool addOutboundFrameFragment(Buffer::OwnedImpl& output, const uint8_t* data, size_t length); + virtual void checkOutboundQueueLimits() PURE; + void incrementOutboundFrameCount(bool is_outbound_flood_monitored_control_frame); + virtual bool trackInboundFrames(const nghttp2_frame_hd* hd, uint32_t padding_length) PURE; + virtual bool checkInboundFrameLimits() PURE; + void releaseOutboundFrame(); + void releaseOutboundControlFrame(); + + bool dispatching_ : 1; + bool raised_goaway_ : 1; + bool pending_deferred_reset_ : 1; +}; + +/** + * HTTP/2 client connection codec. + */ +class ClientConnectionImpl : public ClientConnection, public ConnectionImpl { +public: + using SessionFactory = Nghttp2SessionFactory; + ClientConnectionImpl(Network::Connection& connection, ConnectionCallbacks& callbacks, + Http::Http2::CodecStats& stats, + const envoy::config::core::v3::Http2ProtocolOptions& http2_options, + const uint32_t max_response_headers_kb, + const uint32_t max_response_headers_count, + SessionFactory& http2_session_factory); + + // Http::ClientConnection + RequestEncoder& newStream(ResponseDecoder& response_decoder) override; + +private: + // ConnectionImpl + ConnectionCallbacks& callbacks() override { return callbacks_; } + int onBeginHeaders(const nghttp2_frame* frame) override; + int onHeader(const nghttp2_frame* frame, HeaderString&& name, HeaderString&& value) override; + + // Presently client connections only perform accounting of outbound frames and do not + // terminate connections when queue limits are exceeded. The primary reason is the complexity of + // the clean-up of upstream connections. 
The clean-up of upstream connection causes RST_STREAM + // messages to be sent on corresponding downstream connections. This may actually trigger flood + // mitigation on the downstream connections, which causes an exception to be thrown in the middle + // of the clean-up loop, leaving resources in a half cleaned up state. + // TODO(yanavlasov): add flood mitigation for upstream connections as well. + void checkOutboundQueueLimits() override {} + bool trackInboundFrames(const nghttp2_frame_hd*, uint32_t) override { return true; } + bool checkInboundFrameLimits() override { return true; } + + Http::ConnectionCallbacks& callbacks_; +}; + +/** + * HTTP/2 server connection codec. + */ +class ServerConnectionImpl : public ServerConnection, public ConnectionImpl { +public: + ServerConnectionImpl(Network::Connection& connection, ServerConnectionCallbacks& callbacks, + Http::Http2::CodecStats& stats, + const envoy::config::core::v3::Http2ProtocolOptions& http2_options, + const uint32_t max_request_headers_kb, + const uint32_t max_request_headers_count, + envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action); + +private: + // ConnectionImpl + ConnectionCallbacks& callbacks() override { return callbacks_; } + int onBeginHeaders(const nghttp2_frame* frame) override; + int onHeader(const nghttp2_frame* frame, HeaderString&& name, HeaderString&& value) override; + void checkOutboundQueueLimits() override; + bool trackInboundFrames(const nghttp2_frame_hd* hd, uint32_t padding_length) override; + bool checkInboundFrameLimits() override; + absl::optional checkHeaderNameForUnderscores(absl::string_view header_name) override; + + // Http::Connection + // The reason for overriding the dispatch method is to do flood mitigation only when + // processing data from downstream client. Doing flood mitigation when processing upstream + // responses makes clean-up tricky, which needs to be improved (see comments for the + // ClientConnectionImpl::checkOutboundQueueLimits method). The dispatch method on the + // ServerConnectionImpl objects is called only when processing data from the downstream client in + // the ConnectionManagerImpl::onData method. + Http::Status dispatch(Buffer::Instance& data) override; + Http::Status innerDispatch(Buffer::Instance& data) override; + + ServerConnectionCallbacks& callbacks_; + + // This flag indicates that downstream data is being dispatched and turns on flood mitigation + // in the checkMaxOutbound*Framed methods. + bool dispatching_downstream_data_{false}; + + // The action to take when a request header name contains underscore characters. 
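// --- Illustrative sketch (not part of this patch) ---------------------------
// The comments above describe an intentionally asymmetric policy: only the
// server codec enforces outbound queue limits, and only while it is
// dispatching bytes that arrived from the downstream client; upstream (client)
// codecs keep the counters but never terminate the connection. A hypothetical
// restatement of that flag-guarded check (names are illustrative, not the
// patch's API):
#include <cstdint>

class DownstreamFloodGuardSketch {
public:
  // Mirrors the ServerConnectionImpl::dispatch() override: the guard is armed
  // only while downstream data is being processed.
  bool dispatchDownstream() {
    dispatching_downstream_data_ = true;
    const bool ok = checkOutboundQueueLimits();
    dispatching_downstream_data_ = false;
    return ok;
  }

  // Mirrors ClientConnectionImpl::checkOutboundQueueLimits(): upstream-side
  // writes are never enforced, to keep upstream clean-up simple.
  bool dispatchUpstream() { return true; }

private:
  bool checkOutboundQueueLimits() const {
    if (!dispatching_downstream_data_) {
      return true;
    }
    return outbound_frames_ <= max_outbound_frames_;
  }

  bool dispatching_downstream_data_{false};
  uint64_t outbound_frames_{0};
  const uint64_t max_outbound_frames_{10000};
};
// ---------------------------------------------------------------------------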
+ envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction + headers_with_underscores_action_; +}; + +} // namespace Http2 +} // namespace Legacy +} // namespace Http +} // namespace Envoy diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc index ba4817e19a53..35b990cd39b7 100644 --- a/source/common/runtime/runtime_features.cc +++ b/source/common/runtime/runtime_features.cc @@ -70,6 +70,7 @@ constexpr const char* runtime_features[] = { "envoy.reloadable_features.fixed_connection_close", "envoy.reloadable_features.http_default_alpn", "envoy.reloadable_features.listener_in_place_filterchain_update", + "envoy.reloadable_features.new_codec_behavior", "envoy.reloadable_features.preserve_query_string_in_path_redirects", "envoy.reloadable_features.preserve_upstream_date", "envoy.reloadable_features.stop_faking_paths", diff --git a/source/extensions/filters/network/http_connection_manager/BUILD b/source/extensions/filters/network/http_connection_manager/BUILD index ec576a8abac9..7ab7817d80fd 100644 --- a/source/extensions/filters/network/http_connection_manager/BUILD +++ b/source/extensions/filters/network/http_connection_manager/BUILD @@ -35,7 +35,9 @@ envoy_cc_extension( "//source/common/http:default_server_string_lib", "//source/common/http:request_id_extension_lib", "//source/common/http:utility_lib", + "//source/common/http/http1:codec_legacy_lib", "//source/common/http/http1:codec_lib", + "//source/common/http/http2:codec_legacy_lib", "//source/common/http/http2:codec_lib", "//source/common/json:json_loader_lib", "//source/common/local_reply:local_reply_lib", diff --git a/source/extensions/filters/network/http_connection_manager/config.cc b/source/extensions/filters/network/http_connection_manager/config.cc index 5c830c1ecca4..888709fe4251 100644 --- a/source/extensions/filters/network/http_connection_manager/config.cc +++ b/source/extensions/filters/network/http_connection_manager/config.cc @@ -20,7 +20,9 @@ #include "common/http/conn_manager_utility.h" #include "common/http/default_server_string.h" #include "common/http/http1/codec_impl.h" +#include "common/http/http1/codec_impl_legacy.h" #include "common/http/http2/codec_impl.h" +#include "common/http/http2/codec_impl_legacy.h" #include "common/http/http3/quic_codec_factory.h" #include "common/http/http3/well_known_names.h" #include "common/http/request_id_extension_impl.h" @@ -480,16 +482,33 @@ HttpConnectionManagerConfig::createCodec(Network::Connection& connection, const Buffer::Instance& data, Http::ServerConnectionCallbacks& callbacks) { switch (codec_type_) { - case CodecType::HTTP1: - return std::make_unique( - connection, Http::Http1::CodecStats::atomicGet(http1_codec_stats_, context_.scope()), - callbacks, http1_settings_, maxRequestHeadersKb(), maxRequestHeadersCount(), - headersWithUnderscoresAction()); + case CodecType::HTTP1: { + if (context_.runtime().snapshot().runtimeFeatureEnabled( + "envoy.reloadable_features.new_codec_behavior")) { + return std::make_unique( + connection, Http::Http1::CodecStats::atomicGet(http1_codec_stats_, context_.scope()), + callbacks, http1_settings_, maxRequestHeadersKb(), maxRequestHeadersCount(), + headersWithUnderscoresAction()); + } else { + return std::make_unique( + connection, Http::Http1::CodecStats::atomicGet(http1_codec_stats_, context_.scope()), + callbacks, http1_settings_, maxRequestHeadersKb(), maxRequestHeadersCount(), + headersWithUnderscoresAction()); + } + } case CodecType::HTTP2: { - return std::make_unique( - 
connection, callbacks, - Http::Http2::CodecStats::atomicGet(http2_codec_stats_, context_.scope()), http2_options_, - maxRequestHeadersKb(), maxRequestHeadersCount(), headersWithUnderscoresAction()); + if (context_.runtime().snapshot().runtimeFeatureEnabled( + "envoy.reloadable_features.new_codec_behavior")) { + return std::make_unique( + connection, callbacks, + Http::Http2::CodecStats::atomicGet(http2_codec_stats_, context_.scope()), http2_options_, + maxRequestHeadersKb(), maxRequestHeadersCount(), headersWithUnderscoresAction()); + } else { + return std::make_unique( + connection, callbacks, + Http::Http2::CodecStats::atomicGet(http2_codec_stats_, context_.scope()), http2_options_, + maxRequestHeadersKb(), maxRequestHeadersCount(), headersWithUnderscoresAction()); + } } case CodecType::HTTP3: // Hard code Quiche factory name here to instantiate a QUIC codec implemented. diff --git a/test/common/http/codec_impl_fuzz_test.cc b/test/common/http/codec_impl_fuzz_test.cc index 16e4eee2c960..492d2889aa8f 100644 --- a/test/common/http/codec_impl_fuzz_test.cc +++ b/test/common/http/codec_impl_fuzz_test.cc @@ -462,32 +462,34 @@ void codecFuzz(const test::common::http::CodecImplFuzzTestCase& input, HttpVersi const envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction headers_with_underscores_action = envoy::config::core::v3::HttpProtocolOptions::ALLOW; + Http1::CodecStats::AtomicPtr http1_stats; + Http2::CodecStats::AtomicPtr http2_stats; ClientConnectionPtr client; ServerConnectionPtr server; const bool http2 = http_version == HttpVersion::Http2; - Http1::CodecStats::AtomicPtr stats; if (http2) { - client = std::make_unique( - client_connection, client_callbacks, stats_store, client_http2_options, - max_request_headers_kb, max_response_headers_count, + client = std::make_unique( + client_connection, client_callbacks, Http2::CodecStats::atomicGet(http2_stats, stats_store), + client_http2_options, max_request_headers_kb, max_response_headers_count, Http2::ProdNghttp2SessionFactory::get()); } else { client = std::make_unique( - client_connection, Http1::CodecStats::atomicGet(stats, stats_store), client_callbacks, + client_connection, Http1::CodecStats::atomicGet(http1_stats, stats_store), client_callbacks, client_http1settings, max_response_headers_count); } if (http2) { const envoy::config::core::v3::Http2ProtocolOptions server_http2_options{ fromHttp2Settings(input.h2_settings().server())}; - server = std::make_unique( - server_connection, server_callbacks, stats_store, server_http2_options, - max_request_headers_kb, max_request_headers_count, headers_with_underscores_action); + server = std::make_unique( + server_connection, server_callbacks, Http2::CodecStats::atomicGet(http2_stats, stats_store), + server_http2_options, max_request_headers_kb, max_request_headers_count, + headers_with_underscores_action); } else { const Http1Settings server_http1settings{fromHttp1Settings(input.h1_settings().server())}; server = std::make_unique( - server_connection, Http1::CodecStats::atomicGet(stats, stats_store), server_callbacks, + server_connection, Http1::CodecStats::atomicGet(http1_stats, stats_store), server_callbacks, server_http1settings, max_request_headers_kb, max_request_headers_count, headers_with_underscores_action); } @@ -643,8 +645,8 @@ void codecFuzz(const test::common::http::CodecImplFuzzTestCase& input, HttpVersi } } if (!codec_error && http2) { - dynamic_cast(*client).goAway(); - dynamic_cast(*server).goAway(); + dynamic_cast(*client).goAway(); + 
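// --- Illustrative sketch (not part of this patch) ---------------------------
// The createCodec() hunk above repeats one pattern per codec type: consult the
// "envoy.reloadable_features.new_codec_behavior" runtime guard and construct
// either the new or the Legacy implementation with the exact same arguments.
// A minimal, self-contained restatement of that selection (the Codec,
// NewCodec, LegacyCodec types and the makeCodec() helper are hypothetical
// stand-ins, not the patch's classes):
#include <memory>

struct Codec { virtual ~Codec() = default; };
struct NewCodec : Codec {};
struct LegacyCodec : Codec {};

std::unique_ptr<Codec> makeCodec(bool new_codec_behavior_enabled) {
  // Both branches take identical constructor arguments in the real patch; only
  // the concrete type differs, so the behaviour can be flipped at runtime (and
  // rolled back) without a configuration change.
  if (new_codec_behavior_enabled) {
    return std::make_unique<NewCodec>();
  }
  return std::make_unique<LegacyCodec>();
}
// ---------------------------------------------------------------------------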
dynamic_cast(*server).goAway(); } } diff --git a/test/common/http/http1/BUILD b/test/common/http/http1/BUILD index 715e6dbf0c23..dbcdcd4d4c8b 100644 --- a/test/common/http/http1/BUILD +++ b/test/common/http/http1/BUILD @@ -26,6 +26,7 @@ envoy_cc_test( "//source/common/event:dispatcher_lib", "//source/common/http:exception_lib", "//source/common/http:header_map_lib", + "//source/common/http/http1:codec_legacy_lib", "//source/common/http/http1:codec_lib", "//test/common/stats:stat_test_utility_lib", "//test/mocks/buffer:buffer_mocks", diff --git a/test/common/http/http1/codec_impl_test.cc b/test/common/http/http1/codec_impl_test.cc index fade286cfb44..77565550dc26 100644 --- a/test/common/http/http1/codec_impl_test.cc +++ b/test/common/http/http1/codec_impl_test.cc @@ -9,6 +9,7 @@ #include "common/http/exception.h" #include "common/http/header_map_impl.h" #include "common/http/http1/codec_impl.h" +#include "common/http/http1/codec_impl_legacy.h" #include "common/runtime/runtime_impl.h" #include "test/common/stats/stat_test_utility.h" @@ -33,7 +34,6 @@ using testing::StrictMock; namespace Envoy { namespace Http { -namespace Http1 { namespace { std::string createHeaderFragment(int num_headers) { // Create a header field with num_headers headers. @@ -55,7 +55,7 @@ Buffer::OwnedImpl createBufferWithNByteSlices(absl::string_view input, size_t ma } } // namespace -class Http1CodecTestBase : public testing::Test { +class Http1CodecTestBase { protected: Http::Http1::CodecStats& http1CodecStats() { return Http::Http1::CodecStats::atomicGet(http1_codec_stats_, store_); @@ -65,12 +65,19 @@ class Http1CodecTestBase : public testing::Test { Http::Http1::CodecStats::AtomicPtr http1_codec_stats_; }; -class Http1ServerConnectionImplTest : public Http1CodecTestBase { +class Http1ServerConnectionImplTest : public Http1CodecTestBase, + public testing::TestWithParam { public: void initialize() { - codec_ = std::make_unique( - connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, - max_request_headers_count_, headers_with_underscores_action_); + if (GetParam()) { + codec_ = std::make_unique( + connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, + max_request_headers_count_, headers_with_underscores_action_); + } else { + codec_ = std::make_unique( + connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, + max_request_headers_count_, headers_with_underscores_action_); + } } NiceMock connection_; @@ -128,9 +135,15 @@ void Http1ServerConnectionImplTest::expect400(Protocol p, bool allow_absolute_ur if (allow_absolute_url) { codec_settings_.allow_absolute_url_ = allow_absolute_url; - codec_ = std::make_unique( - connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, - max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); + if (GetParam()) { + codec_ = std::make_unique( + connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, + max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); + } else { + codec_ = std::make_unique( + connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, + max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); + } } MockRequestDecoder decoder; @@ -158,9 +171,15 @@ void Http1ServerConnectionImplTest::expectHeadersTest(Protocol p, bool allow_abs // Make a new 'codec' with the right settings if (allow_absolute_url) { 
codec_settings_.allow_absolute_url_ = allow_absolute_url; - codec_ = std::make_unique( - connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, - max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); + if (GetParam()) { + codec_ = std::make_unique( + connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, + max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); + } else { + codec_ = std::make_unique( + connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, + max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); + } } MockRequestDecoder decoder; @@ -179,9 +198,15 @@ void Http1ServerConnectionImplTest::expectTrailersTest(bool enable_trailers) { // Make a new 'codec' with the right settings if (enable_trailers) { codec_settings_.enable_trailers_ = enable_trailers; - codec_ = std::make_unique( - connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, - max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); + if (GetParam()) { + codec_ = std::make_unique( + connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, + max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); + } else { + codec_ = std::make_unique( + connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, + max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); + } } InSequence sequence; @@ -215,9 +240,15 @@ void Http1ServerConnectionImplTest::testTrailersExceedLimit(std::string trailer_ initialize(); // Make a new 'codec' with the right settings codec_settings_.enable_trailers_ = enable_trailers; - codec_ = std::make_unique( - connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, - max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); + if (GetParam()) { + codec_ = std::make_unique( + connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, + max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); + } else { + codec_ = std::make_unique( + connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, + max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); + } std::string exception_reason; NiceMock decoder; EXPECT_CALL(callbacks_, newStream(_, _)) @@ -295,7 +326,12 @@ void Http1ServerConnectionImplTest::testRequestHeadersAccepted(std::string heade EXPECT_TRUE(status.ok()); } -TEST_F(Http1ServerConnectionImplTest, EmptyHeader) { +INSTANTIATE_TEST_SUITE_P(Codecs, Http1ServerConnectionImplTest, testing::Bool(), + [](const testing::TestParamInfo& param) { + return param.param ? 
"New" : "Legacy"; + }); + +TEST_P(Http1ServerConnectionImplTest, EmptyHeader) { initialize(); InSequence sequence; @@ -319,7 +355,7 @@ TEST_F(Http1ServerConnectionImplTest, EmptyHeader) { // We support the identity encoding, but because it does not end in chunked encoding we reject it // per RFC 7230 Section 3.3.3 -TEST_F(Http1ServerConnectionImplTest, IdentityEncodingNoChunked) { +TEST_P(Http1ServerConnectionImplTest, IdentityEncodingNoChunked) { initialize(); InSequence sequence; @@ -334,7 +370,7 @@ TEST_F(Http1ServerConnectionImplTest, IdentityEncodingNoChunked) { EXPECT_EQ(status.message(), "http/1.1 protocol error: unsupported transfer encoding"); } -TEST_F(Http1ServerConnectionImplTest, UnsupportedEncoding) { +TEST_P(Http1ServerConnectionImplTest, UnsupportedEncoding) { initialize(); InSequence sequence; @@ -350,7 +386,7 @@ TEST_F(Http1ServerConnectionImplTest, UnsupportedEncoding) { } // Verify that data in the two body chunks is merged before the call to decodeData. -TEST_F(Http1ServerConnectionImplTest, ChunkedBody) { +TEST_P(Http1ServerConnectionImplTest, ChunkedBody) { initialize(); InSequence sequence; @@ -381,7 +417,7 @@ TEST_F(Http1ServerConnectionImplTest, ChunkedBody) { // Verify dispatch behavior when dispatching an incomplete chunk, and resumption of the parse via a // second dispatch. -TEST_F(Http1ServerConnectionImplTest, ChunkedBodySplitOverTwoDispatches) { +TEST_P(Http1ServerConnectionImplTest, ChunkedBodySplitOverTwoDispatches) { initialize(); InSequence sequence; @@ -419,7 +455,7 @@ TEST_F(Http1ServerConnectionImplTest, ChunkedBodySplitOverTwoDispatches) { // Verify that headers and chunked body are processed correctly and data is merged before the // decodeData call even if delivered in a buffer that holds 1 byte per slice. -TEST_F(Http1ServerConnectionImplTest, ChunkedBodyFragmentedBuffer) { +TEST_P(Http1ServerConnectionImplTest, ChunkedBodyFragmentedBuffer) { initialize(); InSequence sequence; @@ -448,7 +484,7 @@ TEST_F(Http1ServerConnectionImplTest, ChunkedBodyFragmentedBuffer) { EXPECT_EQ(0U, buffer.length()); } -TEST_F(Http1ServerConnectionImplTest, ChunkedBodyCase) { +TEST_P(Http1ServerConnectionImplTest, ChunkedBodyCase) { initialize(); InSequence sequence; @@ -475,7 +511,7 @@ TEST_F(Http1ServerConnectionImplTest, ChunkedBodyCase) { // Verify that body dispatch does not happen after detecting a parse error processing a chunk // header. -TEST_F(Http1ServerConnectionImplTest, InvalidChunkHeader) { +TEST_P(Http1ServerConnectionImplTest, InvalidChunkHeader) { initialize(); InSequence sequence; @@ -501,7 +537,7 @@ TEST_F(Http1ServerConnectionImplTest, InvalidChunkHeader) { EXPECT_EQ(status.message(), "http/1.1 protocol error: HPE_INVALID_CHUNK_SIZE"); } -TEST_F(Http1ServerConnectionImplTest, IdentityAndChunkedBody) { +TEST_P(Http1ServerConnectionImplTest, IdentityAndChunkedBody) { initialize(); InSequence sequence; @@ -518,7 +554,7 @@ TEST_F(Http1ServerConnectionImplTest, IdentityAndChunkedBody) { EXPECT_EQ(status.message(), "http/1.1 protocol error: unsupported transfer encoding"); } -TEST_F(Http1ServerConnectionImplTest, HostWithLWS) { +TEST_P(Http1ServerConnectionImplTest, HostWithLWS) { initialize(); TestRequestHeaderMapImpl expected_headers{ @@ -539,7 +575,7 @@ TEST_F(Http1ServerConnectionImplTest, HostWithLWS) { // Regression test for https://github.com/envoyproxy/envoy/issues/10270. Linear whitespace at the // beginning and end of a header value should be stripped. Whitespace in the middle should be // preserved. 
-TEST_F(Http1ServerConnectionImplTest, InnerLWSIsPreserved) { +TEST_P(Http1ServerConnectionImplTest, InnerLWSIsPreserved) { initialize(); // Header with many spaces surrounded by non-whitespace characters to ensure that dispatching is @@ -572,7 +608,7 @@ TEST_F(Http1ServerConnectionImplTest, InnerLWSIsPreserved) { } } -TEST_F(Http1ServerConnectionImplTest, Http10) { +TEST_P(Http1ServerConnectionImplTest, Http10) { initialize(); InSequence sequence; @@ -590,7 +626,7 @@ TEST_F(Http1ServerConnectionImplTest, Http10) { EXPECT_EQ(Protocol::Http10, codec_->protocol()); } -TEST_F(Http1ServerConnectionImplTest, Http10AbsoluteNoOp) { +TEST_P(Http1ServerConnectionImplTest, Http10AbsoluteNoOp) { initialize(); TestRequestHeaderMapImpl expected_headers{{":path", "/"}, {":method", "GET"}}; @@ -598,7 +634,7 @@ TEST_F(Http1ServerConnectionImplTest, Http10AbsoluteNoOp) { expectHeadersTest(Protocol::Http10, true, buffer, expected_headers); } -TEST_F(Http1ServerConnectionImplTest, Http10Absolute) { +TEST_P(Http1ServerConnectionImplTest, Http10Absolute) { initialize(); TestRequestHeaderMapImpl expected_headers{ @@ -607,7 +643,7 @@ TEST_F(Http1ServerConnectionImplTest, Http10Absolute) { expectHeadersTest(Protocol::Http10, true, buffer, expected_headers); } -TEST_F(Http1ServerConnectionImplTest, Http10MultipleResponses) { +TEST_P(Http1ServerConnectionImplTest, Http10MultipleResponses) { initialize(); MockRequestDecoder decoder; @@ -653,7 +689,7 @@ TEST_F(Http1ServerConnectionImplTest, Http10MultipleResponses) { } } -TEST_F(Http1ServerConnectionImplTest, Http11AbsolutePath1) { +TEST_P(Http1ServerConnectionImplTest, Http11AbsolutePath1) { initialize(); TestRequestHeaderMapImpl expected_headers{ @@ -662,7 +698,7 @@ TEST_F(Http1ServerConnectionImplTest, Http11AbsolutePath1) { expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); } -TEST_F(Http1ServerConnectionImplTest, Http11AbsolutePath2) { +TEST_P(Http1ServerConnectionImplTest, Http11AbsolutePath2) { initialize(); TestRequestHeaderMapImpl expected_headers{ @@ -671,7 +707,7 @@ TEST_F(Http1ServerConnectionImplTest, Http11AbsolutePath2) { expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); } -TEST_F(Http1ServerConnectionImplTest, Http11AbsolutePathWithPort) { +TEST_P(Http1ServerConnectionImplTest, Http11AbsolutePathWithPort) { initialize(); TestRequestHeaderMapImpl expected_headers{ @@ -681,7 +717,7 @@ TEST_F(Http1ServerConnectionImplTest, Http11AbsolutePathWithPort) { expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); } -TEST_F(Http1ServerConnectionImplTest, Http11AbsoluteEnabledNoOp) { +TEST_P(Http1ServerConnectionImplTest, Http11AbsoluteEnabledNoOp) { initialize(); TestRequestHeaderMapImpl expected_headers{ @@ -690,7 +726,7 @@ TEST_F(Http1ServerConnectionImplTest, Http11AbsoluteEnabledNoOp) { expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); } -TEST_F(Http1ServerConnectionImplTest, Http11InvalidRequest) { +TEST_P(Http1ServerConnectionImplTest, Http11InvalidRequest) { initialize(); // Invalid because www.somewhere.com is not an absolute path nor an absolute url @@ -698,7 +734,7 @@ TEST_F(Http1ServerConnectionImplTest, Http11InvalidRequest) { expect400(Protocol::Http11, true, buffer, "http1.codec_error"); } -TEST_F(Http1ServerConnectionImplTest, Http11InvalidTrailerPost) { +TEST_P(Http1ServerConnectionImplTest, Http11InvalidTrailerPost) { initialize(); MockRequestDecoder decoder; @@ -723,7 +759,7 @@ TEST_F(Http1ServerConnectionImplTest, Http11InvalidTrailerPost) { 
EXPECT_TRUE(isCodecProtocolError(status)); } -TEST_F(Http1ServerConnectionImplTest, Http11AbsolutePathNoSlash) { +TEST_P(Http1ServerConnectionImplTest, Http11AbsolutePathNoSlash) { initialize(); TestRequestHeaderMapImpl expected_headers{ @@ -732,21 +768,21 @@ TEST_F(Http1ServerConnectionImplTest, Http11AbsolutePathNoSlash) { expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); } -TEST_F(Http1ServerConnectionImplTest, Http11AbsolutePathBad) { +TEST_P(Http1ServerConnectionImplTest, Http11AbsolutePathBad) { initialize(); Buffer::OwnedImpl buffer("GET * HTTP/1.1\r\nHost: bah\r\n\r\n"); expect400(Protocol::Http11, true, buffer, "http1.invalid_url"); } -TEST_F(Http1ServerConnectionImplTest, Http11AbsolutePortTooLarge) { +TEST_P(Http1ServerConnectionImplTest, Http11AbsolutePortTooLarge) { initialize(); Buffer::OwnedImpl buffer("GET http://foobar.com:1000000 HTTP/1.1\r\nHost: bah\r\n\r\n"); expect400(Protocol::Http11, true, buffer); } -TEST_F(Http1ServerConnectionImplTest, SketchyConnectionHeader) { +TEST_P(Http1ServerConnectionImplTest, SketchyConnectionHeader) { initialize(); Buffer::OwnedImpl buffer( @@ -754,7 +790,7 @@ TEST_F(Http1ServerConnectionImplTest, SketchyConnectionHeader) { expect400(Protocol::Http11, true, buffer, "http1.connection_header_rejected"); } -TEST_F(Http1ServerConnectionImplTest, Http11RelativeOnly) { +TEST_P(Http1ServerConnectionImplTest, Http11RelativeOnly) { initialize(); TestRequestHeaderMapImpl expected_headers{ @@ -763,7 +799,7 @@ TEST_F(Http1ServerConnectionImplTest, Http11RelativeOnly) { expectHeadersTest(Protocol::Http11, false, buffer, expected_headers); } -TEST_F(Http1ServerConnectionImplTest, Http11Options) { +TEST_P(Http1ServerConnectionImplTest, Http11Options) { initialize(); TestRequestHeaderMapImpl expected_headers{ @@ -772,7 +808,7 @@ TEST_F(Http1ServerConnectionImplTest, Http11Options) { expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); } -TEST_F(Http1ServerConnectionImplTest, SimpleGet) { +TEST_P(Http1ServerConnectionImplTest, SimpleGet) { initialize(); InSequence sequence; @@ -789,7 +825,7 @@ TEST_F(Http1ServerConnectionImplTest, SimpleGet) { EXPECT_EQ(0U, buffer.length()); } -TEST_F(Http1ServerConnectionImplTest, BadRequestNoStreamLegacy) { +TEST_P(Http1ServerConnectionImplTest, BadRequestNoStreamLegacy) { TestScopedRuntime scoped_runtime; Runtime::LoaderSingleton::getExisting()->mergeValues( {{"envoy.reloadable_features.early_errors_via_hcm", "false"}}); @@ -809,7 +845,7 @@ TEST_F(Http1ServerConnectionImplTest, BadRequestNoStreamLegacy) { // Test that if the stream is not created at the time an error is detected, it // is created as part of sending the protocol error. -TEST_F(Http1ServerConnectionImplTest, BadRequestNoStream) { +TEST_P(Http1ServerConnectionImplTest, BadRequestNoStream) { initialize(); MockRequestDecoder decoder; @@ -828,7 +864,7 @@ TEST_F(Http1ServerConnectionImplTest, BadRequestNoStream) { } // Make sure that if the first line is parsed, that sendLocalReply tracks HEAD requests correctly. -TEST_F(Http1ServerConnectionImplTest, BadHeadRequest) { +TEST_P(Http1ServerConnectionImplTest, BadHeadRequest) { initialize(); MockRequestDecoder decoder; @@ -848,7 +884,7 @@ TEST_F(Http1ServerConnectionImplTest, BadHeadRequest) { } // Make sure that if gRPC headers are parsed, they are tracked by sendLocalReply. 
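// --- Illustrative sketch (not part of this patch) ---------------------------
// The TEST_F -> TEST_P conversions above all follow one recipe: derive the
// fixture from testing::TestWithParam<bool>, branch on GetParam() when
// constructing the codec, and register the suite with testing::Bool() so every
// test runs once against the new codec ("New") and once against the legacy one
// ("Legacy"). A self-contained sketch of that recipe (FakeCodec and friends are
// hypothetical stand-ins for the real codec classes):
#include <memory>

#include "gtest/gtest.h"

struct FakeCodec {
  virtual ~FakeCodec() = default;
  virtual const char* name() const = 0;
};
struct NewFakeCodec : FakeCodec {
  const char* name() const override { return "new"; }
};
struct LegacyFakeCodec : FakeCodec {
  const char* name() const override { return "legacy"; }
};

class CodecSelectionTest : public testing::TestWithParam<bool> {
protected:
  void initialize() {
    // Same shape as the fixtures above: the bool parameter picks the
    // implementation, everything else stays identical.
    if (GetParam()) {
      codec_ = std::make_unique<NewFakeCodec>();
    } else {
      codec_ = std::make_unique<LegacyFakeCodec>();
    }
  }
  std::unique_ptr<FakeCodec> codec_;
};

INSTANTIATE_TEST_SUITE_P(Codecs, CodecSelectionTest, testing::Bool(),
                         [](const testing::TestParamInfo<bool>& param) {
                           return param.param ? "New" : "Legacy";
                         });

TEST_P(CodecSelectionTest, SelectsExpectedImplementation) {
  initialize();
  EXPECT_STREQ(GetParam() ? "new" : "legacy", codec_->name());
}
// ---------------------------------------------------------------------------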
-TEST_F(Http1ServerConnectionImplTest, BadGrpcRequest) { +TEST_P(Http1ServerConnectionImplTest, BadGrpcRequest) { initialize(); MockRequestDecoder decoder; @@ -869,7 +905,7 @@ TEST_F(Http1ServerConnectionImplTest, BadGrpcRequest) { // This behavior was observed during CVE-2019-18801 and helped to limit the // scope of affected Envoy configurations. -TEST_F(Http1ServerConnectionImplTest, RejectInvalidMethod) { +TEST_P(Http1ServerConnectionImplTest, RejectInvalidMethod) { initialize(); MockRequestDecoder decoder; @@ -881,7 +917,7 @@ TEST_F(Http1ServerConnectionImplTest, RejectInvalidMethod) { EXPECT_TRUE(isCodecProtocolError(status)); } -TEST_F(Http1ServerConnectionImplTest, BadRequestStartedStream) { +TEST_P(Http1ServerConnectionImplTest, BadRequestStartedStream) { initialize(); MockRequestDecoder decoder; @@ -897,7 +933,7 @@ TEST_F(Http1ServerConnectionImplTest, BadRequestStartedStream) { EXPECT_TRUE(isCodecProtocolError(status)); } -TEST_F(Http1ServerConnectionImplTest, FloodProtection) { +TEST_P(Http1ServerConnectionImplTest, FloodProtection) { initialize(); NiceMock decoder; @@ -948,7 +984,7 @@ TEST_F(Http1ServerConnectionImplTest, FloodProtection) { } } -TEST_F(Http1ServerConnectionImplTest, FloodProtectionOff) { +TEST_P(Http1ServerConnectionImplTest, FloodProtectionOff) { TestScopedRuntime scoped_runtime; Runtime::LoaderSingleton::getExisting()->mergeValues( {{"envoy.reloadable_features.http1_flood_protection", "false"}}); @@ -984,7 +1020,7 @@ TEST_F(Http1ServerConnectionImplTest, FloodProtectionOff) { } } -TEST_F(Http1ServerConnectionImplTest, HostHeaderTranslation) { +TEST_P(Http1ServerConnectionImplTest, HostHeaderTranslation) { initialize(); InSequence sequence; @@ -1003,7 +1039,8 @@ TEST_F(Http1ServerConnectionImplTest, HostHeaderTranslation) { } // Ensures that requests with invalid HTTP header values are properly rejected -TEST_F(Http1ServerConnectionImplTest, HeaderInvalidCharsRejection) { +// when the runtime guard is enabled for the feature. +TEST_P(Http1ServerConnectionImplTest, HeaderInvalidCharsRejection) { TestScopedRuntime scoped_runtime; // When the runtime-guarded feature is enabled, invalid header values // should result in a rejection. @@ -1028,7 +1065,7 @@ TEST_F(Http1ServerConnectionImplTest, HeaderInvalidCharsRejection) { // Ensures that request headers with names containing the underscore character are allowed // when the option is set to allow. -TEST_F(Http1ServerConnectionImplTest, HeaderNameWithUnderscoreAllowed) { +TEST_P(Http1ServerConnectionImplTest, HeaderNameWithUnderscoreAllowed) { headers_with_underscores_action_ = envoy::config::core::v3::HttpProtocolOptions::ALLOW; initialize(); @@ -1052,7 +1089,7 @@ TEST_F(Http1ServerConnectionImplTest, HeaderNameWithUnderscoreAllowed) { // Ensures that request headers with names containing the underscore character are dropped // when the option is set to drop headers. -TEST_F(Http1ServerConnectionImplTest, HeaderNameWithUnderscoreAreDropped) { +TEST_P(Http1ServerConnectionImplTest, HeaderNameWithUnderscoreAreDropped) { headers_with_underscores_action_ = envoy::config::core::v3::HttpProtocolOptions::DROP_HEADER; initialize(); @@ -1075,7 +1112,7 @@ TEST_F(Http1ServerConnectionImplTest, HeaderNameWithUnderscoreAreDropped) { // Ensures that request with header names containing the underscore character are rejected // when the option is set to reject request. 
-TEST_F(Http1ServerConnectionImplTest, HeaderNameWithUnderscoreCauseRequestRejected) { +TEST_P(Http1ServerConnectionImplTest, HeaderNameWithUnderscoreCauseRequestRejected) { headers_with_underscores_action_ = envoy::config::core::v3::HttpProtocolOptions::REJECT_REQUEST; initialize(); @@ -1096,7 +1133,7 @@ TEST_F(Http1ServerConnectionImplTest, HeaderNameWithUnderscoreCauseRequestReject EXPECT_EQ(1, store_.counter("http1.requests_rejected_with_underscores_in_headers").value()); } -TEST_F(Http1ServerConnectionImplTest, HeaderInvalidAuthority) { +TEST_P(Http1ServerConnectionImplTest, HeaderInvalidAuthority) { TestScopedRuntime scoped_runtime; initialize(); @@ -1119,7 +1156,7 @@ TEST_F(Http1ServerConnectionImplTest, HeaderInvalidAuthority) { // Mutate an HTTP GET with embedded NULs, this should always be rejected in some // way (not necessarily with "head value contains NUL" though). -TEST_F(Http1ServerConnectionImplTest, HeaderMutateEmbeddedNul) { +TEST_P(Http1ServerConnectionImplTest, HeaderMutateEmbeddedNul) { const std::string example_input = "GET / HTTP/1.1\r\nHOST: h.com\r\nfoo: barbaz\r\n"; for (size_t n = 1; n < example_input.size(); ++n) { @@ -1143,7 +1180,7 @@ TEST_F(Http1ServerConnectionImplTest, HeaderMutateEmbeddedNul) { // Mutate an HTTP GET with CR or LF. These can cause an error status or maybe // result in a valid decodeHeaders(). In any case, the validHeaderString() // ASSERTs should validate we never have any embedded CR or LF. -TEST_F(Http1ServerConnectionImplTest, HeaderMutateEmbeddedCRLF) { +TEST_P(Http1ServerConnectionImplTest, HeaderMutateEmbeddedCRLF) { const std::string example_input = "GET / HTTP/1.1\r\nHOST: h.com\r\nfoo: barbaz\r\n"; for (const char c : {'\r', '\n'}) { @@ -1163,7 +1200,7 @@ TEST_F(Http1ServerConnectionImplTest, HeaderMutateEmbeddedCRLF) { } } -TEST_F(Http1ServerConnectionImplTest, CloseDuringHeadersComplete) { +TEST_P(Http1ServerConnectionImplTest, CloseDuringHeadersComplete) { initialize(); InSequence sequence; @@ -1185,7 +1222,7 @@ TEST_F(Http1ServerConnectionImplTest, CloseDuringHeadersComplete) { EXPECT_NE(0U, buffer.length()); } -TEST_F(Http1ServerConnectionImplTest, PostWithContentLength) { +TEST_P(Http1ServerConnectionImplTest, PostWithContentLength) { initialize(); InSequence sequence; @@ -1211,7 +1248,7 @@ TEST_F(Http1ServerConnectionImplTest, PostWithContentLength) { // Verify that headers and body with content length are processed correctly and data is merged // before the decodeData call even if delivered in a buffer that holds 1 byte per slice. -TEST_F(Http1ServerConnectionImplTest, PostWithContentLengthFragmentedBuffer) { +TEST_P(Http1ServerConnectionImplTest, PostWithContentLengthFragmentedBuffer) { initialize(); InSequence sequence; @@ -1236,7 +1273,7 @@ TEST_F(Http1ServerConnectionImplTest, PostWithContentLengthFragmentedBuffer) { EXPECT_EQ(0U, buffer.length()); } -TEST_F(Http1ServerConnectionImplTest, HeaderOnlyResponse) { +TEST_P(Http1ServerConnectionImplTest, HeaderOnlyResponse) { initialize(); NiceMock decoder; @@ -1262,7 +1299,7 @@ TEST_F(Http1ServerConnectionImplTest, HeaderOnlyResponse) { // As with Http1ClientConnectionImplTest.LargeHeaderRequestEncode but validate // the response encoder instead of request encoder. 
-TEST_F(Http1ServerConnectionImplTest, LargeHeaderResponseEncode) { +TEST_P(Http1ServerConnectionImplTest, LargeHeaderResponseEncode) { initialize(); NiceMock decoder; @@ -1288,7 +1325,7 @@ TEST_F(Http1ServerConnectionImplTest, LargeHeaderResponseEncode) { output); } -TEST_F(Http1ServerConnectionImplTest, HeaderOnlyResponseTrainProperHeaders) { +TEST_P(Http1ServerConnectionImplTest, HeaderOnlyResponseTrainProperHeaders) { codec_settings_.header_key_format_ = Http1Settings::HeaderKeyFormat::ProperCase; initialize(); @@ -1315,7 +1352,7 @@ TEST_F(Http1ServerConnectionImplTest, HeaderOnlyResponseTrainProperHeaders) { output); } -TEST_F(Http1ServerConnectionImplTest, HeaderOnlyResponseWith204) { +TEST_P(Http1ServerConnectionImplTest, HeaderOnlyResponseWith204) { initialize(); NiceMock decoder; @@ -1339,7 +1376,7 @@ TEST_F(Http1ServerConnectionImplTest, HeaderOnlyResponseWith204) { EXPECT_EQ("HTTP/1.1 204 No Content\r\n\r\n", output); } -TEST_F(Http1ServerConnectionImplTest, HeaderOnlyResponseWith100Then200) { +TEST_P(Http1ServerConnectionImplTest, HeaderOnlyResponseWith100Then200) { initialize(); NiceMock decoder; @@ -1370,7 +1407,7 @@ TEST_F(Http1ServerConnectionImplTest, HeaderOnlyResponseWith100Then200) { EXPECT_EQ("HTTP/1.1 200 OK\r\ncontent-length: 0\r\n\r\n", output); } -TEST_F(Http1ServerConnectionImplTest, MetadataTest) { +TEST_P(Http1ServerConnectionImplTest, MetadataTest) { initialize(); NiceMock decoder; @@ -1393,7 +1430,7 @@ TEST_F(Http1ServerConnectionImplTest, MetadataTest) { EXPECT_EQ(1, store_.counter("http1.metadata_not_supported_error").value()); } -TEST_F(Http1ServerConnectionImplTest, ChunkedResponse) { +TEST_P(Http1ServerConnectionImplTest, ChunkedResponse) { initialize(); NiceMock decoder; @@ -1429,7 +1466,7 @@ TEST_F(Http1ServerConnectionImplTest, ChunkedResponse) { output); } -TEST_F(Http1ServerConnectionImplTest, ChunkedResponseWithTrailers) { +TEST_P(Http1ServerConnectionImplTest, ChunkedResponseWithTrailers) { codec_settings_.enable_trailers_ = true; initialize(); NiceMock decoder; @@ -1462,7 +1499,7 @@ TEST_F(Http1ServerConnectionImplTest, ChunkedResponseWithTrailers) { output); } -TEST_F(Http1ServerConnectionImplTest, ContentLengthResponse) { +TEST_P(Http1ServerConnectionImplTest, ContentLengthResponse) { initialize(); NiceMock decoder; @@ -1489,7 +1526,7 @@ TEST_F(Http1ServerConnectionImplTest, ContentLengthResponse) { EXPECT_EQ("HTTP/1.1 200 OK\r\ncontent-length: 11\r\n\r\nHello World", output); } -TEST_F(Http1ServerConnectionImplTest, HeadRequestResponse) { +TEST_P(Http1ServerConnectionImplTest, HeadRequestResponse) { initialize(); NiceMock decoder; @@ -1513,7 +1550,7 @@ TEST_F(Http1ServerConnectionImplTest, HeadRequestResponse) { EXPECT_EQ("HTTP/1.1 200 OK\r\ncontent-length: 5\r\n\r\n", output); } -TEST_F(Http1ServerConnectionImplTest, HeadChunkedRequestResponse) { +TEST_P(Http1ServerConnectionImplTest, HeadChunkedRequestResponse) { initialize(); NiceMock decoder; @@ -1537,7 +1574,7 @@ TEST_F(Http1ServerConnectionImplTest, HeadChunkedRequestResponse) { EXPECT_EQ("HTTP/1.1 200 OK\r\ntransfer-encoding: chunked\r\n\r\n", output); } -TEST_F(Http1ServerConnectionImplTest, DoubleRequest) { +TEST_P(Http1ServerConnectionImplTest, DoubleRequest) { initialize(); NiceMock decoder; @@ -1563,11 +1600,11 @@ TEST_F(Http1ServerConnectionImplTest, DoubleRequest) { EXPECT_EQ(0U, buffer.length()); } -TEST_F(Http1ServerConnectionImplTest, RequestWithTrailersDropped) { expectTrailersTest(false); } +TEST_P(Http1ServerConnectionImplTest, RequestWithTrailersDropped) { 
expectTrailersTest(false); } -TEST_F(Http1ServerConnectionImplTest, RequestWithTrailersKept) { expectTrailersTest(true); } +TEST_P(Http1ServerConnectionImplTest, RequestWithTrailersKept) { expectTrailersTest(true); } -TEST_F(Http1ServerConnectionImplTest, IgnoreUpgradeH2c) { +TEST_P(Http1ServerConnectionImplTest, IgnoreUpgradeH2c) { initialize(); TestRequestHeaderMapImpl expected_headers{ @@ -1578,7 +1615,7 @@ TEST_F(Http1ServerConnectionImplTest, IgnoreUpgradeH2c) { expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); } -TEST_F(Http1ServerConnectionImplTest, IgnoreUpgradeH2cClose) { +TEST_P(Http1ServerConnectionImplTest, IgnoreUpgradeH2cClose) { initialize(); TestRequestHeaderMapImpl expected_headers{{":authority", "www.somewhere.com"}, @@ -1591,7 +1628,7 @@ TEST_F(Http1ServerConnectionImplTest, IgnoreUpgradeH2cClose) { expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); } -TEST_F(Http1ServerConnectionImplTest, IgnoreUpgradeH2cCloseEtc) { +TEST_P(Http1ServerConnectionImplTest, IgnoreUpgradeH2cCloseEtc) { initialize(); TestRequestHeaderMapImpl expected_headers{{":authority", "www.somewhere.com"}, @@ -1604,7 +1641,7 @@ TEST_F(Http1ServerConnectionImplTest, IgnoreUpgradeH2cCloseEtc) { expectHeadersTest(Protocol::Http11, true, buffer, expected_headers); } -TEST_F(Http1ServerConnectionImplTest, UpgradeRequest) { +TEST_P(Http1ServerConnectionImplTest, UpgradeRequest) { initialize(); InSequence sequence; @@ -1628,7 +1665,7 @@ TEST_F(Http1ServerConnectionImplTest, UpgradeRequest) { EXPECT_TRUE(status.ok()); } -TEST_F(Http1ServerConnectionImplTest, UpgradeRequestWithEarlyData) { +TEST_P(Http1ServerConnectionImplTest, UpgradeRequestWithEarlyData) { initialize(); InSequence sequence; @@ -1644,7 +1681,7 @@ TEST_F(Http1ServerConnectionImplTest, UpgradeRequestWithEarlyData) { EXPECT_TRUE(status.ok()); } -TEST_F(Http1ServerConnectionImplTest, UpgradeRequestWithTEChunked) { +TEST_P(Http1ServerConnectionImplTest, UpgradeRequestWithTEChunked) { initialize(); InSequence sequence; @@ -1662,7 +1699,7 @@ TEST_F(Http1ServerConnectionImplTest, UpgradeRequestWithTEChunked) { EXPECT_TRUE(status.ok()); } -TEST_F(Http1ServerConnectionImplTest, UpgradeRequestWithNoBody) { +TEST_P(Http1ServerConnectionImplTest, UpgradeRequestWithNoBody) { initialize(); InSequence sequence; @@ -1681,7 +1718,7 @@ TEST_F(Http1ServerConnectionImplTest, UpgradeRequestWithNoBody) { } // Test that 101 upgrade responses do not contain content-length or transfer-encoding headers. -TEST_F(Http1ServerConnectionImplTest, UpgradeRequestResponseHeaders) { +TEST_P(Http1ServerConnectionImplTest, UpgradeRequestResponseHeaders) { initialize(); NiceMock decoder; @@ -1705,7 +1742,7 @@ TEST_F(Http1ServerConnectionImplTest, UpgradeRequestResponseHeaders) { EXPECT_EQ("HTTP/1.1 101 Switching Protocols\r\n\r\n", output); } -TEST_F(Http1ServerConnectionImplTest, ConnectRequestNoContentLength) { +TEST_P(Http1ServerConnectionImplTest, ConnectRequestNoContentLength) { initialize(); InSequence sequence; @@ -1729,7 +1766,7 @@ TEST_F(Http1ServerConnectionImplTest, ConnectRequestNoContentLength) { // We use the absolute URL parsing code for CONNECT requests, but it does not // actually allow absolute URLs. 
-TEST_F(Http1ServerConnectionImplTest, ConnectRequestAbsoluteURLNotallowed) { +TEST_P(Http1ServerConnectionImplTest, ConnectRequestAbsoluteURLNotallowed) { initialize(); InSequence sequence; @@ -1742,7 +1779,7 @@ TEST_F(Http1ServerConnectionImplTest, ConnectRequestAbsoluteURLNotallowed) { EXPECT_TRUE(isCodecProtocolError(status)); } -TEST_F(Http1ServerConnectionImplTest, ConnectRequestWithEarlyData) { +TEST_P(Http1ServerConnectionImplTest, ConnectRequestWithEarlyData) { initialize(); InSequence sequence; @@ -1757,7 +1794,7 @@ TEST_F(Http1ServerConnectionImplTest, ConnectRequestWithEarlyData) { EXPECT_TRUE(status.ok()); } -TEST_F(Http1ServerConnectionImplTest, ConnectRequestWithTEChunked) { +TEST_P(Http1ServerConnectionImplTest, ConnectRequestWithTEChunked) { initialize(); InSequence sequence; @@ -1774,7 +1811,7 @@ TEST_F(Http1ServerConnectionImplTest, ConnectRequestWithTEChunked) { EXPECT_EQ(status.message(), "http/1.1 protocol error: unsupported transfer encoding"); } -TEST_F(Http1ServerConnectionImplTest, ConnectRequestWithNonZeroContentLength) { +TEST_P(Http1ServerConnectionImplTest, ConnectRequestWithNonZeroContentLength) { initialize(); InSequence sequence; @@ -1790,7 +1827,7 @@ TEST_F(Http1ServerConnectionImplTest, ConnectRequestWithNonZeroContentLength) { EXPECT_EQ(status.message(), "http/1.1 protocol error: unsupported content length"); } -TEST_F(Http1ServerConnectionImplTest, ConnectRequestWithZeroContentLength) { +TEST_P(Http1ServerConnectionImplTest, ConnectRequestWithZeroContentLength) { initialize(); InSequence sequence; @@ -1807,7 +1844,7 @@ TEST_F(Http1ServerConnectionImplTest, ConnectRequestWithZeroContentLength) { EXPECT_TRUE(status.ok()); } -TEST_F(Http1ServerConnectionImplTest, WatermarkTest) { +TEST_P(Http1ServerConnectionImplTest, WatermarkTest) { EXPECT_CALL(connection_, bufferLimit()).WillOnce(Return(10)); initialize(); @@ -1841,24 +1878,43 @@ TEST_F(Http1ServerConnectionImplTest, WatermarkTest) { ->onUnderlyingConnectionBelowWriteBufferLowWatermark(); } -class Http1ClientConnectionImplTest : public Http1CodecTestBase { +class Http1ClientConnectionImplTest : public Http1CodecTestBase, + public testing::TestWithParam { public: void initialize() { - codec_ = std::make_unique(connection_, http1CodecStats(), callbacks_, - codec_settings_, max_response_headers_count_); + if (GetParam()) { + codec_ = std::make_unique( + connection_, http1CodecStats(), callbacks_, codec_settings_, max_response_headers_count_); + } else { + codec_ = std::make_unique( + connection_, http1CodecStats(), callbacks_, codec_settings_, max_response_headers_count_); + } + } + + void readDisableOnRequestEncoder(RequestEncoder* request_encoder, bool disable) { + if (GetParam()) { + dynamic_cast(request_encoder)->readDisable(disable); + } else { + dynamic_cast(request_encoder)->readDisable(disable); + } } NiceMock connection_; NiceMock callbacks_; NiceMock codec_settings_; - std::unique_ptr codec_; + Http::ClientConnectionPtr codec_; protected: Stats::TestUtil::TestStore store_; uint32_t max_response_headers_count_{Http::DEFAULT_MAX_HEADERS_COUNT}; }; -TEST_F(Http1ClientConnectionImplTest, SimpleGet) { +INSTANTIATE_TEST_SUITE_P(Codecs, Http1ClientConnectionImplTest, testing::Bool(), + [](const testing::TestParamInfo& param) { + return param.param ? 
"New" : "Legacy"; + }); + +TEST_P(Http1ClientConnectionImplTest, SimpleGet) { initialize(); MockResponseDecoder response_decoder; @@ -1872,7 +1928,7 @@ TEST_F(Http1ClientConnectionImplTest, SimpleGet) { EXPECT_EQ("GET / HTTP/1.1\r\ncontent-length: 0\r\n\r\n", output); } -TEST_F(Http1ClientConnectionImplTest, SimpleGetWithHeaderCasing) { +TEST_P(Http1ClientConnectionImplTest, SimpleGetWithHeaderCasing) { codec_settings_.header_key_format_ = Http1Settings::HeaderKeyFormat::ProperCase; initialize(); @@ -1888,7 +1944,7 @@ TEST_F(Http1ClientConnectionImplTest, SimpleGetWithHeaderCasing) { EXPECT_EQ("GET / HTTP/1.1\r\nMy-Custom-Header: hey\r\nContent-Length: 0\r\n\r\n", output); } -TEST_F(Http1ClientConnectionImplTest, HostHeaderTranslate) { +TEST_P(Http1ClientConnectionImplTest, HostHeaderTranslate) { initialize(); MockResponseDecoder response_decoder; @@ -1902,7 +1958,7 @@ TEST_F(Http1ClientConnectionImplTest, HostHeaderTranslate) { EXPECT_EQ("GET / HTTP/1.1\r\nhost: host\r\ncontent-length: 0\r\n\r\n", output); } -TEST_F(Http1ClientConnectionImplTest, Reset) { +TEST_P(Http1ClientConnectionImplTest, Reset) { initialize(); MockResponseDecoder response_decoder; @@ -1916,16 +1972,15 @@ TEST_F(Http1ClientConnectionImplTest, Reset) { // Verify that we correctly enable reads on the connection when the final response is // received. -TEST_F(Http1ClientConnectionImplTest, FlowControlReadDisabledReenable) { +TEST_P(Http1ClientConnectionImplTest, FlowControlReadDisabledReenable) { initialize(); MockResponseDecoder response_decoder; - Http::RequestEncoder* request_encoder = &codec_->newStream(response_decoder); + auto* request_encoder = &codec_->newStream(response_decoder); // Manually read disable. EXPECT_CALL(connection_, readDisable(true)).Times(2); - RequestEncoderImpl* encoder = dynamic_cast(request_encoder); - encoder->readDisable(true); - encoder->readDisable(true); + readDisableOnRequestEncoder(request_encoder, true); + readDisableOnRequestEncoder(request_encoder, true); std::string output; ON_CALL(connection_, write(_, _)).WillByDefault(AddBufferToString(&output)); @@ -1946,7 +2001,7 @@ TEST_F(Http1ClientConnectionImplTest, FlowControlReadDisabledReenable) { EXPECT_TRUE(status.ok()); } -TEST_F(Http1ClientConnectionImplTest, PrematureResponse) { +TEST_P(Http1ClientConnectionImplTest, PrematureResponse) { initialize(); Buffer::OwnedImpl response("HTTP/1.1 408 Request Timeout\r\nConnection: Close\r\n\r\n"); @@ -1954,7 +2009,7 @@ TEST_F(Http1ClientConnectionImplTest, PrematureResponse) { EXPECT_TRUE(isPrematureResponseError(status)); } -TEST_F(Http1ClientConnectionImplTest, EmptyBodyResponse503) { +TEST_P(Http1ClientConnectionImplTest, EmptyBodyResponse503) { initialize(); NiceMock response_decoder; @@ -1968,7 +2023,7 @@ TEST_F(Http1ClientConnectionImplTest, EmptyBodyResponse503) { EXPECT_TRUE(status.ok()); } -TEST_F(Http1ClientConnectionImplTest, EmptyBodyResponse200) { +TEST_P(Http1ClientConnectionImplTest, EmptyBodyResponse200) { initialize(); NiceMock response_decoder; @@ -1982,7 +2037,7 @@ TEST_F(Http1ClientConnectionImplTest, EmptyBodyResponse200) { EXPECT_TRUE(status.ok()); } -TEST_F(Http1ClientConnectionImplTest, HeadRequest) { +TEST_P(Http1ClientConnectionImplTest, HeadRequest) { initialize(); NiceMock response_decoder; @@ -1996,7 +2051,7 @@ TEST_F(Http1ClientConnectionImplTest, HeadRequest) { EXPECT_TRUE(status.ok()); } -TEST_F(Http1ClientConnectionImplTest, 204Response) { +TEST_P(Http1ClientConnectionImplTest, 204Response) { initialize(); NiceMock response_decoder; @@ -2011,7 +2066,7 @@ 
TEST_F(Http1ClientConnectionImplTest, 204Response) { } // 204 No Content with Content-Length is barred by RFC 7230, Section 3.3.2. -TEST_F(Http1ClientConnectionImplTest, 204ResponseContentLengthNotAllowed) { +TEST_P(Http1ClientConnectionImplTest, 204ResponseContentLengthNotAllowed) { // By default, content-length is barred. { initialize(); @@ -2047,7 +2102,7 @@ TEST_F(Http1ClientConnectionImplTest, 204ResponseContentLengthNotAllowed) { // 204 No Content with Content-Length: 0 is technically barred by RFC 7230, Section 3.3.2, but we // allow it. -TEST_F(Http1ClientConnectionImplTest, 204ResponseWithContentLength0) { +TEST_P(Http1ClientConnectionImplTest, 204ResponseWithContentLength0) { { initialize(); @@ -2081,7 +2136,7 @@ TEST_F(Http1ClientConnectionImplTest, 204ResponseWithContentLength0) { } // 204 No Content with Transfer-Encoding headers is barred by RFC 7230, Section 3.3.1. -TEST_F(Http1ClientConnectionImplTest, 204ResponseTransferEncodingNotAllowed) { +TEST_P(Http1ClientConnectionImplTest, 204ResponseTransferEncodingNotAllowed) { // By default, transfer-encoding is barred. { initialize(); @@ -2115,7 +2170,7 @@ TEST_F(Http1ClientConnectionImplTest, 204ResponseTransferEncodingNotAllowed) { } } -TEST_F(Http1ClientConnectionImplTest, 100Response) { +TEST_P(Http1ClientConnectionImplTest, 100Response) { initialize(); NiceMock response_decoder; @@ -2136,7 +2191,7 @@ TEST_F(Http1ClientConnectionImplTest, 100Response) { } // 101 Switching Protocol with Transfer-Encoding headers is barred by RFC 7230, Section 3.3.1. -TEST_F(Http1ClientConnectionImplTest, 101ResponseTransferEncodingNotAllowed) { +TEST_P(Http1ClientConnectionImplTest, 101ResponseTransferEncodingNotAllowed) { // By default, transfer-encoding is barred. { initialize(); @@ -2172,7 +2227,7 @@ TEST_F(Http1ClientConnectionImplTest, 101ResponseTransferEncodingNotAllowed) { } } -TEST_F(Http1ClientConnectionImplTest, BadEncodeParams) { +TEST_P(Http1ClientConnectionImplTest, BadEncodeParams) { initialize(); NiceMock response_decoder; @@ -2185,7 +2240,7 @@ TEST_F(Http1ClientConnectionImplTest, BadEncodeParams) { CodecClientException); } -TEST_F(Http1ClientConnectionImplTest, NoContentLengthResponse) { +TEST_P(Http1ClientConnectionImplTest, NoContentLengthResponse) { initialize(); NiceMock response_decoder; @@ -2207,7 +2262,7 @@ TEST_F(Http1ClientConnectionImplTest, NoContentLengthResponse) { EXPECT_TRUE(status.ok()); } -TEST_F(Http1ClientConnectionImplTest, ResponseWithTrailers) { +TEST_P(Http1ClientConnectionImplTest, ResponseWithTrailers) { initialize(); NiceMock response_decoder; @@ -2222,7 +2277,7 @@ TEST_F(Http1ClientConnectionImplTest, ResponseWithTrailers) { EXPECT_TRUE(status.ok()); } -TEST_F(Http1ClientConnectionImplTest, GiantPath) { +TEST_P(Http1ClientConnectionImplTest, GiantPath) { initialize(); NiceMock response_decoder; @@ -2237,7 +2292,7 @@ TEST_F(Http1ClientConnectionImplTest, GiantPath) { EXPECT_TRUE(status.ok()); } -TEST_F(Http1ClientConnectionImplTest, PrematureUpgradeResponse) { +TEST_P(Http1ClientConnectionImplTest, PrematureUpgradeResponse) { initialize(); // make sure upgradeAllowed doesn't cause crashes if run with no pending response. 
@@ -2247,7 +2302,7 @@ TEST_F(Http1ClientConnectionImplTest, PrematureUpgradeResponse) { EXPECT_TRUE(isPrematureResponseError(status)); } -TEST_F(Http1ClientConnectionImplTest, UpgradeResponse) { +TEST_P(Http1ClientConnectionImplTest, UpgradeResponse) { initialize(); InSequence s; @@ -2283,7 +2338,7 @@ TEST_F(Http1ClientConnectionImplTest, UpgradeResponse) { // Same data as above, but make sure directDispatch immediately hands off any // outstanding data. -TEST_F(Http1ClientConnectionImplTest, UpgradeResponseWithEarlyData) { +TEST_P(Http1ClientConnectionImplTest, UpgradeResponseWithEarlyData) { initialize(); InSequence s; @@ -2307,7 +2362,7 @@ TEST_F(Http1ClientConnectionImplTest, UpgradeResponseWithEarlyData) { EXPECT_TRUE(status.ok()); } -TEST_F(Http1ClientConnectionImplTest, ConnectResponse) { +TEST_P(Http1ClientConnectionImplTest, ConnectResponse) { initialize(); InSequence s; @@ -2338,7 +2393,7 @@ TEST_F(Http1ClientConnectionImplTest, ConnectResponse) { // Same data as above, but make sure directDispatch immediately hands off any // outstanding data. -TEST_F(Http1ClientConnectionImplTest, ConnectResponseWithEarlyData) { +TEST_P(Http1ClientConnectionImplTest, ConnectResponseWithEarlyData) { initialize(); InSequence s; @@ -2357,7 +2412,7 @@ TEST_F(Http1ClientConnectionImplTest, ConnectResponseWithEarlyData) { EXPECT_TRUE(status.ok()); } -TEST_F(Http1ClientConnectionImplTest, ConnectRejected) { +TEST_P(Http1ClientConnectionImplTest, ConnectRejected) { initialize(); InSequence s; @@ -2375,7 +2430,7 @@ TEST_F(Http1ClientConnectionImplTest, ConnectRejected) { EXPECT_TRUE(status.ok()); } -TEST_F(Http1ClientConnectionImplTest, WatermarkTest) { +TEST_P(Http1ClientConnectionImplTest, WatermarkTest) { EXPECT_CALL(connection_, bufferLimit()).WillOnce(Return(10)); initialize(); @@ -2410,7 +2465,7 @@ TEST_F(Http1ClientConnectionImplTest, WatermarkTest) { // caller attempts to close the connection. This causes the network connection to attempt to write // pending data, even in the no flush scenario, which can cause us to go below low watermark // which then raises callbacks for a stream that no longer exists. -TEST_F(Http1ClientConnectionImplTest, HighwatermarkMultipleResponses) { +TEST_P(Http1ClientConnectionImplTest, HighwatermarkMultipleResponses) { initialize(); InSequence s; @@ -2444,7 +2499,7 @@ TEST_F(Http1ClientConnectionImplTest, HighwatermarkMultipleResponses) { // Regression test for https://github.com/envoyproxy/envoy/issues/10655. Make sure we correctly // handle going below low watermark when closing the connection during a completion callback. -TEST_F(Http1ClientConnectionImplTest, LowWatermarkDuringClose) { +TEST_P(Http1ClientConnectionImplTest, LowWatermarkDuringClose) { initialize(); InSequence s; @@ -2474,43 +2529,43 @@ TEST_F(Http1ClientConnectionImplTest, LowWatermarkDuringClose) { EXPECT_TRUE(status.ok()); } -TEST_F(Http1ServerConnectionImplTest, LargeTrailersRejected) { +TEST_P(Http1ServerConnectionImplTest, LargeTrailersRejected) { // Default limit of 60 KiB std::string long_string = "big: " + std::string(60 * 1024, 'q') + "\r\n\r\n\r\n"; testTrailersExceedLimit(long_string, true); } -TEST_F(Http1ServerConnectionImplTest, LargeTrailerFieldRejected) { +TEST_P(Http1ServerConnectionImplTest, LargeTrailerFieldRejected) { // Construct partial headers with a long field name that exceeds the default limit of 60KiB. 
std::string long_string = "bigfield" + std::string(60 * 1024, 'q'); testTrailersExceedLimit(long_string, true); } // Tests that the default limit for the number of request headers is 100. -TEST_F(Http1ServerConnectionImplTest, ManyTrailersRejected) { +TEST_P(Http1ServerConnectionImplTest, ManyTrailersRejected) { // Send a request with 101 headers. testTrailersExceedLimit(createHeaderFragment(101) + "\r\n\r\n", true); } -TEST_F(Http1ServerConnectionImplTest, LargeTrailersRejectedIgnored) { +TEST_P(Http1ServerConnectionImplTest, LargeTrailersRejectedIgnored) { // Default limit of 60 KiB std::string long_string = "big: " + std::string(60 * 1024, 'q') + "\r\n\r\n\r\n"; testTrailersExceedLimit(long_string, false); } -TEST_F(Http1ServerConnectionImplTest, LargeTrailerFieldRejectedIgnored) { +TEST_P(Http1ServerConnectionImplTest, LargeTrailerFieldRejectedIgnored) { // Default limit of 60 KiB std::string long_string = "bigfield" + std::string(60 * 1024, 'q') + ": value\r\n\r\n\r\n"; testTrailersExceedLimit(long_string, false); } // Tests that the default limit for the number of request headers is 100. -TEST_F(Http1ServerConnectionImplTest, ManyTrailersIgnored) { +TEST_P(Http1ServerConnectionImplTest, ManyTrailersIgnored) { // Send a request with 101 headers. testTrailersExceedLimit(createHeaderFragment(101) + "\r\n\r\n", false); } -TEST_F(Http1ServerConnectionImplTest, LargeRequestUrlRejected) { +TEST_P(Http1ServerConnectionImplTest, LargeRequestUrlRejected) { initialize(); std::string exception_reason; @@ -2532,19 +2587,19 @@ TEST_F(Http1ServerConnectionImplTest, LargeRequestUrlRejected) { EXPECT_EQ("http1.headers_too_large", response_encoder->getStream().responseDetails()); } -TEST_F(Http1ServerConnectionImplTest, LargeRequestHeadersRejected) { +TEST_P(Http1ServerConnectionImplTest, LargeRequestHeadersRejected) { // Default limit of 60 KiB std::string long_string = "big: " + std::string(60 * 1024, 'q') + "\r\n"; testRequestHeadersExceedLimit(long_string, ""); } // Tests that the default limit for the number of request headers is 100. -TEST_F(Http1ServerConnectionImplTest, ManyRequestHeadersRejected) { +TEST_P(Http1ServerConnectionImplTest, ManyRequestHeadersRejected) { // Send a request with 101 headers. testRequestHeadersExceedLimit(createHeaderFragment(101), "http1.too_many_headers"); } -TEST_F(Http1ServerConnectionImplTest, LargeRequestHeadersSplitRejected) { +TEST_P(Http1ServerConnectionImplTest, LargeRequestHeadersSplitRejected) { // Default limit of 60 KiB initialize(); @@ -2575,7 +2630,7 @@ TEST_F(Http1ServerConnectionImplTest, LargeRequestHeadersSplitRejected) { // Tests that the 101th request header causes overflow with the default max number of request // headers. -TEST_F(Http1ServerConnectionImplTest, ManyRequestHeadersSplitRejected) { +TEST_P(Http1ServerConnectionImplTest, ManyRequestHeadersSplitRejected) { // Default limit of 100. 
initialize(); @@ -2602,27 +2657,27 @@ TEST_F(Http1ServerConnectionImplTest, ManyRequestHeadersSplitRejected) { EXPECT_EQ(status.message(), "headers size exceeds limit"); } -TEST_F(Http1ServerConnectionImplTest, LargeRequestHeadersAccepted) { +TEST_P(Http1ServerConnectionImplTest, LargeRequestHeadersAccepted) { max_request_headers_kb_ = 65; std::string long_string = "big: " + std::string(64 * 1024, 'q') + "\r\n"; testRequestHeadersAccepted(long_string); } -TEST_F(Http1ServerConnectionImplTest, LargeRequestHeadersAcceptedMaxConfigurable) { +TEST_P(Http1ServerConnectionImplTest, LargeRequestHeadersAcceptedMaxConfigurable) { max_request_headers_kb_ = 96; std::string long_string = "big: " + std::string(95 * 1024, 'q') + "\r\n"; testRequestHeadersAccepted(long_string); } // Tests that the number of request headers is configurable. -TEST_F(Http1ServerConnectionImplTest, ManyRequestHeadersAccepted) { +TEST_P(Http1ServerConnectionImplTest, ManyRequestHeadersAccepted) { max_request_headers_count_ = 150; // Create a request with 150 headers. testRequestHeadersAccepted(createHeaderFragment(150)); } // Tests that incomplete response headers of 80 kB header value fails. -TEST_F(Http1ClientConnectionImplTest, ResponseHeadersWithLargeValueRejected) { +TEST_P(Http1ClientConnectionImplTest, ResponseHeadersWithLargeValueRejected) { initialize(); NiceMock response_decoder; @@ -2641,7 +2696,7 @@ TEST_F(Http1ClientConnectionImplTest, ResponseHeadersWithLargeValueRejected) { } // Tests that incomplete response headers with a 80 kB header field fails. -TEST_F(Http1ClientConnectionImplTest, ResponseHeadersWithLargeFieldRejected) { +TEST_P(Http1ClientConnectionImplTest, ResponseHeadersWithLargeFieldRejected) { initialize(); NiceMock decoder; @@ -2661,7 +2716,7 @@ TEST_F(Http1ClientConnectionImplTest, ResponseHeadersWithLargeFieldRejected) { } // Tests that the size of response headers for HTTP/1 must be under 80 kB. -TEST_F(Http1ClientConnectionImplTest, LargeResponseHeadersAccepted) { +TEST_P(Http1ClientConnectionImplTest, LargeResponseHeadersAccepted) { initialize(); NiceMock response_decoder; @@ -2679,7 +2734,7 @@ TEST_F(Http1ClientConnectionImplTest, LargeResponseHeadersAccepted) { // Regression test for CVE-2019-18801. Large method headers should not trigger // ASSERTs or ASAN, which they previously did. -TEST_F(Http1ClientConnectionImplTest, LargeMethodRequestEncode) { +TEST_P(Http1ClientConnectionImplTest, LargeMethodRequestEncode) { initialize(); NiceMock response_decoder; @@ -2697,7 +2752,7 @@ TEST_F(Http1ClientConnectionImplTest, LargeMethodRequestEncode) { // in CVE-2019-18801, but the related code does explicit size calculations on // both path and method (these are the two distinguished headers). So, // belt-and-braces. -TEST_F(Http1ClientConnectionImplTest, LargePathRequestEncode) { +TEST_P(Http1ClientConnectionImplTest, LargePathRequestEncode) { initialize(); NiceMock response_decoder; @@ -2713,7 +2768,7 @@ TEST_F(Http1ClientConnectionImplTest, LargePathRequestEncode) { // As with LargeMethodEncode, but for an arbitrary header. This was not an issue // in CVE-2019-18801. -TEST_F(Http1ClientConnectionImplTest, LargeHeaderRequestEncode) { +TEST_P(Http1ClientConnectionImplTest, LargeHeaderRequestEncode) { initialize(); NiceMock response_decoder; @@ -2730,7 +2785,7 @@ TEST_F(Http1ClientConnectionImplTest, LargeHeaderRequestEncode) { } // Exception called when the number of response headers exceeds the default value of 100. 
-TEST_F(Http1ClientConnectionImplTest, ManyResponseHeadersRejected) { +TEST_P(Http1ClientConnectionImplTest, ManyResponseHeadersRejected) { initialize(); NiceMock response_decoder; @@ -2748,7 +2803,7 @@ TEST_F(Http1ClientConnectionImplTest, ManyResponseHeadersRejected) { } // Tests that the number of response headers is configurable. -TEST_F(Http1ClientConnectionImplTest, ManyResponseHeadersAccepted) { +TEST_P(Http1ClientConnectionImplTest, ManyResponseHeadersAccepted) { max_response_headers_count_ = 152; initialize(); @@ -2765,6 +2820,5 @@ TEST_F(Http1ClientConnectionImplTest, ManyResponseHeadersAccepted) { status = codec_->dispatch(buffer); } -} // namespace Http1 } // namespace Http } // namespace Envoy diff --git a/test/common/http/http2/BUILD b/test/common/http/http2/BUILD index 82627e08201d..9d64120d5dd7 100644 --- a/test/common/http/http2/BUILD +++ b/test/common/http/http2/BUILD @@ -10,35 +10,50 @@ licenses(["notice"]) # Apache 2 envoy_package() +CODEC_TEST_DEPS = [ + ":codec_impl_test_util", + "//source/common/event:dispatcher_lib", + "//source/common/http:exception_lib", + "//source/common/http:header_map_lib", + "//source/common/http:header_utility_lib", + "//source/common/http/http2:codec_legacy_lib", + "//source/common/http/http2:codec_lib", + "//source/common/runtime:runtime_lib", + "//source/common/stats:stats_lib", + "//test/common/http:common_lib", + "//test/common/http/http2:http2_frame", + "//test/common/stats:stat_test_utility_lib", + "//test/mocks/http:http_mocks", + "//test/mocks/init:init_mocks", + "//test/mocks/local_info:local_info_mocks", + "//test/mocks/network:network_mocks", + "//test/mocks/protobuf:protobuf_mocks", + "//test/mocks/thread_local:thread_local_mocks", + "//test/mocks/upstream:transport_socket_match_mocks", + "//test/mocks/upstream:upstream_mocks", + "//test/test_common:logging_lib", + "//test/test_common:registry_lib", + "//test/test_common:test_runtime_lib", + "//test/test_common:utility_lib", +] + envoy_cc_test( name = "codec_impl_test", srcs = ["codec_impl_test.cc"], shard_count = 5, tags = ["fails_on_windows"], - deps = [ - ":codec_impl_test_util", - "//source/common/event:dispatcher_lib", - "//source/common/http:exception_lib", - "//source/common/http:header_map_lib", - "//source/common/http:header_utility_lib", - "//source/common/http/http2:codec_lib", - "//source/common/stats:stats_lib", - "//test/common/http:common_lib", - "//test/common/http/http2:http2_frame", - "//test/common/stats:stat_test_utility_lib", - "//test/mocks/http:http_mocks", - "//test/mocks/init:init_mocks", - "//test/mocks/local_info:local_info_mocks", - "//test/mocks/network:network_mocks", - "//test/mocks/protobuf:protobuf_mocks", - "//test/mocks/thread_local:thread_local_mocks", - "//test/mocks/upstream:transport_socket_match_mocks", - "//test/mocks/upstream:upstream_mocks", - "//test/test_common:logging_lib", - "//test/test_common:registry_lib", - "//test/test_common:test_runtime_lib", - "//test/test_common:utility_lib", + deps = CODEC_TEST_DEPS, +) + +envoy_cc_test( + name = "codec_impl_legacy_test", + srcs = ["codec_impl_test.cc"], + args = [ + "--runtime-feature-disable-for-tests=envoy.reloadable_features.new_codec_behavior", ], + shard_count = 5, + tags = ["fails_on_windows"], + deps = CODEC_TEST_DEPS, ) envoy_cc_test_library( @@ -46,6 +61,7 @@ envoy_cc_test_library( hdrs = ["codec_impl_test_util.h"], external_deps = ["abseil_optional"], deps = [ + "//source/common/http/http2:codec_legacy_lib", "//source/common/http/http2:codec_lib", ], ) diff --git 
a/test/common/http/http2/codec_impl_test.cc b/test/common/http/http2/codec_impl_test.cc index 1deb3c412284..fa1c92346e73 100644 --- a/test/common/http/http2/codec_impl_test.cc +++ b/test/common/http/http2/codec_impl_test.cc @@ -7,6 +7,7 @@ #include "common/http/exception.h" #include "common/http/header_map_impl.h" #include "common/http/http2/codec_impl.h" +#include "common/runtime/runtime_features.h" #include "test/common/http/common.h" #include "test/common/http/http2/http2_frame.h" @@ -73,7 +74,7 @@ class Http2CodecImplTestFixture { }; struct ConnectionWrapper { - Http::Status dispatch(const Buffer::Instance& data, ConnectionImpl& connection) { + Http::Status dispatch(const Buffer::Instance& data, Connection& connection) { Http::Status status = Http::okStatus(); buffer_.add(data); if (!dispatching_) { @@ -128,13 +129,23 @@ class Http2CodecImplTestFixture { virtual void initialize() { http2OptionsFromTuple(client_http2_options_, client_settings_); http2OptionsFromTuple(server_http2_options_, server_settings_); - client_ = std::make_unique( - client_connection_, client_callbacks_, client_stats_store_, client_http2_options_, - max_request_headers_kb_, max_response_headers_count_, ProdNghttp2SessionFactory::get()); - server_ = std::make_unique( - server_connection_, server_callbacks_, server_stats_store_, server_http2_options_, - max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_); - + if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.new_codec_behavior")) { + client_ = std::make_unique( + client_connection_, client_callbacks_, client_stats_store_, client_http2_options_, + max_request_headers_kb_, max_response_headers_count_, + ProdNghttp2SessionFactoryNew::get()); + server_ = std::make_unique( + server_connection_, server_callbacks_, server_stats_store_, server_http2_options_, + max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_); + } else { + client_ = std::make_unique( + client_connection_, client_callbacks_, client_stats_store_, client_http2_options_, + max_request_headers_kb_, max_response_headers_count_, + ProdNghttp2SessionFactoryLegacy::get()); + server_ = std::make_unique( + server_connection_, server_callbacks_, server_stats_store_, server_http2_options_, + max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_); + } request_encoder_ = &client_->newStream(response_decoder_); setupDefaultConnectionMocks(); @@ -229,13 +240,13 @@ class Http2CodecImplTestFixture { envoy::config::core::v3::Http2ProtocolOptions client_http2_options_; NiceMock client_connection_; MockConnectionCallbacks client_callbacks_; - std::unique_ptr client_; + std::unique_ptr client_; ConnectionWrapper client_wrapper_; Stats::TestUtil::TestStore server_stats_store_; envoy::config::core::v3::Http2ProtocolOptions server_http2_options_; NiceMock server_connection_; MockServerConnectionCallbacks server_callbacks_; - std::unique_ptr server_; + std::unique_ptr server_; ConnectionWrapper server_wrapper_; MockResponseDecoder response_decoder_; RequestEncoder* request_encoder_; @@ -871,21 +882,21 @@ TEST_P(Http2CodecImplFlowControlTest, TestFlowControlInPendingSendData) { // stream. 
EXPECT_EQ(0, nghttp2_session_get_stream_local_window_size(server_->session(), 1)); EXPECT_EQ(0, nghttp2_session_get_stream_remote_window_size(client_->session(), 1)); - EXPECT_EQ(initial_stream_window, server_->getStream(1)->unconsumed_bytes_); + EXPECT_EQ(initial_stream_window, server_->getStreamUnconsumedBytes(1)); // Now that the flow control window is full, further data causes the send buffer to back up. Buffer::OwnedImpl more_long_data(std::string(initial_stream_window, 'a')); request_encoder_->encodeData(more_long_data, false); - EXPECT_EQ(initial_stream_window, client_->getStream(1)->pending_send_data_.length()); + EXPECT_EQ(initial_stream_window, client_->getStreamPendingSendDataLength(1)); EXPECT_EQ(initial_stream_window, TestUtility::findGauge(client_stats_store_, "http2.pending_send_bytes")->value()); - EXPECT_EQ(initial_stream_window, server_->getStream(1)->unconsumed_bytes_); + EXPECT_EQ(initial_stream_window, server_->getStreamUnconsumedBytes(1)); // If we go over the limit, the stream callbacks should fire. EXPECT_CALL(callbacks, onAboveWriteBufferHighWatermark()); Buffer::OwnedImpl last_byte("!"); request_encoder_->encodeData(last_byte, false); - EXPECT_EQ(initial_stream_window + 1, client_->getStream(1)->pending_send_data_.length()); + EXPECT_EQ(initial_stream_window + 1, client_->getStreamPendingSendDataLength(1)); EXPECT_EQ(initial_stream_window + 1, TestUtility::findGauge(client_stats_store_, "http2.pending_send_bytes")->value()); @@ -930,7 +941,7 @@ TEST_P(Http2CodecImplFlowControlTest, TestFlowControlInPendingSendData) { EXPECT_CALL(callbacks2, onBelowWriteBufferLowWatermark()).Times(0); EXPECT_CALL(callbacks3, onBelowWriteBufferLowWatermark()); server_->getStream(1)->readDisable(false); - EXPECT_EQ(0, client_->getStream(1)->pending_send_data_.length()); + EXPECT_EQ(0, client_->getStreamPendingSendDataLength(1)); EXPECT_EQ(0, TestUtility::findGauge(client_stats_store_, "http2.pending_send_bytes")->value()); // The extra 1 byte sent won't trigger another window update, so the final window should be the // initial window minus the last 1 byte flush from the client to server. @@ -975,7 +986,7 @@ TEST_P(Http2CodecImplFlowControlTest, EarlyResetRestoresWindow) { // stream. EXPECT_EQ(0, nghttp2_session_get_stream_local_window_size(server_->session(), 1)); EXPECT_EQ(0, nghttp2_session_get_stream_remote_window_size(client_->session(), 1)); - EXPECT_EQ(initial_stream_window, server_->getStream(1)->unconsumed_bytes_); + EXPECT_EQ(initial_stream_window, server_->getStreamUnconsumedBytes(1)); EXPECT_GT(initial_connection_window, nghttp2_session_get_remote_window_size(client_->session())); EXPECT_CALL(server_stream_callbacks_, @@ -1017,7 +1028,7 @@ TEST_P(Http2CodecImplFlowControlTest, FlowControlPendingRecvData) { // the recv buffer can be overrun by a client which negotiates a larger // SETTINGS_MAX_FRAME_SIZE but there's no current easy way to tweak that in // envoy (without sending raw HTTP/2 frames) so we lower the buffer limit instead. 
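The flow-control assertions above depend on onAboveWriteBufferHighWatermark and onBelowWriteBufferLowWatermark callbacks firing as pending send data accumulates and later drains. The snippet below is a simplified, self-contained stand-in for that mechanism, not Envoy's actual WatermarkBuffer; the WatermarkCounter name and the byte-count-only model are assumptions made to keep the illustration short.

#include <cstdint>
#include <functional>
#include <iostream>

// Tracks a byte count and fires the high/low callbacks the way the assertions above expect:
// above_high when the buffered size crosses the high watermark, below_low when draining
// drops it back under the low watermark.
class WatermarkCounter {
public:
  WatermarkCounter(uint64_t low, uint64_t high, std::function<void()> above_high,
                   std::function<void()> below_low)
      : low_(low), high_(high), above_high_(std::move(above_high)),
        below_low_(std::move(below_low)) {}

  void add(uint64_t bytes) {
    const uint64_t before = size_;
    size_ += bytes;
    if (before < high_ && size_ >= high_) {
      above_high_();
    }
  }

  void drain(uint64_t bytes) {
    const uint64_t before = size_;
    size_ = bytes > size_ ? 0 : size_ - bytes;
    if (before >= low_ && size_ < low_) {
      below_low_();
    }
  }

private:
  const uint64_t low_;
  const uint64_t high_;
  std::function<void()> above_high_;
  std::function<void()> below_low_;
  uint64_t size_{0};
};

int main() {
  WatermarkCounter counter(
      10, 20, [] { std::cout << "above high watermark\n"; },
      [] { std::cout << "below low watermark\n"; });
  counter.add(25);   // crosses the high watermark
  counter.drain(20); // drops under the low watermark
}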
- server_->getStream(1)->setWriteBufferWatermarks(10, 20); + server_->setStreamWriteBufferWatermarks(1, 10, 20); EXPECT_CALL(request_decoder_, decodeData(_, false)); Buffer::OwnedImpl data(std::string(40, 'a')); @@ -1215,13 +1226,23 @@ class Http2CodecImplStreamLimitTest : public Http2CodecImplTest {}; TEST_P(Http2CodecImplStreamLimitTest, MaxClientStreams) { http2OptionsFromTuple(client_http2_options_, ::testing::get<0>(GetParam())); http2OptionsFromTuple(server_http2_options_, ::testing::get<1>(GetParam())); - client_ = std::make_unique( - client_connection_, client_callbacks_, client_stats_store_, client_http2_options_, - max_request_headers_kb_, max_response_headers_count_, ProdNghttp2SessionFactory::get()); - server_ = std::make_unique( - server_connection_, server_callbacks_, server_stats_store_, server_http2_options_, - max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_); + if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.new_codec_behavior")) { + client_ = std::make_unique( + client_connection_, client_callbacks_, client_stats_store_, client_http2_options_, + max_request_headers_kb_, max_response_headers_count_, ProdNghttp2SessionFactoryNew::get()); + server_ = std::make_unique( + server_connection_, server_callbacks_, server_stats_store_, server_http2_options_, + max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_); + } else { + client_ = std::make_unique( + client_connection_, client_callbacks_, client_stats_store_, client_http2_options_, + max_request_headers_kb_, max_response_headers_count_, + ProdNghttp2SessionFactoryLegacy::get()); + server_ = std::make_unique( + server_connection_, server_callbacks_, server_stats_store_, server_http2_options_, + max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_); + } for (int i = 0; i < 101; ++i) { request_encoder_ = &client_->newStream(response_decoder_); setupDefaultConnectionMocks(); @@ -2031,50 +2052,64 @@ TEST_P(Http2CodecImplTest, ConnectTest) { request_encoder_->encodeHeaders(request_headers, false); } -class TestNghttp2SessionFactory; +template class TestNghttp2SessionFactory; // Test client for H/2 METADATA frame edge cases. -class MetadataTestClientConnectionImpl : public TestClientConnectionImpl { +template +class MetadataTestClientConnectionImpl : public TestClientConnectionImplType { public: MetadataTestClientConnectionImpl( Network::Connection& connection, Http::ConnectionCallbacks& callbacks, Stats::Scope& scope, const envoy::config::core::v3::Http2ProtocolOptions& http2_options, uint32_t max_request_headers_kb, uint32_t max_request_headers_count, - Nghttp2SessionFactory& http2_session_factory) - : TestClientConnectionImpl(connection, callbacks, scope, http2_options, - max_request_headers_kb, max_request_headers_count, - http2_session_factory) {} + typename TestClientConnectionImplType::SessionFactory& http2_session_factory) + : TestClientConnectionImplType(connection, callbacks, scope, http2_options, + max_request_headers_kb, max_request_headers_count, + http2_session_factory) {} // Overrides TestClientConnectionImpl::submitMetadata(). bool submitMetadata(const MetadataMapVector& metadata_map_vector, int32_t stream_id) override { // Creates metadata payload. 
encoder_.createPayload(metadata_map_vector); for (uint8_t flags : encoder_.payloadFrameFlagBytes()) { - int result = nghttp2_submit_extension(session(), ::Envoy::Http::METADATA_FRAME_TYPE, flags, - stream_id, nullptr); + int result = + nghttp2_submit_extension(TestClientConnectionImplType::session(), + ::Envoy::Http::METADATA_FRAME_TYPE, flags, stream_id, nullptr); if (result != 0) { return false; } } // Triggers nghttp2 to populate the payloads of the METADATA frames. - int result = nghttp2_session_send(session()); + int result = nghttp2_session_send(TestClientConnectionImplType::session()); return result == 0; } protected: - friend class TestNghttp2SessionFactory; + template friend class TestNghttp2SessionFactory; MetadataEncoder encoder_; }; -class TestNghttp2SessionFactory : public Nghttp2SessionFactory { +using MetadataTestClientConnectionImplNew = + MetadataTestClientConnectionImpl; +using MetadataTestClientConnectionImplLegacy = + MetadataTestClientConnectionImpl; + +struct Nghttp2SessionFactoryDeleter { + virtual ~Nghttp2SessionFactoryDeleter() = default; +}; + +template +class TestNghttp2SessionFactory : public Nghttp2SessionFactoryType, + public Nghttp2SessionFactoryDeleter { public: ~TestNghttp2SessionFactory() override { nghttp2_session_callbacks_del(callbacks_); nghttp2_option_del(options_); } - nghttp2_session* create(const nghttp2_session_callbacks*, ConnectionImpl* connection, + nghttp2_session* create(const nghttp2_session_callbacks*, + typename Nghttp2SessionFactoryType::ConnectionImplType* connection, const nghttp2_option*) override { // Only need to provide callbacks required to send METADATA frames. nghttp2_session_callbacks_new(&callbacks_); @@ -2083,16 +2118,18 @@ class TestNghttp2SessionFactory : public Nghttp2SessionFactory { [](nghttp2_session*, uint8_t* data, size_t length, const nghttp2_frame*, void* user_data) -> ssize_t { // Double cast required due to multiple inheritance. - return static_cast( - static_cast(user_data)) + return static_cast*>( + static_cast( + user_data)) ->encoder_.packNextFramePayload(data, length); }); nghttp2_session_callbacks_set_send_callback( callbacks_, [](nghttp2_session*, const uint8_t* data, size_t length, int, void* user_data) -> ssize_t { // Cast down to MetadataTestClientConnectionImpl to leverage friendship. 
- return static_cast( - static_cast(user_data)) + return static_cast*>( + static_cast( + user_data)) ->onSend(data, length); }); nghttp2_option_new(&options_); @@ -2102,7 +2139,7 @@ class TestNghttp2SessionFactory : public Nghttp2SessionFactory { return session; } - void init(nghttp2_session*, ConnectionImpl*, + void init(nghttp2_session*, typename Nghttp2SessionFactoryType::ConnectionImplType*, const envoy::config::core::v3::Http2ProtocolOptions&) override {} private: @@ -2110,6 +2147,12 @@ class TestNghttp2SessionFactory : public Nghttp2SessionFactory { nghttp2_option* options_; }; +using TestNghttp2SessionFactoryNew = + TestNghttp2SessionFactory; +using TestNghttp2SessionFactoryLegacy = + TestNghttp2SessionFactory; + class Http2CodecMetadataTest : public Http2CodecImplTestFixture, public ::testing::Test { public: Http2CodecMetadataTest() = default; @@ -2119,12 +2162,27 @@ class Http2CodecMetadataTest : public Http2CodecImplTestFixture, public ::testin allow_metadata_ = true; http2OptionsFromTuple(client_http2_options_, client_settings_); http2OptionsFromTuple(server_http2_options_, server_settings_); - client_ = std::make_unique( - client_connection_, client_callbacks_, client_stats_store_, client_http2_options_, - max_request_headers_kb_, max_response_headers_count_, http2_session_factory_); - server_ = std::make_unique( - server_connection_, server_callbacks_, server_stats_store_, server_http2_options_, - max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_); + if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.new_codec_behavior")) { + std::unique_ptr session_factory = + std::make_unique(); + client_ = std::make_unique( + client_connection_, client_callbacks_, client_stats_store_, client_http2_options_, + max_request_headers_kb_, max_response_headers_count_, *session_factory); + server_ = std::make_unique( + server_connection_, server_callbacks_, server_stats_store_, server_http2_options_, + max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_); + http2_session_factory_ = std::move(session_factory); + } else { + std::unique_ptr session_factory = + std::make_unique(); + client_ = std::make_unique( + client_connection_, client_callbacks_, client_stats_store_, client_http2_options_, + max_request_headers_kb_, max_response_headers_count_, *session_factory); + server_ = std::make_unique( + server_connection_, server_callbacks_, server_stats_store_, server_http2_options_, + max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_); + http2_session_factory_ = std::move(session_factory); + } ON_CALL(client_connection_, write(_, _)) .WillByDefault(Invoke([&](Buffer::Instance& data, bool) -> void { ASSERT_TRUE(server_wrapper_.dispatch(data, *server_).ok()); @@ -2136,7 +2194,7 @@ class Http2CodecMetadataTest : public Http2CodecImplTestFixture, public ::testin } private: - TestNghttp2SessionFactory http2_session_factory_; + std::unique_ptr http2_session_factory_; }; // Validates noop handling of METADATA frames without a known stream ID. 
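The "Double cast required due to multiple inheritance" comments above arise because nghttp2 hands its callbacks only a void* user_data pointer: it must first be cast back to the exact type that was registered before converting across base classes, otherwise the base-subobject address adjustment is skipped. A minimal, self-contained illustration with toy types (A, B, C stand in for the real connection classes):

#include <cassert>

// C inherits from two polymorphic bases, so a B* does not point at the same address as the
// C object itself.
struct A {
  virtual ~A() = default;
  int a{1};
};
struct B {
  virtual ~B() = default;
  int b{2};
};
struct C : public A, public B {};

int main() {
  C c;
  // The callback registration only keeps a void*, exactly like nghttp2's user_data.
  void* user_data = &c;
  // Correct recovery: cast back to the exact registered type first, then convert to the base,
  // which applies the required pointer adjustment for the B subobject.
  B* b = static_cast<B*>(static_cast<C*>(user_data));
  assert(b->b == 2);
  // A single static_cast<B*>(user_data) would skip that adjustment and read the wrong bytes,
  // which is why the callbacks above perform the double cast.
  return 0;
}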
diff --git a/test/common/http/http2/codec_impl_test_util.h b/test/common/http/http2/codec_impl_test_util.h index 1eb8bd581a9e..2ba9f545a20c 100644 --- a/test/common/http/http2/codec_impl_test_util.h +++ b/test/common/http/http2/codec_impl_test_util.h @@ -3,6 +3,7 @@ #include "envoy/http/codec.h" #include "common/http/http2/codec_impl.h" +#include "common/http/http2/codec_impl_legacy.h" #include "common/http/utility.h" namespace Envoy { @@ -32,7 +33,7 @@ class TestCodecSettingsProvider { return it->second; } -protected: + // protected: // Stores SETTINGS parameters contained in |settings_frame| to make them available via // getRemoteSettingsParameterValue(). void onSettingsFrame(const nghttp2_settings& settings_frame) { @@ -57,9 +58,23 @@ class TestCodecSettingsProvider { std::unordered_map settings_; }; -class TestServerConnectionImpl : public TestCodecStatsProvider, - public ServerConnectionImpl, - public TestCodecSettingsProvider { +struct ServerCodecFacade : public virtual Connection { + virtual nghttp2_session* session() PURE; + virtual Http::Stream* getStream(int32_t stream_id) PURE; + virtual uint32_t getStreamUnconsumedBytes(int32_t stream_id) PURE; + virtual void setStreamWriteBufferWatermarks(int32_t stream_id, uint32_t low_watermark, + uint32_t high_watermark) PURE; +}; + +class TestServerConnection : public TestCodecStatsProvider, + public TestCodecSettingsProvider, + public ServerCodecFacade { +public: + TestServerConnection(Stats::Scope& scope) : TestCodecStatsProvider(scope) {} +}; + +template +class TestServerConnectionImpl : public TestServerConnection, public CodecImplType { public: TestServerConnectionImpl( Network::Connection& connection, ServerConnectionCallbacks& callbacks, Stats::Scope& scope, @@ -67,50 +82,94 @@ class TestServerConnectionImpl : public TestCodecStatsProvider, uint32_t max_request_headers_kb, uint32_t max_request_headers_count, envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction headers_with_underscores_action) - : TestCodecStatsProvider(scope), - ServerConnectionImpl(connection, callbacks, http2CodecStats(), http2_options, - max_request_headers_kb, max_request_headers_count, - headers_with_underscores_action) {} - nghttp2_session* session() { return session_; } - using ServerConnectionImpl::getStream; + : TestServerConnection(scope), + CodecImplType(connection, callbacks, http2CodecStats(), http2_options, + max_request_headers_kb, max_request_headers_count, + headers_with_underscores_action) {} + + // ServerCodecFacade + nghttp2_session* session() override { return CodecImplType::session_; } + Http::Stream* getStream(int32_t stream_id) override { + return CodecImplType::getStream(stream_id); + } + uint32_t getStreamUnconsumedBytes(int32_t stream_id) override { + return CodecImplType::getStream(stream_id)->unconsumed_bytes_; + } + void setStreamWriteBufferWatermarks(int32_t stream_id, uint32_t low_watermark, + uint32_t high_watermark) override { + CodecImplType::getStream(stream_id)->setWriteBufferWatermarks(low_watermark, high_watermark); + } protected: // Overrides ServerConnectionImpl::onSettingsForTest(). 
void onSettingsForTest(const nghttp2_settings& settings) override { onSettingsFrame(settings); } }; -class TestClientConnectionImpl : public TestCodecStatsProvider, - public ClientConnectionImpl, - public TestCodecSettingsProvider { +using TestServerConnectionImplLegacy = + TestServerConnectionImpl; +using TestServerConnectionImplNew = + TestServerConnectionImpl; + +struct ClientCodecFacade : public ClientConnection { + virtual nghttp2_session* session() PURE; + virtual Http::Stream* getStream(int32_t stream_id) PURE; + virtual uint64_t getStreamPendingSendDataLength(int32_t stream_id) PURE; + virtual void sendPendingFrames() PURE; + virtual bool submitMetadata(const MetadataMapVector& mm_vector, int32_t stream_id) PURE; +}; + +class TestClientConnection : public TestCodecStatsProvider, + public TestCodecSettingsProvider, + public ClientCodecFacade { +public: + TestClientConnection(Stats::Scope& scope) : TestCodecStatsProvider(scope) {} +}; + +template +class TestClientConnectionImpl : public TestClientConnection, public CodecImplType { public: TestClientConnectionImpl(Network::Connection& connection, Http::ConnectionCallbacks& callbacks, Stats::Scope& scope, const envoy::config::core::v3::Http2ProtocolOptions& http2_options, uint32_t max_request_headers_kb, uint32_t max_request_headers_count, - Nghttp2SessionFactory& http2_session_factory) - : TestCodecStatsProvider(scope), - ClientConnectionImpl(connection, callbacks, http2CodecStats(), http2_options, - max_request_headers_kb, max_request_headers_count, - http2_session_factory) {} - - nghttp2_session* session() { return session_; } - + typename CodecImplType::SessionFactory& http2_session_factory) + : TestClientConnection(scope), + CodecImplType(connection, callbacks, http2CodecStats(), http2_options, + max_request_headers_kb, max_request_headers_count, http2_session_factory) {} + + // ClientCodecFacade + RequestEncoder& newStream(ResponseDecoder& response_decoder) override { + return CodecImplType::newStream(response_decoder); + } + nghttp2_session* session() override { return CodecImplType::session_; } + Http::Stream* getStream(int32_t stream_id) override { + return CodecImplType::getStream(stream_id); + } + uint64_t getStreamPendingSendDataLength(int32_t stream_id) override { + return CodecImplType::getStream(stream_id)->pending_send_data_.length(); + } + void sendPendingFrames() override { CodecImplType::sendPendingFrames(); } // Submits an H/2 METADATA frame to the peer. // Returns true on success, false otherwise. - virtual bool submitMetadata(const MetadataMapVector& mm_vector, int32_t stream_id) { + bool submitMetadata(const MetadataMapVector& mm_vector, int32_t stream_id) override { UNREFERENCED_PARAMETER(mm_vector); UNREFERENCED_PARAMETER(stream_id); return false; } - using ClientConnectionImpl::getStream; - using ConnectionImpl::sendPendingFrames; - protected: // Overrides ClientConnectionImpl::onSettingsForTest(). 
void onSettingsForTest(const nghttp2_settings& settings) override { onSettingsFrame(settings); } }; +using TestClientConnectionImplLegacy = + TestClientConnectionImpl; +using TestClientConnectionImplNew = + TestClientConnectionImpl; + +using ProdNghttp2SessionFactoryLegacy = Envoy::Http::Legacy::Http2::ProdNghttp2SessionFactory; +using ProdNghttp2SessionFactoryNew = Envoy::Http::Http2::ProdNghttp2SessionFactory; + } // namespace Http2 } // namespace Http } // namespace Envoy diff --git a/test/common/http/http2/frame_replay_test.cc b/test/common/http/http2/frame_replay_test.cc index c88458e10c7e..aadda98c8b3d 100644 --- a/test/common/http/http2/frame_replay_test.cc +++ b/test/common/http/http2/frame_replay_test.cc @@ -26,7 +26,7 @@ class RequestFrameCommentTest : public ::testing::Test {}; class ResponseFrameCommentTest : public ::testing::Test {}; // Creates and sets up a stream to reply to. -void setupStream(ClientCodecFrameInjector& codec, TestClientConnectionImpl& connection) { +void setupStream(ClientCodecFrameInjector& codec, TestClientConnectionImplNew& connection) { codec.request_encoder_ = &connection.newStream(codec.response_decoder_); codec.request_encoder_->getStream().addCallbacks(codec.client_stream_callbacks_); // Setup a single stream to inject frames as a reply to. @@ -56,7 +56,7 @@ TEST_F(RequestFrameCommentTest, SimpleExampleHuffman) { // Validate HEADERS decode. ServerCodecFrameInjector codec; - TestServerConnectionImpl connection( + TestServerConnectionImplNew connection( codec.server_connection_, codec.server_callbacks_, codec.stats_store_, codec.options_, Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, envoy::config::core::v3::HttpProtocolOptions::ALLOW); @@ -89,7 +89,7 @@ TEST_F(ResponseFrameCommentTest, SimpleExampleHuffman) { // Validate HEADERS decode. ClientCodecFrameInjector codec; - TestClientConnectionImpl connection( + TestClientConnectionImplNew connection( codec.client_connection_, codec.client_callbacks_, codec.stats_store_, codec.options_, Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, ProdNghttp2SessionFactory::get()); @@ -134,7 +134,7 @@ TEST_F(RequestFrameCommentTest, SimpleExamplePlain) { // Validate HEADERS decode. ServerCodecFrameInjector codec; - TestServerConnectionImpl connection( + TestServerConnectionImplNew connection( codec.server_connection_, codec.server_callbacks_, codec.stats_store_, codec.options_, Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, envoy::config::core::v3::HttpProtocolOptions::ALLOW); @@ -169,7 +169,7 @@ TEST_F(ResponseFrameCommentTest, SimpleExamplePlain) { // Validate HEADERS decode. ClientCodecFrameInjector codec; - TestClientConnectionImpl connection( + TestClientConnectionImplNew connection( codec.client_connection_, codec.client_callbacks_, codec.stats_store_, codec.options_, Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, ProdNghttp2SessionFactory::get()); @@ -199,7 +199,7 @@ TEST_F(RequestFrameCommentTest, SingleByteNulCrLfInHeaderFrame) { header.frame()[offset] = c; // Play the frames back. 
ServerCodecFrameInjector codec; - TestServerConnectionImpl connection( + TestServerConnectionImplNew connection( codec.server_connection_, codec.server_callbacks_, codec.stats_store_, codec.options_, Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, envoy::config::core::v3::HttpProtocolOptions::ALLOW); @@ -232,7 +232,7 @@ TEST_F(ResponseFrameCommentTest, SingleByteNulCrLfInHeaderFrame) { header.frame()[offset] = c; // Play the frames back. ClientCodecFrameInjector codec; - TestClientConnectionImpl connection( + TestClientConnectionImplNew connection( codec.client_connection_, codec.client_callbacks_, codec.stats_store_, codec.options_, Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, ProdNghttp2SessionFactory::get()); @@ -267,7 +267,7 @@ TEST_F(RequestFrameCommentTest, SingleByteNulCrLfInHeaderField) { header.frame()[offset] = c; // Play the frames back. ServerCodecFrameInjector codec; - TestServerConnectionImpl connection( + TestServerConnectionImplNew connection( codec.server_connection_, codec.server_callbacks_, codec.stats_store_, codec.options_, Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, envoy::config::core::v3::HttpProtocolOptions::ALLOW); @@ -305,7 +305,7 @@ TEST_F(ResponseFrameCommentTest, SingleByteNulCrLfInHeaderField) { header.frame()[offset] = c; // Play the frames back. ClientCodecFrameInjector codec; - TestClientConnectionImpl connection( + TestClientConnectionImplNew connection( codec.client_connection_, codec.client_callbacks_, codec.stats_store_, codec.options_, Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, ProdNghttp2SessionFactory::get()); diff --git a/test/common/http/http2/request_header_fuzz_test.cc b/test/common/http/http2/request_header_fuzz_test.cc index 5dc75d58ebbb..d925ed1bb002 100644 --- a/test/common/http/http2/request_header_fuzz_test.cc +++ b/test/common/http/http2/request_header_fuzz_test.cc @@ -14,7 +14,7 @@ namespace { void Replay(const Frame& frame, ServerCodecFrameInjector& codec) { // Create the server connection containing the nghttp2 session. - TestServerConnectionImpl connection( + TestServerConnectionImplNew connection( codec.server_connection_, codec.server_callbacks_, codec.stats_store_, codec.options_, Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, envoy::config::core::v3::HttpProtocolOptions::ALLOW); diff --git a/test/common/http/http2/response_header_fuzz_test.cc b/test/common/http/http2/response_header_fuzz_test.cc index 8b1a5d3d0797..e73b88ab954d 100644 --- a/test/common/http/http2/response_header_fuzz_test.cc +++ b/test/common/http/http2/response_header_fuzz_test.cc @@ -15,7 +15,7 @@ namespace { void Replay(const Frame& frame, ClientCodecFrameInjector& codec) { // Create the client connection containing the nghttp2 session. 
- TestClientConnectionImpl connection( + TestClientConnectionImplNew connection( codec.client_connection_, codec.client_callbacks_, codec.stats_store_, codec.options_, Http::DEFAULT_MAX_REQUEST_HEADERS_KB, Http::DEFAULT_MAX_HEADERS_COUNT, ProdNghttp2SessionFactory::get()); diff --git a/test/config/utility.cc b/test/config/utility.cc index ea96c7ddb142..f57e7af4286c 100644 --- a/test/config/utility.cc +++ b/test/config/utility.cc @@ -586,6 +586,10 @@ void ConfigHelper::addRuntimeOverride(const std::string& key, const std::string& (*static_layer->mutable_fields())[std::string(key)] = ValueUtil::stringValue(std::string(value)); } +void ConfigHelper::setLegacyCodecs() { + addRuntimeOverride("envoy.reloadable_features.new_codec_behavior", "false"); +} + void ConfigHelper::finalize(const std::vector& ports) { RELEASE_ASSERT(!finalized_, ""); diff --git a/test/config/utility.h b/test/config/utility.h index 39bcb00a4454..b4217d4f31f5 100644 --- a/test/config/utility.h +++ b/test/config/utility.h @@ -229,6 +229,9 @@ class ConfigHelper { const envoy::extensions::filters::network::http_connection_manager::v3::LocalReplyConfig& config); + // Set legacy codecs to use for upstream and downstream codecs. + void setLegacyCodecs(); + private: static bool shouldBoost(envoy::config::core::v3::ApiVersion api_version) { return api_version == envoy::config::core::v3::ApiVersion::V2; diff --git a/test/extensions/filters/network/http_connection_manager/config_test.cc b/test/extensions/filters/network/http_connection_manager/config_test.cc index d1d32fdd634e..5e7648ba1ce4 100644 --- a/test/extensions/filters/network/http_connection_manager/config_test.cc +++ b/test/extensions/filters/network/http_connection_manager/config_test.cc @@ -1619,6 +1619,66 @@ TEST_F(HttpConnectionManagerConfigTest, DefaultRequestIDExtension) { ASSERT_NE(nullptr, request_id_extension); } +TEST_F(HttpConnectionManagerConfigTest, LegacyH1Codecs) { + const std::string yaml_string = R"EOF( +codec_type: http1 +server_name: foo +stat_prefix: router +route_config: + virtual_hosts: + - name: service + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: cluster +http_filters: +- name: envoy.filters.http.router + )EOF"; + + envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager + proto_config; + TestUtility::loadFromYaml(yaml_string, proto_config); + NiceMock filter_callbacks; + EXPECT_CALL(context_.runtime_loader_.snapshot_, runtimeFeatureEnabled(_)).WillOnce(Return(false)); + auto http_connection_manager_factory = + HttpConnectionManagerFactory::createHttpConnectionManagerFactoryFromProto( + proto_config, context_, filter_callbacks); + http_connection_manager_factory(); +} + +TEST_F(HttpConnectionManagerConfigTest, LegacyH2Codecs) { + const std::string yaml_string = R"EOF( +codec_type: http2 +server_name: foo +stat_prefix: router +route_config: + virtual_hosts: + - name: service + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: cluster +http_filters: +- name: envoy.filters.http.router + )EOF"; + + envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager + proto_config; + TestUtility::loadFromYaml(yaml_string, proto_config); + NiceMock filter_callbacks; + EXPECT_CALL(context_.runtime_loader_.snapshot_, runtimeFeatureEnabled(_)).WillOnce(Return(false)); + auto http_connection_manager_factory = + HttpConnectionManagerFactory::createHttpConnectionManagerFactoryFromProto( + proto_config, context_, filter_callbacks); + 
http_connection_manager_factory(); +} + class FilterChainTest : public HttpConnectionManagerConfigTest { public: const std::string basic_config_ = R"EOF( diff --git a/test/integration/BUILD b/test/integration/BUILD index 95c57fe9522d..d753260c31f0 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -8,6 +8,7 @@ load( "envoy_package", "envoy_proto_library", "envoy_select_hot_restart", + "envoy_select_legacy_codecs_in_integration_tests", "envoy_sh_test", ) @@ -574,6 +575,10 @@ envoy_cc_test_library( "ssl_utility.h", "utility.h", ], + copts = envoy_select_legacy_codecs_in_integration_tests( + ["-DENVOY_USE_LEGACY_CODECS_IN_INTEGRATION_TESTS"], + "@envoy", + ), data = ["//test/common/runtime:filesystem_test_data"], deps = [ ":server_stats_interface", @@ -605,7 +610,9 @@ envoy_cc_test_library( "//source/common/http:codec_client_lib", "//source/common/http:header_map_lib", "//source/common/http:headers_lib", + "//source/common/http/http1:codec_legacy_lib", "//source/common/http/http1:codec_lib", + "//source/common/http/http2:codec_legacy_lib", "//source/common/http/http2:codec_lib", "//source/common/local_info:local_info_lib", "//source/common/network:filter_lib", diff --git a/test/integration/api_version_integration_test.cc b/test/integration/api_version_integration_test.cc index c8bf5164b028..952c095a820e 100644 --- a/test/integration/api_version_integration_test.cc +++ b/test/integration/api_version_integration_test.cc @@ -316,9 +316,11 @@ TEST_P(ApiVersionIntegrationTest, Eds) { TEST_P(ApiVersionIntegrationTest, Rtds) { config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { - auto* admin_layer = bootstrap.mutable_layered_runtime()->add_layers(); - admin_layer->set_name("admin layer"); - admin_layer->mutable_admin_layer(); + if (bootstrap.mutable_layered_runtime()->layers_size() == 0) { + auto* admin_layer = bootstrap.mutable_layered_runtime()->add_layers(); + admin_layer->set_name("admin layer"); + admin_layer->mutable_admin_layer(); + } auto* rtds_layer = bootstrap.mutable_layered_runtime()->add_layers(); rtds_layer->set_name("rtds_layer"); setupConfigSource(*rtds_layer->mutable_rtds_layer()->mutable_rtds_config()); diff --git a/test/integration/fake_upstream.cc b/test/integration/fake_upstream.cc index 4763c9bbfe05..a6c94d91a0c4 100644 --- a/test/integration/fake_upstream.cc +++ b/test/integration/fake_upstream.cc @@ -12,7 +12,9 @@ #include "common/common/fmt.h" #include "common/http/header_map_impl.h" #include "common/http/http1/codec_impl.h" +#include "common/http/http1/codec_impl_legacy.h" #include "common/http/http2/codec_impl.h" +#include "common/http/http2/codec_impl_legacy.h" #include "common/network/address_impl.h" #include "common/network/listen_socket_impl.h" #include "common/network/raw_buffer_socket.h" @@ -249,6 +251,29 @@ class TestHttp1ServerConnectionImpl : public Http::Http1::ServerConnectionImpl { } }; +namespace Legacy { +class TestHttp1ServerConnectionImpl : public Http::Legacy::Http1::ServerConnectionImpl { +public: + using Http::Legacy::Http1::ServerConnectionImpl::ServerConnectionImpl; + + void onMessageComplete() override { + ServerConnectionImpl::onMessageComplete(); + + if (activeRequest().has_value() && activeRequest().value().request_decoder_) { + // Undo the read disable from the base class - we have many tests which + // waitForDisconnect after a full request has been read which will not + // receive the disconnect if reading is disabled. 
+ activeRequest().value().response_encoder_.readDisable(false); + } + } + ~TestHttp1ServerConnectionImpl() override { + if (activeRequest().has_value()) { + activeRequest().value().response_encoder_.clearReadDisableCallsForTests(); + } + } +}; +} // namespace Legacy + FakeHttpConnection::FakeHttpConnection( FakeUpstream& fake_upstream, SharedConnectionWrapper& shared_connection, Type type, Event::TestTimeSystem& time_system, uint32_t max_request_headers_kb, @@ -261,9 +286,15 @@ FakeHttpConnection::FakeHttpConnection( // For the purpose of testing, we always have the upstream encode the trailers if any http1_settings.enable_trailers_ = true; Http::Http1::CodecStats& stats = fake_upstream.http1CodecStats(); +#ifdef ENVOY_USE_LEGACY_CODECS_IN_INTEGRATION_TESTS codec_ = std::make_unique( shared_connection_.connection(), stats, *this, http1_settings, max_request_headers_kb, max_request_headers_count, headers_with_underscores_action); +#else + codec_ = std::make_unique( + shared_connection_.connection(), stats, *this, http1_settings, max_request_headers_kb, + max_request_headers_count, headers_with_underscores_action); +#endif } else { envoy::config::core::v3::Http2ProtocolOptions http2_options = ::Envoy::Http2::Utility::initializeAndValidateOptions( @@ -271,12 +302,17 @@ FakeHttpConnection::FakeHttpConnection( http2_options.set_allow_connect(true); http2_options.set_allow_metadata(true); Http::Http2::CodecStats& stats = fake_upstream.http2CodecStats(); +#ifdef ENVOY_USE_LEGACY_CODECS_IN_INTEGRATION_TESTS codec_ = std::make_unique( shared_connection_.connection(), *this, stats, http2_options, max_request_headers_kb, max_request_headers_count, headers_with_underscores_action); +#else + codec_ = std::make_unique( + shared_connection_.connection(), *this, stats, http2_options, max_request_headers_kb, + max_request_headers_count, headers_with_underscores_action); +#endif ASSERT(type == Type::HTTP2); } - shared_connection_.connection().addReadFilter( Network::ReadFilterSharedPtr{new ReadFilter(*this)}); } diff --git a/test/integration/integration.cc b/test/integration/integration.cc index 309548595313..2e4b846870e4 100644 --- a/test/integration/integration.cc +++ b/test/integration/integration.cc @@ -286,6 +286,11 @@ BaseIntegrationTest::BaseIntegrationTest(const InstanceConstSharedPtrFn& upstrea return new Buffer::WatermarkBuffer(below_low, above_high, above_overflow); })); ON_CALL(factory_context_, api()).WillByDefault(ReturnRef(*api_)); + // In ENVOY_USE_LEGACY_CODECS_IN_INTEGRATION_TESTS mode, set runtime config to use legacy codecs. +#ifdef ENVOY_USE_LEGACY_CODECS_IN_INTEGRATION_TESTS + ENVOY_LOG_MISC(debug, "Using legacy codecs"); + setLegacyCodecs(); +#endif } BaseIntegrationTest::BaseIntegrationTest(Network::Address::IpVersion version, diff --git a/test/integration/integration.h b/test/integration/integration.h index 0ec9133736cb..d345b5051095 100644 --- a/test/integration/integration.h +++ b/test/integration/integration.h @@ -191,6 +191,7 @@ class BaseIntegrationTest : protected Logger::Loggable { void skipPortUsageValidation() { config_helper_.skipPortUsageValidation(); } // Make test more deterministic by using a fixed RNG value.
void setDeterministic() { deterministic_ = true; } + void setLegacyCodecs() { config_helper_.setLegacyCodecs(); } FakeHttpConnection::Type upstreamProtocol() const { return upstream_protocol_; } From 048583b924e6c5c7812af56ec344ae210c168b3b Mon Sep 17 00:00:00 2001 From: Nicolas Flacco <47160394+FAYiEKcbD0XFqF2QK2E4viAHg8rMm2VbjYKdjTg@users.noreply.github.com> Date: Fri, 17 Jul 2020 16:44:18 -0700 Subject: [PATCH 676/909] Redis fault injection (#10784) This PR implements fault injection for Redis; specifically delay and error faults (which themselves can have delays added). I chose not to implement a separate filter after discussing with Henry; we concluded that the faults we felt were useful didn't need many levels- just a delay on top of the original fault, if any. In addition, as the Redis protocol doesn't support headers that makes it a bit different again from Envoy's http fault injection. Signed-off-by: FAYiEKcbD0XFqF2QK2E4viAHg8rMm2VbjYKdjTg --- .../network/redis_proxy/v3/redis_proxy.proto | 56 ++++- .../network_filters/redis_proxy_filter.rst | 52 ++++- docs/root/version_history/current.rst | 1 + .../network/redis_proxy/v3/redis_proxy.proto | 56 ++++- .../filters/network/common/redis/BUILD | 22 ++ .../filters/network/common/redis/fault.h | 52 +++++ .../network/common/redis/fault_impl.cc | 148 +++++++++++++ .../filters/network/common/redis/fault_impl.h | 108 +++++++++ .../filters/network/redis_proxy/BUILD | 3 + .../network/redis_proxy/command_splitter.h | 22 ++ .../redis_proxy/command_splitter_impl.cc | 137 +++++++++--- .../redis_proxy/command_splitter_impl.h | 148 ++++++++++--- .../filters/network/redis_proxy/config.cc | 17 +- .../network/redis_proxy/proxy_filter.cc | 10 +- .../network/redis_proxy/proxy_filter.h | 8 +- .../filters/network/common/redis/BUILD | 13 ++ .../network/common/redis/fault_test.cc | 206 ++++++++++++++++++ .../filters/network/redis_proxy/BUILD | 9 + .../redis_proxy/command_lookup_speed_test.cc | 12 +- .../redis_proxy/command_splitter_impl_test.cc | 162 +++++++++++++- .../network/redis_proxy/config_test.cc | 37 ++++ .../filters/network/redis_proxy/mocks.cc | 7 + .../filters/network/redis_proxy/mocks.h | 20 ++ .../network/redis_proxy/proxy_filter_test.cc | 41 ++-- .../redis_proxy_integration_test.cc | 56 ++++- 25 files changed, 1305 insertions(+), 98 deletions(-) create mode 100644 source/extensions/filters/network/common/redis/fault.h create mode 100644 source/extensions/filters/network/common/redis/fault_impl.cc create mode 100644 source/extensions/filters/network/common/redis/fault_impl.h create mode 100644 test/extensions/filters/network/common/redis/fault_test.cc diff --git a/api/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto b/api/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto index af69d33a6340..402937fff28f 100644 --- a/api/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto +++ b/api/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto @@ -23,7 +23,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Redis Proxy :ref:`configuration overview `. // [#extension: envoy.filters.network.redis_proxy] -// [#next-free-field: 8] +// [#next-free-field: 9] message RedisProxy { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.redis_proxy.v2.RedisProxy"; @@ -183,6 +183,31 @@ message RedisProxy { Route catch_all_route = 4; } + // RedisFault defines faults used for fault injection. 
+ message RedisFault { + enum RedisFaultType { + // Delays requests. This is the base fault; other faults can have delays added. + DELAY = 0; + + // Returns errors on requests. + ERROR = 1; + } + + // Fault type. + RedisFaultType fault_type = 1 [(validate.rules).enum = {defined_only: true}]; + + // Percentage of requests fault applies to. + config.core.v3.RuntimeFractionalPercent fault_enabled = 2 + [(validate.rules).message = {required: true}]; + + // Delay for all faults. If not set, defaults to zero + google.protobuf.Duration delay = 3; + + // Commands fault is restricted to, if any. If not set, fault applies to all commands + // other than auth and ping (due to special handling of those commands in Envoy). + repeated string commands = 4; + } + reserved 2; reserved "cluster"; @@ -236,6 +261,35 @@ message RedisProxy { // AUTH, but no password is set" error will be returned. config.core.v3.DataSource downstream_auth_password = 6 [(udpa.annotations.sensitive) = true]; + // List of faults to inject. Faults currently come in two flavors: + // - Delay, which delays a request. + // - Error, which responds to a request with an error. Errors can also have delays attached. + // + // Example: + // + // .. code-block:: yaml + // + // faults: + // - fault_type: ERROR + // fault_enabled: + // default_value: + // numerator: 10 + // denominator: HUNDRED + // runtime_key: "bogus_key" + // commands: + // - GET + // - fault_type: DELAY + // fault_enabled: + // default_value: + // numerator: 10 + // denominator: HUNDRED + // runtime_key: "bogus_key" + // delay: 2s + // + // See the :ref:`fault injection section + // ` for more information on how to configure this. + repeated RedisFault faults = 8; + // If a username is provided an ACL style AUTH command will be required with a username and password. // Authenticate Redis client connections locally by forcing downstream clients to issue a `Redis // AUTH command `_ with this username and the *downstream_auth_password* diff --git a/docs/root/configuration/listeners/network_filters/redis_proxy_filter.rst b/docs/root/configuration/listeners/network_filters/redis_proxy_filter.rst index 3c3fb77f3861..6adf7c8ffb27 100644 --- a/docs/root/configuration/listeners/network_filters/redis_proxy_filter.rst +++ b/docs/root/configuration/listeners/network_filters/redis_proxy_filter.rst @@ -58,7 +58,9 @@ changed to microseconds by setting the configuration parameter :ref:`latency_in_ total, Counter, Number of commands success, Counter, Number of commands that were successful error, Counter, Number of commands that returned a partial or complete error response - latency, Histogram, Command execution time in milliseconds + latency, Histogram, Command execution time in milliseconds (including delay faults) + error_fault, Counter, Number of commands that had an error fault injected + delay_fault, Counter, Number of commands that had a delay fault injected .. _config_network_filters_redis_proxy_per_command_stats: @@ -70,3 +72,51 @@ The Redis proxy filter supports the following runtime settings: redis.drain_close_enabled % of connections that will be drain closed if the server is draining and would otherwise attempt a drain close. Defaults to 100. + +.. _config_network_filters_redis_proxy_fault_injection: + +Fault Injection +--------------- + +The Redis filter can perform fault injection. Currently, Delay and Error faults are supported. +Delay faults delay a request, and Error faults respond with an error. Moreover, errors can be delayed. 
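As the fault-injection documentation below spells out, the configured fault percentages for a given command are effectively summed against a single random draw, which is why per-command totals above 100% guarantee that a fault fires. The snippet below is a simplified, self-contained sketch of that amortized selection; FaultSpec and pickFault are hypothetical names, not the filter's actual types.

#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

// Each configured fault reduces to an effective percentage for the command being processed.
struct FaultSpec {
  std::string name;
  uint64_t percentage; // 0-100
};

// One random draw in [0, 100) is compared against a running (amortized) sum of percentages,
// so two 10% faults give a 20% overall chance that some fault fires for the command.
const FaultSpec* pickFault(const std::vector<FaultSpec>& faults, uint64_t random_number) {
  uint64_t amortized = 0;
  for (const FaultSpec& fault : faults) {
    if (random_number % 100 < amortized + fault.percentage) {
      return &fault;
    }
    amortized += fault.percentage;
  }
  return nullptr; // no fault injected for this request
}

int main() {
  const std::vector<FaultSpec> get_faults = {{"error", 10}, {"delay", 10}};
  const FaultSpec* fault = pickFault(get_faults, 15); // 15 falls into the delay bucket (10-19)
  std::cout << (fault ? fault->name : "none") << "\n";
}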
+ +Note that the Redis filter does not check for correctness in your configuration - it is the user's +responsibility to make sure both the default and runtime percentages are correct! This is because +percentages can be changed during runtime, and validating correctness at request time is expensive. +If multiple faults are specified, the fault injection percentage should not exceed 100% for a given +fault and Redis command combination. For example, if two faults are specified; one applying to GET at 60 +%, and one applying to all commands at 50%, that is a bad configuration as GET now has 110% chance of +applying a fault. This means that every request will have a fault. + +If a delay is injected, the delay is additive - if the request took 400ms and a delay of 100ms +is injected, then the total request latency is 500ms. Also, due to implementation of the redis protocol, +a delayed request will delay everything that comes in after it, due to the proxy's need to respect the +order of commands it receives. + +Note that faults must have a `fault_enabled` field, and are not enabled by default (if no default value +or runtime key are set). + +Example configuration: + +.. code-block:: yaml + + faults: + - fault_type: ERROR + fault_enabled: + default_value: + numerator: 10 + denominator: HUNDRED + runtime_key: "bogus_key" + commands: + - GET + - fault_type: DELAY + fault_enabled: + default_value: + numerator: 10 + denominator: HUNDRED + runtime_key: "bogus_key" + delay: 2s + +This creates two faults- an error, applying only to GET commands at 10%, and a delay, applying to all +commands at 10%. This means that 20% of GET commands will have a fault applied, as discussed earlier. diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index be4d1f06972f..5acb53a13a3d 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -40,6 +40,7 @@ New Features * grpc-json: support specifying `response_body` field in for `google.api.HttpBody` message. * http: introduced new HTTP/1 and HTTP/2 codec implementations that will remove the use of exceptions for control flow due to high risk factors and instead use error statuses. The old behavior is deprecated, but can be used during the removal period by setting the runtime feature `envoy.reloadable_features.new_codec_behavior` to false. The removal period will be one month. * load balancer: added a :ref:`configuration` option to specify the active request bias used by the least request load balancer. +* redis: added fault injection support :ref:`fault injection for redis proxy `, described further in :ref:`configuration documentation `. * tap: added :ref:`generic body matcher` to scan http requests and responses for text or hex patterns. Deprecated diff --git a/generated_api_shadow/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto index 8f996c30f9ae..0bc52493bb29 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto @@ -23,7 +23,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Redis Proxy :ref:`configuration overview `. 
// [#extension: envoy.filters.network.redis_proxy] -// [#next-free-field: 8] +// [#next-free-field: 9] message RedisProxy { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.redis_proxy.v2.RedisProxy"; @@ -182,6 +182,31 @@ message RedisProxy { [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; } + // RedisFault defines faults used for fault injection. + message RedisFault { + enum RedisFaultType { + // Delays requests. This is the base fault; other faults can have delays added. + DELAY = 0; + + // Returns errors on requests. + ERROR = 1; + } + + // Fault type. + RedisFaultType fault_type = 1 [(validate.rules).enum = {defined_only: true}]; + + // Percentage of requests fault applies to. + config.core.v3.RuntimeFractionalPercent fault_enabled = 2 + [(validate.rules).message = {required: true}]; + + // Delay for all faults. If not set, defaults to zero + google.protobuf.Duration delay = 3; + + // Commands fault is restricted to, if any. If not set, fault applies to all commands + // other than auth and ping (due to special handling of those commands in Envoy). + repeated string commands = 4; + } + // The prefix to use when emitting :ref:`statistics `. string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; @@ -231,6 +256,35 @@ message RedisProxy { // AUTH, but no password is set" error will be returned. config.core.v3.DataSource downstream_auth_password = 6 [(udpa.annotations.sensitive) = true]; + // List of faults to inject. Faults currently come in two flavors: + // - Delay, which delays a request. + // - Error, which responds to a request with an error. Errors can also have delays attached. + // + // Example: + // + // .. code-block:: yaml + // + // faults: + // - fault_type: ERROR + // fault_enabled: + // default_value: + // numerator: 10 + // denominator: HUNDRED + // runtime_key: "bogus_key" + // commands: + // - GET + // - fault_type: DELAY + // fault_enabled: + // default_value: + // numerator: 10 + // denominator: HUNDRED + // runtime_key: "bogus_key" + // delay: 2s + // + // See the :ref:`fault injection section + // ` for more information on how to configure this. + repeated RedisFault faults = 8; + // If a username is provided an ACL style AUTH command will be required with a username and password. 
// Authenticate Redis client connections locally by forcing downstream clients to issue a `Redis // AUTH command `_ with this username and the *downstream_auth_password* diff --git a/source/extensions/filters/network/common/redis/BUILD b/source/extensions/filters/network/common/redis/BUILD index 3b4dcedbb01e..8c9c9e32d3ac 100644 --- a/source/extensions/filters/network/common/redis/BUILD +++ b/source/extensions/filters/network/common/redis/BUILD @@ -100,3 +100,25 @@ envoy_cc_library( "//source/common/stats:utility_lib", ], ) + +envoy_cc_library( + name = "fault_interface", + hdrs = ["fault.h"], + deps = [ + "@envoy_api//envoy/type/v3:pkg_cc_proto", + ], +) + +envoy_cc_library( + name = "fault_lib", + srcs = ["fault_impl.cc"], + hdrs = ["fault_impl.h"], + deps = [ + ":codec_lib", + ":fault_interface", + "//include/envoy/common:random_generator_interface", + "//include/envoy/upstream:upstream_interface", + "//source/common/protobuf:utility_lib", + "@envoy_api//envoy/extensions/filters/network/redis_proxy/v3:pkg_cc_proto", + ], +) diff --git a/source/extensions/filters/network/common/redis/fault.h b/source/extensions/filters/network/common/redis/fault.h new file mode 100644 index 000000000000..158969455c92 --- /dev/null +++ b/source/extensions/filters/network/common/redis/fault.h @@ -0,0 +1,52 @@ +#pragma once + +#include +#include + +#include "envoy/common/pure.h" +#include "envoy/type/v3/percent.pb.h" + +#include "absl/types/optional.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Common { +namespace Redis { + +/** + * Fault Type. + */ +enum class FaultType { Delay, Error }; + +class Fault { +public: + virtual ~Fault() = default; + + virtual FaultType faultType() const PURE; + virtual std::chrono::milliseconds delayMs() const PURE; + virtual const std::vector commands() const PURE; + virtual envoy::type::v3::FractionalPercent defaultValue() const PURE; + virtual absl::optional runtimeKey() const PURE; +}; + +using FaultSharedPtr = std::shared_ptr; + +class FaultManager { +public: + virtual ~FaultManager() = default; + + /** + * Get fault type and delay given a Redis command. + * @param command supplies the Redis command string. 
+ */ + virtual const Fault* getFaultForCommand(const std::string& command) const PURE; +}; + +using FaultManagerPtr = std::unique_ptr; + +} // namespace Redis +} // namespace Common +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/filters/network/common/redis/fault_impl.cc b/source/extensions/filters/network/common/redis/fault_impl.cc new file mode 100644 index 000000000000..4b813ad9d799 --- /dev/null +++ b/source/extensions/filters/network/common/redis/fault_impl.cc @@ -0,0 +1,148 @@ +#include "extensions/filters/network/common/redis/fault_impl.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Common { +namespace Redis { + +struct FaultManagerKeyNamesValues { + // Map key under which faults that apply to all commands are stored. + const std::string AllKey = "ALL_KEY"; +}; +using FaultManagerKeyNames = ConstSingleton; + +FaultManagerImpl::FaultImpl::FaultImpl( + envoy::extensions::filters::network::redis_proxy::v3::RedisProxy_RedisFault base_fault) + : commands_(buildCommands(base_fault)) { + delay_ms_ = std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(base_fault, delay, 0)); + + switch (base_fault.fault_type()) { + case envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::RedisFault::DELAY: + fault_type_ = FaultType::Delay; + break; + case envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::RedisFault::ERROR: + fault_type_ = FaultType::Error; + break; + default: + NOT_REACHED_GCOVR_EXCL_LINE; + break; + } + + default_value_ = base_fault.fault_enabled().default_value(); + runtime_key_ = base_fault.fault_enabled().runtime_key(); +}; + +std::vector FaultManagerImpl::FaultImpl::buildCommands( + envoy::extensions::filters::network::redis_proxy::v3::RedisProxy_RedisFault base_fault) { + std::vector commands; + for (const std::string& command : base_fault.commands()) { + commands.emplace_back(absl::AsciiStrToLower(command)); + } + return commands; +} + +FaultManagerImpl::FaultManagerImpl( + Random::RandomGenerator& random, Runtime::Loader& runtime, + const Protobuf::RepeatedPtrField< + ::envoy::extensions::filters::network::redis_proxy::v3::RedisProxy_RedisFault> + faults) + : fault_map_(buildFaultMap(faults)), random_(random), runtime_(runtime) {} + +FaultMap FaultManagerImpl::buildFaultMap( + const Protobuf::RepeatedPtrField< + ::envoy::extensions::filters::network::redis_proxy::v3::RedisProxy_RedisFault> + faults) { + // Next, create the fault map that maps commands to pointers to Fault objects. + // Group faults by command + FaultMap fault_map; + for (auto base_fault : faults) { + auto fault_ptr = std::make_shared(base_fault); + if (!fault_ptr->commands().empty()) { + for (const std::string& command : fault_ptr->commands()) { + fault_map[command].emplace_back(fault_ptr); + } + } else { + // Generic "ALL" entry in map for faults that map to all keys; also add to each command + fault_map[FaultManagerKeyNames::get().AllKey].emplace_back(fault_ptr); + } + } + + // Add the ALL keys faults to each command too so that we can just query faults by command. + // Get all ALL_KEY faults. 
+ FaultMap::iterator it_outer = fault_map.find(FaultManagerKeyNames::get().AllKey); + if (it_outer != fault_map.end()) { + for (const FaultSharedPtr& fault_ptr : it_outer->second) { + FaultMap::iterator it_inner; + for (it_inner = fault_map.begin(); it_inner != fault_map.end(); it_inner++) { + std::string command = it_inner->first; + if (command != FaultManagerKeyNames::get().AllKey) { + fault_map[command].push_back(fault_ptr); + } + } + } + } + return fault_map; +} + +uint64_t FaultManagerImpl::getIntegerNumeratorOfFractionalPercent( + absl::string_view key, const envoy::type::v3::FractionalPercent& default_value) const { + uint64_t numerator; + if (default_value.denominator() == envoy::type::v3::FractionalPercent::HUNDRED) { + numerator = default_value.numerator(); + } else { + int denominator = + ProtobufPercentHelper::fractionalPercentDenominatorToInt(default_value.denominator()); + numerator = (default_value.numerator() * 100) / denominator; + } + return runtime_.snapshot().getInteger(key, numerator); +} + +// Fault checking algorithm: +// +// For example, if we have an ERROR fault at 5% for all commands, and a DELAY fault at 10% for GET, +// if we receive a GET, we want 5% of GETs to get DELAY, and 10% to get ERROR. Thus, we need to +// amortize the percentages. +// +// 0. Get random number. +// 1. Get faults for given command. +// 2. For each fault, calculate the amortized fault injection percentage. +// +// Note that we do not check to make sure the probabilities of faults are <= 100%! +const Fault* FaultManagerImpl::getFaultForCommandInternal(const std::string& command) const { + FaultMap::const_iterator it_outer = fault_map_.find(command); + if (it_outer != fault_map_.end()) { + auto random_number = random_.random() % 100; + int amortized_fault = 0; + + for (const FaultSharedPtr& fault_ptr : it_outer->second) { + uint64_t fault_injection_percentage = getIntegerNumeratorOfFractionalPercent( + fault_ptr->runtimeKey().value(), fault_ptr->defaultValue()); + if (random_number < (fault_injection_percentage + amortized_fault)) { + return fault_ptr.get(); + } else { + amortized_fault += fault_injection_percentage; + } + } + } + + return nullptr; +} + +const Fault* FaultManagerImpl::getFaultForCommand(const std::string& command) const { + if (!fault_map_.empty()) { + if (fault_map_.count(command) > 0) { + return getFaultForCommandInternal(command); + } else { + return getFaultForCommandInternal(FaultManagerKeyNames::get().AllKey); + } + } + + return nullptr; +} + +} // namespace Redis +} // namespace Common +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/filters/network/common/redis/fault_impl.h b/source/extensions/filters/network/common/redis/fault_impl.h new file mode 100644 index 000000000000..3850a8a4b4c9 --- /dev/null +++ b/source/extensions/filters/network/common/redis/fault_impl.h @@ -0,0 +1,108 @@ +#pragma once + +#include +#include + +#include "envoy/api/api.h" +#include "envoy/common/random_generator.h" +#include "envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.pb.h" +#include "envoy/upstream/upstream.h" + +#include "common/protobuf/utility.h" +#include "common/singleton/const_singleton.h" + +#include "extensions/filters/network/common/redis/fault.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Common { +namespace Redis { + +using FaultMap = absl::flat_hash_map>; + +/** + * Message returned for particular types of faults. 
+ */ +struct FaultMessagesValues { + const std::string Error = "Fault Injected: Error"; +}; +using FaultMessages = ConstSingleton; + +/** + * Fault management- creation, storage and retrieval. Faults are queried for by command, + * so they are stored in an unordered map using the command as key. For faults that apply to + * all commands, we use a special ALL_KEYS entry in the map. + */ +class FaultManagerImpl : public FaultManager { +public: + FaultManagerImpl( + Random::RandomGenerator& random, Runtime::Loader& runtime, + const Protobuf::RepeatedPtrField< + ::envoy::extensions::filters::network::redis_proxy::v3::RedisProxy_RedisFault> + base_faults); + + const Fault* getFaultForCommand(const std::string& command) const override; + + static FaultSharedPtr makeFaultForTest(Common::Redis::FaultType fault_type, + std::chrono::milliseconds delay_ms) { + envoy::type::v3::FractionalPercent default_value; + default_value.set_numerator(100); + default_value.set_denominator(envoy::type::v3::FractionalPercent::HUNDRED); + FaultImpl fault = + FaultImpl(fault_type, delay_ms, std::vector(), default_value, "foo"); + return std::make_shared(fault); + } + + // Allow the unit test to have access to private members. + friend class FaultTest; + +private: + class FaultImpl : public Fault { + public: + FaultImpl( + envoy::extensions::filters::network::redis_proxy::v3::RedisProxy_RedisFault base_fault); + FaultImpl(FaultType fault_type, std::chrono::milliseconds delay_ms, + const std::vector commands, + envoy::type::v3::FractionalPercent default_value, + absl::optional runtime_key) + : fault_type_(fault_type), delay_ms_(delay_ms), commands_(commands), + default_value_(default_value), runtime_key_(runtime_key) {} // For testing only + + FaultType faultType() const override { return fault_type_; }; + std::chrono::milliseconds delayMs() const override { return delay_ms_; }; + const std::vector commands() const override { return commands_; }; + envoy::type::v3::FractionalPercent defaultValue() const override { return default_value_; }; + absl::optional runtimeKey() const override { return runtime_key_; }; + + private: + static std::vector buildCommands( + envoy::extensions::filters::network::redis_proxy::v3::RedisProxy_RedisFault base_fault); + + FaultType fault_type_; + std::chrono::milliseconds delay_ms_; + const std::vector commands_; + envoy::type::v3::FractionalPercent default_value_; + absl::optional runtime_key_; + }; + + static FaultMap + buildFaultMap(const Protobuf::RepeatedPtrField< + ::envoy::extensions::filters::network::redis_proxy::v3::RedisProxy_RedisFault> + faults); + + uint64_t getIntegerNumeratorOfFractionalPercent( + absl::string_view key, const envoy::type::v3::FractionalPercent& default_value) const; + const Fault* getFaultForCommandInternal(const std::string& command) const; + const FaultMap fault_map_; + +protected: + Random::RandomGenerator& random_; + Runtime::Loader& runtime_; +}; + +} // namespace Redis +} // namespace Common +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/filters/network/redis_proxy/BUILD b/source/extensions/filters/network/redis_proxy/BUILD index 4d452f0cad3c..b7503144a5f8 100644 --- a/source/extensions/filters/network/redis_proxy/BUILD +++ b/source/extensions/filters/network/redis_proxy/BUILD @@ -17,6 +17,7 @@ envoy_cc_library( name = "command_splitter_interface", hdrs = ["command_splitter.h"], deps = [ + "//include/envoy/event:dispatcher_interface", 
"//source/extensions/filters/network/common/redis:codec_interface", ], ) @@ -64,6 +65,7 @@ envoy_cc_library( "//source/common/common:utility_lib", "//source/common/stats:timespan_lib", "//source/extensions/filters/network/common/redis:client_lib", + "//source/extensions/filters/network/common/redis:fault_lib", "//source/extensions/filters/network/common/redis:supported_commands_lib", "//source/extensions/filters/network/common/redis:utility_lib", ], @@ -125,6 +127,7 @@ envoy_cc_extension( "//source/extensions/filters/network:well_known_names", "//source/extensions/filters/network/common:factory_base_lib", "//source/extensions/filters/network/common/redis:codec_lib", + "//source/extensions/filters/network/common/redis:fault_lib", "//source/extensions/filters/network/common/redis:redis_command_stats_lib", "//source/extensions/filters/network/redis_proxy:command_splitter_lib", "//source/extensions/filters/network/redis_proxy:conn_pool_lib", diff --git a/source/extensions/filters/network/redis_proxy/command_splitter.h b/source/extensions/filters/network/redis_proxy/command_splitter.h index e03d0a92e137..d5408b11ab0c 100644 --- a/source/extensions/filters/network/redis_proxy/command_splitter.h +++ b/source/extensions/filters/network/redis_proxy/command_splitter.h @@ -3,6 +3,7 @@ #include #include "envoy/common/pure.h" +#include "envoy/event/dispatcher.h" #include "extensions/filters/network/common/redis/codec.h" @@ -80,6 +81,27 @@ class Instance { SplitCallbacks& callbacks) PURE; }; +using CommandSplitterPtr = std::unique_ptr; + +/** + * A command splitter factory that allows creation of the command splitter when + * we have access to the dispatcher parameter. This supports fault injection, + * specifically delay faults, which rely on the dispatcher for creating delay timers. + */ +class CommandSplitterFactory { +public: + virtual ~CommandSplitterFactory() = default; + + /** + * Create a command splitter. + * @param dispatcher supplies the dispatcher . + * @return CommandSplitterPtr a handle to a newly created command splitter. 
+ */ + virtual CommandSplitterPtr create(Event::Dispatcher& dispatcher) PURE; +}; + +using CommandSplitterFactorySharedPtr = std::shared_ptr; + } // namespace CommandSplitter } // namespace RedisProxy } // namespace NetworkFilters diff --git a/source/extensions/filters/network/redis_proxy/command_splitter_impl.cc b/source/extensions/filters/network/redis_proxy/command_splitter_impl.cc index a5bd89588f51..624cead01a9c 100644 --- a/source/extensions/filters/network/redis_proxy/command_splitter_impl.cc +++ b/source/extensions/filters/network/redis_proxy/command_splitter_impl.cc @@ -79,7 +79,9 @@ void SplitRequestBase::updateStats(const bool success) { } else { command_stats_.error_.inc(); } - command_latency_->complete(); + if (command_latency_ != nullptr) { + command_latency_->complete(); + } } SingleServerRequest::~SingleServerRequest() { ASSERT(!handle_); } @@ -90,10 +92,12 @@ void SingleServerRequest::onResponse(Common::Redis::RespValuePtr&& response) { callbacks_.onResponse(std::move(response)); } -void SingleServerRequest::onFailure() { +void SingleServerRequest::onFailure() { onFailure(Response::get().UpstreamFailure); } + +void SingleServerRequest::onFailure(std::string error_msg) { handle_ = nullptr; updateStats(false); - callbacks_.onResponse(Common::Redis::Utility::makeError(Response::get().UpstreamFailure)); + callbacks_.onResponse(Common::Redis::Utility::makeError(error_msg)); } void SingleServerRequest::cancel() { @@ -101,13 +105,44 @@ void SingleServerRequest::cancel() { handle_ = nullptr; } +SplitRequestPtr ErrorFaultRequest::create(SplitCallbacks& callbacks, CommandStats& command_stats, + TimeSource& time_source, bool delay_command_latency) { + std::unique_ptr request_ptr{ + new ErrorFaultRequest(callbacks, command_stats, time_source, delay_command_latency)}; + + request_ptr->onFailure(Common::Redis::FaultMessages::get().Error); + command_stats.error_fault_.inc(); + return nullptr; +} + +std::unique_ptr DelayFaultRequest::create(SplitCallbacks& callbacks, + CommandStats& command_stats, + TimeSource& time_source, + Event::Dispatcher& dispatcher, + std::chrono::milliseconds delay) { + return std::make_unique(callbacks, command_stats, time_source, dispatcher, + delay); +} + +void DelayFaultRequest::onResponse(Common::Redis::RespValuePtr&& response) { + response_ = std::move(response); + delay_timer_->enableTimer(delay_); +} + +void DelayFaultRequest::onDelayResponse() { + command_stats_.delay_fault_.inc(); + command_latency_->complete(); // Complete latency of the command stats of the wrapped request + callbacks_.onResponse(std::move(response_)); +} + +void DelayFaultRequest::cancel() { delay_timer_->disableTimer(); } + SplitRequestPtr SimpleRequest::create(Router& router, Common::Redis::RespValuePtr&& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, - TimeSource& time_source) { + TimeSource& time_source, bool delay_command_latency) { std::unique_ptr request_ptr{ - new SimpleRequest(callbacks, command_stats, time_source)}; - + new SimpleRequest(callbacks, command_stats, time_source, delay_command_latency)}; const auto route = router.upstreamPool(incoming_request->asArray()[1].asString()); if (route) { Common::Redis::RespValueSharedPtr base_request = std::move(incoming_request); @@ -126,7 +161,7 @@ SplitRequestPtr SimpleRequest::create(Router& router, SplitRequestPtr EvalRequest::create(Router& router, Common::Redis::RespValuePtr&& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, - TimeSource& time_source) { + TimeSource& 
time_source, bool delay_command_latency) { // EVAL looks like: EVAL script numkeys key [key ...] arg [arg ...] // Ensure there are at least three args to the command or it cannot be hashed. if (incoming_request->asArray().size() < 4) { @@ -135,7 +170,8 @@ SplitRequestPtr EvalRequest::create(Router& router, Common::Redis::RespValuePtr& return nullptr; } - std::unique_ptr request_ptr{new EvalRequest(callbacks, command_stats, time_source)}; + std::unique_ptr request_ptr{ + new EvalRequest(callbacks, command_stats, time_source, delay_command_latency)}; const auto route = router.upstreamPool(incoming_request->asArray()[3].asString()); if (route) { @@ -177,8 +213,9 @@ void FragmentedRequest::onChildFailure(uint32_t index) { SplitRequestPtr MGETRequest::create(Router& router, Common::Redis::RespValuePtr&& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, - TimeSource& time_source) { - std::unique_ptr request_ptr{new MGETRequest(callbacks, command_stats, time_source)}; + TimeSource& time_source, bool delay_command_latency) { + std::unique_ptr request_ptr{ + new MGETRequest(callbacks, command_stats, time_source, delay_command_latency)}; request_ptr->num_pending_responses_ = incoming_request->asArray().size() - 1; request_ptr->pending_requests_.reserve(request_ptr->num_pending_responses_); @@ -250,13 +287,14 @@ void MGETRequest::onChildResponse(Common::Redis::RespValuePtr&& value, uint32_t SplitRequestPtr MSETRequest::create(Router& router, Common::Redis::RespValuePtr&& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, - TimeSource& time_source) { + TimeSource& time_source, bool delay_command_latency) { if ((incoming_request->asArray().size() - 1) % 2 != 0) { onWrongNumberOfArguments(callbacks, *incoming_request); command_stats.error_.inc(); return nullptr; } - std::unique_ptr request_ptr{new MSETRequest(callbacks, command_stats, time_source)}; + std::unique_ptr request_ptr{ + new MSETRequest(callbacks, command_stats, time_source, delay_command_latency)}; request_ptr->num_pending_responses_ = (incoming_request->asArray().size() - 1) / 2; request_ptr->pending_requests_.reserve(request_ptr->num_pending_responses_); @@ -321,13 +359,12 @@ void MSETRequest::onChildResponse(Common::Redis::RespValuePtr&& value, uint32_t } } -SplitRequestPtr SplitKeysSumResultRequest::create(Router& router, - Common::Redis::RespValuePtr&& incoming_request, - SplitCallbacks& callbacks, - CommandStats& command_stats, - TimeSource& time_source) { +SplitRequestPtr +SplitKeysSumResultRequest::create(Router& router, Common::Redis::RespValuePtr&& incoming_request, + SplitCallbacks& callbacks, CommandStats& command_stats, + TimeSource& time_source, bool delay_command_latency) { std::unique_ptr request_ptr{ - new SplitKeysSumResultRequest(callbacks, command_stats, time_source)}; + new SplitKeysSumResultRequest(callbacks, command_stats, time_source, delay_command_latency)}; request_ptr->num_pending_responses_ = incoming_request->asArray().size() - 1; request_ptr->pending_requests_.reserve(request_ptr->num_pending_responses_); @@ -391,13 +428,15 @@ void SplitKeysSumResultRequest::onChildResponse(Common::Redis::RespValuePtr&& va } } -InstanceImpl::InstanceImpl(RouterPtr&& router, Stats::Scope& scope, const std::string& stat_prefix, - TimeSource& time_source, bool latency_in_micros) - : router_(std::move(router)), simple_command_handler_(*router_), - eval_command_handler_(*router_), mget_handler_(*router_), mset_handler_(*router_), - split_keys_sum_result_handler_(*router_), - 
stats_{ALL_COMMAND_SPLITTER_STATS(POOL_COUNTER_PREFIX(scope, stat_prefix + "splitter."))}, - time_source_(time_source) { +InstanceImpl::InstanceImpl(Router& router, Stats::Scope& scope, const std::string& stat_prefix, + TimeSource& time_source, bool latency_in_micros, + Common::Redis::FaultManager& fault_manager, + Event::Dispatcher& dispatcher) + : simple_command_handler_(router), eval_command_handler_(router), mget_handler_(router), + mset_handler_(router), + split_keys_sum_result_handler_(router), stats_{ALL_COMMAND_SPLITTER_STATS(POOL_COUNTER_PREFIX( + scope, stat_prefix + "splitter."))}, + time_source_(time_source), fault_manager_(fault_manager), dispatcher_(dispatcher) { for (const std::string& command : Common::Redis::SupportedCommands::simpleCommands()) { addHandler(scope, stat_prefix, command, latency_in_micros, simple_command_handler_); } @@ -468,6 +507,7 @@ SplitRequestPtr InstanceImpl::makeRequest(Common::Redis::RespValuePtr&& request, return nullptr; } + // Get the handler for the downstream request auto handler = handler_lookup_table_.find(to_lower_string.c_str()); if (handler == nullptr) { stats_.unsupported_command_.inc(); @@ -475,11 +515,46 @@ SplitRequestPtr InstanceImpl::makeRequest(Common::Redis::RespValuePtr&& request, fmt::format("unsupported command '{}'", request->asArray()[0].asString()))); return nullptr; } + + // Fault Injection Check + const Common::Redis::Fault* fault_ptr = fault_manager_.getFaultForCommand(to_lower_string); + + // Check if delay, which determines which callbacks to use. If a delay fault is enabled, + // the delay fault itself wraps the request (or other fault) and the delay fault itself + // implements the callbacks functions, and in turn calls the real callbacks after injecting + // delay on the result of the wrapped request or fault. + const bool has_delay_fault = + fault_ptr != nullptr && fault_ptr->delayMs() > std::chrono::milliseconds(0); + std::unique_ptr delay_fault_ptr; + if (has_delay_fault) { + delay_fault_ptr = DelayFaultRequest::create(callbacks, handler->command_stats_, time_source_, + dispatcher_, fault_ptr->delayMs()); + } + + // Note that the command_stats_ object of the original request is used for faults, so that our + // downstream metrics reflect any faults added (with special fault metrics) or extra latency from + // a delay. 2) we use a ternary operator for the callback parameter- we want to use the + // delay_fault as callback if there is a delay per the earlier comment. ENVOY_LOG(debug, "redis: splitting '{}'", request->toString()); handler->command_stats_.total_.inc(); - SplitRequestPtr request_ptr = handler->handler_.get().startRequest( - std::move(request), callbacks, handler->command_stats_, time_source_); - return request_ptr; + + SplitRequestPtr request_ptr; + if (fault_ptr != nullptr && fault_ptr->faultType() == Common::Redis::FaultType::Error) { + request_ptr = ErrorFaultRequest::create(has_delay_fault ? *delay_fault_ptr : callbacks, + handler->command_stats_, time_source_, has_delay_fault); + } else { + request_ptr = handler->handler_.get().startRequest( + std::move(request), has_delay_fault ? *delay_fault_ptr : callbacks, handler->command_stats_, + time_source_, has_delay_fault); + } + + // Complete delay, if any. The delay fault takes ownership of the wrapped request. 
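To summarize the composition that the fault classes above and the code just below implement (a reviewer's sketch, not additional patch content):

    no fault            -> handler creates the real request; callbacks are passed straight through
    error fault only    -> ErrorFaultRequest answers immediately with the fault error and returns nullptr
    delay fault only    -> DelayFaultRequest wraps the real request; onResponse() buffers the upstream
                           response until the delay timer fires
    delay + error fault -> DelayFaultRequest wraps ErrorFaultRequest, so the injected error is likewise
                           held back until onDelayResponse() runs

In every delayed case the DelayFaultRequest is what makeRequest() hands back to the caller, and it owns whatever it wrapped.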
+ if (has_delay_fault) { + delay_fault_ptr->wrapped_request_ptr_ = std::move(request_ptr); + return delay_fault_ptr; + } else { + return request_ptr; + } } void InstanceImpl::onInvalidRequest(SplitCallbacks& callbacks) { @@ -505,6 +580,12 @@ void InstanceImpl::addHandler(Stats::Scope& scope, const std::string& stat_prefi handler})); } +CommandSplitterPtr CommandSplitterFactoryImpl::create(Event::Dispatcher& dispatcher) { + return std::make_unique(*router_, scope_, stat_prefix_, + time_source_, latency_in_micros_, + *fault_manager_, dispatcher); +} + } // namespace CommandSplitter } // namespace RedisProxy } // namespace NetworkFilters diff --git a/source/extensions/filters/network/redis_proxy/command_splitter_impl.h b/source/extensions/filters/network/redis_proxy/command_splitter_impl.h index 8057f9a91b2c..813597dc0d2a 100644 --- a/source/extensions/filters/network/redis_proxy/command_splitter_impl.h +++ b/source/extensions/filters/network/redis_proxy/command_splitter_impl.h @@ -11,10 +11,10 @@ #include "common/common/logger.h" #include "common/common/utility.h" -#include "common/singleton/const_singleton.h" #include "common/stats/timespan_impl.h" #include "extensions/filters/network/common/redis/client_impl.h" +#include "extensions/filters/network/common/redis/fault_impl.h" #include "extensions/filters/network/common/redis/utility.h" #include "extensions/filters/network/redis_proxy/command_splitter.h" #include "extensions/filters/network/redis_proxy/conn_pool_impl.h" @@ -43,7 +43,9 @@ using Response = ConstSingleton; #define ALL_COMMAND_STATS(COUNTER) \ COUNTER(total) \ COUNTER(success) \ - COUNTER(error) + COUNTER(error) \ + COUNTER(error_fault) \ + COUNTER(delay_fault) /** * Struct definition for all command stats. @see stats_macros.h @@ -59,7 +61,7 @@ class CommandHandler { virtual SplitRequestPtr startRequest(Common::Redis::RespValuePtr&& request, SplitCallbacks& callbacks, CommandStats& command_stats, - TimeSource& time_source) PURE; + TimeSource& time_source, bool delay_command_latency) PURE; }; class CommandHandlerBase { @@ -75,10 +77,14 @@ class SplitRequestBase : public SplitRequest { const Common::Redis::RespValue& request); void updateStats(const bool success); - SplitRequestBase(CommandStats& command_stats, TimeSource& time_source) + SplitRequestBase(CommandStats& command_stats, TimeSource& time_source, bool delay_command_latency) : command_stats_(command_stats) { - command_latency_ = std::make_unique( - command_stats_.latency_, time_source); + if (!delay_command_latency) { + command_latency_ = std::make_unique( + command_stats_.latency_, time_source); + } else { + command_latency_ = nullptr; + } } CommandStats& command_stats_; Stats::TimespanPtr command_latency_; @@ -94,14 +100,16 @@ class SingleServerRequest : public SplitRequestBase, public ConnPool::PoolCallba // ConnPool::PoolCallbacks void onResponse(Common::Redis::RespValuePtr&& response) override; void onFailure() override; + void onFailure(std::string error_msg); // RedisProxy::CommandSplitter::SplitRequest void cancel() override; protected: SingleServerRequest(SplitCallbacks& callbacks, CommandStats& command_stats, - TimeSource& time_source) - : SplitRequestBase(command_stats, time_source), callbacks_(callbacks) {} + TimeSource& time_source, bool delay_command_latency) + : SplitRequestBase(command_stats, time_source, delay_command_latency), callbacks_(callbacks) { + } SplitCallbacks& callbacks_; ConnPool::InstanceSharedPtr conn_pool_; @@ -109,6 +117,57 @@ class SingleServerRequest : public SplitRequestBase, public 
ConnPool::PoolCallba Common::Redis::RespValuePtr incoming_request_; }; +/** + * ErrorFaultRequest returns an error. + */ +class ErrorFaultRequest : public SingleServerRequest { +public: + static SplitRequestPtr create(SplitCallbacks& callbacks, CommandStats& command_stats, + TimeSource& time_source, bool has_delaydelay_command_latency_fault); + +private: + ErrorFaultRequest(SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, + bool delay_command_latency) + : SingleServerRequest(callbacks, command_stats, time_source, delay_command_latency) {} +}; + +/** + * DelayFaultRequest wraps a request- either a normal request or a fault- and delays it. + */ +class DelayFaultRequest : public SplitRequestBase, public SplitCallbacks { +public: + static std::unique_ptr + create(SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, + Event::Dispatcher& dispatcher, std::chrono::milliseconds delay); + + DelayFaultRequest(SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, + Event::Dispatcher& dispatcher, std::chrono::milliseconds delay) + : SplitRequestBase(command_stats, time_source, false), callbacks_(callbacks), delay_(delay) { + delay_timer_ = dispatcher.createTimer([this]() -> void { onDelayResponse(); }); + } + + // SplitCallbacks + bool connectionAllowed() override { return callbacks_.connectionAllowed(); } + void onAuth(const std::string& password) override { callbacks_.onAuth(password); } + void onAuth(const std::string& username, const std::string& password) override { + callbacks_.onAuth(username, password); + } + void onResponse(Common::Redis::RespValuePtr&& response) override; + + // RedisProxy::CommandSplitter::SplitRequest + void cancel() override; + + SplitRequestPtr wrapped_request_ptr_; + +private: + void onDelayResponse(); + + SplitCallbacks& callbacks_; + std::chrono::milliseconds delay_; + Event::TimerPtr delay_timer_; + Common::Redis::RespValuePtr response_; +}; + /** * SimpleRequest hashes the first argument as the key. 
*/ @@ -116,11 +175,12 @@ class SimpleRequest : public SingleServerRequest { public: static SplitRequestPtr create(Router& router, Common::Redis::RespValuePtr&& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, - TimeSource& time_source); + TimeSource& time_source, bool delay_command_latency); private: - SimpleRequest(SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source) - : SingleServerRequest(callbacks, command_stats, time_source) {} + SimpleRequest(SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, + bool delay_command_latency) + : SingleServerRequest(callbacks, command_stats, time_source, delay_command_latency) {} }; /** @@ -130,11 +190,12 @@ class EvalRequest : public SingleServerRequest { public: static SplitRequestPtr create(Router& router, Common::Redis::RespValuePtr&& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, - TimeSource& time_source); + TimeSource& time_source, bool delay_command_latency); private: - EvalRequest(SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source) - : SingleServerRequest(callbacks, command_stats, time_source) {} + EvalRequest(SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, + bool delay_command_latency) + : SingleServerRequest(callbacks, command_stats, time_source, delay_command_latency) {} }; /** @@ -150,8 +211,10 @@ class FragmentedRequest : public SplitRequestBase { void cancel() override; protected: - FragmentedRequest(SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source) - : SplitRequestBase(command_stats, time_source), callbacks_(callbacks) {} + FragmentedRequest(SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, + bool delay_command_latency) + : SplitRequestBase(command_stats, time_source, delay_command_latency), callbacks_(callbacks) { + } struct PendingRequest : public ConnPool::PoolCallbacks { PendingRequest(FragmentedRequest& parent, uint32_t index) : parent_(parent), index_(index) {} @@ -186,11 +249,12 @@ class MGETRequest : public FragmentedRequest, Logger::Loggable { public: - InstanceImpl(RouterPtr&& router, Stats::Scope& scope, const std::string& stat_prefix, - TimeSource& time_source, bool latency_in_micros); + InstanceImpl(Router& router, Stats::Scope& scope, const std::string& stat_prefix, + TimeSource& time_source, bool latency_in_micros, + Common::Redis::FaultManager& fault_manager, Event::Dispatcher& dispatcher); // RedisProxy::CommandSplitter::Instance SplitRequestPtr makeRequest(Common::Redis::RespValuePtr&& request, @@ -287,7 +355,6 @@ class InstanceImpl : public Instance, Logger::Loggable { bool latency_in_micros, CommandHandler& handler); void onInvalidRequest(SplitCallbacks& callbacks); - RouterPtr router_; CommandHandlerFactory simple_command_handler_; CommandHandlerFactory eval_command_handler_; CommandHandlerFactory mget_handler_; @@ -296,6 +363,27 @@ class InstanceImpl : public Instance, Logger::Loggable { TrieLookupTable handler_lookup_table_; InstanceStats stats_; TimeSource& time_source_; + Common::Redis::FaultManager& fault_manager_; + Event::Dispatcher& dispatcher_; +}; + +class CommandSplitterFactoryImpl : public CommandSplitterFactory { +public: + CommandSplitterFactoryImpl(RouterPtr&& router, Common::Redis::FaultManagerPtr fault_manager, + Stats::Scope& scope, const std::string& stat_prefix, + TimeSource& time_source, bool latency_in_micros) + : router_(std::move(router)), 
fault_manager_(std::move(fault_manager)), scope_(scope), + stat_prefix_(stat_prefix), time_source_(time_source), + latency_in_micros_(latency_in_micros){}; + CommandSplitterPtr create(Event::Dispatcher& dispatcher) override; + +private: + RouterPtr router_; + Common::Redis::FaultManagerPtr fault_manager_; + Stats::Scope& scope_; + const std::string& stat_prefix_; + TimeSource& time_source_; + bool latency_in_micros_; }; } // namespace CommandSplitter diff --git a/source/extensions/filters/network/redis_proxy/config.cc b/source/extensions/filters/network/redis_proxy/config.cc index 2d62f511b393..c75e61b2fcb0 100644 --- a/source/extensions/filters/network/redis_proxy/config.cc +++ b/source/extensions/filters/network/redis_proxy/config.cc @@ -5,6 +5,7 @@ #include "extensions/common/redis/cluster_refresh_manager_impl.h" #include "extensions/filters/network/common/redis/client_impl.h" +#include "extensions/filters/network/common/redis/fault_impl.h" #include "extensions/filters/network/redis_proxy/command_splitter_impl.h" #include "extensions/filters/network/redis_proxy/proxy_filter.h" #include "extensions/filters/network/redis_proxy/router_impl.h" @@ -86,14 +87,18 @@ Network::FilterFactoryCb RedisProxyFilterConfigFactory::createFilterFactoryFromP auto router = std::make_unique(prefix_routes, std::move(upstreams), context.runtime()); - std::shared_ptr splitter = - std::make_shared( - std::move(router), context.scope(), filter_config->stat_prefix_, context.timeSource(), - proto_config.latency_in_micros()); - return [splitter, filter_config](Network::FilterManager& filter_manager) -> void { + auto fault_manager = std::make_unique( + context.random(), context.runtime(), proto_config.faults()); + + auto splitter_factory = std::make_shared( + std::move(router), std::move(fault_manager), context.scope(), filter_config->stat_prefix_, + context.timeSource(), proto_config.latency_in_micros()); + + return [splitter_factory, refresh_manager, + filter_config](Network::FilterManager& filter_manager) -> void { Common::Redis::DecoderFactoryImpl factory; filter_manager.addReadFilter(std::make_shared( - factory, Common::Redis::EncoderPtr{new Common::Redis::EncoderImpl()}, *splitter, + factory, Common::Redis::EncoderPtr{new Common::Redis::EncoderImpl()}, *splitter_factory, filter_config)); }; } diff --git a/source/extensions/filters/network/redis_proxy/proxy_filter.cc b/source/extensions/filters/network/redis_proxy/proxy_filter.cc index aa2f558cc51a..bbbadb29de5d 100644 --- a/source/extensions/filters/network/redis_proxy/proxy_filter.cc +++ b/source/extensions/filters/network/redis_proxy/proxy_filter.cc @@ -34,10 +34,11 @@ ProxyStats ProxyFilterConfig::generateStats(const std::string& prefix, Stats::Sc } ProxyFilter::ProxyFilter(Common::Redis::DecoderFactory& factory, - Common::Redis::EncoderPtr&& encoder, CommandSplitter::Instance& splitter, + Common::Redis::EncoderPtr&& encoder, + CommandSplitter::CommandSplitterFactory& splitter_factory, ProxyFilterConfigSharedPtr config) - : decoder_(factory.create(*this)), encoder_(std::move(encoder)), splitter_(splitter), - config_(config) { + : decoder_(factory.create(*this)), encoder_(std::move(encoder)), + splitter_factory_(splitter_factory), config_(config) { config_->stats_.downstream_cx_total_.inc(); config_->stats_.downstream_cx_active_.inc(); connection_allowed_ = @@ -57,12 +58,13 @@ void ProxyFilter::initializeReadFilterCallbacks(Network::ReadFilterCallbacks& ca config_->stats_.downstream_cx_tx_bytes_total_, config_->stats_.downstream_cx_tx_bytes_buffered_, 
nullptr, nullptr}); + splitter_ = splitter_factory_.create(callbacks_->connection().dispatcher()); } void ProxyFilter::onRespValue(Common::Redis::RespValuePtr&& value) { pending_requests_.emplace_back(*this); PendingRequest& request = pending_requests_.back(); - CommandSplitter::SplitRequestPtr split = splitter_.makeRequest(std::move(value), request); + CommandSplitter::SplitRequestPtr split = splitter_->makeRequest(std::move(value), request); if (split) { // The splitter can immediately respond and destroy the pending request. Only store the handle // if the request is still alive. diff --git a/source/extensions/filters/network/redis_proxy/proxy_filter.h b/source/extensions/filters/network/redis_proxy/proxy_filter.h index 1694a2a0640e..4b7877bc00d7 100644 --- a/source/extensions/filters/network/redis_proxy/proxy_filter.h +++ b/source/extensions/filters/network/redis_proxy/proxy_filter.h @@ -75,7 +75,8 @@ class ProxyFilter : public Network::ReadFilter, public Network::ConnectionCallbacks { public: ProxyFilter(Common::Redis::DecoderFactory& factory, Common::Redis::EncoderPtr&& encoder, - CommandSplitter::Instance& splitter, ProxyFilterConfigSharedPtr config); + CommandSplitter::CommandSplitterFactory& splitter_factory, + ProxyFilterConfigSharedPtr config); ~ProxyFilter() override; // Network::ReadFilter @@ -94,6 +95,8 @@ class ProxyFilter : public Network::ReadFilter, bool connectionAllowed() { return connection_allowed_; } private: + friend class RedisProxyFilterTest; + struct PendingRequest : public CommandSplitter::SplitCallbacks { PendingRequest(ProxyFilter& parent); ~PendingRequest() override; @@ -119,7 +122,8 @@ class ProxyFilter : public Network::ReadFilter, Common::Redis::DecoderPtr decoder_; Common::Redis::EncoderPtr encoder_; - CommandSplitter::Instance& splitter_; + CommandSplitter::CommandSplitterFactory& splitter_factory_; + CommandSplitter::CommandSplitterPtr splitter_; ProxyFilterConfigSharedPtr config_; Buffer::OwnedImpl encoder_buffer_; Network::ReadFilterCallbacks* callbacks_{}; diff --git a/test/extensions/filters/network/common/redis/BUILD b/test/extensions/filters/network/common/redis/BUILD index dffc23954488..e8e445c8b608 100644 --- a/test/extensions/filters/network/common/redis/BUILD +++ b/test/extensions/filters/network/common/redis/BUILD @@ -60,3 +60,16 @@ envoy_cc_test( "@envoy_api//envoy/extensions/filters/network/redis_proxy/v3:pkg_cc_proto", ], ) + +envoy_cc_test( + name = "fault_test", + srcs = ["fault_test.cc"], + deps = [ + ":redis_mocks", + "//source/common/common:assert_lib", + "//source/extensions/filters/network/common/redis:fault_lib", + "//test/mocks/runtime:runtime_mocks", + "//test/test_common:test_runtime_lib", + "//test/test_common:utility_lib", + ], +) diff --git a/test/extensions/filters/network/common/redis/fault_test.cc b/test/extensions/filters/network/common/redis/fault_test.cc new file mode 100644 index 000000000000..a80caf5c2d2a --- /dev/null +++ b/test/extensions/filters/network/common/redis/fault_test.cc @@ -0,0 +1,206 @@ +#include "envoy/common/random_generator.h" + +#include "common/common/assert.h" + +#include "extensions/filters/network/common/redis/fault_impl.h" + +#include "test/extensions/filters/network/common/redis/mocks.h" +#include "test/mocks/runtime/mocks.h" +#include "test/test_common/printers.h" +#include "test/test_common/test_runtime.h" +#include "test/test_common/utility.h" + +#include "gtest/gtest.h" + +using testing::Return; + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Common { 
+namespace Redis { + +using RedisProxy = envoy::extensions::filters::network::redis_proxy::v3::RedisProxy; +using FractionalPercent = envoy::type::v3::FractionalPercent; +class FaultTest : public testing::Test { +public: + const std::string RUNTIME_KEY = "runtime_key"; + + void + createCommandFault(RedisProxy::RedisFault* fault, std::string command_str, int delay_seconds, + absl::optional fault_percentage, + absl::optional denominator, + absl::optional runtime_key) { + // We don't set fault type as it isn't used in the test + + auto* commands = fault->mutable_commands(); + auto* command = commands->Add(); + command->assign(command_str); + + fault->set_fault_type(envoy::extensions::filters::network::redis_proxy::v3:: + RedisProxy_RedisFault_RedisFaultType_ERROR); + + addFaultPercentage(fault, fault_percentage, denominator, runtime_key); + addDelay(fault, delay_seconds); + } + + void + createAllKeyFault(RedisProxy::RedisFault* fault, int delay_seconds, + absl::optional fault_percentage, + absl::optional denominator, + absl::optional runtime_key) { + addFaultPercentage(fault, fault_percentage, denominator, runtime_key); + addDelay(fault, delay_seconds); + } + + void + addFaultPercentage(RedisProxy::RedisFault* fault, absl::optional fault_percentage, + absl::optional denominator, + absl::optional runtime_key) { + envoy::config::core::v3::RuntimeFractionalPercent* fault_enabled = + fault->mutable_fault_enabled(); + + if (runtime_key.has_value()) { + fault_enabled->set_runtime_key(runtime_key.value()); + } + auto* percentage = fault_enabled->mutable_default_value(); + if (fault_percentage.has_value()) { + percentage->set_numerator(fault_percentage.value()); + } + if (denominator.has_value()) { + percentage->set_denominator(denominator.value()); + } + } + + void addDelay(RedisProxy::RedisFault* fault, int delay_seconds) { + std::chrono::seconds duration = std::chrono::seconds(delay_seconds); + fault->mutable_delay()->set_seconds(duration.count()); + } + + testing::NiceMock random_; + testing::NiceMock runtime_; +}; + +TEST_F(FaultTest, MakeFaultForTestHelper) { + Common::Redis::FaultSharedPtr fault_ptr = + FaultManagerImpl::makeFaultForTest(FaultType::Error, std::chrono::milliseconds(10)); + + ASSERT_TRUE(fault_ptr->faultType() == FaultType::Error); + ASSERT_TRUE(fault_ptr->delayMs() == std::chrono::milliseconds(10)); +} + +TEST_F(FaultTest, NoFaults) { + RedisProxy redis_config; + auto* faults = redis_config.mutable_faults(); + + TestScopedRuntime scoped_runtime; + FaultManagerImpl fault_manager = FaultManagerImpl(random_, runtime_, *faults); + + const Fault* fault_ptr = fault_manager.getFaultForCommand("get"); + ASSERT_TRUE(fault_ptr == nullptr); +} + +TEST_F(FaultTest, SingleCommandFaultNotEnabled) { + RedisProxy redis_config; + auto* faults = redis_config.mutable_faults(); + createCommandFault(faults->Add(), "get", 0, 0, FractionalPercent::HUNDRED, RUNTIME_KEY); + + TestScopedRuntime scoped_runtime; + FaultManagerImpl fault_manager = FaultManagerImpl(random_, runtime_, *faults); + + EXPECT_CALL(random_, random()).WillOnce(Return(0)); + EXPECT_CALL(runtime_, snapshot()); + const Fault* fault_ptr = fault_manager.getFaultForCommand("get"); + ASSERT_TRUE(fault_ptr == nullptr); +} + +TEST_F(FaultTest, SingleCommandFault) { + // Inject a single fault. Notably we use a different denominator to test that code path; normally + // we use FractionalPercent::HUNDRED. 
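For reference, the 5000/TEN_THOUSAND value used in the test below exercises getIntegerNumeratorOfFractionalPercent(), which normalizes the configured default to a per-hundred numerator before consulting runtime:

    // (5000 * 100) / 10000 = 50
    runtime_.snapshot().getInteger("runtime_key", 50)

That is why the EXPECT_CALL further down expects a default of 50, while the mocked runtime override of 10 is what actually gates the fault against the random roll of 1.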
+ RedisProxy redis_config; + auto* faults = redis_config.mutable_faults(); + createCommandFault(faults->Add(), "ttl", 0, 5000, FractionalPercent::TEN_THOUSAND, RUNTIME_KEY); + + TestScopedRuntime scoped_runtime; + FaultManagerImpl fault_manager = FaultManagerImpl(random_, runtime_, *faults); + + EXPECT_CALL(random_, random()).WillOnce(Return(1)); + EXPECT_CALL(runtime_.snapshot_, getInteger(RUNTIME_KEY, 50)).WillOnce(Return(10)); + + const Fault* fault_ptr = fault_manager.getFaultForCommand("ttl"); + ASSERT_TRUE(fault_ptr != nullptr); +} + +TEST_F(FaultTest, SingleCommandFaultWithNoDefaultValueOrRuntimeValue) { + // Inject a single fault with no default value or runtime value. + RedisProxy redis_config; + auto* faults = redis_config.mutable_faults(); + createCommandFault(faults->Add(), "ttl", 0, absl::nullopt, absl::nullopt, absl::nullopt); + + TestScopedRuntime scoped_runtime; + FaultManagerImpl fault_manager = FaultManagerImpl(random_, runtime_, *faults); + + EXPECT_CALL(random_, random()).WillOnce(Return(1)); + const Fault* fault_ptr = fault_manager.getFaultForCommand("ttl"); + ASSERT_TRUE(fault_ptr == nullptr); +} + +TEST_F(FaultTest, MultipleFaults) { + // This creates 2 faults, but the map will have 3 entries, as each command points to + // command specific faults AND the general fault. The second fault has no runtime key, + // forcing the runtime key check to be false in application code and falling back to the + // default value. + RedisProxy redis_config; + auto* faults = redis_config.mutable_faults(); + createCommandFault(faults->Add(), "get", 0, 25, FractionalPercent::HUNDRED, RUNTIME_KEY); + createAllKeyFault(faults->Add(), 2, 25, FractionalPercent::HUNDRED, absl::nullopt); + + TestScopedRuntime scoped_runtime; + FaultManagerImpl fault_manager = FaultManagerImpl(random_, runtime_, *faults); + const Fault* fault_ptr; + + // Get command - should have a fault 50% of time + // For the first call we mock the random percentage to be 10%, which will give us the first fault + // with 0s delay. + EXPECT_CALL(random_, random()).WillOnce(Return(1)); + EXPECT_CALL(runtime_.snapshot_, getInteger(_, 25)).WillOnce(Return(10)); + fault_ptr = fault_manager.getFaultForCommand("get"); + ASSERT_TRUE(fault_ptr != nullptr); + ASSERT_EQ(fault_ptr->delayMs(), std::chrono::milliseconds(0)); + + // Another Get; we mock the random percentage to be 25%, giving us the ALL_KEY fault + EXPECT_CALL(random_, random()).WillOnce(Return(25)); + EXPECT_CALL(runtime_.snapshot_, getInteger(_, _)) + .Times(2) + .WillOnce(Return(10)) + .WillOnce(Return(50)); + fault_ptr = fault_manager.getFaultForCommand("get"); + ASSERT_TRUE(fault_ptr != nullptr); + ASSERT_EQ(fault_ptr->delayMs(), std::chrono::milliseconds(2000)); + + // No fault for Get command with mocked random percentage >= 50%. + EXPECT_CALL(random_, random()).WillOnce(Return(50)); + EXPECT_CALL(runtime_.snapshot_, getInteger(_, _)).Times(2); + fault_ptr = fault_manager.getFaultForCommand("get"); + ASSERT_TRUE(fault_ptr == nullptr); + + // Any other command; we mock the random percentage to be 1%, giving us the ALL_KEY fault + EXPECT_CALL(random_, random()).WillOnce(Return(1)); + EXPECT_CALL(runtime_.snapshot_, getInteger(_, _)).WillOnce(Return(10)); + + fault_ptr = fault_manager.getFaultForCommand("ttl"); + ASSERT_TRUE(fault_ptr != nullptr); + ASSERT_EQ(fault_ptr->delayMs(), std::chrono::milliseconds(2000)); + + // No fault for any other command with mocked random percentage >= 25%. 
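Ignoring the runtime overrides the mocks inject, the amortization described in fault_impl.cc stacks the two 25% faults configured for GET above into disjoint bands of a single roll in [0, 100):

    roll in [0, 25)  -> command-specific ERROR fault
    roll in [25, 50) -> ALL_KEY DELAY fault (2s)
    roll >= 50       -> no fault

so a command never receives more than one fault per request; the final checks below cover the ALL_KEY fallback for a command with no fault of its own and a roll past that band.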
+ EXPECT_CALL(random_, random()).WillOnce(Return(25)); + EXPECT_CALL(runtime_.snapshot_, getInteger(_, _)); + fault_ptr = fault_manager.getFaultForCommand("ttl"); + ASSERT_TRUE(fault_ptr == nullptr); +} + +} // namespace Redis +} // namespace Common +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/network/redis_proxy/BUILD b/test/extensions/filters/network/redis_proxy/BUILD index 3f83e1eeda4b..24d847f5306f 100644 --- a/test/extensions/filters/network/redis_proxy/BUILD +++ b/test/extensions/filters/network/redis_proxy/BUILD @@ -18,16 +18,22 @@ envoy_extension_cc_test( name = "command_splitter_impl_test", srcs = ["command_splitter_impl_test.cc"], extension_name = "envoy.filters.network.redis_proxy", + # This test takes a while to run specially under tsan. + # Shard it to avoid test timeout. + shard_count = 2, deps = [ ":redis_mocks", "//source/common/stats:isolated_store_lib", "//source/common/stats:stats_lib", + "//source/extensions/filters/network/common/redis:fault_lib", "//source/extensions/filters/network/redis_proxy:command_splitter_lib", "//source/extensions/filters/network/redis_proxy:router_interface", "//test/extensions/filters/network/common/redis:redis_mocks", "//test/mocks:common_lib", + "//test/mocks/event:event_mocks", "//test/mocks/stats:stats_mocks", "//test/test_common:simulated_time_system_lib", + "//test/test_common:test_runtime_lib", ], ) @@ -85,6 +91,7 @@ envoy_cc_mock( "//source/extensions/common/redis:cluster_refresh_manager_interface", "//source/extensions/filters/network/common/redis:client_interface", "//source/extensions/filters/network/common/redis:codec_lib", + "//source/extensions/filters/network/common/redis:fault_interface", "//source/extensions/filters/network/redis_proxy:command_splitter_interface", "//source/extensions/filters/network/redis_proxy:conn_pool_interface", "//source/extensions/filters/network/redis_proxy:router_interface", @@ -116,6 +123,7 @@ envoy_extension_cc_benchmark_binary( "//source/common/stats:isolated_store_lib", "//source/common/stats:stats_lib", "//source/extensions/filters/network/redis_proxy:command_splitter_lib", + "//test/mocks/event:event_mocks", "//test/test_common:printers_lib", "//test/test_common:simulated_time_system_lib", ], @@ -148,6 +156,7 @@ envoy_extension_cc_test( extension_name = "envoy.filters.network.redis_proxy", tags = ["fails_on_windows"], deps = [ + "//source/extensions/filters/network/common/redis:fault_lib", "//source/extensions/filters/network/redis_proxy:config", "//test/integration:integration_lib", ], diff --git a/test/extensions/filters/network/redis_proxy/command_lookup_speed_test.cc b/test/extensions/filters/network/redis_proxy/command_lookup_speed_test.cc index edf29c973092..d7ddc451bcb4 100644 --- a/test/extensions/filters/network/redis_proxy/command_lookup_speed_test.cc +++ b/test/extensions/filters/network/redis_proxy/command_lookup_speed_test.cc @@ -12,10 +12,14 @@ #include "extensions/filters/network/common/redis/supported_commands.h" #include "extensions/filters/network/redis_proxy/command_splitter_impl.h" +#include "test/extensions/filters/network/redis_proxy/mocks.h" +#include "test/mocks/event/mocks.h" #include "test/test_common/simulated_time_system.h" #include "benchmark/benchmark.h" +using testing::NiceMock; + namespace Envoy { namespace Extensions { namespace NetworkFilters { @@ -64,11 +68,13 @@ class CommandLookUpSpeedTest { } } - Router* router_{new NullRouterImpl()}; + RouterPtr router_{std::make_unique()}; 
Stats::IsolatedStoreImpl store_; Event::SimulatedTimeSystem time_system_; - CommandSplitter::InstanceImpl splitter_{RouterPtr{router_}, store_, "redis.foo.", time_system_, - false}; + NiceMock fault_manager_; + NiceMock dispatcher_; + CommandSplitter::InstanceImpl splitter_{*router_, store_, "redis.foo.", time_system_, + false, fault_manager_, dispatcher_}; NoOpSplitCallbacks callbacks_; CommandSplitter::SplitRequestPtr handle_; }; diff --git a/test/extensions/filters/network/redis_proxy/command_splitter_impl_test.cc b/test/extensions/filters/network/redis_proxy/command_splitter_impl_test.cc index 097cb3d49f4c..e864cb29e65c 100644 --- a/test/extensions/filters/network/redis_proxy/command_splitter_impl_test.cc +++ b/test/extensions/filters/network/redis_proxy/command_splitter_impl_test.cc @@ -6,12 +6,14 @@ #include "common/common/fmt.h" #include "common/stats/isolated_store_impl.h" +#include "extensions/filters/network/common/redis/fault_impl.h" #include "extensions/filters/network/common/redis/supported_commands.h" #include "extensions/filters/network/redis_proxy/command_splitter_impl.h" #include "test/extensions/filters/network/common/redis/mocks.h" #include "test/extensions/filters/network/redis_proxy/mocks.h" #include "test/mocks/common.h" +#include "test/mocks/event/mocks.h" #include "test/mocks/stats/mocks.h" #include "test/test_common/simulated_time_system.h" @@ -32,7 +34,12 @@ namespace CommandSplitter { class RedisCommandSplitterImplTest : public testing::Test { public: RedisCommandSplitterImplTest() : RedisCommandSplitterImplTest(false) {} - RedisCommandSplitterImplTest(bool latency_in_macro) : latency_in_micros_(latency_in_macro) {} + RedisCommandSplitterImplTest(bool latency_in_macro) + : RedisCommandSplitterImplTest(latency_in_macro, nullptr) {} + RedisCommandSplitterImplTest(bool latency_in_macro, Common::Redis::FaultSharedPtr fault_ptr) + : latency_in_micros_(latency_in_macro) { + ON_CALL(fault_manager_, getFaultForCommand(_)).WillByDefault(Return(fault_ptr.get())); + } void makeBulkStringArray(Common::Redis::RespValue& value, const std::vector& strings) { std::vector values(strings.size()); @@ -52,14 +59,18 @@ class RedisCommandSplitterImplTest : public testing::Test { const bool latency_in_micros_; ConnPool::MockInstance* conn_pool_{new ConnPool::MockInstance()}; + ConnPool::InstanceSharedPtr conn_pool_shared_ptr_{conn_pool_}; ConnPool::MockInstance* mirror_conn_pool_{new ConnPool::MockInstance()}; ConnPool::InstanceSharedPtr mirror_conn_pool_shared_ptr_{mirror_conn_pool_}; - std::shared_ptr> route_{ - new NiceMock(ConnPool::InstanceSharedPtr{conn_pool_})}; + std::shared_ptr> route_{new NiceMock(conn_pool_shared_ptr_)}; + std::shared_ptr> router_{new NiceMock(route_)}; NiceMock store_; + NiceMock dispatcher_; + NiceMock fault_manager_; + Event::SimulatedTimeSystem time_system_; - InstanceImpl splitter_{std::make_unique>(route_), store_, "redis.foo.", - time_system_, latency_in_micros_}; + InstanceImpl splitter_{*router_, store_, "redis.foo.", time_system_, + latency_in_micros_, fault_manager_, dispatcher_}; MockSplitCallbacks callbacks_; SplitRequestPtr handle_; }; @@ -993,6 +1004,147 @@ INSTANTIATE_TEST_SUITE_P(RedisSingleServerRequestWithLatencyMicrosTest, RedisSingleServerRequestWithLatencyMicrosTest, testing::ValuesIn(Common::Redis::SupportedCommands::simpleCommands())); +// In subclasses of fault test, we mock the expected faults in the constructor, as the +// fault manager is owned by the splitter, which is also generated later in construction +// of the base test 
class. +class RedisSingleServerRequestWithFaultTest : public RedisSingleServerRequestTest { +public: + NiceMock* timer_; + Event::TimerCb timer_cb_; + int delay_ms_; + Common::Redis::FaultSharedPtr fault_ptr_; +}; + +class RedisSingleServerRequestWithErrorFaultTest : public RedisSingleServerRequestWithFaultTest { +public: + RedisSingleServerRequestWithErrorFaultTest() { + delay_ms_ = 0; + fault_ptr_ = Common::Redis::FaultManagerImpl::makeFaultForTest( + Common::Redis::FaultType::Error, std::chrono::milliseconds(delay_ms_)); + ON_CALL(fault_manager_, getFaultForCommand(_)).WillByDefault(Return(fault_ptr_.get())); + } +}; + +TEST_P(RedisSingleServerRequestWithErrorFaultTest, Fault) { + InSequence s; + + std::string lower_command = absl::AsciiStrToLower(GetParam()); + Common::Redis::RespValuePtr request{new Common::Redis::RespValue()}; + makeBulkStringArray(*request, {GetParam(), "hello"}); + + EXPECT_CALL(callbacks_, connectionAllowed()).WillOnce(Return(true)); + EXPECT_CALL(callbacks_, onResponse_(_)); + handle_ = splitter_.makeRequest(std::move(request), callbacks_); + EXPECT_EQ(nullptr, handle_); + + EXPECT_EQ(1UL, store_.counter(fmt::format("redis.foo.command.{}.total", lower_command)).value()); + EXPECT_EQ(1UL, store_.counter(fmt::format("redis.foo.command.{}.error", lower_command)).value()); + EXPECT_EQ(1UL, + store_.counter(fmt::format("redis.foo.command.{}.error_fault", lower_command)).value()); +}; + +class RedisSingleServerRequestWithErrorWithDelayFaultTest + : public RedisSingleServerRequestWithFaultTest { +public: + RedisSingleServerRequestWithErrorWithDelayFaultTest() { + delay_ms_ = 5; + fault_ptr_ = Common::Redis::FaultManagerImpl::makeFaultForTest( + Common::Redis::FaultType::Error, std::chrono::milliseconds(delay_ms_)); + ON_CALL(fault_manager_, getFaultForCommand(_)).WillByDefault(Return(fault_ptr_.get())); + timer_ = new NiceMock(); + } +}; + +INSTANTIATE_TEST_SUITE_P(RedisSingleServerRequestWithErrorFaultTest, + RedisSingleServerRequestWithErrorFaultTest, + testing::ValuesIn(Common::Redis::SupportedCommands::simpleCommands())); + +TEST_P(RedisSingleServerRequestWithErrorWithDelayFaultTest, Fault) { + InSequence s; + + std::string lower_command = absl::AsciiStrToLower(GetParam()); + Common::Redis::RespValuePtr request{new Common::Redis::RespValue()}; + makeBulkStringArray(*request, {GetParam(), "hello"}); + + // As error faults have zero latency, recorded latency is equal to the delay. 
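The latency bookkeeping behind that comment: when a delay fault is active, the wrapped request is built with delay_command_latency = true and never starts its own Timespan; the DelayFaultRequest (built with false) owns the command latency and completes it only in onDelayResponse(). Roughly:

    makeRequest()              // DelayFaultRequest starts the latency Timespan
    wrapped request finishes   // response buffered, delay timer armed
    timer fires after delay    // Timespan completed, response finally delivered

Hence the histogram value asserted below equals delay_ms_ exactly, since the injected error itself costs no upstream time.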
+ EXPECT_CALL(callbacks_, connectionAllowed()).WillOnce(Return(true)); + EXPECT_CALL(dispatcher_, createTimer_(_)).WillOnce(Invoke([this](Event::TimerCb timer_cb) { + timer_cb_ = timer_cb; + return timer_; + })); + + handle_ = splitter_.makeRequest(std::move(request), callbacks_); + EXPECT_NE(nullptr, handle_); + time_system_.setMonotonicTime(std::chrono::milliseconds(delay_ms_)); + EXPECT_CALL(store_, deliverHistogramToSinks( + Property(&Stats::Metric::name, + fmt::format("redis.foo.command.{}.latency", lower_command)), + delay_ms_)); + EXPECT_CALL(callbacks_, onResponse_(_)); + timer_cb_(); + + EXPECT_EQ(1UL, store_.counter(fmt::format("redis.foo.command.{}.total", lower_command)).value()); + EXPECT_EQ(1UL, store_.counter(fmt::format("redis.foo.command.{}.error", lower_command)).value()); + EXPECT_EQ(1UL, + store_.counter(fmt::format("redis.foo.command.{}.error_fault", lower_command)).value()); +}; + +INSTANTIATE_TEST_SUITE_P(RedisSingleServerRequestWithErrorWithDelayFaultTest, + RedisSingleServerRequestWithErrorWithDelayFaultTest, + testing::ValuesIn(Common::Redis::SupportedCommands::simpleCommands())); + +class RedisSingleServerRequestWithDelayFaultTest : public RedisSingleServerRequestWithFaultTest { +public: + RedisSingleServerRequestWithDelayFaultTest() { + delay_ms_ = 15; + fault_ptr_ = Common::Redis::FaultManagerImpl::makeFaultForTest( + Common::Redis::FaultType::Delay, std::chrono::milliseconds(delay_ms_)); + ON_CALL(fault_manager_, getFaultForCommand(_)).WillByDefault(Return(fault_ptr_.get())); + timer_ = new NiceMock(); + } +}; + +TEST_P(RedisSingleServerRequestWithDelayFaultTest, Fault) { + InSequence s; + + std::string lower_command = absl::AsciiStrToLower(GetParam()); + std::string hash_key = "hello"; + + Common::Redis::RespValuePtr request{new Common::Redis::RespValue()}; + makeBulkStringArray(*request, {GetParam(), "hello"}); + + EXPECT_CALL(callbacks_, connectionAllowed()).WillOnce(Return(true)); + EXPECT_CALL(dispatcher_, createTimer_(_)).WillOnce(Invoke([this](Event::TimerCb timer_cb) { + timer_cb_ = timer_cb; + return timer_; + })); + EXPECT_CALL(*conn_pool_, makeRequest_(hash_key, RespVariantEq(*request), _)) + .WillOnce(DoAll(WithArg<2>(SaveArgAddress(&pool_callbacks_)), Return(&pool_request_))); + + handle_ = splitter_.makeRequest(std::move(request), callbacks_); + + EXPECT_NE(nullptr, handle_); + + EXPECT_CALL(store_, deliverHistogramToSinks( + Property(&Stats::Metric::name, + fmt::format("redis.foo.command.{}.latency", lower_command)), + delay_ms_)); + respond(); + + time_system_.setMonotonicTime(std::chrono::milliseconds(delay_ms_)); + timer_cb_(); + + EXPECT_EQ(1UL, store_.counter(fmt::format("redis.foo.command.{}.total", lower_command)).value()); + EXPECT_EQ(1UL, + store_.counter(fmt::format("redis.foo.command.{}.success", lower_command)).value()); + EXPECT_EQ(1UL, + store_.counter(fmt::format("redis.foo.command.{}.delay_fault", lower_command)).value()); +}; + +INSTANTIATE_TEST_SUITE_P(RedisSingleServerRequestWithDelayFaultTest, + RedisSingleServerRequestWithDelayFaultTest, + testing::ValuesIn(Common::Redis::SupportedCommands::simpleCommands())); + } // namespace CommandSplitter } // namespace RedisProxy } // namespace NetworkFilters diff --git a/test/extensions/filters/network/redis_proxy/config_test.cc b/test/extensions/filters/network/redis_proxy/config_test.cc index a9043af8cd6e..155b72689284 100644 --- a/test/extensions/filters/network/redis_proxy/config_test.cc +++ b/test/extensions/filters/network/redis_proxy/config_test.cc @@ -169,6 +169,43 @@ 
stat_prefix: foo cb(connection); } +TEST(RedisProxyFilterConfigFactoryTest, RedisProxyFaultProto) { + const std::string yaml = R"EOF( +prefix_routes: + catch_all_route: + cluster: fake_cluster +stat_prefix: foo +faults: +- fault_type: ERROR + fault_enabled: + default_value: + numerator: 30 + denominator: HUNDRED + runtime_key: "bogus_key" + commands: + - GET +- fault_type: DELAY + fault_enabled: + default_value: + numerator: 20 + denominator: HUNDRED + runtime_key: "bogus_key" + delay: 2s +settings: + op_timeout: 0.02s + )EOF"; + + envoy::extensions::filters::network::redis_proxy::v3::RedisProxy proto_config{}; + TestUtility::loadFromYamlAndValidate(yaml, proto_config); + NiceMock context; + RedisProxyFilterConfigFactory factory; + Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, context); + EXPECT_TRUE(factory.isTerminalFilter()); + Network::MockConnection connection; + EXPECT_CALL(connection, addReadFilter(_)); + cb(connection); +} + // Test that the deprecated extension name still functions. TEST(RedisProxyFilterConfigFactoryTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) { const std::string deprecated_name = "envoy.redis_proxy"; diff --git a/test/extensions/filters/network/redis_proxy/mocks.cc b/test/extensions/filters/network/redis_proxy/mocks.cc index d51809ba27f6..fa1ff637af61 100644 --- a/test/extensions/filters/network/redis_proxy/mocks.cc +++ b/test/extensions/filters/network/redis_proxy/mocks.cc @@ -26,6 +26,10 @@ MockMirrorPolicy::MockMirrorPolicy(ConnPool::InstanceSharedPtr conn_pool) ON_CALL(*this, shouldMirror(_)).WillByDefault(Return(true)); } +MockFaultManager::MockFaultManager() = default; +MockFaultManager::MockFaultManager(const MockFaultManager&) {} +MockFaultManager::~MockFaultManager() = default; + namespace ConnPool { MockPoolCallbacks::MockPoolCallbacks() = default; @@ -47,6 +51,9 @@ MockSplitCallbacks::~MockSplitCallbacks() = default; MockInstance::MockInstance() = default; MockInstance::~MockInstance() = default; +MockCommandSplitterFactory::MockCommandSplitterFactory() = default; +MockCommandSplitterFactory::~MockCommandSplitterFactory() = default; + } // namespace CommandSplitter } // namespace RedisProxy } // namespace NetworkFilters diff --git a/test/extensions/filters/network/redis_proxy/mocks.h b/test/extensions/filters/network/redis_proxy/mocks.h index b093ad35b9b9..d76a1f2598b7 100644 --- a/test/extensions/filters/network/redis_proxy/mocks.h +++ b/test/extensions/filters/network/redis_proxy/mocks.h @@ -7,6 +7,7 @@ #include "extensions/common/redis/cluster_refresh_manager.h" #include "extensions/filters/network/common/redis/client.h" #include "extensions/filters/network/common/redis/codec_impl.h" +#include "extensions/filters/network/common/redis/fault.h" #include "extensions/filters/network/redis_proxy/command_splitter.h" #include "extensions/filters/network/redis_proxy/conn_pool.h" #include "extensions/filters/network/redis_proxy/router.h" @@ -51,6 +52,15 @@ class MockMirrorPolicy : public MirrorPolicy { ConnPool::InstanceSharedPtr conn_pool_; }; +class MockFaultManager : public Common::Redis::FaultManager { +public: + MockFaultManager(); + MockFaultManager(const MockFaultManager& other); + ~MockFaultManager() override; + + MOCK_METHOD(const Common::Redis::Fault*, getFaultForCommand, (const std::string&), (const)); +}; + namespace ConnPool { class MockPoolCallbacks : public PoolCallbacks { @@ -119,6 +129,16 @@ class MockInstance : public Instance { (const Common::Redis::RespValue& request, SplitCallbacks& 
callbacks)); }; +class MockCommandSplitterFactory : public CommandSplitterFactory { +public: + MockCommandSplitterFactory(); + ~MockCommandSplitterFactory() override; + + CommandSplitterPtr create(Event::Dispatcher& dispatcher) override { return create_(dispatcher); }; + + MOCK_METHOD(CommandSplitterPtr, create_, (Event::Dispatcher & dispatcher)); +}; + } // namespace CommandSplitter } // namespace RedisProxy } // namespace NetworkFilters diff --git a/test/extensions/filters/network/redis_proxy/proxy_filter_test.cc b/test/extensions/filters/network/redis_proxy/proxy_filter_test.cc index f094c02b665a..66def36798c0 100644 --- a/test/extensions/filters/network/redis_proxy/proxy_filter_test.cc +++ b/test/extensions/filters/network/redis_proxy/proxy_filter_test.cc @@ -19,6 +19,7 @@ #include "gtest/gtest.h" using testing::_; +using testing::ByMove; using testing::ByRef; using testing::DoAll; using testing::Eq; @@ -131,8 +132,12 @@ class RedisProxyFilterTest : public testing::Test, public Common::Redis::Decoder parseProtoFromYaml(yaml_string); config_ = std::make_shared(proto_config, store_, drain_decision_, runtime_, api_); - filter_ = std::make_unique(*this, Common::Redis::EncoderPtr{encoder_}, splitter_, - config_); + + std::unique_ptr splitter_ptr = + std::make_unique(); + EXPECT_CALL(splitter_factory_, create_(_)).WillOnce(Return(ByMove(std::move(splitter_ptr)))); + filter_ = std::make_unique(*this, Common::Redis::EncoderPtr{encoder_}, + splitter_factory_, config_); filter_->initializeReadFilterCallbacks(filter_callbacks_); EXPECT_EQ(Network::FilterStatus::Continue, filter_->onNewConnection()); EXPECT_EQ(1UL, config_->stats_.downstream_cx_total_.value()); @@ -152,6 +157,10 @@ class RedisProxyFilterTest : public testing::Test, public Common::Redis::Decoder } } + CommandSplitter::MockInstance& getSplitter() { + return reinterpret_cast(*filter_->splitter_); + } + // Common::Redis::DecoderFactory Common::Redis::DecoderPtr create(Common::Redis::DecoderCallbacks& callbacks) override { decoder_callbacks_ = &callbacks; @@ -161,7 +170,7 @@ class RedisProxyFilterTest : public testing::Test, public Common::Redis::Decoder Common::Redis::MockEncoder* encoder_{new Common::Redis::MockEncoder()}; Common::Redis::MockDecoder* decoder_{new Common::Redis::MockDecoder()}; Common::Redis::DecoderCallbacks* decoder_callbacks_{}; - CommandSplitter::MockInstance splitter_; + CommandSplitter::MockCommandSplitterFactory splitter_factory_; Stats::TestUtil::TestStore store_; NiceMock drain_decision_; NiceMock runtime_; @@ -181,12 +190,12 @@ TEST_F(RedisProxyFilterTest, OutOfOrderResponseWithDrainClose) { CommandSplitter::SplitCallbacks* request_callbacks2; EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void { Common::Redis::RespValuePtr request1(new Common::Redis::RespValue()); - EXPECT_CALL(splitter_, makeRequest_(Ref(*request1), _)) + EXPECT_CALL(getSplitter(), makeRequest_(Ref(*request1), _)) .WillOnce(DoAll(WithArg<1>(SaveArgAddress(&request_callbacks1)), Return(request_handle1))); decoder_callbacks_->onRespValue(std::move(request1)); Common::Redis::RespValuePtr request2(new Common::Redis::RespValue()); - EXPECT_CALL(splitter_, makeRequest_(Ref(*request2), _)) + EXPECT_CALL(getSplitter(), makeRequest_(Ref(*request2), _)) .WillOnce(DoAll(WithArg<1>(SaveArgAddress(&request_callbacks2)), Return(request_handle2))); decoder_callbacks_->onRespValue(std::move(request2)); })); @@ -222,12 +231,12 @@ TEST_F(RedisProxyFilterTest, OutOfOrderResponseDownstreamDisconnectBeforeFlush) 
CommandSplitter::SplitCallbacks* request_callbacks2; EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void { Common::Redis::RespValuePtr request1(new Common::Redis::RespValue()); - EXPECT_CALL(splitter_, makeRequest_(Ref(*request1), _)) + EXPECT_CALL(getSplitter(), makeRequest_(Ref(*request1), _)) .WillOnce(DoAll(WithArg<1>(SaveArgAddress(&request_callbacks1)), Return(request_handle1))); decoder_callbacks_->onRespValue(std::move(request1)); Common::Redis::RespValuePtr request2(new Common::Redis::RespValue()); - EXPECT_CALL(splitter_, makeRequest_(Ref(*request2), _)) + EXPECT_CALL(getSplitter(), makeRequest_(Ref(*request2), _)) .WillOnce(DoAll(WithArg<1>(SaveArgAddress(&request_callbacks2)), Return(request_handle2))); decoder_callbacks_->onRespValue(std::move(request2)); })); @@ -251,7 +260,7 @@ TEST_F(RedisProxyFilterTest, DownstreamDisconnectWithActive) { CommandSplitter::SplitCallbacks* request_callbacks1; EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void { Common::Redis::RespValuePtr request1(new Common::Redis::RespValue()); - EXPECT_CALL(splitter_, makeRequest_(Ref(*request1), _)) + EXPECT_CALL(getSplitter(), makeRequest_(Ref(*request1), _)) .WillOnce(DoAll(WithArg<1>(SaveArgAddress(&request_callbacks1)), Return(request_handle1))); decoder_callbacks_->onRespValue(std::move(request1)); })); @@ -269,7 +278,7 @@ TEST_F(RedisProxyFilterTest, ImmediateResponse) { EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void { decoder_callbacks_->onRespValue(std::move(request1)); })); - EXPECT_CALL(splitter_, makeRequest_(Ref(*request1), _)) + EXPECT_CALL(getSplitter(), makeRequest_(Ref(*request1), _)) .WillOnce( Invoke([&](const Common::Redis::RespValue&, CommandSplitter::SplitCallbacks& callbacks) -> CommandSplitter::SplitRequest* { @@ -313,7 +322,7 @@ TEST_F(RedisProxyFilterTest, AuthWhenNotRequired) { EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void { decoder_callbacks_->onRespValue(std::move(request)); })); - EXPECT_CALL(splitter_, makeRequest_(Ref(*request), _)) + EXPECT_CALL(getSplitter(), makeRequest_(Ref(*request), _)) .WillOnce( Invoke([&](const Common::Redis::RespValue&, CommandSplitter::SplitCallbacks& callbacks) -> CommandSplitter::SplitRequest* { @@ -340,7 +349,7 @@ TEST_F(RedisProxyFilterTest, AuthAclWhenNotRequired) { EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void { decoder_callbacks_->onRespValue(std::move(request)); })); - EXPECT_CALL(splitter_, makeRequest_(Ref(*request), _)) + EXPECT_CALL(getSplitter(), makeRequest_(Ref(*request), _)) .WillOnce( Invoke([&](const Common::Redis::RespValue&, CommandSplitter::SplitCallbacks& callbacks) -> CommandSplitter::SplitRequest* { @@ -383,7 +392,7 @@ TEST_F(RedisProxyFilterWithAuthPasswordTest, AuthPasswordCorrect) { EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void { decoder_callbacks_->onRespValue(std::move(request)); })); - EXPECT_CALL(splitter_, makeRequest_(Ref(*request), _)) + EXPECT_CALL(getSplitter(), makeRequest_(Ref(*request), _)) .WillOnce( Invoke([&](const Common::Redis::RespValue&, CommandSplitter::SplitCallbacks& callbacks) -> CommandSplitter::SplitRequest* { @@ -410,7 +419,7 @@ TEST_F(RedisProxyFilterWithAuthPasswordTest, AuthPasswordIncorrect) { EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void { 
decoder_callbacks_->onRespValue(std::move(request)); })); - EXPECT_CALL(splitter_, makeRequest_(Ref(*request), _)) + EXPECT_CALL(getSplitter(), makeRequest_(Ref(*request), _)) .WillOnce( Invoke([&](const Common::Redis::RespValue&, CommandSplitter::SplitCallbacks& callbacks) -> CommandSplitter::SplitRequest* { @@ -455,7 +464,7 @@ TEST_F(RedisProxyFilterWithAuthAclTest, AuthAclCorrect) { EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void { decoder_callbacks_->onRespValue(std::move(request)); })); - EXPECT_CALL(splitter_, makeRequest_(Ref(*request), _)) + EXPECT_CALL(getSplitter(), makeRequest_(Ref(*request), _)) .WillOnce( Invoke([&](const Common::Redis::RespValue&, CommandSplitter::SplitCallbacks& callbacks) -> CommandSplitter::SplitRequest* { @@ -482,7 +491,7 @@ TEST_F(RedisProxyFilterWithAuthAclTest, AuthAclUsernameIncorrect) { EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void { decoder_callbacks_->onRespValue(std::move(request)); })); - EXPECT_CALL(splitter_, makeRequest_(Ref(*request), _)) + EXPECT_CALL(getSplitter(), makeRequest_(Ref(*request), _)) .WillOnce( Invoke([&](const Common::Redis::RespValue&, CommandSplitter::SplitCallbacks& callbacks) -> CommandSplitter::SplitRequest* { @@ -509,7 +518,7 @@ TEST_F(RedisProxyFilterWithAuthAclTest, AuthAclPasswordIncorrect) { EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void { decoder_callbacks_->onRespValue(std::move(request)); })); - EXPECT_CALL(splitter_, makeRequest_(Ref(*request), _)) + EXPECT_CALL(getSplitter(), makeRequest_(Ref(*request), _)) .WillOnce( Invoke([&](const Common::Redis::RespValue&, CommandSplitter::SplitCallbacks& callbacks) -> CommandSplitter::SplitRequest* { diff --git a/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc b/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc index dd0e64375b8c..b9760e147206 100644 --- a/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc +++ b/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc @@ -1,6 +1,9 @@ #include #include +#include "common/common/fmt.h" + +#include "extensions/filters/network/common/redis/fault_impl.h" #include "extensions/filters/network/redis_proxy/command_splitter_impl.h" #include "test/integration/integration.h" @@ -52,7 +55,7 @@ const std::string CONFIG = R"EOF( filters: name: redis typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.redis_proxy.v2.RedisProxy + "@type": type.googleapis.com/envoy.extensions.filters.network.redis_proxy.v3.RedisProxy stat_prefix: redis_stats prefix_routes: catch_all_route: @@ -273,6 +276,27 @@ const std::string CONFIG_WITH_ROUTES_AND_AUTH_PASSWORDS = R"EOF( cluster: cluster_2 )EOF"; +// This is a configuration with fault injection enabled. +const std::string CONFIG_WITH_FAULT_INJECTION = CONFIG + R"EOF( + faults: + - fault_type: ERROR + fault_enabled: + default_value: + numerator: 100 + denominator: HUNDRED + commands: + - GET + - fault_type: DELAY + fault_enabled: + default_value: + numerator: 20 + denominator: HUNDRED + runtime_key: "bogus_key" + delay: 2s + commands: + - SET +)EOF"; + // This function encodes commands as an array of bulkstrings as transmitted by Redis clients to // Redis servers, according to the Redis protocol. 
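// For example, makeBulkStringArray({"get", "foo"}) produces the RESP framing
// "*2\r\n$3\r\nget\r\n$3\r\nfoo\r\n": an array header ("*" plus the element count),
// followed by one bulk string per element, each prefixed with "$" and its byte length.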
std::string makeBulkStringArray(std::vector&& command_strings) { @@ -436,6 +460,12 @@ class RedisProxyWithCommandStatsIntegrationTest : public RedisProxyIntegrationTe : RedisProxyIntegrationTest(CONFIG_WITH_COMMAND_STATS, 2) {} }; +class RedisProxyWithFaultInjectionIntegrationTest : public RedisProxyIntegrationTest { +public: + RedisProxyWithFaultInjectionIntegrationTest() + : RedisProxyIntegrationTest(CONFIG_WITH_FAULT_INJECTION, 2) {} +}; + INSTANTIATE_TEST_SUITE_P(IpVersions, RedisProxyIntegrationTest, testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), TestUtility::ipTestParamsToString); @@ -468,6 +498,10 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, RedisProxyWithCommandStatsIntegrationTest, testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), TestUtility::ipTestParamsToString); +INSTANTIATE_TEST_SUITE_P(IpVersions, RedisProxyWithFaultInjectionIntegrationTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); + void RedisProxyIntegrationTest::initialize() { setUpstreamCount(num_upstreams_); setDeterministic(); @@ -1062,5 +1096,25 @@ TEST_P(RedisProxyWithMirrorsIntegrationTest, EnabledViaRuntimeFraction) { redis_client->close(); } +TEST_P(RedisProxyWithFaultInjectionIntegrationTest, ErrorFault) { + std::string fault_response = + fmt::format("-{}\r\n", Extensions::NetworkFilters::Common::Redis::FaultMessages::get().Error); + initialize(); + simpleProxyResponse(makeBulkStringArray({"get", "foo"}), fault_response); + + EXPECT_EQ(1, test_server_->counter("redis.redis_stats.command.get.error")->value()); + EXPECT_EQ(1, test_server_->counter("redis.redis_stats.command.get.error_fault")->value()); +} + +TEST_P(RedisProxyWithFaultInjectionIntegrationTest, DelayFault) { + const std::string& set_request = makeBulkStringArray({"set", "write_only:toto", "bar"}); + const std::string& set_response = ":1\r\n"; + initialize(); + simpleRequestAndResponse(set_request, set_response); + + EXPECT_EQ(1, test_server_->counter("redis.redis_stats.command.set.success")->value()); + EXPECT_EQ(1, test_server_->counter("redis.redis_stats.command.set.delay_fault")->value()); +} + } // namespace } // namespace Envoy From 847eafef27dbac6b127a5f5500ccb60f60727c0e Mon Sep 17 00:00:00 2001 From: ankatare Date: Sat, 18 Jul 2020 05:43:22 +0530 Subject: [PATCH 677/909] changes for http and access_log folders (#12027) Commit Message: changes for http and access_log folders Risk Level:NA Testing: Unit and format testing Docs Changes: NA part of #10843 Signed-off-by: Abhay Narayan Katare --- .../common/access_log/access_log_impl_test.cc | 97 ++++++++++--------- test/common/http/utility_test.cc | 9 +- 2 files changed, 54 insertions(+), 52 deletions(-) diff --git a/test/common/access_log/access_log_impl_test.cc b/test/common/access_log/access_log_impl_test.cc index a5b9c66279d0..0d23058b5768 100644 --- a/test/common/access_log/access_log_impl_test.cc +++ b/test/common/access_log/access_log_impl_test.cc @@ -37,9 +37,10 @@ namespace Envoy { namespace AccessLog { namespace { -envoy::config::accesslog::v3::AccessLog parseAccessLogFromV2Yaml(const std::string& yaml) { +envoy::config::accesslog::v3::AccessLog parseAccessLogFromV3Yaml(const std::string& yaml, + bool avoid_boosting = true) { envoy::config::accesslog::v3::AccessLog access_log; - TestUtility::loadFromYamlAndValidate(yaml, access_log); + TestUtility::loadFromYamlAndValidate(yaml, access_log, false, avoid_boosting); return access_log; } @@ -72,7 +73,7 @@ name: accesslog path: /dev/null )EOF"; - InstanceSharedPtr 
log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); EXPECT_CALL(*file_, write(_)); stream_info_.response_flags_ = StreamInfo::ResponseFlag::UpstreamConnectionFailure; @@ -95,7 +96,7 @@ name: accesslog path: /dev/null )EOF"; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); EXPECT_CALL(*file_, write(_)); @@ -118,7 +119,7 @@ name: accesslog format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH):256% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %ROUTE_NAME% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\"\n" )EOF"; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); EXPECT_CALL(*file_, write(_)); stream_info_.route_name_ = "route-test-name"; @@ -144,7 +145,7 @@ name: accesslog path: /dev/null )EOF"; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); EXPECT_CALL(*file_, write(_)); response_headers_.addCopy(Http::Headers::get().EnvoyUpstreamServiceTime, "999"); @@ -163,7 +164,7 @@ name: accesslog path: /dev/null )EOF"; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); EXPECT_CALL(*file_, write(_)); log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_); @@ -183,7 +184,7 @@ name: accesslog path: /dev/null )EOF"; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); EXPECT_CALL(*file_, write(_)); log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_); @@ -215,7 +216,7 @@ name: accesslog path: /dev/null )EOF"; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); EXPECT_CALL(*file_, write(_)).Times(0); log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_); @@ -253,7 +254,7 @@ name: accesslog path: /dev/null )EOF"; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); EXPECT_CALL(*file_, write(_)).Times(3); log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_); @@ -279,7 +280,7 @@ name: accesslog )EOF"; Random::RandomGeneratorImpl random; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); // Value is taken from random generator. 
EXPECT_CALL(context_.random_, random()).WillOnce(Return(42)); @@ -322,7 +323,7 @@ name: accesslog )EOF"; Random::RandomGeneratorImpl random; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); // Value is taken from random generator. EXPECT_CALL(context_.random_, random()).WillOnce(Return(42)); @@ -365,7 +366,7 @@ name: accesslog path: /dev/null )EOF"; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); // Value should not be taken from x-request-id. request_headers_.addCopy("x-request-id", "000000ff-0000-0000-0000-000000000000"); @@ -392,7 +393,7 @@ name: accesslog path: /dev/null )EOF"; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); EXPECT_CALL(*file_, write(_)); log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_); @@ -411,7 +412,7 @@ name: accesslog path: /dev/null )EOF"; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); Http::TestRequestHeaderMapImpl header_map{}; stream_info_.health_check_request_ = true; @@ -430,7 +431,7 @@ name: accesslog path: "/dev/null" )EOF"; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); Http::TestRequestHeaderMapImpl header_map{}; EXPECT_CALL(*file_, write(_)); @@ -450,7 +451,7 @@ name: accesslog path: /dev/null )EOF"; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); { Http::TestRequestHeaderMapImpl forced_header{{"x-request-id", random.uuid()}}; @@ -487,7 +488,7 @@ name: accesslog path: /dev/null )EOF"; - EXPECT_THROW(AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context), + EXPECT_THROW(AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context), EnvoyException); } @@ -501,7 +502,7 @@ name: accesslog path: /dev/null )EOF"; - EXPECT_THROW(AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context), + EXPECT_THROW(AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context), EnvoyException); } } @@ -524,7 +525,7 @@ name: accesslog path: /dev/null )EOF"; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); stream_info_.response_code_ = 500; { @@ -560,7 +561,7 @@ name: accesslog path: /dev/null )EOF"; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); stream_info_.response_code_ = 500; { @@ -603,7 +604,7 @@ name: accesslog path: /dev/null )EOF"; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); stream_info_.response_code_ 
= 500; { @@ -703,7 +704,7 @@ name: accesslog path: /dev/null )EOF"; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); stream_info_.response_code_ = 499; EXPECT_CALL(runtime_.snapshot_, getInteger("hello", 499)).WillOnce(Return(499)); @@ -728,7 +729,7 @@ name: accesslog path: /dev/null )EOF"; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); EXPECT_CALL(*file_, write(_)).Times(0); log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_); @@ -752,7 +753,7 @@ name: accesslog path: /dev/null )EOF"; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); EXPECT_CALL(*file_, write(_)).Times(0); log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_); @@ -782,7 +783,7 @@ name: accesslog path: /dev/null )EOF"; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); EXPECT_CALL(*file_, write(_)).Times(0); log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_); @@ -817,7 +818,7 @@ name: accesslog path: /dev/null )EOF"; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); EXPECT_CALL(*file_, write(_)).Times(0); log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_); @@ -857,7 +858,7 @@ name: accesslog path: /dev/null )EOF"; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); EXPECT_CALL(*file_, write(_)).Times(0); log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_); @@ -879,7 +880,7 @@ name: accesslog path: /dev/null )EOF"; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); EXPECT_CALL(*file_, write(_)).Times(0); log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_); @@ -906,7 +907,7 @@ name: accesslog path: /dev/null )EOF"; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); EXPECT_CALL(*file_, write(_)).Times(0); log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_); @@ -978,7 +979,7 @@ name: accesslog StreamInfo::ResponseFlag::UpstreamMaxStreamDurationReached, StreamInfo::ResponseFlag::ResponseFromCacheFilter}; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); for (const auto response_flag : all_response_flags) { TestStreamInfo stream_info; @@ -1001,7 +1002,7 @@ name: accesslog )EOF"; EXPECT_THROW_WITH_MESSAGE( - 
AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_), + AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_), ProtoValidationException, "Proto constraint validation failed (AccessLogValidationError.Filter: [\"embedded message " "failed validation\"] | caused by AccessLogFilterValidationError.ResponseFlagFilter: " @@ -1028,7 +1029,7 @@ name: accesslog )EOF"; EXPECT_THROW_WITH_MESSAGE( - AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_), + AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_), ProtoValidationException, "Proto constraint validation failed (AccessLogValidationError.Filter: [\"embedded message " "failed validation\"] | caused by AccessLogFilterValidationError.ResponseFlagFilter: " @@ -1051,7 +1052,7 @@ name: accesslog format: "%GRPC_STATUS%\n" )EOF"; - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); { EXPECT_CALL(*file_, write(_)); response_trailers_.addCopy(Http::Headers::get().GrpcStatus, "0"); @@ -1060,7 +1061,7 @@ name: accesslog response_trailers_.remove(Http::Headers::get().GrpcStatus); } { - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); EXPECT_CALL(*file_, write(_)); response_headers_.addCopy(Http::Headers::get().GrpcStatus, "1"); log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_); @@ -1068,7 +1069,7 @@ name: accesslog response_headers_.remove(Http::Headers::get().GrpcStatus); } { - InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); EXPECT_CALL(*file_, write(_)); response_headers_.addCopy(Http::Headers::get().GrpcStatus, "-1"); log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_); @@ -1098,7 +1099,7 @@ name: accesslog for (int i = 0; i < desc->value_count(); i++) { InstanceSharedPtr log = AccessLogFactory::fromProto( - parseAccessLogFromV2Yaml(fmt::format(yaml_template, desc->value(i)->name())), context_); + parseAccessLogFromV3Yaml(fmt::format(yaml_template, desc->value(i)->name())), context_); EXPECT_CALL(*file_, write(_)); @@ -1120,7 +1121,7 @@ name: accesslog path: /dev/null )EOF"; - EXPECT_THROW_WITH_REGEX(AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_), + EXPECT_THROW_WITH_REGEX(AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_), EnvoyException, ".*\"NOT_A_VALID_CODE\" for type TYPE_ENUM.*"); } @@ -1137,7 +1138,7 @@ name: accesslog )EOF"; const InstanceSharedPtr log = - AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); response_trailers_.addCopy(Http::Headers::get().GrpcStatus, "1"); @@ -1170,7 +1171,7 @@ name: accesslog stream_info_.response_code_ = pair.second; const InstanceSharedPtr log = AccessLogFactory::fromProto( - parseAccessLogFromV2Yaml(fmt::format(yaml_template, pair.first)), context_); + parseAccessLogFromV3Yaml(fmt::format(yaml_template, pair.first)), context_); EXPECT_CALL(*file_, write(_)); log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_); @@ -1190,7 +1191,7 @@ name: accesslog )EOF"; const InstanceSharedPtr log = - 
AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); EXPECT_CALL(*file_, write(_)); log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_); @@ -1210,7 +1211,7 @@ name: accesslog )EOF"; const InstanceSharedPtr log = - AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); for (int i = 0; i <= static_cast(Grpc::Status::WellKnownGrpcStatus::MaximumKnown); i++) { EXPECT_CALL(*file_, write(_)).Times(i == 0 ? 0 : 1); @@ -1235,7 +1236,7 @@ name: accesslog )EOF"; const InstanceSharedPtr log = - AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); response_trailers_.addCopy(Http::Headers::get().GrpcStatus, "0"); @@ -1256,7 +1257,7 @@ name: accesslog )EOF"; const InstanceSharedPtr log = - AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); EXPECT_CALL(*file_, write(_)); @@ -1303,7 +1304,7 @@ name: accesslog path: /dev/null )EOF"; - InstanceSharedPtr logger = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr logger = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); EXPECT_CALL(*file_, write(_)).Times(0); logger->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_); @@ -1380,7 +1381,7 @@ name: accesslog path: /dev/null )EOF"; - InstanceSharedPtr logger = AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_); + InstanceSharedPtr logger = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); // For rate=5 expect 1st request to be recorded, 2nd-5th skipped, and 6th recorded. 
EXPECT_CALL(*file_, write(_)); logger->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_); @@ -1408,7 +1409,7 @@ name: accesslog path: /dev/null )EOF"; - EXPECT_THROW(AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_), + EXPECT_THROW(AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_), EnvoyException); } @@ -1423,7 +1424,7 @@ name: accesslog path: /dev/null )EOF"; - EXPECT_THROW(AccessLogFactory::fromProto(parseAccessLogFromV2Yaml(yaml), context_), + EXPECT_THROW(AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_), EnvoyException); } } diff --git a/test/common/http/utility_test.cc b/test/common/http/utility_test.cc index 0c1dd6c1277c..687c5255ee36 100644 --- a/test/common/http/utility_test.cc +++ b/test/common/http/utility_test.cc @@ -310,9 +310,10 @@ TEST(HttpUtility, createSslRedirectPath) { namespace { -envoy::config::core::v3::Http2ProtocolOptions parseHttp2OptionsFromV2Yaml(const std::string& yaml) { +envoy::config::core::v3::Http2ProtocolOptions +parseHttp2OptionsFromV3Yaml(const std::string& yaml, bool avoid_boosting = true) { envoy::config::core::v3::Http2ProtocolOptions http2_options; - TestUtility::loadFromYamlAndValidate(yaml, http2_options); + TestUtility::loadFromYamlAndValidate(yaml, http2_options, false, avoid_boosting); return ::Envoy::Http2::Utility::initializeAndValidateOptions(http2_options); } @@ -321,7 +322,7 @@ envoy::config::core::v3::Http2ProtocolOptions parseHttp2OptionsFromV2Yaml(const TEST(HttpUtility, parseHttp2Settings) { { using ::Envoy::Http2::Utility::OptionsLimits; - auto http2_options = parseHttp2OptionsFromV2Yaml("{}"); + auto http2_options = parseHttp2OptionsFromV3Yaml("{}"); EXPECT_EQ(OptionsLimits::DEFAULT_HPACK_TABLE_SIZE, http2_options.hpack_table_size().value()); EXPECT_EQ(OptionsLimits::DEFAULT_MAX_CONCURRENT_STREAMS, http2_options.max_concurrent_streams().value()); @@ -348,7 +349,7 @@ max_concurrent_streams: 2 initial_stream_window_size: 65535 initial_connection_window_size: 65535 )EOF"; - auto http2_options = parseHttp2OptionsFromV2Yaml(yaml); + auto http2_options = parseHttp2OptionsFromV3Yaml(yaml); EXPECT_EQ(1U, http2_options.hpack_table_size().value()); EXPECT_EQ(2U, http2_options.max_concurrent_streams().value()); EXPECT_EQ(65535U, http2_options.initial_stream_window_size().value()); From 2b3d95efa57cee3cae9c82dc0959cf8067f50357 Mon Sep 17 00:00:00 2001 From: foreseeable Date: Sat, 18 Jul 2020 00:14:42 +0000 Subject: [PATCH 678/909] test: refactor header inclusion to speed up building (for test/common/...) (#12046) Commit Message: refactor header inclusion to speed up building (for test/common/...) 
Risk Level: low Testing: existing tests Docs Changes: N/A Release Notes: no Related Issues: #10917 Signed-off-by: Muge Chen --- test/common/access_log/BUILD | 2 +- test/common/access_log/access_log_impl_test.cc | 2 +- test/common/config/BUILD | 2 +- test/common/config/config_provider_impl_test.cc | 2 +- test/common/config/datasource_test.cc | 8 +++----- test/common/grpc/BUILD | 4 +++- .../grpc/grpc_client_integration_test_harness.h | 4 +++- test/common/http/BUILD | 4 +++- test/common/http/conn_manager_impl_test.cc | 4 +++- test/common/local_reply/BUILD | 2 +- test/common/local_reply/local_reply_test.cc | 2 +- test/common/memory/BUILD | 2 +- test/common/memory/heap_shrinker_test.cc | 2 +- test/common/network/BUILD | 4 +++- test/common/network/filter_manager_impl_test.cc | 3 ++- test/common/protobuf/BUILD | 2 +- test/common/protobuf/utility_test.cc | 2 +- test/common/router/BUILD | 14 +++++++------- test/common/router/config_impl_test.cc | 2 +- test/common/router/rds_impl_test.cc | 2 +- test/common/router/route_fuzz_test.cc | 2 +- test/common/router/router_ratelimit_test.cc | 2 +- test/common/router/router_upstream_log_test.cc | 2 +- test/common/router/scoped_rds_test.cc | 2 +- test/common/router/vhds_test.cc | 2 +- test/common/secret/BUILD | 6 ++++-- test/common/secret/sds_api_test.cc | 2 +- test/common/secret/secret_manager_impl_test.cc | 4 +++- test/common/stats/BUILD | 2 +- test/common/stats/thread_local_store_test.cc | 2 +- test/common/tcp_proxy/BUILD | 3 ++- test/common/tcp_proxy/tcp_proxy_test.cc | 3 ++- test/common/tracing/BUILD | 3 ++- .../tracing/http_tracer_manager_impl_test.cc | 3 ++- test/common/upstream/BUILD | 3 ++- test/common/upstream/test_cluster_manager.h | 3 ++- 36 files changed, 66 insertions(+), 47 deletions(-) diff --git a/test/common/access_log/BUILD b/test/common/access_log/BUILD index bcd51b067bd6..91e0a1665c41 100644 --- a/test/common/access_log/BUILD +++ b/test/common/access_log/BUILD @@ -22,7 +22,7 @@ envoy_cc_test( "//test/mocks/event:event_mocks", "//test/mocks/filesystem:filesystem_mocks", "//test/mocks/runtime:runtime_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/mocks/upstream:upstream_mocks", "//test/test_common:registry_lib", "//test/test_common:utility_lib", diff --git a/test/common/access_log/access_log_impl_test.cc b/test/common/access_log/access_log_impl_test.cc index 0d23058b5768..0bcac1bd72e5 100644 --- a/test/common/access_log/access_log_impl_test.cc +++ b/test/common/access_log/access_log_impl_test.cc @@ -19,7 +19,7 @@ #include "test/mocks/event/mocks.h" #include "test/mocks/filesystem/mocks.h" #include "test/mocks/runtime/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/mocks/upstream/mocks.h" #include "test/test_common/printers.h" #include "test/test_common/registry.h" diff --git a/test/common/config/BUILD b/test/common/config/BUILD index a89c45629e94..7fec979a8a8f 100644 --- a/test/common/config/BUILD +++ b/test/common/config/BUILD @@ -414,7 +414,7 @@ envoy_cc_test( ":dummy_config_proto_cc_proto", "//source/common/config:config_provider_lib", "//source/common/protobuf:utility_lib", - "//test/mocks/server:server_mocks", + "//test/mocks/server:instance_mocks", "//test/test_common:simulated_time_system_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/service/discovery/v3:pkg_cc_proto", diff --git a/test/common/config/config_provider_impl_test.cc b/test/common/config/config_provider_impl_test.cc index 
61d45456c485..c8308e42e500 100644 --- a/test/common/config/config_provider_impl_test.cc +++ b/test/common/config/config_provider_impl_test.cc @@ -7,7 +7,7 @@ #include "common/protobuf/utility.h" #include "test/common/config/dummy_config.pb.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/instance.h" #include "test/test_common/simulated_time_system.h" #include "test/test_common/utility.h" diff --git a/test/common/config/datasource_test.cc b/test/common/config/datasource_test.cc index 340ae1c5a365..8ef1710b465b 100644 --- a/test/common/config/datasource_test.cc +++ b/test/common/config/datasource_test.cc @@ -7,19 +7,17 @@ #include "test/mocks/event/mocks.h" #include "test/mocks/init/mocks.h" -#include "test/mocks/runtime/mocks.h" #include "test/mocks/upstream/mocks.h" #include "test/test_common/utility.h" #include "gtest/gtest.h" -using testing::AtLeast; -using testing::NiceMock; -using testing::Return; - namespace Envoy { namespace Config { namespace { +using ::testing::AtLeast; +using ::testing::NiceMock; +using ::testing::Return; class AsyncDataSourceTest : public testing::Test { protected: diff --git a/test/common/grpc/BUILD b/test/common/grpc/BUILD index 71e1a071604d..edfdfbda404a 100644 --- a/test/common/grpc/BUILD +++ b/test/common/grpc/BUILD @@ -145,10 +145,12 @@ envoy_cc_test_library( ":utility_lib", "//source/common/api:api_lib", "//source/common/event:dispatcher_lib", + "//source/common/grpc:context_lib", + "//source/common/http:context_lib", "//source/common/http/http2:conn_pool_lib", "//test/integration:integration_lib", "//test/mocks/local_info:local_info_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:transport_socket_factory_context_mocks", "//test/proto:helloworld_proto_cc_proto", "//test/test_common:global_lib", "//test/test_common:test_time_lib", diff --git a/test/common/grpc/grpc_client_integration_test_harness.h b/test/common/grpc/grpc_client_integration_test_harness.h index 7ab7facc976f..5803379810a5 100644 --- a/test/common/grpc/grpc_client_integration_test_harness.h +++ b/test/common/grpc/grpc_client_integration_test_harness.h @@ -8,6 +8,8 @@ #include "common/api/api_impl.h" #include "common/event/dispatcher_impl.h" #include "common/grpc/async_client_impl.h" +#include "common/grpc/context_impl.h" +#include "common/http/context_impl.h" #ifdef ENVOY_GOOGLE_GRPC #include "common/grpc/google_async_client_impl.h" @@ -28,7 +30,7 @@ #include "test/integration/fake_upstream.h" #include "test/mocks/grpc/mocks.h" #include "test/mocks/local_info/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/transport_socket_factory_context.h" #include "test/mocks/tracing/mocks.h" #include "test/mocks/upstream/mocks.h" #include "test/proto/helloworld.pb.h" diff --git a/test/common/http/BUILD b/test/common/http/BUILD index 13e7911ee08f..b690d0a1d04a 100644 --- a/test/common/http/BUILD +++ b/test/common/http/BUILD @@ -226,7 +226,9 @@ envoy_cc_test( "//test/mocks/network:network_mocks", "//test/mocks/router:router_mocks", "//test/mocks/runtime:runtime_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", + "//test/mocks/server:instance_mocks", + "//test/mocks/server:overload_manager_mocks", "//test/mocks/ssl:ssl_mocks", "//test/mocks/tracing:tracing_mocks", "//test/mocks/upstream:upstream_mocks", diff --git a/test/common/http/conn_manager_impl_test.cc b/test/common/http/conn_manager_impl_test.cc index 5fc4624d37d4..520d27561d1c 100644 --- a/test/common/http/conn_manager_impl_test.cc +++ 
b/test/common/http/conn_manager_impl_test.cc @@ -39,7 +39,9 @@ #include "test/mocks/network/mocks.h" #include "test/mocks/router/mocks.h" #include "test/mocks/runtime/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" +#include "test/mocks/server/instance.h" +#include "test/mocks/server/overload_manager.h" #include "test/mocks/ssl/mocks.h" #include "test/mocks/tracing/mocks.h" #include "test/mocks/upstream/cluster_info.h" diff --git a/test/common/local_reply/BUILD b/test/common/local_reply/BUILD index df768fb66dd5..9b5fc8f50ec1 100644 --- a/test/common/local_reply/BUILD +++ b/test/common/local_reply/BUILD @@ -14,7 +14,7 @@ envoy_cc_test( deps = [ "//source/common/local_reply:local_reply_lib", "//test/mocks/http:http_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/mocks/stream_info:stream_info_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", diff --git a/test/common/local_reply/local_reply_test.cc b/test/common/local_reply/local_reply_test.cc index 0807a12982cd..a43519128fc5 100644 --- a/test/common/local_reply/local_reply_test.cc +++ b/test/common/local_reply/local_reply_test.cc @@ -4,7 +4,7 @@ #include "common/local_reply/local_reply.h" #include "test/mocks/http/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/test_common/simulated_time_system.h" #include "test/test_common/utility.h" diff --git a/test/common/memory/BUILD b/test/common/memory/BUILD index 1688511f5db9..3123d827b949 100644 --- a/test/common/memory/BUILD +++ b/test/common/memory/BUILD @@ -23,7 +23,7 @@ envoy_cc_test( "//source/common/memory:stats_lib", "//test/common/stats:stat_test_utility_lib", "//test/mocks/event:event_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:overload_manager_mocks", "//test/test_common:simulated_time_system_lib", "//test/test_common:utility_lib", ], diff --git a/test/common/memory/heap_shrinker_test.cc b/test/common/memory/heap_shrinker_test.cc index 68071e1e4f1f..5889424f5435 100644 --- a/test/common/memory/heap_shrinker_test.cc +++ b/test/common/memory/heap_shrinker_test.cc @@ -4,7 +4,7 @@ #include "test/common/stats/stat_test_utility.h" #include "test/mocks/event/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/overload_manager.h" #include "test/test_common/simulated_time_system.h" #include "gmock/gmock.h" diff --git a/test/common/network/BUILD b/test/common/network/BUILD index 1ee7cb294c14..592a709050c7 100644 --- a/test/common/network/BUILD +++ b/test/common/network/BUILD @@ -21,6 +21,7 @@ envoy_cc_test_library( "//source/common/network:listener_lib", "//source/common/network:utility_lib", "//source/common/stats:stats_lib", + "//source/common/stream_info:stream_info_lib", "//test/mocks/network:network_mocks", "//test/mocks/server:server_mocks", "//test/test_common:environment_lib", @@ -138,7 +139,8 @@ envoy_cc_test( "//test/mocks/buffer:buffer_mocks", "//test/mocks/network:network_mocks", "//test/mocks/ratelimit:ratelimit_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", + "//test/mocks/server:instance_mocks", "//test/mocks/tracing:tracing_mocks", "//test/mocks/upstream:host_mocks", "//test/mocks/upstream:upstream_mocks", diff --git a/test/common/network/filter_manager_impl_test.cc b/test/common/network/filter_manager_impl_test.cc index f8dea3442a22..9a3feee48b1e 100644 
--- a/test/common/network/filter_manager_impl_test.cc +++ b/test/common/network/filter_manager_impl_test.cc @@ -17,7 +17,8 @@ #include "test/mocks/network/mocks.h" #include "test/mocks/ratelimit/mocks.h" #include "test/mocks/runtime/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" +#include "test/mocks/server/instance.h" #include "test/mocks/tracing/mocks.h" #include "test/mocks/upstream/host.h" #include "test/mocks/upstream/mocks.h" diff --git a/test/common/protobuf/BUILD b/test/common/protobuf/BUILD index 851765f03b58..bb018981c290 100644 --- a/test/common/protobuf/BUILD +++ b/test/common/protobuf/BUILD @@ -30,7 +30,7 @@ envoy_cc_test( "//test/mocks/init:init_mocks", "//test/mocks/local_info:local_info_mocks", "//test/mocks/protobuf:protobuf_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:instance_mocks", "//test/proto:deprecated_proto_cc_proto", "//test/proto:sensitive_proto_cc_proto", "//test/test_common:environment_lib", diff --git a/test/common/protobuf/utility_test.cc b/test/common/protobuf/utility_test.cc index bf6dbb7b3cbd..3ae2fb03bcb6 100644 --- a/test/common/protobuf/utility_test.cc +++ b/test/common/protobuf/utility_test.cc @@ -23,7 +23,7 @@ #include "test/mocks/init/mocks.h" #include "test/mocks/local_info/mocks.h" #include "test/mocks/protobuf/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/instance.h" #include "test/proto/deprecated.pb.h" #include "test/proto/sensitive.pb.h" #include "test/test_common/environment.h" diff --git a/test/common/router/BUILD b/test/common/router/BUILD index 0f72563074bb..3ddc0305b16b 100644 --- a/test/common/router/BUILD +++ b/test/common/router/BUILD @@ -31,7 +31,7 @@ envoy_cc_test_library( "//source/common/stream_info:filter_state_lib", "//test/extensions/filters/http/common:empty_http_filter_config_lib", "//test/fuzz:utility_lib", - "//test/mocks/server:server_mocks", + "//test/mocks/server:instance_mocks", "//test/test_common:environment_lib", "//test/test_common:registry_lib", "//test/test_common:test_runtime_lib", @@ -72,7 +72,7 @@ envoy_cc_test( "//source/server/admin:admin_lib", "//test/mocks/local_info:local_info_mocks", "//test/mocks/protobuf:protobuf_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:instance_mocks", "//test/mocks/thread_local:thread_local_mocks", "//test/mocks/upstream:upstream_mocks", "//test/test_common:simulated_time_system_lib", @@ -118,7 +118,7 @@ envoy_cc_test( "//test/mocks/init:init_mocks", "//test/mocks/protobuf:protobuf_mocks", "//test/mocks/router:router_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:instance_mocks", "//test/test_common:simulated_time_system_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/admin/v3:pkg_cc_proto", @@ -141,7 +141,7 @@ envoy_cc_test( "//source/server/admin:admin_lib", "//test/mocks/config:config_mocks", "//test/mocks/local_info:local_info_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:instance_mocks", "//test/mocks/thread_local:thread_local_mocks", "//test/mocks/upstream:upstream_mocks", "//test/test_common:simulated_time_system_lib", @@ -229,7 +229,7 @@ envoy_cc_fuzz_test( ":route_fuzz_proto_cc_proto", "//source/common/router:config_lib", "//test/fuzz:utility_lib", - "//test/mocks/server:server_mocks", + "//test/mocks/server:instance_mocks", "@envoy_api//envoy/config/route/v3:pkg_cc_proto", ], ) @@ -245,7 +245,7 @@ envoy_cc_test( "//test/mocks/http:http_mocks", "//test/mocks/ratelimit:ratelimit_mocks", 
"//test/mocks/router:router_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:instance_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/config/route/v3:pkg_cc_proto", ], @@ -307,7 +307,7 @@ envoy_cc_test( "//test/mocks/network:network_mocks", "//test/mocks/router:router_mocks", "//test/mocks/runtime:runtime_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", "//test/mocks/ssl:ssl_mocks", "//test/mocks/upstream:upstream_mocks", "//test/test_common:utility_lib", diff --git a/test/common/router/config_impl_test.cc b/test/common/router/config_impl_test.cc index f116e51bee79..4fc092ed910c 100644 --- a/test/common/router/config_impl_test.cc +++ b/test/common/router/config_impl_test.cc @@ -23,7 +23,7 @@ #include "test/common/router/route_fuzz.pb.h" #include "test/extensions/filters/http/common/empty_http_filter_config.h" #include "test/fuzz/utility.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/instance.h" #include "test/test_common/environment.h" #include "test/test_common/printers.h" #include "test/test_common/registry.h" diff --git a/test/common/router/rds_impl_test.cc b/test/common/router/rds_impl_test.cc index 25a20e50935e..8e0e62652631 100644 --- a/test/common/router/rds_impl_test.cc +++ b/test/common/router/rds_impl_test.cc @@ -18,7 +18,7 @@ #include "test/mocks/init/mocks.h" #include "test/mocks/local_info/mocks.h" #include "test/mocks/protobuf/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/instance.h" #include "test/mocks/thread_local/mocks.h" #include "test/mocks/upstream/mocks.h" #include "test/test_common/printers.h" diff --git a/test/common/router/route_fuzz_test.cc b/test/common/router/route_fuzz_test.cc index f43540976c79..17e6b532e36b 100644 --- a/test/common/router/route_fuzz_test.cc +++ b/test/common/router/route_fuzz_test.cc @@ -7,7 +7,7 @@ #include "test/common/router/route_fuzz.pb.validate.h" #include "test/fuzz/fuzz_runner.h" #include "test/fuzz/utility.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/instance.h" namespace Envoy { namespace Router { diff --git a/test/common/router/router_ratelimit_test.cc b/test/common/router/router_ratelimit_test.cc index 2d0ff4988ae2..496c33633e8b 100644 --- a/test/common/router/router_ratelimit_test.cc +++ b/test/common/router/router_ratelimit_test.cc @@ -15,7 +15,7 @@ #include "test/mocks/http/mocks.h" #include "test/mocks/ratelimit/mocks.h" #include "test/mocks/router/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/instance.h" #include "test/test_common/printers.h" #include "test/test_common/utility.h" diff --git a/test/common/router/router_upstream_log_test.cc b/test/common/router/router_upstream_log_test.cc index d0caea49ec29..8662e760364a 100644 --- a/test/common/router/router_upstream_log_test.cc +++ b/test/common/router/router_upstream_log_test.cc @@ -18,7 +18,7 @@ #include "test/mocks/network/mocks.h" #include "test/mocks/router/mocks.h" #include "test/mocks/runtime/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/mocks/upstream/mocks.h" #include "test/test_common/utility.h" diff --git a/test/common/router/scoped_rds_test.cc b/test/common/router/scoped_rds_test.cc index b3383ec0ad9d..f2f13ed03dec 100644 --- a/test/common/router/scoped_rds_test.cc +++ b/test/common/router/scoped_rds_test.cc @@ -21,7 +21,7 @@ #include "test/mocks/config/mocks.h" #include "test/mocks/protobuf/mocks.h" #include 
"test/mocks/router/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/instance.h" #include "test/test_common/simulated_time_system.h" #include "test/test_common/utility.h" diff --git a/test/common/router/vhds_test.cc b/test/common/router/vhds_test.cc index 79256e2d92ee..88bd464d7bbb 100644 --- a/test/common/router/vhds_test.cc +++ b/test/common/router/vhds_test.cc @@ -15,7 +15,7 @@ #include "test/mocks/config/mocks.h" #include "test/mocks/init/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/instance.h" #include "test/mocks/thread_local/mocks.h" #include "test/mocks/upstream/mocks.h" #include "test/test_common/printers.h" diff --git a/test/common/secret/BUILD b/test/common/secret/BUILD index 8ff77b980fa7..b5b05456c3fd 100644 --- a/test/common/secret/BUILD +++ b/test/common/secret/BUILD @@ -20,7 +20,9 @@ envoy_cc_test( "//source/common/secret:secret_manager_impl_lib", "//source/common/ssl:certificate_validation_context_config_impl_lib", "//source/common/ssl:tls_certificate_config_impl_lib", - "//test/mocks/server:server_mocks", + "//test/mocks/server:config_tracker_mocks", + "//test/mocks/server:instance_mocks", + "//test/mocks/server:transport_socket_factory_context_mocks", "//test/test_common:environment_lib", "//test/test_common:registry_lib", "//test/test_common:simulated_time_system_lib", @@ -46,7 +48,7 @@ envoy_cc_test( "//test/mocks/init:init_mocks", "//test/mocks/protobuf:protobuf_mocks", "//test/mocks/secret:secret_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:instance_mocks", "//test/test_common:environment_lib", "//test/test_common:registry_lib", "//test/test_common:utility_lib", diff --git a/test/common/secret/sds_api_test.cc b/test/common/secret/sds_api_test.cc index 289fb79d6a66..bec6c41a0a47 100644 --- a/test/common/secret/sds_api_test.cc +++ b/test/common/secret/sds_api_test.cc @@ -15,7 +15,7 @@ #include "test/mocks/init/mocks.h" #include "test/mocks/protobuf/mocks.h" #include "test/mocks/secret/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/instance.h" #include "test/test_common/environment.h" #include "test/test_common/utility.h" diff --git a/test/common/secret/secret_manager_impl_test.cc b/test/common/secret/secret_manager_impl_test.cc index 00466f1bddf6..a947df8417d0 100644 --- a/test/common/secret/secret_manager_impl_test.cc +++ b/test/common/secret/secret_manager_impl_test.cc @@ -15,7 +15,9 @@ #include "common/ssl/tls_certificate_config_impl.h" #include "test/mocks/event/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/config_tracker.h" +#include "test/mocks/server/instance.h" +#include "test/mocks/server/transport_socket_factory_context.h" #include "test/test_common/environment.h" #include "test/test_common/simulated_time_system.h" #include "test/test_common/utility.h" diff --git a/test/common/stats/BUILD b/test/common/stats/BUILD index 863dfe71a841..5125844c5817 100644 --- a/test/common/stats/BUILD +++ b/test/common/stats/BUILD @@ -228,7 +228,7 @@ envoy_cc_test( "//source/common/stats:thread_local_store_lib", "//source/common/thread_local:thread_local_lib", "//test/mocks/event:event_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:instance_mocks", "//test/mocks/stats:stats_mocks", "//test/mocks/thread_local:thread_local_mocks", "//test/test_common:logging_lib", diff --git a/test/common/stats/thread_local_store_test.cc b/test/common/stats/thread_local_store_test.cc index a7906ff2d7ee..daf20f6f2349 100644 --- 
a/test/common/stats/thread_local_store_test.cc +++ b/test/common/stats/thread_local_store_test.cc @@ -17,7 +17,7 @@ #include "test/common/stats/stat_test_utility.h" #include "test/mocks/event/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/instance.h" #include "test/mocks/stats/mocks.h" #include "test/mocks/thread_local/mocks.h" #include "test/test_common/logging.h" diff --git a/test/common/tcp_proxy/BUILD b/test/common/tcp_proxy/BUILD index 955a2b05070e..e6149dc13e73 100644 --- a/test/common/tcp_proxy/BUILD +++ b/test/common/tcp_proxy/BUILD @@ -28,7 +28,8 @@ envoy_cc_test( "//test/mocks/buffer:buffer_mocks", "//test/mocks/network:network_mocks", "//test/mocks/runtime:runtime_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:factory_context_mocks", + "//test/mocks/server:instance_mocks", "//test/mocks/ssl:ssl_mocks", "//test/mocks/stream_info:stream_info_mocks", "//test/mocks/upstream:host_mocks", diff --git a/test/common/tcp_proxy/tcp_proxy_test.cc b/test/common/tcp_proxy/tcp_proxy_test.cc index 4db82a3fa5f3..5fd7d8b21ce5 100644 --- a/test/common/tcp_proxy/tcp_proxy_test.cc +++ b/test/common/tcp_proxy/tcp_proxy_test.cc @@ -25,7 +25,8 @@ #include "test/mocks/buffer/mocks.h" #include "test/mocks/network/mocks.h" #include "test/mocks/runtime/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/factory_context.h" +#include "test/mocks/server/instance.h" #include "test/mocks/ssl/mocks.h" #include "test/mocks/stream_info/mocks.h" #include "test/mocks/tcp/mocks.h" diff --git a/test/common/tracing/BUILD b/test/common/tracing/BUILD index acc6850f3f72..57640551c346 100644 --- a/test/common/tracing/BUILD +++ b/test/common/tracing/BUILD @@ -46,7 +46,8 @@ envoy_cc_test( "//source/common/tracing:http_tracer_config_lib", "//source/common/tracing:http_tracer_lib", "//source/common/tracing:http_tracer_manager_lib", - "//test/mocks/server:server_mocks", + "//test/mocks/server:instance_mocks", + "//test/mocks/server:tracer_factory_mocks", "//test/mocks/tracing:tracing_mocks", "//test/test_common:registry_lib", ], diff --git a/test/common/tracing/http_tracer_manager_impl_test.cc b/test/common/tracing/http_tracer_manager_impl_test.cc index f6e719fa434e..67f802af9edc 100644 --- a/test/common/tracing/http_tracer_manager_impl_test.cc +++ b/test/common/tracing/http_tracer_manager_impl_test.cc @@ -2,7 +2,8 @@ #include "common/tracing/http_tracer_impl.h" #include "common/tracing/http_tracer_manager_impl.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/instance.h" +#include "test/mocks/server/tracer_factory.h" #include "test/mocks/tracing/mocks.h" #include "test/test_common/registry.h" diff --git a/test/common/upstream/BUILD b/test/common/upstream/BUILD index 32f280f2d6fb..cfbabbb44f43 100644 --- a/test/common/upstream/BUILD +++ b/test/common/upstream/BUILD @@ -529,7 +529,8 @@ envoy_cc_test_library( "//test/mocks/protobuf:protobuf_mocks", "//test/mocks/runtime:runtime_mocks", "//test/mocks/secret:secret_mocks", - "//test/mocks/server:server_mocks", + "//test/mocks/server:admin_mocks", + "//test/mocks/server:instance_mocks", "//test/mocks/tcp:tcp_mocks", "//test/mocks/thread_local:thread_local_mocks", "//test/mocks/upstream:upstream_mocks", diff --git a/test/common/upstream/test_cluster_manager.h b/test/common/upstream/test_cluster_manager.h index e6c51563c668..20edfaa9b59b 100644 --- a/test/common/upstream/test_cluster_manager.h +++ b/test/common/upstream/test_cluster_manager.h @@ -35,7 +35,8 @@ #include 
"test/mocks/protobuf/mocks.h" #include "test/mocks/runtime/mocks.h" #include "test/mocks/secret/mocks.h" -#include "test/mocks/server/mocks.h" +#include "test/mocks/server/admin.h" +#include "test/mocks/server/instance.h" #include "test/mocks/tcp/mocks.h" #include "test/mocks/thread_local/mocks.h" #include "test/mocks/upstream/mocks.h" From b8996ddb7c3a6deec74ac19e33975499fc068dbc Mon Sep 17 00:00:00 2001 From: Martin Matusiak Date: Sat, 18 Jul 2020 10:29:59 +1000 Subject: [PATCH 679/909] test: fix typos and make cleanups in router tests (#12094) test: fix typos and make cleanups in router tests Risk Level: low Testing: bazel test //test/... Docs Changes: n/a Release Notes: n/a Signed-off-by: Martin Matusiak --- test/common/router/config_impl_test.cc | 153 +++++++------------------ 1 file changed, 43 insertions(+), 110 deletions(-) diff --git a/test/common/router/config_impl_test.cc b/test/common/router/config_impl_test.cc index 4fc092ed910c..8b68b9c1d04d 100644 --- a/test/common/router/config_impl_test.cc +++ b/test/common/router/config_impl_test.cc @@ -163,7 +163,6 @@ class ConfigImplTestBase { std::string responseHeadersConfig(const bool most_specific_wins, const bool append) const { const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: www2 domains: ["www.lyft.com"] @@ -249,7 +248,6 @@ most_specific_header_mutations_wins: {0} std::string requestHeadersConfig(const bool most_specific_wins) { const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: www2 domains: ["www.lyft.com"] @@ -1267,7 +1265,7 @@ TEST_F(RouteMatcherTest, TestRoutesWithInvalidRegex) { // Virtual cluster that contains neither pattern nor regex. This must be checked while pattern is // deprecated. TEST_F(RouteMatcherTest, TestRoutesWithInvalidVirtualCluster) { - const std::string invalid_virtual_cluster = R"EOF( + const std::string yaml = R"EOF( virtual_hosts: - name: regex domains: ["*"] @@ -1278,10 +1276,9 @@ TEST_F(RouteMatcherTest, TestRoutesWithInvalidVirtualCluster) { - name: "invalid" )EOF"; - EXPECT_THROW_WITH_REGEX(TestConfigImpl(parseRouteConfigurationFromYaml(invalid_virtual_cluster), - factory_context_, true), - EnvoyException, - "virtual clusters must define either 'pattern' or 'headers'"); + EXPECT_THROW_WITH_REGEX( + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, + "virtual clusters must define either 'pattern' or 'headers'"); } // Validates behavior of request_headers_to_add at router, vhost, and route levels. @@ -1605,7 +1602,6 @@ TEST_F(RouteMatcherTest, TestAddRemoveResponseHeadersAppendMostSpecificWins) { TEST_F(RouteMatcherTest, TestAddGlobalResponseHeaderRemoveFromRoute) { const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: www2 domains: ["www.lyft.com"] @@ -1651,7 +1647,6 @@ TEST_F(RouteMatcherTest, TestRequestHeadersToAddNoPseudoHeader) { for (const std::string& header : {":path", ":authority", ":method", ":scheme", ":status", ":protocol"}) { const std::string yaml = fmt::format(R"EOF( -name: foo virtual_hosts: - name: www2 domains: ["*"] @@ -1678,7 +1673,6 @@ TEST_F(RouteMatcherTest, TestRequestHeadersToRemoveNoPseudoHeader) { for (const std::string& header : {":path", ":authority", ":method", ":scheme", ":status", ":protocol", "host"}) { const std::string yaml = fmt::format(R"EOF( -name: foo virtual_hosts: - name: www2 domains: ["*"] @@ -3185,7 +3179,6 @@ class RouteConfigurationV2 : public testing::Test, public ConfigImplTestBase {}; // When removing runtime_key: this test can be removed. 
TEST_F(RouteConfigurationV2, DEPRECATED_FEATURE_TEST(RequestMirrorPolicy)) { const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: mirror domains: [mirror.lyft.com] @@ -3302,7 +3295,6 @@ TEST_F(RouteMatcherTest, Retry) { TEST_F(RouteMatcherTest, RetryVirtualHostLevel) { const std::string yaml = R"EOF( -name: RetryVirtualHostLevel virtual_hosts: - domains: [www.lyft.com] per_request_buffer_limit_bytes: 8 @@ -3542,7 +3534,7 @@ TEST_F(RouteMatcherTest, RetryBackOffIntervals) { // Test invalid route-specific retry back-off configs. TEST_F(RouteMatcherTest, InvalidRetryBackOff) { - const std::string invalid_max = R"EOF( + const std::string yaml = R"EOF( virtual_hosts: - name: backoff domains: ["*"] @@ -3557,13 +3549,12 @@ TEST_F(RouteMatcherTest, InvalidRetryBackOff) { )EOF"; EXPECT_THROW_WITH_MESSAGE( - TestConfigImpl(parseRouteConfigurationFromYaml(invalid_max), factory_context_, true), - EnvoyException, "retry_policy.max_interval must greater than or equal to the base_interval"); + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, + "retry_policy.max_interval must greater than or equal to the base_interval"); } TEST_F(RouteMatcherTest, HedgeRouteLevel) { const std::string yaml = R"EOF( -name: HedgeRouteLevel virtual_hosts: - domains: [www.lyft.com] name: www @@ -3639,7 +3630,6 @@ name: HedgeRouteLevel TEST_F(RouteMatcherTest, HedgeVirtualHostLevel) { const std::string yaml = R"EOF( -name: HedgeVirtualHostLevel virtual_hosts: - domains: [www.lyft.com] name: www @@ -3760,7 +3750,7 @@ TEST_F(RouteMatcherTest, TestDuplicateDomainConfig) { // Test to detect if hostname matches are case-insensitive TEST_F(RouteMatcherTest, TestCaseSensitiveDomainConfig) { - std::string config_with_case_sensitive_domains = R"EOF( + std::string yaml = R"EOF( virtual_hosts: - name: www2 domains: [www.lyft.com] @@ -3775,9 +3765,7 @@ TEST_F(RouteMatcherTest, TestCaseSensitiveDomainConfig) { )EOF"; EXPECT_THROW_WITH_MESSAGE( - TestConfigImpl(parseRouteConfigurationFromYaml(config_with_case_sensitive_domains), - factory_context_, true), - EnvoyException, + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, "Only unique values for domains are permitted. Duplicate entry of domain www.lyft.com"); } @@ -4080,7 +4068,6 @@ TEST_F(RouteMatcherTest, DirectResponse) { TestEnvironment::writeStringToFileForTest("direct_response_body", "Example text 3"); static const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: www2 domains: [www.lyft.com] @@ -5905,7 +5892,6 @@ TEST_F(ConfigUtilityTest, ParseDirectResponseBody) { TEST_F(RouteConfigurationV2, RedirectCode) { const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: redirect domains: [redirect.lyft.com] @@ -5932,7 +5918,6 @@ name: foo // Test the parsing of direct response configurations within routes. TEST_F(RouteConfigurationV2, DirectResponse) { const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: direct domains: [example.com] @@ -5954,7 +5939,6 @@ name: foo TEST_F(RouteConfigurationV2, DirectResponseTooLarge) { std::string response_body(4097, 'A'); const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: direct domains: [example.com] @@ -5984,7 +5968,6 @@ void checkPathMatchCriterion(const Route* route, const std::string& expected_mat // Test loading broken config throws EnvoyException. 
TEST_F(RouteConfigurationV2, BrokenTypedMetadata) { const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: bar domains: ["*"] @@ -6045,7 +6028,6 @@ name: foo TEST_F(RouteConfigurationV2, RouteTracingConfig) { const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: bar domains: ["*"] @@ -6127,8 +6109,7 @@ name: foo // Test to check Prefix Rewrite for redirects TEST_F(RouteConfigurationV2, RedirectPrefixRewrite) { - std::string RedirectPrefixRewrite = R"EOF( -name: AllRedirects + std::string yaml = R"EOF( virtual_hosts: - name: redirect domains: [redirect.lyft.com] @@ -6156,8 +6137,7 @@ name: AllRedirects redirect: { prefix_rewrite: "/" } )EOF"; - TestConfigImpl config(parseRouteConfigurationFromYaml(RedirectPrefixRewrite), factory_context_, - true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); EXPECT_EQ(nullptr, config.route(genRedirectHeaders("www.foo.com", "/foo", true, true), 0)); @@ -6260,8 +6240,7 @@ TEST_F(RouteConfigurationV2, PathRedirectQueryNotPreserved) { Runtime::LoaderSingleton::getExisting()->mergeValues( {{"envoy.reloadable_features.preserve_query_string_in_path_redirects", "false"}}); - std::string RouteDynPathRedirect = R"EOF( -name: AllRedirects + std::string yaml = R"EOF( virtual_hosts: - name: redirect domains: [redirect.lyft.com] @@ -6276,8 +6255,7 @@ name: AllRedirects redirect: { path_redirect: "/new/path-redirect?foo=2", strip_query: "true" } )EOF"; - TestConfigImpl config(parseRouteConfigurationFromYaml(RouteDynPathRedirect), factory_context_, - true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); EXPECT_EQ(nullptr, config.route(genRedirectHeaders("www.foo.com", "/foo", true, true), 0)); { @@ -6320,8 +6298,7 @@ name: AllRedirects // Test to check Strip Query for redirect messages TEST_F(RouteConfigurationV2, RedirectStripQuery) { - std::string RouteDynPathRedirect = R"EOF( -name: AllRedirects + std::string yaml = R"EOF( virtual_hosts: - name: redirect domains: [redirect.lyft.com] @@ -6344,8 +6321,7 @@ name: AllRedirects redirect: { host_redirect: "new.lyft.com", prefix_rewrite: "/new/prefix" , https_redirect: "true", strip_query: "true" } )EOF"; - TestConfigImpl config(parseRouteConfigurationFromYaml(RouteDynPathRedirect), factory_context_, - true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); EXPECT_EQ(nullptr, config.route(genRedirectHeaders("www.foo.com", "/foo", true, true), 0)); @@ -6417,7 +6393,6 @@ name: AllRedirects TEST_F(RouteMatcherTest, HeaderMatchedRoutingV2) { const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: local_service domains: ["*"] @@ -6596,7 +6571,6 @@ name: foo // Validate configured and default settings are routed to the correct cluster. TEST_F(RouteMatcherTest, TlsContextMatching) { const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: local_service domains: ["*"] @@ -6755,8 +6729,7 @@ TEST_F(RouteConfigurationV2, RegexPrefixWithNoRewriteWorksWhenPathChanged) { // Setup regex route entry. the regex is trivial, that's ok as we only want to test that // path change works. 
- std::string RegexRewrite = R"EOF( -name: RegexNoMatch + std::string yaml = R"EOF( virtual_hosts: - name: regex domains: [regex.lyft.com] @@ -6768,7 +6741,7 @@ name: RegexNoMatch route: { cluster: some-cluster } )EOF"; - TestConfigImpl config(parseRouteConfigurationFromYaml(RegexRewrite), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); { // Get our regex route entry @@ -6787,8 +6760,7 @@ name: RegexNoMatch } TEST_F(RouteConfigurationV2, NoIdleTimeout) { - const std::string NoIdleTimeout = R"EOF( -name: NoIdleTimeout + const std::string yaml = R"EOF( virtual_hosts: - name: regex domains: [idle.lyft.com] @@ -6801,7 +6773,7 @@ name: NoIdleTimeout cluster: some-cluster )EOF"; - TestConfigImpl config(parseRouteConfigurationFromYaml(NoIdleTimeout), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); Http::TestRequestHeaderMapImpl headers = genRedirectHeaders("idle.lyft.com", "/regex", true, false); const RouteEntry* route_entry = config.route(headers, 0)->routeEntry(); @@ -6809,8 +6781,7 @@ name: NoIdleTimeout } TEST_F(RouteConfigurationV2, ZeroIdleTimeout) { - const std::string ZeroIdleTimeout = R"EOF( -name: ZeroIdleTimeout + const std::string yaml = R"EOF( virtual_hosts: - name: regex domains: [idle.lyft.com] @@ -6824,7 +6795,7 @@ name: ZeroIdleTimeout idle_timeout: 0s )EOF"; - TestConfigImpl config(parseRouteConfigurationFromYaml(ZeroIdleTimeout), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); Http::TestRequestHeaderMapImpl headers = genRedirectHeaders("idle.lyft.com", "/regex", true, false); const RouteEntry* route_entry = config.route(headers, 0)->routeEntry(); @@ -6832,8 +6803,7 @@ name: ZeroIdleTimeout } TEST_F(RouteConfigurationV2, ExplicitIdleTimeout) { - const std::string ExplicitIdleTimeout = R"EOF( -name: ExplicitIdleTimeout + const std::string yaml = R"EOF( virtual_hosts: - name: regex domains: [idle.lyft.com] @@ -6847,8 +6817,7 @@ name: ExplicitIdleTimeout idle_timeout: 7s )EOF"; - TestConfigImpl config(parseRouteConfigurationFromYaml(ExplicitIdleTimeout), factory_context_, - true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); Http::TestRequestHeaderMapImpl headers = genRedirectHeaders("idle.lyft.com", "/regex", true, false); const RouteEntry* route_entry = config.route(headers, 0)->routeEntry(); @@ -6856,8 +6825,7 @@ name: ExplicitIdleTimeout } TEST_F(RouteConfigurationV2, RetriableStatusCodes) { - const std::string ExplicitIdleTimeout = R"EOF( -name: RetriableStatusCodes + const std::string yaml = R"EOF( virtual_hosts: - name: regex domains: [idle.lyft.com] @@ -6872,8 +6840,7 @@ name: RetriableStatusCodes retriable_status_codes: [100, 200] )EOF"; - TestConfigImpl config(parseRouteConfigurationFromYaml(ExplicitIdleTimeout), factory_context_, - true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); Http::TestRequestHeaderMapImpl headers = genRedirectHeaders("idle.lyft.com", "/regex", true, false); const auto& retry_policy = config.route(headers, 0)->routeEntry()->retryPolicy(); @@ -6882,8 +6849,7 @@ name: RetriableStatusCodes } TEST_F(RouteConfigurationV2, RetriableHeaders) { - const std::string RetriableHeaders = R"EOF( -name: RetriableHeaders + const std::string yaml = R"EOF( virtual_hosts: - name: regex domains: [idle.lyft.com] @@ -6901,7 +6867,7 @@ name: RetriableHeaders - name: X-Upstream-Pushback )EOF"; 
- TestConfigImpl config(parseRouteConfigurationFromYaml(RetriableHeaders), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); Http::TestRequestHeaderMapImpl headers = genRedirectHeaders("idle.lyft.com", "/regex", true, false); const auto& retry_policy = config.route(headers, 0)->routeEntry()->retryPolicy(); @@ -6919,8 +6885,7 @@ name: RetriableHeaders } TEST_F(RouteConfigurationV2, UpgradeConfigs) { - const std::string UpgradeYaml = R"EOF( -name: RetriableStatusCodes + const std::string yaml = R"EOF( virtual_hosts: - name: regex domains: [idle.lyft.com] @@ -6937,7 +6902,7 @@ name: RetriableStatusCodes enabled: false )EOF"; - TestConfigImpl config(parseRouteConfigurationFromYaml(UpgradeYaml), factory_context_, true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); Http::TestRequestHeaderMapImpl headers = genRedirectHeaders("idle.lyft.com", "/regex", true, false); const RouteEntry::UpgradeMap& upgrade_map = config.route(headers, 0)->routeEntry()->upgradeMap(); @@ -6948,7 +6913,6 @@ name: RetriableStatusCodes TEST_F(RouteConfigurationV2, DuplicateUpgradeConfigs) { const std::string yaml = R"EOF( -name: RetriableStatusCodes virtual_hosts: - name: regex domains: [idle.lyft.com] @@ -6972,7 +6936,6 @@ name: RetriableStatusCodes TEST_F(RouteConfigurationV2, BadConnectConfig) { const std::string yaml = R"EOF( -name: RetriableStatusCodes virtual_hosts: - name: regex domains: [idle.lyft.com] @@ -6997,8 +6960,7 @@ name: RetriableStatusCodes // Verifies that we're creating a new instance of the retry plugins on each call instead of always // returning the same one. TEST_F(RouteConfigurationV2, RetryPluginsAreNotReused) { - const std::string ExplicitIdleTimeout = R"EOF( -name: RetriableStatusCodes + const std::string yaml = R"EOF( virtual_hosts: - name: regex domains: [idle.lyft.com] @@ -7024,8 +6986,7 @@ name: RetriableStatusCodes Registry::InjectFactory inject_predicate_factory( host_predicate_factory); - TestConfigImpl config(parseRouteConfigurationFromYaml(ExplicitIdleTimeout), factory_context_, - true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); Http::TestRequestHeaderMapImpl headers = genRedirectHeaders("idle.lyft.com", "/regex", true, false); const auto& retry_policy = config.route(headers, 0)->routeEntry()->retryPolicy(); @@ -7037,9 +6998,8 @@ name: RetriableStatusCodes EXPECT_NE(predicates1, predicates2); } -TEST_F(RouteConfigurationV2, InternalRedirctIsDisabledWhenNotSpecifiedInRouteAction) { - const std::string InternalRedirectEnabled = R"EOF( -name: InternalRedirectEnabled +TEST_F(RouteConfigurationV2, InternalRedirectIsDisabledWhenNotSpecifiedInRouteAction) { + const std::string yaml = R"EOF( virtual_hosts: - name: regex domains: [idle.lyft.com] @@ -7052,8 +7012,7 @@ name: InternalRedirectEnabled cluster: some-cluster )EOF"; - TestConfigImpl config(parseRouteConfigurationFromYaml(InternalRedirectEnabled), factory_context_, - true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); Http::TestRequestHeaderMapImpl headers = genRedirectHeaders("idle.lyft.com", "/regex", true, false); const auto& internal_redirect_policy = @@ -7061,9 +7020,8 @@ name: InternalRedirectEnabled EXPECT_FALSE(internal_redirect_policy.enabled()); } -TEST_F(RouteConfigurationV2, DefaultInternalRedirctPolicyIsSensible) { - const std::string InternalRedirectEnabled = R"EOF( -name: InternalRedirectEnabled +TEST_F(RouteConfigurationV2, 
DefaultInternalRedirectPolicyIsSensible) { + const std::string yaml = R"EOF( virtual_hosts: - name: regex domains: [idle.lyft.com] @@ -7077,8 +7035,7 @@ name: InternalRedirectEnabled internal_redirect_policy: {} )EOF"; - TestConfigImpl config(parseRouteConfigurationFromYaml(InternalRedirectEnabled), factory_context_, - true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); Http::TestRequestHeaderMapImpl headers = genRedirectHeaders("idle.lyft.com", "/regex", true, false); const auto& internal_redirect_policy = @@ -7092,9 +7049,8 @@ name: InternalRedirectEnabled EXPECT_FALSE(internal_redirect_policy.isCrossSchemeRedirectAllowed()); } -TEST_F(RouteConfigurationV2, InternalRedirctPolicyDropsInvalidRedirectCode) { - const std::string InternalRedirectEnabled = R"EOF( -name: InternalRedirectEnabled +TEST_F(RouteConfigurationV2, InternalRedirectPolicyDropsInvalidRedirectCode) { + const std::string yaml = R"EOF( virtual_hosts: - name: regex domains: [idle.lyft.com] @@ -7109,8 +7065,7 @@ name: InternalRedirectEnabled redirect_response_codes: [301, 302, 303, 304] )EOF"; - TestConfigImpl config(parseRouteConfigurationFromYaml(InternalRedirectEnabled), factory_context_, - true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); Http::TestRequestHeaderMapImpl headers = genRedirectHeaders("idle.lyft.com", "/regex", true, false); const auto& internal_redirect_policy = @@ -7129,9 +7084,8 @@ name: InternalRedirectEnabled internal_redirect_policy.shouldRedirectForResponseCode(static_cast(307))); } -TEST_F(RouteConfigurationV2, InternalRedirctPolicyDropsInvalidRedirectCodeCauseEmptySet) { - const std::string InternalRedirectEnabled = R"EOF( -name: InternalRedirectEnabled +TEST_F(RouteConfigurationV2, InternalRedirectPolicyDropsInvalidRedirectCodeCauseEmptySet) { + const std::string yaml = R"EOF( virtual_hosts: - name: regex domains: [idle.lyft.com] @@ -7146,8 +7100,7 @@ name: InternalRedirectEnabled redirect_response_codes: [200, 304] )EOF"; - TestConfigImpl config(parseRouteConfigurationFromYaml(InternalRedirectEnabled), factory_context_, - true); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); Http::TestRequestHeaderMapImpl headers = genRedirectHeaders("idle.lyft.com", "/regex", true, false); const auto& internal_redirect_policy = @@ -7251,7 +7204,6 @@ class PerFilterConfigsTest : public testing::Test, public ConfigImplTestBase { TEST_F(PerFilterConfigsTest, DEPRECATED_FEATURE_TEST(TypedConfigFilterError)) { { const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: bar domains: ["*"] @@ -7271,7 +7223,6 @@ name: foo { const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: bar domains: ["*"] @@ -7292,7 +7243,6 @@ name: foo TEST_F(PerFilterConfigsTest, DEPRECATED_FEATURE_TEST(UnknownFilterStruct)) { const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: bar domains: ["*"] @@ -7309,7 +7259,6 @@ name: foo TEST_F(PerFilterConfigsTest, UnknownFilterAny) { const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: bar domains: ["*"] @@ -7330,7 +7279,6 @@ name: foo // error. 
TEST_F(PerFilterConfigsTest, DEPRECATED_FEATURE_TEST(DefaultFilterImplementationStruct)) { const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: bar domains: ["*"] @@ -7345,7 +7293,6 @@ name: foo TEST_F(PerFilterConfigsTest, DefaultFilterImplementationAny) { const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: bar domains: ["*"] @@ -7364,7 +7311,6 @@ name: foo TEST_F(PerFilterConfigsTest, DEPRECATED_FEATURE_TEST(RouteLocalConfig)) { const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: bar domains: ["*"] @@ -7380,7 +7326,6 @@ name: foo TEST_F(PerFilterConfigsTest, RouteLocalTypedConfig) { const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: bar domains: ["*"] @@ -7404,7 +7349,6 @@ name: foo TEST_F(PerFilterConfigsTest, DEPRECATED_FEATURE_TEST(WeightedClusterConfig)) { const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: bar domains: ["*"] @@ -7424,7 +7368,6 @@ name: foo TEST_F(PerFilterConfigsTest, WeightedClusterTypedConfig) { const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: bar domains: ["*"] @@ -7452,7 +7395,6 @@ name: foo TEST_F(PerFilterConfigsTest, DEPRECATED_FEATURE_TEST(WeightedClusterFallthroughConfig)) { const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: bar domains: ["*"] @@ -7472,7 +7414,6 @@ name: foo TEST_F(PerFilterConfigsTest, WeightedClusterFallthroughTypedConfig) { const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: bar domains: ["*"] @@ -7502,7 +7443,6 @@ class RouteMatchOverrideTest : public testing::Test, public ConfigImplTestBase { TEST_F(RouteMatchOverrideTest, VerifyAllMatchableRoutes) { const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: bar domains: ["*"] @@ -7543,7 +7483,6 @@ name: foo TEST_F(RouteMatchOverrideTest, VerifyRouteOverrideStops) { const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: bar domains: ["*"] @@ -7584,7 +7523,6 @@ name: foo TEST_F(RouteMatchOverrideTest, StopWhenNoMoreRoutes) { const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: bar domains: ["*"] @@ -7627,7 +7565,6 @@ name: foo TEST_F(RouteMatchOverrideTest, NullRouteOnNoRouteMatch) { const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: bar domains: ["*"] @@ -7656,7 +7593,6 @@ name: foo TEST_F(RouteMatchOverrideTest, NullRouteOnNoHostMatch) { const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: bar domains: ["www.acme.com"] @@ -7685,7 +7621,6 @@ name: foo TEST_F(RouteMatchOverrideTest, NullRouteOnNullXForwardedProto) { const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: bar domains: ["*"] @@ -7714,7 +7649,6 @@ name: foo TEST_F(RouteMatchOverrideTest, NullRouteOnRequireTlsAll) { const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: bar domains: ["*"] @@ -7744,7 +7678,6 @@ name: foo TEST_F(RouteMatchOverrideTest, NullRouteOnRequireTlsInternal) { const std::string yaml = R"EOF( -name: foo virtual_hosts: - name: bar domains: ["*"] From c077fa14bffe56e31fdc9dafee0b48b6b87aa944 Mon Sep 17 00:00:00 2001 From: asraa Date: Sun, 19 Jul 2020 05:39:41 -0400 Subject: [PATCH 680/909] [fuzz] add fuzz coverage CI check (#11045) Commit Message: Add `bazel.fuzz_coverage` to CI. Publishes fuzz coverage report in `coverage_publish` Risk Level: Medium Testing: Local testing of `./ci/run_envoy_docker.sh './ci/do_ci.sh bazel.fuzz_coverage'`, as well as `FUZZ_COVERAGE=true test/run_envoy_bazel_coverage.sh` with single test args/directory, VALIDATE on/off. 
Fixes: https://github.com/envoyproxy/envoy/issues/9573 Docs: Updated bazel/README.md and fuzz/README.md for docs about running local coverage. Signed-off-by: Asra Ali --- .azure-pipelines/pipelines.yml | 17 ++- .bazelrc | 10 +- bazel/README.md | 8 +- bazel/coverage/fuzz_coverage_wrapper.sh | 7 +- ci/build_setup.sh | 3 + ci/do_ci.sh | 7 ++ test/fuzz/README.md | 10 +- test/run_envoy_bazel_coverage.sh | 17 ++- test/server/BUILD | 1 - test/server/server_corpus/api_boost_crash | 138 ++++++++++++++++++++++ 10 files changed, 198 insertions(+), 20 deletions(-) create mode 100644 test/server/server_corpus/api_boost_crash diff --git a/.azure-pipelines/pipelines.yml b/.azure-pipelines/pipelines.yml index b1c14670cfab..30ed536530ed 100644 --- a/.azure-pipelines/pipelines.yml +++ b/.azure-pipelines/pipelines.yml @@ -90,21 +90,28 @@ jobs: ciTarget: $(CI_TARGET) - job: coverage - displayName: "Linux-x64 coverage" - dependsOn: ["format"] + displayName: "Linux-x64" + dependsOn: ["release"] timeoutInMinutes: 360 pool: "x64-large" + strategy: + maxParallel: 2 + matrix: + coverage: + CI_TARGET: "coverage" + fuzz_coverage: + CI_TARGET: "fuzz_coverage" steps: - template: bazel.yml parameters: managedAgent: false - ciTarget: bazel.coverage + ciTarget: bazel.$(CI_TARGET) rbe: false # /tmp/sandbox_base is a tmpfs in CI environment to optimize large I/O for coverage traces bazelBuildExtraOptions: "--test_env=ENVOY_IP_TEST_VERSIONS=v4only --sandbox_base=/tmp/sandbox_base" - - script: ci/run_envoy_docker.sh 'ci/upload_gcs_artifact.sh /source/generated/coverage coverage' - displayName: "Upload Report to GCS" + - script: ci/run_envoy_docker.sh 'ci/upload_gcs_artifact.sh /source/generated/$(CI_TARGET) $(CI_TARGET)' + displayName: "Upload $(CI_TARGET) Report to GCS" env: ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory) GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) diff --git a/.bazelrc b/.bazelrc index 5d29db5979fa..2a9d4f0943e5 100644 --- a/.bazelrc +++ b/.bazelrc @@ -242,18 +242,20 @@ build:remote-ci --remote_cache=grpcs://remotebuildexecution.googleapis.com build:remote-ci --remote_executor=grpcs://remotebuildexecution.googleapis.com # Fuzz builds -build:asan-fuzzer --config=clang-asan +# -DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION is passed in in the bazel build target +# rules for fuzz tests. Passing it in the CLI will cause dependencies to be build +# with the macro. Causing issues in RouteMatcherTest.TestRoutes that expect prod +# behavior from RE2 library. +build:asan-fuzzer --config=asan build:asan-fuzzer --define=FUZZING_ENGINE=libfuzzer build:asan-fuzzer --copt=-fsanitize=fuzzer-no-link build:asan-fuzzer --copt=-fno-omit-frame-pointer -build:asan-fuzzer --copt=-DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION # Remove UBSAN halt_on_error to avoid crashing on protobuf errors. build:asan-fuzzer --test_env=UBSAN_OPTIONS=print_stacktrace=1 # Fuzzing without ASAN. This is useful for profiling fuzzers without any ASAN artifacts. build:plain-fuzzer --define=FUZZING_ENGINE=libfuzzer -build:plain-fuzzer --copt=-DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION -build:plain-fuzzer --copt=-fsanitize=fuzzer-no-link +build:plain-fuzzer --define ENVOY_CONFIG_ASAN=1 # Compile database generation config # We don't care about built binaries so always strip and use fastbuild. 
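A minimal sketch of exercising the fuzz coverage flow added in this change locally (the single-target label below is hypothetical; the commands themselves follow the scripts and docs touched in this patch):

# Coverage across all fuzz targets; the report lands in generated/fuzz_coverage/coverage.html.
FUZZ_COVERAGE=true VALIDATE_COVERAGE=false test/run_envoy_bazel_coverage.sh

# The same flow narrowed to a single fuzz target (hypothetical label).
FUZZ_COVERAGE=true VALIDATE_COVERAGE=false test/run_envoy_bazel_coverage.sh //test/some/dir:some_fuzz_test

# Per the wrapper and docs changes below, each target first fuzzes its checked-in corpus for
# 60 seconds (FUZZ_CORPUS_TIME default) and is then replayed with -runs=0 to collect the profile.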
diff --git a/bazel/README.md b/bazel/README.md index a7e3f0165f24..62d7b6e8f2c0 100644 --- a/bazel/README.md +++ b/bazel/README.md @@ -638,13 +638,19 @@ test/run_envoy_bazel_coverage.sh The summary results are printed to the standard output and the full coverage report is available in `generated/coverage/coverage.html`. +To generate coverage results for fuzz targets, use the `FUZZ_COVERAGE` environment variable, e.g.: +``` +FUZZ_COVERAGE=true VALIDATE_COVERAGE=false test/run_envoy_bazel_coverage.sh +``` +This generates a coverage report for fuzz targets after running the target for one minute against fuzzing engine libfuzzer using its coprus as initial seed inputs. The full coverage report will be available in `generated/fuzz_coverage/coverage.html`. + Coverage for every PR is available in Circle in the "artifacts" tab of the coverage job. You will need to navigate down and open "coverage.html" but then you can navigate per normal. NOTE: We have seen some issues with seeing the artifacts tab. If you can't see it, log out of Circle, and then log back in and it should start working. The latest coverage report for master is available -[here](https://storage.googleapis.com/envoy-postsubmit/master/coverage/index.html). +[here](https://storage.googleapis.com/envoy-postsubmit/master/coverage/index.html). The latest fuzz coverage report for master is available [here](https://storage.googleapis.com/envoy-postsubmit/master/fuzz_coverage/index.html). It's also possible to specialize the coverage build to a specified test or test dir. This is useful when doing things like exploring the coverage of a fuzzer over its corpus. This can be done by diff --git a/bazel/coverage/fuzz_coverage_wrapper.sh b/bazel/coverage/fuzz_coverage_wrapper.sh index 14c94bd545ae..0510befd60bc 100755 --- a/bazel/coverage/fuzz_coverage_wrapper.sh +++ b/bazel/coverage/fuzz_coverage_wrapper.sh @@ -1,6 +1,6 @@ #!/bin/bash -set -ex +set -x TEST_BINARY=$1 shift @@ -11,4 +11,7 @@ rm -rf fuzz_corpus mkdir -p fuzz_corpus/seed_corpus cp -r $@ fuzz_corpus/seed_corpus -${TEST_BINARY} fuzz_corpus -seed=${FUZZ_CORPUS_SEED:-1} -max_total_time=${FUZZ_CORPUS_TIME:-60} +# TODO(asraa): When fuzz targets are stable, remove error suppression and run coverage while fuzzing. +LLVM_PROFILE_FILE= ${TEST_BINARY} fuzz_corpus -seed=${FUZZ_CORPUS_SEED:-1} -max_total_time=${FUZZ_CORPUS_TIME:-60} -max_len=2048 || true + +${TEST_BINARY} fuzz_corpus -runs=0 diff --git a/ci/build_setup.sh b/ci/build_setup.sh index ee60c484ca4e..97419a425618 100755 --- a/ci/build_setup.sh +++ b/ci/build_setup.sh @@ -105,6 +105,9 @@ mkdir -p "${ENVOY_DELIVERY_DIR}" # This is where we copy the coverage report to. export ENVOY_COVERAGE_ARTIFACT="${ENVOY_BUILD_DIR}"/generated/coverage.tar.gz +# This is where we copy the fuzz coverage report to. +export ENVOY_FUZZ_COVERAGE_ARTIFACT="${ENVOY_BUILD_DIR}"/generated/fuzz_coverage.tar.gz + # This is where we dump failed test logs for CI collection. 
export ENVOY_FAILED_TEST_LOGS="${ENVOY_BUILD_DIR}"/generated/failed-testlogs mkdir -p "${ENVOY_FAILED_TEST_LOGS}" diff --git a/ci/do_ci.sh b/ci/do_ci.sh index a2f4959adf71..344026c5e88e 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -274,6 +274,13 @@ elif [[ "$CI_TARGET" == "bazel.coverage" ]]; then test/run_envoy_bazel_coverage.sh ${COVERAGE_TEST_TARGETS} collect_build_profile coverage exit 0 +elif [[ "$CI_TARGET" == "bazel.fuzz_coverage" ]]; then + setup_clang_toolchain + echo "bazel coverage build with fuzz tests ${COVERAGE_TEST_TARGETS}" + + FUZZ_COVERAGE=true test/run_envoy_bazel_coverage.sh ${COVERAGE_TEST_TARGETS} + collect_build_profile coverage + exit 0 elif [[ "$CI_TARGET" == "bazel.clang_tidy" ]]; then setup_clang_toolchain NUM_CPUS=$NUM_CPUS ci/run_clang_tidy.sh diff --git a/test/fuzz/README.md b/test/fuzz/README.md index aa3cf68057a3..0104affcfd3d 100644 --- a/test/fuzz/README.md +++ b/test/fuzz/README.md @@ -158,7 +158,15 @@ to provide fuzzers some interesting starting points for invalid inputs. ## Coverage reports Coverage reports, where individual lines are annotated with fuzzing hit counts, are a useful way to -understand the scope and efficacy of the Envoy fuzzers. You can generate such reports from the +understand the scope and efficacy of the Envoy fuzzers. You can generate fuzz coverage reports both locally, and using the OSS-Fuzz infrastructure. + +To generate fuzz coverage reports locally (see [Coverage builds](bazel/README.md), run +``` +FUZZ_COVERAGE=true test/run_envoy_bazel_coverage.sh +``` +This generates a coverage report after running the fuzz targets for one minute against the fuzzing engine libfuzzer and using the checked-in corpus as an initial seed. + +Otherwise, you can generate reports from the ClusterFuzz corpus following the general ClusterFuzz [instructions for profiling setup](https://github.com/google/oss-fuzz/blob/master/docs/code_coverage.md). diff --git a/test/run_envoy_bazel_coverage.sh b/test/run_envoy_bazel_coverage.sh index 270ca9412ec7..7bf6cc92e36e 100755 --- a/test/run_envoy_bazel_coverage.sh +++ b/test/run_envoy_bazel_coverage.sh @@ -19,12 +19,12 @@ if [[ $# -gt 0 ]]; then elif [[ -n "${COVERAGE_TARGET}" ]]; then COVERAGE_TARGETS=${COVERAGE_TARGET} else - # For fuzz builds, this overrides to just fuzz targets. - COVERAGE_TARGETS=//test/... && [[ ${FUZZ_COVERAGE} == "true" ]] && - COVERAGE_TARGETS="$(bazel query 'attr("tags", "fuzz_target", //test/...)')" + COVERAGE_TARGETS=//test/... fi if [[ "${FUZZ_COVERAGE}" == "true" ]]; then + # Filter targets to just fuzz tests. 
+ COVERAGE_TARGETS=$(bazel query "attr("tags", "fuzz_target", ${COVERAGE_TARGETS})") BAZEL_BUILD_OPTIONS+=" --config=fuzz-coverage --test_tag_filters=-nocoverage" else BAZEL_BUILD_OPTIONS+=" --config=test-coverage --test_tag_filters=-nocoverage,-fuzz_target" @@ -36,7 +36,7 @@ bazel coverage ${BAZEL_BUILD_OPTIONS} ${COVERAGE_TARGETS} [[ -z "${ENVOY_BUILD_PROFILE}" ]] || cp -f "$(bazel info output_base)/command.profile.gz" "${ENVOY_BUILD_PROFILE}/coverage.profile.gz" || true [[ -z "${ENVOY_BUILD_DIR}" ]] || find bazel-testlogs/ -name test.log | tar zcf "${ENVOY_BUILD_DIR}/testlogs.tar.gz" -T - -COVERAGE_DIR="${SRCDIR}"/generated/coverage +COVERAGE_DIR="${SRCDIR}"/generated/coverage && [[ ${FUZZ_COVERAGE} == "true" ]] && COVERAGE_DIR="${SRCDIR}"/generated/fuzz_coverage rm -rf "${COVERAGE_DIR}" mkdir -p "${COVERAGE_DIR}" @@ -47,7 +47,12 @@ cp bazel-out/_coverage/_coverage_report.dat "${COVERAGE_DATA}" COVERAGE_VALUE=$(genhtml --prefix ${PWD} --output "${COVERAGE_DIR}" "${COVERAGE_DATA}" | tee /dev/stderr | grep lines... | cut -d ' ' -f 4) COVERAGE_VALUE=${COVERAGE_VALUE%?} -[[ -z "${ENVOY_COVERAGE_ARTIFACT}" ]] || tar zcf "${ENVOY_COVERAGE_ARTIFACT}" -C ${COVERAGE_DIR} --transform 's/^\./coverage/' . +if [ "${FUZZ_COVERAGE}" == "true" ] +then + [[ -z "${ENVOY_FUZZ_COVERAGE_ARTIFACT}" ]] || tar zcf "${ENVOY_FUZZ_COVERAGE_ARTIFACT}" -C ${COVERAGE_DIR} --transform 's/^\./fuzz_coverage/' . +else + [[ -z "${ENVOY_COVERAGE_ARTIFACT}" ]] || tar zcf "${ENVOY_COVERAGE_ARTIFACT}" -C ${COVERAGE_DIR} --transform 's/^\./coverage/' . +fi if [[ "$VALIDATE_COVERAGE" == "true" ]]; then if [[ "${FUZZ_COVERAGE}" == "true" ]]; then @@ -66,7 +71,7 @@ fi # We want to allow per_file_coverage to fail without exiting this script. set +e -if [[ "$VALIDATE_COVERAGE" == "true" ]]; then +if [[ "$VALIDATE_COVERAGE" == "true" ]] && [[ "{FUZZ_COVERAGE}" == "false" ]]; then echo "Checking per-extension coverage" output=$(./test/per_file_coverage.sh) diff --git a/test/server/BUILD b/test/server/BUILD index 51d24f7de755..007ae24ab5d0 100644 --- a/test/server/BUILD +++ b/test/server/BUILD @@ -322,7 +322,6 @@ envoy_cc_fuzz_test( "//test/integration:integration_lib", "//test/mocks/server:options_mocks", "//test/mocks/server:hot_restart_mocks", - "//test/mocks/stats:stats_mocks", "//test/test_common:environment_lib", ] + select({ "//bazel:windows_x86_64": envoy_all_extensions(WINDOWS_SKIP_TARGETS), diff --git a/test/server/server_corpus/api_boost_crash b/test/server/server_corpus/api_boost_crash new file mode 100644 index 000000000000..2dc13ace4237 --- /dev/null +++ b/test/server/server_corpus/api_boost_crash @@ -0,0 +1,138 @@ +node { + client_features: "&" +} +static_resources { + listeners { + name: " " + address { + pipe { + path: "aa\000" + } + } + transparent { + } + } +} +stats_sinks { + typed_config { + [type.googleapis.com/envoy.api.v2.route.Route] { + route { + retry_policy { + retriable_status_codes: 115 + retriable_status_codes: 116 + retriable_status_codes: 97 + retriable_status_codes: 116 + retriable_status_codes: 101 + retriable_status_codes: 113 + retriable_status_codes: 32 + retriable_status_codes: 123 + retriable_status_codes: 40 + retriable_status_codes: 36 + retriable_status_codes: 32 + retriable_status_codes: 42 + retriable_status_codes: 115 + retriable_status_codes: 116 + retriable_status_codes: 97 + retriable_status_codes: 116 + retriable_status_codes: 101 + retriable_status_codes: 113 + retriable_status_codes: 32 + retriable_status_codes: 123 + retriable_status_codes: 40 + retriable_status_codes: 36 + 
retriable_status_codes: 32 + retriable_status_codes: 32 + retriable_status_codes: 99 + retriable_status_codes: 108 + retriable_status_codes: 117 + retriable_status_codes: 115 + retriable_status_codes: 116 + retriable_status_codes: 101 + retriable_status_codes: 114 + retriable_status_codes: 123 + retriable_status_codes: 115 + } + } + } + } +} +stats_sinks { + typed_config { + [type.googleapis.com/envoy.api.v2.route.Route] { + route { + retry_policy { + retriable_status_codes: 115 + retriable_status_codes: 116 + retriable_status_codes: 97 + retriable_status_codes: 116 + retriable_status_codes: 105 + retriable_status_codes: 99 + retriable_status_codes: 95 + retriable_status_codes: 114 + retriable_status_codes: 101 + retriable_status_codes: 115 + retriable_status_codes: 111 + retriable_status_codes: 117 + retriable_status_codes: 114 + retriable_status_codes: 99 + retriable_status_codes: 65 + retriable_status_codes: 101 + retriable_status_codes: 32 + retriable_status_codes: 99 + retriable_status_codes: 115 + retriable_status_codes: 116 + retriable_status_codes: 97 + retriable_status_codes: 32 + retriable_status_codes: 32 + retriable_status_codes: 99 + retriable_status_codes: 115 + retriable_status_codes: 116 + retriable_status_codes: 97 + } + } + } + } +} +stats_sinks { + typed_config { + [type.googleapis.com/envoy.api.v2.route.Route] { + route { + retry_policy { + retriable_status_codes: 115 + retriable_status_codes: 116 + retriable_status_codes: 97 + retriable_status_codes: 116 + retriable_status_codes: 101 + retriable_status_codes: 113 + retriable_status_codes: 32 + retriable_status_codes: 123 + retriable_status_codes: 40 + retriable_status_codes: 36 + retriable_status_codes: 32 + retriable_status_codes: 42 + retriable_status_codes: 115 + retriable_status_codes: 116 + retriable_status_codes: 97 + retriable_status_codes: 116 + retriable_status_codes: 101 + retriable_status_codes: 113 + retriable_status_codes: 32 + retriable_status_codes: 123 + retriable_status_codes: 40 + retriable_status_codes: 36 + retriable_status_codes: 32 + retriable_status_codes: 32 + retriable_status_codes: 99 + retriable_status_codes: 108 + retriable_status_codes: 117 + retriable_status_codes: 115 + retriable_status_codes: 116 + retriable_status_codes: 101 + retriable_status_codes: 114 + retriable_status_codes: 123 + retriable_status_codes: 115 + } + } + } + } +} \ No newline at end of file From b7f3561bec43085997ad29703f3e63b42b288715 Mon Sep 17 00:00:00 2001 From: Rama Chavali Date: Mon, 20 Jul 2020 03:55:20 +0530 Subject: [PATCH 681/909] eds: change eds cluster factory comments (#12175) Signed-off-by: Rama Chavali --- source/common/upstream/eds.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/common/upstream/eds.cc b/source/common/upstream/eds.cc index 5a75a990d0dc..3433d608898c 100644 --- a/source/common/upstream/eds.cc +++ b/source/common/upstream/eds.cc @@ -293,7 +293,7 @@ EdsClusterFactory::createClusterImpl( } /** - * Static registration for the strict dns cluster factory. @see RegisterFactory. + * Static registration for the Eds cluster factory. @see RegisterFactory. 
*/ REGISTER_FACTORY(EdsClusterFactory, ClusterFactory); From cb525708f06b738ae79349a41e331b120a27b5e5 Mon Sep 17 00:00:00 2001 From: Rama Chavali Date: Mon, 20 Jul 2020 15:01:51 +0530 Subject: [PATCH 682/909] imprve ssl socket test coverage (#12177) Signed-off-by: Rama Chavali --- test/extensions/transport_sockets/tls/ssl_socket_test.cc | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/test/extensions/transport_sockets/tls/ssl_socket_test.cc b/test/extensions/transport_sockets/tls/ssl_socket_test.cc index 29705379547a..95395c0640de 100644 --- a/test/extensions/transport_sockets/tls/ssl_socket_test.cc +++ b/test/extensions/transport_sockets/tls/ssl_socket_test.cc @@ -345,8 +345,13 @@ void testUtil(const TestUtilOptions& options) { EXPECT_EQ(options.expectedDigest(), server_connection->ssl()->sha256PeerCertificateDigest()); } + // Assert twice to ensure a cached value is returned and still valid. EXPECT_EQ(options.expectedClientCertUri(), server_connection->ssl()->uriSanPeerCertificate()); + EXPECT_EQ(options.expectedClientCertUri(), server_connection->ssl()->uriSanPeerCertificate()); + if (!options.expectedLocalUri().empty()) { + // Assert twice to ensure a cached value is returned and still valid. + EXPECT_EQ(options.expectedLocalUri(), server_connection->ssl()->uriSanLocalCertificate()); EXPECT_EQ(options.expectedLocalUri(), server_connection->ssl()->uriSanLocalCertificate()); } EXPECT_EQ(options.expectedSerialNumber(), @@ -653,6 +658,8 @@ const std::string testUtilV2(const TestUtilOptionsV2& options) { dynamic_cast(client_connection->ssl().get()); SSL* client_ssl_socket = ssl_socket->ssl(); if (!options.expectedProtocolVersion().empty()) { + // Assert twice to ensure a cached value is returned and still valid. + EXPECT_EQ(options.expectedProtocolVersion(), client_connection->ssl()->tlsVersion()); EXPECT_EQ(options.expectedProtocolVersion(), client_connection->ssl()->tlsVersion()); } if (!options.expectedCiphersuite().empty()) { From b4b8210099759700da34f08fde4ba68f61c1ccdd Mon Sep 17 00:00:00 2001 From: Piotr Sikora Date: Mon, 20 Jul 2020 04:50:09 -0700 Subject: [PATCH 683/909] tls: update BoringSSL-FIPS to 20190808. (#12114) Signed-off-by: Piotr Sikora --- api/envoy/api/v2/auth/common.proto | 4 +-- .../transport_sockets/tls/v3/common.proto | 4 +-- .../tls/v4alpha/common.proto | 4 +-- bazel/external/boringssl_fips.genrule_cmd | 31 ++++++++++--------- bazel/external/boringssl_fips.patch | 18 +++++++++++ bazel/repositories.bzl | 1 + bazel/repository_locations.bzl | 6 ++-- .../root/intro/arch_overview/security/ssl.rst | 6 ++-- .../envoy/api/v2/auth/common.proto | 4 +-- .../transport_sockets/tls/v3/common.proto | 4 +-- .../tls/v4alpha/common.proto | 4 +-- .../tls/context_config_impl.cc | 7 +---- .../transport_sockets/tls/ssl_socket_test.cc | 10 +----- 13 files changed, 55 insertions(+), 48 deletions(-) create mode 100644 bazel/external/boringssl_fips.patch diff --git a/api/envoy/api/v2/auth/common.proto b/api/envoy/api/v2/auth/common.proto index ab4b9c13493d..c8122f401029 100644 --- a/api/envoy/api/v2/auth/common.proto +++ b/api/envoy/api/v2/auth/common.proto @@ -45,8 +45,8 @@ message TlsParameters { // servers. TlsProtocol tls_minimum_protocol_version = 1 [(validate.rules).enum = {defined_only: true}]; - // Maximum TLS protocol version. By default, it's ``TLSv1_3`` for servers in non-FIPS builds, and - // ``TLSv1_2`` for clients and for servers using :ref:`BoringSSL FIPS `. + // Maximum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_3`` for + // servers. 
TlsProtocol tls_maximum_protocol_version = 2 [(validate.rules).enum = {defined_only: true}]; // If specified, the TLS listener will only support the specified `cipher list diff --git a/api/envoy/extensions/transport_sockets/tls/v3/common.proto b/api/envoy/extensions/transport_sockets/tls/v3/common.proto index b468f5b7e412..115ecad72f99 100644 --- a/api/envoy/extensions/transport_sockets/tls/v3/common.proto +++ b/api/envoy/extensions/transport_sockets/tls/v3/common.proto @@ -45,8 +45,8 @@ message TlsParameters { // servers. TlsProtocol tls_minimum_protocol_version = 1 [(validate.rules).enum = {defined_only: true}]; - // Maximum TLS protocol version. By default, it's ``TLSv1_3`` for servers in non-FIPS builds, and - // ``TLSv1_2`` for clients and for servers using :ref:`BoringSSL FIPS `. + // Maximum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_3`` for + // servers. TlsProtocol tls_maximum_protocol_version = 2 [(validate.rules).enum = {defined_only: true}]; // If specified, the TLS listener will only support the specified `cipher list diff --git a/api/envoy/extensions/transport_sockets/tls/v4alpha/common.proto b/api/envoy/extensions/transport_sockets/tls/v4alpha/common.proto index f81442f4dbcd..0b63ade128d3 100644 --- a/api/envoy/extensions/transport_sockets/tls/v4alpha/common.proto +++ b/api/envoy/extensions/transport_sockets/tls/v4alpha/common.proto @@ -46,8 +46,8 @@ message TlsParameters { // servers. TlsProtocol tls_minimum_protocol_version = 1 [(validate.rules).enum = {defined_only: true}]; - // Maximum TLS protocol version. By default, it's ``TLSv1_3`` for servers in non-FIPS builds, and - // ``TLSv1_2`` for clients and for servers using :ref:`BoringSSL FIPS `. + // Maximum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_3`` for + // servers. TlsProtocol tls_maximum_protocol_version = 2 [(validate.rules).enum = {defined_only: true}]; // If specified, the TLS listener will only support the specified `cipher list diff --git a/bazel/external/boringssl_fips.genrule_cmd b/bazel/external/boringssl_fips.genrule_cmd index cff25f0f084e..25455c91e564 100644 --- a/bazel/external/boringssl_fips.genrule_cmd +++ b/bazel/external/boringssl_fips.genrule_cmd @@ -2,8 +2,8 @@ set -e -# BoringSSL build as described in the Security Policy for BoringCrypto module (2018-10-25): -# https://csrc.nist.gov/CSRC/media/projects/cryptographic-module-validation-program/documents/security-policies/140sp3318.pdf +# BoringSSL build as described in the Security Policy for BoringCrypto module (2020-07-02): +# https://csrc.nist.gov/CSRC/media/projects/cryptographic-module-validation-program/documents/security-policies/140sp3678.pdf # This works only on Linux-x86_64. if [[ `uname` != "Linux" || `uname -m` != "x86_64" ]]; then @@ -16,16 +16,16 @@ ROOT=$$(dirname $(rootpath boringssl/BUILDING.md))/.. pushd $$ROOT # Build tools requirements: -# - Clang compiler version 6.0.1 (https://releases.llvm.org/download.html) -# - Go programming language version 1.10.3 (https://golang.org/dl/) -# - Ninja build system version 1.8.2 (https://github.com/ninja-build/ninja/releases) +# - Clang compiler version 7.0.1 (https://releases.llvm.org/download.html) +# - Go programming language version 1.12.7 (https://golang.org/dl/) +# - Ninja build system version 1.9.0 (https://github.com/ninja-build/ninja/releases) # Override $$PATH for build tools, to avoid picking up anything else. 
export PATH="$$(dirname `which cmake`):/usr/bin:/bin" -# Clang 6.0.1 -VERSION=6.0.1 -SHA256=7ea204ecd78c39154d72dfc0d4a79f7cce1b2264da2551bb2eef10e266d54d91 +# Clang 7.0.1 +VERSION=7.0.1 +SHA256=02ad925add5b2b934d64c3dd5cbd1b2002258059f7d962993ba7f16524c3089c PLATFORM="x86_64-linux-gnu-ubuntu-16.04" curl -sLO https://releases.llvm.org/"$$VERSION"/clang+llvm-"$$VERSION"-"$$PLATFORM".tar.xz \ @@ -41,26 +41,27 @@ if [[ `clang --version | head -1 | awk '{print $$3}'` != "$$VERSION" ]]; then exit 1 fi -# Go 1.10.3 -VERSION=1.10.3 -SHA256=fa1b0e45d3b647c252f51f5e1204aba049cde4af177ef9f2181f43004f901035 +# Go 1.12.7 +VERSION=1.12.7 +SHA256=66d83bfb5a9ede000e33c6579a91a29e6b101829ad41fffb5c5bb6c900e109d9 PLATFORM="linux-amd64" curl -sLO https://dl.google.com/go/go"$$VERSION"."$$PLATFORM".tar.gz \ && echo "$$SHA256" go"$$VERSION"."$$PLATFORM".tar.gz | sha256sum --check tar xf go"$$VERSION"."$$PLATFORM".tar.gz +export GOPATH="$$PWD/gopath" export GOROOT="$$PWD/go" -export PATH="$$GOROOT/bin:$$PATH" +export PATH="$$GOPATH/bin:$$GOROOT/bin:$$PATH" if [[ `go version | awk '{print $$3}'` != "go$$VERSION" ]]; then echo "ERROR: Go version doesn't match." exit 1 fi -# Ninja 1.8.2 -VERSION=1.8.2 -SHA256=d2fea9ff33b3ef353161ed906f260d565ca55b8ca0568fa07b1d2cab90a84a07 +# Ninja 1.9.0 +VERSION=1.9.0 +SHA256=1b1235f2b0b4df55ac6d80bbe681ea3639c9d2c505c7ff2159a3daf63d196305 PLATFORM="linux" curl -sLO https://github.com/ninja-build/ninja/releases/download/v"$$VERSION"/ninja-"$$PLATFORM".zip \ diff --git a/bazel/external/boringssl_fips.patch b/bazel/external/boringssl_fips.patch new file mode 100644 index 000000000000..37247dc2f5c5 --- /dev/null +++ b/bazel/external/boringssl_fips.patch @@ -0,0 +1,18 @@ +# Fix FIPS build (from BoringSSL commit 4ca15d5dcbe6e8051a4654df7c971ea8307abfe0). +# +# The modulewrapper is not a part of the FIPS module, so it can be patched without +# concern about breaking the FIPS validation. +--- boringssl/util/fipstools/acvp/modulewrapper/modulewrapper.cc ++++ boringssl/util/fipstools/acvp/modulewrapper/modulewrapper.cc +@@ -12,9 +12,11 @@ + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ + ++#include + #include + + #include ++#include + #include + #include + #include diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index a83b8ad416bd..cd63791ed747 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -236,6 +236,7 @@ def _boringssl_fips(): sha256 = location["sha256"], genrule_cmd_file = "@envoy//bazel/external:boringssl_fips.genrule_cmd", build_file = "@envoy//bazel/external:boringssl_fips.BUILD", + patches = ["@envoy//bazel/external:boringssl_fips.patch"], ) def _com_github_circonus_labs_libcircllhist(): diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 5c098ce04975..2e748134acff 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -88,9 +88,9 @@ DEPENDENCY_REPOSITORIES = dict( cpe = "N/A", ), boringssl_fips = dict( - sha256 = "b12ad676ee533824f698741bd127f6fbc82c46344398a6d78d25e62c6c418c73", - # fips-20180730 - urls = ["https://commondatastorage.googleapis.com/chromium-boringssl-docs/fips/boringssl-66005f41fbc3529ffe8d007708756720529da20d.tar.xz"], + sha256 = "3b5fdf23274d4179c2077b5e8fa625d9debd7a390aac1d165b7e47234f648bb8", + # fips-20190808 + urls = ["https://commondatastorage.googleapis.com/chromium-boringssl-fips/boringssl-ae223d6138807a13006342edfeef32e813246b39.tar.xz"], use_category = ["dataplane"], cpe = "N/A", ), diff --git a/docs/root/intro/arch_overview/security/ssl.rst b/docs/root/intro/arch_overview/security/ssl.rst index 7790ac42ed6d..4a5d4f0ea246 100644 --- a/docs/root/intro/arch_overview/security/ssl.rst +++ b/docs/root/intro/arch_overview/security/ssl.rst @@ -42,7 +42,7 @@ FIPS 140-2 BoringSSL can be built in a `FIPS-compliant mode `_, following the build instructions from the `Security Policy for BoringCrypto module -`_, +`_, using ``--define boringssl=fips`` Bazel option. Currently, this option is only available on Linux-x86_64. The correctness of the FIPS build can be verified by checking the presence of ``BoringSSL-FIPS`` @@ -53,11 +53,11 @@ it's not sufficient by itself, and depending on the context, additional steps mi The extra requirements may include using only approved algorithms and/or using only private keys generated by a module operating in FIPS-approved mode. For more information, please refer to the `Security Policy for BoringCrypto module -`_ +`_ and/or an `accredited CMVP laboratory `_. Please note that the FIPS-compliant build is based on an older version of BoringSSL than -the non-FIPS build, and it predates the final version of TLS 1.3. +the non-FIPS build, and it doesn't support the most recent QUIC APIs. .. _arch_overview_ssl_enabling_verification: diff --git a/generated_api_shadow/envoy/api/v2/auth/common.proto b/generated_api_shadow/envoy/api/v2/auth/common.proto index ab4b9c13493d..c8122f401029 100644 --- a/generated_api_shadow/envoy/api/v2/auth/common.proto +++ b/generated_api_shadow/envoy/api/v2/auth/common.proto @@ -45,8 +45,8 @@ message TlsParameters { // servers. TlsProtocol tls_minimum_protocol_version = 1 [(validate.rules).enum = {defined_only: true}]; - // Maximum TLS protocol version. By default, it's ``TLSv1_3`` for servers in non-FIPS builds, and - // ``TLSv1_2`` for clients and for servers using :ref:`BoringSSL FIPS `. + // Maximum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_3`` for + // servers. 
TlsProtocol tls_maximum_protocol_version = 2 [(validate.rules).enum = {defined_only: true}]; // If specified, the TLS listener will only support the specified `cipher list diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/common.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/common.proto index a54ba1faeb97..417cf0054df4 100644 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/common.proto +++ b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/common.proto @@ -45,8 +45,8 @@ message TlsParameters { // servers. TlsProtocol tls_minimum_protocol_version = 1 [(validate.rules).enum = {defined_only: true}]; - // Maximum TLS protocol version. By default, it's ``TLSv1_3`` for servers in non-FIPS builds, and - // ``TLSv1_2`` for clients and for servers using :ref:`BoringSSL FIPS `. + // Maximum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_3`` for + // servers. TlsProtocol tls_maximum_protocol_version = 2 [(validate.rules).enum = {defined_only: true}]; // If specified, the TLS listener will only support the specified `cipher list diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/common.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/common.proto index f81442f4dbcd..0b63ade128d3 100644 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/common.proto +++ b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/common.proto @@ -46,8 +46,8 @@ message TlsParameters { // servers. TlsProtocol tls_minimum_protocol_version = 1 [(validate.rules).enum = {defined_only: true}]; - // Maximum TLS protocol version. By default, it's ``TLSv1_3`` for servers in non-FIPS builds, and - // ``TLSv1_2`` for clients and for servers using :ref:`BoringSSL FIPS `. + // Maximum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_3`` for + // servers. TlsProtocol tls_maximum_protocol_version = 2 [(validate.rules).enum = {defined_only: true}]; // If specified, the TLS listener will only support the specified `cipher list diff --git a/source/extensions/transport_sockets/tls/context_config_impl.cc b/source/extensions/transport_sockets/tls/context_config_impl.cc index 793adc7597de..6f20081eed80 100644 --- a/source/extensions/transport_sockets/tls/context_config_impl.cc +++ b/source/extensions/transport_sockets/tls/context_config_impl.cc @@ -353,12 +353,7 @@ ClientContextConfigImpl::ClientContextConfigImpl( } const unsigned ServerContextConfigImpl::DEFAULT_MIN_VERSION = TLS1_VERSION; -const unsigned ServerContextConfigImpl::DEFAULT_MAX_VERSION = -#ifndef BORINGSSL_FIPS - TLS1_3_VERSION; -#else // BoringSSL FIPS - TLS1_2_VERSION; -#endif +const unsigned ServerContextConfigImpl::DEFAULT_MAX_VERSION = TLS1_3_VERSION; const std::string ServerContextConfigImpl::DEFAULT_CIPHER_SUITES = #ifndef BORINGSSL_FIPS diff --git a/test/extensions/transport_sockets/tls/ssl_socket_test.cc b/test/extensions/transport_sockets/tls/ssl_socket_test.cc index 95395c0640de..f4f76258fb31 100644 --- a/test/extensions/transport_sockets/tls/ssl_socket_test.cc +++ b/test/extensions/transport_sockets/tls/ssl_socket_test.cc @@ -3649,7 +3649,7 @@ TEST_P(SslSocketTest, ProtocolVersions) { client_params->clear_tls_minimum_protocol_version(); client_params->clear_tls_maximum_protocol_version(); - // Connection using TLSv1.3 (client) and defaults (server) succeeds (non-FIPS) or fails (FIPS). 
+ // Connection using TLSv1.3 (client) and defaults (server) succeeds. client_params->set_tls_minimum_protocol_version( envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_3); client_params->set_tls_maximum_protocol_version( @@ -3659,11 +3659,7 @@ TEST_P(SslSocketTest, ProtocolVersions) { TestUtilOptionsV2 error_test_options(listener, client, false, GetParam()); error_test_options.setExpectedServerStats("ssl.connection_error") .setExpectedTransportFailureReasonContains("TLSV1_ALERT_PROTOCOL_VERSION"); -#ifndef BORINGSSL_FIPS testUtilV2(tls_v1_3_test_options); -#else // BoringSSL FIPS - testUtilV2(error_test_options); -#endif client_params->clear_tls_minimum_protocol_version(); client_params->clear_tls_maximum_protocol_version(); @@ -3672,11 +3668,7 @@ TEST_P(SslSocketTest, ProtocolVersions) { envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_0); client_params->set_tls_maximum_protocol_version( envoy::extensions::transport_sockets::tls::v3::TlsParameters::TLSv1_3); -#ifndef BORINGSSL_FIPS testUtilV2(tls_v1_3_test_options); -#else // BoringSSL FIPS - testUtilV2(tls_v1_2_test_options); -#endif client_params->clear_tls_minimum_protocol_version(); client_params->clear_tls_maximum_protocol_version(); From 4b3801c4dfc483a0c112d95cd7690b7817b2cbad Mon Sep 17 00:00:00 2001 From: Arthur Yan <55563955+arthuryan-k@users.noreply.github.com> Date: Mon, 20 Jul 2020 08:20:41 -0400 Subject: [PATCH 684/909] test: move tests to fuzz subdirectory (noop) (#12151) Moved generic listener filter fuzzer into common/fuzz for consistency with other (network, http) filter fuzzers. Updated dependencies in related files Signed-off-by: Arthur Yan --- test/extensions/filters/listener/common/{ => fuzz}/BUILD | 0 .../listener/common/{ => fuzz}/listener_filter_fuzzer.cc | 2 +- .../listener/common/{ => fuzz}/listener_filter_fuzzer.h | 2 +- .../listener/common/{ => fuzz}/listener_filter_fuzzer.proto | 0 test/extensions/filters/listener/original_dst/BUILD | 2 +- .../filters/listener/original_dst/original_dst_fuzz_test.cc | 4 ++-- test/extensions/filters/listener/original_src/BUILD | 4 ++-- .../filters/listener/original_src/original_src_fuzz_test.cc | 2 +- .../listener/original_src/original_src_fuzz_test.proto | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) rename test/extensions/filters/listener/common/{ => fuzz}/BUILD (100%) rename test/extensions/filters/listener/common/{ => fuzz}/listener_filter_fuzzer.cc (90%) rename test/extensions/filters/listener/common/{ => fuzz}/listener_filter_fuzzer.h (90%) rename test/extensions/filters/listener/common/{ => fuzz}/listener_filter_fuzzer.proto (100%) diff --git a/test/extensions/filters/listener/common/BUILD b/test/extensions/filters/listener/common/fuzz/BUILD similarity index 100% rename from test/extensions/filters/listener/common/BUILD rename to test/extensions/filters/listener/common/fuzz/BUILD diff --git a/test/extensions/filters/listener/common/listener_filter_fuzzer.cc b/test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.cc similarity index 90% rename from test/extensions/filters/listener/common/listener_filter_fuzzer.cc rename to test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.cc index 6ae3d9f689d8..f38aba8918f2 100644 --- a/test/extensions/filters/listener/common/listener_filter_fuzzer.cc +++ b/test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.cc @@ -1,4 +1,4 @@ -#include "test/extensions/filters/listener/common/listener_filter_fuzzer.h" +#include 
"test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.h" #include "common/network/utility.h" diff --git a/test/extensions/filters/listener/common/listener_filter_fuzzer.h b/test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.h similarity index 90% rename from test/extensions/filters/listener/common/listener_filter_fuzzer.h rename to test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.h index d32eda68dbd7..fe81a9e12cc4 100644 --- a/test/extensions/filters/listener/common/listener_filter_fuzzer.h +++ b/test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.h @@ -1,6 +1,6 @@ #include "envoy/network/filter.h" -#include "test/extensions/filters/listener/common/listener_filter_fuzzer.pb.validate.h" +#include "test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.pb.validate.h" #include "test/mocks/network/fakes.h" #include "test/mocks/network/mocks.h" diff --git a/test/extensions/filters/listener/common/listener_filter_fuzzer.proto b/test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.proto similarity index 100% rename from test/extensions/filters/listener/common/listener_filter_fuzzer.proto rename to test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.proto diff --git a/test/extensions/filters/listener/original_dst/BUILD b/test/extensions/filters/listener/original_dst/BUILD index 50c3b5931b3a..ba71d8f6bbb9 100644 --- a/test/extensions/filters/listener/original_dst/BUILD +++ b/test/extensions/filters/listener/original_dst/BUILD @@ -28,6 +28,6 @@ envoy_cc_fuzz_test( corpus = "original_dst_corpus", deps = [ "//source/extensions/filters/listener/original_dst:original_dst_lib", - "//test/extensions/filters/listener/common:listener_filter_fuzzer_lib", + "//test/extensions/filters/listener/common/fuzz:listener_filter_fuzzer_lib", ], ) diff --git a/test/extensions/filters/listener/original_dst/original_dst_fuzz_test.cc b/test/extensions/filters/listener/original_dst/original_dst_fuzz_test.cc index db982456e34f..5476b6326e3a 100644 --- a/test/extensions/filters/listener/original_dst/original_dst_fuzz_test.cc +++ b/test/extensions/filters/listener/original_dst/original_dst_fuzz_test.cc @@ -1,7 +1,7 @@ #include "extensions/filters/listener/original_dst/original_dst.h" -#include "test/extensions/filters/listener/common/listener_filter_fuzzer.h" -#include "test/extensions/filters/listener/common/listener_filter_fuzzer.pb.validate.h" +#include "test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.h" +#include "test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.pb.validate.h" #include "test/fuzz/fuzz_runner.h" namespace Envoy { diff --git a/test/extensions/filters/listener/original_src/BUILD b/test/extensions/filters/listener/original_src/BUILD index e4ab73504612..217e8b2586de 100644 --- a/test/extensions/filters/listener/original_src/BUILD +++ b/test/extensions/filters/listener/original_src/BUILD @@ -56,7 +56,7 @@ envoy_proto_library( name = "original_src_fuzz_test_proto", srcs = ["original_src_fuzz_test.proto"], deps = [ - "//test/extensions/filters/listener/common:listener_filter_fuzzer_proto", + "//test/extensions/filters/listener/common/fuzz:listener_filter_fuzzer_proto", "@envoy_api//envoy/extensions/filters/listener/original_src/v3:pkg", ], ) @@ -68,6 +68,6 @@ envoy_cc_fuzz_test( deps = [ ":original_src_fuzz_test_proto_cc_proto", "//source/extensions/filters/listener/original_src:original_src_lib", - 
"//test/extensions/filters/listener/common:listener_filter_fuzzer_lib", + "//test/extensions/filters/listener/common/fuzz:listener_filter_fuzzer_lib", ], ) diff --git a/test/extensions/filters/listener/original_src/original_src_fuzz_test.cc b/test/extensions/filters/listener/original_src/original_src_fuzz_test.cc index 3423b71560fc..c677a55b8d55 100644 --- a/test/extensions/filters/listener/original_src/original_src_fuzz_test.cc +++ b/test/extensions/filters/listener/original_src/original_src_fuzz_test.cc @@ -1,6 +1,6 @@ #include "extensions/filters/listener/original_src/original_src.h" -#include "test/extensions/filters/listener/common/listener_filter_fuzzer.h" +#include "test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.h" #include "test/extensions/filters/listener/original_src/original_src_fuzz_test.pb.validate.h" #include "test/fuzz/fuzz_runner.h" diff --git a/test/extensions/filters/listener/original_src/original_src_fuzz_test.proto b/test/extensions/filters/listener/original_src/original_src_fuzz_test.proto index 6934664f7fe0..303b3c86daaa 100644 --- a/test/extensions/filters/listener/original_src/original_src_fuzz_test.proto +++ b/test/extensions/filters/listener/original_src/original_src_fuzz_test.proto @@ -3,7 +3,7 @@ syntax = "proto3"; package envoy.extensions.filters.listener.original_src; import "envoy/extensions/filters/listener/original_src/v3/original_src.proto"; -import "test/extensions/filters/listener/common/listener_filter_fuzzer.proto"; +import "test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.proto"; import "validate/validate.proto"; message OriginalSrcTestCase { From b250fed7b9f37b95014ce29108a1dc41170d2bf0 Mon Sep 17 00:00:00 2001 From: Michael Rebello Date: Mon, 20 Jul 2020 05:31:53 -0700 Subject: [PATCH 685/909] build: update bazel to 3.4.1 (#12123) Note that Bazel's toolchain for 3.4.1 is tagged 3.4.0: https://github.com/bazelbuild/bazel-toolchains/releases/tag/3.4.0. Depends on envoyproxy/envoy-build-tools#85. 
Signed-off-by: Michael Rebello me@michaelrebello.com Risk Level: Medium Testing: CI Docs Changes: N/A Release Notes: N/A Signed-off-by: Michael Rebello --- .bazelversion | 2 +- bazel/repository_locations.bzl | 16 ++++++++-------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/.bazelversion b/.bazelversion index bea438e9ade7..47b322c971c3 100644 --- a/.bazelversion +++ b/.bazelversion @@ -1 +1 @@ -3.3.1 +3.4.1 diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 2e748134acff..5df269d463ae 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -53,11 +53,11 @@ DEPENDENCY_REPOSITORIES = dict( use_category = ["build"], ), bazel_toolchains = dict( - sha256 = "2431088b38fd8e2878db17e3c5babb431de9e5c52b6d8b509d3070fa279a5be2", - strip_prefix = "bazel-toolchains-3.3.1", + sha256 = "882fecfc88d3dc528f5c5681d95d730e213e39099abff2e637688a91a9619395", + strip_prefix = "bazel-toolchains-3.4.0", urls = [ - "https://github.com/bazelbuild/bazel-toolchains/releases/download/3.3.1/bazel-toolchains-3.3.1.tar.gz", - "https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/archive/3.3.1.tar.gz", + "https://github.com/bazelbuild/bazel-toolchains/releases/download/3.4.0/bazel-toolchains-3.4.0.tar.gz", + "https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/archive/3.4.0.tar.gz", ], use_category = ["build"], ), @@ -67,10 +67,10 @@ DEPENDENCY_REPOSITORIES = dict( use_category = ["build"], ), envoy_build_tools = dict( - sha256 = "dd5cc89bb69544659b20b88b28e642da0174739b68c82f029617b9749d61ab1d", - strip_prefix = "envoy-build-tools-289a5ca65aefd5a76f18f103d1425cfec5591417", - # 2020-07-15 - urls = ["https://github.com/envoyproxy/envoy-build-tools/archive/289a5ca65aefd5a76f18f103d1425cfec5591417.tar.gz"], + sha256 = "88e58fdb42021e64a0b35ae3554a82e92f5c37f630a4dab08a132fc77f8db4b7", + strip_prefix = "envoy-build-tools-1d6573e60207efaae6436b25ecc594360294f63a", + # 2020-07-18 + urls = ["https://github.com/envoyproxy/envoy-build-tools/archive/1d6573e60207efaae6436b25ecc594360294f63a.tar.gz"], use_category = ["build"], ), boringssl = dict( From e6c57fab8022d9df228052f1f517a57584030654 Mon Sep 17 00:00:00 2001 From: Sam Flattery <44659644+samflattery@users.noreply.github.com> Date: Mon, 20 Jul 2020 13:38:22 +0100 Subject: [PATCH 686/909] [fuzz] fix filter crashes from OSS fuzz (#12152) * fix filter fuzzer crash from OSS fuzz by checking for non-implemented proto fields This will raise an exception that will be caught and abort the test run when an unimplemented oneof field is hit by the fuzzer Signed-off-by: Sam Flattery --- api/envoy/config/tap/v3/common.proto | 1 + api/envoy/config/tap/v4alpha/common.proto | 1 + .../envoy/config/tap/v3/common.proto | 1 + .../envoy/config/tap/v4alpha/common.proto | 1 + ...case-minimized-filter_fuzz_test-5635252589690880 | 7 +++++++ .../filters/http/common/fuzz/uber_per_filter.cc | 13 +++++++++++++ tools/spelling/spelling_dictionary.txt | 1 + 7 files changed, 25 insertions(+) create mode 100644 test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5635252589690880 diff --git a/api/envoy/config/tap/v3/common.proto b/api/envoy/config/tap/v3/common.proto index e51aba968d42..812c30399e75 100644 --- a/api/envoy/config/tap/v3/common.proto +++ b/api/envoy/config/tap/v3/common.proto @@ -231,6 +231,7 @@ message OutputSink { // [#not-implemented-hide:] // GrpcService to stream data to. 
The format argument must be PROTO_BINARY. + // [#comment: TODO(samflattery): remove cleanup in uber_per_filter.cc once implemented] StreamingGrpcSink streaming_grpc = 4; } } diff --git a/api/envoy/config/tap/v4alpha/common.proto b/api/envoy/config/tap/v4alpha/common.proto index 53cb57e5d459..281150715c1b 100644 --- a/api/envoy/config/tap/v4alpha/common.proto +++ b/api/envoy/config/tap/v4alpha/common.proto @@ -233,6 +233,7 @@ message OutputSink { // [#not-implemented-hide:] // GrpcService to stream data to. The format argument must be PROTO_BINARY. + // [#comment: TODO(samflattery): remove cleanup in uber_per_filter.cc once implemented] StreamingGrpcSink streaming_grpc = 4; } } diff --git a/generated_api_shadow/envoy/config/tap/v3/common.proto b/generated_api_shadow/envoy/config/tap/v3/common.proto index e51aba968d42..812c30399e75 100644 --- a/generated_api_shadow/envoy/config/tap/v3/common.proto +++ b/generated_api_shadow/envoy/config/tap/v3/common.proto @@ -231,6 +231,7 @@ message OutputSink { // [#not-implemented-hide:] // GrpcService to stream data to. The format argument must be PROTO_BINARY. + // [#comment: TODO(samflattery): remove cleanup in uber_per_filter.cc once implemented] StreamingGrpcSink streaming_grpc = 4; } } diff --git a/generated_api_shadow/envoy/config/tap/v4alpha/common.proto b/generated_api_shadow/envoy/config/tap/v4alpha/common.proto index 53cb57e5d459..281150715c1b 100644 --- a/generated_api_shadow/envoy/config/tap/v4alpha/common.proto +++ b/generated_api_shadow/envoy/config/tap/v4alpha/common.proto @@ -233,6 +233,7 @@ message OutputSink { // [#not-implemented-hide:] // GrpcService to stream data to. The format argument must be PROTO_BINARY. + // [#comment: TODO(samflattery): remove cleanup in uber_per_filter.cc once implemented] StreamingGrpcSink streaming_grpc = 4; } } diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5635252589690880 b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5635252589690880 new file mode 100644 index 000000000000..6d7a709ef7ac --- /dev/null +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5635252589690880 @@ -0,0 +1,7 @@ +config { + name: "envoy.filters.http.tap" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.http.tap.v3.Tap" + value: "\nZ\022X\n\010\032\006\032\004\032\002 \001\022L\nH\"F\n)envoy.service.health.v3.HealthCheckReques\022\031\022\027\n\010BB\017\000\000\000\000\000\"\001R*\010P\000\000\000\000\000\000\000 \001" + } +} diff --git a/test/extensions/filters/http/common/fuzz/uber_per_filter.cc b/test/extensions/filters/http/common/fuzz/uber_per_filter.cc index c6db8b6ffebe..1abdf751940b 100644 --- a/test/extensions/filters/http/common/fuzz/uber_per_filter.cc +++ b/test/extensions/filters/http/common/fuzz/uber_per_filter.cc @@ -103,6 +103,19 @@ void cleanTapConfig(Protobuf::Message* message) { config.mutable_common_config()->mutable_static_config()->mutable_match_config()->set_any_match( true); } + // TODO(samflattery): remove once StreamingGrpcSink is implemented + else if (config.common_config().config_type_case() == + envoy::extensions::common::tap::v3::CommonExtensionConfig::ConfigTypeCase:: + kStaticConfig && + config.common_config() + .static_config() + .output_config() + .sinks(0) + .output_sink_type_case() == + envoy::config::tap::v3::OutputSink::OutputSinkTypeCase::kStreamingGrpc) { + // will be caught in 
UberFilterFuzzer::fuzz + throw EnvoyException("received input with not implemented output_sink_type StreamingGrpcSink"); + } } void UberFilterFuzzer::cleanFuzzedConfig(absl::string_view filter_name, diff --git a/tools/spelling/spelling_dictionary.txt b/tools/spelling/spelling_dictionary.txt index 6170e93bec86..e8aa2fcd6b50 100644 --- a/tools/spelling/spelling_dictionary.txt +++ b/tools/spelling/spelling_dictionary.txt @@ -1111,6 +1111,7 @@ tuples typedef typeid typesafe +uber ucontext udpa uint From 6f55a3e82cbcb72f1377f1cd40cd33fc265e34b4 Mon Sep 17 00:00:00 2001 From: tomocy <36136133+tomocy@users.noreply.github.com> Date: Tue, 21 Jul 2020 02:53:04 +0900 Subject: [PATCH 687/909] format: use type alias (#12125) Commit Message: format: use type alias Additional Description: N/A Risk Level: N/A Testing: N/A Docs Changes: N/A Release Notes: N/A Part of #11634 Signed-off-by: tomocy --- include/envoy/config/grpc_mux.h | 5 ++- source/common/config/grpc_mux_impl.h | 4 +++ source/common/config/grpc_stream.h | 5 ++- source/common/config/grpc_subscription_impl.h | 5 +++ source/common/config/new_grpc_mux_impl.h | 9 ++++-- source/common/grpc/async_client_impl.cc | 2 +- source/common/grpc/async_client_impl.h | 6 +++- .../common/grpc/google_async_client_impl.cc | 4 +-- source/common/grpc/google_async_client_impl.h | 7 ++++- source/common/grpc/typed_async_client.h | 11 ++++--- .../access_loggers/grpc/config_utils.cc | 2 +- .../grpc/grpc_access_log_impl.h | 5 +++ .../grpc/http_grpc_access_log_impl.h | 3 ++ .../extensions/common/aws/region_provider.h | 3 ++ .../common/ext_authz/ext_authz_grpc_impl.h | 3 ++ .../http/grpc_http1_reverse_bridge/filter.h | 3 ++ .../json_transcoder_filter.cc | 31 +++++++++++-------- .../grpc_credentials/aws_iam/config.cc | 2 +- .../grpc_metrics_service_impl.h | 4 +++ .../config/delta_subscription_impl_test.cc | 4 +-- .../config/delta_subscription_test_harness.h | 4 +-- test/common/config/grpc_mux_impl_test.cc | 2 +- .../config/grpc_subscription_test_harness.h | 4 +-- test/common/config/new_grpc_mux_impl_test.cc | 2 +- .../grpc/google_async_client_impl_test.cc | 2 +- .../grpc_client_integration_test_harness.h | 12 +++++-- test/common/upstream/eds_speed_test.cc | 4 +-- .../grpc/grpc_access_log_impl_test.cc | 4 +-- .../grpc/http_grpc_access_log_impl_test.cc | 2 +- .../ext_authz/ext_authz_grpc_impl_test.cc | 2 +- .../reverse_bridge_test.cc | 2 +- .../json_transcoder_filter_test.cc | 14 +++++---- .../filters/http/grpc_stats/config_test.cc | 2 +- .../grpc_metrics_service_impl_test.cc | 2 +- test/mocks/grpc/mocks.h | 7 +++-- 35 files changed, 126 insertions(+), 57 deletions(-) diff --git a/include/envoy/config/grpc_mux.h b/include/envoy/config/grpc_mux.h index 6e19534619bb..2d16c66270dd 100644 --- a/include/envoy/config/grpc_mux.h +++ b/include/envoy/config/grpc_mux.h @@ -1,5 +1,7 @@ #pragma once +#include + #include "envoy/common/exception.h" #include "envoy/common/pure.h" #include "envoy/config/subscription.h" @@ -119,6 +121,7 @@ class GrpcMux { using GrpcMuxPtr = std::unique_ptr; using GrpcMuxSharedPtr = std::shared_ptr; +template using ResponseProtoPtr = std::unique_ptr; /** * A grouping of callbacks that a GrpcMux should provide to its GrpcStream. */ @@ -141,7 +144,7 @@ template class GrpcStreamCallbacks { /** * For the GrpcStream to pass received protos to the context. 
*/ - virtual void onDiscoveryResponse(std::unique_ptr&& message, + virtual void onDiscoveryResponse(ResponseProtoPtr&& message, ControlPlaneStats& control_plane_stats) PURE; /** diff --git a/source/common/config/grpc_mux_impl.h b/source/common/config/grpc_mux_impl.h index a623bed2a08a..f2dfc9529714 100644 --- a/source/common/config/grpc_mux_impl.h +++ b/source/common/config/grpc_mux_impl.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include @@ -141,6 +142,9 @@ class GrpcMuxImpl : public GrpcMux, const envoy::config::core::v3::ApiVersion transport_api_version_; }; +using GrpcMuxImplPtr = std::unique_ptr; +using GrpcMuxImplSharedPtr = std::shared_ptr; + class NullGrpcMuxImpl : public GrpcMux, GrpcStreamCallbacks { public: diff --git a/source/common/config/grpc_stream.h b/source/common/config/grpc_stream.h index 2b4187aac313..5ac368deea0d 100644 --- a/source/common/config/grpc_stream.h +++ b/source/common/config/grpc_stream.h @@ -1,6 +1,7 @@ #pragma once #include +#include #include "envoy/common/random_generator.h" #include "envoy/config/grpc_mux.h" @@ -14,6 +15,8 @@ namespace Envoy { namespace Config { +template using ResponseProtoPtr = std::unique_ptr; + // Oversees communication for gRPC xDS implementations (parent to both regular xDS and delta // xDS variants). Reestablishes the gRPC channel when necessary, and provides rate limiting of // requests. @@ -75,7 +78,7 @@ class GrpcStream : public Grpc::AsyncStreamCallbacks, UNREFERENCED_PARAMETER(metadata); } - void onReceiveMessage(std::unique_ptr&& message) override { + void onReceiveMessage(ResponseProtoPtr&& message) override { // Reset here so that it starts with fresh backoff interval on next disconnect. backoff_strategy_->reset(); // Sometimes during hot restarts this stat's value becomes inconsistent and will continue to diff --git a/source/common/config/grpc_subscription_impl.h b/source/common/config/grpc_subscription_impl.h index b53da3c6e254..a5102055a08c 100644 --- a/source/common/config/grpc_subscription_impl.h +++ b/source/common/config/grpc_subscription_impl.h @@ -1,5 +1,7 @@ #pragma once +#include + #include "envoy/config/grpc_mux.h" #include "envoy/config/subscription.h" #include "envoy/event/dispatcher.h" @@ -54,5 +56,8 @@ class GrpcSubscriptionImpl : public Subscription, const bool is_aggregated_; }; +using GrpcSubscriptionImplPtr = std::unique_ptr; +using GrpcSubscriptionImplSharedPtr = std::shared_ptr; + } // namespace Config } // namespace Envoy diff --git a/source/common/config/new_grpc_mux_impl.h b/source/common/config/new_grpc_mux_impl.h index 5eb226992f78..6c3198f94cc1 100644 --- a/source/common/config/new_grpc_mux_impl.h +++ b/source/common/config/new_grpc_mux_impl.h @@ -1,5 +1,7 @@ #pragma once +#include + #include "envoy/api/v2/discovery.pb.h" #include "envoy/common/random_generator.h" #include "envoy/common/token_bucket.h" @@ -70,8 +72,10 @@ class NewGrpcMuxImpl SubscriptionStuff& operator=(const SubscriptionStuff&) = delete; }; + using SubscriptionStuffPtr = std::unique_ptr; + // for use in tests only - const absl::flat_hash_map>& subscriptions() { + const absl::flat_hash_map& subscriptions() { return subscriptions_; } @@ -130,7 +134,7 @@ class NewGrpcMuxImpl PausableAckQueue pausable_ack_queue_; // Map key is type_url. - absl::flat_hash_map> subscriptions_; + absl::flat_hash_map subscriptions_; // Determines the order of initial discovery requests. (Assumes that subscriptions are added in // the order of Envoy's dependency ordering). 
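The aliases introduced throughout this patch (ResponseProtoPtr, GrpcMuxImplPtr, SubscriptionStuffPtr, and so on) all rely on the same standard C++ alias and alias-template mechanics. A minimal sketch of that pattern, using hypothetical Widget names rather than the Envoy types:

    #include <memory>

    struct Widget {};

    // Plain alias: one short name for the owning-pointer spelling otherwise
    // repeated across member declarations and signatures.
    using WidgetPtr = std::unique_ptr<Widget>;

    // Alias template: the parameterized form, analogous to
    // ResponseProtoPtr<ResponseProto> above.
    template <class T> using OwnedPtr = std::unique_ptr<T>;

    WidgetPtr makeWidget() { return std::make_unique<Widget>(); }
    OwnedPtr<Widget> makeOwnedWidget() { return std::make_unique<Widget>(); }

Callers keep the exact same ownership semantics; only the spelling at the use site changes.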
@@ -145,6 +149,7 @@ class NewGrpcMuxImpl const envoy::config::core::v3::ApiVersion transport_api_version_; }; +using NewGrpcMuxImplPtr = std::unique_ptr; using NewGrpcMuxImplSharedPtr = std::shared_ptr; } // namespace Config diff --git a/source/common/grpc/async_client_impl.cc b/source/common/grpc/async_client_impl.cc index ecb5709288f7..c35a5fb60033 100644 --- a/source/common/grpc/async_client_impl.cc +++ b/source/common/grpc/async_client_impl.cc @@ -31,7 +31,7 @@ AsyncRequest* AsyncClientImpl::sendRaw(absl::string_view service_full_name, const Http::AsyncClient::RequestOptions& options) { auto* const async_request = new AsyncRequestImpl( *this, service_full_name, method_name, std::move(request), callbacks, parent_span, options); - std::unique_ptr grpc_stream{async_request}; + AsyncStreamImplPtr grpc_stream{async_request}; grpc_stream->initialize(true); if (grpc_stream->hasResetStream()) { diff --git a/source/common/grpc/async_client_impl.h b/source/common/grpc/async_client_impl.h index 750183afd4f5..9b49826eb692 100644 --- a/source/common/grpc/async_client_impl.h +++ b/source/common/grpc/async_client_impl.h @@ -1,5 +1,7 @@ #pragma once +#include + #include "envoy/config/core/v3/base.pb.h" #include "envoy/config/core/v3/grpc_service.pb.h" #include "envoy/grpc/async_client.h" @@ -13,7 +15,9 @@ namespace Envoy { namespace Grpc { class AsyncRequestImpl; + class AsyncStreamImpl; +using AsyncStreamImplPtr = std::unique_ptr; class AsyncClientImpl final : public RawAsyncClient { public: @@ -34,7 +38,7 @@ class AsyncClientImpl final : public RawAsyncClient { Upstream::ClusterManager& cm_; const std::string remote_cluster_name_; const Protobuf::RepeatedPtrField initial_metadata_; - std::list> active_streams_; + std::list active_streams_; TimeSource& time_source_; friend class AsyncRequestImpl; diff --git a/source/common/grpc/google_async_client_impl.cc b/source/common/grpc/google_async_client_impl.cc index 37f5e858a495..fc31fbbc7a14 100644 --- a/source/common/grpc/google_async_client_impl.cc +++ b/source/common/grpc/google_async_client_impl.cc @@ -112,7 +112,7 @@ AsyncRequest* GoogleAsyncClientImpl::sendRaw(absl::string_view service_full_name const Http::AsyncClient::RequestOptions& options) { auto* const async_request = new GoogleAsyncRequestImpl( *this, service_full_name, method_name, std::move(request), callbacks, parent_span, options); - std::unique_ptr grpc_stream{async_request}; + GoogleAsyncStreamImplPtr grpc_stream{async_request}; grpc_stream->initialize(true); if (grpc_stream->callFailed()) { @@ -378,7 +378,7 @@ void GoogleAsyncStreamImpl::deferredDelete() { // Hence, it is safe here to create a unique_ptr to this and transfer // ownership to dispatcher_.deferredDelete(). After this call, no further // methods may be invoked on this object. 
- dispatcher_.deferredDelete(std::unique_ptr(this)); + dispatcher_.deferredDelete(GoogleAsyncStreamImplPtr(this)); } void GoogleAsyncStreamImpl::cleanup() { diff --git a/source/common/grpc/google_async_client_impl.h b/source/common/grpc/google_async_client_impl.h index 75229bd5905f..6a576df3497a 100644 --- a/source/common/grpc/google_async_client_impl.h +++ b/source/common/grpc/google_async_client_impl.h @@ -29,6 +29,9 @@ namespace Envoy { namespace Grpc { class GoogleAsyncStreamImpl; + +using GoogleAsyncStreamImplPtr = std::unique_ptr; + class GoogleAsyncRequestImpl; struct GoogleAsyncTag { @@ -109,6 +112,8 @@ class GoogleAsyncClientThreadLocal : public ThreadLocal::ThreadLocalObject, std::unordered_set streams_; }; +using GoogleAsyncClientThreadLocalPtr = std::unique_ptr; + // Google gRPC client stats. TODO(htuch): consider how a wider set of stats collected by the // library, such as the census related ones, can be externalized as needed. struct GoogleAsyncClientStats { @@ -189,7 +194,7 @@ class GoogleAsyncClientImpl final : public RawAsyncClient, Logger::Loggable> active_streams_; + std::list active_streams_; const std::string stat_prefix_; const Protobuf::RepeatedPtrField initial_metadata_; Stats::ScopeSharedPtr scope_; diff --git a/source/common/grpc/typed_async_client.h b/source/common/grpc/typed_async_client.h index 1de73ff6e8d9..241926ee4ed7 100644 --- a/source/common/grpc/typed_async_client.h +++ b/source/common/grpc/typed_async_client.h @@ -1,6 +1,7 @@ #pragma once #include +#include #include "envoy/grpc/async_client.h" @@ -62,17 +63,19 @@ template class AsyncStream /* : public RawAsyncStream */ { RawAsyncStream* stream_{}; }; +template using ResponsePtr = std::unique_ptr; + /** * Convenience subclasses for AsyncRequestCallbacks. */ template class AsyncRequestCallbacks : public RawAsyncRequestCallbacks { public: ~AsyncRequestCallbacks() override = default; - virtual void onSuccess(std::unique_ptr&& response, Tracing::Span& span) PURE; + virtual void onSuccess(ResponsePtr&& response, Tracing::Span& span) PURE; private: void onSuccessRaw(Buffer::InstancePtr&& response, Tracing::Span& span) override { - auto message = std::unique_ptr(dynamic_cast( + auto message = ResponsePtr(dynamic_cast( Internal::parseMessageUntyped(std::make_unique(), std::move(response)) .release())); if (!message) { @@ -138,11 +141,11 @@ class VersionedMethods { template class AsyncStreamCallbacks : public RawAsyncStreamCallbacks { public: ~AsyncStreamCallbacks() override = default; - virtual void onReceiveMessage(std::unique_ptr&& message) PURE; + virtual void onReceiveMessage(ResponsePtr&& message) PURE; private: bool onReceiveMessageRaw(Buffer::InstancePtr&& response) override { - auto message = std::unique_ptr(dynamic_cast( + auto message = ResponsePtr(dynamic_cast( Internal::parseMessageUntyped(std::make_unique(), std::move(response)) .release())); if (!message) { diff --git a/source/extensions/access_loggers/grpc/config_utils.cc b/source/extensions/access_loggers/grpc/config_utils.cc index e950aea731fb..aa59f2f28018 100644 --- a/source/extensions/access_loggers/grpc/config_utils.cc +++ b/source/extensions/access_loggers/grpc/config_utils.cc @@ -10,7 +10,7 @@ namespace GrpcCommon { // Singleton registration via macro defined in envoy/singleton/manager.h SINGLETON_MANAGER_REGISTRATION(grpc_access_logger_cache); -std::shared_ptr +GrpcCommon::GrpcAccessLoggerCacheSharedPtr getGrpcAccessLoggerCacheSingleton(Server::Configuration::FactoryContext& context) { return context.singletonManager().getTyped( 
SINGLETON_MANAGER_REGISTERED_NAME(grpc_access_logger_cache), [&context] { diff --git a/source/extensions/access_loggers/grpc/grpc_access_log_impl.h b/source/extensions/access_loggers/grpc/grpc_access_log_impl.h index b79a7ad1b8d5..5cf04837c49b 100644 --- a/source/extensions/access_loggers/grpc/grpc_access_log_impl.h +++ b/source/extensions/access_loggers/grpc/grpc_access_log_impl.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include @@ -129,6 +130,8 @@ class GrpcAccessLoggerImpl : public GrpcAccessLogger { const envoy::config::core::v3::ApiVersion transport_api_version_; }; +using GrpcAccessLoggerImplPtr = std::unique_ptr; + class GrpcAccessLoggerCacheImpl : public Singleton::Instance, public GrpcAccessLoggerCache { public: GrpcAccessLoggerCacheImpl(Grpc::AsyncClientManager& async_client_manager, Stats::Scope& scope, @@ -158,6 +161,8 @@ class GrpcAccessLoggerCacheImpl : public Singleton::Instance, public GrpcAccessL const LocalInfo::LocalInfo& local_info_; }; +using GrpcAccessLoggerCacheImplPtr = std::unique_ptr; + } // namespace GrpcCommon } // namespace AccessLoggers } // namespace Extensions diff --git a/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.h b/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.h index 0c1a80180fa9..0d6ec73ac0fd 100644 --- a/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.h +++ b/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include @@ -59,6 +60,8 @@ class HttpGrpcAccessLog : public Common::ImplBase { std::vector filter_states_to_log_; }; +using HttpGrpcAccessLogPtr = std::unique_ptr; + } // namespace HttpGrpc } // namespace AccessLoggers } // namespace Extensions diff --git a/source/extensions/common/aws/region_provider.h b/source/extensions/common/aws/region_provider.h index aa87f90c173d..33a4fa2803b2 100644 --- a/source/extensions/common/aws/region_provider.h +++ b/source/extensions/common/aws/region_provider.h @@ -1,5 +1,7 @@ #pragma once +#include + #include "envoy/common/pure.h" #include "absl/types/optional.h" @@ -23,6 +25,7 @@ class RegionProvider { virtual absl::optional getRegion() PURE; }; +using RegionProviderPtr = std::unique_ptr; using RegionProviderSharedPtr = std::shared_ptr; } // namespace Aws diff --git a/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.h b/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.h index ad9799940e9f..da1ed1d2ebf1 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.h +++ b/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.h @@ -2,6 +2,7 @@ #include #include +#include #include #include @@ -73,6 +74,8 @@ class GrpcClientImpl : public Client, const envoy::config::core::v3::ApiVersion transport_api_version_; }; +using GrpcClientImplPtr = std::unique_ptr; + } // namespace ExtAuthz } // namespace Common } // namespace Filters diff --git a/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.h b/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.h index 8a518783bf5d..12707aac9f6c 100644 --- a/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.h +++ b/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include "envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/config.pb.h" @@ -48,6 +49,8 @@ class Filter : public Envoy::Http::PassThroughFilter { Buffer::OwnedImpl buffer_{}; }; +using FilterPtr = std::unique_ptr; + class 
FilterConfigPerRoute : public Router::RouteSpecificFilterConfig { public: FilterConfigPerRoute( diff --git a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc index 15b65e026d6d..222b9dbf00fb 100644 --- a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc +++ b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc @@ -31,14 +31,19 @@ using Envoy::ProtobufUtil::Status; using Envoy::ProtobufUtil::error::Code; using google::api::HttpRule; using google::grpc::transcoding::JsonRequestTranslator; +using JsonRequestTranslatorPtr = std::unique_ptr; using google::grpc::transcoding::MessageStream; using google::grpc::transcoding::PathMatcherBuilder; using google::grpc::transcoding::PathMatcherUtility; using google::grpc::transcoding::RequestInfo; using google::grpc::transcoding::RequestMessageTranslator; +using RequestMessageTranslatorPtr = std::unique_ptr; using google::grpc::transcoding::ResponseToJsonTranslator; +using ResponseToJsonTranslatorPtr = std::unique_ptr; using google::grpc::transcoding::Transcoder; +using TranscoderPtr = std::unique_ptr; using google::grpc::transcoding::TranscoderInputStream; +using TranscoderInputStreamPtr = std::unique_ptr; namespace Envoy { namespace Extensions { @@ -71,9 +76,9 @@ class TranscoderImpl : public Transcoder { * @param request_translator a JsonRequestTranslator that does the request translation * @param response_translator a ResponseToJsonTranslator that does the response translation */ - TranscoderImpl(std::unique_ptr request_translator, - std::unique_ptr json_request_translator, - std::unique_ptr response_translator) + TranscoderImpl(RequestMessageTranslatorPtr request_translator, + JsonRequestTranslatorPtr json_request_translator, + ResponseToJsonTranslatorPtr response_translator) : request_translator_(std::move(request_translator)), json_request_translator_(std::move(json_request_translator)), request_message_stream_(request_translator_ ? 
*request_translator_ @@ -92,12 +97,12 @@ class TranscoderImpl : public Transcoder { ProtobufUtil::Status ResponseStatus() override { return response_translator_->Status(); } private: - std::unique_ptr request_translator_; - std::unique_ptr json_request_translator_; + RequestMessageTranslatorPtr request_translator_; + JsonRequestTranslatorPtr json_request_translator_; MessageStream& request_message_stream_; - std::unique_ptr response_translator_; - std::unique_ptr request_stream_; - std::unique_ptr response_stream_; + ResponseToJsonTranslatorPtr response_translator_; + TranscoderInputStreamPtr request_stream_; + TranscoderInputStreamPtr response_stream_; }; } // namespace @@ -279,8 +284,8 @@ bool JsonTranscoderConfig::convertGrpcStatus() const { return convert_grpc_statu ProtobufUtil::Status JsonTranscoderConfig::createTranscoder( const Http::RequestHeaderMap& headers, ZeroCopyInputStream& request_input, - google::grpc::transcoding::TranscoderInputStream& response_input, - std::unique_ptr& transcoder, MethodInfoSharedPtr& method_info) { + google::grpc::transcoding::TranscoderInputStream& response_input, TranscoderPtr& transcoder, + MethodInfoSharedPtr& method_info) { if (Grpc::Common::isGrpcRequestHeaders(headers)) { return ProtobufUtil::Status(Code::INVALID_ARGUMENT, "Request headers has application/grpc content-type"); @@ -324,8 +329,8 @@ ProtobufUtil::Status JsonTranscoderConfig::createTranscoder( request_info.variable_bindings.emplace_back(std::move(resolved_binding)); } - std::unique_ptr request_translator; - std::unique_ptr json_request_translator; + RequestMessageTranslatorPtr request_translator; + JsonRequestTranslatorPtr json_request_translator; if (method_info->request_type_is_http_body_) { request_translator = std::make_unique(*type_helper_->Resolver(), false, std::move(request_info)); @@ -338,7 +343,7 @@ ProtobufUtil::Status JsonTranscoderConfig::createTranscoder( const auto response_type_url = Grpc::Common::typeUrl(method_info->descriptor_->output_type()->full_name()); - std::unique_ptr response_translator{new ResponseToJsonTranslator( + ResponseToJsonTranslatorPtr response_translator{new ResponseToJsonTranslator( type_helper_->Resolver(), response_type_url, method_info->descriptor_->server_streaming(), &response_input, print_options_)}; diff --git a/source/extensions/grpc_credentials/aws_iam/config.cc b/source/extensions/grpc_credentials/aws_iam/config.cc index fed537a7c674..345d975fedbd 100644 --- a/source/extensions/grpc_credentials/aws_iam/config.cc +++ b/source/extensions/grpc_credentials/aws_iam/config.cc @@ -74,7 +74,7 @@ std::shared_ptr AwsIamGrpcCredentialsFactory::getChann std::string AwsIamGrpcCredentialsFactory::getRegion( const envoy::config::grpc_credential::v3::AwsIamConfig& config) { - std::unique_ptr region_provider; + Common::Aws::RegionProviderPtr region_provider; if (!config.region().empty()) { region_provider = std::make_unique(config.region()); } else { diff --git a/source/extensions/stat_sinks/metrics_service/grpc_metrics_service_impl.h b/source/extensions/stat_sinks/metrics_service/grpc_metrics_service_impl.h index 0e35d3b06304..d65bae27f9bb 100644 --- a/source/extensions/stat_sinks/metrics_service/grpc_metrics_service_impl.h +++ b/source/extensions/stat_sinks/metrics_service/grpc_metrics_service_impl.h @@ -1,5 +1,7 @@ #pragma once +#include + #include "envoy/grpc/async_client.h" #include "envoy/local_info/local_info.h" #include "envoy/network/connection.h" @@ -69,6 +71,8 @@ class GrpcMetricsStreamerImpl : public Singleton::Instance, public GrpcMetricsSt 
const envoy::config::core::v3::ApiVersion transport_api_version_; }; +using GrpcMetricsStreamerImplPtr = std::unique_ptr; + /** * Stat Sink implementation of Metrics Service. */ diff --git a/test/common/config/delta_subscription_impl_test.cc b/test/common/config/delta_subscription_impl_test.cc index 0368e630d773..29c46f5d8096 100644 --- a/test/common/config/delta_subscription_impl_test.cc +++ b/test/common/config/delta_subscription_impl_test.cc @@ -139,12 +139,12 @@ TEST(DeltaSubscriptionImplFixturelessTest, NoGrpcStream) { const Protobuf::MethodDescriptor* method_descriptor = Protobuf::DescriptorPool::generated_pool()->FindMethodByName( "envoy.api.v2.EndpointDiscoveryService.StreamEndpoints"); - std::shared_ptr xds_context = std::make_shared( + NewGrpcMuxImplSharedPtr xds_context = std::make_shared( std::unique_ptr(async_client), dispatcher, *method_descriptor, envoy::config::core::v3::ApiVersion::AUTO, random, stats_store, rate_limit_settings, local_info); - std::unique_ptr subscription = std::make_unique( + GrpcSubscriptionImplPtr subscription = std::make_unique( xds_context, callbacks, resource_decoder, stats, Config::TypeUrl::get().ClusterLoadAssignment, dispatcher, std::chrono::milliseconds(12345), false); diff --git a/test/common/config/delta_subscription_test_harness.h b/test/common/config/delta_subscription_test_harness.h index d030e7d46288..04fca753ab05 100644 --- a/test/common/config/delta_subscription_test_harness.h +++ b/test/common/config/delta_subscription_test_harness.h @@ -197,8 +197,8 @@ class DeltaSubscriptionTestHarness : public SubscriptionTestHarness { NiceMock random_; NiceMock local_info_; Grpc::MockAsyncStream async_stream_; - std::shared_ptr xds_context_; - std::unique_ptr subscription_; + NewGrpcMuxImplSharedPtr xds_context_; + GrpcSubscriptionImplPtr subscription_; std::string last_response_nonce_; std::set last_cluster_names_; Envoy::Config::RateLimitSettings rate_limit_settings_; diff --git a/test/common/config/grpc_mux_impl_test.cc b/test/common/config/grpc_mux_impl_test.cc index bf18620b8a35..04938ce6adc1 100644 --- a/test/common/config/grpc_mux_impl_test.cc +++ b/test/common/config/grpc_mux_impl_test.cc @@ -98,7 +98,7 @@ class GrpcMuxImplTestBase : public testing::Test { NiceMock random_; Grpc::MockAsyncClient* async_client_; Grpc::MockAsyncStream async_stream_; - std::unique_ptr grpc_mux_; + GrpcMuxImplPtr grpc_mux_; NiceMock callbacks_; NiceMock resource_decoder_; NiceMock local_info_; diff --git a/test/common/config/grpc_subscription_test_harness.h b/test/common/config/grpc_subscription_test_harness.h index 0cc685486a29..8649dcb01afe 100644 --- a/test/common/config/grpc_subscription_test_harness.h +++ b/test/common/config/grpc_subscription_test_harness.h @@ -186,8 +186,8 @@ class GrpcSubscriptionTestHarness : public SubscriptionTestHarness { TestUtility::TestOpaqueResourceDecoderImpl resource_decoder_{"cluster_name"}; NiceMock async_stream_; - std::shared_ptr mux_; - std::unique_ptr subscription_; + GrpcMuxImplSharedPtr mux_; + GrpcSubscriptionImplPtr subscription_; std::string last_response_nonce_; std::set last_cluster_names_; NiceMock local_info_; diff --git a/test/common/config/new_grpc_mux_impl_test.cc b/test/common/config/new_grpc_mux_impl_test.cc index 86d88dcfd4aa..a35a38d57725 100644 --- a/test/common/config/new_grpc_mux_impl_test.cc +++ b/test/common/config/new_grpc_mux_impl_test.cc @@ -59,7 +59,7 @@ class NewGrpcMuxImplTestBase : public testing::Test { NiceMock random_; Grpc::MockAsyncClient* async_client_; NiceMock async_stream_; - 
std::unique_ptr grpc_mux_; + NewGrpcMuxImplPtr grpc_mux_; NiceMock callbacks_; TestUtility::TestOpaqueResourceDecoderImpl resource_decoder_{"cluster_name"}; diff --git a/test/common/grpc/google_async_client_impl_test.cc b/test/common/grpc/google_async_client_impl_test.cc index 1fa083234e1d..1474899621f1 100644 --- a/test/common/grpc/google_async_client_impl_test.cc +++ b/test/common/grpc/google_async_client_impl_test.cc @@ -72,7 +72,7 @@ class EnvoyGoogleAsyncClientImplTest : public testing::Test { Api::ApiPtr api_; Event::DispatcherPtr dispatcher_; Stats::ScopeSharedPtr scope_; - std::unique_ptr tls_; + GoogleAsyncClientThreadLocalPtr tls_; MockStubFactory stub_factory_; const Protobuf::MethodDescriptor* method_descriptor_; StatNames stat_names_; diff --git a/test/common/grpc/grpc_client_integration_test_harness.h b/test/common/grpc/grpc_client_integration_test_harness.h index 5803379810a5..c60342eb968a 100644 --- a/test/common/grpc/grpc_client_integration_test_harness.h +++ b/test/common/grpc/grpc_client_integration_test_harness.h @@ -1,5 +1,7 @@ #pragma once +#include + #include "envoy/config/core/v3/base.pb.h" #include "envoy/config/core/v3/grpc_service.pb.h" #include "envoy/extensions/transport_sockets/tls/v3/cert.pb.h" @@ -201,6 +203,8 @@ class HelloworldStream : public MockAsyncStreamCallbacks const TestMetadata empty_metadata_; }; +using HelloworldStreamPtr = std::unique_ptr; + // Request related test utilities. class HelloworldRequest : public MockAsyncRequestCallbacks { public: @@ -224,6 +228,8 @@ class HelloworldRequest : public MockAsyncRequestCallbacks; + class GrpcClientIntegrationTest : public GrpcClientIntegrationParamTest { public: GrpcClientIntegrationTest() @@ -348,7 +354,7 @@ class GrpcClientIntegrationTest : public GrpcClientIntegrationParamTest { virtual void expectExtraHeaders(FakeStream&) {} - std::unique_ptr createRequest(const TestMetadata& initial_metadata) { + HelloworldRequestPtr createRequest(const TestMetadata& initial_metadata) { auto request = std::make_unique(dispatcher_helper_); EXPECT_CALL(*request, onCreateInitialMetadata(_)) .WillOnce(Invoke([&initial_metadata](Http::HeaderMap& headers) { @@ -394,7 +400,7 @@ class GrpcClientIntegrationTest : public GrpcClientIntegrationParamTest { return request; } - std::unique_ptr createStream(const TestMetadata& initial_metadata) { + HelloworldStreamPtr createStream(const TestMetadata& initial_metadata) { auto stream = std::make_unique(dispatcher_helper_); EXPECT_CALL(*stream, onCreateInitialMetadata(_)) .WillOnce(Invoke([&initial_metadata](Http::HeaderMap& headers) { @@ -440,7 +446,7 @@ class GrpcClientIntegrationTest : public GrpcClientIntegrationParamTest { std::unique_ptr stream_headers_; std::vector> channel_args_; #ifdef ENVOY_GOOGLE_GRPC - std::unique_ptr google_tls_; + GoogleAsyncClientThreadLocalPtr google_tls_; #endif AsyncClient grpc_client_; Event::TimerPtr timeout_timer_; diff --git a/test/common/upstream/eds_speed_test.cc b/test/common/upstream/eds_speed_test.cc index 4ed25db80da9..3aab5a54f991 100644 --- a/test/common/upstream/eds_speed_test.cc +++ b/test/common/upstream/eds_speed_test.cc @@ -162,8 +162,8 @@ class EdsSpeedTest { Api::ApiPtr api_; Grpc::MockAsyncClient* async_client_; NiceMock async_stream_; - std::shared_ptr grpc_mux_; - std::unique_ptr subscription_; + Config::GrpcMuxImplSharedPtr grpc_mux_; + Config::GrpcSubscriptionImplPtr subscription_; }; } // namespace Upstream diff --git a/test/extensions/access_loggers/grpc/grpc_access_log_impl_test.cc 
b/test/extensions/access_loggers/grpc/grpc_access_log_impl_test.cc index 4cb7054017b8..6ea00508fc7f 100644 --- a/test/extensions/access_loggers/grpc/grpc_access_log_impl_test.cc +++ b/test/extensions/access_loggers/grpc/grpc_access_log_impl_test.cc @@ -77,7 +77,7 @@ class GrpcAccessLoggerImplTest : public testing::Test { Event::MockTimer* timer_ = nullptr; Event::MockDispatcher dispatcher_; Grpc::MockAsyncClient* async_client_{new Grpc::MockAsyncClient}; - std::unique_ptr logger_; + GrpcAccessLoggerImplPtr logger_; }; // Test basic stream logging flow. @@ -372,7 +372,7 @@ class GrpcAccessLoggerCacheImplTest : public testing::Test { Grpc::MockAsyncClientManager async_client_manager_; Grpc::MockAsyncClient* async_client_ = nullptr; Grpc::MockAsyncClientFactory* factory_ = nullptr; - std::unique_ptr logger_cache_; + GrpcAccessLoggerCacheImplPtr logger_cache_; NiceMock scope_; }; diff --git a/test/extensions/access_loggers/grpc/http_grpc_access_log_impl_test.cc b/test/extensions/access_loggers/grpc/http_grpc_access_log_impl_test.cc index c39fb8165545..d6cf63b11395 100644 --- a/test/extensions/access_loggers/grpc/http_grpc_access_log_impl_test.cc +++ b/test/extensions/access_loggers/grpc/http_grpc_access_log_impl_test.cc @@ -122,7 +122,7 @@ response: {{}} envoy::extensions::access_loggers::grpc::v3::HttpGrpcAccessLogConfig config_; std::shared_ptr logger_{new MockGrpcAccessLogger()}; std::shared_ptr logger_cache_{new MockGrpcAccessLoggerCache()}; - std::unique_ptr access_log_; + HttpGrpcAccessLogPtr access_log_; }; class TestSerializedFilterState : public StreamInfo::FilterState::Object { diff --git a/test/extensions/filters/common/ext_authz/ext_authz_grpc_impl_test.cc b/test/extensions/filters/common/ext_authz/ext_authz_grpc_impl_test.cc index 711680fa6394..ab0f7b37d6fd 100644 --- a/test/extensions/filters/common/ext_authz/ext_authz_grpc_impl_test.cc +++ b/test/extensions/filters/common/ext_authz/ext_authz_grpc_impl_test.cc @@ -63,7 +63,7 @@ class ExtAuthzGrpcClientTest : public testing::TestWithParam { Grpc::MockAsyncClient* async_client_; absl::optional timeout_; Grpc::MockAsyncRequest async_request_; - std::unique_ptr client_; + GrpcClientImplPtr client_; MockRequestCallbacks request_callbacks_; Tracing::MockSpan span_; bool use_alpha_{}; diff --git a/test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_test.cc b/test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_test.cc index 15f5bf70687f..54ee07792c48 100644 --- a/test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_test.cc +++ b/test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_test.cc @@ -38,7 +38,7 @@ class ReverseBridgeTest : public testing::Test { filter_->setEncoderFilterCallbacks(encoder_callbacks_); } - std::unique_ptr filter_; + FilterPtr filter_; std::shared_ptr route_ = std::make_shared(); Router::RouteSpecificFilterConfig filter_config_; Http::MockStreamDecoderFilterCallbacks decoder_callbacks_; diff --git a/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc b/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc index 19f5ef8f9b39..c0fbd516472d 100644 --- a/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc +++ b/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc @@ -1,5 +1,6 @@ #include #include +#include #include "envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.pb.h" @@ -31,6 +32,7 @@ using 
Envoy::Protobuf::util::MessageDifferencer; using Envoy::ProtobufUtil::error::Code; using google::api::HttpRule; using google::grpc::transcoding::Transcoder; +using TranscoderPtr = std::unique_ptr; namespace Envoy { namespace Extensions { @@ -222,7 +224,7 @@ TEST_F(GrpcJsonTranscoderConfigTest, CreateTranscoder) { Http::TestRequestHeaderMapImpl headers{{":method", "GET"}, {":path", "/shelves"}}; TranscoderInputStreamImpl request_in, response_in; - std::unique_ptr transcoder; + TranscoderPtr transcoder; MethodInfoSharedPtr method_info; const auto status = config.createTranscoder(headers, request_in, response_in, transcoder, method_info); @@ -243,7 +245,7 @@ TEST_F(GrpcJsonTranscoderConfigTest, CreateTranscoderAutoMap) { {":path", "/bookstore.Bookstore/DeleteShelf"}}; TranscoderInputStreamImpl request_in, response_in; - std::unique_ptr transcoder; + TranscoderPtr transcoder; MethodInfoSharedPtr method_info; const auto status = config.createTranscoder(headers, request_in, response_in, transcoder, method_info); @@ -262,7 +264,7 @@ TEST_F(GrpcJsonTranscoderConfigTest, InvalidQueryParameter) { Http::TestRequestHeaderMapImpl headers{{":method", "GET"}, {":path", "/shelves?foo=bar"}}; TranscoderInputStreamImpl request_in, response_in; - std::unique_ptr transcoder; + TranscoderPtr transcoder; MethodInfoSharedPtr method_info; const auto status = config.createTranscoder(headers, request_in, response_in, transcoder, method_info); @@ -282,7 +284,7 @@ TEST_F(GrpcJsonTranscoderConfigTest, UnknownQueryParameterIsIgnored) { Http::TestRequestHeaderMapImpl headers{{":method", "GET"}, {":path", "/shelves?foo=bar"}}; TranscoderInputStreamImpl request_in, response_in; - std::unique_ptr transcoder; + TranscoderPtr transcoder; MethodInfoSharedPtr method_info; const auto status = config.createTranscoder(headers, request_in, response_in, transcoder, method_info); @@ -301,7 +303,7 @@ TEST_F(GrpcJsonTranscoderConfigTest, IgnoredQueryParameter) { Http::TestRequestHeaderMapImpl headers{{":method", "GET"}, {":path", "/shelves?key=API_KEY"}}; TranscoderInputStreamImpl request_in, response_in; - std::unique_ptr transcoder; + TranscoderPtr transcoder; MethodInfoSharedPtr method_info; const auto status = config.createTranscoder(headers, request_in, response_in, transcoder, method_info); @@ -323,7 +325,7 @@ TEST_F(GrpcJsonTranscoderConfigTest, InvalidVariableBinding) { Http::TestRequestHeaderMapImpl headers{{":method", "GET"}, {":path", "/book/1"}}; TranscoderInputStreamImpl request_in, response_in; - std::unique_ptr transcoder; + TranscoderPtr transcoder; MethodInfoSharedPtr method_info; const auto status = config.createTranscoder(headers, request_in, response_in, transcoder, method_info); diff --git a/test/extensions/filters/http/grpc_stats/config_test.cc b/test/extensions/filters/http/grpc_stats/config_test.cc index 6c4d71292800..68bf0bde27f0 100644 --- a/test/extensions/filters/http/grpc_stats/config_test.cc +++ b/test/extensions/filters/http/grpc_stats/config_test.cc @@ -64,7 +64,7 @@ class GrpcStatsFilterConfigTest : public testing::Test { envoy::extensions::filters::http::grpc_stats::v3::FilterConfig config_; NiceMock context_; - std::shared_ptr filter_; + Http::StreamFilterSharedPtr filter_; NiceMock decoder_callbacks_; NiceMock stream_info_; NiceMock stats_store_; diff --git a/test/extensions/stats_sinks/metrics_service/grpc_metrics_service_impl_test.cc b/test/extensions/stats_sinks/metrics_service/grpc_metrics_service_impl_test.cc index 649b383496b6..ce940b650136 100644 --- 
a/test/extensions/stats_sinks/metrics_service/grpc_metrics_service_impl_test.cc +++ b/test/extensions/stats_sinks/metrics_service/grpc_metrics_service_impl_test.cc @@ -49,7 +49,7 @@ class GrpcMetricsStreamerImplTest : public testing::Test { LocalInfo::MockLocalInfo local_info_; Grpc::MockAsyncClient* async_client_{new NiceMock}; Grpc::MockAsyncClientFactory* factory_{new Grpc::MockAsyncClientFactory}; - std::unique_ptr streamer_; + GrpcMetricsStreamerImplPtr streamer_; }; // Test basic metrics streaming flow. diff --git a/test/mocks/grpc/mocks.h b/test/mocks/grpc/mocks.h index 0a13f5a5fb08..476ba677f945 100644 --- a/test/mocks/grpc/mocks.h +++ b/test/mocks/grpc/mocks.h @@ -1,6 +1,7 @@ #pragma once #include +#include #include #include "envoy/config/core/v3/grpc_service.pb.h" @@ -39,10 +40,12 @@ class MockAsyncStream : public RawAsyncStream { MOCK_METHOD(bool, isAboveWriteBufferHighWatermark, (), (const)); }; +template using ResponseTypePtr = std::unique_ptr; + template class MockAsyncRequestCallbacks : public AsyncRequestCallbacks { public: - void onSuccess(std::unique_ptr&& response, Tracing::Span& span) { + void onSuccess(ResponseTypePtr&& response, Tracing::Span& span) { onSuccess_(*response, span); } @@ -59,7 +62,7 @@ class MockAsyncStreamCallbacks : public AsyncStreamCallbacks { onReceiveInitialMetadata_(*metadata); } - void onReceiveMessage(std::unique_ptr&& message) { onReceiveMessage_(*message); } + void onReceiveMessage(ResponseTypePtr&& message) { onReceiveMessage_(*message); } void onReceiveTrailingMetadata(Http::ResponseTrailerMapPtr&& metadata) { onReceiveTrailingMetadata_(*metadata); From f50fba14928c0fa08d02447db8fade9d56e9164a Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Mon, 20 Jul 2020 13:56:35 -0400 Subject: [PATCH 688/909] tcp: switching to the new pool (#12180) Risk Level: High (roll back first, ask questions later) Testing: cloned all unit tests and all integration tests. Docs Changes: n/a Release Notes: inline Runtime guard: envoy.reloadable_features.new_tcp_connection_pool Signed-off-by: Alyssa Wilk --- docs/root/version_history/current.rst | 1 + source/common/runtime/runtime_features.cc | 3 +-- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 5acb53a13a3d..09ccd54908c7 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -42,6 +42,7 @@ New Features * load balancer: added a :ref:`configuration` option to specify the active request bias used by the least request load balancer. * redis: added fault injection support :ref:`fault injection for redis proxy `, described further in :ref:`configuration documentation `. * tap: added :ref:`generic body matcher` to scan http requests and responses for text or hex patterns. +* tcp: switched the TCP connection pool to the new "shared" connection pool, sharing a common code base with HTTP and HTTP/2. Any unexpected behavioral changes can be temporarily reverted by setting `envoy.reloadable_features.new_tcp_connection_pool` to false. 
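A reloadable feature guard like the one named in that release note is normally consulted through Envoy's runtime feature helper. A rough sketch, assuming the usual runtimeFeatureEnabled helper; the actual TCP pool call site is not shown in this excerpt:

    #include "common/runtime/runtime_features.h"

    namespace Envoy {

    // Sketch only: choose between the new shared TCP connection pool and the
    // legacy one based on the reloadable feature flag, which operators can set
    // to false to revert the behavior.
    bool useNewTcpConnectionPool() {
      return Runtime::runtimeFeatureEnabled(
          "envoy.reloadable_features.new_tcp_connection_pool");
    }

    } // namespace Envoy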
Deprecated ---------- diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc index 35b990cd39b7..055d350e497c 100644 --- a/source/common/runtime/runtime_features.cc +++ b/source/common/runtime/runtime_features.cc @@ -71,6 +71,7 @@ constexpr const char* runtime_features[] = { "envoy.reloadable_features.http_default_alpn", "envoy.reloadable_features.listener_in_place_filterchain_update", "envoy.reloadable_features.new_codec_behavior", + "envoy.reloadable_features.new_tcp_connection_pool", "envoy.reloadable_features.preserve_query_string_in_path_redirects", "envoy.reloadable_features.preserve_upstream_date", "envoy.reloadable_features.stop_faking_paths", @@ -87,8 +88,6 @@ constexpr const char* runtime_features[] = { // When features are added here, there should be a tracking bug assigned to the // code owner to flip the default after sufficient testing. constexpr const char* disabled_runtime_features[] = { - // TODO(alyssawilk) flip true after the release. - "envoy.reloadable_features.new_tcp_connection_pool", // Sentinel and test flag. "envoy.reloadable_features.test_feature_false", }; From e7bdb1f1f3395ae2996b4e6c7cbc35926204c0d3 Mon Sep 17 00:00:00 2001 From: nigriMSFT Date: Mon, 20 Jul 2020 11:07:11 -0700 Subject: [PATCH 689/909] test: fix bad route entry test cases on Windows (#11830) Signed-off-by: Nick Grifka --- test/common/router/BUILD | 1 - test/common/router/config_impl_test.cc | 36 ++++++++++++++++++++++++++ 2 files changed, 36 insertions(+), 1 deletion(-) diff --git a/test/common/router/BUILD b/test/common/router/BUILD index 3ddc0305b16b..518dfec196bb 100644 --- a/test/common/router/BUILD +++ b/test/common/router/BUILD @@ -15,7 +15,6 @@ envoy_package() envoy_cc_test( name = "config_impl_test", - tags = ["fails_on_windows"], deps = [":config_impl_test_lib"], ) diff --git a/test/common/router/config_impl_test.cc b/test/common/router/config_impl_test.cc index 8b68b9c1d04d..eb327298f0aa 100644 --- a/test/common/router/config_impl_test.cc +++ b/test/common/router/config_impl_test.cc @@ -5087,10 +5087,22 @@ TEST_F(BadHttpRouteConfigurationsTest, BadRouteEntryConfigPrefixAndPath) { cluster: www2 )EOF"; +#ifndef GTEST_USES_SIMPLE_RE EXPECT_THROW_WITH_REGEX( TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, "invalid value oneof field 'path_specifier' is already set. Cannot set '(prefix|path)' for " "type oneof"); +#else + EXPECT_THAT_THROWS_MESSAGE( + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, + ::testing::AnyOf( + ::testing::ContainsRegex( + "invalid value oneof field 'path_specifier' is already set. Cannot set 'prefix' for " + "type oneof"), + ::testing::ContainsRegex( + "invalid value oneof field 'path_specifier' is already set. Cannot set 'path' for " + "type oneof"))); +#endif } TEST_F(BadHttpRouteConfigurationsTest, BadRouteEntryConfigMissingPathSpecifier) { @@ -5123,10 +5135,22 @@ TEST_F(BadHttpRouteConfigurationsTest, BadRouteEntryConfigPrefixAndRegex) { cluster: www2 )EOF"; +#ifndef GTEST_USES_SIMPLE_RE EXPECT_THROW_WITH_REGEX( TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, "invalid value oneof field 'path_specifier' is already set. 
Cannot set '(prefix|regex)' for " "type oneof"); +#else + EXPECT_THAT_THROWS_MESSAGE( + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, + ::testing::AnyOf( + ::testing::ContainsRegex( + "invalid value oneof field 'path_specifier' is already set. Cannot set 'prefix' for " + "type oneof"), + ::testing::ContainsRegex( + "invalid value oneof field 'path_specifier' is already set. Cannot set 'regex' for " + "type oneof"))); +#endif } TEST_F(BadHttpRouteConfigurationsTest, BadRouteEntryConfigNoAction) { @@ -5159,10 +5183,22 @@ TEST_F(BadHttpRouteConfigurationsTest, BadRouteEntryConfigPathAndRegex) { cluster: www2 )EOF"; +#ifndef GTEST_USES_SIMPLE_RE EXPECT_THROW_WITH_REGEX( TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, "invalid value oneof field 'path_specifier' is already set. Cannot set '(path|regex)' for " "type oneof"); +#else + EXPECT_THAT_THROWS_MESSAGE( + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, + ::testing::AnyOf( + ::testing::ContainsRegex( + "invalid value oneof field 'path_specifier' is already set. Cannot set 'path' for " + "type oneof"), + ::testing::ContainsRegex( + "invalid value oneof field 'path_specifier' is already set. Cannot set 'regex' for " + "type oneof"))); +#endif } TEST_F(BadHttpRouteConfigurationsTest, BadRouteEntryConfigPrefixAndPathAndRegex) { From 2d25680a9e085fbd132dd6d87cf2c2f1c4ea4b56 Mon Sep 17 00:00:00 2001 From: James Fish Date: Mon, 20 Jul 2020 11:07:47 -0700 Subject: [PATCH 690/909] thrift_proxy: Handle thrift void response as a success (#12070) When a successful thrift response was receive we never called fieldBegin for void response because fieldBegin is skipped if the field type is stop. Therefore check at the message end if we never saw a field begin callback so we can mark a void response as a success. Signed-off-by: James Fish --- .../network/thrift_proxy/conn_manager.cc | 19 ++++++- .../network/thrift_proxy/conn_manager.h | 1 + .../network/thrift_proxy/conn_manager_test.cc | 54 +++++++++++++++++++ 3 files changed, 73 insertions(+), 1 deletion(-) diff --git a/source/extensions/filters/network/thrift_proxy/conn_manager.cc b/source/extensions/filters/network/thrift_proxy/conn_manager.cc index c7715986034a..09cd67aa67a7 100644 --- a/source/extensions/filters/network/thrift_proxy/conn_manager.cc +++ b/source/extensions/filters/network/thrift_proxy/conn_manager.cc @@ -201,13 +201,30 @@ FilterStatus ConnectionManager::ResponseDecoder::fieldBegin(absl::string_view na // Reply messages contain a struct where field 0 is the call result and fields 1+ are // exceptions, if defined. At most one field may be set. Therefore, the very first field we // encounter in a reply is either field 0 (success) or not (IDL exception returned). - success_ = field_id == 0 && field_type != FieldType::Stop; + // If first fieldType is FieldType::Stop then it is a void success and handled in messageEnd() + // because decoder state machine does not call decoder event callback fieldBegin on + // FieldType::Stop. + success_ = (field_id == 0); first_reply_field_ = false; } return ProtocolConverter::fieldBegin(name, field_type, field_id); } +FilterStatus ConnectionManager::ResponseDecoder::messageEnd() { + if (first_reply_field_) { + // When the response is thrift void type there is never a fieldBegin call on a success + // because the response struct has no fields and so the first field type is FieldType::Stop. 
+ // The decoder state machine handles FieldType::Stop by going immediately to structEnd, + // skipping fieldBegin callback. Therefore if we are still waiting for the first reply field + // at end of message then it is a void success. + success_ = true; + first_reply_field_ = false; + } + + return ProtocolConverter::messageEnd(); +} + FilterStatus ConnectionManager::ResponseDecoder::transportEnd() { ASSERT(metadata_ != nullptr); diff --git a/source/extensions/filters/network/thrift_proxy/conn_manager.h b/source/extensions/filters/network/thrift_proxy/conn_manager.h index 52a6e0782b4d..b06476a9dde6 100644 --- a/source/extensions/filters/network/thrift_proxy/conn_manager.h +++ b/source/extensions/filters/network/thrift_proxy/conn_manager.h @@ -91,6 +91,7 @@ class ConnectionManager : public Network::ReadFilter, // ProtocolConverter FilterStatus messageBegin(MessageMetadataSharedPtr metadata) override; + FilterStatus messageEnd() override; FilterStatus fieldBegin(absl::string_view name, FieldType& field_type, int16_t& field_id) override; FilterStatus transportBegin(MessageMetadataSharedPtr metadata) override { diff --git a/test/extensions/filters/network/thrift_proxy/conn_manager_test.cc b/test/extensions/filters/network/thrift_proxy/conn_manager_test.cc index dd1810d17412..6d60f9cf0963 100644 --- a/test/extensions/filters/network/thrift_proxy/conn_manager_test.cc +++ b/test/extensions/filters/network/thrift_proxy/conn_manager_test.cc @@ -237,6 +237,26 @@ class ThriftConnectionManagerTest : public testing::Test { } } + void writeVoidFramedBinaryMessage(Buffer::Instance& buffer, int32_t seq_id) { + Buffer::OwnedImpl msg; + ProtocolPtr proto = + NamedProtocolConfigFactory::getFactory(ProtocolType::Binary).createProtocol(); + MessageMetadata metadata; + metadata.setMethodName("name"); + metadata.setMessageType(MessageType::Reply); + metadata.setSequenceId(seq_id); + + proto->writeMessageBegin(msg, metadata); + proto->writeStructBegin(msg, ""); + proto->writeFieldBegin(msg, "", FieldType::Stop, 0); + proto->writeStructEnd(msg); + proto->writeMessageEnd(msg); + + TransportPtr transport = + NamedTransportConfigFactory::getFactory(TransportType::Framed).createTransport(); + transport->encodeFrame(buffer, metadata, msg); + } + void writeFramedBinaryTApplicationException(Buffer::Instance& buffer, int32_t seq_id) { Buffer::OwnedImpl msg; ProtocolPtr proto = @@ -676,6 +696,40 @@ TEST_F(ThriftConnectionManagerTest, RequestAndResponse) { EXPECT_EQ(0U, store_.counter("test.response_error").value()); } +TEST_F(ThriftConnectionManagerTest, RequestAndVoidResponse) { + initializeFilter(); + writeComplexFramedBinaryMessage(buffer_, MessageType::Call, 0x0F); + + ThriftFilters::DecoderFilterCallbacks* callbacks{}; + EXPECT_CALL(*decoder_filter_, setDecoderFilterCallbacks(_)) + .WillOnce( + Invoke([&](ThriftFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; })); + + EXPECT_EQ(filter_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request_call").value()); + + writeVoidFramedBinaryMessage(write_buffer_, 0x0F); + + FramedTransportImpl transport; + BinaryProtocolImpl proto; + callbacks->startUpstreamResponse(transport, proto); + + EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)).Times(1); + EXPECT_EQ(ThriftFilters::ResponseStatus::Complete, callbacks->upstreamData(write_buffer_)); + + filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList(); + + EXPECT_EQ(1U, store_.counter("test.request").value()); + EXPECT_EQ(1U, 
store_.counter("test.request_call").value()); + EXPECT_EQ(0U, stats_.request_active_.value()); + EXPECT_EQ(1U, store_.counter("test.response").value()); + EXPECT_EQ(1U, store_.counter("test.response_reply").value()); + EXPECT_EQ(0U, store_.counter("test.response_exception").value()); + EXPECT_EQ(0U, store_.counter("test.response_invalid_type").value()); + EXPECT_EQ(1U, store_.counter("test.response_success").value()); + EXPECT_EQ(0U, store_.counter("test.response_error").value()); +} + // Tests that the downstream request's sequence number is used for the response. TEST_F(ThriftConnectionManagerTest, RequestAndResponseSequenceIdHandling) { initializeFilter(); From cd15dc4a9e612b2f21a36f16e8727e09de193f72 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Mon, 20 Jul 2020 16:09:23 -0400 Subject: [PATCH 691/909] cleanup: clang tidy naming (#12181) Risk Level: low (function renames) Testing: n/a Docs Changes: n/a Release Notes: n/a Signed-off-by: Alyssa Wilk --- include/envoy/registry/registry.h | 8 +-- source/common/common/hash.cc | 6 +- source/common/common/hash.h | 6 +- source/common/common/logger.cc | 2 +- source/common/common/logger.h | 4 +- .../common/protobuf/message_validator_impl.h | 4 +- source/common/router/scoped_rds.cc | 2 +- source/common/router/scoped_rds.h | 2 +- source/common/upstream/ring_hash_lb.cc | 2 +- .../clusters/redis/redis_cluster_lb.cc | 2 +- .../filters/http/cache/http_cache.h | 2 +- .../network/dubbo_proxy/active_message.cc | 30 ++++----- .../network/dubbo_proxy/active_message.h | 4 +- .../network/dubbo_proxy/conn_manager.cc | 2 +- .../filters/network/dubbo_proxy/decoder.cc | 13 ++-- .../dubbo_hessian2_serializer_impl.cc | 14 ++-- .../dubbo_proxy/dubbo_protocol_impl.cc | 26 ++++---- .../network/dubbo_proxy/heartbeat_response.cc | 4 +- .../filters/network/dubbo_proxy/message.h | 16 ++--- .../network/dubbo_proxy/message_impl.h | 20 +++--- .../filters/network/dubbo_proxy/metadata.h | 16 ++--- .../dubbo_proxy/router/route_matcher.cc | 20 +++--- .../network/dubbo_proxy/router/router_impl.cc | 12 ++-- .../filters/network/mongo_proxy/proxy.cc | 2 +- .../filters/network/mongo_proxy/utility.h | 2 +- .../filters/network/thrift_proxy/metadata.h | 2 +- .../thrift_proxy/twitter_protocol_impl.cc | 2 +- .../extensions/tracers/zipkin/span_context.h | 6 +- source/extensions/tracers/zipkin/tracer.cc | 8 +-- source/server/admin/admin.h | 2 +- source/server/server.cc | 4 +- test/common/common/hash_fuzz_test.cc | 2 +- test/common/common/hash_test.cc | 23 ++++--- .../network/dubbo_proxy/app_exception_test.cc | 6 +- .../network/dubbo_proxy/conn_manager_test.cc | 30 ++++----- .../network/dubbo_proxy/decoder_test.cc | 20 +++--- .../dubbo_hessian2_serializer_impl_test.cc | 26 ++++---- .../dubbo_proxy/dubbo_protocol_impl_test.cc | 40 +++++------ .../network/dubbo_proxy/metadata_test.cc | 18 ++--- .../network/dubbo_proxy/router_test.cc | 2 +- .../network/mongo_proxy/utility_test.cc | 8 +-- .../redis_proxy/conn_pool_impl_test.cc | 16 ++--- .../twitter_protocol_impl_test.cc | 10 +-- .../zipkin/span_context_extractor_test.cc | 66 +++++++++---------- test/extensions/tracers/zipkin/tracer_test.cc | 6 +- test/test_common/logging.cc | 4 +- test/test_common/logging.h | 2 +- 47 files changed, 260 insertions(+), 264 deletions(-) diff --git a/include/envoy/registry/registry.h b/include/envoy/registry/registry.h index a14baea43a69..ef12fff187b6 100644 --- a/include/envoy/registry/registry.h +++ b/include/envoy/registry/registry.h @@ -187,7 +187,7 @@ template class FactoryRegistry : public Logger::Loggable& 
- versioned_factories() { + versionedFactories() { using VersionedFactoryMap = absl::flat_hash_map; MUTABLE_CONSTRUCT_ON_FIRST_USE(VersionedFactoryMap); @@ -236,7 +236,7 @@ template class FactoryRegistry : public Logger::Loggable class FactoryRegistry : public Logger::Loggable getFactoryVersion(absl::string_view name) { - auto it = versioned_factories().find(name); - if (it == versioned_factories().end()) { + auto it = versionedFactories().find(name); + if (it == versionedFactories().end()) { return absl::nullopt; } return it->second; diff --git a/source/common/common/hash.cc b/source/common/common/hash.cc index eb1e0765da87..c39ec996bf83 100644 --- a/source/common/common/hash.cc +++ b/source/common/common/hash.cc @@ -8,7 +8,7 @@ namespace Envoy { // platforms are needed. // from // (https://gcc.gnu.org/git/?p=gcc.git;a=blob_plain;f=libstdc%2b%2b-v3/libsupc%2b%2b/hash_bytes.cc) -uint64_t MurmurHash::murmurHash2_64(absl::string_view key, uint64_t seed) { +uint64_t MurmurHash::murmurHash2(absl::string_view key, uint64_t seed) { static const uint64_t mul = 0xc6a4a7935bd1e995UL; const char* const buf = static_cast(key.data()); uint64_t len = key.size(); @@ -19,13 +19,13 @@ uint64_t MurmurHash::murmurHash2_64(absl::string_view key, uint64_t seed) { const char* const end = buf + len_aligned; uint64_t hash = seed ^ (len * mul); for (const char* p = buf; p != end; p += 8) { - const uint64_t data = shiftMix(unaligned_load(p) * mul) * mul; + const uint64_t data = shiftMix(unalignedLoad(p) * mul) * mul; hash ^= data; hash *= mul; } if ((len & 0x7) != 0) { - const uint64_t data = load_bytes(end, len & 0x7); + const uint64_t data = loadBytes(end, len & 0x7); hash ^= data; hash *= mul; } diff --git a/source/common/common/hash.h b/source/common/common/hash.h index 30d27b9b2022..38fb20a0f9cc 100644 --- a/source/common/common/hash.h +++ b/source/common/common/hash.h @@ -55,17 +55,17 @@ class MurmurHash { * @param seed the seed to use for the hash * @return 64-bit hash representation of the supplied string view */ - static uint64_t murmurHash2_64(absl::string_view key, uint64_t seed = STD_HASH_SEED); + static uint64_t murmurHash2(absl::string_view key, uint64_t seed = STD_HASH_SEED); private: - static inline uint64_t unaligned_load(const char* p) { + static inline uint64_t unalignedLoad(const char* p) { uint64_t result; memcpy(&result, p, sizeof(result)); return result; } // Loads n bytes, where 1 <= n < 8. 
- static inline uint64_t load_bytes(const char* p, int n) { + static inline uint64_t loadBytes(const char* p, int n) { uint64_t result = 0; --n; do { diff --git a/source/common/common/logger.cc b/source/common/common/logger.cc index 3816c0f61ca7..080b9a08d85d 100644 --- a/source/common/common/logger.cc +++ b/source/common/common/logger.cc @@ -105,7 +105,7 @@ Context::~Context() { void Context::activate() { Registry::getSink()->setLock(lock_); - Registry::getSink()->set_should_escape(should_escape_); + Registry::getSink()->setShouldEscape(should_escape_); Registry::setLogLevel(log_level_); Registry::setLogFormat(log_format_); } diff --git a/source/common/common/logger.h b/source/common/common/logger.h index 5cbc11e15a9b..d315f8ef56d6 100644 --- a/source/common/common/logger.h +++ b/source/common/common/logger.h @@ -104,7 +104,7 @@ class SinkDelegate : NonCopyable { virtual void flush() PURE; protected: - SinkDelegate* previous_delegate() { return previous_delegate_; } + SinkDelegate* previousDelegate() { return previous_delegate_; } private: SinkDelegate* previous_delegate_; @@ -146,7 +146,7 @@ class DelegatingLogSink : public spdlog::sinks::sink { set_formatter(spdlog::details::make_unique(pattern)); } void set_formatter(std::unique_ptr formatter) override; - void set_should_escape(bool should_escape) { should_escape_ = should_escape; } + void setShouldEscape(bool should_escape) { should_escape_ = should_escape; } /** * @return bool whether a lock has been established. diff --git a/source/common/protobuf/message_validator_impl.h b/source/common/protobuf/message_validator_impl.h index e2f49ce9dec7..54644f49b843 100644 --- a/source/common/protobuf/message_validator_impl.h +++ b/source/common/protobuf/message_validator_impl.h @@ -85,11 +85,11 @@ class ProdValidationContextImpl : public ValidationContextImpl { : dynamic_warning_validation_visitor_) : ProtobufMessage::getStrictValidationVisitor()) {} - ProtobufMessage::WarningValidationVisitorImpl& static_warning_validation_visitor() { + ProtobufMessage::WarningValidationVisitorImpl& staticWarningValidationVisitor() { return static_warning_validation_visitor_; } - ProtobufMessage::WarningValidationVisitorImpl& dynamic_warning_validation_visitor() { + ProtobufMessage::WarningValidationVisitorImpl& dynamicWarningValidationVisitor() { return dynamic_warning_validation_visitor_; } diff --git a/source/common/router/scoped_rds.cc b/source/common/router/scoped_rds.cc index e54ad6ae530a..32dd71981aff 100644 --- a/source/common/router/scoped_rds.cc +++ b/source/common/router/scoped_rds.cc @@ -406,7 +406,7 @@ ConfigProviderPtr ScopedRoutesConfigProviderManager::createXdsConfigProvider( typed_optarg.scope_key_builder_, factory_context, stat_prefix, typed_optarg.rds_config_source_, static_cast(config_provider_manager) - .route_config_provider_manager(), + .routeConfigProviderPanager(), static_cast(config_provider_manager)); }); diff --git a/source/common/router/scoped_rds.h b/source/common/router/scoped_rds.h index 390f524be192..66b2d53301f7 100644 --- a/source/common/router/scoped_rds.h +++ b/source/common/router/scoped_rds.h @@ -234,7 +234,7 @@ class ScopedRoutesConfigProviderManager : public Envoy::Config::ConfigProviderMa Server::Configuration::ServerFactoryContext& factory_context, const Envoy::Config::ConfigProviderManager::OptionalArg& optarg) override; - RouteConfigProviderManager& route_config_provider_manager() { + RouteConfigProviderManager& routeConfigProviderPanager() { return route_config_provider_manager_; } diff --git 
a/source/common/upstream/ring_hash_lb.cc b/source/common/upstream/ring_hash_lb.cc index 296f5fbf0613..28b64bdefd75 100644 --- a/source/common/upstream/ring_hash_lb.cc +++ b/source/common/upstream/ring_hash_lb.cc @@ -168,7 +168,7 @@ RingHashLoadBalancer::Ring::Ring(const NormalizedHostWeightVector& normalized_ho const uint64_t hash = (hash_function == HashFunction::Cluster_RingHashLbConfig_HashFunction_MURMUR_HASH_2) - ? MurmurHash::murmurHash2_64(hash_key, MurmurHash::STD_HASH_SEED) + ? MurmurHash::murmurHash2(hash_key, MurmurHash::STD_HASH_SEED) : HashUtil::xxHash64(hash_key); ENVOY_LOG(trace, "ring hash: hash_key={} hash={}", hash_key.data(), hash); diff --git a/source/extensions/clusters/redis/redis_cluster_lb.cc b/source/extensions/clusters/redis/redis_cluster_lb.cc index 54a559fa522b..43bb0f9b3222 100644 --- a/source/extensions/clusters/redis/redis_cluster_lb.cc +++ b/source/extensions/clusters/redis/redis_cluster_lb.cc @@ -185,7 +185,7 @@ RedisLoadBalancerContextImpl::RedisLoadBalancerContextImpl( const NetworkFilters::Common::Redis::RespValue& request, NetworkFilters::Common::Redis::Client::ReadPolicy read_policy) : hash_key_(is_redis_cluster ? Crc16::crc16(hashtag(key, true)) - : MurmurHash::murmurHash2_64(hashtag(key, enabled_hashtagging))), + : MurmurHash::murmurHash2(hashtag(key, enabled_hashtagging))), is_read_(isReadRequest(request)), read_policy_(read_policy) {} // Inspired by the redis-cluster hashtagging algorithm diff --git a/source/extensions/filters/http/cache/http_cache.h b/source/extensions/filters/http/cache/http_cache.h index de3dde3120f4..e3c09d2cf4a1 100644 --- a/source/extensions/filters/http/cache/http_cache.h +++ b/source/extensions/filters/http/cache/http_cache.h @@ -214,7 +214,7 @@ class InsertContext { // The insertion is streamed into the cache in chunks whose size is determined // by the client, but with a pace determined by the cache. To avoid streaming // data into cache too fast for the cache to handle, clients should wait for - // the cache to call ready_for_next_chunk() before streaming the next chunk. + // the cache to call readyForNextChunk() before streaming the next chunk. // // The client can abort the streaming insertion by dropping the // InsertContextPtr. 
A cache can abort the insertion by passing 'false' into diff --git a/source/extensions/filters/network/dubbo_proxy/active_message.cc b/source/extensions/filters/network/dubbo_proxy/active_message.cc index b4dcf23f8467..1baaf5771c92 100644 --- a/source/extensions/filters/network/dubbo_proxy/active_message.cc +++ b/source/extensions/filters/network/dubbo_proxy/active_message.cc @@ -31,8 +31,8 @@ DubboFilters::UpstreamResponseStatus ActiveResponseDecoder::onData(Buffer::Insta void ActiveResponseDecoder::onStreamDecoded(MessageMetadataSharedPtr metadata, ContextSharedPtr ctx) { - ASSERT(metadata->message_type() == MessageType::Response || - metadata->message_type() == MessageType::Exception); + ASSERT(metadata->messageType() == MessageType::Response || + metadata->messageType() == MessageType::Exception); ASSERT(metadata->hasResponseStatus()); metadata_ = metadata; @@ -45,24 +45,23 @@ void ActiveResponseDecoder::onStreamDecoded(MessageMetadataSharedPtr metadata, throw DownstreamConnectionCloseException("Downstream has closed or closing"); } - response_connection_.write(ctx->message_origin_data(), false); + response_connection_.write(ctx->messageOriginData(), false); ENVOY_LOG(debug, "dubbo response: the upstream response message has been forwarded to the downstream"); stats_.response_.inc(); stats_.response_decoding_success_.inc(); - if (metadata->message_type() == MessageType::Exception) { + if (metadata->messageType() == MessageType::Exception) { stats_.response_business_exception_.inc(); } - switch (metadata->response_status()) { + switch (metadata->responseStatus()) { case ResponseStatus::Ok: stats_.response_success_.inc(); break; default: stats_.response_error_.inc(); - ENVOY_LOG(error, "dubbo response status: {}", - static_cast(metadata->response_status())); + ENVOY_LOG(error, "dubbo response status: {}", static_cast(metadata->responseStatus())); break; } @@ -70,7 +69,7 @@ void ActiveResponseDecoder::onStreamDecoded(MessageMetadataSharedPtr metadata, response_status_ = DubboFilters::UpstreamResponseStatus::Complete; ENVOY_LOG(debug, "dubbo response: complete processing of upstream response messages, id is {}", - metadata->request_id()); + metadata->requestId()); } FilterStatus ActiveResponseDecoder::applyMessageEncodedFilters(MessageMetadataSharedPtr metadata, @@ -129,7 +128,7 @@ ActiveMessageDecoderFilter::ActiveMessageDecoderFilter(ActiveMessage& parent, void ActiveMessageDecoderFilter::continueDecoding() { ASSERT(parent_.context()); auto state = ActiveMessage::FilterIterationStartState::AlwaysStartFromNext; - if (0 != parent_.context()->message_origin_data().length()) { + if (0 != parent_.context()->messageOriginData().length()) { state = ActiveMessage::FilterIterationStartState::CanStartFromCurrent; ENVOY_LOG(warn, "The original message data is not consumed, triggering the decoder filter from " "the current location"); @@ -138,7 +137,7 @@ void ActiveMessageDecoderFilter::continueDecoding() { if (status == FilterStatus::Continue) { ENVOY_LOG(debug, "dubbo response: start upstream"); // All filters have been executed for the current decoder state. - if (parent_.pending_stream_decoded()) { + if (parent_.pendingStreamDecoded()) { // If the filter stack was paused during messageEnd, handle end-of-request details. 
parent_.finalizeRequest(); } @@ -171,7 +170,7 @@ ActiveMessageEncoderFilter::ActiveMessageEncoderFilter(ActiveMessage& parent, void ActiveMessageEncoderFilter::continueEncoding() { ASSERT(parent_.context()); auto state = ActiveMessage::FilterIterationStartState::AlwaysStartFromNext; - if (0 != parent_.context()->message_origin_data().length()) { + if (0 != parent_.context()->messageOriginData().length()) { state = ActiveMessage::FilterIterationStartState::CanStartFromCurrent; ENVOY_LOG(warn, "The original message data is not consumed, triggering the encoder filter from " "the current location"); @@ -256,8 +255,7 @@ void ActiveMessage::onStreamDecoded(MessageMetadataSharedPtr metadata, ContextSh auto status = applyDecoderFilters(nullptr, FilterIterationStartState::CanStartFromCurrent); if (status == FilterStatus::StopIteration) { - ENVOY_LOG(debug, "dubbo request: stop calling decoder filter, id is {}", - metadata->request_id()); + ENVOY_LOG(debug, "dubbo request: stop calling decoder filter, id is {}", metadata->requestId()); pending_stream_decoded_ = true; return; } @@ -265,14 +263,14 @@ void ActiveMessage::onStreamDecoded(MessageMetadataSharedPtr metadata, ContextSh finalizeRequest(); ENVOY_LOG(debug, "dubbo request: complete processing of downstream request messages, id is {}", - metadata->request_id()); + metadata->requestId()); } void ActiveMessage::finalizeRequest() { pending_stream_decoded_ = false; parent_.stats().request_.inc(); bool is_one_way = false; - switch (metadata_->message_type()) { + switch (metadata_->messageType()) { case MessageType::Request: parent_.stats().request_twoway_.inc(); break; @@ -415,7 +413,7 @@ void ActiveMessage::resetDownstreamConnection() { void ActiveMessage::resetStream() { parent_.deferredMessage(*this); } uint64_t ActiveMessage::requestId() const { - return metadata_ != nullptr ? metadata_->request_id() : 0; + return metadata_ != nullptr ? metadata_->requestId() : 0; } uint64_t ActiveMessage::streamId() const { return stream_id_; } diff --git a/source/extensions/filters/network/dubbo_proxy/active_message.h b/source/extensions/filters/network/dubbo_proxy/active_message.h index a0209fd4271a..2870e8d501d7 100644 --- a/source/extensions/filters/network/dubbo_proxy/active_message.h +++ b/source/extensions/filters/network/dubbo_proxy/active_message.h @@ -44,7 +44,7 @@ class ActiveResponseDecoder : public ResponseDecoderCallbacks, StreamHandler& newStream() override { return *this; } void onHeartbeat(MessageMetadataSharedPtr) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } - uint64_t requestId() const { return metadata_ ? metadata_->request_id() : 0; } + uint64_t requestId() const { return metadata_ ? 
metadata_->requestId() : 0; } private: FilterStatus applyMessageEncodedFilters(MessageMetadataSharedPtr metadata, ContextSharedPtr ctx); @@ -185,7 +185,7 @@ class ActiveMessage : public LinkedObject, void onError(const std::string& what); MessageMetadataSharedPtr metadata() const { return metadata_; } ContextSharedPtr context() const { return context_; } - bool pending_stream_decoded() const { return pending_stream_decoded_; } + bool pendingStreamDecoded() const { return pending_stream_decoded_; } private: void addDecoderFilterWorker(DubboFilters::DecoderFilterSharedPtr filter, bool dual_filter); diff --git a/source/extensions/filters/network/dubbo_proxy/conn_manager.cc b/source/extensions/filters/network/dubbo_proxy/conn_manager.cc index 2970352d5f84..31d91e3897a8 100644 --- a/source/extensions/filters/network/dubbo_proxy/conn_manager.cc +++ b/source/extensions/filters/network/dubbo_proxy/conn_manager.cc @@ -38,7 +38,7 @@ Network::FilterStatus ConnectionManager::onData(Buffer::Instance& data, bool end if (stopped_) { ASSERT(!active_message_list_.empty()); auto metadata = (*active_message_list_.begin())->metadata(); - if (metadata && metadata->message_type() == MessageType::Oneway) { + if (metadata && metadata->messageType() == MessageType::Oneway) { ENVOY_CONN_LOG(trace, "waiting for one-way completion", read_callbacks_->connection()); half_closed_ = true; return Network::FilterStatus::StopIteration; diff --git a/source/extensions/filters/network/dubbo_proxy/decoder.cc b/source/extensions/filters/network/dubbo_proxy/decoder.cc index 3715acf865d5..0f838a9a06f9 100644 --- a/source/extensions/filters/network/dubbo_proxy/decoder.cc +++ b/source/extensions/filters/network/dubbo_proxy/decoder.cc @@ -19,22 +19,22 @@ DecoderStateMachine::onDecodeStreamHeader(Buffer::Instance& buffer) { } auto context = ret.first; - if (metadata->message_type() == MessageType::HeartbeatRequest || - metadata->message_type() == MessageType::HeartbeatResponse) { - if (buffer.length() < (context->header_size() + context->body_size())) { + if (metadata->messageType() == MessageType::HeartbeatRequest || + metadata->messageType() == MessageType::HeartbeatResponse) { + if (buffer.length() < (context->headerSize() + context->bodySize())) { ENVOY_LOG(debug, "dubbo decoder: need more data for {} protocol heartbeat", protocol_.name()); return {ProtocolState::WaitForData}; } ENVOY_LOG(debug, "dubbo decoder: this is the {} heartbeat message", protocol_.name()); - buffer.drain(context->header_size() + context->body_size()); + buffer.drain(context->headerSize() + context->bodySize()); delegate_.onHeartbeat(metadata); return {ProtocolState::Done}; } active_stream_ = delegate_.newStream(metadata, context); ASSERT(active_stream_); - context->message_origin_data().move(buffer, context->header_size()); + context->messageOriginData().move(buffer, context->headerSize()); return {ProtocolState::OnDecodeStreamData}; } @@ -49,8 +49,7 @@ DecoderStateMachine::onDecodeStreamData(Buffer::Instance& buffer) { return {ProtocolState::WaitForData}; } - active_stream_->context_->message_origin_data().move(buffer, - active_stream_->context_->body_size()); + active_stream_->context_->messageOriginData().move(buffer, active_stream_->context_->bodySize()); active_stream_->onStreamDecoded(); active_stream_ = nullptr; diff --git a/source/extensions/filters/network/dubbo_proxy/dubbo_hessian2_serializer_impl.cc b/source/extensions/filters/network/dubbo_proxy/dubbo_hessian2_serializer_impl.cc index 19e40d284808..473a0ce5c6f5 100644 --- 
a/source/extensions/filters/network/dubbo_proxy/dubbo_hessian2_serializer_impl.cc +++ b/source/extensions/filters/network/dubbo_proxy/dubbo_hessian2_serializer_impl.cc @@ -29,9 +29,9 @@ DubboHessian2SerializerImpl::deserializeRpcInvocation(Buffer::Instance& buffer, std::string method_name = HessianUtils::peekString(buffer, &size, total_size); total_size += size; - if (static_cast(context->body_size()) < total_size) { + if (static_cast(context->bodySize()) < total_size) { throw EnvoyException(fmt::format("RpcInvocation size({}) large than body size({})", total_size, - context->body_size())); + context->bodySize())); } auto invo = std::make_shared(); @@ -45,7 +45,7 @@ DubboHessian2SerializerImpl::deserializeRpcInvocation(Buffer::Instance& buffer, std::pair DubboHessian2SerializerImpl::deserializeRpcResult(Buffer::Instance& buffer, ContextSharedPtr context) { - ASSERT(buffer.length() >= context->body_size()); + ASSERT(buffer.length() >= context->bodySize()); size_t total_size = 0; bool has_value = true; @@ -69,15 +69,15 @@ DubboHessian2SerializerImpl::deserializeRpcResult(Buffer::Instance& buffer, throw EnvoyException(fmt::format("not supported return type {}", static_cast(type))); } - if (context->body_size() < total_size) { + if (context->bodySize() < total_size) { throw EnvoyException(fmt::format("RpcResult size({}) large than body size({})", total_size, - context->body_size())); + context->bodySize())); } - if (!has_value && context->body_size() != total_size) { + if (!has_value && context->bodySize() != total_size) { throw EnvoyException( fmt::format("RpcResult is no value, but the rest of the body size({}) not equal 0", - (context->body_size() - total_size))); + (context->bodySize() - total_size))); } return std::pair(result, true); diff --git a/source/extensions/filters/network/dubbo_proxy/dubbo_protocol_impl.cc b/source/extensions/filters/network/dubbo_proxy/dubbo_protocol_impl.cc index b91c4e972ee2..3a05491134d9 100644 --- a/source/extensions/filters/network/dubbo_proxy/dubbo_protocol_impl.cc +++ b/source/extensions/filters/network/dubbo_proxy/dubbo_protocol_impl.cc @@ -66,7 +66,7 @@ void parseRequestInfoFromBuffer(Buffer::Instance& data, MessageMetadataSharedPtr static_cast::type>(type))); } - if (!is_two_way && metadata->message_type() != MessageType::HeartbeatRequest) { + if (!is_two_way && metadata->messageType() != MessageType::HeartbeatRequest) { metadata->setMessageType(MessageType::Oneway); } @@ -129,9 +129,9 @@ DubboProtocolImpl::decodeHeader(Buffer::Instance& buffer, MessageMetadataSharedP } auto context = std::make_shared(); - context->set_header_size(DubboProtocolImpl::MessageSize); - context->set_body_size(body_size); - context->set_heartbeat(is_event); + context->setHeaderSize(DubboProtocolImpl::MessageSize); + context->setBodySize(body_size); + context->setHeartbeat(is_event); return std::pair(context, true); } @@ -140,11 +140,11 @@ bool DubboProtocolImpl::decodeData(Buffer::Instance& buffer, ContextSharedPtr co MessageMetadataSharedPtr metadata) { ASSERT(serializer_); - if ((buffer.length()) < static_cast(context->body_size())) { + if ((buffer.length()) < static_cast(context->bodySize())) { return false; } - switch (metadata->message_type()) { + switch (metadata->messageType()) { case MessageType::Oneway: case MessageType::Request: { auto ret = serializer_->deserializeRpcInvocation(buffer, context); @@ -175,16 +175,16 @@ bool DubboProtocolImpl::encode(Buffer::Instance& buffer, const MessageMetadata& const std::string& content, RpcResponseType type) { 
ASSERT(serializer_); - switch (metadata.message_type()) { + switch (metadata.messageType()) { case MessageType::HeartbeatResponse: { ASSERT(metadata.hasResponseStatus()); ASSERT(content.empty()); buffer.writeBEInt(MagicNumber); - uint8_t flag = static_cast(metadata.serialization_type()); + uint8_t flag = static_cast(metadata.serializationType()); flag = flag ^ EventMask; buffer.writeByte(flag); - buffer.writeByte(static_cast(metadata.response_status())); - buffer.writeBEInt(metadata.request_id()); + buffer.writeByte(static_cast(metadata.responseStatus())); + buffer.writeBEInt(metadata.requestId()); buffer.writeBEInt(0); return true; } @@ -195,9 +195,9 @@ bool DubboProtocolImpl::encode(Buffer::Instance& buffer, const MessageMetadata& size_t serialized_body_size = serializer_->serializeRpcResult(body_buffer, content, type); buffer.writeBEInt(MagicNumber); - buffer.writeByte(static_cast(metadata.serialization_type())); - buffer.writeByte(static_cast(metadata.response_status())); - buffer.writeBEInt(metadata.request_id()); + buffer.writeByte(static_cast(metadata.serializationType())); + buffer.writeByte(static_cast(metadata.responseStatus())); + buffer.writeBEInt(metadata.requestId()); buffer.writeBEInt(serialized_body_size); buffer.move(body_buffer, serialized_body_size); diff --git a/source/extensions/filters/network/dubbo_proxy/heartbeat_response.cc b/source/extensions/filters/network/dubbo_proxy/heartbeat_response.cc index 3d9f7a648844..69df071a3527 100644 --- a/source/extensions/filters/network/dubbo_proxy/heartbeat_response.cc +++ b/source/extensions/filters/network/dubbo_proxy/heartbeat_response.cc @@ -8,8 +8,8 @@ namespace DubboProxy { DubboFilters::DirectResponse::ResponseType HeartbeatResponse::encode(MessageMetadata& metadata, DubboProxy::Protocol& protocol, Buffer::Instance& buffer) const { - ASSERT(metadata.response_status() == ResponseStatus::Ok); - ASSERT(metadata.message_type() == MessageType::HeartbeatResponse); + ASSERT(metadata.responseStatus() == ResponseStatus::Ok); + ASSERT(metadata.messageType() == MessageType::HeartbeatResponse); if (!protocol.encode(buffer, metadata, "")) { throw EnvoyException("failed to encode heartbeat message"); diff --git a/source/extensions/filters/network/dubbo_proxy/message.h b/source/extensions/filters/network/dubbo_proxy/message.h index 8e74e25dcb41..19a1f91d90d9 100644 --- a/source/extensions/filters/network/dubbo_proxy/message.h +++ b/source/extensions/filters/network/dubbo_proxy/message.h @@ -93,11 +93,11 @@ class Context { bool hasAttachments() const { return !attachments_.empty(); } const AttachmentMap& attachments() const { return attachments_; } - Buffer::Instance& message_origin_data() { return message_origin_buffer_; } - size_t message_size() const { return header_size() + body_size(); } + Buffer::Instance& messageOriginData() { return message_origin_buffer_; } + size_t messageSize() const { return headerSize() + bodySize(); } - virtual size_t body_size() const PURE; - virtual size_t header_size() const PURE; + virtual size_t bodySize() const PURE; + virtual size_t headerSize() const PURE; protected: Context() = default; @@ -118,10 +118,10 @@ class RpcInvocation { public: virtual ~RpcInvocation() = default; - virtual const std::string& service_name() const PURE; - virtual const std::string& method_name() const PURE; - virtual const absl::optional& service_version() const PURE; - virtual const absl::optional& service_group() const PURE; + virtual const std::string& serviceName() const PURE; + virtual const std::string& methodName() 
const PURE; + virtual const absl::optional& serviceVersion() const PURE; + virtual const absl::optional& serviceGroup() const PURE; }; using RpcInvocationSharedPtr = std::shared_ptr; diff --git a/source/extensions/filters/network/dubbo_proxy/message_impl.h b/source/extensions/filters/network/dubbo_proxy/message_impl.h index 1fc20c5f7a11..c535b522e90e 100644 --- a/source/extensions/filters/network/dubbo_proxy/message_impl.h +++ b/source/extensions/filters/network/dubbo_proxy/message_impl.h @@ -13,11 +13,11 @@ class ContextBase : public Context { ~ContextBase() override = default; // Override from Context - size_t body_size() const override { return body_size_; } - size_t header_size() const override { return header_size_; } + size_t bodySize() const override { return body_size_; } + size_t headerSize() const override { return header_size_; } - void set_body_size(size_t size) { body_size_ = size; } - void set_header_size(size_t size) { header_size_ = size; } + void setBodySize(size_t size) { body_size_ = size; } + void setHeaderSize(size_t size) { header_size_ = size; } protected: size_t body_size_{0}; @@ -29,8 +29,8 @@ class ContextImpl : public ContextBase { ContextImpl() = default; ~ContextImpl() override = default; - bool is_heartbeat() const { return is_heartbeat_; } - void set_heartbeat(bool is_heartbeat) { is_heartbeat_ = is_heartbeat; } + bool isHeartbeat() const { return is_heartbeat_; } + void setHeartbeat(bool is_heartbeat) { is_heartbeat_ = is_heartbeat; } private: bool is_heartbeat_{false}; @@ -41,16 +41,16 @@ class RpcInvocationBase : public RpcInvocation { ~RpcInvocationBase() override = default; void setServiceName(const std::string& name) { service_name_ = name; } - const std::string& service_name() const override { return service_name_; } + const std::string& serviceName() const override { return service_name_; } void setMethodName(const std::string& name) { method_name_ = name; } - const std::string& method_name() const override { return method_name_; } + const std::string& methodName() const override { return method_name_; } void setServiceVersion(const std::string& version) { service_version_ = version; } - const absl::optional& service_version() const override { return service_version_; } + const absl::optional& serviceVersion() const override { return service_version_; } void setServiceGroup(const std::string& group) { group_ = group; } - const absl::optional& service_group() const override { return group_; } + const absl::optional& serviceGroup() const override { return group_; } protected: std::string service_name_; diff --git a/source/extensions/filters/network/dubbo_proxy/metadata.h b/source/extensions/filters/network/dubbo_proxy/metadata.h index 41a7f3976f4d..5f0037ca6ae3 100644 --- a/source/extensions/filters/network/dubbo_proxy/metadata.h +++ b/source/extensions/filters/network/dubbo_proxy/metadata.h @@ -23,31 +23,31 @@ class MessageMetadata { invocation_info_ = invocation_info; } bool hasInvocationInfo() const { return invocation_info_ != nullptr; } - const RpcInvocation& invocation_info() const { return *invocation_info_; } + const RpcInvocation& invocationInfo() const { return *invocation_info_; } void setProtocolType(ProtocolType type) { proto_type_ = type; } - ProtocolType protocol_type() const { return proto_type_; } + ProtocolType protocolType() const { return proto_type_; } void setProtocolVersion(uint8_t version) { protocol_version_ = version; } - uint8_t protocol_version() const { return protocol_version_; } + uint8_t protocolVersion() const { return 
protocol_version_; } void setMessageType(MessageType type) { message_type_ = type; } - MessageType message_type() const { return message_type_; } + MessageType messageType() const { return message_type_; } void setRequestId(int64_t id) { request_id_ = id; } - int64_t request_id() const { return request_id_; } + int64_t requestId() const { return request_id_; } void setTimeout(uint32_t timeout) { timeout_ = timeout; } absl::optional timeout() const { return timeout_; } void setTwoWayFlag(bool two_way) { is_two_way_ = two_way; } - bool is_two_way() const { return is_two_way_; } + bool isTwoWay() const { return is_two_way_; } template void setSerializationType(T type) { ASSERT((std::is_same::type>::value)); serialization_type_ = static_cast(type); } - template T serialization_type() const { + template T serializationType() const { ASSERT((std::is_same::type>::value)); return static_cast(serialization_type_); } @@ -56,7 +56,7 @@ class MessageMetadata { ASSERT((std::is_same::type>::value)); response_status_ = static_cast(status); } - template T response_status() const { + template T responseStatus() const { ASSERT((std::is_same::type>::value)); return static_cast(response_status_.value()); } diff --git a/source/extensions/filters/network/dubbo_proxy/router/route_matcher.cc b/source/extensions/filters/network/dubbo_proxy/router/route_matcher.cc index 62f483e6933b..1ff5c79e3fbf 100644 --- a/source/extensions/filters/network/dubbo_proxy/router/route_matcher.cc +++ b/source/extensions/filters/network/dubbo_proxy/router/route_matcher.cc @@ -82,7 +82,7 @@ bool ParameterRouteEntryImpl::matchParameter(absl::string_view request_data, RouteConstSharedPtr ParameterRouteEntryImpl::matches(const MessageMetadata& metadata, uint64_t random_value) const { ASSERT(metadata.hasInvocationInfo()); - const auto invocation = dynamic_cast(&metadata.invocation_info()); + const auto invocation = dynamic_cast(&metadata.invocationInfo()); ASSERT(invocation); if (!invocation->hasParameters()) { return nullptr; @@ -141,7 +141,7 @@ MethodRouteEntryImpl::~MethodRouteEntryImpl() = default; RouteConstSharedPtr MethodRouteEntryImpl::matches(const MessageMetadata& metadata, uint64_t random_value) const { ASSERT(metadata.hasInvocationInfo()); - const auto invocation = dynamic_cast(&metadata.invocation_info()); + const auto invocation = dynamic_cast(&metadata.invocationInfo()); ASSERT(invocation); if (invocation->hasHeaders() && !RouteEntryImplBase::headersMatch(invocation->headers())) { @@ -149,14 +149,14 @@ RouteConstSharedPtr MethodRouteEntryImpl::matches(const MessageMetadata& metadat return nullptr; } - if (invocation->method_name().empty()) { + if (invocation->methodName().empty()) { ENVOY_LOG(error, "dubbo route matcher: there is no method name in the metadata"); return nullptr; } - if (!method_name_.match(invocation->method_name())) { + if (!method_name_.match(invocation->methodName())) { ENVOY_LOG(debug, "dubbo route matcher: method matching failed, input method '{}'", - invocation->method_name()); + invocation->methodName()); return nullptr; } @@ -182,13 +182,13 @@ SingleRouteMatcherImpl::SingleRouteMatcherImpl(const RouteConfig& config, RouteConstSharedPtr SingleRouteMatcherImpl::route(const MessageMetadata& metadata, uint64_t random_value) const { ASSERT(metadata.hasInvocationInfo()); - const auto& invocation = metadata.invocation_info(); + const auto& invocation = metadata.invocationInfo(); - if (service_name_ == invocation.service_name() && + if (service_name_ == invocation.serviceName() && (group_.value().empty() || 
- (invocation.service_group().has_value() && invocation.service_group().value() == group_)) && - (version_.value().empty() || (invocation.service_version().has_value() && - invocation.service_version().value() == version_))) { + (invocation.serviceGroup().has_value() && invocation.serviceGroup().value() == group_)) && + (version_.value().empty() || (invocation.serviceVersion().has_value() && + invocation.serviceVersion().value() == version_))) { for (const auto& route : routes_) { RouteConstSharedPtr route_entry = route->matches(metadata, random_value); if (nullptr != route_entry) { diff --git a/source/extensions/filters/network/dubbo_proxy/router/router_impl.cc b/source/extensions/filters/network/dubbo_proxy/router/router_impl.cc index 5bd9aab94677..ffe3ef9ce1c1 100644 --- a/source/extensions/filters/network/dubbo_proxy/router/router_impl.cc +++ b/source/extensions/filters/network/dubbo_proxy/router/router_impl.cc @@ -24,15 +24,15 @@ void Router::setDecoderFilterCallbacks(DubboFilters::DecoderFilterCallbacks& cal FilterStatus Router::onMessageDecoded(MessageMetadataSharedPtr metadata, ContextSharedPtr ctx) { ASSERT(metadata->hasInvocationInfo()); - const auto& invocation = metadata->invocation_info(); + const auto& invocation = metadata->invocationInfo(); route_ = callbacks_->route(); if (!route_) { ENVOY_STREAM_LOG(debug, "dubbo router: no cluster match for interface '{}'", *callbacks_, - invocation.service_name()); + invocation.serviceName()); callbacks_->sendLocalReply(AppException(ResponseStatus::ServiceNotFound, fmt::format("dubbo router: no route for interface '{}'", - invocation.service_name())), + invocation.serviceName())), false); return FilterStatus::StopIteration; } @@ -52,7 +52,7 @@ FilterStatus Router::onMessageDecoded(MessageMetadataSharedPtr metadata, Context cluster_ = cluster->info(); ENVOY_STREAM_LOG(debug, "dubbo router: cluster '{}' match for interface '{}'", *callbacks_, - route_entry_->clusterName(), invocation.service_name()); + route_entry_->clusterName(), invocation.serviceName()); if (cluster_->maintenanceMode()) { callbacks_->sendLocalReply( @@ -75,7 +75,7 @@ FilterStatus Router::onMessageDecoded(MessageMetadataSharedPtr metadata, Context } ENVOY_STREAM_LOG(debug, "dubbo router: decoding request", *callbacks_); - upstream_request_buffer_.move(ctx->message_origin_data(), ctx->message_size()); + upstream_request_buffer_.move(ctx->messageOriginData(), ctx->messageSize()); upstream_request_ = std::make_unique( *this, *conn_pool, metadata, callbacks_->serializationType(), callbacks_->protocolType()); @@ -262,7 +262,7 @@ void Router::UpstreamRequest::onUpstreamHostSelected(Upstream::HostDescriptionCo } void Router::UpstreamRequest::onResetStream(ConnectionPool::PoolFailureReason reason) { - if (metadata_->message_type() == MessageType::Oneway) { + if (metadata_->messageType() == MessageType::Oneway) { // For oneway requests, we should not attempt a response. Reset the downstream to signal // an error. ENVOY_LOG(debug, "dubbo upstream request: the request is oneway, reset downstream stream"); diff --git a/source/extensions/filters/network/mongo_proxy/proxy.cc b/source/extensions/filters/network/mongo_proxy/proxy.cc index c764c618df1b..fa70d4ae9801 100644 --- a/source/extensions/filters/network/mongo_proxy/proxy.cc +++ b/source/extensions/filters/network/mongo_proxy/proxy.cc @@ -166,7 +166,7 @@ void ProxyFilter::decodeQuery(QueryMessagePtr&& message) { } // Global stats. 
- if (active_query->query_info_.max_time() < 1) { + if (active_query->query_info_.maxTime() < 1) { stats_.op_query_no_max_time_.inc(); } if (query_type == QueryMessageInfo::QueryType::ScatterGet) { diff --git a/source/extensions/filters/network/mongo_proxy/utility.h b/source/extensions/filters/network/mongo_proxy/utility.h index e7fda7f26b1c..3b8a6773601c 100644 --- a/source/extensions/filters/network/mongo_proxy/utility.h +++ b/source/extensions/filters/network/mongo_proxy/utility.h @@ -40,7 +40,7 @@ class QueryMessageInfo { /** * @return the value of maxTimeMS or 0 if not given. */ - int32_t max_time() { return max_time_; } + int32_t maxTime() { return max_time_; } /** * @return the type of a query message. diff --git a/source/extensions/filters/network/thrift_proxy/metadata.h b/source/extensions/filters/network/thrift_proxy/metadata.h index 525c9fb4ae2a..f99be53d812b 100644 --- a/source/extensions/filters/network/thrift_proxy/metadata.h +++ b/source/extensions/filters/network/thrift_proxy/metadata.h @@ -65,7 +65,7 @@ class MessageMetadata { /** * @return SpanList& a reference to a mutable list of Spans */ - SpanList& mutable_spans() { return spans_; } + SpanList& mutableSpans() { return spans_; } bool hasAppException() const { return app_ex_type_.has_value(); } void setAppException(AppExceptionType app_ex_type, const std::string& message) { diff --git a/source/extensions/filters/network/thrift_proxy/twitter_protocol_impl.cc b/source/extensions/filters/network/thrift_proxy/twitter_protocol_impl.cc index 068646839ed2..6d748c2817e5 100644 --- a/source/extensions/filters/network/thrift_proxy/twitter_protocol_impl.cc +++ b/source/extensions/filters/network/thrift_proxy/twitter_protocol_impl.cc @@ -1054,7 +1054,7 @@ void TwitterProtocolImpl::updateMetadataWithResponseHeader(const ThriftObject& h } SpanList& spans = resp_header.spans(); - std::copy(spans.begin(), spans.end(), std::back_inserter(metadata.mutable_spans())); + std::copy(spans.begin(), spans.end(), std::back_inserter(metadata.mutableSpans())); } void TwitterProtocolImpl::writeResponseHeader(Buffer::Instance& buffer, diff --git a/source/extensions/tracers/zipkin/span_context.h b/source/extensions/tracers/zipkin/span_context.h index 6dd08c3b291b..c06381272cbe 100644 --- a/source/extensions/tracers/zipkin/span_context.h +++ b/source/extensions/tracers/zipkin/span_context.h @@ -52,17 +52,17 @@ class SpanContext { /** * @return the span's parent id as an integer. */ - uint64_t parent_id() const { return parent_id_; } + uint64_t parentId() const { return parent_id_; } /** * @return the high 64 bits of the trace id as an integer. */ - uint64_t trace_id_high() const { return trace_id_high_; } + uint64_t traceIdHigh() const { return trace_id_high_; } /** * @return the low 64 bits of the trace id as an integer. */ - uint64_t trace_id() const { return trace_id_; } + uint64_t traceId() const { return trace_id_; } /** * @return whether using 128 bit trace id. 
diff --git a/source/extensions/tracers/zipkin/tracer.cc b/source/extensions/tracers/zipkin/tracer.cc index 866f40813382..f334246d4c51 100644 --- a/source/extensions/tracers/zipkin/tracer.cc +++ b/source/extensions/tracers/zipkin/tracer.cc @@ -86,8 +86,8 @@ SpanPtr Tracer::startSpan(const Tracing::Config& config, const std::string& span // Initialize the shared context for the new span span_ptr->setId(previous_context.id()); - if (previous_context.parent_id()) { - span_ptr->setParentId(previous_context.parent_id()); + if (previous_context.parentId()) { + span_ptr->setParentId(previous_context.parentId()); } // Set the SR annotation value @@ -105,9 +105,9 @@ SpanPtr Tracer::startSpan(const Tracing::Config& config, const std::string& span span_ptr->addAnnotation(std::move(annotation)); // Keep the same trace id - span_ptr->setTraceId(previous_context.trace_id()); + span_ptr->setTraceId(previous_context.traceId()); if (previous_context.is128BitTraceId()) { - span_ptr->setTraceIdHigh(previous_context.trace_id_high()); + span_ptr->setTraceIdHigh(previous_context.traceIdHigh()); } // Keep the same sampled flag diff --git a/source/server/admin/admin.h b/source/server/admin/admin.h index a1e32e34c74b..4cf81cba4f9e 100644 --- a/source/server/admin/admin.h +++ b/source/server/admin/admin.h @@ -78,7 +78,7 @@ class AdminImpl : public Admin, Http::ResponseHeaderMap& response_headers, Buffer::Instance& response, AdminStream& admin_stream); const Network::Socket& socket() override { return *socket_; } - Network::Socket& mutable_socket() { return *socket_; } + Network::Socket& mutableSocket() { return *socket_; } // Server::Admin // TODO(jsedgwick) These can be managed with a generic version of ConfigTracker. diff --git a/source/server/server.cc b/source/server/server.cc index cf3957118eed..b71fd650f295 100644 --- a/source/server/server.cc +++ b/source/server/server.cc @@ -330,9 +330,9 @@ void InstanceImpl::initialize(const Options& options, ServerStats{ALL_SERVER_STATS(POOL_COUNTER_PREFIX(stats_store_, server_stats_prefix), POOL_GAUGE_PREFIX(stats_store_, server_stats_prefix), POOL_HISTOGRAM_PREFIX(stats_store_, server_stats_prefix))}); - validation_context_.static_warning_validation_visitor().setUnknownCounter( + validation_context_.staticWarningValidationVisitor().setUnknownCounter( server_stats_->static_unknown_fields_); - validation_context_.dynamic_warning_validation_visitor().setUnknownCounter( + validation_context_.dynamicWarningValidationVisitor().setUnknownCounter( server_stats_->dynamic_unknown_fields_); initialization_timer_ = std::make_unique( diff --git a/test/common/common/hash_fuzz_test.cc b/test/common/common/hash_fuzz_test.cc index a4b3f6c032a3..c9d84205831e 100644 --- a/test/common/common/hash_fuzz_test.cc +++ b/test/common/common/hash_fuzz_test.cc @@ -12,7 +12,7 @@ DEFINE_FUZZER(const uint8_t* buf, size_t len) { const std::string input(reinterpret_cast(buf), len); { HashUtil::xxHash64(input); } { HashUtil::djb2CaseInsensitiveHash(input); } - { MurmurHash::murmurHash2_64(input); } + { MurmurHash::murmurHash2(input); } if (len > 0) { // Split the input string into two parts to make a key-value pair. 
const size_t split_point = *reinterpret_cast(buf) % len; diff --git a/test/common/common/hash_test.cc b/test/common/common/hash_test.cc index 4112b67d59c5..facb7be4c1f2 100644 --- a/test/common/common/hash_test.cc +++ b/test/common/common/hash_test.cc @@ -19,22 +19,21 @@ TEST(Hash, djb2CaseInsensitiveHash) { EXPECT_EQ(5381U, HashUtil::djb2CaseInsensitiveHash("")); } -TEST(Hash, murmurHash2_64) { - EXPECT_EQ(9631199822919835226U, MurmurHash::murmurHash2_64("foo")); - EXPECT_EQ(11474628671133349555U, MurmurHash::murmurHash2_64("bar")); - EXPECT_EQ(16306510975912980159U, MurmurHash::murmurHash2_64("foo\nbar")); - EXPECT_EQ(12847078931730529320U, MurmurHash::murmurHash2_64("lyft")); - EXPECT_EQ(6142509188972423790U, MurmurHash::murmurHash2_64("")); +TEST(Hash, murmurHash2) { + EXPECT_EQ(9631199822919835226U, MurmurHash::murmurHash2("foo")); + EXPECT_EQ(11474628671133349555U, MurmurHash::murmurHash2("bar")); + EXPECT_EQ(16306510975912980159U, MurmurHash::murmurHash2("foo\nbar")); + EXPECT_EQ(12847078931730529320U, MurmurHash::murmurHash2("lyft")); + EXPECT_EQ(6142509188972423790U, MurmurHash::murmurHash2("")); } #if __GLIBCXX__ >= 20130411 && __GLIBCXX__ <= 20180726 TEST(Hash, stdhash) { - EXPECT_EQ(std::hash()(std::string("foo")), MurmurHash::murmurHash2_64("foo")); - EXPECT_EQ(std::hash()(std::string("bar")), MurmurHash::murmurHash2_64("bar")); - EXPECT_EQ(std::hash()(std::string("foo\nbar")), - MurmurHash::murmurHash2_64("foo\nbar")); - EXPECT_EQ(std::hash()(std::string("lyft")), MurmurHash::murmurHash2_64("lyft")); - EXPECT_EQ(std::hash()(std::string("")), MurmurHash::murmurHash2_64("")); + EXPECT_EQ(std::hash()(std::string("foo")), MurmurHash::murmurHash2("foo")); + EXPECT_EQ(std::hash()(std::string("bar")), MurmurHash::murmurHash2("bar")); + EXPECT_EQ(std::hash()(std::string("foo\nbar")), MurmurHash::murmurHash2("foo\nbar")); + EXPECT_EQ(std::hash()(std::string("lyft")), MurmurHash::murmurHash2("lyft")); + EXPECT_EQ(std::hash()(std::string("")), MurmurHash::murmurHash2("")); } #endif diff --git a/test/extensions/filters/network/dubbo_proxy/app_exception_test.cc b/test/extensions/filters/network/dubbo_proxy/app_exception_test.cc index 3856f893bf2c..d58bea0e47e6 100644 --- a/test/extensions/filters/network/dubbo_proxy/app_exception_test.cc +++ b/test/extensions/filters/network/dubbo_proxy/app_exception_test.cc @@ -49,9 +49,9 @@ TEST_F(AppExceptionTest, Encode) { EXPECT_TRUE(result.second); const ContextImpl* context = static_cast(result.first.get()); - EXPECT_EQ(expect_body_size, context->body_size()); - EXPECT_EQ(metadata->message_type(), MessageType::Response); - buffer.drain(context->header_size()); + EXPECT_EQ(expect_body_size, context->bodySize()); + EXPECT_EQ(metadata->messageType(), MessageType::Response); + buffer.drain(context->headerSize()); // Verify the response type and content. 
size_t hessian_int_size; diff --git a/test/extensions/filters/network/dubbo_proxy/conn_manager_test.cc b/test/extensions/filters/network/dubbo_proxy/conn_manager_test.cc index f97573ff5da9..54951ea41c31 100644 --- a/test/extensions/filters/network/dubbo_proxy/conn_manager_test.cc +++ b/test/extensions/filters/network/dubbo_proxy/conn_manager_test.cc @@ -366,13 +366,13 @@ TEST_F(ConnectionManagerTest, OnDataHandlesHeartbeatEvent) { auto result = protocol->decodeHeader(buffer, metadata); EXPECT_TRUE(result.second); const DubboProxy::ContextImpl& ctx = *static_cast(result.first.get()); - EXPECT_TRUE(ctx.is_heartbeat()); + EXPECT_TRUE(ctx.isHeartbeat()); EXPECT_TRUE(metadata->hasResponseStatus()); - EXPECT_FALSE(metadata->is_two_way()); - EXPECT_EQ(ProtocolType::Dubbo, metadata->protocol_type()); - EXPECT_EQ(metadata->response_status(), ResponseStatus::Ok); - EXPECT_EQ(metadata->message_type(), MessageType::HeartbeatResponse); - buffer.drain(ctx.header_size()); + EXPECT_FALSE(metadata->isTwoWay()); + EXPECT_EQ(ProtocolType::Dubbo, metadata->protocolType()); + EXPECT_EQ(metadata->responseStatus(), ResponseStatus::Ok); + EXPECT_EQ(metadata->messageType(), MessageType::HeartbeatResponse); + buffer.drain(ctx.headerSize()); })); EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); @@ -1187,7 +1187,7 @@ serialization_type: Hessian2 .WillOnce(Invoke([&](DubboFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; })); EXPECT_CALL(*decoder_filter, onMessageDecoded(_, _)) .WillOnce(Invoke([&](MessageMetadataSharedPtr metadata, ContextSharedPtr) -> FilterStatus { - auto invo = static_cast(&metadata->invocation_info()); + auto invo = static_cast(&metadata->invocationInfo()); auto data = const_cast(invo); data->setServiceName("org.apache.dubbo.demo.DemoService"); data->setMethodName("test"); @@ -1248,7 +1248,7 @@ TEST_F(ConnectionManagerTest, MessageDecodedReturnStopIteration) { size_t buf_size = buffer_.length(); EXPECT_CALL(*decoder_filter, onMessageDecoded(_, _)) .WillOnce(Invoke([&](MessageMetadataSharedPtr, ContextSharedPtr ctx) -> FilterStatus { - EXPECT_EQ(ctx->message_size(), buf_size); + EXPECT_EQ(ctx->messageSize(), buf_size); return FilterStatus::StopIteration; })); @@ -1336,8 +1336,8 @@ TEST_F(ConnectionManagerTest, HandleResponseWithEncoderFilter) { EXPECT_CALL(*encoder_filter, onMessageEncoded(_, _)) .WillOnce( Invoke([&](MessageMetadataSharedPtr metadata, ContextSharedPtr ctx) -> FilterStatus { - EXPECT_EQ(metadata->request_id(), request_id); - EXPECT_EQ(ctx->message_size(), expect_response_length); + EXPECT_EQ(metadata->requestId(), request_id); + EXPECT_EQ(ctx->messageSize(), expect_response_length); return FilterStatus::Continue; })); @@ -1364,7 +1364,7 @@ TEST_F(ConnectionManagerTest, HandleResponseWithCodecFilter) { .WillOnce(Invoke([&](DubboFilters::DecoderFilterCallbacks& cb) -> void { callbacks = &cb; })); EXPECT_CALL(*mock_codec_filter, onMessageDecoded(_, _)) .WillOnce(Invoke([&](MessageMetadataSharedPtr metadata, ContextSharedPtr) -> FilterStatus { - EXPECT_EQ(metadata->request_id(), request_id); + EXPECT_EQ(metadata->requestId(), request_id); return FilterStatus::Continue; })); @@ -1386,8 +1386,8 @@ TEST_F(ConnectionManagerTest, HandleResponseWithCodecFilter) { EXPECT_CALL(*mock_codec_filter, onMessageEncoded(_, _)) .WillOnce( Invoke([&](MessageMetadataSharedPtr metadata, ContextSharedPtr ctx) -> FilterStatus { - EXPECT_EQ(metadata->request_id(), request_id); - EXPECT_EQ(ctx->message_size(), expect_response_length); + 
EXPECT_EQ(metadata->requestId(), request_id); + EXPECT_EQ(ctx->messageSize(), expect_response_length); return FilterStatus::Continue; })); @@ -1410,7 +1410,7 @@ TEST_F(ConnectionManagerTest, AddDataWithStopAndContinue) { EXPECT_CALL(*config_->decoder_filters_[0], onMessageDecoded(_, _)) .WillOnce(Invoke([&](MessageMetadataSharedPtr metadata, ContextSharedPtr) -> FilterStatus { - EXPECT_EQ(metadata->request_id(), request_id); + EXPECT_EQ(metadata->requestId(), request_id); return FilterStatus::Continue; })); EXPECT_CALL(*config_->decoder_filters_[1], onMessageDecoded(_, _)) @@ -1425,7 +1425,7 @@ TEST_F(ConnectionManagerTest, AddDataWithStopAndContinue) { // For encode direction EXPECT_CALL(*config_->encoder_filters_[0], onMessageEncoded(_, _)) .WillOnce(Invoke([&](MessageMetadataSharedPtr metadata, ContextSharedPtr) -> FilterStatus { - EXPECT_EQ(metadata->request_id(), request_id); + EXPECT_EQ(metadata->requestId(), request_id); return FilterStatus::Continue; })); EXPECT_CALL(*config_->encoder_filters_[1], onMessageEncoded(_, _)) diff --git a/test/extensions/filters/network/dubbo_proxy/decoder_test.cc b/test/extensions/filters/network/dubbo_proxy/decoder_test.cc index cf285ba5bb77..bcc3997772cd 100644 --- a/test/extensions/filters/network/dubbo_proxy/decoder_test.cc +++ b/test/extensions/filters/network/dubbo_proxy/decoder_test.cc @@ -38,8 +38,8 @@ class DecoderStateMachineTestBase { Invoke([=](Buffer::Instance&, MessageMetadataSharedPtr metadata) -> std::pair { auto context = std::make_shared(); - context->set_header_size(16); - context->set_body_size(body_size); + context->setHeaderSize(16); + context->setBodySize(body_size); metadata->setMessageType(type); return std::pair(context, true); @@ -99,7 +99,7 @@ TEST_F(DubboDecoderStateMachineTest, RequestMessageCallbacks) { Buffer::OwnedImpl buffer; EXPECT_EQ(dsm.run(buffer), ProtocolState::Done); - EXPECT_EQ(active_stream_->metadata_->message_type(), MessageType::Request); + EXPECT_EQ(active_stream_->metadata_->messageType(), MessageType::Request); } TEST_F(DubboDecoderStateMachineTest, ResponseMessageCallbacks) { @@ -114,7 +114,7 @@ TEST_F(DubboDecoderStateMachineTest, ResponseMessageCallbacks) { Buffer::OwnedImpl buffer; EXPECT_EQ(dsm.run(buffer), ProtocolState::Done); - EXPECT_EQ(active_stream_->metadata_->message_type(), MessageType::Response); + EXPECT_EQ(active_stream_->metadata_->messageType(), MessageType::Response); } TEST_F(DubboDecoderStateMachineTest, SerializeRpcInvocationException) { @@ -194,8 +194,8 @@ TEST_F(DubboDecoderTest, NeedMoreDataForProtocolBody) { MessageMetadataSharedPtr metadate) -> std::pair { metadate->setMessageType(MessageType::Response); auto context = std::make_shared(); - context->set_header_size(16); - context->set_body_size(10); + context->setHeaderSize(16); + context->setBodySize(10); return std::pair(context, true); })); EXPECT_CALL(protocol_, decodeData(_, _, _)) @@ -228,8 +228,8 @@ TEST_F(DubboDecoderTest, DecodeResponseMessage) { MessageMetadataSharedPtr metadate) -> std::pair { metadate->setMessageType(MessageType::Response); auto context = std::make_shared(); - context->set_header_size(16); - context->set_body_size(10); + context->setHeaderSize(16); + context->setBodySize(10); return std::pair(context, true); })); EXPECT_CALL(protocol_, decodeData(_, _, _)).WillOnce(Return(true)); @@ -251,8 +251,8 @@ TEST_F(DubboDecoderTest, DecodeResponseMessage) { MessageMetadataSharedPtr metadate) -> std::pair { metadate->setMessageType(MessageType::Response); auto context = std::make_shared(); - 
context->set_header_size(16); - context->set_body_size(10); + context->setHeaderSize(16); + context->setBodySize(10); return std::pair(context, true); })); EXPECT_CALL(protocol_, decodeData(_, _, _)).WillOnce(Return(true)); diff --git a/test/extensions/filters/network/dubbo_proxy/dubbo_hessian2_serializer_impl_test.cc b/test/extensions/filters/network/dubbo_proxy/dubbo_hessian2_serializer_impl_test.cc index 17a500b4719b..94ff9f3f7360 100644 --- a/test/extensions/filters/network/dubbo_proxy/dubbo_hessian2_serializer_impl_test.cc +++ b/test/extensions/filters/network/dubbo_proxy/dubbo_hessian2_serializer_impl_test.cc @@ -32,14 +32,14 @@ TEST(HessianProtocolTest, deserializeRpcInvocation) { 0x04, 't', 'e', 's', 't', // method name })); std::shared_ptr context = std::make_shared(); - context->set_body_size(buffer.length()); + context->setBodySize(buffer.length()); auto result = serializer.deserializeRpcInvocation(buffer, context); EXPECT_TRUE(result.second); auto invo = result.first; - EXPECT_STREQ("test", invo->method_name().c_str()); - EXPECT_STREQ("test", invo->service_name().c_str()); - EXPECT_STREQ("0.0.0", invo->service_version().value().c_str()); + EXPECT_STREQ("test", invo->methodName().c_str()); + EXPECT_STREQ("test", invo->serviceName().c_str()); + EXPECT_STREQ("0.0.0", invo->serviceVersion().value().c_str()); } // incorrect body size @@ -54,7 +54,7 @@ TEST(HessianProtocolTest, deserializeRpcInvocation) { std::string exception_string = fmt::format("RpcInvocation size({}) large than body size({})", buffer.length(), buffer.length() - 1); std::shared_ptr context = std::make_shared(); - context->set_body_size(buffer.length() - 1); + context->setBodySize(buffer.length() - 1); EXPECT_THROW_WITH_MESSAGE(serializer.deserializeRpcInvocation(buffer, context), EnvoyException, exception_string); } @@ -70,7 +70,7 @@ TEST(HessianProtocolTest, deserializeRpcResult) { '\x94', // return type 0x04, 't', 'e', 's', 't', // return body })); - context->set_body_size(4); + context->setBodySize(4); auto result = serializer.deserializeRpcResult(buffer, context); EXPECT_TRUE(result.second); EXPECT_FALSE(result.first->hasException()); @@ -82,7 +82,7 @@ TEST(HessianProtocolTest, deserializeRpcResult) { '\x93', // return type 0x04, 't', 'e', 's', 't', // return body })); - context->set_body_size(4); + context->setBodySize(4); auto result = serializer.deserializeRpcResult(buffer, context); EXPECT_TRUE(result.second); EXPECT_TRUE(result.first->hasException()); @@ -94,7 +94,7 @@ TEST(HessianProtocolTest, deserializeRpcResult) { '\x90', // return type 0x04, 't', 'e', 's', 't', // return body })); - context->set_body_size(4); + context->setBodySize(4); auto result = serializer.deserializeRpcResult(buffer, context); EXPECT_TRUE(result.second); EXPECT_TRUE(result.first->hasException()); @@ -106,7 +106,7 @@ TEST(HessianProtocolTest, deserializeRpcResult) { '\x91', // return type 0x04, 't', 'e', 's', 't', // return body })); - context->set_body_size(4); + context->setBodySize(4); auto result = serializer.deserializeRpcResult(buffer, context); EXPECT_TRUE(result.second); EXPECT_TRUE(result.first->hasException()); @@ -119,7 +119,7 @@ TEST(HessianProtocolTest, deserializeRpcResult) { '\x94', // return type 0x05, 't', 'e', 's', 't', // return body })); - context->set_body_size(0); + context->setBodySize(0); EXPECT_THROW_WITH_MESSAGE(serializer.deserializeRpcResult(buffer, context), EnvoyException, "RpcResult size(1) large than body size(0)"); } @@ -131,7 +131,7 @@ TEST(HessianProtocolTest, deserializeRpcResult) { '\x96', // 
incorrect return type 0x05, 't', 'e', 's', 't', // return body })); - context->set_body_size(buffer.length()); + context->setBodySize(buffer.length()); EXPECT_THROW_WITH_MESSAGE(serializer.deserializeRpcResult(buffer, context), EnvoyException, "not supported return type 6"); } @@ -146,7 +146,7 @@ TEST(HessianProtocolTest, deserializeRpcResult) { std::string exception_string = fmt::format("RpcResult is no value, but the rest of the body size({}) not equal 0", buffer.length() - 1); - context->set_body_size(buffer.length()); + context->setBodySize(buffer.length()); EXPECT_THROW_WITH_MESSAGE(serializer.deserializeRpcResult(buffer, context), EnvoyException, exception_string); } @@ -180,7 +180,7 @@ TEST(HessianProtocolTest, serializeRpcResult) { size_t body_size = mock_response.size() + sizeof(mock_response_type); std::shared_ptr context = std::make_shared(); - context->set_body_size(body_size); + context->setBodySize(body_size); auto result = serializer.deserializeRpcResult(buffer, context); EXPECT_TRUE(result.first->hasException()); } diff --git a/test/extensions/filters/network/dubbo_proxy/dubbo_protocol_impl_test.cc b/test/extensions/filters/network/dubbo_proxy/dubbo_protocol_impl_test.cc index 9aadf58aeffd..0dafbc8fef70 100644 --- a/test/extensions/filters/network/dubbo_proxy/dubbo_protocol_impl_test.cc +++ b/test/extensions/filters/network/dubbo_proxy/dubbo_protocol_impl_test.cc @@ -44,9 +44,9 @@ TEST(DubboProtocolImplTest, Normal) { auto result = dubbo_protocol.decodeHeader(buffer, metadata); auto context = result.first; EXPECT_TRUE(result.second); - EXPECT_EQ(1, metadata->request_id()); - EXPECT_EQ(1, context->body_size()); - EXPECT_EQ(MessageType::Request, metadata->message_type()); + EXPECT_EQ(1, metadata->requestId()); + EXPECT_EQ(1, context->bodySize()); + EXPECT_EQ(MessageType::Request, metadata->messageType()); } // Normal dubbo response message @@ -59,9 +59,9 @@ TEST(DubboProtocolImplTest, Normal) { auto result = dubbo_protocol.decodeHeader(buffer, metadata); auto context = result.first; EXPECT_TRUE(result.second); - EXPECT_EQ(1, metadata->request_id()); - EXPECT_EQ(1, context->body_size()); - EXPECT_EQ(MessageType::Response, metadata->message_type()); + EXPECT_EQ(1, metadata->requestId()); + EXPECT_EQ(1, context->bodySize()); + EXPECT_EQ(MessageType::Response, metadata->messageType()); } } @@ -136,20 +136,20 @@ TEST(DubboProtocolImplTest, encode) { auto result = dubbo_protocol.decodeHeader(buffer, output_metadata); EXPECT_TRUE(result.second); - EXPECT_EQ(metadata.message_type(), output_metadata->message_type()); - EXPECT_EQ(metadata.response_status(), output_metadata->response_status()); - EXPECT_EQ(metadata.serialization_type(), output_metadata->serialization_type()); - EXPECT_EQ(metadata.request_id(), output_metadata->request_id()); + EXPECT_EQ(metadata.messageType(), output_metadata->messageType()); + EXPECT_EQ(metadata.responseStatus(), output_metadata->responseStatus()); + EXPECT_EQ(metadata.serializationType(), output_metadata->serializationType()); + EXPECT_EQ(metadata.requestId(), output_metadata->requestId()); Buffer::OwnedImpl body_buffer; size_t serialized_body_size = dubbo_protocol.serializer()->serializeRpcResult( body_buffer, content, RpcResponseType::ResponseWithValue); auto context = result.first; - EXPECT_EQ(context->body_size(), serialized_body_size); + EXPECT_EQ(context->bodySize(), serialized_body_size); EXPECT_EQ(false, context->hasAttachments()); EXPECT_EQ(0, context->attachments().size()); - buffer.drain(context->header_size()); + 
buffer.drain(context->headerSize()); EXPECT_TRUE(dubbo_protocol.decodeData(buffer, context, output_metadata)); } @@ -216,10 +216,10 @@ TEST(DubboProtocolImplTest, decode) { auto result = dubbo_protocol.decodeHeader(buffer, metadata); EXPECT_TRUE(result.second); auto context = result.first; - EXPECT_EQ(1, context->body_size()); - EXPECT_EQ(MessageType::Request, metadata->message_type()); - EXPECT_EQ(1, metadata->request_id()); - EXPECT_EQ(SerializationType::Hessian2, metadata->serialization_type()); + EXPECT_EQ(1, context->bodySize()); + EXPECT_EQ(MessageType::Request, metadata->messageType()); + EXPECT_EQ(1, metadata->requestId()); + EXPECT_EQ(SerializationType::Hessian2, metadata->serializationType()); buffer.drain(buffer.length()); } @@ -231,10 +231,10 @@ TEST(DubboProtocolImplTest, decode) { auto result = dubbo_protocol.decodeHeader(buffer, metadata); EXPECT_TRUE(result.second); auto context = result.first; - EXPECT_EQ(1, context->body_size()); - EXPECT_EQ(MessageType::Oneway, metadata->message_type()); - EXPECT_EQ(1, metadata->request_id()); - EXPECT_EQ(SerializationType::Hessian2, metadata->serialization_type()); + EXPECT_EQ(1, context->bodySize()); + EXPECT_EQ(MessageType::Oneway, metadata->messageType()); + EXPECT_EQ(1, metadata->requestId()); + EXPECT_EQ(SerializationType::Hessian2, metadata->serializationType()); } } diff --git a/test/extensions/filters/network/dubbo_proxy/metadata_test.cc b/test/extensions/filters/network/dubbo_proxy/metadata_test.cc index ab94547762f7..4ac1e14e6301 100644 --- a/test/extensions/filters/network/dubbo_proxy/metadata_test.cc +++ b/test/extensions/filters/network/dubbo_proxy/metadata_test.cc @@ -22,19 +22,19 @@ TEST(MessageMetadataTest, Fields) { EXPECT_TRUE(metadata.timeout().has_value()); invo->setMethodName("method"); - EXPECT_EQ("method", invo->method_name()); + EXPECT_EQ("method", invo->methodName()); - EXPECT_FALSE(invo->service_version().has_value()); - EXPECT_THROW(invo->service_version().value(), absl::bad_optional_access); + EXPECT_FALSE(invo->serviceVersion().has_value()); + EXPECT_THROW(invo->serviceVersion().value(), absl::bad_optional_access); invo->setServiceVersion("1.0.0"); - EXPECT_TRUE(invo->service_version().has_value()); - EXPECT_EQ("1.0.0", invo->service_version().value()); + EXPECT_TRUE(invo->serviceVersion().has_value()); + EXPECT_EQ("1.0.0", invo->serviceVersion().value()); - EXPECT_FALSE(invo->service_group().has_value()); - EXPECT_THROW(invo->service_group().value(), absl::bad_optional_access); + EXPECT_FALSE(invo->serviceGroup().has_value()); + EXPECT_THROW(invo->serviceGroup().value(), absl::bad_optional_access); invo->setServiceGroup("group"); - EXPECT_TRUE(invo->service_group().has_value()); - EXPECT_EQ("group", invo->service_group().value()); + EXPECT_TRUE(invo->serviceGroup().has_value()); + EXPECT_EQ("group", invo->serviceGroup().value()); } TEST(MessageMetadataTest, Headers) { diff --git a/test/extensions/filters/network/dubbo_proxy/router_test.cc b/test/extensions/filters/network/dubbo_proxy/router_test.cc index 7367a3341697..d812ac054605 100644 --- a/test/extensions/filters/network/dubbo_proxy/router_test.cc +++ b/test/extensions/filters/network/dubbo_proxy/router_test.cc @@ -380,7 +380,7 @@ TEST_F(DubboRouterTest, UnexpectedRouterDestroy) { buffer.add("test"); // Body auto ctx = static_cast(message_context_.get()); - ctx->message_origin_data().move(buffer, buffer.length()); + ctx->messageOriginData().move(buffer, buffer.length()); startRequest(MessageType::Request); connectUpstream(); destroyRouter(); diff --git 
a/test/extensions/filters/network/mongo_proxy/utility_test.cc b/test/extensions/filters/network/mongo_proxy/utility_test.cc index cdffb8608dc9..ad28e35e9cc1 100644 --- a/test/extensions/filters/network/mongo_proxy/utility_test.cc +++ b/test/extensions/filters/network/mongo_proxy/utility_test.cc @@ -137,7 +137,7 @@ TEST(QueryMessageInfoTest, MaxTime) { q.fullCollectionName("db.foo"); q.query(Bson::DocumentImpl::create()); QueryMessageInfo info(q); - EXPECT_EQ(0, info.max_time()); + EXPECT_EQ(0, info.maxTime()); } { @@ -145,7 +145,7 @@ TEST(QueryMessageInfoTest, MaxTime) { q.fullCollectionName("db.foo"); q.query(Bson::DocumentImpl::create()->addInt32("$maxTimeMS", 1212)); QueryMessageInfo info(q); - EXPECT_EQ(1212, info.max_time()); + EXPECT_EQ(1212, info.maxTime()); } { @@ -153,7 +153,7 @@ TEST(QueryMessageInfoTest, MaxTime) { q.fullCollectionName("db.foo"); q.query(Bson::DocumentImpl::create()->addInt64("$maxTimeMS", 1212)); QueryMessageInfo info(q); - EXPECT_EQ(1212, info.max_time()); + EXPECT_EQ(1212, info.maxTime()); } { @@ -161,7 +161,7 @@ TEST(QueryMessageInfoTest, MaxTime) { q.fullCollectionName("db.foo"); q.query(Bson::DocumentImpl::create()->addInt64("maxTimeMS", 2400)); QueryMessageInfo info(q); - EXPECT_EQ(2400, info.max_time()); + EXPECT_EQ(2400, info.maxTime()); } } diff --git a/test/extensions/filters/network/redis_proxy/conn_pool_impl_test.cc b/test/extensions/filters/network/redis_proxy/conn_pool_impl_test.cc index 4b686099dcfc..1d1694cdbfc3 100644 --- a/test/extensions/filters/network/redis_proxy/conn_pool_impl_test.cc +++ b/test/extensions/filters/network/redis_proxy/conn_pool_impl_test.cc @@ -134,7 +134,7 @@ class RedisConnPoolImplTest : public testing::Test, public Common::Redis::Client EXPECT_CALL(cm_.thread_local_cluster_.lb_, chooseHost(_)) .WillOnce( Invoke([&](Upstream::LoadBalancerContext* context) -> Upstream::HostConstSharedPtr { - EXPECT_EQ(context->computeHashKey().value(), MurmurHash::murmurHash2_64("hash_key")); + EXPECT_EQ(context->computeHashKey().value(), MurmurHash::murmurHash2("hash_key")); EXPECT_EQ(context->metadataMatchCriteria(), nullptr); EXPECT_EQ(context->downstreamConnection(), nullptr); return this->cm_.thread_local_cluster_.lb_.host_; @@ -230,7 +230,7 @@ class RedisConnPoolImplTest : public testing::Test, public Common::Redis::Client EXPECT_CALL(cm_.thread_local_cluster_.lb_, chooseHost(_)) .WillOnce( Invoke([&](Upstream::LoadBalancerContext* context) -> Upstream::HostConstSharedPtr { - EXPECT_EQ(context->computeHashKey().value(), MurmurHash::murmurHash2_64("hash_key")); + EXPECT_EQ(context->computeHashKey().value(), MurmurHash::murmurHash2("hash_key")); EXPECT_EQ(context->metadataMatchCriteria(), nullptr); EXPECT_EQ(context->downstreamConnection(), nullptr); auto redis_context = @@ -306,7 +306,7 @@ TEST_F(RedisConnPoolImplTest, Basic) { EXPECT_CALL(cm_.thread_local_cluster_.lb_, chooseHost(_)) .WillOnce(Invoke([&](Upstream::LoadBalancerContext* context) -> Upstream::HostConstSharedPtr { - EXPECT_EQ(context->computeHashKey().value(), MurmurHash::murmurHash2_64("hash_key")); + EXPECT_EQ(context->computeHashKey().value(), MurmurHash::murmurHash2("hash_key")); EXPECT_EQ(context->metadataMatchCriteria(), nullptr); EXPECT_EQ(context->downstreamConnection(), nullptr); return cm_.thread_local_cluster_.lb_.host_; @@ -337,7 +337,7 @@ TEST_F(RedisConnPoolImplTest, BasicRespVariant) { EXPECT_CALL(cm_.thread_local_cluster_.lb_, chooseHost(_)) .WillOnce(Invoke([&](Upstream::LoadBalancerContext* context) -> Upstream::HostConstSharedPtr { - 
EXPECT_EQ(context->computeHashKey().value(), MurmurHash::murmurHash2_64("hash_key")); + EXPECT_EQ(context->computeHashKey().value(), MurmurHash::murmurHash2("hash_key")); EXPECT_EQ(context->metadataMatchCriteria(), nullptr); EXPECT_EQ(context->downstreamConnection(), nullptr); return cm_.thread_local_cluster_.lb_.host_; @@ -367,7 +367,7 @@ TEST_F(RedisConnPoolImplTest, ClientRequestFailed) { EXPECT_CALL(cm_.thread_local_cluster_.lb_, chooseHost(_)) .WillOnce(Invoke([&](Upstream::LoadBalancerContext* context) -> Upstream::HostConstSharedPtr { - EXPECT_EQ(context->computeHashKey().value(), MurmurHash::murmurHash2_64("hash_key")); + EXPECT_EQ(context->computeHashKey().value(), MurmurHash::murmurHash2("hash_key")); EXPECT_EQ(context->metadataMatchCriteria(), nullptr); EXPECT_EQ(context->downstreamConnection(), nullptr); return cm_.thread_local_cluster_.lb_.host_; @@ -410,7 +410,7 @@ TEST_F(RedisConnPoolImplTest, Hashtagging) { auto expectHashKey = [](const std::string& s) { return [s](Upstream::LoadBalancerContext* context) -> Upstream::HostConstSharedPtr { - EXPECT_EQ(context->computeHashKey().value(), MurmurHash::murmurHash2_64(s)); + EXPECT_EQ(context->computeHashKey().value(), MurmurHash::murmurHash2(s)); return nullptr; }; }; @@ -441,7 +441,7 @@ TEST_F(RedisConnPoolImplTest, HashtaggingNotEnabled) { auto expectHashKey = [](const std::string& s) { return [s](Upstream::LoadBalancerContext* context) -> Upstream::HostConstSharedPtr { - EXPECT_EQ(context->computeHashKey().value(), MurmurHash::murmurHash2_64(s)); + EXPECT_EQ(context->computeHashKey().value(), MurmurHash::murmurHash2(s)); return nullptr; }; }; @@ -1186,7 +1186,7 @@ TEST_F(RedisConnPoolImplTest, MakeRequestAndRedirectFollowedByDelete) { MockPoolCallbacks callbacks; EXPECT_CALL(cm_.thread_local_cluster_.lb_, chooseHost(_)) .WillOnce(Invoke([&](Upstream::LoadBalancerContext* context) -> Upstream::HostConstSharedPtr { - EXPECT_EQ(context->computeHashKey().value(), MurmurHash::murmurHash2_64("hash_key")); + EXPECT_EQ(context->computeHashKey().value(), MurmurHash::murmurHash2("hash_key")); EXPECT_EQ(context->metadataMatchCriteria(), nullptr); EXPECT_EQ(context->downstreamConnection(), nullptr); return this->cm_.thread_local_cluster_.lb_.host_; diff --git a/test/extensions/filters/network/thrift_proxy/twitter_protocol_impl_test.cc b/test/extensions/filters/network/thrift_proxy/twitter_protocol_impl_test.cc index 4d57ab1b03d4..061fbe1ddb1b 100644 --- a/test/extensions/filters/network/thrift_proxy/twitter_protocol_impl_test.cc +++ b/test/extensions/filters/network/thrift_proxy/twitter_protocol_impl_test.cc @@ -99,8 +99,8 @@ class TwitterProtocolTest : public testing::Test { TestTwitterProtocolImpl proto; - metadata_->mutable_spans().emplace_back(trace_id, "", span_id, absl::optional(), - AnnotationList(), BinaryAnnotationList(), false); + metadata_->mutableSpans().emplace_back(trace_id, "", span_id, absl::optional(), + AnnotationList(), BinaryAnnotationList(), false); metadata_->headers().addCopy(Http::LowerCaseString("test-header"), "test-header-value"); proto.writeResponseHeaderForTest(buffer, *metadata_); @@ -724,7 +724,7 @@ TEST_F(TwitterProtocolTest, WriteResponseHeader) { headers.addCopy(Http::LowerCaseString("key1"), "value1"); headers.addCopy(Http::LowerCaseString("key2"), "value2"); - SpanList& spans = metadata_->mutable_spans(); + SpanList& spans = metadata_->mutableSpans(); spans.emplace_back(1, "s1", 100, absl::optional(10), AnnotationList({ Annotation(100000, "a1", {Endpoint(0xC0A80001, 0, "")}), @@ -924,8 +924,8 @@ 
TEST_F(TwitterProtocolTest, TestUpgradedWriteMessageBegin) { metadata_->setMethodName("message"); metadata_->setSequenceId(1); metadata_->setTraceId(1); - metadata_->mutable_spans().emplace_back(100, "", 100, absl::optional(), AnnotationList(), - BinaryAnnotationList(), false); + metadata_->mutableSpans().emplace_back(100, "", 100, absl::optional(), AnnotationList(), + BinaryAnnotationList(), false); { // Call diff --git a/test/extensions/tracers/zipkin/span_context_extractor_test.cc b/test/extensions/tracers/zipkin/span_context_extractor_test.cc index 0dfa4b8ade85..17977d4451c9 100644 --- a/test/extensions/tracers/zipkin/span_context_extractor_test.cc +++ b/test/extensions/tracers/zipkin/span_context_extractor_test.cc @@ -27,10 +27,10 @@ TEST(ZipkinSpanContextExtractorTest, Largest) { auto context = extractor.extractSpanContext(true); EXPECT_TRUE(context.second); EXPECT_EQ(3, context.first.id()); - EXPECT_EQ(2, context.first.parent_id()); + EXPECT_EQ(2, context.first.parentId()); EXPECT_TRUE(context.first.is128BitTraceId()); - EXPECT_EQ(1, context.first.trace_id()); - EXPECT_EQ(9, context.first.trace_id_high()); + EXPECT_EQ(1, context.first.traceId()); + EXPECT_EQ(9, context.first.traceIdHigh()); EXPECT_TRUE(context.first.sampled()); EXPECT_TRUE(extractor.extractSampled({Tracing::Reason::Sampling, false})); } @@ -42,10 +42,10 @@ TEST(ZipkinSpanContextExtractorTest, WithoutParentDebug) { auto context = extractor.extractSpanContext(true); EXPECT_TRUE(context.second); EXPECT_EQ(3, context.first.id()); - EXPECT_EQ(0, context.first.parent_id()); + EXPECT_EQ(0, context.first.parentId()); EXPECT_TRUE(context.first.is128BitTraceId()); - EXPECT_EQ(1, context.first.trace_id()); - EXPECT_EQ(9, context.first.trace_id_high()); + EXPECT_EQ(1, context.first.traceId()); + EXPECT_EQ(9, context.first.traceIdHigh()); EXPECT_TRUE(context.first.sampled()); EXPECT_TRUE(extractor.extractSampled({Tracing::Reason::Sampling, false})); } @@ -73,10 +73,10 @@ TEST(ZipkinSpanContextExtractorTest, DebugOnly) { auto context = extractor.extractSpanContext(true); EXPECT_FALSE(context.second); EXPECT_EQ(0, context.first.id()); - EXPECT_EQ(0, context.first.parent_id()); + EXPECT_EQ(0, context.first.parentId()); EXPECT_FALSE(context.first.is128BitTraceId()); - EXPECT_EQ(0, context.first.trace_id()); - EXPECT_EQ(0, context.first.trace_id_high()); + EXPECT_EQ(0, context.first.traceId()); + EXPECT_EQ(0, context.first.traceIdHigh()); EXPECT_FALSE(context.first.sampled()); EXPECT_TRUE(extractor.extractSampled({Tracing::Reason::Sampling, false})); } @@ -87,10 +87,10 @@ TEST(ZipkinSpanContextExtractorTest, Sampled) { auto context = extractor.extractSpanContext(true); EXPECT_FALSE(context.second); EXPECT_EQ(0, context.first.id()); - EXPECT_EQ(0, context.first.parent_id()); + EXPECT_EQ(0, context.first.parentId()); EXPECT_FALSE(context.first.is128BitTraceId()); - EXPECT_EQ(0, context.first.trace_id()); - EXPECT_EQ(0, context.first.trace_id_high()); + EXPECT_EQ(0, context.first.traceId()); + EXPECT_EQ(0, context.first.traceIdHigh()); EXPECT_FALSE(context.first.sampled()); EXPECT_TRUE(extractor.extractSampled({Tracing::Reason::Sampling, false})); } @@ -101,10 +101,10 @@ TEST(ZipkinSpanContextExtractorTest, SampledFalse) { auto context = extractor.extractSpanContext(true); EXPECT_FALSE(context.second); EXPECT_EQ(0, context.first.id()); - EXPECT_EQ(0, context.first.parent_id()); + EXPECT_EQ(0, context.first.parentId()); EXPECT_FALSE(context.first.is128BitTraceId()); - EXPECT_EQ(0, context.first.trace_id()); - EXPECT_EQ(0, 
context.first.trace_id_high()); + EXPECT_EQ(0, context.first.traceId()); + EXPECT_EQ(0, context.first.traceIdHigh()); EXPECT_FALSE(context.first.sampled()); EXPECT_FALSE(extractor.extractSampled({Tracing::Reason::Sampling, true})); } @@ -116,10 +116,10 @@ TEST(ZipkinSpanContextExtractorTest, IdNotYetSampled128) { auto context = extractor.extractSpanContext(true); EXPECT_TRUE(context.second); EXPECT_EQ(3, context.first.id()); - EXPECT_EQ(0, context.first.parent_id()); + EXPECT_EQ(0, context.first.parentId()); EXPECT_TRUE(context.first.is128BitTraceId()); - EXPECT_EQ(1, context.first.trace_id()); - EXPECT_EQ(9, context.first.trace_id_high()); + EXPECT_EQ(1, context.first.traceId()); + EXPECT_EQ(9, context.first.traceIdHigh()); EXPECT_TRUE(context.first.sampled()); EXPECT_FALSE(extractor.extractSampled({Tracing::Reason::Sampling, false})); } @@ -130,10 +130,10 @@ TEST(ZipkinSpanContextExtractorTest, IdsUnsampled) { auto context = extractor.extractSpanContext(true); EXPECT_TRUE(context.second); EXPECT_EQ(3, context.first.id()); - EXPECT_EQ(0, context.first.parent_id()); + EXPECT_EQ(0, context.first.parentId()); EXPECT_FALSE(context.first.is128BitTraceId()); - EXPECT_EQ(1, context.first.trace_id()); - EXPECT_EQ(0, context.first.trace_id_high()); + EXPECT_EQ(1, context.first.traceId()); + EXPECT_EQ(0, context.first.traceIdHigh()); EXPECT_TRUE(context.first.sampled()); EXPECT_FALSE(extractor.extractSampled({Tracing::Reason::Sampling, true})); } @@ -145,10 +145,10 @@ TEST(ZipkinSpanContextExtractorTest, ParentUnsampled) { auto context = extractor.extractSpanContext(true); EXPECT_TRUE(context.second); EXPECT_EQ(3, context.first.id()); - EXPECT_EQ(2, context.first.parent_id()); + EXPECT_EQ(2, context.first.parentId()); EXPECT_FALSE(context.first.is128BitTraceId()); - EXPECT_EQ(1, context.first.trace_id()); - EXPECT_EQ(0, context.first.trace_id_high()); + EXPECT_EQ(1, context.first.traceId()); + EXPECT_EQ(0, context.first.traceIdHigh()); EXPECT_TRUE(context.first.sampled()); EXPECT_FALSE(extractor.extractSampled({Tracing::Reason::Sampling, true})); } @@ -160,10 +160,10 @@ TEST(ZipkinSpanContextExtractorTest, ParentDebug) { auto context = extractor.extractSpanContext(true); EXPECT_TRUE(context.second); EXPECT_EQ(3, context.first.id()); - EXPECT_EQ(2, context.first.parent_id()); + EXPECT_EQ(2, context.first.parentId()); EXPECT_FALSE(context.first.is128BitTraceId()); - EXPECT_EQ(1, context.first.trace_id()); - EXPECT_EQ(0, context.first.trace_id_high()); + EXPECT_EQ(1, context.first.traceId()); + EXPECT_EQ(0, context.first.traceIdHigh()); EXPECT_TRUE(context.first.sampled()); EXPECT_TRUE(extractor.extractSampled({Tracing::Reason::Sampling, false})); } @@ -174,10 +174,10 @@ TEST(ZipkinSpanContextExtractorTest, IdsWithDebug) { auto context = extractor.extractSpanContext(true); EXPECT_TRUE(context.second); EXPECT_EQ(3, context.first.id()); - EXPECT_EQ(0, context.first.parent_id()); + EXPECT_EQ(0, context.first.parentId()); EXPECT_FALSE(context.first.is128BitTraceId()); - EXPECT_EQ(1, context.first.trace_id()); - EXPECT_EQ(0, context.first.trace_id_high()); + EXPECT_EQ(1, context.first.traceId()); + EXPECT_EQ(0, context.first.traceIdHigh()); EXPECT_TRUE(context.first.sampled()); EXPECT_TRUE(extractor.extractSampled({Tracing::Reason::Sampling, false})); } @@ -188,10 +188,10 @@ TEST(ZipkinSpanContextExtractorTest, WithoutSampled) { auto context = extractor.extractSpanContext(false); EXPECT_TRUE(context.second); EXPECT_EQ(3, context.first.id()); - EXPECT_EQ(0, context.first.parent_id()); + EXPECT_EQ(0, 
context.first.parentId()); EXPECT_FALSE(context.first.is128BitTraceId()); - EXPECT_EQ(1, context.first.trace_id()); - EXPECT_EQ(0, context.first.trace_id_high()); + EXPECT_EQ(1, context.first.traceId()); + EXPECT_EQ(0, context.first.traceIdHigh()); EXPECT_FALSE(context.first.sampled()); EXPECT_TRUE(extractor.extractSampled({Tracing::Reason::Sampling, true})); } diff --git a/test/extensions/tracers/zipkin/tracer_test.cc b/test/extensions/tracers/zipkin/tracer_test.cc index a085e7cc721b..549437dc0bb4 100644 --- a/test/extensions/tracers/zipkin/tracer_test.cc +++ b/test/extensions/tracers/zipkin/tracer_test.cc @@ -185,8 +185,8 @@ TEST_F(ZipkinTracerTest, SpanCreation) { ON_CALL(config, operationName()).WillByDefault(Return(Tracing::OperationName::Ingress)); TestRandomGenerator generator; const uint64_t generated_parent_id = generator.random(); - SpanContext modified_root_span_context(root_span_context.trace_id_high(), - root_span_context.trace_id(), root_span_context.id(), + SpanContext modified_root_span_context(root_span_context.traceIdHigh(), + root_span_context.traceId(), root_span_context.id(), generated_parent_id, root_span_context.sampled()); SpanPtr new_shared_context_span = tracer.startSpan(config, "new_shared_context_span", timestamp, modified_root_span_context); @@ -202,7 +202,7 @@ TEST_F(ZipkinTracerTest, SpanCreation) { // The parent should be the same as in the CS side EXPECT_TRUE(new_shared_context_span->isSetParentId()); - EXPECT_EQ(modified_root_span_context.parent_id(), new_shared_context_span->parentId()); + EXPECT_EQ(modified_root_span_context.parentId(), new_shared_context_span->parentId()); // span timestamp should not be set (it was set in the CS side) EXPECT_FALSE(new_shared_context_span->isSetTimestamp()); diff --git a/test/test_common/logging.cc b/test/test_common/logging.cc index ba604854777c..6d259a75f12b 100644 --- a/test/test_common/logging.cc +++ b/test/test_common/logging.cc @@ -28,12 +28,12 @@ LogRecordingSink::LogRecordingSink(Logger::DelegatingLogSinkSharedPtr log_sink) LogRecordingSink::~LogRecordingSink() = default; void LogRecordingSink::log(absl::string_view msg) { - previous_delegate()->log(msg); + previousDelegate()->log(msg); absl::MutexLock ml(&mtx_); messages_.push_back(std::string(msg)); } -void LogRecordingSink::flush() { previous_delegate()->flush(); } +void LogRecordingSink::flush() { previousDelegate()->flush(); } } // namespace Envoy diff --git a/test/test_common/logging.h b/test/test_common/logging.h index 7a080d903d58..d2c79a0a5901 100644 --- a/test/test_common/logging.h +++ b/test/test_common/logging.h @@ -94,7 +94,7 @@ using ExpectedLogMessages = std::vector; ASSERT_FALSE(expected_messages.empty()) << "Expected messages cannot be empty."; \ Envoy::LogLevelSetter save_levels(spdlog::level::trace); \ Envoy::Logger::DelegatingLogSinkSharedPtr sink_ptr = Envoy::Logger::Registry::getSink(); \ - sink_ptr->set_should_escape(escaped); \ + sink_ptr->setShouldEscape(escaped); \ Envoy::LogRecordingSink log_recorder(sink_ptr); \ stmt; \ if (log_recorder.messages().empty()) { \ From 10fed474642a90760b2a73f866087ef12d67e542 Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Mon, 20 Jul 2020 13:09:43 -0700 Subject: [PATCH 692/909] http: add removeIf() header map function (#12160) To be used by out of tree extensions. 
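The new call is meant for out-of-tree filters that decide header by header whether an entry should be dropped. A minimal usage sketch, assuming only the predicate signature added in this patch (a bool predicate over a const HeaderEntry&) plus the usual includes such as "envoy/http/header_map.h" and "absl/strings/match.h"; the "x-internal-" prefix and the free function wrapping the call are illustrative, not part of this change:

  // Drop every header whose key starts with an assumed internal prefix and
  // report how many entries were erased, mirroring remove()/removePrefix().
  size_t removeInternalHeaders(Envoy::Http::RequestHeaderMap& headers) {
    return headers.removeIf([](const Envoy::Http::HeaderEntry& entry) -> bool {
      return absl::StartsWith(entry.key().getStringView(), "x-internal-");
    });
  }

With this change remove() and removePrefix() are reimplemented on top of removeIf(), so all three removal paths share the same erase loop.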
Signed-off-by: Matt Klein --- include/envoy/http/header_map.h | 8 +++++ source/common/http/header_map_impl.cc | 37 ++++++++++-------------- source/common/http/header_map_impl.h | 4 +++ test/common/http/header_map_impl_test.cc | 25 ++++++++++++++-- test/test_common/utility.h | 5 ++++ 5 files changed, 56 insertions(+), 23 deletions(-) diff --git a/include/envoy/http/header_map.h b/include/envoy/http/header_map.h index 90466744ce42..8692e3526735 100644 --- a/include/envoy/http/header_map.h +++ b/include/envoy/http/header_map.h @@ -547,6 +547,14 @@ class HeaderMap { */ virtual size_t remove(const LowerCaseString& key) PURE; + /** + * Remove all instances of headers where the header matches the predicate. + * @param predicate supplies the predicate to match headers against. + * @return the number of headers removed. + */ + using HeaderMatchPredicate = std::function; + virtual size_t removeIf(const HeaderMatchPredicate& predicate) PURE; + /** * Remove all instances of headers where the key begins with the supplied prefix. * @param prefix supplies the prefix to match header keys against. diff --git a/source/common/http/header_map_impl.cc b/source/common/http/header_map_impl.cc index 35572390820f..c97d51e810f1 100644 --- a/source/common/http/header_map_impl.cc +++ b/source/common/http/header_map_impl.cc @@ -483,28 +483,10 @@ void HeaderMapImpl::clear() { cached_byte_size_ = 0; } -size_t HeaderMapImpl::remove(const LowerCaseString& key) { +size_t HeaderMapImpl::removeIf(const HeaderMap::HeaderMatchPredicate& predicate) { const size_t old_size = headers_.size(); - auto lookup = staticLookup(key.get()); - if (lookup.has_value()) { - removeInline(lookup.value().entry_); - } else { - for (auto i = headers_.begin(); i != headers_.end();) { - if (i->key() == key.get().c_str()) { - subtractSize(i->key().size() + i->value().size()); - i = headers_.erase(i); - } else { - ++i; - } - } - } - return old_size - headers_.size(); -} - -size_t HeaderMapImpl::removePrefix(const LowerCaseString& prefix) { - const size_t old_size = headers_.size(); - headers_.remove_if([&prefix, this](const HeaderEntryImpl& entry) { - bool to_remove = absl::StartsWith(entry.key().getStringView(), prefix.get()); + headers_.remove_if([&predicate, this](const HeaderEntryImpl& entry) { + const bool to_remove = predicate(entry); if (to_remove) { // If this header should be removed, make sure any references in the // static lookup table are cleared as well. @@ -525,6 +507,19 @@ size_t HeaderMapImpl::removePrefix(const LowerCaseString& prefix) { return old_size - headers_.size(); } +size_t HeaderMapImpl::remove(const LowerCaseString& key) { + // TODO(mattklein123): When the lazy map is implemented we can stop using removeIf() here. 
+ return HeaderMapImpl::removeIf([&key](const HeaderEntry& entry) -> bool { + return key.get() == entry.key().getStringView(); + }); +} + +size_t HeaderMapImpl::removePrefix(const LowerCaseString& prefix) { + return HeaderMapImpl::removeIf([&prefix](const HeaderEntry& entry) -> bool { + return absl::StartsWith(entry.key().getStringView(), prefix.get()); + }); +} + void HeaderMapImpl::dumpState(std::ostream& os, int indent_level) const { iterate([&os, spaces = spacesForLevel(indent_level)](const HeaderEntry& header) -> HeaderMap::Iterate { diff --git a/source/common/http/header_map_impl.h b/source/common/http/header_map_impl.h index f9dba6b56ab5..d4cb88bdcacb 100644 --- a/source/common/http/header_map_impl.h +++ b/source/common/http/header_map_impl.h @@ -90,6 +90,7 @@ class HeaderMapImpl : NonCopyable { void iterateReverse(HeaderMap::ConstIterateCb cb) const; void clear(); size_t remove(const LowerCaseString& key); + size_t removeIf(const HeaderMap::HeaderMatchPredicate& predicate); size_t removePrefix(const LowerCaseString& key); size_t size() const { return headers_.size(); } bool empty() const { return headers_.empty(); } @@ -304,6 +305,9 @@ template class TypedHeaderMapImpl : public HeaderMapImpl, publ } void clear() override { HeaderMapImpl::clear(); } size_t remove(const LowerCaseString& key) override { return HeaderMapImpl::remove(key); } + size_t removeIf(const HeaderMap::HeaderMatchPredicate& predicate) override { + return HeaderMapImpl::removeIf(predicate); + } size_t removePrefix(const LowerCaseString& key) override { return HeaderMapImpl::removePrefix(key); } diff --git a/test/common/http/header_map_impl_test.cc b/test/common/http/header_map_impl_test.cc index 6f5552156b40..2862ae564c20 100644 --- a/test/common/http/header_map_impl_test.cc +++ b/test/common/http/header_map_impl_test.cc @@ -520,7 +520,28 @@ TEST(HeaderMapImplTest, Remove) { EXPECT_EQ(0UL, headers.remove(Headers::get().ContentLength)); } -TEST(HeaderMapImplTest, RemoveRegex) { +TEST(HeaderMapImplTest, RemoveIf) { + LowerCaseString key1 = LowerCaseString("X-postfix-foo"); + LowerCaseString key2 = LowerCaseString("X-postfix-"); + LowerCaseString key3 = LowerCaseString("x-postfix-eep"); + + TestRequestHeaderMapImpl headers; + headers.addReference(key1, "value"); + headers.addReference(key2, "value"); + headers.addReference(key3, "value"); + + EXPECT_EQ(0UL, headers.removeIf([](const HeaderEntry&) -> bool { return false; })); + + EXPECT_EQ(2UL, headers.removeIf([](const HeaderEntry& entry) -> bool { + return absl::EndsWith(entry.key().getStringView(), "foo") || + absl::EndsWith(entry.key().getStringView(), "eep"); + })); + + TestRequestHeaderMapImpl expected{{"X-postfix-", "value"}}; + EXPECT_EQ(expected, headers); +} + +TEST(HeaderMapImplTest, RemovePrefix) { // These will match. 
LowerCaseString key1 = LowerCaseString("X-prefix-foo"); LowerCaseString key3 = LowerCaseString("X-Prefix-"); @@ -552,7 +573,7 @@ TEST(HeaderMapImplTest, RemoveRegex) { EXPECT_EQ(nullptr, headers.get(key2)); EXPECT_EQ(nullptr, headers.get(key4)); - // Add inline and remove by regex + // Add inline and remove by prefix headers.setContentLength(5); EXPECT_EQ("5", headers.getContentLengthValue()); EXPECT_EQ(1UL, headers.size()); diff --git a/test/test_common/utility.h b/test/test_common/utility.h index 804f34b82219..4b2788e275f2 100644 --- a/test/test_common/utility.h +++ b/test/test_common/utility.h @@ -920,6 +920,11 @@ template class TestHeaderMapImplBase : public Inte header_map_->verifyByteSizeInternalForTest(); return headers_removed; } + size_t removeIf(const HeaderMap::HeaderMatchPredicate& predicate) override { + size_t headers_removed = header_map_->removeIf(predicate); + header_map_->verifyByteSizeInternalForTest(); + return headers_removed; + } size_t removePrefix(const LowerCaseString& key) override { size_t headers_removed = header_map_->removePrefix(key); header_map_->verifyByteSizeInternalForTest(); From 3e92500dfae541b95f22823b03dfdd711e80538a Mon Sep 17 00:00:00 2001 From: xdzhai Date: Mon, 20 Jul 2020 14:19:03 -0700 Subject: [PATCH 693/909] hds: change HdsCluster::cluster_ to a value instead of reference (#12137) Signed-off-by: Xiaodong Zhai --- source/common/upstream/health_discovery_service.cc | 14 +++++++------- source/common/upstream/health_discovery_service.h | 4 ++-- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/source/common/upstream/health_discovery_service.cc b/source/common/upstream/health_discovery_service.cc index a4b084d708ca..a0a585138d4f 100644 --- a/source/common/upstream/health_discovery_service.cc +++ b/source/common/upstream/health_discovery_service.cc @@ -175,10 +175,10 @@ void HdsDelegate::processMessage( ENVOY_LOG(debug, "New HdsCluster config {} ", cluster_config.DebugString()); // Create HdsCluster - hds_clusters_.emplace_back(new HdsCluster(admin_, runtime_, cluster_config, bind_config, - store_stats_, ssl_context_manager_, false, - info_factory_, cm_, local_info_, dispatcher_, random_, - singleton_manager_, tls_, validation_visitor_, api_)); + hds_clusters_.emplace_back( + new HdsCluster(admin_, runtime_, std::move(cluster_config), bind_config, store_stats_, + ssl_context_manager_, false, info_factory_, cm_, local_info_, dispatcher_, + random_, singleton_manager_, tls_, validation_visitor_, api_)); hds_clusters_.back()->initialize([] {}); hds_clusters_.back()->startHealthchecks(access_log_manager_, runtime_, random_, dispatcher_, @@ -233,7 +233,7 @@ void HdsDelegate::onRemoteClose(Grpc::Status::GrpcStatus status, const std::stri } HdsCluster::HdsCluster(Server::Admin& admin, Runtime::Loader& runtime, - const envoy::config::cluster::v3::Cluster& cluster, + envoy::config::cluster::v3::Cluster cluster, const envoy::config::core::v3::BindConfig& bind_config, Stats::Store& stats, Ssl::ContextManager& ssl_context_manager, bool added_via_api, ClusterInfoFactory& info_factory, ClusterManager& cm, @@ -241,7 +241,7 @@ HdsCluster::HdsCluster(Server::Admin& admin, Runtime::Loader& runtime, Random::RandomGenerator& random, Singleton::Manager& singleton_manager, ThreadLocal::SlotAllocator& tls, ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api) - : runtime_(runtime), cluster_(cluster), bind_config_(bind_config), stats_(stats), + : runtime_(runtime), cluster_(std::move(cluster)), bind_config_(bind_config), stats_(stats), 
ssl_context_manager_(ssl_context_manager), added_via_api_(added_via_api), initial_hosts_(new HostVector()), validation_visitor_(validation_visitor) { ENVOY_LOG(debug, "Creating an HdsCluster"); @@ -251,7 +251,7 @@ HdsCluster::HdsCluster(Server::Admin& admin, Runtime::Loader& runtime, {admin, runtime_, cluster_, bind_config_, stats_, ssl_context_manager_, added_via_api_, cm, local_info, dispatcher, random, singleton_manager, tls, validation_visitor, api}); - for (const auto& host : cluster.load_assignment().endpoints(0).lb_endpoints()) { + for (const auto& host : cluster_.load_assignment().endpoints(0).lb_endpoints()) { initial_hosts_->emplace_back( new HostImpl(info_, "", Network::Address::resolveProtoAddress(host.endpoint().address()), nullptr, 1, envoy::config::core::v3::Locality().default_instance(), diff --git a/source/common/upstream/health_discovery_service.h b/source/common/upstream/health_discovery_service.h index 003ed4bb2dcd..6f21bc0701d3 100644 --- a/source/common/upstream/health_discovery_service.h +++ b/source/common/upstream/health_discovery_service.h @@ -42,7 +42,7 @@ class HdsCluster : public Cluster, Logger::Loggable { public: static ClusterSharedPtr create(); HdsCluster(Server::Admin& admin, Runtime::Loader& runtime, - const envoy::config::cluster::v3::Cluster& cluster, + envoy::config::cluster::v3::Cluster cluster, const envoy::config::core::v3::BindConfig& bind_config, Stats::Store& stats, Ssl::ContextManager& ssl_context_manager, bool added_via_api, ClusterInfoFactory& info_factory, ClusterManager& cm, @@ -78,7 +78,7 @@ class HdsCluster : public Cluster, Logger::Loggable { std::function initialization_complete_callback_; Runtime::Loader& runtime_; - const envoy::config::cluster::v3::Cluster& cluster_; + const envoy::config::cluster::v3::Cluster cluster_; const envoy::config::core::v3::BindConfig& bind_config_; Stats::Store& stats_; Ssl::ContextManager& ssl_context_manager_; From f76e2bb9902b9caac17d62d192aeda39b6ba364b Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Mon, 20 Jul 2020 18:26:28 -0400 Subject: [PATCH 694/909] cleanup: removing tracer well_known_name file (#12187) Signed-off-by: Alyssa Wilk --- source/extensions/tracers/BUILD | 17 ---------- source/extensions/tracers/datadog/BUILD | 2 -- source/extensions/tracers/datadog/config.cc | 3 +- .../tracers/datadog/datadog_tracer_impl.cc | 4 +-- source/extensions/tracers/dynamic_ot/BUILD | 1 - .../extensions/tracers/dynamic_ot/config.cc | 3 +- source/extensions/tracers/lightstep/BUILD | 2 -- source/extensions/tracers/lightstep/config.cc | 3 +- .../lightstep/lightstep_tracer_impl.cc | 4 +-- source/extensions/tracers/opencensus/BUILD | 1 - .../extensions/tracers/opencensus/config.cc | 3 +- source/extensions/tracers/well_known_names.h | 34 ------------------- source/extensions/tracers/xray/BUILD | 1 - source/extensions/tracers/xray/config.cc | 3 +- source/extensions/tracers/zipkin/BUILD | 2 -- source/extensions/tracers/zipkin/config.cc | 3 +- .../tracers/zipkin/zipkin_tracer_impl.cc | 3 +- test/per_file_coverage.sh | 1 - 18 files changed, 9 insertions(+), 81 deletions(-) delete mode 100644 source/extensions/tracers/BUILD delete mode 100644 source/extensions/tracers/well_known_names.h diff --git a/source/extensions/tracers/BUILD b/source/extensions/tracers/BUILD deleted file mode 100644 index 06456dbbcb5e..000000000000 --- a/source/extensions/tracers/BUILD +++ /dev/null @@ -1,17 +0,0 @@ -load( - "//bazel:envoy_build_system.bzl", - "envoy_cc_library", - "envoy_package", -) - -licenses(["notice"]) # Apache 2 - 
-envoy_package() - -envoy_cc_library( - name = "well_known_names", - hdrs = ["well_known_names.h"], - deps = [ - "//source/common/singleton:const_singleton", - ], -) diff --git a/source/extensions/tracers/datadog/BUILD b/source/extensions/tracers/datadog/BUILD index d09597b5eaf9..325f4345a717 100644 --- a/source/extensions/tracers/datadog/BUILD +++ b/source/extensions/tracers/datadog/BUILD @@ -26,7 +26,6 @@ envoy_cc_library( "//source/common/tracing:http_tracer_lib", "//source/common/upstream:cluster_update_tracker_lib", "//source/common/version:version_lib", - "//source/extensions/tracers:well_known_names", "//source/extensions/tracers/common/ot:opentracing_driver_lib", "@envoy_api//envoy/config/trace/v3:pkg_cc_proto", ], @@ -39,7 +38,6 @@ envoy_cc_extension( security_posture = "robust_to_untrusted_downstream", deps = [ ":datadog_tracer_lib", - "//source/extensions/tracers:well_known_names", "//source/extensions/tracers/common:factory_base_lib", "@envoy_api//envoy/config/trace/v3:pkg_cc_proto", ], diff --git a/source/extensions/tracers/datadog/config.cc b/source/extensions/tracers/datadog/config.cc index e03a6d82573c..1113708d5fde 100644 --- a/source/extensions/tracers/datadog/config.cc +++ b/source/extensions/tracers/datadog/config.cc @@ -8,7 +8,6 @@ #include "common/tracing/http_tracer_impl.h" #include "extensions/tracers/datadog/datadog_tracer_impl.h" -#include "extensions/tracers/well_known_names.h" #include "datadog/opentracing.h" @@ -17,7 +16,7 @@ namespace Extensions { namespace Tracers { namespace Datadog { -DatadogTracerFactory::DatadogTracerFactory() : FactoryBase(TracerNames::get().Datadog) {} +DatadogTracerFactory::DatadogTracerFactory() : FactoryBase("envoy.tracers.datadog") {} Tracing::HttpTracerSharedPtr DatadogTracerFactory::createHttpTracerTyped( const envoy::config::trace::v3::DatadogConfig& proto_config, diff --git a/source/extensions/tracers/datadog/datadog_tracer_impl.cc b/source/extensions/tracers/datadog/datadog_tracer_impl.cc index a2be59579ca8..3635e490d659 100644 --- a/source/extensions/tracers/datadog/datadog_tracer_impl.cc +++ b/source/extensions/tracers/datadog/datadog_tracer_impl.cc @@ -11,8 +11,6 @@ #include "common/tracing/http_tracer_impl.h" #include "common/version/version.h" -#include "extensions/tracers/well_known_names.h" - namespace Envoy { namespace Extensions { namespace Tracers { @@ -30,7 +28,7 @@ Driver::Driver(const envoy::config::trace::v3::DatadogConfig& datadog_config, POOL_COUNTER_PREFIX(scope, "tracing.datadog."))}, tls_(tls.allocateSlot()) { - Config::Utility::checkCluster(TracerNames::get().Datadog, datadog_config.collector_cluster(), cm_, + Config::Utility::checkCluster("envoy.tracers.datadog", datadog_config.collector_cluster(), cm_, /* allow_added_via_api */ true); cluster_ = datadog_config.collector_cluster(); diff --git a/source/extensions/tracers/dynamic_ot/BUILD b/source/extensions/tracers/dynamic_ot/BUILD index bd5a269fc130..eb9cc5ee24c6 100644 --- a/source/extensions/tracers/dynamic_ot/BUILD +++ b/source/extensions/tracers/dynamic_ot/BUILD @@ -32,7 +32,6 @@ envoy_cc_extension( security_posture = "robust_to_untrusted_downstream", deps = [ ":dynamic_opentracing_driver_lib", - "//source/extensions/tracers:well_known_names", "//source/extensions/tracers/common:factory_base_lib", "@envoy_api//envoy/config/trace/v3:pkg_cc_proto", ], diff --git a/source/extensions/tracers/dynamic_ot/config.cc b/source/extensions/tracers/dynamic_ot/config.cc index c9667ac2a5d6..f8ddf4ceeeb5 100644 --- a/source/extensions/tracers/dynamic_ot/config.cc +++ 
b/source/extensions/tracers/dynamic_ot/config.cc @@ -8,7 +8,6 @@ #include "common/tracing/http_tracer_impl.h" #include "extensions/tracers/dynamic_ot/dynamic_opentracing_driver_impl.h" -#include "extensions/tracers/well_known_names.h" namespace Envoy { namespace Extensions { @@ -16,7 +15,7 @@ namespace Tracers { namespace DynamicOt { DynamicOpenTracingTracerFactory::DynamicOpenTracingTracerFactory() - : FactoryBase(TracerNames::get().DynamicOt) {} + : FactoryBase("envoy.tracers.dynamic_ot") {} Tracing::HttpTracerSharedPtr DynamicOpenTracingTracerFactory::createHttpTracerTyped( const envoy::config::trace::v3::DynamicOtConfig& proto_config, diff --git a/source/extensions/tracers/lightstep/BUILD b/source/extensions/tracers/lightstep/BUILD index 840162fe4f37..1fb5d0e30171 100644 --- a/source/extensions/tracers/lightstep/BUILD +++ b/source/extensions/tracers/lightstep/BUILD @@ -26,7 +26,6 @@ envoy_cc_library( "//source/common/stats:symbol_table_lib", "//source/common/tracing:http_tracer_lib", "//source/common/upstream:cluster_update_tracker_lib", - "//source/extensions/tracers:well_known_names", "//source/extensions/tracers/common/ot:opentracing_driver_lib", "@envoy_api//envoy/config/trace/v3:pkg_cc_proto", ], @@ -39,7 +38,6 @@ envoy_cc_extension( security_posture = "robust_to_untrusted_downstream", deps = [ ":lightstep_tracer_lib", - "//source/extensions/tracers:well_known_names", "//source/extensions/tracers/common:factory_base_lib", "@envoy_api//envoy/config/trace/v3:pkg_cc_proto", ], diff --git a/source/extensions/tracers/lightstep/config.cc b/source/extensions/tracers/lightstep/config.cc index 52509819dcaf..3a636b76dd9a 100644 --- a/source/extensions/tracers/lightstep/config.cc +++ b/source/extensions/tracers/lightstep/config.cc @@ -8,7 +8,6 @@ #include "common/tracing/http_tracer_impl.h" #include "extensions/tracers/lightstep/lightstep_tracer_impl.h" -#include "extensions/tracers/well_known_names.h" #include "lightstep/tracer.h" @@ -17,7 +16,7 @@ namespace Extensions { namespace Tracers { namespace Lightstep { -LightstepTracerFactory::LightstepTracerFactory() : FactoryBase(TracerNames::get().Lightstep) {} +LightstepTracerFactory::LightstepTracerFactory() : FactoryBase("envoy.tracers.lightstep") {} Tracing::HttpTracerSharedPtr LightstepTracerFactory::createHttpTracerTyped( const envoy::config::trace::v3::LightstepConfig& proto_config, diff --git a/source/extensions/tracers/lightstep/lightstep_tracer_impl.cc b/source/extensions/tracers/lightstep/lightstep_tracer_impl.cc index 9cafe7e8a9fa..e66a3c355845 100644 --- a/source/extensions/tracers/lightstep/lightstep_tracer_impl.cc +++ b/source/extensions/tracers/lightstep/lightstep_tracer_impl.cc @@ -16,8 +16,6 @@ #include "common/http/message_impl.h" #include "common/tracing/http_tracer_impl.h" -#include "extensions/tracers/well_known_names.h" - namespace Envoy { namespace Extensions { namespace Tracers { @@ -198,7 +196,7 @@ LightStepDriver::LightStepDriver(const envoy::config::trace::v3::LightstepConfig pool_.add(lightstep::CollectorServiceFullName()), pool_.add(lightstep::CollectorMethodName())} { - Config::Utility::checkCluster(TracerNames::get().Lightstep, lightstep_config.collector_cluster(), + Config::Utility::checkCluster("envoy.tracers.lightstep", lightstep_config.collector_cluster(), cm_, /* allow_added_via_api */ true); cluster_ = lightstep_config.collector_cluster(); diff --git a/source/extensions/tracers/opencensus/BUILD b/source/extensions/tracers/opencensus/BUILD index eb8ee9cb879e..3494746500a1 100644 --- 
a/source/extensions/tracers/opencensus/BUILD +++ b/source/extensions/tracers/opencensus/BUILD @@ -19,7 +19,6 @@ envoy_cc_extension( security_posture = "robust_to_untrusted_downstream", deps = [ ":opencensus_tracer_impl", - "//source/extensions/tracers:well_known_names", "//source/extensions/tracers/common:factory_base_lib", "@envoy_api//envoy/config/trace/v3:pkg_cc_proto", ], diff --git a/source/extensions/tracers/opencensus/config.cc b/source/extensions/tracers/opencensus/config.cc index af778ad04f7c..24a439a98a65 100644 --- a/source/extensions/tracers/opencensus/config.cc +++ b/source/extensions/tracers/opencensus/config.cc @@ -7,14 +7,13 @@ #include "common/tracing/http_tracer_impl.h" #include "extensions/tracers/opencensus/opencensus_tracer_impl.h" -#include "extensions/tracers/well_known_names.h" namespace Envoy { namespace Extensions { namespace Tracers { namespace OpenCensus { -OpenCensusTracerFactory::OpenCensusTracerFactory() : FactoryBase(TracerNames::get().OpenCensus) {} +OpenCensusTracerFactory::OpenCensusTracerFactory() : FactoryBase("envoy.tracers.opencensus") {} Tracing::HttpTracerSharedPtr OpenCensusTracerFactory::createHttpTracerTyped( const envoy::config::trace::v3::OpenCensusConfig& proto_config, diff --git a/source/extensions/tracers/well_known_names.h b/source/extensions/tracers/well_known_names.h deleted file mode 100644 index 8a83cdf21d02..000000000000 --- a/source/extensions/tracers/well_known_names.h +++ /dev/null @@ -1,34 +0,0 @@ -#pragma once -#include - -#include "common/singleton/const_singleton.h" - -namespace Envoy { -namespace Extensions { -namespace Tracers { - -/** - * Well-known tracer names. - * NOTE: New tracers should use the well known name: envoy.tracers.name. - */ -class TracerNameValues { -public: - // Lightstep tracer - const std::string Lightstep = "envoy.tracers.lightstep"; - // Zipkin tracer - const std::string Zipkin = "envoy.tracers.zipkin"; - // Dynamic tracer - const std::string DynamicOt = "envoy.tracers.dynamic_ot"; - // Datadog tracer - const std::string Datadog = "envoy.tracers.datadog"; - // OpenCensus tracer - const std::string OpenCensus = "envoy.tracers.opencensus"; - // AWS XRay tracer - const std::string XRay = "envoy.tracers.xray"; -}; - -using TracerNames = ConstSingleton; - -} // namespace Tracers -} // namespace Extensions -} // namespace Envoy diff --git a/source/extensions/tracers/xray/BUILD b/source/extensions/tracers/xray/BUILD index 09fa6ea67191..a186e661eaad 100644 --- a/source/extensions/tracers/xray/BUILD +++ b/source/extensions/tracers/xray/BUILD @@ -61,7 +61,6 @@ envoy_cc_extension( deps = [ ":xray_lib", "//source/common/config:datasource_lib", - "//source/extensions/tracers:well_known_names", "//source/extensions/tracers/common:factory_base_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/config/trace/v3:pkg_cc_proto", diff --git a/source/extensions/tracers/xray/config.cc b/source/extensions/tracers/xray/config.cc index ad4bfc0ebcfe..b9af01f887ad 100644 --- a/source/extensions/tracers/xray/config.cc +++ b/source/extensions/tracers/xray/config.cc @@ -11,7 +11,6 @@ #include "common/config/datasource.h" #include "common/tracing/http_tracer_impl.h" -#include "extensions/tracers/well_known_names.h" #include "extensions/tracers/xray/xray_tracer_impl.h" namespace Envoy { @@ -19,7 +18,7 @@ namespace Extensions { namespace Tracers { namespace XRay { -XRayTracerFactory::XRayTracerFactory() : FactoryBase(TracerNames::get().XRay) {} +XRayTracerFactory::XRayTracerFactory() : 
FactoryBase("envoy.tracers.xray") {} Tracing::HttpTracerSharedPtr XRayTracerFactory::createHttpTracerTyped(const envoy::config::trace::v3::XRayConfig& proto_config, diff --git a/source/extensions/tracers/zipkin/BUILD b/source/extensions/tracers/zipkin/BUILD index 942d2c3744e2..ee0328353fa0 100644 --- a/source/extensions/tracers/zipkin/BUILD +++ b/source/extensions/tracers/zipkin/BUILD @@ -58,7 +58,6 @@ envoy_cc_library( "//source/common/singleton:const_singleton", "//source/common/tracing:http_tracer_lib", "//source/common/upstream:cluster_update_tracker_lib", - "//source/extensions/tracers:well_known_names", "@com_github_openzipkin_zipkinapi//:zipkin_cc_proto", "@envoy_api//envoy/config/trace/v3:pkg_cc_proto", ], @@ -71,7 +70,6 @@ envoy_cc_extension( security_posture = "robust_to_untrusted_downstream", deps = [ ":zipkin_lib", - "//source/extensions/tracers:well_known_names", "//source/extensions/tracers/common:factory_base_lib", "@envoy_api//envoy/config/trace/v3:pkg_cc_proto", ], diff --git a/source/extensions/tracers/zipkin/config.cc b/source/extensions/tracers/zipkin/config.cc index 0fca39dd4a31..36d1f38fae8e 100644 --- a/source/extensions/tracers/zipkin/config.cc +++ b/source/extensions/tracers/zipkin/config.cc @@ -7,7 +7,6 @@ #include "common/common/utility.h" #include "common/tracing/http_tracer_impl.h" -#include "extensions/tracers/well_known_names.h" #include "extensions/tracers/zipkin/zipkin_tracer_impl.h" namespace Envoy { @@ -15,7 +14,7 @@ namespace Extensions { namespace Tracers { namespace Zipkin { -ZipkinTracerFactory::ZipkinTracerFactory() : FactoryBase(TracerNames::get().Zipkin) {} +ZipkinTracerFactory::ZipkinTracerFactory() : FactoryBase("envoy.tracers.zipkin") {} Tracing::HttpTracerSharedPtr ZipkinTracerFactory::createHttpTracerTyped( const envoy::config::trace::v3::ZipkinConfig& proto_config, diff --git a/source/extensions/tracers/zipkin/zipkin_tracer_impl.cc b/source/extensions/tracers/zipkin/zipkin_tracer_impl.cc index 238ffe64bc0b..a96d91aab565 100644 --- a/source/extensions/tracers/zipkin/zipkin_tracer_impl.cc +++ b/source/extensions/tracers/zipkin/zipkin_tracer_impl.cc @@ -11,7 +11,6 @@ #include "common/http/utility.h" #include "common/tracing/http_tracer_impl.h" -#include "extensions/tracers/well_known_names.h" #include "extensions/tracers/zipkin/span_context_extractor.h" #include "extensions/tracers/zipkin/zipkin_core_constants.h" @@ -74,7 +73,7 @@ Driver::Driver(const envoy::config::trace::v3::ZipkinConfig& zipkin_config, POOL_COUNTER_PREFIX(scope, "tracing.zipkin."))}, tls_(tls.allocateSlot()), runtime_(runtime), local_info_(local_info), time_source_(time_source) { - Config::Utility::checkCluster(TracerNames::get().Zipkin, zipkin_config.collector_cluster(), cm_, + Config::Utility::checkCluster("envoy.tracers.zipkin", zipkin_config.collector_cluster(), cm_, /* allow_added_via_api */ true); cluster_ = zipkin_config.collector_cluster(); diff --git a/test/per_file_coverage.sh b/test/per_file_coverage.sh index b71a45f126df..76126145cda7 100755 --- a/test/per_file_coverage.sh +++ b/test/per_file_coverage.sh @@ -43,7 +43,6 @@ declare -a KNOWN_LOW_COVERAGE=( "source/extensions/retry:95.5" "source/extensions/retry/host:85.7" "source/extensions/stat_sinks/statsd:85.2" -"source/extensions/tracers:96.3" "source/extensions/tracers/opencensus:91.2" "source/extensions/tracers/xray:95.3" "source/extensions/transport_sockets:94.9" From e355c58dcf6519e5a55c84e9efd50d597e3b5a69 Mon Sep 17 00:00:00 2001 From: Arthur Yan <55563955+arthuryan-k@users.noreply.github.com> Date: Mon, 20 
Jul 2020 18:27:11 -0400 Subject: [PATCH 695/909] fuzz: fix oss-fuzz crash in route_fuzz_test due to validation (#12176) Signed-off-by: Arthur Yan --- api/envoy/config/route/v3/route_components.proto | 4 +++- api/envoy/config/route/v4alpha/route_components.proto | 4 +++- .../envoy/config/route/v3/route_components.proto | 4 +++- .../envoy/config/route/v4alpha/route_components.proto | 4 +++- ...ase-minimized-route_fuzz_test-4701452596674560.fuzz | 10 ++++++++++ 5 files changed, 22 insertions(+), 4 deletions(-) create mode 100644 test/common/router/route_corpus/clusterfuzz-testcase-minimized-route_fuzz_test-4701452596674560.fuzz diff --git a/api/envoy/config/route/v3/route_components.proto b/api/envoy/config/route/v3/route_components.proto index e4ad52e66220..c35e210691c5 100644 --- a/api/envoy/config/route/v3/route_components.proto +++ b/api/envoy/config/route/v3/route_components.proto @@ -127,7 +127,9 @@ message VirtualHost { // Specifies a list of HTTP headers that should be removed from each response // handled by this virtual host. - repeated string response_headers_to_remove = 11; + repeated string response_headers_to_remove = 11 [(validate.rules).repeated = { + items {string {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} + }]; // Indicates that the virtual host has a CORS policy. CorsPolicy cors = 8; diff --git a/api/envoy/config/route/v4alpha/route_components.proto b/api/envoy/config/route/v4alpha/route_components.proto index 01b138c7a7a6..f921ea506d99 100644 --- a/api/envoy/config/route/v4alpha/route_components.proto +++ b/api/envoy/config/route/v4alpha/route_components.proto @@ -126,7 +126,9 @@ message VirtualHost { // Specifies a list of HTTP headers that should be removed from each response // handled by this virtual host. - repeated string response_headers_to_remove = 11; + repeated string response_headers_to_remove = 11 [(validate.rules).repeated = { + items {string {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} + }]; // Indicates that the virtual host has a CORS policy. CorsPolicy cors = 8; diff --git a/generated_api_shadow/envoy/config/route/v3/route_components.proto b/generated_api_shadow/envoy/config/route/v3/route_components.proto index ee95088a439f..f79f399d2140 100644 --- a/generated_api_shadow/envoy/config/route/v3/route_components.proto +++ b/generated_api_shadow/envoy/config/route/v3/route_components.proto @@ -125,7 +125,9 @@ message VirtualHost { // Specifies a list of HTTP headers that should be removed from each response // handled by this virtual host. - repeated string response_headers_to_remove = 11; + repeated string response_headers_to_remove = 11 [(validate.rules).repeated = { + items {string {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} + }]; // Indicates that the virtual host has a CORS policy. CorsPolicy cors = 8; diff --git a/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto b/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto index 7292f6258fce..a8b6ae4459ce 100644 --- a/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto +++ b/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto @@ -126,7 +126,9 @@ message VirtualHost { // Specifies a list of HTTP headers that should be removed from each response // handled by this virtual host. 
- repeated string response_headers_to_remove = 11; + repeated string response_headers_to_remove = 11 [(validate.rules).repeated = { + items {string {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} + }]; // Indicates that the virtual host has a CORS policy. CorsPolicy cors = 8; diff --git a/test/common/router/route_corpus/clusterfuzz-testcase-minimized-route_fuzz_test-4701452596674560.fuzz b/test/common/router/route_corpus/clusterfuzz-testcase-minimized-route_fuzz_test-4701452596674560.fuzz new file mode 100644 index 000000000000..a147ab239251 --- /dev/null +++ b/test/common/router/route_corpus/clusterfuzz-testcase-minimized-route_fuzz_test-4701452596674560.fuzz @@ -0,0 +1,10 @@ +config { + virtual_hosts { + name: "&\006\000\000\000" + domains: "-" + require_tls: ALL + response_headers_to_remove: "\0Ï3\022\362\211\245\247V\036" + request_headers_to_remove: "\003\022\360\234\254\265V\036" + } +} +random_value: 67070975 From f556b410a3f57c6c7fd5780e0a957edb2f113700 Mon Sep 17 00:00:00 2001 From: asraa Date: Mon, 20 Jul 2020 18:57:31 -0400 Subject: [PATCH 696/909] security: add weekly patches (#12156) Signed-off-by: Asra Ali --- SECURITY.md | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/SECURITY.md b/SECURITY.md index 8b5a8504bc58..a195ce706bc7 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -172,7 +172,8 @@ patches, understand exact mitigation steps, etc. should be reserved for remotely exploitable or privilege escalation issues. Otherwise, this process can be skipped. - The Fix Lead will email the patches to cncf-envoy-distributors-announce@lists.cncf.io so - distributors can prepare builds to be available to users on the day of the issue's announcement. + distributors can prepare builds to be available to users on the day of the issue's announcement. Any + patches against main will be updated and resent weekly. Distributors should read about the [Private Distributors List](#private-distributors-list) to find out the requirements for being added to this list. - **What if a vendor breaks embargo?** The PST will assess the damage. The Fix Lead will make the @@ -326,7 +327,7 @@ use of Envoy should: have a way to privately stage and validate your updates that does not violate the embargo. 7. Be willing to [contribute back](#contributing-back) as outlined above. -8. Be able to perform a security release of your product within a two week window from candidate fix +8. Be able to perform a security release of your product within a three week window from candidate fix patch availability. 9. Have someone already on the list vouch for the person requesting membership on behalf of your distribution. @@ -406,7 +407,7 @@ We accept. We are definitely willing to help! -> 8. Be able to perform a security release of your product within a two week window from candidate fix +> 8. Be able to perform a security release of your product within a three week window from candidate fix patch availability. We affirm we can spin out new security releases within a 2 week window. From 796a6745954cd134c6449da2237a99b35d7e8d80 Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Mon, 20 Jul 2020 17:16:53 -0700 Subject: [PATCH 697/909] Revert "tcp: switching to the new pool (#12180)" (#12192) This reverts commit f50fba14928c0fa08d02447db8fade9d56e9164a. 
Signed-off-by: Greg Greenway --- docs/root/version_history/current.rst | 1 - source/common/runtime/runtime_features.cc | 3 ++- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 09ccd54908c7..5acb53a13a3d 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -42,7 +42,6 @@ New Features * load balancer: added a :ref:`configuration` option to specify the active request bias used by the least request load balancer. * redis: added fault injection support :ref:`fault injection for redis proxy `, described further in :ref:`configuration documentation `. * tap: added :ref:`generic body matcher` to scan http requests and responses for text or hex patterns. -* tcp: switched the TCP connection pool to the new "shared" connection pool, sharing a common code base with HTTP and HTTP/2. Any unexpected behavioral changes can be temporarily reverted by setting `envoy.reloadable_features.new_tcp_connection_pool` to false. Deprecated ---------- diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc index 055d350e497c..35b990cd39b7 100644 --- a/source/common/runtime/runtime_features.cc +++ b/source/common/runtime/runtime_features.cc @@ -71,7 +71,6 @@ constexpr const char* runtime_features[] = { "envoy.reloadable_features.http_default_alpn", "envoy.reloadable_features.listener_in_place_filterchain_update", "envoy.reloadable_features.new_codec_behavior", - "envoy.reloadable_features.new_tcp_connection_pool", "envoy.reloadable_features.preserve_query_string_in_path_redirects", "envoy.reloadable_features.preserve_upstream_date", "envoy.reloadable_features.stop_faking_paths", @@ -88,6 +87,8 @@ constexpr const char* runtime_features[] = { // When features are added here, there should be a tracking bug assigned to the // code owner to flip the default after sufficient testing. constexpr const char* disabled_runtime_features[] = { + // TODO(alyssawilk) flip true after the release. + "envoy.reloadable_features.new_tcp_connection_pool", // Sentinel and test flag. 
"envoy.reloadable_features.test_feature_false", }; From 8285ef6dbf990cf483cf8436d4b59c893a6d2555 Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Tue, 21 Jul 2020 02:03:00 -0700 Subject: [PATCH 698/909] dev: remove pcap workaround from devcontainer (#12191) Signed-off-by: Lizan Zhou --- .bazelrc | 2 +- .circleci/config.yml | 2 +- .devcontainer/Dockerfile | 4 +--- 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/.bazelrc b/.bazelrc index 2a9d4f0943e5..85db6cdc6933 100644 --- a/.bazelrc +++ b/.bazelrc @@ -211,7 +211,7 @@ build:remote-msvc-cl --config=rbe-toolchain-msvc-cl # Docker sandbox # NOTE: Update this from https://github.com/envoyproxy/envoy-build-tools/blob/master/toolchains/rbe_toolchains_config.bzl#L8 -build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu:736b8db2e1f0b55edb50719d2f8ddf383f46030b +build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu:923df85a4ba7f30dcd0cb6b0c6d8d604f0e20f48 build:docker-sandbox --spawn_strategy=docker build:docker-sandbox --strategy=Javac=docker build:docker-sandbox --strategy=Closure=docker diff --git a/.circleci/config.yml b/.circleci/config.yml index 822d995b2d3c..a9f9145da241 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -5,7 +5,7 @@ executors: description: "A regular build executor based on ubuntu image" docker: # NOTE: Update this from https://github.com/envoyproxy/envoy-build-tools/blob/master/toolchains/rbe_toolchains_config.bzl#L8 - - image: envoyproxy/envoy-build-ubuntu:736b8db2e1f0b55edb50719d2f8ddf383f46030b + - image: envoyproxy/envoy-build-ubuntu:923df85a4ba7f30dcd0cb6b0c6d8d604f0e20f48 resource_class: xlarge working_directory: /source diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 33ca454d55ad..53d721238dde 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -1,4 +1,4 @@ -FROM gcr.io/envoy-ci/envoy-build:736b8db2e1f0b55edb50719d2f8ddf383f46030b +FROM gcr.io/envoy-ci/envoy-build:923df85a4ba7f30dcd0cb6b0c6d8d604f0e20f48 ARG USERNAME=vscode ARG USER_UID=501 @@ -10,8 +10,6 @@ ENV ENVOY_STDLIB=libstdc++ ENV DEBIAN_FRONTEND=noninteractive RUN apt-get -y update \ && apt-get -y install --no-install-recommends libpython2.7 net-tools psmisc vim 2>&1 \ - # Change pcap gid to some larger number which doesn't conflict with common gid (1000) - && groupmod -g 65515 pcap && chgrp pcap /usr/sbin/tcpdump \ # Create a non-root user to use if preferred - see https://aka.ms/vscode-remote/containers/non-root-user. && groupadd --gid $USER_GID $USERNAME \ && useradd -s /bin/bash --uid $USER_UID --gid $USER_GID -m $USERNAME -G pcap -d /build \ From 606ccbdc2ab956d0f8190e30990d3aa6643d7547 Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Tue, 21 Jul 2020 02:07:53 -0700 Subject: [PATCH 699/909] make header_order.py python3 (#12193) Signed-off-by: Lizan Zhou --- tools/code_format/header_order.py | 13 +++++-------- tools/code_format/requirements.txt | 4 ++-- tools/protoxform/migrate.py | 4 ++-- 3 files changed, 9 insertions(+), 12 deletions(-) diff --git a/tools/code_format/header_order.py b/tools/code_format/header_order.py index 9962d825a3f5..427fb0c053b1 100755 --- a/tools/code_format/header_order.py +++ b/tools/code_format/header_order.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # Enforce header order in a given file. This will only reorder in the first sequence of contiguous # #include statements, so it will not play well with #ifdef. 
@@ -12,17 +12,15 @@ # enough to handle block splitting and correctly detecting the main header subject to the Envoy # canonical paths. -from __future__ import print_function - import argparse import common +import pathlib import re import sys def ReorderHeaders(path): - with open(path, 'r') as f: - source = f.read() + source = pathlib.Path(path).read_text(encoding='utf-8') all_lines = iter(source.split('\n')) before_includes_lines = [] @@ -117,7 +115,6 @@ def regex_filter(regex): include_dir_order = args.include_dir_order.split(',') reorderd_source = ReorderHeaders(target_path) if args.rewrite: - with open(target_path, 'w') as f: - f.write(reorderd_source) + pathlib.Path(target_path).write_text(reorderd_source, encoding='utf-8') else: - sys.stdout.write(reorderd_source) + sys.stdout.buffer.write(reorderd_source.encode('utf-8')) diff --git a/tools/code_format/requirements.txt b/tools/code_format/requirements.txt index 4ab3842b87d9..c27e0d44afaa 100644 --- a/tools/code_format/requirements.txt +++ b/tools/code_format/requirements.txt @@ -1,2 +1,2 @@ -flake8==3.7.8 -yapf==0.28.0 +flake8==3.8.3 +yapf==0.30.0 diff --git a/tools/protoxform/migrate.py b/tools/protoxform/migrate.py index e7481b0ccbbb..10d6be274014 100644 --- a/tools/protoxform/migrate.py +++ b/tools/protoxform/migrate.py @@ -57,8 +57,8 @@ def UpgradeType(match): # We need to deal with envoy.api.* normalization in the v2 API. We won't # need this in v3+, so rather than churn docs, we just have this workaround. type_desc = self._typedb.types[api_v2_type_name] - repl_type = type_desc.next_version_type_name[len( - 'envoy.'):] if type_desc.next_version_type_name else normalized_type_name + repl_type = type_desc.next_version_type_name[ + len('envoy.'):] if type_desc.next_version_type_name else normalized_type_name # TODO(htuch): this should really either go through the type database or # via the descriptor pool and annotations, but there are only two of these # we need for the initial v2 -> v3 docs cut, so hard coding for now. From ffd8a6ea3d7d5bd33ccc97d136132e1fc756bcb2 Mon Sep 17 00:00:00 2001 From: jianwen612 <55008549+jianwen612@users.noreply.github.com> Date: Tue, 21 Jul 2020 11:03:36 -0500 Subject: [PATCH 700/909] direct response filter: added unit test cases (#12064) Added unit test cases for "direct_response". Removed "direct_response" from test/per_file_coverage.sh Risk Level:low Testing: covered onData() and onNewConnection() method of the filter. 
Fixes: #11998 Signed-off-by: jianwen --- .../filters/network/direct_response/BUILD | 12 ++++ .../direct_response/direct_response_test.cc | 61 +++++++++++++++++++ test/per_file_coverage.sh | 1 - 3 files changed, 73 insertions(+), 1 deletion(-) create mode 100644 test/extensions/filters/network/direct_response/direct_response_test.cc diff --git a/test/extensions/filters/network/direct_response/BUILD b/test/extensions/filters/network/direct_response/BUILD index 06fa488357ec..8ebb790617e3 100644 --- a/test/extensions/filters/network/direct_response/BUILD +++ b/test/extensions/filters/network/direct_response/BUILD @@ -29,3 +29,15 @@ envoy_extension_cc_test( "//test/test_common:utility_lib", ], ) + +envoy_extension_cc_test( + name = "direct_response_test", + srcs = ["direct_response_test.cc"], + extension_name = "envoy.filters.network.direct_response", + deps = [ + "//source/extensions/filters/network/direct_response:filter", + "//test/mocks/api:api_mocks", + "//test/mocks/network:network_mocks", + "@envoy_api//envoy/extensions/filters/network/direct_response/v3:pkg_cc_proto", + ], +) diff --git a/test/extensions/filters/network/direct_response/direct_response_test.cc b/test/extensions/filters/network/direct_response/direct_response_test.cc new file mode 100644 index 000000000000..7218bd194426 --- /dev/null +++ b/test/extensions/filters/network/direct_response/direct_response_test.cc @@ -0,0 +1,61 @@ +#include "envoy/extensions/filters/network/direct_response/v3/config.pb.validate.h" + +#include "extensions/filters/network/direct_response/filter.h" + +#include "test/mocks/api/mocks.h" +#include "test/mocks/network/mocks.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::_; +using testing::NiceMock; + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace DirectResponse { + +class DirectResponseFilterTest : public testing::Test { +public: + void initialize(const std::string& response) { + filter_ = std::make_shared(response); + filter_->initializeReadFilterCallbacks(read_filter_callbacks_); + } + std::shared_ptr filter_; + NiceMock read_filter_callbacks_; +}; + +// Test the filter's onNewConnection() with a non-empty response +TEST_F(DirectResponseFilterTest, OnNewConnection) { + initialize("hello"); + Buffer::OwnedImpl response("hello"); + EXPECT_CALL(read_filter_callbacks_.connection_, write(BufferEqual(&response), false)); + EXPECT_CALL(read_filter_callbacks_.connection_, close(Network::ConnectionCloseType::FlushWrite)); + EXPECT_CALL(read_filter_callbacks_.connection_.stream_info_, + setResponseCodeDetails(StreamInfo::ResponseCodeDetails::get().DirectResponse)); + EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onNewConnection()); +} + +// Test the filter's onNewConnection() with an empty response +TEST_F(DirectResponseFilterTest, OnNewConnectionEmptyResponse) { + initialize(""); + EXPECT_CALL(read_filter_callbacks_.connection_, write(_, _)).Times(0); + EXPECT_CALL(read_filter_callbacks_.connection_, close(Network::ConnectionCloseType::FlushWrite)); + EXPECT_CALL(read_filter_callbacks_.connection_.stream_info_, + setResponseCodeDetails(StreamInfo::ResponseCodeDetails::get().DirectResponse)); + EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onNewConnection()); +} + +// Test the filter's onData() +TEST_F(DirectResponseFilterTest, OnData) { + initialize("hello"); + Buffer::OwnedImpl data("data"); + EXPECT_CALL(read_filter_callbacks_.connection_, write(_, _)).Times(0); + EXPECT_EQ(Network::FilterStatus::Continue, 
filter_->onData(data, false)); +} + +} // namespace DirectResponse +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/per_file_coverage.sh b/test/per_file_coverage.sh index 76126145cda7..ef5987d99c37 100755 --- a/test/per_file_coverage.sh +++ b/test/per_file_coverage.sh @@ -13,7 +13,6 @@ declare -a KNOWN_LOW_COVERAGE=( "source/extensions/filters/network/sni_dynamic_forward_proxy:90.9" "source/extensions/filters/network/dubbo_proxy:96.1" "source/extensions/filters/network/dubbo_proxy/router:95.1" -"source/extensions/filters/network/direct_response:89.3" "source/extensions/filters/network/mongo_proxy:94.0" "source/extensions/filters/network/common:96.1" "source/extensions/filters/network/common/redis:96.2" From 7f78581116ecdc9dcca319ebe68d4c8ac1d817ba Mon Sep 17 00:00:00 2001 From: Kevin Baichoo Date: Tue, 21 Jul 2020 12:49:54 -0400 Subject: [PATCH 701/909] Added watchdog support for a Multi-Kill threshold. (#12108) WatchDog will now kill if max(2, registered_threads * multi_kill_threshold) threads have gone above the multikill_timeout. Signed-off-by: Kevin Baichoo --- api/envoy/config/bootstrap/v3/BUILD | 1 + api/envoy/config/bootstrap/v3/bootstrap.proto | 14 +++- api/envoy/config/bootstrap/v4alpha/BUILD | 1 + .../config/bootstrap/v4alpha/bootstrap.proto | 14 +++- .../envoy/config/bootstrap/v3/BUILD | 1 + .../envoy/config/bootstrap/v3/bootstrap.proto | 14 +++- .../envoy/config/bootstrap/v4alpha/BUILD | 1 + .../config/bootstrap/v4alpha/bootstrap.proto | 14 +++- include/envoy/server/configuration.h | 9 +++ source/server/configuration_impl.cc | 2 + source/server/configuration_impl.h | 3 + source/server/guarddog_impl.cc | 22 +++--- source/server/guarddog_impl.h | 1 + test/mocks/server/main.cc | 7 +- test/mocks/server/main.h | 7 +- test/server/guarddog_impl_test.cc | 72 +++++++++++++++++-- tools/spelling/spelling_dictionary.txt | 1 + 17 files changed, 154 insertions(+), 30 deletions(-) diff --git a/api/envoy/config/bootstrap/v3/BUILD b/api/envoy/config/bootstrap/v3/BUILD index 0a4f9a6e1ede..63eb22d36ea0 100644 --- a/api/envoy/config/bootstrap/v3/BUILD +++ b/api/envoy/config/bootstrap/v3/BUILD @@ -15,6 +15,7 @@ api_proto_package( "//envoy/config/overload/v3:pkg", "//envoy/config/trace/v3:pkg", "//envoy/extensions/transport_sockets/tls/v3:pkg", + "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", "@com_github_cncf_udpa//udpa/core/v1:pkg", ], diff --git a/api/envoy/config/bootstrap/v3/bootstrap.proto b/api/envoy/config/bootstrap/v3/bootstrap.proto index 27c59bfc8cc9..9abd3a37fed4 100644 --- a/api/envoy/config/bootstrap/v3/bootstrap.proto +++ b/api/envoy/config/bootstrap/v3/bootstrap.proto @@ -14,6 +14,7 @@ import "envoy/config/metrics/v3/stats.proto"; import "envoy/config/overload/v3/overload.proto"; import "envoy/config/trace/v3/http_tracer.proto"; import "envoy/extensions/transport_sockets/tls/v3/secret.proto"; +import "envoy/type/v3/percent.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; @@ -297,6 +298,7 @@ message ClusterManager { // Envoy process watchdog configuration. When configured, this monitors for // nonresponsive threads and kills the process after the configured thresholds. // See the :ref:`watchdog documentation ` for more information. +// [#next-free-field: 6] message Watchdog { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v2.Watchdog"; @@ -314,10 +316,16 @@ message Watchdog { // kill behavior. If not specified the default is 0 (disabled). 
google.protobuf.Duration kill_timeout = 3; - // If at least two watched threads have been nonresponsive for at least this - // duration assume a true deadlock and kill the entire Envoy process. Set to 0 - // to disable this behavior. If not specified the default is 0 (disabled). + // If max(2, ceil(registered_threads * Fraction(*multikill_threshold*))) + // threads have been nonresponsive for at least this duration kill the entire + // Envoy process. Set to 0 to disable this behavior. If not specified the + // default is 0 (disabled). google.protobuf.Duration multikill_timeout = 4; + + // Sets the threshold for *multikill_timeout* in terms of the percentage of + // nonresponsive threads required for the *multikill_timeout*. + // If not specified the default is 0. + type.v3.Percent multikill_threshold = 5; } // Runtime :ref:`configuration overview ` (deprecated). diff --git a/api/envoy/config/bootstrap/v4alpha/BUILD b/api/envoy/config/bootstrap/v4alpha/BUILD index 0fd53ed1c2b6..97d0d49f07ff 100644 --- a/api/envoy/config/bootstrap/v4alpha/BUILD +++ b/api/envoy/config/bootstrap/v4alpha/BUILD @@ -14,6 +14,7 @@ api_proto_package( "//envoy/config/metrics/v4alpha:pkg", "//envoy/config/overload/v3:pkg", "//envoy/extensions/transport_sockets/tls/v4alpha:pkg", + "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", "@com_github_cncf_udpa//udpa/core/v1:pkg", ], diff --git a/api/envoy/config/bootstrap/v4alpha/bootstrap.proto b/api/envoy/config/bootstrap/v4alpha/bootstrap.proto index 41de2a875d2e..84959f40ade9 100644 --- a/api/envoy/config/bootstrap/v4alpha/bootstrap.proto +++ b/api/envoy/config/bootstrap/v4alpha/bootstrap.proto @@ -13,6 +13,7 @@ import "envoy/config/listener/v4alpha/listener.proto"; import "envoy/config/metrics/v4alpha/stats.proto"; import "envoy/config/overload/v3/overload.proto"; import "envoy/extensions/transport_sockets/tls/v4alpha/secret.proto"; +import "envoy/type/v3/percent.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; @@ -288,6 +289,7 @@ message ClusterManager { // Envoy process watchdog configuration. When configured, this monitors for // nonresponsive threads and kills the process after the configured thresholds. // See the :ref:`watchdog documentation ` for more information. +// [#next-free-field: 6] message Watchdog { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.Watchdog"; @@ -305,10 +307,16 @@ message Watchdog { // kill behavior. If not specified the default is 0 (disabled). google.protobuf.Duration kill_timeout = 3; - // If at least two watched threads have been nonresponsive for at least this - // duration assume a true deadlock and kill the entire Envoy process. Set to 0 - // to disable this behavior. If not specified the default is 0 (disabled). + // If max(2, ceil(registered_threads * Fraction(*multikill_threshold*))) + // threads have been nonresponsive for at least this duration kill the entire + // Envoy process. Set to 0 to disable this behavior. If not specified the + // default is 0 (disabled). google.protobuf.Duration multikill_timeout = 4; + + // Sets the threshold for *multikill_timeout* in terms of the percentage of + // nonresponsive threads required for the *multikill_timeout*. + // If not specified the default is 0. + type.v3.Percent multikill_threshold = 5; } // Runtime :ref:`configuration overview ` (deprecated). 
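To make the new multikill_threshold field concrete, here is a small standalone C++ sketch (an illustration only, not Envoy code; the helper name requiredForMultiKill is made up) of the computation described in the proto comment and performed in guarddog_impl.cc below: the process is killed once max(2, ceil(registered_threads * Fraction(multikill_threshold))) watched threads have been nonresponsive for longer than multikill_timeout.

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <iostream>

// Number of nonresponsive threads required to trigger the multikill PANIC.
size_t requiredForMultiKill(size_t registered_threads, double threshold_percent) {
  const double fraction = threshold_percent / 100.0;
  return std::max(static_cast<size_t>(2),
                  static_cast<size_t>(std::ceil(fraction * registered_threads)));
}

int main() {
  // With 5 watched threads and a 60% threshold, 3 stuck threads are enough,
  // matching the MultiKillThresholdDeathTest added in this patch.
  std::cout << requiredForMultiKill(5, 60.0) << "\n"; // prints 3
  // With the threshold left at its default of 0, the floor of 2 threads applies,
  // preserving the previous "at least two threads" behavior.
  std::cout << requiredForMultiKill(5, 0.0) << "\n"; // prints 2
}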
diff --git a/generated_api_shadow/envoy/config/bootstrap/v3/BUILD b/generated_api_shadow/envoy/config/bootstrap/v3/BUILD index 0a4f9a6e1ede..63eb22d36ea0 100644 --- a/generated_api_shadow/envoy/config/bootstrap/v3/BUILD +++ b/generated_api_shadow/envoy/config/bootstrap/v3/BUILD @@ -15,6 +15,7 @@ api_proto_package( "//envoy/config/overload/v3:pkg", "//envoy/config/trace/v3:pkg", "//envoy/extensions/transport_sockets/tls/v3:pkg", + "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", "@com_github_cncf_udpa//udpa/core/v1:pkg", ], diff --git a/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto b/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto index 53b587c8bc0b..dbfc503fff26 100644 --- a/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto +++ b/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto @@ -14,6 +14,7 @@ import "envoy/config/metrics/v3/stats.proto"; import "envoy/config/overload/v3/overload.proto"; import "envoy/config/trace/v3/http_tracer.proto"; import "envoy/extensions/transport_sockets/tls/v3/secret.proto"; +import "envoy/type/v3/percent.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; @@ -298,6 +299,7 @@ message ClusterManager { // Envoy process watchdog configuration. When configured, this monitors for // nonresponsive threads and kills the process after the configured thresholds. // See the :ref:`watchdog documentation ` for more information. +// [#next-free-field: 6] message Watchdog { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v2.Watchdog"; @@ -315,10 +317,16 @@ message Watchdog { // kill behavior. If not specified the default is 0 (disabled). google.protobuf.Duration kill_timeout = 3; - // If at least two watched threads have been nonresponsive for at least this - // duration assume a true deadlock and kill the entire Envoy process. Set to 0 - // to disable this behavior. If not specified the default is 0 (disabled). + // If max(2, ceil(registered_threads * Fraction(*multikill_threshold*))) + // threads have been nonresponsive for at least this duration kill the entire + // Envoy process. Set to 0 to disable this behavior. If not specified the + // default is 0 (disabled). google.protobuf.Duration multikill_timeout = 4; + + // Sets the threshold for *multikill_timeout* in terms of the percentage of + // nonresponsive threads required for the *multikill_timeout*. + // If not specified the default is 0. + type.v3.Percent multikill_threshold = 5; } // Runtime :ref:`configuration overview ` (deprecated). 
diff --git a/generated_api_shadow/envoy/config/bootstrap/v4alpha/BUILD b/generated_api_shadow/envoy/config/bootstrap/v4alpha/BUILD index a0dac9234426..b5609e3cc43f 100644 --- a/generated_api_shadow/envoy/config/bootstrap/v4alpha/BUILD +++ b/generated_api_shadow/envoy/config/bootstrap/v4alpha/BUILD @@ -15,6 +15,7 @@ api_proto_package( "//envoy/config/overload/v3:pkg", "//envoy/config/trace/v4alpha:pkg", "//envoy/extensions/transport_sockets/tls/v4alpha:pkg", + "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", "@com_github_cncf_udpa//udpa/core/v1:pkg", ], diff --git a/generated_api_shadow/envoy/config/bootstrap/v4alpha/bootstrap.proto b/generated_api_shadow/envoy/config/bootstrap/v4alpha/bootstrap.proto index 6690c6cd0c30..96b84eaf4cfa 100644 --- a/generated_api_shadow/envoy/config/bootstrap/v4alpha/bootstrap.proto +++ b/generated_api_shadow/envoy/config/bootstrap/v4alpha/bootstrap.proto @@ -14,6 +14,7 @@ import "envoy/config/metrics/v4alpha/stats.proto"; import "envoy/config/overload/v3/overload.proto"; import "envoy/config/trace/v4alpha/http_tracer.proto"; import "envoy/extensions/transport_sockets/tls/v4alpha/secret.proto"; +import "envoy/type/v3/percent.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; @@ -296,6 +297,7 @@ message ClusterManager { // Envoy process watchdog configuration. When configured, this monitors for // nonresponsive threads and kills the process after the configured thresholds. // See the :ref:`watchdog documentation ` for more information. +// [#next-free-field: 6] message Watchdog { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.Watchdog"; @@ -313,10 +315,16 @@ message Watchdog { // kill behavior. If not specified the default is 0 (disabled). google.protobuf.Duration kill_timeout = 3; - // If at least two watched threads have been nonresponsive for at least this - // duration assume a true deadlock and kill the entire Envoy process. Set to 0 - // to disable this behavior. If not specified the default is 0 (disabled). + // If max(2, ceil(registered_threads * Fraction(*multikill_threshold*))) + // threads have been nonresponsive for at least this duration kill the entire + // Envoy process. Set to 0 to disable this behavior. If not specified the + // default is 0 (disabled). google.protobuf.Duration multikill_timeout = 4; + + // Sets the threshold for *multikill_timeout* in terms of the percentage of + // nonresponsive threads required for the *multikill_timeout*. + // If not specified the default is 0. + type.v3.Percent multikill_threshold = 5; } // Runtime :ref:`configuration overview ` (deprecated). diff --git a/include/envoy/server/configuration.h b/include/envoy/server/configuration.h index 88a167566e80..aee4ecf01c04 100644 --- a/include/envoy/server/configuration.h +++ b/include/envoy/server/configuration.h @@ -64,6 +64,15 @@ class Main { * multiple nonresponsive threads. */ virtual std::chrono::milliseconds wdMultiKillTimeout() const PURE; + + /** + * @return double the percentage of threads that need to meet the MultiKillTimeout before we + * kill the process. This is used in the calculation below + * Max(2, ceil(registered_threads * Fraction(MultiKillThreshold))) + * which computes the number of threads that need to be be nonresponsive + * for at least MultiKillTimeout before we kill the process. 
+ */ + virtual double wdMultiKillThreshold() const PURE; }; /** diff --git a/source/server/configuration_impl.cc b/source/server/configuration_impl.cc index 533bbdfef866..2a10e00388dc 100644 --- a/source/server/configuration_impl.cc +++ b/source/server/configuration_impl.cc @@ -94,6 +94,8 @@ void MainImpl::initialize(const envoy::config::bootstrap::v3::Bootstrap& bootstr std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(watchdog, kill_timeout, 0)); watchdog_multikill_timeout_ = std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(watchdog, multikill_timeout, 0)); + watchdog_multikill_threshold_ = + PROTOBUF_PERCENT_TO_DOUBLE_OR_DEFAULT(watchdog, multikill_threshold, 0.0); initializeStatsSinks(bootstrap, server); } diff --git a/source/server/configuration_impl.h b/source/server/configuration_impl.h index 5faf632f8959..7ad844d565a1 100644 --- a/source/server/configuration_impl.h +++ b/source/server/configuration_impl.h @@ -110,6 +110,8 @@ class MainImpl : Logger::Loggable, public Main { return watchdog_multikill_timeout_; } + double wdMultiKillThreshold() const override { return watchdog_multikill_threshold_; } + private: /** * Initialize tracers and corresponding sinks. @@ -126,6 +128,7 @@ class MainImpl : Logger::Loggable, public Main { std::chrono::milliseconds watchdog_megamiss_timeout_; std::chrono::milliseconds watchdog_kill_timeout_; std::chrono::milliseconds watchdog_multikill_timeout_; + double watchdog_multikill_threshold_; }; /** diff --git a/source/server/guarddog_impl.cc b/source/server/guarddog_impl.cc index ad66b59a5299..add9ca270d51 100644 --- a/source/server/guarddog_impl.cc +++ b/source/server/guarddog_impl.cc @@ -1,5 +1,7 @@ #include "server/guarddog_impl.h" +#include + #include #include @@ -23,6 +25,7 @@ GuardDogImpl::GuardDogImpl(Stats::Scope& stats_scope, const Server::Configuratio time_source_(api.timeSource()), miss_timeout_(config.wdMissTimeout()), megamiss_timeout_(config.wdMegaMissTimeout()), kill_timeout_(config.wdKillTimeout()), multi_kill_timeout_(config.wdMultiKillTimeout()), + multi_kill_fraction_(config.wdMultiKillThreshold() / 100.0), loop_interval_([&]() -> std::chrono::milliseconds { // The loop interval is simply the minimum of all specified intervals, // but we must account for the 0=disabled case. This lambda takes care @@ -60,8 +63,14 @@ void GuardDogImpl::step() { const auto now = time_source_.monotonicTime(); { - bool seen_one_multi_timeout(false); + size_t multi_kill_count = 0; Thread::LockGuard guard(wd_lock_); + + // Compute the multikill threshold + const size_t required_for_multi_kill = + std::max(static_cast(2), + static_cast(ceil(multi_kill_fraction_ * watched_dogs_.size()))); + for (auto& watched_dog : watched_dogs_) { const auto ltt = watched_dog->dog_->lastTouchTime(); const auto delta = now - ltt; @@ -90,13 +99,10 @@ void GuardDogImpl::step() { watched_dog->dog_->threadId().debugString())); } if (multikillEnabled() && delta > multi_kill_timeout_) { - if (seen_one_multi_timeout) { - - PANIC(fmt::format( - "GuardDog: multiple threads ({},...) stuck for more than watchdog_multikill_timeout", - watched_dog->dog_->threadId().debugString())); - } else { - seen_one_multi_timeout = true; + if (++multi_kill_count >= required_for_multi_kill) { + PANIC(fmt::format("GuardDog: At least {} threads ({},...) 
stuck for more than " + "watchdog_multikill_timeout", + multi_kill_count, watched_dog->dog_->threadId().debugString())); } } } diff --git a/source/server/guarddog_impl.h b/source/server/guarddog_impl.h index b570043bcbf3..2fba7f0edcbb 100644 --- a/source/server/guarddog_impl.h +++ b/source/server/guarddog_impl.h @@ -120,6 +120,7 @@ class GuardDogImpl : public GuardDog { const std::chrono::milliseconds megamiss_timeout_; const std::chrono::milliseconds kill_timeout_; const std::chrono::milliseconds multi_kill_timeout_; + const double multi_kill_fraction_; const std::chrono::milliseconds loop_interval_; Stats::Counter& watchdog_miss_counter_; Stats::Counter& watchdog_megamiss_counter_; diff --git a/test/mocks/server/main.cc b/test/mocks/server/main.cc index 2cc3e8dfeec4..26bde5941bed 100644 --- a/test/mocks/server/main.cc +++ b/test/mocks/server/main.cc @@ -9,12 +9,15 @@ namespace Configuration { using ::testing::Return; -MockMain::MockMain(int wd_miss, int wd_megamiss, int wd_kill, int wd_multikill) - : wd_miss_(wd_miss), wd_megamiss_(wd_megamiss), wd_kill_(wd_kill), wd_multikill_(wd_multikill) { +MockMain::MockMain(int wd_miss, int wd_megamiss, int wd_kill, int wd_multikill, + double wd_multikill_threshold) + : wd_miss_(wd_miss), wd_megamiss_(wd_megamiss), wd_kill_(wd_kill), wd_multikill_(wd_multikill), + wd_multikill_threshold_(wd_multikill_threshold) { ON_CALL(*this, wdMissTimeout()).WillByDefault(Return(wd_miss_)); ON_CALL(*this, wdMegaMissTimeout()).WillByDefault(Return(wd_megamiss_)); ON_CALL(*this, wdKillTimeout()).WillByDefault(Return(wd_kill_)); ON_CALL(*this, wdMultiKillTimeout()).WillByDefault(Return(wd_multikill_)); + ON_CALL(*this, wdMultiKillThreshold()).WillByDefault(Return(wd_multikill_threshold_)); } MockMain::~MockMain() = default; diff --git a/test/mocks/server/main.h b/test/mocks/server/main.h index 573cda3df1f0..c89b637e669a 100644 --- a/test/mocks/server/main.h +++ b/test/mocks/server/main.h @@ -15,8 +15,9 @@ namespace Server { namespace Configuration { class MockMain : public Main { public: - MockMain() : MockMain(0, 0, 0, 0) {} - MockMain(int wd_miss, int wd_megamiss, int wd_kill, int wd_multikill); + MockMain() : MockMain(0, 0, 0, 0, 0.0) {} + MockMain(int wd_miss, int wd_megamiss, int wd_kill, int wd_multikill, + double wd_multikill_threshold); ~MockMain() override; MOCK_METHOD(Upstream::ClusterManager*, clusterManager, ()); @@ -26,11 +27,13 @@ class MockMain : public Main { MOCK_METHOD(std::chrono::milliseconds, wdMegaMissTimeout, (), (const)); MOCK_METHOD(std::chrono::milliseconds, wdKillTimeout, (), (const)); MOCK_METHOD(std::chrono::milliseconds, wdMultiKillTimeout, (), (const)); + MOCK_METHOD(double, wdMultiKillThreshold, (), (const)); std::chrono::milliseconds wd_miss_; std::chrono::milliseconds wd_megamiss_; std::chrono::milliseconds wd_kill_; std::chrono::milliseconds wd_multikill_; + double wd_multikill_threshold_; }; } // namespace Configuration } // namespace Server diff --git a/test/server/guarddog_impl_test.cc b/test/server/guarddog_impl_test.cc index 2ab5e94332e7..e26856e011db 100644 --- a/test/server/guarddog_impl_test.cc +++ b/test/server/guarddog_impl_test.cc @@ -3,6 +3,7 @@ #include #include "envoy/common/time.h" +#include "envoy/server/watchdog.h" #include "common/api/api_impl.h" #include "common/common/macros.h" @@ -89,7 +90,8 @@ INSTANTIATE_TEST_SUITE_P(TimeSystemType, GuardDogTestBase, class GuardDogDeathTest : public GuardDogTestBase { protected: GuardDogDeathTest() - : config_kill_(1000, 1000, 100, 1000), config_multikill_(1000, 1000, 1000, 
500) {} + : config_kill_(1000, 1000, 100, 1000, 0), config_multikill_(1000, 1000, 1000, 500, 0), + config_multikill_threshold_(1000, 1000, 1000, 500, 60) {} /** * This does everything but the final forceCheckForTest() that should cause @@ -99,6 +101,7 @@ class GuardDogDeathTest : public GuardDogTestBase { InSequence s; initGuardDog(fakestats_, config_kill_); unpet_dog_ = guard_dog_->createWatchDog(api_->threadFactory().currentThreadId(), "test_thread"); + dogs_.emplace_back(unpet_dog_); guard_dog_->forceCheckForTest(); time_system_->advanceTimeWait(std::chrono::milliseconds(99)); // 1 ms shy of death. } @@ -112,18 +115,47 @@ class GuardDogDeathTest : public GuardDogTestBase { initGuardDog(fakestats_, config_multikill_); auto unpet_dog_ = guard_dog_->createWatchDog(api_->threadFactory().currentThreadId(), "test_thread"); + dogs_.emplace_back(unpet_dog_); guard_dog_->forceCheckForTest(); auto second_dog_ = guard_dog_->createWatchDog(api_->threadFactory().currentThreadId(), "test_thread"); + dogs_.emplace_back(second_dog_); guard_dog_->forceCheckForTest(); time_system_->advanceTimeWait(std::chrono::milliseconds(499)); // 1 ms shy of multi-death. } + /** + * This does everything but the final forceCheckForTest() that should cause + * death for the multiple kill case using threshold (100% of watchdogs over the threshold). + */ + void setupForMultiDeathThreshold() { + InSequence s; + initGuardDog(fakestats_, config_multikill_threshold_); + + // Creates 5 watchdogs. + for (int i = 0; i < 5; ++i) { + auto dog = guard_dog_->createWatchDog(api_->threadFactory().currentThreadId(), "test_thread"); + dogs_.emplace_back(dog); + + if (i == 0) { + unpet_dog_ = dog; + } else if (i == 1) { + second_dog_ = dog; + } + + guard_dog_->forceCheckForTest(); + } + + time_system_->advanceTimeWait(std::chrono::milliseconds(499)); // 1 ms shy of multi-death. + } + NiceMock config_kill_; NiceMock config_multikill_; + NiceMock config_multikill_threshold_; NiceMock fakestats_; WatchDogSharedPtr unpet_dog_; WatchDogSharedPtr second_dog_; + std::vector dogs_; // Tracks all watchdogs created. }; INSTANTIATE_TEST_SUITE_P(TimeSystemType, GuardDogDeathTest, @@ -174,6 +206,34 @@ TEST_P(GuardDogAlmostDeadTest, MultiKillNoFinalCheckTest) { SetupForMultiDeath(); } +TEST_P(GuardDogDeathTest, MultiKillThresholdDeathTest) { + auto die_function = [&]() -> void { + setupForMultiDeathThreshold(); + + // Pet the last two dogs so we're just at the threshold that causes death. + dogs_.at(4)->touch(); + dogs_.at(3)->touch(); + + time_system_->advanceTimeWait(std::chrono::milliseconds(2)); // 1 ms past multi-death. + guard_dog_->forceCheckForTest(); + }; + EXPECT_DEATH(die_function(), ""); +} + +TEST_P(GuardDogAlmostDeadTest, MultiKillUnderThreshold) { + // This does everything the death test does except it pets an additional watchdog + // that causes us to be under the threshold (60%) of multikill death. + setupForMultiDeathThreshold(); + + // Pet the last three dogs so we're just under the threshold that causes death. + dogs_.at(4)->touch(); + dogs_.at(3)->touch(); + dogs_.at(2)->touch(); + + time_system_->advanceTimeWait(std::chrono::milliseconds(2)); // 1 ms past multi-death. + guard_dog_->forceCheckForTest(); +} + TEST_P(GuardDogAlmostDeadTest, NearDeathTest) { // This ensures that if only one thread surpasses the multiple kill threshold // there is no death. The positive case is covered in MultiKillDeathTest. 
@@ -195,7 +255,7 @@ TEST_P(GuardDogAlmostDeadTest, NearDeathTest) { class GuardDogMissTest : public GuardDogTestBase { protected: - GuardDogMissTest() : config_miss_(500, 1000, 0, 0), config_mega_(1000, 500, 0, 0) {} + GuardDogMissTest() : config_miss_(500, 1000, 0, 0, 0), config_mega_(1000, 500, 0, 0, 0) {} void checkMiss(uint64_t count, const std::string& descriptor) { EXPECT_EQ(count, TestUtility::findCounter(stats_store_, "server.watchdog_miss")->value()) @@ -315,27 +375,27 @@ TEST_P(GuardDogMissTest, MissCountTest) { TEST_P(GuardDogTestBase, StartStopTest) { NiceMock stats; - NiceMock config(0, 0, 0, 0); + NiceMock config(0, 0, 0, 0, 0); initGuardDog(stats, config); } TEST_P(GuardDogTestBase, LoopIntervalNoKillTest) { NiceMock stats; - NiceMock config(40, 50, 0, 0); + NiceMock config(40, 50, 0, 0, 0); initGuardDog(stats, config); EXPECT_EQ(guard_dog_->loopIntervalForTest(), std::chrono::milliseconds(40)); } TEST_P(GuardDogTestBase, LoopIntervalTest) { NiceMock stats; - NiceMock config(100, 90, 1000, 500); + NiceMock config(100, 90, 1000, 500, 0); initGuardDog(stats, config); EXPECT_EQ(guard_dog_->loopIntervalForTest(), std::chrono::milliseconds(90)); } TEST_P(GuardDogTestBase, WatchDogThreadIdTest) { NiceMock stats; - NiceMock config(100, 90, 1000, 500); + NiceMock config(100, 90, 1000, 500, 0); initGuardDog(stats, config); auto watched_dog = guard_dog_->createWatchDog(api_->threadFactory().currentThreadId(), "test_thread"); diff --git a/tools/spelling/spelling_dictionary.txt b/tools/spelling/spelling_dictionary.txt index e8aa2fcd6b50..eb38bfe47eba 100644 --- a/tools/spelling/spelling_dictionary.txt +++ b/tools/spelling/spelling_dictionary.txt @@ -26,6 +26,7 @@ CAS CB CDS CEL +ceil CHACHA CHLO CHMOD From 90c6a27bb28b4f8288392a53af8d2e88ef059a8b Mon Sep 17 00:00:00 2001 From: John Murray <63568820+murray-stripe@users.noreply.github.com> Date: Tue, 21 Jul 2020 14:17:10 -0400 Subject: [PATCH 702/909] Statsd UDP Buffer (#11724) Optional buffer on statsd udp Signed-off-by: John Murray --- api/envoy/config/metrics/v3/stats.proto | 8 + api/envoy/config/metrics/v4alpha/stats.proto | 8 + .../envoy/config/metrics/v3/stats.proto | 8 + .../envoy/config/metrics/v4alpha/stats.proto | 8 + .../stat_sinks/common/statsd/statsd.cc | 54 ++++++- .../stat_sinks/common/statsd/statsd.h | 19 ++- .../stat_sinks/dog_statsd/config.cc | 8 +- .../common/statsd/udp_statsd_test.cc | 145 ++++++++++++++++-- .../stats_sinks/dog_statsd/config_test.cc | 62 +++++++- 9 files changed, 295 insertions(+), 25 deletions(-) diff --git a/api/envoy/config/metrics/v3/stats.proto b/api/envoy/config/metrics/v3/stats.proto index f2f12d73a625..75c73bd7acc2 100644 --- a/api/envoy/config/metrics/v3/stats.proto +++ b/api/envoy/config/metrics/v3/stats.proto @@ -330,6 +330,14 @@ message DogStatsdSink { // Optional custom metric name prefix. See :ref:`StatsdSink's prefix field // ` for more details. string prefix = 3; + + // Optional max datagram size to use when sending UDP messages. By default Envoy + // will emit one metric per datagram. By specifying a max-size larger than a single + // metric, Envoy will emit multiple, new-line separated metrics. The max datagram + // size should not exceed your network's MTU. + // + // Note that this value may not be respected if smaller than a single metric. + google.protobuf.UInt64Value max_bytes_per_datagram = 4 [(validate.rules).uint64 = {gt: 0}]; } // Stats configuration proto schema for built-in *envoy.stat_sinks.hystrix* sink. 
diff --git a/api/envoy/config/metrics/v4alpha/stats.proto b/api/envoy/config/metrics/v4alpha/stats.proto index f9a4549746c6..d8ced1aca34c 100644 --- a/api/envoy/config/metrics/v4alpha/stats.proto +++ b/api/envoy/config/metrics/v4alpha/stats.proto @@ -330,6 +330,14 @@ message DogStatsdSink { // Optional custom metric name prefix. See :ref:`StatsdSink's prefix field // ` for more details. string prefix = 3; + + // Optional max datagram size to use when sending UDP messages. By default Envoy + // will emit one metric per datagram. By specifying a max-size larger than a single + // metric, Envoy will emit multiple, new-line separated metrics. The max datagram + // size should not exceed your network's MTU. + // + // Note that this value may not be respected if smaller than a single metric. + google.protobuf.UInt64Value max_bytes_per_datagram = 4 [(validate.rules).uint64 = {gt: 0}]; } // Stats configuration proto schema for built-in *envoy.stat_sinks.hystrix* sink. diff --git a/generated_api_shadow/envoy/config/metrics/v3/stats.proto b/generated_api_shadow/envoy/config/metrics/v3/stats.proto index c6295b8326ac..458e0aa60519 100644 --- a/generated_api_shadow/envoy/config/metrics/v3/stats.proto +++ b/generated_api_shadow/envoy/config/metrics/v3/stats.proto @@ -328,6 +328,14 @@ message DogStatsdSink { // Optional custom metric name prefix. See :ref:`StatsdSink's prefix field // ` for more details. string prefix = 3; + + // Optional max datagram size to use when sending UDP messages. By default Envoy + // will emit one metric per datagram. By specifying a max-size larger than a single + // metric, Envoy will emit multiple, new-line separated metrics. The max datagram + // size should not exceed your network's MTU. + // + // Note that this value may not be respected if smaller than a single metric. + google.protobuf.UInt64Value max_bytes_per_datagram = 4 [(validate.rules).uint64 = {gt: 0}]; } // Stats configuration proto schema for built-in *envoy.stat_sinks.hystrix* sink. diff --git a/generated_api_shadow/envoy/config/metrics/v4alpha/stats.proto b/generated_api_shadow/envoy/config/metrics/v4alpha/stats.proto index f9a4549746c6..d8ced1aca34c 100644 --- a/generated_api_shadow/envoy/config/metrics/v4alpha/stats.proto +++ b/generated_api_shadow/envoy/config/metrics/v4alpha/stats.proto @@ -330,6 +330,14 @@ message DogStatsdSink { // Optional custom metric name prefix. See :ref:`StatsdSink's prefix field // ` for more details. string prefix = 3; + + // Optional max datagram size to use when sending UDP messages. By default Envoy + // will emit one metric per datagram. By specifying a max-size larger than a single + // metric, Envoy will emit multiple, new-line separated metrics. The max datagram + // size should not exceed your network's MTU. + // + // Note that this value may not be respected if smaller than a single metric. + google.protobuf.UInt64Value max_bytes_per_datagram = 4 [(validate.rules).uint64 = {gt: 0}]; } // Stats configuration proto schema for built-in *envoy.stat_sinks.hystrix* sink. 
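Before the implementation diff below, a small standalone C++ sketch (an illustration only, not the Envoy implementation; the function name batchMetrics and the sample strings are made up, though the strings mirror the new unit tests) of the batching rule the max_bytes_per_datagram comment describes: serialized metrics are joined with newlines into one datagram, the datagram is flushed when the next metric would overflow the limit, and a metric that is itself larger than the limit is written on its own.

#include <cstddef>
#include <string>
#include <vector>

// Splits already-serialized statsd lines into newline-joined datagrams of at most
// `limit` bytes; oversized lines are emitted as their own datagram.
std::vector<std::string> batchMetrics(const std::vector<std::string>& metrics, size_t limit) {
  std::vector<std::string> datagrams;
  std::string buffer;
  auto flush = [&]() {
    if (!buffer.empty()) {
      datagrams.push_back(buffer);
      buffer.clear();
    }
  };
  for (const std::string& metric : metrics) {
    if (metric.size() >= limit) {
      flush();
      datagrams.push_back(metric); // too large to buffer, send directly
      continue;
    }
    if (buffer.size() + metric.size() + 1 > limit) {
      flush(); // adding this metric would overflow, send what we have first
    } else if (!buffer.empty()) {
      buffer += "\n"; // separate metric entries within one datagram
    }
    buffer += metric;
  }
  flush();
  return datagrams;
}

// With limit = 64 this reproduces the CheckBufferedWritesExceedingBufferSize expectation:
// {"envoy.test_counter_1:1|c", "envoy.test_counter_2:1|c", "envoy.test_gauge:1|g"} becomes
// {"envoy.test_counter_1:1|c\nenvoy.test_counter_2:1|c", "envoy.test_gauge:1|g"}.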
diff --git a/source/extensions/stat_sinks/common/statsd/statsd.cc b/source/extensions/stat_sinks/common/statsd/statsd.cc index c502cb6e02ca..3384013ce8cf 100644 --- a/source/extensions/stat_sinks/common/statsd/statsd.cc +++ b/source/extensions/stat_sinks/common/statsd/statsd.cc @@ -4,6 +4,7 @@ #include #include +#include "envoy/buffer/buffer.h" #include "envoy/common/exception.h" #include "envoy/common/platform.h" #include "envoy/event/dispatcher.h" @@ -11,6 +12,7 @@ #include "envoy/upstream/cluster_manager.h" #include "common/api/os_sys_calls_impl.h" +#include "common/buffer/buffer_impl.h" #include "common/common/assert.h" #include "common/common/fmt.h" #include "common/common/utility.h" @@ -38,11 +40,16 @@ void UdpStatsdSink::WriterImpl::write(const std::string& message) { Network::Utility::writeToSocket(*io_handle_, &slice, 1, nullptr, *parent_.server_address_); } +void UdpStatsdSink::WriterImpl::writeBuffer(Buffer::Instance& data) { + Network::Utility::writeToSocket(*io_handle_, data, nullptr, *parent_.server_address_); +} + UdpStatsdSink::UdpStatsdSink(ThreadLocal::SlotAllocator& tls, Network::Address::InstanceConstSharedPtr address, const bool use_tag, - const std::string& prefix) + const std::string& prefix, absl::optional buffer_size) : tls_(tls.allocateSlot()), server_address_(std::move(address)), use_tag_(use_tag), - prefix_(prefix.empty() ? Statsd::getDefaultPrefix() : prefix) { + prefix_(prefix.empty() ? Statsd::getDefaultPrefix() : prefix), + buffer_size_(buffer_size.value_or(0)) { tls_->set([this](Event::Dispatcher&) -> ThreadLocal::ThreadLocalObjectSharedPtr { return std::make_shared(*this); }); @@ -50,22 +57,57 @@ UdpStatsdSink::UdpStatsdSink(ThreadLocal::SlotAllocator& tls, void UdpStatsdSink::flush(Stats::MetricSnapshot& snapshot) { Writer& writer = tls_->getTyped(); + Buffer::OwnedImpl buffer; + for (const auto& counter : snapshot.counters()) { if (counter.counter_.get().used()) { - writer.write(absl::StrCat(prefix_, ".", getName(counter.counter_.get()), ":", counter.delta_, - "|c", buildTagStr(counter.counter_.get().tags()))); + const std::string counter_str = + absl::StrCat(prefix_, ".", getName(counter.counter_.get()), ":", counter.delta_, "|c", + buildTagStr(counter.counter_.get().tags())); + writeBuffer(buffer, writer, counter_str); } } for (const auto& gauge : snapshot.gauges()) { if (gauge.get().used()) { - writer.write(absl::StrCat(prefix_, ".", getName(gauge.get()), ":", gauge.get().value(), "|g", - buildTagStr(gauge.get().tags()))); + const std::string gauge_str = + absl::StrCat(prefix_, ".", getName(gauge.get()), ":", gauge.get().value(), "|g", + buildTagStr(gauge.get().tags())); + writeBuffer(buffer, writer, gauge_str); } } + + flushBuffer(buffer, writer); // TODO(efimki): Add support of text readouts stats. } +void UdpStatsdSink::writeBuffer(Buffer::OwnedImpl& buffer, Writer& writer, + const std::string& statsd_metric) const { + if (statsd_metric.length() >= buffer_size_) { + // Our statsd_metric is too large to fit into the buffer, skip buffering and write directly + writer.write(statsd_metric); + } else { + if ((buffer.length() + statsd_metric.length() + 1) > buffer_size_) { + // If we add the new statsd_metric, we'll overflow our buffer. Flush the buffer to make + // room for the new statsd_metric. + flushBuffer(buffer, writer); + } else if (buffer.length() > 0) { + // We have room and have metrics already in the buffer, add a newline to separate + // metric entries. 
+ buffer.add("\n"); + } + buffer.add(statsd_metric); + } +} + +void UdpStatsdSink::flushBuffer(Buffer::OwnedImpl& buffer, Writer& writer) const { + if (buffer.length() == 0) { + return; + } + writer.writeBuffer(buffer); + buffer.drain(buffer.length()); +} + void UdpStatsdSink::onHistogramComplete(const Stats::Histogram& histogram, uint64_t value) { // For statsd histograms are all timers in milliseconds, Envoy histograms are however // not necessarily timers in milliseconds, for Envoy histograms suffixed with their corresponding diff --git a/source/extensions/stat_sinks/common/statsd/statsd.h b/source/extensions/stat_sinks/common/statsd/statsd.h index 41218ace192d..b7eb8bfac627 100644 --- a/source/extensions/stat_sinks/common/statsd/statsd.h +++ b/source/extensions/stat_sinks/common/statsd/statsd.h @@ -1,5 +1,6 @@ #pragma once +#include "envoy/buffer/buffer.h" #include "envoy/common/platform.h" #include "envoy/local_info/local_info.h" #include "envoy/network/connection.h" @@ -15,6 +16,8 @@ #include "common/common/macros.h" #include "common/network/io_socket_handle_impl.h" +#include "absl/types/optional.h" + namespace Envoy { namespace Extensions { namespace StatSinks { @@ -34,15 +37,19 @@ class UdpStatsdSink : public Stats::Sink { class Writer : public ThreadLocal::ThreadLocalObject { public: virtual void write(const std::string& message) PURE; + virtual void writeBuffer(Buffer::Instance& data) PURE; }; UdpStatsdSink(ThreadLocal::SlotAllocator& tls, Network::Address::InstanceConstSharedPtr address, - const bool use_tag, const std::string& prefix = getDefaultPrefix()); + const bool use_tag, const std::string& prefix = getDefaultPrefix(), + absl::optional buffer_size = absl::nullopt); // For testing. UdpStatsdSink(ThreadLocal::SlotAllocator& tls, const std::shared_ptr& writer, - const bool use_tag, const std::string& prefix = getDefaultPrefix()) + const bool use_tag, const std::string& prefix = getDefaultPrefix(), + absl::optional buffer_size = absl::nullopt) : tls_(tls.allocateSlot()), use_tag_(use_tag), - prefix_(prefix.empty() ? getDefaultPrefix() : prefix) { + prefix_(prefix.empty() ? getDefaultPrefix() : prefix), + buffer_size_(buffer_size.value_or(0)) { tls_->set( [writer](Event::Dispatcher&) -> ThreadLocal::ThreadLocalObjectSharedPtr { return writer; }); } @@ -52,6 +59,7 @@ class UdpStatsdSink : public Stats::Sink { void onHistogramComplete(const Stats::Histogram& histogram, uint64_t value) override; bool getUseTagForTest() { return use_tag_; } + uint64_t getBufferSizeForTest() { return buffer_size_; } const std::string& getPrefix() { return prefix_; } private: @@ -64,12 +72,16 @@ class UdpStatsdSink : public Stats::Sink { // Writer void write(const std::string& message) override; + void writeBuffer(Buffer::Instance& data) override; private: UdpStatsdSink& parent_; const Network::IoHandlePtr io_handle_; }; + void flushBuffer(Buffer::OwnedImpl& buffer, Writer& writer) const; + void writeBuffer(Buffer::OwnedImpl& buffer, Writer& writer, const std::string& data) const; + const std::string getName(const Stats::Metric& metric) const; const std::string buildTagStr(const std::vector& tags) const; @@ -78,6 +90,7 @@ class UdpStatsdSink : public Stats::Sink { const bool use_tag_; // Prefix for all flushed stats. 
const std::string prefix_; + const uint64_t buffer_size_; }; /** diff --git a/source/extensions/stat_sinks/dog_statsd/config.cc b/source/extensions/stat_sinks/dog_statsd/config.cc index fecd087b2f7f..8e346db5b32e 100644 --- a/source/extensions/stat_sinks/dog_statsd/config.cc +++ b/source/extensions/stat_sinks/dog_statsd/config.cc @@ -11,6 +11,8 @@ #include "extensions/stat_sinks/common/statsd/statsd.h" #include "extensions/stat_sinks/well_known_names.h" +#include "absl/types/optional.h" + namespace Envoy { namespace Extensions { namespace StatSinks { @@ -24,8 +26,12 @@ Stats::SinkPtr DogStatsdSinkFactory::createStatsSink(const Protobuf::Message& co Network::Address::InstanceConstSharedPtr address = Network::Address::resolveProtoAddress(sink_config.address()); ENVOY_LOG(debug, "dog_statsd UDP ip address: {}", address->asString()); + absl::optional max_bytes; + if (sink_config.has_max_bytes_per_datagram()) { + max_bytes = sink_config.max_bytes_per_datagram().value(); + } return std::make_unique(server.threadLocal(), std::move(address), - true, sink_config.prefix()); + true, sink_config.prefix(), max_bytes); } ProtobufTypes::MessagePtr DogStatsdSinkFactory::createEmptyConfigProto() { diff --git a/test/extensions/stats_sinks/common/statsd/udp_statsd_test.cc b/test/extensions/stats_sinks/common/statsd/udp_statsd_test.cc index 9bad9d78a873..a97f07dbce00 100644 --- a/test/extensions/stats_sinks/common/statsd/udp_statsd_test.cc +++ b/test/extensions/stats_sinks/common/statsd/udp_statsd_test.cc @@ -32,6 +32,15 @@ namespace { class MockWriter : public UdpStatsdSink::Writer { public: MOCK_METHOD(void, write, (const std::string& message)); + MOCK_METHOD(void, writeBuffer, (Buffer::Instance & buffer)); + + void delegateBufferFake() { + ON_CALL(*this, writeBuffer).WillByDefault([this](Buffer::Instance& buffer) { + this->buffer_writes.push_back(buffer.toString()); + }); + } + + std::vector buffer_writes; }; // Regression test for https://github.com/envoyproxy/envoy/issues/8911 @@ -152,8 +161,9 @@ TEST_P(UdpStatsdSinkWithTagsTest, InitWithIpAddress) { TEST(UdpStatsdSinkTest, CheckActualStats) { NiceMock snapshot; auto writer_ptr = std::make_shared>(); + writer_ptr->delegateBufferFake(); NiceMock tls_; - UdpStatsdSink sink(tls_, writer_ptr, false); + UdpStatsdSink sink(tls_, writer_ptr, false, getDefaultPrefix(), 1024); NiceMock counter; counter.name_ = "test_counter"; @@ -161,9 +171,11 @@ TEST(UdpStatsdSinkTest, CheckActualStats) { counter.latch_ = 1; snapshot.counters_.push_back({1, counter}); - EXPECT_CALL(*std::dynamic_pointer_cast>(writer_ptr), - write("envoy.test_counter:1|c")); + EXPECT_CALL(*std::dynamic_pointer_cast>(writer_ptr), writeBuffer(_)) + .Times(1); sink.flush(snapshot); + EXPECT_EQ(writer_ptr->buffer_writes.size(), 1); + EXPECT_EQ(writer_ptr->buffer_writes.at(0), "envoy.test_counter:1|c"); counter.used_ = false; NiceMock gauge; @@ -172,9 +184,10 @@ TEST(UdpStatsdSinkTest, CheckActualStats) { gauge.used_ = true; snapshot.gauges_.push_back(gauge); - EXPECT_CALL(*std::dynamic_pointer_cast>(writer_ptr), - write("envoy.test_gauge:1|g")); + EXPECT_CALL(*std::dynamic_pointer_cast>(writer_ptr), writeBuffer(_)); sink.flush(snapshot); + EXPECT_EQ(writer_ptr->buffer_writes.size(), 2); + EXPECT_EQ(writer_ptr->buffer_writes.at(1), "envoy.test_gauge:1|g"); NiceMock timer; timer.name_ = "test_timer"; @@ -185,11 +198,13 @@ TEST(UdpStatsdSinkTest, CheckActualStats) { tls_.shutdownThread(); } -TEST(UdpStatsdSinkTest, CheckActualStatsWithCustomPrefix) { +TEST(UdpStatsdSinkTest, 
CheckMetricLargerThanBuffer) { NiceMock snapshot; auto writer_ptr = std::make_shared>(); + writer_ptr->delegateBufferFake(); NiceMock tls_; - UdpStatsdSink sink(tls_, writer_ptr, false, "test_prefix"); + uint64_t buffer_size = 4; + UdpStatsdSink sink(tls_, writer_ptr, false, getDefaultPrefix(), buffer_size); NiceMock counter; counter.name_ = "test_counter"; @@ -197,11 +212,112 @@ TEST(UdpStatsdSinkTest, CheckActualStatsWithCustomPrefix) { counter.latch_ = 1; snapshot.counters_.push_back({1, counter}); + // Expect the metric to skip the buffer EXPECT_CALL(*std::dynamic_pointer_cast>(writer_ptr), - write("test_prefix.test_counter:1|c")); + write("envoy.test_counter:1|c")); sink.flush(snapshot); counter.used_ = false; + NiceMock gauge; + gauge.name_ = "test_gauge"; + gauge.value_ = 1; + gauge.used_ = true; + snapshot.gauges_.push_back(gauge); + + // Expect the metric to skip the buffer + EXPECT_CALL(*std::dynamic_pointer_cast>(writer_ptr), + write("envoy.test_gauge:1|g")); + sink.flush(snapshot); + + tls_.shutdownThread(); +} + +TEST(UdpStatsdSinkTest, CheckBufferedWritesWithinBufferSize) { + NiceMock snapshot; + auto writer_ptr = std::make_shared>(); + writer_ptr->delegateBufferFake(); + NiceMock tls_; + uint64_t buffer_size = 1024; + UdpStatsdSink sink(tls_, writer_ptr, false, getDefaultPrefix(), buffer_size); + + NiceMock counter; + counter.name_ = "test_counter"; + counter.used_ = true; + counter.latch_ = 1; + snapshot.counters_.push_back({1, counter}); + + NiceMock gauge; + gauge.name_ = "test_gauge"; + gauge.value_ = 1; + gauge.used_ = true; + snapshot.gauges_.push_back(gauge); + + // Expect both metrics to be present in single write + EXPECT_CALL(*std::dynamic_pointer_cast>(writer_ptr), writeBuffer(_)) + .Times(1); + sink.flush(snapshot); + EXPECT_EQ(writer_ptr->buffer_writes.size(), 1); + EXPECT_EQ(writer_ptr->buffer_writes.at(0), "envoy.test_counter:1|c\nenvoy.test_gauge:1|g"); + + tls_.shutdownThread(); +} + +TEST(UdpStatsdSinkTest, CheckBufferedWritesExceedingBufferSize) { + NiceMock snapshot; + auto writer_ptr = std::make_shared>(); + writer_ptr->delegateBufferFake(); + NiceMock tls_; + uint64_t buffer_size = 64; + UdpStatsdSink sink(tls_, writer_ptr, false, getDefaultPrefix(), buffer_size); + + NiceMock counter_1; + counter_1.name_ = "test_counter_1"; + counter_1.used_ = true; + counter_1.latch_ = 1; + snapshot.counters_.push_back({1, counter_1}); + + NiceMock counter_2; + counter_2.name_ = "test_counter_2"; + counter_2.used_ = true; + counter_2.latch_ = 1; + snapshot.counters_.push_back({1, counter_2}); + + NiceMock gauge; + gauge.name_ = "test_gauge"; + gauge.value_ = 1; + gauge.used_ = true; + snapshot.gauges_.push_back(gauge); + + // Expect both metrics to be present in single write + EXPECT_CALL(*std::dynamic_pointer_cast>(writer_ptr), writeBuffer(_)) + .Times(2); + sink.flush(snapshot); + EXPECT_EQ(writer_ptr->buffer_writes.size(), 2); + EXPECT_EQ(writer_ptr->buffer_writes.at(0), "envoy.test_counter_1:1|c\nenvoy.test_counter_2:1|c"); + EXPECT_EQ(writer_ptr->buffer_writes.at(1), "envoy.test_gauge:1|g"); + + tls_.shutdownThread(); +} + +TEST(UdpStatsdSinkTest, CheckActualStatsWithCustomPrefix) { + NiceMock snapshot; + auto writer_ptr = std::make_shared>(); + writer_ptr->delegateBufferFake(); + NiceMock tls_; + UdpStatsdSink sink(tls_, writer_ptr, false, "test_prefix", 1024); + + NiceMock counter; + counter.name_ = "test_counter"; + counter.used_ = true; + counter.latch_ = 1; + snapshot.counters_.push_back({1, counter}); + + EXPECT_CALL(*std::dynamic_pointer_cast>(writer_ptr), 
writeBuffer(_)); + sink.flush(snapshot); + EXPECT_EQ(writer_ptr->buffer_writes.size(), 1); + EXPECT_EQ(writer_ptr->buffer_writes.at(0), "test_prefix.test_counter:1|c"); + counter.used_ = false; + tls_.shutdownThread(); } @@ -249,8 +365,9 @@ TEST(UdpStatsdSinkTest, SiSuffix) { TEST(UdpStatsdSinkWithTagsTest, CheckActualStats) { NiceMock snapshot; auto writer_ptr = std::make_shared>(); + writer_ptr->delegateBufferFake(); NiceMock tls_; - UdpStatsdSink sink(tls_, writer_ptr, true); + UdpStatsdSink sink(tls_, writer_ptr, true, getDefaultPrefix(), 1024); std::vector tags = {Stats::Tag{"key1", "value1"}, Stats::Tag{"key2", "value2"}}; NiceMock counter; @@ -260,9 +377,10 @@ TEST(UdpStatsdSinkWithTagsTest, CheckActualStats) { counter.setTags(tags); snapshot.counters_.push_back({1, counter}); - EXPECT_CALL(*std::dynamic_pointer_cast>(writer_ptr), - write("envoy.test_counter:1|c|#key1:value1,key2:value2")); + EXPECT_CALL(*std::dynamic_pointer_cast>(writer_ptr), writeBuffer(_)); sink.flush(snapshot); + EXPECT_EQ(writer_ptr->buffer_writes.size(), 1); + EXPECT_EQ(writer_ptr->buffer_writes.at(0), "envoy.test_counter:1|c|#key1:value1,key2:value2"); counter.used_ = false; NiceMock gauge; @@ -272,9 +390,10 @@ TEST(UdpStatsdSinkWithTagsTest, CheckActualStats) { gauge.setTags(tags); snapshot.gauges_.push_back(gauge); - EXPECT_CALL(*std::dynamic_pointer_cast>(writer_ptr), - write("envoy.test_gauge:1|g|#key1:value1,key2:value2")); + EXPECT_CALL(*std::dynamic_pointer_cast>(writer_ptr), writeBuffer(_)); sink.flush(snapshot); + EXPECT_EQ(writer_ptr->buffer_writes.size(), 2); + EXPECT_EQ(writer_ptr->buffer_writes.at(1), "envoy.test_gauge:1|g|#key1:value1,key2:value2"); NiceMock timer; timer.name_ = "test_timer"; diff --git a/test/extensions/stats_sinks/dog_statsd/config_test.cc b/test/extensions/stats_sinks/dog_statsd/config_test.cc index e3a20a067ca7..1e84b2b16e09 100644 --- a/test/extensions/stats_sinks/dog_statsd/config_test.cc +++ b/test/extensions/stats_sinks/dog_statsd/config_test.cc @@ -37,7 +37,8 @@ TEST_P(DogStatsdConfigLoopbackTest, ValidUdpIp) { envoy::config::core::v3::Address& address = *sink_config.mutable_address(); envoy::config::core::v3::SocketAddress& socket_address = *address.mutable_socket_address(); socket_address.set_protocol(envoy::config::core::v3::SocketAddress::UDP); - auto loopback_flavor = Network::Test::getCanonicalLoopbackAddress(GetParam()); + Network::Address::InstanceConstSharedPtr loopback_flavor = + Network::Test::getCanonicalLoopbackAddress(GetParam()); socket_address.set_address(loopback_flavor->ip()->addressAsString()); socket_address.set_port_value(8125); @@ -65,6 +66,62 @@ TEST(DogStatsdConfigTest, ValidateFail) { ProtoValidationException); } +TEST_P(DogStatsdConfigLoopbackTest, CustomBufferSize) { + const std::string name = StatsSinkNames::get().DogStatsd; + + envoy::config::metrics::v3::DogStatsdSink sink_config; + sink_config.mutable_max_bytes_per_datagram()->set_value(128); + envoy::config::core::v3::Address& address = *sink_config.mutable_address(); + envoy::config::core::v3::SocketAddress& socket_address = *address.mutable_socket_address(); + socket_address.set_protocol(envoy::config::core::v3::SocketAddress::UDP); + Network::Address::InstanceConstSharedPtr loopback_flavor = + Network::Test::getCanonicalLoopbackAddress(GetParam()); + socket_address.set_address(loopback_flavor->ip()->addressAsString()); + socket_address.set_port_value(8125); + + Server::Configuration::StatsSinkFactory* factory = + Registry::FactoryRegistry::getFactory(name); + ASSERT_NE(factory, 
nullptr); + + ProtobufTypes::MessagePtr message = factory->createEmptyConfigProto(); + TestUtility::jsonConvert(sink_config, *message); + + NiceMock server; + Stats::SinkPtr sink = factory->createStatsSink(*message, server); + ASSERT_NE(sink, nullptr); + auto udp_sink = dynamic_cast(sink.get()); + ASSERT_NE(udp_sink, nullptr); + EXPECT_EQ(udp_sink->getBufferSizeForTest(), 128); +} + +TEST_P(DogStatsdConfigLoopbackTest, DefaultBufferSize) { + const std::string name = StatsSinkNames::get().DogStatsd; + + envoy::config::metrics::v3::DogStatsdSink sink_config; + envoy::config::core::v3::Address& address = *sink_config.mutable_address(); + envoy::config::core::v3::SocketAddress& socket_address = *address.mutable_socket_address(); + socket_address.set_protocol(envoy::config::core::v3::SocketAddress::UDP); + Network::Address::InstanceConstSharedPtr loopback_flavor = + Network::Test::getCanonicalLoopbackAddress(GetParam()); + socket_address.set_address(loopback_flavor->ip()->addressAsString()); + socket_address.set_port_value(8125); + + Server::Configuration::StatsSinkFactory* factory = + Registry::FactoryRegistry::getFactory(name); + ASSERT_NE(factory, nullptr); + + ProtobufTypes::MessagePtr message = factory->createEmptyConfigProto(); + TestUtility::jsonConvert(sink_config, *message); + + NiceMock server; + Stats::SinkPtr sink = factory->createStatsSink(*message, server); + ASSERT_NE(sink, nullptr); + auto udp_sink = dynamic_cast(sink.get()); + ASSERT_NE(udp_sink, nullptr); + // Expect default buffer size of 0 (no buffering) + EXPECT_EQ(udp_sink->getBufferSizeForTest(), 0); +} + TEST_P(DogStatsdConfigLoopbackTest, WithCustomPrefix) { const std::string name = StatsSinkNames::get().DogStatsd; @@ -72,7 +129,8 @@ TEST_P(DogStatsdConfigLoopbackTest, WithCustomPrefix) { envoy::config::core::v3::Address& address = *sink_config.mutable_address(); envoy::config::core::v3::SocketAddress& socket_address = *address.mutable_socket_address(); socket_address.set_protocol(envoy::config::core::v3::SocketAddress::UDP); - auto loopback_flavor = Network::Test::getCanonicalLoopbackAddress(GetParam()); + Network::Address::InstanceConstSharedPtr loopback_flavor = + Network::Test::getCanonicalLoopbackAddress(GetParam()); socket_address.set_address(loopback_flavor->ip()->addressAsString()); socket_address.set_port_value(8125); From 7d875281d5633cf63aaa4e2d47aa5f4be573a26f Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Tue, 21 Jul 2020 11:49:57 -0700 Subject: [PATCH 703/909] test: link test-only version linkstamp (#12171) * split version_linkstamp to another bazel target * add test_version_linkstamp which is static and link it to tests All tests except those take envoy binary directly (i.e. //test/exe/...) will be cached more efficiently, especially in a manual_stamp defined environment such as Windows. 
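For orientation, the linkstamp contract this change reshuffles is small: library code references two symbols it never defines, and either the stamped linkstamp object (production //source/exe builds) or the static test stub added here supplies the definitions at link time. A minimal sketch of that contract, assuming a hypothetical buildLabel() consumer (only the two symbol names below come from this patch; the consumer function is illustrative, not Envoy's actual version API):

  #include <string>

  // Defined by the stamped linkstamp in production builds, or by the static
  // test stub (test/test_common/test_version_linkstamp.cc) in test builds.
  extern const char build_scm_revision[];
  extern const char build_scm_status[];

  // Hypothetical consumer: because only the defining object changes when the
  // workspace status (stamp) changes, translation units like this one never
  // need to be recompiled, which is what makes test builds cache-friendly.
  std::string buildLabel() {
    return std::string(build_scm_revision) + "/" + build_scm_status;
  }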
Signed-off-by: Lizan Zhou --- bazel/envoy_library.bzl | 5 --- bazel/envoy_test.bzl | 6 +++ source/common/version/BUILD | 44 +++++++++++++--------- source/common/version/version.cc | 1 - source/exe/BUILD | 15 ++++++-- test/exe/BUILD | 2 +- test/test_common/BUILD | 6 +++ test/test_common/test_version_linkstamp.cc | 6 +++ 8 files changed, 56 insertions(+), 29 deletions(-) create mode 100644 test/test_common/test_version_linkstamp.cc diff --git a/bazel/envoy_library.bzl b/bazel/envoy_library.bzl index 40cd6683836e..dd35bcac6f9a 100644 --- a/bazel/envoy_library.bzl +++ b/bazel/envoy_library.bzl @@ -87,7 +87,6 @@ def envoy_cc_library( external_deps = [], tcmalloc_dep = None, repository = "", - linkstamp = None, tags = [], deps = [], strip_include_prefix = None, @@ -122,10 +121,6 @@ def envoy_cc_library( include_prefix = envoy_include_prefix(native.package_name()), alwayslink = 1, linkstatic = envoy_linkstatic(), - linkstamp = select({ - repository + "//bazel:windows_x86_64": None, - "//conditions:default": linkstamp, - }), strip_include_prefix = strip_include_prefix, ) diff --git a/bazel/envoy_test.bzl b/bazel/envoy_test.bzl index ace2fe600cb4..0406dda976ee 100644 --- a/bazel/envoy_test.bzl +++ b/bazel/envoy_test.bzl @@ -102,6 +102,7 @@ def envoy_cc_fuzz_test( name = test_lib_name, deps = deps + envoy_stdlib_deps() + [ repository + "//test/fuzz:fuzz_runner_lib", + repository + "//test/test_common:test_version_linkstamp", ], repository = repository, tags = tags, @@ -179,6 +180,7 @@ def envoy_cc_test( malloc = tcmalloc_external_dep(repository), deps = envoy_stdlib_deps() + deps + [envoy_external_dep_path(dep) for dep in external_deps + ["googletest"]] + [ repository + "//test:main", + repository + "//test/test_common:test_version_linkstamp", ], # from https://github.com/google/googletest/blob/6e1970e2376c14bf658eb88f655a054030353f9f/googlemock/src/gmock.cc#L51 # 2 - by default, mocks act as StrictMocks. @@ -232,12 +234,16 @@ def envoy_cc_test_library( def envoy_cc_test_binary( name, tags = [], + deps = [], **kargs): envoy_cc_binary( name, testonly = 1, linkopts = _envoy_test_linkopts(), tags = tags + ["compilation_db_dep"], + deps = deps + [ + "@envoy//test/test_common:test_version_linkstamp", + ], **kargs ) diff --git a/source/common/version/BUILD b/source/common/version/BUILD index 0b05efd9a4f4..9d726567378a 100644 --- a/source/common/version/BUILD +++ b/source/common/version/BUILD @@ -1,5 +1,6 @@ load( "//bazel:envoy_build_system.bzl", + "envoy_basic_cc_library", "envoy_cc_library", "envoy_package", "envoy_select_boringssl", @@ -14,11 +15,12 @@ genrule( srcs = ["//:VERSION"], outs = ["version_number.h"], cmd = """echo "#define BUILD_VERSION_NUMBER \\"$$(cat $<)\\"" >$@""", + visibility = ["//visibility:private"], ) genrule( name = "generate_version_linkstamp", - outs = ["lib/version_linkstamp.h"], + outs = ["manual_linkstamp.cc"], cmd = select({ # Only iOS builds typically follow this logic, OS/X is built as a normal binary "//bazel:apple": "$(location :generate_version_linkstamp.sh) Library >> $@", @@ -29,12 +31,7 @@ genrule( # Used here because generate_version_linkstamp.sh depends on the workspace status files. 
stamp = 1, tools = [":generate_version_linkstamp.sh"], -) - -genrule( - name = "generate_version_linkstamp_empty", - outs = ["empty/version_linkstamp.h"], - cmd = """>$@""", + visibility = ["//visibility:private"], ) envoy_cc_library( @@ -52,24 +49,35 @@ envoy_cc_library( envoy_cc_library( name = "version_lib", srcs = ["version.cc"], - hdrs = select({ - "//bazel:manual_stamp": [":generate_version_linkstamp"], - # By default the header file is empty. - # This is done so that the definitions linked via the linkstamp rule don't cause collisions. - "//conditions:default": [":generate_version_linkstamp_empty"], - }), copts = envoy_select_boringssl( ["-DENVOY_SSL_VERSION=\\\"BoringSSL-FIPS\\\""], ["-DENVOY_SSL_VERSION=\\\"BoringSSL\\\""], ), - linkstamp = "version_linkstamp.cc", - strip_include_prefix = select({ - "//bazel:manual_stamp": "lib", - "//conditions:default": "empty", - }), deps = [ ":version_includes", "//source/common/common:macros", "//source/common/protobuf:utility_lib", ], ) + +envoy_basic_cc_library( + name = "manual_version_linkstamp", + srcs = [":generate_version_linkstamp"], + visibility = ["//visibility:private"], +) + +envoy_basic_cc_library( + name = "version_linkstamp", + linkstamp = select({ + "//bazel:manual_stamp": None, + "//conditions:default": "version_linkstamp.cc", + }), + # Linking this library makes build cache inefficient, limiting this to //source/exe package only. + # Tests are linked with //test/test_common:test_version_linkstamp. + visibility = ["//source/exe:__pkg__"], + deps = select({ + "//bazel:manual_stamp": [":manual_version_linkstamp"], + "//conditions:default": [], + }), + alwayslink = 1, +) diff --git a/source/common/version/version.cc b/source/common/version/version.cc index 7bc95c5c8c38..d2ddbae3c818 100644 --- a/source/common/version/version.cc +++ b/source/common/version/version.cc @@ -7,7 +7,6 @@ #include "common/common/fmt.h" #include "common/common/macros.h" #include "common/protobuf/utility.h" -#include "common/version/version_linkstamp.h" #include "absl/strings/numbers.h" #include "absl/strings/str_split.h" diff --git a/source/exe/BUILD b/source/exe/BUILD index fc00d543a415..a88969a6ae21 100644 --- a/source/exe/BUILD +++ b/source/exe/BUILD @@ -22,7 +22,7 @@ alias( envoy_cc_binary( name = "envoy-static", stamped = True, - deps = ["envoy_main_entry_lib"], + deps = [":envoy_main_entry_lib"], ) envoy_cc_library( @@ -56,7 +56,7 @@ envoy_cc_library( ) envoy_cc_library( - name = "envoy_main_common_lib", + name = "main_common_lib", srcs = ["main_common.cc"], hdrs = ["main_common.h"], deps = [ @@ -80,6 +80,14 @@ envoy_cc_library( }), ) +envoy_cc_library( + name = "envoy_main_common_lib", + deps = [ + ":main_common_lib", + "//source/common/version:version_linkstamp", + ], +) + envoy_cc_library( name = "envoy_common_with_core_extensions_lib", deps = [ @@ -96,10 +104,9 @@ envoy_cc_library( envoy_cc_library( name = "envoy_main_common_with_core_extensions_lib", - srcs = ["main_common.cc"], - hdrs = ["main_common.h"], deps = [ ":envoy_common_with_core_extensions_lib", + ":main_common_lib", ":platform_impl_lib", ":process_wide_lib", "//source/common/api:os_sys_calls_lib", diff --git a/test/exe/BUILD b/test/exe/BUILD index 2b1d4b80aab5..7b1378dab34b 100644 --- a/test/exe/BUILD +++ b/test/exe/BUILD @@ -67,7 +67,7 @@ envoy_cc_test( tags = ["fails_on_windows"], deps = [ "//source/common/api:api_lib", - "//source/exe:envoy_main_common_lib", + "//source/exe:main_common_lib", "//test/mocks/runtime:runtime_mocks", "//test/test_common:contention_lib", 
"//test/test_common:environment_lib", diff --git a/test/test_common/BUILD b/test/test_common/BUILD index 71693951076f..b01ceabe8925 100644 --- a/test/test_common/BUILD +++ b/test/test_common/BUILD @@ -295,3 +295,9 @@ envoy_cc_test( ":utility_lib", ], ) + +envoy_basic_cc_library( + name = "test_version_linkstamp", + srcs = ["test_version_linkstamp.cc"], + alwayslink = 1, +) diff --git a/test/test_common/test_version_linkstamp.cc b/test/test_common/test_version_linkstamp.cc new file mode 100644 index 000000000000..76d1a8290781 --- /dev/null +++ b/test/test_common/test_version_linkstamp.cc @@ -0,0 +1,6 @@ +// NOLINT(namespace-envoy) +extern const char build_scm_revision[]; +extern const char build_scm_status[]; + +const char build_scm_revision[] = "0"; +const char build_scm_status[] = "test"; From 939ab3911818d0afe59cb9353ba06f1adfa0303e Mon Sep 17 00:00:00 2001 From: phlax Date: Tue, 21 Jul 2020 20:22:56 +0100 Subject: [PATCH 704/909] docs: note file permissions when using Docker (#12208) Add further info on file permissions to Docker docs. See #12112 Risk Level: very low Testing: n/a Docs Changes: yes Release Notes: n/a Signed-off-by: Ryan Northey --- docs/root/start/start.rst | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/docs/root/start/start.rst b/docs/root/start/start.rst index 07fea5347580..2d3e81951aff 100644 --- a/docs/root/start/start.rst +++ b/docs/root/start/start.rst @@ -179,6 +179,20 @@ You can then configure ``envoy`` to log to files in ``/var/log`` The default ``envoy`` ``uid`` and ``gid`` are ``101``. +The ``envoy`` user also needs to have permission to access any required configuration files mounted +into the container. + +If you are running in an environment with a strict ``umask`` setting, you may need to provide envoy with +access either by setting the ``uid`` or ``gid`` of the file, or by making the configuration file readable +by the envoy user. + +One method of doing this without changing any file permissions or running as root inside the container +is to start the container with the host user's ``uid``, for example: + +.. substitution-code-block:: none + + $ docker run -d --name envoy -e ENVOY_UID=`id -u` -p 9901:9901 -p 10000:10000 envoy:v1 + Sandboxes --------- From ae11ba2402769ce33dfb11096673b72efd6eaa39 Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Tue, 21 Jul 2020 13:21:41 -0700 Subject: [PATCH 705/909] hds: fix integration test flakes (#12214) Part of https://github.com/envoyproxy/envoy/issues/12184 Signed-off-by: Matt Klein --- test/integration/hds_integration_test.cc | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/test/integration/hds_integration_test.cc b/test/integration/hds_integration_test.cc index f5fc80cdd556..1c950e2499b1 100644 --- a/test/integration/hds_integration_test.cc +++ b/test/integration/hds_integration_test.cc @@ -61,8 +61,10 @@ class HdsIntegrationTest : public Grpc::VersionedGrpcClientIntegrationParamTest, // Endpoint connections host_upstream_ = std::make_unique(0, FakeHttpConnection::Type::HTTP1, version_, timeSystem()); + host_upstream_->set_allow_unexpected_disconnects(true); host2_upstream_ = std::make_unique(0, FakeHttpConnection::Type::HTTP1, version_, timeSystem()); + host2_upstream_->set_allow_unexpected_disconnects(true); } // Sets up a connection between Envoy and the management server. 
@@ -80,7 +82,6 @@ class HdsIntegrationTest : public Grpc::VersionedGrpcClientIntegrationParamTest, ASSERT_TRUE(host_fake_connection_->waitForNewStream(*dispatcher_, host_stream_)); ASSERT_TRUE(host_stream_->waitForEndStream(*dispatcher_)); - host_upstream_->set_allow_unexpected_disconnects(true); EXPECT_EQ(host_stream_->headers().getPathValue(), "/healthcheck"); EXPECT_EQ(host_stream_->headers().getMethodValue(), "GET"); EXPECT_EQ(host_stream_->headers().getHostValue(), "anna"); @@ -90,7 +91,6 @@ class HdsIntegrationTest : public Grpc::VersionedGrpcClientIntegrationParamTest, ASSERT_TRUE(host2_fake_connection_->waitForNewStream(*dispatcher_, host2_stream_)); ASSERT_TRUE(host2_stream_->waitForEndStream(*dispatcher_)); - host2_upstream_->set_allow_unexpected_disconnects(true); EXPECT_EQ(host2_stream_->headers().getPathValue(), "/healthcheck"); EXPECT_EQ(host2_stream_->headers().getMethodValue(), "GET"); EXPECT_EQ(host2_stream_->headers().getHostValue(), cluster2); @@ -314,9 +314,10 @@ TEST_P(HdsIntegrationTest, SingleEndpointTimeoutHttp) { test_server_->waitForCounterGe("hds_delegate.requests", ++hds_requests_); // Envoy sends a health check message to an endpoint - healthcheckEndpoints(); + ASSERT_TRUE(host_upstream_->waitForRawConnection(host_fake_raw_connection_)); // Endpoint doesn't respond to the health check + ASSERT_TRUE(host_fake_raw_connection_->waitForDisconnect(true)); // Receive updates until the one we expect arrives waitForEndpointHealthResponse(envoy::config::core::v3::TIMEOUT); @@ -380,19 +381,17 @@ TEST_P(HdsIntegrationTest, SingleEndpointTimeoutTcp) { server_health_check_specifier_.mutable_cluster_health_checks(0) ->mutable_health_checks(0) ->mutable_timeout() - ->set_nanos(500000000); // 0.5 seconds + ->set_nanos(100000000); // 0.1 seconds hds_stream_->startGrpcStream(); hds_stream_->sendGrpcMessage(server_health_check_specifier_); test_server_->waitForCounterGe("hds_delegate.requests", ++hds_requests_); // Envoys asks the endpoint if it's healthy - host_upstream_->set_allow_unexpected_disconnects(true); ASSERT_TRUE(host_upstream_->waitForRawConnection(host_fake_raw_connection_)); - ASSERT_TRUE( - host_fake_raw_connection_->waitForData(FakeRawConnection::waitForInexactMatch("Ping"))); // No response from the endpoint + ASSERT_TRUE(host_fake_raw_connection_->waitForDisconnect(true)); // Receive updates until the one we expect arrives waitForEndpointHealthResponse(envoy::config::core::v3::TIMEOUT); @@ -418,7 +417,6 @@ TEST_P(HdsIntegrationTest, SingleEndpointHealthyTcp) { test_server_->waitForCounterGe("hds_delegate.requests", ++hds_requests_); // Envoy asks the endpoint if it's healthy - host_upstream_->set_allow_unexpected_disconnects(true); ASSERT_TRUE(host_upstream_->waitForRawConnection(host_fake_raw_connection_)); ASSERT_TRUE( host_fake_raw_connection_->waitForData(FakeRawConnection::waitForInexactMatch("Ping"))); @@ -453,7 +451,6 @@ TEST_P(HdsIntegrationTest, SingleEndpointUnhealthyTcp) { test_server_->waitForCounterGe("hds_delegate.requests", ++hds_requests_); // Envoy asks the endpoint if it's healthy - host_upstream_->set_allow_unexpected_disconnects(true); ASSERT_TRUE(host_upstream_->waitForRawConnection(host_fake_raw_connection_)); ASSERT_TRUE( host_fake_raw_connection_->waitForData(FakeRawConnection::waitForInexactMatch("Ping"))); @@ -691,7 +688,6 @@ TEST_P(HdsIntegrationTest, TestUpdateMessage) { test_server_->waitForCounterGe("hds_delegate.requests", ++hds_requests_); // Envoy sends a health check message to an endpoint - 
host2_upstream_->set_allow_unexpected_disconnects(true);
   ASSERT_TRUE(host2_upstream_->waitForHttpConnection(*dispatcher_, host2_fake_connection_));
   ASSERT_TRUE(host2_fake_connection_->waitForNewStream(*dispatcher_, host2_stream_));
   ASSERT_TRUE(host2_stream_->waitForEndStream(*dispatcher_));

From d5ae400b889d4ba1ec77b152d9598ae94dbf22b7 Mon Sep 17 00:00:00 2001
From: alyssawilk
Date: Tue, 21 Jul 2020 16:29:53 -0400
Subject: [PATCH 706/909] http: allowing 100 then 500 (#12185)

Fixing a bug where the router filter wouldn't send a local response if the response had already started. This could have been fixed in a lower-risk fashion by keeping the existing logic, and doing sendLocalReply only if non-informational headers had not been sent. This design has the added benefit of being usable in a follow-up PR to call sendLocalReply from encoder filters, which currently force-reset the stream on the response path.

Risk Level: Medium/High
Testing: updated unit tests, new integration tests
Docs Changes: n/a
Release Notes: inline
Runtime guard: envoy.reloadable_features.allow_500_after_100
Fixes #12131

Signed-off-by: Alyssa Wilk
---
 docs/root/version_history/current.rst | 1 +
 include/envoy/http/filter.h | 11 +-
 source/common/http/async_client_impl.cc | 1 +
 source/common/http/async_client_impl.h | 5 +
 source/common/http/conn_manager_impl.cc | 112 +++++++++++-------
 source/common/http/conn_manager_impl.h | 8 +-
 source/common/router/router.cc | 9 +-
 source/common/runtime/runtime_features.cc | 1 +
 test/common/http/conn_manager_impl_test.cc | 3 +-
 test/common/router/router_test.cc | 7 +-
 test/integration/protocol_integration_test.cc | 34 ++++++
 11 files changed, 137 insertions(+), 55 deletions(-)

diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst
index 5acb53a13a3d..e3263fee3d41 100644
--- a/docs/root/version_history/current.rst
+++ b/docs/root/version_history/current.rst
@@ -12,6 +12,7 @@ Minor Behavior Changes
 * compressor: always insert `Vary` headers for compressible resources even if it's decided not to compress a response due to incompatible `Accept-Encoding` value. The `Vary` header needs to be inserted to let a caching proxy in front of Envoy know that the requested resource still can be served with compression applied.
 * http: added :ref:`headers_to_add ` to :ref:`local reply mapper ` to allow its users to add/append/override response HTTP headers to local replies.
 * http: added HCM level configuration of :ref:`error handling on invalid messaging ` which substantially changes Envoy's behavior when encountering invalid HTTP/1.1 defaulting to closing the connection instead of allowing reuse. This can temporarily be reverted by setting `envoy.reloadable_features.hcm_stream_error_on_invalid_message` to false, or permanently reverted by setting the :ref:`HCM option ` to true to restore prior HTTP/1.1 behavior and setting the *new* HTTP/2 configuration :ref:`override_stream_error_on_invalid_http_message ` to false to retain prior HTTP/2 behavior.
+* http: fixed the 100-continue response path to properly handle upstream failure by sending 5xx responses. This behavior can be temporarily reverted by setting `envoy.reloadable_features.allow_500_after_100` to false.
 * http: the per-stream FilterState maintained by the HTTP connection manager will now provide read/write access to the downstream connection FilterState. As such, code that relies on interacting with this might see a change in behavior.
* logging: nghttp2 log messages no longer appear at trace level unless `ENVOY_NGHTTP2_TRACE` is set diff --git a/include/envoy/http/filter.h b/include/envoy/http/filter.h index ee3133b3b1ee..a7967982ffab 100644 --- a/include/envoy/http/filter.h +++ b/include/envoy/http/filter.h @@ -326,10 +326,13 @@ class StreamDecoderFilterCallbacks : public virtual StreamFilterCallbacks { virtual RequestTrailerMap& addDecodedTrailers() PURE; /** - * Create a locally generated response using the provided response_code and body_text parameters. - * If the request was a gRPC request the local reply will be encoded as a gRPC response with a 200 - * HTTP response code and grpc-status and grpc-message headers mapped from the provided - * parameters. + * Attempts to create a locally generated response using the provided response_code and body_text + * parameters. If the request was a gRPC request the local reply will be encoded as a gRPC + * response with a 200 HTTP response code and grpc-status and grpc-message headers mapped from the + * provided parameters. + * + * If a response has already started (e.g. if the router calls sendSendLocalReply after encoding + * headers) this will either ship the reply directly to the downstream codec, or reset the stream. * * @param response_code supplies the HTTP response code. * @param body_text supplies the optional body text which is sent using the text/plain content diff --git a/source/common/http/async_client_impl.cc b/source/common/http/async_client_impl.cc index a989adce30aa..910955355892 100644 --- a/source/common/http/async_client_impl.cc +++ b/source/common/http/async_client_impl.cc @@ -98,6 +98,7 @@ void AsyncStreamImpl::encodeHeaders(ResponseHeaderMapPtr&& headers, bool end_str ENVOY_LOG(debug, "async http request response headers (end_stream={}):\n{}", end_stream, *headers); ASSERT(!remote_closed_); + encoded_response_headers_ = true; stream_callbacks_.onHeaders(std::move(headers), end_stream); closeRemote(end_stream); // At present, the router cleans up stream state as soon as the remote is closed, making a diff --git a/source/common/http/async_client_impl.h b/source/common/http/async_client_impl.h index 15478553627d..8a5826a49e5b 100644 --- a/source/common/http/async_client_impl.h +++ b/source/common/http/async_client_impl.h @@ -361,6 +361,10 @@ class AsyncStreamImpl : public AsyncClient::Stream, const absl::optional grpc_status, absl::string_view details) override { stream_info_.setResponseCodeDetails(details); + if (encoded_response_headers_) { + resetStream(); + return; + } Utility::sendLocalReply( remote_closed_, Utility::EncodeFunctions{ @@ -415,6 +419,7 @@ class AsyncStreamImpl : public AsyncClient::Stream, bool local_closed_{}; bool remote_closed_{}; Buffer::InstancePtr buffered_body_; + bool encoded_response_headers_{}; bool is_grpc_request_{}; bool is_head_request_{false}; bool send_xff_{true}; diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index df958ed68530..b37a31afe165 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -1514,12 +1514,7 @@ void ConnectionManagerImpl::ActiveStream::sendLocalReply( bool is_grpc_request, Code code, absl::string_view body, const std::function& modify_headers, bool is_head_request, const absl::optional grpc_status, absl::string_view details) { - ENVOY_STREAM_LOG(debug, "Sending local reply with details {}", *this, details); - ASSERT(response_headers_ == nullptr); - // For early error handling, do a best-effort attempt to 
create a filter chain - // to ensure access logging. If the filter chain already exists this will be - // a no-op. - createFilterChain(); + stream_info_.setResponseCodeDetails(details); // The BadRequest error code indicates there has been a messaging error. if (Runtime::runtimeFeatureEnabled( @@ -1529,7 +1524,63 @@ void ConnectionManagerImpl::ActiveStream::sendLocalReply( state_.saw_connection_close_ = true; } - stream_info_.setResponseCodeDetails(details); + if (response_headers_.get() == nullptr) { + // If the response has not started at all, send the response through the filter chain. + sendLocalReplyViaFilterChain(is_grpc_request, code, body, modify_headers, is_head_request, + grpc_status, details); + } else if (!state_.non_100_response_headers_encoded_) { + ENVOY_STREAM_LOG(debug, "Sending local reply with details {} directly to the encoder", *this, + details); + // In this case, at least the header and possibly the body has started + // processing through the filter chain, but no non-informational headers + // have been sent downstream. To ensure that filters don't get their + // state machine screwed up, bypass the filter chain and send the local + // reply directly to the codec. + // + // Make sure we won't end up with nested watermark calls from the body buffer. + state_.encoder_filters_streaming_ = true; + Http::Utility::sendLocalReply( + state_.destroyed_, + Utility::EncodeFunctions{ + [&](ResponseHeaderMap& response_headers, Code& code, std::string& body, + absl::string_view& content_type) -> void { + connection_manager_.config_.localReply().rewrite( + request_headers_.get(), response_headers, stream_info_, code, body, content_type); + }, + [&](ResponseHeaderMapPtr&& response_headers, bool end_stream) -> void { + if (modify_headers != nullptr) { + modify_headers(*response_headers); + } + response_headers_ = std::move(response_headers); + encodeHeadersInternal(*response_headers_, end_stream); + }, + [&](Buffer::Instance& data, bool end_stream) -> void { + encodeDataInternal(data, end_stream); + }}, + Utility::LocalReplyData{Grpc::Common::hasGrpcContentType(*request_headers_), code, body, + grpc_status, state_.is_head_request_}); + maybeEndEncode(state_.local_complete_); + } else { + stream_info_.setResponseCodeDetails(details); + // If we land in this branch, response headers have already been sent to the client. + // All we can do at this point is reset the stream. + ENVOY_STREAM_LOG(debug, "Resetting stream due to {}. Prior headers have already been sent", + *this, details); + connection_manager_.doEndStream(*this); + } +} + +void ConnectionManagerImpl::ActiveStream::sendLocalReplyViaFilterChain( + bool is_grpc_request, Code code, absl::string_view body, + const std::function& modify_headers, bool is_head_request, + const absl::optional grpc_status, absl::string_view details) { + ENVOY_STREAM_LOG(debug, "Sending local reply with details {}", *this, details); + ASSERT(response_headers_ == nullptr); + // For early error handling, do a best-effort attempt to create a filter chain + // to ensure access logging. If the filter chain already exists this will be + // a no-op. + createFilterChain(); + Utility::sendLocalReply( state_.destroyed_, Utility::EncodeFunctions{ @@ -1776,6 +1827,8 @@ void ConnectionManagerImpl::ActiveStream::encodeHeadersInternal(ResponseHeaderMa } } + // 100-continue headers are handled via encode100ContinueHeaders. 
+ state_.non_100_response_headers_encoded_ = true; chargeStats(headers); ENVOY_STREAM_LOG(debug, "encoding headers via codec (end_stream={}):\n{}", *this, end_stream, @@ -2564,44 +2617,13 @@ void ConnectionManagerImpl::ActiveStreamEncoderFilter::responseDataTooLarge() { } else { parent_.connection_manager_.stats_.named_.rs_too_large_.inc(); - // If headers have not been sent to the user, send a 500. - if (!headers_continued_) { - // Make sure we won't end up with nested watermark calls from the body buffer. - parent_.state_.encoder_filters_streaming_ = true; - allowIteration(); - parent_.stream_info_.setResponseCodeDetails( - StreamInfo::ResponseCodeDetails::get().ResponsePayloadTooLarge); - // This does not call the standard sendLocalReply because if there is already response data - // we do not want to pass a second set of response headers through the filter chain. - // Instead, call the encodeHeadersInternal / encodeDataInternal helpers - // directly, which maximizes shared code with the normal response path. - Http::Utility::sendLocalReply( - parent_.state_.destroyed_, - Utility::EncodeFunctions{ - [&](ResponseHeaderMap& response_headers, Code& code, std::string& body, - absl::string_view& content_type) -> void { - parent_.connection_manager_.config_.localReply().rewrite( - parent_.request_headers_.get(), response_headers, parent_.stream_info_, code, - body, content_type); - }, - [&](ResponseHeaderMapPtr&& response_headers, bool end_stream) -> void { - parent_.response_headers_ = std::move(response_headers); - parent_.encodeHeadersInternal(*parent_.response_headers_, end_stream); - }, - [&](Buffer::Instance& data, bool end_stream) -> void { - parent_.encodeDataInternal(data, end_stream); - }}, - Utility::LocalReplyData{Grpc::Common::hasGrpcContentType(*parent_.request_headers_), - Http::Code::InternalServerError, - CodeUtility::toString(Http::Code::InternalServerError), - absl::nullopt, parent_.state_.is_head_request_}); - parent_.maybeEndEncode(parent_.state_.local_complete_); - } else { - ENVOY_STREAM_LOG( - debug, "Resetting stream. Response data too large and headers have already been sent", - *this); - resetStream(); - } + // In this case, sendLocalReply will either send a response directly to the encoder, or + // reset the stream. 
+ parent_.sendLocalReply( + parent_.request_headers_ && Grpc::Common::isGrpcRequestHeaders(*parent_.request_headers_), + Http::Code::InternalServerError, CodeUtility::toString(Http::Code::InternalServerError), + nullptr, parent_.state_.is_head_request_, absl::nullopt, + StreamInfo::ResponseCodeDetails::get().ResponsePayloadTooLarge); } } diff --git a/source/common/http/conn_manager_impl.h b/source/common/http/conn_manager_impl.h index 04d3896a3ff8..104f640c1312 100644 --- a/source/common/http/conn_manager_impl.h +++ b/source/common/http/conn_manager_impl.h @@ -494,6 +494,10 @@ class ConnectionManagerImpl : Logger::Loggable, bool is_head_request, const absl::optional grpc_status, absl::string_view details) override; + void sendLocalReplyViaFilterChain( + bool is_grpc_request, Code code, absl::string_view body, + const std::function& modify_headers, bool is_head_request, + const absl::optional grpc_status, absl::string_view details); void encode100ContinueHeaders(ActiveStreamEncoderFilter* filter, ResponseHeaderMap& headers); // As with most of the encode functions, this runs encodeHeaders on various // filters before calling encodeHeadersInternal which does final header munging and passes the @@ -625,7 +629,7 @@ class ConnectionManagerImpl : Logger::Loggable, : remote_complete_(false), local_complete_(false), codec_saw_local_complete_(false), saw_connection_close_(false), successful_upgrade_(false), created_filter_chain_(false), is_internally_created_(false), decorated_propagate_(true), has_continue_headers_(false), - is_head_request_(false) {} + is_head_request_(false), non_100_response_headers_encoded_(false) {} uint32_t filter_call_state_{0}; // The following 3 members are booleans rather than part of the space-saving bitfield as they @@ -653,6 +657,8 @@ class ConnectionManagerImpl : Logger::Loggable, // is ever called, this is set to true so commonContinue resumes processing the 100-Continue. bool has_continue_headers_ : 1; bool is_head_request_ : 1; + // Tracks if headers other than 100-Continue have been encoded to the codec. + bool non_100_response_headers_encoded_ : 1; // Whether a filter has indicated that the request should be treated as a headers only // request. bool decoding_headers_only_{false}; diff --git a/source/common/router/router.cc b/source/common/router/router.cc index 3da9226fcc81..b2d587c33434 100644 --- a/source/common/router/router.cc +++ b/source/common/router/router.cc @@ -36,6 +36,7 @@ #include "common/router/debug_config.h" #include "common/router/retry_state_impl.h" #include "common/router/upstream_request.h" +#include "common/runtime/runtime_features.h" #include "common/runtime/runtime_impl.h" #include "common/stream_info/uint32_accessor_impl.h" #include "common/tracing/http_tracer_impl.h" @@ -902,13 +903,15 @@ void Filter::onStreamMaxDurationReached(UpstreamRequest& upstream_request) { upstream_request.removeFromList(upstream_requests_); cleanup(); - if (downstream_response_started_) { + if (downstream_response_started_ && + !Runtime::runtimeFeatureEnabled("envoy.reloadable_features.allow_500_after_100")) { callbacks_->streamInfo().setResponseCodeDetails( StreamInfo::ResponseCodeDetails::get().UpstreamMaxStreamDurationReached); callbacks_->resetStream(); } else { callbacks_->streamInfo().setResponseFlag( StreamInfo::ResponseFlag::UpstreamMaxStreamDurationReached); + // sendLocalReply may instead reset the stream if downstream_response_started_ is true. 
callbacks_->sendLocalReply( Http::Code::RequestTimeout, "upstream max stream duration reached", modify_headers_, absl::nullopt, StreamInfo::ResponseCodeDetails::get().UpstreamMaxStreamDurationReached); @@ -963,7 +966,8 @@ void Filter::onUpstreamAbort(Http::Code code, StreamInfo::ResponseFlag response_ absl::string_view body, bool dropped, absl::string_view details) { // If we have not yet sent anything downstream, send a response with an appropriate status code. // Otherwise just reset the ongoing response. - if (downstream_response_started_) { + if (downstream_response_started_ && + !Runtime::runtimeFeatureEnabled("envoy.reloadable_features.allow_500_after_100")) { // This will destroy any created retry timers. callbacks_->streamInfo().setResponseCodeDetails(details); cleanup(); @@ -974,6 +978,7 @@ void Filter::onUpstreamAbort(Http::Code code, StreamInfo::ResponseFlag response_ callbacks_->streamInfo().setResponseFlag(response_flags); + // sendLocalReply may instead reset the stream if downstream_response_started_ is true. callbacks_->sendLocalReply( code, body, [dropped, this](Http::ResponseHeaderMap& headers) { diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc index 35b990cd39b7..5a8ce4bcbcd4 100644 --- a/source/common/runtime/runtime_features.cc +++ b/source/common/runtime/runtime_features.cc @@ -58,6 +58,7 @@ constexpr const char* runtime_features[] = { "envoy.reloadable_features.connection_header_sanitization", // Begin alphabetically sorted section. "envoy.reloadable_features.activate_fds_next_event_loop", + "envoy.reloadable_features.allow_500_after_100", "envoy.deprecated_features.allow_deprecated_extension_names", "envoy.reloadable_features.consume_all_retry_headers", "envoy.reloadable_features.disallow_unbounded_access_logs", diff --git a/test/common/http/conn_manager_impl_test.cc b/test/common/http/conn_manager_impl_test.cc index 520d27561d1c..af2c7325c46f 100644 --- a/test/common/http/conn_manager_impl_test.cc +++ b/test/common/http/conn_manager_impl_test.cc @@ -4826,7 +4826,8 @@ TEST_F(HttpConnectionManagerImplTest, HitResponseBufferLimitsAfterHeaders) { .WillOnce(Return(FilterDataStatus::StopIterationAndBuffer)); EXPECT_CALL(stream_, resetStream(_)); EXPECT_LOG_CONTAINS( - "debug", "Resetting stream. Response data too large and headers have already been sent", + "debug", + "Resetting stream due to response_payload_too_large. Prior headers have already been sent", decoder_filters_[0]->callbacks_->encodeData(fake_response, false);); EXPECT_EQ(1U, stats_.named_.rs_too_large_.value()); diff --git a/test/common/router/router_test.cc b/test/common/router/router_test.cc index ddc452e108bf..0a1ef34fc1b2 100644 --- a/test/common/router/router_test.cc +++ b/test/common/router/router_test.cc @@ -3751,10 +3751,13 @@ TEST_F(RouterTest, RetryUpstreamResetResponseStarted) { new Http::TestResponseHeaderMapImpl{{":status", "200"}}); EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(200)); response_decoder->decodeHeaders(std::move(response_headers), false); - absl::string_view rc_details2 = "upstream_reset_after_response_started{remote reset}"; - EXPECT_CALL(callbacks_.stream_info_, setResponseCodeDetails(rc_details2)); EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putResult(Upstream::Outlier::Result::LocalOriginConnectFailed, _)); + // Normally, sendLocalReply will actually send the reply, but in this case the + // HCM will detect the headers have already been sent and not route through + // the encoder again. 
+ EXPECT_CALL(callbacks_, sendLocalReply(_, _, _, _, _)).WillOnce(testing::InvokeWithoutArgs([] { + })); encoder1.stream_.resetStream(Http::StreamResetReason::RemoteReset); // For normal HTTP, once we have a 200 we consider this a success, even if a // later reset occurs. diff --git a/test/integration/protocol_integration_test.cc b/test/integration/protocol_integration_test.cc index e41cd9eabe90..46f8af0dbb58 100644 --- a/test/integration/protocol_integration_test.cc +++ b/test/integration/protocol_integration_test.cc @@ -1985,6 +1985,40 @@ TEST_P(DownstreamProtocolIntegrationTest, ConnectStreamRejection) { EXPECT_FALSE(codec_client_->disconnected()); } +// Regression test for https://github.com/envoyproxy/envoy/issues/12131 +TEST_P(DownstreamProtocolIntegrationTest, Test100AndDisconnect) { + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); + waitForNextUpstreamRequest(); + upstream_request_->encode100ContinueHeaders(Http::TestResponseHeaderMapImpl{{":status", "100"}}); + ASSERT_TRUE(fake_upstream_connection_->close()); + + // Make sure that a disconnect results in valid 5xx response headers even when preceded by a 100. + response->waitForEndStream(); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("503", response->headers().getStatusValue()); +} + +TEST_P(DownstreamProtocolIntegrationTest, Test100AndDisconnectLegacy) { + config_helper_.addRuntimeOverride("envoy.reloadable_features.allow_500_after_100", "false"); + + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); + waitForNextUpstreamRequest(); + upstream_request_->encode100ContinueHeaders(Http::TestResponseHeaderMapImpl{{":status", "100"}}); + ASSERT_TRUE(fake_upstream_connection_->close()); + + if (downstreamProtocol() == Http::CodecClient::Type::HTTP1) { + ASSERT_TRUE(codec_client_->waitForDisconnect()); + EXPECT_FALSE(response->complete()); + } else { + response->waitForReset(); + EXPECT_FALSE(response->complete()); + } +} + // For tests which focus on downstream-to-Envoy behavior, and don't need to be // run with both HTTP/1 and HTTP/2 upstreams. INSTANTIATE_TEST_SUITE_P(Protocols, DownstreamProtocolIntegrationTest, From e07af92eac72dd319e6faf48628ebf3c845fa941 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Tue, 21 Jul 2020 16:36:16 -0400 Subject: [PATCH 707/909] test: extending directory coverage checks to core code. (#12213) Signed-off-by: Alyssa Wilk --- test/per_file_coverage.sh | 30 +++++++++++++++++++++++++----- 1 file changed, 25 insertions(+), 5 deletions(-) diff --git a/test/per_file_coverage.sh b/test/per_file_coverage.sh index ef5987d99c37..8feb9ae3be6e 100755 --- a/test/per_file_coverage.sh +++ b/test/per_file_coverage.sh @@ -1,8 +1,25 @@ #!/bin/bash # directory:coverage_percent -# for existing extensions with low coverage. +# for existing directories with low coverage. 
declare -a KNOWN_LOW_COVERAGE=( +"source/common/network:94.0" +"source/common/http/http3:50.0" +"source/common/tracing:94.9" +"source/common/protobuf:94.9" +"source/common/secret:95.2" +"source/common/singleton:95.1" +"source/common/api:92.1" +"source/common/api/posix:92.1" +"source/common/json:90.6" +"source/common/filesystem:96.1" +"source/common/filesystem/posix:93.7" +"source/common/thread_local:95.7" +"source/common/crypto:0.0" +"source/common/common/posix:94.1" +"source/common/signal:85.1" +"source/exe:93.7" +"source/extensions:96.3" "source/extensions/common:94.4" "source/extensions/common/crypto:91.5" "source/extensions/common/wasm:85.4" @@ -48,6 +65,9 @@ declare -a KNOWN_LOW_COVERAGE=( "source/extensions/transport_sockets/tap:95.6" "source/extensions/transport_sockets/tls:94.2" "source/extensions/transport_sockets/tls/private_key:76.9" +"source/server:94.7" +"source/server/config_validation:77.2" +"source/server/admin:95.6" ) [[ -z "${SRCDIR}" ]] && SRCDIR="${PWD}" @@ -58,7 +78,7 @@ FAILED=0 DEFAULT_COVERAGE_THRESHOLD=96.6 DIRECTORY_THRESHOLD=$DEFAULT_COVERAGE_THRESHOLD -# Unfortunately we have a bunch of preexisting extensions with low coverage. +# Unfortunately we have a bunch of preexisting directory with low coverage. # Set their low bar as their current coverage level. get_coverage_target() { DIRECTORY_THRESHOLD=$DEFAULT_COVERAGE_THRESHOLD @@ -71,9 +91,9 @@ get_coverage_target() { done } -# Make sure that for each extension directory with code, coverage doesn't dip +# Make sure that for each directory with code, coverage doesn't dip # below the default coverage threshold. -for DIRECTORY in $(find source/extensions/* -type d) +for DIRECTORY in $(find source/* -type d) do get_coverage_target $DIRECTORY COVERAGE_VALUE=$(lcov -e $COVERAGE_DATA "$DIRECTORY/*" -o /dev/null | grep line | cut -d ' ' -f 4) @@ -90,7 +110,7 @@ do fi; COVERAGE_FAILED=$(echo "${COVERAGE_VALUE}<${DIRECTORY_THRESHOLD}" | bc) if test ${COVERAGE_FAILED} -eq 1; then - echo Code coverage for extension ${DIRECTORY} is lower than limit of ${DIRECTORY_THRESHOLD} \(${COVERAGE_VALUE}\) + echo Code coverage for ${DIRECTORY} is lower than limit of ${DIRECTORY_THRESHOLD} \(${COVERAGE_VALUE}\) FAILED=1 fi done From a24c95e4fe035ae5d05691ee2b2bd7011093a9e9 Mon Sep 17 00:00:00 2001 From: Ranjith Kumar Date: Wed, 22 Jul 2020 04:02:59 +0530 Subject: [PATCH 708/909] stats: add histograms for request/response headers and body sizes (#11559) Created a new struct for optional cluster stats. Moved timeout budget stats and added request response headers and body stats in the new struct. 
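In outline, the new optional stats follow the same pattern as the existing timeout budget stats: the histogram struct is only allocated when the corresponding TrackClusterStats flag is set, and call sites go through an absl::optional reference wrapper so clusters that do not opt in pay no cost. A reduced, self-contained sketch of that pattern (the type and helper names below are illustrative; the real ones are ClusterRequestResponseSizeStats and its OptRef alias in the upstream.h hunk further down):

  #include <cstdint>
  #include <functional>
  #include <memory>

  #include "absl/types/optional.h"

  // Stand-in for Stats::Histogram, only to keep the sketch self-contained.
  struct FakeHistogram {
    void recordValue(uint64_t) {}
  };

  struct RequestResponseSizeStats {
    FakeHistogram upstream_rq_headers_size_;
    FakeHistogram upstream_rs_headers_size_;
  };

  using RequestResponseSizeStatsOptRef =
      absl::optional<std::reference_wrapper<RequestResponseSizeStats>>;

  // Allocated only when track_cluster_stats.request_response_sizes is true.
  std::unique_ptr<RequestResponseSizeStats> makeStats(bool enabled) {
    return enabled ? std::make_unique<RequestResponseSizeStats>() : nullptr;
  }

  // Call sites guard on has_value(), mirroring the conn_manager_impl.cc hunks
  // later in this patch.
  void recordRequestHeaderSize(RequestResponseSizeStatsOptRef stats, uint64_t bytes) {
    if (stats.has_value()) {
      stats->get().upstream_rq_headers_size_.recordValue(bytes);
    }
  }

Gating the allocation (rather than just the recordValue() calls) keeps per-cluster stats memory flat for deployments that leave these histograms disabled, which is presumably why the knob lives in the cluster config.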
Risk Level: Low Testing: Added test cases Docs Changes: added Release Notes: added Fixes #10308 , Fixes #3621 Signed-off-by: Ranjith Kumar --- api/envoy/config/cluster/v3/cluster.proto | 26 ++- .../config/cluster/v4alpha/cluster.proto | 33 +++- .../cluster_manager/cluster_stats.rst | 17 ++ docs/root/version_history/current.rst | 5 +- .../envoy/config/cluster/v3/cluster.proto | 26 ++- .../config/cluster/v4alpha/cluster.proto | 29 ++- include/envoy/upstream/upstream.h | 35 +++- source/common/http/conn_manager_impl.cc | 32 +++ source/common/router/router.cc | 10 +- source/common/router/upstream_request.cc | 7 +- source/common/upstream/upstream_impl.cc | 24 ++- source/common/upstream/upstream_impl.h | 31 ++- test/common/http/conn_manager_impl_test.cc | 183 ++++++++++++++++++ test/common/upstream/upstream_impl_test.cc | 123 +++++++++++- .../http/tcp/upstream_request_test.cc | 3 +- test/integration/stats_integration_test.cc | 8 +- test/mocks/upstream/cluster_info.cc | 11 +- test/mocks/upstream/cluster_info.h | 7 +- 18 files changed, 567 insertions(+), 43 deletions(-) diff --git a/api/envoy/config/cluster/v3/cluster.proto b/api/envoy/config/cluster/v3/cluster.proto index 6123bd59c14e..b4ea53bb0933 100644 --- a/api/envoy/config/cluster/v3/cluster.proto +++ b/api/envoy/config/cluster/v3/cluster.proto @@ -43,7 +43,7 @@ message ClusterCollection { } // Configuration for a single upstream cluster. -// [#next-free-field: 49] +// [#next-free-field: 50] message Cluster { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Cluster"; @@ -856,7 +856,12 @@ message Cluster { // request. These show what percentage of a request's per try and global timeout was used. A value // of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value // of 100 would indicate that the request took the entirety of the timeout given to it. - bool track_timeout_budgets = 47; + // + // .. attention:: + // + // This field has been deprecated in favor of `timeout_budgets`, part of + // :ref:`track_cluster_stats `. + bool track_timeout_budgets = 47 [deprecated = true]; // Optional customization and configuration of upstream connection pool, and upstream type. // @@ -876,6 +881,9 @@ message Cluster { // CONNECT only if a custom filter indicates it is appropriate, the custom factories // can be registered and configured here. core.v3.TypedExtensionConfig upstream_config = 48; + + // Configuration to track optional cluster stats. + TrackClusterStats track_cluster_stats = 49; } // [#not-implemented-hide:] Extensible load balancing policy configuration. @@ -936,3 +944,17 @@ message UpstreamConnectionOptions { // If set then set SO_KEEPALIVE on the socket to enable TCP Keepalives. core.v3.TcpKeepalive tcp_keepalive = 1; } + +message TrackClusterStats { + // If timeout_budgets is true, the :ref:`timeout budget histograms + // ` will be published for each + // request. These show what percentage of a request's per try and global timeout was used. A value + // of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value + // of 100 would indicate that the request took the entirety of the timeout given to it. + bool timeout_budgets = 1; + + // If request_response_sizes is true, then the :ref:`histograms + // ` tracking header and body sizes + // of requests and responses will be published. 
+ bool request_response_sizes = 2; +} diff --git a/api/envoy/config/cluster/v4alpha/cluster.proto b/api/envoy/config/cluster/v4alpha/cluster.proto index 6c1302d28941..4172b07e0538 100644 --- a/api/envoy/config/cluster/v4alpha/cluster.proto +++ b/api/envoy/config/cluster/v4alpha/cluster.proto @@ -45,7 +45,7 @@ message ClusterCollection { } // Configuration for a single upstream cluster. -// [#next-free-field: 49] +// [#next-free-field: 50] message Cluster { option (udpa.annotations.versioning).previous_message_type = "envoy.config.cluster.v3.Cluster"; @@ -545,9 +545,9 @@ message Cluster { google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {nanos: 1000000}}]; } - reserved 12, 15, 7, 11, 35; + reserved 12, 15, 7, 11, 35, 47; - reserved "hosts", "tls_context", "extension_protocol_options"; + reserved "hosts", "tls_context", "extension_protocol_options", "track_timeout_budgets"; // Configuration to use different transport sockets for different endpoints. // The entry of *envoy.transport_socket_match* in the @@ -855,13 +855,6 @@ message Cluster { // from the LRS stream here.] core.v4alpha.ConfigSource lrs_server = 42; - // If track_timeout_budgets is true, the :ref:`timeout budget histograms - // ` will be published for each - // request. These show what percentage of a request's per try and global timeout was used. A value - // of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value - // of 100 would indicate that the request took the entirety of the timeout given to it. - bool track_timeout_budgets = 47; - // Optional customization and configuration of upstream connection pool, and upstream type. // // Currently this field only applies for HTTP traffic but is designed for eventual use for custom @@ -880,6 +873,9 @@ message Cluster { // CONNECT only if a custom filter indicates it is appropriate, the custom factories // can be registered and configured here. core.v4alpha.TypedExtensionConfig upstream_config = 48; + + // Configuration to track optional cluster stats. + TrackClusterStats track_cluster_stats = 49; } // [#not-implemented-hide:] Extensible load balancing policy configuration. @@ -942,3 +938,20 @@ message UpstreamConnectionOptions { // If set then set SO_KEEPALIVE on the socket to enable TCP Keepalives. core.v4alpha.TcpKeepalive tcp_keepalive = 1; } + +message TrackClusterStats { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.TrackClusterStats"; + + // If timeout_budgets is true, the :ref:`timeout budget histograms + // ` will be published for each + // request. These show what percentage of a request's per try and global timeout was used. A value + // of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value + // of 100 would indicate that the request took the entirety of the timeout given to it. + bool timeout_budgets = 1; + + // If request_response_sizes is true, then the :ref:`histograms + // ` tracking header and body sizes + // of requests and responses will be published. 
+ bool request_response_sizes = 2; +} diff --git a/docs/root/configuration/upstream/cluster_manager/cluster_stats.rst b/docs/root/configuration/upstream/cluster_manager/cluster_stats.rst index 8fe03e0b55a4..5d956c28d2b3 100644 --- a/docs/root/configuration/upstream/cluster_manager/cluster_stats.rst +++ b/docs/root/configuration/upstream/cluster_manager/cluster_stats.rst @@ -314,3 +314,20 @@ Statistics for monitoring effective host weights when using the min_entries_per_host, Gauge, Minimum number of entries for a single host max_entries_per_host, Gauge, Maximum number of entries for a single host + +.. _config_cluster_manager_cluster_stats_request_response_sizes: + +Request Response Size statistics +-------------------------------- + +If :ref:`request response size statistics ` are tracked, +statistics will be added to *cluster.* and contain the following: + +.. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + upstream_rq_headers_size, Histogram, Request headers size in bytes per upstream + upstream_rq_body_size, Histogram, Request body size in bytes per upstream + upstream_rs_headers_size, Histogram, Response headers size in bytes per upstream + upstream_rs_body_size, Histogram, Response body size in bytes per upstream diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index e3263fee3d41..47e8e2fbec95 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -36,13 +36,16 @@ Removed Config or Runtime New Features ------------ - * ext_authz filter: added support for emitting dynamic metadata for both :ref:`HTTP ` and :ref:`network ` filters. * grpc-json: support specifying `response_body` field in for `google.api.HttpBody` message. * http: introduced new HTTP/1 and HTTP/2 codec implementations that will remove the use of exceptions for control flow due to high risk factors and instead use error statuses. The old behavior is deprecated, but can be used during the removal period by setting the runtime feature `envoy.reloadable_features.new_codec_behavior` to false. The removal period will be one month. * load balancer: added a :ref:`configuration` option to specify the active request bias used by the least request load balancer. * redis: added fault injection support :ref:`fault injection for redis proxy `, described further in :ref:`configuration documentation `. +* stats: added optional histograms to :ref:`cluster stats ` + that track headers and body sizes of requests and responses. * tap: added :ref:`generic body matcher` to scan http requests and responses for text or hex patterns. Deprecated ---------- +* The :ref:`track_timeout_budgets ` + field has been deprecated in favor of `timeout_budgets` part of an :ref:`Optional Configuration `. diff --git a/generated_api_shadow/envoy/config/cluster/v3/cluster.proto b/generated_api_shadow/envoy/config/cluster/v3/cluster.proto index cf6b9cb652b3..ac93934e72bf 100644 --- a/generated_api_shadow/envoy/config/cluster/v3/cluster.proto +++ b/generated_api_shadow/envoy/config/cluster/v3/cluster.proto @@ -44,7 +44,7 @@ message ClusterCollection { } // Configuration for a single upstream cluster. -// [#next-free-field: 49] +// [#next-free-field: 50] message Cluster { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Cluster"; @@ -854,7 +854,12 @@ message Cluster { // request. These show what percentage of a request's per try and global timeout was used. 
A value // of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value // of 100 would indicate that the request took the entirety of the timeout given to it. - bool track_timeout_budgets = 47; + // + // .. attention:: + // + // This field has been deprecated in favor of `timeout_budgets`, part of + // :ref:`track_cluster_stats `. + bool track_timeout_budgets = 47 [deprecated = true]; // Optional customization and configuration of upstream connection pool, and upstream type. // @@ -875,6 +880,9 @@ message Cluster { // can be registered and configured here. core.v3.TypedExtensionConfig upstream_config = 48; + // Configuration to track optional cluster stats. + TrackClusterStats track_cluster_stats = 49; + repeated core.v3.Address hidden_envoy_deprecated_hosts = 7 [deprecated = true]; envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext hidden_envoy_deprecated_tls_context = @@ -940,3 +948,17 @@ message UpstreamConnectionOptions { // If set then set SO_KEEPALIVE on the socket to enable TCP Keepalives. core.v3.TcpKeepalive tcp_keepalive = 1; } + +message TrackClusterStats { + // If timeout_budgets is true, the :ref:`timeout budget histograms + // ` will be published for each + // request. These show what percentage of a request's per try and global timeout was used. A value + // of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value + // of 100 would indicate that the request took the entirety of the timeout given to it. + bool timeout_budgets = 1; + + // If request_response_sizes is true, then the :ref:`histograms + // ` tracking header and body sizes + // of requests and responses will be published. + bool request_response_sizes = 2; +} diff --git a/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto b/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto index 6c1302d28941..facc5d38d16c 100644 --- a/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto +++ b/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto @@ -45,7 +45,7 @@ message ClusterCollection { } // Configuration for a single upstream cluster. -// [#next-free-field: 49] +// [#next-free-field: 50] message Cluster { option (udpa.annotations.versioning).previous_message_type = "envoy.config.cluster.v3.Cluster"; @@ -860,7 +860,12 @@ message Cluster { // request. These show what percentage of a request's per try and global timeout was used. A value // of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value // of 100 would indicate that the request took the entirety of the timeout given to it. - bool track_timeout_budgets = 47; + // + // .. attention:: + // + // This field has been deprecated in favor of `timeout_budgets`, part of + // :ref:`track_cluster_stats `. + bool hidden_envoy_deprecated_track_timeout_budgets = 47 [deprecated = true]; // Optional customization and configuration of upstream connection pool, and upstream type. // @@ -880,6 +885,9 @@ message Cluster { // CONNECT only if a custom filter indicates it is appropriate, the custom factories // can be registered and configured here. core.v4alpha.TypedExtensionConfig upstream_config = 48; + + // Configuration to track optional cluster stats. + TrackClusterStats track_cluster_stats = 49; } // [#not-implemented-hide:] Extensible load balancing policy configuration. @@ -942,3 +950,20 @@ message UpstreamConnectionOptions { // If set then set SO_KEEPALIVE on the socket to enable TCP Keepalives. 
core.v4alpha.TcpKeepalive tcp_keepalive = 1; } + +message TrackClusterStats { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.TrackClusterStats"; + + // If timeout_budgets is true, the :ref:`timeout budget histograms + // ` will be published for each + // request. These show what percentage of a request's per try and global timeout was used. A value + // of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value + // of 100 would indicate that the request took the entirety of the timeout given to it. + bool timeout_budgets = 1; + + // If request_response_sizes is true, then the :ref:`histograms + // ` tracking header and body sizes + // of requests and responses will be published. + bool request_response_sizes = 2; +} diff --git a/include/envoy/upstream/upstream.h b/include/envoy/upstream/upstream.h index 70c9dd9755c8..cd15d0bb3dff 100644 --- a/include/envoy/upstream/upstream.h +++ b/include/envoy/upstream/upstream.h @@ -622,6 +622,15 @@ class PrioritySet { REMAINING_GAUGE(remaining_retries, Accumulate) \ REMAINING_GAUGE(remaining_rq, Accumulate) +/** + * All stats tracking request/response headers and body sizes. Not used by default. + */ +#define ALL_CLUSTER_REQUEST_RESPONSE_SIZE_STATS(HISTOGRAM) \ + HISTOGRAM(upstream_rq_headers_size, Bytes) \ + HISTOGRAM(upstream_rq_body_size, Bytes) \ + HISTOGRAM(upstream_rs_headers_size, Bytes) \ + HISTOGRAM(upstream_rs_body_size, Bytes) + /** * All stats around timeout budgets. Not used by default. */ @@ -650,6 +659,17 @@ struct ClusterCircuitBreakersStats { ALL_CLUSTER_CIRCUIT_BREAKERS_STATS(GENERATE_GAUGE_STRUCT, GENERATE_GAUGE_STRUCT) }; +/** + * Struct definition for cluster timeout budget stats. @see stats_macros.h + */ +struct ClusterRequestResponseSizeStats { + ALL_CLUSTER_REQUEST_RESPONSE_SIZE_STATS(GENERATE_HISTOGRAM_STRUCT) +}; + +using ClusterRequestResponseSizeStatsPtr = std::unique_ptr; +using ClusterRequestResponseSizeStatsOptRef = + absl::optional>; + /** * Struct definition for cluster timeout budget stats. @see stats_macros.h */ @@ -657,6 +677,10 @@ struct ClusterTimeoutBudgetStats { ALL_CLUSTER_TIMEOUT_BUDGET_STATS(GENERATE_HISTOGRAM_STRUCT) }; +using ClusterTimeoutBudgetStatsPtr = std::unique_ptr; +using ClusterTimeoutBudgetStatsOptRef = + absl::optional>; + /** * All extension protocol specific options returned by the method at * NamedNetworkFilterConfigFactory::createProtocolOptions @@ -851,9 +875,16 @@ class ClusterInfo { virtual ClusterLoadReportStats& loadReportStats() const PURE; /** - * @return absl::optional& stats on timeout budgets for this cluster. + * @return absl::optional> stats to track + * headers/body sizes of request/response for this cluster. + */ + virtual ClusterRequestResponseSizeStatsOptRef requestResponseSizeStats() const PURE; + + /** + * @return absl::optional> stats on timeout + * budgets for this cluster. */ - virtual const absl::optional& timeoutBudgetStats() const PURE; + virtual ClusterTimeoutBudgetStatsOptRef timeoutBudgetStats() const PURE; /** * Returns an optional source address for upstream connections to bind to. 
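[Reviewer note, not part of the patch] A minimal caller-side sketch of how the new OptRef accessors on ClusterInfo are meant to be consumed. The helper function below is hypothetical; the accessor, typedefs and histogram names come from the upstream.h changes above, and the pattern mirrors the recording sites added later in this diff. Stats are recorded only when the optional histograms were actually created for the cluster.

// Hypothetical helper illustrating the OptRef pattern added to ClusterInfo above.
void recordUpstreamRequestSizes(const Upstream::ClusterInfo& cluster,
                                uint64_t headers_bytes, uint64_t body_bytes) {
  Upstream::ClusterRequestResponseSizeStatsOptRef req_resp_stats =
      cluster.requestResponseSizeStats();
  if (req_resp_stats.has_value()) {
    // Histograms exist only when track_cluster_stats.request_response_sizes is enabled;
    // otherwise the optional is empty and recording is skipped entirely.
    req_resp_stats->get().upstream_rq_headers_size_.recordValue(headers_bytes);
    req_resp_stats->get().upstream_rq_body_size_.recordValue(body_bytes);
  }
}

Returning an empty optional rather than a null pointer keeps the no-op path explicit at every call site while hiding the internal unique_ptr storage.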
diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index b37a31afe165..c633ff28ca68 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -607,6 +607,17 @@ ConnectionManagerImpl::ActiveStream::ActiveStream(ConnectionManagerImpl& connect ConnectionManagerImpl::ActiveStream::~ActiveStream() { stream_info_.onRequestComplete(); + Upstream::HostDescriptionConstSharedPtr upstream_host = + connection_manager_.read_callbacks_->upstreamHost(); + + if (upstream_host != nullptr) { + Upstream::ClusterRequestResponseSizeStatsOptRef req_resp_stats = + upstream_host->cluster().requestResponseSizeStats(); + if (req_resp_stats.has_value()) { + req_resp_stats->get().upstream_rq_body_size_.recordValue(stream_info_.bytesReceived()); + req_resp_stats->get().upstream_rs_body_size_.recordValue(stream_info_.bytesSent()); + } + } // A downstream disconnect can be identified for HTTP requests when the upstream returns with a 0 // response code and when no other response flags are set. @@ -722,6 +733,17 @@ void ConnectionManagerImpl::ActiveStream::chargeStats(const ResponseHeaderMap& h return; } + Upstream::HostDescriptionConstSharedPtr upstream_host = + connection_manager_.read_callbacks_->upstreamHost(); + + if (upstream_host != nullptr) { + Upstream::ClusterRequestResponseSizeStatsOptRef req_resp_stats = + upstream_host->cluster().requestResponseSizeStats(); + if (req_resp_stats.has_value()) { + req_resp_stats->get().upstream_rs_headers_size_.recordValue(headers.byteSize()); + } + } + connection_manager_.stats_.named_.downstream_rq_completed_.inc(); connection_manager_.listener_stats_.downstream_rq_completed_.inc(); if (CodeUtility::is1xx(response_code)) { @@ -769,6 +791,16 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& he ScopeTrackerScopeState scope(this, connection_manager_.read_callbacks_->connection().dispatcher()); request_headers_ = std::move(headers); + Upstream::HostDescriptionConstSharedPtr upstream_host = + connection_manager_.read_callbacks_->upstreamHost(); + + if (upstream_host != nullptr) { + Upstream::ClusterRequestResponseSizeStatsOptRef req_resp_stats = + upstream_host->cluster().requestResponseSizeStats(); + if (req_resp_stats.has_value()) { + req_resp_stats->get().upstream_rq_headers_size_.recordValue(request_headers_->byteSize()); + } + } // Both saw_connection_close_ and is_head_request_ affect local replies: set // them as early as possible. 
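[Reviewer note, not part of the patch] A configuration sketch showing how both optional stat groups could be enabled on a cluster, written in the same raw-string YAML style the unit tests in this change use. The cluster name and the other fields are placeholders; only the track_cluster_stats field names come from the new TrackClusterStats message.

// Illustrative only. Enables both optional histogram groups for a cluster;
// omitting track_cluster_stats preserves today's behavior (no extra histograms).
const std::string yaml = R"EOF(
    name: example_cluster
    connect_timeout: 0.25s
    type: STRICT_DNS
    lb_policy: ROUND_ROBIN
    track_cluster_stats: { timeout_budgets: true, request_response_sizes: true }
  )EOF";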
diff --git a/source/common/router/router.cc b/source/common/router/router.cc index b2d587c33434..c7fd54d418ba 100644 --- a/source/common/router/router.cc +++ b/source/common/router/router.cc @@ -948,12 +948,13 @@ void Filter::chargeUpstreamAbort(Http::Code code, bool dropped, UpstreamRequest& void Filter::onUpstreamTimeoutAbort(StreamInfo::ResponseFlag response_flags, absl::string_view details) { - if (cluster_->timeoutBudgetStats().has_value()) { + Upstream::ClusterTimeoutBudgetStatsOptRef tb_stats = cluster()->timeoutBudgetStats(); + if (tb_stats.has_value()) { Event::Dispatcher& dispatcher = callbacks_->dispatcher(); std::chrono::milliseconds response_time = std::chrono::duration_cast( dispatcher.timeSource().monotonicTime() - downstream_request_complete_time_); - cluster_->timeoutBudgetStats()->upstream_rq_timeout_budget_percent_used_.recordValue( + tb_stats->get().upstream_rq_timeout_budget_percent_used_.recordValue( FilterUtility::percentageOfTimeout(response_time, timeout_.global_timeout_)); } @@ -1340,8 +1341,9 @@ void Filter::onUpstreamComplete(UpstreamRequest& upstream_request) { std::chrono::milliseconds response_time = std::chrono::duration_cast( dispatcher.timeSource().monotonicTime() - downstream_request_complete_time_); - if (cluster_->timeoutBudgetStats().has_value()) { - cluster_->timeoutBudgetStats()->upstream_rq_timeout_budget_percent_used_.recordValue( + Upstream::ClusterTimeoutBudgetStatsOptRef tb_stats = cluster()->timeoutBudgetStats(); + if (tb_stats.has_value()) { + tb_stats->get().upstream_rq_timeout_budget_percent_used_.recordValue( FilterUtility::percentageOfTimeout(response_time, timeout_.global_timeout_)); } diff --git a/source/common/router/upstream_request.cc b/source/common/router/upstream_request.cc index e4d97c82f0bb..c2db2ca31af8 100644 --- a/source/common/router/upstream_request.cc +++ b/source/common/router/upstream_request.cc @@ -90,10 +90,9 @@ UpstreamRequest::~UpstreamRequest() { const MonotonicTime end_time = dispatcher.timeSource().monotonicTime(); const std::chrono::milliseconds response_time = std::chrono::duration_cast(end_time - start_time_); - parent_.cluster() - ->timeoutBudgetStats() - ->upstream_rq_timeout_budget_per_try_percent_used_.recordValue( - FilterUtility::percentageOfTimeout(response_time, parent_.timeout().per_try_timeout_)); + Upstream::ClusterTimeoutBudgetStatsOptRef tb_stats = parent_.cluster()->timeoutBudgetStats(); + tb_stats->get().upstream_rq_timeout_budget_per_try_percent_used_.recordValue( + FilterUtility::percentageOfTimeout(response_time, parent_.timeout().per_try_timeout_)); } stream_info_.setUpstreamTiming(upstream_timing_); diff --git a/source/common/upstream/upstream_impl.cc b/source/common/upstream/upstream_impl.cc index 269745bd765a..44fc4860a931 100644 --- a/source/common/upstream/upstream_impl.cc +++ b/source/common/upstream/upstream_impl.cc @@ -26,6 +26,7 @@ #include "envoy/ssl/context_manager.h" #include "envoy/stats/scope.h" #include "envoy/upstream/health_checker.h" +#include "envoy/upstream/upstream.h" #include "common/common/enum_to_int.h" #include "common/common/fmt.h" @@ -618,6 +619,11 @@ ClusterStats ClusterInfoImpl::generateStats(Stats::Scope& scope) { return {ALL_CLUSTER_STATS(POOL_COUNTER(scope), POOL_GAUGE(scope), POOL_HISTOGRAM(scope))}; } +ClusterRequestResponseSizeStats +ClusterInfoImpl::generateRequestResponseSizeStats(Stats::Scope& scope) { + return {ALL_CLUSTER_REQUEST_RESPONSE_SIZE_STATS(POOL_HISTOGRAM(scope))}; +} + ClusterLoadReportStats 
ClusterInfoImpl::generateLoadReportStats(Stats::Scope& scope) { return {ALL_CLUSTER_LOAD_REPORT_STATS(POOL_COUNTER(scope))}; } @@ -687,10 +693,9 @@ ClusterInfoImpl::ClusterInfoImpl( socket_matcher_(std::move(socket_matcher)), stats_scope_(std::move(stats_scope)), stats_(generateStats(*stats_scope_)), load_report_stats_store_(stats_scope_->symbolTable()), load_report_stats_(generateLoadReportStats(load_report_stats_store_)), - timeout_budget_stats_(config.track_timeout_budgets() - ? absl::make_optional( - generateTimeoutBudgetStats(*stats_scope_)) - : absl::nullopt), + optional_cluster_stats_((config.has_track_cluster_stats() || config.track_timeout_budgets()) + ? std::make_unique(config, *stats_scope_) + : nullptr), features_(parseFeatures(config)), http1_settings_(Http::Utility::parseHttp1Settings(config.http_protocol_options())), http2_options_(Http2::Utility::initializeAndValidateOptions(config.http2_protocol_options())), @@ -1095,6 +1100,17 @@ void ClusterImplBase::validateEndpointsForZoneAwareRouting( } } +ClusterInfoImpl::OptionalClusterStats::OptionalClusterStats( + const envoy::config::cluster::v3::Cluster& config, Stats::Scope& stats_scope) + : timeout_budget_stats_( + (config.track_cluster_stats().timeout_budgets() || config.track_timeout_budgets()) + ? std::make_unique(generateTimeoutBudgetStats(stats_scope)) + : nullptr), + request_response_size_stats_(config.track_cluster_stats().request_response_sizes() + ? std::make_unique( + generateRequestResponseSizeStats(stats_scope)) + : nullptr) {} + ClusterInfoImpl::ResourceManagers::ResourceManagers( const envoy::config::cluster::v3::Cluster& config, Runtime::Loader& runtime, const std::string& cluster_name, Stats::Scope& stats_scope) { diff --git a/source/common/upstream/upstream_impl.h b/source/common/upstream/upstream_impl.h index 5cdb994b3f41..3b42d06818c3 100644 --- a/source/common/upstream/upstream_impl.h +++ b/source/common/upstream/upstream_impl.h @@ -523,6 +523,7 @@ class ClusterInfoImpl : public ClusterInfo, protected Logger::Loggablerequest_response_size_stats_ == nullptr) { + return absl::nullopt; + } + + return std::ref(*(optional_cluster_stats_->request_response_size_stats_)); + } + ClusterLoadReportStats& loadReportStats() const override { return load_report_stats_; } - const absl::optional& timeoutBudgetStats() const override { - return timeout_budget_stats_; + + ClusterTimeoutBudgetStatsOptRef timeoutBudgetStats() const override { + if (optional_cluster_stats_ == nullptr || + optional_cluster_stats_->timeout_budget_stats_ == nullptr) { + return absl::nullopt; + } + + return std::ref(*(optional_cluster_stats_->timeout_budget_stats_)); } + const Network::Address::InstanceConstSharedPtr& sourceAddress() const override { return source_address_; }; @@ -622,6 +640,13 @@ class ClusterInfoImpl : public ClusterInfo, protected Logger::Loggable timeout_budget_stats_; + const std::unique_ptr optional_cluster_stats_; const uint64_t features_; const Http::Http1Settings http1_settings_; const envoy::config::core::v3::Http2ProtocolOptions http2_options_; diff --git a/test/common/http/conn_manager_impl_test.cc b/test/common/http/conn_manager_impl_test.cc index af2c7325c46f..88b6712252d9 100644 --- a/test/common/http/conn_manager_impl_test.cc +++ b/test/common/http/conn_manager_impl_test.cc @@ -64,6 +64,7 @@ using testing::InSequence; using testing::Invoke; using testing::InvokeWithoutArgs; using testing::NiceMock; +using testing::Property; using testing::Ref; using testing::Return; using testing::ReturnRef; @@ -6060,6 +6061,188 @@ 
TEST_F(HttpConnectionManagerImplTest, NewConnection) { EXPECT_EQ(1U, stats_.named_.downstream_cx_http3_active_.value()); } +TEST_F(HttpConnectionManagerImplTest, TestUpstreamRequestHeadersSize) { + // Test with Headers only request, No Data, No response. + setup(false, ""); + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; + decoder->decodeHeaders(std::move(headers), true); + return Http::okStatus(); + })); + + setupFilterChain(1, 0); + + EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true)) + .WillOnce(Return(FilterHeadersStatus::StopIteration)); + EXPECT_CALL(*decoder_filters_[0], decodeComplete()); + + std::shared_ptr> host_{ + new NiceMock()}; + filter_callbacks_.upstreamHost(host_); + + EXPECT_CALL( + host_->cluster_.request_response_size_stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rq_headers_size"), 30)); + EXPECT_CALL(host_->cluster_.request_response_size_stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rq_body_size"), 0)); + EXPECT_CALL(host_->cluster_.request_response_size_stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rs_body_size"), 0)); + + Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); +} + +TEST_F(HttpConnectionManagerImplTest, TestUpstreamRequestBodySize) { + // Test Request with Headers and Data, No response. + setup(false, ""); + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; + decoder->decodeHeaders(std::move(headers), false); + + Buffer::OwnedImpl fake_data("12345"); + decoder->decodeData(fake_data, true); + return Http::okStatus(); + })); + + setupFilterChain(1, 0); + + EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::StopIteration)); + EXPECT_CALL(*decoder_filters_[0], decodeData(_, true)) + .WillOnce(Return(FilterDataStatus::StopIterationNoBuffer)); + + EXPECT_CALL(*decoder_filters_[0], decodeComplete()); + + std::shared_ptr> host_{ + new NiceMock()}; + filter_callbacks_.upstreamHost(host_); + + EXPECT_CALL( + host_->cluster_.request_response_size_stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rq_headers_size"), 30)); + EXPECT_CALL(host_->cluster_.request_response_size_stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rq_body_size"), 5)); + EXPECT_CALL(host_->cluster_.request_response_size_stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rs_body_size"), 0)); + + Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); +} + +TEST_F(HttpConnectionManagerImplTest, TestUpstreamResponseHeadersSize) { + // Test with Header only response. 
+ setup(false, ""); + + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; + decoder->decodeHeaders(std::move(headers), false); + + Buffer::OwnedImpl fake_data("1234"); + decoder->decodeData(fake_data, true); + + return Http::okStatus(); + })); + + setupFilterChain(1, 0); + + EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::StopIteration)); + EXPECT_CALL(*decoder_filters_[0], decodeData(_, true)) + .WillOnce(Return(FilterDataStatus::StopIterationNoBuffer)); + + EXPECT_CALL(*decoder_filters_[0], decodeComplete()); + + std::shared_ptr> host_{ + new NiceMock()}; + filter_callbacks_.upstreamHost(host_); + + EXPECT_CALL( + host_->cluster_.request_response_size_stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rq_headers_size"), 30)); + + // Response headers are internally mutated and we record final response headers. + // for example in the below test case, response headers are modified as + // {':status', '200' 'date', 'Mon, 06 Jul 2020 06:08:55 GMT' 'server', ''} + // whose size is 49 instead of original response headers size 10({":status", "200"}). + EXPECT_CALL( + host_->cluster_.request_response_size_stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rs_headers_size"), 49)); + EXPECT_CALL(host_->cluster_.request_response_size_stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rq_body_size"), 4)); + EXPECT_CALL(host_->cluster_.request_response_size_stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rs_body_size"), 0)); + + Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); + + EXPECT_CALL(response_encoder_, encodeHeaders(_, true)); + expectOnDestroy(); + + decoder_filters_[0]->callbacks_->encodeHeaders( + ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true); +} + +TEST_F(HttpConnectionManagerImplTest, TestUpstreamResponseBodySize) { + // Test with response headers and body. 
+ setup(false, ""); + + EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { + RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); + RequestHeaderMapPtr headers{ + new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; + decoder->decodeHeaders(std::move(headers), false); + + Buffer::OwnedImpl fake_data("1234"); + decoder->decodeData(fake_data, true); + + return Http::okStatus(); + })); + + setupFilterChain(1, 0); + + EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, false)) + .WillOnce(Return(FilterHeadersStatus::StopIteration)); + EXPECT_CALL(*decoder_filters_[0], decodeData(_, true)) + .WillOnce(Return(FilterDataStatus::StopIterationNoBuffer)); + + EXPECT_CALL(*decoder_filters_[0], decodeComplete()); + + std::shared_ptr> host_{ + new NiceMock()}; + filter_callbacks_.upstreamHost(host_); + + EXPECT_CALL( + host_->cluster_.request_response_size_stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rq_headers_size"), 30)); + EXPECT_CALL( + host_->cluster_.request_response_size_stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rs_headers_size"), 49)); + EXPECT_CALL(host_->cluster_.request_response_size_stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rq_body_size"), 4)); + EXPECT_CALL(host_->cluster_.request_response_size_stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rs_body_size"), 11)); + + Buffer::OwnedImpl fake_input("1234"); + conn_manager_->onData(fake_input, false); + + EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); + + decoder_filters_[0]->callbacks_->encodeHeaders( + ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, false); + + EXPECT_CALL(response_encoder_, encodeData(_, true)); + expectOnDestroy(); + + Buffer::OwnedImpl fake_response("hello-world"); + decoder_filters_[0]->callbacks_->encodeData(fake_response, true); +} + TEST_F(HttpConnectionManagerImplTest, HeaderOnlyRequestAndResponseUsingHttp3) { setup(false, "envoy-custom-server", false); diff --git a/test/common/upstream/upstream_impl_test.cc b/test/common/upstream/upstream_impl_test.cc index 624637edcbee..2adbf136be49 100644 --- a/test/common/upstream/upstream_impl_test.cc +++ b/test/common/upstream/upstream_impl_test.cc @@ -13,6 +13,7 @@ #include "envoy/http/codec.h" #include "envoy/stats/scope.h" #include "envoy/upstream/cluster_manager.h" +#include "envoy/upstream/upstream.h" #include "common/config/metadata.h" #include "common/network/utility.h" @@ -2411,6 +2412,63 @@ TEST_F(ClusterInfoImplTest, OneofExtensionProtocolOptionsForUnknownFilter) { "Only one of typed_extension_protocol_options or " "extension_protocol_options can be specified"); } + +TEST_F(ClusterInfoImplTest, TestTrackRequestResponseSizesNotSetInConfig) { + const std::string yaml_disabled = R"EOF( + name: name + connect_timeout: 0.25s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + )EOF"; + + auto cluster = makeCluster(yaml_disabled); + // By default, histograms tracking request/response sizes are not published. 
+ EXPECT_FALSE(cluster->info()->requestResponseSizeStats().has_value()); + + const std::string yaml_disabled2 = R"EOF( + name: name + connect_timeout: 0.25s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + track_cluster_stats: { timeout_budgets : true } + )EOF"; + + cluster = makeCluster(yaml_disabled2); + EXPECT_FALSE(cluster->info()->requestResponseSizeStats().has_value()); + + const std::string yaml_disabled3 = R"EOF( + name: name + connect_timeout: 0.25s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + track_cluster_stats: { request_response_sizes : false } + )EOF"; + + cluster = makeCluster(yaml_disabled3); + EXPECT_FALSE(cluster->info()->requestResponseSizeStats().has_value()); +} + +TEST_F(ClusterInfoImplTest, TestTrackRequestResponseSizes) { + const std::string yaml = R"EOF( + name: name + connect_timeout: 0.25s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + track_cluster_stats: { request_response_sizes : true } + )EOF"; + + auto cluster = makeCluster(yaml); + // The stats should be created. + ASSERT_TRUE(cluster->info()->requestResponseSizeStats().has_value()); + + Upstream::ClusterRequestResponseSizeStats req_resp_stats = + cluster->info()->requestResponseSizeStats()->get(); + + EXPECT_EQ(Stats::Histogram::Unit::Bytes, req_resp_stats.upstream_rq_headers_size_.unit()); + EXPECT_EQ(Stats::Histogram::Unit::Bytes, req_resp_stats.upstream_rq_body_size_.unit()); + EXPECT_EQ(Stats::Histogram::Unit::Bytes, req_resp_stats.upstream_rs_body_size_.unit()); +} + TEST_F(ClusterInfoImplTest, TestTrackRemainingResourcesGauges) { const std::string yaml = R"EOF( name: name @@ -2502,7 +2560,64 @@ TEST_F(ClusterInfoImplTest, Timeouts) { EXPECT_FALSE(cluster3->info()->idleTimeout().has_value()); } +TEST_F(ClusterInfoImplTest, TestTrackTimeoutBudgetsNotSetInConfig) { + // Check that without the flag specified, the histogram is null. + const std::string yaml_disabled = R"EOF( + name: name + connect_timeout: 0.25s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + )EOF"; + + auto cluster = makeCluster(yaml_disabled); + // The stats will be null if they have not been explicitly turned on. + EXPECT_FALSE(cluster->info()->timeoutBudgetStats().has_value()); + + const std::string yaml_disabled2 = R"EOF( + name: name + connect_timeout: 0.25s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + track_cluster_stats: { request_response_sizes : true } + )EOF"; + + cluster = makeCluster(yaml_disabled2); + EXPECT_FALSE(cluster->info()->timeoutBudgetStats().has_value()); + + const std::string yaml_disabled3 = R"EOF( + name: name + connect_timeout: 0.25s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + track_cluster_stats: { timeout_budgets : false } + )EOF"; + + cluster = makeCluster(yaml_disabled3); + EXPECT_FALSE(cluster->info()->timeoutBudgetStats().has_value()); +} + TEST_F(ClusterInfoImplTest, TestTrackTimeoutBudgets) { + // Check that with the flag, the histogram is created. + const std::string yaml = R"EOF( + name: name + connect_timeout: 0.25s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + track_cluster_stats: { timeout_budgets : true } + )EOF"; + + auto cluster = makeCluster(yaml); + // The stats should be created. 
+ ASSERT_TRUE(cluster->info()->timeoutBudgetStats().has_value()); + + Upstream::ClusterTimeoutBudgetStats tb_stats = cluster->info()->timeoutBudgetStats()->get(); + EXPECT_EQ(Stats::Histogram::Unit::Unspecified, + tb_stats.upstream_rq_timeout_budget_percent_used_.unit()); + EXPECT_EQ(Stats::Histogram::Unit::Unspecified, + tb_stats.upstream_rq_timeout_budget_per_try_percent_used_.unit()); +} + +TEST_F(ClusterInfoImplTest, DEPRECATED_FEATURE_TEST(TestTrackTimeoutBudgetsOld)) { // Check that without the flag specified, the histogram is null. const std::string yaml_disabled = R"EOF( name: name @@ -2526,9 +2641,13 @@ TEST_F(ClusterInfoImplTest, TestTrackTimeoutBudgets) { cluster = makeCluster(yaml); // The stats should be created. - EXPECT_TRUE(cluster->info()->timeoutBudgetStats().has_value()); + ASSERT_TRUE(cluster->info()->timeoutBudgetStats().has_value()); + + Upstream::ClusterTimeoutBudgetStats tb_stats = cluster->info()->timeoutBudgetStats()->get(); + EXPECT_EQ(Stats::Histogram::Unit::Unspecified, + tb_stats.upstream_rq_timeout_budget_percent_used_.unit()); EXPECT_EQ(Stats::Histogram::Unit::Unspecified, - cluster->info()->timeoutBudgetStats()->upstream_rq_timeout_budget_percent_used_.unit()); + tb_stats.upstream_rq_timeout_budget_per_try_percent_used_.unit()); } // Validates HTTP2 SETTINGS config. diff --git a/test/extensions/upstreams/http/tcp/upstream_request_test.cc b/test/extensions/upstreams/http/tcp/upstream_request_test.cc index 114648c00b00..cb5ee535357d 100644 --- a/test/extensions/upstreams/http/tcp/upstream_request_test.cc +++ b/test/extensions/upstreams/http/tcp/upstream_request_test.cc @@ -35,7 +35,8 @@ class MockRouterFilterInterface : public RouterFilterInterface { MockRouterFilterInterface() : config_("prefix.", context_, ShadowWriterPtr(new MockShadowWriter()), router_proto) { auto cluster_info = new NiceMock(); - cluster_info->timeout_budget_stats_ = absl::nullopt; + cluster_info->timeout_budget_stats_ = nullptr; + ON_CALL(*cluster_info, timeoutBudgetStats()).WillByDefault(Return(absl::nullopt)); cluster_info_.reset(cluster_info); ON_CALL(*this, callbacks()).WillByDefault(Return(&callbacks_)); ON_CALL(*this, config()).WillByDefault(ReturnRef(config_)); diff --git a/test/integration/stats_integration_test.cc b/test/integration/stats_integration_test.cc index 9b339b8e4dc1..8d4fe14d4d41 100644 --- a/test/integration/stats_integration_test.cc +++ b/test/integration/stats_integration_test.cc @@ -284,6 +284,8 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithFakeSymbolTable) { // in callback manager. // 2020/07/07 11252 44971 46000 Introduce Least Request LB active request bias config // 2020/07/15 11748 45003 46000 Stream error on invalid messaging + // 2020/07/20 11559 44747 46000 stats: add histograms for request/response headers + // and body sizes. // Note: when adjusting this value: EXPECT_MEMORY_EQ is active only in CI // 'release' builds, where we control the platform and tool-chain. So you @@ -301,7 +303,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithFakeSymbolTable) { // We only run the exact test for ipv6 because ipv4 in some cases may allocate a // different number of bytes. We still run the approximate test. if (ip_version_ != Network::Address::IpVersion::v6) { - EXPECT_MEMORY_EQ(m_per_cluster, 45003); + EXPECT_MEMORY_EQ(m_per_cluster, 44747); } EXPECT_MEMORY_LE(m_per_cluster, 46000); // Round up to allow platform variations. 
} @@ -357,6 +359,8 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithRealSymbolTable) { // in callback manager. // 2020/07/07 11252 37083 38000 Introduce Least Request LB active request bias config // 2020/07/15 11748 37115 38000 Stream error on invalid messaging + // 2020/07/20 11559 36859 38000 stats: add histograms for request/response headers + // and body sizes. // Note: when adjusting this value: EXPECT_MEMORY_EQ is active only in CI // 'release' builds, where we control the platform and tool-chain. So you @@ -374,7 +378,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithRealSymbolTable) { // We only run the exact test for ipv6 because ipv4 in some cases may allocate a // different number of bytes. We still run the approximate test. if (ip_version_ != Network::Address::IpVersion::v6) { - EXPECT_MEMORY_EQ(m_per_cluster, 37115); + EXPECT_MEMORY_EQ(m_per_cluster, 36859); } EXPECT_MEMORY_LE(m_per_cluster, 38000); // Round up to allow platform variations. } diff --git a/test/mocks/upstream/cluster_info.cc b/test/mocks/upstream/cluster_info.cc index 168395895ec7..e45cfe0bc521 100644 --- a/test/mocks/upstream/cluster_info.cc +++ b/test/mocks/upstream/cluster_info.cc @@ -42,7 +42,9 @@ MockClusterInfo::MockClusterInfo() stats_(ClusterInfoImpl::generateStats(stats_store_)), transport_socket_matcher_(new NiceMock()), load_report_stats_(ClusterInfoImpl::generateLoadReportStats(load_report_stats_store_)), - timeout_budget_stats_(absl::make_optional( + request_response_size_stats_(std::make_unique( + ClusterInfoImpl::generateRequestResponseSizeStats(request_response_size_stats_store_))), + timeout_budget_stats_(std::make_unique( ClusterInfoImpl::generateTimeoutBudgetStats(timeout_budget_stats_store_))), circuit_breakers_stats_( ClusterInfoImpl::generateCircuitBreakersStats(stats_store_, "default", true)), @@ -71,7 +73,12 @@ MockClusterInfo::MockClusterInfo() .WillByDefault( Invoke([this]() -> TransportSocketMatcher& { return *transport_socket_matcher_; })); ON_CALL(*this, loadReportStats()).WillByDefault(ReturnRef(load_report_stats_)); - ON_CALL(*this, timeoutBudgetStats()).WillByDefault(ReturnRef(timeout_budget_stats_)); + ON_CALL(*this, requestResponseSizeStats()) + .WillByDefault(Return( + std::reference_wrapper(*request_response_size_stats_))); + ON_CALL(*this, timeoutBudgetStats()) + .WillByDefault( + Return(std::reference_wrapper(*timeout_budget_stats_))); ON_CALL(*this, sourceAddress()).WillByDefault(ReturnRef(source_address_)); ON_CALL(*this, resourceManager(_)) .WillByDefault(Invoke( diff --git a/test/mocks/upstream/cluster_info.h b/test/mocks/upstream/cluster_info.h index f8bbe8363a81..e8f3d47869ca 100644 --- a/test/mocks/upstream/cluster_info.h +++ b/test/mocks/upstream/cluster_info.h @@ -119,7 +119,8 @@ class MockClusterInfo : public ClusterInfo { MOCK_METHOD(ClusterStats&, stats, (), (const)); MOCK_METHOD(Stats::Scope&, statsScope, (), (const)); MOCK_METHOD(ClusterLoadReportStats&, loadReportStats, (), (const)); - MOCK_METHOD(absl::optional&, timeoutBudgetStats, (), (const)); + MOCK_METHOD(ClusterRequestResponseSizeStatsOptRef, requestResponseSizeStats, (), (const)); + MOCK_METHOD(ClusterTimeoutBudgetStatsOptRef, timeoutBudgetStats, (), (const)); MOCK_METHOD(const Network::Address::InstanceConstSharedPtr&, sourceAddress, (), (const)); MOCK_METHOD(const LoadBalancerSubsetInfo&, lbSubsetInfo, (), (const)); MOCK_METHOD(const envoy::config::core::v3::Metadata&, metadata, (), (const)); @@ -150,8 +151,10 @@ class MockClusterInfo : public ClusterInfo { 
Upstream::TransportSocketMatcherPtr transport_socket_matcher_; NiceMock load_report_stats_store_; ClusterLoadReportStats load_report_stats_; + NiceMock request_response_size_stats_store_; + ClusterRequestResponseSizeStatsPtr request_response_size_stats_; NiceMock timeout_budget_stats_store_; - absl::optional timeout_budget_stats_; + ClusterTimeoutBudgetStatsPtr timeout_budget_stats_; ClusterCircuitBreakersStats circuit_breakers_stats_; NiceMock runtime_; std::unique_ptr resource_manager_; From 4f1ee95e36ad233aac0d37a8b1d7c4536d4972c7 Mon Sep 17 00:00:00 2001 From: "Drew S. Ortega" Date: Tue, 21 Jul 2020 16:08:22 -0700 Subject: [PATCH 709/909] hds: extend response protos group endpoint health by cluster and locality (#12153) Currently, the HDS specifier proto gives endpoint information organized by their locality and grouped by cluster. This information is retained when doing health checks, however when responding with health statuses from each endpoint locality and cluster information is dropped. This puts all endpoint information into a flat list when returning, making it the responsibility of the receiver to re-structure data by cluster and locality. This change follows a similar format to the specifier coming into HDS, organizing it in the same way. The proposed changed adds consistency to how messages are flowing in and out of HDS. Note that his PR only contains the protos changes, and does not include implementation. This is done to get approval from api-shepherds for the changes before continuing. Risk Level: Low Testing: No new tests. New tests will be added upon implementation, but for now the fields remain empty. Docs Changes: Inline comments in hds.proto. Release Notes: N/A - will append to release notes upon implementation completion. Deprecated: The endpoints_health field in EndpointHealthResponse should be deprecated in favor of the new cluster_endpoints_health field. Signed-off-by: Drew S. Ortega --- api/envoy/service/health/v3/hds.proto | 22 +++++++++++++++ api/envoy/service/health/v4alpha/hds.proto | 27 +++++++++++++++++++ .../envoy/service/health/v3/hds.proto | 22 +++++++++++++++ .../envoy/service/health/v4alpha/hds.proto | 27 +++++++++++++++++++ 4 files changed, 98 insertions(+) diff --git a/api/envoy/service/health/v3/hds.proto b/api/envoy/service/health/v3/hds.proto index 0b09134709c8..484c0477ae46 100644 --- a/api/envoy/service/health/v3/hds.proto +++ b/api/envoy/service/health/v3/hds.proto @@ -9,6 +9,7 @@ import "envoy/config/endpoint/v3/endpoint_components.proto"; import "google/api/annotations.proto"; import "google/protobuf/duration.proto"; +import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -108,11 +109,32 @@ message EndpointHealth { config.core.v3.HealthStatus health_status = 2; } +// Group endpoint health by locality under each cluster. +message LocalityEndpointsHealth { + config.core.v3.Locality locality = 1; + + repeated EndpointHealth endpoints_health = 2; +} + +// The health status of endpoints in a cluster. The cluster name and locality +// should match the corresponding fields in ClusterHealthCheck message. 
+message ClusterEndpointsHealth { + string cluster_name = 1; + + repeated LocalityEndpointsHealth locality_endpoints_health = 2; +} + message EndpointHealthResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.service.discovery.v2.EndpointHealthResponse"; + // [#comment:TODO(drewsortega): add deprecate annotation once cluster_endpoints_health is implemented] + // Deprecated - Flat list of endpoint health information. repeated EndpointHealth endpoints_health = 1; + + // [#not-implemented-hide:] + // Organize Endpoint health information by cluster. + repeated ClusterEndpointsHealth cluster_endpoints_health = 2; } message HealthCheckRequestOrEndpointHealthResponse { diff --git a/api/envoy/service/health/v4alpha/hds.proto b/api/envoy/service/health/v4alpha/hds.proto index 826d5eeb0301..957f058b9c57 100644 --- a/api/envoy/service/health/v4alpha/hds.proto +++ b/api/envoy/service/health/v4alpha/hds.proto @@ -107,11 +107,38 @@ message EndpointHealth { config.core.v4alpha.HealthStatus health_status = 2; } +// Group endpoint health by locality under each cluster. +message LocalityEndpointsHealth { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.health.v3.LocalityEndpointsHealth"; + + config.core.v4alpha.Locality locality = 1; + + repeated EndpointHealth endpoints_health = 2; +} + +// The health status of endpoints in a cluster. The cluster name and locality +// should match the corresponding fields in ClusterHealthCheck message. +message ClusterEndpointsHealth { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.health.v3.ClusterEndpointsHealth"; + + string cluster_name = 1; + + repeated LocalityEndpointsHealth locality_endpoints_health = 2; +} + message EndpointHealthResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.service.health.v3.EndpointHealthResponse"; + // [#comment:TODO(drewsortega): add deprecate annotation once cluster_endpoints_health is implemented] + // Deprecated - Flat list of endpoint health information. repeated EndpointHealth endpoints_health = 1; + + // [#not-implemented-hide:] + // Organize Endpoint health information by cluster. + repeated ClusterEndpointsHealth cluster_endpoints_health = 2; } message HealthCheckRequestOrEndpointHealthResponse { diff --git a/generated_api_shadow/envoy/service/health/v3/hds.proto b/generated_api_shadow/envoy/service/health/v3/hds.proto index 0b09134709c8..484c0477ae46 100644 --- a/generated_api_shadow/envoy/service/health/v3/hds.proto +++ b/generated_api_shadow/envoy/service/health/v3/hds.proto @@ -9,6 +9,7 @@ import "envoy/config/endpoint/v3/endpoint_components.proto"; import "google/api/annotations.proto"; import "google/protobuf/duration.proto"; +import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -108,11 +109,32 @@ message EndpointHealth { config.core.v3.HealthStatus health_status = 2; } +// Group endpoint health by locality under each cluster. +message LocalityEndpointsHealth { + config.core.v3.Locality locality = 1; + + repeated EndpointHealth endpoints_health = 2; +} + +// The health status of endpoints in a cluster. The cluster name and locality +// should match the corresponding fields in ClusterHealthCheck message. 
+message ClusterEndpointsHealth { + string cluster_name = 1; + + repeated LocalityEndpointsHealth locality_endpoints_health = 2; +} + message EndpointHealthResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.service.discovery.v2.EndpointHealthResponse"; + // [#comment:TODO(drewsortega): add deprecate annotation once cluster_endpoints_health is implemented] + // Deprecated - Flat list of endpoint health information. repeated EndpointHealth endpoints_health = 1; + + // [#not-implemented-hide:] + // Organize Endpoint health information by cluster. + repeated ClusterEndpointsHealth cluster_endpoints_health = 2; } message HealthCheckRequestOrEndpointHealthResponse { diff --git a/generated_api_shadow/envoy/service/health/v4alpha/hds.proto b/generated_api_shadow/envoy/service/health/v4alpha/hds.proto index 826d5eeb0301..957f058b9c57 100644 --- a/generated_api_shadow/envoy/service/health/v4alpha/hds.proto +++ b/generated_api_shadow/envoy/service/health/v4alpha/hds.proto @@ -107,11 +107,38 @@ message EndpointHealth { config.core.v4alpha.HealthStatus health_status = 2; } +// Group endpoint health by locality under each cluster. +message LocalityEndpointsHealth { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.health.v3.LocalityEndpointsHealth"; + + config.core.v4alpha.Locality locality = 1; + + repeated EndpointHealth endpoints_health = 2; +} + +// The health status of endpoints in a cluster. The cluster name and locality +// should match the corresponding fields in ClusterHealthCheck message. +message ClusterEndpointsHealth { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.health.v3.ClusterEndpointsHealth"; + + string cluster_name = 1; + + repeated LocalityEndpointsHealth locality_endpoints_health = 2; +} + message EndpointHealthResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.service.health.v3.EndpointHealthResponse"; + // [#comment:TODO(drewsortega): add deprecate annotation once cluster_endpoints_health is implemented] + // Deprecated - Flat list of endpoint health information. repeated EndpointHealth endpoints_health = 1; + + // [#not-implemented-hide:] + // Organize Endpoint health information by cluster. + repeated ClusterEndpointsHealth cluster_endpoints_health = 2; } message HealthCheckRequestOrEndpointHealthResponse { From 54652f03ea0b3504bd4d64e8ae31985be2892a95 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Tue, 21 Jul 2020 21:15:06 -0400 Subject: [PATCH 710/909] http: more and more granular response code details. 
(#12216) Adding rc details to a bunch of cases where Envoy does its own HTTP/2 validity checks Risk Level: low/medium (changes to H2 codec) Testing: extended integration tests Docs Changes: better debug docs Release Notes: n/a Fixes #11774 Signed-off-by: Alyssa Wilk --- .../why_is_envoy_sending_http2_resets.rst | 4 +++ source/common/http/http1/codec_impl.cc | 3 ++- source/common/http/http1/codec_impl_legacy.cc | 3 ++- source/common/http/http2/codec_impl.cc | 25 ++++++++++++++----- source/common/http/http2/codec_impl.h | 6 ++--- test/common/http/http1/codec_impl_test.cc | 2 +- test/integration/http2_integration_test.cc | 4 +++ test/integration/http_integration.cc | 6 +++++ test/integration/protocol_integration_test.cc | 2 ++ 9 files changed, 43 insertions(+), 12 deletions(-) diff --git a/docs/root/faq/debugging/why_is_envoy_sending_http2_resets.rst b/docs/root/faq/debugging/why_is_envoy_sending_http2_resets.rst index ab0c41cb8a60..8427ed9ba761 100644 --- a/docs/root/faq/debugging/why_is_envoy_sending_http2_resets.rst +++ b/docs/root/faq/debugging/why_is_envoy_sending_http2_resets.rst @@ -17,4 +17,8 @@ from the file "source/common/http/http2/codec_impl.cc" of the form for example: `invalid http2: Invalid HTTP header field was received: frame type: 1, stream: 1, name: [content-length], value: [3]` +You can also check :ref:`HTTP/2 stats``: in many cases where +Envoy resets streams, for example if there are more headers than allowed by configuration or flood +detection kicks in, http2 counters will be incremented as the streams are reset. + diff --git a/source/common/http/http1/codec_impl.cc b/source/common/http/http1/codec_impl.cc index eb71d3fa70c1..d7be4e2ac897 100644 --- a/source/common/http/http1/codec_impl.cc +++ b/source/common/http/http1/codec_impl.cc @@ -39,6 +39,7 @@ struct Http1ResponseCodeDetailValues { const absl::string_view BodyDisallowed = "http1.body_disallowed"; const absl::string_view TransferEncodingNotAllowed = "http1.transfer_encoding_not_allowed"; const absl::string_view ContentLengthNotAllowed = "http1.content_length_not_allowed"; + const absl::string_view InvalidUnderscore = "http1.unexpected_underscore"; }; struct Http1HeaderTypesValues { @@ -1045,7 +1046,7 @@ void ServerConnectionImpl::checkHeaderNameForUnderscores() { ENVOY_CONN_LOG(debug, "Rejecting request due to header name with underscores: {}", connection_, current_header_field_.getStringView()); error_code_ = Http::Code::BadRequest; - sendProtocolError(Http1ResponseCodeDetails::get().InvalidCharacters); + sendProtocolError(Http1ResponseCodeDetails::get().InvalidUnderscore); stats_.requests_rejected_with_underscores_in_headers_.inc(); throw CodecProtocolException("http/1.1 protocol error: header name contains underscores"); } diff --git a/source/common/http/http1/codec_impl_legacy.cc b/source/common/http/http1/codec_impl_legacy.cc index 9fa3cd43eb04..82976e869ff4 100644 --- a/source/common/http/http1/codec_impl_legacy.cc +++ b/source/common/http/http1/codec_impl_legacy.cc @@ -40,6 +40,7 @@ struct Http1ResponseCodeDetailValues { const absl::string_view BodyDisallowed = "http1.body_disallowed"; const absl::string_view TransferEncodingNotAllowed = "http1.transfer_encoding_not_allowed"; const absl::string_view ContentLengthNotAllowed = "http1.content_length_not_allowed"; + const absl::string_view InvalidUnderscore = "http1.unexpected_underscore"; }; struct Http1HeaderTypesValues { @@ -1050,7 +1051,7 @@ void ServerConnectionImpl::checkHeaderNameForUnderscores() { ENVOY_CONN_LOG(debug, "Rejecting request due to header 
name with underscores: {}", connection_, current_header_field_.getStringView()); error_code_ = Http::Code::BadRequest; - sendProtocolError(Http1ResponseCodeDetails::get().InvalidCharacters); + sendProtocolError(Http1ResponseCodeDetails::get().InvalidUnderscore); stats_.requests_rejected_with_underscores_in_headers_.inc(); throw CodecProtocolException("http/1.1 protocol error: header name contains underscores"); } diff --git a/source/common/http/http2/codec_impl.cc b/source/common/http/http2/codec_impl.cc index d365e8e73d8a..87b62d644f94 100644 --- a/source/common/http/http2/codec_impl.cc +++ b/source/common/http/http2/codec_impl.cc @@ -28,17 +28,23 @@ namespace Http { namespace Http2 { class Http2ResponseCodeDetailValues { +public: // Invalid HTTP header field was received and stream is going to be // closed. const absl::string_view ng_http2_err_http_header_ = "http2.invalid.header.field"; - // Violation in HTTP messaging rule. const absl::string_view ng_http2_err_http_messaging_ = "http2.violation.of.messaging.rule"; - // none of the above const absl::string_view ng_http2_err_unknown_ = "http2.unknown.nghttp2.error"; + // The number of headers (or trailers) exceeded the configured limits + const absl::string_view too_many_headers = "http2.too_many_headers"; + // Envoy detected an HTTP/2 frame flood from the server. + const absl::string_view outbound_frame_flood = "http2.outbound_frames_flood"; + // Envoy detected an inbound HTTP/2 frame flood. + const absl::string_view inbound_empty_frame_flood = "http2.inbound_empty_frames_flood"; + // Envoy was configured to drop requests with header keys beginning with underscores. + const absl::string_view invalid_underscore = "http2.unexpected_underscore"; -public: const absl::string_view errorDetails(int error_code) const { switch (error_code) { case NGHTTP2_ERR_HTTP_HEADER: @@ -369,6 +375,7 @@ int ConnectionImpl::StreamImpl::onDataSourceSend(const uint8_t* framehd, size_t if (!parent_.addOutboundFrameFragment(output, framehd, FRAME_HEADER_SIZE)) { ENVOY_CONN_LOG(debug, "error sending data frame: Too many frames in the outbound queue", parent_.connection_); + setDetails(Http2ResponseCodeDetails::get().outbound_frame_flood); return NGHTTP2_ERR_FLOODED; } @@ -947,6 +954,7 @@ int ConnectionImpl::saveHeader(const nghttp2_frame* frame, HeaderString&& name, auto should_return = checkHeaderNameForUnderscores(name.getStringView()); if (should_return) { + stream->setDetails(Http2ResponseCodeDetails::get().invalid_underscore); name.clear(); value.clear(); return should_return.value(); @@ -956,8 +964,9 @@ int ConnectionImpl::saveHeader(const nghttp2_frame* frame, HeaderString&& name, if (stream->headers().byteSize() > max_headers_kb_ * 1024 || stream->headers().size() > max_headers_count_) { - // This will cause the library to reset/close the stream. + stream->setDetails(Http2ResponseCodeDetails::get().too_many_headers); stats_.header_overflow_.inc(); + // This will cause the library to reset/close the stream. return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE; } else { return 0; @@ -1363,7 +1372,7 @@ bool ServerConnectionImpl::trackInboundFrames(const nghttp2_frame_hd* hd, uint32 break; } - if (!checkInboundFrameLimits()) { + if (!checkInboundFrameLimits(hd->stream_id)) { // NGHTTP2_ERR_FLOODED is overridden within nghttp2 library and it doesn't propagate // all the way to nghttp2_session_mem_recv() where we need it. 
flood_detected_ = true; @@ -1373,8 +1382,9 @@ bool ServerConnectionImpl::trackInboundFrames(const nghttp2_frame_hd* hd, uint32 return true; } -bool ServerConnectionImpl::checkInboundFrameLimits() { +bool ServerConnectionImpl::checkInboundFrameLimits(int32_t stream_id) { ASSERT(dispatching_downstream_data_); + ConnectionImpl::StreamImpl* stream = getStream(stream_id); if (consecutive_inbound_frames_with_empty_payload_ > max_consecutive_inbound_frames_with_empty_payload_) { @@ -1382,6 +1392,9 @@ bool ServerConnectionImpl::checkInboundFrameLimits() { "error reading frame: Too many consecutive frames with an empty payload " "received in this HTTP/2 session.", connection_); + if (stream) { + stream->setDetails(Http2ResponseCodeDetails::get().inbound_empty_frame_flood); + } stats_.inbound_empty_frames_flood_.inc(); return false; } diff --git a/source/common/http/http2/codec_impl.h b/source/common/http/http2/codec_impl.h index 8bd5d8b3d1fd..649f7e77a1f0 100644 --- a/source/common/http/http2/codec_impl.h +++ b/source/common/http/http2/codec_impl.h @@ -506,7 +506,7 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable checkHeaderNameForUnderscores(absl::string_view header_name) override; // Http::Connection diff --git a/test/common/http/http1/codec_impl_test.cc b/test/common/http/http1/codec_impl_test.cc index 77565550dc26..258e89767011 100644 --- a/test/common/http/http1/codec_impl_test.cc +++ b/test/common/http/http1/codec_impl_test.cc @@ -1129,7 +1129,7 @@ TEST_P(Http1ServerConnectionImplTest, HeaderNameWithUnderscoreCauseRequestReject auto status = codec_->dispatch(buffer); EXPECT_TRUE(isCodecProtocolError(status)); EXPECT_EQ(status.message(), "http/1.1 protocol error: header name contains underscores"); - EXPECT_EQ("http1.invalid_characters", response_encoder->getStream().responseDetails()); + EXPECT_EQ("http1.unexpected_underscore", response_encoder->getStream().responseDetails()); EXPECT_EQ(1, store_.counter("http1.requests_rejected_with_underscores_in_headers").value()); } diff --git a/test/integration/http2_integration_test.cc b/test/integration/http2_integration_test.cc index 73e0e615aa7a..aea7937dc7db 100644 --- a/test/integration/http2_integration_test.cc +++ b/test/integration/http2_integration_test.cc @@ -1730,6 +1730,7 @@ TEST_P(Http2FloodMitigationTest, EmptyHeaders) { } TEST_P(Http2FloodMitigationTest, EmptyHeadersContinuation) { + useAccessLog("%RESPONSE_FLAGS% %RESPONSE_CODE_DETAILS%"); beginSession(); uint32_t request_idx = 0; @@ -1743,12 +1744,14 @@ TEST_P(Http2FloodMitigationTest, EmptyHeadersContinuation) { tcp_client_->waitForDisconnect(); + EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("http2.inbound_empty_frames_flood")); EXPECT_EQ(1, test_server_->counter("http2.inbound_empty_frames_flood")->value()); EXPECT_EQ(1, test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value()); } TEST_P(Http2FloodMitigationTest, EmptyData) { + useAccessLog("%RESPONSE_FLAGS% %RESPONSE_CODE_DETAILS%"); beginSession(); fake_upstreams_[0]->set_allow_unexpected_disconnects(true); @@ -1763,6 +1766,7 @@ TEST_P(Http2FloodMitigationTest, EmptyData) { tcp_client_->waitForDisconnect(); + EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("http2.inbound_empty_frames_flood")); EXPECT_EQ(1, test_server_->counter("http2.inbound_empty_frames_flood")->value()); EXPECT_EQ(1, test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value()); diff --git a/test/integration/http_integration.cc 
b/test/integration/http_integration.cc index 02ae0e96a690..b3ceabccef0e 100644 --- a/test/integration/http_integration.cc +++ b/test/integration/http_integration.cc @@ -40,6 +40,8 @@ namespace Envoy { namespace { +using testing::HasSubstr; + envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager::CodecType typeToCodecType(Http::CodecClient::Type type) { switch (type) { @@ -1002,6 +1004,7 @@ void HttpIntegrationTest::testLargeRequestUrl(uint32_t url_size, uint32_t max_he void HttpIntegrationTest::testLargeRequestHeaders(uint32_t size, uint32_t count, uint32_t max_size, uint32_t max_count) { + useAccessLog("%RESPONSE_CODE_DETAILS%"); // `size` parameter dictates the size of each header that will be added to the request and `count` // parameter is the number of headers to be added. The actual request byte size will exceed `size` // due to the keys and other headers. The actual request header count will exceed `count` by four @@ -1045,6 +1048,9 @@ void HttpIntegrationTest::testLargeRequestHeaders(uint32_t size, uint32_t count, EXPECT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().getStatusValue()); } + if (count > max_count) { + EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("too_many_headers")); + } } void HttpIntegrationTest::testLargeRequestTrailers(uint32_t size, uint32_t max_size) { diff --git a/test/integration/protocol_integration_test.cc b/test/integration/protocol_integration_test.cc index 46f8af0dbb58..4176a1cd391c 100644 --- a/test/integration/protocol_integration_test.cc +++ b/test/integration/protocol_integration_test.cc @@ -942,6 +942,7 @@ TEST_P(ProtocolIntegrationTest, HeadersWithUnderscoresRemainByDefault) { // Verify that request with headers containing underscores is rejected when configured. TEST_P(ProtocolIntegrationTest, HeadersWithUnderscoresCauseRequestRejectedByDefault) { + useAccessLog("%RESPONSE_FLAGS% %RESPONSE_CODE_DETAILS%"); config_helper_.addConfigModifier( [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& hcm) -> void { @@ -967,6 +968,7 @@ TEST_P(ProtocolIntegrationTest, HeadersWithUnderscoresCauseRequestRejectedByDefa ASSERT_TRUE(response->reset()); EXPECT_EQ(Http::StreamResetReason::RemoteReset, response->reset_reason()); } + EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("unexpected_underscore")); } TEST_P(DownstreamProtocolIntegrationTest, ValidZeroLengthContent) { From b510dd075ca453ea4893ab5fcfe41485606f146b Mon Sep 17 00:00:00 2001 From: Yosry Ahmed Date: Wed, 22 Jul 2020 04:55:40 -0700 Subject: [PATCH 711/909] caching: Fully parse and handle request and response cache-control headers (#11727) Request and response cache-control headers are fully parsed in CacheHeadersUtils. isCacheableResponse in the CacheFilter now uses parsed cache-control headers to check if a received response can be cached. HttpCache now uses parsed cache-control headers to check if a cached response requires validation before it is served. Cache entries validation is not yet implemented. 
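[Reviewer note, not part of the patch] A condensed sketch of the request-side gating this change introduces; the names follow the cache_filter.cc diff below, only the no_store_ directive is shown, and the full parsed structures live in cache_headers_utils.

// Sketch: a request carrying "Cache-Control: no-store" must not cause the
// response to be inserted into the cache (RFC 7234, response cacheability).
LookupRequest lookup_request(request_headers, time_source_.systemTime());
request_allows_inserts_ = !lookup_request.requestCacheControl().no_store_;
// ... later, when response headers arrive:
if (lookup_ && request_allows_inserts_ && CacheabilityUtils::isCacheableResponse(headers)) {
  insert_ = cache_.makeInsertContext(std::move(lookup_));
  insert_->insertHeaders(headers, end_stream);
}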
Signed-off-by: Yosry Ahmed yosryahmed@google.com Risk Level: Low Testing: Unit testing Docs Changes: N/A Release Notes: N/A Fixes #9833 Signed-off-by: Yosry Ahmed --- source/extensions/filters/http/cache/BUILD | 18 +- .../filters/http/cache/cache_filter.cc | 21 +- .../filters/http/cache/cache_filter.h | 7 +- .../filters/http/cache/cache_filter_utils.cc | 38 --- .../filters/http/cache/cache_filter_utils.h | 21 -- .../filters/http/cache/cache_headers_utils.cc | 174 ++++++++++ .../filters/http/cache/cache_headers_utils.h | 100 ++++++ .../filters/http/cache/cacheability_utils.cc | 58 ++++ .../filters/http/cache/cacheability_utils.h | 30 ++ .../filters/http/cache/http_cache.cc | 70 +++- .../filters/http/cache/http_cache.h | 9 +- .../filters/http/cache/http_cache_utils.cc | 188 ----------- .../filters/http/cache/http_cache_utils.h | 34 -- test/extensions/filters/http/cache/BUILD | 12 +- .../http/cache/cache_filter_utils_test.cc | 68 ---- .../http/cache/cache_headers_utils_test.cc | 318 ++++++++++++++++++ .../http/cache/cacheability_utils_test.cc | 123 +++++++ .../filters/http/cache/http_cache_test.cc | 14 +- .../http/cache/http_cache_utils_test.cc | 82 ----- .../simple_http_cache_test.cc | 1 + tools/spelling/spelling_dictionary.txt | 1 + 21 files changed, 904 insertions(+), 483 deletions(-) delete mode 100644 source/extensions/filters/http/cache/cache_filter_utils.cc delete mode 100644 source/extensions/filters/http/cache/cache_filter_utils.h create mode 100644 source/extensions/filters/http/cache/cache_headers_utils.cc create mode 100644 source/extensions/filters/http/cache/cache_headers_utils.h create mode 100644 source/extensions/filters/http/cache/cacheability_utils.cc create mode 100644 source/extensions/filters/http/cache/cacheability_utils.h delete mode 100644 source/extensions/filters/http/cache/http_cache_utils.cc delete mode 100644 source/extensions/filters/http/cache/http_cache_utils.h delete mode 100644 test/extensions/filters/http/cache/cache_filter_utils_test.cc create mode 100644 test/extensions/filters/http/cache/cache_headers_utils_test.cc create mode 100644 test/extensions/filters/http/cache/cacheability_utils_test.cc delete mode 100644 test/extensions/filters/http/cache/http_cache_utils_test.cc diff --git a/source/extensions/filters/http/cache/BUILD b/source/extensions/filters/http/cache/BUILD index 9fd2e8f27ba9..ee97d6f0a9a8 100644 --- a/source/extensions/filters/http/cache/BUILD +++ b/source/extensions/filters/http/cache/BUILD @@ -17,7 +17,8 @@ envoy_cc_library( srcs = ["cache_filter.cc"], hdrs = ["cache_filter.h"], deps = [ - ":cache_filter_utils_lib", + ":cache_headers_utils_lib", + ":cacheability_utils_lib", ":http_cache_lib", "//source/common/common:logger_lib", "//source/common/common:macros", @@ -29,10 +30,11 @@ envoy_cc_library( ) envoy_cc_library( - name = "cache_filter_utils_lib", - srcs = ["cache_filter_utils.cc"], - hdrs = ["cache_filter_utils.h"], + name = "cacheability_utils_lib", + srcs = ["cacheability_utils.cc"], + hdrs = ["cacheability_utils.h"], deps = [ + ":cache_headers_utils_lib", "//source/common/common:utility_lib", "//source/common/http:headers_lib", ], @@ -48,7 +50,7 @@ envoy_cc_library( srcs = ["http_cache.cc"], hdrs = ["http_cache.h"], deps = [ - ":http_cache_utils_lib", + ":cache_headers_utils_lib", ":key_cc_proto", "//include/envoy/buffer:buffer_interface", "//include/envoy/common:time_interface", @@ -63,9 +65,9 @@ envoy_cc_library( ) envoy_cc_library( - name = "http_cache_utils_lib", - srcs = ["http_cache_utils.cc"], - hdrs = 
["http_cache_utils.h"], + name = "cache_headers_utils_lib", + srcs = ["cache_headers_utils.cc"], + hdrs = ["cache_headers_utils.h"], deps = [ "//include/envoy/common:time_interface", "//include/envoy/http:header_map_interface", diff --git a/source/extensions/filters/http/cache/cache_filter.cc b/source/extensions/filters/http/cache/cache_filter.cc index 0cdc0dbc65cc..039cb33b37ed 100644 --- a/source/extensions/filters/http/cache/cache_filter.cc +++ b/source/extensions/filters/http/cache/cache_filter.cc @@ -2,7 +2,7 @@ #include "common/http/headers.h" -#include "extensions/filters/http/cache/cache_filter_utils.h" +#include "extensions/filters/http/cache/cacheability_utils.h" #include "absl/strings/string_view.h" @@ -37,13 +37,17 @@ Http::FilterHeadersStatus CacheFilter::decodeHeaders(Http::RequestHeaderMap& hea *decoder_callbacks_, headers); return Http::FilterHeadersStatus::Continue; } - if (!CacheFilterUtils::isCacheableRequest(headers)) { + if (!CacheabilityUtils::isCacheableRequest(headers)) { ENVOY_STREAM_LOG(debug, "CacheFilter::decodeHeaders ignoring uncacheable request: {}", *decoder_callbacks_, headers); return Http::FilterHeadersStatus::Continue; } ASSERT(decoder_callbacks_); - lookup_ = cache_.makeLookupContext(LookupRequest(headers, time_source_.systemTime())); + + LookupRequest lookup_request(headers, time_source_.systemTime()); + request_allows_inserts_ = !lookup_request.requestCacheControl().no_store_; + lookup_ = cache_.makeLookupContext(std::move(lookup_request)); + ASSERT(lookup_); ENVOY_STREAM_LOG(debug, "CacheFilter::decodeHeaders starting lookup", *decoder_callbacks_); @@ -60,7 +64,10 @@ Http::FilterHeadersStatus CacheFilter::decodeHeaders(Http::RequestHeaderMap& hea Http::FilterHeadersStatus CacheFilter::encodeHeaders(Http::ResponseHeaderMap& headers, bool end_stream) { - if (lookup_ && CacheFilterUtils::isCacheableResponse(headers)) { + // If lookup_ is null, the request wasn't cacheable, so the response isn't either. + if (lookup_ && request_allows_inserts_ && CacheabilityUtils::isCacheableResponse(headers)) { + // TODO(yosrym93): Add date internal header or metadata to cached responses and use it instead + // of the date header ENVOY_STREAM_LOG(debug, "CacheFilter::encodeHeaders inserting headers", *encoder_callbacks_); insert_ = cache_.makeInsertContext(std::move(lookup_)); insert_->insertHeaders(headers, end_stream); @@ -79,11 +86,15 @@ Http::FilterDataStatus CacheFilter::encodeData(Buffer::Instance& data, bool end_ } void CacheFilter::onHeaders(LookupResult&& result) { + // TODO(yosrym93): Handle request only-if-cached directive switch (result.cache_entry_status_) { - case CacheEntryStatus::RequiresValidation: case CacheEntryStatus::FoundNotModified: case CacheEntryStatus::UnsatisfiableRange: NOT_IMPLEMENTED_GCOVR_EXCL_LINE; // We don't yet return or support these codes. 
+ case CacheEntryStatus::RequiresValidation: + // Cache entries that require validation are treated as unusable entries + // until validation is implemented + // TODO(yosrym93): Implement response validation case CacheEntryStatus::Unusable: if (state_ == GetHeadersState::FinishedGetHeadersCall) { // decodeHeader returned Http::FilterHeadersStatus::StopAllIterationAndWatermark--restart it diff --git a/source/extensions/filters/http/cache/cache_filter.h b/source/extensions/filters/http/cache/cache_filter.h index 67d8193f00e5..78072c92e3cd 100644 --- a/source/extensions/filters/http/cache/cache_filter.h +++ b/source/extensions/filters/http/cache/cache_filter.h @@ -9,6 +9,7 @@ #include "common/common/logger.h" +#include "extensions/filters/http/cache/cache_headers_utils.h" #include "extensions/filters/http/cache/http_cache.h" #include "extensions/filters/http/common/pass_through_filter.h" @@ -54,7 +55,11 @@ class CacheFilter : public Http::PassThroughFilter, // True if the response has trailers. // TODO(toddmgreer): cache trailers. - bool response_has_trailers_; + bool response_has_trailers_ = false; + + // True if a request allows cache inserts according to: + // https://httpwg.org/specs/rfc7234.html#response.cacheability + bool request_allows_inserts_ = false; // Used for coordinating between decodeHeaders and onHeaders. enum class GetHeadersState { Initial, FinishedGetHeadersCall, GetHeadersResultUnusable }; diff --git a/source/extensions/filters/http/cache/cache_filter_utils.cc b/source/extensions/filters/http/cache/cache_filter_utils.cc deleted file mode 100644 index e4f51bc5c611..000000000000 --- a/source/extensions/filters/http/cache/cache_filter_utils.cc +++ /dev/null @@ -1,38 +0,0 @@ -#include "extensions/filters/http/cache/cache_filter_utils.h" - -#include "common/common/utility.h" - -namespace Envoy { -namespace Extensions { -namespace HttpFilters { -namespace Cache { - -Http::RegisterCustomInlineHeader - authorization_handle(Http::CustomHeaders::get().Authorization); -Http::RegisterCustomInlineHeader - cache_control_handle(Http::CustomHeaders::get().Referer); - -bool CacheFilterUtils::isCacheableRequest(const Http::RequestHeaderMap& headers) { - const absl::string_view method = headers.getMethodValue(); - const absl::string_view forwarded_proto = headers.getForwardedProtoValue(); - const Http::HeaderValues& header_values = Http::Headers::get(); - // TODO(toddmgreer): Also serve HEAD requests from cache. - // TODO(toddmgreer): Check all the other cache-related headers. - return headers.Path() && headers.Host() && !headers.getInline(authorization_handle.handle()) && - (method == header_values.MethodValues.Get) && - (forwarded_proto == header_values.SchemeValues.Http || - forwarded_proto == header_values.SchemeValues.Https); -} - -bool CacheFilterUtils::isCacheableResponse(const Http::ResponseHeaderMap& headers) { - const absl::string_view cache_control = headers.getInlineValue(cache_control_handle.handle()); - // TODO(toddmgreer): fully check for cacheability. See for example - // https://github.com/apache/incubator-pagespeed-mod/blob/master/pagespeed/kernel/http/caching_headers.h. 
- return !StringUtil::caseFindToken(cache_control, ",", - Http::CustomHeaders::get().CacheControlValues.Private); -} - -} // namespace Cache -} // namespace HttpFilters -} // namespace Extensions -} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/filters/http/cache/cache_filter_utils.h b/source/extensions/filters/http/cache/cache_filter_utils.h deleted file mode 100644 index 6af3ae764d54..000000000000 --- a/source/extensions/filters/http/cache/cache_filter_utils.h +++ /dev/null @@ -1,21 +0,0 @@ -#pragma once - -#include "common/common/utility.h" -#include "common/http/headers.h" - -namespace Envoy { -namespace Extensions { -namespace HttpFilters { -namespace Cache { -class CacheFilterUtils { -public: - // Checks if a request can be served from cache - static bool isCacheableRequest(const Http::RequestHeaderMap& headers); - - // Checks if a response can be stored in cache - static bool isCacheableResponse(const Http::ResponseHeaderMap& headers); -}; -} // namespace Cache -} // namespace HttpFilters -} // namespace Extensions -} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/filters/http/cache/cache_headers_utils.cc b/source/extensions/filters/http/cache/cache_headers_utils.cc new file mode 100644 index 000000000000..988c9e8e0568 --- /dev/null +++ b/source/extensions/filters/http/cache/cache_headers_utils.cc @@ -0,0 +1,174 @@ +#include "extensions/filters/http/cache/cache_headers_utils.h" + +#include +#include + +#include "envoy/common/time.h" + +#include "absl/algorithm/container.h" +#include "absl/strings/ascii.h" +#include "absl/strings/numbers.h" +#include "absl/strings/str_split.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace Cache { + +// Utility functions used in RequestCacheControl & ResponseCacheControl +namespace { +// A directive with an invalid duration is ignored, the RFC does not specify a behavior: +// https://httpwg.org/specs/rfc7234.html#delta-seconds +OptionalDuration parseDuration(absl::string_view s) { + OptionalDuration duration; + // Strip quotation marks if any + if (s.size() > 1 && s.front() == '"' && s.back() == '"') { + s = s.substr(1, s.size() - 2); + } + long num; + if (absl::SimpleAtoi(s, &num) && num >= 0) { + // s is a valid string of digits representing a positive number + duration = std::chrono::seconds(num); + } + return duration; +} + +inline std::pair +separateDirectiveAndArgument(absl::string_view full_directive) { + return absl::StrSplit(absl::StripAsciiWhitespace(full_directive), absl::MaxSplits('=', 1)); +} +} // namespace + +// The grammar for This Cache-Control header value should be: +// Cache-Control = 1#cache-directive +// cache-directive = token [ "=" ( token / quoted-string ) ] +// token = 1*tchar +// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" +// / "-" / "." 
/ "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA +// quoted-string = DQUOTE *( qdtext / quoted-pair ) DQUOTE +// qdtext = HTAB / SP /%x21 / %x23-5B / %x5D-7E / obs-text +// obs-text = %x80-FF +// quoted-pair = "\" ( HTAB / SP / VCHAR / obs-text ) +// VCHAR = %x21-7E ; visible (printing) characters + +// Multiple directives are comma separated according to: +// https://httpwg.org/specs/rfc7234.html#collected.abnf + +RequestCacheControl::RequestCacheControl(absl::string_view cache_control_header) { + const std::vector directives = absl::StrSplit(cache_control_header, ','); + + for (auto full_directive : directives) { + absl::string_view directive, argument; + std::tie(directive, argument) = separateDirectiveAndArgument(full_directive); + + if (directive == "no-cache") { + must_validate_ = true; + } else if (directive == "no-store") { + no_store_ = true; + } else if (directive == "no-transform") { + no_transform_ = true; + } else if (directive == "only-if-cached") { + only_if_cached_ = true; + } else if (directive == "max-age") { + max_age_ = parseDuration(argument); + } else if (directive == "min-fresh") { + min_fresh_ = parseDuration(argument); + } else if (directive == "max-stale") { + max_stale_ = argument.empty() ? SystemTime::duration::max() : parseDuration(argument); + } + } +} + +ResponseCacheControl::ResponseCacheControl(absl::string_view cache_control_header) { + const std::vector directives = absl::StrSplit(cache_control_header, ','); + + for (auto full_directive : directives) { + absl::string_view directive, argument; + std::tie(directive, argument) = separateDirectiveAndArgument(full_directive); + + if (directive == "no-cache") { + // If no-cache directive has arguments they are ignored - not handled + must_validate_ = true; + } else if (directive == "must-revalidate" || directive == "proxy-revalidate") { + no_stale_ = true; + } else if (directive == "no-store" || directive == "private") { + // If private directive has arguments they are ignored - not handled + no_store_ = true; + } else if (directive == "no-transform") { + no_transform_ = true; + } else if (directive == "public") { + is_public_ = true; + } else if (directive == "s-maxage") { + max_age_ = parseDuration(argument); + } else if (!max_age_.has_value() && directive == "max-age") { + max_age_ = parseDuration(argument); + } + } +} + +std::ostream& operator<<(std::ostream& os, const OptionalDuration& duration) { + return duration.has_value() ? 
os << duration.value().count() : os << " "; +} + +std::ostream& operator<<(std::ostream& os, const RequestCacheControl& request_cache_control) { + return os << "{" + << "must_validate: " << request_cache_control.must_validate_ << ", " + << "no_store: " << request_cache_control.no_store_ << ", " + << "no_transform: " << request_cache_control.no_transform_ << ", " + << "only_if_cached: " << request_cache_control.only_if_cached_ << ", " + << "max_age: " << request_cache_control.max_age_ << ", " + << "min_fresh: " << request_cache_control.min_fresh_ << ", " + << "max_stale: " << request_cache_control.max_stale_ << "}"; +} + +std::ostream& operator<<(std::ostream& os, const ResponseCacheControl& response_cache_control) { + return os << "{" + << "must_validate: " << response_cache_control.must_validate_ << ", " + << "no_store: " << response_cache_control.no_store_ << ", " + << "no_transform: " << response_cache_control.no_transform_ << ", " + << "no_stale: " << response_cache_control.no_stale_ << ", " + << "public: " << response_cache_control.is_public_ << ", " + << "max_age: " << response_cache_control.max_age_ << "}"; +} + +bool operator==(const RequestCacheControl& lhs, const RequestCacheControl& rhs) { + return (lhs.must_validate_ == rhs.must_validate_) && (lhs.no_store_ == rhs.no_store_) && + (lhs.no_transform_ == rhs.no_transform_) && (lhs.only_if_cached_ == rhs.only_if_cached_) && + (lhs.max_age_ == rhs.max_age_) && (lhs.min_fresh_ == rhs.min_fresh_) && + (lhs.max_stale_ == rhs.max_stale_); +} + +bool operator==(const ResponseCacheControl& lhs, const ResponseCacheControl& rhs) { + return (lhs.must_validate_ == rhs.must_validate_) && (lhs.no_store_ == rhs.no_store_) && + (lhs.no_transform_ == rhs.no_transform_) && (lhs.no_stale_ == rhs.no_stale_) && + (lhs.is_public_ == rhs.is_public_) && (lhs.max_age_ == rhs.max_age_); +} + +SystemTime CacheHeadersUtils::httpTime(const Http::HeaderEntry* header_entry) { + if (!header_entry) { + return {}; + } + absl::Time time; + const std::string input(header_entry->value().getStringView()); + + // Acceptable Date/Time Formats per + // https://tools.ietf.org/html/rfc7231#section-7.1.1.1 + // + // Sun, 06 Nov 1994 08:49:37 GMT ; IMF-fixdate + // Sunday, 06-Nov-94 08:49:37 GMT ; obsolete RFC 850 format + // Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format + static const char* rfc7231_date_formats[] = {"%a, %d %b %Y %H:%M:%S GMT", + "%A, %d-%b-%y %H:%M:%S GMT", "%a %b %e %H:%M:%S %Y"}; + + for (const std::string& format : rfc7231_date_formats) { + if (absl::ParseTime(format, input, &time, nullptr)) { + return ToChronoTime(time); + } + } + return {}; +} + +} // namespace Cache +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/cache/cache_headers_utils.h b/source/extensions/filters/http/cache/cache_headers_utils.h new file mode 100644 index 000000000000..9bf50370e61e --- /dev/null +++ b/source/extensions/filters/http/cache/cache_headers_utils.h @@ -0,0 +1,100 @@ +#pragma once + +#include "envoy/common/time.h" +#include "envoy/http/header_map.h" + +#include "absl/strings/string_view.h" +#include "absl/time/time.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace Cache { + +using OptionalDuration = absl::optional; + +// According to: https://httpwg.org/specs/rfc7234.html#cache-request-directive +struct RequestCacheControl { + RequestCacheControl() = default; + explicit RequestCacheControl(absl::string_view cache_control_header); + + // must_validate is true 
if 'no-cache' directive is present + // A cached response must not be served without successful validation with the origin + bool must_validate_ = false; + + // The response to this request must not be cached (stored) + bool no_store_ = false; + + // 'no-transform' directive is not used now + // No transformations should be done to the response of this request, as defined by: + // https://httpwg.org/specs/rfc7230.html#message.transformations + bool no_transform_ = false; + + // 'only-if-cached' directive is not used now + // The request should be satisfied using a cached response, or respond with 504 (Gateway Error) + bool only_if_cached_ = false; + + // The client is unwilling to receive a cached response whose age exceeds the max-age + OptionalDuration max_age_; + + // The client is unwilling to received a cached response that satisfies: + // expiration_time - now < min-fresh + OptionalDuration min_fresh_; + + // The client is willing to receive a stale response that satisfies: + // now - expiration_time < max-stale + // If max-stale has no value then the client is willing to receive any stale response + OptionalDuration max_stale_; +}; + +// According to: https://httpwg.org/specs/rfc7234.html#cache-response-directive +struct ResponseCacheControl { + ResponseCacheControl() = default; + explicit ResponseCacheControl(absl::string_view cache_control_header); + + // must_validate is true if 'no-cache' directive is present; arguments are ignored for now + // This response must not be used to satisfy subsequent requests without successful validation + // with the origin + bool must_validate_ = false; + + // no_store is true if any of 'no-store' or 'private' directives is present. + // 'private' arguments are ignored for now so it is equivalent to 'no-store' + // This response must not be cached (stored) + bool no_store_ = false; + + // 'no-transform' directive is not used now + // No transformations should be done to this response , as defined by: + // https://httpwg.org/specs/rfc7230.html#message.transformations + bool no_transform_ = false; + + // no_stale is true if any of 'must-revalidate' or 'proxy-revalidate' directives is present + // This response must not be served stale without successful validation with the origin + bool no_stale_ = false; + + // 'public' directive is not used now + // This response may be stored, even if the response would normally be non-cacheable or cacheable + // only within a private cache, see: + // https://httpwg.org/specs/rfc7234.html#cache-response-directive.public + bool is_public_ = false; + + // max_age is set if to 's-maxage' if present, if not it is set to 'max-age' if present. + // Indicates the maximum time after which this response will be considered stale + OptionalDuration max_age_; +}; + +std::ostream& operator<<(std::ostream& os, const OptionalDuration& duration); +std::ostream& operator<<(std::ostream& os, const RequestCacheControl& request_cache_control); +std::ostream& operator<<(std::ostream& os, const ResponseCacheControl& response_cache_control); +bool operator==(const RequestCacheControl& lhs, const RequestCacheControl& rhs); +bool operator==(const ResponseCacheControl& lhs, const ResponseCacheControl& rhs); + +class CacheHeadersUtils { +public: + // Parses header_entry as an HTTP time. Returns SystemTime() if + // header_entry is null or malformed. 
+ static SystemTime httpTime(const Http::HeaderEntry* header_entry); +}; +} // namespace Cache +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/cache/cacheability_utils.cc b/source/extensions/filters/http/cache/cacheability_utils.cc new file mode 100644 index 000000000000..fef63c201444 --- /dev/null +++ b/source/extensions/filters/http/cache/cacheability_utils.cc @@ -0,0 +1,58 @@ +#include "extensions/filters/http/cache/cacheability_utils.h" + +#include "common/common/macros.h" +#include "common/common/utility.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace Cache { + +namespace { +const absl::flat_hash_set& cacheableStatusCodes() { + // As defined by: + // https://tools.ietf.org/html/rfc7231#section-6.1, + // https://tools.ietf.org/html/rfc7538#section-3, + // https://tools.ietf.org/html/rfc7725#section-3 + // TODO(yosrym93): the list of cacheable status codes should be configurable + CONSTRUCT_ON_FIRST_USE(absl::flat_hash_set, "200", "203", "204", "206", "300", + "301", "308", "404", "405", "410", "414", "451", "501"); +} +} // namespace + +Http::RegisterCustomInlineHeader + authorization_handle(Http::CustomHeaders::get().Authorization); +Http::RegisterCustomInlineHeader + cache_control_handle(Http::CustomHeaders::get().CacheControl); + +bool CacheabilityUtils::isCacheableRequest(const Http::RequestHeaderMap& headers) { + const absl::string_view method = headers.getMethodValue(); + const absl::string_view forwarded_proto = headers.getForwardedProtoValue(); + const Http::HeaderValues& header_values = Http::Headers::get(); + // TODO(toddmgreer): Also serve HEAD requests from cache. + // TODO(toddmgreer): Check all the other cache-related headers. 
+ return headers.Path() && headers.Host() && !headers.getInline(authorization_handle.handle()) && + (method == header_values.MethodValues.Get) && + (forwarded_proto == header_values.SchemeValues.Http || + forwarded_proto == header_values.SchemeValues.Https); +} + +bool CacheabilityUtils::isCacheableResponse(const Http::ResponseHeaderMap& headers) { + absl::string_view cache_control = headers.getInlineValue(cache_control_handle.handle()); + ResponseCacheControl response_cache_control(cache_control); + + // Only cache responses with explicit validation data, either: + // max-age or s-maxage cache-control directives with date header + // expires header + const bool has_validation_data = + (headers.Date() && response_cache_control.max_age_.has_value()) || + headers.get(Http::Headers::get().Expires); + + return !response_cache_control.no_store_ && + cacheableStatusCodes().contains((headers.getStatusValue())) && has_validation_data; +} + +} // namespace Cache +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/cache/cacheability_utils.h b/source/extensions/filters/http/cache/cacheability_utils.h new file mode 100644 index 000000000000..97f88aebbfbd --- /dev/null +++ b/source/extensions/filters/http/cache/cacheability_utils.h @@ -0,0 +1,30 @@ +#pragma once + +#include "common/common/utility.h" +#include "common/http/headers.h" + +#include "extensions/filters/http/cache/cache_headers_utils.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace Cache { +class CacheabilityUtils { +public: + // Checks if a request can be served from cache, + // this does not depend on cache-control headers as + // request cache-control headers only decide whether + // validation is required and whether the response can be cached + static bool isCacheableRequest(const Http::RequestHeaderMap& headers); + + // Checks if a response can be stored in cache + // Note that if a request is not cacheable according to 'isCacheableRequest' + // then its response is also not cacheable + // Therefore, isCacheableRequest, isCacheableResponse and CacheFilter::request_allows_inserts_ + // together should cover https://httpwg.org/specs/rfc7234.html#response.cacheability + static bool isCacheableResponse(const Http::ResponseHeaderMap& headers); +}; +} // namespace Cache +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/cache/http_cache.cc b/source/extensions/filters/http/cache/http_cache.cc index dc75dfdfe38a..b9e17d495e5a 100644 --- a/source/extensions/filters/http/cache/http_cache.cc +++ b/source/extensions/filters/http/cache/http_cache.cc @@ -4,12 +4,11 @@ #include #include "envoy/http/codes.h" +#include "envoy/http/header_map.h" #include "common/http/headers.h" #include "common/protobuf/utility.h" -#include "extensions/filters/http/cache/http_cache_utils.h" - #include "absl/time/time.h" namespace Envoy { @@ -74,21 +73,57 @@ LookupRequest::LookupRequest(const Http::RequestHeaderMap& request_headers, Syst size_t stableHashKey(const Key& key) { return MessageUtil::hash(key); } size_t localHashKey(const Key& key) { return stableHashKey(key); } -// Returns true if response_headers is fresh. 
-bool LookupRequest::isFresh(const Http::ResponseHeaderMap& response_headers) const { - if (!response_headers.Date()) { - return false; +bool LookupRequest::requiresValidation(const Http::ResponseHeaderMap& response_headers) const { + // TODO(yosrym93): Store parsed response cache-control in cache instead of parsing it on every + // lookup + const absl::string_view cache_control = + response_headers.getInlineValue(response_cache_control_handle.handle()); + const ResponseCacheControl response_cache_control(cache_control); + + const SystemTime response_time = CacheHeadersUtils::httpTime(response_headers.Date()); + + if (timestamp_ < response_time) { + // Response time is in the future, validate response + return true; } - const Http::HeaderEntry* cache_control_header = - response_headers.getInline(response_cache_control_handle.handle()); - if (cache_control_header) { - const SystemTime::duration effective_max_age = - HttpCacheUtils::effectiveMaxAge(cache_control_header->value().getStringView()); - return timestamp_ - HttpCacheUtils::httpTime(response_headers.Date()) < effective_max_age; + + const SystemTime::duration response_age = timestamp_ - response_time; + const bool request_max_age_exceeded = request_cache_control_.max_age_.has_value() && + request_cache_control_.max_age_.value() < response_age; + if (response_cache_control.must_validate_ || request_cache_control_.must_validate_ || + request_max_age_exceeded) { + // Either the request or response explicitly require validation or a request max-age requirement + // is not satisfied + return true; + } + + // CacheabilityUtils::isCacheableResponse(..) guarantees that any cached response satisfies this + // When date metadata injection for responses with no date + // is implemented, this ASSERT will need to be updated + ASSERT((response_headers.Date() && response_cache_control.max_age_.has_value()) || + response_headers.get(Http::Headers::get().Expires), + "Cache entry does not have valid expiration data."); + + const SystemTime expiration_time = + response_cache_control.max_age_.has_value() + ? response_time + response_cache_control.max_age_.value() + : CacheHeadersUtils::httpTime(response_headers.get(Http::Headers::get().Expires)); + + if (timestamp_ > expiration_time) { + // Response is stale, requires validation + // if the response does not allow being served stale + // or the request max-stale directive does not allow it + const bool allowed_by_max_stale = + request_cache_control_.max_stale_.has_value() && + request_cache_control_.max_stale_.value() > timestamp_ - expiration_time; + return response_cache_control.no_stale_ || !allowed_by_max_stale; + } else { + // Response is fresh, requires validation only if there is an unsatisfied min-fresh requirement + const bool min_fresh_unsatisfied = + request_cache_control_.min_fresh_.has_value() && + request_cache_control_.min_fresh_.value() > expiration_time - timestamp_; + return min_fresh_unsatisfied; } - // We didn't find a cache-control header with enough info to determine - // freshness, so fall back to the expires header. - return timestamp_ <= HttpCacheUtils::httpTime(response_headers.get(Http::Headers::get().Expires)); } LookupResult LookupRequest::makeLookupResult(Http::ResponseHeaderMapPtr&& response_headers, @@ -96,8 +131,9 @@ LookupResult LookupRequest::makeLookupResult(Http::ResponseHeaderMapPtr&& respon // TODO(toddmgreer): Implement all HTTP caching semantics. ASSERT(response_headers); LookupResult result; - result.cache_entry_status_ = - isFresh(*response_headers) ? 
CacheEntryStatus::Ok : CacheEntryStatus::RequiresValidation; + result.cache_entry_status_ = requiresValidation(*response_headers) + ? CacheEntryStatus::RequiresValidation + : CacheEntryStatus::Ok; result.headers_ = std::move(response_headers); result.content_length_ = content_length; if (!adjustByteRangeSet(result.response_ranges_, request_range_spec_, content_length)) { diff --git a/source/extensions/filters/http/cache/http_cache.h b/source/extensions/filters/http/cache/http_cache.h index e3c09d2cf4a1..b29c88d8c6db 100644 --- a/source/extensions/filters/http/cache/http_cache.h +++ b/source/extensions/filters/http/cache/http_cache.h @@ -14,6 +14,8 @@ #include "source/extensions/filters/http/cache/key.pb.h" +#include "extensions/filters/http/cache/cache_headers_utils.h" + #include "absl/strings/string_view.h" namespace Envoy { @@ -162,6 +164,8 @@ class LookupRequest { // Prereq: request_headers's Path(), Scheme(), and Host() are non-null. LookupRequest(const Http::RequestHeaderMap& request_headers, SystemTime timestamp); + const RequestCacheControl& requestCacheControl() const { return request_cache_control_; } + // Caches may modify the key according to local needs, though care must be // taken to ensure that meaningfully distinct responses have distinct keys. const Key& key() const { return key_; } @@ -179,7 +183,7 @@ class LookupRequest { uint64_t content_length) const; private: - bool isFresh(const Http::ResponseHeaderMap& response_headers) const; + bool requiresValidation(const Http::ResponseHeaderMap& response_headers) const; Key key_; std::vector request_range_spec_; @@ -191,7 +195,8 @@ class LookupRequest { // headers, that server may need to see these headers. For local implementations, it may be // simpler to instead call makeLookupResult with each potential response. HeaderVector vary_headers_; - const std::string request_cache_control_; + + const RequestCacheControl request_cache_control_; }; // Statically known information about a cache. diff --git a/source/extensions/filters/http/cache/http_cache_utils.cc b/source/extensions/filters/http/cache/http_cache_utils.cc deleted file mode 100644 index b93ed5f2f3a7..000000000000 --- a/source/extensions/filters/http/cache/http_cache_utils.cc +++ /dev/null @@ -1,188 +0,0 @@ -#include "extensions/filters/http/cache/http_cache_utils.h" - -#include -#include - -#include "absl/algorithm/container.h" -#include "absl/strings/ascii.h" -#include "absl/strings/numbers.h" -#include "absl/strings/strip.h" - -namespace Envoy { -namespace Extensions { -namespace HttpFilters { -namespace Cache { - -// True for characters defined as tchars by -// https://tools.ietf.org/html/rfc7230#section-3.2.6 -// -// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" -// / "-" / "." / "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA -bool HttpCacheUtils::tchar(char c) { - switch (c) { - case '!': - case '#': - case '$': - case '%': - case '&': - case '*': - case '+': - case '-': - case '.': - case '^': - case '_': - case '`': - case '|': - case '~': - return true; - } - return absl::ascii_isalnum(c); -} - -// Removes an initial HTTP header field value token, as defined by -// https://tools.ietf.org/html/rfc7230#section-3.2.6. Returns true if an initial -// token was present. 
-// -// token = 1*tchar -bool HttpCacheUtils::eatToken(absl::string_view& s) { - const absl::string_view::iterator token_end = absl::c_find_if_not(s, &tchar); - if (token_end == s.begin()) { - return false; - } - s.remove_prefix(token_end - s.begin()); - return true; -} - -// Removes an initial token or quoted-string (if present), as defined by -// https://tools.ietf.org/html/rfc7234#section-5.2. If a cache-control directive -// has an argument (as indicated by '='), it should be in this form. -// -// quoted-string = DQUOTE *( qdtext / quoted-pair ) DQUOTE -// qdtext = HTAB / SP /%x21 / %x23-5B / %x5D-7E / obs-text -// obs-text = %x80-FF -// quoted-pair = "\" ( HTAB / SP / VCHAR / obs-text ) -// VCHAR = %x21-7E ; visible (printing) characters -// -// For example, the directive "my-extension=42" has an argument of "42", so an -// input of "public, my-extension=42, max-age=999" -void HttpCacheUtils::eatDirectiveArgument(absl::string_view& s) { - if (s.empty()) { - return; - } - if (s.front() == '"') { - // TODO(#9833): handle \-escaped quotes - const size_t closing_quote = s.find('"', 1); - s.remove_prefix(closing_quote); - } else { - eatToken(s); - } -} - -// If s is non-null and begins with a decimal number ([0-9]+), removes it from -// the input and returns a SystemTime::duration representing that many seconds. -// If s is null or doesn't begin with digits, returns -// SystemTime::duration::zero(). If parsing overflows, returns -// SystemTime::duration::max(). -SystemTime::duration HttpCacheUtils::eatLeadingDuration(absl::string_view& s) { - const absl::string_view::iterator digits_end = absl::c_find_if_not(s, &absl::ascii_isdigit); - const size_t digits_length = digits_end - s.begin(); - if (digits_length == 0) { - return SystemTime::duration::zero(); - } - const absl::string_view digits(s.data(), digits_length); - s.remove_prefix(digits_length); - uint64_t num; - return absl::SimpleAtoi(digits, &num) ? std::chrono::seconds(num) : SystemTime::duration::max(); -} - -// Returns the effective max-age represented by cache-control. If the result is -// SystemTime::duration::zero(), or is less than the response's, the response -// should be validated. -// -// TODO(#9833): Write a CacheControl class to fully parse the cache-control -// header value. Consider sharing with the gzip filter. -SystemTime::duration HttpCacheUtils::effectiveMaxAge(absl::string_view cache_control) { - // The grammar for This Cache-Control header value should be: - // Cache-Control = 1#cache-directive - // cache-directive = token [ "=" ( token / quoted-string ) ] - // token = 1*tchar - // tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" - // / "-" / "." / "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA - // quoted-string = DQUOTE *( qdtext / quoted-pair ) DQUOTE - // qdtext = HTAB / SP /%x21 / %x23-5B / %x5D-7E / obs-text - // obs-text = %x80-FF - // quoted-pair = "\" ( HTAB / SP / VCHAR / obs-text ) - // VCHAR = %x21-7E ; visible (printing) characters - SystemTime::duration max_age = SystemTime::duration::zero(); - bool found_s_maxage = false; - while (!cache_control.empty()) { - // Each time through the loop, we eat one cache-directive. Each branch - // either returns or completely eats a cache-directive. - if (absl::ConsumePrefix(&cache_control, "no-cache")) { - if (eatToken(cache_control)) { - // The token wasn't no-cache; it just started that way, so we must - // finish eating this cache-directive. 
- if (absl::ConsumePrefix(&cache_control, "=")) { - eatDirectiveArgument(cache_control); - } - } else { - // Found a no-cache directive, so validation is required. - return SystemTime::duration::zero(); - } - } else if (absl::ConsumePrefix(&cache_control, "s-maxage=")) { - max_age = eatLeadingDuration(cache_control); - found_s_maxage = true; - cache_control = absl::StripLeadingAsciiWhitespace(cache_control); - if (!cache_control.empty() && cache_control[0] != ',') { - // Unexpected text at end of directive - return SystemTime::duration::zero(); - } - } else if (!found_s_maxage && absl::ConsumePrefix(&cache_control, "max-age=")) { - max_age = eatLeadingDuration(cache_control); - if (!cache_control.empty() && cache_control[0] != ',') { - // Unexpected text at end of directive - return SystemTime::duration::zero(); - } - } else if (eatToken(cache_control)) { - // Unknown directive--ignore. - if (absl::ConsumePrefix(&cache_control, "=")) { - eatDirectiveArgument(cache_control); - } - } else { - // This directive starts with illegal characters. Require validation. - return SystemTime::duration::zero(); - } - // Whichever branch we took should have consumed the entire cache-directive, - // so we just need to eat the delimiter and optional whitespace. - absl::ConsumePrefix(&cache_control, ","); - cache_control = absl::StripLeadingAsciiWhitespace(cache_control); - } - return max_age; -} - -SystemTime HttpCacheUtils::httpTime(const Http::HeaderEntry* header_entry) { - if (!header_entry) { - return {}; - } - absl::Time time; - const std::string input(header_entry->value().getStringView()); - - // Acceptable Date/Time Formats per - // https://tools.ietf.org/html/rfc7231#section-7.1.1.1 - // - // Sun, 06 Nov 1994 08:49:37 GMT ; IMF-fixdate - // Sunday, 06-Nov-94 08:49:37 GMT ; obsolete RFC 850 format - // Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format - static const auto& rfc7231_date_formats = *new std::array{ - "%a, %d %b %Y %H:%M:%S GMT", "%A, %d-%b-%y %H:%M:%S GMT", "%a %b %e %H:%M:%S %Y"}; - for (const std::string& format : rfc7231_date_formats) { - if (absl::ParseTime(format, input, &time, nullptr)) { - return ToChronoTime(time); - } - } - return {}; -} -} // namespace Cache -} // namespace HttpFilters -} // namespace Extensions -} // namespace Envoy diff --git a/source/extensions/filters/http/cache/http_cache_utils.h b/source/extensions/filters/http/cache/http_cache_utils.h deleted file mode 100644 index 248b3fda4ace..000000000000 --- a/source/extensions/filters/http/cache/http_cache_utils.h +++ /dev/null @@ -1,34 +0,0 @@ -#pragma once - -#include "envoy/common/time.h" -#include "envoy/http/header_map.h" - -#include "absl/strings/string_view.h" -#include "absl/time/time.h" - -namespace Envoy { -namespace Extensions { -namespace HttpFilters { -namespace Cache { -class HttpCacheUtils { -public: - // Parses and returns max-age or s-maxage (with s-maxage taking precedence), - // parsed into a SystemTime::Duration. Returns SystemTime::Duration::zero if - // neither is present, or there is a no-cache directive, or if max-age or - // s-maxage is malformed. - static SystemTime::duration effectiveMaxAge(absl::string_view cache_control); - - // Parses header_entry as an HTTP time. Returns SystemTime() if - // header_entry is null or malformed. 
- static SystemTime httpTime(const Http::HeaderEntry* header_entry); - -private: - static bool tchar(char c); - static bool eatToken(absl::string_view& s); - static void eatDirectiveArgument(absl::string_view& s); - static SystemTime::duration eatLeadingDuration(absl::string_view& s); -}; -} // namespace Cache -} // namespace HttpFilters -} // namespace Extensions -} // namespace Envoy diff --git a/test/extensions/filters/http/cache/BUILD b/test/extensions/filters/http/cache/BUILD index eb924a70e9bb..db5d5ea50fd5 100644 --- a/test/extensions/filters/http/cache/BUILD +++ b/test/extensions/filters/http/cache/BUILD @@ -9,13 +9,13 @@ licenses(["notice"]) # Apache 2 envoy_package() envoy_extension_cc_test( - name = "http_cache_utils_test", - srcs = ["http_cache_utils_test.cc"], + name = "cache_headers_utils_test", + srcs = ["cache_headers_utils_test.cc"], extension_name = "envoy.filters.http.cache", deps = [ "//include/envoy/http:header_map_interface", "//source/common/http:header_map_lib", - "//source/extensions/filters/http/cache:http_cache_utils_lib", + "//source/extensions/filters/http/cache:cache_headers_utils_lib", "//test/test_common:utility_lib", ], ) @@ -47,11 +47,11 @@ envoy_extension_cc_test( ) envoy_extension_cc_test( - name = "cache_filter_utils_test", - srcs = ["cache_filter_utils_test.cc"], + name = "cacheability_utils_test", + srcs = ["cacheability_utils_test.cc"], extension_name = "envoy.filters.http.cache", deps = [ - "//source/extensions/filters/http/cache:cache_filter_utils_lib", + "//source/extensions/filters/http/cache:cacheability_utils_lib", "//test/test_common:utility_lib", ], ) diff --git a/test/extensions/filters/http/cache/cache_filter_utils_test.cc b/test/extensions/filters/http/cache/cache_filter_utils_test.cc deleted file mode 100644 index c3d01fbddb7f..000000000000 --- a/test/extensions/filters/http/cache/cache_filter_utils_test.cc +++ /dev/null @@ -1,68 +0,0 @@ -#include "extensions/filters/http/cache/cache_filter_utils.h" - -#include "test/test_common/utility.h" - -#include "gtest/gtest.h" - -namespace Envoy { -namespace Extensions { -namespace HttpFilters { -namespace Cache { -namespace { - -class IsCacheableRequestTest : public testing::Test { -protected: - const Http::TestRequestHeaderMapImpl cacheable_request_headers = {{":path", "/"}, - {":method", "GET"}, - {"x-forwarded-proto", "http"}, - {":authority", "test.com"}}; -}; - -TEST_F(IsCacheableRequestTest, PathHeader) { - Http::TestRequestHeaderMapImpl request_headers = cacheable_request_headers; - EXPECT_TRUE(CacheFilterUtils::isCacheableRequest(request_headers)); - request_headers.removePath(); - EXPECT_FALSE(CacheFilterUtils::isCacheableRequest(request_headers)); -} - -TEST_F(IsCacheableRequestTest, HostHeader) { - Http::TestRequestHeaderMapImpl request_headers = cacheable_request_headers; - EXPECT_TRUE(CacheFilterUtils::isCacheableRequest(request_headers)); - request_headers.removeHost(); - EXPECT_FALSE(CacheFilterUtils::isCacheableRequest(request_headers)); -} - -TEST_F(IsCacheableRequestTest, MethodHeader) { - const Http::HeaderValues& header_values = Http::Headers::get(); - Http::TestRequestHeaderMapImpl request_headers = cacheable_request_headers; - EXPECT_TRUE(CacheFilterUtils::isCacheableRequest(request_headers)); - request_headers.setMethod(header_values.MethodValues.Post); - EXPECT_FALSE(CacheFilterUtils::isCacheableRequest(request_headers)); - request_headers.setMethod(header_values.MethodValues.Put); - EXPECT_FALSE(CacheFilterUtils::isCacheableRequest(request_headers)); - 
request_headers.removeMethod(); - EXPECT_FALSE(CacheFilterUtils::isCacheableRequest(request_headers)); -} - -TEST_F(IsCacheableRequestTest, ForwardedProtoHeader) { - Http::TestRequestHeaderMapImpl request_headers = cacheable_request_headers; - EXPECT_TRUE(CacheFilterUtils::isCacheableRequest(request_headers)); - request_headers.setForwardedProto("ftp"); - EXPECT_FALSE(CacheFilterUtils::isCacheableRequest(request_headers)); - request_headers.removeForwardedProto(); - EXPECT_FALSE(CacheFilterUtils::isCacheableRequest(request_headers)); -} - -TEST_F(IsCacheableRequestTest, AuthorizationHeader) { - Http::TestRequestHeaderMapImpl request_headers = cacheable_request_headers; - EXPECT_TRUE(CacheFilterUtils::isCacheableRequest(request_headers)); - request_headers.setCopy(Http::CustomHeaders::get().Authorization, - "basic YWxhZGRpbjpvcGVuc2VzYW1l"); - EXPECT_FALSE(CacheFilterUtils::isCacheableRequest(request_headers)); -} - -} // namespace -} // namespace Cache -} // namespace HttpFilters -} // namespace Extensions -} // namespace Envoy diff --git a/test/extensions/filters/http/cache/cache_headers_utils_test.cc b/test/extensions/filters/http/cache/cache_headers_utils_test.cc new file mode 100644 index 000000000000..b9c20baf20d3 --- /dev/null +++ b/test/extensions/filters/http/cache/cache_headers_utils_test.cc @@ -0,0 +1,318 @@ +#include +#include +#include + +#include "envoy/common/time.h" + +#include "common/common/macros.h" +#include "common/http/header_map_impl.h" + +#include "extensions/filters/http/cache/cache_headers_utils.h" + +#include "test/test_common/utility.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace Cache { +namespace { + +struct TestRequestCacheControl : public RequestCacheControl { + TestRequestCacheControl(bool must_validate, bool no_store, bool no_transform, bool only_if_cached, + OptionalDuration max_age, OptionalDuration min_fresh, + OptionalDuration max_stale) { + must_validate_ = must_validate; + no_store_ = no_store; + no_transform_ = no_transform; + only_if_cached_ = only_if_cached; + max_age_ = max_age; + min_fresh_ = min_fresh; + max_stale_ = max_stale; + } +}; + +struct TestResponseCacheControl : public ResponseCacheControl { + TestResponseCacheControl(bool must_validate, bool no_store, bool no_transform, bool no_stale, + bool is_public, OptionalDuration max_age) { + must_validate_ = must_validate; + no_store_ = no_store; + no_transform_ = no_transform; + no_stale_ = no_stale; + is_public_ = is_public; + max_age_ = max_age; + } +}; + +struct RequestCacheControlTestCase { + absl::string_view cache_control_header; + TestRequestCacheControl request_cache_control; +}; + +struct ResponseCacheControlTestCase { + absl::string_view cache_control_header; + TestResponseCacheControl response_cache_control; +}; + +class RequestCacheControlTest : public testing::TestWithParam { +public: + static const std::vector& getTestCases() { + // clang-format off + CONSTRUCT_ON_FIRST_USE(std::vector, + // Empty header + { + "", + // {must_validate_, no_store_, no_transform_, only_if_cached_, max_age_, min_fresh_, max_stale_} + {false, false, false, false, absl::nullopt, absl::nullopt, absl::nullopt} + }, + // Valid cache-control headers + { + "max-age=3600, min-fresh=10, no-transform, only-if-cached, no-store", + // {must_validate_, no_store_, no_transform_, only_if_cached_, max_age_, min_fresh_, max_stale_} + {false, true, true, true, std::chrono::seconds(3600), std::chrono::seconds(10), absl::nullopt} + }, + { + "min-fresh=100, 
max-stale, no-cache", + // {must_validate_, no_store_, no_transform_, only_if_cached_, max_age_, min_fresh_, max_stale_} + {true, false, false, false, absl::nullopt, std::chrono::seconds(100), SystemTime::duration::max()} + }, + { + "max-age=10, max-stale=50", + // {must_validate_, no_store_, no_transform_, only_if_cached_, max_age_, min_fresh_, max_stale_} + {false, false, false, false, std::chrono::seconds(10), absl::nullopt, std::chrono::seconds(50)} + }, + // Quoted arguments are interpreted correctly + { + "max-age=\"3600\", min-fresh=\"10\", no-transform, only-if-cached, no-store", + // {must_validate_, no_store_, no_transform_, only_if_cached_, max_age_, min_fresh_, max_stale_} + {false, true, true, true, std::chrono::seconds(3600), std::chrono::seconds(10), absl::nullopt} + }, + { + "max-age=\"10\", max-stale=\"50\", only-if-cached", + // {must_validate_, no_store_, no_transform_, only_if_cached_, max_age_, min_fresh_, max_stale_} + {false, false, false, true, std::chrono::seconds(10), absl::nullopt, std::chrono::seconds(50)} + }, + // Unknown directives are ignored + { + "max-age=10, max-stale=50, unknown-directive", + // {must_validate_, no_store_, no_transform_, only_if_cached_, max_age_, min_fresh_, max_stale_} + {false, false, false, false, std::chrono::seconds(10), absl::nullopt, std::chrono::seconds(50)} + }, + { + "max-age=10, max-stale=50, unknown-directive-with-arg=arg1", + // {must_validate_, no_store_, no_transform_, only_if_cached_, max_age_, min_fresh_, max_stale_} + {false, false, false, false, std::chrono::seconds(10), absl::nullopt, std::chrono::seconds(50)} + }, + { + "max-age=10, max-stale=50, unknown-directive-with-quoted-arg=\"arg1\"", + // {must_validate_, no_store_, no_transform_, only_if_cached_, max_age_, min_fresh_, max_stale_} + {false, false, false, false, std::chrono::seconds(10), absl::nullopt, std::chrono::seconds(50)} + }, + { + "max-age=10, max-stale=50, unknown-directive, unknown-directive-with-quoted-arg=\"arg1\"", + // {must_validate_, no_store_, no_transform_, only_if_cached_, max_age_, min_fresh_, max_stale_} + {false, false, false, false, std::chrono::seconds(10), absl::nullopt, std::chrono::seconds(50)} + }, + // Invalid durations are ignored + { + "max-age=five, min-fresh=30, no-store", + // {must_validate_, no_store_, no_transform_, only_if_cached_, max_age_, min_fresh_, max_stale_} + {false, true, false, false, absl::nullopt, std::chrono::seconds(30), absl::nullopt} + }, + { + "max-age=five, min-fresh=30s, max-stale=-2", + // {must_validate_, no_store_, no_transform_, only_if_cached_, max_age_, min_fresh_, max_stale_} + {false, false, false, false, absl::nullopt, absl::nullopt, absl::nullopt} + }, + { + "max-age=\"", + // {must_validate_, no_store_, no_transform_, only_if_cached_, max_age_, min_fresh_, max_stale_} + {false, false, false, false, absl::nullopt, absl::nullopt, absl::nullopt} + }, + // Invalid parts of the header are ignored + { + "no-cache, ,,,fjfwioen3298, max-age=20, min-fresh=30=40", + // {must_validate_, no_store_, no_transform_, only_if_cached_, max_age_, min_fresh_, max_stale_} + {true, false, false, false, std::chrono::seconds(20), absl::nullopt, absl::nullopt} + }, + // If a directive argument contains a comma by mistake + // the part before the comma will be interpreted as the argument + // and the part after it will be ignored + { + "no-cache, max-age=10,0, no-store", + // {must_validate_, no_store_, no_transform_, only_if_cached_, max_age_, min_fresh_, max_stale_} + {true, true, false, false, 
std::chrono::seconds(10), absl::nullopt, absl::nullopt} + }, + ); + // clang-format on + } +}; + +class ResponseCacheControlTest : public testing::TestWithParam { +public: + static const std::vector& getTestCases() { + // clang-format off + CONSTRUCT_ON_FIRST_USE(std::vector, + // Empty header + { + "", + // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_} + {false, false, false, false, false, absl::nullopt} + }, + // Valid cache-control headers + { + "s-maxage=1000, max-age=2000, proxy-revalidate, no-store", + // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_} + {false, true, false, true, false, std::chrono::seconds(1000)} + }, + { + "max-age=500, must-revalidate, no-cache, no-transform", + // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_} + {true, false, true, true, false, std::chrono::seconds(500)} + }, + { + "s-maxage=10, private=content-length, no-cache=content-encoding", + // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_} + {true, true, false, false, false, std::chrono::seconds(10)} + }, + { + "private", + // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_} + {false, true, false, false, false, absl::nullopt} + }, + { + "public, max-age=0", + // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_} + {false, false, false, false, true, std::chrono::seconds(0)} + }, + // Quoted arguments are interpreted correctly + { + "s-maxage=\"20\", max-age=\"10\", public", + // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_} + {false, false, false, false, true, std::chrono::seconds(20)} + }, + { + "max-age=\"50\", private", + // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_} + {false, true, false, false, false, std::chrono::seconds(50)} + }, + { + "s-maxage=\"0\"", + // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_} + {false, false, false, false, false, std::chrono::seconds(0)} + }, + // Unknown directives are ignored + { + "private, no-cache, max-age=30, unknown-directive", + // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_} + {true, true, false, false, false, std::chrono::seconds(30)} + }, + { + "private, no-cache, max-age=30, unknown-directive-with-arg=arg", + // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_} + {true, true, false, false, false, std::chrono::seconds(30)} + }, + { + "private, no-cache, max-age=30, unknown-directive-with-quoted-arg=\"arg\"", + // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_} + {true, true, false, false, false, std::chrono::seconds(30)} + }, + { + "private, no-cache, max-age=30, unknown-directive, unknown-directive-with-quoted-arg=\"arg\"", + // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_} + {true, true, false, false, false, std::chrono::seconds(30)} + }, + // Invalid durations are ignored + { + "max-age=five", + // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_} + {false, false, false, false, false, absl::nullopt} + }, + { + "max-age=10s, private", + // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_} + {false, true, false, false, false, absl::nullopt} + }, + { + "s-maxage=\"50s\", max-age=\"zero\", no-cache", + // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_} + {true, false, false, false, false, absl::nullopt} + }, + { + 
"s-maxage=five, max-age=10, no-transform", + // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_} + {false, false, true, false, false, std::chrono::seconds(10)} + }, + { + "max-age=\"", + // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_} + {false, false, false, false, false, absl::nullopt} + }, + // Invalid parts of the header are ignored + { + "no-cache, ,,,fjfwioen3298, max-age=20", + // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_} + {true, false, false, false, false, std::chrono::seconds(20)} + }, + // If a directive argument contains a comma by mistake + // the part before the comma will be interpreted as the argument + // and the part after it will be ignored + { + "no-cache, max-age=10,0, no-store", + // {must_validate_, no_store_, no_transform_, no_stale_, is_public_, max_age_} + {true, true, false, false, false, std::chrono::seconds(10)} + }, + ); + // clang-format on + } +}; + +// TODO(#9872): More tests for httpTime +class HttpTimeTest : public testing::TestWithParam { +public: + static const std::vector& getOkTestCases() { + // clang-format off + CONSTRUCT_ON_FIRST_USE(std::vector, + "Sun, 06 Nov 1994 08:49:37 GMT", // IMF-fixdate + "Sunday, 06-Nov-94 08:49:37 GMT", // obsolete RFC 850 format + "Sun Nov 6 08:49:37 1994" // ANSI C's asctime() format + ); + // clang-format on + } +}; + +INSTANTIATE_TEST_SUITE_P(RequestCacheControlTest, RequestCacheControlTest, + testing::ValuesIn(RequestCacheControlTest::getTestCases())); + +TEST_P(RequestCacheControlTest, RequestCacheControlTest) { + const absl::string_view cache_control_header = GetParam().cache_control_header; + const RequestCacheControl expected_request_cache_control = GetParam().request_cache_control; + EXPECT_EQ(expected_request_cache_control, RequestCacheControl(cache_control_header)); +} + +INSTANTIATE_TEST_SUITE_P(ResponseCacheControlTest, ResponseCacheControlTest, + testing::ValuesIn(ResponseCacheControlTest::getTestCases())); + +TEST_P(ResponseCacheControlTest, ResponseCacheControlTest) { + const absl::string_view cache_control_header = GetParam().cache_control_header; + const ResponseCacheControl expected_response_cache_control = GetParam().response_cache_control; + EXPECT_EQ(expected_response_cache_control, ResponseCacheControl(cache_control_header)); +} + +INSTANTIATE_TEST_SUITE_P(Ok, HttpTimeTest, testing::ValuesIn(HttpTimeTest::getOkTestCases())); + +TEST_P(HttpTimeTest, Ok) { + const Http::TestResponseHeaderMapImpl response_headers{{"date", GetParam()}}; + // Manually confirmed that 784111777 is 11/6/94, 8:46:37. 
+ EXPECT_EQ(784111777, + SystemTime::clock::to_time_t(CacheHeadersUtils::httpTime(response_headers.Date()))); +} + +TEST(HttpTime, Null) { EXPECT_EQ(CacheHeadersUtils::httpTime(nullptr), SystemTime()); } + +} // namespace +} // namespace Cache +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/http/cache/cacheability_utils_test.cc b/test/extensions/filters/http/cache/cacheability_utils_test.cc new file mode 100644 index 000000000000..cdd049f5884f --- /dev/null +++ b/test/extensions/filters/http/cache/cacheability_utils_test.cc @@ -0,0 +1,123 @@ +#include "extensions/filters/http/cache/cacheability_utils.h" + +#include "test/test_common/utility.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace Cache { +namespace { + +class IsCacheableRequestTest : public testing::Test { +protected: + const Http::TestRequestHeaderMapImpl cacheable_request_headers_ = {{":path", "/"}, + {":method", "GET"}, + {"x-forwarded-proto", "http"}, + {":authority", "test.com"}}; +}; + +class IsCacheableResponseTest : public testing::Test { +protected: + std::string cache_control_ = "max-age=3600"; + const Http::TestResponseHeaderMapImpl cacheable_response_headers_ = { + {":status", "200"}, + {"date", "Sun, 06 Nov 1994 08:49:37 GMT"}, + {"cache-control", cache_control_}}; +}; + +TEST_F(IsCacheableRequestTest, CacheableRequest) { + EXPECT_TRUE(CacheabilityUtils::isCacheableRequest(cacheable_request_headers_)); +} + +TEST_F(IsCacheableRequestTest, PathHeader) { + Http::TestRequestHeaderMapImpl request_headers = cacheable_request_headers_; + EXPECT_TRUE(CacheabilityUtils::isCacheableRequest(request_headers)); + request_headers.removePath(); + EXPECT_FALSE(CacheabilityUtils::isCacheableRequest(request_headers)); +} + +TEST_F(IsCacheableRequestTest, HostHeader) { + Http::TestRequestHeaderMapImpl request_headers = cacheable_request_headers_; + EXPECT_TRUE(CacheabilityUtils::isCacheableRequest(request_headers)); + request_headers.removeHost(); + EXPECT_FALSE(CacheabilityUtils::isCacheableRequest(request_headers)); +} + +TEST_F(IsCacheableRequestTest, MethodHeader) { + const Http::HeaderValues& header_values = Http::Headers::get(); + Http::TestRequestHeaderMapImpl request_headers = cacheable_request_headers_; + EXPECT_TRUE(CacheabilityUtils::isCacheableRequest(request_headers)); + request_headers.setMethod(header_values.MethodValues.Post); + EXPECT_FALSE(CacheabilityUtils::isCacheableRequest(request_headers)); + request_headers.setMethod(header_values.MethodValues.Put); + EXPECT_FALSE(CacheabilityUtils::isCacheableRequest(request_headers)); + request_headers.removeMethod(); + EXPECT_FALSE(CacheabilityUtils::isCacheableRequest(request_headers)); +} + +TEST_F(IsCacheableRequestTest, ForwardedProtoHeader) { + Http::TestRequestHeaderMapImpl request_headers = cacheable_request_headers_; + EXPECT_TRUE(CacheabilityUtils::isCacheableRequest(request_headers)); + request_headers.setForwardedProto("ftp"); + EXPECT_FALSE(CacheabilityUtils::isCacheableRequest(request_headers)); + request_headers.removeForwardedProto(); + EXPECT_FALSE(CacheabilityUtils::isCacheableRequest(request_headers)); +} + +TEST_F(IsCacheableRequestTest, AuthorizationHeader) { + Http::TestRequestHeaderMapImpl request_headers = cacheable_request_headers_; + EXPECT_TRUE(CacheabilityUtils::isCacheableRequest(request_headers)); + request_headers.setCopy(Http::CustomHeaders::get().Authorization, + "basic YWxhZGRpbjpvcGVuc2VzYW1l"); + 
EXPECT_FALSE(CacheabilityUtils::isCacheableRequest(request_headers)); +} + +TEST_F(IsCacheableResponseTest, CacheableResponse) { + EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(cacheable_response_headers_)); +} + +TEST_F(IsCacheableResponseTest, UncacheableStatusCode) { + Http::TestResponseHeaderMapImpl response_headers = cacheable_response_headers_; + EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(response_headers)); + response_headers.setStatus("700"); + EXPECT_FALSE(CacheabilityUtils::isCacheableResponse(response_headers)); + response_headers.removeStatus(); + EXPECT_FALSE(CacheabilityUtils::isCacheableResponse(response_headers)); +} + +TEST_F(IsCacheableResponseTest, ValidationData) { + Http::TestResponseHeaderMapImpl response_headers = cacheable_response_headers_; + EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(response_headers)); + response_headers.setCopy(Http::CustomHeaders::get().CacheControl, "s-maxage=1000"); + EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(response_headers)); + response_headers.setCopy(Http::CustomHeaders::get().CacheControl, "public, no-transform"); + EXPECT_FALSE(CacheabilityUtils::isCacheableResponse(response_headers)); + response_headers.remove(Http::CustomHeaders::get().CacheControl); + EXPECT_FALSE(CacheabilityUtils::isCacheableResponse(response_headers)); + response_headers.setCopy(Http::Headers::get().Expires, "Sun, 06 Nov 1994 09:49:37 GMT"); + EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(response_headers)); +} + +TEST_F(IsCacheableResponseTest, ResponseNoStore) { + Http::TestResponseHeaderMapImpl response_headers = cacheable_response_headers_; + EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(response_headers)); + std::string cache_control_no_store = absl::StrCat(cache_control_, ", no-store"); + response_headers.setCopy(Http::CustomHeaders::get().CacheControl, cache_control_no_store); + EXPECT_FALSE(CacheabilityUtils::isCacheableResponse(response_headers)); +} + +TEST_F(IsCacheableResponseTest, ResponsePrivate) { + Http::TestResponseHeaderMapImpl response_headers = cacheable_response_headers_; + EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(response_headers)); + std::string cache_control_private = absl::StrCat(cache_control_, ", private"); + response_headers.setCopy(Http::CustomHeaders::get().CacheControl, cache_control_private); + EXPECT_FALSE(CacheabilityUtils::isCacheableResponse(response_headers)); +} + +} // namespace +} // namespace Cache +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/http/cache/http_cache_test.cc b/test/extensions/filters/http/cache/http_cache_test.cc index 3fd9cf699293..a4946bae7c7a 100644 --- a/test/extensions/filters/http/cache/http_cache_test.cc +++ b/test/extensions/filters/http/cache/http_cache_test.cc @@ -1,3 +1,4 @@ +#include "extensions/filters/http/cache/cache_headers_utils.h" #include "extensions/filters/http/cache/http_cache.h" #include "test/mocks/http/mocks.h" @@ -105,19 +106,6 @@ TEST_F(LookupRequestTest, MakeLookupResultBody) { EXPECT_FALSE(lookup_response.has_trailers_); } -TEST_F(LookupRequestTest, MakeLookupResultNoDate) { - const LookupRequest lookup_request(request_headers_, current_time_); - const Http::TestResponseHeaderMapImpl response_headers( - {{"cache-control", "public, max-age=3600"}}); - const LookupResult lookup_response = makeLookupResult(lookup_request, response_headers); - EXPECT_EQ(CacheEntryStatus::RequiresValidation, lookup_response.cache_entry_status_); - ASSERT_TRUE(lookup_response.headers_); 
- EXPECT_THAT(*lookup_response.headers_, Http::IsSupersetOfHeaders(response_headers)); - EXPECT_EQ(lookup_response.content_length_, 0); - EXPECT_TRUE(lookup_response.response_ranges_.empty()); - EXPECT_FALSE(lookup_response.has_trailers_); -} - TEST_F(LookupRequestTest, PrivateResponse) { const LookupRequest lookup_request(request_headers_, current_time_); const Http::TestResponseHeaderMapImpl response_headers( diff --git a/test/extensions/filters/http/cache/http_cache_utils_test.cc b/test/extensions/filters/http/cache/http_cache_utils_test.cc deleted file mode 100644 index bd13d392eadd..000000000000 --- a/test/extensions/filters/http/cache/http_cache_utils_test.cc +++ /dev/null @@ -1,82 +0,0 @@ -#include - -#include "common/http/header_map_impl.h" - -#include "extensions/filters/http/cache/http_cache_utils.h" - -#include "test/test_common/utility.h" - -#include "gtest/gtest.h" - -namespace Envoy { -namespace Extensions { -namespace HttpFilters { -namespace Cache { -namespace { - -// TODO(#9872): Add tests for eat* functions -// TODO(#9872): More tests for httpTime, effectiveMaxAge - -class HttpTimeTest : public testing::TestWithParam {}; - -const char* const ok_times[] = { - "Sun, 06 Nov 1994 08:49:37 GMT", // IMF-fixdate - "Sunday, 06-Nov-94 08:49:37 GMT", // obsolete RFC 850 format - "Sun Nov 6 08:49:37 1994" // ANSI C's asctime() format -}; - -INSTANTIATE_TEST_SUITE_P(Ok, HttpTimeTest, testing::ValuesIn(ok_times)); - -TEST_P(HttpTimeTest, Ok) { - Http::TestResponseHeaderMapImpl response_headers{{"date", GetParam()}}; - // Manually confirmed that 784111777 is 11/6/94, 8:46:37. - EXPECT_EQ(784111777, - SystemTime::clock::to_time_t(HttpCacheUtils::httpTime(response_headers.Date()))); -} - -TEST(HttpTime, Null) { EXPECT_EQ(HttpCacheUtils::httpTime(nullptr), SystemTime()); } - -struct EffectiveMaxAgeParams { - absl::string_view cache_control; - int effective_max_age_secs; -}; - -EffectiveMaxAgeParams params[] = { - {"public, max-age=3600", 3600}, - {"public, max-age=-1", 0}, - {"max-age=20", 20}, - {"max-age=86400, public", 86400}, - {"public,max-age=\"0\"", 0}, - {"public,max-age=8", 8}, - {"public,max-age=3,no-cache", 0}, - {"s-maxage=0", 0}, - {"max-age=10,s-maxage=0", 0}, - {"s-maxage=10", 10}, - {"no-cache", 0}, - {"max-age=0", 0}, - {"no-cache", 0}, - {"public", 0}, - // TODO(#9833): parse quoted forms - // {"max-age=20, s-maxage=\"25\"",25}, - // {"public,max-age=\"8\",foo=11",8}, - // {"public,max-age=\"8\",bar=\"11\"",8}, - // TODO(#9833): parse public/private - // {"private,max-age=10",0} - // {"private",0}, - // {"private,s-maxage=8",0}, -}; - -class EffectiveMaxAgeTest : public testing::TestWithParam {}; - -INSTANTIATE_TEST_SUITE_P(EffectiveMaxAgeTest, EffectiveMaxAgeTest, testing::ValuesIn(params)); - -TEST_P(EffectiveMaxAgeTest, EffectiveMaxAgeTest) { - EXPECT_EQ(HttpCacheUtils::effectiveMaxAge(GetParam().cache_control), - std::chrono::seconds(GetParam().effective_max_age_secs)); -} - -} // namespace -} // namespace Cache -} // namespace HttpFilters -} // namespace Extensions -} // namespace Envoy diff --git a/test/extensions/filters/http/cache/simple_http_cache/simple_http_cache_test.cc b/test/extensions/filters/http/cache/simple_http_cache/simple_http_cache_test.cc index af6271f8c58b..301009223163 100644 --- a/test/extensions/filters/http/cache/simple_http_cache/simple_http_cache_test.cc +++ b/test/extensions/filters/http/cache/simple_http_cache/simple_http_cache_test.cc @@ -3,6 +3,7 @@ #include "common/buffer/buffer_impl.h" +#include 
"extensions/filters/http/cache/cache_headers_utils.h" #include "extensions/filters/http/cache/simple_http_cache/simple_http_cache.h" #include "test/test_common/simulated_time_system.h" diff --git a/tools/spelling/spelling_dictionary.txt b/tools/spelling/spelling_dictionary.txt index eb38bfe47eba..64e5d768f114 100644 --- a/tools/spelling/spelling_dictionary.txt +++ b/tools/spelling/spelling_dictionary.txt @@ -437,6 +437,7 @@ bulkstrings bursty bytecode bytestream +cacheable cacheability callee callsite From 4fb691897390331c1ca5f87c87e2f63009f8f028 Mon Sep 17 00:00:00 2001 From: Joshua Marantz Date: Wed, 22 Jul 2020 08:07:56 -0400 Subject: [PATCH 712/909] stats: add iterate methods for Scope, and a lazy find-from-string implementation. (#12128) Commit Message: Adds iterate() functionality for stats scopes. This makes it possible to robustly find a stat by string in prod code, even if the caller does not know whether some segments of the name were created via StatNameDynamicStorage, so a new Stats::Utility API is added to encapsulate the lookup and caching of stats by string. Note that the existing deprecated counterFromStatName only works for fully symbolic names. Additional Description: This can potentially enable a hierarchal stats exploration admin interface (though we'd need to be able to iterate over sub-scopes too). Risk Level: low -- these new capabilities are tested, but not called from prod code yet. They may prove useful at Google and in Nighthawk, per the referenced bug. Testing: //test/... Docs Changes: n/a Release Notes: n/a Fixes: #11799 Signed-off-by: Joshua Marantz --- include/envoy/stats/scope.h | 43 +++++ source/common/stats/isolated_store_impl.h | 16 ++ source/common/stats/scope_prefixer.h | 27 ++++ source/common/stats/thread_local_store.h | 37 +++++ source/common/stats/utility.h | 51 ++++++ test/common/stats/BUILD | 1 + test/common/stats/utility_test.cc | 183 ++++++++++++++++++++-- test/integration/server.h | 14 ++ 8 files changed, 363 insertions(+), 9 deletions(-) diff --git a/include/envoy/stats/scope.h b/include/envoy/stats/scope.h index 408655bbb8a5..93e5f00c7c5f 100644 --- a/include/envoy/stats/scope.h +++ b/include/envoy/stats/scope.h @@ -28,6 +28,8 @@ using TextReadoutOptConstRef = absl::optional; using ScopeSharedPtr = std::shared_ptr; +template using IterateFn = std::function&)>; + /** * A named scope for stats. Scopes are a grouping of stats that can be acted on as a unit if needed * (for example to free/delete all of them). @@ -194,6 +196,47 @@ class Scope { */ virtual const SymbolTable& constSymbolTable() const PURE; virtual SymbolTable& symbolTable() PURE; + + /** + * Calls 'fn' for every counter. Note that in the case of overlapping scopes, + * the implementation may call fn more than one time for each counter. Iteration + * stops if `fn` returns false; + * + * @param fn Function to be run for every counter, or until fn return false. + * @return false if fn(counter) return false during iteration, true if every counter was hit. + */ + virtual bool iterate(const IterateFn& fn) const PURE; + + /** + * Calls 'fn' for every gauge. Note that in the case of overlapping scopes, + * the implementation may call fn more than one time for each gauge. Iteration + * stops if `fn` returns false; + * + * @param fn Function to be run for every gauge, or until fn return false. + * @return false if fn(gauge) return false during iteration, true if every gauge was hit. + */ + virtual bool iterate(const IterateFn& fn) const PURE; + + /** + * Calls 'fn' for every histogram. 
Note that in the case of overlapping + * scopes, the implementation may call fn more than one time for each + * histogram. Iteration stops if `fn` returns false; + * + * @param fn Function to be run for every histogram, or until fn return false. + * @return false if fn(histogram) return false during iteration, true if every histogram was hit. + */ + virtual bool iterate(const IterateFn& fn) const PURE; + + /** + * Calls 'fn' for every text readout. Note that in the case of overlapping + * scopes, the implementation may call fn more than one time for each + * text readout. Iteration stops if `fn` returns false; + * + * @param fn Function to be run for every text readout, or until fn return false. + * @return false if fn(text_readout) return false during iteration, true if every text readout + * was hit. + */ + virtual bool iterate(const IterateFn& fn) const PURE; }; } // namespace Stats diff --git a/source/common/stats/isolated_store_impl.h b/source/common/stats/isolated_store_impl.h index 2427e71a1b31..57d14ebf45fd 100644 --- a/source/common/stats/isolated_store_impl.h +++ b/source/common/stats/isolated_store_impl.h @@ -91,6 +91,15 @@ template class IsolatedStatsCache { return vec; } + bool iterate(const IterateFn& fn) const { + for (auto& stat : stats_) { + if (!fn(stat.second)) { + return false; + } + } + return true; + } + private: friend class IsolatedStoreImpl; @@ -154,6 +163,13 @@ class IsolatedStoreImpl : public StoreImpl { return text_readouts_.find(name); } + bool iterate(const IterateFn& fn) const override { return counters_.iterate(fn); } + bool iterate(const IterateFn& fn) const override { return gauges_.iterate(fn); } + bool iterate(const IterateFn& fn) const override { return histograms_.iterate(fn); } + bool iterate(const IterateFn& fn) const override { + return text_readouts_.iterate(fn); + } + // Stats::Store std::vector counters() const override { return counters_.toVector(); } std::vector gauges() const override { diff --git a/source/common/stats/scope_prefixer.h b/source/common/stats/scope_prefixer.h index b7c874375620..4257c1dd5ddf 100644 --- a/source/common/stats/scope_prefixer.h +++ b/source/common/stats/scope_prefixer.h @@ -54,7 +54,34 @@ class ScopePrefixer : public Scope { NullGaugeImpl& nullGauge(const std::string& str) override { return scope_.nullGauge(str); } + bool iterate(const IterateFn& fn) const override { return iterHelper(fn); } + bool iterate(const IterateFn& fn) const override { return iterHelper(fn); } + bool iterate(const IterateFn& fn) const override { return iterHelper(fn); } + bool iterate(const IterateFn& fn) const override { return iterHelper(fn); } + private: + template bool iterHelper(const IterateFn& fn) const { + // We determine here what's in the scope by looking at name + // prefixes. Strictly speaking this is not correct, as a stat name can be in + // different scopes. But there is no data in `ScopePrefixer` to resurrect + // actual membership of a stat in a scope, so we go by name matching. Note + // that `ScopePrefixer` is not used in `ThreadLocalStore`, which has + // accurate maps describing which stats are in which scopes. + // + // TODO(jmarantz): In the scope of this limited implementation, it would be + // faster to match on the StatName prefix. This would be possible if + // SymbolTable exposed a split() method. 
+ std::string prefix_str = scope_.symbolTable().toString(prefix_.statName()); + if (!prefix_str.empty() && !absl::EndsWith(prefix_str, ".")) { + prefix_str += "."; + } + IterateFn filter_scope = [&fn, + &prefix_str](const RefcountPtr& stat) -> bool { + return !absl::StartsWith(stat->name(), prefix_str) || fn(stat); + }; + return scope_.iterate(filter_scope); + } + Scope& scope_; StatNameStorage prefix_; }; diff --git a/source/common/stats/thread_local_store.h b/source/common/stats/thread_local_store.h index 3496c0790e15..bf57eed14d95 100644 --- a/source/common/stats/thread_local_store.h +++ b/source/common/stats/thread_local_store.h @@ -242,6 +242,11 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo return absl::nullopt; } + bool iterate(const IterateFn& fn) const override { return iterHelper(fn); } + bool iterate(const IterateFn& fn) const override { return iterHelper(fn); } + bool iterate(const IterateFn& fn) const override { return iterHelper(fn); } + bool iterate(const IterateFn& fn) const override { return iterHelper(fn); } + // Stats::Store std::vector counters() const override; std::vector gauges() const override; @@ -349,6 +354,28 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo NullGaugeImpl& nullGauge(const std::string&) override { return parent_.null_gauge_; } + template bool iterHelper(StatFn fn, const StatMap& map) const { + for (auto& iter : map) { + if (!fn(iter.second)) { + return false; + } + } + return true; + } + + bool iterate(const IterateFn& fn) const override { + return iterHelper(fn, central_cache_->counters_); + } + bool iterate(const IterateFn& fn) const override { + return iterHelper(fn, central_cache_->gauges_); + } + bool iterate(const IterateFn& fn) const override { + return iterHelper(fn, central_cache_->histograms_); + } + bool iterate(const IterateFn& fn) const override { + return iterHelper(fn, central_cache_->text_readouts_); + } + // NOTE: The find methods assume that `name` is fully-qualified. // Implementations will not add the scope prefix. CounterOptConstRef findCounter(StatName name) const override; @@ -418,6 +445,16 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo absl::flat_hash_map scope_cache_; }; + template bool iterHelper(StatFn fn) const { + Thread::LockGuard lock(lock_); + for (ScopeImpl* scope : scopes_) { + if (!scope->iterate(fn)) { + return false; + } + } + return true; + } + std::string getTagsForName(const std::string& name, TagVector& tags) const; void clearScopeFromCaches(uint64_t scope_id, CentralCacheEntrySharedPtr central_cache); void releaseScopeCrossThread(ScopeImpl* scope); diff --git a/source/common/stats/utility.h b/source/common/stats/utility.h index 46b72234da3a..4328c2ef5875 100644 --- a/source/common/stats/utility.h +++ b/source/common/stats/utility.h @@ -5,6 +5,7 @@ #include "envoy/stats/scope.h" #include "envoy/stats/stats.h" +#include "common/common/thread.h" #include "common/stats/symbol_table_impl.h" #include "absl/container/inlined_vector.h" @@ -206,5 +207,55 @@ class Utility { StatNameTagVectorOptConstRef tags = absl::nullopt); }; +/** + * Holds a reference to a stat by name. Note that the stat may not be created + * yet at the time CachedReference is created. Calling get() then does a lazy + * lookup, potentially returning absl::nullopt if the stat doesn't exist yet. + * StatReference works whether the name was constructed symbolically, or with + * StatNameDynamicStorage. 
+ * + * Lookups are very slow, taking time proportional to the size of the scope, + * holding mutexes during the lookup. However once the lookup succeeds, the + * result is cached atomically, and further calls to get() are thus fast and + * mutex-free. The implementation may be faster for stats that are named + * symbolically. + * + * CachedReference is valid for the lifetime of the Scope. When the Scope + * becomes invalid, CachedReferences must also be dropped as they will hold + * pointers into the scope. + */ +template class CachedReference { +public: + CachedReference(Scope& scope, absl::string_view name) : scope_(scope), name_(std::string(name)) {} + + /** + * Finds the named stat, if it exists, returning it as an optional. + */ + absl::optional> get() { + StatType* stat = stat_.get([this]() -> StatType* { + StatType* stat = nullptr; + IterateFn check_stat = [this, + &stat](const RefcountPtr& shared_stat) -> bool { + if (shared_stat->name() == name_) { + stat = shared_stat.get(); + return false; // Stop iteration. + } + return true; + }; + scope_.iterate(check_stat); + return stat; + }); + if (stat == nullptr) { + return absl::nullopt; + } + return *stat; + } + +private: + Scope& scope_; + const std::string name_; + Thread::AtomicPtr stat_; +}; + } // namespace Stats } // namespace Envoy diff --git a/test/common/stats/BUILD b/test/common/stats/BUILD index 5125844c5817..e7c6ebbb01d7 100644 --- a/test/common/stats/BUILD +++ b/test/common/stats/BUILD @@ -268,6 +268,7 @@ envoy_cc_test( srcs = ["utility_test.cc"], deps = [ "//source/common/stats:isolated_store_lib", + "//source/common/stats:thread_local_store_lib", "//source/common/stats:utility_lib", ], ) diff --git a/test/common/stats/utility_test.cc b/test/common/stats/utility_test.cc index 8f4ec260d3bb..bf1643ff4ed5 100644 --- a/test/common/stats/utility_test.cc +++ b/test/common/stats/utility_test.cc @@ -6,36 +6,169 @@ #include "common/stats/null_counter.h" #include "common/stats/null_gauge.h" #include "common/stats/symbol_table_creator.h" +#include "common/stats/thread_local_store.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" +#include "gmock/gmock.h" #include "gtest/gtest.h" +using testing::UnorderedElementsAre; + namespace Envoy { namespace Stats { namespace { -class StatsUtilityTest : public testing::Test { +// All the tests should be run for both IsolatedStore and ThreadLocalStore. 
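(Illustrative sketch, not part of the diff: the tests below exercise the new Scope::iterate() overloads and the Stats::CachedReference helper declared above. A minimal caller might look like the fragment that follows; the stat name "example.total", the helper function, and the include paths are assumptions for the example rather than code from this patch.)

#include <iostream>

#include "envoy/stats/scope.h"
#include "envoy/stats/stats.h"

#include "common/stats/utility.h"

namespace {
// Counts every counter in a scope, then bumps one counter found by its flat
// string name via the lazy, cached lookup.
void exampleUsage(Envoy::Stats::Scope& scope) {
  uint64_t num_counters = 0;
  Envoy::Stats::IterateFn<Envoy::Stats::Counter> count_all =
      [&num_counters](const Envoy::Stats::CounterSharedPtr&) -> bool {
        ++num_counters;
        return true; // Returning false would stop the iteration early.
      };
  scope.iterate(count_all);
  std::cerr << "counters in scope: " << num_counters << "\n";

  // Lazy find-by-string: works whether the name segments were created
  // symbolically or dynamically, and caches the pointer after the first
  // successful lookup so later get() calls are cheap.
  Envoy::Stats::CachedReference<Envoy::Stats::Counter> total(scope, "example.total");
  if (auto counter = total.get()) {
    counter->get().inc();
  }
}
} // namespace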
+enum class StoreType { + ThreadLocal, + Isolated, +}; + +class StatsUtilityTest : public testing::TestWithParam { protected: + template + using IterateFn = std::function& stat)>; + using MakeStatFn = std::function; + StatsUtilityTest() - : symbol_table_(SymbolTableCreator::makeSymbolTable()), - store_(std::make_unique(*symbol_table_)), pool_(*symbol_table_), + : symbol_table_(SymbolTableCreator::makeSymbolTable()), pool_(*symbol_table_), tags_( - {{pool_.add("tag1"), pool_.add("value1")}, {pool_.add("tag2"), pool_.add("value2")}}) {} + {{pool_.add("tag1"), pool_.add("value1")}, {pool_.add("tag2"), pool_.add("value2")}}) { + switch (GetParam()) { + case StoreType::ThreadLocal: + alloc_ = std::make_unique(*symbol_table_), + store_ = std::make_unique(*alloc_); + break; + case StoreType::Isolated: + store_ = std::make_unique(*symbol_table_); + break; + } + scope_ = store_->createScope("scope"); + } ~StatsUtilityTest() override { + scope_.reset(); pool_.clear(); store_.reset(); EXPECT_EQ(0, symbol_table_->numSymbols()); } + void init(MakeStatFn make_stat) { + make_stat(*store_, {pool_.add("symbolic1")}); + make_stat(*store_, {Stats::DynamicName("dynamic1")}); + make_stat(*scope_, {pool_.add("symbolic2")}); + make_stat(*scope_, {Stats::DynamicName("dynamic2")}); + } + + template IterateFn iterOnce() { + return [this](const RefcountPtr& stat) -> bool { + results_.insert(stat->name()); + return false; + }; + } + + template IterateFn iterAll() { + return [this](const RefcountPtr& stat) -> bool { + results_.insert(stat->name()); + return true; + }; + } + + static MakeStatFn makeCounter() { + return [](Scope& scope, const ElementVec& elements) { + Utility::counterFromElements(scope, elements).inc(); + }; + } + + static bool checkValue(const Counter& counter) { return counter.value() == 1; } + + static MakeStatFn makeGauge() { + return [](Scope& scope, const ElementVec& elements) { + Utility::gaugeFromElements(scope, elements, Gauge::ImportMode::Accumulate).inc(); + }; + } + + static bool checkValue(const Gauge& gauge) { return gauge.value() == 1; } + + static MakeStatFn makeHistogram() { + return [](Scope& scope, const ElementVec& elements) { + Utility::histogramFromElements(scope, elements, Histogram::Unit::Milliseconds); + }; + } + + static bool checkValue(const Histogram& histogram) { + return histogram.unit() == Histogram::Unit::Milliseconds; + } + + static MakeStatFn makeTextReadout() { + return [](Scope& scope, const ElementVec& elements) { + Utility::textReadoutFromElements(scope, elements).set("my-value"); + }; + } + + static bool checkValue(const TextReadout& text_readout) { + return text_readout.value() == "my-value"; + } + + template void storeOnce(const MakeStatFn make_stat) { + CachedReference symbolic1_ref(*store_, "symbolic1"); + CachedReference dynamic1_ref(*store_, "dynamic1"); + EXPECT_FALSE(symbolic1_ref.get()); + EXPECT_FALSE(dynamic1_ref.get()); + + init(make_stat); + + ASSERT_TRUE(symbolic1_ref.get()); + ASSERT_TRUE(dynamic1_ref.get()); + EXPECT_FALSE(store_->iterate(iterOnce())); + EXPECT_EQ(1, results_.size()); + EXPECT_TRUE(checkValue(*symbolic1_ref.get())); + EXPECT_TRUE(checkValue(*dynamic1_ref.get())); + } + + template void storeAll(const MakeStatFn make_stat) { + init(make_stat); + EXPECT_TRUE(store_->iterate(iterAll())); + EXPECT_THAT(results_, + UnorderedElementsAre("symbolic1", "dynamic1", "scope.symbolic2", "scope.dynamic2")); + } + + template void scopeOnce(const MakeStatFn make_stat) { + CachedReference symbolic2_ref(*store_, "scope.symbolic2"); + CachedReference 
dynamic2_ref(*store_, "scope.dynamic2"); + EXPECT_FALSE(symbolic2_ref.get()); + EXPECT_FALSE(dynamic2_ref.get()); + + init(make_stat); + + ASSERT_TRUE(symbolic2_ref.get()); + ASSERT_TRUE(dynamic2_ref.get()); + EXPECT_FALSE(scope_->iterate(iterOnce())); + EXPECT_EQ(1, results_.size()); + EXPECT_TRUE(checkValue(*symbolic2_ref.get())); + EXPECT_TRUE(checkValue(*dynamic2_ref.get())); + } + + template void scopeAll(const MakeStatFn make_stat) { + init(make_stat); + EXPECT_TRUE(scope_->iterate(iterAll())); + EXPECT_THAT(results_, UnorderedElementsAre("scope.symbolic2", "scope.dynamic2")); + } + SymbolTablePtr symbol_table_; - std::unique_ptr store_; StatNamePool pool_; + std::unique_ptr alloc_; + std::unique_ptr store_; + ScopePtr scope_; + absl::flat_hash_set results_; StatNameTagVector tags_; }; -TEST_F(StatsUtilityTest, Counters) { +INSTANTIATE_TEST_SUITE_P(StatsUtilityTest, StatsUtilityTest, + testing::ValuesIn({StoreType::ThreadLocal, StoreType::Isolated})); + +TEST_P(StatsUtilityTest, Counters) { ScopePtr scope = store_->createScope("scope."); Counter& c1 = Utility::counterFromElements(*scope, {DynamicName("a"), DynamicName("b")}); EXPECT_EQ("scope.a.b", c1.name()); @@ -54,7 +187,7 @@ TEST_F(StatsUtilityTest, Counters) { EXPECT_EQ("scope.x.token.y.tag1.value1.tag2.value2", ctags.name()); } -TEST_F(StatsUtilityTest, Gauges) { +TEST_P(StatsUtilityTest, Gauges) { ScopePtr scope = store_->createScope("scope."); Gauge& g1 = Utility::gaugeFromElements(*scope, {DynamicName("a"), DynamicName("b")}, Gauge::ImportMode::NeverImport); @@ -73,7 +206,7 @@ TEST_F(StatsUtilityTest, Gauges) { EXPECT_EQ(&g3, &g4); } -TEST_F(StatsUtilityTest, Histograms) { +TEST_P(StatsUtilityTest, Histograms) { ScopePtr scope = store_->createScope("scope."); Histogram& h1 = Utility::histogramFromElements(*scope, {DynamicName("a"), DynamicName("b")}, Histogram::Unit::Milliseconds); @@ -92,7 +225,7 @@ TEST_F(StatsUtilityTest, Histograms) { EXPECT_EQ(&h3, &h4); } -TEST_F(StatsUtilityTest, TextReadouts) { +TEST_P(StatsUtilityTest, TextReadouts) { ScopePtr scope = store_->createScope("scope."); TextReadout& t1 = Utility::textReadoutFromElements(*scope, {DynamicName("a"), DynamicName("b")}); EXPECT_EQ("scope.a.b", t1.name()); @@ -107,6 +240,38 @@ TEST_F(StatsUtilityTest, TextReadouts) { EXPECT_EQ(&t3, &t4); } +TEST_P(StatsUtilityTest, StoreCounterOnce) { storeOnce(makeCounter()); } + +TEST_P(StatsUtilityTest, StoreCounterAll) { storeAll(makeCounter()); } + +TEST_P(StatsUtilityTest, ScopeCounterOnce) { scopeOnce(makeCounter()); } + +TEST_P(StatsUtilityTest, ScopeCounterAll) { scopeAll(makeCounter()); } + +TEST_P(StatsUtilityTest, StoreGaugeOnce) { storeOnce(makeGauge()); } + +TEST_P(StatsUtilityTest, StoreGaugeAll) { storeAll(makeGauge()); } + +TEST_P(StatsUtilityTest, ScopeGaugeOnce) { scopeOnce(makeGauge()); } + +TEST_P(StatsUtilityTest, ScopeGaugeAll) { scopeAll(makeGauge()); } + +TEST_P(StatsUtilityTest, StoreHistogramOnce) { storeOnce(makeHistogram()); } + +TEST_P(StatsUtilityTest, StoreHistogramAll) { storeAll(makeHistogram()); } + +TEST_P(StatsUtilityTest, ScopeHistogramOnce) { scopeOnce(makeHistogram()); } + +TEST_P(StatsUtilityTest, ScopeHistogramAll) { scopeAll(makeHistogram()); } + +TEST_P(StatsUtilityTest, StoreTextReadoutOnce) { storeOnce(makeTextReadout()); } + +TEST_P(StatsUtilityTest, StoreTextReadoutAll) { storeAll(makeTextReadout()); } + +TEST_P(StatsUtilityTest, ScopeTextReadoutOnce) { scopeOnce(makeTextReadout()); } + +TEST_P(StatsUtilityTest, ScopeTextReadoutAll) { scopeAll(makeTextReadout()); } + } // 
namespace } // namespace Stats } // namespace Envoy diff --git a/test/integration/server.h b/test/integration/server.h index 67e860ea4a4b..55149d0b6c16 100644 --- a/test/integration/server.h +++ b/test/integration/server.h @@ -150,6 +150,15 @@ class TestScopeWrapper : public Scope { } SymbolTable& symbolTable() override { return wrapped_scope_->symbolTable(); } + bool iterate(const IterateFn& fn) const override { return wrapped_scope_->iterate(fn); } + bool iterate(const IterateFn& fn) const override { return wrapped_scope_->iterate(fn); } + bool iterate(const IterateFn& fn) const override { + return wrapped_scope_->iterate(fn); + } + bool iterate(const IterateFn& fn) const override { + return wrapped_scope_->iterate(fn); + } + private: Thread::MutexBasicLockable& lock_; ScopePtr wrapped_scope_; @@ -333,6 +342,11 @@ class TestIsolatedStoreImpl : public StoreRoot { return store_.textReadouts(); } + bool iterate(const IterateFn& fn) const override { return store_.iterate(fn); } + bool iterate(const IterateFn& fn) const override { return store_.iterate(fn); } + bool iterate(const IterateFn& fn) const override { return store_.iterate(fn); } + bool iterate(const IterateFn& fn) const override { return store_.iterate(fn); } + // Stats::StoreRoot void addSink(Sink&) override {} void setTagProducer(TagProducerPtr&&) override {} From 1267241bcb9a15c79cb5929ae93001ad060e802b Mon Sep 17 00:00:00 2001 From: Phil Genera Date: Wed, 22 Jul 2020 12:00:15 -0400 Subject: [PATCH 713/909] docs: add some verbiage for benchmark test rules (#12121) Commit Message: Add documentation about benchmark test bazel rules. Additional Description: I'm trying to communicate that envoy_cc_benchmark_binary is for humans measuring performance, and envoy_benchmark_test is for machines running CI builds. Risk Level: None. Documentation only. Testing: automated spell check. Release Notes: N/A Signed-off-by: Phil Genera --- bazel/envoy_test.bzl | 7 +++++-- test/README.md | 17 +++++++++++++++++ test/benchmark/BUILD | 1 + test/benchmark/main.cc | 16 +++++++++++++--- 4 files changed, 36 insertions(+), 5 deletions(-) diff --git a/bazel/envoy_test.bzl b/bazel/envoy_test.bzl index 0406dda976ee..7f04e152b77f 100644 --- a/bazel/envoy_test.bzl +++ b/bazel/envoy_test.bzl @@ -247,7 +247,8 @@ def envoy_cc_test_binary( **kargs ) -# Envoy benchmark binaries should be specified with this function. +# Envoy benchmark binaries should be specified with this function. bazel run +# these targets to measure performance. def envoy_cc_benchmark_binary( name, deps = [], @@ -258,7 +259,9 @@ def envoy_cc_benchmark_binary( **kargs ) -# Tests to validate that Envoy benchmarks run successfully should be specified with this function. +# Tests to validate that Envoy benchmarks run successfully should be specified +# with this function. Not for actual performance measurements: iteratons and +# expensive benchmarks will be skipped in the interest of execution time. def envoy_benchmark_test( name, benchmark_binary, diff --git a/test/README.md b/test/README.md index 3617c73719c1..2746efe98c8d 100644 --- a/test/README.md +++ b/test/README.md @@ -119,3 +119,20 @@ test infrastructure that wants to be agnostic to which `TimeSystem` is used in a test. When no `TimeSystem` is instantiated in a test, the `Event::GlobalTimeSystem` will lazy-initialize itself into a concrete `TimeSystem`. Currently this is `TestRealTimeSystem` but will be changed in the future to `SimulatedTimeSystem`. 
+ + +## Benchmark tests + +Envoy uses [Google Benchmark](https://github.com/google/benchmark/) for +microbenchmarks. There are custom bazel rules, `envoy_cc_benchmark_binary` and +`envoy_benchmark_test`, to execute them locally and in CI environments +respectively. `envoy_benchmark_test` rules call the benchmark binary from a +[script](https://github.com/envoyproxy/envoy/blob/master/bazel/test_for_benchmark_wrapper.sh) +which runs the benchmark with a minimal number of iterations and skipping +expensive benchmarks to quickly verify that the binary is able to run to +completion. In order to collect meaningful bechmarks, `bazel run -c opt` the +benchmark binary target on a quiescent machine. + +If you would like to detect when your benchmark test is running under the +wrapper, call +[`Envoy::benchmark::skipExpensiveBechmarks()`](https://github.com/envoyproxy/envoy/blob/master/test/benchmark/main.h). diff --git a/test/benchmark/BUILD b/test/benchmark/BUILD index fa01e3b1ce63..afcb2602898d 100644 --- a/test/benchmark/BUILD +++ b/test/benchmark/BUILD @@ -17,6 +17,7 @@ envoy_cc_test_library( "tclap", ], deps = [ + "//source/common/common:minimal_logger_lib", "//test/test_common:environment_lib", ], ) diff --git a/test/benchmark/main.cc b/test/benchmark/main.cc index 6c23c1031a6c..3c79ff36b2e0 100644 --- a/test/benchmark/main.cc +++ b/test/benchmark/main.cc @@ -2,11 +2,15 @@ // This is an Envoy driver for benchmarks. #include "test/benchmark/main.h" +#include "common/common/logger.h" + #include "test/test_common/environment.h" #include "benchmark/benchmark.h" #include "tclap/CmdLine.h" +using namespace Envoy; + static bool skip_expensive_benchmarks = false; // Boilerplate main(), which discovers benchmarks and runs them. This uses two @@ -15,7 +19,7 @@ static bool skip_expensive_benchmarks = false; // separated by --. // TODO(pgenera): convert this to abseil/flags/ when benchmark also adopts abseil. 
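(Illustrative sketch, not part of the diff: a benchmark following the README guidance above, consulting the wrapper flag so that CI runs stay fast. The benchmark name, argument sizes, and the threshold of 64 are assumptions for the example rather than code from this patch.)

#include "benchmark/benchmark.h"

#include "test/benchmark/main.h"

// Skips the large variants when run under the envoy_benchmark_test wrapper;
// a plain `bazel run -c opt` still measures the full range.
static void bmExampleWorkload(benchmark::State& state) {
  if (Envoy::benchmark::skipExpensiveBenchmarks() && state.range(0) > 64) {
    state.SkipWithError("skipping expensive variant under the CI wrapper");
    return;
  }
  for (auto _ : state) {
    uint64_t sum = 0;
    for (int64_t i = 0; i < state.range(0); ++i) {
      sum += i;
    }
    benchmark::DoNotOptimize(sum);
  }
}
BENCHMARK(bmExampleWorkload)->Arg(8)->Arg(65536);

Outside the wrapper the skip flag defaults to false, so every registered argument is measured.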
int main(int argc, char** argv) { - Envoy::TestEnvironment::initializeTestMain(argv[0]); + TestEnvironment::initializeTestMain(argv[0]); // NOLINTNEXTLINE(clang-analyzer-optin.cplusplus.VirtualCall) TCLAP::CmdLine cmd("envoy-benchmark-test", ' ', "0.1"); @@ -33,8 +37,14 @@ int main(int argc, char** argv) { skip_expensive_benchmarks = skip_switch.getValue(); - benchmark::Initialize(&argc, argv); - benchmark::RunSpecifiedBenchmarks(); + ::benchmark::Initialize(&argc, argv); + + if (skip_expensive_benchmarks) { + ENVOY_LOG_MISC( + critical, + "Expensive benchmarks are being skipped; see test/README.md for more information"); + } + ::benchmark::RunSpecifiedBenchmarks(); } bool Envoy::benchmark::skipExpensiveBenchmarks() { return skip_expensive_benchmarks; } From f20c9168aa2e92b2c5b6f8103d792e66fd813c62 Mon Sep 17 00:00:00 2001 From: Kuat Date: Wed, 22 Jul 2020 10:03:02 -0700 Subject: [PATCH 714/909] runtime: fix RDTS typo (#12221) Risk Level: low Signed-off-by: Kuat Yessenov --- source/common/runtime/runtime_impl.cc | 4 ++-- source/common/runtime/runtime_impl.h | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/source/common/runtime/runtime_impl.cc b/source/common/runtime/runtime_impl.cc index 07181ccc5728..6b61b2211913 100644 --- a/source/common/runtime/runtime_impl.cc +++ b/source/common/runtime/runtime_impl.cc @@ -349,7 +349,7 @@ LoaderImpl::LoaderImpl(Event::Dispatcher& dispatcher, ThreadLocal::SlotAllocator ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api) : generator_(generator), stats_(generateStats(store)), tls_(tls.allocateSlot()), config_(config), service_cluster_(local_info.clusterName()), api_(api), - init_watcher_("RDTS", [this]() { onRdtsReady(); }), store_(store) { + init_watcher_("RTDS", [this]() { onRtdsReady(); }), store_(store) { std::unordered_set layer_names; for (const auto& layer : config_.layers()) { auto ret = layer_names.insert(layer.name()); @@ -400,7 +400,7 @@ void LoaderImpl::startRtdsSubscriptions(ReadyCallback on_done) { init_manager_.initialize(init_watcher_); } -void LoaderImpl::onRdtsReady() { +void LoaderImpl::onRtdsReady() { ENVOY_LOG(info, "RTDS has finished initialization"); on_rtds_initialized_(); } diff --git a/source/common/runtime/runtime_impl.h b/source/common/runtime/runtime_impl.h index ff0b4c434411..d25e8d8ed25f 100644 --- a/source/common/runtime/runtime_impl.h +++ b/source/common/runtime/runtime_impl.h @@ -250,7 +250,7 @@ class LoaderImpl : public Loader, Logger::Loggable { // Load a new Snapshot into TLS void loadNewSnapshot(); RuntimeStats generateStats(Stats::Store& store); - void onRdtsReady(); + void onRtdsReady(); Random::RandomGenerator& generator_; RuntimeStats stats_; From 18e452d714ac68ae1d825f37eae3d3fd38681069 Mon Sep 17 00:00:00 2001 From: Stephan Zuercher Date: Wed, 22 Jul 2020 11:22:58 -0700 Subject: [PATCH 715/909] thrift proxy: improve router code coverage (#12194) Adds some additional tests to cover untested lines in the Thrift router filter. 
Risk Level: low Testing: adds test cases Docs Changes: n/a Release Notes: n/a Fixes: #12003 Signed-off-by: Stephan Zuercher --- CODEOWNERS | 2 +- .../network/thrift_proxy/filters/BUILD | 18 +++ .../filters/pass_through_filter_test.cc | 118 +++++++++++++++++ .../thrift_proxy/route_matcher_test.cc | 116 +++++++++++++++++ .../thrift_proxy/router_ratelimit_test.cc | 123 ++++++++++++++++++ .../network/thrift_proxy/router_test.cc | 69 ++++++++++ test/per_file_coverage.sh | 1 - 7 files changed, 445 insertions(+), 2 deletions(-) create mode 100644 test/extensions/filters/network/thrift_proxy/filters/BUILD create mode 100644 test/extensions/filters/network/thrift_proxy/filters/pass_through_filter_test.cc diff --git a/CODEOWNERS b/CODEOWNERS index 6038872ce0cc..5c30c1bb9923 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -22,7 +22,7 @@ extensions/filters/common/original_src @snowp @klarose # rocketmq_proxy extension /*/extensions/filters/network/rocketmq_proxy @aaron-ai @lizhanhui @lizan # thrift_proxy extension -/*/extensions/filters/network/thrift_proxy @zuercher @brian-pane +/*/extensions/filters/network/thrift_proxy @zuercher @rgs1 # compressor used by http compression filters /*/extensions/filters/http/common/compressor @gsagula @rojkov @dio /*/extensions/filters/http/compressor @rojkov @dio diff --git a/test/extensions/filters/network/thrift_proxy/filters/BUILD b/test/extensions/filters/network/thrift_proxy/filters/BUILD new file mode 100644 index 000000000000..0af3863cfb2c --- /dev/null +++ b/test/extensions/filters/network/thrift_proxy/filters/BUILD @@ -0,0 +1,18 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_test", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_test( + name = "pass_through_filter_test", + srcs = ["pass_through_filter_test.cc"], + deps = [ + "//source/extensions/filters/network/thrift_proxy/filters:pass_through_filter_lib", + "//test/extensions/filters/network/thrift_proxy:mocks", + ], +) diff --git a/test/extensions/filters/network/thrift_proxy/filters/pass_through_filter_test.cc b/test/extensions/filters/network/thrift_proxy/filters/pass_through_filter_test.cc new file mode 100644 index 000000000000..bd0952258bbc --- /dev/null +++ b/test/extensions/filters/network/thrift_proxy/filters/pass_through_filter_test.cc @@ -0,0 +1,118 @@ +#include +#include + +#include "extensions/filters/network/thrift_proxy/filters/pass_through_filter.h" + +#include "test/extensions/filters/network/thrift_proxy/mocks.h" +#include "test/test_common/printers.h" +#include "test/test_common/utility.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::NiceMock; + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace ThriftProxy { +namespace ThriftFilters { + +using namespace Envoy::Extensions::NetworkFilters; + +class ThriftPassThroughDecoderFilterTest : public testing::Test { +public: + class Filter : public PassThroughDecoderFilter { + public: + DecoderFilterCallbacks* decoderFilterCallbacks() { return decoder_callbacks_; } + }; + + void initialize() { + filter_ = std::make_unique(); + filter_->setDecoderFilterCallbacks(filter_callbacks_); + } + + std::unique_ptr filter_; + NiceMock filter_callbacks_; + ThriftProxy::MessageMetadataSharedPtr request_metadata_; +}; + +// Tests that each method returns ThriftProxy::FilterStatus::Continue. 
+TEST_F(ThriftPassThroughDecoderFilterTest, AllMethodsAreImplementedTrivially) { + initialize(); + + EXPECT_EQ(&filter_callbacks_, filter_->decoderFilterCallbacks()); + + EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->transportBegin(request_metadata_)); + EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->messageBegin(request_metadata_)); + { + std::string dummy_str = "dummy"; + EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->structBegin(dummy_str)); + } + { + std::string dummy_str = "dummy"; + ThriftProxy::FieldType dummy_ft{ThriftProxy::FieldType::I32}; + int16_t dummy_id{1}; + EXPECT_EQ(ThriftProxy::FilterStatus::Continue, + filter_->fieldBegin(dummy_str, dummy_ft, dummy_id)); + } + { + bool dummy_val{false}; + EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->boolValue(dummy_val)); + } + { + uint8_t dummy_val{0}; + EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->byteValue(dummy_val)); + } + { + int16_t dummy_val{0}; + EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->int16Value(dummy_val)); + } + { + int32_t dummy_val{0}; + EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->int32Value(dummy_val)); + } + { + int64_t dummy_val{0}; + EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->int64Value(dummy_val)); + } + { + double dummy_val{0.0}; + EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->doubleValue(dummy_val)); + } + { + std::string dummy_str = "dummy"; + EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->stringValue(dummy_str)); + } + { + ThriftProxy::FieldType dummy_ft = ThriftProxy::FieldType::I32; + uint32_t dummy_size{1}; + EXPECT_EQ(ThriftProxy::FilterStatus::Continue, + filter_->mapBegin(dummy_ft, dummy_ft, dummy_size)); + EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->mapEnd()); + } + { + ThriftProxy::FieldType dummy_ft = ThriftProxy::FieldType::I32; + uint32_t dummy_size{1}; + EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->listBegin(dummy_ft, dummy_size)); + EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->listEnd()); + } + { + ThriftProxy::FieldType dummy_ft = ThriftProxy::FieldType::I32; + uint32_t dummy_size{1}; + EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->setBegin(dummy_ft, dummy_size)); + EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->setEnd()); + } + EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->structEnd()); + EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->fieldEnd()); + EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->messageEnd()); + EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->transportEnd()); + + EXPECT_NO_THROW(filter_->onDestroy()); +} + +} // namespace ThriftFilters +} // namespace ThriftProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/network/thrift_proxy/route_matcher_test.cc b/test/extensions/filters/network/thrift_proxy/route_matcher_test.cc index b39e45d391c3..0e89c355ae26 100644 --- a/test/extensions/filters/network/thrift_proxy/route_matcher_test.cc +++ b/test/extensions/filters/network/thrift_proxy/route_matcher_test.cc @@ -3,6 +3,8 @@ #include "envoy/extensions/filters/network/thrift_proxy/v3/route.pb.h" #include "envoy/extensions/filters/network/thrift_proxy/v3/route.pb.validate.h" +#include "common/config/metadata.h" + #include "extensions/filters/network/thrift_proxy/router/config.h" #include "extensions/filters/network/thrift_proxy/router/router_impl.h" @@ -756,6 +758,8 @@ name: config EXPECT_EQ("k2", mmc[1]->name()); EXPECT_EQ(hv2, 
mmc[1]->value()); + + EXPECT_EQ(Http::LowerCaseString{""}, route->routeEntry()->clusterHeader()); } // match with weighted cluster with different metadata key @@ -889,6 +893,118 @@ name: config } } +// Test that the route entry has metadata match criteria when using a cluster header. +TEST(ThriftRouteMatcherTest, ClusterHeaderMetadataMatch) { + envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config; + { + config.set_name("config"); + auto* route = config.add_routes(); + route->mutable_match()->set_method_name("method1"); + auto* action = route->mutable_route(); + action->set_cluster_header("header_name"); + auto* metadata = action->mutable_metadata_match(); + Envoy::Config::Metadata::mutableMetadataValue(*metadata, "envoy.lb", "k1") + .set_string_value("v1"); + Envoy::Config::Metadata::mutableMetadataValue(*metadata, "envoy.lb", "k2") + .set_string_value("v2"); + + auto* route2 = config.add_routes(); + route2->mutable_match()->set_method_name("method2"); + auto* action2 = route2->mutable_route(); + action2->set_cluster("cluster2"); + } + + RouteMatcher matcher(config); + + // match with metadata + { + MessageMetadata metadata; + metadata.setMethodName("method1"); + metadata.headers().addCopy(Http::LowerCaseString{"header_name"}, "cluster1"); + RouteConstSharedPtr route = matcher.route(metadata, 0); + EXPECT_NE(nullptr, route); + EXPECT_NE(nullptr, route->routeEntry()); + + EXPECT_EQ(Http::LowerCaseString{"header_name"}, route->routeEntry()->clusterHeader()); + + const Envoy::Router::MetadataMatchCriteria* criteria = + route->routeEntry()->metadataMatchCriteria(); + EXPECT_NE(nullptr, criteria); + const std::vector& mmc = + criteria->metadataMatchCriteria(); + EXPECT_EQ(2, mmc.size()); + + ProtobufWkt::Value v1, v2; + v1.set_string_value("v1"); + v2.set_string_value("v2"); + HashedValue hv1(v1), hv2(v2); + + EXPECT_EQ("k1", mmc[0]->name()); + EXPECT_EQ(hv1, mmc[0]->value()); + + EXPECT_EQ("k2", mmc[1]->name()); + EXPECT_EQ(hv2, mmc[1]->value()); + } + + // match with no metadata + { + MessageMetadata metadata; + metadata.setMethodName("method2"); + RouteConstSharedPtr route = matcher.route(metadata, 0); + EXPECT_NE(nullptr, route); + EXPECT_NE(nullptr, route->routeEntry()); + EXPECT_EQ(nullptr, route->routeEntry()->metadataMatchCriteria()); + + EXPECT_EQ(Http::LowerCaseString{""}, route->routeEntry()->clusterHeader()); + } +} + +// Tests that weighted cluster route entries can be configured to strip the service name. +TEST(RouteMatcherTest, WeightedClusterWithStripServiceEnabled) { + envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config; + { + config.set_name("config"); + auto* route = config.add_routes(); + route->mutable_match()->set_method_name("method1"); + auto* action = route->mutable_route(); + auto* cluster1 = action->mutable_weighted_clusters()->add_clusters(); + cluster1->set_name("cluster1"); + cluster1->mutable_weight()->set_value(50); + auto* cluster2 = action->mutable_weighted_clusters()->add_clusters(); + cluster2->set_name("cluster2"); + cluster2->mutable_weight()->set_value(50); + action->set_strip_service_name(true); + } + + RouteMatcher matcher(config); + + MessageMetadata metadata; + metadata.setMethodName("method1"); + + EXPECT_TRUE(matcher.route(metadata, 0)->routeEntry()->stripServiceName()); +} + +// Tests that dynamic route entries can be configured to strip the service name. 
+TEST(RouteMatcherTest, ClusterHeaderWithStripServiceEnabled) { + envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config; + { + config.set_name("config"); + auto* route = config.add_routes(); + route->mutable_match()->set_method_name("method1"); + auto* action = route->mutable_route(); + action->set_cluster_header("header_name"); + action->set_strip_service_name(true); + } + + RouteMatcher matcher(config); + + MessageMetadata metadata; + metadata.setMethodName("method1"); + metadata.headers().addCopy(Http::LowerCaseString{"header_name"}, "cluster1"); + + EXPECT_TRUE(matcher.route(metadata, 0)->routeEntry()->stripServiceName()); +} + } // namespace } // namespace Router } // namespace ThriftProxy diff --git a/test/extensions/filters/network/thrift_proxy/router_ratelimit_test.cc b/test/extensions/filters/network/thrift_proxy/router_ratelimit_test.cc index 18d6ddaaa5db..4460b8c11c6e 100644 --- a/test/extensions/filters/network/thrift_proxy/router_ratelimit_test.cc +++ b/test/extensions/filters/network/thrift_proxy/router_ratelimit_test.cc @@ -34,6 +34,10 @@ class ThriftRateLimitConfigurationTest : public testing::Test { void initialize(const std::string& yaml) { envoy::extensions::filters::network::thrift_proxy::v3::ThriftProxy config; TestUtility::loadFromYaml(yaml, config); + initialize(config); + } + + void initialize(envoy::extensions::filters::network::thrift_proxy::v3::ThriftProxy& config) { config_ = std::make_unique(config, factory_context_); } @@ -170,6 +174,125 @@ TEST_F(ThriftRateLimitConfigurationTest, Stages) { EXPECT_TRUE(rate_limits.empty()); } +// Test that rate limiter stages work with weighted cluster route entries. +TEST_F(ThriftRateLimitConfigurationTest, WeightedClusterStages) { + envoy::extensions::filters::network::thrift_proxy::v3::ThriftProxy config; + { + auto* route_config = config.mutable_route_config(); + route_config->set_name("config"); + auto* route = route_config->add_routes(); + route->mutable_match()->set_method_name("foo"); + auto* action = route->mutable_route(); + auto* cluster1 = action->mutable_weighted_clusters()->add_clusters(); + cluster1->set_name("thrift"); + cluster1->mutable_weight()->set_value(50); + auto* cluster2 = action->mutable_weighted_clusters()->add_clusters(); + cluster2->set_name("thrift2"); + cluster2->mutable_weight()->set_value(50); + + auto* limit1 = action->add_rate_limits(); + limit1->mutable_stage()->set_value(1); + limit1->add_actions()->mutable_remote_address(); + + action->add_rate_limits()->add_actions()->mutable_destination_cluster(); + + auto* limit3 = action->add_rate_limits(); + limit3->add_actions()->mutable_destination_cluster(); + limit3->add_actions()->mutable_source_cluster(); + } + initialize(config); + + auto route = config_->route(genMetadata("foo"), 0)->routeEntry(); + std::vector> rate_limits = + route->rateLimitPolicy().getApplicableRateLimit(0); + EXPECT_EQ(2U, rate_limits.size()); + + std::vector descriptors; + for (const RateLimitPolicyEntry& rate_limit : rate_limits) { + rate_limit.populateDescriptors(*route, descriptors, "service_cluster", *metadata_, + default_remote_address_); + } + EXPECT_THAT(std::vector( + {{{{"destination_cluster", "thrift"}}}, + {{{"destination_cluster", "thrift"}, {"source_cluster", "service_cluster"}}}}), + testing::ContainerEq(descriptors)); + + descriptors.clear(); + rate_limits = route->rateLimitPolicy().getApplicableRateLimit(1); + EXPECT_EQ(1U, rate_limits.size()); + + for (const RateLimitPolicyEntry& rate_limit : rate_limits) { + 
rate_limit.populateDescriptors(*route, descriptors, "service_cluster", *metadata_, + default_remote_address_); + } + EXPECT_THAT(std::vector({{{{"remote_address", "10.0.0.1"}}}}), + testing::ContainerEq(descriptors)); + + rate_limits = route->rateLimitPolicy().getApplicableRateLimit(10); + EXPECT_TRUE(rate_limits.empty()); +} + +// Test that rate limiter stages work with dynamic route entries. +TEST_F(ThriftRateLimitConfigurationTest, ClusterHeaderStages) { + envoy::extensions::filters::network::thrift_proxy::v3::ThriftProxy config; + { + auto* route_config = config.mutable_route_config(); + route_config->set_name("config"); + auto* route = route_config->add_routes(); + route->mutable_match()->set_method_name("foo"); + auto* action = route->mutable_route(); + action->set_cluster_header("header_name"); + + auto* limit1 = action->add_rate_limits(); + limit1->mutable_stage()->set_value(1); + limit1->add_actions()->mutable_remote_address(); + + action->add_rate_limits()->add_actions()->mutable_destination_cluster(); + + auto* limit3 = action->add_rate_limits(); + limit3->add_actions()->mutable_destination_cluster(); + limit3->add_actions()->mutable_source_cluster(); + } + initialize(config); + + auto& metadata = genMetadata("foo"); + metadata.headers().addCopy(Http::LowerCaseString{"header_name"}, "thrift"); + + // Keep hold of route, it's a newly minted shared pointer. + auto route = config_->route(metadata, 0); + auto* route_entry = route->routeEntry(); + + std::vector> rate_limits = + route_entry->rateLimitPolicy().getApplicableRateLimit(0); + + EXPECT_EQ(2U, rate_limits.size()); + + std::vector descriptors; + for (const RateLimitPolicyEntry& rate_limit : rate_limits) { + rate_limit.populateDescriptors(*route_entry, descriptors, "service_cluster", *metadata_, + default_remote_address_); + } + + EXPECT_THAT(std::vector( + {{{{"destination_cluster", "thrift"}}}, + {{{"destination_cluster", "thrift"}, {"source_cluster", "service_cluster"}}}}), + testing::ContainerEq(descriptors)); + + descriptors.clear(); + rate_limits = route_entry->rateLimitPolicy().getApplicableRateLimit(1); + EXPECT_EQ(1U, rate_limits.size()); + + for (const RateLimitPolicyEntry& rate_limit : rate_limits) { + rate_limit.populateDescriptors(*route_entry, descriptors, "service_cluster", *metadata_, + default_remote_address_); + } + EXPECT_THAT(std::vector({{{{"remote_address", "10.0.0.1"}}}}), + testing::ContainerEq(descriptors)); + + rate_limits = route_entry->rateLimitPolicy().getApplicableRateLimit(10); + EXPECT_TRUE(rate_limits.empty()); +} + class ThriftRateLimitPolicyEntryTest : public testing::Test { public: void initialize(const std::string& yaml) { diff --git a/test/extensions/filters/network/thrift_proxy/router_test.cc b/test/extensions/filters/network/thrift_proxy/router_test.cc index a8c9192bb258..2034941d59a3 100644 --- a/test/extensions/filters/network/thrift_proxy/router_test.cc +++ b/test/extensions/filters/network/thrift_proxy/router_test.cc @@ -700,6 +700,75 @@ TEST_F(ThriftRouterTest, ProtocolUpgrade) { destroyRouter(); } +// Test the case where an upgrade will occur, but the conn pool +// returns immediately with a valid, but never, used connection. 
+TEST_F(ThriftRouterTest, ProtocolUpgradeOnExistingUnusedConnection) { + initializeRouter(); + + EXPECT_CALL(*context_.cluster_manager_.tcp_conn_pool_.connection_data_, addUpstreamCallbacks(_)) + .WillOnce(Invoke( + [&](Tcp::ConnectionPool::UpstreamCallbacks& cb) -> void { upstream_callbacks_ = &cb; })); + + conn_state_.reset(); + EXPECT_CALL(*context_.cluster_manager_.tcp_conn_pool_.connection_data_, connectionState()) + .WillRepeatedly( + Invoke([&]() -> Tcp::ConnectionPool::ConnectionState* { return conn_state_.get(); })); + EXPECT_CALL(*context_.cluster_manager_.tcp_conn_pool_.connection_data_, setConnectionState_(_)) + .WillOnce(Invoke( + [&](Tcp::ConnectionPool::ConnectionStatePtr& cs) -> void { conn_state_.swap(cs); })); + + MockThriftObject* upgrade_response = new NiceMock(); + + EXPECT_CALL(upstream_connection_, write(_, false)) + .WillOnce(Invoke([&](Buffer::Instance& buffer, bool) -> void { + EXPECT_EQ("upgrade request", buffer.toString()); + })); + + // Simulate an existing connection that's never been used. + EXPECT_CALL(context_.cluster_manager_.tcp_conn_pool_, newConnection(_)) + .WillOnce( + Invoke([&](Tcp::ConnectionPool::Callbacks& cb) -> Tcp::ConnectionPool::Cancellable* { + context_.cluster_manager_.tcp_conn_pool_.newConnectionImpl(cb); + + EXPECT_CALL(*protocol_, supportsUpgrade()).WillOnce(Return(true)); + + EXPECT_CALL(*protocol_, attemptUpgrade(_, _, _)) + .WillOnce(Invoke([&](Transport&, ThriftConnectionState&, + Buffer::Instance& buffer) -> ThriftObjectPtr { + buffer.add("upgrade request"); + return ThriftObjectPtr{upgrade_response}; + })); + + context_.cluster_manager_.tcp_conn_pool_.poolReady(upstream_connection_); + return nullptr; + })); + + startRequest(MessageType::Call); + + EXPECT_NE(nullptr, upstream_callbacks_); + + Buffer::OwnedImpl buffer; + EXPECT_CALL(*upgrade_response, onData(Ref(buffer))).WillOnce(Return(false)); + upstream_callbacks_->onUpstreamData(buffer, false); + + EXPECT_CALL(*upgrade_response, onData(Ref(buffer))).WillOnce(Return(true)); + EXPECT_CALL(*protocol_, completeUpgrade(_, Ref(*upgrade_response))); + EXPECT_CALL(callbacks_, continueDecoding()); + EXPECT_CALL(*protocol_, writeMessageBegin(_, _)) + .WillOnce(Invoke([&](Buffer::Instance&, const MessageMetadata& metadata) -> void { + EXPECT_EQ(metadata_->methodName(), metadata.methodName()); + EXPECT_EQ(metadata_->messageType(), metadata.messageType()); + EXPECT_EQ(metadata_->sequenceId(), metadata.sequenceId()); + })); + upstream_callbacks_->onUpstreamData(buffer, false); + + // Then the actual request... 
+ sendTrivialStruct(FieldType::String); + completeRequest(); + returnResponse(); + destroyRouter(); +} + TEST_F(ThriftRouterTest, ProtocolUpgradeSkippedOnExistingConnection) { initializeRouter(); startRequest(MessageType::Call); diff --git a/test/per_file_coverage.sh b/test/per_file_coverage.sh index 8feb9ae3be6e..581588cc0b24 100755 --- a/test/per_file_coverage.sh +++ b/test/per_file_coverage.sh @@ -26,7 +26,6 @@ declare -a KNOWN_LOW_COVERAGE=( "source/extensions/common/wasm/v8:85.4" "source/extensions/common/wasm/null:77.8" "source/extensions/filters/network/sni_cluster:90.3" -"source/extensions/filters/network/thrift_proxy/router:96.0" "source/extensions/filters/network/sni_dynamic_forward_proxy:90.9" "source/extensions/filters/network/dubbo_proxy:96.1" "source/extensions/filters/network/dubbo_proxy/router:95.1" From 36288d52aed18045e0c9e470062142c4609cecce Mon Sep 17 00:00:00 2001 From: justin-mp Date: Wed, 22 Jul 2020 14:26:54 -0400 Subject: [PATCH 716/909] Only do DNS lookups for routes to Dynamic Forward Proxy clusters (#12207) We only need to do DNS lookups for routes to dynamic forward proxy clusters because the other cluster types handle DNS lookup themselves. Signed-off-by: Justin Mazzola Paluska --- docs/root/version_history/current.rst | 1 + .../filters/http/dynamic_forward_proxy/BUILD | 2 ++ .../dynamic_forward_proxy/proxy_filter.cc | 15 ++++++++ .../filters/http/dynamic_forward_proxy/BUILD | 2 ++ .../proxy_filter_test.cc | 34 +++++++++++++++++++ 5 files changed, 54 insertions(+) diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 47e8e2fbec95..e7cc10d49155 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -24,6 +24,7 @@ Bug Fixes *Changes expected to improve the state of the world and are unlikely to have negative effects* * csrf: fixed issues with regards to origin and host header parsing. +* dynamic_forward_proxy: only perform DNS lookups for routes to Dynamic Forward Proxy clusters since other cluster types handle DNS lookup themselves. * fault: fixed an issue with `active_faults` gauge not being decremented for when abort faults were injected. 
 Removed Config or Runtime
diff --git a/source/extensions/filters/http/dynamic_forward_proxy/BUILD b/source/extensions/filters/http/dynamic_forward_proxy/BUILD
index 6925f57c7115..56d4ff9be0b6 100644
--- a/source/extensions/filters/http/dynamic_forward_proxy/BUILD
+++ b/source/extensions/filters/http/dynamic_forward_proxy/BUILD
@@ -16,9 +16,11 @@ envoy_cc_library(
     deps = [
         "//include/envoy/http:filter_interface",
         "//source/common/runtime:runtime_features_lib",
+        "//source/extensions/clusters:well_known_names",
         "//source/extensions/common/dynamic_forward_proxy:dns_cache_interface",
         "//source/extensions/filters/http:well_known_names",
         "//source/extensions/filters/http/common:pass_through_filter_lib",
+        "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto",
         "@envoy_api//envoy/config/core/v3:pkg_cc_proto",
         "@envoy_api//envoy/extensions/filters/http/dynamic_forward_proxy/v3:pkg_cc_proto",
     ],
diff --git a/source/extensions/filters/http/dynamic_forward_proxy/proxy_filter.cc b/source/extensions/filters/http/dynamic_forward_proxy/proxy_filter.cc
index 1ab5569cc7e0..b41b0cf07d91 100644
--- a/source/extensions/filters/http/dynamic_forward_proxy/proxy_filter.cc
+++ b/source/extensions/filters/http/dynamic_forward_proxy/proxy_filter.cc
@@ -1,10 +1,12 @@
 #include "extensions/filters/http/dynamic_forward_proxy/proxy_filter.h"
 
+#include "envoy/config/cluster/v3/cluster.pb.h"
 #include "envoy/config/core/v3/base.pb.h"
 #include "envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.pb.h"
 
 #include "common/runtime/runtime_features.h"
 
+#include "extensions/clusters/well_known_names.h"
 #include "extensions/common/dynamic_forward_proxy/dns_cache.h"
 #include "extensions/filters/http/well_known_names.h"
 
@@ -18,6 +20,8 @@ struct ResponseStringValues {
   const std::string PendingRequestOverflow = "Dynamic forward proxy pending request overflow";
 };
 
+using CustomClusterType = envoy::config::cluster::v3::Cluster::CustomClusterType;
+
 using ResponseStrings = ConstSingleton<ResponseStringValues>;
 
 using LoadDnsCacheEntryStatus = Common::DynamicForwardProxy::DnsCache::LoadDnsCacheEntryStatus;
@@ -55,6 +59,17 @@ Http::FilterHeadersStatus ProxyFilter::decodeHeaders(Http::RequestHeaderMap& hea
   }
 
   cluster_info_ = cluster->info();
+  // We only need to do DNS lookups for hosts in dynamic forward proxy clusters,
+  // since the other cluster types do their own DNS management.
+ const absl::optional& cluster_type = cluster_info_->clusterType(); + if (!cluster_type) { + return Http::FilterHeadersStatus::Continue; + } + if (cluster_type->name() != + Envoy::Extensions::Clusters::ClusterTypes::get().DynamicForwardProxy) { + return Http::FilterHeadersStatus::Continue; + } + const bool should_use_dns_cache_circuit_breakers = Runtime::runtimeFeatureEnabled("envoy.reloadable_features.enable_dns_cache_circuit_breakers"); diff --git a/test/extensions/filters/http/dynamic_forward_proxy/BUILD b/test/extensions/filters/http/dynamic_forward_proxy/BUILD index 483d83fcf89b..58d9e9a92246 100644 --- a/test/extensions/filters/http/dynamic_forward_proxy/BUILD +++ b/test/extensions/filters/http/dynamic_forward_proxy/BUILD @@ -17,6 +17,7 @@ envoy_extension_cc_test( extension_name = "envoy.filters.http.dynamic_forward_proxy", deps = [ "//source/common/stats:isolated_store_lib", + "//source/extensions/clusters:well_known_names", "//source/extensions/common/dynamic_forward_proxy:dns_cache_impl", "//source/extensions/filters/http:well_known_names", "//source/extensions/filters/http/dynamic_forward_proxy:config", @@ -24,6 +25,7 @@ envoy_extension_cc_test( "//test/mocks/http:http_mocks", "//test/mocks/upstream:upstream_mocks", "//test/test_common:test_runtime_lib", + "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/http/dynamic_forward_proxy/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_test.cc b/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_test.cc index f1af413fc7e7..7a2bffbbcaec 100644 --- a/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_test.cc +++ b/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_test.cc @@ -1,5 +1,7 @@ +#include "envoy/config/cluster/v3/cluster.pb.h" #include "envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.pb.h" +#include "extensions/clusters/well_known_names.h" #include "extensions/common/dynamic_forward_proxy/dns_cache_impl.h" #include "extensions/filters/http/dynamic_forward_proxy/proxy_filter.h" #include "extensions/filters/http/well_known_names.h" @@ -21,6 +23,8 @@ namespace HttpFilters { namespace DynamicForwardProxy { namespace { +using CustomClusterType = envoy::config::cluster::v3::Cluster::CustomClusterType; + using LoadDnsCacheEntryStatus = Common::DynamicForwardProxy::DnsCache::LoadDnsCacheEntryStatus; using MockLoadDnsCacheEntryResult = Common::DynamicForwardProxy::MockDnsCache::MockLoadDnsCacheEntryResult; @@ -44,6 +48,12 @@ class ProxyFilterTest : public testing::Test, EXPECT_CALL(callbacks_, connection()).Times(AtLeast(0)); EXPECT_CALL(callbacks_, streamId()).Times(AtLeast(0)); + // Configure upstream cluster to be a Dynamic Forward Proxy since that's the + // kind we need to do DNS entries for. + CustomClusterType cluster_type; + cluster_type.set_name(Envoy::Extensions::Clusters::ClusterTypes::get().DynamicForwardProxy); + cm_.thread_local_cluster_.cluster_.info_->cluster_type_ = cluster_type; + // Configure max pending to 1 so we can test circuit breaking. cm_.thread_local_cluster_.cluster_.info_->resetResourceManager(0, 1, 0, 0, 0); } @@ -237,6 +247,30 @@ TEST_F(ProxyFilterTest, NoCluster) { EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false)); } +// No cluster type leads to skipping DNS lookups. 
+TEST_F(ProxyFilterTest, NoClusterType) { + cm_.thread_local_cluster_.cluster_.info_->cluster_type_ = absl::nullopt; + + InSequence s; + + EXPECT_CALL(callbacks_, route()); + EXPECT_CALL(cm_, get(_)); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false)); +} + +// Cluster that isn't a dynamic forward proxy cluster +TEST_F(ProxyFilterTest, NonDynamicForwardProxy) { + CustomClusterType cluster_type; + cluster_type.set_name(Envoy::Extensions::Clusters::ClusterTypes::get().Static); + cm_.thread_local_cluster_.cluster_.info_->cluster_type_ = cluster_type; + + InSequence s; + + EXPECT_CALL(callbacks_, route()); + EXPECT_CALL(cm_, get(_)); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false)); +} + TEST_F(ProxyFilterTest, HostRewrite) { Upstream::ResourceAutoIncDec* circuit_breakers_( new Upstream::ResourceAutoIncDec(pending_requests_)); From af8aa37c2148c7dc9a064f36e8d14d0859d35cb2 Mon Sep 17 00:00:00 2001 From: justin-mp Date: Wed, 22 Jul 2020 14:27:52 -0400 Subject: [PATCH 717/909] Add use_tcp_for_dns_lookups option to Dynamic Forward Proxy DNS cache (#12217) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We will want the same DNS configuration options from the Cluster in the Dynamic Forward Proxy’s DNS configuration. Signed-off-by: Justin Mazzola Paluska --- .../dynamic_forward_proxy/v3/dns_cache.proto | 6 +++- docs/root/version_history/current.rst | 2 ++ .../dynamic_forward_proxy/v3/dns_cache.proto | 6 +++- .../dynamic_forward_proxy/dns_cache_impl.cc | 3 +- .../dns_cache_impl_test.cc | 28 +++++++++++++++++++ 5 files changed, 42 insertions(+), 3 deletions(-) diff --git a/api/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto b/api/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto index d801e83fa008..79cd583486ac 100644 --- a/api/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto +++ b/api/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto @@ -27,7 +27,7 @@ message DnsCacheCircuitBreakers { // Configuration for the dynamic forward proxy DNS cache. See the :ref:`architecture overview // ` for more information. -// [#next-free-field: 8] +// [#next-free-field: 9] message DnsCacheConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.common.dynamic_forward_proxy.v2alpha.DnsCacheConfig"; @@ -95,4 +95,8 @@ message DnsCacheConfig { // If `envoy.reloadable_features.enable_dns_cache_circuit_breakers` is enabled, // envoy will use dns cache circuit breakers with default settings even if this value is not set. DnsCacheCircuitBreakers dns_cache_circuit_breaker = 7; + + // [#next-major-version: Reconcile DNS options in a single message.] + // Always use TCP queries instead of UDP queries for DNS lookups. + bool use_tcp_for_dns_lookups = 8; } diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index e7cc10d49155..38efea242eeb 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -37,6 +37,8 @@ Removed Config or Runtime New Features ------------ + +* dynamic_forward_proxy: added :ref:`use_tcp_for_dns_lookups` option to use TCP for DNS lookups in order to match the DNS options for :ref:`Clusters`. * ext_authz filter: added support for emitting dynamic metadata for both :ref:`HTTP ` and :ref:`network ` filters. * grpc-json: support specifying `response_body` field in for `google.api.HttpBody` message. 
* http: introduced new HTTP/1 and HTTP/2 codec implementations that will remove the use of exceptions for control flow due to high risk factors and instead use error statuses. The old behavior is deprecated, but can be used during the removal period by setting the runtime feature `envoy.reloadable_features.new_codec_behavior` to false. The removal period will be one month. diff --git a/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto b/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto index d801e83fa008..79cd583486ac 100644 --- a/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto +++ b/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto @@ -27,7 +27,7 @@ message DnsCacheCircuitBreakers { // Configuration for the dynamic forward proxy DNS cache. See the :ref:`architecture overview // ` for more information. -// [#next-free-field: 8] +// [#next-free-field: 9] message DnsCacheConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.common.dynamic_forward_proxy.v2alpha.DnsCacheConfig"; @@ -95,4 +95,8 @@ message DnsCacheConfig { // If `envoy.reloadable_features.enable_dns_cache_circuit_breakers` is enabled, // envoy will use dns cache circuit breakers with default settings even if this value is not set. DnsCacheCircuitBreakers dns_cache_circuit_breaker = 7; + + // [#next-major-version: Reconcile DNS options in a single message.] + // Always use TCP queries instead of UDP queries for DNS lookups. + bool use_tcp_for_dns_lookups = 8; } diff --git a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc index a75fcb337aec..b2e2d5defce1 100644 --- a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc +++ b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc @@ -20,7 +20,8 @@ DnsCacheImpl::DnsCacheImpl( const envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig& config) : main_thread_dispatcher_(main_thread_dispatcher), dns_lookup_family_(Upstream::getDnsLookupFamilyFromEnum(config.dns_lookup_family())), - resolver_(main_thread_dispatcher.createDnsResolver({}, false)), tls_slot_(tls.allocateSlot()), + resolver_(main_thread_dispatcher.createDnsResolver({}, config.use_tcp_for_dns_lookups())), + tls_slot_(tls.allocateSlot()), scope_(root_scope.createScope(fmt::format("dns_cache.{}.", config.name()))), stats_(generateDnsCacheStats(*scope_)), resource_manager_(*scope_, loader, config.name(), config.dns_cache_circuit_breaker()), diff --git a/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc b/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc index 9e99ee4b956f..c12f94d4e99b 100644 --- a/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc +++ b/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc @@ -671,6 +671,34 @@ TEST_F(DnsCacheImplTest, ClustersCircuitBreakersOverflow) { EXPECT_EQ(0, TestUtility::findCounter(store_, "dns_cache.foo.dns_rq_pending_overflow")->value()); } +TEST(DnsCacheImplOptionsTest, UseTcpForDnsLookupsOptionSet) { + NiceMock dispatcher; + std::shared_ptr resolver{std::make_shared()}; + NiceMock tls; + NiceMock random; + NiceMock loader; + Stats::IsolatedStoreImpl store; + + envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig config; + config.set_use_tcp_for_dns_lookups(true); + EXPECT_CALL(dispatcher, 
createDnsResolver(_, true)).WillOnce(Return(resolver)); + DnsCacheImpl dns_cache_(dispatcher, tls, random, loader, store, config); +} + +TEST(DnsCacheImplOptionsTest, UseTcpForDnsLookupsOptionUnSet) { + NiceMock dispatcher; + std::shared_ptr resolver{std::make_shared()}; + NiceMock tls; + NiceMock random; + NiceMock loader; + Stats::IsolatedStoreImpl store; + + envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig config; + config.set_use_tcp_for_dns_lookups(false); + EXPECT_CALL(dispatcher, createDnsResolver(_, false)).WillOnce(Return(resolver)); + DnsCacheImpl dns_cache_(dispatcher, tls, random, loader, store, config); +} + // DNS cache manager config tests. TEST(DnsCacheManagerImplTest, LoadViaConfig) { NiceMock dispatcher; From f9773269566002fb79593acc2d7089c897d67ca1 Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Wed, 22 Jul 2020 14:12:24 -0700 Subject: [PATCH 718/909] ci: enable full test on arm64 (#12124) Risk Level: Low Testing: CI Docs Changes: N/A Release Notes: N/A (should be added when docker build is enabled) Part of #1861 Signed-off-by: Lizan Zhou --- .azure-pipelines/pipelines.yml | 9 ++++++--- .bazelrc | 1 + ci/build_setup.sh | 2 ++ ci/do_ci.sh | 2 +- 4 files changed, 10 insertions(+), 4 deletions(-) diff --git a/.azure-pipelines/pipelines.yml b/.azure-pipelines/pipelines.yml index 30ed536530ed..83b51e4e32f0 100644 --- a/.azure-pipelines/pipelines.yml +++ b/.azure-pipelines/pipelines.yml @@ -51,17 +51,20 @@ jobs: ciTarget: bazel.release - job: release_arm64 - displayName: "Linux-arm64 release.server_only" + displayName: "Linux-arm64 release" dependsOn: ["format"] - condition: ne(variables['Build.Reason'], 'PullRequest') + # For master builds, continue even if format fails + condition: and(not(canceled()), or(succeeded(), ne(variables['Build.Reason'], 'PullRequest'))) + timeoutInMinutes: 360 pool: "arm-large" steps: - template: bazel.yml parameters: managedAgent: false - ciTarget: bazel.release.server_only + ciTarget: bazel.release rbe: false artifactSuffix: ".arm64" + bazelBuildExtraOptions: "--sandbox_base=/tmp/sandbox_base" - job: bazel displayName: "Linux-x64" diff --git a/.bazelrc b/.bazelrc index 85db6cdc6933..3c61e6d8c32d 100644 --- a/.bazelrc +++ b/.bazelrc @@ -23,6 +23,7 @@ build --enable_platform_specific_config # Enable position independent code, this option is not supported on Windows and default on on macOS. build:linux --copt=-fPIC build:linux --cxxopt=-std=c++17 +build:linux --conlyopt=-fexceptions # We already have absl in the build, define absl=1 to tell googletest to use absl for backtrace. build --define absl=1 diff --git a/ci/build_setup.sh b/ci/build_setup.sh index 97419a425618..d8a62e1c8193 100755 --- a/ci/build_setup.sh +++ b/ci/build_setup.sh @@ -92,6 +92,8 @@ export BAZEL_BUILD_OPTIONS="--verbose_failures ${BAZEL_OPTIONS} --action_env=HOM --repository_cache=${BUILD_DIR}/repository_cache --experimental_repository_cache_hardlinks \ ${BAZEL_BUILD_EXTRA_OPTIONS} ${BAZEL_EXTRA_TEST_OPTIONS}" +[[ "$(uname -m)" == "aarch64" ]] && BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS} --define=hot_restart=disabled --test_env=HEAPCHECK=" + [[ "${BAZEL_EXPUNGE}" == "1" ]] && "${BAZEL}" clean --expunge # Also setup some space for building Envoy standalone. diff --git a/ci/do_ci.sh b/ci/do_ci.sh index 344026c5e88e..620efefb8cc1 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -110,7 +110,7 @@ if [[ "$CI_TARGET" == "bazel.release" ]]; then # toolchain is kept consistent. 
This ifdef is checked in # test/common/stats/stat_test_utility.cc when computing # Stats::TestUtil::MemoryTest::mode(). - BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS} --test_env=ENVOY_MEMORY_TEST_EXACT=true" + [[ "$(uname -m)" == "x86_64" ]] && BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS} --test_env=ENVOY_MEMORY_TEST_EXACT=true" setup_clang_toolchain echo "bazel release build with tests..." From 31cb2b44b02f5a1e59dbea97d3b31675e809435f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Guti=C3=A9rrez=20Segal=C3=A9s?= Date: Wed, 22 Jul 2020 18:33:15 -0400 Subject: [PATCH 719/909] aws_request_signing: fix coverage (#12229) Fixes #11990. Signed-off-by: Raul Gutierrez Segales --- .../aws_request_signing_filter_test.cc | 12 ++++++++++++ test/per_file_coverage.sh | 1 - 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/test/extensions/filters/http/aws_request_signing/aws_request_signing_filter_test.cc b/test/extensions/filters/http/aws_request_signing/aws_request_signing_filter_test.cc index e53ec917318a..b280b21eee92 100644 --- a/test/extensions/filters/http/aws_request_signing/aws_request_signing_filter_test.cc +++ b/test/extensions/filters/http/aws_request_signing/aws_request_signing_filter_test.cc @@ -72,6 +72,18 @@ TEST_F(AwsRequestSigningFilterTest, SignFails) { EXPECT_EQ(1UL, filter_config_->stats_.signing_failed_.value()); } +// Verify FilterConfigImpl's getters. +TEST_F(AwsRequestSigningFilterTest, FilterConfigImplGetters) { + Stats::IsolatedStoreImpl stats; + auto signer = std::make_unique(); + const auto* signer_ptr = signer.get(); + FilterConfigImpl config(std::move(signer), "prefix", stats, "foo"); + + EXPECT_EQ(signer_ptr, &config.signer()); + EXPECT_EQ(0UL, config.stats().signing_added_.value()); + EXPECT_EQ("foo", config.hostRewrite()); +} + } // namespace } // namespace AwsRequestSigningFilter } // namespace HttpFilters diff --git a/test/per_file_coverage.sh b/test/per_file_coverage.sh index 581588cc0b24..ba62f6adcd92 100755 --- a/test/per_file_coverage.sh +++ b/test/per_file_coverage.sh @@ -38,7 +38,6 @@ declare -a KNOWN_LOW_COVERAGE=( "source/extensions/filters/http/ip_tagging:91.2" "source/extensions/filters/http/grpc_json_transcoder:93.3" "source/extensions/filters/http/aws_lambda:96.4" -"source/extensions/filters/http/aws_request_signing:93.3" "source/extensions/filters/listener:96.0" "source/extensions/filters/listener/tls_inspector:92.4" "source/extensions/filters/listener/http_inspector:93.3" From 8b34cb109f312c466e9fab8c9bc306dab0f0df82 Mon Sep 17 00:00:00 2001 From: Martin Matusiak Date: Thu, 23 Jul 2020 08:43:56 +1000 Subject: [PATCH 720/909] router: add envoy-ratelimited retry policy (#12201) Adds new retry policy envoy-ratelimited that retries responses containing the header x-envoy-ratelimited. 
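As a quick illustration (not part of the patch itself), the standalone sketch below mimics how the new policy bit gates retries on responses that carry x-envoy-ratelimited. The bit values mirror the RetryPolicy constants in include/envoy/router/router.h (RETRY_ON_ENVOY_RATE_LIMITED is the constant added below; RETRY_ON_5XX is assumed to be the existing 0x1 bit), and wouldRetry() is a hypothetical stand-in for RetryStateImpl::wouldRetryFromHeaders().

// Standalone sketch, not Envoy code: shows the header/policy interaction described above.
#include <cstdint>
#include <iostream>

constexpr uint32_t RETRY_ON_5XX = 0x1;                    // existing policy bit (assumed value)
constexpr uint32_t RETRY_ON_ENVOY_RATE_LIMITED = 0x2000;  // bit added by this patch

// Hypothetical stand-in for RetryStateImpl::wouldRetryFromHeaders().
bool wouldRetry(uint32_t retry_on, bool has_envoy_ratelimited_header, int status) {
  // A response tagged x-envoy-ratelimited is retried only when the
  // envoy-ratelimited policy is in effect; otherwise it is never retried.
  if (has_envoy_ratelimited_header) {
    return (retry_on & RETRY_ON_ENVOY_RATE_LIMITED) != 0;
  }
  return (retry_on & RETRY_ON_5XX) != 0 && status >= 500 && status < 600;
}

int main() {
  // "x-envoy-retry-on: 5xx" alone still refuses to retry a rate limited 429.
  std::cout << wouldRetry(RETRY_ON_5XX, true, 429) << "\n"; // prints 0
  // "x-envoy-retry-on: 5xx,envoy-ratelimited" allows the retry.
  std::cout << wouldRetry(RETRY_ON_5XX | RETRY_ON_ENVOY_RATE_LIMITED, true, 429) << "\n"; // prints 1
}

In other words, with only "5xx" configured a rate limited 429 is still not retried; adding "envoy-ratelimited" to x-envoy-retry-on (or to the route or virtual host retry policy) enables the retry, as documented in the router filter docs below.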
Signed-off-by: Martin Matusiak --- .../http/http_filters/router_filter.rst | 14 +++++++++++--- docs/root/version_history/current.rst | 3 +++ include/envoy/router/router.h | 1 + source/common/http/headers.h | 1 + source/common/router/retry_state_impl.cc | 8 ++++++-- test/common/router/retry_state_impl_test.cc | 16 ++++++++++++++++ 6 files changed, 38 insertions(+), 5 deletions(-) diff --git a/docs/root/configuration/http/http_filters/router_filter.rst b/docs/root/configuration/http/http_filters/router_filter.rst index 14638a2c3d14..1446c02c0256 100644 --- a/docs/root/configuration/http/http_filters/router_filter.rst +++ b/docs/root/configuration/http/http_filters/router_filter.rst @@ -94,6 +94,12 @@ connect-failure configuration ` or via :ref:`virtual host retry policy `. +.. _config_http_filters_router_retry_policy-envoy-ratelimited: + +envoy-ratelimited + Envoy will retry if the header :ref:`x-envoy-ratelimited` + is present. + retriable-4xx Envoy will attempt a retry if the upstream server responds with a retriable 4xx response code. Currently, the only response code in this category is 409. @@ -294,9 +300,11 @@ information. x-envoy-ratelimited ^^^^^^^^^^^^^^^^^^^ -If this header is set by upstream, Envoy will not retry. Currently the value of the header is not -looked at, only its presence. This header is set by :ref:`rate limit filter` -when the request is rate limited. +If this header is set by upstream, Envoy will not retry unless the retry policy +:ref:`envoy-ratelimited` +is enabled. Currently, the value of the header is not looked at, only its +presence. This header is set by :ref:`rate limit +filter` when the request is rate limited. .. _config_http_filters_router_headers_set: diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 38efea242eeb..18d56f2994b0 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -44,6 +44,9 @@ New Features * http: introduced new HTTP/1 and HTTP/2 codec implementations that will remove the use of exceptions for control flow due to high risk factors and instead use error statuses. The old behavior is deprecated, but can be used during the removal period by setting the runtime feature `envoy.reloadable_features.new_codec_behavior` to false. The removal period will be one month. * load balancer: added a :ref:`configuration` option to specify the active request bias used by the least request load balancer. * redis: added fault injection support :ref:`fault injection for redis proxy `, described further in :ref:`configuration documentation `. +* router: added new + :ref:`envoy-ratelimited` + retry policy, which allows retrying envoy's own rate limited responses. * stats: added optional histograms to :ref:`cluster stats ` that track headers and body sizes of requests and responses. * tap: added :ref:`generic body matcher` to scan http requests and responses for text or hex patterns. 
diff --git a/include/envoy/router/router.h b/include/envoy/router/router.h index 27ab91591d9f..35449ec4cf70 100644 --- a/include/envoy/router/router.h +++ b/include/envoy/router/router.h @@ -169,6 +169,7 @@ class RetryPolicy { static const uint32_t RETRY_ON_RETRIABLE_STATUS_CODES = 0x400; static const uint32_t RETRY_ON_RESET = 0x800; static const uint32_t RETRY_ON_RETRIABLE_HEADERS = 0x1000; + static const uint32_t RETRY_ON_ENVOY_RATE_LIMITED = 0x2000; // clang-format on virtual ~RetryPolicy() = default; diff --git a/source/common/http/headers.h b/source/common/http/headers.h index b13a683d80a9..5906f02b794c 100644 --- a/source/common/http/headers.h +++ b/source/common/http/headers.h @@ -252,6 +252,7 @@ class HeaderValues { const std::string _5xx{"5xx"}; const std::string GatewayError{"gateway-error"}; const std::string ConnectFailure{"connect-failure"}; + const std::string EnvoyRateLimited{"envoy-ratelimited"}; const std::string RefusedStream{"refused-stream"}; const std::string Retriable4xx{"retriable-4xx"}; const std::string RetriableStatusCodes{"retriable-status-codes"}; diff --git a/source/common/router/retry_state_impl.cc b/source/common/router/retry_state_impl.cc index 043a726d3464..9912f041709e 100644 --- a/source/common/router/retry_state_impl.cc +++ b/source/common/router/retry_state_impl.cc @@ -23,6 +23,7 @@ namespace Router { const uint32_t RetryPolicy::RETRY_ON_5XX; const uint32_t RetryPolicy::RETRY_ON_GATEWAY_ERROR; const uint32_t RetryPolicy::RETRY_ON_CONNECT_FAILURE; +const uint32_t RetryPolicy::RETRY_ON_ENVOY_RATE_LIMITED; const uint32_t RetryPolicy::RETRY_ON_RETRIABLE_4XX; const uint32_t RetryPolicy::RETRY_ON_RETRIABLE_HEADERS; const uint32_t RetryPolicy::RETRY_ON_RETRIABLE_STATUS_CODES; @@ -169,6 +170,8 @@ std::pair RetryStateImpl::parseRetryOn(absl::string_view config) ret |= RetryPolicy::RETRY_ON_GATEWAY_ERROR; } else if (retry_on == Http::Headers::get().EnvoyRetryOnValues.ConnectFailure) { ret |= RetryPolicy::RETRY_ON_CONNECT_FAILURE; + } else if (retry_on == Http::Headers::get().EnvoyRetryOnValues.EnvoyRateLimited) { + ret |= RetryPolicy::RETRY_ON_ENVOY_RATE_LIMITED; } else if (retry_on == Http::Headers::get().EnvoyRetryOnValues.Retriable4xx) { ret |= RetryPolicy::RETRY_ON_RETRIABLE_4XX; } else if (retry_on == Http::Headers::get().EnvoyRetryOnValues.RefusedStream) { @@ -290,9 +293,10 @@ RetryStatus RetryStateImpl::shouldHedgeRetryPerTryTimeout(DoRetryCallback callba } bool RetryStateImpl::wouldRetryFromHeaders(const Http::ResponseHeaderMap& response_headers) { - // We never retry if the request is rate limited. + // A response that contains the x-envoy-ratelimited header comes from an upstream envoy. + // We retry these only when the envoy-ratelimited policy is in effect. 
if (response_headers.EnvoyRateLimited() != nullptr) { - return false; + return retry_on_ & RetryPolicy::RETRY_ON_ENVOY_RATE_LIMITED; } if (retry_on_ & RetryPolicy::RETRY_ON_5XX) { diff --git a/test/common/router/retry_state_impl_test.cc b/test/common/router/retry_state_impl_test.cc index fed1d6cc3fc3..6f3d6441baaf 100644 --- a/test/common/router/retry_state_impl_test.cc +++ b/test/common/router/retry_state_impl_test.cc @@ -218,6 +218,22 @@ TEST_F(RouterRetryStateImplTest, PolicyResourceExhaustedRemoteRateLimited) { EXPECT_EQ(RetryStatus::No, state_->shouldRetryHeaders(response_headers, callback_)); } +TEST_F(RouterRetryStateImplTest, PolicyEnvoyRateLimitedRemoteRateLimited) { + Http::TestRequestHeaderMapImpl request_headers{{"x-envoy-retry-on", "envoy-ratelimited"}}; + setup(request_headers); + EXPECT_TRUE(state_->enabled()); + + expectTimerCreateAndEnable(); + Http::TestResponseHeaderMapImpl response_headers{{":status", "429"}, + {"x-envoy-ratelimited", "true"}}; + EXPECT_EQ(RetryStatus::Yes, state_->shouldRetryHeaders(response_headers, callback_)); + EXPECT_CALL(callback_ready_, ready()); + retry_timer_->invokeCallback(); + + EXPECT_EQ(RetryStatus::NoRetryLimitExceeded, + state_->shouldRetryHeaders(response_headers, callback_)); +} + TEST_F(RouterRetryStateImplTest, PolicyGatewayErrorRemote502) { verifyPolicyWithRemoteResponse("gateway-error" /* retry_on */, "502" /* response_status */, false /* is_grpc */); From 9a4980cf2aa67916376747677b754b55dc189d3a Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Wed, 22 Jul 2020 18:55:55 -0400 Subject: [PATCH 721/909] test: fixing a race in cx_limit_integration_test (#12233) Signed-off-by: Alyssa Wilk --- test/integration/cx_limit_integration_test.cc | 20 +++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/test/integration/cx_limit_integration_test.cc b/test/integration/cx_limit_integration_test.cc index abdc5711fe2e..126c5c236664 100644 --- a/test/integration/cx_limit_integration_test.cc +++ b/test/integration/cx_limit_integration_test.cc @@ -37,6 +37,24 @@ class ConnectionLimitIntegrationTest : public testing::TestWithParam init_func, std::string&& check_stat) { init_func(); @@ -68,6 +86,8 @@ class ConnectionLimitIntegrationTest : public testing::TestWithParamclose(); ASSERT_TRUE(raw_conns.front()->waitForDisconnect()); + // Make sure to not try to connect again until the acceptedSocketCount is updated. 
+ ASSERT_TRUE(waitForConnections(1)); tcp_clients.emplace_back(makeTcpConnection(lookupPort("listener_0"))); raw_conns.emplace_back(); ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(raw_conns.back())); From 958745d658752f90f544296d9e75030519a9fb84 Mon Sep 17 00:00:00 2001 From: Sam Flattery <44659644+samflattery@users.noreply.github.com> Date: Thu, 23 Jul 2020 00:00:44 +0100 Subject: [PATCH 722/909] fuzz: add verification to xDS fuzzer (#12132) - ensure that all listeners are in the correct state using config dumps and stats - adds a verification class that holds an abstract representation of the state Envoy should be in which is then compared to Envoy's actual state from the config dumps - currently working on also actually draining the listeners to ensure they are in the correct state during/after but Envoy crashes when trying to drain using simulated time so I'm looking into this for a future PR - I also added a few more corpus entries that had failed previously on libfuzzer removed removeRoute since RDS cannot explicitly remove routes, it's up to Envoy to remove them when no listeners refer to them Risk Level: Low Testing: ran with libfuzzer, fixes previous timeouts/crashes Docs Changes: N/A Release Notes: N/A Signed-off-by: Sam Flattery --- test/server/config_validation/BUILD | 19 ++ .../config_validation/xds_corpus/example1 | 5 - .../config_validation/xds_corpus/example10 | 22 ++ .../config_validation/xds_corpus/example13 | 57 ++++ .../config_validation/xds_corpus/example2 | 5 - .../config_validation/xds_corpus/example4 | 26 ++ .../config_validation/xds_corpus/example5 | 15 - .../config_validation/xds_corpus/example6 | 24 ++ .../config_validation/xds_corpus/example7 | 11 + .../config_validation/xds_corpus/example8 | 15 + .../config_validation/xds_corpus/example9 | 21 ++ test/server/config_validation/xds_fuzz.cc | 244 +++++++++----- test/server/config_validation/xds_fuzz.h | 33 +- test/server/config_validation/xds_fuzz.proto | 6 - test/server/config_validation/xds_verifier.cc | 304 ++++++++++++++++++ test/server/config_validation/xds_verifier.h | 81 +++++ 16 files changed, 768 insertions(+), 120 deletions(-) create mode 100644 test/server/config_validation/xds_corpus/example10 create mode 100644 test/server/config_validation/xds_corpus/example13 create mode 100644 test/server/config_validation/xds_corpus/example4 create mode 100644 test/server/config_validation/xds_corpus/example6 create mode 100644 test/server/config_validation/xds_corpus/example7 create mode 100644 test/server/config_validation/xds_corpus/example8 create mode 100644 test/server/config_validation/xds_corpus/example9 create mode 100644 test/server/config_validation/xds_verifier.cc create mode 100644 test/server/config_validation/xds_verifier.h diff --git a/test/server/config_validation/BUILD b/test/server/config_validation/BUILD index d3358e63dfbc..83a4937749dc 100644 --- a/test/server/config_validation/BUILD +++ b/test/server/config_validation/BUILD @@ -112,14 +112,33 @@ envoy_proto_library( srcs = ["xds_fuzz.proto"], ) +envoy_cc_test_library( + name = "xds_verifier_lib", + srcs = ["xds_verifier.cc"], + hdrs = ["xds_verifier.h"], + deps = [ + ":xds_fuzz_proto_cc_proto", + "//source/common/common:assert_lib", + "//source/common/common:minimal_logger_lib", + "@envoy_api//envoy/admin/v3:pkg_cc_proto", + "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", + "@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto", + "@envoy_api//envoy/config/filter/network/http_connection_manager/v2:pkg_cc_proto", + 
"@envoy_api//envoy/config/listener/v3:pkg_cc_proto", + "@envoy_api//envoy/config/route/v3:pkg_cc_proto", + ], +) + envoy_cc_test_library( name = "xds_fuzz_lib", srcs = ["xds_fuzz.cc"], hdrs = ["xds_fuzz.h"], deps = [ ":xds_fuzz_proto_cc_proto", + ":xds_verifier_lib", "//test/integration:http_integration_lib", "@envoy_api//envoy/admin/v3:pkg_cc_proto", + "@envoy_api//envoy/api/v2:pkg_cc_proto", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", "@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto", diff --git a/test/server/config_validation/xds_corpus/example1 b/test/server/config_validation/xds_corpus/example1 index 9b99995b1e4e..be2117ef6f6c 100644 --- a/test/server/config_validation/xds_corpus/example1 +++ b/test/server/config_validation/xds_corpus/example1 @@ -1,8 +1,3 @@ -actions { - remove_route { - route_num: 1 - } -} actions { remove_listener { listener_num: 1 diff --git a/test/server/config_validation/xds_corpus/example10 b/test/server/config_validation/xds_corpus/example10 new file mode 100644 index 000000000000..6c1736bb4b9b --- /dev/null +++ b/test/server/config_validation/xds_corpus/example10 @@ -0,0 +1,22 @@ +actions { + add_listener { + route_num: 100728832 + } +} +actions { + add_route { + } +} +actions { + add_listener { + listener_num: 1 + route_num: 1 + } +} +actions { + add_route { + route_num: 1 + } +} +config { +} diff --git a/test/server/config_validation/xds_corpus/example13 b/test/server/config_validation/xds_corpus/example13 new file mode 100644 index 000000000000..62b21361326b --- /dev/null +++ b/test/server/config_validation/xds_corpus/example13 @@ -0,0 +1,57 @@ +actions { + add_listener { + listener_num: 2 + route_num: 3 + } +} +actions { + add_route { + route_num: 3 + } +} +actions { + add_route { + route_num: 3 + } +} +actions { + add_route { + route_num: 1 + } +} +actions { + add_listener { + listener_num: 0 + route_num: 1 + } +} +actions { + add_route { + route_num: 0 + } +} +actions { + remove_listener { + listener_num: 1 + } +} +actions { + add_listener { + listener_num: 2 + route_num: 3 + } +} +actions { + add_route { + route_num: 1 + } +} +actions { + add_listener { + listener_num: 2 + route_num: 3 + } +} +config { + sotw_or_delta: SOTW +} diff --git a/test/server/config_validation/xds_corpus/example2 b/test/server/config_validation/xds_corpus/example2 index 9b25dd51d01d..31c3023cacab 100644 --- a/test/server/config_validation/xds_corpus/example2 +++ b/test/server/config_validation/xds_corpus/example2 @@ -9,11 +9,6 @@ actions { route_num: 1 } } -actions { - remove_route { - route_num: 1 - } -} actions { add_route { route_num: 1 diff --git a/test/server/config_validation/xds_corpus/example4 b/test/server/config_validation/xds_corpus/example4 new file mode 100644 index 000000000000..3a7bb0203cdb --- /dev/null +++ b/test/server/config_validation/xds_corpus/example4 @@ -0,0 +1,26 @@ +actions { + add_listener { + listener_num: 1 + route_num: 2 + } +} +actions { + add_listener { + listener_num: 1 + route_num: 1 + } +} +actions { + add_route { + route_num: 1 + } +} +actions { + add_listener { + listener_num: 1 + route_num: 2 + } +} +config { + sotw_or_delta: DELTA +} diff --git a/test/server/config_validation/xds_corpus/example5 b/test/server/config_validation/xds_corpus/example5 index 037df55541f5..692d470f8c87 100644 --- a/test/server/config_validation/xds_corpus/example5 +++ b/test/server/config_validation/xds_corpus/example5 @@ -8,21 +8,11 @@ actions { listener_num: 7012368 } } -actions { - remove_route { 
- route_num: 14849 - } -} actions { add_route { route_num: 4261412864 } } -actions { - remove_route { - route_num: 14849 - } -} actions { remove_listener { listener_num: 7012388 @@ -33,11 +23,6 @@ actions { route_num: 7012388 } } -actions { - remove_route { - route_num: 7012388 - } -} actions { remove_listener { listener_num: 7012352 diff --git a/test/server/config_validation/xds_corpus/example6 b/test/server/config_validation/xds_corpus/example6 new file mode 100644 index 000000000000..ee8c9cd55c8b --- /dev/null +++ b/test/server/config_validation/xds_corpus/example6 @@ -0,0 +1,24 @@ +actions { + add_listener { + listener_num: 1 + route_num: 1 + } +} +actions { + add_route { + route_num: 1 + } +} +actions { + add_route { + route_num: 1 + } +} +actions { + remove_listener { + listener_num: 1 + } +} +config { + sotw_or_delta: DELTA +} diff --git a/test/server/config_validation/xds_corpus/example7 b/test/server/config_validation/xds_corpus/example7 new file mode 100644 index 000000000000..b54642b3bca2 --- /dev/null +++ b/test/server/config_validation/xds_corpus/example7 @@ -0,0 +1,11 @@ +actions { + add_route { + route_num: 1 + } +} +actions { + add_listener { + } +} +config { +} diff --git a/test/server/config_validation/xds_corpus/example8 b/test/server/config_validation/xds_corpus/example8 new file mode 100644 index 000000000000..604b7e8dd491 --- /dev/null +++ b/test/server/config_validation/xds_corpus/example8 @@ -0,0 +1,15 @@ +actions { + add_route { + route_num: 1 + } +} +actions { + add_listener { + } +} +actions { + remove_listener { + } +} +config { +} diff --git a/test/server/config_validation/xds_corpus/example9 b/test/server/config_validation/xds_corpus/example9 new file mode 100644 index 000000000000..7dc3edc7ac62 --- /dev/null +++ b/test/server/config_validation/xds_corpus/example9 @@ -0,0 +1,21 @@ +actions { + add_listener { + } +} +actions { + add_route { + route_num: 1 + } +} +actions { + add_listener { + listener_num: 1 + route_num: 1 + } +} +actions { + add_route { + } +} +config { +} diff --git a/test/server/config_validation/xds_fuzz.cc b/test/server/config_validation/xds_fuzz.cc index 6ff1c7cff283..a14d09cc0252 100644 --- a/test/server/config_validation/xds_fuzz.cc +++ b/test/server/config_validation/xds_fuzz.cc @@ -1,5 +1,6 @@ #include "test/server/config_validation/xds_fuzz.h" +#include "envoy/api/v2/route.pb.h" #include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/config/cluster/v3/cluster.pb.h" #include "envoy/config/endpoint/v3/endpoint.pb.h" @@ -20,14 +21,15 @@ XdsFuzzTest::buildClusterLoadAssignment(const std::string& name) { fake_upstreams_[0]->localAddress()->ip()->port(), api_version_); } -envoy::config::listener::v3::Listener XdsFuzzTest::buildListener(std::string listener_name, - std::string route_name) { +envoy::config::listener::v3::Listener XdsFuzzTest::buildListener(const std::string& listener_name, + const std::string& route_name) { return ConfigHelper::buildListener(listener_name, route_name, Network::Test::getLoopbackAddressString(ip_version_), "ads_test", api_version_); } -envoy::config::route::v3::RouteConfiguration XdsFuzzTest::buildRouteConfig(std::string route_name) { +envoy::config::route::v3::RouteConfiguration +XdsFuzzTest::buildRouteConfig(const std::string& route_name) { return ConfigHelper::buildRouteConfig(route_name, "cluster_0", api_version_); } @@ -36,17 +38,17 @@ void XdsFuzzTest::updateListener( const std::vector& listeners, const std::vector& added_or_updated, const std::vector& removed) { - ENVOY_LOG_MISC(info, 
"Sending Listener DiscoveryResponse version {}", version_); + ENVOY_LOG_MISC(debug, "Sending Listener DiscoveryResponse version {}", version_); sendDiscoveryResponse(Config::TypeUrl::get().Listener, listeners, added_or_updated, removed, std::to_string(version_)); } void XdsFuzzTest::updateRoute( - const std::vector routes, + const std::vector& routes, const std::vector& added_or_updated, const std::vector& removed) { - ENVOY_LOG_MISC(info, "Sending Route DiscoveryResponse version {}", version_); + ENVOY_LOG_MISC(debug, "Sending Route DiscoveryResponse version {}", version_); sendDiscoveryResponse( Config::TypeUrl::get().RouteConfiguration, routes, added_or_updated, removed, std::to_string(version_)); @@ -61,9 +63,8 @@ XdsFuzzTest::XdsFuzzTest(const test::server::config_validation::XdsTestCase& inp ? "GRPC" : "DELTA_GRPC", api_version)), - actions_(input.actions()), version_(1), api_version_(api_version), - ip_version_(TestEnvironment::getIpVersionsForTest()[0]), num_added_(0), num_modified_(0), - num_removed_(0) { + verifier_(input.config().sotw_or_delta()), actions_(input.actions()), version_(1), + api_version_(api_version), ip_version_(TestEnvironment::getIpVersionsForTest()[0]) { use_lds_ = false; create_xds_upstream_ = true; tls_xds_upstream_ = false; @@ -106,108 +107,80 @@ void XdsFuzzTest::close() { } /** - * remove a listener from the list of listeners if it exists - * @param the listener number to be removed - * @return the listener as an optional so that it can be used in a delta request + * @return true iff listener_name is in listeners_ (and removes it from the vector) */ -bool XdsFuzzTest::eraseListener(std::string listener_name) { - for (auto it = listeners_.begin(); it != listeners_.end(); ++it) { - if (it->name() == listener_name) { - listeners_.erase(it); - return true; - } - } - return false; +bool XdsFuzzTest::eraseListener(const std::string& listener_name) { + const auto orig_size = listeners_.size(); + listeners_.erase(std::remove_if(listeners_.begin(), listeners_.end(), + [&](auto& listener) { return listener.name() == listener_name; }), + listeners_.end()); + return orig_size != listeners_.size(); } /** - * remove a route from the list of routes if it exists - * @param the route number to be removed - * @return the route as an optional so that it can be used in a delta request + * @return true iff route_name has already been added to routes_ */ -bool XdsFuzzTest::eraseRoute(std::string route_name) { - for (auto it = routes_.begin(); it != routes_.end(); ++it) { - if (it->name() == route_name) { - routes_.erase(it); - return true; - } - } - return false; +bool XdsFuzzTest::hasRoute(const std::string& route_name) { + return std::any_of(routes_.begin(), routes_.end(), + [&](auto& route) { return route.name() == route_name; }); } /** * send an xDS response to add a listener and update state accordingly */ -void XdsFuzzTest::addListener(std::string listener_name, std::string route_name) { - ENVOY_LOG_MISC(info, "Adding {} with reference to {}", listener_name, route_name); +void XdsFuzzTest::addListener(const std::string& listener_name, const std::string& route_name) { + ENVOY_LOG_MISC(debug, "Adding {} with reference to {}", listener_name, route_name); bool removed = eraseListener(listener_name); auto listener = buildListener(listener_name, route_name); listeners_.push_back(listener); updateListener(listeners_, {listener}, {}); - // use waitForAck instead of compareDiscoveryRequest as the client makes - // additional discoveryRequests at launch that we might not want to - 
// respond to yet + + // use waitForAck instead of compareDiscoveryRequest as the client makes additional + // discoveryRequests at launch that we might not want to respond to yet EXPECT_TRUE(waitForAck(Config::TypeUrl::get().Listener, std::to_string(version_))); if (removed) { - num_modified_++; - test_server_->waitForCounterGe("listener_manager.listener_modified", num_modified_); + verifier_.listenerUpdated(listener); } else { - num_added_++; - test_server_->waitForCounterGe("listener_manager.listener_added", num_added_); + verifier_.listenerAdded(listener); } } /** * send an xDS response to remove a listener and update state accordingly */ -void XdsFuzzTest::removeListener(std::string listener_name) { - ENVOY_LOG_MISC(info, "Removing {}", listener_name); +void XdsFuzzTest::removeListener(const std::string& listener_name) { + ENVOY_LOG_MISC(debug, "Removing {}", listener_name); bool removed = eraseListener(listener_name); if (removed) { - num_removed_++; updateListener(listeners_, {}, {listener_name}); EXPECT_TRUE(waitForAck(Config::TypeUrl::get().Listener, std::to_string(version_))); - test_server_->waitForCounterGe("listener_manager.listener_removed", num_removed_); + verifier_.listenerRemoved(listener_name); } } /** * send an xDS response to add a route and update state accordingly */ -void XdsFuzzTest::addRoute(std::string route_name) { - ENVOY_LOG_MISC(info, "Adding {}", route_name); - bool removed = eraseRoute(route_name); +void XdsFuzzTest::addRoute(const std::string& route_name) { + ENVOY_LOG_MISC(debug, "Adding {}", route_name); + bool has_route = hasRoute(route_name); auto route = buildRouteConfig(route_name); routes_.push_back(route); - if (removed) { + if (has_route) { // if the route was already in routes_, don't send a duplicate add in delta request updateRoute(routes_, {}, {}); + verifier_.routeUpdated(route); } else { updateRoute(routes_, {route}, {}); + verifier_.routeAdded(route); } EXPECT_TRUE(waitForAck(Config::TypeUrl::get().RouteConfiguration, std::to_string(version_))); } -/** - * this is a no-op for now because it seems like routes cannot be removed - leaving a route out of - * a SOTW request does not remove it and sending a remove message in a delta request is ignored - */ -void XdsFuzzTest::removeRoute(std::string route_name) { - ENVOY_LOG_MISC(info, "Ignoring request to remove {}", route_name); - return; - - // TODO(samflattery): remove if it's true that routes cannot be removed - auto removed = eraseRoute(route_name); - if (removed) { - updateRoute(routes_, {}, {route_name}); - EXPECT_TRUE(waitForAck(Config::TypeUrl::get().RouteConfiguration, std::to_string(version_))); - } -} - /** * wait for a specific ACK, ignoring any other ACKs that are made in the meantime * @param the expected API type url of the ack @@ -220,7 +193,7 @@ AssertionResult XdsFuzzTest::waitForAck(const std::string& expected_type_url, API_NO_BOOST(envoy::api::v2::DiscoveryRequest) discovery_request; do { VERIFY_ASSERTION(xds_stream_->waitForGrpcMessage(*dispatcher_, discovery_request)); - ENVOY_LOG_MISC(info, "Received gRPC message with type {} and version {}", + ENVOY_LOG_MISC(debug, "Received gRPC message with type {} and version {}", discovery_request.type_url(), discovery_request.version_info()); } while (expected_type_url != discovery_request.type_url() || expected_version != discovery_request.version_info()); @@ -228,7 +201,7 @@ AssertionResult XdsFuzzTest::waitForAck(const std::string& expected_type_url, API_NO_BOOST(envoy::api::v2::DeltaDiscoveryRequest) delta_discovery_request; do { 
VERIFY_ASSERTION(xds_stream_->waitForGrpcMessage(*dispatcher_, delta_discovery_request)); - ENVOY_LOG_MISC(info, "Received gRPC message with type {}", + ENVOY_LOG_MISC(debug, "Received gRPC message with type {}", delta_discovery_request.type_url()); } while (expected_type_url != delta_discovery_request.type_url()); } @@ -253,9 +226,9 @@ void XdsFuzzTest::replay() { Config::TypeUrl::get().ClusterLoadAssignment, {buildClusterLoadAssignment("cluster_0")}, {buildClusterLoadAssignment("cluster_0")}, {}, "0"); - // the client will not subscribe to the RouteConfiguration type URL until it - // receives a listener, and the ACKS it sends back seem to be an empty type - // URL so just don't check them until a listener is added + // the client will not subscribe to the RouteConfiguration type URL until it receives a listener, + // and the ACKS it sends back seem to be an empty type URL so just don't check them until a + // listener is added bool sent_listener = false; for (const auto& action : actions_) { @@ -281,7 +254,7 @@ void XdsFuzzTest::replay() { } case test::server::config_validation::Action::kAddRoute: { if (!sent_listener) { - ENVOY_LOG_MISC(info, "Ignoring request to add route_{}", + ENVOY_LOG_MISC(debug, "Ignoring request to add route_{}", action.add_route().route_num() % RoutesMax); break; } @@ -290,18 +263,139 @@ void XdsFuzzTest::replay() { addRoute(route_name); break; } - case test::server::config_validation::Action::kRemoveRoute: { - std::string route_name = - absl::StrCat("route_config_", action.remove_route().route_num() % RoutesMax); - removeRoute(route_name); + default: break; } + if (sent_listener) { + // wait for all of the updates to take effect + test_server_->waitForGaugeEq("listener_manager.total_listeners_warming", + verifier_.numWarming()); + test_server_->waitForGaugeEq("listener_manager.total_listeners_active", + verifier_.numActive()); + test_server_->waitForGaugeEq("listener_manager.total_listeners_draining", + verifier_.numDraining()); + test_server_->waitForCounterEq("listener_manager.listener_modified", verifier_.numModified()); + test_server_->waitForCounterEq("listener_manager.listener_added", verifier_.numAdded()); + test_server_->waitForCounterEq("listener_manager.listener_removed", verifier_.numRemoved()); + } + ENVOY_LOG_MISC(debug, "warming {} ({}), active {} ({}), draining {} ({})", + verifier_.numWarming(), + test_server_->gauge("listener_manager.total_listeners_warming")->value(), + verifier_.numActive(), + test_server_->gauge("listener_manager.total_listeners_active")->value(), + verifier_.numDraining(), + test_server_->gauge("listener_manager.total_listeners_draining")->value()); + ENVOY_LOG_MISC(debug, "added {} ({}), modified {} ({}), removed {} ({})", verifier_.numAdded(), + test_server_->counter("listener_manager.listener_added")->value(), + verifier_.numModified(), + test_server_->counter("listener_manager.listener_modified")->value(), + verifier_.numRemoved(), + test_server_->counter("listener_manager.listener_removed")->value()); + } + + verifyState(); + close(); +} + +/** + * verify that each listener in the verifier has a matching listener in the config dump + */ +void XdsFuzzTest::verifyListeners() { + ENVOY_LOG_MISC(debug, "Verifying listeners"); + const auto& abstract_rep = verifier_.listeners(); + const auto dump = getListenersConfigDump().dynamic_listeners(); + + for (const auto& rep : abstract_rep) { + ENVOY_LOG_MISC(debug, "Verifying {} with state {}", rep.listener.name(), rep.state); + + auto listener_dump = std::find_if(dump.begin(), 
dump.end(), [&](auto& listener) { + return listener.name() == rep.listener.name(); + }); + + // there should be a listener of the same name in the dump + if (listener_dump == dump.end()) { + throw EnvoyException(fmt::format("Expected to find {} in config dump", rep.listener.name())); + } + + ENVOY_LOG_MISC(debug, "warm {}, active {}, drain: {}", listener_dump->has_warming_state(), + listener_dump->has_active_state(), listener_dump->has_draining_state()); + // the state should match + switch (rep.state) { + case XdsVerifier::DRAINING: + EXPECT_TRUE(listener_dump->has_draining_state()); + break; + case XdsVerifier::WARMING: + EXPECT_TRUE(listener_dump->has_warming_state()); + break; + case XdsVerifier::ACTIVE: + EXPECT_TRUE(listener_dump->has_active_state()); + break; default: NOT_REACHED_GCOVR_EXCL_LINE; } } +} - close(); +void XdsFuzzTest::verifyRoutes() { + auto dump = getRoutesConfigDump(); + + // go through routes in verifier and make sure each is in the config dump + auto routes = verifier_.routes(); + EXPECT_EQ(routes.size(), dump.size()); + for (const auto& route : routes) { + EXPECT_TRUE(std::any_of(dump.begin(), dump.end(), [&](const auto& dump_route) { + return route.first == dump_route.name(); + })); + } +} + +void XdsFuzzTest::verifyState() { + verifyListeners(); + ENVOY_LOG_MISC(debug, "Verified listeners"); + verifyRoutes(); + ENVOY_LOG_MISC(debug, "Verified routes"); + + EXPECT_EQ(test_server_->gauge("listener_manager.total_listeners_draining")->value(), + verifier_.numDraining()); + EXPECT_EQ(test_server_->gauge("listener_manager.total_listeners_warming")->value(), + verifier_.numWarming()); + EXPECT_EQ(test_server_->gauge("listener_manager.total_listeners_active")->value(), + verifier_.numActive()); + ENVOY_LOG_MISC(debug, "Verified stats"); + ENVOY_LOG_MISC(debug, "warming {} ({}), active {} ({}), draining {} ({})", verifier_.numWarming(), + test_server_->gauge("listener_manager.total_listeners_warming")->value(), + verifier_.numActive(), + test_server_->gauge("listener_manager.total_listeners_active")->value(), + verifier_.numDraining(), + test_server_->gauge("listener_manager.total_listeners_draining")->value()); +} + +envoy::admin::v3::ListenersConfigDump XdsFuzzTest::getListenersConfigDump() { + auto message_ptr = + test_server_->server().admin().getConfigTracker().getCallbacksMap().at("listeners")(); + return dynamic_cast(*message_ptr); +} + +std::vector XdsFuzzTest::getRoutesConfigDump() { + auto map = test_server_->server().admin().getConfigTracker().getCallbacksMap(); + + // there is no route config dump before envoy has a route + if (map.find("routes") == map.end()) { + return {}; + } + + auto message_ptr = map.at("routes")(); + auto dump = dynamic_cast(*message_ptr); + + // since the route config dump gives the RouteConfigurations as an Any, go through and cast them + // back to RouteConfigurations + std::vector dump_routes; + for (const auto& route : dump.dynamic_route_configs()) { + envoy::api::v2::RouteConfiguration dyn_route; + route.route_config().UnpackTo(&dyn_route); + dump_routes.push_back(dyn_route); + } + return dump_routes; } } // namespace Envoy diff --git a/test/server/config_validation/xds_fuzz.h b/test/server/config_validation/xds_fuzz.h index d4a6999c20a7..bdc37951c7f8 100644 --- a/test/server/config_validation/xds_fuzz.h +++ b/test/server/config_validation/xds_fuzz.h @@ -13,6 +13,7 @@ #include "test/config/utility.h" #include "test/integration/http_integration.h" #include "test/server/config_validation/xds_fuzz.pb.h" +#include 
"test/server/config_validation/xds_verifier.h" #include "absl/types/optional.h" @@ -28,17 +29,17 @@ class XdsFuzzTest : public HttpIntegrationTest { envoy::config::endpoint::v3::ClusterLoadAssignment buildClusterLoadAssignment(const std::string& name); - envoy::config::listener::v3::Listener buildListener(std::string listener_name, - std::string route_name); + envoy::config::listener::v3::Listener buildListener(const std::string& listener_name, + const std::string& route_name); - envoy::config::route::v3::RouteConfiguration buildRouteConfig(std::string route_name); + envoy::config::route::v3::RouteConfiguration buildRouteConfig(const std::string& route_name); void updateListener(const std::vector& listeners, const std::vector& added_or_updated, const std::vector& removed); void - updateRoute(const std::vector routes, + updateRoute(const std::vector& routes, const std::vector& added_or_updated, const std::vector& removed); @@ -50,16 +51,24 @@ class XdsFuzzTest : public HttpIntegrationTest { const size_t RoutesMax = 5; private: - void addListener(std::string listener_name, std::string route_name); - void removeListener(std::string listener_name); - void addRoute(std::string route_name); - void removeRoute(std::string route_name); + void addListener(const std::string& listener_name, const std::string& route_name); + void removeListener(const std::string& listener_name); + void addRoute(const std::string& route_name); - bool eraseListener(std::string listener_name); - bool eraseRoute(std::string route_num); + void verifyState(); + void verifyListeners(); + void verifyRoutes(); + + envoy::admin::v3::ListenersConfigDump getListenersConfigDump(); + std::vector getRoutesConfigDump(); + + bool eraseListener(const std::string& listener_name); + bool hasRoute(const std::string& route_num); AssertionResult waitForAck(const std::string& expected_type_url, const std::string& expected_version); + XdsVerifier verifier_; + Protobuf::RepeatedPtrField actions_; std::vector routes_; std::vector listeners_; @@ -68,10 +77,6 @@ class XdsFuzzTest : public HttpIntegrationTest { envoy::config::core::v3::ApiVersion api_version_; Network::Address::IpVersion ip_version_; - - uint32_t num_added_; - uint32_t num_modified_; - uint32_t num_removed_; }; } // namespace Envoy diff --git a/test/server/config_validation/xds_fuzz.proto b/test/server/config_validation/xds_fuzz.proto index 087ab0d71372..5a672282f946 100644 --- a/test/server/config_validation/xds_fuzz.proto +++ b/test/server/config_validation/xds_fuzz.proto @@ -23,11 +23,6 @@ message RemoveListener { uint32 listener_num = 1; } -message RemoveRoute { - // removes route_y - uint32 route_num = 1; -} - message Action { oneof action_selector { option (validate.required) = true; @@ -35,7 +30,6 @@ message Action { AddListener add_listener = 1; AddRoute add_route = 2; RemoveListener remove_listener = 3; - RemoveRoute remove_route = 4; } } diff --git a/test/server/config_validation/xds_verifier.cc b/test/server/config_validation/xds_verifier.cc new file mode 100644 index 000000000000..c6932a74bf79 --- /dev/null +++ b/test/server/config_validation/xds_verifier.cc @@ -0,0 +1,304 @@ +#include "test/server/config_validation/xds_verifier.h" + +#include "common/common/logger.h" + +namespace Envoy { + +XdsVerifier::XdsVerifier(test::server::config_validation::Config::SotwOrDelta sotw_or_delta) + : num_warming_(0), num_active_(0), num_draining_(0), num_added_(0), num_modified_(0), + num_removed_(0) { + if (sotw_or_delta == test::server::config_validation::Config::SOTW) { + 
sotw_or_delta_ = SOTW; + } else { + sotw_or_delta_ = DELTA; + } + ENVOY_LOG_MISC(debug, "sotw_or_delta_ = {}", sotw_or_delta_); +} + +/** + * get the route referenced by a listener + */ +std::string XdsVerifier::getRoute(const envoy::config::listener::v3::Listener& listener) { + envoy::config::listener::v3::Filter filter0 = listener.filter_chains()[0].filters()[0]; + envoy::config::filter::network::http_connection_manager::v2::HttpConnectionManager conn_man; + filter0.typed_config().UnpackTo(&conn_man); + return conn_man.rds().route_config_name(); +} + +/** + * @return true iff the route listener refers to is in all_routes_ + */ +bool XdsVerifier::hasRoute(const envoy::config::listener::v3::Listener& listener) { + return all_routes_.contains(getRoute(listener)); +} + +bool XdsVerifier::hasActiveRoute(const envoy::config::listener::v3::Listener& listener) { + return active_routes_.contains(getRoute(listener)); +} + +/** + * prints the currently stored listeners and their states + */ +void XdsVerifier::dumpState() { + ENVOY_LOG_MISC(debug, "Listener Dump:"); + for (const auto& rep : listeners_) { + ENVOY_LOG_MISC(debug, "Name: {}, Route {}, State: {}", rep.listener.name(), + getRoute(rep.listener), rep.state); + } +} + +/* + * if a listener is added for the first time, it will be added as active/warming depending on if + * envoy knows about its route config + * + * if a listener is updated (i.e. there is a already a listener by this name), there are 3 cases: + * 1. the old listener is active and the new is warming: + * - old will remain active + * - new will be added as warming, to replace the old when it gets its route + * 2. the old listener is active and new is active: + * - old is drained (seemingly instantaneously) + * - new is added as active + * 3. 
the old listener is warming and new is active/warming: + * - old is completely removed + * - new is added as warming/active as normal + */ + +/** + * update a listener when its route is changed, draining/removing the old listener and adding the + * updated listener + */ +void XdsVerifier::listenerUpdated(const envoy::config::listener::v3::Listener& listener) { + ENVOY_LOG_MISC(debug, "About to update listener {}", listener.name()); + dumpState(); + + if (std::any_of(listeners_.begin(), listeners_.end(), [&](auto& rep) { + return rep.listener.name() == listener.name() && + getRoute(listener) == getRoute(rep.listener) && rep.state != DRAINING; + })) { + ENVOY_LOG_MISC(debug, "Ignoring duplicate add of {}", listener.name()); + return; + } + + for (unsigned long i = 0; i < listeners_.size(); ++i) { + const auto& rep = listeners_[i]; + if (rep.listener.name() == listener.name()) { + if (rep.state == ACTIVE) { + num_modified_++; + if (hasActiveRoute(listener)) { + // if the new listener is ready to take traffic, the old listener will be removed + // it seems to be directly removed without being added to the config dump as draining + ENVOY_LOG_MISC(debug, "Removing {} after update", listener.name()); + num_active_--; + listeners_.erase(listeners_.begin() + i); + } else { + // if the new listener has not gotten its route yet, the old listener will remain active + // until that happens + ENVOY_LOG_MISC(debug, "Keeping {} as ACTIVE", listener.name()); + } + } else if (rep.state == WARMING) { + // if the old listener is warming, it will be removed and replaced with the new + ENVOY_LOG_MISC(debug, "Removed warming listener {}", listener.name()); + num_warming_--; + listeners_.erase(listeners_.begin() + i); + } + } + } + dumpState(); + listenerAdded(listener, true); +} + +/** + * add a new listener to listeners_ in either an active or warming state + * @param listener the listener to be added + * @param from_update whether this function was called from listenerUpdated, in which case + * num_added_ should not be incremented + */ +void XdsVerifier::listenerAdded(const envoy::config::listener::v3::Listener& listener, + bool from_update) { + if (!from_update) { + num_added_++; + } + + if (hasActiveRoute(listener)) { + ENVOY_LOG_MISC(debug, "Adding {} to listeners_ as ACTIVE", listener.name()); + listeners_.push_back({listener, ACTIVE}); + num_active_++; + } else { + num_warming_++; + ENVOY_LOG_MISC(debug, "Adding {} to listeners_ as WARMING", listener.name()); + listeners_.push_back({listener, WARMING}); + } + + ENVOY_LOG_MISC(debug, "listenerAdded({})", listener.name()); + dumpState(); +} + +/** + * remove a listener and drain it if it was active + * @param name the name of the listener to be removed + */ +void XdsVerifier::listenerRemoved(const std::string& name) { + bool found = false; + for (unsigned long i = 0; i < listeners_.size(); ++i) { + auto& rep = listeners_[i]; + if (rep.listener.name() != name) { + continue; + } + + if (rep.state == ACTIVE) { + // the listener will be drained before being removed + ENVOY_LOG_MISC(debug, "Changing {} to DRAINING", name); + num_removed_++; + num_active_--; + num_draining_++; + rep.state = DRAINING; + } else if (rep.state == WARMING) { + // the listener will be removed immediately + ENVOY_LOG_MISC(debug, "Removed warming listener {}", name); + listeners_.erase(listeners_.begin() + i); + num_warming_--; + } + } + + if (found) { + num_removed_++; + } +} + +/** + * after a SOTW update, see if any listeners that are currently warming can become active + */ +void 
XdsVerifier::updateSotwListeners() { + ASSERT(sotw_or_delta_ == SOTW); + for (auto& rep : listeners_) { + // check all_routes_, not active_routes_ since this is SOTW, so any inactive routes will become + // active if this listener refers to them + if (hasRoute(rep.listener) && rep.state == WARMING) { + // it should successfully warm now + ENVOY_LOG_MISC(debug, "Moving {} to ACTIVE state", rep.listener.name()); + + // if the route was not originally added as active, change it now + if (!hasActiveRoute(rep.listener)) { + std::string route_name = getRoute(rep.listener); + auto it = all_routes_.find(route_name); + // all added routes should be in all_routes_ in SOTW + ASSERT(it != all_routes_.end()); + active_routes_.insert({route_name, it->second}); + } + + // if there were any active listeners that were waiting to be updated, they will now be + // removed and the warming listener will take their place + markForRemoval(rep); + num_warming_--; + num_active_++; + rep.state = ACTIVE; + } + } + listeners_.erase(std::remove_if(listeners_.begin(), listeners_.end(), + [&](auto& listener) { return listener.state == REMOVED; }), + listeners_.end()); +} + +/** + * after a delta update, update any listeners that refer to the added route + */ +void XdsVerifier::updateDeltaListeners(const envoy::config::route::v3::RouteConfiguration& route) { + for (auto& rep : listeners_) { + if (getRoute(rep.listener) == route.name() && rep.state == WARMING) { + // it should successfully warm now + ENVOY_LOG_MISC(debug, "Moving {} to ACTIVE state", rep.listener.name()); + + // if there were any active listeners that were waiting to be updated, they will now be + // removed and the warming listener will take their place + markForRemoval(rep); + num_warming_--; + num_active_++; + rep.state = ACTIVE; + } + } + // erase any active listeners that were replaced + listeners_.erase(std::remove_if(listeners_.begin(), listeners_.end(), + [&](auto& listener) { return listener.state == REMOVED; }), + listeners_.end()); +} + +/** + * @param listener a warming listener that has a corresponding active listener of the same name + * called after listener receives its route, so it will be moved to active and the old listener will + * be removed + */ +void XdsVerifier::markForRemoval(ListenerRepresentation& rep) { + ASSERT(rep.state == WARMING); + // find the old listener and mark it for removal + for (auto& old_rep : listeners_) { + if (old_rep.listener.name() == rep.listener.name() && + getRoute(old_rep.listener) != getRoute(rep.listener) && old_rep.state == ACTIVE) { + // mark it as removed to remove it after the loop so as not to invalidate the iterator in + // the caller function + old_rep.state = REMOVED; + /* num_modified_++; */ + num_active_--; + } + } +} + +/** + * called when a route that was previously added is re-added + * the original route might have been ignored if no resources refer to it, so we can add it here + */ +void XdsVerifier::routeUpdated(const envoy::config::route::v3::RouteConfiguration& route) { + if (!all_routes_.contains(route.name()) && + std::any_of(listeners_.begin(), listeners_.end(), + [&](auto& rep) { return getRoute(rep.listener) == route.name(); })) { + all_routes_.insert({route.name(), route}); + active_routes_.insert({route.name(), route}); + } + + ENVOY_LOG_MISC(debug, "Updating {}", route.name()); + if (sotw_or_delta_ == DELTA) { + updateDeltaListeners(route); + } else { + updateSotwListeners(); + } +} + +/** + * add a new route and update any listeners that refer to this route + */ +void 
XdsVerifier::routeAdded(const envoy::config::route::v3::RouteConfiguration& route) { + // routes that are not referenced by any resource are ignored, so this creates a distinction + // between SOTW and delta + // if an unreferenced route is sent in delta, it is ignored forever as it will not be sent in + // future RDS updates, whereas in SOTW it will be present in all future RDS updates, so if a + // listener that refers to it is added in the meantime, it will become active + + // in delta, active_routes_ and all_routes_ should be the same as we only send one route at a + // time, so it either becomes active or not + if (sotw_or_delta_ == DELTA && std::any_of(listeners_.begin(), listeners_.end(), [&](auto& rep) { + return getRoute(rep.listener) == route.name(); + })) { + active_routes_.insert({route.name(), route}); + all_routes_.insert({route.name(), route}); + updateDeltaListeners(route); + } else if (sotw_or_delta_ == SOTW) { + all_routes_.insert({route.name(), route}); + updateSotwListeners(); + } +} + +/** + * called after draining a listener, will remove it from listeners_ + */ +void XdsVerifier::drainedListener(const std::string& name) { + for (auto it = listeners_.begin(); it != listeners_.end(); ++it) { + if (it->listener.name() == name && it->state == DRAINING) { + ENVOY_LOG_MISC(debug, "Drained and removed {}", name); + num_draining_--; + listeners_.erase(it); + return; + } + } + throw EnvoyException(fmt::format("Tried to drain {} which is not draining", name)); +} + +} // namespace Envoy diff --git a/test/server/config_validation/xds_verifier.h b/test/server/config_validation/xds_verifier.h new file mode 100644 index 000000000000..b1c1c511c26d --- /dev/null +++ b/test/server/config_validation/xds_verifier.h @@ -0,0 +1,81 @@ +#include "envoy/admin/v3/config_dump.pb.h" +#include "envoy/common/exception.h" +#include "envoy/config/cluster/v3/cluster.pb.h" +#include "envoy/config/endpoint/v3/endpoint.pb.h" +#include "envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.pb.h" +#include "envoy/config/listener/v3/listener.pb.h" +#include "envoy/config/route/v3/route.pb.h" + +#include "common/common/assert.h" + +#include "test/server/config_validation/xds_fuzz.pb.h" + +#include "absl/container/flat_hash_map.h" + +namespace Envoy { + +class XdsVerifier { +public: + XdsVerifier(test::server::config_validation::Config::SotwOrDelta sotw_or_delta); + void listenerAdded(const envoy::config::listener::v3::Listener& listener, + bool from_update = false); + void listenerUpdated(const envoy::config::listener::v3::Listener& listener); + void listenerRemoved(const std::string& name); + void drainedListener(const std::string& name); + + void routeAdded(const envoy::config::route::v3::RouteConfiguration& route); + void routeUpdated(const envoy::config::route::v3::RouteConfiguration& route); + + enum ListenerState { WARMING, ACTIVE, DRAINING, REMOVED }; + struct ListenerRepresentation { + envoy::config::listener::v3::Listener listener; + ListenerState state; + }; + + const std::vector& listeners() const { return listeners_; } + + const absl::flat_hash_map& + routes() const { + return active_routes_; + }; + + uint32_t numWarming() const { return num_warming_; } + uint32_t numActive() const { return num_active_; } + uint32_t numDraining() const { return num_draining_; } + + uint32_t numAdded() const { return num_added_; } + uint32_t numModified() const { return num_modified_; } + uint32_t numRemoved() const { return num_removed_; } + + void dumpState(); + +private: + enum 
SotwOrDelta { SOTW, DELTA }; + + std::string getRoute(const envoy::config::listener::v3::Listener& listener); + bool hasRoute(const envoy::config::listener::v3::Listener& listener); + bool hasActiveRoute(const envoy::config::listener::v3::Listener& listener); + void updateSotwListeners(); + void updateDeltaListeners(const envoy::config::route::v3::RouteConfiguration& route); + void markForRemoval(ListenerRepresentation& rep); + std::vector listeners_; + + // envoy ignores routes that are not referenced by any resources + // all_routes_ is used for SOTW, as every previous route is sent in each request + // active_routes_ holds the routes that envoy knows about, i.e. the routes that are/were + // referenced by a listener + absl::flat_hash_map all_routes_; + absl::flat_hash_map active_routes_; + + uint32_t num_warming_; + uint32_t num_active_; + uint32_t num_draining_; + + uint32_t num_added_; + uint32_t num_modified_; + uint32_t num_removed_; + + SotwOrDelta sotw_or_delta_; +}; + +} // namespace Envoy From 0a0fc51e25599665d772f274c54addf3891ef5c7 Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Wed, 22 Jul 2020 17:08:39 -0700 Subject: [PATCH 723/909] stats: Allow configuring histogram buckets for prometheus (#12034) Previously, a hard-coded set of buckets were used, which were not appropriate for the range or required precision of some histograms. Fixes #7599 Signed-off-by: Greg Greenway --- api/envoy/config/metrics/v3/stats.proto | 39 +++++++++ api/envoy/config/metrics/v4alpha/stats.proto | 42 +++++++++ docs/root/version_history/current.rst | 1 + .../envoy/config/metrics/v3/stats.proto | 39 +++++++++ .../envoy/config/metrics/v4alpha/stats.proto | 42 +++++++++ include/envoy/stats/histogram.h | 19 +++- include/envoy/stats/store.h | 7 ++ source/common/config/BUILD | 1 + source/common/config/utility.cc | 6 ++ source/common/config/utility.h | 7 ++ source/common/stats/BUILD | 3 + source/common/stats/histogram_impl.cc | 54 +++++++++--- source/common/stats/histogram_impl.h | 29 ++++++- source/common/stats/thread_local_store.cc | 46 +++++++--- source/common/stats/thread_local_store.h | 5 +- source/server/admin/prometheus_stats.cc | 2 +- source/server/server.cc | 1 + test/common/stats/BUILD | 9 ++ test/common/stats/histogram_impl_test.cc | 86 +++++++++++++++++++ .../metrics_service_integration_test.cc | 3 +- test/integration/BUILD | 1 + test/integration/integration_admin_test.cc | 19 ++++ test/integration/integration_admin_test.h | 11 +++ test/integration/server.h | 1 + test/integration/stats_integration_test.cc | 7 +- test/server/admin/prometheus_stats_test.cc | 46 +++++++--- 26 files changed, 477 insertions(+), 49 deletions(-) create mode 100644 test/common/stats/histogram_impl_test.cc diff --git a/api/envoy/config/metrics/v3/stats.proto b/api/envoy/config/metrics/v3/stats.proto index 75c73bd7acc2..1c28a8f61065 100644 --- a/api/envoy/config/metrics/v3/stats.proto +++ b/api/envoy/config/metrics/v3/stats.proto @@ -83,6 +83,35 @@ message StatsConfig { // `issue #8771 `_ for more information. // If any unexpected behavior changes are observed, please open a new issue immediately. StatsMatcher stats_matcher = 3; + + // Defines rules for setting the histogram buckets. Rules are evaluated in order, and the first + // match is applied. If no match is found (or if no rules are set), the following default buckets + // are used: + // + // .. 
code-block:: json + // + // [ + // 0.5, + // 1, + // 5, + // 10, + // 25, + // 50, + // 100, + // 250, + // 500, + // 1000, + // 2500, + // 5000, + // 10000, + // 30000, + // 60000, + // 300000, + // 600000, + // 1800000, + // 3600000 + // ] + repeated HistogramBucketSettings histogram_bucket_settings = 4; } // Configuration for disabling stat instantiation. @@ -259,6 +288,16 @@ message TagSpecifier { } } +// Specifies a matcher for stats and the buckets that matching stats should use. +message HistogramBucketSettings { + // The stats that this rule applies to. The match is applied to the original stat name + // before tag-extraction, for example `cluster.exampleclustername.upstream_cx_length_ms`. + type.matcher.v3.StringMatcher match = 1 [(validate.rules).message = {required: true}]; + + // Each value is the upper bound of a successive bucket. + repeated double buckets = 2 [(validate.rules).repeated = {min_items: 1}]; +} + // Stats configuration proto schema for built-in *envoy.stat_sinks.statsd* sink. This sink does not support // tagged metrics. // [#extension: envoy.stat_sinks.statsd] diff --git a/api/envoy/config/metrics/v4alpha/stats.proto b/api/envoy/config/metrics/v4alpha/stats.proto index d8ced1aca34c..e2c5ae9dc2c7 100644 --- a/api/envoy/config/metrics/v4alpha/stats.proto +++ b/api/envoy/config/metrics/v4alpha/stats.proto @@ -83,6 +83,35 @@ message StatsConfig { // `issue #8771 `_ for more information. // If any unexpected behavior changes are observed, please open a new issue immediately. StatsMatcher stats_matcher = 3; + + // Defines rules for setting the histogram buckets. Rules are evaluated in order, and the first + // match is applied. If no match is found (or if no rules are set), the following default buckets + // are used: + // + // .. code-block:: json + // + // [ + // 0.5, + // 1, + // 5, + // 10, + // 25, + // 50, + // 100, + // 250, + // 500, + // 1000, + // 2500, + // 5000, + // 10000, + // 30000, + // 60000, + // 300000, + // 600000, + // 1800000, + // 3600000 + // ] + repeated HistogramBucketSettings histogram_bucket_settings = 4; } // Configuration for disabling stat instantiation. @@ -259,6 +288,19 @@ message TagSpecifier { } } +// Specifies a matcher for stats and the buckets that matching stats should use. +message HistogramBucketSettings { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.metrics.v3.HistogramBucketSettings"; + + // The stats that this rule applies to. The match is applied to the original stat name + // before tag-extraction, for example `cluster.exampleclustername.upstream_cx_length_ms`. + type.matcher.v4alpha.StringMatcher match = 1 [(validate.rules).message = {required: true}]; + + // Each value is the upper bound of a successive bucket. + repeated double buckets = 2 [(validate.rules).repeated = {min_items: 1}]; +} + // Stats configuration proto schema for built-in *envoy.stat_sinks.statsd* sink. This sink does not support // tagged metrics. // [#extension: envoy.stat_sinks.statsd] diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 18d56f2994b0..e48bd61afed4 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -49,6 +49,7 @@ New Features retry policy, which allows retrying envoy's own rate limited responses. * stats: added optional histograms to :ref:`cluster stats ` that track headers and body sizes of requests and responses. +* stats: allow configuring histogram buckets for stats sinks and admin endpoints that support it. 
* tap: added :ref:`generic body matcher` to scan http requests and responses for text or hex patterns. Deprecated diff --git a/generated_api_shadow/envoy/config/metrics/v3/stats.proto b/generated_api_shadow/envoy/config/metrics/v3/stats.proto index 458e0aa60519..4fa05259054e 100644 --- a/generated_api_shadow/envoy/config/metrics/v3/stats.proto +++ b/generated_api_shadow/envoy/config/metrics/v3/stats.proto @@ -81,6 +81,35 @@ message StatsConfig { // `issue #8771 `_ for more information. // If any unexpected behavior changes are observed, please open a new issue immediately. StatsMatcher stats_matcher = 3; + + // Defines rules for setting the histogram buckets. Rules are evaluated in order, and the first + // match is applied. If no match is found (or if no rules are set), the following default buckets + // are used: + // + // .. code-block:: json + // + // [ + // 0.5, + // 1, + // 5, + // 10, + // 25, + // 50, + // 100, + // 250, + // 500, + // 1000, + // 2500, + // 5000, + // 10000, + // 30000, + // 60000, + // 300000, + // 600000, + // 1800000, + // 3600000 + // ] + repeated HistogramBucketSettings histogram_bucket_settings = 4; } // Configuration for disabling stat instantiation. @@ -257,6 +286,16 @@ message TagSpecifier { } } +// Specifies a matcher for stats and the buckets that matching stats should use. +message HistogramBucketSettings { + // The stats that this rule applies to. The match is applied to the original stat name + // before tag-extraction, for example `cluster.exampleclustername.upstream_cx_length_ms`. + type.matcher.v3.StringMatcher match = 1 [(validate.rules).message = {required: true}]; + + // Each value is the upper bound of a successive bucket. + repeated double buckets = 2 [(validate.rules).repeated = {min_items: 1}]; +} + // Stats configuration proto schema for built-in *envoy.stat_sinks.statsd* sink. This sink does not support // tagged metrics. // [#extension: envoy.stat_sinks.statsd] diff --git a/generated_api_shadow/envoy/config/metrics/v4alpha/stats.proto b/generated_api_shadow/envoy/config/metrics/v4alpha/stats.proto index d8ced1aca34c..e2c5ae9dc2c7 100644 --- a/generated_api_shadow/envoy/config/metrics/v4alpha/stats.proto +++ b/generated_api_shadow/envoy/config/metrics/v4alpha/stats.proto @@ -83,6 +83,35 @@ message StatsConfig { // `issue #8771 `_ for more information. // If any unexpected behavior changes are observed, please open a new issue immediately. StatsMatcher stats_matcher = 3; + + // Defines rules for setting the histogram buckets. Rules are evaluated in order, and the first + // match is applied. If no match is found (or if no rules are set), the following default buckets + // are used: + // + // .. code-block:: json + // + // [ + // 0.5, + // 1, + // 5, + // 10, + // 25, + // 50, + // 100, + // 250, + // 500, + // 1000, + // 2500, + // 5000, + // 10000, + // 30000, + // 60000, + // 300000, + // 600000, + // 1800000, + // 3600000 + // ] + repeated HistogramBucketSettings histogram_bucket_settings = 4; } // Configuration for disabling stat instantiation. @@ -259,6 +288,19 @@ message TagSpecifier { } } +// Specifies a matcher for stats and the buckets that matching stats should use. +message HistogramBucketSettings { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.metrics.v3.HistogramBucketSettings"; + + // The stats that this rule applies to. The match is applied to the original stat name + // before tag-extraction, for example `cluster.exampleclustername.upstream_cx_length_ms`. 
+ type.matcher.v4alpha.StringMatcher match = 1 [(validate.rules).message = {required: true}]; + + // Each value is the upper bound of a successive bucket. + repeated double buckets = 2 [(validate.rules).repeated = {min_items: 1}]; +} + // Stats configuration proto schema for built-in *envoy.stat_sinks.statsd* sink. This sink does not support // tagged metrics. // [#extension: envoy.stat_sinks.statsd] diff --git a/include/envoy/stats/histogram.h b/include/envoy/stats/histogram.h index 719a83fea6e2..aa87d59e7360 100644 --- a/include/envoy/stats/histogram.h +++ b/include/envoy/stats/histogram.h @@ -11,6 +11,23 @@ namespace Envoy { namespace Stats { +using ConstSupportedBuckets = const std::vector; + +class HistogramSettings { +public: + virtual ~HistogramSettings() = default; + + /** + * For formats like Prometheus where the entire histogram is published (but not + * like statsd where each value to include in the histogram is emitted separately), + * get the limits for each histogram bucket. + * @return The buckets for the histogram. Each value is an upper bound of a bucket. + */ + virtual ConstSupportedBuckets& buckets(absl::string_view stat_name) const PURE; +}; + +using HistogramSettingsConstPtr = std::unique_ptr; + /** * Holds the computed statistics for a histogram. */ @@ -43,7 +60,7 @@ class HistogramStatistics { * with 0 as the implicit lower bound. For timers, these bucket thresholds * are in milliseconds but the thresholds are applicable to all types of data. */ - virtual const std::vector& supportedBuckets() const PURE; + virtual ConstSupportedBuckets& supportedBuckets() const PURE; /** * Returns computed bucket values during the period. The vector contains an approximation diff --git a/include/envoy/stats/store.h b/include/envoy/stats/store.h index 158f00518a51..191ed0f8589c 100644 --- a/include/envoy/stats/store.h +++ b/include/envoy/stats/store.h @@ -5,6 +5,7 @@ #include #include "envoy/common/pure.h" +#include "envoy/stats/histogram.h" #include "envoy/stats/scope.h" #include "envoy/stats/stats_matcher.h" #include "envoy/stats/tag_producer.h" @@ -78,6 +79,12 @@ class StoreRoot : public Store { */ virtual void setStatsMatcher(StatsMatcherPtr&& stats_matcher) PURE; + /** + * Attach a HistogramSettings to this StoreRoot to generate histogram configurations + * according to some ruleset. + */ + virtual void setHistogramSettings(HistogramSettingsConstPtr&& histogram_settings) PURE; + /** * Initialize the store for threading. This will be called once after all worker threads have * been initialized. At this point the store can initialize itself for multi-threaded operation. 
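Taken together, these pieces let a stats store resolve per-stat bucket boundaries from the bootstrap's stats_config: HistogramSettingsImpl interprets the HistogramBucketSettings rules, and StoreRoot::setHistogramSettings() attaches the result (server.cc does this through Config::Utility::createHistogramSettings). A minimal sketch of that wiring, using only types introduced in this patch; the helper name makeBucketRule and the specific prefix/bounds are illustrative, not part of the change:

#include <initializer_list>
#include <memory>
#include <string>

#include "envoy/config/metrics/v3/stats.pb.h"
#include "envoy/stats/store.h"

#include "common/stats/histogram_impl.h"

namespace Envoy {
namespace Stats {

// Illustrative helper: one rule applying custom buckets to every stat whose
// original (pre-tag-extraction) name starts with `prefix`.
envoy::config::metrics::v3::HistogramBucketSettings
makeBucketRule(const std::string& prefix, std::initializer_list<double> bounds) {
  envoy::config::metrics::v3::HistogramBucketSettings rule;
  rule.mutable_match()->set_prefix(prefix);
  for (double bound : bounds) {
    rule.mutable_buckets()->Add(bound);
  }
  return rule;
}

void configureBuckets(StoreRoot& store) {
  envoy::config::metrics::v3::StatsConfig stats_config;
  // Rules are evaluated in order and the first match wins; any histogram that
  // matches no rule keeps the default buckets (0.5, 1, 5, ..., 3600000).
  *stats_config.add_histogram_bucket_settings() =
      makeBucketRule("cluster.", {0.5, 1, 2, 4, 8, 16});
  store.setHistogramSettings(std::make_unique<HistogramSettingsImpl>(stats_config));
}

} // namespace Stats
} // namespace Envoy

This mirrors what InstanceImpl::initialize() does with the real bootstrap in server.cc below, but in a form small enough to drop into a unit test.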
diff --git a/source/common/config/BUILD b/source/common/config/BUILD index d30b0542144f..e42e10d7803b 100644 --- a/source/common/config/BUILD +++ b/source/common/config/BUILD @@ -361,6 +361,7 @@ envoy_cc_library( "//source/common/protobuf", "//source/common/protobuf:utility_lib", "//source/common/singleton:const_singleton", + "//source/common/stats:histogram_lib", "//source/common/stats:stats_lib", "//source/common/stats:stats_matcher_lib", "//source/common/stats:tag_producer_lib", diff --git a/source/common/config/utility.cc b/source/common/config/utility.cc index 65ce74a5150b..6b5499def902 100644 --- a/source/common/config/utility.cc +++ b/source/common/config/utility.cc @@ -20,6 +20,7 @@ #include "common/config/well_known_names.h" #include "common/protobuf/protobuf.h" #include "common/protobuf/utility.h" +#include "common/stats/histogram_impl.h" #include "common/stats/stats_matcher_impl.h" #include "common/stats/tag_producer_impl.h" @@ -221,6 +222,11 @@ Utility::createStatsMatcher(const envoy::config::bootstrap::v3::Bootstrap& boots return std::make_unique(bootstrap.stats_config()); } +Stats::HistogramSettingsConstPtr +Utility::createHistogramSettings(const envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + return std::make_unique(bootstrap.stats_config()); +} + Grpc::AsyncClientFactoryPtr Utility::factoryForGrpcApiConfigSource( Grpc::AsyncClientManager& async_client_manager, const envoy::config::core::v3::ApiConfigSource& api_config_source, Stats::Scope& scope, diff --git a/source/common/config/utility.h b/source/common/config/utility.h index ec6c6ac1d80e..09b9e0ea6f37 100644 --- a/source/common/config/utility.h +++ b/source/common/config/utility.h @@ -13,6 +13,7 @@ #include "envoy/local_info/local_info.h" #include "envoy/registry/registry.h" #include "envoy/server/filter_config.h" +#include "envoy/stats/histogram.h" #include "envoy/stats/scope.h" #include "envoy/stats/stats_matcher.h" #include "envoy/stats/tag_producer.h" @@ -330,6 +331,12 @@ class Utility { static Stats::StatsMatcherPtr createStatsMatcher(const envoy::config::bootstrap::v3::Bootstrap& bootstrap); + /** + * Create HistogramSettings instance. + */ + static Stats::HistogramSettingsConstPtr + createHistogramSettings(const envoy::config::bootstrap::v3::Bootstrap& bootstrap); + /** * Obtain gRPC async client factory from a envoy::api::v2::core::ApiConfigSource. * @param async_client_manager gRPC async client manager. 
diff --git a/source/common/stats/BUILD b/source/common/stats/BUILD index 37d4cb76df4f..2a671b4c07ef 100644 --- a/source/common/stats/BUILD +++ b/source/common/stats/BUILD @@ -35,7 +35,9 @@ envoy_cc_library( ":metric_impl_lib", "//source/common/common:assert_lib", "//source/common/common:hash_lib", + "//source/common/common:matchers_lib", "//source/common/common:utility_lib", + "@envoy_api//envoy/config/metrics/v3:pkg_cc_proto", ], ) @@ -249,6 +251,7 @@ envoy_cc_library( hdrs = ["thread_local_store.h"], deps = [ ":allocator_lib", + ":histogram_lib", ":null_counter_lib", ":null_gauge_lib", ":null_text_readout_lib", diff --git a/source/common/stats/histogram_impl.cc b/source/common/stats/histogram_impl.cc index 3755f17fefc9..da633c4d707a 100644 --- a/source/common/stats/histogram_impl.cc +++ b/source/common/stats/histogram_impl.cc @@ -10,8 +10,17 @@ namespace Envoy { namespace Stats { -HistogramStatisticsImpl::HistogramStatisticsImpl(const histogram_t* histogram_ptr) - : computed_quantiles_(HistogramStatisticsImpl::supportedQuantiles().size(), 0.0) { +namespace { +const ConstSupportedBuckets default_buckets{}; +} + +HistogramStatisticsImpl::HistogramStatisticsImpl() + : supported_buckets_(default_buckets), computed_quantiles_(supportedQuantiles().size(), 0.0) {} + +HistogramStatisticsImpl::HistogramStatisticsImpl(const histogram_t* histogram_ptr, + ConstSupportedBuckets& supported_buckets) + : supported_buckets_(supported_buckets), + computed_quantiles_(HistogramStatisticsImpl::supportedQuantiles().size(), 0.0) { hist_approx_quantile(histogram_ptr, supportedQuantiles().data(), HistogramStatisticsImpl::supportedQuantiles().size(), computed_quantiles_.data()); @@ -19,9 +28,8 @@ HistogramStatisticsImpl::HistogramStatisticsImpl(const histogram_t* histogram_pt sample_count_ = hist_sample_count(histogram_ptr); sample_sum_ = hist_approx_sum(histogram_ptr); - const std::vector& supported_buckets = supportedBuckets(); - computed_buckets_.reserve(supported_buckets.size()); - for (const auto bucket : supported_buckets) { + computed_buckets_.reserve(supported_buckets_.size()); + for (const auto bucket : supported_buckets_) { computed_buckets_.emplace_back(hist_approx_count_below(histogram_ptr, bucket)); } } @@ -31,12 +39,6 @@ const std::vector& HistogramStatisticsImpl::supportedQuantiles() const { {0, 0.25, 0.5, 0.75, 0.90, 0.95, 0.99, 0.995, 0.999, 1}); } -const std::vector& HistogramStatisticsImpl::supportedBuckets() const { - CONSTRUCT_ON_FIRST_USE(std::vector, - {0.5, 1, 5, 10, 25, 50, 100, 250, 500, 1000, 2500, 5000, 10000, 30000, - 60000, 300000, 600000, 1800000, 3600000}); -} - std::string HistogramStatisticsImpl::quantileSummary() const { std::vector summary; const std::vector& supported_quantiles = supportedQuantiles(); @@ -50,7 +52,7 @@ std::string HistogramStatisticsImpl::quantileSummary() const { std::string HistogramStatisticsImpl::bucketSummary() const { std::vector bucket_summary; - const std::vector& supported_buckets = supportedBuckets(); + ConstSupportedBuckets& supported_buckets = supportedBuckets(); bucket_summary.reserve(supported_buckets.size()); for (size_t i = 0; i < supported_buckets.size(); ++i) { bucket_summary.push_back(fmt::format("B{:g}: {}", supported_buckets[i], computed_buckets_[i])); @@ -72,12 +74,38 @@ void HistogramStatisticsImpl::refresh(const histogram_t* new_histogram_ptr) { ASSERT(supportedBuckets().size() == computed_buckets_.size()); computed_buckets_.clear(); - const std::vector& supported_buckets = supportedBuckets(); + ConstSupportedBuckets& supported_buckets 
= supportedBuckets(); computed_buckets_.reserve(supported_buckets.size()); for (const auto bucket : supported_buckets) { computed_buckets_.emplace_back(hist_approx_count_below(new_histogram_ptr, bucket)); } } +HistogramSettingsImpl::HistogramSettingsImpl(const envoy::config::metrics::v3::StatsConfig& config) + : configs_([&config]() { + std::vector configs; + for (const auto& matcher : config.histogram_bucket_settings()) { + configs.emplace_back(matcher.match(), ConstSupportedBuckets{matcher.buckets().begin(), + matcher.buckets().end()}); + } + + return configs; + }()) {} + +const ConstSupportedBuckets& HistogramSettingsImpl::buckets(absl::string_view stat_name) const { + for (const auto& config : configs_) { + if (config.first.match(stat_name)) { + return config.second; + } + } + return defaultBuckets(); +} + +const ConstSupportedBuckets& HistogramSettingsImpl::defaultBuckets() { + CONSTRUCT_ON_FIRST_USE(ConstSupportedBuckets, + {0.5, 1, 5, 10, 25, 50, 100, 250, 500, 1000, 2500, 5000, 10000, 30000, + 60000, 300000, 600000, 1800000, 3600000}); +} + } // namespace Stats } // namespace Envoy diff --git a/source/common/stats/histogram_impl.h b/source/common/stats/histogram_impl.h index 657bbbdf357c..a58c60fd5fc5 100644 --- a/source/common/stats/histogram_impl.h +++ b/source/common/stats/histogram_impl.h @@ -3,10 +3,12 @@ #include #include +#include "envoy/config/metrics/v3/stats.pb.h" #include "envoy/stats/histogram.h" #include "envoy/stats/stats.h" #include "envoy/stats/store.h" +#include "common/common/matchers.h" #include "common/common/non_copyable.h" #include "common/stats/metric_impl.h" @@ -15,18 +17,38 @@ namespace Envoy { namespace Stats { +class HistogramSettingsImpl : public HistogramSettings { +public: + HistogramSettingsImpl() = default; + HistogramSettingsImpl(const envoy::config::metrics::v3::StatsConfig& config); + + // HistogramSettings + const ConstSupportedBuckets& buckets(absl::string_view stat_name) const override; + + static ConstSupportedBuckets& defaultBuckets(); + +private: + using Config = std::pair; + const std::vector configs_{}; +}; + /** * Implementation of HistogramStatistics for circllhist. */ class HistogramStatisticsImpl : public HistogramStatistics, NonCopyable { public: - HistogramStatisticsImpl() : computed_quantiles_(supportedQuantiles().size(), 0.0) {} + HistogramStatisticsImpl(); + /** * HistogramStatisticsImpl object is constructed using the passed in histogram. * @param histogram_ptr pointer to the histogram for which stats will be calculated. This pointer * will not be retained. 
*/ - HistogramStatisticsImpl(const histogram_t* histogram_ptr); + HistogramStatisticsImpl( + const histogram_t* histogram_ptr, + ConstSupportedBuckets& supported_buckets = HistogramSettingsImpl::defaultBuckets()); + + static ConstSupportedBuckets& defaultSupportedBuckets(); void refresh(const histogram_t* new_histogram_ptr); @@ -35,12 +57,13 @@ class HistogramStatisticsImpl : public HistogramStatistics, NonCopyable { std::string bucketSummary() const override; const std::vector& supportedQuantiles() const final; const std::vector& computedQuantiles() const override { return computed_quantiles_; } - const std::vector& supportedBuckets() const override; + ConstSupportedBuckets& supportedBuckets() const override { return supported_buckets_; } const std::vector& computedBuckets() const override { return computed_buckets_; } uint64_t sampleCount() const override { return sample_count_; } double sampleSum() const override { return sample_sum_; } private: + ConstSupportedBuckets& supported_buckets_; std::vector computed_quantiles_; std::vector computed_buckets_; uint64_t sample_count_; diff --git a/source/common/stats/thread_local_store.cc b/source/common/stats/thread_local_store.cc index 2b7574e42484..697760ed1a4b 100644 --- a/source/common/stats/thread_local_store.cc +++ b/source/common/stats/thread_local_store.cc @@ -12,6 +12,7 @@ #include "envoy/stats/stats.h" #include "common/common/lock_guard.h" +#include "common/stats/histogram_impl.h" #include "common/stats/stats_matcher_impl.h" #include "common/stats/tag_producer_impl.h" #include "common/stats/tag_utility.h" @@ -24,11 +25,13 @@ namespace Stats { const char ThreadLocalStoreImpl::MainDispatcherCleanupSync[] = "main-dispatcher-cleanup"; ThreadLocalStoreImpl::ThreadLocalStoreImpl(Allocator& alloc) - : alloc_(alloc), default_scope_(createScope("")), + : alloc_(alloc), default_scope_(ThreadLocalStoreImpl::createScope("")), tag_producer_(std::make_unique()), - stats_matcher_(std::make_unique()), heap_allocator_(alloc.symbolTable()), - null_counter_(alloc.symbolTable()), null_gauge_(alloc.symbolTable()), - null_histogram_(alloc.symbolTable()), null_text_readout_(alloc.symbolTable()), + stats_matcher_(std::make_unique()), + histogram_settings_(std::make_unique()), + heap_allocator_(alloc.symbolTable()), null_counter_(alloc.symbolTable()), + null_gauge_(alloc.symbolTable()), null_histogram_(alloc.symbolTable()), + null_text_readout_(alloc.symbolTable()), well_known_tags_(alloc.symbolTable().makeSet("well_known_tags")) { for (const auto& desc : Config::TagNames::get().descriptorVec()) { well_known_tags_->rememberBuiltin(desc.name_); @@ -41,6 +44,14 @@ ThreadLocalStoreImpl::~ThreadLocalStoreImpl() { ASSERT(scopes_.empty()); } +void ThreadLocalStoreImpl::setHistogramSettings(HistogramSettingsConstPtr&& histogram_settings) { + Thread::LockGuard lock(lock_); + for (ScopeImpl* scope : scopes_) { + ASSERT(scope->central_cache_->histograms_.empty()); + } + histogram_settings_ = std::move(histogram_settings); +} + void ThreadLocalStoreImpl::setStatsMatcher(StatsMatcherPtr&& stats_matcher) { stats_matcher_ = std::move(stats_matcher); if (stats_matcher_->acceptsAll()) { @@ -274,8 +285,8 @@ void ThreadLocalStoreImpl::clearScopeFromCaches(uint64_t scope_id, ThreadLocalStoreImpl::ScopeImpl::ScopeImpl(ThreadLocalStoreImpl& parent, const std::string& prefix) : scope_id_(parent.next_scope_id_++), parent_(parent), - prefix_(Utility::sanitizeStatsName(prefix), parent.symbolTable()), - central_cache_(new CentralCacheEntry(parent.symbolTable())) {} + 
prefix_(Utility::sanitizeStatsName(prefix), parent.alloc_.symbolTable()), + central_cache_(new CentralCacheEntry(parent.alloc_.symbolTable())) {} ThreadLocalStoreImpl::ScopeImpl::~ScopeImpl() { parent_.releaseScopeCrossThread(this); @@ -550,9 +561,14 @@ Histogram& ThreadLocalStoreImpl::ScopeImpl::histogramFromStatNameWithTags( } else { StatNameTagHelper tag_helper(parent_, joiner.tagExtractedName(), stat_name_tags); - RefcountPtr stat( - new ParentHistogramImpl(final_stat_name, unit, parent_, *this, - tag_helper.tagExtractedName(), tag_helper.statNameTags())); + ConstSupportedBuckets* buckets = nullptr; + symbolTable().callWithStringView(final_stat_name, + [&buckets, this](absl::string_view stat_name) { + buckets = &parent_.histogram_settings_->buckets(stat_name); + }); + RefcountPtr stat(new ParentHistogramImpl( + final_stat_name, unit, parent_, *this, tag_helper.tagExtractedName(), + tag_helper.statNameTags(), *buckets)); central_ref = ¢ral_cache_->histograms_[stat->statName()]; *central_ref = stat; } @@ -685,14 +701,16 @@ void ThreadLocalHistogramImpl::merge(histogram_t* target) { ParentHistogramImpl::ParentHistogramImpl(StatName name, Histogram::Unit unit, Store& parent, TlsScope& tls_scope, StatName tag_extracted_name, - const StatNameTagVector& stat_name_tags) + const StatNameTagVector& stat_name_tags, + ConstSupportedBuckets& supported_buckets) : MetricImpl(name, tag_extracted_name, stat_name_tags, parent.symbolTable()), unit_(unit), parent_(parent), tls_scope_(tls_scope), interval_histogram_(hist_alloc()), - cumulative_histogram_(hist_alloc()), interval_statistics_(interval_histogram_), - cumulative_statistics_(cumulative_histogram_), merged_(false) {} + cumulative_histogram_(hist_alloc()), + interval_statistics_(interval_histogram_, supported_buckets), + cumulative_statistics_(cumulative_histogram_, supported_buckets), merged_(false) {} ParentHistogramImpl::~ParentHistogramImpl() { - MetricImpl::clear(symbolTable()); + MetricImpl::clear(parent_.symbolTable()); hist_free(interval_histogram_); hist_free(cumulative_histogram_); } @@ -749,7 +767,7 @@ const std::string ParentHistogramImpl::quantileSummary() const { const std::string ParentHistogramImpl::bucketSummary() const { if (used()) { std::vector bucket_summary; - const std::vector& supported_buckets = interval_statistics_.supportedBuckets(); + ConstSupportedBuckets& supported_buckets = interval_statistics_.supportedBuckets(); bucket_summary.reserve(supported_buckets.size()); for (size_t i = 0; i < supported_buckets.size(); ++i) { bucket_summary.push_back(fmt::format("B{:g}({},{})", supported_buckets[i], diff --git a/source/common/stats/thread_local_store.h b/source/common/stats/thread_local_store.h index bf57eed14d95..23ce40e5fc15 100644 --- a/source/common/stats/thread_local_store.h +++ b/source/common/stats/thread_local_store.h @@ -82,7 +82,8 @@ class TlsScope; class ParentHistogramImpl : public MetricImpl { public: ParentHistogramImpl(StatName name, Histogram::Unit unit, Store& parent, TlsScope& tls_scope, - StatName tag_extracted_name, const StatNameTagVector& stat_name_tags); + StatName tag_extracted_name, const StatNameTagVector& stat_name_tags, + ConstSupportedBuckets& supported_buckets); ~ParentHistogramImpl() override; void addTlsHistogram(const TlsHistogramSharedPtr& hist_ptr); @@ -259,6 +260,7 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo tag_producer_ = std::move(tag_producer); } void setStatsMatcher(StatsMatcherPtr&& stats_matcher) override; + void setHistogramSettings(HistogramSettingsConstPtr&& 
histogram_settings) override; void initializeThreading(Event::Dispatcher& main_thread_dispatcher, ThreadLocal::Instance& tls) override; void shutdownThreading() override; @@ -475,6 +477,7 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo std::list> timer_sinks_; TagProducerPtr tag_producer_; StatsMatcherPtr stats_matcher_; + HistogramSettingsConstPtr histogram_settings_; std::atomic threading_ever_initialized_{}; std::atomic shutting_down_{}; std::atomic merge_in_progress_{}; diff --git a/source/server/admin/prometheus_stats.cc b/source/server/admin/prometheus_stats.cc index dc4eee79e7c3..a82d59878d93 100644 --- a/source/server/admin/prometheus_stats.cc +++ b/source/server/admin/prometheus_stats.cc @@ -151,7 +151,7 @@ std::string generateHistogramOutput(const Stats::ParentHistogram& histogram, const std::string hist_tags = histogram.tags().empty() ? EMPTY_STRING : (tags + ","); const Stats::HistogramStatistics& stats = histogram.cumulativeStatistics(); - const std::vector& supported_buckets = stats.supportedBuckets(); + Stats::ConstSupportedBuckets& supported_buckets = stats.supportedBuckets(); const std::vector& computed_buckets = stats.computedBuckets(); std::string output; for (size_t i = 0; i < supported_buckets.size(); ++i) { diff --git a/source/server/server.cc b/source/server/server.cc index b71fd650f295..f192817978e0 100644 --- a/source/server/server.cc +++ b/source/server/server.cc @@ -324,6 +324,7 @@ void InstanceImpl::initialize(const Options& options, // stats. stats_store_.setTagProducer(Config::Utility::createTagProducer(bootstrap_)); stats_store_.setStatsMatcher(Config::Utility::createStatsMatcher(bootstrap_)); + stats_store_.setHistogramSettings(Config::Utility::createHistogramSettings(bootstrap_)); const std::string server_stats_prefix = "server."; server_stats_ = std::make_unique( diff --git a/test/common/stats/BUILD b/test/common/stats/BUILD index e7c6ebbb01d7..a01eb1a1ae26 100644 --- a/test/common/stats/BUILD +++ b/test/common/stats/BUILD @@ -33,6 +33,15 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "histogram_impl_test", + srcs = ["histogram_impl_test.cc"], + deps = [ + "//source/common/stats:histogram_lib", + "@envoy_api//envoy/config/metrics/v3:pkg_cc_proto", + ], +) + envoy_cc_test( name = "metric_impl_test", srcs = ["metric_impl_test.cc"], diff --git a/test/common/stats/histogram_impl_test.cc b/test/common/stats/histogram_impl_test.cc new file mode 100644 index 000000000000..3cbde4b280d0 --- /dev/null +++ b/test/common/stats/histogram_impl_test.cc @@ -0,0 +1,86 @@ +#include "envoy/config/metrics/v3/stats.pb.h" + +#include "common/stats/histogram_impl.h" + +#include "test/test_common/utility.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace Stats { + +class HistogramSettingsImplTest : public testing::Test { +public: + void initialize() { + envoy::config::metrics::v3::StatsConfig config; + auto& bucket_settings = *config.mutable_histogram_bucket_settings(); + for (auto& item : buckets_configs_) { + bucket_settings.Add(std::move(item)); + } + settings_ = std::make_unique(config); + } + + std::vector buckets_configs_; + std::unique_ptr settings_; +}; + +// Test that a matching stat returns the configured buckets, and a non-matching +// stat returns the defaults. 
+TEST_F(HistogramSettingsImplTest, Basic) { + envoy::config::metrics::v3::HistogramBucketSettings setting; + setting.mutable_match()->set_prefix("a"); + setting.mutable_buckets()->Add(0.1); + setting.mutable_buckets()->Add(2); + buckets_configs_.push_back(setting); + + initialize(); + EXPECT_EQ(settings_->buckets("test"), settings_->defaultBuckets()); + EXPECT_EQ(settings_->buckets("abcd"), ConstSupportedBuckets({0.1, 2})); +} + +// Test that only matching configurations are applied. +TEST_F(HistogramSettingsImplTest, Matching) { + { + envoy::config::metrics::v3::HistogramBucketSettings setting; + setting.mutable_match()->set_prefix("a"); + setting.mutable_buckets()->Add(1); + setting.mutable_buckets()->Add(2); + buckets_configs_.push_back(setting); + } + + { + envoy::config::metrics::v3::HistogramBucketSettings setting; + setting.mutable_match()->set_prefix("b"); + setting.mutable_buckets()->Add(3); + setting.mutable_buckets()->Add(4); + buckets_configs_.push_back(setting); + } + + initialize(); + EXPECT_EQ(settings_->buckets("abcd"), ConstSupportedBuckets({1, 2})); + EXPECT_EQ(settings_->buckets("bcde"), ConstSupportedBuckets({3, 4})); +} + +// Test that earlier configs take precedence over later configs when both match. +TEST_F(HistogramSettingsImplTest, Priority) { + { + envoy::config::metrics::v3::HistogramBucketSettings setting; + setting.mutable_match()->set_prefix("a"); + setting.mutable_buckets()->Add(1); + setting.mutable_buckets()->Add(2); + buckets_configs_.push_back(setting); + } + + { + envoy::config::metrics::v3::HistogramBucketSettings setting; + setting.mutable_match()->set_prefix("ab"); + setting.mutable_buckets()->Add(3); + setting.mutable_buckets()->Add(4); + } + + initialize(); + EXPECT_EQ(settings_->buckets("abcd"), ConstSupportedBuckets({1, 2})); +} + +} // namespace Stats +} // namespace Envoy diff --git a/test/extensions/stats_sinks/metrics_service/metrics_service_integration_test.cc b/test/extensions/stats_sinks/metrics_service/metrics_service_integration_test.cc index a15de2201aed..560b52552d3f 100644 --- a/test/extensions/stats_sinks/metrics_service/metrics_service_integration_test.cc +++ b/test/extensions/stats_sinks/metrics_service/metrics_service_integration_test.cc @@ -110,9 +110,8 @@ class MetricsServiceIntegrationTest : public Grpc::VersionedGrpcClientIntegratio if (metrics_family.name() == "cluster.cluster_0.upstream_rq_time" && metrics_family.type() == ::io::prometheus::client::MetricType::HISTOGRAM) { known_histogram_exists = true; - Stats::HistogramStatisticsImpl empty_statistics; EXPECT_EQ(metrics_family.metric(0).histogram().bucket_size(), - empty_statistics.supportedBuckets().size()); + Stats::HistogramSettingsImpl::defaultBuckets().size()); } ASSERT(metrics_family.metric(0).has_timestamp_ms()); if (known_counter_exists && known_gauge_exists && known_histogram_exists) { diff --git a/test/integration/BUILD b/test/integration/BUILD index d753260c31f0..e89acf52eded 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -471,6 +471,7 @@ envoy_cc_test( deps = [ ":http_protocol_integration_lib", "//include/envoy/http:header_map_interface", + "//source/common/stats:histogram_lib", "//source/common/stats:stats_matcher_lib", "//source/extensions/filters/http/buffer:config", "//source/extensions/filters/http/health_check:config", diff --git a/test/integration/integration_admin_test.cc b/test/integration/integration_admin_test.cc index 7dec5deabe8f..cbfa1ef8559c 100644 --- a/test/integration/integration_admin_test.cc +++ 
b/test/integration/integration_admin_test.cc @@ -12,6 +12,7 @@ #include "common/common/fmt.h" #include "common/config/api_version.h" #include "common/profiler/profiler.h" +#include "common/stats/histogram_impl.h" #include "common/stats/stats_matcher_impl.h" #include "test/common/stats/stat_test_utility.h" @@ -226,6 +227,24 @@ TEST_P(IntegrationAdminTest, Admin) { EXPECT_THAT(response->body(), HasSubstr("envoy_cluster_upstream_cx_active{envoy_cluster_name=\"cluster_0\"} 0\n")); + // Test that a specific bucket config is applied. Buckets 0-3 (inclusive) are set in initialize(). + for (int i = 0; i <= 3; i++) { + EXPECT_THAT( + response->body(), + HasSubstr(fmt::format("envoy_cluster_upstream_cx_connect_ms_bucket{{envoy_cluster_name=" + "\"cluster_0\",le=\"{}\"}} 0\n", + i))); + } + + // Test that other histograms use the default buckets. + for (double bucket : Stats::HistogramSettingsImpl::defaultBuckets()) { + EXPECT_THAT( + response->body(), + HasSubstr(fmt::format("envoy_cluster_upstream_cx_length_ms_bucket{{envoy_cluster_name=" + "\"cluster_0\",le=\"{0:.32g}\"}} 0\n", + bucket))); + } + EXPECT_EQ("200", request("admin", "GET", "/stats/prometheus", response)); EXPECT_THAT( response->body(), diff --git a/test/integration/integration_admin_test.h b/test/integration/integration_admin_test.h index b84ef6b83faa..dc4b38be7fdc 100644 --- a/test/integration/integration_admin_test.h +++ b/test/integration/integration_admin_test.h @@ -16,6 +16,17 @@ class IntegrationAdminTest : public HttpProtocolIntegrationTest { public: void initialize() override { config_helper_.addFilter(ConfigHelper::defaultHealthCheckFilter()); + config_helper_.addConfigModifier( + [](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { + auto& hist_settings = + *bootstrap.mutable_stats_config()->mutable_histogram_bucket_settings(); + envoy::config::metrics::v3::HistogramBucketSettings* setting = hist_settings.Add(); + setting->mutable_match()->set_suffix("upstream_cx_connect_ms"); + setting->mutable_buckets()->Add(0); + setting->mutable_buckets()->Add(1); + setting->mutable_buckets()->Add(2); + setting->mutable_buckets()->Add(3); + }); HttpIntegrationTest::initialize(); } diff --git a/test/integration/server.h b/test/integration/server.h index 55149d0b6c16..1197aa3e8364 100644 --- a/test/integration/server.h +++ b/test/integration/server.h @@ -351,6 +351,7 @@ class TestIsolatedStoreImpl : public StoreRoot { void addSink(Sink&) override {} void setTagProducer(TagProducerPtr&&) override {} void setStatsMatcher(StatsMatcherPtr&&) override {} + void setHistogramSettings(HistogramSettingsConstPtr&&) override {} void initializeThreading(Event::Dispatcher&, ThreadLocal::Instance&) override {} void shutdownThreading() override {} void mergeHistograms(PostMergeCb) override {} diff --git a/test/integration/stats_integration_test.cc b/test/integration/stats_integration_test.cc index 8d4fe14d4d41..23e57c38d341 100644 --- a/test/integration/stats_integration_test.cc +++ b/test/integration/stats_integration_test.cc @@ -286,6 +286,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithFakeSymbolTable) { // 2020/07/15 11748 45003 46000 Stream error on invalid messaging // 2020/07/20 11559 44747 46000 stats: add histograms for request/response headers // and body sizes. + // 2020/07/21 12034 44811 46000 Add configurable histogram buckets. // Note: when adjusting this value: EXPECT_MEMORY_EQ is active only in CI // 'release' builds, where we control the platform and tool-chain. 
So you @@ -303,7 +304,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithFakeSymbolTable) { // We only run the exact test for ipv6 because ipv4 in some cases may allocate a // different number of bytes. We still run the approximate test. if (ip_version_ != Network::Address::IpVersion::v6) { - EXPECT_MEMORY_EQ(m_per_cluster, 44747); + EXPECT_MEMORY_EQ(m_per_cluster, 44811); } EXPECT_MEMORY_LE(m_per_cluster, 46000); // Round up to allow platform variations. } @@ -356,11 +357,11 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithRealSymbolTable) { // 2020/05/20 11223 36603 36800 Add primary clusters tracking to cluster manager. // 2020/06/10 11561 36603 36923 Make upstreams pluggable // 2020/06/29 11751 36827 38000 Improve time complexity of removing callback handle. - // in callback manager. // 2020/07/07 11252 37083 38000 Introduce Least Request LB active request bias config // 2020/07/15 11748 37115 38000 Stream error on invalid messaging // 2020/07/20 11559 36859 38000 stats: add histograms for request/response headers // and body sizes. + // 2020/07/21 12034 36923 38000 Add configurable histogram buckets. // Note: when adjusting this value: EXPECT_MEMORY_EQ is active only in CI // 'release' builds, where we control the platform and tool-chain. So you @@ -378,7 +379,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithRealSymbolTable) { // We only run the exact test for ipv6 because ipv4 in some cases may allocate a // different number of bytes. We still run the approximate test. if (ip_version_ != Network::Address::IpVersion::v6) { - EXPECT_MEMORY_EQ(m_per_cluster, 36859); + EXPECT_MEMORY_EQ(m_per_cluster, 36923); } EXPECT_MEMORY_LE(m_per_cluster, 38000); // Round up to allow platform variations. } diff --git a/test/server/admin/prometheus_stats_test.cc b/test/server/admin/prometheus_stats_test.cc index 2338c6da64ef..ee0cae35a0c3 100644 --- a/test/server/admin/prometheus_stats_test.cc +++ b/test/server/admin/prometheus_stats_test.cc @@ -6,6 +6,7 @@ #include "test/test_common/utility.h" using testing::NiceMock; +using testing::ReturnRef; namespace Envoy { namespace Server { @@ -194,8 +195,7 @@ TEST_F(PrometheusStatsFormatterTest, HistogramWithNoValuesAndNoTags) { Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram()); auto histogram = makeHistogram("histogram1", {}); - ON_CALL(*histogram, cumulativeStatistics()) - .WillByDefault(testing::ReturnRef(h1_cumulative_statistics)); + ON_CALL(*histogram, cumulativeStatistics()).WillByDefault(ReturnRef(h1_cumulative_statistics)); addHistogram(histogram); @@ -233,6 +233,34 @@ envoy_histogram1_count{} 0 EXPECT_EQ(expected_output, response.toString()); } +TEST_F(PrometheusStatsFormatterTest, HistogramWithNonDefaultBuckets) { + HistogramWrapper h1_cumulative; + h1_cumulative.setHistogramValues(std::vector(0)); + Stats::ConstSupportedBuckets buckets{10, 20}; + Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram(), buckets); + + auto histogram = makeHistogram("histogram1", {}); + ON_CALL(*histogram, cumulativeStatistics()).WillByDefault(ReturnRef(h1_cumulative_statistics)); + + addHistogram(histogram); + + Buffer::OwnedImpl response; + auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, + false, absl::nullopt); + EXPECT_EQ(1UL, size); + + const std::string expected_output = R"EOF(# TYPE envoy_histogram1 histogram +envoy_histogram1_bucket{le="10"} 0 +envoy_histogram1_bucket{le="20"} 0 
+envoy_histogram1_bucket{le="+Inf"} 0 +envoy_histogram1_sum{} 0 +envoy_histogram1_count{} 0 + +)EOF"; + + EXPECT_EQ(expected_output, response.toString()); +} + TEST_F(PrometheusStatsFormatterTest, HistogramWithHighCounts) { HistogramWrapper h1_cumulative; @@ -246,8 +274,7 @@ TEST_F(PrometheusStatsFormatterTest, HistogramWithHighCounts) { Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram()); auto histogram = makeHistogram("histogram1", {}); - ON_CALL(*histogram, cumulativeStatistics()) - .WillByDefault(testing::ReturnRef(h1_cumulative_statistics)); + ON_CALL(*histogram, cumulativeStatistics()).WillByDefault(ReturnRef(h1_cumulative_statistics)); addHistogram(histogram); @@ -305,8 +332,7 @@ TEST_F(PrometheusStatsFormatterTest, OutputWithAllMetricTypes) { {makeStat("key2"), makeStat("value2")}}); histogram1->unit_ = Stats::Histogram::Unit::Milliseconds; addHistogram(histogram1); - EXPECT_CALL(*histogram1, cumulativeStatistics()) - .WillOnce(testing::ReturnRef(h1_cumulative_statistics)); + EXPECT_CALL(*histogram1, cumulativeStatistics()).WillOnce(ReturnRef(h1_cumulative_statistics)); Buffer::OwnedImpl response; auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, @@ -379,7 +405,7 @@ TEST_F(PrometheusStatsFormatterTest, OutputSortedByMetricName) { histogram1->unit_ = Stats::Histogram::Unit::Milliseconds; addHistogram(histogram1); EXPECT_CALL(*histogram1, cumulativeStatistics()) - .WillOnce(testing::ReturnRef(h1_cumulative_statistics)); + .WillOnce(ReturnRef(h1_cumulative_statistics)); } } @@ -569,8 +595,7 @@ TEST_F(PrometheusStatsFormatterTest, OutputWithUsedOnly) { {makeStat("key2"), makeStat("value2")}}); histogram1->unit_ = Stats::Histogram::Unit::Milliseconds; addHistogram(histogram1); - EXPECT_CALL(*histogram1, cumulativeStatistics()) - .WillOnce(testing::ReturnRef(h1_cumulative_statistics)); + EXPECT_CALL(*histogram1, cumulativeStatistics()).WillOnce(ReturnRef(h1_cumulative_statistics)); Buffer::OwnedImpl response; auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, @@ -631,8 +656,7 @@ TEST_F(PrometheusStatsFormatterTest, OutputWithUsedOnlyHistogram) { { const bool used_only = false; - EXPECT_CALL(*histogram1, cumulativeStatistics()) - .WillOnce(testing::ReturnRef(h1_cumulative_statistics)); + EXPECT_CALL(*histogram1, cumulativeStatistics()).WillOnce(ReturnRef(h1_cumulative_statistics)); Buffer::OwnedImpl response; auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, From e1d7d8e14cfec79d9d1732ed60db8196e1869a8d Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Wed, 22 Jul 2020 19:40:25 -0700 Subject: [PATCH 724/909] stats: disable EXPECT_EQ in memory integration test (#12239) Until a real solution for https://github.com/envoyproxy/envoy/issues/12209 is found.: Signed-off-by: Matt Klein --- test/integration/stats_integration_test.cc | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/test/integration/stats_integration_test.cc b/test/integration/stats_integration_test.cc index 23e57c38d341..deab7fc53c00 100644 --- a/test/integration/stats_integration_test.cc +++ b/test/integration/stats_integration_test.cc @@ -304,7 +304,8 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithFakeSymbolTable) { // We only run the exact test for ipv6 because ipv4 in some cases may allocate a // different number of bytes. We still run the approximate test. 
if (ip_version_ != Network::Address::IpVersion::v6) { - EXPECT_MEMORY_EQ(m_per_cluster, 44811); + // https://github.com/envoyproxy/envoy/issues/12209 + // EXPECT_MEMORY_EQ(m_per_cluster, 44811); } EXPECT_MEMORY_LE(m_per_cluster, 46000); // Round up to allow platform variations. } @@ -379,7 +380,8 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithRealSymbolTable) { // We only run the exact test for ipv6 because ipv4 in some cases may allocate a // different number of bytes. We still run the approximate test. if (ip_version_ != Network::Address::IpVersion::v6) { - EXPECT_MEMORY_EQ(m_per_cluster, 36923); + // https://github.com/envoyproxy/envoy/issues/12209 + // EXPECT_MEMORY_EQ(m_per_cluster, 36923); } EXPECT_MEMORY_LE(m_per_cluster, 38000); // Round up to allow platform variations. } @@ -426,7 +428,8 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeHostSizeWithStats) { // We only run the exact test for ipv6 because ipv4 in some cases may allocate a // different number of bytes. We still run the approximate test. if (ip_version_ != Network::Address::IpVersion::v6) { - EXPECT_MEMORY_EQ(m_per_host, 1380); + // https://github.com/envoyproxy/envoy/issues/12209 + // EXPECT_MEMORY_EQ(m_per_host, 1380); } EXPECT_MEMORY_LE(m_per_host, 1800); // Round up to allow platform variations. } From 2539e0b7878e84b2f28b9766be7de120c3246580 Mon Sep 17 00:00:00 2001 From: Sam Flattery <44659644+samflattery@users.noreply.github.com> Date: Thu, 23 Jul 2020 13:05:12 +0100 Subject: [PATCH 725/909] fuzz: fix crash into filter fuzz test (#12224) Commit Message: Fix crash in filter fuzz test due to validation not running until after cleaning since the input config message contains an Any field, validation is not run on it, so I added a check to make sure that there was at least one sink before trying to deference Fixes: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=24301 Signed-off-by: Sam Flattery --- ...zz-testcase-minimized-filter_fuzz_test-5726031248621568 | 7 +++++++ test/extensions/filters/http/common/fuzz/uber_filter.cc | 2 +- .../extensions/filters/http/common/fuzz/uber_per_filter.cc | 6 +++++- 3 files changed, 13 insertions(+), 2 deletions(-) create mode 100644 test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5726031248621568 diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5726031248621568 b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5726031248621568 new file mode 100644 index 000000000000..a3aa016972ed --- /dev/null +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5726031248621568 @@ -0,0 +1,7 @@ +config { + name: "envoy.filters.http.tap" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.http.tap.v3.Tap" + value: "\n\002\022\000" + } +} diff --git a/test/extensions/filters/http/common/fuzz/uber_filter.cc b/test/extensions/filters/http/common/fuzz/uber_filter.cc index 8052888886e5..da0e353d1b20 100644 --- a/test/extensions/filters/http/common/fuzz/uber_filter.cc +++ b/test/extensions/filters/http/common/fuzz/uber_filter.cc @@ -195,7 +195,7 @@ void UberFilterFuzzer::fuzz( Server::Configuration::NamedHttpFilterConfigFactory>(proto_config.name()); ProtobufTypes::MessagePtr message = Config::Utility::translateToFactoryConfig( proto_config, factory_context_.messageValidationVisitor(), factory); - // Clean-up 
config with filter-specific logic. + // Clean-up config with filter-specific logic before it runs through validations. cleanFuzzedConfig(proto_config.name(), message.get()); cb_ = factory.createFilterFactoryFromProto(*message, "stats", factory_context_); cb_(filter_callback_); diff --git a/test/extensions/filters/http/common/fuzz/uber_per_filter.cc b/test/extensions/filters/http/common/fuzz/uber_per_filter.cc index 1abdf751940b..d816d5a26ab0 100644 --- a/test/extensions/filters/http/common/fuzz/uber_per_filter.cc +++ b/test/extensions/filters/http/common/fuzz/uber_per_filter.cc @@ -104,9 +104,13 @@ void cleanTapConfig(Protobuf::Message* message) { true); } // TODO(samflattery): remove once StreamingGrpcSink is implemented + // a static config filter is required to have one sink, but since validation isn't performed on + // the filter until after this function runs, we have to manually check that there are sinks + // before checking that they are not StreamingGrpc else if (config.common_config().config_type_case() == envoy::extensions::common::tap::v3::CommonExtensionConfig::ConfigTypeCase:: kStaticConfig && + !config.common_config().static_config().output_config().sinks().empty() && config.common_config() .static_config() .output_config() @@ -129,7 +133,7 @@ void UberFilterFuzzer::cleanFuzzedConfig(absl::string_view filter_name, } else if (name == HttpFilterNames::get().Squash) { cleanAttachmentTemplate(message); } else if (name == HttpFilterNames::get().Tap) { - // TapDS oneof field not implemented. + // TapDS oneof field and OutputSinkType StreamingGrpc not implemented cleanTapConfig(message); } if (filter_name == HttpFilterNames::get().JwtAuthn) { From 142b27533ba110afc13651bd75d05b4e25fa1c0d Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Thu, 23 Jul 2020 06:00:06 -0700 Subject: [PATCH 726/909] test: fix admin drain flake (#12243) The stat is incremented before the socket close. 
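As a purely illustrative aside (not code from this patch): the body of the new waitForPortAvailable() helper is elided from the integration.cc hunk below, but a helper of roughly this shape could poll the port using the same socket utilities as the EXPECT_NO_THROW checks it replaces, assuming a failed bind surfaces as an EnvoyException.

// Sketch only; the retry cadence and error handling here are assumptions, not the patch's actual body.
AssertionResult BaseIntegrationTest::waitForPortAvailable(uint32_t port,
                                                          std::chrono::milliseconds timeout) {
  const auto deadline = std::chrono::steady_clock::now() + timeout;
  while (std::chrono::steady_clock::now() < deadline) {
    try {
      // Binding succeeds only once the drained listener has fully released the port.
      Network::TcpListenSocket socket(
          Network::Utility::getAddressWithPort(
              *Network::Test::getCanonicalLoopbackAddress(version_), port),
          nullptr, true);
      return AssertionSuccess();
    } catch (const EnvoyException&) {
      // Port is still held by the old listener; back off briefly and retry.
      std::this_thread::sleep_for(std::chrono::milliseconds(10));
    }
  }
  return AssertionFailure() << "timed out waiting for port " << port << " to become available";
}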
Risk Level: None Testing: N/A Docs Changes: N/A Release Notes: N/A Signed-off-by: Matt Klein --- .../integration/drain_close_integration_test.cc | 10 ++-------- test/integration/http_integration.cc | 5 +---- test/integration/integration.cc | 17 +++++++++++++++++ test/integration/integration.h | 4 ++++ 4 files changed, 24 insertions(+), 12 deletions(-) diff --git a/test/integration/drain_close_integration_test.cc b/test/integration/drain_close_integration_test.cc index a702e94068d8..51d6e95de1a2 100644 --- a/test/integration/drain_close_integration_test.cc +++ b/test/integration/drain_close_integration_test.cc @@ -126,10 +126,7 @@ TEST_P(DrainCloseIntegrationTest, AdminGracefulDrain) { EXPECT_EQ(admin_response->headers().Status()->value().getStringView(), "200"); test_server_->waitForCounterEq("listener_manager.listener_stopped", 1); - EXPECT_NO_THROW(Network::TcpListenSocket( - Network::Utility::getAddressWithPort(*Network::Test::getCanonicalLoopbackAddress(version_), - http_port), - nullptr, true)); + ASSERT_TRUE(waitForPortAvailable(http_port)); } TEST_P(DrainCloseIntegrationTest, RepeatedAdminGracefulDrain) { @@ -169,10 +166,7 @@ TEST_P(DrainCloseIntegrationTest, RepeatedAdminGracefulDrain) { EXPECT_EQ(admin_response->headers().Status()->value().getStringView(), "200"); test_server_->waitForCounterEq("listener_manager.listener_stopped", 1); - EXPECT_NO_THROW(Network::TcpListenSocket( - Network::Utility::getAddressWithPort(*Network::Test::getCanonicalLoopbackAddress(version_), - http_port), - nullptr, true)); + ASSERT_TRUE(waitForPortAvailable(http_port)); } INSTANTIATE_TEST_SUITE_P(Protocols, DrainCloseIntegrationTest, diff --git a/test/integration/http_integration.cc b/test/integration/http_integration.cc index b3ceabccef0e..a2d317be5637 100644 --- a/test/integration/http_integration.cc +++ b/test/integration/http_integration.cc @@ -1253,10 +1253,7 @@ void HttpIntegrationTest::testAdminDrain(Http::CodecClient::Type admin_request_t // This does not work for HTTP/3 because the port is not closed until the listener is completely // destroyed. TODO(danzh) Match TCP behavior as much as possible. if (downstreamProtocol() != Http::CodecClient::Type::HTTP3) { - EXPECT_NO_THROW(Network::TcpListenSocket( - Network::Utility::getAddressWithPort(*Network::Test::getCanonicalLoopbackAddress(version_), - http_port), - nullptr, true)); + ASSERT_TRUE(waitForPortAvailable(http_port)); } } diff --git a/test/integration/integration.cc b/test/integration/integration.cc index 2e4b846870e4..f471ca50bbb8 100644 --- a/test/integration/integration.cc +++ b/test/integration/integration.cc @@ -694,6 +694,23 @@ AssertionResult compareSets(const std::set& set1, const std::set& expected_resource_subscriptions, diff --git a/test/integration/integration.h b/test/integration/integration.h index d345b5051095..6f3825c054d8 100644 --- a/test/integration/integration.h +++ b/test/integration/integration.h @@ -241,6 +241,10 @@ class BaseIntegrationTest : protected Logger::Loggable { void createXdsConnection(); void cleanUpXdsConnection(); + // See if a port can be successfully bound within the given timeout. + ABSL_MUST_USE_RESULT AssertionResult waitForPortAvailable( + uint32_t port, std::chrono::milliseconds timeout = TestUtility::DefaultTimeout); + // Helpers for setting up expectations and making the internal gears turn for xDS request/response // sending/receiving to/from the (imaginary) xDS server. 
You should almost always use // compareDiscoveryRequest() and sendDiscoveryResponse(), but the SotW/delta-specific versions are From 21e41f345bf11a7225dcf149a2c1b65de0d08dd9 Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Thu, 23 Jul 2020 10:36:58 -0700 Subject: [PATCH 727/909] http: restore previous HeaderMap::remove() behavior (#12244) https://github.com/envoyproxy/envoy/pull/12160 changed the behavior of remove() to not first look in the inline header map for a header. This is a subtle change in behavior that specifically breaks attempting to remove ":authority" via the "host" mapping. This restores that behavior and is thus a bug fix and low risk. Signed-off-by: Matt Klein --- source/common/http/header_map_impl.cc | 15 +++++++++++---- test/common/http/header_map_impl_test.cc | 14 ++++++++++++++ 2 files changed, 25 insertions(+), 4 deletions(-) diff --git a/source/common/http/header_map_impl.cc b/source/common/http/header_map_impl.cc index c97d51e810f1..ce63493486b7 100644 --- a/source/common/http/header_map_impl.cc +++ b/source/common/http/header_map_impl.cc @@ -508,10 +508,17 @@ size_t HeaderMapImpl::removeIf(const HeaderMap::HeaderMatchPredicate& predicate) } size_t HeaderMapImpl::remove(const LowerCaseString& key) { - // TODO(mattklein123): When the lazy map is implemented we can stop using removeIf() here. - return HeaderMapImpl::removeIf([&key](const HeaderEntry& entry) -> bool { - return key.get() == entry.key().getStringView(); - }); + auto lookup = staticLookup(key.get()); + if (lookup.has_value()) { + const size_t old_size = headers_.size(); + removeInline(lookup.value().entry_); + return old_size - headers_.size(); + } else { + // TODO(mattklein123): When the lazy map is implemented we can stop using removeIf() here. + return HeaderMapImpl::removeIf([&key](const HeaderEntry& entry) -> bool { + return key.get() == entry.key().getStringView(); + }); + } } size_t HeaderMapImpl::removePrefix(const LowerCaseString& prefix) { diff --git a/test/common/http/header_map_impl_test.cc b/test/common/http/header_map_impl_test.cc index 2862ae564c20..ad9d1ccb912c 100644 --- a/test/common/http/header_map_impl_test.cc +++ b/test/common/http/header_map_impl_test.cc @@ -520,6 +520,20 @@ TEST(HeaderMapImplTest, Remove) { EXPECT_EQ(0UL, headers.remove(Headers::get().ContentLength)); } +TEST(HeaderMapImplTest, RemoveHost) { + TestRequestHeaderMapImpl headers; + headers.setHost("foo"); + EXPECT_EQ("foo", headers.get_("host")); + EXPECT_EQ("foo", headers.get_(":authority")); + // Make sure that when we remove by "host" without using the inline functions, the mapping to + // ":authority" still takes place. 
+ // https://github.com/envoyproxy/envoy/pull/12160 + EXPECT_EQ(1UL, headers.remove("host")); + EXPECT_EQ("", headers.get_("host")); + EXPECT_EQ("", headers.get_(":authority")); + EXPECT_EQ(nullptr, headers.Host()); +} + TEST(HeaderMapImplTest, RemoveIf) { LowerCaseString key1 = LowerCaseString("X-postfix-foo"); LowerCaseString key2 = LowerCaseString("X-postfix-"); From 69816f3c5b7704f520271020e7c45c8c6d7f7384 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Thu, 23 Jul 2020 14:33:50 -0400 Subject: [PATCH 728/909] test: fixing a double close bug (#12255) Signed-off-by: Alyssa Wilk --- test/integration/fake_upstream.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/test/integration/fake_upstream.cc b/test/integration/fake_upstream.cc index a6c94d91a0c4..68f4ab559bdb 100644 --- a/test/integration/fake_upstream.cc +++ b/test/integration/fake_upstream.cc @@ -318,6 +318,9 @@ FakeHttpConnection::FakeHttpConnection( } AssertionResult FakeConnectionBase::close(std::chrono::milliseconds timeout) { + if (!shared_connection_.connected()) { + return AssertionSuccess(); + } return shared_connection_.executeOnDispatcher( [](Network::Connection& connection) { connection.close(Network::ConnectionCloseType::FlushWrite); From 8d4e7053aeb8a379d05b4b202cd8c4e54a24dd10 Mon Sep 17 00:00:00 2001 From: htuch Date: Thu, 23 Jul 2020 17:28:52 -0400 Subject: [PATCH 729/909] http: tighten handling of non-{100,101} 1xx and multiple 100 headers. (#12037) This PR provides intentional semantics for: Multiple-100 headers; these are always coalesced to a single 100 header when proxying. 101 headers: these are always passed to decodeHeaders(). Non-{100,101} 1xx headers, e.g. 102/103, are always ignored by Envoy. UpstreamRequest is responsible for guaranteeing that HCM observes the above properties. Codecs should pass 100 headers to decode100ContinueHeaders and all other 1xx to decodeHeaders. This PR follows the discussion in #11433 (comment). Risk level: Medium Testing: Unit tests for codecs and UpstreamRequest added. Integration tests for non-101 and multiple 101 headers added. Fixes OSS-fuzz issue https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=21628. 
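For filter authors, the practical upshot can be summarized with the following illustrative sketch (not part of this patch; the PassThroughEncoderFilter base class and the ExampleEncoderFilter name are assumptions used only for illustration):

// Sketch of what the clarified 1xx contract lets an encoder filter assume.
class ExampleEncoderFilter : public Http::PassThroughEncoderFilter {
public:
  Http::FilterHeadersStatus encode100ContinueHeaders(Http::ResponseHeaderMap&) override {
    // Invoked at most once per stream: repeated upstream 100s are coalesced by the router
    // filter before they reach encoder filters or the HCM.
    return Http::FilterHeadersStatus::Continue;
  }
  Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap& headers, bool) override {
    // The only 1xx that can appear here is 101 Switching Protocols, as the final headers of an
    // upgrade; 102/103 and other informational responses are dropped in UpstreamRequest and
    // never reach the filter chain.
    ASSERT(!Http::CodeUtility::is1xx(Http::Utility::getResponseStatus(headers)) ||
           Http::Utility::getResponseStatus(headers) == 101);
    return Http::FilterHeadersStatus::Continue;
  }
};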
Fixes #11433 Signed-off-by: Harvey Tuch --- .../http/http_conn_man/stats.rst | 1 - docs/root/version_history/current.rst | 1 + include/envoy/http/filter.h | 16 +++- source/common/http/conn_manager_impl.cc | 5 + source/common/http/http1/codec_impl.cc | 26 +++-- source/common/http/http1/codec_impl.h | 4 +- source/common/http/http1/codec_impl_legacy.cc | 24 +++-- source/common/http/http1/codec_impl_legacy.h | 4 +- source/common/http/http2/codec_impl.cc | 48 ++++------ source/common/http/http2/codec_impl.h | 14 +-- source/common/http/http2/codec_impl_legacy.cc | 48 ++++------ source/common/http/http2/codec_impl_legacy.h | 14 +-- source/common/http/http2/codec_stats.h | 1 - source/common/router/router.cc | 12 ++- source/common/router/router.h | 4 +- source/common/router/upstream_request.cc | 18 +++- ...nn_manager_impl_fuzz_test-5701624673861632 | 30 ++++++ .../http/conn_manager_impl_fuzz_test.cc | 13 ++- test/common/http/http1/codec_impl_test.cc | 48 +++++++++- test/common/http/http2/codec_impl_test.cc | 94 ++++++++++++++----- test/common/router/BUILD | 9 ++ test/common/router/router_test.cc | 52 ++++++++++ test/common/router/upstream_request_test.cc | 48 ++++++++++ test/extensions/upstreams/http/tcp/BUILD | 2 +- .../http/tcp/upstream_request_test.cc | 71 +------------- test/integration/http_integration.cc | 19 +++- test/integration/http_integration.h | 5 +- test/integration/integration_test.cc | 6 +- test/integration/protocol_integration_test.cc | 16 +++- test/mocks/router/BUILD | 12 +++ test/mocks/router/router_filter_interface.cc | 29 ++++++ test/mocks/router/router_filter_interface.h | 63 +++++++++++++ 32 files changed, 543 insertions(+), 214 deletions(-) create mode 100644 test/common/http/conn_manager_impl_corpus/clusterfuzz-testcase-minimized-conn_manager_impl_fuzz_test-5701624673861632 create mode 100644 test/common/router/upstream_request_test.cc create mode 100644 test/mocks/router/router_filter_interface.cc create mode 100644 test/mocks/router/router_filter_interface.h diff --git a/docs/root/configuration/http/http_conn_man/stats.rst b/docs/root/configuration/http/http_conn_man/stats.rst index 2210bfc6dd5c..b8d4bf23591f 100644 --- a/docs/root/configuration/http/http_conn_man/stats.rst +++ b/docs/root/configuration/http/http_conn_man/stats.rst @@ -136,7 +136,6 @@ All http2 statistics are rooted at *http2.* requests_rejected_with_underscores_in_headers, Counter, Total numbers of rejected requests due to header names containing underscores. This action is configured by setting the :ref:`headers_with_underscores_action config setting `. rx_messaging_error, Counter, Total number of invalid received frames that violated `section 8 `_ of the HTTP/2 spec. This will result in a *tx_reset* rx_reset, Counter, Total number of reset stream frames received by Envoy - too_many_header_frames, Counter, Total number of times an HTTP2 connection is reset due to receiving too many headers frames. 
Envoy currently supports proxying at most one header frame for 100-Continue one non-100 response code header frame and one frame with trailers trailers, Counter, Total number of trailers seen on requests coming from downstream tx_flush_timeout, Counter, Total number of :ref:`stream idle timeouts ` waiting for open stream window to flush the remainder of a stream tx_reset, Counter, Total number of reset stream frames transmitted by Envoy diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index e48bd61afed4..e2fcea52f2c2 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -12,6 +12,7 @@ Minor Behavior Changes * compressor: always insert `Vary` headers for compressible resources even if it's decided not to compress a response due to incompatible `Accept-Encoding` value. The `Vary` header needs to be inserted to let a caching proxy in front of Envoy know that the requested resource still can be served with compression applied. * http: added :ref:`headers_to_add ` to :ref:`local reply mapper ` to allow its users to add/append/override response HTTP headers to local replies. * http: added HCM level configuration of :ref:`error handling on invalid messaging ` which substantially changes Envoy's behavior when encountering invalid HTTP/1.1 defaulting to closing the connection instead of allowing reuse. This can temporarily be reverted by setting `envoy.reloadable_features.hcm_stream_error_on_invalid_message` to false, or permanently reverted by setting the :ref:`HCM option ` to true to restore prior HTTP/1.1 beavior and setting the *new* HTTP/2 configuration :ref:`override_stream_error_on_invalid_http_message ` to false to retain prior HTTP/2 behavior. +* http: clarified and enforced 1xx handling. Multiple 100-continue headers are coalesced when proxying. 1xx headers other than {100, 101} are dropped. * http: fixed the 100-continue response path to properly handle upstream failure by sending 5xx responses. This behavior can be temporarily reverted by setting `envoy.reloadable_features.allow_500_after_100` to false. * http: the per-stream FilterState maintained by the HTTP connection manager will now provide read/write access to the downstream connection FilterState. As such, code that relies on interacting with this might see a change in behavior. diff --git a/include/envoy/http/filter.h b/include/envoy/http/filter.h index a7967982ffab..cee23a153616 100644 --- a/include/envoy/http/filter.h +++ b/include/envoy/http/filter.h @@ -359,9 +359,10 @@ class StreamDecoderFilterCallbacks : public virtual StreamFilterCallbacks { /** * Called with 100-Continue headers to be encoded. * - * This is not folded into encodeHeaders because most Envoy users and filters - * will not be proxying 100-continue and with it split out, can ignore the - * complexity of multiple encodeHeaders calls. + * This is not folded into encodeHeaders because most Envoy users and filters will not be proxying + * 100-continue and with it split out, can ignore the complexity of multiple encodeHeaders calls. + * + * This must not be invoked more than once per request. * * @param headers supplies the headers to be encoded. */ @@ -373,6 +374,9 @@ class StreamDecoderFilterCallbacks : public virtual StreamFilterCallbacks { * The connection manager inspects certain pseudo headers that are not actually sent downstream. 
* - See source/common/http/headers.h * + * The only 1xx that may be provided to encodeHeaders() is a 101 upgrade, which will be the final + * encodeHeaders() for a response. + * * @param headers supplies the headers to be encoded. * @param end_stream supplies whether this is a header only request/response. */ @@ -718,6 +722,8 @@ class StreamEncoderFilter : public StreamFilterBase { * will not be proxying 100-continue and with it split out, can ignore the * complexity of multiple encodeHeaders calls. * + * This will only be invoked once per request. + * * @param headers supplies the 100-continue response headers to be encoded. * @return FilterHeadersStatus determines how filter chain iteration proceeds. * @@ -726,6 +732,10 @@ class StreamEncoderFilter : public StreamFilterBase { /** * Called with headers to be encoded, optionally indicating end of stream. + * + * The only 1xx that may be provided to encodeHeaders() is a 101 upgrade, which will be the final + * encodeHeaders() for a response. + * * @param headers supplies the headers to be encoded. * @param end_stream supplies whether this is a header only request/response. * @return FilterHeadersStatus determines how filter chain iteration proceeds. diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index c633ff28ca68..78cfb69a599e 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -1642,6 +1642,8 @@ void ConnectionManagerImpl::ActiveStream::encode100ContinueHeaders( ActiveStreamEncoderFilter* filter, ResponseHeaderMap& headers) { resetIdleTimer(); ASSERT(connection_manager_.config_.proxy100Continue()); + // The caller must guarantee that encode100ContinueHeaders() is invoked at most once. + ASSERT(!state_.has_continue_headers_ || filter != nullptr); // Make sure commonContinue continues encode100ContinueHeaders. state_.has_continue_headers_ = true; @@ -1694,6 +1696,9 @@ void ConnectionManagerImpl::ActiveStream::maybeContinueEncoding( void ConnectionManagerImpl::ActiveStream::encodeHeaders(ActiveStreamEncoderFilter* filter, ResponseHeaderMap& headers, bool end_stream) { + // See encodeHeaders() comments in include/envoy/http/filter.h for why the 1xx precondition holds. + ASSERT(!CodeUtility::is1xx(Utility::getResponseStatus(headers)) || + Utility::getResponseStatus(headers) == enumToInt(Http::Code::SwitchingProtocols)); resetIdleTimer(); disarmRequestTimeout(); diff --git a/source/common/http/http1/codec_impl.cc b/source/common/http/http1/codec_impl.cc index d7be4e2ac897..596540e56a66 100644 --- a/source/common/http/http1/codec_impl.cc +++ b/source/common/http/http1/codec_impl.cc @@ -1088,6 +1088,8 @@ RequestEncoder& ClientConnectionImpl::newStream(ResponseDecoder& response_decode } int ClientConnectionImpl::onHeadersComplete() { + ENVOY_CONN_LOG(trace, "status_code {}", connection_, parser_.status_code); + // Handle the case where the client is closing a kept alive connection (by sending a 408 // with a 'Connection: close' header). In this case we just let response flush out followed // by the remote close. @@ -1133,20 +1135,24 @@ int ClientConnectionImpl::onHeadersComplete() { } } - if (parser_.status_code == 100) { - // http-parser treats 100 continue headers as their own complete response. - // Swallow the spurious onMessageComplete and continue processing. 
- ignore_message_complete_for_100_continue_ = true; + if (parser_.status_code == enumToInt(Http::Code::Continue)) { pending_response_.value().decoder_->decode100ContinueHeaders(std::move(headers)); - - // Reset to ensure no information from the continue headers is used for the response headers - // in case the callee does not move the headers out. - headers_or_trailers_.emplace(nullptr); } else if (cannotHaveBody() && !handling_upgrade_) { deferred_end_stream_headers_ = true; } else { pending_response_.value().decoder_->decodeHeaders(std::move(headers), false); } + + // http-parser treats 1xx headers as their own complete response. Swallow the spurious + // onMessageComplete and continue processing for purely informational headers. + // 101-SwitchingProtocols is exempt as all data after the header is proxied through after + // upgrading. + if (CodeUtility::is1xx(parser_.status_code) && + parser_.status_code != enumToInt(Http::Code::SwitchingProtocols)) { + ignore_message_complete_for_1xx_ = true; + // Reset to ensure no information from the 1xx headers is used for the response headers. + headers_or_trailers_.emplace(nullptr); + } } // Here we deal with cases where the response cannot have a body, but http_parser does not deal @@ -1171,8 +1177,8 @@ void ClientConnectionImpl::onBody(Buffer::Instance& data) { void ClientConnectionImpl::onMessageComplete() { ENVOY_CONN_LOG(trace, "message complete", connection_); - if (ignore_message_complete_for_100_continue_) { - ignore_message_complete_for_100_continue_ = false; + if (ignore_message_complete_for_1xx_) { + ignore_message_complete_for_1xx_ = false; return; } if (pending_response_.has_value()) { diff --git a/source/common/http/http1/codec_impl.h b/source/common/http/http1/codec_impl.h index f21231d71fa5..c74c0adae87c 100644 --- a/source/common/http/http1/codec_impl.h +++ b/source/common/http/http1/codec_impl.h @@ -584,8 +584,8 @@ class ClientConnectionImpl : public ClientConnection, public ConnectionImpl { // the response is complete. The existence of this variable is hard to reason about and it should // be combined with pending_response_ somehow in a follow up cleanup. bool pending_response_done_{true}; - // Set true between receiving 100-Continue headers and receiving the spurious onMessageComplete. - bool ignore_message_complete_for_100_continue_{}; + // Set true between receiving non-101 1xx headers and receiving the spurious onMessageComplete. + bool ignore_message_complete_for_1xx_{}; // TODO(mattklein123): This should be a member of PendingResponse but this change needs dedicated // thought as some of the reset and no header code paths make this difficult. Headers are // populated on message begin. Trailers are populated when the switch to trailer processing is diff --git a/source/common/http/http1/codec_impl_legacy.cc b/source/common/http/http1/codec_impl_legacy.cc index 82976e869ff4..cabba6db5dda 100644 --- a/source/common/http/http1/codec_impl_legacy.cc +++ b/source/common/http/http1/codec_impl_legacy.cc @@ -1138,20 +1138,24 @@ int ClientConnectionImpl::onHeadersComplete() { } } - if (parser_.status_code == 100) { - // http-parser treats 100 continue headers as their own complete response. - // Swallow the spurious onMessageComplete and continue processing. 
- ignore_message_complete_for_100_continue_ = true; + if (parser_.status_code == enumToInt(Http::Code::Continue)) { pending_response_.value().decoder_->decode100ContinueHeaders(std::move(headers)); - - // Reset to ensure no information from the continue headers is used for the response headers - // in case the callee does not move the headers out. - headers_or_trailers_.emplace(nullptr); } else if (cannotHaveBody() && !handling_upgrade_) { deferred_end_stream_headers_ = true; } else { pending_response_.value().decoder_->decodeHeaders(std::move(headers), false); } + + // http-parser treats 1xx headers as their own complete response. Swallow the spurious + // onMessageComplete and continue processing for purely informational headers. + // 101-SwitchingProtocols is exempt as all data after the header is proxied through after + // upgrading. + if (CodeUtility::is1xx(parser_.status_code) && + parser_.status_code != enumToInt(Http::Code::SwitchingProtocols)) { + ignore_message_complete_for_1xx_ = true; + // Reset to ensure no information from the 1xx headers is used for the response headers. + headers_or_trailers_.emplace(nullptr); + } } // Here we deal with cases where the response cannot have a body, but http_parser does not deal @@ -1176,8 +1180,8 @@ void ClientConnectionImpl::onBody(Buffer::Instance& data) { void ClientConnectionImpl::onMessageComplete() { ENVOY_CONN_LOG(trace, "message complete", connection_); - if (ignore_message_complete_for_100_continue_) { - ignore_message_complete_for_100_continue_ = false; + if (ignore_message_complete_for_1xx_) { + ignore_message_complete_for_1xx_ = false; return; } if (pending_response_.has_value()) { diff --git a/source/common/http/http1/codec_impl_legacy.h b/source/common/http/http1/codec_impl_legacy.h index f5e9811ede87..622d9441459b 100644 --- a/source/common/http/http1/codec_impl_legacy.h +++ b/source/common/http/http1/codec_impl_legacy.h @@ -588,8 +588,8 @@ class ClientConnectionImpl : public ClientConnection, public ConnectionImpl { // the response is complete. The existence of this variable is hard to reason about and it should // be combined with pending_response_ somehow in a follow up cleanup. bool pending_response_done_{true}; - // Set true between receiving 100-Continue headers and receiving the spurious onMessageComplete. - bool ignore_message_complete_for_100_continue_{}; + // Set true between receiving non-101 1xx headers and receiving the spurious onMessageComplete. + bool ignore_message_complete_for_1xx_{}; // TODO(mattklein123): This should be a member of PendingResponse but this change needs dedicated // thought as some of the reset and no header code paths make this difficult. Headers are // populated on message begin. 
Trailers are populated when the switch to trailer processing is diff --git a/source/common/http/http2/codec_impl.cc b/source/common/http/http2/codec_impl.cc index 87b62d644f94..9e6b5140beaa 100644 --- a/source/common/http/http2/codec_impl.cc +++ b/source/common/http/http2/codec_impl.cc @@ -99,7 +99,7 @@ template static T* removeConst(const void* object) { ConnectionImpl::StreamImpl::StreamImpl(ConnectionImpl& parent, uint32_t buffer_limit) : parent_(parent), local_end_stream_sent_(false), remote_end_stream_(false), - data_deferred_(false), waiting_for_non_informational_headers_(false), + data_deferred_(false), received_noninformational_headers_(false), pending_receive_buffer_high_watermark_called_(false), pending_send_buffer_high_watermark_called_(false), reset_due_to_messaging_error_(false) { parent_.stats_.streams_active_.inc(); @@ -269,18 +269,20 @@ void ConnectionImpl::StreamImpl::pendingRecvBufferLowWatermark() { readDisable(false); } -void ConnectionImpl::ClientStreamImpl::decodeHeaders(bool allow_waiting_for_informational_headers) { +void ConnectionImpl::ClientStreamImpl::decodeHeaders() { auto& headers = absl::get(headers_or_trailers_); - if (allow_waiting_for_informational_headers && - CodeUtility::is1xx(Http::Utility::getResponseStatus(*headers))) { - waiting_for_non_informational_headers_ = true; - } + const uint64_t status = Http::Utility::getResponseStatus(*headers); if (!upgrade_type_.empty() && headers->Status()) { Http::Utility::transformUpgradeResponseFromH2toH1(*headers, upgrade_type_); } - if (headers->Status()->value() == "100") { + // Non-informational headers are non-1xx OR 101-SwitchingProtocols, since 101 implies that further + // proxying is on an upgrade path. + received_noninformational_headers_ = + !CodeUtility::is1xx(status) || status == enumToInt(Http::Code::SwitchingProtocols); + + if (status == enumToInt(Http::Code::Continue)) { ASSERT(!remote_end_stream_); response_decoder_.decode100ContinueHeaders(std::move(headers)); } else { @@ -293,8 +295,7 @@ void ConnectionImpl::ClientStreamImpl::decodeTrailers() { std::move(absl::get(headers_or_trailers_))); } -void ConnectionImpl::ServerStreamImpl::decodeHeaders(bool allow_waiting_for_informational_headers) { - ASSERT(!allow_waiting_for_informational_headers); +void ConnectionImpl::ServerStreamImpl::decodeHeaders() { auto& headers = absl::get(headers_or_trailers_); if (Http::Utility::isH2UpgradeRequest(*headers)) { Http::Utility::transformUpgradeRequestFromH2toH1(*headers); @@ -658,7 +659,7 @@ int ConnectionImpl::onFrameReceived(const nghttp2_frame* frame) { switch (frame->headers.cat) { case NGHTTP2_HCAT_RESPONSE: case NGHTTP2_HCAT_REQUEST: { - stream->decodeHeaders(frame->headers.cat == NGHTTP2_HCAT_RESPONSE); + stream->decodeHeaders(); break; } @@ -666,30 +667,15 @@ int ConnectionImpl::onFrameReceived(const nghttp2_frame* frame) { // It's possible that we are waiting to send a deferred reset, so only raise headers/trailers // if local is not complete. if (!stream->deferred_reset_) { - if (!stream->waiting_for_non_informational_headers_) { - if (!stream->remote_end_stream_) { - // This indicates we have received more headers frames than Envoy - // supports. Even if this is valid HTTP (something like 103 early hints) fail here - // rather than trying to push unexpected headers through the Envoy pipeline as that - // will likely result in Envoy crashing. 
- // It would be cleaner to reset the stream rather than reset the/ entire connection but - // it's also slightly more dangerous so currently we err on the side of safety. - stats_.too_many_header_frames_.inc(); - throw CodecProtocolException("Unexpected 'trailers' with no end stream."); - } else { - stream->decodeTrailers(); - } + if (nghttp2_session_check_server_session(session_) || + stream->received_noninformational_headers_) { + ASSERT(stream->remote_end_stream_); + stream->decodeTrailers(); } else { - ASSERT(!nghttp2_session_check_server_session(session_)); - stream->waiting_for_non_informational_headers_ = false; - - // Even if we have :status 100 in the client case in a response, when - // we received a 1xx to start out with, nghttp2 message checking - // guarantees proper flow here. - stream->decodeHeaders(false); + // We're a client session and still waiting for non-informational headers. + stream->decodeHeaders(); } } - break; } diff --git a/source/common/http/http2/codec_impl.h b/source/common/http/http2/codec_impl.h index 649f7e77a1f0..b19f9880a3db 100644 --- a/source/common/http/http2/codec_impl.h +++ b/source/common/http/http2/codec_impl.h @@ -248,7 +248,7 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable& final_headers, nghttp2_data_provider* provider) override; StreamDecoder& decoder() override { return response_decoder_; } - void decodeHeaders(bool allow_waiting_for_informational_headers) override; + void decodeHeaders() override; void decodeTrailers() override; HeaderMap& headers() override { if (absl::holds_alternative(headers_or_trailers_)) { @@ -317,10 +317,10 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable(ResponseHeaderMapImpl::create()); - } else { + if (received_noninformational_headers_) { headers_or_trailers_.emplace(ResponseTrailerMapImpl::create()); + } else { + headers_or_trailers_.emplace(ResponseHeaderMapImpl::create()); } } HeaderMapPtr cloneTrailers(const HeaderMap& trailers) override { @@ -355,7 +355,7 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable& final_headers, nghttp2_data_provider* provider) override; StreamDecoder& decoder() override { return *request_decoder_; } - void decodeHeaders(bool allow_waiting_for_informational_headers) override; + void decodeHeaders() override; void decodeTrailers() override; HeaderMap& headers() override { if (absl::holds_alternative(headers_or_trailers_)) { diff --git a/source/common/http/http2/codec_impl_legacy.cc b/source/common/http/http2/codec_impl_legacy.cc index 5e3df045f623..7f2f45978ae7 100644 --- a/source/common/http/http2/codec_impl_legacy.cc +++ b/source/common/http/http2/codec_impl_legacy.cc @@ -97,7 +97,7 @@ template static T* removeConst(const void* object) { ConnectionImpl::StreamImpl::StreamImpl(ConnectionImpl& parent, uint32_t buffer_limit) : parent_(parent), local_end_stream_sent_(false), remote_end_stream_(false), - data_deferred_(false), waiting_for_non_informational_headers_(false), + data_deferred_(false), received_noninformational_headers_(false), pending_receive_buffer_high_watermark_called_(false), pending_send_buffer_high_watermark_called_(false), reset_due_to_messaging_error_(false) { parent_.stats_.streams_active_.inc(); @@ -267,18 +267,20 @@ void ConnectionImpl::StreamImpl::pendingRecvBufferLowWatermark() { readDisable(false); } -void ConnectionImpl::ClientStreamImpl::decodeHeaders(bool allow_waiting_for_informational_headers) { +void ConnectionImpl::ClientStreamImpl::decodeHeaders() { auto& 
headers = absl::get(headers_or_trailers_); - if (allow_waiting_for_informational_headers && - CodeUtility::is1xx(Http::Utility::getResponseStatus(*headers))) { - waiting_for_non_informational_headers_ = true; - } + const uint64_t status = Http::Utility::getResponseStatus(*headers); if (!upgrade_type_.empty() && headers->Status()) { Http::Utility::transformUpgradeResponseFromH2toH1(*headers, upgrade_type_); } - if (headers->Status()->value() == "100") { + // Non-informational headers are non-1xx OR 101-SwitchingProtocols, since 101 implies that further + // proxying is on an upgrade path. + received_noninformational_headers_ = + !CodeUtility::is1xx(status) || status == enumToInt(Http::Code::SwitchingProtocols); + + if (status == enumToInt(Http::Code::Continue)) { ASSERT(!remote_end_stream_); response_decoder_.decode100ContinueHeaders(std::move(headers)); } else { @@ -291,8 +293,7 @@ void ConnectionImpl::ClientStreamImpl::decodeTrailers() { std::move(absl::get(headers_or_trailers_))); } -void ConnectionImpl::ServerStreamImpl::decodeHeaders(bool allow_waiting_for_informational_headers) { - ASSERT(!allow_waiting_for_informational_headers); +void ConnectionImpl::ServerStreamImpl::decodeHeaders() { auto& headers = absl::get(headers_or_trailers_); if (Http::Utility::isH2UpgradeRequest(*headers)) { Http::Utility::transformUpgradeRequestFromH2toH1(*headers); @@ -655,7 +656,7 @@ int ConnectionImpl::onFrameReceived(const nghttp2_frame* frame) { switch (frame->headers.cat) { case NGHTTP2_HCAT_RESPONSE: case NGHTTP2_HCAT_REQUEST: { - stream->decodeHeaders(frame->headers.cat == NGHTTP2_HCAT_RESPONSE); + stream->decodeHeaders(); break; } @@ -663,30 +664,15 @@ int ConnectionImpl::onFrameReceived(const nghttp2_frame* frame) { // It's possible that we are waiting to send a deferred reset, so only raise headers/trailers // if local is not complete. if (!stream->deferred_reset_) { - if (!stream->waiting_for_non_informational_headers_) { - if (!stream->remote_end_stream_) { - // This indicates we have received more headers frames than Envoy - // supports. Even if this is valid HTTP (something like 103 early hints) fail here - // rather than trying to push unexpected headers through the Envoy pipeline as that - // will likely result in Envoy crashing. - // It would be cleaner to reset the stream rather than reset the/ entire connection but - // it's also slightly more dangerous so currently we err on the side of safety. - stats_.too_many_header_frames_.inc(); - throw CodecProtocolException("Unexpected 'trailers' with no end stream."); - } else { - stream->decodeTrailers(); - } + if (nghttp2_session_check_server_session(session_) || + stream->received_noninformational_headers_) { + ASSERT(stream->remote_end_stream_); + stream->decodeTrailers(); } else { - ASSERT(!nghttp2_session_check_server_session(session_)); - stream->waiting_for_non_informational_headers_ = false; - - // Even if we have :status 100 in the client case in a response, when - // we received a 1xx to start out with, nghttp2 message checking - // guarantees proper flow here. - stream->decodeHeaders(false); + // We're a client session and still waiting for non-informational headers. 
+ stream->decodeHeaders(); } } - break; } diff --git a/source/common/http/http2/codec_impl_legacy.h b/source/common/http/http2/codec_impl_legacy.h index ebb40b18d8a7..26ad399541a1 100644 --- a/source/common/http/http2/codec_impl_legacy.h +++ b/source/common/http/http2/codec_impl_legacy.h @@ -249,7 +249,7 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable& final_headers, nghttp2_data_provider* provider) override; StreamDecoder& decoder() override { return response_decoder_; } - void decodeHeaders(bool allow_waiting_for_informational_headers) override; + void decodeHeaders() override; void decodeTrailers() override; HeaderMap& headers() override { if (absl::holds_alternative(headers_or_trailers_)) { @@ -318,10 +318,10 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable(ResponseHeaderMapImpl::create()); - } else { + if (received_noninformational_headers_) { headers_or_trailers_.emplace(ResponseTrailerMapImpl::create()); + } else { + headers_or_trailers_.emplace(ResponseHeaderMapImpl::create()); } } HeaderMapPtr cloneTrailers(const HeaderMap& trailers) override { @@ -356,7 +356,7 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable& final_headers, nghttp2_data_provider* provider) override; StreamDecoder& decoder() override { return *request_decoder_; } - void decodeHeaders(bool allow_waiting_for_informational_headers) override; + void decodeHeaders() override; void decodeTrailers() override; HeaderMap& headers() override { if (absl::holds_alternative(headers_or_trailers_)) { diff --git a/source/common/http/http2/codec_stats.h b/source/common/http/http2/codec_stats.h index 32a31365d9d5..05ea11bbe764 100644 --- a/source/common/http/http2/codec_stats.h +++ b/source/common/http/http2/codec_stats.h @@ -24,7 +24,6 @@ namespace Http2 { COUNTER(requests_rejected_with_underscores_in_headers) \ COUNTER(rx_messaging_error) \ COUNTER(rx_reset) \ - COUNTER(too_many_header_frames) \ COUNTER(trailers) \ COUNTER(tx_flush_timeout) \ COUNTER(tx_reset) \ diff --git a/source/common/router/router.cc b/source/common/router/router.cc index c7fd54d418ba..ab81ba482ddf 100644 --- a/source/common/router/router.cc +++ b/source/common/router/router.cc @@ -1125,7 +1125,17 @@ void Filter::onUpstream100ContinueHeaders(Http::ResponseHeaderMapPtr&& headers, // the complexity until someone asks for it. retry_state_.reset(); - callbacks_->encode100ContinueHeaders(std::move(headers)); + // We coalesce 100-continue headers here, to prevent encoder filters and HCM from having to worry + // about this. This is done in the router filter, rather than UpstreamRequest, since we want to + // potentially coalesce across retries and multiple upstream requests in the future, even though + // we currently don't support retry after 100. + // It's plausible that this functionality might need to move to HCM in the future for internal + // redirects, but we would need to maintain the "only call encode100ContinueHeaders() once" + // invariant. 
+ if (!downstream_100_continue_headers_encoded_) { + downstream_100_continue_headers_encoded_ = true; + callbacks_->encode100ContinueHeaders(std::move(headers)); + } } void Filter::resetAll() { diff --git a/source/common/router/router.h b/source/common/router/router.h index 7ba3e98e29fb..65ef129ccf3a 100644 --- a/source/common/router/router.h +++ b/source/common/router/router.h @@ -298,7 +298,8 @@ class Filter : Logger::Loggable, public RouterFilterInterface { public: Filter(FilterConfig& config) - : config_(config), final_upstream_request_(nullptr), downstream_response_started_(false), + : config_(config), final_upstream_request_(nullptr), + downstream_100_continue_headers_encoded_(false), downstream_response_started_(false), downstream_end_stream_(false), is_retry_(false), attempting_internal_redirect_with_complete_stream_(false) {} @@ -544,6 +545,7 @@ class Filter : Logger::Loggable, // list of cookies to add to upstream headers std::vector downstream_set_cookies_; + bool downstream_100_continue_headers_encoded_ : 1; bool downstream_response_started_ : 1; bool downstream_end_stream_ : 1; bool is_retry_ : 1; diff --git a/source/common/router/upstream_request.cc b/source/common/router/upstream_request.cc index c2db2ca31af8..6e722145cb45 100644 --- a/source/common/router/upstream_request.cc +++ b/source/common/router/upstream_request.cc @@ -119,6 +119,23 @@ void UpstreamRequest::decode100ContinueHeaders(Http::ResponseHeaderMapPtr&& head void UpstreamRequest::decodeHeaders(Http::ResponseHeaderMapPtr&& headers, bool end_stream) { ScopeTrackerScopeState scope(&parent_.callbacks()->scope(), parent_.callbacks()->dispatcher()); + // We drop 1xx other than 101 on the floor; 101 upgrade headers need to be passed to the client as + // part of the final response. 100-continue headers are handled in onUpstream100ContinueHeaders. + // + // We could in principle handle other headers here, but this might result in the double invocation + // of decodeHeaders() (once for informational, again for non-informational), which is likely an + // easy to miss corner case in the filter and HCM contract. + // + // This filtering is done early in upstream request, unlike 100 coalescing which is performed in + // the router filter, since the filtering only depends on the state of a single upstream, and we + // don't want to confuse accounting such as onFirstUpstreamRxByteReceived() with informational + // headers. + const uint64_t response_code = Http::Utility::getResponseStatus(*headers); + if (Http::CodeUtility::is1xx(response_code) && + response_code != enumToInt(Http::Code::SwitchingProtocols)) { + return; + } + // TODO(rodaine): This is actually measuring after the headers are parsed and not the first // byte. 
upstream_timing_.onFirstUpstreamRxByteReceived(parent_.callbacks()->dispatcher().timeSource()); @@ -128,7 +145,6 @@ void UpstreamRequest::decodeHeaders(Http::ResponseHeaderMapPtr&& headers, bool e if (!parent_.config().upstream_logs_.empty()) { upstream_headers_ = Http::createHeaderMap(*headers); } - const uint64_t response_code = Http::Utility::getResponseStatus(*headers); stream_info_.response_code_ = static_cast(response_code); if (paused_for_connect_ && response_code == 200) { diff --git a/test/common/http/conn_manager_impl_corpus/clusterfuzz-testcase-minimized-conn_manager_impl_fuzz_test-5701624673861632 b/test/common/http/conn_manager_impl_corpus/clusterfuzz-testcase-minimized-conn_manager_impl_fuzz_test-5701624673861632 new file mode 100644 index 000000000000..8cc7ae72c499 --- /dev/null +++ b/test/common/http/conn_manager_impl_corpus/clusterfuzz-testcase-minimized-conn_manager_impl_fuzz_test-5701624673861632 @@ -0,0 +1,30 @@ +actions { + new_stream { + request_headers { + headers { + key: ":path" + value: "/" + } + headers { + key: ":authority" + value: "foo.com" + } + } + } +} +actions { + stream_action { + response { + continue_headers { + } + } + } +} +actions { + stream_action { + response { + continue_headers { + } + } + } +} diff --git a/test/common/http/conn_manager_impl_fuzz_test.cc b/test/common/http/conn_manager_impl_fuzz_test.cc index ef957d113c00..bfca81e4145c 100644 --- a/test/common/http/conn_manager_impl_fuzz_test.cc +++ b/test/common/http/conn_manager_impl_fuzz_test.cc @@ -255,7 +255,12 @@ class FuzzStream { // course, it's the codecs must be robust to wire-level violations. We // explore these violations via MutateAction and SwapAction at the connection // buffer level. - enum class StreamState { PendingHeaders, PendingDataOrTrailers, Closed }; + enum class StreamState { + PendingHeaders, + PendingNonInformationalHeaders, + PendingDataOrTrailers, + Closed + }; FuzzStream(ConnectionManagerImpl& conn_manager, FuzzConfig& config, const HeaderMap& request_headers, @@ -455,11 +460,15 @@ class FuzzStream { Fuzz::fromHeaders(response_action.continue_headers())); headers->setReferenceKey(Headers::get().Status, "100"); decoder_filter_->callbacks_->encode100ContinueHeaders(std::move(headers)); + // We don't allow multiple 100-continue headers in HCM, UpstreamRequest is responsible + // for coalescing. + state = StreamState::PendingNonInformationalHeaders; } break; } case test::common::http::ResponseAction::kHeaders: { - if (state == StreamState::PendingHeaders) { + if (state == StreamState::PendingHeaders || + state == StreamState::PendingNonInformationalHeaders) { auto headers = std::make_unique( Fuzz::fromHeaders(response_action.headers())); // The client codec will ensure we always have a valid :status. diff --git a/test/common/http/http1/codec_impl_test.cc b/test/common/http/http1/codec_impl_test.cc index 258e89767011..9f21744cdd0a 100644 --- a/test/common/http/http1/codec_impl_test.cc +++ b/test/common/http/http1/codec_impl_test.cc @@ -2170,7 +2170,8 @@ TEST_P(Http1ClientConnectionImplTest, 204ResponseTransferEncodingNotAllowed) { } } -TEST_P(Http1ClientConnectionImplTest, 100Response) { +// 100 response followed by 200 results in a [decode100ContinueHeaders, decodeHeaders] sequence. 
+TEST_P(Http1ClientConnectionImplTest, ContinueHeaders) { initialize(); NiceMock response_decoder; @@ -2182,6 +2183,7 @@ TEST_P(Http1ClientConnectionImplTest, 100Response) { EXPECT_CALL(response_decoder, decodeData(_, _)).Times(0); Buffer::OwnedImpl initial_response("HTTP/1.1 100 Continue\r\n\r\n"); auto status = codec_->dispatch(initial_response); + EXPECT_TRUE(status.ok()); EXPECT_CALL(response_decoder, decodeHeaders_(_, false)); EXPECT_CALL(response_decoder, decodeData(_, _)).Times(0); @@ -2190,6 +2192,50 @@ TEST_P(Http1ClientConnectionImplTest, 100Response) { EXPECT_TRUE(status.ok()); } +// Multiple 100 responses are passed to the response encoder (who is responsible for coalescing). +TEST_P(Http1ClientConnectionImplTest, MultipleContinueHeaders) { + initialize(); + + NiceMock response_decoder; + Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder); + TestRequestHeaderMapImpl headers{{":method", "GET"}, {":path", "/"}, {":authority", "host"}}; + request_encoder.encodeHeaders(headers, true); + + EXPECT_CALL(response_decoder, decode100ContinueHeaders_(_)); + EXPECT_CALL(response_decoder, decodeData(_, _)).Times(0); + Buffer::OwnedImpl initial_response("HTTP/1.1 100 Continue\r\n\r\n"); + auto status = codec_->dispatch(initial_response); + EXPECT_TRUE(status.ok()); + + EXPECT_CALL(response_decoder, decode100ContinueHeaders_(_)); + EXPECT_CALL(response_decoder, decodeData(_, _)).Times(0); + Buffer::OwnedImpl another_100_response("HTTP/1.1 100 Continue\r\n\r\n"); + status = codec_->dispatch(another_100_response); + EXPECT_TRUE(status.ok()); + + EXPECT_CALL(response_decoder, decodeHeaders_(_, false)); + EXPECT_CALL(response_decoder, decodeData(_, _)).Times(0); + Buffer::OwnedImpl response("HTTP/1.1 200 OK\r\n\r\n"); + status = codec_->dispatch(response); + EXPECT_TRUE(status.ok()); +} + +// 101/102 headers etc. are passed to the response encoder (who is responsibly for deciding to +// upgrade, ignore, etc.). +TEST_P(Http1ClientConnectionImplTest, 1xxNonContinueHeaders) { + initialize(); + + NiceMock response_decoder; + Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder); + TestRequestHeaderMapImpl headers{{":method", "GET"}, {":path", "/"}, {":authority", "host"}}; + request_encoder.encodeHeaders(headers, true); + + EXPECT_CALL(response_decoder, decodeHeaders_(_, false)); + Buffer::OwnedImpl response("HTTP/1.1 102 Processing\r\n\r\n"); + auto status = codec_->dispatch(response); + EXPECT_TRUE(status.ok()); +} + // 101 Switching Protocol with Transfer-Encoding headers is barred by RFC 7230, Section 3.3.1. TEST_P(Http1ClientConnectionImplTest, 101ResponseTransferEncodingNotAllowed) { // By default, transfer-encoding is barred. diff --git a/test/common/http/http2/codec_impl_test.cc b/test/common/http/http2/codec_impl_test.cc index fa1c92346e73..c3c56839244b 100644 --- a/test/common/http/http2/codec_impl_test.cc +++ b/test/common/http/http2/codec_impl_test.cc @@ -360,6 +360,7 @@ TEST_P(Http2CodecImplTest, ShutdownNotice) { response_encoder_->encodeHeaders(response_headers, true); } +// 100 response followed by 200 results in a [decode100ContinueHeaders, decodeHeaders] sequence. TEST_P(Http2CodecImplTest, ContinueHeaders) { initialize(); @@ -377,6 +378,78 @@ TEST_P(Http2CodecImplTest, ContinueHeaders) { response_encoder_->encodeHeaders(response_headers, true); }; +// nghttp2 rejects trailers with :status. 
+TEST_P(Http2CodecImplTest, TrailerStatus) { + initialize(); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); + request_encoder_->encodeHeaders(request_headers, true); + + TestResponseHeaderMapImpl continue_headers{{":status", "100"}}; + EXPECT_CALL(response_decoder_, decode100ContinueHeaders_(_)); + response_encoder_->encode100ContinueHeaders(continue_headers); + + TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + EXPECT_CALL(response_decoder_, decodeHeaders_(_, false)); + response_encoder_->encodeHeaders(response_headers, false); + + // nghttp2 doesn't allow :status in trailers + EXPECT_THROW(response_encoder_->encode100ContinueHeaders(continue_headers), ClientCodecError); + EXPECT_EQ(1, client_stats_store_.counter("http2.rx_messaging_error").value()); +}; + +// Multiple 100 responses are passed to the response encoder (who is responsible for coalescing). +TEST_P(Http2CodecImplTest, MultipleContinueHeaders) { + initialize(); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); + request_encoder_->encodeHeaders(request_headers, true); + + TestResponseHeaderMapImpl continue_headers{{":status", "100"}}; + EXPECT_CALL(response_decoder_, decode100ContinueHeaders_(_)); + response_encoder_->encode100ContinueHeaders(continue_headers); + EXPECT_CALL(response_decoder_, decode100ContinueHeaders_(_)); + response_encoder_->encode100ContinueHeaders(continue_headers); + + TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + EXPECT_CALL(response_decoder_, decodeHeaders_(_, true)); + response_encoder_->encodeHeaders(response_headers, true); +}; + +// 101/102 headers etc. are passed to the response encoder (who is responsibly for deciding to +// upgrade, ignore, etc.). +TEST_P(Http2CodecImplTest, 1xxNonContinueHeaders) { + initialize(); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); + request_encoder_->encodeHeaders(request_headers, true); + + TestResponseHeaderMapImpl other_headers{{":status", "102"}}; + EXPECT_CALL(response_decoder_, decodeHeaders_(_, false)); + response_encoder_->encodeHeaders(other_headers, false); +}; + +// nghttp2 treats 101 inside an HTTP/2 stream as an invalid HTTP header field. 
+TEST_P(Http2CodecImplTest, Invalid101SwitchingProtocols) { + initialize(); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); + request_encoder_->encodeHeaders(request_headers, true); + + TestResponseHeaderMapImpl upgrade_headers{{":status", "101"}}; + EXPECT_CALL(response_decoder_, decodeHeaders_(_, _)).Times(0); + EXPECT_THROW(response_encoder_->encodeHeaders(upgrade_headers, false), ClientCodecError); + EXPECT_EQ(1, client_stats_store_.counter("http2.rx_messaging_error").value()); +} + TEST_P(Http2CodecImplTest, InvalidContinueWithFin) { initialize(); @@ -469,27 +542,6 @@ TEST_P(Http2CodecImplTest, InvalidRepeatContinueAllowed) { expectDetailsRequest("http2.violation.of.messaging.rule"); }; -TEST_P(Http2CodecImplTest, Invalid103) { - initialize(); - - TestRequestHeaderMapImpl request_headers; - HttpTestUtility::addDefaultHeaders(request_headers); - EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); - request_encoder_->encodeHeaders(request_headers, true); - - TestResponseHeaderMapImpl continue_headers{{":status", "100"}}; - EXPECT_CALL(response_decoder_, decode100ContinueHeaders_(_)); - response_encoder_->encode100ContinueHeaders(continue_headers); - - TestResponseHeaderMapImpl early_hint_headers{{":status", "103"}}; - EXPECT_CALL(response_decoder_, decodeHeaders_(_, false)); - response_encoder_->encodeHeaders(early_hint_headers, false); - - EXPECT_THROW_WITH_MESSAGE(response_encoder_->encodeHeaders(early_hint_headers, false), - ClientCodecError, "Unexpected 'trailers' with no end stream."); - EXPECT_EQ(1, client_stats_store_.counter("http2.too_many_header_frames").value()); -} - TEST_P(Http2CodecImplTest, Invalid204WithContentLength) { initialize(); diff --git a/test/common/router/BUILD b/test/common/router/BUILD index 518dfec196bb..660405054c32 100644 --- a/test/common/router/BUILD +++ b/test/common/router/BUILD @@ -356,3 +356,12 @@ envoy_cc_test( "//source/common/router:string_accessor_lib", ], ) + +envoy_cc_test( + name = "upstream_request_test", + srcs = ["upstream_request_test.cc"], + deps = [ + "//source/common/router:router_lib", + "//test/mocks/router:router_filter_interface", + ], +) diff --git a/test/common/router/router_test.cc b/test/common/router/router_test.cc index 0a1ef34fc1b2..cd00aaa9787c 100644 --- a/test/common/router/router_test.cc +++ b/test/common/router/router_test.cc @@ -3766,6 +3766,58 @@ TEST_F(RouterTest, RetryUpstreamResetResponseStarted) { callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); } +// The router filter is responsible for not propagating 100-continue headers after the initial 100. +TEST_F(RouterTest, Coalesce100ContinueHeaders) { + // Setup. + NiceMock encoder1; + Http::ResponseDecoder* response_decoder = nullptr; + EXPECT_CALL(cm_.conn_pool_, newStream(_, _)) + .WillOnce(Invoke( + [&](Http::ResponseDecoder& decoder, + Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* { + response_decoder = &decoder; + callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_, upstream_stream_info_); + return nullptr; + })); + expectResponseTimerCreate(); + + Http::TestRequestHeaderMapImpl headers; + HttpTestUtility::addDefaultHeaders(headers); + router_.decodeHeaders(headers, true); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); + + // Initial 100-continue, this is processed normally. 
+ EXPECT_CALL(callbacks_, encode100ContinueHeaders_(_)); + { + Http::ResponseHeaderMapPtr continue_headers( + new Http::TestResponseHeaderMapImpl{{":status", "100"}}); + response_decoder->decode100ContinueHeaders(std::move(continue_headers)); + } + EXPECT_EQ( + 1U, + cm_.thread_local_cluster_.cluster_.info_->stats_store_.counter("upstream_rq_100").value()); + + // No encode100ContinueHeaders() invocation for the second 100-continue (but we continue to track + // stats from upstream). + EXPECT_CALL(callbacks_, encode100ContinueHeaders_(_)).Times(0); + { + Http::ResponseHeaderMapPtr continue_headers( + new Http::TestResponseHeaderMapImpl{{":status", "100"}}); + response_decoder->decode100ContinueHeaders(std::move(continue_headers)); + } + EXPECT_EQ( + 2U, + cm_.thread_local_cluster_.cluster_.info_->stats_store_.counter("upstream_rq_100").value()); + + // Reset stream and cleanup. + EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, + putResult(Upstream::Outlier::Result::LocalOriginConnectFailed, _)); + encoder1.stream_.resetStream(Http::StreamResetReason::RemoteReset); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); +} + TEST_F(RouterTest, RetryUpstreamReset100ContinueResponseStarted) { NiceMock encoder1; Http::ResponseDecoder* response_decoder = nullptr; diff --git a/test/common/router/upstream_request_test.cc b/test/common/router/upstream_request_test.cc new file mode 100644 index 000000000000..72c48179290f --- /dev/null +++ b/test/common/router/upstream_request_test.cc @@ -0,0 +1,48 @@ +#include "common/router/upstream_request.h" + +#include "test/mocks/router/router_filter_interface.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::_; +using testing::NiceMock; + +namespace Envoy { +namespace Router { +namespace { + +class UpstreamRequestTest : public testing::Test { +public: + NiceMock router_filter_interface_; + UpstreamRequest upstream_request_{router_filter_interface_, + std::make_unique>()}; +}; + +// UpstreamRequest is responsible processing for passing 101 upgrade headers to onUpstreamHeaders. +TEST_F(UpstreamRequestTest, Decode101UpgradeHeaders) { + auto upgrade_headers = std::make_unique( + Http::TestResponseHeaderMapImpl({{":status", "101"}})); + EXPECT_CALL(router_filter_interface_, onUpstreamHeaders(_, _, _, _)); + upstream_request_.decodeHeaders(std::move(upgrade_headers), false); +} + +// UpstreamRequest is responsible for ignoring non-{100,101} 1xx headers. +TEST_F(UpstreamRequestTest, IgnoreOther1xxHeaders) { + auto other_headers = std::make_unique( + Http::TestResponseHeaderMapImpl({{":status", "102"}})); + EXPECT_CALL(router_filter_interface_, onUpstreamHeaders(_, _, _, _)).Times(0); + upstream_request_.decodeHeaders(std::move(other_headers), false); +} + +// UpstreamRequest is responsible processing for passing 200 upgrade headers to onUpstreamHeaders. 
+TEST_F(UpstreamRequestTest, Decode200UpgradeHeaders) { + auto response_headers = std::make_unique( + Http::TestResponseHeaderMapImpl({{":status", "200"}})); + EXPECT_CALL(router_filter_interface_, onUpstreamHeaders(_, _, _, _)); + upstream_request_.decodeHeaders(std::move(response_headers), false); +} + +} // namespace +} // namespace Router +} // namespace Envoy diff --git a/test/extensions/upstreams/http/tcp/BUILD b/test/extensions/upstreams/http/tcp/BUILD index 057aeff867b2..70bea0f75177 100644 --- a/test/extensions/upstreams/http/tcp/BUILD +++ b/test/extensions/upstreams/http/tcp/BUILD @@ -19,8 +19,8 @@ envoy_cc_test( "//source/extensions/upstreams/http/tcp:upstream_request_lib", "//test/common/http:common_lib", "//test/mocks:common_lib", - "//test/mocks/http:http_mocks", "//test/mocks/network:network_mocks", + "//test/mocks/router:router_filter_interface", "//test/mocks/router:router_mocks", "//test/mocks/server:factory_context_mocks", "//test/mocks/server:instance_mocks", diff --git a/test/extensions/upstreams/http/tcp/upstream_request_test.cc b/test/extensions/upstreams/http/tcp/upstream_request_test.cc index cb5ee535357d..1672f700f3c7 100644 --- a/test/extensions/upstreams/http/tcp/upstream_request_test.cc +++ b/test/extensions/upstreams/http/tcp/upstream_request_test.cc @@ -8,8 +8,8 @@ #include "test/common/http/common.h" #include "test/mocks/common.h" -#include "test/mocks/http/mocks.h" #include "test/mocks/router/mocks.h" +#include "test/mocks/router/router_filter_interface.h" #include "test/mocks/server/factory_context.h" #include "test/mocks/server/instance.h" #include "test/mocks/tcp/mocks.h" @@ -26,75 +26,6 @@ using testing::NiceMock; using testing::Return; using testing::ReturnRef; -namespace Envoy { -namespace Router { -namespace { - -class MockRouterFilterInterface : public RouterFilterInterface { -public: - MockRouterFilterInterface() - : config_("prefix.", context_, ShadowWriterPtr(new MockShadowWriter()), router_proto) { - auto cluster_info = new NiceMock(); - cluster_info->timeout_budget_stats_ = nullptr; - ON_CALL(*cluster_info, timeoutBudgetStats()).WillByDefault(Return(absl::nullopt)); - cluster_info_.reset(cluster_info); - ON_CALL(*this, callbacks()).WillByDefault(Return(&callbacks_)); - ON_CALL(*this, config()).WillByDefault(ReturnRef(config_)); - ON_CALL(*this, cluster()).WillByDefault(Return(cluster_info_)); - ON_CALL(*this, upstreamRequests()).WillByDefault(ReturnRef(requests_)); - EXPECT_CALL(callbacks_.dispatcher_, setTrackedObject(_)).Times(AnyNumber()); - ON_CALL(*this, routeEntry()).WillByDefault(Return(&route_entry_)); - ON_CALL(callbacks_, connection()).WillByDefault(Return(&client_connection_)); - route_entry_.connect_config_.emplace(RouteEntry::ConnectConfig()); - } - - MOCK_METHOD(void, onUpstream100ContinueHeaders, - (Envoy::Http::ResponseHeaderMapPtr && headers, UpstreamRequest& upstream_request)); - MOCK_METHOD(void, onUpstreamHeaders, - (uint64_t response_code, Envoy::Http::ResponseHeaderMapPtr&& headers, - UpstreamRequest& upstream_request, bool end_stream)); - MOCK_METHOD(void, onUpstreamData, - (Buffer::Instance & data, UpstreamRequest& upstream_request, bool end_stream)); - MOCK_METHOD(void, onUpstreamTrailers, - (Envoy::Http::ResponseTrailerMapPtr && trailers, UpstreamRequest& upstream_request)); - MOCK_METHOD(void, onUpstreamMetadata, (Envoy::Http::MetadataMapPtr && metadata_map)); - MOCK_METHOD(void, onUpstreamReset, - (Envoy::Http::StreamResetReason reset_reason, absl::string_view transport_failure, - UpstreamRequest& upstream_request)); 
- MOCK_METHOD(void, onUpstreamHostSelected, (Upstream::HostDescriptionConstSharedPtr host)); - MOCK_METHOD(void, onPerTryTimeout, (UpstreamRequest & upstream_request)); - MOCK_METHOD(void, onStreamMaxDurationReached, (UpstreamRequest & upstream_request)); - - MOCK_METHOD(Envoy::Http::StreamDecoderFilterCallbacks*, callbacks, ()); - MOCK_METHOD(Upstream::ClusterInfoConstSharedPtr, cluster, ()); - MOCK_METHOD(FilterConfig&, config, ()); - MOCK_METHOD(FilterUtility::TimeoutData, timeout, ()); - MOCK_METHOD(Envoy::Http::RequestHeaderMap*, downstreamHeaders, ()); - MOCK_METHOD(Envoy::Http::RequestTrailerMap*, downstreamTrailers, ()); - MOCK_METHOD(bool, downstreamResponseStarted, (), (const)); - MOCK_METHOD(bool, downstreamEndStream, (), (const)); - MOCK_METHOD(uint32_t, attemptCount, (), (const)); - MOCK_METHOD(const VirtualCluster*, requestVcluster, (), (const)); - MOCK_METHOD(const RouteEntry*, routeEntry, (), (const)); - MOCK_METHOD(const std::list&, upstreamRequests, (), (const)); - MOCK_METHOD(const UpstreamRequest*, finalUpstreamRequest, (), (const)); - MOCK_METHOD(TimeSource&, timeSource, ()); - - NiceMock callbacks_; - NiceMock route_entry_; - NiceMock client_connection_; - - envoy::extensions::filters::http::router::v3::Router router_proto; - NiceMock context_; - FilterConfig config_; - Upstream::ClusterInfoConstSharedPtr cluster_info_; - std::list requests_; -}; - -} // namespace -} // namespace Router -} // namespace Envoy - namespace Envoy { namespace Extensions { namespace Upstreams { diff --git a/test/integration/http_integration.cc b/test/integration/http_integration.cc index a2d317be5637..554aadb78602 100644 --- a/test/integration/http_integration.cc +++ b/test/integration/http_integration.cc @@ -853,8 +853,9 @@ void HttpIntegrationTest::testEnvoyHandling100Continue(bool additional_continue_ } } -void HttpIntegrationTest::testEnvoyProxying100Continue(bool continue_before_upstream_complete, - bool with_encoder_filter) { +void HttpIntegrationTest::testEnvoyProxying1xx(bool continue_before_upstream_complete, + bool with_encoder_filter, + bool with_multiple_1xx_headers) { if (with_encoder_filter) { // Because 100-continue only affects encoder filters, make sure it plays well with one. config_helper_.addFilter("name: envoy.filters.http.cors"); @@ -891,6 +892,13 @@ void HttpIntegrationTest::testEnvoyProxying100Continue(bool continue_before_upst ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); if (continue_before_upstream_complete) { + if (with_multiple_1xx_headers) { + upstream_request_->encode100ContinueHeaders( + Http::TestResponseHeaderMapImpl{{":status", "100"}}); + upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "102"}}, false); + upstream_request_->encode100ContinueHeaders( + Http::TestResponseHeaderMapImpl{{":status", "100"}}); + } // This case tests sending on 100-Continue headers before the client has sent all the // request data. 
upstream_request_->encode100ContinueHeaders( @@ -902,6 +910,13 @@ void HttpIntegrationTest::testEnvoyProxying100Continue(bool continue_before_upst ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); if (!continue_before_upstream_complete) { + if (with_multiple_1xx_headers) { + upstream_request_->encode100ContinueHeaders( + Http::TestResponseHeaderMapImpl{{":status", "100"}}); + upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "102"}}, false); + upstream_request_->encode100ContinueHeaders( + Http::TestResponseHeaderMapImpl{{":status", "100"}}); + } // This case tests forwarding 100-Continue after the client has sent all data. upstream_request_->encode100ContinueHeaders( Http::TestResponseHeaderMapImpl{{":status", "100"}}); diff --git a/test/integration/http_integration.h b/test/integration/http_integration.h index 4c04d5672507..30e898936f72 100644 --- a/test/integration/http_integration.h +++ b/test/integration/http_integration.h @@ -212,8 +212,9 @@ class HttpIntegrationTest : public BaseIntegrationTest { void testEnvoyHandling100Continue(bool additional_continue_from_upstream = false, const std::string& via = ""); - void testEnvoyProxying100Continue(bool continue_before_upstream_complete = false, - bool with_encoder_filter = false); + void testEnvoyProxying1xx(bool continue_before_upstream_complete = false, + bool with_encoder_filter = false, + bool with_multiple_1xx_headers = false); // HTTP/2 client tests. void testDownstreamResetBeforeResponseComplete(); diff --git a/test/integration/integration_test.cc b/test/integration/integration_test.cc index 37dce6d1a0e9..197c628be6c3 100644 --- a/test/integration/integration_test.cc +++ b/test/integration/integration_test.cc @@ -290,11 +290,11 @@ TEST_P(IntegrationTest, RouterUpstreamResponseBeforeRequestComplete) { } TEST_P(IntegrationTest, EnvoyProxyingEarly100ContinueWithEncoderFilter) { - testEnvoyProxying100Continue(true, true); + testEnvoyProxying1xx(true, true); } TEST_P(IntegrationTest, EnvoyProxyingLate100ContinueWithEncoderFilter) { - testEnvoyProxying100Continue(false, true); + testEnvoyProxying1xx(false, true); } // Regression test for https://github.com/envoyproxy/envoy/issues/10923. @@ -304,7 +304,7 @@ TEST_P(IntegrationTest, EnvoyProxying100ContinueWithDecodeDataPause) { typed_config: "@type": type.googleapis.com/google.protobuf.Empty )EOF"); - testEnvoyProxying100Continue(true); + testEnvoyProxying1xx(true); } // This is a regression for https://github.com/envoyproxy/envoy/issues/2715 and validates that a diff --git a/test/integration/protocol_integration_test.cc b/test/integration/protocol_integration_test.cc index 4176a1cd391c..1078a1bfe6ff 100644 --- a/test/integration/protocol_integration_test.cc +++ b/test/integration/protocol_integration_test.cc @@ -863,12 +863,20 @@ TEST_P(ProtocolIntegrationTest, EnvoyHandlingDuplicate100Continue) { testEnvoyHandling100Continue(true); } -TEST_P(ProtocolIntegrationTest, EnvoyProxyingEarly100Continue) { - testEnvoyProxying100Continue(true); +// 100-continue before the request completes. +TEST_P(ProtocolIntegrationTest, EnvoyProxyingEarly100Continue) { testEnvoyProxying1xx(true); } + +// Multiple 1xx before the request completes. +TEST_P(ProtocolIntegrationTest, EnvoyProxyingEarlyMultiple1xx) { + testEnvoyProxying1xx(true, false, true); } -TEST_P(ProtocolIntegrationTest, EnvoyProxyingLate100Continue) { - testEnvoyProxying100Continue(false); +// 100-continue after the request completes. 
+TEST_P(ProtocolIntegrationTest, EnvoyProxyingLate100Continue) { testEnvoyProxying1xx(false); } + +// Multiple 1xx after the request completes. +TEST_P(ProtocolIntegrationTest, EnvoyProxyingLateMultiple1xx) { + testEnvoyProxying1xx(false, false, true); } TEST_P(ProtocolIntegrationTest, TwoRequests) { testTwoRequests(); } diff --git a/test/mocks/router/BUILD b/test/mocks/router/BUILD index 282a60b154f8..7044a84c1edb 100644 --- a/test/mocks/router/BUILD +++ b/test/mocks/router/BUILD @@ -35,3 +35,15 @@ envoy_cc_mock( "@envoy_api//envoy/type/v3:pkg_cc_proto", ], ) + +envoy_cc_mock( + name = "router_filter_interface", + srcs = ["router_filter_interface.cc"], + hdrs = ["router_filter_interface.h"], + deps = [ + "//source/common/router:router_lib", + "//test/mocks/http:http_mocks", + "//test/mocks/network:network_mocks", + "//test/mocks/server:factory_context_mocks", + ], +) diff --git a/test/mocks/router/router_filter_interface.cc b/test/mocks/router/router_filter_interface.cc new file mode 100644 index 000000000000..81d9b8be1a34 --- /dev/null +++ b/test/mocks/router/router_filter_interface.cc @@ -0,0 +1,29 @@ +#include "test/mocks/router/router_filter_interface.h" + +using testing::AnyNumber; +using testing::Return; +using testing::ReturnRef; + +namespace Envoy { +namespace Router { + +MockRouterFilterInterface::MockRouterFilterInterface() + : config_("prefix.", context_, ShadowWriterPtr(new MockShadowWriter()), router_proto) { + auto cluster_info = new NiceMock(); + cluster_info->timeout_budget_stats_ = nullptr; + ON_CALL(*cluster_info, timeoutBudgetStats()).WillByDefault(Return(absl::nullopt)); + cluster_info_.reset(cluster_info); + ON_CALL(*this, callbacks()).WillByDefault(Return(&callbacks_)); + ON_CALL(*this, config()).WillByDefault(ReturnRef(config_)); + ON_CALL(*this, cluster()).WillByDefault(Return(cluster_info_)); + ON_CALL(*this, upstreamRequests()).WillByDefault(ReturnRef(requests_)); + EXPECT_CALL(callbacks_.dispatcher_, setTrackedObject(_)).Times(AnyNumber()); + ON_CALL(*this, routeEntry()).WillByDefault(Return(&route_entry_)); + ON_CALL(callbacks_, connection()).WillByDefault(Return(&client_connection_)); + route_entry_.connect_config_.emplace(RouteEntry::ConnectConfig()); +} + +MockRouterFilterInterface::~MockRouterFilterInterface() = default; + +} // namespace Router +} // namespace Envoy diff --git a/test/mocks/router/router_filter_interface.h b/test/mocks/router/router_filter_interface.h new file mode 100644 index 000000000000..40b6ac609b8e --- /dev/null +++ b/test/mocks/router/router_filter_interface.h @@ -0,0 +1,63 @@ +#pragma once + +#include "common/router/router.h" + +#include "test/mocks/http/mocks.h" +#include "test/mocks/network/mocks.h" +#include "test/mocks/server/factory_context.h" + +#include "gmock/gmock.h" + +namespace Envoy { +namespace Router { + +class MockRouterFilterInterface : public RouterFilterInterface { +public: + MockRouterFilterInterface(); + ~MockRouterFilterInterface() override; + + MOCK_METHOD(void, onUpstream100ContinueHeaders, + (Envoy::Http::ResponseHeaderMapPtr && headers, UpstreamRequest& upstream_request)); + MOCK_METHOD(void, onUpstreamHeaders, + (uint64_t response_code, Envoy::Http::ResponseHeaderMapPtr&& headers, + UpstreamRequest& upstream_request, bool end_stream)); + MOCK_METHOD(void, onUpstreamData, + (Buffer::Instance & data, UpstreamRequest& upstream_request, bool end_stream)); + MOCK_METHOD(void, onUpstreamTrailers, + (Envoy::Http::ResponseTrailerMapPtr && trailers, UpstreamRequest& upstream_request)); + MOCK_METHOD(void, 
onUpstreamMetadata, (Envoy::Http::MetadataMapPtr && metadata_map)); + MOCK_METHOD(void, onUpstreamReset, + (Envoy::Http::StreamResetReason reset_reason, absl::string_view transport_failure, + UpstreamRequest& upstream_request)); + MOCK_METHOD(void, onUpstreamHostSelected, (Upstream::HostDescriptionConstSharedPtr host)); + MOCK_METHOD(void, onPerTryTimeout, (UpstreamRequest & upstream_request)); + MOCK_METHOD(void, onStreamMaxDurationReached, (UpstreamRequest & upstream_request)); + + MOCK_METHOD(Envoy::Http::StreamDecoderFilterCallbacks*, callbacks, ()); + MOCK_METHOD(Upstream::ClusterInfoConstSharedPtr, cluster, ()); + MOCK_METHOD(FilterConfig&, config, ()); + MOCK_METHOD(FilterUtility::TimeoutData, timeout, ()); + MOCK_METHOD(Envoy::Http::RequestHeaderMap*, downstreamHeaders, ()); + MOCK_METHOD(Envoy::Http::RequestTrailerMap*, downstreamTrailers, ()); + MOCK_METHOD(bool, downstreamResponseStarted, (), (const)); + MOCK_METHOD(bool, downstreamEndStream, (), (const)); + MOCK_METHOD(uint32_t, attemptCount, (), (const)); + MOCK_METHOD(const VirtualCluster*, requestVcluster, (), (const)); + MOCK_METHOD(const RouteEntry*, routeEntry, (), (const)); + MOCK_METHOD(const std::list&, upstreamRequests, (), (const)); + MOCK_METHOD(const UpstreamRequest*, finalUpstreamRequest, (), (const)); + MOCK_METHOD(TimeSource&, timeSource, ()); + + NiceMock callbacks_; + NiceMock route_entry_; + NiceMock client_connection_; + + envoy::extensions::filters::http::router::v3::Router router_proto; + NiceMock context_; + FilterConfig config_; + Upstream::ClusterInfoConstSharedPtr cluster_info_; + std::list requests_; +}; + +} // namespace Router +} // namespace Envoy From e8216a8cf79c54e3e0a77ab729ebf27f4e79eb1b Mon Sep 17 00:00:00 2001 From: Kuat Date: Thu, 23 Jul 2020 14:33:43 -0700 Subject: [PATCH 730/909] xds: implement extension config discovery for HCM (#11826) Signed-off-by: Kuat Yessenov --- api/BUILD | 1 + api/envoy/config/accesslog/v3/accesslog.proto | 1 + .../config/accesslog/v4alpha/accesslog.proto | 1 + api/envoy/config/core/v3/extension.proto | 31 ++ api/envoy/config/core/v4alpha/extension.proto | 34 ++ api/envoy/data/accesslog/v3/accesslog.proto | 5 +- .../v3/http_connection_manager.proto | 34 +- .../v4alpha/http_connection_manager.proto | 38 +- .../service/{filter => extension}/v3/BUILD | 0 .../v3/config_discovery.proto} | 21 +- api/versioning/BUILD | 2 +- docs/root/api-v3/service/service.rst | 1 + .../root/configuration/overview/extension.rst | 22 + docs/root/version_history/current.rst | 2 + .../envoy/config/accesslog/v3/accesslog.proto | 1 + .../config/accesslog/v4alpha/accesslog.proto | 1 + .../envoy/config/core/v3/extension.proto | 31 ++ .../envoy/config/core/v4alpha/extension.proto | 34 ++ .../envoy/data/accesslog/v3/accesslog.proto | 5 +- .../v3/http_connection_manager.proto | 34 +- .../v4alpha/http_connection_manager.proto | 38 +- .../service/{filter => extension}/v3/BUILD | 0 .../v3/config_discovery.proto} | 21 +- include/envoy/config/BUILD | 6 + .../envoy/config/extension_config_provider.h | 53 +++ include/envoy/filter/http/BUILD | 21 + .../filter/http/filter_config_provider.h | 57 +++ include/envoy/stream_info/stream_info.h | 4 +- source/common/config/BUILD | 1 + source/common/config/protobuf_link_hacks.h | 1 + source/common/config/type_to_endpoint.cc | 7 + source/common/config/utility.h | 49 +- source/common/filter/http/BUILD | 31 ++ .../http/filter_config_discovery_impl.cc | 211 +++++++++ .../http/filter_config_discovery_impl.h | 184 ++++++++ source/common/stream_info/utility.cc | 8 +- 
source/common/stream_info/utility.h | 1 + .../grpc/grpc_access_log_utils.cc | 5 +- .../network/http_connection_manager/BUILD | 2 + .../network/http_connection_manager/config.cc | 142 +++++- .../network/http_connection_manager/config.h | 19 +- .../common/access_log/access_log_impl_test.cc | 12 +- test/common/filter/http/BUILD | 30 ++ .../http/filter_config_discovery_impl_test.cc | 297 ++++++++++++ test/common/stream_info/utility_test.cc | 10 +- .../grpc/grpc_access_log_utils_test.cc | 1 + .../network/http_connection_manager/BUILD | 1 + .../http_connection_manager/config_test.cc | 422 +++++++++++++++--- test/integration/BUILD | 15 + .../extension_discovery_integration_test.cc | 327 ++++++++++++++ 50 files changed, 2025 insertions(+), 250 deletions(-) rename api/envoy/service/{filter => extension}/v3/BUILD (100%) rename api/envoy/service/{filter/v3/filter_config_discovery.proto => extension/v3/config_discovery.proto} (52%) rename generated_api_shadow/envoy/service/{filter => extension}/v3/BUILD (100%) rename generated_api_shadow/envoy/service/{filter/v3/filter_config_discovery.proto => extension/v3/config_discovery.proto} (52%) create mode 100644 include/envoy/config/extension_config_provider.h create mode 100644 include/envoy/filter/http/BUILD create mode 100644 include/envoy/filter/http/filter_config_provider.h create mode 100644 source/common/filter/http/BUILD create mode 100644 source/common/filter/http/filter_config_discovery_impl.cc create mode 100644 source/common/filter/http/filter_config_discovery_impl.h create mode 100644 test/common/filter/http/BUILD create mode 100644 test/common/filter/http/filter_config_discovery_impl_test.cc create mode 100644 test/integration/extension_discovery_integration_test.cc diff --git a/api/BUILD b/api/BUILD index 9d4f802dfe5f..50835fb0b1c4 100644 --- a/api/BUILD +++ b/api/BUILD @@ -245,6 +245,7 @@ proto_library( "//envoy/service/discovery/v3:pkg", "//envoy/service/endpoint/v3:pkg", "//envoy/service/event_reporting/v3:pkg", + "//envoy/service/extension/v3:pkg", "//envoy/service/health/v3:pkg", "//envoy/service/listener/v3:pkg", "//envoy/service/load_stats/v3:pkg", diff --git a/api/envoy/config/accesslog/v3/accesslog.proto b/api/envoy/config/accesslog/v3/accesslog.proto index 9a2f276b34b4..e1b5a2e58b90 100644 --- a/api/envoy/config/accesslog/v3/accesslog.proto +++ b/api/envoy/config/accesslog/v3/accesslog.proto @@ -242,6 +242,7 @@ message ResponseFlagFilter { in: "DPE" in: "UMSDR" in: "RFCF" + in: "NFCF" } } }]; diff --git a/api/envoy/config/accesslog/v4alpha/accesslog.proto b/api/envoy/config/accesslog/v4alpha/accesslog.proto index 939d4df95889..35f494ea1ac8 100644 --- a/api/envoy/config/accesslog/v4alpha/accesslog.proto +++ b/api/envoy/config/accesslog/v4alpha/accesslog.proto @@ -241,6 +241,7 @@ message ResponseFlagFilter { in: "DPE" in: "UMSDR" in: "RFCF" + in: "NFCF" } } }]; diff --git a/api/envoy/config/core/v3/extension.proto b/api/envoy/config/core/v3/extension.proto index 636398760785..ba66da6a8e36 100644 --- a/api/envoy/config/core/v3/extension.proto +++ b/api/envoy/config/core/v3/extension.proto @@ -2,6 +2,8 @@ syntax = "proto3"; package envoy.config.core.v3; +import "envoy/config/core/v3/config_source.proto"; + import "google/protobuf/any.proto"; import "udpa/annotations/status.proto"; @@ -28,3 +30,32 @@ message TypedExtensionConfig { // ` for further details. google.protobuf.Any typed_config = 2 [(validate.rules).any = {required: true}]; } + +// Configuration source specifier for a late-bound extension configuration. 
The +// parent resource is warmed until all the initial extension configurations are +// received, unless the flag to apply the default configuration is set. +// Subsequent extension updates are atomic on a per-worker basis. Once an +// extension configuration is applied to a request or a connection, it remains +// constant for the duration of processing. If the initial delivery of the +// extension configuration fails, due to a timeout for example, the optional +// default configuration is applied. Without a default configuration, the +// extension is disabled, until an extension configuration is received. The +// behavior of a disabled extension depends on the context. For example, a +// filter chain with a disabled extension filter rejects all incoming streams. +message ExtensionConfigSource { + ConfigSource config_source = 1 [(validate.rules).any = {required: true}]; + + // Optional default configuration to use as the initial configuration if + // there is a failure to receive the initial extension configuration or if + // `apply_default_config_without_warming` flag is set. + google.protobuf.Any default_config = 2; + + // Use the default config as the initial configuration without warming and + // waiting for the first discovery response. Requires the default configuration + // to be supplied. + bool apply_default_config_without_warming = 3; + + // A set of permitted extension type URLs. Extension configuration updates are rejected + // if they do not match any type URL in the set. + repeated string type_urls = 4 [(validate.rules).repeated = {min_items: 1}]; +} diff --git a/api/envoy/config/core/v4alpha/extension.proto b/api/envoy/config/core/v4alpha/extension.proto index 52ae2a143b49..4de107580d07 100644 --- a/api/envoy/config/core/v4alpha/extension.proto +++ b/api/envoy/config/core/v4alpha/extension.proto @@ -2,6 +2,8 @@ syntax = "proto3"; package envoy.config.core.v4alpha; +import "envoy/config/core/v4alpha/config_source.proto"; + import "google/protobuf/any.proto"; import "udpa/annotations/status.proto"; @@ -32,3 +34,35 @@ message TypedExtensionConfig { // ` for further details. google.protobuf.Any typed_config = 2 [(validate.rules).any = {required: true}]; } + +// Configuration source specifier for a late-bound extension configuration. The +// parent resource is warmed until all the initial extension configurations are +// received, unless the flag to apply the default configuration is set. +// Subsequent extension updates are atomic on a per-worker basis. Once an +// extension configuration is applied to a request or a connection, it remains +// constant for the duration of processing. If the initial delivery of the +// extension configuration fails, due to a timeout for example, the optional +// default configuration is applied. Without a default configuration, the +// extension is disabled, until an extension configuration is received. The +// behavior of a disabled extension depends on the context. For example, a +// filter chain with a disabled extension filter rejects all incoming streams. +message ExtensionConfigSource { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.ExtensionConfigSource"; + + ConfigSource config_source = 1 [(validate.rules).any = {required: true}]; + + // Optional default configuration to use as the initial configuration if + // there is a failure to receive the initial extension configuration or if + // `apply_default_config_without_warming` flag is set. 
+ google.protobuf.Any default_config = 2; + + // Use the default config as the initial configuration without warming and + // waiting for the first discovery response. Requires the default configuration + // to be supplied. + bool apply_default_config_without_warming = 3; + + // A set of permitted extension type URLs. Extension configuration updates are rejected + // if they do not match any type URL in the set. + repeated string type_urls = 4 [(validate.rules).repeated = {min_items: 1}]; +} diff --git a/api/envoy/data/accesslog/v3/accesslog.proto b/api/envoy/data/accesslog/v3/accesslog.proto index 347adc2003e6..c16b5be1ff0e 100644 --- a/api/envoy/data/accesslog/v3/accesslog.proto +++ b/api/envoy/data/accesslog/v3/accesslog.proto @@ -186,7 +186,7 @@ message AccessLogCommon { } // Flags indicating occurrences during request/response processing. -// [#next-free-field: 22] +// [#next-free-field: 23] message ResponseFlags { option (udpa.annotations.versioning).previous_message_type = "envoy.data.accesslog.v2.ResponseFlags"; @@ -269,6 +269,9 @@ message ResponseFlags { // Indicates the response was served from a cache filter. bool response_from_cache_filter = 21; + + // Indicates that a filter configuration is not available. + bool no_filter_config_found = 22; } // Properties of a negotiated TLS connection. diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto b/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto index 87e629f4f441..04a132ad2672 100644 --- a/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto +++ b/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto @@ -5,6 +5,7 @@ package envoy.extensions.filters.network.http_connection_manager.v3; import "envoy/config/accesslog/v3/accesslog.proto"; import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/config_source.proto"; +import "envoy/config/core/v3/extension.proto"; import "envoy/config/core/v3/protocol.proto"; import "envoy/config/core/v3/substitution_format_string.proto"; import "envoy/config/route/v3/route.proto"; @@ -797,38 +798,13 @@ message HttpFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.http_connection_manager.v2.HttpFilter"; - // [#not-implemented-hide:] Configuration source specifier for the late-bound - // filter configuration. The HTTP Listener is warmed until all the initial - // filter configurations are received, unless the flag to apply the default - // configuration is set. Subsequent filter updates are atomic on a per-worker - // basis, and apply to new streams while the active streams continue using - // the older filter configurations. If the initial delivery of the filter - // configuration fails, due to a timeout for example, the optional default - // configuration is applied. Without a default configuration, the filter is - // disabled, and the HTTP listener responds with 500 immediately. After the - // failure, the listener continues subscribing to the subsequent filter - // configurations. - message HttpFilterConfigSource { - config.core.v3.ConfigSource config_source = 1; - - // Optional default configuration to use as the initial configuration if - // there is a failure to receive the initial filter configuration or if - // `apply_default_config_without_warming` flag is set. 
- google.protobuf.Any default_config = 2; - - // Use the default config as the initial configuration without warming and - // waiting for the first xDS response. Requires the default configuration - // to be supplied. - bool apply_default_config_without_warming = 3; - } - reserved 3, 2; reserved "config"; // The name of the filter configuration. The name is used as a fallback to // select an extension if the type of the configuration proto is not - // sufficient. It also serves as a resource name in FilterConfigDS. + // sufficient. It also serves as a resource name in ExtensionConfigDS. string name = 1 [(validate.rules).string = {min_bytes: 1}]; // Filter specific configuration which depends on the filter being instantiated. See the supported @@ -836,8 +812,10 @@ message HttpFilter { oneof config_type { google.protobuf.Any typed_config = 4; - // [#not-implemented-hide:] Configuration source specifier for FilterConfigDS. - HttpFilterConfigSource filter_config_ds = 5; + // Configuration source specifier for an extension configuration discovery service. + // In case of a failure and without the default configuration, the HTTP listener responds with 500. + // Extension configs delivered through this mechanism are not expected to require warming (see https://github.com/envoyproxy/envoy/issues/12061). + config.core.v3.ExtensionConfigSource config_discovery = 5; } } diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto index ac31bf1ecd62..042a39863f81 100644 --- a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto +++ b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto @@ -5,6 +5,7 @@ package envoy.extensions.filters.network.http_connection_manager.v4alpha; import "envoy/config/accesslog/v4alpha/accesslog.proto"; import "envoy/config/core/v4alpha/base.proto"; import "envoy/config/core/v4alpha/config_source.proto"; +import "envoy/config/core/v4alpha/extension.proto"; import "envoy/config/core/v4alpha/protocol.proto"; import "envoy/config/core/v4alpha/substitution_format_string.proto"; import "envoy/config/route/v4alpha/route.proto"; @@ -803,42 +804,13 @@ message HttpFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.network.http_connection_manager.v3.HttpFilter"; - // [#not-implemented-hide:] Configuration source specifier for the late-bound - // filter configuration. The HTTP Listener is warmed until all the initial - // filter configurations are received, unless the flag to apply the default - // configuration is set. Subsequent filter updates are atomic on a per-worker - // basis, and apply to new streams while the active streams continue using - // the older filter configurations. If the initial delivery of the filter - // configuration fails, due to a timeout for example, the optional default - // configuration is applied. Without a default configuration, the filter is - // disabled, and the HTTP listener responds with 500 immediately. After the - // failure, the listener continues subscribing to the subsequent filter - // configurations. - message HttpFilterConfigSource { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.http_connection_manager.v3.HttpFilter." 
- "HttpFilterConfigSource"; - - config.core.v4alpha.ConfigSource config_source = 1; - - // Optional default configuration to use as the initial configuration if - // there is a failure to receive the initial filter configuration or if - // `apply_default_config_without_warming` flag is set. - google.protobuf.Any default_config = 2; - - // Use the default config as the initial configuration without warming and - // waiting for the first xDS response. Requires the default configuration - // to be supplied. - bool apply_default_config_without_warming = 3; - } - reserved 3, 2; reserved "config"; // The name of the filter configuration. The name is used as a fallback to // select an extension if the type of the configuration proto is not - // sufficient. It also serves as a resource name in FilterConfigDS. + // sufficient. It also serves as a resource name in ExtensionConfigDS. string name = 1 [(validate.rules).string = {min_bytes: 1}]; // Filter specific configuration which depends on the filter being instantiated. See the supported @@ -846,8 +818,10 @@ message HttpFilter { oneof config_type { google.protobuf.Any typed_config = 4; - // [#not-implemented-hide:] Configuration source specifier for FilterConfigDS. - HttpFilterConfigSource filter_config_ds = 5; + // Configuration source specifier for an extension configuration discovery service. + // In case of a failure and without the default configuration, the HTTP listener responds with 500. + // Extension configs delivered through this mechanism are not expected to require warming (see https://github.com/envoyproxy/envoy/issues/12061). + config.core.v4alpha.ExtensionConfigSource config_discovery = 5; } } diff --git a/api/envoy/service/filter/v3/BUILD b/api/envoy/service/extension/v3/BUILD similarity index 100% rename from api/envoy/service/filter/v3/BUILD rename to api/envoy/service/extension/v3/BUILD diff --git a/api/envoy/service/filter/v3/filter_config_discovery.proto b/api/envoy/service/extension/v3/config_discovery.proto similarity index 52% rename from api/envoy/service/filter/v3/filter_config_discovery.proto rename to api/envoy/service/extension/v3/config_discovery.proto index 79c5846710bb..ce2a5c7dfe70 100644 --- a/api/envoy/service/filter/v3/filter_config_discovery.proto +++ b/api/envoy/service/extension/v3/config_discovery.proto @@ -1,6 +1,6 @@ syntax = "proto3"; -package envoy.service.filter.v3; +package envoy.service.extension.v3; import "envoy/service/discovery/v3/discovery.proto"; @@ -10,28 +10,29 @@ import "envoy/annotations/resource.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; -option java_package = "io.envoyproxy.envoy.service.filter.v3"; -option java_outer_classname = "FilterConfigDiscoveryProto"; +option java_package = "io.envoyproxy.envoy.service.extension.v3"; +option java_outer_classname = "ConfigDiscoveryProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; -// [#protodoc-title: FilterConfigDS] +// [#protodoc-title: ExtensionConfigDS] -// Return filter configurations. -service FilterConfigDiscoveryService { +// Return extension configurations. 
+service ExtensionConfigDiscoveryService { option (envoy.annotations.resource).type = "envoy.config.core.v3.TypedExtensionConfig"; - rpc StreamFilterConfigs(stream discovery.v3.DiscoveryRequest) + rpc StreamExtensionConfigs(stream discovery.v3.DiscoveryRequest) returns (stream discovery.v3.DiscoveryResponse) { } - rpc DeltaFilterConfigs(stream discovery.v3.DeltaDiscoveryRequest) + rpc DeltaExtensionConfigs(stream discovery.v3.DeltaDiscoveryRequest) returns (stream discovery.v3.DeltaDiscoveryResponse) { } - rpc FetchFilterConfigs(discovery.v3.DiscoveryRequest) returns (discovery.v3.DiscoveryResponse) { - option (google.api.http).post = "/v3/discovery:filter_configs"; + rpc FetchExtensionConfigs(discovery.v3.DiscoveryRequest) + returns (discovery.v3.DiscoveryResponse) { + option (google.api.http).post = "/v3/discovery:extension_configs"; option (google.api.http).body = "*"; } } diff --git a/api/versioning/BUILD b/api/versioning/BUILD index e00a0fbbb55d..00939e940295 100644 --- a/api/versioning/BUILD +++ b/api/versioning/BUILD @@ -128,7 +128,7 @@ proto_library( "//envoy/service/discovery/v3:pkg", "//envoy/service/endpoint/v3:pkg", "//envoy/service/event_reporting/v3:pkg", - "//envoy/service/filter/v3:pkg", + "//envoy/service/extension/v3:pkg", "//envoy/service/health/v3:pkg", "//envoy/service/listener/v3:pkg", "//envoy/service/load_stats/v3:pkg", diff --git a/docs/root/api-v3/service/service.rst b/docs/root/api-v3/service/service.rst index de8110cf5fbd..d651856c678b 100644 --- a/docs/root/api-v3/service/service.rst +++ b/docs/root/api-v3/service/service.rst @@ -16,3 +16,4 @@ Services tap/v3/* ../config/tap/v3/* trace/v3/* + extension/v3/* diff --git a/docs/root/configuration/overview/extension.rst b/docs/root/configuration/overview/extension.rst index 37f58b8ecad7..dab59eaf6b97 100644 --- a/docs/root/configuration/overview/extension.rst +++ b/docs/root/configuration/overview/extension.rst @@ -61,3 +61,25 @@ follows: "@type": type.googleapis.com/udpa.type.v1.TypedStruct type_url: type.googleapis.com/envoy.extensions.filters.http.router.v3Router +Discovery service +^^^^^^^^^^^^^^^^^ + +Extension configuration can be supplied dynamically from a :ref:`an xDS +management server` using :ref:`ExtensionConfiguration discovery +service`. +The name field in the extension configuration acts as the resource identifier. +For example, HTTP connection manager supports :ref:`dynamic filter +re-configuration` +for HTTP filters. + +Extension config discovery service has a :ref:`statistics +` tree rooted at +*.extension_config_discovery..*. In addition +to the common subscription statistics, it also provides the following: + +.. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + config_reload, Counter, Total number of successful configuration updates + config_fail, Counter, Total number of failed configuration updates diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index e2fcea52f2c2..a768a80008b8 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -52,6 +52,8 @@ New Features that track headers and body sizes of requests and responses. * stats: allow configuring histogram buckets for stats sinks and admin endpoints that support it. * tap: added :ref:`generic body matcher` to scan http requests and responses for text or hex patterns. +* tcp: switched the TCP connection pool to the new "shared" connection pool, sharing a common code base with HTTP and HTTP/2. 
Any unexpected behavioral changes can be temporarily reverted by setting `envoy.reloadable_features.new_tcp_connection_pool` to false. +* xds: added :ref:`extension config discovery` support for HTTP filters. Deprecated ---------- diff --git a/generated_api_shadow/envoy/config/accesslog/v3/accesslog.proto b/generated_api_shadow/envoy/config/accesslog/v3/accesslog.proto index 09d691dd3665..3307b4c57ffd 100644 --- a/generated_api_shadow/envoy/config/accesslog/v3/accesslog.proto +++ b/generated_api_shadow/envoy/config/accesslog/v3/accesslog.proto @@ -240,6 +240,7 @@ message ResponseFlagFilter { in: "DPE" in: "UMSDR" in: "RFCF" + in: "NFCF" } } }]; diff --git a/generated_api_shadow/envoy/config/accesslog/v4alpha/accesslog.proto b/generated_api_shadow/envoy/config/accesslog/v4alpha/accesslog.proto index 939d4df95889..35f494ea1ac8 100644 --- a/generated_api_shadow/envoy/config/accesslog/v4alpha/accesslog.proto +++ b/generated_api_shadow/envoy/config/accesslog/v4alpha/accesslog.proto @@ -241,6 +241,7 @@ message ResponseFlagFilter { in: "DPE" in: "UMSDR" in: "RFCF" + in: "NFCF" } } }]; diff --git a/generated_api_shadow/envoy/config/core/v3/extension.proto b/generated_api_shadow/envoy/config/core/v3/extension.proto index 636398760785..ba66da6a8e36 100644 --- a/generated_api_shadow/envoy/config/core/v3/extension.proto +++ b/generated_api_shadow/envoy/config/core/v3/extension.proto @@ -2,6 +2,8 @@ syntax = "proto3"; package envoy.config.core.v3; +import "envoy/config/core/v3/config_source.proto"; + import "google/protobuf/any.proto"; import "udpa/annotations/status.proto"; @@ -28,3 +30,32 @@ message TypedExtensionConfig { // ` for further details. google.protobuf.Any typed_config = 2 [(validate.rules).any = {required: true}]; } + +// Configuration source specifier for a late-bound extension configuration. The +// parent resource is warmed until all the initial extension configurations are +// received, unless the flag to apply the default configuration is set. +// Subsequent extension updates are atomic on a per-worker basis. Once an +// extension configuration is applied to a request or a connection, it remains +// constant for the duration of processing. If the initial delivery of the +// extension configuration fails, due to a timeout for example, the optional +// default configuration is applied. Without a default configuration, the +// extension is disabled, until an extension configuration is received. The +// behavior of a disabled extension depends on the context. For example, a +// filter chain with a disabled extension filter rejects all incoming streams. +message ExtensionConfigSource { + ConfigSource config_source = 1 [(validate.rules).any = {required: true}]; + + // Optional default configuration to use as the initial configuration if + // there is a failure to receive the initial extension configuration or if + // `apply_default_config_without_warming` flag is set. + google.protobuf.Any default_config = 2; + + // Use the default config as the initial configuration without warming and + // waiting for the first discovery response. Requires the default configuration + // to be supplied. + bool apply_default_config_without_warming = 3; + + // A set of permitted extension type URLs. Extension configuration updates are rejected + // if they do not match any type URL in the set. 
+ repeated string type_urls = 4 [(validate.rules).repeated = {min_items: 1}]; +} diff --git a/generated_api_shadow/envoy/config/core/v4alpha/extension.proto b/generated_api_shadow/envoy/config/core/v4alpha/extension.proto index 52ae2a143b49..4de107580d07 100644 --- a/generated_api_shadow/envoy/config/core/v4alpha/extension.proto +++ b/generated_api_shadow/envoy/config/core/v4alpha/extension.proto @@ -2,6 +2,8 @@ syntax = "proto3"; package envoy.config.core.v4alpha; +import "envoy/config/core/v4alpha/config_source.proto"; + import "google/protobuf/any.proto"; import "udpa/annotations/status.proto"; @@ -32,3 +34,35 @@ message TypedExtensionConfig { // ` for further details. google.protobuf.Any typed_config = 2 [(validate.rules).any = {required: true}]; } + +// Configuration source specifier for a late-bound extension configuration. The +// parent resource is warmed until all the initial extension configurations are +// received, unless the flag to apply the default configuration is set. +// Subsequent extension updates are atomic on a per-worker basis. Once an +// extension configuration is applied to a request or a connection, it remains +// constant for the duration of processing. If the initial delivery of the +// extension configuration fails, due to a timeout for example, the optional +// default configuration is applied. Without a default configuration, the +// extension is disabled, until an extension configuration is received. The +// behavior of a disabled extension depends on the context. For example, a +// filter chain with a disabled extension filter rejects all incoming streams. +message ExtensionConfigSource { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.core.v3.ExtensionConfigSource"; + + ConfigSource config_source = 1 [(validate.rules).any = {required: true}]; + + // Optional default configuration to use as the initial configuration if + // there is a failure to receive the initial extension configuration or if + // `apply_default_config_without_warming` flag is set. + google.protobuf.Any default_config = 2; + + // Use the default config as the initial configuration without warming and + // waiting for the first discovery response. Requires the default configuration + // to be supplied. + bool apply_default_config_without_warming = 3; + + // A set of permitted extension type URLs. Extension configuration updates are rejected + // if they do not match any type URL in the set. + repeated string type_urls = 4 [(validate.rules).repeated = {min_items: 1}]; +} diff --git a/generated_api_shadow/envoy/data/accesslog/v3/accesslog.proto b/generated_api_shadow/envoy/data/accesslog/v3/accesslog.proto index 347adc2003e6..c16b5be1ff0e 100644 --- a/generated_api_shadow/envoy/data/accesslog/v3/accesslog.proto +++ b/generated_api_shadow/envoy/data/accesslog/v3/accesslog.proto @@ -186,7 +186,7 @@ message AccessLogCommon { } // Flags indicating occurrences during request/response processing. -// [#next-free-field: 22] +// [#next-free-field: 23] message ResponseFlags { option (udpa.annotations.versioning).previous_message_type = "envoy.data.accesslog.v2.ResponseFlags"; @@ -269,6 +269,9 @@ message ResponseFlags { // Indicates the response was served from a cache filter. bool response_from_cache_filter = 21; + + // Indicates that a filter configuration is not available. + bool no_filter_config_found = 22; } // Properties of a negotiated TLS connection. 
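For readers unfamiliar with the new API surface, here is a rough sketch (not part of this patch) of how the pieces above fit together: an HttpFilter entry that uses the new config_discovery / ExtensionConfigSource fields, plus the TypedExtensionConfig resource an ECDS server would answer with. Only the field names (config_discovery, config_source, default_config, apply_default_config_without_warming, type_urls) come from the protos in this change; the ecds_cluster cluster, the dynamic_fault resource name, and the fault-filter payload are illustrative assumptions.

# Listener side: an HCM http_filters entry late-bound through ECDS
# (cluster name, resource name and filter type are assumptions for the example).
http_filters:
- name: dynamic_fault                      # also serves as the ECDS resource name
  config_discovery:
    config_source:
      api_config_source:
        api_type: GRPC
        grpc_services:
        - envoy_grpc:
            cluster_name: ecds_cluster
    # Used immediately and whenever the initial delivery fails.
    default_config:
      "@type": type.googleapis.com/envoy.extensions.filters.http.fault.v3.HTTPFault
    apply_default_config_without_warming: true
    # Updates whose typed_config does not match one of these URLs are rejected.
    type_urls:
    - type.googleapis.com/envoy.extensions.filters.http.fault.v3.HTTPFault
- name: envoy.filters.http.router
  typed_config:
    "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router

# Management-server side: the resource returned by ExtensionConfigDiscoveryService.
resources:
- "@type": type.googleapis.com/envoy.config.core.v3.TypedExtensionConfig
  name: dynamic_fault                      # must match the filter name above
  typed_config:
    "@type": type.googleapis.com/envoy.extensions.filters.http.fault.v3.HTTPFault
    abort:
      http_status: 503
      percentage:
        numerator: 100
        denominator: HUNDRED

Because apply_default_config_without_warming is set, the listener does not warm on this resource and the inline default applies until the first ECDS update arrives; per the proto comments above, omitting the default instead leaves the filter disabled, and the HTTP listener answers with a 500 that carries the new NFCF (no_filter_config_found) response flag.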
diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto index a25759c85fc7..0439633d6e6e 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto @@ -5,6 +5,7 @@ package envoy.extensions.filters.network.http_connection_manager.v3; import "envoy/config/accesslog/v3/accesslog.proto"; import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/config_source.proto"; +import "envoy/config/core/v3/extension.proto"; import "envoy/config/core/v3/protocol.proto"; import "envoy/config/core/v3/substitution_format_string.proto"; import "envoy/config/route/v3/route.proto"; @@ -802,36 +803,11 @@ message HttpFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.http_connection_manager.v2.HttpFilter"; - // [#not-implemented-hide:] Configuration source specifier for the late-bound - // filter configuration. The HTTP Listener is warmed until all the initial - // filter configurations are received, unless the flag to apply the default - // configuration is set. Subsequent filter updates are atomic on a per-worker - // basis, and apply to new streams while the active streams continue using - // the older filter configurations. If the initial delivery of the filter - // configuration fails, due to a timeout for example, the optional default - // configuration is applied. Without a default configuration, the filter is - // disabled, and the HTTP listener responds with 500 immediately. After the - // failure, the listener continues subscribing to the subsequent filter - // configurations. - message HttpFilterConfigSource { - config.core.v3.ConfigSource config_source = 1; - - // Optional default configuration to use as the initial configuration if - // there is a failure to receive the initial filter configuration or if - // `apply_default_config_without_warming` flag is set. - google.protobuf.Any default_config = 2; - - // Use the default config as the initial configuration without warming and - // waiting for the first xDS response. Requires the default configuration - // to be supplied. - bool apply_default_config_without_warming = 3; - } - reserved 3; // The name of the filter configuration. The name is used as a fallback to // select an extension if the type of the configuration proto is not - // sufficient. It also serves as a resource name in FilterConfigDS. + // sufficient. It also serves as a resource name in ExtensionConfigDS. string name = 1 [(validate.rules).string = {min_bytes: 1}]; // Filter specific configuration which depends on the filter being instantiated. See the supported @@ -839,8 +815,10 @@ message HttpFilter { oneof config_type { google.protobuf.Any typed_config = 4; - // [#not-implemented-hide:] Configuration source specifier for FilterConfigDS. - HttpFilterConfigSource filter_config_ds = 5; + // Configuration source specifier for an extension configuration discovery service. + // In case of a failure and without the default configuration, the HTTP listener responds with 500. + // Extension configs delivered through this mechanism are not expected to require warming (see https://github.com/envoyproxy/envoy/issues/12061). 
+ config.core.v3.ExtensionConfigSource config_discovery = 5; google.protobuf.Struct hidden_envoy_deprecated_config = 2 [deprecated = true]; } diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto index ac31bf1ecd62..042a39863f81 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto @@ -5,6 +5,7 @@ package envoy.extensions.filters.network.http_connection_manager.v4alpha; import "envoy/config/accesslog/v4alpha/accesslog.proto"; import "envoy/config/core/v4alpha/base.proto"; import "envoy/config/core/v4alpha/config_source.proto"; +import "envoy/config/core/v4alpha/extension.proto"; import "envoy/config/core/v4alpha/protocol.proto"; import "envoy/config/core/v4alpha/substitution_format_string.proto"; import "envoy/config/route/v4alpha/route.proto"; @@ -803,42 +804,13 @@ message HttpFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.network.http_connection_manager.v3.HttpFilter"; - // [#not-implemented-hide:] Configuration source specifier for the late-bound - // filter configuration. The HTTP Listener is warmed until all the initial - // filter configurations are received, unless the flag to apply the default - // configuration is set. Subsequent filter updates are atomic on a per-worker - // basis, and apply to new streams while the active streams continue using - // the older filter configurations. If the initial delivery of the filter - // configuration fails, due to a timeout for example, the optional default - // configuration is applied. Without a default configuration, the filter is - // disabled, and the HTTP listener responds with 500 immediately. After the - // failure, the listener continues subscribing to the subsequent filter - // configurations. - message HttpFilterConfigSource { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.http_connection_manager.v3.HttpFilter." - "HttpFilterConfigSource"; - - config.core.v4alpha.ConfigSource config_source = 1; - - // Optional default configuration to use as the initial configuration if - // there is a failure to receive the initial filter configuration or if - // `apply_default_config_without_warming` flag is set. - google.protobuf.Any default_config = 2; - - // Use the default config as the initial configuration without warming and - // waiting for the first xDS response. Requires the default configuration - // to be supplied. - bool apply_default_config_without_warming = 3; - } - reserved 3, 2; reserved "config"; // The name of the filter configuration. The name is used as a fallback to // select an extension if the type of the configuration proto is not - // sufficient. It also serves as a resource name in FilterConfigDS. + // sufficient. It also serves as a resource name in ExtensionConfigDS. string name = 1 [(validate.rules).string = {min_bytes: 1}]; // Filter specific configuration which depends on the filter being instantiated. See the supported @@ -846,8 +818,10 @@ message HttpFilter { oneof config_type { google.protobuf.Any typed_config = 4; - // [#not-implemented-hide:] Configuration source specifier for FilterConfigDS. 
- HttpFilterConfigSource filter_config_ds = 5; + // Configuration source specifier for an extension configuration discovery service. + // In case of a failure and without the default configuration, the HTTP listener responds with 500. + // Extension configs delivered through this mechanism are not expected to require warming (see https://github.com/envoyproxy/envoy/issues/12061). + config.core.v4alpha.ExtensionConfigSource config_discovery = 5; } } diff --git a/generated_api_shadow/envoy/service/filter/v3/BUILD b/generated_api_shadow/envoy/service/extension/v3/BUILD similarity index 100% rename from generated_api_shadow/envoy/service/filter/v3/BUILD rename to generated_api_shadow/envoy/service/extension/v3/BUILD diff --git a/generated_api_shadow/envoy/service/filter/v3/filter_config_discovery.proto b/generated_api_shadow/envoy/service/extension/v3/config_discovery.proto similarity index 52% rename from generated_api_shadow/envoy/service/filter/v3/filter_config_discovery.proto rename to generated_api_shadow/envoy/service/extension/v3/config_discovery.proto index 79c5846710bb..ce2a5c7dfe70 100644 --- a/generated_api_shadow/envoy/service/filter/v3/filter_config_discovery.proto +++ b/generated_api_shadow/envoy/service/extension/v3/config_discovery.proto @@ -1,6 +1,6 @@ syntax = "proto3"; -package envoy.service.filter.v3; +package envoy.service.extension.v3; import "envoy/service/discovery/v3/discovery.proto"; @@ -10,28 +10,29 @@ import "envoy/annotations/resource.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; -option java_package = "io.envoyproxy.envoy.service.filter.v3"; -option java_outer_classname = "FilterConfigDiscoveryProto"; +option java_package = "io.envoyproxy.envoy.service.extension.v3"; +option java_outer_classname = "ConfigDiscoveryProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; -// [#protodoc-title: FilterConfigDS] +// [#protodoc-title: ExtensionConfigDS] -// Return filter configurations. -service FilterConfigDiscoveryService { +// Return extension configurations. 
+service ExtensionConfigDiscoveryService { option (envoy.annotations.resource).type = "envoy.config.core.v3.TypedExtensionConfig"; - rpc StreamFilterConfigs(stream discovery.v3.DiscoveryRequest) + rpc StreamExtensionConfigs(stream discovery.v3.DiscoveryRequest) returns (stream discovery.v3.DiscoveryResponse) { } - rpc DeltaFilterConfigs(stream discovery.v3.DeltaDiscoveryRequest) + rpc DeltaExtensionConfigs(stream discovery.v3.DeltaDiscoveryRequest) returns (stream discovery.v3.DeltaDiscoveryResponse) { } - rpc FetchFilterConfigs(discovery.v3.DiscoveryRequest) returns (discovery.v3.DiscoveryResponse) { - option (google.api.http).post = "/v3/discovery:filter_configs"; + rpc FetchExtensionConfigs(discovery.v3.DiscoveryRequest) + returns (discovery.v3.DiscoveryResponse) { + option (google.api.http).post = "/v3/discovery:extension_configs"; option (google.api.http).body = "*"; } } diff --git a/include/envoy/config/BUILD b/include/envoy/config/BUILD index 67ac833c2403..96140621aa6b 100644 --- a/include/envoy/config/BUILD +++ b/include/envoy/config/BUILD @@ -29,6 +29,12 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "extension_config_provider_interface", + hdrs = ["extension_config_provider.h"], + deps = ["//source/common/protobuf"], +) + envoy_cc_library( name = "grpc_mux_interface", hdrs = ["grpc_mux.h"], diff --git a/include/envoy/config/extension_config_provider.h b/include/envoy/config/extension_config_provider.h new file mode 100644 index 000000000000..0ea1aef9adc3 --- /dev/null +++ b/include/envoy/config/extension_config_provider.h @@ -0,0 +1,53 @@ +#pragma once + +#include "envoy/common/pure.h" + +#include "common/protobuf/protobuf.h" + +#include "absl/types/optional.h" + +namespace Envoy { +namespace Config { + +/** + * A provider for extension configurations obtained either statically or via + * the extension configuration discovery service. Dynamically updated extension + * configurations may share subscriptions across extension config providers. + */ +template class ExtensionConfigProvider { +public: + virtual ~ExtensionConfigProvider() = default; + + /** + * Get the extension configuration resource name. + **/ + virtual const std::string& name() PURE; + + /** + * @return FactoryCallback an extension factory callback. Note that if the + * provider has not yet performed an initial configuration load and no + * default is provided, an empty optional will be returned. The factory + * callback is the latest version of the extension configuration, and should + * generally apply only to new requests and connections. + */ + virtual absl::optional config() PURE; + + /** + * Validate that the configuration is applicable in the context of the provider. If an exception + * is thrown by any of the config providers for an update, the extension configuration update is + * rejected. + * @param proto_config is the candidate configuration update. + * @param factory used to instantiate an extension config. + */ + virtual void validateConfig(const ProtobufWkt::Any& proto_config, Factory& factory) PURE; + + /** + * Update the provider with a new configuration. + * @param config is an extension factory callback to replace the existing configuration. + * @param version_info is the version of the new extension configuration. 
+ */ + virtual void onConfigUpdate(FactoryCallback config, const std::string& version_info) PURE; +}; + +} // namespace Config +} // namespace Envoy diff --git a/include/envoy/filter/http/BUILD b/include/envoy/filter/http/BUILD new file mode 100644 index 000000000000..5a76c4ba7b9d --- /dev/null +++ b/include/envoy/filter/http/BUILD @@ -0,0 +1,21 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_library( + name = "filter_config_provider_interface", + hdrs = ["filter_config_provider.h"], + deps = [ + "//include/envoy/config:extension_config_provider_interface", + "//include/envoy/http:filter_interface", + "//include/envoy/init:manager_interface", + "//include/envoy/server:filter_config_interface", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + ], +) diff --git a/include/envoy/filter/http/filter_config_provider.h b/include/envoy/filter/http/filter_config_provider.h new file mode 100644 index 000000000000..e1c3f58c125a --- /dev/null +++ b/include/envoy/filter/http/filter_config_provider.h @@ -0,0 +1,57 @@ +#pragma once + +#include "envoy/config/core/v3/config_source.pb.h" +#include "envoy/config/extension_config_provider.h" +#include "envoy/http/filter.h" +#include "envoy/init/manager.h" +#include "envoy/server/filter_config.h" + +#include "absl/types/optional.h" + +namespace Envoy { +namespace Filter { +namespace Http { + +using FilterConfigProvider = + Envoy::Config::ExtensionConfigProvider; +using FilterConfigProviderPtr = std::unique_ptr; + +/** + * The FilterConfigProviderManager exposes the ability to get an FilterConfigProvider + * for both static and dynamic filter config providers. + */ +class FilterConfigProviderManager { +public: + virtual ~FilterConfigProviderManager() = default; + + /** + * Get an FilterConfigProviderPtr for a filter config. The config providers may share + * the underlying subscriptions to the filter config discovery service. + * @param config_source supplies the configuration source for the filter configs. + * @param filter_config_name the filter config resource name. + * @param require_type_urls enforces that the typed filter config must have a certain type URL. + * @param factory_context is the context to use for the filter config provider. + * @param stat_prefix supplies the stat_prefix to use for the provider stats. + * @param apply_without_warming initializes immediately with the default config and starts the + * subscription. + */ + virtual FilterConfigProviderPtr createDynamicFilterConfigProvider( + const envoy::config::core::v3::ConfigSource& config_source, + const std::string& filter_config_name, const std::set& require_type_urls, + Server::Configuration::FactoryContext& factory_context, const std::string& stat_prefix, + bool apply_without_warming) PURE; + + /** + * Get an FilterConfigProviderPtr for a statically inlined filter config. + * @param config is a fully resolved filter instantiation factory. + * @param filter_config_name is the name of the filter configuration resource. 
+ */ + virtual FilterConfigProviderPtr + createStaticFilterConfigProvider(const Envoy::Http::FilterFactoryCb& config, + const std::string& filter_config_name) PURE; +}; + +} // namespace Http +} // namespace Filter +} // namespace Envoy diff --git a/include/envoy/stream_info/stream_info.h b/include/envoy/stream_info/stream_info.h index fbec2554d380..515d4e83c744 100644 --- a/include/envoy/stream_info/stream_info.h +++ b/include/envoy/stream_info/stream_info.h @@ -76,8 +76,10 @@ enum ResponseFlag { UpstreamMaxStreamDurationReached = 0x80000, // True if the response was served from an Envoy cache filter. ResponseFromCacheFilter = 0x100000, + // Filter config was not received within the permitted warming deadline. + NoFilterConfigFound = 0x200000, // ATTENTION: MAKE SURE THIS REMAINS EQUAL TO THE LAST FLAG. - LastFlag = ResponseFromCacheFilter + LastFlag = NoFilterConfigFound }; /** diff --git a/source/common/config/BUILD b/source/common/config/BUILD index e42e10d7803b..b93d24af1fbf 100644 --- a/source/common/config/BUILD +++ b/source/common/config/BUILD @@ -253,6 +253,7 @@ envoy_cc_library( "@envoy_api//envoy/service/discovery/v2:pkg_cc_proto", "@envoy_api//envoy/service/discovery/v3:pkg_cc_proto", "@envoy_api//envoy/service/endpoint/v3:pkg_cc_proto", + "@envoy_api//envoy/service/extension/v3:pkg_cc_proto", "@envoy_api//envoy/service/listener/v3:pkg_cc_proto", "@envoy_api//envoy/service/ratelimit/v2:pkg_cc_proto", "@envoy_api//envoy/service/ratelimit/v3:pkg_cc_proto", diff --git a/source/common/config/protobuf_link_hacks.h b/source/common/config/protobuf_link_hacks.h index efcfa08f0c23..b613d60ff84c 100644 --- a/source/common/config/protobuf_link_hacks.h +++ b/source/common/config/protobuf_link_hacks.h @@ -13,6 +13,7 @@ #include "envoy/service/discovery/v2/sds.pb.h" #include "envoy/service/discovery/v3/ads.pb.h" #include "envoy/service/endpoint/v3/eds.pb.h" +#include "envoy/service/extension/v3/config_discovery.pb.h" #include "envoy/service/listener/v3/lds.pb.h" #include "envoy/service/ratelimit/v2/rls.pb.h" #include "envoy/service/ratelimit/v3/rls.pb.h" diff --git a/source/common/config/type_to_endpoint.cc b/source/common/config/type_to_endpoint.cc index d7434aaa01f8..9821b288dcbc 100644 --- a/source/common/config/type_to_endpoint.cc +++ b/source/common/config/type_to_endpoint.cc @@ -177,6 +177,13 @@ TypeUrlToVersionedServiceMap* buildTypeUrlToServiceMap() { "envoy.service.listener.v3.ListenerDiscoveryService"), SERVICE_VERSION_INFO("envoy.service.discovery.v2.RuntimeDiscoveryService", "envoy.service.runtime.v3.RuntimeDiscoveryService"), + ServiceVersionInfoMap{{ + "envoy.service.extension.v3.ExtensionConfigDiscoveryService", + ServiceVersionInfo{{ + {envoy::config::core::v3::ApiVersion::V3, + "envoy.service.extension.v3.ExtensionConfigDiscoveryService"}, + }}, + }}, }) { for (const auto& registered_service : registered) { const TypeUrl resource_type_url = getResourceTypeUrl(registered_service.first); diff --git a/source/common/config/utility.h b/source/common/config/utility.h index 09b9e0ea6f37..b4ee90445adf 100644 --- a/source/common/config/utility.h +++ b/source/common/config/utility.h @@ -236,27 +236,42 @@ class Utility { */ template static Factory& getAndCheckFactory(const ProtoMessage& message) { - const ProtobufWkt::Any& typed_config = message.typed_config(); + Factory* factory = Utility::getFactoryByType(message.typed_config()); + if (factory != nullptr) { + return *factory; + } + + return Utility::getAndCheckFactoryByName(message.name()); + } + + /** + * Get type URL from a typed 
config. + * @param typed_config for the extension config. + */ + static std::string getFactoryType(const ProtobufWkt::Any& typed_config) { static const std::string& typed_struct_type = udpa::type::v1::TypedStruct::default_instance().GetDescriptor()->full_name(); - - if (!typed_config.type_url().empty()) { - // Unpack methods will only use the fully qualified type name after the last '/'. - // https://github.com/protocolbuffers/protobuf/blob/3.6.x/src/google/protobuf/any.proto#L87 - auto type = std::string(TypeUtil::typeUrlToDescriptorFullName(typed_config.type_url())); - if (type == typed_struct_type) { - udpa::type::v1::TypedStruct typed_struct; - MessageUtil::unpackTo(typed_config, typed_struct); - // Not handling nested structs or typed structs in typed structs - type = std::string(TypeUtil::typeUrlToDescriptorFullName(typed_struct.type_url())); - } - Factory* factory = Registry::FactoryRegistry::getFactoryByType(type); - if (factory != nullptr) { - return *factory; - } + // Unpack methods will only use the fully qualified type name after the last '/'. + // https://github.com/protocolbuffers/protobuf/blob/3.6.x/src/google/protobuf/any.proto#L87 + auto type = std::string(TypeUtil::typeUrlToDescriptorFullName(typed_config.type_url())); + if (type == typed_struct_type) { + udpa::type::v1::TypedStruct typed_struct; + MessageUtil::unpackTo(typed_config, typed_struct); + // Not handling nested structs or typed structs in typed structs + return std::string(TypeUtil::typeUrlToDescriptorFullName(typed_struct.type_url())); } + return type; + } - return Utility::getAndCheckFactoryByName(message.name()); + /** + * Get a Factory from the registry by type URL. + * @param typed_config for the extension config. + */ + template static Factory* getFactoryByType(const ProtobufWkt::Any& typed_config) { + if (typed_config.type_url().empty()) { + return nullptr; + } + return Registry::FactoryRegistry::getFactoryByType(getFactoryType(typed_config)); } /** diff --git a/source/common/filter/http/BUILD b/source/common/filter/http/BUILD new file mode 100644 index 000000000000..888c2fd44b12 --- /dev/null +++ b/source/common/filter/http/BUILD @@ -0,0 +1,31 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_library( + name = "filter_config_discovery_lib", + srcs = ["filter_config_discovery_impl.cc"], + hdrs = ["filter_config_discovery_impl.h"], + deps = [ + "//include/envoy/config:subscription_interface", + "//include/envoy/filter/http:filter_config_provider_interface", + "//include/envoy/singleton:instance_interface", + "//include/envoy/stats:stats_macros", + "//include/envoy/thread_local:thread_local_interface", + "//source/common/config:subscription_base_interface", + "//source/common/config:subscription_factory_lib", + "//source/common/config:utility_lib", + "//source/common/grpc:common_lib", + "//source/common/init:manager_lib", + "//source/common/init:target_lib", + "//source/common/init:watcher_lib", + "//source/common/protobuf:utility_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + ], +) diff --git a/source/common/filter/http/filter_config_discovery_impl.cc b/source/common/filter/http/filter_config_discovery_impl.cc new file mode 100644 index 000000000000..eccab3fe988d --- /dev/null +++ b/source/common/filter/http/filter_config_discovery_impl.cc @@ -0,0 +1,211 @@ +#include "common/filter/http/filter_config_discovery_impl.h" + +#include 
"envoy/config/core/v3/extension.pb.validate.h" +#include "envoy/server/filter_config.h" + +#include "common/config/utility.h" +#include "common/grpc/common.h" +#include "common/protobuf/utility.h" + +#include "absl/strings/str_join.h" + +namespace Envoy { +namespace Filter { +namespace Http { + +DynamicFilterConfigProviderImpl::DynamicFilterConfigProviderImpl( + FilterConfigSubscriptionSharedPtr&& subscription, + const std::set& require_type_urls, + Server::Configuration::FactoryContext& factory_context) + : subscription_(std::move(subscription)), require_type_urls_(require_type_urls), + tls_(factory_context.threadLocal().allocateSlot()), + init_target_("DynamicFilterConfigProviderImpl", [this]() { + subscription_->start(); + // This init target is used to activate the subscription but not wait + // for a response. It is used whenever a default config is provided to be + // used while waiting for a response. + init_target_.ready(); + }) { + subscription_->filter_config_providers_.insert(this); + tls_->set([](Event::Dispatcher&) -> ThreadLocal::ThreadLocalObjectSharedPtr { + return std::make_shared(); + }); +} + +DynamicFilterConfigProviderImpl::~DynamicFilterConfigProviderImpl() { + subscription_->filter_config_providers_.erase(this); +} + +const std::string& DynamicFilterConfigProviderImpl::name() { return subscription_->name(); } + +absl::optional DynamicFilterConfigProviderImpl::config() { + return tls_->getTyped().config_; +} + +void DynamicFilterConfigProviderImpl::validateConfig( + const ProtobufWkt::Any& proto_config, Server::Configuration::NamedHttpFilterConfigFactory&) { + auto type_url = Config::Utility::getFactoryType(proto_config); + if (require_type_urls_.count(type_url) == 0) { + throw EnvoyException(fmt::format("Error: filter config has type URL {} but expect {}.", + type_url, absl::StrJoin(require_type_urls_, ", "))); + } +} + +void DynamicFilterConfigProviderImpl::onConfigUpdate(Envoy::Http::FilterFactoryCb config, + const std::string&) { + tls_->runOnAllThreads([config](ThreadLocal::ThreadLocalObjectSharedPtr previous) + -> ThreadLocal::ThreadLocalObjectSharedPtr { + auto prev_config = std::dynamic_pointer_cast(previous); + prev_config->config_ = config; + return previous; + }); +} + +FilterConfigSubscription::FilterConfigSubscription( + const envoy::config::core::v3::ConfigSource& config_source, + const std::string& filter_config_name, Server::Configuration::FactoryContext& factory_context, + const std::string& stat_prefix, FilterConfigProviderManagerImpl& filter_config_provider_manager, + const std::string& subscription_id) + : Config::SubscriptionBase( + envoy::config::core::v3::ApiVersion::V3, + factory_context.messageValidationContext().dynamicValidationVisitor(), "name"), + filter_config_name_(filter_config_name), factory_context_(factory_context), + validator_(factory_context.messageValidationContext().dynamicValidationVisitor()), + init_target_(fmt::format("FilterConfigSubscription init {}", filter_config_name_), + [this]() { start(); }), + scope_(factory_context.scope().createScope(stat_prefix + "extension_config_discovery." 
+ + filter_config_name_ + ".")), + stat_prefix_(stat_prefix), + stats_({ALL_EXTENSION_CONFIG_DISCOVERY_STATS(POOL_COUNTER(*scope_))}), + filter_config_provider_manager_(filter_config_provider_manager), + subscription_id_(subscription_id) { + const auto resource_name = getResourceName(); + subscription_ = + factory_context.clusterManager().subscriptionFactory().subscriptionFromConfigSource( + config_source, Grpc::Common::typeUrl(resource_name), *scope_, *this, resource_decoder_); +} + +void FilterConfigSubscription::start() { + if (!started_) { + started_ = true; + subscription_->start({filter_config_name_}); + } +} + +void FilterConfigSubscription::onConfigUpdate( + const std::vector& resources, const std::string& version_info) { + // Make sure to make progress in case the control plane is temporarily inconsistent. + init_target_.ready(); + + if (resources.size() != 1) { + throw EnvoyException(fmt::format( + "Unexpected number of resources in ExtensionConfigDS response: {}", resources.size())); + } + const auto& filter_config = dynamic_cast( + resources[0].get().resource()); + if (filter_config.name() != filter_config_name_) { + throw EnvoyException(fmt::format("Unexpected resource name in ExtensionConfigDS response: {}", + filter_config.name())); + } + // Skip update if hash matches + const uint64_t new_hash = MessageUtil::hash(filter_config.typed_config()); + if (new_hash == last_config_hash_) { + return; + } + auto& factory = + Config::Utility::getAndCheckFactory( + filter_config); + // Ensure that the filter config is valid in the filter chain context once the proto is processed. + // Validation happens before updating to prevent a partial update application. It might be + // possible that the providers have distinct type URL constraints. + for (auto* provider : filter_config_providers_) { + provider->validateConfig(filter_config.typed_config(), factory); + } + ProtobufTypes::MessagePtr message = Config::Utility::translateAnyToFactoryConfig( + filter_config.typed_config(), validator_, factory); + Envoy::Http::FilterFactoryCb factory_callback = + factory.createFilterFactoryFromProto(*message, stat_prefix_, factory_context_); + ENVOY_LOG(debug, "Updating filter config {}", filter_config_name_); + for (auto* provider : filter_config_providers_) { + provider->onConfigUpdate(factory_callback, version_info); + } + stats_.config_reload_.inc(); + last_config_hash_ = new_hash; +} + +void FilterConfigSubscription::onConfigUpdate( + const std::vector& added_resources, + const Protobuf::RepeatedPtrField& removed_resources, const std::string&) { + if (!removed_resources.empty()) { + ENVOY_LOG(error, + "Server sent a delta ExtensionConfigDS update attempting to remove a resource (name: " + "{}). Ignoring.", + removed_resources[0]); + } + if (!added_resources.empty()) { + onConfigUpdate(added_resources, added_resources[0].get().version()); + } +} + +void FilterConfigSubscription::onConfigUpdateFailed(Config::ConfigUpdateFailureReason reason, + const EnvoyException*) { + ENVOY_LOG(debug, "Updating filter config {} failed due to {}", filter_config_name_, reason); + stats_.config_fail_.inc(); + // Make sure to make progress in case the control plane is temporarily failing. + init_target_.ready(); +} + +FilterConfigSubscription::~FilterConfigSubscription() { + // If we get destroyed during initialization, make sure we signal that we "initialized". + init_target_.ready(); + // Remove the subscription from the provider manager. 
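+  // The manager only keeps weak_ptr entries keyed by the subscription id, so the entry has to be
+  // erased here; otherwise a later getSubscription() call could find a stale key whose weak_ptr no
+  // longer locks.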
+ filter_config_provider_manager_.subscriptions_.erase(subscription_id_); +} + +std::shared_ptr FilterConfigProviderManagerImpl::getSubscription( + const envoy::config::core::v3::ConfigSource& config_source, const std::string& name, + Server::Configuration::FactoryContext& factory_context, const std::string& stat_prefix) { + // FilterConfigSubscriptions are unique based on their config source and filter config name + // combination. + // TODO(https://github.com/envoyproxy/envoy/issues/11967) Hash collision can cause subscription + // aliasing. + const std::string subscription_id = absl::StrCat(MessageUtil::hash(config_source), ".", name); + auto it = subscriptions_.find(subscription_id); + if (it == subscriptions_.end()) { + auto subscription = std::make_shared( + config_source, name, factory_context, stat_prefix, *this, subscription_id); + subscriptions_.insert({subscription_id, std::weak_ptr(subscription)}); + return subscription; + } else { + auto existing = it->second.lock(); + ASSERT(existing != nullptr, + absl::StrCat("Cannot find subscribed filter config resource ", name)); + return existing; + } +} + +FilterConfigProviderPtr FilterConfigProviderManagerImpl::createDynamicFilterConfigProvider( + const envoy::config::core::v3::ConfigSource& config_source, + const std::string& filter_config_name, const std::set& require_type_urls, + Server::Configuration::FactoryContext& factory_context, const std::string& stat_prefix, + bool apply_without_warming) { + auto subscription = + getSubscription(config_source, filter_config_name, factory_context, stat_prefix); + // For warming, wait until the subscription receives the first response to indicate readiness. + // Otherwise, mark ready immediately and start the subscription on initialization. A default + // config is expected in the latter case. + if (!apply_without_warming) { + factory_context.initManager().add(subscription->initTarget()); + } + auto provider = std::make_unique( + std::move(subscription), require_type_urls, factory_context); + // Ensure the subscription starts if it has not already. + if (apply_without_warming) { + factory_context.initManager().add(provider->init_target_); + } + return provider; +} + +} // namespace Http +} // namespace Filter +} // namespace Envoy diff --git a/source/common/filter/http/filter_config_discovery_impl.h b/source/common/filter/http/filter_config_discovery_impl.h new file mode 100644 index 000000000000..1c2c838c5aae --- /dev/null +++ b/source/common/filter/http/filter_config_discovery_impl.h @@ -0,0 +1,184 @@ +#pragma once + +#include "envoy/config/core/v3/extension.pb.h" +#include "envoy/config/core/v3/extension.pb.validate.h" +#include "envoy/config/subscription.h" +#include "envoy/filter/http/filter_config_provider.h" +#include "envoy/protobuf/message_validator.h" +#include "envoy/server/factory_context.h" +#include "envoy/singleton/instance.h" +#include "envoy/stats/scope.h" +#include "envoy/stats/stats_macros.h" + +#include "common/config/subscription_base.h" +#include "common/init/manager_impl.h" +#include "common/init/target_impl.h" + +#include "absl/container/flat_hash_map.h" +#include "absl/container/flat_hash_set.h" + +namespace Envoy { +namespace Filter { +namespace Http { + +class FilterConfigProviderManagerImpl; +class FilterConfigSubscription; + +using FilterConfigSubscriptionSharedPtr = std::shared_ptr; + +/** + * Implementation of a filter config provider using discovery subscriptions. 
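+ * The latest accepted factory callback is kept in a thread-local slot and swapped via
+ * runOnAllThreads(), so an update only affects streams created after it reaches each worker.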
+ **/ +class DynamicFilterConfigProviderImpl : public FilterConfigProvider { +public: + DynamicFilterConfigProviderImpl(FilterConfigSubscriptionSharedPtr&& subscription, + const std::set& require_type_urls, + Server::Configuration::FactoryContext& factory_context); + ~DynamicFilterConfigProviderImpl() override; + + // Config::ExtensionConfigProvider + const std::string& name() override; + absl::optional config() override; + void validateConfig(const ProtobufWkt::Any& proto_config, + Server::Configuration::NamedHttpFilterConfigFactory&) override; + void onConfigUpdate(Envoy::Http::FilterFactoryCb config, const std::string&) override; + +private: + struct ThreadLocalConfig : public ThreadLocal::ThreadLocalObject { + ThreadLocalConfig() : config_{absl::nullopt} {} + absl::optional config_{}; + }; + + FilterConfigSubscriptionSharedPtr subscription_; + const std::set require_type_urls_; + ThreadLocal::SlotPtr tls_; + + // Local initialization target to ensure that the subscription starts in + // case no warming is requested by any other filter config provider. + Init::TargetImpl init_target_; + + friend class FilterConfigProviderManagerImpl; +}; + +/** + * All extension config discovery stats. @see stats_macros.h + */ +#define ALL_EXTENSION_CONFIG_DISCOVERY_STATS(COUNTER) \ + COUNTER(config_reload) \ + COUNTER(config_fail) + +/** + * Struct definition for all extension config discovery stats. @see stats_macros.h + */ +struct ExtensionConfigDiscoveryStats { + ALL_EXTENSION_CONFIG_DISCOVERY_STATS(GENERATE_COUNTER_STRUCT) +}; + +/** + * A class that fetches the filter configuration dynamically using the filter config discovery API. + * Subscriptions are shared between the filter config providers. The filter config providers are + * notified when a new config is accepted. + */ +class FilterConfigSubscription + : Config::SubscriptionBase, + Logger::Loggable { +public: + FilterConfigSubscription(const envoy::config::core::v3::ConfigSource& config_source, + const std::string& filter_config_name, + Server::Configuration::FactoryContext& factory_context, + const std::string& stat_prefix, + FilterConfigProviderManagerImpl& filter_config_provider_manager, + const std::string& subscription_id); + + ~FilterConfigSubscription() override; + + const Init::SharedTargetImpl& initTarget() { return init_target_; } + const std::string& name() { return filter_config_name_; } + +private: + void start(); + + // Config::SubscriptionCallbacks + void onConfigUpdate(const std::vector& resources, + const std::string& version_info) override; + void onConfigUpdate(const std::vector& added_resources, + const Protobuf::RepeatedPtrField& removed_resources, + const std::string&) override; + void onConfigUpdateFailed(Config::ConfigUpdateFailureReason reason, + const EnvoyException*) override; + + std::unique_ptr subscription_; + const std::string filter_config_name_; + uint64_t last_config_hash_{0ul}; + Server::Configuration::FactoryContext& factory_context_; + ProtobufMessage::ValidationVisitor& validator_; + + Init::SharedTargetImpl init_target_; + bool started_{false}; + + Stats::ScopePtr scope_; + const std::string stat_prefix_; + ExtensionConfigDiscoveryStats stats_; + + // FilterConfigProviderManagerImpl maintains active subscriptions in a map. + FilterConfigProviderManagerImpl& filter_config_provider_manager_; + const std::string subscription_id_; + absl::flat_hash_set filter_config_providers_; + friend class DynamicFilterConfigProviderImpl; +}; + +/** + * Provider implementation of a static filter config. 
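+ * Wraps a filter factory callback that was fully resolved at configuration load time, so config()
+ * always has a value and the dynamic update entry points are never reached.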
+ **/ +class StaticFilterConfigProviderImpl : public FilterConfigProvider { +public: + StaticFilterConfigProviderImpl(const Envoy::Http::FilterFactoryCb& config, + const std::string filter_config_name) + : config_(config), filter_config_name_(filter_config_name) {} + + // Config::ExtensionConfigProvider + const std::string& name() override { return filter_config_name_; } + absl::optional config() override { return config_; } + void validateConfig(const ProtobufWkt::Any&, + Server::Configuration::NamedHttpFilterConfigFactory&) override { + NOT_REACHED_GCOVR_EXCL_LINE; + } + void onConfigUpdate(Envoy::Http::FilterFactoryCb, const std::string&) override { + NOT_REACHED_GCOVR_EXCL_LINE; + } + +private: + Envoy::Http::FilterFactoryCb config_; + const std::string filter_config_name_; +}; + +/** + * An implementation of FilterConfigProviderManager. + */ +class FilterConfigProviderManagerImpl : public FilterConfigProviderManager, + public Singleton::Instance { +public: + FilterConfigProviderPtr createDynamicFilterConfigProvider( + const envoy::config::core::v3::ConfigSource& config_source, + const std::string& filter_config_name, const std::set& require_type_urls, + Server::Configuration::FactoryContext& factory_context, const std::string& stat_prefix, + bool apply_without_warming) override; + + FilterConfigProviderPtr + createStaticFilterConfigProvider(const Envoy::Http::FilterFactoryCb& config, + const std::string& filter_config_name) override { + return std::make_unique(config, filter_config_name); + } + +private: + std::shared_ptr + getSubscription(const envoy::config::core::v3::ConfigSource& config_source, + const std::string& name, Server::Configuration::FactoryContext& factory_context, + const std::string& stat_prefix); + absl::flat_hash_map> subscriptions_; + friend class FilterConfigSubscription; +}; + +} // namespace Http +} // namespace Filter +} // namespace Envoy diff --git a/source/common/stream_info/utility.cc b/source/common/stream_info/utility.cc index 9a5a690b682f..2f7049545bd3 100644 --- a/source/common/stream_info/utility.cc +++ b/source/common/stream_info/utility.cc @@ -27,6 +27,7 @@ const std::string ResponseFlagUtils::INVALID_ENVOY_REQUEST_HEADERS = "IH"; const std::string ResponseFlagUtils::DOWNSTREAM_PROTOCOL_ERROR = "DPE"; const std::string ResponseFlagUtils::UPSTREAM_MAX_STREAM_DURATION_REACHED = "UMSDR"; const std::string ResponseFlagUtils::RESPONSE_FROM_CACHE_FILTER = "RFCF"; +const std::string ResponseFlagUtils::NO_FILTER_CONFIG_FOUND = "NFCF"; void ResponseFlagUtils::appendString(std::string& result, const std::string& append) { if (result.empty()) { @@ -39,7 +40,7 @@ void ResponseFlagUtils::appendString(std::string& result, const std::string& app const std::string ResponseFlagUtils::toShortString(const StreamInfo& stream_info) { std::string result; - static_assert(ResponseFlag::LastFlag == 0x100000, "A flag has been added. Fix this code."); + static_assert(ResponseFlag::LastFlag == 0x200000, "A flag has been added. Fix this code."); if (stream_info.hasResponseFlag(ResponseFlag::FailedLocalHealthCheck)) { appendString(result, FAILED_LOCAL_HEALTH_CHECK); @@ -124,6 +125,10 @@ const std::string ResponseFlagUtils::toShortString(const StreamInfo& stream_info appendString(result, RESPONSE_FROM_CACHE_FILTER); } + if (stream_info.hasResponseFlag(ResponseFlag::NoFilterConfigFound)) { + appendString(result, NO_FILTER_CONFIG_FOUND); + } + return result.empty() ? 
NONE : result; } @@ -153,6 +158,7 @@ absl::optional ResponseFlagUtils::toResponseFlag(const std::string {ResponseFlagUtils::UPSTREAM_MAX_STREAM_DURATION_REACHED, ResponseFlag::UpstreamMaxStreamDurationReached}, {ResponseFlagUtils::RESPONSE_FROM_CACHE_FILTER, ResponseFlag::ResponseFromCacheFilter}, + {ResponseFlagUtils::NO_FILTER_CONFIG_FOUND, ResponseFlag::NoFilterConfigFound}, }; const auto& it = map.find(flag); if (it != map.end()) { diff --git a/source/common/stream_info/utility.h b/source/common/stream_info/utility.h index 2c7b73d751fb..9b4ac08e413c 100644 --- a/source/common/stream_info/utility.h +++ b/source/common/stream_info/utility.h @@ -42,6 +42,7 @@ class ResponseFlagUtils { const static std::string DOWNSTREAM_PROTOCOL_ERROR; const static std::string UPSTREAM_MAX_STREAM_DURATION_REACHED; const static std::string RESPONSE_FROM_CACHE_FILTER; + const static std::string NO_FILTER_CONFIG_FOUND; }; /** diff --git a/source/extensions/access_loggers/grpc/grpc_access_log_utils.cc b/source/extensions/access_loggers/grpc/grpc_access_log_utils.cc index 0977540b4102..74b061cbad7c 100644 --- a/source/extensions/access_loggers/grpc/grpc_access_log_utils.cc +++ b/source/extensions/access_loggers/grpc/grpc_access_log_utils.cc @@ -37,7 +37,7 @@ void Utility::responseFlagsToAccessLogResponseFlags( envoy::data::accesslog::v3::AccessLogCommon& common_access_log, const StreamInfo::StreamInfo& stream_info) { - static_assert(StreamInfo::ResponseFlag::LastFlag == 0x100000, + static_assert(StreamInfo::ResponseFlag::LastFlag == 0x200000, "A flag has been added. Fix this code."); if (stream_info.hasResponseFlag(StreamInfo::ResponseFlag::FailedLocalHealthCheck)) { @@ -122,6 +122,9 @@ void Utility::responseFlagsToAccessLogResponseFlags( if (stream_info.hasResponseFlag(StreamInfo::ResponseFlag::ResponseFromCacheFilter)) { common_access_log.mutable_response_flags()->set_response_from_cache_filter(true); } + if (stream_info.hasResponseFlag(StreamInfo::ResponseFlag::NoFilterConfigFound)) { + common_access_log.mutable_response_flags()->set_no_filter_config_found(true); + } } void Utility::extractCommonAccessLogProperties( diff --git a/source/extensions/filters/network/http_connection_manager/BUILD b/source/extensions/filters/network/http_connection_manager/BUILD index 7ab7817d80fd..5d03f03ecc4a 100644 --- a/source/extensions/filters/network/http_connection_manager/BUILD +++ b/source/extensions/filters/network/http_connection_manager/BUILD @@ -31,6 +31,7 @@ envoy_cc_extension( "//source/common/access_log:access_log_lib", "//source/common/common:minimal_logger_lib", "//source/common/config:utility_lib", + "//source/common/filter/http:filter_config_discovery_lib", "//source/common/http:conn_manager_lib", "//source/common/http:default_server_string_lib", "//source/common/http:request_id_extension_lib", @@ -48,6 +49,7 @@ envoy_cc_extension( "//source/common/tracing:http_tracer_config_lib", "//source/common/tracing:http_tracer_lib", "//source/common/tracing:http_tracer_manager_lib", + "//source/extensions/filters/http/common:pass_through_filter_lib", "//source/extensions/filters/network:well_known_names", "//source/extensions/filters/network/common:factory_base_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", diff --git a/source/extensions/filters/network/http_connection_manager/config.cc b/source/extensions/filters/network/http_connection_manager/config.cc index 888709fe4251..ce274734447a 100644 --- a/source/extensions/filters/network/http_connection_manager/config.cc +++ 
b/source/extensions/filters/network/http_connection_manager/config.cc @@ -9,6 +9,7 @@ #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.validate.h" #include "envoy/filesystem/filesystem.h" +#include "envoy/registry/registry.h" #include "envoy/server/admin.h" #include "envoy/tracing/http_tracer.h" #include "envoy/type/tracing/v3/custom_tag.pb.h" @@ -17,6 +18,7 @@ #include "common/access_log/access_log_impl.h" #include "common/common/fmt.h" #include "common/config/utility.h" +#include "common/filter/http/filter_config_discovery_impl.h" #include "common/http/conn_manager_utility.h" #include "common/http/default_server_string.h" #include "common/http/http1/codec_impl.h" @@ -35,6 +37,8 @@ #include "common/tracing/http_tracer_config_impl.h" #include "common/tracing/http_tracer_manager_impl.h" +#include "extensions/filters/http/common/pass_through_filter.h" + namespace Envoy { namespace Extensions { namespace NetworkFilters { @@ -75,6 +79,16 @@ std::unique_ptr createInternalAddressConfig( return std::make_unique(); } +class MissingConfigFilter : public Http::PassThroughDecoderFilter { +public: + Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap&, bool) override { + decoder_callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::NoFilterConfigFound); + decoder_callbacks_->sendLocalReply(Http::Code::InternalServerError, EMPTY_STRING, nullptr, + absl::nullopt, EMPTY_STRING); + return Http::FilterHeadersStatus::StopIteration; + } +}; + } // namespace // Singleton registration via macro defined in envoy/singleton/manager.h @@ -82,6 +96,7 @@ SINGLETON_MANAGER_REGISTRATION(date_provider); SINGLETON_MANAGER_REGISTRATION(route_config_provider_manager); SINGLETON_MANAGER_REGISTRATION(scoped_routes_config_provider_manager); SINGLETON_MANAGER_REGISTRATION(http_tracer_manager); +SINGLETON_MANAGER_REGISTRATION(filter_config_provider_manager); Utility::Singletons Utility::createSingletons(Server::Configuration::FactoryContext& context) { std::shared_ptr date_provider = @@ -112,8 +127,13 @@ Utility::Singletons Utility::createSingletons(Server::Configuration::FactoryCont context.getServerFactoryContext(), context.messageValidationVisitor())); }); + std::shared_ptr filter_config_provider_manager = + context.singletonManager().getTyped( + SINGLETON_MANAGER_REGISTERED_NAME(filter_config_provider_manager), + [] { return std::make_shared(); }); + return {date_provider, route_config_provider_manager, scoped_routes_config_provider_manager, - http_tracer_manager}; + http_tracer_manager, filter_config_provider_manager}; } std::shared_ptr Utility::createConfig( @@ -122,10 +142,11 @@ std::shared_ptr Utility::createConfig( Server::Configuration::FactoryContext& context, Http::DateProvider& date_provider, Router::RouteConfigProviderManager& route_config_provider_manager, Config::ConfigProviderManager& scoped_routes_config_provider_manager, - Tracing::HttpTracerManager& http_tracer_manager) { + Tracing::HttpTracerManager& http_tracer_manager, + Filter::Http::FilterConfigProviderManager& filter_config_provider_manager) { return std::make_shared( proto_config, context, date_provider, route_config_provider_manager, - scoped_routes_config_provider_manager, http_tracer_manager); + scoped_routes_config_provider_manager, http_tracer_manager, filter_config_provider_manager); } Network::FilterFactoryCb @@ -137,7 +158,8 @@ 
HttpConnectionManagerFilterConfigFactory::createFilterFactoryFromProtoTyped( auto filter_config = Utility::createConfig( proto_config, context, *singletons.date_provider_, *singletons.route_config_provider_manager_, - *singletons.scoped_routes_config_provider_manager_, *singletons.http_tracer_manager_); + *singletons.scoped_routes_config_provider_manager_, *singletons.http_tracer_manager_, + *singletons.filter_config_provider_manager_); // This lambda captures the shared_ptrs created above, thus preserving the // reference count. @@ -169,7 +191,8 @@ HttpConnectionManagerConfig::HttpConnectionManagerConfig( Server::Configuration::FactoryContext& context, Http::DateProvider& date_provider, Router::RouteConfigProviderManager& route_config_provider_manager, Config::ConfigProviderManager& scoped_routes_config_provider_manager, - Tracing::HttpTracerManager& http_tracer_manager) + Tracing::HttpTracerManager& http_tracer_manager, + Filter::Http::FilterConfigProviderManager& filter_config_provider_manager) : context_(context), stats_prefix_(fmt::format("http.{}.", config.stat_prefix())), stats_(Http::ConnectionManagerImpl::generateStats(stats_prefix_, context_.scope())), tracing_stats_( @@ -180,6 +203,7 @@ HttpConnectionManagerConfig::HttpConnectionManagerConfig( skip_xff_append_(config.skip_xff_append()), via_(config.via()), route_config_provider_manager_(route_config_provider_manager), scoped_routes_config_provider_manager_(scoped_routes_config_provider_manager), + filter_config_provider_manager_(filter_config_provider_manager), http2_options_(Http2::Utility::initializeAndValidateOptions( config.http2_protocol_options(), config.has_stream_error_on_invalid_http_message(), config.stream_error_on_invalid_http_message())), @@ -451,17 +475,16 @@ HttpConnectionManagerConfig::HttpConnectionManagerConfig( void HttpConnectionManagerConfig::processFilter( const envoy::extensions::filters::network::http_connection_manager::v3::HttpFilter& proto_config, - int i, absl::string_view prefix, std::list& filter_factories, + int i, absl::string_view prefix, FilterFactoriesList& filter_factories, const char* filter_chain_type, bool last_filter_in_current_config) { ENVOY_LOG(debug, " {} filter #{}", prefix, i); - ENVOY_LOG(debug, " name: {}", proto_config.name()); - ENVOY_LOG(debug, " config: {}", - MessageUtil::getJsonStringFromMessage( - proto_config.has_typed_config() - ? static_cast(proto_config.typed_config()) - : static_cast( - proto_config.hidden_envoy_deprecated_config()), - true)); + if (proto_config.config_type_case() == + envoy::extensions::filters::network::http_connection_manager::v3::HttpFilter::ConfigTypeCase:: + kConfigDiscovery) { + processDynamicFilterConfig(proto_config.name(), proto_config.config_discovery(), + filter_factories, filter_chain_type, last_filter_in_current_config); + return; + } // Now see if there is a factory that will accept the config. auto& factory = @@ -474,7 +497,63 @@ void HttpConnectionManagerConfig::processFilter( bool is_terminal = factory.isTerminalFilter(); Config::Utility::validateTerminalFilters(proto_config.name(), factory.name(), filter_chain_type, is_terminal, last_filter_in_current_config); - filter_factories.push_back(callback); + auto filter_config_provider = filter_config_provider_manager_.createStaticFilterConfigProvider( + callback, proto_config.name()); + ENVOY_LOG(debug, " name: {}", filter_config_provider->name()); + ENVOY_LOG(debug, " config: {}", + MessageUtil::getJsonStringFromMessage( + proto_config.has_typed_config() + ? 
static_cast(proto_config.typed_config()) + : static_cast( + proto_config.hidden_envoy_deprecated_config()), + true)); + filter_factories.push_back(std::move(filter_config_provider)); +} + +void HttpConnectionManagerConfig::processDynamicFilterConfig( + const std::string& name, const envoy::config::core::v3::ExtensionConfigSource& config_discovery, + FilterFactoriesList& filter_factories, const char* filter_chain_type, + bool last_filter_in_current_config) { + ENVOY_LOG(debug, " dynamic filter name: {}", name); + if (config_discovery.apply_default_config_without_warming() && + !config_discovery.has_default_config()) { + throw EnvoyException(fmt::format( + "Error: filter config {} applied without warming but has no default config.", name)); + } + std::set require_type_urls; + for (const auto& type_url : config_discovery.type_urls()) { + auto factory_type_url = TypeUtil::typeUrlToDescriptorFullName(type_url); + require_type_urls.emplace(factory_type_url); + auto* factory = Registry::FactoryRegistry< + Server::Configuration::NamedHttpFilterConfigFactory>::getFactoryByType(factory_type_url); + if (factory == nullptr) { + throw EnvoyException( + fmt::format("Error: no factory found for a required type URL {}.", factory_type_url)); + } + Config::Utility::validateTerminalFilters(name, factory->name(), filter_chain_type, + factory->isTerminalFilter(), + last_filter_in_current_config); + } + auto filter_config_provider = filter_config_provider_manager_.createDynamicFilterConfigProvider( + config_discovery.config_source(), name, require_type_urls, context_, stats_prefix_, + config_discovery.apply_default_config_without_warming()); + if (config_discovery.has_default_config()) { + auto* default_factory = + Config::Utility::getFactoryByType( + config_discovery.default_config()); + if (default_factory == nullptr) { + throw EnvoyException(fmt::format("Error: cannot find filter factory {} for default filter " + "configuration with type URL {}.", + name, config_discovery.default_config().type_url())); + } + filter_config_provider->validateConfig(config_discovery.default_config(), *default_factory); + ProtobufTypes::MessagePtr message = Config::Utility::translateAnyToFactoryConfig( + config_discovery.default_config(), context_.messageValidationVisitor(), *default_factory); + Http::FilterFactoryCb default_config = + default_factory->createFilterFactoryFromProto(*message, stats_prefix_, context_); + filter_config_provider->onConfigUpdate(default_config, ""); + } + filter_factories.push_back(std::move(filter_config_provider)); } Http::ServerConnectionPtr @@ -528,12 +607,32 @@ HttpConnectionManagerConfig::createCodec(Network::Connection& connection, NOT_REACHED_GCOVR_EXCL_LINE; } -void HttpConnectionManagerConfig::createFilterChain(Http::FilterChainFactoryCallbacks& callbacks) { - for (const Http::FilterFactoryCb& factory : filter_factories_) { - factory(callbacks); +void HttpConnectionManagerConfig::createFilterChainForFactories( + Http::FilterChainFactoryCallbacks& callbacks, const FilterFactoriesList& filter_factories) { + bool added_missing_config_filter = false; + for (const auto& filter_config_provider : filter_factories) { + auto config = filter_config_provider->config(); + if (config.has_value()) { + config.value()(callbacks); + continue; + } + + // If a filter config is missing after warming, inject a local reply with status 500. 
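+    // The MissingConfigFilter defined above sets the NoFilterConfigFound response flag (logged as
+    // NFCF) before replying, and is added at most once per filter chain even if several providers
+    // are missing their configuration.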
+ if (!added_missing_config_filter) { + ENVOY_LOG(trace, "Missing filter config for a provider {}", filter_config_provider->name()); + callbacks.addStreamDecoderFilter( + Http::StreamDecoderFilterSharedPtr{std::make_shared()}); + added_missing_config_filter = true; + } else { + ENVOY_LOG(trace, "Provider {} missing a filter config", filter_config_provider->name()); + } } } +void HttpConnectionManagerConfig::createFilterChain(Http::FilterChainFactoryCallbacks& callbacks) { + createFilterChainForFactories(callbacks, filter_factories_); +} + bool HttpConnectionManagerConfig::createUpgradeFilterChain( absl::string_view upgrade_type, const Http::FilterChainFactory::UpgradeMap* per_route_upgrade_map, @@ -562,9 +661,7 @@ bool HttpConnectionManagerConfig::createUpgradeFilterChain( filters_to_use = it->second.filter_factories.get(); } - for (const Http::FilterFactoryCb& factory : *filters_to_use) { - factory(callbacks); - } + createFilterChainForFactories(callbacks, *filters_to_use); return true; } @@ -602,7 +699,8 @@ HttpConnectionManagerFactory::createHttpConnectionManagerFactoryFromProto( auto filter_config = Utility::createConfig( proto_config, context, *singletons.date_provider_, *singletons.route_config_provider_manager_, - *singletons.scoped_routes_config_provider_manager_, *singletons.http_tracer_manager_); + *singletons.scoped_routes_config_provider_manager_, *singletons.http_tracer_manager_, + *singletons.filter_config_provider_manager_); // This lambda captures the shared_ptrs created above, thus preserving the // reference count. diff --git a/source/extensions/filters/network/http_connection_manager/config.h b/source/extensions/filters/network/http_connection_manager/config.h index d2fef63dedb1..47cc707bdb89 100644 --- a/source/extensions/filters/network/http_connection_manager/config.h +++ b/source/extensions/filters/network/http_connection_manager/config.h @@ -8,8 +8,10 @@ #include #include "envoy/config/config_provider_manager.h" +#include "envoy/config/core/v3/extension.pb.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.validate.h" +#include "envoy/filter/http/filter_config_provider.h" #include "envoy/http/filter.h" #include "envoy/http/request_id_extension.h" #include "envoy/router/route_config_provider_manager.h" @@ -88,11 +90,12 @@ class HttpConnectionManagerConfig : Logger::Loggable, Server::Configuration::FactoryContext& context, Http::DateProvider& date_provider, Router::RouteConfigProviderManager& route_config_provider_manager, Config::ConfigProviderManager& scoped_routes_config_provider_manager, - Tracing::HttpTracerManager& http_tracer_manager); + Tracing::HttpTracerManager& http_tracer_manager, + Filter::Http::FilterConfigProviderManager& filter_config_provider_manager); // Http::FilterChainFactory void createFilterChain(Http::FilterChainFactoryCallbacks& callbacks) override; - using FilterFactoriesList = std::list; + using FilterFactoriesList = std::list; struct FilterConfig { std::unique_ptr filter_factories; bool allow_upgrade; @@ -178,6 +181,13 @@ class HttpConnectionManagerConfig : Logger::Loggable, proto_config, int i, absl::string_view prefix, FilterFactoriesList& filter_factories, const char* filter_chain_type, bool last_filter_in_current_config); + void + processDynamicFilterConfig(const std::string& name, + const envoy::config::core::v3::ExtensionConfigSource& config_discovery, + FilterFactoriesList& 
filter_factories, const char* filter_chain_type, + bool last_filter_in_current_config); + void createFilterChainForFactories(Http::FilterChainFactoryCallbacks& callbacks, + const FilterFactoriesList& filter_factories); /** * Determines what tracing provider to use for a given @@ -206,6 +216,7 @@ class HttpConnectionManagerConfig : Logger::Loggable, std::vector set_current_client_cert_details_; Router::RouteConfigProviderManager& route_config_provider_manager_; Config::ConfigProviderManager& scoped_routes_config_provider_manager_; + Filter::Http::FilterConfigProviderManager& filter_config_provider_manager_; CodecType codec_type_; envoy::config::core::v3::Http2ProtocolOptions http2_options_; const Http::Http1Settings http1_settings_; @@ -267,6 +278,7 @@ class Utility { Router::RouteConfigProviderManagerSharedPtr route_config_provider_manager_; Router::ScopedRoutesConfigProviderManagerSharedPtr scoped_routes_config_provider_manager_; Tracing::HttpTracerManagerSharedPtr http_tracer_manager_; + std::shared_ptr filter_config_provider_manager_; }; /** @@ -293,7 +305,8 @@ class Utility { Server::Configuration::FactoryContext& context, Http::DateProvider& date_provider, Router::RouteConfigProviderManager& route_config_provider_manager, Config::ConfigProviderManager& scoped_routes_config_provider_manager, - Tracing::HttpTracerManager& http_tracer_manager); + Tracing::HttpTracerManager& http_tracer_manager, + Filter::Http::FilterConfigProviderManager& filter_config_provider_manager); }; } // namespace HttpConnectionManager diff --git a/test/common/access_log/access_log_impl_test.cc b/test/common/access_log/access_log_impl_test.cc index 0bcac1bd72e5..09abacf4dc69 100644 --- a/test/common/access_log/access_log_impl_test.cc +++ b/test/common/access_log/access_log_impl_test.cc @@ -948,12 +948,13 @@ name: accesslog - DPE - UMSDR - RFCF + - NFCF typed_config: "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog path: /dev/null )EOF"; - static_assert(StreamInfo::ResponseFlag::LastFlag == 0x100000, + static_assert(StreamInfo::ResponseFlag::LastFlag == 0x200000, "A flag has been added. 
Fix this code."); const std::vector all_response_flags = { @@ -977,7 +978,8 @@ name: accesslog StreamInfo::ResponseFlag::InvalidEnvoyRequestHeaders, StreamInfo::ResponseFlag::DownstreamProtocolError, StreamInfo::ResponseFlag::UpstreamMaxStreamDurationReached, - StreamInfo::ResponseFlag::ResponseFromCacheFilter}; + StreamInfo::ResponseFlag::ResponseFromCacheFilter, + StreamInfo::ResponseFlag::NoFilterConfigFound}; InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); @@ -1009,7 +1011,8 @@ name: accesslog "[\"embedded message failed validation\"] | caused by " "ResponseFlagFilterValidationError.Flags[i]: [\"value must be in list \" [\"LH\" \"UH\" " "\"UT\" \"LR\" \"UR\" \"UF\" \"UC\" \"UO\" \"NR\" \"DI\" \"FI\" \"RL\" \"UAEX\" \"RLSE\" " - "\"DC\" \"URX\" \"SI\" \"IH\" \"DPE\" \"UMSDR\" \"RFCF\"]]): name: \"accesslog\"\nfilter {\n " + "\"DC\" \"URX\" \"SI\" \"IH\" \"DPE\" \"UMSDR\" \"RFCF\" \"NFCF\"]]): name: " + "\"accesslog\"\nfilter {\n " " " "response_flag_filter {\n flags: \"UnsupportedFlag\"\n }\n}\ntyped_config {\n " "[type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog] {\n path: \"/dev/null\"\n " @@ -1036,7 +1039,8 @@ name: accesslog "[\"embedded message failed validation\"] | caused by " "ResponseFlagFilterValidationError.Flags[i]: [\"value must be in list \" [\"LH\" \"UH\" " "\"UT\" \"LR\" \"UR\" \"UF\" \"UC\" \"UO\" \"NR\" \"DI\" \"FI\" \"RL\" \"UAEX\" \"RLSE\" " - "\"DC\" \"URX\" \"SI\" \"IH\" \"DPE\" \"UMSDR\" \"RFCF\"]]): name: \"accesslog\"\nfilter {\n " + "\"DC\" \"URX\" \"SI\" \"IH\" \"DPE\" \"UMSDR\" \"RFCF\" \"NFCF\"]]): name: " + "\"accesslog\"\nfilter {\n " " " "response_flag_filter {\n flags: \"UnsupportedFlag\"\n }\n}\ntyped_config {\n " "[type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog] {\n path: \"/dev/null\"\n " diff --git a/test/common/filter/http/BUILD b/test/common/filter/http/BUILD new file mode 100644 index 000000000000..c6ce0344543c --- /dev/null +++ b/test/common/filter/http/BUILD @@ -0,0 +1,30 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_test", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_test( + name = "filter_config_discovery_impl_test", + srcs = ["filter_config_discovery_impl_test.cc"], + deps = [ + "//source/common/config:utility_lib", + "//source/common/filter/http:filter_config_discovery_lib", + "//source/common/json:json_loader_lib", + "//source/extensions/filters/http/health_check:config", + "//source/extensions/filters/http/router:config", + "//test/mocks/local_info:local_info_mocks", + "//test/mocks/protobuf:protobuf_mocks", + "//test/mocks/server:server_mocks", + "//test/mocks/thread_local:thread_local_mocks", + "//test/mocks/upstream:upstream_mocks", + "//test/test_common:simulated_time_system_lib", + "//test/test_common:utility_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + "@envoy_api//envoy/service/discovery/v3:pkg_cc_proto", + ], +) diff --git a/test/common/filter/http/filter_config_discovery_impl_test.cc b/test/common/filter/http/filter_config_discovery_impl_test.cc new file mode 100644 index 000000000000..bd25e662a593 --- /dev/null +++ b/test/common/filter/http/filter_config_discovery_impl_test.cc @@ -0,0 +1,297 @@ +#include +#include +#include + +#include "envoy/config/core/v3/config_source.pb.h" +#include "envoy/config/core/v3/extension.pb.h" +#include "envoy/config/core/v3/extension.pb.validate.h" +#include "envoy/service/discovery/v3/discovery.pb.h" +#include "envoy/stats/scope.h" + +#include 
"common/config/utility.h" +#include "common/filter/http/filter_config_discovery_impl.h" +#include "common/json/json_loader.h" + +#include "test/mocks/init/mocks.h" +#include "test/mocks/local_info/mocks.h" +#include "test/mocks/protobuf/mocks.h" +#include "test/mocks/server/mocks.h" +#include "test/mocks/thread_local/mocks.h" +#include "test/mocks/upstream/mocks.h" +#include "test/test_common/printers.h" +#include "test/test_common/simulated_time_system.h" +#include "test/test_common/utility.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::_; +using testing::InSequence; +using testing::Invoke; +using testing::ReturnRef; + +namespace Envoy { +namespace Filter { +namespace Http { +namespace { + +class FilterConfigDiscoveryTestBase : public testing::Test { +public: + FilterConfigDiscoveryTestBase() { + // For server_factory_context + ON_CALL(factory_context_, scope()).WillByDefault(ReturnRef(scope_)); + ON_CALL(factory_context_, messageValidationContext()) + .WillByDefault(ReturnRef(validation_context_)); + EXPECT_CALL(validation_context_, dynamicValidationVisitor()) + .WillRepeatedly(ReturnRef(validation_visitor_)); + EXPECT_CALL(factory_context_, initManager()).WillRepeatedly(ReturnRef(init_manager_)); + ON_CALL(init_manager_, add(_)).WillByDefault(Invoke([this](const Init::Target& target) { + init_target_handle_ = target.createHandle("test"); + })); + ON_CALL(init_manager_, initialize(_)) + .WillByDefault(Invoke( + [this](const Init::Watcher& watcher) { init_target_handle_->initialize(watcher); })); + } + + Event::SimulatedTimeSystem& timeSystem() { return time_system_; } + + Event::SimulatedTimeSystem time_system_; + NiceMock validation_context_; + NiceMock validation_visitor_; + NiceMock init_manager_; + NiceMock factory_context_; + Init::ExpectableWatcherImpl init_watcher_; + Init::TargetHandlePtr init_target_handle_; + NiceMock scope_; +}; + +// Test base class with a single provider. +class FilterConfigDiscoveryImplTest : public FilterConfigDiscoveryTestBase { +public: + FilterConfigDiscoveryImplTest() { + filter_config_provider_manager_ = std::make_unique(); + } + ~FilterConfigDiscoveryImplTest() override { factory_context_.thread_local_.shutdownThread(); } + + FilterConfigProviderPtr createProvider(std::string name, bool warm) { + EXPECT_CALL(init_manager_, add(_)); + envoy::config::core::v3::ConfigSource config_source; + TestUtility::loadFromYaml("ads: {}", config_source); + return filter_config_provider_manager_->createDynamicFilterConfigProvider( + config_source, name, {"envoy.extensions.filters.http.router.v3.Router"}, factory_context_, + "xds.", !warm); + } + + void setup(bool warm = true) { + provider_ = createProvider("foo", warm); + callbacks_ = factory_context_.cluster_manager_.subscription_factory_.callbacks_; + EXPECT_CALL(*factory_context_.cluster_manager_.subscription_factory_.subscription_, start(_)); + if (!warm) { + EXPECT_CALL(init_watcher_, ready()); + } + init_manager_.initialize(init_watcher_); + } + + std::unique_ptr filter_config_provider_manager_; + FilterConfigProviderPtr provider_; + Config::SubscriptionCallbacks* callbacks_{}; +}; + +TEST_F(FilterConfigDiscoveryImplTest, DestroyReady) { + setup(); + EXPECT_CALL(init_watcher_, ready()); +} + +TEST_F(FilterConfigDiscoveryImplTest, Basic) { + InSequence s; + setup(); + EXPECT_EQ("foo", provider_->name()); + EXPECT_EQ(absl::nullopt, provider_->config()); + + // Initial request. 
+ { + const std::string response_yaml = R"EOF( + version_info: "1" + resources: + - "@type": type.googleapis.com/envoy.config.core.v3.TypedExtensionConfig + name: foo + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + )EOF"; + const auto response = + TestUtility::parseYaml(response_yaml); + const auto decoded_resources = + TestUtility::decodeResources(response); + + EXPECT_CALL(init_watcher_, ready()); + callbacks_->onConfigUpdate(decoded_resources.refvec_, response.version_info()); + EXPECT_NE(absl::nullopt, provider_->config()); + EXPECT_EQ(1UL, scope_.counter("xds.extension_config_discovery.foo.config_reload").value()); + EXPECT_EQ(0UL, scope_.counter("xds.extension_config_discovery.foo.config_fail").value()); + } + + // 2nd request with same response. Based on hash should not reload config. + { + const std::string response_yaml = R"EOF( + version_info: "2" + resources: + - "@type": type.googleapis.com/envoy.config.core.v3.TypedExtensionConfig + name: foo + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + )EOF"; + const auto response = + TestUtility::parseYaml(response_yaml); + const auto decoded_resources = + TestUtility::decodeResources(response); + callbacks_->onConfigUpdate(decoded_resources.refvec_, response.version_info()); + EXPECT_EQ(1UL, scope_.counter("xds.extension_config_discovery.foo.config_reload").value()); + EXPECT_EQ(0UL, scope_.counter("xds.extension_config_discovery.foo.config_fail").value()); + } +} + +TEST_F(FilterConfigDiscoveryImplTest, ConfigFailed) { + InSequence s; + setup(); + EXPECT_CALL(init_watcher_, ready()); + callbacks_->onConfigUpdateFailed(Config::ConfigUpdateFailureReason::FetchTimedout, {}); + EXPECT_EQ(0UL, scope_.counter("xds.extension_config_discovery.foo.config_reload").value()); + EXPECT_EQ(1UL, scope_.counter("xds.extension_config_discovery.foo.config_fail").value()); +} + +TEST_F(FilterConfigDiscoveryImplTest, TooManyResources) { + InSequence s; + setup(); + const std::string response_yaml = R"EOF( + version_info: "1" + resources: + - "@type": type.googleapis.com/envoy.config.core.v3.TypedExtensionConfig + name: foo + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + - "@type": type.googleapis.com/envoy.config.core.v3.TypedExtensionConfig + name: foo + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + )EOF"; + const auto response = + TestUtility::parseYaml(response_yaml); + const auto decoded_resources = + TestUtility::decodeResources(response); + EXPECT_CALL(init_watcher_, ready()); + EXPECT_THROW_WITH_MESSAGE( + callbacks_->onConfigUpdate(decoded_resources.refvec_, response.version_info()), + EnvoyException, "Unexpected number of resources in ExtensionConfigDS response: 2"); + EXPECT_EQ(0UL, scope_.counter("xds.extension_config_discovery.foo.config_reload").value()); +} + +TEST_F(FilterConfigDiscoveryImplTest, WrongName) { + InSequence s; + setup(); + const std::string response_yaml = R"EOF( + version_info: "1" + resources: + - "@type": type.googleapis.com/envoy.config.core.v3.TypedExtensionConfig + name: bar + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + )EOF"; + const auto response = + TestUtility::parseYaml(response_yaml); + const auto decoded_resources = + TestUtility::decodeResources(response); + EXPECT_CALL(init_watcher_, ready()); + EXPECT_THROW_WITH_MESSAGE( + 
callbacks_->onConfigUpdate(decoded_resources.refvec_, response.version_info()), + EnvoyException, "Unexpected resource name in ExtensionConfigDS response: bar"); + EXPECT_EQ(0UL, scope_.counter("xds.extension_config_discovery.foo.config_reload").value()); +} + +TEST_F(FilterConfigDiscoveryImplTest, Incremental) { + InSequence s; + setup(); + const std::string response_yaml = R"EOF( +version_info: "1" +resources: +- "@type": type.googleapis.com/envoy.config.core.v3.TypedExtensionConfig + name: foo + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router +)EOF"; + const auto response = + TestUtility::parseYaml(response_yaml); + const auto decoded_resources = + TestUtility::decodeResources(response); + Protobuf::RepeatedPtrField remove; + *remove.Add() = "bar"; + EXPECT_CALL(init_watcher_, ready()); + callbacks_->onConfigUpdate(decoded_resources.refvec_, remove, response.version_info()); + EXPECT_NE(absl::nullopt, provider_->config()); + EXPECT_EQ(1UL, scope_.counter("xds.extension_config_discovery.foo.config_reload").value()); + EXPECT_EQ(0UL, scope_.counter("xds.extension_config_discovery.foo.config_fail").value()); +} + +TEST_F(FilterConfigDiscoveryImplTest, ApplyWithoutWarming) { + InSequence s; + setup(false); + EXPECT_EQ("foo", provider_->name()); + EXPECT_EQ(absl::nullopt, provider_->config()); + EXPECT_EQ(0UL, scope_.counter("xds.extension_config_discovery.foo.config_reload").value()); + EXPECT_EQ(0UL, scope_.counter("xds.extension_config_discovery.foo.config_fail").value()); +} + +TEST_F(FilterConfigDiscoveryImplTest, DualProviders) { + InSequence s; + setup(); + auto provider2 = createProvider("foo", true); + EXPECT_EQ("foo", provider2->name()); + EXPECT_EQ(absl::nullopt, provider2->config()); + const std::string response_yaml = R"EOF( + version_info: "1" + resources: + - "@type": type.googleapis.com/envoy.config.core.v3.TypedExtensionConfig + name: foo + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + )EOF"; + const auto response = + TestUtility::parseYaml(response_yaml); + const auto decoded_resources = + TestUtility::decodeResources(response); + EXPECT_CALL(init_watcher_, ready()); + callbacks_->onConfigUpdate(decoded_resources.refvec_, response.version_info()); + EXPECT_NE(absl::nullopt, provider_->config()); + EXPECT_NE(absl::nullopt, provider2->config()); + EXPECT_EQ(1UL, scope_.counter("xds.extension_config_discovery.foo.config_reload").value()); +} + +TEST_F(FilterConfigDiscoveryImplTest, DualProvidersInvalid) { + InSequence s; + setup(); + auto provider2 = createProvider("foo", true); + const std::string response_yaml = R"EOF( + version_info: "1" + resources: + - "@type": type.googleapis.com/envoy.config.core.v3.TypedExtensionConfig + name: foo + typed_config: + "@type": type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck + pass_through_mode: false + )EOF"; + const auto response = + TestUtility::parseYaml(response_yaml); + const auto decoded_resources = + TestUtility::decodeResources(response); + EXPECT_CALL(init_watcher_, ready()); + EXPECT_THROW_WITH_MESSAGE( + callbacks_->onConfigUpdate(decoded_resources.refvec_, response.version_info()), + EnvoyException, + "Error: filter config has type URL envoy.config.filter.http.health_check.v2.HealthCheck but " + "expect envoy.extensions.filters.http.router.v3.Router."); + EXPECT_EQ(0UL, scope_.counter("xds.extension_config_discovery.foo.config_reload").value()); +} + +} // namespace +} // namespace Http +} // namespace Filter +} 
// namespace Envoy diff --git a/test/common/stream_info/utility_test.cc b/test/common/stream_info/utility_test.cc index 6492488efa98..f74faa902220 100644 --- a/test/common/stream_info/utility_test.cc +++ b/test/common/stream_info/utility_test.cc @@ -15,7 +15,7 @@ namespace StreamInfo { namespace { TEST(ResponseFlagUtilsTest, toShortStringConversion) { - static_assert(ResponseFlag::LastFlag == 0x100000, "A flag has been added. Fix this code."); + static_assert(ResponseFlag::LastFlag == 0x200000, "A flag has been added. Fix this code."); std::vector> expected = { std::make_pair(ResponseFlag::FailedLocalHealthCheck, "LH"), @@ -38,7 +38,8 @@ TEST(ResponseFlagUtilsTest, toShortStringConversion) { std::make_pair(ResponseFlag::InvalidEnvoyRequestHeaders, "IH"), std::make_pair(ResponseFlag::DownstreamProtocolError, "DPE"), std::make_pair(ResponseFlag::UpstreamMaxStreamDurationReached, "UMSDR"), - std::make_pair(ResponseFlag::ResponseFromCacheFilter, "RFCF")}; + std::make_pair(ResponseFlag::ResponseFromCacheFilter, "RFCF"), + std::make_pair(ResponseFlag::NoFilterConfigFound, "NFCF")}; for (const auto& test_case : expected) { NiceMock stream_info; @@ -66,7 +67,7 @@ TEST(ResponseFlagUtilsTest, toShortStringConversion) { } TEST(ResponseFlagsUtilsTest, toResponseFlagConversion) { - static_assert(ResponseFlag::LastFlag == 0x100000, "A flag has been added. Fix this code."); + static_assert(ResponseFlag::LastFlag == 0x200000, "A flag has been added. Fix this code."); std::vector> expected = { std::make_pair("LH", ResponseFlag::FailedLocalHealthCheck), @@ -89,7 +90,8 @@ TEST(ResponseFlagsUtilsTest, toResponseFlagConversion) { std::make_pair("IH", ResponseFlag::InvalidEnvoyRequestHeaders), std::make_pair("DPE", ResponseFlag::DownstreamProtocolError), std::make_pair("UMSDR", ResponseFlag::UpstreamMaxStreamDurationReached), - std::make_pair("RFCF", ResponseFlag::ResponseFromCacheFilter)}; + std::make_pair("RFCF", ResponseFlag::ResponseFromCacheFilter), + std::make_pair("NFCF", ResponseFlag::NoFilterConfigFound)}; EXPECT_FALSE(ResponseFlagUtils::toResponseFlag("NonExistentFlag").has_value()); diff --git a/test/extensions/access_loggers/grpc/grpc_access_log_utils_test.cc b/test/extensions/access_loggers/grpc/grpc_access_log_utils_test.cc index b824aeb2a3ad..5e3a4460e6bf 100644 --- a/test/extensions/access_loggers/grpc/grpc_access_log_utils_test.cc +++ b/test/extensions/access_loggers/grpc/grpc_access_log_utils_test.cc @@ -43,6 +43,7 @@ TEST(UtilityResponseFlagsToAccessLogResponseFlagsTest, All) { common_access_log_expected.mutable_response_flags()->set_upstream_max_stream_duration_reached( true); common_access_log_expected.mutable_response_flags()->set_response_from_cache_filter(true); + common_access_log_expected.mutable_response_flags()->set_no_filter_config_found(true); EXPECT_EQ(common_access_log_expected.DebugString(), common_access_log.DebugString()); } diff --git a/test/extensions/filters/network/http_connection_manager/BUILD b/test/extensions/filters/network/http_connection_manager/BUILD index 4f264b13a029..15a050c21711 100644 --- a/test/extensions/filters/network/http_connection_manager/BUILD +++ b/test/extensions/filters/network/http_connection_manager/BUILD @@ -25,6 +25,7 @@ envoy_extension_cc_test( ":config_cc_proto", "//source/common/buffer:buffer_lib", "//source/common/event:dispatcher_lib", + "//source/common/filter/http:filter_config_discovery_lib", "//source/extensions/access_loggers/file:config", "//source/extensions/filters/http/health_check:config", 
"//source/extensions/filters/http/router:config", diff --git a/test/extensions/filters/network/http_connection_manager/config_test.cc b/test/extensions/filters/network/http_connection_manager/config_test.cc index 5e7648ba1ce4..170246b40eb9 100644 --- a/test/extensions/filters/network/http_connection_manager/config_test.cc +++ b/test/extensions/filters/network/http_connection_manager/config_test.cc @@ -8,6 +8,7 @@ #include "envoy/type/v3/percent.pb.h" #include "common/buffer/buffer_impl.h" +#include "common/filter/http/filter_config_discovery_impl.h" #include "common/http/date_provider_impl.h" #include "common/http/request_id_extension_uuid_impl.h" @@ -54,12 +55,14 @@ class HttpConnectionManagerConfigTest : public testing::Test { NiceMock route_config_provider_manager_; NiceMock scoped_routes_config_provider_manager_; NiceMock http_tracer_manager_; + Filter::Http::FilterConfigProviderManagerImpl filter_config_provider_manager_; std::shared_ptr> http_tracer_{ std::make_shared>()}; void createHttpConnectionManagerConfig(const std::string& yaml) { HttpConnectionManagerConfig(parseHttpConnectionManagerFromYaml(yaml), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); } }; @@ -176,7 +179,8 @@ stat_prefix: router HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string, false), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_EQ(128, config.tracingConfig()->max_path_tag_length_); EXPECT_EQ(*context_.local_info_.address_, config.localAddress()); @@ -211,7 +215,8 @@ stat_prefix: router HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); // By default, tracer must be a null object (Tracing::HttpNullTracer) rather than nullptr. EXPECT_THAT(config.tracer().get(), WhenDynamicCastTo(NotNull())); @@ -249,7 +254,8 @@ stat_prefix: router HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); // Even though tracer provider is configured in the bootstrap config, a given filter instance // should not have a tracer associated with it. @@ -284,7 +290,8 @@ tracing: {} # notice that tracing is enabled HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); // Actual HttpTracer must be obtained from the HttpTracerManager. 
EXPECT_THAT(config.tracer(), Eq(http_tracer_)); @@ -324,7 +331,8 @@ tracing: {} # notice that tracing is enabled HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); // Actual HttpTracer must be obtained from the HttpTracerManager. EXPECT_THAT(config.tracer(), Eq(http_tracer_)); @@ -383,7 +391,8 @@ stat_prefix: router HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string, false), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); // Actual HttpTracer must be obtained from the HttpTracerManager. EXPECT_THAT(config.tracer(), Eq(http_tracer_)); @@ -414,7 +423,8 @@ stat_prefix: router )EOF"; HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); std::vector custom_tags{"ltag", "etag", "rtag", "mtag"}; const Tracing::CustomTagMap& custom_tag_map = config.tracingConfig()->custom_tags_; @@ -434,7 +444,8 @@ stat_prefix: router )EOF"; HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string, false), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); const Tracing::CustomTagMap& custom_tag_map = config.tracingConfig()->custom_tags_; const Tracing::RequestHeaderCustomTag* foo = dynamic_cast( @@ -466,7 +477,8 @@ stat_prefix: router ON_CALL(context_, direction()).WillByDefault(Return(envoy::config::core::v3::OUTBOUND)); HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string, false), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_EQ(Tracing::OperationName::Egress, config.tracingConfig()->operation_name_); } @@ -492,7 +504,8 @@ stat_prefix: router ON_CALL(context_, direction()).WillByDefault(Return(envoy::config::core::v3::INBOUND)); HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string, false), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_EQ(Tracing::OperationName::Ingress, config.tracingConfig()->operation_name_); } @@ -511,7 +524,8 @@ TEST_F(HttpConnectionManagerConfigTest, SamplingDefault) { HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string, false), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_EQ(100, config.tracingConfig()->client_sampling_.numerator()); 
EXPECT_EQ(Tracing::DefaultMaxPathTagLength, config.tracingConfig()->max_path_tag_length_); @@ -546,7 +560,8 @@ TEST_F(HttpConnectionManagerConfigTest, SamplingConfigured) { HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string, false), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_EQ(1, config.tracingConfig()->client_sampling_.numerator()); EXPECT_EQ(envoy::type::v3::FractionalPercent::HUNDRED, @@ -580,7 +595,8 @@ TEST_F(HttpConnectionManagerConfigTest, FractionalSamplingConfigured) { HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string, false), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_EQ(0, config.tracingConfig()->client_sampling_.numerator()); EXPECT_EQ(envoy::type::v3::FractionalPercent::HUNDRED, @@ -606,7 +622,8 @@ TEST_F(HttpConnectionManagerConfigTest, UnixSocketInternalAddress) { HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); Network::Address::PipeInstance unixAddress{"/foo"}; Network::Address::Ipv4Instance internalIpAddress{"127.0.0.1", 0}; Network::Address::Ipv4Instance externalIpAddress{"12.0.0.1", 0}; @@ -626,7 +643,8 @@ TEST_F(HttpConnectionManagerConfigTest, MaxRequestHeadersKbDefault) { HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_EQ(60, config.maxRequestHeadersKb()); } @@ -642,7 +660,8 @@ TEST_F(HttpConnectionManagerConfigTest, MaxRequestHeadersKbConfigured) { HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_EQ(16, config.maxRequestHeadersKb()); } @@ -658,7 +677,8 @@ TEST_F(HttpConnectionManagerConfigTest, MaxRequestHeadersKbMaxConfigurable) { HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_EQ(96, config.maxRequestHeadersKb()); } @@ -675,7 +695,8 @@ TEST_F(HttpConnectionManagerConfigTest, DisabledStreamIdleTimeout) { HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_EQ(0, config.streamIdleTimeout().count()); } @@ -692,7 +713,8 @@ 
TEST_F(HttpConnectionManagerConfigTest, DEPRECATED_FEATURE_TEST(IdleTimeout)) { HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string, false), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_EQ(1000, config.idleTimeout().value().count()); } @@ -710,7 +732,8 @@ TEST_F(HttpConnectionManagerConfigTest, CommonHttpProtocolIdleTimeout) { HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_EQ(1000, config.idleTimeout().value().count()); } @@ -726,7 +749,8 @@ TEST_F(HttpConnectionManagerConfigTest, CommonHttpProtocolIdleTimeoutDefault) { HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_EQ(std::chrono::hours(1), config.idleTimeout().value()); } @@ -744,7 +768,8 @@ TEST_F(HttpConnectionManagerConfigTest, CommonHttpProtocolIdleTimeoutOff) { HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_FALSE(config.idleTimeout().has_value()); } @@ -760,7 +785,8 @@ TEST_F(HttpConnectionManagerConfigTest, DefaultMaxRequestHeaderCount) { HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_EQ(100, config.maxRequestHeadersCount()); } @@ -778,7 +804,8 @@ TEST_F(HttpConnectionManagerConfigTest, MaxRequestHeaderCountConfigurable) { HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_EQ(200, config.maxRequestHeadersCount()); } @@ -797,7 +824,8 @@ TEST_F(HttpConnectionManagerConfigTest, ServerOverwrite) { &Runtime::MockSnapshot::featureEnabledDefault)); HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_EQ(HttpConnectionManagerConfig::HttpConnectionManagerProto::OVERWRITE, config.serverHeaderTransformation()); } @@ -817,7 +845,8 @@ TEST_F(HttpConnectionManagerConfigTest, ServerAppendIfAbsent) { &Runtime::MockSnapshot::featureEnabledDefault)); HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, 
route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_EQ(HttpConnectionManagerConfig::HttpConnectionManagerProto::APPEND_IF_ABSENT, config.serverHeaderTransformation()); } @@ -837,7 +866,8 @@ TEST_F(HttpConnectionManagerConfigTest, ServerPassThrough) { &Runtime::MockSnapshot::featureEnabledDefault)); HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_EQ(HttpConnectionManagerConfig::HttpConnectionManagerProto::PASS_THROUGH, config.serverHeaderTransformation()); } @@ -858,7 +888,8 @@ TEST_F(HttpConnectionManagerConfigTest, NormalizePathDefault) { &Runtime::MockSnapshot::featureEnabledDefault)); HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); #ifdef ENVOY_NORMALIZE_PATH_BY_DEFAULT EXPECT_TRUE(config.shouldNormalizePath()); #else @@ -881,7 +912,8 @@ TEST_F(HttpConnectionManagerConfigTest, NormalizePathRuntime) { .WillOnce(Return(true)); HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_TRUE(config.shouldNormalizePath()); } @@ -901,7 +933,8 @@ TEST_F(HttpConnectionManagerConfigTest, NormalizePathTrue) { .Times(0); HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_TRUE(config.shouldNormalizePath()); } @@ -921,7 +954,8 @@ TEST_F(HttpConnectionManagerConfigTest, NormalizePathFalse) { .Times(0); HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_FALSE(config.shouldNormalizePath()); } @@ -937,7 +971,8 @@ TEST_F(HttpConnectionManagerConfigTest, MergeSlashesDefault) { HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_FALSE(config.shouldMergeSlashes()); } @@ -954,7 +989,8 @@ TEST_F(HttpConnectionManagerConfigTest, MergeSlashesTrue) { HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + 
filter_config_provider_manager_); EXPECT_TRUE(config.shouldMergeSlashes()); } @@ -971,7 +1007,8 @@ TEST_F(HttpConnectionManagerConfigTest, MergeSlashesFalse) { HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_FALSE(config.shouldMergeSlashes()); } @@ -987,7 +1024,8 @@ TEST_F(HttpConnectionManagerConfigTest, RemovePortDefault) { HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_FALSE(config.shouldStripMatchingPort()); } @@ -1004,7 +1042,8 @@ TEST_F(HttpConnectionManagerConfigTest, RemovePortTrue) { HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_TRUE(config.shouldStripMatchingPort()); } @@ -1021,7 +1060,8 @@ TEST_F(HttpConnectionManagerConfigTest, RemovePortFalse) { HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_FALSE(config.shouldStripMatchingPort()); } @@ -1037,7 +1077,8 @@ TEST_F(HttpConnectionManagerConfigTest, HeadersWithUnderscoresAllowedByDefault) HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_EQ(envoy::config::core::v3::HttpProtocolOptions::ALLOW, config.headersWithUnderscoresAction()); } @@ -1056,7 +1097,8 @@ TEST_F(HttpConnectionManagerConfigTest, HeadersWithUnderscoresDroppedByConfig) { HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_EQ(envoy::config::core::v3::HttpProtocolOptions::DROP_HEADER, config.headersWithUnderscoresAction()); } @@ -1075,7 +1117,8 @@ TEST_F(HttpConnectionManagerConfigTest, HeadersWithUnderscoresRequestRejectedByC HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_EQ(envoy::config::core::v3::HttpProtocolOptions::REJECT_REQUEST, config.headersWithUnderscoresAction()); } @@ -1092,7 +1135,8 @@ TEST_F(HttpConnectionManagerConfigTest, ConfiguredRequestTimeout) { HttpConnectionManagerConfig 
config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_EQ(53 * 1000, config.requestTimeout().count()); } @@ -1108,7 +1152,8 @@ TEST_F(HttpConnectionManagerConfigTest, DisabledRequestTimeout) { HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_EQ(0, config.requestTimeout().count()); } @@ -1123,7 +1168,8 @@ TEST_F(HttpConnectionManagerConfigTest, UnconfiguredRequestTimeout) { HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_EQ(0, config.requestTimeout().count()); } @@ -1497,7 +1543,8 @@ TEST_F(HttpConnectionManagerConfigTest, AlwaysSetRequestIdInResponseDefault) { HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_FALSE(config.alwaysSetRequestIdInResponse()); } @@ -1513,7 +1560,8 @@ TEST_F(HttpConnectionManagerConfigTest, AlwaysSetRequestIdInResponseConfigured) HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); EXPECT_TRUE(config.alwaysSetRequestIdInResponse()); } @@ -1578,7 +1626,8 @@ TEST_F(HttpConnectionManagerConfigTest, CustomRequestIDExtension) { HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); auto request_id_extension = dynamic_cast(config.requestIDExtension().get()); ASSERT_NE(nullptr, request_id_extension); @@ -1613,7 +1662,8 @@ TEST_F(HttpConnectionManagerConfigTest, DefaultRequestIDExtension) { HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); auto request_id_extension = dynamic_cast(config.requestIDExtension().get()); ASSERT_NE(nullptr, request_id_extension); @@ -1679,6 +1729,213 @@ stat_prefix: router http_connection_manager_factory(); } +TEST_F(HttpConnectionManagerConfigTest, DynamicFilterWarmingNoDefault) { + const std::string yaml_string = R"EOF( +codec_type: http1 +stat_prefix: router +route_config: + virtual_hosts: + - name: service + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: cluster 
+http_filters: +- name: foo + config_discovery: + config_source: { ads: {} } + apply_default_config_without_warming: true + type_urls: + - type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + )EOF"; + + EXPECT_THROW_WITH_MESSAGE( + createHttpConnectionManagerConfig(yaml_string), EnvoyException, + "Error: filter config foo applied without warming but has no default config."); +} + +TEST_F(HttpConnectionManagerConfigTest, DynamicFilterBadDefault) { + const std::string yaml_string = R"EOF( +codec_type: http1 +stat_prefix: router +route_config: + virtual_hosts: + - name: service + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: cluster +http_filters: +- name: foo + config_discovery: + config_source: { ads: {} } + default_config: + "@type": type.googleapis.com/google.protobuf.Value + type_urls: + - type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + )EOF"; + + EXPECT_THROW_WITH_MESSAGE( + createHttpConnectionManagerConfig(yaml_string), EnvoyException, + "Error: cannot find filter factory foo for default filter configuration with type URL " + "type.googleapis.com/google.protobuf.Value."); +} + +TEST_F(HttpConnectionManagerConfigTest, DynamicFilterDefaultNotTerminal) { + const std::string yaml_string = R"EOF( +codec_type: http1 +stat_prefix: router +route_config: + virtual_hosts: + - name: service + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: cluster +http_filters: +- name: foo + config_discovery: + config_source: { ads: {} } + default_config: + "@type": type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck + type_urls: + - type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck + )EOF"; + + EXPECT_THROW_WITH_MESSAGE( + createHttpConnectionManagerConfig(yaml_string), EnvoyException, + "Error: non-terminal filter named foo of type envoy.filters.http.health_check is the last " + "filter in a http filter chain."); +} + +TEST_F(HttpConnectionManagerConfigTest, DynamicFilterDefaultTerminal) { + const std::string yaml_string = R"EOF( +codec_type: http1 +stat_prefix: router +route_config: + virtual_hosts: + - name: service + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: cluster +http_filters: +- name: foo + config_discovery: + config_source: { ads: {} } + default_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + type_urls: + - type.googleapis.com/envoy.extensions.filters.http.router.v3.Router +- name: envoy.filters.http.router + )EOF"; + + EXPECT_THROW_WITH_MESSAGE(createHttpConnectionManagerConfig(yaml_string), EnvoyException, + "Error: terminal filter named foo of type envoy.filters.http.router " + "must be the last filter in a http filter chain."); +} + +TEST_F(HttpConnectionManagerConfigTest, DynamicFilterDefaultRequireTypeUrl) { + const std::string yaml_string = R"EOF( +codec_type: http1 +stat_prefix: router +route_config: + virtual_hosts: + - name: service + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: cluster +http_filters: +- name: foo + config_discovery: + config_source: { ads: {} } + default_config: + "@type": type.googleapis.com/udpa.type.v1.TypedStruct + type_url: type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + type_urls: + - type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck +- name: envoy.filters.http.router + )EOF"; + + EXPECT_THROW_WITH_MESSAGE( + createHttpConnectionManagerConfig(yaml_string), EnvoyException, + "Error: filter config has 
type URL envoy.extensions.filters.http.router.v3.Router but " + "expect envoy.config.filter.http.health_check.v2.HealthCheck."); +} + +TEST_F(HttpConnectionManagerConfigTest, DynamicFilterRequireTypeUrlMissingFactory) { + const std::string yaml_string = R"EOF( +codec_type: http1 +stat_prefix: router +route_config: + virtual_hosts: + - name: service + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: cluster +http_filters: +- name: foo + config_discovery: + config_source: { ads: {} } + type_urls: + - type.googleapis.com/google.protobuf.Value + )EOF"; + + EXPECT_THROW_WITH_MESSAGE( + createHttpConnectionManagerConfig(yaml_string), EnvoyException, + "Error: no factory found for a required type URL google.protobuf.Value."); +} + +TEST_F(HttpConnectionManagerConfigTest, DynamicFilterDefaultValid) { + const std::string yaml_string = R"EOF( +codec_type: http1 +stat_prefix: router +route_config: + virtual_hosts: + - name: service + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: cluster +http_filters: +- name: foo + config_discovery: + config_source: { ads: {} } + default_config: + "@type": type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck + pass_through_mode: false + type_urls: + - type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck + apply_default_config_without_warming: true +- name: envoy.filters.http.router + )EOF"; + + createHttpConnectionManagerConfig(yaml_string); +} + class FilterChainTest : public HttpConnectionManagerConfigTest { public: const std::string basic_config_ = R"EOF( @@ -1705,7 +1962,8 @@ stat_prefix: router TEST_F(FilterChainTest, CreateFilterChain) { HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(basic_config_), context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); Http::MockFilterChainFactoryCallbacks callbacks; EXPECT_CALL(callbacks, addStreamFilter(_)); // Buffer @@ -1713,6 +1971,57 @@ TEST_F(FilterChainTest, CreateFilterChain) { config.createFilterChain(callbacks); } +TEST_F(FilterChainTest, CreateDynamicFilterChain) { + const std::string yaml_string = R"EOF( +codec_type: http1 +stat_prefix: router +route_config: + virtual_hosts: + - name: service + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: cluster +http_filters: +- name: foo + config_discovery: + config_source: { ads: {} } + type_urls: + - type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck +- name: bar + config_discovery: + config_source: { ads: {} } + type_urls: + - type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck +- name: envoy.filters.http.router + )EOF"; + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, + date_provider_, route_config_provider_manager_, + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); + + Http::MockFilterChainFactoryCallbacks callbacks; + Http::StreamDecoderFilterSharedPtr missing_config_filter; + EXPECT_CALL(callbacks, addStreamDecoderFilter(_)) + .Times(2) + .WillOnce(testing::SaveArg<0>(&missing_config_filter)) + .WillOnce(Return()); // MissingConfigFilter (only once) and router + config.createFilterChain(callbacks); + + Http::MockStreamDecoderFilterCallbacks decoder_callbacks; + NiceMock stream_info; + EXPECT_CALL(decoder_callbacks, 
streamInfo()).WillRepeatedly(ReturnRef(stream_info)); + EXPECT_CALL(decoder_callbacks, sendLocalReply(Http::Code::InternalServerError, _, _, _, _)) + .WillRepeatedly(Return()); + Http::TestRequestHeaderMapImpl headers; + missing_config_filter->setDecoderFilterCallbacks(decoder_callbacks); + missing_config_filter->decodeHeaders(headers, false); + EXPECT_TRUE(stream_info.hasResponseFlag(StreamInfo::ResponseFlag::NoFilterConfigFound)); +} + // Tests where upgrades are configured on via the HCM. TEST_F(FilterChainTest, CreateUpgradeFilterChain) { auto hcm_config = parseHttpConnectionManagerFromYaml(basic_config_); @@ -1720,7 +2029,8 @@ TEST_F(FilterChainTest, CreateUpgradeFilterChain) { HttpConnectionManagerConfig config(hcm_config, context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); NiceMock callbacks; // Check the case where WebSockets are configured in the HCM, and no router @@ -1767,7 +2077,8 @@ TEST_F(FilterChainTest, CreateUpgradeFilterChainHCMDisabled) { HttpConnectionManagerConfig config(hcm_config, context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); NiceMock callbacks; // Check the case where WebSockets are off in the HCM, and no router config is present. @@ -1821,7 +2132,8 @@ TEST_F(FilterChainTest, CreateCustomUpgradeFilterChain) { HttpConnectionManagerConfig config(hcm_config, context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_); + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); { Http::MockFilterChainFactoryCallbacks callbacks; @@ -1865,7 +2177,8 @@ TEST_F(FilterChainTest, CreateCustomUpgradeFilterChainWithRouterNotLast) { EXPECT_THROW_WITH_MESSAGE( HttpConnectionManagerConfig(hcm_config, context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_), + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_), EnvoyException, "Error: terminal filter named envoy.filters.http.router of type envoy.filters.http.router " "must be the last filter in a http upgrade filter chain."); @@ -1879,7 +2192,8 @@ TEST_F(FilterChainTest, InvalidConfig) { EXPECT_THROW_WITH_MESSAGE( HttpConnectionManagerConfig(hcm_config, context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_), + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_), EnvoyException, "Error: multiple upgrade configs with the same name: 'websocket'"); } diff --git a/test/integration/BUILD b/test/integration/BUILD index e89acf52eded..2009458e1852 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -877,6 +877,21 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "extension_discovery_integration_test", + srcs = ["extension_discovery_integration_test.cc"], + tags = ["fails_on_windows"], + deps = [ + ":http_integration_lib", + "//source/extensions/filters/http/rbac:config", + "//test/common/grpc:grpc_client_integration_lib", + "//test/test_common:utility_lib", + "@envoy_api//envoy/extensions/filters/http/rbac/v3:pkg_cc_proto", + 
"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", + "@envoy_api//envoy/service/extension/v3:pkg_cc_proto", + ], +) + envoy_cc_test_library( name = "server_stats_interface", hdrs = ["server_stats.h"], diff --git a/test/integration/extension_discovery_integration_test.cc b/test/integration/extension_discovery_integration_test.cc new file mode 100644 index 000000000000..7af8be71c394 --- /dev/null +++ b/test/integration/extension_discovery_integration_test.cc @@ -0,0 +1,327 @@ +#include "envoy/extensions/filters/http/rbac/v3/rbac.pb.h" +#include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" +#include "envoy/service/extension/v3/config_discovery.pb.h" + +#include "test/common/grpc/grpc_client_integration.h" +#include "test/integration/http_integration.h" +#include "test/test_common/utility.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace { + +std::string denyPrivateConfig() { + return R"EOF( + rules: + action: DENY + policies: + "test": + permissions: + - url_path: { path: { prefix: "/private" } } + principals: + - any: true +)EOF"; +} + +std::string allowAllConfig() { + return R"EOF( + rules: + action: ALLOW + policies: + "test": + permissions: + - any: true + principals: + - any: true +)EOF"; +} + +std::string invalidConfig() { + return R"EOF( + rules: + action: DENY + policies: + "test": {} +)EOF"; +} + +class ExtensionDiscoveryIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, + public HttpIntegrationTest { +public: + ExtensionDiscoveryIntegrationTest() + : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, ipVersion()) {} + + void addDynamicFilter(const std::string& name, bool apply_without_warming, + bool set_default_config = true) { + config_helper_.addConfigModifier( + [this, name, apply_without_warming, set_default_config]( + envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + http_connection_manager) { + auto* filter = http_connection_manager.mutable_http_filters()->Add(); + filter->set_name(name); + auto* discovery = filter->mutable_config_discovery(); + discovery->add_type_urls( + "type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBAC"); + if (set_default_config) { + const auto rbac_configuration = + TestUtility::parseYaml(R"EOF( + rules: + action: DENY + policies: + "test": + permissions: + - any: true + principals: + - any: true + )EOF"); + discovery->mutable_default_config()->PackFrom(rbac_configuration); + } + discovery->set_apply_default_config_without_warming(apply_without_warming); + auto* api_config_source = discovery->mutable_config_source()->mutable_api_config_source(); + api_config_source->set_api_type(envoy::config::core::v3::ApiConfigSource::GRPC); + api_config_source->set_transport_api_version(envoy::config::core::v3::ApiVersion::V3); + auto* grpc_service = api_config_source->add_grpc_services(); + setGrpcService(*grpc_service, "ecds_cluster", getEcdsFakeUpstream().localAddress()); + // keep router the last + auto size = http_connection_manager.http_filters_size(); + http_connection_manager.mutable_http_filters()->SwapElements(size - 2, size - 1); + }); + } + + void initialize() override { + defer_listener_finalization_ = true; + setUpstreamCount(1); + // Add an xDS cluster for extension config discovery. 
+ config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + auto* ecds_cluster = bootstrap.mutable_static_resources()->add_clusters(); + ecds_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]); + ecds_cluster->set_name("ecds_cluster"); + ecds_cluster->mutable_http2_protocol_options(); + }); + // Make HCM do a direct response to avoid timing issues with the upstream. + config_helper_.addConfigModifier( + [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + http_connection_manager) { + http_connection_manager.mutable_route_config() + ->mutable_virtual_hosts(0) + ->mutable_routes(0) + ->mutable_direct_response() + ->set_status(200); + }); + HttpIntegrationTest::initialize(); + } + + ~ExtensionDiscoveryIntegrationTest() override { + AssertionResult result = ecds_connection_->close(); + RELEASE_ASSERT(result, result.message()); + result = ecds_connection_->waitForDisconnect(); + RELEASE_ASSERT(result, result.message()); + ecds_connection_.reset(); + } + + void createUpstreams() override { + HttpIntegrationTest::createUpstreams(); + // Create the extension config discovery upstream (fake_upstreams_[1]). + fake_upstreams_.emplace_back(new FakeUpstream(0, FakeHttpConnection::Type::HTTP2, version_, + timeSystem(), enable_half_close_)); + for (auto& upstream : fake_upstreams_) { + upstream->set_allow_unexpected_disconnects(true); + } + } + + void waitXdsStream() { + auto& upstream = getEcdsFakeUpstream(); + AssertionResult result = upstream.waitForHttpConnection(*dispatcher_, ecds_connection_); + RELEASE_ASSERT(result, result.message()); + result = ecds_connection_->waitForNewStream(*dispatcher_, ecds_stream_); + RELEASE_ASSERT(result, result.message()); + ecds_stream_->startGrpcStream(); + } + + void sendXdsResponse(const std::string& name, const std::string& version, + const std::string& rbac_config) { + envoy::service::discovery::v3::DiscoveryResponse response; + response.set_version_info(version); + response.set_type_url("type.googleapis.com/envoy.config.core.v3.TypedExtensionConfig"); + const auto rbac_configuration = + TestUtility::parseYaml(rbac_config); + envoy::config::core::v3::TypedExtensionConfig typed_config; + typed_config.set_name(name); + typed_config.mutable_typed_config()->PackFrom(rbac_configuration); + response.add_resources()->PackFrom(typed_config); + ecds_stream_->sendGrpcMessage(response); + } + + FakeUpstream& getEcdsFakeUpstream() const { return *fake_upstreams_[1]; } + +private: + FakeHttpConnectionPtr ecds_connection_{nullptr}; + FakeStreamPtr ecds_stream_{nullptr}; +}; + +INSTANTIATE_TEST_SUITE_P(IpVersionsClientType, ExtensionDiscoveryIntegrationTest, + GRPC_CLIENT_INTEGRATION_PARAMS); + +TEST_P(ExtensionDiscoveryIntegrationTest, BasicSuccess) { + on_server_init_function_ = [&]() { waitXdsStream(); }; + addDynamicFilter("foo", false); + initialize(); + test_server_->waitForCounterGe("listener_manager.lds.update_success", 1); + EXPECT_EQ(test_server_->server().initManager().state(), Init::Manager::State::Initializing); + registerTestServerPorts({"http"}); + sendXdsResponse("foo", "1", denyPrivateConfig()); + test_server_->waitForCounterGe("http.config_test.extension_config_discovery.foo.config_reload", + 1); + test_server_->waitUntilListenersReady(); + test_server_->waitForGaugeGe("listener_manager.workers_started", 1); + EXPECT_EQ(test_server_->server().initManager().state(), Init::Manager::State::Initialized); + codec_client_ = 
makeHttpConnection(makeClientConnection((lookupPort("http")))); + { + Http::TestRequestHeaderMapImpl request_headers{ + {":method", "GET"}, {":path", "/"}, {":scheme", "http"}, {":authority", "host"}}; + auto response = codec_client_->makeHeaderOnlyRequest(request_headers); + response->waitForEndStream(); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().getStatusValue()); + } + Http::TestRequestHeaderMapImpl banned_request_headers{ + {":method", "GET"}, {":path", "/private/key"}, {":scheme", "http"}, {":authority", "host"}}; + { + auto response = codec_client_->makeHeaderOnlyRequest(banned_request_headers); + response->waitForEndStream(); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("403", response->headers().getStatusValue()); + } + // Update again but keep the connection. + { + sendXdsResponse("foo", "2", allowAllConfig()); + test_server_->waitForCounterGe("http.config_test.extension_config_discovery.foo.config_reload", + 2); + auto response = codec_client_->makeHeaderOnlyRequest(banned_request_headers); + response->waitForEndStream(); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().getStatusValue()); + } +} + +TEST_P(ExtensionDiscoveryIntegrationTest, BasicFailWithDefault) { + on_server_init_function_ = [&]() { waitXdsStream(); }; + addDynamicFilter("foo", false); + initialize(); + test_server_->waitForCounterGe("listener_manager.lds.update_success", 1); + EXPECT_EQ(test_server_->server().initManager().state(), Init::Manager::State::Initializing); + registerTestServerPorts({"http"}); + sendXdsResponse("foo", "1", invalidConfig()); + test_server_->waitForCounterGe("http.config_test.extension_config_discovery.foo.config_fail", 1); + test_server_->waitUntilListenersReady(); + test_server_->waitForGaugeGe("listener_manager.workers_started", 1); + EXPECT_EQ(test_server_->server().initManager().state(), Init::Manager::State::Initialized); + codec_client_ = makeHttpConnection(makeClientConnection((lookupPort("http")))); + Http::TestRequestHeaderMapImpl request_headers{ + {":method", "GET"}, {":path", "/"}, {":scheme", "http"}, {":authority", "host"}}; + auto response = codec_client_->makeHeaderOnlyRequest(request_headers); + response->waitForEndStream(); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("403", response->headers().getStatusValue()); +} + +TEST_P(ExtensionDiscoveryIntegrationTest, BasicFailWithoutDefault) { + on_server_init_function_ = [&]() { waitXdsStream(); }; + addDynamicFilter("foo", false, false); + initialize(); + test_server_->waitForCounterGe("listener_manager.lds.update_success", 1); + EXPECT_EQ(test_server_->server().initManager().state(), Init::Manager::State::Initializing); + registerTestServerPorts({"http"}); + sendXdsResponse("foo", "1", invalidConfig()); + test_server_->waitForCounterGe("http.config_test.extension_config_discovery.foo.config_fail", 1); + test_server_->waitUntilListenersReady(); + test_server_->waitForGaugeGe("listener_manager.workers_started", 1); + EXPECT_EQ(test_server_->server().initManager().state(), Init::Manager::State::Initialized); + codec_client_ = makeHttpConnection(makeClientConnection((lookupPort("http")))); + Http::TestRequestHeaderMapImpl request_headers{ + {":method", "GET"}, {":path", "/"}, {":scheme", "http"}, {":authority", "host"}}; + auto response = codec_client_->makeHeaderOnlyRequest(request_headers); + response->waitForEndStream(); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("500", response->headers().getStatusValue()); +} + 
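+// The next two tests exercise apply_default_config_without_warming: the filter is
+// added with a default deny-all RBAC config, so the listener finishes warming and
+// the init manager reports Initialized before any ECDS response arrives. Requests
+// are answered with the default config (403) until a successful config update is
+// delivered, and a failed update leaves the default config in place.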
+TEST_P(ExtensionDiscoveryIntegrationTest, BasicWithoutWarming) { + on_server_init_function_ = [&]() { waitXdsStream(); }; + addDynamicFilter("bar", true); + initialize(); + test_server_->waitForCounterGe("listener_manager.lds.update_success", 1); + EXPECT_EQ(test_server_->server().initManager().state(), Init::Manager::State::Initialized); + registerTestServerPorts({"http"}); + test_server_->waitUntilListenersReady(); + test_server_->waitForGaugeGe("listener_manager.workers_started", 1); + codec_client_ = makeHttpConnection(makeClientConnection((lookupPort("http")))); + // Initial request uses the default config. + Http::TestRequestHeaderMapImpl request_headers{ + {":method", "GET"}, {":path", "/"}, {":scheme", "http"}, {":authority", "host"}}; + { + auto response = codec_client_->makeHeaderOnlyRequest(request_headers); + response->waitForEndStream(); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("403", response->headers().getStatusValue()); + } + + // Update should cause a different response. + sendXdsResponse("bar", "1", denyPrivateConfig()); + test_server_->waitForCounterGe("http.config_test.extension_config_discovery.bar.config_reload", + 1); + { + auto response = codec_client_->makeHeaderOnlyRequest(request_headers); + response->waitForEndStream(); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().getStatusValue()); + } +} + +TEST_P(ExtensionDiscoveryIntegrationTest, BasicWithoutWarmingFail) { + on_server_init_function_ = [&]() { waitXdsStream(); }; + addDynamicFilter("bar", true); + initialize(); + test_server_->waitForCounterGe("listener_manager.lds.update_success", 1); + EXPECT_EQ(test_server_->server().initManager().state(), Init::Manager::State::Initialized); + registerTestServerPorts({"http"}); + test_server_->waitUntilListenersReady(); + test_server_->waitForGaugeGe("listener_manager.workers_started", 1); + codec_client_ = makeHttpConnection(makeClientConnection((lookupPort("http")))); + // Update should not cause a different response. 
+ sendXdsResponse("bar", "1", invalidConfig()); + test_server_->waitForCounterGe("http.config_test.extension_config_discovery.bar.config_fail", 1); + Http::TestRequestHeaderMapImpl request_headers{ + {":method", "GET"}, {":path", "/"}, {":scheme", "http"}, {":authority", "host"}}; + auto response = codec_client_->makeHeaderOnlyRequest(request_headers); + response->waitForEndStream(); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("403", response->headers().getStatusValue()); +} + +TEST_P(ExtensionDiscoveryIntegrationTest, BasicTwoSubscriptionsSameName) { + on_server_init_function_ = [&]() { waitXdsStream(); }; + addDynamicFilter("baz", true); + addDynamicFilter("baz", false); + initialize(); + test_server_->waitForCounterGe("listener_manager.lds.update_success", 1); + EXPECT_EQ(test_server_->server().initManager().state(), Init::Manager::State::Initializing); + registerTestServerPorts({"http"}); + sendXdsResponse("baz", "1", denyPrivateConfig()); + test_server_->waitForCounterGe("http.config_test.extension_config_discovery.baz.config_reload", + 1); + test_server_->waitUntilListenersReady(); + test_server_->waitForGaugeGe("listener_manager.workers_started", 1); + EXPECT_EQ(test_server_->server().initManager().state(), Init::Manager::State::Initialized); + codec_client_ = makeHttpConnection(makeClientConnection((lookupPort("http")))); + Http::TestRequestHeaderMapImpl request_headers{ + {":method", "GET"}, {":path", "/"}, {":scheme", "http"}, {":authority", "host"}}; + auto response = codec_client_->makeHeaderOnlyRequest(request_headers); + response->waitForEndStream(); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().getStatusValue()); +} + +} // namespace +} // namespace Envoy From 0d862f639696b483f8a5f4262d72a8b0011b4859 Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Thu, 23 Jul 2020 15:22:35 -0700 Subject: [PATCH 731/909] ci: use local build cache for non-RBE CI (#12215) - refactored cache setup script Signed-off-by: Lizan Zhou --- .azure-pipelines/bazel.yml | 1 + .azure-pipelines/pipelines.yml | 2 +- .bazelrc | 1 + ci/setup_cache.sh | 37 ++++++++++++++++------------------ ci/upload_gcs_artifact.sh | 4 +--- 5 files changed, 21 insertions(+), 24 deletions(-) diff --git a/.azure-pipelines/bazel.yml b/.azure-pipelines/bazel.yml index ea96e3778742..3999c3efab6f 100644 --- a/.azure-pipelines/bazel.yml +++ b/.azure-pipelines/bazel.yml @@ -58,6 +58,7 @@ steps: GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) ${{ if eq(parameters.rbe, false) }}: BAZEL_BUILD_EXTRA_OPTIONS: "--curses=no --experimental_repository_cache_hardlinks ${{ parameters.bazelBuildExtraOptions }}" + BAZEL_REMOTE_CACHE: $(LocalBuildCache) displayName: "Run CI script" diff --git a/.azure-pipelines/pipelines.yml b/.azure-pipelines/pipelines.yml index 83b51e4e32f0..a0947d2ccea3 100644 --- a/.azure-pipelines/pipelines.yml +++ b/.azure-pipelines/pipelines.yml @@ -111,7 +111,7 @@ jobs: ciTarget: bazel.$(CI_TARGET) rbe: false # /tmp/sandbox_base is a tmpfs in CI environment to optimize large I/O for coverage traces - bazelBuildExtraOptions: "--test_env=ENVOY_IP_TEST_VERSIONS=v4only --sandbox_base=/tmp/sandbox_base" + bazelBuildExtraOptions: "--define=no_debug_info=1 --linkopt=-Wl,-s --test_env=ENVOY_IP_TEST_VERSIONS=v4only --sandbox_base=/tmp/sandbox_base" - script: ci/run_envoy_docker.sh 'ci/upload_gcs_artifact.sh /source/generated/$(CI_TARGET) $(CI_TARGET)' displayName: "Upload $(CI_TARGET) Report to GCS" diff --git a/.bazelrc b/.bazelrc index 3c61e6d8c32d..b63c7a2ba2ae 100644 --- a/.bazelrc +++ 
b/.bazelrc @@ -117,6 +117,7 @@ build --test_env=HEAPCHECK=normal --test_env=PPROF_PATH # Coverage options coverage --config=coverage +coverage --build_tests_only build:coverage --action_env=BAZEL_USE_LLVM_NATIVE_COVERAGE=1 build:coverage --action_env=GCOV=llvm-profdata build:coverage --copt=-DNDEBUG diff --git a/ci/setup_cache.sh b/ci/setup_cache.sh index 699961bbb082..f615b8b41d5d 100755 --- a/ci/setup_cache.sh +++ b/ci/setup_cache.sh @@ -2,7 +2,7 @@ set -e -if [[ ! -z "${GCP_SERVICE_ACCOUNT_KEY}" ]]; then +if [[ ! -z "${GCP_SERVICE_ACCOUNT_KEY:0:1}" ]]; then # mktemp will create a tempfile with u+rw permission minus umask, it will not be readable by all # users by default. GCP_SERVICE_ACCOUNT_KEY_FILE=$(mktemp -t gcp_service_account.XXXXXX.json) @@ -14,27 +14,24 @@ if [[ ! -z "${GCP_SERVICE_ACCOUNT_KEY}" ]]; then trap gcp_service_account_cleanup EXIT - echo "${GCP_SERVICE_ACCOUNT_KEY}" | base64 --decode > "${GCP_SERVICE_ACCOUNT_KEY_FILE}" + bash -c 'echo "${GCP_SERVICE_ACCOUNT_KEY}"' | base64 --decode > "${GCP_SERVICE_ACCOUNT_KEY_FILE}" + + export BAZEL_BUILD_EXTRA_OPTIONS+=" --google_credentials=${GCP_SERVICE_ACCOUNT_KEY_FILE}" fi -if [[ "${BAZEL_REMOTE_CACHE}" =~ ^http ]]; then - if [[ ! -z "${GCP_SERVICE_ACCOUNT_KEY}" ]]; then - export BAZEL_BUILD_EXTRA_OPTIONS="${BAZEL_BUILD_EXTRA_OPTIONS} \ - --remote_http_cache=${BAZEL_REMOTE_CACHE} \ - --google_credentials=${GCP_SERVICE_ACCOUNT_KEY_FILE}" - echo "Set up bazel HTTP read/write cache at ${BAZEL_REMOTE_CACHE}." - else - export BAZEL_BUILD_EXTRA_OPTIONS="${BAZEL_BUILD_EXTRA_OPTIONS} \ - --remote_http_cache=${BAZEL_REMOTE_CACHE} --noremote_upload_local_results" - echo "Set up bazel HTTP read only cache at ${BAZEL_REMOTE_CACHE}." + +if [[ ! -z "${BAZEL_REMOTE_CACHE}" ]]; then + export BAZEL_BUILD_EXTRA_OPTIONS+=" --remote_cache=${BAZEL_REMOTE_CACHE}" + echo "Set up bazel remote read/write cache at ${BAZEL_REMOTE_CACHE}." + + if [[ ! -z "${BAZEL_REMOTE_INSTANCE}" ]]; then + export BAZEL_BUILD_EXTRA_OPTIONS+=" --remote_instance_name=${BAZEL_REMOTE_INSTANCE}" + echo "instance_name: ${BAZEL_REMOTE_INSTANCE}." + elif [[ -z "${ENVOY_RBE}" ]]; then + export BAZEL_BUILD_EXTRA_OPTIONS+=" --jobs=HOST_CPUS*.9 --remote_timeout=600" + echo "using local build cache." fi -elif [[ ! -z "${BAZEL_REMOTE_CACHE}" ]]; then - export BAZEL_BUILD_EXTRA_OPTIONS="${BAZEL_BUILD_EXTRA_OPTIONS} \ - --remote_cache=${BAZEL_REMOTE_CACHE} \ - --remote_instance_name=${BAZEL_REMOTE_INSTANCE} \ - --google_credentials=${GCP_SERVICE_ACCOUNT_KEY_FILE} \ - --auth_enabled=true" - echo "Set up bazel remote read/write cache at ${BAZEL_REMOTE_CACHE} instance: ${BAZEL_REMOTE_INSTANCE}." + else - echo "No remote cache bucket is set, skipping setup remote cache." + echo "No remote cache is set, skipping setup remote cache." fi diff --git a/ci/upload_gcs_artifact.sh b/ci/upload_gcs_artifact.sh index 3ec06f3d1761..7bd5b0201359 100755 --- a/ci/upload_gcs_artifact.sh +++ b/ci/upload_gcs_artifact.sh @@ -1,7 +1,5 @@ #!/bin/bash -# Do not ever set -x here, it is a security hazard as it will place the credentials below in the -# CI logs. 
set -e -o pipefail if [[ -z "${GCS_ARTIFACT_BUCKET}" ]]; then @@ -10,7 +8,7 @@ if [[ -z "${GCS_ARTIFACT_BUCKET}" ]]; then fi # Fail when service account key is not specified -echo ${GCP_SERVICE_ACCOUNT_KEY} | base64 --decode | gcloud auth activate-service-account --key-file=- +bash -c 'echo ${GCP_SERVICE_ACCOUNT_KEY}' | base64 --decode | gcloud auth activate-service-account --key-file=- SOURCE_DIRECTORY="$1" TARGET_SUFFIX="$2" From 8b9f68848877502fd32fc85c281c442310963494 Mon Sep 17 00:00:00 2001 From: foreseeable Date: Thu, 23 Jul 2020 19:50:36 -0400 Subject: [PATCH 732/909] test: Split huge monolith mock header (test/mocks/upstream/mocks.h) to speed up test compilation (#12048) Break down test/mocks/upstream/mocks.h into separate mock classes. test/mocks/upstream/mocks.h is a widely used mock header included by many test files, but it is very large and most test files use only a small portion of it. Splitting it into separate per-class mock headers helps reduce compilation time (similar to #11797). Risk Level: low Testing: existing tests Docs Changes: N/A Release Notes: no Related Issues: #10917 Signed-off-by: Muge Chen --- test/mocks/upstream/BUILD | 228 ++++++++- test/mocks/upstream/basic_resource_limit.cc | 17 + test/mocks/upstream/basic_resource_limit.h | 23 + test/mocks/upstream/cds_api.cc | 19 + test/mocks/upstream/cds_api.h | 25 + test/mocks/upstream/cluster.cc | 23 + test/mocks/upstream/cluster.h | 33 ++ test/mocks/upstream/cluster_info_factory.cc | 10 + test/mocks/upstream/cluster_info_factory.h | 20 + test/mocks/upstream/cluster_manager.cc | 36 ++ test/mocks/upstream/cluster_manager.h | 81 ++++ .../mocks/upstream/cluster_manager_factory.cc | 9 + test/mocks/upstream/cluster_manager_factory.h | 44 ++ test/mocks/upstream/cluster_priority_set.cc | 10 + test/mocks/upstream/cluster_priority_set.h | 22 + .../upstream/cluster_real_priority_set.cc | 9 + .../upstream/cluster_real_priority_set.h | 19 + .../upstream/cluster_update_callbacks.cc | 10 + .../mocks/upstream/cluster_update_callbacks.h | 21 + .../cluster_update_callbacks_handle.cc | 9 + .../cluster_update_callbacks_handle.h | 16 + .../upstream/health_check_event_logger.h | 35 ++ test/mocks/upstream/health_checker.cc | 19 + test/mocks/upstream/health_checker.h | 27 ++ test/mocks/upstream/host_set.cc | 55 +++ test/mocks/upstream/host_set.h | 69 +++ test/mocks/upstream/load_balancer.cc | 15 + test/mocks/upstream/load_balancer.h | 23 + test/mocks/upstream/load_balancer_context.h | 1 + test/mocks/upstream/mocks.cc | 186 -------- test/mocks/upstream/mocks.h | 446 +----------------- test/mocks/upstream/priority_set.cc | 54 +++ test/mocks/upstream/priority_set.h | 42 ++ test/mocks/upstream/retry_host_predicate.cc | 10 + test/mocks/upstream/retry_host_predicate.h | 19 + test/mocks/upstream/retry_priority.cc | 7 + test/mocks/upstream/retry_priority.h | 30 ++ test/mocks/upstream/retry_priority_factory.h | 34 ++ .../test_retry_host_predicate_factory.h | 26 + .../upstream/thread_aware_load_balancer.cc | 10 + .../upstream/thread_aware_load_balancer.h | 20 + test/mocks/upstream/thread_local_cluster.cc | 19 + test/mocks/upstream/thread_local_cluster.h | 27 ++ 43 files changed, 1247 insertions(+), 611 deletions(-) create mode 100644 test/mocks/upstream/basic_resource_limit.cc create mode 100644 test/mocks/upstream/basic_resource_limit.h create mode 100644 test/mocks/upstream/cds_api.cc create mode 100644 test/mocks/upstream/cds_api.h create mode 100644 test/mocks/upstream/cluster.cc create mode 100644 test/mocks/upstream/cluster.h create mode
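As a rough illustration of the intent of this split (not part of the patch itself), a test that only needs the cluster manager mock can now depend on the new "//test/mocks/upstream:cluster_manager_mocks" target instead of the monolithic ":upstream_mocks" target and include only that header. The sketch below is hypothetical; it assumes just the MockClusterManager class and the default ON_CALL behavior added in this change:

// Hypothetical example, not in the Envoy tree: exercises only the split-out
// cluster manager mock header introduced by this patch.
#include "test/mocks/upstream/cluster_manager.h"

#include "gtest/gtest.h"

namespace Envoy {
namespace Upstream {
namespace {

TEST(SplitMockHeaderExample, ClusterManagerOnly) {
  ::testing::NiceMock<MockClusterManager> cm;
  // The mock's constructor installs defaults: get() returns the embedded
  // thread_local_cluster_ for any non-empty cluster name, and nullptr for "".
  EXPECT_NE(nullptr, cm.get("example_cluster"));
  EXPECT_EQ(nullptr, cm.get(""));
}

} // namespace
} // namespace Upstream
} // namespace Envoy
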
100644 test/mocks/upstream/cluster_info_factory.cc create mode 100644 test/mocks/upstream/cluster_info_factory.h create mode 100644 test/mocks/upstream/cluster_manager.cc create mode 100644 test/mocks/upstream/cluster_manager.h create mode 100644 test/mocks/upstream/cluster_manager_factory.cc create mode 100644 test/mocks/upstream/cluster_manager_factory.h create mode 100644 test/mocks/upstream/cluster_priority_set.cc create mode 100644 test/mocks/upstream/cluster_priority_set.h create mode 100644 test/mocks/upstream/cluster_real_priority_set.cc create mode 100644 test/mocks/upstream/cluster_real_priority_set.h create mode 100644 test/mocks/upstream/cluster_update_callbacks.cc create mode 100644 test/mocks/upstream/cluster_update_callbacks.h create mode 100644 test/mocks/upstream/cluster_update_callbacks_handle.cc create mode 100644 test/mocks/upstream/cluster_update_callbacks_handle.h create mode 100644 test/mocks/upstream/health_check_event_logger.h create mode 100644 test/mocks/upstream/health_checker.cc create mode 100644 test/mocks/upstream/health_checker.h create mode 100644 test/mocks/upstream/host_set.cc create mode 100644 test/mocks/upstream/host_set.h create mode 100644 test/mocks/upstream/load_balancer.cc create mode 100644 test/mocks/upstream/load_balancer.h delete mode 100644 test/mocks/upstream/mocks.cc create mode 100644 test/mocks/upstream/priority_set.cc create mode 100644 test/mocks/upstream/priority_set.h create mode 100644 test/mocks/upstream/retry_host_predicate.cc create mode 100644 test/mocks/upstream/retry_host_predicate.h create mode 100644 test/mocks/upstream/retry_priority.cc create mode 100644 test/mocks/upstream/retry_priority.h create mode 100644 test/mocks/upstream/retry_priority_factory.h create mode 100644 test/mocks/upstream/test_retry_host_predicate_factory.h create mode 100644 test/mocks/upstream/thread_aware_load_balancer.cc create mode 100644 test/mocks/upstream/thread_aware_load_balancer.h create mode 100644 test/mocks/upstream/thread_local_cluster.cc create mode 100644 test/mocks/upstream/thread_local_cluster.h diff --git a/test/mocks/upstream/BUILD b/test/mocks/upstream/BUILD index d3ba248f450f..f249847bac2c 100644 --- a/test/mocks/upstream/BUILD +++ b/test/mocks/upstream/BUILD @@ -69,10 +69,30 @@ envoy_cc_mock( envoy_cc_mock( name = "upstream_mocks", - srcs = ["mocks.cc"], hdrs = ["mocks.h"], deps = [ + ":basic_resource_limit_mocks", + ":cds_api_mocks", + ":cluster_info_factory_mocks", + ":cluster_manager_factory_mocks", + ":cluster_manager_mocks", + ":cluster_mocks", + ":cluster_priority_set_mocks", + ":cluster_real_priority_set_mocks", + ":cluster_update_callbacks_handle_mocks", + ":cluster_update_callbacks_mocks", + ":health_check_event_logger_mocks", + ":health_checker_mocks", + ":host_set_mocks", ":load_balancer_context_mock", + ":load_balancer_mocks", + ":priority_set_mocks", + ":retry_host_predicate_mocks", + ":retry_priority_factory_mocks", + ":retry_priority_mocks", + ":test_retry_host_predicate_factory_mocks", + ":thread_aware_load_balancer_mocks", + ":thread_local_cluster_mocks", ":transport_socket_match_mocks", "//include/envoy/http:async_client_interface", "//include/envoy/upstream:cluster_factory_interface", @@ -80,6 +100,7 @@ envoy_cc_mock( "//include/envoy/upstream:health_checker_interface", "//include/envoy/upstream:load_balancer_interface", "//include/envoy/upstream:upstream_interface", + "//source/common/http:header_utility_lib", "//source/common/upstream:cluster_factory_lib", 
"//source/common/upstream:health_discovery_service_lib", "//source/common/upstream:upstream_lib", @@ -96,3 +117,208 @@ envoy_cc_mock( "@envoy_api//envoy/data/core/v3:pkg_cc_proto", ], ) + +envoy_cc_mock( + name = "host_set_mocks", + srcs = ["host_set.cc"], + hdrs = ["host_set.h"], + deps = [ + "//include/envoy/upstream:upstream_interface", + "//source/common/common:callback_impl_lib", + "//source/common/upstream:upstream_lib", + ], +) + +envoy_cc_mock( + name = "priority_set_mocks", + srcs = ["priority_set.cc"], + hdrs = ["priority_set.h"], + deps = [ + "//include/envoy/upstream:upstream_interface", + "//test/mocks/upstream:host_set_mocks", + ], +) + +envoy_cc_mock( + name = "retry_priority_mocks", + srcs = ["retry_priority.cc"], + hdrs = ["retry_priority.h"], + deps = [ + "//include/envoy/upstream:retry_interface", + ], +) + +envoy_cc_mock( + name = "retry_priority_factory_mocks", + hdrs = ["retry_priority_factory.h"], + deps = [ + "//include/envoy/upstream:retry_interface", + "//test/mocks/upstream:retry_priority_mocks", + ], +) + +envoy_cc_mock( + name = "cluster_mocks", + srcs = ["cluster.cc"], + hdrs = ["cluster.h"], + deps = [ + "//include/envoy/upstream:upstream_interface", + "//test/mocks/upstream:cluster_info_mocks", + ], +) + +envoy_cc_mock( + name = "cluster_real_priority_set_mocks", + srcs = ["cluster_real_priority_set.cc"], + hdrs = ["cluster_real_priority_set.h"], + deps = [ + "//test/mocks/upstream:cluster_mocks", + ], +) + +envoy_cc_mock( + name = "cluster_priority_set_mocks", + srcs = ["cluster_priority_set.cc"], + hdrs = ["cluster_priority_set.h"], + deps = [ + "//test/mocks/upstream:cluster_mocks", + "//test/mocks/upstream:priority_set_mocks", + ], +) + +envoy_cc_mock( + name = "load_balancer_mocks", + srcs = ["load_balancer.cc"], + hdrs = ["load_balancer.h"], + deps = [ + "//include/envoy/upstream:load_balancer_interface", + "//test/mocks/upstream:host_mocks", + ], +) + +envoy_cc_mock( + name = "thread_aware_load_balancer_mocks", + srcs = ["thread_aware_load_balancer.cc"], + hdrs = ["thread_aware_load_balancer.h"], + deps = [ + "//include/envoy/upstream:load_balancer_interface", + ], +) + +envoy_cc_mock( + name = "thread_local_cluster_mocks", + srcs = ["thread_local_cluster.cc"], + hdrs = ["thread_local_cluster.h"], + deps = [ + "//include/envoy/upstream:thread_local_cluster_interface", + "//test/mocks/upstream:cluster_priority_set_mocks", + "//test/mocks/upstream:load_balancer_mocks", + ], +) + +envoy_cc_mock( + name = "cluster_manager_factory_mocks", + srcs = ["cluster_manager_factory.cc"], + hdrs = ["cluster_manager_factory.h"], + deps = [ + "//include/envoy/upstream:cluster_manager_interface", + "//test/mocks/secret:secret_mocks", + ], +) + +envoy_cc_mock( + name = "cluster_update_callbacks_handle_mocks", + srcs = ["cluster_update_callbacks_handle.cc"], + hdrs = ["cluster_update_callbacks_handle.h"], + deps = [ + "//include/envoy/upstream:cluster_manager_interface", + ], +) + +envoy_cc_mock( + name = "cluster_manager_mocks", + srcs = ["cluster_manager.cc"], + hdrs = ["cluster_manager.h"], + deps = [ + "//include/envoy/upstream:cluster_manager_interface", + "//test/mocks/config:config_mocks", + "//test/mocks/grpc:grpc_mocks", + "//test/mocks/http:http_mocks", + "//test/mocks/tcp:tcp_mocks", + "//test/mocks/upstream:cluster_manager_factory_mocks", + "//test/mocks/upstream:thread_local_cluster_mocks", + ], +) + +envoy_cc_mock( + name = "health_checker_mocks", + srcs = ["health_checker.cc"], + hdrs = ["health_checker.h"], + deps = [ + 
"//include/envoy/upstream:health_checker_interface", + ], +) + +envoy_cc_mock( + name = "health_check_event_logger_mocks", + hdrs = ["health_check_event_logger.h"], + deps = [ + "//include/envoy/upstream:health_checker_interface", + "@envoy_api//envoy/data/core/v3:pkg_cc_proto", + ], +) + +envoy_cc_mock( + name = "cds_api_mocks", + srcs = ["cds_api.cc"], + hdrs = ["cds_api.h"], + deps = [ + "//include/envoy/upstream:cluster_manager_interface", + ], +) + +envoy_cc_mock( + name = "cluster_update_callbacks_mocks", + srcs = ["cluster_update_callbacks.cc"], + hdrs = ["cluster_update_callbacks.h"], + deps = [ + "//include/envoy/upstream:cluster_manager_interface", + ], +) + +envoy_cc_mock( + name = "cluster_info_factory_mocks", + srcs = ["cluster_info_factory.cc"], + hdrs = ["cluster_info_factory.h"], + deps = [ + "//include/envoy/upstream:cluster_manager_interface", + "//source/common/common:minimal_logger_lib", + ], +) + +envoy_cc_mock( + name = "retry_host_predicate_mocks", + srcs = ["retry_host_predicate.cc"], + hdrs = ["retry_host_predicate.h"], + deps = [ + "//include/envoy/upstream:retry_interface", + ], +) + +envoy_cc_mock( + name = "test_retry_host_predicate_factory_mocks", + hdrs = ["test_retry_host_predicate_factory.h"], + deps = [ + "//include/envoy/upstream:retry_interface", + "//test/mocks/upstream:retry_host_predicate_mocks", + ], +) + +envoy_cc_mock( + name = "basic_resource_limit_mocks", + srcs = ["basic_resource_limit.cc"], + hdrs = ["basic_resource_limit.h"], + deps = [ + "//include/envoy/common:resource_interface", + ], +) diff --git a/test/mocks/upstream/basic_resource_limit.cc b/test/mocks/upstream/basic_resource_limit.cc new file mode 100644 index 000000000000..0676301dabf2 --- /dev/null +++ b/test/mocks/upstream/basic_resource_limit.cc @@ -0,0 +1,17 @@ +#include "basic_resource_limit.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Upstream { + +using ::testing::Return; +MockBasicResourceLimit::MockBasicResourceLimit() { + ON_CALL(*this, canCreate()).WillByDefault(Return(true)); +} + +MockBasicResourceLimit::~MockBasicResourceLimit() = default; + +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/basic_resource_limit.h b/test/mocks/upstream/basic_resource_limit.h new file mode 100644 index 000000000000..93b31d2e2be8 --- /dev/null +++ b/test/mocks/upstream/basic_resource_limit.h @@ -0,0 +1,23 @@ +#pragma once + +#include "envoy/common/resource.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Upstream { +class MockBasicResourceLimit : public ResourceLimit { +public: + MockBasicResourceLimit(); + ~MockBasicResourceLimit() override; + + MOCK_METHOD(bool, canCreate, ()); + MOCK_METHOD(void, inc, ()); + MOCK_METHOD(void, dec, ()); + MOCK_METHOD(void, decBy, (uint64_t)); + MOCK_METHOD(uint64_t, max, ()); + MOCK_METHOD(uint64_t, count, (), (const)); +}; +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/cds_api.cc b/test/mocks/upstream/cds_api.cc new file mode 100644 index 000000000000..297defc9d23b --- /dev/null +++ b/test/mocks/upstream/cds_api.cc @@ -0,0 +1,19 @@ +#include "cds_api.h" + +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Upstream { +using ::testing::_; +using ::testing::SaveArg; +MockCdsApi::MockCdsApi() { + ON_CALL(*this, setInitializedCb(_)).WillByDefault(SaveArg<0>(&initialized_callback_)); +} + +MockCdsApi::~MockCdsApi() = default; + +} // namespace Upstream +} // namespace 
Envoy diff --git a/test/mocks/upstream/cds_api.h b/test/mocks/upstream/cds_api.h new file mode 100644 index 000000000000..e11f644159f5 --- /dev/null +++ b/test/mocks/upstream/cds_api.h @@ -0,0 +1,25 @@ +#pragma once + +#include +#include + +#include "envoy/upstream/cluster_manager.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Upstream { +class MockCdsApi : public CdsApi { +public: + MockCdsApi(); + ~MockCdsApi() override; + + MOCK_METHOD(void, initialize, ()); + MOCK_METHOD(void, setInitializedCb, (std::function callback)); + MOCK_METHOD(const std::string, versionInfo, (), (const)); + + std::function initialized_callback_; +}; +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/cluster.cc b/test/mocks/upstream/cluster.cc new file mode 100644 index 000000000000..d0c297506490 --- /dev/null +++ b/test/mocks/upstream/cluster.cc @@ -0,0 +1,23 @@ +#include "cluster.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Upstream { +using ::testing::_; +using ::testing::Invoke; +using ::testing::Return; +MockCluster::MockCluster() { + ON_CALL(*this, info()).WillByDefault(Return(info_)); + ON_CALL(*this, initialize(_)) + .WillByDefault(Invoke([this](std::function callback) -> void { + EXPECT_EQ(nullptr, initialize_callback_); + initialize_callback_ = callback; + })); +} + +MockCluster::~MockCluster() = default; + +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/cluster.h b/test/mocks/upstream/cluster.h new file mode 100644 index 000000000000..243daf09dbdb --- /dev/null +++ b/test/mocks/upstream/cluster.h @@ -0,0 +1,33 @@ +#pragma once + +#include + +#include "envoy/upstream/upstream.h" + +#include "test/mocks/upstream/cluster_info.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Upstream { +class MockCluster : public Cluster { +public: + MockCluster(); + ~MockCluster() override; + + // Upstream::Cluster + MOCK_METHOD(HealthChecker*, healthChecker, ()); + MOCK_METHOD(ClusterInfoConstSharedPtr, info, (), (const)); + MOCK_METHOD(Outlier::Detector*, outlierDetector, ()); + MOCK_METHOD(const Outlier::Detector*, outlierDetector, (), (const)); + MOCK_METHOD(void, initialize, (std::function callback)); + MOCK_METHOD(InitializePhase, initializePhase, (), (const)); + MOCK_METHOD(const Network::Address::InstanceConstSharedPtr&, sourceAddress, (), (const)); + + std::shared_ptr info_{new ::testing::NiceMock()}; + std::function initialize_callback_; + Network::Address::InstanceConstSharedPtr source_address_; +}; +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/cluster_info_factory.cc b/test/mocks/upstream/cluster_info_factory.cc new file mode 100644 index 000000000000..f00b821d1903 --- /dev/null +++ b/test/mocks/upstream/cluster_info_factory.cc @@ -0,0 +1,10 @@ +#include "cluster_info_factory.h" + +namespace Envoy { +namespace Upstream { +MockClusterInfoFactory::MockClusterInfoFactory() = default; + +MockClusterInfoFactory::~MockClusterInfoFactory() = default; + +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/cluster_info_factory.h b/test/mocks/upstream/cluster_info_factory.h new file mode 100644 index 000000000000..08144c57154d --- /dev/null +++ b/test/mocks/upstream/cluster_info_factory.h @@ -0,0 +1,20 @@ +#pragma once + +#include "envoy/upstream/cluster_manager.h" + +#include "common/common/logger.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { 
+namespace Upstream { +class MockClusterInfoFactory : public ClusterInfoFactory, Logger::Loggable { +public: + MockClusterInfoFactory(); + ~MockClusterInfoFactory() override; + + MOCK_METHOD(ClusterInfoConstSharedPtr, createClusterInfo, (const CreateClusterInfoParams&)); +}; +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/cluster_manager.cc b/test/mocks/upstream/cluster_manager.cc new file mode 100644 index 000000000000..d40a5af3d273 --- /dev/null +++ b/test/mocks/upstream/cluster_manager.cc @@ -0,0 +1,36 @@ +#include "cluster_manager.h" + +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Upstream { +using ::testing::_; +using ::testing::Eq; +using ::testing::Return; +using ::testing::ReturnRef; +MockClusterManager::MockClusterManager(TimeSource&) : MockClusterManager() {} + +MockClusterManager::MockClusterManager() { + ON_CALL(*this, httpConnPoolForCluster(_, _, _, _)).WillByDefault(Return(&conn_pool_)); + ON_CALL(*this, tcpConnPoolForCluster(_, _, _)).WillByDefault(Return(&tcp_conn_pool_)); + ON_CALL(*this, httpAsyncClientForCluster(_)).WillByDefault(ReturnRef(async_client_)); + ON_CALL(*this, httpAsyncClientForCluster(_)).WillByDefault((ReturnRef(async_client_))); + ON_CALL(*this, bindConfig()).WillByDefault(ReturnRef(bind_config_)); + ON_CALL(*this, adsMux()).WillByDefault(Return(ads_mux_)); + ON_CALL(*this, grpcAsyncClientManager()).WillByDefault(ReturnRef(async_client_manager_)); + ON_CALL(*this, localClusterName()).WillByDefault((ReturnRef(local_cluster_name_))); + + // Matches are LIFO so "" will match first. + ON_CALL(*this, get(_)).WillByDefault(Return(&thread_local_cluster_)); + ON_CALL(*this, get(Eq(""))).WillByDefault(Return(nullptr)); + ON_CALL(*this, subscriptionFactory()).WillByDefault(ReturnRef(subscription_factory_)); +} + +MockClusterManager::~MockClusterManager() = default; + +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/cluster_manager.h b/test/mocks/upstream/cluster_manager.h new file mode 100644 index 000000000000..c24b1b045acd --- /dev/null +++ b/test/mocks/upstream/cluster_manager.h @@ -0,0 +1,81 @@ +#pragma once + +#include "envoy/upstream/cluster_manager.h" + +#include "test/mocks/config/mocks.h" +#include "test/mocks/grpc/mocks.h" +#include "test/mocks/http/mocks.h" +#include "test/mocks/tcp/mocks.h" + +#include "cluster_manager_factory.h" +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "thread_local_cluster.h" + +namespace Envoy { +namespace Upstream { +using ::testing::NiceMock; +class MockClusterManager : public ClusterManager { +public: + explicit MockClusterManager(TimeSource& time_source); + MockClusterManager(); + ~MockClusterManager() override; + + ClusterUpdateCallbacksHandlePtr + addThreadLocalClusterUpdateCallbacks(ClusterUpdateCallbacks& callbacks) override { + return ClusterUpdateCallbacksHandlePtr{addThreadLocalClusterUpdateCallbacks_(callbacks)}; + } + + Host::CreateConnectionData tcpConnForCluster(const std::string& cluster, + LoadBalancerContext* context) override { + MockHost::MockCreateConnectionData data = tcpConnForCluster_(cluster, context); + return {Network::ClientConnectionPtr{data.connection_}, data.host_description_}; + } + + ClusterManagerFactory& clusterManagerFactory() override { return cluster_manager_factory_; } + + // Upstream::ClusterManager + MOCK_METHOD(bool, addOrUpdateCluster, + (const envoy::config::cluster::v3::Cluster& cluster, + const std::string& version_info)); + MOCK_METHOD(void, 
setPrimaryClustersInitializedCb, (PrimaryClustersReadyCallback)); + MOCK_METHOD(void, setInitializedCb, (InitializationCompleteCallback)); + MOCK_METHOD(void, initializeSecondaryClusters, + (const envoy::config::bootstrap::v3::Bootstrap& bootstrap)); + MOCK_METHOD(ClusterInfoMap, clusters, ()); + MOCK_METHOD(const ClusterSet&, primaryClusters, ()); + MOCK_METHOD(ThreadLocalCluster*, get, (absl::string_view cluster)); + MOCK_METHOD(Http::ConnectionPool::Instance*, httpConnPoolForCluster, + (const std::string& cluster, ResourcePriority priority, + absl::optional downstream_protocol, LoadBalancerContext* context)); + MOCK_METHOD(Tcp::ConnectionPool::Instance*, tcpConnPoolForCluster, + (const std::string& cluster, ResourcePriority priority, + LoadBalancerContext* context)); + MOCK_METHOD(MockHost::MockCreateConnectionData, tcpConnForCluster_, + (const std::string& cluster, LoadBalancerContext* context)); + MOCK_METHOD(Http::AsyncClient&, httpAsyncClientForCluster, (const std::string& cluster)); + MOCK_METHOD(bool, removeCluster, (const std::string& cluster)); + MOCK_METHOD(void, shutdown, ()); + MOCK_METHOD(const envoy::config::core::v3::BindConfig&, bindConfig, (), (const)); + MOCK_METHOD(Config::GrpcMuxSharedPtr, adsMux, ()); + MOCK_METHOD(Grpc::AsyncClientManager&, grpcAsyncClientManager, ()); + MOCK_METHOD(const std::string, versionInfo, (), (const)); + MOCK_METHOD(const absl::optional&, localClusterName, (), (const)); + MOCK_METHOD(ClusterUpdateCallbacksHandle*, addThreadLocalClusterUpdateCallbacks_, + (ClusterUpdateCallbacks & callbacks)); + MOCK_METHOD(Config::SubscriptionFactory&, subscriptionFactory, ()); + + NiceMock conn_pool_; + NiceMock async_client_; + NiceMock tcp_conn_pool_; + NiceMock thread_local_cluster_; + envoy::config::core::v3::BindConfig bind_config_; + std::shared_ptr> ads_mux_; + NiceMock async_client_manager_; + absl::optional local_cluster_name_; + NiceMock cluster_manager_factory_; + NiceMock subscription_factory_; +}; +} // namespace Upstream + +} // namespace Envoy diff --git a/test/mocks/upstream/cluster_manager_factory.cc b/test/mocks/upstream/cluster_manager_factory.cc new file mode 100644 index 000000000000..37727679a554 --- /dev/null +++ b/test/mocks/upstream/cluster_manager_factory.cc @@ -0,0 +1,9 @@ +#include "cluster_manager_factory.h" + +namespace Envoy { +namespace Upstream { +MockClusterManagerFactory::MockClusterManagerFactory() = default; + +MockClusterManagerFactory::~MockClusterManagerFactory() = default; +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/cluster_manager_factory.h b/test/mocks/upstream/cluster_manager_factory.h new file mode 100644 index 000000000000..cdcc952d090b --- /dev/null +++ b/test/mocks/upstream/cluster_manager_factory.h @@ -0,0 +1,44 @@ +#pragma once + +#include "envoy/upstream/cluster_manager.h" + +#include "test/mocks/secret/mocks.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Upstream { +using ::testing::NiceMock; +class MockClusterManagerFactory : public ClusterManagerFactory { +public: + MockClusterManagerFactory(); + ~MockClusterManagerFactory() override; + + Secret::MockSecretManager& secretManager() override { return secret_manager_; }; + + MOCK_METHOD(ClusterManagerPtr, clusterManagerFromProto, + (const envoy::config::bootstrap::v3::Bootstrap& bootstrap)); + + MOCK_METHOD(Http::ConnectionPool::InstancePtr, allocateConnPool, + (Event::Dispatcher & dispatcher, HostConstSharedPtr host, ResourcePriority priority, + Http::Protocol protocol, const 
Network::ConnectionSocket::OptionsSharedPtr& options, + const Network::TransportSocketOptionsSharedPtr& transport_socket_options)); + + MOCK_METHOD(Tcp::ConnectionPool::InstancePtr, allocateTcpConnPool, + (Event::Dispatcher & dispatcher, HostConstSharedPtr host, ResourcePriority priority, + const Network::ConnectionSocket::OptionsSharedPtr& options, + Network::TransportSocketOptionsSharedPtr)); + + MOCK_METHOD((std::pair), clusterFromProto, + (const envoy::config::cluster::v3::Cluster& cluster, ClusterManager& cm, + Outlier::EventLoggerSharedPtr outlier_event_logger, bool added_via_api)); + + MOCK_METHOD(CdsApiPtr, createCds, + (const envoy::config::core::v3::ConfigSource& cds_config, ClusterManager& cm)); + +private: + NiceMock secret_manager_; +}; +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/cluster_priority_set.cc b/test/mocks/upstream/cluster_priority_set.cc new file mode 100644 index 000000000000..08b4b1a6a40a --- /dev/null +++ b/test/mocks/upstream/cluster_priority_set.cc @@ -0,0 +1,10 @@ +#include "cluster_priority_set.h" + +namespace Envoy { +namespace Upstream { +MockClusterMockPrioritySet::MockClusterMockPrioritySet() = default; + +MockClusterMockPrioritySet::~MockClusterMockPrioritySet() = default; + +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/cluster_priority_set.h b/test/mocks/upstream/cluster_priority_set.h new file mode 100644 index 000000000000..da090750ec23 --- /dev/null +++ b/test/mocks/upstream/cluster_priority_set.h @@ -0,0 +1,22 @@ +#pragma once + +#include "cluster.h" +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "priority_set.h" + +namespace Envoy { +namespace Upstream { +class MockClusterMockPrioritySet : public MockCluster { +public: + MockClusterMockPrioritySet(); + ~MockClusterMockPrioritySet() override; + + // Upstream::Cluster + MockPrioritySet& prioritySet() override { return priority_set_; } + const PrioritySet& prioritySet() const override { return priority_set_; } + + ::testing::NiceMock priority_set_; +}; +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/cluster_real_priority_set.cc b/test/mocks/upstream/cluster_real_priority_set.cc new file mode 100644 index 000000000000..60c3d05a08f8 --- /dev/null +++ b/test/mocks/upstream/cluster_real_priority_set.cc @@ -0,0 +1,9 @@ +#include "cluster_real_priority_set.h" + +namespace Envoy { +namespace Upstream { +MockClusterRealPrioritySet::MockClusterRealPrioritySet() = default; + +MockClusterRealPrioritySet::~MockClusterRealPrioritySet() = default; +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/cluster_real_priority_set.h b/test/mocks/upstream/cluster_real_priority_set.h new file mode 100644 index 000000000000..4b6cde5ffd8c --- /dev/null +++ b/test/mocks/upstream/cluster_real_priority_set.h @@ -0,0 +1,19 @@ +#pragma once + +#include "cluster.h" + +namespace Envoy { +namespace Upstream { +class MockClusterRealPrioritySet : public MockCluster { +public: + MockClusterRealPrioritySet(); + ~MockClusterRealPrioritySet() override; + + // Upstream::Cluster + PrioritySetImpl& prioritySet() override { return priority_set_; } + const PrioritySet& prioritySet() const override { return priority_set_; } + + PrioritySetImpl priority_set_; +}; +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/cluster_update_callbacks.cc b/test/mocks/upstream/cluster_update_callbacks.cc new file mode 100644 index 000000000000..5451d3ed2a70 --- /dev/null +++ 
b/test/mocks/upstream/cluster_update_callbacks.cc @@ -0,0 +1,10 @@ +#include "cluster_update_callbacks.h" + +namespace Envoy { +namespace Upstream { +MockClusterUpdateCallbacks::MockClusterUpdateCallbacks() = default; + +MockClusterUpdateCallbacks::~MockClusterUpdateCallbacks() = default; + +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/cluster_update_callbacks.h b/test/mocks/upstream/cluster_update_callbacks.h new file mode 100644 index 000000000000..782c319004c1 --- /dev/null +++ b/test/mocks/upstream/cluster_update_callbacks.h @@ -0,0 +1,21 @@ +#pragma once + +#include + +#include "envoy/upstream/cluster_manager.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Upstream { +class MockClusterUpdateCallbacks : public ClusterUpdateCallbacks { +public: + MockClusterUpdateCallbacks(); + ~MockClusterUpdateCallbacks() override; + + MOCK_METHOD(void, onClusterAddOrUpdate, (ThreadLocalCluster & cluster)); + MOCK_METHOD(void, onClusterRemoval, (const std::string& cluster_name)); +}; +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/cluster_update_callbacks_handle.cc b/test/mocks/upstream/cluster_update_callbacks_handle.cc new file mode 100644 index 000000000000..72f44f798c12 --- /dev/null +++ b/test/mocks/upstream/cluster_update_callbacks_handle.cc @@ -0,0 +1,9 @@ +#include "cluster_update_callbacks_handle.h" + +namespace Envoy { +namespace Upstream { +MockClusterUpdateCallbacksHandle::MockClusterUpdateCallbacksHandle() = default; + +MockClusterUpdateCallbacksHandle::~MockClusterUpdateCallbacksHandle() = default; +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/cluster_update_callbacks_handle.h b/test/mocks/upstream/cluster_update_callbacks_handle.h new file mode 100644 index 000000000000..22a023c45a9d --- /dev/null +++ b/test/mocks/upstream/cluster_update_callbacks_handle.h @@ -0,0 +1,16 @@ +#pragma once + +#include "envoy/upstream/cluster_manager.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Upstream { +class MockClusterUpdateCallbacksHandle : public ClusterUpdateCallbacksHandle { +public: + MockClusterUpdateCallbacksHandle(); + ~MockClusterUpdateCallbacksHandle() override; +}; +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/health_check_event_logger.h b/test/mocks/upstream/health_check_event_logger.h new file mode 100644 index 000000000000..1d6e75819b3b --- /dev/null +++ b/test/mocks/upstream/health_check_event_logger.h @@ -0,0 +1,35 @@ +#pragma once + +#include +#include +#include +#include +#include + +#include "envoy/data/core/v3/health_check_event.pb.h" +#include "envoy/upstream/health_checker.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Upstream { +class MockHealthCheckEventLogger : public HealthCheckEventLogger { +public: + MOCK_METHOD(void, logEjectUnhealthy, + (envoy::data::core::v3::HealthCheckerType, const HostDescriptionConstSharedPtr&, + envoy::data::core::v3::HealthCheckFailureType)); + MOCK_METHOD(void, logAddHealthy, + (envoy::data::core::v3::HealthCheckerType, const HostDescriptionConstSharedPtr&, + bool)); + MOCK_METHOD(void, logUnhealthy, + (envoy::data::core::v3::HealthCheckerType, const HostDescriptionConstSharedPtr&, + envoy::data::core::v3::HealthCheckFailureType, bool)); + MOCK_METHOD(void, logDegraded, + (envoy::data::core::v3::HealthCheckerType, const HostDescriptionConstSharedPtr&)); + MOCK_METHOD(void, 
logNoLongerDegraded, + (envoy::data::core::v3::HealthCheckerType, const HostDescriptionConstSharedPtr&)); +}; +} // namespace Upstream + +} // namespace Envoy diff --git a/test/mocks/upstream/health_checker.cc b/test/mocks/upstream/health_checker.cc new file mode 100644 index 000000000000..2bf89a8345b4 --- /dev/null +++ b/test/mocks/upstream/health_checker.cc @@ -0,0 +1,19 @@ +#include "health_checker.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Upstream { +using ::testing::_; +using ::testing::Invoke; +MockHealthChecker::MockHealthChecker() { + ON_CALL(*this, addHostCheckCompleteCb(_)).WillByDefault(Invoke([this](HostStatusCb cb) -> void { + callbacks_.push_back(cb); + })); +} + +MockHealthChecker::~MockHealthChecker() = default; + +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/health_checker.h b/test/mocks/upstream/health_checker.h new file mode 100644 index 000000000000..af4a96a9ee87 --- /dev/null +++ b/test/mocks/upstream/health_checker.h @@ -0,0 +1,27 @@ +#pragma once + +#include "envoy/upstream/health_checker.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Upstream { +class MockHealthChecker : public HealthChecker { +public: + MockHealthChecker(); + ~MockHealthChecker() override; + + MOCK_METHOD(void, addHostCheckCompleteCb, (HostStatusCb callback)); + MOCK_METHOD(void, start, ()); + + void runCallbacks(Upstream::HostSharedPtr host, HealthTransition changed_state) { + for (const auto& callback : callbacks_) { + callback(host, changed_state); + } + } + + std::list callbacks_; +}; +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/host_set.cc b/test/mocks/upstream/host_set.cc new file mode 100644 index 000000000000..1d49579073f0 --- /dev/null +++ b/test/mocks/upstream/host_set.cc @@ -0,0 +1,55 @@ +#include "host_set.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Upstream { +using ::testing::Invoke; +using ::testing::Return; +using ::testing::ReturnRef; +MockHostSet::MockHostSet(uint32_t priority, uint32_t overprovisioning_factor) + : priority_(priority), overprovisioning_factor_(overprovisioning_factor) { + ON_CALL(*this, priority()).WillByDefault(Return(priority_)); + ON_CALL(*this, hosts()).WillByDefault(ReturnRef(hosts_)); + ON_CALL(*this, hostsPtr()).WillByDefault(Invoke([this]() { + return std::make_shared(hosts_); + })); + ON_CALL(*this, healthyHosts()).WillByDefault(ReturnRef(healthy_hosts_)); + ON_CALL(*this, healthyHostsPtr()).WillByDefault(Invoke([this]() { + return std::make_shared(healthy_hosts_); + })); + ON_CALL(*this, degradedHosts()).WillByDefault(ReturnRef(degraded_hosts_)); + ON_CALL(*this, degradedHostsPtr()).WillByDefault(Invoke([this]() { + return std::make_shared(degraded_hosts_); + })); + ON_CALL(*this, excludedHosts()).WillByDefault(ReturnRef(excluded_hosts_)); + ON_CALL(*this, excludedHostsPtr()).WillByDefault(Invoke([this]() { + return std::make_shared(excluded_hosts_); + })); + ON_CALL(*this, hostsPerLocality()).WillByDefault(Invoke([this]() -> const HostsPerLocality& { + return *hosts_per_locality_; + })); + ON_CALL(*this, hostsPerLocalityPtr()).WillByDefault(Return(hosts_per_locality_)); + ON_CALL(*this, healthyHostsPerLocality()) + .WillByDefault( + Invoke([this]() -> const HostsPerLocality& { return *healthy_hosts_per_locality_; })); + ON_CALL(*this, healthyHostsPerLocalityPtr()).WillByDefault(Return(healthy_hosts_per_locality_)); + ON_CALL(*this, 
degradedHostsPerLocality()) + .WillByDefault( + Invoke([this]() -> const HostsPerLocality& { return *degraded_hosts_per_locality_; })); + ON_CALL(*this, degradedHostsPerLocalityPtr()).WillByDefault(Return(degraded_hosts_per_locality_)); + ON_CALL(*this, excludedHostsPerLocality()) + .WillByDefault( + Invoke([this]() -> const HostsPerLocality& { return *excluded_hosts_per_locality_; })); + ON_CALL(*this, excludedHostsPerLocalityPtr()).WillByDefault(Return(excluded_hosts_per_locality_)); + ON_CALL(*this, localityWeights()).WillByDefault(Invoke([this]() -> LocalityWeightsConstSharedPtr { + return locality_weights_; + })); +} + +MockHostSet::~MockHostSet() = default; + +} // namespace Upstream + +} // namespace Envoy diff --git a/test/mocks/upstream/host_set.h b/test/mocks/upstream/host_set.h new file mode 100644 index 000000000000..95ed9f90ee88 --- /dev/null +++ b/test/mocks/upstream/host_set.h @@ -0,0 +1,69 @@ +#pragma once + +#include "envoy/upstream/upstream.h" + +#include "common/common/callback_impl.h" +#include "common/upstream/upstream_impl.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Upstream { +class MockHostSet : public HostSet { +public: + MockHostSet(uint32_t priority = 0, + uint32_t overprovisioning_factor = kDefaultOverProvisioningFactor); + ~MockHostSet() override; + + void runCallbacks(const HostVector added, const HostVector removed) { + member_update_cb_helper_.runCallbacks(priority(), added, removed); + } + + Common::CallbackHandle* addMemberUpdateCb(PrioritySet::PriorityUpdateCb callback) { + return member_update_cb_helper_.add(callback); + } + + // Upstream::HostSet + MOCK_METHOD(const HostVector&, hosts, (), (const)); + MOCK_METHOD(HostVectorConstSharedPtr, hostsPtr, (), (const)); + MOCK_METHOD(const HostVector&, healthyHosts, (), (const)); + MOCK_METHOD(HealthyHostVectorConstSharedPtr, healthyHostsPtr, (), (const)); + MOCK_METHOD(const HostVector&, degradedHosts, (), (const)); + MOCK_METHOD(DegradedHostVectorConstSharedPtr, degradedHostsPtr, (), (const)); + MOCK_METHOD(const HostVector&, excludedHosts, (), (const)); + MOCK_METHOD(ExcludedHostVectorConstSharedPtr, excludedHostsPtr, (), (const)); + MOCK_METHOD(const HostsPerLocality&, hostsPerLocality, (), (const)); + MOCK_METHOD(HostsPerLocalityConstSharedPtr, hostsPerLocalityPtr, (), (const)); + MOCK_METHOD(const HostsPerLocality&, healthyHostsPerLocality, (), (const)); + MOCK_METHOD(HostsPerLocalityConstSharedPtr, healthyHostsPerLocalityPtr, (), (const)); + MOCK_METHOD(const HostsPerLocality&, degradedHostsPerLocality, (), (const)); + MOCK_METHOD(HostsPerLocalityConstSharedPtr, degradedHostsPerLocalityPtr, (), (const)); + MOCK_METHOD(const HostsPerLocality&, excludedHostsPerLocality, (), (const)); + MOCK_METHOD(HostsPerLocalityConstSharedPtr, excludedHostsPerLocalityPtr, (), (const)); + MOCK_METHOD(LocalityWeightsConstSharedPtr, localityWeights, (), (const)); + MOCK_METHOD(absl::optional, chooseHealthyLocality, ()); + MOCK_METHOD(absl::optional, chooseDegradedLocality, ()); + MOCK_METHOD(uint32_t, priority, (), (const)); + uint32_t overprovisioningFactor() const override { return overprovisioning_factor_; } + void setOverprovisioningFactor(const uint32_t overprovisioning_factor) { + overprovisioning_factor_ = overprovisioning_factor; + } + + HostVector hosts_; + HostVector healthy_hosts_; + HostVector degraded_hosts_; + HostVector excluded_hosts_; + HostsPerLocalitySharedPtr hosts_per_locality_{new HostsPerLocalityImpl()}; + HostsPerLocalitySharedPtr 
healthy_hosts_per_locality_{new HostsPerLocalityImpl()}; + HostsPerLocalitySharedPtr degraded_hosts_per_locality_{new HostsPerLocalityImpl()}; + HostsPerLocalitySharedPtr excluded_hosts_per_locality_{new HostsPerLocalityImpl()}; + LocalityWeightsConstSharedPtr locality_weights_{{}}; + Common::CallbackManager member_update_cb_helper_; + uint32_t priority_{}; + uint32_t overprovisioning_factor_{}; + bool run_in_panic_mode_ = false; +}; +} // namespace Upstream + +} // namespace Envoy diff --git a/test/mocks/upstream/load_balancer.cc b/test/mocks/upstream/load_balancer.cc new file mode 100644 index 000000000000..3cdb79d405db --- /dev/null +++ b/test/mocks/upstream/load_balancer.cc @@ -0,0 +1,15 @@ +#include "load_balancer.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Upstream { +using ::testing::_; +using ::testing::Return; +MockLoadBalancer::MockLoadBalancer() { ON_CALL(*this, chooseHost(_)).WillByDefault(Return(host_)); } + +MockLoadBalancer::~MockLoadBalancer() = default; + +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/load_balancer.h b/test/mocks/upstream/load_balancer.h new file mode 100644 index 000000000000..364b6a7eb1d3 --- /dev/null +++ b/test/mocks/upstream/load_balancer.h @@ -0,0 +1,23 @@ +#pragma once + +#include "envoy/upstream/load_balancer.h" + +#include "test/mocks/upstream/host.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Upstream { +class MockLoadBalancer : public LoadBalancer { +public: + MockLoadBalancer(); + ~MockLoadBalancer() override; + + // Upstream::LoadBalancer + MOCK_METHOD(HostConstSharedPtr, chooseHost, (LoadBalancerContext * context)); + + std::shared_ptr host_{new MockHost()}; +}; +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/load_balancer_context.h b/test/mocks/upstream/load_balancer_context.h index ef3d46486777..553ae4e98e2d 100644 --- a/test/mocks/upstream/load_balancer_context.h +++ b/test/mocks/upstream/load_balancer_context.h @@ -1,3 +1,4 @@ +#pragma once #include "envoy/upstream/load_balancer.h" #include "gmock/gmock.h" diff --git a/test/mocks/upstream/mocks.cc b/test/mocks/upstream/mocks.cc deleted file mode 100644 index 990aa6b1a620..000000000000 --- a/test/mocks/upstream/mocks.cc +++ /dev/null @@ -1,186 +0,0 @@ -#include "test/mocks/upstream/mocks.h" - -#include -#include - -#include "envoy/upstream/load_balancer.h" - -#include "gmock/gmock.h" -#include "gtest/gtest.h" - -using testing::_; -using testing::Eq; -using testing::Invoke; -using testing::Return; -using testing::ReturnRef; -using testing::SaveArg; - -namespace Envoy { -namespace Upstream { - -MockHostSet::MockHostSet(uint32_t priority, uint32_t overprovisioning_factor) - : priority_(priority), overprovisioning_factor_(overprovisioning_factor) { - ON_CALL(*this, priority()).WillByDefault(Return(priority_)); - ON_CALL(*this, hosts()).WillByDefault(ReturnRef(hosts_)); - ON_CALL(*this, hostsPtr()).WillByDefault(Invoke([this]() { - return std::make_shared(hosts_); - })); - ON_CALL(*this, healthyHosts()).WillByDefault(ReturnRef(healthy_hosts_)); - ON_CALL(*this, healthyHostsPtr()).WillByDefault(Invoke([this]() { - return std::make_shared(healthy_hosts_); - })); - ON_CALL(*this, degradedHosts()).WillByDefault(ReturnRef(degraded_hosts_)); - ON_CALL(*this, degradedHostsPtr()).WillByDefault(Invoke([this]() { - return std::make_shared(degraded_hosts_); - })); - ON_CALL(*this, excludedHosts()).WillByDefault(ReturnRef(excluded_hosts_)); - 
ON_CALL(*this, excludedHostsPtr()).WillByDefault(Invoke([this]() { - return std::make_shared(excluded_hosts_); - })); - ON_CALL(*this, hostsPerLocality()).WillByDefault(Invoke([this]() -> const HostsPerLocality& { - return *hosts_per_locality_; - })); - ON_CALL(*this, hostsPerLocalityPtr()).WillByDefault(Return(hosts_per_locality_)); - ON_CALL(*this, healthyHostsPerLocality()) - .WillByDefault( - Invoke([this]() -> const HostsPerLocality& { return *healthy_hosts_per_locality_; })); - ON_CALL(*this, healthyHostsPerLocalityPtr()).WillByDefault(Return(healthy_hosts_per_locality_)); - ON_CALL(*this, degradedHostsPerLocality()) - .WillByDefault( - Invoke([this]() -> const HostsPerLocality& { return *degraded_hosts_per_locality_; })); - ON_CALL(*this, degradedHostsPerLocalityPtr()).WillByDefault(Return(degraded_hosts_per_locality_)); - ON_CALL(*this, excludedHostsPerLocality()) - .WillByDefault( - Invoke([this]() -> const HostsPerLocality& { return *excluded_hosts_per_locality_; })); - ON_CALL(*this, excludedHostsPerLocalityPtr()).WillByDefault(Return(excluded_hosts_per_locality_)); - ON_CALL(*this, localityWeights()).WillByDefault(Invoke([this]() -> LocalityWeightsConstSharedPtr { - return locality_weights_; - })); -} - -MockHostSet::~MockHostSet() = default; - -MockPrioritySet::MockPrioritySet() { - getHostSet(0); - ON_CALL(*this, hostSetsPerPriority()).WillByDefault(ReturnRef(host_sets_)); - ON_CALL(testing::Const(*this), hostSetsPerPriority()).WillByDefault(ReturnRef(host_sets_)); - ON_CALL(*this, addMemberUpdateCb(_)) - .WillByDefault(Invoke([this](PrioritySet::MemberUpdateCb cb) -> Common::CallbackHandle* { - return member_update_cb_helper_.add(cb); - })); - ON_CALL(*this, addPriorityUpdateCb(_)) - .WillByDefault(Invoke([this](PrioritySet::PriorityUpdateCb cb) -> Common::CallbackHandle* { - return priority_update_cb_helper_.add(cb); - })); -} - -MockPrioritySet::~MockPrioritySet() = default; - -HostSet& MockPrioritySet::getHostSet(uint32_t priority) { - if (host_sets_.size() < priority + 1) { - for (size_t i = host_sets_.size(); i <= priority; ++i) { - auto host_set = new NiceMock(i); - host_sets_.push_back(HostSetPtr{host_set}); - host_set->addMemberUpdateCb([this](uint32_t priority, const HostVector& hosts_added, - const HostVector& hosts_removed) { - runUpdateCallbacks(priority, hosts_added, hosts_removed); - }); - } - } - return *host_sets_[priority]; -} -void MockPrioritySet::runUpdateCallbacks(uint32_t priority, const HostVector& hosts_added, - const HostVector& hosts_removed) { - member_update_cb_helper_.runCallbacks(hosts_added, hosts_removed); - priority_update_cb_helper_.runCallbacks(priority, hosts_added, hosts_removed); -} - -MockRetryPriority::~MockRetryPriority() = default; - -MockCluster::MockCluster() { - ON_CALL(*this, info()).WillByDefault(Return(info_)); - ON_CALL(*this, initialize(_)) - .WillByDefault(Invoke([this](std::function callback) -> void { - EXPECT_EQ(nullptr, initialize_callback_); - initialize_callback_ = callback; - })); -} - -MockCluster::~MockCluster() = default; - -MockClusterRealPrioritySet::MockClusterRealPrioritySet() = default; -MockClusterRealPrioritySet::~MockClusterRealPrioritySet() = default; - -MockClusterMockPrioritySet::MockClusterMockPrioritySet() = default; -MockClusterMockPrioritySet::~MockClusterMockPrioritySet() = default; - -MockLoadBalancer::MockLoadBalancer() { ON_CALL(*this, chooseHost(_)).WillByDefault(Return(host_)); } -MockLoadBalancer::~MockLoadBalancer() = default; - -MockThreadAwareLoadBalancer::MockThreadAwareLoadBalancer() = 
default; -MockThreadAwareLoadBalancer::~MockThreadAwareLoadBalancer() = default; - -MockThreadLocalCluster::MockThreadLocalCluster() { - ON_CALL(*this, prioritySet()).WillByDefault(ReturnRef(cluster_.priority_set_)); - ON_CALL(*this, info()).WillByDefault(Return(cluster_.info_)); - ON_CALL(*this, loadBalancer()).WillByDefault(ReturnRef(lb_)); -} - -MockThreadLocalCluster::~MockThreadLocalCluster() = default; - -MockClusterUpdateCallbacksHandle::MockClusterUpdateCallbacksHandle() = default; -MockClusterUpdateCallbacksHandle::~MockClusterUpdateCallbacksHandle() = default; - -MockClusterManager::MockClusterManager(TimeSource&) : MockClusterManager() {} - -MockClusterManager::MockClusterManager() { - ON_CALL(*this, httpConnPoolForCluster(_, _, _, _)).WillByDefault(Return(&conn_pool_)); - ON_CALL(*this, tcpConnPoolForCluster(_, _, _)).WillByDefault(Return(&tcp_conn_pool_)); - ON_CALL(*this, httpAsyncClientForCluster(_)).WillByDefault(ReturnRef(async_client_)); - ON_CALL(*this, httpAsyncClientForCluster(_)).WillByDefault((ReturnRef(async_client_))); - ON_CALL(*this, bindConfig()).WillByDefault(ReturnRef(bind_config_)); - ON_CALL(*this, adsMux()).WillByDefault(Return(ads_mux_)); - ON_CALL(*this, grpcAsyncClientManager()).WillByDefault(ReturnRef(async_client_manager_)); - ON_CALL(*this, localClusterName()).WillByDefault((ReturnRef(local_cluster_name_))); - - // Matches are LIFO so "" will match first. - ON_CALL(*this, get(_)).WillByDefault(Return(&thread_local_cluster_)); - ON_CALL(*this, get(Eq(""))).WillByDefault(Return(nullptr)); - ON_CALL(*this, subscriptionFactory()).WillByDefault(ReturnRef(subscription_factory_)); -} - -MockClusterManager::~MockClusterManager() = default; - -MockHealthChecker::MockHealthChecker() { - ON_CALL(*this, addHostCheckCompleteCb(_)).WillByDefault(Invoke([this](HostStatusCb cb) -> void { - callbacks_.push_back(cb); - })); -} - -MockHealthChecker::~MockHealthChecker() = default; - -MockCdsApi::MockCdsApi() { - ON_CALL(*this, setInitializedCb(_)).WillByDefault(SaveArg<0>(&initialized_callback_)); -} - -MockCdsApi::~MockCdsApi() = default; - -MockClusterUpdateCallbacks::MockClusterUpdateCallbacks() = default; -MockClusterUpdateCallbacks::~MockClusterUpdateCallbacks() = default; - -MockClusterInfoFactory::MockClusterInfoFactory() = default; -MockClusterInfoFactory::~MockClusterInfoFactory() = default; - -MockRetryHostPredicate::MockRetryHostPredicate() = default; -MockRetryHostPredicate::~MockRetryHostPredicate() = default; - -MockClusterManagerFactory::MockClusterManagerFactory() = default; -MockClusterManagerFactory::~MockClusterManagerFactory() = default; - -MockBasicResourceLimit::MockBasicResourceLimit() { - ON_CALL(*this, canCreate()).WillByDefault(Return(true)); -} -MockBasicResourceLimit::~MockBasicResourceLimit() = default; - -} // namespace Upstream -} // namespace Envoy diff --git a/test/mocks/upstream/mocks.h b/test/mocks/upstream/mocks.h index 0e78b3fcf46e..879280b0aef1 100644 --- a/test/mocks/upstream/mocks.h +++ b/test/mocks/upstream/mocks.h @@ -1,10 +1,6 @@ #pragma once -#include -#include -#include -#include -#include +// NOLINT(namespace-envoy) #include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/config/cluster/v3/cluster.pb.h" @@ -28,424 +24,26 @@ #include "test/mocks/secret/mocks.h" #include "test/mocks/stats/mocks.h" #include "test/mocks/tcp/mocks.h" +#include "test/mocks/upstream/basic_resource_limit.h" +#include "test/mocks/upstream/cds_api.h" +#include "test/mocks/upstream/cluster.h" #include "test/mocks/upstream/cluster_info.h" 
+#include "test/mocks/upstream/cluster_info_factory.h" +#include "test/mocks/upstream/cluster_manager.h" +#include "test/mocks/upstream/cluster_manager_factory.h" +#include "test/mocks/upstream/cluster_priority_set.h" +#include "test/mocks/upstream/cluster_real_priority_set.h" +#include "test/mocks/upstream/cluster_update_callbacks.h" +#include "test/mocks/upstream/cluster_update_callbacks_handle.h" +#include "test/mocks/upstream/health_check_event_logger.h" +#include "test/mocks/upstream/health_checker.h" +#include "test/mocks/upstream/host_set.h" +#include "test/mocks/upstream/load_balancer.h" #include "test/mocks/upstream/load_balancer_context.h" - -#include "gmock/gmock.h" -#include "gtest/gtest.h" - -using testing::NiceMock; - -namespace Envoy { -namespace Upstream { - -class MockHostSet : public HostSet { -public: - MockHostSet(uint32_t priority = 0, - uint32_t overprovisioning_factor = kDefaultOverProvisioningFactor); - ~MockHostSet() override; - - void runCallbacks(const HostVector added, const HostVector removed) { - member_update_cb_helper_.runCallbacks(priority(), added, removed); - } - - Common::CallbackHandle* addMemberUpdateCb(PrioritySet::PriorityUpdateCb callback) { - return member_update_cb_helper_.add(callback); - } - - // Upstream::HostSet - MOCK_METHOD(const HostVector&, hosts, (), (const)); - MOCK_METHOD(HostVectorConstSharedPtr, hostsPtr, (), (const)); - MOCK_METHOD(const HostVector&, healthyHosts, (), (const)); - MOCK_METHOD(HealthyHostVectorConstSharedPtr, healthyHostsPtr, (), (const)); - MOCK_METHOD(const HostVector&, degradedHosts, (), (const)); - MOCK_METHOD(DegradedHostVectorConstSharedPtr, degradedHostsPtr, (), (const)); - MOCK_METHOD(const HostVector&, excludedHosts, (), (const)); - MOCK_METHOD(ExcludedHostVectorConstSharedPtr, excludedHostsPtr, (), (const)); - MOCK_METHOD(const HostsPerLocality&, hostsPerLocality, (), (const)); - MOCK_METHOD(HostsPerLocalityConstSharedPtr, hostsPerLocalityPtr, (), (const)); - MOCK_METHOD(const HostsPerLocality&, healthyHostsPerLocality, (), (const)); - MOCK_METHOD(HostsPerLocalityConstSharedPtr, healthyHostsPerLocalityPtr, (), (const)); - MOCK_METHOD(const HostsPerLocality&, degradedHostsPerLocality, (), (const)); - MOCK_METHOD(HostsPerLocalityConstSharedPtr, degradedHostsPerLocalityPtr, (), (const)); - MOCK_METHOD(const HostsPerLocality&, excludedHostsPerLocality, (), (const)); - MOCK_METHOD(HostsPerLocalityConstSharedPtr, excludedHostsPerLocalityPtr, (), (const)); - MOCK_METHOD(LocalityWeightsConstSharedPtr, localityWeights, (), (const)); - MOCK_METHOD(absl::optional, chooseHealthyLocality, ()); - MOCK_METHOD(absl::optional, chooseDegradedLocality, ()); - MOCK_METHOD(uint32_t, priority, (), (const)); - uint32_t overprovisioningFactor() const override { return overprovisioning_factor_; } - void setOverprovisioningFactor(const uint32_t overprovisioning_factor) { - overprovisioning_factor_ = overprovisioning_factor; - } - - HostVector hosts_; - HostVector healthy_hosts_; - HostVector degraded_hosts_; - HostVector excluded_hosts_; - HostsPerLocalitySharedPtr hosts_per_locality_{new HostsPerLocalityImpl()}; - HostsPerLocalitySharedPtr healthy_hosts_per_locality_{new HostsPerLocalityImpl()}; - HostsPerLocalitySharedPtr degraded_hosts_per_locality_{new HostsPerLocalityImpl()}; - HostsPerLocalitySharedPtr excluded_hosts_per_locality_{new HostsPerLocalityImpl()}; - LocalityWeightsConstSharedPtr locality_weights_{{}}; - Common::CallbackManager member_update_cb_helper_; - uint32_t priority_{}; - uint32_t overprovisioning_factor_{}; - 
bool run_in_panic_mode_ = false; -}; - -class MockPrioritySet : public PrioritySet { -public: - MockPrioritySet(); - ~MockPrioritySet() override; - - HostSet& getHostSet(uint32_t priority); - void runUpdateCallbacks(uint32_t priority, const HostVector& hosts_added, - const HostVector& hosts_removed); - - MOCK_METHOD(Common::CallbackHandle*, addMemberUpdateCb, (MemberUpdateCb callback), (const)); - MOCK_METHOD(Common::CallbackHandle*, addPriorityUpdateCb, (PriorityUpdateCb callback), (const)); - MOCK_METHOD(const std::vector&, hostSetsPerPriority, (), (const)); - MOCK_METHOD(std::vector&, hostSetsPerPriority, ()); - MOCK_METHOD(void, updateHosts, - (uint32_t priority, UpdateHostsParams&& update_hosts_params, - LocalityWeightsConstSharedPtr locality_weights, const HostVector& hosts_added, - const HostVector& hosts_removed, absl::optional overprovisioning_factor)); - MOCK_METHOD(void, batchHostUpdate, (BatchUpdateCb&)); - - MockHostSet* getMockHostSet(uint32_t priority) { - getHostSet(priority); // Ensure the host set exists. - return reinterpret_cast(host_sets_[priority].get()); - } - - std::vector host_sets_; - Common::CallbackManager member_update_cb_helper_; - Common::CallbackManager - priority_update_cb_helper_; -}; - -class MockRetryPriority : public RetryPriority { -public: - MockRetryPriority(const HealthyLoad& healthy_priority_load, - const DegradedLoad& degraded_priority_load) - : priority_load_({healthy_priority_load, degraded_priority_load}) {} - MockRetryPriority(const MockRetryPriority& other) : priority_load_(other.priority_load_) {} - ~MockRetryPriority() override; - - const HealthyAndDegradedLoad& determinePriorityLoad(const PrioritySet&, - const HealthyAndDegradedLoad&, - const PriorityMappingFunc&) override { - return priority_load_; - } - - MOCK_METHOD(void, onHostAttempted, (HostDescriptionConstSharedPtr)); - -private: - const HealthyAndDegradedLoad priority_load_; -}; - -class MockRetryPriorityFactory : public RetryPriorityFactory { -public: - MockRetryPriorityFactory(const MockRetryPriority& retry_priority) - : retry_priority_(retry_priority) {} - RetryPrioritySharedPtr createRetryPriority(const Protobuf::Message&, - ProtobufMessage::ValidationVisitor&, - uint32_t) override { - return std::make_shared>(retry_priority_); - } - - std::string name() const override { return "envoy.test_retry_priority"; } - ProtobufTypes::MessagePtr createEmptyConfigProto() override { - // Using Struct instead of a custom per-filter empty config proto - // This is only allowed in tests. 
- return ProtobufTypes::MessagePtr{new Envoy::ProtobufWkt::Struct()}; - } - -private: - const MockRetryPriority& retry_priority_; -}; - -class MockCluster : public Cluster { -public: - MockCluster(); - ~MockCluster() override; - - // Upstream::Cluster - MOCK_METHOD(HealthChecker*, healthChecker, ()); - MOCK_METHOD(ClusterInfoConstSharedPtr, info, (), (const)); - MOCK_METHOD(Outlier::Detector*, outlierDetector, ()); - MOCK_METHOD(const Outlier::Detector*, outlierDetector, (), (const)); - MOCK_METHOD(void, initialize, (std::function callback)); - MOCK_METHOD(InitializePhase, initializePhase, (), (const)); - MOCK_METHOD(const Network::Address::InstanceConstSharedPtr&, sourceAddress, (), (const)); - - std::shared_ptr info_{new NiceMock()}; - std::function initialize_callback_; - Network::Address::InstanceConstSharedPtr source_address_; -}; - -// Note that we could template the two implementations below, but to avoid having to define the -// ctor/dtor (which is fairly expensive for mocks) in the header file we duplicate the code instead. - -// Use this when interaction with a real PrioritySet is needed, e.g. when update callbacks -// needs to be triggered. -class MockClusterRealPrioritySet : public MockCluster { -public: - MockClusterRealPrioritySet(); - ~MockClusterRealPrioritySet() override; - - // Upstream::Cluster - PrioritySetImpl& prioritySet() override { return priority_set_; } - const PrioritySet& prioritySet() const override { return priority_set_; } - - PrioritySetImpl priority_set_; -}; - -// Use this for additional convenience methods provided by MockPrioritySet. -class MockClusterMockPrioritySet : public MockCluster { -public: - MockClusterMockPrioritySet(); - ~MockClusterMockPrioritySet() override; - - // Upstream::Cluster - MockPrioritySet& prioritySet() override { return priority_set_; } - const PrioritySet& prioritySet() const override { return priority_set_; } - - NiceMock priority_set_; -}; - -class MockLoadBalancer : public LoadBalancer { -public: - MockLoadBalancer(); - ~MockLoadBalancer() override; - - // Upstream::LoadBalancer - MOCK_METHOD(HostConstSharedPtr, chooseHost, (LoadBalancerContext * context)); - - std::shared_ptr host_{new MockHost()}; -}; - -class MockThreadAwareLoadBalancer : public ThreadAwareLoadBalancer { -public: - MockThreadAwareLoadBalancer(); - ~MockThreadAwareLoadBalancer() override; - - // Upstream::ThreadAwareLoadBalancer - MOCK_METHOD(LoadBalancerFactorySharedPtr, factory, ()); - MOCK_METHOD(void, initialize, ()); -}; - -class MockThreadLocalCluster : public ThreadLocalCluster { -public: - MockThreadLocalCluster(); - ~MockThreadLocalCluster() override; - - // Upstream::ThreadLocalCluster - MOCK_METHOD(const PrioritySet&, prioritySet, ()); - MOCK_METHOD(ClusterInfoConstSharedPtr, info, ()); - MOCK_METHOD(LoadBalancer&, loadBalancer, ()); - - NiceMock cluster_; - NiceMock lb_; -}; - -class MockClusterManagerFactory : public ClusterManagerFactory { -public: - MockClusterManagerFactory(); - ~MockClusterManagerFactory() override; - - Secret::MockSecretManager& secretManager() override { return secret_manager_; }; - - MOCK_METHOD(ClusterManagerPtr, clusterManagerFromProto, - (const envoy::config::bootstrap::v3::Bootstrap& bootstrap)); - - MOCK_METHOD(Http::ConnectionPool::InstancePtr, allocateConnPool, - (Event::Dispatcher & dispatcher, HostConstSharedPtr host, ResourcePriority priority, - Http::Protocol protocol, const Network::ConnectionSocket::OptionsSharedPtr& options, - const Network::TransportSocketOptionsSharedPtr& transport_socket_options)); - - 
MOCK_METHOD(Tcp::ConnectionPool::InstancePtr, allocateTcpConnPool, - (Event::Dispatcher & dispatcher, HostConstSharedPtr host, ResourcePriority priority, - const Network::ConnectionSocket::OptionsSharedPtr& options, - Network::TransportSocketOptionsSharedPtr)); - - MOCK_METHOD((std::pair), clusterFromProto, - (const envoy::config::cluster::v3::Cluster& cluster, ClusterManager& cm, - Outlier::EventLoggerSharedPtr outlier_event_logger, bool added_via_api)); - - MOCK_METHOD(CdsApiPtr, createCds, - (const envoy::config::core::v3::ConfigSource& cds_config, ClusterManager& cm)); - -private: - NiceMock secret_manager_; -}; - -class MockClusterUpdateCallbacksHandle : public ClusterUpdateCallbacksHandle { -public: - MockClusterUpdateCallbacksHandle(); - ~MockClusterUpdateCallbacksHandle() override; -}; - -class MockClusterManager : public ClusterManager { -public: - explicit MockClusterManager(TimeSource& time_source); - MockClusterManager(); - ~MockClusterManager() override; - - ClusterUpdateCallbacksHandlePtr - addThreadLocalClusterUpdateCallbacks(ClusterUpdateCallbacks& callbacks) override { - return ClusterUpdateCallbacksHandlePtr{addThreadLocalClusterUpdateCallbacks_(callbacks)}; - } - - Host::CreateConnectionData tcpConnForCluster(const std::string& cluster, - LoadBalancerContext* context) override { - MockHost::MockCreateConnectionData data = tcpConnForCluster_(cluster, context); - return {Network::ClientConnectionPtr{data.connection_}, data.host_description_}; - } - - ClusterManagerFactory& clusterManagerFactory() override { return cluster_manager_factory_; } - - // Upstream::ClusterManager - MOCK_METHOD(bool, addOrUpdateCluster, - (const envoy::config::cluster::v3::Cluster& cluster, - const std::string& version_info)); - MOCK_METHOD(void, setPrimaryClustersInitializedCb, (PrimaryClustersReadyCallback)); - MOCK_METHOD(void, setInitializedCb, (InitializationCompleteCallback)); - MOCK_METHOD(void, initializeSecondaryClusters, - (const envoy::config::bootstrap::v3::Bootstrap& bootstrap)); - MOCK_METHOD(ClusterInfoMap, clusters, ()); - MOCK_METHOD(const ClusterSet&, primaryClusters, ()); - MOCK_METHOD(ThreadLocalCluster*, get, (absl::string_view cluster)); - MOCK_METHOD(Http::ConnectionPool::Instance*, httpConnPoolForCluster, - (const std::string& cluster, ResourcePriority priority, - absl::optional downstream_protocol, LoadBalancerContext* context)); - MOCK_METHOD(Tcp::ConnectionPool::Instance*, tcpConnPoolForCluster, - (const std::string& cluster, ResourcePriority priority, - LoadBalancerContext* context)); - MOCK_METHOD(MockHost::MockCreateConnectionData, tcpConnForCluster_, - (const std::string& cluster, LoadBalancerContext* context)); - MOCK_METHOD(Http::AsyncClient&, httpAsyncClientForCluster, (const std::string& cluster)); - MOCK_METHOD(bool, removeCluster, (const std::string& cluster)); - MOCK_METHOD(void, shutdown, ()); - MOCK_METHOD(const envoy::config::core::v3::BindConfig&, bindConfig, (), (const)); - MOCK_METHOD(Config::GrpcMuxSharedPtr, adsMux, ()); - MOCK_METHOD(Grpc::AsyncClientManager&, grpcAsyncClientManager, ()); - MOCK_METHOD(const std::string, versionInfo, (), (const)); - MOCK_METHOD(const absl::optional&, localClusterName, (), (const)); - MOCK_METHOD(ClusterUpdateCallbacksHandle*, addThreadLocalClusterUpdateCallbacks_, - (ClusterUpdateCallbacks & callbacks)); - MOCK_METHOD(Config::SubscriptionFactory&, subscriptionFactory, ()); - - NiceMock conn_pool_; - NiceMock async_client_; - NiceMock tcp_conn_pool_; - NiceMock thread_local_cluster_; - 
envoy::config::core::v3::BindConfig bind_config_; - std::shared_ptr> ads_mux_; - NiceMock async_client_manager_; - absl::optional local_cluster_name_; - NiceMock cluster_manager_factory_; - NiceMock subscription_factory_; -}; - -class MockHealthChecker : public HealthChecker { -public: - MockHealthChecker(); - ~MockHealthChecker() override; - - MOCK_METHOD(void, addHostCheckCompleteCb, (HostStatusCb callback)); - MOCK_METHOD(void, start, ()); - - void runCallbacks(Upstream::HostSharedPtr host, HealthTransition changed_state) { - for (const auto& callback : callbacks_) { - callback(host, changed_state); - } - } - - std::list callbacks_; -}; - -class MockHealthCheckEventLogger : public HealthCheckEventLogger { -public: - MOCK_METHOD(void, logEjectUnhealthy, - (envoy::data::core::v3::HealthCheckerType, const HostDescriptionConstSharedPtr&, - envoy::data::core::v3::HealthCheckFailureType)); - MOCK_METHOD(void, logAddHealthy, - (envoy::data::core::v3::HealthCheckerType, const HostDescriptionConstSharedPtr&, - bool)); - MOCK_METHOD(void, logUnhealthy, - (envoy::data::core::v3::HealthCheckerType, const HostDescriptionConstSharedPtr&, - envoy::data::core::v3::HealthCheckFailureType, bool)); - MOCK_METHOD(void, logDegraded, - (envoy::data::core::v3::HealthCheckerType, const HostDescriptionConstSharedPtr&)); - MOCK_METHOD(void, logNoLongerDegraded, - (envoy::data::core::v3::HealthCheckerType, const HostDescriptionConstSharedPtr&)); -}; - -class MockCdsApi : public CdsApi { -public: - MockCdsApi(); - ~MockCdsApi() override; - - MOCK_METHOD(void, initialize, ()); - MOCK_METHOD(void, setInitializedCb, (std::function callback)); - MOCK_METHOD(const std::string, versionInfo, (), (const)); - - std::function initialized_callback_; -}; - -class MockClusterUpdateCallbacks : public ClusterUpdateCallbacks { -public: - MockClusterUpdateCallbacks(); - ~MockClusterUpdateCallbacks() override; - - MOCK_METHOD(void, onClusterAddOrUpdate, (ThreadLocalCluster & cluster)); - MOCK_METHOD(void, onClusterRemoval, (const std::string& cluster_name)); -}; - -class MockClusterInfoFactory : public ClusterInfoFactory, Logger::Loggable { -public: - MockClusterInfoFactory(); - ~MockClusterInfoFactory() override; - - MOCK_METHOD(ClusterInfoConstSharedPtr, createClusterInfo, (const CreateClusterInfoParams&)); -}; - -class MockRetryHostPredicate : public RetryHostPredicate { -public: - MockRetryHostPredicate(); - ~MockRetryHostPredicate() override; - - MOCK_METHOD(bool, shouldSelectAnotherHost, (const Host& candidate_host)); - MOCK_METHOD(void, onHostAttempted, (HostDescriptionConstSharedPtr)); -}; - -class TestRetryHostPredicateFactory : public RetryHostPredicateFactory { -public: - RetryHostPredicateSharedPtr createHostPredicate(const Protobuf::Message&, uint32_t) override { - return std::make_shared>(); - } - - std::string name() const override { return "envoy.test_host_predicate"; } - ProtobufTypes::MessagePtr createEmptyConfigProto() override { - // Using Struct instead of a custom per-filter empty config proto - // This is only allowed in tests. 
- return ProtobufTypes::MessagePtr{new Envoy::ProtobufWkt::Struct()}; - } -}; - -class MockBasicResourceLimit : public ResourceLimit { -public: - MockBasicResourceLimit(); - ~MockBasicResourceLimit() override; - - MOCK_METHOD(bool, canCreate, ()); - MOCK_METHOD(void, inc, ()); - MOCK_METHOD(void, dec, ()); - MOCK_METHOD(void, decBy, (uint64_t)); - MOCK_METHOD(uint64_t, max, ()); - MOCK_METHOD(uint64_t, count, (), (const)); -}; - -} // namespace Upstream -} // namespace Envoy +#include "test/mocks/upstream/priority_set.h" +#include "test/mocks/upstream/retry_host_predicate.h" +#include "test/mocks/upstream/retry_priority.h" +#include "test/mocks/upstream/retry_priority_factory.h" +#include "test/mocks/upstream/test_retry_host_predicate_factory.h" +#include "test/mocks/upstream/thread_aware_load_balancer.h" +#include "test/mocks/upstream/thread_local_cluster.h" diff --git a/test/mocks/upstream/priority_set.cc b/test/mocks/upstream/priority_set.cc new file mode 100644 index 000000000000..31724e10ce24 --- /dev/null +++ b/test/mocks/upstream/priority_set.cc @@ -0,0 +1,54 @@ +#include "priority_set.h" + +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Upstream { + +using ::testing::_; +using ::testing::Invoke; +using ::testing::ReturnRef; + +MockPrioritySet::MockPrioritySet() { + getHostSet(0); + ON_CALL(*this, hostSetsPerPriority()).WillByDefault(ReturnRef(host_sets_)); + ON_CALL(testing::Const(*this), hostSetsPerPriority()).WillByDefault(ReturnRef(host_sets_)); + ON_CALL(*this, addMemberUpdateCb(_)) + .WillByDefault(Invoke([this](PrioritySet::MemberUpdateCb cb) -> Common::CallbackHandle* { + return member_update_cb_helper_.add(cb); + })); + ON_CALL(*this, addPriorityUpdateCb(_)) + .WillByDefault(Invoke([this](PrioritySet::PriorityUpdateCb cb) -> Common::CallbackHandle* { + return priority_update_cb_helper_.add(cb); + })); +} + +MockPrioritySet::~MockPrioritySet() = default; + +HostSet& MockPrioritySet::getHostSet(uint32_t priority) { + if (host_sets_.size() < priority + 1) { + for (size_t i = host_sets_.size(); i <= priority; ++i) { + auto host_set = new ::testing::NiceMock(i); + host_sets_.push_back(HostSetPtr{host_set}); + host_set->addMemberUpdateCb([this](uint32_t priority, const HostVector& hosts_added, + const HostVector& hosts_removed) { + runUpdateCallbacks(priority, hosts_added, hosts_removed); + }); + } + } + return *host_sets_[priority]; +} + +void MockPrioritySet::runUpdateCallbacks(uint32_t priority, const HostVector& hosts_added, + const HostVector& hosts_removed) { + member_update_cb_helper_.runCallbacks(hosts_added, hosts_removed); + priority_update_cb_helper_.runCallbacks(priority, hosts_added, hosts_removed); +} + +} // namespace Upstream + +} // namespace Envoy diff --git a/test/mocks/upstream/priority_set.h b/test/mocks/upstream/priority_set.h new file mode 100644 index 000000000000..d4c6ee5fd82a --- /dev/null +++ b/test/mocks/upstream/priority_set.h @@ -0,0 +1,42 @@ +#pragma once + +#include "envoy/upstream/upstream.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "host_set.h" + +namespace Envoy { +namespace Upstream { +class MockPrioritySet : public PrioritySet { +public: + MockPrioritySet(); + ~MockPrioritySet() override; + + HostSet& getHostSet(uint32_t priority); + void runUpdateCallbacks(uint32_t priority, const HostVector& hosts_added, + const HostVector& hosts_removed); + + MOCK_METHOD(Common::CallbackHandle*, addMemberUpdateCb, (MemberUpdateCb callback), (const)); + 
MOCK_METHOD(Common::CallbackHandle*, addPriorityUpdateCb, (PriorityUpdateCb callback), (const)); + MOCK_METHOD(const std::vector&, hostSetsPerPriority, (), (const)); + MOCK_METHOD(std::vector&, hostSetsPerPriority, ()); + MOCK_METHOD(void, updateHosts, + (uint32_t priority, UpdateHostsParams&& update_hosts_params, + LocalityWeightsConstSharedPtr locality_weights, const HostVector& hosts_added, + const HostVector& hosts_removed, absl::optional overprovisioning_factor)); + MOCK_METHOD(void, batchHostUpdate, (BatchUpdateCb&)); + + MockHostSet* getMockHostSet(uint32_t priority) { + getHostSet(priority); // Ensure the host set exists. + return reinterpret_cast(host_sets_[priority].get()); + } + + std::vector host_sets_; + Common::CallbackManager member_update_cb_helper_; + Common::CallbackManager + priority_update_cb_helper_; +}; +} // namespace Upstream + +} // namespace Envoy diff --git a/test/mocks/upstream/retry_host_predicate.cc b/test/mocks/upstream/retry_host_predicate.cc new file mode 100644 index 000000000000..a6233d8141c4 --- /dev/null +++ b/test/mocks/upstream/retry_host_predicate.cc @@ -0,0 +1,10 @@ +#include "retry_host_predicate.h" + +namespace Envoy { +namespace Upstream { +MockRetryHostPredicate::MockRetryHostPredicate() = default; + +MockRetryHostPredicate::~MockRetryHostPredicate() = default; + +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/retry_host_predicate.h b/test/mocks/upstream/retry_host_predicate.h new file mode 100644 index 000000000000..54ffb4749ee5 --- /dev/null +++ b/test/mocks/upstream/retry_host_predicate.h @@ -0,0 +1,19 @@ +#pragma once + +#include "envoy/upstream/retry.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Upstream { +class MockRetryHostPredicate : public RetryHostPredicate { +public: + MockRetryHostPredicate(); + ~MockRetryHostPredicate() override; + + MOCK_METHOD(bool, shouldSelectAnotherHost, (const Host& candidate_host)); + MOCK_METHOD(void, onHostAttempted, (HostDescriptionConstSharedPtr)); +}; +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/retry_priority.cc b/test/mocks/upstream/retry_priority.cc new file mode 100644 index 000000000000..9df9fe988f61 --- /dev/null +++ b/test/mocks/upstream/retry_priority.cc @@ -0,0 +1,7 @@ +#include "retry_priority.h" + +namespace Envoy { +namespace Upstream { +MockRetryPriority::~MockRetryPriority() = default; +} +} // namespace Envoy diff --git a/test/mocks/upstream/retry_priority.h b/test/mocks/upstream/retry_priority.h new file mode 100644 index 000000000000..708bfa0fc33e --- /dev/null +++ b/test/mocks/upstream/retry_priority.h @@ -0,0 +1,30 @@ +#pragma once + +#include "envoy/upstream/retry.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Upstream { +class MockRetryPriority : public RetryPriority { +public: + MockRetryPriority(const HealthyLoad& healthy_priority_load, + const DegradedLoad& degraded_priority_load) + : priority_load_({healthy_priority_load, degraded_priority_load}) {} + MockRetryPriority(const MockRetryPriority& other) : priority_load_(other.priority_load_) {} + ~MockRetryPriority() override; + + const HealthyAndDegradedLoad& determinePriorityLoad(const PrioritySet&, + const HealthyAndDegradedLoad&, + const PriorityMappingFunc&) override { + return priority_load_; + } + + MOCK_METHOD(void, onHostAttempted, (HostDescriptionConstSharedPtr)); + +private: + const HealthyAndDegradedLoad priority_load_; +}; +} // namespace Upstream +} // namespace Envoy 
diff --git a/test/mocks/upstream/retry_priority_factory.h b/test/mocks/upstream/retry_priority_factory.h new file mode 100644 index 000000000000..158359c22a48 --- /dev/null +++ b/test/mocks/upstream/retry_priority_factory.h @@ -0,0 +1,34 @@ +#pragma once + +#include "envoy/upstream/retry.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "retry_priority.h" + +namespace Envoy { +namespace Upstream { +using ::testing::NiceMock; +class MockRetryPriorityFactory : public RetryPriorityFactory { +public: + MockRetryPriorityFactory(const MockRetryPriority& retry_priority) + : retry_priority_(retry_priority) {} + RetryPrioritySharedPtr createRetryPriority(const Protobuf::Message&, + ProtobufMessage::ValidationVisitor&, + uint32_t) override { + return std::make_shared>(retry_priority_); + } + + std::string name() const override { return "envoy.test_retry_priority"; } + ProtobufTypes::MessagePtr createEmptyConfigProto() override { + // Using Struct instead of a custom per-filter empty config proto + // This is only allowed in tests. + return ProtobufTypes::MessagePtr{new Envoy::ProtobufWkt::Struct()}; + } + +private: + const MockRetryPriority& retry_priority_; +}; +} // namespace Upstream + +} // namespace Envoy diff --git a/test/mocks/upstream/test_retry_host_predicate_factory.h b/test/mocks/upstream/test_retry_host_predicate_factory.h new file mode 100644 index 000000000000..b436ae01bcac --- /dev/null +++ b/test/mocks/upstream/test_retry_host_predicate_factory.h @@ -0,0 +1,26 @@ +#pragma once + +#include "envoy/upstream/retry.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "retry_host_predicate.h" + +namespace Envoy { +namespace Upstream { +using ::testing::NiceMock; +class TestRetryHostPredicateFactory : public RetryHostPredicateFactory { +public: + RetryHostPredicateSharedPtr createHostPredicate(const Protobuf::Message&, uint32_t) override { + return std::make_shared>(); + } + + std::string name() const override { return "envoy.test_host_predicate"; } + ProtobufTypes::MessagePtr createEmptyConfigProto() override { + // Using Struct instead of a custom per-filter empty config proto + // This is only allowed in tests. 
+ return ProtobufTypes::MessagePtr{new Envoy::ProtobufWkt::Struct()}; + } +}; +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/thread_aware_load_balancer.cc b/test/mocks/upstream/thread_aware_load_balancer.cc new file mode 100644 index 000000000000..46ed300485f2 --- /dev/null +++ b/test/mocks/upstream/thread_aware_load_balancer.cc @@ -0,0 +1,10 @@ +#include "thread_aware_load_balancer.h" + +namespace Envoy { +namespace Upstream { +MockThreadAwareLoadBalancer::MockThreadAwareLoadBalancer() = default; + +MockThreadAwareLoadBalancer::~MockThreadAwareLoadBalancer() = default; + +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/thread_aware_load_balancer.h b/test/mocks/upstream/thread_aware_load_balancer.h new file mode 100644 index 000000000000..49b0cea8176f --- /dev/null +++ b/test/mocks/upstream/thread_aware_load_balancer.h @@ -0,0 +1,20 @@ +#pragma once + +#include "envoy/upstream/load_balancer.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Upstream { +class MockThreadAwareLoadBalancer : public ThreadAwareLoadBalancer { +public: + MockThreadAwareLoadBalancer(); + ~MockThreadAwareLoadBalancer() override; + + // Upstream::ThreadAwareLoadBalancer + MOCK_METHOD(LoadBalancerFactorySharedPtr, factory, ()); + MOCK_METHOD(void, initialize, ()); +}; +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/thread_local_cluster.cc b/test/mocks/upstream/thread_local_cluster.cc new file mode 100644 index 000000000000..0ab62164b6ff --- /dev/null +++ b/test/mocks/upstream/thread_local_cluster.cc @@ -0,0 +1,19 @@ +#include "thread_local_cluster.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Upstream { +using ::testing::Return; +using ::testing::ReturnRef; +MockThreadLocalCluster::MockThreadLocalCluster() { + ON_CALL(*this, prioritySet()).WillByDefault(ReturnRef(cluster_.priority_set_)); + ON_CALL(*this, info()).WillByDefault(Return(cluster_.info_)); + ON_CALL(*this, loadBalancer()).WillByDefault(ReturnRef(lb_)); +} + +MockThreadLocalCluster::~MockThreadLocalCluster() = default; + +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/thread_local_cluster.h b/test/mocks/upstream/thread_local_cluster.h new file mode 100644 index 000000000000..34eda63df6cb --- /dev/null +++ b/test/mocks/upstream/thread_local_cluster.h @@ -0,0 +1,27 @@ +#pragma once + +#include "envoy/upstream/thread_local_cluster.h" + +#include "cluster_priority_set.h" +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "load_balancer.h" + +namespace Envoy { +namespace Upstream { +using ::testing::NiceMock; +class MockThreadLocalCluster : public ThreadLocalCluster { +public: + MockThreadLocalCluster(); + ~MockThreadLocalCluster() override; + + // Upstream::ThreadLocalCluster + MOCK_METHOD(const PrioritySet&, prioritySet, ()); + MOCK_METHOD(ClusterInfoConstSharedPtr, info, ()); + MOCK_METHOD(LoadBalancer&, loadBalancer, ()); + + NiceMock cluster_; + NiceMock lb_; +}; +} // namespace Upstream +} // namespace Envoy From da58cfc3292c6291c04dd3f10895b09e297bef15 Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Thu, 23 Jul 2020 16:51:53 -0700 Subject: [PATCH 733/909] test: disable xds_fuzz (#12262) Until https://github.com/envoyproxy/envoy/issues/12258 is fixed. 
Signed-off-by: Matt Klein --- test/server/config_validation/BUILD | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/test/server/config_validation/BUILD b/test/server/config_validation/BUILD index 83a4937749dc..f2888cef0882 100644 --- a/test/server/config_validation/BUILD +++ b/test/server/config_validation/BUILD @@ -147,12 +147,13 @@ envoy_cc_test_library( ], ) -envoy_cc_fuzz_test( - name = "xds_fuzz_test", - srcs = ["xds_fuzz_test.cc"], - corpus = "xds_corpus", - deps = [ - ":xds_fuzz_lib", - "//source/common/protobuf:utility_lib", - ], -) +# https://github.com/envoyproxy/envoy/issues/12258 +# envoy_cc_fuzz_test( +# name = "xds_fuzz_test", +# srcs = ["xds_fuzz_test.cc"], +# corpus = "xds_corpus", +# deps = [ +# ":xds_fuzz_lib", +# "//source/common/protobuf:utility_lib", +# ], +# ) From b9a41ada797cbfb8da3ea895dff4a6c42ee48082 Mon Sep 17 00:00:00 2001 From: chaoqin-li1123 <55518381+chaoqin-li1123@users.noreply.github.com> Date: Thu, 23 Jul 2020 22:46:49 -0500 Subject: [PATCH 734/909] reduce number of srds update callbacks (#12118) Currently, in scope route discovery service, when resources are received from a management server, for each added, updated and removed scope route, a callback is fired. If several update callbacks are combined into a single update callback, less performance penalty will be incurred. This can be achieved by moving applyConfigUpdate out of the for loop and applying all the updates in a single callback. In this case, partial update won't be accepted both in sotw and delta srds. Scope key and scope name conflicts will be checked before any config update is applied. Risk Level: Low Testing: Modify a unit test related to change. Docs Changes: The behavior of delta srds will be changed, partial update won't be accepted. 
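As an illustration of the refactor described above — collecting the per-scope changes and posting them through a single applyConfigUpdate call instead of one call per scope — a minimal, self-contained C++ sketch might look like the following. The names used here (ScopeInfo, ThreadLocalScopedConfig, and the simplified applyConfigUpdate) are stand-ins for illustration only, not the actual Envoy classes touched by this patch.

#include <functional>
#include <iostream>
#include <string>
#include <vector>

// Stand-in for a parsed scoped route resource.
struct ScopeInfo {
  std::string name;
};

// Stand-in for the thread-local config that update callbacks mutate.
struct ThreadLocalScopedConfig {
  std::vector<std::string> scope_names;
};

// Stand-in for applyConfigUpdate(): in the real subscription each call posts a
// callback to every worker thread, so fewer calls means less work per update.
void applyConfigUpdate(ThreadLocalScopedConfig& config,
                       const std::function<void(ThreadLocalScopedConfig&)>& update) {
  update(config);
}

// Before: one applyConfigUpdate() per added/updated scope (N posted callbacks).
void addScopesPerItem(ThreadLocalScopedConfig& config, const std::vector<ScopeInfo>& scopes) {
  for (const auto& scope : scopes) {
    applyConfigUpdate(config, [&scope](ThreadLocalScopedConfig& c) {
      c.scope_names.push_back(scope.name);
    });
  }
}

// After: collect all updates first, then post a single callback that applies them together.
void addScopesBatched(ThreadLocalScopedConfig& config, const std::vector<ScopeInfo>& scopes) {
  std::vector<std::string> updated;
  for (const auto& scope : scopes) {
    updated.push_back(scope.name);
  }
  if (!updated.empty()) {
    applyConfigUpdate(config, [updated](ThreadLocalScopedConfig& c) {
      c.scope_names.insert(c.scope_names.end(), updated.begin(), updated.end());
    });
  }
}

int main() {
  ThreadLocalScopedConfig per_item_config;
  addScopesPerItem(per_item_config, {{"foo_scope"}, {"bar_scope"}});   // two callbacks posted
  ThreadLocalScopedConfig batched_config;
  addScopesBatched(batched_config, {{"foo_scope"}, {"bar_scope"}});    // one callback posted
  std::cout << "scopes applied in one batched callback: " << batched_config.scope_names.size()
            << "\n";
  return 0;
}

Beyond reducing the number of callbacks posted to worker threads, batching in this way also means the whole resource update is applied or rejected as a unit, which matches the note above that partial updates are no longer accepted in either the SotW or delta SRDS paths.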
Signed-off-by: chaoqinli --- source/common/router/scoped_config_impl.cc | 32 ++-- source/common/router/scoped_config_impl.h | 6 +- source/common/router/scoped_rds.cc | 176 ++++++++++-------- source/common/router/scoped_rds.h | 17 +- test/common/router/scoped_config_impl_test.cc | 20 +- test/common/router/scoped_rds_test.cc | 170 ++++++++++++++--- 6 files changed, 280 insertions(+), 141 deletions(-) diff --git a/source/common/router/scoped_config_impl.cc b/source/common/router/scoped_config_impl.cc index e5e51b763191..8cef0a7e4a30 100644 --- a/source/common/router/scoped_config_impl.cc +++ b/source/common/router/scoped_config_impl.cc @@ -116,23 +116,27 @@ ScopeKeyPtr ScopeKeyBuilderImpl::computeScopeKey(const Http::HeaderMap& headers) return std::make_unique(std::move(key)); } -void ScopedConfigImpl::addOrUpdateRoutingScope( - const ScopedRouteInfoConstSharedPtr& scoped_route_info) { - const auto iter = scoped_route_info_by_name_.find(scoped_route_info->scopeName()); - if (iter != scoped_route_info_by_name_.end()) { - ASSERT(scoped_route_info_by_key_.contains(iter->second->scopeKey().hash())); - scoped_route_info_by_key_.erase(iter->second->scopeKey().hash()); +void ScopedConfigImpl::addOrUpdateRoutingScopes( + const std::vector& scoped_route_infos) { + for (auto& scoped_route_info : scoped_route_infos) { + const auto iter = scoped_route_info_by_name_.find(scoped_route_info->scopeName()); + if (iter != scoped_route_info_by_name_.end()) { + ASSERT(scoped_route_info_by_key_.contains(iter->second->scopeKey().hash())); + scoped_route_info_by_key_.erase(iter->second->scopeKey().hash()); + } + scoped_route_info_by_name_[scoped_route_info->scopeName()] = scoped_route_info; + scoped_route_info_by_key_[scoped_route_info->scopeKey().hash()] = scoped_route_info; } - scoped_route_info_by_name_[scoped_route_info->scopeName()] = scoped_route_info; - scoped_route_info_by_key_[scoped_route_info->scopeKey().hash()] = scoped_route_info; } -void ScopedConfigImpl::removeRoutingScope(const std::string& scope_name) { - const auto iter = scoped_route_info_by_name_.find(scope_name); - if (iter != scoped_route_info_by_name_.end()) { - ASSERT(scoped_route_info_by_key_.contains(iter->second->scopeKey().hash())); - scoped_route_info_by_key_.erase(iter->second->scopeKey().hash()); - scoped_route_info_by_name_.erase(iter); +void ScopedConfigImpl::removeRoutingScopes(const std::vector& scope_names) { + for (std::string const& scope_name : scope_names) { + const auto iter = scoped_route_info_by_name_.find(scope_name); + if (iter != scoped_route_info_by_name_.end()) { + ASSERT(scoped_route_info_by_key_.contains(iter->second->scopeKey().hash())); + scoped_route_info_by_key_.erase(iter->second->scopeKey().hash()); + scoped_route_info_by_name_.erase(iter); + } } } diff --git a/source/common/router/scoped_config_impl.h b/source/common/router/scoped_config_impl.h index 575a097407d6..5a1703caf82c 100644 --- a/source/common/router/scoped_config_impl.h +++ b/source/common/router/scoped_config_impl.h @@ -183,8 +183,10 @@ class ScopedConfigImpl : public ScopedConfig { ScopedConfigImpl(ScopedRoutes::ScopeKeyBuilder&& scope_key_builder) : scope_key_builder_(std::move(scope_key_builder)) {} - void addOrUpdateRoutingScope(const ScopedRouteInfoConstSharedPtr& scoped_route_info); - void removeRoutingScope(const std::string& scope_name); + void + addOrUpdateRoutingScopes(const std::vector& scoped_route_infos); + + void removeRoutingScopes(const std::vector& scope_names); // Envoy::Router::ScopedConfig Router::ConfigConstSharedPtr 
getRouteConfig(const Http::HeaderMap& headers) const override; diff --git a/source/common/router/scoped_rds.cc b/source/common/router/scoped_rds.cc index 32dd71981aff..c7e4d3db44ff 100644 --- a/source/common/router/scoped_rds.cc +++ b/source/common/router/scoped_rds.cc @@ -19,6 +19,7 @@ #include "common/init/manager_impl.h" #include "common/init/watcher_impl.h" #include "common/router/rds_impl.h" +#include "common/router/scoped_config_impl.h" #include "absl/strings/str_join.h" @@ -136,56 +137,41 @@ ScopedRdsConfigSubscription::RdsRouteConfigProviderHelper::RdsRouteConfigProvide bool ScopedRdsConfigSubscription::addOrUpdateScopes( const std::vector& resources, Init::Manager& init_manager, - const std::string& version_info, std::vector& exception_msgs) { + const std::string& version_info) { bool any_applied = false; envoy::extensions::filters::network::http_connection_manager::v3::Rds rds; rds.mutable_config_source()->MergeFrom(rds_config_source_); - absl::flat_hash_set unique_resource_names; + std::vector updated_scopes; for (const auto& resource : resources) { - try { - // Explicit copy so that we can std::move later. - envoy::config::route::v3::ScopedRouteConfiguration scoped_route_config = - dynamic_cast( - resource.get().resource()); - const std::string scope_name = scoped_route_config.name(); - if (!unique_resource_names.insert(scope_name).second) { - throw EnvoyException( - fmt::format("duplicate scoped route configuration '{}' found", scope_name)); - } - // TODO(stevenzzz): Creating a new RdsRouteConfigProvider likely expensive, migrate RDS to - // config-provider-framework to make it light weight. - rds.set_route_config_name(scoped_route_config.route_configuration_name()); - auto rds_config_provider_helper = - std::make_unique(*this, scope_name, rds, init_manager); - auto scoped_route_info = std::make_shared( - std::move(scoped_route_config), rds_config_provider_helper->routeConfig()); - // Detect if there is key conflict between two scopes, in which case Envoy won't be able to - // tell which RouteConfiguration to use. Reject the second scope in the delta form API. - auto iter = scope_name_by_hash_.find(scoped_route_info->scopeKey().hash()); - if (iter != scope_name_by_hash_.end()) { - if (iter->second != scoped_route_info->scopeName()) { - throw EnvoyException( - fmt::format("scope key conflict found, first scope is '{}', second scope is '{}'", - iter->second, scoped_route_info->scopeName())); - } - } - // NOTE: delete previous route provider if any. - route_provider_by_scope_.insert({scope_name, std::move(rds_config_provider_helper)}); - scope_name_by_hash_[scoped_route_info->scopeKey().hash()] = scoped_route_info->scopeName(); - scoped_route_map_[scoped_route_info->scopeName()] = scoped_route_info; - applyConfigUpdate([scoped_route_info](ConfigProvider::ConfigConstSharedPtr config) - -> ConfigProvider::ConfigConstSharedPtr { - auto* thread_local_scoped_config = - const_cast(static_cast(config.get())); - thread_local_scoped_config->addOrUpdateRoutingScope(scoped_route_info); - return config; - }); - any_applied = true; - ENVOY_LOG(debug, "srds: add/update scoped_route '{}', version: {}", - scoped_route_info->scopeName(), version_info); - } catch (const EnvoyException& e) { - exception_msgs.emplace_back(absl::StrCat("", e.what())); - } + // Explicit copy so that we can std::move later. 
+ envoy::config::route::v3::ScopedRouteConfiguration scoped_route_config = + dynamic_cast( + resource.get().resource()); + const std::string scope_name = scoped_route_config.name(); + // TODO(stevenzzz): Creating a new RdsRouteConfigProvider likely expensive, migrate RDS to + // config-provider-framework to make it light weight. + rds.set_route_config_name(scoped_route_config.route_configuration_name()); + auto rds_config_provider_helper = + std::make_unique(*this, scope_name, rds, init_manager); + auto scoped_route_info = std::make_shared( + std::move(scoped_route_config), rds_config_provider_helper->routeConfig()); + route_provider_by_scope_.insert({scope_name, std::move(rds_config_provider_helper)}); + scope_name_by_hash_[scoped_route_info->scopeKey().hash()] = scoped_route_info->scopeName(); + scoped_route_map_[scoped_route_info->scopeName()] = scoped_route_info; + updated_scopes.push_back(scoped_route_info); + any_applied = true; + ENVOY_LOG(debug, "srds: queueing add/update of scoped_route '{}', version: {}", + scoped_route_info->scopeName(), version_info); + } + + if (!updated_scopes.empty()) { + applyConfigUpdate([updated_scopes](ConfigProvider::ConfigConstSharedPtr config) + -> ConfigProvider::ConfigConstSharedPtr { + auto* thread_local_scoped_config = + const_cast(static_cast(config.get())); + thread_local_scoped_config->addOrUpdateRoutingScopes(updated_scopes); + return config; + }); } return any_applied; } @@ -195,6 +181,7 @@ ScopedRdsConfigSubscription::removeScopes( const Protobuf::RepeatedPtrField& scope_names, const std::string& version_info) { std::list to_be_removed_rds_providers; + std::vector removed_scope_names; for (const auto& scope_name : scope_names) { auto iter = scoped_route_map_.find(scope_name); if (iter != scoped_route_map_.end()) { @@ -206,16 +193,20 @@ ScopedRdsConfigSubscription::removeScopes( } scope_name_by_hash_.erase(iter->second->scopeKey().hash()); scoped_route_map_.erase(iter); - applyConfigUpdate([scope_name](ConfigProvider::ConfigConstSharedPtr config) - -> ConfigProvider::ConfigConstSharedPtr { - auto* thread_local_scoped_config = - const_cast(static_cast(config.get())); - thread_local_scoped_config->removeRoutingScope(scope_name); - return config; - }); - ENVOY_LOG(debug, "srds: remove scoped route '{}', version: {}", scope_name, version_info); + removed_scope_names.push_back(scope_name); + ENVOY_LOG(debug, "srds: queueing removal of scoped route '{}', version: {}", scope_name, + version_info); } } + if (!removed_scope_names.empty()) { + applyConfigUpdate([removed_scope_names](ConfigProvider::ConfigConstSharedPtr config) + -> ConfigProvider::ConfigConstSharedPtr { + auto* thread_local_scoped_config = + const_cast(static_cast(config.get())); + thread_local_scoped_config->removeRoutingScopes(removed_scope_names); + return config; + }); + } return to_be_removed_rds_providers; } @@ -224,7 +215,6 @@ void ScopedRdsConfigSubscription::onConfigUpdate( const Protobuf::RepeatedPtrField& removed_resources, const std::string& version_info) { // NOTE: deletes are done before adds/updates. - absl::flat_hash_map to_be_removed_scopes; // Destruction of resume_rds will lift the floodgate for new RDS subscriptions. 
// Note in the case of partial acceptance, accepted RDS subscriptions should be started @@ -263,25 +253,28 @@ void ScopedRdsConfigSubscription::onConfigUpdate( }); } - std::vector exception_msgs; + std::string exception_msg; + Protobuf::RepeatedPtrField clean_removed_resources = + detectUpdateConflictAndCleanupRemoved(added_resources, removed_resources, exception_msg); + if (!exception_msg.empty()) { + throw EnvoyException(fmt::format("Error adding/updating scoped route(s): {}", exception_msg)); + } + // Do not delete RDS config providers just yet, in case the to be deleted RDS subscriptions could // be reused by some to be added scopes. std::list - to_be_removed_rds_providers = removeScopes(removed_resources, version_info); + to_be_removed_rds_providers = removeScopes(clean_removed_resources, version_info); + bool any_applied = addOrUpdateScopes(added_resources, (srds_init_mgr == nullptr ? localInitManager() : *srds_init_mgr), - version_info, exception_msgs) || + version_info) || !to_be_removed_rds_providers.empty(); ConfigSubscriptionCommonBase::onConfigUpdate(); if (any_applied) { setLastConfigInfo(absl::optional({absl::nullopt, version_info})); } stats_.config_reload_.inc(); - if (!exception_msgs.empty()) { - throw EnvoyException(fmt::format("Error adding/updating scoped route(s): {}", - absl::StrJoin(exception_msgs, ", "))); - } } void ScopedRdsConfigSubscription::onRdsConfigUpdate(const std::string& scope_name, @@ -298,7 +291,7 @@ void ScopedRdsConfigSubscription::onRdsConfigUpdate(const std::string& scope_nam -> ConfigProvider::ConfigConstSharedPtr { auto* thread_local_scoped_config = const_cast(static_cast(config.get())); - thread_local_scoped_config->addOrUpdateRoutingScope(new_scoped_route_info); + thread_local_scoped_config->addOrUpdateRoutingScopes({new_scoped_route_info}); return config; }); } @@ -308,9 +301,37 @@ void ScopedRdsConfigSubscription::onRdsConfigUpdate(const std::string& scope_nam void ScopedRdsConfigSubscription::onConfigUpdate( const std::vector& resources, const std::string& version_info) { + Protobuf::RepeatedPtrField to_remove_repeated; + for (const auto& scoped_route : scoped_route_map_) { + *to_remove_repeated.Add() = scoped_route.first; + } + onConfigUpdate(resources, to_remove_repeated, version_info); +} + +Protobuf::RepeatedPtrField +ScopedRdsConfigSubscription::detectUpdateConflictAndCleanupRemoved( + const std::vector& resources, + const Protobuf::RepeatedPtrField& removed_resources, std::string& exception_msg) { + Protobuf::RepeatedPtrField clean_removed_resources; + // All the scope names to be removed or updated. + absl::flat_hash_set updated_or_removed_scopes; + for (const std::string& removed_resource : removed_resources) { + updated_or_removed_scopes.insert(removed_resource); + } + for (const auto& resource : resources) { + const auto& scoped_route = + dynamic_cast( + resource.get().resource()); + updated_or_removed_scopes.insert(scoped_route.name()); + } + + absl::flat_hash_map scope_name_by_hash = scope_name_by_hash_; + absl::erase_if(scope_name_by_hash, [&updated_or_removed_scopes](const auto& key_name) { + auto const& [key, name] = key_name; + return updated_or_removed_scopes.contains(name); + }); absl::flat_hash_map scoped_routes; - absl::flat_hash_map scope_name_by_key_hash; for (const auto& resource : resources) { // Throws (thus rejects all) on any error. 
const auto& scoped_route = @@ -319,28 +340,27 @@ void ScopedRdsConfigSubscription::onConfigUpdate( const std::string& scope_name = scoped_route.name(); auto scope_config_inserted = scoped_routes.try_emplace(scope_name, std::move(scoped_route)); if (!scope_config_inserted.second) { - throw EnvoyException( - fmt::format("duplicate scoped route configuration '{}' found", scope_name)); + exception_msg = fmt::format("duplicate scoped route configuration '{}' found", scope_name); + return clean_removed_resources; } const envoy::config::route::v3::ScopedRouteConfiguration& scoped_route_config = scope_config_inserted.first->second; const uint64_t key_fingerprint = MessageUtil::hash(scoped_route_config.key()); - if (!scope_name_by_key_hash.try_emplace(key_fingerprint, scope_name).second) { - throw EnvoyException( + if (!scope_name_by_hash.try_emplace(key_fingerprint, scope_name).second) { + exception_msg = fmt::format("scope key conflict found, first scope is '{}', second scope is '{}'", - scope_name_by_key_hash[key_fingerprint], scope_name)); + scope_name_by_hash[key_fingerprint], scope_name); + return clean_removed_resources; } } - ScopedRouteMap scoped_routes_to_remove = scoped_route_map_; - Protobuf::RepeatedPtrField to_remove_repeated; - for (auto& iter : scoped_routes) { - const std::string& scope_name = iter.first; - scoped_routes_to_remove.erase(scope_name); - } - for (const auto& scoped_route : scoped_routes_to_remove) { - *to_remove_repeated.Add() = scoped_route.first; + + // only remove resources that is not going to be updated. + for (const std::string& removed_resource : removed_resources) { + if (!scoped_routes.contains(removed_resource)) { + *clean_removed_resources.Add() = removed_resource; + } } - onConfigUpdate(resources, to_remove_repeated, version_info); + return clean_removed_resources; } ScopedRdsConfigProvider::ScopedRdsConfigProvider( diff --git a/source/common/router/scoped_rds.h b/source/common/router/scoped_rds.h index 66b2d53301f7..b00ab1a4ef8a 100644 --- a/source/common/router/scoped_rds.h +++ b/source/common/router/scoped_rds.h @@ -136,8 +136,7 @@ class ScopedRdsConfigSubscription // during updating, the exception message is collected via the exception messages vector. // Returns true if any scope updated, false otherwise. bool addOrUpdateScopes(const std::vector& resources, - Init::Manager& init_manager, const std::string& version_info, - std::vector& exception_msgs); + Init::Manager& init_manager, const std::string& version_info); // Removes given scopes from the managed set of scopes. // Returns a list of to be removed helpers which is temporally held in the onConfigUpdate method, // to make sure new scopes sharing the same RDS source configs could reuse the subscriptions. @@ -148,12 +147,18 @@ class ScopedRdsConfigSubscription // Envoy::Config::DeltaConfigSubscriptionInstance void start() override { subscription_->start({}); } + // Detect scope name and scope key conflict between added scopes or between added scopes and old + // scopes. Some removed scopes may be in added resources list, instead of being removed, they + // should be updated, so only return scope names that will disappear after update. If conflict + // detected, fill exception_msg with information about scope conflict and return. 
+ Protobuf::RepeatedPtrField detectUpdateConflictAndCleanupRemoved( + const std::vector& added_resources, + const Protobuf::RepeatedPtrField& removed_resources, std::string& exception_msg); + // Envoy::Config::SubscriptionCallbacks - // NOTE: state-of-the-world form onConfigUpdate(resources, version_info) will throw an - // EnvoyException on any error and essentially reject an update. While the Delta form - // onConfigUpdate(added_resources, removed_resources, version_info) by design will partially - // accept correct RouteConfiguration from management server. + // NOTE: both delta form and state-of-the-world form onConfigUpdate(resources, version_info) will + // throw an EnvoyException on any error and essentially reject an update. void onConfigUpdate(const std::vector& resources, const std::string& version_info) override; void onConfigUpdate(const std::vector& added_resources, diff --git a/test/common/router/scoped_config_impl_test.cc b/test/common/router/scoped_config_impl_test.cc index e84540850b9b..cc7adfd1adcb 100644 --- a/test/common/router/scoped_config_impl_test.cc +++ b/test/common/router/scoped_config_impl_test.cc @@ -445,8 +445,8 @@ class ScopedConfigImplTest : public testing::Test { // Test a ScopedConfigImpl returns the correct route Config. TEST_F(ScopedConfigImplTest, PickRoute) { scoped_config_impl_ = std::make_unique(std::move(key_builder_config_)); - scoped_config_impl_->addOrUpdateRoutingScope(scope_info_a_); - scoped_config_impl_->addOrUpdateRoutingScope(scope_info_b_); + scoped_config_impl_->addOrUpdateRoutingScopes({scope_info_a_}); + scoped_config_impl_->addOrUpdateRoutingScopes({scope_info_b_}); // Key (foo, bar) maps to scope_info_a_. ConfigConstSharedPtr route_config = scoped_config_impl_->getRouteConfig(TestRequestHeaderMapImpl{ @@ -482,19 +482,21 @@ TEST_F(ScopedConfigImplTest, Update) { EXPECT_EQ(scoped_config_impl_->getRouteConfig(headers), nullptr); // Add scope_key (bar, baz). - scoped_config_impl_->addOrUpdateRoutingScope(scope_info_b_); + scoped_config_impl_->addOrUpdateRoutingScopes({scope_info_b_}); + // scope_info_a_ not found EXPECT_EQ(scoped_config_impl_->getRouteConfig(headers), nullptr); + // scope_info_b_ found EXPECT_EQ(scoped_config_impl_->getRouteConfig(TestRequestHeaderMapImpl{ {"foo_header", ",,key=v,bar=bar,"}, {"bar_header", ";val1;baz"}}), scope_info_b_->routeConfig()); // Add scope_key (foo, bar). - scoped_config_impl_->addOrUpdateRoutingScope(scope_info_a_); + scoped_config_impl_->addOrUpdateRoutingScopes({scope_info_a_}); // Found scope_info_a_. EXPECT_EQ(scoped_config_impl_->getRouteConfig(headers), scope_info_a_->routeConfig()); // Update scope foo_scope. - scoped_config_impl_->addOrUpdateRoutingScope(scope_info_a_v2_); + scoped_config_impl_->addOrUpdateRoutingScopes({scope_info_a_v2_}); EXPECT_EQ(scoped_config_impl_->getRouteConfig(headers), nullptr); // foo_scope now is keyed by (xyz, xyz). @@ -503,15 +505,13 @@ TEST_F(ScopedConfigImplTest, Update) { scope_info_a_v2_->routeConfig()); // Remove scope "foo_scope". - scoped_config_impl_->removeRoutingScope("foo_scope"); + scoped_config_impl_->removeRoutingScopes({"foo_scope"}); // scope_info_a_ is gone. EXPECT_EQ(scoped_config_impl_->getRouteConfig(headers), nullptr); // Now delete some non-existent scopes. 
- EXPECT_NO_THROW(scoped_config_impl_->removeRoutingScope("foo_scope1")); - EXPECT_NO_THROW(scoped_config_impl_->removeRoutingScope("base_scope")); - EXPECT_NO_THROW(scoped_config_impl_->removeRoutingScope("bluh_scope")); - EXPECT_NO_THROW(scoped_config_impl_->removeRoutingScope("xyz_scope")); + EXPECT_NO_THROW(scoped_config_impl_->removeRoutingScopes( + {"foo_scope1", "base_scope", "bluh_scope", "xyz_scope"})); } } // namespace diff --git a/test/common/router/scoped_rds_test.cc b/test/common/router/scoped_rds_test.cc index f2f13ed03dec..b00466a702a4 100644 --- a/test/common/router/scoped_rds_test.cc +++ b/test/common/router/scoped_rds_test.cc @@ -251,8 +251,8 @@ route_configuration_name: foo_routes // Verify the config is a ScopedConfigImpl instance, both scopes point to "" as RDS hasn't kicked // in yet(NullConfigImpl returned). - EXPECT_NE(getScopedRdsProvider(), nullptr); - EXPECT_NE(getScopedRdsProvider()->config(), nullptr); + ASSERT_THAT(getScopedRdsProvider(), Not(IsNull())); + ASSERT_THAT(getScopedRdsProvider()->config(), Not(IsNull())); EXPECT_EQ(getScopedRdsProvider() ->config() ->getRouteConfig(TestRequestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}) @@ -327,8 +327,8 @@ route_configuration_name: foo_routes // Verify the config is a ScopedConfigImpl instance, both scopes point to "" as RDS hasn't kicked // in yet(NullConfigImpl returned). - EXPECT_NE(getScopedRdsProvider(), nullptr); - EXPECT_NE(getScopedRdsProvider()->config(), nullptr); + ASSERT_THAT(getScopedRdsProvider(), Not(IsNull())); + ASSERT_THAT(getScopedRdsProvider()->config(), Not(IsNull())); EXPECT_EQ(getScopedRdsProvider() ->config() ->getRouteConfig(TestRequestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}) @@ -404,8 +404,8 @@ route_configuration_name: foo_routes 0UL, server_factory_context_.scope_.counter("foo.scoped_rds.foo_scoped_routes.config_reload") .value()); // Scope key "x-foo-key" points to nowhere. - EXPECT_NE(getScopedRdsProvider(), nullptr); - EXPECT_NE(getScopedRdsProvider()->config(), nullptr); + ASSERT_THAT(getScopedRdsProvider(), Not(IsNull())); + ASSERT_THAT(getScopedRdsProvider()->config(), Not(IsNull())); EXPECT_THAT(getScopedRdsProvider()->config()->getRouteConfig( TestRequestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}), IsNull()); @@ -433,29 +433,25 @@ route_configuration_name: foo_routes - string_key: x-foo-key )EOF"; const auto resource_2 = parseScopedRouteConfigurationFromYaml(config_yaml2); - init_watcher_.expectReady(); // Partial success gets the subscription ready. + init_watcher_.expectReady().Times(0); // The onConfigUpdate will simply throw an exception. context_init_manager_.initialize(init_watcher_); const auto decoded_resources = TestUtility::decodeResources({resource, resource_2}); EXPECT_THROW_WITH_REGEX( - srds_subscription_->onConfigUpdate(decoded_resources.refvec_, {}, "2"), EnvoyException, + srds_subscription_->onConfigUpdate(decoded_resources.refvec_, "1"), EnvoyException, ".*scope key conflict found, first scope is 'foo_scope', second scope is 'foo_scope2'"); EXPECT_EQ( - // Partially reject. - 1UL, server_factory_context_.scope_.counter("foo.scoped_rds.foo_scoped_routes.config_reload") + // Fully rejected. + 0UL, server_factory_context_.scope_.counter("foo.scoped_rds.foo_scoped_routes.config_reload") .value()); - // foo_scope update is applied. - EXPECT_EQ(getScopedRouteMap().size(), 1UL); - EXPECT_EQ(getScopedRouteMap().count("foo_scope"), 1); - // Scope key "x-foo-key" points to foo_routes due to partial rejection. 
- pushRdsConfig({"foo_routes"}, "111"); // Push some real route configuration. - EXPECT_EQ(1UL, - server_factory_context_.scope_.counter("foo.rds.foo_routes.config_reload").value()); - EXPECT_EQ(getScopedRdsProvider() - ->config() - ->getRouteConfig(TestRequestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}) - ->name(), - "foo_routes"); + // Scope key "x-foo-key" points to nowhere. + ASSERT_THAT(getScopedRdsProvider(), Not(IsNull())); + ASSERT_THAT(getScopedRdsProvider()->config(), Not(IsNull())); + EXPECT_THAT(getScopedRdsProvider()->config()->getRouteConfig( + TestRequestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}), + IsNull()); + EXPECT_EQ(server_factory_context_.scope_.counter("foo.rds.foo_routes.config_reload").value(), + 0UL); } // Tests that scope-key conflict resources in different config updates are handled correctly. @@ -486,8 +482,8 @@ route_configuration_name: bar_routes server_factory_context_.scope_.counter("foo.scoped_rds.foo_scoped_routes.config_reload") .value()); // Scope key "x-foo-key" points to nowhere. - EXPECT_NE(getScopedRdsProvider(), nullptr); - EXPECT_NE(getScopedRdsProvider()->config(), nullptr); + ASSERT_THAT(getScopedRdsProvider(), Not(IsNull())); + ASSERT_THAT(getScopedRdsProvider()->config(), Not(IsNull())); // No RDS "foo_routes" config push happened yet, Router::NullConfig is returned. EXPECT_THAT(getScopedRdsProvider() ->config() @@ -596,10 +592,11 @@ route_configuration_name: foo_routes const auto decoded_resources = TestUtility::decodeResources({resource, resource}); EXPECT_THROW_WITH_MESSAGE(srds_subscription_->onConfigUpdate(decoded_resources.refvec_, "1"), EnvoyException, - "duplicate scoped route configuration 'foo_scope' found"); + "Error adding/updating scoped route(s): duplicate scoped route " + "configuration 'foo_scope' found"); } -// Tests that only one resource is provided during a config update. +// Tests duplicate resources in the same update, should be fully rejected. TEST_F(ScopedRdsTest, InvalidDuplicateResourceDelta) { setup(); init_watcher_.expectReady().Times(0); @@ -619,12 +616,17 @@ route_configuration_name: foo_routes "Error adding/updating scoped route(s): duplicate scoped route configuration 'foo_scope' " "found"); EXPECT_EQ( - // Partially reject. - 1UL, server_factory_context_.scope_.counter("foo.scoped_rds.foo_scoped_routes.config_reload") + // Fully rejected. + 0UL, server_factory_context_.scope_.counter("foo.scoped_rds.foo_scoped_routes.config_reload") .value()); - // foo_scope update is applied. - EXPECT_EQ(getScopedRouteMap().size(), 1UL); - EXPECT_EQ(getScopedRouteMap().count("foo_scope"), 1); + // Scope key "x-foo-key" points to nowhere. + ASSERT_THAT(getScopedRdsProvider(), Not(IsNull())); + ASSERT_THAT(getScopedRdsProvider()->config(), Not(IsNull())); + EXPECT_THAT(getScopedRdsProvider()->config()->getRouteConfig( + TestRequestHeaderMapImpl{{"Addr", "x-foo-key;x-foo-key"}}), + IsNull()); + EXPECT_EQ(server_factory_context_.scope_.counter("foo.rds.foo_routes.config_reload").value(), + 0UL); } // Tests a config update failure. @@ -825,6 +827,112 @@ route_configuration_name: static-foo-route-config ".*"); } +// Tests whether scope key conflict with updated scopes is ignored. 
+TEST_F(ScopedRdsTest, IgnoreConflictWithUpdatedScopeDelta) { + setup(); + const std::string config_yaml = R"EOF( +name: foo_scope +route_configuration_name: foo_routes +key: + fragments: + - string_key: x-foo-key +)EOF"; + const auto resource = parseScopedRouteConfigurationFromYaml(config_yaml); + const std::string config_yaml2 = R"EOF( +name: bar_scope +route_configuration_name: foo_routes +key: + fragments: + - string_key: x-bar-key +)EOF"; + const auto resource_2 = parseScopedRouteConfigurationFromYaml(config_yaml2); + + // Delta API. + const auto decoded_resources = TestUtility::decodeResources({resource, resource_2}); + context_init_manager_.initialize(init_watcher_); + EXPECT_NO_THROW(srds_subscription_->onConfigUpdate(decoded_resources.refvec_, {}, "1")); + EXPECT_EQ(1UL, + server_factory_context_.scope_.counter("foo.scoped_rds.foo_scoped_routes.config_reload") + .value()); + EXPECT_EQ(getScopedRouteMap().size(), 2); + + const std::string config_yaml3 = R"EOF( +name: bar_scope +route_configuration_name: foo_routes +key: + fragments: + - string_key: x-foo-key +)EOF"; + const auto resource_3 = parseScopedRouteConfigurationFromYaml(config_yaml); + const std::string config_yaml4 = R"EOF( +name: foo_scope +route_configuration_name: foo_routes +key: + fragments: + - string_key: x-bar-key +)EOF"; + const auto resource_4 = parseScopedRouteConfigurationFromYaml(config_yaml2); + const auto decoded_resources_2 = TestUtility::decodeResources({resource_3, resource_4}); + EXPECT_NO_THROW(srds_subscription_->onConfigUpdate(decoded_resources_2.refvec_, {}, "2")); + EXPECT_EQ(2UL, + server_factory_context_.scope_.counter("foo.scoped_rds.foo_scoped_routes.config_reload") + .value()); + EXPECT_EQ(getScopedRouteMap().size(), 2); +} + +// Tests whether scope key conflict with updated scopes is ignored. +TEST_F(ScopedRdsTest, IgnoreConflictWithUpdatedScopeSotW) { + setup(); + const std::string config_yaml = R"EOF( +name: foo_scope +route_configuration_name: foo_routes +key: + fragments: + - string_key: x-foo-key +)EOF"; + const auto resource = parseScopedRouteConfigurationFromYaml(config_yaml); + const std::string config_yaml2 = R"EOF( +name: bar_scope +route_configuration_name: foo_routes +key: + fragments: + - string_key: x-bar-key +)EOF"; + const auto resource_2 = parseScopedRouteConfigurationFromYaml(config_yaml2); + + // Delta API. 
+ const auto decoded_resources = TestUtility::decodeResources({resource, resource_2}); + context_init_manager_.initialize(init_watcher_); + EXPECT_NO_THROW(srds_subscription_->onConfigUpdate(decoded_resources.refvec_, "1")); + EXPECT_EQ(1UL, + server_factory_context_.scope_.counter("foo.scoped_rds.foo_scoped_routes.config_reload") + .value()); + EXPECT_EQ(getScopedRouteMap().size(), 2); + + const std::string config_yaml3 = R"EOF( +name: bar_scope +route_configuration_name: foo_routes +key: + fragments: + - string_key: x-foo-key +)EOF"; + const auto resource_3 = parseScopedRouteConfigurationFromYaml(config_yaml); + const std::string config_yaml4 = R"EOF( +name: foo_scope +route_configuration_name: foo_routes +key: + fragments: + - string_key: x-bar-key +)EOF"; + const auto resource_4 = parseScopedRouteConfigurationFromYaml(config_yaml2); + const auto decoded_resources_2 = TestUtility::decodeResources({resource_3, resource_4}); + EXPECT_NO_THROW(srds_subscription_->onConfigUpdate(decoded_resources_2.refvec_, "2")); + EXPECT_EQ(2UL, + server_factory_context_.scope_.counter("foo.scoped_rds.foo_scoped_routes.config_reload") + .value()); + EXPECT_EQ(getScopedRouteMap().size(), 2); +} + } // namespace } // namespace Router } // namespace Envoy From 3cd9cfb8de623d56f1160ad0173906f106d37152 Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Thu, 23 Jul 2020 21:03:54 -0700 Subject: [PATCH 735/909] load stats: fix integration test flake (#12265) Waiting on a load stats response can race with resetting the counters when initializing a watch. Moving the counter increment prevents the race. Fixes https://github.com/envoyproxy/envoy/issues/11784 Signed-off-by: Matt Klein --- source/common/upstream/load_stats_reporter.cc | 2 +- test/integration/README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/source/common/upstream/load_stats_reporter.cc b/source/common/upstream/load_stats_reporter.cc index 87e3982e8db9..1b4556487d63 100644 --- a/source/common/upstream/load_stats_reporter.cc +++ b/source/common/upstream/load_stats_reporter.cc @@ -140,9 +140,9 @@ void LoadStatsReporter::onReceiveInitialMetadata(Http::ResponseHeaderMapPtr&& me void LoadStatsReporter::onReceiveMessage( std::unique_ptr&& message) { ENVOY_LOG(debug, "New load report epoch: {}", message->DebugString()); - stats_.requests_.inc(); message_ = std::move(message); startLoadReportPeriod(); + stats_.requests_.inc(); } void LoadStatsReporter::startLoadReportPeriod() { diff --git a/test/integration/README.md b/test/integration/README.md index 5554b5ad0564..d3cffc3d5412 100644 --- a/test/integration/README.md +++ b/test/integration/README.md @@ -160,7 +160,7 @@ The full command might look something like ``` bazel test //test/integration:http2_upstream_integration_test \ --test_arg=--gtest_filter="IpVersions/Http2UpstreamIntegrationTest.RouterRequestAndResponseWithBodyNoBuffer/IPv6" \ ---jobs 60 --local_ram_resources=1000000000 --local_cpu_resources=1000000000 --runs_per_test=1000 --test_arg="-l trace" +--jobs 60 --local_test_jobs=60 --runs_per_test=1000 --test_arg="-l trace" ``` ## Debugging test flakes From dcf451c0a5f5fb544d2ae4116a5778c6b24af049 Mon Sep 17 00:00:00 2001 From: antonio Date: Fri, 24 Jul 2020 00:05:20 -0400 Subject: [PATCH 736/909] benchmark: Skip expensive setup and benchmark to avoid filter_chain_benchmark_test_benchmark_test timeouts under tsan (#12264) also, bump up the googlebenchmark version to pickup the fix to SkipWithError, https://github.com/google/benchmark/pull/938 Signed-off-by: 
Antonio Vicente --- bazel/repository_locations.bzl | 6 ++--- test/server/filter_chain_benchmark_test.cc | 29 +++++++++++++++++----- 2 files changed, 26 insertions(+), 9 deletions(-) diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 5df269d463ae..7e1f4d8c6141 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -227,9 +227,9 @@ DEPENDENCY_REPOSITORIES = dict( cpe = "N/A", ), com_github_google_benchmark = dict( - sha256 = "3c6a165b6ecc948967a1ead710d4a181d7b0fbcaa183ef7ea84604994966221a", - strip_prefix = "benchmark-1.5.0", - urls = ["https://github.com/google/benchmark/archive/v1.5.0.tar.gz"], + sha256 = "23082937d1663a53b90cb5b61df4bcc312f6dee7018da78ba00dd6bd669dfef2", + strip_prefix = "benchmark-1.5.1", + urls = ["https://github.com/google/benchmark/archive/v1.5.1.tar.gz"], use_category = ["test"], ), com_github_libevent_libevent = dict( diff --git a/test/server/filter_chain_benchmark_test.cc b/test/server/filter_chain_benchmark_test.cc index bda0e1d81b60..dbd24ecff6cc 100644 --- a/test/server/filter_chain_benchmark_test.cc +++ b/test/server/filter_chain_benchmark_test.cc @@ -11,6 +11,7 @@ #include "extensions/transport_sockets/well_known_names.h" +#include "test/benchmark/main.h" #include "test/mocks/network/mocks.h" #include "test/mocks/server/factory_context.h" #include "test/test_common/environment.h" @@ -178,11 +179,9 @@ const char YamlSingleDstPortBottom[] = R"EOF( - filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ticket_key_a")EOF"; } // namespace -class FilterChainBenchmarkFixture : public benchmark::Fixture { +class FilterChainBenchmarkFixture : public ::benchmark::Fixture { public: - using benchmark::Fixture::SetUp; - - void SetUp(::benchmark::State& state) override { + void initialize(::benchmark::State& state) { int64_t input_size = state.range(0); std::vector port_chains; port_chains.reserve(input_size); @@ -195,6 +194,10 @@ class FilterChainBenchmarkFixture : public benchmark::Fixture { TestUtility::loadFromYaml(listener_yaml_config_, listener_config_); filter_chains_ = listener_config_.filter_chains(); } + + Envoy::Thread::MutexBasicLockable lock_; + Logger::Context logging_state_{spdlog::level::warn, Logger::Logger::DEFAULT_LOG_FORMAT, lock_, + false}; std::string listener_yaml_config_; envoy::config::listener::v3::Listener listener_config_; absl::Span filter_chains_; @@ -205,6 +208,12 @@ class FilterChainBenchmarkFixture : public benchmark::Fixture { // NOLINTNEXTLINE(readability-redundant-member-init) BENCHMARK_DEFINE_F(FilterChainBenchmarkFixture, FilterChainManagerBuildTest) (::benchmark::State& state) { + if (benchmark::skipExpensiveBenchmarks() && state.range(0) > 64) { + state.SkipWithError("Skipping expensive benchmark"); + return; + } + + initialize(state); NiceMock factory_context; for (auto _ : state) { FilterChainManagerImpl filter_chain_manager{ @@ -216,6 +225,12 @@ BENCHMARK_DEFINE_F(FilterChainBenchmarkFixture, FilterChainManagerBuildTest) BENCHMARK_DEFINE_F(FilterChainBenchmarkFixture, FilterChainFindTest) (::benchmark::State& state) { + if (benchmark::skipExpensiveBenchmarks() && state.range(0) > 64) { + state.SkipWithError("Skipping expensive benchmark"); + return; + } + + initialize(state); std::vector sockets; sockets.reserve(state.range(0)); for (int i = 0; i < state.range(0); i++) { @@ -238,12 +253,14 @@ BENCHMARK_REGISTER_F(FilterChainBenchmarkFixture, FilterChainManagerBuildTest) ->Ranges({ // scale of the chains {1, 4096}, - }); + }) + 
->Unit(::benchmark::kMillisecond); BENCHMARK_REGISTER_F(FilterChainBenchmarkFixture, FilterChainFindTest) ->Ranges({ // scale of the chains {1, 4096}, - }); + }) + ->Unit(::benchmark::kMillisecond); /* clang-format off From a5d56f5c2e027fa3a3965487a06035de272648df Mon Sep 17 00:00:00 2001 From: Rama Chavali Date: Fri, 24 Jul 2020 09:39:18 +0530 Subject: [PATCH 737/909] http: add custom header support for sha1 fingerprint of peer cert (#12173) Adds support for DOWNSTREAM_PEER_FINGERPRINT_1 that sends SHA1 finger print of peer cert as a custom header. Signed-off-by: Rama Chavali --- .../http/http_conn_man/headers.rst | 6 ++ .../observability/access_log/usage.rst | 6 ++ docs/root/version_history/current.rst | 2 + include/envoy/ssl/connection.h | 6 ++ .../formatter/substitution_formatter.cc | 5 ++ source/common/router/header_formatter.cc | 5 ++ .../transport_sockets/tls/ssl_socket.cc | 18 ++++++ .../transport_sockets/tls/ssl_socket.h | 2 + .../formatter/substitution_formatter_test.cc | 35 +++++++++++ test/common/router/header_formatter_test.cc | 31 ++++++++- .../transport_sockets/tls/ssl_socket_test.cc | 63 ++++++++++++++++--- .../tls/test_data/no_san_cert_info.h | 3 +- test/mocks/ssl/mocks.h | 1 + 13 files changed, 169 insertions(+), 14 deletions(-) diff --git a/docs/root/configuration/http/http_conn_man/headers.rst b/docs/root/configuration/http/http_conn_man/headers.rst index 345657d0ee93..0ef3e630c5e0 100644 --- a/docs/root/configuration/http/http_conn_man/headers.rst +++ b/docs/root/configuration/http/http_conn_man/headers.rst @@ -571,6 +571,12 @@ Supported variable names are: TCP The hex-encoded SHA256 fingerprint of the client certificate used to establish the downstream TLS connection. +%DOWNSTREAM_PEER_FINGERPRINT_1% + HTTP + The hex-encoded SHA1 fingerprint of the client certificate used to establish the downstream TLS connection. + TCP + The hex-encoded SHA1 fingerprint of the client certificate used to establish the downstream TLS connection. + %DOWNSTREAM_PEER_SERIAL% HTTP The serial number of the client certificate used to establish the downstream TLS connection. diff --git a/docs/root/configuration/observability/access_log/usage.rst b/docs/root/configuration/observability/access_log/usage.rst index 86da614014d1..894c8ac61c36 100644 --- a/docs/root/configuration/observability/access_log/usage.rst +++ b/docs/root/configuration/observability/access_log/usage.rst @@ -495,6 +495,12 @@ The following command operators are supported: TCP The hex-encoded SHA256 fingerprint of the client certificate used to establish the downstream TLS connection. +%DOWNSTREAM_PEER_FINGERPRINT_1% + HTTP + The hex-encoded SHA1 fingerprint of the client certificate used to establish the downstream TLS connection. + TCP + The hex-encoded SHA1 fingerprint of the client certificate used to establish the downstream TLS connection. + %DOWNSTREAM_PEER_SERIAL% HTTP The serial number of the client certificate used to establish the downstream TLS connection. diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index a768a80008b8..de0307a2c4bf 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -39,9 +39,11 @@ Removed Config or Runtime New Features ------------ +* access log: added support for :ref:`%DOWNSTREAM_PEER_FINGERPRINT_1% ` as a response flag. * dynamic_forward_proxy: added :ref:`use_tcp_for_dns_lookups` option to use TCP for DNS lookups in order to match the DNS options for :ref:`Clusters`. 
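As an illustrative sketch (written in the style of this repository's tests, not part of the change itself), the %DOWNSTREAM_PEER_FINGERPRINT_1% operator noted above can be wired into a route's request headers; the header name x-client-cert-sha1 and the route name below are placeholder assumptions, and the substituted value is empty when the downstream connection presented no client certificate.

#include "envoy/config/route/v3/route.pb.h"

#include "test/test_common/utility.h"

// Builds a route configuration that stamps the client certificate's SHA1 fingerprint onto
// every request forwarded upstream.
envoy::config::route::v3::RouteConfiguration makeFingerprintRouteConfig() {
  envoy::config::route::v3::RouteConfiguration route_config;
  TestUtility::loadFromYaml(R"EOF(
name: example_routes
request_headers_to_add:
- header:
    key: x-client-cert-sha1
    value: "%DOWNSTREAM_PEER_FINGERPRINT_1%"
)EOF",
                            route_config);
  return route_config;
}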
* ext_authz filter: added support for emitting dynamic metadata for both :ref:`HTTP ` and :ref:`network ` filters. * grpc-json: support specifying `response_body` field in for `google.api.HttpBody` message. +* http: added support for :ref:`%DOWNSTREAM_PEER_FINGERPRINT_1% ` as custom header. * http: introduced new HTTP/1 and HTTP/2 codec implementations that will remove the use of exceptions for control flow due to high risk factors and instead use error statuses. The old behavior is deprecated, but can be used during the removal period by setting the runtime feature `envoy.reloadable_features.new_codec_behavior` to false. The removal period will be one month. * load balancer: added a :ref:`configuration` option to specify the active request bias used by the least request load balancer. * redis: added fault injection support :ref:`fault injection for redis proxy `, described further in :ref:`configuration documentation `. diff --git a/include/envoy/ssl/connection.h b/include/envoy/ssl/connection.h index b58d9511698e..a93c97c38d40 100644 --- a/include/envoy/ssl/connection.h +++ b/include/envoy/ssl/connection.h @@ -47,6 +47,12 @@ class ConnectionInfo { */ virtual const std::string& sha256PeerCertificateDigest() const PURE; + /** + * @return std::string the SHA1 digest of the peer certificate. Returns "" if there is no peer + * certificate which can happen in TLS (non mTLS) connections. + */ + virtual const std::string& sha1PeerCertificateDigest() const PURE; + /** * @return std::string the serial number field of the peer certificate. Returns "" if * there is no peer certificate, or no serial number. diff --git a/source/common/formatter/substitution_formatter.cc b/source/common/formatter/substitution_formatter.cc index aedc72ad87e1..4a3bb8a90cf8 100644 --- a/source/common/formatter/substitution_formatter.cc +++ b/source/common/formatter/substitution_formatter.cc @@ -716,6 +716,11 @@ StreamInfoFormatter::StreamInfoFormatter(const std::string& field_name) { [](const Ssl::ConnectionInfo& connection_info) { return connection_info.sha256PeerCertificateDigest(); }); + } else if (field_name == "DOWNSTREAM_PEER_FINGERPRINT_1") { + field_extractor_ = std::make_unique( + [](const Ssl::ConnectionInfo& connection_info) { + return connection_info.sha1PeerCertificateDigest(); + }); } else if (field_name == "DOWNSTREAM_PEER_SERIAL") { field_extractor_ = std::make_unique( [](const Ssl::ConnectionInfo& connection_info) { diff --git a/source/common/router/header_formatter.cc b/source/common/router/header_formatter.cc index 4e12d6fd8712..8e40e95b23eb 100644 --- a/source/common/router/header_formatter.cc +++ b/source/common/router/header_formatter.cc @@ -288,6 +288,11 @@ StreamInfoHeaderFormatter::StreamInfoHeaderFormatter(absl::string_view field_nam sslConnectionInfoStringHeaderExtractor([](const Ssl::ConnectionInfo& connection_info) { return connection_info.sha256PeerCertificateDigest(); }); + } else if (field_name == "DOWNSTREAM_PEER_FINGERPRINT_1") { + field_extractor_ = + sslConnectionInfoStringHeaderExtractor([](const Ssl::ConnectionInfo& connection_info) { + return connection_info.sha1PeerCertificateDigest(); + }); } else if (field_name == "DOWNSTREAM_PEER_SERIAL") { field_extractor_ = sslConnectionInfoStringHeaderExtractor([](const Ssl::ConnectionInfo& connection_info) { diff --git a/source/extensions/transport_sockets/tls/ssl_socket.cc b/source/extensions/transport_sockets/tls/ssl_socket.cc index 03f3f8b44e4a..ab2644ccc808 100644 --- a/source/extensions/transport_sockets/tls/ssl_socket.cc +++ 
b/source/extensions/transport_sockets/tls/ssl_socket.cc @@ -371,6 +371,24 @@ const std::string& SslSocketInfo::sha256PeerCertificateDigest() const { return cached_sha_256_peer_certificate_digest_; } +const std::string& SslSocketInfo::sha1PeerCertificateDigest() const { + if (!cached_sha_1_peer_certificate_digest_.empty()) { + return cached_sha_1_peer_certificate_digest_; + } + bssl::UniquePtr cert(SSL_get_peer_certificate(ssl())); + if (!cert) { + ASSERT(cached_sha_1_peer_certificate_digest_.empty()); + return cached_sha_1_peer_certificate_digest_; + } + + std::vector computed_hash(SHA_DIGEST_LENGTH); + unsigned int n; + X509_digest(cert.get(), EVP_sha1(), computed_hash.data(), &n); + RELEASE_ASSERT(n == computed_hash.size(), ""); + cached_sha_1_peer_certificate_digest_ = Hex::encode(computed_hash); + return cached_sha_1_peer_certificate_digest_; +} + const std::string& SslSocketInfo::urlEncodedPemEncodedPeerCertificate() const { if (!cached_url_encoded_pem_encoded_peer_certificate_.empty()) { return cached_url_encoded_pem_encoded_peer_certificate_; diff --git a/source/extensions/transport_sockets/tls/ssl_socket.h b/source/extensions/transport_sockets/tls/ssl_socket.h index 2a6ee3a056dd..27416ce7f635 100644 --- a/source/extensions/transport_sockets/tls/ssl_socket.h +++ b/source/extensions/transport_sockets/tls/ssl_socket.h @@ -60,6 +60,7 @@ class SslSocketInfo : public Envoy::Ssl::ConnectionInfo { bool peerCertificateValidated() const override; absl::Span uriSanLocalCertificate() const override; const std::string& sha256PeerCertificateDigest() const override; + const std::string& sha1PeerCertificateDigest() const override; const std::string& serialNumberPeerCertificate() const override; const std::string& issuerPeerCertificate() const override; const std::string& subjectPeerCertificate() const override; @@ -83,6 +84,7 @@ class SslSocketInfo : public Envoy::Ssl::ConnectionInfo { private: mutable std::vector cached_uri_san_local_certificate_; mutable std::string cached_sha_256_peer_certificate_digest_; + mutable std::string cached_sha_1_peer_certificate_digest_; mutable std::string cached_serial_number_peer_certificate_; mutable std::string cached_issuer_peer_certificate_; mutable std::string cached_subject_peer_certificate_; diff --git a/test/common/formatter/substitution_formatter_test.cc b/test/common/formatter/substitution_formatter_test.cc index b5caec7619ef..882b9910fa20 100644 --- a/test/common/formatter/substitution_formatter_test.cc +++ b/test/common/formatter/substitution_formatter_test.cc @@ -758,6 +758,41 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { stream_info, body), ProtoEq(ValueUtil::nullValue())); } + { + StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_FINGERPRINT_1"); + auto connection_info = std::make_shared(); + std::string expected_sha = "685a2db593d5f86d346cb1a297009c3b467ad77f1944aa799039a2fb3d531f3f"; + EXPECT_CALL(*connection_info, sha1PeerCertificateDigest()) + .WillRepeatedly(ReturnRef(expected_sha)); + EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + EXPECT_EQ(expected_sha, upstream_format.format(request_headers, response_headers, + response_trailers, stream_info, body)); + EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, + stream_info, body), + ProtoEq(ValueUtil::stringValue(expected_sha))); + } + { + StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_FINGERPRINT_1"); + auto connection_info = std::make_shared(); + std::string expected_sha; + 
EXPECT_CALL(*connection_info, sha1PeerCertificateDigest()) + .WillRepeatedly(ReturnRef(expected_sha)); + EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, + stream_info, body)); + EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, + stream_info, body), + ProtoEq(ValueUtil::nullValue())); + } + { + EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); + StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_FINGERPRINT_1"); + EXPECT_EQ("-", upstream_format.format(request_headers, response_headers, response_trailers, + stream_info, body)); + EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, + stream_info, body), + ProtoEq(ValueUtil::nullValue())); + } { StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_SERIAL"); auto connection_info = std::make_shared(); diff --git a/test/common/router/header_formatter_test.cc b/test/common/router/header_formatter_test.cc index 99be7eb35476..84140d099d7b 100644 --- a/test/common/router/header_formatter_test.cc +++ b/test/common/router/header_formatter_test.cc @@ -298,7 +298,7 @@ TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamTlsVersionNoTls) { testFormatting(stream_info, "DOWNSTREAM_TLS_VERSION", EMPTY_STRING); } -TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerFingerprint) { +TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerSha256Fingerprint) { NiceMock stream_info; auto connection_info = std::make_shared>(); std::string expected_sha = "685a2db593d5f86d346cb1a297009c3b467ad77f1944aa799039a2fb3d531f3f"; @@ -308,7 +308,7 @@ TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerFingerprint) { "685a2db593d5f86d346cb1a297009c3b467ad77f1944aa799039a2fb3d531f3f"); } -TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerFingerprintEmpty) { +TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerSha256FingerprintEmpty) { NiceMock stream_info; auto connection_info = std::make_shared>(); std::string expected_sha; @@ -317,12 +317,37 @@ TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerFingerprintEmp testFormatting(stream_info, "DOWNSTREAM_PEER_FINGERPRINT_256", EMPTY_STRING); } -TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerFingerprintNoTls) { +TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerSha256FingerprintNoTls) { NiceMock stream_info; EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); testFormatting(stream_info, "DOWNSTREAM_PEER_FINGERPRINT_256", EMPTY_STRING); } +TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerSha1Fingerprint) { + NiceMock stream_info; + auto connection_info = std::make_shared>(); + std::string expected_sha = "685a2db593d5f86d346cb1a297009c3b467ad77f1944aa799039a2fb3d531f3f"; + ON_CALL(*connection_info, sha1PeerCertificateDigest()).WillByDefault(ReturnRef(expected_sha)); + EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + testFormatting(stream_info, "DOWNSTREAM_PEER_FINGERPRINT_1", + "685a2db593d5f86d346cb1a297009c3b467ad77f1944aa799039a2fb3d531f3f"); +} + +TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerSha1FingerprintEmpty) { + NiceMock stream_info; + auto connection_info = std::make_shared>(); + std::string expected_sha; + 
ON_CALL(*connection_info, sha1PeerCertificateDigest()).WillByDefault(ReturnRef(expected_sha)); + EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + testFormatting(stream_info, "DOWNSTREAM_PEER_FINGERPRINT_1", EMPTY_STRING); +} + +TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerSha1FingerprintNoTls) { + NiceMock stream_info; + EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); + testFormatting(stream_info, "DOWNSTREAM_PEER_FINGERPRINT_1", EMPTY_STRING); +} + TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerSerial) { NiceMock stream_info; auto connection_info = std::make_shared>(); diff --git a/test/extensions/transport_sockets/tls/ssl_socket_test.cc b/test/extensions/transport_sockets/tls/ssl_socket_test.cc index f4f76258fb31..76f3a16b56b1 100644 --- a/test/extensions/transport_sockets/tls/ssl_socket_test.cc +++ b/test/extensions/transport_sockets/tls/ssl_socket_test.cc @@ -142,12 +142,19 @@ class TestUtilOptions : public TestUtilOptionsBase { return *this; } - TestUtilOptions& setExpectedDigest(const std::string& expected_digest) { - expected_digest_ = expected_digest; + TestUtilOptions& setExpectedSha256Digest(const std::string& expected_sha256_digest) { + expected_sha256_digest_ = expected_sha256_digest; return *this; } - const std::string& expectedDigest() const { return expected_digest_; } + const std::string& expectedSha256Digest() const { return expected_sha256_digest_; } + + TestUtilOptions& setExpectedSha1Digest(const std::string& expected_sha1_digest) { + expected_sha1_digest_ = expected_sha1_digest; + return *this; + } + + const std::string& expectedSha1Digest() const { return expected_sha1_digest_; } TestUtilOptions& setExpectedLocalUri(const std::string& expected_local_uri) { expected_local_uri_ = {expected_local_uri}; @@ -250,7 +257,8 @@ class TestUtilOptions : public TestUtilOptionsBase { bool expect_no_cert_chain_; bool expect_private_key_method_; Network::ConnectionEvent expected_server_close_event_; - std::string expected_digest_; + std::string expected_sha256_digest_; + std::string expected_sha1_digest_; std::vector expected_local_uri_; std::string expected_serial_number_; std::string expected_peer_issuer_; @@ -338,13 +346,20 @@ void testUtil(const TestUtilOptions& options) { size_t connect_count = 0; auto connect_second_time = [&]() { if (++connect_count == 2) { - if (!options.expectedDigest().empty()) { + if (!options.expectedSha256Digest().empty()) { // Assert twice to ensure a cached value is returned and still valid. - EXPECT_EQ(options.expectedDigest(), + EXPECT_EQ(options.expectedSha256Digest(), server_connection->ssl()->sha256PeerCertificateDigest()); - EXPECT_EQ(options.expectedDigest(), + EXPECT_EQ(options.expectedSha256Digest(), server_connection->ssl()->sha256PeerCertificateDigest()); } + if (!options.expectedSha1Digest().empty()) { + // Assert twice to ensure a cached value is returned and still valid. + EXPECT_EQ(options.expectedSha1Digest(), + server_connection->ssl()->sha1PeerCertificateDigest()); + EXPECT_EQ(options.expectedSha1Digest(), + server_connection->ssl()->sha1PeerCertificateDigest()); + } // Assert twice to ensure a cached value is returned and still valid. 
EXPECT_EQ(options.expectedClientCertUri(), server_connection->ssl()->uriSanPeerCertificate()); EXPECT_EQ(options.expectedClientCertUri(), server_connection->ssl()->uriSanPeerCertificate()); @@ -398,6 +413,7 @@ void testUtil(const TestUtilOptions& options) { EXPECT_FALSE(server_connection->ssl()->validFromPeerCertificate().has_value()); EXPECT_FALSE(server_connection->ssl()->expirationPeerCertificate().has_value()); EXPECT_EQ(EMPTY_STRING, server_connection->ssl()->sha256PeerCertificateDigest()); + EXPECT_EQ(EMPTY_STRING, server_connection->ssl()->sha1PeerCertificateDigest()); EXPECT_EQ(EMPTY_STRING, server_connection->ssl()->urlEncodedPemEncodedPeerCertificate()); EXPECT_EQ(EMPTY_STRING, server_connection->ssl()->subjectPeerCertificate()); EXPECT_EQ(std::vector{}, server_connection->ssl()->dnsSansPeerCertificate()); @@ -821,10 +837,35 @@ TEST_P(SslSocketTest, GetCertDigest) { )EOF"; TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam()); - testUtil(test_options.setExpectedDigest(TEST_NO_SAN_CERT_HASH) + testUtil(test_options.setExpectedSha256Digest(TEST_NO_SAN_CERT_256_HASH) + .setExpectedSha1Digest(TEST_NO_SAN_CERT_1_HASH) .setExpectedSerialNumber(TEST_NO_SAN_CERT_SERIAL)); } +TEST_P(SslSocketTest, GetCertDigestInvalidFiles) { + const std::string client_ctx_yaml = R"EOF( + common_tls_context: + tls_certificates: +)EOF"; + + const std::string server_ctx_yaml = R"EOF( + common_tls_context: + tls_certificates: + certificate_chain: + filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem" + private_key: + filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem" + validation_context: + trusted_ca: + filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem" +)EOF"; + + TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam()); + testUtil( + test_options.setExpectedSha256Digest("").setExpectedSha1Digest("").setExpectedSerialNumber( + "")); +} + TEST_P(SslSocketTest, GetCertDigestInline) { envoy::config::listener::v3::Listener listener; envoy::config::listener::v3::FilterChain* filter_chain = listener.add_filter_chains(); @@ -894,7 +935,8 @@ TEST_P(SslSocketTest, GetCertDigestServerCertWithIntermediateCA) { )EOF"; TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam()); - testUtil(test_options.setExpectedDigest(TEST_NO_SAN_CERT_HASH) + testUtil(test_options.setExpectedSha256Digest(TEST_NO_SAN_CERT_256_HASH) + .setExpectedSha1Digest(TEST_NO_SAN_CERT_1_HASH) .setExpectedSerialNumber(TEST_NO_SAN_CERT_SERIAL)); } @@ -921,7 +963,8 @@ TEST_P(SslSocketTest, GetCertDigestServerCertWithoutCommonName) { )EOF"; TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam()); - testUtil(test_options.setExpectedDigest(TEST_NO_SAN_CERT_HASH) + testUtil(test_options.setExpectedSha256Digest(TEST_NO_SAN_CERT_256_HASH) + .setExpectedSha1Digest(TEST_NO_SAN_CERT_1_HASH) .setExpectedSerialNumber(TEST_NO_SAN_CERT_SERIAL)); } diff --git a/test/extensions/transport_sockets/tls/test_data/no_san_cert_info.h b/test/extensions/transport_sockets/tls/test_data/no_san_cert_info.h index bb1ec52cbe6a..b1b23f3b5ba5 100644 --- a/test/extensions/transport_sockets/tls/test_data/no_san_cert_info.h +++ b/test/extensions/transport_sockets/tls/test_data/no_san_cert_info.h @@ -1,6 +1,7 @@ // NOLINT(namespace-envoy) -constexpr char TEST_NO_SAN_CERT_HASH[] = +constexpr char TEST_NO_SAN_CERT_256_HASH[] = 
"0035c2f2cefc21bd5e1e52b945ff26c474dad33343ae00aa8f86f4877aa02eca"; +constexpr char TEST_NO_SAN_CERT_1_HASH[] = "7bf61b89caf51c49c3dfaf6209b6a7ad900b352b"; constexpr char TEST_NO_SAN_CERT_SPKI[] = "xVbSFNk3uh/hr0XoZArX7fc1RrKx0oQ+OkVcGa1HCzY="; constexpr char TEST_NO_SAN_CERT_SERIAL[] = "b8b5ecc898f21249"; constexpr char TEST_NO_SAN_CERT_NOT_BEFORE[] = "Dec 18 01:50:34 2018 GMT"; diff --git a/test/mocks/ssl/mocks.h b/test/mocks/ssl/mocks.h index 825822992a7c..c3bc9b2f8ecd 100644 --- a/test/mocks/ssl/mocks.h +++ b/test/mocks/ssl/mocks.h @@ -42,6 +42,7 @@ class MockConnectionInfo : public ConnectionInfo { MOCK_METHOD(bool, peerCertificateValidated, (), (const)); MOCK_METHOD(absl::Span, uriSanLocalCertificate, (), (const)); MOCK_METHOD(const std::string&, sha256PeerCertificateDigest, (), (const)); + MOCK_METHOD(const std::string&, sha1PeerCertificateDigest, (), (const)); MOCK_METHOD(const std::string&, serialNumberPeerCertificate, (), (const)); MOCK_METHOD(const std::string&, issuerPeerCertificate, (), (const)); MOCK_METHOD(const std::string&, subjectPeerCertificate, (), (const)); From 06acbab53a7bed5309ab1e6ca1c18850023eb75d Mon Sep 17 00:00:00 2001 From: htuch Date: Fri, 24 Jul 2020 00:10:50 -0400 Subject: [PATCH 738/909] xds: safely handle watch removals during update, nested pause/resume. (#12069) To fix #11877, we need to handle safely the case where two watches point at the same resource, and a WatchMap onConfigUpdate() causes one watch to remove the other watch during its invoked onConfigUpdate(). While working on this, it made sense to fix #11674, avoiding spurious ClusterLoadAssignment discovery requests in the regression integration test. Risk level: Medium (this has xDS wire-level implications). Testing: New unit tests for pause/resume, regression unit and integration tests for watch map removal behaviors. Fixes #11877 #11674 Signed-off-by: Harvey Tuch Co-authored-by: Sebastian Schepens --- include/envoy/config/grpc_mux.h | 16 ---- source/common/config/BUILD | 1 + source/common/config/grpc_mux_impl.cc | 73 +++++++--------- source/common/config/grpc_mux_impl.h | 20 ++--- source/common/config/grpc_stream.h | 10 ++- source/common/config/new_grpc_mux_impl.cc | 17 +--- source/common/config/new_grpc_mux_impl.h | 2 - source/common/config/pausable_ack_queue.cc | 23 +++-- source/common/config/pausable_ack_queue.h | 2 +- source/common/config/watch_map.cc | 38 ++++++++- source/common/config/watch_map.h | 7 ++ source/common/router/rds_impl.cc | 5 +- source/common/router/rds_impl.h | 5 +- .../common/upstream/cluster_manager_impl.cc | 4 +- .../config/delta_subscription_impl_test.cc | 3 + test/common/config/grpc_mux_impl_test.cc | 67 ++++++++++----- test/common/config/pausable_ack_queue_test.cc | 9 ++ test/common/config/watch_map_test.cc | 83 +++++++++++++++++-- test/config/utility.cc | 31 +++++++ test/config/utility.h | 4 + test/integration/ads_integration.cc | 10 +++ test/integration/ads_integration.h | 5 ++ test/integration/ads_integration_test.cc | 66 ++++++++++++--- test/mocks/config/mocks.h | 2 - 24 files changed, 354 insertions(+), 149 deletions(-) diff --git a/include/envoy/config/grpc_mux.h b/include/envoy/config/grpc_mux.h index 2d16c66270dd..0f20aae3cfc5 100644 --- a/include/envoy/config/grpc_mux.h +++ b/include/envoy/config/grpc_mux.h @@ -84,22 +84,6 @@ class GrpcMux { */ ABSL_MUST_USE_RESULT virtual ScopedResume pause(const std::vector type_urls) PURE; - /** - * Retrieves the current pause state as set by pause()/resume(). - * @param type_url type URL corresponding to xDS API, e.g. 
- * type.googleapis.com/envoy.api.v2.Cluster - * @return bool whether the API is paused. - */ - virtual bool paused(const std::string& type_url) const PURE; - - /** - * Retrieves the current pause state as set by pause()/resume(). - * @param type_urls type URLs corresponding to xDS API, e.g. - * type.googleapis.com/envoy.api.v2.Cluster - * @return bool whether any of the APIs is paused. - */ - virtual bool paused(const std::vector type_urls) const PURE; - /** * Start a configuration subscription asynchronously for some API type and resources. * @param type_url type URL corresponding to xDS API, e.g. diff --git a/source/common/config/BUILD b/source/common/config/BUILD index b93d24af1fbf..55b8cce10d0c 100644 --- a/source/common/config/BUILD +++ b/source/common/config/BUILD @@ -396,6 +396,7 @@ envoy_cc_library( ":decoded_resource_lib", "//include/envoy/config:subscription_interface", "//source/common/common:assert_lib", + "//source/common/common:cleanup_lib", "//source/common/common:minimal_logger_lib", "//source/common/protobuf", "@envoy_api//envoy/service/discovery/v3:pkg_cc_proto", diff --git a/source/common/config/grpc_mux_impl.cc b/source/common/config/grpc_mux_impl.cc index abfaf0735859..e97d55362445 100644 --- a/source/common/config/grpc_mux_impl.cc +++ b/source/common/config/grpc_mux_impl.cc @@ -31,18 +31,7 @@ GrpcMuxImpl::GrpcMuxImpl(const LocalInfo::LocalInfo& local_info, void GrpcMuxImpl::start() { grpc_stream_.establishNewStream(); } void GrpcMuxImpl::sendDiscoveryRequest(const std::string& type_url) { - if (!grpc_stream_.grpcStreamAvailable()) { - ENVOY_LOG(debug, "No stream available to sendDiscoveryRequest for {}", type_url); - return; // Drop this request; the reconnect will enqueue a new one. - } - ApiState& api_state = api_state_[type_url]; - if (api_state.paused_) { - ENVOY_LOG(trace, "API {} paused during sendDiscoveryRequest(), setting pending.", type_url); - api_state.pending_ = true; - return; // Drop this request; the unpause will enqueue a new one. 
- } - auto& request = api_state.request_; request.mutable_resource_names()->Clear(); @@ -105,21 +94,19 @@ ScopedResume GrpcMuxImpl::pause(const std::string& type_url) { ScopedResume GrpcMuxImpl::pause(const std::vector type_urls) { for (const auto& type_url : type_urls) { - ENVOY_LOG(debug, "Pausing discovery requests for {}", type_url); ApiState& api_state = api_state_[type_url]; - ASSERT(!api_state.paused_); - ASSERT(!api_state.pending_); - api_state.paused_ = true; + ENVOY_LOG(debug, "Pausing discovery requests for {} (previous count {})", type_url, + api_state.pauses_); + ++api_state.pauses_; } return std::make_unique([this, type_urls]() { for (const auto& type_url : type_urls) { - ENVOY_LOG(debug, "Resuming discovery requests for {}", type_url); ApiState& api_state = api_state_[type_url]; - ASSERT(api_state.paused_); - api_state.paused_ = false; + ENVOY_LOG(debug, "Resuming discovery requests for {} (previous count {})", type_url, + api_state.pauses_); + ASSERT(api_state.paused()); - if (api_state.pending_) { - ASSERT(api_state.subscribed_); + if (--api_state.pauses_ == 0 && api_state.pending_ && api_state.subscribed_) { queueDiscoveryRequest(type_url); api_state.pending_ = false; } @@ -127,23 +114,6 @@ ScopedResume GrpcMuxImpl::pause(const std::vector type_urls) { }); } -bool GrpcMuxImpl::paused(const std::string& type_url) const { - auto entry = api_state_.find(type_url); - if (entry == api_state_.end()) { - return false; - } - return entry->second.paused_; -} - -bool GrpcMuxImpl::paused(const std::vector type_urls) const { - for (const auto& type_url : type_urls) { - if (paused(type_url)) { - return true; - } - } - return false; -} - void GrpcMuxImpl::onDiscoveryResponse( std::unique_ptr&& message, ControlPlaneStats& control_plane_stats) { @@ -177,6 +147,12 @@ void GrpcMuxImpl::onDiscoveryResponse( } return; } + ScopedResume same_type_resume; + // We pause updates of the same type. This is necessary for SotW and GrpcMuxImpl, since unlike + // delta and NewGRpcMuxImpl, independent watch additions/removals trigger updates regardless of + // the delta state. The proper fix for this is to converge these implementations, + // see https://github.com/envoyproxy/envoy/issues/11477. + same_type_resume = pause(type_url); try { // To avoid O(n^2) explosion (e.g. when we have 1000s of EDS watches), we // build a map here from resource name to resource and then walk watches_. @@ -234,6 +210,7 @@ void GrpcMuxImpl::onDiscoveryResponse( error_detail->set_message(Config::Utility::truncateGrpcStatusMessage(e.what())); } api_state_[type_url].request_.set_response_nonce(message->nonce()); + ASSERT(api_state_[type_url].paused()); queueDiscoveryRequest(type_url); } @@ -241,6 +218,8 @@ void GrpcMuxImpl::onWriteable() { drainRequests(); } void GrpcMuxImpl::onStreamEstablished() { first_stream_request_ = true; + grpc_stream_.maybeUpdateQueueSizeStat(0); + request_queue_ = std::make_unique>(); for (const auto& type_url : subscriptions_) { queueDiscoveryRequest(type_url); } @@ -256,17 +235,27 @@ void GrpcMuxImpl::onEstablishmentFailure() { } void GrpcMuxImpl::queueDiscoveryRequest(const std::string& queue_item) { - request_queue_.push(queue_item); + if (!grpc_stream_.grpcStreamAvailable()) { + ENVOY_LOG(debug, "No stream available to queueDiscoveryRequest for {}", queue_item); + return; // Drop this request; the reconnect will enqueue a new one. 
+ } + ApiState& api_state = api_state_[queue_item]; + if (api_state.paused()) { + ENVOY_LOG(trace, "API {} paused during queueDiscoveryRequest(), setting pending.", queue_item); + api_state.pending_ = true; + return; // Drop this request; the unpause will enqueue a new one. + } + request_queue_->push(queue_item); drainRequests(); } void GrpcMuxImpl::drainRequests() { - while (!request_queue_.empty() && grpc_stream_.checkRateLimitAllowsDrain()) { + while (!request_queue_->empty() && grpc_stream_.checkRateLimitAllowsDrain()) { // Process the request, if rate limiting is not enabled at all or if it is under rate limit. - sendDiscoveryRequest(request_queue_.front()); - request_queue_.pop(); + sendDiscoveryRequest(request_queue_->front()); + request_queue_->pop(); } - grpc_stream_.maybeUpdateQueueSizeStat(request_queue_.size()); + grpc_stream_.maybeUpdateQueueSizeStat(request_queue_->size()); } } // namespace Config diff --git a/source/common/config/grpc_mux_impl.h b/source/common/config/grpc_mux_impl.h index f2dfc9529714..4480387219a8 100644 --- a/source/common/config/grpc_mux_impl.h +++ b/source/common/config/grpc_mux_impl.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -41,8 +42,6 @@ class GrpcMuxImpl : public GrpcMux, // GrpcMux ScopedResume pause(const std::string& type_url) override; ScopedResume pause(const std::vector type_urls) override; - bool paused(const std::string& type_url) const override; - bool paused(const std::vector type_urls) const override; GrpcMuxWatchPtr addWatch(const std::string& type_url, const std::set& resources, SubscriptionCallbacks& callbacks, @@ -51,8 +50,6 @@ class GrpcMuxImpl : public GrpcMux, void handleDiscoveryResponse( std::unique_ptr&& message); - void sendDiscoveryRequest(const std::string& type_url); - // Config::GrpcStreamCallbacks void onStreamEstablished() override; void onEstablishmentFailure() override; @@ -70,6 +67,7 @@ class GrpcMuxImpl : public GrpcMux, private: void drainRequests(); void setRetryTimer(); + void sendDiscoveryRequest(const std::string& type_url); struct GrpcMuxWatchImpl : public GrpcMuxWatch { GrpcMuxWatchImpl(const std::set& resources, SubscriptionCallbacks& callbacks, @@ -83,14 +81,14 @@ class GrpcMuxImpl : public GrpcMux, ~GrpcMuxWatchImpl() override { watches_.remove(this); if (!resources_.empty()) { - parent_.sendDiscoveryRequest(type_url_); + parent_.queueDiscoveryRequest(type_url_); } } void update(const std::set& resources) override { watches_.remove(this); if (!resources_.empty()) { - parent_.sendDiscoveryRequest(type_url_); + parent_.queueDiscoveryRequest(type_url_); } resources_ = resources; // move this watch to the beginning of the list @@ -110,12 +108,14 @@ class GrpcMuxImpl : public GrpcMux, // Per muxed API state. struct ApiState { + bool paused() const { return pauses_ > 0; } + // Watches on the returned resources for the API; std::list watches_; // Current DiscoveryRequest for API. envoy::service::discovery::v3::DiscoveryRequest request_; - // Paused via pause()? - bool paused_{}; + // Count of unresumed pause() invocations. + uint32_t pauses_{}; // Was a DiscoveryRequest elided during a pause? bool pending_{}; // Has this API been tracked in subscriptions_? @@ -138,7 +138,7 @@ class GrpcMuxImpl : public GrpcMux, // A queue to store requests while rate limited. Note that when requests cannot be sent due to the // gRPC stream being down, this queue does not store them; rather, they are simply dropped. // This string is a type URL. 
- std::queue request_queue_; + std::unique_ptr> request_queue_; const envoy::config::core::v3::ApiVersion transport_api_version_; }; @@ -155,8 +155,6 @@ class NullGrpcMuxImpl : public GrpcMux, ScopedResume pause(const std::vector) override { return std::make_unique([] {}); } - bool paused(const std::string&) const override { return false; } - bool paused(const std::vector) const override { return false; } GrpcMuxWatchPtr addWatch(const std::string&, const std::set&, SubscriptionCallbacks&, OpaqueResourceDecoder&) override { diff --git a/source/common/config/grpc_stream.h b/source/common/config/grpc_stream.h index 5ac368deea0d..b922d8c09f0f 100644 --- a/source/common/config/grpc_stream.h +++ b/source/common/config/grpc_stream.h @@ -38,7 +38,11 @@ class GrpcStream : public Grpc::AsyncStreamCallbacks, // Default Bucket contains 100 tokens maximum and refills at 10 tokens/sec. limit_request_ = std::make_unique( rate_limit_settings.max_tokens_, time_source_, rate_limit_settings.fill_rate_); - drain_request_timer_ = dispatcher.createTimer([this]() { callbacks_->onWriteable(); }); + drain_request_timer_ = dispatcher.createTimer([this]() { + if (stream_ != nullptr) { + callbacks_->onWriteable(); + } + }); } // TODO(htuch): Make this configurable. @@ -121,7 +125,9 @@ class GrpcStream : public Grpc::AsyncStreamCallbacks, ASSERT(drain_request_timer_ != nullptr); control_plane_stats_.rate_limit_enforced_.inc(); // Enable the drain request timer. - drain_request_timer_->enableTimer(limit_request_->nextTokenAvailable()); + if (!drain_request_timer_->enabled()) { + drain_request_timer_->enableTimer(limit_request_->nextTokenAvailable()); + } return false; } diff --git a/source/common/config/new_grpc_mux_impl.cc b/source/common/config/new_grpc_mux_impl.cc index 5814b605a444..c7caaf04f664 100644 --- a/source/common/config/new_grpc_mux_impl.cc +++ b/source/common/config/new_grpc_mux_impl.cc @@ -37,24 +37,13 @@ ScopedResume NewGrpcMuxImpl::pause(const std::vector type_urls) { return std::make_unique([this, type_urls]() { for (const auto& type_url : type_urls) { pausable_ack_queue_.resume(type_url); - trySendDiscoveryRequests(); + if (!pausable_ack_queue_.paused(type_url)) { + trySendDiscoveryRequests(); + } } }); } -bool NewGrpcMuxImpl::paused(const std::string& type_url) const { - return pausable_ack_queue_.paused(type_url); -} - -bool NewGrpcMuxImpl::paused(const std::vector type_urls) const { - for (const auto& type_url : type_urls) { - if (paused(type_url)) { - return true; - } - } - return false; -} - void NewGrpcMuxImpl::onDiscoveryResponse( std::unique_ptr&& message, ControlPlaneStats&) { diff --git a/source/common/config/new_grpc_mux_impl.h b/source/common/config/new_grpc_mux_impl.h index 6c3198f94cc1..431106a4dd39 100644 --- a/source/common/config/new_grpc_mux_impl.h +++ b/source/common/config/new_grpc_mux_impl.h @@ -43,8 +43,6 @@ class NewGrpcMuxImpl ScopedResume pause(const std::string& type_url) override; ScopedResume pause(const std::vector type_urls) override; - bool paused(const std::string& type_url) const override; - bool paused(const std::vector type_urls) const override; void onDiscoveryResponse( std::unique_ptr&& message, diff --git a/source/common/config/pausable_ack_queue.cc b/source/common/config/pausable_ack_queue.cc index daec7587acb2..dc6f01773f6a 100644 --- a/source/common/config/pausable_ack_queue.cc +++ b/source/common/config/pausable_ack_queue.cc @@ -13,7 +13,7 @@ size_t PausableAckQueue::size() const { return storage_.size(); } bool PausableAckQueue::empty() { for (const auto& 
entry : storage_) { - if (!paused_[entry.type_url_]) { + if (pauses_[entry.type_url_] == 0) { return false; } } @@ -22,7 +22,7 @@ bool PausableAckQueue::empty() { const UpdateAck& PausableAckQueue::front() { for (const auto& entry : storage_) { - if (!paused_[entry.type_url_]) { + if (pauses_[entry.type_url_] == 0) { return entry; } } @@ -32,7 +32,7 @@ const UpdateAck& PausableAckQueue::front() { UpdateAck PausableAckQueue::popFront() { for (auto it = storage_.begin(); it != storage_.end(); ++it) { - if (!paused_[it->type_url_]) { + if (pauses_[it->type_url_] == 0) { UpdateAck ret = *it; storage_.erase(it); return ret; @@ -44,23 +44,22 @@ UpdateAck PausableAckQueue::popFront() { void PausableAckQueue::pause(const std::string& type_url) { // It's ok to pause a subscription that doesn't exist yet. - auto& pause_entry = paused_[type_url]; - ASSERT(!pause_entry); - pause_entry = true; + auto& pause_entry = pauses_[type_url]; + ++pause_entry; } void PausableAckQueue::resume(const std::string& type_url) { - auto& pause_entry = paused_[type_url]; - ASSERT(pause_entry); - pause_entry = false; + auto& pause_entry = pauses_[type_url]; + ASSERT(pause_entry > 0); + --pause_entry; } bool PausableAckQueue::paused(const std::string& type_url) const { - auto entry = paused_.find(type_url); - if (entry == paused_.end()) { + auto entry = pauses_.find(type_url); + if (entry == pauses_.end()) { return false; } - return entry->second; + return entry->second > 0; } } // namespace Config diff --git a/source/common/config/pausable_ack_queue.h b/source/common/config/pausable_ack_queue.h index 011f3ed479a7..5535e262598f 100644 --- a/source/common/config/pausable_ack_queue.h +++ b/source/common/config/pausable_ack_queue.h @@ -27,7 +27,7 @@ class PausableAckQueue { private: // It's ok for non-existent subs to be paused/resumed. The cleanest way to support that is to give // the pause state its own map. (Map key is type_url.) - absl::flat_hash_map paused_; + absl::flat_hash_map pauses_; std::list storage_; }; diff --git a/source/common/config/watch_map.cc b/source/common/config/watch_map.cc index dec3d188570b..f17d01decbc4 100644 --- a/source/common/config/watch_map.cc +++ b/source/common/config/watch_map.cc @@ -2,6 +2,7 @@ #include "envoy/service/discovery/v3/discovery.pb.h" +#include "common/common/cleanup.h" #include "common/config/decoded_resource_impl.h" namespace Envoy { @@ -17,8 +18,20 @@ Watch* WatchMap::addWatch(SubscriptionCallbacks& callbacks, } void WatchMap::removeWatch(Watch* watch) { - wildcard_watches_.erase(watch); // may or may not be in there, but we want it gone. - watches_.erase(watch); + if (deferred_removed_during_update_ != nullptr) { + deferred_removed_during_update_->insert(watch); + } else { + wildcard_watches_.erase(watch); // may or may not be in there, but we want it gone. + watches_.erase(watch); + } +} + +void WatchMap::removeDeferredWatches() { + for (auto& watch : *deferred_removed_during_update_) { + wildcard_watches_.erase(watch); // may or may not be in there, but we want it gone. + watches_.erase(watch); + } + deferred_removed_during_update_ = nullptr; } AddedRemoved WatchMap::updateWatchInterest(Watch* watch, @@ -62,6 +75,10 @@ void WatchMap::onConfigUpdate(const Protobuf::RepeatedPtrField return; } + // Track any removals triggered by earlier watch updates. 
+ ASSERT(deferred_removed_during_update_ == nullptr); + deferred_removed_during_update_ = std::make_unique>(); + Cleanup cleanup([this] { removeDeferredWatches(); }); // Build a map from watches, to the set of updated resources that each watch cares about. Each // entry in the map is then a nice little bundle that can be fed directly into the individual // onConfigUpdate()s. @@ -80,6 +97,9 @@ void WatchMap::onConfigUpdate(const Protobuf::RepeatedPtrField const bool map_is_single_wildcard = (watches_.size() == 1 && wildcard_watches_.size() == 1); // We just bundled up the updates into nice per-watch packages. Now, deliver them. for (auto& watch : watches_) { + if (deferred_removed_during_update_->count(watch.get()) > 0) { + continue; + } const auto this_watch_updates = per_watch_updates.find(watch); if (this_watch_updates == per_watch_updates.end()) { // This update included no resources this watch cares about. @@ -90,12 +110,12 @@ void WatchMap::onConfigUpdate(const Protobuf::RepeatedPtrField // of this watch's resources, so the watch must be informed with an onConfigUpdate. // 3) Otherwise, we can skip onConfigUpdate for this watch. if (map_is_single_wildcard || !watch->state_of_the_world_empty_) { - watch->callbacks_.onConfigUpdate({}, version_info); watch->state_of_the_world_empty_ = true; + watch->callbacks_.onConfigUpdate({}, version_info); } } else { - watch->callbacks_.onConfigUpdate(this_watch_updates->second, version_info); watch->state_of_the_world_empty_ = false; + watch->callbacks_.onConfigUpdate(this_watch_updates->second, version_info); } } } @@ -130,6 +150,10 @@ void WatchMap::onConfigUpdate( const Protobuf::RepeatedPtrField& added_resources, const Protobuf::RepeatedPtrField& removed_resources, const std::string& system_version_info) { + // Track any removals triggered by earlier watch updates. + ASSERT(deferred_removed_during_update_ == nullptr); + deferred_removed_during_update_ = std::make_unique>(); + Cleanup cleanup([this] { removeDeferredWatches(); }); // Build a pair of maps: from watches, to the set of resources {added,removed} that each watch // cares about. Each entry in the map-pair is then a nice little bundle that can be fed directly // into the individual onConfigUpdate()s. @@ -159,6 +183,9 @@ void WatchMap::onConfigUpdate( // We just bundled up the updates into nice per-watch packages. Now, deliver them. for (const auto& added : per_watch_added) { const Watch* cur_watch = added.first; + if (deferred_removed_during_update_->count(cur_watch) > 0) { + continue; + } const auto removed = per_watch_removed.find(cur_watch); if (removed == per_watch_removed.end()) { // additions only, no removals @@ -172,6 +199,9 @@ void WatchMap::onConfigUpdate( } // Any removals-only updates will not have been picked up in the per_watch_added loop. 
for (auto& removed : per_watch_removed) { + if (deferred_removed_during_update_->count(removed.first) > 0) { + continue; + } removed.first->callbacks_.onConfigUpdate({}, removed.second, system_version_info); } } diff --git a/source/common/config/watch_map.h b/source/common/config/watch_map.h index e1b1236b5081..f1f7d09294ed 100644 --- a/source/common/config/watch_map.h +++ b/source/common/config/watch_map.h @@ -96,6 +96,8 @@ class WatchMap : public UntypedConfigUpdateCallbacks, public Logger::Loggable findAdditions(const std::vector& newly_added_to_watch, @@ -114,6 +116,11 @@ class WatchMap : public UntypedConfigUpdateCallbacks, public Logger::Loggable wildcard_watches_; + // Watches that have been removed inside the call stack of the WatchMap's onConfigUpdate(). This + // can happen when a watch's onConfigUpdate() results in another watch being removed via + // removeWatch(). + std::unique_ptr> deferred_removed_during_update_; + // Maps a resource name to the set of watches interested in that resource. Has two purposes: // 1) Acts as a reference count; no watches care anymore ==> the resource can be removed. // 2) Enables efficient lookup of all interested watches when a resource has been updated. diff --git a/source/common/router/rds_impl.cc b/source/common/router/rds_impl.cc index 414480e208b8..d84eb0a3d4f5 100644 --- a/source/common/router/rds_impl.cc +++ b/source/common/router/rds_impl.cc @@ -69,7 +69,9 @@ RdsRouteConfigSubscription::RdsRouteConfigSubscription( : Envoy::Config::SubscriptionBase( rds.config_source().resource_api_version(), factory_context.messageValidationContext().dynamicValidationVisitor(), "name"), - route_config_name_(rds.route_config_name()), factory_context_(factory_context), + route_config_name_(rds.route_config_name()), + scope_(factory_context.scope().createScope(stat_prefix + "rds." + route_config_name_ + ".")), + factory_context_(factory_context), parent_init_target_(fmt::format("RdsRouteConfigSubscription init {}", route_config_name_), [this]() { local_init_manager_.initialize(local_init_watcher_); }), local_init_watcher_(fmt::format("RDS local-init-watcher {}", rds.route_config_name()), @@ -78,7 +80,6 @@ RdsRouteConfigSubscription::RdsRouteConfigSubscription( fmt::format("RdsRouteConfigSubscription local-init-target {}", route_config_name_), [this]() { subscription_->start({route_config_name_}); }), local_init_manager_(fmt::format("RDS local-init-manager {}", route_config_name_)), - scope_(factory_context.scope().createScope(stat_prefix + "rds." + route_config_name_ + ".")), stat_prefix_(stat_prefix), stats_({ALL_RDS_STATS(POOL_COUNTER(*scope_))}), route_config_provider_manager_(route_config_provider_manager), manager_identifier_(manager_identifier) { diff --git a/source/common/router/rds_impl.h b/source/common/router/rds_impl.h index 5cf1a235c554..0e8f6630e59a 100644 --- a/source/common/router/rds_impl.h +++ b/source/common/router/rds_impl.h @@ -150,8 +150,10 @@ class RdsRouteConfigSubscription bool validateUpdateSize(int num_resources); - Envoy::Config::SubscriptionPtr subscription_; const std::string route_config_name_; + // This scope must outlive the subscription_ below as the subscription has derived stats. + Stats::ScopePtr scope_; + Envoy::Config::SubscriptionPtr subscription_; Server::Configuration::ServerFactoryContext& factory_context_; // Init target used to notify the parent init manager that the subscription [and its sub resource] @@ -162,7 +164,6 @@ class RdsRouteConfigSubscription // Target which starts the RDS subscription. 
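// An aside on the member reordering in this header: C++ destroys non-static data members in
// reverse declaration order, so declaring the stats scope before the subscription guarantees
// the scope outlives the subscription that derives stats from it. A standalone illustration
// with toy types (unrelated to the Envoy classes) of that ordering rule:

#include <iostream>

struct Tracer {
  explicit Tracer(const char* name) : name_(name) {}
  ~Tracer() { std::cout << "destroying " << name_ << "\n"; }
  const char* name_;
};

struct Holder {
  Tracer scope_{"scope"};               // declared first, destroyed last
  Tracer subscription_{"subscription"}; // declared last, destroyed first
};

int main() {
  Holder holder; // prints "destroying subscription" then "destroying scope"
  return 0;
}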
Init::TargetImpl local_init_target_; Init::ManagerImpl local_init_manager_; - Stats::ScopePtr scope_; std::string stat_prefix_; RdsStats stats_; RouteConfigProviderManagerImpl& route_config_provider_manager_; diff --git a/source/common/upstream/cluster_manager_impl.cc b/source/common/upstream/cluster_manager_impl.cc index 758a4aa25025..f9c8c4097767 100644 --- a/source/common/upstream/cluster_manager_impl.cc +++ b/source/common/upstream/cluster_manager_impl.cc @@ -159,9 +159,7 @@ void ClusterManagerInitHelper::maybeFinishInitialize() { if (cm_.adsMux()) { const auto type_urls = Config::getAllVersionTypeUrls(); - if (!cm_.adsMux()->paused(type_urls)) { - maybe_resume_eds = cm_.adsMux()->pause(type_urls); - } + maybe_resume_eds = cm_.adsMux()->pause(type_urls); } initializeSecondaryClusters(); } diff --git a/test/common/config/delta_subscription_impl_test.cc b/test/common/config/delta_subscription_impl_test.cc index 29c46f5d8096..9633eb08d34d 100644 --- a/test/common/config/delta_subscription_impl_test.cc +++ b/test/common/config/delta_subscription_impl_test.cc @@ -43,6 +43,9 @@ TEST_F(DeltaSubscriptionImplTest, UpdateResourcesCausesRequest) { TEST_F(DeltaSubscriptionImplTest, PauseHoldsRequest) { startSubscription({"name1", "name2", "name3"}); auto resume_sub = subscription_->pause(); + // If nested pause wasn't handled correctly, the single expectedSendMessage below would be + // insufficient. + auto nested_resume_sub = subscription_->pause(); expectSendMessage({"name4"}, {"name1", "name2"}, Grpc::Status::WellKnownGrpcStatus::Ok, "", {}); // If not for the pause, these updates would make the expectSendMessage fail due to too many diff --git a/test/common/config/grpc_mux_impl_test.cc b/test/common/config/grpc_mux_impl_test.cc index 04938ce6adc1..5a8bd21840db 100644 --- a/test/common/config/grpc_mux_impl_test.cc +++ b/test/common/config/grpc_mux_impl_test.cc @@ -50,7 +50,11 @@ class GrpcMuxImplTestBase : public testing::Test { GrpcMuxImplTestBase() : async_client_(new Grpc::MockAsyncClient()), control_plane_connected_state_( - stats_.gauge("control_plane.connected_state", Stats::Gauge::ImportMode::NeverImport)) {} + stats_.gauge("control_plane.connected_state", Stats::Gauge::ImportMode::NeverImport)), + control_plane_pending_requests_( + stats_.gauge("control_plane.pending_requests", Stats::Gauge::ImportMode::NeverImport)) + + {} void setup() { grpc_mux_ = std::make_unique( @@ -105,6 +109,7 @@ class GrpcMuxImplTestBase : public testing::Test { Stats::TestUtil::TestStore stats_; Envoy::Config::RateLimitSettings rate_limit_settings_; Stats::Gauge& control_plane_connected_state_; + Stats::Gauge& control_plane_pending_requests_; }; class GrpcMuxImplTest : public GrpcMuxImplTestBase { @@ -164,6 +169,7 @@ TEST_F(GrpcMuxImplTest, ResetStream) { EXPECT_CALL(*timer, enableTimer(_, _)); grpc_mux_->grpcStreamForTest().onRemoteClose(Grpc::Status::WellKnownGrpcStatus::Canceled, ""); EXPECT_EQ(0, control_plane_connected_state_.value()); + EXPECT_EQ(0, control_plane_pending_requests_.value()); EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); expectSendMessage("foo", {"x", "y"}, "", true); expectSendMessage("bar", {}, ""); @@ -198,6 +204,13 @@ TEST_F(GrpcMuxImplTest, PauseResume) { foo_zz_sub = grpc_mux_->addWatch("foo", {"zz"}, callbacks_, resource_decoder_); expectSendMessage("foo", {"zz", "z", "x", "y"}, ""); } + // When nesting, we only have a single resumption. 
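// An illustrative aside on the semantics exercised here: pause() now just bumps a per-type
// counter and the returned ScopedResume decrements it, so only releasing the last outstanding
// pause flushes a request that was elided while paused. A rough standalone sketch of that
// bookkeeping (invented names, not the Envoy API):

#include <cassert>
#include <cstdint>

struct ApiPauseState {
  uint32_t pauses_{0};  // count of unresumed pause() calls
  bool pending_{false}; // a discovery request was elided while paused
  bool paused() const { return pauses_ > 0; }
};

inline void pause(ApiPauseState& state) { ++state.pauses_; }

// resume() is what each ScopedResume-style cleanup would run; only the last one sends.
template <class FlushFn> void resume(ApiPauseState& state, FlushFn flush) {
  assert(state.paused());
  if (--state.pauses_ == 0 && state.pending_) {
    state.pending_ = false;
    flush(); // re-queue the elided discovery request
  }
}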
+ { + ScopedResume a = grpc_mux_->pause("foo"); + ScopedResume b = grpc_mux_->pause("foo"); + foo_zz_sub = grpc_mux_->addWatch("foo", {"zz"}, callbacks_, resource_decoder_); + expectSendMessage("foo", {"zz", "z", "x", "y"}, ""); + } grpc_mux_->pause("foo")->cancel(); } @@ -482,12 +495,13 @@ TEST_F(GrpcMuxImplTestWithMockTimeSystem, TooManyRequestsWithDefaultSettings) { } // Verifies that default rate limiting is enforced with empty RateLimitSettings. -TEST_F(GrpcMuxImplTestWithMockTimeSystem, TooManyRequestsWithEmptyRateLimitSettings) { +TEST_F(GrpcMuxImplTest, TooManyRequestsWithEmptyRateLimitSettings) { // Validate that request drain timer is created. Event::MockTimer* timer = nullptr; Event::MockTimer* drain_request_timer = nullptr; Event::TimerCb timer_cb; + Event::TimerCb drain_timer_cb; EXPECT_CALL(dispatcher_, createTimer_(_)) .WillOnce(Invoke([&timer, &timer_cb](Event::TimerCb cb) { timer_cb = cb; @@ -495,20 +509,19 @@ TEST_F(GrpcMuxImplTestWithMockTimeSystem, TooManyRequestsWithEmptyRateLimitSetti timer = new Event::MockTimer(); return timer; })) - .WillOnce(Invoke([&drain_request_timer, &timer_cb](Event::TimerCb cb) { - timer_cb = cb; + .WillOnce(Invoke([&drain_request_timer, &drain_timer_cb](Event::TimerCb cb) { + drain_timer_cb = cb; EXPECT_EQ(nullptr, drain_request_timer); drain_request_timer = new Event::MockTimer(); return drain_request_timer; })); - EXPECT_CALL(*mock_time_system_, monotonicTime()) - .WillRepeatedly(Return(std::chrono::steady_clock::time_point{})); RateLimitSettings custom_rate_limit_settings; custom_rate_limit_settings.enabled_ = true; setup(custom_rate_limit_settings); - EXPECT_CALL(async_stream_, sendMessageRaw_(_, false)).Times(AtLeast(99)); + // Attempt to send 99 messages. One of them is rate limited (and we never drain). + EXPECT_CALL(async_stream_, sendMessageRaw_(_, false)).Times(99); EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); const auto onReceiveMessage = [&](uint64_t burst) { @@ -527,11 +540,29 @@ TEST_F(GrpcMuxImplTestWithMockTimeSystem, TooManyRequestsWithEmptyRateLimitSetti // Validate that drain_request_timer is enabled when there are no tokens. EXPECT_CALL(*drain_request_timer, enableTimer(std::chrono::milliseconds(100), _)); - onReceiveMessage(99); - EXPECT_EQ(1, stats_.counter("control_plane.rate_limit_enforced").value()); - EXPECT_EQ( - 1, - stats_.gauge("control_plane.pending_requests", Stats::Gauge::ImportMode::Accumulate).value()); + // The drain timer enable is checked twice, once when we limit, again when the watch is destroyed. + EXPECT_CALL(*drain_request_timer, enabled()).Times(11); + onReceiveMessage(110); + EXPECT_EQ(11, stats_.counter("control_plane.rate_limit_enforced").value()); + EXPECT_EQ(11, control_plane_pending_requests_.value()); + + // Validate that when we reset a stream with pending requests, it reverts back to the initial + // query (i.e. the queue is discarded). + EXPECT_CALL(callbacks_, + onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure, _)); + EXPECT_CALL(random_, random()); + ASSERT_TRUE(timer != nullptr); // initialized from dispatcher mock. 
+ EXPECT_CALL(*timer, enableTimer(_, _)); + grpc_mux_->grpcStreamForTest().onRemoteClose(Grpc::Status::WellKnownGrpcStatus::Canceled, ""); + EXPECT_EQ(11, control_plane_pending_requests_.value()); + EXPECT_EQ(0, control_plane_connected_state_.value()); + EXPECT_CALL(async_stream_, sendMessageRaw_(_, false)); + EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); + time_system_.setMonotonicTime(std::chrono::seconds(30)); + timer_cb(); + EXPECT_EQ(0, control_plane_pending_requests_.value()); + // One more message on the way out when the watch is destroyed. + EXPECT_CALL(async_stream_, sendMessageRaw_(_, false)); } // Verifies that rate limiting is enforced with custom RateLimitSettings. @@ -585,20 +616,18 @@ TEST_F(GrpcMuxImplTest, TooManyRequestsWithCustomRateLimitSettings) { EXPECT_EQ(0, stats_.counter("control_plane.rate_limit_enforced").value()); // Validate that drain_request_timer is enabled when there are no tokens. - EXPECT_CALL(*drain_request_timer, enableTimer(std::chrono::milliseconds(500), _)) - .Times(AtLeast(1)); + EXPECT_CALL(*drain_request_timer, enableTimer(std::chrono::milliseconds(500), _)); + EXPECT_CALL(*drain_request_timer, enabled()).Times(11); onReceiveMessage(160); - EXPECT_EQ(12, stats_.counter("control_plane.rate_limit_enforced").value()); - Stats::Gauge& pending_requests = - stats_.gauge("control_plane.pending_requests", Stats::Gauge::ImportMode::Accumulate); - EXPECT_EQ(12, pending_requests.value()); + EXPECT_EQ(11, stats_.counter("control_plane.rate_limit_enforced").value()); + EXPECT_EQ(11, control_plane_pending_requests_.value()); // Validate that drain requests call when there are multiple requests in queue. time_system_.setMonotonicTime(std::chrono::seconds(10)); drain_timer_cb(); // Check that the pending_requests stat is updated with the queue drain. - EXPECT_EQ(0, pending_requests.value()); + EXPECT_EQ(0, control_plane_pending_requests_.value()); } // Verifies that a message with no resources is accepted. diff --git a/test/common/config/pausable_ack_queue_test.cc b/test/common/config/pausable_ack_queue_test.cc index b282c2092992..f817cc7ff52a 100644 --- a/test/common/config/pausable_ack_queue_test.cc +++ b/test/common/config/pausable_ack_queue_test.cc @@ -50,6 +50,15 @@ TEST(PausableAckQueueTest, TestPauseResume) { EXPECT_EQ("nonce2", p.front().nonce_); EXPECT_EQ("type2", p.front().type_url_); + // validate the above result is invariant even if we nest pauses. + p.pause("type1"); + EXPECT_EQ(4, p.size()); + EXPECT_EQ("nonce2", p.front().nonce_); + EXPECT_EQ("type2", p.front().type_url_); + p.resume("type1"); + EXPECT_EQ("nonce2", p.front().nonce_); + EXPECT_EQ("type2", p.front().type_url_); + UpdateAck ack = p.popFront(); EXPECT_EQ("nonce2", ack.nonce_); EXPECT_EQ("type2", ack.type_url_); diff --git a/test/common/config/watch_map_test.cc b/test/common/config/watch_map_test.cc index 8742979a9653..6749cb901a8a 100644 --- a/test/common/config/watch_map_test.cc +++ b/test/common/config/watch_map_test.cc @@ -15,7 +15,10 @@ #include "gtest/gtest.h" using ::testing::_; +using ::testing::AtMost; using ::testing::Invoke; +using ::testing::InvokeWithoutArgs; +using ::testing::NiceMock; namespace Envoy { namespace Config { @@ -233,6 +236,75 @@ TEST(WatchMapTest, Overlap) { } } +// These are regression tests for #11877, validate that when two watches point at the same +// watched resource, and an update to one of the watches removes one or both of them, that +// WatchMap defers deletes and doesn't crash. 
+class SameWatchRemoval : public testing::Test { +public: + void SetUp() override { + envoy::config::endpoint::v3::ClusterLoadAssignment alice; + alice.set_cluster_name("alice"); + updated_resources_.Add()->PackFrom(alice); + watch1_ = watch_map_.addWatch(callbacks1_, resource_decoder_); + watch2_ = watch_map_.addWatch(callbacks2_, resource_decoder_); + watch_map_.updateWatchInterest(watch1_, {"alice"}); + watch_map_.updateWatchInterest(watch2_, {"alice"}); + } + + void removeAllInterest() { + ASSERT_FALSE(watch_cb_invoked_); + watch_cb_invoked_ = true; + watch_map_.removeWatch(watch1_); + watch_map_.removeWatch(watch2_); + } + + TestUtility::TestOpaqueResourceDecoderImpl + resource_decoder_{"cluster_name"}; + WatchMap watch_map_; + NiceMock callbacks1_; + MockSubscriptionCallbacks callbacks2_; + Protobuf::RepeatedPtrField updated_resources_; + Watch* watch1_; + Watch* watch2_; + bool watch_cb_invoked_{}; +}; + +TEST_F(SameWatchRemoval, SameWatchRemovalSotw) { + EXPECT_CALL(callbacks1_, onConfigUpdate(_, _)) + .Times(AtMost(1)) + .WillRepeatedly(InvokeWithoutArgs([this] { removeAllInterest(); })); + EXPECT_CALL(callbacks2_, onConfigUpdate(_, _)) + .Times(AtMost(1)) + .WillRepeatedly(InvokeWithoutArgs([this] { removeAllInterest(); })); + watch_map_.onConfigUpdate(updated_resources_, "version1"); +} + +TEST_F(SameWatchRemoval, SameWatchRemovalDeltaAdd) { + Protobuf::RepeatedPtrField delta_resources = + wrapInResource(updated_resources_, "version1"); + Protobuf::RepeatedPtrField removed_names_proto; + + EXPECT_CALL(callbacks1_, onConfigUpdate(_, _, _)) + .Times(AtMost(1)) + .WillRepeatedly(InvokeWithoutArgs([this] { removeAllInterest(); })); + EXPECT_CALL(callbacks2_, onConfigUpdate(_, _, _)) + .Times(AtMost(1)) + .WillRepeatedly(InvokeWithoutArgs([this] { removeAllInterest(); })); + watch_map_.onConfigUpdate(delta_resources, removed_names_proto, "version1"); +} + +TEST_F(SameWatchRemoval, SameWatchRemovalDeltaRemove) { + Protobuf::RepeatedPtrField removed_names_proto; + *removed_names_proto.Add() = "alice"; + EXPECT_CALL(callbacks1_, onConfigUpdate(_, _, _)) + .Times(AtMost(1)) + .WillRepeatedly(InvokeWithoutArgs([this] { removeAllInterest(); })); + EXPECT_CALL(callbacks2_, onConfigUpdate(_, _, _)) + .Times(AtMost(1)) + .WillRepeatedly(InvokeWithoutArgs([this] { removeAllInterest(); })); + watch_map_.onConfigUpdate({}, removed_names_proto, "version1"); +} + // Checks the following: // First watch on a resource name ==> updateWatchInterest() returns "add it to subscription" // Watch loses interest ==> "remove it from subscription" @@ -270,7 +342,8 @@ TEST(WatchMapTest, AddRemoveAdd) { { AddedRemoved added_removed = watch_map.updateWatchInterest(watch1, {"dummy"}); EXPECT_TRUE(added_removed.added_.empty()); - EXPECT_EQ(std::set({"alice"}), added_removed.removed_); // remove from subscription + EXPECT_EQ(std::set({"alice"}), + added_removed.removed_); // remove from subscription // (The xDS client should have responded to updateWatchInterest()'s return value by removing // Alice from the subscription, so onConfigUpdate() calls should be impossible right now.) @@ -367,11 +440,11 @@ TEST(WatchMapTest, WatchingEverything) { doDeltaAndSotwUpdate(watch_map, updated_resources, {}, "version1"); } -// Delta onConfigUpdate has some slightly subtle details with how it handles the three cases where a -// watch receives {only updates, updates+removals, only removals} to its resources. 
This test +// Delta onConfigUpdate has some slightly subtle details with how it handles the three cases where +// a watch receives {only updates, updates+removals, only removals} to its resources. This test // exercise those cases. Also, the removal-only case tests that SotW does call a watch's -// onConfigUpdate even if none of the watch's interested resources are among the updated resources. -// (Which ensures we deliver empty config updates when a resource is dropped.) +// onConfigUpdate even if none of the watch's interested resources are among the updated +// resources. (Which ensures we deliver empty config updates when a resource is dropped.) TEST(WatchMapTest, DeltaOnConfigUpdate) { MockSubscriptionCallbacks callbacks1; MockSubscriptionCallbacks callbacks2; diff --git a/test/config/utility.cc b/test/config/utility.cc index f57e7af4286c..921ebb81fe1d 100644 --- a/test/config/utility.cc +++ b/test/config/utility.cc @@ -397,6 +397,37 @@ ConfigHelper::buildCluster(const std::string& name, const std::string& lb_policy return cluster; } +envoy::config::cluster::v3::Cluster +ConfigHelper::buildTlsCluster(const std::string& name, const std::string& lb_policy, + envoy::config::core::v3::ApiVersion api_version) { + API_NO_BOOST(envoy::config::cluster::v3::Cluster) cluster; + TestUtility::loadFromYaml( + fmt::format(R"EOF( + name: {} + connect_timeout: 5s + type: EDS + eds_cluster_config: + eds_config: + resource_api_version: {} + ads: {{}} + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + validation_context: + trusted_ca: + filename: {} + lb_policy: {} + http2_protocol_options: {{}} + )EOF", + name, apiVersionStr(api_version), + TestEnvironment::runfilesPath("test/config/integration/certs/upstreamcacert.pem"), + lb_policy), + cluster, shouldBoost(api_version)); + return cluster; +} + envoy::config::endpoint::v3::ClusterLoadAssignment ConfigHelper::buildClusterLoadAssignment(const std::string& name, const std::string& address, uint32_t port, diff --git a/test/config/utility.h b/test/config/utility.h index b4217d4f31f5..34aa4ba475ad 100644 --- a/test/config/utility.h +++ b/test/config/utility.h @@ -110,6 +110,10 @@ class ConfigHelper { const std::string& name, const std::string& lb_policy = "ROUND_ROBIN", envoy::config::core::v3::ApiVersion api_version = envoy::config::core::v3::ApiVersion::V3); + static envoy::config::cluster::v3::Cluster buildTlsCluster( + const std::string& name, const std::string& lb_policy = "ROUND_ROBIN", + envoy::config::core::v3::ApiVersion api_version = envoy::config::core::v3::ApiVersion::V3); + static envoy::config::endpoint::v3::ClusterLoadAssignment buildClusterLoadAssignment( const std::string& name, const std::string& ip_version, uint32_t port, envoy::config::core::v3::ApiVersion api_version = envoy::config::core::v3::ApiVersion::V3); diff --git a/test/integration/ads_integration.cc b/test/integration/ads_integration.cc index 63c8279d5138..7d81b1de0a1b 100644 --- a/test/integration/ads_integration.cc +++ b/test/integration/ads_integration.cc @@ -38,6 +38,10 @@ envoy::config::cluster::v3::Cluster AdsIntegrationTest::buildCluster(const std:: return ConfigHelper::buildCluster(name, "ROUND_ROBIN", api_version_); } +envoy::config::cluster::v3::Cluster AdsIntegrationTest::buildTlsCluster(const std::string& name) { + return ConfigHelper::buildTlsCluster(name, "ROUND_ROBIN", api_version_); +} + envoy::config::cluster::v3::Cluster 
AdsIntegrationTest::buildRedisCluster(const std::string& name) { return ConfigHelper::buildCluster(name, "MAGLEV", api_version_); } @@ -49,6 +53,12 @@ AdsIntegrationTest::buildClusterLoadAssignment(const std::string& name) { fake_upstreams_[0]->localAddress()->ip()->port(), api_version_); } +envoy::config::endpoint::v3::ClusterLoadAssignment +AdsIntegrationTest::buildTlsClusterLoadAssignment(const std::string& name) { + return ConfigHelper::buildClusterLoadAssignment( + name, Network::Test::getLoopbackAddressString(ipVersion()), 8443, api_version_); +} + envoy::config::listener::v3::Listener AdsIntegrationTest::buildListener(const std::string& name, const std::string& route_config, const std::string& stat_prefix) { diff --git a/test/integration/ads_integration.h b/test/integration/ads_integration.h index 9bac58f602d0..0da99aea566a 100644 --- a/test/integration/ads_integration.h +++ b/test/integration/ads_integration.h @@ -24,11 +24,16 @@ class AdsIntegrationTest : public Grpc::DeltaSotwIntegrationParamTest, public Ht envoy::config::cluster::v3::Cluster buildCluster(const std::string& name); + envoy::config::cluster::v3::Cluster buildTlsCluster(const std::string& name); + envoy::config::cluster::v3::Cluster buildRedisCluster(const std::string& name); envoy::config::endpoint::v3::ClusterLoadAssignment buildClusterLoadAssignment(const std::string& name); + envoy::config::endpoint::v3::ClusterLoadAssignment + buildTlsClusterLoadAssignment(const std::string& name); + envoy::config::listener::v3::Listener buildListener(const std::string& name, const std::string& route_config, const std::string& stat_prefix = "ads_test"); diff --git a/test/integration/ads_integration_test.cc b/test/integration/ads_integration_test.cc index 71917d02b0a3..021beac26885 100644 --- a/test/integration/ads_integration_test.cc +++ b/test/integration/ads_integration_test.cc @@ -182,6 +182,60 @@ TEST_P(AdsIntegrationTest, RdsAfterLdsWithNoRdsChanges) { makeSingleRequest(); } +// Regression test for #11877, validate behavior of EDS updates when a cluster is updated and +// an active cluster is replaced by a newer cluster undergoing warming. 
+TEST_P(AdsIntegrationTest, CdsEdsReplacementWarming) { + initialize(); + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "", {}, {}, {}, true)); + sendDiscoveryResponse(Config::TypeUrl::get().Cluster, + {buildCluster("cluster_0")}, + {buildCluster("cluster_0")}, {}, "1"); + + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, "", + {"cluster_0"}, {"cluster_0"}, {})); + sendDiscoveryResponse( + Config::TypeUrl::get().ClusterLoadAssignment, {buildClusterLoadAssignment("cluster_0")}, + {buildClusterLoadAssignment("cluster_0")}, {}, "1"); + + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "1", {}, {}, {})); + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Listener, "", {}, {}, {})); + sendDiscoveryResponse( + Config::TypeUrl::get().Listener, {buildListener("listener_0", "route_config_0")}, + {buildListener("listener_0", "route_config_0")}, {}, "1"); + + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, "1", + {"cluster_0"}, {}, {})); + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().RouteConfiguration, "", + {"route_config_0"}, {"route_config_0"}, {})); + sendDiscoveryResponse( + Config::TypeUrl::get().RouteConfiguration, {buildRouteConfig("route_config_0", "cluster_0")}, + {buildRouteConfig("route_config_0", "cluster_0")}, {}, "1"); + + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Listener, "1", {}, {}, {})); + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().RouteConfiguration, "1", + {"route_config_0"}, {}, {})); + + test_server_->waitForCounterGe("listener_manager.listener_create_success", 1); + makeSingleRequest(); + + sendDiscoveryResponse( + Config::TypeUrl::get().Cluster, {buildTlsCluster("cluster_0")}, + {buildTlsCluster("cluster_0")}, {}, "2"); + // Inconsistent SotW and delta behaviors for warming, see + // https://github.com/envoyproxy/envoy/issues/11477#issuecomment-657855029. + if (sotw_or_delta_ != Grpc::SotwOrDelta::Delta) { + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, "1", + {"cluster_0"}, {}, {})); + } + sendDiscoveryResponse( + Config::TypeUrl::get().ClusterLoadAssignment, {buildTlsClusterLoadAssignment("cluster_0")}, + {buildTlsClusterLoadAssignment("cluster_0")}, {}, "2"); + + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "2", {}, {}, {}, true)); + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, "2", + {"cluster_0"}, {}, {})); +} + // Validate that the request with duplicate clusters in the initial request during server init is // rejected. TEST_P(AdsIntegrationTest, DuplicateInitialClusters) { @@ -321,16 +375,12 @@ TEST_P(AdsIntegrationTest, CdsPausedDuringWarming) { test_server_->waitForCounterGe("listener_manager.listener_create_success", 1); makeSingleRequest(); - EXPECT_FALSE( - test_server_->server().clusterManager().adsMux()->paused(Config::TypeUrl::get().Cluster)); // Send the first warming cluster. 
sendDiscoveryResponse( Config::TypeUrl::get().Cluster, {buildCluster("warming_cluster_1")}, {buildCluster("warming_cluster_1")}, {"cluster_0"}, "2"); test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 1); - EXPECT_TRUE( - test_server_->server().clusterManager().adsMux()->paused(Config::TypeUrl::get().Cluster)); EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, "1", {"warming_cluster_1"}, {"warming_cluster_1"}, {"cluster_0"})); @@ -346,8 +396,6 @@ TEST_P(AdsIntegrationTest, CdsPausedDuringWarming) { {"warming_cluster_2", "warming_cluster_1"}, {"warming_cluster_2"}, {})); - EXPECT_TRUE( - test_server_->server().clusterManager().adsMux()->paused(Config::TypeUrl::get().Cluster)); // Finish warming the clusters. sendDiscoveryResponse( Config::TypeUrl::get().ClusterLoadAssignment, @@ -359,8 +407,6 @@ TEST_P(AdsIntegrationTest, CdsPausedDuringWarming) { // Validate that clusters are warmed. test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 0); - EXPECT_FALSE( - test_server_->server().clusterManager().adsMux()->paused(Config::TypeUrl::get().Cluster)); // CDS is resumed and EDS response was acknowledged. if (sotw_or_delta_ == Grpc::SotwOrDelta::Delta) { @@ -1034,14 +1080,12 @@ TEST_P(AdsClusterV3Test, CdsPausedDuringWarming) { test_server_->waitForCounterGe("listener_manager.listener_create_success", 1); makeSingleRequest(); - EXPECT_FALSE(test_server_->server().clusterManager().adsMux()->paused(cds_type_url)); // Send the first warming cluster. sendDiscoveryResponse( cds_type_url, {buildCluster("warming_cluster_1")}, {buildCluster("warming_cluster_1")}, {"cluster_0"}, "2", false); test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 1); - EXPECT_TRUE(test_server_->server().clusterManager().adsMux()->paused(cds_type_url)); EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, "1", {"warming_cluster_1"}, {"warming_cluster_1"}, {"cluster_0"})); @@ -1056,7 +1100,6 @@ TEST_P(AdsClusterV3Test, CdsPausedDuringWarming) { EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, "1", {"warming_cluster_2", "warming_cluster_1"}, {"warming_cluster_2"}, {})); - EXPECT_TRUE(test_server_->server().clusterManager().adsMux()->paused(cds_type_url)); // Finish warming the clusters. sendDiscoveryResponse( eds_type_url, @@ -1068,7 +1111,6 @@ TEST_P(AdsClusterV3Test, CdsPausedDuringWarming) { // Validate that clusters are warmed. test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 0); - EXPECT_FALSE(test_server_->server().clusterManager().adsMux()->paused(cds_type_url)); // CDS is resumed and EDS response was acknowledged. 
if (sotw_or_delta_ == Grpc::SotwOrDelta::Delta) { diff --git a/test/mocks/config/mocks.h b/test/mocks/config/mocks.h index 0ad8e2f15088..29412fffbd06 100644 --- a/test/mocks/config/mocks.h +++ b/test/mocks/config/mocks.h @@ -97,8 +97,6 @@ class MockGrpcMux : public GrpcMux { MOCK_METHOD(void, start, (), (override)); MOCK_METHOD(ScopedResume, pause, (const std::string& type_url), (override)); MOCK_METHOD(ScopedResume, pause, (const std::vector type_urls), (override)); - MOCK_METHOD(bool, paused, (const std::string& type_url), (const, override)); - MOCK_METHOD(bool, paused, (const std::vector type_urls), (const, override)); MOCK_METHOD(void, addSubscription, (const std::set& resources, const std::string& type_url, From 2113df3fa4e1936b2c868bf5ed4ae06bb73f8256 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Guti=C3=A9rrez=20Segal=C3=A9s?= Date: Fri, 24 Jul 2020 00:52:58 -0400 Subject: [PATCH 739/909] gzip filter: fix docs wrt runtime feature flag (#12263) Although this filter is deprecated in favor of the compressor filter, let's go ahead and fix the incorrect reference to the now optional runtime feature flag. Signed-off-by: Raul Gutierrez Segales --- docs/root/configuration/http/http_filters/gzip_filter.rst | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/docs/root/configuration/http/http_filters/gzip_filter.rst b/docs/root/configuration/http/http_filters/gzip_filter.rst index f492b13d102e..71947d2510ac 100644 --- a/docs/root/configuration/http/http_filters/gzip_filter.rst +++ b/docs/root/configuration/http/http_filters/gzip_filter.rst @@ -29,11 +29,9 @@ Configuration Runtime ------- -The Gzip filter supports the following runtime settings: - -gzip.filter_enabled - The % of requests for which the filter is enabled. Default is 100. - +The Gzip filter can be runtime feature flagged via the :ref:`runtime_enabled +` +configuration field within the compressor field.
How it works ------------ From ee46ce739ab5ab8a125213bd5b11ff43350fb523 Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Thu, 23 Jul 2020 21:55:50 -0700 Subject: [PATCH 740/909] ssl: add test for changed cipher suite support when boringssl version changes (#12240) Signed-off-by: Greg Greenway --- .../tls/context_impl_test.cc | 51 +++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/test/extensions/transport_sockets/tls/context_impl_test.cc b/test/extensions/transport_sockets/tls/context_impl_test.cc index c53c6a03cfc1..053f6468796a 100644 --- a/test/extensions/transport_sockets/tls/context_impl_test.cc +++ b/test/extensions/transport_sockets/tls/context_impl_test.cc @@ -42,6 +42,57 @@ namespace Extensions { namespace TransportSockets { namespace Tls { +namespace { +const std::vector& knownCipherSuites() { + CONSTRUCT_ON_FIRST_USE(std::vector, {"ECDHE-ECDSA-AES128-GCM-SHA256", + "ECDHE-RSA-AES128-GCM-SHA256", + "ECDHE-ECDSA-AES256-GCM-SHA384", + "ECDHE-RSA-AES256-GCM-SHA384", + "ECDHE-ECDSA-CHACHA20-POLY1305", + "ECDHE-RSA-CHACHA20-POLY1305", + "ECDHE-PSK-CHACHA20-POLY1305", + "ECDHE-ECDSA-AES128-SHA", + "ECDHE-RSA-AES128-SHA", + "ECDHE-PSK-AES128-CBC-SHA", + "ECDHE-ECDSA-AES256-SHA", + "ECDHE-RSA-AES256-SHA", + "ECDHE-PSK-AES256-CBC-SHA", + "AES128-GCM-SHA256", + "AES256-GCM-SHA384", + "AES128-SHA", + "PSK-AES128-CBC-SHA", + "AES256-SHA", + "PSK-AES256-CBC-SHA", + "DES-CBC3-SHA"}); +} +} // namespace + +class SslLibraryCipherSuiteSupport : public ::testing::TestWithParam {}; + +INSTANTIATE_TEST_SUITE_P(CipherSuites, SslLibraryCipherSuiteSupport, + ::testing::ValuesIn(knownCipherSuites())); + +// Tests for whether new cipher suites are added. When they are, they must be added to +// knownCipherSuites() so that this test can detect if they are removed in the future. +TEST_F(SslLibraryCipherSuiteSupport, CipherSuitesNotAdded) { + bssl::UniquePtr ctx(SSL_CTX_new(TLS_method())); + EXPECT_NE(0, SSL_CTX_set_strict_cipher_list(ctx.get(), "ALL")); + + std::vector present_cipher_suites; + for (const SSL_CIPHER* cipher : SSL_CTX_get_ciphers(ctx.get())) { + present_cipher_suites.push_back(SSL_CIPHER_get_name(cipher)); + } + EXPECT_THAT(present_cipher_suites, testing::IsSubsetOf(knownCipherSuites())); +} + +// Test that no previously supported cipher suites were removed from the SSL library. If a cipher +// suite is removed, it must be added to the release notes as an incompatible change, because it can +// cause previously loadable configurations to no longer load if they reference the cipher suite. 
+TEST_P(SslLibraryCipherSuiteSupport, CipherSuitesNotRemoved) { + bssl::UniquePtr ctx(SSL_CTX_new(TLS_method())); + EXPECT_NE(0, SSL_CTX_set_strict_cipher_list(ctx.get(), GetParam().c_str())); +} + class SslContextImplTest : public SslCertsTest { protected: Event::SimulatedTimeSystem time_system_; From 6434bbdfa4a130bc46a8d1b193de38a9192df71c Mon Sep 17 00:00:00 2001 From: asraa Date: Fri, 24 Jul 2020 00:56:41 -0400 Subject: [PATCH 741/909] minor fix for integration test (#12212) Signed-off-by: Asra Ali --- test/integration/fake_upstream.cc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/test/integration/fake_upstream.cc b/test/integration/fake_upstream.cc index 68f4ab559bdb..f01ca8a8ca26 100644 --- a/test/integration/fake_upstream.cc +++ b/test/integration/fake_upstream.cc @@ -287,11 +287,11 @@ FakeHttpConnection::FakeHttpConnection( http1_settings.enable_trailers_ = true; Http::Http1::CodecStats& stats = fake_upstream.http1CodecStats(); #ifdef ENVOY_USE_LEGACY_CODECS_IN_INTEGRATION_TESTS - codec_ = std::make_unique( + codec_ = std::make_unique( shared_connection_.connection(), stats, *this, http1_settings, max_request_headers_kb, max_request_headers_count, headers_with_underscores_action); #else - codec_ = std::make_unique( + codec_ = std::make_unique( shared_connection_.connection(), stats, *this, http1_settings, max_request_headers_kb, max_request_headers_count, headers_with_underscores_action); #endif @@ -303,11 +303,11 @@ FakeHttpConnection::FakeHttpConnection( http2_options.set_allow_metadata(true); Http::Http2::CodecStats& stats = fake_upstream.http2CodecStats(); #ifdef ENVOY_USE_LEGACY_CODECS_IN_INTEGRATION_TESTS - codec_ = std::make_unique( + codec_ = std::make_unique( shared_connection_.connection(), *this, stats, http2_options, max_request_headers_kb, max_request_headers_count, headers_with_underscores_action); #else - codec_ = std::make_unique( + codec_ = std::make_unique( shared_connection_.connection(), *this, stats, http2_options, max_request_headers_kb, max_request_headers_count, headers_with_underscores_action); #endif From 08464ecdc0c93846f3d039d0f0c6fed935f5bdc8 Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Thu, 23 Jul 2020 22:54:21 -0700 Subject: [PATCH 742/909] tcp tunneling: fix integration test flake (#12267) We need to wait for all listeners to be up. 
Fixes https://github.com/envoyproxy/envoy/issues/12253 (and maybe other flakes) Risk Level: None Testing: Existing tests Docs Changes: N/A Release Notes: N/A Signed-off-by: Matt Klein --- test/integration/api_listener_integration_test.cc | 7 ++----- test/integration/integration.cc | 4 +++- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/test/integration/api_listener_integration_test.cc b/test/integration/api_listener_integration_test.cc index c71d57506b7c..0005e9a83f56 100644 --- a/test/integration/api_listener_integration_test.cc +++ b/test/integration/api_listener_integration_test.cc @@ -19,15 +19,12 @@ class ApiListenerIntegrationTest : public BaseIntegrationTest, ApiListenerIntegrationTest() : BaseIntegrationTest(GetParam(), bootstrapConfig()) { use_lds_ = false; autonomous_upstream_ = true; + defer_listener_finalization_ = true; } void SetUp() override { config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { - // currently ApiListener does not trigger this wait - // https://github.com/envoyproxy/envoy/blob/0b92c58d08d28ba7ef0ed5aaf44f90f0fccc5dce/test/integration/integration.cc#L454 - // Thus, the ApiListener has to be added in addition to the already existing listener in the - // config. - bootstrap.mutable_static_resources()->add_listeners()->MergeFrom( + bootstrap.mutable_static_resources()->mutable_listeners(0)->MergeFrom( Server::parseListenerFromV2Yaml(apiListenerConfig())); }); } diff --git a/test/integration/integration.cc b/test/integration/integration.cc index f471ca50bbb8..f71a577dc6e3 100644 --- a/test/integration/integration.cc +++ b/test/integration/integration.cc @@ -490,7 +490,9 @@ void BaseIntegrationTest::createGeneratedApiTestServer( const char* rejected = "listener_manager.lds.update_rejected"; for (Stats::CounterSharedPtr success_counter = test_server_->counter(success), rejected_counter = test_server_->counter(rejected); - (success_counter == nullptr || success_counter->value() < concurrency_) && + (success_counter == nullptr || + success_counter->value() < + concurrency_ * config_helper_.bootstrap().static_resources().listeners_size()) && (!allow_lds_rejection || rejected_counter == nullptr || rejected_counter->value() == 0); success_counter = test_server_->counter(success), rejected_counter = test_server_->counter(rejected)) { From 20c32d2db78168249f47cc781ed25f999ef13a52 Mon Sep 17 00:00:00 2001 From: htuch Date: Fri, 24 Jul 2020 14:18:12 -0400 Subject: [PATCH 743/909] security: some GREYFOX inspired policy fine tunings. (#12276) We heard back from Istio that release adjacency to EOQ wasn't great, and from other internal teams that more details on the CVEs in the distributor mailout would be helpful. Signed-off-by: Harvey Tuch --- SECURITY.md | 3 +++ security/email-templates.md | 1 + 2 files changed, 4 insertions(+) diff --git a/SECURITY.md b/SECURITY.md index a195ce706bc7..3483408e7ea8 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -124,6 +124,9 @@ to perform a release within this time window. If there are exceptional circumsta security team will raise this window to four weeks. The release window will be reduced if the security issue is public or embargo is broken. +We will endeavor not to overlap this three week window with or place it adjacent to major corporate +holiday periods or end-of-quarter (e.g. impacting downstream Istio releases), where possible. 
+ ### Fix and disclosure SLOs * All reports to envoy-security@googlegroups.com will be triaged and have an diff --git a/security/email-templates.md b/security/email-templates.md index e58dfdc91747..ffd0232c7798 100644 --- a/security/email-templates.md +++ b/security/email-templates.md @@ -50,6 +50,7 @@ Envoy maintainers on the Envoy GitHub. We will address the following CVE(s): * CVE-YEAR-ABCDEF (CVSS score $CVSS, $SEVERITY): $CVESUMMARY + - Link to the appropriate section of the CVE writeup document with gh-cve-template.md content. ... We intend to make candidates release patches available under embargo on the From 2a34c76be781c8a839b74477b196f119acc51398 Mon Sep 17 00:00:00 2001 From: Dhi Aurrahman Date: Sat, 25 Jul 2020 01:19:34 +0700 Subject: [PATCH 744/909] build: Use c++17 when compiling googleurl (#12269) Signed-off-by: Dhi Aurrahman --- bazel/external/googleurl.patch | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bazel/external/googleurl.patch b/bazel/external/googleurl.patch index 72e3991b4ff0..fe867e5bedc6 100644 --- a/bazel/external/googleurl.patch +++ b/bazel/external/googleurl.patch @@ -77,10 +77,10 @@ index d5fca65..fc0d7e5 100644 -] +_default_copts = select({ + "@envoy//bazel:windows_x86_64": [ -+ "/std:c++14", ++ "/std:c++17", + ], + "//conditions:default": [ -+ "-std=c++14", ++ "-std=c++17", + "-fno-strict-aliasing", + ], +}) @@ -114,6 +114,6 @@ index 0126bdc..5d1a171 100644 "//base", "//base/strings", "//polyfills", -+ "@org_unicode_icuuc//:common", ++ "@org_unicode_icuuc//:common", ], ) From 15e499a6ea6f4f616aa6830f99aa69b7c72dc85a Mon Sep 17 00:00:00 2001 From: Martin Matusiak Date: Sat, 25 Jul 2020 07:14:38 +1000 Subject: [PATCH 745/909] docs: remove incorrect statement about max_retries (#12248) Signed-off-by: Martin Matusiak --- docs/root/configuration/http/http_filters/router_filter.rst | 2 -- 1 file changed, 2 deletions(-) diff --git a/docs/root/configuration/http/http_filters/router_filter.rst b/docs/root/configuration/http/http_filters/router_filter.rst index 1446c02c0256..0aaa931891e9 100644 --- a/docs/root/configuration/http/http_filters/router_filter.rst +++ b/docs/root/configuration/http/http_filters/router_filter.rst @@ -51,8 +51,6 @@ A few notes on how Envoy does retries: upstream.base_retry_backoff_ms runtime parameter. The back-off intervals can also be modified by configuring the retry policy's :ref:`retry back-off `. -* If max retries is set both by header as well as in the route configuration, the maximum value is - taken when determining the max retries to use for the request. .. 
_config_http_filters_router_x-envoy-retry-on: From 44f4399c73ff0ef5c50ead428b9d2a281a5e95f7 Mon Sep 17 00:00:00 2001 From: Kuat Date: Fri, 24 Jul 2020 14:43:47 -0700 Subject: [PATCH 746/909] ecds: fix a flake in the integration test (#12268) Signed-off-by: Kuat Yessenov --- .../http/filter_config_discovery_impl.h | 5 ++- .../extension_discovery_integration_test.cc | 36 ++++++++++++++----- 2 files changed, 32 insertions(+), 9 deletions(-) diff --git a/source/common/filter/http/filter_config_discovery_impl.h b/source/common/filter/http/filter_config_discovery_impl.h index 1c2c838c5aae..626832dd8a11 100644 --- a/source/common/filter/http/filter_config_discovery_impl.h +++ b/source/common/filter/http/filter_config_discovery_impl.h @@ -107,7 +107,6 @@ class FilterConfigSubscription void onConfigUpdateFailed(Config::ConfigUpdateFailureReason reason, const EnvoyException*) override; - std::unique_ptr subscription_; const std::string filter_config_name_; uint64_t last_config_hash_{0ul}; Server::Configuration::FactoryContext& factory_context_; @@ -125,6 +124,10 @@ class FilterConfigSubscription const std::string subscription_id_; absl::flat_hash_set filter_config_providers_; friend class DynamicFilterConfigProviderImpl; + + // This must be the last since its destructor may call out to stats to report + // on draining requests. + std::unique_ptr subscription_; }; /** diff --git a/test/integration/extension_discovery_integration_test.cc b/test/integration/extension_discovery_integration_test.cc index 7af8be71c394..0a0fa4559ec7 100644 --- a/test/integration/extension_discovery_integration_test.cc +++ b/test/integration/extension_discovery_integration_test.cc @@ -53,9 +53,9 @@ class ExtensionDiscoveryIntegrationTest : public Grpc::GrpcClientIntegrationPara : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, ipVersion()) {} void addDynamicFilter(const std::string& name, bool apply_without_warming, - bool set_default_config = true) { + bool set_default_config = true, bool rate_limit = false) { config_helper_.addConfigModifier( - [this, name, apply_without_warming, set_default_config]( + [this, name, apply_without_warming, set_default_config, rate_limit]( envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& http_connection_manager) { auto* filter = http_connection_manager.mutable_http_filters()->Add(); @@ -81,6 +81,9 @@ class ExtensionDiscoveryIntegrationTest : public Grpc::GrpcClientIntegrationPara auto* api_config_source = discovery->mutable_config_source()->mutable_api_config_source(); api_config_source->set_api_type(envoy::config::core::v3::ApiConfigSource::GRPC); api_config_source->set_transport_api_version(envoy::config::core::v3::ApiVersion::V3); + if (rate_limit) { + api_config_source->mutable_rate_limit_settings()->mutable_max_tokens()->set_value(10); + } auto* grpc_service = api_config_source->add_grpc_services(); setGrpcService(*grpc_service, "ecds_cluster", getEcdsFakeUpstream().localAddress()); // keep router the last @@ -113,11 +116,13 @@ class ExtensionDiscoveryIntegrationTest : public Grpc::GrpcClientIntegrationPara } ~ExtensionDiscoveryIntegrationTest() override { - AssertionResult result = ecds_connection_->close(); - RELEASE_ASSERT(result, result.message()); - result = ecds_connection_->waitForDisconnect(); - RELEASE_ASSERT(result, result.message()); - ecds_connection_.reset(); + if (ecds_connection_ != nullptr) { + AssertionResult result = ecds_connection_->close(); + RELEASE_ASSERT(result, result.message()); + result = 
ecds_connection_->waitForDisconnect(); + RELEASE_ASSERT(result, result.message()); + ecds_connection_.reset(); + } } void createUpstreams() override { @@ -155,7 +160,6 @@ class ExtensionDiscoveryIntegrationTest : public Grpc::GrpcClientIntegrationPara FakeUpstream& getEcdsFakeUpstream() const { return *fake_upstreams_[1]; } -private: FakeHttpConnectionPtr ecds_connection_{nullptr}; FakeStreamPtr ecds_stream_{nullptr}; }; @@ -323,5 +327,21 @@ TEST_P(ExtensionDiscoveryIntegrationTest, BasicTwoSubscriptionsSameName) { EXPECT_EQ("200", response->headers().getStatusValue()); } +TEST_P(ExtensionDiscoveryIntegrationTest, DestroyDuringInit) { + // If rate limiting is enabled on the config source, gRPC mux drainage updates the requests + // queue size on destruction. The update calls out to stats scope nested under the extension + // config subscription stats scope. This test verifies that the stats scope outlasts the gRPC + // subscription. + on_server_init_function_ = [&]() { waitXdsStream(); }; + addDynamicFilter("foo", false, true); + initialize(); + test_server_->waitForCounterGe("listener_manager.lds.update_success", 1); + EXPECT_EQ(test_server_->server().initManager().state(), Init::Manager::State::Initializing); + test_server_.reset(); + auto result = ecds_connection_->waitForDisconnect(); + RELEASE_ASSERT(result, result.message()); + ecds_connection_.reset(); +} + } // namespace } // namespace Envoy From 62e58349195090328d771a01ec8382d571915ddc Mon Sep 17 00:00:00 2001 From: Yuki Miyake Date: Sat, 25 Jul 2020 06:52:35 +0900 Subject: [PATCH 747/909] docs: Display the code block correctly (#12288) I found that the code block in `grpc_bridge.rst` wasn't being displayed correctly, so I fixed it. Signed-off-by: zawawahoge --- docs/root/start/sandboxes/grpc_bridge.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/root/start/sandboxes/grpc_bridge.rst b/docs/root/start/sandboxes/grpc_bridge.rst index 3382a075bf53..aa61e6074269 100644 --- a/docs/root/start/sandboxes/grpc_bridge.rst +++ b/docs/root/start/sandboxes/grpc_bridge.rst @@ -32,7 +32,7 @@ Docker compose ~~~~~~~~~~~~~~ To run the docker compose file, and set up both the Python and the gRPC containers -run: +run:: $ pwd envoy/examples/grpc-bridge From defea7ecbf6f3ebffe8b9d41afa7f62322756f05 Mon Sep 17 00:00:00 2001 From: Wayne Zhang Date: Fri, 24 Jul 2020 15:07:55 -0700 Subject: [PATCH 748/909] jwt_authn: If a request has multiple JWT tokens, all must be valid. (#12089) By default, the jwt_authn filter extracts JWT tokens from the `Authorization` header and the `access_token` query parameter. A request may have multiple JWT tokens, and it will be forwarded to the backend if one of the tokens is good. This poses a security risk: a hacker can put a good token in the query parameter and an invalid one in the Authorization header. Envoy will forward the request to the backend, and the backend will use the bad token in the Authorization header. This PR patches that security hole: all tokens in a request must be valid. Risk Level: Low. This change only impacts requests with multiple JWT tokens. In production traffic, it will be very rare for a request to have multiple JWT tokens.
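As an illustration (a hypothetical request; the token values are placeholders), the risky case is a single request that carries a token in both default extraction locations:

    GET /foo?access_token=<valid-jwt> HTTP/1.1
    Authorization: Bearer <invalid-jwt>

Previously such a request could pass verification on the query-parameter token alone while the backend trusted the header token; with this change every extracted token must verify.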
Testing: Unit-test Docs Changes: None Release Notes: None Signed-off-by: Wayne Zhang --- .../http/http_filters/jwt_authn_filter.rst | 4 +- .../filters/http/jwt_authn/authenticator.cc | 10 +- .../http/jwt_authn/authenticator_test.cc | 180 ++++++++++++------ 3 files changed, 130 insertions(+), 64 deletions(-) diff --git a/docs/root/configuration/http/http_filters/jwt_authn_filter.rst b/docs/root/configuration/http/http_filters/jwt_authn_filter.rst index d084f1c9f44d..50790a230905 100644 --- a/docs/root/configuration/http/http_filters/jwt_authn_filter.rst +++ b/docs/root/configuration/http/http_filters/jwt_authn_filter.rst @@ -49,10 +49,12 @@ If *from_headers* and *from_params* is empty, the default location to extract J Authorization: Bearer -If fails to extract a JWT from above header, then check query parameter key *access_token* as in this example:: +and query parameter key *access_token* as:: /path?access_token= +If a request has two tokens, one from the header and the other from the query parameter, all of them must be valid. + In the :ref:`filter config `, *providers* is a map, to map *provider_name* to a :ref:`JwtProvider `. The *provider_name* must be unique, it is referred in the `JwtRequirement ` in its *provider_name* field. .. important:: diff --git a/source/extensions/filters/http/jwt_authn/authenticator.cc b/source/extensions/filters/http/jwt_authn/authenticator.cc index 9ca03ac020b7..27c1d3e8e267 100644 --- a/source/extensions/filters/http/jwt_authn/authenticator.cc +++ b/source/extensions/filters/http/jwt_authn/authenticator.cc @@ -265,8 +265,14 @@ void AuthenticatorImpl::verifyKey() { void AuthenticatorImpl::doneWithStatus(const Status& status) { ENVOY_LOG(debug, "{}: JWT token verification completed with: {}", name(), ::google::jwt_verify::getStatusString(status)); - // if on allow missing or failed this should verify all tokens, otherwise stop on ok. - if ((Status::Ok == status && !is_allow_failed_ && !is_allow_missing_) || tokens_.empty()) { + + // If a request has multiple tokens, all of them must be valid. Otherwise it may have + // following security hole: a request has a good token and a bad one, it will pass + // verification, forwarded to the backend, and the backend may mistakenly use the bad + // token as the good one that passed the verification. + + // Unless allowing failed or missing, all tokens must be verified successfully. 
+ if ((Status::Ok != status && !is_allow_failed_ && !is_allow_missing_) || tokens_.empty()) { tokens_.clear(); if (is_allow_failed_) { callback_(Status::Ok); diff --git a/test/extensions/filters/http/jwt_authn/authenticator_test.cc b/test/extensions/filters/http/jwt_authn/authenticator_test.cc index 40850b1bd31f..de34b8961829 100644 --- a/test/extensions/filters/http/jwt_authn/authenticator_test.cc +++ b/test/extensions/filters/http/jwt_authn/authenticator_test.cc @@ -36,18 +36,20 @@ class AuthenticatorTest : public testing::Test { public: void SetUp() override { TestUtility::loadFromYaml(ExampleConfig, proto_config_); - CreateAuthenticator(); + createAuthenticator(); } - void CreateAuthenticator(::google::jwt_verify::CheckAudience* check_audience = nullptr, - const absl::optional& provider = - absl::make_optional(ProviderName)) { + void createAuthenticator( + ::google::jwt_verify::CheckAudience* check_audience = nullptr, + const absl::optional& provider = absl::make_optional(ProviderName), + bool allow_failed = false, bool allow_missing = false) { filter_config_ = FilterConfigImpl::create(proto_config_, "", mock_factory_ctx_); raw_fetcher_ = new MockJwksFetcher; fetcher_.reset(raw_fetcher_); auth_ = Authenticator::create( - check_audience, provider, !provider, !provider, filter_config_->getCache().getJwksCache(), - filter_config_->cm(), [this](Upstream::ClusterManager&) { return std::move(fetcher_); }, + check_audience, provider, allow_failed, allow_missing, + filter_config_->getCache().getJwksCache(), filter_config_->cm(), + [this](Upstream::ClusterManager&) { return std::move(fetcher_); }, filter_config_->timeSource()); jwks_ = Jwks::createFrom(PublicKey, Jwks::JWKS); EXPECT_TRUE(jwks_->getStatus() == Status::Ok); @@ -99,8 +101,7 @@ TEST_F(AuthenticatorTest, TestOkJWTandCache) { // Test OK pubkey and its cache for (int i = 0; i < 10; i++) { - auto headers = - Http::TestRequestHeaderMapImpl{{"Authorization", "Bearer " + std::string(GoodToken)}}; + Http::TestRequestHeaderMapImpl headers{{"Authorization", "Bearer " + std::string(GoodToken)}}; expectVerifyStatus(Status::Ok, headers); @@ -114,7 +115,7 @@ TEST_F(AuthenticatorTest, TestOkJWTandCache) { TEST_F(AuthenticatorTest, TestForwardJwt) { // Config forward_jwt flag (*proto_config_.mutable_providers())[std::string(ProviderName)].set_forward(true); - CreateAuthenticator(); + createAuthenticator(); EXPECT_CALL(*raw_fetcher_, fetch(_, _, _)) .WillOnce(Invoke([this](const envoy::config::core::v3::HttpUri&, Tracing::Span&, JwksFetcher::JwksReceiver& receiver) { @@ -122,8 +123,7 @@ TEST_F(AuthenticatorTest, TestForwardJwt) { })); // Test OK pubkey and its cache - auto headers = - Http::TestRequestHeaderMapImpl{{"Authorization", "Bearer " + std::string(GoodToken)}}; + Http::TestRequestHeaderMapImpl headers{{"Authorization", "Bearer " + std::string(GoodToken)}}; expectVerifyStatus(Status::Ok, headers); @@ -139,7 +139,7 @@ TEST_F(AuthenticatorTest, TestSetPayload) { // Config payload_in_metadata flag (*proto_config_.mutable_providers())[std::string(ProviderName)].set_payload_in_metadata( "my_payload"); - CreateAuthenticator(); + createAuthenticator(); EXPECT_CALL(*raw_fetcher_, fetch(_, _, _)) .WillOnce(Invoke([this](const envoy::config::core::v3::HttpUri&, Tracing::Span&, JwksFetcher::JwksReceiver& receiver) { @@ -147,8 +147,7 @@ TEST_F(AuthenticatorTest, TestSetPayload) { })); // Test OK pubkey and its cache - auto headers = - Http::TestRequestHeaderMapImpl{{"Authorization", "Bearer " + std::string(GoodToken)}}; + Http::TestRequestHeaderMapImpl 
headers{{"Authorization", "Bearer " + std::string(GoodToken)}}; expectVerifyStatus(Status::Ok, headers); @@ -169,8 +168,8 @@ TEST_F(AuthenticatorTest, TestJwtWithNonExistKid) { })); // Test OK pubkey and its cache - auto headers = - Http::TestRequestHeaderMapImpl{{"Authorization", "Bearer " + std::string(NonExistKidToken)}}; + Http::TestRequestHeaderMapImpl headers{ + {"Authorization", "Bearer " + std::string(NonExistKidToken)}}; expectVerifyStatus(Status::JwtVerificationFail, headers); } @@ -180,17 +179,83 @@ TEST_F(AuthenticatorTest, TestMissedJWT) { EXPECT_CALL(*raw_fetcher_, fetch(_, _, _)).Times(0); // Empty headers. - auto headers = Http::TestRequestHeaderMapImpl{}; + Http::TestRequestHeaderMapImpl headers{}; expectVerifyStatus(Status::JwtMissed, headers); } +// Test multiple tokens; the one from query parameter is bad, verification should fail. +TEST_F(AuthenticatorTest, TestMultipleJWTOneBadFromQuery) { + EXPECT_CALL(*raw_fetcher_, fetch(_, _, _)).Times(1); + + // headers with multiple tokens: one good, one bad + Http::TestRequestHeaderMapImpl headers{ + {"Authorization", "Bearer " + std::string(GoodToken)}, + {":path", "/foo?access_token=" + std::string(NonExistKidToken)}, + }; + + expectVerifyStatus(Status::JwtVerificationFail, headers); +} + +// Test multiple tokens; the one from header is bad, verification should fail. +TEST_F(AuthenticatorTest, TestMultipleJWTOneBadFromHeader) { + EXPECT_CALL(*raw_fetcher_, fetch(_, _, _)).Times(1); + + // headers with multiple tokens: one good, one bad + Http::TestRequestHeaderMapImpl headers{ + {"Authorization", "Bearer " + std::string(NonExistKidToken)}, + {":path", "/foo?access_token=" + std::string(GoodToken)}, + }; + + expectVerifyStatus(Status::JwtVerificationFail, headers); +} + +// Test multiple tokens; all are good, verification is ok. +TEST_F(AuthenticatorTest, TestMultipleJWTAllGood) { + EXPECT_CALL(*raw_fetcher_, fetch(_, _, _)).Times(1); + + // headers with multiple tokens: all are good + Http::TestRequestHeaderMapImpl headers{ + {"Authorization", "Bearer " + std::string(GoodToken)}, + {":path", "/foo?access_token=" + std::string(GoodToken)}, + }; + + expectVerifyStatus(Status::Ok, headers); +} + +// Test multiple tokens; one of them is bad and allow_failed, verification is ok. +TEST_F(AuthenticatorTest, TestMultipleJWTOneBadAllowFails) { + createAuthenticator(nullptr, absl::make_optional(ProviderName), + /*allow_failed=*/true, /*all_missing=*/false); + EXPECT_CALL(*raw_fetcher_, fetch(_, _, _)).Times(1); + + // headers with multiple tokens: one good, one bad + Http::TestRequestHeaderMapImpl headers{ + {"Authorization", "Bearer " + std::string(GoodToken)}, + {":path", "/foo?access_token=" + std::string(NonExistKidToken)}, + }; + + expectVerifyStatus(Status::Ok, headers); +} + +// Test empty header and allow_missing, verification is ok. +TEST_F(AuthenticatorTest, TestAllowMissingWithEmptyHeader) { + createAuthenticator(nullptr, absl::make_optional(ProviderName), + /*allow_failed=*/false, /*all_missing=*/true); + EXPECT_CALL(*raw_fetcher_, fetch(_, _, _)).Times(0); + + // Empty headers + Http::TestRequestHeaderMapImpl headers{}; + + expectVerifyStatus(Status::Ok, headers); +} + // This test verifies if Jwt is invalid, JwtBadFormat status is returned. 
TEST_F(AuthenticatorTest, TestInvalidJWT) { EXPECT_CALL(*raw_fetcher_, fetch(_, _, _)).Times(0); std::string token = "invalidToken"; - auto headers = Http::TestRequestHeaderMapImpl{{"Authorization", "Bearer " + token}}; + Http::TestRequestHeaderMapImpl headers{{"Authorization", "Bearer " + token}}; expectVerifyStatus(Status::JwtBadFormat, headers); } @@ -198,7 +263,7 @@ TEST_F(AuthenticatorTest, TestInvalidJWT) { TEST_F(AuthenticatorTest, TestInvalidPrefix) { EXPECT_CALL(*raw_fetcher_, fetch(_, _, _)).Times(0); - auto headers = Http::TestRequestHeaderMapImpl{{"Authorization", "Bearer-invalid"}}; + Http::TestRequestHeaderMapImpl headers{{"Authorization", "Bearer-invalid"}}; expectVerifyStatus(Status::JwtMissed, headers); } @@ -207,8 +272,8 @@ TEST_F(AuthenticatorTest, TestInvalidPrefix) { TEST_F(AuthenticatorTest, TestNonExpiringJWT) { EXPECT_CALL(mock_factory_ctx_.cluster_manager_, httpAsyncClientForCluster(_)).Times(0); - auto headers = - Http::TestRequestHeaderMapImpl{{"Authorization", "Bearer " + std::string(NonExpiringToken)}}; + Http::TestRequestHeaderMapImpl headers{ + {"Authorization", "Bearer " + std::string(NonExpiringToken)}}; expectVerifyStatus(Status::JwtAudienceNotAllowed, headers); } @@ -216,8 +281,7 @@ TEST_F(AuthenticatorTest, TestNonExpiringJWT) { TEST_F(AuthenticatorTest, TestExpiredJWT) { EXPECT_CALL(*raw_fetcher_, fetch(_, _, _)).Times(0); - auto headers = - Http::TestRequestHeaderMapImpl{{"Authorization", "Bearer " + std::string(ExpiredToken)}}; + Http::TestRequestHeaderMapImpl headers{{"Authorization", "Bearer " + std::string(ExpiredToken)}}; expectVerifyStatus(Status::JwtExpired, headers); } @@ -225,8 +289,8 @@ TEST_F(AuthenticatorTest, TestExpiredJWT) { TEST_F(AuthenticatorTest, TestNotYetValidJWT) { EXPECT_CALL(*raw_fetcher_, fetch(_, _, _)).Times(0); - auto headers = - Http::TestRequestHeaderMapImpl{{"Authorization", "Bearer " + std::string(NotYetValidToken)}}; + Http::TestRequestHeaderMapImpl headers{ + {"Authorization", "Bearer " + std::string(NotYetValidToken)}}; expectVerifyStatus(Status::JwtNotYetValid, headers); } @@ -235,12 +299,11 @@ TEST_F(AuthenticatorTest, TestInvalidLocalJwks) { auto& provider = (*proto_config_.mutable_providers())[std::string(ProviderName)]; provider.clear_remote_jwks(); provider.mutable_local_jwks()->set_inline_string("invalid"); - CreateAuthenticator(); + createAuthenticator(); EXPECT_CALL(*raw_fetcher_, fetch(_, _, _)).Times(0); - auto headers = - Http::TestRequestHeaderMapImpl{{"Authorization", "Bearer " + std::string(GoodToken)}}; + Http::TestRequestHeaderMapImpl headers{{"Authorization", "Bearer " + std::string(GoodToken)}}; expectVerifyStatus(Status::JwksNoValidKeys, headers); } @@ -248,8 +311,8 @@ TEST_F(AuthenticatorTest, TestInvalidLocalJwks) { TEST_F(AuthenticatorTest, TestNonMatchAudJWT) { EXPECT_CALL(*raw_fetcher_, fetch(_, _, _)).Times(0); - auto headers = - Http::TestRequestHeaderMapImpl{{"Authorization", "Bearer " + std::string(InvalidAudToken)}}; + Http::TestRequestHeaderMapImpl headers{ + {"Authorization", "Bearer " + std::string(InvalidAudToken)}}; expectVerifyStatus(Status::JwtAudienceNotAllowed, headers); } @@ -257,12 +320,11 @@ TEST_F(AuthenticatorTest, TestNonMatchAudJWT) { TEST_F(AuthenticatorTest, TestIssuerNotFound) { // Create a config with an other issuer. 
(*proto_config_.mutable_providers())[std::string(ProviderName)].set_issuer("other_issuer"); - CreateAuthenticator(); + createAuthenticator(); EXPECT_CALL(*raw_fetcher_, fetch(_, _, _)).Times(0); - auto headers = - Http::TestRequestHeaderMapImpl{{"Authorization", "Bearer " + std::string(GoodToken)}}; + Http::TestRequestHeaderMapImpl headers{{"Authorization", "Bearer " + std::string(GoodToken)}}; expectVerifyStatus(Status::JwtUnknownIssuer, headers); } @@ -274,8 +336,7 @@ TEST_F(AuthenticatorTest, TestPubkeyFetchFail) { receiver.onJwksError(JwksFetcher::JwksReceiver::Failure::InvalidJwks); })); - auto headers = - Http::TestRequestHeaderMapImpl{{"Authorization", "Bearer " + std::string(GoodToken)}}; + Http::TestRequestHeaderMapImpl headers{{"Authorization", "Bearer " + std::string(GoodToken)}}; expectVerifyStatus(Status::JwksFetchFail, headers); Http::ResponseMessagePtr response_message(new Http::ResponseMessageImpl( @@ -291,8 +352,7 @@ TEST_F(AuthenticatorTest, TestOnDestroy) { // Cancel is called once. EXPECT_CALL(*raw_fetcher_, cancel()).Times(1); - auto headers = - Http::TestRequestHeaderMapImpl{{"Authorization", "Bearer " + std::string(GoodToken)}}; + Http::TestRequestHeaderMapImpl headers{{"Authorization", "Bearer " + std::string(GoodToken)}}; initTokenExtractor(); auto tokens = extractor_->extract(headers); // callback should not be called. @@ -308,15 +368,14 @@ TEST_F(AuthenticatorTest, TestNoForwardPayloadHeader) { // In this config, there is no forward_payload_header auto& provider0 = (*proto_config_.mutable_providers())[std::string(ProviderName)]; provider0.clear_forward_payload_header(); - CreateAuthenticator(); + createAuthenticator(); EXPECT_CALL(*raw_fetcher_, fetch(_, _, _)) .WillOnce(Invoke([this](const envoy::config::core::v3::HttpUri&, Tracing::Span&, JwksFetcher::JwksReceiver& receiver) { receiver.onJwksSuccess(std::move(jwks_)); })); - auto headers = - Http::TestRequestHeaderMapImpl{{"Authorization", "Bearer " + std::string(GoodToken)}}; + Http::TestRequestHeaderMapImpl headers{{"Authorization", "Bearer " + std::string(GoodToken)}}; expectVerifyStatus(Status::Ok, headers); // Test when forward_payload_header is not set, the output should NOT @@ -334,36 +393,36 @@ TEST_F(AuthenticatorTest, TestAllowFailedMultipleTokens) { header->set_value_prefix("Bearer "); } - CreateAuthenticator(nullptr, absl::nullopt); + createAuthenticator(nullptr, absl::nullopt, /*allow_failed=*/true); EXPECT_CALL(*raw_fetcher_, fetch(_, _, _)) .WillOnce(Invoke([this](const envoy::config::core::v3::HttpUri&, Tracing::Span&, JwksFetcher::JwksReceiver& receiver) { receiver.onJwksSuccess(std::move(jwks_)); })); - auto headers = Http::TestRequestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl headers1{ {"a", "Bearer " + std::string(ExpiredToken)}, {"b", "Bearer " + std::string(GoodToken)}, {"c", "Bearer " + std::string(InvalidAudToken)}, {":path", "/"}, }; - expectVerifyStatus(Status::Ok, headers); + expectVerifyStatus(Status::Ok, headers1); - EXPECT_TRUE(headers.has("a")); - EXPECT_FALSE(headers.has("b")); - EXPECT_TRUE(headers.has("c")); + EXPECT_TRUE(headers1.has("a")); + EXPECT_FALSE(headers1.has("b")); + EXPECT_TRUE(headers1.has("c")); - headers = Http::TestRequestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl headers2{ {"a", "Bearer " + std::string(GoodToken)}, {"b", "Bearer " + std::string(GoodToken)}, {"c", "Bearer " + std::string(GoodToken)}, {":path", "/"}, }; - expectVerifyStatus(Status::Ok, headers); + expectVerifyStatus(Status::Ok, headers2); - EXPECT_FALSE(headers.has("a")); - 
EXPECT_FALSE(headers.has("b")); - EXPECT_FALSE(headers.has("c")); + EXPECT_FALSE(headers2.has("a")); + EXPECT_FALSE(headers2.has("b")); + EXPECT_FALSE(headers2.has("c")); } // This test verifies that allow failed authenticator will verify all tokens. @@ -381,7 +440,7 @@ TEST_F(AuthenticatorTest, TestAllowFailedMultipleIssuers) { header->set_name("other-auth"); header->set_value_prefix("Bearer "); - CreateAuthenticator(nullptr, absl::nullopt); + createAuthenticator(nullptr, absl::nullopt, /*allow_failed=*/true); EXPECT_CALL(*raw_fetcher_, fetch(_, _, _)) .Times(2) .WillRepeatedly(Invoke([](const envoy::config::core::v3::HttpUri&, Tracing::Span&, @@ -391,7 +450,7 @@ TEST_F(AuthenticatorTest, TestAllowFailedMultipleIssuers) { receiver.onJwksSuccess(std::move(jwks)); })); - auto headers = Http::TestRequestHeaderMapImpl{ + Http::TestRequestHeaderMapImpl headers{ {"Authorization", "Bearer " + std::string(GoodToken)}, {"expired-auth", "Bearer " + std::string(ExpiredToken)}, {"other-auth", "Bearer " + std::string(OtherGoodToken)}, @@ -408,19 +467,19 @@ TEST_F(AuthenticatorTest, TestAllowFailedMultipleIssuers) { TEST_F(AuthenticatorTest, TestCustomCheckAudience) { auto check_audience = std::make_unique<::google::jwt_verify::CheckAudience>( std::vector{"invalid_service"}); - CreateAuthenticator(check_audience.get()); + createAuthenticator(check_audience.get()); EXPECT_CALL(*raw_fetcher_, fetch(_, _, _)) .WillOnce(Invoke([this](const envoy::config::core::v3::HttpUri&, Tracing::Span&, JwksFetcher::JwksReceiver& receiver) { receiver.onJwksSuccess(std::move(jwks_)); })); - auto headers = - Http::TestRequestHeaderMapImpl{{"Authorization", "Bearer " + std::string(InvalidAudToken)}}; - expectVerifyStatus(Status::Ok, headers); + Http::TestRequestHeaderMapImpl headers1{ + {"Authorization", "Bearer " + std::string(InvalidAudToken)}}; + expectVerifyStatus(Status::Ok, headers1); - headers = Http::TestRequestHeaderMapImpl{{"Authorization", "Bearer " + std::string(GoodToken)}}; - expectVerifyStatus(Status::JwtAudienceNotAllowed, headers); + Http::TestRequestHeaderMapImpl headers2{{"Authorization", "Bearer " + std::string(GoodToken)}}; + expectVerifyStatus(Status::JwtAudienceNotAllowed, headers2); } // This test verifies that when invalid JWKS is fetched, an JWKS error status is returned. 
@@ -432,8 +491,7 @@ TEST_F(AuthenticatorTest, TestInvalidPubkeyKey) { receiver.onJwksSuccess(std::move(jwks)); })); - auto headers = - Http::TestRequestHeaderMapImpl{{"Authorization", "Bearer " + std::string(GoodToken)}}; + Http::TestRequestHeaderMapImpl headers{{"Authorization", "Bearer " + std::string(GoodToken)}}; expectVerifyStatus(Status::JwksPemBadBase64, headers); } From 1f0eae35e0b456583e3aca1efb7487be646990ad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Josef=20Podan=C3=BD?= Date: Mon, 27 Jul 2020 15:49:54 +0200 Subject: [PATCH 749/909] docs: add LocalityLbEndpoints.locality to a locality weighted load balancing configuration section (#12249) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Josef Podaný --- .../upstream/load_balancing/locality_weight.rst | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/docs/root/intro/arch_overview/upstream/load_balancing/locality_weight.rst b/docs/root/intro/arch_overview/upstream/load_balancing/locality_weight.rst index f435b7abce6d..d5abaa4c82ed 100644 --- a/docs/root/intro/arch_overview/upstream/load_balancing/locality_weight.rst +++ b/docs/root/intro/arch_overview/upstream/load_balancing/locality_weight.rst @@ -60,9 +60,11 @@ picked. The load balancer follows these steps: Locality weighted load balancing is configured by setting :ref:`locality_weighted_lb_config ` in the -cluster configuration and providing weights in :ref:`LocalityLbEndpoints -` via :ref:`load_balancing_weight -`. +cluster configuration and by providing weights via :ref:`load_balancing_weight +` and +identifying the location of the upstream hosts via :ref:`locality +` in +:ref:`LocalityLbEndpoints `. This feature is not compatible with :ref:`load balancer subsetting `, since it is not straightforward to From 49d4a2a54be31c226fc26ee5976007d01b76e2ba Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Mon, 27 Jul 2020 11:10:29 -0400 Subject: [PATCH 750/909] dubbo: removing an unused file (#12302) Risk Level: n/a Testing: n/a Docs Changes: n/a Release Notes: n/a Fixes #12282 Signed-off-by: Alyssa Wilk --- .../network/dubbo_proxy/deserializer.h | 177 ------------------ 1 file changed, 177 deletions(-) delete mode 100644 source/extensions/filters/network/dubbo_proxy/deserializer.h diff --git a/source/extensions/filters/network/dubbo_proxy/deserializer.h b/source/extensions/filters/network/dubbo_proxy/deserializer.h deleted file mode 100644 index 95f2f8e5bc44..000000000000 --- a/source/extensions/filters/network/dubbo_proxy/deserializer.h +++ /dev/null @@ -1,177 +0,0 @@ -#pragma once - -#include -#include - -#include "envoy/buffer/buffer.h" - -#include "common/common/assert.h" -#include "common/config/utility.h" -#include "common/singleton/const_singleton.h" - -#include "extensions/filters/network/dubbo_proxy/message.h" -#include "extensions/filters/network/dubbo_proxy/metadata.h" - -namespace Envoy { -namespace Extensions { -namespace NetworkFilters { -namespace DubboProxy { - -/** - * Names of available deserializer implementations. 
- */ -class DeserializerNameValues { -public: - struct SerializationTypeHash { - template std::size_t operator()(T t) const { return static_cast(t); } - }; - - using DeserializerTypeNameMap = - std::unordered_map; - - const DeserializerTypeNameMap deserializerTypeNameMap = { - {SerializationType::Hessian, "hessian"}, - }; - - const std::string& fromType(SerializationType type) const { - const auto& itor = deserializerTypeNameMap.find(type); - if (itor != deserializerTypeNameMap.end()) { - return itor->second; - } - - NOT_REACHED_GCOVR_EXCL_LINE; - } -}; - -using DeserializerNames = ConstSingleton; - -/** - * RpcInvocation represent an rpc call - * See - * https://github.com/apache/incubator-dubbo/blob/master/dubbo-rpc/dubbo-rpc-api/src/main/java/org/apache/dubbo/rpc/RpcInvocation.java - */ -class RpcInvocation { -public: - virtual ~RpcInvocation() = default; - virtual const std::string& getMethodName() const PURE; - virtual const std::string& getServiceName() const PURE; - virtual const std::string& getServiceVersion() const PURE; -}; - -using RpcInvocationPtr = std::unique_ptr; - -/** - * RpcResult represent the result of an rpc call - * See - * https://github.com/apache/incubator-dubbo/blob/master/dubbo-rpc/dubbo-rpc-api/src/main/java/org/apache/dubbo/rpc/RpcResult.java - */ -class RpcResult { -public: - virtual ~RpcResult() = default; - virtual bool hasException() const PURE; -}; - -using RpcResultPtr = std::unique_ptr; - -class Deserializer { -public: - virtual ~Deserializer() = default; - /** - * Return this Deserializer's name - * - * @return std::string containing the serialization name. - */ - virtual const std::string& name() const PURE; - - /** - * @return SerializationType the deserializer type - */ - virtual SerializationType type() const PURE; - - /** - * deserialize an rpc call - * If successful, the RpcInvocation removed from the buffer - * - * @param buffer the currently buffered dubbo data - * @body_size the complete RpcInvocation size - * @throws EnvoyException if the data is not valid for this serialization - */ - virtual void deserializeRpcInvocation(Buffer::Instance& buffer, size_t body_size, - MessageMetadataSharedPtr metadata) PURE; - /** - * deserialize result of an rpc call - * If successful, the RpcResult removed from the buffer - * - * @param buffer the currently buffered dubbo data - * @body_size the complete RpcResult size - * @throws EnvoyException if the data is not valid for this serialization - */ - virtual RpcResultPtr deserializeRpcResult(Buffer::Instance& buffer, size_t body_size) PURE; - - /** - * serialize result of an rpc call - * If successful, the output_buffer is written to the serialized data - * - * @param output_buffer store the serialized data - * @param content the rpc response content - * @param type the rpc response type - * @return size_t the length of the serialized content - */ - virtual size_t serializeRpcResult(Buffer::Instance& output_buffer, const std::string& content, - RpcResponseType type) PURE; -}; - -using DeserializerPtr = std::unique_ptr; - -/** - * Implemented by each Dubbo deserialize and registered via Registry::registerFactory or the - * convenience class RegisterFactory. - */ -class NamedDeserializerConfigFactory { -public: - virtual ~NamedDeserializerConfigFactory() = default; - - /** - * Create a particular Dubbo deserializer. 
- * @return DeserializerPtr the transport
- */
- virtual DeserializerPtr createDeserializer() PURE;
-
- /**
- * @return std::string the identifying name for a particular implementation of Dubbo deserializer
- * produced by the factory.
- */
- virtual std::string name() PURE;
-
- /**
- * Convenience method to lookup a factory by type.
- * @param TransportType the transport type
- * @return NamedDeserializerConfigFactory& for the TransportType
- */
- static NamedDeserializerConfigFactory& getFactory(SerializationType type) {
- const std::string& name = DeserializerNames::get().fromType(type);
- return Envoy::Config::Utility::getAndCheckFactory(name);
- }
-};
-
-/**
- * DeserializerFactoryBase provides a template for a trivial NamedDeserializerConfigFactory.
- */
-template
-class DeserializerFactoryBase : public NamedDeserializerConfigFactory {
- DeserializerPtr createDeserializer() override { return std::make_unique(); }
-
- std::string name() override { return name_; }
-
-protected:
- DeserializerFactoryBase(SerializationType type)
- : name_(DeserializerNames::get().fromType(type)) {}
-
-private:
- const std::string name_;
-};
-
-} // namespace DubboProxy
-} // namespace NetworkFilters
-} // namespace Extensions
-} // namespace Envoy
From ce26fe19e0f8cd033e16aa3a9145281b0d03b748 Mon Sep 17 00:00:00 2001
From: jianwen612 <55008549+jianwen612@users.noreply.github.com>
Date: Mon, 27 Jul 2020 10:32:20 -0500
Subject: [PATCH 751/909] [Fuzz] Network-layer filter generic fuzzer (#12086)

* added a generic framework for testing filters.

This is a fuzzer for testing network-layer (L3/L4) filters. Envoy currently has
20 network-layer filters that deal with raw bytes from untrusted networks, so
they are security-critical to some extent. The idea is to write one fuzzer that
can be applied to different kinds of network filters (potentially covering all
of them), so that when new filters are added to Envoy we won't need to write
dedicated fuzzers one by one to give them fuzz coverage.
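As a rough, self-contained sketch of the pattern (toy types and names such as
ToyReadFilter, EchoLikeFilter, Action, and runActions, not Envoy's actual
interfaces or this PR's classes), the generic read-filter fuzzer boils down to
replaying a recorded action sequence (new connection, data, advance time)
against whichever filter the fuzz input selects:

#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

// Minimal stand-in for a network read filter (toy interface, not Envoy's).
class ToyReadFilter {
public:
  virtual ~ToyReadFilter() = default;
  virtual void onNewConnection() = 0;
  virtual void onData(const std::string& data, bool end_stream) = 0;
};

// One hypothetical filter; the real fuzzer instead creates the filter from the
// fuzz input's typed config through the filter factory registry.
class EchoLikeFilter : public ToyReadFilter {
public:
  void onNewConnection() override { std::cout << "new connection\n"; }
  void onData(const std::string& data, bool end_stream) override {
    std::cout << "got " << data.size() << " bytes"
              << (end_stream ? " (end of stream)" : "") << "\n";
  }
};

// Mirrors the fuzz proto's action oneof: on_new_connection / on_data / advance_time.
struct Action {
  enum class Kind { NewConnection, Data, AdvanceTime };
  Kind kind;
  std::string data;
  bool end_stream;
  uint32_t milliseconds;
};

// Replays the recorded actions against any filter implementing the toy interface.
void runActions(ToyReadFilter& filter, const std::vector<Action>& actions) {
  uint64_t simulated_ms = 0;
  for (const Action& action : actions) {
    switch (action.kind) {
    case Action::Kind::NewConnection:
      filter.onNewConnection();
      break;
    case Action::Kind::Data:
      filter.onData(action.data, action.end_stream);
      break;
    case Action::Kind::AdvanceTime:
      // The real fuzzer advances a simulated clock and runs pending timers;
      // here we only keep a counter.
      simulated_ms += action.milliseconds;
      break;
    }
  }
  std::cout << "advanced simulated time by " << simulated_ms << " ms\n";
}

int main() {
  EchoLikeFilter filter;
  runActions(filter, {{Action::Kind::NewConnection, "", false, 0},
                      {Action::Kind::Data, "hello", true, 0},
                      {Action::Kind::AdvanceTime, "", false, 500}});
  return 0;
}

In the actual test below, the filter comes from the NamedNetworkFilterConfigFactory
registry and the action sequence comes from the generated FilterFuzzTestCase proto;
the toy version only illustrates the dispatch loop.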
Signed-off-by: jianwen --- .../client_ssl_auth/v3/client_ssl_auth.proto | 3 +- .../client_ssl_auth/v3/client_ssl_auth.proto | 3 +- source/extensions/all_extensions.bzl | 9 ++ .../filters/network/common/fuzz/BUILD | 58 ++++++++++ .../client_sslL_auth_2 | 47 ++++++++ .../client_ssl_authz_1 | 44 +++++++ .../direct_response_1 | 36 ++++++ .../direct_response_open_file | 19 +++ .../network_readfilter_corpus/dubbo_proxy_1 | 53 +++++++++ .../fuzz/network_readfilter_corpus/echo_1 | 31 +++++ .../fuzz/network_readfilter_corpus/empty | 14 +++ .../network_readfilter_corpus/ext_authz_1 | 20 ++++ .../network_readfilter_corpus/ext_authz_2 | 16 +++ .../local_ratelimit_1 | 39 +++++++ .../local_ratelimit_time_overflow | 44 +++++++ .../network_readfilter_corpus/redis_proxy_1 | 28 +++++ .../network_readfilter_corpus/sni_cluster_1 | 35 ++++++ .../network_readfilter_corpus/sni_cluster_2 | 25 ++++ .../common/fuzz/network_readfilter_fuzz.proto | 34 ++++++ .../fuzz/network_readfilter_fuzz_test.cc | 61 ++++++++++ .../common/fuzz/uber_per_readfilter.cc | 102 +++++++++++++++++ .../network/common/fuzz/uber_readfilter.cc | 108 ++++++++++++++++++ .../network/common/fuzz/uber_readfilter.h | 49 ++++++++ .../filters/network/common/fuzz/utils/BUILD | 17 +++ .../filters/network/common/fuzz/utils/fakes.h | 49 ++++++++ 25 files changed, 942 insertions(+), 2 deletions(-) create mode 100644 test/extensions/filters/network/common/fuzz/BUILD create mode 100644 test/extensions/filters/network/common/fuzz/network_readfilter_corpus/client_sslL_auth_2 create mode 100644 test/extensions/filters/network/common/fuzz/network_readfilter_corpus/client_ssl_authz_1 create mode 100644 test/extensions/filters/network/common/fuzz/network_readfilter_corpus/direct_response_1 create mode 100644 test/extensions/filters/network/common/fuzz/network_readfilter_corpus/direct_response_open_file create mode 100644 test/extensions/filters/network/common/fuzz/network_readfilter_corpus/dubbo_proxy_1 create mode 100644 test/extensions/filters/network/common/fuzz/network_readfilter_corpus/echo_1 create mode 100644 test/extensions/filters/network/common/fuzz/network_readfilter_corpus/empty create mode 100644 test/extensions/filters/network/common/fuzz/network_readfilter_corpus/ext_authz_1 create mode 100644 test/extensions/filters/network/common/fuzz/network_readfilter_corpus/ext_authz_2 create mode 100644 test/extensions/filters/network/common/fuzz/network_readfilter_corpus/local_ratelimit_1 create mode 100644 test/extensions/filters/network/common/fuzz/network_readfilter_corpus/local_ratelimit_time_overflow create mode 100644 test/extensions/filters/network/common/fuzz/network_readfilter_corpus/redis_proxy_1 create mode 100644 test/extensions/filters/network/common/fuzz/network_readfilter_corpus/sni_cluster_1 create mode 100644 test/extensions/filters/network/common/fuzz/network_readfilter_corpus/sni_cluster_2 create mode 100644 test/extensions/filters/network/common/fuzz/network_readfilter_fuzz.proto create mode 100644 test/extensions/filters/network/common/fuzz/network_readfilter_fuzz_test.cc create mode 100644 test/extensions/filters/network/common/fuzz/uber_per_readfilter.cc create mode 100644 test/extensions/filters/network/common/fuzz/uber_readfilter.cc create mode 100644 test/extensions/filters/network/common/fuzz/uber_readfilter.h create mode 100644 test/extensions/filters/network/common/fuzz/utils/BUILD create mode 100644 test/extensions/filters/network/common/fuzz/utils/fakes.h diff --git 
a/api/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto b/api/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto index 29cd04939b8a..b3af267a77ad 100644 --- a/api/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto +++ b/api/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto @@ -29,7 +29,8 @@ message ClientSSLAuth { // the authentication service. The filter will connect to the service every 60s to fetch the list // of principals. The service must support the expected :ref:`REST API // `. - string auth_api_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + string auth_api_cluster = 1 + [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}]; // The prefix to use when emitting :ref:`statistics // `. diff --git a/generated_api_shadow/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto b/generated_api_shadow/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto index 29cd04939b8a..b3af267a77ad 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto @@ -29,7 +29,8 @@ message ClientSSLAuth { // the authentication service. The filter will connect to the service every 60s to fetch the list // of principals. The service must support the expected :ref:`REST API // `. - string auth_api_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; + string auth_api_cluster = 1 + [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}]; // The prefix to use when emitting :ref:`statistics // `. diff --git a/source/extensions/all_extensions.bzl b/source/extensions/all_extensions.bzl index 8e151ad42d2d..ace7333688bc 100644 --- a/source/extensions/all_extensions.bzl +++ b/source/extensions/all_extensions.bzl @@ -38,3 +38,12 @@ def envoy_all_http_filters(): all_extensions = dicts.add(_required_extensions, EXTENSIONS) return [v for k, v in all_extensions.items() if k.startswith(_http_filter_prefix)] + +# All network-layer filters are extensions with names that have the following prefix. +_network_filter_prefix = "envoy.filters.network" + +# Return all network-layer filter extensions to be compiled into network-layer filter generic fuzzer. 
+def envoy_all_network_filters(): + all_extensions = dicts.add(_required_extensions, EXTENSIONS) + + return [v for k, v in all_extensions.items() if k.startswith(_network_filter_prefix)] diff --git a/test/extensions/filters/network/common/fuzz/BUILD b/test/extensions/filters/network/common/fuzz/BUILD new file mode 100644 index 000000000000..a97370781cbc --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/BUILD @@ -0,0 +1,58 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_fuzz_test", + "envoy_cc_test_library", + "envoy_package", + "envoy_proto_library", +) +load( + "//source/extensions:all_extensions.bzl", + "envoy_all_network_filters", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_proto_library( + name = "network_readfilter_fuzz_proto", + srcs = ["network_readfilter_fuzz.proto"], + deps = [ + "//test/fuzz:common_proto", + "@envoy_api//envoy/config/listener/v3:pkg", + ], +) + +envoy_cc_test_library( + name = "uber_readfilter_lib", + srcs = [ + "uber_per_readfilter.cc", + "uber_readfilter.cc", + ], + hdrs = ["uber_readfilter.h"], + deps = [ + ":network_readfilter_fuzz_proto_cc_proto", + "//source/common/config:utility_lib", + "//source/extensions/filters/network:well_known_names", + "//source/extensions/filters/network/common:utility_lib", + "//test/extensions/filters/common/ext_authz:ext_authz_test_common", + "//test/extensions/filters/network/common/fuzz/utils:network_filter_fuzzer_fakes_lib", + "//test/fuzz:utility_lib", + "//test/mocks/network:network_mocks", + "@envoy_api//envoy/extensions/filters/network/direct_response/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/network/local_ratelimit/v3:pkg_cc_proto", + ], +) + +envoy_cc_fuzz_test( + name = "network_readfilter_fuzz_test", + srcs = ["network_readfilter_fuzz_test.cc"], + corpus = "network_readfilter_corpus", + # All Envoy network filters must be linked to the test in order for the fuzzer to pick + # these up via the NamedNetworkFilterConfigFactory. 
+ deps = [ + ":uber_readfilter_lib", + "//source/common/config:utility_lib", + "//test/config:utility_lib", + ] + envoy_all_network_filters(), +) diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/client_sslL_auth_2 b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/client_sslL_auth_2 new file mode 100644 index 000000000000..dd24c6c6c4da --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/client_sslL_auth_2 @@ -0,0 +1,47 @@ +config { + name: "envoy.filters.network.client_ssl_auth" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.client_ssl_auth.v3.ClientSSLAuth" + value: "\n\010\177\177_p\000O\002@\022\007x-clien" + } +} +actions { + advance_time { + milliseconds: 524288 + } +} +actions { + on_new_connection { + } +} +actions { + advance_time { + milliseconds: 524288 + } +} +actions { + advance_time { + milliseconds: 524288 + } +} +actions { + on_new_connection { + } +} +actions { + on_data { + data: "ppu" + end_stream: true + } +} +actions { + advance_time { + milliseconds: 524288 + } +} +actions { + on_data { + data: "type.googleapis.com/envoy.extensions.filters.network.client_ssl_auth.v3.ClientSSLAuth" + end_stream: true + } +} diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/client_ssl_authz_1 b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/client_ssl_authz_1 new file mode 100644 index 000000000000..44f4dfaf34d1 --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/client_ssl_authz_1 @@ -0,0 +1,44 @@ +config { + name: "envoy.filters.network.client_ssl_auth" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.client_ssl_auth.v3.ClientSSLAuth" + value: "\n%envoy.filters.network.client_ssl_auth\022\0011" + } +} +actions { + on_new_connection { + } +} +actions { + advance_time { + milliseconds: 4 + } +} +actions { + on_data { + data: "u\360" + } +} +actions { + on_data { + data: "u\360" + } +} +actions { + on_data { + data: "u\360" + } +} +actions { + advance_time { + milliseconds: 4 + } +} +actions { + on_new_connection { + } +} +actions { + on_new_connection { + } +} diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/direct_response_1 b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/direct_response_1 new file mode 100644 index 000000000000..c65354895b28 --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/direct_response_1 @@ -0,0 +1,36 @@ +config { + name: "envoy.filters.network.direct_response" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.direct_response.v3.Config" + } +} +actions { + on_new_connection { + } +} +actions { + on_data { + } +} +actions { + on_data { + data: "y" + } +} +actions { + on_data { + } +} +actions { + on_data { + } +} +actions { + on_data { + data: "\006" + } +} +actions { + on_data { + } +} diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/direct_response_open_file b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/direct_response_open_file new file mode 100644 index 000000000000..26df2e4de4ec --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/direct_response_open_file @@ -0,0 +1,19 @@ +config { + name: "envoy.filters.network.direct_response" + typed_config { + type_url: 
"type.googleapis.com/envoy.extensions.filters.network.direct_response.v3.Config" + value: "\n\032\n\030*\014\n\002\020\001\"\006\020\001\"\002\030\0012\003\032\001\':\003\032\001\'" + } +} +actions { + on_new_connection { + } +} +actions{ + on_data{ + } +} +actions { + on_new_connection { + } +} diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/dubbo_proxy_1 b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/dubbo_proxy_1 new file mode 100644 index 000000000000..b9c6f893f556 --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/dubbo_proxy_1 @@ -0,0 +1,53 @@ +config { + name: "envoy.filters.network.dubbo_proxy" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.dubbo_proxy.v3.DubboProxy" + value: "\n!envoy.filters.network.dubbo_proxy" + } +} +actions { + on_new_connection { + } +} +actions { + advance_time { + milliseconds: 268435 + } +} +actions { + on_data { + data: "eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee" + } +} +actions { + on_new_connection { + } +} +actions { + on_data { + data: "\000\013" + } +} +actions { + on_data { + data: "\000\013" + } +} +actions { + on_data { + data: "\000\013" + } +} +actions { + on_data { + data: "\000\013" + } +} +actions { + on_new_connection { + } +} +actions { + on_new_connection { + } +} diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/echo_1 b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/echo_1 new file mode 100644 index 000000000000..fd15fde5a83f --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/echo_1 @@ -0,0 +1,31 @@ +config { + name: "envoy.filters.network.echo" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.echo.v3.Echo" + } +} +actions { + on_new_connection { + } +} +actions { + advance_time { + milliseconds: 2097152 + } +} +actions { + advance_time { + milliseconds: 4194304 + } +} +actions { + on_data { + data: "y" + } +} +actions { + advance_time { + milliseconds: 2097152 + } +} + diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/empty b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/empty new file mode 100644 index 000000000000..9933bd3fed12 --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/empty @@ -0,0 +1,14 @@ +config { + name: "envoy.filters.network.local_ratelimit" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.local_ratelimit.v3.LocalRateLimit" + value:"\001\n\311\001type.googleapis.com/envoy.extensions.filters.netwe\360\231\201\270\362\251\212\211\361\263\275\271\363\206\215\263\361\255\230\252\362\265\266\243\364\203\217\266\362\211\226\227\362\232\255\221\362\227\227\210\362\255\274\232\363\220\256\256\364\206\217\231\363\246\273\262\363\214\207\237\360\255\215\236\364\206\232\207\361\273\210\256\362\234\204\234\361\256\236\207\361\225\240\253\363\255\231\272\363\254\256\273\360\276\201\214\361\231\215\216\363\233\202\226\361\252\222\256\362\217\241\265\363\200\257\245voy.api.v2.route.RouteActlRateLimit\022\017\010\200\312\002\022\004\010\200\312\002\032\003\010\200^" + } +} + +actions { + on_data { + data: "\nVtype.googleapis.com/envoy.extensions.filters.network.local_ratelimit.v3.LocalRateLimit\022\002\010 
\032d\n\002\010\001\022^\n2\n%envoy.filters.network.local_ratelimit\022\000\032\007\n\002\010\001\022\001+\022\000\032&\n\000\022\"\000\000\000\000\000voy.filters.network.lo\000\000\000\000\000\000+" + end_stream: true + } +} diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/ext_authz_1 b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/ext_authz_1 new file mode 100644 index 000000000000..fabd48ca0150 --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/ext_authz_1 @@ -0,0 +1,20 @@ +config { + name: "envoy.filters.network.ext_authz" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.ext_authz.v3.ExtAuthz" + } +} +actions { + on_data { + data: "y" + } +} +actions { + on_new_connection { + } +} +actions { + advance_time { + milliseconds: 655360 + } +} diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/ext_authz_2 b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/ext_authz_2 new file mode 100644 index 000000000000..cc8199f166f4 --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/ext_authz_2 @@ -0,0 +1,16 @@ +config { + name: "envoy.filters.network.ext_authz" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.ext_authz.v3.ExtAuthz" + value: "\n\037envoy.filters.network.ext_authz\030\001(\001" + } +} +actions { + on_new_connection { + } +} +actions { + on_data { + data: ":" + } +} diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/local_ratelimit_1 b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/local_ratelimit_1 new file mode 100644 index 000000000000..ab8d73afbd8f --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/local_ratelimit_1 @@ -0,0 +1,39 @@ +config { + name: "envoy.filters.network.local_ratelimit" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.local_ratelimit.v3.LocalRateLimit" + value: "\nVtype.googleapis.com/envoy.extensions.filters.network.local_ratelimit.v3.LocalRateLimit\022\013\010\001\032\007\010\200^\020\200\306\001" + } +} +actions { + on_new_connection { + } +} +actions { + on_data { + data: "\000\000" + } +} +actions { + on_data { + data: "\000\000" + } +} +actions { + on_new_connection { + } +} +actions { + advance_time { + milliseconds: 12035000 + } +} +actions { + on_data { + data: "\000\000" + } +} +actions { + on_new_connection { + } +} diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/local_ratelimit_time_overflow b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/local_ratelimit_time_overflow new file mode 100644 index 000000000000..a450f763024b --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/local_ratelimit_time_overflow @@ -0,0 +1,44 @@ +config { + name: "envoy.filters.network.local_ratelimit" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.local_ratelimit.v3.LocalRateLimit" + value: "\nVtype.googleapis.com/envoy.extensions.filters.network.local_ratelimit.v3.LocalRateLimit\022\017\010\001\032\013\010\200\336\200\200\240\007\020\200\306!" 
+ } +} +actions { + advance_time { + milliseconds: 12035000 + } +} +actions { + on_data { + data: "\000\013" + } +} +actions { + on_data { + data: "\000\000" + } +} +actions { + on_new_connection { + } +} +actions { + advance_time { + milliseconds: 12035000 + } +} +actions { + on_new_connection { + } +} +actions { + advance_time { + milliseconds: 53 + } +} +actions { + on_new_connection { + } +} diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/redis_proxy_1 b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/redis_proxy_1 new file mode 100644 index 000000000000..15ac639614e8 --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/redis_proxy_1 @@ -0,0 +1,28 @@ +config { + name: "envoy.filters.network.redis_proxy" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.redis_proxy.v3.RedisProxy" + value: "\n\001N\032\032\n\005\020\200\200\200\030\030\001 \377\377\377\337\017*\005\020\200\200\200\0302\000@\001*\010\n\006\032\004\001\000\000\010" + } +} +actions { + on_new_connection { + + } +} +actions { + on_new_connection { + } +} +actions { + on_data { + data: "0" + end_stream: true + } +} +actions { + on_data { + data: "0" + end_stream: true + } +} diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/sni_cluster_1 b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/sni_cluster_1 new file mode 100644 index 000000000000..e657e3b116a2 --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/sni_cluster_1 @@ -0,0 +1,35 @@ +config { + name: "envoy.filters.network.sni_cluster" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.sni_cluster.v3.SniCluster" + } +} +actions { + on_new_connection { + } +} +actions { + advance_time { + milliseconds: 268435 + } +} +actions { + on_data { + data: "eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee" + } +} +actions { + on_data { + data: "IIIIIIIIIIIIIIIIIIII\000\000\000\000\000\000\000;IIIIIIIIIIIIIIIIIIIIIIIIIIIIII" + } +} +actions { + advance_time { + milliseconds: 16384 + } +} +actions { + advance_time { + milliseconds: 13 + } +} diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/sni_cluster_2 b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/sni_cluster_2 new file mode 100644 index 000000000000..25a5c974299a --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/sni_cluster_2 @@ -0,0 +1,25 @@ +config { + name: "envoy.filters.network.sni_cluster" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.sni_cluster.v3.SniCluster" + } +} +actions { + on_new_connection { + } +} +actions { + advance_time { + milliseconds: 268435 + } +} +actions { + on_data { + data: "eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee" + } +} +actions { + advance_time { + milliseconds: 1677721 + } +} diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_fuzz.proto b/test/extensions/filters/network/common/fuzz/network_readfilter_fuzz.proto new file mode 100644 index 000000000000..e8205658d25e --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_fuzz.proto @@ -0,0 +1,34 @@ +syntax = "proto3"; + +package test.extensions.filters.network; +import 
"google/protobuf/empty.proto"; +import "validate/validate.proto"; +import "envoy/config/listener/v3/listener_components.proto"; + +message OnData { + bytes data = 1; + bool end_stream = 2; +} + +message AdvanceTime { + // Advance the system time by (0,24] hours. + uint32 milliseconds = 1 [(validate.rules).uint32 = {gt: 0 lt: 86400000}]; +} + +message Action { + oneof action_selector { + option (validate.required) = true; + // Call onNewConnection() + google.protobuf.Empty on_new_connection = 1; + // Call onData() + OnData on_data = 2; + // Advance time_source_ + AdvanceTime advance_time = 3; + } +} + +message FilterFuzzTestCase { + // This is actually a protobuf type for the config of network filters. + envoy.config.listener.v3.Filter config = 1; + repeated Action actions = 2; +} diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_fuzz_test.cc b/test/extensions/filters/network/common/fuzz/network_readfilter_fuzz_test.cc new file mode 100644 index 000000000000..cacff3aa8938 --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_fuzz_test.cc @@ -0,0 +1,61 @@ +#include "common/config/utility.h" +#include "common/protobuf/utility.h" + +#include "extensions/filters/network/well_known_names.h" + +#include "test/config/utility.h" +#include "test/extensions/filters/network/common/fuzz/network_readfilter_fuzz.pb.validate.h" +#include "test/extensions/filters/network/common/fuzz/uber_readfilter.h" +#include "test/fuzz/fuzz_runner.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +DEFINE_PROTO_FUZZER(const test::extensions::filters::network::FilterFuzzTestCase& input) { + ABSL_ATTRIBUTE_UNUSED static PostProcessorRegistration reg = { + [](test::extensions::filters::network::FilterFuzzTestCase* input, unsigned int seed) { + // This post-processor mutation is applied only when libprotobuf-mutator + // calls mutate on an input, and *not* during fuzz target execution. + // Replaying a corpus through the fuzzer will not be affected by the + // post-processor mutation. + + // TODO(jianwendong): After extending to cover all the filters, we can use + // `Registry::FactoryRegistry< + // Server::Configuration::NamedNetworkFilterConfigFactory>::registeredNames()` + // to get all the filter names instead of calling `UberFilterFuzzer::filter_names()`. + static const auto filter_names = UberFilterFuzzer::filterNames(); + static const auto factories = Registry::FactoryRegistry< + Server::Configuration::NamedNetworkFilterConfigFactory>::factories(); + // Choose a valid filter name. + if (std::find(filter_names.begin(), filter_names.end(), input->config().name()) == + std::end(filter_names)) { + absl::string_view filter_name = filter_names[seed % filter_names.size()]; + input->mutable_config()->set_name(std::string(filter_name)); + } + // Set the corresponding type_url for Any. + auto& factory = factories.at(input->config().name()); + input->mutable_config()->mutable_typed_config()->set_type_url( + absl::StrCat("type.googleapis.com/", + factory->createEmptyConfigProto()->GetDescriptor()->full_name())); + }}; + + try { + TestUtility::validate(input); + // Check the filter's name in case some filters are not supported yet. + static const auto filter_names = UberFilterFuzzer::filterNames(); + // TODO(jianwendong): remove this if block after covering all the filters. 
+ if (std::find(filter_names.begin(), filter_names.end(), input.config().name()) == + std::end(filter_names)) { + ENVOY_LOG_MISC(debug, "Test case with unsupported filter type: {}", input.config().name()); + return; + } + static UberFilterFuzzer fuzzer; + fuzzer.fuzz(input.config(), input.actions()); + } catch (const ProtoValidationException& e) { + ENVOY_LOG_MISC(debug, "ProtoValidationException: {}", e.what()); + } +} + +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/test/extensions/filters/network/common/fuzz/uber_per_readfilter.cc b/test/extensions/filters/network/common/fuzz/uber_per_readfilter.cc new file mode 100644 index 000000000000..7507dd72d4e3 --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/uber_per_readfilter.cc @@ -0,0 +1,102 @@ +#include "envoy/extensions/filters/network/direct_response/v3/config.pb.h" +#include "envoy/extensions/filters/network/local_ratelimit/v3/local_rate_limit.pb.h" + +#include "extensions/filters/network/common/utility.h" +#include "extensions/filters/network/well_known_names.h" + +#include "test/extensions/filters/common/ext_authz/test_common.h" +#include "test/extensions/filters/network/common/fuzz/uber_readfilter.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace { +// Limit the fill_interval in the config of local_ratelimit filter prevent overflow in +// std::chrono::time_point. +static const int SecondsPerDay = 86400; +} // namespace +std::vector UberFilterFuzzer::filterNames() { + // These filters have already been covered by this fuzzer. + // Will extend to cover other network filters one by one. + static std::vector filter_names; + if (filter_names.empty()) { + filter_names = {NetworkFilterNames::get().ExtAuthorization, + NetworkFilterNames::get().LocalRateLimit, + NetworkFilterNames::get().RedisProxy, + NetworkFilterNames::get().ClientSslAuth, + NetworkFilterNames::get().Echo, + NetworkFilterNames::get().DirectResponse, + NetworkFilterNames::get().DubboProxy, + NetworkFilterNames::get().SniCluster}; + } + return filter_names; +} + +void UberFilterFuzzer::perFilterSetup(const std::string& filter_name) { + // Set up response for ext_authz filter + if (filter_name == NetworkFilterNames::get().ExtAuthorization) { + + async_client_factory_ = std::make_unique(); + async_client_ = std::make_unique(); + // TODO(jianwendong): consider testing on different kinds of responses. + ON_CALL(*async_client_, sendRaw(_, _, _, _, _, _)) + .WillByDefault(testing::WithArgs<3>(Invoke([&](Grpc::RawAsyncRequestCallbacks& callbacks) { + Filters::Common::ExtAuthz::GrpcClientImpl* grpc_client_impl = + dynamic_cast(&callbacks); + const std::string empty_body{}; + const auto expected_headers = + Filters::Common::ExtAuthz::TestCommon::makeHeaderValueOption({}); + auto check_response = Filters::Common::ExtAuthz::TestCommon::makeCheckResponse( + Grpc::Status::WellKnownGrpcStatus::Ok, envoy::type::v3::OK, empty_body, + expected_headers); + // Give response to the grpc_client by calling onSuccess(). 
+ grpc_client_impl->onSuccess(std::move(check_response), span_); + return async_request_.get(); + }))); + + EXPECT_CALL(*async_client_factory_, create()).WillOnce(Invoke([&] { + return std::move(async_client_); + })); + + EXPECT_CALL(factory_context_.cluster_manager_.async_client_manager_, + factoryForGrpcService(_, _, _)) + .WillOnce(Invoke([&](const envoy::config::core::v3::GrpcService&, Stats::Scope&, bool) { + return std::move(async_client_factory_); + })); + } +} + +void UberFilterFuzzer::checkInvalidInputForFuzzer(const std::string& filter_name, + Protobuf::Message* config_message) { + // System calls such as reading files are prohibited in this fuzzer. Some input that crashes the + // mock/fake objects are also prohibited. For now there are only two filters {DirectResponse, + // LocalRateLimit} on which we have constraints. + const std::string name = Extensions::NetworkFilters::Common::FilterNameUtil::canonicalFilterName( + std::string(filter_name)); + if (filter_name == NetworkFilterNames::get().DirectResponse) { + envoy::extensions::filters::network::direct_response::v3::Config& config = + dynamic_cast( + *config_message); + if (config.response().specifier_case() == + envoy::config::core::v3::DataSource::SpecifierCase::kFilename) { + throw EnvoyException( + absl::StrCat("direct_response trying to open a file. Config:\n{}", config.DebugString())); + } + } else if (filter_name == NetworkFilterNames::get().LocalRateLimit) { + envoy::extensions::filters::network::local_ratelimit::v3::LocalRateLimit& config = + dynamic_cast( + *config_message); + if (config.token_bucket().fill_interval().seconds() > SecondsPerDay) { + // Too large fill_interval may cause "c++/v1/chrono" overflow when simulated_time_system_ is + // converting it to a smaller unit. Constraining fill_interval to no greater than one day is + // reasonable. + throw EnvoyException( + absl::StrCat("local_ratelimit trying to set a large fill_interval. Config:\n{}", + config.DebugString())); + } + } +} + +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/network/common/fuzz/uber_readfilter.cc b/test/extensions/filters/network/common/fuzz/uber_readfilter.cc new file mode 100644 index 000000000000..cd984f47351b --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/uber_readfilter.cc @@ -0,0 +1,108 @@ +#include "test/extensions/filters/network/common/fuzz/uber_readfilter.h" + +#include "common/config/utility.h" +#include "common/config/version_converter.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { + +void UberFilterFuzzer::reset() { + // Reset some changes made by current filter on some mock objects. + + // Close the connection to make sure the filter's callback is set to nullptr. + read_filter_callbacks_->connection_.raiseEvent(Network::ConnectionEvent::LocalClose); + // Clear the filter's raw pointer stored inside the connection_ and reset the connection_'s state. + read_filter_callbacks_->connection_.callbacks_.clear(); + read_filter_callbacks_->connection_.bytes_sent_callbacks_.clear(); + read_filter_callbacks_->connection_.state_ = Network::Connection::State::Open; + read_filter_.reset(); +} + +void UberFilterFuzzer::fuzzerSetup() { + // Setup process when this fuzzer object is constructed. + // For a static fuzzer, this will only be executed once. + + // Get the pointer of read_filter when the read_filter is being added to connection_. 
+ read_filter_callbacks_ = std::make_shared>(); + ON_CALL(read_filter_callbacks_->connection_, addReadFilter(_)) + .WillByDefault(Invoke([&](Network::ReadFilterSharedPtr read_filter) -> void { + read_filter_ = read_filter; + read_filter_->initializeReadFilterCallbacks(*read_filter_callbacks_); + })); + // Prepare sni for sni_cluster filter and sni_dynamic_forward_proxy filter. + ON_CALL(read_filter_callbacks_->connection_, requestedServerName()) + .WillByDefault(testing::Return("fake_cluster")); + // Prepare time source for filters such as local_ratelimit filter. + factory_context_.prepareSimulatedSystemTime(); + // Prepare address for filters such as ext_authz filter. + addr_ = std::make_shared("/test/test.sock"); + read_filter_callbacks_->connection_.remote_address_ = addr_; + read_filter_callbacks_->connection_.local_address_ = addr_; + async_request_ = std::make_unique(); +} + +UberFilterFuzzer::UberFilterFuzzer() : time_source_(factory_context_.simulatedTimeSystem()) { + fuzzerSetup(); +} + +void UberFilterFuzzer::fuzz( + const envoy::config::listener::v3::Filter& proto_config, + const Protobuf::RepeatedPtrField<::test::extensions::filters::network::Action>& actions) { + try { + // Try to create the filter callback(cb_). Exit early if the config is invalid or violates PGV + // constraints. + const std::string& filter_name = proto_config.name(); + ENVOY_LOG_MISC(info, "filter name {}", filter_name); + auto& factory = Config::Utility::getAndCheckFactoryByName< + Server::Configuration::NamedNetworkFilterConfigFactory>(filter_name); + ProtobufTypes::MessagePtr message = Config::Utility::translateToFactoryConfig( + proto_config, factory_context_.messageValidationVisitor(), factory); + // Make sure no invalid system calls are executed in fuzzer. + checkInvalidInputForFuzzer(filter_name, message.get()); + ENVOY_LOG_MISC(info, "Config content after decoded: {}", message->DebugString()); + cb_ = factory.createFilterFactoryFromProto(*message, factory_context_); + + } catch (const EnvoyException& e) { + ENVOY_LOG_MISC(debug, "Controlled exception in filter setup {}", e.what()); + return; + } + perFilterSetup(proto_config.name()); + // Add filter to connection_. + cb_(read_filter_callbacks_->connection_); + for (const auto& action : actions) { + ENVOY_LOG_MISC(trace, "action {}", action.DebugString()); + switch (action.action_selector_case()) { + case test::extensions::filters::network::Action::kOnData: { + ASSERT(read_filter_ != nullptr); + Buffer::OwnedImpl buffer(action.on_data().data()); + read_filter_->onData(buffer, action.on_data().end_stream()); + + break; + } + case test::extensions::filters::network::Action::kOnNewConnection: { + ASSERT(read_filter_ != nullptr); + read_filter_->onNewConnection(); + + break; + } + case test::extensions::filters::network::Action::kAdvanceTime: { + time_source_.advanceTimeAsync( + std::chrono::milliseconds(action.advance_time().milliseconds())); + factory_context_.dispatcher().run(Event::Dispatcher::RunType::NonBlock); + break; + } + default: { + // Unhandled actions. 
+ ENVOY_LOG_MISC(debug, "Action support is missing for:\n{}", action.DebugString()); + PANIC("A case is missing for an action"); + } + } + } + + reset(); +} + +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/network/common/fuzz/uber_readfilter.h b/test/extensions/filters/network/common/fuzz/uber_readfilter.h new file mode 100644 index 000000000000..31a5bbc1d91e --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/uber_readfilter.h @@ -0,0 +1,49 @@ +#include "envoy/network/filter.h" + +#include "common/protobuf/protobuf.h" + +#include "test/extensions/filters/network/common/fuzz/network_readfilter_fuzz.pb.validate.h" +#include "test/extensions/filters/network/common/fuzz/utils/fakes.h" +#include "test/mocks/network/mocks.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { + +class UberFilterFuzzer { +public: + UberFilterFuzzer(); + // This creates the filter config and runs the fuzzed data against the filter. + void + fuzz(const envoy::config::listener::v3::Filter& proto_config, + const Protobuf::RepeatedPtrField<::test::extensions::filters::network::Action>& actions); + // Get the name of filters which has been covered by this fuzzer. + static std::vector filterNames(); + // Check whether the filter's config is invalid for fuzzer(e.g. system call). + void checkInvalidInputForFuzzer(const std::string& filter_name, + Protobuf::Message* config_message); + +protected: + // Set-up filter specific mock expectations in constructor. + void fuzzerSetup(); + // Reset the states of the mock objects. + void reset(); + // Mock behaviors for specific filters. + void perFilterSetup(const std::string& filter_name); + +private: + Server::Configuration::FakeFactoryContext factory_context_; + Network::ReadFilterSharedPtr read_filter_; + Network::FilterFactoryCb cb_; + Network::Address::InstanceConstSharedPtr addr_; + Event::SimulatedTimeSystem& time_source_; + std::shared_ptr> read_filter_callbacks_; + std::unique_ptr async_request_; + std::unique_ptr async_client_; + std::unique_ptr async_client_factory_; + Tracing::MockSpan span_; +}; + +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/network/common/fuzz/utils/BUILD b/test/extensions/filters/network/common/fuzz/utils/BUILD new file mode 100644 index 000000000000..6c231c2a185f --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/utils/BUILD @@ -0,0 +1,17 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_test_library", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_test_library( + name = "network_filter_fuzzer_fakes_lib", + hdrs = ["fakes.h"], + deps = [ + "//test/mocks/server:factory_context_mocks", + ], +) diff --git a/test/extensions/filters/network/common/fuzz/utils/fakes.h b/test/extensions/filters/network/common/fuzz/utils/fakes.h new file mode 100644 index 000000000000..035dcb3e29ca --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/utils/fakes.h @@ -0,0 +1,49 @@ +#include "test/mocks/server/factory_context.h" + +namespace Envoy { +namespace Server { +namespace Configuration { +class FakeFactoryContext : public MockFactoryContext { +public: + void prepareSimulatedSystemTime() { + api_ = Api::createApiForTest(time_system_); + dispatcher_ = api_->allocateDispatcher("test_thread"); + } + AccessLog::AccessLogManager& accessLogManager() override { return access_log_manager_; } + Upstream::ClusterManager& 
clusterManager() override { return cluster_manager_; }
+ Event::Dispatcher& dispatcher() override { return *dispatcher_; }
+ const Network::DrainDecision& drainDecision() override { return drain_manager_; }
+ Init::Manager& initManager() override { return init_manager_; }
+ ServerLifecycleNotifier& lifecycleNotifier() override { return lifecycle_notifier_; }
+ const LocalInfo::LocalInfo& localInfo() const override { return local_info_; }
+ Envoy::Random::RandomGenerator& random() override { return random_; }
+ Envoy::Runtime::Loader& runtime() override { return runtime_loader_; }
+ Stats::Scope& scope() override { return scope_; }
+ Singleton::Manager& singletonManager() override { return *singleton_manager_; }
+ ThreadLocal::Instance& threadLocal() override { return thread_local_; }
+ Server::Admin& admin() override { return admin_; }
+ Stats::Scope& listenerScope() override { return listener_scope_; }
+ Api::Api& api() override { return *api_; }
+ TimeSource& timeSource() override { return time_system_; }
+ OverloadManager& overloadManager() override { return overload_manager_; }
+ ProtobufMessage::ValidationContext& messageValidationContext() override {
+ return validation_context_;
+ }
+ ProtobufMessage::ValidationVisitor& messageValidationVisitor() override {
+ return ProtobufMessage::getStrictValidationVisitor();
+ }
+ Event::SimulatedTimeSystem& simulatedTimeSystem() {
+ return dynamic_cast(time_system_);
+ }
+ Event::TestTimeSystem& timeSystem() { return time_system_; }
+ Grpc::Context& grpcContext() override { return grpc_context_; }
+ Http::Context& httpContext() override { return http_context_; }
+
+ Event::DispatcherPtr dispatcher_;
+ Event::SimulatedTimeSystem time_system_;
+ Api::ApiPtr api_;
+};
+
+} // namespace Configuration
+} // namespace Server
+} // namespace Envoy
From ff0beb1b10c10b0b95deca9a1cd4e3a980939907 Mon Sep 17 00:00:00 2001
From: Sotiris Nanopoulos
Date: Mon, 27 Jul 2020 09:55:02 -0700
Subject: [PATCH 752/909] Cache v6only option when we create ipv6 socket (#11793)

On Windows, getsockopt fails if the socket driver is performing another
non-blocking operation. For example, if we query a specific socket option
while the socket is connecting, the getsockopt call fails with error 10022.
When we create an IPv6 socket we set it to be IPv6-only. We now cache this
setting at creation time and read the cached value instead of querying the
socket before we connect.

This fixes the IpVersions/TcpClientConnectionImplTest.BadConnectConnRefused/IPv6
test on Windows.
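A minimal sketch of the caching idea, using made-up toy names (ToySocketHandle)
rather than the real IoSocketHandleImpl: whoever creates the socket already knows
whether it set IPV6_V6ONLY, so it hands that flag to the handle, and later lookups
read the cached value instead of calling getsockopt while a non-blocking connect
may be in flight.

#include <iostream>

// Toy handle (hypothetical names, not Envoy's classes): the v6only choice is
// cached at construction instead of being queried from the OS later.
class ToySocketHandle {
public:
  ToySocketHandle(int fd, bool v6only) : fd_(fd), v6only_(v6only) {}

  // Before the change, answering this would mean a getsockopt(IPPROTO_IPV6,
  // IPV6_V6ONLY, ...) call, which can fail on Windows while a non-blocking
  // connect is still in flight; the cached flag avoids that call entirely.
  bool v6only() const { return v6only_; }
  int fd() const { return fd_; }

private:
  const int fd_;
  const bool v6only_; // cached when the socket is created
};

int main() {
  // The creator of the socket knows which option it set, so it passes it down.
  const ToySocketHandle handle(/*fd=*/42, /*v6only=*/true);
  std::cout << "fd " << handle.fd() << " v6only=" << std::boolalpha
            << handle.v6only() << "\n";
  return 0;
}

The cached value can be trusted only because the option is set exactly once, at
socket creation, which is the same assumption the change below relies on.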
Signed-off-by: Sotiris Nanopoulos --- include/envoy/network/socket.h | 5 +++-- .../common/network/io_socket_handle_impl.cc | 19 +------------------ source/common/network/io_socket_handle_impl.h | 4 +++- source/common/network/socket_impl.cc | 4 ---- source/common/network/socket_impl.h | 1 - .../common/network/socket_interface_impl.cc | 15 ++++++++++----- source/common/network/socket_interface_impl.h | 4 ++-- .../quiche/quic_io_handle_wrapper_test.cc | 1 - 8 files changed, 19 insertions(+), 34 deletions(-) diff --git a/include/envoy/network/socket.h b/include/envoy/network/socket.h index ff558f7760b9..c393f8541e2d 100644 --- a/include/envoy/network/socket.h +++ b/include/envoy/network/socket.h @@ -242,10 +242,11 @@ class SocketInterface { * @param type type of socket requested * @param addr_type type of address used with the socket * @param version IP version if address type is IP + * @param socket_v6only if the socket is ipv6 version only * @return @ref Network::IoHandlePtr that wraps the underlying socket file descriptor */ - virtual IoHandlePtr socket(Socket::Type type, Address::Type addr_type, - Address::IpVersion version) PURE; + virtual IoHandlePtr socket(Socket::Type type, Address::Type addr_type, Address::IpVersion version, + bool socket_v6only) PURE; /** * Low level api to create a socket in the underlying host stack. Does not create an diff --git a/source/common/network/io_socket_handle_impl.cc b/source/common/network/io_socket_handle_impl.cc index 12f3a04f00b7..5edd1fe5d054 100644 --- a/source/common/network/io_socket_handle_impl.cc +++ b/source/common/network/io_socket_handle_impl.cc @@ -430,24 +430,7 @@ Address::InstanceConstSharedPtr IoSocketHandleImpl::localAddress() { throw EnvoyException(fmt::format("getsockname failed for '{}': ({}) {}", fd_, result.errno_, errorDetails(result.errno_))); } - int socket_v6only = 0; - if (ss.ss_family == AF_INET6) { - socklen_t size_int = sizeof(socket_v6only); - result = os_sys_calls.getsockopt(fd_, IPPROTO_IPV6, IPV6_V6ONLY, &socket_v6only, &size_int); -#ifdef WIN32 - // On Windows, it is possible for this getsockopt() call to fail. - // This can happen if the address we are trying to connect to has nothing - // listening. So we can't use RELEASE_ASSERT and instead must throw an - // exception - if (SOCKET_FAILURE(result.rc_)) { - throw EnvoyException(fmt::format("getsockopt failed for '{}': ({}) {}", fd_, result.errno_, - errorDetails(result.errno_))); - } -#else - RELEASE_ASSERT(result.rc_ == 0, ""); -#endif - } - return Address::addressFromSockAddr(ss, ss_len, socket_v6only); + return Address::addressFromSockAddr(ss, ss_len, socket_v6only_); } Address::InstanceConstSharedPtr IoSocketHandleImpl::peerAddress() { diff --git a/source/common/network/io_socket_handle_impl.h b/source/common/network/io_socket_handle_impl.h index 305fc1765bf6..e23d0f444726 100644 --- a/source/common/network/io_socket_handle_impl.h +++ b/source/common/network/io_socket_handle_impl.h @@ -16,7 +16,8 @@ namespace Network { */ class IoSocketHandleImpl : public IoHandle, protected Logger::Loggable { public: - explicit IoSocketHandleImpl(os_fd_t fd = INVALID_SOCKET) : fd_(fd) {} + explicit IoSocketHandleImpl(os_fd_t fd = INVALID_SOCKET, bool socket_v6only = false) + : fd_(fd), socket_v6only_(socket_v6only) {} // Close underlying socket if close() hasn't been call yet. 
~IoSocketHandleImpl() override; @@ -77,6 +78,7 @@ class IoSocketHandleImpl : public IoHandle, protected Logger::Loggabletype()) {} diff --git a/source/common/network/socket_impl.h b/source/common/network/socket_impl.h index eb6fa747cc70..1704b6a005f1 100644 --- a/source/common/network/socket_impl.h +++ b/source/common/network/socket_impl.h @@ -9,7 +9,6 @@ namespace Network { class SocketImpl : public virtual Socket { public: - SocketImpl(Socket::Type type, Address::Type addr_type, Address::IpVersion version); SocketImpl(Socket::Type socket_type, const Address::InstanceConstSharedPtr addr); // Network::Socket diff --git a/source/common/network/socket_interface_impl.cc b/source/common/network/socket_interface_impl.cc index ba0b4a9b3fc5..e351faed53d1 100644 --- a/source/common/network/socket_interface_impl.cc +++ b/source/common/network/socket_interface_impl.cc @@ -13,7 +13,7 @@ namespace Envoy { namespace Network { IoHandlePtr SocketInterfaceImpl::socket(Socket::Type socket_type, Address::Type addr_type, - Address::IpVersion version) { + Address::IpVersion version, bool socket_v6only) { #if defined(__APPLE__) || defined(WIN32) int flags = 0; #else @@ -42,7 +42,7 @@ IoHandlePtr SocketInterfaceImpl::socket(Socket::Type socket_type, Address::Type const Api::SysCallSocketResult result = Api::OsSysCallsSingleton::get().socket(domain, flags, 0); RELEASE_ASSERT(SOCKET_VALID(result.rc_), fmt::format("socket(2) failed, got error: {}", errorDetails(result.errno_))); - IoHandlePtr io_handle = std::make_unique(result.rc_); + IoHandlePtr io_handle = std::make_unique(result.rc_, socket_v6only); #if defined(__APPLE__) || defined(WIN32) // Cannot set SOCK_NONBLOCK as a ::socket flag. @@ -56,10 +56,15 @@ IoHandlePtr SocketInterfaceImpl::socket(Socket::Type socket_type, Address::Type IoHandlePtr SocketInterfaceImpl::socket(Socket::Type socket_type, const Address::InstanceConstSharedPtr addr) { Address::IpVersion ip_version = addr->ip() ? addr->ip()->version() : Address::IpVersion::v4; - IoHandlePtr io_handle = SocketInterfaceImpl::socket(socket_type, addr->type(), ip_version); - if (addr->type() == Address::Type::Ip && addr->ip()->version() == Address::IpVersion::v6) { + int v6only = 0; + if (addr->type() == Address::Type::Ip && ip_version == Address::IpVersion::v6) { + v6only = addr->ip()->ipv6()->v6only(); + } + + IoHandlePtr io_handle = + SocketInterfaceImpl::socket(socket_type, addr->type(), ip_version, v6only); + if (addr->type() == Address::Type::Ip && ip_version == Address::IpVersion::v6) { // Setting IPV6_V6ONLY restricts the IPv6 socket to IPv6 connections only. 
- const int v6only = addr->ip()->ipv6()->v6only(); const Api::SysCallIntResult result = Api::OsSysCallsSingleton::get().setsockopt( io_handle->fd(), IPPROTO_IPV6, IPV6_V6ONLY, reinterpret_cast(&v6only), sizeof(v6only)); diff --git a/source/common/network/socket_interface_impl.h b/source/common/network/socket_interface_impl.h index 034aea25feaf..9259e01c09d9 100644 --- a/source/common/network/socket_interface_impl.h +++ b/source/common/network/socket_interface_impl.h @@ -11,8 +11,8 @@ namespace Network { class SocketInterfaceImpl : public SocketInterfaceBase { public: // SocketInterface - IoHandlePtr socket(Socket::Type socket_type, Address::Type addr_type, - Address::IpVersion version) override; + IoHandlePtr socket(Socket::Type socket_type, Address::Type addr_type, Address::IpVersion version, + bool socket_v6only) override; IoHandlePtr socket(Socket::Type socket_type, const Address::InstanceConstSharedPtr addr) override; IoHandlePtr socket(os_fd_t fd) override; bool ipFamilySupported(int domain) override; diff --git a/test/extensions/quic_listeners/quiche/quic_io_handle_wrapper_test.cc b/test/extensions/quic_listeners/quiche/quic_io_handle_wrapper_test.cc index 5027ada97d11..2361a0399b0d 100644 --- a/test/extensions/quic_listeners/quiche/quic_io_handle_wrapper_test.cc +++ b/test/extensions/quic_listeners/quiche/quic_io_handle_wrapper_test.cc @@ -60,7 +60,6 @@ TEST_F(QuicIoHandleWrapperTest, DelegateIoHandleCalls) { EXPECT_CALL(os_sys_calls_, getsockname(_, _, _)).WillOnce(Return(Api::SysCallIntResult{0, 0})); wrapper_->domain(); - EXPECT_CALL(os_sys_calls_, getsockopt_(_, _, _, _, _)).WillOnce(Return(0)); EXPECT_CALL(os_sys_calls_, getsockname(_, _, _)) .WillOnce(Invoke([](os_fd_t, sockaddr* addr, socklen_t* addrlen) -> Api::SysCallIntResult { addr->sa_family = AF_INET6; From 79d7d4ee917c4faff78b5ae8aa1b07cf2ff91cfc Mon Sep 17 00:00:00 2001 From: "Mark D. Roth" Date: Mon, 27 Jul 2020 09:55:32 -0700 Subject: [PATCH 753/909] api: Add CertificateProviderInstance to CommonTlsContext. (#12237) Signed-off-by: Mark D. Roth --- api/envoy/config/bootstrap/v3/bootstrap.proto | 9 +++- .../config/bootstrap/v4alpha/bootstrap.proto | 9 +++- .../transport_sockets/tls/v3/tls.proto | 45 ++++++++++++++++-- .../transport_sockets/tls/v4alpha/tls.proto | 47 +++++++++++++++++-- .../envoy/config/bootstrap/v3/bootstrap.proto | 9 +++- .../config/bootstrap/v4alpha/bootstrap.proto | 9 +++- .../transport_sockets/tls/v3/tls.proto | 45 ++++++++++++++++-- .../transport_sockets/tls/v4alpha/tls.proto | 47 +++++++++++++++++-- 8 files changed, 200 insertions(+), 20 deletions(-) diff --git a/api/envoy/config/bootstrap/v3/bootstrap.proto b/api/envoy/config/bootstrap/v3/bootstrap.proto index 9abd3a37fed4..25947fb1c23b 100644 --- a/api/envoy/config/bootstrap/v3/bootstrap.proto +++ b/api/envoy/config/bootstrap/v3/bootstrap.proto @@ -40,7 +40,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // ` for more detail. // Bootstrap :ref:`configuration overview `. -// [#next-free-field: 25] +// [#next-free-field: 26] message Bootstrap { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v2.Bootstrap"; @@ -227,6 +227,13 @@ message Bootstrap { // Optional overriding of default socket interface. The value must be the name of one of the // socket interface factories initialized through a bootstrap extension string default_socket_interface = 24; + + // Global map of CertificateProvider instances. 
These instances are referred to by name in the + // :ref:`CommonTlsContext.CertificateProviderInstance.instance_name + // ` + // field. + // [#not-implemented-hide:] + map certificate_provider_instances = 25; } // Administration interface :ref:`operations documentation diff --git a/api/envoy/config/bootstrap/v4alpha/bootstrap.proto b/api/envoy/config/bootstrap/v4alpha/bootstrap.proto index 84959f40ade9..3e4291944307 100644 --- a/api/envoy/config/bootstrap/v4alpha/bootstrap.proto +++ b/api/envoy/config/bootstrap/v4alpha/bootstrap.proto @@ -38,7 +38,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // ` for more detail. // Bootstrap :ref:`configuration overview `. -// [#next-free-field: 25] +// [#next-free-field: 26] message Bootstrap { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.Bootstrap"; @@ -218,6 +218,13 @@ message Bootstrap { // Optional overriding of default socket interface. The value must be the name of one of the // socket interface factories initialized through a bootstrap extension string default_socket_interface = 24; + + // Global map of CertificateProvider instances. These instances are referred to by name in the + // :ref:`CommonTlsContext.CertificateProviderInstance.instance_name + // ` + // field. + // [#not-implemented-hide:] + map certificate_provider_instances = 25; } // Administration interface :ref:`operations documentation diff --git a/api/envoy/extensions/transport_sockets/tls/v3/tls.proto b/api/envoy/extensions/transport_sockets/tls/v3/tls.proto index 1806a44666e5..7ee7920c724d 100644 --- a/api/envoy/extensions/transport_sockets/tls/v3/tls.proto +++ b/api/envoy/extensions/transport_sockets/tls/v3/tls.proto @@ -99,7 +99,7 @@ message DownstreamTlsContext { } // TLS context shared by both client and server TLS contexts. -// [#next-free-field: 11] +// [#next-free-field: 13] message CommonTlsContext { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.CommonTlsContext"; @@ -123,6 +123,26 @@ message CommonTlsContext { } } + // Similar to CertificateProvider above, but allows the provider instances to be configured on + // the client side instead of being sent from the control plane. + message CertificateProviderInstance { + // Provider instance name. This name must be defined in the client's configuration (e.g., a + // bootstrap file) to correspond to a provider instance (i.e., the same data in the typed_config + // field that would be sent in the CertificateProvider message if the config was sent by the + // control plane). If not present, defaults to "default". + // + // Instance names should generally be defined not in terms of the underlying provider + // implementation (e.g., "file_watcher") but rather in terms of the function of the + // certificates (e.g., "foo_deployment_identity"). + string instance_name = 1; + + // Opaque name used to specify certificate instances or types. For example, "ROOTCA" to specify + // a root-certificate (validation context) or "example.com" to specify a certificate for a + // particular domain. Not all provider instances will actually use this field, so the value + // defaults to the empty string. + string certificate_name = 2; + } + message CombinedCertificateValidationContext { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.CommonTlsContext.CombinedCertificateValidationContext"; @@ -133,17 +153,26 @@ message CommonTlsContext { // Config for fetching validation context via SDS API. 
Note SDS API allows certificates to be // fetched/refreshed over the network asynchronously with respect to the TLS handshake. - // Only to be used when validation_context_certificate_provider is not used. + // Only one of validation_context_sds_secret_config, validation_context_certificate_provider, + // or validation_context_certificate_provider_instance may be used. SdsSecretConfig validation_context_sds_secret_config = 2 [ (validate.rules).message = {required: true}, (udpa.annotations.field_migrate).oneof_promotion = "dynamic_validation_context" ]; - // Certificate provider for fetching validation context - only to be used when - // validation_context_sds_secret_config is not used. + // Certificate provider for fetching validation context. + // Only one of validation_context_sds_secret_config, validation_context_certificate_provider, + // or validation_context_certificate_provider_instance may be used. // [#not-implemented-hide:] CertificateProvider validation_context_certificate_provider = 3 [(udpa.annotations.field_migrate).oneof_promotion = "dynamic_validation_context"]; + + // Certificate provider instance for fetching validation context. + // Only one of validation_context_sds_secret_config, validation_context_certificate_provider, + // or validation_context_certificate_provider_instance may be used. + // [#not-implemented-hide:] + CertificateProviderInstance validation_context_certificate_provider_instance = 4 + [(udpa.annotations.field_migrate).oneof_promotion = "dynamic_validation_context"]; } reserved 5; @@ -168,6 +197,10 @@ message CommonTlsContext { // [#not-implemented-hide:] CertificateProvider tls_certificate_certificate_provider = 9; + // Certificate provider instance for fetching TLS certificates. + // [#not-implemented-hide:] + CertificateProviderInstance tls_certificate_certificate_provider_instance = 11; + oneof validation_context_type { // How to validate peer certificates. CertificateValidationContext validation_context = 3; @@ -188,6 +221,10 @@ message CommonTlsContext { // Certificate provider for fetching validation context. // [#not-implemented-hide:] CertificateProvider validation_context_certificate_provider = 10; + + // Certificate provider instance for fetching validation context. + // [#not-implemented-hide:] + CertificateProviderInstance validation_context_certificate_provider_instance = 12; } // Supplies the list of ALPN protocols that the listener should expose. In diff --git a/api/envoy/extensions/transport_sockets/tls/v4alpha/tls.proto b/api/envoy/extensions/transport_sockets/tls/v4alpha/tls.proto index d8cf226afbcd..a73ba6e002ba 100644 --- a/api/envoy/extensions/transport_sockets/tls/v4alpha/tls.proto +++ b/api/envoy/extensions/transport_sockets/tls/v4alpha/tls.proto @@ -98,7 +98,7 @@ message DownstreamTlsContext { } // TLS context shared by both client and server TLS contexts. -// [#next-free-field: 11] +// [#next-free-field: 13] message CommonTlsContext { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.transport_sockets.tls.v3.CommonTlsContext"; @@ -126,6 +126,29 @@ message CommonTlsContext { } } + // Similar to CertificateProvider above, but allows the provider instances to be configured on + // the client side instead of being sent from the control plane. + message CertificateProviderInstance { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProviderInstance"; + + // Provider instance name. 
This name must be defined in the client's configuration (e.g., a + // bootstrap file) to correspond to a provider instance (i.e., the same data in the typed_config + // field that would be sent in the CertificateProvider message if the config was sent by the + // control plane). If not present, defaults to "default". + // + // Instance names should generally be defined not in terms of the underlying provider + // implementation (e.g., "file_watcher") but rather in terms of the function of the + // certificates (e.g., "foo_deployment_identity"). + string instance_name = 1; + + // Opaque name used to specify certificate instances or types. For example, "ROOTCA" to specify + // a root-certificate (validation context) or "example.com" to specify a certificate for a + // particular domain. Not all provider instances will actually use this field, so the value + // defaults to the empty string. + string certificate_name = 2; + } + message CombinedCertificateValidationContext { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.transport_sockets.tls.v3.CommonTlsContext." @@ -138,14 +161,22 @@ message CommonTlsContext { oneof dynamic_validation_context { // Config for fetching validation context via SDS API. Note SDS API allows certificates to be // fetched/refreshed over the network asynchronously with respect to the TLS handshake. - // Only to be used when validation_context_certificate_provider is not used. + // Only one of validation_context_sds_secret_config, validation_context_certificate_provider, + // or validation_context_certificate_provider_instance may be used. SdsSecretConfig validation_context_sds_secret_config = 2 [(validate.rules).message = {required: true}]; - // Certificate provider for fetching validation context - only to be used when - // validation_context_sds_secret_config is not used. + // Certificate provider for fetching validation context. + // Only one of validation_context_sds_secret_config, validation_context_certificate_provider, + // or validation_context_certificate_provider_instance may be used. // [#not-implemented-hide:] CertificateProvider validation_context_certificate_provider = 3; + + // Certificate provider instance for fetching validation context. + // Only one of validation_context_sds_secret_config, validation_context_certificate_provider, + // or validation_context_certificate_provider_instance may be used. + // [#not-implemented-hide:] + CertificateProviderInstance validation_context_certificate_provider_instance = 4; } } @@ -171,6 +202,10 @@ message CommonTlsContext { // [#not-implemented-hide:] CertificateProvider tls_certificate_certificate_provider = 9; + // Certificate provider instance for fetching TLS certificates. + // [#not-implemented-hide:] + CertificateProviderInstance tls_certificate_certificate_provider_instance = 11; + oneof validation_context_type { // How to validate peer certificates. CertificateValidationContext validation_context = 3; @@ -191,6 +226,10 @@ message CommonTlsContext { // Certificate provider for fetching validation context. // [#not-implemented-hide:] CertificateProvider validation_context_certificate_provider = 10; + + // Certificate provider instance for fetching validation context. + // [#not-implemented-hide:] + CertificateProviderInstance validation_context_certificate_provider_instance = 12; } // Supplies the list of ALPN protocols that the listener should expose. 
In diff --git a/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto b/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto index dbfc503fff26..118971bf32cf 100644 --- a/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto +++ b/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto @@ -40,7 +40,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // ` for more detail. // Bootstrap :ref:`configuration overview `. -// [#next-free-field: 25] +// [#next-free-field: 26] message Bootstrap { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v2.Bootstrap"; @@ -226,6 +226,13 @@ message Bootstrap { // socket interface factories initialized through a bootstrap extension string default_socket_interface = 24; + // Global map of CertificateProvider instances. These instances are referred to by name in the + // :ref:`CommonTlsContext.CertificateProviderInstance.instance_name + // ` + // field. + // [#not-implemented-hide:] + map certificate_provider_instances = 25; + Runtime hidden_envoy_deprecated_runtime = 11 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; } diff --git a/generated_api_shadow/envoy/config/bootstrap/v4alpha/bootstrap.proto b/generated_api_shadow/envoy/config/bootstrap/v4alpha/bootstrap.proto index 96b84eaf4cfa..b81ffb91f839 100644 --- a/generated_api_shadow/envoy/config/bootstrap/v4alpha/bootstrap.proto +++ b/generated_api_shadow/envoy/config/bootstrap/v4alpha/bootstrap.proto @@ -39,7 +39,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // ` for more detail. // Bootstrap :ref:`configuration overview `. -// [#next-free-field: 25] +// [#next-free-field: 26] message Bootstrap { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.Bootstrap"; @@ -226,6 +226,13 @@ message Bootstrap { // Optional overriding of default socket interface. The value must be the name of one of the // socket interface factories initialized through a bootstrap extension string default_socket_interface = 24; + + // Global map of CertificateProvider instances. These instances are referred to by name in the + // :ref:`CommonTlsContext.CertificateProviderInstance.instance_name + // ` + // field. + // [#not-implemented-hide:] + map certificate_provider_instances = 25; } // Administration interface :ref:`operations documentation diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/tls.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/tls.proto index 1806a44666e5..7ee7920c724d 100644 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/tls.proto +++ b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/tls.proto @@ -99,7 +99,7 @@ message DownstreamTlsContext { } // TLS context shared by both client and server TLS contexts. -// [#next-free-field: 11] +// [#next-free-field: 13] message CommonTlsContext { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.CommonTlsContext"; @@ -123,6 +123,26 @@ message CommonTlsContext { } } + // Similar to CertificateProvider above, but allows the provider instances to be configured on + // the client side instead of being sent from the control plane. + message CertificateProviderInstance { + // Provider instance name. 
This name must be defined in the client's configuration (e.g., a + // bootstrap file) to correspond to a provider instance (i.e., the same data in the typed_config + // field that would be sent in the CertificateProvider message if the config was sent by the + // control plane). If not present, defaults to "default". + // + // Instance names should generally be defined not in terms of the underlying provider + // implementation (e.g., "file_watcher") but rather in terms of the function of the + // certificates (e.g., "foo_deployment_identity"). + string instance_name = 1; + + // Opaque name used to specify certificate instances or types. For example, "ROOTCA" to specify + // a root-certificate (validation context) or "example.com" to specify a certificate for a + // particular domain. Not all provider instances will actually use this field, so the value + // defaults to the empty string. + string certificate_name = 2; + } + message CombinedCertificateValidationContext { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.CommonTlsContext.CombinedCertificateValidationContext"; @@ -133,17 +153,26 @@ message CommonTlsContext { // Config for fetching validation context via SDS API. Note SDS API allows certificates to be // fetched/refreshed over the network asynchronously with respect to the TLS handshake. - // Only to be used when validation_context_certificate_provider is not used. + // Only one of validation_context_sds_secret_config, validation_context_certificate_provider, + // or validation_context_certificate_provider_instance may be used. SdsSecretConfig validation_context_sds_secret_config = 2 [ (validate.rules).message = {required: true}, (udpa.annotations.field_migrate).oneof_promotion = "dynamic_validation_context" ]; - // Certificate provider for fetching validation context - only to be used when - // validation_context_sds_secret_config is not used. + // Certificate provider for fetching validation context. + // Only one of validation_context_sds_secret_config, validation_context_certificate_provider, + // or validation_context_certificate_provider_instance may be used. // [#not-implemented-hide:] CertificateProvider validation_context_certificate_provider = 3 [(udpa.annotations.field_migrate).oneof_promotion = "dynamic_validation_context"]; + + // Certificate provider instance for fetching validation context. + // Only one of validation_context_sds_secret_config, validation_context_certificate_provider, + // or validation_context_certificate_provider_instance may be used. + // [#not-implemented-hide:] + CertificateProviderInstance validation_context_certificate_provider_instance = 4 + [(udpa.annotations.field_migrate).oneof_promotion = "dynamic_validation_context"]; } reserved 5; @@ -168,6 +197,10 @@ message CommonTlsContext { // [#not-implemented-hide:] CertificateProvider tls_certificate_certificate_provider = 9; + // Certificate provider instance for fetching TLS certificates. + // [#not-implemented-hide:] + CertificateProviderInstance tls_certificate_certificate_provider_instance = 11; + oneof validation_context_type { // How to validate peer certificates. CertificateValidationContext validation_context = 3; @@ -188,6 +221,10 @@ message CommonTlsContext { // Certificate provider for fetching validation context. // [#not-implemented-hide:] CertificateProvider validation_context_certificate_provider = 10; + + // Certificate provider instance for fetching validation context. 
+ // [#not-implemented-hide:] + CertificateProviderInstance validation_context_certificate_provider_instance = 12; } // Supplies the list of ALPN protocols that the listener should expose. In diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/tls.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/tls.proto index d8cf226afbcd..a73ba6e002ba 100644 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/tls.proto +++ b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/tls.proto @@ -98,7 +98,7 @@ message DownstreamTlsContext { } // TLS context shared by both client and server TLS contexts. -// [#next-free-field: 11] +// [#next-free-field: 13] message CommonTlsContext { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.transport_sockets.tls.v3.CommonTlsContext"; @@ -126,6 +126,29 @@ message CommonTlsContext { } } + // Similar to CertificateProvider above, but allows the provider instances to be configured on + // the client side instead of being sent from the control plane. + message CertificateProviderInstance { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProviderInstance"; + + // Provider instance name. This name must be defined in the client's configuration (e.g., a + // bootstrap file) to correspond to a provider instance (i.e., the same data in the typed_config + // field that would be sent in the CertificateProvider message if the config was sent by the + // control plane). If not present, defaults to "default". + // + // Instance names should generally be defined not in terms of the underlying provider + // implementation (e.g., "file_watcher") but rather in terms of the function of the + // certificates (e.g., "foo_deployment_identity"). + string instance_name = 1; + + // Opaque name used to specify certificate instances or types. For example, "ROOTCA" to specify + // a root-certificate (validation context) or "example.com" to specify a certificate for a + // particular domain. Not all provider instances will actually use this field, so the value + // defaults to the empty string. + string certificate_name = 2; + } + message CombinedCertificateValidationContext { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.transport_sockets.tls.v3.CommonTlsContext." @@ -138,14 +161,22 @@ message CommonTlsContext { oneof dynamic_validation_context { // Config for fetching validation context via SDS API. Note SDS API allows certificates to be // fetched/refreshed over the network asynchronously with respect to the TLS handshake. - // Only to be used when validation_context_certificate_provider is not used. + // Only one of validation_context_sds_secret_config, validation_context_certificate_provider, + // or validation_context_certificate_provider_instance may be used. SdsSecretConfig validation_context_sds_secret_config = 2 [(validate.rules).message = {required: true}]; - // Certificate provider for fetching validation context - only to be used when - // validation_context_sds_secret_config is not used. + // Certificate provider for fetching validation context. + // Only one of validation_context_sds_secret_config, validation_context_certificate_provider, + // or validation_context_certificate_provider_instance may be used. 
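As an illustrative aside (not part of the patch hunks themselves): the proto comments above describe how the new CertificateProviderInstance fields are meant to be used, and a minimal sketch may make the relationship clearer. The message and field names (CommonTlsContext, tls_certificate_certificate_provider_instance, validation_context_certificate_provider_instance, instance_name, certificate_name) come from the diff; the header path, the example values, and the accessor names (standard protobuf C++ codegen) are assumptions, and since the fields are still tagged [#not-implemented-hide:], nothing in Envoy consumes them yet.

#include "envoy/extensions/transport_sockets/tls/v3/tls.pb.h"

// Hypothetical sketch: populate the new provider-instance fields on a
// CommonTlsContext. The instance_name values are assumed to match entries in
// the bootstrap's certificate_provider_instances map described above.
void illustrateCertificateProviderInstance() {
  envoy::extensions::transport_sockets::tls::v3::CommonTlsContext ctx;

  // Identity certificate: refer to a provider instance by its functional name
  // (e.g. "foo_deployment_identity"), selecting a per-domain certificate.
  auto* identity = ctx.mutable_tls_certificate_certificate_provider_instance();
  identity->set_instance_name("foo_deployment_identity");
  identity->set_certificate_name("example.com");

  // Validation context (trust roots): unset instance_name falls back to
  // "default"; certificate_name is an opaque selector such as "ROOTCA".
  auto* roots = ctx.mutable_validation_context_certificate_provider_instance();
  roots->set_instance_name("default");
  roots->set_certificate_name("ROOTCA");
}

The design intent captured in the comments is that instance names describe the function of the certificates rather than the provider implementation, so the same bootstrap-defined instance can be referenced from many TLS contexts.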
// [#not-implemented-hide:] CertificateProvider validation_context_certificate_provider = 3; + + // Certificate provider instance for fetching validation context. + // Only one of validation_context_sds_secret_config, validation_context_certificate_provider, + // or validation_context_certificate_provider_instance may be used. + // [#not-implemented-hide:] + CertificateProviderInstance validation_context_certificate_provider_instance = 4; } } @@ -171,6 +202,10 @@ message CommonTlsContext { // [#not-implemented-hide:] CertificateProvider tls_certificate_certificate_provider = 9; + // Certificate provider instance for fetching TLS certificates. + // [#not-implemented-hide:] + CertificateProviderInstance tls_certificate_certificate_provider_instance = 11; + oneof validation_context_type { // How to validate peer certificates. CertificateValidationContext validation_context = 3; @@ -191,6 +226,10 @@ message CommonTlsContext { // Certificate provider for fetching validation context. // [#not-implemented-hide:] CertificateProvider validation_context_certificate_provider = 10; + + // Certificate provider instance for fetching validation context. + // [#not-implemented-hide:] + CertificateProviderInstance validation_context_certificate_provider_instance = 12; } // Supplies the list of ALPN protocols that the listener should expose. In From 64b3ac63ae9d18d4efb2b2a97af53a1b6940e309 Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Mon, 27 Jul 2020 13:26:21 -0700 Subject: [PATCH 754/909] ci: clear some CircleCI tech debts, collect more profile (#12270) Risk Level: Low Signed-off-by: Lizan Zhou --- .azure-pipelines/bazel.yml | 6 ++-- ci/build_setup.sh | 23 +++++--------- ci/do_ci.sh | 64 ++++++++++++++++++++------------------ ci/do_circle_ci.sh | 13 +++++++- 4 files changed, 54 insertions(+), 52 deletions(-) diff --git a/.azure-pipelines/bazel.yml b/.azure-pipelines/bazel.yml index 3999c3efab6f..52dfac3ba1cb 100644 --- a/.azure-pipelines/bazel.yml +++ b/.azure-pipelines/bazel.yml @@ -50,14 +50,12 @@ steps: ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory) ${{ if parameters.rbe }}: ENVOY_RBE: "1" - # Use https://docs.bazel.build/versions/master/command-line-reference.html#flag--experimental_repository_cache_hardlinks - # to save disk space. - BAZEL_BUILD_EXTRA_OPTIONS: "--config=remote-ci --jobs=$(RbeJobs) --curses=no --experimental_repository_cache_hardlinks ${{ parameters.bazelBuildExtraOptions }}" + BAZEL_BUILD_EXTRA_OPTIONS: "--config=remote-ci --jobs=$(RbeJobs) ${{ parameters.bazelBuildExtraOptions }}" BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) ${{ if eq(parameters.rbe, false) }}: - BAZEL_BUILD_EXTRA_OPTIONS: "--curses=no --experimental_repository_cache_hardlinks ${{ parameters.bazelBuildExtraOptions }}" + BAZEL_BUILD_EXTRA_OPTIONS: "${{ parameters.bazelBuildExtraOptions }}" BAZEL_REMOTE_CACHE: $(LocalBuildCache) displayName: "Run CI script" diff --git a/ci/build_setup.sh b/ci/build_setup.sh index d8a62e1c8193..46b448381ef7 100755 --- a/ci/build_setup.sh +++ b/ci/build_setup.sh @@ -45,14 +45,6 @@ function setup_clang_toolchain() { echo "clang toolchain with ${ENVOY_STDLIB} configured" } -# Create a fake home. Python site libs tries to do getpwuid(3) if we don't and the CI -# Docker image gets confused as it has no passwd entry when running non-root -# unless we do this. 
-FAKE_HOME=/tmp/fake_home -mkdir -p "${FAKE_HOME}" -export HOME="${FAKE_HOME}" -export PYTHONUSERBASE="${FAKE_HOME}" - export BUILD_DIR=${BUILD_DIR:-/build} if [[ ! -d "${BUILD_DIR}" ]] then @@ -61,10 +53,8 @@ then fi # Environment setup. -export USER=bazel export TEST_TMPDIR=${BUILD_DIR}/tmp -export BAZEL="bazel" -export PATH=/opt/llvm/bin:$PATH +export PATH=/opt/llvm/bin:${PATH} export CLANG_FORMAT="${CLANG_FORMAT:-clang-format}" if [[ -f "/etc/redhat-release" ]]; then @@ -86,15 +76,16 @@ export LLVM_ROOT="${LLVM_ROOT:-/opt/llvm}" [[ "${BUILD_REASON}" != "PullRequest" ]] && BAZEL_EXTRA_TEST_OPTIONS+=" --nocache_test_results" export BAZEL_QUERY_OPTIONS="${BAZEL_OPTIONS}" -export BAZEL_BUILD_OPTIONS="--verbose_failures ${BAZEL_OPTIONS} --action_env=HOME --action_env=PYTHONUSERBASE \ - --local_cpu_resources=${NUM_CPUS} --show_task_finish --experimental_generate_json_trace_profile \ - --test_env=HOME --test_env=PYTHONUSERBASE --test_output=errors \ - --repository_cache=${BUILD_DIR}/repository_cache --experimental_repository_cache_hardlinks \ +# Use https://docs.bazel.build/versions/master/command-line-reference.html#flag--experimental_repository_cache_hardlinks +# to save disk space. +export BAZEL_BUILD_OPTIONS=" ${BAZEL_OPTIONS} --verbose_failures --show_task_finish --experimental_generate_json_trace_profile \ + --build_event_json_file=${BUILD_DIR}/build_event.json \ + --test_output=errors --repository_cache=${BUILD_DIR}/repository_cache --experimental_repository_cache_hardlinks \ ${BAZEL_BUILD_EXTRA_OPTIONS} ${BAZEL_EXTRA_TEST_OPTIONS}" [[ "$(uname -m)" == "aarch64" ]] && BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS} --define=hot_restart=disabled --test_env=HEAPCHECK=" -[[ "${BAZEL_EXPUNGE}" == "1" ]] && "${BAZEL}" clean --expunge +[[ "${BAZEL_EXPUNGE}" == "1" ]] && bazel clean --expunge # Also setup some space for building Envoy standalone. export ENVOY_BUILD_DIR="${BUILD_DIR}"/envoy diff --git a/ci/do_ci.sh b/ci/do_ci.sh index 620efefb8cc1..6629446dc40b 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -19,7 +19,10 @@ cd "${SRCDIR}" echo "building using ${NUM_CPUS} CPUs" function collect_build_profile() { - cp -f "$(bazel info output_base)/command.profile.gz" "${ENVOY_BUILD_PROFILE}/$1.profile.gz" || true + declare -g build_profile_count=${build_profile_count:-1} + mv -f "$(bazel info output_base)/command.profile.gz" "${ENVOY_BUILD_PROFILE}/${build_profile_count}-$1.profile.gz" || true + mv -f ${BUILD_DIR}/build_event.json "${ENVOY_BUILD_PROFILE}/${build_profile_count}-$1.build_event.json" || true + ((build_profile_count++)) } function bazel_with_collection() { @@ -42,7 +45,6 @@ function bazel_with_collection() { function cp_binary_for_outside_access() { DELIVERY_LOCATION="$1" - ENVOY_BIN=$(echo "${ENVOY_BUILD_TARGET}" | sed -e 's#^@\([^/]*\)/#external/\1#;s#^//##;s#:#/#') cp -f \ bazel-bin/"${ENVOY_BIN}" \ "${ENVOY_DELIVERY_DIR}"/"${DELIVERY_LOCATION}" @@ -81,6 +83,11 @@ function bazel_binary_build() { fi echo "Building..." + ENVOY_BIN=$(echo "${ENVOY_BUILD_TARGET}" | sed -e 's#^@\([^/]*\)/#external/\1#;s#^//##;s#:#/#') + + # This is a workaround for https://github.com/bazelbuild/bazel/issues/11834 + [[ ! 
-z "${ENVOY_RBE}" ]] && rm -rf bazel-bin/"${ENVOY_BIN}"* + bazel build ${BAZEL_BUILD_OPTIONS} -c "${COMPILE_TYPE}" "${ENVOY_BUILD_TARGET}" ${CONFIG_ARGS} collect_build_profile "${BINARY_TYPE}"_build @@ -113,11 +120,11 @@ if [[ "$CI_TARGET" == "bazel.release" ]]; then [[ "$(uname -m)" == "x86_64" ]] && BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS} --test_env=ENVOY_MEMORY_TEST_EXACT=true" setup_clang_toolchain - echo "bazel release build with tests..." - bazel_binary_build release - echo "Testing ${TEST_TARGETS}" bazel_with_collection test ${BAZEL_BUILD_OPTIONS} -c opt ${TEST_TARGETS} + + echo "bazel release build with tests..." + bazel_binary_build release exit 0 elif [[ "$CI_TARGET" == "bazel.release.server_only" ]]; then setup_clang_toolchain @@ -131,26 +138,29 @@ elif [[ "$CI_TARGET" == "bazel.sizeopt.server_only" ]]; then exit 0 elif [[ "$CI_TARGET" == "bazel.sizeopt" ]]; then setup_clang_toolchain + echo "Testing ${TEST_TARGETS}" + bazel_with_collection test ${BAZEL_BUILD_OPTIONS} --config=sizeopt ${TEST_TARGETS} + echo "bazel size optimized build with tests..." bazel_binary_build sizeopt - echo "Testing ${TEST_TARGETS}" - bazel test ${BAZEL_BUILD_OPTIONS} --config=sizeopt ${TEST_TARGETS} exit 0 elif [[ "$CI_TARGET" == "bazel.gcc" ]]; then - BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS} --test_env=HEAPCHECK=" + BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS} --test_env=HEAPCHECK=" setup_gcc_toolchain - echo "bazel fastbuild build..." - bazel_binary_build release echo "Testing ${TEST_TARGETS}" bazel_with_collection test ${BAZEL_BUILD_OPTIONS} -c opt ${TEST_TARGETS} + + echo "bazel release build with gcc..." + bazel_binary_build release exit 0 elif [[ "$CI_TARGET" == "bazel.debug" ]]; then setup_clang_toolchain - echo "bazel debug build with tests..." - bazel_binary_build debug echo "Testing ${TEST_TARGETS}" bazel test ${BAZEL_BUILD_OPTIONS} -c dbg ${TEST_TARGETS} + + echo "bazel debug build with tests..." + bazel_binary_build debug exit 0 elif [[ "$CI_TARGET" == "bazel.debug.server_only" ]]; then setup_clang_toolchain @@ -205,7 +215,7 @@ elif [[ "$CI_TARGET" == "bazel.dev" ]]; then bazel_binary_build fastbuild echo "Building and testing ${TEST_TARGETS}" - bazel test ${BAZEL_BUILD_OPTIONS} -c fastbuild ${TEST_TARGETS} + bazel_with_collection test ${BAZEL_BUILD_OPTIONS} -c fastbuild ${TEST_TARGETS} exit 0 elif [[ "$CI_TARGET" == "bazel.compile_time_options" ]]; then # Right now, none of the available compile-time options conflict with each other. If this @@ -233,18 +243,20 @@ elif [[ "$CI_TARGET" == "bazel.compile_time_options" ]]; then TEST_TARGETS="@envoy//test/..." fi # Building all the dependencies from scratch to link them against libc++. - echo "Building..." - bazel build ${BAZEL_BUILD_OPTIONS} ${COMPILE_TIME_OPTIONS} -c dbg @envoy//source/exe:envoy-static --build_tag_filters=-nofips echo "Building and testing ${TEST_TARGETS}" - bazel test ${BAZEL_BUILD_OPTIONS} ${COMPILE_TIME_OPTIONS} -c dbg ${TEST_TARGETS} --test_tag_filters=-nofips --build_tests_only + bazel_with_collection test ${BAZEL_BUILD_OPTIONS} ${COMPILE_TIME_OPTIONS} -c dbg ${TEST_TARGETS} --test_tag_filters=-nofips --build_tests_only # Legacy codecs "--define legacy_codecs_in_integration_tests=true" should also be tested in # integration tests with asan. - bazel test ${BAZEL_BUILD_OPTIONS} ${COMPILE_TIME_OPTIONS} -c dbg @envoy//test/integration/... --config=clang-asan --build_tests_only + bazel_with_collection test ${BAZEL_BUILD_OPTIONS} ${COMPILE_TIME_OPTIONS} -c dbg @envoy//test/integration/... 
--config=clang-asan --build_tests_only # "--define log_debug_assert_in_release=enabled" must be tested with a release build, so run only # these tests under "-c opt" to save time in CI. - bazel test ${BAZEL_BUILD_OPTIONS} ${COMPILE_TIME_OPTIONS} -c opt @envoy//test/common/common:assert_test @envoy//test/server:server_test + bazel_with_collection test ${BAZEL_BUILD_OPTIONS} ${COMPILE_TIME_OPTIONS} -c opt @envoy//test/common/common:assert_test @envoy//test/server:server_test + + echo "Building binary..." + bazel build ${BAZEL_BUILD_OPTIONS} ${COMPILE_TIME_OPTIONS} -c dbg @envoy//source/exe:envoy-static --build_tag_filters=-nofips + collect_build_profile build exit 0 elif [[ "$CI_TARGET" == "bazel.api" ]]; then @@ -262,25 +274,15 @@ elif [[ "$CI_TARGET" == "bazel.api" ]]; then # We use custom BAZEL_BUILD_OPTIONS here; the API booster isn't capable of working with libc++ yet. LLVM_CONFIG="${LLVM_ROOT}"/bin/llvm-config BAZEL_BUILD_OPTIONS="--config=clang" python3.8 ./tools/api_boost/api_boost_test.py exit 0 -elif [[ "$CI_TARGET" == "bazel.coverage" ]]; then +elif [[ "$CI_TARGET" == "bazel.coverage" || "$CI_TARGET" == "bazel.fuzz_coverage" ]]; then setup_clang_toolchain - echo "bazel coverage build with tests ${COVERAGE_TEST_TARGETS}" + echo "${CI_TARGET} build with tests ${COVERAGE_TEST_TARGETS}" - # Reduce the amount of memory Bazel tries to use to prevent it from launching too many subprocesses. - # This should prevent the system from running out of memory and killing tasks. See discussion on - # https://github.com/envoyproxy/envoy/pull/5611. - [ -z "$CIRCLECI" ] || export BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS} --local_ram_resources=12288" + [[ "$CI_TARGET" == "bazel.fuzz_coverage" ]] && export FUZZ_COVERAGE=true test/run_envoy_bazel_coverage.sh ${COVERAGE_TEST_TARGETS} collect_build_profile coverage exit 0 -elif [[ "$CI_TARGET" == "bazel.fuzz_coverage" ]]; then - setup_clang_toolchain - echo "bazel coverage build with fuzz tests ${COVERAGE_TEST_TARGETS}" - - FUZZ_COVERAGE=true test/run_envoy_bazel_coverage.sh ${COVERAGE_TEST_TARGETS} - collect_build_profile coverage - exit 0 elif [[ "$CI_TARGET" == "bazel.clang_tidy" ]]; then setup_clang_toolchain NUM_CPUS=$NUM_CPUS ci/run_clang_tidy.sh diff --git a/ci/do_circle_ci.sh b/ci/do_circle_ci.sh index 036a75b1b8cb..7c44e7555b71 100755 --- a/ci/do_circle_ci.sh +++ b/ci/do_circle_ci.sh @@ -11,6 +11,16 @@ if [[ -e "~/.gitconfig" ]]; then mv ~/.gitconfig ~/.gitconfig_save fi +# Workaround for not using ci/run_envoy_docker.sh +# Create a fake home. Python site libs tries to do getpwuid(3) if we don't and the CI +# Docker image gets confused as it has no passwd entry when running non-root +# unless we do this. +FAKE_HOME=/tmp/fake_home +mkdir -p "${FAKE_HOME}" +export HOME="${FAKE_HOME}" +export PYTHONUSERBASE="${FAKE_HOME}" +export USER=bazel + export ENVOY_SRCDIR="$(pwd)" # xlarge resource_class. @@ -20,7 +30,8 @@ export NUM_CPUS=6 # CircleCI doesn't support IPv6 by default, so we run all tests with IPv4 only. # IPv6 tests are run with Azure Pipelines. 
-export BAZEL_EXTRA_TEST_OPTIONS="--test_env=ENVOY_IP_TEST_VERSIONS=v4only" +export BAZEL_EXTRA_BUILD_OPTIONS="--test_env=ENVOY_IP_TEST_VERSIONS=v4only --local_cpu_resources=${NUM_CPUS} \ + --action_env=HOME --action_env=PYTHONUSERBASE --test_env=HOME --test_env=PYTHONUSERBASE" function finish { echo "disk space at end of build:" From d90083bd73b98027609c4f2e6e1903c654038700 Mon Sep 17 00:00:00 2001 From: Yifan Yang Date: Mon, 27 Jul 2020 16:28:05 -0400 Subject: [PATCH 755/909] build:remove the separate c++14 flag (#12306) This PR is the final step in porting envoy to C++17. After this change, all envoy builds including envoy-mobile will be built in C++17 mode. See this for the PR that make envoy-mobile built in C++17 lyft/envoy-mobile#964. Risk Level: low, as the master has been running with c++17 mode for almost two weeks now and envoy mobile is also building with C++17 Testing: All existing tests have been passed. Docs Changes: Release Notes: Signed-off-by: Yifan Yang --- ci/do_ci.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/ci/do_ci.sh b/ci/do_ci.sh index 6629446dc40b..a56efe8a34f3 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -231,7 +231,6 @@ elif [[ "$CI_TARGET" == "bazel.compile_time_options" ]]; then --define path_normalization_by_default=true \ --define deprecated_features=disabled \ --define use_legacy_codecs_in_integration_tests=true \ - --define --cxxopt=-std=c++14 \ " ENVOY_STDLIB="${ENVOY_STDLIB:-libstdc++}" setup_clang_toolchain From 41c9eb6e359af21bf69ebbfa435d941bc14b9c37 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Guti=C3=A9rrez=20Segal=C3=A9s?= Date: Mon, 27 Jul 2020 17:05:07 -0400 Subject: [PATCH 756/909] zookeeper: note available latency stats in docs (#12261) This was missed in #7825. Risk Level: low Docs Change: docs only Release Notes: n/a Signed-off-by: Raul Gutierrez Segales --- .../zookeeper_proxy_filter.rst | 46 ++++++++++++++++++- 1 file changed, 44 insertions(+), 2 deletions(-) diff --git a/docs/root/configuration/listeners/network_filters/zookeeper_proxy_filter.rst b/docs/root/configuration/listeners/network_filters/zookeeper_proxy_filter.rst index 426634fefde5..b0c85ecd7857 100644 --- a/docs/root/configuration/listeners/network_filters/zookeeper_proxy_filter.rst +++ b/docs/root/configuration/listeners/network_filters/zookeeper_proxy_filter.rst @@ -43,8 +43,8 @@ in the configuration snippet below: Statistics ---------- -Every configured ZooKeeper proxy filter has statistics rooted at *zookeeper..* with the -following statistics: +Every configured ZooKeeper proxy filter has statistics rooted at *.zookeeper.*. The +following counters are available: .. csv-table:: :header: Name, Type, Description @@ -103,6 +103,48 @@ following statistics: removewatches_resp, Counter, Number of removewatches responses check_resp, Counter, Number of check responses + +.. _config_network_filters_zookeeper_proxy_latency_stats: + +Per opcode latency statistics +----------------------------- + +The filter will gather latency statistics in the *.zookeeper._response_latency* namespace. +Latency stats are in milliseconds: + +.. 
csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + connect_response_latency, Histogram, Opcode execution time in milliseconds + ping_response_latency, Histogram, Opcode execution time in milliseconds + auth_response_latency, Histogram, Opcode execution time in milliseconds + watch_event, Histogram, Opcode execution time in milliseconds + getdata_response_latency, Histogram, Opcode execution time in milliseconds + create_response_latency, Histogram, Opcode execution time in milliseconds + create2_response_latency, Histogram, Opcode execution time in milliseconds + createcontainer_response_latency, Histogram, Opcode execution time in milliseconds + createttl_response_latency, Histogram, Opcode execution time in milliseconds + setdata_response_latency, Histogram, Opcode execution time in milliseconds + getchildren_response_latency, Histogram, Opcode execution time in milliseconds + getchildren2_response_latency, Histogram, Opcode execution time in milliseconds + getephemerals_response_latency, Histogram, Opcode execution time in milliseconds + getallchildrennumber_response_latency, Histogram, Opcode execution time in milliseconds + remove_response_latency, Histogram, Opcode execution time in milliseconds + exists_response_latency, Histogram, Opcode execution time in milliseconds + getacl_response_latency, Histogram, Opcode execution time in milliseconds + setacl_response_latency, Histogram, Opcode execution time in milliseconds + sync_response_latency, Histogram, Opcode execution time in milliseconds + multi_response_latency, Histogram, Opcode execution time in milliseconds + reconfig_response_latency, Histogram, Opcode execution time in milliseconds + close_response_latency, Histogram, Opcode execution time in milliseconds + setauth_response_latency, Histogram, Opcode execution time in milliseconds + setwatches_response_latency, Histogram, Opcode execution time in milliseconds + checkwatches_response_latency, Histogram, Opcode execution time in milliseconds + removewatches_response_latency, Histogram, Opcode execution time in milliseconds + check_response_latency, Histogram, Opcode execution time in milliseconds + + .. _config_network_filters_zookeeper_proxy_dynamic_metadata: Dynamic Metadata From 85121224ebe5c395b93e3d6c1981b2a437f50e72 Mon Sep 17 00:00:00 2001 From: Rama Chavali Date: Tue, 28 Jul 2020 02:56:05 +0530 Subject: [PATCH 757/909] fix type in lua examples (#12301) Signed-off-by: Rama Chavali --- docs/root/configuration/http/http_filters/lua_filter.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/root/configuration/http/http_filters/lua_filter.rst b/docs/root/configuration/http/http_filters/lua_filter.rst index b5b720941eb1..3969e42e5d61 100644 --- a/docs/root/configuration/http/http_filters/lua_filter.rst +++ b/docs/root/configuration/http/http_filters/lua_filter.rst @@ -66,7 +66,7 @@ A simple example of configuring Lua HTTP filter that contains only :ref:`inline_ name: envoy.filters.http.lua typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.lua.v3.lua + "@type": type.googleapis.com/envoy.extensions.filters.http.lua.v3.Lua inline_code: | -- Called on the request path. 
function envoy_on_request(request_handle) @@ -93,7 +93,7 @@ As a concrete example, given the following Lua filter configuration: name: envoy.filters.http.lua typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.lua.v3.lua + "@type": type.googleapis.com/envoy.extensions.filters.http.lua.v3.Lua inline_code: | function envoy_on_request(request_handle) -- do something From 755dfe0fda2b1395441924b390c3fd605502e447 Mon Sep 17 00:00:00 2001 From: Aakash2017 Date: Tue, 28 Jul 2020 04:28:50 +0000 Subject: [PATCH 758/909] update createStatSink to use ServerFactoryContext (#12309) Updated creatStatSink function to take in Server::Configuration::ServerFactoryContext as a parameter instead of Server::Instance. Risk Level: Low Signed-off-by: Aakash Shukla --- include/envoy/server/factory_context.h | 6 ++++++ source/extensions/stat_sinks/dog_statsd/config.cc | 5 +++-- source/extensions/stat_sinks/dog_statsd/config.h | 2 +- source/extensions/stat_sinks/hystrix/config.cc | 5 +++-- source/extensions/stat_sinks/hystrix/config.h | 2 +- source/extensions/stat_sinks/hystrix/hystrix.cc | 7 ++++--- source/extensions/stat_sinks/hystrix/hystrix.h | 4 ++-- .../stat_sinks/metrics_service/config.cc | 7 ++++--- .../extensions/stat_sinks/metrics_service/config.h | 2 +- source/extensions/stat_sinks/statsd/config.cc | 7 ++++--- source/extensions/stat_sinks/statsd/config.h | 2 +- source/server/configuration_impl.cc | 2 +- source/server/configuration_impl.h | 3 ++- source/server/server.h | 3 +++ .../stats_sinks/dog_statsd/config_test.cc | 10 +++++----- test/extensions/stats_sinks/hystrix/config_test.cc | 2 +- .../extensions/stats_sinks/hystrix/hystrix_test.cc | 2 +- test/extensions/stats_sinks/statsd/config_test.cc | 14 +++++++------- test/mocks/server/instance.h | 1 + test/server/server_test.cc | 10 ++++++---- 20 files changed, 57 insertions(+), 39 deletions(-) diff --git a/include/envoy/server/factory_context.h b/include/envoy/server/factory_context.h index 71248cfd37ca..764691dc45d5 100644 --- a/include/envoy/server/factory_context.h +++ b/include/envoy/server/factory_context.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include @@ -138,6 +139,11 @@ class ServerFactoryContext : public virtual CommonFactoryContext { * @return ServerLifecycleNotifier& the lifecycle notifier for the server. */ virtual ServerLifecycleNotifier& lifecycleNotifier() PURE; + + /** + * @return std::chrono::milliseconds the flush interval of stats sinks. 
+ */ + virtual std::chrono::milliseconds statsFlushInterval() const PURE; }; /** diff --git a/source/extensions/stat_sinks/dog_statsd/config.cc b/source/extensions/stat_sinks/dog_statsd/config.cc index 8e346db5b32e..985eb3e255b0 100644 --- a/source/extensions/stat_sinks/dog_statsd/config.cc +++ b/source/extensions/stat_sinks/dog_statsd/config.cc @@ -18,8 +18,9 @@ namespace Extensions { namespace StatSinks { namespace DogStatsd { -Stats::SinkPtr DogStatsdSinkFactory::createStatsSink(const Protobuf::Message& config, - Server::Instance& server) { +Stats::SinkPtr +DogStatsdSinkFactory::createStatsSink(const Protobuf::Message& config, + Server::Configuration::ServerFactoryContext& server) { const auto& sink_config = MessageUtil::downcastAndValidate( config, server.messageValidationContext().staticValidationVisitor()); diff --git a/source/extensions/stat_sinks/dog_statsd/config.h b/source/extensions/stat_sinks/dog_statsd/config.h index 5e9cfdef1cb2..037dd4476eff 100644 --- a/source/extensions/stat_sinks/dog_statsd/config.h +++ b/source/extensions/stat_sinks/dog_statsd/config.h @@ -16,7 +16,7 @@ class DogStatsdSinkFactory : Logger::Loggable, public Server::Configuration::StatsSinkFactory { public: Stats::SinkPtr createStatsSink(const Protobuf::Message& config, - Server::Instance& server) override; + Server::Configuration::ServerFactoryContext& server) override; ProtobufTypes::MessagePtr createEmptyConfigProto() override; diff --git a/source/extensions/stat_sinks/hystrix/config.cc b/source/extensions/stat_sinks/hystrix/config.cc index e23c4ab050d4..4997231b8be8 100644 --- a/source/extensions/stat_sinks/hystrix/config.cc +++ b/source/extensions/stat_sinks/hystrix/config.cc @@ -16,8 +16,9 @@ namespace Extensions { namespace StatSinks { namespace Hystrix { -Stats::SinkPtr HystrixSinkFactory::createStatsSink(const Protobuf::Message& config, - Server::Instance& server) { +Stats::SinkPtr +HystrixSinkFactory::createStatsSink(const Protobuf::Message& config, + Server::Configuration::ServerFactoryContext& server) { const auto& hystrix_sink = MessageUtil::downcastAndValidate( config, server.messageValidationContext().staticValidationVisitor()); diff --git a/source/extensions/stat_sinks/hystrix/config.h b/source/extensions/stat_sinks/hystrix/config.h index 396cab600254..cff7ede28a17 100644 --- a/source/extensions/stat_sinks/hystrix/config.h +++ b/source/extensions/stat_sinks/hystrix/config.h @@ -16,7 +16,7 @@ class HystrixSinkFactory : Logger::Loggable, public: // StatsSinkFactory Stats::SinkPtr createStatsSink(const Protobuf::Message& config, - Server::Instance& server) override; + Server::Configuration::ServerFactoryContext& server) override; ProtobufTypes::MessagePtr createEmptyConfigProto() override; diff --git a/source/extensions/stat_sinks/hystrix/hystrix.cc b/source/extensions/stat_sinks/hystrix/hystrix.cc index abea32ca3dd6..82eb9906612b 100644 --- a/source/extensions/stat_sinks/hystrix/hystrix.cc +++ b/source/extensions/stat_sinks/hystrix/hystrix.cc @@ -271,9 +271,10 @@ const std::string HystrixSink::printRollingWindows() { return out_str.str(); } -HystrixSink::HystrixSink(Server::Instance& server, const uint64_t num_buckets) +HystrixSink::HystrixSink(Server::Configuration::ServerFactoryContext& server, + const uint64_t num_buckets) : server_(server), current_index_(num_buckets > 0 ? 
num_buckets : DEFAULT_NUM_BUCKETS), - window_size_(current_index_ + 1), stat_name_pool_(server.stats().symbolTable()), + window_size_(current_index_ + 1), stat_name_pool_(server.scope().symbolTable()), cluster_name_(stat_name_pool_.add(Config::TagNames::get().CLUSTER_NAME)), cluster_upstream_rq_time_(stat_name_pool_.add("cluster.upstream_rq_time")), membership_total_(stat_name_pool_.add("membership_total")), @@ -348,7 +349,7 @@ void HystrixSink::flush(Stats::MetricSnapshot& snapshot) { Stats::Utility::findTag(histogram.get(), cluster_name_); // Make sure we found the cluster name tag ASSERT(value); - std::string value_str = server_.stats().symbolTable().toString(*value); + std::string value_str = server_.scope().symbolTable().toString(*value); auto it_bool_pair = time_histograms.emplace(std::make_pair(value_str, QuantileLatencyMap())); // Make sure histogram with this name was not already added ASSERT(it_bool_pair.second); diff --git a/source/extensions/stat_sinks/hystrix/hystrix.h b/source/extensions/stat_sinks/hystrix/hystrix.h index 08aa4f6b0c7e..70185e5730bd 100644 --- a/source/extensions/stat_sinks/hystrix/hystrix.h +++ b/source/extensions/stat_sinks/hystrix/hystrix.h @@ -47,7 +47,7 @@ using ClusterStatsCachePtr = std::unique_ptr; class HystrixSink : public Stats::Sink, public Logger::Loggable { public: - HystrixSink(Server::Instance& server, uint64_t num_buckets); + HystrixSink(Server::Configuration::ServerFactoryContext& server, uint64_t num_buckets); Http::Code handlerHystrixEventStream(absl::string_view, Http::ResponseHeaderMap& response_headers, Buffer::Instance&, Server::AdminStream& admin_stream); void flush(Stats::MetricSnapshot& snapshot) override; @@ -149,7 +149,7 @@ class HystrixSink : public Stats::Sink, public Logger::Loggable callbacks_list_; - Server::Instance& server_; + Server::Configuration::ServerFactoryContext& server_; uint64_t current_index_; const uint64_t window_size_; static const uint64_t DEFAULT_NUM_BUCKETS = 10; diff --git a/source/extensions/stat_sinks/metrics_service/config.cc b/source/extensions/stat_sinks/metrics_service/config.cc index 73dadf66ddea..db1998aefe5b 100644 --- a/source/extensions/stat_sinks/metrics_service/config.cc +++ b/source/extensions/stat_sinks/metrics_service/config.cc @@ -17,8 +17,9 @@ namespace Extensions { namespace StatSinks { namespace MetricsService { -Stats::SinkPtr MetricsServiceSinkFactory::createStatsSink(const Protobuf::Message& config, - Server::Instance& server) { +Stats::SinkPtr +MetricsServiceSinkFactory::createStatsSink(const Protobuf::Message& config, + Server::Configuration::ServerFactoryContext& server) { validateProtoDescriptors(); const auto& sink_config = @@ -31,7 +32,7 @@ Stats::SinkPtr MetricsServiceSinkFactory::createStatsSink(const Protobuf::Messag std::shared_ptr grpc_metrics_streamer = std::make_shared( server.clusterManager().grpcAsyncClientManager().factoryForGrpcService( - grpc_service, server.stats(), false), + grpc_service, server.scope(), false), server.localInfo(), transport_api_version); return std::make_unique( diff --git a/source/extensions/stat_sinks/metrics_service/config.h b/source/extensions/stat_sinks/metrics_service/config.h index 702ea0e97821..f67eeb2cb538 100644 --- a/source/extensions/stat_sinks/metrics_service/config.h +++ b/source/extensions/stat_sinks/metrics_service/config.h @@ -17,7 +17,7 @@ class MetricsServiceSinkFactory : Logger::Loggable, public Server::Configuration::StatsSinkFactory { public: Stats::SinkPtr createStatsSink(const Protobuf::Message& config, - Server::Instance& 
server) override; + Server::Configuration::ServerFactoryContext& server) override; ProtobufTypes::MessagePtr createEmptyConfigProto() override; diff --git a/source/extensions/stat_sinks/statsd/config.cc b/source/extensions/stat_sinks/statsd/config.cc index fa0c1e758e61..3cbea7f511a2 100644 --- a/source/extensions/stat_sinks/statsd/config.cc +++ b/source/extensions/stat_sinks/statsd/config.cc @@ -16,8 +16,9 @@ namespace Extensions { namespace StatSinks { namespace Statsd { -Stats::SinkPtr StatsdSinkFactory::createStatsSink(const Protobuf::Message& config, - Server::Instance& server) { +Stats::SinkPtr +StatsdSinkFactory::createStatsSink(const Protobuf::Message& config, + Server::Configuration::ServerFactoryContext& server) { const auto& statsd_sink = MessageUtil::downcastAndValidate( @@ -34,7 +35,7 @@ Stats::SinkPtr StatsdSinkFactory::createStatsSink(const Protobuf::Message& confi ENVOY_LOG(debug, "statsd TCP cluster: {}", statsd_sink.tcp_cluster_name()); return std::make_unique( server.localInfo(), statsd_sink.tcp_cluster_name(), server.threadLocal(), - server.clusterManager(), server.stats(), statsd_sink.prefix()); + server.clusterManager(), server.scope(), statsd_sink.prefix()); default: // Verified by schema. NOT_REACHED_GCOVR_EXCL_LINE; diff --git a/source/extensions/stat_sinks/statsd/config.h b/source/extensions/stat_sinks/statsd/config.h index 591308a70ef4..928e7729055f 100644 --- a/source/extensions/stat_sinks/statsd/config.h +++ b/source/extensions/stat_sinks/statsd/config.h @@ -17,7 +17,7 @@ class StatsdSinkFactory : Logger::Loggable, public: // StatsSinkFactory Stats::SinkPtr createStatsSink(const Protobuf::Message& config, - Server::Instance& server) override; + Server::Configuration::ServerFactoryContext& server) override; ProtobufTypes::MessagePtr createEmptyConfigProto() override; diff --git a/source/server/configuration_impl.cc b/source/server/configuration_impl.cc index 2a10e00388dc..63d8b162fac6 100644 --- a/source/server/configuration_impl.cc +++ b/source/server/configuration_impl.cc @@ -136,7 +136,7 @@ void MainImpl::initializeStatsSinks(const envoy::config::bootstrap::v3::Bootstra ProtobufTypes::MessagePtr message = Config::Utility::translateToFactoryConfig( sink_object, server.messageValidationContext().staticValidationVisitor(), factory); - stats_sinks_.emplace_back(factory.createStatsSink(*message, server)); + stats_sinks_.emplace_back(factory.createStatsSink(*message, server.serverFactoryContext())); } } diff --git a/source/server/configuration_impl.h b/source/server/configuration_impl.h index 7ad844d565a1..becb15a39745 100644 --- a/source/server/configuration_impl.h +++ b/source/server/configuration_impl.h @@ -42,7 +42,8 @@ class StatsSinkFactory : public Config::TypedFactory { * @param config supplies the custom proto configuration for the Stats::Sink * @param server supplies the server instance */ - virtual Stats::SinkPtr createStatsSink(const Protobuf::Message& config, Instance& server) PURE; + virtual Stats::SinkPtr createStatsSink(const Protobuf::Message& config, + Server::Configuration::ServerFactoryContext& server) PURE; std::string category() const override { return "envoy.stats_sinks"; } }; diff --git a/source/server/server.h b/source/server/server.h index cfa61a9b2bd9..22d2a8dd2c9d 100644 --- a/source/server/server.h +++ b/source/server/server.h @@ -173,6 +173,9 @@ class ServerFactoryContextImpl : public Configuration::ServerFactoryContext, Grpc::Context& grpcContext() override { return server_.grpcContext(); } Envoy::Server::DrainManager& drainManager() 
override { return server_.drainManager(); } ServerLifecycleNotifier& lifecycleNotifier() override { return server_.lifecycleNotifier(); } + std::chrono::milliseconds statsFlushInterval() const override { + return server_.statsFlushInterval(); + } // Configuration::TransportSocketFactoryContext Ssl::ContextManager& sslContextManager() override { return server_.sslContextManager(); } diff --git a/test/extensions/stats_sinks/dog_statsd/config_test.cc b/test/extensions/stats_sinks/dog_statsd/config_test.cc index 1e84b2b16e09..cdb68d6e938b 100644 --- a/test/extensions/stats_sinks/dog_statsd/config_test.cc +++ b/test/extensions/stats_sinks/dog_statsd/config_test.cc @@ -49,7 +49,7 @@ TEST_P(DogStatsdConfigLoopbackTest, ValidUdpIp) { ProtobufTypes::MessagePtr message = factory->createEmptyConfigProto(); TestUtility::jsonConvert(sink_config, *message); - NiceMock server; + NiceMock server; Stats::SinkPtr sink = factory->createStatsSink(*message, server); EXPECT_NE(sink, nullptr); auto udp_sink = dynamic_cast(sink.get()); @@ -60,7 +60,7 @@ TEST_P(DogStatsdConfigLoopbackTest, ValidUdpIp) { // Negative test for protoc-gen-validate constraints for dog_statsd. TEST(DogStatsdConfigTest, ValidateFail) { - NiceMock server; + NiceMock server; EXPECT_THROW( DogStatsdSinkFactory().createStatsSink(envoy::config::metrics::v3::DogStatsdSink(), server), ProtoValidationException); @@ -86,7 +86,7 @@ TEST_P(DogStatsdConfigLoopbackTest, CustomBufferSize) { ProtobufTypes::MessagePtr message = factory->createEmptyConfigProto(); TestUtility::jsonConvert(sink_config, *message); - NiceMock server; + NiceMock server; Stats::SinkPtr sink = factory->createStatsSink(*message, server); ASSERT_NE(sink, nullptr); auto udp_sink = dynamic_cast(sink.get()); @@ -113,7 +113,7 @@ TEST_P(DogStatsdConfigLoopbackTest, DefaultBufferSize) { ProtobufTypes::MessagePtr message = factory->createEmptyConfigProto(); TestUtility::jsonConvert(sink_config, *message); - NiceMock server; + NiceMock server; Stats::SinkPtr sink = factory->createStatsSink(*message, server); ASSERT_NE(sink, nullptr); auto udp_sink = dynamic_cast(sink.get()); @@ -144,7 +144,7 @@ TEST_P(DogStatsdConfigLoopbackTest, WithCustomPrefix) { ProtobufTypes::MessagePtr message = factory->createEmptyConfigProto(); TestUtility::jsonConvert(sink_config, *message); - NiceMock server; + NiceMock server; Stats::SinkPtr sink = factory->createStatsSink(*message, server); ASSERT_NE(sink, nullptr); auto udp_sink = dynamic_cast(sink.get()); diff --git a/test/extensions/stats_sinks/hystrix/config_test.cc b/test/extensions/stats_sinks/hystrix/config_test.cc index 79bf2c4bd21e..5a3c4c007e9f 100644 --- a/test/extensions/stats_sinks/hystrix/config_test.cc +++ b/test/extensions/stats_sinks/hystrix/config_test.cc @@ -35,7 +35,7 @@ TEST(StatsConfigTest, ValidHystrixSink) { ProtobufTypes::MessagePtr message = factory->createEmptyConfigProto(); TestUtility::jsonConvert(sink_config, *message); - NiceMock server; + NiceMock server; Stats::SinkPtr sink = factory->createStatsSink(*message, server); EXPECT_NE(sink, nullptr); EXPECT_NE(dynamic_cast(sink.get()), nullptr); diff --git a/test/extensions/stats_sinks/hystrix/hystrix_test.cc b/test/extensions/stats_sinks/hystrix/hystrix_test.cc index f7e9671a2956..5c76dd9499d8 100644 --- a/test/extensions/stats_sinks/hystrix/hystrix_test.cc +++ b/test/extensions/stats_sinks/hystrix/hystrix_test.cc @@ -243,7 +243,7 @@ class HystrixSinkTest : public testing::Test { ClusterTestInfo cluster2_{cluster2_name_}; NiceMock callbacks_; - NiceMock server_; + NiceMock server_; 
Upstream::ClusterManager::ClusterInfoMap cluster_map_; std::unique_ptr sink_; diff --git a/test/extensions/stats_sinks/statsd/config_test.cc b/test/extensions/stats_sinks/statsd/config_test.cc index 48e2a575b087..91a4b4db77be 100644 --- a/test/extensions/stats_sinks/statsd/config_test.cc +++ b/test/extensions/stats_sinks/statsd/config_test.cc @@ -39,7 +39,7 @@ TEST(StatsConfigTest, ValidTcpStatsd) { ProtobufTypes::MessagePtr message = factory->createEmptyConfigProto(); TestUtility::jsonConvert(sink_config, *message); - NiceMock server; + NiceMock server; Stats::SinkPtr sink = factory->createStatsSink(*message, server); EXPECT_NE(sink, nullptr); EXPECT_NE(dynamic_cast(sink.get()), nullptr); @@ -81,7 +81,7 @@ TEST_P(StatsConfigParameterizedTest, UdpSinkDefaultPrefix) { ProtobufTypes::MessagePtr message = factory->createEmptyConfigProto(); TestUtility::jsonConvert(sink_config, *message); - NiceMock server; + NiceMock server; Stats::SinkPtr sink = factory->createStatsSink(*message, server); ASSERT_NE(sink, nullptr); @@ -113,7 +113,7 @@ TEST_P(StatsConfigParameterizedTest, UdpSinkCustomPrefix) { ProtobufTypes::MessagePtr message = factory->createEmptyConfigProto(); TestUtility::jsonConvert(sink_config, *message); - NiceMock server; + NiceMock server; Stats::SinkPtr sink = factory->createStatsSink(*message, server); ASSERT_NE(sink, nullptr); @@ -136,7 +136,7 @@ TEST(StatsConfigTest, TcpSinkDefaultPrefix) { ProtobufTypes::MessagePtr message = factory->createEmptyConfigProto(); TestUtility::jsonConvert(sink_config, *message); - NiceMock server; + NiceMock server; Stats::SinkPtr sink = factory->createStatsSink(*message, server); ASSERT_NE(sink, nullptr); @@ -161,7 +161,7 @@ TEST(StatsConfigTest, TcpSinkCustomPrefix) { ProtobufTypes::MessagePtr message = factory->createEmptyConfigProto(); TestUtility::jsonConvert(sink_config, *message); - NiceMock server; + NiceMock server; Stats::SinkPtr sink = factory->createStatsSink(*message, server); ASSERT_NE(sink, nullptr); @@ -193,7 +193,7 @@ TEST_P(StatsConfigLoopbackTest, ValidUdpIpStatsd) { ProtobufTypes::MessagePtr message = factory->createEmptyConfigProto(); TestUtility::jsonConvert(sink_config, *message); - NiceMock server; + NiceMock server; Stats::SinkPtr sink = factory->createStatsSink(*message, server); EXPECT_NE(sink, nullptr); EXPECT_NE(dynamic_cast(sink.get()), nullptr); @@ -202,7 +202,7 @@ TEST_P(StatsConfigLoopbackTest, ValidUdpIpStatsd) { // Negative test for protoc-gen-validate constraints for statsd. 
TEST(StatsdConfigTest, ValidateFail) { - NiceMock server; + NiceMock server; EXPECT_THROW( StatsdSinkFactory().createStatsSink(envoy::config::metrics::v3::StatsdSink(), server), ProtoValidationException); diff --git a/test/mocks/server/instance.h b/test/mocks/server/instance.h index c966eeaffe0a..9daa9c3b8476 100644 --- a/test/mocks/server/instance.h +++ b/test/mocks/server/instance.h @@ -149,6 +149,7 @@ class MockServerFactoryContext : public virtual ServerFactoryContext { MOCK_METHOD(Server::DrainManager&, drainManager, ()); MOCK_METHOD(Init::Manager&, initManager, ()); MOCK_METHOD(ServerLifecycleNotifier&, lifecycleNotifier, ()); + MOCK_METHOD(std::chrono::milliseconds, statsFlushInterval, (), (const)); testing::NiceMock cluster_manager_; testing::NiceMock dispatcher_; diff --git a/test/server/server_test.cc b/test/server/server_test.cc index 5671a71b2bf8..82803584ce14 100644 --- a/test/server/server_test.cc +++ b/test/server/server_test.cc @@ -294,8 +294,9 @@ class CustomStatsSink : public Stats::Sink { class CustomStatsSinkFactory : public Server::Configuration::StatsSinkFactory { public: // StatsSinkFactory - Stats::SinkPtr createStatsSink(const Protobuf::Message&, Server::Instance& server) override { - return std::make_unique(server.stats()); + Stats::SinkPtr createStatsSink(const Protobuf::Message&, + Server::Configuration::ServerFactoryContext& server) override { + return std::make_unique(server.scope()); } ProtobufTypes::MessagePtr createEmptyConfigProto() override { @@ -1184,7 +1185,7 @@ TEST_P(StaticValidationTest, ClusterUnknownField) { // Custom StatsSink that registers both a Cluster update callback and Server lifecycle callback. class CallbacksStatsSink : public Stats::Sink, public Upstream::ClusterUpdateCallbacks { public: - CallbacksStatsSink(Server::Instance& server) + CallbacksStatsSink(Server::Configuration::ServerFactoryContext& server) : cluster_removal_cb_handle_( server.clusterManager().addThreadLocalClusterUpdateCallbacks(*this)), lifecycle_cb_handle_(server.lifecycleNotifier().registerCallback( @@ -1207,7 +1208,8 @@ class CallbacksStatsSink : public Stats::Sink, public Upstream::ClusterUpdateCal class CallbacksStatsSinkFactory : public Server::Configuration::StatsSinkFactory { public: // StatsSinkFactory - Stats::SinkPtr createStatsSink(const Protobuf::Message&, Server::Instance& server) override { + Stats::SinkPtr createStatsSink(const Protobuf::Message&, + Server::Configuration::ServerFactoryContext& server) override { return std::make_unique(server); } From 8f668e976ea0bbd7d3ac352247c1fdead6acbb31 Mon Sep 17 00:00:00 2001 From: Yifan Yang Date: Tue, 28 Jul 2020 02:27:47 -0400 Subject: [PATCH 759/909] fuzz: fixing unsigned integer underflow (#12289) Signed-off-by: Yifan Yang --- api/envoy/config/tap/v3/common.proto | 4 ++-- api/envoy/config/tap/v4alpha/common.proto | 4 ++-- generated_api_shadow/envoy/config/tap/v3/common.proto | 4 ++-- generated_api_shadow/envoy/config/tap/v4alpha/common.proto | 4 ++-- ...zz-testcase-minimized-filter_fuzz_test-5728217898680320 | 7 +++++++ 5 files changed, 15 insertions(+), 8 deletions(-) create mode 100644 test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5728217898680320 diff --git a/api/envoy/config/tap/v3/common.proto b/api/envoy/config/tap/v3/common.proto index 812c30399e75..81de393e0581 100644 --- a/api/envoy/config/tap/v3/common.proto +++ b/api/envoy/config/tap/v3/common.proto @@ -123,10 +123,10 @@ message HttpGenericBodyMatch { option (validate.required) = true; // 
Text string to be located in HTTP body. - string string_match = 1; + string string_match = 1 [(validate.rules).string = {min_len: 1}]; // Sequence of bytes to be located in HTTP body. - bytes binary_match = 2; + bytes binary_match = 2 [(validate.rules).bytes = {min_len: 1}]; } } diff --git a/api/envoy/config/tap/v4alpha/common.proto b/api/envoy/config/tap/v4alpha/common.proto index 281150715c1b..5ce87d5b5770 100644 --- a/api/envoy/config/tap/v4alpha/common.proto +++ b/api/envoy/config/tap/v4alpha/common.proto @@ -127,10 +127,10 @@ message HttpGenericBodyMatch { option (validate.required) = true; // Text string to be located in HTTP body. - string string_match = 1; + string string_match = 1 [(validate.rules).string = {min_len: 1}]; // Sequence of bytes to be located in HTTP body. - bytes binary_match = 2; + bytes binary_match = 2 [(validate.rules).bytes = {min_len: 1}]; } } diff --git a/generated_api_shadow/envoy/config/tap/v3/common.proto b/generated_api_shadow/envoy/config/tap/v3/common.proto index 812c30399e75..81de393e0581 100644 --- a/generated_api_shadow/envoy/config/tap/v3/common.proto +++ b/generated_api_shadow/envoy/config/tap/v3/common.proto @@ -123,10 +123,10 @@ message HttpGenericBodyMatch { option (validate.required) = true; // Text string to be located in HTTP body. - string string_match = 1; + string string_match = 1 [(validate.rules).string = {min_len: 1}]; // Sequence of bytes to be located in HTTP body. - bytes binary_match = 2; + bytes binary_match = 2 [(validate.rules).bytes = {min_len: 1}]; } } diff --git a/generated_api_shadow/envoy/config/tap/v4alpha/common.proto b/generated_api_shadow/envoy/config/tap/v4alpha/common.proto index 281150715c1b..5ce87d5b5770 100644 --- a/generated_api_shadow/envoy/config/tap/v4alpha/common.proto +++ b/generated_api_shadow/envoy/config/tap/v4alpha/common.proto @@ -127,10 +127,10 @@ message HttpGenericBodyMatch { option (validate.required) = true; // Text string to be located in HTTP body. - string string_match = 1; + string string_match = 1 [(validate.rules).string = {min_len: 1}]; // Sequence of bytes to be located in HTTP body. 
- bytes binary_match = 2; + bytes binary_match = 2 [(validate.rules).bytes = {min_len: 1}]; } } diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5728217898680320 b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5728217898680320 new file mode 100644 index 000000000000..b59917510f20 --- /dev/null +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-minimized-filter_fuzz_test-5728217898680320 @@ -0,0 +1,7 @@ +config { + name: "envoy.filters.http.tap" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.http.tap.v3.Tap" + value: "\no\022m\nb\n`\nD\032B\n@\n\030\032\026\n\024\n\n\032\010\032\006J\004\022\002\n\000\n\006\032\004\032\002*\000\n$\n\"\n\002 \001\n\034\032\032\032\030\n\026\n\002 \001\n\020\032\016\032\014\n\n\n\002 \001\n\004\032\002B\000\n\030\n\026\n\002 \001\n\020\032\016\032\014\n\n\n\002 \001\n\004\032\002B\000\022\007\n\005\032\003\n\001(" + } +} \ No newline at end of file From cf2df8cbe1f5aff726d6b8ea54d5ad716023b70d Mon Sep 17 00:00:00 2001 From: Sunjay Bhatia Date: Tue, 28 Jul 2020 02:32:57 -0400 Subject: [PATCH 760/909] Replace std::unordered_map/set with absl containers (#11879) - Replace with absl::node_hash_map/set - Primarily for performance optimizations and to root out any assumptions made about iteration order in tests or otherwise (the replacement absl containers have a non-deterministic iteration order - absl::node_hash_map/set should be drop-in replacements for std::unordered_map/set - Note that a future refactor should reevaluate and move to absl::flat_hash_map/set where possible for memory optimizations - Add format check to disallow future usage of std::unordered_map/set - Small changes made where absl containers required it or tests needed to be modified for correctness Additional Description: - There may be an issue we should open with abseil about `emplace` and `try_emplace` when attempting to do in-place construction - When a constructor throws an exception, as far as I can tell the c++ language standard says the container should not be affected, however this does not seem to be the case for the absl containers so their guarantees are not the same (though they may be intended to have the same guarantees) - //test/server:overload_manager_impl_test demonstrates this - see https://github.com/abseil/abseil-cpp/issues/388 Risk Level: Low, absl::node_hash_map/set should be drop-in replacements for std::unordered_map/set though this may shake loose more assumptions in tests over time we weren't able to catch locally Testing: Small changes to unit tests, repeatedly run on Windows and Linux/clang Docs Changes: N/A Release Notes: N/A Fixes #11825 Signed-off-by: Sunjay Bhatia Co-authored-by: William A Rowe Jr --- include/envoy/grpc/status.h | 2 - include/envoy/http/BUILD | 1 + include/envoy/http/header_map.h | 1 - include/envoy/http/metadata_interface.h | 5 +- include/envoy/runtime/BUILD | 5 +- include/envoy/runtime/runtime.h | 4 +- include/envoy/server/overload_manager.h | 1 - include/envoy/upstream/BUILD | 3 + include/envoy/upstream/cluster_manager.h | 8 +- include/envoy/upstream/upstream.h | 4 +- source/common/access_log/BUILD | 4 +- source/common/access_log/access_log_impl.h | 4 +- .../access_log/access_log_manager_impl.h | 5 +- source/common/common/BUILD | 1 + source/common/common/hash.h | 2 - source/common/common/perf_annotation.h | 4 +- source/common/common/utility.cc | 9 +- 
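For reference, the replacement container named in the commit message above is used the same way as std::unordered_map. A minimal standalone sketch (not code from this change) showing the properties the message relies on:

#include <string>
#include "absl/container/node_hash_map.h"

void sketch() {
  absl::node_hash_map<std::string, int> counts;
  counts["requests"] = 1;          // same operator[] as std::unordered_map
  counts.try_emplace("errors", 0); // same emplace-style insertion
  auto it = counts.find("requests");
  if (it != counts.end()) {
    ++it->second; // node storage: element addresses stay stable across rehashes
  }
  // Iteration order is deliberately non-deterministic, which is what shakes
  // out hidden ordering assumptions in tests.
}
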
source/common/common/utility.h | 1 - source/common/config/config_provider_impl.h | 6 +- .../common/config/delta_subscription_state.h | 8 +- source/common/config/grpc_mux_impl.cc | 5 +- source/common/config/grpc_mux_impl.h | 5 +- source/common/config/metadata.h | 5 +- source/common/config/utility.cc | 2 - source/common/config/well_known_names.h | 1 - .../common/filesystem/inotify/watcher_impl.h | 5 +- .../common/filesystem/kqueue/watcher_impl.h | 4 +- .../filesystem/win32/filesystem_impl.cc | 3 +- source/common/filesystem/win32/watcher_impl.h | 5 +- .../formatter/substitution_format_string.h | 1 - .../common/formatter/substitution_formatter.h | 1 - source/common/grpc/google_async_client_impl.h | 3 +- source/common/http/BUILD | 1 + source/common/http/utility.cc | 3 +- source/common/network/BUILD | 5 +- source/common/network/dns_impl.h | 4 +- source/common/network/lc_trie.h | 4 +- source/common/router/config_impl.h | 8 +- source/common/router/header_formatter.h | 3 +- .../router/metadatamatchcriteria_impl.cc | 2 +- source/common/router/rds_impl.h | 13 +-- .../route_config_update_receiver_impl.h | 1 - source/common/router/vhds.cc | 10 +- source/common/router/vhds.h | 8 +- source/common/runtime/runtime_impl.cc | 9 +- source/common/runtime/runtime_impl.h | 6 +- source/common/secret/sds_api.cc | 2 - source/common/secret/secret_manager_impl.h | 14 +-- source/common/singleton/manager_impl.h | 6 +- source/common/stats/BUILD | 1 + source/common/stats/fake_symbol_table_impl.h | 1 - source/common/stats/symbol_table_impl.h | 1 - source/common/stats/tag_producer_impl.cc | 6 +- source/common/stats/tag_producer_impl.h | 7 +- source/common/tcp_proxy/tcp_proxy.h | 5 +- source/common/upstream/cds_api_impl.cc | 3 +- source/common/upstream/cluster_manager_impl.h | 12 +-- source/common/upstream/eds.cc | 4 +- source/common/upstream/eds.h | 6 +- .../upstream/health_checker_base_impl.h | 2 +- source/common/upstream/load_balancer_impl.h | 6 +- source/common/upstream/load_stats_reporter.cc | 2 +- source/common/upstream/load_stats_reporter.h | 2 +- source/common/upstream/original_dst_cluster.h | 1 - .../common/upstream/outlier_detection_impl.h | 5 +- source/common/upstream/strict_dns_cluster.cc | 2 +- source/common/upstream/subset_lb.cc | 17 ++-- source/common/upstream/subset_lb.h | 8 +- source/common/upstream/upstream_impl.cc | 8 +- source/common/upstream/upstream_impl.h | 7 +- .../access_loggers/common/access_log_base.h | 1 - .../extensions/access_loggers/file/config.cc | 1 - .../grpc/grpc_access_log_impl.h | 1 - .../grpc/http_grpc_access_log_impl.h | 1 - .../grpc/tcp_grpc_access_log_impl.h | 1 - .../clusters/redis/redis_cluster.cc | 2 +- .../extensions/clusters/redis/redis_cluster.h | 2 +- source/extensions/common/tap/admin.h | 4 +- source/extensions/common/utility.h | 2 - .../extensions/filters/http/common/utility.h | 2 - .../filters/http/fault/fault_filter.h | 1 - .../json_transcoder_filter.cc | 4 + .../filters/http/grpc_web/grpc_web_filter.h | 2 - .../filters/http/jwt_authn/extractor.cc | 13 +-- .../filters/http/jwt_authn/extractor.h | 1 - .../filters/http/jwt_authn/jwks_cache.cc | 6 +- .../filters/http/jwt_authn/verifier.cc | 2 +- .../network/client_ssl_auth/client_ssl_auth.h | 5 +- .../filters/network/common/utility.h | 2 - .../filters/network/dubbo_proxy/message.h | 3 +- .../filters/network/dubbo_proxy/metadata.h | 1 - .../filters/network/dubbo_proxy/protocol.h | 1 - .../network/dubbo_proxy/protocol_constants.h | 10 +- .../network/dubbo_proxy/router/route.h | 2 +- .../filters/network/dubbo_proxy/serializer.h 
| 1 - .../network/dubbo_proxy/serializer_impl.h | 2 +- .../redis_proxy/command_splitter_impl.h | 1 - .../network/redis_proxy/conn_pool_impl.h | 7 +- .../network/rocketmq_proxy/active_message.cc | 2 +- .../network/rocketmq_proxy/topic_route.h | 9 +- .../filters/network/thrift_proxy/decoder.cc | 2 - .../filters/network/zookeeper_proxy/decoder.h | 4 +- .../quiche_unordered_containers_impl.h | 2 +- .../extensions/stat_sinks/hystrix/hystrix.cc | 6 +- .../extensions/stat_sinks/hystrix/hystrix.h | 4 +- .../extensions/transport_sockets/alts/BUILD | 3 + .../transport_sockets/alts/config.cc | 7 +- source/extensions/transport_sockets/tls/BUILD | 1 + .../transport_sockets/tls/context_impl.cc | 3 +- source/server/admin/admin.cc | 1 - source/server/admin/admin.h | 1 - source/server/admin/runtime_handler.cc | 5 +- source/server/configuration_impl.h | 1 - source/server/connection_handler_impl.cc | 2 +- source/server/connection_handler_impl.h | 2 +- source/server/filter_chain_manager_impl.cc | 3 +- source/server/lds_api.cc | 7 +- source/server/overload_manager_impl.cc | 17 ++-- source/server/overload_manager_impl.h | 13 +-- source/server/server.cc | 1 - test/common/filesystem/directory_test.cc | 4 +- .../formatter/substitution_formatter_test.cc | 20 ++-- test/common/grpc/google_grpc_utils_test.cc | 4 +- test/common/http/http2/codec_impl_test_util.h | 2 +- test/common/network/dns_impl_test.cc | 8 +- test/common/protobuf/utility_test.cc | 5 +- test/common/router/vhds_test.cc | 2 +- test/common/secret/BUILD | 1 - .../common/secret/secret_manager_impl_test.cc | 3 +- test/common/stats/thread_local_store_test.cc | 1 - test/common/upstream/BUILD | 1 - .../upstream/health_checker_impl_test.cc | 4 +- .../upstream/load_balancer_benchmark.cc | 12 +-- .../upstream/load_balancer_simulation_test.cc | 4 +- .../upstream/load_stats_reporter_test.cc | 4 +- test/common/upstream/ring_hash_lb_test.cc | 10 +- test/common/upstream/upstream_impl_test.cc | 2 +- .../ext_authz/ext_authz_http_impl_test.cc | 2 +- .../http/jwt_authn/group_verifier_test.cc | 8 +- .../filters/network/redis_proxy/BUILD | 1 - .../redis_proxy/conn_pool_impl_test.cc | 99 ++++++++++--------- .../rocketmq_proxy/active_message_test.cc | 2 +- .../rocketmq_proxy/topic_route_test.cc | 7 +- .../quiche/platform/quic_platform_test.cc | 1 - .../stats_sinks/hystrix/hystrix_test.cc | 18 ++-- .../tracers/zipkin/zipkin_tracer_impl_test.cc | 1 - test/fuzz/utility.h | 4 +- test/integration/fake_upstream.h | 6 +- test/integration/http2_integration_test.cc | 74 +++++++------- test/integration/http_integration.cc | 10 +- test/integration/integration.h | 8 +- test/integration/protocol_integration_test.cc | 10 +- test/integration/xfcc_integration_test.cc | 8 +- test/mocks/runtime/mocks.h | 4 +- test/mocks/server/config_tracker.h | 3 +- test/mocks/thread_local/mocks.h | 1 - test/mocks/upstream/cluster_info.h | 2 +- test/server/admin/admin_test.cc | 1 - test/server/admin/runtime_handler_test.cc | 4 +- test/server/filter_chain_benchmark_test.cc | 1 - test/test_common/environment.cc | 5 +- test/test_common/environment.h | 6 +- test/tools/router_check/router.cc | 1 - tools/clang_tools/api_booster/BUILD | 1 + tools/clang_tools/api_booster/main.cc | 3 +- .../api_booster/proto_cxx_utils.cc | 8 +- .../clang_tools/api_booster/proto_cxx_utils.h | 8 +- .../api_booster/proto_cxx_utils_test.cc | 8 +- tools/code_format/check_format.py | 9 ++ tools/code_format/check_format_test_helper.py | 6 ++ .../check_format/std_unordered_map.cc | 7 ++ .../check_format/std_unordered_set.cc | 7 ++ 
tools/type_whisperer/BUILD | 1 + tools/type_whisperer/api_type_db.h | 4 +- 174 files changed, 494 insertions(+), 442 deletions(-) create mode 100644 tools/testdata/check_format/std_unordered_map.cc create mode 100644 tools/testdata/check_format/std_unordered_set.cc diff --git a/include/envoy/grpc/status.h b/include/envoy/grpc/status.h index b967d3e29164..3715571bbb66 100644 --- a/include/envoy/grpc/status.h +++ b/include/envoy/grpc/status.h @@ -9,8 +9,6 @@ class Status { public: using GrpcStatus = int64_t; - // If this enum is changed, then the std::unordered_map in Envoy::Grpc::Utility::nameToGrpcStatus - // located at: //source/common/access_log/grpc/status.cc must also be changed. enum WellKnownGrpcStatus { // The RPC completed successfully. Ok = 0, diff --git a/include/envoy/http/BUILD b/include/envoy/http/BUILD index f84ccbc9c60a..41d7af731db8 100644 --- a/include/envoy/http/BUILD +++ b/include/envoy/http/BUILD @@ -128,6 +128,7 @@ envoy_cc_library( envoy_cc_library( name = "metadata_interface", hdrs = ["metadata_interface.h"], + external_deps = ["abseil_node_hash_map"], ) envoy_cc_library( diff --git a/include/envoy/http/header_map.h b/include/envoy/http/header_map.h index 8692e3526735..bc5e9338a2dc 100644 --- a/include/envoy/http/header_map.h +++ b/include/envoy/http/header_map.h @@ -6,7 +6,6 @@ #include #include #include -#include #include #include "envoy/common/pure.h" diff --git a/include/envoy/http/metadata_interface.h b/include/envoy/http/metadata_interface.h index dc8dc0e4e65c..3874aa905a49 100644 --- a/include/envoy/http/metadata_interface.h +++ b/include/envoy/http/metadata_interface.h @@ -3,9 +3,10 @@ #include #include #include -#include #include +#include "absl/container/node_hash_map.h" + namespace Envoy { namespace Http { @@ -20,7 +21,7 @@ constexpr uint8_t END_METADATA_FLAG = 0x4; // TODO(soya3129): Respect max_frame_size after nghttp2 #1250 is resolved. constexpr uint64_t METADATA_MAX_PAYLOAD_SIZE = 16384; -using UnorderedStringMap = std::unordered_map; +using UnorderedStringMap = absl::node_hash_map; class MetadataMap : public UnorderedStringMap { public: diff --git a/include/envoy/runtime/BUILD b/include/envoy/runtime/BUILD index cb0aff14c0ef..b80d180dedaa 100644 --- a/include/envoy/runtime/BUILD +++ b/include/envoy/runtime/BUILD @@ -11,7 +11,10 @@ envoy_package() envoy_cc_library( name = "runtime_interface", hdrs = ["runtime.h"], - external_deps = ["abseil_optional"], + external_deps = [ + "abseil_node_hash_map", + "abseil_optional", + ], deps = [ "//include/envoy/stats:stats_interface", "//include/envoy/thread_local:thread_local_interface", diff --git a/include/envoy/runtime/runtime.h b/include/envoy/runtime/runtime.h index bdd2f67114fd..35737b6d0745 100644 --- a/include/envoy/runtime/runtime.h +++ b/include/envoy/runtime/runtime.h @@ -5,7 +5,6 @@ #include #include #include -#include #include #include "envoy/common/pure.h" @@ -17,6 +16,7 @@ #include "common/singleton/threadsafe_singleton.h" #include "absl/container/flat_hash_map.h" +#include "absl/container/node_hash_map.h" #include "absl/types/optional.h" namespace Envoy { @@ -253,7 +253,7 @@ class Loader { * a key, use an empty string as the value. * @param values the values to merge */ - virtual void mergeValues(const std::unordered_map& values) PURE; + virtual void mergeValues(const absl::node_hash_map& values) PURE; /** * Initiate all RTDS subscriptions. 
The `on_done` callback is invoked when all RTDS requests diff --git a/include/envoy/server/overload_manager.h b/include/envoy/server/overload_manager.h index e10812add8fd..24ddd16cfd6c 100644 --- a/include/envoy/server/overload_manager.h +++ b/include/envoy/server/overload_manager.h @@ -1,7 +1,6 @@ #pragma once #include -#include #include "envoy/common/pure.h" #include "envoy/thread_local/thread_local.h" diff --git a/include/envoy/upstream/BUILD b/include/envoy/upstream/BUILD index 2755da336b56..d67f7b242f40 100644 --- a/include/envoy/upstream/BUILD +++ b/include/envoy/upstream/BUILD @@ -11,6 +11,9 @@ envoy_package() envoy_cc_library( name = "cluster_manager_interface", hdrs = ["cluster_manager.h"], + external_deps = [ + "abseil_node_hash_map", + ], deps = [ ":health_checker_interface", ":load_balancer_interface", diff --git a/include/envoy/upstream/cluster_manager.h b/include/envoy/upstream/cluster_manager.h index ed0c3935bb08..936fa439375b 100644 --- a/include/envoy/upstream/cluster_manager.h +++ b/include/envoy/upstream/cluster_manager.h @@ -4,7 +4,6 @@ #include #include #include -#include #include "envoy/access_log/access_log.h" #include "envoy/api/api.h" @@ -32,6 +31,9 @@ #include "envoy/upstream/thread_local_cluster.h" #include "envoy/upstream/upstream.h" +#include "absl/container/flat_hash_set.h" +#include "absl/container/node_hash_map.h" + namespace Envoy { namespace Upstream { @@ -123,7 +125,7 @@ class ClusterManager { virtual void initializeSecondaryClusters(const envoy::config::bootstrap::v3::Bootstrap& bootstrap) PURE; - using ClusterInfoMap = std::unordered_map>; + using ClusterInfoMap = absl::node_hash_map>; /** * @return ClusterInfoMap all current clusters. These are the primary (not thread local) @@ -131,7 +133,7 @@ class ClusterManager { */ virtual ClusterInfoMap clusters() PURE; - using ClusterSet = std::unordered_set; + using ClusterSet = absl::flat_hash_set; /** * @return const ClusterSet& providing the cluster names that are eligible as diff --git a/include/envoy/upstream/upstream.h b/include/envoy/upstream/upstream.h index cd15d0bb3dff..b2d72e2cea24 100644 --- a/include/envoy/upstream/upstream.h +++ b/include/envoy/upstream/upstream.h @@ -211,7 +211,7 @@ using HostVector = std::vector; using HealthyHostVector = Phantom; using DegradedHostVector = Phantom; using ExcludedHostVector = Phantom; -using HostMap = std::unordered_map; +using HostMap = absl::node_hash_map; using HostVectorSharedPtr = std::shared_ptr; using HostVectorConstSharedPtr = std::shared_ptr; @@ -221,7 +221,7 @@ using ExcludedHostVectorConstSharedPtr = std::shared_ptr; using LocalityWeightsMap = - std::unordered_map; + absl::node_hash_map; using PriorityState = std::vector>; /** diff --git a/source/common/access_log/BUILD b/source/common/access_log/BUILD index 08408b26a9cf..00bfcb101f79 100644 --- a/source/common/access_log/BUILD +++ b/source/common/access_log/BUILD @@ -12,7 +12,9 @@ envoy_cc_library( name = "access_log_lib", srcs = ["access_log_impl.cc"], hdrs = ["access_log_impl.h"], - external_deps = ["abseil_hash"], + external_deps = [ + "abseil_hash", + ], deps = [ "//include/envoy/access_log:access_log_interface", "//include/envoy/config:typed_config_interface", diff --git a/source/common/access_log/access_log_impl.h b/source/common/access_log/access_log_impl.h index 19264b2238d6..657a7d069cf5 100644 --- a/source/common/access_log/access_log_impl.h +++ b/source/common/access_log/access_log_impl.h @@ -2,7 +2,6 @@ #include #include -#include #include #include "envoy/access_log/access_log.h" @@ -17,6 
+16,7 @@ #include "common/http/header_utility.h" #include "common/protobuf/protobuf.h" +#include "absl/container/node_hash_set.h" #include "absl/hash/hash.h" namespace Envoy { @@ -207,7 +207,7 @@ class ResponseFlagFilter : public Filter { class GrpcStatusFilter : public Filter { public: using GrpcStatusHashSet = - std::unordered_set>; + absl::node_hash_set>; GrpcStatusFilter(const envoy::config::accesslog::v3::GrpcStatusFilter& config); diff --git a/source/common/access_log/access_log_manager_impl.h b/source/common/access_log/access_log_manager_impl.h index 2bf745cf41bf..1727a0bbf053 100644 --- a/source/common/access_log/access_log_manager_impl.h +++ b/source/common/access_log/access_log_manager_impl.h @@ -1,7 +1,6 @@ #pragma once #include -#include #include "envoy/access_log/access_log.h" #include "envoy/api/api.h" @@ -14,6 +13,8 @@ #include "common/common/logger.h" #include "common/common/thread.h" +#include "absl/container/node_hash_map.h" + namespace Envoy { #define ACCESS_LOG_FILE_STATS(COUNTER, GAUGE) \ @@ -51,7 +52,7 @@ class AccessLogManagerImpl : public AccessLogManager, Logger::Loggable access_logs_; + absl::node_hash_map access_logs_; }; /** diff --git a/source/common/common/BUILD b/source/common/common/BUILD index 05758f4fe50e..d3b6bc9741ce 100644 --- a/source/common/common/BUILD +++ b/source/common/common/BUILD @@ -347,6 +347,7 @@ envoy_cc_library( name = "utility_lib", srcs = ["utility.cc"], hdrs = ["utility.h"], + external_deps = ["abseil_node_hash_map"], deps = [ ":assert_lib", ":hash_lib", diff --git a/source/common/common/hash.h b/source/common/common/hash.h index 38fb20a0f9cc..c29b9effa89d 100644 --- a/source/common/common/hash.h +++ b/source/common/common/hash.h @@ -1,8 +1,6 @@ #pragma once #include -#include -#include #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" diff --git a/source/common/common/perf_annotation.h b/source/common/common/perf_annotation.h index 5701eaa68c32..7187244322d0 100644 --- a/source/common/common/perf_annotation.h +++ b/source/common/common/perf_annotation.h @@ -4,11 +4,11 @@ #include #include -#include #include "common/common/thread.h" #include "common/common/utility.h" +#include "absl/container/node_hash_map.h" #include "absl/strings/string_view.h" // Performance Annotation system, enabled with @@ -139,7 +139,7 @@ class PerfAnnotationContext { } }; - using DurationStatsMap = std::unordered_map; + using DurationStatsMap = absl::node_hash_map; // Maps {category, description} to DurationStats. #if PERF_THREAD_SAFE diff --git a/source/common/common/utility.cc b/source/common/common/utility.cc index eb7ba3619a39..2017bbcdfa66 100644 --- a/source/common/common/utility.cc +++ b/source/common/common/utility.cc @@ -15,6 +15,7 @@ #include "common/common/hash.h" #include "common/singleton/const_singleton.h" +#include "absl/container/node_hash_map.h" #include "absl/strings/ascii.h" #include "absl/strings/match.h" #include "absl/strings/str_join.h" @@ -85,7 +86,7 @@ std::string DateFormatter::fromTime(const SystemTime& time) const { SpecifierOffsets specifier_offsets; }; // A map is used to keep different formatted format strings at a given second. - std::unordered_map formatted; + absl::node_hash_map formatted; }; static thread_local CachedTime cached_time; @@ -101,9 +102,11 @@ std::string DateFormatter::fromTime(const SystemTime& time) const { // Remove all the expired cached items. 
for (auto it = cached_time.formatted.cbegin(); it != cached_time.formatted.cend();) { if (it->second.epoch_time_seconds != epoch_time_seconds) { - it = cached_time.formatted.erase(it); + auto next_it = std::next(it); + cached_time.formatted.erase(it); + it = next_it; } else { - it++; + ++it; } } diff --git a/source/common/common/utility.h b/source/common/common/utility.h index e990a4e662ef..0101fd3d9fd9 100644 --- a/source/common/common/utility.h +++ b/source/common/common/utility.h @@ -5,7 +5,6 @@ #include #include #include -#include #include #include "envoy/common/interval_set.h" diff --git a/source/common/config/config_provider_impl.h b/source/common/config/config_provider_impl.h index 157941124d29..144332fe23b0 100644 --- a/source/common/config/config_provider_impl.h +++ b/source/common/config/config_provider_impl.h @@ -391,10 +391,10 @@ class ConfigProviderManagerImplBase : public ConfigProviderManager, public Singl protected: // Ordered set for deterministic config dump output. using ConfigProviderSet = std::set; - using ConfigProviderMap = std::unordered_map, EnumClassHash>; + using ConfigProviderMap = absl::node_hash_map, EnumClassHash>; using ConfigSubscriptionMap = - std::unordered_map>; + absl::node_hash_map>; ConfigProviderManagerImplBase(Server::Admin& admin, const std::string& config_name); diff --git a/source/common/config/delta_subscription_state.h b/source/common/config/delta_subscription_state.h index 00693a1abe2c..1e21ba3a8efd 100644 --- a/source/common/config/delta_subscription_state.h +++ b/source/common/config/delta_subscription_state.h @@ -13,6 +13,8 @@ #include "common/config/pausable_ack_queue.h" #include "common/config/watch_map.h" +#include "absl/container/node_hash_map.h" + namespace Envoy { namespace Config { @@ -81,7 +83,7 @@ class DeltaSubscriptionState : public Logger::Loggable { // names we are currently interested in. Those in the waitingForServer state currently don't have // any version for that resource: we need to inform the server if we lose interest in them, but we // also need to *not* include them in the initial_resource_versions map upon a reconnect. - std::unordered_map resource_versions_; + absl::node_hash_map resource_versions_; // The keys of resource_versions_. Only tracked separately because std::map does not provide an // iterator into just its keys, e.g. for use in std::set_difference. std::set resource_names_; @@ -94,8 +96,8 @@ class DeltaSubscriptionState : public Logger::Loggable { bool any_request_sent_yet_in_current_stream_{}; // Tracks changes in our subscription interest since the previous DeltaDiscoveryRequest we sent. - // Can't use unordered_set due to ordering issues in gTest expectation matching. - // Feel free to change to unordered if you can figure out how to make it work. + // TODO: Can't use absl::flat_hash_set due to ordering issues in gTest expectation matching. + // Feel free to change to an unordered container once we figure out how to make it work. 
std::set names_added_; std::set names_removed_; }; diff --git a/source/common/config/grpc_mux_impl.cc b/source/common/config/grpc_mux_impl.cc index e97d55362445..907bf9148adf 100644 --- a/source/common/config/grpc_mux_impl.cc +++ b/source/common/config/grpc_mux_impl.cc @@ -1,7 +1,5 @@ #include "common/config/grpc_mux_impl.h" -#include - #include "envoy/service/discovery/v3/discovery.pb.h" #include "common/config/decoded_resource_impl.h" @@ -11,6 +9,7 @@ #include "common/protobuf/protobuf.h" #include "absl/container/btree_map.h" +#include "absl/container/node_hash_set.h" namespace Envoy { namespace Config { @@ -36,7 +35,7 @@ void GrpcMuxImpl::sendDiscoveryRequest(const std::string& type_url) { request.mutable_resource_names()->Clear(); // Maintain a set to avoid dupes. - std::unordered_set resources; + absl::node_hash_set resources; for (const auto* watch : api_state.watches_) { for (const std::string& resource : watch->resources_) { if (resources.count(resource) == 0) { diff --git a/source/common/config/grpc_mux_impl.h b/source/common/config/grpc_mux_impl.h index 4480387219a8..d735bc12c1cf 100644 --- a/source/common/config/grpc_mux_impl.h +++ b/source/common/config/grpc_mux_impl.h @@ -3,7 +3,6 @@ #include #include #include -#include #include "envoy/api/v2/discovery.pb.h" #include "envoy/common/random_generator.h" @@ -21,6 +20,8 @@ #include "common/config/grpc_stream.h" #include "common/config/utility.h" +#include "absl/container/node_hash_map.h" + namespace Envoy { namespace Config { /** @@ -131,7 +132,7 @@ class GrpcMuxImpl : public GrpcMux, const LocalInfo::LocalInfo& local_info_; const bool skip_subsequent_node_; bool first_stream_request_; - std::unordered_map api_state_; + absl::node_hash_map api_state_; // Envoy's dependency ordering. std::list subscriptions_; diff --git a/source/common/config/metadata.h b/source/common/config/metadata.h index 3c59c77a2083..ed4fcd96c270 100644 --- a/source/common/config/metadata.h +++ b/source/common/config/metadata.h @@ -2,7 +2,6 @@ #include #include -#include #include "envoy/config/core/v3/base.pb.h" #include "envoy/config/typed_metadata.h" @@ -14,6 +13,8 @@ #include "common/protobuf/protobuf.h" #include "common/shared_pool/shared_pool.h" +#include "absl/container/node_hash_map.h" + namespace Envoy { namespace Config { @@ -123,7 +124,7 @@ template class TypedMetadataImpl : public TypedMetadata } } - std::unordered_map> data_; + absl::node_hash_map> data_; }; } // namespace Config diff --git a/source/common/config/utility.cc b/source/common/config/utility.cc index 6b5499def902..e42ee777a156 100644 --- a/source/common/config/utility.cc +++ b/source/common/config/utility.cc @@ -1,7 +1,5 @@ #include "common/config/utility.h" -#include - #include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/config/cluster/v3/cluster.pb.h" #include "envoy/config/core/v3/address.pb.h" diff --git a/source/common/config/well_known_names.h b/source/common/config/well_known_names.h index a2cd01dfcdf8..30698815f9ba 100644 --- a/source/common/config/well_known_names.h +++ b/source/common/config/well_known_names.h @@ -1,7 +1,6 @@ #pragma once #include -#include #include #include "envoy/common/exception.h" diff --git a/source/common/filesystem/inotify/watcher_impl.h b/source/common/filesystem/inotify/watcher_impl.h index 40f903f43e4a..9b416f5c9c03 100644 --- a/source/common/filesystem/inotify/watcher_impl.h +++ b/source/common/filesystem/inotify/watcher_impl.h @@ -3,7 +3,6 @@ #include #include #include -#include #include "envoy/api/api.h" #include 
"envoy/event/dispatcher.h" @@ -11,6 +10,8 @@ #include "common/common/logger.h" +#include "absl/container/node_hash_map.h" + namespace Envoy { namespace Filesystem { @@ -43,7 +44,7 @@ class WatcherImpl : public Watcher, Logger::Loggable { Api::Api& api_; int inotify_fd_; Event::FileEventPtr inotify_event_; - std::unordered_map callback_map_; + absl::node_hash_map callback_map_; }; } // namespace Filesystem diff --git a/source/common/filesystem/kqueue/watcher_impl.h b/source/common/filesystem/kqueue/watcher_impl.h index b61ba721b531..e34d90548979 100644 --- a/source/common/filesystem/kqueue/watcher_impl.h +++ b/source/common/filesystem/kqueue/watcher_impl.h @@ -11,6 +11,8 @@ #include "common/common/linked_object.h" #include "common/common/logger.h" +#include "absl/container/node_hash_map.h" + namespace Envoy { namespace Filesystem { @@ -47,7 +49,7 @@ class WatcherImpl : public Watcher, Logger::Loggable { Api::Api& api_; int queue_; - std::unordered_map watches_; + absl::node_hash_map watches_; Event::FileEventPtr kqueue_event_; }; diff --git a/source/common/filesystem/win32/filesystem_impl.cc b/source/common/filesystem/win32/filesystem_impl.cc index ca9e246b13b7..d868fe567c00 100644 --- a/source/common/filesystem/win32/filesystem_impl.cc +++ b/source/common/filesystem/win32/filesystem_impl.cc @@ -12,6 +12,7 @@ #include "common/common/fmt.h" #include "common/filesystem/filesystem_impl.h" +#include "absl/container/node_hash_map.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_split.h" @@ -156,7 +157,7 @@ static const char filename_char_table[] = { // The "COM#" and "LPT#" names below have boolean flag requiring a [1-9] suffix. // This list can be avoided by observing dwFileAttributes & FILE_ATTRIBUTE_DEVICE // within WIN32_FILE_ATTRIBUTE_DATA or WIN32_FIND_DATA results. 
-std::unordered_map pathelt_table = { +absl::node_hash_map pathelt_table = { {"CON", false}, {"NUL", false}, {"AUX", false}, {"PRN", false}, {"COM", true}, {"LPT", true} }; diff --git a/source/common/filesystem/win32/watcher_impl.h b/source/common/filesystem/win32/watcher_impl.h index f107f541eea7..1eccf7aba5c6 100644 --- a/source/common/filesystem/win32/watcher_impl.h +++ b/source/common/filesystem/win32/watcher_impl.h @@ -7,7 +7,6 @@ #include #include #include -#include #include "envoy/api/api.h" #include "envoy/event/dispatcher.h" @@ -18,6 +17,8 @@ #include "common/common/logger.h" #include "common/common/thread_impl.h" +#include "absl/container/node_hash_map.h" + namespace Envoy { namespace Filesystem { @@ -56,7 +57,7 @@ class WatcherImpl : public Watcher, Logger::Loggable { typedef std::unique_ptr DirectoryWatchPtr; Api::Api& api_; - std::unordered_map callback_map_; + absl::node_hash_map callback_map_; Event::FileEventPtr directory_event_; os_fd_t event_write_; os_fd_t event_read_; diff --git a/source/common/formatter/substitution_format_string.h b/source/common/formatter/substitution_format_string.h index 97d3bd1e8a17..6d514cecc47d 100644 --- a/source/common/formatter/substitution_format_string.h +++ b/source/common/formatter/substitution_format_string.h @@ -1,7 +1,6 @@ #pragma once #include -#include #include "envoy/config/core/v3/substitution_format_string.pb.h" #include "envoy/formatter/substitution_formatter.h" diff --git a/source/common/formatter/substitution_formatter.h b/source/common/formatter/substitution_formatter.h index 00b4be31ac0d..8336f3274f85 100644 --- a/source/common/formatter/substitution_formatter.h +++ b/source/common/formatter/substitution_formatter.h @@ -2,7 +2,6 @@ #include #include -#include #include #include "envoy/common/time.h" diff --git a/source/common/grpc/google_async_client_impl.h b/source/common/grpc/google_async_client_impl.h index 6a576df3497a..5d339e7764a3 100644 --- a/source/common/grpc/google_async_client_impl.h +++ b/source/common/grpc/google_async_client_impl.h @@ -21,6 +21,7 @@ #include "common/grpc/typed_async_client.h" #include "common/tracing/http_tracer_impl.h" +#include "absl/container/node_hash_set.h" #include "grpcpp/generic/generic_stub.h" #include "grpcpp/grpcpp.h" #include "grpcpp/support/proto_buffer_writer.h" @@ -109,7 +110,7 @@ class GoogleAsyncClientThreadLocal : public ThreadLocal::ThreadLocalObject, Thread::ThreadPtr completion_thread_; // Track all streams that are currently using this CQ, so we can notify them // on shutdown. 
- std::unordered_set streams_; + absl::node_hash_set streams_; }; using GoogleAsyncClientThreadLocalPtr = std::unique_ptr; diff --git a/source/common/http/BUILD b/source/common/http/BUILD index 041c3508d650..9542d9eb4a93 100644 --- a/source/common/http/BUILD +++ b/source/common/http/BUILD @@ -344,6 +344,7 @@ envoy_cc_library( srcs = ["utility.cc"], hdrs = ["utility.h"], external_deps = [ + "abseil_node_hash_set", "abseil_optional", "nghttp2", ], diff --git a/source/common/http/utility.cc b/source/common/http/utility.cc index cac031e3b2a0..28e8872851cd 100644 --- a/source/common/http/utility.cc +++ b/source/common/http/utility.cc @@ -23,6 +23,7 @@ #include "common/protobuf/utility.h" #include "common/runtime/runtime_features.h" +#include "absl/container/node_hash_set.h" #include "absl/strings/match.h" #include "absl/strings/numbers.h" #include "absl/strings/str_cat.h" @@ -61,7 +62,7 @@ namespace { void validateCustomSettingsParameters( const envoy::config::core::v3::Http2ProtocolOptions& options) { std::vector parameter_collisions, custom_parameter_collisions; - std::unordered_set + absl::node_hash_set custom_parameters; // User defined and named parameters with the same SETTINGS identifier can not both be set. for (const auto& it : options.custom_settings_parameters()) { diff --git a/source/common/network/BUILD b/source/common/network/BUILD index 95d412a0a71a..67e3e0be9899 100644 --- a/source/common/network/BUILD +++ b/source/common/network/BUILD @@ -163,7 +163,10 @@ envoy_cc_library( envoy_cc_library( name = "lc_trie_lib", hdrs = ["lc_trie.h"], - external_deps = ["abseil_int128"], + external_deps = [ + "abseil_node_hash_set", + "abseil_int128", + ], deps = [ ":address_lib", ":cidr_range_lib", diff --git a/source/common/network/dns_impl.h b/source/common/network/dns_impl.h index 44588fc4f52c..dc62e06adb11 100644 --- a/source/common/network/dns_impl.h +++ b/source/common/network/dns_impl.h @@ -2,7 +2,6 @@ #include #include -#include #include "envoy/common/platform.h" #include "envoy/event/dispatcher.h" @@ -13,6 +12,7 @@ #include "common/common/logger.h" #include "common/common/utility.h" +#include "absl/container/node_hash_map.h" #include "ares.h" namespace Envoy { @@ -104,7 +104,7 @@ class DnsResolverImpl : public DnsResolver, protected Logger::Loggable events_; + absl::node_hash_map events_; }; } // namespace Network diff --git a/source/common/network/lc_trie.h b/source/common/network/lc_trie.h index aae1aa7c0ef7..dea6c6b928d8 100644 --- a/source/common/network/lc_trie.h +++ b/source/common/network/lc_trie.h @@ -2,7 +2,6 @@ #include #include -#include #include #include "envoy/common/exception.h" @@ -14,6 +13,7 @@ #include "common/network/cidr_range.h" #include "common/network/utility.h" +#include "absl/container/node_hash_set.h" #include "absl/numeric/int128.h" #include "fmt/format.h" @@ -230,7 +230,7 @@ template class LcTrie { using Ipv4 = uint32_t; using Ipv6 = absl::uint128; - using DataSet = std::unordered_set; + using DataSet = absl::node_hash_set; using DataSetSharedPtr = std::shared_ptr; /** diff --git a/source/common/router/config_impl.h b/source/common/router/config_impl.h index d5f51ec194fa..a32d19fbe742 100644 --- a/source/common/router/config_impl.h +++ b/source/common/router/config_impl.h @@ -8,7 +8,6 @@ #include #include #include -#include #include #include "envoy/config/core/v3/base.pb.h" @@ -32,6 +31,7 @@ #include "common/router/tls_context_match_criteria_impl.h" #include "common/stats/symbol_table_impl.h" +#include "absl/container/node_hash_map.h" #include 
"absl/types/optional.h" namespace Envoy { @@ -69,7 +69,7 @@ class PerFilterConfigs { const RouteSpecificFilterConfig* get(const std::string& name) const; private: - std::unordered_map configs_; + absl::node_hash_map configs_; }; class RouteEntryImplBase; @@ -919,14 +919,14 @@ class RouteMatcher { private: using WildcardVirtualHosts = - std::map, std::greater<>>; + std::map, std::greater<>>; using SubstringFunction = std::function; const VirtualHostImpl* findWildcardVirtualHost(const std::string& host, const WildcardVirtualHosts& wildcard_virtual_hosts, SubstringFunction substring_function) const; Stats::ScopePtr vhost_scope_; - std::unordered_map virtual_hosts_; + absl::node_hash_map virtual_hosts_; // std::greater as a minor optimization to iterate from more to less specific // // A note on using an unordered_map versus a vector of (string, VirtualHostSharedPtr) pairs: diff --git a/source/common/router/header_formatter.h b/source/common/router/header_formatter.h index 55d1206a1112..847657dba0d7 100644 --- a/source/common/router/header_formatter.h +++ b/source/common/router/header_formatter.h @@ -6,6 +6,7 @@ #include "envoy/formatter/substitution_formatter.h" +#include "absl/container/node_hash_map.h" #include "absl/strings/string_view.h" namespace Envoy { @@ -45,7 +46,7 @@ class StreamInfoHeaderFormatter : public HeaderFormatter { private: FieldExtractor field_extractor_; const bool append_; - std::unordered_map> + absl::node_hash_map> start_time_formatters_; }; diff --git a/source/common/router/metadatamatchcriteria_impl.cc b/source/common/router/metadatamatchcriteria_impl.cc index 8739d314cccf..88cfa4b229fd 100644 --- a/source/common/router/metadatamatchcriteria_impl.cc +++ b/source/common/router/metadatamatchcriteria_impl.cc @@ -9,7 +9,7 @@ MetadataMatchCriteriaImpl::extractMetadataMatchCriteria(const MetadataMatchCrite // Track locations of each name (from the parent) in v to make it // easier to replace them when the same name exists in matches. - std::unordered_map existing; + absl::node_hash_map existing; if (parent) { for (const auto& it : parent->metadata_match_criteria_) { diff --git a/source/common/router/rds_impl.h b/source/common/router/rds_impl.h index 0e8f6630e59a..cba4793acd5a 100644 --- a/source/common/router/rds_impl.h +++ b/source/common/router/rds_impl.h @@ -5,8 +5,6 @@ #include #include #include -#include -#include #include "envoy/admin/v3/config_dump.pb.h" #include "envoy/config/core/v3/config_source.pb.h" @@ -36,6 +34,9 @@ #include "common/router/route_config_update_receiver_impl.h" #include "common/router/vhds.h" +#include "absl/container/node_hash_map.h" +#include "absl/container/node_hash_set.h" + namespace Envoy { namespace Router { @@ -118,7 +119,7 @@ class RdsRouteConfigSubscription public: ~RdsRouteConfigSubscription() override; - std::unordered_set& routeConfigProviders() { + absl::node_hash_set& routeConfigProviders() { ASSERT(route_config_providers_.size() == 1 || route_config_providers_.empty()); return route_config_providers_; } @@ -169,7 +170,7 @@ class RdsRouteConfigSubscription RouteConfigProviderManagerImpl& route_config_provider_manager_; const uint64_t manager_identifier_; // TODO(lambdai): Prove that a subscription has exactly one provider and remove the container. 
- std::unordered_set route_config_providers_; + absl::node_hash_set route_config_providers_; VhdsSubscriptionPtr vhds_subscription_; RouteConfigUpdatePtr config_update_info_; Common::CallbackManager<> update_callback_manager_; @@ -253,9 +254,9 @@ class RouteConfigProviderManagerImpl : public RouteConfigProviderManager, // TODO(jsedgwick) These two members are prime candidates for the owned-entry list/map // as in ConfigTracker. I.e. the ProviderImpls would have an EntryOwner for these lists // Then the lifetime management stuff is centralized and opaque. - std::unordered_map> + absl::node_hash_map> dynamic_route_config_providers_; - std::unordered_set static_route_config_providers_; + absl::node_hash_set static_route_config_providers_; Server::ConfigTracker::EntryOwnerPtr config_tracker_entry_; friend class RdsRouteConfigSubscription; diff --git a/source/common/router/route_config_update_receiver_impl.h b/source/common/router/route_config_update_receiver_impl.h index 9bfa6940cbbe..a0e44f7975da 100644 --- a/source/common/router/route_config_update_receiver_impl.h +++ b/source/common/router/route_config_update_receiver_impl.h @@ -1,7 +1,6 @@ #pragma once #include -#include #include "envoy/config/route/v3/route.pb.h" #include "envoy/config/route/v3/route_components.pb.h" diff --git a/source/common/router/vhds.cc b/source/common/router/vhds.cc index 47552981f38f..31d5b9d27d25 100644 --- a/source/common/router/vhds.cc +++ b/source/common/router/vhds.cc @@ -20,11 +20,11 @@ namespace Envoy { namespace Router { // Implements callbacks to handle DeltaDiscovery protocol for VirtualHostDiscoveryService -VhdsSubscription::VhdsSubscription(RouteConfigUpdatePtr& config_update_info, - Server::Configuration::ServerFactoryContext& factory_context, - const std::string& stat_prefix, - std::unordered_set& route_config_providers, - envoy::config::core::v3::ApiVersion resource_api_version) +VhdsSubscription::VhdsSubscription( + RouteConfigUpdatePtr& config_update_info, + Server::Configuration::ServerFactoryContext& factory_context, const std::string& stat_prefix, + absl::node_hash_set& route_config_providers, + envoy::config::core::v3::ApiVersion resource_api_version) : Envoy::Config::SubscriptionBase( resource_api_version, factory_context.messageValidationContext().dynamicValidationVisitor(), "name"), diff --git a/source/common/router/vhds.h b/source/common/router/vhds.h index dc8bd87ded74..ea5be4074042 100644 --- a/source/common/router/vhds.h +++ b/source/common/router/vhds.h @@ -3,8 +3,6 @@ #include #include #include -#include -#include #include "envoy/config/core/v3/config_source.pb.h" #include "envoy/config/route/v3/route_components.pb.h" @@ -25,6 +23,8 @@ #include "common/init/target_impl.h" #include "common/protobuf/utility.h" +#include "absl/container/node_hash_set.h" + namespace Envoy { namespace Router { @@ -42,7 +42,7 @@ class VhdsSubscription : Envoy::Config::SubscriptionBase& route_config_providers, + absl::node_hash_set& route_config_providers, const envoy::config::core::v3::ApiVersion resource_api_version = envoy::config::core::v3::ApiVersion::AUTO); ~VhdsSubscription() override { init_target_.ready(); } @@ -74,7 +74,7 @@ class VhdsSubscription : Envoy::Config::SubscriptionBase& route_config_providers_; + absl::node_hash_set& route_config_providers_; }; using VhdsSubscriptionPtr = std::unique_ptr; diff --git a/source/common/runtime/runtime_impl.cc b/source/common/runtime/runtime_impl.cc index 6b61b2211913..86c59606a80e 100644 --- a/source/common/runtime/runtime_impl.cc +++ 
b/source/common/runtime/runtime_impl.cc @@ -2,7 +2,6 @@ #include #include -#include #include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/config/core/v3/config_source.pb.h" @@ -23,6 +22,8 @@ #include "common/protobuf/utility.h" #include "common/runtime/runtime_features.h" +#include "absl/container/node_hash_map.h" +#include "absl/container/node_hash_set.h" #include "absl/strings/match.h" #include "absl/strings/numbers.h" @@ -232,7 +233,7 @@ void SnapshotImpl::parseEntryFractionalPercentValue(Entry& entry) { entry.fractional_percent_value_ = converted_fractional_percent; } -void AdminLayer::mergeValues(const std::unordered_map& values) { +void AdminLayer::mergeValues(const absl::node_hash_map& values) { for (const auto& kv : values) { values_.erase(kv.first); if (!kv.second.empty()) { @@ -350,7 +351,7 @@ LoaderImpl::LoaderImpl(Event::Dispatcher& dispatcher, ThreadLocal::SlotAllocator : generator_(generator), stats_(generateStats(store)), tls_(tls.allocateSlot()), config_(config), service_cluster_(local_info.clusterName()), api_(api), init_watcher_("RTDS", [this]() { onRtdsReady(); }), store_(store) { - std::unordered_set layer_names; + absl::node_hash_set layer_names; for (const auto& layer : config_.layers()) { auto ret = layer_names.insert(layer.name()); if (!ret.second) { @@ -488,7 +489,7 @@ SnapshotConstSharedPtr LoaderImpl::threadsafeSnapshot() { } } -void LoaderImpl::mergeValues(const std::unordered_map& values) { +void LoaderImpl::mergeValues(const absl::node_hash_map& values) { if (admin_layer_ == nullptr) { throw EnvoyException("No admin layer specified"); } diff --git a/source/common/runtime/runtime_impl.h b/source/common/runtime/runtime_impl.h index d25e8d8ed25f..ee4c0cb3841c 100644 --- a/source/common/runtime/runtime_impl.h +++ b/source/common/runtime/runtime_impl.h @@ -3,7 +3,6 @@ #include #include #include -#include #include "envoy/api/api.h" #include "envoy/common/exception.h" @@ -30,6 +29,7 @@ #include "common/init/target_impl.h" #include "common/singleton/threadsafe_singleton.h" +#include "absl/container/node_hash_map.h" #include "spdlog/spdlog.h" namespace Envoy { @@ -153,7 +153,7 @@ class AdminLayer : public OverrideLayerImpl { * Merge the provided values into our entry map. An empty value indicates that a key should be * removed from our map. 
*/ - void mergeValues(const std::unordered_map& values); + void mergeValues(const absl::node_hash_map& values); private: RuntimeStats& stats_; @@ -238,7 +238,7 @@ class LoaderImpl : public Loader, Logger::Loggable { void initialize(Upstream::ClusterManager& cm) override; const Snapshot& snapshot() override; SnapshotConstSharedPtr threadsafeSnapshot() override; - void mergeValues(const std::unordered_map& values) override; + void mergeValues(const absl::node_hash_map& values) override; void startRtdsSubscriptions(ReadyCallback on_done) override; Stats::Scope& getRootScope() override; diff --git a/source/common/secret/sds_api.cc b/source/common/secret/sds_api.cc index 42af9809767e..664de75f4439 100644 --- a/source/common/secret/sds_api.cc +++ b/source/common/secret/sds_api.cc @@ -1,7 +1,5 @@ #include "common/secret/sds_api.h" -#include - #include "envoy/api/v2/auth/cert.pb.h" #include "envoy/config/core/v3/config_source.pb.h" #include "envoy/extensions/transport_sockets/tls/v3/cert.pb.h" diff --git a/source/common/secret/secret_manager_impl.h b/source/common/secret/secret_manager_impl.h index d7be0e8b6b54..799c7415d7ce 100644 --- a/source/common/secret/secret_manager_impl.h +++ b/source/common/secret/secret_manager_impl.h @@ -1,7 +1,5 @@ #pragma once -#include - #include "envoy/config/core/v3/config_source.pb.h" #include "envoy/extensions/transport_sockets/tls/v3/cert.pb.h" #include "envoy/secret/secret_manager.h" @@ -13,6 +11,8 @@ #include "common/common/logger.h" #include "common/secret/sds_api.h" +#include "absl/container/node_hash_map.h" + namespace Envoy { namespace Secret { @@ -115,22 +115,22 @@ class SecretManagerImpl : public SecretManager { ASSERT(num_deleted == 1, ""); } - std::unordered_map> dynamic_secret_providers_; + absl::node_hash_map> dynamic_secret_providers_; }; // Manages pairs of secret name and TlsCertificateConfigProviderSharedPtr. - std::unordered_map + absl::node_hash_map static_tls_certificate_providers_; // Manages pairs of secret name and CertificateValidationContextConfigProviderSharedPtr. - std::unordered_map + absl::node_hash_map static_certificate_validation_context_providers_; - std::unordered_map + absl::node_hash_map static_session_ticket_keys_providers_; // Manages pairs of secret name and GenericSecretConfigProviderSharedPtr. - std::unordered_map + absl::node_hash_map static_generic_secret_providers_; // map hash code of SDS config source and SdsApi object. 
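A small usage sketch of the runtime override path touched above. The flattened diff elides the map's template arguments, so string keys and values are assumed here based on the surrounding comments (an empty value meaning the key should be removed); the feature-flag key below is purely hypothetical:

#include <string>
#include "envoy/runtime/runtime.h"
#include "absl/container/node_hash_map.h"

// Hypothetical caller; `loader` stands in for a Runtime::Loader& obtained elsewhere.
void applyOverrides(Envoy::Runtime::Loader& loader) {
  absl::node_hash_map<std::string, std::string> overrides = {
      {"envoy.reloadable_features.some_flag", "true"}, // hypothetical key
      {"health_check.min_interval", ""},               // empty value: remove key
  };
  loader.mergeValues(overrides);
}
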
diff --git a/source/common/singleton/manager_impl.h b/source/common/singleton/manager_impl.h index 6f55ad3fadb2..e6eb8cb9af97 100644 --- a/source/common/singleton/manager_impl.h +++ b/source/common/singleton/manager_impl.h @@ -1,12 +1,12 @@ #pragma once -#include - #include "envoy/singleton/manager.h" #include "envoy/thread/thread.h" #include "common/common/non_copyable.h" +#include "absl/container/node_hash_map.h" + namespace Envoy { namespace Singleton { @@ -24,7 +24,7 @@ class ManagerImpl : public Manager, NonCopyable { InstanceSharedPtr get(const std::string& name, SingletonFactoryCb cb) override; private: - std::unordered_map> singletons_; + absl::node_hash_map> singletons_; Thread::ThreadFactory& thread_factory_; const Thread::ThreadId run_tid_; }; diff --git a/source/common/stats/BUILD b/source/common/stats/BUILD index 2a671b4c07ef..bc5c41f6e9e2 100644 --- a/source/common/stats/BUILD +++ b/source/common/stats/BUILD @@ -223,6 +223,7 @@ envoy_cc_library( name = "tag_producer_lib", srcs = ["tag_producer_impl.cc"], hdrs = ["tag_producer_impl.h"], + external_deps = ["abseil_node_hash_set"], deps = [ ":tag_extractor_lib", "//include/envoy/stats:stats_interface", diff --git a/source/common/stats/fake_symbol_table_impl.h b/source/common/stats/fake_symbol_table_impl.h index 9e4c5422f7a5..19bfa00daa79 100644 --- a/source/common/stats/fake_symbol_table_impl.h +++ b/source/common/stats/fake_symbol_table_impl.h @@ -5,7 +5,6 @@ #include #include #include -#include #include #include "envoy/common/exception.h" diff --git a/source/common/stats/symbol_table_impl.h b/source/common/stats/symbol_table_impl.h index 664c5b56dacb..09f79ac46cf5 100644 --- a/source/common/stats/symbol_table_impl.h +++ b/source/common/stats/symbol_table_impl.h @@ -5,7 +5,6 @@ #include #include #include -#include #include #include "envoy/common/exception.h" diff --git a/source/common/stats/tag_producer_impl.cc b/source/common/stats/tag_producer_impl.cc index 84b2ab158142..255dfcaeed39 100644 --- a/source/common/stats/tag_producer_impl.cc +++ b/source/common/stats/tag_producer_impl.cc @@ -14,7 +14,7 @@ namespace Stats { TagProducerImpl::TagProducerImpl(const envoy::config::metrics::v3::StatsConfig& config) { // To check name conflict. 
reserveResources(config); - std::unordered_set names = addDefaultExtractors(config); + absl::node_hash_set names = addDefaultExtractors(config); for (const auto& tag_specifier : config.stats_tags()) { const std::string& name = tag_specifier.tag_name(); @@ -97,9 +97,9 @@ void TagProducerImpl::reserveResources(const envoy::config::metrics::v3::StatsCo default_tags_.reserve(config.stats_tags().size()); } -std::unordered_set +absl::node_hash_set TagProducerImpl::addDefaultExtractors(const envoy::config::metrics::v3::StatsConfig& config) { - std::unordered_set names; + absl::node_hash_set names; if (!config.has_use_all_default_tags() || config.use_all_default_tags().value()) { for (const auto& desc : Config::TagNames::get().descriptorVec()) { names.emplace(desc.name_); diff --git a/source/common/stats/tag_producer_impl.h b/source/common/stats/tag_producer_impl.h index e8b27307b2b8..093d4021389b 100644 --- a/source/common/stats/tag_producer_impl.h +++ b/source/common/stats/tag_producer_impl.h @@ -4,8 +4,6 @@ #include #include #include -#include -#include #include #include "envoy/config/metrics/v3/stats.pb.h" @@ -18,6 +16,7 @@ #include "common/protobuf/protobuf.h" #include "absl/container/flat_hash_map.h" +#include "absl/container/node_hash_set.h" #include "absl/strings/string_view.h" namespace Envoy { @@ -71,9 +70,9 @@ class TagProducerImpl : public TagProducer { * into a string-set for dup-detection against new stat names * specified in the configuration. * @param config const envoy::config::metrics::v2::StatsConfig& the config. - * @return names std::unordered_set the set of names to populate + * @return names absl::node_hash_set the set of names to populate */ - std::unordered_set + absl::node_hash_set addDefaultExtractors(const envoy::config::metrics::v3::StatsConfig& config); /** diff --git a/source/common/tcp_proxy/tcp_proxy.h b/source/common/tcp_proxy/tcp_proxy.h index faa5727df255..871be2ad16f8 100644 --- a/source/common/tcp_proxy/tcp_proxy.h +++ b/source/common/tcp_proxy/tcp_proxy.h @@ -3,7 +3,6 @@ #include #include #include -#include #include #include "envoy/access_log/access_log.h" @@ -30,6 +29,8 @@ #include "common/tcp_proxy/upstream.h" #include "common/upstream/load_balancer_impl.h" +#include "absl/container/node_hash_map.h" + namespace Envoy { namespace TcpProxy { @@ -421,7 +422,7 @@ class UpstreamDrainManager : public ThreadLocal::ThreadLocalObject { // This must be a map instead of set because there is no way to move elements // out of a set, and these elements get passed to deferredDelete() instead of // being deleted in-place. The key and value will always be equal. 
- std::unordered_map drainers_; + absl::node_hash_map drainers_; }; } // namespace TcpProxy diff --git a/source/common/upstream/cds_api_impl.cc b/source/common/upstream/cds_api_impl.cc index 246bb0de1d27..b05ae5802cd3 100644 --- a/source/common/upstream/cds_api_impl.cc +++ b/source/common/upstream/cds_api_impl.cc @@ -14,6 +14,7 @@ #include "common/config/utility.h" #include "common/protobuf/utility.h" +#include "absl/container/node_hash_set.h" #include "absl/strings/str_join.h" namespace Envoy { @@ -63,7 +64,7 @@ void CdsApiImpl::onConfigUpdate(const std::vector& a removed_resources.size()); std::vector exception_msgs; - std::unordered_set cluster_names; + absl::node_hash_set cluster_names; bool any_applied = false; for (const auto& resource : added_resources) { envoy::config::cluster::v3::Cluster cluster; diff --git a/source/common/upstream/cluster_manager_impl.h b/source/common/upstream/cluster_manager_impl.h index 2c8bf10e2469..c229395c1353 100644 --- a/source/common/upstream/cluster_manager_impl.h +++ b/source/common/upstream/cluster_manager_impl.h @@ -327,7 +327,7 @@ class ClusterManagerImpl : public ClusterManager, Logger::Loggable>; + absl::node_hash_map>; struct ClusterEntry : public ThreadLocalCluster { ClusterEntry(ThreadLocalClusterManagerImpl& parent, ClusterInfoConstSharedPtr cluster, @@ -387,9 +387,9 @@ class ClusterManagerImpl : public ClusterManager, Logger::Loggable host_http_conn_pool_map_; - std::unordered_map host_tcp_conn_pool_map_; - std::unordered_map host_tcp_conn_map_; + absl::node_hash_map host_http_conn_pool_map_; + absl::node_hash_map host_tcp_conn_pool_map_; + absl::node_hash_map host_tcp_conn_map_; std::list update_callbacks_; const PrioritySet* local_priority_set_{}; @@ -468,9 +468,9 @@ class ClusterManagerImpl : public ClusterManager, Logger::Loggable; - using PendingUpdatesByPriorityMap = std::unordered_map; + using PendingUpdatesByPriorityMap = absl::node_hash_map; using PendingUpdatesByPriorityMapPtr = std::unique_ptr; - using ClusterUpdatesMap = std::unordered_map; + using ClusterUpdatesMap = absl::node_hash_map; void applyUpdates(const Cluster& cluster, uint32_t priority, PendingUpdates& updates); bool scheduleUpdate(const Cluster& cluster, uint32_t priority, bool mergeable, diff --git a/source/common/upstream/eds.cc b/source/common/upstream/eds.cc index 3433d608898c..d8dc0d21c242 100644 --- a/source/common/upstream/eds.cc +++ b/source/common/upstream/eds.cc @@ -47,7 +47,7 @@ EdsClusterImpl::EdsClusterImpl( void EdsClusterImpl::startPreInit() { subscription_->start({cluster_name_}); } void EdsClusterImpl::BatchUpdateHelper::batchUpdate(PrioritySet::HostUpdateCb& host_update_cb) { - std::unordered_map updated_hosts; + absl::node_hash_map updated_hosts; PriorityStateManager priority_state_manager(parent_, parent_.local_info_, &host_update_cb); for (const auto& locality_lb_endpoint : cluster_load_assignment_.endpoints()) { parent_.validateEndpointsForZoneAwareRouting(locality_lb_endpoint); @@ -234,7 +234,7 @@ bool EdsClusterImpl::updateHostsPerLocality( const uint32_t priority, const uint32_t overprovisioning_factor, const HostVector& new_hosts, LocalityWeightsMap& locality_weights_map, LocalityWeightsMap& new_locality_weights_map, PriorityStateManager& priority_state_manager, - std::unordered_map& updated_hosts) { + absl::node_hash_map& updated_hosts) { const auto& host_set = priority_set_.getOrCreateHostSet(priority, overprovisioning_factor); HostVectorSharedPtr current_hosts_copy(new HostVector(host_set.hosts())); diff --git 
a/source/common/upstream/eds.h b/source/common/upstream/eds.h index b1eab5a10972..4ab24c38788a 100644 --- a/source/common/upstream/eds.h +++ b/source/common/upstream/eds.h @@ -47,13 +47,13 @@ class EdsClusterImpl const std::string& system_version_info) override; void onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason reason, const EnvoyException* e) override; - using LocalityWeightsMap = std::unordered_map; + using LocalityWeightsMap = absl::node_hash_map; bool updateHostsPerLocality(const uint32_t priority, const uint32_t overprovisioning_factor, const HostVector& new_hosts, LocalityWeightsMap& locality_weights_map, LocalityWeightsMap& new_locality_weights_map, PriorityStateManager& priority_state_manager, - std::unordered_map& updated_hosts); + absl::node_hash_map& updated_hosts); bool validateUpdateSize(int num_resources); // ClusterImplBase diff --git a/source/common/upstream/health_checker_base_impl.h b/source/common/upstream/health_checker_base_impl.h index eaae6ddf93cb..ff2f62101f57 100644 --- a/source/common/upstream/health_checker_base_impl.h +++ b/source/common/upstream/health_checker_base_impl.h @@ -154,7 +154,7 @@ class HealthCheckerImplBase : public HealthChecker, const std::chrono::milliseconds unhealthy_interval_; const std::chrono::milliseconds unhealthy_edge_interval_; const std::chrono::milliseconds healthy_edge_interval_; - std::unordered_map active_sessions_; + absl::node_hash_map active_sessions_; const std::shared_ptr transport_socket_options_; const MetadataConstSharedPtr transport_socket_match_metadata_; }; diff --git a/source/common/upstream/load_balancer_impl.h b/source/common/upstream/load_balancer_impl.h index 2ec2f605cbd7..13ab0884f285 100644 --- a/source/common/upstream/load_balancer_impl.h +++ b/source/common/upstream/load_balancer_impl.h @@ -228,7 +228,7 @@ class ZoneAwareLoadBalancerBase : public LoadBalancerBase { struct HostsSourceHash { size_t operator()(const HostsSource& hs) const { - // This is only used for std::unordered_map keys, so we don't need a deterministic hash. + // This is only used for absl::node_hash_map keys, so we don't need a deterministic hash. size_t hash = std::hash()(hs.priority_); hash = 37 * hash + std::hash()(static_cast(hs.source_type_)); hash = 37 * hash + std::hash()(hs.locality_index_); @@ -387,7 +387,7 @@ class EdfLoadBalancerBase : public ZoneAwareLoadBalancerBase { const HostsSource& source) PURE; // Scheduler for each valid HostsSource. - std::unordered_map scheduler_; + absl::node_hash_map scheduler_; }; /** @@ -422,7 +422,7 @@ class RoundRobinLoadBalancer : public EdfLoadBalancerBase { return hosts_to_use[rr_indexes_[source]++ % hosts_to_use.size()]; } - std::unordered_map rr_indexes_; + absl::node_hash_map rr_indexes_; }; /** diff --git a/source/common/upstream/load_stats_reporter.cc b/source/common/upstream/load_stats_reporter.cc index 1b4556487d63..fa5697e86fbd 100644 --- a/source/common/upstream/load_stats_reporter.cc +++ b/source/common/upstream/load_stats_reporter.cc @@ -152,7 +152,7 @@ void LoadStatsReporter::startLoadReportPeriod() { // problems due to referencing of temporaries in the below loop with Google's // internal string type. Consider this optimization when the string types // converge. 
- std::unordered_map existing_clusters; + absl::node_hash_map existing_clusters; if (message_->send_all_clusters()) { for (const auto& p : cm_.clusters()) { const std::string& cluster_name = p.first; diff --git a/source/common/upstream/load_stats_reporter.h b/source/common/upstream/load_stats_reporter.h index b89f3d4f75c8..bd6ecfb39389 100644 --- a/source/common/upstream/load_stats_reporter.h +++ b/source/common/upstream/load_stats_reporter.h @@ -66,7 +66,7 @@ class LoadStatsReporter envoy::service::load_stats::v3::LoadStatsRequest request_; std::unique_ptr message_; // Map from cluster name to start of measurement interval. - std::unordered_map clusters_; + absl::node_hash_map clusters_; TimeSource& time_source_; }; diff --git a/source/common/upstream/original_dst_cluster.h b/source/common/upstream/original_dst_cluster.h index 52d8e56a30dc..14970a46094a 100644 --- a/source/common/upstream/original_dst_cluster.h +++ b/source/common/upstream/original_dst_cluster.h @@ -3,7 +3,6 @@ #include #include #include -#include #include "envoy/config/cluster/v3/cluster.pb.h" #include "envoy/secret/secret_manager.h" diff --git a/source/common/upstream/outlier_detection_impl.h b/source/common/upstream/outlier_detection_impl.h index c51cb134bfbf..dcaf3c638757 100644 --- a/source/common/upstream/outlier_detection_impl.h +++ b/source/common/upstream/outlier_detection_impl.h @@ -6,7 +6,6 @@ #include #include #include -#include #include #include "envoy/access_log/access_log.h" @@ -22,6 +21,8 @@ #include "envoy/upstream/outlier_detection.h" #include "envoy/upstream/upstream.h" +#include "absl/container/node_hash_map.h" + namespace Envoy { namespace Upstream { namespace Outlier { @@ -413,7 +414,7 @@ class DetectorImpl : public Detector, public std::enable_shared_from_this callbacks_; - std::unordered_map host_monitors_; + absl::node_hash_map host_monitors_; EventLoggerSharedPtr event_logger_; // EjectionPair for external and local origin events. 
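For readers following the container swap in the hunks above, here is a minimal, self-contained sketch of what the mechanical change amounts to. It is not taken from this patch (the cluster names and counts are made up); it only assumes Abseil's node_hash_map header is available. The declaration and the include change, while call sites keep working because the node-based container preserves the pointer stability that std::unordered_map provided.

#include <cstdint>
#include <iostream>
#include <string>

#include "absl/container/node_hash_map.h"

int main() {
  // Before this series the declaration would have been:
  //   std::unordered_map<std::string, uint64_t> counters;
  absl::node_hash_map<std::string, uint64_t> counters;

  counters.try_emplace("cluster_a", 1);
  const uint64_t* stable = &counters["cluster_a"];

  // Node-based containers keep element addresses stable across rehashes,
  // which is what makes this swap close to a drop-in replacement for code
  // that holds pointers or references into the map.
  for (int i = 0; i < 10000; ++i) {
    counters.try_emplace("cluster_" + std::to_string(i), i);
  }

  std::cout << "cluster_a is still " << *stable << "\n";
  return 0;
}
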
diff --git a/source/common/upstream/strict_dns_cluster.cc b/source/common/upstream/strict_dns_cluster.cc index eb7477b5b5cf..279bf47bef27 100644 --- a/source/common/upstream/strict_dns_cluster.cc +++ b/source/common/upstream/strict_dns_cluster.cc @@ -118,7 +118,7 @@ void StrictDnsClusterImpl::ResolveTarget::startResolve() { if (status == Network::DnsResolver::ResolutionStatus::Success) { parent_.info_->stats().update_success_.inc(); - std::unordered_map updated_hosts; + absl::node_hash_map updated_hosts; HostVector new_hosts; std::chrono::seconds ttl_refresh_rate = std::chrono::seconds::max(); for (const auto& resp : response) { diff --git a/source/common/upstream/subset_lb.cc b/source/common/upstream/subset_lb.cc index cce4fe9ffff1..6fe34d4853b7 100644 --- a/source/common/upstream/subset_lb.cc +++ b/source/common/upstream/subset_lb.cc @@ -1,7 +1,6 @@ #include "common/upstream/subset_lb.h" #include -#include #include "envoy/config/cluster/v3/cluster.pb.h" #include "envoy/config/core/v3/base.pb.h" @@ -15,6 +14,8 @@ #include "common/upstream/maglev_lb.h" #include "common/upstream/ring_hash_lb.h" +#include "absl/container/node_hash_set.h" + namespace Envoy { namespace Upstream { @@ -366,7 +367,7 @@ void SubsetLoadBalancer::processSubsets( const HostVector& hosts_added, const HostVector& hosts_removed, std::function update_cb, std::function new_cb) { - std::unordered_set subsets_modified; + absl::node_hash_set subsets_modified; std::pair steps[] = {{hosts_added, true}, {hosts_removed, false}}; for (const auto& step : steps) { @@ -602,11 +603,15 @@ void SubsetLoadBalancer::purgeEmptySubsets(LbSubsetMap& subsets) { stats_.lb_subsets_removed_.inc(); } - it = subset_it->second.erase(it); + auto next_it = std::next(it); + subset_it->second.erase(it); + it = next_it; } if (subset_it->second.empty()) { - subset_it = subsets.erase(subset_it); + auto next_subset_it = std::next(subset_it); + subsets.erase(subset_it); + subset_it = next_subset_it; } else { subset_it++; } @@ -691,8 +696,8 @@ void SubsetLoadBalancer::HostSubsetImpl::update(const HostVector& hosts_added, // that we maintain a consistent view of the metadata and saves on computation // since metadata lookups can be expensive. // - // We use an unordered_set because this can potentially be in the tens of thousands. - std::unordered_set matching_hosts; + // We use an unordered container because this can potentially be in the tens of thousands. 
+ absl::node_hash_set matching_hosts; auto cached_predicate = [&matching_hosts](const auto& host) { return matching_hosts.count(&host) == 1; diff --git a/source/common/upstream/subset_lb.h b/source/common/upstream/subset_lb.h index f1691768f682..c9fcb8d64eed 100644 --- a/source/common/upstream/subset_lb.h +++ b/source/common/upstream/subset_lb.h @@ -4,7 +4,6 @@ #include #include #include -#include #include "envoy/config/cluster/v3/cluster.pb.h" #include "envoy/runtime/runtime.h" @@ -16,6 +15,7 @@ #include "common/protobuf/utility.h" #include "common/upstream/upstream_impl.h" +#include "absl/container/node_hash_map.h" #include "absl/types/optional.h" namespace Envoy { @@ -120,8 +120,8 @@ class SubsetLoadBalancer : public LoadBalancer, Logger::Loggable; using SubsetSelectorMapPtr = std::shared_ptr; - using ValueSubsetMap = std::unordered_map; - using LbSubsetMap = std::unordered_map; + using ValueSubsetMap = absl::node_hash_map; + using LbSubsetMap = absl::node_hash_map; using SubsetSelectorFallbackParamsRef = std::reference_wrapper; class LoadBalancerContextWrapper : public LoadBalancerContext { @@ -171,7 +171,7 @@ class SubsetLoadBalancer : public LoadBalancer, Logger::Loggable subset_keys_; + absl::node_hash_map subset_keys_; SubsetSelectorFallbackParams fallback_params_; }; diff --git a/source/common/upstream/upstream_impl.cc b/source/common/upstream/upstream_impl.cc index 44fc4860a931..711b74076548 100644 --- a/source/common/upstream/upstream_impl.cc +++ b/source/common/upstream/upstream_impl.cc @@ -6,7 +6,6 @@ #include #include #include -#include #include #include "envoy/config/cluster/v3/circuit_breaker.pb.h" @@ -53,6 +52,7 @@ #include "extensions/filters/network/common/utility.h" #include "extensions/transport_sockets/well_known_names.h" +#include "absl/container/node_hash_set.h" #include "absl/strings/str_cat.h" namespace Envoy { @@ -233,8 +233,8 @@ bool updateHealthFlag(const Host& updated_host, Host& existing_host, Host::Healt // Converts a set of hosts into a HostVector, excluding certain hosts. // @param hosts hosts to convert // @param excluded_hosts hosts to exclude from the resulting vector. -HostVector filterHosts(const std::unordered_set& hosts, - const std::unordered_set& excluded_hosts) { +HostVector filterHosts(const absl::node_hash_set& hosts, + const absl::node_hash_set& excluded_hosts) { HostVector net_hosts; net_hosts.reserve(hosts.size()); @@ -1360,7 +1360,7 @@ bool BaseDynamicClusterImpl::updateDynamicHostList(const HostVector& new_hosts, // do the same thing. // Keep track of hosts we see in new_hosts that we are able to match up with an existing host. 
- std::unordered_set existing_hosts_for_current_priority( + absl::node_hash_set existing_hosts_for_current_priority( current_priority_hosts.size()); HostVector final_hosts; for (const HostSharedPtr& host : new_hosts) { diff --git a/source/common/upstream/upstream_impl.h b/source/common/upstream/upstream_impl.h index 3b42d06818c3..dad144554ed2 100644 --- a/source/common/upstream/upstream_impl.h +++ b/source/common/upstream/upstream_impl.h @@ -54,6 +54,7 @@ #include "server/transport_socket_config_impl.h" +#include "absl/container/node_hash_set.h" #include "absl/synchronization/mutex.h" namespace Envoy { @@ -498,12 +499,12 @@ class PrioritySetImpl : public PrioritySet { const HostVector& hosts_removed, absl::optional overprovisioning_factor) override; - std::unordered_set all_hosts_added_; - std::unordered_set all_hosts_removed_; + absl::node_hash_set all_hosts_added_; + absl::node_hash_set all_hosts_removed_; private: PrioritySetImpl& parent_; - std::unordered_set priorities_; + absl::node_hash_set priorities_; }; }; diff --git a/source/extensions/access_loggers/common/access_log_base.h b/source/extensions/access_loggers/common/access_log_base.h index 75f3237a434c..4fc1aae87d6c 100644 --- a/source/extensions/access_loggers/common/access_log_base.h +++ b/source/extensions/access_loggers/common/access_log_base.h @@ -2,7 +2,6 @@ #include #include -#include #include #include "envoy/access_log/access_log.h" diff --git a/source/extensions/access_loggers/file/config.cc b/source/extensions/access_loggers/file/config.cc index 60c536131389..a3e817c71058 100644 --- a/source/extensions/access_loggers/file/config.cc +++ b/source/extensions/access_loggers/file/config.cc @@ -1,7 +1,6 @@ #include "extensions/access_loggers/file/config.h" #include -#include #include "envoy/extensions/access_loggers/file/v3/file.pb.h" #include "envoy/extensions/access_loggers/file/v3/file.pb.validate.h" diff --git a/source/extensions/access_loggers/grpc/grpc_access_log_impl.h b/source/extensions/access_loggers/grpc/grpc_access_log_impl.h index 5cf04837c49b..2fe0d112d6f6 100644 --- a/source/extensions/access_loggers/grpc/grpc_access_log_impl.h +++ b/source/extensions/access_loggers/grpc/grpc_access_log_impl.h @@ -1,7 +1,6 @@ #pragma once #include -#include #include #include "envoy/data/accesslog/v3/accesslog.pb.h" diff --git a/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.h b/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.h index 0d6ec73ac0fd..fcae58bd5f10 100644 --- a/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.h +++ b/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.h @@ -1,7 +1,6 @@ #pragma once #include -#include #include #include "envoy/extensions/access_loggers/grpc/v3/als.pb.h" diff --git a/source/extensions/access_loggers/grpc/tcp_grpc_access_log_impl.h b/source/extensions/access_loggers/grpc/tcp_grpc_access_log_impl.h index 7a7260df7248..cf424bb92a3d 100644 --- a/source/extensions/access_loggers/grpc/tcp_grpc_access_log_impl.h +++ b/source/extensions/access_loggers/grpc/tcp_grpc_access_log_impl.h @@ -1,6 +1,5 @@ #pragma once -#include #include #include "envoy/extensions/access_loggers/grpc/v3/als.pb.h" diff --git a/source/extensions/clusters/redis/redis_cluster.cc b/source/extensions/clusters/redis/redis_cluster.cc index e4a07f005082..db5f04d91807 100644 --- a/source/extensions/clusters/redis/redis_cluster.cc +++ b/source/extensions/clusters/redis/redis_cluster.cc @@ -102,7 +102,7 @@ void RedisCluster::onClusterSlotUpdate(ClusterSlotsPtr&& slots) { 
} } - std::unordered_map updated_hosts; + absl::node_hash_map updated_hosts; Upstream::HostVector hosts_added; Upstream::HostVector hosts_removed; const bool host_updated = updateDynamicHostList(new_hosts, hosts_, hosts_added, hosts_removed, diff --git a/source/extensions/clusters/redis/redis_cluster.h b/source/extensions/clusters/redis/redis_cluster.h index a3e4574c7032..b3d842aa19de 100644 --- a/source/extensions/clusters/redis/redis_cluster.h +++ b/source/extensions/clusters/redis/redis_cluster.h @@ -247,7 +247,7 @@ class RedisCluster : public Upstream::BaseDynamicClusterImpl { Event::Dispatcher& dispatcher_; std::string current_host_address_; Extensions::NetworkFilters::Common::Redis::Client::PoolRequest* current_request_{}; - std::unordered_map client_map_; + absl::node_hash_map client_map_; std::list discovery_address_list_; diff --git a/source/extensions/common/tap/admin.h b/source/extensions/common/tap/admin.h index c876e9f7fd76..bf80f6889b17 100644 --- a/source/extensions/common/tap/admin.h +++ b/source/extensions/common/tap/admin.h @@ -6,6 +6,8 @@ #include "extensions/common/tap/tap.h" +#include "absl/container/node_hash_set.h" + namespace Envoy { namespace Extensions { namespace Common { @@ -80,7 +82,7 @@ class AdminHandler : public Singleton::Instance, Server::Admin& admin_; Event::Dispatcher& main_thread_dispatcher_; - std::unordered_map> config_id_map_; + absl::node_hash_map> config_id_map_; absl::optional attached_request_; }; diff --git a/source/extensions/common/utility.h b/source/extensions/common/utility.h index 309dca2e2107..60336fe5e444 100644 --- a/source/extensions/common/utility.h +++ b/source/extensions/common/utility.h @@ -1,7 +1,5 @@ #pragma once -#include - #include "envoy/common/exception.h" #include "envoy/runtime/runtime.h" diff --git a/source/extensions/filters/http/common/utility.h b/source/extensions/filters/http/common/utility.h index 23915b30f4df..b119e2db12b5 100644 --- a/source/extensions/filters/http/common/utility.h +++ b/source/extensions/filters/http/common/utility.h @@ -1,7 +1,5 @@ #pragma once -#include - #include "common/common/macros.h" #include "extensions/common/utility.h" diff --git a/source/extensions/filters/http/fault/fault_filter.h b/source/extensions/filters/http/fault/fault_filter.h index 2dfed7c9167d..206a8134c72c 100644 --- a/source/extensions/filters/http/fault/fault_filter.h +++ b/source/extensions/filters/http/fault/fault_filter.h @@ -3,7 +3,6 @@ #include #include #include -#include #include #include "envoy/extensions/filters/http/fault/v3/fault.pb.h" diff --git a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc index 222b9dbf00fb..e2998a3f5866 100644 --- a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc +++ b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc @@ -146,7 +146,11 @@ JsonTranscoderConfig::JsonTranscoderConfig( &descriptor_pool_)); PathMatcherBuilder pmb; + // clang-format off + // We cannot convert this to an absl hash set as PathMatcherUtility::RegisterByHttpRule takes a + // std::unordered_set as an argument std::unordered_set ignored_query_parameters; + // clang-format on for (const auto& query_param : proto_config.ignored_query_parameters()) { ignored_query_parameters.insert(query_param); } diff --git a/source/extensions/filters/http/grpc_web/grpc_web_filter.h b/source/extensions/filters/http/grpc_web/grpc_web_filter.h index 7dfd54d51f48..2ae3d2381fbf
100644 --- a/source/extensions/filters/http/grpc_web/grpc_web_filter.h +++ b/source/extensions/filters/http/grpc_web/grpc_web_filter.h @@ -1,7 +1,5 @@ #pragma once -#include - #include "envoy/http/filter.h" #include "envoy/upstream/cluster_manager.h" diff --git a/source/extensions/filters/http/jwt_authn/extractor.cc b/source/extensions/filters/http/jwt_authn/extractor.cc index 338187e6a139..b84f9fb4178f 100644 --- a/source/extensions/filters/http/jwt_authn/extractor.cc +++ b/source/extensions/filters/http/jwt_authn/extractor.cc @@ -9,6 +9,7 @@ #include "common/http/utility.h" #include "common/singleton/const_singleton.h" +#include "absl/container/node_hash_set.h" #include "absl/strings/match.h" using envoy::extensions::filters::http::jwt_authn::v3::JwtProvider; @@ -35,7 +36,7 @@ using JwtConstValues = ConstSingleton; // A base JwtLocation object to store token and specified_issuers. class JwtLocationBase : public JwtLocation { public: - JwtLocationBase(const std::string& token, const std::unordered_set& issuers) + JwtLocationBase(const std::string& token, const absl::node_hash_set& issuers) : token_(token), specified_issuers_(issuers) {} // Get the token string @@ -50,13 +51,13 @@ class JwtLocationBase : public JwtLocation { // Extracted token. const std::string token_; // Stored issuers specified the location. - const std::unordered_set& specified_issuers_; + const absl::node_hash_set& specified_issuers_; }; // The JwtLocation for header extraction. class JwtHeaderLocation : public JwtLocationBase { public: - JwtHeaderLocation(const std::string& token, const std::unordered_set& issuers, + JwtHeaderLocation(const std::string& token, const absl::node_hash_set& issuers, const LowerCaseString& header) : JwtLocationBase(token, issuers), header_(header) {} @@ -70,7 +71,7 @@ class JwtHeaderLocation : public JwtLocationBase { // The JwtLocation for param extraction. class JwtParamLocation : public JwtLocationBase { public: - JwtParamLocation(const std::string& token, const std::unordered_set& issuers, + JwtParamLocation(const std::string& token, const absl::node_hash_set& issuers, const std::string&) : JwtLocationBase(token, issuers) {} @@ -118,7 +119,7 @@ class ExtractorImpl : public Logger::Loggable, public Extractor // The value prefix. e.g. for "Bearer ", the value_prefix is "Bearer ". std::string value_prefix_; // Issuers that specified this header. - std::unordered_set specified_issuers_; + absl::node_hash_set specified_issuers_; }; using HeaderLocationSpecPtr = std::unique_ptr; // The map of (header + value_prefix) to HeaderLocationSpecPtr @@ -127,7 +128,7 @@ class ExtractorImpl : public Logger::Loggable, public Extractor // ParamMap value type to store issuers that specified this header. struct ParamLocationSpec { // Issuers that specified this param. 
- std::unordered_set specified_issuers_; + absl::node_hash_set specified_issuers_; }; // The map of a parameter key to set of issuers specified the parameter std::map param_locations_; diff --git a/source/extensions/filters/http/jwt_authn/extractor.h b/source/extensions/filters/http/jwt_authn/extractor.h index 83255f9a2982..8be7d9b830ca 100644 --- a/source/extensions/filters/http/jwt_authn/extractor.h +++ b/source/extensions/filters/http/jwt_authn/extractor.h @@ -2,7 +2,6 @@ #include #include -#include #include "envoy/extensions/filters/http/jwt_authn/v3/config.pb.h" #include "envoy/http/header_map.h" diff --git a/source/extensions/filters/http/jwt_authn/jwks_cache.cc b/source/extensions/filters/http/jwt_authn/jwks_cache.cc index 9c7034c08d0b..a6020ad9c055 100644 --- a/source/extensions/filters/http/jwt_authn/jwks_cache.cc +++ b/source/extensions/filters/http/jwt_authn/jwks_cache.cc @@ -1,7 +1,6 @@ #include "extensions/filters/http/jwt_authn/jwks_cache.h" #include -#include #include "envoy/common/time.h" #include "envoy/extensions/filters/http/jwt_authn/v3/config.pb.h" @@ -10,6 +9,7 @@ #include "common/config/datasource.h" #include "common/protobuf/utility.h" +#include "absl/container/node_hash_map.h" #include "jwt_verify_lib/check_audience.h" using envoy::extensions::filters::http::jwt_authn::v3::JwtAuthentication; @@ -125,9 +125,9 @@ class JwksCacheImpl : public JwksCache { private: // The Jwks data map indexed by provider. - std::unordered_map jwks_data_map_; + absl::node_hash_map jwks_data_map_; // The Jwks data pointer map indexed by issuer. - std::unordered_map issuer_ptr_map_; + absl::node_hash_map issuer_ptr_map_; }; } // namespace diff --git a/source/extensions/filters/http/jwt_authn/verifier.cc b/source/extensions/filters/http/jwt_authn/verifier.cc index 138a50e95a9d..e8b613911e8d 100644 --- a/source/extensions/filters/http/jwt_authn/verifier.cc +++ b/source/extensions/filters/http/jwt_authn/verifier.cc @@ -70,7 +70,7 @@ class ContextImpl : public Verifier::Context { Http::RequestHeaderMap& headers_; Tracing::Span& parent_span_; Verifier::Callbacks* callback_; - std::unordered_map completion_states_; + absl::node_hash_map completion_states_; std::vector auths_; ProtobufWkt::Struct payload_; }; diff --git a/source/extensions/filters/network/client_ssl_auth/client_ssl_auth.h b/source/extensions/filters/network/client_ssl_auth/client_ssl_auth.h index e5d1bf793706..53422e5f48d8 100644 --- a/source/extensions/filters/network/client_ssl_auth/client_ssl_auth.h +++ b/source/extensions/filters/network/client_ssl_auth/client_ssl_auth.h @@ -3,7 +3,6 @@ #include #include #include -#include #include "envoy/common/random_generator.h" #include "envoy/config/subscription.h" @@ -19,6 +18,8 @@ #include "common/network/utility.h" #include "common/protobuf/utility.h" +#include "absl/container/node_hash_set.h" + namespace Envoy { namespace Extensions { namespace NetworkFilters { @@ -59,7 +60,7 @@ class AllowedPrincipals : public ThreadLocal::ThreadLocalObject { size_t size() const { return allowed_sha256_digests_.size(); } private: - std::unordered_set allowed_sha256_digests_; + absl::node_hash_set allowed_sha256_digests_; }; using AllowedPrincipalsSharedPtr = std::shared_ptr; diff --git a/source/extensions/filters/network/common/utility.h b/source/extensions/filters/network/common/utility.h index 8c499cf1eb49..54a458aa7b62 100644 --- a/source/extensions/filters/network/common/utility.h +++ b/source/extensions/filters/network/common/utility.h @@ -1,7 +1,5 @@ #pragma once -#include - #include 
"common/common/macros.h" #include "extensions/common/utility.h" diff --git a/source/extensions/filters/network/dubbo_proxy/message.h b/source/extensions/filters/network/dubbo_proxy/message.h index 19a1f91d90d9..08a399fae8b7 100644 --- a/source/extensions/filters/network/dubbo_proxy/message.h +++ b/source/extensions/filters/network/dubbo_proxy/message.h @@ -7,6 +7,7 @@ #include "common/buffer/buffer_impl.h" +#include "absl/container/node_hash_map.h" #include "absl/types/optional.h" namespace Envoy { @@ -88,7 +89,7 @@ enum class RpcResponseType : uint8_t { class Context { public: - using AttachmentMap = std::unordered_map; + using AttachmentMap = absl::node_hash_map; bool hasAttachments() const { return !attachments_.empty(); } const AttachmentMap& attachments() const { return attachments_; } diff --git a/source/extensions/filters/network/dubbo_proxy/metadata.h b/source/extensions/filters/network/dubbo_proxy/metadata.h index 5f0037ca6ae3..698b50283ec6 100644 --- a/source/extensions/filters/network/dubbo_proxy/metadata.h +++ b/source/extensions/filters/network/dubbo_proxy/metadata.h @@ -2,7 +2,6 @@ #include #include -#include #include "common/common/assert.h" #include "common/common/empty_string.h" diff --git a/source/extensions/filters/network/dubbo_proxy/protocol.h b/source/extensions/filters/network/dubbo_proxy/protocol.h index b496699d42c2..09f16d4420da 100644 --- a/source/extensions/filters/network/dubbo_proxy/protocol.h +++ b/source/extensions/filters/network/dubbo_proxy/protocol.h @@ -1,7 +1,6 @@ #pragma once #include -#include #include "envoy/buffer/buffer.h" #include "envoy/config/typed_config.h" diff --git a/source/extensions/filters/network/dubbo_proxy/protocol_constants.h b/source/extensions/filters/network/dubbo_proxy/protocol_constants.h index 138905d22c1e..e7b787831e37 100644 --- a/source/extensions/filters/network/dubbo_proxy/protocol_constants.h +++ b/source/extensions/filters/network/dubbo_proxy/protocol_constants.h @@ -1,13 +1,13 @@ #pragma once -#include - #include "common/common/assert.h" #include "common/common/fmt.h" #include "common/singleton/const_singleton.h" #include "extensions/filters/network/dubbo_proxy/message.h" +#include "absl/container/node_hash_map.h" + namespace Envoy { namespace Extensions { namespace NetworkFilters { @@ -22,7 +22,7 @@ class ProtocolNameValues { template std::size_t operator()(T t) const { return static_cast(t); } }; - using ProtocolTypeNameMap = std::unordered_map; + using ProtocolTypeNameMap = absl::node_hash_map; const ProtocolTypeNameMap protocolTypeNameMap = { {ProtocolType::Dubbo, "dubbo"}, @@ -47,7 +47,7 @@ class SerializerNameValues { }; using SerializerTypeNameMap = - std::unordered_map; + absl::node_hash_map; const SerializerTypeNameMap serializerTypeNameMap = { {SerializationType::Hessian2, "hessian2"}, @@ -77,7 +77,7 @@ class ProtocolSerializerNameValues { #define GENERATE_PAIR(X, Y) generateKey(X, Y), generateValue(X, Y) - using ProtocolSerializerTypeNameMap = std::unordered_map; + using ProtocolSerializerTypeNameMap = absl::node_hash_map; const ProtocolSerializerTypeNameMap protocolSerializerTypeNameMap = { {GENERATE_PAIR(ProtocolType::Dubbo, SerializationType::Hessian2)}, diff --git a/source/extensions/filters/network/dubbo_proxy/router/route.h b/source/extensions/filters/network/dubbo_proxy/router/route.h index c9814aa18f2a..247cdf480f16 100644 --- a/source/extensions/filters/network/dubbo_proxy/router/route.h +++ b/source/extensions/filters/network/dubbo_proxy/router/route.h @@ -37,7 +37,7 @@ class RouteMatcherNameValues { 
}; using RouteMatcherNameMap = - std::unordered_map; + absl::node_hash_map; const RouteMatcherNameMap routeMatcherNameMap = { {RouteMatcherType::Default, "default"}, diff --git a/source/extensions/filters/network/dubbo_proxy/serializer.h b/source/extensions/filters/network/dubbo_proxy/serializer.h index 2d3c1125cb7f..8b12ccd43dc4 100644 --- a/source/extensions/filters/network/dubbo_proxy/serializer.h +++ b/source/extensions/filters/network/dubbo_proxy/serializer.h @@ -1,7 +1,6 @@ #pragma once #include -#include #include "envoy/buffer/buffer.h" #include "envoy/config/typed_config.h" diff --git a/source/extensions/filters/network/dubbo_proxy/serializer_impl.h b/source/extensions/filters/network/dubbo_proxy/serializer_impl.h index cec6ac1a0252..1c9bcd7ccf0b 100644 --- a/source/extensions/filters/network/dubbo_proxy/serializer_impl.h +++ b/source/extensions/filters/network/dubbo_proxy/serializer_impl.h @@ -11,7 +11,7 @@ namespace DubboProxy { class RpcInvocationImpl : public RpcInvocationBase { public: // TODO(gengleilei) Add parameter data types and implement Dubbo data type mapping. - using ParameterValueMap = std::unordered_map; + using ParameterValueMap = absl::node_hash_map; using ParameterValueMapPtr = std::unique_ptr; RpcInvocationImpl() = default; diff --git a/source/extensions/filters/network/redis_proxy/command_splitter_impl.h b/source/extensions/filters/network/redis_proxy/command_splitter_impl.h index 813597dc0d2a..630bbcb71503 100644 --- a/source/extensions/filters/network/redis_proxy/command_splitter_impl.h +++ b/source/extensions/filters/network/redis_proxy/command_splitter_impl.h @@ -2,7 +2,6 @@ #include #include -#include #include #include "envoy/stats/scope.h" diff --git a/source/extensions/filters/network/redis_proxy/conn_pool_impl.h b/source/extensions/filters/network/redis_proxy/conn_pool_impl.h index aaa25c238510..2a6c643cfeed 100644 --- a/source/extensions/filters/network/redis_proxy/conn_pool_impl.h +++ b/source/extensions/filters/network/redis_proxy/conn_pool_impl.h @@ -5,7 +5,6 @@ #include #include #include -#include #include #include "envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.pb.h" @@ -30,6 +29,8 @@ #include "extensions/filters/network/common/redis/utility.h" #include "extensions/filters/network/redis_proxy/conn_pool.h" +#include "absl/container/node_hash_map.h" + namespace Envoy { namespace Extensions { namespace NetworkFilters { @@ -154,9 +155,9 @@ class InstanceImpl : public Instance, public std::enable_shared_from_this client_map_; + absl::node_hash_map client_map_; Envoy::Common::CallbackHandle* host_set_member_update_cb_handle_{}; - std::unordered_map host_address_map_; + absl::node_hash_map host_address_map_; std::string auth_username_; std::string auth_password_; std::list created_via_redirect_hosts_; diff --git a/source/extensions/filters/network/rocketmq_proxy/active_message.cc b/source/extensions/filters/network/rocketmq_proxy/active_message.cc index c9e3bd14c2c3..3f38565da684 100644 --- a/source/extensions/filters/network/rocketmq_proxy/active_message.cc +++ b/source/extensions/filters/network/rocketmq_proxy/active_message.cc @@ -134,7 +134,7 @@ void ActiveMessage::fillBrokerData(std::vector& list, const std::str } if (!found) { - std::unordered_map addresses; + absl::node_hash_map addresses; addresses.emplace(broker_id, address); list.emplace_back(BrokerData(cluster, broker_name, std::move(addresses))); diff --git a/source/extensions/filters/network/rocketmq_proxy/topic_route.h 
b/source/extensions/filters/network/rocketmq_proxy/topic_route.h index 2b9afdb1d526..f6c1bc9eba19 100644 --- a/source/extensions/filters/network/rocketmq_proxy/topic_route.h +++ b/source/extensions/filters/network/rocketmq_proxy/topic_route.h @@ -1,11 +1,12 @@ #pragma once #include -#include #include #include "common/protobuf/utility.h" +#include "absl/container/node_hash_map.h" + namespace Envoy { namespace Extensions { namespace NetworkFilters { @@ -37,7 +38,7 @@ class QueueData { class BrokerData { public: BrokerData(const std::string& cluster, const std::string& broker_name, - std::unordered_map&& broker_addrs) + absl::node_hash_map&& broker_addrs) : cluster_(cluster), broker_name_(broker_name), broker_addrs_(broker_addrs) {} void encode(ProtobufWkt::Struct& data_struct); @@ -46,12 +47,12 @@ class BrokerData { const std::string& brokerName() const { return broker_name_; } - std::unordered_map& brokerAddresses() { return broker_addrs_; } + absl::node_hash_map& brokerAddresses() { return broker_addrs_; } private: std::string cluster_; std::string broker_name_; - std::unordered_map broker_addrs_; + absl::node_hash_map broker_addrs_; }; class TopicRouteData { diff --git a/source/extensions/filters/network/thrift_proxy/decoder.cc b/source/extensions/filters/network/thrift_proxy/decoder.cc index c02a4b1dc062..73a12ff23377 100644 --- a/source/extensions/filters/network/thrift_proxy/decoder.cc +++ b/source/extensions/filters/network/thrift_proxy/decoder.cc @@ -1,7 +1,5 @@ #include "extensions/filters/network/thrift_proxy/decoder.h" -#include - #include "envoy/common/exception.h" #include "common/common/assert.h" diff --git a/source/extensions/filters/network/zookeeper_proxy/decoder.h b/source/extensions/filters/network/zookeeper_proxy/decoder.h index 6492f2179f5c..85b99fdffbf7 100644 --- a/source/extensions/filters/network/zookeeper_proxy/decoder.h +++ b/source/extensions/filters/network/zookeeper_proxy/decoder.h @@ -10,6 +10,8 @@ #include "extensions/filters/network/zookeeper_proxy/utils.h" +#include "absl/container/node_hash_map.h" + namespace Envoy { namespace Extensions { namespace NetworkFilters { @@ -169,7 +171,7 @@ class DecoderImpl : public Decoder, Logger::Loggable { const uint32_t max_packet_bytes_; BufferHelper helper_; TimeSource& time_source_; - std::unordered_map requests_by_xid_; + absl::node_hash_map requests_by_xid_; }; } // namespace ZooKeeperProxy diff --git a/source/extensions/quic_listeners/quiche/platform/quiche_unordered_containers_impl.h b/source/extensions/quic_listeners/quiche/platform/quiche_unordered_containers_impl.h index 508efe2ee01f..f3e4130b01ff 100644 --- a/source/extensions/quic_listeners/quiche/platform/quiche_unordered_containers_impl.h +++ b/source/extensions/quic_listeners/quiche/platform/quiche_unordered_containers_impl.h @@ -14,7 +14,7 @@ namespace quiche { // The default hasher used by hash tables. template using QuicheDefaultHasherImpl = absl::Hash; -// Similar to std::unordered_map, but with better performance and memory usage. +// Similar to absl::node_hash_map, but with better performance and memory usage. 
template using QuicheUnorderedMapImpl = absl::node_hash_map; diff --git a/source/extensions/stat_sinks/hystrix/hystrix.cc b/source/extensions/stat_sinks/hystrix/hystrix.cc index 82eb9906612b..a35f67a8d3f7 100644 --- a/source/extensions/stat_sinks/hystrix/hystrix.cc +++ b/source/extensions/stat_sinks/hystrix/hystrix.cc @@ -342,7 +342,7 @@ void HystrixSink::flush(Stats::MetricSnapshot& snapshot) { Upstream::ClusterManager::ClusterInfoMap clusters = server_.clusterManager().clusters(); // Save a map of the relevant histograms per cluster in a convenient format. - std::unordered_map time_histograms; + absl::node_hash_map time_histograms; for (const auto& histogram : snapshot.histograms()) { if (histogram.get().tagExtractedStatName() == cluster_upstream_rq_time_) { absl::optional value = @@ -410,7 +410,9 @@ void HystrixSink::flush(Stats::MetricSnapshot& snapshot) { if (clusters.size() < cluster_stats_cache_map_.size()) { for (auto it = cluster_stats_cache_map_.begin(); it != cluster_stats_cache_map_.end();) { if (clusters.find(it->first) == clusters.end()) { - it = cluster_stats_cache_map_.erase(it); + auto next_it = std::next(it); + cluster_stats_cache_map_.erase(it); + it = next_it; } else { ++it; } diff --git a/source/extensions/stat_sinks/hystrix/hystrix.h b/source/extensions/stat_sinks/hystrix/hystrix.h index 70185e5730bd..796e72d1f97a 100644 --- a/source/extensions/stat_sinks/hystrix/hystrix.h +++ b/source/extensions/stat_sinks/hystrix/hystrix.h @@ -19,7 +19,7 @@ namespace Hystrix { using RollingWindow = std::vector; using RollingStatsMap = std::map; -using QuantileLatencyMap = std::unordered_map; +using QuantileLatencyMap = absl::node_hash_map; static const std::vector hystrix_quantiles = {0, 0.25, 0.5, 0.75, 0.90, 0.95, 0.99, 0.995, 1}; @@ -155,7 +155,7 @@ class HystrixSink : public Stats::Sink, public Logger::Loggable cluster_stats_cache_map_; + absl::node_hash_map cluster_stats_cache_map_; // Saved StatNames for fast comparisons in loop. // TODO(mattklein123): Many/all of these stats should just be pulled directly from the cluster diff --git a/source/extensions/transport_sockets/alts/BUILD b/source/extensions/transport_sockets/alts/BUILD index 5145a1abdb1d..a667fac37e14 100644 --- a/source/extensions/transport_sockets/alts/BUILD +++ b/source/extensions/transport_sockets/alts/BUILD @@ -34,6 +34,9 @@ envoy_cc_extension( hdrs = [ "config.h", ], + external_deps = [ + "abseil_node_hash_set", + ], security_posture = "robust_to_untrusted_downstream_and_upstream", deps = [ ":tsi_handshaker", diff --git a/source/extensions/transport_sockets/alts/config.cc b/source/extensions/transport_sockets/alts/config.cc index c45e7f0a9ee1..1d8b60eab386 100644 --- a/source/extensions/transport_sockets/alts/config.cc +++ b/source/extensions/transport_sockets/alts/config.cc @@ -13,6 +13,7 @@ #include "extensions/transport_sockets/alts/grpc_tsi.h" #include "extensions/transport_sockets/alts/tsi_socket.h" +#include "absl/container/node_hash_set.h" #include "absl/strings/str_join.h" namespace Envoy { @@ -37,7 +38,7 @@ void grpcAltsSetRpcProtocolVersions(grpc_gcp_rpc_protocol_versions* rpc_versions // Returns true if the peer's service account is found in peers, otherwise // returns false and fills out err with an error message. 
-bool doValidate(const tsi_peer& peer, const std::unordered_set& peers, +bool doValidate(const tsi_peer& peer, const absl::node_hash_set& peers, std::string& err) { for (size_t i = 0; i < peer.property_count; ++i) { const std::string name = std::string(peer.properties[i].name); @@ -57,8 +58,8 @@ bool doValidate(const tsi_peer& peer, const std::unordered_set& pee HandshakeValidator createHandshakeValidator(const envoy::extensions::transport_sockets::alts::v3::Alts& config) { const auto& peer_service_accounts = config.peer_service_accounts(); - const std::unordered_set peers(peer_service_accounts.cbegin(), - peer_service_accounts.cend()); + const absl::node_hash_set peers(peer_service_accounts.cbegin(), + peer_service_accounts.cend()); HandshakeValidator validator; // Skip validation if peers is empty. if (!peers.empty()) { diff --git a/source/extensions/transport_sockets/tls/BUILD b/source/extensions/transport_sockets/tls/BUILD index b26ce0cc4d14..6b14b5b0a870 100644 --- a/source/extensions/transport_sockets/tls/BUILD +++ b/source/extensions/transport_sockets/tls/BUILD @@ -91,6 +91,7 @@ envoy_cc_library( "context_manager_impl.h", ], external_deps = [ + "abseil_node_hash_set", "abseil_synchronization", "ssl", ], diff --git a/source/extensions/transport_sockets/tls/context_impl.cc b/source/extensions/transport_sockets/tls/context_impl.cc index f42f9077fc42..369bdd460f98 100644 --- a/source/extensions/transport_sockets/tls/context_impl.cc +++ b/source/extensions/transport_sockets/tls/context_impl.cc @@ -24,6 +24,7 @@ #include "extensions/transport_sockets/tls/utility.h" +#include "absl/container/node_hash_set.h" #include "absl/strings/match.h" #include "absl/strings/str_join.h" #include "openssl/evp.h" @@ -268,7 +269,7 @@ ContextImpl::ContextImpl(Stats::Scope& scope, const Envoy::Ssl::ContextConfig& c } } - std::unordered_set cert_pkey_ids; + absl::node_hash_set cert_pkey_ids; for (uint32_t i = 0; i < tls_certificates.size(); ++i) { auto& ctx = tls_contexts_[i]; // Load certificate chain. 
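As an illustration of the set conversions in this area (for example the ALTS peer-validation change above), a small standalone sketch follows. It is not part of the patch; isAllowedPeer and the service-account strings are hypothetical, and it only assumes the node_hash_set header is available. The point is that node_hash_set keeps the iterator-range constructor and lookup interface of std::unordered_set, so the construction from the configured peer list keeps its shape.

#include <iostream>
#include <string>
#include <vector>

#include "absl/container/node_hash_set.h"

// Hypothetical stand-in for a validator: the set of allowed peers is built
// once from configuration and afterwards only queried.
bool isAllowedPeer(const std::string& peer,
                   const absl::node_hash_set<std::string>& allowed) {
  return allowed.find(peer) != allowed.end();
}

int main() {
  const std::vector<std::string> configured = {"svc-a", "svc-b"};
  // Like std::unordered_set, node_hash_set can be built from an iterator
  // range, which is the pattern used for the configured peer accounts.
  const absl::node_hash_set<std::string> allowed(configured.begin(), configured.end());

  std::cout << std::boolalpha << isAllowedPeer("svc-a", allowed) << "\n";  // true
  std::cout << std::boolalpha << isAllowedPeer("svc-c", allowed) << "\n";  // false
  return 0;
}
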
diff --git a/source/server/admin/admin.cc b/source/server/admin/admin.cc index 227f0a92277e..7745860a1c9d 100644 --- a/source/server/admin/admin.cc +++ b/source/server/admin/admin.cc @@ -4,7 +4,6 @@ #include #include #include -#include #include #include diff --git a/source/server/admin/admin.h b/source/server/admin/admin.h index 4cf81cba4f9e..093ed76e4156 100644 --- a/source/server/admin/admin.h +++ b/source/server/admin/admin.h @@ -4,7 +4,6 @@ #include #include #include -#include #include #include diff --git a/source/server/admin/runtime_handler.cc b/source/server/admin/runtime_handler.cc index 869427f694b8..5719f4ac730e 100644 --- a/source/server/admin/runtime_handler.cc +++ b/source/server/admin/runtime_handler.cc @@ -1,7 +1,6 @@ #include "server/admin/runtime_handler.h" #include -#include #include #include "common/common/empty_string.h" @@ -10,6 +9,8 @@ #include "server/admin/utils.h" +#include "absl/container/node_hash_map.h" + namespace Envoy { namespace Server { @@ -96,7 +97,7 @@ Http::Code RuntimeHandler::handlerRuntimeModify(absl::string_view url, Http::Res return Http::Code::BadRequest; } } - std::unordered_map overrides; + absl::node_hash_map overrides; overrides.insert(params.begin(), params.end()); try { server_.runtime().mergeValues(overrides); diff --git a/source/server/configuration_impl.h b/source/server/configuration_impl.h index becb15a39745..d1c88000c1d1 100644 --- a/source/server/configuration_impl.h +++ b/source/server/configuration_impl.h @@ -6,7 +6,6 @@ #include #include #include -#include #include #include "envoy/config/bootstrap/v3/bootstrap.pb.h" diff --git a/source/server/connection_handler_impl.cc b/source/server/connection_handler_impl.cc index 3d2a38928dad..5aaf9b7708c0 100644 --- a/source/server/connection_handler_impl.cc +++ b/source/server/connection_handler_impl.cc @@ -453,7 +453,7 @@ void ConnectionHandlerImpl::ActiveTcpListener::deferredRemoveFilterChains( // Since is_deleting_ is on, we need to manually remove the map value and drive the iterator. // Defer delete connection container to avoid race condition in destroying connection. parent_.dispatcher_.deferredDelete(std::move(iter->second)); - iter = connections_by_context_.erase(iter); + connections_by_context_.erase(iter); } } is_deleting_ = was_deleting; diff --git a/source/server/connection_handler_impl.h b/source/server/connection_handler_impl.h index df6fa758bd5b..4fe28847be48 100644 --- a/source/server/connection_handler_impl.h +++ b/source/server/connection_handler_impl.h @@ -181,7 +181,7 @@ class ConnectionHandlerImpl : public Network::ConnectionHandler, const std::chrono::milliseconds listener_filters_timeout_; const bool continue_on_listener_filters_timeout_; std::list sockets_; - std::unordered_map connections_by_context_; + absl::node_hash_map connections_by_context_; // The number of connections currently active on this listener. This is typically used for // connection balancing across per-handler listeners. 
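Several loops earlier in this patch (subset_lb.cc, hystrix.cc) switch from the usual "it = map.erase(it);" idiom to capturing std::next(it) before erasing. The reason is that the single-element erase() of the Abseil hash containers returns void, unlike std::unordered_map::erase, which returns the next iterator. Below is a minimal sketch of that pattern, not taken from the patch; the map contents are illustrative and only Abseil is assumed.

#include <iostream>
#include <iterator>
#include <string>

#include "absl/container/node_hash_map.h"

int main() {
  absl::node_hash_map<std::string, int> m = {{"a", 1}, {"b", 2}, {"c", 3}, {"d", 4}};

  // With std::unordered_map this loop could write `it = m.erase(it);`. The
  // absl containers' single-element erase() returns void, so the next
  // iterator is captured first; erasing one element does not invalidate
  // iterators to the remaining elements.
  for (auto it = m.begin(); it != m.end();) {
    if (it->second % 2 == 0) {
      auto next_it = std::next(it);
      m.erase(it);
      it = next_it;
    } else {
      ++it;
    }
  }

  for (const auto& entry : m) {
    std::cout << entry.first << "=" << entry.second << "\n";
  }
  return 0;
}
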
diff --git a/source/server/filter_chain_manager_impl.cc b/source/server/filter_chain_manager_impl.cc index 00f71743fa5b..3e1169c17531 100644 --- a/source/server/filter_chain_manager_impl.cc +++ b/source/server/filter_chain_manager_impl.cc @@ -11,6 +11,7 @@ #include "server/configuration_impl.h" +#include "absl/container/node_hash_set.h" #include "absl/strings/match.h" #include "absl/strings/str_cat.h" @@ -149,7 +150,7 @@ void FilterChainManagerImpl::addFilterChain( FilterChainFactoryBuilder& filter_chain_factory_builder, FilterChainFactoryContextCreator& context_creator) { Cleanup cleanup([this]() { origin_ = absl::nullopt; }); - std::unordered_set + absl::node_hash_set filter_chains; uint32_t new_filter_chain_size = 0; for (const auto& filter_chain : filter_chain_span) { diff --git a/source/server/lds_api.cc b/source/server/lds_api.cc index fc6ced5853ee..3165a1525ce6 100644 --- a/source/server/lds_api.cc +++ b/source/server/lds_api.cc @@ -1,7 +1,5 @@ #include "server/lds_api.h" -#include - #include "envoy/admin/v3/config_dump.pb.h" #include "envoy/api/v2/listener.pb.h" #include "envoy/config/core/v3/config_source.pb.h" @@ -15,6 +13,7 @@ #include "common/config/utility.h" #include "common/protobuf/utility.h" +#include "absl/container/node_hash_set.h" #include "absl/strings/str_join.h" namespace Envoy { @@ -57,7 +56,7 @@ void LdsApiImpl::onConfigUpdate(const std::vector& a } ListenerManager::FailureStates failure_state; - std::unordered_set listener_names; + absl::node_hash_set listener_names; std::string message; for (const auto& resource : added_resources) { envoy::config::listener::v3::Listener listener; @@ -97,7 +96,7 @@ void LdsApiImpl::onConfigUpdate(const std::vector& r const std::string& version_info) { // We need to keep track of which listeners need to remove. // Specifically, it's [listeners we currently have] - [listeners found in the response]. 
- std::unordered_set listeners_to_remove; + absl::node_hash_set listeners_to_remove; for (const auto& listener : listener_manager_.listeners()) { listeners_to_remove.insert(listener.get().name()); } diff --git a/source/server/overload_manager_impl.cc b/source/server/overload_manager_impl.cc index 40156ed9b179..1e4e085bb890 100644 --- a/source/server/overload_manager_impl.cc +++ b/source/server/overload_manager_impl.cc @@ -10,6 +10,7 @@ #include "server/resource_monitor_config_impl.h" +#include "absl/container/node_hash_map.h" #include "absl/strings/str_cat.h" namespace Envoy { @@ -51,7 +52,7 @@ class ThreadLocalOverloadStateImpl : public ThreadLocalOverloadState { void setState(const std::string& action, OverloadActionState state) { actions_[action] = state; } private: - std::unordered_map actions_; + absl::node_hash_map actions_; }; Stats::Counter& makeCounter(Stats::Scope& scope, absl::string_view a, absl::string_view b) { @@ -84,7 +85,7 @@ OverloadAction::OverloadAction(const envoy::config::overload::v3::OverloadAction NOT_REACHED_GCOVR_EXCL_LINE; } - if (!triggers_.insert(std::make_pair(trigger_config.name(), std::move(trigger))).second) { + if (!triggers_.try_emplace(trigger_config.name(), std::move(trigger)).second) { throw EnvoyException( absl::StrCat("Duplicate trigger resource for overload action ", config.name())); } @@ -132,9 +133,7 @@ OverloadManagerImpl::OverloadManagerImpl(Event::Dispatcher& dispatcher, Stats::S auto config = Config::Utility::translateToFactoryConfig(resource, validation_visitor, factory); auto monitor = factory.createResourceMonitor(*config, context); - auto result = - resources_.emplace(std::piecewise_construct, std::forward_as_tuple(name), - std::forward_as_tuple(name, std::move(monitor), *this, stats_scope)); + auto result = resources_.try_emplace(name, name, std::move(monitor), *this, stats_scope); if (!result.second) { throw EnvoyException(absl::StrCat("Duplicate resource monitor ", name)); } @@ -143,8 +142,12 @@ OverloadManagerImpl::OverloadManagerImpl(Event::Dispatcher& dispatcher, Stats::S for (const auto& action : config.actions()) { const auto& name = action.name(); ENVOY_LOG(debug, "Adding overload action {}", name); - auto result = actions_.emplace(std::piecewise_construct, std::forward_as_tuple(name), - std::forward_as_tuple(action, stats_scope)); + // TODO: use in place construction once https://github.com/abseil/abseil-cpp/issues/388 is + // addressed + // We cannot currently use in place construction as the OverloadAction constructor may throw, + // causing an inconsistent internal state of the actions_ map, which on destruction results in + // an invalid free. 
+ auto result = actions_.try_emplace(name, OverloadAction(action, stats_scope)); if (!result.second) { throw EnvoyException(absl::StrCat("Duplicate overload action ", name)); } diff --git a/source/server/overload_manager_impl.h b/source/server/overload_manager_impl.h index d76eedc3659f..4405bfeaf3aa 100644 --- a/source/server/overload_manager_impl.h +++ b/source/server/overload_manager_impl.h @@ -1,8 +1,6 @@ #pragma once #include -#include -#include #include #include "envoy/api/api.h" @@ -17,6 +15,9 @@ #include "common/common/logger.h" +#include "absl/container/node_hash_map.h" +#include "absl/container/node_hash_set.h" + namespace Envoy { namespace Server { @@ -45,8 +46,8 @@ class OverloadAction { using TriggerPtr = std::unique_ptr; private: - std::unordered_map triggers_; - std::unordered_set fired_triggers_; + absl::node_hash_map triggers_; + absl::node_hash_set fired_triggers_; Stats::Gauge& active_gauge_; }; @@ -104,8 +105,8 @@ class OverloadManagerImpl : Logger::Loggable, public OverloadM ThreadLocal::SlotPtr tls_; const std::chrono::milliseconds refresh_interval_; Event::TimerPtr timer_; - std::unordered_map resources_; - std::unordered_map actions_; + absl::node_hash_map resources_; + absl::node_hash_map actions_; using ResourceToActionMap = std::unordered_multimap; ResourceToActionMap resource_to_actions_; diff --git a/source/server/server.cc b/source/server/server.cc index f192817978e0..912665143c98 100644 --- a/source/server/server.cc +++ b/source/server/server.cc @@ -5,7 +5,6 @@ #include #include #include -#include #include "envoy/admin/v3/config_dump.pb.h" #include "envoy/common/exception.h" diff --git a/test/common/filesystem/directory_test.cc b/test/common/filesystem/directory_test.cc index b9bfa86a6a47..82f44f977238 100644 --- a/test/common/filesystem/directory_test.cc +++ b/test/common/filesystem/directory_test.cc @@ -1,13 +1,13 @@ #include #include #include -#include #include "common/filesystem/directory.h" #include "test/test_common/environment.h" #include "test/test_common/utility.h" +#include "absl/container/node_hash_set.h" #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -66,7 +66,7 @@ struct EntryHash { } }; -using EntrySet = std::unordered_set; +using EntrySet = absl::node_hash_set; EntrySet getDirectoryContents(const std::string& dir_path, bool recursive) { Directory directory(dir_path); diff --git a/test/common/formatter/substitution_formatter_test.cc b/test/common/formatter/substitution_formatter_test.cc index 882b9910fa20..66ed458a2344 100644 --- a/test/common/formatter/substitution_formatter_test.cc +++ b/test/common/formatter/substitution_formatter_test.cc @@ -1494,7 +1494,7 @@ TEST(SubstitutionFormatterTest, GrpcStatusFormatterTest) { } void verifyJsonOutput(std::string json_string, - std::unordered_map expected_map) { + absl::node_hash_map expected_map) { const auto parsed = Json::Factory::loadFromString(json_string); // Every json log line should have only one newline character, and it should be the last character @@ -1520,7 +1520,7 @@ TEST(SubstitutionFormatterTest, JsonFormatterPlainStringTest) { absl::optional protocol = Http::Protocol::Http11; EXPECT_CALL(stream_info, protocol()).WillRepeatedly(Return(protocol)); - std::unordered_map expected_json_map = { + absl::node_hash_map expected_json_map = { {"plain_string", "plain_string_value"}}; absl::flat_hash_map key_mapping = { @@ -1544,7 +1544,7 @@ TEST(SubstitutionFormatterTest, JsonFormatterSingleOperatorTest) { absl::optional protocol = Http::Protocol::Http11; EXPECT_CALL(stream_info, 
protocol()).WillRepeatedly(Return(protocol)); - std::unordered_map expected_json_map = {{"protocol", "HTTP/1.1"}}; + absl::node_hash_map expected_json_map = {{"protocol", "HTTP/1.1"}}; absl::flat_hash_map key_mapping = {{"protocol", "%PROTOCOL%"}}; JsonFormatterImpl formatter(key_mapping, false); @@ -1561,7 +1561,7 @@ TEST(SubstitutionFormatterTest, JsonFormatterNonExistentHeaderTest) { Http::TestResponseTrailerMapImpl response_trailer; std::string body; - std::unordered_map expected_json_map = { + absl::node_hash_map expected_json_map = { {"protocol", "HTTP/1.1"}, {"some_request_header", "SOME_REQUEST_HEADER"}, {"nonexistent_response_header", "-"}, @@ -1591,7 +1591,7 @@ TEST(SubstitutionFormatterTest, JsonFormatterAlternateHeaderTest) { Http::TestResponseTrailerMapImpl response_trailer; std::string body; - std::unordered_map expected_json_map = { + absl::node_hash_map expected_json_map = { {"request_present_header_or_request_absent_header", "REQUEST_PRESENT_HEADER"}, {"request_absent_header_or_request_present_header", "REQUEST_PRESENT_HEADER"}, {"response_absent_header_or_response_absent_header", "RESPONSE_PRESENT_HEADER"}, @@ -1628,7 +1628,7 @@ TEST(SubstitutionFormatterTest, JsonFormatterDynamicMetadataTest) { EXPECT_CALL(stream_info, dynamicMetadata()).WillRepeatedly(ReturnRef(metadata)); EXPECT_CALL(Const(stream_info), dynamicMetadata()).WillRepeatedly(ReturnRef(metadata)); - std::unordered_map expected_json_map = { + absl::node_hash_map expected_json_map = { {"test_key", "\"test_value\""}, {"test_obj", "{\"inner_key\":\"inner_value\"}"}, {"test_obj.inner_key", "\"inner_value\""}}; @@ -1690,7 +1690,7 @@ TEST(SubstitutionFormatterTest, JsonFormatterFilterStateTest) { StreamInfo::FilterState::StateType::ReadOnly); EXPECT_CALL(Const(stream_info), filterState()).Times(testing::AtLeast(1)); - std::unordered_map expected_json_map = { + absl::node_hash_map expected_json_map = { {"test_key", "\"test_value\""}, {"test_obj", "{\"inner_key\":\"inner_value\"}"}}; absl::flat_hash_map key_mapping = { @@ -1746,7 +1746,7 @@ TEST(SubstitutionFormatterTest, FilterStateSpeciferTest) { StreamInfo::FilterState::StateType::ReadOnly); EXPECT_CALL(Const(stream_info), filterState()).Times(testing::AtLeast(1)); - std::unordered_map expected_json_map = { + absl::node_hash_map expected_json_map = { {"test_key_plain", "test_value By PLAIN"}, {"test_key_typed", "\"test_value By TYPED\""}, }; @@ -1823,7 +1823,7 @@ TEST(SubstitutionFormatterTest, JsonFormatterStartTimeTest) { SystemTime time = std::chrono::system_clock::from_time_t(expected_time_in_epoch); EXPECT_CALL(stream_info, startTime()).WillRepeatedly(Return(time)); - std::unordered_map expected_json_map = { + absl::node_hash_map expected_json_map = { {"simple_date", "2018/03/28"}, {"test_time", fmt::format("{}", expected_time_in_epoch)}, {"bad_format", "bad_format"}, @@ -1852,7 +1852,7 @@ TEST(SubstitutionFormatterTest, JsonFormatterMultiTokenTest) { Http::TestResponseTrailerMapImpl response_trailer; std::string body; - std::unordered_map expected_json_map = { + absl::node_hash_map expected_json_map = { {"multi_token_field", "HTTP/1.1 plainstring SOME_REQUEST_HEADER SOME_RESPONSE_HEADER"}}; absl::flat_hash_map key_mapping = { diff --git a/test/common/grpc/google_grpc_utils_test.cc b/test/common/grpc/google_grpc_utils_test.cc index 2b422af3f4eb..82fa62f6a55c 100644 --- a/test/common/grpc/google_grpc_utils_test.cc +++ b/test/common/grpc/google_grpc_utils_test.cc @@ -99,8 +99,8 @@ TEST(GoogleGrpcUtilsTest, ChannelArgsFromConfig) { )EOF"); const 
grpc::ChannelArguments channel_args = GoogleGrpcUtils::channelArgsFromConfig(config); grpc_channel_args effective_args = channel_args.c_channel_args(); - std::unordered_map string_args; - std::unordered_map int_args; + absl::node_hash_map string_args; + absl::node_hash_map int_args; for (uint32_t n = 0; n < effective_args.num_args; ++n) { const grpc_arg arg = effective_args.args[n]; ASSERT_TRUE(arg.type == GRPC_ARG_STRING || arg.type == GRPC_ARG_INTEGER); diff --git a/test/common/http/http2/codec_impl_test_util.h b/test/common/http/http2/codec_impl_test_util.h index 2ba9f545a20c..339481d6d408 100644 --- a/test/common/http/http2/codec_impl_test_util.h +++ b/test/common/http/http2/codec_impl_test_util.h @@ -55,7 +55,7 @@ class TestCodecSettingsProvider { } private: - std::unordered_map settings_; + absl::node_hash_map settings_; }; struct ServerCodecFacade : public virtual Connection { diff --git a/test/common/network/dns_impl_test.cc b/test/common/network/dns_impl_test.cc index 3b015ca2a94d..54cf4fbdb8a1 100644 --- a/test/common/network/dns_impl_test.cc +++ b/test/common/network/dns_impl_test.cc @@ -1,7 +1,6 @@ #include #include #include -#include #include #include "envoy/common/platform.h" @@ -27,6 +26,7 @@ #include "test/test_common/utility.h" #include "absl/container/fixed_array.h" +#include "absl/container/node_hash_map.h" #include "ares.h" #include "ares_dns.h" #include "gtest/gtest.h" @@ -53,9 +53,9 @@ namespace { // List of IP address (in human readable format). using IpList = std::list; // Map from hostname to IpList. -using HostMap = std::unordered_map; +using HostMap = absl::node_hash_map; // Map from hostname to CNAME -using CNameMap = std::unordered_map; +using CNameMap = absl::node_hash_map; // Represents a single TestDnsServer query state and lifecycle. This implements // just enough of RFC 1035 to handle queries we generate in the tests below. enum class RecordType { A, AAAA }; @@ -320,7 +320,7 @@ class DnsResolverImplPeer { ares_channel channel() const { return resolver_->channel_; } bool isChannelDirty() const { return resolver_->dirty_channel_; } - const std::unordered_map& events() { return resolver_->events_; } + const absl::node_hash_map& events() { return resolver_->events_; } // Reset the channel state for a DnsResolverImpl such that it will only use // TCP and optionally has a zero timeout (for validating timeout behavior). 
void resetChannelTcpOnly(bool zero_timeout) { diff --git a/test/common/protobuf/utility_test.cc b/test/common/protobuf/utility_test.cc index 3ae2fb03bcb6..2132fd25e2d2 100644 --- a/test/common/protobuf/utility_test.cc +++ b/test/common/protobuf/utility_test.cc @@ -1,5 +1,3 @@ -#include - #include "envoy/api/v2/cluster.pb.h" #include "envoy/api/v2/core/base.pb.h" #include "envoy/config/bootstrap/v2/bootstrap.pb.h" @@ -30,6 +28,7 @@ #include "test/test_common/logging.h" #include "test/test_common/utility.h" +#include "absl/container/node_hash_set.h" #include "gtest/gtest.h" #include "udpa/type/v1/typed_struct.pb.h" @@ -1142,7 +1141,7 @@ TEST_F(ProtobufUtilityTest, HashedValueStdHash) { HashedValue hv1(v1), hv2(v2), hv3(v3); - std::unordered_set set; + absl::node_hash_set set; set.emplace(hv1); set.emplace(hv2); set.emplace(hv3); diff --git a/test/common/router/vhds_test.cc b/test/common/router/vhds_test.cc index 88bd464d7bbb..f1abea4ac04f 100644 --- a/test/common/router/vhds_test.cc +++ b/test/common/router/vhds_test.cc @@ -86,7 +86,7 @@ name: my_route Init::ExpectableWatcherImpl init_watcher_; Init::TargetHandlePtr init_target_handle_; const std::string context_ = "vhds_test"; - std::unordered_set providers_; + absl::node_hash_set providers_; Protobuf::util::MessageDifferencer messageDifferencer_; std::string default_vhds_config_; NiceMock subscription_factory_; diff --git a/test/common/secret/BUILD b/test/common/secret/BUILD index b5b05456c3fd..48572641a39b 100644 --- a/test/common/secret/BUILD +++ b/test/common/secret/BUILD @@ -14,7 +14,6 @@ envoy_cc_test( data = [ "//test/extensions/transport_sockets/tls/test_data:certs", ], - tags = ["fails_on_windows"], deps = [ "//source/common/secret:sds_api_lib", "//source/common/secret:secret_manager_impl_lib", diff --git a/test/common/secret/secret_manager_impl_test.cc b/test/common/secret/secret_manager_impl_test.cc index a947df8417d0..58304e1a1106 100644 --- a/test/common/secret/secret_manager_impl_test.cc +++ b/test/common/secret/secret_manager_impl_test.cc @@ -43,7 +43,8 @@ class SecretManagerImplTest : public testing::Test, public Logger::Loggable(*message_ptr); envoy::admin::v3::SecretsConfigDump expected_secrets_config_dump; TestUtility::loadFromYaml(expected_dump_yaml, expected_secrets_config_dump); - EXPECT_EQ(expected_secrets_config_dump.DebugString(), secrets_config_dump.DebugString()); + EXPECT_THAT(secrets_config_dump, + ProtoEqIgnoreRepeatedFieldOrdering(expected_secrets_config_dump)); } void setupSecretProviderContext() {} diff --git a/test/common/stats/thread_local_store_test.cc b/test/common/stats/thread_local_store_test.cc index daf20f6f2349..726f32174ae8 100644 --- a/test/common/stats/thread_local_store_test.cc +++ b/test/common/stats/thread_local_store_test.cc @@ -1,7 +1,6 @@ #include #include #include -#include #include "envoy/config/metrics/v3/stats.pb.h" #include "envoy/stats/histogram.h" diff --git a/test/common/upstream/BUILD b/test/common/upstream/BUILD index cfbabbb44f43..9e76ee81c5df 100644 --- a/test/common/upstream/BUILD +++ b/test/common/upstream/BUILD @@ -221,7 +221,6 @@ envoy_cc_test( envoy_cc_test( name = "load_stats_reporter_test", srcs = ["load_stats_reporter_test.cc"], - tags = ["fails_on_windows"], deps = [ "//source/common/stats:stats_lib", "//source/common/upstream:load_stats_reporter_lib", diff --git a/test/common/upstream/health_checker_impl_test.cc b/test/common/upstream/health_checker_impl_test.cc index 46ea30ccd6d5..b20b8b56be23 100644 --- a/test/common/upstream/health_checker_impl_test.cc +++ 
b/test/common/upstream/health_checker_impl_test.cc @@ -136,8 +136,8 @@ class HttpHealthCheckerImplTest : public testing::Test, public HealthCheckerTest using TestSessionPtr = std::unique_ptr; using HostWithHealthCheckMap = - std::unordered_map; + absl::node_hash_map; void allocHealthChecker(const std::string& yaml, bool avoid_boosting = true) { health_checker_ = std::make_shared( diff --git a/test/common/upstream/load_balancer_benchmark.cc b/test/common/upstream/load_balancer_benchmark.cc index a70f4d920a9a..bb491a788a16 100644 --- a/test/common/upstream/load_balancer_benchmark.cc +++ b/test/common/upstream/load_balancer_benchmark.cc @@ -214,7 +214,7 @@ class TestLoadBalancerContext : public LoadBalancerContextBase { }; void computeHitStats(benchmark::State& state, - const std::unordered_map& hit_counter) { + const absl::node_hash_map& hit_counter) { double mean = 0; for (const auto& pair : hit_counter) { mean += pair.second; @@ -240,7 +240,7 @@ void BM_LeastRequestLoadBalancerChooseHost(benchmark::State& state) { const uint64_t choice_count = state.range(1); const uint64_t keys_to_simulate = state.range(2); LeastRequestTester tester(num_hosts, choice_count); - std::unordered_map hit_counter; + absl::node_hash_map hit_counter; TestLoadBalancerContext context; state.ResumeTiming(); @@ -273,12 +273,12 @@ void BM_RingHashLoadBalancerChooseHost(benchmark::State& state) { RingHashTester tester(num_hosts, min_ring_size); tester.ring_hash_lb_->initialize(); LoadBalancerPtr lb = tester.ring_hash_lb_->factory()->create(); - std::unordered_map hit_counter; + absl::node_hash_map hit_counter; TestLoadBalancerContext context; state.ResumeTiming(); // Note: To a certain extent this is benchmarking the performance of xxhash as well as - // std::unordered_map. However, it should be roughly equivalent to the work done when + // absl::node_hash_map. However, it should be roughly equivalent to the work done when // comparing different hashing algorithms. // TODO(mattklein123): When Maglev is a real load balancer, further share code with the // other test. @@ -311,12 +311,12 @@ void BM_MaglevLoadBalancerChooseHost(benchmark::State& state) { MaglevTester tester(num_hosts); tester.maglev_lb_->initialize(); LoadBalancerPtr lb = tester.maglev_lb_->factory()->create(); - std::unordered_map hit_counter; + absl::node_hash_map hit_counter; TestLoadBalancerContext context; state.ResumeTiming(); // Note: To a certain extent this is benchmarking the performance of xxhash as well as - // std::unordered_map. However, it should be roughly equivalent to the work done when + // absl::node_hash_map. However, it should be roughly equivalent to the work done when // comparing different hashing algorithms. 
for (uint64_t i = 0; i < keys_to_simulate; i++) { context.hash_key_ = hashInt(i); diff --git a/test/common/upstream/load_balancer_simulation_test.cc b/test/common/upstream/load_balancer_simulation_test.cc index 3d8dd616eec2..0f86debac3b8 100644 --- a/test/common/upstream/load_balancer_simulation_test.cc +++ b/test/common/upstream/load_balancer_simulation_test.cc @@ -75,13 +75,13 @@ TEST(DISABLED_LeastRequestLoadBalancerWeightTest, Weight) { LeastRequestLoadBalancer lb_{ priority_set, nullptr, stats, runtime, random, common_config, least_request_lb_config}; - std::unordered_map host_hits; + absl::node_hash_map host_hits; const uint64_t total_requests = 100; for (uint64_t i = 0; i < total_requests; i++) { host_hits[lb_.chooseHost(nullptr)]++; } - std::unordered_map weight_to_percent; + absl::node_hash_map weight_to_percent; for (const auto& host : host_hits) { std::cout << fmt::format("url:{}, weight:{}, hits:{}, percent_of_total:{}\n", host.first->address()->asString(), host.first->weight(), host.second, diff --git a/test/common/upstream/load_stats_reporter_test.cc b/test/common/upstream/load_stats_reporter_test.cc index c22593a84f5c..2fd28c380661 100644 --- a/test/common/upstream/load_stats_reporter_test.cc +++ b/test/common/upstream/load_stats_reporter_test.cc @@ -55,7 +55,9 @@ class LoadStatsReporterTest : public testing::Test { expected_request.mutable_node()->add_client_features("envoy.lrs.supports_send_all_clusters"); std::copy(expected_cluster_stats.begin(), expected_cluster_stats.end(), Protobuf::RepeatedPtrFieldBackInserter(expected_request.mutable_cluster_stats())); - EXPECT_CALL(async_stream_, sendMessageRaw_(Grpc::ProtoBufferEq(expected_request), false)); + EXPECT_CALL( + async_stream_, + sendMessageRaw_(Grpc::ProtoBufferEqIgnoreRepeatedFieldOrdering(expected_request), false)); } void deliverLoadStatsResponse(const std::vector& cluster_names) { diff --git a/test/common/upstream/ring_hash_lb_test.cc b/test/common/upstream/ring_hash_lb_test.cc index 9c8a0c7b2652..9c9413233e02 100644 --- a/test/common/upstream/ring_hash_lb_test.cc +++ b/test/common/upstream/ring_hash_lb_test.cc @@ -2,7 +2,6 @@ #include #include #include -#include #include "envoy/config/cluster/v3/cluster.pb.h" #include "envoy/router/router.h" @@ -15,6 +14,7 @@ #include "test/mocks/runtime/mocks.h" #include "test/mocks/upstream/mocks.h" +#include "absl/container/node_hash_map.h" #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -468,7 +468,7 @@ TEST_P(RingHashLoadBalancerTest, HostWeightedTinyRing) { LoadBalancerPtr lb = lb_->factory()->create(); // :90 should appear once, :91 should appear twice and :92 should appear three times. - std::unordered_map expected{ + absl::node_hash_map expected{ {928266305478181108UL, 2}, {4443673547860492590UL, 2}, {5583722120771150861UL, 1}, {6311230543546372928UL, 1}, {13444792449719432967UL, 2}, {16117243373044804889UL, 0}}; for (const auto& entry : expected) { @@ -547,7 +547,7 @@ TEST_P(RingHashLoadBalancerTest, LocalityWeightedTinyRing) { // :90 should appear once, :91 should appear twice, :92 should appear three times, // and :93 shouldn't appear at all. 
- std::unordered_map expected{ + absl::node_hash_map expected{ {928266305478181108UL, 2}, {4443673547860492590UL, 2}, {5583722120771150861UL, 1}, {6311230543546372928UL, 1}, {13444792449719432967UL, 2}, {16117243373044804889UL, 0}}; for (const auto& entry : expected) { @@ -617,7 +617,7 @@ TEST_P(RingHashLoadBalancerTest, HostAndLocalityWeightedTinyRing) { // :90 should appear once, :91 and :92 should each appear two times, and :93 should appear four // times, to get the correct overall proportions. - std::unordered_map expected{ + absl::node_hash_map expected{ {928266305478181108UL, 2}, {3851675632748031481UL, 3}, {5583722120771150861UL, 1}, {6311230543546372928UL, 1}, {7700377290971790572UL, 3}, {12559126875973811811UL, 3}, {13444792449719432967UL, 2}, {13784988426630141778UL, 3}, {16117243373044804889UL, 0}}; @@ -763,7 +763,7 @@ TEST_P(RingHashLoadBalancerTest, LopsidedWeightSmallScale) { // Every 128th host in the light-but-dense locality should have an entry on the ring, for a total // of 8 entries. This gives us the right ratio of 1/128. - std::unordered_map expected{ + absl::node_hash_map expected{ {11664790346325243808UL, 1}, {15894554872961148518UL, 128}, {13958138884277627155UL, 256}, {15803774069438192949UL, 384}, {3829253010855396576UL, 512}, {17918147347826565154UL, 640}, {6442769608292299103UL, 768}, {5881074926069334434UL, 896}}; diff --git a/test/common/upstream/upstream_impl_test.cc b/test/common/upstream/upstream_impl_test.cc index 2adbf136be49..2fd3def01f71 100644 --- a/test/common/upstream/upstream_impl_test.cc +++ b/test/common/upstream/upstream_impl_test.cc @@ -814,7 +814,7 @@ TEST_F(StrictDnsClusterImplTest, LoadAssignmentBasic) { // Remove the duplicated hosts from both resolve targets and ensure that we don't see the same // host multiple times. 
- std::unordered_set removed_hosts; + absl::node_hash_set removed_hosts; cluster.prioritySet().addPriorityUpdateCb( [&](uint32_t, const HostVector&, const HostVector& hosts_removed) -> void { for (const auto& host : hosts_removed) { diff --git a/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc b/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc index 9ffaae149de2..602f5836919a 100644 --- a/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc +++ b/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc @@ -99,7 +99,7 @@ class ExtAuthzHttpClientTest : public testing::Test { return std::make_shared(proto_config, timeout, path_prefix); } - Http::RequestMessagePtr sendRequest(std::unordered_map&& headers) { + Http::RequestMessagePtr sendRequest(absl::node_hash_map&& headers) { envoy::service::auth::v3::CheckRequest request{}; auto mutable_headers = request.mutable_attributes()->mutable_request()->mutable_http()->mutable_headers(); diff --git a/test/extensions/filters/http/jwt_authn/group_verifier_test.cc b/test/extensions/filters/http/jwt_authn/group_verifier_test.cc index 10ca0909555e..fec4c1430516 100644 --- a/test/extensions/filters/http/jwt_authn/group_verifier_test.cc +++ b/test/extensions/filters/http/jwt_authn/group_verifier_test.cc @@ -63,7 +63,7 @@ const char AnyWithAll[] = R"( - provider_name: "provider_4" )"; -using StatusMap = std::unordered_map; +using StatusMap = absl::node_hash_map; constexpr auto allowfailed = "_allow_failed_"; @@ -109,9 +109,9 @@ class GroupVerifierTest : public testing::Test { return struct_obj; } - std::unordered_map + absl::node_hash_map createAsyncMockAuthsAndVerifier(const std::vector& providers) { - std::unordered_map callbacks; + absl::node_hash_map callbacks; for (const auto& provider : providers) { auto mock_auth = std::make_unique(); EXPECT_CALL(*mock_auth, doVerify(_, _, _, _, _)) @@ -130,7 +130,7 @@ class GroupVerifierTest : public testing::Test { JwtAuthentication proto_config_; VerifierConstPtr verifier_; MockVerifierCallbacks mock_cb_; - std::unordered_map> mock_auths_; + absl::node_hash_map> mock_auths_; NiceMock mock_factory_; ContextSharedPtr context_; NiceMock parent_span_; diff --git a/test/extensions/filters/network/redis_proxy/BUILD b/test/extensions/filters/network/redis_proxy/BUILD index 24d847f5306f..034d9bd11b7c 100644 --- a/test/extensions/filters/network/redis_proxy/BUILD +++ b/test/extensions/filters/network/redis_proxy/BUILD @@ -41,7 +41,6 @@ envoy_extension_cc_test( name = "conn_pool_impl_test", srcs = ["conn_pool_impl_test.cc"], extension_name = "envoy.filters.network.redis_proxy", - tags = ["fails_on_windows"], deps = [ ":redis_mocks", "//source/common/event:dispatcher_lib", diff --git a/test/extensions/filters/network/redis_proxy/conn_pool_impl_test.cc b/test/extensions/filters/network/redis_proxy/conn_pool_impl_test.cc index 1d1694cdbfc3..8b56ba1f695f 100644 --- a/test/extensions/filters/network/redis_proxy/conn_pool_impl_test.cc +++ b/test/extensions/filters/network/redis_proxy/conn_pool_impl_test.cc @@ -150,7 +150,7 @@ class RedisConnPoolImplTest : public testing::Test, public Common::Redis::Client EXPECT_NE(nullptr, request); } - std::unordered_map& + absl::node_hash_map& clientMap() { InstanceImpl* conn_pool_impl = dynamic_cast(conn_pool_.get()); return conn_pool_impl->tls_->getTyped().client_map_; @@ -161,7 +161,7 @@ class RedisConnPoolImplTest : public testing::Test, public Common::Redis::Client return 
conn_pool_impl->tls_->getTyped().client_map_[host].get(); } - std::unordered_map& hostAddressMap() { + absl::node_hash_map& hostAddressMap() { InstanceImpl* conn_pool_impl = dynamic_cast(conn_pool_.get()); return conn_pool_impl->tls_->getTyped().host_address_map_; } @@ -631,10 +631,6 @@ TEST_F(RedisConnPoolImplTest, RemoteClose) { } TEST_F(RedisConnPoolImplTest, MakeRequestToHost) { - InSequence s; - - setup(false); - Common::Redis::RespValue value; Common::Redis::Client::MockPoolRequest active_request1; Common::Redis::Client::MockPoolRequest active_request2; @@ -645,48 +641,55 @@ TEST_F(RedisConnPoolImplTest, MakeRequestToHost) { Upstream::HostConstSharedPtr host1; Upstream::HostConstSharedPtr host2; - // There is no cluster yet, so makeRequestToHost() should fail. - EXPECT_EQ(nullptr, conn_pool_->makeRequestToHost("10.0.0.1:3000", value, callbacks1)); - // Add the cluster now. - update_callbacks_->onClusterAddOrUpdate(cm_.thread_local_cluster_); - - EXPECT_CALL(*this, create_(_)).WillOnce(DoAll(SaveArg<0>(&host1), Return(client1))); - EXPECT_CALL(*client1, makeRequest_(Ref(value), Ref(callbacks1))) - .WillOnce(Return(&active_request1)); - Common::Redis::Client::PoolRequest* request1 = - conn_pool_->makeRequestToHost("10.0.0.1:3000", value, callbacks1); - EXPECT_EQ(&active_request1, request1); - EXPECT_EQ(host1->address()->asString(), "10.0.0.1:3000"); - - // IPv6 address returned from Redis server will not have square brackets - // around it, while Envoy represents Address::Ipv6Instance addresses with square brackets around - // the address. - EXPECT_CALL(*this, create_(_)).WillOnce(DoAll(SaveArg<0>(&host2), Return(client2))); - EXPECT_CALL(*client2, makeRequest_(Ref(value), Ref(callbacks2))) - .WillOnce(Return(&active_request2)); - Common::Redis::Client::PoolRequest* request2 = - conn_pool_->makeRequestToHost("2001:470:813B:0:0:0:0:1:3333", value, callbacks2); - EXPECT_EQ(&active_request2, request2); - EXPECT_EQ(host2->address()->asString(), "[2001:470:813b::1]:3333"); + { + InSequence s; - // Test with a badly specified host address (no colon, no address, no port). - EXPECT_EQ(conn_pool_->makeRequestToHost("bad", value, callbacks1), nullptr); - // Test with a badly specified IPv4 address. - EXPECT_EQ(conn_pool_->makeRequestToHost("10.0.bad:3000", value, callbacks1), nullptr); - // Test with a badly specified TCP port. - EXPECT_EQ(conn_pool_->makeRequestToHost("10.0.0.1:bad", value, callbacks1), nullptr); - // Test with a TCP port outside of the acceptable range for a 32-bit integer. - EXPECT_EQ(conn_pool_->makeRequestToHost("10.0.0.1:4294967297", value, callbacks1), - nullptr); // 2^32 + 1 - // Test with a TCP port outside of the acceptable range for a TCP port (0 .. 65535). - EXPECT_EQ(conn_pool_->makeRequestToHost("10.0.0.1:65536", value, callbacks1), nullptr); - // Test with a badly specified IPv6-like address. - EXPECT_EQ(conn_pool_->makeRequestToHost("bad:ipv6:3000", value, callbacks1), nullptr); - // Test with a valid IPv6 address and a badly specified TCP port (out of range). - EXPECT_EQ(conn_pool_->makeRequestToHost("2001:470:813b:::70000", value, callbacks1), nullptr); + setup(false); + + // There is no cluster yet, so makeRequestToHost() should fail. + EXPECT_EQ(nullptr, conn_pool_->makeRequestToHost("10.0.0.1:3000", value, callbacks1)); + // Add the cluster now. 
+ update_callbacks_->onClusterAddOrUpdate(cm_.thread_local_cluster_); + + EXPECT_CALL(*this, create_(_)).WillOnce(DoAll(SaveArg<0>(&host1), Return(client1))); + EXPECT_CALL(*client1, makeRequest_(Ref(value), Ref(callbacks1))) + .WillOnce(Return(&active_request1)); + Common::Redis::Client::PoolRequest* request1 = + conn_pool_->makeRequestToHost("10.0.0.1:3000", value, callbacks1); + EXPECT_EQ(&active_request1, request1); + EXPECT_EQ(host1->address()->asString(), "10.0.0.1:3000"); + + // IPv6 address returned from Redis server will not have square brackets + // around it, while Envoy represents Address::Ipv6Instance addresses with square brackets around + // the address. + EXPECT_CALL(*this, create_(_)).WillOnce(DoAll(SaveArg<0>(&host2), Return(client2))); + EXPECT_CALL(*client2, makeRequest_(Ref(value), Ref(callbacks2))) + .WillOnce(Return(&active_request2)); + Common::Redis::Client::PoolRequest* request2 = + conn_pool_->makeRequestToHost("2001:470:813B:0:0:0:0:1:3333", value, callbacks2); + EXPECT_EQ(&active_request2, request2); + EXPECT_EQ(host2->address()->asString(), "[2001:470:813b::1]:3333"); + + // Test with a badly specified host address (no colon, no address, no port). + EXPECT_EQ(conn_pool_->makeRequestToHost("bad", value, callbacks1), nullptr); + // Test with a badly specified IPv4 address. + EXPECT_EQ(conn_pool_->makeRequestToHost("10.0.bad:3000", value, callbacks1), nullptr); + // Test with a badly specified TCP port. + EXPECT_EQ(conn_pool_->makeRequestToHost("10.0.0.1:bad", value, callbacks1), nullptr); + // Test with a TCP port outside of the acceptable range for a 32-bit integer. + EXPECT_EQ(conn_pool_->makeRequestToHost("10.0.0.1:4294967297", value, callbacks1), + nullptr); // 2^32 + 1 + // Test with a TCP port outside of the acceptable range for a TCP port (0 .. 65535). + EXPECT_EQ(conn_pool_->makeRequestToHost("10.0.0.1:65536", value, callbacks1), nullptr); + // Test with a badly specified IPv6-like address. + EXPECT_EQ(conn_pool_->makeRequestToHost("bad:ipv6:3000", value, callbacks1), nullptr); + // Test with a valid IPv6 address and a badly specified TCP port (out of range). + EXPECT_EQ(conn_pool_->makeRequestToHost("2001:470:813b:::70000", value, callbacks1), nullptr); + } - EXPECT_CALL(*client2, close()); + // We cannot guarantee which order close will be called, perform these checks unsequenced EXPECT_CALL(*client1, close()); + EXPECT_CALL(*client2, close()); tls_.shutdownThread(); } @@ -741,7 +744,7 @@ TEST_F(RedisConnPoolImplTest, HostsAddedAndRemovedWithDraining) { EXPECT_EQ(&active_request2, request2); EXPECT_EQ(host2->address()->asString(), "[2001:470:813b::1]:3333"); - std::unordered_map& host_address_map = + absl::node_hash_map& host_address_map = hostAddressMap(); EXPECT_EQ(host_address_map.size(), 2); // host1 and host2 have been created. EXPECT_EQ(host_address_map[host1->address()->asString()], host1); @@ -840,7 +843,7 @@ TEST_F(RedisConnPoolImplTest, HostsAddedAndEndWithNoDraining) { EXPECT_EQ(&active_request2, request2); EXPECT_EQ(host2->address()->asString(), "[2001:470:813b::1]:3333"); - std::unordered_map& host_address_map = + absl::node_hash_map& host_address_map = hostAddressMap(); EXPECT_EQ(host_address_map.size(), 2); // host1 and host2 have been created. 
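In the restructured MakeRequestToHost test above, InSequence is confined to an inner scope so that the trailing close() expectations stay unordered. A small gmock sketch of that scoping pattern (hypothetical mock, not the Redis connection pool test):

#include "gmock/gmock.h"
#include "gtest/gtest.h"

namespace {

class MockConnection {
public:
  MOCK_METHOD(void, open, ());
  MOCK_METHOD(void, close, ());
};

TEST(ScopedInSequenceSketch, TrailingCallsUnordered) {
  MockConnection c1;
  MockConnection c2;
  {
    // Only expectations created while the InSequence object is alive are
    // order-constrained.
    testing::InSequence s;
    EXPECT_CALL(c1, open());
    EXPECT_CALL(c2, open());
  }
  // Created outside the scope: these may be satisfied in either order.
  EXPECT_CALL(c1, close());
  EXPECT_CALL(c2, close());

  c1.open();
  c2.open();
  c2.close();  // reversed relative to declaration order; still passes
  c1.close();
}

} // namespace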
EXPECT_EQ(host_address_map[host1->address()->asString()], host1); @@ -918,7 +921,7 @@ TEST_F(RedisConnPoolImplTest, HostsAddedAndEndWithClusterRemoval) { EXPECT_EQ(&active_request2, request2); EXPECT_EQ(host2->address()->asString(), "[2001:470:813b::1]:3333"); - std::unordered_map& host_address_map = + absl::node_hash_map& host_address_map = hostAddressMap(); EXPECT_EQ(host_address_map.size(), 2); // host1 and host2 have been created. EXPECT_EQ(host_address_map[host1->address()->asString()], host1); diff --git a/test/extensions/filters/network/rocketmq_proxy/active_message_test.cc b/test/extensions/filters/network/rocketmq_proxy/active_message_test.cc index 4715637df2b5..54f01cfaeea0 100644 --- a/test/extensions/filters/network/rocketmq_proxy/active_message_test.cc +++ b/test/extensions/filters/network/rocketmq_proxy/active_message_test.cc @@ -75,7 +75,7 @@ TEST_F(ActiveMessageTest, ClusterName) { TEST_F(ActiveMessageTest, FillBrokerData) { - std::unordered_map address; + absl::node_hash_map address; address.emplace(0, "1.2.3.4:10911"); BrokerData broker_data("DefaultCluster", "broker-a", std::move(address)); diff --git a/test/extensions/filters/network/rocketmq_proxy/topic_route_test.cc b/test/extensions/filters/network/rocketmq_proxy/topic_route_test.cc index a2392b0c0603..a337b89ead69 100644 --- a/test/extensions/filters/network/rocketmq_proxy/topic_route_test.cc +++ b/test/extensions/filters/network/rocketmq_proxy/topic_route_test.cc @@ -1,9 +1,8 @@ -#include - #include "common/protobuf/utility.h" #include "extensions/filters/network/rocketmq_proxy/topic_route.h" +#include "absl/container/node_hash_map.h" #include "gtest/gtest.h" namespace Envoy { @@ -26,7 +25,7 @@ TEST(TopicRouteTest, Serialization) { } TEST(BrokerDataTest, Serialization) { - std::unordered_map broker_addrs; + absl::node_hash_map broker_addrs; std::string dummy_address("127.0.0.1:10911"); for (int64_t i = 0; i < 3; i++) { broker_addrs[i] = dummy_address; @@ -56,7 +55,7 @@ TEST(TopicRouteDataTest, Serialization) { std::string dummy_address("127.0.0.1:10911"); for (int i = 0; i < 16; i++) { - std::unordered_map broker_addrs; + absl::node_hash_map broker_addrs; for (int64_t i = 0; i < 3; i++) { broker_addrs[i] = dummy_address; } diff --git a/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc b/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc index d8408586a96e..50e24f4e0f0d 100644 --- a/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc +++ b/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc @@ -7,7 +7,6 @@ #include #include -#include #include "common/memory/stats.h" #include "common/network/socket_impl.h" diff --git a/test/extensions/stats_sinks/hystrix/hystrix_test.cc b/test/extensions/stats_sinks/hystrix/hystrix_test.cc index 5c76dd9499d8..463d437c3d46 100644 --- a/test/extensions/stats_sinks/hystrix/hystrix_test.cc +++ b/test/extensions/stats_sinks/hystrix/hystrix_test.cc @@ -157,7 +157,7 @@ class HystrixSinkTest : public testing::Test { addClusterToMap(cluster2_name_, cluster2_.cluster_); } - std::unordered_map + absl::node_hash_map addSecondClusterAndSendDataHelper(Buffer::OwnedImpl& buffer, const uint64_t success_step, const uint64_t error_step, const uint64_t timeout_step, const uint64_t success_step2, const uint64_t error_step2, @@ -216,8 +216,8 @@ class HystrixSinkTest : public testing::Test { } } - std::unordered_map buildClusterMap(absl::string_view data_message) { - std::unordered_map cluster_message_map; + absl::node_hash_map 
buildClusterMap(absl::string_view data_message) { + absl::node_hash_map cluster_message_map; std::vector messages = absl::StrSplit(data_message, "data: ", absl::SkipWhitespace()); for (auto message : messages) { @@ -257,7 +257,7 @@ TEST_F(HystrixSinkTest, EmptyFlush) { // Register callback to sink. sink_->registerConnection(&callbacks_); sink_->flush(snapshot_); - std::unordered_map cluster_message_map = + absl::node_hash_map cluster_message_map = buildClusterMap(buffer.toString()); validateResults(cluster_message_map[cluster1_name_], 0, 0, 0, 0, 0, window_size_); } @@ -280,7 +280,7 @@ TEST_F(HystrixSinkTest, BasicFlow) { sink_->flush(snapshot_); } - std::unordered_map cluster_message_map = + absl::node_hash_map cluster_message_map = buildClusterMap(buffer.toString()); Json::ObjectSharedPtr json_buffer = @@ -352,7 +352,7 @@ TEST_F(HystrixSinkTest, Disconnect) { } EXPECT_NE(buffer.length(), 0); - std::unordered_map cluster_message_map = + absl::node_hash_map cluster_message_map = buildClusterMap(buffer.toString()); Json::ObjectSharedPtr json_buffer = Json::Factory::loadFromString(cluster_message_map[cluster1_name_]); @@ -392,7 +392,7 @@ TEST_F(HystrixSinkTest, AddCluster) { Buffer::OwnedImpl buffer = createClusterAndCallbacks(); // Add cluster and "run" some traffic. - std::unordered_map cluster_message_map = + absl::node_hash_map cluster_message_map = addSecondClusterAndSendDataHelper(buffer, success_step, error_step, timeout_step, success_step2, error_step2, timeout_step2); @@ -433,7 +433,7 @@ TEST_F(HystrixSinkTest, AddAndRemoveClusters) { removeSecondClusterHelper(buffer); // Check that removed worked. - std::unordered_map cluster_message_map = + absl::node_hash_map cluster_message_map = buildClusterMap(buffer.toString()); ASSERT_NE(cluster_message_map.find(cluster1_name_), cluster_message_map.end()) << "cluster1_name = " << cluster1_name_; @@ -485,7 +485,7 @@ TEST_F(HystrixSinkTest, HistogramTest) { sink_->registerConnection(&callbacks_); sink_->flush(snapshot_); - std::unordered_map cluster_message_map = + absl::node_hash_map cluster_message_map = buildClusterMap(buffer.toString()); Json::ObjectSharedPtr latency = Json::Factory::loadFromString(cluster_message_map[cluster1_name_]) diff --git a/test/extensions/tracers/zipkin/zipkin_tracer_impl_test.cc b/test/extensions/tracers/zipkin/zipkin_tracer_impl_test.cc index 12041fdd5860..bd51f1493e5c 100644 --- a/test/extensions/tracers/zipkin/zipkin_tracer_impl_test.cc +++ b/test/extensions/tracers/zipkin/zipkin_tracer_impl_test.cc @@ -2,7 +2,6 @@ #include #include #include -#include #include "envoy/config/trace/v3/zipkin.pb.h" diff --git a/test/fuzz/utility.h b/test/fuzz/utility.h index 171674a7ff04..534e5f1f8850 100644 --- a/test/fuzz/utility.h +++ b/test/fuzz/utility.h @@ -83,8 +83,8 @@ replaceInvalidStringValues(const envoy::config::core::v3::Metadata& upstream_met template inline T fromHeaders( const test::fuzz::Headers& headers, - const std::unordered_set& ignore_headers = std::unordered_set(), - std::unordered_set include_headers = std::unordered_set()) { + const absl::node_hash_set& ignore_headers = absl::node_hash_set(), + absl::node_hash_set include_headers = absl::node_hash_set()) { T header_map; for (const auto& header : headers.headers()) { if (ignore_headers.find(absl::AsciiStrToLower(header.key())) == ignore_headers.end()) { diff --git a/test/integration/fake_upstream.h b/test/integration/fake_upstream.h index 6afeb17b36a9..ef2a79c92cdb 100644 --- a/test/integration/fake_upstream.h +++ b/test/integration/fake_upstream.h @@ 
-200,8 +200,8 @@ class FakeStream : public Http::RequestDecoder, Event::TestTimeSystem& timeSystem() { return time_system_; } - Http::MetadataMap& metadata_map() { return metadata_map_; } - std::unordered_map& duplicated_metadata_key_count() { + Http::MetadataMap& metadataMap() { return metadata_map_; } + absl::node_hash_map& duplicatedMetadataKeyCount() { return duplicated_metadata_key_count_; } @@ -222,7 +222,7 @@ class FakeStream : public Http::RequestDecoder, bool add_served_by_header_{}; Event::TestTimeSystem& time_system_; Http::MetadataMap metadata_map_; - std::unordered_map duplicated_metadata_key_count_; + absl::node_hash_map duplicated_metadata_key_count_; bool received_data_{false}; }; diff --git a/test/integration/http2_integration_test.cc b/test/integration/http2_integration_test.cc index aea7937dc7db..a2a35d244103 100644 --- a/test/integration/http2_integration_test.cc +++ b/test/integration/http2_integration_test.cc @@ -152,7 +152,7 @@ TEST_P(Http2MetadataIntegrationTest, ProxyMetadataInResponse) { // Verifies metadata is received by the client. response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ(response->metadata_map().find(key)->second, value); + EXPECT_EQ(response->metadataMap().find(key)->second, value); // Sends the second request. response = codec_client_->makeRequestWithBody(default_request_headers_, 10); @@ -171,7 +171,7 @@ TEST_P(Http2MetadataIntegrationTest, ProxyMetadataInResponse) { // Verifies metadata is received by the client. response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ(response->metadata_map().find(key)->second, value); + EXPECT_EQ(response->metadataMap().find(key)->second, value); // Sends the third request. response = codec_client_->makeRequestWithBody(default_request_headers_, 10); @@ -190,7 +190,7 @@ TEST_P(Http2MetadataIntegrationTest, ProxyMetadataInResponse) { // Verifies metadata is received by the client. response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ(response->metadata_map().find(key)->second, value); + EXPECT_EQ(response->metadataMap().find(key)->second, value); // Sends the fourth request. response = codec_client_->makeRequestWithBody(default_request_headers_, 10); @@ -210,7 +210,7 @@ TEST_P(Http2MetadataIntegrationTest, ProxyMetadataInResponse) { // Verifies metadata is received by the client. response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ(response->metadata_map().find(key)->second, value); + EXPECT_EQ(response->metadataMap().find(key)->second, value); // Sends the fifth request. response = codec_client_->makeRequestWithBody(default_request_headers_, 10); @@ -230,7 +230,7 @@ TEST_P(Http2MetadataIntegrationTest, ProxyMetadataInResponse) { // Verifies metadata is received by the client. response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ(response->metadata_map().find(key)->second, value); + EXPECT_EQ(response->metadataMap().find(key)->second, value); // Sends the sixth request. 
response = codec_client_->makeRequestWithBody(default_request_headers_, 10); @@ -283,10 +283,10 @@ TEST_P(Http2MetadataIntegrationTest, ProxyMultipleMetadata) { ASSERT_TRUE(response->complete()); for (int i = 0; i < size; i++) { for (const auto& metadata : *multiple_vecs[i][0]) { - EXPECT_EQ(response->metadata_map().find(metadata.first)->second, metadata.second); + EXPECT_EQ(response->metadataMap().find(metadata.first)->second, metadata.second); } } - EXPECT_EQ(response->metadata_map().size(), multiple_vecs.size()); + EXPECT_EQ(response->metadataMap().size(), multiple_vecs.size()); } TEST_P(Http2MetadataIntegrationTest, ProxyInvalidMetadata) { @@ -314,7 +314,7 @@ TEST_P(Http2MetadataIntegrationTest, ProxyInvalidMetadata) { // Verifies metadata is not received by the client. response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ(response->metadata_map().size(), 0); + EXPECT_EQ(response->metadataMap().size(), 0); } void verifyExpectedMetadata(Http::MetadataMap metadata_map, std::set keys) { @@ -342,7 +342,7 @@ TEST_P(Http2MetadataIntegrationTest, TestResponseMetadata) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); std::set expected_metadata_keys = {"headers", "duplicate"}; - verifyExpectedMetadata(response->metadata_map(), expected_metadata_keys); + verifyExpectedMetadata(response->metadataMap(), expected_metadata_keys); // Upstream responds with headers and data. response = codec_client_->makeRequestWithBody(default_request_headers_, 10); @@ -353,7 +353,7 @@ TEST_P(Http2MetadataIntegrationTest, TestResponseMetadata) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); expected_metadata_keys.insert("data"); - verifyExpectedMetadata(response->metadata_map(), expected_metadata_keys); + verifyExpectedMetadata(response->metadataMap(), expected_metadata_keys); EXPECT_EQ(response->keyCount("duplicate"), 2); // Upstream responds with headers, data and trailers. @@ -367,7 +367,7 @@ TEST_P(Http2MetadataIntegrationTest, TestResponseMetadata) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); expected_metadata_keys.insert("trailers"); - verifyExpectedMetadata(response->metadata_map(), expected_metadata_keys); + verifyExpectedMetadata(response->metadataMap(), expected_metadata_keys); EXPECT_EQ(response->keyCount("duplicate"), 3); // Upstream responds with headers, 100-continue and data. @@ -389,7 +389,7 @@ TEST_P(Http2MetadataIntegrationTest, TestResponseMetadata) { ASSERT_TRUE(response->complete()); expected_metadata_keys.erase("trailers"); expected_metadata_keys.insert("100-continue"); - verifyExpectedMetadata(response->metadata_map(), expected_metadata_keys); + verifyExpectedMetadata(response->metadataMap(), expected_metadata_keys); EXPECT_EQ(response->keyCount("duplicate"), 4); // Upstream responds with headers and metadata that will not be consumed. @@ -408,7 +408,7 @@ TEST_P(Http2MetadataIntegrationTest, TestResponseMetadata) { expected_metadata_keys.erase("100-continue"); expected_metadata_keys.insert("aaa"); expected_metadata_keys.insert("keep"); - verifyExpectedMetadata(response->metadata_map(), expected_metadata_keys); + verifyExpectedMetadata(response->metadataMap(), expected_metadata_keys); // Upstream responds with headers, data and metadata that will be consumed. 
response = codec_client_->makeRequestWithBody(default_request_headers_, 10); @@ -426,7 +426,7 @@ TEST_P(Http2MetadataIntegrationTest, TestResponseMetadata) { expected_metadata_keys.erase("aaa"); expected_metadata_keys.insert("data"); expected_metadata_keys.insert("replace"); - verifyExpectedMetadata(response->metadata_map(), expected_metadata_keys); + verifyExpectedMetadata(response->metadataMap(), expected_metadata_keys); EXPECT_EQ(response->keyCount("duplicate"), 2); } @@ -476,9 +476,9 @@ TEST_P(Http2MetadataIntegrationTest, ProxySmallMetadataInRequest) { // Verifies metadata is received by upstream. upstream_request_->encodeHeaders(default_response_headers_, true); - EXPECT_EQ(upstream_request_->metadata_map().find("key")->second, "value"); - EXPECT_EQ(upstream_request_->metadata_map().size(), 1); - EXPECT_EQ(upstream_request_->duplicated_metadata_key_count().find("key")->second, 3); + EXPECT_EQ(upstream_request_->metadataMap().find("key")->second, "value"); + EXPECT_EQ(upstream_request_->metadataMap().size(), 1); + EXPECT_EQ(upstream_request_->duplicatedMetadataKeyCount().find("key")->second, 3); response->waitForEndStream(); ASSERT_TRUE(response->complete()); @@ -506,9 +506,9 @@ TEST_P(Http2MetadataIntegrationTest, ProxyLargeMetadataInRequest) { // Verifies metadata is received upstream. upstream_request_->encodeHeaders(default_response_headers_, true); - EXPECT_EQ(upstream_request_->metadata_map().find("key")->second, value); - EXPECT_EQ(upstream_request_->metadata_map().size(), 1); - EXPECT_EQ(upstream_request_->duplicated_metadata_key_count().find("key")->second, 3); + EXPECT_EQ(upstream_request_->metadataMap().find("key")->second, value); + EXPECT_EQ(upstream_request_->metadataMap().size(), 1); + EXPECT_EQ(upstream_request_->duplicatedMetadataKeyCount().find("key")->second, 3); response->waitForEndStream(); ASSERT_TRUE(response->complete()); @@ -565,7 +565,7 @@ TEST_P(Http2MetadataIntegrationTest, ConsumeAndInsertRequestMetadata) { // Verifies a headers metadata added. std::set expected_metadata_keys = {"headers"}; expected_metadata_keys.insert("metadata"); - verifyExpectedMetadata(upstream_request_->metadata_map(), expected_metadata_keys); + verifyExpectedMetadata(upstream_request_->metadataMap(), expected_metadata_keys); // Sends a headers only request with metadata. An empty data frame carries end_stream. auto encoder_decoder = codec_client_->startRequest(default_request_headers_); @@ -582,8 +582,8 @@ TEST_P(Http2MetadataIntegrationTest, ConsumeAndInsertRequestMetadata) { expected_metadata_keys.insert("data"); expected_metadata_keys.insert("metadata"); expected_metadata_keys.insert("replace"); - verifyExpectedMetadata(upstream_request_->metadata_map(), expected_metadata_keys); - EXPECT_EQ(upstream_request_->duplicated_metadata_key_count().find("metadata")->second, 3); + verifyExpectedMetadata(upstream_request_->metadataMap(), expected_metadata_keys); + EXPECT_EQ(upstream_request_->duplicatedMetadataKeyCount().find("metadata")->second, 3); // Verifies zero length data received, and end_stream is true. 
EXPECT_EQ(true, upstream_request_->receivedData()); EXPECT_EQ(0, upstream_request_->bodyLength()); @@ -604,8 +604,8 @@ TEST_P(Http2MetadataIntegrationTest, ConsumeAndInsertRequestMetadata) { response->waitForEndStream(); ASSERT_TRUE(response->complete()); expected_metadata_keys.insert("trailers"); - verifyExpectedMetadata(upstream_request_->metadata_map(), expected_metadata_keys); - EXPECT_EQ(upstream_request_->duplicated_metadata_key_count().find("metadata")->second, 4); + verifyExpectedMetadata(upstream_request_->metadataMap(), expected_metadata_keys); + EXPECT_EQ(upstream_request_->duplicatedMetadataKeyCount().find("metadata")->second, 4); // Sends headers, large data, metadata. Large data triggers decodeData() multiple times, and each // time, a "data" metadata is added. @@ -622,9 +622,9 @@ TEST_P(Http2MetadataIntegrationTest, ConsumeAndInsertRequestMetadata) { ASSERT_TRUE(response->complete()); expected_metadata_keys.erase("trailers"); - verifyExpectedMetadata(upstream_request_->metadata_map(), expected_metadata_keys); - EXPECT_GE(upstream_request_->duplicated_metadata_key_count().find("data")->second, 2); - EXPECT_GE(upstream_request_->duplicated_metadata_key_count().find("metadata")->second, 3); + verifyExpectedMetadata(upstream_request_->metadataMap(), expected_metadata_keys); + EXPECT_GE(upstream_request_->duplicatedMetadataKeyCount().find("data")->second, 2); + EXPECT_GE(upstream_request_->duplicatedMetadataKeyCount().find("metadata")->second, 3); // Sends multiple metadata. auto encoder_decoder_4 = codec_client_->startRequest(default_request_headers_); @@ -646,8 +646,8 @@ TEST_P(Http2MetadataIntegrationTest, ConsumeAndInsertRequestMetadata) { expected_metadata_keys.insert("metadata1"); expected_metadata_keys.insert("metadata2"); expected_metadata_keys.insert("trailers"); - verifyExpectedMetadata(upstream_request_->metadata_map(), expected_metadata_keys); - EXPECT_EQ(upstream_request_->duplicated_metadata_key_count().find("metadata")->second, 6); + verifyExpectedMetadata(upstream_request_->metadataMap(), expected_metadata_keys); + EXPECT_EQ(upstream_request_->duplicatedMetadataKeyCount().find("metadata")->second, 6); } static std::string decode_headers_only = R"EOF( @@ -691,7 +691,7 @@ void Http2MetadataIntegrationTest::verifyHeadersOnlyTest() { // Verifies a headers metadata added. std::set expected_metadata_keys = {"headers"}; expected_metadata_keys.insert("metadata"); - verifyExpectedMetadata(upstream_request_->metadata_map(), expected_metadata_keys); + verifyExpectedMetadata(upstream_request_->metadataMap(), expected_metadata_keys); // Verifies zero length data received, and end_stream is true. 
EXPECT_EQ(true, upstream_request_->receivedData()); @@ -754,8 +754,8 @@ void Http2MetadataIntegrationTest::testRequestMetadataWithStopAllFilter() { ASSERT_TRUE(response->complete()); std::set expected_metadata_keys = {"headers", "data", "metadata", "metadata1", "metadata2", "replace", "trailers"}; - verifyExpectedMetadata(upstream_request_->metadata_map(), expected_metadata_keys); - EXPECT_EQ(upstream_request_->duplicated_metadata_key_count().find("metadata")->second, 6); + verifyExpectedMetadata(upstream_request_->metadataMap(), expected_metadata_keys); + EXPECT_EQ(upstream_request_->duplicatedMetadataKeyCount().find("metadata")->second, 6); } static std::string metadata_stop_all_filter = R"EOF( @@ -805,10 +805,10 @@ name: encode-headers-return-stop-all-filter response->waitForEndStream(); ASSERT_TRUE(response->complete()); - EXPECT_EQ(response->metadata_map().find("headers")->second, "headers"); - EXPECT_EQ(response->metadata_map().find("data")->second, "data"); - EXPECT_EQ(response->metadata_map().find("trailers")->second, "trailers"); - EXPECT_EQ(response->metadata_map().size(), 3); + EXPECT_EQ(response->metadataMap().find("headers")->second, "headers"); + EXPECT_EQ(response->metadataMap().find("data")->second, "data"); + EXPECT_EQ(response->metadataMap().find("trailers")->second, "trailers"); + EXPECT_EQ(response->metadataMap().size(), 3); EXPECT_EQ(count * size + added_decoded_data_size * 2, response->body().size()); } diff --git a/test/integration/http_integration.cc b/test/integration/http_integration.cc index 554aadb78602..56d738e3b9ce 100644 --- a/test/integration/http_integration.cc +++ b/test/integration/http_integration.cc @@ -842,9 +842,9 @@ void HttpIntegrationTest::testEnvoyHandling100Continue(bool additional_continue_ response->waitForEndStream(); ASSERT_TRUE(response->complete()); - ASSERT(response->continue_headers() != nullptr); - EXPECT_EQ("100", response->continue_headers()->getStatusValue()); - EXPECT_EQ(nullptr, response->continue_headers()->Via()); + ASSERT(response->continueHeaders() != nullptr); + EXPECT_EQ("100", response->continueHeaders()->getStatusValue()); + EXPECT_EQ(nullptr, response->continueHeaders()->Via()); EXPECT_EQ("200", response->headers().getStatusValue()); if (via.empty()) { EXPECT_EQ(nullptr, response->headers().Via()); @@ -926,8 +926,8 @@ void HttpIntegrationTest::testEnvoyProxying1xx(bool continue_before_upstream_com upstream_request_->encodeHeaders(default_response_headers_, true); response->waitForEndStream(); EXPECT_TRUE(response->complete()); - ASSERT(response->continue_headers() != nullptr); - EXPECT_EQ("100", response->continue_headers()->getStatusValue()); + ASSERT(response->continueHeaders() != nullptr); + EXPECT_EQ("100", response->continueHeaders()->getStatusValue()); EXPECT_EQ("200", response->headers().getStatusValue()); } diff --git a/test/integration/integration.h b/test/integration/integration.h index 6f3825c054d8..dfee7b131ff0 100644 --- a/test/integration/integration.h +++ b/test/integration/integration.h @@ -42,11 +42,11 @@ class IntegrationStreamDecoder : public Http::ResponseDecoder, public Http::Stre const std::string& body() { return body_; } bool complete() { return saw_end_stream_; } bool reset() { return saw_reset_; } - Http::StreamResetReason reset_reason() { return reset_reason_; } - const Http::ResponseHeaderMap* continue_headers() { return continue_headers_.get(); } + Http::StreamResetReason resetReason() { return reset_reason_; } + const Http::ResponseHeaderMap* continueHeaders() { return continue_headers_.get(); 
} const Http::ResponseHeaderMap& headers() { return *headers_; } const Http::ResponseTrailerMapPtr& trailers() { return trailers_; } - const Http::MetadataMap& metadata_map() { return *metadata_map_; } + const Http::MetadataMap& metadataMap() { return *metadata_map_; } uint64_t keyCount(std::string key) { return duplicated_metadata_key_count_[key]; } void waitForContinueHeaders(); void waitForHeaders(); @@ -79,7 +79,7 @@ class IntegrationStreamDecoder : public Http::ResponseDecoder, public Http::Stre Http::ResponseHeaderMapPtr headers_; Http::ResponseTrailerMapPtr trailers_; Http::MetadataMapPtr metadata_map_{new Http::MetadataMap()}; - std::unordered_map duplicated_metadata_key_count_; + absl::node_hash_map duplicated_metadata_key_count_; bool waiting_for_end_stream_{}; bool saw_end_stream_{}; std::string body_; diff --git a/test/integration/protocol_integration_test.cc b/test/integration/protocol_integration_test.cc index 1078a1bfe6ff..fc5b7aa80642 100644 --- a/test/integration/protocol_integration_test.cc +++ b/test/integration/protocol_integration_test.cc @@ -974,7 +974,7 @@ TEST_P(ProtocolIntegrationTest, HeadersWithUnderscoresCauseRequestRejectedByDefa response->waitForReset(); codec_client_->close(); ASSERT_TRUE(response->reset()); - EXPECT_EQ(Http::StreamResetReason::RemoteReset, response->reset_reason()); + EXPECT_EQ(Http::StreamResetReason::RemoteReset, response->resetReason()); } EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("unexpected_underscore")); } @@ -1115,7 +1115,7 @@ TEST_P(DownstreamProtocolIntegrationTest, InvalidContentLength) { test_server_->waitForCounterGe("http.config_test.downstream_rq_4xx", 1); } else { ASSERT_TRUE(response->reset()); - EXPECT_EQ(Http::StreamResetReason::ConnectionTermination, response->reset_reason()); + EXPECT_EQ(Http::StreamResetReason::ConnectionTermination, response->resetReason()); } } @@ -1152,7 +1152,7 @@ TEST_P(DownstreamProtocolIntegrationTest, InvalidContentLengthAllowed) { EXPECT_EQ("400", response->headers().getStatusValue()); } else { ASSERT_TRUE(response->reset()); - EXPECT_EQ(Http::StreamResetReason::RemoteReset, response->reset_reason()); + EXPECT_EQ(Http::StreamResetReason::RemoteReset, response->resetReason()); } } @@ -1173,7 +1173,7 @@ TEST_P(DownstreamProtocolIntegrationTest, MultipleContentLengths) { EXPECT_EQ("400", response->headers().getStatusValue()); } else { ASSERT_TRUE(response->reset()); - EXPECT_EQ(Http::StreamResetReason::ConnectionTermination, response->reset_reason()); + EXPECT_EQ(Http::StreamResetReason::ConnectionTermination, response->resetReason()); } } @@ -1208,7 +1208,7 @@ TEST_P(DownstreamProtocolIntegrationTest, MultipleContentLengthsAllowed) { EXPECT_EQ("400", response->headers().getStatusValue()); } else { ASSERT_TRUE(response->reset()); - EXPECT_EQ(Http::StreamResetReason::RemoteReset, response->reset_reason()); + EXPECT_EQ(Http::StreamResetReason::RemoteReset, response->resetReason()); } } diff --git a/test/integration/xfcc_integration_test.cc b/test/integration/xfcc_integration_test.cc index 2b87808c372c..e14ee0ef7e30 100644 --- a/test/integration/xfcc_integration_test.cc +++ b/test/integration/xfcc_integration_test.cc @@ -2,7 +2,6 @@ #include #include -#include #include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" @@ -21,6 +20,7 @@ #include "test/test_common/printers.h" #include "test/test_common/utility.h" +#include "absl/container/node_hash_map.h" #include "gmock/gmock.h" #include 
"gtest/gtest.h" #include "integration.h" @@ -429,8 +429,8 @@ TEST_P(XfccIntegrationTest, TagExtractedNameGenerationTest) { // } // std::cout << "};" << std::endl; - std::unordered_map tag_extracted_counter_map; - std::unordered_map tag_extracted_gauge_map; + absl::node_hash_map tag_extracted_counter_map; + absl::node_hash_map tag_extracted_gauge_map; tag_extracted_counter_map = { {listenerStatPrefix("downstream_cx_total"), "listener.downstream_cx_total"}, @@ -748,7 +748,7 @@ TEST_P(XfccIntegrationTest, TagExtractedNameGenerationTest) { {"server.version", "server.version"}}; auto test_name_against_mapping = - [](const std::unordered_map& extracted_name_map, + [](const absl::node_hash_map& extracted_name_map, const Stats::Metric& metric) { auto it = extracted_name_map.find(metric.name()); // Ignore any metrics that are not found in the map for ease of addition diff --git a/test/mocks/runtime/mocks.h b/test/mocks/runtime/mocks.h index 8d99b4bc402d..53bea8ce81ad 100644 --- a/test/mocks/runtime/mocks.h +++ b/test/mocks/runtime/mocks.h @@ -2,7 +2,6 @@ #include #include -#include #include "envoy/runtime/runtime.h" #include "envoy/type/v3/percent.pb.h" @@ -10,6 +9,7 @@ #include "test/mocks/stats/mocks.h" +#include "absl/container/node_hash_map.h" #include "gmock/gmock.h" namespace Envoy { @@ -65,7 +65,7 @@ class MockLoader : public Loader { MOCK_METHOD(void, initialize, (Upstream::ClusterManager & cm)); MOCK_METHOD(const Snapshot&, snapshot, ()); MOCK_METHOD(SnapshotConstSharedPtr, threadsafeSnapshot, ()); - MOCK_METHOD(void, mergeValues, ((const std::unordered_map&))); + MOCK_METHOD(void, mergeValues, ((const absl::node_hash_map&))); MOCK_METHOD(void, startRtdsSubscriptions, (ReadyCallback)); MOCK_METHOD(Stats::Scope&, getRootScope, ()); diff --git a/test/mocks/server/config_tracker.h b/test/mocks/server/config_tracker.h index 09f516f0e03c..1c30cf919c76 100644 --- a/test/mocks/server/config_tracker.h +++ b/test/mocks/server/config_tracker.h @@ -4,6 +4,7 @@ #include "envoy/server/config_tracker.h" +#include "absl/container/node_hash_map.h" #include "gmock/gmock.h" namespace Envoy { @@ -23,7 +24,7 @@ class MockConfigTracker : public ConfigTracker { return EntryOwnerPtr{add_(key, std::move(callback))}; } - std::unordered_map config_tracker_callbacks_; + absl::node_hash_map config_tracker_callbacks_; }; } // namespace Server } // namespace Envoy diff --git a/test/mocks/thread_local/mocks.h b/test/mocks/thread_local/mocks.h index a4a68cf6881d..9bbd26a64465 100644 --- a/test/mocks/thread_local/mocks.h +++ b/test/mocks/thread_local/mocks.h @@ -1,7 +1,6 @@ #pragma once #include -#include #include "envoy/thread_local/thread_local.h" diff --git a/test/mocks/upstream/cluster_info.h b/test/mocks/upstream/cluster_info.h index e8f3d47869ca..8af0bfcb39df 100644 --- a/test/mocks/upstream/cluster_info.h +++ b/test/mocks/upstream/cluster_info.h @@ -60,7 +60,7 @@ class MockClusterTypedMetadata : public Config::TypedMetadataImpl>& data() { + absl::node_hash_map>& data() { return data_; } }; diff --git a/test/server/admin/admin_test.cc b/test/server/admin/admin_test.cc index 0281af125fb9..478354eea707 100644 --- a/test/server/admin/admin_test.cc +++ b/test/server/admin/admin_test.cc @@ -2,7 +2,6 @@ #include #include #include -#include #include #include "envoy/admin/v3/clusters.pb.h" diff --git a/test/server/admin/runtime_handler_test.cc b/test/server/admin/runtime_handler_test.cc index ec8c0953fc13..dfd7fc0bf1f9 100644 --- a/test/server/admin/runtime_handler_test.cc +++ b/test/server/admin/runtime_handler_test.cc @@ 
-81,7 +81,7 @@ TEST_P(AdminInstanceTest, RuntimeModify) { Runtime::MockLoader loader; EXPECT_CALL(server_, runtime()).WillRepeatedly(testing::ReturnPointee(&loader)); - std::unordered_map overrides; + absl::node_hash_map overrides; overrides["foo"] = "bar"; overrides["x"] = "42"; overrides["nothing"] = ""; @@ -97,7 +97,7 @@ TEST_P(AdminInstanceTest, RuntimeModifyParamsInBody) { const std::string key = "routing.traffic_shift.foo"; const std::string value = "numerator: 1\ndenominator: TEN_THOUSAND\n"; - const std::unordered_map overrides = {{key, value}}; + const absl::node_hash_map overrides = {{key, value}}; EXPECT_CALL(loader, mergeValues(overrides)).Times(1); const std::string body = fmt::format("{}={}", key, value); diff --git a/test/server/filter_chain_benchmark_test.cc b/test/server/filter_chain_benchmark_test.cc index dbd24ecff6cc..a04047346b30 100644 --- a/test/server/filter_chain_benchmark_test.cc +++ b/test/server/filter_chain_benchmark_test.cc @@ -1,5 +1,4 @@ #include -#include #include "envoy/config/listener/v3/listener.pb.h" #include "envoy/config/listener/v3/listener_components.pb.h" diff --git a/test/test_common/environment.cc b/test/test_common/environment.cc index 4d65f0e78617..9269cc73bcce 100644 --- a/test/test_common/environment.cc +++ b/test/test_common/environment.cc @@ -5,7 +5,6 @@ #include #include #include -#include #include #include "envoy/common/platform.h" @@ -17,6 +16,8 @@ #include "common/common/utility.h" #include "common/filesystem/directory.h" +#include "absl/container/node_hash_map.h" + #ifdef ENVOY_HANDLE_SIGNALS #include "common/signal/signal_action.h" #endif @@ -289,7 +290,7 @@ const std::string TestEnvironment::unixDomainSocketDirectory() { std::string TestEnvironment::substitute(const std::string& str, Network::Address::IpVersion version) { - const std::unordered_map path_map = { + const absl::node_hash_map path_map = { {"test_tmpdir", TestEnvironment::temporaryDirectory()}, {"test_udsdir", TestEnvironment::unixDomainSocketDirectory()}, {"test_rundir", runfiles_ != nullptr ? TestEnvironment::runfilesDirectory() : "invalid"}, diff --git a/test/test_common/environment.h b/test/test_common/environment.h index 02b324ef5191..9434c59b7e6a 100644 --- a/test/test_common/environment.h +++ b/test/test_common/environment.h @@ -2,7 +2,6 @@ #include #include -#include #include #include "envoy/network/address.h" @@ -10,6 +9,7 @@ #include "common/json/json_loader.h" +#include "absl/container/node_hash_map.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "absl/types/optional.h" @@ -18,9 +18,9 @@ namespace Envoy { class TestEnvironment { public: - using PortMap = std::unordered_map; + using PortMap = absl::node_hash_map; - using ParamMap = std::unordered_map; + using ParamMap = absl::node_hash_map; /** * Perform common initialization steps needed to run a test binary. 
This diff --git a/test/tools/router_check/router.cc b/test/tools/router_check/router.cc index e1aaf0782e3c..e79671f255d2 100644 --- a/test/tools/router_check/router.cc +++ b/test/tools/router_check/router.cc @@ -3,7 +3,6 @@ #include #include #include -#include #include "envoy/config/core/v3/base.pb.h" #include "envoy/config/route/v3/route.pb.h" diff --git a/tools/clang_tools/api_booster/BUILD b/tools/clang_tools/api_booster/BUILD index 296e318c01fe..d6affe19640b 100644 --- a/tools/clang_tools/api_booster/BUILD +++ b/tools/clang_tools/api_booster/BUILD @@ -24,6 +24,7 @@ clang_tools_cc_library( srcs = ["proto_cxx_utils.cc"], hdrs = ["proto_cxx_utils.h"], deps = [ + "@com_google_absl//absl/container:node_hash_map", "@com_google_absl//absl/strings", "@com_google_absl//absl/types:optional", ], diff --git a/tools/clang_tools/api_booster/main.cc b/tools/clang_tools/api_booster/main.cc index b71d9542752e..976ddc969fcd 100644 --- a/tools/clang_tools/api_booster/main.cc +++ b/tools/clang_tools/api_booster/main.cc @@ -27,6 +27,7 @@ #include "tools/type_whisperer/api_type_db.h" +#include "absl/container/node_hash_map.h" #include "absl/strings/str_cat.h" // Enable to see debug log messages. @@ -243,7 +244,7 @@ class ApiBooster : public clang::ast_matchers::MatchFinder::MatchCallback, const clang::SourceManager& source_manager) { auto* direct_callee = call_expr.getDirectCallee(); if (direct_callee != nullptr) { - const std::unordered_map ValidateNameToArg = { + const absl::node_hash_map ValidateNameToArg = { {"loadFromYamlAndValidate", 1}, {"loadFromFileAndValidate", 1}, {"downcastAndValidate", -1}, diff --git a/tools/clang_tools/api_booster/proto_cxx_utils.cc b/tools/clang_tools/api_booster/proto_cxx_utils.cc index 42cc92e7c4c4..194bdc0e6bf7 100644 --- a/tools/clang_tools/api_booster/proto_cxx_utils.cc +++ b/tools/clang_tools/api_booster/proto_cxx_utils.cc @@ -40,10 +40,10 @@ std::string ProtoCxxUtils::protoToCxxType(const std::string& proto_type_name, bo absl::optional ProtoCxxUtils::renameMethod(absl::string_view method_name, - const std::unordered_map renames) { + const absl::node_hash_map renames) { // Simple O(N * M) match, where M is constant (the set of prefixes/suffixes) so // should be fine. 
- for (const auto field_rename : renames) { + for (const auto& field_rename : renames) { const std::vector GeneratedMethodPrefixes = { "clear_", "set_", "has_", "mutable_", "set_allocated_", "release_", "add_", "", }; @@ -63,7 +63,7 @@ ProtoCxxUtils::renameMethod(absl::string_view method_name, absl::optional ProtoCxxUtils::renameConstant(absl::string_view constant_name, - const std::unordered_map renames) { + const absl::node_hash_map renames) { if (constant_name.size() < 2 || constant_name[0] != 'k' || !isupper(constant_name[1])) { return {}; } @@ -91,7 +91,7 @@ ProtoCxxUtils::renameConstant(absl::string_view constant_name, absl::optional ProtoCxxUtils::renameEnumValue(absl::string_view enum_value_name, - const std::unordered_map renames) { + const absl::node_hash_map renames) { const auto it = renames.find(std::string(enum_value_name)); if (it == renames.cend()) { return {}; diff --git a/tools/clang_tools/api_booster/proto_cxx_utils.h b/tools/clang_tools/api_booster/proto_cxx_utils.h index 22b816455bc9..10eff61a7910 100644 --- a/tools/clang_tools/api_booster/proto_cxx_utils.h +++ b/tools/clang_tools/api_booster/proto_cxx_utils.h @@ -1,8 +1,8 @@ #pragma once #include -#include +#include "absl/container/node_hash_map.h" #include "absl/strings/str_join.h" #include "absl/strings/str_split.h" #include "absl/types/optional.h" @@ -25,18 +25,18 @@ class ProtoCxxUtils { // field in proto, and if so, return the new method name. static absl::optional renameMethod(absl::string_view method_name, - const std::unordered_map renames); + const absl::node_hash_map renames); // Given a constant, e.g. kFooBar, determine if it needs upgrading. We need // this for synthesized oneof cases. static absl::optional renameConstant(absl::string_view constant_name, - const std::unordered_map renames); + const absl::node_hash_map renames); // Given an enum value, e.g. FOO_BAR determine if it needs upgrading. static absl::optional renameEnumValue(absl::string_view enum_value_name, - const std::unordered_map renames); + const absl::node_hash_map renames); // Convert from a protobuf type, e.g. foo.bar.v2, to a C++ type, e.g. // foo::bar::v2. diff --git a/tools/clang_tools/api_booster/proto_cxx_utils_test.cc b/tools/clang_tools/api_booster/proto_cxx_utils_test.cc index 6b4e0789ba10..2a06413bd4d2 100644 --- a/tools/clang_tools/api_booster/proto_cxx_utils_test.cc +++ b/tools/clang_tools/api_booster/proto_cxx_utils_test.cc @@ -1,5 +1,3 @@ -#include - #include "gtest/gtest.h" #include "proto_cxx_utils.h" @@ -32,7 +30,7 @@ TEST(ProtoCxxUtils, ProtoToCxxType) { // Validate proto field accessor upgrades. TEST(ProtoCxxUtils, RenameMethod) { - const std::unordered_map renames = { + const absl::node_hash_map renames = { {"foo", "bar"}, {"bar", "baz"}, }; @@ -52,7 +50,7 @@ TEST(ProtoCxxUtils, RenameMethod) { // Validate proto constant upgrades. TEST(ProtoCxxUtils, RenameConstant) { - const std::unordered_map renames = { + const absl::node_hash_map renames = { {"foo_bar", "bar_foo"}, {"foo_baz", "baz"}, }; @@ -63,7 +61,7 @@ TEST(ProtoCxxUtils, RenameConstant) { // Validate proto enum value upgrades. 
TEST(ProtoCxxUtils, RenameEnumValue) { - const std::unordered_map renames = { + const absl::node_hash_map renames = { {"FOO_BAR", "BAR_FOO"}, }; EXPECT_EQ(absl::nullopt, ProtoCxxUtils::renameEnumValue("FOO_BAZ", renames)); diff --git a/tools/code_format/check_format.py b/tools/code_format/check_format.py index b647b827fc3c..d38b731a0e47 100755 --- a/tools/code_format/check_format.py +++ b/tools/code_format/check_format.py @@ -647,6 +647,15 @@ def checkSourceLine(line, file_path, reportError): reportError("Don't use strptime; use absl::FormatTime instead") if tokenInLine("strerror", line): reportError("Don't use strerror; use Envoy::errorDetails instead") + # Prefer using abseil hash maps/sets over std::unordered_map/set for performance optimizations and + # non-deterministic iteration order that exposes faulty assertions. + # See: https://abseil.io/docs/cpp/guides/container#hash-tables + if "std::unordered_map" in line: + reportError("Don't use std::unordered_map; use absl::flat_hash_map instead or " + "absl::node_hash_map if pointer stability of keys/values is required") + if "std::unordered_set" in line: + reportError("Don't use std::unordered_set; use absl::flat_hash_set instead or " + "absl::node_hash_set if pointer stability of keys/values is required") if "std::atomic_" in line: # The std::atomic_* free functions are functionally equivalent to calling # operations on std::atomic objects, so prefer to use that instead. diff --git a/tools/code_format/check_format_test_helper.py b/tools/code_format/check_format_test_helper.py index acf2cd9f8700..9cb00aa50f86 100755 --- a/tools/code_format/check_format_test_helper.py +++ b/tools/code_format/check_format_test_helper.py @@ -232,6 +232,12 @@ def runChecks(): "Registry::InjectFactory instead.") errors += checkUnfixableError("strerror.cc", "Don't use strerror; use Envoy::errorDetails instead") + errors += checkUnfixableError( + "std_unordered_map.cc", "Don't use std::unordered_map; use absl::flat_hash_map instead " + + "or absl::node_hash_map if pointer stability of keys/values is required") + errors += checkUnfixableError( + "std_unordered_set.cc", "Don't use std::unordered_set; use absl::flat_hash_set instead " + + "or absl::node_hash_set if pointer stability of keys/values is required") # The following files have errors that can be automatically fixed. 
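The new check_format rule (and its test fixtures below) steers std::unordered_map/set users toward absl::flat_hash_map/set by default, reserving the node_* variants for cases that need pointer stability. A short sketch of that distinction (illustrative types only, not part of the patch):

#include <string>

#include "absl/container/flat_hash_map.h"
#include "absl/container/node_hash_map.h"

struct Config {
  std::string name;
};

int main() {
  // Default choice: flat_hash_map. Compact and fast, but elements may move on
  // rehash, so do not hold pointers or references into it.
  absl::flat_hash_map<std::string, Config> by_value;
  by_value["a"] = Config{"a"};

  // node_hash_map allocates each element separately, so pointers to keys and
  // values stay valid across rehashes -- the "pointer stability of keys/values"
  // that the style check's error message refers to.
  absl::node_hash_map<std::string, Config> stable;
  Config* held = &stable["a"];
  for (int i = 0; i < 1000; ++i) {  // force growth and rehashing
    stable[std::to_string(i)] = Config{std::to_string(i)};
  }
  held->name = "updated";  // still valid
  return 0;
}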
errors += checkAndFixError("over_enthusiastic_spaces.cc", diff --git a/tools/testdata/check_format/std_unordered_map.cc b/tools/testdata/check_format/std_unordered_map.cc new file mode 100644 index 000000000000..ed838faf2cd8 --- /dev/null +++ b/tools/testdata/check_format/std_unordered_map.cc @@ -0,0 +1,7 @@ +#include + +namespace Envoy { + +std::unordered_map foo; + +} // namespace Envoy diff --git a/tools/testdata/check_format/std_unordered_set.cc b/tools/testdata/check_format/std_unordered_set.cc new file mode 100644 index 000000000000..258bed7836c8 --- /dev/null +++ b/tools/testdata/check_format/std_unordered_set.cc @@ -0,0 +1,7 @@ +#include + +namespace Envoy { + +std::unordered_set foo; + +} // namespace Envoy diff --git a/tools/type_whisperer/BUILD b/tools/type_whisperer/BUILD index 4b2b7735de55..27463e42a4c6 100644 --- a/tools/type_whisperer/BUILD +++ b/tools/type_whisperer/BUILD @@ -101,6 +101,7 @@ envoy_cc_library( "//source/common/protobuf", "//tools/type_whisperer:api_type_db_proto_cc_proto", "@com_github_cncf_udpa//udpa/annotations:pkg_cc_proto", + "@com_google_absl//absl/container:node_hash_map", ], ) diff --git a/tools/type_whisperer/api_type_db.h b/tools/type_whisperer/api_type_db.h index cec5627588ea..853364646e8c 100644 --- a/tools/type_whisperer/api_type_db.h +++ b/tools/type_whisperer/api_type_db.h @@ -1,8 +1,8 @@ #pragma once #include -#include +#include "absl/container/node_hash_map.h" #include "absl/strings/string_view.h" #include "absl/types/optional.h" @@ -25,7 +25,7 @@ struct TypeInformation { const bool enum_type_; // Field or enum value renames. - std::unordered_map renames_; + absl::node_hash_map renames_; }; // We don't expose the raw API type database to consumers, as this requires RTTI From 7cf3efa4b3227d61d5e1be9d99e54071c3a6d7f1 Mon Sep 17 00:00:00 2001 From: Snow Pettersen Date: Tue, 28 Jul 2020 09:43:35 -0400 Subject: [PATCH 761/909] hcm: introduce FilterManager (#12295) Moves all the filter handling into an inner class, in preparation for splitting the behavior out of the HCM. As an initial PR, this relies on delegating to the ActiveStream for basically all operations not directly related to the filters. This is a relatively minor refactor in that most of the FM logic just delegates back up to the ActiveStream. In subsequent PRs this logic will be migrated to the FM piece by piece, until the AS and the FM no longer are tightly coupled, at which point FM can be moved out into its own file. Risk Level: Medium Testing: Existing tests Docs Changes: n/a Release Notes: n/a Part of #10455 Signed-off-by: Snow Pettersen --- source/common/http/conn_manager_impl.cc | 731 +++++++++++++++--------- source/common/http/conn_manager_impl.h | 220 ++++--- 2 files changed, 554 insertions(+), 397 deletions(-) diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index 78cfb69a599e..602cba3aebd0 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -227,19 +227,10 @@ void ConnectionManagerImpl::doDeferredStreamDestroy(ActiveStream& stream) { stream.stream_idle_timer_->disableTimer(); stream.stream_idle_timer_ = nullptr; } - stream.disarmRequestTimeout(); + stream.filter_manager_.disarmRequestTimeout(); stream.state_.destroyed_ = true; - for (auto& filter : stream.decoder_filters_) { - filter->handle_->onDestroy(); - } - - for (auto& filter : stream.encoder_filters_) { - // Do not call on destroy twice for dual registered filters. 
- if (!filter->dual_filter_) { - filter->handle_->onDestroy(); - } - } + stream.filter_manager_.destroyFilters(); read_callbacks_->connection().dispatcher().deferredDelete(stream.removeFromList(streams_)); @@ -525,7 +516,7 @@ void ConnectionManagerImpl::RdsRouteConfigUpdateRequester::requestRouteConfigUpd } ConnectionManagerImpl::ActiveStream::ActiveStream(ConnectionManagerImpl& connection_manager) - : connection_manager_(connection_manager), + : connection_manager_(connection_manager), filter_manager_(*this), stream_id_(connection_manager.random_generator_.random()), request_response_timespan_(new Stats::HistogramCompletableTimespanImpl( connection_manager_.stats_.named_.downstream_rq_time_, connection_manager_.timeSource())), @@ -690,7 +681,7 @@ void ConnectionManagerImpl::ActiveStream::onStreamMaxDurationReached() { connection_manager_.doEndStream(*this); } -void ConnectionManagerImpl::ActiveStream::addStreamDecoderFilterWorker( +void ConnectionManagerImpl::FilterManager::addStreamDecoderFilterWorker( StreamDecoderFilterSharedPtr filter, bool dual_filter) { ActiveStreamDecoderFilterPtr wrapper(new ActiveStreamDecoderFilter(*this, filter, dual_filter)); filter->setDecoderFilterCallbacks(*wrapper); @@ -705,7 +696,7 @@ void ConnectionManagerImpl::ActiveStream::addStreamDecoderFilterWorker( wrapper->moveIntoListBack(std::move(wrapper), decoder_filters_); } -void ConnectionManagerImpl::ActiveStream::addStreamEncoderFilterWorker( +void ConnectionManagerImpl::FilterManager::addStreamEncoderFilterWorker( StreamEncoderFilterSharedPtr filter, bool dual_filter) { ActiveStreamEncoderFilterPtr wrapper(new ActiveStreamEncoderFilter(*this, filter, dual_filter)); filter->setEncoderFilterCallbacks(*wrapper); @@ -839,7 +830,7 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& he // We end the decode here only if the request is header only. If we convert the request to a // header only, the stream will be marked as done once a subsequent decodeData/decodeTrailers is // called with end_stream=true. - maybeEndDecode(end_stream); + filter_manager_.maybeEndDecode(end_stream); // Drop new requests when overloaded as soon as we have decoded the headers. if (connection_manager_.overload_stop_accepting_requests_ref_ == @@ -1025,7 +1016,7 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& he traceRequest(); } - decodeHeaders(nullptr, *request_headers_, end_stream); + filter_manager_.decodeHeaders(nullptr, *request_headers_, end_stream); // Reset it here for both global and overridden cases. resetIdleTimer(); @@ -1088,54 +1079,57 @@ void ConnectionManagerImpl::ActiveStream::traceRequest() { } } -void ConnectionManagerImpl::ActiveStream::maybeContinueDecoding( +void ConnectionManagerImpl::FilterManager::maybeContinueDecoding( const std::list::iterator& continue_data_entry) { if (continue_data_entry != decoder_filters_.end()) { // We use the continueDecoding() code since it will correctly handle not calling // decodeHeaders() again. Fake setting StopSingleIteration since the continueDecoding() code // expects it. 
- ASSERT(buffered_request_data_); + ASSERT(active_stream_.buffered_request_data_); (*continue_data_entry)->iteration_state_ = ActiveStreamFilterBase::IterationState::StopSingleIteration; (*continue_data_entry)->continueDecoding(); } } -void ConnectionManagerImpl::ActiveStream::decodeHeaders(ActiveStreamDecoderFilter* filter, - RequestHeaderMap& headers, - bool end_stream) { +void ConnectionManagerImpl::FilterManager::decodeHeaders(ActiveStreamDecoderFilter* filter, + RequestHeaderMap& headers, + bool end_stream) { // Headers filter iteration should always start with the next filter if available. std::list::iterator entry = commonDecodePrefix(filter, FilterIterationStartState::AlwaysStartFromNext); std::list::iterator continue_data_entry = decoder_filters_.end(); for (; entry != decoder_filters_.end(); entry++) { - ASSERT(!(state_.filter_call_state_ & FilterCallState::DecodeHeaders)); - state_.filter_call_state_ |= FilterCallState::DecodeHeaders; - (*entry)->end_stream_ = state_.decoding_headers_only_ || + ASSERT( + !(active_stream_.state_.filter_call_state_ & ActiveStream::FilterCallState::DecodeHeaders)); + active_stream_.state_.filter_call_state_ |= ActiveStream::FilterCallState::DecodeHeaders; + (*entry)->end_stream_ = active_stream_.state_.decoding_headers_only_ || (end_stream && continue_data_entry == decoder_filters_.end()); FilterHeadersStatus status = (*entry)->decodeHeaders(headers, (*entry)->end_stream_); ASSERT(!(status == FilterHeadersStatus::ContinueAndEndStream && (*entry)->end_stream_)); - state_.filter_call_state_ &= ~FilterCallState::DecodeHeaders; - ENVOY_STREAM_LOG(trace, "decode headers called: filter={} status={}", *this, + active_stream_.state_.filter_call_state_ &= ~ActiveStream::FilterCallState::DecodeHeaders; + ENVOY_STREAM_LOG(trace, "decode headers called: filter={} status={}", active_stream_, static_cast((*entry).get()), static_cast(status)); const bool new_metadata_added = processNewlyAddedMetadata(); // If end_stream is set in headers, and a filter adds new metadata, we need to delay end_stream // in headers by inserting an empty data frame with end_stream set. The empty data frame is sent // after the new metadata. - if ((*entry)->end_stream_ && new_metadata_added && !buffered_request_data_) { + if ((*entry)->end_stream_ && new_metadata_added && !active_stream_.buffered_request_data_) { Buffer::OwnedImpl empty_data(""); - ENVOY_STREAM_LOG( - trace, "inserting an empty data frame for end_stream due metadata being added.", *this); + ENVOY_STREAM_LOG(trace, + "inserting an empty data frame for end_stream due metadata being added.", + active_stream_); // Metadata frame doesn't carry end of stream bit. We need an empty data frame to end the // stream. addDecodedData(*((*entry).get()), empty_data, true); } (*entry)->decode_headers_called_ = true; - if (!(*entry)->commonHandleAfterHeadersCallback(status, state_.decoding_headers_only_) && + if (!(*entry)->commonHandleAfterHeadersCallback(status, + active_stream_.state_.decoding_headers_only_) && std::next(entry) != decoder_filters_.end()) { // Stop iteration IFF this is not the last filter. If it is the last filter, continue with // processing since we need to handle the case where a terminal filter wants to buffer, but @@ -1146,7 +1140,8 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(ActiveStreamDecoderFilte // Here we handle the case where we have a header only request, but a filter adds a body // to it. We need to not raise end_stream = true to further filters during inline iteration. 
- if (end_stream && buffered_request_data_ && continue_data_entry == decoder_filters_.end()) { + if (end_stream && active_stream_.buffered_request_data_ && + continue_data_entry == decoder_filters_.end()) { continue_data_entry = entry; } } @@ -1161,39 +1156,41 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(ActiveStreamDecoderFilte void ConnectionManagerImpl::ActiveStream::decodeData(Buffer::Instance& data, bool end_stream) { ScopeTrackerScopeState scope(this, connection_manager_.read_callbacks_->connection().dispatcher()); - maybeEndDecode(end_stream); + filter_manager_.maybeEndDecode(end_stream); stream_info_.addBytesReceived(data.length()); - decodeData(nullptr, data, end_stream, FilterIterationStartState::CanStartFromCurrent); + filter_manager_.decodeData(nullptr, data, end_stream, + FilterManager::FilterIterationStartState::CanStartFromCurrent); } -void ConnectionManagerImpl::ActiveStream::decodeData( +void ConnectionManagerImpl::FilterManager::decodeData( ActiveStreamDecoderFilter* filter, Buffer::Instance& data, bool end_stream, FilterIterationStartState filter_iteration_start_state) { - ScopeTrackerScopeState scope(this, - connection_manager_.read_callbacks_->connection().dispatcher()); - resetIdleTimer(); + ScopeTrackerScopeState scope( + &active_stream_, + active_stream_.connection_manager_.read_callbacks_->connection().dispatcher()); + active_stream_.resetIdleTimer(); // If we previously decided to decode only the headers, do nothing here. - if (state_.decoding_headers_only_) { + if (active_stream_.state_.decoding_headers_only_) { return; } // If a response is complete or a reset has been sent, filters do not care about further body // data. Just drop it. - if (state_.local_complete_) { + if (active_stream_.state_.local_complete_) { return; } auto trailers_added_entry = decoder_filters_.end(); - const bool trailers_exists_at_start = request_trailers_ != nullptr; + const bool trailers_exists_at_start = active_stream_.request_trailers_ != nullptr; // Filter iteration may start at the current filter. std::list::iterator entry = commonDecodePrefix(filter, filter_iteration_start_state); for (; entry != decoder_filters_.end(); entry++) { // If the filter pointed by entry has stopped for all frame types, return now. - if (handleDataIfStopAll(**entry, data, state_.decoder_filters_streaming_)) { + if (handleDataIfStopAll(**entry, data, active_stream_.state_.decoder_filters_streaming_)) { return; } // If end_stream_ is marked for a filter, the data is not for this filter and filters after. @@ -1229,38 +1226,40 @@ void ConnectionManagerImpl::ActiveStream::decodeData( if ((*entry)->end_stream_) { return; } - ASSERT(!(state_.filter_call_state_ & FilterCallState::DecodeData)); + ASSERT(!(active_stream_.state_.filter_call_state_ & ActiveStream::FilterCallState::DecodeData)); // We check the request_trailers_ pointer here in case addDecodedTrailers // is called in decodeData during a previous filter invocation, at which point we communicate to // the current and future filters that the stream has not yet ended. 
if (end_stream) { - state_.filter_call_state_ |= FilterCallState::LastDataFrame; + active_stream_.state_.filter_call_state_ |= ActiveStream::FilterCallState::LastDataFrame; } - recordLatestDataFilter(entry, state_.latest_data_decoding_filter_, decoder_filters_); + recordLatestDataFilter(entry, active_stream_.state_.latest_data_decoding_filter_, + decoder_filters_); - state_.filter_call_state_ |= FilterCallState::DecodeData; - (*entry)->end_stream_ = end_stream && !request_trailers_; + active_stream_.state_.filter_call_state_ |= ActiveStream::FilterCallState::DecodeData; + (*entry)->end_stream_ = end_stream && !active_stream_.request_trailers_; FilterDataStatus status = (*entry)->handle_->decodeData(data, (*entry)->end_stream_); if ((*entry)->end_stream_) { (*entry)->handle_->decodeComplete(); } - state_.filter_call_state_ &= ~FilterCallState::DecodeData; + active_stream_.state_.filter_call_state_ &= ~ActiveStream::FilterCallState::DecodeData; if (end_stream) { - state_.filter_call_state_ &= ~FilterCallState::LastDataFrame; + active_stream_.state_.filter_call_state_ &= ~ActiveStream::FilterCallState::LastDataFrame; } - ENVOY_STREAM_LOG(trace, "decode data called: filter={} status={}", *this, + ENVOY_STREAM_LOG(trace, "decode data called: filter={} status={}", active_stream_, static_cast((*entry).get()), static_cast(status)); processNewlyAddedMetadata(); - if (!trailers_exists_at_start && request_trailers_ && + if (!trailers_exists_at_start && active_stream_.request_trailers_ && trailers_added_entry == decoder_filters_.end()) { trailers_added_entry = entry; } - if (!(*entry)->commonHandleAfterDataCallback(status, data, state_.decoder_filters_streaming_) && + if (!(*entry)->commonHandleAfterDataCallback( + status, data, active_stream_.state_.decoder_filters_streaming_) && std::next(entry) != decoder_filters_.end()) { // Stop iteration IFF this is not the last filter. If it is the last filter, continue with // processing since we need to handle the case where a terminal filter wants to buffer, but @@ -1272,7 +1271,7 @@ void ConnectionManagerImpl::ActiveStream::decodeData( // If trailers were adding during decodeData we need to trigger decodeTrailers in order // to allow filters to process the trailers. if (trailers_added_entry != decoder_filters_.end()) { - decodeTrailers(trailers_added_entry->get(), *request_trailers_); + decodeTrailers(trailers_added_entry->get(), *active_stream_.request_trailers_); } if (end_stream) { @@ -1280,29 +1279,31 @@ void ConnectionManagerImpl::ActiveStream::decodeData( } } -RequestTrailerMap& ConnectionManagerImpl::ActiveStream::addDecodedTrailers() { +RequestTrailerMap& ConnectionManagerImpl::FilterManager::addDecodedTrailers() { // Trailers can only be added during the last data frame (i.e. end_stream = true). - ASSERT(state_.filter_call_state_ & FilterCallState::LastDataFrame); + ASSERT(active_stream_.state_.filter_call_state_ & ActiveStream::FilterCallState::LastDataFrame); // Trailers can only be added once. 
- ASSERT(!request_trailers_); + ASSERT(!active_stream_.request_trailers_); - request_trailers_ = RequestTrailerMapImpl::create(); - return *request_trailers_; + active_stream_.request_trailers_ = RequestTrailerMapImpl::create(); + return *active_stream_.request_trailers_; } -void ConnectionManagerImpl::ActiveStream::addDecodedData(ActiveStreamDecoderFilter& filter, - Buffer::Instance& data, bool streaming) { - if (state_.filter_call_state_ == 0 || - (state_.filter_call_state_ & FilterCallState::DecodeHeaders) || - (state_.filter_call_state_ & FilterCallState::DecodeData) || - ((state_.filter_call_state_ & FilterCallState::DecodeTrailers) && !filter.canIterate())) { +void ConnectionManagerImpl::FilterManager::addDecodedData(ActiveStreamDecoderFilter& filter, + Buffer::Instance& data, bool streaming) { + if (active_stream_.state_.filter_call_state_ == 0 || + (active_stream_.state_.filter_call_state_ & ActiveStream::FilterCallState::DecodeHeaders) || + (active_stream_.state_.filter_call_state_ & ActiveStream::FilterCallState::DecodeData) || + ((active_stream_.state_.filter_call_state_ & ActiveStream::FilterCallState::DecodeTrailers) && + !filter.canIterate())) { // Make sure if this triggers watermarks, the correct action is taken. - state_.decoder_filters_streaming_ = streaming; + active_stream_.state_.decoder_filters_streaming_ = streaming; // If no call is happening or we are in the decode headers/data callback, buffer the data. // Inline processing happens in the decodeHeaders() callback if necessary. filter.commonHandleBufferData(data); - } else if (state_.filter_call_state_ & FilterCallState::DecodeTrailers) { + } else if (active_stream_.state_.filter_call_state_ & + ActiveStream::FilterCallState::DecodeTrailers) { // In this case we need to inline dispatch the data to further filters. If those filters // choose to buffer/stop iteration that's fine. decodeData(&filter, data, false, FilterIterationStartState::AlwaysStartFromNext); @@ -1313,28 +1314,28 @@ void ConnectionManagerImpl::ActiveStream::addDecodedData(ActiveStreamDecoderFilt } } -MetadataMapVector& ConnectionManagerImpl::ActiveStream::addDecodedMetadata() { - return *getRequestMetadataMapVector(); +MetadataMapVector& ConnectionManagerImpl::FilterManager::addDecodedMetadata() { + return *active_stream_.getRequestMetadataMapVector(); } void ConnectionManagerImpl::ActiveStream::decodeTrailers(RequestTrailerMapPtr&& trailers) { ScopeTrackerScopeState scope(this, connection_manager_.read_callbacks_->connection().dispatcher()); resetIdleTimer(); - maybeEndDecode(true); + filter_manager_.maybeEndDecode(true); request_trailers_ = std::move(trailers); - decodeTrailers(nullptr, *request_trailers_); + filter_manager_.decodeTrailers(nullptr, *request_trailers_); } -void ConnectionManagerImpl::ActiveStream::decodeTrailers(ActiveStreamDecoderFilter* filter, - RequestTrailerMap& trailers) { +void ConnectionManagerImpl::FilterManager::decodeTrailers(ActiveStreamDecoderFilter* filter, + RequestTrailerMap& trailers) { // If we previously decided to decode only the headers, do nothing here. - if (state_.decoding_headers_only_) { + if (active_stream_.state_.decoding_headers_only_) { return; } // See decodeData() above for why we check local_complete_ here. 
- if (state_.local_complete_) { + if (active_stream_.state_.local_complete_) { return; } @@ -1348,13 +1349,14 @@ void ConnectionManagerImpl::ActiveStream::decodeTrailers(ActiveStreamDecoderFilt return; } - ASSERT(!(state_.filter_call_state_ & FilterCallState::DecodeTrailers)); - state_.filter_call_state_ |= FilterCallState::DecodeTrailers; + ASSERT(!(active_stream_.state_.filter_call_state_ & + ActiveStream::FilterCallState::DecodeTrailers)); + active_stream_.state_.filter_call_state_ |= ActiveStream::FilterCallState::DecodeTrailers; FilterTrailersStatus status = (*entry)->handle_->decodeTrailers(trailers); (*entry)->handle_->decodeComplete(); (*entry)->end_stream_ = true; - state_.filter_call_state_ &= ~FilterCallState::DecodeTrailers; - ENVOY_STREAM_LOG(trace, "decode trailers called: filter={} status={}", *this, + active_stream_.state_.filter_call_state_ &= ~ActiveStream::FilterCallState::DecodeTrailers; + ENVOY_STREAM_LOG(trace, "decode trailers called: filter={} status={}", active_stream_, static_cast((*entry).get()), static_cast(status)); processNewlyAddedMetadata(); @@ -1371,11 +1373,11 @@ void ConnectionManagerImpl::ActiveStream::decodeMetadata(MetadataMapPtr&& metada // After going through filters, the ownership of metadata_map will be passed to terminal filter. // The terminal filter may encode metadata_map to the next hop immediately or store metadata_map // and encode later when connection pool is ready. - decodeMetadata(nullptr, *metadata_map); + filter_manager_.decodeMetadata(nullptr, *metadata_map); } -void ConnectionManagerImpl::ActiveStream::decodeMetadata(ActiveStreamDecoderFilter* filter, - MetadataMap& metadata_map) { +void ConnectionManagerImpl::FilterManager::decodeMetadata(ActiveStreamDecoderFilter* filter, + MetadataMap& metadata_map) { // Filter iteration may start at the current filter. 
std::list::iterator entry = commonDecodePrefix(filter, FilterIterationStartState::CanStartFromCurrent); @@ -1392,36 +1394,36 @@ void ConnectionManagerImpl::ActiveStream::decodeMetadata(ActiveStreamDecoderFilt } FilterMetadataStatus status = (*entry)->handle_->decodeMetadata(metadata_map); - ENVOY_STREAM_LOG(trace, "decode metadata called: filter={} status={}, metadata: {}", *this, - static_cast((*entry).get()), static_cast(status), - metadata_map); + ENVOY_STREAM_LOG(trace, "decode metadata called: filter={} status={}, metadata: {}", + active_stream_, static_cast((*entry).get()), + static_cast(status), metadata_map); } } -void ConnectionManagerImpl::ActiveStream::maybeEndDecode(bool end_stream) { - ASSERT(!state_.remote_complete_); - state_.remote_complete_ = end_stream; +void ConnectionManagerImpl::FilterManager::maybeEndDecode(bool end_stream) { + ASSERT(!active_stream_.state_.remote_complete_); + active_stream_.state_.remote_complete_ = end_stream; if (end_stream) { - stream_info_.onLastDownstreamRxByteReceived(); - ENVOY_STREAM_LOG(debug, "request end stream", *this); + active_stream_.stream_info_.onLastDownstreamRxByteReceived(); + ENVOY_STREAM_LOG(debug, "request end stream", active_stream_); } } -void ConnectionManagerImpl::ActiveStream::disarmRequestTimeout() { - if (request_timer_) { - request_timer_->disableTimer(); +void ConnectionManagerImpl::FilterManager::disarmRequestTimeout() { + if (active_stream_.request_timer_) { + active_stream_.request_timer_->disableTimer(); } } std::list::iterator -ConnectionManagerImpl::ActiveStream::commonEncodePrefix( +ConnectionManagerImpl::FilterManager::commonEncodePrefix( ActiveStreamEncoderFilter* filter, bool end_stream, FilterIterationStartState filter_iteration_start_state) { // Only do base state setting on the initial call. Subsequent calls for filtering do not touch // the base state. if (filter == nullptr) { - ASSERT(!state_.local_complete_); - state_.local_complete_ = end_stream; + ASSERT(!active_stream_.state_.local_complete_); + active_stream_.state_.local_complete_ = end_stream; return encoder_filters_.begin(); } @@ -1435,7 +1437,7 @@ ConnectionManagerImpl::ActiveStream::commonEncodePrefix( } std::list::iterator -ConnectionManagerImpl::ActiveStream::commonDecodePrefix( +ConnectionManagerImpl::FilterManager::commonDecodePrefix( ActiveStreamDecoderFilter* filter, FilterIterationStartState filter_iteration_start_state) { if (!filter) { return decoder_filters_.begin(); @@ -1558,8 +1560,8 @@ void ConnectionManagerImpl::ActiveStream::sendLocalReply( if (response_headers_.get() == nullptr) { // If the response has not started at all, send the response through the filter chain. 
- sendLocalReplyViaFilterChain(is_grpc_request, code, body, modify_headers, is_head_request, - grpc_status, details); + filter_manager_.sendLocalReplyViaFilterChain(is_grpc_request, code, body, modify_headers, + is_head_request, grpc_status, details); } else if (!state_.non_100_response_headers_encoded_) { ENVOY_STREAM_LOG(debug, "Sending local reply with details {} directly to the encoder", *this, details); @@ -1585,13 +1587,15 @@ void ConnectionManagerImpl::ActiveStream::sendLocalReply( } response_headers_ = std::move(response_headers); encodeHeadersInternal(*response_headers_, end_stream); + filter_manager_.maybeEndEncode(end_stream); }, [&](Buffer::Instance& data, bool end_stream) -> void { encodeDataInternal(data, end_stream); + filter_manager_.maybeEndEncode(end_stream); }}, Utility::LocalReplyData{Grpc::Common::hasGrpcContentType(*request_headers_), code, body, grpc_status, state_.is_head_request_}); - maybeEndEncode(state_.local_complete_); + filter_manager_.maybeEndEncode(state_.local_complete_); } else { stream_info_.setResponseCodeDetails(details); // If we land in this branch, response headers have already been sent to the client. @@ -1602,50 +1606,52 @@ void ConnectionManagerImpl::ActiveStream::sendLocalReply( } } -void ConnectionManagerImpl::ActiveStream::sendLocalReplyViaFilterChain( +void ConnectionManagerImpl::FilterManager::sendLocalReplyViaFilterChain( bool is_grpc_request, Code code, absl::string_view body, const std::function& modify_headers, bool is_head_request, const absl::optional grpc_status, absl::string_view details) { - ENVOY_STREAM_LOG(debug, "Sending local reply with details {}", *this, details); - ASSERT(response_headers_ == nullptr); + ENVOY_STREAM_LOG(debug, "Sending local reply with details {}", active_stream_, details); + ASSERT(active_stream_.response_headers_ == nullptr); // For early error handling, do a best-effort attempt to create a filter chain // to ensure access logging. If the filter chain already exists this will be // a no-op. - createFilterChain(); + active_stream_.createFilterChain(); Utility::sendLocalReply( - state_.destroyed_, + active_stream_.state_.destroyed_, Utility::EncodeFunctions{ [this](ResponseHeaderMap& response_headers, Code& code, std::string& body, absl::string_view& content_type) -> void { - connection_manager_.config_.localReply().rewrite( - request_headers_.get(), response_headers, stream_info_, code, body, content_type); + active_stream_.connection_manager_.config_.localReply().rewrite( + active_stream_.request_headers_.get(), response_headers, + active_stream_.stream_info_, code, body, content_type); }, [this, modify_headers](ResponseHeaderMapPtr&& headers, bool end_stream) -> void { if (modify_headers != nullptr) { modify_headers(*headers); } - response_headers_ = std::move(headers); + active_stream_.response_headers_ = std::move(headers); // TODO: Start encoding from the last decoder filter that saw the // request instead. - encodeHeaders(nullptr, *response_headers_, end_stream); + encodeHeaders(nullptr, *active_stream_.response_headers_, end_stream); }, [this](Buffer::Instance& data, bool end_stream) -> void { // TODO: Start encoding from the last decoder filter that saw the // request instead. 
- encodeData(nullptr, data, end_stream, FilterIterationStartState::CanStartFromCurrent); + encodeData(nullptr, data, end_stream, + FilterManager::FilterIterationStartState::CanStartFromCurrent); }}, Utility::LocalReplyData{is_grpc_request, code, body, grpc_status, is_head_request}); } -void ConnectionManagerImpl::ActiveStream::encode100ContinueHeaders( +void ConnectionManagerImpl::FilterManager::encode100ContinueHeaders( ActiveStreamEncoderFilter* filter, ResponseHeaderMap& headers) { - resetIdleTimer(); - ASSERT(connection_manager_.config_.proxy100Continue()); + active_stream_.resetIdleTimer(); + ASSERT(active_stream_.connection_manager_.config_.proxy100Continue()); // The caller must guarantee that encode100ContinueHeaders() is invoked at most once. - ASSERT(!state_.has_continue_headers_ || filter != nullptr); + ASSERT(!active_stream_.state_.has_continue_headers_ || filter != nullptr); // Make sure commonContinue continues encode100ContinueHeaders. - state_.has_continue_headers_ = true; + active_stream_.state_.has_continue_headers_ = true; // Similar to the block in encodeHeaders, run encode100ContinueHeaders on each // filter. This is simpler than that case because 100 continue implies no @@ -1655,12 +1661,16 @@ void ConnectionManagerImpl::ActiveStream::encode100ContinueHeaders( std::list::iterator entry = commonEncodePrefix(filter, false, FilterIterationStartState::AlwaysStartFromNext); for (; entry != encoder_filters_.end(); entry++) { - ASSERT(!(state_.filter_call_state_ & FilterCallState::Encode100ContinueHeaders)); - state_.filter_call_state_ |= FilterCallState::Encode100ContinueHeaders; + ASSERT(!(active_stream_.state_.filter_call_state_ & + ActiveStream::FilterCallState::Encode100ContinueHeaders)); + active_stream_.state_.filter_call_state_ |= + ActiveStream::FilterCallState::Encode100ContinueHeaders; FilterHeadersStatus status = (*entry)->handle_->encode100ContinueHeaders(headers); - state_.filter_call_state_ &= ~FilterCallState::Encode100ContinueHeaders; - ENVOY_STREAM_LOG(trace, "encode 100 continue headers called: filter={} status={}", *this, - static_cast((*entry).get()), static_cast(status)); + active_stream_.state_.filter_call_state_ &= + ~ActiveStream::FilterCallState::Encode100ContinueHeaders; + ENVOY_STREAM_LOG(trace, "encode 100 continue headers called: filter={} status={}", + active_stream_, static_cast((*entry).get()), + static_cast(status)); if (!(*entry)->commonHandleAfter100ContinueHeadersCallback(status)) { return; } @@ -1668,38 +1678,39 @@ void ConnectionManagerImpl::ActiveStream::encode100ContinueHeaders( // Strip the T-E headers etc. Defer other header additions as well as drain-close logic to the // continuation headers. - ConnectionManagerUtility::mutateResponseHeaders(headers, request_headers_.get(), - connection_manager_.config_, EMPTY_STRING); + ConnectionManagerUtility::mutateResponseHeaders(headers, active_stream_.request_headers_.get(), + active_stream_.connection_manager_.config_, + EMPTY_STRING); // Count both the 1xx and follow-up response code in stats. - chargeStats(headers); + active_stream_.chargeStats(headers); - ENVOY_STREAM_LOG(debug, "encoding 100 continue headers via codec:\n{}", *this, headers); + ENVOY_STREAM_LOG(debug, "encoding 100 continue headers via codec:\n{}", active_stream_, headers); // Now actually encode via the codec. 
- response_encoder_->encode100ContinueHeaders(headers); + active_stream_.response_encoder_->encode100ContinueHeaders(headers); } -void ConnectionManagerImpl::ActiveStream::maybeContinueEncoding( +void ConnectionManagerImpl::FilterManager::maybeContinueEncoding( const std::list::iterator& continue_data_entry) { if (continue_data_entry != encoder_filters_.end()) { // We use the continueEncoding() code since it will correctly handle not calling // encodeHeaders() again. Fake setting StopSingleIteration since the continueEncoding() code // expects it. - ASSERT(buffered_response_data_); + ASSERT(active_stream_.buffered_response_data_); (*continue_data_entry)->iteration_state_ = ActiveStreamFilterBase::IterationState::StopSingleIteration; (*continue_data_entry)->continueEncoding(); } } -void ConnectionManagerImpl::ActiveStream::encodeHeaders(ActiveStreamEncoderFilter* filter, - ResponseHeaderMap& headers, - bool end_stream) { +void ConnectionManagerImpl::FilterManager::encodeHeaders(ActiveStreamEncoderFilter* filter, + ResponseHeaderMap& headers, + bool end_stream) { // See encodeHeaders() comments in include/envoy/http/filter.h for why the 1xx precondition holds. ASSERT(!CodeUtility::is1xx(Utility::getResponseStatus(headers)) || Utility::getResponseStatus(headers) == enumToInt(Http::Code::SwitchingProtocols)); - resetIdleTimer(); + active_stream_.resetIdleTimer(); disarmRequestTimeout(); // Headers filter iteration should always start with the next filter if available. @@ -1708,26 +1719,27 @@ void ConnectionManagerImpl::ActiveStream::encodeHeaders(ActiveStreamEncoderFilte std::list::iterator continue_data_entry = encoder_filters_.end(); for (; entry != encoder_filters_.end(); entry++) { - ASSERT(!(state_.filter_call_state_ & FilterCallState::EncodeHeaders)); - state_.filter_call_state_ |= FilterCallState::EncodeHeaders; - (*entry)->end_stream_ = state_.encoding_headers_only_ || + ASSERT( + !(active_stream_.state_.filter_call_state_ & ActiveStream::FilterCallState::EncodeHeaders)); + active_stream_.state_.filter_call_state_ |= ActiveStream::FilterCallState::EncodeHeaders; + (*entry)->end_stream_ = active_stream_.state_.encoding_headers_only_ || (end_stream && continue_data_entry == encoder_filters_.end()); FilterHeadersStatus status = (*entry)->handle_->encodeHeaders(headers, (*entry)->end_stream_); if ((*entry)->end_stream_) { (*entry)->handle_->encodeComplete(); } - state_.filter_call_state_ &= ~FilterCallState::EncodeHeaders; - ENVOY_STREAM_LOG(trace, "encode headers called: filter={} status={}", *this, + active_stream_.state_.filter_call_state_ &= ~ActiveStream::FilterCallState::EncodeHeaders; + ENVOY_STREAM_LOG(trace, "encode headers called: filter={} status={}", active_stream_, static_cast((*entry).get()), static_cast(status)); (*entry)->encode_headers_called_ = true; - const auto continue_iteration = - (*entry)->commonHandleAfterHeadersCallback(status, state_.encoding_headers_only_); + const auto continue_iteration = (*entry)->commonHandleAfterHeadersCallback( + status, active_stream_.state_.encoding_headers_only_); // If we're encoding a headers only response, then mark the local as complete. This ensures // that we don't attempt to reset the downstream request in doEndStream. 
- if (state_.encoding_headers_only_) { - state_.local_complete_ = true; + if (active_stream_.state_.encoding_headers_only_) { + active_stream_.state_.local_complete_ = true; } if (!continue_iteration) { @@ -1739,14 +1751,16 @@ void ConnectionManagerImpl::ActiveStream::encodeHeaders(ActiveStreamEncoderFilte // Here we handle the case where we have a header only response, but a filter adds a body // to it. We need to not raise end_stream = true to further filters during inline iteration. - if (end_stream && buffered_response_data_ && continue_data_entry == encoder_filters_.end()) { + if (end_stream && active_stream_.buffered_response_data_ && + continue_data_entry == encoder_filters_.end()) { continue_data_entry = entry; } } - const bool modified_end_stream = state_.encoding_headers_only_ || + const bool modified_end_stream = active_stream_.state_.encoding_headers_only_ || (end_stream && continue_data_entry == encoder_filters_.end()); - encodeHeadersInternal(headers, modified_end_stream); + active_stream_.encodeHeadersInternal(headers, modified_end_stream); + maybeEndEncode(modified_end_stream); if (!modified_end_stream) { maybeContinueEncoding(continue_data_entry); @@ -1874,12 +1888,11 @@ void ConnectionManagerImpl::ActiveStream::encodeHeadersInternal(ResponseHeaderMa // Now actually encode via the codec. stream_info_.onFirstDownstreamTxByteSent(); response_encoder_->encodeHeaders(headers, end_stream); - maybeEndEncode(end_stream); } -void ConnectionManagerImpl::ActiveStream::encodeMetadata(ActiveStreamEncoderFilter* filter, - MetadataMapPtr&& metadata_map_ptr) { - resetIdleTimer(); +void ConnectionManagerImpl::FilterManager::encodeMetadata(ActiveStreamEncoderFilter* filter, + MetadataMapPtr&& metadata_map_ptr) { + active_stream_.resetIdleTimer(); std::list::iterator entry = commonEncodePrefix(filter, false, FilterIterationStartState::CanStartFromCurrent); @@ -1895,43 +1908,53 @@ void ConnectionManagerImpl::ActiveStream::encodeMetadata(ActiveStreamEncoderFilt } FilterMetadataStatus status = (*entry)->handle_->encodeMetadata(*metadata_map_ptr); - ENVOY_STREAM_LOG(trace, "encode metadata called: filter={} status={}", *this, + ENVOY_STREAM_LOG(trace, "encode metadata called: filter={} status={}", active_stream_, static_cast((*entry).get()), static_cast(status)); } // TODO(soya3129): update stats with metadata. // Now encode metadata via the codec. if (!metadata_map_ptr->empty()) { - ENVOY_STREAM_LOG(debug, "encoding metadata via codec:\n{}", *this, *metadata_map_ptr); + ENVOY_STREAM_LOG(debug, "encoding metadata via codec:\n{}", active_stream_, *metadata_map_ptr); MetadataMapVector metadata_map_vector; metadata_map_vector.emplace_back(std::move(metadata_map_ptr)); - response_encoder_->encodeMetadata(metadata_map_vector); + active_stream_.response_encoder_->encodeMetadata(metadata_map_vector); } } -ResponseTrailerMap& ConnectionManagerImpl::ActiveStream::addEncodedTrailers() { +ResponseTrailerMap& ConnectionManagerImpl::FilterManager::addEncodedTrailers() { // Trailers can only be added during the last data frame (i.e. end_stream = true). - ASSERT(state_.filter_call_state_ & FilterCallState::LastDataFrame); + ASSERT(active_stream_.state_.filter_call_state_ & ActiveStream::FilterCallState::LastDataFrame); // Trailers can only be added once. 
- ASSERT(!response_trailers_); + ASSERT(!active_stream_.response_trailers_); - response_trailers_ = ResponseTrailerMapImpl::create(); - return *response_trailers_; + active_stream_.response_trailers_ = ResponseTrailerMapImpl::create(); + return *active_stream_.response_trailers_; +} + +void ConnectionManagerImpl::FilterManager::sendLocalReply( + bool is_grpc_request, Code code, absl::string_view body, + const std::function& modify_headers, bool is_head_request, + const absl::optional grpc_status, absl::string_view details) { + active_stream_.sendLocalReply(is_grpc_request, code, body, modify_headers, is_head_request, + grpc_status, details); } -void ConnectionManagerImpl::ActiveStream::addEncodedData(ActiveStreamEncoderFilter& filter, - Buffer::Instance& data, bool streaming) { - if (state_.filter_call_state_ == 0 || - (state_.filter_call_state_ & FilterCallState::EncodeHeaders) || - (state_.filter_call_state_ & FilterCallState::EncodeData) || - ((state_.filter_call_state_ & FilterCallState::EncodeTrailers) && !filter.canIterate())) { +void ConnectionManagerImpl::FilterManager::addEncodedData(ActiveStreamEncoderFilter& filter, + Buffer::Instance& data, bool streaming) { + if (active_stream_.state_.filter_call_state_ == 0 || + (active_stream_.state_.filter_call_state_ & ActiveStream::FilterCallState::EncodeHeaders) || + (active_stream_.state_.filter_call_state_ & ActiveStream::FilterCallState::EncodeData) || + ((active_stream_.state_.filter_call_state_ & ActiveStream::FilterCallState::EncodeTrailers) && + !filter.canIterate())) { // Make sure if this triggers watermarks, the correct action is taken. - state_.encoder_filters_streaming_ = streaming; + active_stream_.state_.encoder_filters_streaming_ = streaming; // If no call is happening or we are in the decode headers/data callback, buffer the data. // Inline processing happens in the decodeHeaders() callback if necessary. filter.commonHandleBufferData(data); - } else if (state_.filter_call_state_ & FilterCallState::EncodeTrailers) { + } else if (active_stream_.state_.filter_call_state_ & + ActiveStream::FilterCallState::EncodeTrailers) { // In this case we need to inline dispatch the data to further filters. If those filters // choose to buffer/stop iteration that's fine. encodeData(&filter, data, false, FilterIterationStartState::AlwaysStartFromNext); @@ -1942,13 +1965,13 @@ void ConnectionManagerImpl::ActiveStream::addEncodedData(ActiveStreamEncoderFilt } } -void ConnectionManagerImpl::ActiveStream::encodeData( +void ConnectionManagerImpl::FilterManager::encodeData( ActiveStreamEncoderFilter* filter, Buffer::Instance& data, bool end_stream, FilterIterationStartState filter_iteration_start_state) { - resetIdleTimer(); + active_stream_.resetIdleTimer(); // If we previously decided to encode only the headers, do nothing here. - if (state_.encoding_headers_only_) { + if (active_stream_.state_.encoding_headers_only_) { return; } @@ -1957,10 +1980,10 @@ void ConnectionManagerImpl::ActiveStream::encodeData( commonEncodePrefix(filter, end_stream, filter_iteration_start_state); auto trailers_added_entry = encoder_filters_.end(); - const bool trailers_exists_at_start = response_trailers_ != nullptr; + const bool trailers_exists_at_start = active_stream_.response_trailers_ != nullptr; for (; entry != encoder_filters_.end(); entry++) { // If the filter pointed by entry has stopped for all frame type, return now. 
- if (handleDataIfStopAll(**entry, data, state_.encoder_filters_streaming_)) { + if (handleDataIfStopAll(**entry, data, active_stream_.state_.encoder_filters_streaming_)) { return; } // If end_stream_ is marked for a filter, the data is not for this filter and filters after. @@ -1968,47 +1991,50 @@ void ConnectionManagerImpl::ActiveStream::encodeData( if ((*entry)->end_stream_) { return; } - ASSERT(!(state_.filter_call_state_ & FilterCallState::EncodeData)); + ASSERT(!(active_stream_.state_.filter_call_state_ & ActiveStream::FilterCallState::EncodeData)); // We check the response_trailers_ pointer here in case addEncodedTrailers // is called in encodeData during a previous filter invocation, at which point we communicate to // the current and future filters that the stream has not yet ended. - state_.filter_call_state_ |= FilterCallState::EncodeData; + active_stream_.state_.filter_call_state_ |= ActiveStream::FilterCallState::EncodeData; if (end_stream) { - state_.filter_call_state_ |= FilterCallState::LastDataFrame; + active_stream_.state_.filter_call_state_ |= ActiveStream::FilterCallState::LastDataFrame; } - recordLatestDataFilter(entry, state_.latest_data_encoding_filter_, encoder_filters_); + recordLatestDataFilter(entry, active_stream_.state_.latest_data_encoding_filter_, + encoder_filters_); - (*entry)->end_stream_ = end_stream && !response_trailers_; + (*entry)->end_stream_ = end_stream && !active_stream_.response_trailers_; FilterDataStatus status = (*entry)->handle_->encodeData(data, (*entry)->end_stream_); if ((*entry)->end_stream_) { (*entry)->handle_->encodeComplete(); } - state_.filter_call_state_ &= ~FilterCallState::EncodeData; + active_stream_.state_.filter_call_state_ &= ~ActiveStream::FilterCallState::EncodeData; if (end_stream) { - state_.filter_call_state_ &= ~FilterCallState::LastDataFrame; + active_stream_.state_.filter_call_state_ &= ~ActiveStream::FilterCallState::LastDataFrame; } - ENVOY_STREAM_LOG(trace, "encode data called: filter={} status={}", *this, + ENVOY_STREAM_LOG(trace, "encode data called: filter={} status={}", active_stream_, static_cast((*entry).get()), static_cast(status)); - if (!trailers_exists_at_start && response_trailers_ && + if (!trailers_exists_at_start && active_stream_.response_trailers_ && trailers_added_entry == encoder_filters_.end()) { trailers_added_entry = entry; } - if (!(*entry)->commonHandleAfterDataCallback(status, data, state_.encoder_filters_streaming_)) { + if (!(*entry)->commonHandleAfterDataCallback( + status, data, active_stream_.state_.encoder_filters_streaming_)) { return; } } const bool modified_end_stream = end_stream && trailers_added_entry == encoder_filters_.end(); - encodeDataInternal(data, modified_end_stream); + active_stream_.encodeDataInternal(data, modified_end_stream); + maybeEndEncode(modified_end_stream); // If trailers were adding during encodeData we need to trigger decodeTrailers in order // to allow filters to process the trailers. 
if (trailers_added_entry != encoder_filters_.end()) { - encodeTrailers(trailers_added_entry->get(), *response_trailers_); + encodeTrailers(trailers_added_entry->get(), *active_stream_.response_trailers_); } } @@ -2020,15 +2046,14 @@ void ConnectionManagerImpl::ActiveStream::encodeDataInternal(Buffer::Instance& d stream_info_.addBytesSent(data.length()); response_encoder_->encodeData(data, end_stream); - maybeEndEncode(end_stream); } -void ConnectionManagerImpl::ActiveStream::encodeTrailers(ActiveStreamEncoderFilter* filter, - ResponseTrailerMap& trailers) { - resetIdleTimer(); +void ConnectionManagerImpl::FilterManager::encodeTrailers(ActiveStreamEncoderFilter* filter, + ResponseTrailerMap& trailers) { + active_stream_.resetIdleTimer(); // If we previously decided to encode only the headers, do nothing here. - if (state_.encoding_headers_only_) { + if (active_stream_.state_.encoding_headers_only_) { return; } @@ -2040,49 +2065,50 @@ void ConnectionManagerImpl::ActiveStream::encodeTrailers(ActiveStreamEncoderFilt if ((*entry)->stoppedAll()) { return; } - ASSERT(!(state_.filter_call_state_ & FilterCallState::EncodeTrailers)); - state_.filter_call_state_ |= FilterCallState::EncodeTrailers; + ASSERT(!(active_stream_.state_.filter_call_state_ & + ActiveStream::FilterCallState::EncodeTrailers)); + active_stream_.state_.filter_call_state_ |= ActiveStream::FilterCallState::EncodeTrailers; FilterTrailersStatus status = (*entry)->handle_->encodeTrailers(trailers); (*entry)->handle_->encodeComplete(); (*entry)->end_stream_ = true; - state_.filter_call_state_ &= ~FilterCallState::EncodeTrailers; - ENVOY_STREAM_LOG(trace, "encode trailers called: filter={} status={}", *this, + active_stream_.state_.filter_call_state_ &= ~ActiveStream::FilterCallState::EncodeTrailers; + ENVOY_STREAM_LOG(trace, "encode trailers called: filter={} status={}", active_stream_, static_cast((*entry).get()), static_cast(status)); if (!(*entry)->commonHandleAfterTrailersCallback(status)) { return; } } - ENVOY_STREAM_LOG(debug, "encoding trailers via codec:\n{}", *this, trailers); + ENVOY_STREAM_LOG(debug, "encoding trailers via codec:\n{}", active_stream_, trailers); - response_encoder_->encodeTrailers(trailers); + active_stream_.response_encoder_->encodeTrailers(trailers); maybeEndEncode(true); } -void ConnectionManagerImpl::ActiveStream::maybeEndEncode(bool end_stream) { +void ConnectionManagerImpl::FilterManager::maybeEndEncode(bool end_stream) { if (end_stream) { - ASSERT(!state_.codec_saw_local_complete_); - state_.codec_saw_local_complete_ = true; - stream_info_.onLastDownstreamTxByteSent(); - request_response_timespan_->complete(); - connection_manager_.doEndStream(*this); + ASSERT(!active_stream_.state_.codec_saw_local_complete_); + active_stream_.state_.codec_saw_local_complete_ = true; + active_stream_.stream_info_.onLastDownstreamTxByteSent(); + active_stream_.request_response_timespan_->complete(); + active_stream_.connection_manager_.doEndStream(active_stream_); } } -bool ConnectionManagerImpl::ActiveStream::processNewlyAddedMetadata() { - if (request_metadata_map_vector_ == nullptr) { +bool ConnectionManagerImpl::FilterManager::processNewlyAddedMetadata() { + if (active_stream_.request_metadata_map_vector_ == nullptr) { return false; } - for (const auto& metadata_map : *getRequestMetadataMapVector()) { + for (const auto& metadata_map : *active_stream_.getRequestMetadataMapVector()) { decodeMetadata(nullptr, *metadata_map); } - getRequestMetadataMapVector()->clear(); + 
active_stream_.getRequestMetadataMapVector()->clear(); return true; } -bool ConnectionManagerImpl::ActiveStream::handleDataIfStopAll(ActiveStreamFilterBase& filter, - Buffer::Instance& data, - bool& filter_streaming) { +bool ConnectionManagerImpl::FilterManager::handleDataIfStopAll(ActiveStreamFilterBase& filter, + Buffer::Instance& data, + bool& filter_streaming) { if (filter.stoppedAll()) { ASSERT(!filter.canIterate()); filter_streaming = @@ -2205,12 +2231,12 @@ bool ConnectionManagerImpl::ActiveStream::createFilterChain() { void ConnectionManagerImpl::ActiveStreamFilterBase::commonContinue() { // TODO(mattklein123): Raise an error if this is called during a callback. if (!canContinue()) { - ENVOY_STREAM_LOG(trace, "cannot continue filter chain: filter={}", parent_, + ENVOY_STREAM_LOG(trace, "cannot continue filter chain: filter={}", parent_.active_stream_, static_cast(this)); return; } - ENVOY_STREAM_LOG(trace, "continuing filter chain: filter={}", parent_, + ENVOY_STREAM_LOG(trace, "continuing filter chain: filter={}", parent_.active_stream_, static_cast(this)); ASSERT(!canIterate()); // If iteration has stopped for all frame types, set iterate_from_current_filter_ to true so the @@ -2226,7 +2252,7 @@ void ConnectionManagerImpl::ActiveStreamFilterBase::commonContinue() { do100ContinueHeaders(); // If the response headers have not yet come in, don't continue on with // headers and body. doHeaders expects request headers to exist. - if (!parent_.response_headers_.get()) { + if (!parent_.active_stream_.response_headers_.get()) { return; } } @@ -2254,7 +2280,7 @@ void ConnectionManagerImpl::ActiveStreamFilterBase::commonContinue() { bool ConnectionManagerImpl::ActiveStreamFilterBase::commonHandleAfter100ContinueHeadersCallback( FilterHeadersStatus status) { - ASSERT(parent_.state_.has_continue_headers_); + ASSERT(parent_.active_stream_.state_.has_continue_headers_); ASSERT(!continue_headers_continued_); ASSERT(canIterate()); @@ -2283,7 +2309,7 @@ bool ConnectionManagerImpl::ActiveStreamFilterBase::commonHandleAfterHeadersCall // Set headers_only to true so we know to end early if necessary, // but continue filter iteration so we actually write the headers/run the cleanup code. 
headers_only = true; - ENVOY_STREAM_LOG(debug, "converting to headers only", parent_); + ENVOY_STREAM_LOG(debug, "converting to headers only", parent_.active_stream_); } else { ASSERT(status == FilterHeadersStatus::Continue); headers_continued_ = true; @@ -2364,34 +2390,40 @@ bool ConnectionManagerImpl::ActiveStreamFilterBase::commonHandleAfterTrailersCal } const Network::Connection* ConnectionManagerImpl::ActiveStreamFilterBase::connection() { - return parent_.connection(); + return parent_.active_stream_.connection(); } Event::Dispatcher& ConnectionManagerImpl::ActiveStreamFilterBase::dispatcher() { - return parent_.connection_manager_.read_callbacks_->connection().dispatcher(); + return parent_.active_stream_.connection_manager_.read_callbacks_->connection().dispatcher(); } StreamInfo::StreamInfo& ConnectionManagerImpl::ActiveStreamFilterBase::streamInfo() { - return parent_.stream_info_; + return parent_.active_stream_.stream_info_; } Tracing::Span& ConnectionManagerImpl::ActiveStreamFilterBase::activeSpan() { - if (parent_.active_span_) { - return *parent_.active_span_; + if (parent_.active_stream_.active_span_) { + return *parent_.active_stream_.active_span_; } else { return Tracing::NullSpan::instance(); } } -Tracing::Config& ConnectionManagerImpl::ActiveStreamFilterBase::tracingConfig() { return parent_; } +Tracing::Config& ConnectionManagerImpl::ActiveStreamFilterBase::tracingConfig() { + return parent_.active_stream_; +} + +const ScopeTrackedObject& ConnectionManagerImpl::ActiveStreamFilterBase::scope() { + return parent_.active_stream_; +} Upstream::ClusterInfoConstSharedPtr ConnectionManagerImpl::ActiveStreamFilterBase::clusterInfo() { // NOTE: Refreshing route caches clusterInfo as well. - if (!parent_.cached_route_.has_value()) { - parent_.refreshCachedRoute(); + if (!parent_.active_stream_.cached_route_.has_value()) { + parent_.active_stream_.refreshCachedRoute(); } - return parent_.cached_cluster_info_.value(); + return parent_.active_stream_.cached_cluster_info_.value(); } Router::RouteConstSharedPtr ConnectionManagerImpl::ActiveStreamFilterBase::route() { @@ -2400,30 +2432,72 @@ Router::RouteConstSharedPtr ConnectionManagerImpl::ActiveStreamFilterBase::route Router::RouteConstSharedPtr ConnectionManagerImpl::ActiveStreamFilterBase::route(const Router::RouteCallback& cb) { - if (parent_.cached_route_.has_value()) { - return parent_.cached_route_.value(); + if (parent_.active_stream_.cached_route_.has_value()) { + return parent_.active_stream_.cached_route_.value(); } - parent_.refreshCachedRoute(cb); - return parent_.cached_route_.value(); + parent_.active_stream_.refreshCachedRoute(cb); + return parent_.active_stream_.cached_route_.value(); } void ConnectionManagerImpl::ActiveStreamFilterBase::clearRouteCache() { - parent_.cached_route_ = absl::optional(); - parent_.cached_cluster_info_ = absl::optional(); - if (parent_.tracing_custom_tags_) { - parent_.tracing_custom_tags_->clear(); + parent_.active_stream_.cached_route_ = absl::optional(); + parent_.active_stream_.cached_cluster_info_ = + absl::optional(); + if (parent_.active_stream_.tracing_custom_tags_) { + parent_.active_stream_.tracing_custom_tags_->clear(); } } +bool ConnectionManagerImpl::ActiveStreamDecoderFilter::canContinue() { + // It is possible for the connection manager to respond directly to a request even while + // a filter is trying to continue. If a response has already happened, we should not + // continue to further filters. 
A concrete example of this is a filter buffering data, the + // last data frame comes in and the filter continues, but the final buffering takes the stream + // over the high watermark such that a 413 is returned. + return !parent_.active_stream_.state_.local_complete_; +} + Buffer::WatermarkBufferPtr ConnectionManagerImpl::ActiveStreamDecoderFilter::createBuffer() { auto buffer = std::make_unique( [this]() -> void { this->requestDataDrained(); }, [this]() -> void { this->requestDataTooLarge(); }, []() -> void { /* TODO(adisuissa): Handle overflow watermark */ }); - buffer->setWatermarks(parent_.buffer_limit_); + buffer->setWatermarks(parent_.active_stream_.buffer_limit_); return buffer; } +Buffer::WatermarkBufferPtr& ConnectionManagerImpl::ActiveStreamDecoderFilter::bufferedData() { + return parent_.active_stream_.buffered_request_data_; +} + +bool ConnectionManagerImpl::ActiveStreamDecoderFilter::complete() { + return parent_.active_stream_.state_.remote_complete_; +} + +void ConnectionManagerImpl::ActiveStreamDecoderFilter::doHeaders(bool end_stream) { + parent_.decodeHeaders(this, *parent_.active_stream_.request_headers_, end_stream); +} + +void ConnectionManagerImpl::ActiveStreamDecoderFilter::doData(bool end_stream) { + parent_.decodeData(this, *parent_.active_stream_.buffered_request_data_, end_stream, + FilterManager::FilterIterationStartState::CanStartFromCurrent); +} + +void ConnectionManagerImpl::ActiveStreamDecoderFilter::doTrailers() { + parent_.decodeTrailers(this, *parent_.active_stream_.request_trailers_); +} +bool ConnectionManagerImpl::ActiveStreamDecoderFilter::hasTrailers() { + return parent_.active_stream_.request_trailers_ != nullptr; +} + +void ConnectionManagerImpl::ActiveStreamDecoderFilter::drainSavedRequestMetadata() { + ASSERT(saved_request_metadata_ != nullptr); + for (auto& metadata_map : *getSavedRequestMetadata()) { + parent_.decodeMetadata(this, *metadata_map); + } + getSavedRequestMetadata()->clear(); +} + void ConnectionManagerImpl::ActiveStreamDecoderFilter::handleMetadataAfterHeadersCallback() { // If we drain accumulated metadata, the iteration must start with the current filter. 
const bool saved_state = iterate_from_current_filter_; @@ -2453,38 +2527,56 @@ MetadataMapVector& ConnectionManagerImpl::ActiveStreamDecoderFilter::addDecodedM void ConnectionManagerImpl::ActiveStreamDecoderFilter::injectDecodedDataToFilterChain( Buffer::Instance& data, bool end_stream) { parent_.decodeData(this, data, end_stream, - ActiveStream::FilterIterationStartState::CanStartFromCurrent); + FilterManager::FilterIterationStartState::CanStartFromCurrent); } void ConnectionManagerImpl::ActiveStreamDecoderFilter::continueDecoding() { commonContinue(); } +const Buffer::Instance* ConnectionManagerImpl::ActiveStreamDecoderFilter::decodingBuffer() { + return parent_.active_stream_.buffered_request_data_.get(); +} + +void ConnectionManagerImpl::ActiveStreamDecoderFilter::modifyDecodingBuffer( + std::function callback) { + ASSERT(parent_.active_stream_.state_.latest_data_decoding_filter_ == this); + callback(*parent_.active_stream_.buffered_request_data_.get()); +} + +void ConnectionManagerImpl::ActiveStreamDecoderFilter::sendLocalReply( + Code code, absl::string_view body, + std::function modify_headers, + const absl::optional grpc_status, absl::string_view details) { + parent_.active_stream_.stream_info_.setResponseCodeDetails(details); + parent_.sendLocalReply(is_grpc_request_, code, body, modify_headers, + parent_.active_stream_.state_.is_head_request_, grpc_status, details); +} void ConnectionManagerImpl::ActiveStreamDecoderFilter::encode100ContinueHeaders( ResponseHeaderMapPtr&& headers) { // If Envoy is not configured to proxy 100-Continue responses, swallow the 100 Continue // here. This avoids the potential situation where Envoy strips Expect: 100-Continue and sends a // 100-Continue, then proxies a duplicate 100 Continue from upstream. - if (parent_.connection_manager_.config_.proxy100Continue()) { - parent_.continue_headers_ = std::move(headers); - parent_.encode100ContinueHeaders(nullptr, *parent_.continue_headers_); + if (parent_.active_stream_.connection_manager_.config_.proxy100Continue()) { + parent_.active_stream_.continue_headers_ = std::move(headers); + parent_.encode100ContinueHeaders(nullptr, *parent_.active_stream_.continue_headers_); } } void ConnectionManagerImpl::ActiveStreamDecoderFilter::encodeHeaders(ResponseHeaderMapPtr&& headers, bool end_stream) { - parent_.response_headers_ = std::move(headers); - parent_.encodeHeaders(nullptr, *parent_.response_headers_, end_stream); + parent_.active_stream_.response_headers_ = std::move(headers); + parent_.encodeHeaders(nullptr, *parent_.active_stream_.response_headers_, end_stream); } void ConnectionManagerImpl::ActiveStreamDecoderFilter::encodeData(Buffer::Instance& data, bool end_stream) { parent_.encodeData(nullptr, data, end_stream, - ActiveStream::FilterIterationStartState::CanStartFromCurrent); + FilterManager::FilterIterationStartState::CanStartFromCurrent); } void ConnectionManagerImpl::ActiveStreamDecoderFilter::encodeTrailers( ResponseTrailerMapPtr&& trailers) { - parent_.response_trailers_ = std::move(trailers); - parent_.encodeTrailers(nullptr, *parent_.response_trailers_); + parent_.active_stream_.response_trailers_ = std::move(trailers); + parent_.encodeTrailers(nullptr, *parent_.active_stream_.response_trailers_); } void ConnectionManagerImpl::ActiveStreamDecoderFilter::encodeMetadata( @@ -2494,17 +2586,19 @@ void ConnectionManagerImpl::ActiveStreamDecoderFilter::encodeMetadata( void ConnectionManagerImpl::ActiveStreamDecoderFilter:: onDecoderFilterAboveWriteBufferHighWatermark() { - ENVOY_STREAM_LOG(debug, 
"Read-disabling downstream stream due to filter callbacks.", parent_); - parent_.response_encoder_->getStream().readDisable(true); - parent_.connection_manager_.stats_.named_.downstream_flow_control_paused_reading_total_.inc(); + ENVOY_STREAM_LOG(debug, "Read-disabling downstream stream due to filter callbacks.", + parent_.active_stream_); + parent_.active_stream_.response_encoder_->getStream().readDisable(true); + parent_.active_stream_.connection_manager_.stats_.named_ + .downstream_flow_control_paused_reading_total_.inc(); } void ConnectionManagerImpl::ActiveStreamDecoderFilter::requestDataTooLarge() { - ENVOY_STREAM_LOG(debug, "request data too large watermark exceeded", parent_); - if (parent_.state_.decoder_filters_streaming_) { + ENVOY_STREAM_LOG(debug, "request data too large watermark exceeded", parent_.active_stream_); + if (parent_.active_stream_.state_.decoder_filters_streaming_) { onDecoderFilterAboveWriteBufferHighWatermark(); } else { - parent_.connection_manager_.stats_.named_.downstream_rq_too_large_.inc(); + parent_.active_stream_.connection_manager_.stats_.named_.downstream_rq_too_large_.inc(); sendLocalReply(Code::PayloadTooLarge, CodeUtility::toString(Code::PayloadTooLarge), nullptr, absl::nullopt, StreamInfo::ResponseCodeDetails::get().RequestPayloadTooLarge); } @@ -2518,63 +2612,77 @@ void ConnectionManagerImpl::ActiveStreamDecoderFilter::requestDataDrained() { void ConnectionManagerImpl::ActiveStreamDecoderFilter:: onDecoderFilterBelowWriteBufferLowWatermark() { - ENVOY_STREAM_LOG(debug, "Read-enabling downstream stream due to filter callbacks.", parent_); + ENVOY_STREAM_LOG(debug, "Read-enabling downstream stream due to filter callbacks.", + parent_.active_stream_); // If the state is destroyed, the codec's stream is already torn down. On // teardown the codec will unwind any remaining read disable calls. - if (!parent_.state_.destroyed_) { - parent_.response_encoder_->getStream().readDisable(false); + if (!parent_.active_stream_.state_.destroyed_) { + parent_.active_stream_.response_encoder_->getStream().readDisable(false); } - parent_.connection_manager_.stats_.named_.downstream_flow_control_resumed_reading_total_.inc(); + parent_.active_stream_.connection_manager_.stats_.named_ + .downstream_flow_control_resumed_reading_total_.inc(); } void ConnectionManagerImpl::ActiveStreamDecoderFilter::addDownstreamWatermarkCallbacks( DownstreamWatermarkCallbacks& watermark_callbacks) { // This is called exactly once per upstream-stream, by the router filter. Therefore, we // expect the same callbacks to not be registered twice. 
- ASSERT(std::find(parent_.watermark_callbacks_.begin(), parent_.watermark_callbacks_.end(), - &watermark_callbacks) == parent_.watermark_callbacks_.end()); - parent_.watermark_callbacks_.emplace(parent_.watermark_callbacks_.end(), &watermark_callbacks); - for (uint32_t i = 0; i < parent_.high_watermark_count_; ++i) { + ASSERT(std::find(parent_.active_stream_.watermark_callbacks_.begin(), + parent_.active_stream_.watermark_callbacks_.end(), + &watermark_callbacks) == parent_.active_stream_.watermark_callbacks_.end()); + parent_.active_stream_.watermark_callbacks_.emplace( + parent_.active_stream_.watermark_callbacks_.end(), &watermark_callbacks); + for (uint32_t i = 0; i < parent_.active_stream_.high_watermark_count_; ++i) { watermark_callbacks.onAboveWriteBufferHighWatermark(); } } void ConnectionManagerImpl::ActiveStreamDecoderFilter::removeDownstreamWatermarkCallbacks( DownstreamWatermarkCallbacks& watermark_callbacks) { - ASSERT(std::find(parent_.watermark_callbacks_.begin(), parent_.watermark_callbacks_.end(), - &watermark_callbacks) != parent_.watermark_callbacks_.end()); - parent_.watermark_callbacks_.remove(&watermark_callbacks); + ASSERT(std::find(parent_.active_stream_.watermark_callbacks_.begin(), + parent_.active_stream_.watermark_callbacks_.end(), + &watermark_callbacks) != parent_.active_stream_.watermark_callbacks_.end()); + parent_.active_stream_.watermark_callbacks_.remove(&watermark_callbacks); +} + +void ConnectionManagerImpl::ActiveStreamDecoderFilter::setDecoderBufferLimit(uint32_t limit) { + parent_.active_stream_.setBufferLimit(limit); +} + +uint32_t ConnectionManagerImpl::ActiveStreamDecoderFilter::decoderBufferLimit() { + return parent_.active_stream_.buffer_limit_; } bool ConnectionManagerImpl::ActiveStreamDecoderFilter::recreateStream() { // Because the filter's and the HCM view of if the stream has a body and if // the stream is complete may differ, re-check bytesReceived() to make sure // there was no body from the HCM's point of view. - if (!complete() || parent_.stream_info_.bytesReceived() != 0) { + if (!complete() || parent_.active_stream_.stream_info_.bytesReceived() != 0) { return false; } // n.b. we do not currently change the codecs to point at the new stream // decoder because the decoder callbacks are complete. It would be good to // null out that pointer but should not be necessary. - RequestHeaderMapPtr request_headers(std::move(parent_.request_headers_)); - ResponseEncoder* response_encoder = parent_.response_encoder_; - parent_.response_encoder_ = nullptr; - response_encoder->getStream().removeCallbacks(parent_); + RequestHeaderMapPtr request_headers(std::move(parent_.active_stream_.request_headers_)); + ResponseEncoder* response_encoder = parent_.active_stream_.response_encoder_; + parent_.active_stream_.response_encoder_ = nullptr; + response_encoder->getStream().removeCallbacks(parent_.active_stream_); // This functionally deletes the stream (via deferred delete) so do not // reference anything beyond this point. - parent_.connection_manager_.doEndStream(this->parent_); + parent_.active_stream_.connection_manager_.doEndStream(parent_.active_stream_); - RequestDecoder& new_stream = parent_.connection_manager_.newStream(*response_encoder, true); + RequestDecoder& new_stream = + parent_.active_stream_.connection_manager_.newStream(*response_encoder, true); // We don't need to copy over the old parent FilterState from the old StreamInfo if it did not // store any objects with a LifeSpan at or above DownstreamRequest. 
This is to avoid unnecessary // heap allocation. // TODO(snowp): In the case where connection level filter state has been set on the connection // FilterState that we inherit, we'll end up copying this every time even though we could get // away with just resetting it to the HCM filter_state_. - if (parent_.stream_info_.filter_state_->hasDataAtOrAboveLifeSpan( + if (parent_.active_stream_.stream_info_.filter_state_->hasDataAtOrAboveLifeSpan( StreamInfo::FilterState::LifeSpan::Request)) { - (*parent_.connection_manager_.streams_.begin())->stream_info_.filter_state_ = + (*parent_.active_stream_.connection_manager_.streams_.begin())->stream_info_.filter_state_ = std::make_shared( - parent_.stream_info_.filter_state_->parent(), + parent_.active_stream_.stream_info_.filter_state_->parent(), StreamInfo::FilterState::LifeSpan::FilterChain); } @@ -2582,14 +2690,25 @@ bool ConnectionManagerImpl::ActiveStreamDecoderFilter::recreateStream() { return true; } +void ConnectionManagerImpl::ActiveStreamDecoderFilter::addUpstreamSocketOptions( + const Network::Socket::OptionsSharedPtr& options) { + + Network::Socket::appendOptions(parent_.active_stream_.upstream_options_, options); +} + +Network::Socket::OptionsSharedPtr +ConnectionManagerImpl::ActiveStreamDecoderFilter::getUpstreamSocketOptions() const { + return parent_.active_stream_.upstream_options_; +} + void ConnectionManagerImpl::ActiveStreamDecoderFilter::requestRouteConfigUpdate( Http::RouteConfigUpdatedCallbackSharedPtr route_config_updated_cb) { - parent_.requestRouteConfigUpdate(dispatcher(), std::move(route_config_updated_cb)); + parent_.active_stream_.requestRouteConfigUpdate(dispatcher(), std::move(route_config_updated_cb)); } absl::optional ConnectionManagerImpl::ActiveStreamDecoderFilter::routeConfig() { - return parent_.routeConfig(); + return parent_.active_stream_.routeConfig(); } Buffer::WatermarkBufferPtr ConnectionManagerImpl::ActiveStreamEncoderFilter::createBuffer() { @@ -2597,9 +2716,35 @@ Buffer::WatermarkBufferPtr ConnectionManagerImpl::ActiveStreamEncoderFilter::cre [this]() -> void { this->responseDataDrained(); }, [this]() -> void { this->responseDataTooLarge(); }, []() -> void { /* TODO(adisuissa): Handle overflow watermark */ }); - buffer->setWatermarks(parent_.buffer_limit_); + buffer->setWatermarks(parent_.active_stream_.buffer_limit_); return Buffer::WatermarkBufferPtr{buffer}; } +Buffer::WatermarkBufferPtr& ConnectionManagerImpl::ActiveStreamEncoderFilter::bufferedData() { + return parent_.active_stream_.buffered_response_data_; +} +bool ConnectionManagerImpl::ActiveStreamEncoderFilter::complete() { + return parent_.active_stream_.state_.local_complete_; +} +bool ConnectionManagerImpl::ActiveStreamEncoderFilter::has100Continueheaders() { + return parent_.active_stream_.state_.has_continue_headers_ && !continue_headers_continued_; +} +void ConnectionManagerImpl::ActiveStreamEncoderFilter::do100ContinueHeaders() { + parent_.encode100ContinueHeaders(this, *parent_.active_stream_.continue_headers_); +} +void ConnectionManagerImpl::ActiveStreamEncoderFilter::doHeaders(bool end_stream) { + parent_.encodeHeaders(this, *parent_.active_stream_.response_headers_, end_stream); +} +void ConnectionManagerImpl::ActiveStreamEncoderFilter::doData(bool end_stream) { + parent_.encodeData(this, *parent_.active_stream_.buffered_response_data_, end_stream, + FilterManager::FilterIterationStartState::CanStartFromCurrent); +} +void ConnectionManagerImpl::ActiveStreamEncoderFilter::drainSavedResponseMetadata() { + 
ASSERT(saved_response_metadata_ != nullptr); + for (auto& metadata_map : *getSavedResponseMetadata()) { + parent_.encodeMetadata(this, std::move(metadata_map)); + } + getSavedResponseMetadata()->clear(); +} void ConnectionManagerImpl::ActiveStreamEncoderFilter::handleMetadataAfterHeadersCallback() { // If we drain accumulated metadata, the iteration must start with the current filter. @@ -2614,6 +2759,12 @@ void ConnectionManagerImpl::ActiveStreamEncoderFilter::handleMetadataAfterHeader // Restores the original value of iterate_from_current_filter_. iterate_from_current_filter_ = saved_state; } +void ConnectionManagerImpl::ActiveStreamEncoderFilter::doTrailers() { + parent_.encodeTrailers(this, *parent_.active_stream_.response_trailers_); +} +bool ConnectionManagerImpl::ActiveStreamEncoderFilter::hasTrailers() { + return parent_.active_stream_.response_trailers_ != nullptr; +} void ConnectionManagerImpl::ActiveStreamEncoderFilter::addEncodedData(Buffer::Instance& data, bool streaming) { return parent_.addEncodedData(*this, data, streaming); @@ -2622,7 +2773,7 @@ void ConnectionManagerImpl::ActiveStreamEncoderFilter::addEncodedData(Buffer::In void ConnectionManagerImpl::ActiveStreamEncoderFilter::injectEncodedDataToFilterChain( Buffer::Instance& data, bool end_stream) { parent_.encodeData(this, data, end_stream, - ActiveStream::FilterIterationStartState::CanStartFromCurrent); + FilterManager::FilterIterationStartState::CanStartFromCurrent); } ResponseTrailerMap& ConnectionManagerImpl::ActiveStreamEncoderFilter::addEncodedTrailers() { @@ -2636,30 +2787,58 @@ void ConnectionManagerImpl::ActiveStreamEncoderFilter::addEncodedMetadata( void ConnectionManagerImpl::ActiveStreamEncoderFilter:: onEncoderFilterAboveWriteBufferHighWatermark() { - ENVOY_STREAM_LOG(debug, "Disabling upstream stream due to filter callbacks.", parent_); - parent_.callHighWatermarkCallbacks(); + ENVOY_STREAM_LOG(debug, "Disabling upstream stream due to filter callbacks.", + parent_.active_stream_); + parent_.active_stream_.callHighWatermarkCallbacks(); } void ConnectionManagerImpl::ActiveStreamEncoderFilter:: onEncoderFilterBelowWriteBufferLowWatermark() { - ENVOY_STREAM_LOG(debug, "Enabling upstream stream due to filter callbacks.", parent_); - parent_.callLowWatermarkCallbacks(); + ENVOY_STREAM_LOG(debug, "Enabling upstream stream due to filter callbacks.", + parent_.active_stream_); + parent_.active_stream_.callLowWatermarkCallbacks(); +} + +void ConnectionManagerImpl::ActiveStreamEncoderFilter::setEncoderBufferLimit(uint32_t limit) { + parent_.active_stream_.setBufferLimit(limit); +} + +uint32_t ConnectionManagerImpl::ActiveStreamEncoderFilter::encoderBufferLimit() { + return parent_.active_stream_.buffer_limit_; } void ConnectionManagerImpl::ActiveStreamEncoderFilter::continueEncoding() { commonContinue(); } +const Buffer::Instance* ConnectionManagerImpl::ActiveStreamEncoderFilter::encodingBuffer() { + return parent_.active_stream_.buffered_response_data_.get(); +} + +void ConnectionManagerImpl::ActiveStreamEncoderFilter::modifyEncodingBuffer( + std::function callback) { + ASSERT(parent_.active_stream_.state_.latest_data_encoding_filter_ == this); + callback(*parent_.active_stream_.buffered_response_data_.get()); +} + +Http1StreamEncoderOptionsOptRef +ConnectionManagerImpl::ActiveStreamEncoderFilter::http1StreamEncoderOptions() { + // TODO(mattklein123): At some point we might want to actually wrap this interface but for now + // we give the filter direct access to the encoder options. 
+ return parent_.active_stream_.response_encoder_->http1StreamEncoderOptions(); +} + void ConnectionManagerImpl::ActiveStreamEncoderFilter::responseDataTooLarge() { - if (parent_.state_.encoder_filters_streaming_) { + if (parent_.active_stream_.state_.encoder_filters_streaming_) { onEncoderFilterAboveWriteBufferHighWatermark(); } else { - parent_.connection_manager_.stats_.named_.rs_too_large_.inc(); + parent_.active_stream_.connection_manager_.stats_.named_.rs_too_large_.inc(); // In this case, sendLocalReply will either send a response directly to the encoder, or // reset the stream. parent_.sendLocalReply( - parent_.request_headers_ && Grpc::Common::isGrpcRequestHeaders(*parent_.request_headers_), + parent_.active_stream_.request_headers_ && + Grpc::Common::isGrpcRequestHeaders(*parent_.active_stream_.request_headers_), Http::Code::InternalServerError, CodeUtility::toString(Http::Code::InternalServerError), - nullptr, parent_.state_.is_head_request_, absl::nullopt, + nullptr, parent_.active_stream_.state_.is_head_request_, absl::nullopt, StreamInfo::ResponseCodeDetails::get().ResponsePayloadTooLarge); } } @@ -2669,12 +2848,12 @@ void ConnectionManagerImpl::ActiveStreamEncoderFilter::responseDataDrained() { } void ConnectionManagerImpl::ActiveStreamFilterBase::resetStream() { - parent_.connection_manager_.stats_.named_.downstream_rq_tx_reset_.inc(); - parent_.connection_manager_.doEndStream(this->parent_); + parent_.active_stream_.connection_manager_.stats_.named_.downstream_rq_tx_reset_.inc(); + parent_.active_stream_.connection_manager_.doEndStream(parent_.active_stream_); } uint64_t ConnectionManagerImpl::ActiveStreamFilterBase::streamId() const { - return parent_.stream_id_; + return parent_.active_stream_.stream_id_; } } // namespace Http diff --git a/source/common/http/conn_manager_impl.h b/source/common/http/conn_manager_impl.h index 104f640c1312..f5be8f24f9fd 100644 --- a/source/common/http/conn_manager_impl.h +++ b/source/common/http/conn_manager_impl.h @@ -105,12 +105,13 @@ class ConnectionManagerImpl : Logger::Loggable, private: struct ActiveStream; + class FilterManager; /** * Base class wrapper for both stream encoder and decoder filters. */ struct ActiveStreamFilterBase : public virtual StreamFilterCallbacks { - ActiveStreamFilterBase(ActiveStream& parent, bool dual_filter) + ActiveStreamFilterBase(FilterManager& parent, bool dual_filter) : parent_(parent), iteration_state_(IterationState::Continue), iterate_from_current_filter_(false), headers_continued_(false), continue_headers_continued_(false), end_stream_(false), dual_filter_(dual_filter), @@ -159,7 +160,7 @@ class ConnectionManagerImpl : Logger::Loggable, StreamInfo::StreamInfo& streamInfo() override; Tracing::Span& activeSpan() override; Tracing::Config& tracingConfig() override; - const ScopeTrackedObject& scope() override { return parent_; } + const ScopeTrackedObject& scope() override; // Functions to set or get iteration state. bool canIterate() { return iteration_state_ == IterationState::Continue; } @@ -199,7 +200,7 @@ class ConnectionManagerImpl : Logger::Loggable, StopAllWatermark, // Iteration has stopped for all frame types, and following data should // be buffered until high watermark is reached. }; - ActiveStream& parent_; + FilterManager& parent_; IterationState iteration_state_; // If the filter resumes iteration from a StopAllBuffer/Watermark state, the current filter // hasn't parsed data and trailers. 
As a result, the filter iteration should start with the @@ -221,46 +222,28 @@ class ConnectionManagerImpl : Logger::Loggable, struct ActiveStreamDecoderFilter : public ActiveStreamFilterBase, public StreamDecoderFilterCallbacks, LinkedObject { - ActiveStreamDecoderFilter(ActiveStream& parent, StreamDecoderFilterSharedPtr filter, + ActiveStreamDecoderFilter(FilterManager& parent, StreamDecoderFilterSharedPtr filter, bool dual_filter) : ActiveStreamFilterBase(parent, dual_filter), handle_(filter) {} // ActiveStreamFilterBase - bool canContinue() override { - // It is possible for the connection manager to respond directly to a request even while - // a filter is trying to continue. If a response has already happened, we should not - // continue to further filters. A concrete example of this is a filter buffering data, the - // last data frame comes in and the filter continues, but the final buffering takes the stream - // over the high watermark such that a 413 is returned. - return !parent_.state_.local_complete_; - } + bool canContinue() override; Buffer::WatermarkBufferPtr createBuffer() override; - Buffer::WatermarkBufferPtr& bufferedData() override { return parent_.buffered_request_data_; } - bool complete() override { return parent_.state_.remote_complete_; } + Buffer::WatermarkBufferPtr& bufferedData() override; + bool complete() override; bool has100Continueheaders() override { return false; } void do100ContinueHeaders() override { NOT_REACHED_GCOVR_EXCL_LINE; } - void doHeaders(bool end_stream) override { - parent_.decodeHeaders(this, *parent_.request_headers_, end_stream); - } - void doData(bool end_stream) override { - parent_.decodeData(this, *parent_.buffered_request_data_, end_stream, - ActiveStream::FilterIterationStartState::CanStartFromCurrent); - } + void doHeaders(bool end_stream) override; + void doData(bool end_stream) override; void doMetadata() override { if (saved_request_metadata_ != nullptr) { drainSavedRequestMetadata(); } } - void doTrailers() override { parent_.decodeTrailers(this, *parent_.request_trailers_); } - bool hasTrailers() override { return parent_.request_trailers_ != nullptr; } + void doTrailers() override; + bool hasTrailers() override; - void drainSavedRequestMetadata() { - ASSERT(saved_request_metadata_ != nullptr); - for (auto& metadata_map : *getSavedRequestMetadata()) { - parent_.decodeMetadata(this, *metadata_map); - } - getSavedRequestMetadata()->clear(); - } + void drainSavedRequestMetadata(); // This function is called after the filter calls decodeHeaders() to drain accumulated metadata. 
void handleMetadataAfterHeadersCallback() override; @@ -270,23 +253,14 @@ class ConnectionManagerImpl : Logger::Loggable<Logger::Id::http>, RequestTrailerMap& addDecodedTrailers() override; MetadataMapVector& addDecodedMetadata() override; void continueDecoding() override; - const Buffer::Instance* decodingBuffer() override { - return parent_.buffered_request_data_.get(); - } + const Buffer::Instance* decodingBuffer() override; - void modifyDecodingBuffer(std::function<void(Buffer::Instance&)> callback) override { - ASSERT(parent_.state_.latest_data_decoding_filter_ == this); - callback(*parent_.buffered_request_data_.get()); - } + void modifyDecodingBuffer(std::function<void(Buffer::Instance&)> callback) override; void sendLocalReply(Code code, absl::string_view body, std::function<void(ResponseHeaderMap& headers)> modify_headers, const absl::optional<Grpc::Status::GrpcStatus> grpc_status, - absl::string_view details) override { - parent_.stream_info_.setResponseCodeDetails(details); - parent_.sendLocalReply(is_grpc_request_, code, body, modify_headers, - parent_.state_.is_head_request_, grpc_status, details); - } + absl::string_view details) override; void encode100ContinueHeaders(ResponseHeaderMapPtr&& headers) override; void encodeHeaders(ResponseHeaderMapPtr&& headers, bool end_stream) override; void encodeData(Buffer::Instance& data, bool end_stream) override; @@ -298,17 +272,13 @@ class ConnectionManagerImpl : Logger::Loggable<Logger::Id::http>, addDownstreamWatermarkCallbacks(DownstreamWatermarkCallbacks& watermark_callbacks) override; void removeDownstreamWatermarkCallbacks(DownstreamWatermarkCallbacks& watermark_callbacks) override; - void setDecoderBufferLimit(uint32_t limit) override { parent_.setBufferLimit(limit); } - uint32_t decoderBufferLimit() override { return parent_.buffer_limit_; } + void setDecoderBufferLimit(uint32_t limit) override; + uint32_t decoderBufferLimit() override; bool recreateStream() override; - void addUpstreamSocketOptions(const Network::Socket::OptionsSharedPtr& options) override { - Network::Socket::appendOptions(parent_.upstream_options_, options); - } + void addUpstreamSocketOptions(const Network::Socket::OptionsSharedPtr& options) override; - Network::Socket::OptionsSharedPtr getUpstreamSocketOptions() const override { - return parent_.upstream_options_; - } + Network::Socket::OptionsSharedPtr getUpstreamSocketOptions() const override; // Each decoder filter instance checks if the request passed to the filter is gRPC // so that we can issue gRPC local responses to gRPC requests.
Filter's decodeHeaders() @@ -341,35 +311,20 @@ class ConnectionManagerImpl : Logger::Loggable, struct ActiveStreamEncoderFilter : public ActiveStreamFilterBase, public StreamEncoderFilterCallbacks, LinkedObject { - ActiveStreamEncoderFilter(ActiveStream& parent, StreamEncoderFilterSharedPtr filter, + ActiveStreamEncoderFilter(FilterManager& parent, StreamEncoderFilterSharedPtr filter, bool dual_filter) : ActiveStreamFilterBase(parent, dual_filter), handle_(filter) {} // ActiveStreamFilterBase bool canContinue() override { return true; } Buffer::WatermarkBufferPtr createBuffer() override; - Buffer::WatermarkBufferPtr& bufferedData() override { return parent_.buffered_response_data_; } - bool complete() override { return parent_.state_.local_complete_; } - bool has100Continueheaders() override { - return parent_.state_.has_continue_headers_ && !continue_headers_continued_; - } - void do100ContinueHeaders() override { - parent_.encode100ContinueHeaders(this, *parent_.continue_headers_); - } - void doHeaders(bool end_stream) override { - parent_.encodeHeaders(this, *parent_.response_headers_, end_stream); - } - void doData(bool end_stream) override { - parent_.encodeData(this, *parent_.buffered_response_data_, end_stream, - ActiveStream::FilterIterationStartState::CanStartFromCurrent); - } - void drainSavedResponseMetadata() { - ASSERT(saved_response_metadata_ != nullptr); - for (auto& metadata_map : *getSavedResponseMetadata()) { - parent_.encodeMetadata(this, std::move(metadata_map)); - } - getSavedResponseMetadata()->clear(); - } + Buffer::WatermarkBufferPtr& bufferedData() override; + bool complete() override; + bool has100Continueheaders() override; + void do100ContinueHeaders() override; + void doHeaders(bool end_stream) override; + void doData(bool end_stream) override; + void drainSavedResponseMetadata(); void handleMetadataAfterHeadersCallback() override; void doMetadata() override { @@ -377,8 +332,8 @@ class ConnectionManagerImpl : Logger::Loggable, drainSavedResponseMetadata(); } } - void doTrailers() override { parent_.encodeTrailers(this, *parent_.response_trailers_); } - bool hasTrailers() override { return parent_.response_trailers_ != nullptr; } + void doTrailers() override; + bool hasTrailers() override; // Http::StreamEncoderFilterCallbacks void addEncodedData(Buffer::Instance& data, bool streaming) override; @@ -387,21 +342,12 @@ class ConnectionManagerImpl : Logger::Loggable, void addEncodedMetadata(MetadataMapPtr&& metadata_map) override; void onEncoderFilterAboveWriteBufferHighWatermark() override; void onEncoderFilterBelowWriteBufferLowWatermark() override; - void setEncoderBufferLimit(uint32_t limit) override { parent_.setBufferLimit(limit); } - uint32_t encoderBufferLimit() override { return parent_.buffer_limit_; } + void setEncoderBufferLimit(uint32_t limit) override; + uint32_t encoderBufferLimit() override; void continueEncoding() override; - const Buffer::Instance* encodingBuffer() override { - return parent_.buffered_response_data_.get(); - } - void modifyEncodingBuffer(std::function callback) override { - ASSERT(parent_.state_.latest_data_encoding_filter_ == this); - callback(*parent_.buffered_response_data_.get()); - } - Http1StreamEncoderOptionsOptRef http1StreamEncoderOptions() override { - // TODO(mattklein123): At some point we might want to actually wrap this interface but for now - // we give the filter direct access to the encoder options. 
- return parent_.response_encoder_->http1StreamEncoderOptions(); - } + const Buffer::Instance* encodingBuffer() override; + void modifyEncodingBuffer(std::function callback) override; + Http1StreamEncoderOptionsOptRef http1StreamEncoderOptions() override; void responseDataTooLarge(); void responseDataDrained(); @@ -441,26 +387,27 @@ class ConnectionManagerImpl : Logger::Loggable, NullRouteConfigUpdateRequester() = default; }; - /** - * Wraps a single active stream on the connection. These are either full request/response pairs - * or pushes. - */ - struct ActiveStream : LinkedObject, - public Event::DeferredDeletable, - public StreamCallbacks, - public RequestDecoder, - public FilterChainFactoryCallbacks, - public Tracing::Config, - public ScopeTrackedObject { - ActiveStream(ConnectionManagerImpl& connection_manager); - ~ActiveStream() override; + class FilterManager { + public: + explicit FilterManager(ActiveStream& active_stream) : active_stream_(active_stream) {} + void destroyFilters() { + for (auto& filter : decoder_filters_) { + filter->handle_->onDestroy(); + } + for (auto& filter : encoder_filters_) { + // Do not call on destroy twice for dual registered filters. + if (!filter->dual_filter_) { + filter->handle_->onDestroy(); + } + } + } // Indicates which filter to start the iteration with. enum class FilterIterationStartState { AlwaysStartFromNext, CanStartFromCurrent }; void addStreamDecoderFilterWorker(StreamDecoderFilterSharedPtr filter, bool dual_filter); void addStreamEncoderFilterWorker(StreamEncoderFilterSharedPtr filter, bool dual_filter); - void chargeStats(const ResponseHeaderMap& headers); + // Returns the encoder filter to start iteration with. std::list::iterator commonEncodePrefix(ActiveStreamEncoderFilter* filter, bool end_stream, @@ -469,7 +416,6 @@ class ConnectionManagerImpl : Logger::Loggable, std::list::iterator commonDecodePrefix(ActiveStreamDecoderFilter* filter, FilterIterationStartState filter_iteration_start_state); - const Network::Connection* connection(); void addDecodedData(ActiveStreamDecoderFilter& filter, Buffer::Instance& data, bool streaming); RequestTrailerMap& addDecodedTrailers(); MetadataMapVector& addDecodedMetadata(); @@ -493,7 +439,7 @@ class ConnectionManagerImpl : Logger::Loggable, const std::function& modify_headers, bool is_head_request, const absl::optional grpc_status, - absl::string_view details) override; + absl::string_view details); void sendLocalReplyViaFilterChain( bool is_grpc_request, Code code, absl::string_view body, const std::function& modify_headers, bool is_head_request, @@ -514,25 +460,56 @@ class ConnectionManagerImpl : Logger::Loggable, void encodeTrailers(ActiveStreamEncoderFilter* filter, ResponseTrailerMap& trailers); void encodeMetadata(ActiveStreamEncoderFilter* filter, MetadataMapPtr&& metadata_map_ptr); - // This is a helper function for encodeHeaders and responseDataTooLarge which allows for shared - // code for the two headers encoding paths. It does header munging, updates timing stats, and - // sends the headers to the encoder. - void encodeHeadersInternal(ResponseHeaderMap& headers, bool end_stream); - // This is a helper function for encodeData and responseDataTooLarge which allows for shared - // code for the two data encoding paths. It does stats updates and tracks potential end of - // stream. - void encodeDataInternal(Buffer::Instance& data, bool end_stream); - void maybeEndEncode(bool end_stream); // Returns true if new metadata is decoded. Otherwise, returns false. 
bool processNewlyAddedMetadata(); - uint64_t streamId() { return stream_id_; } + // Returns true if filter has stopped iteration for all frame types. Otherwise, returns false. // filter_streaming is the variable to indicate if stream is streaming, and its value may be // changed by the function. bool handleDataIfStopAll(ActiveStreamFilterBase& filter, Buffer::Instance& data, bool& filter_streaming); + ActiveStream& active_stream_; + + private: + std::list<ActiveStreamDecoderFilterPtr> decoder_filters_; + std::list<ActiveStreamEncoderFilterPtr> encoder_filters_; + }; + + /** + * Wraps a single active stream on the connection. These are either full request/response pairs + * or pushes. + */ + struct ActiveStream : LinkedObject<ActiveStream>, + public Event::DeferredDeletable, + public StreamCallbacks, + public RequestDecoder, + public FilterChainFactoryCallbacks, + public Tracing::Config, + public ScopeTrackedObject { + ActiveStream(ConnectionManagerImpl& connection_manager); + ~ActiveStream() override; + + void chargeStats(const ResponseHeaderMap& headers); + const Network::Connection* connection(); + void sendLocalReply(bool is_grpc_request, Code code, absl::string_view body, + + const std::function<void(ResponseHeaderMap& headers)>& modify_headers, + bool is_head_request, + const absl::optional<Grpc::Status::GrpcStatus> grpc_status, + absl::string_view details) override; + uint64_t streamId() { return stream_id_; } + + // This is a helper function for encodeHeaders and responseDataTooLarge which allows for + // shared code for the two headers encoding paths. It does header munging, updates timing + // stats, and sends the headers to the encoder. + void encodeHeadersInternal(ResponseHeaderMap& headers, bool end_stream); + // This is a helper function for encodeData and responseDataTooLarge which allows for shared + // code for the two data encoding paths. It does stats updates and tracks potential end of + // stream. + void encodeDataInternal(Buffer::Instance& data, bool end_stream); + // Http::StreamCallbacks void onResetStream(StreamResetReason reason, absl::string_view transport_failure_reason) override; @@ -549,14 +526,14 @@ class ConnectionManagerImpl : Logger::Loggable<Logger::Id::http>, // Http::FilterChainFactoryCallbacks void addStreamDecoderFilter(StreamDecoderFilterSharedPtr filter) override { - addStreamDecoderFilterWorker(filter, false); + filter_manager_.addStreamDecoderFilterWorker(filter, false); } void addStreamEncoderFilter(StreamEncoderFilterSharedPtr filter) override { - addStreamEncoderFilterWorker(filter, false); + filter_manager_.addStreamEncoderFilterWorker(filter, false); } void addStreamFilter(StreamFilterSharedPtr filter) override { - addStreamDecoderFilterWorker(filter, true); - addStreamEncoderFilterWorker(filter, true); + filter_manager_.addStreamDecoderFilterWorker(filter, true); + filter_manager_.addStreamEncoderFilterWorker(filter, true); } void addAccessLogHandler(AccessLog::InstanceSharedPtr handler) override; @@ -708,6 +685,7 @@ class ConnectionManagerImpl : Logger::Loggable<Logger::Id::http>, } ConnectionManagerImpl& connection_manager_; + FilterManager filter_manager_; Router::ConfigConstSharedPtr snapped_route_config_; Router::ScopedConfigConstSharedPtr snapped_scoped_routes_config_; Tracing::SpanPtr active_span_; @@ -720,8 +698,6 @@ class ConnectionManagerImpl : Logger::Loggable<Logger::Id::http>, RequestHeaderMapPtr request_headers_; Buffer::WatermarkBufferPtr buffered_request_data_; RequestTrailerMapPtr request_trailers_; - std::list<ActiveStreamDecoderFilterPtr> decoder_filters_; - std::list<ActiveStreamEncoderFilterPtr> encoder_filters_; std::list<AccessLog::InstanceSharedPtr> access_log_handlers_; Stats::TimespanPtr request_response_timespan_; // Per-stream idle timeout.
@@ -746,6 +722,8 @@ class ConnectionManagerImpl : Logger::Loggable, Network::Socket::OptionsSharedPtr upstream_options_; std::unique_ptr route_config_update_requester_; std::unique_ptr tracing_custom_tags_{nullptr}; + + friend FilterManager; }; using ActiveStreamPtr = std::unique_ptr; From 8972b478e6c9f1e7342e3dbfb57b35317c0cc009 Mon Sep 17 00:00:00 2001 From: Weston Carlson Date: Tue, 28 Jul 2020 07:46:48 -0600 Subject: [PATCH 762/909] transport socket: Add proxy proto transport socket. (#11584) Commit Message: Add proxy proto transport socket Additional Description: This is the part 1 PR described in #10682. It adds the transports socket / unit tests, a transport socket options struct for the proxy proto header, and does a refactor to make the listener filter use the common proxy proto constants (potentially want to move these now since the proxy proto config api type is not in extensions?) Risk Level: Small Testing: Unit Docs Changes: None Release Notes: None Part Of: #1031 Signed-off-by: Weston Carlson Co-authored-by: Lizan Zhou --- CODEOWNERS | 4 + api/BUILD | 1 + .../transport_sockets/proxy_protocol/v3/BUILD | 12 + .../v3/upstream_proxy_protocol.proto | 26 ++ api/versioning/BUILD | 1 + .../transport_sockets/proxy_protocol/v3/BUILD | 12 + .../v3/upstream_proxy_protocol.proto | 26 ++ include/envoy/network/BUILD | 9 + include/envoy/network/proxy_protocol.h | 14 + include/envoy/network/transport_socket.h | 6 + source/common/network/BUILD | 13 + .../network/proxy_protocol_filter_state.cc | 13 + .../network/proxy_protocol_filter_state.h | 23 + .../network/transport_socket_options_impl.cc | 12 +- .../network/transport_socket_options_impl.h | 21 +- source/extensions/extensions_build_config.bzl | 1 + .../filters/listener/proxy_protocol/BUILD | 1 + .../listener/proxy_protocol/proxy_protocol.cc | 16 + .../listener/proxy_protocol/proxy_protocol.h | 5 + .../proxy_protocol/proxy_protocol_header.h | 24 -- .../extensions/transport_sockets/common/BUILD | 20 + .../transport_sockets/common/passthrough.cc | 47 +++ .../transport_sockets/common/passthrough.h | 32 ++ .../transport_sockets/proxy_protocol/BUILD | 26 ++ .../proxy_protocol/proxy_protocol.cc | 106 +++++ .../proxy_protocol/proxy_protocol.h | 44 ++ source/extensions/transport_sockets/tap/BUILD | 1 + .../extensions/transport_sockets/tap/tap.cc | 11 +- source/extensions/transport_sockets/tap/tap.h | 9 +- .../transport_sockets/well_known_names.h | 5 +- test/common/network/BUILD | 1 + .../transport_socket_options_impl_test.cc | 10 + .../extensions/transport_sockets/common/BUILD | 20 + .../common/passthrough_test.cc | 90 ++++ .../transport_sockets/proxy_protocol/BUILD | 27 ++ .../proxy_protocol/proxy_protocol_test.cc | 398 ++++++++++++++++++ test/mocks/buffer/mocks.h | 4 + test/test_common/utility.cc | 19 + test/test_common/utility.h | 12 + 39 files changed, 1073 insertions(+), 49 deletions(-) create mode 100644 api/envoy/extensions/transport_sockets/proxy_protocol/v3/BUILD create mode 100644 api/envoy/extensions/transport_sockets/proxy_protocol/v3/upstream_proxy_protocol.proto create mode 100644 generated_api_shadow/envoy/extensions/transport_sockets/proxy_protocol/v3/BUILD create mode 100644 generated_api_shadow/envoy/extensions/transport_sockets/proxy_protocol/v3/upstream_proxy_protocol.proto create mode 100644 include/envoy/network/proxy_protocol.h create mode 100644 source/common/network/proxy_protocol_filter_state.cc create mode 100644 source/common/network/proxy_protocol_filter_state.h create mode 100644 
source/extensions/transport_sockets/common/BUILD create mode 100644 source/extensions/transport_sockets/common/passthrough.cc create mode 100644 source/extensions/transport_sockets/common/passthrough.h create mode 100644 source/extensions/transport_sockets/proxy_protocol/BUILD create mode 100644 source/extensions/transport_sockets/proxy_protocol/proxy_protocol.cc create mode 100644 source/extensions/transport_sockets/proxy_protocol/proxy_protocol.h create mode 100644 test/extensions/transport_sockets/common/BUILD create mode 100644 test/extensions/transport_sockets/common/passthrough_test.cc create mode 100644 test/extensions/transport_sockets/proxy_protocol/BUILD create mode 100644 test/extensions/transport_sockets/proxy_protocol/proxy_protocol_test.cc diff --git a/CODEOWNERS b/CODEOWNERS index 5c30c1bb9923..8b206bb0f1c7 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -36,6 +36,10 @@ extensions/filters/common/original_src @snowp @klarose /*/extensions/transport_sockets/alts @htuch @yangminzhu # tls transport socket extension /*/extensions/transport_sockets/tls @PiotrSikora @lizan +# proxy protocol socket extension +/*/extensions/transport_sockets/proxy_protocol @alyssawilk @wez470 +# common transport socket +/*/extensions/transport_sockets/common @alyssawilk @wez470 # sni_cluster extension /*/extensions/filters/network/sni_cluster @rshriram @lizan # sni_dynamic_forward_proxy extension diff --git a/api/BUILD b/api/BUILD index 50835fb0b1c4..8c608fdeca4a 100644 --- a/api/BUILD +++ b/api/BUILD @@ -231,6 +231,7 @@ proto_library( "//envoy/extensions/retry/host/omit_host_metadata/v3:pkg", "//envoy/extensions/retry/priority/previous_priorities/v3:pkg", "//envoy/extensions/transport_sockets/alts/v3:pkg", + "//envoy/extensions/transport_sockets/proxy_protocol/v3:pkg", "//envoy/extensions/transport_sockets/quic/v3:pkg", "//envoy/extensions/transport_sockets/raw_buffer/v3:pkg", "//envoy/extensions/transport_sockets/tap/v3:pkg", diff --git a/api/envoy/extensions/transport_sockets/proxy_protocol/v3/BUILD b/api/envoy/extensions/transport_sockets/proxy_protocol/v3/BUILD new file mode 100644 index 000000000000..2c3dad6453b6 --- /dev/null +++ b/api/envoy/extensions/transport_sockets/proxy_protocol/v3/BUILD @@ -0,0 +1,12 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/transport_sockets/proxy_protocol/v3/upstream_proxy_protocol.proto b/api/envoy/extensions/transport_sockets/proxy_protocol/v3/upstream_proxy_protocol.proto new file mode 100644 index 000000000000..c6c2ee9798d6 --- /dev/null +++ b/api/envoy/extensions/transport_sockets/proxy_protocol/v3/upstream_proxy_protocol.proto @@ -0,0 +1,26 @@ +syntax = "proto3"; + +package envoy.extensions.transport_sockets.proxy_protocol.v3; + +import "envoy/config/core/v3/base.proto"; +import "envoy/config/core/v3/proxy_protocol.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.proxy_protocol.v3"; +option java_outer_classname = "UpstreamProxyProtocolProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Upstream Proxy Protocol] +// [#extension: envoy.transport_sockets.upstream_proxy_protocol] +// [#not-implemented-hide:] +// Configuration for PROXY protocol socket +message ProxyProtocolUpstreamTransport { + config.core.v3.ProxyProtocolConfig config = 1; + + // The underlying transport socket being wrapped. + config.core.v3.TransportSocket transport_socket = 2 [(validate.rules).message = {required: true}]; +} diff --git a/api/versioning/BUILD b/api/versioning/BUILD index 00939e940295..305f09df3cae 100644 --- a/api/versioning/BUILD +++ b/api/versioning/BUILD @@ -114,6 +114,7 @@ proto_library( "//envoy/extensions/retry/host/omit_host_metadata/v3:pkg", "//envoy/extensions/retry/priority/previous_priorities/v3:pkg", "//envoy/extensions/transport_sockets/alts/v3:pkg", + "//envoy/extensions/transport_sockets/proxy_protocol/v3:pkg", "//envoy/extensions/transport_sockets/quic/v3:pkg", "//envoy/extensions/transport_sockets/raw_buffer/v3:pkg", "//envoy/extensions/transport_sockets/tap/v3:pkg", diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/proxy_protocol/v3/BUILD b/generated_api_shadow/envoy/extensions/transport_sockets/proxy_protocol/v3/BUILD new file mode 100644 index 000000000000..2c3dad6453b6 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/transport_sockets/proxy_protocol/v3/BUILD @@ -0,0 +1,12 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/proxy_protocol/v3/upstream_proxy_protocol.proto b/generated_api_shadow/envoy/extensions/transport_sockets/proxy_protocol/v3/upstream_proxy_protocol.proto new file mode 100644 index 000000000000..c6c2ee9798d6 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/transport_sockets/proxy_protocol/v3/upstream_proxy_protocol.proto @@ -0,0 +1,26 @@ +syntax = "proto3"; + +package envoy.extensions.transport_sockets.proxy_protocol.v3; + +import "envoy/config/core/v3/base.proto"; +import "envoy/config/core/v3/proxy_protocol.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.proxy_protocol.v3"; +option java_outer_classname = "UpstreamProxyProtocolProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Upstream Proxy Protocol] +// [#extension: envoy.transport_sockets.upstream_proxy_protocol] +// [#not-implemented-hide:] +// Configuration for PROXY protocol socket +message ProxyProtocolUpstreamTransport { + config.core.v3.ProxyProtocolConfig config = 1; + + // The underlying transport socket being wrapped. + config.core.v3.TransportSocket transport_socket = 2 [(validate.rules).message = {required: true}]; +} diff --git a/include/envoy/network/BUILD b/include/envoy/network/BUILD index a9f4dc7bd739..3076f862ddb8 100644 --- a/include/envoy/network/BUILD +++ b/include/envoy/network/BUILD @@ -116,6 +116,7 @@ envoy_cc_library( hdrs = ["transport_socket.h"], deps = [ ":io_handle_interface", + ":proxy_protocol_options_lib", "//include/envoy/buffer:buffer_interface", "//include/envoy/ssl:connection_interface", ], @@ -152,3 +153,11 @@ envoy_cc_library( "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], ) + +envoy_cc_library( + name = "proxy_protocol_options_lib", + hdrs = ["proxy_protocol.h"], + deps = [ + ":address_interface", + ], +) diff --git a/include/envoy/network/proxy_protocol.h b/include/envoy/network/proxy_protocol.h new file mode 100644 index 000000000000..52c111859b11 --- /dev/null +++ b/include/envoy/network/proxy_protocol.h @@ -0,0 +1,14 @@ +#pragma once + +#include "envoy/network/address.h" + +namespace Envoy { +namespace Network { + +struct ProxyProtocolData { + const Network::Address::InstanceConstSharedPtr src_addr_; + const Network::Address::InstanceConstSharedPtr dst_addr_; +}; + +} // namespace Network +} // namespace Envoy \ No newline at end of file diff --git a/include/envoy/network/transport_socket.h b/include/envoy/network/transport_socket.h index b8f1063ad4bc..9e117b116134 100644 --- a/include/envoy/network/transport_socket.h +++ b/include/envoy/network/transport_socket.h @@ -5,6 +5,7 @@ #include "envoy/buffer/buffer.h" #include "envoy/common/pure.h" #include "envoy/network/io_handle.h" +#include "envoy/network/proxy_protocol.h" #include "envoy/ssl/connection.h" #include "absl/types/optional.h" @@ -200,6 +201,11 @@ class TransportSocketOptions { */ virtual const absl::optional& applicationProtocolFallback() const PURE; + /** + * @return optional PROXY protocol address information. 
+ */ + virtual absl::optional proxyProtocolOptions() const PURE; + /** * @param vector of bytes to which the option should append hash key data that will be used * to separate connections based on the option. Any data already in the key vector must diff --git a/source/common/network/BUILD b/source/common/network/BUILD index 67e3e0be9899..dd8bcb546337 100644 --- a/source/common/network/BUILD +++ b/source/common/network/BUILD @@ -358,8 +358,10 @@ envoy_cc_library( hdrs = ["transport_socket_options_impl.h"], deps = [ ":application_protocol_lib", + ":proxy_protocol_filter_state_lib", ":upstream_server_name_lib", ":upstream_subject_alt_names_lib", + "//include/envoy/network:proxy_protocol_options_lib", "//include/envoy/network:transport_socket_interface", "//include/envoy/stream_info:filter_state_interface", "//source/common/common:scalar_to_byte_vector_lib", @@ -402,3 +404,14 @@ envoy_cc_library( "@envoy_api//envoy/config/listener/v3:pkg_cc_proto", ], ) + +envoy_cc_library( + name = "proxy_protocol_filter_state_lib", + srcs = ["proxy_protocol_filter_state.cc"], + hdrs = ["proxy_protocol_filter_state.h"], + deps = [ + "//include/envoy/network:proxy_protocol_options_lib", + "//include/envoy/stream_info:filter_state_interface", + "//source/common/common:macros", + ], +) diff --git a/source/common/network/proxy_protocol_filter_state.cc b/source/common/network/proxy_protocol_filter_state.cc new file mode 100644 index 000000000000..cae58c961253 --- /dev/null +++ b/source/common/network/proxy_protocol_filter_state.cc @@ -0,0 +1,13 @@ +#include "common/network/proxy_protocol_filter_state.h" + +#include "common/common/macros.h" + +namespace Envoy { +namespace Network { + +const std::string& ProxyProtocolFilterState::key() { + CONSTRUCT_ON_FIRST_USE(std::string, "envoy.network.proxy_protocol_options"); +} + +} // namespace Network +} // namespace Envoy diff --git a/source/common/network/proxy_protocol_filter_state.h b/source/common/network/proxy_protocol_filter_state.h new file mode 100644 index 000000000000..9cb35a9ee878 --- /dev/null +++ b/source/common/network/proxy_protocol_filter_state.h @@ -0,0 +1,23 @@ +#pragma once + +#include "envoy/network/proxy_protocol.h" +#include "envoy/stream_info/filter_state.h" + +namespace Envoy { +namespace Network { + +/** + * PROXY protocol info to be used in connections. 
+ */ +class ProxyProtocolFilterState : public StreamInfo::FilterState::Object { +public: + ProxyProtocolFilterState(Network::ProxyProtocolData options) : options_(options) {} + const Network::ProxyProtocolData& value() const { return options_; } + static const std::string& key(); + +private: + const Network::ProxyProtocolData options_; +}; + +} // namespace Network +} // namespace Envoy diff --git a/source/common/network/transport_socket_options_impl.cc b/source/common/network/transport_socket_options_impl.cc index 6d3ccf5ecea3..62358ce48710 100644 --- a/source/common/network/transport_socket_options_impl.cc +++ b/source/common/network/transport_socket_options_impl.cc @@ -9,6 +9,7 @@ #include "common/common/scalar_to_byte_vector.h" #include "common/common/utility.h" #include "common/network/application_protocol.h" +#include "common/network/proxy_protocol_filter_state.h" #include "common/network/upstream_server_name.h" #include "common/network/upstream_subject_alt_names.h" @@ -54,6 +55,7 @@ TransportSocketOptionsUtility::fromFilterState(const StreamInfo::FilterState& fi absl::string_view server_name; std::vector application_protocols; std::vector subject_alt_names; + absl::optional proxy_protocol_options; bool needs_transport_socket_options = false; if (filter_state.hasData(UpstreamServerName::key())) { @@ -77,9 +79,17 @@ TransportSocketOptionsUtility::fromFilterState(const StreamInfo::FilterState& fi needs_transport_socket_options = true; } + if (filter_state.hasData(ProxyProtocolFilterState::key())) { + const auto& proxy_protocol_filter_state = + filter_state.getDataReadOnly(ProxyProtocolFilterState::key()); + proxy_protocol_options.emplace(proxy_protocol_filter_state.value()); + needs_transport_socket_options = true; + } + if (needs_transport_socket_options) { return std::make_shared( - server_name, std::move(subject_alt_names), std::move(application_protocols)); + server_name, std::move(subject_alt_names), std::move(application_protocols), absl::nullopt, + proxy_protocol_options); } else { return nullptr; } diff --git a/source/common/network/transport_socket_options_impl.h b/source/common/network/transport_socket_options_impl.h index a181676db176..3611f117c8e5 100644 --- a/source/common/network/transport_socket_options_impl.h +++ b/source/common/network/transport_socket_options_impl.h @@ -1,5 +1,6 @@ #pragma once +#include "envoy/network/proxy_protocol.h" #include "envoy/network/transport_socket.h" #include "envoy/stream_info/filter_state.h" @@ -25,6 +26,9 @@ class AlpnDecoratingTransportSocketOptions : public TransportSocketOptions { const absl::optional& applicationProtocolFallback() const override { return alpn_fallback_; } + absl::optional proxyProtocolOptions() const override { + return inner_options_->proxyProtocolOptions(); + } void hashKey(std::vector& key) const override; private: @@ -34,15 +38,18 @@ class AlpnDecoratingTransportSocketOptions : public TransportSocketOptions { class TransportSocketOptionsImpl : public TransportSocketOptions { public: - TransportSocketOptionsImpl(absl::string_view override_server_name = "", - std::vector&& override_verify_san_list = {}, - std::vector&& override_alpn = {}, - absl::optional&& fallback_alpn = {}) + TransportSocketOptionsImpl( + absl::string_view override_server_name = "", + std::vector&& override_verify_san_list = {}, + std::vector&& override_alpn = {}, + absl::optional&& fallback_alpn = {}, + absl::optional proxy_proto_options = absl::nullopt) : override_server_name_(override_server_name.empty() ? 
absl::nullopt : absl::optional<std::string>(override_server_name)), override_verify_san_list_{std::move(override_verify_san_list)}, - override_alpn_list_{std::move(override_alpn)}, alpn_fallback_{std::move(fallback_alpn)} {} + override_alpn_list_{std::move(override_alpn)}, alpn_fallback_{std::move(fallback_alpn)}, + proxy_protocol_options_(proxy_proto_options) {} // Network::TransportSocketOptions const absl::optional<std::string>& serverNameOverride() const override { @@ -57,6 +64,9 @@ class TransportSocketOptionsImpl : public TransportSocketOptions { const absl::optional<std::string>& applicationProtocolFallback() const override { return alpn_fallback_; } + absl::optional<Network::ProxyProtocolData> proxyProtocolOptions() const override { + return proxy_protocol_options_; + } void hashKey(std::vector<uint8_t>& key) const override; private: @@ -64,6 +74,7 @@ class TransportSocketOptionsImpl : public TransportSocketOptions { const std::vector<std::string> override_verify_san_list_; const std::vector<std::string> override_alpn_list_; const absl::optional<std::string> alpn_fallback_; + const absl::optional<Network::ProxyProtocolData> proxy_protocol_options_; }; class TransportSocketOptionsUtility { diff --git a/source/extensions/extensions_build_config.bzl b/source/extensions/extensions_build_config.bzl index 8c1e615a26a8..d69443ada6a0 100644 --- a/source/extensions/extensions_build_config.bzl +++ b/source/extensions/extensions_build_config.bzl @@ -160,6 +160,7 @@ EXTENSIONS = { # "envoy.transport_sockets.alts": "//source/extensions/transport_sockets/alts:config", + "envoy.transport_sockets.upstream_proxy_protocol": "//source/extensions/transport_sockets/proxy_protocol:upstream_proxy_protocol", "envoy.transport_sockets.raw_buffer": "//source/extensions/transport_sockets/raw_buffer:config", "envoy.transport_sockets.tap": "//source/extensions/transport_sockets/tap:config", "envoy.transport_sockets.quic": "//source/extensions/quic_listeners/quiche:quic_transport_socket_factory_lib", diff --git a/source/extensions/filters/listener/proxy_protocol/BUILD b/source/extensions/filters/listener/proxy_protocol/BUILD index d39dc0a51d6d..407d05e43468 100644 --- a/source/extensions/filters/listener/proxy_protocol/BUILD +++ b/source/extensions/filters/listener/proxy_protocol/BUILD @@ -29,6 +29,7 @@ envoy_cc_library( "//source/common/common:utility_lib", "//source/common/network:address_lib", "//source/common/network:utility_lib", + "//source/extensions/common/proxy_protocol:proxy_protocol_header_lib", "//source/extensions/filters/listener:well_known_names", "@envoy_api//envoy/extensions/filters/listener/proxy_protocol/v3:pkg_cc_proto", ], diff --git a/source/extensions/filters/listener/proxy_protocol/proxy_protocol.cc b/source/extensions/filters/listener/proxy_protocol/proxy_protocol.cc index e2561480785a..c3029c2234cf 100644 --- a/source/extensions/filters/listener/proxy_protocol/proxy_protocol.cc +++ b/source/extensions/filters/listener/proxy_protocol/proxy_protocol.cc @@ -21,8 +21,24 @@ #include "common/network/address_impl.h" #include "common/network/utility.h" +#include "extensions/common/proxy_protocol/proxy_protocol_header.h" #include "extensions/filters/listener/well_known_names.h" +using Envoy::Extensions::Common::ProxyProtocol::PROXY_PROTO_V1_SIGNATURE; +using Envoy::Extensions::Common::ProxyProtocol::PROXY_PROTO_V1_SIGNATURE_LEN; +using Envoy::Extensions::Common::ProxyProtocol::PROXY_PROTO_V2_ADDR_LEN_INET; +using Envoy::Extensions::Common::ProxyProtocol::PROXY_PROTO_V2_ADDR_LEN_INET6; +using Envoy::Extensions::Common::ProxyProtocol::PROXY_PROTO_V2_AF_INET; +using Envoy::Extensions::Common::ProxyProtocol::PROXY_PROTO_V2_AF_INET6; +using
Envoy::Extensions::Common::ProxyProtocol::PROXY_PROTO_V2_HEADER_LEN; +using Envoy::Extensions::Common::ProxyProtocol::PROXY_PROTO_V2_LOCAL; +using Envoy::Extensions::Common::ProxyProtocol::PROXY_PROTO_V2_ONBEHALF_OF; +using Envoy::Extensions::Common::ProxyProtocol::PROXY_PROTO_V2_SIGNATURE; +using Envoy::Extensions::Common::ProxyProtocol::PROXY_PROTO_V2_SIGNATURE_LEN; +using Envoy::Extensions::Common::ProxyProtocol::PROXY_PROTO_V2_TRANSPORT_DGRAM; +using Envoy::Extensions::Common::ProxyProtocol::PROXY_PROTO_V2_TRANSPORT_STREAM; +using Envoy::Extensions::Common::ProxyProtocol::PROXY_PROTO_V2_VERSION; + namespace Envoy { namespace Extensions { namespace ListenerFilters { diff --git a/source/extensions/filters/listener/proxy_protocol/proxy_protocol.h b/source/extensions/filters/listener/proxy_protocol/proxy_protocol.h index ac390908f52c..26ee119f5d3d 100644 --- a/source/extensions/filters/listener/proxy_protocol/proxy_protocol.h +++ b/source/extensions/filters/listener/proxy_protocol/proxy_protocol.h @@ -8,9 +8,14 @@ #include "common/common/logger.h" +#include "extensions/common/proxy_protocol/proxy_protocol_header.h" + #include "absl/container/flat_hash_map.h" #include "proxy_protocol_header.h" +using Envoy::Extensions::Common::ProxyProtocol::PROXY_PROTO_V2_ADDR_LEN_UNIX; +using Envoy::Extensions::Common::ProxyProtocol::PROXY_PROTO_V2_HEADER_LEN; + namespace Envoy { namespace Extensions { namespace ListenerFilters { diff --git a/source/extensions/filters/listener/proxy_protocol/proxy_protocol_header.h b/source/extensions/filters/listener/proxy_protocol/proxy_protocol_header.h index 63c3c96eadf0..c451c8f5e1c7 100644 --- a/source/extensions/filters/listener/proxy_protocol/proxy_protocol_header.h +++ b/source/extensions/filters/listener/proxy_protocol/proxy_protocol_header.h @@ -9,22 +9,6 @@ namespace Extensions { namespace ListenerFilters { namespace ProxyProtocol { -// See https://github.com/haproxy/haproxy/blob/master/doc/proxy-protocol.txt for definitions - -// TODO(wez470): Refactor listener filter to use common proxy proto constants -constexpr char PROXY_PROTO_V1_SIGNATURE[] = "PROXY "; -constexpr uint32_t PROXY_PROTO_V1_SIGNATURE_LEN = 6; -constexpr char PROXY_PROTO_V2_SIGNATURE[] = "\x0d\x0a\x0d\x0a\x00\x0d\x0a\x51\x55\x49\x54\x0a"; -constexpr uint32_t PROXY_PROTO_V2_SIGNATURE_LEN = 12; -constexpr uint32_t PROXY_PROTO_V2_HEADER_LEN = 16; -constexpr uint32_t PROXY_PROTO_V2_VERSION = 0x2; -constexpr uint32_t PROXY_PROTO_V2_ONBEHALF_OF = 0x1; -constexpr uint32_t PROXY_PROTO_V2_LOCAL = 0x0; - -constexpr uint32_t PROXY_PROTO_V2_AF_INET = 0x1; -constexpr uint32_t PROXY_PROTO_V2_AF_INET6 = 0x2; -constexpr uint32_t PROXY_PROTO_V2_AF_UNIX = 0x3; - struct WireHeader { WireHeader(size_t extensions_length) : extensions_length_(extensions_length), protocol_version_(Network::Address::IpVersion::v4), @@ -44,14 +28,6 @@ struct WireHeader { const bool local_command_; }; -constexpr uint32_t PROXY_PROTO_V2_ADDR_LEN_UNSPEC = 0; -constexpr uint32_t PROXY_PROTO_V2_ADDR_LEN_INET = 12; -constexpr uint32_t PROXY_PROTO_V2_ADDR_LEN_INET6 = 36; -constexpr uint32_t PROXY_PROTO_V2_ADDR_LEN_UNIX = 216; - -constexpr uint8_t PROXY_PROTO_V2_TRANSPORT_STREAM = 0x1; -constexpr uint8_t PROXY_PROTO_V2_TRANSPORT_DGRAM = 0x2; - } // namespace ProxyProtocol } // namespace ListenerFilters } // namespace Extensions diff --git a/source/extensions/transport_sockets/common/BUILD b/source/extensions/transport_sockets/common/BUILD new file mode 100644 index 000000000000..8aacce0fdd15 --- /dev/null +++ 
b/source/extensions/transport_sockets/common/BUILD @@ -0,0 +1,20 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_library( + name = "passthrough_lib", + srcs = ["passthrough.cc"], + hdrs = ["passthrough.h"], + deps = [ + "//include/envoy/network:connection_interface", + "//include/envoy/network:transport_socket_interface", + "//source/common/buffer:buffer_lib", + ], +) diff --git a/source/extensions/transport_sockets/common/passthrough.cc b/source/extensions/transport_sockets/common/passthrough.cc new file mode 100644 index 000000000000..60d632adb24a --- /dev/null +++ b/source/extensions/transport_sockets/common/passthrough.cc @@ -0,0 +1,47 @@ +#include "extensions/transport_sockets/common/passthrough.h" + +#include "envoy/network/connection.h" +#include "envoy/network/transport_socket.h" + +#include "common/buffer/buffer_impl.h" + +namespace Envoy { +namespace Extensions { +namespace TransportSockets { + +PassthroughSocket::PassthroughSocket(Network::TransportSocketPtr&& transport_socket) + : transport_socket_(std::move(transport_socket)) {} + +void PassthroughSocket::setTransportSocketCallbacks(Network::TransportSocketCallbacks& callbacks) { + transport_socket_->setTransportSocketCallbacks(callbacks); +} + +std::string PassthroughSocket::protocol() const { return transport_socket_->protocol(); } + +absl::string_view PassthroughSocket::failureReason() const { + return transport_socket_->failureReason(); +} + +bool PassthroughSocket::canFlushClose() { return transport_socket_->canFlushClose(); } + +void PassthroughSocket::closeSocket(Network::ConnectionEvent event) { + transport_socket_->closeSocket(event); +} + +Network::IoResult PassthroughSocket::doRead(Buffer::Instance& buffer) { + return transport_socket_->doRead(buffer); +} + +Network::IoResult PassthroughSocket::doWrite(Buffer::Instance& buffer, bool end_stream) { + return transport_socket_->doWrite(buffer, end_stream); +} + +void PassthroughSocket::onConnected() { transport_socket_->onConnected(); } + +Ssl::ConnectionInfoConstSharedPtr PassthroughSocket::ssl() const { + return transport_socket_->ssl(); +} + +} // namespace TransportSockets +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/transport_sockets/common/passthrough.h b/source/extensions/transport_sockets/common/passthrough.h new file mode 100644 index 000000000000..bbf832c73419 --- /dev/null +++ b/source/extensions/transport_sockets/common/passthrough.h @@ -0,0 +1,32 @@ +#pragma once + +#include "envoy/network/connection.h" +#include "envoy/network/transport_socket.h" + +#include "common/buffer/buffer_impl.h" + +namespace Envoy { +namespace Extensions { +namespace TransportSockets { + +class PassthroughSocket : public Network::TransportSocket { +public: + PassthroughSocket(Network::TransportSocketPtr&& transport_socket); + + void setTransportSocketCallbacks(Network::TransportSocketCallbacks& callbacks) override; + std::string protocol() const override; + absl::string_view failureReason() const override; + bool canFlushClose() override; + void closeSocket(Network::ConnectionEvent event) override; + Network::IoResult doRead(Buffer::Instance& buffer) override; + Network::IoResult doWrite(Buffer::Instance& buffer, bool end_stream) override; + void onConnected() override; + Ssl::ConnectionInfoConstSharedPtr ssl() const override; + +protected: + Network::TransportSocketPtr transport_socket_; +}; + +} // namespace 
TransportSockets +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/transport_sockets/proxy_protocol/BUILD b/source/extensions/transport_sockets/proxy_protocol/BUILD new file mode 100644 index 000000000000..d44382487e85 --- /dev/null +++ b/source/extensions/transport_sockets/proxy_protocol/BUILD @@ -0,0 +1,26 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_extension( + name = "upstream_proxy_protocol", + srcs = ["proxy_protocol.cc"], + hdrs = ["proxy_protocol.h"], + security_posture = "robust_to_untrusted_downstream", + undocumented = True, + deps = [ + "//include/envoy/network:connection_interface", + "//include/envoy/network:transport_socket_interface", + "//source/common/buffer:buffer_lib", + "//source/common/network:address_lib", + "//source/extensions/common/proxy_protocol:proxy_protocol_header_lib", + "//source/extensions/transport_sockets/common:passthrough_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + ], +) diff --git a/source/extensions/transport_sockets/proxy_protocol/proxy_protocol.cc b/source/extensions/transport_sockets/proxy_protocol/proxy_protocol.cc new file mode 100644 index 000000000000..d1427b7aaa9d --- /dev/null +++ b/source/extensions/transport_sockets/proxy_protocol/proxy_protocol.cc @@ -0,0 +1,106 @@ +#include "extensions/transport_sockets/proxy_protocol/proxy_protocol.h" + +#include + +#include "envoy/config/core/v3/proxy_protocol.pb.h" +#include "envoy/network/transport_socket.h" + +#include "common/buffer/buffer_impl.h" +#include "common/network/address_impl.h" + +#include "extensions/common/proxy_protocol/proxy_protocol_header.h" + +using envoy::config::core::v3::ProxyProtocolConfig_Version; + +namespace Envoy { +namespace Extensions { +namespace TransportSockets { +namespace ProxyProtocol { + +UpstreamProxyProtocolSocket::UpstreamProxyProtocolSocket( + Network::TransportSocketPtr&& transport_socket, + Network::TransportSocketOptionsSharedPtr options, ProxyProtocolConfig_Version version) + : PassthroughSocket(std::move(transport_socket)), options_(options), version_(version) {} + +void UpstreamProxyProtocolSocket::setTransportSocketCallbacks( + Network::TransportSocketCallbacks& callbacks) { + transport_socket_->setTransportSocketCallbacks(callbacks); + callbacks_ = &callbacks; + generateHeader(); +} + +Network::IoResult UpstreamProxyProtocolSocket::doWrite(Buffer::Instance& buffer, bool end_stream) { + if (header_buffer_.length() > 0) { + auto header_res = writeHeader(); + if (header_buffer_.length() == 0 && header_res.action_ == Network::PostIoAction::KeepOpen) { + auto inner_res = transport_socket_->doWrite(buffer, end_stream); + return {inner_res.action_, header_res.bytes_processed_ + inner_res.bytes_processed_, false}; + } + return header_res; + } else { + return transport_socket_->doWrite(buffer, end_stream); + } +} + +void UpstreamProxyProtocolSocket::generateHeader() { + if (version_ == ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V1) { + generateHeaderV1(); + } else { + generateHeaderV2(); + } +} + +void UpstreamProxyProtocolSocket::generateHeaderV1() { + // Default to local addresses + auto src_addr = callbacks_->connection().localAddress(); + auto dst_addr = callbacks_->connection().remoteAddress(); + + if (options_ && options_->proxyProtocolOptions().has_value()) { + const auto options = options_->proxyProtocolOptions().value(); + src_addr = options.src_addr_; + 
dst_addr = options.dst_addr_; + } + + Common::ProxyProtocol::generateV1Header(*src_addr->ip(), *dst_addr->ip(), header_buffer_); +} + +void UpstreamProxyProtocolSocket::generateHeaderV2() { + if (!options_ || !options_->proxyProtocolOptions().has_value()) { + Common::ProxyProtocol::generateV2LocalHeader(header_buffer_); + } else { + const auto options = options_->proxyProtocolOptions().value(); + Common::ProxyProtocol::generateV2Header(*options.src_addr_->ip(), *options.dst_addr_->ip(), + header_buffer_); + } +} + +Network::IoResult UpstreamProxyProtocolSocket::writeHeader() { + Network::PostIoAction action = Network::PostIoAction::KeepOpen; + uint64_t bytes_written = 0; + do { + if (header_buffer_.length() == 0) { + break; + } + + Api::IoCallUint64Result result = header_buffer_.write(callbacks_->ioHandle()); + + if (result.ok()) { + ENVOY_CONN_LOG(trace, "write returns: {}", callbacks_->connection(), result.rc_); + bytes_written += result.rc_; + } else { + ENVOY_CONN_LOG(trace, "write error: {}", callbacks_->connection(), + result.err_->getErrorDetails()); + if (result.err_->getErrorCode() != Api::IoError::IoErrorCode::Again) { + action = Network::PostIoAction::Close; + } + break; + } + } while (true); + + return {action, bytes_written, false}; +} + +} // namespace ProxyProtocol +} // namespace TransportSockets +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/transport_sockets/proxy_protocol/proxy_protocol.h b/source/extensions/transport_sockets/proxy_protocol/proxy_protocol.h new file mode 100644 index 000000000000..3b0996e20882 --- /dev/null +++ b/source/extensions/transport_sockets/proxy_protocol/proxy_protocol.h @@ -0,0 +1,44 @@ +#pragma once + +#include "envoy/config/core/v3/proxy_protocol.pb.h" +#include "envoy/network/connection.h" +#include "envoy/network/transport_socket.h" + +#include "common/buffer/buffer_impl.h" +#include "common/common/logger.h" + +#include "extensions/transport_sockets/common/passthrough.h" + +using envoy::config::core::v3::ProxyProtocolConfig_Version; + +namespace Envoy { +namespace Extensions { +namespace TransportSockets { +namespace ProxyProtocol { + +class UpstreamProxyProtocolSocket : public TransportSockets::PassthroughSocket, + public Logger::Loggable { +public: + UpstreamProxyProtocolSocket(Network::TransportSocketPtr&& transport_socket, + Network::TransportSocketOptionsSharedPtr options, + ProxyProtocolConfig_Version version); + + void setTransportSocketCallbacks(Network::TransportSocketCallbacks& callbacks) override; + Network::IoResult doWrite(Buffer::Instance& buffer, bool end_stream) override; + +private: + void generateHeader(); + void generateHeaderV1(); + void generateHeaderV2(); + Network::IoResult writeHeader(); + + Network::TransportSocketOptionsSharedPtr options_; + Network::TransportSocketCallbacks* callbacks_{}; + Buffer::OwnedImpl header_buffer_{}; + ProxyProtocolConfig_Version version_{ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V1}; +}; + +} // namespace ProxyProtocol +} // namespace TransportSockets +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/transport_sockets/tap/BUILD b/source/extensions/transport_sockets/tap/BUILD index e319ee596df3..a241afa2df24 100644 --- a/source/extensions/transport_sockets/tap/BUILD +++ b/source/extensions/transport_sockets/tap/BUILD @@ -42,6 +42,7 @@ envoy_cc_library( "//include/envoy/network:transport_socket_interface", "//source/common/buffer:buffer_lib", 
"//source/extensions/common/tap:extension_config_base", + "//source/extensions/transport_sockets/common:passthrough_lib", "@envoy_api//envoy/extensions/transport_sockets/tap/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/transport_sockets/tap/tap.cc b/source/extensions/transport_sockets/tap/tap.cc index 21109084247b..7674ba6b584d 100644 --- a/source/extensions/transport_sockets/tap/tap.cc +++ b/source/extensions/transport_sockets/tap/tap.cc @@ -11,7 +11,7 @@ namespace Tap { TapSocket::TapSocket(SocketTapConfigSharedPtr config, Network::TransportSocketPtr&& transport_socket) - : config_(config), transport_socket_(std::move(transport_socket)) {} + : PassthroughSocket(std::move(transport_socket)), config_(config) {} void TapSocket::setTransportSocketCallbacks(Network::TransportSocketCallbacks& callbacks) { ASSERT(!tapper_); @@ -19,11 +19,6 @@ void TapSocket::setTransportSocketCallbacks(Network::TransportSocketCallbacks& c tapper_ = config_ ? config_->createPerSocketTapper(callbacks.connection()) : nullptr; } -std::string TapSocket::protocol() const { return transport_socket_->protocol(); } -absl::string_view TapSocket::failureReason() const { return transport_socket_->failureReason(); } - -bool TapSocket::canFlushClose() { return transport_socket_->canFlushClose(); } - void TapSocket::closeSocket(Network::ConnectionEvent event) { if (tapper_ != nullptr) { tapper_->closeSocket(event); @@ -51,10 +46,6 @@ Network::IoResult TapSocket::doWrite(Buffer::Instance& buffer, bool end_stream) return result; } -void TapSocket::onConnected() { transport_socket_->onConnected(); } - -Ssl::ConnectionInfoConstSharedPtr TapSocket::ssl() const { return transport_socket_->ssl(); } - TapSocketFactory::TapSocketFactory( const envoy::extensions::transport_sockets::tap::v3::Tap& proto_config, Common::Tap::TapConfigFactoryPtr&& config_factory, Server::Admin& admin, diff --git a/source/extensions/transport_sockets/tap/tap.h b/source/extensions/transport_sockets/tap/tap.h index 72d8967468d7..33156b705153 100644 --- a/source/extensions/transport_sockets/tap/tap.h +++ b/source/extensions/transport_sockets/tap/tap.h @@ -5,6 +5,7 @@ #include "envoy/network/transport_socket.h" #include "extensions/common/tap/extension_config_base.h" +#include "extensions/transport_sockets/common/passthrough.h" #include "extensions/transport_sockets/tap/tap_config.h" namespace Envoy { @@ -12,25 +13,19 @@ namespace Extensions { namespace TransportSockets { namespace Tap { -class TapSocket : public Network::TransportSocket { +class TapSocket : public TransportSockets::PassthroughSocket { public: TapSocket(SocketTapConfigSharedPtr config, Network::TransportSocketPtr&& transport_socket); // Network::TransportSocket void setTransportSocketCallbacks(Network::TransportSocketCallbacks& callbacks) override; - std::string protocol() const override; - absl::string_view failureReason() const override; - bool canFlushClose() override; void closeSocket(Network::ConnectionEvent event) override; Network::IoResult doRead(Buffer::Instance& buffer) override; Network::IoResult doWrite(Buffer::Instance& buffer, bool end_stream) override; - void onConnected() override; - Ssl::ConnectionInfoConstSharedPtr ssl() const override; private: SocketTapConfigSharedPtr config_; PerSocketTapperPtr tapper_; - Network::TransportSocketPtr transport_socket_; }; class TapSocketFactory : public Network::TransportSocketFactory, diff --git a/source/extensions/transport_sockets/well_known_names.h b/source/extensions/transport_sockets/well_known_names.h index 
404357f45477..471e1e8b60cf 100644 --- a/source/extensions/transport_sockets/well_known_names.h +++ b/source/extensions/transport_sockets/well_known_names.h @@ -15,10 +15,11 @@ namespace TransportSockets { class TransportSocketNameValues { public: const std::string Alts = "envoy.transport_sockets.alts"; - const std::string Tap = "envoy.transport_sockets.tap"; + const std::string Quic = "envoy.transport_sockets.quic"; const std::string RawBuffer = "envoy.transport_sockets.raw_buffer"; + const std::string Tap = "envoy.transport_sockets.tap"; const std::string Tls = "envoy.transport_sockets.tls"; - const std::string Quic = "envoy.transport_sockets.quic"; + const std::string UpstreamProxyProtocol = "envoy.transport_sockets.upstream_proxy_protocol"; }; using TransportSocketNames = ConstSingleton; diff --git a/test/common/network/BUILD b/test/common/network/BUILD index 592a709050c7..08c82e85c385 100644 --- a/test/common/network/BUILD +++ b/test/common/network/BUILD @@ -341,6 +341,7 @@ envoy_cc_test( name = "transport_socket_options_impl_test", srcs = ["transport_socket_options_impl_test.cc"], deps = [ + "//source/common/network:address_lib", "//source/common/network:transport_socket_options_lib", "//source/common/stream_info:filter_state_lib", ], diff --git a/test/common/network/transport_socket_options_impl_test.cc b/test/common/network/transport_socket_options_impl_test.cc index 51535afa53ba..a96fbc53bdd3 100644 --- a/test/common/network/transport_socket_options_impl_test.cc +++ b/test/common/network/transport_socket_options_impl_test.cc @@ -1,5 +1,7 @@ #include "common/http/utility.h" +#include "common/network/address_impl.h" #include "common/network/application_protocol.h" +#include "common/network/proxy_protocol_filter_state.h" #include "common/network/transport_socket_options_impl.h" #include "common/network/upstream_server_name.h" #include "common/stream_info/filter_state_impl.h" @@ -31,6 +33,14 @@ TEST_F(TransportSocketOptionsImplTest, UpstreamServer) { filter_state_.setData( UpstreamServerName::key(), std::make_unique("www.example.com"), StreamInfo::FilterState::StateType::ReadOnly, StreamInfo::FilterState::LifeSpan::FilterChain); + filter_state_.setData(ProxyProtocolFilterState::key(), + std::make_unique(Network::ProxyProtocolData{ + Network::Address::InstanceConstSharedPtr( + new Network::Address::Ipv4Instance("202.168.0.13", 52000)), + Network::Address::InstanceConstSharedPtr( + new Network::Address::Ipv4Instance("174.2.2.222", 80))}), + StreamInfo::FilterState::StateType::ReadOnly, + StreamInfo::FilterState::LifeSpan::FilterChain); auto transport_socket_options = TransportSocketOptionsUtility::fromFilterState(filter_state_); EXPECT_EQ(absl::make_optional("www.example.com"), transport_socket_options->serverNameOverride()); diff --git a/test/extensions/transport_sockets/common/BUILD b/test/extensions/transport_sockets/common/BUILD new file mode 100644 index 000000000000..f30b8bf8bb2f --- /dev/null +++ b/test/extensions/transport_sockets/common/BUILD @@ -0,0 +1,20 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_test", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_test( + name = "passthrough_test", + srcs = ["passthrough_test.cc"], + deps = [ + "//source/extensions/transport_sockets/common:passthrough_lib", + "//test/mocks/buffer:buffer_mocks", + "//test/mocks/network:network_mocks", + "//test/mocks/network:transport_socket_mocks", + ], +) diff --git a/test/extensions/transport_sockets/common/passthrough_test.cc 
b/test/extensions/transport_sockets/common/passthrough_test.cc new file mode 100644 index 000000000000..067caab6611e --- /dev/null +++ b/test/extensions/transport_sockets/common/passthrough_test.cc @@ -0,0 +1,90 @@ +#include "extensions/transport_sockets/common/passthrough.h" + +#include "test/mocks/buffer/mocks.h" +#include "test/mocks/network/mocks.h" +#include "test/mocks/network/transport_socket.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::NiceMock; + +namespace Envoy { +namespace Extensions { +namespace TransportSockets { +namespace { + +class PassthroughTest : public testing::Test { +protected: + void SetUp() override { + auto inner_socket = std::make_unique>(); + inner_socket_ = inner_socket.get(); + passthrough_socket_ = std::make_unique(std::move(inner_socket)); + } + + NiceMock* inner_socket_; + std::unique_ptr passthrough_socket_; +}; + +// Test setTransportSocketCallbacks method defers to inner socket +TEST_F(PassthroughTest, SetTransportSocketCallbacksDefersToInnerSocket) { + auto transport_callbacks = std::make_unique>(); + EXPECT_CALL(*inner_socket_, setTransportSocketCallbacks(Ref(*transport_callbacks))).Times(1); + passthrough_socket_->setTransportSocketCallbacks(*transport_callbacks); +} + +// Test protocol method defers to inner socket +TEST_F(PassthroughTest, ProtocolDefersToInnerSocket) { + EXPECT_CALL(*inner_socket_, protocol()).Times(1); + passthrough_socket_->protocol(); +} + +// Test failureReason method defers to inner socket +TEST_F(PassthroughTest, FailureReasonDefersToInnerSocket) { + EXPECT_CALL(*inner_socket_, failureReason()).Times(1); + passthrough_socket_->failureReason(); +} + +// Test canFlushClose method defers to inner socket +TEST_F(PassthroughTest, CanFlushCloseDefersToInnerSocket) { + EXPECT_CALL(*inner_socket_, canFlushClose()).Times(1); + passthrough_socket_->canFlushClose(); +} + +// Test closeSocket method defers to inner socket +TEST_F(PassthroughTest, CloseSocketDefersToInnerSocket) { + EXPECT_CALL(*inner_socket_, closeSocket(testing::Eq(Network::ConnectionEvent::LocalClose))) + .Times(1); + passthrough_socket_->closeSocket(Network::ConnectionEvent::LocalClose); +} + +// Test doRead method defers to inner socket +TEST_F(PassthroughTest, DoReadDefersToInnerSocket) { + auto buff = Buffer::OwnedImpl("data"); + EXPECT_CALL(*inner_socket_, doRead(BufferEqual(&buff))).Times(1); + passthrough_socket_->doRead(buff); +} + +// Test doWrite method defers to inner socket +TEST_F(PassthroughTest, DoWriteDefersToInnerSocket) { + auto buff = Buffer::OwnedImpl("data"); + EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&buff), false)).Times(1); + passthrough_socket_->doWrite(buff, false); +} + +// Test onConnected method defers to inner socket +TEST_F(PassthroughTest, OnConnectedDefersToInnerSocket) { + EXPECT_CALL(*inner_socket_, onConnected()).Times(1); + passthrough_socket_->onConnected(); +} + +// Test ssl method defers to inner socket +TEST_F(PassthroughTest, SslDefersToInnerSocket) { + EXPECT_CALL(*inner_socket_, ssl()).Times(1); + passthrough_socket_->ssl(); +} + +} // namespace +} // namespace TransportSockets +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/test/extensions/transport_sockets/proxy_protocol/BUILD b/test/extensions/transport_sockets/proxy_protocol/BUILD new file mode 100644 index 000000000000..dbbdb719f507 --- /dev/null +++ b/test/extensions/transport_sockets/proxy_protocol/BUILD @@ -0,0 +1,27 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_package", +) +load( 
+ "//test/extensions:extensions_build_system.bzl", + "envoy_extension_cc_test", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_extension_cc_test( + name = "proxy_protocol_test", + srcs = ["proxy_protocol_test.cc"], + extension_name = "envoy.transport_sockets.upstream_proxy_protocol", + deps = [ + "//include/envoy/network:proxy_protocol_options_lib", + "//source/extensions/common/proxy_protocol:proxy_protocol_header_lib", + "//source/extensions/transport_sockets/proxy_protocol:upstream_proxy_protocol", + "//test/mocks/buffer:buffer_mocks", + "//test/mocks/network:io_handle_mocks", + "//test/mocks/network:network_mocks", + "//test/mocks/network:transport_socket_mocks", + ], +) diff --git a/test/extensions/transport_sockets/proxy_protocol/proxy_protocol_test.cc b/test/extensions/transport_sockets/proxy_protocol/proxy_protocol_test.cc new file mode 100644 index 000000000000..2823d218c992 --- /dev/null +++ b/test/extensions/transport_sockets/proxy_protocol/proxy_protocol_test.cc @@ -0,0 +1,398 @@ +#include "envoy/network/proxy_protocol.h" + +#include "common/buffer/buffer_impl.h" +#include "common/network/address_impl.h" +#include "common/network/transport_socket_options_impl.h" + +#include "extensions/common/proxy_protocol/proxy_protocol_header.h" +#include "extensions/transport_sockets/proxy_protocol/proxy_protocol.h" + +#include "test/mocks/buffer/mocks.h" +#include "test/mocks/network/io_handle.h" +#include "test/mocks/network/mocks.h" +#include "test/mocks/network/transport_socket.h" +#include "test/test_common/utility.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::_; +using testing::InSequence; +using testing::NiceMock; +using testing::Return; +using testing::ReturnRef; + +using envoy::config::core::v3::ProxyProtocolConfig_Version; + +namespace Envoy { +namespace Extensions { +namespace TransportSockets { +namespace ProxyProtocol { +namespace { + +constexpr uint64_t MaxSlices = 16; + +class ProxyProtocolTest : public testing::Test { +public: + void initialize(ProxyProtocolConfig_Version version, + Network::TransportSocketOptionsSharedPtr socket_options) { + auto inner_socket = std::make_unique>(); + inner_socket_ = inner_socket.get(); + ON_CALL(transport_callbacks_, ioHandle()).WillByDefault(ReturnRef(io_handle_)); + proxy_protocol_socket_ = std::make_unique(std::move(inner_socket), + socket_options, version); + proxy_protocol_socket_->setTransportSocketCallbacks(transport_callbacks_); + } + + NiceMock* inner_socket_; + NiceMock io_handle_; + std::unique_ptr proxy_protocol_socket_; + NiceMock transport_callbacks_; +}; + +// Test injects PROXY protocol header only once +TEST_F(ProxyProtocolTest, InjectesHeaderOnlyOnce) { + transport_callbacks_.connection_.local_address_ = + Network::Utility::resolveUrl("tcp://174.2.2.222:50000"); + transport_callbacks_.connection_.remote_address_ = + Network::Utility::resolveUrl("tcp://172.0.0.1:80"); + Buffer::OwnedImpl expected_buff{}; + Common::ProxyProtocol::generateV1Header("174.2.2.222", "172.0.0.1", 50000, 80, + Network::Address::IpVersion::v4, expected_buff); + auto expected_slices = expected_buff.getRawSlices(MaxSlices); + initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V1, nullptr); + + EXPECT_CALL(io_handle_, writev(RawSliceVectorEqual(expected_slices), expected_slices.size())) + .WillOnce(Return(testing::ByMove(Api::IoCallUint64Result( + expected_buff.length(), Api::IoErrorPtr(nullptr, [](Api::IoError*) {}))))); + auto msg = Buffer::OwnedImpl("some data"); + auto msg2 = 
Buffer::OwnedImpl("more data"); + { + InSequence s; + EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&msg), false)).Times(1); + EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&msg2), false)).Times(1); + } + + proxy_protocol_socket_->doWrite(msg, false); + proxy_protocol_socket_->doWrite(msg2, false); +} + +// Test returned bytes processed includes the PROXY protocol header +TEST_F(ProxyProtocolTest, BytesProcessedIncludesProxyProtocolHeader) { + transport_callbacks_.connection_.local_address_ = + Network::Utility::resolveUrl("tcp://174.2.2.222:50000"); + transport_callbacks_.connection_.remote_address_ = + Network::Utility::resolveUrl("tcp://172.0.0.1:80"); + Buffer::OwnedImpl expected_buff{}; + Common::ProxyProtocol::generateV1Header("174.2.2.222", "172.0.0.1", 50000, 80, + Network::Address::IpVersion::v4, expected_buff); + auto expected_slices = expected_buff.getRawSlices(MaxSlices); + initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V1, nullptr); + + EXPECT_CALL(io_handle_, writev(RawSliceVectorEqual(expected_slices), expected_slices.size())) + .WillOnce(Return(testing::ByMove(Api::IoCallUint64Result( + expected_buff.length(), Api::IoErrorPtr(nullptr, [](Api::IoError*) {}))))); + auto msg = Buffer::OwnedImpl("some data"); + auto msg2 = Buffer::OwnedImpl("more data"); + { + InSequence s; + EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&msg), false)) + .WillOnce(Return(Network::IoResult{Network::PostIoAction::KeepOpen, msg.length(), false})); + EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&msg2), false)) + .WillOnce(Return(Network::IoResult{Network::PostIoAction::KeepOpen, msg2.length(), false})); + } + + auto resp = proxy_protocol_socket_->doWrite(msg, false); + EXPECT_EQ(expected_buff.length() + msg.length(), resp.bytes_processed_); + auto resp2 = proxy_protocol_socket_->doWrite(msg2, false); + EXPECT_EQ(msg2.length(), resp2.bytes_processed_); +} + +// Test returns KeepOpen action when write error is Again +TEST_F(ProxyProtocolTest, ReturnsKeepOpenWhenWriteErrorIsAgain) { + transport_callbacks_.connection_.local_address_ = + Network::Utility::resolveUrl("tcp://174.2.2.222:50000"); + transport_callbacks_.connection_.remote_address_ = + Network::Utility::resolveUrl("tcp://172.0.0.1:80"); + Buffer::OwnedImpl expected_buff{}; + Common::ProxyProtocol::generateV1Header("174.2.2.222", "172.0.0.1", 50000, 80, + Network::Address::IpVersion::v4, expected_buff); + auto expected_slices = expected_buff.getRawSlices(MaxSlices); + initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V1, nullptr); + + auto msg = Buffer::OwnedImpl("some data"); + { + InSequence s; + EXPECT_CALL(io_handle_, writev(RawSliceVectorEqual(expected_slices), expected_slices.size())) + .WillOnce(Return(testing::ByMove(Api::IoCallUint64Result( + 0, Api::IoErrorPtr(Network::IoSocketError::getIoSocketEagainInstance(), + Network::IoSocketError::deleteIoError))))); + EXPECT_CALL(io_handle_, writev(RawSliceVectorEqual(expected_slices), expected_slices.size())) + .WillOnce(Return(testing::ByMove(Api::IoCallUint64Result( + expected_buff.length(), Api::IoErrorPtr(nullptr, [](Api::IoError*) {}))))); + EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&msg), false)) + .WillOnce(Return(Network::IoResult{Network::PostIoAction::KeepOpen, msg.length(), false})); + } + + auto resp = proxy_protocol_socket_->doWrite(msg, false); + EXPECT_EQ(Network::PostIoAction::KeepOpen, resp.action_); + auto resp2 = proxy_protocol_socket_->doWrite(msg, false); + EXPECT_EQ(Network::PostIoAction::KeepOpen, resp2.action_); 
+} + +// Test returns Close action when write error is not Again +TEST_F(ProxyProtocolTest, ReturnsCloseWhenWriteErrorIsNotAgain) { + transport_callbacks_.connection_.local_address_ = + Network::Utility::resolveUrl("tcp://174.2.2.222:50000"); + transport_callbacks_.connection_.remote_address_ = + Network::Utility::resolveUrl("tcp://172.0.0.1:80"); + Buffer::OwnedImpl expected_buff{}; + Common::ProxyProtocol::generateV1Header("174.2.2.222", "172.0.0.1", 50000, 80, + Network::Address::IpVersion::v4, expected_buff); + auto expected_slices = expected_buff.getRawSlices(MaxSlices); + initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V1, nullptr); + + auto msg = Buffer::OwnedImpl("some data"); + { + InSequence s; + EXPECT_CALL(io_handle_, writev(_, _)) + .WillOnce(Return(testing::ByMove( + Api::IoCallUint64Result(0, Api::IoErrorPtr(new Network::IoSocketError(EADDRNOTAVAIL), + [](Api::IoError* err) { delete err; }))))); + } + + auto resp = proxy_protocol_socket_->doWrite(msg, false); + EXPECT_EQ(Network::PostIoAction::Close, resp.action_); +} + +// Test injects V1 PROXY protocol using upstream addresses when transport options are null +TEST_F(ProxyProtocolTest, V1IPV4LocalAddressWhenTransportOptionsAreNull) { + transport_callbacks_.connection_.local_address_ = + Network::Utility::resolveUrl("tcp://174.2.2.222:50000"); + transport_callbacks_.connection_.remote_address_ = + Network::Utility::resolveUrl("tcp://172.0.0.1:80"); + Buffer::OwnedImpl expected_buff{}; + Common::ProxyProtocol::generateV1Header("174.2.2.222", "172.0.0.1", 50000, 80, + Network::Address::IpVersion::v4, expected_buff); + auto expected_slices = expected_buff.getRawSlices(MaxSlices); + initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V1, nullptr); + + EXPECT_CALL(io_handle_, writev(RawSliceVectorEqual(expected_slices), expected_slices.size())) + .WillOnce(Return(testing::ByMove(Api::IoCallUint64Result( + expected_buff.length(), Api::IoErrorPtr(nullptr, [](Api::IoError*) {}))))); + auto msg = Buffer::OwnedImpl("some data"); + EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&msg), false)).Times(1); + + proxy_protocol_socket_->doWrite(msg, false); +} + +// Test injects V1 PROXY protocol using upstream addresses when header options are null +TEST_F(ProxyProtocolTest, V1IPV4LocalAddressesWhenHeaderOptionsAreNull) { + transport_callbacks_.connection_.local_address_ = + Network::Utility::resolveUrl("tcp://174.2.2.222:50000"); + transport_callbacks_.connection_.remote_address_ = + Network::Utility::resolveUrl("tcp://172.0.0.1:80"); + Buffer::OwnedImpl expected_buff{}; + Common::ProxyProtocol::generateV1Header("174.2.2.222", "172.0.0.1", 50000, 80, + Network::Address::IpVersion::v4, expected_buff); + auto expected_slices = expected_buff.getRawSlices(MaxSlices); + initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V1, + std::make_shared()); + + EXPECT_CALL(io_handle_, writev(RawSliceVectorEqual(expected_slices), 1)) + .WillOnce(Return(testing::ByMove( + Api::IoCallUint64Result(43, Api::IoErrorPtr(nullptr, [](Api::IoError*) {}))))); + auto msg = Buffer::OwnedImpl("some data"); + EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&msg), false)).Times(1); + + proxy_protocol_socket_->doWrite(msg, false); +} + +// Test injects V1 PROXY protocol using upstream addresses when header options are null +TEST_F(ProxyProtocolTest, V1IPV6LocalAddressesWhenHeaderOptionsAreNull) { + transport_callbacks_.connection_.local_address_ = + Network::Utility::resolveUrl("tcp://[a:b:c:d::]:50000"); + 
transport_callbacks_.connection_.remote_address_ = + Network::Utility::resolveUrl("tcp://[e:b:c:f::]:8080"); + Buffer::OwnedImpl expected_buff{}; + Common::ProxyProtocol::generateV1Header("a:b:c:d::", "e:b:c:f::", 50000, 8080, + Network::Address::IpVersion::v6, expected_buff); + auto expected_slices = expected_buff.getRawSlices(MaxSlices); + initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V1, + std::make_shared()); + + EXPECT_CALL(io_handle_, writev(RawSliceVectorEqual(expected_slices), expected_slices.size())) + .WillOnce(Return(testing::ByMove(Api::IoCallUint64Result( + expected_buff.length(), Api::IoErrorPtr(nullptr, [](Api::IoError*) {}))))); + auto msg = Buffer::OwnedImpl("some data"); + EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&msg), false)).Times(1); + + proxy_protocol_socket_->doWrite(msg, false); +} + +// Test injects V1 PROXY protocol for downstream IPV4 addresses +TEST_F(ProxyProtocolTest, V1IPV4DownstreamAddresses) { + auto src_addr = Network::Address::InstanceConstSharedPtr( + new Network::Address::Ipv4Instance("202.168.0.13", 52000)); + auto dst_addr = Network::Address::InstanceConstSharedPtr( + new Network::Address::Ipv4Instance("174.2.2.222", 80)); + Network::TransportSocketOptionsSharedPtr socket_options = + std::make_shared( + "", std::vector{}, std::vector{}, absl::nullopt, + absl::optional( + Network::ProxyProtocolData{src_addr, dst_addr})); + transport_callbacks_.connection_.local_address_ = + Network::Utility::resolveUrl("tcp://174.2.2.222:50000"); + transport_callbacks_.connection_.remote_address_ = + Network::Utility::resolveUrl("tcp://172.0.0.1:8080"); + Buffer::OwnedImpl expected_buff{}; + Common::ProxyProtocol::generateV1Header("202.168.0.13", "174.2.2.222", 52000, 80, + Network::Address::IpVersion::v4, expected_buff); + auto expected_slices = expected_buff.getRawSlices(MaxSlices); + initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V1, socket_options); + + EXPECT_CALL(io_handle_, writev(RawSliceVectorEqual(expected_slices), expected_slices.size())) + .WillOnce(Return(testing::ByMove(Api::IoCallUint64Result( + expected_buff.length(), Api::IoErrorPtr(nullptr, [](Api::IoError*) {}))))); + auto msg = Buffer::OwnedImpl("some data"); + EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&msg), false)).Times(1); + + proxy_protocol_socket_->doWrite(msg, false); +} + +// Test injects V1 PROXY protocol for downstream IPV6 addresses +TEST_F(ProxyProtocolTest, V1IPV6DownstreamAddresses) { + auto src_addr = + Network::Address::InstanceConstSharedPtr(new Network::Address::Ipv6Instance("1::2:3", 52000)); + auto dst_addr = + Network::Address::InstanceConstSharedPtr(new Network::Address::Ipv6Instance("a:b:c:d::", 80)); + Network::TransportSocketOptionsSharedPtr socket_options = + std::make_shared( + "", std::vector{}, std::vector{}, absl::nullopt, + absl::optional( + Network::ProxyProtocolData{src_addr, dst_addr})); + transport_callbacks_.connection_.local_address_ = + Network::Utility::resolveUrl("tcp://[a:b:c:d::]:50000"); + transport_callbacks_.connection_.remote_address_ = + Network::Utility::resolveUrl("tcp://[e:b:c:f::]:8080"); + Buffer::OwnedImpl expected_buff{}; + Common::ProxyProtocol::generateV1Header("1::2:3", "a:b:c:d::", 52000, 80, + Network::Address::IpVersion::v6, expected_buff); + auto expected_slices = expected_buff.getRawSlices(MaxSlices); + initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V1, socket_options); + + EXPECT_CALL(io_handle_, writev(RawSliceVectorEqual(expected_slices), 
expected_slices.size())) + .WillOnce(Return(testing::ByMove(Api::IoCallUint64Result( + expected_buff.length(), Api::IoErrorPtr(nullptr, [](Api::IoError*) {}))))); + auto msg = Buffer::OwnedImpl("some data"); + EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&msg), false)).Times(1); + + proxy_protocol_socket_->doWrite(msg, false); +} + +// Test injects V2 PROXY protocol using upstream addresses when transport options are null +TEST_F(ProxyProtocolTest, V2IPV4LocalCommandWhenTransportOptionsAreNull) { + transport_callbacks_.connection_.local_address_ = + Network::Utility::resolveUrl("tcp://1.2.3.4:773"); + transport_callbacks_.connection_.remote_address_ = + Network::Utility::resolveUrl("tcp://0.1.1.2:513"); + Buffer::OwnedImpl expected_buff{}; + Common::ProxyProtocol::generateV2LocalHeader(expected_buff); + auto expected_slices = expected_buff.getRawSlices(MaxSlices); + initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V2, nullptr); + + EXPECT_CALL(io_handle_, writev(RawSliceVectorEqual(expected_slices), expected_slices.size())) + .WillOnce(Return(testing::ByMove(Api::IoCallUint64Result( + expected_buff.length(), Api::IoErrorPtr(nullptr, [](Api::IoError*) {}))))); + auto msg = Buffer::OwnedImpl("some data"); + EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&msg), false)).Times(1); + + proxy_protocol_socket_->doWrite(msg, false); +} + +// Test injects V2 PROXY protocol using upstream addresses when header options are null +TEST_F(ProxyProtocolTest, V2IPV4LocalCommandWhenHeaderOptionsAreNull) { + transport_callbacks_.connection_.local_address_ = + Network::Utility::resolveUrl("tcp://1.2.3.4:773"); + transport_callbacks_.connection_.remote_address_ = + Network::Utility::resolveUrl("tcp://0.1.1.2:513"); + Buffer::OwnedImpl expected_buff{}; + Common::ProxyProtocol::generateV2LocalHeader(expected_buff); + auto expected_slices = expected_buff.getRawSlices(MaxSlices); + initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V2, + std::make_shared()); + + EXPECT_CALL(io_handle_, writev(RawSliceVectorEqual(expected_slices), expected_slices.size())) + .WillOnce(Return(testing::ByMove(Api::IoCallUint64Result( + expected_buff.length(), Api::IoErrorPtr(nullptr, [](Api::IoError*) {}))))); + auto msg = Buffer::OwnedImpl("some data"); + EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&msg), false)).Times(1); + + proxy_protocol_socket_->doWrite(msg, false); +} + +// Test injects V2 PROXY protocol for downstream IPV4 addresses +TEST_F(ProxyProtocolTest, V2IPV4DownstreamAddresses) { + auto src_addr = + Network::Address::InstanceConstSharedPtr(new Network::Address::Ipv4Instance("1.2.3.4", 773)); + auto dst_addr = + Network::Address::InstanceConstSharedPtr(new Network::Address::Ipv4Instance("0.1.1.2", 513)); + Network::TransportSocketOptionsSharedPtr socket_options = + std::make_shared( + "", std::vector{}, std::vector{}, absl::nullopt, + absl::optional( + Network::ProxyProtocolData{src_addr, dst_addr})); + transport_callbacks_.connection_.local_address_ = + Network::Utility::resolveUrl("tcp://0.1.1.2:50000"); + transport_callbacks_.connection_.remote_address_ = + Network::Utility::resolveUrl("tcp://3.3.3.3:80"); + Buffer::OwnedImpl expected_buff{}; + Common::ProxyProtocol::generateV2Header("1.2.3.4", "0.1.1.2", 773, 513, + Network::Address::IpVersion::v4, expected_buff); + auto expected_slices = expected_buff.getRawSlices(MaxSlices); + initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V2, socket_options); + + EXPECT_CALL(io_handle_, 
writev(RawSliceVectorEqual(expected_slices), expected_slices.size())) + .WillOnce(Return(testing::ByMove(Api::IoCallUint64Result( + expected_buff.length(), Api::IoErrorPtr(nullptr, [](Api::IoError*) {}))))); + auto msg = Buffer::OwnedImpl("some data"); + EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&msg), false)).Times(1); + + proxy_protocol_socket_->doWrite(msg, false); +} + +// Test injects V2 PROXY protocol for downstream IPV6 addresses +TEST_F(ProxyProtocolTest, V2IPV6DownstreamAddresses) { + auto src_addr = + Network::Address::InstanceConstSharedPtr(new Network::Address::Ipv6Instance("1:2:3::4", 8)); + auto dst_addr = Network::Address::InstanceConstSharedPtr( + new Network::Address::Ipv6Instance("1:100:200:3::", 2)); + Network::TransportSocketOptionsSharedPtr socket_options = + std::make_shared( + "", std::vector{}, std::vector{}, absl::nullopt, + absl::optional( + Network::ProxyProtocolData{src_addr, dst_addr})); + transport_callbacks_.connection_.local_address_ = + Network::Utility::resolveUrl("tcp://[1:100:200:3::]:50000"); + transport_callbacks_.connection_.remote_address_ = + Network::Utility::resolveUrl("tcp://[e:b:c:f::]:8080"); + Buffer::OwnedImpl expected_buff{}; + Common::ProxyProtocol::generateV2Header("1:2:3::4", "1:100:200:3::", 8, 2, + Network::Address::IpVersion::v6, expected_buff); + auto expected_slices = expected_buff.getRawSlices(MaxSlices); + initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V2, socket_options); + + EXPECT_CALL(io_handle_, writev(RawSliceVectorEqual(expected_slices), expected_slices.size())) + .WillOnce(Return(testing::ByMove(Api::IoCallUint64Result( + expected_buff.length(), Api::IoErrorPtr(nullptr, [](Api::IoError*) {}))))); + auto msg = Buffer::OwnedImpl("some data"); + EXPECT_CALL(*inner_socket_, doWrite(BufferEqual(&msg), false)).Times(1); + + proxy_protocol_socket_->doWrite(msg, false); +} + +} // namespace +} // namespace ProxyProtocol +} // namespace TransportSockets +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/test/mocks/buffer/mocks.h b/test/mocks/buffer/mocks.h index 76246f0dc4c3..6918729c7b39 100644 --- a/test/mocks/buffer/mocks.h +++ b/test/mocks/buffer/mocks.h @@ -133,4 +133,8 @@ ACTION_P(AddBufferToStringWithoutDraining, target_string) { target_string->append(arg0.toString()); } +MATCHER_P(RawSliceVectorEqual, rhs, testing::PrintToString(rhs)) { + return TestUtility::rawSlicesEqual(arg, rhs.data(), rhs.size()); +} + } // namespace Envoy diff --git a/test/test_common/utility.cc b/test/test_common/utility.cc index b9a1adbf06e9..cd358d84fe32 100644 --- a/test/test_common/utility.cc +++ b/test/test_common/utility.cc @@ -114,6 +114,25 @@ bool TestUtility::buffersEqual(const Buffer::Instance& lhs, const Buffer::Instan return true; } +bool TestUtility::rawSlicesEqual(const Buffer::RawSlice* lhs, const Buffer::RawSlice* rhs, + size_t num_slices) { + for (size_t slice = 0; slice < num_slices; slice++) { + auto rhs_slice = rhs[slice]; + auto lhs_slice = lhs[slice]; + if (rhs_slice.len_ != lhs_slice.len_) { + return false; + } + auto rhs_slice_data = static_cast(rhs_slice.mem_); + auto lhs_slice_data = static_cast(lhs_slice.mem_); + for (size_t offset = 0; offset < rhs_slice.len_; offset++) { + if (rhs_slice_data[offset] != lhs_slice_data[offset]) { + return false; + } + } + } + return true; +} + void TestUtility::feedBufferWithRandomCharacters(Buffer::Instance& buffer, uint64_t n_char, uint64_t seed) { const std::string sample = "Neque porro quisquam est qui dolorem ipsum.."; 
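For context, a brief usage sketch of the new slice matcher (illustrative only, not part of the patch): it shows how RawSliceVectorEqual, which forwards to the TestUtility::rawSlicesEqual helper added above, is meant to assert on the slices handed to writev(), mirroring the pattern used throughout proxy_protocol_test.cc earlier in this series. The mock class name Network::MockIoHandle, the include paths, and the literal PROXY v1 header bytes are assumptions based on the tests in this patch and the PROXY protocol v1 spec.

#include "envoy/api/io_error.h"

#include "common/buffer/buffer_impl.h"

#include "test/mocks/buffer/mocks.h"      // provides RawSliceVectorEqual
#include "test/mocks/network/io_handle.h" // provides Network::MockIoHandle (name assumed)

#include "gmock/gmock.h"
#include "gtest/gtest.h"

namespace Envoy {

// Illustrative test only; the literal below is the PROXY v1 header expected for
// a 202.168.0.13:52000 -> 174.2.2.222:80 connection.
TEST(RawSliceVectorEqualExample, MatchesSlicesPassedToWritev) {
  Buffer::OwnedImpl expected("PROXY TCP4 202.168.0.13 174.2.2.222 52000 80\r\n");
  const auto expected_slices = expected.getRawSlices(16);

  // The matcher compares each RawSlice passed to writev() against the
  // corresponding expected slice, both in length and byte by byte.
  testing::NiceMock<Network::MockIoHandle> io_handle;
  EXPECT_CALL(io_handle, writev(RawSliceVectorEqual(expected_slices), expected_slices.size()))
      .WillOnce(testing::Return(testing::ByMove(Api::IoCallUint64Result(
          expected.length(), Api::IoErrorPtr(nullptr, [](Api::IoError*) {})))));

  // Simulate the component under test flushing the header.
  io_handle.writev(expected_slices.data(), expected_slices.size());
}

} // namespace Envoy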
diff --git a/test/test_common/utility.h b/test/test_common/utility.h index 4b2788e275f2..399f2d869d45 100644 --- a/test/test_common/utility.h +++ b/test/test_common/utility.h @@ -158,6 +158,18 @@ class TestUtility { */ static bool buffersEqual(const Buffer::Instance& lhs, const Buffer::Instance& rhs); + /** + * Compare 2 RawSlice pointers. + * @param lhs supplies raw slice 1. + * @param rhs supplies raw slice 2. + * @param num_slices The number of slices to compare. It is assumed lhs and rhs have the same + * number. + * @return true if for num_slices, all lhs raw slices are equal to the corresponding rhs raw slice + * in length and a byte by byte data comparison. false otherwise + */ + static bool rawSlicesEqual(const Buffer::RawSlice* lhs, const Buffer::RawSlice* rhs, + size_t num_slices); + /** * Feed a buffer with random characters. * @param buffer supplies the buffer to be fed. From 97d98292fe0ceb480a2c522e6c72e3c88825c9b2 Mon Sep 17 00:00:00 2001 From: antonio Date: Tue, 28 Jul 2020 10:06:29 -0400 Subject: [PATCH 763/909] test: Ignore grpc.primary_user_agent grpc library version in test that verifies parsing gRPC channel args from config. (#12318) Commit Message: test: Ignore grpc.primary_user_agent grpc library version in test that verifies parsing gRPC channel args from config. Additional Description: Risk Level: n/a test-only change Testing: n/a Docs Changes: n/a Release Notes: n/a Signed-off-by: Antonio Vicente --- test/common/grpc/google_grpc_utils_test.cc | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/test/common/grpc/google_grpc_utils_test.cc b/test/common/grpc/google_grpc_utils_test.cc index 82fa62f6a55c..a44580b813e0 100644 --- a/test/common/grpc/google_grpc_utils_test.cc +++ b/test/common/grpc/google_grpc_utils_test.cc @@ -8,6 +8,7 @@ #include "gtest/gtest.h" +using testing::HasSubstr; using testing::Pair; using testing::UnorderedElementsAre; @@ -110,9 +111,10 @@ TEST(GoogleGrpcUtilsTest, ChannelArgsFromConfig) { int_args[arg.key] = arg.value.integer; } } - EXPECT_THAT(string_args, UnorderedElementsAre(Pair("grpc.ssl_target_name_override", "bar"), - Pair("grpc.primary_user_agent", "grpc-c++/1.25.0"), - Pair("grpc.default_authority", "foo"))); + EXPECT_THAT(string_args, + UnorderedElementsAre(Pair("grpc.ssl_target_name_override", "bar"), + Pair("grpc.primary_user_agent", HasSubstr("grpc-c++/")), + Pair("grpc.default_authority", "foo"))); EXPECT_THAT(int_args, UnorderedElementsAre(Pair("grpc.http2.max_ping_strikes", 5), Pair("grpc.http2.max_pings_without_data", 3))); } From 2e939b6a1330853c05f556298e0eb859eced8055 Mon Sep 17 00:00:00 2001 From: Sam Flattery <44659644+samflattery@users.noreply.github.com> Date: Tue, 28 Jul 2020 17:20:07 +0100 Subject: [PATCH 764/909] fuzz: add unit tests to xDS verifier (#12246) Commit Message: Add unit tests to xDS verifier * add new file xds_verifier_test.cc with unit tests * add tests for main paths through verifier Signed-off-by: Sam Flattery --- test/server/config_validation/BUILD | 9 + test/server/config_validation/xds_fuzz.cc | 20 +- test/server/config_validation/xds_fuzz.h | 4 +- test/server/config_validation/xds_verifier.cc | 87 ++++--- test/server/config_validation/xds_verifier.h | 8 +- .../config_validation/xds_verifier_test.cc | 227 ++++++++++++++++++ 6 files changed, 312 insertions(+), 43 deletions(-) create mode 100644 test/server/config_validation/xds_verifier_test.cc diff --git a/test/server/config_validation/BUILD b/test/server/config_validation/BUILD index f2888cef0882..9671af5ddf63 100644 --- 
a/test/server/config_validation/BUILD +++ b/test/server/config_validation/BUILD @@ -129,6 +129,15 @@ envoy_cc_test_library( ], ) +envoy_cc_test( + name = "xds_verifier_test", + srcs = ["xds_verifier_test.cc"], + deps = [ + ":xds_verifier_lib", + "//test/config:utility_lib", + ], +) + envoy_cc_test_library( name = "xds_fuzz_lib", srcs = ["xds_fuzz.cc"], diff --git a/test/server/config_validation/xds_fuzz.cc b/test/server/config_validation/xds_fuzz.cc index a14d09cc0252..78d246275101 100644 --- a/test/server/config_validation/xds_fuzz.cc +++ b/test/server/config_validation/xds_fuzz.cc @@ -69,6 +69,9 @@ XdsFuzzTest::XdsFuzzTest(const test::server::config_validation::XdsTestCase& inp create_xds_upstream_ = true; tls_xds_upstream_ = false; + // avoid listeners draining during the test + drain_time_ = std::chrono::seconds(60); + if (input.config().sotw_or_delta() == test::server::config_validation::Config::SOTW) { sotw_or_delta_ = Grpc::SotwOrDelta::Sotw; } else { @@ -130,6 +133,7 @@ bool XdsFuzzTest::hasRoute(const std::string& route_name) { */ void XdsFuzzTest::addListener(const std::string& listener_name, const std::string& route_name) { ENVOY_LOG_MISC(debug, "Adding {} with reference to {}", listener_name, route_name); + lds_update_success_++; bool removed = eraseListener(listener_name); auto listener = buildListener(listener_name, route_name); listeners_.push_back(listener); @@ -154,6 +158,7 @@ void XdsFuzzTest::removeListener(const std::string& listener_name) { bool removed = eraseListener(listener_name); if (removed) { + lds_update_success_++; updateListener(listeners_, {}, {listener_name}); EXPECT_TRUE(waitForAck(Config::TypeUrl::get().Listener, std::to_string(version_))); verifier_.listenerRemoved(listener_name); @@ -165,19 +170,15 @@ void XdsFuzzTest::removeListener(const std::string& listener_name) { */ void XdsFuzzTest::addRoute(const std::string& route_name) { ENVOY_LOG_MISC(debug, "Adding {}", route_name); - bool has_route = hasRoute(route_name); auto route = buildRouteConfig(route_name); - routes_.push_back(route); - if (has_route) { - // if the route was already in routes_, don't send a duplicate add in delta request - updateRoute(routes_, {}, {}); - verifier_.routeUpdated(route); - } else { - updateRoute(routes_, {route}, {}); - verifier_.routeAdded(route); + if (!hasRoute(route_name)) { + routes_.push_back(route); } + updateRoute(routes_, {route}, {}); + verifier_.routeAdded(route); + EXPECT_TRUE(waitForAck(Config::TypeUrl::get().RouteConfiguration, std::to_string(version_))); } @@ -277,6 +278,7 @@ void XdsFuzzTest::replay() { test_server_->waitForCounterEq("listener_manager.listener_modified", verifier_.numModified()); test_server_->waitForCounterEq("listener_manager.listener_added", verifier_.numAdded()); test_server_->waitForCounterEq("listener_manager.listener_removed", verifier_.numRemoved()); + test_server_->waitForCounterEq("listener_manager.lds.update_success", lds_update_success_); } ENVOY_LOG_MISC(debug, "warming {} ({}), active {} ({}), draining {} ({})", verifier_.numWarming(), diff --git a/test/server/config_validation/xds_fuzz.h b/test/server/config_validation/xds_fuzz.h index bdc37951c7f8..eac6cc269b66 100644 --- a/test/server/config_validation/xds_fuzz.h +++ b/test/server/config_validation/xds_fuzz.h @@ -63,7 +63,7 @@ class XdsFuzzTest : public HttpIntegrationTest { std::vector getRoutesConfigDump(); bool eraseListener(const std::string& listener_name); - bool hasRoute(const std::string& route_num); + bool hasRoute(const std::string& route_name); AssertionResult 
waitForAck(const std::string& expected_type_url, const std::string& expected_version); @@ -77,6 +77,8 @@ class XdsFuzzTest : public HttpIntegrationTest { envoy::config::core::v3::ApiVersion api_version_; Network::Address::IpVersion ip_version_; + + uint64_t lds_update_success_{0}; }; } // namespace Envoy diff --git a/test/server/config_validation/xds_verifier.cc b/test/server/config_validation/xds_verifier.cc index c6932a74bf79..2501911a75d6 100644 --- a/test/server/config_validation/xds_verifier.cc +++ b/test/server/config_validation/xds_verifier.cc @@ -29,11 +29,21 @@ std::string XdsVerifier::getRoute(const envoy::config::listener::v3::Listener& l * @return true iff the route listener refers to is in all_routes_ */ bool XdsVerifier::hasRoute(const envoy::config::listener::v3::Listener& listener) { - return all_routes_.contains(getRoute(listener)); + return hasRoute(getRoute(listener)); } +bool XdsVerifier::hasRoute(const std::string& name) { return all_routes_.contains(name); } + bool XdsVerifier::hasActiveRoute(const envoy::config::listener::v3::Listener& listener) { - return active_routes_.contains(getRoute(listener)); + return hasActiveRoute(getRoute(listener)); +} + +bool XdsVerifier::hasActiveRoute(const std::string& name) { return active_routes_.contains(name); } + +bool XdsVerifier::hasListener(const std::string& name, ListenerState state) { + return std::any_of(listeners_.begin(), listeners_.end(), [&](const auto& rep) { + return rep.listener.name() == name && state == rep.state; + }); } /** @@ -68,7 +78,7 @@ void XdsVerifier::dumpState() { * updated listener */ void XdsVerifier::listenerUpdated(const envoy::config::listener::v3::Listener& listener) { - ENVOY_LOG_MISC(debug, "About to update listener {}", listener.name()); + ENVOY_LOG_MISC(debug, "About to update listener {} to {}", listener.name(), getRoute(listener)); dumpState(); if (std::any_of(listeners_.begin(), listeners_.end(), [&](auto& rep) { @@ -79,17 +89,25 @@ void XdsVerifier::listenerUpdated(const envoy::config::listener::v3::Listener& l return; } - for (unsigned long i = 0; i < listeners_.size(); ++i) { - const auto& rep = listeners_[i]; + bool found = false; + for (auto it = listeners_.begin(); it != listeners_.end();) { + const auto& rep = *it; + ENVOY_LOG_MISC(debug, "checking {} for update", rep.listener.name()); if (rep.listener.name() == listener.name()) { - if (rep.state == ACTIVE) { + // if we're updating a warming/active listener, num_modified_ must be incremented + if (rep.state != DRAINING && !found) { num_modified_++; + found = true; + } + + if (rep.state == ACTIVE) { if (hasActiveRoute(listener)) { // if the new listener is ready to take traffic, the old listener will be removed // it seems to be directly removed without being added to the config dump as draining ENVOY_LOG_MISC(debug, "Removing {} after update", listener.name()); num_active_--; - listeners_.erase(listeners_.begin() + i); + it = listeners_.erase(it); + continue; } else { // if the new listener has not gotten its route yet, the old listener will remain active // until that happens @@ -99,9 +117,12 @@ void XdsVerifier::listenerUpdated(const envoy::config::listener::v3::Listener& l // if the old listener is warming, it will be removed and replaced with the new ENVOY_LOG_MISC(debug, "Removed warming listener {}", listener.name()); num_warming_--; - listeners_.erase(listeners_.begin() + i); + it = listeners_.erase(it); + // don't increment it + continue; } } + ++it; } dumpState(); listenerAdded(listener, true); @@ -139,25 +160,28 @@ void 
XdsVerifier::listenerAdded(const envoy::config::listener::v3::Listener& lis */ void XdsVerifier::listenerRemoved(const std::string& name) { bool found = false; - for (unsigned long i = 0; i < listeners_.size(); ++i) { - auto& rep = listeners_[i]; - if (rep.listener.name() != name) { - continue; - } - if (rep.state == ACTIVE) { - // the listener will be drained before being removed - ENVOY_LOG_MISC(debug, "Changing {} to DRAINING", name); - num_removed_++; - num_active_--; - num_draining_++; - rep.state = DRAINING; - } else if (rep.state == WARMING) { - // the listener will be removed immediately - ENVOY_LOG_MISC(debug, "Removed warming listener {}", name); - listeners_.erase(listeners_.begin() + i); - num_warming_--; + for (auto it = listeners_.begin(); it != listeners_.end();) { + auto& rep = *it; + if (rep.listener.name() == name) { + if (rep.state == ACTIVE) { + // the listener will be drained before being removed + ENVOY_LOG_MISC(debug, "Changing {} to DRAINING", name); + found = true; + num_active_--; + num_draining_++; + rep.state = DRAINING; + } else if (rep.state == WARMING) { + // the listener will be removed immediately + ENVOY_LOG_MISC(debug, "Removed warming listener {}", name); + found = true; + num_warming_--; + it = listeners_.erase(it); + // don't increment it + continue; + } } + ++it; } if (found) { @@ -236,7 +260,6 @@ void XdsVerifier::markForRemoval(ListenerRepresentation& rep) { // mark it as removed to remove it after the loop so as not to invalidate the iterator in // the caller function old_rep.state = REMOVED; - /* num_modified_++; */ num_active_--; } } @@ -271,17 +294,19 @@ void XdsVerifier::routeAdded(const envoy::config::route::v3::RouteConfiguration& // if an unreferenced route is sent in delta, it is ignored forever as it will not be sent in // future RDS updates, whereas in SOTW it will be present in all future RDS updates, so if a // listener that refers to it is added in the meantime, it will become active + if (!hasRoute(route.name())) { + all_routes_.insert({route.name(), route}); + } - // in delta, active_routes_ and all_routes_ should be the same as we only send one route at a - // time, so it either becomes active or not if (sotw_or_delta_ == DELTA && std::any_of(listeners_.begin(), listeners_.end(), [&](auto& rep) { return getRoute(rep.listener) == route.name(); })) { - active_routes_.insert({route.name(), route}); - all_routes_.insert({route.name(), route}); + if (!hasActiveRoute(route.name())) { + active_routes_.insert({route.name(), route}); + updateDeltaListeners(route); + } updateDeltaListeners(route); } else if (sotw_or_delta_ == SOTW) { - all_routes_.insert({route.name(), route}); updateSotwListeners(); } } diff --git a/test/server/config_validation/xds_verifier.h b/test/server/config_validation/xds_verifier.h index b1c1c511c26d..ffd7ff38231b 100644 --- a/test/server/config_validation/xds_verifier.h +++ b/test/server/config_validation/xds_verifier.h @@ -49,12 +49,16 @@ class XdsVerifier { void dumpState(); + bool hasListener(const std::string& name, ListenerState state); + bool hasRoute(const envoy::config::listener::v3::Listener& listener); + bool hasRoute(const std::string& name); + bool hasActiveRoute(const envoy::config::listener::v3::Listener& listener); + bool hasActiveRoute(const std::string& name); + private: enum SotwOrDelta { SOTW, DELTA }; std::string getRoute(const envoy::config::listener::v3::Listener& listener); - bool hasRoute(const envoy::config::listener::v3::Listener& listener); - bool hasActiveRoute(const 
envoy::config::listener::v3::Listener& listener); void updateSotwListeners(); void updateDeltaListeners(const envoy::config::route::v3::RouteConfiguration& route); void markForRemoval(ListenerRepresentation& rep); diff --git a/test/server/config_validation/xds_verifier_test.cc b/test/server/config_validation/xds_verifier_test.cc new file mode 100644 index 000000000000..72ca229d05ba --- /dev/null +++ b/test/server/config_validation/xds_verifier_test.cc @@ -0,0 +1,227 @@ +#include "test/config/utility.h" +#include "test/server/config_validation/xds_verifier.h" + +#include "gtest/gtest.h" + +namespace Envoy { + +envoy::config::listener::v3::Listener buildListener(const std::string& listener_name, + const std::string& route_name) { + return ConfigHelper::buildListener(listener_name, route_name, "", "ads_test", + envoy::config::core::v3::ApiVersion::V3); +} + +envoy::config::route::v3::RouteConfiguration buildRoute(const std::string& route_name) { + return ConfigHelper::buildRouteConfig(route_name, "cluster_0", + envoy::config::core::v3::ApiVersion::V3); +} + +// add, warm, drain and remove a listener +TEST(XdsVerifier, Basic) { + XdsVerifier verifier(test::server::config_validation::Config::SOTW); + verifier.listenerAdded(buildListener("listener_0", "route_config_0")); + EXPECT_TRUE(verifier.hasListener("listener_0", verifier.WARMING)); + EXPECT_EQ(verifier.numAdded(), 1); + EXPECT_EQ(verifier.numWarming(), 1); + + verifier.routeAdded(buildRoute("route_config_0")); + EXPECT_TRUE(verifier.hasListener("listener_0", verifier.ACTIVE)); + EXPECT_TRUE(verifier.hasRoute("route_config_0") && verifier.hasActiveRoute("route_config_0")); + EXPECT_EQ(verifier.numAdded(), 1); + EXPECT_EQ(verifier.numWarming(), 0); + EXPECT_EQ(verifier.numActive(), 1); + + verifier.listenerRemoved("listener_0"); + EXPECT_TRUE(verifier.hasListener("listener_0", verifier.DRAINING)); + EXPECT_EQ(verifier.numDraining(), 1); + EXPECT_EQ(verifier.numRemoved(), 1); + EXPECT_EQ(verifier.numActive(), 0); + + verifier.drainedListener("listener_0"); + EXPECT_FALSE(verifier.hasListener("listener_0", verifier.DRAINING)); + EXPECT_EQ(verifier.numRemoved(), 1); +} + +TEST(XdsVerifier, RouteBeforeListenerSOTW) { + XdsVerifier verifier(test::server::config_validation::Config::SOTW); + // send a route first, so envoy will not accept it + verifier.routeAdded(buildRoute("route_config_0")); + EXPECT_TRUE(verifier.hasRoute("route_config_0")); + EXPECT_FALSE(verifier.hasActiveRoute("route_config_0")); + + // envoy still doesn't know about the route, so this will warm + verifier.listenerAdded(buildListener("listener_0", "route_config_0")); + EXPECT_TRUE(verifier.hasListener("listener_0", verifier.WARMING)); + EXPECT_EQ(verifier.numAdded(), 1); + EXPECT_EQ(verifier.numWarming(), 1); + + // send a new route, which will include route_config_0 since SOTW, so route_config_0 will become + // active + verifier.routeAdded(buildRoute("route_config_1")); + EXPECT_TRUE(verifier.hasRoute("route_config_1")); + EXPECT_FALSE(verifier.hasActiveRoute("route_config_1")); + EXPECT_TRUE(verifier.hasActiveRoute("route_config_0")); + EXPECT_TRUE(verifier.hasListener("listener_0", verifier.ACTIVE)); + EXPECT_EQ(verifier.numActive(), 1); +} + +TEST(XdsVerifier, RouteBeforeListenerDelta) { + XdsVerifier verifier(test::server::config_validation::Config::DELTA); + // send a route first, so envoy will not accept it + verifier.routeAdded(buildRoute("route_config_0")); + EXPECT_FALSE(verifier.hasActiveRoute("route_config_0")); + + // envoy still doesn't know about the route, 
so this will warm + verifier.listenerAdded(buildListener("listener_0", "route_config_0")); + EXPECT_TRUE(verifier.hasListener("listener_0", verifier.WARMING)); + EXPECT_EQ(verifier.numAdded(), 1); + EXPECT_EQ(verifier.numWarming(), 1); + + // send a new route, which will not include route_config_0 since SOTW, so route_config_0 will not + // become active + verifier.routeAdded(buildRoute("route_config_1")); + EXPECT_FALSE(verifier.hasActiveRoute("route_config_1")); + EXPECT_FALSE(verifier.hasActiveRoute("route_config_0")); + EXPECT_TRUE(verifier.hasListener("listener_0", verifier.WARMING)); + EXPECT_EQ(verifier.numWarming(), 1); +} + +TEST(XdsVerifier, UpdateWarmingListener) { + XdsVerifier verifier(test::server::config_validation::Config::SOTW); + verifier.listenerAdded(buildListener("listener_0", "route_config_0")); + verifier.listenerUpdated(buildListener("listener_0", "route_config_1")); + // the new listener should directly replace the old listener since it's warming + EXPECT_EQ(verifier.numModified(), 1); + EXPECT_EQ(verifier.numAdded(), 1); + + // send the route for the old listener, which should have been replaced with the update + verifier.routeAdded(buildRoute("route_config_0")); + EXPECT_FALSE(verifier.hasListener("listener_0", verifier.ACTIVE)); + + // now the new should become active + verifier.routeAdded(buildRoute("route_config_1")); + EXPECT_TRUE(verifier.hasListener("listener_0", verifier.ACTIVE)); +} + +TEST(XdsVerifier, UpdateActiveListener) { + XdsVerifier verifier(test::server::config_validation::Config::SOTW); + + // add an active listener + verifier.listenerAdded(buildListener("listener_0", "route_config_0")); + verifier.routeAdded(buildRoute("route_config_0")); + EXPECT_TRUE(verifier.hasListener("listener_0", verifier.ACTIVE)); + + // send an update, which should keep the old listener active until the new warms + verifier.listenerUpdated(buildListener("listener_0", "route_config_1")); + EXPECT_EQ(verifier.numModified(), 1); + EXPECT_TRUE(verifier.hasListener("listener_0", verifier.ACTIVE)); + EXPECT_TRUE(verifier.hasListener("listener_0", verifier.WARMING)); + + // warm the new listener, which should remove the old + verifier.routeAdded(buildRoute("route_config_1")); + EXPECT_TRUE(verifier.hasListener("listener_0", verifier.ACTIVE)); + EXPECT_FALSE(verifier.hasListener("listener_0", verifier.DRAINING)); + EXPECT_FALSE(verifier.hasListener("listener_0", verifier.WARMING)); + + EXPECT_EQ(verifier.numActive(), 1); +} + +TEST(XdsVerifier, UpdateActiveToActive) { + XdsVerifier verifier(test::server::config_validation::Config::SOTW); + + // add two active listeners to different routes + verifier.listenerAdded(buildListener("listener_0", "route_config_0")); + verifier.routeAdded(buildRoute("route_config_0")); + EXPECT_TRUE(verifier.hasListener("listener_0", verifier.ACTIVE)); + + // add an active listener + verifier.listenerAdded(buildListener("listener_1", "route_config_1")); + verifier.routeAdded(buildRoute("route_config_1")); + EXPECT_TRUE(verifier.hasListener("listener_1", verifier.ACTIVE)); + EXPECT_EQ(verifier.numAdded(), 2); + + // send an update, which should make the new listener active straight away and remove the old + // since its route is already active + verifier.listenerUpdated(buildListener("listener_0", "route_config_1")); + EXPECT_TRUE(verifier.hasListener("listener_0", verifier.ACTIVE)); + EXPECT_FALSE(verifier.hasListener("listener_0", verifier.WARMING)); + EXPECT_EQ(verifier.numActive(), 2); +} + +TEST(XdsVerifier, WarmMultipleListenersSOTW) { + 
XdsVerifier verifier(test::server::config_validation::Config::SOTW); + + // add two warming listeners to the same route + verifier.listenerAdded(buildListener("listener_0", "route_config_0")); + verifier.listenerAdded(buildListener("listener_1", "route_config_0")); + + // send the route, make sure both are active + verifier.routeAdded(buildRoute("route_config_0")); + EXPECT_TRUE(verifier.hasListener("listener_0", verifier.ACTIVE)); + EXPECT_TRUE(verifier.hasListener("listener_1", verifier.ACTIVE)); + EXPECT_EQ(verifier.numActive(), 2); +} + +TEST(XdsVerifier, WarmMultipleListenersDelta) { + XdsVerifier verifier(test::server::config_validation::Config::DELTA); + + // add two warming listeners to the same route + verifier.listenerAdded(buildListener("listener_0", "route_config_0")); + verifier.listenerAdded(buildListener("listener_1", "route_config_0")); + + // send the route, make sure both are active + verifier.routeAdded(buildRoute("route_config_0")); + EXPECT_TRUE(verifier.hasListener("listener_0", verifier.ACTIVE)); + EXPECT_TRUE(verifier.hasListener("listener_1", verifier.ACTIVE)); + EXPECT_EQ(verifier.numActive(), 2); +} + +TEST(XdsVerifier, ResendRouteSOTW) { + XdsVerifier verifier(test::server::config_validation::Config::SOTW); + + // send a route that will be ignored + verifier.routeAdded(buildRoute("route_config_0")); + + // add a warming listener that refers to this route + verifier.listenerAdded(buildListener("listener_0", "route_config_0")); + EXPECT_TRUE(verifier.hasListener("listener_0", verifier.WARMING)); + + // send the same route again, make sure listener becomes active + verifier.routeUpdated(buildRoute("route_config_0")); + EXPECT_TRUE(verifier.hasListener("listener_0", verifier.ACTIVE)); +} + +TEST(XdsVerifier, ResendRouteDelta) { + XdsVerifier verifier(test::server::config_validation::Config::DELTA); + + // send a route that will be ignored + verifier.routeAdded(buildRoute("route_config_0")); + + // add a warming listener that refers to this route + verifier.listenerAdded(buildListener("listener_0", "route_config_0")); + EXPECT_TRUE(verifier.hasListener("listener_0", verifier.WARMING)); + + // send the same route again, make sure listener becomes active + verifier.routeUpdated(buildRoute("route_config_0")); + EXPECT_TRUE(verifier.hasListener("listener_0", verifier.ACTIVE)); +} + +TEST(XdsVerifier, RemoveThenAddListener) { + XdsVerifier verifier(test::server::config_validation::Config::SOTW); + + // add an active listener + verifier.listenerAdded(buildListener("listener_0", "route_config_0")); + verifier.routeAdded(buildRoute("route_config_0")); + EXPECT_TRUE(verifier.hasListener("listener_0", verifier.ACTIVE)); + + // remove it + verifier.listenerRemoved("listener_0"); + EXPECT_TRUE(verifier.hasListener("listener_0", verifier.DRAINING)); + + // and add it back, it should now be draining and active + verifier.listenerAdded(buildListener("listener_0", "route_config_0")); + EXPECT_TRUE(verifier.hasListener("listener_0", verifier.DRAINING)); + EXPECT_TRUE(verifier.hasListener("listener_0", verifier.ACTIVE)); +} + +} // namespace Envoy From dbbcc692621537d0cb4abd54950bb6356b6d01d6 Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Tue, 28 Jul 2020 09:48:25 -0700 Subject: [PATCH 765/909] logging: fix delegating log sink races (#12298) This fixes two different issues: 1) Previously there was no locking around log sink replacement, so it was possible for a log sink to get removed by one thread while getting written to by another thread. 
2) Even with locking, the base class destructor pattern would do the swap after the derived class was destroyed, leading to undefined behavior. This was easy to reproduce in cx_limit_integration_test but is an issue anywhere the log expectations are used, or previously in the death test stderr workaround (EXPECT_DEATH_LOG_TO_STDERR) for coverage which has been removed because coverage no longer logs to a file and instead logs to stderr like the rest of the tests. Fixes https://github.com/envoyproxy/envoy/issues/11841 Risk Level: Medium, code is a bit scary, though only really in tests Testing: Existing tests Docs Changes: N/A Release Notes: N/A Signed-off-by: Matt Klein --- source/common/common/logger.cc | 35 ++++++++++++++---- source/common/common/logger.h | 34 ++++++++++++++---- source/common/common/logger_delegates.cc | 6 +++- source/common/common/logger_delegates.h | 4 +-- test/common/buffer/owned_impl_test.cc | 1 - test/common/common/assert_test.cc | 1 - test/common/common/logger_test.cc | 18 +++++----- test/common/http/header_map_impl_test.cc | 8 ++--- .../http2/metadata_encoder_decoder_test.cc | 1 - test/common/network/address_impl_test.cc | 17 +++++---- test/common/network/connection_impl_test.cc | 2 +- test/common/network/listener_impl_test.cc | 2 +- test/common/network/udp_listener_impl_test.cc | 1 - test/common/signal/signals_test.cc | 16 +++++---- test/common/singleton/manager_impl_test.cc | 3 +- .../upstream/conn_pool_map_impl_test.cc | 16 ++++----- test/exe/main_common_test.cc | 3 +- test/exe/terminate_handler_test.cc | 2 +- .../compressor/zlib_compressor_impl_test.cc | 12 +++---- .../zlib_decompressor_impl_test.cc | 2 +- .../quiche/platform/quic_platform_test.cc | 36 +++++++++---------- .../header_prefix_integration_test.cc | 3 +- test/integration/xds_integration_test.cc | 3 +- test/test_common/logging.cc | 7 ++-- test/test_common/test_time_system_test.cc | 7 ++-- test/test_common/utility.h | 16 --------- 26 files changed, 141 insertions(+), 115 deletions(-) diff --git a/source/common/common/logger.cc b/source/common/common/logger.cc index 080b9a08d85d..9d84904b8d93 100644 --- a/source/common/common/logger.cc +++ b/source/common/common/logger.cc @@ -19,18 +19,34 @@ namespace Logger { StandardLogger::StandardLogger(const std::string& name) : Logger(std::make_shared(name, Registry::getSink())) {} -SinkDelegate::SinkDelegate(DelegatingLogSinkSharedPtr log_sink) - : previous_delegate_(log_sink->delegate()), log_sink_(log_sink) { - log_sink->setDelegate(this); -} +SinkDelegate::SinkDelegate(DelegatingLogSinkSharedPtr log_sink) : log_sink_(log_sink) {} SinkDelegate::~SinkDelegate() { - assert(log_sink_->delegate() == this); // Ensures stacked allocation of delegates. + // The previous delegate should have never been set or should have been reset by now via + // restoreDelegate(); + assert(previous_delegate_ == nullptr); +} + +void SinkDelegate::setDelegate() { + // There should be no previous delegate before this call. + assert(previous_delegate_ == nullptr); + previous_delegate_ = log_sink_->delegate(); + log_sink_->setDelegate(this); +} + +void SinkDelegate::restoreDelegate() { + // Ensures stacked allocation of delegates. 
+ assert(log_sink_->delegate() == this); log_sink_->setDelegate(previous_delegate_); + previous_delegate_ = nullptr; } StderrSinkDelegate::StderrSinkDelegate(DelegatingLogSinkSharedPtr log_sink) - : SinkDelegate(log_sink) {} + : SinkDelegate(log_sink) { + setDelegate(); +} + +StderrSinkDelegate::~StderrSinkDelegate() { restoreDelegate(); } void StderrSinkDelegate::log(absl::string_view msg) { Thread::OptionalLockGuard guard(lock_); @@ -60,6 +76,13 @@ void DelegatingLogSink::log(const spdlog::details::log_msg& msg) { } lock.Release(); + // Hold the sink mutex while performing the actual logging. This prevents the sink from being + // swapped during an individual log event. + // TODO(mattklein123): In production this lock will never be contended. In practice, thread + // protection is really only needed in tests. It would be nice to figure out a test-only + // mechanism for this that does not require extra locking that we don't explicitly need in the + // prod code. + absl::ReaderMutexLock sink_lock(&sink_mutex_); if (should_escape_) { sink_->log(escapeLogLine(msg_view)); } else { diff --git a/source/common/common/logger.h b/source/common/common/logger.h index d315f8ef56d6..6fb9c5719096 100644 --- a/source/common/common/logger.h +++ b/source/common/common/logger.h @@ -104,10 +104,21 @@ class SinkDelegate : NonCopyable { virtual void flush() PURE; protected: + // Swap the current log sink delegate for this one. This should be called by the derived class + // constructor immediately before returning. This is required to match restoreDelegate(), + // otherwise it's possible for the previous delegate to get set in the base class constructor, + // the derived class constructor throws, and cleanup becomes broken. + void setDelegate(); + + // Swap the current log sink (this) for the previous one. This should be called by the derived + // class destructor in the body. This is critical as otherwise it's possible for a log message + // to get routed to a partially destructed sink. + void restoreDelegate(); + SinkDelegate* previousDelegate() { return previous_delegate_; } private: - SinkDelegate* previous_delegate_; + SinkDelegate* previous_delegate_{nullptr}; DelegatingLogSinkSharedPtr log_sink_; }; @@ -117,6 +128,7 @@ class SinkDelegate : NonCopyable { class StderrSinkDelegate : public SinkDelegate { public: explicit StderrSinkDelegate(DelegatingLogSinkSharedPtr log_sink); + ~StderrSinkDelegate() override; // SinkDelegate void log(absl::string_view msg) override; @@ -141,7 +153,10 @@ class DelegatingLogSink : public spdlog::sinks::sink { // spdlog::sinks::sink void log(const spdlog::details::log_msg& msg) override; - void flush() override { sink_->flush(); } + void flush() override { + absl::ReaderMutexLock lock(&sink_mutex_); + sink_->flush(); + } void set_pattern(const std::string& pattern) override { set_formatter(spdlog::details::make_unique(pattern)); } @@ -180,13 +195,20 @@ class DelegatingLogSink : public spdlog::sinks::sink { DelegatingLogSink() = default; - void setDelegate(SinkDelegate* sink) { sink_ = sink; } - SinkDelegate* delegate() { return sink_; } + void setDelegate(SinkDelegate* sink) { + absl::WriterMutexLock lock(&sink_mutex_); + sink_ = sink; + } + SinkDelegate* delegate() { + absl::ReaderMutexLock lock(&sink_mutex_); + return sink_; + } - SinkDelegate* sink_{nullptr}; + SinkDelegate* sink_ ABSL_GUARDED_BY(sink_mutex_){nullptr}; + absl::Mutex sink_mutex_; std::unique_ptr stderr_sink_; // Builtin sink to use as a last resort. 
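// A minimal usage sketch of the delegate-swap pattern described in this patch (illustrative only:
// "ExampleSinkDelegate" is a hypothetical class in the Envoy::Logger namespace, not part of the
// change). The most-derived sink installs itself at the very end of its own constructor and
// restores the previous delegate in its own destructor, so a concurrent log line can never reach a
// partially constructed or partially destructed sink.
#include "common/common/logger.h"

class ExampleSinkDelegate : public SinkDelegate {
public:
  explicit ExampleSinkDelegate(DelegatingLogSinkSharedPtr log_sink) : SinkDelegate(log_sink) {
    setDelegate(); // Last statement of the most-derived constructor.
  }
  ~ExampleSinkDelegate() override { restoreDelegate(); } // Swap back before members are destroyed.

  // SinkDelegate
  void log(absl::string_view msg) override { /* write msg to the custom destination */ }
  void flush() override {}
};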
std::unique_ptr formatter_ ABSL_GUARDED_BY(format_mutex_); - absl::Mutex format_mutex_; // direct absl reference to break build cycle. + absl::Mutex format_mutex_; bool should_escape_{false}; }; diff --git a/source/common/common/logger_delegates.cc b/source/common/common/logger_delegates.cc index 31685fd2671f..2fedc7838edb 100644 --- a/source/common/common/logger_delegates.cc +++ b/source/common/common/logger_delegates.cc @@ -13,7 +13,11 @@ namespace Logger { FileSinkDelegate::FileSinkDelegate(const std::string& log_path, AccessLog::AccessLogManager& log_manager, DelegatingLogSinkSharedPtr log_sink) - : SinkDelegate(log_sink), log_file_(log_manager.createAccessLog(log_path)) {} + : SinkDelegate(log_sink), log_file_(log_manager.createAccessLog(log_path)) { + setDelegate(); +} + +FileSinkDelegate::~FileSinkDelegate() { restoreDelegate(); } void FileSinkDelegate::log(absl::string_view msg) { // Log files have internal locking to ensure serial, non-interleaved diff --git a/source/common/common/logger_delegates.h b/source/common/common/logger_delegates.h index 504855d58f44..f6058fcd507f 100644 --- a/source/common/common/logger_delegates.h +++ b/source/common/common/logger_delegates.h @@ -14,9 +14,6 @@ namespace Envoy { namespace Logger { -class DelegatingLogSink; -using DelegatingLogSinkSharedPtr = std::shared_ptr; - /** * SinkDelegate that writes log messages to a file. */ @@ -24,6 +21,7 @@ class FileSinkDelegate : public SinkDelegate { public: FileSinkDelegate(const std::string& log_path, AccessLog::AccessLogManager& log_manager, DelegatingLogSinkSharedPtr log_sink); + ~FileSinkDelegate() override; // SinkDelegate void log(absl::string_view msg) override; diff --git a/test/common/buffer/owned_impl_test.cc b/test/common/buffer/owned_impl_test.cc index bb6799aed98e..d22b5c072c76 100644 --- a/test/common/buffer/owned_impl_test.cc +++ b/test/common/buffer/owned_impl_test.cc @@ -1034,7 +1034,6 @@ TEST_F(OwnedImplTest, ReadReserveAndCommit) { } TEST(OverflowDetectingUInt64, Arithmetic) { - Logger::StderrSinkDelegate stderr_sink(Logger::Registry::getSink()); // For coverage build. OverflowDetectingUInt64 length; length += 1; length -= 1; diff --git a/test/common/common/assert_test.cc b/test/common/common/assert_test.cc index ec9e96f45792..880aa0f4f602 100644 --- a/test/common/common/assert_test.cc +++ b/test/common/common/assert_test.cc @@ -7,7 +7,6 @@ namespace Envoy { TEST(ReleaseAssertDeathTest, VariousLogs) { - Logger::StderrSinkDelegate stderr_sink(Logger::Registry::getSink()); // For coverage build. EXPECT_DEATH({ RELEASE_ASSERT(0, ""); }, ".*assert failure: 0.*"); EXPECT_DEATH({ RELEASE_ASSERT(0, "With some logs"); }, ".*assert failure: 0. 
Details: With some logs.*"); diff --git a/test/common/common/logger_test.cc b/test/common/common/logger_test.cc index 9320078e881a..8e4d8839852d 100644 --- a/test/common/common/logger_test.cc +++ b/test/common/common/logger_test.cc @@ -8,30 +8,32 @@ namespace Envoy { namespace Logger { -class LoggerEscapeTest : public testing::Test {}; +TEST(LoggerTest, StackingStderrSinkDelegate) { + StderrSinkDelegate stacked(Envoy::Logger::Registry::getSink()); +} -TEST_F(LoggerEscapeTest, LinuxEOL) { +TEST(LoggerEscapeTest, LinuxEOL) { EXPECT_EQ("line 1 \\n line 2\n", DelegatingLogSink::escapeLogLine("line 1 \n line 2\n")); } -TEST_F(LoggerEscapeTest, WindowEOL) { +TEST(LoggerEscapeTest, WindowEOL) { EXPECT_EQ("line 1 \\n line 2\r\n", DelegatingLogSink::escapeLogLine("line 1 \n line 2\r\n")); } -TEST_F(LoggerEscapeTest, NoTrailingWhitespace) { +TEST(LoggerEscapeTest, NoTrailingWhitespace) { EXPECT_EQ("line 1 \\n line 2", DelegatingLogSink::escapeLogLine("line 1 \n line 2")); } -TEST_F(LoggerEscapeTest, NoWhitespace) { +TEST(LoggerEscapeTest, NoWhitespace) { EXPECT_EQ("line1", DelegatingLogSink::escapeLogLine("line1")); } -TEST_F(LoggerEscapeTest, AnyTrailingWhitespace) { +TEST(LoggerEscapeTest, AnyTrailingWhitespace) { EXPECT_EQ("line 1 \\t tab 1 \\n line 2\t\n", DelegatingLogSink::escapeLogLine("line 1 \t tab 1 \n line 2\t\n")); } -TEST_F(LoggerEscapeTest, WhitespaceOnly) { +TEST(LoggerEscapeTest, WhitespaceOnly) { // 8 spaces EXPECT_EQ(" ", DelegatingLogSink::escapeLogLine(" ")); @@ -39,7 +41,7 @@ TEST_F(LoggerEscapeTest, WhitespaceOnly) { EXPECT_EQ("\r\n\t \r\n \n", DelegatingLogSink::escapeLogLine("\r\n\t \r\n \n")); } -TEST_F(LoggerEscapeTest, Empty) { EXPECT_EQ("", DelegatingLogSink::escapeLogLine("")); } +TEST(LoggerEscapeTest, Empty) { EXPECT_EQ("", DelegatingLogSink::escapeLogLine("")); } } // namespace Logger } // namespace Envoy diff --git a/test/common/http/header_map_impl_test.cc b/test/common/http/header_map_impl_test.cc index ad9d1ccb912c..0e5b0c3df8cd 100644 --- a/test/common/http/header_map_impl_test.cc +++ b/test/common/http/header_map_impl_test.cc @@ -1005,14 +1005,14 @@ TEST(HeaderMapImplTest, TestAppendHeader) { TEST(TestHeaderMapImplDeathTest, TestHeaderLengthChecks) { HeaderString value; value.setCopy("some;"); - EXPECT_DEATH_LOG_TO_STDERR(value.append(nullptr, std::numeric_limits::max()), - "Trying to allocate overly large headers."); + EXPECT_DEATH(value.append(nullptr, std::numeric_limits::max()), + "Trying to allocate overly large headers."); std::string source("hello"); HeaderString reference; reference.setReference(source); - EXPECT_DEATH_LOG_TO_STDERR(reference.append(nullptr, std::numeric_limits::max()), - "Trying to allocate overly large headers."); + EXPECT_DEATH(reference.append(nullptr, std::numeric_limits::max()), + "Trying to allocate overly large headers."); } TEST(HeaderMapImplTest, PseudoHeaderOrder) { diff --git a/test/common/http/http2/metadata_encoder_decoder_test.cc b/test/common/http/http2/metadata_encoder_decoder_test.cc index c038c7a01a65..ef225502dcaa 100644 --- a/test/common/http/http2/metadata_encoder_decoder_test.cc +++ b/test/common/http/http2/metadata_encoder_decoder_test.cc @@ -333,7 +333,6 @@ using MetadataEncoderDecoderDeathTest = MetadataEncoderDecoderTest; // Crash if a caller tries to pack more frames than the encoder has data for. TEST_F(MetadataEncoderDecoderDeathTest, PackTooManyFrames) { - Logger::StderrSinkDelegate stderr_sink(Logger::Registry::getSink()); // For coverage build. 
MetadataMap metadata_map = { {"header_key1", std::string(5, 'a')}, {"header_key2", std::string(5, 'b')}, diff --git a/test/common/network/address_impl_test.cc b/test/common/network/address_impl_test.cc index 3eb398804466..127632fe3737 100644 --- a/test/common/network/address_impl_test.cc +++ b/test/common/network/address_impl_test.cc @@ -448,9 +448,9 @@ TEST(AddressFromSockAddrDeathTest, IPv4) { EXPECT_EQ(1, inet_pton(AF_INET, "1.2.3.4", &sin.sin_addr)); sin.sin_port = htons(6502); - EXPECT_DEATH_LOG_TO_STDERR(addressFromSockAddr(ss, 1), "ss_len"); - EXPECT_DEATH_LOG_TO_STDERR(addressFromSockAddr(ss, sizeof(sockaddr_in) - 1), "ss_len"); - EXPECT_DEATH_LOG_TO_STDERR(addressFromSockAddr(ss, sizeof(sockaddr_in) + 1), "ss_len"); + EXPECT_DEATH(addressFromSockAddr(ss, 1), "ss_len"); + EXPECT_DEATH(addressFromSockAddr(ss, sizeof(sockaddr_in) - 1), "ss_len"); + EXPECT_DEATH(addressFromSockAddr(ss, sizeof(sockaddr_in) + 1), "ss_len"); EXPECT_EQ("1.2.3.4:6502", addressFromSockAddr(ss, sizeof(sockaddr_in))->asString()); @@ -467,9 +467,9 @@ TEST(AddressFromSockAddrDeathTest, IPv6) { EXPECT_EQ(1, inet_pton(AF_INET6, "01:023::00Ef", &sin6.sin6_addr)); sin6.sin6_port = htons(32000); - EXPECT_DEATH_LOG_TO_STDERR(addressFromSockAddr(ss, 1), "ss_len"); - EXPECT_DEATH_LOG_TO_STDERR(addressFromSockAddr(ss, sizeof(sockaddr_in6) - 1), "ss_len"); - EXPECT_DEATH_LOG_TO_STDERR(addressFromSockAddr(ss, sizeof(sockaddr_in6) + 1), "ss_len"); + EXPECT_DEATH(addressFromSockAddr(ss, 1), "ss_len"); + EXPECT_DEATH(addressFromSockAddr(ss, sizeof(sockaddr_in6) - 1), "ss_len"); + EXPECT_DEATH(addressFromSockAddr(ss, sizeof(sockaddr_in6) + 1), "ss_len"); EXPECT_EQ("[1:23::ef]:32000", addressFromSockAddr(ss, sizeof(sockaddr_in6))->asString()); @@ -490,9 +490,8 @@ TEST(AddressFromSockAddrDeathTest, Pipe) { StringUtil::strlcpy(sun.sun_path, "/some/path", sizeof sun.sun_path); - EXPECT_DEATH_LOG_TO_STDERR(addressFromSockAddr(ss, 1), "ss_len"); - EXPECT_DEATH_LOG_TO_STDERR(addressFromSockAddr(ss, offsetof(struct sockaddr_un, sun_path)), - "ss_len"); + EXPECT_DEATH(addressFromSockAddr(ss, 1), "ss_len"); + EXPECT_DEATH(addressFromSockAddr(ss, offsetof(struct sockaddr_un, sun_path)), "ss_len"); socklen_t ss_len = offsetof(struct sockaddr_un, sun_path) + 1 + strlen(sun.sun_path); EXPECT_EQ("/some/path", addressFromSockAddr(ss, ss_len)->asString()); diff --git a/test/common/network/connection_impl_test.cc b/test/common/network/connection_impl_test.cc index 4922309e427b..55328d741c24 100644 --- a/test/common/network/connection_impl_test.cc +++ b/test/common/network/connection_impl_test.cc @@ -85,7 +85,7 @@ TEST_P(ConnectionImplDeathTest, BadFd) { Event::DispatcherPtr dispatcher(api->allocateDispatcher("test_thread")); IoHandlePtr io_handle = std::make_unique(); StreamInfo::StreamInfoImpl stream_info(dispatcher->timeSource()); - EXPECT_DEATH_LOG_TO_STDERR( + EXPECT_DEATH( ConnectionImpl(*dispatcher, std::make_unique(std::move(io_handle), nullptr, nullptr), Network::Test::createRawBufferSocket(), stream_info, false), diff --git a/test/common/network/listener_impl_test.cc b/test/common/network/listener_impl_test.cc index 9f6f515dac05..5708f826e73d 100644 --- a/test/common/network/listener_impl_test.cc +++ b/test/common/network/listener_impl_test.cc @@ -59,7 +59,7 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, ListenerImplDeathTest, testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), TestUtility::ipTestParamsToString); TEST_P(ListenerImplDeathTest, ErrorCallback) { - EXPECT_DEATH_LOG_TO_STDERR(errorCallbackTest(GetParam()), ".*listener 
accept failure.*"); + EXPECT_DEATH(errorCallbackTest(GetParam()), ".*listener accept failure.*"); } class TestListenerImpl : public ListenerImpl { diff --git a/test/common/network/udp_listener_impl_test.cc b/test/common/network/udp_listener_impl_test.cc index 20c83106dce6..11b4e5346ead 100644 --- a/test/common/network/udp_listener_impl_test.cc +++ b/test/common/network/udp_listener_impl_test.cc @@ -390,7 +390,6 @@ TEST_P(UdpListenerImplTest, SendData) { * The send fails because the server_socket is created with bind=false. */ TEST_P(UdpListenerImplTest, SendDataError) { - Logger::StderrSinkDelegate stderr_sink(Logger::Registry::getSink()); // For coverage build. const std::string payload("hello world"); Buffer::InstancePtr buffer(new Buffer::OwnedImpl()); buffer->add(payload); diff --git a/test/common/signal/signals_test.cc b/test/common/signal/signals_test.cc index 98753f047d0f..475bc7b8b0ee 100644 --- a/test/common/signal/signals_test.cc +++ b/test/common/signal/signals_test.cc @@ -26,10 +26,11 @@ namespace Envoy { #ifndef ASANITIZED TEST(SignalsDeathTest, InvalidAddressDeathTest) { SignalAction actions; - EXPECT_DEATH_LOG_TO_STDERR( + EXPECT_DEATH( []() -> void { // Oops! volatile int* nasty_ptr = reinterpret_cast(0x0); + // NOLINTNEXTLINE(clang-analyzer-core.NullDereference) *(nasty_ptr) = 0; }(), "backtrace.*Segmentation fault"); @@ -44,10 +45,11 @@ TEST(SignalsDeathTest, RegisteredHandlerTest) { SignalAction::registerFatalErrorHandler(handler); SignalAction actions; // Make sure the fatal error log "HERE" registered above is logged on fatal error. - EXPECT_DEATH_LOG_TO_STDERR( + EXPECT_DEATH( []() -> void { // Oops! volatile int* nasty_ptr = reinterpret_cast(0x0); + // NOLINTNEXTLINE(clang-analyzer-core.NullDereference) *(nasty_ptr) = 0; }(), "HERE"); @@ -56,7 +58,7 @@ TEST(SignalsDeathTest, RegisteredHandlerTest) { TEST(SignalsDeathTest, BusDeathTest) { SignalAction actions; - EXPECT_DEATH_LOG_TO_STDERR( + EXPECT_DEATH( []() -> void { // Bus error is tricky. There's one way that can work on POSIX systems // described below but it depends on mmaping a file. Just make it easy and @@ -72,7 +74,7 @@ TEST(SignalsDeathTest, BusDeathTest) { TEST(SignalsDeathTest, BadMathDeathTest) { SignalAction actions; - EXPECT_DEATH_LOG_TO_STDERR( + EXPECT_DEATH( []() -> void { // It turns out to be really hard to not have the optimizer get rid of a // division by zero. Just raise the signal for this test. @@ -85,7 +87,7 @@ TEST(SignalsDeathTest, BadMathDeathTest) { // Unfortunately we don't have a reliable way to do this on other platforms TEST(SignalsDeathTest, IllegalInstructionDeathTest) { SignalAction actions; - EXPECT_DEATH_LOG_TO_STDERR( + EXPECT_DEATH( []() -> void { // Intel defines the "ud2" opcode to be an invalid instruction: __asm__("ud2"); @@ -96,7 +98,7 @@ TEST(SignalsDeathTest, IllegalInstructionDeathTest) { TEST(SignalsDeathTest, AbortDeathTest) { SignalAction actions; - EXPECT_DEATH_LOG_TO_STDERR([]() -> void { abort(); }(), "backtrace.*Abort(ed)?"); + EXPECT_DEATH([]() -> void { abort(); }(), "backtrace.*Abort(ed)?"); } TEST(SignalsDeathTest, RestoredPreviousHandlerDeathTest) { @@ -108,7 +110,7 @@ TEST(SignalsDeathTest, RestoredPreviousHandlerDeathTest) { // goes out of scope, NOT the default. 
} // Outer SignalAction should be active again: - EXPECT_DEATH_LOG_TO_STDERR([]() -> void { abort(); }(), "backtrace.*Abort(ed)?"); + EXPECT_DEATH([]() -> void { abort(); }(), "backtrace.*Abort(ed)?"); } #endif diff --git a/test/common/singleton/manager_impl_test.cc b/test/common/singleton/manager_impl_test.cc index 1a1f4c671a1c..aa5796ae79c1 100644 --- a/test/common/singleton/manager_impl_test.cc +++ b/test/common/singleton/manager_impl_test.cc @@ -18,8 +18,7 @@ static void deathTestWorker() { } TEST(SingletonManagerImplDeathTest, NotRegistered) { - EXPECT_DEATH_LOG_TO_STDERR(deathTestWorker(), - "invalid singleton name 'foo'. Make sure it is registered."); + EXPECT_DEATH(deathTestWorker(), "invalid singleton name 'foo'. Make sure it is registered."); } SINGLETON_MANAGER_REGISTRATION(test); diff --git a/test/common/upstream/conn_pool_map_impl_test.cc b/test/common/upstream/conn_pool_map_impl_test.cc index 6c4605cd96d2..64c518b2fc68 100644 --- a/test/common/upstream/conn_pool_map_impl_test.cc +++ b/test/common/upstream/conn_pool_map_impl_test.cc @@ -401,8 +401,8 @@ TEST_F(ConnPoolMapImplDeathTest, ReentryClearTripsAssert) { ON_CALL(*mock_pools_[0], addDrainedCallback(_)) .WillByDefault(Invoke([](Http::ConnectionPool::Instance::DrainedCb cb) { cb(); })); - EXPECT_DEATH_LOG_TO_STDERR(test_map->addDrainedCallback([&test_map] { test_map->clear(); }), - ".*Details: A resource should only be entered once"); + EXPECT_DEATH(test_map->addDrainedCallback([&test_map] { test_map->clear(); }), + ".*Details: A resource should only be entered once"); } TEST_F(ConnPoolMapImplDeathTest, ReentryGetPoolTripsAssert) { @@ -412,7 +412,7 @@ TEST_F(ConnPoolMapImplDeathTest, ReentryGetPoolTripsAssert) { ON_CALL(*mock_pools_[0], addDrainedCallback(_)) .WillByDefault(Invoke([](Http::ConnectionPool::Instance::DrainedCb cb) { cb(); })); - EXPECT_DEATH_LOG_TO_STDERR( + EXPECT_DEATH( test_map->addDrainedCallback([&test_map, this] { test_map->getPool(2, getBasicFactory()); }), ".*Details: A resource should only be entered once"); } @@ -424,9 +424,8 @@ TEST_F(ConnPoolMapImplDeathTest, ReentryDrainConnectionsTripsAssert) { ON_CALL(*mock_pools_[0], addDrainedCallback(_)) .WillByDefault(Invoke([](Http::ConnectionPool::Instance::DrainedCb cb) { cb(); })); - EXPECT_DEATH_LOG_TO_STDERR( - test_map->addDrainedCallback([&test_map] { test_map->drainConnections(); }), - ".*Details: A resource should only be entered once"); + EXPECT_DEATH(test_map->addDrainedCallback([&test_map] { test_map->drainConnections(); }), + ".*Details: A resource should only be entered once"); } TEST_F(ConnPoolMapImplDeathTest, ReentryAddDrainedCallbackTripsAssert) { @@ -436,9 +435,8 @@ TEST_F(ConnPoolMapImplDeathTest, ReentryAddDrainedCallbackTripsAssert) { ON_CALL(*mock_pools_[0], addDrainedCallback(_)) .WillByDefault(Invoke([](Http::ConnectionPool::Instance::DrainedCb cb) { cb(); })); - EXPECT_DEATH_LOG_TO_STDERR( - test_map->addDrainedCallback([&test_map] { test_map->addDrainedCallback([]() {}); }), - ".*Details: A resource should only be entered once"); + EXPECT_DEATH(test_map->addDrainedCallback([&test_map] { test_map->addDrainedCallback([]() {}); }), + ".*Details: A resource should only be entered once"); } #endif // !defined(NDEBUG) diff --git a/test/exe/main_common_test.cc b/test/exe/main_common_test.cc index 2305986ec677..d57b7e193416 100644 --- a/test/exe/main_common_test.cc +++ b/test/exe/main_common_test.cc @@ -161,7 +161,7 @@ TEST_P(MainCommonDeathTest, OutOfMemoryHandler) { // so disable handling that signal. 
signal(SIGABRT, SIG_DFL); #endif - EXPECT_DEATH_LOG_TO_STDERR( + EXPECT_DEATH( []() { // Allocating a fixed-size large array that results in OOM on gcc // results in a compile-time error on clang of "array size too big", @@ -172,6 +172,7 @@ TEST_P(MainCommonDeathTest, OutOfMemoryHandler) { size *= 1000) { int* p = new int[size]; // Use the pointer to prevent clang from optimizing the allocation away in opt mode. + // NOLINTNEXTLINE(clang-analyzer-cplusplus.NewDeleteLeaks) ENVOY_LOG_MISC(debug, "p={}", reinterpret_cast(p)); } }(), diff --git a/test/exe/terminate_handler_test.cc b/test/exe/terminate_handler_test.cc index f37774352b3a..d48782242ae9 100644 --- a/test/exe/terminate_handler_test.cc +++ b/test/exe/terminate_handler_test.cc @@ -8,7 +8,7 @@ namespace Envoy { TEST(TerminateHandlerDeathTest, HandlerInstalledTest) { TerminateHandler handler; - EXPECT_DEATH_LOG_TO_STDERR([]() -> void { std::terminate(); }(), ".*std::terminate called!.*"); + EXPECT_DEATH([]() -> void { std::terminate(); }(), ".*std::terminate called!.*"); } } // namespace Envoy diff --git a/test/extensions/compression/gzip/compressor/zlib_compressor_impl_test.cc b/test/extensions/compression/gzip/compressor/zlib_compressor_impl_test.cc index ee8b487549d6..7d5046e1c744 100644 --- a/test/extensions/compression/gzip/compressor/zlib_compressor_impl_test.cc +++ b/test/extensions/compression/gzip/compressor/zlib_compressor_impl_test.cc @@ -163,13 +163,11 @@ TEST_P(ZlibCompressorImplFactoryTest, CreateCompressorTest) { // Exercises death by passing bad initialization params or by calling // compress before init. TEST_F(ZlibCompressorImplDeathTest, CompressorDeathTest) { - EXPECT_DEATH_LOG_TO_STDERR(compressorBadInitTestHelper(100, 8), "assert failure: result >= 0"); - EXPECT_DEATH_LOG_TO_STDERR(compressorBadInitTestHelper(31, 10), "assert failure: result >= 0"); - EXPECT_DEATH_LOG_TO_STDERR(uninitializedCompressorTestHelper(), "assert failure: result == Z_OK"); - EXPECT_DEATH_LOG_TO_STDERR(uninitializedCompressorFlushTestHelper(), - "assert failure: result == Z_OK"); - EXPECT_DEATH_LOG_TO_STDERR(uninitializedCompressorFinishTestHelper(), - "assert failure: result == Z_STREAM_END"); + EXPECT_DEATH(compressorBadInitTestHelper(100, 8), "assert failure: result >= 0"); + EXPECT_DEATH(compressorBadInitTestHelper(31, 10), "assert failure: result >= 0"); + EXPECT_DEATH(uninitializedCompressorTestHelper(), "assert failure: result == Z_OK"); + EXPECT_DEATH(uninitializedCompressorFlushTestHelper(), "assert failure: result == Z_OK"); + EXPECT_DEATH(uninitializedCompressorFinishTestHelper(), "assert failure: result == Z_STREAM_END"); } // Exercises compressor's checksum by calling it before init or compress. diff --git a/test/extensions/compression/gzip/decompressor/zlib_decompressor_impl_test.cc b/test/extensions/compression/gzip/decompressor/zlib_decompressor_impl_test.cc index 782fd9af090e..43ae89d42fd0 100644 --- a/test/extensions/compression/gzip/decompressor/zlib_decompressor_impl_test.cc +++ b/test/extensions/compression/gzip/decompressor/zlib_decompressor_impl_test.cc @@ -87,7 +87,7 @@ class ZlibDecompressorImplFailureTest : public ZlibDecompressorImplTest { // Test different failures by passing bad initialization params or by calling decompress before // init. 
TEST_F(ZlibDecompressorImplFailureTest, DecompressorFailureTest) { - EXPECT_DEATH_LOG_TO_STDERR(decompressorBadInitTestHelper(100), "assert failure: result >= 0"); + EXPECT_DEATH(decompressorBadInitTestHelper(100), "assert failure: result >= 0"); uninitializedDecompressorTestHelper(); } diff --git a/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc b/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc index 50e24f4e0f0d..68141aa94039 100644 --- a/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc +++ b/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc @@ -280,8 +280,8 @@ TEST_F(QuicPlatformTest, QuicThread) { EXPECT_EQ(1, value); // QuicThread will panic if it's started but not joined. - EXPECT_DEATH_LOG_TO_STDERR({ AdderThread(&value, 2).Start(); }, - "QuicThread should be joined before destruction"); + EXPECT_DEATH({ AdderThread(&value, 2).Start(); }, + "QuicThread should be joined before destruction"); } TEST_F(QuicPlatformTest, QuicUint128) { @@ -397,9 +397,9 @@ TEST_F(QuicPlatformTest, QuicCHECK) { "CHECK failed:.* Supposed to fail in debug mode."); EXPECT_DEBUG_DEATH({ DCHECK(false); }, "CHECK failed"); - EXPECT_DEATH_LOG_TO_STDERR({ CHECK(false) << " Supposed to fail in all modes."; }, - "CHECK failed:.* Supposed to fail in all modes."); - EXPECT_DEATH_LOG_TO_STDERR({ CHECK(false); }, "CHECK failed"); + EXPECT_DEATH({ CHECK(false) << " Supposed to fail in all modes."; }, + "CHECK failed:.* Supposed to fail in all modes."); + EXPECT_DEATH({ CHECK(false); }, "CHECK failed"); } // Test the behaviors of the cross products of @@ -408,16 +408,16 @@ TEST_F(QuicPlatformTest, QuicCHECK) { TEST_F(QuicPlatformTest, QuicFatalLog) { #ifdef NDEBUG // Release build - EXPECT_DEATH_LOG_TO_STDERR(QUIC_LOG(FATAL) << "Should abort 0", "Should abort 0"); + EXPECT_DEATH(QUIC_LOG(FATAL) << "Should abort 0", "Should abort 0"); QUIC_LOG(DFATAL) << "Should not abort"; QUIC_DLOG(FATAL) << "Should compile out"; QUIC_DLOG(DFATAL) << "Should compile out"; #else // Debug build - EXPECT_DEATH_LOG_TO_STDERR(QUIC_LOG(FATAL) << "Should abort 1", "Should abort 1"); - EXPECT_DEATH_LOG_TO_STDERR(QUIC_LOG(DFATAL) << "Should abort 2", "Should abort 2"); - EXPECT_DEATH_LOG_TO_STDERR(QUIC_DLOG(FATAL) << "Should abort 3", "Should abort 3"); - EXPECT_DEATH_LOG_TO_STDERR(QUIC_DLOG(DFATAL) << "Should abort 4", "Should abort 4"); + EXPECT_DEATH(QUIC_LOG(FATAL) << "Should abort 1", "Should abort 1"); + EXPECT_DEATH(QUIC_LOG(DFATAL) << "Should abort 2", "Should abort 2"); + EXPECT_DEATH(QUIC_DLOG(FATAL) << "Should abort 3", "Should abort 3"); + EXPECT_DEATH(QUIC_DLOG(DFATAL) << "Should abort 4", "Should abort 4"); #endif } @@ -435,7 +435,7 @@ TEST_F(QuicPlatformTest, QuicNotReached) { #ifdef NDEBUG QUIC_NOTREACHED(); // Expect no-op. 
#else - EXPECT_DEATH_LOG_TO_STDERR(QUIC_NOTREACHED(), "not reached"); + EXPECT_DEATH(QUIC_NOTREACHED(), "not reached"); #endif } @@ -597,12 +597,12 @@ TEST_F(QuicPlatformTest, QuicFlags) { } TEST_F(QuicPlatformTest, QuicPccSender) { - EXPECT_DEATH_LOG_TO_STDERR(quic::CreatePccSender(/*clock=*/nullptr, /*rtt_stats=*/nullptr, - /*unacked_packets=*/nullptr, /*random=*/nullptr, - /*stats=*/nullptr, - /*initial_congestion_window=*/0, - /*max_congestion_window=*/0), - "PccSender is not supported."); + EXPECT_DEATH(quic::CreatePccSender(/*clock=*/nullptr, /*rtt_stats=*/nullptr, + /*unacked_packets=*/nullptr, /*random=*/nullptr, + /*stats=*/nullptr, + /*initial_congestion_window=*/0, + /*max_congestion_window=*/0), + "PccSender is not supported."); } class FileUtilsTest : public testing::Test { @@ -687,7 +687,7 @@ TEST_F(QuicPlatformTest, FailToPickUnsedPort) { // Fail bind call's to mimic port exhaustion. EXPECT_CALL(os_sys_calls, bind(_, _, _)) .WillRepeatedly(Return(Envoy::Api::SysCallIntResult{-1, SOCKET_ERROR_ADDR_IN_USE})); - EXPECT_DEATH_LOG_TO_STDERR(QuicPickServerPortForTestsOrDie(), "Failed to pick a port for test."); + EXPECT_DEATH(QuicPickServerPortForTestsOrDie(), "Failed to pick a port for test."); } TEST_F(QuicPlatformTest, TestEnvoyQuicBufferAllocator) { diff --git a/test/integration/header_prefix_integration_test.cc b/test/integration/header_prefix_integration_test.cc index 723effb513ff..e2e47831a27c 100644 --- a/test/integration/header_prefix_integration_test.cc +++ b/test/integration/header_prefix_integration_test.cc @@ -53,8 +53,7 @@ TEST_P(HeaderPrefixIntegrationTest, FailedCustomHeaderPrefix) { config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { bootstrap.set_header_prefix("x-custom-but-not-set"); }); - EXPECT_DEATH_LOG_TO_STDERR(initialize(), - "Attempting to change the header prefix after it has been used!"); + EXPECT_DEATH(initialize(), "Attempting to change the header prefix after it has been used!"); } INSTANTIATE_TEST_SUITE_P(Protocols, HeaderPrefixIntegrationTest, diff --git a/test/integration/xds_integration_test.cc b/test/integration/xds_integration_test.cc index b76f8c476b4a..f36b97d26ff7 100644 --- a/test/integration/xds_integration_test.cc +++ b/test/integration/xds_integration_test.cc @@ -452,8 +452,7 @@ TEST_P(LdsIntegrationTest, FailConfigLoad) { filter_chain->mutable_filters(0)->clear_typed_config(); filter_chain->mutable_filters(0)->set_name("grewgragra"); }); - EXPECT_DEATH_LOG_TO_STDERR(initialize(), - "Didn't find a registered implementation for name: 'grewgragra'"); + EXPECT_DEATH(initialize(), "Didn't find a registered implementation for name: 'grewgragra'"); } } // namespace } // namespace Envoy diff --git a/test/test_common/logging.cc b/test/test_common/logging.cc index 6d259a75f12b..8e398ce52947 100644 --- a/test/test_common/logging.cc +++ b/test/test_common/logging.cc @@ -24,8 +24,11 @@ LogLevelSetter::~LogLevelSetter() { } LogRecordingSink::LogRecordingSink(Logger::DelegatingLogSinkSharedPtr log_sink) - : Logger::SinkDelegate(log_sink) {} -LogRecordingSink::~LogRecordingSink() = default; + : Logger::SinkDelegate(log_sink) { + setDelegate(); +} + +LogRecordingSink::~LogRecordingSink() { restoreDelegate(); } void LogRecordingSink::log(absl::string_view msg) { previousDelegate()->log(msg); diff --git a/test/test_common/test_time_system_test.cc b/test/test_common/test_time_system_test.cc index b4733d023599..35fa9873f7ae 100644 --- a/test/test_common/test_time_system_test.cc +++ 
b/test/test_common/test_time_system_test.cc @@ -26,8 +26,8 @@ TEST_F(TestTimeSystemTest, TwoRealsSameReference) { TEST_F(TestTimeSystemTest, SimThenRealConflict) { SimulatedTimeSystem t1; - EXPECT_DEATH_LOG_TO_STDERR({ DangerousDeprecatedTestTime t2; }, - ".*Two different types of time-systems allocated.*"); + EXPECT_DEATH({ DangerousDeprecatedTestTime t2; }, + ".*Two different types of time-systems allocated.*"); } TEST_F(TestTimeSystemTest, SimThenRealSerial) { @@ -37,8 +37,7 @@ TEST_F(TestTimeSystemTest, SimThenRealSerial) { TEST_F(TestTimeSystemTest, RealThenSim) { DangerousDeprecatedTestTime t1; - EXPECT_DEATH_LOG_TO_STDERR({ SimulatedTimeSystem t2; }, - ".*Two different types of time-systems allocated.*"); + EXPECT_DEATH({ SimulatedTimeSystem t2; }, ".*Two different types of time-systems allocated.*"); } TEST_F(TestTimeSystemTest, RealThenSimSerial) { diff --git a/test/test_common/utility.h b/test/test_common/utility.h index 399f2d869d45..5c50fb9b005f 100644 --- a/test/test_common/utility.h +++ b/test/test_common/utility.h @@ -87,22 +87,6 @@ namespace Envoy { ADD_FAILURE() << "Unexpected exception: " << std::string(e.what()); \ } -/* - Macro to use instead of EXPECT_DEATH when stderr is produced by a logger. - It temporarily installs stderr sink and restores the original logger sink after the test - completes and stderr_sink object goes of of scope. - EXPECT_DEATH(statement, regex) test passes when statement causes crash and produces error message - matching regex. Test fails when statement does not crash or it crashes but message does not - match regex. If a message produced during crash is redirected away from strerr, the test fails. - By installing StderrSinkDelegate, the macro forces EXPECT_DEATH to send any output produced by - statement to stderr. -*/ -#define EXPECT_DEATH_LOG_TO_STDERR(statement, message) \ - do { \ - Envoy::Logger::StderrSinkDelegate stderr_sink(Envoy::Logger::Registry::getSink()); \ - EXPECT_DEATH(statement, message); \ - } while (false) - #define VERIFY_ASSERTION(statement) \ do { \ ::testing::AssertionResult status = statement; \ From 61c2816f704d11f66a507b76abc8f01c1816fe57 Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Tue, 28 Jul 2020 11:24:53 -0700 Subject: [PATCH 766/909] stats: Add additional constraints to histogram bucket configuration (#12312) This ensures that the configuration is valid. Note that this adds stricter validation to an existing proto field, but the field was only added 5 days ago, so only someone running HEAD, using this new config, with invalid/non-sensical config should be affected. Signed-off-by: Greg Greenway --- api/envoy/config/metrics/v3/stats.proto | 9 +++++++-- api/envoy/config/metrics/v4alpha/stats.proto | 9 +++++++-- .../envoy/config/metrics/v3/stats.proto | 9 +++++++-- .../envoy/config/metrics/v4alpha/stats.proto | 9 +++++++-- source/common/stats/histogram_impl.cc | 5 +++-- test/common/stats/histogram_impl_test.cc | 13 +++++++++++++ test/integration/integration_admin_test.cc | 4 ++-- test/integration/integration_admin_test.h | 2 +- 8 files changed, 47 insertions(+), 13 deletions(-) diff --git a/api/envoy/config/metrics/v3/stats.proto b/api/envoy/config/metrics/v3/stats.proto index 1c28a8f61065..275db1f6457a 100644 --- a/api/envoy/config/metrics/v3/stats.proto +++ b/api/envoy/config/metrics/v3/stats.proto @@ -294,8 +294,13 @@ message HistogramBucketSettings { // before tag-extraction, for example `cluster.exampleclustername.upstream_cx_length_ms`. 
type.matcher.v3.StringMatcher match = 1 [(validate.rules).message = {required: true}]; - // Each value is the upper bound of a successive bucket. - repeated double buckets = 2 [(validate.rules).repeated = {min_items: 1}]; + // Each value is the upper bound of a bucket. Each bucket must be greater than 0 and unique. + // The order of the buckets does not matter. + repeated double buckets = 2 [(validate.rules).repeated = { + min_items: 1 + unique: true + items {double {gt: 0.0}} + }]; } // Stats configuration proto schema for built-in *envoy.stat_sinks.statsd* sink. This sink does not support diff --git a/api/envoy/config/metrics/v4alpha/stats.proto b/api/envoy/config/metrics/v4alpha/stats.proto index e2c5ae9dc2c7..6265118cf9b8 100644 --- a/api/envoy/config/metrics/v4alpha/stats.proto +++ b/api/envoy/config/metrics/v4alpha/stats.proto @@ -297,8 +297,13 @@ message HistogramBucketSettings { // before tag-extraction, for example `cluster.exampleclustername.upstream_cx_length_ms`. type.matcher.v4alpha.StringMatcher match = 1 [(validate.rules).message = {required: true}]; - // Each value is the upper bound of a successive bucket. - repeated double buckets = 2 [(validate.rules).repeated = {min_items: 1}]; + // Each value is the upper bound of a bucket. Each bucket must be greater than 0 and unique. + // The order of the buckets does not matter. + repeated double buckets = 2 [(validate.rules).repeated = { + min_items: 1 + unique: true + items {double {gt: 0.0}} + }]; } // Stats configuration proto schema for built-in *envoy.stat_sinks.statsd* sink. This sink does not support diff --git a/generated_api_shadow/envoy/config/metrics/v3/stats.proto b/generated_api_shadow/envoy/config/metrics/v3/stats.proto index 4fa05259054e..8b66a83a55e6 100644 --- a/generated_api_shadow/envoy/config/metrics/v3/stats.proto +++ b/generated_api_shadow/envoy/config/metrics/v3/stats.proto @@ -292,8 +292,13 @@ message HistogramBucketSettings { // before tag-extraction, for example `cluster.exampleclustername.upstream_cx_length_ms`. type.matcher.v3.StringMatcher match = 1 [(validate.rules).message = {required: true}]; - // Each value is the upper bound of a successive bucket. - repeated double buckets = 2 [(validate.rules).repeated = {min_items: 1}]; + // Each value is the upper bound of a bucket. Each bucket must be greater than 0 and unique. + // The order of the buckets does not matter. + repeated double buckets = 2 [(validate.rules).repeated = { + min_items: 1 + unique: true + items {double {gt: 0.0}} + }]; } // Stats configuration proto schema for built-in *envoy.stat_sinks.statsd* sink. This sink does not support diff --git a/generated_api_shadow/envoy/config/metrics/v4alpha/stats.proto b/generated_api_shadow/envoy/config/metrics/v4alpha/stats.proto index e2c5ae9dc2c7..6265118cf9b8 100644 --- a/generated_api_shadow/envoy/config/metrics/v4alpha/stats.proto +++ b/generated_api_shadow/envoy/config/metrics/v4alpha/stats.proto @@ -297,8 +297,13 @@ message HistogramBucketSettings { // before tag-extraction, for example `cluster.exampleclustername.upstream_cx_length_ms`. type.matcher.v4alpha.StringMatcher match = 1 [(validate.rules).message = {required: true}]; - // Each value is the upper bound of a successive bucket. - repeated double buckets = 2 [(validate.rules).repeated = {min_items: 1}]; + // Each value is the upper bound of a bucket. Each bucket must be greater than 0 and unique. + // The order of the buckets does not matter. 
+ repeated double buckets = 2 [(validate.rules).repeated = { + min_items: 1 + unique: true + items {double {gt: 0.0}} + }]; } // Stats configuration proto schema for built-in *envoy.stat_sinks.statsd* sink. This sink does not support diff --git a/source/common/stats/histogram_impl.cc b/source/common/stats/histogram_impl.cc index da633c4d707a..a2b866dc112d 100644 --- a/source/common/stats/histogram_impl.cc +++ b/source/common/stats/histogram_impl.cc @@ -85,8 +85,9 @@ HistogramSettingsImpl::HistogramSettingsImpl(const envoy::config::metrics::v3::S : configs_([&config]() { std::vector configs; for (const auto& matcher : config.histogram_bucket_settings()) { - configs.emplace_back(matcher.match(), ConstSupportedBuckets{matcher.buckets().begin(), - matcher.buckets().end()}); + std::vector buckets{matcher.buckets().begin(), matcher.buckets().end()}; + std::sort(buckets.begin(), buckets.end()); + configs.emplace_back(matcher.match(), std::move(buckets)); } return configs; diff --git a/test/common/stats/histogram_impl_test.cc b/test/common/stats/histogram_impl_test.cc index 3cbde4b280d0..085e3d9a5a1a 100644 --- a/test/common/stats/histogram_impl_test.cc +++ b/test/common/stats/histogram_impl_test.cc @@ -38,6 +38,19 @@ TEST_F(HistogramSettingsImplTest, Basic) { EXPECT_EQ(settings_->buckets("abcd"), ConstSupportedBuckets({0.1, 2})); } +// Test that buckets are correctly sorted. +TEST_F(HistogramSettingsImplTest, Sorted) { + envoy::config::metrics::v3::HistogramBucketSettings setting; + setting.mutable_match()->set_exact("a"); + setting.mutable_buckets()->Add(0.1); + setting.mutable_buckets()->Add(2); + setting.mutable_buckets()->Add(1); // Out-of-order + buckets_configs_.push_back(setting); + + initialize(); + EXPECT_EQ(settings_->buckets("a"), ConstSupportedBuckets({0.1, 1, 2})); +} + // Test that only matching configurations are applied. TEST_F(HistogramSettingsImplTest, Matching) { { diff --git a/test/integration/integration_admin_test.cc b/test/integration/integration_admin_test.cc index cbfa1ef8559c..b5640bfcbac5 100644 --- a/test/integration/integration_admin_test.cc +++ b/test/integration/integration_admin_test.cc @@ -227,8 +227,8 @@ TEST_P(IntegrationAdminTest, Admin) { EXPECT_THAT(response->body(), HasSubstr("envoy_cluster_upstream_cx_active{envoy_cluster_name=\"cluster_0\"} 0\n")); - // Test that a specific bucket config is applied. Buckets 0-3 (inclusive) are set in initialize(). - for (int i = 0; i <= 3; i++) { + // Test that a specific bucket config is applied. Buckets 1-4 (inclusive) are set in initialize(). 
+ for (int i = 1; i <= 4; i++) { EXPECT_THAT( response->body(), HasSubstr(fmt::format("envoy_cluster_upstream_cx_connect_ms_bucket{{envoy_cluster_name=" diff --git a/test/integration/integration_admin_test.h b/test/integration/integration_admin_test.h index dc4b38be7fdc..a63649e7ed71 100644 --- a/test/integration/integration_admin_test.h +++ b/test/integration/integration_admin_test.h @@ -22,10 +22,10 @@ class IntegrationAdminTest : public HttpProtocolIntegrationTest { *bootstrap.mutable_stats_config()->mutable_histogram_bucket_settings(); envoy::config::metrics::v3::HistogramBucketSettings* setting = hist_settings.Add(); setting->mutable_match()->set_suffix("upstream_cx_connect_ms"); - setting->mutable_buckets()->Add(0); setting->mutable_buckets()->Add(1); setting->mutable_buckets()->Add(2); setting->mutable_buckets()->Add(3); + setting->mutable_buckets()->Add(4); }); HttpIntegrationTest::initialize(); } From 7ef68787ffd37792647d828ec82992091bf575f6 Mon Sep 17 00:00:00 2001 From: Sotiris Nanopoulos Date: Tue, 28 Jul 2020 11:57:41 -0700 Subject: [PATCH 767/909] Adds an env newLine helper function (#12290) Adds the helper function const std::string& TestEnvironment::newLine() in the test environment that can be used in test code for cross platform comparison of strings that contain new lines. Risk Level: low Testing: N/A Docs Changes: N/A Release Notes: N/A Signed-off-by: davinci26 Signed-off-by: Sotiris Nanopoulos --- .../http/cache/cache_filter_integration_test.cc | 6 ++++-- test/test_common/environment.h | 11 +++++++++++ 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/test/extensions/filters/http/cache/cache_filter_integration_test.cc b/test/extensions/filters/http/cache/cache_filter_integration_test.cc index 4ddb6786804f..21264d1d3f20 100644 --- a/test/extensions/filters/http/cache/cache_filter_integration_test.cc +++ b/test/extensions/filters/http/cache/cache_filter_integration_test.cc @@ -69,7 +69,8 @@ TEST_P(CacheIntegrationTest, MissInsertHit) { EXPECT_THAT(request->headers(), IsSupersetOfHeaders(response_headers)); EXPECT_EQ(request->headers().get(Http::Headers::get().Age), nullptr); EXPECT_EQ(request->body(), std::string(42, 'a')); - EXPECT_EQ(waitForAccessLog(access_log_name_), "- via_upstream\n"); + EXPECT_EQ(waitForAccessLog(access_log_name_), + fmt::format("- via_upstream{}", TestEnvironment::newLine)); } // Advance time, to verify the original date header is preserved. @@ -84,7 +85,8 @@ TEST_P(CacheIntegrationTest, MissInsertHit) { EXPECT_NE(request->headers().get(Http::Headers::get().Age), nullptr); // Advance time to force a log flush. simTime().advanceTimeWait(std::chrono::seconds(1)); - EXPECT_EQ(waitForAccessLog(access_log_name_, 1), "RFCF cache.response_from_cache_filter\n"); + EXPECT_EQ(waitForAccessLog(access_log_name_, 1), + fmt::format("RFCF cache.response_from_cache_filter{}", TestEnvironment::newLine)); } // Send the same GET request twice with body and trailers twice, then check that the response diff --git a/test/test_common/environment.h b/test/test_common/environment.h index 9434c59b7e6a..06a97895bb4d 100644 --- a/test/test_common/environment.h +++ b/test/test_common/environment.h @@ -93,6 +93,17 @@ class TestEnvironment { */ static const std::string& nullDevicePath(); + /** + * Obtain platform specific new line character(s) + * @return absl::string_view platform specific new line character(s) + */ + static constexpr absl::string_view newLine +#ifdef WIN32 + {"\r\n"}; +#else + {"\n"}; +#endif + /** * Obtain read-only test input data directory. 
* @param workspace the name of the Bazel workspace where the input data is. From f958d39fcd927603e15460870d36febd27156e05 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Tue, 28 Jul 2020 16:01:34 -0400 Subject: [PATCH 768/909] test: regression testing handling of out of bounds response codes (#12329) Risk Level: n/a (test only) Testing: yes Docs Changes: no Release Notes: no Signed-off-by: Alyssa Wilk --- test/integration/protocol_integration_test.cc | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/test/integration/protocol_integration_test.cc b/test/integration/protocol_integration_test.cc index fc5b7aa80642..b1cf6457fe6b 100644 --- a/test/integration/protocol_integration_test.cc +++ b/test/integration/protocol_integration_test.cc @@ -157,6 +157,18 @@ TEST_P(ProtocolIntegrationTest, RouterRedirect) { response->headers().get(Http::Headers::get().Location)->value().getStringView()); } +TEST_P(ProtocolIntegrationTest, UnknownResponsecode) { + initialize(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + + Http::TestResponseHeaderMapImpl response_headers{{":status", "600"}}; + auto response = sendRequestAndWaitForResponse(default_request_headers_, 0, response_headers, 0); + + ASSERT_TRUE(response->complete()); + EXPECT_EQ("600", response->headers().getStatusValue()); +} + // Add a health check filter and verify correct computation of health based on upstream status. TEST_P(ProtocolIntegrationTest, ComputedHealthCheck) { config_helper_.addFilter(R"EOF( From 3dedf1693f45239c670c5ba7598db44ff2e32c2f Mon Sep 17 00:00:00 2001 From: Dhi Aurrahman Date: Wed, 29 Jul 2020 06:17:25 +0700 Subject: [PATCH 769/909] docs: Fix refs in Lua docs (#12313) This is a docs only change, mostly on how to refer to LuaPerRoute info. Signed-off-by: Dhi Aurrahman --- .../extensions/filters/http/lua/v3/lua.proto | 2 +- .../http/http_filters/lua_filter.rst | 21 ++++++++++--------- .../extensions/filters/http/lua/v3/lua.proto | 2 +- 3 files changed, 13 insertions(+), 12 deletions(-) diff --git a/api/envoy/extensions/filters/http/lua/v3/lua.proto b/api/envoy/extensions/filters/http/lua/v3/lua.proto index 10ac92e83b01..622726744de6 100644 --- a/api/envoy/extensions/filters/http/lua/v3/lua.proto +++ b/api/envoy/extensions/filters/http/lua/v3/lua.proto @@ -27,7 +27,7 @@ message Lua { // strings so complex scripts can be easily expressed inline in the configuration. string inline_code = 1 [(validate.rules).string = {min_bytes: 1}]; - // Map of named Lua source codes that can be referenced in :ref:` LuaPerRoute + // Map of named Lua source codes that can be referenced in :ref:`LuaPerRoute // `. The Lua source codes can be // loaded from inline string or local files. // diff --git a/docs/root/configuration/http/http_filters/lua_filter.rst b/docs/root/configuration/http/http_filters/lua_filter.rst index 3969e42e5d61..5f9e3d02cb0a 100644 --- a/docs/root/configuration/http/http_filters/lua_filter.rst +++ b/docs/root/configuration/http/http_filters/lua_filter.rst @@ -59,7 +59,7 @@ Configuration * :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.http.lua*. -A simple example of configuring Lua HTTP filter that contains only :ref:`inline_code +A simple example of configuring Lua HTTP filter that contains only :ref:`inline_code ` is as follow: .. code-block:: yaml @@ -77,14 +77,14 @@ A simple example of configuring Lua HTTP filter that contains only :ref:`inline_ -- Do something. 
end -By default, Lua script defined in ``inline_code`` will be treated as a ``GLOBAL`` script. Envoy will +By default, Lua script defined in ``inline_code`` will be treated as a ``GLOBAL`` script. Envoy will execute it for every HTTP request. Per-Route Configuration ----------------------- -The Lua HTTP filter also can be disabled or overridden on a per-route basis by providing a -:ref:`LuaPerRoute ` configuration +The Lua HTTP filter also can be disabled or overridden on a per-route basis by providing a +:ref:`LuaPerRoute ` configuration on the virtual host, route, or weighted cluster. As a concrete example, given the following Lua filter configuration: @@ -110,8 +110,9 @@ As a concrete example, given the following Lua filter configuration: response_handle:logInfo("Bye Bye.") end -The HTTP Lua filter can be disabled on some virtual host, route, or weighted cluster by the -LuaPerRoute configuration as follow: +The HTTP Lua filter can be disabled on some virtual host, route, or weighted cluster by the +:ref:`LuaPerRoute ` configuration as +follow: .. code-block:: yaml @@ -119,7 +120,7 @@ LuaPerRoute configuration as follow: envoy.filters.http.lua: disabled: true -We can also refer to a Lua script in the filter configuration by specifying a name in LuaPerRoute. +We can also refer to a Lua script in the filter configuration by specifying a name in LuaPerRoute. The ``GLOBAL`` Lua script will be overridden by the referenced script: .. code-block:: yaml @@ -130,10 +131,10 @@ The ``GLOBAL`` Lua script will be overridden by the referenced script: .. attention:: - The name ``GLOBAL`` is reserved for :ref:`Lua.inline_code - `. Therefore, do not use + The name ``GLOBAL`` is reserved for :ref:`Lua.inline_code + `. Therefore, do not use ``GLOBAL`` as name for other Lua scripts. - + Script examples --------------- diff --git a/generated_api_shadow/envoy/extensions/filters/http/lua/v3/lua.proto b/generated_api_shadow/envoy/extensions/filters/http/lua/v3/lua.proto index 10ac92e83b01..622726744de6 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/lua/v3/lua.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/lua/v3/lua.proto @@ -27,7 +27,7 @@ message Lua { // strings so complex scripts can be easily expressed inline in the configuration. string inline_code = 1 [(validate.rules).string = {min_bytes: 1}]; - // Map of named Lua source codes that can be referenced in :ref:` LuaPerRoute + // Map of named Lua source codes that can be referenced in :ref:`LuaPerRoute // `. The Lua source codes can be // loaded from inline string or local files. // From 6eb7e642d33f5a55b63c367188f09819925fca34 Mon Sep 17 00:00:00 2001 From: Rama Chavali Date: Wed, 29 Jul 2020 04:48:29 +0530 Subject: [PATCH 770/909] listener: add listener address in log (#12247) When Envoy rejects a listener because it has a listener with the same name on a different address, it just logs the new address. It would help to log the existing address also. 
Signed-off-by: Rama Chavali --- source/server/listener_manager_impl.cc | 24 ++++++++++++++++------- test/server/listener_manager_impl_test.cc | 22 ++++++++++++++++++++- 2 files changed, 38 insertions(+), 8 deletions(-) diff --git a/source/server/listener_manager_impl.cc b/source/server/listener_manager_impl.cc index 77fc33d33464..ed501d8b4a9f 100644 --- a/source/server/listener_manager_impl.cc +++ b/source/server/listener_manager_impl.cc @@ -426,13 +426,23 @@ bool ListenerManagerImpl::addOrUpdateListenerInternal( // avoids confusion during updates and allows us to use the same bound address. Note that in // the case of port 0 binding, the new listener will implicitly use the same bound port from // the existing listener. - if ((existing_warming_listener != warming_listeners_.end() && - *(*existing_warming_listener)->address() != *new_listener->address()) || - (existing_active_listener != active_listeners_.end() && - *(*existing_active_listener)->address() != *new_listener->address())) { - const std::string message = fmt::format( - "error updating listener: '{}' has a different address '{}' from existing listener", name, - new_listener->address()->asString()); + bool active_listener_exists = false; + bool warming_listener_exists = false; + if (existing_warming_listener != warming_listeners_.end() && + *(*existing_warming_listener)->address() != *new_listener->address()) { + warming_listener_exists = true; + } + if (existing_active_listener != active_listeners_.end() && + *(*existing_active_listener)->address() != *new_listener->address()) { + active_listener_exists = true; + } + if (active_listener_exists || warming_listener_exists) { + const std::string message = + fmt::format("error updating listener: '{}' has a different address '{}' from existing " + "listener address '{}'", + name, new_listener->address()->asString(), + warming_listener_exists ? (*existing_warming_listener)->address()->asString() + : (*existing_active_listener)->address()->asString()); ENVOY_LOG(warn, "{}", message); throw EnvoyException(message); } diff --git a/test/server/listener_manager_impl_test.cc b/test/server/listener_manager_impl_test.cc index c2cee57c8044..22dfc81d3bc3 100644 --- a/test/server/listener_manager_impl_test.cc +++ b/test/server/listener_manager_impl_test.cc @@ -612,7 +612,7 @@ drain_type: modify_only "", true), EnvoyException, "error updating listener: 'foo' has a different address " - "'127.0.0.1:1235' from existing listener"); + "'127.0.0.1:1235' from existing listener address '127.0.0.1:1234'"); EXPECT_CALL(*listener_foo, onDestroy()); } @@ -766,6 +766,15 @@ name: "foo" filter_chains: {} )EOF"; + const std::string listener_foo_address_update_yaml = R"EOF( +name: "foo" +address: + socket_address: + address: "127.0.0.1" + port_value: 1235 +filter_chains: {} + )EOF"; + Init::ManagerImpl server_init_mgr("server-init-manager"); Init::ExpectableWatcherImpl server_init_watcher("server-init-watcher"); { // Add and remove a listener before starting workers. @@ -849,6 +858,17 @@ version_info: version1 seconds: 2002002002 nanos: 2000000 )EOF"); + + // While it is in warming state, try updating the address. It should fail. 
+ ListenerHandle* listener_foo3 = expectListenerCreate(true, true); + EXPECT_CALL(*listener_foo3, onDestroy()); + EXPECT_THROW_WITH_MESSAGE( + manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_address_update_yaml), + "version3", true), + EnvoyException, + "error updating listener: 'foo' has a different address " + "'127.0.0.1:1235' from existing listener address '127.0.0.1:1234'"); + // Delete foo-listener again. EXPECT_CALL(*listener_foo2, onDestroy()); EXPECT_TRUE(manager_->removeListener("foo")); From f6e90f2966887b25a8b6d5dd7f13ffb32865d66b Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Wed, 29 Jul 2020 08:02:52 -0400 Subject: [PATCH 771/909] upstream: per-upstream prefetching (#12135) Implementing per-upstream prefetching, useful for high QPS or latency-sensitive services. Risk Level: low (off by default) Testing: new unit tests, integration test Docs Changes: n/a Release Notes: not yet Signed-off-by: Alyssa Wilk --- api/envoy/config/cluster/v3/cluster.proto | 35 ++- .../config/cluster/v4alpha/cluster.proto | 38 ++- .../envoy/config/cluster/v3/cluster.proto | 35 ++- .../config/cluster/v4alpha/cluster.proto | 38 ++- include/envoy/upstream/upstream.h | 5 + source/common/conn_pool/conn_pool_base.cc | 192 ++++++++----- source/common/conn_pool/conn_pool_base.h | 83 +++--- source/common/http/conn_pool_base.cc | 10 +- source/common/http/http1/conn_pool.cc | 2 +- source/common/runtime/runtime_features.cc | 1 + source/common/tcp/conn_pool.cc | 4 +- source/common/tcp/conn_pool.h | 16 +- source/common/upstream/upstream_impl.cc | 2 + source/common/upstream/upstream_impl.h | 2 + test/common/http/http1/conn_pool_test.cc | 2 +- test/common/http/http2/BUILD | 1 + test/common/http/http2/conn_pool_test.cc | 258 +++++++++++++++--- test/integration/protocol_integration_test.cc | 21 ++ test/mocks/event/mocks.h | 1 - test/mocks/upstream/cluster_info.cc | 1 + test/mocks/upstream/cluster_info.h | 1 + tools/spelling/spelling_dictionary.txt | 3 + 22 files changed, 592 insertions(+), 159 deletions(-) diff --git a/api/envoy/config/cluster/v3/cluster.proto b/api/envoy/config/cluster/v3/cluster.proto index b4ea53bb0933..9edba75862e6 100644 --- a/api/envoy/config/cluster/v3/cluster.proto +++ b/api/envoy/config/cluster/v3/cluster.proto @@ -43,7 +43,7 @@ message ClusterCollection { } // Configuration for a single upstream cluster. -// [#next-free-field: 50] +// [#next-free-field: 51] message Cluster { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Cluster"; @@ -541,6 +541,35 @@ message Cluster { google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {nanos: 1000000}}]; } + // [#not-implemented-hide:] + message PrefetchPolicy { + // Indicates how many many streams (rounded up) can be anticipated per-upstream for each + // stream, useful for high-QPS or latency-sensitive services. + // + // For example if this is 2, for an incoming HTTP/1.1 stream, 2 connections will be + // established, one for the new incoming stream, and one for a presumed follow-up stream. For + // HTTP/2, only one connection would be established by default as one connection can + // serve both the original and presumed follow-up stream. + // + // In steady state for non-multiplexed connections a value of 1.5 would mean if there were 100 + // active streams, there would be 100 connections in use, and 50 connections prefetched. 
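To make the steady-state arithmetic in the comment above concrete, here is a purely illustrative standalone calculation; the constants mirror the example in the comment and nothing below is Envoy code:

    #include <cmath>
    #include <cstdint>
    #include <iostream>

    int main() {
      // Non-multiplexed upstream (one stream per connection) with a 1.5 prefetch ratio.
      const double prefetch_ratio = 1.5;
      const uint64_t active_streams = 100;
      const auto desired_connections =
          static_cast<uint64_t>(std::ceil(active_streams * prefetch_ratio));
      std::cout << "in use: " << active_streams
                << ", prefetched: " << desired_connections - active_streams << "\n";
      // Prints "in use: 100, prefetched: 50", matching the steady-state example above.
      return 0;
    }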
+ // This might be a useful value for something like short lived single-use connections, + // for example proxying HTTP/1.1 if keep-alive were false and each stream resulted in connection + // termination. It would likely be overkill for long lived connections, such as TCP proxying SMTP + // or regular HTTP/1.1 with keep-alive. For long lived traffic, a value of 1.05 would be more + // reasonable, where for every 100 connections, 5 prefetched connections would be in the queue + // in case of unexpected disconnects where the connection could not be reused. + // + // If this value is not set, or set explicitly to one, Envoy will fetch as many connections + // as needed to serve streams in flight. This means in steady state if a connection is torn down, + // a subsequent streams will pay an upstream-rtt latency penalty waiting for streams to be + // prefetched. + // + // This is limited somewhat arbitrarily to 3 because prefetching connections too aggressively can + // harm latency more than the prefetching helps. + google.protobuf.DoubleValue prefetch_ratio = 1 [(validate.rules).double = {lte: 3.0 gte: 1.0}]; + } + reserved 12, 15, 7, 11, 35; reserved "hosts", "tls_context", "extension_protocol_options"; @@ -884,6 +913,10 @@ message Cluster { // Configuration to track optional cluster stats. TrackClusterStats track_cluster_stats = 49; + + // [#not-implemented-hide:] + // Prefetch configuration for this cluster. + PrefetchPolicy prefetch_policy = 50; } // [#not-implemented-hide:] Extensible load balancing policy configuration. diff --git a/api/envoy/config/cluster/v4alpha/cluster.proto b/api/envoy/config/cluster/v4alpha/cluster.proto index 4172b07e0538..07d2c7b9e65c 100644 --- a/api/envoy/config/cluster/v4alpha/cluster.proto +++ b/api/envoy/config/cluster/v4alpha/cluster.proto @@ -45,7 +45,7 @@ message ClusterCollection { } // Configuration for a single upstream cluster. -// [#next-free-field: 50] +// [#next-free-field: 51] message Cluster { option (udpa.annotations.versioning).previous_message_type = "envoy.config.cluster.v3.Cluster"; @@ -545,6 +545,38 @@ message Cluster { google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {nanos: 1000000}}]; } + // [#not-implemented-hide:] + message PrefetchPolicy { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.Cluster.PrefetchPolicy"; + + // Indicates how many many streams (rounded up) can be anticipated per-upstream for each + // stream, useful for high-QPS or latency-sensitive services. + // + // For example if this is 2, for an incoming HTTP/1.1 stream, 2 connections will be + // established, one for the new incoming stream, and one for a presumed follow-up stream. For + // HTTP/2, only one connection would be established by default as one connection can + // serve both the original and presumed follow-up stream. + // + // In steady state for non-multiplexed connections a value of 1.5 would mean if there were 100 + // active streams, there would be 100 connections in use, and 50 connections prefetched. + // This might be a useful value for something like short lived single-use connections, + // for example proxying HTTP/1.1 if keep-alive were false and each stream resulted in connection + // termination. It would likely be overkill for long lived connections, such as TCP proxying SMTP + // or regular HTTP/1.1 with keep-alive. 
For long lived traffic, a value of 1.05 would be more + // reasonable, where for every 100 connections, 5 prefetched connections would be in the queue + // in case of unexpected disconnects where the connection could not be reused. + // + // If this value is not set, or set explicitly to one, Envoy will fetch as many connections + // as needed to serve streams in flight. This means in steady state if a connection is torn down, + // a subsequent streams will pay an upstream-rtt latency penalty waiting for streams to be + // prefetched. + // + // This is limited somewhat arbitrarily to 3 because prefetching connections too aggressively can + // harm latency more than the prefetching helps. + google.protobuf.DoubleValue prefetch_ratio = 1 [(validate.rules).double = {lte: 3.0 gte: 1.0}]; + } + reserved 12, 15, 7, 11, 35, 47; reserved "hosts", "tls_context", "extension_protocol_options", "track_timeout_budgets"; @@ -876,6 +908,10 @@ message Cluster { // Configuration to track optional cluster stats. TrackClusterStats track_cluster_stats = 49; + + // [#not-implemented-hide:] + // Prefetch configuration for this cluster. + PrefetchPolicy prefetch_policy = 50; } // [#not-implemented-hide:] Extensible load balancing policy configuration. diff --git a/generated_api_shadow/envoy/config/cluster/v3/cluster.proto b/generated_api_shadow/envoy/config/cluster/v3/cluster.proto index ac93934e72bf..1f501359733b 100644 --- a/generated_api_shadow/envoy/config/cluster/v3/cluster.proto +++ b/generated_api_shadow/envoy/config/cluster/v3/cluster.proto @@ -44,7 +44,7 @@ message ClusterCollection { } // Configuration for a single upstream cluster. -// [#next-free-field: 50] +// [#next-free-field: 51] message Cluster { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Cluster"; @@ -541,6 +541,35 @@ message Cluster { google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {nanos: 1000000}}]; } + // [#not-implemented-hide:] + message PrefetchPolicy { + // Indicates how many many streams (rounded up) can be anticipated per-upstream for each + // stream, useful for high-QPS or latency-sensitive services. + // + // For example if this is 2, for an incoming HTTP/1.1 stream, 2 connections will be + // established, one for the new incoming stream, and one for a presumed follow-up stream. For + // HTTP/2, only one connection would be established by default as one connection can + // serve both the original and presumed follow-up stream. + // + // In steady state for non-multiplexed connections a value of 1.5 would mean if there were 100 + // active streams, there would be 100 connections in use, and 50 connections prefetched. + // This might be a useful value for something like short lived single-use connections, + // for example proxying HTTP/1.1 if keep-alive were false and each stream resulted in connection + // termination. It would likely be overkill for long lived connections, such as TCP proxying SMTP + // or regular HTTP/1.1 with keep-alive. For long lived traffic, a value of 1.05 would be more + // reasonable, where for every 100 connections, 5 prefetched connections would be in the queue + // in case of unexpected disconnects where the connection could not be reused. + // + // If this value is not set, or set explicitly to one, Envoy will fetch as many connections + // as needed to serve streams in flight. This means in steady state if a connection is torn down, + // a subsequent streams will pay an upstream-rtt latency penalty waiting for streams to be + // prefetched. 
+ // + // This is limited somewhat arbitrarily to 3 because prefetching connections too aggressively can + // harm latency more than the prefetching helps. + google.protobuf.DoubleValue prefetch_ratio = 1 [(validate.rules).double = {lte: 3.0 gte: 1.0}]; + } + reserved 12, 15; // Configuration to use different transport sockets for different endpoints. @@ -883,6 +912,10 @@ message Cluster { // Configuration to track optional cluster stats. TrackClusterStats track_cluster_stats = 49; + // [#not-implemented-hide:] + // Prefetch configuration for this cluster. + PrefetchPolicy prefetch_policy = 50; + repeated core.v3.Address hidden_envoy_deprecated_hosts = 7 [deprecated = true]; envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext hidden_envoy_deprecated_tls_context = diff --git a/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto b/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto index facc5d38d16c..87e35b70009c 100644 --- a/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto +++ b/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto @@ -45,7 +45,7 @@ message ClusterCollection { } // Configuration for a single upstream cluster. -// [#next-free-field: 50] +// [#next-free-field: 51] message Cluster { option (udpa.annotations.versioning).previous_message_type = "envoy.config.cluster.v3.Cluster"; @@ -545,6 +545,38 @@ message Cluster { google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {nanos: 1000000}}]; } + // [#not-implemented-hide:] + message PrefetchPolicy { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.cluster.v3.Cluster.PrefetchPolicy"; + + // Indicates how many many streams (rounded up) can be anticipated per-upstream for each + // stream, useful for high-QPS or latency-sensitive services. + // + // For example if this is 2, for an incoming HTTP/1.1 stream, 2 connections will be + // established, one for the new incoming stream, and one for a presumed follow-up stream. For + // HTTP/2, only one connection would be established by default as one connection can + // serve both the original and presumed follow-up stream. + // + // In steady state for non-multiplexed connections a value of 1.5 would mean if there were 100 + // active streams, there would be 100 connections in use, and 50 connections prefetched. + // This might be a useful value for something like short lived single-use connections, + // for example proxying HTTP/1.1 if keep-alive were false and each stream resulted in connection + // termination. It would likely be overkill for long lived connections, such as TCP proxying SMTP + // or regular HTTP/1.1 with keep-alive. For long lived traffic, a value of 1.05 would be more + // reasonable, where for every 100 connections, 5 prefetched connections would be in the queue + // in case of unexpected disconnects where the connection could not be reused. + // + // If this value is not set, or set explicitly to one, Envoy will fetch as many connections + // as needed to serve streams in flight. This means in steady state if a connection is torn down, + // a subsequent streams will pay an upstream-rtt latency penalty waiting for streams to be + // prefetched. + // + // This is limited somewhat arbitrarily to 3 because prefetching connections too aggressively can + // harm latency more than the prefetching helps. 
+ google.protobuf.DoubleValue prefetch_ratio = 1 [(validate.rules).double = {lte: 3.0 gte: 1.0}]; + } + reserved 12, 15, 7, 11, 35; reserved "hosts", "tls_context", "extension_protocol_options"; @@ -888,6 +920,10 @@ message Cluster { // Configuration to track optional cluster stats. TrackClusterStats track_cluster_stats = 49; + + // [#not-implemented-hide:] + // Prefetch configuration for this cluster. + PrefetchPolicy prefetch_policy = 50; } // [#not-implemented-hide:] Extensible load balancing policy configuration. diff --git a/include/envoy/upstream/upstream.h b/include/envoy/upstream/upstream.h index b2d72e2cea24..a7a4af39cd21 100644 --- a/include/envoy/upstream/upstream.h +++ b/include/envoy/upstream/upstream.h @@ -730,6 +730,11 @@ class ClusterInfo { */ virtual const absl::optional idleTimeout() const PURE; + /** + * @return how many streams should be anticipated per each current stream. + */ + virtual float prefetchRatio() const PURE; + /** * @return soft limit on size of the cluster's connections read and write buffers. */ diff --git a/source/common/conn_pool/conn_pool_base.cc b/source/common/conn_pool/conn_pool_base.cc index 3c2c0e648db5..573bfd50c9f4 100644 --- a/source/common/conn_pool/conn_pool_base.cc +++ b/source/common/conn_pool/conn_pool_base.cc @@ -33,11 +33,44 @@ void ConnPoolImplBase::destructAllConnections() { dispatcher_.clearDeferredDeleteList(); } -void ConnPoolImplBase::tryCreateNewConnection() { - if (pending_requests_.size() <= connecting_request_capacity_) { - // There are already enough CONNECTING connections for the number - // of queued requests. - return; +bool ConnPoolImplBase::shouldCreateNewConnection() const { + // The number of streams we want to be provisioned for is the number of + // pending and active streams times the prefetch ratio. + // The number of streams we are (theoretically) provisioned for is the + // connecting stream capacity plus the number of active streams. + // + // If prefetch ratio is not set, it defaults to 1, and this simplifies to the + // legacy value of pending_streams_.size() > connecting_stream_capacity_ + return (pending_streams_.size() + num_active_streams_) * prefetchRatio() > + (connecting_stream_capacity_ + num_active_streams_); +} + +float ConnPoolImplBase::prefetchRatio() const { + if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.allow_prefetch")) { + return host_->cluster().prefetchRatio(); + } else { + return 1.0; + } +} + +void ConnPoolImplBase::tryCreateNewConnections() { + // Somewhat arbitrarily cap the number of connections prefetched due to new + // incoming connections. The prefetch ratio is capped at 3, so in steady + // state, no more than 3 connections should be prefetched. If hosts go + // unhealthy, and connections are not immediately prefetched, it could be that + // many connections are desired when the host becomes healthy again, but + // overwhelming it with connections is not desirable. + for (int i = 0; i < 3; ++i) { + if (!tryCreateNewConnection()) { + return; + } + } +} + +bool ConnPoolImplBase::tryCreateNewConnection() { + // There are already enough CONNECTING connections for the number of queued streams. 
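The capacity comparison used by shouldCreateNewConnection() can be restated as a free function; this is a sketch with placeholder parameters standing in for the pool's members, not the implementation itself:

    #include <cstdint>

    // Prefetch another connection while anticipated load (scaled by the prefetch ratio)
    // exceeds the capacity already provisioned by active plus connecting clients.
    bool shouldCreateNewConnection(uint64_t pending_streams, uint64_t active_streams,
                                   uint64_t connecting_stream_capacity, float prefetch_ratio) {
      // With a ratio of 1.0 this reduces to the legacy check
      // pending_streams > connecting_stream_capacity.
      return (pending_streams + active_streams) * prefetch_ratio >
             connecting_stream_capacity + active_streams;
    }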
+ if (!shouldCreateNewConnection()) { + return false; } const bool can_create_connection = @@ -47,17 +80,18 @@ void ConnPoolImplBase::tryCreateNewConnection() { } // If we are at the connection circuit-breaker limit due to other upstreams having // too many open connections, and this upstream has no connections, always create one, to - // prevent pending requests being queued to this upstream with no way to be processed. + // prevent pending streams being queued to this upstream with no way to be processed. if (can_create_connection || (ready_clients_.empty() && busy_clients_.empty() && connecting_clients_.empty())) { ENVOY_LOG(debug, "creating a new connection"); ActiveClientPtr client = instantiateActiveClient(); ASSERT(client->state_ == ActiveClient::State::CONNECTING); - ASSERT(std::numeric_limits::max() - connecting_request_capacity_ >= + ASSERT(std::numeric_limits::max() - connecting_stream_capacity_ >= client->effectiveConcurrentRequestLimit()); - connecting_request_capacity_ += client->effectiveConcurrentRequestLimit(); + connecting_stream_capacity_ += client->effectiveConcurrentRequestLimit(); client->moveIntoList(std::move(client), owningList(client->state_)); } + return can_create_connection; } void ConnPoolImplBase::attachRequestToClient(Envoy::ConnectionPool::ActiveClient& client, @@ -65,24 +99,24 @@ void ConnPoolImplBase::attachRequestToClient(Envoy::ConnectionPool::ActiveClient ASSERT(client.state_ == Envoy::ConnectionPool::ActiveClient::State::READY); if (!host_->cluster().resourceManager(priority_).requests().canCreate()) { - ENVOY_LOG(debug, "max requests overflow"); + ENVOY_LOG(debug, "max streams overflow"); onPoolFailure(client.real_host_description_, absl::string_view(), ConnectionPool::PoolFailureReason::Overflow, context); host_->cluster().stats().upstream_rq_pending_overflow_.inc(); } else { ENVOY_CONN_LOG(debug, "creating stream", client); - client.remaining_requests_--; - if (client.remaining_requests_ == 0) { - ENVOY_CONN_LOG(debug, "maximum requests per connection, DRAINING", client); + client.remaining_streams_--; + if (client.remaining_streams_ == 0) { + ENVOY_CONN_LOG(debug, "maximum streams per connection, DRAINING", client); host_->cluster().stats().upstream_cx_max_requests_.inc(); transitionActiveClientState(client, Envoy::ConnectionPool::ActiveClient::State::DRAINING); - } else if (client.numActiveRequests() + 1 >= client.concurrent_request_limit_) { - // As soon as the new request is created, the client will be maxed out. + } else if (client.numActiveRequests() + 1 >= client.concurrent_stream_limit_) { + // As soon as the new stream is created, the client will be maxed out. 
transitionActiveClientState(client, Envoy::ConnectionPool::ActiveClient::State::BUSY); } - num_active_requests_++; + num_active_streams_++; host_->stats().rq_total_.inc(); host_->stats().rq_active_.inc(); host_->cluster().stats().upstream_rq_total_.inc(); @@ -94,22 +128,22 @@ void ConnPoolImplBase::attachRequestToClient(Envoy::ConnectionPool::ActiveClient } void ConnPoolImplBase::onRequestClosed(Envoy::ConnectionPool::ActiveClient& client, - bool delay_attaching_request) { + bool delay_attaching_stream) { ENVOY_CONN_LOG(debug, "destroying stream: {} remaining", client, client.numActiveRequests()); - ASSERT(num_active_requests_ > 0); - num_active_requests_--; + ASSERT(num_active_streams_ > 0); + num_active_streams_--; host_->stats().rq_active_.dec(); host_->cluster().stats().upstream_rq_active_.dec(); host_->cluster().resourceManager(priority_).requests().dec(); if (client.state_ == ActiveClient::State::DRAINING && client.numActiveRequests() == 0) { - // Close out the draining client if we no longer have active requests. + // Close out the draining client if we no longer have active streams. client.close(); } else if (client.state_ == ActiveClient::State::BUSY) { - // A request was just ended, so we should be below the limit now. - ASSERT(client.numActiveRequests() < client.concurrent_request_limit_); + // A stream was just ended, so we should be below the limit now. + ASSERT(client.numActiveRequests() < client.concurrent_stream_limit_); transitionActiveClientState(client, ActiveClient::State::READY); - if (!delay_attaching_request) { + if (!delay_attaching_stream) { onUpstreamReady(); } } @@ -120,6 +154,9 @@ ConnectionPool::Cancellable* ConnPoolImplBase::newStream(AttachContext& context) ActiveClient& client = *ready_clients_.front(); ENVOY_CONN_LOG(debug, "using existing connection", client); attachRequestToClient(client, context); + // Even if there's a ready client, we may want to prefetch a new connection + // to handle the next incoming stream. + tryCreateNewConnections(); return nullptr; } @@ -127,12 +164,12 @@ ConnectionPool::Cancellable* ConnPoolImplBase::newStream(AttachContext& context) ConnectionPool::Cancellable* pending = newPendingRequest(context); // This must come after newPendingRequest() because this function uses the - // length of pending_requests_ to determine if a new connection is needed. - tryCreateNewConnection(); + // length of pending_streams_ to determine if a new connection is needed. + tryCreateNewConnections(); return pending; } else { - ENVOY_LOG(debug, "max pending requests overflow"); + ENVOY_LOG(debug, "max pending streams overflow"); onPoolFailure(nullptr, absl::string_view(), ConnectionPool::PoolFailureReason::Overflow, context); host_->cluster().stats().upstream_rq_pending_overflow_.inc(); @@ -141,12 +178,12 @@ ConnectionPool::Cancellable* ConnPoolImplBase::newStream(AttachContext& context) } void ConnPoolImplBase::onUpstreamReady() { - while (!pending_requests_.empty() && !ready_clients_.empty()) { + while (!pending_streams_.empty() && !ready_clients_.empty()) { ActiveClientPtr& client = ready_clients_.front(); - ENVOY_CONN_LOG(debug, "attaching to next request", *client); - // Pending requests are pushed onto the front, so pull from the back. - attachRequestToClient(*client, pending_requests_.back()->context()); - pending_requests_.pop_back(); + ENVOY_CONN_LOG(debug, "attaching to next stream", *client); + // Pending streams are pushed onto the front, so pull from the back. 
+ attachRequestToClient(*client, pending_streams_.back()->context()); + pending_streams_.pop_back(); } } @@ -197,7 +234,7 @@ void ConnPoolImplBase::closeIdleConnections() { } } - if (pending_requests_.empty()) { + if (pending_streams_.empty()) { for (auto& client : connecting_clients_) { to_close.push_back(client.get()); } @@ -211,8 +248,8 @@ void ConnPoolImplBase::closeIdleConnections() { void ConnPoolImplBase::drainConnectionsImpl() { closeIdleConnections(); - // closeIdleConnections() closes all connections in ready_clients_ with no active requests, - // so all remaining entries in ready_clients_ are serving requests. Move them and all entries + // closeIdleConnections() closes all connections in ready_clients_ with no active streams, + // so all remaining entries in ready_clients_ are serving streams. Move them and all entries // in busy_clients_ to draining. while (!ready_clients_.empty()) { transitionActiveClientState(*ready_clients_.front(), ActiveClient::State::DRAINING); @@ -233,7 +270,7 @@ void ConnPoolImplBase::checkForDrained() { closeIdleConnections(); - if (pending_requests_.empty() && ready_clients_.empty() && busy_clients_.empty() && + if (pending_streams_.empty() && ready_clients_.empty() && busy_clients_.empty() && connecting_clients_.empty()) { ENVOY_LOG(debug, "invoking drained callbacks"); for (const Instance::DrainedCb& cb : drained_callbacks_) { @@ -245,8 +282,8 @@ void ConnPoolImplBase::checkForDrained() { void ConnPoolImplBase::onConnectionEvent(ActiveClient& client, absl::string_view failure_reason, Network::ConnectionEvent event) { if (client.state_ == ActiveClient::State::CONNECTING) { - ASSERT(connecting_request_capacity_ >= client.effectiveConcurrentRequestLimit()); - connecting_request_capacity_ -= client.effectiveConcurrentRequestLimit(); + ASSERT(connecting_stream_capacity_ >= client.effectiveConcurrentRequestLimit()); + connecting_stream_capacity_ -= client.effectiveConcurrentRequestLimit(); } if (event == Network::ConnectionEvent::RemoteClose || @@ -255,8 +292,8 @@ void ConnPoolImplBase::onConnectionEvent(ActiveClient& client, absl::string_view ENVOY_CONN_LOG(debug, "client disconnected, failure reason: {}", client, failure_reason); Envoy::Upstream::reportUpstreamCxDestroy(host_, event); - const bool incomplete_request = client.closingWithIncompleteRequest(); - if (incomplete_request) { + const bool incomplete_stream = client.closingWithIncompleteRequest(); + if (incomplete_stream) { Envoy::Upstream::reportUpstreamCxDestroyActiveRequest(host_, event); } @@ -274,12 +311,15 @@ void ConnPoolImplBase::onConnectionEvent(ActiveClient& client, absl::string_view } // Raw connect failures should never happen under normal circumstances. If we have an upstream - // that is behaving badly, requests can get stuck here in the pending state. If we see a - // connect failure, we purge all pending requests so that calling code can determine what to - // do with the request. - // NOTE: We move the existing pending requests to a temporary list. This is done so that - // if retry logic submits a new request to the pool, we don't fail it inline. + // that is behaving badly, streams can get stuck here in the pending state. If we see a + // connect failure, we purge all pending streams so that calling code can determine what to + // do with the stream. + // NOTE: We move the existing pending streams to a temporary list. This is done so that + // if retry logic submits a new stream to the pool, we don't fail it inline. 
purgePendingRequests(client.real_host_description_, failure_reason, reason); + // TODO(alyssawilk) only iff upstream is healthy. + // See if we should prefetch another connection based on active connections. + tryCreateNewConnections(); } // We need to release our resourceManager() resources before checking below for @@ -289,15 +329,15 @@ void ConnPoolImplBase::onConnectionEvent(ActiveClient& client, absl::string_view client.releaseResources(); dispatcher_.deferredDelete(client.removeFromList(owningList(client.state_))); - if (incomplete_request) { + if (incomplete_stream) { checkForDrained(); } client.state_ = ActiveClient::State::CLOSED; - // If we have pending requests and we just lost a connection we should make a new one. - if (!pending_requests_.empty()) { - tryCreateNewConnection(); + // If we have pending streams and we just lost a connection we should make a new one. + if (!pending_streams_.empty()) { + tryCreateNewConnections(); } } else if (event == Network::ConnectionEvent::Connected) { client.conn_connect_ms_->complete(); @@ -334,36 +374,46 @@ void PendingRequest::cancel(Envoy::ConnectionPool::CancelPolicy policy) { void ConnPoolImplBase::purgePendingRequests( const Upstream::HostDescriptionConstSharedPtr& host_description, absl::string_view failure_reason, ConnectionPool::PoolFailureReason reason) { - // NOTE: We move the existing pending requests to a temporary list. This is done so that - // if retry logic submits a new request to the pool, we don't fail it inline. - pending_requests_to_purge_ = std::move(pending_requests_); - while (!pending_requests_to_purge_.empty()) { - PendingRequestPtr request = - pending_requests_to_purge_.front()->removeFromList(pending_requests_to_purge_); + // NOTE: We move the existing pending streams to a temporary list. This is done so that + // if retry logic submits a new stream to the pool, we don't fail it inline. + pending_streams_to_purge_ = std::move(pending_streams_); + while (!pending_streams_to_purge_.empty()) { + PendingRequestPtr stream = + pending_streams_to_purge_.front()->removeFromList(pending_streams_to_purge_); host_->cluster().stats().upstream_rq_pending_failure_eject_.inc(); - onPoolFailure(host_description, failure_reason, reason, request->context()); + onPoolFailure(host_description, failure_reason, reason, stream->context()); } } -void ConnPoolImplBase::onPendingRequestCancel(PendingRequest& request, +bool ConnPoolImplBase::connectingConnectionIsExcess() const { + ASSERT(connecting_stream_capacity_ >= + connecting_clients_.front()->effectiveConcurrentRequestLimit()); + // If prefetchRatio is one, this simplifies to checking if there would still be sufficient + // connecting stream capacity to serve all pending streams if the most recent client were + // removed from the picture. + // + // If prefetch ratio is set, it also factors in the anticipated load based on both queued streams + // and active streams, and makes sure the connecting capacity would still be sufficient to serve + // that even with the most recent client removed. 
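Restated the same way as the creation check, the excess test is roughly its mirror image; placeholder parameters again stand in for the pool's members:

    #include <cstdint>

    // A connecting client is excess if demand, scaled by the prefetch ratio, is still covered
    // once that client's stream capacity is taken out of the picture.
    bool connectingConnectionIsExcess(uint64_t pending_streams, uint64_t active_streams,
                                      uint64_t connecting_stream_capacity,
                                      uint64_t front_client_stream_limit, float prefetch_ratio) {
      return (pending_streams + active_streams) * prefetch_ratio <=
             connecting_stream_capacity - front_client_stream_limit + active_streams;
    }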
+ return (pending_streams_.size() + num_active_streams_) * prefetchRatio() <= + (connecting_stream_capacity_ - + connecting_clients_.front()->effectiveConcurrentRequestLimit() + num_active_streams_); +} + +void ConnPoolImplBase::onPendingRequestCancel(PendingRequest& stream, Envoy::ConnectionPool::CancelPolicy policy) { - ENVOY_LOG(debug, "cancelling pending request"); - if (!pending_requests_to_purge_.empty()) { - // If pending_requests_to_purge_ is not empty, it means that we are called from + ENVOY_LOG(debug, "cancelling pending stream"); + if (!pending_streams_to_purge_.empty()) { + // If pending_streams_to_purge_ is not empty, it means that we are called from // with-in a onPoolFailure callback invoked in purgePendingRequests (i.e. purgePendingRequests - // is down in the call stack). Remove this request from the list as it is cancelled, + // is down in the call stack). Remove this stream from the list as it is cancelled, // and there is no need to call its onPoolFailure callback. - request.removeFromList(pending_requests_to_purge_); + stream.removeFromList(pending_streams_to_purge_); } else { - request.removeFromList(pending_requests_); + stream.removeFromList(pending_streams_); } - // There's excess capacity if - // pending_requests < connecting_request_capacity_ - capacity of most recent client. - // It's calculated below with addition instead to avoid underflow issues, overflow being - // assumed to not be a problem across the connection pool. if (policy == Envoy::ConnectionPool::CancelPolicy::CloseExcess && !connecting_clients_.empty() && - (pending_requests_.size() + connecting_clients_.front()->effectiveConcurrentRequestLimit() <= - connecting_request_capacity_)) { + connectingConnectionIsExcess()) { auto& client = *connecting_clients_.front(); transitionActiveClientState(client, ActiveClient::State::DRAINING); client.close(); @@ -381,10 +431,10 @@ uint64_t translateZeroToUnlimited(uint64_t limit) { } } // namespace -ActiveClient::ActiveClient(ConnPoolImplBase& parent, uint64_t lifetime_request_limit, - uint64_t concurrent_request_limit) - : parent_(parent), remaining_requests_(translateZeroToUnlimited(lifetime_request_limit)), - concurrent_request_limit_(translateZeroToUnlimited(concurrent_request_limit)), +ActiveClient::ActiveClient(ConnPoolImplBase& parent, uint64_t lifetime_stream_limit, + uint64_t concurrent_stream_limit) + : parent_(parent), remaining_streams_(translateZeroToUnlimited(lifetime_stream_limit)), + concurrent_stream_limit_(translateZeroToUnlimited(concurrent_stream_limit)), connect_timer_(parent_.dispatcher().createTimer([this]() -> void { onConnectTimeout(); })) { conn_connect_ms_ = std::make_unique( parent_.host()->cluster().stats().upstream_cx_connect_ms_, parent_.dispatcher().timeSource()); diff --git a/source/common/conn_pool/conn_pool_base.h b/source/common/conn_pool/conn_pool_base.h index 2488542110e0..acce470ed559 100644 --- a/source/common/conn_pool/conn_pool_base.h +++ b/source/common/conn_pool/conn_pool_base.h @@ -28,8 +28,8 @@ class ActiveClient : public LinkedObject, public Event::DeferredDeletable, protected Logger::Loggable { public: - ActiveClient(ConnPoolImplBase& parent, uint64_t lifetime_request_limit, - uint64_t concurrent_request_limit); + ActiveClient(ConnPoolImplBase& parent, uint64_t lifetime_stream_limit, + uint64_t concurrent_stream_limit); ~ActiveClient() override; void releaseResources(); @@ -42,33 +42,33 @@ class ActiveClient : public LinkedObject, // Called if the connection does not complete within the cluster's connectTimeout() 
void onConnectTimeout(); - // Returns the concurrent request limit, accounting for if the total request limit - // is less than the concurrent request limit. + // Returns the concurrent stream limit, accounting for if the total stream limit + // is less than the concurrent stream limit. uint64_t effectiveConcurrentRequestLimit() const { - return std::min(remaining_requests_, concurrent_request_limit_); + return std::min(remaining_streams_, concurrent_stream_limit_); } // Closes the underlying connection. virtual void close() PURE; // Returns the ID of the underlying connection. virtual uint64_t id() const PURE; - // Returns true if this closed with an incomplete request, for stats tracking/ purposes. + // Returns true if this closed with an incomplete stream, for stats tracking/ purposes. virtual bool closingWithIncompleteRequest() const PURE; - // Returns the number of active requests on this connection. + // Returns the number of active streams on this connection. virtual size_t numActiveRequests() const PURE; enum class State { CONNECTING, // Connection is not yet established. - READY, // Additional requests may be immediately dispatched to this connection. - BUSY, // Connection is at its concurrent request limit. - DRAINING, // No more requests can be dispatched to this connection, and it will be closed - // when all requests complete. + READY, // Additional streams may be immediately dispatched to this connection. + BUSY, // Connection is at its concurrent stream limit. + DRAINING, // No more streams can be dispatched to this connection, and it will be closed + // when all streams complete. CLOSED // Connection is closed and object is queued for destruction. }; ConnPoolImplBase& parent_; - uint64_t remaining_requests_; - const uint64_t concurrent_request_limit_; + uint64_t remaining_streams_; + const uint64_t concurrent_stream_limit_; State state_{State::CONNECTING}; Upstream::HostDescriptionConstSharedPtr real_host_description_; Stats::TimespanPtr conn_connect_ms_; @@ -78,6 +78,7 @@ class ActiveClient : public LinkedObject, bool timed_out_{false}; }; +// TODO(alyssawilk) renames for Request classes and functions -> Stream classes and functions. // PendingRequest is the base class for a connection which has been created but not yet established. class PendingRequest : public LinkedObject, public ConnectionPool::Cancellable { public: @@ -98,7 +99,7 @@ using PendingRequestPtr = std::unique_ptr; using ActiveClientPtr = std::unique_ptr; -// Base class that handles request queueing logic shared between connection pool implementations. +// Base class that handles stream queueing logic shared between connection pool implementations. class ConnPoolImplBase : protected Logger::Loggable { public: ConnPoolImplBase(Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, @@ -128,11 +129,11 @@ class ConnPoolImplBase : protected Logger::Loggable { // Gets a pointer to the list that currently owns this client. std::list& owningList(ActiveClient::State state); - // Removes the PendingRequest from the list of requests. Called when the PendingRequest is + // Removes the PendingRequest from the list of streams. Called when the PendingRequest is // cancelled, e.g. when the stream is reset before a connection has been established. 
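As a compact view of how the renamed members interact, the following minimal sketch (placeholder names, not Envoy code) pairs effectiveConcurrentRequestLimit() with the connecting_stream_capacity_ accounting: a connecting client contributes its effective limit when created and gives it back once it either connects or is torn down while still connecting:

    #include <algorithm>
    #include <cstdint>

    struct ConnectingCapacity {
      uint64_t connecting_stream_capacity{0};

      // A connection's usable stream capacity is its concurrent limit, clamped by how many
      // lifetime streams it has left.
      uint64_t effectiveLimit(uint64_t remaining_streams, uint64_t concurrent_stream_limit) const {
        return std::min(remaining_streams, concurrent_stream_limit);
      }
      void onClientCreated(uint64_t remaining, uint64_t limit) {
        connecting_stream_capacity += effectiveLimit(remaining, limit);
      }
      void onClientConnectedOrClosed(uint64_t remaining, uint64_t limit) {
        connecting_stream_capacity -= effectiveLimit(remaining, limit);
      }
    };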
- void onPendingRequestCancel(PendingRequest& request, Envoy::ConnectionPool::CancelPolicy policy); + void onPendingRequestCancel(PendingRequest& stream, Envoy::ConnectionPool::CancelPolicy policy); - // Fails all pending requests, calling onPoolFailure on the associated callbacks. + // Fails all pending streams, calling onPoolFailure on the associated callbacks. void purgePendingRequests(const Upstream::HostDescriptionConstSharedPtr& host_description, absl::string_view failure_reason, ConnectionPool::PoolFailureReason pool_failure_reason); @@ -151,10 +152,6 @@ class ConnPoolImplBase : protected Logger::Loggable { virtual ConnectionPool::Cancellable* newPendingRequest(AttachContext& context) PURE; - // Creates a new connection if allowed by resourceManager, or if created to avoid - // starving this pool. - void tryCreateNewConnection(); - void attachRequestToClient(Envoy::ConnectionPool::ActiveClient& client, AttachContext& context); virtual void onPoolFailure(const Upstream::HostDescriptionConstSharedPtr& host_description, @@ -162,8 +159,8 @@ class ConnPoolImplBase : protected Logger::Loggable { ConnectionPool::PoolFailureReason pool_failure_reason, AttachContext& context) PURE; virtual void onPoolReady(ActiveClient& client, AttachContext& context) PURE; - // Called by derived classes any time a request is completed or destroyed for any reason. - void onRequestClosed(Envoy::ConnectionPool::ActiveClient& client, bool delay_attaching_request); + // Called by derived classes any time a stream is completed or destroyed for any reason. + void onRequestClosed(Envoy::ConnectionPool::ActiveClient& client, bool delay_attaching_stream); const Upstream::HostConstSharedPtr& host() const { return host_; } Event::Dispatcher& dispatcher() { return dispatcher_; } @@ -174,6 +171,23 @@ class ConnPoolImplBase : protected Logger::Loggable { } protected: + // Creates up to 3 connections, based on the prefetch ratio. + void tryCreateNewConnections(); + + // Creates a new connection if there is sufficient demand, it is allowed by resourceManager, or + // to avoid starving this pool. + bool tryCreateNewConnection(); + + // A helper function which determines if a canceled pending connection should + // be closed as excess or not. + bool connectingConnectionIsExcess() const; + + // A helper function which determines if a new incoming stream should trigger + // connection prefetch. + bool shouldCreateNewConnection() const; + + float prefetchRatio() const; + const Upstream::HostConstSharedPtr host_; const Upstream::ResourcePriority priority_; @@ -181,30 +195,29 @@ class ConnPoolImplBase : protected Logger::Loggable { const Network::ConnectionSocket::OptionsSharedPtr socket_options_; const Network::TransportSocketOptionsSharedPtr transport_socket_options_; -protected: std::list drained_callbacks_; - std::list pending_requests_; + std::list pending_streams_; - // When calling purgePendingRequests, this list will be used to hold the requests we are about - // to purge. We need this if one cancelled requests cancels a different pending request - std::list pending_requests_to_purge_; + // When calling purgePendingRequests, this list will be used to hold the streams we are about + // to purge. We need this if one cancelled streams cancels a different pending stream + std::list pending_streams_to_purge_; - // Clients that are ready to handle additional requests. + // Clients that are ready to handle additional streams. // All entries are in state READY. 
std::list ready_clients_; - // Clients that are not ready to handle additional requests due to being BUSY or DRAINING. + // Clients that are not ready to handle additional streams due to being BUSY or DRAINING. std::list busy_clients_; - // Clients that are not ready to handle additional requests because they are CONNECTING. + // Clients that are not ready to handle additional streams because they are CONNECTING. std::list connecting_clients_; - // The number of requests currently attached to clients. - uint64_t num_active_requests_{0}; + // The number of streams currently attached to clients. + uint64_t num_active_streams_{0}; - // The number of requests that can be immediately dispatched + // The number of streams that can be immediately dispatched // if all CONNECTING connections become connected. - uint64_t connecting_request_capacity_{0}; + uint64_t connecting_stream_capacity_{0}; }; } // namespace ConnectionPool diff --git a/source/common/http/conn_pool_base.cc b/source/common/http/conn_pool_base.cc index dc1af9718211..7559b9e44fe3 100644 --- a/source/common/http/conn_pool_base.cc +++ b/source/common/http/conn_pool_base.cc @@ -61,18 +61,18 @@ HttpConnPoolImplBase::newStream(Http::ResponseDecoder& response_decoder, } bool HttpConnPoolImplBase::hasActiveConnections() const { - return (!pending_requests_.empty() || (num_active_requests_ > 0)); + return (!pending_streams_.empty() || (num_active_streams_ > 0)); } ConnectionPool::Cancellable* HttpConnPoolImplBase::newPendingRequest(Envoy::ConnectionPool::AttachContext& context) { Http::ResponseDecoder& decoder = *typedContext(context).decoder_; Http::ConnectionPool::Callbacks& callbacks = *typedContext(context).callbacks_; - ENVOY_LOG(debug, "queueing request due to no available connections"); - Envoy::ConnectionPool::PendingRequestPtr pending_request( + ENVOY_LOG(debug, "queueing stream due to no available connections"); + Envoy::ConnectionPool::PendingRequestPtr pending_stream( new HttpPendingRequest(*this, decoder, callbacks)); - pending_request->moveIntoList(std::move(pending_request), pending_requests_); - return pending_requests_.front().get(); + pending_stream->moveIntoList(std::move(pending_stream), pending_streams_); + return pending_streams_.front().get(); } void HttpConnPoolImplBase::onPoolReady(Envoy::ConnectionPool::ActiveClient& client, diff --git a/source/common/http/http1/conn_pool.cc b/source/common/http/http1/conn_pool.cc index e810c435f39e..5203399e5a8e 100644 --- a/source/common/http/http1/conn_pool.cc +++ b/source/common/http/http1/conn_pool.cc @@ -57,7 +57,7 @@ void ConnPoolImpl::onResponseComplete(ActiveClient& client) { } else { client.stream_wrapper_.reset(); - if (!pending_requests_.empty() && !upstream_ready_enabled_) { + if (!pending_streams_.empty() && !upstream_ready_enabled_) { upstream_ready_enabled_ = true; upstream_ready_cb_->scheduleCallbackCurrentIteration(); } diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc index 5a8ce4bcbcd4..6c7e53e427e9 100644 --- a/source/common/runtime/runtime_features.cc +++ b/source/common/runtime/runtime_features.cc @@ -60,6 +60,7 @@ constexpr const char* runtime_features[] = { "envoy.reloadable_features.activate_fds_next_event_loop", "envoy.reloadable_features.allow_500_after_100", "envoy.deprecated_features.allow_deprecated_extension_names", + "envoy.reloadable_features.allow_prefetch", "envoy.reloadable_features.consume_all_retry_headers", "envoy.reloadable_features.disallow_unbounded_access_logs", 
"envoy.reloadable_features.early_errors_via_hcm", diff --git a/source/common/tcp/conn_pool.cc b/source/common/tcp/conn_pool.cc index 5cbb3093a919..ed3332d8afef 100644 --- a/source/common/tcp/conn_pool.cc +++ b/source/common/tcp/conn_pool.cc @@ -13,9 +13,9 @@ namespace Envoy { namespace Tcp { ActiveTcpClient::ActiveTcpClient(ConnPoolImpl& parent, const Upstream::HostConstSharedPtr& host, - uint64_t concurrent_request_limit) + uint64_t concurrent_stream_limit) : Envoy::ConnectionPool::ActiveClient(parent, host->cluster().maxRequestsPerConnection(), - concurrent_request_limit), + concurrent_stream_limit), parent_(parent) { Upstream::Host::CreateConnectionData data = host->createConnection( parent_.dispatcher(), parent_.socketOptions(), parent_.transportSocketOptions()); diff --git a/source/common/tcp/conn_pool.h b/source/common/tcp/conn_pool.h index d267ac24ed06..0ed92d295d31 100644 --- a/source/common/tcp/conn_pool.h +++ b/source/common/tcp/conn_pool.h @@ -85,7 +85,7 @@ class ActiveTcpClient : public Envoy::ConnectionPool::ActiveClient { }; ActiveTcpClient(ConnPoolImpl& parent, const Upstream::HostConstSharedPtr& host, - uint64_t concurrent_request_limit); + uint64_t concurrent_stream_limit); ~ActiveTcpClient() override; // Override the default's of Envoy::ConnectionPool::ActiveClient for class-specific functions. @@ -137,11 +137,11 @@ class ConnPoolImpl : public Envoy::ConnectionPool::ConnPoolImplBase, // Legacy behavior for the TCP connection pool marks all connecting clients // as draining. for (auto& connecting_client : connecting_clients_) { - if (connecting_client->remaining_requests_ > 1) { + if (connecting_client->remaining_streams_ > 1) { uint64_t old_limit = connecting_client->effectiveConcurrentRequestLimit(); - connecting_client->remaining_requests_ = 1; + connecting_client->remaining_streams_ = 1; if (connecting_client->effectiveConcurrentRequestLimit() < old_limit) { - connecting_request_capacity_ -= + connecting_stream_capacity_ -= (old_limit - connecting_client->effectiveConcurrentRequestLimit()); } } @@ -162,10 +162,10 @@ class ConnPoolImpl : public Envoy::ConnectionPool::ConnPoolImplBase, ConnectionPool::Cancellable* newPendingRequest(Envoy::ConnectionPool::AttachContext& context) override { - Envoy::ConnectionPool::PendingRequestPtr pending_request = + Envoy::ConnectionPool::PendingRequestPtr pending_stream = std::make_unique(*this, typedContext(context)); - pending_request->moveIntoList(std::move(pending_request), pending_requests_); - return pending_requests_.front().get(); + pending_stream->moveIntoList(std::move(pending_stream), pending_streams_); + return pending_streams_.front().get(); } Upstream::HostDescriptionConstSharedPtr host() const override { @@ -196,7 +196,7 @@ class ConnPoolImpl : public Envoy::ConnectionPool::ConnPoolImplBase, // These two functions exist for testing parity between old and new Tcp Connection Pools. 
virtual void onConnReleased(Envoy::ConnectionPool::ActiveClient& client) { if (client.state_ == Envoy::ConnectionPool::ActiveClient::State::BUSY) { - if (!pending_requests_.empty() && !upstream_ready_enabled_) { + if (!pending_streams_.empty() && !upstream_ready_enabled_) { upstream_ready_cb_->scheduleCallbackCurrentIteration(); } } diff --git a/source/common/upstream/upstream_impl.cc b/source/common/upstream/upstream_impl.cc index 711b74076548..4bca4252b823 100644 --- a/source/common/upstream/upstream_impl.cc +++ b/source/common/upstream/upstream_impl.cc @@ -688,6 +688,8 @@ ClusterInfoImpl::ClusterInfoImpl( Http::DEFAULT_MAX_HEADERS_COUNT))), connect_timeout_( std::chrono::milliseconds(PROTOBUF_GET_MS_REQUIRED(config, connect_timeout))), + prefetch_ratio_( + PROTOBUF_GET_WRAPPED_OR_DEFAULT(config.prefetch_policy(), prefetch_ratio, 1.0)), per_connection_buffer_limit_bytes_( PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, per_connection_buffer_limit_bytes, 1024 * 1024)), socket_matcher_(std::move(socket_matcher)), stats_scope_(std::move(stats_scope)), diff --git a/source/common/upstream/upstream_impl.h b/source/common/upstream/upstream_impl.h index dad144554ed2..b5f2e9d469a0 100644 --- a/source/common/upstream/upstream_impl.h +++ b/source/common/upstream/upstream_impl.h @@ -536,6 +536,7 @@ class ClusterInfoImpl : public ClusterInfo, protected Logger::Loggable idleTimeout() const override { return idle_timeout_; } + float prefetchRatio() const override { return prefetch_ratio_; } uint32_t perConnectionBufferLimitBytes() const override { return per_connection_buffer_limit_bytes_; } @@ -655,6 +656,7 @@ class ClusterInfoImpl : public ClusterInfo, protected Logger::Loggable idle_timeout_; + const float prefetch_ratio_; const uint32_t per_connection_buffer_limit_bytes_; TransportSocketMatcherPtr socket_matcher_; Stats::ScopePtr stats_scope_; diff --git a/test/common/http/http1/conn_pool_test.cc b/test/common/http/http1/conn_pool_test.cc index 91e692527928..64b459c7ef22 100644 --- a/test/common/http/http1/conn_pool_test.cc +++ b/test/common/http/http1/conn_pool_test.cc @@ -61,7 +61,7 @@ class ConnPoolImplForTest : public ConnPoolImpl { ~ConnPoolImplForTest() override { EXPECT_EQ(0U, ready_clients_.size()); EXPECT_EQ(0U, busy_clients_.size()); - EXPECT_EQ(0U, pending_requests_.size()); + EXPECT_EQ(0U, pending_streams_.size()); } struct TestCodecClient { diff --git a/test/common/http/http2/BUILD b/test/common/http/http2/BUILD index 9d64120d5dd7..6421f02a7a49 100644 --- a/test/common/http/http2/BUILD +++ b/test/common/http/http2/BUILD @@ -82,6 +82,7 @@ envoy_cc_test( "//test/mocks/network:network_mocks", "//test/mocks/runtime:runtime_mocks", "//test/mocks/upstream:upstream_mocks", + "//test/test_common:test_runtime_lib", ], ) diff --git a/test/common/http/http2/conn_pool_test.cc b/test/common/http/http2/conn_pool_test.cc index c3fab0eeedab..d0f0ed1c5061 100644 --- a/test/common/http/http2/conn_pool_test.cc +++ b/test/common/http/http2/conn_pool_test.cc @@ -16,6 +16,7 @@ #include "test/mocks/runtime/mocks.h" #include "test/mocks/upstream/mocks.h" #include "test/test_common/printers.h" +#include "test/test_common/test_runtime.h" #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -24,6 +25,7 @@ using testing::_; using testing::DoAll; using testing::InSequence; using testing::Invoke; +using testing::InvokeWithoutArgs; using testing::NiceMock; using testing::Property; using testing::Return; @@ -72,44 +74,82 @@ class Http2ConnPoolImplTest : public testing::Test { EXPECT_EQ("", 
TestUtility::nonZeroedGauges(cluster_->stats_store_.gauges())); } - TestCodecClient& createTestClient() { - test_clients_.emplace_back(); - TestCodecClient& test_client = test_clients_.back(); - test_client.connection_ = new NiceMock(); - test_client.codec_ = new NiceMock(); - test_client.connect_timer_ = new NiceMock(&dispatcher_); - test_client.client_dispatcher_ = api_->allocateDispatcher("test_thread"); - EXPECT_CALL(*test_client.connect_timer_, enableTimer(_, _)); + void createTestClients(int num_clients) { + // Create N clients. + for (int i = 0; i < num_clients; ++i) { + test_clients_.emplace_back(); + TestCodecClient& test_client = test_clients_.back(); + test_client.connection_ = new NiceMock(); + test_client.codec_ = new NiceMock(); + test_client.connect_timer_ = new NiceMock(); + test_client.client_dispatcher_ = api_->allocateDispatcher("test_thread"); + } - return test_client; + // Outside the for loop, set the createTimer expectations. + EXPECT_CALL(dispatcher_, createTimer_(_)) + .Times(num_clients) + .WillRepeatedly(Invoke([this](Event::TimerCb cb) { + test_clients_[timer_index_].connect_timer_->callback_ = cb; + return test_clients_[timer_index_++].connect_timer_; + })); + // Loop again through the last num_clients entries to set enableTimer expectations. + // Ideally this could be done in the loop above but it breaks InSequence + // assertions. + for (size_t i = test_clients_.size() - num_clients; i < test_clients_.size(); ++i) { + TestCodecClient& test_client = test_clients_[i]; + EXPECT_CALL(*test_client.connect_timer_, enableTimer(_, _)); + } } - void expectConnectionSetupForClient(TestCodecClient& test_client, + void expectConnectionSetupForClient(int num_clients, absl::optional buffer_limits = {}) { + // Set the createClientConnection mocks. The createCodecClient_ invoke + // below takes care of making sure connection_index_ is updated. EXPECT_CALL(dispatcher_, createClientConnection_(_, _, _, _)) - .WillOnce(Return(test_client.connection_)); - auto cluster = std::make_shared>(); - Network::ClientConnectionPtr connection{test_client.connection_}; - test_client.codec_client_ = new CodecClientForTest( - CodecClient::Type::HTTP1, std::move(connection), test_client.codec_, - [this](CodecClient*) -> void { onClientDestroy(); }, - Upstream::makeTestHost(cluster, "tcp://127.0.0.1:9000"), *test_client.client_dispatcher_); - if (buffer_limits) { - EXPECT_CALL(*cluster_, perConnectionBufferLimitBytes()).WillOnce(Return(*buffer_limits)); - EXPECT_CALL(*test_clients_.back().connection_, setBufferLimits(*buffer_limits)); + .Times(num_clients) + .WillRepeatedly(InvokeWithoutArgs([this]() -> Network::ClientConnection* { + return test_clients_[connection_index_].connection_; + })); + + // Loop through the last num_clients clients, setting up codec clients and + // per-client mocks. 
+ for (size_t i = test_clients_.size() - num_clients; i < test_clients_.size(); ++i) { + TestCodecClient& test_client = test_clients_[i]; + auto cluster = std::make_shared>(); + Network::ClientConnectionPtr connection{test_client.connection_}; + test_client.codec_client_ = new CodecClientForTest( + CodecClient::Type::HTTP1, std::move(connection), test_client.codec_, + [this](CodecClient*) -> void { onClientDestroy(); }, + Upstream::makeTestHost(cluster, "tcp://127.0.0.1:9000"), *test_client.client_dispatcher_); + if (buffer_limits) { + EXPECT_CALL(*cluster_, perConnectionBufferLimitBytes()) + .Times(num_clients) + .WillRepeatedly(Return(*buffer_limits)); + EXPECT_CALL(*test_client.connection_, setBufferLimits(*buffer_limits)).Times(1); + } } + // Finally (for InSequence tests) set up createCodecClient and make sure the + // index is incremented to avoid returning the same client more than once. EXPECT_CALL(*pool_, createCodecClient_(_)) - .WillOnce(Invoke([this](Upstream::Host::CreateConnectionData&) -> CodecClient* { - return test_clients_.back().codec_client_; + .Times(num_clients) + .WillRepeatedly(Invoke([this](Upstream::Host::CreateConnectionData&) -> CodecClient* { + return test_clients_[connection_index_++].codec_client_; })); } // Creates a new test client, expecting a new connection to be created and associated // with the new client. void expectClientCreate(absl::optional buffer_limits = {}) { - expectConnectionSetupForClient(createTestClient(), buffer_limits); + createTestClients(1); + expectConnectionSetupForClient(1, buffer_limits); + } + void expectClientsCreate(int num_clients) { + createTestClients(num_clients); + expectConnectionSetupForClient(num_clients, absl::nullopt); } + // Connects a pending connection for client with the given index. + void expectClientConnect(size_t index); // Connects a pending connection for client with the given index, asserting // that the provided request receives onPoolReady. void expectClientConnect(size_t index, ActiveTestRequest& r); @@ -127,6 +167,11 @@ class Http2ConnPoolImplTest : public testing::Test { */ void closeClient(size_t index); + /** + * Closes all test clients. + */ + void closeAllClients(); + /** * Completes an active request. Useful when this flow is not part of the main test assertions. 
*/ @@ -140,6 +185,8 @@ class Http2ConnPoolImplTest : public testing::Test { MOCK_METHOD(void, onClientDestroy, ()); + int timer_index_{}; + int connection_index_{}; Stats::IsolatedStoreImpl stats_store_; Api::ApiPtr api_; NiceMock dispatcher_; @@ -171,12 +218,16 @@ class ActiveTestRequest { ConnectionPool::Cancellable* handle_{}; }; -void Http2ConnPoolImplTest::expectClientConnect(size_t index, ActiveTestRequest& r) { - expectStreamConnect(index, r); +void Http2ConnPoolImplTest::expectClientConnect(size_t index) { EXPECT_CALL(*test_clients_[index].connect_timer_, disableTimer()); test_clients_[index].connection_->raiseEvent(Network::ConnectionEvent::Connected); } +void Http2ConnPoolImplTest::expectClientConnect(size_t index, ActiveTestRequest& r) { + expectStreamConnect(index, r); + expectClientConnect(index); +} + void Http2ConnPoolImplTest::expectStreamConnect(size_t index, ActiveTestRequest& r) { EXPECT_CALL(*test_clients_[index].codec_, newStream(_)) .WillOnce(DoAll(SaveArgAddress(&r.inner_decoder_), ReturnRef(r.inner_encoder_))); @@ -206,6 +257,14 @@ void Http2ConnPoolImplTest::closeClient(size_t index) { dispatcher_.clearDeferredDeleteList(); } +void Http2ConnPoolImplTest::closeAllClients() { + for (auto& test_client : test_clients_) { + test_client.connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + } + EXPECT_CALL(*this, onClientDestroy()).Times(test_clients_.size()); + dispatcher_.clearDeferredDeleteList(); +} + void Http2ConnPoolImplTest::completeRequest(ActiveTestRequest& r) { EXPECT_CALL(r.inner_encoder_, encodeHeaders(_, true)); r.callbacks_.outer_encoder_->encodeHeaders( @@ -261,7 +320,7 @@ TEST_F(Http2ConnPoolImplTest, VerifyAlpnFallback) { // This requires some careful set up of expectations ordering: the call to createTransportSocket // happens before all the connection set up but after the test client is created (due to some) // of the mocks that are constructed as part of the test client. - auto& client = createTestClient(); + createTestClients(1); EXPECT_CALL(*factory_ptr, createTransportSocket(_)) .WillOnce(Invoke( [](Network::TransportSocketOptionsSharedPtr options) -> Network::TransportSocketPtr { @@ -270,7 +329,7 @@ TEST_F(Http2ConnPoolImplTest, VerifyAlpnFallback) { Http::Utility::AlpnNames::get().Http2); return std::make_unique(); })); - expectConnectionSetupForClient(client); + expectConnectionSetupForClient(1); ActiveTestRequest r(*this, 0, false); expectClientConnect(0, r); EXPECT_CALL(r.inner_encoder_, encodeHeaders(_, true)); @@ -780,8 +839,7 @@ TEST_F(Http2ConnPoolImplTest, PendingRequestsRequestOverflow) { expectStreamConnect(0, r1); expectStreamReset(r2); expectStreamReset(r3); - EXPECT_CALL(*test_clients_[0].connect_timer_, disableTimer()); - test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + expectClientConnect(0); // Clean up everything. for (uint64_t i = 0; i < requests.max() - 1; ++i) { @@ -817,8 +875,7 @@ TEST_F(Http2ConnPoolImplTest, PendingRequestsMaxPendingCircuitBreaker) { EXPECT_EQ(nullptr, pool_->newStream(decoder, callbacks)); expectStreamConnect(0, r1); - EXPECT_CALL(*test_clients_[0].connect_timer_, disableTimer()); - test_clients_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); + expectClientConnect(0); // Clean up everything. 
for (uint64_t i = 0; i < pending_reqs.max() - 1; ++i) { @@ -1257,6 +1314,145 @@ TEST_F(Http2ConnPoolImplTest, DrainedConnectionsNotActive) { closeClient(0); } + +TEST_F(Http2ConnPoolImplTest, PrefetchWithoutMultiplexing) { + cluster_->http2_options_.mutable_max_concurrent_streams()->set_value(1); + ON_CALL(*cluster_, prefetchRatio).WillByDefault(Return(1.5)); + + // With one request per connection, and prefetch 1.5, the first request will + // kick off 2 connections. + expectClientsCreate(2); + ActiveTestRequest r1(*this, 0, false); + + // With another incoming request, we'll have 2 in flight and want 1.5*2 so + // create one connection. + expectClientsCreate(1); + ActiveTestRequest r2(*this, 0, false); + + // With a third request we'll have 3 in flight and want 1.5*3 -> 5 so kick off + // two again. + expectClientsCreate(2); + ActiveTestRequest r3(*this, 0, false); + + r1.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess); + r2.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess); + r3.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess); + pool_->drainConnections(); + + closeAllClients(); +} + +TEST_F(Http2ConnPoolImplTest, PrefetchOff) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.allow_prefetch", "false"}}); + cluster_->http2_options_.mutable_max_concurrent_streams()->set_value(1); + ON_CALL(*cluster_, prefetchRatio).WillByDefault(Return(1.5)); + + // Despite the prefetch ratio, no prefetch will happen due to the runtime + // disable. + expectClientsCreate(1); + ActiveTestRequest r1(*this, 0, false); + + // Clean up. + r1.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess); + pool_->drainConnections(); + closeAllClients(); +} + +TEST_F(Http2ConnPoolImplTest, PrefetchWithMultiplexing) { + cluster_->http2_options_.mutable_max_concurrent_streams()->set_value(2); + ON_CALL(*cluster_, prefetchRatio).WillByDefault(Return(1.5)); + + // With two requests per connection, and prefetch 1.5, the first request will + // only kick off 1 connection. + expectClientsCreate(1); + ActiveTestRequest r1(*this, 0, false); + + // With another incoming request, we'll have capacity(2) in flight and want 1.5*2 so + // create an additional connection. + expectClientsCreate(1); + ActiveTestRequest r2(*this, 0, false); + + // Clean up. + r1.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess); + r2.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess); + pool_->drainConnections(); + closeAllClients(); +} + +TEST_F(Http2ConnPoolImplTest, PrefetchEvenWhenReady) { + cluster_->http2_options_.mutable_max_concurrent_streams()->set_value(1); + ON_CALL(*cluster_, prefetchRatio).WillByDefault(Return(1.5)); + + // With one request per connection, and prefetch 1.5, the first request will + // kick off 2 connections. + expectClientsCreate(2); + ActiveTestRequest r1(*this, 0, false); + + // When the first client connects, r1 will be assigned. + expectClientConnect(0, r1); + // When the second connects, there is no waiting stream request to assign. + expectClientConnect(1); + + // The next incoming request will immediately be assigned a stream, and also + // kick off a prefetch. + expectClientsCreate(1); + ActiveTestRequest r2(*this, 1, true); + + // Clean up. 
+ completeRequest(r1); + completeRequest(r2); + pool_->drainConnections(); + closeAllClients(); +} + +TEST_F(Http2ConnPoolImplTest, PrefetchAfterTimeout) { + cluster_->http2_options_.mutable_max_concurrent_streams()->set_value(1); + ON_CALL(*cluster_, prefetchRatio).WillByDefault(Return(1.5)); + + expectClientsCreate(2); + ActiveTestRequest r1(*this, 0, false); + + // When the first client connects, r1 will be assigned. + expectClientConnect(0, r1); + + // Now cause the prefetched connection to fail. We should try to create + // another in its place. + expectClientsCreate(1); + test_clients_[1].connect_timer_->invokeCallback(); + + // Clean up. + completeRequest(r1); + pool_->drainConnections(); + closeAllClients(); +} + +TEST_F(Http2ConnPoolImplTest, CloseExcessWithPrefetch) { + cluster_->http2_options_.mutable_max_concurrent_streams()->set_value(1); + ON_CALL(*cluster_, prefetchRatio).WillByDefault(Return(1.00)); + + // First request prefetches an additional connection. + expectClientsCreate(1); + ActiveTestRequest r1(*this, 0, false); + + // Second request does not prefetch. + expectClientsCreate(1); + ActiveTestRequest r2(*this, 0, false); + + // Change the prefetch ratio to force the connection to no longer be excess. + ON_CALL(*cluster_, prefetchRatio).WillByDefault(Return(2)); + // Closing off the second request should bring us back to 1 request in queue, + // desired capacity 2, so will not close the connection. + EXPECT_CALL(*this, onClientDestroy()).Times(0); + r2.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess); + + // Clean up. + r1.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess); + pool_->drainConnections(); + closeAllClients(); +} + } // namespace Http2 } // namespace Http } // namespace Envoy diff --git a/test/integration/protocol_integration_test.cc b/test/integration/protocol_integration_test.cc index b1cf6457fe6b..2fee4ec1367e 100644 --- a/test/integration/protocol_integration_test.cc +++ b/test/integration/protocol_integration_test.cc @@ -1921,6 +1921,27 @@ TEST_P(ProtocolIntegrationTest, ConnDurationTimeoutNoHttpRequest) { test_server_->waitForCounterGe("http.config_test.downstream_cx_max_duration_reached", 1); } +TEST_P(DownstreamProtocolIntegrationTest, TestPrefetch) { + config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + auto* cluster = bootstrap.mutable_static_resources()->mutable_clusters(0); + cluster->mutable_prefetch_policy()->mutable_prefetch_ratio()->set_value(1.5); + }); + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + auto response = + sendRequestAndWaitForResponse(default_request_headers_, 0, default_response_headers_, 0); + FakeHttpConnectionPtr fake_upstream_connection_two; + if (upstreamProtocol() == FakeHttpConnection::Type::HTTP1) { + // For HTTP/1.1 there should be a prefetched connection. + ASSERT_TRUE( + fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_two)); + } else { + // For HTTP/2, the original connection can accommodate two requests. 
+ ASSERT_FALSE(fake_upstreams_[0]->waitForHttpConnection( + *dispatcher_, fake_upstream_connection_two, std::chrono::milliseconds(5))); + } +} + TEST_P(DownstreamProtocolIntegrationTest, BasicMaxStreamTimeout) { config_helper_.setDownstreamMaxStreamDuration(std::chrono::milliseconds(500)); initialize(); diff --git a/test/mocks/event/mocks.h b/test/mocks/event/mocks.h index 16cf4283c218..55983c27c536 100644 --- a/test/mocks/event/mocks.h +++ b/test/mocks/event/mocks.h @@ -170,7 +170,6 @@ class MockTimer : public Timer { const ScopeTrackedObject* scope_{}; bool enabled_{}; -private: Event::TimerCb callback_; }; diff --git a/test/mocks/upstream/cluster_info.cc b/test/mocks/upstream/cluster_info.cc index e45cfe0bc521..b6b8a59631ce 100644 --- a/test/mocks/upstream/cluster_info.cc +++ b/test/mocks/upstream/cluster_info.cc @@ -53,6 +53,7 @@ MockClusterInfo::MockClusterInfo() circuit_breakers_stats_, absl::nullopt, absl::nullopt)) { ON_CALL(*this, connectTimeout()).WillByDefault(Return(std::chrono::milliseconds(1))); ON_CALL(*this, idleTimeout()).WillByDefault(Return(absl::optional())); + ON_CALL(*this, prefetchRatio()).WillByDefault(Return(1.0)); ON_CALL(*this, name()).WillByDefault(ReturnRef(name_)); ON_CALL(*this, edsServiceName()).WillByDefault(ReturnPointee(&eds_service_name_)); ON_CALL(*this, http1Settings()).WillByDefault(ReturnRef(http1_settings_)); diff --git a/test/mocks/upstream/cluster_info.h b/test/mocks/upstream/cluster_info.h index 8af0bfcb39df..3848154f7355 100644 --- a/test/mocks/upstream/cluster_info.h +++ b/test/mocks/upstream/cluster_info.h @@ -89,6 +89,7 @@ class MockClusterInfo : public ClusterInfo { MOCK_METHOD(bool, addedViaApi, (), (const)); MOCK_METHOD(std::chrono::milliseconds, connectTimeout, (), (const)); MOCK_METHOD(const absl::optional, idleTimeout, (), (const)); + MOCK_METHOD(float, prefetchRatio, (), (const)); MOCK_METHOD(uint32_t, perConnectionBufferLimitBytes, (), (const)); MOCK_METHOD(uint64_t, features, (), (const)); MOCK_METHOD(const Http::Http1Settings&, http1Settings, (), (const)); diff --git a/tools/spelling/spelling_dictionary.txt b/tools/spelling/spelling_dictionary.txt index 64e5d768f114..7f07e7158ba9 100644 --- a/tools/spelling/spelling_dictionary.txt +++ b/tools/spelling/spelling_dictionary.txt @@ -284,6 +284,7 @@ SIGINT SIGPIPE SIGSEGV SIGTERM +SMTP SNI SOTW SPD @@ -870,6 +871,8 @@ precompute precomputed predeclared prefetch +prefetched +prefetches preflight preorder prepend From 8b157dd4486b158073e55b988db26b65e8c7465f Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Wed, 29 Jul 2020 05:26:40 -0700 Subject: [PATCH 772/909] test: fix PerWorkerStatsAndBalancingFlake (#12346) Risk Level: None Testing: Existing tests Docs Changes: N/A Release Notes: N/A Fixes https://github.com/envoyproxy/envoy/issues/12325 Signed-off-by: Matt Klein --- test/integration/integration_test.cc | 8 ++++---- test/integration/server.h | 17 +++++++++++++++-- test/integration/server_stats.h | 6 ++++++ 3 files changed, 25 insertions(+), 6 deletions(-) diff --git a/test/integration/integration_test.cc b/test/integration/integration_test.cc index 197c628be6c3..081bcd2681ba 100644 --- a/test/integration/integration_test.cc +++ b/test/integration/integration_test.cc @@ -124,12 +124,12 @@ TEST_P(IntegrationTest, PerWorkerStatsAndBalancing) { check_listener_stats(0, 0); // Main thread admin listener stats. 
- EXPECT_NE(nullptr, test_server_->counter("listener.admin.main_thread.downstream_cx_total")); + test_server_->waitForCounterExists("listener.admin.main_thread.downstream_cx_total"); // Per-thread watchdog stats. - EXPECT_NE(nullptr, test_server_->counter("server.main_thread.watchdog_miss")); - EXPECT_NE(nullptr, test_server_->counter("server.worker_0.watchdog_miss")); - EXPECT_NE(nullptr, test_server_->counter("server.worker_1.watchdog_miss")); + test_server_->waitForCounterExists("server.main_thread.watchdog_miss"); + test_server_->waitForCounterExists("server.worker_0.watchdog_miss"); + test_server_->waitForCounterExists("server.worker_1.watchdog_miss"); codec_client_ = makeHttpConnection(lookupPort("http")); IntegrationCodecClientPtr codec_client2 = makeHttpConnection(lookupPort("http")); diff --git a/test/integration/server.h b/test/integration/server.h index 1197aa3e8364..bec7f855b042 100644 --- a/test/integration/server.h +++ b/test/integration/server.h @@ -205,7 +205,7 @@ class NotifyingAllocatorImpl : public Stats::AllocatorImpl { public: using Stats::AllocatorImpl::AllocatorImpl; - virtual void waitForCounterFromStringEq(const std::string& name, uint64_t value) { + void waitForCounterFromStringEq(const std::string& name, uint64_t value) { absl::MutexLock l(&mutex_); ENVOY_LOG_MISC(trace, "waiting for {} to be {}", name, value); while (getCounterLockHeld(name) == nullptr || getCounterLockHeld(name)->value() != value) { @@ -214,7 +214,7 @@ class NotifyingAllocatorImpl : public Stats::AllocatorImpl { ENVOY_LOG_MISC(trace, "done waiting for {} to be {}", name, value); } - virtual void waitForCounterFromStringGe(const std::string& name, uint64_t value) { + void waitForCounterFromStringGe(const std::string& name, uint64_t value) { absl::MutexLock l(&mutex_); ENVOY_LOG_MISC(trace, "waiting for {} to be {}", name, value); while (getCounterLockHeld(name) == nullptr || getCounterLockHeld(name)->value() < value) { @@ -223,6 +223,15 @@ class NotifyingAllocatorImpl : public Stats::AllocatorImpl { ENVOY_LOG_MISC(trace, "done waiting for {} to be {}", name, value); } + void waitForCounterExists(const std::string& name) { + absl::MutexLock l(&mutex_); + ENVOY_LOG_MISC(trace, "waiting for {} to exist", name); + while (getCounterLockHeld(name) == nullptr) { + condvar_.Wait(&mutex_); + } + ENVOY_LOG_MISC(trace, "done waiting for {} to exist", name); + } + protected: Stats::Counter* makeCounterInternal(StatName name, StatName tag_extracted_name, const StatNameTagVector& stat_name_tags) override { @@ -420,6 +429,10 @@ class IntegrationTestServer : public Logger::Loggable, notifyingStatsAllocator().waitForCounterFromStringGe(name, value); } + void waitForCounterExists(const std::string& name) override { + notifyingStatsAllocator().waitForCounterExists(name); + } + void waitForGaugeGe(const std::string& name, uint64_t value) override { TestUtility::waitForGaugeGe(statStore(), name, value, time_system_); } diff --git a/test/integration/server_stats.h b/test/integration/server_stats.h index 859363aa0f11..186dba56450a 100644 --- a/test/integration/server_stats.h +++ b/test/integration/server_stats.h @@ -23,6 +23,12 @@ class IntegrationTestServerStats { */ virtual void waitForCounterGe(const std::string& name, uint64_t value) PURE; + /** + * Wait for a counter to exist. + * @param name counter name. + */ + virtual void waitForCounterExists(const std::string& name) PURE; + /** * Wait for a gauge to >= a given value. * @param name gauge name. 
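A side note on the HTTP/2 connection-pool prefetch tests added earlier in this series: the connection counts those tests expect all follow from one piece of arithmetic described in their comments, namely that the pool wants roughly prefetch_ratio times the in-flight streams worth of capacity, with each connection contributing max_concurrent_streams of it. The standalone sketch below only illustrates that arithmetic (the file and function names are invented for the example); it is not the pool implementation.

// prefetch_math_example.cc - illustration only, not part of any patch above.
#include <cmath>
#include <cstdint>
#include <iostream>

// Connections needed for `streams` in-flight streams, assuming desired
// capacity is prefetch_ratio * streams and each connection can carry
// streams_per_connection concurrent streams.
uint32_t connectionsWanted(uint32_t streams, float prefetch_ratio,
                           uint32_t streams_per_connection) {
  return static_cast<uint32_t>(
      std::ceil(streams * prefetch_ratio / streams_per_connection));
}

int main() {
  // PrefetchWithoutMultiplexing (1 stream per connection, ratio 1.5):
  // 1, 2 and 3 in-flight streams want 2, 3 and 5 connections respectively.
  std::cout << connectionsWanted(1, 1.5f, 1) << "\n"; // 2
  std::cout << connectionsWanted(2, 1.5f, 1) << "\n"; // 3
  std::cout << connectionsWanted(3, 1.5f, 1) << "\n"; // 5
  // PrefetchWithMultiplexing (2 streams per connection, ratio 1.5):
  // the first stream needs only one connection, the second triggers one more.
  std::cout << connectionsWanted(1, 1.5f, 2) << "\n"; // 1
  std::cout << connectionsWanted(2, 1.5f, 2) << "\n"; // 2
  return 0;
}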
From c04c605a9a34139235de67f8027f257f3eec18d8 Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Wed, 29 Jul 2020 05:33:07 -0700 Subject: [PATCH 773/909] test: fix rbac flake (#12347) The write can see disconnect upon completion. Fixes #12294 Risk Level: None Testing: Existing test Docs Changes: N/A Release Notes: N/A Signed-off-by: Matt Klein --- test/extensions/filters/network/rbac/integration_test.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/extensions/filters/network/rbac/integration_test.cc b/test/extensions/filters/network/rbac/integration_test.cc index 22b7407af629..7f90867687b5 100644 --- a/test/extensions/filters/network/rbac/integration_test.cc +++ b/test/extensions/filters/network/rbac/integration_test.cc @@ -122,7 +122,7 @@ name: rbac - any: true )EOF"); IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("listener_0")); - ASSERT_TRUE(tcp_client->write("hello")); + ASSERT_TRUE(tcp_client->write("hello", false, false)); tcp_client->waitForDisconnect(); EXPECT_EQ(0U, test_server_->counter("tcp.rbac.allowed")->value()); From be6bbcda4934b3c6f7ac58640d2b302b4ffd8a14 Mon Sep 17 00:00:00 2001 From: Rama Chavali Date: Wed, 29 Jul 2020 19:18:07 +0530 Subject: [PATCH 774/909] router: add transport failure reason to response body (#12321) Commit Message:add transport failure reason to body Additional Description: Adds transport failure reason to response body . This is specially useful to detect tls related failures when the client does not use Envoy. Otherwise cert expiration related errors just come as connection failure and it takes some digging to figure out the reason. Risk Level: Low, but changes the existing response body. Testing: Updated Docs Changes: N/A Release Notes: Updated Runtime guard: yes Signed-off-by: Rama Chavali --- docs/root/version_history/current.rst | 1 + source/common/router/router.cc | 15 +++++++++++---- source/common/runtime/runtime_features.cc | 1 + test/common/router/router_test.cc | 7 ++++--- 4 files changed, 17 insertions(+), 7 deletions(-) diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index de0307a2c4bf..7adc7a3e0a07 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -18,6 +18,7 @@ Minor Behavior Changes see a change in behavior. * logging: nghttp2 log messages no longer appear at trace level unless `ENVOY_NGHTTP2_TRACE` is set in the environment. +* router: added transport failure reason to response body when upstream reset happens. After this change, the response body will be of the form `upstream connect error or disconnect/reset before headers. reset reason:{}, transport failure reason:{}`.This behavior may be reverted by setting runtime feature `envoy.reloadable_features.http_transport_failure_reason_in_body` to false. * router: now consumes all retry related headers to prevent them from being propagated to the upstream. This behavior may be reverted by setting runtime feature `envoy.reloadable_features.consume_all_retry_headers` to false. 
Bug Fixes diff --git a/source/common/router/router.cc b/source/common/router/router.cc index ab81ba482ddf..fcf328b6feb2 100644 --- a/source/common/router/router.cc +++ b/source/common/router/router.cc @@ -1025,8 +1025,9 @@ bool Filter::maybeRetryReset(Http::StreamResetReason reset_reason, void Filter::onUpstreamReset(Http::StreamResetReason reset_reason, absl::string_view transport_failure_reason, UpstreamRequest& upstream_request) { - ENVOY_STREAM_LOG(debug, "upstream reset: reset reason {}", *callbacks_, - Http::Utility::resetReasonToString(reset_reason)); + ENVOY_STREAM_LOG(debug, "upstream reset: reset reason: {}, transport failure reason: {}", + *callbacks_, Http::Utility::resetReasonToString(reset_reason), + transport_failure_reason); // TODO: The reset may also come from upstream over the wire. In this case it should be // treated as external origin error and distinguished from local origin error. @@ -1050,10 +1051,16 @@ void Filter::onUpstreamReset(Http::StreamResetReason reset_reason, } const StreamInfo::ResponseFlag response_flags = streamResetReasonToResponseFlag(reset_reason); + const std::string body = absl::StrCat("upstream connect error or disconnect/reset before headers. reset reason: ", - Http::Utility::resetReasonToString(reset_reason)); - + Http::Utility::resetReasonToString(reset_reason), + Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.http_transport_failure_reason_in_body") && + !transport_failure_reason.empty() + ? ", transport failure reason: " + : "", + transport_failure_reason); callbacks_->streamInfo().setUpstreamTransportFailureReason(transport_failure_reason); const std::string& basic_details = downstream_response_started_ ? StreamInfo::ResponseCodeDetails::get().LateUpstreamReset diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc index 6c7e53e427e9..492304b3d290 100644 --- a/source/common/runtime/runtime_features.cc +++ b/source/common/runtime/runtime_features.cc @@ -71,6 +71,7 @@ constexpr const char* runtime_features[] = { "envoy.reloadable_features.fix_wildcard_matching", "envoy.reloadable_features.fixed_connection_close", "envoy.reloadable_features.http_default_alpn", + "envoy.reloadable_features.http_transport_failure_reason_in_body", "envoy.reloadable_features.listener_in_place_filterchain_update", "envoy.reloadable_features.new_codec_behavior", "envoy.reloadable_features.preserve_query_string_in_path_redirects", diff --git a/test/common/router/router_test.cc b/test/common/router/router_test.cc index cd00aaa9787c..0e829d87914e 100644 --- a/test/common/router/router_test.cc +++ b/test/common/router/router_test.cc @@ -483,12 +483,12 @@ TEST_F(RouterTest, PoolFailureWithPriority) { .WillOnce(Invoke([&](Http::StreamDecoder&, Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* { callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::RemoteConnectionFailure, - absl::string_view(), cm_.conn_pool_.host_); + "tls version mismatch", cm_.conn_pool_.host_); return nullptr; })); Http::TestResponseHeaderMapImpl response_headers{ - {":status", "503"}, {"content-length", "91"}, {"content-type", "text/plain"}}; + {":status", "503"}, {"content-length", "139"}, {"content-type", "text/plain"}}; EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), false)); EXPECT_CALL(callbacks_, encodeData(_, true)); EXPECT_CALL(callbacks_.stream_info_, @@ -505,7 +505,8 @@ TEST_F(RouterTest, PoolFailureWithPriority) { // Pool failure, so upstream request was not initiated. 
EXPECT_EQ(0U, callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); - EXPECT_EQ(callbacks_.details_, "upstream_reset_before_response_started{connection failure}"); + EXPECT_EQ(callbacks_.details_, + "upstream_reset_before_response_started{connection failure,tls version mismatch}"); } TEST_F(RouterTest, Http1Upstream) { From efd8ba9d65e482314396b14c35b4c568b662eb83 Mon Sep 17 00:00:00 2001 From: Rui Maranhao Date: Wed, 29 Jul 2020 15:03:43 +0100 Subject: [PATCH 775/909] Initialising variable which may be used in an uninitialised fashion in getOriginalDst. (#12250) Fixed uninitialized memory in network utility Utility::getOriginalDst Initializing variable which may be used in an uninitialized fashion as reported by the test case. Using uninitialized memory can be a security issue, such as potentially leaking previous stack contents. By zero-initializing, we avoid such potential leaks. Running with the specific input case after this PR is applied no longer results in any error findings (credits: the input case was found using google/clusterfuzz). Note that this PR only avoids the uninitialized memory use identified in that bug, and is unaware of the functionality or semantics of the rest of the code. The project owners are welcome to suggest alternate fixes on this PR or address other behavioral concerns in a separate PR. Signed-off-by: Rui Maranhao --- source/common/network/utility.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/source/common/network/utility.cc b/source/common/network/utility.cc index ebb83c43bb25..ed2c8be12d9f 100644 --- a/source/common/network/utility.cc +++ b/source/common/network/utility.cc @@ -379,6 +379,7 @@ Address::InstanceConstSharedPtr Utility::getOriginalDst(Socket& sock) { } sockaddr_storage orig_addr; + memset(&orig_addr, 0, sizeof(orig_addr)); socklen_t addr_len = sizeof(sockaddr_storage); int status; From 0db1e3ba40eaa9e6ab50f332aba3a9066e8a4b13 Mon Sep 17 00:00:00 2001 From: Kuat Date: Wed, 29 Jul 2020 09:51:22 -0700 Subject: [PATCH 776/909] xds: increment config_reload after workers receive TLS update (#12342) Signed-off-by: Kuat Yessenov --- .../envoy/config/extension_config_provider.h | 6 +++++- include/envoy/server/admin.h | 5 +++++ .../filter/http/filter_config_discovery_impl.cc | 17 +++++++++++++---- .../filter/http/filter_config_discovery_impl.h | 6 ++++-- .../network/http_connection_manager/config.cc | 2 +- source/server/admin/admin.h | 1 + source/server/config_validation/admin.h | 1 + .../http/filter_config_discovery_impl_test.cc | 3 +++ test/mocks/server/admin.cc | 1 + test/mocks/server/admin.h | 1 + 10 files changed, 35 insertions(+), 8 deletions(-) diff --git a/include/envoy/config/extension_config_provider.h b/include/envoy/config/extension_config_provider.h index 0ea1aef9adc3..5dc3ee3a65d5 100644 --- a/include/envoy/config/extension_config_provider.h +++ b/include/envoy/config/extension_config_provider.h @@ -9,6 +9,8 @@ namespace Envoy { namespace Config { +using ConfigAppliedCb = std::function; + /** * A provider for extension configurations obtained either statically or via * the extension configuration discovery service. Dynamically updated extension @@ -45,8 +47,10 @@ template class ExtensionConfigProvider { * Update the provider with a new configuration. * @param config is an extension factory callback to replace the existing configuration. * @param version_info is the version of the new extension configuration. + * @param cb the continuation callback for a completed configuration application. 
*/ - virtual void onConfigUpdate(FactoryCallback config, const std::string& version_info) PURE; + virtual void onConfigUpdate(FactoryCallback config, const std::string& version_info, + ConfigAppliedCb cb) PURE; }; } // namespace Config diff --git a/include/envoy/server/admin.h b/include/envoy/server/admin.h index b99b76a1e0ad..41578bd3cb70 100644 --- a/include/envoy/server/admin.h +++ b/include/envoy/server/admin.h @@ -153,6 +153,11 @@ class Admin { * @param handler the handler that will receive this Admin's listener. */ virtual void addListenerToHandler(Network::ConnectionHandler* handler) PURE; + + /** + * @return the number of worker threads to run in the server. + */ + virtual uint32_t concurrency() const PURE; }; } // namespace Server diff --git a/source/common/filter/http/filter_config_discovery_impl.cc b/source/common/filter/http/filter_config_discovery_impl.cc index eccab3fe988d..2084f8e679e7 100644 --- a/source/common/filter/http/filter_config_discovery_impl.cc +++ b/source/common/filter/http/filter_config_discovery_impl.cc @@ -52,11 +52,15 @@ void DynamicFilterConfigProviderImpl::validateConfig( } void DynamicFilterConfigProviderImpl::onConfigUpdate(Envoy::Http::FilterFactoryCb config, - const std::string&) { - tls_->runOnAllThreads([config](ThreadLocal::ThreadLocalObjectSharedPtr previous) + const std::string&, + Config::ConfigAppliedCb cb) { + tls_->runOnAllThreads([config, cb](ThreadLocal::ThreadLocalObjectSharedPtr previous) -> ThreadLocal::ThreadLocalObjectSharedPtr { auto prev_config = std::dynamic_pointer_cast(previous); prev_config->config_ = config; + if (cb) { + cb(); + } return previous; }); } @@ -126,10 +130,15 @@ void FilterConfigSubscription::onConfigUpdate( Envoy::Http::FilterFactoryCb factory_callback = factory.createFilterFactoryFromProto(*message, stat_prefix_, factory_context_); ENVOY_LOG(debug, "Updating filter config {}", filter_config_name_); + const auto pending_update = std::make_shared>( + (factory_context_.admin().concurrency() + 1) * filter_config_providers_.size()); for (auto* provider : filter_config_providers_) { - provider->onConfigUpdate(factory_callback, version_info); + provider->onConfigUpdate(factory_callback, version_info, [this, pending_update]() { + if (--(*pending_update) == 0) { + stats_.config_reload_.inc(); + } + }); } - stats_.config_reload_.inc(); last_config_hash_ = new_hash; } diff --git a/source/common/filter/http/filter_config_discovery_impl.h b/source/common/filter/http/filter_config_discovery_impl.h index 626832dd8a11..7e1229cd2e7c 100644 --- a/source/common/filter/http/filter_config_discovery_impl.h +++ b/source/common/filter/http/filter_config_discovery_impl.h @@ -41,7 +41,8 @@ class DynamicFilterConfigProviderImpl : public FilterConfigProvider { absl::optional config() override; void validateConfig(const ProtobufWkt::Any& proto_config, Server::Configuration::NamedHttpFilterConfigFactory&) override; - void onConfigUpdate(Envoy::Http::FilterFactoryCb config, const std::string&) override; + void onConfigUpdate(Envoy::Http::FilterFactoryCb config, const std::string&, + Config::ConfigAppliedCb cb) override; private: struct ThreadLocalConfig : public ThreadLocal::ThreadLocalObject { @@ -146,7 +147,8 @@ class StaticFilterConfigProviderImpl : public FilterConfigProvider { Server::Configuration::NamedHttpFilterConfigFactory&) override { NOT_REACHED_GCOVR_EXCL_LINE; } - void onConfigUpdate(Envoy::Http::FilterFactoryCb, const std::string&) override { + void onConfigUpdate(Envoy::Http::FilterFactoryCb, const std::string&, + 
Config::ConfigAppliedCb) override { NOT_REACHED_GCOVR_EXCL_LINE; } diff --git a/source/extensions/filters/network/http_connection_manager/config.cc b/source/extensions/filters/network/http_connection_manager/config.cc index ce274734447a..29078e4f59c5 100644 --- a/source/extensions/filters/network/http_connection_manager/config.cc +++ b/source/extensions/filters/network/http_connection_manager/config.cc @@ -551,7 +551,7 @@ void HttpConnectionManagerConfig::processDynamicFilterConfig( config_discovery.default_config(), context_.messageValidationVisitor(), *default_factory); Http::FilterFactoryCb default_config = default_factory->createFilterFactoryFromProto(*message, stats_prefix_, context_); - filter_config_provider->onConfigUpdate(default_config, ""); + filter_config_provider->onConfigUpdate(default_config, "", nullptr); } filter_factories.push_back(std::move(filter_config_provider)); } diff --git a/source/server/admin/admin.h b/source/server/admin/admin.h index 093ed76e4156..62bfb2f80acc 100644 --- a/source/server/admin/admin.h +++ b/source/server/admin/admin.h @@ -93,6 +93,7 @@ class AdminImpl : public Admin, Network::Address::InstanceConstSharedPtr address, const Network::Socket::OptionsSharedPtr& socket_options, Stats::ScopePtr&& listener_scope) override; + uint32_t concurrency() const override { return server_.options().concurrency(); } // Network::FilterChainManager const Network::FilterChain* findFilterChain(const Network::ConnectionSocket&) const override { diff --git a/source/server/config_validation/admin.h b/source/server/config_validation/admin.h index 9eabd7f40b9f..14cd04d93713 100644 --- a/source/server/config_validation/admin.h +++ b/source/server/config_validation/admin.h @@ -27,6 +27,7 @@ class ValidationAdmin : public Admin { Http::Code request(absl::string_view path_and_query, absl::string_view method, Http::ResponseHeaderMap& response_headers, std::string& body) override; void addListenerToHandler(Network::ConnectionHandler* handler) override; + uint32_t concurrency() const override { return 1; } private: ConfigTrackerImpl config_tracker_; diff --git a/test/common/filter/http/filter_config_discovery_impl_test.cc b/test/common/filter/http/filter_config_discovery_impl_test.cc index bd25e662a593..2d7d7d0e00e6 100644 --- a/test/common/filter/http/filter_config_discovery_impl_test.cc +++ b/test/common/filter/http/filter_config_discovery_impl_test.cc @@ -28,6 +28,7 @@ using testing::_; using testing::InSequence; using testing::Invoke; +using testing::Return; using testing::ReturnRef; namespace Envoy { @@ -51,6 +52,8 @@ class FilterConfigDiscoveryTestBase : public testing::Test { ON_CALL(init_manager_, initialize(_)) .WillByDefault(Invoke( [this](const Init::Watcher& watcher) { init_target_handle_->initialize(watcher); })); + // Thread local storage assumes a single (main) thread with no workers. 
+ ON_CALL(factory_context_.admin_, concurrency()).WillByDefault(Return(0)); } Event::SimulatedTimeSystem& timeSystem() { return time_system_; } diff --git a/test/mocks/server/admin.cc b/test/mocks/server/admin.cc index 435c14f6e973..2f873c547633 100644 --- a/test/mocks/server/admin.cc +++ b/test/mocks/server/admin.cc @@ -7,6 +7,7 @@ namespace Envoy { namespace Server { MockAdmin::MockAdmin() { ON_CALL(*this, getConfigTracker()).WillByDefault(testing::ReturnRef(config_tracker_)); + ON_CALL(*this, concurrency()).WillByDefault(testing::Return(1)); } MockAdmin::~MockAdmin() = default; diff --git a/test/mocks/server/admin.h b/test/mocks/server/admin.h index 512e3286bfca..8805ee969709 100644 --- a/test/mocks/server/admin.h +++ b/test/mocks/server/admin.h @@ -31,6 +31,7 @@ class MockAdmin : public Admin { (absl::string_view path_and_query, absl::string_view method, Http::ResponseHeaderMap& response_headers, std::string& body)); MOCK_METHOD(void, addListenerToHandler, (Network::ConnectionHandler * handler)); + MOCK_METHOD(uint32_t, concurrency, (), (const)); ::testing::NiceMock config_tracker_; }; From da93a050509cce6fa38e9b101d2df6bb0327530c Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Wed, 29 Jul 2020 14:00:09 -0400 Subject: [PATCH 777/909] retry: removing wkn (#12332) Commit Message: removing well known names file for host retry extensions Risk Level: n/a Testing: n/a Docs Changes: n/a Signed-off-by: Alyssa Wilk --- source/extensions/retry/host/BUILD | 17 ----------- .../retry/host/omit_canary_hosts/BUILD | 1 - .../retry/host/omit_canary_hosts/config.h | 5 +--- .../retry/host/omit_host_metadata/BUILD | 1 - .../retry/host/omit_host_metadata/config.h | 5 +--- .../omit_host_metadata/omit_host_metadata.cc | 1 - .../retry/host/previous_hosts/BUILD | 1 - .../retry/host/previous_hosts/config.h | 5 +--- .../extensions/retry/host/well_known_names.h | 28 ------------------- .../host/omit_canary_hosts/config_test.cc | 5 ++-- .../retry/host/omit_host_metadata/BUILD | 1 - .../host/omit_host_metadata/config_test.cc | 3 +- .../retry/host/previous_hosts/config_test.cc | 5 ++-- test/per_file_coverage.sh | 2 -- 14 files changed, 8 insertions(+), 72 deletions(-) delete mode 100644 source/extensions/retry/host/BUILD delete mode 100644 source/extensions/retry/host/well_known_names.h diff --git a/source/extensions/retry/host/BUILD b/source/extensions/retry/host/BUILD deleted file mode 100644 index 06456dbbcb5e..000000000000 --- a/source/extensions/retry/host/BUILD +++ /dev/null @@ -1,17 +0,0 @@ -load( - "//bazel:envoy_build_system.bzl", - "envoy_cc_library", - "envoy_package", -) - -licenses(["notice"]) # Apache 2 - -envoy_package() - -envoy_cc_library( - name = "well_known_names", - hdrs = ["well_known_names.h"], - deps = [ - "//source/common/singleton:const_singleton", - ], -) diff --git a/source/extensions/retry/host/omit_canary_hosts/BUILD b/source/extensions/retry/host/omit_canary_hosts/BUILD index 1f4f6fed89a7..e8fc9840f156 100644 --- a/source/extensions/retry/host/omit_canary_hosts/BUILD +++ b/source/extensions/retry/host/omit_canary_hosts/BUILD @@ -26,7 +26,6 @@ envoy_cc_extension( ":omit_canary_hosts_predicate_lib", "//include/envoy/registry", "//include/envoy/upstream:retry_interface", - "//source/extensions/retry/host:well_known_names", "@envoy_api//envoy/config/retry/omit_canary_hosts/v2:pkg_cc_proto", ], ) diff --git a/source/extensions/retry/host/omit_canary_hosts/config.h b/source/extensions/retry/host/omit_canary_hosts/config.h index c34398003b82..d453bc8c8506 100644 --- 
a/source/extensions/retry/host/omit_canary_hosts/config.h +++ b/source/extensions/retry/host/omit_canary_hosts/config.h @@ -2,7 +2,6 @@ #include "envoy/upstream/retry.h" #include "extensions/retry/host/omit_canary_hosts/omit_canary_hosts.h" -#include "extensions/retry/host/well_known_names.h" namespace Envoy { namespace Extensions { @@ -17,9 +16,7 @@ class OmitCanaryHostsRetryPredicateFactory : public Upstream::RetryHostPredicate return std::make_shared(); } - std::string name() const override { - return RetryHostPredicateValues::get().OmitCanaryHostsPredicate; - } + std::string name() const override { return "envoy.retry_host_predicates.omit_canary_hosts"; } ProtobufTypes::MessagePtr createEmptyConfigProto() override { return std::make_unique< diff --git a/source/extensions/retry/host/omit_host_metadata/BUILD b/source/extensions/retry/host/omit_host_metadata/BUILD index f0e4013ecb94..09b01e08848c 100644 --- a/source/extensions/retry/host/omit_host_metadata/BUILD +++ b/source/extensions/retry/host/omit_host_metadata/BUILD @@ -29,7 +29,6 @@ envoy_cc_extension( "//include/envoy/registry", "//include/envoy/upstream:retry_interface", "//source/common/protobuf", - "//source/extensions/retry/host:well_known_names", "@envoy_api//envoy/extensions/retry/host/omit_host_metadata/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/retry/host/omit_host_metadata/config.h b/source/extensions/retry/host/omit_host_metadata/config.h index a510a0076cae..85cb0734692d 100644 --- a/source/extensions/retry/host/omit_host_metadata/config.h +++ b/source/extensions/retry/host/omit_host_metadata/config.h @@ -5,7 +5,6 @@ #include "envoy/upstream/retry.h" #include "extensions/retry/host/omit_host_metadata/omit_host_metadata.h" -#include "extensions/retry/host/well_known_names.h" namespace Envoy { namespace Extensions { @@ -17,9 +16,7 @@ class OmitHostsRetryPredicateFactory : public Upstream::RetryHostPredicateFactor Upstream::RetryHostPredicateSharedPtr createHostPredicate(const Protobuf::Message& config, uint32_t retry_count) override; - std::string name() const override { - return RetryHostPredicateValues::get().OmitHostMetadataPredicate; - } + std::string name() const override { return "envoy.retry_host_predicates.omit_host_metadata"; } ProtobufTypes::MessagePtr createEmptyConfigProto() override { return ProtobufTypes::MessagePtr( diff --git a/source/extensions/retry/host/omit_host_metadata/omit_host_metadata.cc b/source/extensions/retry/host/omit_host_metadata/omit_host_metadata.cc index 1eb21f52f971..91559571ef2e 100644 --- a/source/extensions/retry/host/omit_host_metadata/omit_host_metadata.cc +++ b/source/extensions/retry/host/omit_host_metadata/omit_host_metadata.cc @@ -1,7 +1,6 @@ #include "extensions/retry/host/omit_host_metadata/omit_host_metadata.h" #include "common/config/metadata.h" -#include "common/config/well_known_names.h" namespace Envoy { namespace Extensions { diff --git a/source/extensions/retry/host/previous_hosts/BUILD b/source/extensions/retry/host/previous_hosts/BUILD index 7ec06c64c705..ea9aa0d78f4e 100644 --- a/source/extensions/retry/host/previous_hosts/BUILD +++ b/source/extensions/retry/host/previous_hosts/BUILD @@ -26,7 +26,6 @@ envoy_cc_extension( ":previous_hosts_predicate_lib", "//include/envoy/registry", "//include/envoy/upstream:retry_interface", - "//source/extensions/retry/host:well_known_names", "@envoy_api//envoy/config/retry/previous_hosts/v2:pkg_cc_proto", ], ) diff --git a/source/extensions/retry/host/previous_hosts/config.h 
b/source/extensions/retry/host/previous_hosts/config.h index d01261adb8d1..201290d5be99 100644 --- a/source/extensions/retry/host/previous_hosts/config.h +++ b/source/extensions/retry/host/previous_hosts/config.h @@ -4,7 +4,6 @@ #include "envoy/upstream/retry.h" #include "extensions/retry/host/previous_hosts/previous_hosts.h" -#include "extensions/retry/host/well_known_names.h" namespace Envoy { namespace Extensions { @@ -18,9 +17,7 @@ class PreviousHostsRetryPredicateFactory : public Upstream::RetryHostPredicateFa return std::make_shared(retry_count); } - std::string name() const override { - return RetryHostPredicateValues::get().PreviousHostsPredicate; - } + std::string name() const override { return "envoy.retry_host_predicates.previous_hosts"; } ProtobufTypes::MessagePtr createEmptyConfigProto() override { return std::make_unique(); diff --git a/source/extensions/retry/host/well_known_names.h b/source/extensions/retry/host/well_known_names.h deleted file mode 100644 index fc009573c43d..000000000000 --- a/source/extensions/retry/host/well_known_names.h +++ /dev/null @@ -1,28 +0,0 @@ -#pragma once - -#include - -#include "common/singleton/const_singleton.h" - -namespace Envoy { -namespace Extensions { -namespace Retry { -namespace Host { - -/** - * Well-known retry host predicate names. - */ -class RetryHostPredicatesNameValues { -public: - // Previous host predicate. Rejects hosts that have already been tried. - const std::string PreviousHostsPredicate = "envoy.retry_host_predicates.previous_hosts"; - const std::string OmitCanaryHostsPredicate = "envoy.retry_host_predicates.omit_canary_hosts"; - const std::string OmitHostMetadataPredicate = "envoy.retry_host_predicates.omit_host_metadata"; -}; - -using RetryHostPredicateValues = ConstSingleton; - -} // namespace Host -} // namespace Retry -} // namespace Extensions -} // namespace Envoy diff --git a/test/extensions/retry/host/omit_canary_hosts/config_test.cc b/test/extensions/retry/host/omit_canary_hosts/config_test.cc index df042d83e9f5..5ce5870057ee 100644 --- a/test/extensions/retry/host/omit_canary_hosts/config_test.cc +++ b/test/extensions/retry/host/omit_canary_hosts/config_test.cc @@ -2,7 +2,6 @@ #include "envoy/upstream/retry.h" #include "extensions/retry/host/omit_canary_hosts/config.h" -#include "extensions/retry/host/well_known_names.h" #include "test/mocks/upstream/mocks.h" @@ -19,7 +18,7 @@ namespace { TEST(OmitCanaryHostsRetryPredicateTest, PredicateTest) { auto factory = Registry::FactoryRegistry::getFactory( - RetryHostPredicateValues::get().OmitCanaryHostsPredicate); + "envoy.retry_host_predicates.omit_canary_hosts"); ASSERT_NE(nullptr, factory); @@ -39,7 +38,7 @@ TEST(OmitCanaryHostsRetryPredicateTest, PredicateTest) { TEST(OmitCanaryHostsRetryPredicateTest, EmptyConfig) { auto factory = Registry::FactoryRegistry::getFactory( - RetryHostPredicateValues::get().OmitCanaryHostsPredicate); + "envoy.retry_host_predicates.omit_canary_hosts"); ASSERT_NE(nullptr, factory); diff --git a/test/extensions/retry/host/omit_host_metadata/BUILD b/test/extensions/retry/host/omit_host_metadata/BUILD index 37030ee17c44..feb3fbbdeff2 100644 --- a/test/extensions/retry/host/omit_host_metadata/BUILD +++ b/test/extensions/retry/host/omit_host_metadata/BUILD @@ -16,7 +16,6 @@ envoy_extension_cc_test( srcs = ["config_test.cc"], extension_name = "envoy.retry_host_predicates.omit_host_metadata", deps = [ - "//source/extensions/retry/host:well_known_names", "//source/extensions/retry/host/omit_host_metadata:config", 
"//test/mocks/upstream:upstream_mocks", "@envoy_api//envoy/extensions/retry/host/omit_host_metadata/v3:pkg_cc_proto", diff --git a/test/extensions/retry/host/omit_host_metadata/config_test.cc b/test/extensions/retry/host/omit_host_metadata/config_test.cc index f69394bac9d7..039f9d231eb0 100644 --- a/test/extensions/retry/host/omit_host_metadata/config_test.cc +++ b/test/extensions/retry/host/omit_host_metadata/config_test.cc @@ -3,7 +3,6 @@ #include "envoy/upstream/retry.h" #include "extensions/retry/host/omit_host_metadata/omit_host_metadata.h" -#include "extensions/retry/host/well_known_names.h" #include "test/mocks/upstream/mocks.h" @@ -20,7 +19,7 @@ namespace { TEST(OmitHostsRetryPredicateTest, PredicateTest) { auto factory = Registry::FactoryRegistry::getFactory( - RetryHostPredicateValues::get().OmitHostMetadataPredicate); + "envoy.retry_host_predicates.omit_host_metadata"); ASSERT_NE(nullptr, factory); diff --git a/test/extensions/retry/host/previous_hosts/config_test.cc b/test/extensions/retry/host/previous_hosts/config_test.cc index 84ccaad32a69..ed97542fab7b 100644 --- a/test/extensions/retry/host/previous_hosts/config_test.cc +++ b/test/extensions/retry/host/previous_hosts/config_test.cc @@ -2,7 +2,6 @@ #include "envoy/upstream/retry.h" #include "extensions/retry/host/previous_hosts/config.h" -#include "extensions/retry/host/well_known_names.h" #include "test/mocks/upstream/mocks.h" @@ -19,7 +18,7 @@ namespace { TEST(PreviousHostsRetryPredicateConfigTest, PredicateTest) { auto factory = Registry::FactoryRegistry::getFactory( - RetryHostPredicateValues::get().PreviousHostsPredicate); + "envoy.retry_host_predicates.previous_hosts"); ASSERT_NE(nullptr, factory); @@ -50,7 +49,7 @@ TEST(PreviousHostsRetryPredicateConfigTest, PredicateTest) { TEST(PreviousHostsRetryPredicateConfigTest, EmptyConfig) { auto factory = Registry::FactoryRegistry::getFactory( - RetryHostPredicateValues::get().PreviousHostsPredicate); + "envoy.retry_host_predicates.previous_hosts"); ASSERT_NE(nullptr, factory); diff --git a/test/per_file_coverage.sh b/test/per_file_coverage.sh index ba62f6adcd92..09452a44bd25 100755 --- a/test/per_file_coverage.sh +++ b/test/per_file_coverage.sh @@ -54,8 +54,6 @@ declare -a KNOWN_LOW_COVERAGE=( "source/extensions/health_checkers/redis:95.9" "source/extensions/quic_listeners:84.8" "source/extensions/quic_listeners/quiche:84.8" -"source/extensions/retry:95.5" -"source/extensions/retry/host:85.7" "source/extensions/stat_sinks/statsd:85.2" "source/extensions/tracers/opencensus:91.2" "source/extensions/tracers/xray:95.3" From 6dabdb393e695928f7e6777445d722fe2f625413 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Guti=C3=A9rrez=20Segal=C3=A9s?= Date: Wed, 29 Jul 2020 14:22:23 -0400 Subject: [PATCH 778/909] zookeeper: convert ASSERT() into check (#12308) A malformed packet or buggy server could trigger the ASSERT(), so let's deal with it. 
Reported-by: @jianwen612 Signed-off-by: Raul Gutierrez Segales --- .../filters/network/zookeeper_proxy/decoder.cc | 13 ++++++++----- .../filters/network/zookeeper_proxy/filter_test.cc | 12 ++++++++++++ 2 files changed, 20 insertions(+), 5 deletions(-) diff --git a/source/extensions/filters/network/zookeeper_proxy/decoder.cc b/source/extensions/filters/network/zookeeper_proxy/decoder.cc index b3b40c3ab9d9..56877b8ac82e 100644 --- a/source/extensions/filters/network/zookeeper_proxy/decoder.cc +++ b/source/extensions/filters/network/zookeeper_proxy/decoder.cc @@ -176,15 +176,18 @@ void DecoderImpl::decodeOnWrite(Buffer::Instance& data, uint64_t& offset) { const auto xid = helper_.peekInt32(data, offset); const auto xid_code = static_cast(xid); - // Find the corresponding request for this XID. - const auto it = requests_by_xid_.find(xid); - std::chrono::milliseconds latency; OpCodes opcode; if (xid_code != XidCodes::WatchXid) { - // If this fails, it's a server-side bug. - ASSERT(it != requests_by_xid_.end()); + // Find the corresponding request for this XID. + const auto it = requests_by_xid_.find(xid); + + // If this fails, it's either a server-side bug or a malformed packet. + if (it == requests_by_xid_.end()) { + throw EnvoyException("xid not found"); + } + latency = std::chrono::duration_cast(time_source_.monotonicTime() - it->second.start_time); opcode = it->second.opcode; diff --git a/test/extensions/filters/network/zookeeper_proxy/filter_test.cc b/test/extensions/filters/network/zookeeper_proxy/filter_test.cc index f818f403a0af..3a6eefd8e28b 100644 --- a/test/extensions/filters/network/zookeeper_proxy/filter_test.cc +++ b/test/extensions/filters/network/zookeeper_proxy/filter_test.cc @@ -953,6 +953,18 @@ TEST_F(ZooKeeperFilterTest, WatchEvent) { EXPECT_EQ(0UL, config_->stats().decoder_error_.value()); } +TEST_F(ZooKeeperFilterTest, MissingXid) { + initialize(); + + const auto& stat = config_->stats().getdata_resp_; + Buffer::OwnedImpl data = encodeResponseHeader(1000, 2000, 0); + + EXPECT_EQ(Envoy::Network::FilterStatus::Continue, filter_->onWrite(data, false)); + EXPECT_EQ(0UL, stat.value()); + EXPECT_EQ(0UL, config_->stats().response_bytes_.value()); + EXPECT_EQ(1UL, config_->stats().decoder_error_.value()); +} + } // namespace ZooKeeperProxy } // namespace NetworkFilters } // namespace Extensions From 929c03bdb8a5744841df35aa5de2131e1613c543 Mon Sep 17 00:00:00 2001 From: "Adi (Suissa) Peleg" Date: Wed, 29 Jul 2020 14:56:47 -0400 Subject: [PATCH 779/909] fuzz: cap stats utility_fuzz_test iterations (#12296) Commit Message: Limiting the number of a loop iterations in stats/utility_fuzz_test. Additional Description: This avoids test timeout when the input is large. Signed-off-by: Adi Suissa-Peleg --- test/common/stats/utility_fuzz_test.cc | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/test/common/stats/utility_fuzz_test.cc b/test/common/stats/utility_fuzz_test.cc index 7d34966a7ada..e61257e2d191 100644 --- a/test/common/stats/utility_fuzz_test.cc +++ b/test/common/stats/utility_fuzz_test.cc @@ -12,6 +12,14 @@ namespace Envoy { namespace Fuzz { +namespace { + +// The maximum number of iterations the fuzz test can run until stopped. This is +// to avoid lengthy tests and timeouts. 
+constexpr size_t MaxIterations = 1000; + +} // namespace + DEFINE_FUZZER(const uint8_t* buf, size_t len) { Stats::Utility::sanitizeStatsName(absl::string_view(reinterpret_cast(buf), len)); @@ -52,8 +60,10 @@ DEFINE_FUZZER(const uint8_t* buf, size_t len) { Stats::Utility::counterFromStatNames(*scope, {}); Stats::Utility::counterFromElements(*scope, {}); } else { - // add random length string in each loop - while (provider.remaining_bytes() > 3) { + // Run until either running out of strings to process or a maximal number of + // iterations is reached. + for (size_t iter = 0; iter < MaxIterations && provider.remaining_bytes() > 3; iter++) { + // add random length string in each loop if (provider.ConsumeBool()) { absl::string_view str = make_string( provider.ConsumeRandomLengthString(std::min(max_len, provider.remaining_bytes()))); From c694e470dbc9a1dd24b296d117330f0e5becb2c9 Mon Sep 17 00:00:00 2001 From: Nupur Garg <37600866+gargnupur@users.noreply.github.com> Date: Wed, 29 Jul 2020 12:58:38 -0700 Subject: [PATCH 780/909] Add upstream_local_address and upstream_transport_failure_reason fields in cel filter extension (#12316) Commit Message: Add upstream_local_address and upstream_transport_failure_reason fields in cel filter extension Additional Description: Risk Level: Low Testing: Added unit test Docs Changes: Release Notes: Signed-off-by: gargnupur --- .../arch_overview/security/rbac_filter.rst | 2 ++ .../extensions/filters/common/expr/context.cc | 7 +++++++ .../extensions/filters/common/expr/context.h | 2 ++ .../filters/common/expr/context_test.cc | 20 +++++++++++++++++++ 4 files changed, 31 insertions(+) diff --git a/docs/root/intro/arch_overview/security/rbac_filter.rst b/docs/root/intro/arch_overview/security/rbac_filter.rst index abccdbee6dd5..fc98580e4f84 100644 --- a/docs/root/intro/arch_overview/security/rbac_filter.rst +++ b/docs/root/intro/arch_overview/security/rbac_filter.rst @@ -108,6 +108,8 @@ The following attributes are exposed to the language runtime: upstream.dns_san_peer_certificate, string, The first DNS entry in the SAN field of the peer certificate in the upstream TLS connection upstream.uri_san_local_certificate, string, The first URI entry in the SAN field of the local certificate in the upstream TLS connection upstream.uri_san_peer_certificate, string, The first URI entry in the SAN field of the peer certificate in the upstream TLS connection + upstream.local_address, string, The local address of the upstream connection + upstream.transport_failure_reason, string, The upstream transport failure reason e.g. certificate validation failed Most attributes are optional and provide the default value based on the type of the attribute. 
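For illustration only (this expression is not part of the patch): a CEL condition consuming the newly documented attribute could be written as upstream.transport_failure_reason != "", which evaluates to true only when a transport-level failure reason, such as a certificate validation error, was recorded for the upstream connection; when no failure occurred the attribute falls back to the empty-string default mentioned above.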
diff --git a/source/extensions/filters/common/expr/context.cc b/source/extensions/filters/common/expr/context.cc index 97420096adcd..17a0bd88a570 100644 --- a/source/extensions/filters/common/expr/context.cc +++ b/source/extensions/filters/common/expr/context.cc @@ -190,6 +190,13 @@ absl::optional UpstreamWrapper::operator[](CelValue key) const { upstream_host->address()->ip() != nullptr) { return CelValue::CreateInt64(upstream_host->address()->ip()->port()); } + } else if (value == UpstreamLocalAddress) { + auto upstream_local_address = info_.upstreamLocalAddress(); + if (upstream_local_address != nullptr) { + return CelValue::CreateStringView(upstream_local_address->asStringView()); + } + } else if (value == UpstreamTransportFailureReason) { + return CelValue::CreateStringView(info_.upstreamTransportFailureReason()); } auto ssl_info = info_.upstreamSslConnection(); diff --git a/source/extensions/filters/common/expr/context.h b/source/extensions/filters/common/expr/context.h index f3a2aed0cef5..8c06b86ce7cb 100644 --- a/source/extensions/filters/common/expr/context.h +++ b/source/extensions/filters/common/expr/context.h @@ -66,6 +66,8 @@ constexpr absl::string_view Destination = "destination"; // Upstream properties constexpr absl::string_view Upstream = "upstream"; +constexpr absl::string_view UpstreamLocalAddress = "local_address"; +constexpr absl::string_view UpstreamTransportFailureReason = "transport_failure_reason"; class RequestWrapper; diff --git a/test/extensions/filters/common/expr/context_test.cc b/test/extensions/filters/common/expr/context_test.cc index 9ce4c6fcc756..e187a54d7080 100644 --- a/test/extensions/filters/common/expr/context_test.cc +++ b/test/extensions/filters/common/expr/context_test.cc @@ -367,6 +367,8 @@ TEST(Context, ConnectionAttributes) { Network::Utility::parseInternetAddress("10.20.30.40", 456, false); Network::Address::InstanceConstSharedPtr upstream_address = Network::Utility::parseInternetAddress("10.1.2.3", 679, false); + Network::Address::InstanceConstSharedPtr upstream_local_address = + Network::Utility::parseInternetAddress("10.1.2.3", 1000, false); const std::string sni_name = "kittens.com"; EXPECT_CALL(info, downstreamLocalAddress()).WillRepeatedly(ReturnRef(local)); EXPECT_CALL(info, downstreamRemoteAddress()).WillRepeatedly(ReturnRef(remote)); @@ -374,6 +376,10 @@ TEST(Context, ConnectionAttributes) { EXPECT_CALL(info, upstreamSslConnection()).WillRepeatedly(Return(upstream_ssl_info)); EXPECT_CALL(info, upstreamHost()).WillRepeatedly(Return(upstream_host)); EXPECT_CALL(info, requestedServerName()).WillRepeatedly(ReturnRef(sni_name)); + EXPECT_CALL(info, upstreamLocalAddress()).WillRepeatedly(ReturnRef(upstream_local_address)); + const std::string upstream_transport_failure_reason = "ConnectionTermination"; + EXPECT_CALL(info, upstreamTransportFailureReason()) + .WillRepeatedly(ReturnRef(upstream_transport_failure_reason)); EXPECT_CALL(*downstream_ssl_info, peerCertificatePresented()).WillRepeatedly(Return(true)); EXPECT_CALL(*upstream_host, address()).WillRepeatedly(Return(upstream_address)); @@ -577,6 +583,20 @@ TEST(Context, ConnectionAttributes) { ASSERT_TRUE(value.value().IsString()); EXPECT_EQ(subject_peer, value.value().StringOrDie().value()); } + + { + auto value = upstream[CelValue::CreateStringView(UpstreamLocalAddress)]; + EXPECT_TRUE(value.has_value()); + ASSERT_TRUE(value.value().IsString()); + EXPECT_EQ(upstream_local_address->asStringView(), value.value().StringOrDie().value()); + } + + { + auto value = 
upstream[CelValue::CreateStringView(UpstreamTransportFailureReason)]; + EXPECT_TRUE(value.has_value()); + ASSERT_TRUE(value.value().IsString()); + EXPECT_EQ(upstream_transport_failure_reason, value.value().StringOrDie().value()); + } } } // namespace From 7287fad48f49cae45ce897643622fbf989c3c6c8 Mon Sep 17 00:00:00 2001 From: Christoph Pakulski Date: Wed, 29 Jul 2020 16:24:51 -0400 Subject: [PATCH 781/909] postgres: create metadata based on SQL query (#11620) Create metadata similar to MySQL based on SQL query sent by Postgres client. The metadata may be used by other filters like RBAC. Risk Level: Low. Testing: Added unit tests. Docs Changes: Yes - updated Postgres section. Release Notes: Yes. Fixes #11065 Signed-off-by: Christoph Pakulski --- .../v3alpha/postgres_proxy.proto | 7 ++ .../advanced/well_known_dynamic_metadata.rst | 1 + .../network_filters/postgres_proxy_filter.rst | 32 +++++- docs/root/version_history/current.rst | 1 + .../v3alpha/postgres_proxy.proto | 7 ++ source/extensions/common/sqlutils/sqlutils.cc | 13 ++- source/extensions/common/sqlutils/sqlutils.h | 7 +- .../network/mysql_proxy/mysql_decoder.h | 9 ++ .../network/mysql_proxy/mysql_filter.cc | 5 +- .../filters/network/postgres_proxy/BUILD | 2 + .../filters/network/postgres_proxy/config.cc | 3 +- .../filters/network/postgres_proxy/config.h | 2 +- .../postgres_proxy/postgres_decoder.cc | 56 +++++++-- .../network/postgres_proxy/postgres_decoder.h | 15 +++ .../network/postgres_proxy/postgres_filter.cc | 34 +++++- .../network/postgres_proxy/postgres_filter.h | 9 +- .../filters/network/well_known_names.h | 2 +- .../common/sqlutils/sqlutils_test.cc | 101 +++++++++-------- .../postgres_proxy/postgres_decoder_test.cc | 106 +++++++++++++++++- .../postgres_proxy/postgres_filter_test.cc | 71 +++++++++++- 20 files changed, 409 insertions(+), 74 deletions(-) diff --git a/api/envoy/extensions/filters/network/postgres_proxy/v3alpha/postgres_proxy.proto b/api/envoy/extensions/filters/network/postgres_proxy/v3alpha/postgres_proxy.proto index 61f3ec45c883..aa8e0f5941bf 100644 --- a/api/envoy/extensions/filters/network/postgres_proxy/v3alpha/postgres_proxy.proto +++ b/api/envoy/extensions/filters/network/postgres_proxy/v3alpha/postgres_proxy.proto @@ -2,6 +2,8 @@ syntax = "proto3"; package envoy.extensions.filters.network.postgres_proxy.v3alpha; +import "google/protobuf/wrappers.proto"; + import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -22,4 +24,9 @@ message PostgresProxy { // The human readable prefix to use when emitting :ref:`statistics // `. string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; + + // Controls whether SQL statements received in Frontend Query messages + // are parsed. Parsing is required to produce Postgres proxy filter + // metadata. Defaults to true. 
+ google.protobuf.BoolValue enable_sql_parsing = 2; } diff --git a/docs/root/configuration/advanced/well_known_dynamic_metadata.rst b/docs/root/configuration/advanced/well_known_dynamic_metadata.rst index 1eb74e6c920c..0088a85d9b94 100644 --- a/docs/root/configuration/advanced/well_known_dynamic_metadata.rst +++ b/docs/root/configuration/advanced/well_known_dynamic_metadata.rst @@ -17,6 +17,7 @@ The following Envoy filters emit dynamic metadata that other filters can leverag * :ref:`External Authorization Network Filter ` * :ref:`Mongo Proxy Filter ` * :ref:`MySQL Proxy Filter ` +* :ref:`Postgres Proxy Filter ` * :ref:`Role Based Access Control (RBAC) Filter ` * :ref:`Role Based Access Control (RBAC) Network Filter ` * :ref:`ZooKeeper Proxy Filter ` diff --git a/docs/root/configuration/listeners/network_filters/postgres_proxy_filter.rst b/docs/root/configuration/listeners/network_filters/postgres_proxy_filter.rst index f2bdf3391467..eb9ffb93c79d 100644 --- a/docs/root/configuration/listeners/network_filters/postgres_proxy_filter.rst +++ b/docs/root/configuration/listeners/network_filters/postgres_proxy_filter.rst @@ -4,9 +4,11 @@ Postgres proxy ================ The Postgres proxy filter decodes the wire protocol between a Postgres client (downstream) and a Postgres server -(upstream). The decoded information is currently used only to produce Postgres level statistics like sessions, -statements or transactions executed, among others. This current version does not decode SQL queries. Future versions may -add more statistics and more advanced capabilities. When the Postgres filter detects that a session is encrypted, the messages are ignored and no decoding takes +(upstream). The decoded information is used to produce Postgres level statistics like sessions, +statements or transactions executed, among others. The Postgres proxy filter parses SQL queries carried in ``Query`` and ``Parse`` messages. +When SQL query has been parsed successfully, the :ref:`metadata ` is created, +which may be used by other filters like :ref:`RBAC `. +When the Postgres filter detects that a session is encrypted, the messages are ignored and no decoding takes place. More information: * Postgres :ref:`architecture overview ` @@ -78,6 +80,8 @@ Every configured Postgres proxy filter has statistics rooted at postgres., string, The resource name in *table.db* format. + [], list, A list of strings representing the operations executed on the resource. Operations can be one of insert/update/select/drop/delete/create/alter/show. + +.. attention:: + + Currently used parser does not successfully parse all SQL statements and it cannot be assumed that all SQL queries will successfully produce Dynamic Metadata. + Creating Dynamic Metadata from SQL queries is on best-effort basis at the moment. If parsing of an SQL query fails, ``statements_parse_error`` counter is increased, log message is created, Dynamic Metadata is not + produced, but the Postgres message is still forwarded to upstream Postgres server. + +Parsing SQL statements and emitting Dynamic Metadata can be disabled by setting :ref:`enable_sql_parsing` to false. diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 7adc7a3e0a07..25ff97319359 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -47,6 +47,7 @@ New Features * http: added support for :ref:`%DOWNSTREAM_PEER_FINGERPRINT_1% ` as custom header. 
* http: introduced new HTTP/1 and HTTP/2 codec implementations that will remove the use of exceptions for control flow due to high risk factors and instead use error statuses. The old behavior is deprecated, but can be used during the removal period by setting the runtime feature `envoy.reloadable_features.new_codec_behavior` to false. The removal period will be one month. * load balancer: added a :ref:`configuration` option to specify the active request bias used by the least request load balancer. +* postgres network filter: :ref:`metadata ` is produced based on SQL query. * redis: added fault injection support :ref:`fault injection for redis proxy `, described further in :ref:`configuration documentation `. * router: added new :ref:`envoy-ratelimited` diff --git a/generated_api_shadow/envoy/extensions/filters/network/postgres_proxy/v3alpha/postgres_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/postgres_proxy/v3alpha/postgres_proxy.proto index 61f3ec45c883..aa8e0f5941bf 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/postgres_proxy/v3alpha/postgres_proxy.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/postgres_proxy/v3alpha/postgres_proxy.proto @@ -2,6 +2,8 @@ syntax = "proto3"; package envoy.extensions.filters.network.postgres_proxy.v3alpha; +import "google/protobuf/wrappers.proto"; + import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -22,4 +24,9 @@ message PostgresProxy { // The human readable prefix to use when emitting :ref:`statistics // `. string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; + + // Controls whether SQL statements received in Frontend Query messages + // are parsed. Parsing is required to produce Postgres proxy filter + // metadata. Defaults to true. + google.protobuf.BoolValue enable_sql_parsing = 2; } diff --git a/source/extensions/common/sqlutils/sqlutils.cc b/source/extensions/common/sqlutils/sqlutils.cc index 023a5529e083..64b5438111be 100644 --- a/source/extensions/common/sqlutils/sqlutils.cc +++ b/source/extensions/common/sqlutils/sqlutils.cc @@ -5,14 +5,23 @@ namespace Extensions { namespace Common { namespace SQLUtils { -bool SQLUtils::setMetadata(const std::string& query, ProtobufWkt::Struct& metadata) { +bool SQLUtils::setMetadata(const std::string& query, const DecoderAttributes& attr, + ProtobufWkt::Struct& metadata) { hsql::SQLParserResult result; + hsql::SQLParser::parse(query, &result); if (!result.isValid()) { return false; } + std::string database; + // Check if the attributes map contains database name. + const auto it = attr.find("database"); + if (it != attr.end()) { + database = absl::StrCat(".", it->second); + } + auto& fields = *metadata.mutable_fields(); for (auto i = 0u; i < result.size(); ++i) { @@ -23,7 +32,7 @@ bool SQLUtils::setMetadata(const std::string& query, ProtobufWkt::Struct& metada // Get names of accessed tables. result.getStatement(i)->tablesAccessed(table_access_map); for (auto& it : table_access_map) { - auto& operations = *fields[it.first].mutable_list_value(); + auto& operations = *fields[it.first + database].mutable_list_value(); // For each table get names of operations performed on that table. 
for (const auto& ot : it.second) { operations.add_values()->set_string_value(ot); diff --git a/source/extensions/common/sqlutils/sqlutils.h b/source/extensions/common/sqlutils/sqlutils.h index 8519f21836fa..4e0c29131d53 100644 --- a/source/extensions/common/sqlutils/sqlutils.h +++ b/source/extensions/common/sqlutils/sqlutils.h @@ -9,15 +9,20 @@ namespace SQLUtils { class SQLUtils { public: + using DecoderAttributes = std::map; /** * Method parses SQL query string and writes output to metadata. * @param query supplies SQL statement. + * @param attr supplies attributes which cannot be extracted from SQL query but are + * required to create proper metadata. For example database name may be sent + * by a client when it initially connects to the server, not along each SQL query. * @param metadata supplies placeholder where metadata should be written. * @return True if parsing was successful and False if parsing failed. * If True was returned the metadata contains result of parsing. The results are * stored in metadata.mutable_fields. **/ - static bool setMetadata(const std::string& query, ProtobufWkt::Struct& metadata); + static bool setMetadata(const std::string& query, const DecoderAttributes& attr, + ProtobufWkt::Struct& metadata); }; } // namespace SQLUtils diff --git a/source/extensions/filters/network/mysql_proxy/mysql_decoder.h b/source/extensions/filters/network/mysql_proxy/mysql_decoder.h index ff11a613f87b..e5890d1a05ef 100644 --- a/source/extensions/filters/network/mysql_proxy/mysql_decoder.h +++ b/source/extensions/filters/network/mysql_proxy/mysql_decoder.h @@ -6,6 +6,7 @@ #include "common/buffer/buffer_impl.h" #include "common/common/logger.h" +#include "extensions/common/sqlutils/sqlutils.h" #include "extensions/filters/network/mysql_proxy/mysql_codec_clogin.h" #include "extensions/filters/network/mysql_proxy/mysql_codec_clogin_resp.h" #include "extensions/filters/network/mysql_proxy/mysql_codec_command.h" @@ -45,6 +46,14 @@ class Decoder { virtual void onData(Buffer::Instance& data) PURE; virtual MySQLSession& getSession() PURE; + + const Extensions::Common::SQLUtils::SQLUtils::DecoderAttributes& getAttributes() const { + return attributes_; + } + +protected: + // Decoder attributes. 
+ Extensions::Common::SQLUtils::SQLUtils::DecoderAttributes attributes_; }; using DecoderPtr = std::unique_ptr; diff --git a/source/extensions/filters/network/mysql_proxy/mysql_filter.cc b/source/extensions/filters/network/mysql_proxy/mysql_filter.cc index 648171e786e3..e66701ee8784 100644 --- a/source/extensions/filters/network/mysql_proxy/mysql_filter.cc +++ b/source/extensions/filters/network/mysql_proxy/mysql_filter.cc @@ -6,7 +6,6 @@ #include "common/common/assert.h" #include "common/common/logger.h" -#include "extensions/common/sqlutils/sqlutils.h" #include "extensions/filters/network/well_known_names.h" namespace Envoy { @@ -108,7 +107,9 @@ void MySQLFilter::onCommand(Command& command) { read_callbacks_->connection().streamInfo().dynamicMetadata(); ProtobufWkt::Struct metadata( (*dynamic_metadata.mutable_filter_metadata())[NetworkFilterNames::get().MySQLProxy]); - auto result = Common::SQLUtils::SQLUtils::setMetadata(command.getData(), metadata); + + auto result = Common::SQLUtils::SQLUtils::setMetadata(command.getData(), + decoder_->getAttributes(), metadata); ENVOY_CONN_LOG(trace, "mysql_proxy: query processed {}", read_callbacks_->connection(), command.getData()); diff --git a/source/extensions/filters/network/postgres_proxy/BUILD b/source/extensions/filters/network/postgres_proxy/BUILD index a3c13b7c1633..b2d7d2dcef11 100644 --- a/source/extensions/filters/network/postgres_proxy/BUILD +++ b/source/extensions/filters/network/postgres_proxy/BUILD @@ -33,6 +33,8 @@ envoy_cc_library( "//include/envoy/stats:stats_macros", "//source/common/buffer:buffer_lib", "//source/common/network:filter_lib", + "//source/extensions/common/sqlutils:sqlutils_lib", + "//source/extensions/filters/network:well_known_names", ], ) diff --git a/source/extensions/filters/network/postgres_proxy/config.cc b/source/extensions/filters/network/postgres_proxy/config.cc index 948ccd9f58a0..14180bc201b1 100644 --- a/source/extensions/filters/network/postgres_proxy/config.cc +++ b/source/extensions/filters/network/postgres_proxy/config.cc @@ -15,9 +15,10 @@ NetworkFilters::PostgresProxy::PostgresConfigFactory::createFilterFactoryFromPro ASSERT(!proto_config.stat_prefix().empty()); const std::string stat_prefix = fmt::format("postgres.{}", proto_config.stat_prefix()); + const bool enable_sql = PROTOBUF_GET_WRAPPED_OR_DEFAULT(proto_config, enable_sql_parsing, true); PostgresFilterConfigSharedPtr filter_config( - std::make_shared(stat_prefix, context.scope())); + std::make_shared(stat_prefix, enable_sql, context.scope())); return [filter_config](Network::FilterManager& filter_manager) -> void { filter_manager.addFilter(std::make_shared(filter_config)); }; diff --git a/source/extensions/filters/network/postgres_proxy/config.h b/source/extensions/filters/network/postgres_proxy/config.h index 4c5f1e4a8a50..eada27fed618 100644 --- a/source/extensions/filters/network/postgres_proxy/config.h +++ b/source/extensions/filters/network/postgres_proxy/config.h @@ -19,7 +19,7 @@ class PostgresConfigFactory : public Common::FactoryBase< envoy::extensions::filters::network::postgres_proxy::v3alpha::PostgresProxy> { public: - PostgresConfigFactory() : FactoryBase{NetworkFilterNames::get().Postgres} {} + PostgresConfigFactory() : FactoryBase{NetworkFilterNames::get().PostgresProxy} {} private: Network::FilterFactoryCb createFilterFactoryFromProtoTyped( diff --git a/source/extensions/filters/network/postgres_proxy/postgres_decoder.cc b/source/extensions/filters/network/postgres_proxy/postgres_decoder.cc index d4d3702c33a7..0aae15ce995f 
100644 --- a/source/extensions/filters/network/postgres_proxy/postgres_decoder.cc +++ b/source/extensions/filters/network/postgres_proxy/postgres_decoder.cc @@ -2,6 +2,8 @@ #include +#include "absl/strings/str_split.h" + namespace Envoy { namespace Extensions { namespace NetworkFilters { @@ -9,7 +11,7 @@ namespace PostgresProxy { void DecoderImpl::initialize() { // Special handler for first message of the transaction. - first_ = MsgProcessor{"Startup", {}}; + first_ = MsgProcessor{"Startup", {&DecoderImpl::onStartup}}; // Frontend messages. FE_messages_.direction_ = "Frontend"; @@ -17,7 +19,7 @@ void DecoderImpl::initialize() { // Setup handlers for known messages. absl::flat_hash_map& FE_known_msgs = FE_messages_.messages_; - // Handler for know messages. + // Handler for known Frontend messages. FE_known_msgs['B'] = MsgProcessor{"Bind", {}}; FE_known_msgs['C'] = MsgProcessor{"Close", {}}; FE_known_msgs['d'] = MsgProcessor{"CopyData", {}}; @@ -29,12 +31,12 @@ void DecoderImpl::initialize() { FE_known_msgs['F'] = MsgProcessor{"FunctionCall", {}}; FE_known_msgs['p'] = MsgProcessor{"PasswordMessage/GSSResponse/SASLInitialResponse/SASLResponse", {}}; - FE_known_msgs['P'] = MsgProcessor{"Parse", {}}; - FE_known_msgs['Q'] = MsgProcessor{"Query", {}}; + FE_known_msgs['P'] = MsgProcessor{"Parse", {&DecoderImpl::onParse}}; + FE_known_msgs['Q'] = MsgProcessor{"Query", {&DecoderImpl::onQuery}}; FE_known_msgs['S'] = MsgProcessor{"Sync", {}}; FE_known_msgs['X'] = MsgProcessor{"Terminate", {&DecoderImpl::decodeFrontendTerminate}}; - // Handler for unknown messages. + // Handler for unknown Frontend messages. FE_messages_.unknown_ = MsgProcessor{"Other", {&DecoderImpl::incMessagesUnknown}}; // Backend messages. @@ -43,7 +45,7 @@ void DecoderImpl::initialize() { // Setup handlers for known messages. absl::flat_hash_map& BE_known_msgs = BE_messages_.messages_; - // Handler for know messages. + // Handler for known Backend messages. BE_known_msgs['R'] = MsgProcessor{"Authentication", {&DecoderImpl::decodeAuthentication}}; BE_known_msgs['K'] = MsgProcessor{"BackendKeyData", {}}; BE_known_msgs['2'] = MsgProcessor{"BindComplete", {}}; @@ -68,7 +70,7 @@ void DecoderImpl::initialize() { BE_known_msgs['Z'] = MsgProcessor{"ReadyForQuery", {}}; BE_known_msgs['T'] = MsgProcessor{"RowDescription", {}}; - // Handler for unknown messages. + // Handler for unknown Backend messages. BE_messages_.unknown_ = MsgProcessor{"Other", {&DecoderImpl::incMessagesUnknown}}; // Setup hash map for handling backend statements. @@ -169,6 +171,7 @@ bool DecoderImpl::parseMessage(Buffer::Instance& data) { // The 1 byte message type and message length should be in the buffer // Check if the entire message has been read. std::string message; + uint32_t length = data.peekBEInt(startup_ ? 0 : 1); if (data.length() < (length + (startup_ ? 0 : 1))) { ENVOY_LOG(trace, "postgres_proxy: cannot parse message. Need {} bytes in buffer", @@ -190,6 +193,7 @@ bool DecoderImpl::parseMessage(Buffer::Instance& data) { return false; } else { ENVOY_LOG(debug, "Detected version {}.{} of Postgres", code >> 16, code & 0x0000FFFF); + // 4 bytes of length and 4 bytes of version code. } } @@ -324,6 +328,44 @@ void DecoderImpl::decodeBackendErrorResponse() { decodeErrorNotice(BE_errors_); // indicating its meaning. It can be warning, notice, info, debug or log. 
void DecoderImpl::decodeBackendNoticeResponse() { decodeErrorNotice(BE_notices_); } +// Method parses Parse message of the following format: +// String: The name of the destination prepared statement (an empty string selects the unnamed +// prepared statement). +// +// String: The query string to be parsed. +// +// Int16: The number of parameter data +// types specified (can be zero). Note that this is not an indication of the number of parameters +// that might appear in the query string, only the number that the frontend wants to pre-specify +// types for. Then, for each parameter, there is the following: +// +// Int32: Specifies the object ID of +// the parameter data type. Placing a zero here is equivalent to leaving the type unspecified. +void DecoderImpl::onParse() { + // The first two strings are separated by \0. + // The first string is optional. If no \0 is found it means + // that the message contains query string only. + std::vector query_parts = absl::StrSplit(message_, absl::ByChar('\0')); + callbacks_->processQuery(query_parts[1]); +} + +void DecoderImpl::onQuery() { callbacks_->processQuery(message_); } + +// Method is invoked on clear-text Startup message. +// The message format is continuous string of the following format: +// userdatabaseapplication_nameencoding +void DecoderImpl::onStartup() { + // First 4 bytes of startup message contains version code. + // It is skipped. After that message contains attributes. + attributes_ = absl::StrSplit(message_.substr(4), absl::ByChar('\0'), absl::SkipEmpty()); + + // If "database" attribute is not found, default it to "user" attribute. + if ((attributes_.find("database") == attributes_.end()) && + (attributes_.find("user") != attributes_.end())) { + attributes_["database"] = attributes_["user"]; + } +} + } // namespace PostgresProxy } // namespace NetworkFilters } // namespace Extensions diff --git a/source/extensions/filters/network/postgres_proxy/postgres_decoder.h b/source/extensions/filters/network/postgres_proxy/postgres_decoder.h index bd779a2c24ac..24465b55731f 100644 --- a/source/extensions/filters/network/postgres_proxy/postgres_decoder.h +++ b/source/extensions/filters/network/postgres_proxy/postgres_decoder.h @@ -6,6 +6,7 @@ #include "common/buffer/buffer_impl.h" #include "common/common/logger.h" +#include "extensions/common/sqlutils/sqlutils.h" #include "extensions/filters/network/postgres_proxy/postgres_session.h" #include "absl/container/flat_hash_map.h" @@ -39,6 +40,8 @@ class DecoderCallbacks { enum class ErrorType { Error, Fatal, Panic, Unknown }; virtual void incErrors(ErrorType) PURE; + + virtual void processQuery(const std::string&) PURE; }; // Postgres message decoder. @@ -48,6 +51,15 @@ class Decoder { virtual bool onData(Buffer::Instance& data, bool frontend) PURE; virtual PostgresSession& getSession() PURE; + + const Extensions::Common::SQLUtils::SQLUtils::DecoderAttributes& getAttributes() const { + return attributes_; + } + +protected: + // Decoder attributes extracted from Startup message. + // It can be username, database name, client app type, etc. 
+ Extensions::Common::SQLUtils::SQLUtils::DecoderAttributes attributes_; }; using DecoderPtr = std::unique_ptr; @@ -113,6 +125,9 @@ class DecoderImpl : public Decoder, Logger::Loggable { void decodeBackendNoticeResponse(); void decodeFrontendTerminate(); void decodeErrorNotice(MsgParserDict& types); + void onQuery(); + void onParse(); + void onStartup(); void incMessagesUnknown() { callbacks_->incMessagesUnknown(); } void incSessionsEncrypted() { callbacks_->incSessionsEncrypted(); } diff --git a/source/extensions/filters/network/postgres_proxy/postgres_filter.cc b/source/extensions/filters/network/postgres_proxy/postgres_filter.cc index c339de5dd47c..f66754c05101 100644 --- a/source/extensions/filters/network/postgres_proxy/postgres_filter.cc +++ b/source/extensions/filters/network/postgres_proxy/postgres_filter.cc @@ -4,14 +4,18 @@ #include "envoy/network/connection.h" #include "extensions/filters/network/postgres_proxy/postgres_decoder.h" +#include "extensions/filters/network/well_known_names.h" namespace Envoy { namespace Extensions { namespace NetworkFilters { namespace PostgresProxy { -PostgresFilterConfig::PostgresFilterConfig(const std::string& stat_prefix, Stats::Scope& scope) - : stat_prefix_{stat_prefix}, scope_{scope}, stats_{generateStats(stat_prefix, scope)} {} +PostgresFilterConfig::PostgresFilterConfig(const std::string& stat_prefix, bool enable_sql_parsing, + Stats::Scope& scope) + : stat_prefix_{stat_prefix}, + enable_sql_parsing_(enable_sql_parsing), scope_{scope}, stats_{generateStats(stat_prefix, + scope)} {} PostgresFilter::PostgresFilter(PostgresFilterConfigSharedPtr config) : config_{config} { if (!decoder_) { @@ -21,7 +25,8 @@ PostgresFilter::PostgresFilter(PostgresFilterConfigSharedPtr config) : config_{c // Network::ReadFilter Network::FilterStatus PostgresFilter::onData(Buffer::Instance& data, bool) { - ENVOY_CONN_LOG(trace, "echo: got {} bytes", read_callbacks_->connection(), data.length()); + ENVOY_CONN_LOG(trace, "postgres_proxy: got {} bytes", read_callbacks_->connection(), + data.length()); // Frontend Buffer frontend_buffer_.add(data); @@ -159,6 +164,29 @@ void PostgresFilter::incStatements(StatementType type) { } } +void PostgresFilter::processQuery(const std::string& sql) { + if (config_->enable_sql_parsing_) { + ProtobufWkt::Struct metadata; + + auto result = Common::SQLUtils::SQLUtils::setMetadata(sql, decoder_->getAttributes(), metadata); + + if (!result) { + config_->stats_.statements_parse_error_.inc(); + ENVOY_CONN_LOG(trace, "postgres_proxy: cannot parse SQL: {}", read_callbacks_->connection(), + sql.c_str()); + return; + } + + config_->stats_.statements_parsed_.inc(); + ENVOY_CONN_LOG(trace, "postgres_proxy: query processed {}", read_callbacks_->connection(), + sql.c_str()); + + // Set dynamic metadata + read_callbacks_->connection().streamInfo().setDynamicMetadata( + NetworkFilterNames::get().PostgresProxy, metadata); + } +} + void PostgresFilter::doDecode(Buffer::Instance& data, bool frontend) { // Keep processing data until buffer is empty or decoder says // that it cannot process data in the buffer. 
diff --git a/source/extensions/filters/network/postgres_proxy/postgres_filter.h b/source/extensions/filters/network/postgres_proxy/postgres_filter.h index 0355bea4b1f3..5571a0587c40 100644 --- a/source/extensions/filters/network/postgres_proxy/postgres_filter.h +++ b/source/extensions/filters/network/postgres_proxy/postgres_filter.h @@ -40,6 +40,8 @@ namespace PostgresProxy { COUNTER(transactions) \ COUNTER(transactions_commit) \ COUNTER(transactions_rollback) \ + COUNTER(statements_parsed) \ + COUNTER(statements_parse_error) \ COUNTER(notices) \ COUNTER(notices_notice) \ COUNTER(notices_warning) \ @@ -60,9 +62,11 @@ struct PostgresProxyStats { */ class PostgresFilterConfig { public: - PostgresFilterConfig(const std::string& stat_prefix, Stats::Scope& scope); + PostgresFilterConfig(const std::string& stat_prefix, bool enable_sql_parsing, + Stats::Scope& scope); const std::string stat_prefix_; + bool enable_sql_parsing_{true}; Stats::Scope& scope_; PostgresProxyStats stats_; @@ -101,6 +105,7 @@ class PostgresFilter : public Network::Filter, void incTransactions() override; void incTransactionsCommit() override; void incTransactionsRollback() override; + void processQuery(const std::string&) override; void doDecode(Buffer::Instance& data, bool); DecoderPtr createDecoder(DecoderCallbacks* callbacks); @@ -111,6 +116,8 @@ class PostgresFilter : public Network::Filter, uint32_t getFrontendBufLength() const { return frontend_buffer_.length(); } uint32_t getBackendBufLength() const { return backend_buffer_.length(); } const PostgresProxyStats& getStats() const { return config_->stats_; } + Network::Connection& connection() const { return read_callbacks_->connection(); } + const PostgresFilterConfigSharedPtr& getConfig() const { return config_; } private: Network::ReadFilterCallbacks* read_callbacks_{}; diff --git a/source/extensions/filters/network/well_known_names.h b/source/extensions/filters/network/well_known_names.h index bc626950ee4c..78564a5a990f 100644 --- a/source/extensions/filters/network/well_known_names.h +++ b/source/extensions/filters/network/well_known_names.h @@ -31,7 +31,7 @@ class NetworkFilterNameValues { // MySQL proxy filter const std::string MySQLProxy = "envoy.filters.network.mysql_proxy"; // Postgres proxy filter - const std::string Postgres = "envoy.filters.network.postgres_proxy"; + const std::string PostgresProxy = "envoy.filters.network.postgres_proxy"; // Rate limit filter const std::string RateLimit = "envoy.filters.network.ratelimit"; // Redis proxy filter diff --git a/test/extensions/common/sqlutils/sqlutils_test.cc b/test/extensions/common/sqlutils/sqlutils_test.cc index 74ce5c8dfef8..2ab95360367f 100644 --- a/test/extensions/common/sqlutils/sqlutils_test.cc +++ b/test/extensions/common/sqlutils/sqlutils_test.cc @@ -15,9 +15,11 @@ namespace SQLUtils { // The map is checked only when parsing was successful. Map is indexed by table name and points to // list of operations performed on the table. For example table1: "select", "insert" says that there // was SELECT and INSERT operations on table1. +// DecoderAttributes is a map containing additional attributes which augment creating metadata. class MetadataFromSQLTest : public ::testing::TestWithParam< - std::tuple>>> {}; + std::tuple>, + SQLUtils::DecoderAttributes>> {}; // Test takes SQL query as a parameter and checks if the parsing // produces the correct metadata. @@ -42,7 +44,8 @@ TEST_P(MetadataFromSQLTest, ParsingAndMetadataTest) { ProtobufWkt::Struct metadata; // Check if the parsing result is what expected. 
- ASSERT_EQ(std::get<1>(GetParam()), SQLUtils::setMetadata(test_query, metadata)); + ASSERT_EQ(std::get<1>(GetParam()), + SQLUtils::setMetadata(test_query, std::get<3>(GetParam()), metadata)); // If parsing was expected to fail do not check parsing values. if (!std::get<1>(GetParam())) { @@ -99,80 +102,86 @@ TEST_P(MetadataFromSQLTest, ParsingAndMetadataTest) { // before comparing. It however requires that all table names in the queries below use lowercase // only. #define TEST_VALUE(...) \ - std::tuple>> { __VA_ARGS__ } + std::tuple>, \ + SQLUtils::DecoderAttributes> { \ + __VA_ARGS__ \ + } INSTANTIATE_TEST_SUITE_P( SQLUtilsTestSuite, MetadataFromSQLTest, ::testing::Values( - TEST_VALUE("blahblah;", false, {}), + TEST_VALUE("blahblah;", false, {}, {}), TEST_VALUE("CREATE TABLE IF NOT EXISTS table1(Usr VARCHAR(40),Count INT);", true, - {{"table1", {"create"}}}), + {{"table1", {"create"}}}, {}), TEST_VALUE("CREATE TABLE IF NOT EXISTS `table number 1`(Usr VARCHAR(40),Count INT);", true, - {{"table number 1", {"create"}}}), + {{"table number 1.testdb", {"create"}}}, {{"database", "testdb"}}), TEST_VALUE( "CREATE TABLE IF NOT EXISTS table1(Usr VARCHAR(40),Count INT); SELECT * from table1;", - true, {{"table1", {"select", "create"}}}), + true, {{"table1", {"select", "create"}}}, {}), TEST_VALUE( "CREATE TABLE IF NOT EXISTS table1(Usr VARCHAR(40),Count INT); SELECT * from table2;", - true, {{"table1", {"create"}}, {"table2", {"select"}}}), + true, {{"table1", {"create"}}, {"table2", {"select"}}}, {{"user", "testusr"}}), TEST_VALUE("CREATE TABLE table1(Usr VARCHAR(40),Count INT);", true, - {{"table1", {"create"}}}), - TEST_VALUE("CREATE TABLE;", false, {}), + {{"table1", {"create"}}}, {}), + TEST_VALUE("CREATE TABLE;", false, {}, {}), TEST_VALUE("CREATE TEMPORARY table table1(Usr VARCHAR(40),Count INT);", true, - {{"table1", {"create"}}}), - TEST_VALUE("DROP TABLE IF EXISTS table1", true, {{"table1", {"drop"}}}), - TEST_VALUE("ALTER TABLE table1 add column Id varchar (20);", true, {{"table1", {"alter"}}}), + {{"table1", {"create"}}}, {}), + TEST_VALUE("DROP TABLE IF EXISTS table1", true, {{"table1", {"drop"}}}, {}), + TEST_VALUE("ALTER TABLE table1 add column Id varchar (20);", true, {{"table1", {"alter"}}}, + {}), TEST_VALUE("INSERT INTO table1 (Usr, Count) VALUES ('allsp2', 3);", true, - {{"table1", {"insert"}}}), + {{"table1", {"insert"}}}, {}), TEST_VALUE("INSERT LOW_PRIORITY INTO table1 (Usr, Count) VALUES ('allsp2', 3);", true, - {{"table1", {"insert"}}}), + {{"table1", {"insert"}}}, {}), TEST_VALUE("INSERT IGNORE INTO table1 (Usr, Count) VALUES ('allsp2', 3);", true, - {{"table1", {"insert"}}}), + {{"table1", {"insert"}}}, {}), TEST_VALUE("INSERT INTO table1 (Usr, Count) VALUES ('allsp2', 3);SELECT * from table1", - true, {{"table1", {"insert", "select"}}}), - TEST_VALUE("DELETE FROM table1 WHERE Count > 3;", true, {{"table1", {"delete"}}}), + true, {{"table1", {"insert", "select"}}}, {}), + TEST_VALUE("DELETE FROM table1 WHERE Count > 3;", true, {{"table1", {"delete"}}}, {}), TEST_VALUE("DELETE LOW_PRIORITY FROM table1 WHERE Count > 3;", true, - {{"table1", {"delete"}}}), - TEST_VALUE("DELETE QUICK FROM table1 WHERE Count > 3;", true, {{"table1", {"delete"}}}), - TEST_VALUE("DELETE IGNORE FROM table1 WHERE Count > 3;", true, {{"table1", {"delete"}}}), + {{"table1", {"delete"}}}, {}), + TEST_VALUE("DELETE QUICK FROM table1 WHERE Count > 3;", true, {{"table1", {"delete"}}}, {}), + TEST_VALUE("DELETE IGNORE FROM table1 WHERE Count > 3;", true, {{"table1", {"delete"}}}, + {}), - 
TEST_VALUE("SELECT * FROM table1 WHERE Count = 1;", true, {{"table1", {"select"}}}), - TEST_VALUE("SELECT * FROM table1 WHERE Count = 1;", true, {{"table1", {"select"}}}), + TEST_VALUE("SELECT * FROM table1 WHERE Count = 1;", true, {{"table1", {"select"}}}, {}), + TEST_VALUE("SELECT * FROM table1 WHERE Count = 1;", true, {{"table1", {"select"}}}, {}), TEST_VALUE("SELECT product.category FROM table1 WHERE Count = 1;", true, - {{"table1", {"select"}}, {"product", {"unknown"}}}), - TEST_VALUE("SELECT DISTINCT Usr FROM table1;", true, {{"table1", {"select"}}}), + {{"table1", {"select"}}, {"product", {"unknown"}}}, {}), + TEST_VALUE("SELECT DISTINCT Usr FROM table1;", true, {{"table1", {"select"}}}, {}), TEST_VALUE("SELECT Usr, Count FROM table1 ORDER BY Count DESC;", true, - {{"table1", {"select"}}}), - TEST_VALUE("SELECT 12 AS a, a FROM table1 GROUP BY a;", true, {{"table1", {"select"}}}), - TEST_VALUE("SELECT;", false, {}), TEST_VALUE("SELECT Usr, Count FROM;", false, {}), + {{"table1.testdb", {"select"}}}, {{"user", "testuser"}, {"database", "testdb"}}), + TEST_VALUE("SELECT 12 AS a, a FROM table1 GROUP BY a;", true, {{"table1", {"select"}}}, {}), + TEST_VALUE("SELECT;", false, {}, {}), TEST_VALUE("SELECT Usr, Count FROM;", false, {}, {}), TEST_VALUE("INSERT INTO table1 SELECT * FROM table2;", true, - {{"table1", {"insert"}}, {"table2", {"select"}}}), + {{"table1", {"insert"}}, {"table2", {"select"}}}, {}), TEST_VALUE("INSERT INTO table1 SELECT tbl_temp1.fld_order_id FROM table2;", true, - {{"tbl_temp1", {"unknown"}}, {"table2", {"select"}}, {"table1", {"insert"}}}), - TEST_VALUE("UPDATE table1 SET col1 = col1 + 1", true, {{"table1", {"update"}}}), - TEST_VALUE("UPDATE LOW_PRIORITY table1 SET col1 = col1 + 1", true, - {{"table1", {"update"}}}), - TEST_VALUE("UPDATE IGNORE table1 SET col1 = col1 + 1", true, {{"table1", {"update"}}}), + {{"tbl_temp1", {"unknown"}}, {"table2", {"select"}}, {"table1", {"insert"}}}, + {}), + TEST_VALUE("UPDATE table1 SET col1 = col1 + 1", true, {{"table1", {"update"}}}, {}), + TEST_VALUE("UPDATE LOW_PRIORITY table1 SET col1 = col1 + 1", true, {{"table1", {"update"}}}, + {}), + TEST_VALUE("UPDATE IGNORE table1 SET col1 = col1 + 1", true, {{"table1", {"update"}}}, {}), TEST_VALUE("UPDATE table1 SET column1=(SELECT * columnX from table2);", true, - {{"table1", {"update"}}, {"table2", {"select"}}}), + {{"table1", {"update"}}, {"table2", {"select"}}}, {}), // operations on database should not create any metadata - TEST_VALUE("CREATE DATABASE testdb;", true, {}), - TEST_VALUE("CREATE DATABASE IF NOT EXISTS testdb;", true, {}), - TEST_VALUE("ALTER DATABASE testdb CHARACTER SET charset_name;", true, {}), - TEST_VALUE("ALTER DATABASE testdb default CHARACTER SET charset_name;", true, {}), - TEST_VALUE("ALTER DATABASE testdb default CHARACTER SET = charset_name;", true, {}), - TEST_VALUE("ALTER SCHEMA testdb default CHARACTER SET = charset_name;", true, {}), + TEST_VALUE("CREATE DATABASE testdb;", true, {}, {}), + TEST_VALUE("CREATE DATABASE IF NOT EXISTS testdb;", true, {}, {}), + TEST_VALUE("ALTER DATABASE testdb CHARACTER SET charset_name;", true, {}, {}), + TEST_VALUE("ALTER DATABASE testdb default CHARACTER SET charset_name;", true, {}, {}), + TEST_VALUE("ALTER DATABASE testdb default CHARACTER SET = charset_name;", true, {}, {}), + TEST_VALUE("ALTER SCHEMA testdb default CHARACTER SET = charset_name;", true, {}, {}), // The following DROP DATABASE tests should not produce metadata. 
- TEST_VALUE("DROP DATABASE testdb;", true, {}), - TEST_VALUE("DROP DATABASE IF EXISTS testdb;", true, {}), + TEST_VALUE("DROP DATABASE testdb;", true, {}, {}), + TEST_VALUE("DROP DATABASE IF EXISTS testdb;", true, {}, {}), // Schema. Should be parsed fine, but should not produce any metadata - TEST_VALUE("SHOW databases;", true, {}), TEST_VALUE("SHOW tables;", true, {}), - TEST_VALUE("SELECT * FROM;", false, {}), - TEST_VALUE("SELECT 1 FROM tabletest1;", true, {{"tabletest1", {"select"}}}) + TEST_VALUE("SHOW databases;", true, {}, {}), TEST_VALUE("SHOW tables;", true, {}, {}), + TEST_VALUE("SELECT * FROM;", false, {}, {}), + TEST_VALUE("SELECT 1 FROM tabletest1;", true, {{"tabletest1", {"select"}}}, {}) )); diff --git a/test/extensions/filters/network/postgres_proxy/postgres_decoder_test.cc b/test/extensions/filters/network/postgres_proxy/postgres_decoder_test.cc index 0714e5fd9974..7a29494eb8ea 100644 --- a/test/extensions/filters/network/postgres_proxy/postgres_decoder_test.cc +++ b/test/extensions/filters/network/postgres_proxy/postgres_decoder_test.cc @@ -23,6 +23,7 @@ class DecoderCallbacksMock : public DecoderCallbacks { MOCK_METHOD(void, incTransactionsRollback, (), (override)); MOCK_METHOD(void, incNotices, (NoticeType), (override)); MOCK_METHOD(void, incErrors, (ErrorType), (override)); + MOCK_METHOD(void, processQuery, (const std::string&), (override)); }; // Define fixture class with decoder and mock callbacks. @@ -75,12 +76,36 @@ class PostgresProxyNoticeTest TEST_F(PostgresProxyDecoderTest, StartupMessage) { decoder_->setStartup(true); - // Start with length. - data_.writeBEInt(12); - // Add 8 bytes of some data. - data_.add(buf_, 8); + buf_[0] = '\0'; + // Startup message has the following structure: + // Length (4 bytes) - payload and length field + // version (4 bytes) + // Attributes: key/value pairs separated by '\0' + data_.writeBEInt(53); + // Add version code + data_.writeBEInt(0x00030000); + // user-postgres key-pair + data_.add("user"); // 4 bytes + data_.add(buf_, 1); + data_.add("postgres"); // 8 bytes + data_.add(buf_, 1); + // database-test-db key-pair + data_.add("database"); // 8 bytes + data_.add(buf_, 1); + data_.add("testdb"); // 6 bytes + data_.add(buf_, 1); + // Some other attribute + data_.add("attribute"); // 9 bytes + data_.add(buf_, 1); + data_.add("blah"); // 4 bytes + data_.add(buf_, 1); decoder_->onData(data_, true); ASSERT_THAT(data_.length(), 0); + // Verify parsing attributes + ASSERT_THAT(decoder_->getAttributes().at("user"), "postgres"); + ASSERT_THAT(decoder_->getAttributes().at("database"), "testdb"); + // This attribute should not be found + ASSERT_THAT(decoder_->getAttributes().find("no"), decoder_->getAttributes().end()); // Now feed normal message with 1bytes as command. data_.add("P"); @@ -91,6 +116,40 @@ TEST_F(PostgresProxyDecoderTest, StartupMessage) { ASSERT_THAT(data_.length(), 0); } +// Test verifies that when Startup message does not carry +// "database" attribute, it is derived from "user". 
+TEST_F(PostgresProxyDecoderTest, StartupMessageNoAttr) { + decoder_->setStartup(true); + + buf_[0] = '\0'; + // Startup message has the following structure: + // Length (4 bytes) - payload and length field + // version (4 bytes) + // Attributes: key/value pairs separated by '\0' + data_.writeBEInt(37); + // Add version code + data_.writeBEInt(0x00030000); + // user-postgres key-pair + data_.add("user"); // 4 bytes + data_.add(buf_, 1); + data_.add("postgres"); // 8 bytes + data_.add(buf_, 1); + // database-test-db key-pair + // Some other attribute + data_.add("attribute"); // 9 bytes + data_.add(buf_, 1); + data_.add("blah"); // 4 bytes + data_.add(buf_, 1); + decoder_->onData(data_, true); + ASSERT_THAT(data_.length(), 0); + + // Verify parsing attributes + ASSERT_THAT(decoder_->getAttributes().at("user"), "postgres"); + ASSERT_THAT(decoder_->getAttributes().at("database"), "postgres"); + // This attribute should not be found + ASSERT_THAT(decoder_->getAttributes().find("no"), decoder_->getAttributes().end()); +} + // Test processing messages which map 1:1 with buffer. // The buffer contains just a single entire message and // nothing more. @@ -181,7 +240,7 @@ TEST_F(PostgresProxyDecoderTest, Unknown) { // Test if each frontend command calls incMessagesFrontend() method. TEST_P(PostgresProxyFrontendDecoderTest, FrontendInc) { EXPECT_CALL(callbacks_, incMessagesFrontend()).Times(1); - createPostgresMsg(data_, GetParam(), "Some message just to create payload"); + createPostgresMsg(data_, GetParam(), "SELECT 1;"); decoder_->onData(data_, true); } @@ -206,6 +265,43 @@ TEST_F(PostgresProxyFrontendDecoderTest, TerminateMessage) { ASSERT_FALSE(decoder_->getSession().inTransaction()); } +// Query message should invoke filter's callback message +TEST_F(PostgresProxyFrontendDecoderTest, QueryMessage) { + EXPECT_CALL(callbacks_, processQuery).Times(1); + createPostgresMsg(data_, "Q", "SELECT * FROM whatever;"); + decoder_->onData(data_, true); +} + +// Parse message has optional Query name which may be in front of actual +// query statement. This test verifies that both formats are processed +// correctly. +TEST_F(PostgresProxyFrontendDecoderTest, ParseMessage) { + std::string query = "SELECT * FROM whatever;"; + std::string query_name, query_params; + + // Should be called twice with the same query. + EXPECT_CALL(callbacks_, processQuery(query)).Times(2); + + // Set params to be zero. + query_params.reserve(2); + query_params += '\0'; + query_params += '\0'; + + // Message without optional query name. + query_name.reserve(1); + query_name += '\0'; + createPostgresMsg(data_, "P", query_name + query + query_params); + decoder_->onData(data_, true); + + // Message with optional name query_name + query_name.clear(); + query_name.reserve(5); + query_name += "P0_8"; + query_name += '\0'; + createPostgresMsg(data_, "P", query_name + query + query_params); + decoder_->onData(data_, true); +} + // Test if each backend command calls incMessagesBackend()) method. 
TEST_P(PostgresProxyBackendDecoderTest, BackendInc) { EXPECT_CALL(callbacks_, incMessagesBackend()).Times(1); diff --git a/test/extensions/filters/network/postgres_proxy/postgres_filter_test.cc b/test/extensions/filters/network/postgres_proxy/postgres_filter_test.cc index c44d32bf94c3..5536189b84fa 100644 --- a/test/extensions/filters/network/postgres_proxy/postgres_filter_test.cc +++ b/test/extensions/filters/network/postgres_proxy/postgres_filter_test.cc @@ -4,6 +4,7 @@ #include #include "extensions/filters/network/postgres_proxy/postgres_filter.h" +#include "extensions/filters/network/well_known_names.h" #include "test/extensions/filters/network/postgres_proxy/postgres_test_utils.h" #include "test/mocks/network/mocks.h" @@ -13,6 +14,7 @@ namespace Extensions { namespace NetworkFilters { namespace PostgresProxy { +using testing::ReturnRef; using ::testing::WithArgs; // Decoder mock. @@ -29,17 +31,30 @@ class PostgresFilterTest std::function>> { public: PostgresFilterTest() { - config_ = std::make_shared(stat_prefix_, scope_); + config_ = std::make_shared(stat_prefix_, true, scope_); filter_ = std::make_unique(config_); filter_->initializeReadFilterCallbacks(filter_callbacks_); } + void setMetadata() { + EXPECT_CALL(filter_callbacks_, connection()).WillRepeatedly(ReturnRef(connection_)); + EXPECT_CALL(connection_, streamInfo()).WillRepeatedly(ReturnRef(stream_info_)); + ON_CALL(stream_info_, setDynamicMetadata(NetworkFilterNames::get().PostgresProxy, _)) + .WillByDefault(Invoke([this](const std::string&, const ProtobufWkt::Struct& obj) { + stream_info_.metadata_.mutable_filter_metadata()->insert( + Protobuf::MapPair( + NetworkFilterNames::get().PostgresProxy, obj)); + })); + } + Stats::IsolatedStoreImpl scope_; std::string stat_prefix_{"test."}; std::unique_ptr filter_; PostgresFilterConfigSharedPtr config_; NiceMock filter_callbacks_; + NiceMock connection_; + NiceMock stream_info_; // These variables are used internally in tests. Buffer::OwnedImpl data_; @@ -238,6 +253,60 @@ TEST_F(PostgresFilterTest, EncryptedSessionStats) { ASSERT_THAT(filter_->getStats().sessions_encrypted_.value(), 1); } +// Test verifies that incorrect SQL statement does not create +// Postgres metadata. +TEST_F(PostgresFilterTest, MetadataIncorrectSQL) { + // Pretend that startup message has been received. + static_cast(filter_->getDecoder())->setStartup(false); + setMetadata(); + + createPostgresMsg(data_, "Q", "BLAH blah blah"); + filter_->onData(data_, false); + + // SQL statement was wrong. No metadata should have been created. + ASSERT_THAT(filter_->connection().streamInfo().dynamicMetadata().filter_metadata().contains( + NetworkFilterNames::get().PostgresProxy), + false); + ASSERT_THAT(filter_->getStats().statements_parse_error_.value(), 1); + ASSERT_THAT(filter_->getStats().statements_parsed_.value(), 0); +} + +// Test verifies that Postgres metadata is created for correct SQL statement. +// and it happens only when parse_sql flag is true. +TEST_F(PostgresFilterTest, QueryMessageMetadata) { + // Pretend that startup message has been received. + static_cast(filter_->getDecoder())->setStartup(false); + setMetadata(); + + // Disable creating parsing SQL and creating metadata. 
+ filter_->getConfig()->enable_sql_parsing_ = false; + createPostgresMsg(data_, "Q", "SELECT * FROM whatever"); + filter_->onData(data_, false); + + ASSERT_THAT(filter_->connection().streamInfo().dynamicMetadata().filter_metadata().contains( + NetworkFilterNames::get().PostgresProxy), + false); + ASSERT_THAT(filter_->getStats().statements_parse_error_.value(), 0); + ASSERT_THAT(filter_->getStats().statements_parsed_.value(), 0); + + // Now enable SQL parsing and creating metadata. + filter_->getConfig()->enable_sql_parsing_ = true; + filter_->onData(data_, false); + + auto& filter_meta = filter_->connection().streamInfo().dynamicMetadata().filter_metadata().at( + NetworkFilterNames::get().PostgresProxy); + auto& fields = filter_meta.fields(); + + ASSERT_THAT(fields.size(), 1); + ASSERT_THAT(fields.contains("whatever"), true); + + const auto& operations = fields.at("whatever").list_value(); + ASSERT_EQ("select", operations.values(0).string_value()); + + ASSERT_THAT(filter_->getStats().statements_parse_error_.value(), 0); + ASSERT_THAT(filter_->getStats().statements_parsed_.value(), 1); +} + } // namespace PostgresProxy } // namespace NetworkFilters } // namespace Extensions From c1d116e71cc2d9830156685d65d5d9736215555a Mon Sep 17 00:00:00 2001 From: Jinhui Song Date: Wed, 29 Jul 2020 16:02:22 -0500 Subject: [PATCH 782/909] logger: implement Fancy Logger with fine-grained log control (#11822) Implement a new logger called Fancy Logger with fine-grained control, e.g. file, function and line level. Additional Description: Some new macros are defined for basic usage but not hooked with any existing log macros. API for the logger is still in progress. Risk Level: Low Testing: unit test & benchmark Docs Changes: None Release Notes: None Signed-off-by: Jinhui Song --- source/common/common/BUILD | 17 +++ source/common/common/fancy_logger.cc | 102 +++++++++++++++++ source/common/common/fancy_logger.h | 116 ++++++++++++++++++++ source/common/common/logger.cc | 21 +++- source/common/common/logger.h | 11 +- test/common/common/BUILD | 16 +++ test/common/common/log_macros_test.cc | 44 ++++++++ test/common/common/logger_speed_test.cc | 139 ++++++++++++++++++++++++ 8 files changed, 464 insertions(+), 2 deletions(-) create mode 100644 source/common/common/fancy_logger.cc create mode 100644 source/common/common/fancy_logger.h create mode 100644 test/common/common/logger_speed_test.cc diff --git a/source/common/common/BUILD b/source/common/common/BUILD index d3b6bc9741ce..b1c7c2d13240 100644 --- a/source/common/common/BUILD +++ b/source/common/common/BUILD @@ -151,6 +151,23 @@ envoy_cc_library( }), ) +envoy_cc_library( + name = "fancy_logger_lib", + srcs = ["fancy_logger.cc"], + hdrs = ["fancy_logger.h"], + external_deps = ["abseil_synchronization"], + deps = [ + ":base_logger_lib", + ":lock_guard_lib", + ":macros", + ":minimal_logger_lib", + ":non_copyable", + ] + select({ + "//bazel:android_logger": ["logger_impl_lib_android"], + "//conditions:default": ["logger_impl_lib_standard"], + }), +) + envoy_cc_library( name = "base_logger_lib", srcs = ["base_logger.cc"], diff --git a/source/common/common/fancy_logger.cc b/source/common/common/fancy_logger.cc new file mode 100644 index 000000000000..ef90afeefb98 --- /dev/null +++ b/source/common/common/fancy_logger.cc @@ -0,0 +1,102 @@ +#include "common/common/fancy_logger.h" + +#include +#include + +#include "common/common/logger.h" + +using spdlog::level::level_enum; + +namespace Envoy { + +/** + * Implements a lock from BasicLockable, to avoid dependency problem 
of thread.h. + */ +class FancyBasicLockable : public Thread::BasicLockable { +public: + // BasicLockable + void lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() override { mutex_.Lock(); } + bool tryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) override { return mutex_.TryLock(); } + void unlock() ABSL_UNLOCK_FUNCTION() override { mutex_.Unlock(); } + +private: + absl::Mutex mutex_; +}; + +SpdLoggerSharedPtr FancyContext::getFancyLogEntry(std::string key) + ABSL_LOCKS_EXCLUDED(fancy_log_lock_) { + absl::ReaderMutexLock l(&fancy_log_lock_); + return fancy_log_map_->find(key)->second; +} + +void FancyContext::initFancyLogger(std::string key, std::atomic& logger) + ABSL_LOCKS_EXCLUDED(fancy_log_lock_) { + absl::WriterMutexLock l(&fancy_log_lock_); + auto it = fancy_log_map_->find(key); + spdlog::logger* target; + if (it == fancy_log_map_->end()) { + target = createLogger(key); + } else { + target = it->second.get(); + } + logger.store(target); +} + +bool FancyContext::setFancyLogger(std::string key, level_enum log_level) + ABSL_LOCKS_EXCLUDED(fancy_log_lock_) { + absl::ReaderMutexLock l(&fancy_log_lock_); + auto it = fancy_log_map_->find(key); + if (it != fancy_log_map_->end()) { + it->second->set_level(log_level); + return true; + } + return false; +} + +void FancyContext::setDefaultFancyLevelFormat(spdlog::level::level_enum level, std::string format) + ABSL_LOCKS_EXCLUDED(fancy_log_lock_) { + if (level == Logger::Context::getFancyDefaultLevel() && + format == Logger::Context::getFancyLogFormat()) { + return; + } + absl::ReaderMutexLock l(&fancy_log_lock_); + for (const auto& it : *fancy_log_map_) { + if (it.second->level() == Logger::Context::getFancyDefaultLevel()) { + // if logger is default level now + it.second->set_level(level); + } + it.second->set_pattern(format); + } +} + +void FancyContext::initSink() { + spdlog::sink_ptr sink = Logger::Registry::getSink(); + Logger::DelegatingLogSinkSharedPtr sp = std::static_pointer_cast(sink); + if (!sp->hasLock()) { + static FancyBasicLockable tlock; + sp->setLock(tlock); + sp->setShouldEscape(false); + } +} + +spdlog::logger* FancyContext::createLogger(std::string key, int level) + ABSL_EXCLUSIVE_LOCKS_REQUIRED(fancy_log_lock_) { + SpdLoggerSharedPtr new_logger = + std::make_shared(key, Logger::Registry::getSink()); + if (!Logger::Registry::getSink()->hasLock()) { // occurs in benchmark test + initSink(); + } + level_enum lv = Logger::Context::getFancyDefaultLevel(); + if (level > -1) { + lv = static_cast(level); + } + new_logger->set_level(lv); + new_logger->set_pattern(Logger::Context::getFancyLogFormat()); + new_logger->flush_on(level_enum::critical); + fancy_log_map_->insert(std::make_pair(key, new_logger)); + return new_logger.get(); +} + +FancyContext& getFancyContext() { MUTABLE_CONSTRUCT_ON_FIRST_USE(FancyContext); } + +} // namespace Envoy diff --git a/source/common/common/fancy_logger.h b/source/common/common/fancy_logger.h new file mode 100644 index 000000000000..dee92922d029 --- /dev/null +++ b/source/common/common/fancy_logger.h @@ -0,0 +1,116 @@ +#pragma once + +#include + +#include "common/common/macros.h" + +#include "absl/container/flat_hash_map.h" +#include "absl/synchronization/mutex.h" +#include "spdlog/spdlog.h" + +namespace Envoy { + +using SpdLoggerSharedPtr = std::shared_ptr; +using FancyMap = absl::flat_hash_map; +using FancyMapPtr = std::shared_ptr; + +/** + * Stores the lock and functions used by Fancy Logger's macro so that we don't need to declare + * them globally. 
Functions are provided to initialize a logger, set log level, flush a logger. + */ +class FancyContext { +public: + /** + * Gets a logger from map given a key (e.g. file name). + */ + SpdLoggerSharedPtr getFancyLogEntry(std::string key) ABSL_LOCKS_EXCLUDED(fancy_log_lock_); + + /** + * Initializes Fancy Logger and register it in global map if not done. + */ + void initFancyLogger(std::string key, std::atomic& logger) + ABSL_LOCKS_EXCLUDED(fancy_log_lock_); + + /** + * Sets log level. If not found, return false. + */ + bool setFancyLogger(std::string key, spdlog::level::level_enum log_level) + ABSL_LOCKS_EXCLUDED(fancy_log_lock_); + + /** + * Sets the default logger level and format when updating context. + */ + void setDefaultFancyLevelFormat(spdlog::level::level_enum level, std::string format) + ABSL_LOCKS_EXCLUDED(fancy_log_lock_); + +private: + /** + * Initializes sink for the initialization of loggers, needed only in benchmark test. + */ + void initSink(); + + /** + * Creates a logger given key and log level, and add it to map. + * Key is the log component name, e.g. file name now. + */ + spdlog::logger* createLogger(std::string key, int level = -1) + ABSL_EXCLUSIVE_LOCKS_REQUIRED(fancy_log_lock_); + + /** + * Lock for the following map (not for the corresponding loggers). + */ + absl::Mutex fancy_log_lock_; + + /** + * Map that stores pairs, key can be the file name. + */ + FancyMapPtr fancy_log_map_ ABSL_GUARDED_BY(fancy_log_lock_) = std::make_shared(); +}; + +FancyContext& getFancyContext(); + +#define FANCY_KEY std::string(__FILE__) + +/** + * Macro for fancy logger. + * Uses a global map to store logger and take use of thread-safe spdlog::logger. + * The local pointer is used to avoid another load() when logging. Here we use + * spdlog::logger* as atomic is a C++20 feature. + */ +#define FANCY_LOG(LEVEL, ...) \ + do { \ + static std::atomic flogger{0}; \ + spdlog::logger* local_flogger = flogger.load(std::memory_order_relaxed); \ + if (!local_flogger) { \ + getFancyContext().initFancyLogger(FANCY_KEY, flogger); \ + local_flogger = flogger.load(std::memory_order_relaxed); \ + } \ + local_flogger->log(spdlog::source_loc{__FILE__, __LINE__, __func__}, \ + ENVOY_SPDLOG_LEVEL(LEVEL), __VA_ARGS__); \ + } while (0) + +/** + * Convenient macro for connection log. + */ +#define FANCY_CONN_LOG(LEVEL, FORMAT, CONNECTION, ...) \ + FANCY_LOG(LEVEL, "[C{}] " FORMAT, (CONNECTION).id(), ##__VA_ARGS__) + +/** + * Convenient macro for stream log. + */ +#define FANCY_STREAM_LOG(LEVEL, FORMAT, STREAM, ...) \ + FANCY_LOG(LEVEL, "[C{}][S{}] " FORMAT, (STREAM).connection() ? (STREAM).connection()->id() : 0, \ + (STREAM).streamId(), ##__VA_ARGS__) + +/** + * Convenient macro for log flush. 
+ */ +#define FANCY_FLUSH_LOG() \ + do { \ + SpdLoggerSharedPtr p = getFancyContext().getFancyLogEntry(FANCY_KEY); \ + if (p) { \ + p->flush(); \ + } \ + } while (0) + +} // namespace Envoy diff --git a/source/common/common/logger.cc b/source/common/common/logger.cc index 9d84904b8d93..530484876202 100644 --- a/source/common/common/logger.cc +++ b/source/common/common/logger.cc @@ -126,11 +126,30 @@ Context::~Context() { } } -void Context::activate() { +void Context::activate(LoggerMode mode) { Registry::getSink()->setLock(lock_); Registry::getSink()->setShouldEscape(should_escape_); Registry::setLogLevel(log_level_); Registry::setLogFormat(log_format_); + + if (mode == LoggerMode::Fancy) { + fancy_default_level_ = log_level_; + fancy_log_format_ = log_format_; + } +} + +std::string Context::getFancyLogFormat() { + if (!current_context) { // Context is not instantiated in benchmark test + return "[%Y-%m-%d %T.%e][%t][%l][%n] %v"; + } + return current_context->fancy_log_format_; +} + +spdlog::level::level_enum Context::getFancyDefaultLevel() { + if (!current_context) { + return spdlog::level::info; + } + return current_context->fancy_default_level_; } std::vector& Registry::allLoggers() { diff --git a/source/common/common/logger.h b/source/common/common/logger.h index 6fb9c5719096..5c2ed08f497c 100644 --- a/source/common/common/logger.h +++ b/source/common/common/logger.h @@ -13,6 +13,7 @@ #include "common/common/macros.h" #include "common/common/non_copyable.h" +#include "absl/container/flat_hash_map.h" #include "absl/strings/string_view.h" #include "absl/synchronization/mutex.h" #include "fmt/ostream.h" @@ -212,6 +213,8 @@ class DelegatingLogSink : public spdlog::sinks::sink { bool should_escape_{false}; }; +enum class LoggerMode { Envoy, Fancy }; + /** * Defines a scope for the logging system with the specified lock and log level. 
* This is equivalent to setLogLevel, setLogFormat, and setLock, which can be @@ -229,14 +232,20 @@ class Context { Thread::BasicLockable& lock, bool should_escape); ~Context(); + static std::string getFancyLogFormat(); + static spdlog::level::level_enum getFancyDefaultLevel(); + private: - void activate(); + void activate(LoggerMode mode = LoggerMode::Envoy); const spdlog::level::level_enum log_level_; const std::string log_format_; Thread::BasicLockable& lock_; bool should_escape_; Context* const save_context_; + + std::string fancy_log_format_ = "[%Y-%m-%d %T.%e][%t][%l][%n] %v"; + spdlog::level::level_enum fancy_default_level_ = spdlog::level::info; }; /** diff --git a/test/common/common/BUILD b/test/common/common/BUILD index 8c4ca910649b..dc9a1a334145 100644 --- a/test/common/common/BUILD +++ b/test/common/common/BUILD @@ -106,6 +106,7 @@ envoy_cc_test( name = "log_macros_test", srcs = ["log_macros_test.cc"], deps = [ + "//source/common/common:fancy_logger_lib", "//source/common/common:minimal_logger_lib", "//test/mocks/http:http_mocks", "//test/mocks/network:network_mocks", @@ -114,6 +115,21 @@ envoy_cc_test( ], ) +envoy_cc_benchmark_binary( + name = "logger_speed_test", + srcs = ["logger_speed_test.cc"], + external_deps = ["benchmark"], + deps = [ + "//source/common/common:fancy_logger_lib", + "//source/common/common:minimal_logger_lib", + ], +) + +envoy_benchmark_test( + name = "logger_speed_test_benchmark_test", + benchmark_binary = "logger_speed_test", +) + envoy_cc_test( name = "logger_test", srcs = ["logger_test.cc"], diff --git a/test/common/common/log_macros_test.cc b/test/common/common/log_macros_test.cc index 9de22d83b26c..c19fbebfde01 100644 --- a/test/common/common/log_macros_test.cc +++ b/test/common/common/log_macros_test.cc @@ -1,6 +1,7 @@ #include #include +#include "common/common/fancy_logger.h" #include "common/common/logger.h" #include "test/mocks/http/mocks.h" @@ -48,6 +49,7 @@ TEST(Logger, evaluateParams) { // Log message with higher severity and make sure that params were evaluated. GET_MISC_LOGGER().set_level(spdlog::level::info); ENVOY_LOG_MISC(warn, "test message '{}'", i++); + EXPECT_THAT(i, testing::Eq(2)); } @@ -137,4 +139,46 @@ TEST_F(FormatTest, OutputEscaped) { EXPECT_LOG_CONTAINS_ALL_OF_ESCAPED(message, logMessageEscapeSequences()); } +/** + * Test for Fancy Logger convenient macros. + */ +TEST(Fancy, Global) { + FANCY_LOG(info, "Hello world! Here's a line of fancy log!"); + FANCY_LOG(error, "Fancy Error! Here's the second message!"); + + NiceMock connection_; + NiceMock stream_; + FANCY_CONN_LOG(warn, "Fake info {} of connection", connection_, 1); + FANCY_STREAM_LOG(warn, "Fake warning {} of stream", stream_, 1); + + FANCY_LOG(critical, "Critical message for later flush."); + FANCY_FLUSH_LOG(); +} + +TEST(Fancy, SetLevel) { + const char* file = "P=NP_file"; + getFancyContext().setFancyLogger(file, spdlog::level::trace); + + getFancyContext().setFancyLogger(__FILE__, spdlog::level::err); + FANCY_LOG(error, "Fancy Error! 
Here's a test for level."); + FANCY_LOG(warn, "Warning: you shouldn't see this message!"); +} + +TEST(Fancy, Default) { + getFancyContext().setFancyLogger(__FILE__, spdlog::level::info); // revert to default + std::string fmt = "[%t][%l][%n] %v"; + getFancyContext().setDefaultFancyLevelFormat(spdlog::level::warn, fmt); + FANCY_LOG(info, "Info: you shouldn't see this message!"); + FANCY_LOG(warn, "Warning: warning at default log level!"); + EXPECT_EQ(Logger::Context::getFancyLogFormat(), "[%Y-%m-%d %T.%e][%t][%l][%n] %v"); + EXPECT_EQ(Logger::Context::getFancyDefaultLevel(), spdlog::level::info); +} + +TEST(Fancy, FastPath) { + getFancyContext().setFancyLogger(__FILE__, spdlog::level::info); + for (int i = 0; i < 10; i++) { + FANCY_LOG(warn, "Fake warning No. {}", i); + } +} + } // namespace Envoy diff --git a/test/common/common/logger_speed_test.cc b/test/common/common/logger_speed_test.cc new file mode 100644 index 000000000000..55f4275db251 --- /dev/null +++ b/test/common/common/logger_speed_test.cc @@ -0,0 +1,139 @@ +#include +#include + +#include "common/common/fancy_logger.h" +#include "common/common/logger.h" + +#include "benchmark/benchmark.h" + +namespace Envoy { + +/** + * Benchmark for the main slow path, i.e. new logger creation here. + */ +static void fancySlowPath(benchmark::State& state) { + FANCY_LOG(info, "Slow path test begins."); + std::atomic logger; + for (auto _ : state) { + UNREFERENCED_PARAMETER(_); + for (int i = 0; i < state.range(0); i++) { + std::string key = "k" + std::to_string(i + (state.thread_index << 8)); + getFancyContext().initFancyLogger(key, logger); + } + } +} + +#define FL FANCY_LOG(trace, "Default") +#define FL_8 \ + FL; \ + FL; \ + FL; \ + FL; \ + FL; \ + FL; \ + FL; \ + FL; +#define FL_64 \ + { FL_8 FL_8 FL_8 FL_8 FL_8 FL_8 FL_8 FL_8 } +#define FL_512 \ + { FL_64 FL_64 FL_64 FL_64 FL_64 FL_64 FL_64 FL_64 } +#define FL_1024 \ + { FL_512 FL_512 } + +/** + * Benchmark for medium path, i.e. new site initialization within the same file. + */ +static void fancyMediumPath(benchmark::State& state) { + FANCY_LOG(info, "Medium path test begins."); + for (auto _ : state) { + UNREFERENCED_PARAMETER(_); + // create different call sites for medium path + for (int i = 0; i < state.range(0); i++) { + FL_1024 + } + } +} + +/** + * Benchmark for fast path, i.e. integration test of common scenario. + */ +static void fancyFastPath(benchmark::State& state) { + // control log length to be the same as normal Envoy below + std::string msg(100 - strlen(__FILE__) + 4, '.'); + spdlog::level::level_enum lv = state.range(1) ? spdlog::level::trace : spdlog::level::info; + getFancyContext().setFancyLogger(FANCY_KEY, lv); + for (auto _ : state) { + UNREFERENCED_PARAMETER(_); + for (int i = 0; i < state.range(0); i++) { + FANCY_LOG(trace, "Fast path: {}", msg); + } + } +} + +/** + * Benchmark for ENVOY_LOG to compare. + */ +static void envoyNormal(benchmark::State& state) { + spdlog::level::level_enum lv = state.range(1) ? spdlog::level::trace : spdlog::level::info; + std::string msg(100, '.'); + GET_MISC_LOGGER().set_level(lv); + for (auto _ : state) { + UNREFERENCED_PARAMETER(_); + for (int i = 0; i < state.range(0); i++) { + ENVOY_LOG_MISC(trace, "Fast path: {}", msg); + } + } +} + +/** + * Benchmark for a large number of level setting. 
+ */ +static void fancyLevelSetting(benchmark::State& state) { + FANCY_LOG(info, "Level setting test begins."); + for (auto _ : state) { + UNREFERENCED_PARAMETER(_); + for (int i = 0; i < state.range(0); i++) { + getFancyContext().setFancyLogger(__FILE__, spdlog::level::warn); + } + } +} + +/** + * Comparison with Envoy's level setting. + */ +static void envoyLevelSetting(benchmark::State& state) { + ENVOY_LOG_MISC(info, "Envoy's level setting begins."); + for (auto _ : state) { + UNREFERENCED_PARAMETER(_); + for (int i = 0; i < state.range(0); i++) { + GET_MISC_LOGGER().set_level(spdlog::level::warn); + } + } +} + +/** + * Benchmarks in detail starts. + */ +BENCHMARK(fancySlowPath)->Arg(1 << 10); +BENCHMARK(fancySlowPath)->Arg(1 << 10)->Threads(20)->MeasureProcessCPUTime(); +BENCHMARK(fancySlowPath)->Arg(1 << 10)->Threads(200)->MeasureProcessCPUTime(); + +BENCHMARK(fancyMediumPath)->Arg(1)->Iterations(1); +// Seems medium path's concurrency test doesn't make sense (hard to do as well) + +BENCHMARK(fancyFastPath)->Args({1024, 0})->Args({1024, 1}); // First no actual log, then log +BENCHMARK(fancyFastPath)->Args({1 << 10, 0})->Threads(20)->MeasureProcessCPUTime(); +BENCHMARK(fancyFastPath)->Args({1 << 10, 1})->Threads(20)->MeasureProcessCPUTime(); +BENCHMARK(fancyFastPath)->Args({1 << 10, 0})->Threads(200)->MeasureProcessCPUTime(); +BENCHMARK(fancyFastPath)->Args({1 << 10, 1})->Threads(200)->MeasureProcessCPUTime(); + +BENCHMARK(envoyNormal)->Args({1024, 0})->Args({1024, 1}); +BENCHMARK(envoyNormal)->Args({1 << 10, 0})->Threads(20)->MeasureProcessCPUTime(); +BENCHMARK(envoyNormal)->Args({1 << 10, 1})->Threads(20)->MeasureProcessCPUTime(); +BENCHMARK(envoyNormal)->Args({1 << 10, 0})->Threads(200)->MeasureProcessCPUTime(); +BENCHMARK(envoyNormal)->Args({1 << 10, 1})->Threads(200)->MeasureProcessCPUTime(); + +BENCHMARK(fancyLevelSetting)->Arg(1 << 10); +BENCHMARK(envoyLevelSetting)->Arg(1 << 10); + +} // namespace Envoy From 7f6a716a4e552e02ba7b5a6167a27fb2c292c07f Mon Sep 17 00:00:00 2001 From: John Plevyak Date: Wed, 29 Jul 2020 15:21:08 -0700 Subject: [PATCH 783/909] Wasm upstreaming: required bazel/* support. (#12116) Merge in changes from envoyproxy/envoy-wasm in the bazel/ directory as part of upstreaming Wasm. Risk Level: Low Testing: Unit tests pass. 
Docs Changes: N/A Release Notes: N/A Signed-off-by: John Plevyak --- bazel/antlr.patch | 26 +++++++ bazel/dependency_imports.bzl | 2 + bazel/external/proxy_wasm_cpp_host.BUILD | 37 ++++++++++ bazel/external/wee8.genrule_cmd | 32 +++++++-- bazel/repositories.bzl | 38 +++++++++++ bazel/repository_locations.bzl | 43 ++++++++++++ bazel/wasm/BUILD | 1 + bazel/wasm/wasm.bzl | 67 +++++++++++++++++++ .../extensions/common/wasm/test_data/Makefile | 5 ++ test/extensions/common/wasm/wasm_vm_test.cc | 35 ++++++++++ 10 files changed, 280 insertions(+), 6 deletions(-) create mode 100644 bazel/antlr.patch create mode 100644 bazel/external/proxy_wasm_cpp_host.BUILD create mode 100644 bazel/wasm/BUILD create mode 100644 bazel/wasm/wasm.bzl create mode 100644 test/extensions/common/wasm/test_data/Makefile diff --git a/bazel/antlr.patch b/bazel/antlr.patch new file mode 100644 index 000000000000..ad0efbc8642e --- /dev/null +++ b/bazel/antlr.patch @@ -0,0 +1,26 @@ +diff --git a/runtime/Cpp/runtime/src/atn/ATNDeserializer.cpp b/runtime/Cpp/runtime/src/atn/ATNDeserializer.cpp +index c6cceda13..e86533759 100755 +--- a/runtime/Cpp/runtime/src/atn/ATNDeserializer.cpp ++++ b/runtime/Cpp/runtime/src/atn/ATNDeserializer.cpp +@@ -104,7 +104,7 @@ void deserializeSets( + + } + +-ATNDeserializer::ATNDeserializer(): ATNDeserializer(ATNDeserializationOptions::getDefaultOptions()) { ++ATNDeserializer::ATNDeserializer(): ATNDeserializer(ATNDeserializationOptions()) { + } + + ATNDeserializer::ATNDeserializer(const ATNDeserializationOptions& dso): deserializationOptions(dso) { +diff --git a/runtime/Cpp/runtime/src/atn/LexerATNSimulator.cpp b/runtime/Cpp/runtime/src/atn/LexerATNSimulator.cpp +index 827c3d59f..62914cf55 100755 +--- a/runtime/Cpp/runtime/src/atn/LexerATNSimulator.cpp ++++ b/runtime/Cpp/runtime/src/atn/LexerATNSimulator.cpp +@@ -69,7 +69,7 @@ void LexerATNSimulator::copyState(LexerATNSimulator *simulator) { + } + + size_t LexerATNSimulator::match(CharStream *input, size_t mode) { +- match_calls++; ++ // match_calls++; + _mode = mode; + ssize_t mark = input->mark(); + diff --git a/bazel/dependency_imports.bzl b/bazel/dependency_imports.bzl index 38be774dccd5..a0a0110f4735 100644 --- a/bazel/dependency_imports.bzl +++ b/bazel/dependency_imports.bzl @@ -7,6 +7,7 @@ load("@build_bazel_rules_apple//apple:repositories.bzl", "apple_rules_dependenci load("@upb//bazel:repository_defs.bzl", upb_bazel_version_repository = "bazel_version_repository") load("@config_validation_pip3//:requirements.bzl", config_validation_pip_install = "pip_install") load("@protodoc_pip3//:requirements.bzl", protodoc_pip_install = "pip_install") +load("@rules_antlr//antlr:deps.bzl", "antlr_dependencies") # go version for rules_go GO_VERSION = "1.14.4" @@ -19,6 +20,7 @@ def envoy_dependency_imports(go_version = GO_VERSION): gazelle_dependencies() apple_rules_dependencies() upb_bazel_version_repository(name = "upb_bazel_version") + antlr_dependencies(471) custom_exec_properties( name = "envoy_large_machine_exec_property", diff --git a/bazel/external/proxy_wasm_cpp_host.BUILD b/bazel/external/proxy_wasm_cpp_host.BUILD new file mode 100644 index 000000000000..4cb87cf98ec1 --- /dev/null +++ b/bazel/external/proxy_wasm_cpp_host.BUILD @@ -0,0 +1,37 @@ +load("@rules_cc//cc:defs.bzl", "cc_library") + +licenses(["notice"]) # Apache 2 + +package(default_visibility = ["//visibility:public"]) + +cc_library( + name = "include", + hdrs = glob(["include/proxy-wasm/**/*.h"]), + deps = [ + "@proxy_wasm_cpp_sdk//:common_lib", + ], +) + +cc_library( + name = "lib", 
+ srcs = glob( + [ + "src/**/*.h", + "src/**/*.cc", + ], + exclude = ["src/**/wavm*"], + ), + copts = ["-std=c++14"], + deps = [ + ":include", + "//external:abseil_flat_hash_map", + "//external:abseil_optional", + "//external:abseil_strings", + "//external:protobuf", + "//external:ssl", + "//external:wee8", + "//external:zlib", + "@proxy_wasm_cpp_sdk//:api_lib", + "@proxy_wasm_cpp_sdk//:common_lib", + ], +) diff --git a/bazel/external/wee8.genrule_cmd b/bazel/external/wee8.genrule_cmd index 8886462edbe9..8cb0e24c5f49 100644 --- a/bazel/external/wee8.genrule_cmd +++ b/bazel/external/wee8.genrule_cmd @@ -51,8 +51,17 @@ if [[ $${ENVOY_TSAN-} == "1" ]]; then WEE8_BUILD_ARGS+=" is_tsan=true" fi -# Release build. -WEE8_BUILD_ARGS+=" is_debug=false" +# Debug/release build. +if [[ $(COMPILATION_MODE) == "dbg" && $${ENVOY_UBSAN_VPTR-} != "1" && $${ENVOY_MSAN-} != "1" && $${ENVOY_TSAN-} != "1" ]]; then + WEE8_BUILD_ARGS+=" is_debug=true" + WEE8_BUILD_ARGS+=" v8_symbol_level=2" + WEE8_BUILD_ARGS+=" v8_optimized_debug=false" +else + WEE8_BUILD_ARGS+=" is_debug=false" + WEE8_BUILD_ARGS+=" v8_symbol_level=1" + WEE8_BUILD_ARGS+=" v8_enable_handle_zapping=false" +fi + # Clang or not Clang, that is the question. WEE8_BUILD_ARGS+=" is_clang=$$IS_CLANG" # Hack to disable bleeding-edge compiler flags. @@ -81,17 +90,28 @@ if [[ `uname -m` == "aarch64" ]]; then fi # Build wee8. -if [[ "$$(uname -s)" == "Darwin" ]]; then +if [[ -f /etc/centos-release ]] && [[ $$(cat /etc/centos-release) =~ "CentOS Linux release 7" ]] && [[ -x "$$(command -v gn)" ]]; then + # Using system default gn tools + # This is done only for CentOS 7, as it has an old version of GLIBC which is otherwise incompatible + gn=$$(command -v gn) +elif [[ "$$(uname -s)" == "Darwin" ]]; then gn=buildtools/mac/gn - ninja=third_party/depot_tools/ninja elif [[ "$$(uname -s)-$$(uname -m)" == "Linux-x86_64" ]]; then gn=buildtools/linux64/gn - ninja=third_party/depot_tools/ninja else - # Using system default ninja & gn tools + # Using system default gn tools gn=$$(command -v gn) +fi + +if [[ "$$(uname -s)" == "Darwin" ]]; then + ninja=third_party/depot_tools/ninja +elif [[ "$$(uname -s)-$$(uname -m)" == "Linux-x86_64" ]]; then + ninja=third_party/depot_tools/ninja +else + # Using system default ninja tools ninja=$$(command -v ninja) fi + "$$gn" gen out/wee8 --args="$$WEE8_BUILD_ARGS" "$$ninja" -C out/wee8 wee8 diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index cd63791ed747..bf1827cf8c66 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -191,8 +191,12 @@ def envoy_dependencies(skip_targets = []): _io_opentracing_cpp() _net_zlib() _upb() + _proxy_wasm_cpp_sdk() + _proxy_wasm_cpp_host() + _emscripten_toolchain() _repository_impl("com_googlesource_code_re2") _com_google_cel_cpp() + _repository_impl("com_github_google_flatbuffers") _repository_impl("bazel_toolchains") _repository_impl("bazel_compdb") _repository_impl("envoy_build_tools") @@ -379,6 +383,24 @@ def _net_zlib(): def _com_google_cel_cpp(): _repository_impl("com_google_cel_cpp") + _repository_impl("rules_antlr") + location = _get_location("antlr4_runtimes") + http_archive( + name = "antlr4_runtimes", + build_file_content = """ +package(default_visibility = ["//visibility:public"]) +cc_library( + name = "cpp", + srcs = glob(["runtime/Cpp/runtime/src/**/*.cpp"]), + hdrs = glob(["runtime/Cpp/runtime/src/**/*.h"]), + includes = ["runtime/Cpp/runtime/src"], +) +""", + patch_args = ["-p1"], + # Patches ASAN violation of initialization fiasco + patches = 
["@envoy//bazel:antlr.patch"], + **location + ) def _com_github_nghttp2_nghttp2(): location = _get_location("com_github_nghttp2_nghttp2") @@ -770,6 +792,22 @@ def _upb(): actual = "@upb//:upb", ) +def _proxy_wasm_cpp_sdk(): + _repository_impl(name = "proxy_wasm_cpp_sdk") + +def _proxy_wasm_cpp_host(): + _repository_impl( + name = "proxy_wasm_cpp_host", + build_file = "@envoy//bazel/external:proxy_wasm_cpp_host.BUILD", + ) + +def _emscripten_toolchain(): + _repository_impl( + name = "emscripten_toolchain", + build_file_content = BUILD_ALL_CONTENT, + patch_cmds = REPOSITORY_LOCATIONS["emscripten_toolchain"]["patch_cmds"], + ) + def _com_github_google_jwt_verify(): _repository_impl("com_github_google_jwt_verify") diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 7e1f4d8c6141..bc1df883c414 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -429,6 +429,13 @@ DEPENDENCY_REPOSITORIES = dict( use_category = ["dataplane"], cpe = "N/A", ), + com_github_google_flatbuffers = dict( + sha256 = "b8efbc25721e76780752bad775a97c3f77a0250271e2db37fc747b20e8b0f24a", + strip_prefix = "flatbuffers-a83caf5910644ba1c421c002ef68e42f21c15f9f", + urls = ["https://github.com/google/flatbuffers/archive/a83caf5910644ba1c421c002ef68e42f21c15f9f.tar.gz"], + use_category = ["dataplane"], + cpe = "N/A", + ), com_googlesource_code_re2 = dict( sha256 = "2e9489a31ae007c81e90e8ec8a15d62d58a9c18d4fd1603f6441ef248556b41f", strip_prefix = "re2-2020-07-06", @@ -481,4 +488,40 @@ DEPENDENCY_REPOSITORIES = dict( use_category = ["dataplane"], cpe = "cpe:2.3:a:icu-project:international_components_for_unicode", ), + proxy_wasm_cpp_sdk = dict( + sha256 = "7d9e1f2e299215ed3e5fa8c8149740872b1100cfe3230fc639f967d9dcfd812e", + strip_prefix = "proxy-wasm-cpp-sdk-5cec30b448975e1fd3f4117311f0957309df5cb0", + urls = ["https://github.com/proxy-wasm/proxy-wasm-cpp-sdk/archive/5cec30b448975e1fd3f4117311f0957309df5cb0.tar.gz"], + use_category = ["dataplane"], + cpe = "N/A", + ), + proxy_wasm_cpp_host = dict( + sha256 = "494d3f81156b92bac640c26000497fbf3a7b1bc35f9789594280450c6e5d8129", + strip_prefix = "proxy-wasm-cpp-host-928db4d79ec7b90aea3ad13ea5df36dc60c9c31d", + urls = ["https://github.com/proxy-wasm/proxy-wasm-cpp-host/archive/928db4d79ec7b90aea3ad13ea5df36dc60c9c31d.tar.gz"], + use_category = ["dataplane"], + cpe = "N/A", + ), + emscripten_toolchain = dict( + sha256 = "2bdbee6947e32ad1e03cd075b48fda493ab16157b2b0225b445222cd528e1843", + patch_cmds = [ + "./emsdk install 1.39.19-upstream", + "./emsdk activate --embedded 1.39.19-upstream", + ], + strip_prefix = "emsdk-dec8a63594753fe5f4ad3b47850bf64d66c14a4e", + urls = ["https://github.com/emscripten-core/emsdk/archive/dec8a63594753fe5f4ad3b47850bf64d66c14a4e.tar.gz"], + use_category = ["build"], + ), + rules_antlr = dict( + sha256 = "7249d1569293d9b239e23c65f6b4c81a07da921738bde0dfeb231ed98be40429", + strip_prefix = "rules_antlr-3cc2f9502a54ceb7b79b37383316b23c4da66f9a", + urls = ["https://github.com/marcohu/rules_antlr/archive/3cc2f9502a54ceb7b79b37383316b23c4da66f9a.tar.gz"], + use_category = ["build"], + ), + antlr4_runtimes = dict( + sha256 = "4d0714f441333a63e50031c9e8e4890c78f3d21e053d46416949803e122a6574", + strip_prefix = "antlr4-4.7.1", + urls = ["https://github.com/antlr/antlr4/archive/4.7.1.tar.gz"], + use_category = ["build"], + ), ) diff --git a/bazel/wasm/BUILD b/bazel/wasm/BUILD new file mode 100644 index 000000000000..779d1695d3b7 --- /dev/null +++ 
b/bazel/wasm/BUILD
@@ -0,0 +1 @@
+licenses(["notice"])  # Apache 2
diff --git a/bazel/wasm/wasm.bzl b/bazel/wasm/wasm.bzl
new file mode 100644
index 000000000000..65fefcb49e90
--- /dev/null
+++ b/bazel/wasm/wasm.bzl
@@ -0,0 +1,67 @@
+load("@rules_cc//cc:defs.bzl", "cc_binary")
+
+def _wasm_transition_impl(settings, attr):
+    return {
+        "//command_line_option:cpu": "wasm32",
+        "//command_line_option:crosstool_top": "@proxy_wasm_cpp_sdk//toolchain:emscripten",
+
+        # Overriding copt/cxxopt/linkopt to prevent sanitizer/coverage options from leaking
+        # into the WASM build configuration.
+        "//command_line_option:copt": [],
+        "//command_line_option:cxxopt": [],
+        "//command_line_option:linkopt": [],
+        "//command_line_option:collect_code_coverage": "false",
+    }
+
+wasm_transition = transition(
+    implementation = _wasm_transition_impl,
+    inputs = [],
+    outputs = [
+        "//command_line_option:cpu",
+        "//command_line_option:crosstool_top",
+        "//command_line_option:copt",
+        "//command_line_option:cxxopt",
+        "//command_line_option:linkopt",
+        "//command_line_option:collect_code_coverage",
+    ],
+)
+
+def _wasm_binary_impl(ctx):
+    out = ctx.actions.declare_file(ctx.label.name)
+    ctx.actions.run_shell(
+        command = 'cp "{}" "{}"'.format(ctx.files.binary[0].path, out.path),
+        outputs = [out],
+        inputs = ctx.files.binary,
+    )
+
+    return [DefaultInfo(runfiles = ctx.runfiles([out]))]
+
+# WASM binary rule implementation.
+# This copies the binary specified in the binary attribute from the WASM configuration to the
+# target configuration, so that a binary in the non-WASM configuration can depend on it.
+wasm_binary = rule(
+    implementation = _wasm_binary_impl,
+    attrs = {
+        "binary": attr.label(mandatory = True, cfg = wasm_transition),
+        "_whitelist_function_transition": attr.label(default = "@bazel_tools//tools/whitelists/function_transition_whitelist"),
+    },
+)
+
+def wasm_cc_binary(name, **kwargs):
+    wasm_name = "_wasm_" + name
+    kwargs.setdefault("additional_linker_inputs", ["@proxy_wasm_cpp_sdk//:jslib"])
+    kwargs.setdefault("linkopts", ["--js-library external/proxy_wasm_cpp_sdk/proxy_wasm_intrinsics.js"])
+    kwargs.setdefault("visibility", ["//visibility:public"])
+    cc_binary(
+        name = wasm_name,
+        # Add the manual tag so this target is not built in the non-WASM (e.g. x86_64) configuration
+        # when a wildcard is specified, but it is still built in the WASM configuration
+        # when the wasm_binary below is built.
+        tags = ["manual"],
+        **kwargs
+    )
+
+    wasm_binary(
+        name = name,
+        binary = ":" + wasm_name,
+    )
diff --git a/test/extensions/common/wasm/test_data/Makefile b/test/extensions/common/wasm/test_data/Makefile
new file mode 100644
index 000000000000..03707a7f42d5
--- /dev/null
+++ b/test/extensions/common/wasm/test_data/Makefile
@@ -0,0 +1,5 @@
+all: test_rust.wasm
+
+test_rust.wasm: test_rust.rs
+	rustc -C lto -C opt-level=3 -C panic=abort -C link-arg=-S -C link-arg=-zstack-size=32768 --crate-type cdylib --target wasm32-unknown-unknown test_rust.rs
+	../../../../../bazel-bin/test/tools/wee8_compile/wee8_compile_tool test_rust.wasm test_rust.wasm
diff --git a/test/extensions/common/wasm/wasm_vm_test.cc b/test/extensions/common/wasm/wasm_vm_test.cc
index a628aa43baed..b07b684a0ba4 100644
--- a/test/extensions/common/wasm/wasm_vm_test.cc
+++ b/test/extensions/common/wasm/wasm_vm_test.cc
@@ -150,6 +150,13 @@ TEST_P(WasmVmTest, V8BadCode) {
 }
 
 TEST_P(WasmVmTest, V8Code) {
+#ifndef NDEBUG
+  // Do not execute pre-compilation tests in debug mode because V8 will fail to load because the
+  // flags do not match.
TODO: restore this test when the rust toolchain is integrated. + if (GetParam() == 1) { + return; + } +#endif auto wasm_vm = createWasmVm("envoy.wasm.runtime.v8", scope_); ASSERT_TRUE(wasm_vm != nullptr); EXPECT_TRUE(wasm_vm->runtime() == "envoy.wasm.runtime.v8"); @@ -170,6 +177,13 @@ TEST_P(WasmVmTest, V8Code) { } TEST_P(WasmVmTest, V8BadHostFunctions) { +#ifndef NDEBUG + // Do not execute pre-compilation tests in debug mode because V8 will fail to load because the + // flags do not match. TODO: restore this test when the rust toolchain is integrated. + if (GetParam() == 1) { + return; + } +#endif auto wasm_vm = createWasmVm("envoy.wasm.runtime.v8", scope_); ASSERT_TRUE(wasm_vm != nullptr); @@ -198,6 +212,13 @@ TEST_P(WasmVmTest, V8BadHostFunctions) { } TEST_P(WasmVmTest, V8BadModuleFunctions) { +#ifndef NDEBUG + // Do not execute pre-compilation tests in debug mode because V8 will fail to load because the + // flags do not match. TODO: restore this test when the rust toolchain is integrated. + if (GetParam() == 1) { + return; + } +#endif auto wasm_vm = createWasmVm("envoy.wasm.runtime.v8", scope_); ASSERT_TRUE(wasm_vm != nullptr); @@ -226,6 +247,13 @@ TEST_P(WasmVmTest, V8BadModuleFunctions) { } TEST_P(WasmVmTest, V8FunctionCalls) { +#ifndef NDEBUG + // Do not execute pre-compilation tests in debug mode because V8 will fail to load because the + // flags do not match. TODO: restore this test when the rust toolchain is integrated. + if (GetParam() == 1) { + return; + } +#endif auto wasm_vm = createWasmVm("envoy.wasm.runtime.v8", scope_); ASSERT_TRUE(wasm_vm != nullptr); @@ -264,6 +292,13 @@ TEST_P(WasmVmTest, V8FunctionCalls) { } TEST_P(WasmVmTest, V8Memory) { +#ifndef NDEBUG + // Do not execute pre-compilation tests in debug mode because V8 will fail to load because the + // flags do not match. TODO: restore this test when the rust toolchain is integrated. + if (GetParam() == 1) { + return; + } +#endif auto wasm_vm = createWasmVm("envoy.wasm.runtime.v8", scope_); ASSERT_TRUE(wasm_vm != nullptr); From bdd97414b2db1446603444cd9535b724ff29c834 Mon Sep 17 00:00:00 2001 From: Yuval Kohavi Date: Wed, 29 Jul 2020 19:10:44 -0400 Subject: [PATCH 784/909] extensions: Plumb ProtocolOptionsFactoryContext to the ProtocolOptionsFactory (#12234) Commit Message: Plumb ProtocolOptionsFactoryContext to the ProtocolOptionsFactory Additional Description: We have a use-case of needing a timer in the context of protocol-options. Risk Level: Low, adding a field that is only used in extensions. 
and is no-op of existing extensions Testing: N/A Docs Changes: N/A Release Notes: N/A Signed-off-by: Yuval Kohavi --- include/envoy/server/factory_context.h | 5 ++++ include/envoy/server/filter_config.h | 4 +-- .../upstream/health_discovery_service.cc | 6 ++-- source/common/upstream/upstream_impl.cc | 28 +++++++++---------- source/common/upstream/upstream_impl.h | 3 +- .../filters/network/common/factory_base.h | 13 +++++---- .../filters/network/redis_proxy/config.h | 3 +- .../filters/network/thrift_proxy/config.h | 3 +- test/common/upstream/upstream_impl_test.cc | 4 +-- .../dynamic_validation_integration_test.cc | 5 ++-- 10 files changed, 41 insertions(+), 33 deletions(-) diff --git a/include/envoy/server/factory_context.h b/include/envoy/server/factory_context.h index 764691dc45d5..08f67e31cc3b 100644 --- a/include/envoy/server/factory_context.h +++ b/include/envoy/server/factory_context.h @@ -268,6 +268,11 @@ class ListenerFactoryContext : public virtual FactoryContext { virtual const Network::ListenerConfig& listenerConfig() const PURE; }; +/** + * FactoryContext for ProtocolOptionsFactory. + */ +using ProtocolOptionsFactoryContext = Server::Configuration::TransportSocketFactoryContext; + } // namespace Configuration } // namespace Server } // namespace Envoy diff --git a/include/envoy/server/filter_config.h b/include/envoy/server/filter_config.h index 343e6c87ad28..97b2b3fe51b0 100644 --- a/include/envoy/server/filter_config.h +++ b/include/envoy/server/filter_config.h @@ -92,9 +92,9 @@ class ProtocolOptionsFactory : public Config::TypedFactory { */ virtual Upstream::ProtocolOptionsConfigConstSharedPtr createProtocolOptionsConfig(const Protobuf::Message& config, - ProtobufMessage::ValidationVisitor& validation_visitor) { + ProtocolOptionsFactoryContext& factory_context) { UNREFERENCED_PARAMETER(config); - UNREFERENCED_PARAMETER(validation_visitor); + UNREFERENCED_PARAMETER(factory_context); return nullptr; } diff --git a/source/common/upstream/health_discovery_service.cc b/source/common/upstream/health_discovery_service.cc index a0a585138d4f..f77bc6216016 100644 --- a/source/common/upstream/health_discovery_service.cc +++ b/source/common/upstream/health_discovery_service.cc @@ -278,9 +278,9 @@ ProdClusterInfoFactory::createClusterInfo(const CreateClusterInfoParams& params) auto socket_matcher = std::make_unique( params.cluster_.transport_socket_matches(), factory_context, socket_factory, *scope); - return std::make_unique( - params.cluster_, params.bind_config_, params.runtime_, std::move(socket_matcher), - std::move(scope), params.added_via_api_, params.validation_visitor_, factory_context); + return std::make_unique(params.cluster_, params.bind_config_, params.runtime_, + std::move(socket_matcher), std::move(scope), + params.added_via_api_, factory_context); } void HdsCluster::startHealthchecks(AccessLog::AccessLogManager& access_log_manager, diff --git a/source/common/upstream/upstream_impl.cc b/source/common/upstream/upstream_impl.cc index 4bca4252b823..92dde0ac6783 100644 --- a/source/common/upstream/upstream_impl.cc +++ b/source/common/upstream/upstream_impl.cc @@ -139,7 +139,7 @@ parseClusterSocketOptions(const envoy::config::cluster::v3::Cluster& config, ProtocolOptionsConfigConstSharedPtr createProtocolOptionsConfig(const std::string& name, const ProtobufWkt::Any& typed_config, const ProtobufWkt::Struct& config, - ProtobufMessage::ValidationVisitor& validation_visitor) { + Server::Configuration::ProtocolOptionsFactoryContext& factory_context) { 
Server::Configuration::ProtocolOptionsFactory* factory = Registry::FactoryRegistry::getFactory( name); @@ -160,15 +160,15 @@ createProtocolOptionsConfig(const std::string& name, const ProtobufWkt::Any& typ throw EnvoyException(fmt::format("filter {} does not support protocol options", name)); } - Envoy::Config::Utility::translateOpaqueConfig(typed_config, config, validation_visitor, - *proto_config); + Envoy::Config::Utility::translateOpaqueConfig( + typed_config, config, factory_context.messageValidationVisitor(), *proto_config); - return factory->createProtocolOptionsConfig(*proto_config, validation_visitor); + return factory->createProtocolOptionsConfig(*proto_config, factory_context); } -std::map -parseExtensionProtocolOptions(const envoy::config::cluster::v3::Cluster& config, - ProtobufMessage::ValidationVisitor& validation_visitor) { +std::map parseExtensionProtocolOptions( + const envoy::config::cluster::v3::Cluster& config, + Server::Configuration::ProtocolOptionsFactoryContext& factory_context) { if (!config.typed_extension_protocol_options().empty() && !config.hidden_envoy_deprecated_extension_protocol_options().empty()) { throw EnvoyException("Only one of typed_extension_protocol_options or " @@ -184,7 +184,7 @@ parseExtensionProtocolOptions(const envoy::config::cluster::v3::Cluster& config, auto& name = Extensions::NetworkFilters::Common::FilterNameUtil::canonicalFilterName(it.first); auto object = createProtocolOptionsConfig( - name, it.second, ProtobufWkt::Struct::default_instance(), validation_visitor); + name, it.second, ProtobufWkt::Struct::default_instance(), factory_context); if (object != nullptr) { options[name] = std::move(object); } @@ -197,7 +197,7 @@ parseExtensionProtocolOptions(const envoy::config::cluster::v3::Cluster& config, auto& name = Extensions::NetworkFilters::Common::FilterNameUtil::canonicalFilterName(it.first); auto object = createProtocolOptionsConfig(name, ProtobufWkt::Any::default_instance(), it.second, - validation_visitor); + factory_context); if (object != nullptr) { options[name] = std::move(object); } @@ -677,7 +677,6 @@ ClusterInfoImpl::ClusterInfoImpl( const envoy::config::cluster::v3::Cluster& config, const envoy::config::core::v3::BindConfig& bind_config, Runtime::Loader& runtime, TransportSocketMatcherPtr&& socket_matcher, Stats::ScopePtr&& stats_scope, bool added_via_api, - ProtobufMessage::ValidationVisitor& validation_visitor, Server::Configuration::TransportSocketFactoryContext& factory_context) : runtime_(runtime), name_(config.name()), type_(config.type()), max_requests_per_connection_( @@ -702,7 +701,7 @@ ClusterInfoImpl::ClusterInfoImpl( http1_settings_(Http::Utility::parseHttp1Settings(config.http_protocol_options())), http2_options_(Http2::Utility::initializeAndValidateOptions(config.http2_protocol_options())), common_http_protocol_options_(config.common_http_protocol_options()), - extension_protocol_options_(parseExtensionProtocolOptions(config, validation_visitor)), + extension_protocol_options_(parseExtensionProtocolOptions(config, factory_context)), resource_managers_(config, runtime, name_, *stats_scope_), maintenance_mode_runtime_key_(absl::StrCat("upstream.maintenance_mode.", name_)), source_address_(getSourceAddress(config, bind_config)), @@ -898,10 +897,9 @@ ClusterImplBase::ClusterImplBase( auto socket_factory = createTransportSocketFactory(cluster, factory_context); auto socket_matcher = std::make_unique( cluster.transport_socket_matches(), factory_context, socket_factory, *stats_scope); - info_ = std::make_unique( - 
cluster, factory_context.clusterManager().bindConfig(), runtime, std::move(socket_matcher), - std::move(stats_scope), added_via_api, factory_context.messageValidationVisitor(), - factory_context); + info_ = std::make_unique(cluster, factory_context.clusterManager().bindConfig(), + runtime, std::move(socket_matcher), + std::move(stats_scope), added_via_api, factory_context); // Create the default (empty) priority set before registering callbacks to // avoid getting an update the first time it is accessed. priority_set_.getOrCreateHostSet(0); diff --git a/source/common/upstream/upstream_impl.h b/source/common/upstream/upstream_impl.h index b5f2e9d469a0..999962a5b3b4 100644 --- a/source/common/upstream/upstream_impl.h +++ b/source/common/upstream/upstream_impl.h @@ -516,8 +516,7 @@ class ClusterInfoImpl : public ClusterInfo, protected Logger::Loggable(); } - Upstream::ProtocolOptionsConfigConstSharedPtr - createProtocolOptionsConfig(const Protobuf::Message& proto_config, - ProtobufMessage::ValidationVisitor& validation_visitor) override { + Upstream::ProtocolOptionsConfigConstSharedPtr createProtocolOptionsConfig( + const Protobuf::Message& proto_config, + Server::Configuration::ProtocolOptionsFactoryContext& factory_context) override { return createProtocolOptionsTyped(MessageUtil::downcastAndValidate( - proto_config, validation_visitor)); + proto_config, factory_context.messageValidationVisitor()), + factory_context); } std::string name() const override { return name_; } @@ -52,7 +54,8 @@ class FactoryBase : public Server::Configuration::NamedNetworkFilterConfigFactor Server::Configuration::FactoryContext& context) PURE; virtual Upstream::ProtocolOptionsConfigConstSharedPtr - createProtocolOptionsTyped(const ProtocolOptionsProto&) { + createProtocolOptionsTyped(const ProtocolOptionsProto&, + Server::Configuration::ProtocolOptionsFactoryContext&) { throw EnvoyException(fmt::format("filter {} does not support protocol options", name_)); } diff --git a/source/extensions/filters/network/redis_proxy/config.h b/source/extensions/filters/network/redis_proxy/config.h index cbb1866018f4..c3237934fcea 100644 --- a/source/extensions/filters/network/redis_proxy/config.h +++ b/source/extensions/filters/network/redis_proxy/config.h @@ -77,7 +77,8 @@ class RedisProxyFilterConfigFactory Upstream::ProtocolOptionsConfigConstSharedPtr createProtocolOptionsTyped( const envoy::extensions::filters::network::redis_proxy::v3::RedisProtocolOptions& - proto_config) override { + proto_config, + Server::Configuration::ProtocolOptionsFactoryContext&) override { return std::make_shared(proto_config); } }; diff --git a/source/extensions/filters/network/thrift_proxy/config.h b/source/extensions/filters/network/thrift_proxy/config.h index 62a123936bac..532298c380e3 100644 --- a/source/extensions/filters/network/thrift_proxy/config.h +++ b/source/extensions/filters/network/thrift_proxy/config.h @@ -52,7 +52,8 @@ class ThriftProxyFilterConfigFactory Upstream::ProtocolOptionsConfigConstSharedPtr createProtocolOptionsTyped( const envoy::extensions::filters::network::thrift_proxy::v3::ThriftProtocolOptions& - proto_config) override { + proto_config, + Server::Configuration::ProtocolOptionsFactoryContext&) override { return std::make_shared(proto_config); } }; diff --git a/test/common/upstream/upstream_impl_test.cc b/test/common/upstream/upstream_impl_test.cc index 2fd3def01f71..a0707d9cad2b 100644 --- a/test/common/upstream/upstream_impl_test.cc +++ b/test/common/upstream/upstream_impl_test.cc @@ -2712,7 +2712,7 @@ class 
TestNetworkFilterConfigFactory } Upstream::ProtocolOptionsConfigConstSharedPtr createProtocolOptionsConfig(const Protobuf::Message& msg, - ProtobufMessage::ValidationVisitor&) override { + Server::Configuration::ProtocolOptionsFactoryContext&) override { return parent_.createProtocolOptionsConfig(msg); } std::string name() const override { CONSTRUCT_ON_FIRST_USE(std::string, "envoy.test.filter"); } @@ -2747,7 +2747,7 @@ class TestHttpFilterConfigFactory : public Server::Configuration::NamedHttpFilte } Upstream::ProtocolOptionsConfigConstSharedPtr createProtocolOptionsConfig(const Protobuf::Message& msg, - ProtobufMessage::ValidationVisitor&) override { + Server::Configuration::ProtocolOptionsFactoryContext&) override { return parent_.createProtocolOptionsConfig(msg); } std::string name() const override { CONSTRUCT_ON_FIRST_USE(std::string, "envoy.test.filter"); } diff --git a/test/integration/dynamic_validation_integration_test.cc b/test/integration/dynamic_validation_integration_test.cc index 31a80bffae0d..aab7833a5372 100644 --- a/test/integration/dynamic_validation_integration_test.cc +++ b/test/integration/dynamic_validation_integration_test.cc @@ -38,8 +38,9 @@ class TestDynamicValidationNetworkFilterConfigFactory return Network::FilterFactoryCb(); } - Upstream::ProtocolOptionsConfigConstSharedPtr createProtocolOptionsTyped( - const envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy&) override { + Upstream::ProtocolOptionsConfigConstSharedPtr + createProtocolOptionsTyped(const envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy&, + Server::Configuration::ProtocolOptionsFactoryContext&) override { return nullptr; } }; From 4f8a9a75d20b9703903a1579eef3607e0335e767 Mon Sep 17 00:00:00 2001 From: asraa Date: Wed, 29 Jul 2020 20:20:57 -0400 Subject: [PATCH 785/909] [http] Swap default to legacy codec with runtime override to new (no-op) (#12303) Swap default codec to legacy codec implementation while codec error handling is improved. Backport some non-backported changes. Signed-off-by: Asra Ali --- docs/root/version_history/current.rst | 2 +- source/common/http/http1/codec_impl_legacy.cc | 2 ++ source/common/http/http2/codec_impl_legacy.cc | 25 ++++++++++++++----- source/common/http/http2/codec_impl_legacy.h | 6 ++--- source/common/runtime/runtime_features.cc | 3 ++- test/common/http/http2/BUILD | 2 +- 6 files changed, 28 insertions(+), 12 deletions(-) diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 25ff97319359..6be17e93042b 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -45,7 +45,7 @@ New Features * ext_authz filter: added support for emitting dynamic metadata for both :ref:`HTTP ` and :ref:`network ` filters. * grpc-json: support specifying `response_body` field in for `google.api.HttpBody` message. * http: added support for :ref:`%DOWNSTREAM_PEER_FINGERPRINT_1% ` as custom header. -* http: introduced new HTTP/1 and HTTP/2 codec implementations that will remove the use of exceptions for control flow due to high risk factors and instead use error statuses. The old behavior is deprecated, but can be used during the removal period by setting the runtime feature `envoy.reloadable_features.new_codec_behavior` to false. The removal period will be one month. +* http: introduced new HTTP/1 and HTTP/2 codec implementations that will remove the use of exceptions for control flow due to high risk factors and instead use error statuses. 
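As the next sentence notes, the new implementations are opt-in for now. A minimal way to opt in while testing is a static runtime layer in the bootstrap; the snippet below is a sketch that uses the standard layered_runtime bootstrap field (nothing in it is introduced by this patch, it only flips the reloadable feature discussed here):

    layered_runtime:
      layers:
      - name: static_layer
        static_layer:
          # Illustrative only: opts into the new HTTP/1 and HTTP/2 codec implementations.
          envoy.reloadable_features.new_codec_behavior: true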
The old behavior is used by default, but the new codecs can be enabled for testing by setting the runtime feature `envoy.reloadable_features.new_codec_behavior` to true. The new codecs will be in development for one month, and then enabled by default while the old codecs are deprecated. * load balancer: added a :ref:`configuration` option to specify the active request bias used by the least request load balancer. * postgres network filter: :ref:`metadata ` is produced based on SQL query. * redis: added fault injection support :ref:`fault injection for redis proxy `, described further in :ref:`configuration documentation `. diff --git a/source/common/http/http1/codec_impl_legacy.cc b/source/common/http/http1/codec_impl_legacy.cc index cabba6db5dda..6c227685e245 100644 --- a/source/common/http/http1/codec_impl_legacy.cc +++ b/source/common/http/http1/codec_impl_legacy.cc @@ -1093,6 +1093,8 @@ RequestEncoder& ClientConnectionImpl::newStream(ResponseDecoder& response_decode } int ClientConnectionImpl::onHeadersComplete() { + ENVOY_CONN_LOG(trace, "status_code {}", connection_, parser_.status_code); + // Handle the case where the client is closing a kept alive connection (by sending a 408 // with a 'Connection: close' header). In this case we just let response flush out followed // by the remote close. diff --git a/source/common/http/http2/codec_impl_legacy.cc b/source/common/http/http2/codec_impl_legacy.cc index 7f2f45978ae7..50cac71a404a 100644 --- a/source/common/http/http2/codec_impl_legacy.cc +++ b/source/common/http/http2/codec_impl_legacy.cc @@ -29,17 +29,23 @@ namespace Legacy { namespace Http2 { class Http2ResponseCodeDetailValues { +public: // Invalid HTTP header field was received and stream is going to be // closed. const absl::string_view ng_http2_err_http_header_ = "http2.invalid.header.field"; - // Violation in HTTP messaging rule. const absl::string_view ng_http2_err_http_messaging_ = "http2.violation.of.messaging.rule"; - // none of the above const absl::string_view ng_http2_err_unknown_ = "http2.unknown.nghttp2.error"; + // The number of headers (or trailers) exceeded the configured limits + const absl::string_view too_many_headers = "http2.too_many_headers"; + // Envoy detected an HTTP/2 frame flood from the server. + const absl::string_view outbound_frame_flood = "http2.outbound_frames_flood"; + // Envoy detected an inbound HTTP/2 frame flood. + const absl::string_view inbound_empty_frame_flood = "http2.inbound_empty_frames_flood"; + // Envoy was configured to drop requests with header keys beginning with underscores. 
+ const absl::string_view invalid_underscore = "http2.unexpected_underscore"; -public: const absl::string_view errorDetails(int error_code) const { switch (error_code) { case NGHTTP2_ERR_HTTP_HEADER: @@ -374,6 +380,7 @@ int ConnectionImpl::StreamImpl::onDataSourceSend(const uint8_t* framehd, size_t if (!parent_.addOutboundFrameFragment(output, framehd, FRAME_HEADER_SIZE)) { ENVOY_CONN_LOG(debug, "error sending data frame: Too many frames in the outbound queue", parent_.connection_); + setDetails(Http2ResponseCodeDetails::get().outbound_frame_flood); return NGHTTP2_ERR_FLOODED; } @@ -937,6 +944,7 @@ int ConnectionImpl::saveHeader(const nghttp2_frame* frame, HeaderString&& name, auto should_return = checkHeaderNameForUnderscores(name.getStringView()); if (should_return) { + stream->setDetails(Http2ResponseCodeDetails::get().invalid_underscore); name.clear(); value.clear(); return should_return.value(); @@ -946,8 +954,9 @@ int ConnectionImpl::saveHeader(const nghttp2_frame* frame, HeaderString&& name, if (stream->headers().byteSize() > max_headers_kb_ * 1024 || stream->headers().size() > max_headers_count_) { - // This will cause the library to reset/close the stream. + stream->setDetails(Http2ResponseCodeDetails::get().too_many_headers); stats_.header_overflow_.inc(); + // This will cause the library to reset/close the stream. return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE; } else { return 0; @@ -1353,7 +1362,7 @@ bool ServerConnectionImpl::trackInboundFrames(const nghttp2_frame_hd* hd, uint32 break; } - if (!checkInboundFrameLimits()) { + if (!checkInboundFrameLimits(hd->stream_id)) { // NGHTTP2_ERR_FLOODED is overridden within nghttp2 library and it doesn't propagate // all the way to nghttp2_session_mem_recv() where we need it. flood_detected_ = true; @@ -1363,8 +1372,9 @@ bool ServerConnectionImpl::trackInboundFrames(const nghttp2_frame_hd* hd, uint32 return true; } -bool ServerConnectionImpl::checkInboundFrameLimits() { +bool ServerConnectionImpl::checkInboundFrameLimits(int32_t stream_id) { ASSERT(dispatching_downstream_data_); + ConnectionImpl::StreamImpl* stream = getStream(stream_id); if (consecutive_inbound_frames_with_empty_payload_ > max_consecutive_inbound_frames_with_empty_payload_) { @@ -1372,6 +1382,9 @@ bool ServerConnectionImpl::checkInboundFrameLimits() { "error reading frame: Too many consecutive frames with an empty payload " "received in this HTTP/2 session.", connection_); + if (stream) { + stream->setDetails(Http2ResponseCodeDetails::get().inbound_empty_frame_flood); + } stats_.inbound_empty_frames_flood_.inc(); return false; } diff --git a/source/common/http/http2/codec_impl_legacy.h b/source/common/http/http2/codec_impl_legacy.h index 26ad399541a1..ef3595ecdfba 100644 --- a/source/common/http/http2/codec_impl_legacy.h +++ b/source/common/http/http2/codec_impl_legacy.h @@ -507,7 +507,7 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable checkHeaderNameForUnderscores(absl::string_view header_name) override; // Http::Connection diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc index 492304b3d290..5e179bc9c5dd 100644 --- a/source/common/runtime/runtime_features.cc +++ b/source/common/runtime/runtime_features.cc @@ -73,7 +73,6 @@ constexpr const char* runtime_features[] = { "envoy.reloadable_features.http_default_alpn", "envoy.reloadable_features.http_transport_failure_reason_in_body", "envoy.reloadable_features.listener_in_place_filterchain_update", - 
"envoy.reloadable_features.new_codec_behavior", "envoy.reloadable_features.preserve_query_string_in_path_redirects", "envoy.reloadable_features.preserve_upstream_date", "envoy.reloadable_features.stop_faking_paths", @@ -90,6 +89,8 @@ constexpr const char* runtime_features[] = { // When features are added here, there should be a tracking bug assigned to the // code owner to flip the default after sufficient testing. constexpr const char* disabled_runtime_features[] = { + // TODO(asraa) flip this feature after codec errors are handled + "envoy.reloadable_features.new_codec_behavior", // TODO(alyssawilk) flip true after the release. "envoy.reloadable_features.new_tcp_connection_pool", // Sentinel and test flag. diff --git a/test/common/http/http2/BUILD b/test/common/http/http2/BUILD index 6421f02a7a49..646345fd1373 100644 --- a/test/common/http/http2/BUILD +++ b/test/common/http/http2/BUILD @@ -49,7 +49,7 @@ envoy_cc_test( name = "codec_impl_legacy_test", srcs = ["codec_impl_test.cc"], args = [ - "--runtime-feature-disable-for-tests=envoy.reloadable_features.new_codec_behavior", + "--runtime-feature-override-for-tests=envoy.reloadable_features.new_codec_behavior", ], shard_count = 5, tags = ["fails_on_windows"], From 4dfa844f56d637b6ec26f415f65035707cdf01c8 Mon Sep 17 00:00:00 2001 From: Radha Date: Thu, 30 Jul 2020 01:30:30 +0100 Subject: [PATCH 786/909] header-to-metadata: add support for cookie to metadata (#12206) header-to-metadata filter supports adding a header's value to a metadata key which is later used for subset load balancing. This PR adds support for extracting a specific cookie value before it's added as metadata. Signed-off-by: Radha Kumari --- .../v3/header_to_metadata.proto | 28 ++- .../v4alpha/header_to_metadata.proto | 24 +- .../header_to_metadata_filter.rst | 33 ++- .../v3/header_to_metadata.proto | 28 ++- .../v4alpha/header_to_metadata.proto | 24 +- .../header_to_metadata_filter.cc | 169 ++++++++------ .../header_to_metadata_filter.h | 55 ++++- .../http/header_to_metadata/config_test.cc | 79 ++++++- .../header_to_metadata_filter_test.cc | 206 ++++++++++++++++++ 9 files changed, 533 insertions(+), 113 deletions(-) diff --git a/api/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto b/api/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto index 189de8e7454f..11e70d91d30f 100644 --- a/api/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto +++ b/api/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto @@ -86,29 +86,41 @@ message Config { } // A Rule defines what metadata to apply when a header is present or missing. + // [#next-free-field: 6] message Rule { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.header_to_metadata.v2.Config.Rule"; - // The header that triggers this rule — required. - string header = 1 - [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // If the header is present, apply this metadata KeyValuePair. + // Specifies that a match will be performed on the value of a header or a cookie. + // + // The header to be extracted. + string header = 1 [ + (validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}, + (udpa.annotations.field_migrate).oneof_promotion = "header_cookie_specifier" + ]; + + // The cookie to be extracted. 
+ string cookie = 5 [ + (validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}, + (udpa.annotations.field_migrate).oneof_promotion = "header_cookie_specifier" + ]; + + // If the header or cookie is present, apply this metadata KeyValuePair. // // If the value in the KeyValuePair is non-empty, it'll be used instead - // of the header value. + // of the header or cookie value. KeyValuePair on_header_present = 2; - // If the header is not present, apply this metadata KeyValuePair. + // If the header or cookie is not present, apply this metadata KeyValuePair. // // The value in the KeyValuePair must be set, since it'll be used in lieu - // of the missing header value. + // of the missing header or cookie value. KeyValuePair on_header_missing = 3; // Whether or not to remove the header after a rule is applied. // // This prevents headers from leaking. + // This field is not supported in case of a cookie. bool remove = 4; } diff --git a/api/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto b/api/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto index 603d0a002dc8..54855c08f8c1 100644 --- a/api/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto +++ b/api/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto @@ -86,29 +86,39 @@ message Config { } // A Rule defines what metadata to apply when a header is present or missing. + // [#next-free-field: 6] message Rule { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.http.header_to_metadata.v3.Config.Rule"; - // The header that triggers this rule — required. - string header = 1 - [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; + oneof header_cookie_specifier { + // Specifies that a match will be performed on the value of a header or a cookie. + // + // The header to be extracted. + string header = 1 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; + + // The cookie to be extracted. + string cookie = 5 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; + } - // If the header is present, apply this metadata KeyValuePair. + // If the header or cookie is present, apply this metadata KeyValuePair. // // If the value in the KeyValuePair is non-empty, it'll be used instead - // of the header value. + // of the header or cookie value. KeyValuePair on_header_present = 2; - // If the header is not present, apply this metadata KeyValuePair. + // If the header or cookie is not present, apply this metadata KeyValuePair. // // The value in the KeyValuePair must be set, since it'll be used in lieu - // of the missing header value. + // of the missing header or cookie value. KeyValuePair on_header_missing = 3; // Whether or not to remove the header after a rule is applied. // // This prevents headers from leaking. + // This field is not supported in case of a cookie. bool remove = 4; } diff --git a/docs/root/configuration/http/http_filters/header_to_metadata_filter.rst b/docs/root/configuration/http/http_filters/header_to_metadata_filter.rst index f169fbe93989..bdf2cecc63fe 100644 --- a/docs/root/configuration/http/http_filters/header_to_metadata_filter.rst +++ b/docs/root/configuration/http/http_filters/header_to_metadata_filter.rst @@ -6,8 +6,14 @@ Envoy Header-To-Metadata Filter * This filter should be configured with the name *envoy.filters.http.header_to_metadata*. 
This filter is configured with rules that will be matched against requests and responses. -Each rule has a header and can be triggered either when the header is present or missing. When -a rule is triggered, dynamic metadata will be added based on the configuration of the rule. +Each rule has either a cookie or a header and can be triggered either when the header +or cookie is present or missing. + +When a rule is triggered, dynamic metadata will be added based on the configuration of the rule. +If the header or cookie is present, it's value is extracted and used along with the specified +key as metadata. If the header or cookie is missing, on missing case is triggered and the value +specified is used for adding metadata. + The metadata can then be used for load balancing decisions, consumed from logs, etc. A typical use case for this filter is to dynamically match requests with load balancer @@ -39,6 +45,29 @@ absence of a version header could be: type: STRING remove: false +As with headers, the value of the specified cookie will be extracted from the request +and added as metadata with the key specified. +Removing a cookie when a rule matches is unsupported. + +.. code-block:: yaml + + http_filters: + - name: envoy.filters.http.header_to_metadata + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.header_to_metadata.v3.Config + request_rules: + - cookie: cookie + on_header_present: + metadata_namespace: envoy.lb + key: version + type: STRING + on_header_missing: + metadata_namespace: envoy.lb + key: default + value: 'true' + type: STRING + remove: false + A corresponding upstream cluster configuration could be: diff --git a/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto b/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto index 189de8e7454f..11e70d91d30f 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto @@ -86,29 +86,41 @@ message Config { } // A Rule defines what metadata to apply when a header is present or missing. + // [#next-free-field: 6] message Rule { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.header_to_metadata.v2.Config.Rule"; - // The header that triggers this rule — required. - string header = 1 - [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // If the header is present, apply this metadata KeyValuePair. + // Specifies that a match will be performed on the value of a header or a cookie. + // + // The header to be extracted. + string header = 1 [ + (validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}, + (udpa.annotations.field_migrate).oneof_promotion = "header_cookie_specifier" + ]; + + // The cookie to be extracted. + string cookie = 5 [ + (validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}, + (udpa.annotations.field_migrate).oneof_promotion = "header_cookie_specifier" + ]; + + // If the header or cookie is present, apply this metadata KeyValuePair. // // If the value in the KeyValuePair is non-empty, it'll be used instead - // of the header value. + // of the header or cookie value. KeyValuePair on_header_present = 2; - // If the header is not present, apply this metadata KeyValuePair. 
+ // If the header or cookie is not present, apply this metadata KeyValuePair. // // The value in the KeyValuePair must be set, since it'll be used in lieu - // of the missing header value. + // of the missing header or cookie value. KeyValuePair on_header_missing = 3; // Whether or not to remove the header after a rule is applied. // // This prevents headers from leaking. + // This field is not supported in case of a cookie. bool remove = 4; } diff --git a/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto b/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto index 603d0a002dc8..54855c08f8c1 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto @@ -86,29 +86,39 @@ message Config { } // A Rule defines what metadata to apply when a header is present or missing. + // [#next-free-field: 6] message Rule { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.http.header_to_metadata.v3.Config.Rule"; - // The header that triggers this rule — required. - string header = 1 - [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; + oneof header_cookie_specifier { + // Specifies that a match will be performed on the value of a header or a cookie. + // + // The header to be extracted. + string header = 1 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; + + // The cookie to be extracted. + string cookie = 5 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; + } - // If the header is present, apply this metadata KeyValuePair. + // If the header or cookie is present, apply this metadata KeyValuePair. // // If the value in the KeyValuePair is non-empty, it'll be used instead - // of the header value. + // of the header or cookie value. KeyValuePair on_header_present = 2; - // If the header is not present, apply this metadata KeyValuePair. + // If the header or cookie is not present, apply this metadata KeyValuePair. // // The value in the KeyValuePair must be set, since it'll be used in lieu - // of the missing header value. + // of the missing header or cookie value. KeyValuePair on_header_missing = 3; // Whether or not to remove the header after a rule is applied. // // This prevents headers from leaking. + // This field is not supported in case of a cookie. bool remove = 4; } diff --git a/source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.cc b/source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.cc index f9c060960eb9..350234f2fe36 100644 --- a/source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.cc +++ b/source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.cc @@ -18,7 +18,70 @@ namespace Extensions { namespace HttpFilters { namespace HeaderToMetadataFilter { -Rule::Rule(const std::string& header, const ProtoRule& rule) : header_(header), rule_(rule) { +// Extract the value of the header. +absl::optional HeaderValueSelector::extract(Http::HeaderMap& map) const { + const Http::HeaderEntry* header_entry = map.get(header_); + if (header_entry == nullptr) { + return absl::nullopt; + } + // Catch the value in the header before removing. 
+ absl::optional value = std::string(header_entry->value().getStringView()); + if (remove_) { + map.remove(header_); + } + return value; +} + +// Extract the value of the key from the cookie header. +absl::optional CookieValueSelector::extract(Http::HeaderMap& map) const { + std::string value = Envoy::Http::Utility::parseCookieValue(map, cookie_); + if (!value.empty()) { + return absl::optional(std::move(value)); + } + return absl::nullopt; +} + +Rule::Rule(const ProtoRule& rule) : rule_(rule) { + // Ensure only one of header and cookie is specified. + // TODO(radha13): remove this once we are on v4 and these fields are folded into a oneof. + if (!rule.cookie().empty() && !rule.header().empty()) { + throw EnvoyException("Cannot specify both header and cookie"); + } + + // Initialize the shared pointer. + if (!rule.header().empty()) { + selector_ = + std::make_shared(Http::LowerCaseString(rule.header()), rule.remove()); + } else if (!rule.cookie().empty()) { + selector_ = std::make_shared(rule.cookie()); + } else { + throw EnvoyException("One of Cookie or Header option needs to be specified"); + } + + // Rule must have at least one of the `on_header_*` fields set. + if (!rule.has_on_header_present() && !rule.has_on_header_missing()) { + const auto& error = fmt::format("header to metadata filter: rule for {} has neither " + "`on_header_present` nor `on_header_missing` set", + selector_->toString()); + throw EnvoyException(error); + } + + // Ensure value and regex_value_rewrite are not mixed. + // TODO(rgs1): remove this once we are on v4 and these fields are folded into a oneof. + if (!rule.on_header_present().value().empty() && + rule.on_header_present().has_regex_value_rewrite()) { + throw EnvoyException("Cannot specify both value and regex_value_rewrite"); + } + + // Remove field is un-supported for cookie. + if (!rule.cookie().empty() && rule.remove()) { + throw EnvoyException("Cannot specify remove for cookie"); + } + + if (rule.has_on_header_missing() && rule.on_header_missing().value().empty()) { + throw EnvoyException("Cannot specify on_header_missing rule with an empty value"); + } + if (rule.on_header_present().has_regex_value_rewrite()) { const auto& rewrite_spec = rule.on_header_present().regex_value_rewrite(); regex_rewrite_ = Regex::Utility::parseRegex(rewrite_spec.pattern()); @@ -49,26 +112,7 @@ bool Config::configToVector(const ProtobufRepeatedRule& proto_rules, } for (const auto& entry : proto_rules) { - // Rule must have at least one of the `on_header_*` fields set. - if (!entry.has_on_header_present() && !entry.has_on_header_missing()) { - const auto& error = fmt::format("header to metadata filter: rule for header '{}' has neither " - "`on_header_present` nor `on_header_missing` set", - entry.header()); - throw EnvoyException(error); - } - - // Ensure value and regex_value_rewrite are not mixed. - // TODO(rgs1): remove this once we are on v4 and these fields are folded into a oneof. 
- if (!entry.on_header_present().value().empty() && - entry.on_header_present().has_regex_value_rewrite()) { - throw EnvoyException("Cannot specify both value and regex_value_rewrite"); - } - - if (entry.has_on_header_missing() && entry.on_header_missing().value().empty()) { - throw EnvoyException("Cannot specify on_header_missing rule with an empty value"); - } - - vector.emplace_back(entry.header(), entry); + vector.emplace_back(entry); } return true; @@ -108,8 +152,8 @@ void HeaderToMetadataFilter::setEncoderFilterCallbacks( } bool HeaderToMetadataFilter::addMetadata(StructMap& map, const std::string& meta_namespace, - const std::string& key, absl::string_view value, - ValueType type, ValueEncode encode) const { + const std::string& key, std::string value, ValueType type, + ValueEncode encode) const { ProtobufWkt::Value val; ASSERT(!value.empty()); @@ -120,10 +164,9 @@ bool HeaderToMetadataFilter::addMetadata(StructMap& map, const std::string& meta return false; } - std::string decodedValue = std::string(value); if (encode == envoy::extensions::filters::http::header_to_metadata::v3::Config::BASE64) { - decodedValue = Base64::decodeWithoutPadding(value); - if (decodedValue.empty()) { + value = Base64::decodeWithoutPadding(value); + if (value.empty()) { ENVOY_LOG(debug, "Base64 decode failed"); return false; } @@ -132,11 +175,11 @@ bool HeaderToMetadataFilter::addMetadata(StructMap& map, const std::string& meta // Sane enough, add the key/value. switch (type) { case envoy::extensions::filters::http::header_to_metadata::v3::Config::STRING: - val.set_string_value(std::move(decodedValue)); + val.set_string_value(std::move(value)); break; case envoy::extensions::filters::http::header_to_metadata::v3::Config::NUMBER: { double dval; - if (absl::SimpleAtod(StringUtil::trim(decodedValue), &dval)) { + if (absl::SimpleAtod(StringUtil::trim(value), &dval)) { val.set_number_value(dval); } else { ENVOY_LOG(debug, "value to number conversion failed"); @@ -145,7 +188,7 @@ bool HeaderToMetadataFilter::addMetadata(StructMap& map, const std::string& meta break; } case envoy::extensions::filters::http::header_to_metadata::v3::Config::PROTOBUF_VALUE: { - if (!val.ParseFromString(decodedValue)) { + if (!val.ParseFromString(value)) { ENVOY_LOG(debug, "parse from decoded string failed"); return false; } @@ -172,56 +215,42 @@ const std::string& HeaderToMetadataFilter::decideNamespace(const std::string& ns return nspace.empty() ? 
HttpFilterNames::get().HeaderToMetadata : nspace; } +// add metadata['key']= value depending on header present or missing case +void HeaderToMetadataFilter::applyKeyValue(std::string value, const Rule& rule, + const KeyValuePair& keyval, StructMap& np) { + if (!keyval.value().empty()) { + value = keyval.value(); + } else { + const auto& matcher = rule.regexRewrite(); + if (matcher != nullptr) { + value = matcher->replaceAll(value, rule.regexSubstitution()); + } + } + if (!value.empty()) { + const auto& nspace = decideNamespace(keyval.metadata_namespace()); + addMetadata(np, nspace, keyval.key(), value, keyval.type(), keyval.encode()); + } else { + ENVOY_LOG(debug, "value is empty, not adding metadata"); + } +} + void HeaderToMetadataFilter::writeHeaderToMetadata(Http::HeaderMap& headers, const HeaderToMetadataRules& rules, Http::StreamFilterCallbacks& callbacks) { StructMap structs_by_namespace; for (const auto& rule : rules) { - const auto& header = rule.header(); const auto& proto_rule = rule.rule(); - const Http::HeaderEntry* header_entry = headers.get(header); - - if (header_entry != nullptr && proto_rule.has_on_header_present()) { - const auto& keyval = proto_rule.on_header_present(); - absl::string_view value = header_entry->value().getStringView(); - // This is used to hold the rewritten header value, so that it can - // be bound to value without going out of scope. - std::string rewritten_value; - - if (!keyval.value().empty()) { - value = absl::string_view(keyval.value()); - } else { - const auto& matcher = rule.regexRewrite(); - if (matcher != nullptr) { - rewritten_value = matcher->replaceAll(value, rule.regexSubstitution()); - value = rewritten_value; - } - } - - if (!value.empty()) { - const auto& nspace = decideNamespace(keyval.metadata_namespace()); - addMetadata(structs_by_namespace, nspace, keyval.key(), value, keyval.type(), - keyval.encode()); - } else { - ENVOY_LOG(debug, "value is empty, not adding metadata"); - } - - if (proto_rule.remove()) { - headers.remove(header); - } - } - if (header_entry == nullptr && proto_rule.has_on_header_missing()) { - // Add metadata for the header missing case. - const auto& keyval = proto_rule.on_header_missing(); - - ASSERT(!keyval.value().empty()); - const auto& nspace = decideNamespace(keyval.metadata_namespace()); - addMetadata(structs_by_namespace, nspace, keyval.key(), keyval.value(), keyval.type(), - keyval.encode()); + absl::optional value = rule.selector_->extract(headers); + + if (value && proto_rule.has_on_header_present()) { + applyKeyValue(std::move(value).value_or(""), rule, proto_rule.on_header_present(), + structs_by_namespace); + } else if (!value && proto_rule.has_on_header_missing()) { + applyKeyValue(std::move(value).value_or(""), rule, proto_rule.on_header_missing(), + structs_by_namespace); } } - // Any matching rules? 
if (!structs_by_namespace.empty()) { for (auto const& entry : structs_by_namespace) { diff --git a/source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.h b/source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.h index 4cc3e117c4ff..dd85f1fc4f99 100644 --- a/source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.h +++ b/source/extensions/filters/http/header_to_metadata/header_to_metadata_filter.h @@ -20,17 +20,63 @@ namespace HeaderToMetadataFilter { using ProtoRule = envoy::extensions::filters::http::header_to_metadata::v3::Config::Rule; using ValueType = envoy::extensions::filters::http::header_to_metadata::v3::Config::ValueType; using ValueEncode = envoy::extensions::filters::http::header_to_metadata::v3::Config::ValueEncode; +using KeyValuePair = envoy::extensions::filters::http::header_to_metadata::v3::Config::KeyValuePair; + +// Interface for getting values from a cookie or a header. +class ValueSelector { +public: + virtual ~ValueSelector() = default; + + /** + * Called to extract the value of a given header or cookie. + * @param http header map. + * @return absl::optional the extracted header or cookie. + */ + virtual absl::optional extract(Http::HeaderMap& map) const PURE; + + /** + * @return a string representation of either a cookie or a header passed in the request. + */ + virtual std::string toString() const PURE; +}; + +// Get value from a header. +class HeaderValueSelector : public ValueSelector { +public: + // ValueSelector. + explicit HeaderValueSelector(Http::LowerCaseString header, bool remove) + : header_(std::move(header)), remove_(std::move(remove)) {} + absl::optional extract(Http::HeaderMap& map) const override; + std::string toString() const override { return fmt::format("header '{}'", header_.get()); } + ~HeaderValueSelector() override = default; + +private: + const Http::LowerCaseString header_; + const bool remove_; +}; + +// Get value from a cookie. +class CookieValueSelector : public ValueSelector { +public: + // ValueSelector. 
+ explicit CookieValueSelector(std::string cookie) : cookie_(std::move(cookie)) {} + absl::optional extract(Http::HeaderMap& map) const override; + std::string toString() const override { return fmt::format("cookie '{}'", cookie_); } + ~CookieValueSelector() override = default; + +private: + const std::string cookie_; +}; class Rule { public: - Rule(const std::string& header, const ProtoRule& rule); + Rule(const ProtoRule& rule); const ProtoRule& rule() const { return rule_; } const Regex::CompiledMatcherPtr& regexRewrite() const { return regex_rewrite_; } const std::string& regexSubstitution() const { return regex_rewrite_substitution_; } - const Http::LowerCaseString& header() const { return header_; } + std::shared_ptr selector_; private: - const Http::LowerCaseString header_; const ProtoRule rule_; Regex::CompiledMatcherPtr regex_rewrite_{}; std::string regex_rewrite_substitution_{}; @@ -142,8 +188,9 @@ class HeaderToMetadataFilter : public Http::StreamFilter, */ void writeHeaderToMetadata(Http::HeaderMap& headers, const HeaderToMetadataRules& rules, Http::StreamFilterCallbacks& callbacks); - bool addMetadata(StructMap&, const std::string&, const std::string&, absl::string_view, ValueType, + bool addMetadata(StructMap&, const std::string&, const std::string&, std::string, ValueType, ValueEncode) const; + void applyKeyValue(std::string, const Rule&, const KeyValuePair&, StructMap&); const std::string& decideNamespace(const std::string& nspace) const; const Config* getConfig() const; }; diff --git a/test/extensions/filters/http/header_to_metadata/config_test.cc b/test/extensions/filters/http/header_to_metadata/config_test.cc index f10d78f63604..3b7771b4ce23 100644 --- a/test/extensions/filters/http/header_to_metadata/config_test.cc +++ b/test/extensions/filters/http/header_to_metadata/config_test.cc @@ -32,22 +32,26 @@ void testForbiddenConfig(const std::string& yaml) { EnvoyException); } -// Tests that an empty header is rejected. -TEST(HeaderToMetadataFilterConfigTest, InvalidEmptyHeader) { +// Tests that empty (metadata) keys are rejected. +TEST(HeaderToMetadataFilterConfigTest, InvalidEmptyKey) { const std::string yaml = R"EOF( request_rules: -- header: "" + - header: x-version + on_header_present: + metadata_namespace: envoy.lb + key: "" + type: STRING )EOF"; HeaderToMetadataProtoConfig proto_config; EXPECT_THROW(TestUtility::loadFromYamlAndValidate(yaml, proto_config), ProtoValidationException); } -// Tests that empty (metadata) keys are rejected. -TEST(HeaderToMetadataFilterConfigTest, InvalidEmptyKey) { +// Tests that empty (metadata) keys are rejected in case of cookie. +TEST(HeaderToMetadataFilterConfigTest, InvalidEmptyCookieKey) { const std::string yaml = R"EOF( request_rules: - - header: x-version + - cookie: x-cookie on_header_present: metadata_namespace: envoy.lb key: "" @@ -58,7 +62,7 @@ TEST(HeaderToMetadataFilterConfigTest, InvalidEmptyKey) { EXPECT_THROW(TestUtility::loadFromYamlAndValidate(yaml, proto_config), ProtoValidationException); } -// Tests that a valid config is properly consumed. +// Tests that a valid config with header is properly consumed. TEST(HeaderToMetadataFilterConfigTest, SimpleConfig) { const std::string yaml = R"EOF( request_rules: @@ -86,6 +90,34 @@ TEST(HeaderToMetadataFilterConfigTest, SimpleConfig) { cb(filter_callbacks); } +// Tests that a valid config with cookie is properly consumed. 
+TEST(HeaderToMetadataFilterConfigTest, SimpleCookieConfig) { + const std::string yaml = R"EOF( +request_rules: + - cookie: x-cookie + on_header_present: + metadata_namespace: envoy.lb + key: version1 + type: STRING + on_header_missing: + metadata_namespace: envoy.lb + key: default + value: 'true' + type: STRING + )EOF"; + + HeaderToMetadataProtoConfig proto_config; + TestUtility::loadFromYamlAndValidate(yaml, proto_config); + + testing::NiceMock context; + HeaderToMetadataConfig factory; + + Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, "stats", context); + Http::MockFilterChainFactoryCallbacks filter_callbacks; + EXPECT_CALL(filter_callbacks, addStreamFilter(_)); + cb(filter_callbacks); +} + // Tests that per route config properly overrides the global config. TEST(HeaderToMetadataFilterConfigTest, PerRouteConfig) { const std::string yaml = R"EOF( @@ -134,6 +166,25 @@ TEST(HeaderToMetadataFilterConfigTest, ValueAndRegex) { testForbiddenConfig(yaml); } +// Tests that cookie configuration does not allow value and regex_value_rewrite in the same rule. +TEST(HeaderToMetadataFilterConfigTest, CookieValueAndRegex) { + const std::string yaml = R"EOF( +request_rules: + - cookie: x-cookie + on_header_present: + metadata_namespace: envoy.lb + key: cluster + value: foo + regex_value_rewrite: + pattern: + google_re2: {} + regex: "^/(cluster[\\d\\w-]+)/?.*$" + substitution: "\\1" + )EOF"; + + testForbiddenConfig(yaml); +} + // Tests that on_header_missing rules don't allow an empty value. TEST(HeaderToMetadataFilterConfigTest, OnHeaderMissingEmptyValue) { const std::string yaml = R"EOF( @@ -148,6 +199,20 @@ TEST(HeaderToMetadataFilterConfigTest, OnHeaderMissingEmptyValue) { testForbiddenConfig(yaml); } +// Tests that on_header_missing rules don't allow an empty cookie value. +TEST(HeaderToMetadataFilterConfigTest, CookieOnHeaderMissingEmptyValue) { + const std::string yaml = R"EOF( +request_rules: + - cookie: x-cookie + on_header_missing: + metadata_namespace: envoy.lb + key: "foo" + type: STRING + )EOF"; + + testForbiddenConfig(yaml); +} + } // namespace HeaderToMetadataFilter } // namespace HttpFilters } // namespace Extensions diff --git a/test/extensions/filters/http/header_to_metadata/header_to_metadata_filter_test.cc b/test/extensions/filters/http/header_to_metadata/header_to_metadata_filter_test.cc index cf09a67ae718..fc9c81431ae1 100644 --- a/test/extensions/filters/http/header_to_metadata/header_to_metadata_filter_test.cc +++ b/test/extensions/filters/http/header_to_metadata/header_to_metadata_filter_test.cc @@ -443,6 +443,55 @@ TEST_F(HeaderToMetadataTest, PerRouteEmtpyRules) { EXPECT_THROW(std::make_shared(config_proto, true), EnvoyException); } +/** + * Invalid empty header or cookie should be rejected. + */ +TEST_F(HeaderToMetadataTest, RejectEmptyHeader) { + const std::string config = R"EOF( +request_rules: + - header: "" + +)EOF"; + auto expected = "One of Cookie or Header option needs to be specified"; + EXPECT_THROW_WITH_MESSAGE(initializeFilter(config), EnvoyException, expected); +} + +/** + * Rules with both header and cookie fields should be rejected. 
+ */ +TEST_F(HeaderToMetadataTest, RejectBothCookieHeader) { + const std::string config = R"EOF( +request_rules: + - header: x-something + cookie: something-else + on_header_present: + key: something + value: else + type: STRING + remove: false + +)EOF"; + auto expected = "Cannot specify both header and cookie"; + EXPECT_THROW_WITH_MESSAGE(initializeFilter(config), EnvoyException, expected); +} + +/** + * Rules with remove field should be rejected in case of a cookie. + */ +TEST_F(HeaderToMetadataTest, RejectRemoveForCookie) { + const std::string config = R"EOF( +request_rules: + - cookie: cookie + on_header_present: + metadata_namespace: envoy.lb + key: version + type: STRING + remove: true +)EOF"; + auto expected = "Cannot specify remove for cookie"; + EXPECT_THROW_WITH_MESSAGE(initializeFilter(config), EnvoyException, expected); +} + /** * Empty values not added to metadata. */ @@ -543,6 +592,163 @@ TEST_F(HeaderToMetadataTest, NoMissingWhenHeaderIsPresent) { EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false)); } +/** + * on header missing case with no header data + */ + +TEST_F(HeaderToMetadataTest, OnMissingWhenHeaderIsPresent) { + const std::string config = R"EOF( +request_rules: + - header: x-version + on_header_missing: + metadata_namespace: envoy.lb + key: version + value: some_value + type: STRING +)EOF"; + initializeFilter(config); + Http::TestRequestHeaderMapImpl headers{{"x-version", ""}}; + + EXPECT_CALL(decoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_)); + EXPECT_CALL(req_info_, setDynamicMetadata(_, _)).Times(0); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false)); +} + +/** + * on header present case, when the regex replacement turns the header into an empty string + */ +TEST_F(HeaderToMetadataTest, HeaderIsPresentButRegexEmptiesIt) { + const std::string config = R"EOF( +request_rules: + - header: x-version + on_header_present: + metadata_namespace: envoy.lb + key: cluster + regex_value_rewrite: + pattern: + google_re2: {} + regex: "^foo" + substitution: "" + on_header_missing: + metadata_namespace: envoy.lb + key: version + value: some_value + type: STRING +)EOF"; + initializeFilter(config); + Http::TestRequestHeaderMapImpl headers{{"x-version", "foo"}}; + + EXPECT_CALL(decoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_)); + EXPECT_CALL(req_info_, setDynamicMetadata(_, _)).Times(0); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false)); +} + +/** + * cookie value extracted and stored + */ +TEST_F(HeaderToMetadataTest, CookieValueUsed) { + const std::string response_config_yaml = R"EOF( +response_rules: + - cookie: bar + on_header_present: + key: bar + type: STRING + remove: false +)EOF"; + initializeFilter(response_config_yaml); + Http::TestResponseHeaderMapImpl incoming_headers{{"cookie", "bar=foo"}}; + std::map expected = {{"bar", "foo"}}; + + EXPECT_CALL(encoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_)); + EXPECT_CALL(req_info_, + setDynamicMetadata(HttpFilterNames::get().HeaderToMetadata, MapEq(expected))); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(incoming_headers, false)); +} + +/** + * Ignore the cookie's value, use a given constant value. 
+ */ +TEST_F(HeaderToMetadataTest, IgnoreCookieValueUseConstant) { + const std::string response_config_yaml = R"EOF( +response_rules: + - cookie: meh + on_header_present: + key: meh + value: some_value + type: STRING + remove: false +)EOF"; + initializeFilter(response_config_yaml); + Http::TestResponseHeaderMapImpl incoming_headers{{"cookie", "meh=foo"}}; + std::map expected = {{"meh", "some_value"}}; + + EXPECT_CALL(encoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_)); + EXPECT_CALL(req_info_, + setDynamicMetadata(HttpFilterNames::get().HeaderToMetadata, MapEq(expected))); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(incoming_headers, false)); +} + +/** + * No cookie value, no metadata + */ +TEST_F(HeaderToMetadataTest, NoCookieValue) { + const std::string config = R"EOF( +request_rules: + - cookie: foo + on_header_missing: + metadata_namespace: envoy.lb + key: foo + value: some_value + type: STRING +)EOF"; + initializeFilter(config); + Http::TestRequestHeaderMapImpl headers{{"cookie", ""}}; + std::map expected = {{"foo", "some_value"}}; + + EXPECT_CALL(decoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_)); + EXPECT_CALL(req_info_, setDynamicMetadata("envoy.lb", MapEq(expected))); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false)); +} + +/** + * Regex substitution on cookie value. + */ +TEST_F(HeaderToMetadataTest, CookieRegexSubstitution) { + const std::string config = R"EOF( +request_rules: + - cookie: foo + on_header_present: + metadata_namespace: envoy.lb + key: cluster + regex_value_rewrite: + pattern: + google_re2: {} + regex: "^(cluster[\\d\\w-]+)$" + substitution: "\\1 matched" +)EOF"; + initializeFilter(config); + + // match. + { + Http::TestRequestHeaderMapImpl headers{{"cookie", "foo=cluster-prod-001"}}; + std::map expected = {{"cluster", "cluster-prod-001 matched"}}; + + EXPECT_CALL(decoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_)); + EXPECT_CALL(req_info_, setDynamicMetadata("envoy.lb", MapEq(expected))); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false)); + } + + // No match. + { + Http::TestRequestHeaderMapImpl headers{{"cookie", "foo=cluster"}}; + std::map expected = {{"cluster", "cluster"}}; + + EXPECT_CALL(decoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_)); + EXPECT_CALL(req_info_, setDynamicMetadata("envoy.lb", MapEq(expected))); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false)); + } +} + } // namespace HeaderToMetadataFilter } // namespace HttpFilters } // namespace Extensions From 9d70da7c8156f29f6493562ba088138f61f36fc8 Mon Sep 17 00:00:00 2001 From: Jingzhao123 <44225601+Jingzhao123@users.noreply.github.com> Date: Thu, 30 Jul 2020 17:24:16 +0800 Subject: [PATCH 787/909] Enable envoy images build on Arm CI environments (#11813) In this patch, it will enable the envoyproxy/envoy arm image to build in community arm CI environments. 1. Do some modifications in docker_ci.sh script for building arm images by buildx. It will firstly set up environments. Then use the buildx tool to build the envoyproxy/envoy arm images on x86 platform. 2. Modify the docker build job for building multi-arch images. It will firstly download the arm64 and amd64 envoy binaries. Then invoke the docker_ci.sh scripts to generate images. 
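For readers unfamiliar with buildx, the flow described above has three steps: register QEMU emulation so arm64 build steps can execute on the x86 host, create a builder instance that targets both platforms, then build (and optionally push) the images from the shared Dockerfile. A minimal sketch of that sequence, using the same commands the updated ci/docker_ci.sh runs (the image tag here is illustrative; the script derives real tags from DOCKER_IMAGE_PREFIX):

    # Register QEMU binfmt handlers so arm64 layers can run on an x86 host.
    docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
    # Create a builder instance capable of targeting both platforms.
    docker buildx create --use --name multi-builder --platform linux/arm64,linux/amd64
    # Cross-build the arm64 image; "--push" would publish it, while
    # "-o type=docker" exports a locally usable single-arch image instead.
    docker buildx build --platform linux/arm64 -f ci/Dockerfile-envoy -t envoyproxy/envoy:local .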
Risk Level: Medium (of breaking images) Testing: CI Docs Changes: N/A Release Notes: Added Fixes #1861 Signed-off-by: Jingzhao.Ni --- .azure-pipelines/pipelines.yml | 15 +++++-- ci/Dockerfile-envoy | 5 +-- ci/Dockerfile-envoy-alpine | 4 +- ci/Dockerfile-envoy-alpine-debug | 3 +- ci/docker_ci.sh | 56 ++++++++++++++++++++++----- docs/root/version_history/current.rst | 1 + 6 files changed, 64 insertions(+), 20 deletions(-) diff --git a/.azure-pipelines/pipelines.yml b/.azure-pipelines/pipelines.yml index a0947d2ccea3..492884110c88 100644 --- a/.azure-pipelines/pipelines.yml +++ b/.azure-pipelines/pipelines.yml @@ -122,8 +122,8 @@ jobs: condition: always() - job: docker - displayName: "Linux-x64 docker" - dependsOn: ["release"] + displayName: "Linux multi-arch docker" + dependsOn: ["release","release_arm64"] condition: and(succeeded(), eq(variables['PostSubmit'], 'true'), ne(variables['Build.Reason'], 'PullRequest')) pool: vmImage: "ubuntu-18.04" @@ -135,10 +135,17 @@ jobs: itemPattern: "bazel.release/envoy_binary.tar.gz" downloadType: single targetPath: $(Build.StagingDirectory) - + - task: DownloadBuildArtifacts@0 + inputs: + buildType: current + artifactName: "bazel.release.arm64" + itemPattern: "bazel.release.arm64/envoy_binary.tar.gz" + downloadType: single + targetPath: $(Build.StagingDirectory) - bash: | set -e - tar zxf $(Build.StagingDirectory)/bazel.release/envoy_binary.tar.gz + mkdir -p linux/amd64 && tar zxf $(Build.StagingDirectory)/bazel.release/envoy_binary.tar.gz -C ./linux/amd64 + mkdir -p linux/arm64 && tar zxf $(Build.StagingDirectory)/bazel.release.arm64/envoy_binary.tar.gz -C ./linux/arm64 ci/docker_ci.sh workingDirectory: $(Build.SourcesDirectory) env: diff --git a/ci/Dockerfile-envoy b/ci/Dockerfile-envoy index 377fb3684b8d..ee6709912db9 100644 --- a/ci/Dockerfile-envoy +++ b/ci/Dockerfile-envoy @@ -1,6 +1,5 @@ ARG BUILD_FROM=ubuntu:18.04 - # Build stage FROM $BUILD_FROM as build @@ -17,7 +16,7 @@ RUN apt-get update \ # Final stage FROM $BUILD_FROM - +ARG TARGETPLATFORM RUN apt-get update \ && apt-get upgrade -y \ && apt-get install -y ca-certificates \ @@ -31,7 +30,7 @@ RUN adduser --group --system envoy RUN mkdir -p /etc/envoy -ADD build_release_stripped/envoy /usr/local/bin/envoy +ADD ${TARGETPLATFORM}/build_release_stripped/envoy /usr/local/bin/envoy ADD configs/google_com_proxy.v2.yaml /etc/envoy/envoy.yaml EXPOSE 10000 diff --git a/ci/Dockerfile-envoy-alpine b/ci/Dockerfile-envoy-alpine index a4bd4ffbe5e2..b9bf2320af23 100644 --- a/ci/Dockerfile-envoy-alpine +++ b/ci/Dockerfile-envoy-alpine @@ -1,8 +1,8 @@ FROM frolvlad/alpine-glibc:alpine-3.12_glibc-2.31 - RUN mkdir -p /etc/envoy -ADD build_release_stripped/envoy /usr/local/bin/envoy +ADD linux/amd64/build_release_stripped/envoy /usr/local/bin/envoy + ADD configs/google_com_proxy.v2.yaml /etc/envoy/envoy.yaml RUN apk add --no-cache shadow su-exec \ && addgroup -S envoy && adduser --no-create-home -S envoy -G envoy diff --git a/ci/Dockerfile-envoy-alpine-debug b/ci/Dockerfile-envoy-alpine-debug index b7e7f34529a4..c58df8ccd211 100644 --- a/ci/Dockerfile-envoy-alpine-debug +++ b/ci/Dockerfile-envoy-alpine-debug @@ -1,8 +1,7 @@ FROM frolvlad/alpine-glibc:alpine-3.12_glibc-2.31 - RUN mkdir -p /etc/envoy -ADD build_release/envoy /usr/local/bin/envoy +ADD linux/amd64/build_release/envoy /usr/local/bin/envoy ADD configs/google_com_proxy.v2.yaml /etc/envoy/envoy.yaml RUN apk add --no-cache shadow su-exec \ && addgroup -S envoy && adduser --no-create-home -S envoy -G envoy diff --git a/ci/docker_ci.sh b/ci/docker_ci.sh 
index 7accf7f63d36..d91af54cda36 100755 --- a/ci/docker_ci.sh +++ b/ci/docker_ci.sh @@ -4,16 +4,59 @@ # CI logs. set -e +# Setting environments for buildx tools +config_env(){ + # Qemu configurations + docker run --rm --privileged multiarch/qemu-user-static --reset -p yes + + # Remove older build instance + docker buildx rm multi-builder | true + docker buildx create --use --name multi-builder --platform linux/arm64,linux/amd64 +} + +build_images(){ + TYPE=$1 + BUILD_TAG=$2 + + # Only build/push envoyproxy/envoy multi-arch images since others still do not support. + if [ -z "${TYPE}" ]; then + docker buildx build --platform linux/arm64 -f ci/Dockerfile-envoy"${TYPE}" -t ${BUILD_TAG} . + # Export envoyproxy/envoy amd64 image which will be used for building envoyproxy/envoy-google-vrp + docker buildx build --platform linux/amd64 -f ci/Dockerfile-envoy"${TYPE}" -o type=docker -t ${BUILD_TAG} . + elif [ "${TYPE}" == "-google-vrp" ]; then + # The envoyproxy/envoy-google-vrp is based on envoyproxy/envoy image. So it is built from cache envoyproxy/envoy:local + docker build -f ci/Dockerfile-envoy"${TYPE}" --cache-from "${DOCKER_IMAGE_PREFIX}:local" -t ${BUILD_TAG} . + else + docker build -f ci/Dockerfile-envoy"${TYPE}" -t ${BUILD_TAG} . + fi +} + +push_images(){ + TYPE=$1 + BUILD_TAG=$2 + + if [ -z "${TYPE}" ]; then + # Only push envoyproxy/envoy multi-arch images since others still do not support. + docker buildx build --platform linux/arm64,linux/amd64 --push -f ci/Dockerfile-envoy"${TYPE}" -t ${BUILD_TAG} . + else + docker tag "${DOCKER_IMAGE_PREFIX}${TYPE}:local" ${BUILD_TAG} + docker push ${BUILD_TAG} + fi +} + # This prefix is altered for the private security images on setec builds. DOCKER_IMAGE_PREFIX="${DOCKER_IMAGE_PREFIX:-envoyproxy/envoy}" # "-google-vrp" must come afer "" to ensure we rebuild the local base image dependency. BUILD_TYPES=("" "-alpine" "-alpine-debug" "-google-vrp") +# Configure docker-buildx tools +config_env + # Test the docker build in all cases, but use a local tag that we will overwrite before push in the # cases where we do push. for BUILD_TYPE in "${BUILD_TYPES[@]}"; do - docker build -f ci/Dockerfile-envoy"${BUILD_TYPE}" -t "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}:local" . + build_images "${BUILD_TYPE}" "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}:local" done MASTER_BRANCH="refs/heads/master" @@ -42,21 +85,16 @@ fi docker login -u "$DOCKERHUB_USERNAME" -p "$DOCKERHUB_PASSWORD" for BUILD_TYPE in "${BUILD_TYPES[@]}"; do - docker tag "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}:local" "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}${IMAGE_POSTFIX}:${IMAGE_NAME}" - docker push "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}${IMAGE_POSTFIX}:${IMAGE_NAME}" + push_images "${BUILD_TYPE}" "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}${IMAGE_POSTFIX}:${IMAGE_NAME}" # Only push latest on master builds. 
if [[ "${AZP_BRANCH}" == "${MASTER_BRANCH}" ]]; then - docker tag "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}:local" "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}${IMAGE_POSTFIX}:latest" - docker push "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}${IMAGE_POSTFIX}:latest" + push_images "${BUILD_TYPE}" "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}${IMAGE_POSTFIX}:latest" fi # Push vX.Y-latest to tag the latest image in a release line if [[ "${AZP_BRANCH}" =~ ${RELEASE_TAG_REGEX} ]]; then RELEASE_LINE=$(echo "$IMAGE_NAME" | sed -E 's/(v[0-9]+\.[0-9]+)\.[0-9]+/\1-latest/') - docker tag "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}:local" "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}${IMAGE_POSTFIX}:${RELEASE_LINE}" - docker push "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}${IMAGE_POSTFIX}:${RELEASE_LINE}" + push_images "${BUILD_TYPE}" "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}${IMAGE_POSTFIX}:${RELEASE_LINE}" fi done - - diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 6be17e93042b..e2a877537854 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -41,6 +41,7 @@ New Features ------------ * access log: added support for :ref:`%DOWNSTREAM_PEER_FINGERPRINT_1% ` as a response flag. +* build: enable building envoy arm64 images by buildx tool in x86 CI platform. * dynamic_forward_proxy: added :ref:`use_tcp_for_dns_lookups` option to use TCP for DNS lookups in order to match the DNS options for :ref:`Clusters`. * ext_authz filter: added support for emitting dynamic metadata for both :ref:`HTTP ` and :ref:`network ` filters. * grpc-json: support specifying `response_body` field in for `google.api.HttpBody` message. From 96cc820634d07b7c7b298db0f23fa4989a2cf588 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Thu, 30 Jul 2020 08:32:32 -0400 Subject: [PATCH 788/909] tcp: fixing a bug in the new connection pool due to too many shared pointers (#12366) The prior regression was due to variable shadowing. ActiveTcpClient::ActiveTcpClient sets real_host_description_ in the constructor. Here it set its own real_host_description_ where it was supposed to set the base class real_host_description_ When onPoollFailure was called, it was called with the base class real_host_description_ (nullptr) So in the TCP filter read_callbacks_->upstreamHost(host); set a null pointer. and in void Filter::onUpstreamEvent read_callbacks_->upstreamHost()->outlierDetector().putResult( would segfault. 
Risk Level: n/a (flag disabled code) Testing: new regression test of upstream failure, unit test assert Docs Changes: n/a Release Notes: n/a Signed-off-by: Alyssa Wilk --- source/common/conn_pool/conn_pool_base.cc | 1 + source/common/tcp/conn_pool.h | 1 - test/config/utility.cc | 15 ++++++++++----- .../redis_proxy_integration_test.cc | 12 ++++++------ test/integration/integration.h | 2 +- test/integration/stats_integration_test.cc | 1 - .../integration/tcp_proxy_integration_test.cc | 19 +++++++++++++++++++ 7 files changed, 37 insertions(+), 14 deletions(-) diff --git a/source/common/conn_pool/conn_pool_base.cc b/source/common/conn_pool/conn_pool_base.cc index 573bfd50c9f4..66a4a23fafce 100644 --- a/source/common/conn_pool/conn_pool_base.cc +++ b/source/common/conn_pool/conn_pool_base.cc @@ -88,6 +88,7 @@ bool ConnPoolImplBase::tryCreateNewConnection() { ASSERT(client->state_ == ActiveClient::State::CONNECTING); ASSERT(std::numeric_limits::max() - connecting_stream_capacity_ >= client->effectiveConcurrentRequestLimit()); + ASSERT(client->real_host_description_); connecting_stream_capacity_ += client->effectiveConcurrentRequestLimit(); client->moveIntoList(std::move(client), owningList(client->state_)); } diff --git a/source/common/tcp/conn_pool.h b/source/common/tcp/conn_pool.h index 0ed92d295d31..91918b07be76 100644 --- a/source/common/tcp/conn_pool.h +++ b/source/common/tcp/conn_pool.h @@ -109,7 +109,6 @@ class ActiveTcpClient : public Envoy::ConnectionPool::ActiveClient { void clearCallbacks(); ConnPoolImpl& parent_; - Upstream::HostDescriptionConstSharedPtr real_host_description_; ConnectionPool::UpstreamCallbacks* callbacks_{}; Network::ClientConnectionPtr connection_; ConnectionPool::ConnectionStatePtr connection_state_; diff --git a/test/config/utility.cc b/test/config/utility.cc index 921ebb81fe1d..873e4ec52484 100644 --- a/test/config/utility.cc +++ b/test/config/utility.cc @@ -671,11 +671,16 @@ void ConfigHelper::finalize(const std::vector& ports) { for (int k = 0; k < locality_lb->lb_endpoints_size(); ++k) { auto lb_endpoint = locality_lb->mutable_lb_endpoints(k); if (lb_endpoint->endpoint().address().has_socket_address()) { - RELEASE_ASSERT(ports.size() > port_idx, ""); - lb_endpoint->mutable_endpoint() - ->mutable_address() - ->mutable_socket_address() - ->set_port_value(ports[port_idx++]); + if (lb_endpoint->endpoint().address().socket_address().port_value() == 0) { + RELEASE_ASSERT(ports.size() > port_idx, ""); + lb_endpoint->mutable_endpoint() + ->mutable_address() + ->mutable_socket_address() + ->set_port_value(ports[port_idx++]); + } else { + ENVOY_LOG_MISC(debug, "Not overriding preset port", + lb_endpoint->endpoint().address().socket_address().port_value()); + } } } } diff --git a/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc b/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc index b9760e147206..aae9ef011c36 100644 --- a/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc +++ b/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc @@ -117,12 +117,12 @@ const std::string CONFIG_WITH_ROUTES_BASE = R"EOF( address: socket_address: address: 127.0.0.1 - port_value: 1 + port_value: 0 - endpoint: address: socket_address: address: 127.0.0.1 - port_value: 1 + port_value: 0 - name: cluster_2 type: STATIC lb_policy: RANDOM @@ -134,12 +134,12 @@ const std::string CONFIG_WITH_ROUTES_BASE = R"EOF( address: socket_address: address: 127.0.0.1 - port_value: 2 + port_value: 0 - endpoint: 
address: socket_address: address: 127.0.0.1 - port_value: 2 + port_value: 0 listeners: name: listener_0 address: @@ -235,7 +235,7 @@ const std::string CONFIG_WITH_ROUTES_AND_AUTH_PASSWORDS = R"EOF( address: socket_address: address: 127.0.0.1 - port_value: 1 + port_value: 0 - name: cluster_2 type: STATIC typed_extension_protocol_options: @@ -251,7 +251,7 @@ const std::string CONFIG_WITH_ROUTES_AND_AUTH_PASSWORDS = R"EOF( address: socket_address: address: 127.0.0.1 - port_value: 2 + port_value: 0 listeners: name: listener_0 address: diff --git a/test/integration/integration.h b/test/integration/integration.h index dfee7b131ff0..2a081d82cf8e 100644 --- a/test/integration/integration.h +++ b/test/integration/integration.h @@ -184,7 +184,7 @@ class BaseIntegrationTest : protected Logger::Loggable { virtual void createEnvoy(); // Sets upstream_protocol_ and alters the upstream protocol in the config_helper_ void setUpstreamProtocol(FakeHttpConnection::Type protocol); - // Sets fake_upstreams_count_ and alters the upstream protocol in the config_helper_ + // Sets fake_upstreams_count_ void setUpstreamCount(uint32_t count) { fake_upstreams_count_ = count; } // Skip validation that ensures that all upstream ports are referenced by the // configuration generated in ConfigHelper::finalize. diff --git a/test/integration/stats_integration_test.cc b/test/integration/stats_integration_test.cc index deab7fc53c00..1238176409b3 100644 --- a/test/integration/stats_integration_test.cc +++ b/test/integration/stats_integration_test.cc @@ -192,7 +192,6 @@ class ClusterMemoryTestHelper : public BaseIntegrationTest { auto* socket_address = host->mutable_socket_address(); socket_address->set_protocol(envoy::config::core::v3::SocketAddress::TCP); socket_address->set_address("0.0.0.0"); - socket_address->set_port_value(80); } } }); diff --git a/test/integration/tcp_proxy_integration_test.cc b/test/integration/tcp_proxy_integration_test.cc index bac7260a9e7e..39d6a4779740 100644 --- a/test/integration/tcp_proxy_integration_test.cc +++ b/test/integration/tcp_proxy_integration_test.cc @@ -125,6 +125,25 @@ TEST_P(TcpProxyIntegrationTest, TcpProxyDownstreamDisconnect) { tcp_client->waitForDisconnect(); } +TEST_P(TcpProxyIntegrationTest, NoUpstream) { + // Set the first upstream to have an invalid port, so connection will fail, + // but it won't fail synchronously (as it would if there were simply no + // upstreams) + fake_upstreams_count_ = 0; + config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { + auto* cluster = bootstrap.mutable_static_resources()->mutable_clusters(0); + auto* lb_endpoint = + cluster->mutable_load_assignment()->mutable_endpoints(0)->mutable_lb_endpoints(0); + lb_endpoint->mutable_endpoint()->mutable_address()->mutable_socket_address()->set_port_value(1); + }); + config_helper_.skipPortUsageValidation(); + enable_half_close_ = false; + initialize(); + + IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); + tcp_client->waitForDisconnect(); +} + TEST_P(TcpProxyIntegrationTest, TcpProxyLargeWrite) { config_helper_.setBufferLimits(1024, 1024); initialize(); From f991d6105eda542883b1e1d89c9439b9a56f126b Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Thu, 30 Jul 2020 05:46:49 -0700 Subject: [PATCH 789/909] tidy: fix main_common_test and signals_test (#12372) NOLINTNEXTLINE doesn't work well within macro expansion. Do it inline or disable with #if. 
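For readers less familiar with clang-tidy suppressions, the gist of the fix is placement: NOLINTNEXTLINE applies to the physical line that follows it, and per the note above that relationship becomes unreliable once the statement sits inside a macro argument such as the lambda passed to EXPECT_DEATH, whereas a trailing NOLINT stays attached to the statement itself. A small illustrative sketch, separate from the hunks below (the pointer targets a real int so the demo is harmless to call):

    void nolintPlacementDemo() {
      static int target = 0;
      volatile int* nasty_ptr = &target;
      // Placement 1: a standalone comment line; fragile when the statement below
      // is an argument to a death-test macro.
      // NOLINTNEXTLINE(clang-analyzer-core.NullDereference)
      *(nasty_ptr) = 0;
      // Placement 2: a trailing comment; it rides along with the statement.
      *(nasty_ptr) = 1; // NOLINT(clang-analyzer-core.NullDereference)
    }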
Signed-off-by: Lizan Zhou --- test/common/signal/signals_test.cc | 6 ++---- test/exe/main_common_test.cc | 4 ++-- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/test/common/signal/signals_test.cc b/test/common/signal/signals_test.cc index 475bc7b8b0ee..8cee0e5d0aa6 100644 --- a/test/common/signal/signals_test.cc +++ b/test/common/signal/signals_test.cc @@ -30,8 +30,7 @@ TEST(SignalsDeathTest, InvalidAddressDeathTest) { []() -> void { // Oops! volatile int* nasty_ptr = reinterpret_cast(0x0); - // NOLINTNEXTLINE(clang-analyzer-core.NullDereference) - *(nasty_ptr) = 0; + *(nasty_ptr) = 0; // NOLINT(clang-analyzer-core.NullDereference) }(), "backtrace.*Segmentation fault"); } @@ -49,8 +48,7 @@ TEST(SignalsDeathTest, RegisteredHandlerTest) { []() -> void { // Oops! volatile int* nasty_ptr = reinterpret_cast(0x0); - // NOLINTNEXTLINE(clang-analyzer-core.NullDereference) - *(nasty_ptr) = 0; + *(nasty_ptr) = 0; // NOLINT(clang-analyzer-core.NullDereference) }(), "HERE"); SignalAction::removeFatalErrorHandler(handler); diff --git a/test/exe/main_common_test.cc b/test/exe/main_common_test.cc index d57b7e193416..1550a9e3feb3 100644 --- a/test/exe/main_common_test.cc +++ b/test/exe/main_common_test.cc @@ -151,7 +151,8 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, MainCommonDeathTest, TestUtility::ipTestParamsToString); TEST_P(MainCommonDeathTest, OutOfMemoryHandler) { -#if defined(__has_feature) && (__has_feature(thread_sanitizer) || __has_feature(address_sanitizer)) +#if defined(__clang_analyzer__) || (defined(__has_feature) && (__has_feature(thread_sanitizer) || \ + __has_feature(address_sanitizer))) ENVOY_LOG_MISC(critical, "MainCommonTest::OutOfMemoryHandler not supported by this compiler configuration"); #else @@ -172,7 +173,6 @@ TEST_P(MainCommonDeathTest, OutOfMemoryHandler) { size *= 1000) { int* p = new int[size]; // Use the pointer to prevent clang from optimizing the allocation away in opt mode. 
- // NOLINTNEXTLINE(clang-analyzer-cplusplus.NewDeleteLeaks) ENVOY_LOG_MISC(debug, "p={}", reinterpret_cast(p)); } }(), From 702f1fbf18e1a6125e8fb4ab4068c71ca7fa1c05 Mon Sep 17 00:00:00 2001 From: Sam Flattery <44659644+samflattery@users.noreply.github.com> Date: Thu, 30 Jul 2020 14:05:15 +0100 Subject: [PATCH 790/909] test: add optional timeouts to waiting for counters/gauges (#12251) Commit Message: Add timeouts to waiting for counters/gauges in integration tests add a default timeout of 0s which will not enable the timeout for the existing tests raise an assertionfailure if they timeout Signed-off-by: Sam Flattery --- test/integration/server.h | 28 ++++++++++------ test/integration/server_stats.h | 20 +++++++++--- test/server/config_validation/xds_fuzz.cc | 22 ++++++++----- test/server/config_validation/xds_fuzz.h | 1 + test/test_common/utility.cc | 40 ++++++++++++++++++----- test/test_common/utility.h | 38 ++++++++++++++++----- 6 files changed, 109 insertions(+), 40 deletions(-) diff --git a/test/integration/server.h b/test/integration/server.h index bec7f855b042..4dc9a3ee21ea 100644 --- a/test/integration/server.h +++ b/test/integration/server.h @@ -421,24 +421,32 @@ class IntegrationTestServer : public Logger::Loggable, Server::FieldValidationConfig validation_config, uint32_t concurrency, std::chrono::seconds drain_time, Server::DrainStrategy drain_strategy); - void waitForCounterEq(const std::string& name, uint64_t value) override { - notifyingStatsAllocator().waitForCounterFromStringEq(name, value); + void + waitForCounterEq(const std::string& name, uint64_t value, + std::chrono::milliseconds timeout = std::chrono::milliseconds::zero()) override { + ASSERT_TRUE(TestUtility::waitForCounterGe(statStore(), name, value, time_system_, timeout)); } - void waitForCounterGe(const std::string& name, uint64_t value) override { - notifyingStatsAllocator().waitForCounterFromStringGe(name, value); + void + waitForCounterGe(const std::string& name, uint64_t value, + std::chrono::milliseconds timeout = std::chrono::milliseconds::zero()) override { + ASSERT_TRUE(TestUtility::waitForCounterGe(statStore(), name, value, time_system_, timeout)); } - void waitForCounterExists(const std::string& name) override { - notifyingStatsAllocator().waitForCounterExists(name); + void + waitForGaugeEq(const std::string& name, uint64_t value, + std::chrono::milliseconds timeout = std::chrono::milliseconds::zero()) override { + ASSERT_TRUE(TestUtility::waitForGaugeEq(statStore(), name, value, time_system_, timeout)); } - void waitForGaugeGe(const std::string& name, uint64_t value) override { - TestUtility::waitForGaugeGe(statStore(), name, value, time_system_); + void + waitForGaugeGe(const std::string& name, uint64_t value, + std::chrono::milliseconds timeout = std::chrono::milliseconds::zero()) override { + ASSERT_TRUE(TestUtility::waitForGaugeGe(statStore(), name, value, time_system_, timeout)); } - void waitForGaugeEq(const std::string& name, uint64_t value) override { - TestUtility::waitForGaugeEq(statStore(), name, value, time_system_); + void waitForCounterExists(const std::string& name) override { + notifyingStatsAllocator().waitForCounterExists(name); } Stats::CounterSharedPtr counter(const std::string& name) override { diff --git a/test/integration/server_stats.h b/test/integration/server_stats.h index 186dba56450a..c3ab300f0505 100644 --- a/test/integration/server_stats.h +++ b/test/integration/server_stats.h @@ -13,15 +13,21 @@ class IntegrationTestServerStats { * Wait for a counter to == a given value. 
* @param name counter name. * @param value target value. + * @param timeout amount of time to wait before asserting false, or 0 for no timeout. */ - virtual void waitForCounterEq(const std::string& name, uint64_t value) PURE; + virtual void + waitForCounterEq(const std::string& name, uint64_t value, + std::chrono::milliseconds timeout = std::chrono::milliseconds::zero()) PURE; /** * Wait for a counter to >= a given value. * @param name counter name. * @param value target value. + * @param timeout amount of time to wait before asserting false, or 0 for no timeout. */ - virtual void waitForCounterGe(const std::string& name, uint64_t value) PURE; + virtual void + waitForCounterGe(const std::string& name, uint64_t value, + std::chrono::milliseconds timeout = std::chrono::milliseconds::zero()) PURE; /** * Wait for a counter to exist. @@ -33,15 +39,21 @@ class IntegrationTestServerStats { * Wait for a gauge to >= a given value. * @param name gauge name. * @param value target value. + * @param timeout amount of time to wait before asserting false, or 0 for no timeout. */ - virtual void waitForGaugeGe(const std::string& name, uint64_t value) PURE; + virtual void + waitForGaugeGe(const std::string& name, uint64_t value, + std::chrono::milliseconds timeout = std::chrono::milliseconds::zero()) PURE; /** * Wait for a gauge to == a given value. * @param name gauge name. * @param value target value. + * @param timeout amount of time to wait before asserting false, or 0 for no timeout. */ - virtual void waitForGaugeEq(const std::string& name, uint64_t value) PURE; + virtual void + waitForGaugeEq(const std::string& name, uint64_t value, + std::chrono::milliseconds timeout = std::chrono::milliseconds::zero()) PURE; /** * Counter lookup. This is not thread safe, since we don't get a consistent diff --git a/test/server/config_validation/xds_fuzz.cc b/test/server/config_validation/xds_fuzz.cc index 78d246275101..9c1647c155cf 100644 --- a/test/server/config_validation/xds_fuzz.cc +++ b/test/server/config_validation/xds_fuzz.cc @@ -242,7 +242,7 @@ void XdsFuzzTest::replay() { addListener(listener_name, route_name); if (!sent_listener) { addRoute(route_name); - test_server_->waitForCounterEq("listener_manager.listener_create_success", 1); + test_server_->waitForCounterEq("listener_manager.listener_create_success", 1, timeout_); } sent_listener = true; break; @@ -270,15 +270,19 @@ void XdsFuzzTest::replay() { if (sent_listener) { // wait for all of the updates to take effect test_server_->waitForGaugeEq("listener_manager.total_listeners_warming", - verifier_.numWarming()); - test_server_->waitForGaugeEq("listener_manager.total_listeners_active", - verifier_.numActive()); + verifier_.numWarming(), timeout_); + test_server_->waitForGaugeEq("listener_manager.total_listeners_active", verifier_.numActive(), + timeout_); test_server_->waitForGaugeEq("listener_manager.total_listeners_draining", - verifier_.numDraining()); - test_server_->waitForCounterEq("listener_manager.listener_modified", verifier_.numModified()); - test_server_->waitForCounterEq("listener_manager.listener_added", verifier_.numAdded()); - test_server_->waitForCounterEq("listener_manager.listener_removed", verifier_.numRemoved()); - test_server_->waitForCounterEq("listener_manager.lds.update_success", lds_update_success_); + verifier_.numDraining(), timeout_); + test_server_->waitForCounterEq("listener_manager.listener_modified", verifier_.numModified(), + timeout_); + test_server_->waitForCounterEq("listener_manager.listener_added", 
verifier_.numAdded(), + timeout_); + test_server_->waitForCounterEq("listener_manager.listener_removed", verifier_.numRemoved(), + timeout_); + test_server_->waitForCounterEq("listener_manager.lds.update_success", lds_update_success_, + timeout_); } ENVOY_LOG_MISC(debug, "warming {} ({}), active {} ({}), draining {} ({})", verifier_.numWarming(), diff --git a/test/server/config_validation/xds_fuzz.h b/test/server/config_validation/xds_fuzz.h index eac6cc269b66..826175bf0241 100644 --- a/test/server/config_validation/xds_fuzz.h +++ b/test/server/config_validation/xds_fuzz.h @@ -78,6 +78,7 @@ class XdsFuzzTest : public HttpIntegrationTest { Network::Address::IpVersion ip_version_; + std::chrono::seconds timeout_{5}; uint64_t lds_update_success_{0}; }; diff --git a/test/test_common/utility.cc b/test/test_common/utility.cc index cd358d84fe32..13feb48e5df5 100644 --- a/test/test_common/utility.cc +++ b/test/test_common/utility.cc @@ -158,32 +158,56 @@ Stats::TextReadoutSharedPtr TestUtility::findTextReadout(Stats::Store& store, return findByName(store.textReadouts(), name); } -void TestUtility::waitForCounterEq(Stats::Store& store, const std::string& name, uint64_t value, - Event::TestTimeSystem& time_system) { +AssertionResult TestUtility::waitForCounterEq(Stats::Store& store, const std::string& name, + uint64_t value, Event::TestTimeSystem& time_system, + std::chrono::milliseconds timeout) { + auto end_time = time_system.monotonicTime() + timeout; while (findCounter(store, name) == nullptr || findCounter(store, name)->value() != value) { time_system.advanceTimeWait(std::chrono::milliseconds(10)); + if (timeout != std::chrono::milliseconds::zero() && time_system.monotonicTime() >= end_time) { + return AssertionFailure() << fmt::format("timed out waiting for {} to be {}", name, value); + } } + return AssertionSuccess(); } -void TestUtility::waitForCounterGe(Stats::Store& store, const std::string& name, uint64_t value, - Event::TestTimeSystem& time_system) { +AssertionResult TestUtility::waitForCounterGe(Stats::Store& store, const std::string& name, + uint64_t value, Event::TestTimeSystem& time_system, + std::chrono::milliseconds timeout) { + auto end_time = time_system.monotonicTime() + timeout; while (findCounter(store, name) == nullptr || findCounter(store, name)->value() < value) { time_system.advanceTimeWait(std::chrono::milliseconds(10)); + if (timeout != std::chrono::milliseconds::zero() && time_system.monotonicTime() >= end_time) { + return AssertionFailure() << fmt::format("timed out waiting for {} to be {}", name, value); + } } + return AssertionSuccess(); } -void TestUtility::waitForGaugeGe(Stats::Store& store, const std::string& name, uint64_t value, - Event::TestTimeSystem& time_system) { +AssertionResult TestUtility::waitForGaugeGe(Stats::Store& store, const std::string& name, + uint64_t value, Event::TestTimeSystem& time_system, + std::chrono::milliseconds timeout) { + auto end_time = time_system.monotonicTime() + timeout; while (findGauge(store, name) == nullptr || findGauge(store, name)->value() < value) { time_system.advanceTimeWait(std::chrono::milliseconds(10)); + if (timeout != std::chrono::milliseconds::zero() && time_system.monotonicTime() >= end_time) { + return AssertionFailure() << fmt::format("timed out waiting for {} to be {}", name, value); + } } + return AssertionSuccess(); } -void TestUtility::waitForGaugeEq(Stats::Store& store, const std::string& name, uint64_t value, - Event::TestTimeSystem& time_system) { +AssertionResult 
TestUtility::waitForGaugeEq(Stats::Store& store, const std::string& name, + uint64_t value, Event::TestTimeSystem& time_system, + std::chrono::milliseconds timeout) { + auto end_time = time_system.monotonicTime() + timeout; while (findGauge(store, name) == nullptr || findGauge(store, name)->value() != value) { time_system.advanceTimeWait(std::chrono::milliseconds(10)); + if (timeout != std::chrono::milliseconds::zero() && time_system.monotonicTime() >= end_time) { + return AssertionFailure() << fmt::format("timed out waiting for {} to be {}", name, value); + } } + return AssertionSuccess(); } std::list diff --git a/test/test_common/utility.h b/test/test_common/utility.h index 5c50fb9b005f..df96fff5d7f6 100644 --- a/test/test_common/utility.h +++ b/test/test_common/utility.h @@ -195,14 +195,19 @@ class TestUtility { static Stats::GaugeSharedPtr findGauge(Stats::Store& store, const std::string& name); /** - * Wait till Counter value is equal to the passed ion value. + * Wait for a counter to == a given value. * @param store supplies the stats store. * @param name supplies the name of the counter to wait for. * @param value supplies the value of the counter. * @param time_system the time system to use for waiting. + * @param timeout the maximum time to wait before timing out, or 0 for no timeout. + * @return AssertionSuccess() if the counter was == to the value within the timeout, else + * AssertionFailure(). */ - static void waitForCounterEq(Stats::Store& store, const std::string& name, uint64_t value, - Event::TestTimeSystem& time_system); + static AssertionResult + waitForCounterEq(Stats::Store& store, const std::string& name, uint64_t value, + Event::TestTimeSystem& time_system, + std::chrono::milliseconds timeout = std::chrono::milliseconds::zero()); /** * Wait for a counter to >= a given value. @@ -210,9 +215,14 @@ class TestUtility { * @param name counter name. * @param value target value. * @param time_system the time system to use for waiting. + * @param timeout the maximum time to wait before timing out, or 0 for no timeout. + * @return AssertionSuccess() if the counter was >= to the value within the timeout, else + * AssertionFailure(). */ - static void waitForCounterGe(Stats::Store& store, const std::string& name, uint64_t value, - Event::TestTimeSystem& time_system); + static AssertionResult + waitForCounterGe(Stats::Store& store, const std::string& name, uint64_t value, + Event::TestTimeSystem& time_system, + std::chrono::milliseconds timeout = std::chrono::milliseconds::zero()); /** * Wait for a gauge to >= a given value. @@ -220,9 +230,14 @@ class TestUtility { * @param name gauge name. * @param value target value. * @param time_system the time system to use for waiting. + * @param timeout the maximum time to wait before timing out, or 0 for no timeout. + * @return AssertionSuccess() if the counter gauge >= to the value within the timeout, else + * AssertionFailure(). */ - static void waitForGaugeGe(Stats::Store& store, const std::string& name, uint64_t value, - Event::TestTimeSystem& time_system); + static AssertionResult + waitForGaugeGe(Stats::Store& store, const std::string& name, uint64_t value, + Event::TestTimeSystem& time_system, + std::chrono::milliseconds timeout = std::chrono::milliseconds::zero()); /** * Wait for a gauge to == a given value. @@ -230,9 +245,14 @@ class TestUtility { * @param name gauge name. * @param value target value. * @param time_system the time system to use for waiting. 
+ * @param timeout the maximum time to wait before timing out, or 0 for no timeout. + * @return AssertionSuccess() if the gauge was == to the value within the timeout, else + * AssertionFailure(). */ - static void waitForGaugeEq(Stats::Store& store, const std::string& name, uint64_t value, - Event::TestTimeSystem& time_system); + static AssertionResult + waitForGaugeEq(Stats::Store& store, const std::string& name, uint64_t value, + Event::TestTimeSystem& time_system, + std::chrono::milliseconds timeout = std::chrono::milliseconds::zero()); /** * Find a readout in a stats store. From 68f7288413df640415e5a29e4e4480bc860aa49c Mon Sep 17 00:00:00 2001 From: Dhi Aurrahman Date: Thu, 30 Jul 2020 20:24:12 +0700 Subject: [PATCH 791/909] lua: Expose SSL connection info (#12174) This patch adds Lua APIs to access SSL connection info of a connection. Signed-off-by: Dhi Aurrahman --- .../http/http_filters/lua_filter.rst | 218 +++++++++++++++++- docs/root/version_history/current.rst | 1 + include/envoy/ssl/connection.h | 10 +- source/common/common/hex.cc | 11 + source/common/common/hex.h | 8 + source/extensions/filters/common/lua/BUILD | 1 + .../extensions/filters/common/lua/wrappers.cc | 137 ++++++++++- .../extensions/filters/common/lua/wrappers.h | 144 +++++++++++- .../extensions/filters/http/lua/wrappers.cc | 15 ++ source/extensions/filters/http/lua/wrappers.h | 13 +- .../filters/http/lua/lua_filter_test.cc | 146 ++++++++++++ 11 files changed, 691 insertions(+), 13 deletions(-) diff --git a/docs/root/configuration/http/http_filters/lua_filter.rst b/docs/root/configuration/http/http_filters/lua_filter.rst index 5f9e3d02cb0a..8e6f8eeffef8 100644 --- a/docs/root/configuration/http/http_filters/lua_filter.rst +++ b/docs/root/configuration/http/http_filters/lua_filter.rst @@ -562,6 +562,17 @@ dynamicMetadata() Returns a :ref:`dynamic metadata object `. +downstreamSslConnection() +^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: lua + + streamInfo:downstreamSslConnection() + +Returns :repo:`information ` related to the current SSL connection. + +Returns a downstream :ref:`SSL connection info object `. + .. _config_http_filters_lua_stream_info_dynamic_metadata_wrapper: Dynamic metadata object API @@ -625,7 +636,7 @@ Connection object API --------------------- ssl() -^^^^^^^^ +^^^^^ .. code-block:: lua @@ -638,6 +649,207 @@ ssl() Returns :repo:`SSL connection ` object when the connection is secured and *nil* when it is not. -.. note:: +Returns an :ref:`SSL connection info object `. + +.. _config_http_filters_lua_ssl_socket_info: + +SSL connection object API +------------------------- + +peerCertificatePresented() +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: lua + + if downstreamSslConnection:peerCertificatePresented() then + print("peer certificate is presented") + end + +Returns bool whether the peer certificate is presented. + +peerCertificateValidated() +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: lua + + if downstreamSslConnection:peerCertificateVaidated() then + print("peer certificate is valiedated") + end + +Returns bool whether the peer certificate was validated. + +uriSanLocalCertificate() +^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: lua + + -- For example, uriSanLocalCertificate contains {"san1", "san2"} + local certs = downstreamSslConnection:uriSanLocalCertificate() + + -- The following prints san1,san2 + handle:logTrace(table.concat(certs, ",")) + +Returns the URIs (as a table) in the SAN field of the local certificate. 
Returns an empty table if +there is no local certificate, or no SAN field, or no URI SAN entries. + +sha256PeerCertificateDigest() +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: lua + + downstreamSslConnection:sha256PeerCertificateDigest() + +Returns the SHA256 digest of the peer certificate. Returns ``""`` if there is no peer certificate +which can happen in TLS (non-mTLS) connections. + +serialNumberPeerCertificate() +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: lua + + downstreamSslConnection:serialNumberPeerCertificate() + +Returns the serial number field of the peer certificate. Returns ``""`` if there is no peer +certificate, or no serial number. + +issuerPeerCertificate() +^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: lua + + downstreamSslConnection:issuerPeerCertificate() + +Returns the issuer field of the peer certificate in RFC 2253 format. Returns ``""`` if there is no +peer certificate, or no issuer. + +subjectPeerCertificate() +^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: lua + + downstreamSslConnection:subjectPeerCertificate() + +Return the subject field of the peer certificate in RFC 2253 format. Returns ``""`` if there is no +peer certificate, or no subject. + +uriSanPeerCertificate() +^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: lua + + downstreamSslConnection:uriSanPeerCertificate() + +Returns the URIs (as a table) in the SAN field of the peer certificate. Returns en empty table if +there is no peer certificate, or no SAN field, or no URI SAN entries. + +subjectLocalCertificate() +^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: lua + + downstreamSslConnection:subjectLocalCertificate() + +Returns the subject field of the local certificate in RFC 2253 format. Returns ``""`` if there is no +local certificate, or no subject. + +urlEncodedPemEncodedPeerCertificate() +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: lua + + downstreamSslConnection:urlEncodedPemEncodedPeerCertificate() + +Returns the URL-encoded PEM-encoded representation of the peer certificate. Returns ``""`` if there +is no peer certificate or encoding fails. + +urlEncodedPemEncodedPeerCertificateChain() +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: lua + + downstreamSslConnection:urlEncodedPemEncodedPeerCertificateChain() + +Returnns the URL-encoded PEM-encoded representation of the full peer certificate chain including the +leaf certificate. Returns ``""`` if there is no peer certificate or encoding fails. + +dnsSansPeerCertificate() +^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: lua + + downstreamSslConnection:dnsSansPeerCertificate() + +Returns the DNS entries (as a table) in the SAN field of the peer certificate. Returns an empty +table if there is no peer certificate, or no SAN field, or no DNS SAN entries. + +dnsSansLocalCertificate() +^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: lua + + downstreamSslConnection:dnsSansLocalCertificate() + +Returns the DNS entries (as a table) in the SAN field of the local certificate. Returns an empty +table if there is no local certificate, or no SAN field, or no DNS SAN entries. + +validFromPeerCertificate() +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: lua + + downstreamSslConnection:validFromPeerCertificate() + +Returns the time (timestamp-since-epoch in seconds) that the peer certificate was issued and should +be considered valid from. Returns ``0`` if there is no peer certificate. + +In Lua, we usually use ``os.time(os.date("!*t"))`` to get current timestamp-since-epoch in seconds. 
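These timestamp accessors are typically compared against the current time to decide whether the peer certificate is inside its validity window. A short sketch of that pattern in a request handler (an illustration, not part of the patched documentation; it assumes downstreamSslConnection() yields nil on plaintext connections, as connection:ssl() does):

    function envoy_on_request(request_handle)
      local ssl = request_handle:streamInfo():downstreamSslConnection()
      if ssl ~= nil and ssl:peerCertificatePresented() then
        local now = os.time(os.date("!*t"))
        if ssl:validFromPeerCertificate() <= now and now <= ssl:expirationPeerCertificate() then
          request_handle:logInfo("peer certificate is within its validity window")
        else
          request_handle:logWarn("peer certificate is outside its validity window")
        end
      end
    end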
+ +expirationPeerCertificate() +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: lua + + downstreamSslConnection:validFromPeerCertificate() + +Returns the time (timestamp-since-epoch in seconds) that the peer certificate expires and should not +be considered valid after. Returns ``0`` if there is no peer certificate. + +In Lua, we usually use ``os.time(os.date("!*t"))`` to get current timestamp-since-epoch in seconds. + +sessionId() +^^^^^^^^^^^ + +.. code-block:: lua + + downstreamSslConnection:sessionId() + +Returns the hex-encoded TLS session ID as defined in RFC 5246. + +ciphersuiteId() +^^^^^^^^^^^^^^^^ + +.. code-block:: lua + + downstreamSslConnection:ciphersuiteId() + +Returns the standard ID (hex-encoded) for the ciphers used in the established TLS connection. +Returns ``"0xffff"`` if there is no current negotiated ciphersuite. + +ciphersuiteString() +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: lua + + downstreamSslConnection:ciphersuiteString() + +Returns the OpenSSL name for the set of ciphers used in the established TLS connection. Returns +``""`` if there is no current negotiated ciphersuite. + +tlsVersion() +^^^^^^^^^^^^ + +.. code-block:: lua + + downstreamSslConnection:urlEncodedPemEncodedPeerCertificateChain() - Currently the SSL connection object has no exposed APIs. +Returns the TLS version (e.g., TLSv1.2, TLSv1.3) used in the established TLS connection. diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index e2a877537854..831e5c9f620e 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -48,6 +48,7 @@ New Features * http: added support for :ref:`%DOWNSTREAM_PEER_FINGERPRINT_1% ` as custom header. * http: introduced new HTTP/1 and HTTP/2 codec implementations that will remove the use of exceptions for control flow due to high risk factors and instead use error statuses. The old behavior is used by default, but the new codecs can be enabled for testing by setting the runtime feature `envoy.reloadable_features.new_codec_behavior` to true. The new codecs will be in development for one month, and then enabled by default while the old codecs are deprecated. * load balancer: added a :ref:`configuration` option to specify the active request bias used by the least request load balancer. +* lua: added Lua APIs to access :ref:`SSL connection info ` object. * postgres network filter: :ref:`metadata ` is produced based on SQL query. * redis: added fault injection support :ref:`fault injection for redis proxy `, described further in :ref:`configuration documentation `. * router: added new diff --git a/include/envoy/ssl/connection.h b/include/envoy/ssl/connection.h index a93c97c38d40..ab27b0cd5b33 100644 --- a/include/envoy/ssl/connection.h +++ b/include/envoy/ssl/connection.h @@ -30,8 +30,8 @@ class ConnectionInfo { virtual bool peerCertificateValidated() const PURE; /** - * @return std::string the URIs in the SAN field of the local certificate. Returns {} if there is - * no local certificate, or no SAN field, or no URI. + * @return absl::Spanthe URIs in the SAN field of the local certificate. + * Returns {} if there is no local certificate, or no SAN field, or no URI. **/ virtual absl::Span uriSanLocalCertificate() const PURE; @@ -72,8 +72,8 @@ class ConnectionInfo { virtual const std::string& subjectPeerCertificate() const PURE; /** - * @return std::string the URIs in the SAN field of the peer certificate. Returns {} if there is - *no peer certificate, or no SAN field, or no URI. 
+ * @return absl::Span the URIs in the SAN field of the peer certificate. + * Returns {} if there is no peer certificate, or no SAN field, or no URI. **/ virtual absl::Span uriSanPeerCertificate() const PURE; @@ -142,7 +142,7 @@ class ConnectionInfo { * if a peer cert exists and it contains the specified extension. * * Note: This is used out of tree, check with @snowp before removing. - * @param extension_name name of extension to look up + * @param extension_name name of extension to look up. * @return absl::optional the raw octets of the extension ``ASN.1`` object, if it * exists. */ diff --git a/source/common/common/hex.cc b/source/common/common/hex.cc index 1fc6b603133f..7c0b7f8c2e13 100644 --- a/source/common/common/hex.cc +++ b/source/common/common/hex.cc @@ -73,4 +73,15 @@ std::string Hex::uint32ToHex(uint32_t value) { return encode(data.data(), data.size()); } + +std::string Hex::uint16ToHex(uint16_t value) { + std::array data; + + // This is explicitly done for performance reasons + // using std::stringstream with std::hex is ~3 orders of magnitude slower. + data[1] = (value & 0x00FF); + data[0] = (value & 0xFF00) >> 8; + + return encode(data.data(), data.size()); +} } // namespace Envoy diff --git a/source/common/common/hex.h b/source/common/common/hex.h index e77ac57d50d3..aba722a4fc06 100644 --- a/source/common/common/hex.h +++ b/source/common/common/hex.h @@ -49,5 +49,13 @@ class Hex final { * @return value as hexadecimal string */ static std::string uint32ToHex(uint32_t value); + + /** + * Converts the given 16-bit unsigned integer into a hexadecimal string. + * The result is always a string of 4 characters left padded with zeroes. + * @param value The unsigned integer to be converted. + * @return value as hexadecimal string + */ + static std::string uint16ToHex(uint16_t value); }; } // namespace Envoy diff --git a/source/extensions/filters/common/lua/BUILD b/source/extensions/filters/common/lua/BUILD index b36d7b7414c7..0095b156c4b6 100644 --- a/source/extensions/filters/common/lua/BUILD +++ b/source/extensions/filters/common/lua/BUILD @@ -44,6 +44,7 @@ envoy_cc_library( deps = [ ":lua_lib", "//include/envoy/buffer:buffer_interface", + "//source/common/common:hex_lib", "//source/common/protobuf", ], ) diff --git a/source/extensions/filters/common/lua/wrappers.cc b/source/extensions/filters/common/lua/wrappers.cc index 2e8d5d16c76d..02e4db6ca2a8 100644 --- a/source/extensions/filters/common/lua/wrappers.cc +++ b/source/extensions/filters/common/lua/wrappers.cc @@ -1,11 +1,50 @@ #include "extensions/filters/common/lua/wrappers.h" +#include + +#include + +#include "common/common/assert.h" +#include "common/common/hex.h" + +#include "absl/time/time.h" + namespace Envoy { namespace Extensions { namespace Filters { namespace Common { namespace Lua { +namespace { + +// Builds a Lua table from a list of strings. +template +void createLuaTableFromStringList(lua_State* state, const StringList& list) { + lua_createtable(state, list.size(), 0); + for (size_t i = 0; i < list.size(); i++) { + lua_pushstring(state, list[i].c_str()); + // After the list[i].c_str() is pushed to the stack, we need to set the "current element" with + // that value. The lua_rawseti(state, t, i) helps us to set the value of table t with key i. + // Given the index of the current element/table in the stack is below the pushed value i.e. 
-2 + // and the key (refers to where the element is in the table) is i + 1 (note that in Lua index + // starts from 1), hence we have: + lua_rawseti(state, -2, i + 1); + } +} + +// By default, LUA_INTEGER is https://en.cppreference.com/w/cpp/types/ptrdiff_t +// (https://github.com/LuaJIT/LuaJIT/blob/8271c643c21d1b2f344e339f559f2de6f3663191/src/luaconf.h#L104), +// which is large enough to hold timestamp-since-epoch in seconds. Note: In Lua, we usually use +// os.time(os.date("!*t")) to get current timestamp-since-epoch in seconds. +int64_t timestampInSeconds(const absl::optional& system_time) { + return system_time.has_value() ? std::chrono::duration_cast( + system_time.value().time_since_epoch()) + .count() + : 0; +} + +} // namespace + int BufferWrapper::luaLength(lua_State* state) { lua_pushnumber(state, data_.length()); return 1; @@ -217,13 +256,109 @@ int MetadataMapWrapper::luaPairs(lua_State* state) { return 1; } +int SslConnectionWrapper::luaPeerCertificatePresented(lua_State* state) { + lua_pushboolean(state, connection_info_.peerCertificatePresented()); + return 1; +} + +int SslConnectionWrapper::luaPeerCertificateValidated(lua_State* state) { + lua_pushboolean(state, connection_info_.peerCertificateValidated()); + return 1; +} + +int SslConnectionWrapper::luaUriSanLocalCertificate(lua_State* state) { + createLuaTableFromStringList(state, connection_info_.uriSanLocalCertificate()); + return 1; +} + +int SslConnectionWrapper::luaSha256PeerCertificateDigest(lua_State* state) { + lua_pushstring(state, connection_info_.sha256PeerCertificateDigest().c_str()); + return 1; +} + +int SslConnectionWrapper::luaSerialNumberPeerCertificate(lua_State* state) { + lua_pushstring(state, connection_info_.serialNumberPeerCertificate().c_str()); + return 1; +} + +int SslConnectionWrapper::luaIssuerPeerCertificate(lua_State* state) { + lua_pushstring(state, connection_info_.issuerPeerCertificate().c_str()); + return 1; +} + +int SslConnectionWrapper::luaSubjectPeerCertificate(lua_State* state) { + lua_pushstring(state, connection_info_.subjectPeerCertificate().c_str()); + return 1; +} + +int SslConnectionWrapper::luaUriSanPeerCertificate(lua_State* state) { + createLuaTableFromStringList(state, connection_info_.uriSanPeerCertificate()); + return 1; +} + +int SslConnectionWrapper::luaSubjectLocalCertificate(lua_State* state) { + lua_pushstring(state, connection_info_.subjectLocalCertificate().c_str()); + return 1; +} + +int SslConnectionWrapper::luaDnsSansPeerCertificate(lua_State* state) { + createLuaTableFromStringList(state, connection_info_.dnsSansPeerCertificate()); + return 1; +} + +int SslConnectionWrapper::luaDnsSansLocalCertificate(lua_State* state) { + createLuaTableFromStringList(state, connection_info_.dnsSansLocalCertificate()); + return 1; +} + +int SslConnectionWrapper::luaValidFromPeerCertificate(lua_State* state) { + lua_pushinteger(state, timestampInSeconds(connection_info_.validFromPeerCertificate())); + return 1; +} + +int SslConnectionWrapper::luaExpirationPeerCertificate(lua_State* state) { + lua_pushinteger(state, timestampInSeconds(connection_info_.expirationPeerCertificate())); + return 1; +} + +int SslConnectionWrapper::luaSessionId(lua_State* state) { + lua_pushstring(state, connection_info_.sessionId().c_str()); + return 1; +} + +int SslConnectionWrapper::luaCiphersuiteId(lua_State* state) { + lua_pushstring(state, + absl::StrCat("0x", Hex::uint16ToHex(connection_info_.ciphersuiteId())).c_str()); + return 1; +} + +int 
SslConnectionWrapper::luaCiphersuiteString(lua_State* state) { + lua_pushstring(state, connection_info_.ciphersuiteString().c_str()); + return 1; +} + +int SslConnectionWrapper::luaUrlEncodedPemEncodedPeerCertificate(lua_State* state) { + lua_pushstring(state, connection_info_.urlEncodedPemEncodedPeerCertificate().c_str()); + return 1; +} + +int SslConnectionWrapper::luaUrlEncodedPemEncodedPeerCertificateChain(lua_State* state) { + lua_pushstring(state, connection_info_.urlEncodedPemEncodedPeerCertificateChain().c_str()); + return 1; +} + +int SslConnectionWrapper::luaTlsVersion(lua_State* state) { + lua_pushstring(state, connection_info_.tlsVersion().c_str()); + return 1; +} + int ConnectionWrapper::luaSsl(lua_State* state) { const auto& ssl = connection_->ssl(); if (ssl != nullptr) { if (ssl_connection_wrapper_.get() != nullptr) { ssl_connection_wrapper_.pushStack(); } else { - ssl_connection_wrapper_.reset(SslConnectionWrapper::create(state, ssl), true); + ssl_connection_wrapper_.reset(SslConnectionWrapper::create(state, *ssl), true); } } else { lua_pushnil(state); diff --git a/source/extensions/filters/common/lua/wrappers.h b/source/extensions/filters/common/lua/wrappers.h index 92aa697cfd73..09ea9b44467a 100644 --- a/source/extensions/filters/common/lua/wrappers.h +++ b/source/extensions/filters/common/lua/wrappers.h @@ -112,10 +112,145 @@ class MetadataMapWrapper : public BaseLuaObject { */ class SslConnectionWrapper : public BaseLuaObject { public: - SslConnectionWrapper(const Ssl::ConnectionInfoConstSharedPtr) {} - static ExportedFunctions exportedFunctions() { return {}; } + explicit SslConnectionWrapper(const Ssl::ConnectionInfo& info) : connection_info_{info} {} + static ExportedFunctions exportedFunctions() { + return {{"peerCertificatePresented", static_luaPeerCertificatePresented}, + {"peerCertificateValidated", static_luaPeerCertificateValidated}, + {"uriSanLocalCertificate", static_luaUriSanLocalCertificate}, + {"sha256PeerCertificateDigest", static_luaSha256PeerCertificateDigest}, + {"serialNumberPeerCertificate", static_luaSerialNumberPeerCertificate}, + {"issuerPeerCertificate", static_luaIssuerPeerCertificate}, + {"subjectPeerCertificate", static_luaSubjectPeerCertificate}, + {"uriSanPeerCertificate", static_luaUriSanPeerCertificate}, + {"subjectLocalCertificate", static_luaSubjectLocalCertificate}, + {"dnsSansPeerCertificate", static_luaDnsSansPeerCertificate}, + {"dnsSansLocalCertificate", static_luaDnsSansLocalCertificate}, + {"validFromPeerCertificate", static_luaValidFromPeerCertificate}, + {"expirationPeerCertificate", static_luaExpirationPeerCertificate}, + {"sessionId", static_luaSessionId}, + {"ciphersuiteId", static_luaCiphersuiteId}, + {"ciphersuiteString", static_luaCiphersuiteString}, + {"urlEncodedPemEncodedPeerCertificate", static_luaUrlEncodedPemEncodedPeerCertificate}, + {"urlEncodedPemEncodedPeerCertificateChain", + static_luaUrlEncodedPemEncodedPeerCertificateChain}, + {"tlsVersion", static_luaTlsVersion}}; + } + +private: + /** + * Returns bool whether the peer certificate is presented. + */ + DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaPeerCertificatePresented); + + /** + * Returns bool whether the peer certificate is validated. + */ + DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaPeerCertificateValidated); + + /** + * Returns the URIs in the SAN field of the local certificate. Returns empty table if there is no + * local certificate, or no SAN field, or no URI in SAN. 
+ */ + DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaUriSanLocalCertificate); + + /** + * Returns the subject field of the local certificate in RFC 2253 format. Returns empty string if + * there is no local certificate, or no subject. + */ + DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaSubjectLocalCertificate); - // TODO(dio): Add more Lua APIs around Ssl::Connection. + /** + * Returns the SHA256 digest of the peer certificate. Returns empty string if there is no peer + * certificate which can happen in TLS (non mTLS) connections. + */ + DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaSha256PeerCertificateDigest); + + /** + * Returns the serial number field of the peer certificate. Returns empty string if there is no + * peer certificate, or no serial number. + */ + DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaSerialNumberPeerCertificate); + + /** + * Returns the issuer field of the peer certificate in RFC 2253 format. Returns empty string if + * there is no peer certificate, or no issuer. + */ + DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaIssuerPeerCertificate); + + /** + * Returns the subject field of the peer certificate in RFC 2253 format. Returns empty string if + * there is no peer certificate, or no subject. + */ + DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaSubjectPeerCertificate); + + /** + * Returns the URIs in the SAN field of the peer certificate. Returns empty table if there is no + * peer certificate, or no SAN field, or no URI. + */ + DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaUriSanPeerCertificate); + + /** + * Return string the URL-encoded PEM-encoded representation of the peer certificate. Returns empty + * string if there is no peer certificate or encoding fails. + */ + DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaUrlEncodedPemEncodedPeerCertificate); + + /** + * Returns the URL-encoded PEM-encoded representation of the full peer certificate chain including + * the leaf certificate. Returns empty string if there is no peer certificate or encoding fails. + */ + DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaUrlEncodedPemEncodedPeerCertificateChain); + + /** + * Returns the DNS entries in the SAN field of the peer certificate. Returns an empty table if + * there is no peer certificate, or no SAN field, or no DNS entries in SAN. + */ + DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaDnsSansPeerCertificate); + + /** + * Returns the DNS entries in the SAN field of the local certificate. Returns an empty table if + * there is no local certificate, or no SAN field, or no DNS entries in SAN. + */ + DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaDnsSansLocalCertificate); + + /** + * Returns the timestamp-since-epoch (in seconds) that the peer certificate was issued and should + * be considered valid from. Returns empty string if there is no peer certificate. + */ + DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaValidFromPeerCertificate); + + /** + * Returns the timestamp-since-epoch (in seconds) that the peer certificate expires and should not + * be considered valid after. Returns empty string if there is no peer certificate. + */ + DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaExpirationPeerCertificate); + + /** + * Returns the hex-encoded TLS session ID as defined in RFC 5246. + */ + DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaSessionId); + + /** + * Returns the standard ID for the ciphers used in the established TLS connection. Returns 0xffff + * if there is no current negotiated ciphersuite. 
+ */ + DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaCiphersuiteId); + + /** + * Returns the OpenSSL name for the set of ciphers used in the established TLS connection. Returns + * empty string if there is no current negotiated ciphersuite. + */ + DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaCiphersuiteString); + + /** + * Returns the TLS version (e.g. TLSv1.2, TLSv1.3) used in the established TLS connection. Returns + * string if secured and nil if not. + */ + DECLARE_LUA_FUNCTION(SslConnectionWrapper, luaTlsVersion); + + // TODO(dio): Add luaX509Extension if required, since currently it is used out of tree. + + const Ssl::ConnectionInfo& connection_info_; }; /** @@ -124,6 +259,9 @@ class SslConnectionWrapper : public BaseLuaObject { class ConnectionWrapper : public BaseLuaObject { public: ConnectionWrapper(const Network::Connection* connection) : connection_{connection} {} + + // TODO(dio): Remove this in favor of StreamInfo::downstreamSslConnection wrapper since ssl() in + // envoy/network/connection.h is subject to removal. static ExportedFunctions exportedFunctions() { return {{"ssl", static_luaSsl}}; } private: diff --git a/source/extensions/filters/http/lua/wrappers.cc b/source/extensions/filters/http/lua/wrappers.cc index 4a24fafaa6a6..300586ef2860 100644 --- a/source/extensions/filters/http/lua/wrappers.cc +++ b/source/extensions/filters/http/lua/wrappers.cc @@ -113,6 +113,21 @@ int StreamInfoWrapper::luaDynamicMetadata(lua_State* state) { return 1; } +int StreamInfoWrapper::luaDownstreamSslConnection(lua_State* state) { + const auto& ssl = stream_info_.downstreamSslConnection(); + if (ssl != nullptr) { + if (downstream_ssl_connection_.get() != nullptr) { + downstream_ssl_connection_.pushStack(); + } else { + downstream_ssl_connection_.reset( + Filters::Common::Lua::SslConnectionWrapper::create(state, *ssl), true); + } + } else { + lua_pushnil(state); + } + return 1; +} + DynamicMetadataMapIterator::DynamicMetadataMapIterator(DynamicMetadataMapWrapper& parent) : parent_{parent}, current_{parent_.streamInfo().dynamicMetadata().filter_metadata().begin()} {} diff --git a/source/extensions/filters/http/lua/wrappers.h b/source/extensions/filters/http/lua/wrappers.h index 35f82556250d..be616dc087ec 100644 --- a/source/extensions/filters/http/lua/wrappers.h +++ b/source/extensions/filters/http/lua/wrappers.h @@ -7,6 +7,7 @@ #include "extensions/common/crypto/crypto_impl.h" #include "extensions/filters/common/lua/lua.h" +#include "extensions/filters/common/lua/wrappers.h" #include "openssl/evp.h" @@ -181,7 +182,9 @@ class StreamInfoWrapper : public Filters::Common::Lua::BaseLuaObject dynamic_metadata_wrapper_; + Filters::Common::Lua::LuaDeathRef + downstream_ssl_connection_; friend class DynamicMetadataMapWrapper; }; diff --git a/test/extensions/filters/http/lua/lua_filter_test.cc b/test/extensions/filters/http/lua/lua_filter_test.cc index 807f9cbefba3..84ac6fc706da 100644 --- a/test/extensions/filters/http/lua/lua_filter_test.cc +++ b/test/extensions/filters/http/lua/lua_filter_test.cc @@ -1,3 +1,4 @@ +#include #include #include "envoy/config/core/v3/base.pb.h" @@ -1826,6 +1827,151 @@ TEST_F(LuaHttpFilterTest, CheckConnection) { EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true)); } +// Inspect stream info downstream SSL connection. 
+TEST_F(LuaHttpFilterTest, InspectStreamInfoDowstreamSslConnection) { + const std::string SCRIPT{R"EOF( + function envoy_on_request(request_handle) + if request_handle:streamInfo():downstreamSslConnection() == nil then + else + if request_handle:streamInfo():downstreamSslConnection():peerCertificatePresented() then + request_handle:logTrace("peerCertificatePresented") + end + + if request_handle:streamInfo():downstreamSslConnection():peerCertificateValidated() then + request_handle:logTrace("peerCertificateValidated") + end + + request_handle:logTrace(table.concat(request_handle:streamInfo():downstreamSslConnection():uriSanPeerCertificate(), ",")) + request_handle:logTrace(table.concat(request_handle:streamInfo():downstreamSslConnection():uriSanLocalCertificate(), ",")) + request_handle:logTrace(table.concat(request_handle:streamInfo():downstreamSslConnection():dnsSansPeerCertificate(), ",")) + request_handle:logTrace(table.concat(request_handle:streamInfo():downstreamSslConnection():dnsSansLocalCertificate(), ",")) + + request_handle:logTrace(request_handle:streamInfo():downstreamSslConnection():ciphersuiteId()) + + request_handle:logTrace(request_handle:streamInfo():downstreamSslConnection():validFromPeerCertificate()) + request_handle:logTrace(request_handle:streamInfo():downstreamSslConnection():expirationPeerCertificate()) + + request_handle:logTrace(request_handle:streamInfo():downstreamSslConnection():subjectLocalCertificate()) + request_handle:logTrace(request_handle:streamInfo():downstreamSslConnection():sha256PeerCertificateDigest()) + request_handle:logTrace(request_handle:streamInfo():downstreamSslConnection():serialNumberPeerCertificate()) + request_handle:logTrace(request_handle:streamInfo():downstreamSslConnection():issuerPeerCertificate()) + request_handle:logTrace(request_handle:streamInfo():downstreamSslConnection():subjectPeerCertificate()) + request_handle:logTrace(request_handle:streamInfo():downstreamSslConnection():ciphersuiteString()) + request_handle:logTrace(request_handle:streamInfo():downstreamSslConnection():tlsVersion()) + request_handle:logTrace(request_handle:streamInfo():downstreamSslConnection():urlEncodedPemEncodedPeerCertificate()) + request_handle:logTrace(request_handle:streamInfo():downstreamSslConnection():urlEncodedPemEncodedPeerCertificateChain()) + end + end + )EOF"}; + + setup(SCRIPT); + + Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}}; + + auto connection_info = std::make_shared(); + EXPECT_CALL(decoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(stream_info_)); + EXPECT_CALL(stream_info_, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + + EXPECT_CALL(*connection_info, peerCertificatePresented()).WillOnce(Return(true)); + EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("peerCertificatePresented"))); + + EXPECT_CALL(*connection_info, peerCertificateValidated()).WillOnce(Return(true)); + EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("peerCertificateValidated"))); + + const std::vector peer_uri_sans{"peer-uri-sans-1", "peer-uri-sans-2"}; + EXPECT_CALL(*connection_info, uriSanPeerCertificate()).WillOnce(Return(peer_uri_sans)); + EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("peer-uri-sans-1,peer-uri-sans-2"))); + + const std::vector local_uri_sans{"local-uri-sans-1", "local-uri-sans-2"}; + EXPECT_CALL(*connection_info, uriSanLocalCertificate()).WillOnce(Return(local_uri_sans)); + EXPECT_CALL(*filter_, + scriptLog(spdlog::level::trace, 
StrEq("local-uri-sans-1,local-uri-sans-2"))); + + const std::vector peer_dns_sans{"peer-dns-sans-1", "peer-dns-sans-2"}; + EXPECT_CALL(*connection_info, dnsSansPeerCertificate()).WillOnce(Return(peer_dns_sans)); + EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("peer-dns-sans-1,peer-dns-sans-2"))); + + const std::vector local_dns_sans{"local-dns-sans-1", "local-dns-sans-2"}; + EXPECT_CALL(*connection_info, dnsSansLocalCertificate()).WillOnce(Return(local_dns_sans)); + EXPECT_CALL(*filter_, + scriptLog(spdlog::level::trace, StrEq("local-dns-sans-1,local-dns-sans-2"))); + + const std::string subject_local = "subject-local"; + EXPECT_CALL(*connection_info, subjectLocalCertificate()).WillOnce(ReturnRef(subject_local)); + EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(subject_local))); + + const uint64_t cipher_suite_id = 0x0707; + EXPECT_CALL(*connection_info, ciphersuiteId()).WillRepeatedly(Return(cipher_suite_id)); + EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("0x0707"))); + + const SystemTime validity(std::chrono::seconds(1522796777)); + EXPECT_CALL(*connection_info, validFromPeerCertificate()).WillRepeatedly(Return(validity)); + EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("1522796777"))); + + const SystemTime expiry(std::chrono::seconds(1522796776)); + EXPECT_CALL(*connection_info, expirationPeerCertificate()).WillRepeatedly(Return(expiry)); + EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("1522796776"))); + + const std::string peer_cert_digest = "peer-cert-digest"; + EXPECT_CALL(*connection_info, sha256PeerCertificateDigest()) + .WillOnce(ReturnRef(peer_cert_digest)); + EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(peer_cert_digest))); + + const std::string peer_cert_serial_number = "peer-cert-serial-number"; + EXPECT_CALL(*connection_info, serialNumberPeerCertificate()) + .WillOnce(ReturnRef(peer_cert_serial_number)); + EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(peer_cert_serial_number))); + + const std::string peer_cert_issuer = "peer-cert-issuer"; + EXPECT_CALL(*connection_info, issuerPeerCertificate()).WillOnce(ReturnRef(peer_cert_issuer)); + EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(peer_cert_issuer))); + + const std::string peer_cert_subject = "peer-cert-subject"; + EXPECT_CALL(*connection_info, subjectPeerCertificate()).WillOnce(ReturnRef(peer_cert_subject)); + EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(peer_cert_subject))); + + const std::string cipher_suite = "cipher-suite"; + EXPECT_CALL(*connection_info, ciphersuiteString()).WillOnce(Return(cipher_suite)); + EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(cipher_suite))); + + const std::string tls_version = "tls-version"; + EXPECT_CALL(*connection_info, tlsVersion()).WillOnce(ReturnRef(tls_version)); + EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(tls_version))); + + const std::string peer_cert = "peer-cert"; + EXPECT_CALL(*connection_info, urlEncodedPemEncodedPeerCertificate()) + .WillOnce(ReturnRef(peer_cert)); + EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(peer_cert))); + + const std::string peer_cert_chain = "peer-cert-chain"; + EXPECT_CALL(*connection_info, urlEncodedPemEncodedPeerCertificateChain()) + .WillOnce(ReturnRef(peer_cert_chain)); + EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(peer_cert_chain))); + + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true)); +} + +// Inspect stream info 
downstream SSL connection in a plain connection. +TEST_F(LuaHttpFilterTest, InspectStreamInfoDowstreamSslConnectionOnPlainConnection) { + const std::string SCRIPT{R"EOF( + function envoy_on_request(request_handle) + if request_handle:streamInfo():downstreamSslConnection() == nil then + request_handle:logTrace("downstreamSslConnection is nil") + end + end + )EOF"}; + + setup(SCRIPT); + + EXPECT_CALL(decoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(stream_info_)); + EXPECT_CALL(stream_info_, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); + + EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("downstreamSslConnection is nil"))); + + Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}}; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true)); +} + TEST_F(LuaHttpFilterTest, ImportPublicKey) { const std::string SCRIPT{R"EOF( function string.fromhex(str) From 360e0803a8a44e2f64479e7e435d311b77aaa6f0 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Thu, 30 Jul 2020 10:47:11 -0400 Subject: [PATCH 792/909] http: setting details for all HTTP/1.1 repsonse paths. (#12228) Setting response details on all HTTP/1.1 response paths, and adding an ASSERT invariant. This also fixes two timeout paths to use the new sendLocalReply functionality, so send response headers and body if possible. Signed-off-by: Alyssa Wilk --- docs/root/version_history/current.rst | 1 + include/envoy/stream_info/stream_info.h | 8 + source/common/http/conn_manager_impl.cc | 37 +++- source/common/http/conn_manager_impl.h | 1 + source/common/runtime/runtime_features.cc | 1 + .../filters/http/cors/cors_filter.cc | 7 + .../extensions/filters/http/lua/lua_filter.cc | 6 + source/server/admin/admin_filter.cc | 2 + .../http/conn_manager_impl_fuzz_test.cc | 6 +- test/common/http/conn_manager_impl_test.cc | 161 +++++++++++++++++- .../filters/http/cors/cors_filter_test.cc | 2 + .../filters/http/lua/lua_filter_test.cc | 1 + test/integration/protocol_integration_test.cc | 23 +++ test/integration/redirect_integration_test.cc | 4 + test/server/admin/admin_filter_test.cc | 2 + 15 files changed, 251 insertions(+), 11 deletions(-) diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 831e5c9f620e..9c1d3d40a59e 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -12,6 +12,7 @@ Minor Behavior Changes * compressor: always insert `Vary` headers for compressible resources even if it's decided not to compress a response due to incompatible `Accept-Encoding` value. The `Vary` header needs to be inserted to let a caching proxy in front of Envoy know that the requested resource still can be served with compression applied. * http: added :ref:`headers_to_add ` to :ref:`local reply mapper ` to allow its users to add/append/override response HTTP headers to local replies. * http: added HCM level configuration of :ref:`error handling on invalid messaging ` which substantially changes Envoy's behavior when encountering invalid HTTP/1.1 defaulting to closing the connection instead of allowing reuse. This can temporarily be reverted by setting `envoy.reloadable_features.hcm_stream_error_on_invalid_message` to false, or permanently reverted by setting the :ref:`HCM option ` to true to restore prior HTTP/1.1 beavior and setting the *new* HTTP/2 configuration :ref:`override_stream_error_on_invalid_http_message ` to false to retain prior HTTP/2 behavior. 
+* http: changed Envoy to send error headers and body when possible. This behavior may be temporarily reverted by setting `envoy.reloadable_features.allow_response_for_timeout` to false. * http: clarified and enforced 1xx handling. Multiple 100-continue headers are coalesced when proxying. 1xx headers other than {100, 101} are dropped. * http: fixed the 100-continue response path to properly handle upstream failure by sending 5xx responses. This behavior can be temporarily reverted by setting `envoy.reloadable_features.allow_500_after_100` to false. * http: the per-stream FilterState maintained by the HTTP connection manager will now provide read/write access to the downstream connection FilterState. As such, code that relies on interacting with this might diff --git a/include/envoy/stream_info/stream_info.h b/include/envoy/stream_info/stream_info.h index 515d4e83c744..4f0309604764 100644 --- a/include/envoy/stream_info/stream_info.h +++ b/include/envoy/stream_info/stream_info.h @@ -109,6 +109,8 @@ struct ResponseCodeDetailValues { const std::string ResponsePayloadTooLArge = "response_payload_too_large"; // The per-stream keepalive timeout was exceeded. const std::string StreamIdleTimeout = "stream_idle_timeout"; + // The per-stream max duration timeout was exceeded. + const std::string MaxDurationTimeout = "max_duration_timeout"; // The per-stream total request timeout was exceeded const std::string RequestOverallTimeout = "request_overall_timeout"; // The request was rejected due to the Overload Manager reaching configured resource limits. @@ -158,6 +160,12 @@ struct ResponseCodeDetailValues { const std::string LateUpstreamReset = "upstream_reset_after_response_started"; // The connection is rejected due to no matching filter chain. const std::string FilterChainNotFound = "filter_chain_not_found"; + // The client disconnected unexpectedly. + const std::string DownstreamRemoteDisconnect = "downstream_remote_disconnect"; + // The response was generated by the admin filter. + const std::string AdminFilterResponse = "admin_filter_response"; + // The original stream was replaced with an internal redirect. + const std::string InternalRedirect = "internal_redirect"; }; using ResponseCodeDetails = ConstSingleton; diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index 602cba3aebd0..d307398e3b14 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -391,12 +391,12 @@ void ConnectionManagerImpl::onEvent(Network::ConnectionEvent event) { stats_.named_.downstream_cx_destroy_local_.inc(); } - if (event == Network::ConnectionEvent::RemoteClose) { - stats_.named_.downstream_cx_destroy_remote_.inc(); - } - if (event == Network::ConnectionEvent::RemoteClose || event == Network::ConnectionEvent::LocalClose) { + if (event == Network::ConnectionEvent::RemoteClose) { + remote_close_ = true; + stats_.named_.downstream_cx_destroy_remote_.inc(); + } // TODO(mattklein123): It is technically possible that something outside of the filter causes // a local connection close, so we still guard against that here. A better solution would be to // have some type of "pre-close" callback that we could hook for cleanup that would get called @@ -610,11 +610,22 @@ ConnectionManagerImpl::ActiveStream::~ActiveStream() { } } + // TODO(alyssawilk) this is not true. Fix. // A downstream disconnect can be identified for HTTP requests when the upstream returns with a 0 // response code and when no other response flags are set. 
if (!stream_info_.hasAnyResponseFlag() && !stream_info_.responseCode()) { stream_info_.setResponseFlag(StreamInfo::ResponseFlag::DownstreamConnectionTermination); } + if (connection_manager_.remote_close_) { + stream_info_.setResponseCodeDetails( + StreamInfo::ResponseCodeDetails::get().DownstreamRemoteDisconnect); + } + + if (connection_manager_.codec_->protocol() < Protocol::Http2) { + // For HTTP/2 there are still some reset cases where details are not set. + // For HTTP/1 there shouldn't be any. Regression-proof this. + ASSERT(stream_info_.responseCodeDetails().has_value()); + } connection_manager_.stats_.named_.downstream_rq_active_.dec(); for (const AccessLog::InstanceSharedPtr& access_log : connection_manager_.config_.accessLogs()) { @@ -654,11 +665,14 @@ void ConnectionManagerImpl::ActiveStream::resetIdleTimer() { void ConnectionManagerImpl::ActiveStream::onIdleTimeout() { connection_manager_.stats_.named_.downstream_rq_idle_timeout_.inc(); // If headers have not been sent to the user, send a 408. - if (response_headers_ != nullptr) { + if (response_headers_ != nullptr && + !Runtime::runtimeFeatureEnabled("envoy.reloadable_features.allow_response_for_timeout")) { // TODO(htuch): We could send trailers here with an x-envoy timeout header // or gRPC status code, and/or set H2 RST_STREAM error. + stream_info_.setResponseCodeDetails(StreamInfo::ResponseCodeDetails::get().StreamIdleTimeout); connection_manager_.doEndStream(*this); } else { + // TODO(mattklein) this may result in multiple flags. This Ok? stream_info_.setResponseFlag(StreamInfo::ResponseFlag::StreamIdleTimeout); sendLocalReply(request_headers_ != nullptr && Grpc::Common::isGrpcRequestHeaders(*request_headers_), @@ -678,7 +692,15 @@ void ConnectionManagerImpl::ActiveStream::onRequestTimeout() { void ConnectionManagerImpl::ActiveStream::onStreamMaxDurationReached() { ENVOY_STREAM_LOG(debug, "Stream max duration time reached", *this); connection_manager_.stats_.named_.downstream_rq_max_duration_reached_.inc(); - connection_manager_.doEndStream(*this); + if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.allow_response_for_timeout")) { + sendLocalReply( + request_headers_ != nullptr && Grpc::Common::isGrpcRequestHeaders(*request_headers_), + Http::Code::RequestTimeout, "downstream duration timeout", nullptr, state_.is_head_request_, + absl::nullopt, StreamInfo::ResponseCodeDetails::get().MaxDurationTimeout); + } else { + stream_info_.setResponseCodeDetails(StreamInfo::ResponseCodeDetails::get().MaxDurationTimeout); + connection_manager_.doEndStream(*this); + } } void ConnectionManagerImpl::FilterManager::addStreamDecoderFilterWorker( @@ -2659,6 +2681,9 @@ bool ConnectionManagerImpl::ActiveStreamDecoderFilter::recreateStream() { if (!complete() || parent_.active_stream_.stream_info_.bytesReceived() != 0) { return false; } + + parent_.active_stream_.stream_info_.setResponseCodeDetails( + StreamInfo::ResponseCodeDetails::get().InternalRedirect); // n.b. we do not currently change the codecs to point at the new stream // decoder because the decoder callbacks are complete. It would be good to // null out that pointer but should not be necessary. 
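The timeout changes above share one pattern: consult the `envoy.reloadable_features.allow_response_for_timeout` runtime flag, then either send a full local reply (headers and, when possible, a body, with response code details attached) or fall back to the older reset-only path. The following is a minimal standalone sketch of that control flow, using hypothetical stand-ins (`StreamSketch`, `runtimeFeatureEnabled`, `sendLocalReply`, `endStream`) rather than Envoy's real classes:

.. code-block:: cpp

  #include <iostream>
  #include <string>

  // Hypothetical stand-in for Envoy's runtime snapshot lookup; hard-coded for the sketch.
  static bool runtimeFeatureEnabled(const std::string& name) {
    return name == "envoy.reloadable_features.allow_response_for_timeout";
  }

  // Hypothetical stand-in for the active stream; only the parts needed for the sketch.
  struct StreamSketch {
    std::string response_code_details;

    void sendLocalReply(int code, const std::string& body, const std::string& details) {
      response_code_details = details; // Local-reply path records details before encoding.
      std::cout << "local reply " << code << ": " << body << "\n";
    }

    void endStream(const std::string& details) {
      response_code_details = details; // Reset path: still record details, then tear down.
      std::cout << "stream reset without a response\n";
    }

    void onMaxDurationReached() {
      const std::string details = "max_duration_timeout";
      if (runtimeFeatureEnabled("envoy.reloadable_features.allow_response_for_timeout")) {
        // New behavior: answer with a 408 so the client sees headers/body when possible.
        sendLocalReply(408, "downstream duration timeout", details);
      } else {
        // Legacy behavior: reset the stream without sending a response.
        endStream(details);
      }
    }
  };

  int main() {
    StreamSketch stream;
    stream.onMaxDurationReached();
    std::cout << "details: " << stream.response_code_details << "\n";
    return 0;
  }

Either branch records `max_duration_timeout` as the response code details, which is what allows the new destructor-time ASSERT on `responseCodeDetails()` to hold for HTTP/1.1 streams.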
diff --git a/source/common/http/conn_manager_impl.h b/source/common/http/conn_manager_impl.h index f5be8f24f9fd..4302ea34aa0c 100644 --- a/source/common/http/conn_manager_impl.h +++ b/source/common/http/conn_manager_impl.h @@ -785,6 +785,7 @@ class ConnectionManagerImpl : Logger::Loggable, const Server::OverloadActionState& overload_stop_accepting_requests_ref_; const Server::OverloadActionState& overload_disable_keepalive_ref_; TimeSource& time_source_; + bool remote_close_{}; }; } // namespace Http diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc index 5e179bc9c5dd..0cd11caf9396 100644 --- a/source/common/runtime/runtime_features.cc +++ b/source/common/runtime/runtime_features.cc @@ -75,6 +75,7 @@ constexpr const char* runtime_features[] = { "envoy.reloadable_features.listener_in_place_filterchain_update", "envoy.reloadable_features.preserve_query_string_in_path_redirects", "envoy.reloadable_features.preserve_upstream_date", + "envoy.reloadable_features.allow_response_for_timeout", "envoy.reloadable_features.stop_faking_paths", "envoy.reloadable_features.hcm_stream_error_on_invalid_message", "envoy.reloadable_features.strict_1xx_and_204_response_headers", diff --git a/source/extensions/filters/http/cors/cors_filter.cc b/source/extensions/filters/http/cors/cors_filter.cc index 976e9b336f6c..574a0f36bfc1 100644 --- a/source/extensions/filters/http/cors/cors_filter.cc +++ b/source/extensions/filters/http/cors/cors_filter.cc @@ -14,6 +14,11 @@ namespace Extensions { namespace HttpFilters { namespace Cors { +struct HttpResponseCodeDetailValues { + const absl::string_view CorsResponse = "cors_response"; +}; +using HttpResponseCodeDetails = ConstSingleton; + Http::RegisterCustomInlineHeader access_control_request_method_handle(Http::CustomHeaders::get().AccessControlRequestMethod); Http::RegisterCustomInlineHeader @@ -104,6 +109,8 @@ Http::FilterHeadersStatus CorsFilter::decodeHeaders(Http::RequestHeaderMap& head response_headers->setInline(access_control_max_age_handle.handle(), maxAge()); } + decoder_callbacks_->streamInfo().setResponseCodeDetails( + HttpResponseCodeDetails::get().CorsResponse); decoder_callbacks_->encodeHeaders(std::move(response_headers), true); return Http::FilterHeadersStatus::StopIteration; diff --git a/source/extensions/filters/http/lua/lua_filter.cc b/source/extensions/filters/http/lua/lua_filter.cc index 4d09421d5e3e..0053443cd549 100644 --- a/source/extensions/filters/http/lua/lua_filter.cc +++ b/source/extensions/filters/http/lua/lua_filter.cc @@ -19,6 +19,11 @@ namespace Lua { namespace { +struct HttpResponseCodeDetailValues { + const absl::string_view LuaResponse = "lua_response"; +}; +using HttpResponseCodeDetails = ConstSingleton; + const std::string DEPRECATED_LUA_NAME = "envoy.lua"; std::atomic& deprecatedNameLogged() { @@ -721,6 +726,7 @@ void Filter::scriptLog(spdlog::level::level_enum level, const char* message) { void Filter::DecoderCallbacks::respond(Http::ResponseHeaderMapPtr&& headers, Buffer::Instance* body, lua_State*) { + callbacks_->streamInfo().setResponseCodeDetails(HttpResponseCodeDetails::get().LuaResponse); callbacks_->encodeHeaders(std::move(headers), body == nullptr); if (body && !parent_.destroyed_) { callbacks_->encodeData(*body, true); diff --git a/source/server/admin/admin_filter.cc b/source/server/admin/admin_filter.cc index 0cfb76839325..d2b70fa36a6d 100644 --- a/source/server/admin/admin_filter.cc +++ b/source/server/admin/admin_filter.cc @@ -70,6 +70,8 @@ void AdminFilter::onComplete() 
{ RELEASE_ASSERT(request_headers_, ""); Http::Code code = admin_server_callback_func_(path, *header_map, response, *this); Utility::populateFallbackResponseHeaders(code, *header_map); + decoder_callbacks_->streamInfo().setResponseCodeDetails( + StreamInfo::ResponseCodeDetails::get().AdminFilterResponse); decoder_callbacks_->encodeHeaders(std::move(header_map), end_stream_on_complete_ && response.length() == 0); diff --git a/test/common/http/conn_manager_impl_fuzz_test.cc b/test/common/http/conn_manager_impl_fuzz_test.cc index bfca81e4145c..4a7315cf56c4 100644 --- a/test/common/http/conn_manager_impl_fuzz_test.cc +++ b/test/common/http/conn_manager_impl_fuzz_test.cc @@ -90,7 +90,11 @@ class FuzzConfig : public ConnectionManagerConfig { callbacks.addStreamDecoderFilter(StreamDecoderFilterSharedPtr{decoder_filter_}); callbacks.addStreamEncoderFilter(StreamEncoderFilterSharedPtr{encoder_filter_}); })); - EXPECT_CALL(*decoder_filter_, setDecoderFilterCallbacks(_)); + EXPECT_CALL(*decoder_filter_, setDecoderFilterCallbacks(_)) + .WillOnce(Invoke([this](StreamDecoderFilterCallbacks& callbacks) -> void { + decoder_filter_->callbacks_ = &callbacks; + callbacks.streamInfo().setResponseCodeDetails(""); + })); EXPECT_CALL(*encoder_filter_, setEncoderFilterCallbacks(_)); EXPECT_CALL(filter_factory_, createUpgradeFilterChain("WebSocket", _, _)) .WillRepeatedly(Invoke([&](absl::string_view, const Http::FilterChainFactory::UpgradeMap*, diff --git a/test/common/http/conn_manager_impl_test.cc b/test/common/http/conn_manager_impl_test.cc index 88b6712252d9..a0752d6a5930 100644 --- a/test/common/http/conn_manager_impl_test.cc +++ b/test/common/http/conn_manager_impl_test.cc @@ -272,6 +272,7 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false)) .WillOnce(Return(FilterHeadersStatus::Continue)); EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); + decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[0]->callbacks_->encodeHeaders(std::move(response_headers), false); return altered_response_headers; } @@ -483,6 +484,7 @@ TEST_F(HttpConnectionManagerImplTest, HeaderOnlyRequestAndResponse) { } ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + filter->callbacks_->streamInfo().setResponseCodeDetails(""); filter->callbacks_->encodeHeaders(std::move(response_headers), true); // Drain 2 so that on the 2nd iteration we will hit zero. 
@@ -538,6 +540,7 @@ TEST_F(HttpConnectionManagerImplTest, 100ContinueResponse) { ResponseHeaderMapPtr continue_headers{new TestResponseHeaderMapImpl{{":status", "100"}}}; filter->callbacks_->encode100ContinueHeaders(std::move(continue_headers)); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + filter->callbacks_->streamInfo().setResponseCodeDetails(""); filter->callbacks_->encodeHeaders(std::move(response_headers), true); data.drain(4); @@ -577,6 +580,7 @@ TEST_F(HttpConnectionManagerImplTest, 100ContinueResponseWithEncoderFiltersProxy .WillOnce(Return(FilterHeadersStatus::Continue)); EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[0]->callbacks_->encodeHeaders(std::move(response_headers), false); } @@ -600,6 +604,7 @@ TEST_F(HttpConnectionManagerImplTest, 100ContinueResponseWithEncoderFilters) { .WillOnce(Return(FilterHeadersStatus::Continue)); EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[0]->callbacks_->encodeHeaders(std::move(response_headers), false); } @@ -630,6 +635,7 @@ TEST_F(HttpConnectionManagerImplTest, PauseResume100Continue) { .WillOnce(Return(FilterHeadersStatus::Continue)); EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[1]->callbacks_->encodeHeaders(std::move(response_headers), false); } @@ -681,6 +687,7 @@ TEST_F(HttpConnectionManagerImplTest, 100ContinueResponseWithDecoderPause) { decoder->decodeData(data, true); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + filter->callbacks_->streamInfo().setResponseCodeDetails(""); filter->callbacks_->encodeHeaders(std::move(response_headers), true); data.drain(4); @@ -868,6 +875,9 @@ TEST_F(HttpConnectionManagerImplTest, FilterShouldUseSantizedPath) { // Kick off the incoming data. Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); + + EXPECT_CALL(*filter, onDestroy()); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } // The router observes normalized paths, not the original path, when path @@ -906,6 +916,9 @@ TEST_F(HttpConnectionManagerImplTest, RouteShouldUseSantizedPath) { // Kick off the incoming data. Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); + + // Clean up. + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } TEST_F(HttpConnectionManagerImplTest, RouteOverride) { @@ -1102,6 +1115,10 @@ TEST_F(HttpConnectionManagerImplTest, RouteOverride) { // Kick off the incoming data. Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); + + // Clean up. + expectOnDestroy(); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } // Filters observe host header w/o port's part when port's removal is configured @@ -1138,6 +1155,10 @@ TEST_F(HttpConnectionManagerImplTest, FilterShouldUseNormalizedHost) { // Kick off the incoming data. 
Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); + + // Clean up. + EXPECT_CALL(*filter, onDestroy()); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } // The router observes host header w/o port, not the original host, when @@ -1176,6 +1197,9 @@ TEST_F(HttpConnectionManagerImplTest, RouteShouldUseNormalizedHost) { // Kick off the incoming data. Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); + + // Clean up. + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } TEST_F(HttpConnectionManagerImplTest, PreserveUpstreamDateDisabledDateNotSet) { @@ -1393,6 +1417,7 @@ TEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlow) { decoder->decodeHeaders(std::move(headers), true); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + filter->callbacks_->streamInfo().setResponseCodeDetails(""); filter->callbacks_->encodeHeaders(std::move(response_headers), true); filter->callbacks_->activeSpan().setTag("service-cluster", "scoobydoo"); data.drain(4); @@ -1462,6 +1487,7 @@ TEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlowIngressDecorat decoder->decodeHeaders(std::move(headers), true); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + filter->callbacks_->streamInfo().setResponseCodeDetails(""); filter->callbacks_->encodeHeaders(std::move(response_headers), true); filter->callbacks_->activeSpan().setTag("service-cluster", "scoobydoo"); data.drain(4); @@ -1529,6 +1555,7 @@ TEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlowIngressDecorat decoder->decodeHeaders(std::move(headers), true); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + filter->callbacks_->streamInfo().setResponseCodeDetails(""); filter->callbacks_->encodeHeaders(std::move(response_headers), true); filter->callbacks_->activeSpan().setTag("service-cluster", "scoobydoo"); data.drain(4); @@ -1595,6 +1622,7 @@ TEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlowIngressDecorat decoder->decodeHeaders(std::move(headers), true); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + filter->callbacks_->streamInfo().setResponseCodeDetails(""); filter->callbacks_->encodeHeaders(std::move(response_headers), true); filter->callbacks_->activeSpan().setTag("service-cluster", "scoobydoo"); @@ -1676,6 +1704,7 @@ TEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlowEgressDecorato decoder->decodeHeaders(std::move(headers), true); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + filter->callbacks_->streamInfo().setResponseCodeDetails(""); filter->callbacks_->encodeHeaders(std::move(response_headers), true); filter->callbacks_->activeSpan().setTag("service-cluster", "scoobydoo"); @@ -1775,6 +1804,8 @@ TEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlowEgressDecorato Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); + + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } TEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlowEgressDecoratorOverrideOp) { @@ -1839,6 +1870,7 @@ TEST_F(HttpConnectionManagerImplTest, StartAndFinishSpanNormalFlowEgressDecorato {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; decoder->decodeHeaders(std::move(headers), true); + 
filter->callbacks_->streamInfo().setResponseCodeDetails(""); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{ {":status", "200"}, {"x-envoy-decorator-operation", "testOp"}}}; filter->callbacks_->encodeHeaders(std::move(response_headers), true); @@ -1896,6 +1928,7 @@ TEST_F(HttpConnectionManagerImplTest, {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; decoder->decodeHeaders(std::move(headers), true); + filter->callbacks_->streamInfo().setResponseCodeDetails(""); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{ {":status", "200"}, {"x-envoy-decorator-operation", "testOp"}}}; filter->callbacks_->encodeHeaders(std::move(response_headers), true); @@ -1954,6 +1987,7 @@ TEST_F(HttpConnectionManagerImplTest, TestAccessLog) { {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; decoder->decodeHeaders(std::move(headers), true); + filter->callbacks_->streamInfo().setResponseCodeDetails(""); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; filter->callbacks_->encodeHeaders(std::move(response_headers), true); @@ -1984,6 +2018,7 @@ TEST_F(HttpConnectionManagerImplTest, TestDownstreamDisconnectAccessLog) { EXPECT_TRUE(stream_info.hasAnyResponseFlag()); EXPECT_TRUE( stream_info.hasResponseFlag(StreamInfo::ResponseFlag::DownstreamConnectionTermination)); + EXPECT_EQ("downstream_remote_disconnect", stream_info.responseCodeDetails().value()); })); NiceMock encoder; @@ -2001,6 +2036,8 @@ TEST_F(HttpConnectionManagerImplTest, TestDownstreamDisconnectAccessLog) { Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); + + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } TEST_F(HttpConnectionManagerImplTest, TestAccessLogWithTrailers) { @@ -2038,6 +2075,7 @@ TEST_F(HttpConnectionManagerImplTest, TestAccessLogWithTrailers) { {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; decoder->decodeHeaders(std::move(headers), true); + filter->callbacks_->streamInfo().setResponseCodeDetails(""); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; filter->callbacks_->encodeHeaders(std::move(response_headers), false); @@ -2129,6 +2167,7 @@ TEST_F(HttpConnectionManagerImplTest, TestAccessLogSsl) { {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; decoder->decodeHeaders(std::move(headers), true); + filter->callbacks_->streamInfo().setResponseCodeDetails(""); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; filter->callbacks_->encodeHeaders(std::move(response_headers), false); @@ -2173,6 +2212,7 @@ TEST_F(HttpConnectionManagerImplTest, DoNotStartSpanIfTracingIsNotEnabled) { {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}}; decoder->decodeHeaders(std::move(headers), true); + filter->callbacks_->streamInfo().setResponseCodeDetails(""); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; filter->callbacks_->encodeHeaders(std::move(response_headers), true); @@ -2229,6 +2269,7 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutNotConfigured) { conn_manager_->onData(fake_input, false); EXPECT_EQ(0U, stats_.named_.downstream_rq_idle_timeout_.value()); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } // When the global timeout is configured, the timer is enabled before we receive @@ -2380,6 +2421,7 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutRouteOverride) { 
RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ {":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; EXPECT_CALL(*idle_timer, enableTimer(std::chrono::milliseconds(30), _)); + EXPECT_CALL(*idle_timer, disableTimer()); decoder->decodeHeaders(std::move(headers), false); data.drain(4); @@ -2390,6 +2432,7 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutRouteOverride) { conn_manager_->onData(fake_input, false); EXPECT_EQ(0U, stats_.named_.downstream_rq_idle_timeout_.value()); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } // Per-route zero timeout overrides the global stream idle timeout. @@ -2418,6 +2461,8 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutRouteZeroOverride) { conn_manager_->onData(fake_input, false); EXPECT_EQ(0U, stats_.named_.downstream_rq_idle_timeout_.value()); + + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } // Validate the per-stream idle timeout after having sent downstream headers. @@ -2618,6 +2663,7 @@ TEST_F(HttpConnectionManagerImplTest, PerStreamIdleTimeoutAfterBidiData) { ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; EXPECT_CALL(*idle_timer, enableTimer(_, _)); + filter->callbacks_->streamInfo().setResponseCodeDetails(""); filter->callbacks_->encodeHeaders(std::move(response_headers), false); EXPECT_CALL(*idle_timer, enableTimer(_, _)); @@ -2668,6 +2714,7 @@ TEST_F(HttpConnectionManagerImplTest, RequestTimeoutDisabledByDefault) { Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } TEST_F(HttpConnectionManagerImplTest, RequestTimeoutDisabledIfSetToZero) { @@ -2682,6 +2729,7 @@ TEST_F(HttpConnectionManagerImplTest, RequestTimeoutDisabledIfSetToZero) { Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } TEST_F(HttpConnectionManagerImplTest, RequestTimeoutValidlyConfigured) { @@ -2691,6 +2739,7 @@ TEST_F(HttpConnectionManagerImplTest, RequestTimeoutValidlyConfigured) { EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { Event::MockTimer* request_timer = setUpTimer(); EXPECT_CALL(*request_timer, enableTimer(request_timeout_, _)); + EXPECT_CALL(*request_timer, disableTimer()); conn_manager_->newStream(response_encoder_); return Http::okStatus(); @@ -2698,6 +2747,7 @@ TEST_F(HttpConnectionManagerImplTest, RequestTimeoutValidlyConfigured) { Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } TEST_F(HttpConnectionManagerImplTest, RequestTimeoutCallbackDisarmsAndReturns408) { @@ -2736,7 +2786,7 @@ TEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsNotDisarmedOnIncompleteReq EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance&) -> Http::Status { Event::MockTimer* request_timer = setUpTimer(); EXPECT_CALL(*request_timer, enableTimer(request_timeout_, _)).Times(1); - EXPECT_CALL(*request_timer, disableTimer()).Times(0); + EXPECT_CALL(*request_timer, disableTimer()).Times(1); RequestDecoder* decoder = &conn_manager_->newStream(response_encoder_); RequestHeaderMapPtr headers{ @@ -2751,6 +2801,8 @@ TEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsNotDisarmedOnIncompleteReq conn_manager_->onData(fake_input, false); // kick 
off request EXPECT_EQ(0U, stats_.named_.downstream_rq_timeout_.value()); + + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } TEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsDisarmedOnCompleteRequestWithHeader) { @@ -2765,7 +2817,7 @@ TEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsDisarmedOnCompleteRequestW RequestHeaderMapPtr headers{ new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; - EXPECT_CALL(*request_timer, disableTimer()).Times(1); + EXPECT_CALL(*request_timer, disableTimer()).Times(2); decoder->decodeHeaders(std::move(headers), true); return Http::okStatus(); })); @@ -2774,6 +2826,7 @@ TEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsDisarmedOnCompleteRequestW conn_manager_->onData(fake_input, false); // kick off request EXPECT_EQ(0U, stats_.named_.downstream_rq_timeout_.value()); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } TEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsDisarmedOnCompleteRequestWithData) { @@ -2789,7 +2842,7 @@ TEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsDisarmedOnCompleteRequestW new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "POST"}}}; decoder->decodeHeaders(std::move(headers), false); - EXPECT_CALL(*request_timer, disableTimer()).Times(1); + EXPECT_CALL(*request_timer, disableTimer()).Times(2); decoder->decodeData(data, true); return Http::okStatus(); })); @@ -2798,6 +2851,7 @@ TEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsDisarmedOnCompleteRequestW conn_manager_->onData(fake_input, false); EXPECT_EQ(0U, stats_.named_.downstream_rq_timeout_.value()); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } TEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsDisarmedOnCompleteRequestWithTrailers) { @@ -2814,7 +2868,7 @@ TEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsDisarmedOnCompleteRequestW decoder->decodeHeaders(std::move(headers), false); decoder->decodeData(data, false); - EXPECT_CALL(*request_timer, disableTimer()).Times(1); + EXPECT_CALL(*request_timer, disableTimer()).Times(2); RequestTrailerMapPtr trailers{new TestRequestTrailerMapImpl{{"foo", "bar"}}}; decoder->decodeTrailers(std::move(trailers)); return Http::okStatus(); @@ -2824,6 +2878,7 @@ TEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsDisarmedOnCompleteRequestW conn_manager_->onData(fake_input, false); EXPECT_EQ(0U, stats_.named_.downstream_rq_timeout_.value()); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } TEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsDisarmedOnEncodeHeaders) { @@ -2848,6 +2903,7 @@ TEST_F(HttpConnectionManagerImplTest, RequestTimeoutIsDisarmedOnEncodeHeaders) { EXPECT_CALL(*request_timer, disableTimer()).Times(1); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + filter->callbacks_->streamInfo().setResponseCodeDetails(""); filter->callbacks_->encodeHeaders(std::move(response_headers), false); return Http::okStatus(); })); @@ -2895,6 +2951,7 @@ TEST_F(HttpConnectionManagerImplTest, MaxStreamDurationDisabledIfSetToZero) { Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); // kick off request + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } TEST_F(HttpConnectionManagerImplTest, MaxStreamDurationValidlyConfigured) { @@ -2905,12 +2962,14 @@ TEST_F(HttpConnectionManagerImplTest, 
MaxStreamDurationValidlyConfigured) { Event::MockTimer* duration_timer = setUpTimer(); EXPECT_CALL(*duration_timer, enableTimer(max_stream_duration_.value(), _)); + EXPECT_CALL(*duration_timer, disableTimer()); conn_manager_->newStream(response_encoder_); return Http::okStatus(); })); Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); // kick off request + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } TEST_F(HttpConnectionManagerImplTest, MaxStreamDurationCallbackResetStream) { @@ -2928,6 +2987,8 @@ TEST_F(HttpConnectionManagerImplTest, MaxStreamDurationCallbackResetStream) { conn_manager_->onData(fake_input, false); // kick off request EXPECT_CALL(*duration_timer, disableTimer()); + EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); + EXPECT_CALL(response_encoder_, encodeData(_, true)); duration_timer->invokeCallback(); EXPECT_EQ(1U, stats_.named_.downstream_rq_max_duration_reached_.value()); @@ -3134,6 +3195,7 @@ TEST_F(HttpConnectionManagerImplTest, FooUpgradeDrainClose) { {"upgrade", "foo"}}}; decoder->decodeHeaders(std::move(headers), false); + filter->decoder_callbacks_->streamInfo().setResponseCodeDetails(""); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{ {":status", "101"}, {"Connection", "upgrade"}, {"upgrade", "foo"}}}; filter->decoder_callbacks_->encodeHeaders(std::move(response_headers), false); @@ -3169,6 +3231,7 @@ TEST_F(HttpConnectionManagerImplTest, ConnectAsUpgrade) { // Kick off the incoming data. Use extra data which should cause a redispatch. Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } TEST_F(HttpConnectionManagerImplTest, ConnectWithEmptyPath) { @@ -3256,6 +3319,7 @@ TEST_F(HttpConnectionManagerImplTest, DrainCloseRaceWithClose) { Event::MockTimer* drain_timer = setUpTimer(); EXPECT_CALL(*drain_timer, enableTimer(_, _)); expectOnDestroy(); + decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[0]->callbacks_->encodeHeaders(std::move(response_headers), true); // Fake a protocol error that races with the drain timeout. This will cause a local close. 
@@ -3317,6 +3381,9 @@ TEST_F(HttpConnectionManagerImplTest, Buffer::OwnedImpl fake_input; conn_manager_->onData(fake_input, false); + + expectOnDestroy(); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } TEST_F(HttpConnectionManagerImplTest, DrainClose) { @@ -3352,6 +3419,7 @@ TEST_F(HttpConnectionManagerImplTest, DrainClose) { EXPECT_CALL(*drain_timer, enableTimer(_, _)); EXPECT_CALL(drain_close_, drainClose()).WillOnce(Return(true)); EXPECT_CALL(*codec_, shutdownNotice()); + filter->callbacks_->streamInfo().setResponseCodeDetails(""); filter->callbacks_->encodeHeaders(std::move(response_headers), true); EXPECT_EQ(ssl_connection_.get(), filter->callbacks_->connection()->ssl().get()); @@ -3398,6 +3466,7 @@ TEST_F(HttpConnectionManagerImplTest, ResponseBeforeRequestComplete) { close(Network::ConnectionCloseType::FlushWriteAndDelay)); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[0]->callbacks_->encodeHeaders(std::move(response_headers), true); } @@ -3432,6 +3501,7 @@ TEST_F(HttpConnectionManagerImplTest, DisconnectOnProxyConnectionDisconnect) { close(Network::ConnectionCloseType::FlushWriteAndDelay)); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[0]->callbacks_->encodeHeaders(std::move(response_headers), true); } @@ -3470,6 +3540,7 @@ TEST_F(HttpConnectionManagerImplTest, ResponseStartBeforeRequestComplete) { EXPECT_NE(nullptr, headers.Server()); EXPECT_EQ("", headers.getServerValue()); })); + filter->callbacks_->streamInfo().setResponseCodeDetails(""); filter->callbacks_->encodeHeaders(std::move(response_headers), false); // Finish the request. @@ -3671,6 +3742,7 @@ TEST_F(HttpConnectionManagerImplTest, IdleTimeout) { EXPECT_CALL(*idle_timer, enableTimer(_, _)); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + filter->callbacks_->streamInfo().setResponseCodeDetails(""); filter->callbacks_->encodeHeaders(std::move(response_headers), true); Event::MockTimer* drain_timer = setUpTimer(); @@ -3738,6 +3810,7 @@ TEST_F(HttpConnectionManagerImplTest, ConnectionDuration) { conn_manager_->onData(fake_input, false); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + filter->callbacks_->streamInfo().setResponseCodeDetails(""); filter->callbacks_->encodeHeaders(std::move(response_headers), true); Event::MockTimer* drain_timer = setUpTimer(); @@ -3786,6 +3859,7 @@ TEST_F(HttpConnectionManagerImplTest, IntermediateBufferingEarlyResponse) { .WillOnce(Invoke([&](RequestHeaderMap&, bool) -> FilterHeadersStatus { // Now filter 2 will send a complete response. 
ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[1]->callbacks_->encodeHeaders(std::move(response_headers), true); return FilterHeadersStatus::StopIteration; })); @@ -3842,6 +3916,9 @@ TEST_F(HttpConnectionManagerImplTest, DoubleBuffering) { .WillOnce(Return(FilterDataStatus::StopIterationNoBuffer)); EXPECT_CALL(*decoder_filters_[2], decodeComplete()); decoder_filters_[1]->callbacks_->continueDecoding(); + + expectOnDestroy(); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } TEST_F(HttpConnectionManagerImplTest, ZeroByteDataFiltering) { @@ -3883,6 +3960,9 @@ TEST_F(HttpConnectionManagerImplTest, ZeroByteDataFiltering) { .WillOnce(Return(FilterDataStatus::StopIterationNoBuffer)); EXPECT_CALL(*decoder_filters_[1], decodeComplete()); decoder_filters_[0]->callbacks_->continueDecoding(); + + expectOnDestroy(); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } TEST_F(HttpConnectionManagerImplTest, FilterAddTrailersInTrailersCallback) { @@ -3938,6 +4018,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterAddTrailersInTrailersCallback) { EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); // invoke encodeHeaders + decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[0]->callbacks_->encodeHeaders( ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, false); @@ -4029,6 +4110,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterAddTrailersInDataCallbackNoTrailers) EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); // invoke encodeHeaders + decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[0]->callbacks_->encodeHeaders( ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, false); @@ -4115,6 +4197,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterAddBodyInTrailersCallback) { .WillOnce(Return(FilterHeadersStatus::Continue)); EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); + decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[1]->callbacks_->encodeHeaders( ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, false); @@ -4187,6 +4270,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterAddBodyInTrailersCallback_NoDataFram EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false)) .WillOnce(Return(FilterHeadersStatus::StopIteration)); + decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[0]->callbacks_->encodeHeaders( ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, false); @@ -4250,6 +4334,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterAddBodyInTrailersCallback_ContinueAf EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false)) .WillOnce(Return(FilterHeadersStatus::StopIteration)); + decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[0]->callbacks_->encodeHeaders( ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, false); @@ -4333,6 +4418,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterAddBodyDuringDecodeData) { EXPECT_CALL(response_encoder_, encodeData(_, true)); expectOnDestroy(); + decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[1]->callbacks_->encodeHeaders( ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, false); 
Buffer::OwnedImpl data1("good"); @@ -4389,6 +4475,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterAddBodyInline) { EXPECT_CALL(response_encoder_, encodeData(_, true)); expectOnDestroy(); + decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[1]->callbacks_->encodeHeaders( ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true); } @@ -4455,6 +4542,9 @@ TEST_F(HttpConnectionManagerImplTest, Filter) { // Kick off the incoming data. Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); + + expectOnDestroy(); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } TEST_F(HttpConnectionManagerImplTest, UpstreamWatermarkCallbacks) { @@ -4493,6 +4583,7 @@ TEST_F(HttpConnectionManagerImplTest, UpstreamWatermarkCallbacks) { EXPECT_CALL(*encoder_filters_[1], encodeComplete()); EXPECT_CALL(response_encoder_, encodeHeaders(_, true)); expectOnDestroy(); + decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[1]->callbacks_->encodeHeaders( ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true); } @@ -4557,6 +4648,10 @@ TEST_F(HttpConnectionManagerImplTest, UnderlyingConnectionWatermarksPassedOnWith EXPECT_CALL(callbacks2, onAboveWriteBufferHighWatermark()); decoder_filters_[0]->callbacks_->addDownstreamWatermarkCallbacks(callbacks2); } + + expectOnDestroy(); + EXPECT_CALL(stream_, removeCallbacks(_)); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } TEST_F(HttpConnectionManagerImplTest, UnderlyingConnectionWatermarksUnwoundWithLazyCreation) { @@ -4621,6 +4716,9 @@ TEST_F(HttpConnectionManagerImplTest, UnderlyingConnectionWatermarksUnwoundWithL EXPECT_CALL(callbacks, onBelowWriteBufferLowWatermark()).Times(0); decoder_filters_[0]->callbacks_->addDownstreamWatermarkCallbacks(callbacks); } + expectOnDestroy(); + EXPECT_CALL(stream_, removeCallbacks(_)); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } TEST_F(HttpConnectionManagerImplTest, AlterFilterWatermarkLimits) { @@ -4649,6 +4747,10 @@ TEST_F(HttpConnectionManagerImplTest, AlterFilterWatermarkLimits) { // Once the limits are turned off can be turned on again. 
decoder_filters_[0]->callbacks_->setDecoderBufferLimit(100); EXPECT_EQ(100, decoder_filters_[0]->callbacks_->decoderBufferLimit()); + + expectOnDestroy(); + EXPECT_CALL(stream_, removeCallbacks(_)); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } TEST_F(HttpConnectionManagerImplTest, HitFilterWatermarkLimits) { @@ -4672,6 +4774,7 @@ TEST_F(HttpConnectionManagerImplTest, HitFilterWatermarkLimits) { ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false)) .WillOnce(Return(FilterHeadersStatus::StopIteration)); + decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[0]->callbacks_->encodeHeaders(std::move(response_headers), false); MockDownstreamWatermarkCallbacks callbacks; @@ -4774,6 +4877,7 @@ TEST_F(HttpConnectionManagerImplTest, HitResponseBufferLimitsBeforeHeaders) { ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; EXPECT_CALL(*encoder_filters_[1], encodeHeaders(_, false)) .WillOnce(Return(FilterHeadersStatus::StopIteration)); + decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[0]->callbacks_->encodeHeaders(std::move(response_headers), false); // Now overload the buffer with response data. The filter returns @@ -4816,6 +4920,7 @@ TEST_F(HttpConnectionManagerImplTest, HitResponseBufferLimitsAfterHeaders) { EXPECT_CALL(*encoder_filters_[0], encodeHeaders(_, false)) .WillOnce(Return(FilterHeadersStatus::Continue)); EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); + decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[0]->callbacks_->encodeHeaders(std::move(response_headers), false); // Now overload the buffer with response data. 
The filter returns @@ -4955,6 +5060,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterContinueAndEndStreamHeaders) { expectOnDestroy(); + decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[1]->callbacks_->encodeHeaders( makeHeaderMap({{":status", "200"}}), true); @@ -4998,6 +5104,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterContinueAndEndStreamData) { expectOnDestroy(); + decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[1]->callbacks_->encodeHeaders( makeHeaderMap({{":status", "200"}}), false); @@ -5044,6 +5151,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterContinueAndEndStreamTrailers) { expectOnDestroy(); + decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[1]->callbacks_->encodeHeaders( makeHeaderMap({{":status", "200"}}), false); @@ -5090,6 +5198,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterAddBodyContinuation) { .WillOnce(Return(FilterHeadersStatus::StopIteration)); EXPECT_CALL(*encoder_filters_[1], encodeComplete()); + decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[1]->callbacks_->encodeHeaders( ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true); @@ -5204,6 +5313,7 @@ TEST_F(HttpConnectionManagerImplTest, AddDataWithAllContinue) { EXPECT_CALL(*encoder_filters_[2], encodeData(_, true)).Times(0); EXPECT_CALL(*encoder_filters_[1], encodeData(_, true)).Times(0); + decoder_filters_[2]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[2]->callbacks_->encodeHeaders( ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true); } @@ -5291,6 +5401,7 @@ TEST_F(HttpConnectionManagerImplTest, AddDataWithStopAndContinue) { .WillOnce(Return(FilterHeadersStatus::StopIteration)); EXPECT_CALL(*encoder_filters_[2], encodeComplete()); + decoder_filters_[2]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[2]->callbacks_->encodeHeaders( ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true); @@ -5381,6 +5492,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterDirectDecodeEncodeDataNoTrailers) { })); EXPECT_CALL(*encoder_filters_[1], encodeComplete()); + decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[1]->callbacks_->encodeHeaders( ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, false); Buffer::OwnedImpl response_body("response"); @@ -5471,6 +5583,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterDirectDecodeEncodeDataTrailers) { .WillOnce(Return(FilterTrailersStatus::StopIteration)); EXPECT_CALL(*encoder_filters_[1], encodeComplete()); + decoder_filters_[1]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[1]->callbacks_->encodeHeaders( ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, false); Buffer::OwnedImpl response_body("response"); @@ -5565,6 +5678,7 @@ TEST_F(HttpConnectionManagerImplTest, MultipleFilters) { .WillOnce(Return(FilterTrailersStatus::StopIteration)); EXPECT_CALL(*encoder_filters_[1], encodeComplete()); EXPECT_EQ(ssl_connection_.get(), encoder_filters_[1]->callbacks_->connection()->ssl().get()); + decoder_filters_[2]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[2]->callbacks_->encodeHeaders( ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, false); Buffer::OwnedImpl response_body("response"); @@ -5663,6 +5777,7 @@ 
TEST_F(HttpConnectionManagerImplTest, DisableKeepAliveWhenOverloaded) { decoder->decodeHeaders(std::move(headers), true); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + filter->callbacks_->streamInfo().setResponseCodeDetails(""); filter->callbacks_->encodeHeaders(std::move(response_headers), true); data.drain(4); @@ -5699,6 +5814,10 @@ TEST_F(HttpConnectionManagerImplTest, TestStopAllIterationAndBufferOnDecodingPat .WillOnce(Return(FilterTrailersStatus::Continue)); EXPECT_CALL(*decoder_filters_[1], decodeComplete()); decoder_filters_[0]->callbacks_->continueDecoding(); + + expectOnDestroy(); + EXPECT_CALL(stream_, removeCallbacks(_)); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } TEST_F(HttpConnectionManagerImplTest, TestStopAllIterationAndBufferOnDecodingPathSecondFilter) { @@ -5722,6 +5841,10 @@ TEST_F(HttpConnectionManagerImplTest, TestStopAllIterationAndBufferOnDecodingPat .WillOnce(Return(FilterTrailersStatus::Continue)); EXPECT_CALL(*decoder_filters_[1], decodeComplete()); decoder_filters_[1]->callbacks_->continueDecoding(); + + expectOnDestroy(); + EXPECT_CALL(stream_, removeCallbacks(_)); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } TEST_F(HttpConnectionManagerImplTest, TestStopAllIterationAndBufferOnEncodingPath) { @@ -5735,6 +5858,7 @@ TEST_F(HttpConnectionManagerImplTest, TestStopAllIterationAndBufferOnEncodingPat return FilterHeadersStatus::StopAllIterationAndBuffer; })); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[0]->callbacks_->encodeHeaders(std::move(response_headers), false); // Invoke encodeData while all iteration is stopped and make sure the filters do not have @@ -5786,6 +5910,7 @@ TEST_F(HttpConnectionManagerImplTest, DisableKeepAliveWhenDraining) { decoder->decodeHeaders(std::move(headers), true); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + filter->callbacks_->streamInfo().setResponseCodeDetails(""); filter->callbacks_->encodeHeaders(std::move(response_headers), true); data.drain(4); @@ -5862,6 +5987,9 @@ TEST_F(HttpConnectionManagerImplTest, TestSessionTrace) { .WillOnce(Return(FilterTrailersStatus::StopIteration)); decoder->decodeTrailers(std::move(trailers)); } + + expectOnDestroy(); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } // SRDS no scope found. @@ -5892,6 +6020,9 @@ TEST_F(HttpConnectionManagerImplTest, TestSrdsRouteNotFound) { Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); + + expectOnDestroy(); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } // SRDS updating scopes affects routing. @@ -5941,6 +6072,9 @@ TEST_F(HttpConnectionManagerImplTest, TestSrdsUpdate) { EXPECT_CALL(*decoder_filters_[0], decodeComplete()); // end_stream=true. Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); + + expectOnDestroy(); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } // SRDS Scope header update cause cross-scope reroute. 
@@ -6000,6 +6134,9 @@ TEST_F(HttpConnectionManagerImplTest, TestSrdsCrossScopeReroute) { Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); + + expectOnDestroy(); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } // SRDS scoped RouteConfiguration found and route found. @@ -6041,6 +6178,9 @@ TEST_F(HttpConnectionManagerImplTest, TestSrdsRouteFound) { Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); + + expectOnDestroy(); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } TEST_F(HttpConnectionManagerImplTest, NewConnection) { @@ -6092,6 +6232,9 @@ TEST_F(HttpConnectionManagerImplTest, TestUpstreamRequestHeadersSize) { Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); + + expectOnDestroy(); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } TEST_F(HttpConnectionManagerImplTest, TestUpstreamRequestBodySize) { @@ -6131,6 +6274,9 @@ TEST_F(HttpConnectionManagerImplTest, TestUpstreamRequestBodySize) { Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); + + expectOnDestroy(); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } TEST_F(HttpConnectionManagerImplTest, TestUpstreamResponseHeadersSize) { @@ -6184,6 +6330,7 @@ TEST_F(HttpConnectionManagerImplTest, TestUpstreamResponseHeadersSize) { EXPECT_CALL(response_encoder_, encodeHeaders(_, true)); expectOnDestroy(); + decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[0]->callbacks_->encodeHeaders( ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true); } @@ -6233,6 +6380,7 @@ TEST_F(HttpConnectionManagerImplTest, TestUpstreamResponseBodySize) { EXPECT_CALL(response_encoder_, encodeHeaders(_, false)); + decoder_filters_[0]->callbacks_->streamInfo().setResponseCodeDetails(""); decoder_filters_[0]->callbacks_->encodeHeaders( ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, false); @@ -6278,6 +6426,7 @@ TEST_F(HttpConnectionManagerImplTest, HeaderOnlyRequestAndResponseUsingHttp3) { decoder.decodeHeaders(std::move(headers), true); ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; + filter->callbacks_->streamInfo().setResponseCodeDetails(""); filter->callbacks_->encodeHeaders(std::move(response_headers), true); EXPECT_EQ(1U, stats_.named_.downstream_rq_2xx_.value()); @@ -6386,6 +6535,9 @@ TEST_F(HttpConnectionManagerImplTest, ConnectionFilterState) { // The connection life time data should have been written to the connection filter state. EXPECT_TRUE(filter_callbacks_.connection_.stream_info_.filter_state_->hasData( "per_downstream_connection")); + EXPECT_CALL(*decoder_filters_[1], onDestroy()); + EXPECT_CALL(*decoder_filters_[2], onDestroy()); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } class HttpConnectionManagerImplDeathTest : public HttpConnectionManagerImplTest { @@ -6430,6 +6582,7 @@ TEST_F(HttpConnectionManagerImplDeathTest, InvalidConnectionManagerConfig) { route_config_provider2_.reset(); // Only scoped route config provider valid. 
EXPECT_NO_THROW(conn_manager_->onData(fake_input, false)); + filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } } // namespace Http diff --git a/test/extensions/filters/http/cors/cors_filter_test.cc b/test/extensions/filters/http/cors/cors_filter_test.cc index b045ed78987d..1c49f88eed1b 100644 --- a/test/extensions/filters/http/cors/cors_filter_test.cc +++ b/test/extensions/filters/http/cors/cors_filter_test.cc @@ -253,6 +253,8 @@ TEST_F(CorsFilterTest, OptionsRequestMatchingOriginByWildcard) { EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.encodeHeaders(response_headers_, false)); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.encodeData(data_, false)); EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.encodeTrailers(response_trailers_)); + ASSERT_TRUE(decoder_callbacks_.stream_info_.responseCodeDetails().has_value()); + EXPECT_EQ(decoder_callbacks_.stream_info_.responseCodeDetails().value(), "cors_response"); } TEST_F(CorsFilterTest, OptionsRequestWithOriginCorsEnabledShadowDisabled) { diff --git a/test/extensions/filters/http/lua/lua_filter_test.cc b/test/extensions/filters/http/lua/lua_filter_test.cc index 84ac6fc706da..f0893c1e8c0a 100644 --- a/test/extensions/filters/http/lua/lua_filter_test.cc +++ b/test/extensions/filters/http/lua/lua_filter_test.cc @@ -70,6 +70,7 @@ class LuaHttpFilterTest : public testing::Test { })); EXPECT_CALL(encoder_callbacks_, activeSpan()).Times(AtLeast(0)); EXPECT_CALL(encoder_callbacks_, encodingBuffer()).Times(AtLeast(0)); + EXPECT_CALL(decoder_callbacks_, streamInfo()).Times(testing::AnyNumber()); } ~LuaHttpFilterTest() override { filter_->onDestroy(); } diff --git a/test/integration/protocol_integration_test.cc b/test/integration/protocol_integration_test.cc index 2fee4ec1367e..941fd92a603f 100644 --- a/test/integration/protocol_integration_test.cc +++ b/test/integration/protocol_integration_test.cc @@ -1956,9 +1956,32 @@ TEST_P(DownstreamProtocolIntegrationTest, BasicMaxStreamTimeout) { ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); + test_server_->waitForCounterGe("http.config_test.downstream_rq_max_duration_reached", 1); + response->waitForReset(); + EXPECT_TRUE(response->complete()); +} + +TEST_P(DownstreamProtocolIntegrationTest, BasicMaxStreamTimeoutLegacy) { + useAccessLog("%RESPONSE_FLAGS% %RESPONSE_CODE_DETAILS%"); + config_helper_.addRuntimeOverride("envoy.reloadable_features.allow_response_for_timeout", + "false"); + config_helper_.setDownstreamMaxStreamDuration(std::chrono::milliseconds(500)); + initialize(); + fake_upstreams_[0]->set_allow_unexpected_disconnects(true); + codec_client_ = makeHttpConnection(lookupPort("http")); + + auto encoder_decoder = codec_client_->startRequest(default_request_headers_); + request_encoder_ = &encoder_decoder.first; + auto response = std::move(encoder_decoder.second); + + ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); + test_server_->waitForCounterGe("http.config_test.downstream_rq_max_duration_reached", 1); response->waitForReset(); EXPECT_FALSE(response->complete()); + EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("max_duration_timeout")); } // Make sure that invalid authority headers get blocked at or before the HCM. 
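
Note: the two integration tests above exercise the same downstream max-stream-duration timeout with the new behavior (a response with max_duration_timeout details) and with the legacy reset-only behavior, toggled via the envoy.reloadable_features.allow_response_for_timeout runtime key. Outside of tests, an operator could pin the legacy behavior with a static runtime layer along the following lines. This is a minimal sketch, not part of the change itself: only the runtime key name comes from the test above, the layer name is illustrative, and the flat dotted key form assumes the usual bootstrap layered_runtime syntax.

    layered_runtime:
      layers:
      - name: static_layer_0
        static_layer:
          # Assumed equivalent of config_helper_.addRuntimeOverride(...) in the test:
          # keep the pre-change behavior of resetting the stream on timeout.
          envoy.reloadable_features.allow_response_for_timeout: false
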
diff --git a/test/integration/redirect_integration_test.cc b/test/integration/redirect_integration_test.cc index f569a3d63f14..0b03e662e9e8 100644 --- a/test/integration/redirect_integration_test.cc +++ b/test/integration/redirect_integration_test.cc @@ -8,6 +8,8 @@ namespace Envoy { +using testing::HasSubstr; + namespace { constexpr char HandleThreeHopLocationFormat[] = "http://handle.internal.redirect.max.three.hop/path{}"; @@ -110,6 +112,7 @@ TEST_P(RedirectIntegrationTest, InternalRedirectPassedThrough) { } TEST_P(RedirectIntegrationTest, BasicInternalRedirect) { + useAccessLog("%RESPONSE_FLAGS% %RESPONSE_CODE_DETAILS%"); // Validate that header sanitization is only called once. config_helper_.addConfigModifier( [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& @@ -141,6 +144,7 @@ TEST_P(RedirectIntegrationTest, BasicInternalRedirect) { EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_EQ(1, test_server_->counter("cluster.cluster_0.upstream_internal_redirect_succeeded_total") ->value()); + EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("internal_redirect")); } TEST_P(RedirectIntegrationTest, InternalRedirectWithThreeHopLimit) { diff --git a/test/server/admin/admin_filter_test.cc b/test/server/admin/admin_filter_test.cc index 524bafedb475..ee51cdecc169 100644 --- a/test/server/admin/admin_filter_test.cc +++ b/test/server/admin/admin_filter_test.cc @@ -45,6 +45,8 @@ TEST_P(AdminFilterTest, HeaderOnly) { EXPECT_CALL(callbacks_, encodeHeaders_(_, false)); EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_.decodeHeaders(request_headers_, true)); + ASSERT_TRUE(callbacks_.stream_info_.responseCodeDetails().has_value()); + EXPECT_EQ(callbacks_.stream_info_.responseCodeDetails().value(), "admin_filter_response"); } TEST_P(AdminFilterTest, Body) { From 8c3338d2756190d2ca38a49b7752d62a7d6b878c Mon Sep 17 00:00:00 2001 From: Sam Flattery <44659644+samflattery@users.noreply.github.com> Date: Thu, 30 Jul 2020 16:37:18 +0100 Subject: [PATCH 793/909] reactive xds_fuzz_test (#12350) Commit Message: Reactive xds_fuzz_test after tsan fix passes locally with --config=clang-tsan --runs_per_test=1000 without tsan failures Signed-off-by: Sam Flattery --- test/server/config_validation/BUILD | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/test/server/config_validation/BUILD b/test/server/config_validation/BUILD index 9671af5ddf63..3a710f8ff42e 100644 --- a/test/server/config_validation/BUILD +++ b/test/server/config_validation/BUILD @@ -156,13 +156,12 @@ envoy_cc_test_library( ], ) -# https://github.com/envoyproxy/envoy/issues/12258 -# envoy_cc_fuzz_test( -# name = "xds_fuzz_test", -# srcs = ["xds_fuzz_test.cc"], -# corpus = "xds_corpus", -# deps = [ -# ":xds_fuzz_lib", -# "//source/common/protobuf:utility_lib", -# ], -# ) +envoy_cc_fuzz_test( + name = "xds_fuzz_test", + srcs = ["xds_fuzz_test.cc"], + corpus = "xds_corpus", + deps = [ + ":xds_fuzz_lib", + "//source/common/protobuf:utility_lib", + ], +) From f521fc718780cda23e6f34669635494ea88890fc Mon Sep 17 00:00:00 2001 From: Nicolas Flacco <47160394+FAYiEKcbD0XFqF2QK2E4viAHg8rMm2VbjYKdjTg@users.noreply.github.com> Date: Thu, 30 Jul 2020 08:56:50 -0700 Subject: [PATCH 794/909] Revert "Redis fault injection (#10784)" (#12371) This reverts commit 048583b924e6c5c7812af56ec344ae210c168b3b. 
Signed-off-by: FAYiEKcbD0XFqF2QK2E4viAHg8rMm2VbjYKdjTg --- .../network/redis_proxy/v3/redis_proxy.proto | 56 +---- .../network_filters/redis_proxy_filter.rst | 52 +---- docs/root/version_history/current.rst | 1 - .../network/redis_proxy/v3/redis_proxy.proto | 56 +---- .../filters/network/common/redis/BUILD | 22 -- .../filters/network/common/redis/fault.h | 52 ----- .../network/common/redis/fault_impl.cc | 148 ------------- .../filters/network/common/redis/fault_impl.h | 108 --------- .../filters/network/redis_proxy/BUILD | 3 - .../network/redis_proxy/command_splitter.h | 22 -- .../redis_proxy/command_splitter_impl.cc | 137 +++--------- .../redis_proxy/command_splitter_impl.h | 148 +++---------- .../filters/network/redis_proxy/config.cc | 17 +- .../network/redis_proxy/proxy_filter.cc | 10 +- .../network/redis_proxy/proxy_filter.h | 8 +- .../filters/network/common/redis/BUILD | 13 -- .../network/common/redis/fault_test.cc | 206 ------------------ .../filters/network/redis_proxy/BUILD | 9 - .../redis_proxy/command_lookup_speed_test.cc | 12 +- .../redis_proxy/command_splitter_impl_test.cc | 162 +------------- .../network/redis_proxy/config_test.cc | 37 ---- .../filters/network/redis_proxy/mocks.cc | 7 - .../filters/network/redis_proxy/mocks.h | 20 -- .../network/redis_proxy/proxy_filter_test.cc | 41 ++-- .../redis_proxy_integration_test.cc | 56 +---- 25 files changed, 98 insertions(+), 1305 deletions(-) delete mode 100644 source/extensions/filters/network/common/redis/fault.h delete mode 100644 source/extensions/filters/network/common/redis/fault_impl.cc delete mode 100644 source/extensions/filters/network/common/redis/fault_impl.h delete mode 100644 test/extensions/filters/network/common/redis/fault_test.cc diff --git a/api/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto b/api/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto index 402937fff28f..af69d33a6340 100644 --- a/api/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto +++ b/api/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto @@ -23,7 +23,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Redis Proxy :ref:`configuration overview `. // [#extension: envoy.filters.network.redis_proxy] -// [#next-free-field: 9] +// [#next-free-field: 8] message RedisProxy { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.redis_proxy.v2.RedisProxy"; @@ -183,31 +183,6 @@ message RedisProxy { Route catch_all_route = 4; } - // RedisFault defines faults used for fault injection. - message RedisFault { - enum RedisFaultType { - // Delays requests. This is the base fault; other faults can have delays added. - DELAY = 0; - - // Returns errors on requests. - ERROR = 1; - } - - // Fault type. - RedisFaultType fault_type = 1 [(validate.rules).enum = {defined_only: true}]; - - // Percentage of requests fault applies to. - config.core.v3.RuntimeFractionalPercent fault_enabled = 2 - [(validate.rules).message = {required: true}]; - - // Delay for all faults. If not set, defaults to zero - google.protobuf.Duration delay = 3; - - // Commands fault is restricted to, if any. If not set, fault applies to all commands - // other than auth and ping (due to special handling of those commands in Envoy). - repeated string commands = 4; - } - reserved 2; reserved "cluster"; @@ -261,35 +236,6 @@ message RedisProxy { // AUTH, but no password is set" error will be returned. 
config.core.v3.DataSource downstream_auth_password = 6 [(udpa.annotations.sensitive) = true]; - // List of faults to inject. Faults currently come in two flavors: - // - Delay, which delays a request. - // - Error, which responds to a request with an error. Errors can also have delays attached. - // - // Example: - // - // .. code-block:: yaml - // - // faults: - // - fault_type: ERROR - // fault_enabled: - // default_value: - // numerator: 10 - // denominator: HUNDRED - // runtime_key: "bogus_key" - // commands: - // - GET - // - fault_type: DELAY - // fault_enabled: - // default_value: - // numerator: 10 - // denominator: HUNDRED - // runtime_key: "bogus_key" - // delay: 2s - // - // See the :ref:`fault injection section - // ` for more information on how to configure this. - repeated RedisFault faults = 8; - // If a username is provided an ACL style AUTH command will be required with a username and password. // Authenticate Redis client connections locally by forcing downstream clients to issue a `Redis // AUTH command `_ with this username and the *downstream_auth_password* diff --git a/docs/root/configuration/listeners/network_filters/redis_proxy_filter.rst b/docs/root/configuration/listeners/network_filters/redis_proxy_filter.rst index 6adf7c8ffb27..3c3fb77f3861 100644 --- a/docs/root/configuration/listeners/network_filters/redis_proxy_filter.rst +++ b/docs/root/configuration/listeners/network_filters/redis_proxy_filter.rst @@ -58,9 +58,7 @@ changed to microseconds by setting the configuration parameter :ref:`latency_in_ total, Counter, Number of commands success, Counter, Number of commands that were successful error, Counter, Number of commands that returned a partial or complete error response - latency, Histogram, Command execution time in milliseconds (including delay faults) - error_fault, Counter, Number of commands that had an error fault injected - delay_fault, Counter, Number of commands that had a delay fault injected + latency, Histogram, Command execution time in milliseconds .. _config_network_filters_redis_proxy_per_command_stats: @@ -72,51 +70,3 @@ The Redis proxy filter supports the following runtime settings: redis.drain_close_enabled % of connections that will be drain closed if the server is draining and would otherwise attempt a drain close. Defaults to 100. - -.. _config_network_filters_redis_proxy_fault_injection: - -Fault Injection ---------------- - -The Redis filter can perform fault injection. Currently, Delay and Error faults are supported. -Delay faults delay a request, and Error faults respond with an error. Moreover, errors can be delayed. - -Note that the Redis filter does not check for correctness in your configuration - it is the user's -responsibility to make sure both the default and runtime percentages are correct! This is because -percentages can be changed during runtime, and validating correctness at request time is expensive. -If multiple faults are specified, the fault injection percentage should not exceed 100% for a given -fault and Redis command combination. For example, if two faults are specified; one applying to GET at 60 -%, and one applying to all commands at 50%, that is a bad configuration as GET now has 110% chance of -applying a fault. This means that every request will have a fault. - -If a delay is injected, the delay is additive - if the request took 400ms and a delay of 100ms -is injected, then the total request latency is 500ms. 
Also, due to implementation of the redis protocol, -a delayed request will delay everything that comes in after it, due to the proxy's need to respect the -order of commands it receives. - -Note that faults must have a `fault_enabled` field, and are not enabled by default (if no default value -or runtime key are set). - -Example configuration: - -.. code-block:: yaml - - faults: - - fault_type: ERROR - fault_enabled: - default_value: - numerator: 10 - denominator: HUNDRED - runtime_key: "bogus_key" - commands: - - GET - - fault_type: DELAY - fault_enabled: - default_value: - numerator: 10 - denominator: HUNDRED - runtime_key: "bogus_key" - delay: 2s - -This creates two faults- an error, applying only to GET commands at 10%, and a delay, applying to all -commands at 10%. This means that 20% of GET commands will have a fault applied, as discussed earlier. diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 9c1d3d40a59e..f0920bc65f50 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -51,7 +51,6 @@ New Features * load balancer: added a :ref:`configuration` option to specify the active request bias used by the least request load balancer. * lua: added Lua APIs to access :ref:`SSL connection info ` object. * postgres network filter: :ref:`metadata ` is produced based on SQL query. -* redis: added fault injection support :ref:`fault injection for redis proxy `, described further in :ref:`configuration documentation `. * router: added new :ref:`envoy-ratelimited` retry policy, which allows retrying envoy's own rate limited responses. diff --git a/generated_api_shadow/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto index 0bc52493bb29..8f996c30f9ae 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto @@ -23,7 +23,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Redis Proxy :ref:`configuration overview `. // [#extension: envoy.filters.network.redis_proxy] -// [#next-free-field: 9] +// [#next-free-field: 8] message RedisProxy { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.redis_proxy.v2.RedisProxy"; @@ -182,31 +182,6 @@ message RedisProxy { [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; } - // RedisFault defines faults used for fault injection. - message RedisFault { - enum RedisFaultType { - // Delays requests. This is the base fault; other faults can have delays added. - DELAY = 0; - - // Returns errors on requests. - ERROR = 1; - } - - // Fault type. - RedisFaultType fault_type = 1 [(validate.rules).enum = {defined_only: true}]; - - // Percentage of requests fault applies to. - config.core.v3.RuntimeFractionalPercent fault_enabled = 2 - [(validate.rules).message = {required: true}]; - - // Delay for all faults. If not set, defaults to zero - google.protobuf.Duration delay = 3; - - // Commands fault is restricted to, if any. If not set, fault applies to all commands - // other than auth and ping (due to special handling of those commands in Envoy). - repeated string commands = 4; - } - // The prefix to use when emitting :ref:`statistics `. 
string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; @@ -256,35 +231,6 @@ message RedisProxy { // AUTH, but no password is set" error will be returned. config.core.v3.DataSource downstream_auth_password = 6 [(udpa.annotations.sensitive) = true]; - // List of faults to inject. Faults currently come in two flavors: - // - Delay, which delays a request. - // - Error, which responds to a request with an error. Errors can also have delays attached. - // - // Example: - // - // .. code-block:: yaml - // - // faults: - // - fault_type: ERROR - // fault_enabled: - // default_value: - // numerator: 10 - // denominator: HUNDRED - // runtime_key: "bogus_key" - // commands: - // - GET - // - fault_type: DELAY - // fault_enabled: - // default_value: - // numerator: 10 - // denominator: HUNDRED - // runtime_key: "bogus_key" - // delay: 2s - // - // See the :ref:`fault injection section - // ` for more information on how to configure this. - repeated RedisFault faults = 8; - // If a username is provided an ACL style AUTH command will be required with a username and password. // Authenticate Redis client connections locally by forcing downstream clients to issue a `Redis // AUTH command `_ with this username and the *downstream_auth_password* diff --git a/source/extensions/filters/network/common/redis/BUILD b/source/extensions/filters/network/common/redis/BUILD index 8c9c9e32d3ac..3b4dcedbb01e 100644 --- a/source/extensions/filters/network/common/redis/BUILD +++ b/source/extensions/filters/network/common/redis/BUILD @@ -100,25 +100,3 @@ envoy_cc_library( "//source/common/stats:utility_lib", ], ) - -envoy_cc_library( - name = "fault_interface", - hdrs = ["fault.h"], - deps = [ - "@envoy_api//envoy/type/v3:pkg_cc_proto", - ], -) - -envoy_cc_library( - name = "fault_lib", - srcs = ["fault_impl.cc"], - hdrs = ["fault_impl.h"], - deps = [ - ":codec_lib", - ":fault_interface", - "//include/envoy/common:random_generator_interface", - "//include/envoy/upstream:upstream_interface", - "//source/common/protobuf:utility_lib", - "@envoy_api//envoy/extensions/filters/network/redis_proxy/v3:pkg_cc_proto", - ], -) diff --git a/source/extensions/filters/network/common/redis/fault.h b/source/extensions/filters/network/common/redis/fault.h deleted file mode 100644 index 158969455c92..000000000000 --- a/source/extensions/filters/network/common/redis/fault.h +++ /dev/null @@ -1,52 +0,0 @@ -#pragma once - -#include -#include - -#include "envoy/common/pure.h" -#include "envoy/type/v3/percent.pb.h" - -#include "absl/types/optional.h" - -namespace Envoy { -namespace Extensions { -namespace NetworkFilters { -namespace Common { -namespace Redis { - -/** - * Fault Type. - */ -enum class FaultType { Delay, Error }; - -class Fault { -public: - virtual ~Fault() = default; - - virtual FaultType faultType() const PURE; - virtual std::chrono::milliseconds delayMs() const PURE; - virtual const std::vector commands() const PURE; - virtual envoy::type::v3::FractionalPercent defaultValue() const PURE; - virtual absl::optional runtimeKey() const PURE; -}; - -using FaultSharedPtr = std::shared_ptr; - -class FaultManager { -public: - virtual ~FaultManager() = default; - - /** - * Get fault type and delay given a Redis command. - * @param command supplies the Redis command string. 
- */ - virtual const Fault* getFaultForCommand(const std::string& command) const PURE; -}; - -using FaultManagerPtr = std::unique_ptr; - -} // namespace Redis -} // namespace Common -} // namespace NetworkFilters -} // namespace Extensions -} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/filters/network/common/redis/fault_impl.cc b/source/extensions/filters/network/common/redis/fault_impl.cc deleted file mode 100644 index 4b813ad9d799..000000000000 --- a/source/extensions/filters/network/common/redis/fault_impl.cc +++ /dev/null @@ -1,148 +0,0 @@ -#include "extensions/filters/network/common/redis/fault_impl.h" - -namespace Envoy { -namespace Extensions { -namespace NetworkFilters { -namespace Common { -namespace Redis { - -struct FaultManagerKeyNamesValues { - // The rbac filter rejected the request - const std::string AllKey = "ALL_KEY"; -}; -using FaultManagerKeyNames = ConstSingleton; - -FaultManagerImpl::FaultImpl::FaultImpl( - envoy::extensions::filters::network::redis_proxy::v3::RedisProxy_RedisFault base_fault) - : commands_(buildCommands(base_fault)) { - delay_ms_ = std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(base_fault, delay, 0)); - - switch (base_fault.fault_type()) { - case envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::RedisFault::DELAY: - fault_type_ = FaultType::Delay; - break; - case envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::RedisFault::ERROR: - fault_type_ = FaultType::Error; - break; - default: - NOT_REACHED_GCOVR_EXCL_LINE; - break; - } - - default_value_ = base_fault.fault_enabled().default_value(); - runtime_key_ = base_fault.fault_enabled().runtime_key(); -}; - -std::vector FaultManagerImpl::FaultImpl::buildCommands( - envoy::extensions::filters::network::redis_proxy::v3::RedisProxy_RedisFault base_fault) { - std::vector commands; - for (const std::string& command : base_fault.commands()) { - commands.emplace_back(absl::AsciiStrToLower(command)); - } - return commands; -} - -FaultManagerImpl::FaultManagerImpl( - Random::RandomGenerator& random, Runtime::Loader& runtime, - const Protobuf::RepeatedPtrField< - ::envoy::extensions::filters::network::redis_proxy::v3::RedisProxy_RedisFault> - faults) - : fault_map_(buildFaultMap(faults)), random_(random), runtime_(runtime) {} - -FaultMap FaultManagerImpl::buildFaultMap( - const Protobuf::RepeatedPtrField< - ::envoy::extensions::filters::network::redis_proxy::v3::RedisProxy_RedisFault> - faults) { - // Next, create the fault map that maps commands to pointers to Fault objects. - // Group faults by command - FaultMap fault_map; - for (auto base_fault : faults) { - auto fault_ptr = std::make_shared(base_fault); - if (!fault_ptr->commands().empty()) { - for (const std::string& command : fault_ptr->commands()) { - fault_map[command].emplace_back(fault_ptr); - } - } else { - // Generic "ALL" entry in map for faults that map to all keys; also add to each command - fault_map[FaultManagerKeyNames::get().AllKey].emplace_back(fault_ptr); - } - } - - // Add the ALL keys faults to each command too so that we can just query faults by command. - // Get all ALL_KEY faults. 
- FaultMap::iterator it_outer = fault_map.find(FaultManagerKeyNames::get().AllKey); - if (it_outer != fault_map.end()) { - for (const FaultSharedPtr& fault_ptr : it_outer->second) { - FaultMap::iterator it_inner; - for (it_inner = fault_map.begin(); it_inner != fault_map.end(); it_inner++) { - std::string command = it_inner->first; - if (command != FaultManagerKeyNames::get().AllKey) { - fault_map[command].push_back(fault_ptr); - } - } - } - } - return fault_map; -} - -uint64_t FaultManagerImpl::getIntegerNumeratorOfFractionalPercent( - absl::string_view key, const envoy::type::v3::FractionalPercent& default_value) const { - uint64_t numerator; - if (default_value.denominator() == envoy::type::v3::FractionalPercent::HUNDRED) { - numerator = default_value.numerator(); - } else { - int denominator = - ProtobufPercentHelper::fractionalPercentDenominatorToInt(default_value.denominator()); - numerator = (default_value.numerator() * 100) / denominator; - } - return runtime_.snapshot().getInteger(key, numerator); -} - -// Fault checking algorithm: -// -// For example, if we have an ERROR fault at 5% for all commands, and a DELAY fault at 10% for GET, -// if we receive a GET, we want 5% of GETs to get DELAY, and 10% to get ERROR. Thus, we need to -// amortize the percentages. -// -// 0. Get random number. -// 1. Get faults for given command. -// 2. For each fault, calculate the amortized fault injection percentage. -// -// Note that we do not check to make sure the probabilities of faults are <= 100%! -const Fault* FaultManagerImpl::getFaultForCommandInternal(const std::string& command) const { - FaultMap::const_iterator it_outer = fault_map_.find(command); - if (it_outer != fault_map_.end()) { - auto random_number = random_.random() % 100; - int amortized_fault = 0; - - for (const FaultSharedPtr& fault_ptr : it_outer->second) { - uint64_t fault_injection_percentage = getIntegerNumeratorOfFractionalPercent( - fault_ptr->runtimeKey().value(), fault_ptr->defaultValue()); - if (random_number < (fault_injection_percentage + amortized_fault)) { - return fault_ptr.get(); - } else { - amortized_fault += fault_injection_percentage; - } - } - } - - return nullptr; -} - -const Fault* FaultManagerImpl::getFaultForCommand(const std::string& command) const { - if (!fault_map_.empty()) { - if (fault_map_.count(command) > 0) { - return getFaultForCommandInternal(command); - } else { - return getFaultForCommandInternal(FaultManagerKeyNames::get().AllKey); - } - } - - return nullptr; -} - -} // namespace Redis -} // namespace Common -} // namespace NetworkFilters -} // namespace Extensions -} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/filters/network/common/redis/fault_impl.h b/source/extensions/filters/network/common/redis/fault_impl.h deleted file mode 100644 index 3850a8a4b4c9..000000000000 --- a/source/extensions/filters/network/common/redis/fault_impl.h +++ /dev/null @@ -1,108 +0,0 @@ -#pragma once - -#include -#include - -#include "envoy/api/api.h" -#include "envoy/common/random_generator.h" -#include "envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.pb.h" -#include "envoy/upstream/upstream.h" - -#include "common/protobuf/utility.h" -#include "common/singleton/const_singleton.h" - -#include "extensions/filters/network/common/redis/fault.h" - -namespace Envoy { -namespace Extensions { -namespace NetworkFilters { -namespace Common { -namespace Redis { - -using FaultMap = absl::flat_hash_map>; - -/** - * Message returned for particular types of faults. 
- */ -struct FaultMessagesValues { - const std::string Error = "Fault Injected: Error"; -}; -using FaultMessages = ConstSingleton; - -/** - * Fault management- creation, storage and retrieval. Faults are queried for by command, - * so they are stored in an unordered map using the command as key. For faults that apply to - * all commands, we use a special ALL_KEYS entry in the map. - */ -class FaultManagerImpl : public FaultManager { -public: - FaultManagerImpl( - Random::RandomGenerator& random, Runtime::Loader& runtime, - const Protobuf::RepeatedPtrField< - ::envoy::extensions::filters::network::redis_proxy::v3::RedisProxy_RedisFault> - base_faults); - - const Fault* getFaultForCommand(const std::string& command) const override; - - static FaultSharedPtr makeFaultForTest(Common::Redis::FaultType fault_type, - std::chrono::milliseconds delay_ms) { - envoy::type::v3::FractionalPercent default_value; - default_value.set_numerator(100); - default_value.set_denominator(envoy::type::v3::FractionalPercent::HUNDRED); - FaultImpl fault = - FaultImpl(fault_type, delay_ms, std::vector(), default_value, "foo"); - return std::make_shared(fault); - } - - // Allow the unit test to have access to private members. - friend class FaultTest; - -private: - class FaultImpl : public Fault { - public: - FaultImpl( - envoy::extensions::filters::network::redis_proxy::v3::RedisProxy_RedisFault base_fault); - FaultImpl(FaultType fault_type, std::chrono::milliseconds delay_ms, - const std::vector commands, - envoy::type::v3::FractionalPercent default_value, - absl::optional runtime_key) - : fault_type_(fault_type), delay_ms_(delay_ms), commands_(commands), - default_value_(default_value), runtime_key_(runtime_key) {} // For testing only - - FaultType faultType() const override { return fault_type_; }; - std::chrono::milliseconds delayMs() const override { return delay_ms_; }; - const std::vector commands() const override { return commands_; }; - envoy::type::v3::FractionalPercent defaultValue() const override { return default_value_; }; - absl::optional runtimeKey() const override { return runtime_key_; }; - - private: - static std::vector buildCommands( - envoy::extensions::filters::network::redis_proxy::v3::RedisProxy_RedisFault base_fault); - - FaultType fault_type_; - std::chrono::milliseconds delay_ms_; - const std::vector commands_; - envoy::type::v3::FractionalPercent default_value_; - absl::optional runtime_key_; - }; - - static FaultMap - buildFaultMap(const Protobuf::RepeatedPtrField< - ::envoy::extensions::filters::network::redis_proxy::v3::RedisProxy_RedisFault> - faults); - - uint64_t getIntegerNumeratorOfFractionalPercent( - absl::string_view key, const envoy::type::v3::FractionalPercent& default_value) const; - const Fault* getFaultForCommandInternal(const std::string& command) const; - const FaultMap fault_map_; - -protected: - Random::RandomGenerator& random_; - Runtime::Loader& runtime_; -}; - -} // namespace Redis -} // namespace Common -} // namespace NetworkFilters -} // namespace Extensions -} // namespace Envoy \ No newline at end of file diff --git a/source/extensions/filters/network/redis_proxy/BUILD b/source/extensions/filters/network/redis_proxy/BUILD index b7503144a5f8..4d452f0cad3c 100644 --- a/source/extensions/filters/network/redis_proxy/BUILD +++ b/source/extensions/filters/network/redis_proxy/BUILD @@ -17,7 +17,6 @@ envoy_cc_library( name = "command_splitter_interface", hdrs = ["command_splitter.h"], deps = [ - "//include/envoy/event:dispatcher_interface", 
"//source/extensions/filters/network/common/redis:codec_interface", ], ) @@ -65,7 +64,6 @@ envoy_cc_library( "//source/common/common:utility_lib", "//source/common/stats:timespan_lib", "//source/extensions/filters/network/common/redis:client_lib", - "//source/extensions/filters/network/common/redis:fault_lib", "//source/extensions/filters/network/common/redis:supported_commands_lib", "//source/extensions/filters/network/common/redis:utility_lib", ], @@ -127,7 +125,6 @@ envoy_cc_extension( "//source/extensions/filters/network:well_known_names", "//source/extensions/filters/network/common:factory_base_lib", "//source/extensions/filters/network/common/redis:codec_lib", - "//source/extensions/filters/network/common/redis:fault_lib", "//source/extensions/filters/network/common/redis:redis_command_stats_lib", "//source/extensions/filters/network/redis_proxy:command_splitter_lib", "//source/extensions/filters/network/redis_proxy:conn_pool_lib", diff --git a/source/extensions/filters/network/redis_proxy/command_splitter.h b/source/extensions/filters/network/redis_proxy/command_splitter.h index d5408b11ab0c..e03d0a92e137 100644 --- a/source/extensions/filters/network/redis_proxy/command_splitter.h +++ b/source/extensions/filters/network/redis_proxy/command_splitter.h @@ -3,7 +3,6 @@ #include #include "envoy/common/pure.h" -#include "envoy/event/dispatcher.h" #include "extensions/filters/network/common/redis/codec.h" @@ -81,27 +80,6 @@ class Instance { SplitCallbacks& callbacks) PURE; }; -using CommandSplitterPtr = std::unique_ptr; - -/** - * A command splitter factory that allows creation of the command splitter when - * we have access to the dispatcher parameter. This supports fault injection, - * specifically delay faults, which rely on the dispatcher for creating delay timers. - */ -class CommandSplitterFactory { -public: - virtual ~CommandSplitterFactory() = default; - - /** - * Create a command splitter. - * @param dispatcher supplies the dispatcher . - * @return CommandSplitterPtr a handle to a newly created command splitter. 
- */ - virtual CommandSplitterPtr create(Event::Dispatcher& dispatcher) PURE; -}; - -using CommandSplitterFactorySharedPtr = std::shared_ptr; - } // namespace CommandSplitter } // namespace RedisProxy } // namespace NetworkFilters diff --git a/source/extensions/filters/network/redis_proxy/command_splitter_impl.cc b/source/extensions/filters/network/redis_proxy/command_splitter_impl.cc index 624cead01a9c..a5bd89588f51 100644 --- a/source/extensions/filters/network/redis_proxy/command_splitter_impl.cc +++ b/source/extensions/filters/network/redis_proxy/command_splitter_impl.cc @@ -79,9 +79,7 @@ void SplitRequestBase::updateStats(const bool success) { } else { command_stats_.error_.inc(); } - if (command_latency_ != nullptr) { - command_latency_->complete(); - } + command_latency_->complete(); } SingleServerRequest::~SingleServerRequest() { ASSERT(!handle_); } @@ -92,12 +90,10 @@ void SingleServerRequest::onResponse(Common::Redis::RespValuePtr&& response) { callbacks_.onResponse(std::move(response)); } -void SingleServerRequest::onFailure() { onFailure(Response::get().UpstreamFailure); } - -void SingleServerRequest::onFailure(std::string error_msg) { +void SingleServerRequest::onFailure() { handle_ = nullptr; updateStats(false); - callbacks_.onResponse(Common::Redis::Utility::makeError(error_msg)); + callbacks_.onResponse(Common::Redis::Utility::makeError(Response::get().UpstreamFailure)); } void SingleServerRequest::cancel() { @@ -105,44 +101,13 @@ void SingleServerRequest::cancel() { handle_ = nullptr; } -SplitRequestPtr ErrorFaultRequest::create(SplitCallbacks& callbacks, CommandStats& command_stats, - TimeSource& time_source, bool delay_command_latency) { - std::unique_ptr request_ptr{ - new ErrorFaultRequest(callbacks, command_stats, time_source, delay_command_latency)}; - - request_ptr->onFailure(Common::Redis::FaultMessages::get().Error); - command_stats.error_fault_.inc(); - return nullptr; -} - -std::unique_ptr DelayFaultRequest::create(SplitCallbacks& callbacks, - CommandStats& command_stats, - TimeSource& time_source, - Event::Dispatcher& dispatcher, - std::chrono::milliseconds delay) { - return std::make_unique(callbacks, command_stats, time_source, dispatcher, - delay); -} - -void DelayFaultRequest::onResponse(Common::Redis::RespValuePtr&& response) { - response_ = std::move(response); - delay_timer_->enableTimer(delay_); -} - -void DelayFaultRequest::onDelayResponse() { - command_stats_.delay_fault_.inc(); - command_latency_->complete(); // Complete latency of the command stats of the wrapped request - callbacks_.onResponse(std::move(response_)); -} - -void DelayFaultRequest::cancel() { delay_timer_->disableTimer(); } - SplitRequestPtr SimpleRequest::create(Router& router, Common::Redis::RespValuePtr&& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, - TimeSource& time_source, bool delay_command_latency) { + TimeSource& time_source) { std::unique_ptr request_ptr{ - new SimpleRequest(callbacks, command_stats, time_source, delay_command_latency)}; + new SimpleRequest(callbacks, command_stats, time_source)}; + const auto route = router.upstreamPool(incoming_request->asArray()[1].asString()); if (route) { Common::Redis::RespValueSharedPtr base_request = std::move(incoming_request); @@ -161,7 +126,7 @@ SplitRequestPtr SimpleRequest::create(Router& router, SplitRequestPtr EvalRequest::create(Router& router, Common::Redis::RespValuePtr&& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, - TimeSource& time_source, bool 
delay_command_latency) { + TimeSource& time_source) { // EVAL looks like: EVAL script numkeys key [key ...] arg [arg ...] // Ensure there are at least three args to the command or it cannot be hashed. if (incoming_request->asArray().size() < 4) { @@ -170,8 +135,7 @@ SplitRequestPtr EvalRequest::create(Router& router, Common::Redis::RespValuePtr& return nullptr; } - std::unique_ptr request_ptr{ - new EvalRequest(callbacks, command_stats, time_source, delay_command_latency)}; + std::unique_ptr request_ptr{new EvalRequest(callbacks, command_stats, time_source)}; const auto route = router.upstreamPool(incoming_request->asArray()[3].asString()); if (route) { @@ -213,9 +177,8 @@ void FragmentedRequest::onChildFailure(uint32_t index) { SplitRequestPtr MGETRequest::create(Router& router, Common::Redis::RespValuePtr&& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, - TimeSource& time_source, bool delay_command_latency) { - std::unique_ptr request_ptr{ - new MGETRequest(callbacks, command_stats, time_source, delay_command_latency)}; + TimeSource& time_source) { + std::unique_ptr request_ptr{new MGETRequest(callbacks, command_stats, time_source)}; request_ptr->num_pending_responses_ = incoming_request->asArray().size() - 1; request_ptr->pending_requests_.reserve(request_ptr->num_pending_responses_); @@ -287,14 +250,13 @@ void MGETRequest::onChildResponse(Common::Redis::RespValuePtr&& value, uint32_t SplitRequestPtr MSETRequest::create(Router& router, Common::Redis::RespValuePtr&& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, - TimeSource& time_source, bool delay_command_latency) { + TimeSource& time_source) { if ((incoming_request->asArray().size() - 1) % 2 != 0) { onWrongNumberOfArguments(callbacks, *incoming_request); command_stats.error_.inc(); return nullptr; } - std::unique_ptr request_ptr{ - new MSETRequest(callbacks, command_stats, time_source, delay_command_latency)}; + std::unique_ptr request_ptr{new MSETRequest(callbacks, command_stats, time_source)}; request_ptr->num_pending_responses_ = (incoming_request->asArray().size() - 1) / 2; request_ptr->pending_requests_.reserve(request_ptr->num_pending_responses_); @@ -359,12 +321,13 @@ void MSETRequest::onChildResponse(Common::Redis::RespValuePtr&& value, uint32_t } } -SplitRequestPtr -SplitKeysSumResultRequest::create(Router& router, Common::Redis::RespValuePtr&& incoming_request, - SplitCallbacks& callbacks, CommandStats& command_stats, - TimeSource& time_source, bool delay_command_latency) { +SplitRequestPtr SplitKeysSumResultRequest::create(Router& router, + Common::Redis::RespValuePtr&& incoming_request, + SplitCallbacks& callbacks, + CommandStats& command_stats, + TimeSource& time_source) { std::unique_ptr request_ptr{ - new SplitKeysSumResultRequest(callbacks, command_stats, time_source, delay_command_latency)}; + new SplitKeysSumResultRequest(callbacks, command_stats, time_source)}; request_ptr->num_pending_responses_ = incoming_request->asArray().size() - 1; request_ptr->pending_requests_.reserve(request_ptr->num_pending_responses_); @@ -428,15 +391,13 @@ void SplitKeysSumResultRequest::onChildResponse(Common::Redis::RespValuePtr&& va } } -InstanceImpl::InstanceImpl(Router& router, Stats::Scope& scope, const std::string& stat_prefix, - TimeSource& time_source, bool latency_in_micros, - Common::Redis::FaultManager& fault_manager, - Event::Dispatcher& dispatcher) - : simple_command_handler_(router), eval_command_handler_(router), mget_handler_(router), - mset_handler_(router), - 
split_keys_sum_result_handler_(router), stats_{ALL_COMMAND_SPLITTER_STATS(POOL_COUNTER_PREFIX( - scope, stat_prefix + "splitter."))}, - time_source_(time_source), fault_manager_(fault_manager), dispatcher_(dispatcher) { +InstanceImpl::InstanceImpl(RouterPtr&& router, Stats::Scope& scope, const std::string& stat_prefix, + TimeSource& time_source, bool latency_in_micros) + : router_(std::move(router)), simple_command_handler_(*router_), + eval_command_handler_(*router_), mget_handler_(*router_), mset_handler_(*router_), + split_keys_sum_result_handler_(*router_), + stats_{ALL_COMMAND_SPLITTER_STATS(POOL_COUNTER_PREFIX(scope, stat_prefix + "splitter."))}, + time_source_(time_source) { for (const std::string& command : Common::Redis::SupportedCommands::simpleCommands()) { addHandler(scope, stat_prefix, command, latency_in_micros, simple_command_handler_); } @@ -507,7 +468,6 @@ SplitRequestPtr InstanceImpl::makeRequest(Common::Redis::RespValuePtr&& request, return nullptr; } - // Get the handler for the downstream request auto handler = handler_lookup_table_.find(to_lower_string.c_str()); if (handler == nullptr) { stats_.unsupported_command_.inc(); @@ -515,46 +475,11 @@ SplitRequestPtr InstanceImpl::makeRequest(Common::Redis::RespValuePtr&& request, fmt::format("unsupported command '{}'", request->asArray()[0].asString()))); return nullptr; } - - // Fault Injection Check - const Common::Redis::Fault* fault_ptr = fault_manager_.getFaultForCommand(to_lower_string); - - // Check if delay, which determines which callbacks to use. If a delay fault is enabled, - // the delay fault itself wraps the request (or other fault) and the delay fault itself - // implements the callbacks functions, and in turn calls the real callbacks after injecting - // delay on the result of the wrapped request or fault. - const bool has_delay_fault = - fault_ptr != nullptr && fault_ptr->delayMs() > std::chrono::milliseconds(0); - std::unique_ptr delay_fault_ptr; - if (has_delay_fault) { - delay_fault_ptr = DelayFaultRequest::create(callbacks, handler->command_stats_, time_source_, - dispatcher_, fault_ptr->delayMs()); - } - - // Note that the command_stats_ object of the original request is used for faults, so that our - // downstream metrics reflect any faults added (with special fault metrics) or extra latency from - // a delay. 2) we use a ternary operator for the callback parameter- we want to use the - // delay_fault as callback if there is a delay per the earlier comment. ENVOY_LOG(debug, "redis: splitting '{}'", request->toString()); handler->command_stats_.total_.inc(); - - SplitRequestPtr request_ptr; - if (fault_ptr != nullptr && fault_ptr->faultType() == Common::Redis::FaultType::Error) { - request_ptr = ErrorFaultRequest::create(has_delay_fault ? *delay_fault_ptr : callbacks, - handler->command_stats_, time_source_, has_delay_fault); - } else { - request_ptr = handler->handler_.get().startRequest( - std::move(request), has_delay_fault ? *delay_fault_ptr : callbacks, handler->command_stats_, - time_source_, has_delay_fault); - } - - // Complete delay, if any. The delay fault takes ownership of the wrapped request. 
- if (has_delay_fault) { - delay_fault_ptr->wrapped_request_ptr_ = std::move(request_ptr); - return delay_fault_ptr; - } else { - return request_ptr; - } + SplitRequestPtr request_ptr = handler->handler_.get().startRequest( + std::move(request), callbacks, handler->command_stats_, time_source_); + return request_ptr; } void InstanceImpl::onInvalidRequest(SplitCallbacks& callbacks) { @@ -580,12 +505,6 @@ void InstanceImpl::addHandler(Stats::Scope& scope, const std::string& stat_prefi handler})); } -CommandSplitterPtr CommandSplitterFactoryImpl::create(Event::Dispatcher& dispatcher) { - return std::make_unique(*router_, scope_, stat_prefix_, - time_source_, latency_in_micros_, - *fault_manager_, dispatcher); -} - } // namespace CommandSplitter } // namespace RedisProxy } // namespace NetworkFilters diff --git a/source/extensions/filters/network/redis_proxy/command_splitter_impl.h b/source/extensions/filters/network/redis_proxy/command_splitter_impl.h index 630bbcb71503..b67b4498f0cf 100644 --- a/source/extensions/filters/network/redis_proxy/command_splitter_impl.h +++ b/source/extensions/filters/network/redis_proxy/command_splitter_impl.h @@ -10,10 +10,10 @@ #include "common/common/logger.h" #include "common/common/utility.h" +#include "common/singleton/const_singleton.h" #include "common/stats/timespan_impl.h" #include "extensions/filters/network/common/redis/client_impl.h" -#include "extensions/filters/network/common/redis/fault_impl.h" #include "extensions/filters/network/common/redis/utility.h" #include "extensions/filters/network/redis_proxy/command_splitter.h" #include "extensions/filters/network/redis_proxy/conn_pool_impl.h" @@ -42,9 +42,7 @@ using Response = ConstSingleton; #define ALL_COMMAND_STATS(COUNTER) \ COUNTER(total) \ COUNTER(success) \ - COUNTER(error) \ - COUNTER(error_fault) \ - COUNTER(delay_fault) + COUNTER(error) /** * Struct definition for all command stats. 
@see stats_macros.h @@ -60,7 +58,7 @@ class CommandHandler { virtual SplitRequestPtr startRequest(Common::Redis::RespValuePtr&& request, SplitCallbacks& callbacks, CommandStats& command_stats, - TimeSource& time_source, bool delay_command_latency) PURE; + TimeSource& time_source) PURE; }; class CommandHandlerBase { @@ -76,14 +74,10 @@ class SplitRequestBase : public SplitRequest { const Common::Redis::RespValue& request); void updateStats(const bool success); - SplitRequestBase(CommandStats& command_stats, TimeSource& time_source, bool delay_command_latency) + SplitRequestBase(CommandStats& command_stats, TimeSource& time_source) : command_stats_(command_stats) { - if (!delay_command_latency) { - command_latency_ = std::make_unique( - command_stats_.latency_, time_source); - } else { - command_latency_ = nullptr; - } + command_latency_ = std::make_unique( + command_stats_.latency_, time_source); } CommandStats& command_stats_; Stats::TimespanPtr command_latency_; @@ -99,16 +93,14 @@ class SingleServerRequest : public SplitRequestBase, public ConnPool::PoolCallba // ConnPool::PoolCallbacks void onResponse(Common::Redis::RespValuePtr&& response) override; void onFailure() override; - void onFailure(std::string error_msg); // RedisProxy::CommandSplitter::SplitRequest void cancel() override; protected: SingleServerRequest(SplitCallbacks& callbacks, CommandStats& command_stats, - TimeSource& time_source, bool delay_command_latency) - : SplitRequestBase(command_stats, time_source, delay_command_latency), callbacks_(callbacks) { - } + TimeSource& time_source) + : SplitRequestBase(command_stats, time_source), callbacks_(callbacks) {} SplitCallbacks& callbacks_; ConnPool::InstanceSharedPtr conn_pool_; @@ -116,57 +108,6 @@ class SingleServerRequest : public SplitRequestBase, public ConnPool::PoolCallba Common::Redis::RespValuePtr incoming_request_; }; -/** - * ErrorFaultRequest returns an error. - */ -class ErrorFaultRequest : public SingleServerRequest { -public: - static SplitRequestPtr create(SplitCallbacks& callbacks, CommandStats& command_stats, - TimeSource& time_source, bool has_delaydelay_command_latency_fault); - -private: - ErrorFaultRequest(SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, - bool delay_command_latency) - : SingleServerRequest(callbacks, command_stats, time_source, delay_command_latency) {} -}; - -/** - * DelayFaultRequest wraps a request- either a normal request or a fault- and delays it. 
- */ -class DelayFaultRequest : public SplitRequestBase, public SplitCallbacks { -public: - static std::unique_ptr - create(SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, - Event::Dispatcher& dispatcher, std::chrono::milliseconds delay); - - DelayFaultRequest(SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, - Event::Dispatcher& dispatcher, std::chrono::milliseconds delay) - : SplitRequestBase(command_stats, time_source, false), callbacks_(callbacks), delay_(delay) { - delay_timer_ = dispatcher.createTimer([this]() -> void { onDelayResponse(); }); - } - - // SplitCallbacks - bool connectionAllowed() override { return callbacks_.connectionAllowed(); } - void onAuth(const std::string& password) override { callbacks_.onAuth(password); } - void onAuth(const std::string& username, const std::string& password) override { - callbacks_.onAuth(username, password); - } - void onResponse(Common::Redis::RespValuePtr&& response) override; - - // RedisProxy::CommandSplitter::SplitRequest - void cancel() override; - - SplitRequestPtr wrapped_request_ptr_; - -private: - void onDelayResponse(); - - SplitCallbacks& callbacks_; - std::chrono::milliseconds delay_; - Event::TimerPtr delay_timer_; - Common::Redis::RespValuePtr response_; -}; - /** * SimpleRequest hashes the first argument as the key. */ @@ -174,12 +115,11 @@ class SimpleRequest : public SingleServerRequest { public: static SplitRequestPtr create(Router& router, Common::Redis::RespValuePtr&& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, - TimeSource& time_source, bool delay_command_latency); + TimeSource& time_source); private: - SimpleRequest(SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, - bool delay_command_latency) - : SingleServerRequest(callbacks, command_stats, time_source, delay_command_latency) {} + SimpleRequest(SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source) + : SingleServerRequest(callbacks, command_stats, time_source) {} }; /** @@ -189,12 +129,11 @@ class EvalRequest : public SingleServerRequest { public: static SplitRequestPtr create(Router& router, Common::Redis::RespValuePtr&& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, - TimeSource& time_source, bool delay_command_latency); + TimeSource& time_source); private: - EvalRequest(SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, - bool delay_command_latency) - : SingleServerRequest(callbacks, command_stats, time_source, delay_command_latency) {} + EvalRequest(SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source) + : SingleServerRequest(callbacks, command_stats, time_source) {} }; /** @@ -210,10 +149,8 @@ class FragmentedRequest : public SplitRequestBase { void cancel() override; protected: - FragmentedRequest(SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source, - bool delay_command_latency) - : SplitRequestBase(command_stats, time_source, delay_command_latency), callbacks_(callbacks) { - } + FragmentedRequest(SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source) + : SplitRequestBase(command_stats, time_source), callbacks_(callbacks) {} struct PendingRequest : public ConnPool::PoolCallbacks { PendingRequest(FragmentedRequest& parent, uint32_t index) : parent_(parent), index_(index) {} @@ -248,12 +185,11 @@ class MGETRequest : public FragmentedRequest, Logger::Loggable { public: - 
InstanceImpl(Router& router, Stats::Scope& scope, const std::string& stat_prefix, - TimeSource& time_source, bool latency_in_micros, - Common::Redis::FaultManager& fault_manager, Event::Dispatcher& dispatcher); + InstanceImpl(RouterPtr&& router, Stats::Scope& scope, const std::string& stat_prefix, + TimeSource& time_source, bool latency_in_micros); // RedisProxy::CommandSplitter::Instance SplitRequestPtr makeRequest(Common::Redis::RespValuePtr&& request, @@ -354,6 +286,7 @@ class InstanceImpl : public Instance, Logger::Loggable { bool latency_in_micros, CommandHandler& handler); void onInvalidRequest(SplitCallbacks& callbacks); + RouterPtr router_; CommandHandlerFactory simple_command_handler_; CommandHandlerFactory eval_command_handler_; CommandHandlerFactory mget_handler_; @@ -362,27 +295,6 @@ class InstanceImpl : public Instance, Logger::Loggable { TrieLookupTable handler_lookup_table_; InstanceStats stats_; TimeSource& time_source_; - Common::Redis::FaultManager& fault_manager_; - Event::Dispatcher& dispatcher_; -}; - -class CommandSplitterFactoryImpl : public CommandSplitterFactory { -public: - CommandSplitterFactoryImpl(RouterPtr&& router, Common::Redis::FaultManagerPtr fault_manager, - Stats::Scope& scope, const std::string& stat_prefix, - TimeSource& time_source, bool latency_in_micros) - : router_(std::move(router)), fault_manager_(std::move(fault_manager)), scope_(scope), - stat_prefix_(stat_prefix), time_source_(time_source), - latency_in_micros_(latency_in_micros){}; - CommandSplitterPtr create(Event::Dispatcher& dispatcher) override; - -private: - RouterPtr router_; - Common::Redis::FaultManagerPtr fault_manager_; - Stats::Scope& scope_; - const std::string& stat_prefix_; - TimeSource& time_source_; - bool latency_in_micros_; }; } // namespace CommandSplitter diff --git a/source/extensions/filters/network/redis_proxy/config.cc b/source/extensions/filters/network/redis_proxy/config.cc index c75e61b2fcb0..2d62f511b393 100644 --- a/source/extensions/filters/network/redis_proxy/config.cc +++ b/source/extensions/filters/network/redis_proxy/config.cc @@ -5,7 +5,6 @@ #include "extensions/common/redis/cluster_refresh_manager_impl.h" #include "extensions/filters/network/common/redis/client_impl.h" -#include "extensions/filters/network/common/redis/fault_impl.h" #include "extensions/filters/network/redis_proxy/command_splitter_impl.h" #include "extensions/filters/network/redis_proxy/proxy_filter.h" #include "extensions/filters/network/redis_proxy/router_impl.h" @@ -87,18 +86,14 @@ Network::FilterFactoryCb RedisProxyFilterConfigFactory::createFilterFactoryFromP auto router = std::make_unique(prefix_routes, std::move(upstreams), context.runtime()); - auto fault_manager = std::make_unique( - context.random(), context.runtime(), proto_config.faults()); - - auto splitter_factory = std::make_shared( - std::move(router), std::move(fault_manager), context.scope(), filter_config->stat_prefix_, - context.timeSource(), proto_config.latency_in_micros()); - - return [splitter_factory, refresh_manager, - filter_config](Network::FilterManager& filter_manager) -> void { + std::shared_ptr splitter = + std::make_shared( + std::move(router), context.scope(), filter_config->stat_prefix_, context.timeSource(), + proto_config.latency_in_micros()); + return [splitter, filter_config](Network::FilterManager& filter_manager) -> void { Common::Redis::DecoderFactoryImpl factory; filter_manager.addReadFilter(std::make_shared( - factory, Common::Redis::EncoderPtr{new Common::Redis::EncoderImpl()}, *splitter_factory, 
+ factory, Common::Redis::EncoderPtr{new Common::Redis::EncoderImpl()}, *splitter, filter_config)); }; } diff --git a/source/extensions/filters/network/redis_proxy/proxy_filter.cc b/source/extensions/filters/network/redis_proxy/proxy_filter.cc index bbbadb29de5d..aa2f558cc51a 100644 --- a/source/extensions/filters/network/redis_proxy/proxy_filter.cc +++ b/source/extensions/filters/network/redis_proxy/proxy_filter.cc @@ -34,11 +34,10 @@ ProxyStats ProxyFilterConfig::generateStats(const std::string& prefix, Stats::Sc } ProxyFilter::ProxyFilter(Common::Redis::DecoderFactory& factory, - Common::Redis::EncoderPtr&& encoder, - CommandSplitter::CommandSplitterFactory& splitter_factory, + Common::Redis::EncoderPtr&& encoder, CommandSplitter::Instance& splitter, ProxyFilterConfigSharedPtr config) - : decoder_(factory.create(*this)), encoder_(std::move(encoder)), - splitter_factory_(splitter_factory), config_(config) { + : decoder_(factory.create(*this)), encoder_(std::move(encoder)), splitter_(splitter), + config_(config) { config_->stats_.downstream_cx_total_.inc(); config_->stats_.downstream_cx_active_.inc(); connection_allowed_ = @@ -58,13 +57,12 @@ void ProxyFilter::initializeReadFilterCallbacks(Network::ReadFilterCallbacks& ca config_->stats_.downstream_cx_tx_bytes_total_, config_->stats_.downstream_cx_tx_bytes_buffered_, nullptr, nullptr}); - splitter_ = splitter_factory_.create(callbacks_->connection().dispatcher()); } void ProxyFilter::onRespValue(Common::Redis::RespValuePtr&& value) { pending_requests_.emplace_back(*this); PendingRequest& request = pending_requests_.back(); - CommandSplitter::SplitRequestPtr split = splitter_->makeRequest(std::move(value), request); + CommandSplitter::SplitRequestPtr split = splitter_.makeRequest(std::move(value), request); if (split) { // The splitter can immediately respond and destroy the pending request. Only store the handle // if the request is still alive. 
diff --git a/source/extensions/filters/network/redis_proxy/proxy_filter.h b/source/extensions/filters/network/redis_proxy/proxy_filter.h index 4b7877bc00d7..1694a2a0640e 100644 --- a/source/extensions/filters/network/redis_proxy/proxy_filter.h +++ b/source/extensions/filters/network/redis_proxy/proxy_filter.h @@ -75,8 +75,7 @@ class ProxyFilter : public Network::ReadFilter, public Network::ConnectionCallbacks { public: ProxyFilter(Common::Redis::DecoderFactory& factory, Common::Redis::EncoderPtr&& encoder, - CommandSplitter::CommandSplitterFactory& splitter_factory, - ProxyFilterConfigSharedPtr config); + CommandSplitter::Instance& splitter, ProxyFilterConfigSharedPtr config); ~ProxyFilter() override; // Network::ReadFilter @@ -95,8 +94,6 @@ class ProxyFilter : public Network::ReadFilter, bool connectionAllowed() { return connection_allowed_; } private: - friend class RedisProxyFilterTest; - struct PendingRequest : public CommandSplitter::SplitCallbacks { PendingRequest(ProxyFilter& parent); ~PendingRequest() override; @@ -122,8 +119,7 @@ class ProxyFilter : public Network::ReadFilter, Common::Redis::DecoderPtr decoder_; Common::Redis::EncoderPtr encoder_; - CommandSplitter::CommandSplitterFactory& splitter_factory_; - CommandSplitter::CommandSplitterPtr splitter_; + CommandSplitter::Instance& splitter_; ProxyFilterConfigSharedPtr config_; Buffer::OwnedImpl encoder_buffer_; Network::ReadFilterCallbacks* callbacks_{}; diff --git a/test/extensions/filters/network/common/redis/BUILD b/test/extensions/filters/network/common/redis/BUILD index e8e445c8b608..dffc23954488 100644 --- a/test/extensions/filters/network/common/redis/BUILD +++ b/test/extensions/filters/network/common/redis/BUILD @@ -60,16 +60,3 @@ envoy_cc_test( "@envoy_api//envoy/extensions/filters/network/redis_proxy/v3:pkg_cc_proto", ], ) - -envoy_cc_test( - name = "fault_test", - srcs = ["fault_test.cc"], - deps = [ - ":redis_mocks", - "//source/common/common:assert_lib", - "//source/extensions/filters/network/common/redis:fault_lib", - "//test/mocks/runtime:runtime_mocks", - "//test/test_common:test_runtime_lib", - "//test/test_common:utility_lib", - ], -) diff --git a/test/extensions/filters/network/common/redis/fault_test.cc b/test/extensions/filters/network/common/redis/fault_test.cc deleted file mode 100644 index a80caf5c2d2a..000000000000 --- a/test/extensions/filters/network/common/redis/fault_test.cc +++ /dev/null @@ -1,206 +0,0 @@ -#include "envoy/common/random_generator.h" - -#include "common/common/assert.h" - -#include "extensions/filters/network/common/redis/fault_impl.h" - -#include "test/extensions/filters/network/common/redis/mocks.h" -#include "test/mocks/runtime/mocks.h" -#include "test/test_common/printers.h" -#include "test/test_common/test_runtime.h" -#include "test/test_common/utility.h" - -#include "gtest/gtest.h" - -using testing::Return; - -namespace Envoy { -namespace Extensions { -namespace NetworkFilters { -namespace Common { -namespace Redis { - -using RedisProxy = envoy::extensions::filters::network::redis_proxy::v3::RedisProxy; -using FractionalPercent = envoy::type::v3::FractionalPercent; -class FaultTest : public testing::Test { -public: - const std::string RUNTIME_KEY = "runtime_key"; - - void - createCommandFault(RedisProxy::RedisFault* fault, std::string command_str, int delay_seconds, - absl::optional fault_percentage, - absl::optional denominator, - absl::optional runtime_key) { - // We don't set fault type as it isn't used in the test - - auto* commands = fault->mutable_commands(); - auto* 
command = commands->Add(); - command->assign(command_str); - - fault->set_fault_type(envoy::extensions::filters::network::redis_proxy::v3:: - RedisProxy_RedisFault_RedisFaultType_ERROR); - - addFaultPercentage(fault, fault_percentage, denominator, runtime_key); - addDelay(fault, delay_seconds); - } - - void - createAllKeyFault(RedisProxy::RedisFault* fault, int delay_seconds, - absl::optional fault_percentage, - absl::optional denominator, - absl::optional runtime_key) { - addFaultPercentage(fault, fault_percentage, denominator, runtime_key); - addDelay(fault, delay_seconds); - } - - void - addFaultPercentage(RedisProxy::RedisFault* fault, absl::optional fault_percentage, - absl::optional denominator, - absl::optional runtime_key) { - envoy::config::core::v3::RuntimeFractionalPercent* fault_enabled = - fault->mutable_fault_enabled(); - - if (runtime_key.has_value()) { - fault_enabled->set_runtime_key(runtime_key.value()); - } - auto* percentage = fault_enabled->mutable_default_value(); - if (fault_percentage.has_value()) { - percentage->set_numerator(fault_percentage.value()); - } - if (denominator.has_value()) { - percentage->set_denominator(denominator.value()); - } - } - - void addDelay(RedisProxy::RedisFault* fault, int delay_seconds) { - std::chrono::seconds duration = std::chrono::seconds(delay_seconds); - fault->mutable_delay()->set_seconds(duration.count()); - } - - testing::NiceMock random_; - testing::NiceMock runtime_; -}; - -TEST_F(FaultTest, MakeFaultForTestHelper) { - Common::Redis::FaultSharedPtr fault_ptr = - FaultManagerImpl::makeFaultForTest(FaultType::Error, std::chrono::milliseconds(10)); - - ASSERT_TRUE(fault_ptr->faultType() == FaultType::Error); - ASSERT_TRUE(fault_ptr->delayMs() == std::chrono::milliseconds(10)); -} - -TEST_F(FaultTest, NoFaults) { - RedisProxy redis_config; - auto* faults = redis_config.mutable_faults(); - - TestScopedRuntime scoped_runtime; - FaultManagerImpl fault_manager = FaultManagerImpl(random_, runtime_, *faults); - - const Fault* fault_ptr = fault_manager.getFaultForCommand("get"); - ASSERT_TRUE(fault_ptr == nullptr); -} - -TEST_F(FaultTest, SingleCommandFaultNotEnabled) { - RedisProxy redis_config; - auto* faults = redis_config.mutable_faults(); - createCommandFault(faults->Add(), "get", 0, 0, FractionalPercent::HUNDRED, RUNTIME_KEY); - - TestScopedRuntime scoped_runtime; - FaultManagerImpl fault_manager = FaultManagerImpl(random_, runtime_, *faults); - - EXPECT_CALL(random_, random()).WillOnce(Return(0)); - EXPECT_CALL(runtime_, snapshot()); - const Fault* fault_ptr = fault_manager.getFaultForCommand("get"); - ASSERT_TRUE(fault_ptr == nullptr); -} - -TEST_F(FaultTest, SingleCommandFault) { - // Inject a single fault. Notably we use a different denominator to test that code path; normally - // we use FractionalPercent::HUNDRED. - RedisProxy redis_config; - auto* faults = redis_config.mutable_faults(); - createCommandFault(faults->Add(), "ttl", 0, 5000, FractionalPercent::TEN_THOUSAND, RUNTIME_KEY); - - TestScopedRuntime scoped_runtime; - FaultManagerImpl fault_manager = FaultManagerImpl(random_, runtime_, *faults); - - EXPECT_CALL(random_, random()).WillOnce(Return(1)); - EXPECT_CALL(runtime_.snapshot_, getInteger(RUNTIME_KEY, 50)).WillOnce(Return(10)); - - const Fault* fault_ptr = fault_manager.getFaultForCommand("ttl"); - ASSERT_TRUE(fault_ptr != nullptr); -} - -TEST_F(FaultTest, SingleCommandFaultWithNoDefaultValueOrRuntimeValue) { - // Inject a single fault with no default value or runtime value. 
- RedisProxy redis_config; - auto* faults = redis_config.mutable_faults(); - createCommandFault(faults->Add(), "ttl", 0, absl::nullopt, absl::nullopt, absl::nullopt); - - TestScopedRuntime scoped_runtime; - FaultManagerImpl fault_manager = FaultManagerImpl(random_, runtime_, *faults); - - EXPECT_CALL(random_, random()).WillOnce(Return(1)); - const Fault* fault_ptr = fault_manager.getFaultForCommand("ttl"); - ASSERT_TRUE(fault_ptr == nullptr); -} - -TEST_F(FaultTest, MultipleFaults) { - // This creates 2 faults, but the map will have 3 entries, as each command points to - // command specific faults AND the general fault. The second fault has no runtime key, - // forcing the runtime key check to be false in application code and falling back to the - // default value. - RedisProxy redis_config; - auto* faults = redis_config.mutable_faults(); - createCommandFault(faults->Add(), "get", 0, 25, FractionalPercent::HUNDRED, RUNTIME_KEY); - createAllKeyFault(faults->Add(), 2, 25, FractionalPercent::HUNDRED, absl::nullopt); - - TestScopedRuntime scoped_runtime; - FaultManagerImpl fault_manager = FaultManagerImpl(random_, runtime_, *faults); - const Fault* fault_ptr; - - // Get command - should have a fault 50% of time - // For the first call we mock the random percentage to be 10%, which will give us the first fault - // with 0s delay. - EXPECT_CALL(random_, random()).WillOnce(Return(1)); - EXPECT_CALL(runtime_.snapshot_, getInteger(_, 25)).WillOnce(Return(10)); - fault_ptr = fault_manager.getFaultForCommand("get"); - ASSERT_TRUE(fault_ptr != nullptr); - ASSERT_EQ(fault_ptr->delayMs(), std::chrono::milliseconds(0)); - - // Another Get; we mock the random percentage to be 25%, giving us the ALL_KEY fault - EXPECT_CALL(random_, random()).WillOnce(Return(25)); - EXPECT_CALL(runtime_.snapshot_, getInteger(_, _)) - .Times(2) - .WillOnce(Return(10)) - .WillOnce(Return(50)); - fault_ptr = fault_manager.getFaultForCommand("get"); - ASSERT_TRUE(fault_ptr != nullptr); - ASSERT_EQ(fault_ptr->delayMs(), std::chrono::milliseconds(2000)); - - // No fault for Get command with mocked random percentage >= 50%. - EXPECT_CALL(random_, random()).WillOnce(Return(50)); - EXPECT_CALL(runtime_.snapshot_, getInteger(_, _)).Times(2); - fault_ptr = fault_manager.getFaultForCommand("get"); - ASSERT_TRUE(fault_ptr == nullptr); - - // Any other command; we mock the random percentage to be 1%, giving us the ALL_KEY fault - EXPECT_CALL(random_, random()).WillOnce(Return(1)); - EXPECT_CALL(runtime_.snapshot_, getInteger(_, _)).WillOnce(Return(10)); - - fault_ptr = fault_manager.getFaultForCommand("ttl"); - ASSERT_TRUE(fault_ptr != nullptr); - ASSERT_EQ(fault_ptr->delayMs(), std::chrono::milliseconds(2000)); - - // No fault for any other command with mocked random percentage >= 25%. 
- EXPECT_CALL(random_, random()).WillOnce(Return(25)); - EXPECT_CALL(runtime_.snapshot_, getInteger(_, _)); - fault_ptr = fault_manager.getFaultForCommand("ttl"); - ASSERT_TRUE(fault_ptr == nullptr); -} - -} // namespace Redis -} // namespace Common -} // namespace NetworkFilters -} // namespace Extensions -} // namespace Envoy diff --git a/test/extensions/filters/network/redis_proxy/BUILD b/test/extensions/filters/network/redis_proxy/BUILD index 034d9bd11b7c..13980d9b57ca 100644 --- a/test/extensions/filters/network/redis_proxy/BUILD +++ b/test/extensions/filters/network/redis_proxy/BUILD @@ -18,22 +18,16 @@ envoy_extension_cc_test( name = "command_splitter_impl_test", srcs = ["command_splitter_impl_test.cc"], extension_name = "envoy.filters.network.redis_proxy", - # This test takes a while to run specially under tsan. - # Shard it to avoid test timeout. - shard_count = 2, deps = [ ":redis_mocks", "//source/common/stats:isolated_store_lib", "//source/common/stats:stats_lib", - "//source/extensions/filters/network/common/redis:fault_lib", "//source/extensions/filters/network/redis_proxy:command_splitter_lib", "//source/extensions/filters/network/redis_proxy:router_interface", "//test/extensions/filters/network/common/redis:redis_mocks", "//test/mocks:common_lib", - "//test/mocks/event:event_mocks", "//test/mocks/stats:stats_mocks", "//test/test_common:simulated_time_system_lib", - "//test/test_common:test_runtime_lib", ], ) @@ -90,7 +84,6 @@ envoy_cc_mock( "//source/extensions/common/redis:cluster_refresh_manager_interface", "//source/extensions/filters/network/common/redis:client_interface", "//source/extensions/filters/network/common/redis:codec_lib", - "//source/extensions/filters/network/common/redis:fault_interface", "//source/extensions/filters/network/redis_proxy:command_splitter_interface", "//source/extensions/filters/network/redis_proxy:conn_pool_interface", "//source/extensions/filters/network/redis_proxy:router_interface", @@ -122,7 +115,6 @@ envoy_extension_cc_benchmark_binary( "//source/common/stats:isolated_store_lib", "//source/common/stats:stats_lib", "//source/extensions/filters/network/redis_proxy:command_splitter_lib", - "//test/mocks/event:event_mocks", "//test/test_common:printers_lib", "//test/test_common:simulated_time_system_lib", ], @@ -155,7 +147,6 @@ envoy_extension_cc_test( extension_name = "envoy.filters.network.redis_proxy", tags = ["fails_on_windows"], deps = [ - "//source/extensions/filters/network/common/redis:fault_lib", "//source/extensions/filters/network/redis_proxy:config", "//test/integration:integration_lib", ], diff --git a/test/extensions/filters/network/redis_proxy/command_lookup_speed_test.cc b/test/extensions/filters/network/redis_proxy/command_lookup_speed_test.cc index d7ddc451bcb4..edf29c973092 100644 --- a/test/extensions/filters/network/redis_proxy/command_lookup_speed_test.cc +++ b/test/extensions/filters/network/redis_proxy/command_lookup_speed_test.cc @@ -12,14 +12,10 @@ #include "extensions/filters/network/common/redis/supported_commands.h" #include "extensions/filters/network/redis_proxy/command_splitter_impl.h" -#include "test/extensions/filters/network/redis_proxy/mocks.h" -#include "test/mocks/event/mocks.h" #include "test/test_common/simulated_time_system.h" #include "benchmark/benchmark.h" -using testing::NiceMock; - namespace Envoy { namespace Extensions { namespace NetworkFilters { @@ -68,13 +64,11 @@ class CommandLookUpSpeedTest { } } - RouterPtr router_{std::make_unique()}; + Router* router_{new NullRouterImpl()}; 
Stats::IsolatedStoreImpl store_; Event::SimulatedTimeSystem time_system_; - NiceMock fault_manager_; - NiceMock dispatcher_; - CommandSplitter::InstanceImpl splitter_{*router_, store_, "redis.foo.", time_system_, - false, fault_manager_, dispatcher_}; + CommandSplitter::InstanceImpl splitter_{RouterPtr{router_}, store_, "redis.foo.", time_system_, + false}; NoOpSplitCallbacks callbacks_; CommandSplitter::SplitRequestPtr handle_; }; diff --git a/test/extensions/filters/network/redis_proxy/command_splitter_impl_test.cc b/test/extensions/filters/network/redis_proxy/command_splitter_impl_test.cc index e864cb29e65c..097cb3d49f4c 100644 --- a/test/extensions/filters/network/redis_proxy/command_splitter_impl_test.cc +++ b/test/extensions/filters/network/redis_proxy/command_splitter_impl_test.cc @@ -6,14 +6,12 @@ #include "common/common/fmt.h" #include "common/stats/isolated_store_impl.h" -#include "extensions/filters/network/common/redis/fault_impl.h" #include "extensions/filters/network/common/redis/supported_commands.h" #include "extensions/filters/network/redis_proxy/command_splitter_impl.h" #include "test/extensions/filters/network/common/redis/mocks.h" #include "test/extensions/filters/network/redis_proxy/mocks.h" #include "test/mocks/common.h" -#include "test/mocks/event/mocks.h" #include "test/mocks/stats/mocks.h" #include "test/test_common/simulated_time_system.h" @@ -34,12 +32,7 @@ namespace CommandSplitter { class RedisCommandSplitterImplTest : public testing::Test { public: RedisCommandSplitterImplTest() : RedisCommandSplitterImplTest(false) {} - RedisCommandSplitterImplTest(bool latency_in_macro) - : RedisCommandSplitterImplTest(latency_in_macro, nullptr) {} - RedisCommandSplitterImplTest(bool latency_in_macro, Common::Redis::FaultSharedPtr fault_ptr) - : latency_in_micros_(latency_in_macro) { - ON_CALL(fault_manager_, getFaultForCommand(_)).WillByDefault(Return(fault_ptr.get())); - } + RedisCommandSplitterImplTest(bool latency_in_macro) : latency_in_micros_(latency_in_macro) {} void makeBulkStringArray(Common::Redis::RespValue& value, const std::vector& strings) { std::vector values(strings.size()); @@ -59,18 +52,14 @@ class RedisCommandSplitterImplTest : public testing::Test { const bool latency_in_micros_; ConnPool::MockInstance* conn_pool_{new ConnPool::MockInstance()}; - ConnPool::InstanceSharedPtr conn_pool_shared_ptr_{conn_pool_}; ConnPool::MockInstance* mirror_conn_pool_{new ConnPool::MockInstance()}; ConnPool::InstanceSharedPtr mirror_conn_pool_shared_ptr_{mirror_conn_pool_}; - std::shared_ptr> route_{new NiceMock(conn_pool_shared_ptr_)}; - std::shared_ptr> router_{new NiceMock(route_)}; + std::shared_ptr> route_{ + new NiceMock(ConnPool::InstanceSharedPtr{conn_pool_})}; NiceMock store_; - NiceMock dispatcher_; - NiceMock fault_manager_; - Event::SimulatedTimeSystem time_system_; - InstanceImpl splitter_{*router_, store_, "redis.foo.", time_system_, - latency_in_micros_, fault_manager_, dispatcher_}; + InstanceImpl splitter_{std::make_unique>(route_), store_, "redis.foo.", + time_system_, latency_in_micros_}; MockSplitCallbacks callbacks_; SplitRequestPtr handle_; }; @@ -1004,147 +993,6 @@ INSTANTIATE_TEST_SUITE_P(RedisSingleServerRequestWithLatencyMicrosTest, RedisSingleServerRequestWithLatencyMicrosTest, testing::ValuesIn(Common::Redis::SupportedCommands::simpleCommands())); -// In subclasses of fault test, we mock the expected faults in the constructor, as the -// fault manager is owned by the splitter, which is also generated later in construction -// of the base test 
class. -class RedisSingleServerRequestWithFaultTest : public RedisSingleServerRequestTest { -public: - NiceMock* timer_; - Event::TimerCb timer_cb_; - int delay_ms_; - Common::Redis::FaultSharedPtr fault_ptr_; -}; - -class RedisSingleServerRequestWithErrorFaultTest : public RedisSingleServerRequestWithFaultTest { -public: - RedisSingleServerRequestWithErrorFaultTest() { - delay_ms_ = 0; - fault_ptr_ = Common::Redis::FaultManagerImpl::makeFaultForTest( - Common::Redis::FaultType::Error, std::chrono::milliseconds(delay_ms_)); - ON_CALL(fault_manager_, getFaultForCommand(_)).WillByDefault(Return(fault_ptr_.get())); - } -}; - -TEST_P(RedisSingleServerRequestWithErrorFaultTest, Fault) { - InSequence s; - - std::string lower_command = absl::AsciiStrToLower(GetParam()); - Common::Redis::RespValuePtr request{new Common::Redis::RespValue()}; - makeBulkStringArray(*request, {GetParam(), "hello"}); - - EXPECT_CALL(callbacks_, connectionAllowed()).WillOnce(Return(true)); - EXPECT_CALL(callbacks_, onResponse_(_)); - handle_ = splitter_.makeRequest(std::move(request), callbacks_); - EXPECT_EQ(nullptr, handle_); - - EXPECT_EQ(1UL, store_.counter(fmt::format("redis.foo.command.{}.total", lower_command)).value()); - EXPECT_EQ(1UL, store_.counter(fmt::format("redis.foo.command.{}.error", lower_command)).value()); - EXPECT_EQ(1UL, - store_.counter(fmt::format("redis.foo.command.{}.error_fault", lower_command)).value()); -}; - -class RedisSingleServerRequestWithErrorWithDelayFaultTest - : public RedisSingleServerRequestWithFaultTest { -public: - RedisSingleServerRequestWithErrorWithDelayFaultTest() { - delay_ms_ = 5; - fault_ptr_ = Common::Redis::FaultManagerImpl::makeFaultForTest( - Common::Redis::FaultType::Error, std::chrono::milliseconds(delay_ms_)); - ON_CALL(fault_manager_, getFaultForCommand(_)).WillByDefault(Return(fault_ptr_.get())); - timer_ = new NiceMock(); - } -}; - -INSTANTIATE_TEST_SUITE_P(RedisSingleServerRequestWithErrorFaultTest, - RedisSingleServerRequestWithErrorFaultTest, - testing::ValuesIn(Common::Redis::SupportedCommands::simpleCommands())); - -TEST_P(RedisSingleServerRequestWithErrorWithDelayFaultTest, Fault) { - InSequence s; - - std::string lower_command = absl::AsciiStrToLower(GetParam()); - Common::Redis::RespValuePtr request{new Common::Redis::RespValue()}; - makeBulkStringArray(*request, {GetParam(), "hello"}); - - // As error faults have zero latency, recorded latency is equal to the delay. 
- EXPECT_CALL(callbacks_, connectionAllowed()).WillOnce(Return(true)); - EXPECT_CALL(dispatcher_, createTimer_(_)).WillOnce(Invoke([this](Event::TimerCb timer_cb) { - timer_cb_ = timer_cb; - return timer_; - })); - - handle_ = splitter_.makeRequest(std::move(request), callbacks_); - EXPECT_NE(nullptr, handle_); - time_system_.setMonotonicTime(std::chrono::milliseconds(delay_ms_)); - EXPECT_CALL(store_, deliverHistogramToSinks( - Property(&Stats::Metric::name, - fmt::format("redis.foo.command.{}.latency", lower_command)), - delay_ms_)); - EXPECT_CALL(callbacks_, onResponse_(_)); - timer_cb_(); - - EXPECT_EQ(1UL, store_.counter(fmt::format("redis.foo.command.{}.total", lower_command)).value()); - EXPECT_EQ(1UL, store_.counter(fmt::format("redis.foo.command.{}.error", lower_command)).value()); - EXPECT_EQ(1UL, - store_.counter(fmt::format("redis.foo.command.{}.error_fault", lower_command)).value()); -}; - -INSTANTIATE_TEST_SUITE_P(RedisSingleServerRequestWithErrorWithDelayFaultTest, - RedisSingleServerRequestWithErrorWithDelayFaultTest, - testing::ValuesIn(Common::Redis::SupportedCommands::simpleCommands())); - -class RedisSingleServerRequestWithDelayFaultTest : public RedisSingleServerRequestWithFaultTest { -public: - RedisSingleServerRequestWithDelayFaultTest() { - delay_ms_ = 15; - fault_ptr_ = Common::Redis::FaultManagerImpl::makeFaultForTest( - Common::Redis::FaultType::Delay, std::chrono::milliseconds(delay_ms_)); - ON_CALL(fault_manager_, getFaultForCommand(_)).WillByDefault(Return(fault_ptr_.get())); - timer_ = new NiceMock(); - } -}; - -TEST_P(RedisSingleServerRequestWithDelayFaultTest, Fault) { - InSequence s; - - std::string lower_command = absl::AsciiStrToLower(GetParam()); - std::string hash_key = "hello"; - - Common::Redis::RespValuePtr request{new Common::Redis::RespValue()}; - makeBulkStringArray(*request, {GetParam(), "hello"}); - - EXPECT_CALL(callbacks_, connectionAllowed()).WillOnce(Return(true)); - EXPECT_CALL(dispatcher_, createTimer_(_)).WillOnce(Invoke([this](Event::TimerCb timer_cb) { - timer_cb_ = timer_cb; - return timer_; - })); - EXPECT_CALL(*conn_pool_, makeRequest_(hash_key, RespVariantEq(*request), _)) - .WillOnce(DoAll(WithArg<2>(SaveArgAddress(&pool_callbacks_)), Return(&pool_request_))); - - handle_ = splitter_.makeRequest(std::move(request), callbacks_); - - EXPECT_NE(nullptr, handle_); - - EXPECT_CALL(store_, deliverHistogramToSinks( - Property(&Stats::Metric::name, - fmt::format("redis.foo.command.{}.latency", lower_command)), - delay_ms_)); - respond(); - - time_system_.setMonotonicTime(std::chrono::milliseconds(delay_ms_)); - timer_cb_(); - - EXPECT_EQ(1UL, store_.counter(fmt::format("redis.foo.command.{}.total", lower_command)).value()); - EXPECT_EQ(1UL, - store_.counter(fmt::format("redis.foo.command.{}.success", lower_command)).value()); - EXPECT_EQ(1UL, - store_.counter(fmt::format("redis.foo.command.{}.delay_fault", lower_command)).value()); -}; - -INSTANTIATE_TEST_SUITE_P(RedisSingleServerRequestWithDelayFaultTest, - RedisSingleServerRequestWithDelayFaultTest, - testing::ValuesIn(Common::Redis::SupportedCommands::simpleCommands())); - } // namespace CommandSplitter } // namespace RedisProxy } // namespace NetworkFilters diff --git a/test/extensions/filters/network/redis_proxy/config_test.cc b/test/extensions/filters/network/redis_proxy/config_test.cc index 155b72689284..a9043af8cd6e 100644 --- a/test/extensions/filters/network/redis_proxy/config_test.cc +++ b/test/extensions/filters/network/redis_proxy/config_test.cc @@ -169,43 +169,6 @@ 
stat_prefix: foo cb(connection); } -TEST(RedisProxyFilterConfigFactoryTest, RedisProxyFaultProto) { - const std::string yaml = R"EOF( -prefix_routes: - catch_all_route: - cluster: fake_cluster -stat_prefix: foo -faults: -- fault_type: ERROR - fault_enabled: - default_value: - numerator: 30 - denominator: HUNDRED - runtime_key: "bogus_key" - commands: - - GET -- fault_type: DELAY - fault_enabled: - default_value: - numerator: 20 - denominator: HUNDRED - runtime_key: "bogus_key" - delay: 2s -settings: - op_timeout: 0.02s - )EOF"; - - envoy::extensions::filters::network::redis_proxy::v3::RedisProxy proto_config{}; - TestUtility::loadFromYamlAndValidate(yaml, proto_config); - NiceMock context; - RedisProxyFilterConfigFactory factory; - Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, context); - EXPECT_TRUE(factory.isTerminalFilter()); - Network::MockConnection connection; - EXPECT_CALL(connection, addReadFilter(_)); - cb(connection); -} - // Test that the deprecated extension name still functions. TEST(RedisProxyFilterConfigFactoryTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) { const std::string deprecated_name = "envoy.redis_proxy"; diff --git a/test/extensions/filters/network/redis_proxy/mocks.cc b/test/extensions/filters/network/redis_proxy/mocks.cc index fa1ff637af61..d51809ba27f6 100644 --- a/test/extensions/filters/network/redis_proxy/mocks.cc +++ b/test/extensions/filters/network/redis_proxy/mocks.cc @@ -26,10 +26,6 @@ MockMirrorPolicy::MockMirrorPolicy(ConnPool::InstanceSharedPtr conn_pool) ON_CALL(*this, shouldMirror(_)).WillByDefault(Return(true)); } -MockFaultManager::MockFaultManager() = default; -MockFaultManager::MockFaultManager(const MockFaultManager&) {} -MockFaultManager::~MockFaultManager() = default; - namespace ConnPool { MockPoolCallbacks::MockPoolCallbacks() = default; @@ -51,9 +47,6 @@ MockSplitCallbacks::~MockSplitCallbacks() = default; MockInstance::MockInstance() = default; MockInstance::~MockInstance() = default; -MockCommandSplitterFactory::MockCommandSplitterFactory() = default; -MockCommandSplitterFactory::~MockCommandSplitterFactory() = default; - } // namespace CommandSplitter } // namespace RedisProxy } // namespace NetworkFilters diff --git a/test/extensions/filters/network/redis_proxy/mocks.h b/test/extensions/filters/network/redis_proxy/mocks.h index d76a1f2598b7..b093ad35b9b9 100644 --- a/test/extensions/filters/network/redis_proxy/mocks.h +++ b/test/extensions/filters/network/redis_proxy/mocks.h @@ -7,7 +7,6 @@ #include "extensions/common/redis/cluster_refresh_manager.h" #include "extensions/filters/network/common/redis/client.h" #include "extensions/filters/network/common/redis/codec_impl.h" -#include "extensions/filters/network/common/redis/fault.h" #include "extensions/filters/network/redis_proxy/command_splitter.h" #include "extensions/filters/network/redis_proxy/conn_pool.h" #include "extensions/filters/network/redis_proxy/router.h" @@ -52,15 +51,6 @@ class MockMirrorPolicy : public MirrorPolicy { ConnPool::InstanceSharedPtr conn_pool_; }; -class MockFaultManager : public Common::Redis::FaultManager { -public: - MockFaultManager(); - MockFaultManager(const MockFaultManager& other); - ~MockFaultManager() override; - - MOCK_METHOD(const Common::Redis::Fault*, getFaultForCommand, (const std::string&), (const)); -}; - namespace ConnPool { class MockPoolCallbacks : public PoolCallbacks { @@ -129,16 +119,6 @@ class MockInstance : public Instance { (const Common::Redis::RespValue& request, SplitCallbacks& 
callbacks)); }; -class MockCommandSplitterFactory : public CommandSplitterFactory { -public: - MockCommandSplitterFactory(); - ~MockCommandSplitterFactory() override; - - CommandSplitterPtr create(Event::Dispatcher& dispatcher) override { return create_(dispatcher); }; - - MOCK_METHOD(CommandSplitterPtr, create_, (Event::Dispatcher & dispatcher)); -}; - } // namespace CommandSplitter } // namespace RedisProxy } // namespace NetworkFilters diff --git a/test/extensions/filters/network/redis_proxy/proxy_filter_test.cc b/test/extensions/filters/network/redis_proxy/proxy_filter_test.cc index 66def36798c0..f094c02b665a 100644 --- a/test/extensions/filters/network/redis_proxy/proxy_filter_test.cc +++ b/test/extensions/filters/network/redis_proxy/proxy_filter_test.cc @@ -19,7 +19,6 @@ #include "gtest/gtest.h" using testing::_; -using testing::ByMove; using testing::ByRef; using testing::DoAll; using testing::Eq; @@ -132,12 +131,8 @@ class RedisProxyFilterTest : public testing::Test, public Common::Redis::Decoder parseProtoFromYaml(yaml_string); config_ = std::make_shared(proto_config, store_, drain_decision_, runtime_, api_); - - std::unique_ptr splitter_ptr = - std::make_unique(); - EXPECT_CALL(splitter_factory_, create_(_)).WillOnce(Return(ByMove(std::move(splitter_ptr)))); - filter_ = std::make_unique(*this, Common::Redis::EncoderPtr{encoder_}, - splitter_factory_, config_); + filter_ = std::make_unique(*this, Common::Redis::EncoderPtr{encoder_}, splitter_, + config_); filter_->initializeReadFilterCallbacks(filter_callbacks_); EXPECT_EQ(Network::FilterStatus::Continue, filter_->onNewConnection()); EXPECT_EQ(1UL, config_->stats_.downstream_cx_total_.value()); @@ -157,10 +152,6 @@ class RedisProxyFilterTest : public testing::Test, public Common::Redis::Decoder } } - CommandSplitter::MockInstance& getSplitter() { - return reinterpret_cast(*filter_->splitter_); - } - // Common::Redis::DecoderFactory Common::Redis::DecoderPtr create(Common::Redis::DecoderCallbacks& callbacks) override { decoder_callbacks_ = &callbacks; @@ -170,7 +161,7 @@ class RedisProxyFilterTest : public testing::Test, public Common::Redis::Decoder Common::Redis::MockEncoder* encoder_{new Common::Redis::MockEncoder()}; Common::Redis::MockDecoder* decoder_{new Common::Redis::MockDecoder()}; Common::Redis::DecoderCallbacks* decoder_callbacks_{}; - CommandSplitter::MockCommandSplitterFactory splitter_factory_; + CommandSplitter::MockInstance splitter_; Stats::TestUtil::TestStore store_; NiceMock drain_decision_; NiceMock runtime_; @@ -190,12 +181,12 @@ TEST_F(RedisProxyFilterTest, OutOfOrderResponseWithDrainClose) { CommandSplitter::SplitCallbacks* request_callbacks2; EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void { Common::Redis::RespValuePtr request1(new Common::Redis::RespValue()); - EXPECT_CALL(getSplitter(), makeRequest_(Ref(*request1), _)) + EXPECT_CALL(splitter_, makeRequest_(Ref(*request1), _)) .WillOnce(DoAll(WithArg<1>(SaveArgAddress(&request_callbacks1)), Return(request_handle1))); decoder_callbacks_->onRespValue(std::move(request1)); Common::Redis::RespValuePtr request2(new Common::Redis::RespValue()); - EXPECT_CALL(getSplitter(), makeRequest_(Ref(*request2), _)) + EXPECT_CALL(splitter_, makeRequest_(Ref(*request2), _)) .WillOnce(DoAll(WithArg<1>(SaveArgAddress(&request_callbacks2)), Return(request_handle2))); decoder_callbacks_->onRespValue(std::move(request2)); })); @@ -231,12 +222,12 @@ TEST_F(RedisProxyFilterTest, OutOfOrderResponseDownstreamDisconnectBeforeFlush) 
CommandSplitter::SplitCallbacks* request_callbacks2; EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void { Common::Redis::RespValuePtr request1(new Common::Redis::RespValue()); - EXPECT_CALL(getSplitter(), makeRequest_(Ref(*request1), _)) + EXPECT_CALL(splitter_, makeRequest_(Ref(*request1), _)) .WillOnce(DoAll(WithArg<1>(SaveArgAddress(&request_callbacks1)), Return(request_handle1))); decoder_callbacks_->onRespValue(std::move(request1)); Common::Redis::RespValuePtr request2(new Common::Redis::RespValue()); - EXPECT_CALL(getSplitter(), makeRequest_(Ref(*request2), _)) + EXPECT_CALL(splitter_, makeRequest_(Ref(*request2), _)) .WillOnce(DoAll(WithArg<1>(SaveArgAddress(&request_callbacks2)), Return(request_handle2))); decoder_callbacks_->onRespValue(std::move(request2)); })); @@ -260,7 +251,7 @@ TEST_F(RedisProxyFilterTest, DownstreamDisconnectWithActive) { CommandSplitter::SplitCallbacks* request_callbacks1; EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void { Common::Redis::RespValuePtr request1(new Common::Redis::RespValue()); - EXPECT_CALL(getSplitter(), makeRequest_(Ref(*request1), _)) + EXPECT_CALL(splitter_, makeRequest_(Ref(*request1), _)) .WillOnce(DoAll(WithArg<1>(SaveArgAddress(&request_callbacks1)), Return(request_handle1))); decoder_callbacks_->onRespValue(std::move(request1)); })); @@ -278,7 +269,7 @@ TEST_F(RedisProxyFilterTest, ImmediateResponse) { EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void { decoder_callbacks_->onRespValue(std::move(request1)); })); - EXPECT_CALL(getSplitter(), makeRequest_(Ref(*request1), _)) + EXPECT_CALL(splitter_, makeRequest_(Ref(*request1), _)) .WillOnce( Invoke([&](const Common::Redis::RespValue&, CommandSplitter::SplitCallbacks& callbacks) -> CommandSplitter::SplitRequest* { @@ -322,7 +313,7 @@ TEST_F(RedisProxyFilterTest, AuthWhenNotRequired) { EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void { decoder_callbacks_->onRespValue(std::move(request)); })); - EXPECT_CALL(getSplitter(), makeRequest_(Ref(*request), _)) + EXPECT_CALL(splitter_, makeRequest_(Ref(*request), _)) .WillOnce( Invoke([&](const Common::Redis::RespValue&, CommandSplitter::SplitCallbacks& callbacks) -> CommandSplitter::SplitRequest* { @@ -349,7 +340,7 @@ TEST_F(RedisProxyFilterTest, AuthAclWhenNotRequired) { EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void { decoder_callbacks_->onRespValue(std::move(request)); })); - EXPECT_CALL(getSplitter(), makeRequest_(Ref(*request), _)) + EXPECT_CALL(splitter_, makeRequest_(Ref(*request), _)) .WillOnce( Invoke([&](const Common::Redis::RespValue&, CommandSplitter::SplitCallbacks& callbacks) -> CommandSplitter::SplitRequest* { @@ -392,7 +383,7 @@ TEST_F(RedisProxyFilterWithAuthPasswordTest, AuthPasswordCorrect) { EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void { decoder_callbacks_->onRespValue(std::move(request)); })); - EXPECT_CALL(getSplitter(), makeRequest_(Ref(*request), _)) + EXPECT_CALL(splitter_, makeRequest_(Ref(*request), _)) .WillOnce( Invoke([&](const Common::Redis::RespValue&, CommandSplitter::SplitCallbacks& callbacks) -> CommandSplitter::SplitRequest* { @@ -419,7 +410,7 @@ TEST_F(RedisProxyFilterWithAuthPasswordTest, AuthPasswordIncorrect) { EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void { 
decoder_callbacks_->onRespValue(std::move(request)); })); - EXPECT_CALL(getSplitter(), makeRequest_(Ref(*request), _)) + EXPECT_CALL(splitter_, makeRequest_(Ref(*request), _)) .WillOnce( Invoke([&](const Common::Redis::RespValue&, CommandSplitter::SplitCallbacks& callbacks) -> CommandSplitter::SplitRequest* { @@ -464,7 +455,7 @@ TEST_F(RedisProxyFilterWithAuthAclTest, AuthAclCorrect) { EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void { decoder_callbacks_->onRespValue(std::move(request)); })); - EXPECT_CALL(getSplitter(), makeRequest_(Ref(*request), _)) + EXPECT_CALL(splitter_, makeRequest_(Ref(*request), _)) .WillOnce( Invoke([&](const Common::Redis::RespValue&, CommandSplitter::SplitCallbacks& callbacks) -> CommandSplitter::SplitRequest* { @@ -491,7 +482,7 @@ TEST_F(RedisProxyFilterWithAuthAclTest, AuthAclUsernameIncorrect) { EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void { decoder_callbacks_->onRespValue(std::move(request)); })); - EXPECT_CALL(getSplitter(), makeRequest_(Ref(*request), _)) + EXPECT_CALL(splitter_, makeRequest_(Ref(*request), _)) .WillOnce( Invoke([&](const Common::Redis::RespValue&, CommandSplitter::SplitCallbacks& callbacks) -> CommandSplitter::SplitRequest* { @@ -518,7 +509,7 @@ TEST_F(RedisProxyFilterWithAuthAclTest, AuthAclPasswordIncorrect) { EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void { decoder_callbacks_->onRespValue(std::move(request)); })); - EXPECT_CALL(getSplitter(), makeRequest_(Ref(*request), _)) + EXPECT_CALL(splitter_, makeRequest_(Ref(*request), _)) .WillOnce( Invoke([&](const Common::Redis::RespValue&, CommandSplitter::SplitCallbacks& callbacks) -> CommandSplitter::SplitRequest* { diff --git a/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc b/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc index aae9ef011c36..34f8f0e9b665 100644 --- a/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc +++ b/test/extensions/filters/network/redis_proxy/redis_proxy_integration_test.cc @@ -1,9 +1,6 @@ #include #include -#include "common/common/fmt.h" - -#include "extensions/filters/network/common/redis/fault_impl.h" #include "extensions/filters/network/redis_proxy/command_splitter_impl.h" #include "test/integration/integration.h" @@ -55,7 +52,7 @@ const std::string CONFIG = R"EOF( filters: name: redis typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.network.redis_proxy.v3.RedisProxy + "@type": type.googleapis.com/envoy.config.filter.network.redis_proxy.v2.RedisProxy stat_prefix: redis_stats prefix_routes: catch_all_route: @@ -276,27 +273,6 @@ const std::string CONFIG_WITH_ROUTES_AND_AUTH_PASSWORDS = R"EOF( cluster: cluster_2 )EOF"; -// This is a configuration with fault injection enabled. -const std::string CONFIG_WITH_FAULT_INJECTION = CONFIG + R"EOF( - faults: - - fault_type: ERROR - fault_enabled: - default_value: - numerator: 100 - denominator: HUNDRED - commands: - - GET - - fault_type: DELAY - fault_enabled: - default_value: - numerator: 20 - denominator: HUNDRED - runtime_key: "bogus_key" - delay: 2s - commands: - - SET -)EOF"; - // This function encodes commands as an array of bulkstrings as transmitted by Redis clients to // Redis servers, according to the Redis protocol. 
std::string makeBulkStringArray(std::vector&& command_strings) { @@ -460,12 +436,6 @@ class RedisProxyWithCommandStatsIntegrationTest : public RedisProxyIntegrationTe : RedisProxyIntegrationTest(CONFIG_WITH_COMMAND_STATS, 2) {} }; -class RedisProxyWithFaultInjectionIntegrationTest : public RedisProxyIntegrationTest { -public: - RedisProxyWithFaultInjectionIntegrationTest() - : RedisProxyIntegrationTest(CONFIG_WITH_FAULT_INJECTION, 2) {} -}; - INSTANTIATE_TEST_SUITE_P(IpVersions, RedisProxyIntegrationTest, testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), TestUtility::ipTestParamsToString); @@ -498,10 +468,6 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, RedisProxyWithCommandStatsIntegrationTest, testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), TestUtility::ipTestParamsToString); -INSTANTIATE_TEST_SUITE_P(IpVersions, RedisProxyWithFaultInjectionIntegrationTest, - testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), - TestUtility::ipTestParamsToString); - void RedisProxyIntegrationTest::initialize() { setUpstreamCount(num_upstreams_); setDeterministic(); @@ -1096,25 +1062,5 @@ TEST_P(RedisProxyWithMirrorsIntegrationTest, EnabledViaRuntimeFraction) { redis_client->close(); } -TEST_P(RedisProxyWithFaultInjectionIntegrationTest, ErrorFault) { - std::string fault_response = - fmt::format("-{}\r\n", Extensions::NetworkFilters::Common::Redis::FaultMessages::get().Error); - initialize(); - simpleProxyResponse(makeBulkStringArray({"get", "foo"}), fault_response); - - EXPECT_EQ(1, test_server_->counter("redis.redis_stats.command.get.error")->value()); - EXPECT_EQ(1, test_server_->counter("redis.redis_stats.command.get.error_fault")->value()); -} - -TEST_P(RedisProxyWithFaultInjectionIntegrationTest, DelayFault) { - const std::string& set_request = makeBulkStringArray({"set", "write_only:toto", "bar"}); - const std::string& set_response = ":1\r\n"; - initialize(); - simpleRequestAndResponse(set_request, set_response); - - EXPECT_EQ(1, test_server_->counter("redis.redis_stats.command.set.success")->value()); - EXPECT_EQ(1, test_server_->counter("redis.redis_stats.command.set.delay_fault")->value()); -} - } // namespace } // namespace Envoy From edd20e56f0f6ba69cabd9b17a9ecdcf7c3bea37f Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Thu, 30 Jul 2020 08:59:39 -0700 Subject: [PATCH 795/909] proto: workaround for ECDS to be included in api/BUILD (#12367) Signed-off-by: Lizan Zhou --- api/envoy/service/extension/v3/config_discovery.proto | 6 ++++++ .../envoy/service/extension/v3/config_discovery.proto | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/api/envoy/service/extension/v3/config_discovery.proto b/api/envoy/service/extension/v3/config_discovery.proto index ce2a5c7dfe70..652355b707e3 100644 --- a/api/envoy/service/extension/v3/config_discovery.proto +++ b/api/envoy/service/extension/v3/config_discovery.proto @@ -36,3 +36,9 @@ service ExtensionConfigDiscoveryService { option (google.api.http).body = "*"; } } + +// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue +// with importing services: https://github.com/google/protobuf/issues/4221 and +// protoxform to upgrade the file. 
+message EcdsDummy { +} diff --git a/generated_api_shadow/envoy/service/extension/v3/config_discovery.proto b/generated_api_shadow/envoy/service/extension/v3/config_discovery.proto index ce2a5c7dfe70..652355b707e3 100644 --- a/generated_api_shadow/envoy/service/extension/v3/config_discovery.proto +++ b/generated_api_shadow/envoy/service/extension/v3/config_discovery.proto @@ -36,3 +36,9 @@ service ExtensionConfigDiscoveryService { option (google.api.http).body = "*"; } } + +// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue +// with importing services: https://github.com/google/protobuf/issues/4221 and +// protoxform to upgrade the file. +message EcdsDummy { +} From e219801b4439ae202d2051ac12c44e481bb139df Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Thu, 30 Jul 2020 13:16:52 -0400 Subject: [PATCH 796/909] http: changing sendLocalReply args (#12333) dropping is_head_request as it can be inferred by the HCM. Annoyingly we now can't in all cases - I did tweaks to the HTTP/1.1 codec a few weeks back which make use of this for incomplete HTTP/1.1 headers. Given that behavior landed recently and it's only for responses failing mid-headers I'm going to see if folks think it's Ok to lose since it makes the API simpler for the other 90% of callers. Risk Level: low Testing: adjusted unit tests Docs Changes: n/a Release Notes: n/a Signed-off-by: Alyssa Wilk --- include/envoy/http/codec.h | 2 - source/common/http/conn_manager_impl.cc | 43 ++++------ source/common/http/conn_manager_impl.h | 3 - source/common/http/http1/codec_impl.cc | 3 +- source/common/http/http1/codec_impl_legacy.cc | 3 +- test/common/http/http1/codec_impl_test.cc | 82 +++++-------------- test/integration/fake_upstream.h | 4 +- test/mocks/http/stream_decoder.h | 2 +- 8 files changed, 45 insertions(+), 97 deletions(-) diff --git a/include/envoy/http/codec.h b/include/envoy/http/codec.h index 46a9bf4e4f2b..ccc04af094a6 100644 --- a/include/envoy/http/codec.h +++ b/include/envoy/http/codec.h @@ -193,13 +193,11 @@ class RequestDecoder : public virtual StreamDecoder { * @param code supplies the HTTP error code to send. * @param body supplies an optional body to send with the local reply. * @param modify_headers supplies a way to edit headers before they are sent downstream. 
- * @param is_head_request indicates if the request is a HEAD request * @param grpc_status an optional gRPC status for gRPC requests * @param details details about the source of the error, for debug purposes */ virtual void sendLocalReply(bool is_grpc_request, Code code, absl::string_view body, const std::function& modify_headers, - bool is_head_request, const absl::optional grpc_status, absl::string_view details) PURE; }; diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index d307398e3b14..4c11cdd2d110 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -676,8 +676,8 @@ void ConnectionManagerImpl::ActiveStream::onIdleTimeout() { stream_info_.setResponseFlag(StreamInfo::ResponseFlag::StreamIdleTimeout); sendLocalReply(request_headers_ != nullptr && Grpc::Common::isGrpcRequestHeaders(*request_headers_), - Http::Code::RequestTimeout, "stream timeout", nullptr, state_.is_head_request_, - absl::nullopt, StreamInfo::ResponseCodeDetails::get().StreamIdleTimeout); + Http::Code::RequestTimeout, "stream timeout", nullptr, absl::nullopt, + StreamInfo::ResponseCodeDetails::get().StreamIdleTimeout); } } @@ -685,8 +685,8 @@ void ConnectionManagerImpl::ActiveStream::onRequestTimeout() { connection_manager_.stats_.named_.downstream_rq_timeout_.inc(); sendLocalReply(request_headers_ != nullptr && Grpc::Common::isGrpcRequestHeaders(*request_headers_), - Http::Code::RequestTimeout, "request timeout", nullptr, state_.is_head_request_, - absl::nullopt, StreamInfo::ResponseCodeDetails::get().RequestOverallTimeout); + Http::Code::RequestTimeout, "request timeout", nullptr, absl::nullopt, + StreamInfo::ResponseCodeDetails::get().RequestOverallTimeout); } void ConnectionManagerImpl::ActiveStream::onStreamMaxDurationReached() { @@ -862,8 +862,7 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& he state_.created_filter_chain_ = true; connection_manager_.stats_.named_.downstream_rq_overload_close_.inc(); sendLocalReply(Grpc::Common::isGrpcRequestHeaders(*request_headers_), - Http::Code::ServiceUnavailable, "envoy overloaded", nullptr, - state_.is_head_request_, absl::nullopt, + Http::Code::ServiceUnavailable, "envoy overloaded", nullptr, absl::nullopt, StreamInfo::ResponseCodeDetails::get().Overload); return; } @@ -892,8 +891,8 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& he stream_info_.protocol(protocol); if (!connection_manager_.config_.http1Settings().accept_http_10_) { // Send "Upgrade Required" if HTTP/1.0 support is not explicitly configured on. - sendLocalReply(false, Code::UpgradeRequired, "", nullptr, state_.is_head_request_, - absl::nullopt, StreamInfo::ResponseCodeDetails::get().LowVersion); + sendLocalReply(false, Code::UpgradeRequired, "", nullptr, absl::nullopt, + StreamInfo::ResponseCodeDetails::get().LowVersion); return; } else if (!fixed_connection_close) { // HTTP/1.0 defaults to single-use connections. Make sure the connection @@ -915,8 +914,7 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& he if (!request_headers_->Host()) { // Require host header. For HTTP/1.1 Host has already been translated to :authority. 
sendLocalReply(Grpc::Common::hasGrpcContentType(*request_headers_), Code::BadRequest, "", - nullptr, state_.is_head_request_, absl::nullopt, - StreamInfo::ResponseCodeDetails::get().MissingHost); + nullptr, absl::nullopt, StreamInfo::ResponseCodeDetails::get().MissingHost); return; } @@ -930,8 +928,7 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& he if ((!HeaderUtility::isConnect(*request_headers_) || request_headers_->Path()) && request_headers_->getPathValue().empty()) { sendLocalReply(Grpc::Common::hasGrpcContentType(*request_headers_), Code::NotFound, "", nullptr, - state_.is_head_request_, absl::nullopt, - StreamInfo::ResponseCodeDetails::get().MissingPath); + absl::nullopt, StreamInfo::ResponseCodeDetails::get().MissingPath); return; } @@ -939,8 +936,7 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& he if (!request_headers_->getPathValue().empty() && request_headers_->getPathValue()[0] != '/') { connection_manager_.stats_.named_.downstream_rq_non_relative_path_.inc(); sendLocalReply(Grpc::Common::hasGrpcContentType(*request_headers_), Code::NotFound, "", nullptr, - state_.is_head_request_, absl::nullopt, - StreamInfo::ResponseCodeDetails::get().AbsolutePath); + absl::nullopt, StreamInfo::ResponseCodeDetails::get().AbsolutePath); return; } @@ -948,7 +944,7 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& he if (!ConnectionManagerUtility::maybeNormalizePath(*request_headers_, connection_manager_.config_)) { sendLocalReply(Grpc::Common::hasGrpcContentType(*request_headers_), Code::BadRequest, "", - nullptr, state_.is_head_request_, absl::nullopt, + nullptr, absl::nullopt, StreamInfo::ResponseCodeDetails::get().PathNormalizationFailed); return; } @@ -1003,8 +999,7 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& he state_.saw_connection_close_ = true; connection_manager_.stats_.named_.downstream_rq_ws_on_non_ws_route_.inc(); sendLocalReply(Grpc::Common::hasGrpcContentType(*request_headers_), Code::Forbidden, "", - nullptr, state_.is_head_request_, absl::nullopt, - StreamInfo::ResponseCodeDetails::get().UpgradeFailed); + nullptr, absl::nullopt, StreamInfo::ResponseCodeDetails::get().UpgradeFailed); return; } // Allow non websocket requests to go through websocket enabled routes. @@ -1568,8 +1563,9 @@ absl::optional ConnectionManagerImpl::ActiveStream void ConnectionManagerImpl::ActiveStream::sendLocalReply( bool is_grpc_request, Code code, absl::string_view body, - const std::function& modify_headers, bool is_head_request, + const std::function& modify_headers, const absl::optional grpc_status, absl::string_view details) { + const bool is_head_request = state_.is_head_request_; stream_info_.setResponseCodeDetails(details); // The BadRequest error code indicates there has been a messaging error. 
@@ -1957,10 +1953,9 @@ ResponseTrailerMap& ConnectionManagerImpl::FilterManager::addEncodedTrailers() { void ConnectionManagerImpl::FilterManager::sendLocalReply( bool is_grpc_request, Code code, absl::string_view body, - const std::function& modify_headers, bool is_head_request, + const std::function& modify_headers, const absl::optional grpc_status, absl::string_view details) { - active_stream_.sendLocalReply(is_grpc_request, code, body, modify_headers, is_head_request, - grpc_status, details); + active_stream_.sendLocalReply(is_grpc_request, code, body, modify_headers, grpc_status, details); } void ConnectionManagerImpl::FilterManager::addEncodedData(ActiveStreamEncoderFilter& filter, @@ -2568,8 +2563,7 @@ void ConnectionManagerImpl::ActiveStreamDecoderFilter::sendLocalReply( std::function modify_headers, const absl::optional grpc_status, absl::string_view details) { parent_.active_stream_.stream_info_.setResponseCodeDetails(details); - parent_.sendLocalReply(is_grpc_request_, code, body, modify_headers, - parent_.active_stream_.state_.is_head_request_, grpc_status, details); + parent_.sendLocalReply(is_grpc_request_, code, body, modify_headers, grpc_status, details); } void ConnectionManagerImpl::ActiveStreamDecoderFilter::encode100ContinueHeaders( @@ -2863,8 +2857,7 @@ void ConnectionManagerImpl::ActiveStreamEncoderFilter::responseDataTooLarge() { parent_.active_stream_.request_headers_ && Grpc::Common::isGrpcRequestHeaders(*parent_.active_stream_.request_headers_), Http::Code::InternalServerError, CodeUtility::toString(Http::Code::InternalServerError), - nullptr, parent_.active_stream_.state_.is_head_request_, absl::nullopt, - StreamInfo::ResponseCodeDetails::get().ResponsePayloadTooLarge); + nullptr, absl::nullopt, StreamInfo::ResponseCodeDetails::get().ResponsePayloadTooLarge); } } diff --git a/source/common/http/conn_manager_impl.h b/source/common/http/conn_manager_impl.h index 4302ea34aa0c..34ed3e7c8f3f 100644 --- a/source/common/http/conn_manager_impl.h +++ b/source/common/http/conn_manager_impl.h @@ -437,7 +437,6 @@ class ConnectionManagerImpl : Logger::Loggable, ResponseTrailerMap& addEncodedTrailers(); void sendLocalReply(bool is_grpc_request, Code code, absl::string_view body, const std::function& modify_headers, - bool is_head_request, const absl::optional grpc_status, absl::string_view details); void sendLocalReplyViaFilterChain( @@ -494,9 +493,7 @@ class ConnectionManagerImpl : Logger::Loggable, void chargeStats(const ResponseHeaderMap& headers); const Network::Connection* connection(); void sendLocalReply(bool is_grpc_request, Code code, absl::string_view body, - const std::function& modify_headers, - bool is_head_request, const absl::optional grpc_status, absl::string_view details) override; uint64_t streamId() { return stream_id_; } diff --git a/source/common/http/http1/codec_impl.cc b/source/common/http/http1/codec_impl.cc index 596540e56a66..c9cf88f569e8 100644 --- a/source/common/http/http1/codec_impl.cc +++ b/source/common/http/http1/codec_impl.cc @@ -1006,10 +1006,9 @@ void ServerConnectionImpl::sendProtocolError(absl::string_view details) { is_grpc_request = Grpc::Common::isGrpcRequestHeaders(*absl::get(headers_or_trailers_)); } - const bool is_head_request = parser_.method == HTTP_HEAD; active_request_->request_decoder_->sendLocalReply(is_grpc_request, error_code_, CodeUtility::toString(error_code_), nullptr, - is_head_request, absl::nullopt, details); + absl::nullopt, details); return; } } diff --git a/source/common/http/http1/codec_impl_legacy.cc 
b/source/common/http/http1/codec_impl_legacy.cc index 6c227685e245..a8829d2182e2 100644 --- a/source/common/http/http1/codec_impl_legacy.cc +++ b/source/common/http/http1/codec_impl_legacy.cc @@ -1011,10 +1011,9 @@ void ServerConnectionImpl::sendProtocolError(absl::string_view details) { is_grpc_request = Grpc::Common::isGrpcRequestHeaders(*absl::get(headers_or_trailers_)); } - const bool is_head_request = parser_.method == HTTP_HEAD; active_request_->request_decoder_->sendLocalReply(is_grpc_request, error_code_, CodeUtility::toString(error_code_), nullptr, - is_head_request, absl::nullopt, details); + absl::nullopt, details); return; } } diff --git a/test/common/http/http1/codec_impl_test.cc b/test/common/http/http1/codec_impl_test.cc index 9f21744cdd0a..7ecd8baf1bb0 100644 --- a/test/common/http/http1/codec_impl_test.cc +++ b/test/common/http/http1/codec_impl_test.cc @@ -154,7 +154,7 @@ void Http1ServerConnectionImplTest::expect400(Protocol p, bool allow_absolute_ur return decoder; })); - EXPECT_CALL(decoder, sendLocalReply(_, Http::Code::BadRequest, "Bad Request", _, _, _, _)); + EXPECT_CALL(decoder, sendLocalReply(_, Http::Code::BadRequest, "Bad Request", _, _, _)); auto status = codec_->dispatch(buffer); EXPECT_TRUE(isCodecProtocolError(status)); EXPECT_EQ(p, codec_->protocol()); @@ -271,7 +271,7 @@ void Http1ServerConnectionImplTest::testTrailersExceedLimit(std::string trailer_ EXPECT_TRUE(status.ok()); buffer = Buffer::OwnedImpl(trailer_string); if (enable_trailers) { - EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _, _)); + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _)); status = codec_->dispatch(buffer); EXPECT_TRUE(isCodecProtocolError(status)); EXPECT_EQ(status.message(), "trailers size exceeds limit"); @@ -299,7 +299,7 @@ void Http1ServerConnectionImplTest::testRequestHeadersExceedLimit(std::string he auto status = codec_->dispatch(buffer); EXPECT_TRUE(status.ok()); buffer = Buffer::OwnedImpl(header_string + "\r\n"); - EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _, _)); + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _)); status = codec_->dispatch(buffer); EXPECT_TRUE(isCodecProtocolError(status)); EXPECT_EQ(status.message(), "headers size exceeds limit"); @@ -364,7 +364,7 @@ TEST_P(Http1ServerConnectionImplTest, IdentityEncodingNoChunked) { EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\ntransfer-encoding: identity\r\n\r\n"); - EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _, _)); + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _)); auto status = codec_->dispatch(buffer); EXPECT_TRUE(isCodecProtocolError(status)); EXPECT_EQ(status.message(), "http/1.1 protocol error: unsupported transfer encoding"); @@ -379,7 +379,7 @@ TEST_P(Http1ServerConnectionImplTest, UnsupportedEncoding) { EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\ntransfer-encoding: gzip\r\n\r\n"); - EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _, _)); + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _)); auto status = codec_->dispatch(buffer); EXPECT_TRUE(isCodecProtocolError(status)); EXPECT_EQ(status.message(), "http/1.1 protocol error: unsupported transfer encoding"); @@ -531,7 +531,7 @@ TEST_P(Http1ServerConnectionImplTest, InvalidChunkHeader) { "6\r\nHello \r\n" "invalid\r\nWorl"); - EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _, _)); + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _)); auto status = 
codec_->dispatch(buffer); EXPECT_TRUE(isCodecProtocolError(status)); EXPECT_EQ(status.message(), "http/1.1 protocol error: HPE_INVALID_CHUNK_SIZE"); @@ -548,7 +548,7 @@ TEST_P(Http1ServerConnectionImplTest, IdentityAndChunkedBody) { Buffer::OwnedImpl buffer("POST / HTTP/1.1\r\ntransfer-encoding: " "identity,chunked\r\n\r\nb\r\nHello World\r\n0\r\n\r\n"); - EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _, _)); + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _)); auto status = codec_->dispatch(buffer); EXPECT_TRUE(isCodecProtocolError(status)); EXPECT_EQ(status.message(), "http/1.1 protocol error: unsupported transfer encoding"); @@ -754,7 +754,7 @@ TEST_P(Http1ServerConnectionImplTest, Http11InvalidTrailerPost) { "body\r\n0\r\n" "badtrailer\r\n\r\n"); - EXPECT_CALL(decoder, sendLocalReply(_, Http::Code::BadRequest, "Bad Request", _, _, _, _)); + EXPECT_CALL(decoder, sendLocalReply(_, Http::Code::BadRequest, "Bad Request", _, _, _)); auto status = codec_->dispatch(buffer); EXPECT_TRUE(isCodecProtocolError(status)); } @@ -836,7 +836,7 @@ TEST_P(Http1ServerConnectionImplTest, BadRequestNoStreamLegacy) { MockRequestDecoder decoder; EXPECT_CALL(callbacks_, newStream(_, _)).Times(0); - EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _, _)).Times(0); + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _)).Times(0); Buffer::OwnedImpl buffer("bad"); auto status = codec_->dispatch(buffer); @@ -856,53 +856,13 @@ TEST_P(Http1ServerConnectionImplTest, BadRequestNoStream) { return decoder; })); // Check that before any headers are parsed, requests do not look like HEAD or gRPC requests. - EXPECT_CALL(decoder, sendLocalReply(false, _, _, _, false, _, _)); + EXPECT_CALL(decoder, sendLocalReply(false, _, _, _, _, _)); Buffer::OwnedImpl buffer("bad"); auto status = codec_->dispatch(buffer); EXPECT_TRUE(isCodecProtocolError(status)); } -// Make sure that if the first line is parsed, that sendLocalReply tracks HEAD requests correctly. -TEST_P(Http1ServerConnectionImplTest, BadHeadRequest) { - initialize(); - - MockRequestDecoder decoder; - Http::ResponseEncoder* response_encoder = nullptr; - EXPECT_CALL(callbacks_, newStream(_, _)) - .WillOnce(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& { - response_encoder = &encoder; - return decoder; - })); - // Make sure sendLocalReply picks up the head request. - EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, true, _, _)); - - // Send invalid characters - Buffer::OwnedImpl buffer("HEAD / HTTP/1.1\r\nHOST: h.com\r\r\r\r"); - auto status = codec_->dispatch(buffer); - EXPECT_TRUE(isCodecProtocolError(status)); -} - -// Make sure that if gRPC headers are parsed, they are tracked by sendLocalReply. -TEST_P(Http1ServerConnectionImplTest, BadGrpcRequest) { - initialize(); - - MockRequestDecoder decoder; - Http::ResponseEncoder* response_encoder = nullptr; - EXPECT_CALL(callbacks_, newStream(_, _)) - .WillOnce(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& { - response_encoder = &encoder; - return decoder; - })); - // Make sure sendLocalReply picks up the head request. - EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, true, _, _)); - - // Send invalid characters - Buffer::OwnedImpl buffer("HEAD / HTTP/1.1\r\ncontent-type: application/grpc\r\nHOST: ###\r\r"); - auto status = codec_->dispatch(buffer); - EXPECT_TRUE(isCodecProtocolError(status)); -} - // This behavior was observed during CVE-2019-18801 and helped to limit the // scope of affected Envoy configurations. 
TEST_P(Http1ServerConnectionImplTest, RejectInvalidMethod) { @@ -912,7 +872,7 @@ TEST_P(Http1ServerConnectionImplTest, RejectInvalidMethod) { EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); Buffer::OwnedImpl buffer("BAD / HTTP/1.1\r\nHost: foo\r\n"); - EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _, _)); + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _)); auto status = codec_->dispatch(buffer); EXPECT_TRUE(isCodecProtocolError(status)); } @@ -928,7 +888,7 @@ TEST_P(Http1ServerConnectionImplTest, BadRequestStartedStream) { EXPECT_TRUE(status.ok()); Buffer::OwnedImpl buffer2("g"); - EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _, _)); + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _)); status = codec_->dispatch(buffer); EXPECT_TRUE(isCodecProtocolError(status)); } @@ -1056,7 +1016,7 @@ TEST_P(Http1ServerConnectionImplTest, HeaderInvalidCharsRejection) { })); Buffer::OwnedImpl buffer( absl::StrCat("GET / HTTP/1.1\r\nHOST: h.com\r\nfoo: ", std::string(1, 3), "\r\n")); - EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _, _)); + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _)); auto status = codec_->dispatch(buffer); EXPECT_TRUE(isCodecProtocolError(status)); EXPECT_EQ(status.message(), "http/1.1 protocol error: header value contains invalid chars"); @@ -1125,7 +1085,7 @@ TEST_P(Http1ServerConnectionImplTest, HeaderNameWithUnderscoreCauseRequestReject })); Buffer::OwnedImpl buffer(absl::StrCat("GET / HTTP/1.1\r\nHOST: h.com\r\nfoo_bar: bar\r\n\r\n")); - EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _, _)); + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _)); auto status = codec_->dispatch(buffer); EXPECT_TRUE(isCodecProtocolError(status)); EXPECT_EQ(status.message(), "http/1.1 protocol error: header name contains underscores"); @@ -1146,7 +1106,7 @@ TEST_P(Http1ServerConnectionImplTest, HeaderInvalidAuthority) { return decoder; })); Buffer::OwnedImpl buffer(absl::StrCat("GET / HTTP/1.1\r\nHOST: h.\"com\r\n\r\n")); - EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _, _)); + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _)); auto status = codec_->dispatch(buffer); EXPECT_TRUE(isCodecProtocolError(status)); EXPECT_EQ(status.message(), @@ -1169,7 +1129,7 @@ TEST_P(Http1ServerConnectionImplTest, HeaderMutateEmbeddedNul) { Buffer::OwnedImpl buffer( absl::StrCat(example_input.substr(0, n), std::string(1, '\0'), example_input.substr(n))); - EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _, _)); + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _)); auto status = codec_->dispatch(buffer); EXPECT_FALSE(status.ok()); EXPECT_TRUE(isCodecProtocolError(status)); @@ -1774,7 +1734,7 @@ TEST_P(Http1ServerConnectionImplTest, ConnectRequestAbsoluteURLNotallowed) { EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); Buffer::OwnedImpl buffer("CONNECT http://host:80 HTTP/1.1\r\n\r\n"); - EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _, _)); + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _)); auto status = codec_->dispatch(buffer); EXPECT_TRUE(isCodecProtocolError(status)); } @@ -1803,7 +1763,7 @@ TEST_P(Http1ServerConnectionImplTest, ConnectRequestWithTEChunked) { // Per https://tools.ietf.org/html/rfc7231#section-4.3.6 CONNECT with body has no defined // semantics: Envoy will reject chunked CONNECT requests. 
- EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _, _)); + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _)); Buffer::OwnedImpl buffer( "CONNECT host:80 HTTP/1.1\r\ntransfer-encoding: chunked\r\n\r\n12345abcd"); auto status = codec_->dispatch(buffer); @@ -1821,7 +1781,7 @@ TEST_P(Http1ServerConnectionImplTest, ConnectRequestWithNonZeroContentLength) { // Make sure we avoid the deferred_end_stream_headers_ optimization for // requests-with-no-body. Buffer::OwnedImpl buffer("CONNECT host:80 HTTP/1.1\r\ncontent-length: 1\r\n\r\nabcd"); - EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _, _)); + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _)); auto status = codec_->dispatch(buffer); EXPECT_TRUE(isCodecProtocolError(status)); EXPECT_EQ(status.message(), "http/1.1 protocol error: unsupported content length"); @@ -2667,7 +2627,7 @@ TEST_P(Http1ServerConnectionImplTest, LargeRequestHeadersSplitRejected) { } // the 60th 1kb header should induce overflow buffer = Buffer::OwnedImpl(fmt::format("big: {}\r\n", long_string)); - EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _, _)); + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _)); status = codec_->dispatch(buffer); EXPECT_TRUE(isCodecProtocolError(status)); EXPECT_EQ(status.message(), "headers size exceeds limit"); @@ -2697,7 +2657,7 @@ TEST_P(Http1ServerConnectionImplTest, ManyRequestHeadersSplitRejected) { // The final 101th header should induce overflow. buffer = Buffer::OwnedImpl("header101:\r\n\r\n"); - EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _, _)); + EXPECT_CALL(decoder, sendLocalReply(_, _, _, _, _, _)); status = codec_->dispatch(buffer); EXPECT_TRUE(isCodecProtocolError(status)); EXPECT_EQ(status.message(), "headers size exceeds limit"); diff --git a/test/integration/fake_upstream.h b/test/integration/fake_upstream.h index ef2a79c92cdb..b735399ade3d 100644 --- a/test/integration/fake_upstream.h +++ b/test/integration/fake_upstream.h @@ -85,8 +85,10 @@ class FakeStream : public Http::RequestDecoder, void sendLocalReply(bool is_grpc_request, Http::Code code, absl::string_view body, const std::function& /*modify_headers*/, - bool is_head_request, const absl::optional grpc_status, + const absl::optional grpc_status, absl::string_view /*details*/) override { + const bool is_head_request = + headers_ != nullptr && headers_->getMethodValue() == Http::Headers::get().MethodValues.Head; Http::Utility::sendLocalReply( false, Http::Utility::EncodeFunctions( diff --git a/test/mocks/http/stream_decoder.h b/test/mocks/http/stream_decoder.h index b822de460b9d..479eec77f9df 100644 --- a/test/mocks/http/stream_decoder.h +++ b/test/mocks/http/stream_decoder.h @@ -19,7 +19,7 @@ class MockRequestDecoder : public RequestDecoder { MOCK_METHOD(void, sendLocalReply, (bool is_grpc_request, Code code, absl::string_view body, const std::function& modify_headers, - bool is_head_request, const absl::optional grpc_status, + const absl::optional grpc_status, absl::string_view details)); void decodeHeaders(RequestHeaderMapPtr&& headers, bool end_stream) override { From 9b07ee8508698561231e1398b55abaea9687e2dd Mon Sep 17 00:00:00 2001 From: yanavlasov Date: Thu, 30 Jul 2020 14:33:34 -0400 Subject: [PATCH 797/909] Fix clang-tidy errors in the LinkedObject::moveIntoList* methods (#12378) Fix clang-tidy errors in the LinkedObject::moveIntoList* methods Signed-off-by: Yan Avlasov --- source/common/common/linked_object.h | 61 ++++++++++++------- source/common/conn_pool/conn_pool_base.cc | 2 +- source/common/grpc/async_client_impl.cc | 4 +- 
source/common/grpc/async_client_impl.h | 2 +- .../common/grpc/google_async_client_impl.cc | 4 +- source/common/grpc/google_async_client_impl.h | 2 +- source/common/http/async_client_impl.cc | 4 +- source/common/http/async_client_impl.h | 2 +- source/common/http/codec_client.cc | 2 +- source/common/http/conn_manager_impl.cc | 6 +- source/common/http/conn_pool_base.cc | 2 +- source/common/http/http2/codec_impl.cc | 4 +- source/common/http/http2/codec_impl_legacy.cc | 4 +- source/common/network/filter_manager_impl.cc | 4 +- source/common/router/router.cc | 6 +- source/common/tcp/conn_pool.h | 2 +- source/common/tcp/original_conn_pool.cc | 4 +- .../network/dubbo_proxy/active_message.cc | 4 +- .../network/dubbo_proxy/conn_manager.cc | 2 +- .../network/rocketmq_proxy/conn_manager.cc | 2 +- .../network/thrift_proxy/conn_manager.cc | 2 +- .../network/thrift_proxy/conn_manager.h | 2 +- source/server/connection_handler_impl.cc | 4 +- test/common/common/BUILD | 8 +++ test/common/common/linked_object_test.cc | 44 +++++++++++++ test/common/http/codec_impl_fuzz_test.cc | 4 +- .../network/rocketmq_proxy/router_test.cc | 2 +- test/integration/fake_upstream.cc | 2 +- 28 files changed, 130 insertions(+), 61 deletions(-) create mode 100644 test/common/common/linked_object_test.cc diff --git a/source/common/common/linked_object.h b/source/common/common/linked_object.h index 13fc6d491567..9c65e085b76f 100644 --- a/source/common/common/linked_object.h +++ b/source/common/common/linked_object.h @@ -6,6 +6,40 @@ #include "common/common/assert.h" namespace Envoy { + +/** + * Helper methods for placing LinkedObject into a list. + */ +namespace LinkedList { + +/** + * Move an item into a linked list at the front. + * @param item supplies the item to move in. + * @param list supplies the list to move the item into. + */ +template +void moveIntoList(std::unique_ptr&& item, std::list>& list) { + ASSERT(!item->inserted_); + item->inserted_ = true; + auto position = list.emplace(list.begin(), std::move(item)); + (*position)->entry_ = position; +} + +/** + * Move an item into a linked list at the back. + * @param item supplies the item to move in. + * @param list supplies the list to move the item into. + */ +template +void moveIntoListBack(std::unique_ptr&& item, std::list>& list) { + ASSERT(!item->inserted_); + item->inserted_ = true; + auto position = list.emplace(list.end(), std::move(item)); + (*position)->entry_ = position; +} + +} // namespace LinkedList + /** * Mixin class that allows an object contained in a unique pointer to be easily linked and unlinked * from lists. @@ -39,28 +73,6 @@ template class LinkedObject { dst.splice(dst.begin(), src, entry_); } - /** - * Move an item into a linked list at the front. - * @param item supplies the item to move in. - * @param list supplies the list to move the item into. - */ - void moveIntoList(std::unique_ptr&& item, ListType& list) { - ASSERT(!inserted_); - inserted_ = true; - entry_ = list.emplace(list.begin(), std::move(item)); - } - - /** - * Move an item into a linked list at the back. - * @param item supplies the item to move in. - * @param list supplies the list to move the item into. - */ - void moveIntoListBack(std::unique_ptr&& item, ListType& list) { - ASSERT(!inserted_); - inserted_ = true; - entry_ = list.emplace(list.end(), std::move(item)); - } - /** * Remove this item from a list. * @param list supplies the list to remove from. This item should be in this list. 
@@ -79,6 +91,11 @@ template class LinkedObject { LinkedObject() = default; private: + template + friend void LinkedList::moveIntoList(std::unique_ptr&&, std::list>&); + template + friend void LinkedList::moveIntoListBack(std::unique_ptr&&, std::list>&); + typename ListType::iterator entry_; bool inserted_{false}; // iterators do not have any "invalid" value so we need this boolean for // sanity checking. diff --git a/source/common/conn_pool/conn_pool_base.cc b/source/common/conn_pool/conn_pool_base.cc index 66a4a23fafce..e8319ab81619 100644 --- a/source/common/conn_pool/conn_pool_base.cc +++ b/source/common/conn_pool/conn_pool_base.cc @@ -90,7 +90,7 @@ bool ConnPoolImplBase::tryCreateNewConnection() { client->effectiveConcurrentRequestLimit()); ASSERT(client->real_host_description_); connecting_stream_capacity_ += client->effectiveConcurrentRequestLimit(); - client->moveIntoList(std::move(client), owningList(client->state_)); + LinkedList::moveIntoList(std::move(client), owningList(client->state_)); } return can_create_connection; } diff --git a/source/common/grpc/async_client_impl.cc b/source/common/grpc/async_client_impl.cc index c35a5fb60033..6df5339a91d4 100644 --- a/source/common/grpc/async_client_impl.cc +++ b/source/common/grpc/async_client_impl.cc @@ -38,7 +38,7 @@ AsyncRequest* AsyncClientImpl::sendRaw(absl::string_view service_full_name, return nullptr; } - grpc_stream->moveIntoList(std::move(grpc_stream), active_streams_); + LinkedList::moveIntoList(std::move(grpc_stream), active_streams_); return async_request; } @@ -54,7 +54,7 @@ RawAsyncStream* AsyncClientImpl::startRaw(absl::string_view service_full_name, return nullptr; } - grpc_stream->moveIntoList(std::move(grpc_stream), active_streams_); + LinkedList::moveIntoList(std::move(grpc_stream), active_streams_); return active_streams_.front().get(); } diff --git a/source/common/grpc/async_client_impl.h b/source/common/grpc/async_client_impl.h index 9b49826eb692..ac1b2f50ae97 100644 --- a/source/common/grpc/async_client_impl.h +++ b/source/common/grpc/async_client_impl.h @@ -48,7 +48,7 @@ class AsyncClientImpl final : public RawAsyncClient { class AsyncStreamImpl : public RawAsyncStream, Http::AsyncClient::StreamCallbacks, public Event::DeferredDeletable, - LinkedObject { + public LinkedObject { public: AsyncStreamImpl(AsyncClientImpl& parent, absl::string_view service_full_name, absl::string_view method_name, RawAsyncStreamCallbacks& callbacks, diff --git a/source/common/grpc/google_async_client_impl.cc b/source/common/grpc/google_async_client_impl.cc index fc31fbbc7a14..e4b329d3e67e 100644 --- a/source/common/grpc/google_async_client_impl.cc +++ b/source/common/grpc/google_async_client_impl.cc @@ -119,7 +119,7 @@ AsyncRequest* GoogleAsyncClientImpl::sendRaw(absl::string_view service_full_name return nullptr; } - grpc_stream->moveIntoList(std::move(grpc_stream), active_streams_); + LinkedList::moveIntoList(std::move(grpc_stream), active_streams_); return async_request; } @@ -135,7 +135,7 @@ RawAsyncStream* GoogleAsyncClientImpl::startRaw(absl::string_view service_full_n return nullptr; } - grpc_stream->moveIntoList(std::move(grpc_stream), active_streams_); + LinkedList::moveIntoList(std::move(grpc_stream), active_streams_); return active_streams_.front().get(); } diff --git a/source/common/grpc/google_async_client_impl.h b/source/common/grpc/google_async_client_impl.h index 5d339e7764a3..8e946ce5c0cb 100644 --- a/source/common/grpc/google_async_client_impl.h +++ b/source/common/grpc/google_async_client_impl.h @@ -210,7 +210,7 
@@ class GoogleAsyncClientImpl final : public RawAsyncClient, Logger::Loggable, - LinkedObject { + public LinkedObject { public: GoogleAsyncStreamImpl(GoogleAsyncClientImpl& parent, absl::string_view service_full_name, absl::string_view method_name, RawAsyncStreamCallbacks& callbacks, diff --git a/source/common/http/async_client_impl.cc b/source/common/http/async_client_impl.cc index 910955355892..a1eaecc78fc4 100644 --- a/source/common/http/async_client_impl.cc +++ b/source/common/http/async_client_impl.cc @@ -63,7 +63,7 @@ AsyncClient::Request* AsyncClientImpl::send(RequestMessagePtr&& request, // The request may get immediately failed. If so, we will return nullptr. if (!new_request->remote_closed_) { - new_request->moveIntoList(std::move(new_request), active_streams_); + LinkedList::moveIntoList(std::move(new_request), active_streams_); return async_request; } else { new_request->cleanup(); @@ -74,7 +74,7 @@ AsyncClient::Request* AsyncClientImpl::send(RequestMessagePtr&& request, AsyncClient::Stream* AsyncClientImpl::start(AsyncClient::StreamCallbacks& callbacks, const AsyncClient::StreamOptions& options) { std::unique_ptr new_stream{new AsyncStreamImpl(*this, callbacks, options)}; - new_stream->moveIntoList(std::move(new_stream), active_streams_); + LinkedList::moveIntoList(std::move(new_stream), active_streams_); return active_streams_.front().get(); } diff --git a/source/common/http/async_client_impl.h b/source/common/http/async_client_impl.h index 8a5826a49e5b..a4e2e7c86b84 100644 --- a/source/common/http/async_client_impl.h +++ b/source/common/http/async_client_impl.h @@ -76,7 +76,7 @@ class AsyncStreamImpl : public AsyncClient::Stream, public StreamDecoderFilterCallbacks, public Event::DeferredDeletable, Logger::Loggable, - LinkedObject, + public LinkedObject, public ScopeTrackedObject { public: AsyncStreamImpl(AsyncClientImpl& parent, AsyncClient::StreamCallbacks& callbacks, diff --git a/source/common/http/codec_client.cc b/source/common/http/codec_client.cc index e3fbc23ef921..2353eba5be36 100644 --- a/source/common/http/codec_client.cc +++ b/source/common/http/codec_client.cc @@ -67,7 +67,7 @@ RequestEncoder& CodecClient::newStream(ResponseDecoder& response_decoder) { ActiveRequestPtr request(new ActiveRequest(*this, response_decoder)); request->encoder_ = &codec_->newStream(*request); request->encoder_->getStream().addCallbacks(*request); - request->moveIntoList(std::move(request), active_requests_); + LinkedList::moveIntoList(std::move(request), active_requests_); disableIdleTimer(); return *active_requests_.front()->encoder_; } diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index 4c11cdd2d110..5878e526346c 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -256,7 +256,7 @@ RequestDecoder& ConnectionManagerImpl::newStream(ResponseEncoder& response_encod // Both HTTP/1.x and HTTP/2 codecs handle this in StreamCallbackHelper::addCallbacksHelper. ASSERT(read_callbacks_->connection().aboveHighWatermark() == false || new_stream->high_watermark_count_ > 0); - new_stream->moveIntoList(std::move(new_stream), streams_); + LinkedList::moveIntoList(std::move(new_stream), streams_); return **streams_.begin(); } @@ -715,7 +715,7 @@ void ConnectionManagerImpl::FilterManager::addStreamDecoderFilterWorker( // - B // - C // The decoder filter chain will iterate through filters A, B, C. 
- wrapper->moveIntoListBack(std::move(wrapper), decoder_filters_); + LinkedList::moveIntoListBack(std::move(wrapper), decoder_filters_); } void ConnectionManagerImpl::FilterManager::addStreamEncoderFilterWorker( @@ -730,7 +730,7 @@ void ConnectionManagerImpl::FilterManager::addStreamEncoderFilterWorker( // - B // - C // The encoder filter chain will iterate through filters C, B, A. - wrapper->moveIntoList(std::move(wrapper), encoder_filters_); + LinkedList::moveIntoList(std::move(wrapper), encoder_filters_); } void ConnectionManagerImpl::ActiveStream::addAccessLogHandler( diff --git a/source/common/http/conn_pool_base.cc b/source/common/http/conn_pool_base.cc index 7559b9e44fe3..6a3b2362a1ed 100644 --- a/source/common/http/conn_pool_base.cc +++ b/source/common/http/conn_pool_base.cc @@ -71,7 +71,7 @@ HttpConnPoolImplBase::newPendingRequest(Envoy::ConnectionPool::AttachContext& co ENVOY_LOG(debug, "queueing stream due to no available connections"); Envoy::ConnectionPool::PendingRequestPtr pending_stream( new HttpPendingRequest(*this, decoder, callbacks)); - pending_stream->moveIntoList(std::move(pending_stream), pending_streams_); + LinkedList::moveIntoList(std::move(pending_stream), pending_streams_); return pending_streams_.front().get(); } diff --git a/source/common/http/http2/codec_impl.cc b/source/common/http/http2/codec_impl.cc index 9e6b5140beaa..0405ec12a5c8 100644 --- a/source/common/http/http2/codec_impl.cc +++ b/source/common/http/http2/codec_impl.cc @@ -1247,7 +1247,7 @@ RequestEncoder& ClientConnectionImpl::newStream(ResponseDecoder& decoder) { stream->runHighWatermarkCallbacks(); } ClientStreamImpl& stream_ref = *stream; - stream->moveIntoList(std::move(stream), active_streams_); + LinkedList::moveIntoList(std::move(stream), active_streams_); return stream_ref; } @@ -1313,7 +1313,7 @@ int ServerConnectionImpl::onBeginHeaders(const nghttp2_frame* frame) { } stream->request_decoder_ = &callbacks_.newStream(*stream); stream->stream_id_ = frame->hd.stream_id; - stream->moveIntoList(std::move(stream), active_streams_); + LinkedList::moveIntoList(std::move(stream), active_streams_); nghttp2_session_set_stream_user_data(session_, frame->hd.stream_id, active_streams_.front().get()); return 0; diff --git a/source/common/http/http2/codec_impl_legacy.cc b/source/common/http/http2/codec_impl_legacy.cc index 50cac71a404a..9daec6c70efe 100644 --- a/source/common/http/http2/codec_impl_legacy.cc +++ b/source/common/http/http2/codec_impl_legacy.cc @@ -1251,7 +1251,7 @@ RequestEncoder& ClientConnectionImpl::newStream(ResponseDecoder& decoder) { stream->runHighWatermarkCallbacks(); } ClientStreamImpl& stream_ref = *stream; - stream->moveIntoList(std::move(stream), active_streams_); + LinkedList::moveIntoList(std::move(stream), active_streams_); return stream_ref; } @@ -1317,7 +1317,7 @@ int ServerConnectionImpl::onBeginHeaders(const nghttp2_frame* frame) { } stream->request_decoder_ = &callbacks_.newStream(*stream); stream->stream_id_ = frame->hd.stream_id; - stream->moveIntoList(std::move(stream), active_streams_); + LinkedList::moveIntoList(std::move(stream), active_streams_); nghttp2_session_set_stream_user_data(session_, frame->hd.stream_id, active_streams_.front().get()); return 0; diff --git a/source/common/network/filter_manager_impl.cc b/source/common/network/filter_manager_impl.cc index c083a56eb4ed..593abc098095 100644 --- a/source/common/network/filter_manager_impl.cc +++ b/source/common/network/filter_manager_impl.cc @@ -13,7 +13,7 @@ void 
FilterManagerImpl::addWriteFilter(WriteFilterSharedPtr filter) { ASSERT(connection_.state() == Connection::State::Open); ActiveWriteFilterPtr new_filter(new ActiveWriteFilter{*this, filter}); filter->initializeWriteFilterCallbacks(*new_filter); - new_filter->moveIntoList(std::move(new_filter), downstream_filters_); + LinkedList::moveIntoList(std::move(new_filter), downstream_filters_); } void FilterManagerImpl::addFilter(FilterSharedPtr filter) { @@ -25,7 +25,7 @@ void FilterManagerImpl::addReadFilter(ReadFilterSharedPtr filter) { ASSERT(connection_.state() == Connection::State::Open); ActiveReadFilterPtr new_filter(new ActiveReadFilter{*this, filter}); filter->initializeReadFilterCallbacks(*new_filter); - new_filter->moveIntoListBack(std::move(new_filter), upstream_filters_); + LinkedList::moveIntoListBack(std::move(new_filter), upstream_filters_); } bool FilterManagerImpl::initializeReadFilters() { diff --git a/source/common/router/router.cc b/source/common/router/router.cc index fcf328b6feb2..88db133d6e2c 100644 --- a/source/common/router/router.cc +++ b/source/common/router/router.cc @@ -589,7 +589,7 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, UpstreamRequestPtr upstream_request = std::make_unique(*this, std::move(generic_conn_pool)); - upstream_request->moveIntoList(std::move(upstream_request), upstream_requests_); + LinkedList::moveIntoList(std::move(upstream_request), upstream_requests_); upstream_requests_.front()->encodeHeaders(end_stream); if (end_stream) { onRequestComplete(); @@ -1169,7 +1169,7 @@ void Filter::resetOtherUpstreams(UpstreamRequest& upstream_request) { ASSERT(final_upstream_request); // Now put the final request back on this list. - final_upstream_request->moveIntoList(std::move(final_upstream_request), upstream_requests_); + LinkedList::moveIntoList(std::move(final_upstream_request), upstream_requests_); } void Filter::onUpstreamHeaders(uint64_t response_code, Http::ResponseHeaderMapPtr&& headers, @@ -1551,7 +1551,7 @@ void Filter::doRetry() { } UpstreamRequest* upstream_request_tmp = upstream_request.get(); - upstream_request->moveIntoList(std::move(upstream_request), upstream_requests_); + LinkedList::moveIntoList(std::move(upstream_request), upstream_requests_); upstream_requests_.front()->encodeHeaders(!callbacks_->decodingBuffer() && !downstream_trailers_ && downstream_end_stream_); // It's possible we got immediately reset which means the upstream request we just diff --git a/source/common/tcp/conn_pool.h b/source/common/tcp/conn_pool.h index 91918b07be76..8b6c5e6e983d 100644 --- a/source/common/tcp/conn_pool.h +++ b/source/common/tcp/conn_pool.h @@ -163,7 +163,7 @@ class ConnPoolImpl : public Envoy::ConnectionPool::ConnPoolImplBase, newPendingRequest(Envoy::ConnectionPool::AttachContext& context) override { Envoy::ConnectionPool::PendingRequestPtr pending_stream = std::make_unique(*this, typedContext(context)); - pending_stream->moveIntoList(std::move(pending_stream), pending_streams_); + LinkedList::moveIntoList(std::move(pending_stream), pending_streams_); return pending_streams_.front().get(); } diff --git a/source/common/tcp/original_conn_pool.cc b/source/common/tcp/original_conn_pool.cc index 378a3314f7d2..b34c31280f89 100644 --- a/source/common/tcp/original_conn_pool.cc +++ b/source/common/tcp/original_conn_pool.cc @@ -97,7 +97,7 @@ void OriginalConnPoolImpl::checkForDrained() { void OriginalConnPoolImpl::createNewConnection() { ENVOY_LOG(debug, "creating a new connection"); ActiveConnPtr conn(new 
ActiveConn(*this)); - conn->moveIntoList(std::move(conn), pending_conns_); + LinkedList::moveIntoList(std::move(conn), pending_conns_); } ConnectionPool::Cancellable* @@ -124,7 +124,7 @@ OriginalConnPoolImpl::newConnection(ConnectionPool::Callbacks& callbacks) { ENVOY_LOG(debug, "queueing request due to no available connections"); PendingRequestPtr pending_request(new PendingRequest(*this, callbacks)); - pending_request->moveIntoList(std::move(pending_request), pending_requests_); + LinkedList::moveIntoList(std::move(pending_request), pending_requests_); return pending_requests_.front().get(); } else { ENVOY_LOG(debug, "max pending requests overflow"); diff --git a/source/extensions/filters/network/dubbo_proxy/active_message.cc b/source/extensions/filters/network/dubbo_proxy/active_message.cc index 1baaf5771c92..d4af36ae17b1 100644 --- a/source/extensions/filters/network/dubbo_proxy/active_message.cc +++ b/source/extensions/filters/network/dubbo_proxy/active_message.cc @@ -450,14 +450,14 @@ void ActiveMessage::addDecoderFilterWorker(DubboFilters::DecoderFilterSharedPtr ActiveMessageDecoderFilterPtr wrapper = std::make_unique(*this, filter, dual_filter); filter->setDecoderFilterCallbacks(*wrapper); - wrapper->moveIntoListBack(std::move(wrapper), decoder_filters_); + LinkedList::moveIntoListBack(std::move(wrapper), decoder_filters_); } void ActiveMessage::addEncoderFilterWorker(DubboFilters::EncoderFilterSharedPtr filter, bool dual_filter) { ActiveMessageEncoderFilterPtr wrapper = std::make_unique(*this, filter, dual_filter); filter->setEncoderFilterCallbacks(*wrapper); - wrapper->moveIntoListBack(std::move(wrapper), encoder_filters_); + LinkedList::moveIntoListBack(std::move(wrapper), encoder_filters_); } void ActiveMessage::onReset() { parent_.deferredMessage(*this); } diff --git a/source/extensions/filters/network/dubbo_proxy/conn_manager.cc b/source/extensions/filters/network/dubbo_proxy/conn_manager.cc index 31d91e3897a8..a21f6350bcae 100644 --- a/source/extensions/filters/network/dubbo_proxy/conn_manager.cc +++ b/source/extensions/filters/network/dubbo_proxy/conn_manager.cc @@ -83,7 +83,7 @@ StreamHandler& ConnectionManager::newStream() { ActiveMessagePtr new_message(std::make_unique(*this)); new_message->createFilterChain(); - new_message->moveIntoList(std::move(new_message), active_message_list_); + LinkedList::moveIntoList(std::move(new_message), active_message_list_); return **active_message_list_.begin(); } diff --git a/source/extensions/filters/network/rocketmq_proxy/conn_manager.cc b/source/extensions/filters/network/rocketmq_proxy/conn_manager.cc index a613998d53a0..0748f80476ff 100644 --- a/source/extensions/filters/network/rocketmq_proxy/conn_manager.cc +++ b/source/extensions/filters/network/rocketmq_proxy/conn_manager.cc @@ -343,7 +343,7 @@ ActiveMessage& ConnectionManager::createActiveMessage(RemotingCommandPtr& reques ENVOY_CONN_LOG(trace, "ConnectionManager#createActiveMessage. 
Code: {}, opaque: {}", read_callbacks_->connection(), request->code(), request->opaque()); ActiveMessagePtr active_message = std::make_unique(*this, std::move(request)); - active_message->moveIntoList(std::move(active_message), active_message_list_); + LinkedList::moveIntoList(std::move(active_message), active_message_list_); return **active_message_list_.begin(); } diff --git a/source/extensions/filters/network/thrift_proxy/conn_manager.cc b/source/extensions/filters/network/thrift_proxy/conn_manager.cc index 09cd67aa67a7..737e70736978 100644 --- a/source/extensions/filters/network/thrift_proxy/conn_manager.cc +++ b/source/extensions/filters/network/thrift_proxy/conn_manager.cc @@ -171,7 +171,7 @@ DecoderEventHandler& ConnectionManager::newDecoderEventHandler() { ActiveRpcPtr new_rpc(new ActiveRpc(*this)); new_rpc->createFilterChain(); - new_rpc->moveIntoList(std::move(new_rpc), rpcs_); + LinkedList::moveIntoList(std::move(new_rpc), rpcs_); return **rpcs_.begin(); } diff --git a/source/extensions/filters/network/thrift_proxy/conn_manager.h b/source/extensions/filters/network/thrift_proxy/conn_manager.h index b06476a9dde6..b7408e1a3def 100644 --- a/source/extensions/filters/network/thrift_proxy/conn_manager.h +++ b/source/extensions/filters/network/thrift_proxy/conn_manager.h @@ -222,7 +222,7 @@ class ConnectionManager : public Network::ReadFilter, void addDecoderFilter(ThriftFilters::DecoderFilterSharedPtr filter) override { ActiveRpcDecoderFilterPtr wrapper = std::make_unique(*this, filter); filter->setDecoderFilterCallbacks(*wrapper); - wrapper->moveIntoListBack(std::move(wrapper), decoder_filters_); + LinkedList::moveIntoListBack(std::move(wrapper), decoder_filters_); } FilterStatus applyDecoderFilters(ActiveRpcDecoderFilter* filter); diff --git a/source/server/connection_handler_impl.cc b/source/server/connection_handler_impl.cc index 5aaf9b7708c0..b4ce4de04bd0 100644 --- a/source/server/connection_handler_impl.cc +++ b/source/server/connection_handler_impl.cc @@ -362,7 +362,7 @@ void ConnectionHandlerImpl::ActiveTcpListener::onAcceptWorker( // Otherwise we let active_socket be destructed when it goes out of scope. 
if (active_socket->iter_ != active_socket->accept_filters_.end()) { active_socket->startTimer(); - active_socket->moveIntoListBack(std::move(active_socket), sockets_); + LinkedList::moveIntoListBack(std::move(active_socket), sockets_); } } @@ -422,7 +422,7 @@ void ConnectionHandlerImpl::ActiveTcpListener::newConnection( if (active_connection->connection_->state() != Network::Connection::State::Closed) { ENVOY_CONN_LOG(debug, "new connection", *active_connection->connection_); active_connection->connection_->addConnectionCallbacks(*active_connection); - active_connection->moveIntoList(std::move(active_connection), active_connections.connections_); + LinkedList::moveIntoList(std::move(active_connection), active_connections.connections_); } } diff --git a/test/common/common/BUILD b/test/common/common/BUILD index dc9a1a334145..94eb85adf914 100644 --- a/test/common/common/BUILD +++ b/test/common/common/BUILD @@ -102,6 +102,14 @@ envoy_cc_test( deps = ["//source/common/common:hex_lib"], ) +envoy_cc_test( + name = "linked_object_test", + srcs = ["linked_object_test.cc"], + deps = [ + "//source/common/common:linked_object", + ], +) + envoy_cc_test( name = "log_macros_test", srcs = ["log_macros_test.cc"], diff --git a/test/common/common/linked_object_test.cc b/test/common/common/linked_object_test.cc new file mode 100644 index 000000000000..351f2f340d3f --- /dev/null +++ b/test/common/common/linked_object_test.cc @@ -0,0 +1,44 @@ +#include "common/common/linked_object.h" + +#include "gtest/gtest.h" + +namespace Envoy { + +class TestObject : public LinkedObject { +public: + TestObject() = default; +}; + +TEST(LinkedObjectTest, MoveIntoListFront) { + std::list> list; + auto object = std::make_unique(); + TestObject* object_ptr = object.get(); + LinkedList::moveIntoList(std::move(object), list); + ASSERT_EQ(1, list.size()); + ASSERT_EQ(object_ptr, list.front().get()); + + auto object2 = std::make_unique(); + TestObject* object2_ptr = object2.get(); + LinkedList::moveIntoList(std::move(object2), list); + ASSERT_EQ(2, list.size()); + ASSERT_EQ(object2_ptr, list.front().get()); + ASSERT_EQ(object_ptr, list.back().get()); +} + +TEST(LinkedObjectTest, MoveIntoListBack) { + std::list> list; + std::unique_ptr object = std::make_unique(); + TestObject* object_ptr = object.get(); + LinkedList::moveIntoListBack(std::move(object), list); + ASSERT_EQ(1, list.size()); + ASSERT_EQ(object_ptr, list.front().get()); + + auto object2 = std::make_unique(); + TestObject* object2_ptr = object2.get(); + LinkedList::moveIntoListBack(std::move(object2), list); + ASSERT_EQ(2, list.size()); + ASSERT_EQ(object2_ptr, list.back().get()); + ASSERT_EQ(object_ptr, list.front().get()); +} + +} // namespace Envoy diff --git a/test/common/http/codec_impl_fuzz_test.cc b/test/common/http/codec_impl_fuzz_test.cc index 492d2889aa8f..9c4534a0fdc4 100644 --- a/test/common/http/codec_impl_fuzz_test.cc +++ b/test/common/http/codec_impl_fuzz_test.cc @@ -523,7 +523,7 @@ void codecFuzz(const test::common::http::CodecImplFuzzTestCase& input, HttpVersi } auto stream_ptr = pending_streams.front()->removeFromList(pending_streams); HttpStream* const stream = stream_ptr.get(); - stream_ptr->moveIntoListBack(std::move(stream_ptr), streams); + LinkedList::moveIntoListBack(std::move(stream_ptr), streams); stream->response_.response_encoder_ = &encoder; encoder.getStream().addCallbacks(stream->response_.stream_callbacks_); stream->stream_index_ = streams.size() - 1; @@ -580,7 +580,7 @@ void codecFuzz(const test::common::http::CodecImplFuzzTestCase& input, 
HttpVersi should_close_connection = true; } }); - stream->moveIntoListBack(std::move(stream), pending_streams); + LinkedList::moveIntoListBack(std::move(stream), pending_streams); break; } case test::common::http::Action::kStreamAction: { diff --git a/test/extensions/filters/network/rocketmq_proxy/router_test.cc b/test/extensions/filters/network/rocketmq_proxy/router_test.cc index 95d74a527dc7..6a5c3c2336b2 100644 --- a/test/extensions/filters/network/rocketmq_proxy/router_test.cc +++ b/test/extensions/filters/network/rocketmq_proxy/router_test.cc @@ -383,7 +383,7 @@ TEST_F(RocketmqRouterTest, ReceivedSendMessageResponseWithDecodeError) { })); EXPECT_CALL(*active_message_, onReset()); - active_message_->moveIntoList(std::move(active_message_), conn_manager_->activeMessageList()); + LinkedList::moveIntoList(std::move(active_message_), conn_manager_->activeMessageList()); router_->onUpstreamData(buffer, false); } diff --git a/test/integration/fake_upstream.cc b/test/integration/fake_upstream.cc index f01ca8a8ca26..c2b550405b1d 100644 --- a/test/integration/fake_upstream.cc +++ b/test/integration/fake_upstream.cc @@ -519,7 +519,7 @@ bool FakeUpstream::createNetworkFilterChain(Network::Connection& connection, } auto connection_wrapper = std::make_unique(connection, allow_unexpected_disconnects_); - connection_wrapper->moveIntoListBack(std::move(connection_wrapper), new_connections_); + LinkedList::moveIntoListBack(std::move(connection_wrapper), new_connections_); upstream_event_.notifyOne(); return true; } From 9829e953941a8a9be29bbe202084e02e8c78dc65 Mon Sep 17 00:00:00 2001 From: asraa Date: Thu, 30 Jul 2020 14:43:16 -0400 Subject: [PATCH 798/909] fix merge snafu (#12380) Signed-off-by: Asra Ali --- source/common/http/conn_manager_impl.cc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index 5878e526346c..5c4c91563a98 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -693,10 +693,10 @@ void ConnectionManagerImpl::ActiveStream::onStreamMaxDurationReached() { ENVOY_STREAM_LOG(debug, "Stream max duration time reached", *this); connection_manager_.stats_.named_.downstream_rq_max_duration_reached_.inc(); if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.allow_response_for_timeout")) { - sendLocalReply( - request_headers_ != nullptr && Grpc::Common::isGrpcRequestHeaders(*request_headers_), - Http::Code::RequestTimeout, "downstream duration timeout", nullptr, state_.is_head_request_, - absl::nullopt, StreamInfo::ResponseCodeDetails::get().MaxDurationTimeout); + sendLocalReply(request_headers_ != nullptr && + Grpc::Common::isGrpcRequestHeaders(*request_headers_), + Http::Code::RequestTimeout, "downstream duration timeout", nullptr, + absl::nullopt, StreamInfo::ResponseCodeDetails::get().MaxDurationTimeout); } else { stream_info_.setResponseCodeDetails(StreamInfo::ResponseCodeDetails::get().MaxDurationTimeout); connection_manager_.doEndStream(*this); From 26eaa2e85cee69e5c32ab6bf4c5ae3d338fa462f Mon Sep 17 00:00:00 2001 From: John Plevyak Date: Thu, 30 Jul 2020 11:59:31 -0700 Subject: [PATCH 799/909] Upstream Wasm proto from envoy-wasm. (#12159) Description: Upstream Wasm proto files from envoy-wasm. Risk Level: Low Testing: Unit tests in envoy-wasm, integration tests in istio/proxy. 
Docs Changes: N/A Release Notes: N/A Signed-off-by: John Plevyak --- api/BUILD | 3 ++ .../extensions/access_loggers/wasm/v3/BUILD | 12 +++++++ .../access_loggers/wasm/v3/wasm.proto | 20 +++++++++++ .../extensions/filters/http/wasm/v3/BUILD | 12 +++++++ .../filters/http/wasm/v3/wasm.proto | 20 +++++++++++ .../extensions/filters/network/wasm/v3/BUILD | 12 +++++++ .../filters/network/wasm/v3/wasm.proto | 20 +++++++++++ api/envoy/extensions/wasm/v3/wasm.proto | 35 ++++++++++++------- api/versioning/BUILD | 3 ++ docs/root/api-v3/config/config.rst | 1 + docs/root/api-v3/config/wasm/wasm.rst | 8 +++++ .../extensions/access_loggers/wasm/v3/BUILD | 12 +++++++ .../access_loggers/wasm/v3/wasm.proto | 20 +++++++++++ .../extensions/filters/http/wasm/v3/BUILD | 12 +++++++ .../filters/http/wasm/v3/wasm.proto | 20 +++++++++++ .../extensions/filters/network/wasm/v3/BUILD | 12 +++++++ .../filters/network/wasm/v3/wasm.proto | 20 +++++++++++ .../envoy/extensions/wasm/v3/wasm.proto | 35 ++++++++++++------- 18 files changed, 253 insertions(+), 24 deletions(-) create mode 100644 api/envoy/extensions/access_loggers/wasm/v3/BUILD create mode 100644 api/envoy/extensions/access_loggers/wasm/v3/wasm.proto create mode 100644 api/envoy/extensions/filters/http/wasm/v3/BUILD create mode 100644 api/envoy/extensions/filters/http/wasm/v3/wasm.proto create mode 100644 api/envoy/extensions/filters/network/wasm/v3/BUILD create mode 100644 api/envoy/extensions/filters/network/wasm/v3/wasm.proto create mode 100644 docs/root/api-v3/config/wasm/wasm.rst create mode 100644 generated_api_shadow/envoy/extensions/access_loggers/wasm/v3/BUILD create mode 100644 generated_api_shadow/envoy/extensions/access_loggers/wasm/v3/wasm.proto create mode 100644 generated_api_shadow/envoy/extensions/filters/http/wasm/v3/BUILD create mode 100644 generated_api_shadow/envoy/extensions/filters/http/wasm/v3/wasm.proto create mode 100644 generated_api_shadow/envoy/extensions/filters/network/wasm/v3/BUILD create mode 100644 generated_api_shadow/envoy/extensions/filters/network/wasm/v3/wasm.proto diff --git a/api/BUILD b/api/BUILD index 8c608fdeca4a..3ac2738ebc3e 100644 --- a/api/BUILD +++ b/api/BUILD @@ -154,6 +154,7 @@ proto_library( "//envoy/data/tap/v3:pkg", "//envoy/extensions/access_loggers/file/v3:pkg", "//envoy/extensions/access_loggers/grpc/v3:pkg", + "//envoy/extensions/access_loggers/wasm/v3:pkg", "//envoy/extensions/clusters/aggregate/v3:pkg", "//envoy/extensions/clusters/dynamic_forward_proxy/v3:pkg", "//envoy/extensions/clusters/redis/v3:pkg", @@ -195,6 +196,7 @@ proto_library( "//envoy/extensions/filters/http/router/v3:pkg", "//envoy/extensions/filters/http/squash/v3:pkg", "//envoy/extensions/filters/http/tap/v3:pkg", + "//envoy/extensions/filters/http/wasm/v3:pkg", "//envoy/extensions/filters/listener/http_inspector/v3:pkg", "//envoy/extensions/filters/listener/original_dst/v3:pkg", "//envoy/extensions/filters/listener/original_src/v3:pkg", @@ -221,6 +223,7 @@ proto_library( "//envoy/extensions/filters/network/tcp_proxy/v3:pkg", "//envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3:pkg", "//envoy/extensions/filters/network/thrift_proxy/v3:pkg", + "//envoy/extensions/filters/network/wasm/v3:pkg", "//envoy/extensions/filters/network/zookeeper_proxy/v3:pkg", "//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg", "//envoy/extensions/filters/udp/udp_proxy/v3:pkg", diff --git a/api/envoy/extensions/access_loggers/wasm/v3/BUILD b/api/envoy/extensions/access_loggers/wasm/v3/BUILD new file mode 100644 index 
000000000000..8bad369e3511 --- /dev/null +++ b/api/envoy/extensions/access_loggers/wasm/v3/BUILD @@ -0,0 +1,12 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/extensions/wasm/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/access_loggers/wasm/v3/wasm.proto b/api/envoy/extensions/access_loggers/wasm/v3/wasm.proto new file mode 100644 index 000000000000..cd9db5906436 --- /dev/null +++ b/api/envoy/extensions/access_loggers/wasm/v3/wasm.proto @@ -0,0 +1,20 @@ +syntax = "proto3"; + +package envoy.extensions.access_loggers.wasm.v3; + +import "envoy/extensions/wasm/v3/wasm.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.access_loggers.wasm.v3"; +option java_outer_classname = "WasmProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [[#not-implemented-hide:] +// Custom configuration for an :ref:`AccessLog ` +// that calls into a WASM VM. +message WasmAccessLog { + envoy.extensions.wasm.v3.PluginConfig config = 1; +} diff --git a/api/envoy/extensions/filters/http/wasm/v3/BUILD b/api/envoy/extensions/filters/http/wasm/v3/BUILD new file mode 100644 index 000000000000..8bad369e3511 --- /dev/null +++ b/api/envoy/extensions/filters/http/wasm/v3/BUILD @@ -0,0 +1,12 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/extensions/wasm/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/filters/http/wasm/v3/wasm.proto b/api/envoy/extensions/filters/http/wasm/v3/wasm.proto new file mode 100644 index 000000000000..a812992a5b84 --- /dev/null +++ b/api/envoy/extensions/filters/http/wasm/v3/wasm.proto @@ -0,0 +1,20 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.wasm.v3; + +import "envoy/extensions/wasm/v3/wasm.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.wasm.v3"; +option java_outer_classname = "WasmProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [[#not-implemented-hide:] +message Wasm { + // General Plugin configuration. + envoy.extensions.wasm.v3.PluginConfig config = 1; +} diff --git a/api/envoy/extensions/filters/network/wasm/v3/BUILD b/api/envoy/extensions/filters/network/wasm/v3/BUILD new file mode 100644 index 000000000000..8bad369e3511 --- /dev/null +++ b/api/envoy/extensions/filters/network/wasm/v3/BUILD @@ -0,0 +1,12 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/extensions/wasm/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/filters/network/wasm/v3/wasm.proto b/api/envoy/extensions/filters/network/wasm/v3/wasm.proto new file mode 100644 index 000000000000..131582762b59 --- /dev/null +++ b/api/envoy/extensions/filters/network/wasm/v3/wasm.proto @@ -0,0 +1,20 @@ +syntax = "proto3"; + +package envoy.extensions.filters.network.wasm.v3; + +import "envoy/extensions/wasm/v3/wasm.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.wasm.v3"; +option java_outer_classname = "WasmProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [[#not-implemented-hide:] +message Wasm { + // General Plugin configuration. + envoy.extensions.wasm.v3.PluginConfig config = 1; +} diff --git a/api/envoy/extensions/wasm/v3/wasm.proto b/api/envoy/extensions/wasm/v3/wasm.proto index 73b7959cd95d..26f458214466 100644 --- a/api/envoy/extensions/wasm/v3/wasm.proto +++ b/api/envoy/extensions/wasm/v3/wasm.proto @@ -15,11 +15,11 @@ option java_outer_classname = "WasmProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; -// [#protodoc-title: Wasm service] +// [#protodoc-title: Wasm] +// [[#not-implemented-hide:] // Configuration for a Wasm VM. -// [#next-free-field: 6] -// [#not-implemented-hide:] pending implementation. +// [#next-free-field: 7] message VmConfig { // An ID which will be used along with a hash of the wasm code (or the name of the registered Null // VM plugin) to determine which VM will be used for the plugin. All plugins which use the same @@ -44,21 +44,26 @@ message VmConfig { // Warning: this should only be enable for trusted sources as the precompiled code is not // verified. bool allow_precompiled = 5; + + // If true and the code needs to be remotely fetched and it is not in the cache then NACK the configuration + // update and do a background fetch to fill the cache, otherwise fetch the code asynchronously and enter + // warming state. + bool nack_on_code_cache_miss = 6; } +// [[#not-implemented-hide:] // Base Configuration for Wasm Plugins e.g. filters and services. // [#next-free-field: 6] -// [#not-implemented-hide:] pending implementation. message PluginConfig { // A unique name for a filters/services in a VM for use in identifying the filter/service if - // multiple filters/services are handled by the same *vm_id* and *group_name* and for + // multiple filters/services are handled by the same *vm_id* and *root_id* and for // logging/debugging. string name = 1; // A unique ID for a set of filters/services in a VM which will share a RootContext and Contexts // if applicable (e.g. an Wasm HttpFilter and an Wasm AccessLog). If left blank, all - // filters/services with a blank group_name with the same *vm_id* will share Context(s). - string group_name = 2; + // filters/services with a blank root_id with the same *vm_id* will share Context(s). + string root_id = 2; // Configuration for finding or starting VM. oneof vm_config { @@ -71,13 +76,19 @@ message PluginConfig { // `google.protobuf.Struct` is serialized as JSON before // passing it to the plugin. 
`google.protobuf.BytesValue` and // `google.protobuf.StringValue` are passed directly without the wrapper. - google.protobuf.Any configuration = 5; + google.protobuf.Any configuration = 4; + + // If there is a fatal error on the VM (e.g. exception, abort(), on_start or on_configure return false), + // then all plugins associated with the VM will either fail closed (by default), e.g. by returning an HTTP 503 error, + // or fail open (if 'fail_open' is set to true) by bypassing the filter. Note: when on_start or on_configure return false + // during xDS updates the xDS configuration will be rejected and when on_start or on_configuration return false on initial + // startup the proxy will not start. + bool fail_open = 5; } -// WasmService is configured as a built-in *envoy.wasm_service* :ref:`ServiceConfig -// `. This opaque configuration will be used to -// create a Wasm Service. -// [#not-implemented-hide:] pending implementation. +// [[#not-implemented-hide:] +// WasmService is configured as a built-in *envoy.wasm_service* :ref:`WasmService +// ` This opaque configuration will be used to create a Wasm Service. message WasmService { // General plugin configuration. PluginConfig config = 1; diff --git a/api/versioning/BUILD b/api/versioning/BUILD index 305f09df3cae..950594d7213e 100644 --- a/api/versioning/BUILD +++ b/api/versioning/BUILD @@ -37,6 +37,7 @@ proto_library( "//envoy/data/tap/v3:pkg", "//envoy/extensions/access_loggers/file/v3:pkg", "//envoy/extensions/access_loggers/grpc/v3:pkg", + "//envoy/extensions/access_loggers/wasm/v3:pkg", "//envoy/extensions/clusters/aggregate/v3:pkg", "//envoy/extensions/clusters/dynamic_forward_proxy/v3:pkg", "//envoy/extensions/clusters/redis/v3:pkg", @@ -78,6 +79,7 @@ proto_library( "//envoy/extensions/filters/http/router/v3:pkg", "//envoy/extensions/filters/http/squash/v3:pkg", "//envoy/extensions/filters/http/tap/v3:pkg", + "//envoy/extensions/filters/http/wasm/v3:pkg", "//envoy/extensions/filters/listener/http_inspector/v3:pkg", "//envoy/extensions/filters/listener/original_dst/v3:pkg", "//envoy/extensions/filters/listener/original_src/v3:pkg", @@ -104,6 +106,7 @@ proto_library( "//envoy/extensions/filters/network/tcp_proxy/v3:pkg", "//envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3:pkg", "//envoy/extensions/filters/network/thrift_proxy/v3:pkg", + "//envoy/extensions/filters/network/wasm/v3:pkg", "//envoy/extensions/filters/network/zookeeper_proxy/v3:pkg", "//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg", "//envoy/extensions/filters/udp/udp_proxy/v3:pkg", diff --git a/docs/root/api-v3/config/config.rst b/docs/root/api-v3/config/config.rst index 23518f83cc19..536bd9468979 100644 --- a/docs/root/api-v3/config/config.rst +++ b/docs/root/api-v3/config/config.rst @@ -20,3 +20,4 @@ Extensions internal_redirect/internal_redirect endpoint/endpoint upstream/upstream + wasm/wasm diff --git a/docs/root/api-v3/config/wasm/wasm.rst b/docs/root/api-v3/config/wasm/wasm.rst new file mode 100644 index 000000000000..efdb96212478 --- /dev/null +++ b/docs/root/api-v3/config/wasm/wasm.rst @@ -0,0 +1,8 @@ +WASM +==== + +.. toctree:: + :glob: + :maxdepth: 2 + + ../../extensions/wasm/v3/* diff --git a/generated_api_shadow/envoy/extensions/access_loggers/wasm/v3/BUILD b/generated_api_shadow/envoy/extensions/access_loggers/wasm/v3/BUILD new file mode 100644 index 000000000000..8bad369e3511 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/access_loggers/wasm/v3/BUILD @@ -0,0 +1,12 @@ +# DO NOT EDIT. 
This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/extensions/wasm/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/access_loggers/wasm/v3/wasm.proto b/generated_api_shadow/envoy/extensions/access_loggers/wasm/v3/wasm.proto new file mode 100644 index 000000000000..cd9db5906436 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/access_loggers/wasm/v3/wasm.proto @@ -0,0 +1,20 @@ +syntax = "proto3"; + +package envoy.extensions.access_loggers.wasm.v3; + +import "envoy/extensions/wasm/v3/wasm.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.access_loggers.wasm.v3"; +option java_outer_classname = "WasmProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [[#not-implemented-hide:] +// Custom configuration for an :ref:`AccessLog ` +// that calls into a WASM VM. +message WasmAccessLog { + envoy.extensions.wasm.v3.PluginConfig config = 1; +} diff --git a/generated_api_shadow/envoy/extensions/filters/http/wasm/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/wasm/v3/BUILD new file mode 100644 index 000000000000..8bad369e3511 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/http/wasm/v3/BUILD @@ -0,0 +1,12 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/extensions/wasm/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/filters/http/wasm/v3/wasm.proto b/generated_api_shadow/envoy/extensions/filters/http/wasm/v3/wasm.proto new file mode 100644 index 000000000000..a812992a5b84 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/http/wasm/v3/wasm.proto @@ -0,0 +1,20 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.wasm.v3; + +import "envoy/extensions/wasm/v3/wasm.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.wasm.v3"; +option java_outer_classname = "WasmProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [[#not-implemented-hide:] +message Wasm { + // General Plugin configuration. + envoy.extensions.wasm.v3.PluginConfig config = 1; +} diff --git a/generated_api_shadow/envoy/extensions/filters/network/wasm/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/network/wasm/v3/BUILD new file mode 100644 index 000000000000..8bad369e3511 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/network/wasm/v3/BUILD @@ -0,0 +1,12 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/extensions/wasm/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/extensions/filters/network/wasm/v3/wasm.proto b/generated_api_shadow/envoy/extensions/filters/network/wasm/v3/wasm.proto new file mode 100644 index 000000000000..131582762b59 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/filters/network/wasm/v3/wasm.proto @@ -0,0 +1,20 @@ +syntax = "proto3"; + +package envoy.extensions.filters.network.wasm.v3; + +import "envoy/extensions/wasm/v3/wasm.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.wasm.v3"; +option java_outer_classname = "WasmProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [[#not-implemented-hide:] +message Wasm { + // General Plugin configuration. + envoy.extensions.wasm.v3.PluginConfig config = 1; +} diff --git a/generated_api_shadow/envoy/extensions/wasm/v3/wasm.proto b/generated_api_shadow/envoy/extensions/wasm/v3/wasm.proto index 73b7959cd95d..26f458214466 100644 --- a/generated_api_shadow/envoy/extensions/wasm/v3/wasm.proto +++ b/generated_api_shadow/envoy/extensions/wasm/v3/wasm.proto @@ -15,11 +15,11 @@ option java_outer_classname = "WasmProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; -// [#protodoc-title: Wasm service] +// [#protodoc-title: Wasm] +// [[#not-implemented-hide:] // Configuration for a Wasm VM. -// [#next-free-field: 6] -// [#not-implemented-hide:] pending implementation. +// [#next-free-field: 7] message VmConfig { // An ID which will be used along with a hash of the wasm code (or the name of the registered Null // VM plugin) to determine which VM will be used for the plugin. All plugins which use the same @@ -44,21 +44,26 @@ message VmConfig { // Warning: this should only be enable for trusted sources as the precompiled code is not // verified. bool allow_precompiled = 5; + + // If true and the code needs to be remotely fetched and it is not in the cache then NACK the configuration + // update and do a background fetch to fill the cache, otherwise fetch the code asynchronously and enter + // warming state. + bool nack_on_code_cache_miss = 6; } +// [[#not-implemented-hide:] // Base Configuration for Wasm Plugins e.g. filters and services. // [#next-free-field: 6] -// [#not-implemented-hide:] pending implementation. message PluginConfig { // A unique name for a filters/services in a VM for use in identifying the filter/service if - // multiple filters/services are handled by the same *vm_id* and *group_name* and for + // multiple filters/services are handled by the same *vm_id* and *root_id* and for // logging/debugging. string name = 1; // A unique ID for a set of filters/services in a VM which will share a RootContext and Contexts // if applicable (e.g. an Wasm HttpFilter and an Wasm AccessLog). If left blank, all - // filters/services with a blank group_name with the same *vm_id* will share Context(s). - string group_name = 2; + // filters/services with a blank root_id with the same *vm_id* will share Context(s). + string root_id = 2; // Configuration for finding or starting VM. 
oneof vm_config { @@ -71,13 +76,19 @@ message PluginConfig { // `google.protobuf.Struct` is serialized as JSON before // passing it to the plugin. `google.protobuf.BytesValue` and // `google.protobuf.StringValue` are passed directly without the wrapper. - google.protobuf.Any configuration = 5; + google.protobuf.Any configuration = 4; + + // If there is a fatal error on the VM (e.g. exception, abort(), on_start or on_configure return false), + // then all plugins associated with the VM will either fail closed (by default), e.g. by returning an HTTP 503 error, + // or fail open (if 'fail_open' is set to true) by bypassing the filter. Note: when on_start or on_configure return false + // during xDS updates the xDS configuration will be rejected and when on_start or on_configuration return false on initial + // startup the proxy will not start. + bool fail_open = 5; } -// WasmService is configured as a built-in *envoy.wasm_service* :ref:`ServiceConfig -// `. This opaque configuration will be used to -// create a Wasm Service. -// [#not-implemented-hide:] pending implementation. +// [[#not-implemented-hide:] +// WasmService is configured as a built-in *envoy.wasm_service* :ref:`WasmService +// ` This opaque configuration will be used to create a Wasm Service. message WasmService { // General plugin configuration. PluginConfig config = 1; From 11669e44d6241c5393ef220a4a7bb280cff76ae5 Mon Sep 17 00:00:00 2001 From: "Nolan \"Tempa Kyouran\" Varani" Date: Thu, 30 Jul 2020 14:44:40 -0700 Subject: [PATCH 800/909] aws_lambda: Increase code coverage in AWS Lambda filter (#12368) Commit Message: Increase code coverage in AWS Lambda filter Additional Description: Removing unreachable/dead lines of code, remove aws_lambda from per_file_coverage.sh Risk Level: Low Testing: bazel test //test/extensions/filters/http/aws_lambda/... 
Docs Changes: N/A Release Notes: N/A Fixes #11989 Signed-off-by: Nolan Varani --- source/extensions/filters/http/aws_lambda/config.cc | 2 -- test/per_file_coverage.sh | 1 - 2 files changed, 3 deletions(-) diff --git a/source/extensions/filters/http/aws_lambda/config.cc b/source/extensions/filters/http/aws_lambda/config.cc index 957e8a0960d4..c784020da619 100644 --- a/source/extensions/filters/http/aws_lambda/config.cc +++ b/source/extensions/filters/http/aws_lambda/config.cc @@ -25,10 +25,8 @@ getInvocationMode(const envoy::extensions::filters::http::aws_lambda::v3::Config switch (proto_config.invocation_mode()) { case Config_InvocationMode_ASYNCHRONOUS: return InvocationMode::Asynchronous; - break; case Config_InvocationMode_SYNCHRONOUS: return InvocationMode::Synchronous; - break; default: NOT_REACHED_GCOVR_EXCL_LINE; } diff --git a/test/per_file_coverage.sh b/test/per_file_coverage.sh index 09452a44bd25..c0fa968a46ca 100755 --- a/test/per_file_coverage.sh +++ b/test/per_file_coverage.sh @@ -37,7 +37,6 @@ declare -a KNOWN_LOW_COVERAGE=( "source/extensions/filters/http/cache/simple_http_cache:84.5" "source/extensions/filters/http/ip_tagging:91.2" "source/extensions/filters/http/grpc_json_transcoder:93.3" -"source/extensions/filters/http/aws_lambda:96.4" "source/extensions/filters/listener:96.0" "source/extensions/filters/listener/tls_inspector:92.4" "source/extensions/filters/listener/http_inspector:93.3" From fd4e76f7d13245e9a31b800a6b4387e1e8e43220 Mon Sep 17 00:00:00 2001 From: Radha Date: Thu, 30 Jul 2020 22:55:40 +0100 Subject: [PATCH 801/909] header-to-metadata: rename on_header_present and on_header_missing fields (#12385) Signed-off-by: Radha Kumari --- .../http/header_to_metadata/v3/header_to_metadata.proto | 4 ++-- .../http/header_to_metadata/v4alpha/header_to_metadata.proto | 4 ++-- .../http/header_to_metadata/v3/header_to_metadata.proto | 4 ++-- .../http/header_to_metadata/v4alpha/header_to_metadata.proto | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/api/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto b/api/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto index 11e70d91d30f..ace7c535069a 100644 --- a/api/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto +++ b/api/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto @@ -109,13 +109,13 @@ message Config { // // If the value in the KeyValuePair is non-empty, it'll be used instead // of the header or cookie value. - KeyValuePair on_header_present = 2; + KeyValuePair on_header_present = 2 [(udpa.annotations.field_migrate).rename = "on_present"]; // If the header or cookie is not present, apply this metadata KeyValuePair. // // The value in the KeyValuePair must be set, since it'll be used in lieu // of the missing header or cookie value. - KeyValuePair on_header_missing = 3; + KeyValuePair on_header_missing = 3 [(udpa.annotations.field_migrate).rename = "on_missing"]; // Whether or not to remove the header after a rule is applied. 
// diff --git a/api/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto b/api/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto index 54855c08f8c1..0d7c814584dc 100644 --- a/api/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto +++ b/api/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto @@ -107,13 +107,13 @@ message Config { // // If the value in the KeyValuePair is non-empty, it'll be used instead // of the header or cookie value. - KeyValuePair on_header_present = 2; + KeyValuePair on_present = 2; // If the header or cookie is not present, apply this metadata KeyValuePair. // // The value in the KeyValuePair must be set, since it'll be used in lieu // of the missing header or cookie value. - KeyValuePair on_header_missing = 3; + KeyValuePair on_missing = 3; // Whether or not to remove the header after a rule is applied. // diff --git a/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto b/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto index 11e70d91d30f..ace7c535069a 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto @@ -109,13 +109,13 @@ message Config { // // If the value in the KeyValuePair is non-empty, it'll be used instead // of the header or cookie value. - KeyValuePair on_header_present = 2; + KeyValuePair on_header_present = 2 [(udpa.annotations.field_migrate).rename = "on_present"]; // If the header or cookie is not present, apply this metadata KeyValuePair. // // The value in the KeyValuePair must be set, since it'll be used in lieu // of the missing header or cookie value. - KeyValuePair on_header_missing = 3; + KeyValuePair on_header_missing = 3 [(udpa.annotations.field_migrate).rename = "on_missing"]; // Whether or not to remove the header after a rule is applied. // diff --git a/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto b/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto index 54855c08f8c1..0d7c814584dc 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto @@ -107,13 +107,13 @@ message Config { // // If the value in the KeyValuePair is non-empty, it'll be used instead // of the header or cookie value. - KeyValuePair on_header_present = 2; + KeyValuePair on_present = 2; // If the header or cookie is not present, apply this metadata KeyValuePair. // // The value in the KeyValuePair must be set, since it'll be used in lieu // of the missing header or cookie value. - KeyValuePair on_header_missing = 3; + KeyValuePair on_missing = 3; // Whether or not to remove the header after a rule is applied. // From 1423af543051c667ed1b4ae7d0c8b78ae66795c8 Mon Sep 17 00:00:00 2001 From: "William A. 
Rowe Jr" Date: Thu, 30 Jul 2020 16:57:19 -0500 Subject: [PATCH 802/909] Update yaml-cpp with Windows build fixes (#12382) Co-authored-by: Sunjay Bhatia Co-authored-by: William A Rowe Jr Signed-off-by: Sunjay Bhatia Signed-off-by: William A Rowe Jr --- bazel/foreign_cc/BUILD | 2 ++ bazel/repository_locations.bzl | 8 ++++---- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/bazel/foreign_cc/BUILD b/bazel/foreign_cc/BUILD index c87f82ff4eae..79bb436a57ee 100644 --- a/bazel/foreign_cc/BUILD +++ b/bazel/foreign_cc/BUILD @@ -213,7 +213,9 @@ envoy_cmake_external( cache_entries = { "YAML_CPP_BUILD_TESTS": "off", "YAML_CPP_BUILD_TOOLS": "off", + "YAML_BUILD_SHARED_LIBS": "off", "CMAKE_CXX_COMPILER_FORCED": "on", + "YAML_MSVC_SHARED_RT": "off", }, lib_source = "@com_github_jbeder_yaml_cpp//:all", static_libraries = select({ diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index bc1df883c414..d7ddc3c41efb 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -260,10 +260,10 @@ DEPENDENCY_REPOSITORIES = dict( cpe = "cpe:2.3:a:gnu:zlib:*", ), com_github_jbeder_yaml_cpp = dict( - sha256 = "17ffa6320c33de65beec33921c9334dee65751c8a4b797ba5517e844062b98f1", - strip_prefix = "yaml-cpp-6701275f1910bf63631528dfd9df9c3ac787365b", - # 2020-05-25 - urls = ["https://github.com/jbeder/yaml-cpp/archive/6701275f1910bf63631528dfd9df9c3ac787365b.tar.gz"], + sha256 = "79ab7069ef1c7c3632e7ffe095f7185d4c77b64d8035db3c085c239d4fe96d5f", + strip_prefix = "yaml-cpp-98acc5a8874faab28b82c28936f4b400b389f5d6", + # 2020-07-28 + urls = ["https://github.com/greenhouse-org/yaml-cpp/archive/98acc5a8874faab28b82c28936f4b400b389f5d6.tar.gz"], use_category = ["dataplane"], cpe = "N/A", ), From 0186a9f00351c96f3e7d9d846ff1e798ef26ffe0 Mon Sep 17 00:00:00 2001 From: "William A. Rowe Jr" Date: Thu, 30 Jul 2020 17:00:54 -0500 Subject: [PATCH 803/909] Fix 4 test failures due to broken CRLF line handling (#12381) - test/test_common/environment.cc was reading config files for substitution in binary (preserving \r) and then writing the substituted resulting file in text (replacing \n with \r\n), ending up with \r\r\n line endings. 
- //test/common/runtime:runtime_impl_test now generates multiline strings on the fly to ensure line endings are treated in string as a binary blob Co-authored-by: Sunjay Bhatia Co-authored-by: William A Rowe Jr Signed-off-by: Sunjay Bhatia Signed-off-by: William A Rowe Jr --- test/common/buffer/BUILD | 2 -- test/common/common/BUILD | 2 -- test/common/runtime/BUILD | 2 -- test/common/runtime/filesystem_setup.sh | 3 +++ test/common/runtime/runtime_impl_test.cc | 18 ++++++++++++------ .../root/envoy/subdir/{file3 => file} | 1 - test/exe/BUILD | 2 -- test/test_common/environment.cc | 2 +- 8 files changed, 16 insertions(+), 16 deletions(-) rename test/common/runtime/test_data/root/envoy/subdir/{file3 => file} (50%) diff --git a/test/common/buffer/BUILD b/test/common/buffer/BUILD index 0e32ba806f14..bd01534ca6ca 100644 --- a/test/common/buffer/BUILD +++ b/test/common/buffer/BUILD @@ -74,8 +74,6 @@ envoy_cc_test( envoy_cc_test( name = "watermark_buffer_test", srcs = ["watermark_buffer_test.cc"], - # Fails on windows with cr/lf yaml file checkouts - tags = ["fails_on_windows"], deps = [ ":utility_lib", "//source/common/buffer:buffer_lib", diff --git a/test/common/common/BUILD b/test/common/common/BUILD index 94eb85adf914..21ea6fe21df9 100644 --- a/test/common/common/BUILD +++ b/test/common/common/BUILD @@ -181,8 +181,6 @@ envoy_cc_test( envoy_cc_test( name = "random_generator_test", srcs = ["random_generator_test.cc"], - # Fails on windows with cr/lf yaml file checkouts - tags = ["fails_on_windows"], deps = [ "//source/common/common:random_generator_lib", "//test/mocks/runtime:runtime_mocks", diff --git a/test/common/runtime/BUILD b/test/common/runtime/BUILD index 878ebe377ce1..8cb4a424daa7 100644 --- a/test/common/runtime/BUILD +++ b/test/common/runtime/BUILD @@ -42,8 +42,6 @@ envoy_cc_test( name = "runtime_impl_test", srcs = ["runtime_impl_test.cc"], data = glob(["test_data/**"]) + ["filesystem_setup.sh"], - # Fails on windows with cr/lf yaml file checkouts - tags = ["fails_on_windows"], deps = [ "//source/common/config:runtime_utility_lib", "//source/common/runtime:runtime_lib", diff --git a/test/common/runtime/filesystem_setup.sh b/test/common/runtime/filesystem_setup.sh index b66941acdc70..35baffead34b 100755 --- a/test/common/runtime/filesystem_setup.sh +++ b/test/common/runtime/filesystem_setup.sh @@ -9,6 +9,9 @@ cd "${TEST_SRCDIR}/envoy" rm -rf "${TEST_TMPDIR}/${TEST_DATA}" mkdir -p "${TEST_TMPDIR}/${TEST_DATA}" cp -RfL "${TEST_DATA}"/* "${TEST_TMPDIR}/${TEST_DATA}" +# Verify text value is treated as a binary blob regardless of source line-ending settings +printf "hello\nworld" > "${TEST_TMPDIR}/${TEST_DATA}/root/envoy/file_lf" +printf "hello\r\nworld" > "${TEST_TMPDIR}/${TEST_DATA}/root/envoy/file_crlf" chmod -R u+rwX "${TEST_TMPDIR}/${TEST_DATA}" # Deliberate symlink of doom. diff --git a/test/common/runtime/runtime_impl_test.cc b/test/common/runtime/runtime_impl_test.cc index 3f0706a608bc..dad18c5d2bf8 100644 --- a/test/common/runtime/runtime_impl_test.cc +++ b/test/common/runtime/runtime_impl_test.cc @@ -140,12 +140,14 @@ TEST_F(DiskLoaderImplTest, All) { // Basic string getting. 
EXPECT_EQ("world", loader_->snapshot().get("file2").value().get()); - EXPECT_EQ("hello\nworld", loader_->snapshot().get("subdir.file3").value().get()); + EXPECT_EQ("hello", loader_->snapshot().get("subdir.file").value().get()); + EXPECT_EQ("hello\nworld", loader_->snapshot().get("file_lf").value().get()); + EXPECT_EQ("hello\r\nworld", loader_->snapshot().get("file_crlf").value().get()); EXPECT_FALSE(loader_->snapshot().get("invalid").has_value()); // Existence checking. EXPECT_EQ(true, loader_->snapshot().get("file2").has_value()); - EXPECT_EQ(true, loader_->snapshot().get("subdir.file3").has_value()); + EXPECT_EQ(true, loader_->snapshot().get("subdir.file").has_value()); EXPECT_EQ(false, loader_->snapshot().get("invalid").has_value()); // Integer getting. @@ -255,7 +257,7 @@ TEST_F(DiskLoaderImplTest, All) { EXPECT_EQ(0, store_.counter("runtime.load_error").value()); EXPECT_EQ(1, store_.counter("runtime.load_success").value()); - EXPECT_EQ(23, store_.gauge("runtime.num_keys", Stats::Gauge::ImportMode::NeverImport).value()); + EXPECT_EQ(25, store_.gauge("runtime.num_keys", Stats::Gauge::ImportMode::NeverImport).value()); EXPECT_EQ(4, store_.gauge("runtime.num_layers", Stats::Gauge::ImportMode::NeverImport).value()); } @@ -556,7 +558,7 @@ TEST_F(StaticLoaderImplTest, ProtoParsing) { file12: FaLSe file13: false subdir: - file3: "hello\nworld" + file: "hello" numerator_only: numerator: 52 denominator_only: @@ -567,6 +569,8 @@ TEST_F(StaticLoaderImplTest, ProtoParsing) { empty: {} file_with_words: "some words" file_with_double: 23.2 + file_lf: "hello\nworld" + file_crlf: "hello\r\nworld" bool_as_int0: 0 bool_as_int1: 1 )EOF"); @@ -574,7 +578,9 @@ TEST_F(StaticLoaderImplTest, ProtoParsing) { // Basic string getting. EXPECT_EQ("world", loader_->snapshot().get("file2").value().get()); - EXPECT_EQ("hello\nworld", loader_->snapshot().get("subdir.file3").value().get()); + EXPECT_EQ("hello", loader_->snapshot().get("subdir.file").value().get()); + EXPECT_EQ("hello\nworld", loader_->snapshot().get("file_lf").value().get()); + EXPECT_EQ("hello\r\nworld", loader_->snapshot().get("file_crlf").value().get()); EXPECT_FALSE(loader_->snapshot().get("invalid").has_value()); // Integer getting. 
@@ -674,7 +680,7 @@ TEST_F(StaticLoaderImplTest, ProtoParsing) { EXPECT_EQ(0, store_.counter("runtime.load_error").value()); EXPECT_EQ(1, store_.counter("runtime.load_success").value()); - EXPECT_EQ(19, store_.gauge("runtime.num_keys", Stats::Gauge::ImportMode::NeverImport).value()); + EXPECT_EQ(21, store_.gauge("runtime.num_keys", Stats::Gauge::ImportMode::NeverImport).value()); EXPECT_EQ(2, store_.gauge("runtime.num_layers", Stats::Gauge::ImportMode::NeverImport).value()); } diff --git a/test/common/runtime/test_data/root/envoy/subdir/file3 b/test/common/runtime/test_data/root/envoy/subdir/file similarity index 50% rename from test/common/runtime/test_data/root/envoy/subdir/file3 rename to test/common/runtime/test_data/root/envoy/subdir/file index 94954abda49d..ce013625030b 100644 --- a/test/common/runtime/test_data/root/envoy/subdir/file3 +++ b/test/common/runtime/test_data/root/envoy/subdir/file @@ -1,2 +1 @@ hello -world diff --git a/test/exe/BUILD b/test/exe/BUILD index 7b1378dab34b..283086ab4799 100644 --- a/test/exe/BUILD +++ b/test/exe/BUILD @@ -63,8 +63,6 @@ envoy_cc_test( name = "main_common_test", srcs = ["main_common_test.cc"], data = ["//test/config/integration:google_com_proxy_port_0"], - # Fails on windows with cr/lf yaml file checkouts - tags = ["fails_on_windows"], deps = [ "//source/common/api:api_lib", "//source/exe:main_common_lib", diff --git a/test/test_common/environment.cc b/test/test_common/environment.cc index 9269cc73bcce..5938d4452c00 100644 --- a/test/test_common/environment.cc +++ b/test/test_common/environment.cc @@ -394,7 +394,7 @@ std::string TestEnvironment::temporaryFileSubstitute(const std::string& path, const std::string out_json_path = TestEnvironment::temporaryPath(name) + ".with.ports" + extension; { - std::ofstream out_json_file(out_json_path); + std::ofstream out_json_file(out_json_path, std::ios::binary); out_json_file << out_json_string; } return out_json_path; From ec34be801b508623c99e76cc6b1aa5c376e2ea1e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Guti=C3=A9rrez=20Segal=C3=A9s?= Date: Thu, 30 Jul 2020 18:02:02 -0400 Subject: [PATCH 804/909] docs/comments: update v2 --> v3 references (#12365) Signed-off-by: Raul Gutierrez Segales --- include/envoy/config/subscription_factory.h | 2 +- include/envoy/grpc/async_client_manager.h | 2 +- include/envoy/network/filter.h | 2 +- include/envoy/router/router.h | 2 +- include/envoy/secret/secret_manager.h | 2 +- .../envoy/server/active_udp_listener_config.h | 2 +- include/envoy/stream_info/stream_info.h | 3 ++- include/envoy/upstream/cluster_manager.h | 4 ++-- include/envoy/upstream/upstream.h | 10 ++++----- .../config/subscription_factory_impl.cc | 3 ++- source/common/config/utility.h | 21 ++++++++++--------- source/common/grpc/google_grpc_creds_impl.h | 14 ++++++------- source/common/http/utility.h | 2 +- source/common/network/cidr_range.h | 2 +- source/common/network/resolver_impl.h | 4 ++-- source/common/protobuf/utility.h | 4 ++-- .../upstream/health_discovery_service.cc | 3 ++- source/common/upstream/upstream_impl.cc | 2 +- source/docs/subset_load_balancer.md | 4 ++-- .../config/subscription_factory_impl_test.cc | 2 +- 20 files changed, 47 insertions(+), 43 deletions(-) diff --git a/include/envoy/config/subscription_factory.h b/include/envoy/config/subscription_factory.h index 3d67d5526692..eb08360e7dda 100644 --- a/include/envoy/config/subscription_factory.h +++ b/include/envoy/config/subscription_factory.h @@ -14,7 +14,7 @@ class SubscriptionFactory { /** * Subscription factory interface. 
* - * @param config envoy::api::v2::core::ConfigSource to construct from. + * @param config envoy::config::core::v3::ConfigSource to construct from. * @param type_url type URL for the resource being subscribed to. * @param scope stats scope for any stats tracked by the subscription. * @param callbacks the callbacks needed by all Subscription objects, to deliver config updates. diff --git a/include/envoy/grpc/async_client_manager.h b/include/envoy/grpc/async_client_manager.h index 8494204cf8ad..9b036480018f 100644 --- a/include/envoy/grpc/async_client_manager.h +++ b/include/envoy/grpc/async_client_manager.h @@ -32,7 +32,7 @@ class AsyncClientManager { /** * Create a Grpc::AsyncClients factory for a service. Validation of the service is performed and * will raise an exception on failure. - * @param grpc_service envoy::api::v2::core::GrpcService configuration. + * @param grpc_service envoy::config::core::v3::GrpcService configuration. * @param scope stats scope. * @param skip_cluster_check if set to true skips checks for cluster presence and being statically * configured. diff --git a/include/envoy/network/filter.h b/include/envoy/network/filter.h index e43f166f73b9..a111b1a22ed4 100644 --- a/include/envoy/network/filter.h +++ b/include/envoy/network/filter.h @@ -281,7 +281,7 @@ class ListenerFilterCallbacks { virtual void setDynamicMetadata(const std::string& name, const ProtobufWkt::Struct& value) PURE; /** - * @return const envoy::api::v2::core::Metadata& the dynamic metadata associated with this + * @return const envoy::config::core::v3::Metadata& the dynamic metadata associated with this * connection. */ virtual envoy::config::core::v3::Metadata& dynamicMetadata() PURE; diff --git a/include/envoy/router/router.h b/include/envoy/router/router.h index 35449ec4cf70..7d37cf02cf32 100644 --- a/include/envoy/router/router.h +++ b/include/envoy/router/router.h @@ -813,7 +813,7 @@ class RouteEntry : public ResponseEntry { virtual const Envoy::Config::TypedMetadata& typedMetadata() const PURE; /** - * @return const envoy::api::v2::core::Metadata& return the metadata provided in the config for + * @return const envoy::config::core::v3::Metadata& return the metadata provided in the config for * this route. */ virtual const envoy::config::core::v3::Metadata& metadata() const PURE; diff --git a/include/envoy/secret/secret_manager.h b/include/envoy/secret/secret_manager.h index 666ce325244c..ce13f6eba214 100644 --- a/include/envoy/secret/secret_manager.h +++ b/include/envoy/secret/secret_manager.h @@ -24,7 +24,7 @@ class SecretManager { virtual ~SecretManager() = default; /** - * @param add a static secret from envoy::api::v2::auth::Secret. + * @param add a static secret from envoy::extensions::transport_sockets::tls::v3::Secret. * @throw an EnvoyException if the secret is invalid or not supported, or there is duplicate. */ virtual void diff --git a/include/envoy/server/active_udp_listener_config.h b/include/envoy/server/active_udp_listener_config.h index 2e027dc4d747..ae387dcfe9d6 100644 --- a/include/envoy/server/active_udp_listener_config.h +++ b/include/envoy/server/active_udp_listener_config.h @@ -10,7 +10,7 @@ namespace Server { /** * Interface to create udp listener according to - * envoy::api::v2::listener::UdpListenerConfig.udp_listener_name. + * envoy::config::listener::v3::UdpListenerConfig.udp_listener_name. 
*/ class ActiveUdpListenerConfigFactory : public Config::UntypedFactory { public: diff --git a/include/envoy/stream_info/stream_info.h b/include/envoy/stream_info/stream_info.h index 4f0309604764..c64e0837266d 100644 --- a/include/envoy/stream_info/stream_info.h +++ b/include/envoy/stream_info/stream_info.h @@ -489,7 +489,8 @@ class StreamInfo { virtual const Router::RouteEntry* routeEntry() const PURE; /** - * @return const envoy::api::v2::core::Metadata& the dynamic metadata associated with this request + * @return const envoy::config::core::v3::Metadata& the dynamic metadata associated with this + * request */ virtual envoy::config::core::v3::Metadata& dynamicMetadata() PURE; virtual const envoy::config::core::v3::Metadata& dynamicMetadata() const PURE; diff --git a/include/envoy/upstream/cluster_manager.h b/include/envoy/upstream/cluster_manager.h index 936fa439375b..8389eb94a96d 100644 --- a/include/envoy/upstream/cluster_manager.h +++ b/include/envoy/upstream/cluster_manager.h @@ -214,8 +214,8 @@ class ClusterManager { virtual void shutdown() PURE; /** - * @return const envoy::api::v2::core::BindConfig& cluster manager wide bind configuration for new - * upstream connections. + * @return const envoy::config::core::v3::BindConfig& cluster manager wide bind configuration for + * new upstream connections. */ virtual const envoy::config::core::v3::BindConfig& bindConfig() const PURE; diff --git a/include/envoy/upstream/upstream.h b/include/envoy/upstream/upstream.h index a7a4af39cd21..ebdc1575eb8f 100644 --- a/include/envoy/upstream/upstream.h +++ b/include/envoy/upstream/upstream.h @@ -777,8 +777,8 @@ class ClusterInfo { } /** - * @return const envoy::api::v2::Cluster::CommonLbConfig& the common configuration for all - * load balancers for this cluster. + * @return const envoy::config::cluster::v3::Cluster::CommonLbConfig& the common configuration for + * all load balancers for this cluster. */ virtual const envoy::config::cluster::v3::Cluster::CommonLbConfig& lbConfig() const PURE; @@ -811,8 +811,8 @@ class ClusterInfo { lbRingHashConfig() const PURE; /** - * @return const absl::optional& the configuration - * for the Original Destination load balancing policy, only used if type is set to + * @return const absl::optional& the + * configuration for the Original Destination load balancing policy, only used if type is set to * ORIGINAL_DST_LB. */ virtual const absl::optional& @@ -904,7 +904,7 @@ class ClusterInfo { virtual const LoadBalancerSubsetInfo& lbSubsetInfo() const PURE; /** - * @return const envoy::api::v2::core::Metadata& the configuration metadata for this cluster. + * @return const envoy::config::core::v3::Metadata& the configuration metadata for this cluster. 
*/ virtual const envoy::config::core::v3::Metadata& metadata() const PURE; diff --git a/source/common/config/subscription_factory_impl.cc b/source/common/config/subscription_factory_impl.cc index 505206b5bf73..6495688add61 100644 --- a/source/common/config/subscription_factory_impl.cc +++ b/source/common/config/subscription_factory_impl.cc @@ -99,7 +99,8 @@ SubscriptionPtr SubscriptionFactoryImpl::subscriptionFromConfigSource( Utility::configSourceInitialFetchTimeout(config), true); } default: - throw EnvoyException("Missing config source specifier in envoy::api::v2::core::ConfigSource"); + throw EnvoyException( + "Missing config source specifier in envoy::config::core::v3::ConfigSource"); } NOT_REACHED_GCOVR_EXCL_LINE; } diff --git a/source/common/config/utility.h b/source/common/config/utility.h index b4ee90445adf..d19026386c53 100644 --- a/source/common/config/utility.h +++ b/source/common/config/utility.h @@ -34,7 +34,7 @@ namespace Envoy { namespace Config { /** - * Constant Api Type Values, used by envoy::api::v2::core::ApiConfigSource. + * Constant Api Type Values, used by envoy::config::core::v3::ApiConfigSource. */ class ApiTypeValues { public: @@ -78,14 +78,14 @@ class Utility { /** * Extract refresh_delay as a std::chrono::milliseconds from - * envoy::api::v2::core::ApiConfigSource. + * envoy::config::core::v3::ApiConfigSource. */ static std::chrono::milliseconds apiConfigSourceRefreshDelay(const envoy::config::core::v3::ApiConfigSource& api_config_source); /** * Extract request_timeout as a std::chrono::milliseconds from - * envoy::api::v2::core::ApiConfigSource. If request_timeout isn't set in the config source, a + * envoy::config::core::v3::ApiConfigSource. If request_timeout isn't set in the config source, a * default value of 1s will be returned. */ static std::chrono::milliseconds @@ -93,18 +93,18 @@ class Utility { /** * Extract initial_fetch_timeout as a std::chrono::milliseconds from - * envoy::api::v2::core::ConfigSource. If request_timeout isn't set in the config source, a + * envoy::config::core::v3::ApiConfigSource. If request_timeout isn't set in the config source, a * default value of 0s will be returned. */ static std::chrono::milliseconds configSourceInitialFetchTimeout(const envoy::config::core::v3::ConfigSource& config_source); /** - * Populate an envoy::api::v2::core::ApiConfigSource. + * Populate an envoy::config::core::v3::ApiConfigSource. * @param cluster supplies the cluster name for the ApiConfigSource. * @param refresh_delay_ms supplies the refresh delay for the ApiConfigSource in ms. * @param api_type supplies the type of subscription to use for the ApiConfigSource. - * @param api_config_source a reference to the envoy::api::v2::core::ApiConfigSource object to + * @param api_config_source a reference to the envoy::config::core::v3::ApiConfigSource object to * populate. */ static void translateApiConfigSource(const std::string& cluster, uint32_t refresh_delay_ms, @@ -179,7 +179,8 @@ class Utility { const envoy::config::core::v3::ApiConfigSource& api_config_source); /** - * Parses RateLimit configuration from envoy::api::v2::core::ApiConfigSource to RateLimitSettings. + * Parses RateLimit configuration from envoy::config::core::v3::ApiConfigSource to + * RateLimitSettings. * @param api_config_source ApiConfigSource. * @return RateLimitSettings. */ @@ -353,9 +354,9 @@ class Utility { createHistogramSettings(const envoy::config::bootstrap::v3::Bootstrap& bootstrap); /** - * Obtain gRPC async client factory from a envoy::api::v2::core::ApiConfigSource. 
+ * Obtain gRPC async client factory from a envoy::config::core::v3::ApiConfigSource. * @param async_client_manager gRPC async client manager. - * @param api_config_source envoy::api::v3::core::ApiConfigSource. Must have config type GRPC. + * @param api_config_source envoy::config::core::v3::ApiConfigSource. Must have config type GRPC. * @param skip_cluster_check whether to skip cluster validation. * @return Grpc::AsyncClientFactoryPtr gRPC async client factory. */ @@ -367,7 +368,7 @@ class Utility { /** * Translate a set of cluster's hosts into a load assignment configuration. * @param hosts cluster's list of hosts. - * @return envoy::api::v2::ClusterLoadAssignment a load assignment configuration. + * @return envoy::config::endpoint::v3::ClusterLoadAssignment a load assignment configuration. */ static envoy::config::endpoint::v3::ClusterLoadAssignment translateClusterHosts(const Protobuf::RepeatedPtrField& hosts); diff --git a/source/common/grpc/google_grpc_creds_impl.h b/source/common/grpc/google_grpc_creds_impl.h index 8e2bd2b67288..e36083432aec 100644 --- a/source/common/grpc/google_grpc_creds_impl.h +++ b/source/common/grpc/google_grpc_creds_impl.h @@ -19,7 +19,7 @@ getGoogleGrpcChannelCredentials(const envoy::config::core::v3::GrpcService& grpc class CredsUtility { public: /** - * Translation from envoy::api::v2::core::GrpcService to grpc::ChannelCredentials + * Translation from envoy::config::core::v3::GrpcService::GoogleGrpc to grpc::ChannelCredentials * for channel credentials. * @param google_grpc Google gRPC config. * @param api reference to the Api object @@ -31,8 +31,8 @@ class CredsUtility { Api::Api& api); /** - * Static translation from envoy::api::v2::core::GrpcService to a vector of grpc::CallCredentials. - * Any plugin based call credentials will be elided. + * Static translation from envoy::config::core::v3::GrpcService::GoogleGrpc to a vector of + * grpc::CallCredentials. Any plugin based call credentials will be elided. * @param grpc_service Google gRPC config. * @return std::vector> call credentials. */ @@ -40,8 +40,8 @@ class CredsUtility { callCredentials(const envoy::config::core::v3::GrpcService::GoogleGrpc& google_grpc); /** - * Default translation from envoy::api::v2::core::GrpcService to grpc::ChannelCredentials for SSL - * channel credentials. + * Default translation from envoy::config::core::v3::GrpcService::GoogleGrpc to + * grpc::ChannelCredentials for SSL channel credentials. * @param grpc_service_config gRPC service config. * @param api reference to the Api object * @return std::shared_ptr SSL channel credentials. Empty SSL @@ -53,8 +53,8 @@ class CredsUtility { Api::Api& api); /** - * Default static translation from envoy::api::v2::core::GrpcService to grpc::ChannelCredentials - * for all non-plugin based channel and call credentials. + * Default static translation from envoy::config::core::v3::GrpcService::GoogleGrpc to + * grpc::ChannelCredentials for all non-plugin based channel and call credentials. * @param grpc_service_config gRPC service config. * @param api reference to the Api object * @return std::shared_ptr composite channel and call credentials. diff --git a/source/common/http/utility.h b/source/common/http/utility.h index 492193c4e2ff..d4625c9b8e82 100644 --- a/source/common/http/utility.h +++ b/source/common/http/utility.h @@ -265,7 +265,7 @@ bool isWebSocketUpgradeRequest(const RequestHeaderMap& headers); /** * @return Http1Settings An Http1Settings populated from the - * envoy::api::v2::core::Http1ProtocolOptions config. 
+ * envoy::config::core::v3::Http1ProtocolOptions config. */ Http1Settings parseHttp1Settings(const envoy::config::core::v3::Http1ProtocolOptions& config); diff --git a/source/common/network/cidr_range.h b/source/common/network/cidr_range.h index a98d0c1ef118..9ec72f39156b 100644 --- a/source/common/network/cidr_range.h +++ b/source/common/network/cidr_range.h @@ -94,7 +94,7 @@ class CidrRange { static CidrRange create(const std::string& range); /** - * Constructs a CidrRange from envoy::api::v2::core::CidrRange. + * Constructs a CidrRange from envoy::config::core::v3::CidrRange. * TODO(ccaraman): Update CidrRange::create to support only constructing valid ranges. */ static CidrRange create(const envoy::config::core::v3::CidrRange& cidr); diff --git a/source/common/network/resolver_impl.h b/source/common/network/resolver_impl.h index 958c9b22d0f6..0241a4fe5309 100644 --- a/source/common/network/resolver_impl.h +++ b/source/common/network/resolver_impl.h @@ -11,7 +11,7 @@ namespace Envoy { namespace Network { namespace Address { /** - * Create an Instance from a envoy::api::v2::core::Address. + * Create an Instance from a envoy::config::core::v3::Address. * @param address supplies the address proto to resolve. * @return pointer to the Instance. */ @@ -19,7 +19,7 @@ Address::InstanceConstSharedPtr resolveProtoAddress(const envoy::config::core::v3::Address& address); /** - * Create an Instance from a envoy::api::v2::core::SocketAddress. + * Create an Instance from a envoy::config::core::v3::SocketAddress. * @param address supplies the socket address proto to resolve. * @return pointer to the Instance. */ diff --git a/source/common/protobuf/utility.h b/source/common/protobuf/utility.h index a605dbc684c6..dc2ec54d1863 100644 --- a/source/common/protobuf/utility.h +++ b/source/common/protobuf/utility.h @@ -85,7 +85,7 @@ uint64_t fractionalPercentDenominatorToInt( } // namespace ProtobufPercentHelper } // namespace Envoy -// Convert an envoy::api::v2::core::Percent to a double or a default. +// Convert an envoy::type::v3::Percent to a double or a default. // @param message supplies the proto message containing the field. // @param field_name supplies the field name in the message. // @param default_value supplies the default if the field is not present. @@ -94,7 +94,7 @@ uint64_t fractionalPercentDenominatorToInt( ? (message).has_##field_name() ? (message).field_name().value() : default_value \ : throw EnvoyException(fmt::format("Value not in the range of 0..100 range."))) -// Convert an envoy::api::v2::core::Percent to a rounded integer or a default. +// Convert an envoy::type::v3::Percent to a rounded integer or a default. // @param message supplies the proto message containing the field. // @param field_name supplies the field name in the message. // @param max_value supplies the maximum allowed integral value (e.g., 100, 10000, etc.). 
diff --git a/source/common/upstream/health_discovery_service.cc b/source/common/upstream/health_discovery_service.cc index f77bc6216016..21fa74e34588 100644 --- a/source/common/upstream/health_discovery_service.cc +++ b/source/common/upstream/health_discovery_service.cc @@ -108,7 +108,8 @@ envoy::service::health::v3::HealthCheckRequestOrEndpointHealthResponse HdsDelega auto* endpoint = response.mutable_endpoint_health_response()->add_endpoints_health(); Network::Utility::addressToProtobufAddress( *host->address(), *endpoint->mutable_endpoint()->mutable_address()); - // TODO(lilika): Add support for more granular options of envoy::api::v2::core::HealthStatus + // TODO(lilika): Add support for more granular options of + // envoy::config::core::v3::HealthStatus if (host->health() == Host::Health::Healthy) { endpoint->set_health_status(envoy::config::core::v3::HEALTHY); } else { diff --git a/source/common/upstream/upstream_impl.cc b/source/common/upstream/upstream_impl.cc index 92dde0ac6783..d98d672ffd8f 100644 --- a/source/common/upstream/upstream_impl.cc +++ b/source/common/upstream/upstream_impl.cc @@ -440,7 +440,7 @@ void HostSetImpl::rebuildLocalityScheduler( // scheduler. // // TODO(htuch): if the underlying locality index -> - // envoy::api::v2::core::Locality hasn't changed in hosts_/healthy_hosts_/degraded_hosts_, we + // envoy::config::core::v3::Locality hasn't changed in hosts_/healthy_hosts_/degraded_hosts_, we // could just update locality_weight_ without rebuilding. Similar to how host // level WRR works, we would age out the existing entries via picks and lazily // apply the new weights. diff --git a/source/docs/subset_load_balancer.md b/source/docs/subset_load_balancer.md index c34e032f6d35..23220d79e1a3 100644 --- a/source/docs/subset_load_balancer.md +++ b/source/docs/subset_load_balancer.md @@ -120,7 +120,7 @@ e7 | dev | 1.2-pre | std | Note: Only e1 has the "xlarge" metadata key. -Given this CDS `envoy::api::v2::Cluster`: +Given this CDS `envoy::config::cluster::v3::Cluster`: ``` json { @@ -165,7 +165,7 @@ After loading this configuration, the SLB's `LbSubsetMap` looks like this:
![LbSubsetMap Diagram](subset_load_balancer_diagram.svg) -Given these `envoy::api::v2::route::Route` entries: +Given these `envoy::config::route::v3::Route` entries: ``` json "routes": [ diff --git a/test/common/config/subscription_factory_impl_test.cc b/test/common/config/subscription_factory_impl_test.cc index 5d975a0f1ba3..3c0cbc5c5d73 100644 --- a/test/common/config/subscription_factory_impl_test.cc +++ b/test/common/config/subscription_factory_impl_test.cc @@ -70,7 +70,7 @@ TEST_F(SubscriptionFactoryTest, NoConfigSpecifier) { envoy::config::core::v3::ConfigSource config; EXPECT_THROW_WITH_MESSAGE( subscriptionFromConfigSource(config), EnvoyException, - "Missing config source specifier in envoy::api::v2::core::ConfigSource"); + "Missing config source specifier in envoy::config::core::v3::ConfigSource"); } TEST_F(SubscriptionFactoryTest, RestClusterEmpty) { From fc7885c1c2a3224e9efd6a385c90edb8187407aa Mon Sep 17 00:00:00 2001 From: Sotiris Nanopoulos Date: Thu, 30 Jul 2020 15:10:07 -0700 Subject: [PATCH 805/909] Guard check that event_base_new initializes correctly (#12218) Signed-off-by: davinci26 --- source/common/event/libevent_scheduler.cc | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/source/common/event/libevent_scheduler.cc b/source/common/event/libevent_scheduler.cc index cec3cc8228ee..5d6be40e7d60 100644 --- a/source/common/event/libevent_scheduler.cc +++ b/source/common/event/libevent_scheduler.cc @@ -15,7 +15,11 @@ void recordTimeval(Stats::Histogram& histogram, const timeval& tv) { } } // namespace -LibeventScheduler::LibeventScheduler() : libevent_(event_base_new()) { +LibeventScheduler::LibeventScheduler() { + event_base* event_base = event_base_new(); + RELEASE_ASSERT(event_base != nullptr, "Failed to initialize libevent event_base"); + libevent_ = Libevent::BasePtr(event_base); + // The dispatcher won't work as expected if libevent hasn't been configured to use threads. RELEASE_ASSERT(Libevent::Global::initialized(), ""); } From c3d4552020f980ec6be8695b308de128bbafe032 Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Thu, 30 Jul 2020 15:54:55 -0700 Subject: [PATCH 806/909] docs: add more docs on ARM images (#12390) Follow up to https://github.com/envoyproxy/envoy/pull/11813/files Signed-off-by: Matt Klein --- docs/root/install/building.rst | 10 ++++++++++ docs/root/version_history/current.rst | 2 +- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/docs/root/install/building.rst b/docs/root/install/building.rst index a4868f04d2ce..7031a93ca05a 100644 --- a/docs/root/install/building.rst +++ b/docs/root/install/building.rst @@ -71,6 +71,16 @@ We will consider producing additional binary types depending on community intere CI, packaging, etc. Please open an `issue in GetEnvoy `_ for pre-built binaries for different platforms. +.. _arm_binaries: + +ARM64 binaries +^^^^^^^^^^^^^^ + +`envoyproxy/envoy `_ and +`envoyproxy/envoy-dev `_ are Docker +`multi-arch `_ images +and should run transparently on compatible ARM64 hosts. + Modifying Envoy --------------- diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index f0920bc65f50..cfe53410a320 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -42,7 +42,7 @@ New Features ------------ * access log: added support for :ref:`%DOWNSTREAM_PEER_FINGERPRINT_1% ` as a response flag. -* build: enable building envoy arm64 images by buildx tool in x86 CI platform. 
+* build: enable building envoy :ref:`arm64 images ` by buildx tool in x86 CI platform. * dynamic_forward_proxy: added :ref:`use_tcp_for_dns_lookups` option to use TCP for DNS lookups in order to match the DNS options for :ref:`Clusters`. * ext_authz filter: added support for emitting dynamic metadata for both :ref:`HTTP ` and :ref:`network ` filters. * grpc-json: support specifying `response_body` field in for `google.api.HttpBody` message. From 97e3ff5d0e7fc40ee01c8981ab036d8db882a938 Mon Sep 17 00:00:00 2001 From: Kevin Baichoo Date: Thu, 30 Jul 2020 19:25:04 -0400 Subject: [PATCH 807/909] Added random jitter option to watchdog kill_timeout. (#12386) Signed-off-by: Kevin Baichoo --- api/envoy/config/bootstrap/v3/bootstrap.proto | 8 ++- .../config/bootstrap/v4alpha/bootstrap.proto | 8 ++- docs/root/version_history/current.rst | 1 + .../envoy/config/bootstrap/v3/bootstrap.proto | 8 ++- .../config/bootstrap/v4alpha/bootstrap.proto | 8 ++- source/server/configuration_impl.cc | 16 +++++- test/server/configuration_impl_test.cc | 53 +++++++++++++++++++ 7 files changed, 96 insertions(+), 6 deletions(-) diff --git a/api/envoy/config/bootstrap/v3/bootstrap.proto b/api/envoy/config/bootstrap/v3/bootstrap.proto index 25947fb1c23b..2d096a39c73b 100644 --- a/api/envoy/config/bootstrap/v3/bootstrap.proto +++ b/api/envoy/config/bootstrap/v3/bootstrap.proto @@ -305,7 +305,7 @@ message ClusterManager { // Envoy process watchdog configuration. When configured, this monitors for // nonresponsive threads and kills the process after the configured thresholds. // See the :ref:`watchdog documentation ` for more information. -// [#next-free-field: 6] +// [#next-free-field: 7] message Watchdog { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v2.Watchdog"; @@ -323,6 +323,12 @@ message Watchdog { // kill behavior. If not specified the default is 0 (disabled). google.protobuf.Duration kill_timeout = 3; + // Defines the maximum jitter used to adjust the *kill_timeout* if *kill_timeout* is + // enabled. Enabling this feature would help to reduce risk of synchronized + // watchdog kill events across proxies due to external triggers. Set to 0 to + // disable. If not specified the default is 0 (disabled). + google.protobuf.Duration max_kill_timeout_jitter = 6 [(validate.rules).duration = {gte {}}]; + // If max(2, ceil(registered_threads * Fraction(*multikill_threshold*))) // threads have been nonresponsive for at least this duration kill the entire // Envoy process. Set to 0 to disable this behavior. If not specified the diff --git a/api/envoy/config/bootstrap/v4alpha/bootstrap.proto b/api/envoy/config/bootstrap/v4alpha/bootstrap.proto index 3e4291944307..ba6107aa8dfe 100644 --- a/api/envoy/config/bootstrap/v4alpha/bootstrap.proto +++ b/api/envoy/config/bootstrap/v4alpha/bootstrap.proto @@ -296,7 +296,7 @@ message ClusterManager { // Envoy process watchdog configuration. When configured, this monitors for // nonresponsive threads and kills the process after the configured thresholds. // See the :ref:`watchdog documentation ` for more information. -// [#next-free-field: 6] +// [#next-free-field: 7] message Watchdog { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.Watchdog"; @@ -314,6 +314,12 @@ message Watchdog { // kill behavior. If not specified the default is 0 (disabled). google.protobuf.Duration kill_timeout = 3; + // Defines the maximum jitter used to adjust the *kill_timeout* if *kill_timeout* is + // enabled. 
Enabling this feature would help to reduce risk of synchronized + // watchdog kill events across proxies due to external triggers. Set to 0 to + // disable. If not specified the default is 0 (disabled). + google.protobuf.Duration max_kill_timeout_jitter = 6 [(validate.rules).duration = {gte {}}]; + + // If max(2, ceil(registered_threads * Fraction(*multikill_threshold*))) // threads have been nonresponsive for at least this duration kill the entire // Envoy process. Set to 0 to disable this behavior. If not specified the diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index cfe53410a320..bc28b05fc93b 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -59,6 +59,7 @@ New Features * stats: allow configuring histogram buckets for stats sinks and admin endpoints that support it. * tap: added :ref:`generic body matcher` to scan http requests and responses for text or hex patterns. * tcp: switched the TCP connection pool to the new "shared" connection pool, sharing a common code base with HTTP and HTTP/2. Any unexpected behavioral changes can be temporarily reverted by setting `envoy.reloadable_features.new_tcp_connection_pool` to false. +* watchdog: support randomizing the watchdog's kill timeout to prevent synchronized kills via a maximum jitter parameter :ref:`max_kill_timeout_jitter`. * xds: added :ref:`extension config discovery` support for HTTP filters. Deprecated diff --git a/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto b/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto index 118971bf32cf..d3cf6d6947cf 100644 --- a/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto +++ b/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto @@ -306,7 +306,7 @@ message ClusterManager { // Envoy process watchdog configuration. When configured, this monitors for // nonresponsive threads and kills the process after the configured thresholds.
// See the :ref:`watchdog documentation ` for more information. -// [#next-free-field: 6] +// [#next-free-field: 7] message Watchdog { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.Watchdog"; @@ -322,6 +322,12 @@ message Watchdog { // kill behavior. If not specified the default is 0 (disabled). google.protobuf.Duration kill_timeout = 3; + // Defines the maximum jitter used to adjust the *kill_timeout* if *kill_timeout* is + // enabled. Enabling this feature would help to reduce risk of synchronized + // watchdog kill events across proxies due to external triggers. Set to 0 to + // disable. If not specified the default is 0 (disabled). + google.protobuf.Duration max_kill_timeout_jitter = 6 [(validate.rules).duration = {gte {}}]; + // If max(2, ceil(registered_threads * Fraction(*multikill_threshold*))) // threads have been nonresponsive for at least this duration kill the entire // Envoy process. Set to 0 to disable this behavior. If not specified the diff --git a/source/server/configuration_impl.cc b/source/server/configuration_impl.cc index 63d8b162fac6..7510f068f7ee 100644 --- a/source/server/configuration_impl.cc +++ b/source/server/configuration_impl.cc @@ -90,8 +90,20 @@ void MainImpl::initialize(const envoy::config::bootstrap::v3::Bootstrap& bootstr std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(watchdog, miss_timeout, 200)); watchdog_megamiss_timeout_ = std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(watchdog, megamiss_timeout, 1000)); - watchdog_kill_timeout_ = - std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(watchdog, kill_timeout, 0)); + uint64_t kill_timeout = PROTOBUF_GET_MS_OR_DEFAULT(watchdog, kill_timeout, 0); + const uint64_t max_kill_timeout_jitter = + PROTOBUF_GET_MS_OR_DEFAULT(watchdog, max_kill_timeout_jitter, 0); + + // Adjust kill timeout if we have skew enabled. + if (kill_timeout > 0 && max_kill_timeout_jitter > 0) { + // Increments the kill timeout with a random value in (0, max_skew]. + // We shouldn't have overflow issues due to the range of Duration. + // This won't be entirely uniform, depending on how large max_skew + // is relation to uint64. 
+ kill_timeout += (server.random().random() % max_kill_timeout_jitter) + 1; + } + + watchdog_kill_timeout_ = std::chrono::milliseconds(kill_timeout); watchdog_multikill_timeout_ = std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(watchdog, multikill_timeout, 0)); watchdog_multikill_threshold_ = diff --git a/test/server/configuration_impl_test.cc b/test/server/configuration_impl_test.cc index 5576179da9b8..9b93be806e32 100644 --- a/test/server/configuration_impl_test.cc +++ b/test/server/configuration_impl_test.cc @@ -740,6 +740,59 @@ TEST_F(ConfigurationImplTest, ExceedLoadBalancerLocalityWeightsLimit) { "The sum of weights of all localities at the same priority exceeds 4294967295"); } +TEST_F(ConfigurationImplTest, KillTimeoutWithoutSkew) { + const std::string json = R"EOF( + { + "watchdog": { + "kill_timeout": "1.0s", + }, + })EOF"; + + envoy::config::bootstrap::v3::Bootstrap bootstrap; + TestUtility::loadFromJson(json, bootstrap); + + MainImpl config; + config.initialize(bootstrap, server_, cluster_manager_factory_); + + EXPECT_EQ(std::chrono::milliseconds(1000), config.wdKillTimeout()); +} + +TEST_F(ConfigurationImplTest, CanSkewsKillTimeout) { + const std::string json = R"EOF( + { + "watchdog": { + "kill_timeout": "1.0s", + "max_kill_timeout_jitter": "0.5s" + }, + })EOF"; + + envoy::config::bootstrap::v3::Bootstrap bootstrap; + TestUtility::loadFromJson(json, bootstrap); + + MainImpl config; + config.initialize(bootstrap, server_, cluster_manager_factory_); + + EXPECT_LT(std::chrono::milliseconds(1000), config.wdKillTimeout()); + EXPECT_GE(std::chrono::milliseconds(1500), config.wdKillTimeout()); +} + +TEST_F(ConfigurationImplTest, DoesNotSkewIfKillTimeoutDisabled) { + const std::string json = R"EOF( + { + "watchdog": { + "max_kill_timeout_jitter": "0.5s" + }, + })EOF"; + + envoy::config::bootstrap::v3::Bootstrap bootstrap; + TestUtility::loadFromJson(json, bootstrap); + + MainImpl config; + config.initialize(bootstrap, server_, cluster_manager_factory_); + + EXPECT_EQ(std::chrono::milliseconds(0), config.wdKillTimeout()); +} + } // namespace } // namespace Configuration } // namespace Server From f4adb5932921203fc403c2a4de1c7810d4a008b6 Mon Sep 17 00:00:00 2001 From: Florin Coras Date: Thu, 30 Jul 2020 16:48:21 -0700 Subject: [PATCH 808/909] network: socket interface support for addresses (#12189) Signed-off-by: Florin Coras --- include/envoy/network/address.h | 5 +++ include/envoy/network/socket.h | 4 +-- source/common/network/address_impl.cc | 36 +++++++++++++------ source/common/network/address_impl.h | 29 +++++++++------ source/common/network/listen_socket_impl.cc | 3 +- source/common/network/listen_socket_impl.h | 5 ++- source/common/network/socket_impl.cc | 2 +- source/common/network/socket_interface.h | 18 ++++++++++ .../common/network/socket_interface_impl.cc | 4 +-- source/common/network/socket_interface_impl.h | 5 +-- .../filters/udp/udp_proxy/udp_proxy_filter.h | 3 +- .../stat_sinks/common/statsd/statsd.cc | 4 +-- .../extensions/tracers/xray/daemon_broker.cc | 3 +- test/common/network/dns_impl_test.cc | 2 ++ .../socket_interface_integration_test.cc | 23 ++++++++++++ test/mocks/network/mocks.h | 2 ++ 16 files changed, 109 insertions(+), 39 deletions(-) diff --git a/include/envoy/network/address.h b/include/envoy/network/address.h index 7ba285ca23da..94793f12c155 100644 --- a/include/envoy/network/address.h +++ b/include/envoy/network/address.h @@ -176,6 +176,11 @@ class Instance { * @return the type of address. 
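The watchdog change above boils down to one adjustment: when both kill_timeout and max_kill_timeout_jitter are non-zero, a random offset in (0, jitter] milliseconds is added to the kill timeout, which is exactly what the bounds in CanSkewsKillTimeout assert. Below is a standalone sketch of that arithmetic; jitteredKillTimeout is an illustrative helper, and std::mt19937_64 merely stands in for the server's RNG.

#include <chrono>
#include <cstdint>
#include <random>

// Applies the same adjustment as MainImpl::initialize(): when both the kill timeout and
// the jitter are enabled, add a random value in (0, max_jitter_ms] to the timeout.
std::chrono::milliseconds jitteredKillTimeout(uint64_t kill_timeout_ms, uint64_t max_jitter_ms,
                                              uint64_t random_value) {
  if (kill_timeout_ms > 0 && max_jitter_ms > 0) {
    kill_timeout_ms += (random_value % max_jitter_ms) + 1;
  }
  return std::chrono::milliseconds(kill_timeout_ms);
}

int main() {
  std::mt19937_64 rng{std::random_device{}()};
  // With kill_timeout: "1.0s" and max_kill_timeout_jitter: "0.5s" (as in the tests above),
  // the result lies in (1000ms, 1500ms].
  const auto timeout = jitteredKillTimeout(1000, 500, rng());
  return (timeout.count() >= 1001 && timeout.count() <= 1500) ? 0 : 1;
}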
*/ virtual Type type() const PURE; + + /** + * @return name of socket interface that should be used with this address + */ + virtual const std::string& socketInterface() const PURE; }; using InstanceConstSharedPtr = std::shared_ptr; diff --git a/include/envoy/network/socket.h b/include/envoy/network/socket.h index c393f8541e2d..74c805e79785 100644 --- a/include/envoy/network/socket.h +++ b/include/envoy/network/socket.h @@ -246,7 +246,7 @@ class SocketInterface { * @return @ref Network::IoHandlePtr that wraps the underlying socket file descriptor */ virtual IoHandlePtr socket(Socket::Type type, Address::Type addr_type, Address::IpVersion version, - bool socket_v6only) PURE; + bool socket_v6only) const PURE; /** * Low level api to create a socket in the underlying host stack. Does not create an @@ -256,7 +256,7 @@ class SocketInterface { * @return @ref Network::IoHandlePtr that wraps the underlying socket file descriptor */ virtual IoHandlePtr socket(Socket::Type socket_type, - const Address::InstanceConstSharedPtr addr) PURE; + const Address::InstanceConstSharedPtr addr) const PURE; /** * Wrap socket file descriptor in IoHandle diff --git a/source/common/network/address_impl.cc b/source/common/network/address_impl.cc index 5b4d57e55bd4..971322b21ccd 100644 --- a/source/common/network/address_impl.cc +++ b/source/common/network/address_impl.cc @@ -91,7 +91,8 @@ Address::InstanceConstSharedPtr addressFromSockAddr(const sockaddr_storage& ss, NOT_REACHED_GCOVR_EXCL_LINE; } -Ipv4Instance::Ipv4Instance(const sockaddr_in* address) : InstanceBase(Type::Ip) { +Ipv4Instance::Ipv4Instance(const sockaddr_in* address, absl::string_view sock_interface) + : InstanceBase(Type::Ip, sock_interface) { ip_.ipv4_.address_ = *address; ip_.friendly_address_ = sockaddrToString(*address); @@ -104,9 +105,12 @@ Ipv4Instance::Ipv4Instance(const sockaddr_in* address) : InstanceBase(Type::Ip) validateIpv4Supported(friendly_name_); } -Ipv4Instance::Ipv4Instance(const std::string& address) : Ipv4Instance(address, 0) {} +Ipv4Instance::Ipv4Instance(const std::string& address, absl::string_view sock_interface) + : Ipv4Instance(address, 0, sock_interface) {} -Ipv4Instance::Ipv4Instance(const std::string& address, uint32_t port) : InstanceBase(Type::Ip) { +Ipv4Instance::Ipv4Instance(const std::string& address, uint32_t port, + absl::string_view sock_interface) + : InstanceBase(Type::Ip, sock_interface) { memset(&ip_.ipv4_.address_, 0, sizeof(ip_.ipv4_.address_)); ip_.ipv4_.address_.sin_family = AF_INET; ip_.ipv4_.address_.sin_port = htons(port); @@ -120,7 +124,8 @@ Ipv4Instance::Ipv4Instance(const std::string& address, uint32_t port) : Instance ip_.friendly_address_ = address; } -Ipv4Instance::Ipv4Instance(uint32_t port) : InstanceBase(Type::Ip) { +Ipv4Instance::Ipv4Instance(uint32_t port, absl::string_view sock_interface) + : InstanceBase(Type::Ip, sock_interface) { memset(&ip_.ipv4_.address_, 0, sizeof(ip_.ipv4_.address_)); ip_.ipv4_.address_.sin_family = AF_INET; ip_.ipv4_.address_.sin_port = htons(port); @@ -181,7 +186,9 @@ std::string Ipv6Instance::Ipv6Helper::makeFriendlyAddress() const { return ptr; } -Ipv6Instance::Ipv6Instance(const sockaddr_in6& address, bool v6only) : InstanceBase(Type::Ip) { +Ipv6Instance::Ipv6Instance(const sockaddr_in6& address, bool v6only, + absl::string_view sock_interface) + : InstanceBase(Type::Ip, sock_interface) { ip_.ipv6_.address_ = address; ip_.friendly_address_ = ip_.ipv6_.makeFriendlyAddress(); ip_.ipv6_.v6only_ = v6only; @@ -189,9 +196,12 @@ Ipv6Instance::Ipv6Instance(const 
sockaddr_in6& address, bool v6only) : InstanceB validateIpv6Supported(friendly_name_); } -Ipv6Instance::Ipv6Instance(const std::string& address) : Ipv6Instance(address, 0) {} +Ipv6Instance::Ipv6Instance(const std::string& address, absl::string_view sock_interface) + : Ipv6Instance(address, 0, sock_interface) {} -Ipv6Instance::Ipv6Instance(const std::string& address, uint32_t port) : InstanceBase(Type::Ip) { +Ipv6Instance::Ipv6Instance(const std::string& address, uint32_t port, + absl::string_view sock_interface) + : InstanceBase(Type::Ip, sock_interface) { ip_.ipv6_.address_.sin6_family = AF_INET6; ip_.ipv6_.address_.sin6_port = htons(port); if (!address.empty()) { @@ -207,7 +217,8 @@ Ipv6Instance::Ipv6Instance(const std::string& address, uint32_t port) : Instance validateIpv6Supported(friendly_name_); } -Ipv6Instance::Ipv6Instance(uint32_t port) : Ipv6Instance("", port) {} +Ipv6Instance::Ipv6Instance(uint32_t port, absl::string_view sock_interface) + : Ipv6Instance("", port, sock_interface) {} bool Ipv6Instance::operator==(const Instance& rhs) const { const auto* rhs_casted = dynamic_cast(&rhs); @@ -215,8 +226,9 @@ bool Ipv6Instance::operator==(const Instance& rhs) const { (ip_.port() == rhs_casted->ip_.port())); } -PipeInstance::PipeInstance(const sockaddr_un* address, socklen_t ss_len, mode_t mode) - : InstanceBase(Type::Pipe) { +PipeInstance::PipeInstance(const sockaddr_un* address, socklen_t ss_len, mode_t mode, + absl::string_view sock_interface) + : InstanceBase(Type::Pipe, sock_interface) { if (address->sun_path[0] == '\0') { #if !defined(__linux__) throw EnvoyException("Abstract AF_UNIX sockets are only supported on linux."); @@ -240,7 +252,9 @@ PipeInstance::PipeInstance(const sockaddr_un* address, socklen_t ss_len, mode_t pipe_.mode_ = mode; } -PipeInstance::PipeInstance(const std::string& pipe_path, mode_t mode) : InstanceBase(Type::Pipe) { +PipeInstance::PipeInstance(const std::string& pipe_path, mode_t mode, + absl::string_view sock_interface) + : InstanceBase(Type::Pipe, sock_interface) { if (pipe_path.size() >= sizeof(pipe_.address_.sun_path)) { throw EnvoyException( fmt::format("Path \"{}\" exceeds maximum UNIX domain socket path size of {}.", pipe_path, diff --git a/source/common/network/address_impl.h b/source/common/network/address_impl.h index c9bd3e64408e..c7473fcd4754 100644 --- a/source/common/network/address_impl.h +++ b/source/common/network/address_impl.h @@ -37,10 +37,16 @@ class InstanceBase : public Instance { const std::string& logicalName() const override { return asString(); } Type type() const override { return type_; } + const std::string& socketInterface() const override { return socket_interface_; } + protected: InstanceBase(Type type) : type_(type) {} + InstanceBase(Type type, absl::string_view sock_interface) : type_(type) { + socket_interface_ = std::string(sock_interface); + } std::string friendly_name_; + std::string socket_interface_; private: const Type type_; @@ -54,23 +60,23 @@ class Ipv4Instance : public InstanceBase { /** * Construct from an existing unix IPv4 socket address (IP v4 address and port). */ - explicit Ipv4Instance(const sockaddr_in* address); + explicit Ipv4Instance(const sockaddr_in* address, absl::string_view sock_interface = ""); /** * Construct from a string IPv4 address such as "1.2.3.4". Port will be unset/0. 
*/ - explicit Ipv4Instance(const std::string& address); + explicit Ipv4Instance(const std::string& address, absl::string_view sock_interface = ""); /** * Construct from a string IPv4 address such as "1.2.3.4" as well as a port. */ - Ipv4Instance(const std::string& address, uint32_t port); + Ipv4Instance(const std::string& address, uint32_t port, absl::string_view sock_interface = ""); /** * Construct from a port. The IPv4 address will be set to "any" and is suitable for binding * a port to any available address. */ - explicit Ipv4Instance(uint32_t port); + explicit Ipv4Instance(uint32_t port, absl::string_view sock_interface = ""); // Network::Address::Instance bool operator==(const Instance& rhs) const override; @@ -124,23 +130,24 @@ class Ipv6Instance : public InstanceBase { /** * Construct from an existing unix IPv6 socket address (IP v6 address and port). */ - Ipv6Instance(const sockaddr_in6& address, bool v6only = true); + Ipv6Instance(const sockaddr_in6& address, bool v6only = true, + absl::string_view sock_interface = ""); /** * Construct from a string IPv6 address such as "12:34::5". Port will be unset/0. */ - explicit Ipv6Instance(const std::string& address); + explicit Ipv6Instance(const std::string& address, absl::string_view sock_interface = ""); /** * Construct from a string IPv6 address such as "12:34::5" as well as a port. */ - Ipv6Instance(const std::string& address, uint32_t port); + Ipv6Instance(const std::string& address, uint32_t port, absl::string_view sock_interface = ""); /** * Construct from a port. The IPv6 address will be set to "any" and is suitable for binding * a port to any available address. */ - explicit Ipv6Instance(uint32_t port); + explicit Ipv6Instance(uint32_t port, absl::string_view sock_interface = ""); // Network::Address::Instance bool operator==(const Instance& rhs) const override; @@ -195,12 +202,14 @@ class PipeInstance : public InstanceBase { /** * Construct from an existing unix address. */ - explicit PipeInstance(const sockaddr_un* address, socklen_t ss_len, mode_t mode = 0); + explicit PipeInstance(const sockaddr_un* address, socklen_t ss_len, mode_t mode = 0, + absl::string_view sock_interface = ""); /** * Construct from a string pipe path. 
*/ - explicit PipeInstance(const std::string& pipe_path, mode_t mode = 0); + explicit PipeInstance(const std::string& pipe_path, mode_t mode = 0, + absl::string_view sock_interface = ""); // Network::Address::Instance bool operator==(const Instance& rhs) const override; diff --git a/source/common/network/listen_socket_impl.cc b/source/common/network/listen_socket_impl.cc index ae8dab60227c..a905a254eac8 100644 --- a/source/common/network/listen_socket_impl.cc +++ b/source/common/network/listen_socket_impl.cc @@ -62,8 +62,7 @@ template <> void NetworkListenSocket>::setPrebindSocketOptions() {} UdsListenSocket::UdsListenSocket(const Address::InstanceConstSharedPtr& address) - : ListenSocketImpl(SocketInterfaceSingleton::get().socket(Socket::Type::Stream, address), - address) { + : ListenSocketImpl(ioHandleForAddr(Socket::Type::Stream, address), address) { RELEASE_ASSERT(io_handle_->fd() != -1, ""); bind(local_address_); } diff --git a/source/common/network/listen_socket_impl.h b/source/common/network/listen_socket_impl.h index c0786536a67e..8e6050599d73 100644 --- a/source/common/network/listen_socket_impl.h +++ b/source/common/network/listen_socket_impl.h @@ -165,9 +165,8 @@ class ClientSocketImpl : public ConnectionSocketImpl { public: ClientSocketImpl(const Address::InstanceConstSharedPtr& remote_address, const OptionsSharedPtr& options) - : ConnectionSocketImpl( - Network::SocketInterfaceSingleton::get().socket(Socket::Type::Stream, remote_address), - nullptr, remote_address) { + : ConnectionSocketImpl(Network::ioHandleForAddr(Socket::Type::Stream, remote_address), + nullptr, remote_address) { if (options) { addOptions(options); } diff --git a/source/common/network/socket_impl.cc b/source/common/network/socket_impl.cc index 2f1cce5c077f..a24c34de7eab 100644 --- a/source/common/network/socket_impl.cc +++ b/source/common/network/socket_impl.cc @@ -12,7 +12,7 @@ namespace Envoy { namespace Network { SocketImpl::SocketImpl(Socket::Type sock_type, const Address::InstanceConstSharedPtr addr) - : io_handle_(SocketInterfaceSingleton::get().socket(sock_type, addr)), sock_type_(sock_type), + : io_handle_(ioHandleForAddr(sock_type, addr)), sock_type_(sock_type), addr_type_(addr->type()) {} SocketImpl::SocketImpl(IoHandlePtr&& io_handle, diff --git a/source/common/network/socket_interface.h b/source/common/network/socket_interface.h index 66d717857500..9374b65a2344 100644 --- a/source/common/network/socket_interface.h +++ b/source/common/network/socket_interface.h @@ -52,5 +52,23 @@ static inline const SocketInterface* socketInterface(std::string name) { using SocketInterfaceSingleton = InjectableSingleton; using SocketInterfaceLoader = ScopedInjectableLoader; +/** + * Create IoHandle for given address + * @param type type of socket to be requested + * @param addr address that is gleaned for address type, version and socket interface name + * @return @ref Network::IoHandlePtr that wraps the underlying socket file descriptor + */ +static inline IoHandlePtr ioHandleForAddr(Socket::Type type, + const Address::InstanceConstSharedPtr addr) { + auto sock_interface_name = addr->socketInterface(); + if (!sock_interface_name.empty()) { + auto sock_interface = socketInterface(sock_interface_name); + RELEASE_ASSERT(sock_interface != nullptr, + fmt::format("missing socket interface {}", sock_interface_name)); + return sock_interface->socket(type, addr); + } + return SocketInterfaceSingleton::get().socket(type, addr); +} + } // namespace Network } // namespace Envoy \ No newline at end of file diff --git 
a/source/common/network/socket_interface_impl.cc b/source/common/network/socket_interface_impl.cc index e351faed53d1..065556d69222 100644 --- a/source/common/network/socket_interface_impl.cc +++ b/source/common/network/socket_interface_impl.cc @@ -13,7 +13,7 @@ namespace Envoy { namespace Network { IoHandlePtr SocketInterfaceImpl::socket(Socket::Type socket_type, Address::Type addr_type, - Address::IpVersion version, bool socket_v6only) { + Address::IpVersion version, bool socket_v6only) const { #if defined(__APPLE__) || defined(WIN32) int flags = 0; #else @@ -54,7 +54,7 @@ IoHandlePtr SocketInterfaceImpl::socket(Socket::Type socket_type, Address::Type } IoHandlePtr SocketInterfaceImpl::socket(Socket::Type socket_type, - const Address::InstanceConstSharedPtr addr) { + const Address::InstanceConstSharedPtr addr) const { Address::IpVersion ip_version = addr->ip() ? addr->ip()->version() : Address::IpVersion::v4; int v6only = 0; if (addr->type() == Address::Type::Ip && ip_version == Address::IpVersion::v6) { diff --git a/source/common/network/socket_interface_impl.h b/source/common/network/socket_interface_impl.h index 9259e01c09d9..42f9b6875d9d 100644 --- a/source/common/network/socket_interface_impl.h +++ b/source/common/network/socket_interface_impl.h @@ -12,8 +12,9 @@ class SocketInterfaceImpl : public SocketInterfaceBase { public: // SocketInterface IoHandlePtr socket(Socket::Type socket_type, Address::Type addr_type, Address::IpVersion version, - bool socket_v6only) override; - IoHandlePtr socket(Socket::Type socket_type, const Address::InstanceConstSharedPtr addr) override; + bool socket_v6only) const override; + IoHandlePtr socket(Socket::Type socket_type, + const Address::InstanceConstSharedPtr addr) const override; IoHandlePtr socket(os_fd_t fd) override; bool ipFamilySupported(int domain) override; diff --git a/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.h b/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.h index 8456d96089b3..68a85b3699d8 100644 --- a/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.h +++ b/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.h @@ -222,8 +222,7 @@ class UdpProxyFilter : public Network::UdpListenerReadFilter, virtual Network::IoHandlePtr createIoHandle(const Upstream::HostConstSharedPtr& host) { // Virtual so this can be overridden in unit tests. 
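The call sites above (UDP proxy, and the statsd and X-Ray sinks further down) now go through Network::ioHandleForAddr(), which consults the address's socketInterface() name and only falls back to the process-wide default interface when that name is empty. Below is a simplified, self-contained sketch of that lookup-with-fallback shape; the registry map, FakeIoHandle, and makeSocket are illustrative stand-ins, not Envoy APIs.

#include <cassert>
#include <functional>
#include <map>
#include <memory>
#include <string>

// Illustrative stand-ins for Envoy's SocketInterface / IoHandle types.
struct FakeIoHandle {
  std::string created_by;
};
using IoHandlePtr = std::unique_ptr<FakeIoHandle>;
using SocketFactory = std::function<IoHandlePtr()>;

// Named factories, analogous to the injectable SocketInterface registry.
std::map<std::string, SocketFactory>& registry() {
  static std::map<std::string, SocketFactory> factories;
  return factories;
}

// Same shape as ioHandleForAddr(): prefer the interface named by the address,
// fall back to the default when the address does not name one.
IoHandlePtr makeSocket(const std::string& interface_name_from_address) {
  if (!interface_name_from_address.empty()) {
    auto it = registry().find(interface_name_from_address);
    assert(it != registry().end() && "missing socket interface");
    return it->second();
  }
  return registry().at("default")();
}

int main() {
  registry()["default"] = [] { return std::make_unique<FakeIoHandle>(FakeIoHandle{"default"}); };
  registry()["custom"] = [] { return std::make_unique<FakeIoHandle>(FakeIoHandle{"custom"}); };
  assert(makeSocket("")->created_by == "default");      // address with no interface name
  assert(makeSocket("custom")->created_by == "custom"); // address naming a specific interface
  return 0;
}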
- return Network::SocketInterfaceSingleton::get().socket(Network::Socket::Type::Datagram, - host->address()); + return Network::ioHandleForAddr(Network::Socket::Type::Datagram, host->address()); } // Upstream::ClusterUpdateCallbacks diff --git a/source/extensions/stat_sinks/common/statsd/statsd.cc b/source/extensions/stat_sinks/common/statsd/statsd.cc index 3384013ce8cf..d7c1a5099178 100644 --- a/source/extensions/stat_sinks/common/statsd/statsd.cc +++ b/source/extensions/stat_sinks/common/statsd/statsd.cc @@ -30,8 +30,8 @@ namespace Common { namespace Statsd { UdpStatsdSink::WriterImpl::WriterImpl(UdpStatsdSink& parent) - : parent_(parent), io_handle_(Network::SocketInterfaceSingleton::get().socket( - Network::Socket::Type::Datagram, parent_.server_address_)) {} + : parent_(parent), io_handle_(Network::ioHandleForAddr(Network::Socket::Type::Datagram, + parent_.server_address_)) {} void UdpStatsdSink::WriterImpl::write(const std::string& message) { // TODO(mattklein123): We can avoid this const_cast pattern by having a constant variant of diff --git a/source/extensions/tracers/xray/daemon_broker.cc b/source/extensions/tracers/xray/daemon_broker.cc index 99e7a9bee4d3..9772fbe0073d 100644 --- a/source/extensions/tracers/xray/daemon_broker.cc +++ b/source/extensions/tracers/xray/daemon_broker.cc @@ -30,8 +30,7 @@ std::string createHeader(const std::string& format, uint32_t version) { DaemonBrokerImpl::DaemonBrokerImpl(const std::string& daemon_endpoint) : address_(Network::Utility::parseInternetAddressAndPort(daemon_endpoint, false /*v6only*/)), - io_handle_(Network::SocketInterfaceSingleton::get().socket(Network::Socket::Type::Datagram, - address_)) {} + io_handle_(Network::ioHandleForAddr(Network::Socket::Type::Datagram, address_)) {} void DaemonBrokerImpl::send(const std::string& data) const { auto& logger = Logger::Registry::getLog(Logger::Id::tracing); diff --git a/test/common/network/dns_impl_test.cc b/test/common/network/dns_impl_test.cc index 54cf4fbdb8a1..df6aed9816bb 100644 --- a/test/common/network/dns_impl_test.cc +++ b/test/common/network/dns_impl_test.cc @@ -393,10 +393,12 @@ class CustomInstance : public Address::Instance { const sockaddr* sockAddr() const override { return instance_.sockAddr(); } socklen_t sockAddrLen() const override { return instance_.sockAddrLen(); } Address::Type type() const override { return instance_.type(); } + const std::string& socketInterface() const override { return socket_interface_; } private: std::string antagonistic_name_; Address::Ipv4Instance instance_; + std::string socket_interface_{""}; }; TEST_F(DnsImplConstructor, SupportCustomAddressInstances) { diff --git a/test/integration/socket_interface_integration_test.cc b/test/integration/socket_interface_integration_test.cc index c2b798c9801e..3e40a901d6c3 100644 --- a/test/integration/socket_interface_integration_test.cc +++ b/test/integration/socket_interface_integration_test.cc @@ -63,5 +63,28 @@ TEST_P(SocketInterfaceIntegrationTest, Basic) { EXPECT_EQ("hello", response); } +TEST_P(SocketInterfaceIntegrationTest, AddressWithSocketInterface) { + BaseIntegrationTest::initialize(); + + ConnectionStatusCallbacks connect_callbacks_; + Network::ClientConnectionPtr client_; + Network::Address::InstanceConstSharedPtr address = + std::make_shared( + Network::Test::getLoopbackAddressUrlString(Network::Address::IpVersion::v4), + lookupPort("listener_0"), + "envoy.extensions.network.socket_interface.default_socket_interface"); + + client_ = dispatcher_->createClientConnection(address, 
Network::Address::InstanceConstSharedPtr(), + Network::Test::createRawBufferSocket(), nullptr); + + client_->addConnectionCallbacks(connect_callbacks_); + client_->connect(); + while (!connect_callbacks_.connected() && !connect_callbacks_.closed()) { + dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + } + + client_->close(Network::ConnectionCloseType::FlushWrite); +} + } // namespace } // namespace Envoy \ No newline at end of file diff --git a/test/mocks/network/mocks.h b/test/mocks/network/mocks.h index 6aa2d2d7c83b..be6d86765d0d 100644 --- a/test/mocks/network/mocks.h +++ b/test/mocks/network/mocks.h @@ -432,9 +432,11 @@ class MockResolvedAddress : public Address::Instance { const std::string& asString() const override { return physical_; } absl::string_view asStringView() const override { return physical_; } const std::string& logicalName() const override { return logical_; } + const std::string& socketInterface() const override { return socket_interface_; } const std::string logical_; const std::string physical_; + const std::string socket_interface_{""}; }; class MockTransportSocketCallbacks : public TransportSocketCallbacks { From a2ebf092025e64f1b452a8bdaef13795677bceef Mon Sep 17 00:00:00 2001 From: yanavlasov Date: Thu, 30 Jul 2020 23:50:50 -0400 Subject: [PATCH 809/909] http2: remove exceptions from H/2 codec (#11575) Remove all throw statements from H/2 codec and replace error plumbing with Status and StatusOr objects. Signed-off-by: Yan Avlasov --- source/common/http/http2/BUILD | 4 +- source/common/http/http2/codec_impl.cc | 218 +++++++++++------- source/common/http/http2/codec_impl.h | 58 +++-- test/common/http/http2/BUILD | 16 +- test/common/http/http2/codec_impl_test.cc | 157 +++++++++---- test/common/http/http2/codec_impl_test_util.h | 16 +- test/common/http/http2/frame_replay.h | 1 - test/common/http/http2/frame_replay_test.cc | 1 + .../http/http2/request_header_fuzz_test.cc | 1 + .../http/http2/response_header_fuzz_test.cc | 1 + 10 files changed, 311 insertions(+), 162 deletions(-) diff --git a/source/common/http/http2/BUILD b/source/common/http/http2/BUILD index d2e4ed011311..ac83f7b49884 100644 --- a/source/common/http/http2/BUILD +++ b/source/common/http/http2/BUILD @@ -58,7 +58,9 @@ envoy_cc_library( "abseil_inlined_vector", "abseil_algorithm", ], - deps = CODEC_LIB_DEPS, + deps = CODEC_LIB_DEPS + [ + "//source/common/common:statusor_lib", + ], ) envoy_cc_library( diff --git a/source/common/http/http2/codec_impl.cc b/source/common/http/http2/codec_impl.cc index 0405ec12a5c8..205c9d2d18ab 100644 --- a/source/common/http/http2/codec_impl.cc +++ b/source/common/http/http2/codec_impl.cc @@ -159,7 +159,16 @@ void ConnectionImpl::StreamImpl::encodeHeadersBase(const std::vector local_end_stream_ = end_stream; submitHeaders(final_headers, end_stream ? nullptr : &provider); - parent_.sendPendingFrames(); + auto status = parent_.sendPendingFrames(); + // The RELEASE_ASSERT below does not change the existing behavior of `sendPendingFrames()`. + // The `sendPendingFrames()` used to throw on errors and the only method that was catching + // these exceptions was the `dispatch()`. The `dispatch()` method still checks and handles + // errors returned by the `sendPendingFrames()`. + // Other callers of `sendPendingFrames()` do not catch exceptions from this method and + // would cause abnormal process termination in error cases. This change replaces abnormal + // process termination from unhandled exception with the RELEASE_ASSERT. 
+ // Further work will replace this RELEASE_ASSERT with proper error handling. + RELEASE_ASSERT(status.ok(), "sendPendingFrames() failure in non dispatching context"); } void ConnectionImpl::ClientStreamImpl::encodeHeaders(const RequestHeaderMap& headers, @@ -222,7 +231,9 @@ void ConnectionImpl::StreamImpl::encodeTrailersBase(const HeaderMap& trailers) { createPendingFlushTimer(); } else { submitTrailers(trailers); - parent_.sendPendingFrames(); + auto status = parent_.sendPendingFrames(); + // See comment in the `encodeHeadersBase()` method about this RELEASE_ASSERT. + RELEASE_ASSERT(status.ok(), "sendPendingFrames() failure in non dispatching context"); } } @@ -235,7 +246,9 @@ void ConnectionImpl::StreamImpl::encodeMetadata(const MetadataMapVector& metadat for (uint8_t flags : metadata_encoder.payloadFrameFlagBytes()) { submitMetadata(flags); } - parent_.sendPendingFrames(); + auto status = parent_.sendPendingFrames(); + // See comment in the `encodeHeadersBase()` method about this RELEASE_ASSERT. + RELEASE_ASSERT(status.ok(), "sendPendingFrames() failure in non dispatching context"); } void ConnectionImpl::StreamImpl::readDisable(bool disable) { @@ -250,7 +263,9 @@ void ConnectionImpl::StreamImpl::readDisable(bool disable) { if (!buffersOverrun()) { nghttp2_session_consume(parent_.session_, stream_id_, unconsumed_bytes_); unconsumed_bytes_ = 0; - parent_.sendPendingFrames(); + auto status = parent_.sendPendingFrames(); + // See comment in the `encodeHeadersBase()` method about this RELEASE_ASSERT. + RELEASE_ASSERT(status.ok(), "sendPendingFrames() failure in non dispatching context"); } } } @@ -364,7 +379,7 @@ ssize_t ConnectionImpl::StreamImpl::onDataSourceRead(uint64_t length, uint32_t* } } -int ConnectionImpl::StreamImpl::onDataSourceSend(const uint8_t* framehd, size_t length) { +Status ConnectionImpl::StreamImpl::onDataSourceSend(const uint8_t* framehd, size_t length) { // In this callback we are writing out a raw DATA frame without copying. nghttp2 assumes that we // "just know" that the frame header is 9 bytes. // https://nghttp2.org/documentation/types.html#c.nghttp2_send_data_callback @@ -373,17 +388,18 @@ int ConnectionImpl::StreamImpl::onDataSourceSend(const uint8_t* framehd, size_t parent_.outbound_data_frames_++; Buffer::OwnedImpl output; - if (!parent_.addOutboundFrameFragment(output, framehd, FRAME_HEADER_SIZE)) { + auto status = parent_.addOutboundFrameFragment(output, framehd, FRAME_HEADER_SIZE); + if (!status.ok()) { ENVOY_CONN_LOG(debug, "error sending data frame: Too many frames in the outbound queue", parent_.connection_); setDetails(Http2ResponseCodeDetails::get().outbound_frame_flood); - return NGHTTP2_ERR_FLOODED; + return status; } parent_.stats_.pending_send_bytes_.sub(length); output.move(pending_send_data_, length); parent_.connection_.write(output, false); - return 0; + return status; } void ConnectionImpl::ClientStreamImpl::submitHeaders(const std::vector& final_headers, @@ -419,7 +435,9 @@ void ConnectionImpl::StreamImpl::onPendingFlushTimer() { // This will emit a reset frame for this stream and close the stream locally. No reset callbacks // will be run because higher layers think the stream is already finished. resetStreamWorker(StreamResetReason::LocalReset); - parent_.sendPendingFrames(); + auto status = parent_.sendPendingFrames(); + // See comment in the `encodeHeadersBase()` method about this RELEASE_ASSERT. 
+ RELEASE_ASSERT(status.ok(), "sendPendingFrames() failure in non dispatching context"); } void ConnectionImpl::StreamImpl::encodeData(Buffer::Instance& data, bool end_stream) { @@ -434,7 +452,9 @@ void ConnectionImpl::StreamImpl::encodeData(Buffer::Instance& data, bool end_str data_deferred_ = false; } - parent_.sendPendingFrames(); + auto status = parent_.sendPendingFrames(); + // See comment in the `encodeHeadersBase()` method about this RELEASE_ASSERT. + RELEASE_ASSERT(status.ok(), "sendPendingFrames() failure in non dispatching context"); if (local_end_stream_ && pending_send_data_.length() > 0) { createPendingFlushTimer(); } @@ -458,7 +478,9 @@ void ConnectionImpl::StreamImpl::resetStream(StreamResetReason reason) { // We must still call sendPendingFrames() in both the deferred and not deferred path. This forces // the cleanup logic to run which will reset the stream in all cases if all data frames could not // be sent. - parent_.sendPendingFrames(); + auto status = parent_.sendPendingFrames(); + // See comment in the `encodeHeadersBase()` method about this RELEASE_ASSERT. + RELEASE_ASSERT(status.ok(), "sendPendingFrames() failure in non dispatching context"); } void ConnectionImpl::StreamImpl::resetStreamWorker(StreamResetReason reason) { @@ -498,7 +520,7 @@ ConnectionImpl::ConnectionImpl(Network::Connection& connection, CodecStats& stat per_stream_buffer_limit_(http2_options.initial_stream_window_size().value()), stream_error_on_invalid_http_messaging_( http2_options.override_stream_error_on_invalid_http_message().value()), - flood_detected_(false), max_outbound_frames_(http2_options.max_outbound_frames().value()), + max_outbound_frames_(http2_options.max_outbound_frames().value()), frame_buffer_releasor_([this]() { releaseOutboundFrame(); }), max_outbound_control_frames_(http2_options.max_outbound_control_frames().value()), control_frame_buffer_releasor_([this]() { releaseOutboundControlFrame(); }), @@ -535,12 +557,19 @@ Http::Status ConnectionImpl::innerDispatch(Buffer::Instance& data) { dispatching_ = true; ssize_t rc = nghttp2_session_mem_recv(session_, static_cast(slice.mem_), slice.len_); - if (rc == NGHTTP2_ERR_FLOODED || flood_detected_) { - throw FrameFloodException( + if (!nghttp2_callback_status_.ok()) { + return nghttp2_callback_status_; + } + // This error is returned when nghttp2 library detected a frame flood by one of its + // internal mechanisms. Most flood protection is done by Envoy's codec and this error + // should never be returned. However it is handled here in case nghttp2 has some flood + // protections that Envoy's codec does not have. + if (rc == NGHTTP2_ERR_FLOODED) { + return bufferFloodError( "Flooding was detected in this HTTP/2 session, and it must be closed"); } if (rc != static_cast(slice.len_)) { - throw CodecProtocolException(fmt::format("{}", nghttp2_strerror(rc))); + return codecProtocolError(nghttp2_strerror(rc)); } dispatching_ = false; @@ -550,8 +579,7 @@ Http::Status ConnectionImpl::innerDispatch(Buffer::Instance& data) { data.drain(data.length()); // Decoding incoming frames can generate outbound frames so flush pending. - sendPendingFrames(); - return Http::okStatus(); + return sendPendingFrames(); } ConnectionImpl::StreamImpl* ConnectionImpl::getStream(int32_t stream_id) { @@ -579,30 +607,33 @@ void ConnectionImpl::goAway() { NGHTTP2_NO_ERROR, nullptr, 0); ASSERT(rc == 0); - sendPendingFrames(); + auto status = sendPendingFrames(); + // See comment in the `encodeHeadersBase()` method about this RELEASE_ASSERT. 
+ RELEASE_ASSERT(status.ok(), "sendPendingFrames() failure in non dispatching context"); } void ConnectionImpl::shutdownNotice() { int rc = nghttp2_submit_shutdown_notice(session_); ASSERT(rc == 0); - sendPendingFrames(); + auto status = sendPendingFrames(); + // See comment in the `encodeHeadersBase()` method about this RELEASE_ASSERT. + RELEASE_ASSERT(status.ok(), "sendPendingFrames() failure in non dispatching context"); } -int ConnectionImpl::onBeforeFrameReceived(const nghttp2_frame_hd* hd) { +Status ConnectionImpl::onBeforeFrameReceived(const nghttp2_frame_hd* hd) { ENVOY_CONN_LOG(trace, "about to recv frame type={}, flags={}", connection_, static_cast(hd->type), static_cast(hd->flags)); // Track all the frames without padding here, since this is the only callback we receive // for some of them (e.g. CONTINUATION frame, frames sent on closed streams, etc.). // HEADERS frame is tracked in onBeginHeaders(), DATA frame is tracked in onFrameReceived(). + auto status = okStatus(); if (hd->type != NGHTTP2_HEADERS && hd->type != NGHTTP2_DATA) { - if (!trackInboundFrames(hd, 0)) { - return NGHTTP2_ERR_FLOODED; - } + status = trackInboundFrames(hd, 0); } - return 0; + return status; } ABSL_MUST_USE_RESULT @@ -615,7 +646,7 @@ enum GoAwayErrorCode ngHttp2ErrorCodeToErrorCode(uint32_t code) noexcept { } } -int ConnectionImpl::onFrameReceived(const nghttp2_frame* frame) { +Status ConnectionImpl::onFrameReceived(const nghttp2_frame* frame) { ENVOY_CONN_LOG(trace, "recv frame type={}", connection_, static_cast(frame->hd.type)); // onFrameReceived() is called with a complete HEADERS frame assembled from all the HEADERS @@ -624,9 +655,7 @@ int ConnectionImpl::onFrameReceived(const nghttp2_frame* frame) { ASSERT(frame->hd.type != NGHTTP2_CONTINUATION); if (frame->hd.type == NGHTTP2_DATA) { - if (!trackInboundFrames(&frame->hd, frame->data.padlen)) { - return NGHTTP2_ERR_FLOODED; - } + RETURN_IF_ERROR(trackInboundFrames(&frame->hd, frame->data.padlen)); } // Only raise GOAWAY once, since we don't currently expose stream information. 
Shutdown @@ -636,7 +665,7 @@ int ConnectionImpl::onFrameReceived(const nghttp2_frame* frame) { ASSERT(frame->hd.stream_id == 0); raised_goaway_ = true; callbacks().onGoAway(ngHttp2ErrorCodeToErrorCode(frame->goaway.error_code)); - return 0; + return okStatus(); } if (frame->hd.type == NGHTTP2_SETTINGS && frame->hd.flags == NGHTTP2_FLAG_NONE) { @@ -645,7 +674,7 @@ int ConnectionImpl::onFrameReceived(const nghttp2_frame* frame) { StreamImpl* stream = getStream(frame->hd.stream_id); if (!stream) { - return 0; + return okStatus(); } switch (frame->hd.type) { @@ -705,7 +734,7 @@ int ConnectionImpl::onFrameReceived(const nghttp2_frame* frame) { } } - return 0; + return okStatus(); } int ConnectionImpl::onFrameSend(const nghttp2_frame* frame) { @@ -794,30 +823,26 @@ int ConnectionImpl::onBeforeFrameSend(const nghttp2_frame* frame) { return 0; } -void ConnectionImpl::incrementOutboundFrameCount(bool is_outbound_flood_monitored_control_frame) { +Status ConnectionImpl::incrementOutboundFrameCount(bool is_outbound_flood_monitored_control_frame) { ++outbound_frames_; if (is_outbound_flood_monitored_control_frame) { ++outbound_control_frames_; } - checkOutboundQueueLimits(); + return checkOutboundQueueLimits(); } -bool ConnectionImpl::addOutboundFrameFragment(Buffer::OwnedImpl& output, const uint8_t* data, - size_t length) { +Status ConnectionImpl::addOutboundFrameFragment(Buffer::OwnedImpl& output, const uint8_t* data, + size_t length) { // Reset the outbound frame type (set in the onBeforeFrameSend callback) since the // onBeforeFrameSend callback is not called for DATA frames. bool is_outbound_flood_monitored_control_frame = false; std::swap(is_outbound_flood_monitored_control_frame, is_outbound_flood_monitored_control_frame_); - try { - incrementOutboundFrameCount(is_outbound_flood_monitored_control_frame); - } catch (const FrameFloodException&) { - return false; - } + RETURN_IF_ERROR(incrementOutboundFrameCount(is_outbound_flood_monitored_control_frame)); output.add(data, length); output.addDrainTracker(is_outbound_flood_monitored_control_frame ? control_frame_buffer_releasor_ : frame_buffer_releasor_); - return true; + return okStatus(); } void ConnectionImpl::releaseOutboundFrame() { @@ -831,13 +856,14 @@ void ConnectionImpl::releaseOutboundControlFrame() { releaseOutboundFrame(); } -ssize_t ConnectionImpl::onSend(const uint8_t* data, size_t length) { +StatusOr ConnectionImpl::onSend(const uint8_t* data, size_t length) { ENVOY_CONN_LOG(trace, "send data: bytes={}", connection_, length); Buffer::OwnedImpl buffer; - if (!addOutboundFrameFragment(buffer, data, length)) { + auto status = addOutboundFrameFragment(buffer, data, length); + if (!status.ok()) { ENVOY_CONN_LOG(debug, "error sending frame: Too many frames in the outbound queue.", connection_); - return NGHTTP2_ERR_FLOODED; + return status; } // While the buffer is transient the fragment it contains will be moved into the @@ -959,24 +985,25 @@ int ConnectionImpl::saveHeader(const nghttp2_frame* frame, HeaderString&& name, } } -void ConnectionImpl::sendPendingFrames() { +Status ConnectionImpl::sendPendingFrames() { if (dispatching_ || connection_.state() == Network::Connection::State::Closed) { - return; + return okStatus(); } const int rc = nghttp2_session_send(session_); if (rc != 0) { ASSERT(rc == NGHTTP2_ERR_CALLBACK_FAILURE); - // For errors caused by the pending outbound frame flood the FrameFloodException has - // to be thrown. However the nghttp2 library returns only the generic error code for - // all failure types. 
Check queue limits and throw FrameFloodException if they were - // exceeded. - if (outbound_frames_ > max_outbound_frames_ || - outbound_control_frames_ > max_outbound_control_frames_) { - throw FrameFloodException("Too many frames in the outbound queue."); + + if (!nghttp2_callback_status_.ok()) { + return nghttp2_callback_status_; } - throw CodecProtocolException(std::string(nghttp2_strerror(rc))); + // The frame flood error should set the nghttp2_callback_status_ error, and return at the + // statement above. + ASSERT(outbound_frames_ <= max_outbound_frames_ && + outbound_control_frames_ <= max_outbound_control_frames_); + + return codecProtocolError(nghttp2_strerror(rc)); } // See ConnectionImpl::StreamImpl::resetStream() for why we do this. This is an uncommon event, @@ -998,8 +1025,9 @@ void ConnectionImpl::sendPendingFrames() { stream->resetStreamWorker(stream->deferred_reset_.value()); } } - sendPendingFrames(); + RETURN_IF_ERROR(sendPendingFrames()); } + return okStatus(); } void ConnectionImpl::sendSettings( @@ -1065,12 +1093,25 @@ void ConnectionImpl::sendSettings( } } +int ConnectionImpl::setAndCheckNghttp2CallbackStatus(Status&& status) { + // Keep the error status that caused the original failure. Subsequent + // error statuses are silently discarded. + nghttp2_callback_status_.Update(std::move(status)); + return nghttp2_callback_status_.ok() ? 0 : NGHTTP2_ERR_CALLBACK_FAILURE; +} + ConnectionImpl::Http2Callbacks::Http2Callbacks() { nghttp2_session_callbacks_new(&callbacks_); nghttp2_session_callbacks_set_send_callback( callbacks_, [](nghttp2_session*, const uint8_t* data, size_t length, int, void* user_data) -> ssize_t { - return static_cast(user_data)->onSend(data, length); + auto status_or_len = static_cast(user_data)->onSend(data, length); + if (status_or_len.ok()) { + return status_or_len.value(); + } + auto status = status_or_len.status(); + return static_cast(user_data)->setAndCheckNghttp2CallbackStatus( + std::move(status)); }); nghttp2_session_callbacks_set_send_data_callback( @@ -1078,12 +1119,16 @@ ConnectionImpl::Http2Callbacks::Http2Callbacks() { [](nghttp2_session*, nghttp2_frame* frame, const uint8_t* framehd, size_t length, nghttp2_data_source* source, void*) -> int { ASSERT(frame->data.padlen == 0); - return static_cast(source->ptr)->onDataSourceSend(framehd, length); + auto status = static_cast(source->ptr)->onDataSourceSend(framehd, length); + return static_cast(source->ptr) + ->parent_.setAndCheckNghttp2CallbackStatus(std::move(status)); }); nghttp2_session_callbacks_set_on_begin_headers_callback( callbacks_, [](nghttp2_session*, const nghttp2_frame* frame, void* user_data) -> int { - return static_cast(user_data)->onBeginHeaders(frame); + auto status = static_cast(user_data)->onBeginHeaders(frame); + return static_cast(user_data)->setAndCheckNghttp2CallbackStatus( + std::move(status)); }); nghttp2_session_callbacks_set_on_header_callback( @@ -1108,12 +1153,16 @@ ConnectionImpl::Http2Callbacks::Http2Callbacks() { nghttp2_session_callbacks_set_on_begin_frame_callback( callbacks_, [](nghttp2_session*, const nghttp2_frame_hd* hd, void* user_data) -> int { - return static_cast(user_data)->onBeforeFrameReceived(hd); + auto status = static_cast(user_data)->onBeforeFrameReceived(hd); + return static_cast(user_data)->setAndCheckNghttp2CallbackStatus( + std::move(status)); }); nghttp2_session_callbacks_set_on_frame_recv_callback( callbacks_, [](nghttp2_session*, const nghttp2_frame* frame, void* user_data) -> int { - return 
static_cast(user_data)->onFrameReceived(frame); + auto status = static_cast(user_data)->onFrameReceived(frame); + return static_cast(user_data)->setAndCheckNghttp2CallbackStatus( + std::move(status)); }); nghttp2_session_callbacks_set_on_stream_close_callback( @@ -1251,7 +1300,7 @@ RequestEncoder& ClientConnectionImpl::newStream(ResponseDecoder& decoder) { return stream_ref; } -int ClientConnectionImpl::onBeginHeaders(const nghttp2_frame* frame) { +Status ClientConnectionImpl::onBeginHeaders(const nghttp2_frame* frame) { // The client code explicitly does not currently support push promise. RELEASE_ASSERT(frame->hd.type == NGHTTP2_HEADERS, ""); RELEASE_ASSERT(frame->headers.cat == NGHTTP2_HCAT_RESPONSE || @@ -1262,7 +1311,7 @@ int ClientConnectionImpl::onBeginHeaders(const nghttp2_frame* frame) { stream->allocTrailers(); } - return 0; + return okStatus(); } int ClientConnectionImpl::onHeader(const nghttp2_frame* frame, HeaderString&& name, @@ -1290,13 +1339,11 @@ ServerConnectionImpl::ServerConnectionImpl( allow_metadata_ = http2_options.allow_metadata(); } -int ServerConnectionImpl::onBeginHeaders(const nghttp2_frame* frame) { +Status ServerConnectionImpl::onBeginHeaders(const nghttp2_frame* frame) { // For a server connection, we should never get push promise frames. ASSERT(frame->hd.type == NGHTTP2_HEADERS); - if (!trackInboundFrames(&frame->hd, frame->headers.padlen)) { - return NGHTTP2_ERR_FLOODED; - } + RETURN_IF_ERROR(trackInboundFrames(&frame->hd, frame->headers.padlen)); if (frame->headers.cat != NGHTTP2_HCAT_REQUEST) { stats_.trailers_.inc(); @@ -1304,7 +1351,7 @@ int ServerConnectionImpl::onBeginHeaders(const nghttp2_frame* frame) { StreamImpl* stream = getStream(frame->hd.stream_id); stream->allocTrailers(); - return 0; + return okStatus(); } ServerStreamImplPtr stream(new ServerStreamImpl(*this, per_stream_buffer_limit_)); @@ -1316,7 +1363,7 @@ int ServerConnectionImpl::onBeginHeaders(const nghttp2_frame* frame) { LinkedList::moveIntoList(std::move(stream), active_streams_); nghttp2_session_set_stream_user_data(session_, frame->hd.stream_id, active_streams_.front().get()); - return 0; + return okStatus(); } int ServerConnectionImpl::onHeader(const nghttp2_frame* frame, HeaderString&& name, @@ -1327,7 +1374,8 @@ int ServerConnectionImpl::onHeader(const nghttp2_frame* frame, HeaderString&& na return saveHeader(frame, std::move(name), std::move(value)); } -bool ServerConnectionImpl::trackInboundFrames(const nghttp2_frame_hd* hd, uint32_t padding_length) { +Status ServerConnectionImpl::trackInboundFrames(const nghttp2_frame_hd* hd, + uint32_t padding_length) { ENVOY_CONN_LOG(trace, "track inbound frame type={} flags={} length={} padding_length={}", connection_, static_cast(hd->type), static_cast(hd->flags), static_cast(hd->length), padding_length); @@ -1358,17 +1406,10 @@ bool ServerConnectionImpl::trackInboundFrames(const nghttp2_frame_hd* hd, uint32 break; } - if (!checkInboundFrameLimits(hd->stream_id)) { - // NGHTTP2_ERR_FLOODED is overridden within nghttp2 library and it doesn't propagate - // all the way to nghttp2_session_mem_recv() where we need it. 
- flood_detected_ = true; - return false; - } - - return true; + return checkInboundFrameLimits(hd->stream_id); } -bool ServerConnectionImpl::checkInboundFrameLimits(int32_t stream_id) { +Status ServerConnectionImpl::checkInboundFrameLimits(int32_t stream_id) { ASSERT(dispatching_downstream_data_); ConnectionImpl::StreamImpl* stream = getStream(stream_id); @@ -1382,7 +1423,7 @@ bool ServerConnectionImpl::checkInboundFrameLimits(int32_t stream_id) { stream->setDetails(Http2ResponseCodeDetails::get().inbound_empty_frame_flood); } stats_.inbound_empty_frames_flood_.inc(); - return false; + return bufferFloodError("Too many consecutive frames with an empty payload"); } if (inbound_priority_frames_ > max_inbound_priority_frames_per_stream_ * (1 + inbound_streams_)) { @@ -1390,7 +1431,7 @@ bool ServerConnectionImpl::checkInboundFrameLimits(int32_t stream_id) { "error reading frame: Too many PRIORITY frames received in this HTTP/2 session.", connection_); stats_.inbound_priority_frames_flood_.inc(); - return false; + return bufferFloodError("Too many PRIORITY frames"); } if (inbound_window_update_frames_ > @@ -1401,21 +1442,22 @@ bool ServerConnectionImpl::checkInboundFrameLimits(int32_t stream_id) { "error reading frame: Too many WINDOW_UPDATE frames received in this HTTP/2 session.", connection_); stats_.inbound_window_update_frames_flood_.inc(); - return false; + return bufferFloodError("Too many WINDOW_UPDATE frames"); } - return true; + return okStatus(); } -void ServerConnectionImpl::checkOutboundQueueLimits() { +Status ServerConnectionImpl::checkOutboundQueueLimits() { if (outbound_frames_ > max_outbound_frames_ && dispatching_downstream_data_) { stats_.outbound_flood_.inc(); - throw FrameFloodException("Too many frames in the outbound queue."); + return bufferFloodError("Too many frames in the outbound queue."); } if (outbound_control_frames_ > max_outbound_control_frames_ && dispatching_downstream_data_) { stats_.outbound_control_flood_.inc(); - throw FrameFloodException("Too many control frames in the outbound queue."); + return bufferFloodError("Too many control frames in the outbound queue."); } + return okStatus(); } Http::Status ServerConnectionImpl::dispatch(Buffer::Instance& data) { @@ -1430,13 +1472,11 @@ Http::Status ServerConnectionImpl::innerDispatch(Buffer::Instance& data) { ASSERT(!dispatching_downstream_data_); dispatching_downstream_data_ = true; - // Make sure the dispatching_downstream_data_ is set to false even - // when ConnectionImpl::dispatch throws an exception. + // Make sure the dispatching_downstream_data_ is set to false when innerDispatch ends. Cleanup cleanup([this]() { dispatching_downstream_data_ = false; }); // Make sure downstream outbound queue was not flooded by the upstream frames. 
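Several of the converted methods above and below propagate failures with RETURN_IF_ERROR. Envoy's macro is defined elsewhere in the tree; a typical standalone shape of such a macro, shown here purely for illustration:

#include "absl/status/status.h"

// Illustration only; not Envoy's actual definition.
#define RETURN_IF_ERROR(expr)                                                                      \
  do {                                                                                             \
    const absl::Status _status = (expr);                                                           \
    if (!_status.ok()) {                                                                           \
      return _status;                                                                              \
    }                                                                                              \
  } while (false)

absl::Status mayFail(bool fail) {
  return fail ? absl::InvalidArgumentError("boom") : absl::OkStatus();
}

absl::Status caller() {
  RETURN_IF_ERROR(mayFail(false)); // Ok: execution continues.
  RETURN_IF_ERROR(mayFail(true));  // Error: returned to our caller immediately.
  return absl::OkStatus();
}

int main() { return caller().ok() ? 0 : 1; }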
- checkOutboundQueueLimits(); - + RETURN_IF_ERROR(checkOutboundQueueLimits()); return ConnectionImpl::innerDispatch(data); } diff --git a/source/common/http/http2/codec_impl.h b/source/common/http/http2/codec_impl.h index b19f9880a3db..ee3b71253502 100644 --- a/source/common/http/http2/codec_impl.h +++ b/source/common/http/http2/codec_impl.h @@ -16,6 +16,7 @@ #include "common/buffer/watermark_buffer.h" #include "common/common/linked_object.h" #include "common/common/logger.h" +#include "common/common/statusor.h" #include "common/common/thread.h" #include "common/http/codec_helper.h" #include "common/http/header_map_impl.h" @@ -185,7 +186,7 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable& final_headers, const HeaderMap& headers); void saveHeader(HeaderString&& name, HeaderString&& value); @@ -391,7 +392,7 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable active_streams_; @@ -421,7 +428,12 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable onSend(const uint8_t* data, size_t length); private: virtual ConnectionCallbacks& callbacks() PURE; - virtual int onBeginHeaders(const nghttp2_frame* frame) PURE; + virtual Status onBeginHeaders(const nghttp2_frame* frame) PURE; int onData(int32_t stream_id, const uint8_t* data, size_t len); - int onBeforeFrameReceived(const nghttp2_frame_hd* hd); - int onFrameReceived(const nghttp2_frame* frame); + Status onBeforeFrameReceived(const nghttp2_frame_hd* hd); + Status onFrameReceived(const nghttp2_frame* frame); int onBeforeFrameSend(const nghttp2_frame* frame); int onFrameSend(const nghttp2_frame* frame); int onError(absl::string_view error); @@ -501,12 +513,12 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable checkHeaderNameForUnderscores(absl::string_view header_name) override; // Http::Connection diff --git a/test/common/http/http2/BUILD b/test/common/http/http2/BUILD index 646345fd1373..62ef3d05c34c 100644 --- a/test/common/http/http2/BUILD +++ b/test/common/http/http2/BUILD @@ -106,7 +106,6 @@ envoy_cc_test_library( "//source/common/http:utility_lib", "//source/common/http/http2:codec_lib", "//test/common/http:common_lib", - "//test/common/http/http2:codec_impl_test_util", "//test/mocks/http:http_mocks", "//test/mocks/network:network_mocks", "//test/test_common:environment_lib", @@ -124,7 +123,10 @@ envoy_cc_test( "response_header_corpus/simple_example_plain", ], tags = ["fails_on_windows"], - deps = [":frame_replay_lib"], + deps = [ + ":frame_replay_lib", + "//test/common/http/http2:codec_impl_test_util", + ], ) envoy_cc_test( @@ -147,12 +149,18 @@ envoy_cc_fuzz_test( name = "response_header_fuzz_test", srcs = ["response_header_fuzz_test.cc"], corpus = "response_header_corpus", - deps = [":frame_replay_lib"], + deps = [ + ":frame_replay_lib", + "//test/common/http/http2:codec_impl_test_util", + ], ) envoy_cc_fuzz_test( name = "request_header_fuzz_test", srcs = ["request_header_fuzz_test.cc"], corpus = "request_header_corpus", - deps = [":frame_replay_lib"], + deps = [ + ":frame_replay_lib", + "//test/common/http/http2:codec_impl_test_util", + ], ) diff --git a/test/common/http/http2/codec_impl_test.cc b/test/common/http/http2/codec_impl_test.cc index c3c56839244b..15696671724d 100644 --- a/test/common/http/http2/codec_impl_test.cc +++ b/test/common/http/http2/codec_impl_test.cc @@ -1801,7 +1801,12 @@ TEST_P(Http2CodecImplTest, PingFlood) { buffer.move(frame); })); - EXPECT_THROW(client_->sendPendingFrames(), ServerCodecError); + // 
Legacy codec does not propagate error details and uses generic error message + EXPECT_THROW_WITH_MESSAGE( + client_->sendPendingFrames().IgnoreError(), ServerCodecError, + Runtime::runtimeFeatureEnabled("envoy.reloadable_features.new_codec_behavior") + ? "Too many control frames in the outbound queue." + : "Too many frames in the outbound queue."); EXPECT_EQ(ack_count, CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_CONTROL_FRAMES); EXPECT_EQ(1, server_stats_store_.counter("http2.outbound_control_flood").value()); } @@ -1824,7 +1829,7 @@ TEST_P(Http2CodecImplTest, PingFloodMitigationDisabled) { EXPECT_CALL(server_connection_, write(_, _)) .Times(CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_CONTROL_FRAMES + 1); - EXPECT_NO_THROW(client_->sendPendingFrames()); + EXPECT_NO_THROW(client_->sendPendingFrames().IgnoreError()); } // Verify that outbound control frame counter decreases when send buffer is drained @@ -1854,7 +1859,7 @@ TEST_P(Http2CodecImplTest, PingFloodCounterReset) { })); // We should be 1 frame under the control frame flood mitigation threshold. - EXPECT_NO_THROW(client_->sendPendingFrames()); + EXPECT_NO_THROW(client_->sendPendingFrames().IgnoreError()); EXPECT_EQ(ack_count, kMaxOutboundControlFrames); // Drain floor(kMaxOutboundFrames / 2) slices from the send buffer @@ -1866,12 +1871,17 @@ TEST_P(Http2CodecImplTest, PingFloodCounterReset) { } // The number of outbound frames should be half of max so the connection should not be // terminated. - EXPECT_NO_THROW(client_->sendPendingFrames()); + EXPECT_NO_THROW(client_->sendPendingFrames().IgnoreError()); EXPECT_EQ(ack_count, kMaxOutboundControlFrames + kMaxOutboundControlFrames / 2); // 1 more ping frame should overflow the outbound frame limit. EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr)); - EXPECT_THROW(client_->sendPendingFrames(), ServerCodecError); + // Legacy codec does not propagate error details and uses generic error message + EXPECT_THROW_WITH_MESSAGE( + client_->sendPendingFrames().IgnoreError(), ServerCodecError, + Runtime::runtimeFeatureEnabled("envoy.reloadable_features.new_codec_behavior") + ? "Too many control frames in the outbound queue." 
+ : "Too many frames in the outbound queue."); } // Verify that codec detects flood of outbound HEADER frames @@ -1898,7 +1908,8 @@ TEST_P(Http2CodecImplTest, ResponseHeadersFlood) { // Presently flood mitigation is done only when processing downstream data // So we need to send stream from downstream client to trigger mitigation EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr)); - EXPECT_THROW(client_->sendPendingFrames(), ServerCodecError); + EXPECT_THROW_WITH_MESSAGE(client_->sendPendingFrames().IgnoreError(), ServerCodecError, + "Too many frames in the outbound queue."); EXPECT_EQ(frame_count, CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES + 1); EXPECT_EQ(1, server_stats_store_.counter("http2.outbound_flood").value()); @@ -1931,7 +1942,8 @@ TEST_P(Http2CodecImplTest, ResponseDataFlood) { // Presently flood mitigation is done only when processing downstream data // So we need to send stream from downstream client to trigger mitigation EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr)); - EXPECT_THROW(client_->sendPendingFrames(), ServerCodecError); + EXPECT_THROW_WITH_MESSAGE(client_->sendPendingFrames().IgnoreError(), ServerCodecError, + "Too many frames in the outbound queue."); EXPECT_EQ(frame_count, CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES + 1); EXPECT_EQ(1, server_stats_store_.counter("http2.outbound_flood").value()); @@ -1963,7 +1975,7 @@ TEST_P(Http2CodecImplTest, ResponseDataFloodMitigationDisabled) { // Presently flood mitigation is done only when processing downstream data // So we need to send stream from downstream client to trigger mitigation EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr)); - EXPECT_NO_THROW(client_->sendPendingFrames()); + EXPECT_NO_THROW(client_->sendPendingFrames().IgnoreError()); } // Verify that outbound frame counter decreases when send buffer is drained @@ -2005,7 +2017,8 @@ TEST_P(Http2CodecImplTest, ResponseDataFloodCounterReset) { // Presently flood mitigation is done only when processing downstream data // So we need to send a frame from downstream client to trigger mitigation EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr)); - EXPECT_THROW(client_->sendPendingFrames(), ServerCodecError); + EXPECT_THROW_WITH_MESSAGE(client_->sendPendingFrames().IgnoreError(), ServerCodecError, + "Too many frames in the outbound queue."); } // Verify that control frames are added to the counter of outbound frames of all types. 
@@ -2034,7 +2047,8 @@ TEST_P(Http2CodecImplTest, PingStacksWithDataFlood) { } // Send one PING frame above the outbound queue size limit EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr)); - EXPECT_THROW(client_->sendPendingFrames(), ServerCodecError); + EXPECT_THROW_WITH_MESSAGE(client_->sendPendingFrames().IgnoreError(), ServerCodecError, + "Too many frames in the outbound queue."); EXPECT_EQ(frame_count, CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES); EXPECT_EQ(1, server_stats_store_.counter("http2.outbound_flood").value()); @@ -2042,25 +2056,35 @@ TEST_P(Http2CodecImplTest, PingStacksWithDataFlood) { TEST_P(Http2CodecImplTest, PriorityFlood) { priorityFlood(); - EXPECT_THROW(client_->sendPendingFrames(), ServerCodecError); + // Legacy codec does not propagate error details and uses generic error message + EXPECT_THROW_WITH_MESSAGE( + client_->sendPendingFrames().IgnoreError(), ServerCodecError, + Runtime::runtimeFeatureEnabled("envoy.reloadable_features.new_codec_behavior") + ? "Too many PRIORITY frames" + : "Flooding was detected in this HTTP/2 session, and it must be closed"); } TEST_P(Http2CodecImplTest, PriorityFloodOverride) { max_inbound_priority_frames_per_stream_ = 2147483647; priorityFlood(); - EXPECT_NO_THROW(client_->sendPendingFrames()); + EXPECT_NO_THROW(client_->sendPendingFrames().IgnoreError()); } TEST_P(Http2CodecImplTest, WindowUpdateFlood) { windowUpdateFlood(); - EXPECT_THROW(client_->sendPendingFrames(), ServerCodecError); + // Legacy codec does not propagate error details and uses generic error message + EXPECT_THROW_WITH_MESSAGE( + client_->sendPendingFrames().IgnoreError(), ServerCodecError, + Runtime::runtimeFeatureEnabled("envoy.reloadable_features.new_codec_behavior") + ? "Too many WINDOW_UPDATE frames" + : "Flooding was detected in this HTTP/2 session, and it must be closed"); } TEST_P(Http2CodecImplTest, WindowUpdateFloodOverride) { max_inbound_window_update_frames_per_data_frame_sent_ = 2147483647; windowUpdateFlood(); - EXPECT_NO_THROW(client_->sendPendingFrames()); + EXPECT_NO_THROW(client_->sendPendingFrames().IgnoreError()); } TEST_P(Http2CodecImplTest, EmptyDataFlood) { @@ -2070,6 +2094,11 @@ TEST_P(Http2CodecImplTest, EmptyDataFlood) { auto status = server_wrapper_.dispatch(data, *server_); EXPECT_FALSE(status.ok()); EXPECT_TRUE(isBufferFloodError(status)); + // Legacy codec does not propagate error details and uses generic error message + EXPECT_EQ(Runtime::runtimeFeatureEnabled("envoy.reloadable_features.new_codec_behavior") + ? "Too many consecutive frames with an empty payload" + : "Flooding was detected in this HTTP/2 session, and it must be closed", + status.message()); } TEST_P(Http2CodecImplTest, EmptyDataFloodOverride) { @@ -2162,34 +2191,7 @@ class TestNghttp2SessionFactory : public Nghttp2SessionFactoryType, nghttp2_session* create(const nghttp2_session_callbacks*, typename Nghttp2SessionFactoryType::ConnectionImplType* connection, - const nghttp2_option*) override { - // Only need to provide callbacks required to send METADATA frames. - nghttp2_session_callbacks_new(&callbacks_); - nghttp2_session_callbacks_set_pack_extension_callback( - callbacks_, - [](nghttp2_session*, uint8_t* data, size_t length, const nghttp2_frame*, - void* user_data) -> ssize_t { - // Double cast required due to multiple inheritance. 
- return static_cast*>( - static_cast( - user_data)) - ->encoder_.packNextFramePayload(data, length); - }); - nghttp2_session_callbacks_set_send_callback( - callbacks_, - [](nghttp2_session*, const uint8_t* data, size_t length, int, void* user_data) -> ssize_t { - // Cast down to MetadataTestClientConnectionImpl to leverage friendship. - return static_cast*>( - static_cast( - user_data)) - ->onSend(data, length); - }); - nghttp2_option_new(&options_); - nghttp2_option_set_user_recv_extension_type(options_, METADATA_FRAME_TYPE); - nghttp2_session* session; - nghttp2_session_client_new2(&session, callbacks_, connection, options_); - return session; - } + const nghttp2_option*) override; void init(nghttp2_session*, typename Nghttp2SessionFactoryType::ConnectionImplType*, const envoy::config::core::v3::Http2ProtocolOptions&) override {} @@ -2199,6 +2201,77 @@ class TestNghttp2SessionFactory : public Nghttp2SessionFactoryType, nghttp2_option* options_; }; +template +nghttp2_session* +TestNghttp2SessionFactory::create( + const nghttp2_session_callbacks*, + typename Nghttp2SessionFactoryType::ConnectionImplType* connection, const nghttp2_option*) { + // Only need to provide callbacks required to send METADATA frames. + nghttp2_session_callbacks_new(&callbacks_); + nghttp2_session_callbacks_set_pack_extension_callback( + callbacks_, + [](nghttp2_session*, uint8_t* data, size_t length, const nghttp2_frame*, + void* user_data) -> ssize_t { + // Double cast required due to multiple inheritance. + return static_cast*>( + static_cast(user_data)) + ->encoder_.packNextFramePayload(data, length); + }); + nghttp2_session_callbacks_set_send_callback( + callbacks_, + [](nghttp2_session*, const uint8_t* data, size_t length, int, void* user_data) -> ssize_t { + // Cast down to MetadataTestClientConnectionImpl to leverage friendship. + auto status_or_len = + static_cast*>( + static_cast(user_data)) + ->onSend(data, length); + if (status_or_len.ok()) { + return status_or_len.value(); + } + return NGHTTP2_ERR_CALLBACK_FAILURE; + }); + nghttp2_option_new(&options_); + nghttp2_option_set_user_recv_extension_type(options_, METADATA_FRAME_TYPE); + nghttp2_session* session; + nghttp2_session_client_new2(&session, callbacks_, connection, options_); + return session; +} + +template <> +nghttp2_session* TestNghttp2SessionFactory:: + create(const nghttp2_session_callbacks*, + Envoy::Http::Legacy::Http2::ProdNghttp2SessionFactory::ConnectionImplType* connection, + const nghttp2_option*) { + // Only need to provide callbacks required to send METADATA frames. + nghttp2_session_callbacks_new(&callbacks_); + nghttp2_session_callbacks_set_pack_extension_callback( + callbacks_, + [](nghttp2_session*, uint8_t* data, size_t length, const nghttp2_frame*, + void* user_data) -> ssize_t { + // Double cast required due to multiple inheritance. + return static_cast*>( + static_cast< + Envoy::Http::Legacy::Http2::ProdNghttp2SessionFactory::ConnectionImplType*>( + user_data)) + ->encoder_.packNextFramePayload(data, length); + }); + nghttp2_session_callbacks_set_send_callback( + callbacks_, + [](nghttp2_session*, const uint8_t* data, size_t length, int, void* user_data) -> ssize_t { + // Cast down to MetadataTestClientConnectionImpl to leverage friendship. 
+ return static_cast*>( + static_cast(user_data)) + ->onSend(data, length); + }); + nghttp2_option_new(&options_); + nghttp2_option_set_user_recv_extension_type(options_, METADATA_FRAME_TYPE); + nghttp2_session* session; + nghttp2_session_client_new2(&session, callbacks_, connection, options_); + return session; +} + using TestNghttp2SessionFactoryNew = TestNghttp2SessionFactory; using TestNghttp2SessionFactoryLegacy = diff --git a/test/common/http/http2/codec_impl_test_util.h b/test/common/http/http2/codec_impl_test_util.h index 339481d6d408..6049876ef844 100644 --- a/test/common/http/http2/codec_impl_test_util.h +++ b/test/common/http/http2/codec_impl_test_util.h @@ -114,7 +114,7 @@ struct ClientCodecFacade : public ClientConnection { virtual nghttp2_session* session() PURE; virtual Http::Stream* getStream(int32_t stream_id) PURE; virtual uint64_t getStreamPendingSendDataLength(int32_t stream_id) PURE; - virtual void sendPendingFrames() PURE; + virtual Status sendPendingFrames() PURE; virtual bool submitMetadata(const MetadataMapVector& mm_vector, int32_t stream_id) PURE; }; @@ -148,7 +148,7 @@ class TestClientConnectionImpl : public TestClientConnection, public CodecImplTy uint64_t getStreamPendingSendDataLength(int32_t stream_id) override { return CodecImplType::getStream(stream_id)->pending_send_data_.length(); } - void sendPendingFrames() override { CodecImplType::sendPendingFrames(); } + Status sendPendingFrames() override; // Submits an H/2 METADATA frame to the peer. // Returns true on success, false otherwise. bool submitMetadata(const MetadataMapVector& mm_vector, int32_t stream_id) override { @@ -162,6 +162,18 @@ class TestClientConnectionImpl : public TestClientConnection, public CodecImplTy void onSettingsForTest(const nghttp2_settings& settings) override { onSettingsFrame(settings); } }; +template +Status TestClientConnectionImpl::sendPendingFrames() { + return CodecImplType::sendPendingFrames(); +} + +template <> +Status +TestClientConnectionImpl::sendPendingFrames() { + Envoy::Http::Legacy::Http2::ClientConnectionImpl::sendPendingFrames(); + return okStatus(); +} + using TestClientConnectionImplLegacy = TestClientConnectionImpl; using TestClientConnectionImplNew = diff --git a/test/common/http/http2/frame_replay.h b/test/common/http/http2/frame_replay.h index 3a6e89c6ca5b..2922d6a19110 100644 --- a/test/common/http/http2/frame_replay.h +++ b/test/common/http/http2/frame_replay.h @@ -4,7 +4,6 @@ #include "common/stats/isolated_store_impl.h" -#include "test/common/http/http2/codec_impl_test_util.h" #include "test/mocks/http/mocks.h" #include "test/mocks/network/mocks.h" #include "test/test_common/utility.h" diff --git a/test/common/http/http2/frame_replay_test.cc b/test/common/http/http2/frame_replay_test.cc index aadda98c8b3d..b1931d350bb8 100644 --- a/test/common/http/http2/frame_replay_test.cc +++ b/test/common/http/http2/frame_replay_test.cc @@ -1,6 +1,7 @@ #include "common/http/exception.h" #include "test/common/http/common.h" +#include "test/common/http/http2/codec_impl_test_util.h" #include "test/common/http/http2/frame_replay.h" #include "gtest/gtest.h" diff --git a/test/common/http/http2/request_header_fuzz_test.cc b/test/common/http/http2/request_header_fuzz_test.cc index d925ed1bb002..3af7f5c594ce 100644 --- a/test/common/http/http2/request_header_fuzz_test.cc +++ b/test/common/http/http2/request_header_fuzz_test.cc @@ -4,6 +4,7 @@ #include "common/http/exception.h" +#include "test/common/http/http2/codec_impl_test_util.h" #include 
"test/common/http/http2/frame_replay.h" #include "test/fuzz/fuzz_runner.h" diff --git a/test/common/http/http2/response_header_fuzz_test.cc b/test/common/http/http2/response_header_fuzz_test.cc index e73b88ab954d..4559aa06419b 100644 --- a/test/common/http/http2/response_header_fuzz_test.cc +++ b/test/common/http/http2/response_header_fuzz_test.cc @@ -5,6 +5,7 @@ #include "common/http/exception.h" #include "test/common/http/common.h" +#include "test/common/http/http2/codec_impl_test_util.h" #include "test/common/http/http2/frame_replay.h" #include "test/fuzz/fuzz_runner.h" From 4ad03270ea93fed5ad13ed78800617e5b665fd79 Mon Sep 17 00:00:00 2001 From: Snow Pettersen Date: Fri, 31 Jul 2020 08:15:17 -0400 Subject: [PATCH 810/909] hcm: introduce FilterManagerCallbacks, move buffers to FM (#12330) Introduces FilterManagerCallbacks which can be used by the FM to call back out with the encoded data. This interface will be expanded as more functionality is split between the ActiveStream and the FilterManager. Makes the majority of FM functions private, relying on befriending the filter wrappers and a more well defined interface for the ActiveStream to pass headers/data to be decoded by the FM. Moves the buffers and watermark handling into the FM. Signed-off-by: Snow Pettersen --- source/common/http/conn_manager_impl.cc | 173 ++++++++++--------- source/common/http/conn_manager_impl.h | 183 ++++++++++++++++++--- test/common/http/conn_manager_impl_test.cc | 2 +- 3 files changed, 250 insertions(+), 108 deletions(-) diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index 5c4c91563a98..29b9a3565251 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -246,16 +246,15 @@ RequestDecoder& ConnectionManagerImpl::newStream(ResponseEncoder& response_encod } ENVOY_CONN_LOG(debug, "new stream", read_callbacks_->connection()); - ActiveStreamPtr new_stream(new ActiveStream(*this)); + ActiveStreamPtr new_stream(new ActiveStream(*this, response_encoder.getStream().bufferLimit())); new_stream->state_.is_internally_created_ = is_internally_created; new_stream->response_encoder_ = &response_encoder; new_stream->response_encoder_->getStream().addCallbacks(*new_stream); new_stream->response_encoder_->getStream().setFlushTimeout(new_stream->idle_timeout_ms_); - new_stream->buffer_limit_ = new_stream->response_encoder_->getStream().bufferLimit(); // If the network connection is backed up, the stream should be made aware of it on creation. // Both HTTP/1.x and HTTP/2 codecs handle this in StreamCallbackHelper::addCallbacksHelper. 
ASSERT(read_callbacks_->connection().aboveHighWatermark() == false || - new_stream->high_watermark_count_ > 0); + new_stream->filter_manager_.aboveHighWatermark()); LinkedList::moveIntoList(std::move(new_stream), streams_); return **streams_.begin(); } @@ -515,8 +514,9 @@ void ConnectionManagerImpl::RdsRouteConfigUpdateRequester::requestRouteConfigUpd std::move(route_config_updated_cb)); } -ConnectionManagerImpl::ActiveStream::ActiveStream(ConnectionManagerImpl& connection_manager) - : connection_manager_(connection_manager), filter_manager_(*this), +ConnectionManagerImpl::ActiveStream::ActiveStream(ConnectionManagerImpl& connection_manager, + uint32_t buffer_limit) + : connection_manager_(connection_manager), filter_manager_(*this, *this, buffer_limit), stream_id_(connection_manager.random_generator_.random()), request_response_timespan_(new Stats::HistogramCompletableTimespanImpl( connection_manager_.stats_.named_.downstream_rq_time_, connection_manager_.timeSource())), @@ -1033,7 +1033,7 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& he traceRequest(); } - filter_manager_.decodeHeaders(nullptr, *request_headers_, end_stream); + filter_manager_.decodeHeaders(*request_headers_, end_stream); // Reset it here for both global and overridden cases. resetIdleTimer(); @@ -1102,7 +1102,7 @@ void ConnectionManagerImpl::FilterManager::maybeContinueDecoding( // We use the continueDecoding() code since it will correctly handle not calling // decodeHeaders() again. Fake setting StopSingleIteration since the continueDecoding() code // expects it. - ASSERT(active_stream_.buffered_request_data_); + ASSERT(buffered_request_data_); (*continue_data_entry)->iteration_state_ = ActiveStreamFilterBase::IterationState::StopSingleIteration; (*continue_data_entry)->continueDecoding(); @@ -1134,7 +1134,7 @@ void ConnectionManagerImpl::FilterManager::decodeHeaders(ActiveStreamDecoderFilt // If end_stream is set in headers, and a filter adds new metadata, we need to delay end_stream // in headers by inserting an empty data frame with end_stream set. The empty data frame is sent // after the new metadata. - if ((*entry)->end_stream_ && new_metadata_added && !active_stream_.buffered_request_data_) { + if ((*entry)->end_stream_ && new_metadata_added && !buffered_request_data_) { Buffer::OwnedImpl empty_data(""); ENVOY_STREAM_LOG(trace, "inserting an empty data frame for end_stream due metadata being added.", @@ -1157,8 +1157,7 @@ void ConnectionManagerImpl::FilterManager::decodeHeaders(ActiveStreamDecoderFilt // Here we handle the case where we have a header only request, but a filter adds a body // to it. We need to not raise end_stream = true to further filters during inline iteration. 
- if (end_stream && active_stream_.buffered_request_data_ && - continue_data_entry == decoder_filters_.end()) { + if (end_stream && buffered_request_data_ && continue_data_entry == decoder_filters_.end()) { continue_data_entry = entry; } } @@ -1176,8 +1175,7 @@ void ConnectionManagerImpl::ActiveStream::decodeData(Buffer::Instance& data, boo filter_manager_.maybeEndDecode(end_stream); stream_info_.addBytesReceived(data.length()); - filter_manager_.decodeData(nullptr, data, end_stream, - FilterManager::FilterIterationStartState::CanStartFromCurrent); + filter_manager_.decodeData(data, end_stream); } void ConnectionManagerImpl::FilterManager::decodeData( @@ -1341,7 +1339,7 @@ void ConnectionManagerImpl::ActiveStream::decodeTrailers(RequestTrailerMapPtr&& resetIdleTimer(); filter_manager_.maybeEndDecode(true); request_trailers_ = std::move(trailers); - filter_manager_.decodeTrailers(nullptr, *request_trailers_); + filter_manager_.decodeTrailers(*request_trailers_); } void ConnectionManagerImpl::FilterManager::decodeTrailers(ActiveStreamDecoderFilter* filter, @@ -1390,7 +1388,7 @@ void ConnectionManagerImpl::ActiveStream::decodeMetadata(MetadataMapPtr&& metada // After going through filters, the ownership of metadata_map will be passed to terminal filter. // The terminal filter may encode metadata_map to the next hop immediately or store metadata_map // and encode later when connection pool is ready. - filter_manager_.decodeMetadata(nullptr, *metadata_map); + filter_manager_.decodeMetadata(*metadata_map); } void ConnectionManagerImpl::FilterManager::decodeMetadata(ActiveStreamDecoderFilter* filter, @@ -1604,11 +1602,11 @@ void ConnectionManagerImpl::ActiveStream::sendLocalReply( modify_headers(*response_headers); } response_headers_ = std::move(response_headers); - encodeHeadersInternal(*response_headers_, end_stream); + encodeHeaders(*response_headers_, end_stream); filter_manager_.maybeEndEncode(end_stream); }, [&](Buffer::Instance& data, bool end_stream) -> void { - encodeDataInternal(data, end_stream); + encodeData(data, end_stream); filter_manager_.maybeEndEncode(end_stream); }}, Utility::LocalReplyData{Grpc::Common::hasGrpcContentType(*request_headers_), code, body, @@ -1694,19 +1692,23 @@ void ConnectionManagerImpl::FilterManager::encode100ContinueHeaders( } } + filter_manager_callbacks_.encode100ContinueHeaders(headers); +} + +void ConnectionManagerImpl::ActiveStream::encode100ContinueHeaders( + ResponseHeaderMap& response_headers) { // Strip the T-E headers etc. Defer other header additions as well as drain-close logic to the // continuation headers. - ConnectionManagerUtility::mutateResponseHeaders(headers, active_stream_.request_headers_.get(), - active_stream_.connection_manager_.config_, - EMPTY_STRING); + ConnectionManagerUtility::mutateResponseHeaders(response_headers, request_headers_.get(), + connection_manager_.config_, EMPTY_STRING); // Count both the 1xx and follow-up response code in stats. - active_stream_.chargeStats(headers); + chargeStats(response_headers); - ENVOY_STREAM_LOG(debug, "encoding 100 continue headers via codec:\n{}", active_stream_, headers); + ENVOY_STREAM_LOG(debug, "encoding 100 continue headers via codec:\n{}", *this, response_headers); // Now actually encode via the codec. 
- active_stream_.response_encoder_->encode100ContinueHeaders(headers); + response_encoder_->encode100ContinueHeaders(response_headers); } void ConnectionManagerImpl::FilterManager::maybeContinueEncoding( @@ -1715,7 +1717,7 @@ void ConnectionManagerImpl::FilterManager::maybeContinueEncoding( // We use the continueEncoding() code since it will correctly handle not calling // encodeHeaders() again. Fake setting StopSingleIteration since the continueEncoding() code // expects it. - ASSERT(active_stream_.buffered_response_data_); + ASSERT(buffered_response_data_); (*continue_data_entry)->iteration_state_ = ActiveStreamFilterBase::IterationState::StopSingleIteration; (*continue_data_entry)->continueEncoding(); @@ -1769,15 +1771,14 @@ void ConnectionManagerImpl::FilterManager::encodeHeaders(ActiveStreamEncoderFilt // Here we handle the case where we have a header only response, but a filter adds a body // to it. We need to not raise end_stream = true to further filters during inline iteration. - if (end_stream && active_stream_.buffered_response_data_ && - continue_data_entry == encoder_filters_.end()) { + if (end_stream && buffered_response_data_ && continue_data_entry == encoder_filters_.end()) { continue_data_entry = entry; } } const bool modified_end_stream = active_stream_.state_.encoding_headers_only_ || (end_stream && continue_data_entry == encoder_filters_.end()); - active_stream_.encodeHeadersInternal(headers, modified_end_stream); + filter_manager_callbacks_.encodeHeaders(headers, modified_end_stream); maybeEndEncode(modified_end_stream); if (!modified_end_stream) { @@ -1785,8 +1786,8 @@ void ConnectionManagerImpl::FilterManager::encodeHeaders(ActiveStreamEncoderFilt } } -void ConnectionManagerImpl::ActiveStream::encodeHeadersInternal(ResponseHeaderMap& headers, - bool end_stream) { +void ConnectionManagerImpl::ActiveStream::encodeHeaders(ResponseHeaderMap& headers, + bool end_stream) { // Base headers. // By default, always preserve the upstream date response header if present. If we choose to @@ -1933,10 +1934,9 @@ void ConnectionManagerImpl::FilterManager::encodeMetadata(ActiveStreamEncoderFil // Now encode metadata via the codec. 
if (!metadata_map_ptr->empty()) { - ENVOY_STREAM_LOG(debug, "encoding metadata via codec:\n{}", active_stream_, *metadata_map_ptr); MetadataMapVector metadata_map_vector; metadata_map_vector.emplace_back(std::move(metadata_map_ptr)); - active_stream_.response_encoder_->encodeMetadata(metadata_map_vector); + filter_manager_callbacks_.encodeMetadata(metadata_map_vector); } } @@ -2045,7 +2045,7 @@ void ConnectionManagerImpl::FilterManager::encodeData( } const bool modified_end_stream = end_stream && trailers_added_entry == encoder_filters_.end(); - active_stream_.encodeDataInternal(data, modified_end_stream); + filter_manager_callbacks_.encodeData(data, modified_end_stream); maybeEndEncode(modified_end_stream); // If trailers were adding during encodeData we need to trigger decodeTrailers in order @@ -2055,8 +2055,7 @@ void ConnectionManagerImpl::FilterManager::encodeData( } } -void ConnectionManagerImpl::ActiveStream::encodeDataInternal(Buffer::Instance& data, - bool end_stream) { +void ConnectionManagerImpl::ActiveStream::encodeData(Buffer::Instance& data, bool end_stream) { ASSERT(!state_.encoding_headers_only_); ENVOY_STREAM_LOG(trace, "encoding data via codec (size={} end_stream={})", *this, data.length(), end_stream); @@ -2065,6 +2064,33 @@ void ConnectionManagerImpl::ActiveStream::encodeDataInternal(Buffer::Instance& d response_encoder_->encodeData(data, end_stream); } +void ConnectionManagerImpl::ActiveStream::encodeTrailers(ResponseTrailerMap& trailers) { + ENVOY_STREAM_LOG(debug, "encoding trailers via codec:\n{}", *this, trailers); + + response_encoder_->encodeTrailers(trailers); +} + +void ConnectionManagerImpl::ActiveStream::encodeMetadata(MetadataMapVector& metadata) { + ENVOY_STREAM_LOG(debug, "encoding metadata via codec:\n{}", *this, metadata); + response_encoder_->encodeMetadata(metadata); +} + +void ConnectionManagerImpl::ActiveStream::onDecoderFilterBelowWriteBufferLowWatermark() { + ENVOY_STREAM_LOG(debug, "Read-enabling downstream stream due to filter callbacks.", *this); + // If the state is destroyed, the codec's stream is already torn down. On + // teardown the codec will unwind any remaining read disable calls. 
+ if (!state_.destroyed_) { + response_encoder_->getStream().readDisable(false); + } + connection_manager_.stats_.named_.downstream_flow_control_resumed_reading_total_.inc(); +} + +void ConnectionManagerImpl::ActiveStream::onDecoderFilterAboveWriteBufferHighWatermark() { + ENVOY_STREAM_LOG(debug, "Read-disabling downstream stream due to filter callbacks.", *this); + response_encoder_->getStream().readDisable(true); + connection_manager_.stats_.named_.downstream_flow_control_paused_reading_total_.inc(); +} + void ConnectionManagerImpl::FilterManager::encodeTrailers(ActiveStreamEncoderFilter* filter, ResponseTrailerMap& trailers) { active_stream_.resetIdleTimer(); @@ -2096,9 +2122,7 @@ void ConnectionManagerImpl::FilterManager::encodeTrailers(ActiveStreamEncoderFil } } - ENVOY_STREAM_LOG(debug, "encoding trailers via codec:\n{}", active_stream_, trailers); - - active_stream_.response_encoder_->encodeTrailers(trailers); + filter_manager_callbacks_.encodeTrailers(trailers); maybeEndEncode(true); } @@ -2157,12 +2181,12 @@ void ConnectionManagerImpl::ActiveStream::onResetStream(StreamResetReason, absl: void ConnectionManagerImpl::ActiveStream::onAboveWriteBufferHighWatermark() { ENVOY_STREAM_LOG(debug, "Disabling upstream stream due to downstream stream watermark.", *this); - callHighWatermarkCallbacks(); + filter_manager_.callHighWatermarkCallbacks(); } void ConnectionManagerImpl::ActiveStream::onBelowWriteBufferLowWatermark() { ENVOY_STREAM_LOG(debug, "Enabling upstream stream due to downstream stream watermark.", *this); - callLowWatermarkCallbacks(); + filter_manager_.callLowWatermarkCallbacks(); } Tracing::OperationName ConnectionManagerImpl::ActiveStream::operationName() const { @@ -2181,14 +2205,14 @@ uint32_t ConnectionManagerImpl::ActiveStream::maxPathTagLength() const { return connection_manager_.config_.tracingConfig()->max_path_tag_length_; } -void ConnectionManagerImpl::ActiveStream::callHighWatermarkCallbacks() { +void ConnectionManagerImpl::FilterManager::callHighWatermarkCallbacks() { ++high_watermark_count_; for (auto watermark_callbacks : watermark_callbacks_) { watermark_callbacks->onAboveWriteBufferHighWatermark(); } } -void ConnectionManagerImpl::ActiveStream::callLowWatermarkCallbacks() { +void ConnectionManagerImpl::FilterManager::callLowWatermarkCallbacks() { ASSERT(high_watermark_count_ > 0); --high_watermark_count_; for (auto watermark_callbacks : watermark_callbacks_) { @@ -2196,8 +2220,8 @@ void ConnectionManagerImpl::ActiveStream::callLowWatermarkCallbacks() { } } -void ConnectionManagerImpl::ActiveStream::setBufferLimit(uint32_t new_limit) { - ENVOY_STREAM_LOG(debug, "setting buffer limit to {}", *this, new_limit); +void ConnectionManagerImpl::FilterManager::setBufferLimit(uint32_t new_limit) { + ENVOY_STREAM_LOG(debug, "setting buffer limit to {}", active_stream_, new_limit); buffer_limit_ = new_limit; if (buffered_request_data_) { buffered_request_data_->setWatermarks(buffer_limit_); @@ -2479,12 +2503,12 @@ Buffer::WatermarkBufferPtr ConnectionManagerImpl::ActiveStreamDecoderFilter::cre [this]() -> void { this->requestDataDrained(); }, [this]() -> void { this->requestDataTooLarge(); }, []() -> void { /* TODO(adisuissa): Handle overflow watermark */ }); - buffer->setWatermarks(parent_.active_stream_.buffer_limit_); + buffer->setWatermarks(parent_.buffer_limit_); return buffer; } Buffer::WatermarkBufferPtr& ConnectionManagerImpl::ActiveStreamDecoderFilter::bufferedData() { - return parent_.active_stream_.buffered_request_data_; + return 
parent_.buffered_request_data_; } bool ConnectionManagerImpl::ActiveStreamDecoderFilter::complete() { @@ -2496,7 +2520,7 @@ void ConnectionManagerImpl::ActiveStreamDecoderFilter::doHeaders(bool end_stream } void ConnectionManagerImpl::ActiveStreamDecoderFilter::doData(bool end_stream) { - parent_.decodeData(this, *parent_.active_stream_.buffered_request_data_, end_stream, + parent_.decodeData(this, *parent_.buffered_request_data_, end_stream, FilterManager::FilterIterationStartState::CanStartFromCurrent); } @@ -2549,13 +2573,13 @@ void ConnectionManagerImpl::ActiveStreamDecoderFilter::injectDecodedDataToFilter void ConnectionManagerImpl::ActiveStreamDecoderFilter::continueDecoding() { commonContinue(); } const Buffer::Instance* ConnectionManagerImpl::ActiveStreamDecoderFilter::decodingBuffer() { - return parent_.active_stream_.buffered_request_data_.get(); + return parent_.buffered_request_data_.get(); } void ConnectionManagerImpl::ActiveStreamDecoderFilter::modifyDecodingBuffer( std::function callback) { ASSERT(parent_.active_stream_.state_.latest_data_decoding_filter_ == this); - callback(*parent_.active_stream_.buffered_request_data_.get()); + callback(*parent_.buffered_request_data_.get()); } void ConnectionManagerImpl::ActiveStreamDecoderFilter::sendLocalReply( @@ -2602,11 +2626,7 @@ void ConnectionManagerImpl::ActiveStreamDecoderFilter::encodeMetadata( void ConnectionManagerImpl::ActiveStreamDecoderFilter:: onDecoderFilterAboveWriteBufferHighWatermark() { - ENVOY_STREAM_LOG(debug, "Read-disabling downstream stream due to filter callbacks.", - parent_.active_stream_); - parent_.active_stream_.response_encoder_->getStream().readDisable(true); - parent_.active_stream_.connection_manager_.stats_.named_ - .downstream_flow_control_paused_reading_total_.inc(); + parent_.filter_manager_callbacks_.onDecoderFilterAboveWriteBufferHighWatermark(); } void ConnectionManagerImpl::ActiveStreamDecoderFilter::requestDataTooLarge() { @@ -2628,44 +2648,33 @@ void ConnectionManagerImpl::ActiveStreamDecoderFilter::requestDataDrained() { void ConnectionManagerImpl::ActiveStreamDecoderFilter:: onDecoderFilterBelowWriteBufferLowWatermark() { - ENVOY_STREAM_LOG(debug, "Read-enabling downstream stream due to filter callbacks.", - parent_.active_stream_); - // If the state is destroyed, the codec's stream is already torn down. On - // teardown the codec will unwind any remaining read disable calls. - if (!parent_.active_stream_.state_.destroyed_) { - parent_.active_stream_.response_encoder_->getStream().readDisable(false); - } - parent_.active_stream_.connection_manager_.stats_.named_ - .downstream_flow_control_resumed_reading_total_.inc(); + parent_.filter_manager_callbacks_.onDecoderFilterBelowWriteBufferLowWatermark(); } void ConnectionManagerImpl::ActiveStreamDecoderFilter::addDownstreamWatermarkCallbacks( DownstreamWatermarkCallbacks& watermark_callbacks) { // This is called exactly once per upstream-stream, by the router filter. Therefore, we // expect the same callbacks to not be registered twice. 
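The watermark bookkeeping being moved into the filter manager follows a replay-on-subscribe pattern: a counter records how many high-watermark events are outstanding, and a subscriber that registers late is immediately caught up so its view matches the stream. A simplified standalone sketch (not the real classes):

#include <cstdint>
#include <iostream>
#include <list>

class WatermarkSubscriber {
public:
  virtual ~WatermarkSubscriber() = default;
  virtual void onAboveWriteBufferHighWatermark() = 0;
  virtual void onBelowWriteBufferLowWatermark() = 0;
};

class PrintingSubscriber : public WatermarkSubscriber {
public:
  void onAboveWriteBufferHighWatermark() override { std::cout << "paused\n"; }
  void onBelowWriteBufferLowWatermark() override { std::cout << "resumed\n"; }
};

class WatermarkTracker {
public:
  void addSubscriber(WatermarkSubscriber& subscriber) {
    subscribers_.push_back(&subscriber);
    // A late subscriber is replayed the outstanding high-watermark events so
    // it starts out consistent with the stream's current state.
    for (uint32_t i = 0; i < high_watermark_count_; ++i) {
      subscriber.onAboveWriteBufferHighWatermark();
    }
  }
  void callHighWatermarkCallbacks() {
    ++high_watermark_count_;
    for (auto* subscriber : subscribers_) {
      subscriber->onAboveWriteBufferHighWatermark();
    }
  }
  void callLowWatermarkCallbacks() {
    --high_watermark_count_;
    for (auto* subscriber : subscribers_) {
      subscriber->onBelowWriteBufferLowWatermark();
    }
  }

private:
  uint32_t high_watermark_count_{0};
  std::list<WatermarkSubscriber*> subscribers_;
};

int main() {
  WatermarkTracker tracker;
  tracker.callHighWatermarkCallbacks(); // No subscribers yet; count becomes 1.
  PrintingSubscriber late;
  tracker.addSubscriber(late);          // Replays the outstanding event: prints "paused".
  tracker.callLowWatermarkCallbacks();  // Prints "resumed".
  return 0;
}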
- ASSERT(std::find(parent_.active_stream_.watermark_callbacks_.begin(), - parent_.active_stream_.watermark_callbacks_.end(), - &watermark_callbacks) == parent_.active_stream_.watermark_callbacks_.end()); - parent_.active_stream_.watermark_callbacks_.emplace( - parent_.active_stream_.watermark_callbacks_.end(), &watermark_callbacks); - for (uint32_t i = 0; i < parent_.active_stream_.high_watermark_count_; ++i) { + ASSERT(std::find(parent_.watermark_callbacks_.begin(), parent_.watermark_callbacks_.end(), + &watermark_callbacks) == parent_.watermark_callbacks_.end()); + parent_.watermark_callbacks_.emplace(parent_.watermark_callbacks_.end(), &watermark_callbacks); + for (uint32_t i = 0; i < parent_.high_watermark_count_; ++i) { watermark_callbacks.onAboveWriteBufferHighWatermark(); } } void ConnectionManagerImpl::ActiveStreamDecoderFilter::removeDownstreamWatermarkCallbacks( DownstreamWatermarkCallbacks& watermark_callbacks) { - ASSERT(std::find(parent_.active_stream_.watermark_callbacks_.begin(), - parent_.active_stream_.watermark_callbacks_.end(), - &watermark_callbacks) != parent_.active_stream_.watermark_callbacks_.end()); - parent_.active_stream_.watermark_callbacks_.remove(&watermark_callbacks); + ASSERT(std::find(parent_.watermark_callbacks_.begin(), parent_.watermark_callbacks_.end(), + &watermark_callbacks) != parent_.watermark_callbacks_.end()); + parent_.watermark_callbacks_.remove(&watermark_callbacks); } void ConnectionManagerImpl::ActiveStreamDecoderFilter::setDecoderBufferLimit(uint32_t limit) { - parent_.active_stream_.setBufferLimit(limit); + parent_.setBufferLimit(limit); } uint32_t ConnectionManagerImpl::ActiveStreamDecoderFilter::decoderBufferLimit() { - return parent_.active_stream_.buffer_limit_; + return parent_.buffer_limit_; } bool ConnectionManagerImpl::ActiveStreamDecoderFilter::recreateStream() { @@ -2735,11 +2744,11 @@ Buffer::WatermarkBufferPtr ConnectionManagerImpl::ActiveStreamEncoderFilter::cre [this]() -> void { this->responseDataDrained(); }, [this]() -> void { this->responseDataTooLarge(); }, []() -> void { /* TODO(adisuissa): Handle overflow watermark */ }); - buffer->setWatermarks(parent_.active_stream_.buffer_limit_); + buffer->setWatermarks(parent_.buffer_limit_); return Buffer::WatermarkBufferPtr{buffer}; } Buffer::WatermarkBufferPtr& ConnectionManagerImpl::ActiveStreamEncoderFilter::bufferedData() { - return parent_.active_stream_.buffered_response_data_; + return parent_.buffered_response_data_; } bool ConnectionManagerImpl::ActiveStreamEncoderFilter::complete() { return parent_.active_stream_.state_.local_complete_; @@ -2754,7 +2763,7 @@ void ConnectionManagerImpl::ActiveStreamEncoderFilter::doHeaders(bool end_stream parent_.encodeHeaders(this, *parent_.active_stream_.response_headers_, end_stream); } void ConnectionManagerImpl::ActiveStreamEncoderFilter::doData(bool end_stream) { - parent_.encodeData(this, *parent_.active_stream_.buffered_response_data_, end_stream, + parent_.encodeData(this, *parent_.buffered_response_data_, end_stream, FilterManager::FilterIterationStartState::CanStartFromCurrent); } void ConnectionManagerImpl::ActiveStreamEncoderFilter::drainSavedResponseMetadata() { @@ -2808,34 +2817,34 @@ void ConnectionManagerImpl::ActiveStreamEncoderFilter:: onEncoderFilterAboveWriteBufferHighWatermark() { ENVOY_STREAM_LOG(debug, "Disabling upstream stream due to filter callbacks.", parent_.active_stream_); - parent_.active_stream_.callHighWatermarkCallbacks(); + parent_.callHighWatermarkCallbacks(); } void 
ConnectionManagerImpl::ActiveStreamEncoderFilter:: onEncoderFilterBelowWriteBufferLowWatermark() { ENVOY_STREAM_LOG(debug, "Enabling upstream stream due to filter callbacks.", parent_.active_stream_); - parent_.active_stream_.callLowWatermarkCallbacks(); + parent_.callLowWatermarkCallbacks(); } void ConnectionManagerImpl::ActiveStreamEncoderFilter::setEncoderBufferLimit(uint32_t limit) { - parent_.active_stream_.setBufferLimit(limit); + parent_.setBufferLimit(limit); } uint32_t ConnectionManagerImpl::ActiveStreamEncoderFilter::encoderBufferLimit() { - return parent_.active_stream_.buffer_limit_; + return parent_.buffer_limit_; } void ConnectionManagerImpl::ActiveStreamEncoderFilter::continueEncoding() { commonContinue(); } const Buffer::Instance* ConnectionManagerImpl::ActiveStreamEncoderFilter::encodingBuffer() { - return parent_.active_stream_.buffered_response_data_.get(); + return parent_.buffered_response_data_.get(); } void ConnectionManagerImpl::ActiveStreamEncoderFilter::modifyEncodingBuffer( std::function callback) { ASSERT(parent_.active_stream_.state_.latest_data_encoding_filter_ == this); - callback(*parent_.active_stream_.buffered_response_data_.get()); + callback(*parent_.buffered_response_data_.get()); } Http1StreamEncoderOptionsOptRef diff --git a/source/common/http/conn_manager_impl.h b/source/common/http/conn_manager_impl.h index 34ed3e7c8f3f..3396dd264f3c 100644 --- a/source/common/http/conn_manager_impl.h +++ b/source/common/http/conn_manager_impl.h @@ -17,6 +17,7 @@ #include "envoy/http/codes.h" #include "envoy/http/context.h" #include "envoy/http/filter.h" +#include "envoy/http/header_map.h" #include "envoy/network/connection.h" #include "envoy/network/drain_decision.h" #include "envoy/network/filter.h" @@ -387,9 +388,68 @@ class ConnectionManagerImpl : Logger::Loggable, NullRouteConfigUpdateRequester() = default; }; + /** + * Callbacks invoked by the FilterManager to pass filter data/events back to the caller. + */ + class FilterManagerCallbacks { + public: + virtual ~FilterManagerCallbacks() = default; + + /** + * Called when the provided headers have been encoded by all the filters in the chain. + * @param response_headers the encoded headers. + * @param end_stream whether this is a header only response. + */ + virtual void encodeHeaders(ResponseHeaderMap& response_headers, bool end_stream) PURE; + + /** + * Called when the provided 100 Continue headers have been encoded by all the filters in the + * chain. + * @param response_headers the encoded headers. + */ + virtual void encode100ContinueHeaders(ResponseHeaderMap& response_headers) PURE; + + /** + * Called when the provided data has been encoded by all filters in the chain. + * @param data the encoded data. + * @param end_stream whether this is the end of the response. + */ + virtual void encodeData(Buffer::Instance& data, bool end_stream) PURE; + + /** + * Called when the provided trailers have been encoded by all filters in the chain. + * @param trailers the encoded trailers. + */ + virtual void encodeTrailers(ResponseTrailerMap& trailers) PURE; + + /** + * Called when the provided metadata has been encoded by all filters in the chain. + * @param trailers the encoded trailers. + */ + virtual void encodeMetadata(MetadataMapVector& metadata) PURE; + + /** + * Called when the stream write buffer is no longer above the low watermark. + */ + virtual void onDecoderFilterBelowWriteBufferLowWatermark() PURE; + + /** + * Called when the stream write buffer is above above the high watermark. 
+ */ + virtual void onDecoderFilterAboveWriteBufferHighWatermark() PURE; + }; + + /** + * FilterManager manages decoding a request through a series of decoding filter and the encoding + * of the resulting response. + */ class FilterManager { public: - explicit FilterManager(ActiveStream& active_stream) : active_stream_(active_stream) {} + FilterManager(ActiveStream& active_stream, FilterManagerCallbacks& filter_manager_callbacks, + uint32_t buffer_limit) + : active_stream_(active_stream), filter_manager_callbacks_(filter_manager_callbacks), + buffer_limit_(buffer_limit) {} + void destroyFilters() { for (auto& filter : decoder_filters_) { filter->handle_->onDestroy(); @@ -402,12 +462,81 @@ class ConnectionManagerImpl : Logger::Loggable, } } } - // Indicates which filter to start the iteration with. - enum class FilterIterationStartState { AlwaysStartFromNext, CanStartFromCurrent }; + /** + * Decodes the provided headers starting at the first filter in the chain. + * @param headers the headers to decode. + * @param end_stream whether the request is header only. + */ + void decodeHeaders(RequestHeaderMap& headers, bool end_stream) { + decodeHeaders(nullptr, headers, end_stream); + } + + /** + * Decodes the provided data starting at the first filter in the chain. + * @param data the data to decode. + * @param end_stream whether this data is the end of the request. + */ + void decodeData(Buffer::Instance& data, bool end_stream) { + decodeData(nullptr, data, end_stream, FilterIterationStartState::CanStartFromCurrent); + } + + /** + * Decodes the provided trailers starting at the first filter in the chain. + * @param trailers the trailers to decode. + */ + void decodeTrailers(RequestTrailerMap& trailers) { decodeTrailers(nullptr, trailers); } + + /** + * Decodes the provided metadata starting at the first filter in the chain. + * @param metadata_map the metadata to decode. + */ + void decodeMetadata(MetadataMap& metadata_map) { decodeMetadata(nullptr, metadata_map); } + + // TODO(snowp): Make private as filter chain construction is moved into FM. void addStreamDecoderFilterWorker(StreamDecoderFilterSharedPtr filter, bool dual_filter); void addStreamEncoderFilterWorker(StreamEncoderFilterSharedPtr filter, bool dual_filter); + void disarmRequestTimeout(); + + /** + * If end_stream is true, marks decoding as complete. This is a noop if end_stream is false. + * @param end_stream whether decoding is complete. + */ + void maybeEndDecode(bool end_stream); + + /** + * If end_stream is true, marks encoding as complete. This is a noop if end_stream is false. + * @param end_stream whether encoding is complete. + */ + void maybeEndEncode(bool end_stream); + + /** + * Sends a local reply by constructing a response and passing it through all the encoder + * filters. The resulting response will be passed out via the FilterManagerCallbacks. + */ + void sendLocalReplyViaFilterChain( + bool is_grpc_request, Code code, absl::string_view body, + const std::function& modify_headers, bool is_head_request, + const absl::optional grpc_status, absl::string_view details); + + // Possibly increases buffer_limit_ to the value of limit. + void setBufferLimit(uint32_t limit); + + /** + * @return bool whether any above high watermark triggers are currently active + */ + bool aboveHighWatermark() { return high_watermark_count_ != 0; } + + // Pass on watermark callbacks to watermark subscribers. This boils down to passing watermark + // events for this stream and the downstream connection to the router filter. 
+ void callHighWatermarkCallbacks(); + void callLowWatermarkCallbacks(); + + private: + // Indicates which filter to start the iteration with. + enum class FilterIterationStartState { AlwaysStartFromNext, CanStartFromCurrent }; + // Returns the encoder filter to start iteration with. std::list::iterator commonEncodePrefix(ActiveStreamEncoderFilter* filter, bool end_stream, @@ -431,18 +560,12 @@ class ConnectionManagerImpl : Logger::Loggable, FilterIterationStartState filter_iteration_start_state); void decodeTrailers(ActiveStreamDecoderFilter* filter, RequestTrailerMap& trailers); void decodeMetadata(ActiveStreamDecoderFilter* filter, MetadataMap& metadata_map); - void disarmRequestTimeout(); - void maybeEndDecode(bool end_stream); void addEncodedData(ActiveStreamEncoderFilter& filter, Buffer::Instance& data, bool streaming); ResponseTrailerMap& addEncodedTrailers(); void sendLocalReply(bool is_grpc_request, Code code, absl::string_view body, const std::function& modify_headers, const absl::optional grpc_status, absl::string_view details); - void sendLocalReplyViaFilterChain( - bool is_grpc_request, Code code, absl::string_view body, - const std::function& modify_headers, bool is_head_request, - const absl::optional grpc_status, absl::string_view details); void encode100ContinueHeaders(ActiveStreamEncoderFilter* filter, ResponseHeaderMap& headers); // As with most of the encode functions, this runs encodeHeaders on various // filters before calling encodeHeadersInternal which does final header munging and passes the @@ -459,7 +582,6 @@ class ConnectionManagerImpl : Logger::Loggable, void encodeTrailers(ActiveStreamEncoderFilter* filter, ResponseTrailerMap& trailers); void encodeMetadata(ActiveStreamEncoderFilter* filter, MetadataMapPtr&& metadata_map_ptr); - void maybeEndEncode(bool end_stream); // Returns true if new metadata is decoded. Otherwise, returns false. bool processNewlyAddedMetadata(); @@ -471,9 +593,22 @@ class ConnectionManagerImpl : Logger::Loggable, ActiveStream& active_stream_; - private: + FilterManagerCallbacks& filter_manager_callbacks_; + std::list decoder_filters_; std::list encoder_filters_; + + Buffer::WatermarkBufferPtr buffered_response_data_; + Buffer::WatermarkBufferPtr buffered_request_data_; + uint32_t buffer_limit_{0}; + uint32_t high_watermark_count_{0}; + std::list watermark_callbacks_{}; + + // TODO(snowp): Once FM has been moved to its own file we'll make these private classes of FM, + // at which point they no longer need to be friends. 
+ friend ActiveStreamFilterBase; + friend ActiveStreamDecoderFilter; + friend ActiveStreamEncoderFilter; }; /** @@ -486,8 +621,9 @@ class ConnectionManagerImpl : Logger::Loggable, public RequestDecoder, public FilterChainFactoryCallbacks, public Tracing::Config, - public ScopeTrackedObject { - ActiveStream(ConnectionManagerImpl& connection_manager); + public ScopeTrackedObject, + public FilterManagerCallbacks { + ActiveStream(ConnectionManagerImpl& connection_manager, uint32_t buffer_limit); ~ActiveStream() override; void chargeStats(const ResponseHeaderMap& headers); @@ -555,6 +691,15 @@ class ConnectionManagerImpl : Logger::Loggable, DUMP_DETAILS(&stream_info_); } + // FilterManagerCallbacks + void encodeHeaders(ResponseHeaderMap& response_headers, bool end_stream) override; + void encode100ContinueHeaders(ResponseHeaderMap& response_headers) override; + void encodeData(Buffer::Instance& data, bool end_stream) override; + void encodeTrailers(ResponseTrailerMap& trailers) override; + void encodeMetadata(MetadataMapVector& metadata) override; + void onDecoderFilterBelowWriteBufferLowWatermark() override; + void onDecoderFilterAboveWriteBufferHighWatermark() override; + void traceRequest(); // Updates the snapped_route_config_ (by reselecting scoped route configuration), if a scope is @@ -570,11 +715,6 @@ class ConnectionManagerImpl : Logger::Loggable, void refreshCachedTracingCustomTags(); - // Pass on watermark callbacks to watermark subscribers. This boils down to passing watermark - // events for this stream and the downstream connection to the router filter. - void callHighWatermarkCallbacks(); - void callLowWatermarkCallbacks(); - /** * Flags that keep track of which filter calls are currently in progress. */ @@ -645,8 +785,6 @@ class ConnectionManagerImpl : Logger::Loggable, ActiveStreamDecoderFilter* latest_data_decoding_filter_{}; }; - // Possibly increases buffer_limit_ to the value of limit. - void setBufferLimit(uint32_t limit); // Set up the Encoder/Decoder filter chain. bool createFilterChain(); // Per-stream idle timeout callback. @@ -690,10 +828,8 @@ class ConnectionManagerImpl : Logger::Loggable, ResponseEncoder* response_encoder_{}; ResponseHeaderMapPtr continue_headers_; ResponseHeaderMapPtr response_headers_; - Buffer::WatermarkBufferPtr buffered_response_data_; ResponseTrailerMapPtr response_trailers_{}; RequestHeaderMapPtr request_headers_; - Buffer::WatermarkBufferPtr buffered_request_data_; RequestTrailerMapPtr request_trailers_; std::list access_log_handlers_; Stats::TimespanPtr request_response_timespan_; @@ -708,13 +844,10 @@ class ConnectionManagerImpl : Logger::Loggable, StreamInfo::StreamInfoImpl stream_info_; absl::optional cached_route_; absl::optional cached_cluster_info_; - std::list watermark_callbacks_{}; // Stores metadata added in the decoding filter that is being processed. Will be cleared before // processing the next filter. The storage is created on demand. We need to store metadata // temporarily in the filter in case the filter has stopped all while processing headers. 
std::unique_ptr request_metadata_map_vector_{nullptr}; - uint32_t buffer_limit_{0}; - uint32_t high_watermark_count_{0}; const std::string* decorated_operation_{nullptr}; Network::Socket::OptionsSharedPtr upstream_options_; std::unique_ptr route_config_update_requester_; diff --git a/test/common/http/conn_manager_impl_test.cc b/test/common/http/conn_manager_impl_test.cc index a0752d6a5930..3e83615f3dc4 100644 --- a/test/common/http/conn_manager_impl_test.cc +++ b/test/common/http/conn_manager_impl_test.cc @@ -195,11 +195,11 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan void setUpBufferLimits() { ON_CALL(response_encoder_, getStream()).WillByDefault(ReturnRef(stream_)); + EXPECT_CALL(stream_, bufferLimit()).WillOnce(Return(initial_buffer_limit_)); EXPECT_CALL(stream_, addCallbacks(_)) .WillOnce(Invoke( [&](Http::StreamCallbacks& callbacks) -> void { stream_callbacks_ = &callbacks; })); EXPECT_CALL(stream_, setFlushTimeout(_)); - EXPECT_CALL(stream_, bufferLimit()).WillOnce(Return(initial_buffer_limit_)); } // If request_with_data_and_trailers is true, includes data and trailers in the request. If From 98ad7e0847313cfae2a94f91fd5b95ee3e8448e0 Mon Sep 17 00:00:00 2001 From: Ruslan Nigmatullin Date: Fri, 31 Jul 2020 09:44:48 -0700 Subject: [PATCH 811/909] logger: Change default log format (#11972) Change default log format and values of the corresponding compatibility flag as was previously stated in the 0.15 release's documentation. Signed-off-by: Ruslan Nigmatullin --- docs/root/operations/cli.rst | 14 +++++--------- docs/root/version_history/current.rst | 1 + source/common/common/base_logger.cc | 2 +- source/server/options_impl.cc | 2 +- test/server/options_impl_test.cc | 10 +++++----- 5 files changed, 13 insertions(+), 16 deletions(-) diff --git a/docs/root/operations/cli.rst b/docs/root/operations/cli.rst index df2326ea6800..cf2f8894716a 100644 --- a/docs/root/operations/cli.rst +++ b/docs/root/operations/cli.rst @@ -114,13 +114,10 @@ following are the command line options that Envoy supports. .. option:: --log-format *(optional)* The format string to use for laying out the log message metadata. If this is not - set, a default format string ``"[%Y-%m-%d %T.%e][%t][%l][%n] %v"`` is used. + set, a default format string ``"[%Y-%m-%d %T.%e][%t][%l][%n] [%g:%#] %v"`` is used. - When used in conjunction with :option:`--log-format-prefix-with-location` set to 0, the logger can be - configured to not prefix ``%v`` by a file path and a line number. - - **NOTE**: The default log format will be changed to ``"[%Y-%m-%d %T.%e][%t][%l][%n] [%g:%#] %v"`` - together with the default value of :option:`--log-format-prefix-with-location` to 0 at 1.16.0 release. + When used in conjunction with :option:`--log-format-prefix-with-location` set to 1, the logger can be + configured to prefix ``%v`` by a file path and a line number. When used in conjunction with :option:`--log-format-escaped`, the logger can be configured to log in a format that is parsable by log viewers. Known integrations are documented @@ -167,10 +164,9 @@ following are the command line options that Envoy supports. *(optional)* This temporary flag allows replacing all entries of ``"%v"`` in the log format by ``"[%g:%#] %v"``. This flag is provided for migration purposes only. If this is not set, a - default value 1 is used. + default value 0 is used. - **NOTE**: The default value will be changed to 0 at 1.16.0 release and the flag will be - removed at 1.17.0 release. 
+ **NOTE**: The flag will be removed at 1.17.0 release. .. option:: --log-format-escaped diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index bc28b05fc93b..6828e1862bec 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -17,6 +17,7 @@ Minor Behavior Changes * http: fixed the 100-continue response path to properly handle upstream failure by sending 5xx responses. This behavior can be temporarily reverted by setting `envoy.reloadable_features.allow_500_after_100` to false. * http: the per-stream FilterState maintained by the HTTP connection manager will now provide read/write access to the downstream connection FilterState. As such, code that relies on interacting with this might see a change in behavior. +* logging: change default log format to `"[%Y-%m-%d %T.%e][%t][%l][%n] [%g:%#] %v"` and default value of :option:`--log-format-prefix-with-location` to `0`. * logging: nghttp2 log messages no longer appear at trace level unless `ENVOY_NGHTTP2_TRACE` is set in the environment. * router: added transport failure reason to response body when upstream reset happens. After this change, the response body will be of the form `upstream connect error or disconnect/reset before headers. reset reason:{}, transport failure reason:{}`.This behavior may be reverted by setting runtime feature `envoy.reloadable_features.http_transport_failure_reason_in_body` to false. diff --git a/source/common/common/base_logger.cc b/source/common/common/base_logger.cc index 912c649337d6..2491ab389dc3 100644 --- a/source/common/common/base_logger.cc +++ b/source/common/common/base_logger.cc @@ -3,7 +3,7 @@ namespace Envoy { namespace Logger { -const char* Logger::DEFAULT_LOG_FORMAT = "[%Y-%m-%d %T.%e][%t][%l][%n] %v"; +const char* Logger::DEFAULT_LOG_FORMAT = "[%Y-%m-%d %T.%e][%t][%l][%n] [%g:%#] %v"; Logger::Logger(std::shared_ptr logger) : logger_(logger) { logger_->set_pattern(DEFAULT_LOG_FORMAT); diff --git a/source/server/options_impl.cc b/source/server/options_impl.cc index 51734175acec..fac2d8ae32c1 100644 --- a/source/server/options_impl.cc +++ b/source/server/options_impl.cc @@ -112,7 +112,7 @@ OptionsImpl::OptionsImpl(std::vector args, "", "log-format-prefix-with-location", "Prefix all occurrences of '%v' in log format with with '[%g:%#] ' ('[path/to/file.cc:99] " "').", - false, true, "bool", cmd); + false, false, "bool", cmd); TCLAP::ValueArg log_path("", "log-path", "Path to logfile", false, "", "string", cmd); TCLAP::ValueArg restart_epoch("", "restart-epoch", "hot restart epoch #", false, 0, diff --git a/test/server/options_impl_test.cc b/test/server/options_impl_test.cc index e2a52fafb781..3898ffff14e3 100644 --- a/test/server/options_impl_test.cc +++ b/test/server/options_impl_test.cc @@ -96,7 +96,7 @@ TEST_F(OptionsImplTest, All) { EXPECT_EQ(0U, options->restartEpoch()); EXPECT_EQ(spdlog::level::info, options->logLevel()); EXPECT_EQ(2, options->componentLogLevels().size()); - EXPECT_EQ("[[%g:%#] %v]", options->logFormat()); + EXPECT_EQ("[%v]", options->logFormat()); EXPECT_EQ("/foo/bar", options->logPath()); EXPECT_EQ("cluster", options->serviceClusterName()); EXPECT_EQ("node", options->serviceNodeName()); @@ -431,19 +431,19 @@ TEST_F(OptionsImplTest, LogFormatDefault) { TEST_F(OptionsImplTest, LogFormatDefaultNoPrefix) { std::unique_ptr options = createOptionsImpl({"envoy", "-c", "hello", "--log-format-prefix-with-location", "0"}); - EXPECT_EQ(options->logFormat(), "[%Y-%m-%d %T.%e][%t][%l][%n] %v"); + 
EXPECT_EQ(options->logFormat(), "[%Y-%m-%d %T.%e][%t][%l][%n] [%g:%#] %v"); } TEST_F(OptionsImplTest, LogFormatOverride) { std::unique_ptr options = - createOptionsImpl({"envoy", "-c", "hello", "--log-format", "%%v %v %t %v"}); + createOptionsImpl({"envoy", "-c", "hello", "--log-format", "%%v %v %t %v", + "--log-format-prefix-with-location 1"}); EXPECT_EQ(options->logFormat(), "%%v [%g:%#] %v %t [%g:%#] %v"); } TEST_F(OptionsImplTest, LogFormatOverrideNoPrefix) { std::unique_ptr options = - createOptionsImpl({"envoy", "-c", "hello", "--log-format", "%%v %v %t %v", - "--log-format-prefix-with-location 0"}); + createOptionsImpl({"envoy", "-c", "hello", "--log-format", "%%v %v %t %v"}); EXPECT_EQ(options->logFormat(), "%%v %v %t %v"); } From d7c7e9a79eed80afa56a0b05cf6adf0516750d14 Mon Sep 17 00:00:00 2001 From: David Raskin <66272127+davidraskin@users.noreply.github.com> Date: Fri, 31 Jul 2020 11:52:51 -0500 Subject: [PATCH 812/909] logging: add metadata access log filter (#12322) Adding a filter for access logs that will decide whether to log based on dynamic metadata. Signed-off-by: davidraskin --- api/envoy/config/accesslog/v3/BUILD | 1 + api/envoy/config/accesslog/v3/accesslog.proto | 84 +++++++++++----- api/envoy/config/accesslog/v4alpha/BUILD | 1 + .../config/accesslog/v4alpha/accesslog.proto | 84 +++++++++++----- docs/root/version_history/current.rst | 2 +- .../envoy/config/accesslog/v3/BUILD | 1 + .../envoy/config/accesslog/v3/accesslog.proto | 84 +++++++++++----- .../envoy/config/accesslog/v4alpha/BUILD | 1 + .../config/accesslog/v4alpha/accesslog.proto | 84 +++++++++++----- source/common/access_log/access_log_impl.cc | 39 ++++++++ source/common/access_log/access_log_impl.h | 22 +++++ .../common/access_log/access_log_impl_test.cc | 98 +++++++++++++++++++ 12 files changed, 396 insertions(+), 105 deletions(-) diff --git a/api/envoy/config/accesslog/v3/BUILD b/api/envoy/config/accesslog/v3/BUILD index 92e9f3949251..518ca23126cd 100644 --- a/api/envoy/config/accesslog/v3/BUILD +++ b/api/envoy/config/accesslog/v3/BUILD @@ -9,6 +9,7 @@ api_proto_package( "//envoy/config/core/v3:pkg", "//envoy/config/filter/accesslog/v2:pkg", "//envoy/config/route/v3:pkg", + "//envoy/type/matcher/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], diff --git a/api/envoy/config/accesslog/v3/accesslog.proto b/api/envoy/config/accesslog/v3/accesslog.proto index e1b5a2e58b90..e9d815aafcea 100644 --- a/api/envoy/config/accesslog/v3/accesslog.proto +++ b/api/envoy/config/accesslog/v3/accesslog.proto @@ -4,10 +4,12 @@ package envoy.config.accesslog.v3; import "envoy/config/core/v3/base.proto"; import "envoy/config/route/v3/route_components.proto"; +import "envoy/type/matcher/v3/metadata.proto"; import "envoy/type/v3/percent.proto"; import "google/protobuf/any.proto"; import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -39,8 +41,8 @@ message AccessLog { // Filter which is used to determine if the access log needs to be written. AccessLogFilter filter = 2; - // Custom configuration that depends on the access log being instantiated. Built-in - // configurations include: + // Custom configuration that depends on the access log being instantiated. + // Built-in configurations include: // // #. 
"envoy.access_loggers.file": :ref:`FileAccessLog // ` @@ -53,7 +55,7 @@ message AccessLog { } } -// [#next-free-field: 12] +// [#next-free-field: 13] message AccessLogFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.accesslog.v2.AccessLogFilter"; @@ -93,6 +95,9 @@ message AccessLogFilter { // Extension filter. ExtensionFilter extension_filter = 11; + + // Metadata Filter + MetadataFilter metadata_filter = 12; } } @@ -156,25 +161,30 @@ message RuntimeFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.accesslog.v2.RuntimeFilter"; - // Runtime key to get an optional overridden numerator for use in the *percent_sampled* field. - // If found in runtime, this value will replace the default numerator. + // Runtime key to get an optional overridden numerator for use in the + // *percent_sampled* field. If found in runtime, this value will replace the + // default numerator. string runtime_key = 1 [(validate.rules).string = {min_bytes: 1}]; - // The default sampling percentage. If not specified, defaults to 0% with denominator of 100. + // The default sampling percentage. If not specified, defaults to 0% with + // denominator of 100. type.v3.FractionalPercent percent_sampled = 2; // By default, sampling pivots on the header - // :ref:`x-request-id` being present. If - // :ref:`x-request-id` is present, the filter will - // consistently sample across multiple hosts based on the runtime key value and the value - // extracted from :ref:`x-request-id`. If it is - // missing, or *use_independent_randomness* is set to true, the filter will randomly sample based - // on the runtime key value alone. *use_independent_randomness* can be used for logging kill - // switches within complex nested :ref:`AndFilter + // :ref:`x-request-id` being + // present. If :ref:`x-request-id` + // is present, the filter will consistently sample across multiple hosts based + // on the runtime key value and the value extracted from + // :ref:`x-request-id`. If it is + // missing, or *use_independent_randomness* is set to true, the filter will + // randomly sample based on the runtime key value alone. + // *use_independent_randomness* can be used for logging kill switches within + // complex nested :ref:`AndFilter // ` and :ref:`OrFilter - // ` blocks that are easier to reason about - // from a probability perspective (i.e., setting to true will cause the filter to behave like - // an independent random variable when composed within logical operator filters). + // ` blocks that are easier to + // reason about from a probability perspective (i.e., setting to true will + // cause the filter to behave like an independent random variable when + // composed within logical operator filters). bool use_independent_randomness = 3; } @@ -203,21 +213,22 @@ message HeaderFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.accesslog.v2.HeaderFilter"; - // Only requests with a header which matches the specified HeaderMatcher will pass the filter - // check. + // Only requests with a header which matches the specified HeaderMatcher will + // pass the filter check. route.v3.HeaderMatcher header = 1 [(validate.rules).message = {required: true}]; } // Filters requests that received responses with an Envoy response flag set. // A list of the response flags can be found -// in the access log formatter :ref:`documentation`. +// in the access log formatter +// :ref:`documentation`. 
message ResponseFlagFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.accesslog.v2.ResponseFlagFilter"; - // Only responses with the any of the flags listed in this field will be logged. - // This field is optional. If it is not specified, then any response flag will pass - // the filter check. + // Only responses with the any of the flags listed in this field will be + // logged. This field is optional. If it is not specified, then any response + // flag will pass the filter check. repeated string flags = 1 [(validate.rules).repeated = { items { string { @@ -248,8 +259,8 @@ message ResponseFlagFilter { }]; } -// Filters gRPC requests based on their response status. If a gRPC status is not provided, the -// filter will infer the status from the HTTP status code. +// Filters gRPC requests based on their response status. If a gRPC status is not +// provided, the filter will infer the status from the HTTP status code. message GrpcStatusFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.accesslog.v2.GrpcStatusFilter"; @@ -277,11 +288,32 @@ message GrpcStatusFilter { // Logs only responses that have any one of the gRPC statuses in this field. repeated Status statuses = 1 [(validate.rules).repeated = {items {enum {defined_only: true}}}]; - // If included and set to true, the filter will instead block all responses with a gRPC status or - // inferred gRPC status enumerated in statuses, and allow all other responses. + // If included and set to true, the filter will instead block all responses + // with a gRPC status or inferred gRPC status enumerated in statuses, and + // allow all other responses. bool exclude = 2; } +// Filters based on matching dynamic metadata. +// If the matcher path and key correspond to an existing key in dynamic +// metadata, the request is logged only if the matcher value is equal to the +// metadata value. If the matcher path and key *do not* correspond to an +// existing key in dynamic metadata, the request is logged only if +// match_if_key_not_found is "true" or unset. +message MetadataFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.filter.accesslog.v2.MetadataFilter"; + + // Matcher to check metadata for specified value. For example, to match on the + // access_log_hint metadata, set the filter to "envoy.common" and the path to + // "access_log_hint", and the value to "true". + type.matcher.v3.MetadataMatcher matcher = 1; + + // Default result if the key does not exist in dynamic metadata: if unset or + // true, then log; if false, then don't log. + google.protobuf.BoolValue match_if_key_not_found = 2; +} + // Extension filter is statically registered at runtime. 
message ExtensionFilter { option (udpa.annotations.versioning).previous_message_type = diff --git a/api/envoy/config/accesslog/v4alpha/BUILD b/api/envoy/config/accesslog/v4alpha/BUILD index 4ed75a69ea09..e426e922fa72 100644 --- a/api/envoy/config/accesslog/v4alpha/BUILD +++ b/api/envoy/config/accesslog/v4alpha/BUILD @@ -9,6 +9,7 @@ api_proto_package( "//envoy/config/accesslog/v3:pkg", "//envoy/config/core/v4alpha:pkg", "//envoy/config/route/v4alpha:pkg", + "//envoy/type/matcher/v4alpha:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], diff --git a/api/envoy/config/accesslog/v4alpha/accesslog.proto b/api/envoy/config/accesslog/v4alpha/accesslog.proto index 35f494ea1ac8..bd4bcd48c4b4 100644 --- a/api/envoy/config/accesslog/v4alpha/accesslog.proto +++ b/api/envoy/config/accesslog/v4alpha/accesslog.proto @@ -4,10 +4,12 @@ package envoy.config.accesslog.v4alpha; import "envoy/config/core/v4alpha/base.proto"; import "envoy/config/route/v4alpha/route_components.proto"; +import "envoy/type/matcher/v4alpha/metadata.proto"; import "envoy/type/v3/percent.proto"; import "google/protobuf/any.proto"; import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -39,8 +41,8 @@ message AccessLog { // Filter which is used to determine if the access log needs to be written. AccessLogFilter filter = 2; - // Custom configuration that depends on the access log being instantiated. Built-in - // configurations include: + // Custom configuration that depends on the access log being instantiated. + // Built-in configurations include: // // #. "envoy.access_loggers.file": :ref:`FileAccessLog // ` @@ -53,7 +55,7 @@ message AccessLog { } } -// [#next-free-field: 12] +// [#next-free-field: 13] message AccessLogFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.accesslog.v3.AccessLogFilter"; @@ -93,6 +95,9 @@ message AccessLogFilter { // Extension filter. ExtensionFilter extension_filter = 11; + + // Metadata Filter + MetadataFilter metadata_filter = 12; } } @@ -156,25 +161,30 @@ message RuntimeFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.accesslog.v3.RuntimeFilter"; - // Runtime key to get an optional overridden numerator for use in the *percent_sampled* field. - // If found in runtime, this value will replace the default numerator. + // Runtime key to get an optional overridden numerator for use in the + // *percent_sampled* field. If found in runtime, this value will replace the + // default numerator. string runtime_key = 1 [(validate.rules).string = {min_bytes: 1}]; - // The default sampling percentage. If not specified, defaults to 0% with denominator of 100. + // The default sampling percentage. If not specified, defaults to 0% with + // denominator of 100. type.v3.FractionalPercent percent_sampled = 2; // By default, sampling pivots on the header - // :ref:`x-request-id` being present. If - // :ref:`x-request-id` is present, the filter will - // consistently sample across multiple hosts based on the runtime key value and the value - // extracted from :ref:`x-request-id`. If it is - // missing, or *use_independent_randomness* is set to true, the filter will randomly sample based - // on the runtime key value alone. *use_independent_randomness* can be used for logging kill - // switches within complex nested :ref:`AndFilter + // :ref:`x-request-id` being + // present. 
If :ref:`x-request-id` + // is present, the filter will consistently sample across multiple hosts based + // on the runtime key value and the value extracted from + // :ref:`x-request-id`. If it is + // missing, or *use_independent_randomness* is set to true, the filter will + // randomly sample based on the runtime key value alone. + // *use_independent_randomness* can be used for logging kill switches within + // complex nested :ref:`AndFilter // ` and :ref:`OrFilter - // ` blocks that are easier to reason about - // from a probability perspective (i.e., setting to true will cause the filter to behave like - // an independent random variable when composed within logical operator filters). + // ` blocks that are easier to + // reason about from a probability perspective (i.e., setting to true will + // cause the filter to behave like an independent random variable when + // composed within logical operator filters). bool use_independent_randomness = 3; } @@ -202,21 +212,22 @@ message HeaderFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.accesslog.v3.HeaderFilter"; - // Only requests with a header which matches the specified HeaderMatcher will pass the filter - // check. + // Only requests with a header which matches the specified HeaderMatcher will + // pass the filter check. route.v4alpha.HeaderMatcher header = 1 [(validate.rules).message = {required: true}]; } // Filters requests that received responses with an Envoy response flag set. // A list of the response flags can be found -// in the access log formatter :ref:`documentation`. +// in the access log formatter +// :ref:`documentation`. message ResponseFlagFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.accesslog.v3.ResponseFlagFilter"; - // Only responses with the any of the flags listed in this field will be logged. - // This field is optional. If it is not specified, then any response flag will pass - // the filter check. + // Only responses with the any of the flags listed in this field will be + // logged. This field is optional. If it is not specified, then any response + // flag will pass the filter check. repeated string flags = 1 [(validate.rules).repeated = { items { string { @@ -247,8 +258,8 @@ message ResponseFlagFilter { }]; } -// Filters gRPC requests based on their response status. If a gRPC status is not provided, the -// filter will infer the status from the HTTP status code. +// Filters gRPC requests based on their response status. If a gRPC status is not +// provided, the filter will infer the status from the HTTP status code. message GrpcStatusFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.accesslog.v3.GrpcStatusFilter"; @@ -276,11 +287,32 @@ message GrpcStatusFilter { // Logs only responses that have any one of the gRPC statuses in this field. repeated Status statuses = 1 [(validate.rules).repeated = {items {enum {defined_only: true}}}]; - // If included and set to true, the filter will instead block all responses with a gRPC status or - // inferred gRPC status enumerated in statuses, and allow all other responses. + // If included and set to true, the filter will instead block all responses + // with a gRPC status or inferred gRPC status enumerated in statuses, and + // allow all other responses. bool exclude = 2; } +// Filters based on matching dynamic metadata. 
+// If the matcher path and key correspond to an existing key in dynamic +// metadata, the request is logged only if the matcher value is equal to the +// metadata value. If the matcher path and key *do not* correspond to an +// existing key in dynamic metadata, the request is logged only if +// match_if_key_not_found is "true" or unset. +message MetadataFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.MetadataFilter"; + + // Matcher to check metadata for specified value. For example, to match on the + // access_log_hint metadata, set the filter to "envoy.common" and the path to + // "access_log_hint", and the value to "true". + type.matcher.v4alpha.MetadataMatcher matcher = 1; + + // Default result if the key does not exist in dynamic metadata: if unset or + // true, then log; if false, then don't log. + google.protobuf.BoolValue match_if_key_not_found = 2; +} + // Extension filter is statically registered at runtime. message ExtensionFilter { option (udpa.annotations.versioning).previous_message_type = diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 6828e1862bec..01c1f3dd56a0 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -41,7 +41,7 @@ Removed Config or Runtime New Features ------------ - +* access log: added a :ref:`dynamic metadata filter` for access logs, which filters whether to log based on matching dynamic metadata. * access log: added support for :ref:`%DOWNSTREAM_PEER_FINGERPRINT_1% ` as a response flag. * build: enable building envoy :ref:`arm64 images ` by buildx tool in x86 CI platform. * dynamic_forward_proxy: added :ref:`use_tcp_for_dns_lookups` option to use TCP for DNS lookups in order to match the DNS options for :ref:`Clusters`. diff --git a/generated_api_shadow/envoy/config/accesslog/v3/BUILD b/generated_api_shadow/envoy/config/accesslog/v3/BUILD index 92e9f3949251..518ca23126cd 100644 --- a/generated_api_shadow/envoy/config/accesslog/v3/BUILD +++ b/generated_api_shadow/envoy/config/accesslog/v3/BUILD @@ -9,6 +9,7 @@ api_proto_package( "//envoy/config/core/v3:pkg", "//envoy/config/filter/accesslog/v2:pkg", "//envoy/config/route/v3:pkg", + "//envoy/type/matcher/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], diff --git a/generated_api_shadow/envoy/config/accesslog/v3/accesslog.proto b/generated_api_shadow/envoy/config/accesslog/v3/accesslog.proto index 3307b4c57ffd..f1a8c29a4921 100644 --- a/generated_api_shadow/envoy/config/accesslog/v3/accesslog.proto +++ b/generated_api_shadow/envoy/config/accesslog/v3/accesslog.proto @@ -4,10 +4,12 @@ package envoy.config.accesslog.v3; import "envoy/config/core/v3/base.proto"; import "envoy/config/route/v3/route_components.proto"; +import "envoy/type/matcher/v3/metadata.proto"; import "envoy/type/v3/percent.proto"; import "google/protobuf/any.proto"; import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -35,8 +37,8 @@ message AccessLog { // Filter which is used to determine if the access log needs to be written. AccessLogFilter filter = 2; - // Custom configuration that depends on the access log being instantiated. Built-in - // configurations include: + // Custom configuration that depends on the access log being instantiated. + // Built-in configurations include: // // #. 
"envoy.access_loggers.file": :ref:`FileAccessLog // ` @@ -51,7 +53,7 @@ message AccessLog { } } -// [#next-free-field: 12] +// [#next-free-field: 13] message AccessLogFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.accesslog.v2.AccessLogFilter"; @@ -91,6 +93,9 @@ message AccessLogFilter { // Extension filter. ExtensionFilter extension_filter = 11; + + // Metadata Filter + MetadataFilter metadata_filter = 12; } } @@ -154,25 +159,30 @@ message RuntimeFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.accesslog.v2.RuntimeFilter"; - // Runtime key to get an optional overridden numerator for use in the *percent_sampled* field. - // If found in runtime, this value will replace the default numerator. + // Runtime key to get an optional overridden numerator for use in the + // *percent_sampled* field. If found in runtime, this value will replace the + // default numerator. string runtime_key = 1 [(validate.rules).string = {min_bytes: 1}]; - // The default sampling percentage. If not specified, defaults to 0% with denominator of 100. + // The default sampling percentage. If not specified, defaults to 0% with + // denominator of 100. type.v3.FractionalPercent percent_sampled = 2; // By default, sampling pivots on the header - // :ref:`x-request-id` being present. If - // :ref:`x-request-id` is present, the filter will - // consistently sample across multiple hosts based on the runtime key value and the value - // extracted from :ref:`x-request-id`. If it is - // missing, or *use_independent_randomness* is set to true, the filter will randomly sample based - // on the runtime key value alone. *use_independent_randomness* can be used for logging kill - // switches within complex nested :ref:`AndFilter + // :ref:`x-request-id` being + // present. If :ref:`x-request-id` + // is present, the filter will consistently sample across multiple hosts based + // on the runtime key value and the value extracted from + // :ref:`x-request-id`. If it is + // missing, or *use_independent_randomness* is set to true, the filter will + // randomly sample based on the runtime key value alone. + // *use_independent_randomness* can be used for logging kill switches within + // complex nested :ref:`AndFilter // ` and :ref:`OrFilter - // ` blocks that are easier to reason about - // from a probability perspective (i.e., setting to true will cause the filter to behave like - // an independent random variable when composed within logical operator filters). + // ` blocks that are easier to + // reason about from a probability perspective (i.e., setting to true will + // cause the filter to behave like an independent random variable when + // composed within logical operator filters). bool use_independent_randomness = 3; } @@ -201,21 +211,22 @@ message HeaderFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.accesslog.v2.HeaderFilter"; - // Only requests with a header which matches the specified HeaderMatcher will pass the filter - // check. + // Only requests with a header which matches the specified HeaderMatcher will + // pass the filter check. route.v3.HeaderMatcher header = 1 [(validate.rules).message = {required: true}]; } // Filters requests that received responses with an Envoy response flag set. // A list of the response flags can be found -// in the access log formatter :ref:`documentation`. +// in the access log formatter +// :ref:`documentation`. 
message ResponseFlagFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.accesslog.v2.ResponseFlagFilter"; - // Only responses with the any of the flags listed in this field will be logged. - // This field is optional. If it is not specified, then any response flag will pass - // the filter check. + // Only responses with the any of the flags listed in this field will be + // logged. This field is optional. If it is not specified, then any response + // flag will pass the filter check. repeated string flags = 1 [(validate.rules).repeated = { items { string { @@ -246,8 +257,8 @@ message ResponseFlagFilter { }]; } -// Filters gRPC requests based on their response status. If a gRPC status is not provided, the -// filter will infer the status from the HTTP status code. +// Filters gRPC requests based on their response status. If a gRPC status is not +// provided, the filter will infer the status from the HTTP status code. message GrpcStatusFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.accesslog.v2.GrpcStatusFilter"; @@ -275,11 +286,32 @@ message GrpcStatusFilter { // Logs only responses that have any one of the gRPC statuses in this field. repeated Status statuses = 1 [(validate.rules).repeated = {items {enum {defined_only: true}}}]; - // If included and set to true, the filter will instead block all responses with a gRPC status or - // inferred gRPC status enumerated in statuses, and allow all other responses. + // If included and set to true, the filter will instead block all responses + // with a gRPC status or inferred gRPC status enumerated in statuses, and + // allow all other responses. bool exclude = 2; } +// Filters based on matching dynamic metadata. +// If the matcher path and key correspond to an existing key in dynamic +// metadata, the request is logged only if the matcher value is equal to the +// metadata value. If the matcher path and key *do not* correspond to an +// existing key in dynamic metadata, the request is logged only if +// match_if_key_not_found is "true" or unset. +message MetadataFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.filter.accesslog.v2.MetadataFilter"; + + // Matcher to check metadata for specified value. For example, to match on the + // access_log_hint metadata, set the filter to "envoy.common" and the path to + // "access_log_hint", and the value to "true". + type.matcher.v3.MetadataMatcher matcher = 1; + + // Default result if the key does not exist in dynamic metadata: if unset or + // true, then log; if false, then don't log. + google.protobuf.BoolValue match_if_key_not_found = 2; +} + // Extension filter is statically registered at runtime. 
message ExtensionFilter { option (udpa.annotations.versioning).previous_message_type = diff --git a/generated_api_shadow/envoy/config/accesslog/v4alpha/BUILD b/generated_api_shadow/envoy/config/accesslog/v4alpha/BUILD index 4ed75a69ea09..e426e922fa72 100644 --- a/generated_api_shadow/envoy/config/accesslog/v4alpha/BUILD +++ b/generated_api_shadow/envoy/config/accesslog/v4alpha/BUILD @@ -9,6 +9,7 @@ api_proto_package( "//envoy/config/accesslog/v3:pkg", "//envoy/config/core/v4alpha:pkg", "//envoy/config/route/v4alpha:pkg", + "//envoy/type/matcher/v4alpha:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], diff --git a/generated_api_shadow/envoy/config/accesslog/v4alpha/accesslog.proto b/generated_api_shadow/envoy/config/accesslog/v4alpha/accesslog.proto index 35f494ea1ac8..bd4bcd48c4b4 100644 --- a/generated_api_shadow/envoy/config/accesslog/v4alpha/accesslog.proto +++ b/generated_api_shadow/envoy/config/accesslog/v4alpha/accesslog.proto @@ -4,10 +4,12 @@ package envoy.config.accesslog.v4alpha; import "envoy/config/core/v4alpha/base.proto"; import "envoy/config/route/v4alpha/route_components.proto"; +import "envoy/type/matcher/v4alpha/metadata.proto"; import "envoy/type/v3/percent.proto"; import "google/protobuf/any.proto"; import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -39,8 +41,8 @@ message AccessLog { // Filter which is used to determine if the access log needs to be written. AccessLogFilter filter = 2; - // Custom configuration that depends on the access log being instantiated. Built-in - // configurations include: + // Custom configuration that depends on the access log being instantiated. + // Built-in configurations include: // // #. "envoy.access_loggers.file": :ref:`FileAccessLog // ` @@ -53,7 +55,7 @@ message AccessLog { } } -// [#next-free-field: 12] +// [#next-free-field: 13] message AccessLogFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.accesslog.v3.AccessLogFilter"; @@ -93,6 +95,9 @@ message AccessLogFilter { // Extension filter. ExtensionFilter extension_filter = 11; + + // Metadata Filter + MetadataFilter metadata_filter = 12; } } @@ -156,25 +161,30 @@ message RuntimeFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.accesslog.v3.RuntimeFilter"; - // Runtime key to get an optional overridden numerator for use in the *percent_sampled* field. - // If found in runtime, this value will replace the default numerator. + // Runtime key to get an optional overridden numerator for use in the + // *percent_sampled* field. If found in runtime, this value will replace the + // default numerator. string runtime_key = 1 [(validate.rules).string = {min_bytes: 1}]; - // The default sampling percentage. If not specified, defaults to 0% with denominator of 100. + // The default sampling percentage. If not specified, defaults to 0% with + // denominator of 100. type.v3.FractionalPercent percent_sampled = 2; // By default, sampling pivots on the header - // :ref:`x-request-id` being present. If - // :ref:`x-request-id` is present, the filter will - // consistently sample across multiple hosts based on the runtime key value and the value - // extracted from :ref:`x-request-id`. If it is - // missing, or *use_independent_randomness* is set to true, the filter will randomly sample based - // on the runtime key value alone. 
*use_independent_randomness* can be used for logging kill - // switches within complex nested :ref:`AndFilter + // :ref:`x-request-id` being + // present. If :ref:`x-request-id` + // is present, the filter will consistently sample across multiple hosts based + // on the runtime key value and the value extracted from + // :ref:`x-request-id`. If it is + // missing, or *use_independent_randomness* is set to true, the filter will + // randomly sample based on the runtime key value alone. + // *use_independent_randomness* can be used for logging kill switches within + // complex nested :ref:`AndFilter // ` and :ref:`OrFilter - // ` blocks that are easier to reason about - // from a probability perspective (i.e., setting to true will cause the filter to behave like - // an independent random variable when composed within logical operator filters). + // ` blocks that are easier to + // reason about from a probability perspective (i.e., setting to true will + // cause the filter to behave like an independent random variable when + // composed within logical operator filters). bool use_independent_randomness = 3; } @@ -202,21 +212,22 @@ message HeaderFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.accesslog.v3.HeaderFilter"; - // Only requests with a header which matches the specified HeaderMatcher will pass the filter - // check. + // Only requests with a header which matches the specified HeaderMatcher will + // pass the filter check. route.v4alpha.HeaderMatcher header = 1 [(validate.rules).message = {required: true}]; } // Filters requests that received responses with an Envoy response flag set. // A list of the response flags can be found -// in the access log formatter :ref:`documentation`. +// in the access log formatter +// :ref:`documentation`. message ResponseFlagFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.accesslog.v3.ResponseFlagFilter"; - // Only responses with the any of the flags listed in this field will be logged. - // This field is optional. If it is not specified, then any response flag will pass - // the filter check. + // Only responses with the any of the flags listed in this field will be + // logged. This field is optional. If it is not specified, then any response + // flag will pass the filter check. repeated string flags = 1 [(validate.rules).repeated = { items { string { @@ -247,8 +258,8 @@ message ResponseFlagFilter { }]; } -// Filters gRPC requests based on their response status. If a gRPC status is not provided, the -// filter will infer the status from the HTTP status code. +// Filters gRPC requests based on their response status. If a gRPC status is not +// provided, the filter will infer the status from the HTTP status code. message GrpcStatusFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.accesslog.v3.GrpcStatusFilter"; @@ -276,11 +287,32 @@ message GrpcStatusFilter { // Logs only responses that have any one of the gRPC statuses in this field. repeated Status statuses = 1 [(validate.rules).repeated = {items {enum {defined_only: true}}}]; - // If included and set to true, the filter will instead block all responses with a gRPC status or - // inferred gRPC status enumerated in statuses, and allow all other responses. + // If included and set to true, the filter will instead block all responses + // with a gRPC status or inferred gRPC status enumerated in statuses, and + // allow all other responses. 
bool exclude = 2; } +// Filters based on matching dynamic metadata. +// If the matcher path and key correspond to an existing key in dynamic +// metadata, the request is logged only if the matcher value is equal to the +// metadata value. If the matcher path and key *do not* correspond to an +// existing key in dynamic metadata, the request is logged only if +// match_if_key_not_found is "true" or unset. +message MetadataFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.accesslog.v3.MetadataFilter"; + + // Matcher to check metadata for specified value. For example, to match on the + // access_log_hint metadata, set the filter to "envoy.common" and the path to + // "access_log_hint", and the value to "true". + type.matcher.v4alpha.MetadataMatcher matcher = 1; + + // Default result if the key does not exist in dynamic metadata: if unset or + // true, then log; if false, then don't log. + google.protobuf.BoolValue match_if_key_not_found = 2; +} + // Extension filter is statically registered at runtime. message ExtensionFilter { option (udpa.annotations.versioning).previous_message_type = diff --git a/source/common/access_log/access_log_impl.cc b/source/common/access_log/access_log_impl.cc index ea3e9a1330c5..db4b8330370c 100644 --- a/source/common/access_log/access_log_impl.cc +++ b/source/common/access_log/access_log_impl.cc @@ -13,6 +13,7 @@ #include "common/common/assert.h" #include "common/common/utility.h" +#include "common/config/metadata.h" #include "common/config/utility.h" #include "common/http/header_map_impl.h" #include "common/http/header_utility.h" @@ -76,6 +77,8 @@ FilterPtr FilterFactory::fromProto(const envoy::config::accesslog::v3::AccessLog case envoy::config::accesslog::v3::AccessLogFilter::FilterSpecifierCase::kGrpcStatusFilter: MessageUtil::validate(config, validation_visitor); return FilterPtr{new GrpcStatusFilter(config.grpc_status_filter())}; + case envoy::config::accesslog::v3::AccessLogFilter::FilterSpecifierCase::kMetadataFilter: + return FilterPtr{new MetadataFilter(config.metadata_filter())}; case envoy::config::accesslog::v3::AccessLogFilter::FilterSpecifierCase::kExtensionFilter: MessageUtil::validate(config, validation_visitor); { @@ -255,6 +258,42 @@ Grpc::Status::GrpcStatus GrpcStatusFilter::protoToGrpcStatus( return static_cast(status); } +MetadataFilter::MetadataFilter(const envoy::config::accesslog::v3::MetadataFilter& filter_config) + : default_match_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(filter_config, match_if_key_not_found, true)), + filter_(filter_config.matcher().filter()) { + + auto& matcher_config = filter_config.matcher(); + + for (const auto& seg : matcher_config.path()) { + path_.push_back(seg.key()); + } + + // Matches if the value equals the configured 'MetadataMatcher' value. 
+ const auto& val = matcher_config.value(); + value_matcher_ = Matchers::ValueMatcher::create(val); + + // Matches if the value is present in dynamic metadata + auto present_val = envoy::type::matcher::v3::ValueMatcher(); + present_val.set_present_match(true); + present_matcher_ = Matchers::ValueMatcher::create(present_val); +} + +bool MetadataFilter::evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap&, + const Http::ResponseHeaderMap&, + const Http::ResponseTrailerMap&) const { + const auto& value = + Envoy::Config::Metadata::metadataValue(&info.dynamicMetadata(), filter_, path_); + // If the key corresponds to a set value in dynamic metadata, return true if the value matches the + // the configured 'MetadataMatcher' value and false otherwise + if (present_matcher_->match(value)) { + return value_matcher_->match(value); + } + + // If the key does not correspond to a set value in dynamic metadata, return true if + // 'match_if_key_not_found' is set to true and false otherwise + return default_match_; +} + InstanceSharedPtr AccessLogFactory::fromProto(const envoy::config::accesslog::v3::AccessLog& config, Server::Configuration::FactoryContext& context) { FilterPtr filter; diff --git a/source/common/access_log/access_log_impl.h b/source/common/access_log/access_log_impl.h index 657a7d069cf5..5aef64a40e2c 100644 --- a/source/common/access_log/access_log_impl.h +++ b/source/common/access_log/access_log_impl.h @@ -12,6 +12,7 @@ #include "envoy/server/access_log_config.h" #include "envoy/type/v3/percent.pb.h" +#include "common/common/matchers.h" #include "common/grpc/status.h" #include "common/http/header_utility.h" #include "common/protobuf/protobuf.h" @@ -228,6 +229,27 @@ class GrpcStatusFilter : public Filter { protoToGrpcStatus(envoy::config::accesslog::v3::GrpcStatusFilter::Status status) const; }; +/** + * Filters requests based on dynamic metadata + */ +class MetadataFilter : public Filter { +public: + MetadataFilter(const envoy::config::accesslog::v3::MetadataFilter& filter_config); + + bool evaluate(const StreamInfo::StreamInfo& info, const Http::RequestHeaderMap& request_headers, + const Http::ResponseHeaderMap& response_headers, + const Http::ResponseTrailerMap& response_trailers) const override; + +private: + Matchers::ValueMatcherConstSharedPtr present_matcher_; + Matchers::ValueMatcherConstSharedPtr value_matcher_; + + std::vector path_; + + const bool default_match_; + const std::string filter_; +}; + /** * Extension filter factory that reads from ExtensionFilter proto. 
*/ diff --git a/test/common/access_log/access_log_impl_test.cc b/test/common/access_log/access_log_impl_test.cc index 09abacf4dc69..2c882010c657 100644 --- a/test/common/access_log/access_log_impl_test.cc +++ b/test/common/access_log/access_log_impl_test.cc @@ -1269,6 +1269,104 @@ name: accesslog log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_); } +TEST_F(AccessLogImplTest, MetadataFilter) { + const std::string yaml = R"EOF( +name: accesslog +filter: + metadata_filter: + matcher: + filter: "some.namespace" + path: + - key: "a" + - key: "b" + - key: "c" + value: + bool_match: true + +typed_config: + "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog + path: /dev/null + )EOF"; + + TestStreamInfo stream_info; + ProtobufWkt::Struct metadata_val; + auto& fields_a = *metadata_val.mutable_fields(); + auto& struct_b = *fields_a["a"].mutable_struct_value(); + auto& fields_b = *struct_b.mutable_fields(); + auto& struct_c = *fields_b["b"].mutable_struct_value(); + auto& fields_c = *struct_c.mutable_fields(); + fields_c["c"].set_bool_value(true); + + stream_info.setDynamicMetadata("some.namespace", metadata_val); + + const InstanceSharedPtr log = + AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); + + EXPECT_CALL(*file_, write(_)).Times(1); + + log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info); + fields_c["c"].set_bool_value(false); + + EXPECT_CALL(*file_, write(_)).Times(0); +} + +TEST_F(AccessLogImplTest, MetadataFilterNoKey) { + const std::string default_true_yaml = R"EOF( +name: accesslog +filter: + metadata_filter: + matcher: + filter: "some.namespace" + path: + - key: "x" + value: + bool_match: true + +typed_config: + "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog + path: /dev/null + )EOF"; + + const std::string default_false_yaml = R"EOF( +name: accesslog +filter: + metadata_filter: + matcher: + filter: "some.namespace" + path: + - key: "y" + value: + bool_match: true + match_if_key_not_found: + value: false + +typed_config: + "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog + path: /dev/null + )EOF"; + + TestStreamInfo stream_info; + ProtobufWkt::Struct metadata_val; + auto& fields_a = *metadata_val.mutable_fields(); + auto& struct_b = *fields_a["a"].mutable_struct_value(); + auto& fields_b = *struct_b.mutable_fields(); + fields_b["b"].set_bool_value(true); + + stream_info.setDynamicMetadata("some.namespace", metadata_val); + + const InstanceSharedPtr default_false_log = + AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(default_false_yaml), context_); + EXPECT_CALL(*file_, write(_)).Times(0); + + default_false_log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info); + + const InstanceSharedPtr default_true_log = + AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(default_true_yaml), context_); + EXPECT_CALL(*file_, write(_)).Times(1); + + default_true_log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info); +} + class TestHeaderFilterFactory : public ExtensionFilterFactory { public: ~TestHeaderFilterFactory() override = default; From 25d159ace4d9833595a44d0c935f999b1cec89de Mon Sep 17 00:00:00 2001 From: Yosry Ahmed Date: Fri, 31 Jul 2020 10:33:18 -0700 Subject: [PATCH 813/909] caching: Handling of Pragma:no-cache when Cache-Control header is missing (#12396) If a request lacks a Cache-Control header, but has a Pragma header with a no-cache directive, treat it as if it was Cache-Control: 
no-cache. Signed-off-by: Yosry Ahmed --- source/common/http/headers.h | 1 + .../filters/http/cache/http_cache.cc | 22 +++++++++- .../filters/http/cache/http_cache.h | 3 +- .../filters/http/cache/http_cache_test.cc | 44 +++++++++++++++++++ tools/spelling/spelling_dictionary.txt | 2 + 5 files changed, 69 insertions(+), 3 deletions(-) diff --git a/source/common/http/headers.h b/source/common/http/headers.h index 5906f02b794c..62b0528bb38c 100644 --- a/source/common/http/headers.h +++ b/source/common/http/headers.h @@ -66,6 +66,7 @@ class CustomHeaderValues { const LowerCaseString GrpcAcceptEncoding{"grpc-accept-encoding"}; const LowerCaseString Origin{"origin"}; const LowerCaseString OtSpanContext{"x-ot-span-context"}; + const LowerCaseString Pragma{"pragma"}; const LowerCaseString Referer{"referer"}; const LowerCaseString Vary{"vary"}; diff --git a/source/extensions/filters/http/cache/http_cache.cc b/source/extensions/filters/http/cache/http_cache.cc index b9e17d495e5a..0a406c652f76 100644 --- a/source/extensions/filters/http/cache/http_cache.cc +++ b/source/extensions/filters/http/cache/http_cache.cc @@ -20,6 +20,8 @@ Http::RegisterCustomInlineHeader response_cache_control_handle(Http::CustomHeaders::get().CacheControl); +Http::RegisterCustomInlineHeader + pragma_handler(Http::CustomHeaders::get().Pragma); std::ostream& operator<<(std::ostream& os, CacheEntryStatus status) { switch (status) { @@ -42,8 +44,7 @@ std::ostream& operator<<(std::ostream& os, const AdjustedByteRange& range) { } LookupRequest::LookupRequest(const Http::RequestHeaderMap& request_headers, SystemTime timestamp) - : timestamp_(timestamp), request_cache_control_(request_headers.getInlineValue( - request_cache_control_handle.handle())) { + : timestamp_(timestamp) { // These ASSERTs check prerequisites. A request without these headers can't be looked up in cache; // CacheFilter doesn't create LookupRequests for such requests. ASSERT(request_headers.Path(), "Can't form cache lookup key for malformed Http::RequestHeaderMap " @@ -56,6 +57,8 @@ LookupRequest::LookupRequest(const Http::RequestHeaderMap& request_headers, Syst const Http::HeaderString& forwarded_proto = request_headers.ForwardedProto()->value(); const auto& scheme_values = Http::Headers::get().SchemeValues; ASSERT(forwarded_proto == scheme_values.Http || forwarded_proto == scheme_values.Https); + + initializeRequestCacheControl(request_headers); // TODO(toddmgreer): Let config determine whether to include forwarded_proto, host, and // query params. // TODO(toddmgreer): get cluster name. 
@@ -73,6 +76,21 @@ LookupRequest::LookupRequest(const Http::RequestHeaderMap& request_headers, Syst size_t stableHashKey(const Key& key) { return MessageUtil::hash(key); } size_t localHashKey(const Key& key) { return stableHashKey(key); } +void LookupRequest::initializeRequestCacheControl(const Http::RequestHeaderMap& request_headers) { + const absl::string_view cache_control = + request_headers.getInlineValue(request_cache_control_handle.handle()); + const absl::string_view pragma = request_headers.getInlineValue(pragma_handler.handle()); + + if (!cache_control.empty()) { + request_cache_control_ = RequestCacheControl(cache_control); + } else { + // According to: https://httpwg.org/specs/rfc7234.html#header.pragma + // when Cache-Control header is missing, "Pragma:no-cache" is equivalent to + // "Cache-Control:no-cache" Any other directives are ignored + request_cache_control_.must_validate_ = RequestCacheControl(pragma).must_validate_; + } +} + bool LookupRequest::requiresValidation(const Http::ResponseHeaderMap& response_headers) const { // TODO(yosrym93): Store parsed response cache-control in cache instead of parsing it on every // lookup diff --git a/source/extensions/filters/http/cache/http_cache.h b/source/extensions/filters/http/cache/http_cache.h index b29c88d8c6db..578582ec9be9 100644 --- a/source/extensions/filters/http/cache/http_cache.h +++ b/source/extensions/filters/http/cache/http_cache.h @@ -183,6 +183,7 @@ class LookupRequest { uint64_t content_length) const; private: + void initializeRequestCacheControl(const Http::RequestHeaderMap& request_headers); bool requiresValidation(const Http::ResponseHeaderMap& response_headers) const; Key key_; @@ -196,7 +197,7 @@ class LookupRequest { // simpler to instead call makeLookupResult with each potential response. HeaderVector vary_headers_; - const RequestCacheControl request_cache_control_; + RequestCacheControl request_cache_control_; }; // Statically known information about a cache. 
diff --git a/test/extensions/filters/http/cache/http_cache_test.cc b/test/extensions/filters/http/cache/http_cache_test.cc index a4946bae7c7a..d28d75aaf39f 100644 --- a/test/extensions/filters/http/cache/http_cache_test.cc +++ b/test/extensions/filters/http/cache/http_cache_test.cc @@ -158,6 +158,50 @@ TEST_F(LookupRequestTest, NotExpiredViaFallbackheader) { EXPECT_EQ(CacheEntryStatus::Ok, lookup_response.cache_entry_status_); } +// If request Cache-Control header is missing, +// "Pragma:no-cache" is equivalent to "Cache-Control:no-cache" +// https://httpwg.org/specs/rfc7234.html#header.pragma +TEST_F(LookupRequestTest, PragmaNoCacheFallback) { + request_headers_.addCopy("pragma", "no-cache"); + const LookupRequest lookup_request(request_headers_, current_time_); + const Http::TestResponseHeaderMapImpl response_headers( + {{"date", formatter_.fromTime(current_time_)}, {"cache-control", "public, max-age=3600"}}); + const LookupResult lookup_response = makeLookupResult(lookup_request, response_headers); + // Response is not expired but the request requires revalidation through Pragma: no-cache + EXPECT_EQ(CacheEntryStatus::RequiresValidation, lookup_response.cache_entry_status_); +} + +TEST_F(LookupRequestTest, PragmaNoCacheFallbackExtraDirectivesIgnored) { + request_headers_.addCopy("pragma", "no-cache, custom-directive=custom-value"); + const LookupRequest lookup_request(request_headers_, current_time_); + const Http::TestResponseHeaderMapImpl response_headers( + {{"date", formatter_.fromTime(current_time_)}, {"cache-control", "public, max-age=3600"}}); + const LookupResult lookup_response = makeLookupResult(lookup_request, response_headers); + // Response is not expired but the request requires revalidation through Pragma: no-cache + EXPECT_EQ(CacheEntryStatus::RequiresValidation, lookup_response.cache_entry_status_); +} + +TEST_F(LookupRequestTest, PragmaFallbackOtherValuesIgnored) { + request_headers_.addCopy("pragma", "max-age=0"); + const LookupRequest lookup_request(request_headers_, current_time_ + std::chrono::seconds(5)); + const Http::TestResponseHeaderMapImpl response_headers( + {{"date", formatter_.fromTime(current_time_)}, {"cache-control", "public, max-age=3600"}}); + const LookupResult lookup_response = makeLookupResult(lookup_request, response_headers); + // Response is fresh, Pragma header with values other than "no-cache" is ignored + EXPECT_EQ(CacheEntryStatus::Ok, lookup_response.cache_entry_status_); +} + +TEST_F(LookupRequestTest, PragmaNoFallback) { + request_headers_.addCopy("pragma", "no-cache"); + request_headers_.addCopy("cache-control", "max-age=10"); + const LookupRequest lookup_request(request_headers_, current_time_ + std::chrono::seconds(5)); + const Http::TestResponseHeaderMapImpl response_headers( + {{"date", formatter_.fromTime(current_time_)}, {"cache-control", "public, max-age=3600"}}); + const LookupResult lookup_response = makeLookupResult(lookup_request, response_headers); + // Pragma header is ignored when Cache-Control header is present + EXPECT_EQ(CacheEntryStatus::Ok, lookup_response.cache_entry_status_); +} + TEST_F(LookupRequestTest, FullRange) { request_headers_.addCopy("Range", "0-99"); const LookupRequest lookup_request(request_headers_, current_time_); diff --git a/tools/spelling/spelling_dictionary.txt b/tools/spelling/spelling_dictionary.txt index 7f07e7158ba9..98fc8fce45b0 100644 --- a/tools/spelling/spelling_dictionary.txt +++ b/tools/spelling/spelling_dictionary.txt @@ -860,6 +860,7 @@ postfix postfixes postgres postgresql +pragma pre 
preallocate preallocating @@ -964,6 +965,7 @@ restarter resync retriable retriggers +revalidation rmdir rocketmq rewriter From c3814e566306d75fea2ab60b5ac09ed9722b0cfb Mon Sep 17 00:00:00 2001 From: antonio Date: Fri, 31 Jul 2020 13:34:50 -0400 Subject: [PATCH 814/909] buffer: Improve Buffer::OwnedImpl::linearize implementation (#12162) Change the Buffer::OwnedImpl::linearize implementation so it only moves the requested number of bytes to the a flat buffer slice at the beginning of the buffer. Linearizing beyond the requested size can result in the excess bytes needing to be copied again as part of the next linearize call if the buffer ends up with more than 1 buffer slice when the linearize operation completes. Typical usage of linearize involves flattening the first 16kb in a buffer in a loop to construct inputs appropriate to SSL_write; the original implementation ended up copying an extra 4032 bytes when linearizing a 16KB block. Signed-off-by: Antonio Vicente --- source/common/buffer/buffer_impl.cc | 40 ++++++++++----------------- source/common/buffer/buffer_impl.h | 1 + test/common/buffer/owned_impl_test.cc | 12 ++++---- 3 files changed, 21 insertions(+), 32 deletions(-) diff --git a/source/common/buffer/buffer_impl.cc b/source/common/buffer/buffer_impl.cc index 7503104ea426..0ad095135e57 100644 --- a/source/common/buffer/buffer_impl.cc +++ b/source/common/buffer/buffer_impl.cc @@ -148,7 +148,9 @@ void OwnedImpl::copyOut(size_t start, uint64_t size, void* data) const { ASSERT(size == 0); } -void OwnedImpl::drain(uint64_t size) { +void OwnedImpl::drain(uint64_t size) { drainImpl(size); } + +void OwnedImpl::drainImpl(uint64_t size) { while (size != 0) { if (slices_.empty()) { break; @@ -218,34 +220,20 @@ void* OwnedImpl::linearize(uint32_t size) { if (slices_.empty()) { return nullptr; } - uint64_t linearized_size = 0; - uint64_t num_slices_to_linearize = 0; - for (const auto& slice : slices_) { - num_slices_to_linearize++; - linearized_size += slice->dataSize(); - if (linearized_size >= size) { - break; - } - } - if (num_slices_to_linearize > 1) { - auto new_slice = OwnedSlice::create(linearized_size); - uint64_t bytes_copied = 0; - Slice::Reservation reservation = new_slice->reserve(linearized_size); + if (slices_[0]->dataSize() < size) { + auto new_slice = OwnedSlice::create(size); + Slice::Reservation reservation = new_slice->reserve(size); ASSERT(reservation.mem_ != nullptr); - ASSERT(reservation.len_ == linearized_size); - auto dest = static_cast(reservation.mem_); - do { - uint64_t data_size = slices_.front()->dataSize(); - if (data_size > 0) { - memcpy(dest, slices_.front()->data(), data_size); - bytes_copied += data_size; - dest += data_size; - } - slices_.pop_front(); - } while (bytes_copied < linearized_size); - ASSERT(dest == static_cast(reservation.mem_) + linearized_size); + ASSERT(reservation.len_ == size); + copyOut(0, size, reservation.mem_); new_slice->commit(reservation); + + // Replace the first 'size' bytes in the buffer with the new slice. Since new_slice re-adds the + // drained bytes, avoid use of the overridable 'drain' method to avoid incorrectly checking if + // we dipped below low-watermark. 
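For context on the commit message above, this is roughly the caller pattern that benefits: flatten at most 16KB, hand it to SSL_write, drain what was written, repeat. flushToSsl is an illustrative helper, not code from this patch, and error handling is reduced to the bare minimum.

#include <algorithm>
#include <cstdint>

#include "openssl/ssl.h"

#include "envoy/buffer/buffer.h"

// Sketch of the typical linearize consumer. With this change, linearizing a
// 16384-byte chunk copies exactly 16384 bytes into the front slice instead of
// also copying the extra 4032 bytes the old implementation dragged along.
uint64_t flushToSsl(SSL* ssl, Envoy::Buffer::Instance& buffer) {
  uint64_t total_written = 0;
  while (buffer.length() > 0) {
    const uint64_t chunk = std::min<uint64_t>(buffer.length(), 16384);
    void* flat = buffer.linearize(static_cast<uint32_t>(chunk));
    const int rc = SSL_write(ssl, flat, static_cast<int>(chunk));
    if (rc <= 0) {
      break; // A real caller would consult SSL_get_error() here.
    }
    buffer.drain(static_cast<uint64_t>(rc));
    total_written += static_cast<uint64_t>(rc);
  }
  return total_written;
}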
+ drainImpl(size); slices_.emplace_front(std::move(new_slice)); + length_ += size; } return slices_.front()->data(); } diff --git a/source/common/buffer/buffer_impl.h b/source/common/buffer/buffer_impl.h index cc1981eb459b..05e673d6b2ae 100644 --- a/source/common/buffer/buffer_impl.h +++ b/source/common/buffer/buffer_impl.h @@ -582,6 +582,7 @@ class OwnedImpl : public LibEventInstance { bool isSameBufferImpl(const Instance& rhs) const; void addImpl(const void* data, uint64_t size); + void drainImpl(uint64_t size); /** * Moves contents of the `other_slice` by either taking its ownership or coalescing it diff --git a/test/common/buffer/owned_impl_test.cc b/test/common/buffer/owned_impl_test.cc index d22b5c072c76..42246acb357a 100644 --- a/test/common/buffer/owned_impl_test.cc +++ b/test/common/buffer/owned_impl_test.cc @@ -595,22 +595,22 @@ TEST_F(OwnedImplTest, LinearizeDrainTracking) { testing::MockFunction drain_tracker; testing::MockFunction done_tracker; EXPECT_CALL(tracker1, Call()); + EXPECT_CALL(drain_tracker, Call(3 * LargeChunk + 108 * SmallChunk, 16384)); EXPECT_CALL(release_callback_tracker, Call(_, _, _)); EXPECT_CALL(tracker2, Call()); - EXPECT_CALL(drain_tracker, Call(3 * LargeChunk + 108 * SmallChunk, 16384)); EXPECT_CALL(release_callback_tracker2, Call(_, _, _)); EXPECT_CALL(tracker3, Call()); - EXPECT_CALL(tracker4, Call()); EXPECT_CALL(drain_tracker, Call(2 * LargeChunk + 107 * SmallChunk, 16384)); EXPECT_CALL(drain_tracker, Call(LargeChunk + 106 * SmallChunk, 16384)); + EXPECT_CALL(tracker4, Call()); EXPECT_CALL(drain_tracker, Call(105 * SmallChunk, 16384)); EXPECT_CALL(tracker5, Call()); EXPECT_CALL(drain_tracker, Call(4616, 4616)); EXPECT_CALL(done_tracker, Call()); - for (auto& expected_first_slice : std::vector>{{16584, 3832, 20416}, - {32904, 3896, 36800}, - {16520, 3896, 36800}, - {20296, 120, 20416}, + for (auto& expected_first_slice : std::vector>{{16384, 4032, 20416}, + {16384, 4032, 20416}, + {16520, 0, 32704}, + {16384, 4032, 20416}, {4616, 3512, 8128}}) { const uint32_t write_size = std::min(LinearizeSize, buffer.length()); buffer.linearize(write_size); From 153d22597bf166cb4238cf532da3c74584734cea Mon Sep 17 00:00:00 2001 From: asraa Date: Fri, 31 Jul 2020 13:39:24 -0400 Subject: [PATCH 815/909] [http] bugfix where error details are overridden (#12353) nghttp2 will continue to do some frame processing on the same stream even if an error is detected that should trigger a stream close. 
This causes setDetails to be called twice, and override the error details in release mode (crash in debug mode) Signed-off-by: Asra Ali --- source/common/http/http2/codec_impl.h | 16 ++- source/common/http/http2/codec_impl_legacy.h | 16 ++- test/common/http/http2/BUILD | 1 + test/common/http/http2/http2_frame.cc | 8 ++ test/common/http/http2/http2_frame.h | 4 + .../response_header_corpus/set_details_twice | Bin 0 -> 147 bytes test/integration/http2_integration_test.cc | 112 ++++++++++++------ test/integration/http2_integration_test.h | 26 ++-- 8 files changed, 128 insertions(+), 55 deletions(-) create mode 100644 test/common/http/http2/response_header_corpus/set_details_twice diff --git a/source/common/http/http2/codec_impl.h b/source/common/http/http2/codec_impl.h index ee3b71253502..8de01c67728b 100644 --- a/source/common/http/http2/codec_impl.h +++ b/source/common/http/http2/codec_impl.h @@ -226,11 +226,17 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable(data_[3]); } ResponseStatus responseStatus() const; @@ -155,6 +156,9 @@ class Http2Frame { // header. void appendHpackInt(uint64_t value, unsigned char prefix_mask); void appendData(absl::string_view data) { data_.insert(data_.end(), data.begin(), data.end()); } + void appendData(std::vector data) { + data_.insert(data_.end(), data.begin(), data.end()); + } // Headers are directly encoded void appendStaticHeader(StaticHeaderIndex index); diff --git a/test/common/http/http2/response_header_corpus/set_details_twice b/test/common/http/http2/response_header_corpus/set_details_twice new file mode 100644 index 0000000000000000000000000000000000000000..52aba4d72c31b36a501b59a3acf091b5381e6d3d GIT binary patch literal 147 lcmZQzsAXhfU|?YE+`M`HfPe)+z`)4J3UmYmV+X^6Q~;WbS26$q literal 0 HcmV?d00001 diff --git a/test/integration/http2_integration_test.cc b/test/integration/http2_integration_test.cc index a2a35d244103..e99cd9eebbfd 100644 --- a/test/integration/http2_integration_test.cc +++ b/test/integration/http2_integration_test.cc @@ -1494,36 +1494,32 @@ TEST_P(Http2RingHashIntegrationTest, CookieRoutingWithCookieWithTtlSet) { EXPECT_EQ(served_by.size(), 1); } -namespace { -const int64_t TransmitThreshold = 100 * 1024 * 1024; -} // namespace +void Http2FrameIntegrationTest::startHttp2Session() { + ASSERT_TRUE(tcp_client_->write(Http2Frame::Preamble, false, false)); -void Http2FloodMitigationTest::setNetworkConnectionBufferSize() { - // nghttp2 library has its own internal mitigation for outbound control frames (see - // NGHTTP2_DEFAULT_MAX_OBQ_FLOOD_ITEM). The default nghttp2 mitigation threshold of 1K is modified - // to 10K in the ConnectionImpl::Http2Options::Http2Options. The mitigation is triggered when - // there are more than 10000 PING or SETTINGS frames with ACK flag in the nghttp2 internal - // outbound queue. It is possible to trigger this mitigation in nghttp2 before triggering Envoy's - // own flood mitigation. This can happen when a buffer large enough to contain over 10K PING or - // SETTINGS frames is dispatched to the nghttp2 library. To prevent this from happening the - // network connection receive buffer needs to be smaller than 90Kb (which is 10K SETTINGS frames). - // Set it to the arbitrarily chosen value of 32K. Note that this buffer has 16K lower bound. 
- config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { - RELEASE_ASSERT(bootstrap.mutable_static_resources()->listeners_size() >= 1, ""); - auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0); + // Send empty initial SETTINGS frame. + auto settings = Http2Frame::makeEmptySettingsFrame(); + ASSERT_TRUE(tcp_client_->write(std::string(settings), false, false)); - listener->mutable_per_connection_buffer_limit_bytes()->set_value(32 * 1024); - }); + // Read initial SETTINGS frame from the server. + readFrame(); + + // Send an SETTINGS ACK. + settings = Http2Frame::makeEmptySettingsFrame(Http2Frame::SettingsFlags::Ack); + ASSERT_TRUE(tcp_client_->write(std::string(settings), false, false)); + + // read pending SETTINGS and WINDOW_UPDATE frames + readFrame(); + readFrame(); } -void Http2FloodMitigationTest::beginSession() { +void Http2FrameIntegrationTest::beginSession() { setDownstreamProtocol(Http::CodecClient::Type::HTTP2); setUpstreamProtocol(FakeHttpConnection::Type::HTTP2); // set lower outbound frame limits to make tests run faster config_helper_.setOutboundFramesLimits(1000, 100); initialize(); - // Set up a raw connection to easily send requests without reading responses. Also, set a small - // TCP receive buffer to speed up connection backup. + // Set up a raw connection to easily send requests without reading responses. auto options = std::make_shared(); options->emplace_back(std::make_shared( envoy::config::core::v3::SocketOption::STATE_PREBIND, @@ -1532,7 +1528,7 @@ void Http2FloodMitigationTest::beginSession() { startHttp2Session(); } -Http2Frame Http2FloodMitigationTest::readFrame() { +Http2Frame Http2FrameIntegrationTest::readFrame() { Http2Frame frame; EXPECT_TRUE(tcp_client_->waitForData(frame.HeaderSize)); frame.setHeader(tcp_client_->data()); @@ -1546,28 +1542,72 @@ Http2Frame Http2FloodMitigationTest::readFrame() { return frame; } -void Http2FloodMitigationTest::sendFrame(const Http2Frame& frame) { +void Http2FrameIntegrationTest::sendFrame(const Http2Frame& frame) { ASSERT_TRUE(tcp_client_->connected()); ASSERT_TRUE(tcp_client_->write(std::string(frame), false, false)); } -void Http2FloodMitigationTest::startHttp2Session() { - ASSERT_TRUE(tcp_client_->write(Http2Frame::Preamble, false, false)); +// Regression test. +TEST_P(Http2FrameIntegrationTest, SetDetailsTwice) { + autonomous_upstream_ = true; + useAccessLog("%RESPONSE_FLAGS% %RESPONSE_CODE_DETAILS%"); + beginSession(); + fake_upstreams_[0]->set_allow_unexpected_disconnects(true); - // Send empty initial SETTINGS frame. - auto settings = Http2Frame::makeEmptySettingsFrame(); - ASSERT_TRUE(tcp_client_->write(std::string(settings), false, false)); + // Send two concatenated frames, the first with too many headers, and the second an invalid frame + // (push_promise) + std::string bad_frame = + "00006d0104000000014083a8749783ee3a3fbebebebebebebebebebebebebebebebebebebebebebebebebebebebe" + "bebebebebebebebebebebebebebebebebebebebebebebebebebebebebebebebebebebebebebebebebebebebebebe" + "bebebebebebebebebebebebebebebebebebebebebebebebebebe0001010500000000018800a065"; + Http2Frame request = Http2Frame::makeGenericFrameFromHexDump(bad_frame); + sendFrame(request); + tcp_client_->close(); - // Read initial SETTINGS frame from the server. - readFrame(); + // Expect that the details for the first frame are kept. + EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("too_many_headers")); +} - // Send an SETTINGS ACK. 
- settings = Http2Frame::makeEmptySettingsFrame(Http2Frame::SettingsFlags::Ack); - ASSERT_TRUE(tcp_client_->write(std::string(settings), false, false)); +INSTANTIATE_TEST_SUITE_P(IpVersions, Http2FrameIntegrationTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); - // read pending SETTINGS and WINDOW_UPDATE frames - readFrame(); - readFrame(); +namespace { +const int64_t TransmitThreshold = 100 * 1024 * 1024; +} // namespace + +void Http2FloodMitigationTest::setNetworkConnectionBufferSize() { + // nghttp2 library has its own internal mitigation for outbound control frames (see + // NGHTTP2_DEFAULT_MAX_OBQ_FLOOD_ITEM). The default nghttp2 mitigation threshold of 1K is modified + // to 10K in the ConnectionImpl::Http2Options::Http2Options. The mitigation is triggered when + // there are more than 10000 PING or SETTINGS frames with ACK flag in the nghttp2 internal + // outbound queue. It is possible to trigger this mitigation in nghttp2 before triggering Envoy's + // own flood mitigation. This can happen when a buffer large enough to contain over 10K PING or + // SETTINGS frames is dispatched to the nghttp2 library. To prevent this from happening the + // network connection receive buffer needs to be smaller than 90Kb (which is 10K SETTINGS frames). + // Set it to the arbitrarily chosen value of 32K. Note that this buffer has 16K lower bound. + config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { + RELEASE_ASSERT(bootstrap.mutable_static_resources()->listeners_size() >= 1, ""); + auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0); + + listener->mutable_per_connection_buffer_limit_bytes()->set_value(32 * 1024); + }); +} + +void Http2FloodMitigationTest::beginSession() { + setDownstreamProtocol(Http::CodecClient::Type::HTTP2); + setUpstreamProtocol(FakeHttpConnection::Type::HTTP2); + // set lower outbound frame limits to make tests run faster + config_helper_.setOutboundFramesLimits(1000, 100); + initialize(); + // Set up a raw connection to easily send requests without reading responses. Also, set a small + // TCP receive buffer to speed up connection backup. + auto options = std::make_shared(); + options->emplace_back(std::make_shared( + envoy::config::core::v3::SocketOption::STATE_PREBIND, + ENVOY_MAKE_SOCKET_OPTION_NAME(SOL_SOCKET, SO_RCVBUF), 1024)); + tcp_client_ = makeTcpConnection(lookupPort("http"), options); + startHttp2Session(); } // Verify that the server detects the flood of the given frame. 
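The codec-side fix described in this patch's commit message amounts to making the stream's response-code detail write-once, so later nghttp2 callbacks cannot clobber the first, most specific error. A minimal sketch of that guard with a hypothetical holder class (the actual change lives in codec_impl.h / codec_impl_legacy.h):

#include <string>

#include "absl/strings/string_view.h"

// Write-once detail holder: the first setDetails() wins, matching the
// "too_many_headers" expectation in the SetDetailsTwice test above.
class StreamDetailsHolder {
public:
  void setDetails(absl::string_view details) {
    if (details_.empty()) {
      details_ = std::string(details);
    }
  }
  absl::string_view details() const { return details_; }

private:
  std::string details_;
};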
diff --git a/test/integration/http2_integration_test.h b/test/integration/http2_integration_test.h index 11c0477b4c61..bcafbf0c7866 100644 --- a/test/integration/http2_integration_test.h +++ b/test/integration/http2_integration_test.h @@ -67,25 +67,33 @@ class Http2MetadataIntegrationTest : public Http2IntegrationTest { void runHeaderOnlyTest(bool send_request_body, size_t body_size); }; -class Http2FloodMitigationTest : public testing::TestWithParam, - public HttpIntegrationTest { +class Http2FrameIntegrationTest : public testing::TestWithParam, + public HttpIntegrationTest { public: - Http2FloodMitigationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, GetParam()) { + Http2FrameIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, GetParam()) {} + +protected: + void startHttp2Session(); + Http2Frame readFrame(); + void sendFrame(const Http2Frame& frame); + virtual void beginSession(); + + IntegrationTcpClientPtr tcp_client_; +}; + +class Http2FloodMitigationTest : public Http2FrameIntegrationTest { +public: + Http2FloodMitigationTest() { config_helper_.addConfigModifier( [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& hcm) { hcm.mutable_delayed_close_timeout()->set_seconds(1); }); } protected: - void startHttp2Session(); void floodServer(const Http2Frame& frame, const std::string& flood_stat); void floodServer(absl::string_view host, absl::string_view path, Http2Frame::ResponseStatus expected_http_status, const std::string& flood_stat); - Http2Frame readFrame(); - void sendFrame(const Http2Frame& frame); void setNetworkConnectionBufferSize(); - void beginSession(); - - IntegrationTcpClientPtr tcp_client_; + void beginSession() override; }; } // namespace Envoy From 7390addb3b9b31c793a1048b95d7d5c2ba138919 Mon Sep 17 00:00:00 2001 From: jianwen612 <55008549+jianwen612@users.noreply.github.com> Date: Fri, 31 Jul 2020 13:28:32 -0500 Subject: [PATCH 816/909] [fuzz]improved the filtername handle in network_readfilter_fuzz_test (#12399) The previous version works fine in the current version of Envoy. But if a customized version Envoy (without some network-level filters) run this fuzz test, an error may occur because they can't be found in factory. Improved the handle for filter names by getting the intersection of supported_filter_names and filter names in factory. The intersection will only be calculated once since it is static. Signed-off-by: jianwen --- .../common/fuzz/uber_per_readfilter.cc | 29 ++++++++++++++----- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/test/extensions/filters/network/common/fuzz/uber_per_readfilter.cc b/test/extensions/filters/network/common/fuzz/uber_per_readfilter.cc index 7507dd72d4e3..d39dc65e7485 100644 --- a/test/extensions/filters/network/common/fuzz/uber_per_readfilter.cc +++ b/test/extensions/filters/network/common/fuzz/uber_per_readfilter.cc @@ -20,14 +20,27 @@ std::vector UberFilterFuzzer::filterNames() { // Will extend to cover other network filters one by one. 
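As a side note on "the intersection will only be calculated once since it is static": a function-local static with a lambda initializer is the usual way to pay a registry-lookup cost exactly once. The helper below is illustrative only, not the fuzzer's real code; like the static filter_names in the diff that follows, arguments passed on later calls are ignored.

#include <string>
#include <vector>

#include "absl/container/flat_hash_set.h"

// Returns the subset of `supported` that is actually registered, computed on
// the first call only (C++11 guarantees thread-safe static initialization).
const std::vector<std::string>&
supportedAndRegistered(const absl::flat_hash_set<std::string>& registered,
                       const std::vector<std::string>& supported) {
  static const std::vector<std::string> intersection = [&]() {
    std::vector<std::string> out;
    for (const auto& name : supported) {
      if (registered.contains(name)) {
        out.push_back(name);
      }
    }
    return out;
  }();
  return intersection;
}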
static std::vector filter_names; if (filter_names.empty()) { - filter_names = {NetworkFilterNames::get().ExtAuthorization, - NetworkFilterNames::get().LocalRateLimit, - NetworkFilterNames::get().RedisProxy, - NetworkFilterNames::get().ClientSslAuth, - NetworkFilterNames::get().Echo, - NetworkFilterNames::get().DirectResponse, - NetworkFilterNames::get().DubboProxy, - NetworkFilterNames::get().SniCluster}; + const auto factories = Registry::FactoryRegistry< + Server::Configuration::NamedNetworkFilterConfigFactory>::factories(); + const std::vector supported_filter_names = { + NetworkFilterNames::get().ExtAuthorization, + NetworkFilterNames::get().LocalRateLimit, + NetworkFilterNames::get().RedisProxy, + NetworkFilterNames::get().ClientSslAuth, + NetworkFilterNames::get().Echo, + NetworkFilterNames::get().DirectResponse, + NetworkFilterNames::get().DubboProxy, + NetworkFilterNames::get().SniCluster}; + // Check whether each filter is loaded into Envoy. + // Some customers build Envoy without some filters. When they run fuzzing, the use of a filter + // that does not exist will cause fatal errors. + for (auto& filter_name : supported_filter_names) { + if (factories.contains(filter_name)) { + filter_names.push_back(filter_name); + } else { + ENVOY_LOG_MISC(debug, "Filter name not found in the factory: {}", filter_name); + } + } } return filter_names; } From ae0c311b8903e720afcc0a001c34cbbe35cd5153 Mon Sep 17 00:00:00 2001 From: ankatare Date: Sat, 1 Aug 2020 03:20:18 +0530 Subject: [PATCH 817/909] V2 TO V3 fragment changes for extension and server directory under test/.. (#12058) Commit Message: v2 to v3 fragment changes for extension and server directory test cases. Risk Level: Low Testing: unit and format Fixes #10843 Signed-off-by: Abhay Narayan Katare --- .../clusters/redis/redis_cluster_test.cc | 11 +- .../filters/http/router/config_test.cc | 8 +- .../network/dubbo_proxy/config_test.cc | 12 +- .../network/ratelimit/ratelimit_test.cc | 2 +- .../network/rocketmq_proxy/config_test.cc | 9 +- .../filters/network/tcp_proxy/config_test.cc | 2 +- .../filters/ratelimit/config_test.cc | 6 +- .../filters/ratelimit/ratelimit_test.cc | 38 +- .../thrift_proxy/route_matcher_test.cc | 44 +- .../thrift_proxy/router_ratelimit_test.cc | 4 +- .../network/zookeeper_proxy/config_test.cc | 2 +- .../tls/context_impl_test.cc | 22 +- .../api_listener_integration_test.cc | 6 +- test/integration/echo_integration_test.cc | 2 +- test/server/api_listener_test.cc | 6 +- .../listener_manager_impl_quic_only_test.cc | 2 +- test/server/listener_manager_impl_test.cc | 375 +++++++++--------- test/server/utility.h | 5 +- 18 files changed, 284 insertions(+), 272 deletions(-) diff --git a/test/extensions/clusters/redis/redis_cluster_test.cc b/test/extensions/clusters/redis/redis_cluster_test.cc index 284dab85ea05..42a77b8445bd 100644 --- a/test/extensions/clusters/redis/redis_cluster_test.cc +++ b/test/extensions/clusters/redis/redis_cluster_test.cc @@ -90,10 +90,11 @@ class RedisClusterTest : public testing::Test, return addresses; } - void setupFromV3Yaml(const std::string& yaml) { + void setupFromV3Yaml(const std::string& yaml, bool avoid_boosting = true) { expectRedisSessionCreated(); NiceMock cm; - envoy::config::cluster::v3::Cluster cluster_config = Upstream::parseClusterFromV3Yaml(yaml); + envoy::config::cluster::v3::Cluster cluster_config = + Upstream::parseClusterFromV3Yaml(yaml, avoid_boosting); Envoy::Stats::ScopePtr scope = stats_store_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? 
cluster_config.name() : cluster_config.alt_stat_name())); @@ -121,7 +122,7 @@ class RedisClusterTest : public testing::Test, }); } - void setupFactoryFromV2Yaml(const std::string& yaml) { + void setupFactoryFromV3Yaml(const std::string& yaml) { NiceMock cm; envoy::config::cluster::v3::Cluster cluster_config = Upstream::parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_store_.createScope(fmt::format( @@ -793,12 +794,12 @@ TEST_F(RedisClusterTest, FactoryInitNotRedisClusterTypeFailure) { cluster_refresh_timeout: 0.25s )EOF"; - EXPECT_THROW_WITH_MESSAGE(setupFactoryFromV2Yaml(basic_yaml_hosts), EnvoyException, + EXPECT_THROW_WITH_MESSAGE(setupFactoryFromV3Yaml(basic_yaml_hosts), EnvoyException, "Redis cluster can only created with redis cluster type."); } TEST_F(RedisClusterTest, FactoryInitRedisClusterTypeSuccess) { - setupFactoryFromV2Yaml(BasicConfig); + setupFactoryFromV3Yaml(BasicConfig); } TEST_F(RedisClusterTest, RedisErrorResponse) { diff --git a/test/extensions/filters/http/router/config_test.cc b/test/extensions/filters/http/router/config_test.cc index 34aa098a76ec..1808920fd610 100644 --- a/test/extensions/filters/http/router/config_test.cc +++ b/test/extensions/filters/http/router/config_test.cc @@ -27,7 +27,7 @@ TEST(RouterFilterConfigTest, SimpleRouterFilterConfig) { )EOF"; envoy::extensions::filters::http::router::v3::Router proto_config; - TestUtility::loadFromYaml(yaml_string, proto_config); + TestUtility::loadFromYaml(yaml_string, proto_config, false, true); NiceMock context; RouterFilterConfig factory; Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, "stats.", context); @@ -43,8 +43,8 @@ TEST(RouterFilterConfigTest, BadRouterFilterConfig) { )EOF"; envoy::extensions::filters::http::router::v3::Router proto_config; - EXPECT_THROW_WITH_REGEX(TestUtility::loadFromYaml(yaml_string, proto_config), EnvoyException, - "route: Cannot find field"); + EXPECT_THROW_WITH_REGEX(TestUtility::loadFromYaml(yaml_string, proto_config, false, true), + EnvoyException, "route: Cannot find field"); } TEST(RouterFilterConfigTest, RouterFilterWithUnsupportedStrictHeaderCheck) { @@ -54,7 +54,7 @@ TEST(RouterFilterConfigTest, RouterFilterWithUnsupportedStrictHeaderCheck) { )EOF"; envoy::extensions::filters::http::router::v3::Router router_config; - TestUtility::loadFromYaml(yaml, router_config); + TestUtility::loadFromYaml(yaml, router_config, false, true); NiceMock context; RouterFilterConfig factory; diff --git a/test/extensions/filters/network/dubbo_proxy/config_test.cc b/test/extensions/filters/network/dubbo_proxy/config_test.cc index c72283f56a3d..bdf4b37204d0 100644 --- a/test/extensions/filters/network/dubbo_proxy/config_test.cc +++ b/test/extensions/filters/network/dubbo_proxy/config_test.cc @@ -22,9 +22,9 @@ using DubboProxyProto = envoy::extensions::filters::network::dubbo_proxy::v3::Du namespace { -DubboProxyProto parseDubboProxyFromV2Yaml(const std::string& yaml) { +DubboProxyProto parseDubboProxyFromV3Yaml(const std::string& yaml, bool avoid_boosting = true) { DubboProxyProto dubbo_proxy; - TestUtility::loadFromYaml(yaml, dubbo_proxy); + TestUtility::loadFromYaml(yaml, dubbo_proxy, false, avoid_boosting); return dubbo_proxy; } @@ -92,7 +92,7 @@ TEST_F(DubboFilterConfigTest, DubboProxyWithExplicitRouterConfig) { - name: envoy.filters.dubbo.router )EOF"; - DubboProxyProto config = parseDubboProxyFromV2Yaml(yaml); + DubboProxyProto config = parseDubboProxyFromV3Yaml(yaml); testConfig(config); } @@ -107,7 +107,7 @@ TEST_F(DubboFilterConfigTest, 
DubboProxyWithUnknownFilter) { - name: envoy.filters.dubbo.router )EOF"; - DubboProxyProto config = parseDubboProxyFromV2Yaml(yaml); + DubboProxyProto config = parseDubboProxyFromV3Yaml(yaml); EXPECT_THROW_WITH_REGEX(factory_.createFilterFactoryFromProto(config, context_), EnvoyException, "no_such_filter"); @@ -131,7 +131,7 @@ TEST_F(DubboFilterConfigTest, DubboProxyWithMultipleFilters) { DubboFilters::MockFilterConfigFactory factory; Registry::InjectFactory registry(factory); - DubboProxyProto config = parseDubboProxyFromV2Yaml(yaml); + DubboProxyProto config = parseDubboProxyFromV3Yaml(yaml); testConfig(config); EXPECT_EQ(1, factory.config_struct_.fields_size()); @@ -156,7 +156,7 @@ TEST_F(DubboFilterConfigTest, CreateFilterChain) { DubboFilters::MockFilterConfigFactory factory; Registry::InjectFactory registry(factory); - DubboProxyProto dubbo_config = parseDubboProxyFromV2Yaml(yaml); + DubboProxyProto dubbo_config = parseDubboProxyFromV3Yaml(yaml); NiceMock context; DubboFilters::MockFilterChainFactoryCallbacks callbacks; diff --git a/test/extensions/filters/network/ratelimit/ratelimit_test.cc b/test/extensions/filters/network/ratelimit/ratelimit_test.cc index e77f6080cba8..96bc8baf679d 100644 --- a/test/extensions/filters/network/ratelimit/ratelimit_test.cc +++ b/test/extensions/filters/network/ratelimit/ratelimit_test.cc @@ -40,7 +40,7 @@ class RateLimitFilterTest : public testing::Test { .WillByDefault(Return(true)); envoy::extensions::filters::network::ratelimit::v3::RateLimit proto_config{}; - TestUtility::loadFromYaml(yaml, proto_config); + TestUtility::loadFromYaml(yaml, proto_config, false, true); config_ = std::make_shared(proto_config, stats_store_, runtime_); client_ = new Filters::Common::RateLimit::MockClient(); filter_ = std::make_unique(config_, Filters::Common::RateLimit::ClientPtr{client_}); diff --git a/test/extensions/filters/network/rocketmq_proxy/config_test.cc b/test/extensions/filters/network/rocketmq_proxy/config_test.cc index aa56bbe0a29c..3030522c9eb7 100644 --- a/test/extensions/filters/network/rocketmq_proxy/config_test.cc +++ b/test/extensions/filters/network/rocketmq_proxy/config_test.cc @@ -22,9 +22,10 @@ namespace RocketmqProxy { using RocketmqProxyProto = envoy::extensions::filters::network::rocketmq_proxy::v3::RocketmqProxy; -RocketmqProxyProto parseRocketmqProxyFromV2Yaml(const std::string& yaml) { +RocketmqProxyProto parseRocketmqProxyFromV3Yaml(const std::string& yaml, + bool avoid_boosting = true) { RocketmqProxyProto rocketmq_proxy; - TestUtility::loadFromYaml(yaml, rocketmq_proxy); + TestUtility::loadFromYaml(yaml, rocketmq_proxy, false, avoid_boosting); return rocketmq_proxy; } @@ -86,7 +87,7 @@ TEST_F(RocketmqFilterConfigTest, RocketmqProxyWithFullConfig) { transient_object_life_span: seconds: 30 )EOF"; - RocketmqProxyProto config = parseRocketmqProxyFromV2Yaml(yaml); + RocketmqProxyProto config = parseRocketmqProxyFromV3Yaml(yaml); testConfig(config); } @@ -168,4 +169,4 @@ TEST_F(RocketmqFilterConfigTest, ProxyAddressWithNonIpType) { } // namespace RocketmqProxy } // namespace NetworkFilters } // namespace Extensions -} // namespace Envoy \ No newline at end of file +} // namespace Envoy diff --git a/test/extensions/filters/network/tcp_proxy/config_test.cc b/test/extensions/filters/network/tcp_proxy/config_test.cc index 3f2296dfc5d7..ff74cf1cb0f8 100644 --- a/test/extensions/filters/network/tcp_proxy/config_test.cc +++ b/test/extensions/filters/network/tcp_proxy/config_test.cc @@ -91,7 +91,7 @@ TEST_P(RouteIpListConfigTest, 
DEPRECATED_FEATURE_TEST(TcpProxy)) { )EOF"; envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy proto_config; - TestUtility::loadFromJson(json_string, proto_config, true); + TestUtility::loadFromJson(json_string, proto_config, true, false); NiceMock context; ConfigFactory factory; diff --git a/test/extensions/filters/network/thrift_proxy/filters/ratelimit/config_test.cc b/test/extensions/filters/network/thrift_proxy/filters/ratelimit/config_test.cc index 98d1e8eebef6..98bcf3d482db 100644 --- a/test/extensions/filters/network/thrift_proxy/filters/ratelimit/config_test.cc +++ b/test/extensions/filters/network/thrift_proxy/filters/ratelimit/config_test.cc @@ -19,9 +19,9 @@ namespace RateLimitFilter { namespace { envoy::extensions::filters::network::thrift_proxy::filters::ratelimit::v3::RateLimit -parseRateLimitFromV2Yaml(const std::string& yaml) { +parseRateLimitFromV3Yaml(const std::string& yaml, bool avoid_boosting = true) { envoy::extensions::filters::network::thrift_proxy::filters::ratelimit::v3::RateLimit rate_limit; - TestUtility::loadFromYaml(yaml, rate_limit); + TestUtility::loadFromYaml(yaml, rate_limit, false, avoid_boosting); return rate_limit; } @@ -46,7 +46,7 @@ timeout: "1.337s" cluster_name: ratelimit_cluster )EOF"; - auto proto_config = parseRateLimitFromV2Yaml(yaml_string); + auto proto_config = parseRateLimitFromV3Yaml(yaml_string); NiceMock context; diff --git a/test/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit_test.cc b/test/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit_test.cc index 292fad5500a6..25659e77b6ef 100644 --- a/test/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit_test.cc +++ b/test/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit_test.cc @@ -50,10 +50,10 @@ class ThriftRateLimitFilterTest : public testing::Test { .WillByDefault(Return(true)); } - void SetUpTest(const std::string& yaml) { + void setupTest(const std::string& yaml) { envoy::extensions::filters::network::thrift_proxy::filters::ratelimit::v3::RateLimit proto_config{}; - TestUtility::loadFromYaml(yaml, proto_config); + TestUtility::loadFromYaml(yaml, proto_config, false, true); config_ = std::make_shared(proto_config, local_info_, stats_store_, runtime_, cm_); @@ -94,7 +94,7 @@ class ThriftRateLimitFilterTest : public testing::Test { }; TEST_F(ThriftRateLimitFilterTest, NoRoute) { - SetUpTest(filter_config_); + setupTest(filter_config_); EXPECT_CALL(*filter_callbacks_.route_, routeEntry()).WillOnce(Return(nullptr)); @@ -165,7 +165,7 @@ TEST_F(ThriftRateLimitFilterTest, NoRoute) { } TEST_F(ThriftRateLimitFilterTest, NoCluster) { - SetUpTest(filter_config_); + setupTest(filter_config_); ON_CALL(cm_, get(_)).WillByDefault(Return(nullptr)); @@ -173,7 +173,7 @@ TEST_F(ThriftRateLimitFilterTest, NoCluster) { } TEST_F(ThriftRateLimitFilterTest, NoApplicableRateLimit) { - SetUpTest(filter_config_); + setupTest(filter_config_); filter_callbacks_.route_->route_entry_.rate_limit_policy_.rate_limit_policy_entry_.clear(); EXPECT_CALL(*client_, limit(_, _, _, _)).Times(0); @@ -182,7 +182,7 @@ TEST_F(ThriftRateLimitFilterTest, NoApplicableRateLimit) { } TEST_F(ThriftRateLimitFilterTest, NoDescriptor) { - SetUpTest(filter_config_); + setupTest(filter_config_); EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)).Times(1); EXPECT_CALL(*client_, limit(_, _, _, _)).Times(0); @@ -191,7 +191,7 @@ TEST_F(ThriftRateLimitFilterTest, NoDescriptor) { } TEST_F(ThriftRateLimitFilterTest, RuntimeDisabled) { - 
SetUpTest(filter_config_); + setupTest(filter_config_); EXPECT_CALL(runtime_.snapshot_, featureEnabled("ratelimit.thrift_filter_enabled", 100)) .WillOnce(Return(false)); @@ -200,7 +200,7 @@ TEST_F(ThriftRateLimitFilterTest, RuntimeDisabled) { } TEST_F(ThriftRateLimitFilterTest, OkResponse) { - SetUpTest(filter_config_); + setupTest(filter_config_); InSequence s; EXPECT_CALL(filter_callbacks_.route_->route_entry_.rate_limit_policy_, getApplicableRateLimit(0)) @@ -233,7 +233,7 @@ TEST_F(ThriftRateLimitFilterTest, OkResponse) { } TEST_F(ThriftRateLimitFilterTest, ImmediateOkResponse) { - SetUpTest(filter_config_); + setupTest(filter_config_); InSequence s; EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)) @@ -256,7 +256,7 @@ TEST_F(ThriftRateLimitFilterTest, ImmediateOkResponse) { } TEST_F(ThriftRateLimitFilterTest, ImmediateErrorResponse) { - SetUpTest(filter_config_); + setupTest(filter_config_); InSequence s; EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)) @@ -283,7 +283,7 @@ TEST_F(ThriftRateLimitFilterTest, ImmediateErrorResponse) { } TEST_F(ThriftRateLimitFilterTest, ErrorResponse) { - SetUpTest(filter_config_); + setupTest(filter_config_); InSequence s; EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)) @@ -313,7 +313,7 @@ TEST_F(ThriftRateLimitFilterTest, ErrorResponse) { } TEST_F(ThriftRateLimitFilterTest, ErrorResponseWithFailureModeAllowOff) { - SetUpTest(fail_close_config_); + setupTest(fail_close_config_); InSequence s; EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)) @@ -345,7 +345,7 @@ TEST_F(ThriftRateLimitFilterTest, ErrorResponseWithFailureModeAllowOff) { } TEST_F(ThriftRateLimitFilterTest, LimitResponse) { - SetUpTest(filter_config_); + setupTest(filter_config_); InSequence s; EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)) @@ -376,7 +376,7 @@ TEST_F(ThriftRateLimitFilterTest, LimitResponse) { } TEST_F(ThriftRateLimitFilterTest, LimitResponseWithHeaders) { - SetUpTest(filter_config_); + setupTest(filter_config_); InSequence s; EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)) @@ -409,7 +409,7 @@ TEST_F(ThriftRateLimitFilterTest, LimitResponseWithHeaders) { } TEST_F(ThriftRateLimitFilterTest, LimitResponseRuntimeDisabled) { - SetUpTest(filter_config_); + setupTest(filter_config_); InSequence s; EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)) @@ -434,7 +434,7 @@ TEST_F(ThriftRateLimitFilterTest, LimitResponseRuntimeDisabled) { } TEST_F(ThriftRateLimitFilterTest, ResetDuringCall) { - SetUpTest(filter_config_); + setupTest(filter_config_); InSequence s; EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _)) @@ -453,7 +453,7 @@ TEST_F(ThriftRateLimitFilterTest, ResetDuringCall) { TEST_F(ThriftRateLimitFilterTest, RouteRateLimitDisabledForRouteKey) { route_rate_limit_.disable_key_ = "test_key"; - SetUpTest(filter_config_); + setupTest(filter_config_); ON_CALL(runtime_.snapshot_, featureEnabled("ratelimit.test_key.thrift_filter_enabled", 100)) .WillByDefault(Return(false)); @@ -472,7 +472,7 @@ TEST_F(ThriftRateLimitFilterTest, ConfigValueTest) { } )EOF"; - SetUpTest(stage_filter_config); + setupTest(stage_filter_config); EXPECT_EQ(5UL, config_->stage()); EXPECT_EQ("foo", config_->domain()); @@ -485,7 +485,7 @@ TEST_F(ThriftRateLimitFilterTest, DefaultConfigValueTest) { } )EOF"; - SetUpTest(stage_filter_config); + setupTest(stage_filter_config); EXPECT_EQ(0UL, config_->stage()); EXPECT_EQ("foo", config_->domain()); diff --git 
a/test/extensions/filters/network/thrift_proxy/route_matcher_test.cc b/test/extensions/filters/network/thrift_proxy/route_matcher_test.cc index 0e89c355ae26..079f646d3a91 100644 --- a/test/extensions/filters/network/thrift_proxy/route_matcher_test.cc +++ b/test/extensions/filters/network/thrift_proxy/route_matcher_test.cc @@ -21,9 +21,9 @@ namespace Router { namespace { envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration -parseRouteConfigurationFromV2Yaml(const std::string& yaml) { +parseRouteConfigurationFromV3Yaml(const std::string& yaml, bool avoid_boosting = true) { envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration route_config; - TestUtility::loadFromYaml(yaml, route_config); + TestUtility::loadFromYaml(yaml, route_config, false, avoid_boosting); TestUtility::validate(route_config); return route_config; } @@ -43,7 +43,7 @@ name: config )EOF"; envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config = - parseRouteConfigurationFromV2Yaml(yaml); + parseRouteConfigurationFromV3Yaml(yaml); RouteMatcher matcher(config); MessageMetadata metadata; @@ -80,7 +80,7 @@ name: config )EOF"; envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config = - parseRouteConfigurationFromV2Yaml(yaml); + parseRouteConfigurationFromV3Yaml(yaml); RouteMatcher matcher(config); MessageMetadata metadata; @@ -123,7 +123,7 @@ name: config )EOF"; envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config = - parseRouteConfigurationFromV2Yaml(yaml); + parseRouteConfigurationFromV3Yaml(yaml); RouteMatcher matcher(config); @@ -160,7 +160,7 @@ name: config )EOF"; envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config = - parseRouteConfigurationFromV2Yaml(yaml); + parseRouteConfigurationFromV3Yaml(yaml); EXPECT_THROW(new RouteMatcher(config), EnvoyException); } @@ -180,7 +180,7 @@ name: config )EOF"; envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config = - parseRouteConfigurationFromV2Yaml(yaml); + parseRouteConfigurationFromV3Yaml(yaml); RouteMatcher matcher(config); MessageMetadata metadata; @@ -217,7 +217,7 @@ name: config )EOF"; envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config = - parseRouteConfigurationFromV2Yaml(yaml); + parseRouteConfigurationFromV3Yaml(yaml); RouteMatcher matcher(config); MessageMetadata metadata; @@ -260,7 +260,7 @@ name: config )EOF"; envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config = - parseRouteConfigurationFromV2Yaml(yaml); + parseRouteConfigurationFromV3Yaml(yaml); RouteMatcher matcher(config); @@ -297,7 +297,7 @@ name: config )EOF"; envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config = - parseRouteConfigurationFromV2Yaml(yaml); + parseRouteConfigurationFromV3Yaml(yaml); EXPECT_THROW(new RouteMatcher(config), EnvoyException); } @@ -316,7 +316,7 @@ name: config )EOF"; envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config = - parseRouteConfigurationFromV2Yaml(yaml); + parseRouteConfigurationFromV3Yaml(yaml); RouteMatcher matcher(config); MessageMetadata metadata; @@ -349,7 +349,7 @@ name: config )EOF"; envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config = - parseRouteConfigurationFromV2Yaml(yaml); + parseRouteConfigurationFromV3Yaml(yaml); RouteMatcher matcher(config); MessageMetadata metadata; @@ -387,7 +387,7 @@ name: config )EOF"; 
envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config = - parseRouteConfigurationFromV2Yaml(yaml); + parseRouteConfigurationFromV3Yaml(yaml); RouteMatcher matcher(config); MessageMetadata metadata; @@ -423,7 +423,7 @@ name: config )EOF"; envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config = - parseRouteConfigurationFromV2Yaml(yaml); + parseRouteConfigurationFromV3Yaml(yaml); RouteMatcher matcher(config); MessageMetadata metadata; @@ -460,7 +460,7 @@ name: config )EOF"; envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config = - parseRouteConfigurationFromV2Yaml(yaml); + parseRouteConfigurationFromV3Yaml(yaml); RouteMatcher matcher(config); MessageMetadata metadata; @@ -496,7 +496,7 @@ name: config )EOF"; envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config = - parseRouteConfigurationFromV2Yaml(yaml); + parseRouteConfigurationFromV3Yaml(yaml); RouteMatcher matcher(config); MessageMetadata metadata; @@ -534,7 +534,7 @@ name: config )EOF"; envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config = - parseRouteConfigurationFromV2Yaml(yaml); + parseRouteConfigurationFromV3Yaml(yaml); RouteMatcher matcher(config); MessageMetadata metadata; @@ -590,7 +590,7 @@ name: config )EOF"; envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config = - parseRouteConfigurationFromV2Yaml(yaml); + parseRouteConfigurationFromV3Yaml(yaml); RouteMatcher matcher(config); MessageMetadata metadata; @@ -634,7 +634,7 @@ name: config )EOF"; const envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config = - parseRouteConfigurationFromV2Yaml(yaml); + parseRouteConfigurationFromV3Yaml(yaml); EXPECT_THROW(RouteMatcher m(config), EnvoyException); } @@ -658,7 +658,7 @@ name: config )EOF"; const envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config = - parseRouteConfigurationFromV2Yaml(yaml); + parseRouteConfigurationFromV3Yaml(yaml); RouteMatcher matcher(config); MessageMetadata metadata; @@ -730,7 +730,7 @@ name: config )EOF"; const envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config = - parseRouteConfigurationFromV2Yaml(yaml); + parseRouteConfigurationFromV3Yaml(yaml); RouteMatcher matcher(config); MessageMetadata metadata; metadata.setMethodName("method1"); @@ -819,7 +819,7 @@ name: config )EOF"; const envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration config = - parseRouteConfigurationFromV2Yaml(yaml); + parseRouteConfigurationFromV3Yaml(yaml); RouteMatcher matcher(config); MessageMetadata metadata; metadata.setMethodName("method1"); diff --git a/test/extensions/filters/network/thrift_proxy/router_ratelimit_test.cc b/test/extensions/filters/network/thrift_proxy/router_ratelimit_test.cc index 4460b8c11c6e..0813d5859026 100644 --- a/test/extensions/filters/network/thrift_proxy/router_ratelimit_test.cc +++ b/test/extensions/filters/network/thrift_proxy/router_ratelimit_test.cc @@ -31,9 +31,9 @@ namespace { class ThriftRateLimitConfigurationTest : public testing::Test { public: - void initialize(const std::string& yaml) { + void initialize(const std::string& yaml, bool avoid_boosting = true) { envoy::extensions::filters::network::thrift_proxy::v3::ThriftProxy config; - TestUtility::loadFromYaml(yaml, config); + TestUtility::loadFromYaml(yaml, config, false, avoid_boosting); initialize(config); } diff --git a/test/extensions/filters/network/zookeeper_proxy/config_test.cc 
b/test/extensions/filters/network/zookeeper_proxy/config_test.cc index 34dc5ca0a651..b133a8f5075b 100644 --- a/test/extensions/filters/network/zookeeper_proxy/config_test.cc +++ b/test/extensions/filters/network/zookeeper_proxy/config_test.cc @@ -49,7 +49,7 @@ stat_prefix: test_prefix )EOF"; ZooKeeperProxyProtoConfig proto_config; - TestUtility::loadFromYamlAndValidate(yaml, proto_config); + TestUtility::loadFromYamlAndValidate(yaml, proto_config, false, true); testing::NiceMock context; ZooKeeperConfigFactory factory; diff --git a/test/extensions/transport_sockets/tls/context_impl_test.cc b/test/extensions/transport_sockets/tls/context_impl_test.cc index 053f6468796a..60cec6e1fe17 100644 --- a/test/extensions/transport_sockets/tls/context_impl_test.cc +++ b/test/extensions/transport_sockets/tls/context_impl_test.cc @@ -591,9 +591,10 @@ class SslServerContextImplTicketTest : public SslContextImplTest { loadConfig(server_context_config); } - void loadConfigYaml(const std::string& yaml) { + void loadConfigYaml(const std::string& yaml, bool avoid_boosting = true) { envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context; - TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), tls_context); + TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), tls_context, false, + avoid_boosting); ServerContextConfigImpl cfg(tls_context, factory_context_); loadConfig(cfg); } @@ -808,14 +809,15 @@ TEST_F(SslServerContextImplTicketTest, CRLWithNoCA) { TEST_F(SslServerContextImplTicketTest, VerifySanWithNoCA) { const std::string yaml = R"EOF( - common_tls_context: - tls_certificates: - certificate_chain: - filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem" - private_key: - filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem" - validation_context: - verify_subject_alt_name: "spiffe://lyft.com/testclient" + common_tls_context: + tls_certificates: + certificate_chain: + filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem" + private_key: + filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem" + validation_context: + match_subject_alt_names: + exact : "spiffe://lyft.com/testclient" )EOF"; EXPECT_THROW_WITH_MESSAGE(loadConfigYaml(yaml), EnvoyException, "SAN-based verification of peer certificates without trusted CA " diff --git a/test/integration/api_listener_integration_test.cc b/test/integration/api_listener_integration_test.cc index 0005e9a83f56..e4a206a41a15 100644 --- a/test/integration/api_listener_integration_test.cc +++ b/test/integration/api_listener_integration_test.cc @@ -24,8 +24,12 @@ class ApiListenerIntegrationTest : public BaseIntegrationTest, void SetUp() override { config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + // currently ApiListener does not trigger this wait + // https://github.com/envoyproxy/envoy/blob/0b92c58d08d28ba7ef0ed5aaf44f90f0fccc5dce/test/integration/integration.cc#L454 + // Thus, the ApiListener has to be added in addition to the already existing listener in the + // config. 
bootstrap.mutable_static_resources()->mutable_listeners(0)->MergeFrom( - Server::parseListenerFromV2Yaml(apiListenerConfig())); + Server::parseListenerFromV3Yaml(apiListenerConfig())); }); } diff --git a/test/integration/echo_integration_test.cc b/test/integration/echo_integration_test.cc index 001247c3c563..b965d3254d1e 100644 --- a/test/integration/echo_integration_test.cc +++ b/test/integration/echo_integration_test.cc @@ -67,7 +67,7 @@ name: new_listener [&listener_added_by_worker]() -> void { listener_added_by_worker.setReady(); }); test_server_->server().dispatcher().post([this, json, &listener_added_by_manager]() -> void { EXPECT_TRUE(test_server_->server().listenerManager().addOrUpdateListener( - Server::parseListenerFromV2Yaml(json), "", true)); + Server::parseListenerFromV3Yaml(json), "", true)); listener_added_by_manager.setReady(); }); listener_added_by_worker.waitReady(); diff --git a/test/server/api_listener_test.cc b/test/server/api_listener_test.cc index aec3e64d2bf7..ff9fa0d02fd0 100644 --- a/test/server/api_listener_test.cc +++ b/test/server/api_listener_test.cc @@ -55,7 +55,7 @@ name: test_api_listener cluster: dynamic_forward_proxy_cluster )EOF"; - const envoy::config::listener::v3::Listener config = parseListenerFromV2Yaml(yaml); + const envoy::config::listener::v3::Listener config = parseListenerFromV3Yaml(yaml); auto http_api_listener = HttpApiListener(config, *listener_manager_, config.name()); @@ -81,7 +81,7 @@ name: test_api_listener path: eds path )EOF"; - const envoy::config::listener::v3::Listener config = parseListenerFromV2Yaml(yaml); + const envoy::config::listener::v3::Listener config = parseListenerFromV3Yaml(yaml); EXPECT_THROW_WITH_MESSAGE( HttpApiListener(config, *listener_manager_, config.name()), EnvoyException, @@ -115,7 +115,7 @@ name: test_api_listener cluster: dynamic_forward_proxy_cluster )EOF"; - const envoy::config::listener::v3::Listener config = parseListenerFromV2Yaml(yaml); + const envoy::config::listener::v3::Listener config = parseListenerFromV3Yaml(yaml); auto http_api_listener = HttpApiListener(config, *listener_manager_, config.name()); diff --git a/test/server/listener_manager_impl_quic_only_test.cc b/test/server/listener_manager_impl_quic_only_test.cc index a4465dcdb937..b19f2458a6d7 100644 --- a/test/server/listener_manager_impl_quic_only_test.cc +++ b/test/server/listener_manager_impl_quic_only_test.cc @@ -47,7 +47,7 @@ reuse_port: true )EOF", Network::Address::IpVersion::v4); - envoy::config::listener::v3::Listener listener_proto = parseListenerFromV2Yaml(yaml); + envoy::config::listener::v3::Listener listener_proto = parseListenerFromV3Yaml(yaml); EXPECT_CALL(server_.random_, uuid()); expectCreateListenSocket(envoy::config::core::v3::SocketOption::STATE_PREBIND, #ifdef SO_RXQ_OVFL // SO_REUSEPORT is on as configured diff --git a/test/server/listener_manager_impl_test.cc b/test/server/listener_manager_impl_test.cc index 22dfc81d3bc3..0afea3df6b0f 100644 --- a/test/server/listener_manager_impl_test.cc +++ b/test/server/listener_manager_impl_test.cc @@ -55,7 +55,7 @@ class ListenerManagerImplWithRealFiltersTest : public ListenerManagerImplTest { * Create an IPv4 listener with a given name. 
*/ envoy::config::listener::v3::Listener createIPv4Listener(const std::string& name) { - envoy::config::listener::v3::Listener listener = parseListenerFromV2Yaml(R"EOF( + envoy::config::listener::v3::Listener listener = parseListenerFromV3Yaml(R"EOF( address: socket_address: { address: 127.0.0.1, port_value: 1111 } filter_chains: @@ -165,7 +165,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, EmptyFilter) { EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); EXPECT_EQ(std::chrono::milliseconds(15000), manager_->listeners().front().get().listenerFiltersTimeout()); @@ -182,7 +182,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, DefaultListenerPerConnectionBuffe )EOF"; EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1024 * 1024U, manager_->listeners().back().get().perConnectionBufferLimitBytes()); } @@ -198,7 +198,7 @@ per_connection_buffer_limit_bytes: 8192 )EOF"; EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(8192U, manager_->listeners().back().get().perConnectionBufferLimitBytes()); } @@ -230,7 +230,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, TlsTransportSocket) { Network::Address::IpVersion::v4); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); auto filter_chain = findFilterChain(1234, "127.0.0.1", "", "tls", {}, "8.8.8.8", 111); @@ -267,7 +267,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, Network::Address::IpVersion::v4); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); auto filter_chain = findFilterChain(1234, "127.0.0.1", "", "tls", {}, "8.8.8.8", 111); @@ -283,24 +283,27 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, DEPRECATED_FEATURE_TEST(TlsContex port_value: 1234 filter_chains: - filters: [] - tls_context: - common_tls_context: - tls_certificates: - - certificate_chain: - filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem" - private_key: - filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_key.pem" - validation_context: - trusted_ca: - filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem" - verify_subject_alt_name: - - localhost - - 127.0.0.1 + transport_socket: + name: tls + typed_config: + "@type": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext + common_tls_context: + tls_certificates: + - certificate_chain: + filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem" + private_key: + filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_key.pem" + 
validation_context: + trusted_ca: + filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem" + match_subject_alt_names: + exact: localhost + exact: 127.0.0.1 )EOF", Network::Address::IpVersion::v4); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); auto filter_chain = findFilterChain(1234, "127.0.0.1", "", "tls", {}, "8.8.8.8", 111); @@ -359,7 +362,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, BadListenerConfig) { test: a )EOF"; - EXPECT_THROW_WITH_REGEX(manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true), + EXPECT_THROW_WITH_REGEX(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), EnvoyException, "test: Cannot find field"); } @@ -371,7 +374,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, BadListenerConfigNoFilterChains) port_value: 1234 )EOF"; - EXPECT_THROW_WITH_REGEX(manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true), + EXPECT_THROW_WITH_REGEX(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), EnvoyException, "no filter chains specified"); } @@ -387,7 +390,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, BadListenerConfig2UDPListenerFilt - name: envoy.filters.listener.original_dst )EOF"; - EXPECT_THROW_WITH_REGEX(manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true), + EXPECT_THROW_WITH_REGEX(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), EnvoyException, "Only 1 UDP listener filter per listener supported"); } @@ -404,7 +407,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, BadFilterConfig) { typed_config: {} )EOF"; - EXPECT_THROW_WITH_REGEX(manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true), + EXPECT_THROW_WITH_REGEX(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), EnvoyException, "foo: Cannot find field"); } class NonTerminalFilterFactory : public Configuration::NamedNetworkFilterConfigFactory { @@ -437,11 +440,11 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, TerminalNotLast) { filter_chains: - filters: - name: non_terminal - config: {} + typed_config: {} )EOF"; EXPECT_THROW_WITH_REGEX( - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true), EnvoyException, + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), EnvoyException, "Error: non-terminal filter named non_terminal of type non_terminal is the last " "filter in a network filter chain."); } @@ -455,13 +458,13 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, NotTerminalLast) { filter_chains: - filters: - name: envoy.filters.network.tcp_proxy - config: {} + typed_config: {} - name: unknown_but_will_not_be_processed - config: {} + typed_config: {} )EOF"; EXPECT_THROW_WITH_REGEX( - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true), EnvoyException, + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), EnvoyException, "Error: terminal filter named envoy.filters.network.tcp_proxy of type " "envoy.filters.network.tcp_proxy must be the last filter in a network filter chain."); } @@ -475,10 +478,10 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, BadFilterName) { filter_chains: - filters: - name: invalid - config: {} + typed_config: {} )EOF"; - EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", 
true), + EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), EnvoyException, "Didn't find a registered implementation for name: 'invalid'"); } @@ -522,11 +525,11 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, StatsScopeTest) { filter_chains: - filters: - name: stats_test - config: {} + typed_config: {} )EOF"; EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, ListenSocketCreationParams(false))); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); manager_->listeners().front().get().listenerScope().counterFromString("foo").inc(); EXPECT_EQ(1UL, server_.stats_store_.counterFromString("bar").value()); @@ -544,7 +547,7 @@ TEST_F(ListenerManagerImplTest, NotDefaultListenerFiltersTimeout) { )EOF"; EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true)); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true)); EXPECT_EQ(std::chrono::milliseconds(), manager_->listeners().front().get().listenerFiltersTimeout()); } @@ -565,7 +568,7 @@ TEST_F(ListenerManagerImplTest, ModifyOnlyDrainType) { ListenerHandle* listener_foo = expectListenerCreate(false, true, envoy::config::listener::v3::Listener::MODIFY_ONLY); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); EXPECT_CALL(*listener_foo, onDestroy()); @@ -589,7 +592,7 @@ drain_type: default ListenerHandle* listener_foo = expectListenerCreate(false, true); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); // Update foo listener, but with a different address. Should throw. 
@@ -608,7 +611,7 @@ drain_type: modify_only expectListenerCreate(false, true, envoy::config::listener::v3::Listener::MODIFY_ONLY); EXPECT_CALL(*listener_foo_different_address, onDestroy()); EXPECT_THROW_WITH_MESSAGE( - manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_different_address_yaml), + manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_different_address_yaml), "", true), EnvoyException, "error updating listener: 'foo' has a different address " @@ -644,7 +647,7 @@ drain_type: default EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); EXPECT_CALL(*listener_foo, onDestroy()); } @@ -676,7 +679,7 @@ drain_type: default EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); EXPECT_CALL(*listener_foo, onDestroy()); } @@ -700,12 +703,12 @@ name: foo ListenerHandle* listener_foo = expectListenerCreate(false, false); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", false)); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", false)); checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); checkConfigDump(R"EOF( static_listeners: listener: - "@type": type.googleapis.com/envoy.api.v2.Listener + "@type": type.googleapis.com/envoy.config.listener.v3.Listener name: "foo" address: socket_address: @@ -727,11 +730,11 @@ name: foo filter_chains: - filters: - name: fake - config: {} + typed_config: {} )EOF"; EXPECT_FALSE( - manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_update1_yaml), "", false)); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_update1_yaml), "", false)); checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); // Remove foo listener. Should be blocked. @@ -781,7 +784,7 @@ filter_chains: {} ListenerHandle* listener_foo = expectListenerCreate(true, true); EXPECT_CALL(server_, initManager()).WillOnce(ReturnRef(server_init_mgr)); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "version1", true)); checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); @@ -794,7 +797,7 @@ version_info: version1 warming_state: version_info: version1 listener: - "@type": type.googleapis.com/envoy.api.v2.Listener + "@type": type.googleapis.com/envoy.config.listener.v3.Listener name: foo address: socket_address: @@ -834,7 +837,7 @@ version_info: version1 EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); // Version 2 listener will be initialized by listener manager directly. EXPECT_CALL(listener_foo2->target_, initialize()).Times(1); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "version2", true)); // Version2 is in warming list as listener_foo2->target_ is not ready yet. 
checkStats(__LINE__, /*added=*/2, 0, /*removed=*/1, /*warming=*/1, 0, 0, 0); @@ -847,7 +850,7 @@ version_info: version1 warming_state: version_info: version2 listener: - "@type": type.googleapis.com/envoy.api.v2.Listener + "@type": type.googleapis.com/envoy.config.listener.v3.Listener name: foo address: socket_address: @@ -897,7 +900,7 @@ filter_chains: {} ListenerHandle* listener_foo = expectListenerCreate(false, true); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); EXPECT_TRUE( - manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "version1", true)); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "version1", true)); checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); // Start workers and capture ListenerImpl. @@ -932,7 +935,7 @@ name: foo auto* timer = new Event::MockTimer(dynamic_cast(&server_.dispatcher())); EXPECT_CALL(*timer, enableTimer(_, _)); EXPECT_TRUE( - manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_update1_yaml), "", true)); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_update1_yaml), "", true)); EXPECT_EQ(1UL, manager_->listeners().size()); worker_->callAddCompletion(true); @@ -976,7 +979,7 @@ filter_chains: {} ListenerHandle* listener_foo = expectListenerCreate(false, true); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); EXPECT_TRUE( - manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "version1", true)); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "version1", true)); checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); EXPECT_CALL(*lds_api, versionInfo()).WillOnce(Return("version1")); checkConfigDump(R"EOF( @@ -987,7 +990,7 @@ version_info: version1 warming_state: version_info: version1 listener: - "@type": type.googleapis.com/envoy.api.v2.Listener + "@type": type.googleapis.com/envoy.config.listener.v3.Listener name: foo address: socket_address: @@ -1000,7 +1003,7 @@ version_info: version1 )EOF"); // Update duplicate should be a NOP. - EXPECT_FALSE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); + EXPECT_FALSE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); // Update foo listener. Should share socket. @@ -1018,7 +1021,7 @@ per_connection_buffer_limit_bytes: 10 ListenerHandle* listener_foo_update1 = expectListenerCreate(false, true); EXPECT_CALL(*listener_foo, onDestroy()); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_update1_yaml), + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_update1_yaml), "version2", true)); checkStats(__LINE__, 1, 1, 0, 0, 1, 0, 0); EXPECT_CALL(*lds_api, versionInfo()).WillOnce(Return("version2")); @@ -1030,7 +1033,7 @@ version_info: version2 warming_state: version_info: version2 listener: - "@type": type.googleapis.com/envoy.api.v2.Listener + "@type": type.googleapis.com/envoy.config.listener.v3.Listener name: foo address: socket_address: @@ -1067,7 +1070,7 @@ version_info: version2 // Update duplicate should be a NOP. 
EXPECT_FALSE( - manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_update1_yaml), "", true)); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_update1_yaml), "", true)); checkStats(__LINE__, 1, 1, 0, 0, 1, 0, 0); time_system_.setSystemTime(std::chrono::milliseconds(3003003003003)); @@ -1079,7 +1082,7 @@ version_info: version2 EXPECT_CALL(*worker_, stopListener(_, _)); EXPECT_CALL(*listener_foo_update1->drain_manager_, startDrainSequence(_)); EXPECT_TRUE( - manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "version3", true)); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "version3", true)); worker_->callAddCompletion(true); checkStats(__LINE__, 1, 2, 0, 0, 1, 1, 0); EXPECT_CALL(*lds_api, versionInfo()).WillOnce(Return("version3")); @@ -1091,7 +1094,7 @@ version_info: version3 active_state: version_info: version3 listener: - "@type": type.googleapis.com/envoy.api.v2.Listener + "@type": type.googleapis.com/envoy.config.listener.v3.Listener name: foo address: socket_address: @@ -1104,7 +1107,7 @@ version_info: version3 draining_state: version_info: version2 listener: - "@type": type.googleapis.com/envoy.api.v2.Listener + "@type": type.googleapis.com/envoy.config.listener.v3.Listener name: foo address: socket_address: @@ -1140,7 +1143,7 @@ filter_chains: {} EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); EXPECT_CALL(*worker_, addListener(_, _, _)); EXPECT_TRUE( - manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_bar_yaml), "version4", true)); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_bar_yaml), "version4", true)); EXPECT_EQ(2UL, manager_->listeners().size()); worker_->callAddCompletion(true); checkStats(__LINE__, 2, 2, 0, 0, 2, 0, 0); @@ -1161,7 +1164,7 @@ filter_chains: {} EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); EXPECT_CALL(listener_baz->target_, initialize()); EXPECT_TRUE( - manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_baz_yaml), "version5", true)); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_baz_yaml), "version5", true)); EXPECT_EQ(2UL, manager_->listeners().size()); checkStats(__LINE__, 3, 2, 0, 1, 2, 0, 0); EXPECT_CALL(*lds_api, versionInfo()).WillOnce(Return("version5")); @@ -1172,7 +1175,7 @@ version_info: version5 active_state: version_info: version3 listener: - "@type": type.googleapis.com/envoy.api.v2.Listener + "@type": type.googleapis.com/envoy.config.listener.v3.Listener name: foo address: socket_address: @@ -1186,7 +1189,7 @@ version_info: version5 active_state: version_info: version4 listener: - "@type": type.googleapis.com/envoy.api.v2.Listener + "@type": type.googleapis.com/envoy.config.listener.v3.Listener name: bar address: socket_address: @@ -1200,7 +1203,7 @@ version_info: version5 warming_state: version_info: version5 listener: - "@type": type.googleapis.com/envoy.api.v2.Listener + "@type": type.googleapis.com/envoy.config.listener.v3.Listener name: baz address: socket_address: @@ -1213,7 +1216,7 @@ version_info: version5 )EOF"); // Update a duplicate baz that is currently warming. - EXPECT_FALSE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_baz_yaml), "", true)); + EXPECT_FALSE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_baz_yaml), "", true)); checkStats(__LINE__, 3, 2, 0, 1, 2, 0, 0); // Update baz while it is warming. 
@@ -1226,7 +1229,7 @@ name: baz filter_chains: - filters: - name: fake - config: {} + typed_config: {} )EOF"; ListenerHandle* listener_baz_update1 = expectListenerCreate(true, true); @@ -1236,7 +1239,7 @@ name: baz })); EXPECT_CALL(listener_baz_update1->target_, initialize()); EXPECT_TRUE( - manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_baz_update1_yaml), "", true)); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_baz_update1_yaml), "", true)); EXPECT_EQ(2UL, manager_->listeners().size()); checkStats(__LINE__, 3, 3, 0, 1, 2, 0, 0); @@ -1276,7 +1279,7 @@ name: foo ListenerHandle* listener_foo = expectListenerCreate(false, true); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); EXPECT_CALL(*worker_, addListener(_, _, _)); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); worker_->callAddCompletion(true); checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); @@ -1298,7 +1301,7 @@ name: foo // Add foo again. We should use the socket from draining. ListenerHandle* listener_foo2 = expectListenerCreate(false, true); EXPECT_CALL(*worker_, addListener(_, _, _)); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); worker_->callAddCompletion(true); checkStats(__LINE__, 2, 0, 1, 0, 1, 1, 0); @@ -1336,7 +1339,7 @@ name: foo ListenerHandle* listener_foo = expectListenerCreate(false, true); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); EXPECT_CALL(*worker_, addListener(_, _, _)); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); worker_->callAddCompletion(true); checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); @@ -1354,7 +1357,7 @@ name: foo ListenerHandle* listener_foo2 = expectListenerCreate(false, true); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); EXPECT_CALL(*worker_, addListener(_, _, _)); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); worker_->callAddCompletion(true); checkStats(__LINE__, 2, 0, 1, 0, 1, 1, 0); @@ -1400,7 +1403,7 @@ name: foo })); EXPECT_CALL(listener_foo->target_, initialize()); EXPECT_CALL(*listener_foo, onDestroy()); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); } TEST_F(ListenerManagerImplTest, ReusePortEqualToTrue) { @@ -1435,7 +1438,7 @@ reuse_port: true })); EXPECT_CALL(listener_foo->target_, initialize()); EXPECT_CALL(*listener_foo, onDestroy()); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); } TEST_F(ListenerManagerImplTest, NotSupportedDatagramUds) { @@ -1468,14 +1471,14 @@ name: foo EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})) .WillOnce(Throw(EnvoyException("can't bind"))); EXPECT_CALL(*listener_foo, onDestroy()); - 
EXPECT_THROW(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true), + EXPECT_THROW(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true), EnvoyException); checkConfigDump(R"EOF( dynamic_listeners: - name: foo error_state: failed_configuration: - "@type": type.googleapis.com/envoy.api.v2.Listener + "@type": type.googleapis.com/envoy.config.listener.v3.Listener name: foo address: socket_address: @@ -1556,7 +1559,7 @@ name: foo ListenerHandle* listener_foo = expectListenerCreate(false, true); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); EXPECT_CALL(*worker_, addListener(_, _, _)); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); worker_->callAddCompletion(true); checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); @@ -1610,7 +1613,7 @@ name: foo ListenerHandle* listener_foo = expectListenerCreate(true, true); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); EXPECT_CALL(listener_foo->target_, initialize()); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); EXPECT_EQ(0UL, manager_->listeners().size()); checkStats(__LINE__, 1, 0, 0, 1, 0, 0, 0); @@ -1624,7 +1627,7 @@ name: foo listener_foo = expectListenerCreate(true, true); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); EXPECT_CALL(listener_foo->target_, initialize()); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); checkStats(__LINE__, 2, 0, 1, 1, 0, 0, 0); EXPECT_CALL(*worker_, addListener(_, _, _)); listener_foo->target_.ready(); @@ -1647,7 +1650,7 @@ per_connection_buffer_limit_bytes: 999 ListenerHandle* listener_foo_update1 = expectListenerCreate(true, true); EXPECT_CALL(listener_foo_update1->target_, initialize()); EXPECT_TRUE( - manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_update1_yaml), "", true)); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_update1_yaml), "", true)); EXPECT_EQ(1UL, manager_->listeners().size()); checkStats(__LINE__, 2, 1, 1, 1, 1, 0, 0); @@ -1690,7 +1693,7 @@ traffic_direction: INBOUND ListenerHandle* listener_foo = expectListenerCreate(true, true); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); EXPECT_CALL(listener_foo->target_, initialize()); - auto foo_inbound_proto = parseListenerFromV2Yaml(listener_foo_yaml); + auto foo_inbound_proto = parseListenerFromV3Yaml(listener_foo_yaml); EXPECT_TRUE(manager_->addOrUpdateListener(foo_inbound_proto, "", true)); checkStats(__LINE__, 1, 0, 0, 1, 0, 0, 0); EXPECT_CALL(*worker_, addListener(_, _, _)); @@ -1715,7 +1718,7 @@ traffic_direction: OUTBOUND EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); EXPECT_CALL(listener_foo_outbound->target_, initialize()); EXPECT_TRUE( - manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_outbound_yaml), "", true)); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_outbound_yaml), "", true)); EXPECT_CALL(*worker_, addListener(_, _, _)); listener_foo_outbound->target_.ready(); worker_->callAddCompletion(true); @@ -1743,7 +1746,7 @@ traffic_direction: OUTBOUND 
EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); EXPECT_CALL(*worker_, addListener(_, _, _)); EXPECT_TRUE( - manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_bar_outbound_yaml), "", true)); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_bar_outbound_yaml), "", true)); EXPECT_EQ(3UL, manager_->listeners().size()); worker_->callAddCompletion(true); @@ -1758,7 +1761,7 @@ traffic_direction: INBOUND filter_chains: - filters: [] )EOF"; - EXPECT_FALSE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_bar_yaml), "", true)); + EXPECT_FALSE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_bar_yaml), "", true)); // Explicitly validate that in place filter chain update is not allowed. auto in_place_foo_inbound_proto = foo_inbound_proto; @@ -1794,7 +1797,7 @@ name: foo ListenerHandle* listener_foo = expectListenerCreate(true, true); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); EXPECT_CALL(listener_foo->target_, initialize()); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); checkStats(__LINE__, 1, 0, 0, 1, 0, 0, 0); EXPECT_CALL(*worker_, addListener(_, _, _)); listener_foo->target_.ready(); @@ -1818,7 +1821,7 @@ name: bar filter_chains: - filters: [] )EOF"; - EXPECT_FALSE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_bar_yaml), "", true)); + EXPECT_FALSE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_bar_yaml), "", true)); } // Validate that stopping a warming listener, removes directly from warming listener list. @@ -1843,7 +1846,7 @@ traffic_direction: INBOUND ListenerHandle* listener_foo = expectListenerCreate(true, true); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); EXPECT_CALL(listener_foo->target_, initialize()); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); checkStats(__LINE__, 1, 0, 0, 1, 0, 0, 0); EXPECT_CALL(*worker_, addListener(_, _, _)); listener_foo->target_.ready(); @@ -1867,7 +1870,7 @@ per_connection_buffer_limit_bytes: 999 ListenerHandle* listener_foo_update1 = expectListenerCreate(true, true); EXPECT_CALL(listener_foo_update1->target_, initialize()); EXPECT_TRUE( - manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_update1_yaml), "", true)); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_update1_yaml), "", true)); EXPECT_EQ(1UL, manager_->listeners().size()); // Stop foo which should remove warming listener. 
@@ -1899,7 +1902,7 @@ name: foo ListenerHandle* listener_foo = expectListenerCreate(false, true); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); EXPECT_CALL(*worker_, addListener(_, _, _)); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); EXPECT_CALL(*worker_, stopListener(_, _)); EXPECT_CALL(*listener_foo->drain_manager_, startDrainSequence(_)); @@ -1936,7 +1939,7 @@ name: foo ListenerHandle* listener_foo = expectListenerCreate(false, false); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); EXPECT_CALL(*worker_, addListener(_, _, _)); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", false)); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", false)); EXPECT_CALL(*worker_, stopListener(_, _)); EXPECT_CALL(*listener_foo->drain_manager_, startDrainSequence(_)); @@ -1964,7 +1967,7 @@ TEST_F(ListenerManagerImplTest, StatsNameValidCharacterTest) { - filters: [] )EOF"; - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); manager_->listeners().front().get().listenerScope().counterFromString("foo").inc(); EXPECT_EQ(1UL, server_.stats_store_.counterFromString("listener.[__1]_10000.foo").value()); @@ -1992,7 +1995,7 @@ name: foo ListenerHandle* listener_foo = expectListenerCreate(true, true); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, ListenSocketCreationParams(false))); EXPECT_CALL(listener_foo->target_, initialize()); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); // Add bar with same non-binding address. Should fail. 
const std::string listener_bar_yaml = R"EOF( @@ -2010,7 +2013,7 @@ name: bar ListenerHandle* listener_bar = expectListenerCreate(true, true); EXPECT_CALL(*listener_bar, onDestroy()); EXPECT_THROW_WITH_MESSAGE( - manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_bar_yaml), "", true), + manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_bar_yaml), "", true), EnvoyException, "error adding listener: 'bar' has duplicate address '0.0.0.0:1234' as existing listener"); @@ -2022,7 +2025,7 @@ name: bar listener_bar = expectListenerCreate(true, true); EXPECT_CALL(*listener_bar, onDestroy()); EXPECT_THROW_WITH_MESSAGE( - manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_bar_yaml), "", true), + manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_bar_yaml), "", true), EnvoyException, "error adding listener: 'bar' has duplicate address '0.0.0.0:1234' as existing listener"); @@ -2043,7 +2046,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithDestinationP socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - config: {} + typed_config: {} filter_chains: - filter_chain_match: destination_port: 8080 @@ -2060,7 +2063,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithDestinationP EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); // IPv4 client connects to unknown port - no match. @@ -2089,7 +2092,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithDestinationI socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - config: {} + typed_config: {} filter_chains: - filter_chain_match: prefix_ranges: { address_prefix: 127.0.0.0, prefix_len: 8 } @@ -2106,7 +2109,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithDestinationI EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); // IPv4 client connects to unknown IP - no match. @@ -2135,7 +2138,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithServerNamesM socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - config: {} + typed_config: {} filter_chains: - filter_chain_match: server_names: "server1.example.com" @@ -2152,7 +2155,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithServerNamesM EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); // TLS client without SNI - no match. 
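The `config: {}` to `typed_config: {}` swaps in the listener_filters and filter_chains blocks follow from the same constraint: v3 carries filter configuration as a google.protobuf.Any rather than the deprecated untyped `config` Struct. An empty typed_config suffices in these tests because they rely on test filter factories; for reference only (values are illustrative and not taken from this patch), a fully populated v3 filter entry looks roughly like this:

  // Illustrative sketch, not part of the patch: a v3 filter names its config
  // proto through the Any's "@type" field.
  const std::string tcp_proxy_yaml = R"EOF(
  filters:
  - name: envoy.filters.network.tcp_proxy
    typed_config:
      "@type": type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy
      stat_prefix: ingress_tcp
      cluster: service_foo
  )EOF";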
@@ -2182,7 +2185,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithTransportPro socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - config: {} + typed_config: {} filter_chains: - filter_chain_match: transport_protocol: "tls" @@ -2199,7 +2202,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithTransportPro EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); // TCP client - no match. @@ -2224,7 +2227,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithApplicationP socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - config: {} + typed_config: {} filter_chains: - filter_chain_match: application_protocols: "http/1.1" @@ -2242,7 +2245,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithApplicationP EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); // TLS client without ALPN - no match. @@ -2271,10 +2274,10 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithSourceTypeMa socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - config: {} + typed_config: {} filter_chains: - filter_chain_match: - source_type: LOCAL + source_type: SAME_IP_OR_LOOPBACK transport_socket: name: tls typed_config: @@ -2288,7 +2291,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithSourceTypeMa EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); // EXTERNAL IPv4 client without "http/1.1" ALPN - no match. @@ -2330,7 +2333,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithSourceIpMatc socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - config: {} + typed_config: {} filter_chains: - filter_chain_match: source_prefix_ranges: @@ -2349,7 +2352,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithSourceIpMatc EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); // IPv4 client with source 10.0.1.1. No match. 
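The source_type edits in the SourceType tests are a plain enum rename: the value the v2 FilterChainMatch spelled LOCAL is SAME_IP_OR_LOOPBACK in the v3 API, and YAML parsed without boosting has to use the v3 spelling. Roughly (illustrative, not from the patch):

  // Illustrative sketch of the v3 spelling for the connection source-type match.
  const std::string source_type_match_yaml = R"EOF(
  filter_chain_match:
    source_type: SAME_IP_OR_LOOPBACK  # spelled LOCAL in the v2 API
  )EOF";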
@@ -2390,7 +2393,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithSourceIpv6Ma socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - config: {} + typed_config: {} filter_chains: - filter_chain_match: source_prefix_ranges: @@ -2409,7 +2412,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithSourceIpv6Ma EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); // IPv6 client with matching subnet. Match. @@ -2430,7 +2433,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithSourcePortMa socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - config: {} + typed_config: {} filter_chains: - filter_chain_match: source_ports: @@ -2448,7 +2451,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithSourcePortMa EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); // Client with source port 100. Match. @@ -2477,10 +2480,10 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainWithSourceType socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - config: {} + typed_config: {} filter_chains: - filter_chain_match: - source_type: LOCAL + source_type: SAME_IP_OR_LOOPBACK transport_socket: name: tls typed_config: @@ -2515,7 +2518,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainWithSourceType EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); // LOCAL TLS client with "http/1.1" ALPN - no match. @@ -2565,7 +2568,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithDestinati socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - config: {} + typed_config: {} filter_chains: - filter_chain_match: # empty @@ -2602,7 +2605,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithDestinati EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); // IPv4 client connects to default port - using 1st filter chain. 
@@ -2651,7 +2654,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithDestinati socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - config: {} + typed_config: {} filter_chains: - filter_chain_match: # empty @@ -2688,7 +2691,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithDestinati EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); // IPv4 client connects to default IP - using 1st filter chain. @@ -2737,7 +2740,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithServerNam socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - config: {} + typed_config: {} filter_chains: - filter_chain_match: # empty @@ -2783,7 +2786,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithServerNam EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); // TLS client without SNI - using 1st filter chain. @@ -2836,7 +2839,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithTransport socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - config: {} + typed_config: {} filter_chains: - filter_chain_match: # empty @@ -2855,7 +2858,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithTransport EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); // TCP client - using 1st filter chain. @@ -2881,7 +2884,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithApplicati socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - config: {} + typed_config: {} filter_chains: - filter_chain_match: # empty @@ -2900,7 +2903,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithApplicati EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); // TLS client without ALPN - using 1st filter chain. 
@@ -2929,7 +2932,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithMultipleR socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - config: {} + typed_config: {} filter_chains: - filter_chain_match: # empty @@ -2950,7 +2953,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithMultipleR EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); // TLS client without SNI and ALPN - using 1st filter chain. @@ -2992,7 +2995,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithDifferent socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - config: {} + typed_config: {} filter_chains: - filter_chain_match: server_names: "example.com" @@ -3025,7 +3028,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithDifferent EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); } @@ -3036,7 +3039,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - config: {} + typed_config: {} filter_chains: - filter_chain_match: server_names: "example.com" @@ -3066,7 +3069,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); } @@ -3076,14 +3079,14 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithInvalidDesti socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - config: {} + typed_config: {} filter_chains: - filter_chain_match: prefix_ranges: { address_prefix: a.b.c.d, prefix_len: 32 } )EOF", Network::Address::IpVersion::v4); - EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true), + EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), EnvoyException, "malformed IP address: a.b.c.d"); } @@ -3093,14 +3096,14 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithInvalidServe socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - config: {} + typed_config: {} filter_chains: - filter_chain_match: server_names: "*w.example.com" )EOF", Network::Address::IpVersion::v4); - EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true), + EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), EnvoyException, "error adding listener '127.0.0.1:1234': partial wildcards are not " "supported in \"server_names\""); @@ -3112,7 +3115,7 @@ 
TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithSameMatch socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - config: {} + typed_config: {} filter_chains: - filter_chain_match: transport_protocol: "tls" @@ -3121,7 +3124,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithSameMatch )EOF", Network::Address::IpVersion::v4); - EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true), + EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), EnvoyException, "error adding listener '127.0.0.1:1234': multiple filter chains with " "the same matching rules are defined"); @@ -3134,7 +3137,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - config: {} + typed_config: {} filter_chains: - filter_chain_match: transport_protocol: "tls" @@ -3144,7 +3147,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, )EOF", Network::Address::IpVersion::v4); - EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true), + EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), EnvoyException, "error adding listener '127.0.0.1:1234': contains filter chains with " "unimplemented fields"); @@ -3156,7 +3159,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithOverlappi socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - config: {} + typed_config: {} filter_chains: - filter_chain_match: server_names: "example.com" @@ -3165,7 +3168,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithOverlappi )EOF", Network::Address::IpVersion::v4); - EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true), + EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), EnvoyException, "error adding listener '127.0.0.1:1234': multiple filter chains with " "overlapping matching rules are defined"); @@ -3185,7 +3188,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, TlsFilterChainWithoutTlsInspector EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); // TLS Inspector is automatically injected for filter chains with TLS requirements, @@ -3207,7 +3210,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.listener.tls_inspector" - config: {} + typed_config: {} filter_chains: - filter_chain_match: transport_protocol: "tls" @@ -3218,7 +3221,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); // Make sure there is exactly 1 listener filter (and assume it's TLS Inspector). 
2 filters @@ -3246,7 +3249,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SniFilterChainWithoutTlsInspector EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); // TLS Inspector is automatically injected for filter chains with SNI requirements, @@ -3274,7 +3277,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, AlpnFilterChainWithoutTlsInspecto EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); // TLS Inspector is automatically injected for filter chains with ALPN requirements, @@ -3303,7 +3306,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, CustomTransportProtocolWithSniWit EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); // Make sure there are no listener filters (i.e. no automatically injected TLS Inspector). @@ -3342,7 +3345,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, TlsCertificateInline) { EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); } @@ -3367,7 +3370,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, TlsCertificateChainInlinePrivateK EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); } @@ -3387,7 +3390,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, TlsCertificateIncomplete) { Network::Address::IpVersion::v4); EXPECT_THROW_WITH_MESSAGE( - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true), EnvoyException, + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), EnvoyException, TestEnvironment::substitute( "Failed to load incomplete certificate from {{ test_rundir }}" "/test/extensions/transport_sockets/tls/test_data/san_dns3_chain.pem, ", @@ -3410,7 +3413,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, TlsCertificateInvalidCertificateC )EOF", Network::Address::IpVersion::v4); - EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true), + EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), EnvoyException, "Failed to load certificate chain from "); } @@ -3436,7 +3439,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, TlsCertificateInvalidIntermediate )EOF"), Network::Address::IpVersion::v4); - EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true), + EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), EnvoyException, 
"Failed to load certificate chain from "); } @@ -3456,7 +3459,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, TlsCertificateInvalidPrivateKey) )EOF", Network::Address::IpVersion::v4); - EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true), + EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), EnvoyException, "Failed to load private key from "); } @@ -3478,7 +3481,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, TlsCertificateInvalidTrustedCA) { )EOF", Network::Address::IpVersion::v4); - EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true), + EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), EnvoyException, "Failed to load trusted CA certificates from "); } @@ -3504,7 +3507,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, Metadata) { route: { cluster: service_foo } listener_filters: - name: "envoy.filters.listener.original_dst" - config: {} + typed_config: {} )EOF", Network::Address::IpVersion::v4); Configuration::ListenerFactoryContext* listener_factory_context = nullptr; @@ -3519,7 +3522,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, Metadata) { listener_factory_context = &context; return ProdListenerComponentFactory::createListenerFilterFactoryList_(filters, context); })); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); ASSERT_NE(nullptr, listener_factory_context); EXPECT_EQ("test_value", Config::Metadata::metadataValue( &listener_factory_context->listenerMetadata(), "com.bar.foo", "baz") @@ -3534,13 +3537,13 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, OriginalDstFilter) { filter_chains: {} listener_filters: - name: "envoy.filters.listener.original_dst" - config: {} + typed_config: {} )EOF", Network::Address::IpVersion::v4); EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); Network::ListenerConfig& listener = manager_->listeners().back().get(); @@ -3609,13 +3612,13 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, OriginalDstTestFilter) { filter_chains: {} listener_filters: - name: "test.listener.original_dst" - config: {} + typed_config: {} )EOF", Network::Address::IpVersion::v4); EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); Network::ListenerConfig& listener = manager_->listeners().back().get(); @@ -3683,13 +3686,13 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, OriginalDstTestFilterIPv6) { filter_chains: {} listener_filters: - name: "test.listener.original_dstipv6" - config: {} + typed_config: {} )EOF", Network::Address::IpVersion::v6); EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); Network::ListenerConfig& listener = 
manager_->listeners().back().get(); @@ -3738,7 +3741,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, TransparentFreebindListenerDisabl EXPECT_EQ(options, nullptr); return listener_factory_.socket_; })); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); } @@ -3814,7 +3817,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, ReusePortListenerDisabled) { } TEST_F(ListenerManagerImplWithRealFiltersTest, LiteralSockoptListenerEnabled) { - const envoy::config::listener::v3::Listener listener = parseListenerFromV2Yaml(R"EOF( + const envoy::config::listener::v3::Listener listener = parseListenerFromV3Yaml(R"EOF( name: SockoptsListener address: socket_address: { address: 127.0.0.1, port_value: 1111 } @@ -3862,7 +3865,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, AddressResolver) { Registry::InjectFactory register_resolver(mock_resolver); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); } @@ -3887,7 +3890,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, CRLFilename) { EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); } @@ -3915,7 +3918,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, CRLInline) { EXPECT_CALL(server_.random_, uuid()); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); } @@ -3938,7 +3941,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, InvalidCRLInline) { )EOF", Network::Address::IpVersion::v4); - EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true), + EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), EnvoyException, "Failed to load CRL from "); } @@ -3960,7 +3963,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, CRLWithNoCA) { )EOF", Network::Address::IpVersion::v4); - EXPECT_THROW_WITH_REGEX(manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true), + EXPECT_THROW_WITH_REGEX(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), EnvoyException, "^Failed to load CRL from .* without trusted CA$"); } @@ -3983,7 +3986,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, VerifySanWithNoCA) { )EOF", Network::Address::IpVersion::v4); - EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true), + EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), EnvoyException, "SAN-based verification of peer certificates without trusted CA " "is insecure and not allowed"); @@ -4008,7 +4011,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, VerifyIgnoreExpirationWithNoCA) { )EOF", Network::Address::IpVersion::v4); - EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true), + 
EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), EnvoyException, "Certificate validity period is always ignored without trusted CA"); } @@ -4034,7 +4037,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, VerifyIgnoreExpirationWithCA) { )EOF", Network::Address::IpVersion::v4); - EXPECT_NO_THROW(manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true)); + EXPECT_NO_THROW(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true)); } // Validate that dispatcher stats prefix is set correctly when enabled. @@ -4068,7 +4071,7 @@ name: test_api_listener cluster: dynamic_forward_proxy_cluster )EOF"; - ASSERT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", false)); + ASSERT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", false)); EXPECT_EQ(0U, manager_->listeners().size()); ASSERT_TRUE(manager_->apiListener().has_value()); } @@ -4097,7 +4100,7 @@ name: test_api_listener cluster: dynamic_forward_proxy_cluster )EOF"; - ASSERT_FALSE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", true)); + ASSERT_FALSE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true)); EXPECT_EQ(0U, manager_->listeners().size()); ASSERT_FALSE(manager_->apiListener().has_value()); } @@ -4149,13 +4152,13 @@ name: test_api_listener_2 cluster: dynamic_forward_proxy_cluster )EOF"; - ASSERT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", false)); + ASSERT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", false)); EXPECT_EQ(0U, manager_->listeners().size()); ASSERT_TRUE(manager_->apiListener().has_value()); EXPECT_EQ("test_api_listener", manager_->apiListener()->get().name()); // Only one ApiListener is added. - ASSERT_FALSE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(yaml), "", false)); + ASSERT_FALSE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", false)); EXPECT_EQ(0U, manager_->listeners().size()); // The original ApiListener is there. 
ASSERT_TRUE(manager_->apiListener().has_value()); @@ -4183,7 +4186,7 @@ traffic_direction: INBOUND ListenerHandle* listener_foo = expectListenerCreate(true, true); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); EXPECT_CALL(listener_foo->target_, initialize()); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); EXPECT_EQ(0, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value()); checkStats(__LINE__, 1, 0, 0, 1, 0, 0, 0); @@ -4210,7 +4213,7 @@ traffic_direction: INBOUND ListenerHandle* listener_foo_update1 = expectListenerOverridden(true); EXPECT_CALL(listener_foo_update1->target_, initialize()); EXPECT_TRUE( - manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_update1_yaml), "", true)); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_update1_yaml), "", true)); EXPECT_EQ(1, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value()); EXPECT_EQ(1UL, manager_->listeners().size()); @@ -4245,7 +4248,7 @@ traffic_direction: INBOUND ListenerHandle* listener_foo = expectListenerCreate(true, true); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); EXPECT_CALL(listener_foo->target_, initialize()); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); EXPECT_EQ(0, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value()); checkStats(__LINE__, 1, 0, 0, 1, 0, 0, 0); @@ -4271,7 +4274,7 @@ traffic_direction: INBOUND ListenerHandle* listener_foo_update1 = expectListenerOverridden(true); EXPECT_CALL(listener_foo_update1->target_, initialize()); EXPECT_TRUE( - manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_update1_yaml), "", true)); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_update1_yaml), "", true)); EXPECT_EQ(1, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value()); EXPECT_EQ(1UL, manager_->listeners().size()); @@ -4314,7 +4317,7 @@ traffic_direction: INBOUND ListenerHandle* listener_foo = expectListenerCreate(true, true); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); EXPECT_CALL(listener_foo->target_, initialize()); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); EXPECT_EQ(0, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value()); checkStats(__LINE__, 1, 0, 0, 1, 0, 0, 0); @@ -4340,7 +4343,7 @@ traffic_direction: INBOUND ListenerHandle* listener_foo_update1 = expectListenerOverridden(true); EXPECT_CALL(listener_foo_update1->target_, initialize()); EXPECT_TRUE( - manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_update1_yaml), "", true)); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_update1_yaml), "", true)); EXPECT_EQ(1, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value()); EXPECT_EQ(1UL, manager_->listeners().size()); @@ -4377,7 +4380,7 @@ traffic_direction: INBOUND ListenerHandle* listener_foo = expectListenerCreate(true, true); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); 
EXPECT_CALL(listener_foo->target_, initialize()); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); checkStats(__LINE__, 1, 0, 0, 1, 0, 0, 0); EXPECT_CALL(*worker_, addListener(_, _, _)); listener_foo->target_.ready(); @@ -4401,7 +4404,7 @@ traffic_direction: INBOUND ListenerHandle* listener_foo_update1 = expectListenerOverridden(true); EXPECT_CALL(listener_foo_update1->target_, initialize()); EXPECT_TRUE( - manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_update1_yaml), "", true)); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_update1_yaml), "", true)); EXPECT_EQ(1UL, manager_->listeners().size()); EXPECT_EQ(1, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value()); checkStats(__LINE__, 1, 1, 0, 1, 1, 0, 0); @@ -4615,7 +4618,7 @@ name: foo EXPECT_CALL(*worker_, addListener(_, _, _)); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_yaml), "", true)); + EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); EXPECT_EQ(0, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value()); worker_->callAddCompletion(true); @@ -4640,7 +4643,7 @@ name: foo auto* timer = new Event::MockTimer(dynamic_cast(&server_.dispatcher())); EXPECT_CALL(*timer, enableTimer(_, _)); EXPECT_TRUE( - manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_update1_yaml), "", true)); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_update1_yaml), "", true)); EXPECT_EQ(1, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value()); EXPECT_EQ(1UL, manager_->listeners().size()); @@ -4671,7 +4674,7 @@ name: foo EXPECT_CALL(*worker_, stopListener(_, _)); EXPECT_CALL(*listener_foo_update1->drain_manager_, startDrainSequence(_)); EXPECT_TRUE( - manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_update2_yaml), "", true)); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_update2_yaml), "", true)); EXPECT_EQ(1, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value()); EXPECT_CALL(*worker_, removeListener(_, _)); diff --git a/test/server/utility.h b/test/server/utility.h index 726d427483dd..fafc08ae0500 100644 --- a/test/server/utility.h +++ b/test/server/utility.h @@ -12,9 +12,10 @@ namespace Envoy { namespace Server { namespace { -inline envoy::config::listener::v3::Listener parseListenerFromV2Yaml(const std::string& yaml) { +inline envoy::config::listener::v3::Listener parseListenerFromV3Yaml(const std::string& yaml, + bool avoid_boosting = true) { envoy::config::listener::v3::Listener listener; - TestUtility::loadFromYaml(yaml, listener, true); + TestUtility::loadFromYaml(yaml, listener, true, avoid_boosting); return listener; } From ebe1ed1217585cd31dc22772d3a1f36a15c0703e Mon Sep 17 00:00:00 2001 From: "Nolan \"Tempa Kyouran\" Varani" Date: Fri, 31 Jul 2020 15:20:15 -0700 Subject: [PATCH 818/909] aws_iam: Increase code coverage for AWS IAM credential provider (#12394) Increase code coverage for AWS IAM credential provider Signed-off-by: Nolan Varani --- .../aws_iam/aws_iam_grpc_credentials_test.cc | 79 +++++++++++++------ test/per_file_coverage.sh | 1 - 2 files changed, 53 insertions(+), 27 deletions(-) diff --git a/test/extensions/grpc_credentials/aws_iam/aws_iam_grpc_credentials_test.cc 
b/test/extensions/grpc_credentials/aws_iam/aws_iam_grpc_credentials_test.cc index 40d09b78e23b..6001a7bfdacd 100644 --- a/test/extensions/grpc_credentials/aws_iam/aws_iam_grpc_credentials_test.cc +++ b/test/extensions/grpc_credentials/aws_iam/aws_iam_grpc_credentials_test.cc @@ -34,6 +34,9 @@ class GrpcAwsIamClientIntegrationTest : public GrpcSslClientIntegrationTest { } void expectExtraHeaders(FakeStream& fake_stream) override { + if (call_credentials_ != CallCredentials::FromPlugin) { + return; + } AssertionResult result = fake_stream.waitForHeadersComplete(); RELEASE_ASSERT(result, result.message()); Http::TestRequestHeaderMapImpl stream_headers(fake_stream.headers()); @@ -57,41 +60,55 @@ class GrpcAwsIamClientIntegrationTest : public GrpcSslClientIntegrationTest { ssl_creds->mutable_root_certs()->set_filename( TestEnvironment::runfilesPath("test/config/integration/certs/upstreamcacert.pem")); - std::string config_yaml; - switch (region_location_) { - case RegionLocation::InEnvironment: - TestEnvironment::setEnvVar("AWS_REGION", region_name_, 1); - ABSL_FALLTHROUGH_INTENDED; - case RegionLocation::NotProvided: - config_yaml = fmt::format(R"EOF( -"@type": type.googleapis.com/envoy.config.grpc_credential.v2alpha.AwsIamConfig -service_name: {} -)EOF", - service_name_); - break; - case RegionLocation::InConfig: - config_yaml = fmt::format(R"EOF( -"@type": type.googleapis.com/envoy.config.grpc_credential.v2alpha.AwsIamConfig -service_name: {} -region: {} -)EOF", - service_name_, region_name_); - break; + switch (call_credentials_) { + case CallCredentials::FromPlugin: { + std::string config_yaml; + switch (region_location_) { + case RegionLocation::InEnvironment: + TestEnvironment::setEnvVar("AWS_REGION", region_name_, 1); + ABSL_FALLTHROUGH_INTENDED; + case RegionLocation::NotProvided: + config_yaml = fmt::format(R"EOF( + "@type": type.googleapis.com/envoy.config.grpc_credential.v2alpha.AwsIamConfig + service_name: {} + )EOF", + service_name_); + break; + case RegionLocation::InConfig: + config_yaml = fmt::format(R"EOF( + "@type": type.googleapis.com/envoy.config.grpc_credential.v2alpha.AwsIamConfig + service_name: {} + region: {} + )EOF", + service_name_, region_name_); + break; + } + + auto* plugin_config = google_grpc->add_call_credentials()->mutable_from_plugin(); + plugin_config->set_name(credentials_factory_name_); + Envoy::TestUtility::loadFromYaml(config_yaml, *plugin_config->mutable_typed_config()); + return config; + } + case CallCredentials::AccessToken: + google_grpc->add_call_credentials()->mutable_access_token()->assign("foo"); + return config; + default: + return config; } - - auto* plugin_config = google_grpc->add_call_credentials()->mutable_from_plugin(); - plugin_config->set_name(credentials_factory_name_); - Envoy::TestUtility::loadFromYaml(config_yaml, *plugin_config->mutable_typed_config()); - return config; } - enum class RegionLocation { NotProvided, InEnvironment, InConfig, }; + enum class CallCredentials { + FromPlugin, + AccessToken, + }; + RegionLocation region_location_ = RegionLocation::NotProvided; + CallCredentials call_credentials_ = CallCredentials::FromPlugin; std::string service_name_{}; std::string region_name_{}; std::string credentials_factory_name_{}; @@ -133,6 +150,16 @@ TEST_P(GrpcAwsIamClientIntegrationTest, AwsIamGrpcAuth_NoRegion) { EXPECT_THROW_WITH_REGEX(initialize();, EnvoyException, "AWS region"); } +TEST_P(GrpcAwsIamClientIntegrationTest, AwsIamGrpcAuth_UnexpectedCallCredentials) { + SKIP_IF_GRPC_CLIENT(ClientType::EnvoyGrpc); + 
call_credentials_ = CallCredentials::AccessToken; + credentials_factory_name_ = Extensions::GrpcCredentials::GrpcCredentialsNames::get().AwsIam; + initialize(); + auto request = createRequest(empty_metadata_); + request->sendReply(); + dispatcher_helper_.runDispatcher(); +} + } // namespace } // namespace Grpc } // namespace Envoy diff --git a/test/per_file_coverage.sh b/test/per_file_coverage.sh index c0fa968a46ca..670564628c10 100755 --- a/test/per_file_coverage.sh +++ b/test/per_file_coverage.sh @@ -48,7 +48,6 @@ declare -a KNOWN_LOW_COVERAGE=( "source/extensions/filters/common/fault:95.8" "source/extensions/filters/common/lua:95.9" "source/extensions/grpc_credentials:92.0" -"source/extensions/grpc_credentials/aws_iam:86.8" "source/extensions/health_checkers:95.9" "source/extensions/health_checkers/redis:95.9" "source/extensions/quic_listeners:84.8" From fa5d0dccec7179901687ed4e297f89e9c1eb67d0 Mon Sep 17 00:00:00 2001 From: Phil Genera Date: Fri, 31 Jul 2020 19:33:41 -0400 Subject: [PATCH 819/909] docs: fix formatting for --ignore-unknown-dynamic-fields (#12418) Signed-off-by: Phil Genera --- docs/root/operations/cli.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/root/operations/cli.rst b/docs/root/operations/cli.rst index cf2f8894716a..5cbd5b911051 100644 --- a/docs/root/operations/cli.rst +++ b/docs/root/operations/cli.rst @@ -309,9 +309,9 @@ following are the command line options that Envoy supports. .. option:: --ignore-unknown-dynamic-fields *(optional)* This flag disables validation of protobuf configuration for unknown fields in dynamic - configuration. Unlike setting --reject-unknown-dynamic-fields to false, it does not log warnings or - count occurrences of unknown fields, in the interest of configuration processing speed. If - --reject-unknown-dynamic-fields is set to true, this flag has no effect. + configuration. Unlike setting :option:`--reject-unknown-dynamic-fields` to false, it does not log warnings + or count occurrences of unknown fields, in the interest of configuration processing speed. If + :option:`--reject-unknown-dynamic-fields` is set to true, this flag has no effect. .. option:: --disable-extensions From 16cdc5d34e82dd510a20e72850a8b8969d2dee31 Mon Sep 17 00:00:00 2001 From: antonio Date: Fri, 31 Jul 2020 19:35:17 -0400 Subject: [PATCH 820/909] Do not attempt to read the value of the "envoy.reloadable_features.activate_fds_next_event_loop" runtime feature before Runtime::LoaderSingleton is initialized. (#12415) Signed-off-by: Antonio Vicente --- source/common/event/file_event_impl.cc | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/source/common/event/file_event_impl.cc b/source/common/event/file_event_impl.cc index 95335dd36aae..a4ad6c419814 100644 --- a/source/common/event/file_event_impl.cc +++ b/source/common/event/file_event_impl.cc @@ -14,8 +14,16 @@ namespace Event { FileEventImpl::FileEventImpl(DispatcherImpl& dispatcher, os_fd_t fd, FileReadyCb cb, FileTriggerType trigger, uint32_t events) : cb_(cb), fd_(fd), trigger_(trigger), - activate_fd_events_next_event_loop_(Runtime::runtimeFeatureEnabled( - "envoy.reloadable_features.activate_fds_next_event_loop")) { + activate_fd_events_next_event_loop_( + // Only read the runtime feature if the runtime loader singleton has already been created. 
+ // Attempts to access runtime features too early in the initialization sequence triggers + // some spurious, scary-looking logs about not being able to read runtime feature config + // from the singleton. These warnings are caused by creation of filesystem watchers as + // part of the process of loading the runtime configuration from disk. + Runtime::LoaderSingleton::getExisting() + ? Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.activate_fds_next_event_loop") + : true) { #ifdef WIN32 RELEASE_ASSERT(trigger_ == FileTriggerType::Level, "libevent does not support edge triggers on Windows"); From b02924095895ecff1d15794d1133c75d87e48134 Mon Sep 17 00:00:00 2001 From: Piotr Sikora Date: Fri, 31 Jul 2020 18:01:43 -0700 Subject: [PATCH 821/909] ci: fix adding extra params for CircleCI. (#12400) Signed-off-by: Piotr Sikora --- ci/do_circle_ci.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ci/do_circle_ci.sh b/ci/do_circle_ci.sh index 7c44e7555b71..29469a24b814 100755 --- a/ci/do_circle_ci.sh +++ b/ci/do_circle_ci.sh @@ -30,7 +30,7 @@ export NUM_CPUS=6 # CircleCI doesn't support IPv6 by default, so we run all tests with IPv4 only. # IPv6 tests are run with Azure Pipelines. -export BAZEL_EXTRA_BUILD_OPTIONS="--test_env=ENVOY_IP_TEST_VERSIONS=v4only --local_cpu_resources=${NUM_CPUS} \ +export BAZEL_BUILD_EXTRA_OPTIONS+="--test_env=ENVOY_IP_TEST_VERSIONS=v4only --local_cpu_resources=${NUM_CPUS} \ --action_env=HOME --action_env=PYTHONUSERBASE --test_env=HOME --test_env=PYTHONUSERBASE" function finish { From c625cb32586c572c6d1a738218b818bee6e48afa Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Fri, 31 Jul 2020 18:11:41 -0700 Subject: [PATCH 822/909] build: fix main branch merge issue (#12422) Signed-off-by: Matt Klein --- test/server/listener_manager_impl_test.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/server/listener_manager_impl_test.cc b/test/server/listener_manager_impl_test.cc index 0afea3df6b0f..07a88ebf3dd1 100644 --- a/test/server/listener_manager_impl_test.cc +++ b/test/server/listener_manager_impl_test.cc @@ -866,7 +866,7 @@ version_info: version1 ListenerHandle* listener_foo3 = expectListenerCreate(true, true); EXPECT_CALL(*listener_foo3, onDestroy()); EXPECT_THROW_WITH_MESSAGE( - manager_->addOrUpdateListener(parseListenerFromV2Yaml(listener_foo_address_update_yaml), + manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_address_update_yaml), "version3", true), EnvoyException, "error updating listener: 'foo' has a different address " From da403fa7b67b9331ca1d129a4035950d2e23c752 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Fri, 31 Jul 2020 21:36:57 -0400 Subject: [PATCH 823/909] build: marking extensions as extension-only visible by default (#12337) Risk Level: medium (of build breakage) Testing: n/a Docs Changes: n/a Release Notes: n/a Part of #9953 Signed-off-by: Alyssa Wilk --- BUILD | 30 +++++++++++++++++++ bazel/README.md | 11 +++++++ bazel/envoy_build_system.bzl | 3 ++ bazel/envoy_library.bzl | 3 +- docs/root/version_history/current.rst | 2 ++ source/extensions/access_loggers/BUILD | 6 ++-- source/extensions/access_loggers/common/BUILD | 4 +-- source/extensions/access_loggers/file/BUILD | 11 +++++-- source/extensions/access_loggers/grpc/BUILD | 16 ++++++++-- source/extensions/clusters/BUILD | 6 ++-- source/extensions/clusters/aggregate/BUILD | 4 +-- .../clusters/dynamic_forward_proxy/BUILD | 4 +-- source/extensions/clusters/redis/BUILD | 4 +-- source/extensions/common/BUILD | 6 ++-- 
source/extensions/common/aws/BUILD | 4 +-- source/extensions/common/crypto/BUILD | 10 +++++-- .../common/dynamic_forward_proxy/BUILD | 4 +-- source/extensions/common/proxy_protocol/BUILD | 6 ++-- source/extensions/common/redis/BUILD | 4 +-- source/extensions/common/sqlutils/BUILD | 4 +-- source/extensions/common/tap/BUILD | 4 +-- source/extensions/common/wasm/BUILD | 4 +-- source/extensions/common/wasm/null/BUILD | 4 +-- source/extensions/common/wasm/v8/BUILD | 4 +-- .../compression/common/compressor/BUILD | 4 +-- .../compression/common/decompressor/BUILD | 4 +-- .../extensions/compression/gzip/common/BUILD | 4 +-- .../compression/gzip/compressor/BUILD | 4 +-- .../compression/gzip/decompressor/BUILD | 4 +-- source/extensions/extensions_build_config.bzl | 6 ++++ source/extensions/filters/common/expr/BUILD | 4 +-- .../extensions/filters/common/ext_authz/BUILD | 4 +-- source/extensions/filters/common/fault/BUILD | 4 +-- source/extensions/filters/common/lua/BUILD | 4 +-- .../filters/common/original_src/BUILD | 4 +-- .../extensions/filters/common/ratelimit/BUILD | 4 +-- source/extensions/filters/common/rbac/BUILD | 4 +-- source/extensions/filters/http/BUILD | 6 ++-- .../filters/http/adaptive_concurrency/BUILD | 4 +-- .../adaptive_concurrency/controller/BUILD | 4 +-- .../filters/http/admission_control/BUILD | 4 +-- .../http/admission_control/evaluators/BUILD | 4 +-- .../extensions/filters/http/aws_lambda/BUILD | 4 +-- .../filters/http/aws_request_signing/BUILD | 4 +-- source/extensions/filters/http/buffer/BUILD | 9 ++++-- source/extensions/filters/http/cache/BUILD | 4 +-- .../http/cache/simple_http_cache/BUILD | 4 +-- source/extensions/filters/http/common/BUILD | 14 +++++++-- .../filters/http/common/compressor/BUILD | 4 +-- .../extensions/filters/http/compressor/BUILD | 4 +-- source/extensions/filters/http/cors/BUILD | 9 ++++-- source/extensions/filters/http/csrf/BUILD | 4 +-- .../filters/http/decompressor/BUILD | 4 +-- .../filters/http/dynamic_forward_proxy/BUILD | 4 +-- source/extensions/filters/http/dynamo/BUILD | 4 +-- .../extensions/filters/http/ext_authz/BUILD | 4 +-- source/extensions/filters/http/fault/BUILD | 4 +-- .../filters/http/grpc_http1_bridge/BUILD | 11 +++++-- .../http/grpc_http1_reverse_bridge/BUILD | 4 +-- .../filters/http/grpc_json_transcoder/BUILD | 4 +-- .../extensions/filters/http/grpc_stats/BUILD | 4 +-- source/extensions/filters/http/grpc_web/BUILD | 4 +-- source/extensions/filters/http/gzip/BUILD | 4 +-- .../filters/http/header_to_metadata/BUILD | 4 +-- .../filters/http/health_check/BUILD | 11 +++++-- .../extensions/filters/http/ip_tagging/BUILD | 9 ++++-- .../extensions/filters/http/jwt_authn/BUILD | 4 +-- source/extensions/filters/http/lua/BUILD | 4 +-- .../extensions/filters/http/on_demand/BUILD | 10 +++++-- .../filters/http/original_src/BUILD | 4 +-- .../extensions/filters/http/ratelimit/BUILD | 4 +-- source/extensions/filters/http/rbac/BUILD | 9 ++++-- source/extensions/filters/http/router/BUILD | 6 ++-- source/extensions/filters/http/squash/BUILD | 4 +-- source/extensions/filters/http/tap/BUILD | 4 +-- source/extensions/filters/listener/BUILD | 6 ++-- .../filters/listener/http_inspector/BUILD | 4 +-- .../filters/listener/original_dst/BUILD | 9 ++++-- .../filters/listener/original_src/BUILD | 4 +-- .../filters/listener/proxy_protocol/BUILD | 10 +++++-- .../filters/listener/tls_inspector/BUILD | 14 +++++++-- source/extensions/filters/network/BUILD | 6 ++-- .../filters/network/client_ssl_auth/BUILD | 4 +-- .../extensions/filters/network/common/BUILD | 8 +++-- 
.../filters/network/common/redis/BUILD | 4 +-- .../filters/network/direct_response/BUILD | 4 +-- .../filters/network/dubbo_proxy/BUILD | 4 +-- .../filters/network/dubbo_proxy/filters/BUILD | 4 +-- .../filters/network/dubbo_proxy/router/BUILD | 4 +-- source/extensions/filters/network/echo/BUILD | 9 ++++-- .../filters/network/ext_authz/BUILD | 4 +-- .../network/http_connection_manager/BUILD | 6 ++-- source/extensions/filters/network/kafka/BUILD | 4 +-- .../filters/network/local_ratelimit/BUILD | 4 +-- .../filters/network/mongo_proxy/BUILD | 4 +-- .../filters/network/mysql_proxy/BUILD | 4 +-- .../filters/network/postgres_proxy/BUILD | 4 +-- .../filters/network/ratelimit/BUILD | 10 +++++-- source/extensions/filters/network/rbac/BUILD | 4 +-- .../filters/network/redis_proxy/BUILD | 9 ++++-- .../filters/network/rocketmq_proxy/BUILD | 4 +-- .../network/rocketmq_proxy/router/BUILD | 4 +-- .../filters/network/sni_cluster/BUILD | 4 +-- .../network/sni_dynamic_forward_proxy/BUILD | 4 +-- .../filters/network/tcp_proxy/BUILD | 6 ++-- .../filters/network/thrift_proxy/BUILD | 4 +-- .../network/thrift_proxy/filters/BUILD | 4 +-- .../thrift_proxy/filters/ratelimit/BUILD | 4 +-- .../filters/network/thrift_proxy/router/BUILD | 4 +-- .../filters/network/zookeeper_proxy/BUILD | 4 +-- .../extensions/filters/udp/dns_filter/BUILD | 4 +-- source/extensions/filters/udp/udp_proxy/BUILD | 4 +-- source/extensions/grpc_credentials/BUILD | 6 ++-- .../extensions/grpc_credentials/aws_iam/BUILD | 4 +-- .../extensions/grpc_credentials/example/BUILD | 10 +++++-- .../file_based_metadata/BUILD | 4 +-- source/extensions/health_checkers/BUILD | 6 ++-- source/extensions/health_checkers/redis/BUILD | 4 +-- source/extensions/internal_redirect/BUILD | 6 ++-- .../allow_listed_routes/BUILD | 9 ++++-- .../internal_redirect/previous_routes/BUILD | 9 ++++-- .../internal_redirect/safe_cross_scheme/BUILD | 9 ++++-- source/extensions/quic_listeners/quiche/BUILD | 10 +++++-- .../quic_listeners/quiche/platform/BUILD | 4 +-- source/extensions/resource_monitors/BUILD | 6 ++-- .../extensions/resource_monitors/common/BUILD | 6 ++-- .../resource_monitors/fixed_heap/BUILD | 4 +-- .../resource_monitors/injected_resource/BUILD | 9 ++++-- .../retry/host/omit_canary_hosts/BUILD | 4 +-- .../retry/host/omit_host_metadata/BUILD | 4 +-- .../retry/host/previous_hosts/BUILD | 4 +-- source/extensions/retry/priority/BUILD | 4 +-- .../retry/priority/previous_priorities/BUILD | 4 +-- source/extensions/stat_sinks/BUILD | 6 ++-- .../extensions/stat_sinks/common/statsd/BUILD | 4 +-- source/extensions/stat_sinks/dog_statsd/BUILD | 4 +-- source/extensions/stat_sinks/hystrix/BUILD | 4 +-- .../stat_sinks/metrics_service/BUILD | 4 +-- source/extensions/stat_sinks/statsd/BUILD | 9 ++++-- source/extensions/tracers/common/BUILD | 4 +-- source/extensions/tracers/common/ot/BUILD | 4 +-- source/extensions/tracers/datadog/BUILD | 4 +-- source/extensions/tracers/dynamic_ot/BUILD | 4 +-- source/extensions/tracers/lightstep/BUILD | 4 +-- source/extensions/tracers/opencensus/BUILD | 4 +-- source/extensions/tracers/xray/BUILD | 4 +-- source/extensions/tracers/zipkin/BUILD | 9 ++++-- source/extensions/transport_sockets/BUILD | 6 ++-- .../extensions/transport_sockets/alts/BUILD | 4 +-- .../extensions/transport_sockets/common/BUILD | 4 +-- .../transport_sockets/proxy_protocol/BUILD | 4 +-- .../transport_sockets/raw_buffer/BUILD | 6 ++-- source/extensions/transport_sockets/tap/BUILD | 10 +++++-- source/extensions/transport_sockets/tls/BUILD | 12 ++++++-- 
.../transport_sockets/tls/private_key/BUILD | 4 +-- .../extensions/upstreams/http/generic/BUILD | 4 +-- source/extensions/upstreams/http/http/BUILD | 4 +-- source/extensions/upstreams/http/tcp/BUILD | 4 +-- tools/code_format/envoy_build_fixer.py | 30 ++++++++++++------- 159 files changed, 586 insertions(+), 315 deletions(-) diff --git a/BUILD b/BUILD index 4dc2cadee42d..8518272d537f 100644 --- a/BUILD +++ b/BUILD @@ -1,6 +1,36 @@ +load( + "@envoy_build_config//:extensions_build_config.bzl", + "ADDITIONAL_VISIBILITY", +) + licenses(["notice"]) # Apache 2 exports_files([ "VERSION", ".clang-format", ]) + +# These two definitions exist to help reduce Envoy upstream core code depending on extensions. +# To avoid visibility problems, one can extend ADDITIONAL_VISIBILITY in source/extensions/extensions_build_config.bzl +# +# TODO(#9953) //test/config_test:__pkg__ should probably be split up and removed. +# TODO(#9953) the config fuzz tests should be moved somewhere local and //test/config_test and //test/server removed. +package_group( + name = "extension_config", + packages = [ + "//source/exe", + "//source/extensions/...", + "//test/config_test", + "//test/extensions/...", + "//test/server", + "//test/server/config_validation", + ] + ADDITIONAL_VISIBILITY, +) + +package_group( + name = "extension_library", + packages = [ + "//source/extensions/...", + "//test/extensions/...", + ] + ADDITIONAL_VISIBILITY, +) diff --git a/bazel/README.md b/bazel/README.md index 62d7b6e8f2c0..0c3e3f9abb9d 100644 --- a/bazel/README.md +++ b/bazel/README.md @@ -611,6 +611,17 @@ local_repository( ... ``` +## Extra extensions + +If you are building your own Envoy extensions or custom Envoy builds and encounter visibility +problems with, you may need to adjust the default visibility rules. +By default, Envoy extensions are set up to only be visible to code within the +[//source/extensions](../source/extensions/), or the Envoy server target. To adjust this, +add any additional targets you need to `ADDITIONAL_VISIBILITY` in +[extensions_build_config.bzl](../source/extensions/extensions_build_config.bzl). +See the instructions above about how to create your own custom version of +[extensions_build_config.bzl](../source/extensions/extensions_build_config.bzl). + # Release builds Release builds should be built in `opt` mode, processed with `strip` and have a diff --git a/bazel/envoy_build_system.bzl b/bazel/envoy_build_system.bzl index 0f062cbfe8d8..a96a2cdabc0d 100644 --- a/bazel/envoy_build_system.bzl +++ b/bazel/envoy_build_system.bzl @@ -36,6 +36,9 @@ load( def envoy_package(): native.package(default_visibility = ["//visibility:public"]) +def envoy_extension_package(): + native.package(default_visibility = ["//:extension_library"]) + # A genrule variant that can output a directory. This is useful when doing things like # generating a fuzz corpus mechanically. 
def _envoy_directory_genrule_impl(ctx): diff --git a/bazel/envoy_library.bzl b/bazel/envoy_library.bzl index dd35bcac6f9a..63e4b963bb18 100644 --- a/bazel/envoy_library.bzl +++ b/bazel/envoy_library.bzl @@ -70,12 +70,13 @@ def envoy_cc_extension( undocumented = False, status = "stable", tags = [], + visibility = ["//:extension_config"], **kwargs): if security_posture not in EXTENSION_SECURITY_POSTURES: fail("Unknown extension security posture: " + security_posture) if status not in EXTENSION_STATUS_VALUES: fail("Unknown extension status: " + status) - envoy_cc_library(name, tags = tags, **kwargs) + envoy_cc_library(name, tags = tags, visibility = visibility, **kwargs) # Envoy C++ library targets should be specified with this function. def envoy_cc_library( diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 01c1f3dd56a0..f06045af222a 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -5,6 +5,8 @@ Incompatible Behavior Changes ----------------------------- *Changes that are expected to cause an incompatibility if applicable; deployment changes are likely required* +* build: added visibility rules for upstream. If these cause visibility related breakage, see notes in //BUILD. + Minor Behavior Changes ---------------------- *Changes that may cause incompatibilities for some users, but should not for most* diff --git a/source/extensions/access_loggers/BUILD b/source/extensions/access_loggers/BUILD index 06456dbbcb5e..40a5e79b39d3 100644 --- a/source/extensions/access_loggers/BUILD +++ b/source/extensions/access_loggers/BUILD @@ -1,16 +1,18 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "well_known_names", hdrs = ["well_known_names.h"], + # well known names files are public as long as they exist. + visibility = ["//visibility:public"], deps = [ "//source/common/singleton:const_singleton", ], diff --git a/source/extensions/access_loggers/common/BUILD b/source/extensions/access_loggers/common/BUILD index a4cf5294cf81..1afb1f270a42 100644 --- a/source/extensions/access_loggers/common/BUILD +++ b/source/extensions/access_loggers/common/BUILD @@ -1,14 +1,14 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 # Base class for implementations of AccessLog::Instance. -envoy_package() +envoy_extension_package() envoy_cc_library( name = "access_log_base", diff --git a/source/extensions/access_loggers/file/BUILD b/source/extensions/access_loggers/file/BUILD index 6e86f2e0a490..b95be9f7228c 100644 --- a/source/extensions/access_loggers/file/BUILD +++ b/source/extensions/access_loggers/file/BUILD @@ -2,7 +2,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 @@ -10,12 +10,14 @@ licenses(["notice"]) # Apache 2 # Access log implementation that writes to a file. # Public docs: docs/root/configuration/access_log.rst -envoy_package() +envoy_extension_package() envoy_cc_library( name = "file_access_log_lib", srcs = ["file_access_log_impl.cc"], hdrs = ["file_access_log_impl.h"], + # The file based access logger is core code. 
+ visibility = ["//visibility:public"], deps = [ "//source/extensions/access_loggers/common:access_log_base", ], @@ -26,6 +28,11 @@ envoy_cc_extension( srcs = ["config.cc"], hdrs = ["config.h"], security_posture = "robust_to_untrusted_downstream", + # TODO(#9953) determine if this is core or should be cleaned up. + visibility = [ + "//:extension_config", + "//test:__subpackages__", + ], deps = [ ":file_access_log_lib", "//include/envoy/registry", diff --git a/source/extensions/access_loggers/grpc/BUILD b/source/extensions/access_loggers/grpc/BUILD index e92a44b24d6d..94683341a2f7 100644 --- a/source/extensions/access_loggers/grpc/BUILD +++ b/source/extensions/access_loggers/grpc/BUILD @@ -2,7 +2,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 @@ -10,7 +10,7 @@ licenses(["notice"]) # Apache 2 # Access log implementation that writes to a gRPC service. # Public docs: TODO(rodaine): Docs needed. -envoy_package() +envoy_extension_package() envoy_cc_library( name = "config_utils", @@ -98,6 +98,12 @@ envoy_cc_extension( srcs = ["http_config.cc"], hdrs = ["http_config.h"], security_posture = "robust_to_untrusted_downstream", + # TODO(#9953) clean up. + visibility = [ + "//:extension_config", + "//test/common/access_log:__subpackages__", + "//test/integration:__subpackages__", + ], deps = [ ":config_utils", "//include/envoy/server:access_log_config_interface", @@ -115,6 +121,12 @@ envoy_cc_extension( srcs = ["tcp_config.cc"], hdrs = ["tcp_config.h"], security_posture = "robust_to_untrusted_downstream", + # TODO(#9953) clean up. + visibility = [ + "//:extension_config", + "//test/common/access_log:__subpackages__", + "//test/integration:__subpackages__", + ], deps = [ ":config_utils", "//include/envoy/server:access_log_config_interface", diff --git a/source/extensions/clusters/BUILD b/source/extensions/clusters/BUILD index ee5bcf6bc186..46709ec0c238 100644 --- a/source/extensions/clusters/BUILD +++ b/source/extensions/clusters/BUILD @@ -1,16 +1,18 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "well_known_names", hdrs = ["well_known_names.h"], + # well known names files are public as long as they exist. 
+ visibility = ["//visibility:public"], deps = [ "//source/common/config:well_known_names", "//source/common/singleton:const_singleton", diff --git a/source/extensions/clusters/aggregate/BUILD b/source/extensions/clusters/aggregate/BUILD index d6c7d4d1a515..d23dd525625a 100644 --- a/source/extensions/clusters/aggregate/BUILD +++ b/source/extensions/clusters/aggregate/BUILD @@ -1,12 +1,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_extension( name = "cluster", diff --git a/source/extensions/clusters/dynamic_forward_proxy/BUILD b/source/extensions/clusters/dynamic_forward_proxy/BUILD index 744f1e1bfca8..0dc4780118e1 100644 --- a/source/extensions/clusters/dynamic_forward_proxy/BUILD +++ b/source/extensions/clusters/dynamic_forward_proxy/BUILD @@ -1,12 +1,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_extension( name = "cluster", diff --git a/source/extensions/clusters/redis/BUILD b/source/extensions/clusters/redis/BUILD index 3edf4864852c..784103719061 100644 --- a/source/extensions/clusters/redis/BUILD +++ b/source/extensions/clusters/redis/BUILD @@ -2,12 +2,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "crc16_lib", diff --git a/source/extensions/common/BUILD b/source/extensions/common/BUILD index 54a5bcddfc7f..abc0d81c2d50 100644 --- a/source/extensions/common/BUILD +++ b/source/extensions/common/BUILD @@ -1,16 +1,18 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "utility_lib", hdrs = ["utility.h"], + # Legacy. TODO(#9953) clean up. + visibility = ["//visibility:public"], deps = [ "//include/envoy/runtime:runtime_interface", "//source/common/common:documentation_url_lib", diff --git a/source/extensions/common/aws/BUILD b/source/extensions/common/aws/BUILD index 4d610a59545f..621d60806d54 100644 --- a/source/extensions/common/aws/BUILD +++ b/source/extensions/common/aws/BUILD @@ -1,12 +1,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "signer_interface", diff --git a/source/extensions/common/crypto/BUILD b/source/extensions/common/crypto/BUILD index 836c8320a523..ea1802a97570 100644 --- a/source/extensions/common/crypto/BUILD +++ b/source/extensions/common/crypto/BUILD @@ -1,12 +1,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_extension( name = "utility_lib", @@ -23,6 +23,12 @@ envoy_cc_extension( ], security_posture = "unknown", undocumented = True, + # Legacy test use. TODO(#9953) clean up. 
+ visibility = [ + "//:extension_config", + "//test/common/config:__subpackages__", + "//test/common/crypto:__subpackages__", + ], deps = [ "//include/envoy/buffer:buffer_interface", "//source/common/common:assert_lib", diff --git a/source/extensions/common/dynamic_forward_proxy/BUILD b/source/extensions/common/dynamic_forward_proxy/BUILD index 4321013da9f9..19d613869618 100644 --- a/source/extensions/common/dynamic_forward_proxy/BUILD +++ b/source/extensions/common/dynamic_forward_proxy/BUILD @@ -1,12 +1,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "dns_cache_interface", diff --git a/source/extensions/common/proxy_protocol/BUILD b/source/extensions/common/proxy_protocol/BUILD index fb0d2f74c09f..7a2b9bf66d03 100644 --- a/source/extensions/common/proxy_protocol/BUILD +++ b/source/extensions/common/proxy_protocol/BUILD @@ -1,17 +1,19 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "proxy_protocol_header_lib", srcs = ["proxy_protocol_header.cc"], hdrs = ["proxy_protocol_header.h"], + # This is used by the router, so considered core code. + visibility = ["//visibility:public"], deps = [ "//include/envoy/buffer:buffer_interface", "//include/envoy/network:address_interface", diff --git a/source/extensions/common/redis/BUILD b/source/extensions/common/redis/BUILD index 1d50b1cfc6fc..f7427e61ad2e 100644 --- a/source/extensions/common/redis/BUILD +++ b/source/extensions/common/redis/BUILD @@ -1,7 +1,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 @@ -10,7 +10,7 @@ licenses(["notice"]) # Apache 2 # clusters. 
# Public docs: docs/root/configuration/network_filters/redis_proxy_filter.rst -envoy_package() +envoy_extension_package() envoy_cc_library( name = "cluster_refresh_manager_interface", diff --git a/source/extensions/common/sqlutils/BUILD b/source/extensions/common/sqlutils/BUILD index c0129c29cfc3..f477e6a42208 100644 --- a/source/extensions/common/sqlutils/BUILD +++ b/source/extensions/common/sqlutils/BUILD @@ -1,12 +1,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "sqlutils_lib", diff --git a/source/extensions/common/tap/BUILD b/source/extensions/common/tap/BUILD index 480b2d05b6f3..8cf381c67dee 100644 --- a/source/extensions/common/tap/BUILD +++ b/source/extensions/common/tap/BUILD @@ -1,12 +1,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "tap_interface", diff --git a/source/extensions/common/wasm/BUILD b/source/extensions/common/wasm/BUILD index c31b2deb485b..6e034dbda256 100644 --- a/source/extensions/common/wasm/BUILD +++ b/source/extensions/common/wasm/BUILD @@ -1,12 +1,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "well_known_names", diff --git a/source/extensions/common/wasm/null/BUILD b/source/extensions/common/wasm/null/BUILD index 0d9d49510412..31a33d8f4d49 100644 --- a/source/extensions/common/wasm/null/BUILD +++ b/source/extensions/common/wasm/null/BUILD @@ -1,12 +1,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "null_vm_plugin_interface", diff --git a/source/extensions/common/wasm/v8/BUILD b/source/extensions/common/wasm/v8/BUILD index 0e4f86d97a66..4ff62d112f2f 100644 --- a/source/extensions/common/wasm/v8/BUILD +++ b/source/extensions/common/wasm/v8/BUILD @@ -1,12 +1,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "v8_lib", diff --git a/source/extensions/compression/common/compressor/BUILD b/source/extensions/compression/common/compressor/BUILD index 54843124ba79..db3d5c88ae16 100644 --- a/source/extensions/compression/common/compressor/BUILD +++ b/source/extensions/compression/common/compressor/BUILD @@ -1,12 +1,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "compressor_factory_base_lib", diff --git a/source/extensions/compression/common/decompressor/BUILD b/source/extensions/compression/common/decompressor/BUILD index 27208bee530a..0d69c90a8acd 100644 --- a/source/extensions/compression/common/decompressor/BUILD +++ b/source/extensions/compression/common/decompressor/BUILD @@ -1,12 +1,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() 
+envoy_extension_package() envoy_cc_library( name = "decompressor_factory_base_lib", diff --git a/source/extensions/compression/gzip/common/BUILD b/source/extensions/compression/gzip/common/BUILD index 8ec29af79ddb..5c301a6a9abe 100644 --- a/source/extensions/compression/gzip/common/BUILD +++ b/source/extensions/compression/gzip/common/BUILD @@ -1,12 +1,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "zlib_base_lib", diff --git a/source/extensions/compression/gzip/compressor/BUILD b/source/extensions/compression/gzip/compressor/BUILD index 3f37d2524356..e8918d1fcbc8 100644 --- a/source/extensions/compression/gzip/compressor/BUILD +++ b/source/extensions/compression/gzip/compressor/BUILD @@ -2,12 +2,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "compressor_lib", diff --git a/source/extensions/compression/gzip/decompressor/BUILD b/source/extensions/compression/gzip/decompressor/BUILD index 9c86b64ef61b..b4c6fb375d45 100644 --- a/source/extensions/compression/gzip/decompressor/BUILD +++ b/source/extensions/compression/gzip/decompressor/BUILD @@ -2,12 +2,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "zlib_decompressor_impl_lib", diff --git a/source/extensions/extensions_build_config.bzl b/source/extensions/extensions_build_config.bzl index d69443ada6a0..0ae05caa57c0 100644 --- a/source/extensions/extensions_build_config.bzl +++ b/source/extensions/extensions_build_config.bzl @@ -198,3 +198,9 @@ EXTENSIONS = { } + +# This can be used to extend the visibility rules for Envoy extensions +# (//:extension_config and //:extension_library in //BUILD) +# if downstream Envoy builds need to directly reference envoy extensions. 
+ADDITIONAL_VISIBILITY = [ + ] diff --git a/source/extensions/filters/common/expr/BUILD b/source/extensions/filters/common/expr/BUILD index d9abedc88404..fbbcd725ba43 100644 --- a/source/extensions/filters/common/expr/BUILD +++ b/source/extensions/filters/common/expr/BUILD @@ -1,12 +1,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "evaluator_lib", diff --git a/source/extensions/filters/common/ext_authz/BUILD b/source/extensions/filters/common/ext_authz/BUILD index 45d4fb01d96f..977560fefb20 100644 --- a/source/extensions/filters/common/ext_authz/BUILD +++ b/source/extensions/filters/common/ext_authz/BUILD @@ -1,12 +1,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "ext_authz_interface", diff --git a/source/extensions/filters/common/fault/BUILD b/source/extensions/filters/common/fault/BUILD index d64605085fc4..bf05af548e01 100644 --- a/source/extensions/filters/common/fault/BUILD +++ b/source/extensions/filters/common/fault/BUILD @@ -1,12 +1,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "fault_config_lib", diff --git a/source/extensions/filters/common/lua/BUILD b/source/extensions/filters/common/lua/BUILD index 0095b156c4b6..769784c89092 100644 --- a/source/extensions/filters/common/lua/BUILD +++ b/source/extensions/filters/common/lua/BUILD @@ -1,14 +1,14 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) load("//bazel:envoy_internal.bzl", "envoy_external_dep_path") load("@bazel_skylib//rules:common_settings.bzl", "bool_flag") licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() bool_flag( name = "moonjit", diff --git a/source/extensions/filters/common/original_src/BUILD b/source/extensions/filters/common/original_src/BUILD index 76662376ee0c..0c4b4832e2e3 100644 --- a/source/extensions/filters/common/original_src/BUILD +++ b/source/extensions/filters/common/original_src/BUILD @@ -1,14 +1,14 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 # Helprs for filters for mirroring the downstream remote address on the upstream's source. 
-envoy_package() +envoy_extension_package() envoy_cc_library( name = "original_src_socket_option_lib", diff --git a/source/extensions/filters/common/ratelimit/BUILD b/source/extensions/filters/common/ratelimit/BUILD index 726bdf338f9a..e98dc90a8916 100644 --- a/source/extensions/filters/common/ratelimit/BUILD +++ b/source/extensions/filters/common/ratelimit/BUILD @@ -1,12 +1,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "ratelimit_lib", diff --git a/source/extensions/filters/common/rbac/BUILD b/source/extensions/filters/common/rbac/BUILD index 9a9bbc105749..14c649ca4340 100644 --- a/source/extensions/filters/common/rbac/BUILD +++ b/source/extensions/filters/common/rbac/BUILD @@ -1,12 +1,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "utility_lib", diff --git a/source/extensions/filters/http/BUILD b/source/extensions/filters/http/BUILD index ee5bcf6bc186..790ddc806157 100644 --- a/source/extensions/filters/http/BUILD +++ b/source/extensions/filters/http/BUILD @@ -1,16 +1,18 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "well_known_names", hdrs = ["well_known_names.h"], + # Well known names are public. + visibility = ["//visibility:public"], deps = [ "//source/common/config:well_known_names", "//source/common/singleton:const_singleton", diff --git a/source/extensions/filters/http/adaptive_concurrency/BUILD b/source/extensions/filters/http/adaptive_concurrency/BUILD index c6a7a2d4e95f..9cef1214ab36 100644 --- a/source/extensions/filters/http/adaptive_concurrency/BUILD +++ b/source/extensions/filters/http/adaptive_concurrency/BUILD @@ -2,7 +2,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 @@ -11,7 +11,7 @@ licenses(["notice"]) # Apache 2 # requests based on sampled latencies. # Public docs: docs/root/configuration/http_filters/adaptive_concurrency_filter.rst -envoy_package() +envoy_extension_package() envoy_cc_library( name = "adaptive_concurrency_filter_lib", diff --git a/source/extensions/filters/http/adaptive_concurrency/controller/BUILD b/source/extensions/filters/http/adaptive_concurrency/controller/BUILD index ae74e71c6b35..b9f4475d7af7 100644 --- a/source/extensions/filters/http/adaptive_concurrency/controller/BUILD +++ b/source/extensions/filters/http/adaptive_concurrency/controller/BUILD @@ -1,7 +1,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 @@ -10,7 +10,7 @@ licenses(["notice"]) # Apache 2 # requests based on sampled latencies. 
# Public docs: TODO (tonya11en) -envoy_package() +envoy_extension_package() envoy_cc_library( name = "controller_lib", diff --git a/source/extensions/filters/http/admission_control/BUILD b/source/extensions/filters/http/admission_control/BUILD index 2bfdfb9912a6..07acbda5fe58 100644 --- a/source/extensions/filters/http/admission_control/BUILD +++ b/source/extensions/filters/http/admission_control/BUILD @@ -1,7 +1,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 @@ -9,7 +9,7 @@ licenses(["notice"]) # Apache 2 # HTTP L7 filter that probabilistically rejects requests based on upstream success-rate. # Public docs: docs/root/configuration/http_filters/admission_control.rst -envoy_package() +envoy_extension_package() envoy_cc_extension( name = "admission_control_filter_lib", diff --git a/source/extensions/filters/http/admission_control/evaluators/BUILD b/source/extensions/filters/http/admission_control/evaluators/BUILD index 79910a264e7e..c5c72ee2db5c 100644 --- a/source/extensions/filters/http/admission_control/evaluators/BUILD +++ b/source/extensions/filters/http/admission_control/evaluators/BUILD @@ -1,14 +1,14 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 # HTTP L7 filter that probabilistically rejects requests based on upstream success-rate. -envoy_package() +envoy_extension_package() envoy_cc_library( name = "response_evaluator_lib", diff --git a/source/extensions/filters/http/aws_lambda/BUILD b/source/extensions/filters/http/aws_lambda/BUILD index 1e3d6006293a..86e2cc553f78 100644 --- a/source/extensions/filters/http/aws_lambda/BUILD +++ b/source/extensions/filters/http/aws_lambda/BUILD @@ -2,7 +2,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", "envoy_proto_library", ) @@ -11,7 +11,7 @@ licenses(["notice"]) # Apache 2 # L7 HTTP AWS Lambda filter # Public docs: docs/root/configuration/http_filters/aws_lambda_filter.rst -envoy_package() +envoy_extension_package() envoy_proto_library( name = "request_response", diff --git a/source/extensions/filters/http/aws_request_signing/BUILD b/source/extensions/filters/http/aws_request_signing/BUILD index a83efef61e98..01b83ecf6865 100644 --- a/source/extensions/filters/http/aws_request_signing/BUILD +++ b/source/extensions/filters/http/aws_request_signing/BUILD @@ -2,7 +2,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 @@ -10,7 +10,7 @@ licenses(["notice"]) # Apache 2 # L7 HTTP AWS request signing filter # Public docs: docs/root/configuration/http_filters/aws_request_signing_filter.rst -envoy_package() +envoy_extension_package() envoy_cc_library( name = "aws_request_signing_filter_lib", diff --git a/source/extensions/filters/http/buffer/BUILD b/source/extensions/filters/http/buffer/BUILD index eeb4a403931e..9f9364576031 100644 --- a/source/extensions/filters/http/buffer/BUILD +++ b/source/extensions/filters/http/buffer/BUILD @@ -2,7 +2,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 @@ -10,7 +10,7 @@ licenses(["notice"]) # Apache 2 # Request buffering and timeout L7 HTTP filter # Public docs: 
docs/root/configuration/http_filters/buffer_filter.rst -envoy_package() +envoy_extension_package() envoy_cc_library( name = "buffer_filter_lib", @@ -38,6 +38,11 @@ envoy_cc_extension( srcs = ["config.cc"], hdrs = ["config.h"], security_posture = "robust_to_untrusted_downstream", + # Legacy test use. TODO(#9953) clean up. + visibility = [ + "//:extension_config", + "//test:__subpackages__", + ], deps = [ "//include/envoy/registry", "//source/extensions/filters/http:well_known_names", diff --git a/source/extensions/filters/http/cache/BUILD b/source/extensions/filters/http/cache/BUILD index ee97d6f0a9a8..159fd3e80253 100644 --- a/source/extensions/filters/http/cache/BUILD +++ b/source/extensions/filters/http/cache/BUILD @@ -2,7 +2,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", "envoy_proto_library", ) @@ -10,7 +10,7 @@ licenses(["notice"]) # Apache 2 ## Pluggable HTTP cache filter -envoy_package() +envoy_extension_package() envoy_cc_library( name = "cache_filter_lib", diff --git a/source/extensions/filters/http/cache/simple_http_cache/BUILD b/source/extensions/filters/http/cache/simple_http_cache/BUILD index b38c273b2601..f9484060aa97 100644 --- a/source/extensions/filters/http/cache/simple_http_cache/BUILD +++ b/source/extensions/filters/http/cache/simple_http_cache/BUILD @@ -1,7 +1,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", - "envoy_package", + "envoy_extension_package", "envoy_proto_library", ) @@ -9,7 +9,7 @@ licenses(["notice"]) # Apache 2 ## WIP: Simple in-memory cache storage plugin. Not ready for deployment. -envoy_package() +envoy_extension_package() envoy_cc_extension( name = "simple_http_cache_lib", diff --git a/source/extensions/filters/http/common/BUILD b/source/extensions/filters/http/common/BUILD index 7a3ccda3d2c1..39da5c48c58e 100644 --- a/source/extensions/filters/http/common/BUILD +++ b/source/extensions/filters/http/common/BUILD @@ -1,16 +1,21 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "pass_through_filter_lib", hdrs = ["pass_through_filter.h"], + # A thin shim used by test and prod filters. + visibility = [ + "//source:__subpackages__", + "//test:__subpackages__", + ], deps = [ "//include/envoy/server:filter_config_interface", ], @@ -41,6 +46,11 @@ envoy_cc_library( envoy_cc_library( name = "utility_lib", hdrs = ["utility.h"], + # Used by the router filter. TODO(#9953) clean up. + visibility = [ + "//source:__subpackages__", + "//test:__subpackages__", + ], deps = [ "//include/envoy/runtime:runtime_interface", "//source/common/common:macros", diff --git a/source/extensions/filters/http/common/compressor/BUILD b/source/extensions/filters/http/common/compressor/BUILD index 56468881c8f2..a1c67b984a5e 100644 --- a/source/extensions/filters/http/common/compressor/BUILD +++ b/source/extensions/filters/http/common/compressor/BUILD @@ -1,12 +1,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() # TODO(rojkov): move this library to source/extensions/filters/http/compressor/. 
envoy_cc_library( diff --git a/source/extensions/filters/http/compressor/BUILD b/source/extensions/filters/http/compressor/BUILD index ea1d38801a5e..01855f8eb64a 100644 --- a/source/extensions/filters/http/compressor/BUILD +++ b/source/extensions/filters/http/compressor/BUILD @@ -2,7 +2,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 @@ -10,7 +10,7 @@ licenses(["notice"]) # Apache 2 # HTTP L7 filter that performs compression with configurable compression libraries # Public docs: docs/root/configuration/http_filters/compressor_filter.rst -envoy_package() +envoy_extension_package() envoy_cc_library( name = "compressor_filter_lib", diff --git a/source/extensions/filters/http/cors/BUILD b/source/extensions/filters/http/cors/BUILD index 0685c0e41f27..903fa5599ff0 100644 --- a/source/extensions/filters/http/cors/BUILD +++ b/source/extensions/filters/http/cors/BUILD @@ -2,7 +2,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 @@ -10,7 +10,7 @@ licenses(["notice"]) # Apache 2 # L7 HTTP filter which implements CORS processing (https://en.wikipedia.org/wiki/Cross-origin_resource_sharing) # Public docs: docs/root/configuration/http_filters/cors_filter.rst -envoy_package() +envoy_extension_package() envoy_cc_library( name = "cors_filter_lib", @@ -32,6 +32,11 @@ envoy_cc_extension( srcs = ["config.cc"], hdrs = ["config.h"], security_posture = "robust_to_untrusted_downstream", + # TODO(#9953) clean up. + visibility = [ + "//:extension_config", + "//test/integration:__subpackages__", + ], deps = [ "//include/envoy/registry", "//include/envoy/server:filter_config_interface", diff --git a/source/extensions/filters/http/csrf/BUILD b/source/extensions/filters/http/csrf/BUILD index c82dbf9764e2..47bea6f6bbf2 100644 --- a/source/extensions/filters/http/csrf/BUILD +++ b/source/extensions/filters/http/csrf/BUILD @@ -2,7 +2,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 @@ -10,7 +10,7 @@ licenses(["notice"]) # Apache 2 # L7 HTTP filter which implements CSRF processing (https://www.owasp.org/index.php/Cross-Site_Request_Forgery_(CSRF)) # Public docs: docs/root/configuration/http_filters/csrf_filter.rst -envoy_package() +envoy_extension_package() envoy_cc_library( name = "csrf_filter_lib", diff --git a/source/extensions/filters/http/decompressor/BUILD b/source/extensions/filters/http/decompressor/BUILD index b4665ca09b7b..08d224b8b284 100644 --- a/source/extensions/filters/http/decompressor/BUILD +++ b/source/extensions/filters/http/decompressor/BUILD @@ -2,7 +2,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 @@ -10,7 +10,7 @@ licenses(["notice"]) # Apache 2 # HTTP L7 filter that performs decompression with configurable decompression libraries # Public docs: docs/root/configuration/http_filters/decompressor_filter.rst -envoy_package() +envoy_extension_package() envoy_cc_library( name = "decompressor_filter_lib", diff --git a/source/extensions/filters/http/dynamic_forward_proxy/BUILD b/source/extensions/filters/http/dynamic_forward_proxy/BUILD index 56d4ff9be0b6..dc15f124ed78 100644 --- 
a/source/extensions/filters/http/dynamic_forward_proxy/BUILD +++ b/source/extensions/filters/http/dynamic_forward_proxy/BUILD @@ -2,12 +2,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "proxy_filter_lib", diff --git a/source/extensions/filters/http/dynamo/BUILD b/source/extensions/filters/http/dynamo/BUILD index ad5f2fc3b97e..c152863819ed 100644 --- a/source/extensions/filters/http/dynamo/BUILD +++ b/source/extensions/filters/http/dynamo/BUILD @@ -2,7 +2,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 @@ -10,7 +10,7 @@ licenses(["notice"]) # Apache 2 # AWS DynamoDB L7 HTTP filter (observability): https://aws.amazon.com/dynamodb/ # Public docs: docs/root/configuration/http_filters/dynamodb_filter.rst -envoy_package() +envoy_extension_package() envoy_cc_library( name = "dynamo_filter_lib", diff --git a/source/extensions/filters/http/ext_authz/BUILD b/source/extensions/filters/http/ext_authz/BUILD index 559363edcf7b..0d789c30c048 100644 --- a/source/extensions/filters/http/ext_authz/BUILD +++ b/source/extensions/filters/http/ext_authz/BUILD @@ -2,7 +2,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 @@ -10,7 +10,7 @@ licenses(["notice"]) # Apache 2 # External authorization L7 HTTP filter # Public docs: TODO(saumoh): Docs needed in docs/root/configuration/http_filters -envoy_package() +envoy_extension_package() envoy_cc_library( name = "ext_authz", diff --git a/source/extensions/filters/http/fault/BUILD b/source/extensions/filters/http/fault/BUILD index 726cda5785d0..a518d60f37e1 100644 --- a/source/extensions/filters/http/fault/BUILD +++ b/source/extensions/filters/http/fault/BUILD @@ -2,7 +2,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 @@ -10,7 +10,7 @@ licenses(["notice"]) # Apache 2 # HTTP L7 filter that injects faults into the request flow # Public docs: docs/root/configuration/http_filters/fault_filter.rst -envoy_package() +envoy_extension_package() envoy_cc_library( name = "fault_filter_lib", diff --git a/source/extensions/filters/http/grpc_http1_bridge/BUILD b/source/extensions/filters/http/grpc_http1_bridge/BUILD index 486904e2f8a5..1a978232aa06 100644 --- a/source/extensions/filters/http/grpc_http1_bridge/BUILD +++ b/source/extensions/filters/http/grpc_http1_bridge/BUILD @@ -2,7 +2,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 @@ -10,7 +10,7 @@ licenses(["notice"]) # Apache 2 # L7 HTTP filter that bridges HTTP/1.1 unary "gRPC" to compliant HTTP/2 gRPC. # Public docs: docs/root/configuration/http_filters/grpc_http1_bridge_filter.rst -envoy_package() +envoy_extension_package() envoy_cc_library( name = "http1_bridge_filter_lib", @@ -34,6 +34,13 @@ envoy_cc_extension( srcs = ["config.cc"], hdrs = ["config.h"], security_posture = "unknown", + # Legacy test use. TODO(#9953) clean up. 
+ visibility = [ + "//:extension_config", + "//source/exe:__pkg__", + "//test/integration:__subpackages__", + "//test/server:__subpackages__", + ], deps = [ "//include/envoy/registry", "//include/envoy/server:filter_config_interface", diff --git a/source/extensions/filters/http/grpc_http1_reverse_bridge/BUILD b/source/extensions/filters/http/grpc_http1_reverse_bridge/BUILD index 1a80fefdb45c..852c3c368a5f 100644 --- a/source/extensions/filters/http/grpc_http1_reverse_bridge/BUILD +++ b/source/extensions/filters/http/grpc_http1_reverse_bridge/BUILD @@ -2,12 +2,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "filter_lib", diff --git a/source/extensions/filters/http/grpc_json_transcoder/BUILD b/source/extensions/filters/http/grpc_json_transcoder/BUILD index 3b7ab0a09d22..88429fc0bfc7 100644 --- a/source/extensions/filters/http/grpc_json_transcoder/BUILD +++ b/source/extensions/filters/http/grpc_json_transcoder/BUILD @@ -2,7 +2,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 @@ -10,7 +10,7 @@ licenses(["notice"]) # Apache 2 # L7 HTTP filter that implements binary gRPC to JSON transcoding # Public docs: docs/root/configuration/http_filters/grpc_json_transcoder_filter.rst -envoy_package() +envoy_extension_package() envoy_cc_library( name = "json_transcoder_filter_lib", diff --git a/source/extensions/filters/http/grpc_stats/BUILD b/source/extensions/filters/http/grpc_stats/BUILD index 62bc49e8be01..ac38af975136 100644 --- a/source/extensions/filters/http/grpc_stats/BUILD +++ b/source/extensions/filters/http/grpc_stats/BUILD @@ -1,14 +1,14 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 # L7 HTTP filter that implements gRPC telemetry -envoy_package() +envoy_extension_package() envoy_cc_extension( name = "config", diff --git a/source/extensions/filters/http/grpc_web/BUILD b/source/extensions/filters/http/grpc_web/BUILD index 1f6910590907..d18eb56ed01d 100644 --- a/source/extensions/filters/http/grpc_web/BUILD +++ b/source/extensions/filters/http/grpc_web/BUILD @@ -2,7 +2,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 @@ -10,7 +10,7 @@ licenses(["notice"]) # Apache 2 # L7 HTTP filter that implements the grpc-web protocol (https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-WEB.md) # Public docs: docs/root/configuration/http_filters/grpc_web_filter.rst -envoy_package() +envoy_extension_package() envoy_cc_library( name = "grpc_web_filter_lib", diff --git a/source/extensions/filters/http/gzip/BUILD b/source/extensions/filters/http/gzip/BUILD index 3844addc83b6..39b1459d45be 100644 --- a/source/extensions/filters/http/gzip/BUILD +++ b/source/extensions/filters/http/gzip/BUILD @@ -2,7 +2,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 @@ -10,7 +10,7 @@ licenses(["notice"]) # Apache 2 # HTTP L7 filter that performs gzip compression # Public docs: docs/root/configuration/http_filters/gzip_filter.rst -envoy_package() +envoy_extension_package()
envoy_cc_library( name = "gzip_filter_lib", diff --git a/source/extensions/filters/http/header_to_metadata/BUILD b/source/extensions/filters/http/header_to_metadata/BUILD index e0232d4d8d1c..1bbe574312e6 100644 --- a/source/extensions/filters/http/header_to_metadata/BUILD +++ b/source/extensions/filters/http/header_to_metadata/BUILD @@ -2,7 +2,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 @@ -10,7 +10,7 @@ licenses(["notice"]) # Apache 2 # HTTP L7 filter that transforms request data into dynamic metadata # Public docs: docs/root/configuration/http_filters/header_to_metadata_filter.rst -envoy_package() +envoy_extension_package() envoy_cc_library( name = "header_to_metadata_filter_lib", diff --git a/source/extensions/filters/http/health_check/BUILD b/source/extensions/filters/http/health_check/BUILD index f0841d388b48..dd4fa02f30b3 100644 --- a/source/extensions/filters/http/health_check/BUILD +++ b/source/extensions/filters/http/health_check/BUILD @@ -2,7 +2,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 @@ -10,7 +10,7 @@ licenses(["notice"]) # Apache 2 # L7 HTTP filter that implements health check responses # Public docs: docs/root/configuration/http_filters/health_check_filter.rst -envoy_package() +envoy_extension_package() envoy_cc_library( name = "health_check_lib", @@ -38,6 +38,13 @@ envoy_cc_extension( srcs = ["config.cc"], hdrs = ["config.h"], security_posture = "robust_to_untrusted_downstream", + # Legacy test use. TODO(#9953) clean up. + visibility = [ + "//:extension_config", + "//test/common/filter/http:__subpackages__", + "//test/integration:__subpackages__", + "//test/server:__subpackages__", + ], deps = [ "//include/envoy/registry", "//source/common/http:header_utility_lib", diff --git a/source/extensions/filters/http/ip_tagging/BUILD b/source/extensions/filters/http/ip_tagging/BUILD index cbcf98b1d516..5e27f10bb15c 100644 --- a/source/extensions/filters/http/ip_tagging/BUILD +++ b/source/extensions/filters/http/ip_tagging/BUILD @@ -2,7 +2,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 @@ -10,7 +10,7 @@ licenses(["notice"]) # Apache 2 # HTTP L7 filter that writes an IP tagging header based on IP trie data # Public docs: docs/root/configuration/http_filters/ip_tagging_filter.rst -envoy_package() +envoy_extension_package() envoy_cc_library( name = "ip_tagging_filter_lib", @@ -34,6 +34,11 @@ envoy_cc_extension( srcs = ["config.cc"], hdrs = ["config.h"], security_posture = "robust_to_untrusted_downstream", + # TODO(#9953) clean up. 
+ visibility = [ + "//:extension_config", + "//test/integration:__subpackages__", + ], deps = [ "//include/envoy/registry", "//source/common/protobuf:utility_lib", diff --git a/source/extensions/filters/http/jwt_authn/BUILD b/source/extensions/filters/http/jwt_authn/BUILD index a2967b990132..f0249b014ea1 100644 --- a/source/extensions/filters/http/jwt_authn/BUILD +++ b/source/extensions/filters/http/jwt_authn/BUILD @@ -2,12 +2,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "extractor_lib", diff --git a/source/extensions/filters/http/lua/BUILD b/source/extensions/filters/http/lua/BUILD index 657e3472a88f..2e08db0ad563 100644 --- a/source/extensions/filters/http/lua/BUILD +++ b/source/extensions/filters/http/lua/BUILD @@ -2,7 +2,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 @@ -10,7 +10,7 @@ licenses(["notice"]) # Apache 2 # Lua scripting L7 HTTP filter (https://www.lua.org/, http://luajit.org/) # Public docs: docs/root/configuration/http_filters/lua_filter.rst -envoy_package() +envoy_extension_package() envoy_cc_library( name = "lua_filter_lib", diff --git a/source/extensions/filters/http/on_demand/BUILD b/source/extensions/filters/http/on_demand/BUILD index 3f4ef02c1dba..86b029ca21d3 100644 --- a/source/extensions/filters/http/on_demand/BUILD +++ b/source/extensions/filters/http/on_demand/BUILD @@ -2,14 +2,14 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 # On-demand RDS update HTTP filter -envoy_package() +envoy_extension_package() envoy_cc_library( name = "on_demand_update_lib", @@ -31,6 +31,12 @@ envoy_cc_extension( srcs = ["config.cc"], hdrs = ["config.h"], security_posture = "robust_to_untrusted_downstream", + # TODO(#9953) classify and clean up. + visibility = [ + "//:extension_config", + "//test/common/access_log:__subpackages__", + "//test/integration:__subpackages__", + ], deps = [ "//include/envoy/registry", "//source/extensions/filters/http:well_known_names", diff --git a/source/extensions/filters/http/original_src/BUILD b/source/extensions/filters/http/original_src/BUILD index eff7f4cf9679..b88a1d8df9ff 100644 --- a/source/extensions/filters/http/original_src/BUILD +++ b/source/extensions/filters/http/original_src/BUILD @@ -2,14 +2,14 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 # A filter for mirroring the downstream remote address on the upstream's source. 
-envoy_package() +envoy_extension_package() envoy_cc_library( name = "config_lib", diff --git a/source/extensions/filters/http/ratelimit/BUILD b/source/extensions/filters/http/ratelimit/BUILD index 4a8c7a8c35d5..9119aa35a26d 100644 --- a/source/extensions/filters/http/ratelimit/BUILD +++ b/source/extensions/filters/http/ratelimit/BUILD @@ -2,7 +2,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 @@ -10,7 +10,7 @@ licenses(["notice"]) # Apache 2 # Ratelimit L7 HTTP filter # Public docs: docs/root/configuration/http_filters/rate_limit_filter.rst -envoy_package() +envoy_extension_package() envoy_cc_library( name = "ratelimit_lib", diff --git a/source/extensions/filters/http/rbac/BUILD b/source/extensions/filters/http/rbac/BUILD index 9554a910a16c..1f7802394c70 100644 --- a/source/extensions/filters/http/rbac/BUILD +++ b/source/extensions/filters/http/rbac/BUILD @@ -2,18 +2,23 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], security_posture = "robust_to_untrusted_downstream", + # TODO(#9953) clean up. + visibility = [ + "//:extension_config", + "//test/integration:__subpackages__", + ], deps = [ "//include/envoy/registry", "//source/extensions/filters/http:well_known_names", diff --git a/source/extensions/filters/http/router/BUILD b/source/extensions/filters/http/router/BUILD index ab7487d00b6a..6402dc14c880 100644 --- a/source/extensions/filters/http/router/BUILD +++ b/source/extensions/filters/http/router/BUILD @@ -1,7 +1,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 @@ -9,13 +9,15 @@ licenses(["notice"]) # Apache 2 # HTTP L7 filter responsible for routing to upstream connection pools # Public docs: docs/root/configuration/http_filters/router_filter.rst -envoy_package() +envoy_extension_package() envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], security_posture = "robust_to_untrusted_downstream", + # This is core Envoy config. 
+ visibility = ["//visibility:public"], deps = [ "//include/envoy/registry", "//source/common/router:router_lib", diff --git a/source/extensions/filters/http/squash/BUILD b/source/extensions/filters/http/squash/BUILD index 8579d7a2860a..ea2bdcd1242b 100644 --- a/source/extensions/filters/http/squash/BUILD +++ b/source/extensions/filters/http/squash/BUILD @@ -2,7 +2,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 @@ -10,7 +10,7 @@ licenses(["notice"]) # Apache 2 # L7 HTTP filter that implements the Squash microservice debugger # Public docs: docs/root/configuration/http_filters/squash_filter.rst -envoy_package() +envoy_extension_package() envoy_cc_library( name = "squash_filter_lib", diff --git a/source/extensions/filters/http/tap/BUILD b/source/extensions/filters/http/tap/BUILD index 62a8d2f36f5a..73d4237cd019 100644 --- a/source/extensions/filters/http/tap/BUILD +++ b/source/extensions/filters/http/tap/BUILD @@ -2,7 +2,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 @@ -10,7 +10,7 @@ licenses(["notice"]) # Apache 2 # L7 HTTP Tap filter # Public docs: docs/root/configuration/http_filters/tap_filter.rst -envoy_package() +envoy_extension_package() envoy_cc_library( name = "tap_config_interface", diff --git a/source/extensions/filters/listener/BUILD b/source/extensions/filters/listener/BUILD index 06456dbbcb5e..9a2ee9ad75cb 100644 --- a/source/extensions/filters/listener/BUILD +++ b/source/extensions/filters/listener/BUILD @@ -1,16 +1,18 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "well_known_names", hdrs = ["well_known_names.h"], + # Well known names are public. + visibility = ["//visibility:public"], deps = [ "//source/common/singleton:const_singleton", ], diff --git a/source/extensions/filters/listener/http_inspector/BUILD b/source/extensions/filters/listener/http_inspector/BUILD index 87e808230bd1..0f3c7f50eb40 100644 --- a/source/extensions/filters/listener/http_inspector/BUILD +++ b/source/extensions/filters/listener/http_inspector/BUILD @@ -2,14 +2,14 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 # HTTP inspector filter for sniffing HTTP protocol and setting HTTP version to a FilterChain. 
-envoy_package() +envoy_extension_package() envoy_cc_library( name = "http_inspector_lib", diff --git a/source/extensions/filters/listener/original_dst/BUILD b/source/extensions/filters/listener/original_dst/BUILD index a940d212c987..78c09f58155c 100644 --- a/source/extensions/filters/listener/original_dst/BUILD +++ b/source/extensions/filters/listener/original_dst/BUILD @@ -2,7 +2,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 @@ -10,7 +10,7 @@ licenses(["notice"]) # Apache 2 # ORIGINAL_DST iptables redirection listener filter # Public docs: docs/root/configuration/listener_filters/original_dst_filter.rst -envoy_package() +envoy_extension_package() envoy_cc_library( name = "original_dst_lib", @@ -29,6 +29,11 @@ envoy_cc_extension( name = "config", srcs = ["config.cc"], security_posture = "robust_to_untrusted_downstream", + # TODO(#9953) clean up. + visibility = [ + "//:extension_config", + "//test/integration:__subpackages__", + ], deps = [ ":original_dst_lib", "//include/envoy/registry", diff --git a/source/extensions/filters/listener/original_src/BUILD b/source/extensions/filters/listener/original_src/BUILD index 4bed07cc6619..4240bb61f28a 100644 --- a/source/extensions/filters/listener/original_src/BUILD +++ b/source/extensions/filters/listener/original_src/BUILD @@ -2,14 +2,14 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 # A filter for mirroring the downstream remote address on the upstream's source. -envoy_package() +envoy_extension_package() envoy_cc_library( name = "config_lib", diff --git a/source/extensions/filters/listener/proxy_protocol/BUILD b/source/extensions/filters/listener/proxy_protocol/BUILD index 407d05e43468..810c99d4021f 100644 --- a/source/extensions/filters/listener/proxy_protocol/BUILD +++ b/source/extensions/filters/listener/proxy_protocol/BUILD @@ -2,14 +2,14 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 # Proxy protocol listener filter: https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt -envoy_package() +envoy_extension_package() envoy_cc_library( name = "proxy_protocol_lib", @@ -18,6 +18,7 @@ envoy_cc_library( "proxy_protocol.h", "proxy_protocol_header.h", ], + visibility = ["//visibility:public"], deps = [ "//include/envoy/event:dispatcher_interface", "//include/envoy/network:filter_interface", @@ -39,6 +40,11 @@ envoy_cc_extension( name = "config", srcs = ["config.cc"], security_posture = "robust_to_untrusted_downstream", + # TODO(#9953) clean up. 
+ visibility = [ + "//:extension_config", + "//test/integration:__subpackages__", + ], deps = [ "//include/envoy/registry", "//include/envoy/server:filter_config_interface", diff --git a/source/extensions/filters/listener/tls_inspector/BUILD b/source/extensions/filters/listener/tls_inspector/BUILD index c751c53156a4..35a163b26b99 100644 --- a/source/extensions/filters/listener/tls_inspector/BUILD +++ b/source/extensions/filters/listener/tls_inspector/BUILD @@ -2,7 +2,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 @@ -10,13 +10,18 @@ licenses(["notice"]) # Apache 2 # TLS inspector filter for examining various TLS parameters before routing to a FilterChain. # Public docs: docs/root/configuration/listener_filters/tls_inspector.rst -envoy_package() +envoy_extension_package() envoy_cc_library( name = "tls_inspector_lib", srcs = ["tls_inspector.cc"], hdrs = ["tls_inspector.h"], external_deps = ["ssl"], + # TODO(#9953) clean up. + visibility = [ + "//:extension_config", + "//test/integration:__subpackages__", + ], deps = [ "//include/envoy/event:dispatcher_interface", "//include/envoy/event:timer_interface", @@ -33,6 +38,11 @@ envoy_cc_extension( name = "config", srcs = ["config.cc"], security_posture = "robust_to_untrusted_downstream", + # TODO(#9953) clean up. + visibility = [ + "//:extension_config", + "//test/integration:__subpackages__", + ], deps = [ "//include/envoy/registry", "//include/envoy/server:filter_config_interface", diff --git a/source/extensions/filters/network/BUILD b/source/extensions/filters/network/BUILD index ee5bcf6bc186..790ddc806157 100644 --- a/source/extensions/filters/network/BUILD +++ b/source/extensions/filters/network/BUILD @@ -1,16 +1,18 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "well_known_names", hdrs = ["well_known_names.h"], + # Well known names are public. + visibility = ["//visibility:public"], deps = [ "//source/common/config:well_known_names", "//source/common/singleton:const_singleton", diff --git a/source/extensions/filters/network/client_ssl_auth/BUILD b/source/extensions/filters/network/client_ssl_auth/BUILD index 2a120e030866..d77c4abae594 100644 --- a/source/extensions/filters/network/client_ssl_auth/BUILD +++ b/source/extensions/filters/network/client_ssl_auth/BUILD @@ -2,7 +2,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 @@ -10,7 +10,7 @@ licenses(["notice"]) # Apache 2 # Client SSL authorization L4 network filter # Public docs: docs/root/configuration/network_filters/client_ssl_auth_filter.rst -envoy_package() +envoy_extension_package() envoy_cc_library( name = "client_ssl_auth", diff --git a/source/extensions/filters/network/common/BUILD b/source/extensions/filters/network/common/BUILD index 4e70e2aa414d..09249e400050 100644 --- a/source/extensions/filters/network/common/BUILD +++ b/source/extensions/filters/network/common/BUILD @@ -1,16 +1,18 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "factory_base_lib", hdrs = ["factory_base.h"], + # Used by core. 
TODO(#9953) clean up. + visibility = ["//visibility:public"], deps = [ "//include/envoy/server:filter_config_interface", ], @@ -19,6 +21,8 @@ envoy_cc_library( envoy_cc_library( name = "utility_lib", hdrs = ["utility.h"], + # Used by core. TODO(#9953) clean up. + visibility = ["//visibility:public"], deps = [ "//include/envoy/runtime:runtime_interface", "//source/common/common:macros", diff --git a/source/extensions/filters/network/common/redis/BUILD b/source/extensions/filters/network/common/redis/BUILD index 3b4dcedbb01e..5c0393d36a62 100644 --- a/source/extensions/filters/network/common/redis/BUILD +++ b/source/extensions/filters/network/common/redis/BUILD @@ -1,12 +1,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "codec_interface", diff --git a/source/extensions/filters/network/direct_response/BUILD b/source/extensions/filters/network/direct_response/BUILD index fe6244a5c19d..a7ed6d274a1f 100644 --- a/source/extensions/filters/network/direct_response/BUILD +++ b/source/extensions/filters/network/direct_response/BUILD @@ -2,7 +2,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 @@ -10,7 +10,7 @@ licenses(["notice"]) # Apache 2 # Direct response L4 network filter. # Public docs: docs/root/configuration/network_filters/direct_response_filter.rst -envoy_package() +envoy_extension_package() envoy_cc_library( name = "filter", diff --git a/source/extensions/filters/network/dubbo_proxy/BUILD b/source/extensions/filters/network/dubbo_proxy/BUILD index 6b2affdd7d72..bf83e91ad0fd 100644 --- a/source/extensions/filters/network/dubbo_proxy/BUILD +++ b/source/extensions/filters/network/dubbo_proxy/BUILD @@ -2,12 +2,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "buffer_helper_lib", diff --git a/source/extensions/filters/network/dubbo_proxy/filters/BUILD b/source/extensions/filters/network/dubbo_proxy/filters/BUILD index 2fc5922c92ea..d2c9fd1ff03c 100644 --- a/source/extensions/filters/network/dubbo_proxy/filters/BUILD +++ b/source/extensions/filters/network/dubbo_proxy/filters/BUILD @@ -1,12 +1,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "filter_interface", diff --git a/source/extensions/filters/network/dubbo_proxy/router/BUILD b/source/extensions/filters/network/dubbo_proxy/router/BUILD index 9dd2cf7e46c8..4227ca25fcf5 100644 --- a/source/extensions/filters/network/dubbo_proxy/router/BUILD +++ b/source/extensions/filters/network/dubbo_proxy/router/BUILD @@ -1,12 +1,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "router_interface", diff --git a/source/extensions/filters/network/echo/BUILD b/source/extensions/filters/network/echo/BUILD index 6d39336775b0..6b136705258c 100644 --- a/source/extensions/filters/network/echo/BUILD +++ b/source/extensions/filters/network/echo/BUILD @@ -2,7 +2,7 @@ 
load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 @@ -10,7 +10,7 @@ licenses(["notice"]) # Apache 2 # Echo L4 network filter. This is primarily a simplistic example. # Public docs: docs/root/configuration/network_filters/echo_filter.rst -envoy_package() +envoy_extension_package() envoy_cc_library( name = "echo", @@ -29,6 +29,11 @@ envoy_cc_extension( name = "config", srcs = ["config.cc"], security_posture = "unknown", + # TODO(#9953) move echo integration test to extensions. + visibility = [ + "//:extension_config", + "//test/integration:__subpackages__", + ], deps = [ ":echo", "//include/envoy/registry", diff --git a/source/extensions/filters/network/ext_authz/BUILD b/source/extensions/filters/network/ext_authz/BUILD index 1a7277d7ac4d..ebc6847e28f6 100644 --- a/source/extensions/filters/network/ext_authz/BUILD +++ b/source/extensions/filters/network/ext_authz/BUILD @@ -2,7 +2,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 @@ -10,7 +10,7 @@ licenses(["notice"]) # Apache 2 # External authorization L4 network filter # Public docs: TODO(saumoh): Docs needed in docs/root/configuration/network_filters -envoy_package() +envoy_extension_package() envoy_cc_library( name = "ext_authz", diff --git a/source/extensions/filters/network/http_connection_manager/BUILD b/source/extensions/filters/network/http_connection_manager/BUILD index 5d03f03ecc4a..012cd2b00cce 100644 --- a/source/extensions/filters/network/http_connection_manager/BUILD +++ b/source/extensions/filters/network/http_connection_manager/BUILD @@ -1,7 +1,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 @@ -10,13 +10,15 @@ licenses(["notice"]) # Apache 2 # drives all of the L7 HTTP filters. # Public docs: docs/root/configuration/http_conn_man/http_conn_man.rst -envoy_package() +envoy_extension_package() envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], security_posture = "robust_to_untrusted_downstream", + # This is core Envoy config. + visibility = ["//visibility:public"], deps = [ "//include/envoy/config:config_provider_manager_interface", "//include/envoy/filesystem:filesystem_interface", diff --git a/source/extensions/filters/network/kafka/BUILD b/source/extensions/filters/network/kafka/BUILD index 495a94a7bad6..3c338ff751c6 100644 --- a/source/extensions/filters/network/kafka/BUILD +++ b/source/extensions/filters/network/kafka/BUILD @@ -3,7 +3,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 @@ -11,7 +11,7 @@ licenses(["notice"]) # Apache 2 # Kafka network filter. 
# Broker filter public docs: docs/root/configuration/network_filters/kafka_broker_filter.rst -envoy_package() +envoy_extension_package() envoy_cc_extension( name = "kafka_broker_config_lib", diff --git a/source/extensions/filters/network/local_ratelimit/BUILD b/source/extensions/filters/network/local_ratelimit/BUILD index 052b817726d9..13389742fa56 100644 --- a/source/extensions/filters/network/local_ratelimit/BUILD +++ b/source/extensions/filters/network/local_ratelimit/BUILD @@ -2,7 +2,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 @@ -10,7 +10,7 @@ licenses(["notice"]) # Apache 2 # Local ratelimit L4 network filter # Public docs: docs/root/configuration/network_filters/local_rate_limit_filter.rst -envoy_package() +envoy_extension_package() envoy_cc_library( name = "local_ratelimit_lib", diff --git a/source/extensions/filters/network/mongo_proxy/BUILD b/source/extensions/filters/network/mongo_proxy/BUILD index 04c14c2c610c..2e281e1f6789 100644 --- a/source/extensions/filters/network/mongo_proxy/BUILD +++ b/source/extensions/filters/network/mongo_proxy/BUILD @@ -2,7 +2,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 @@ -10,7 +10,7 @@ licenses(["notice"]) # Apache 2 # Mongo proxy L4 network filter (observability and fault injection). # Public docs: docs/root/configuration/network_filters/mongo_proxy_filter.rst -envoy_package() +envoy_extension_package() envoy_cc_library( name = "bson_interface", diff --git a/source/extensions/filters/network/mysql_proxy/BUILD b/source/extensions/filters/network/mysql_proxy/BUILD index 152584385054..fee8571ea619 100644 --- a/source/extensions/filters/network/mysql_proxy/BUILD +++ b/source/extensions/filters/network/mysql_proxy/BUILD @@ -2,7 +2,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 @@ -10,7 +10,7 @@ licenses(["notice"]) # Apache 2 # MySQL proxy L7 network filter. # Public docs: docs/root/configuration/network_filters/mysql_proxy_filter.rst -envoy_package() +envoy_extension_package() envoy_cc_library( name = "proxy_lib", diff --git a/source/extensions/filters/network/postgres_proxy/BUILD b/source/extensions/filters/network/postgres_proxy/BUILD index b2d7d2dcef11..aa397da9b55f 100644 --- a/source/extensions/filters/network/postgres_proxy/BUILD +++ b/source/extensions/filters/network/postgres_proxy/BUILD @@ -2,7 +2,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 @@ -12,7 +12,7 @@ licenses(["notice"]) # Apache 2 # PostgresSQL proxy L7 network filter. 
# Public docs: docs/root/configuration/network_filters/postgres_proxy_filter.rst -envoy_package() +envoy_extension_package() envoy_cc_library( name = "filter", diff --git a/source/extensions/filters/network/ratelimit/BUILD b/source/extensions/filters/network/ratelimit/BUILD index 68f54558afa4..f653adf348fb 100644 --- a/source/extensions/filters/network/ratelimit/BUILD +++ b/source/extensions/filters/network/ratelimit/BUILD @@ -2,7 +2,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 @@ -10,12 +10,18 @@ licenses(["notice"]) # Apache 2 # Ratelimit L4 network filter # Public docs: docs/root/configuration/network_filters/rate_limit_filter.rst -envoy_package() +envoy_extension_package() envoy_cc_library( name = "ratelimit_lib", srcs = ["ratelimit.cc"], hdrs = ["ratelimit.h"], + # Legacy test use. TODO(#9953) clean up. + visibility = [ + "//source/extensions:__subpackages__", + "//test/common/network:__pkg__", + "//test/extensions:__subpackages__", + ], deps = [ "//include/envoy/network:connection_interface", "//include/envoy/network:filter_interface", diff --git a/source/extensions/filters/network/rbac/BUILD b/source/extensions/filters/network/rbac/BUILD index 367104e913d8..75e98406cf26 100644 --- a/source/extensions/filters/network/rbac/BUILD +++ b/source/extensions/filters/network/rbac/BUILD @@ -2,12 +2,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_extension( name = "config", diff --git a/source/extensions/filters/network/redis_proxy/BUILD b/source/extensions/filters/network/redis_proxy/BUILD index 4d452f0cad3c..c0b742efa02e 100644 --- a/source/extensions/filters/network/redis_proxy/BUILD +++ b/source/extensions/filters/network/redis_proxy/BUILD @@ -2,7 +2,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 @@ -11,7 +11,7 @@ licenses(["notice"]) # Apache 2 # clusters. # Public docs: docs/root/configuration/network_filters/redis_proxy_filter.rst -envoy_package() +envoy_extension_package() envoy_cc_library( name = "command_splitter_interface", @@ -119,6 +119,11 @@ envoy_cc_extension( srcs = ["config.cc"], hdrs = ["config.h"], security_posture = "requires_trusted_downstream_and_upstream", + # TODO(#9953) clean up. 
+ visibility = [ + "//:extension_config", + "//test/integration:__subpackages__", + ], deps = [ "//include/envoy/upstream:upstream_interface", "//source/extensions/common/redis:cluster_refresh_manager_lib", diff --git a/source/extensions/filters/network/rocketmq_proxy/BUILD b/source/extensions/filters/network/rocketmq_proxy/BUILD index 7ce5e971d74a..f837b9bf83f8 100644 --- a/source/extensions/filters/network/rocketmq_proxy/BUILD +++ b/source/extensions/filters/network/rocketmq_proxy/BUILD @@ -2,12 +2,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "well_known_names", diff --git a/source/extensions/filters/network/rocketmq_proxy/router/BUILD b/source/extensions/filters/network/rocketmq_proxy/router/BUILD index 03f3b70a34be..8f303861daae 100644 --- a/source/extensions/filters/network/rocketmq_proxy/router/BUILD +++ b/source/extensions/filters/network/rocketmq_proxy/router/BUILD @@ -1,12 +1,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "router_interface", diff --git a/source/extensions/filters/network/sni_cluster/BUILD b/source/extensions/filters/network/sni_cluster/BUILD index 6524b5defe1e..e6670b8e4260 100644 --- a/source/extensions/filters/network/sni_cluster/BUILD +++ b/source/extensions/filters/network/sni_cluster/BUILD @@ -2,12 +2,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "sni_cluster", diff --git a/source/extensions/filters/network/sni_dynamic_forward_proxy/BUILD b/source/extensions/filters/network/sni_dynamic_forward_proxy/BUILD index d7f95b44d6bd..372fce9155e2 100644 --- a/source/extensions/filters/network/sni_dynamic_forward_proxy/BUILD +++ b/source/extensions/filters/network/sni_dynamic_forward_proxy/BUILD @@ -2,12 +2,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "proxy_filter_lib", diff --git a/source/extensions/filters/network/tcp_proxy/BUILD b/source/extensions/filters/network/tcp_proxy/BUILD index 312b3233b10d..d6d7495e9122 100644 --- a/source/extensions/filters/network/tcp_proxy/BUILD +++ b/source/extensions/filters/network/tcp_proxy/BUILD @@ -1,7 +1,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 @@ -9,13 +9,15 @@ licenses(["notice"]) # Apache 2 # TCP proxy L4 network filter. # Public docs: docs/root/configuration/network_filters/tcp_proxy_filter.rst -envoy_package() +envoy_extension_package() envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], security_posture = "robust_to_untrusted_downstream", + # This is core Envoy config. 
+ visibility = ["//visibility:public"], deps = [ "//include/envoy/registry", "//source/common/tcp_proxy", diff --git a/source/extensions/filters/network/thrift_proxy/BUILD b/source/extensions/filters/network/thrift_proxy/BUILD index baa733731637..78f484da3f9e 100644 --- a/source/extensions/filters/network/thrift_proxy/BUILD +++ b/source/extensions/filters/network/thrift_proxy/BUILD @@ -2,12 +2,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "app_exception_lib", diff --git a/source/extensions/filters/network/thrift_proxy/filters/BUILD b/source/extensions/filters/network/thrift_proxy/filters/BUILD index 808e42dd8e98..a1b91d286809 100644 --- a/source/extensions/filters/network/thrift_proxy/filters/BUILD +++ b/source/extensions/filters/network/thrift_proxy/filters/BUILD @@ -1,12 +1,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "filter_config_interface", diff --git a/source/extensions/filters/network/thrift_proxy/filters/ratelimit/BUILD b/source/extensions/filters/network/thrift_proxy/filters/ratelimit/BUILD index 5c136b0a0353..7252afc340a7 100644 --- a/source/extensions/filters/network/thrift_proxy/filters/ratelimit/BUILD +++ b/source/extensions/filters/network/thrift_proxy/filters/ratelimit/BUILD @@ -2,12 +2,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "ratelimit_lib", diff --git a/source/extensions/filters/network/thrift_proxy/router/BUILD b/source/extensions/filters/network/thrift_proxy/router/BUILD index 74a706741538..00e32bbf06a2 100644 --- a/source/extensions/filters/network/thrift_proxy/router/BUILD +++ b/source/extensions/filters/network/thrift_proxy/router/BUILD @@ -2,12 +2,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_extension( name = "config", diff --git a/source/extensions/filters/network/zookeeper_proxy/BUILD b/source/extensions/filters/network/zookeeper_proxy/BUILD index 301498c6465b..8dc6e0791392 100644 --- a/source/extensions/filters/network/zookeeper_proxy/BUILD +++ b/source/extensions/filters/network/zookeeper_proxy/BUILD @@ -2,7 +2,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 @@ -10,7 +10,7 @@ licenses(["notice"]) # Apache 2 # ZooKeeper proxy L7 network filter. 
# Public docs: docs/root/configuration/network_filters/zookeeper_proxy_filter.rst -envoy_package() +envoy_extension_package() envoy_cc_library( name = "proxy_lib", diff --git a/source/extensions/filters/udp/dns_filter/BUILD b/source/extensions/filters/udp/dns_filter/BUILD index 1d4f8e0ab1fb..4511fb6380da 100644 --- a/source/extensions/filters/udp/dns_filter/BUILD +++ b/source/extensions/filters/udp/dns_filter/BUILD @@ -2,12 +2,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "dns_filter_lib", diff --git a/source/extensions/filters/udp/udp_proxy/BUILD b/source/extensions/filters/udp/udp_proxy/BUILD index 7b9efa4498a2..834c8ed66a0a 100644 --- a/source/extensions/filters/udp/udp_proxy/BUILD +++ b/source/extensions/filters/udp/udp_proxy/BUILD @@ -2,12 +2,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "udp_proxy_filter_lib", diff --git a/source/extensions/grpc_credentials/BUILD b/source/extensions/grpc_credentials/BUILD index 06456dbbcb5e..40a5e79b39d3 100644 --- a/source/extensions/grpc_credentials/BUILD +++ b/source/extensions/grpc_credentials/BUILD @@ -1,16 +1,18 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "well_known_names", hdrs = ["well_known_names.h"], + # well known names files are public as long as they exist. + visibility = ["//visibility:public"], deps = [ "//source/common/singleton:const_singleton", ], diff --git a/source/extensions/grpc_credentials/aws_iam/BUILD b/source/extensions/grpc_credentials/aws_iam/BUILD index 4c3e179096b1..ab920487e264 100644 --- a/source/extensions/grpc_credentials/aws_iam/BUILD +++ b/source/extensions/grpc_credentials/aws_iam/BUILD @@ -1,14 +1,14 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 # AWS IAM gRPC Credentials -envoy_package() +envoy_extension_package() envoy_cc_extension( name = "config", diff --git a/source/extensions/grpc_credentials/example/BUILD b/source/extensions/grpc_credentials/example/BUILD index 30025a7c046e..8c43f6c27532 100644 --- a/source/extensions/grpc_credentials/example/BUILD +++ b/source/extensions/grpc_credentials/example/BUILD @@ -1,20 +1,26 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 # Example gRPC Credentials -envoy_package() +envoy_extension_package() envoy_cc_library( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], external_deps = ["grpc"], + # Legacy test use. 
+ visibility = [ + "//source/extensions:__subpackages__", + "//test/common/grpc:__subpackages__", + "//test/extensions:__subpackages__", + ], deps = [ "//include/envoy/grpc:google_grpc_creds_interface", "//include/envoy/registry", diff --git a/source/extensions/grpc_credentials/file_based_metadata/BUILD b/source/extensions/grpc_credentials/file_based_metadata/BUILD index b41ac277c73f..d6c8b8d5e5fb 100644 --- a/source/extensions/grpc_credentials/file_based_metadata/BUILD +++ b/source/extensions/grpc_credentials/file_based_metadata/BUILD @@ -1,14 +1,14 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 # File Based Metadata gRPC Credentials -envoy_package() +envoy_extension_package() envoy_cc_extension( name = "config", diff --git a/source/extensions/health_checkers/BUILD b/source/extensions/health_checkers/BUILD index 06456dbbcb5e..40a5e79b39d3 100644 --- a/source/extensions/health_checkers/BUILD +++ b/source/extensions/health_checkers/BUILD @@ -1,16 +1,18 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "well_known_names", hdrs = ["well_known_names.h"], + # well known names files are public as long as they exist. + visibility = ["//visibility:public"], deps = [ "//source/common/singleton:const_singleton", ], diff --git a/source/extensions/health_checkers/redis/BUILD b/source/extensions/health_checkers/redis/BUILD index 3dd32163468a..cd852d4f78ec 100644 --- a/source/extensions/health_checkers/redis/BUILD +++ b/source/extensions/health_checkers/redis/BUILD @@ -2,14 +2,14 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 # Redis custom health checker. -envoy_package() +envoy_extension_package() envoy_cc_library( name = "redis", diff --git a/source/extensions/internal_redirect/BUILD b/source/extensions/internal_redirect/BUILD index 06456dbbcb5e..40a5e79b39d3 100644 --- a/source/extensions/internal_redirect/BUILD +++ b/source/extensions/internal_redirect/BUILD @@ -1,16 +1,18 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "well_known_names", hdrs = ["well_known_names.h"], + # well known names files are public as long as they exist. + visibility = ["//visibility:public"], deps = [ "//source/common/singleton:const_singleton", ], diff --git a/source/extensions/internal_redirect/allow_listed_routes/BUILD b/source/extensions/internal_redirect/allow_listed_routes/BUILD index c2ee85a134ac..6fe252ddf6bb 100644 --- a/source/extensions/internal_redirect/allow_listed_routes/BUILD +++ b/source/extensions/internal_redirect/allow_listed_routes/BUILD @@ -2,12 +2,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "allow_listed_routes_lib", @@ -25,6 +25,11 @@ envoy_cc_extension( srcs = ["config.cc"], hdrs = ["config.h"], security_posture = "robust_to_untrusted_downstream_and_upstream", + # TODO(#9953) clean up by moving the redirect test to extensions. 
+ visibility = [ + "//:extension_config", + "//test/integration:__subpackages__", + ], deps = [ ":allow_listed_routes_lib", "//include/envoy/registry", diff --git a/source/extensions/internal_redirect/previous_routes/BUILD b/source/extensions/internal_redirect/previous_routes/BUILD index 91f76aebc135..58a0878f0957 100644 --- a/source/extensions/internal_redirect/previous_routes/BUILD +++ b/source/extensions/internal_redirect/previous_routes/BUILD @@ -2,12 +2,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "previous_routes_lib", @@ -25,6 +25,11 @@ envoy_cc_extension( srcs = ["config.cc"], hdrs = ["config.h"], security_posture = "robust_to_untrusted_downstream_and_upstream", + # TODO(#9953) clean up by moving the redirect test to extensions. + visibility = [ + "//:extension_config", + "//test/integration:__subpackages__", + ], deps = [ ":previous_routes_lib", "//include/envoy/registry", diff --git a/source/extensions/internal_redirect/safe_cross_scheme/BUILD b/source/extensions/internal_redirect/safe_cross_scheme/BUILD index 50433bf8fb42..d957fa57673f 100644 --- a/source/extensions/internal_redirect/safe_cross_scheme/BUILD +++ b/source/extensions/internal_redirect/safe_cross_scheme/BUILD @@ -2,12 +2,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "safe_cross_scheme_lib", @@ -24,6 +24,11 @@ envoy_cc_extension( srcs = ["config.cc"], hdrs = ["config.h"], security_posture = "robust_to_untrusted_downstream_and_upstream", + # TODO(#9953) clean up by moving the redirect test to extensions. + visibility = [ + "//:extension_config", + "//test/integration:__subpackages__", + ], deps = [ ":safe_cross_scheme_lib", "//include/envoy/registry", diff --git a/source/extensions/quic_listeners/quiche/BUILD b/source/extensions/quic_listeners/quiche/BUILD index eb2e1922e182..1099eb26deb8 100644 --- a/source/extensions/quic_listeners/quiche/BUILD +++ b/source/extensions/quic_listeners/quiche/BUILD @@ -2,12 +2,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "envoy_quic_alarm_lib", @@ -306,6 +306,12 @@ envoy_cc_library( srcs = ["active_quic_listener_config.cc"], hdrs = ["active_quic_listener_config.h"], tags = ["nofips"], + # TODO(#9953) this should be cleaned up + visibility = [ + "//source/extensions:__subpackages__", + "//test/extensions:__subpackages__", + "//test/server:__subpackages__", + ], deps = [ ":active_quic_listener_lib", "//include/envoy/registry", diff --git a/source/extensions/quic_listeners/quiche/platform/BUILD b/source/extensions/quic_listeners/quiche/platform/BUILD index 9c9857842e75..e7f70f86cb26 100644 --- a/source/extensions/quic_listeners/quiche/platform/BUILD +++ b/source/extensions/quic_listeners/quiche/platform/BUILD @@ -1,12 +1,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() # Build targets in this package are part of the QUICHE platform implementation. 
# These implementations are the infrastructure building block for QUIC. They are diff --git a/source/extensions/resource_monitors/BUILD b/source/extensions/resource_monitors/BUILD index 06456dbbcb5e..40a5e79b39d3 100644 --- a/source/extensions/resource_monitors/BUILD +++ b/source/extensions/resource_monitors/BUILD @@ -1,16 +1,18 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "well_known_names", hdrs = ["well_known_names.h"], + # well known names files are public as long as they exist. + visibility = ["//visibility:public"], deps = [ "//source/common/singleton:const_singleton", ], diff --git a/source/extensions/resource_monitors/common/BUILD b/source/extensions/resource_monitors/common/BUILD index 7e759d696abd..a17f10b5c378 100644 --- a/source/extensions/resource_monitors/common/BUILD +++ b/source/extensions/resource_monitors/common/BUILD @@ -1,16 +1,18 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "factory_base_lib", hdrs = ["factory_base.h"], + # This resource monitoring library is considered core code. + visibility = ["//visibility:public"], deps = [ "//include/envoy/server:resource_monitor_config_interface", "//source/common/protobuf:utility_lib", diff --git a/source/extensions/resource_monitors/fixed_heap/BUILD b/source/extensions/resource_monitors/fixed_heap/BUILD index 4feb2a6e7cd2..6c2022537d3d 100644 --- a/source/extensions/resource_monitors/fixed_heap/BUILD +++ b/source/extensions/resource_monitors/fixed_heap/BUILD @@ -2,12 +2,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "fixed_heap_monitor", diff --git a/source/extensions/resource_monitors/injected_resource/BUILD b/source/extensions/resource_monitors/injected_resource/BUILD index 4b3702afffdf..6f1c24318cee 100644 --- a/source/extensions/resource_monitors/injected_resource/BUILD +++ b/source/extensions/resource_monitors/injected_resource/BUILD @@ -2,12 +2,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "injected_resource_monitor", @@ -28,6 +28,11 @@ envoy_cc_extension( hdrs = ["config.h"], security_posture = "data_plane_agnostic", status = "alpha", + # TODO(#9953) clean up. 
+ visibility = [ + "//:extension_config", + "//test/integration:__subpackages__", + ], deps = [ ":injected_resource_monitor", "//include/envoy/registry", diff --git a/source/extensions/retry/host/omit_canary_hosts/BUILD b/source/extensions/retry/host/omit_canary_hosts/BUILD index e8fc9840f156..9427fa9fc507 100644 --- a/source/extensions/retry/host/omit_canary_hosts/BUILD +++ b/source/extensions/retry/host/omit_canary_hosts/BUILD @@ -2,12 +2,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "omit_canary_hosts_predicate_lib", diff --git a/source/extensions/retry/host/omit_host_metadata/BUILD b/source/extensions/retry/host/omit_host_metadata/BUILD index 09b01e08848c..5e1aaa38c5af 100644 --- a/source/extensions/retry/host/omit_host_metadata/BUILD +++ b/source/extensions/retry/host/omit_host_metadata/BUILD @@ -2,12 +2,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "omit_host_metadata_predicate_lib", diff --git a/source/extensions/retry/host/previous_hosts/BUILD b/source/extensions/retry/host/previous_hosts/BUILD index ea9aa0d78f4e..78e78b1a330e 100644 --- a/source/extensions/retry/host/previous_hosts/BUILD +++ b/source/extensions/retry/host/previous_hosts/BUILD @@ -2,12 +2,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "previous_hosts_predicate_lib", diff --git a/source/extensions/retry/priority/BUILD b/source/extensions/retry/priority/BUILD index 06456dbbcb5e..22d835b40706 100644 --- a/source/extensions/retry/priority/BUILD +++ b/source/extensions/retry/priority/BUILD @@ -1,12 +1,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "well_known_names", diff --git a/source/extensions/retry/priority/previous_priorities/BUILD b/source/extensions/retry/priority/previous_priorities/BUILD index 65061e5740da..66a592d9c772 100644 --- a/source/extensions/retry/priority/previous_priorities/BUILD +++ b/source/extensions/retry/priority/previous_priorities/BUILD @@ -2,12 +2,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "previous_priorities_lib", diff --git a/source/extensions/stat_sinks/BUILD b/source/extensions/stat_sinks/BUILD index 06456dbbcb5e..40a5e79b39d3 100644 --- a/source/extensions/stat_sinks/BUILD +++ b/source/extensions/stat_sinks/BUILD @@ -1,16 +1,18 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "well_known_names", hdrs = ["well_known_names.h"], + # well known names files are public as long as they exist. 
+ visibility = ["//visibility:public"], deps = [ "//source/common/singleton:const_singleton", ], diff --git a/source/extensions/stat_sinks/common/statsd/BUILD b/source/extensions/stat_sinks/common/statsd/BUILD index 378a7146234d..5e3d6a771d21 100644 --- a/source/extensions/stat_sinks/common/statsd/BUILD +++ b/source/extensions/stat_sinks/common/statsd/BUILD @@ -1,12 +1,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "statsd_lib", diff --git a/source/extensions/stat_sinks/dog_statsd/BUILD b/source/extensions/stat_sinks/dog_statsd/BUILD index 2a6e1d7d9c44..662a3c18c24f 100644 --- a/source/extensions/stat_sinks/dog_statsd/BUILD +++ b/source/extensions/stat_sinks/dog_statsd/BUILD @@ -1,7 +1,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 @@ -9,7 +9,7 @@ licenses(["notice"]) # Apache 2 # Stats sink for the DataDog (https://www.datadoghq.com/) variant of the statsd protocol # (https://docs.datadoghq.com/developers/dogstatsd/). -envoy_package() +envoy_extension_package() envoy_cc_extension( name = "config", diff --git a/source/extensions/stat_sinks/hystrix/BUILD b/source/extensions/stat_sinks/hystrix/BUILD index 463576dd757c..7b28f8218c1b 100644 --- a/source/extensions/stat_sinks/hystrix/BUILD +++ b/source/extensions/stat_sinks/hystrix/BUILD @@ -2,14 +2,14 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 # Stats sink for the basic version of the hystrix protocol (https://github.com/b/hystrix_spec). -envoy_package() +envoy_extension_package() envoy_cc_extension( name = "config", diff --git a/source/extensions/stat_sinks/metrics_service/BUILD b/source/extensions/stat_sinks/metrics_service/BUILD index ecd35309b7fb..df78d152ba53 100644 --- a/source/extensions/stat_sinks/metrics_service/BUILD +++ b/source/extensions/stat_sinks/metrics_service/BUILD @@ -2,14 +2,14 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 # Stats sink for the gRPC metrics service: api/envoy/service/metrics/v2/metrics_service.proto -envoy_package() +envoy_extension_package() envoy_cc_library( name = "metrics_service_grpc_lib", diff --git a/source/extensions/stat_sinks/statsd/BUILD b/source/extensions/stat_sinks/statsd/BUILD index a9c862e12e8b..0a8ed4648bca 100644 --- a/source/extensions/stat_sinks/statsd/BUILD +++ b/source/extensions/stat_sinks/statsd/BUILD @@ -1,20 +1,25 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 # Stats sink for the basic version of the statsd protocol (https://github.com/b/statsd_spec). -envoy_package() +envoy_extension_package() envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], security_posture = "data_plane_agnostic", + # Legacy test use. TODO(#9953) clean up. 
+ visibility = [ + "//:extension_config", + "//test/server:__subpackages__", + ], deps = [ "//include/envoy/registry", "//source/common/network:address_lib", diff --git a/source/extensions/tracers/common/BUILD b/source/extensions/tracers/common/BUILD index f31e56bc9cd6..450aef98b536 100644 --- a/source/extensions/tracers/common/BUILD +++ b/source/extensions/tracers/common/BUILD @@ -1,12 +1,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "factory_base_lib", diff --git a/source/extensions/tracers/common/ot/BUILD b/source/extensions/tracers/common/ot/BUILD index 16a0a3642905..beced5b3f219 100644 --- a/source/extensions/tracers/common/ot/BUILD +++ b/source/extensions/tracers/common/ot/BUILD @@ -1,12 +1,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "opentracing_driver_lib", diff --git a/source/extensions/tracers/datadog/BUILD b/source/extensions/tracers/datadog/BUILD index 325f4345a717..7ad1d164203e 100644 --- a/source/extensions/tracers/datadog/BUILD +++ b/source/extensions/tracers/datadog/BUILD @@ -2,14 +2,14 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 # Trace driver for Datadog (https://datadoghq.com/) -envoy_package() +envoy_extension_package() envoy_cc_library( name = "datadog_tracer_lib", diff --git a/source/extensions/tracers/dynamic_ot/BUILD b/source/extensions/tracers/dynamic_ot/BUILD index eb9cc5ee24c6..95b903be987d 100644 --- a/source/extensions/tracers/dynamic_ot/BUILD +++ b/source/extensions/tracers/dynamic_ot/BUILD @@ -2,14 +2,14 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 # Trace driver for dynamically loadable C++ OpenTracing drivers (http://opentracing.io/). 
-envoy_package() +envoy_extension_package() envoy_cc_library( name = "dynamic_opentracing_driver_lib", diff --git a/source/extensions/tracers/lightstep/BUILD b/source/extensions/tracers/lightstep/BUILD index 1fb5d0e30171..6c287b4a75fe 100644 --- a/source/extensions/tracers/lightstep/BUILD +++ b/source/extensions/tracers/lightstep/BUILD @@ -2,14 +2,14 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 # Trace driver for LightStep (https://lightstep.com/) -envoy_package() +envoy_extension_package() envoy_cc_library( name = "lightstep_tracer_lib", diff --git a/source/extensions/tracers/opencensus/BUILD b/source/extensions/tracers/opencensus/BUILD index 3494746500a1..2513be7249f6 100644 --- a/source/extensions/tracers/opencensus/BUILD +++ b/source/extensions/tracers/opencensus/BUILD @@ -2,7 +2,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", "envoy_select_google_grpc", ) @@ -10,7 +10,7 @@ licenses(["notice"]) # Apache 2 # Trace driver for OpenCensus: https://opencensus.io/ -envoy_package() +envoy_extension_package() envoy_cc_extension( name = "config", diff --git a/source/extensions/tracers/xray/BUILD b/source/extensions/tracers/xray/BUILD index a186e661eaad..ef486aaac4eb 100644 --- a/source/extensions/tracers/xray/BUILD +++ b/source/extensions/tracers/xray/BUILD @@ -2,7 +2,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", "envoy_proto_library", ) @@ -10,7 +10,7 @@ licenses(["notice"]) # Apache 2 # Trace driver for AWS X-Ray. -envoy_package() +envoy_extension_package() envoy_proto_library( name = "daemon", diff --git a/source/extensions/tracers/zipkin/BUILD b/source/extensions/tracers/zipkin/BUILD index ee0328353fa0..fc2d417c4d1c 100644 --- a/source/extensions/tracers/zipkin/BUILD +++ b/source/extensions/tracers/zipkin/BUILD @@ -2,14 +2,14 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 # Trace driver for Zipkin (https://zipkin.io/). -envoy_package() +envoy_extension_package() envoy_cc_library( name = "zipkin_lib", @@ -68,6 +68,11 @@ envoy_cc_extension( srcs = ["config.cc"], hdrs = ["config.h"], security_posture = "robust_to_untrusted_downstream", + # Legacy test use. TODO(#9953) clean up. + visibility = [ + "//:extension_config", + "//test/server:__subpackages__", + ], deps = [ ":zipkin_lib", "//source/extensions/tracers/common:factory_base_lib", diff --git a/source/extensions/transport_sockets/BUILD b/source/extensions/transport_sockets/BUILD index 06456dbbcb5e..40a5e79b39d3 100644 --- a/source/extensions/transport_sockets/BUILD +++ b/source/extensions/transport_sockets/BUILD @@ -1,16 +1,18 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "well_known_names", hdrs = ["well_known_names.h"], + # well known names files are public as long as they exist. 
+ visibility = ["//visibility:public"], deps = [ "//source/common/singleton:const_singleton", ], diff --git a/source/extensions/transport_sockets/alts/BUILD b/source/extensions/transport_sockets/alts/BUILD index a667fac37e14..631c74a1c8d3 100644 --- a/source/extensions/transport_sockets/alts/BUILD +++ b/source/extensions/transport_sockets/alts/BUILD @@ -2,7 +2,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 @@ -10,7 +10,7 @@ licenses(["notice"]) # Apache 2 # ALTS transport socket. This provides Google's ALTS protocol support in GCP to Envoy. # https://cloud.google.com/security/encryption-in-transit/application-layer-transport-security/ -envoy_package() +envoy_extension_package() envoy_cc_library( name = "grpc_tsi_wrapper", diff --git a/source/extensions/transport_sockets/common/BUILD b/source/extensions/transport_sockets/common/BUILD index 8aacce0fdd15..eee229da12fb 100644 --- a/source/extensions/transport_sockets/common/BUILD +++ b/source/extensions/transport_sockets/common/BUILD @@ -1,12 +1,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "passthrough_lib", diff --git a/source/extensions/transport_sockets/proxy_protocol/BUILD b/source/extensions/transport_sockets/proxy_protocol/BUILD index d44382487e85..251721adfbb4 100644 --- a/source/extensions/transport_sockets/proxy_protocol/BUILD +++ b/source/extensions/transport_sockets/proxy_protocol/BUILD @@ -1,12 +1,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_extension( name = "upstream_proxy_protocol", diff --git a/source/extensions/transport_sockets/raw_buffer/BUILD b/source/extensions/transport_sockets/raw_buffer/BUILD index 4d5bdacbe88c..3d4b41c96cde 100644 --- a/source/extensions/transport_sockets/raw_buffer/BUILD +++ b/source/extensions/transport_sockets/raw_buffer/BUILD @@ -1,20 +1,22 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 # Built-in plaintext connection transport socket. -envoy_package() +envoy_extension_package() envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], security_posture = "requires_trusted_downstream_and_upstream", + # This is core Envoy config. + visibility = ["//visibility:public"], deps = [ "//include/envoy/network:transport_socket_interface", "//include/envoy/registry", diff --git a/source/extensions/transport_sockets/tap/BUILD b/source/extensions/transport_sockets/tap/BUILD index a241afa2df24..4adb0db7cb38 100644 --- a/source/extensions/transport_sockets/tap/BUILD +++ b/source/extensions/transport_sockets/tap/BUILD @@ -2,14 +2,14 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 # tap wrapper around a transport socket. -envoy_package() +envoy_extension_package() envoy_cc_library( name = "tap_config_interface", @@ -53,6 +53,12 @@ envoy_cc_extension( hdrs = ["config.h"], security_posture = "requires_trusted_downstream_and_upstream", status = "alpha", + # TODO(#9953) clean up. 
+ visibility = [ + "//:extension_config", + "//test/common/access_log:__subpackages__", + "//test/integration:__subpackages__", + ], deps = [ ":tap_config_impl", ":tap_lib", diff --git a/source/extensions/transport_sockets/tls/BUILD b/source/extensions/transport_sockets/tls/BUILD index 6b14b5b0a870..1cd091050d15 100644 --- a/source/extensions/transport_sockets/tls/BUILD +++ b/source/extensions/transport_sockets/tls/BUILD @@ -2,20 +2,22 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 # Built-in TLS connection transport socket. -envoy_package() +envoy_extension_package() envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], security_posture = "robust_to_untrusted_downstream_and_upstream", + # TLS is core functionality. + visibility = ["//visibility:public"], deps = [ ":ssl_socket_lib", "//include/envoy/network:transport_socket_interface", @@ -37,6 +39,8 @@ envoy_cc_library( "abseil_synchronization", "ssl", ], + # TLS is core functionality. + visibility = ["//visibility:public"], deps = [ ":context_config_lib", ":context_lib", @@ -62,6 +66,8 @@ envoy_cc_library( external_deps = [ "ssl", ], + # TLS is core functionality. + visibility = ["//visibility:public"], deps = [ "//include/envoy/secret:secret_callbacks_interface", "//include/envoy/secret:secret_provider_interface", @@ -95,6 +101,8 @@ envoy_cc_library( "abseil_synchronization", "ssl", ], + # TLS is core functionality. + visibility = ["//visibility:public"], deps = [ ":utility_lib", "//include/envoy/ssl:context_config_interface", diff --git a/source/extensions/transport_sockets/tls/private_key/BUILD b/source/extensions/transport_sockets/tls/private_key/BUILD index 8b0563f5e06d..f6163ca64012 100644 --- a/source/extensions/transport_sockets/tls/private_key/BUILD +++ b/source/extensions/transport_sockets/tls/private_key/BUILD @@ -1,12 +1,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_library( name = "private_key_manager_lib", diff --git a/source/extensions/upstreams/http/generic/BUILD b/source/extensions/upstreams/http/generic/BUILD index 712b0d9632ea..563b4bf5a9e2 100644 --- a/source/extensions/upstreams/http/generic/BUILD +++ b/source/extensions/upstreams/http/generic/BUILD @@ -1,12 +1,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_extension( name = "config", diff --git a/source/extensions/upstreams/http/http/BUILD b/source/extensions/upstreams/http/http/BUILD index f97f894d3294..4c0b5be394b9 100644 --- a/source/extensions/upstreams/http/http/BUILD +++ b/source/extensions/upstreams/http/http/BUILD @@ -2,12 +2,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - "envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_extension( name = "config", diff --git a/source/extensions/upstreams/http/tcp/BUILD b/source/extensions/upstreams/http/tcp/BUILD index 82b0422fad70..6daa95ce15d7 100644 --- a/source/extensions/upstreams/http/tcp/BUILD +++ b/source/extensions/upstreams/http/tcp/BUILD @@ -2,12 +2,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_extension", "envoy_cc_library", - 
"envoy_package", + "envoy_extension_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_extension_package() envoy_cc_extension( name = "config", diff --git a/tools/code_format/envoy_build_fixer.py b/tools/code_format/envoy_build_fixer.py index 51f7d0fb866c..9af90f0f7e21 100755 --- a/tools/code_format/envoy_build_fixer.py +++ b/tools/code_format/envoy_build_fixer.py @@ -2,7 +2,7 @@ # Enforces: # - License headers on Envoy BUILD files -# - envoy_package() top-level invocation for standard Envoy package setup. +# - envoy_package() or envoy_extension_package() top-level invocation for standard Envoy package setup. # - Infers API dependencies from source files. # - Misc. cleanups: avoids redundant blank lines, removes unused loads. # - Maybe more later? @@ -31,8 +31,9 @@ # Match an Envoy rule, e.g. envoy_cc_library( in a BUILD file. ENVOY_RULE_REGEX = re.compile(r'envoy[_\w]+\(') -# Match a load() statement for the envoy_package macro. +# Match a load() statement for the envoy_package macros. PACKAGE_LOAD_BLOCK_REGEX = re.compile('("envoy_package".*?\)\n)', re.DOTALL) +EXTENSION_PACKAGE_LOAD_BLOCK_REGEX = re.compile('("envoy_extension_package".*?\)\n)', re.DOTALL) # Match Buildozer 'print' output. Example of Buildozer print output: # cc_library json_transcoder_filter_lib [json_transcoder_filter.cc] (missing) (missing) @@ -70,20 +71,29 @@ def RunBuildozer(cmds, contents): # Add an Apache 2 license and envoy_package() import and rule as needed. -def FixPackageAndLicense(contents): +def FixPackageAndLicense(path, contents): + regex_to_use = PACKAGE_LOAD_BLOCK_REGEX + package_string = 'envoy_package' + + if 'source/extensions' in path: + regex_to_use = EXTENSION_PACKAGE_LOAD_BLOCK_REGEX + package_string = 'envoy_extension_package' + # Ensure we have an envoy_package import load if this is a real Envoy package. We also allow # the prefix to be overridden if envoy is included in a larger workspace. if re.search(ENVOY_RULE_REGEX, contents): + new_load = 'new_load {}//bazel:envoy_build_system.bzl %s' % package_string contents = RunBuildozer([ - ('new_load {}//bazel:envoy_build_system.bzl envoy_package'.format( - os.getenv("ENVOY_BAZEL_PREFIX", "")), '__pkg__'), + (new_load.format(os.getenv("ENVOY_BAZEL_PREFIX", "")), '__pkg__'), ], contents) # Envoy package is inserted after the load block containing the # envoy_package import. - if 'envoy_package()' not in contents: - contents = re.sub(PACKAGE_LOAD_BLOCK_REGEX, r'\1\nenvoy_package()\n\n', contents) - if 'envoy_package()' not in contents: - raise EnvoyBuildFixerError('Unable to insert envoy_package()') + package_and_parens = package_string + '()' + if package_and_parens not in contents: + contents = re.sub(regex_to_use, r'\1\n%s\n\n' % package_and_parens, contents) + if package_and_parens not in contents: + raise EnvoyBuildFixerError('Unable to insert %s' % package_and_parens) + # Delete old licenses. 
if re.search(OLD_LICENSES_REGEX, contents): contents = re.sub(OLD_LICENSES_REGEX, '', contents) @@ -173,7 +183,7 @@ def FixBuild(path): with open(path, 'r') as f: contents = f.read() xforms = [ - FixPackageAndLicense, + functools.partial(FixPackageAndLicense, path), functools.partial(FixApiDeps, path), BuildifierLint, ] From 4f85591457c73a56efb171ec021d1f80b09205fe Mon Sep 17 00:00:00 2001 From: ujihisa Date: Fri, 31 Jul 2020 22:33:58 -0700 Subject: [PATCH 824/909] docs: Fix broken YAML in cluster circuit breaker configuration example (#12357) Signed-off-by: ujihisa --- .../upstream/cluster_manager/cluster_circuit_breakers.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/root/configuration/upstream/cluster_manager/cluster_circuit_breakers.rst b/docs/root/configuration/upstream/cluster_manager/cluster_circuit_breakers.rst index 9f765173658b..6c0ca34773c0 100644 --- a/docs/root/configuration/upstream/cluster_manager/cluster_circuit_breakers.rst +++ b/docs/root/configuration/upstream/cluster_manager/cluster_circuit_breakers.rst @@ -11,7 +11,7 @@ The following is an example circuit breaker configuration: .. code-block:: yaml circuit_breakers: - thresholds: + thresholds: - priority: "DEFAULT" max_requests: 75 max_pending_requests: 35 From e44dd09d45e1737b6888076aefbcacdab3245d79 Mon Sep 17 00:00:00 2001 From: asraa Date: Mon, 3 Aug 2020 09:52:24 -0400 Subject: [PATCH 825/909] [test] fix flag in codec test new/legacy (#12412) Commit Message: fix runtime flag override in tests. There was a mistake that codec_impl_test for H/2 tested legacy and codec_impl_legacy_test tested new codec. Swap back so codec tests new, legacy tests legacy. Risk Level: Low Signed-off-by: Asra Ali --- test/common/http/http2/BUILD | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/test/common/http/http2/BUILD b/test/common/http/http2/BUILD index 0a6ff0374d08..467c2785f2aa 100644 --- a/test/common/http/http2/BUILD +++ b/test/common/http/http2/BUILD @@ -40,6 +40,10 @@ CODEC_TEST_DEPS = [ envoy_cc_test( name = "codec_impl_test", srcs = ["codec_impl_test.cc"], + # The default codec is the legacy codec. Override runtime flag for testing new codec. + args = [ + "--runtime-feature-override-for-tests=envoy.reloadable_features.new_codec_behavior", + ], shard_count = 5, tags = ["fails_on_windows"], deps = CODEC_TEST_DEPS, @@ -48,8 +52,9 @@ envoy_cc_test( envoy_cc_test( name = "codec_impl_legacy_test", srcs = ["codec_impl_test.cc"], + # The default codec is the legacy codec. Verify the runtime flag for the new codec is disabled. 
args = [ - "--runtime-feature-override-for-tests=envoy.reloadable_features.new_codec_behavior", + "--runtime-feature-disable-for-tests=envoy.reloadable_features.new_codec_behavior", ], shard_count = 5, tags = ["fails_on_windows"], From eec0e7ec3fb532c228befd3bbb5d67301cbd2c11 Mon Sep 17 00:00:00 2001 From: phlax Date: Mon, 3 Aug 2020 17:11:13 +0100 Subject: [PATCH 826/909] examples: copy configs into example images (#12256) Signed-off-by: Ryan Northey --- examples/BUILD | 7 +++++ examples/cors/backend/Dockerfile-frontenvoy | 2 ++ examples/cors/backend/docker-compose.yaml | 2 -- examples/cors/frontend/Dockerfile-frontenvoy | 2 ++ examples/cors/frontend/docker-compose.yaml | 2 -- .../{ => crosssite}/Dockerfile-frontenvoy | 2 ++ examples/csrf/crosssite/docker-compose.yml | 4 +-- examples/csrf/crosssite/front-envoy.yaml | 2 +- examples/csrf/samesite/Dockerfile-frontenvoy | 7 +++++ examples/csrf/samesite/docker-compose.yml | 4 +-- examples/csrf/samesite/front-envoy.yaml | 27 ++++++++++++------- examples/ext_authz/Dockerfile-frontenvoy | 9 +++++++ examples/ext_authz/docker-compose.yaml | 8 +++--- examples/ext_authz/run_envoy.sh | 3 +++ examples/fault-injection/Dockerfile-envoy | 2 ++ examples/fault-injection/docker-compose.yaml | 1 - examples/fault-injection/envoy.yaml | 9 ++++--- examples/front-proxy/Dockerfile-frontenvoy | 2 ++ examples/front-proxy/docker-compose.yaml | 2 -- examples/grpc-bridge/Dockerfile-client | 5 ++++ examples/grpc-bridge/Dockerfile-server | 5 ++++ examples/grpc-bridge/docker-compose.yaml | 14 +++++----- .../Dockerfile-frontenvoy | 17 ++++++++++++ .../jaeger-native-tracing/docker-compose.yaml | 17 +++--------- examples/jaeger-native-tracing/start-front.sh | 3 --- .../jaeger-native-tracing/start-service.sh | 3 --- examples/jaeger-tracing/Dockerfile-frontenvoy | 7 +++++ examples/jaeger-tracing/docker-compose.yaml | 6 ++--- examples/lua/Dockerfile-proxy | 2 ++ examples/lua/docker-compose.yaml | 2 -- examples/mysql/Dockerfile-proxy | 2 ++ examples/mysql/docker-compose.yaml | 2 -- examples/zipkin-tracing/Dockerfile-frontenvoy | 7 +++++ examples/zipkin-tracing/docker-compose.yaml | 6 ++--- test/config_test/example_configs_test.cc | 4 +-- 35 files changed, 128 insertions(+), 71 deletions(-) rename examples/csrf/{ => crosssite}/Dockerfile-frontenvoy (67%) create mode 100644 examples/csrf/samesite/Dockerfile-frontenvoy create mode 100644 examples/ext_authz/Dockerfile-frontenvoy create mode 100755 examples/ext_authz/run_envoy.sh create mode 100644 examples/grpc-bridge/Dockerfile-client create mode 100644 examples/grpc-bridge/Dockerfile-server create mode 100644 examples/jaeger-native-tracing/Dockerfile-frontenvoy delete mode 100755 examples/jaeger-native-tracing/start-front.sh delete mode 100755 examples/jaeger-native-tracing/start-service.sh create mode 100644 examples/jaeger-tracing/Dockerfile-frontenvoy create mode 100644 examples/zipkin-tracing/Dockerfile-frontenvoy diff --git a/examples/BUILD b/examples/BUILD index 2ad8bbe29b5a..72c67907b879 100644 --- a/examples/BUILD +++ b/examples/BUILD @@ -14,9 +14,14 @@ filegroup( "cors/backend/service-envoy.yaml", "cors/frontend/front-envoy.yaml", "cors/frontend/service-envoy.yaml", + "csrf/crosssite/front-envoy.yaml", + "csrf/samesite/front-envoy.yaml", + "csrf/service-envoy.yaml", "ext_authz/config/grpc-service/v2.yaml", "ext_authz/config/grpc-service/v3.yaml", "ext_authz/config/http-service.yaml", + "ext_authz/config/opa-service/v2.yaml", + "fault-injection/envoy.yaml", "front-proxy/front-envoy.yaml", "front-proxy/service-envoy.yaml", 
"grpc-bridge/client/envoy-proxy.yaml", @@ -27,6 +32,8 @@ filegroup( "load-reporting-service/service-envoy-w-lrs.yaml", "lua/envoy.yaml", "lua/lib/mylibrary.lua", + "mysql/envoy.yaml", + "redis/envoy.yaml", "zipkin-tracing/front-envoy-zipkin.yaml", "zipkin-tracing/service1-envoy-zipkin.yaml", "zipkin-tracing/service2-envoy-zipkin.yaml", diff --git a/examples/cors/backend/Dockerfile-frontenvoy b/examples/cors/backend/Dockerfile-frontenvoy index 83b5ba806c6a..0b2e25a0de1b 100644 --- a/examples/cors/backend/Dockerfile-frontenvoy +++ b/examples/cors/backend/Dockerfile-frontenvoy @@ -2,4 +2,6 @@ FROM envoyproxy/envoy-dev:latest RUN apt-get update && apt-get -q install -y \ curl +COPY ./front-envoy.yaml /etc/front-envoy.yaml +RUN chmod go+r /etc/front-envoy.yaml CMD /usr/local/bin/envoy -c /etc/front-envoy.yaml --service-cluster front-proxy diff --git a/examples/cors/backend/docker-compose.yaml b/examples/cors/backend/docker-compose.yaml index 35427012b465..af233b442c41 100644 --- a/examples/cors/backend/docker-compose.yaml +++ b/examples/cors/backend/docker-compose.yaml @@ -5,8 +5,6 @@ services: build: context: . dockerfile: Dockerfile-frontenvoy - volumes: - - ./front-envoy.yaml:/etc/front-envoy.yaml networks: - envoymesh expose: diff --git a/examples/cors/frontend/Dockerfile-frontenvoy b/examples/cors/frontend/Dockerfile-frontenvoy index 83b5ba806c6a..0b2e25a0de1b 100644 --- a/examples/cors/frontend/Dockerfile-frontenvoy +++ b/examples/cors/frontend/Dockerfile-frontenvoy @@ -2,4 +2,6 @@ FROM envoyproxy/envoy-dev:latest RUN apt-get update && apt-get -q install -y \ curl +COPY ./front-envoy.yaml /etc/front-envoy.yaml +RUN chmod go+r /etc/front-envoy.yaml CMD /usr/local/bin/envoy -c /etc/front-envoy.yaml --service-cluster front-proxy diff --git a/examples/cors/frontend/docker-compose.yaml b/examples/cors/frontend/docker-compose.yaml index 5b230317cb7d..7872d92ae83d 100644 --- a/examples/cors/frontend/docker-compose.yaml +++ b/examples/cors/frontend/docker-compose.yaml @@ -5,8 +5,6 @@ services: build: context: . dockerfile: Dockerfile-frontenvoy - volumes: - - ./front-envoy.yaml:/etc/front-envoy.yaml networks: - envoymesh expose: diff --git a/examples/csrf/Dockerfile-frontenvoy b/examples/csrf/crosssite/Dockerfile-frontenvoy similarity index 67% rename from examples/csrf/Dockerfile-frontenvoy rename to examples/csrf/crosssite/Dockerfile-frontenvoy index 83b5ba806c6a..0b2e25a0de1b 100644 --- a/examples/csrf/Dockerfile-frontenvoy +++ b/examples/csrf/crosssite/Dockerfile-frontenvoy @@ -2,4 +2,6 @@ FROM envoyproxy/envoy-dev:latest RUN apt-get update && apt-get -q install -y \ curl +COPY ./front-envoy.yaml /etc/front-envoy.yaml +RUN chmod go+r /etc/front-envoy.yaml CMD /usr/local/bin/envoy -c /etc/front-envoy.yaml --service-cluster front-proxy diff --git a/examples/csrf/crosssite/docker-compose.yml b/examples/csrf/crosssite/docker-compose.yml index 31e2df957979..4a2f3fdbf43e 100644 --- a/examples/csrf/crosssite/docker-compose.yml +++ b/examples/csrf/crosssite/docker-compose.yml @@ -3,10 +3,8 @@ services: front-envoy: build: - context: .. + context: . 
dockerfile: Dockerfile-frontenvoy - volumes: - - ./front-envoy.yaml:/etc/front-envoy.yaml networks: - envoymesh expose: diff --git a/examples/csrf/crosssite/front-envoy.yaml b/examples/csrf/crosssite/front-envoy.yaml index ea4b7a5f3316..879a0fa66576 100644 --- a/examples/csrf/crosssite/front-envoy.yaml +++ b/examples/csrf/crosssite/front-envoy.yaml @@ -15,7 +15,7 @@ static_resources: - name: envoy.access_loggers.file typed_config: "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog - path: "/var/log/access.log" + path: "/dev/stdout" route_config: name: local_route virtual_hosts: diff --git a/examples/csrf/samesite/Dockerfile-frontenvoy b/examples/csrf/samesite/Dockerfile-frontenvoy new file mode 100644 index 000000000000..0b2e25a0de1b --- /dev/null +++ b/examples/csrf/samesite/Dockerfile-frontenvoy @@ -0,0 +1,7 @@ +FROM envoyproxy/envoy-dev:latest + +RUN apt-get update && apt-get -q install -y \ + curl +COPY ./front-envoy.yaml /etc/front-envoy.yaml +RUN chmod go+r /etc/front-envoy.yaml +CMD /usr/local/bin/envoy -c /etc/front-envoy.yaml --service-cluster front-proxy diff --git a/examples/csrf/samesite/docker-compose.yml b/examples/csrf/samesite/docker-compose.yml index 45ef76f05b7f..2fcac143f6f6 100644 --- a/examples/csrf/samesite/docker-compose.yml +++ b/examples/csrf/samesite/docker-compose.yml @@ -3,10 +3,8 @@ services: front-envoy: build: - context: .. + context: . dockerfile: Dockerfile-frontenvoy - volumes: - - ./front-envoy.yaml:/etc/front-envoy.yaml networks: - envoymesh expose: diff --git a/examples/csrf/samesite/front-envoy.yaml b/examples/csrf/samesite/front-envoy.yaml index e47aff2ec0f8..cc18e2080a24 100644 --- a/examples/csrf/samesite/front-envoy.yaml +++ b/examples/csrf/samesite/front-envoy.yaml @@ -15,7 +15,7 @@ static_resources: - name: envoy.access_loggers.file typed_config: "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog - path: "/var/log/access.log" + path: "/dev/stdout" route_config: name: local_route virtual_hosts: @@ -23,14 +23,17 @@ static_resources: domains: - "*" cors: - allow_origin: - - "*" + allow_origin_string_match: + - safe_regex: + google_re2: {} + regex: \* filter_enabled: default_value: numerator: 100 denominator: HUNDRED - per_filter_config: + typed_per_filter_config: envoy.filters.http.csrf: + "@type": type.googleapis.com/envoy.extensions.filters.http.csrf.v3.CsrfPolicy filter_enabled: default_value: numerator: 100 @@ -46,8 +49,9 @@ static_resources: prefix: "/csrf/disabled" route: cluster: generic_service - per_filter_config: + typed_per_filter_config: envoy.filters.http.csrf: + "@type": type.googleapis.com/envoy.extensions.filters.http.csrf.v3.CsrfPolicy filter_enabled: default_value: numerator: 0 @@ -56,8 +60,9 @@ static_resources: prefix: "/csrf/shadow" route: cluster: generic_service - per_filter_config: + typed_per_filter_config: envoy.filters.http.csrf: + "@type": type.googleapis.com/envoy.extensions.filters.http.csrf.v3.CsrfPolicy filter_enabled: default_value: numerator: 0 @@ -70,14 +75,17 @@ static_resources: prefix: "/csrf/additional_origin" route: cluster: generic_service - per_filter_config: + typed_per_filter_config: envoy.filters.http.csrf: + "@type": type.googleapis.com/envoy.extensions.filters.http.csrf.v3.CsrfPolicy filter_enabled: default_value: numerator: 100 denominator: HUNDRED additional_origins: - - regex: .* + - safe_regex: + google_re2: {} + regex: .* - match: prefix: "/" route: @@ -86,7 +94,8 @@ static_resources: - name: envoy.filters.http.cors typed_config: {} - name: 
envoy.filters.http.csrf - config: + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.csrf.v3.CsrfPolicy filter_enabled: default_value: numerator: 0 diff --git a/examples/ext_authz/Dockerfile-frontenvoy b/examples/ext_authz/Dockerfile-frontenvoy new file mode 100644 index 000000000000..f329c86ce655 --- /dev/null +++ b/examples/ext_authz/Dockerfile-frontenvoy @@ -0,0 +1,9 @@ +FROM envoyproxy/envoy-dev:latest + +RUN apt-get update && apt-get -q install -y \ + curl +COPY ./config /etc/envoy-config +COPY ./run_envoy.sh /run_envoy.sh +RUN chmod go+r -R /etc/envoy-config \ + && chmod go+rx /run_envoy.sh /etc/envoy-config /etc/envoy-config/* +CMD /run_envoy.sh diff --git a/examples/ext_authz/docker-compose.yaml b/examples/ext_authz/docker-compose.yaml index e7fb59a7f2c3..148ecc489f8e 100644 --- a/examples/ext_authz/docker-compose.yaml +++ b/examples/ext_authz/docker-compose.yaml @@ -3,10 +3,10 @@ services: front-envoy: build: - context: ../ - dockerfile: front-proxy/Dockerfile-frontenvoy - volumes: - - ./${FRONT_ENVOY_YAML}:/etc/front-envoy.yaml + context: . + dockerfile: Dockerfile-frontenvoy + environment: + - FRONT_ENVOY_YAML networks: - envoymesh expose: diff --git a/examples/ext_authz/run_envoy.sh b/examples/ext_authz/run_envoy.sh new file mode 100755 index 000000000000..c9bb7ca58b4d --- /dev/null +++ b/examples/ext_authz/run_envoy.sh @@ -0,0 +1,3 @@ +#!/bin/sh + +/usr/local/bin/envoy -c "/etc/envoy-${FRONT_ENVOY_YAML}" --service-cluster front-proxy diff --git a/examples/fault-injection/Dockerfile-envoy b/examples/fault-injection/Dockerfile-envoy index f4c09bae67c5..13dec2521a99 100644 --- a/examples/fault-injection/Dockerfile-envoy +++ b/examples/fault-injection/Dockerfile-envoy @@ -1,4 +1,6 @@ FROM envoyproxy/envoy-dev:latest RUN apt-get update && apt-get install -y curl tree +COPY ./envoy.yaml /etc/envoy.yaml +RUN chmod go+r /etc/envoy.yaml COPY enable_delay_fault_injection.sh disable_delay_fault_injection.sh enable_abort_fault_injection.sh disable_abort_fault_injection.sh send_request.sh / diff --git a/examples/fault-injection/docker-compose.yaml b/examples/fault-injection/docker-compose.yaml index fe8ec0c9d68f..50daad870d67 100644 --- a/examples/fault-injection/docker-compose.yaml +++ b/examples/fault-injection/docker-compose.yaml @@ -6,7 +6,6 @@ services: dockerfile: Dockerfile-envoy command: /usr/local/bin/envoy -c /etc/envoy.yaml volumes: - - ./envoy.yaml:/etc/envoy.yaml - ./runtime:/srv/runtime networks: - envoymesh diff --git a/examples/fault-injection/envoy.yaml b/examples/fault-injection/envoy.yaml index 661e5ad88f54..ac0bd82b3568 100644 --- a/examples/fault-injection/envoy.yaml +++ b/examples/fault-injection/envoy.yaml @@ -63,6 +63,9 @@ admin: socket_address: address: 0.0.0.0 port_value: 9901 -runtime: - symlink_root: /srv/runtime/current - subdirectory: envoy +layered_runtime: + layers: + - name: disk_layer_0 + disk_layer: + symlink_root: /srv/runtime/current + subdirectory: envoy diff --git a/examples/front-proxy/Dockerfile-frontenvoy b/examples/front-proxy/Dockerfile-frontenvoy index 83b5ba806c6a..0b2e25a0de1b 100644 --- a/examples/front-proxy/Dockerfile-frontenvoy +++ b/examples/front-proxy/Dockerfile-frontenvoy @@ -2,4 +2,6 @@ FROM envoyproxy/envoy-dev:latest RUN apt-get update && apt-get -q install -y \ curl +COPY ./front-envoy.yaml /etc/front-envoy.yaml +RUN chmod go+r /etc/front-envoy.yaml CMD /usr/local/bin/envoy -c /etc/front-envoy.yaml --service-cluster front-proxy diff --git a/examples/front-proxy/docker-compose.yaml 
b/examples/front-proxy/docker-compose.yaml index f8de52edd298..cac5826a46d2 100644 --- a/examples/front-proxy/docker-compose.yaml +++ b/examples/front-proxy/docker-compose.yaml @@ -5,8 +5,6 @@ services: build: context: . dockerfile: Dockerfile-frontenvoy - volumes: - - ./front-envoy.yaml:/etc/front-envoy.yaml networks: - envoymesh expose: diff --git a/examples/grpc-bridge/Dockerfile-client b/examples/grpc-bridge/Dockerfile-client new file mode 100644 index 000000000000..da27eecaf689 --- /dev/null +++ b/examples/grpc-bridge/Dockerfile-client @@ -0,0 +1,5 @@ +FROM envoyproxy/envoy-dev:latest + +COPY ./client/envoy-proxy.yaml /etc/client-envoy-proxy.yaml +RUN chmod go+r /etc/client-envoy-proxy.yaml +CMD /usr/local/bin/envoy -c /etc/client-envoy-proxy.yaml diff --git a/examples/grpc-bridge/Dockerfile-server b/examples/grpc-bridge/Dockerfile-server new file mode 100644 index 000000000000..a59690934ede --- /dev/null +++ b/examples/grpc-bridge/Dockerfile-server @@ -0,0 +1,5 @@ +FROM envoyproxy/envoy-dev:latest + +COPY ./server/envoy-proxy.yaml /etc/server-envoy-proxy.yaml +RUN chmod go+r /etc/server-envoy-proxy.yaml +CMD /usr/local/bin/envoy -c /etc/server-envoy-proxy.yaml --service-cluster backend-proxy diff --git a/examples/grpc-bridge/docker-compose.yaml b/examples/grpc-bridge/docker-compose.yaml index c09707a310e5..3ffaa58447c6 100644 --- a/examples/grpc-bridge/docker-compose.yaml +++ b/examples/grpc-bridge/docker-compose.yaml @@ -17,10 +17,9 @@ services: - kv-backend-service grpc-server-proxy: - image: envoyproxy/envoy:latest - command: /usr/local/bin/envoy -c /etc/server-envoy-proxy.yaml --service-cluster backend-proxy - volumes: - - ./server/envoy-proxy.yaml:/etc/server-envoy-proxy.yaml + build: + context: . + dockerfile: Dockerfile-server networks: envoymesh: aliases: @@ -45,10 +44,9 @@ services: - grpc-client grpc-client-proxy: - image: envoyproxy/envoy:latest - command: /usr/local/bin/envoy -c /etc/client-envoy-proxy.yaml - volumes: - - ./client/envoy-proxy.yaml:/etc/client-envoy-proxy.yaml + build: + context: . 
+ dockerfile: Dockerfile-client networks: envoymesh: aliases: diff --git a/examples/jaeger-native-tracing/Dockerfile-frontenvoy b/examples/jaeger-native-tracing/Dockerfile-frontenvoy new file mode 100644 index 000000000000..5379dfe5e242 --- /dev/null +++ b/examples/jaeger-native-tracing/Dockerfile-frontenvoy @@ -0,0 +1,17 @@ +FROM envoyproxy/envoy-dev:latest + +RUN apt-get update && apt-get -q install -y \ + curl +COPY ./front-envoy-jaeger.yaml /etc/front-envoy.yaml +# +# for discussion on jaeger binary compatibility, and the source of the file, see here: +# https://github.com/envoyproxy/envoy/issues/11382#issuecomment-638012072 +# +RUN echo "4a7d17d4724ee890490bcd6cfdedb12a02316a3d33214348d30979abd201f1ca /usr/local/lib/libjaegertracing_plugin.so" > /tmp/checksum \ + && curl -Ls https://github.com/tetratelabs/getenvoy-package/files/3518103/getenvoy-centos-jaegertracing-plugin.tar.gz \ + | tar zxf - -C /usr/local/lib \ + && mv /usr/local/lib/libjaegertracing.so.0.4.2 /usr/local/lib/libjaegertracing_plugin.so \ + && sha256sum -c /tmp/checksum \ + && rm /tmp/checksum \ + && chmod go+r /etc/front-envoy.yaml +CMD /usr/local/bin/envoy -c /etc/front-envoy.yaml --service-cluster front-proxy diff --git a/examples/jaeger-native-tracing/docker-compose.yaml b/examples/jaeger-native-tracing/docker-compose.yaml index ca8fccb3d52b..b0060928551a 100644 --- a/examples/jaeger-native-tracing/docker-compose.yaml +++ b/examples/jaeger-native-tracing/docker-compose.yaml @@ -3,13 +3,8 @@ services: front-envoy: build: - context: ../ - dockerfile: front-proxy/Dockerfile-frontenvoy - volumes: - - ./front-envoy-jaeger.yaml:/etc/front-envoy.yaml - - ./install-jaeger-plugin.sh:/install-jaeger-plugin.sh - - ./start-front.sh:/start-front.sh - entrypoint: /start-front.sh + context: . 
+ dockerfile: Dockerfile-frontenvoy networks: - envoymesh expose: @@ -28,9 +23,7 @@ services: dockerfile: Dockerfile-service volumes: - ./service1-envoy-jaeger.yaml:/etc/service-envoy.yaml - - ./install-jaeger-plugin.sh:/install-jaeger-plugin.sh - - ./start-service.sh:/start-service.sh - entrypoint: /start-service.sh + - ./libjaegertracing.so.0.4.2:/usr/local/lib/libjaegertracing_plugin.so networks: envoymesh: aliases: @@ -49,9 +42,7 @@ services: dockerfile: Dockerfile-service volumes: - ./service2-envoy-jaeger.yaml:/etc/service-envoy.yaml - - ./install-jaeger-plugin.sh:/install-jaeger-plugin.sh - - ./start-service.sh:/start-service.sh - entrypoint: /start-service.sh + - ./libjaegertracing.so.0.4.2:/usr/local/lib/libjaegertracing_plugin.so networks: envoymesh: aliases: diff --git a/examples/jaeger-native-tracing/start-front.sh b/examples/jaeger-native-tracing/start-front.sh deleted file mode 100755 index 0f2eff403021..000000000000 --- a/examples/jaeger-native-tracing/start-front.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/usr/bin/env bash -/install-jaeger-plugin.sh -/usr/local/bin/envoy -c /etc/front-envoy.yaml --service-cluster front-proxy diff --git a/examples/jaeger-native-tracing/start-service.sh b/examples/jaeger-native-tracing/start-service.sh deleted file mode 100755 index e4d9643215e0..000000000000 --- a/examples/jaeger-native-tracing/start-service.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/usr/bin/env bash -/install-jaeger-plugin.sh -/usr/local/bin/start_service.sh diff --git a/examples/jaeger-tracing/Dockerfile-frontenvoy b/examples/jaeger-tracing/Dockerfile-frontenvoy new file mode 100644 index 000000000000..e955e76bb9b8 --- /dev/null +++ b/examples/jaeger-tracing/Dockerfile-frontenvoy @@ -0,0 +1,7 @@ +FROM envoyproxy/envoy-dev:latest + +RUN apt-get update && apt-get -q install -y \ + curl +COPY ./front-envoy-jaeger.yaml /etc/front-envoy.yaml +RUN chmod go+r /etc/front-envoy.yaml +CMD /usr/local/bin/envoy -c /etc/front-envoy.yaml --service-cluster front-proxy diff --git a/examples/jaeger-tracing/docker-compose.yaml b/examples/jaeger-tracing/docker-compose.yaml index 94fbfb62bfa1..22026eac8bef 100644 --- a/examples/jaeger-tracing/docker-compose.yaml +++ b/examples/jaeger-tracing/docker-compose.yaml @@ -3,10 +3,8 @@ services: front-envoy: build: - context: ../ - dockerfile: front-proxy/Dockerfile-frontenvoy - volumes: - - ./front-envoy-jaeger.yaml:/etc/front-envoy.yaml + context: . + dockerfile: Dockerfile-frontenvoy networks: - envoymesh expose: diff --git a/examples/lua/Dockerfile-proxy b/examples/lua/Dockerfile-proxy index 5ba5c9c33a0d..03cb54ac245b 100644 --- a/examples/lua/Dockerfile-proxy +++ b/examples/lua/Dockerfile-proxy @@ -1,3 +1,5 @@ FROM envoyproxy/envoy-dev:latest ADD ./lib/mylibrary.lua /lib/mylibrary.lua +COPY ./envoy.yaml /etc/envoy.yaml +RUN chmod go+r /etc/envoy.yaml /lib/mylibrary.lua CMD /usr/local/bin/envoy -c /etc/envoy.yaml -l debug --service-cluster proxy diff --git a/examples/lua/docker-compose.yaml b/examples/lua/docker-compose.yaml index 716ae8f6c4ff..c5472e4aa8fd 100644 --- a/examples/lua/docker-compose.yaml +++ b/examples/lua/docker-compose.yaml @@ -5,8 +5,6 @@ services: build: context: . 
dockerfile: Dockerfile-proxy - volumes: - - ./envoy.yaml:/etc/envoy.yaml networks: - envoymesh expose: diff --git a/examples/mysql/Dockerfile-proxy b/examples/mysql/Dockerfile-proxy index ad18604cd0c7..09595e6e6279 100644 --- a/examples/mysql/Dockerfile-proxy +++ b/examples/mysql/Dockerfile-proxy @@ -1,3 +1,5 @@ FROM envoyproxy/envoy-dev:latest +COPY ./envoy.yaml /etc/envoy.yaml +RUN chmod go+r /etc/envoy.yaml CMD /usr/local/bin/envoy -c /etc/envoy.yaml -l debug diff --git a/examples/mysql/docker-compose.yaml b/examples/mysql/docker-compose.yaml index d4b8b13e13c1..720a05acb2a2 100644 --- a/examples/mysql/docker-compose.yaml +++ b/examples/mysql/docker-compose.yaml @@ -5,8 +5,6 @@ services: build: context: . dockerfile: Dockerfile-proxy - volumes: - - ./envoy.yaml:/etc/envoy.yaml networks: envoymesh: aliases: diff --git a/examples/zipkin-tracing/Dockerfile-frontenvoy b/examples/zipkin-tracing/Dockerfile-frontenvoy new file mode 100644 index 000000000000..87040962caf2 --- /dev/null +++ b/examples/zipkin-tracing/Dockerfile-frontenvoy @@ -0,0 +1,7 @@ +FROM envoyproxy/envoy-dev:latest + +RUN apt-get update && apt-get -q install -y \ + curl +COPY ./front-envoy-zipkin.yaml /etc/front-envoy.yaml +RUN chmod go+r /etc/front-envoy.yaml +CMD /usr/local/bin/envoy -c /etc/front-envoy.yaml --service-cluster front-proxy diff --git a/examples/zipkin-tracing/docker-compose.yaml b/examples/zipkin-tracing/docker-compose.yaml index 488ccccf1bb6..dc82e926ef5a 100644 --- a/examples/zipkin-tracing/docker-compose.yaml +++ b/examples/zipkin-tracing/docker-compose.yaml @@ -3,10 +3,8 @@ services: front-envoy: build: - context: ../ - dockerfile: front-proxy/Dockerfile-frontenvoy - volumes: - - ./front-envoy-zipkin.yaml:/etc/front-envoy.yaml + context: . + dockerfile: Dockerfile-frontenvoy networks: - envoymesh expose: diff --git a/test/config_test/example_configs_test.cc b/test/config_test/example_configs_test.cc index 6d68ccc0c424..788b04f293c8 100644 --- a/test/config_test/example_configs_test.cc +++ b/test/config_test/example_configs_test.cc @@ -21,9 +21,9 @@ TEST(ExampleConfigsTest, All) { #ifdef __APPLE__ // freebind/freebind.yaml is not supported on macOS and disabled via Bazel. 
- EXPECT_EQ(28UL, ConfigTest::run(directory)); + EXPECT_EQ(35UL, ConfigTest::run(directory)); #else - EXPECT_EQ(29UL, ConfigTest::run(directory)); + EXPECT_EQ(36UL, ConfigTest::run(directory)); #endif ConfigTest::testMerge(); From 9e2da9198abe48dc8ae37d4e1fe7e9c2ca5aab3e Mon Sep 17 00:00:00 2001 From: Rama Chavali Date: Mon, 3 Aug 2020 22:14:45 +0530 Subject: [PATCH 827/909] listener: add additional debug context for filter chain errors (#12405) Signed-off-by: Rama Chavali --- source/server/filter_chain_manager_impl.cc | 21 +++++++++++--------- test/server/listener_manager_impl_test.cc | 23 ++++++++++++---------- 2 files changed, 25 insertions(+), 19 deletions(-) diff --git a/source/server/filter_chain_manager_impl.cc b/source/server/filter_chain_manager_impl.cc index 3e1169c17531..fbc519816c09 100644 --- a/source/server/filter_chain_manager_impl.cc +++ b/source/server/filter_chain_manager_impl.cc @@ -11,7 +11,7 @@ #include "server/configuration_impl.h" -#include "absl/container/node_hash_set.h" +#include "absl/container/node_hash_map.h" #include "absl/strings/match.h" #include "absl/strings/str_cat.h" @@ -150,22 +150,25 @@ void FilterChainManagerImpl::addFilterChain( FilterChainFactoryBuilder& filter_chain_factory_builder, FilterChainFactoryContextCreator& context_creator) { Cleanup cleanup([this]() { origin_ = absl::nullopt; }); - absl::node_hash_set + absl::node_hash_map filter_chains; uint32_t new_filter_chain_size = 0; for (const auto& filter_chain : filter_chain_span) { const auto& filter_chain_match = filter_chain->filter_chain_match(); if (!filter_chain_match.address_suffix().empty() || filter_chain_match.has_suffix_len()) { - throw EnvoyException(fmt::format("error adding listener '{}': contains filter chains with " + throw EnvoyException(fmt::format("error adding listener '{}': filter chain '{}' contains " "unimplemented fields", - address_->asString())); + address_->asString(), filter_chain->name())); } - if (filter_chains.find(filter_chain_match) != filter_chains.end()) { - throw EnvoyException(fmt::format("error adding listener '{}': multiple filter chains with " - "the same matching rules are defined", - address_->asString())); + const auto& matching_iter = filter_chains.find(filter_chain_match); + if (matching_iter != filter_chains.end()) { + throw EnvoyException(fmt::format("error adding listener '{}': filter chain '{}' has " + "the same matching rules defined as '{}'", + address_->asString(), filter_chain->name(), + matching_iter->second)); } - filter_chains.insert(filter_chain_match); + filter_chains.insert({filter_chain_match, filter_chain->name()}); // Validate IP addresses. 
std::vector destination_ips; diff --git a/test/server/listener_manager_impl_test.cc b/test/server/listener_manager_impl_test.cc index 07a88ebf3dd1..85c5c14e76a8 100644 --- a/test/server/listener_manager_impl_test.cc +++ b/test/server/listener_manager_impl_test.cc @@ -3117,17 +3117,19 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithSameMatch - name: "envoy.filters.listener.tls_inspector" typed_config: {} filter_chains: - - filter_chain_match: + - name : foo + filter_chain_match: transport_protocol: "tls" - - filter_chain_match: + - name: bar + filter_chain_match: transport_protocol: "tls" )EOF", Network::Address::IpVersion::v4); EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), EnvoyException, - "error adding listener '127.0.0.1:1234': multiple filter chains with " - "the same matching rules are defined"); + "error adding listener '127.0.0.1:1234': filter chain 'bar' has " + "the same matching rules defined as 'foo'"); } TEST_F(ListenerManagerImplWithRealFiltersTest, @@ -3139,18 +3141,19 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, - name: "envoy.filters.listener.tls_inspector" typed_config: {} filter_chains: - - filter_chain_match: + - name: foo + filter_chain_match: transport_protocol: "tls" - - filter_chain_match: + - name: bar + filter_chain_match: transport_protocol: "tls" address_suffix: 127.0.0.0 )EOF", Network::Address::IpVersion::v4); - EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), - EnvoyException, - "error adding listener '127.0.0.1:1234': contains filter chains with " - "unimplemented fields"); + EXPECT_THROW_WITH_MESSAGE( + manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), EnvoyException, + "error adding listener '127.0.0.1:1234': filter chain 'bar' contains unimplemented fields"); } TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithOverlappingRules) { From 045c7c1e14c3b080ce04a0364d1f29ab831b5502 Mon Sep 17 00:00:00 2001 From: Rama Chavali Date: Mon, 3 Aug 2020 22:15:28 +0530 Subject: [PATCH 828/909] rds: add route name to validation errors (#12404) When route validation error happens, the error does not contain the route name, making it difficult to identify problem if there are many routes. Signed-off-by: Rama Chavali --- source/common/router/config_impl.cc | 8 +++++--- test/common/router/config_impl_test.cc | 14 ++++++++++---- test/common/router/rds_impl_test.cc | 2 +- 3 files changed, 16 insertions(+), 8 deletions(-) diff --git a/source/common/router/config_impl.cc b/source/common/router/config_impl.cc index ae92edeab16b..73a2de20e6dd 100644 --- a/source/common/router/config_impl.cc +++ b/source/common/router/config_impl.cc @@ -1197,7 +1197,8 @@ RouteMatcher::RouteMatcher(const envoy::config::route::v3::RouteConfiguration& r bool duplicate_found = false; if ("*" == domain) { if (default_virtual_host_) { - throw EnvoyException(fmt::format("Only a single wildcard domain is permitted")); + throw EnvoyException(fmt::format("Only a single wildcard domain is permitted in route {}", + route_config.name())); } default_virtual_host_ = virtual_host; } else if (!domain.empty() && '*' == domain[0]) { @@ -1212,8 +1213,9 @@ RouteMatcher::RouteMatcher(const envoy::config::route::v3::RouteConfiguration& r duplicate_found = !virtual_hosts_.emplace(domain, virtual_host).second; } if (duplicate_found) { - throw EnvoyException(fmt::format( - "Only unique values for domains are permitted. 
Duplicate entry of domain {}", domain)); + throw EnvoyException(fmt::format("Only unique values for domains are permitted. Duplicate " + "entry of domain {} in route {}", + domain, route_config.name())); } } } diff --git a/test/common/router/config_impl_test.cc b/test/common/router/config_impl_test.cc index eb327298f0aa..5d4ce7b56bc7 100644 --- a/test/common/router/config_impl_test.cc +++ b/test/common/router/config_impl_test.cc @@ -3751,6 +3751,7 @@ TEST_F(RouteMatcherTest, TestDuplicateDomainConfig) { // Test to detect if hostname matches are case-insensitive TEST_F(RouteMatcherTest, TestCaseSensitiveDomainConfig) { std::string yaml = R"EOF( +name: foo virtual_hosts: - name: www2 domains: [www.lyft.com] @@ -3766,11 +3767,13 @@ TEST_F(RouteMatcherTest, TestCaseSensitiveDomainConfig) { EXPECT_THROW_WITH_MESSAGE( TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, - "Only unique values for domains are permitted. Duplicate entry of domain www.lyft.com"); + "Only unique values for domains are permitted. Duplicate entry of domain www.lyft.com in " + "route foo"); } TEST_F(RouteMatcherTest, TestDuplicateWildcardDomainConfig) { const std::string yaml = R"EOF( +name: foo virtual_hosts: - name: www2 domains: ["*"] @@ -3786,11 +3789,12 @@ TEST_F(RouteMatcherTest, TestDuplicateWildcardDomainConfig) { EXPECT_THROW_WITH_MESSAGE( TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, - "Only a single wildcard domain is permitted"); + "Only a single wildcard domain is permitted in route foo"); } TEST_F(RouteMatcherTest, TestDuplicateSuffixWildcardDomainConfig) { const std::string yaml = R"EOF( +name: foo virtual_hosts: - name: www2 domains: ["*.lyft.com"] @@ -3806,11 +3810,13 @@ TEST_F(RouteMatcherTest, TestDuplicateSuffixWildcardDomainConfig) { EXPECT_THROW_WITH_MESSAGE( TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, - "Only unique values for domains are permitted. Duplicate entry of domain *.lyft.com"); + "Only unique values for domains are permitted. Duplicate entry of domain *.lyft.com in route " + "foo"); } TEST_F(RouteMatcherTest, TestDuplicatePrefixWildcardDomainConfig) { const std::string yaml = R"EOF( +name: foo virtual_hosts: - name: www2 domains: ["bar.*"] @@ -3826,7 +3832,7 @@ TEST_F(RouteMatcherTest, TestDuplicatePrefixWildcardDomainConfig) { EXPECT_THROW_WITH_MESSAGE( TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, - "Only unique values for domains are permitted. Duplicate entry of domain bar.*"); + "Only unique values for domains are permitted. 
Duplicate entry of domain bar.* in route foo"); } TEST_F(RouteMatcherTest, TestInvalidCharactersInPrefixRewrites) { diff --git a/test/common/router/rds_impl_test.cc b/test/common/router/rds_impl_test.cc index 8e0e62652631..3c70a9c93f54 100644 --- a/test/common/router/rds_impl_test.cc +++ b/test/common/router/rds_impl_test.cc @@ -664,7 +664,7 @@ version_info: '1' EXPECT_THROW_WITH_MESSAGE( rds_callbacks_->onConfigUpdate(decoded_resources.refvec_, response1.version_info()), - EnvoyException, "Only a single wildcard domain is permitted"); + EnvoyException, "Only a single wildcard domain is permitted in route foo_route_config"); message_ptr = server_factory_context_.admin_.config_tracker_.config_tracker_callbacks_["routes"](); From a278ff36ee2718436e52820807b7ce7da4d3eed3 Mon Sep 17 00:00:00 2001 From: Yifan Yang Date: Mon, 3 Aug 2020 14:28:17 -0400 Subject: [PATCH 829/909] lint: add guards to prevent the use of std::any/optional/variant (#12421) Per the discussion on #12341, use of these standard library functions are temporarily banned to prevent runtime failure when running in iOS11. Signed-off-by: Yifan Yang --- tools/code_format/check_format.py | 9 +++++++++ tools/code_format/check_format_test_helper.py | 5 +++++ tools/testdata/check_format/std_any.cc | 7 +++++++ tools/testdata/check_format/std_optional.cc | 7 +++++++ tools/testdata/check_format/std_variant.cc | 7 +++++++ 5 files changed, 35 insertions(+) create mode 100644 tools/testdata/check_format/std_any.cc create mode 100644 tools/testdata/check_format/std_optional.cc create mode 100644 tools/testdata/check_format/std_variant.cc diff --git a/tools/code_format/check_format.py b/tools/code_format/check_format.py index d38b731a0e47..75e641071966 100755 --- a/tools/code_format/check_format.py +++ b/tools/code_format/check_format.py @@ -660,6 +660,15 @@ def checkSourceLine(line, file_path, reportError): # The std::atomic_* free functions are functionally equivalent to calling # operations on std::atomic objects, so prefer to use that instead. reportError("Don't use free std::atomic_* functions, use std::atomic members instead.") + # Blocking the use of std::any, std::optional, std::variant for now as iOS 11/macOS 10.13 + # does not support these functions at runtime. 
+ # See: https://github.com/envoyproxy/envoy/issues/12341 + if tokenInLine("std::any", line): + reportError("Don't use std::any; use absl::any instead") + if tokenInLine("std::optional", line): + reportError("Don't use std::optional; use absl::optional instead") + if tokenInLine("std::variant", line): + reportError("Don't use std::variant; use absl::variant instead") if "__attribute__((packed))" in line and file_path != "./include/envoy/common/platform.h": # __attribute__((packed)) is not supported by MSVC, we have a PACKED_STRUCT macro that # can be used instead diff --git a/tools/code_format/check_format_test_helper.py b/tools/code_format/check_format_test_helper.py index 9cb00aa50f86..132354171855 100755 --- a/tools/code_format/check_format_test_helper.py +++ b/tools/code_format/check_format_test_helper.py @@ -238,6 +238,11 @@ def runChecks(): errors += checkUnfixableError( "std_unordered_set.cc", "Don't use std::unordered_set; use absl::flat_hash_set instead " + "or absl::node_hash_set if pointer stability of keys/values is required") + errors += checkUnfixableError("std_any.cc", "Don't use std::any; use absl::any instead") + errors += checkUnfixableError("std_optional.cc", + "Don't use std::optional; use absl::optional instead") + errors += checkUnfixableError("std_variant.cc", + "Don't use std::variant; use absl::variant instead") # The following files have errors that can be automatically fixed. errors += checkAndFixError("over_enthusiastic_spaces.cc", diff --git a/tools/testdata/check_format/std_any.cc b/tools/testdata/check_format/std_any.cc new file mode 100644 index 000000000000..24f2b5576aaa --- /dev/null +++ b/tools/testdata/check_format/std_any.cc @@ -0,0 +1,7 @@ +#include + +namespace Envoy { + void bar() { + std::any foo; + } +} // namespace Envoy diff --git a/tools/testdata/check_format/std_optional.cc b/tools/testdata/check_format/std_optional.cc new file mode 100644 index 000000000000..693aa481e889 --- /dev/null +++ b/tools/testdata/check_format/std_optional.cc @@ -0,0 +1,7 @@ +#include + +namespace Envoy { + void bar() { + std::optional foo; + } +} // namespace Envoy diff --git a/tools/testdata/check_format/std_variant.cc b/tools/testdata/check_format/std_variant.cc new file mode 100644 index 000000000000..60a02f15cddc --- /dev/null +++ b/tools/testdata/check_format/std_variant.cc @@ -0,0 +1,7 @@ +#include + +namespace Envoy { + void bar() { + std::variant foo; + } +} // namespace Envoy From e13b69011fcd2ac086132f4b71eca00379ffb640 Mon Sep 17 00:00:00 2001 From: Sotiris Nanopoulos Date: Mon, 3 Aug 2020 11:53:52 -0700 Subject: [PATCH 830/909] test: use TestEnvironment::nullDevicePath instead of /dev/null/ (#12172) Signed-off-by: Sotiris Nanopoulos --- .../aggregate/cluster_integration_test.cc | 9 +++--- .../redis/redis_cluster_integration_test.cc | 7 +++-- .../mysql_proxy/mysql_integration_test.cc | 1 + .../mysql_proxy/mysql_test_config.yaml | 2 +- .../postgres_integration_test.cc | 2 +- .../postgres_proxy/postgres_test_config.yaml | 2 +- .../redis_proxy_integration_test.cc | 28 +++++++++++-------- test/integration/rtds_integration_test.cc | 5 ++-- 8 files changed, 32 insertions(+), 24 deletions(-) diff --git a/test/extensions/clusters/aggregate/cluster_integration_test.cc b/test/extensions/clusters/aggregate/cluster_integration_test.cc index bd6af30b0808..abebeadc82c9 100644 --- a/test/extensions/clusters/aggregate/cluster_integration_test.cc +++ b/test/extensions/clusters/aggregate/cluster_integration_test.cc @@ -29,9 +29,9 @@ const int FirstUpstreamIndex = 2; 
const int SecondUpstreamIndex = 3; const std::string& config() { - CONSTRUCT_ON_FIRST_USE(std::string, R"EOF( + CONSTRUCT_ON_FIRST_USE(std::string, fmt::format(R"EOF( admin: - access_log_path: /dev/null + access_log_path: {} address: socket_address: address: 127.0.0.1 @@ -47,7 +47,7 @@ const std::string& config() { static_resources: clusters: - name: my_cds_cluster - http2_protocol_options: {} + http2_protocol_options: {{}} load_assignment: cluster_name: my_cds_cluster endpoints: @@ -108,7 +108,8 @@ const std::string& config() { match: prefix: "/aggregatecluster" domains: "*" -)EOF"); +)EOF", + TestEnvironment::nullDevicePath())); } class AggregateIntegrationTest : public testing::TestWithParam, diff --git a/test/extensions/clusters/redis/redis_cluster_integration_test.cc b/test/extensions/clusters/redis/redis_cluster_integration_test.cc index 33f160e8d43f..7cddc336c3f2 100644 --- a/test/extensions/clusters/redis/redis_cluster_integration_test.cc +++ b/test/extensions/clusters/redis/redis_cluster_integration_test.cc @@ -18,9 +18,9 @@ namespace { // in the cluster. The load balancing policy must be set // to random for proper test operation. const std::string& listenerConfig() { - CONSTRUCT_ON_FIRST_USE(std::string, R"EOF( + CONSTRUCT_ON_FIRST_USE(std::string, fmt::format(R"EOF( admin: - access_log_path: /dev/null + access_log_path: {} address: socket_address: address: 127.0.0.1 @@ -44,7 +44,8 @@ const std::string& listenerConfig() { settings: op_timeout: 5s enable_redirection: true -)EOF"); +)EOF", + TestEnvironment::nullDevicePath())); } const std::string& clusterConfig() { diff --git a/test/extensions/filters/network/mysql_proxy/mysql_integration_test.cc b/test/extensions/filters/network/mysql_proxy/mysql_integration_test.cc index ec528867d375..6bbb5bacab3e 100644 --- a/test/extensions/filters/network/mysql_proxy/mysql_integration_test.cc +++ b/test/extensions/filters/network/mysql_proxy/mysql_integration_test.cc @@ -27,6 +27,7 @@ class MySQLIntegrationTest : public testing::TestWithParam Date: Mon, 3 Aug 2020 11:54:55 -0700 Subject: [PATCH 831/909] compdb: use aspects generated header files (#12426) Signed-off-by: Lizan Zhou --- .bazelrc | 4 --- bazel/BUILD | 5 ---- bazel/README.md | 2 +- bazel/envoy_library.bzl | 7 ----- bazel/envoy_test.bzl | 5 ---- bazel/repository_locations.bzl | 7 ++--- ci/run_clang_tidy.sh | 2 +- .../quic_listeners/quiche/platform/BUILD | 5 ---- tools/api_boost/api_boost.py | 3 +-- tools/clang_tools/README.md | 2 +- tools/gen_compilation_database.py | 27 +------------------ tools/vscode/refresh_compdb.sh | 2 +- 12 files changed, 10 insertions(+), 61 deletions(-) diff --git a/.bazelrc b/.bazelrc index b63c7a2ba2ae..6db0ac239e5a 100644 --- a/.bazelrc +++ b/.bazelrc @@ -260,11 +260,7 @@ build:plain-fuzzer --define=FUZZING_ENGINE=libfuzzer build:plain-fuzzer --define ENVOY_CONFIG_ASAN=1 # Compile database generation config -# We don't care about built binaries so always strip and use fastbuild. 
-build:compdb -c fastbuild -build:compdb --strip=always build:compdb --build_tag_filters=-nocompdb -build:compdb --define=ENVOY_CONFIG_COMPILATION_DATABASE=1 # Windows build quirks build:windows --action_env=TMPDIR diff --git a/bazel/BUILD b/bazel/BUILD index 97d9d79fb6be..717d9e4d1683 100644 --- a/bazel/BUILD +++ b/bazel/BUILD @@ -124,11 +124,6 @@ config_setting( values = {"define": "ENVOY_CONFIG_COVERAGE=1"}, ) -config_setting( - name = "compdb_build", - values = {"define": "ENVOY_CONFIG_COMPILATION_DATABASE=1"}, -) - config_setting( name = "clang_build", flag_values = { diff --git a/bazel/README.md b/bazel/README.md index 0c3e3f9abb9d..98974803657f 100644 --- a/bazel/README.md +++ b/bazel/README.md @@ -735,7 +735,7 @@ For example, you can use [You Complete Me](https://valloric.github.io/YouComplet For example, use following command to prepare a compilation database: ``` -TEST_TMPDIR=/tmp tools/gen_compilation_database.py --run_bazel_build +TEST_TMPDIR=/tmp tools/gen_compilation_database.py ``` diff --git a/bazel/envoy_library.bzl b/bazel/envoy_library.bzl index 63e4b963bb18..965ad72690f0 100644 --- a/bazel/envoy_library.bzl +++ b/bazel/envoy_library.bzl @@ -95,13 +95,6 @@ def envoy_cc_library( if tcmalloc_dep: deps += tcmalloc_external_deps(repository) - # Intended for compilation database generation. This generates an empty cc - # source file so Bazel generates virtual includes and recognize them as C++. - # Workaround for https://github.com/bazelbuild/bazel/issues/10845. - srcs += select({ - "@envoy//bazel:compdb_build": ["@envoy//bazel/external:empty.cc"], - "//conditions:default": [], - }) cc_library( name = name, srcs = srcs, diff --git a/bazel/envoy_test.bzl b/bazel/envoy_test.bzl index 7f04e152b77f..21f83d1980de 100644 --- a/bazel/envoy_test.bzl +++ b/bazel/envoy_test.bzl @@ -210,11 +210,6 @@ def envoy_cc_test_library( repository + "//test/test_common:printers_includes", ] - # Same as envoy_cc_library - srcs += select({ - "@envoy//bazel:compdb_build": ["@envoy//bazel/external:empty.cc"], - "//conditions:default": [], - }) _envoy_cc_test_infrastructure_library( name, srcs, diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index d7ddc3c41efb..1d273afb005b 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -42,9 +42,10 @@ USE_CATEGORIES_WITH_CPE_OPTIONAL = ["build", "test", "other"] DEPENDENCY_REPOSITORIES = dict( bazel_compdb = dict( - sha256 = "87e376a685eacfb27bcc0d0cdf5ded1d0b99d868390ac50f452ba6ed781caffe", - strip_prefix = "bazel-compilation-database-0.4.2", - urls = ["https://github.com/grailbio/bazel-compilation-database/archive/0.4.2.tar.gz"], + sha256 = "943f1a57e01d030b9c649f9e41fdafd871e8b0e8a1431f93c6673c38b9c15b3b", + strip_prefix = "bazel-compilation-database-c37b909045eb72d29a47f77cc1e9b519dd5c10b6", + # 2020-07-31 + urls = ["https://github.com/grailbio/bazel-compilation-database/archive/c37b909045eb72d29a47f77cc1e9b519dd5c10b6.tar.gz"], use_category = ["build"], ), bazel_gazelle = dict( diff --git a/ci/run_clang_tidy.sh b/ci/run_clang_tidy.sh index 5b46c82789c8..0f5917fb516d 100755 --- a/ci/run_clang_tidy.sh +++ b/ci/run_clang_tidy.sh @@ -31,7 +31,7 @@ trap cleanup EXIT # bazel build need to be run to setup virtual includes, generating files which are consumed # by clang-tidy -"${ENVOY_SRCDIR}/tools/gen_compilation_database.py" --run_bazel_build --include_headers +"${ENVOY_SRCDIR}/tools/gen_compilation_database.py" --include_headers # Do not run clang-tidy against win32 impl # 
TODO(scw00): We should run clang-tidy against win32 impl once we have clang-cl support for Windows diff --git a/test/extensions/quic_listeners/quiche/platform/BUILD b/test/extensions/quic_listeners/quiche/platform/BUILD index d43071a61163..9ccaf9cdedc2 100644 --- a/test/extensions/quic_listeners/quiche/platform/BUILD +++ b/test/extensions/quic_listeners/quiche/platform/BUILD @@ -224,11 +224,6 @@ envoy_cc_test_library( ], ) -envoy_cc_test_library( - name = "spdy_platform_test_impl_lib", - hdrs = ["spdy_test_impl.h"], -) - envoy_cc_test( name = "envoy_quic_clock_test", srcs = ["envoy_quic_clock_test.cc"], diff --git a/tools/api_boost/api_boost.py b/tools/api_boost/api_boost.py index 8916e8b82822..eda6eaf94088 100755 --- a/tools/api_boost/api_boost.py +++ b/tools/api_boost/api_boost.py @@ -101,8 +101,7 @@ def ApiBoostTree(target_paths, # tool in place before we can start boosting. if generate_compilation_database: print('Building compilation database for %s' % dep_build_targets) - sp.run(['./tools/gen_compilation_database.py', '--run_bazel_build', '--include_headers'] + - dep_build_targets, + sp.run(['./tools/gen_compilation_database.py', '--include_headers'] + dep_build_targets, check=True) if build_api_booster: diff --git a/tools/clang_tools/README.md b/tools/clang_tools/README.md index 705138a7e357..30bbdbddcd75 100644 --- a/tools/clang_tools/README.md +++ b/tools/clang_tools/README.md @@ -36,7 +36,7 @@ generates this and also does setup of the Bazel cache paths to allow external dependencies to be located: ```console -tools/gen_compilation_database.py --run_bazel_build --include_headers +tools/gen_compilation_database.py --include_headers ``` Finally, the tool can be run against source files in the Envoy tree: diff --git a/tools/gen_compilation_database.py b/tools/gen_compilation_database.py index 1a3cf2ff4025..0073b0345016 100755 --- a/tools/gen_compilation_database.py +++ b/tools/gen_compilation_database.py @@ -10,19 +10,6 @@ from pathlib import Path -def runBazelBuildForCompilationDatabase(bazel_options, bazel_targets): - query_targets = ' union '.join(bazel_targets) - query = ' union '.join( - q.format(query_targets) for q in [ - 'attr(include_prefix, ".+", kind(cc_library, deps({})))', - 'attr(strip_include_prefix, ".+", kind(cc_library, deps({})))', - 'attr(generator_function, ".*proto_library", kind(cc_.*, deps({})))', - ]) - build_targets = subprocess.check_output(["bazel", "query", "--notool_deps", - query]).decode().splitlines() - subprocess.check_call(["bazel", "build"] + bazel_options + build_targets) - - # This method is equivalent to https://github.com/grailbio/bazel-compilation-database/blob/master/generate.sh def generateCompilationDatabase(args): # We need to download all remote outputs for generated source code. 
This option lives here to override those @@ -31,20 +18,10 @@ def generateCompilationDatabase(args): "--config=compdb", "--remote_download_outputs=all", ] - if args.keep_going: - bazel_options.append("-k") - if args.run_bazel_build: - try: - runBazelBuildForCompilationDatabase(bazel_options, args.bazel_targets) - except subprocess.CalledProcessError as e: - if not args.keep_going: - raise - else: - logging.warning("bazel build failed {}: {}".format(e.returncode, e.cmd)) subprocess.check_call(["bazel", "build"] + bazel_options + [ "--aspects=@bazel_compdb//:aspects.bzl%compilation_database_aspect", - "--output_groups=compdb_files" + "--output_groups=compdb_files,header_files" ] + args.bazel_targets) execroot = subprocess.check_output(["bazel", "info", "execution_root"] + @@ -110,8 +87,6 @@ def fixCompilationDatabase(args, db): if __name__ == "__main__": parser = argparse.ArgumentParser(description='Generate JSON compilation database') - parser.add_argument('--run_bazel_build', action='store_true') - parser.add_argument('-k', '--keep_going', action='store_true') parser.add_argument('--include_external', action='store_true') parser.add_argument('--include_genfiles', action='store_true') parser.add_argument('--include_headers', action='store_true') diff --git a/tools/vscode/refresh_compdb.sh b/tools/vscode/refresh_compdb.sh index 1f6a279256eb..4a81bc5714b7 100755 --- a/tools/vscode/refresh_compdb.sh +++ b/tools/vscode/refresh_compdb.sh @@ -3,7 +3,7 @@ [[ -z "${SKIP_PROTO_FORMAT}" ]] && tools/proto_format/proto_format.sh fix # Setting TEST_TMPDIR here so the compdb headers won't be overwritten by another bazel run -TEST_TMPDIR=${BUILD_DIR:-/tmp}/envoy-compdb tools/gen_compilation_database.py --run_bazel_build -k +TEST_TMPDIR=${BUILD_DIR:-/tmp}/envoy-compdb tools/gen_compilation_database.py # Kill clangd to reload the compilation database killall -v /opt/llvm/bin/clangd From f4e31831f3b280300baabb4bc37d7da2598ffbd9 Mon Sep 17 00:00:00 2001 From: Yifan Yang Date: Mon, 3 Aug 2020 14:56:05 -0400 Subject: [PATCH 832/909] cleanup: replacing .first/.second with meaningful names [envoy/include and miscellanies in envoy/source/common] (#12359) Since c++17 is supported now in Envoy, we can concisely replace the uninformative .first/.second variables with better names to improve readability. Signed-off-by: Yifan Yang --- include/envoy/registry/registry.h | 43 ++++++++++--------- .../access_log/access_log_manager_impl.cc | 10 ++--- source/common/common/utility.cc | 10 ++--- 3 files changed, 32 insertions(+), 31 deletions(-) diff --git a/include/envoy/registry/registry.h b/include/envoy/registry/registry.h index ef12fff187b6..2b85df27c2e9 100644 --- a/include/envoy/registry/registry.h +++ b/include/envoy/registry/registry.h @@ -164,9 +164,9 @@ template class FactoryRegistry : public Logger::Loggable class FactoryRegistry : public Logger::Loggable class FactoryRegistry : public Logger::Loggable> buildFactoriesByType() { auto mapping = std::make_unique>(); - for (const auto& factory : factories()) { - if (factory.second == nullptr) { + for (const auto& [factory_name, factory] : factories()) { + if (factory == nullptr) { continue; } // Skip untyped factories. 
- std::string config_type = factory.second->configType(); + std::string config_type = factory->configType(); if (config_type.empty()) { continue; } @@ -356,14 +356,14 @@ template class FactoryRegistry : public Logger::Loggablefind(config_type); - if (it != mapping->end() && it->second != factory.second) { + if (it != mapping->end() && it->second != factory) { // Mark double-registered types with a nullptr. // See issue https://github.com/envoyproxy/envoy/issues/9643. ENVOY_LOG(warn, "Double registration for type: '{}' by '{}' and '{}'", config_type, - factory.second->name(), it->second ? it->second->name() : ""); + factory->name(), it->second ? it->second->name() : ""); it->second = nullptr; } else { - mapping->emplace(std::make_pair(config_type, factory.second)); + mapping->emplace(std::make_pair(config_type, factory)); } const Protobuf::Descriptor* previous = @@ -464,21 +464,22 @@ template class FactoryRegistry : public Logger::Loggablename(), prev_by_name->configType()); } - for (auto mapping : prev_deprecated_names) { - deprecatedFactoryNames().erase(mapping.first); + for (auto [prev_deprecated_name, mapped_canonical_name] : prev_deprecated_names) { + deprecatedFactoryNames().erase(prev_deprecated_name); - ENVOY_LOG(info, "Removed deprecated name '{}'", mapping.first); + ENVOY_LOG(info, "Removed deprecated name '{}'", prev_deprecated_name); - if (!mapping.second.empty()) { - deprecatedFactoryNames().emplace(std::make_pair(mapping.first, mapping.second)); + if (!mapped_canonical_name.empty()) { + deprecatedFactoryNames().emplace( + std::make_pair(prev_deprecated_name, mapped_canonical_name)); - auto* deprecated_factory = getFactory(mapping.second); + auto* deprecated_factory = getFactory(mapped_canonical_name); RELEASE_ASSERT(deprecated_factory != nullptr, "failed to restore deprecated factory name"); - factories().emplace(mapping.second, deprecated_factory); + factories().emplace(mapped_canonical_name, deprecated_factory); - ENVOY_LOG(info, "Restored deprecated name '{}' (mapped to '{}'", mapping.first, - mapping.second); + ENVOY_LOG(info, "Restored deprecated name '{}' (mapped to '{}'", prev_deprecated_name, + mapped_canonical_name); } } diff --git a/source/common/access_log/access_log_manager_impl.cc b/source/common/access_log/access_log_manager_impl.cc index 055e602bdcfb..4393fe94c0b8 100644 --- a/source/common/access_log/access_log_manager_impl.cc +++ b/source/common/access_log/access_log_manager_impl.cc @@ -12,16 +12,16 @@ namespace Envoy { namespace AccessLog { AccessLogManagerImpl::~AccessLogManagerImpl() { - for (auto& access_log : access_logs_) { - ENVOY_LOG(debug, "destroying access logger {}", access_log.first); - access_log.second.reset(); + for (auto& [log_key, log_file_ptr] : access_logs_) { + ENVOY_LOG(debug, "destroying access logger {}", log_key); + log_file_ptr.reset(); } ENVOY_LOG(debug, "destroyed access loggers"); } void AccessLogManagerImpl::reopen() { - for (auto& access_log : access_logs_) { - access_log.second->reopen(); + for (auto& [log_key, log_file_ptr] : access_logs_) { + log_file_ptr->reopen(); } } diff --git a/source/common/common/utility.cc b/source/common/common/utility.cc index 2017bbcdfa66..1d7a5005129a 100644 --- a/source/common/common/utility.cc +++ b/source/common/common/utility.cc @@ -507,12 +507,12 @@ std::string StringUtil::removeCharacters(const absl::string_view& str, const auto intervals = remove_characters.toVector(); std::vector pieces; pieces.reserve(intervals.size()); - for (const auto& interval : intervals) { - if 
(interval.first != pos) { - ASSERT(interval.second <= str.size()); - pieces.push_back(str.substr(pos, interval.first - pos)); + for (const auto& [left_bound, right_bound] : intervals) { + if (left_bound != pos) { + ASSERT(right_bound <= str.size()); + pieces.push_back(str.substr(pos, left_bound - pos)); } - pos = interval.second; + pos = right_bound; } if (pos != str.size()) { pieces.push_back(str.substr(pos)); From fedf4a06a02cdd9192540db7eadbee1a3f453113 Mon Sep 17 00:00:00 2001 From: Caio Date: Mon, 3 Aug 2020 13:07:48 -0700 Subject: [PATCH 833/909] CacheFilter: add parsing of range requests (#11943) Tackles the first part of issue #10132 (parsing range requests). Added a function getRequestRanges to parse ranges from requests' headers into LookupRequest. This allows the creation of appropriate ranges for the LookupResult object, which will have the corresponding bytes for the content of the response. In turn, this will allow the extension of the function CacheFilter::onHeaders to provide only the requested range(s) of content. Signed-off-by: Caio Co-authored-by: Josiah Kiehl --- source/common/http/headers.h | 1 + source/extensions/filters/http/cache/BUILD | 2 + .../filters/http/cache/cache_filter.cc | 6 +- .../filters/http/cache/cache_headers_utils.cc | 25 +++ .../filters/http/cache/cache_headers_utils.h | 7 + .../filters/http/cache/http_cache.cc | 106 +++++++++- .../filters/http/cache/http_cache.h | 17 +- .../http/cache/cache_headers_utils_test.cc | 31 +++ .../filters/http/cache/http_cache_test.cc | 192 +++++++++++++++++- 9 files changed, 375 insertions(+), 12 deletions(-) diff --git a/source/common/http/headers.h b/source/common/http/headers.h index 62b0528bb38c..63de5a1351d9 100644 --- a/source/common/http/headers.h +++ b/source/common/http/headers.h @@ -192,6 +192,7 @@ class HeaderValues { const LowerCaseString Path{":path"}; const LowerCaseString Protocol{":protocol"}; const LowerCaseString ProxyConnection{"proxy-connection"}; + const LowerCaseString Range{"range"}; const LowerCaseString RequestId{"x-request-id"}; const LowerCaseString Scheme{":scheme"}; const LowerCaseString Server{"server"}; diff --git a/source/extensions/filters/http/cache/BUILD b/source/extensions/filters/http/cache/BUILD index 159fd3e80253..28d852f64c48 100644 --- a/source/extensions/filters/http/cache/BUILD +++ b/source/extensions/filters/http/cache/BUILD @@ -58,6 +58,7 @@ envoy_cc_library( "//include/envoy/http:codes_interface", "//include/envoy/http:header_map_interface", "//source/common/common:assert_lib", + "//source/common/http:header_utility_lib", "//source/common/http:headers_lib", "//source/common/protobuf:utility_lib", "@envoy_api//envoy/extensions/filters/http/cache/v3alpha:pkg_cc_proto", @@ -68,6 +69,7 @@ envoy_cc_library( name = "cache_headers_utils_lib", srcs = ["cache_headers_utils.cc"], hdrs = ["cache_headers_utils.h"], + external_deps = ["abseil_optional"], deps = [ "//include/envoy/common:time_interface", "//include/envoy/http:header_map_interface", diff --git a/source/extensions/filters/http/cache/cache_filter.cc b/source/extensions/filters/http/cache/cache_filter.cc index 039cb33b37ed..ae9523494f2d 100644 --- a/source/extensions/filters/http/cache/cache_filter.cc +++ b/source/extensions/filters/http/cache/cache_filter.cc @@ -89,8 +89,8 @@ void CacheFilter::onHeaders(LookupResult&& result) { // TODO(yosrym93): Handle request only-if-cached directive switch (result.cache_entry_status_) { case CacheEntryStatus::FoundNotModified: - case CacheEntryStatus::UnsatisfiableRange: - 
NOT_IMPLEMENTED_GCOVR_EXCL_LINE; // We don't yet return or support these codes. + case CacheEntryStatus::NotSatisfiableRange: // TODO(#10132): create 416 response. + NOT_IMPLEMENTED_GCOVR_EXCL_LINE; // We don't yet return or support these codes. case CacheEntryStatus::RequiresValidation: // Cache entries that require validation are treated as unusable entries // until validation is implemented @@ -104,6 +104,8 @@ void CacheFilter::onHeaders(LookupResult&& result) { state_ = GetHeadersState::GetHeadersResultUnusable; } return; + case CacheEntryStatus::SatisfiableRange: // TODO(#10132): break response content to the ranges + // requested. case CacheEntryStatus::Ok: response_has_trailers_ = result.has_trailers_; const bool end_stream = (result.content_length_ == 0 && !response_has_trailers_); diff --git a/source/extensions/filters/http/cache/cache_headers_utils.cc b/source/extensions/filters/http/cache/cache_headers_utils.cc index 988c9e8e0568..f33161cdf220 100644 --- a/source/extensions/filters/http/cache/cache_headers_utils.cc +++ b/source/extensions/filters/http/cache/cache_headers_utils.cc @@ -168,6 +168,31 @@ SystemTime CacheHeadersUtils::httpTime(const Http::HeaderEntry* header_entry) { return {}; } +absl::optional CacheHeadersUtils::readAndRemoveLeadingDigits(absl::string_view& str) { + uint64_t val = 0; + uint32_t bytes_consumed = 0; + + for (const char cur : str) { + if (!absl::ascii_isdigit(cur)) { + break; + } + uint64_t new_val = (val * 10) + (cur - '0'); + if (new_val / 8 < val) { + // Overflow occurred + return absl::nullopt; + } + val = new_val; + ++bytes_consumed; + } + + if (bytes_consumed) { + // Consume some digits + str.remove_prefix(bytes_consumed); + return val; + } + return absl::nullopt; +} + } // namespace Cache } // namespace HttpFilters } // namespace Extensions diff --git a/source/extensions/filters/http/cache/cache_headers_utils.h b/source/extensions/filters/http/cache/cache_headers_utils.h index 9bf50370e61e..8a185d88b40d 100644 --- a/source/extensions/filters/http/cache/cache_headers_utils.h +++ b/source/extensions/filters/http/cache/cache_headers_utils.h @@ -93,6 +93,13 @@ class CacheHeadersUtils { // Parses header_entry as an HTTP time. Returns SystemTime() if // header_entry is null or malformed. static SystemTime httpTime(const Http::HeaderEntry* header_entry); + + /** + * Read a leading positive decimal integer value and advance "*str" past the + * digits read. If overflow occurs, or no digits exist, return + * absl::nullopt without advancing "*str". 
+ */ + static absl::optional readAndRemoveLeadingDigits(absl::string_view& str); }; } // namespace Cache } // namespace HttpFilters diff --git a/source/extensions/filters/http/cache/http_cache.cc b/source/extensions/filters/http/cache/http_cache.cc index 0a406c652f76..75d81e2d31e1 100644 --- a/source/extensions/filters/http/cache/http_cache.cc +++ b/source/extensions/filters/http/cache/http_cache.cc @@ -2,13 +2,17 @@ #include #include +#include #include "envoy/http/codes.h" #include "envoy/http/header_map.h" +#include "common/http/header_utility.h" #include "common/http/headers.h" #include "common/protobuf/utility.h" +#include "absl/strings/str_split.h" +#include "absl/strings/string_view.h" #include "absl/time/time.h" namespace Envoy { @@ -33,8 +37,10 @@ std::ostream& operator<<(std::ostream& os, CacheEntryStatus status) { return os << "RequiresValidation"; case CacheEntryStatus::FoundNotModified: return os << "FoundNotModified"; - case CacheEntryStatus::UnsatisfiableRange: - return os << "UnsatisfiableRange"; + case CacheEntryStatus::NotSatisfiableRange: + return os << "NotSatisfiableRange"; + case CacheEntryStatus::SatisfiableRange: + return os << "SatisfiableRange"; } NOT_REACHED_GCOVR_EXCL_LINE; } @@ -62,8 +68,13 @@ LookupRequest::LookupRequest(const Http::RequestHeaderMap& request_headers, Syst // TODO(toddmgreer): Let config determine whether to include forwarded_proto, host, and // query params. // TODO(toddmgreer): get cluster name. - // TODO(toddmgreer): Parse Range header into request_range_spec_, and handle the resultant - // vector in CacheFilter::onOkHeaders. + // TODO(toddmgreer): handle the resultant vector in CacheFilter::onOkHeaders. + // Range Requests are only valid for GET requests + if (request_headers.getMethodValue() == Http::Headers::get().MethodValues.Get) { + // TODO(cbdm): using a constant limit of 10 ranges, could make this into a parameter + const int RangeSpecifierLimit = 10; + request_range_spec_ = RangeRequests::parseRanges(request_headers, RangeSpecifierLimit); + } key_.set_cluster_name("cluster_name_goes_here"); key_.set_host(std::string(request_headers.getHostValue())); key_.set_path(std::string(request_headers.getPathValue())); @@ -156,6 +167,9 @@ LookupResult LookupRequest::makeLookupResult(Http::ResponseHeaderMapPtr&& respon result.content_length_ = content_length; if (!adjustByteRangeSet(result.response_ranges_, request_range_spec_, content_length)) { result.headers_->setStatus(static_cast(Http::Code::RangeNotSatisfiable)); + result.cache_entry_status_ = CacheEntryStatus::NotSatisfiableRange; + } else if (!result.response_ranges_.empty()) { + result.cache_entry_status_ = CacheEntryStatus::SatisfiableRange; } result.has_trailers_ = false; return result; @@ -213,6 +227,90 @@ bool adjustByteRangeSet(std::vector& response_ranges, } return true; } + +std::vector RangeRequests::parseRanges(const Http::RequestHeaderMap& request_headers, + uint64_t max_byte_range_specs) { + // Makes sure we have a GET request, as Range headers are only valid with this type of request. + const absl::string_view method = request_headers.getMethodValue(); + ASSERT(method == Http::Headers::get().MethodValues.Get); + + // Multiple instances of range headers are invalid. 
+ // https://tools.ietf.org/html/rfc7230#section-3.2.2 + std::vector range_headers; + Http::HeaderUtility::getAllOfHeader(request_headers, Http::Headers::get().Range.get(), + range_headers); + + absl::string_view header_value; + if (range_headers.size() == 1) { + header_value = range_headers.front(); + } else { + if (range_headers.size() > 1) { + ENVOY_LOG(debug, "Multiple range headers provided in request. Ignoring all range headers."); + } + return {}; + } + + if (!absl::ConsumePrefix(&header_value, "bytes=")) { + ENVOY_LOG(debug, "Invalid range header. range-unit not correctly specified, only 'bytes' " + "supported. Ignoring range header."); + return {}; + } + + std::vector ranges = + absl::StrSplit(header_value, absl::MaxSplits(',', max_byte_range_specs)); + if (ranges.size() > max_byte_range_specs) { + ENVOY_LOG(debug, + "There are more ranges than allowed by the byte range parse limit ({}). Ignoring " + "range header.", + max_byte_range_specs); + return {}; + } + + std::vector parsed_ranges; + for (absl::string_view cur_range : ranges) { + absl::optional first = CacheHeadersUtils::readAndRemoveLeadingDigits(cur_range); + + if (!absl::ConsumePrefix(&cur_range, "-")) { + ENVOY_LOG(debug, + "Invalid format for range header: missing range-end. Ignoring range header."); + return {}; + } + + absl::optional last = CacheHeadersUtils::readAndRemoveLeadingDigits(cur_range); + + if (!cur_range.empty()) { + ENVOY_LOG(debug, + "Unexpected characters after byte range in range header. Ignoring range header."); + return {}; + } + + if (!first && !last) { + ENVOY_LOG(debug, "Invalid format for range header: missing first-byte-pos AND last-byte-pos; " + "at least one of them is required. Ignoring range header."); + return {}; + } + + // Handle suffix range (e.g., -123). + if (!first) { + first = std::numeric_limits::max(); + } + + // Handle optional range-end (e.g., 123-). + if (!last) { + last = std::numeric_limits::max(); + } + + if (first != std::numeric_limits::max() && first > last) { + ENVOY_LOG(debug, "Invalid format for range header: range-start and range-end out of order. " + "Ignoring range header."); + return {}; + } + + parsed_ranges.push_back(RawByteRange(first.value(), last.value())); + } + + return parsed_ranges; +} } // namespace Cache } // namespace HttpFilters } // namespace Extensions diff --git a/source/extensions/filters/http/cache/http_cache.h b/source/extensions/filters/http/cache/http_cache.h index 578582ec9be9..760dfa4f835f 100644 --- a/source/extensions/filters/http/cache/http_cache.h +++ b/source/extensions/filters/http/cache/http_cache.h @@ -11,6 +11,7 @@ #include "envoy/http/header_map.h" #include "common/common/assert.h" +#include "common/common/logger.h" #include "source/extensions/filters/http/cache/key.pb.h" @@ -34,8 +35,10 @@ enum class CacheEntryStatus { // This entry is fresh, and an appropriate basis for a 304 Not Modified // response. FoundNotModified, - // This entry is fresh, but can't satisfy the requested range(s). - UnsatisfiableRange, + // This entry is fresh, but cannot satisfy the requested range(s). + NotSatisfiableRange, + // This entry is fresh, and can satisfy the requested range(s). + SatisfiableRange, }; std::ostream& operator<<(std::ostream& os, CacheEntryStatus status); @@ -71,6 +74,16 @@ class RawByteRange { const uint64_t last_byte_pos_; }; +class RangeRequests : Logger::Loggable { +public: + // Parses the ranges from the request headers into a vector. + // max_byte_range_specs defines how many byte ranges can be parsed from the header value. 
+ // If there is no range header, multiple range headers, the header value is malformed, or there + // are more ranges than max_byte_range_specs, returns an empty vector. + static std::vector parseRanges(const Http::RequestHeaderMap& request_headers, + uint64_t max_byte_range_specs); +}; + // Byte range from an HTTP request, adjusted for a known response body size, and converted from an // HTTP-style closed interval to a C++ style half-open interval. class AdjustedByteRange { diff --git a/test/extensions/filters/http/cache/cache_headers_utils_test.cc b/test/extensions/filters/http/cache/cache_headers_utils_test.cc index b9c20baf20d3..a8873346de7d 100644 --- a/test/extensions/filters/http/cache/cache_headers_utils_test.cc +++ b/test/extensions/filters/http/cache/cache_headers_utils_test.cc @@ -311,6 +311,37 @@ TEST_P(HttpTimeTest, Ok) { TEST(HttpTime, Null) { EXPECT_EQ(CacheHeadersUtils::httpTime(nullptr), SystemTime()); } +void testReadAndRemoveLeadingDigits(absl::string_view input, int64_t expected, + absl::string_view remaining) { + absl::string_view test_input(input); + auto output = CacheHeadersUtils::readAndRemoveLeadingDigits(test_input); + if (output) { + EXPECT_EQ(output, static_cast(expected)) << "input=" << input; + EXPECT_EQ(test_input, remaining) << "input=" << input; + } else { + EXPECT_LT(expected, 0) << "input=" << input; + EXPECT_EQ(test_input, remaining) << "input=" << input; + } +} + +TEST(ReadAndRemoveLeadingDigits, ComprehensiveTest) { + testReadAndRemoveLeadingDigits("123", 123, ""); + testReadAndRemoveLeadingDigits("a123", -1, "a123"); + testReadAndRemoveLeadingDigits("9_", 9, "_"); + testReadAndRemoveLeadingDigits("11111111111xyz", 11111111111ll, "xyz"); + + // Overflow case + testReadAndRemoveLeadingDigits("1111111111111111111111111111111xyz", -1, + "1111111111111111111111111111111xyz"); + + // 2^64 + testReadAndRemoveLeadingDigits("18446744073709551616xyz", -1, "18446744073709551616xyz"); + // 2^64-1 + testReadAndRemoveLeadingDigits("18446744073709551615xyz", 18446744073709551615ull, "xyz"); + // (2^64-1)*10+9 + testReadAndRemoveLeadingDigits("184467440737095516159yz", -1, "184467440737095516159yz"); +} + } // namespace } // namespace Cache } // namespace HttpFilters diff --git a/test/extensions/filters/http/cache/http_cache_test.cc b/test/extensions/filters/http/cache/http_cache_test.cc index d28d75aaf39f..cf6ada24cac1 100644 --- a/test/extensions/filters/http/cache/http_cache_test.cc +++ b/test/extensions/filters/http/cache/http_cache_test.cc @@ -202,9 +202,12 @@ TEST_F(LookupRequestTest, PragmaNoFallback) { EXPECT_EQ(CacheEntryStatus::Ok, lookup_response.cache_entry_status_); } -TEST_F(LookupRequestTest, FullRange) { - request_headers_.addCopy("Range", "0-99"); +TEST_F(LookupRequestTest, SatisfiableRange) { + // add method (GET) and range to headers + request_headers_.addReference(Http::Headers::get().Method, Http::Headers::get().MethodValues.Get); + request_headers_.addReference(Http::Headers::get().Range, "bytes=1-99,3-,-2"); const LookupRequest lookup_request(request_headers_, current_time_); + const Http::TestResponseHeaderMapImpl response_headers( {{"date", formatter_.fromTime(current_time_)}, {"cache-control", "public, max-age=3600"}, @@ -212,11 +215,50 @@ TEST_F(LookupRequestTest, FullRange) { const uint64_t content_length = 4; const LookupResult lookup_response = makeLookupResult(lookup_request, response_headers, content_length); - ASSERT_EQ(CacheEntryStatus::Ok, lookup_response.cache_entry_status_); + ASSERT_EQ(CacheEntryStatus::SatisfiableRange, 
lookup_response.cache_entry_status_); + ASSERT_TRUE(lookup_response.headers_); EXPECT_THAT(*lookup_response.headers_, Http::IsSupersetOfHeaders(response_headers)); EXPECT_EQ(lookup_response.content_length_, 4); - EXPECT_TRUE(lookup_response.response_ranges_.empty()); + + // checks that the ranges have been adjusted to the content's length + EXPECT_EQ(lookup_response.response_ranges_.size(), 3); + + EXPECT_EQ(lookup_response.response_ranges_[0].begin(), 1); + EXPECT_EQ(lookup_response.response_ranges_[0].end(), 4); + EXPECT_EQ(lookup_response.response_ranges_[0].length(), 3); + + EXPECT_EQ(lookup_response.response_ranges_[1].begin(), 3); + EXPECT_EQ(lookup_response.response_ranges_[1].end(), 4); + EXPECT_EQ(lookup_response.response_ranges_[1].length(), 1); + + EXPECT_EQ(lookup_response.response_ranges_[2].begin(), 2); + EXPECT_EQ(lookup_response.response_ranges_[2].end(), 4); + EXPECT_EQ(lookup_response.response_ranges_[2].length(), 2); + + EXPECT_FALSE(lookup_response.has_trailers_); +} + +TEST_F(LookupRequestTest, NotSatisfiableRange) { + // add method (GET) and range headers + request_headers_.addReference(Http::Headers::get().Method, Http::Headers::get().MethodValues.Get); + request_headers_.addReference(Http::Headers::get().Range, "bytes=5-99,100-"); + + const LookupRequest lookup_request(request_headers_, current_time_); + + const Http::TestResponseHeaderMapImpl response_headers( + {{"date", formatter_.fromTime(current_time_)}, + {"cache-control", "public, max-age=3600"}, + {"content-length", "4"}}); + const uint64_t content_length = 4; + const LookupResult lookup_response = + makeLookupResult(lookup_request, response_headers, content_length); + ASSERT_EQ(CacheEntryStatus::NotSatisfiableRange, lookup_response.cache_entry_status_); + + ASSERT_TRUE(lookup_response.headers_); + EXPECT_THAT(*lookup_response.headers_, Http::IsSupersetOfHeaders(response_headers)); + EXPECT_EQ(lookup_response.content_length_, 4); + ASSERT_TRUE(lookup_response.response_ranges_.empty()); EXPECT_FALSE(lookup_response.has_trailers_); } @@ -285,6 +327,148 @@ TEST(AdjustByteRange, NoRangeRequest) { EXPECT_THAT(result, ContainerEq(std::vector{})); } +namespace { +Http::TestRequestHeaderMapImpl makeTestHeaderMap(std::string range_value) { + return Http::TestRequestHeaderMapImpl{{":method", "GET"}, {"range", range_value}}; +} +} // namespace + +TEST(ParseRangesTest, NoRangeHeader) { + Http::TestRequestHeaderMapImpl headers = Http::TestRequestHeaderMapImpl{{":method", "GET"}}; + std::vector result_vector = RangeRequests::parseRanges(headers, 5); + + ASSERT_EQ(0, result_vector.size()); +} + +TEST(ParseRangesTest, InvalidUnit) { + Http::TestRequestHeaderMapImpl headers = makeTestHeaderMap("bits=3-4"); + std::vector result_vector = RangeRequests::parseRanges(headers, 5); + + ASSERT_EQ(0, result_vector.size()); +} + +TEST(ParseRangesTest, SingleRange) { + Http::TestRequestHeaderMapImpl headers = makeTestHeaderMap("bytes=3-4"); + std::vector result_vector = RangeRequests::parseRanges(headers, 5); + + ASSERT_EQ(1, result_vector.size()); + + ASSERT_EQ(3, result_vector[0].firstBytePos()); + ASSERT_EQ(4, result_vector[0].lastBytePos()); +} + +TEST(ParseRangesTest, MissingFirstBytePos) { + Http::TestRequestHeaderMapImpl headers = makeTestHeaderMap("bytes=-5"); + std::vector result_vector = RangeRequests::parseRanges(headers, 5); + + ASSERT_EQ(1, result_vector.size()); + + ASSERT_TRUE(result_vector[0].isSuffix()); + ASSERT_EQ(5, result_vector[0].suffixLength()); +} + +TEST(ParseRangesTest, MissingLastBytePos) { + 
Http::TestRequestHeaderMapImpl headers = makeTestHeaderMap("bytes=6-"); + std::vector result_vector = RangeRequests::parseRanges(headers, 5); + + ASSERT_EQ(1, result_vector.size()); + + ASSERT_EQ(6, result_vector[0].firstBytePos()); + ASSERT_EQ(std::numeric_limits::max(), result_vector[0].lastBytePos()); +} + +TEST(ParseRangesTest, MultipleRanges) { + Http::TestRequestHeaderMapImpl headers = makeTestHeaderMap("bytes=345-456,-567,6789-"); + std::vector result_vector = RangeRequests::parseRanges(headers, 5); + + ASSERT_EQ(3, result_vector.size()); + + ASSERT_EQ(345, result_vector[0].firstBytePos()); + ASSERT_EQ(456, result_vector[0].lastBytePos()); + + ASSERT_TRUE(result_vector[1].isSuffix()); + ASSERT_EQ(567, result_vector[1].suffixLength()); + + ASSERT_EQ(6789, result_vector[2].firstBytePos()); + ASSERT_EQ(UINT64_MAX, result_vector[2].lastBytePos()); +} + +TEST(ParseRangesTest, LongRangeHeaderValue) { + Http::TestRequestHeaderMapImpl headers = + makeTestHeaderMap("bytes=1000-1000,1001-1001,1002-1002,1003-1003,1004-1004,1005-" + "1005,1006-1006,1007-1007,1008-1008,100-"); + std::vector result_vector = RangeRequests::parseRanges(headers, 10); + + ASSERT_EQ(10, result_vector.size()); +} + +TEST(ParseRangesTest, ZeroRangeLimit) { + Http::TestRequestHeaderMapImpl headers = makeTestHeaderMap("bytes=1000-1000"); + std::vector result_vector = RangeRequests::parseRanges(headers, 0); + + ASSERT_EQ(0, result_vector.size()); +} + +TEST(ParseRangesTest, OverRangeLimit) { + Http::TestRequestHeaderMapImpl headers = makeTestHeaderMap("bytes=1000-1000,1001-1001"); + std::vector result_vector = RangeRequests::parseRanges(headers, 1); + + ASSERT_EQ(0, result_vector.size()); +} + +class ParseInvalidRangeHeaderTest : public testing::Test, + public testing::WithParamInterface { +protected: + Http::TestRequestHeaderMapImpl range() { return makeTestHeaderMap(GetParam()); } +}; + +// clang-format off +INSTANTIATE_TEST_SUITE_P( + Default, ParseInvalidRangeHeaderTest, + testing::Values("-", + "1-2", + "12", + "a", + "a1", + "bytes=", + "bytes=-", + "bytes1-2", + "bytes=12", + "bytes=1-2-3", + "bytes=1-2-", + "bytes=1--3", + "bytes=--2", + "bytes=2--", + "bytes=-2-", + "bytes=-1-2", + "bytes=a-2", + "bytes=2-a", + "bytes=-a", + "bytes=a-", + "bytes=a1-2", + "bytes=1-a2", + "bytes=1a-2", + "bytes=1-2a", + "bytes=1-2,3-a", + "bytes=1-a,3-4", + "bytes=1-2,3a-4", + "bytes=1-2,3-4a", + "bytes=1-2,3-4-5", + "bytes=1-2,bytes=3-4", + "bytes=1-2,3-4,a", + // too many byte ranges (test sets the limit as 5) + "bytes=0-1,1-2,2-3,3-4,4-5,5-6", + // UINT64_MAX-UINT64_MAX+1 + "bytes=18446744073709551615-18446744073709551616", + // UINT64_MAX+1-UINT64_MAX+2 + "bytes=18446744073709551616-18446744073709551617")); +// clang-format on + +TEST_P(ParseInvalidRangeHeaderTest, InvalidRangeReturnsEmpty) { + std::vector result_vector = RangeRequests::parseRanges(range(), 5); + ASSERT_EQ(0, result_vector.size()); +} + } // namespace Cache } // namespace HttpFilters } // namespace Extensions From 71e9049eb3b22bd53d1651b8a435ed3b4142ee9f Mon Sep 17 00:00:00 2001 From: jianwen612 <55008549+jianwen612@users.noreply.github.com> Date: Mon, 3 Aug 2020 18:01:37 -0500 Subject: [PATCH 834/909] thrift_proxy: Fix potential assert failure when input contains invalid characters (#12362) An assert failure will occur when `config.route_config.route.cluster_header` contains invalid characters for the HTTP header. Added validation for this field to avoid assert failure and a regression test case in the unit test. 
``` [assert] [bazel-out/k8-fastbuild/bin/include/envoy/http/_virtual_includes/header_map_interface/envoy/http/header_map.h:54] assert failure: valid(). ``` Risk Level: Low Testing: Added a regression test case Fixes #12361 Signed-off-by: jianwen --- .../network/thrift_proxy/v3/route.proto | 4 +++- .../network/thrift_proxy/v4alpha/route.proto | 4 +++- docs/root/version_history/current.rst | 1 + .../network/thrift_proxy/v3/route.proto | 4 +++- .../network/thrift_proxy/v4alpha/route.proto | 4 +++- .../thrift_proxy/header_transport_impl.cc | 9 +++++-- .../thrift_proxy/twitter_protocol_impl.cc | 17 ++++++++++--- .../network/thrift_proxy/config_test.cc | 24 +++++++++++++++++++ 8 files changed, 58 insertions(+), 9 deletions(-) diff --git a/api/envoy/extensions/filters/network/thrift_proxy/v3/route.proto b/api/envoy/extensions/filters/network/thrift_proxy/v3/route.proto index 5ce18fd06233..b7afc4f0b803 100644 --- a/api/envoy/extensions/filters/network/thrift_proxy/v3/route.proto +++ b/api/envoy/extensions/filters/network/thrift_proxy/v3/route.proto @@ -103,7 +103,9 @@ message RouteAction { // header is not found or the referenced cluster does not exist Envoy will // respond with an unknown method exception or an internal error exception, // respectively. - string cluster_header = 6 [(validate.rules).string = {min_bytes: 1}]; + string cluster_header = 6 [ + (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_VALUE strict: false} + ]; } // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in diff --git a/api/envoy/extensions/filters/network/thrift_proxy/v4alpha/route.proto b/api/envoy/extensions/filters/network/thrift_proxy/v4alpha/route.proto index 9b847d645a65..374cc131ddf8 100644 --- a/api/envoy/extensions/filters/network/thrift_proxy/v4alpha/route.proto +++ b/api/envoy/extensions/filters/network/thrift_proxy/v4alpha/route.proto @@ -103,7 +103,9 @@ message RouteAction { // header is not found or the referenced cluster does not exist Envoy will // respond with an unknown method exception or an internal error exception, // respectively. - string cluster_header = 6 [(validate.rules).string = {min_bytes: 1}]; + string cluster_header = 6 [ + (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_VALUE strict: false} + ]; } // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index f06045af222a..0ef344aef57e 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -24,6 +24,7 @@ Minor Behavior Changes in the environment. * router: added transport failure reason to response body when upstream reset happens. After this change, the response body will be of the form `upstream connect error or disconnect/reset before headers. reset reason:{}, transport failure reason:{}`.This behavior may be reverted by setting runtime feature `envoy.reloadable_features.http_transport_failure_reason_in_body` to false. * router: now consumes all retry related headers to prevent them from being propagated to the upstream. This behavior may be reverted by setting runtime feature `envoy.reloadable_features.consume_all_retry_headers` to false. +* thrift_proxy: special characters {'\0', '\r', '\n'} will be stripped from thrift headers. 
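For context on the note above: the transport and protocol changes later in this patch strip those characters with absl::StrReplaceAll before building an Http::LowerCaseString, which rejects them. A minimal standalone sketch of that sanitization, assuming only Abseil; the helper name and sample input are illustrative and not part of the patch:

```cpp
#include <iostream>
#include <string>

#include "absl/strings/str_replace.h"

// Hypothetical helper mirroring the replacement list used by the patch:
// remove NUL, LF and CR, the characters Http::LowerCaseString rejects.
std::string sanitizeThriftHeaderKey(const std::string& key) {
  return absl::StrReplaceAll(key, {{std::string(1, '\0'), ""}, {"\n", ""}, {"\r", ""}});
}

int main() {
  std::string key = "user\r\nagent";
  key.push_back('\0'); // embed a NUL, as the regression test below does
  std::cout << sanitizeThriftHeaderKey(key) << "\n"; // prints "useragent"
  return 0;
}
```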
Bug Fixes --------- diff --git a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/route.proto b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/route.proto index 5ce18fd06233..b7afc4f0b803 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/route.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/route.proto @@ -103,7 +103,9 @@ message RouteAction { // header is not found or the referenced cluster does not exist Envoy will // respond with an unknown method exception or an internal error exception, // respectively. - string cluster_header = 6 [(validate.rules).string = {min_bytes: 1}]; + string cluster_header = 6 [ + (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_VALUE strict: false} + ]; } // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in diff --git a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v4alpha/route.proto b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v4alpha/route.proto index 9b847d645a65..374cc131ddf8 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v4alpha/route.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v4alpha/route.proto @@ -103,7 +103,9 @@ message RouteAction { // header is not found or the referenced cluster does not exist Envoy will // respond with an unknown method exception or an internal error exception, // respectively. - string cluster_header = 6 [(validate.rules).string = {min_bytes: 1}]; + string cluster_header = 6 [ + (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_VALUE strict: false} + ]; } // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in diff --git a/source/extensions/filters/network/thrift_proxy/header_transport_impl.cc b/source/extensions/filters/network/thrift_proxy/header_transport_impl.cc index 08903d539c49..0a548452c820 100644 --- a/source/extensions/filters/network/thrift_proxy/header_transport_impl.cc +++ b/source/extensions/filters/network/thrift_proxy/header_transport_impl.cc @@ -8,6 +8,8 @@ #include "extensions/filters/network/thrift_proxy/buffer_helper.h" +#include "absl/strings/str_replace.h" + namespace Envoy { namespace Extensions { namespace NetworkFilters { @@ -143,8 +145,11 @@ bool HeaderTransportImpl::decodeFrameStart(Buffer::Instance& buffer, MessageMeta } while (num_headers-- > 0) { - const Http::LowerCaseString key = - Http::LowerCaseString(drainVarString(buffer, header_size, "header key")); + std::string key_string = drainVarString(buffer, header_size, "header key"); + // LowerCaseString doesn't allow '\0', '\n', and '\r'. 
+ key_string = + absl::StrReplaceAll(key_string, {{std::string(1, '\0'), ""}, {"\n", ""}, {"\r", ""}}); + const Http::LowerCaseString key = Http::LowerCaseString(key_string); const std::string value = drainVarString(buffer, header_size, "header value"); metadata.headers().addCopy(key, value); } diff --git a/source/extensions/filters/network/thrift_proxy/twitter_protocol_impl.cc b/source/extensions/filters/network/thrift_proxy/twitter_protocol_impl.cc index 6d748c2817e5..7f1ca57592cd 100644 --- a/source/extensions/filters/network/thrift_proxy/twitter_protocol_impl.cc +++ b/source/extensions/filters/network/thrift_proxy/twitter_protocol_impl.cc @@ -8,6 +8,8 @@ #include "extensions/filters/network/thrift_proxy/thrift_object_impl.h" #include "extensions/filters/network/thrift_proxy/unframed_transport_impl.h" +#include "absl/strings/str_replace.h" + namespace Envoy { namespace Extensions { namespace NetworkFilters { @@ -1022,7 +1024,10 @@ void TwitterProtocolImpl::updateMetadataWithRequestHeader(const ThriftObject& he metadata.setFlags(*req_header.flags()); } for (const auto& context : *req_header.contexts()) { - headers.addCopy(Http::LowerCaseString{context.key_}, context.value_); + // LowerCaseString doesn't allow '\0', '\n', and '\r'. + const std::string key = + absl::StrReplaceAll(context.key_, {{std::string(1, '\0'), ""}, {"\n", ""}, {"\r", ""}}); + headers.addCopy(Http::LowerCaseString{key}, context.value_); } if (req_header.dest()) { headers.addReferenceKey(Headers::get().Dest, *req_header.dest()); @@ -1030,7 +1035,10 @@ void TwitterProtocolImpl::updateMetadataWithRequestHeader(const ThriftObject& he // TODO(zuercher): Delegations are stored as headers for now. Consider passing them as simple // objects for (const auto& delegation : *req_header.delegations()) { - std::string key = fmt::format(":d:{}", delegation.src_); + // LowerCaseString doesn't allow '\0', '\n', and '\r'. + const std::string src = + absl::StrReplaceAll(delegation.src_, {{std::string(1, '\0'), ""}, {"\n", ""}, {"\r", ""}}); + const std::string key = fmt::format(":d:{}", src); headers.addCopy(Http::LowerCaseString{key}, delegation.dst_); } if (req_header.traceIdHigh()) { @@ -1050,7 +1058,10 @@ void TwitterProtocolImpl::updateMetadataWithResponseHeader(const ThriftObject& h Http::HeaderMap& headers = metadata.headers(); for (const auto& context : resp_header.contexts()) { - headers.addCopy(Http::LowerCaseString(context.key_), context.value_); + // LowerCaseString doesn't allow '\0', '\n', and '\r'. + const std::string key = + absl::StrReplaceAll(context.key_, {{std::string(1, '\0'), ""}, {"\n", ""}, {"\r", ""}}); + headers.addCopy(Http::LowerCaseString(key), context.value_); } SpanList& spans = resp_header.spans(); diff --git a/test/extensions/filters/network/thrift_proxy/config_test.cc b/test/extensions/filters/network/thrift_proxy/config_test.cc index b4afd50866e0..6bf4afbd3f7a 100644 --- a/test/extensions/filters/network/thrift_proxy/config_test.cc +++ b/test/extensions/filters/network/thrift_proxy/config_test.cc @@ -122,6 +122,30 @@ TEST_F(ThriftFilterConfigTest, ThriftProxyWithEmptyProto) { testConfig(config); } +// Test config with an invalid cluster_header. 
+TEST_F(ThriftFilterConfigTest, RouterConfigWithInvalidClusterHeader) {
+  const std::string yaml = R"EOF(
+stat_prefix: thrift
+route_config:
+  name: local_route
+  routes:
+    match:
+      method_name: A
+    route:
+      cluster_header: A
+thrift_filters:
+  - name: envoy.filters.thrift.router
+)EOF";
+
+  envoy::extensions::filters::network::thrift_proxy::v3::ThriftProxy config =
+      parseThriftProxyFromV2Yaml(yaml);
+  std::string header = "A";
+  header.push_back('\000'); // Add an invalid character for http header.
+  config.mutable_route_config()->mutable_routes()->at(0).mutable_route()->set_cluster_header(
+      header);
+  EXPECT_THROW(factory_.createFilterFactoryFromProto(config, context_), ProtoValidationException);
+}
+
 // Test config with an explicitly defined router filter.
 TEST_F(ThriftFilterConfigTest, ThriftProxyWithExplicitRouterConfig) {
   const std::string yaml = R"EOF(

From 1e113e3800535b4a7aaff17d05d68689f9144198 Mon Sep 17 00:00:00 2001
From: Matt Klein
Date: Mon, 3 Aug 2020 16:09:18 -0700
Subject: [PATCH 835/909] test: fix TCP tunneling flake (#12446)

We can't fully close the connection before we are done.

Fixes https://github.com/envoyproxy/envoy/issues/12398

Signed-off-by: Matt Klein
---
 test/integration/tcp_tunneling_integration_test.cc | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/test/integration/tcp_tunneling_integration_test.cc b/test/integration/tcp_tunneling_integration_test.cc
index 83e5be19b533..bc270853ae65 100644
--- a/test/integration/tcp_tunneling_integration_test.cc
+++ b/test/integration/tcp_tunneling_integration_test.cc
@@ -476,11 +476,11 @@ TEST_P(TcpTunnelingIntegrationTest, TcpProxyUpstreamFlush) {
   ASSERT_TRUE(upstream_request_->waitForHeadersComplete());
   upstream_request_->encodeHeaders(default_response_headers_, false);
   upstream_request_->readDisable(true);
-  upstream_request_->encodeData("", true);
+  upstream_request_->encodeData("hello", false);

   // This ensures that fake_upstream_connection->readDisable has been run on its thread
   // before tcp_client starts writing.
-  tcp_client->waitForHalfClose();
+  ASSERT_TRUE(tcp_client->waitForData(5));

   ASSERT_TRUE(tcp_client->write(data, true));

@@ -490,6 +490,7 @@ TEST_P(TcpTunnelingIntegrationTest, TcpProxyUpstreamFlush) {
   upstream_request_->readDisable(false);
   ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, size));
   ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_));
+  upstream_request_->encodeData("world", true);

   tcp_client->waitForHalfClose();
 }

From ea9fecb4550461cb1bfb21310a5760890c61d7b3 Mon Sep 17 00:00:00 2001
From: antonio
Date: Mon, 3 Aug 2020 19:13:20 -0400
Subject: [PATCH 836/909] test: de-flake TimerImpl tests by advancing
 libevent's monotonic clock instead of just sleeping. (#12424)

Use of absl::SleepFor alone wasn't enough because time could move backwards
between either timer registration and absl::SleepFor, or absl::SleepFor and
Dispatcher::run. Updating libevent's time cache until the returned monotonic
time exceeds the desired duration eliminates the possibility of flakiness due
to time moving backwards.
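A standalone sketch of the approach described above, assuming only libevent 2.x and Abseil: sleep, refresh libevent's cached clock, and stop only once the cached reading has advanced by at least the requested duration. The helper name and the bare event_base setup here are illustrative; the fixture in the diff below does the same against the dispatcher's event_base and RELEASE_ASSERTs on the return values.

```cpp
#include <sys/time.h>

#include <event2/event.h>

#include "absl/time/clock.h"
#include "absl/time/time.h"

// Hypothetical helper (not from the patch): advance libevent's cached clock by
// at least `duration`, retrying if the cached time has not moved far enough.
void advanceCachedTime(event_base* base, absl::Duration duration) {
  timeval start_tv;
  event_base_update_cache_time(base);
  event_base_gettimeofday_cached(base, &start_tv);

  timeval now_tv;
  do {
    absl::SleepFor(duration);
    event_base_update_cache_time(base); // refresh the cached time used by timers
    event_base_gettimeofday_cached(base, &now_tv);
  } while (absl::DurationFromTimeval(now_tv) - absl::DurationFromTimeval(start_tv) < duration);
}

int main() {
  event_base* base = event_base_new();
  advanceCachedTime(base, absl::Milliseconds(5));
  event_base_free(base);
  return 0;
}
```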
Signed-off-by: Antonio Vicente --- test/common/event/dispatcher_impl_test.cc | 154 ++++++++++++---------- 1 file changed, 81 insertions(+), 73 deletions(-) diff --git a/test/common/event/dispatcher_impl_test.cc b/test/common/event/dispatcher_impl_test.cc index a651162a3f03..4a709e4972b8 100644 --- a/test/common/event/dispatcher_impl_test.cc +++ b/test/common/event/dispatcher_impl_test.cc @@ -472,41 +472,67 @@ TEST_F(DispatcherMonotonicTimeTest, ApproximateMonotonicTime) { dispatcher_->run(Dispatcher::RunType::Block); } -TEST(TimerImplTest, TimerEnabledDisabled) { - Api::ApiPtr api = Api::createApiForTest(); - DispatcherPtr dispatcher(api->allocateDispatcher("test_thread")); - Event::TimerPtr timer = dispatcher->createTimer([] {}); +class TimerImplTest : public testing::Test { +protected: + void SetUp() override { + // Update time cache to provide a stable time reference for timer registration. + event_base_update_cache_time(&libevent_base_); + } + + // Advance time forward while updating the libevent's time cache and monotonic time reference. + // Pushing the monotonic time reference forward eliminates the possibility of time moving + // backwards and breaking the overly picky TimerImpl tests below. + void advanceLibeventTime(absl::Duration duration) { + timeval start_tv; + { + int ret = event_base_gettimeofday_cached(&libevent_base_, &start_tv); + RELEASE_ASSERT(ret == 0, "event_base_gettimeofday_cached failed"); + } + + timeval now_tv; + do { + absl::SleepFor(duration); + event_base_update_cache_time(&libevent_base_); + int ret = event_base_gettimeofday_cached(&libevent_base_, &now_tv); + RELEASE_ASSERT(ret == 0, "event_base_gettimeofday_cached failed"); + } while (duration > absl::DurationFromTimeval(now_tv) - absl::DurationFromTimeval(start_tv)); + } + + Api::ApiPtr api_{Api::createApiForTest()}; + DispatcherPtr dispatcher_{api_->allocateDispatcher("test_thread")}; + event_base& libevent_base_{static_cast(*dispatcher_).base()}; +}; + +TEST_F(TimerImplTest, TimerEnabledDisabled) { + Event::TimerPtr timer = dispatcher_->createTimer([] {}); EXPECT_FALSE(timer->enabled()); timer->enableTimer(std::chrono::milliseconds(0)); EXPECT_TRUE(timer->enabled()); - dispatcher->run(Dispatcher::RunType::NonBlock); + dispatcher_->run(Dispatcher::RunType::NonBlock); EXPECT_FALSE(timer->enabled()); timer->enableHRTimer(std::chrono::milliseconds(0)); EXPECT_TRUE(timer->enabled()); - dispatcher->run(Dispatcher::RunType::NonBlock); + dispatcher_->run(Dispatcher::RunType::NonBlock); EXPECT_FALSE(timer->enabled()); } // Timers scheduled at different times execute in order. 
-TEST(TimerImplTest, TimerOrdering) { - Api::ApiPtr api = Api::createApiForTest(); - DispatcherPtr dispatcher(api->allocateDispatcher("test_thread")); - +TEST_F(TimerImplTest, TimerOrdering) { ReadyWatcher watcher1; - Event::TimerPtr timer1 = dispatcher->createTimer([&] { watcher1.ready(); }); + Event::TimerPtr timer1 = dispatcher_->createTimer([&] { watcher1.ready(); }); ReadyWatcher watcher2; - Event::TimerPtr timer2 = dispatcher->createTimer([&] { watcher2.ready(); }); + Event::TimerPtr timer2 = dispatcher_->createTimer([&] { watcher2.ready(); }); ReadyWatcher watcher3; - Event::TimerPtr timer3 = dispatcher->createTimer([&] { watcher3.ready(); }); + Event::TimerPtr timer3 = dispatcher_->createTimer([&] { watcher3.ready(); }); timer1->enableTimer(std::chrono::milliseconds(0)); timer2->enableTimer(std::chrono::milliseconds(1)); timer3->enableTimer(std::chrono::milliseconds(2)); - // Sleep for 5ms so timers above all trigger in the same loop iteration. - absl::SleepFor(absl::Milliseconds(5)); + // Advance time by 5ms so timers above all trigger in the same loop iteration. + advanceLibeventTime(absl::Milliseconds(5)); EXPECT_TRUE(timer1->enabled()); EXPECT_TRUE(timer2->enabled()); @@ -517,22 +543,19 @@ TEST(TimerImplTest, TimerOrdering) { EXPECT_CALL(watcher1, ready()); EXPECT_CALL(watcher2, ready()); EXPECT_CALL(watcher3, ready()); - dispatcher->run(Dispatcher::RunType::Block); + dispatcher_->run(Dispatcher::RunType::Block); } // Alarms that are scheduled to execute and are cancelled do not trigger. -TEST(TimerImplTest, TimerOrderAndDisableAlarm) { - Api::ApiPtr api = Api::createApiForTest(); - DispatcherPtr dispatcher(api->allocateDispatcher("test_thread")); - +TEST_F(TimerImplTest, TimerOrderAndDisableAlarm) { ReadyWatcher watcher3; - Event::TimerPtr timer3 = dispatcher->createTimer([&] { watcher3.ready(); }); + Event::TimerPtr timer3 = dispatcher_->createTimer([&] { watcher3.ready(); }); ReadyWatcher watcher2; - Event::TimerPtr timer2 = dispatcher->createTimer([&] { watcher2.ready(); }); + Event::TimerPtr timer2 = dispatcher_->createTimer([&] { watcher2.ready(); }); ReadyWatcher watcher1; - Event::TimerPtr timer1 = dispatcher->createTimer([&] { + Event::TimerPtr timer1 = dispatcher_->createTimer([&] { timer2->disableTimer(); watcher1.ready(); }); @@ -541,8 +564,8 @@ TEST(TimerImplTest, TimerOrderAndDisableAlarm) { timer2->enableTimer(std::chrono::milliseconds(1)); timer3->enableTimer(std::chrono::milliseconds(2)); - // Sleep for 5ms so timers above all trigger in the same loop iteration. - absl::SleepFor(absl::Milliseconds(5)); + // Advance time by 5ms so timers above all trigger in the same loop iteration. + advanceLibeventTime(absl::Milliseconds(5)); EXPECT_TRUE(timer1->enabled()); EXPECT_TRUE(timer2->enabled()); @@ -552,26 +575,23 @@ TEST(TimerImplTest, TimerOrderAndDisableAlarm) { InSequence s; EXPECT_CALL(watcher1, ready()); EXPECT_CALL(watcher3, ready()); - dispatcher->run(Dispatcher::RunType::Block); + dispatcher_->run(Dispatcher::RunType::Block); } // Change the registration time for a timer that is already activated by disabling and re-enabling // the timer. Verify that execution is delayed. 
-TEST(TimerImplTest, TimerOrderDisableAndReschedule) { - Api::ApiPtr api = Api::createApiForTest(); - DispatcherPtr dispatcher(api->allocateDispatcher("test_thread")); - +TEST_F(TimerImplTest, TimerOrderDisableAndReschedule) { ReadyWatcher watcher4; - Event::TimerPtr timer4 = dispatcher->createTimer([&] { watcher4.ready(); }); + Event::TimerPtr timer4 = dispatcher_->createTimer([&] { watcher4.ready(); }); ReadyWatcher watcher3; - Event::TimerPtr timer3 = dispatcher->createTimer([&] { watcher3.ready(); }); + Event::TimerPtr timer3 = dispatcher_->createTimer([&] { watcher3.ready(); }); ReadyWatcher watcher2; - Event::TimerPtr timer2 = dispatcher->createTimer([&] { watcher2.ready(); }); + Event::TimerPtr timer2 = dispatcher_->createTimer([&] { watcher2.ready(); }); ReadyWatcher watcher1; - Event::TimerPtr timer1 = dispatcher->createTimer([&] { + Event::TimerPtr timer1 = dispatcher_->createTimer([&] { timer2->disableTimer(); timer2->enableTimer(std::chrono::milliseconds(0)); timer3->disableTimer(); @@ -584,8 +604,8 @@ TEST(TimerImplTest, TimerOrderDisableAndReschedule) { timer3->enableTimer(std::chrono::milliseconds(2)); timer4->enableTimer(std::chrono::milliseconds(3)); - // Sleep for 5ms so timers above all trigger in the same loop iteration. - absl::SleepFor(absl::Milliseconds(5)); + // Advance time by 5ms so timers above all trigger in the same loop iteration. + advanceLibeventTime(absl::Milliseconds(5)); EXPECT_TRUE(timer1->enabled()); EXPECT_TRUE(timer2->enabled()); @@ -599,26 +619,23 @@ TEST(TimerImplTest, TimerOrderDisableAndReschedule) { EXPECT_CALL(watcher4, ready()); EXPECT_CALL(watcher2, ready()); EXPECT_CALL(watcher3, ready()); - dispatcher->run(Dispatcher::RunType::Block); + dispatcher_->run(Dispatcher::RunType::Block); } // Change the registration time for a timer that is already activated by re-enabling the timer // without calling disableTimer first. -TEST(TimerImplTest, TimerOrderAndReschedule) { - Api::ApiPtr api = Api::createApiForTest(); - DispatcherPtr dispatcher(api->allocateDispatcher("test_thread")); - +TEST_F(TimerImplTest, TimerOrderAndReschedule) { ReadyWatcher watcher4; - Event::TimerPtr timer4 = dispatcher->createTimer([&] { watcher4.ready(); }); + Event::TimerPtr timer4 = dispatcher_->createTimer([&] { watcher4.ready(); }); ReadyWatcher watcher3; - Event::TimerPtr timer3 = dispatcher->createTimer([&] { watcher3.ready(); }); + Event::TimerPtr timer3 = dispatcher_->createTimer([&] { watcher3.ready(); }); ReadyWatcher watcher2; - Event::TimerPtr timer2 = dispatcher->createTimer([&] { watcher2.ready(); }); + Event::TimerPtr timer2 = dispatcher_->createTimer([&] { watcher2.ready(); }); ReadyWatcher watcher1; - Event::TimerPtr timer1 = dispatcher->createTimer([&] { + Event::TimerPtr timer1 = dispatcher_->createTimer([&] { timer2->enableTimer(std::chrono::milliseconds(0)); timer3->enableTimer(std::chrono::milliseconds(1)); watcher1.ready(); @@ -629,8 +646,8 @@ TEST(TimerImplTest, TimerOrderAndReschedule) { timer3->enableTimer(std::chrono::milliseconds(2)); timer4->enableTimer(std::chrono::milliseconds(3)); - // Sleep for 5ms so timers above all trigger in the same loop iteration. - absl::SleepFor(absl::Milliseconds(5)); + // Advance time by 5ms so timers above all trigger in the same loop iteration. 
+ advanceLibeventTime(absl::Milliseconds(5)); EXPECT_TRUE(timer1->enabled()); EXPECT_TRUE(timer2->enabled()); @@ -645,30 +662,27 @@ TEST(TimerImplTest, TimerOrderAndReschedule) { EXPECT_CALL(watcher2, ready()); EXPECT_CALL(watcher4, ready()); EXPECT_CALL(watcher3, ready()); - dispatcher->run(Dispatcher::RunType::Block); + dispatcher_->run(Dispatcher::RunType::Block); } -TEST(TimerImplTest, TimerChaining) { - Api::ApiPtr api = Api::createApiForTest(); - DispatcherPtr dispatcher(api->allocateDispatcher("test_thread")); - +TEST_F(TimerImplTest, TimerChaining) { ReadyWatcher watcher1; - Event::TimerPtr timer1 = dispatcher->createTimer([&] { watcher1.ready(); }); + Event::TimerPtr timer1 = dispatcher_->createTimer([&] { watcher1.ready(); }); ReadyWatcher watcher2; - Event::TimerPtr timer2 = dispatcher->createTimer([&] { + Event::TimerPtr timer2 = dispatcher_->createTimer([&] { watcher2.ready(); timer1->enableTimer(std::chrono::milliseconds(0)); }); ReadyWatcher watcher3; - Event::TimerPtr timer3 = dispatcher->createTimer([&] { + Event::TimerPtr timer3 = dispatcher_->createTimer([&] { watcher3.ready(); timer2->enableTimer(std::chrono::milliseconds(0)); }); ReadyWatcher watcher4; - Event::TimerPtr timer4 = dispatcher->createTimer([&] { + Event::TimerPtr timer4 = dispatcher_->createTimer([&] { watcher4.ready(); timer3->enableTimer(std::chrono::milliseconds(0)); }); @@ -683,7 +697,7 @@ TEST(TimerImplTest, TimerChaining) { EXPECT_CALL(watcher3, ready()); EXPECT_CALL(watcher2, ready()); EXPECT_CALL(watcher1, ready()); - dispatcher->run(Dispatcher::RunType::NonBlock); + dispatcher_->run(Dispatcher::RunType::NonBlock); EXPECT_FALSE(timer1->enabled()); EXPECT_FALSE(timer2->enabled()); @@ -691,10 +705,7 @@ TEST(TimerImplTest, TimerChaining) { EXPECT_FALSE(timer4->enabled()); } -TEST(TimerImplTest, TimerChainDisable) { - Api::ApiPtr api = Api::createApiForTest(); - DispatcherPtr dispatcher(api->allocateDispatcher("test_thread")); - +TEST_F(TimerImplTest, TimerChainDisable) { ReadyWatcher watcher; Event::TimerPtr timer1; Event::TimerPtr timer2; @@ -707,9 +718,9 @@ TEST(TimerImplTest, TimerChainDisable) { timer3->disableTimer(); }; - timer1 = dispatcher->createTimer(timer_cb); - timer2 = dispatcher->createTimer(timer_cb); - timer3 = dispatcher->createTimer(timer_cb); + timer1 = dispatcher_->createTimer(timer_cb); + timer2 = dispatcher_->createTimer(timer_cb); + timer3 = dispatcher_->createTimer(timer_cb); timer3->enableTimer(std::chrono::milliseconds(0)); timer2->enableTimer(std::chrono::milliseconds(0)); @@ -720,13 +731,10 @@ TEST(TimerImplTest, TimerChainDisable) { EXPECT_TRUE(timer3->enabled()); // Only 1 call to watcher ready since the other 2 timers were disabled by the first timer. 
EXPECT_CALL(watcher, ready()); - dispatcher->run(Dispatcher::RunType::NonBlock); + dispatcher_->run(Dispatcher::RunType::NonBlock); } -TEST(TimerImplTest, TimerChainDelete) { - Api::ApiPtr api = Api::createApiForTest(); - DispatcherPtr dispatcher(api->allocateDispatcher("test_thread")); - +TEST_F(TimerImplTest, TimerChainDelete) { ReadyWatcher watcher; Event::TimerPtr timer1; Event::TimerPtr timer2; @@ -739,9 +747,9 @@ TEST(TimerImplTest, TimerChainDelete) { timer3.reset(); }; - timer1 = dispatcher->createTimer(timer_cb); - timer2 = dispatcher->createTimer(timer_cb); - timer3 = dispatcher->createTimer(timer_cb); + timer1 = dispatcher_->createTimer(timer_cb); + timer2 = dispatcher_->createTimer(timer_cb); + timer3 = dispatcher_->createTimer(timer_cb); timer3->enableTimer(std::chrono::milliseconds(0)); timer2->enableTimer(std::chrono::milliseconds(0)); @@ -752,7 +760,7 @@ TEST(TimerImplTest, TimerChainDelete) { EXPECT_TRUE(timer3->enabled()); // Only 1 call to watcher ready since the other 2 timers were deleted by the first timer. EXPECT_CALL(watcher, ready()); - dispatcher->run(Dispatcher::RunType::NonBlock); + dispatcher_->run(Dispatcher::RunType::NonBlock); } class TimerImplTimingTest : public testing::Test { From 5fb236b4c3376e2b761d540a54b4772d620740aa Mon Sep 17 00:00:00 2001 From: Snow Pettersen Date: Mon, 3 Aug 2020 19:14:14 -0400 Subject: [PATCH 837/909] hcm: move ownership of headers/trailers into FM (#12406) * hcm: introduce FilterManagerCallbacks, improve FM interface Introduces FilterManagerCallbacks which can be used by the FM to call back out with the encoded data. This interface will be expanded as more functionalitiy is split between the ActiveStream and the FilterManager. Also makes the majority of FM functions private, relying on befriending the filter wrappers and a more well defined interface for the ActiveStream to pass headers/data to be decoded by the FM. 
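[Editor's note] For readers unfamiliar with the callback-interface pattern this commit message describes, the sketch below shows the general shape: the manager owns the header maps and reports encoded output back to the owning stream through a narrow virtual interface, while callers reach the headers through accessors that may be null before decoding starts. This is a minimal illustration only, not the actual Envoy FilterManagerCallbacks API; every name in it is invented for the example.

  #include <iostream>
  #include <memory>
  #include <string>

  struct HeaderMap { std::string path; };
  using HeaderMapPtr = std::unique_ptr<HeaderMap>;

  // Narrow interface the owning stream implements so the manager can hand
  // encoded results back out (the "FilterManagerCallbacks" role in this sketch).
  class ManagerCallbacks {
  public:
    virtual ~ManagerCallbacks() = default;
    virtual void onResponseHeadersEncoded(const HeaderMap& headers) = 0;
  };

  // The manager owns request/response headers; callers reach them through
  // accessors that return nullptr until the corresponding map exists.
  class Manager {
  public:
    explicit Manager(ManagerCallbacks& callbacks) : callbacks_(callbacks) {}

    void setRequestHeaders(HeaderMapPtr&& headers) { request_headers_ = std::move(headers); }
    HeaderMap* requestHeaders() const { return request_headers_.get(); }

    void encodeHeaders(HeaderMapPtr&& headers) {
      response_headers_ = std::move(headers);
      callbacks_.onResponseHeadersEncoded(*response_headers_); // call back out to the stream
    }

  private:
    ManagerCallbacks& callbacks_;
    HeaderMapPtr request_headers_;
    HeaderMapPtr response_headers_;
  };

  class Stream : public ManagerCallbacks {
  public:
    void onResponseHeadersEncoded(const HeaderMap& headers) override {
      std::cout << "encoded response for path " << headers.path << "\n";
    }
  };

  int main() {
    Stream stream;
    Manager manager(stream);
    manager.setRequestHeaders(std::make_unique<HeaderMap>(HeaderMap{"/ping"}));
    if (manager.requestHeaders() != nullptr) {
      manager.encodeHeaders(std::make_unique<HeaderMap>(HeaderMap{"/ping"}));
    }
    return 0;
  }

In the diff below, this split is what lets ActiveStream stop storing request_headers_/response_headers_ directly and go through filter_manager_.requestHeaders()/responseHeaders() instead.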
Signed-off-by: Snow Pettersen * add buffers + watermarks to callback interface Signed-off-by: Snow Pettersen * fix merge conflicts Signed-off-by: Snow Pettersen * format Signed-off-by: Snow Pettersen * renames, comments Signed-off-by: Snow Pettersen * more comments, move buffer_limit to ctor Signed-off-by: Snow Pettersen * format + comments Signed-off-by: Snow Pettersen * hcm: change ownership of headers from ActiveStream to FilterManager Signed-off-by: Snow Pettersen * cleanup Signed-off-by: Snow Pettersen * format Signed-off-by: Snow Pettersen * revert signature Signed-off-by: Snow Pettersen * format Signed-off-by: Snow Pettersen * fix test Signed-off-by: Snow Pettersen * add todo Signed-off-by: Snow Pettersen Co-authored-by: Snow Pettersen --- source/common/http/conn_manager_impl.cc | 269 +++++++++++---------- source/common/http/conn_manager_impl.h | 85 +++++-- source/common/http/header_utility.cc | 4 +- source/common/http/header_utility.h | 2 +- test/common/http/conn_manager_impl_test.cc | 4 +- test/common/http/header_utility_test.cc | 6 +- 6 files changed, 216 insertions(+), 154 deletions(-) diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index 29b9a3565251..e325baa41a56 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -11,6 +11,7 @@ #include "envoy/common/time.h" #include "envoy/event/dispatcher.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" +#include "envoy/http/header_map.h" #include "envoy/network/drain_decision.h" #include "envoy/router/router.h" #include "envoy/ssl/connection.h" @@ -30,6 +31,7 @@ #include "common/http/conn_manager_utility.h" #include "common/http/exception.h" #include "common/http/header_map_impl.h" +#include "common/http/header_utility.h" #include "common/http/headers.h" #include "common/http/http1/codec_impl.h" #include "common/http/http2/codec_impl.h" @@ -629,12 +631,12 @@ ConnectionManagerImpl::ActiveStream::~ActiveStream() { connection_manager_.stats_.named_.downstream_rq_active_.dec(); for (const AccessLog::InstanceSharedPtr& access_log : connection_manager_.config_.accessLogs()) { - access_log->log(request_headers_.get(), response_headers_.get(), response_trailers_.get(), - stream_info_); + access_log->log(filter_manager_.requestHeaders(), filter_manager_.responseHeaders(), + filter_manager_.responseTrailers(), stream_info_); } for (const auto& log_handler : access_log_handlers_) { - log_handler->log(request_headers_.get(), response_headers_.get(), response_trailers_.get(), - stream_info_); + log_handler->log(filter_manager_.requestHeaders(), filter_manager_.responseHeaders(), + filter_manager_.responseTrailers(), stream_info_); } if (stream_info_.healthCheck()) { @@ -643,8 +645,8 @@ ConnectionManagerImpl::ActiveStream::~ActiveStream() { if (active_span_) { Tracing::HttpTracerUtility::finalizeDownstreamSpan( - *active_span_, request_headers_.get(), response_headers_.get(), response_trailers_.get(), - stream_info_, *this); + *active_span_, filter_manager_.requestHeaders(), filter_manager_.responseHeaders(), + filter_manager_.responseTrailers(), stream_info_, *this); } if (state_.successful_upgrade_) { connection_manager_.stats_.named_.downstream_cx_upgrades_active_.dec(); @@ -665,7 +667,7 @@ void ConnectionManagerImpl::ActiveStream::resetIdleTimer() { void ConnectionManagerImpl::ActiveStream::onIdleTimeout() { connection_manager_.stats_.named_.downstream_rq_idle_timeout_.inc(); // If headers have not 
been sent to the user, send a 408. - if (response_headers_ != nullptr && + if (filter_manager_.responseHeaders() != nullptr && !Runtime::runtimeFeatureEnabled("envoy.reloadable_features.allow_response_for_timeout")) { // TODO(htuch): We could send trailers here with an x-envoy timeout header // or gRPC status code, and/or set H2 RST_STREAM error. @@ -674,8 +676,8 @@ void ConnectionManagerImpl::ActiveStream::onIdleTimeout() { } else { // TODO(mattklein) this may result in multiple flags. This Ok? stream_info_.setResponseFlag(StreamInfo::ResponseFlag::StreamIdleTimeout); - sendLocalReply(request_headers_ != nullptr && - Grpc::Common::isGrpcRequestHeaders(*request_headers_), + sendLocalReply(filter_manager_.requestHeaders() != nullptr && + Grpc::Common::isGrpcRequestHeaders(*filter_manager_.requestHeaders()), Http::Code::RequestTimeout, "stream timeout", nullptr, absl::nullopt, StreamInfo::ResponseCodeDetails::get().StreamIdleTimeout); } @@ -683,8 +685,8 @@ void ConnectionManagerImpl::ActiveStream::onIdleTimeout() { void ConnectionManagerImpl::ActiveStream::onRequestTimeout() { connection_manager_.stats_.named_.downstream_rq_timeout_.inc(); - sendLocalReply(request_headers_ != nullptr && - Grpc::Common::isGrpcRequestHeaders(*request_headers_), + sendLocalReply(filter_manager_.requestHeaders() != nullptr && + Grpc::Common::isGrpcRequestHeaders(*filter_manager_.requestHeaders()), Http::Code::RequestTimeout, "request timeout", nullptr, absl::nullopt, StreamInfo::ResponseCodeDetails::get().RequestOverallTimeout); } @@ -693,8 +695,8 @@ void ConnectionManagerImpl::ActiveStream::onStreamMaxDurationReached() { ENVOY_STREAM_LOG(debug, "Stream max duration time reached", *this); connection_manager_.stats_.named_.downstream_rq_max_duration_reached_.inc(); if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.allow_response_for_timeout")) { - sendLocalReply(request_headers_ != nullptr && - Grpc::Common::isGrpcRequestHeaders(*request_headers_), + sendLocalReply(filter_manager_.requestHeaders() != nullptr && + Grpc::Common::isGrpcRequestHeaders(*filter_manager_.requestHeaders()), Http::Code::RequestTimeout, "downstream duration timeout", nullptr, absl::nullopt, StreamInfo::ResponseCodeDetails::get().MaxDurationTimeout); } else { @@ -803,7 +805,7 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& he bool end_stream) { ScopeTrackerScopeState scope(this, connection_manager_.read_callbacks_->connection().dispatcher()); - request_headers_ = std::move(headers); + filter_manager_.setRequestHeaders(std::move(headers)); Upstream::HostDescriptionConstSharedPtr upstream_host = connection_manager_.read_callbacks_->upstreamHost(); @@ -811,7 +813,8 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& he Upstream::ClusterRequestResponseSizeStatsOptRef req_resp_stats = upstream_host->cluster().requestResponseSizeStats(); if (req_resp_stats.has_value()) { - req_resp_stats->get().upstream_rq_headers_size_.recordValue(request_headers_->byteSize()); + req_resp_stats->get().upstream_rq_headers_size_.recordValue( + filter_manager_.requestHeaders()->byteSize()); } } @@ -822,15 +825,17 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& he Runtime::runtimeFeatureEnabled("envoy.reloadable_features.fixed_connection_close"); if (fixed_connection_close) { state_.saw_connection_close_ = - HeaderUtility::shouldCloseConnection(protocol, *request_headers_); + HeaderUtility::shouldCloseConnection(protocol, *filter_manager_.requestHeaders()); } - 
if (Http::Headers::get().MethodValues.Head == request_headers_->getMethodValue()) { + if (Http::Headers::get().MethodValues.Head == + filter_manager_.requestHeaders()->getMethodValue()) { state_.is_head_request_ = true; } - if (HeaderUtility::isConnect(*request_headers_) && !request_headers_->Path() && + if (HeaderUtility::isConnect(*filter_manager_.requestHeaders()) && + !filter_manager_.requestHeaders()->Path() && !Runtime::runtimeFeatureEnabled("envoy.reloadable_features.stop_faking_paths")) { - request_headers_->setPath("/"); + filter_manager_.requestHeaders()->setPath("/"); } // We need to snap snapped_route_config_ here as it's used in mutateRequestHeaders later. @@ -847,7 +852,7 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& he } ENVOY_STREAM_LOG(debug, "request headers complete (end_stream={}):\n{}", *this, end_stream, - *request_headers_); + *filter_manager_.requestHeaders()); // We end the decode here only if the request is header only. If we convert the request to a // header only, the stream will be marked as done once a subsequent decodeData/decodeTrailers is @@ -861,23 +866,25 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& he // overload it is more important to avoid unnecessary allocation than to create the filters. state_.created_filter_chain_ = true; connection_manager_.stats_.named_.downstream_rq_overload_close_.inc(); - sendLocalReply(Grpc::Common::isGrpcRequestHeaders(*request_headers_), + sendLocalReply(Grpc::Common::isGrpcRequestHeaders(*filter_manager_.requestHeaders()), Http::Code::ServiceUnavailable, "envoy overloaded", nullptr, absl::nullopt, StreamInfo::ResponseCodeDetails::get().Overload); return; } - if (!connection_manager_.config_.proxy100Continue() && request_headers_->Expect() && - request_headers_->Expect()->value() == Headers::get().ExpectValues._100Continue.c_str()) { + if (!connection_manager_.config_.proxy100Continue() && + filter_manager_.requestHeaders()->Expect() && + filter_manager_.requestHeaders()->Expect()->value() == + Headers::get().ExpectValues._100Continue.c_str()) { // Note in the case Envoy is handling 100-Continue complexity, it skips the filter chain // and sends the 100-Continue directly to the encoder. chargeStats(continueHeader()); response_encoder_->encode100ContinueHeaders(continueHeader()); // Remove the Expect header so it won't be handled again upstream. - request_headers_->removeExpect(); + filter_manager_.requestHeaders()->removeExpect(); } - connection_manager_.user_agent_.initializeFromHeaders(*request_headers_, + connection_manager_.user_agent_.initializeFromHeaders(*filter_manager_.requestHeaders(), connection_manager_.stats_.prefixStatName(), connection_manager_.stats_.scope_); @@ -898,62 +905,68 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& he // HTTP/1.0 defaults to single-use connections. Make sure the connection // will be closed unless Keep-Alive is present. state_.saw_connection_close_ = true; - if (absl::EqualsIgnoreCase(request_headers_->getConnectionValue(), + if (absl::EqualsIgnoreCase(filter_manager_.requestHeaders()->getConnectionValue(), Http::Headers::get().ConnectionValues.KeepAlive)) { state_.saw_connection_close_ = false; } } - if (!request_headers_->Host() && + if (!filter_manager_.requestHeaders()->Host() && !connection_manager_.config_.http1Settings().default_host_for_http_10_.empty()) { // Add a default host if configured to do so. 
- request_headers_->setHost( + filter_manager_.requestHeaders()->setHost( connection_manager_.config_.http1Settings().default_host_for_http_10_); } } - if (!request_headers_->Host()) { + if (!filter_manager_.requestHeaders()->Host()) { // Require host header. For HTTP/1.1 Host has already been translated to :authority. - sendLocalReply(Grpc::Common::hasGrpcContentType(*request_headers_), Code::BadRequest, "", - nullptr, absl::nullopt, StreamInfo::ResponseCodeDetails::get().MissingHost); + sendLocalReply(Grpc::Common::hasGrpcContentType(*filter_manager_.requestHeaders()), + Code::BadRequest, "", nullptr, absl::nullopt, + StreamInfo::ResponseCodeDetails::get().MissingHost); return; } // Verify header sanity checks which should have been performed by the codec. - ASSERT(HeaderUtility::requestHeadersValid(*request_headers_).has_value() == false); + ASSERT(HeaderUtility::requestHeadersValid(*filter_manager_.requestHeaders()).has_value() == + false); // Check for the existence of the :path header for non-CONNECT requests, or present-but-empty // :path header for CONNECT requests. We expect the codec to have broken the path into pieces if // applicable. NOTE: Currently the HTTP/1.1 codec only does this when the allow_absolute_url flag // is enabled on the HCM. - if ((!HeaderUtility::isConnect(*request_headers_) || request_headers_->Path()) && - request_headers_->getPathValue().empty()) { - sendLocalReply(Grpc::Common::hasGrpcContentType(*request_headers_), Code::NotFound, "", nullptr, - absl::nullopt, StreamInfo::ResponseCodeDetails::get().MissingPath); + if ((!HeaderUtility::isConnect(*filter_manager_.requestHeaders()) || + filter_manager_.requestHeaders()->Path()) && + filter_manager_.requestHeaders()->getPathValue().empty()) { + sendLocalReply(Grpc::Common::hasGrpcContentType(*filter_manager_.requestHeaders()), + Code::NotFound, "", nullptr, absl::nullopt, + StreamInfo::ResponseCodeDetails::get().MissingPath); return; } // Currently we only support relative paths at the application layer. - if (!request_headers_->getPathValue().empty() && request_headers_->getPathValue()[0] != '/') { + if (!filter_manager_.requestHeaders()->getPathValue().empty() && + filter_manager_.requestHeaders()->getPathValue()[0] != '/') { connection_manager_.stats_.named_.downstream_rq_non_relative_path_.inc(); - sendLocalReply(Grpc::Common::hasGrpcContentType(*request_headers_), Code::NotFound, "", nullptr, - absl::nullopt, StreamInfo::ResponseCodeDetails::get().AbsolutePath); + sendLocalReply(Grpc::Common::hasGrpcContentType(*filter_manager_.requestHeaders()), + Code::NotFound, "", nullptr, absl::nullopt, + StreamInfo::ResponseCodeDetails::get().AbsolutePath); return; } // Path sanitization should happen before any path access other than the above sanity check. 
- if (!ConnectionManagerUtility::maybeNormalizePath(*request_headers_, + if (!ConnectionManagerUtility::maybeNormalizePath(*filter_manager_.requestHeaders(), connection_manager_.config_)) { - sendLocalReply(Grpc::Common::hasGrpcContentType(*request_headers_), Code::BadRequest, "", - nullptr, absl::nullopt, + sendLocalReply(Grpc::Common::hasGrpcContentType(*filter_manager_.requestHeaders()), + Code::BadRequest, "", nullptr, absl::nullopt, StreamInfo::ResponseCodeDetails::get().PathNormalizationFailed); return; } - ConnectionManagerUtility::maybeNormalizeHost(*request_headers_, connection_manager_.config_, - localPort()); + ConnectionManagerUtility::maybeNormalizeHost(*filter_manager_.requestHeaders(), + connection_manager_.config_, localPort()); if (!fixed_connection_close && protocol == Protocol::Http11 && - absl::EqualsIgnoreCase(request_headers_->getConnectionValue(), + absl::EqualsIgnoreCase(filter_manager_.requestHeaders()->getConnectionValue(), Http::Headers::get().ConnectionValues.Close)) { state_.saw_connection_close_ = true; } @@ -961,7 +974,7 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& he // since it is supported by http-parser the underlying parser for http // requests. if (!fixed_connection_close && protocol < Protocol::Http2 && !state_.saw_connection_close_ && - absl::EqualsIgnoreCase(request_headers_->getProxyConnectionValue(), + absl::EqualsIgnoreCase(filter_manager_.requestHeaders()->getProxyConnectionValue(), Http::Headers::get().ConnectionValues.Close)) { state_.saw_connection_close_ = true; } @@ -969,7 +982,7 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& he if (!state_.is_internally_created_) { // Only sanitize headers on first pass. // Modify the downstream remote address depending on configuration and headers. stream_info_.setDownstreamRemoteAddress(ConnectionManagerUtility::mutateRequestHeaders( - *request_headers_, connection_manager_.read_callbacks_->connection(), + *filter_manager_.requestHeaders(), connection_manager_.read_callbacks_->connection(), connection_manager_.config_, *snapped_route_config_, connection_manager_.local_info_)); } ASSERT(stream_info_.downstreamRemoteAddress() != nullptr); @@ -979,11 +992,11 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& he if (!state_.is_internally_created_) { // Only mutate tracing headers on first pass. ConnectionManagerUtility::mutateTracingRequestHeader( - *request_headers_, connection_manager_.runtime_, connection_manager_.config_, - cached_route_.value().get()); + *filter_manager_.requestHeaders(), connection_manager_.runtime_, + connection_manager_.config_, cached_route_.value().get()); } - stream_info_.setRequestHeaders(*request_headers_); + stream_info_.setRequestHeaders(*filter_manager_.requestHeaders()); const bool upgrade_rejected = createFilterChain() == false; @@ -998,8 +1011,9 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& he // contains a smuggled HTTP request. 
state_.saw_connection_close_ = true; connection_manager_.stats_.named_.downstream_rq_ws_on_non_ws_route_.inc(); - sendLocalReply(Grpc::Common::hasGrpcContentType(*request_headers_), Code::Forbidden, "", - nullptr, absl::nullopt, StreamInfo::ResponseCodeDetails::get().UpgradeFailed); + sendLocalReply(Grpc::Common::hasGrpcContentType(*filter_manager_.requestHeaders()), + Code::Forbidden, "", nullptr, absl::nullopt, + StreamInfo::ResponseCodeDetails::get().UpgradeFailed); return; } // Allow non websocket requests to go through websocket enabled routes. @@ -1033,7 +1047,7 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& he traceRequest(); } - filter_manager_.decodeHeaders(*request_headers_, end_stream); + filter_manager_.decodeHeaders(*filter_manager_.requestHeaders(), end_stream); // Reset it here for both global and overridden cases. resetIdleTimer(); @@ -1041,12 +1055,12 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& he void ConnectionManagerImpl::ActiveStream::traceRequest() { Tracing::Decision tracing_decision = - Tracing::HttpTracerUtility::isTracing(stream_info_, *request_headers_); + Tracing::HttpTracerUtility::isTracing(stream_info_, *filter_manager_.requestHeaders()); ConnectionManagerImpl::chargeTracingStats(tracing_decision.reason, connection_manager_.config_.tracingStats()); - active_span_ = connection_manager_.tracer().startSpan(*this, *request_headers_, stream_info_, - tracing_decision); + active_span_ = connection_manager_.tracer().startSpan(*this, *filter_manager_.requestHeaders(), + stream_info_, tracing_decision); if (!active_span_) { return; @@ -1075,10 +1089,11 @@ void ConnectionManagerImpl::ActiveStream::traceRequest() { // propagation enabled) as a request header to enable the receiving service to use it in its // server span. if (decorated_operation_ && state_.decorated_propagate_) { - request_headers_->setEnvoyDecoratorOperation(*decorated_operation_); + filter_manager_.requestHeaders()->setEnvoyDecoratorOperation(*decorated_operation_); } } else { - const HeaderEntry* req_operation_override = request_headers_->EnvoyDecoratorOperation(); + const HeaderEntry* req_operation_override = + filter_manager_.requestHeaders()->EnvoyDecoratorOperation(); // For ingress (inbound) requests, if a decorator operation name has been provided, it // should be used to override the active span's operation. @@ -1091,7 +1106,7 @@ void ConnectionManagerImpl::ActiveStream::traceRequest() { decorated_operation_ = nullptr; } // Remove header so not propagated to service - request_headers_->removeEnvoyDecoratorOperation(); + filter_manager_.requestHeaders()->removeEnvoyDecoratorOperation(); } } } @@ -1198,7 +1213,7 @@ void ConnectionManagerImpl::FilterManager::decodeData( } auto trailers_added_entry = decoder_filters_.end(); - const bool trailers_exists_at_start = active_stream_.request_trailers_ != nullptr; + const bool trailers_exists_at_start = request_trailers_ != nullptr; // Filter iteration may start at the current filter. 
std::list::iterator entry = commonDecodePrefix(filter, filter_iteration_start_state); @@ -1254,7 +1269,7 @@ void ConnectionManagerImpl::FilterManager::decodeData( decoder_filters_); active_stream_.state_.filter_call_state_ |= ActiveStream::FilterCallState::DecodeData; - (*entry)->end_stream_ = end_stream && !active_stream_.request_trailers_; + (*entry)->end_stream_ = end_stream && !request_trailers_; FilterDataStatus status = (*entry)->handle_->decodeData(data, (*entry)->end_stream_); if ((*entry)->end_stream_) { (*entry)->handle_->decodeComplete(); @@ -1268,7 +1283,7 @@ void ConnectionManagerImpl::FilterManager::decodeData( processNewlyAddedMetadata(); - if (!trailers_exists_at_start && active_stream_.request_trailers_ && + if (!trailers_exists_at_start && request_trailers_ && trailers_added_entry == decoder_filters_.end()) { trailers_added_entry = entry; } @@ -1286,7 +1301,7 @@ void ConnectionManagerImpl::FilterManager::decodeData( // If trailers were adding during decodeData we need to trigger decodeTrailers in order // to allow filters to process the trailers. if (trailers_added_entry != decoder_filters_.end()) { - decodeTrailers(trailers_added_entry->get(), *active_stream_.request_trailers_); + decodeTrailers(trailers_added_entry->get(), *request_trailers_); } if (end_stream) { @@ -1299,10 +1314,10 @@ RequestTrailerMap& ConnectionManagerImpl::FilterManager::addDecodedTrailers() { ASSERT(active_stream_.state_.filter_call_state_ & ActiveStream::FilterCallState::LastDataFrame); // Trailers can only be added once. - ASSERT(!active_stream_.request_trailers_); + ASSERT(!request_trailers_); - active_stream_.request_trailers_ = RequestTrailerMapImpl::create(); - return *active_stream_.request_trailers_; + request_trailers_ = RequestTrailerMapImpl::create(); + return *request_trailers_; } void ConnectionManagerImpl::FilterManager::addDecodedData(ActiveStreamDecoderFilter& filter, @@ -1330,7 +1345,7 @@ void ConnectionManagerImpl::FilterManager::addDecodedData(ActiveStreamDecoderFil } MetadataMapVector& ConnectionManagerImpl::FilterManager::addDecodedMetadata() { - return *active_stream_.getRequestMetadataMapVector(); + return *getRequestMetadataMapVector(); } void ConnectionManagerImpl::ActiveStream::decodeTrailers(RequestTrailerMapPtr&& trailers) { @@ -1338,8 +1353,7 @@ void ConnectionManagerImpl::ActiveStream::decodeTrailers(RequestTrailerMapPtr&& connection_manager_.read_callbacks_->connection().dispatcher()); resetIdleTimer(); filter_manager_.maybeEndDecode(true); - request_trailers_ = std::move(trailers); - filter_manager_.decodeTrailers(*request_trailers_); + filter_manager_.decodeTrailers(std::move(trailers)); } void ConnectionManagerImpl::FilterManager::decodeTrailers(ActiveStreamDecoderFilter* filter, @@ -1476,12 +1490,10 @@ void ConnectionManagerImpl::startDrainSequence() { } void ConnectionManagerImpl::ActiveStream::snapScopedRouteConfig() { - ASSERT(request_headers_ != nullptr, - "Try to snap scoped route config when there is no request headers."); - // NOTE: if a RDS subscription hasn't got a RouteConfiguration back, a Router::NullConfigImpl is // returned, in that case we let it pass. 
- snapped_route_config_ = snapped_scoped_routes_config_->getRouteConfig(*request_headers_); + snapped_route_config_ = + snapped_scoped_routes_config_->getRouteConfig(*filter_manager_.requestHeaders()); if (snapped_route_config_ == nullptr) { ENVOY_STREAM_LOG(trace, "can't find SRDS scope.", *this); // TODO(stevenzzzz): Consider to pass an error message to router filter, so that it can @@ -1494,14 +1506,15 @@ void ConnectionManagerImpl::ActiveStream::refreshCachedRoute() { refreshCachedRo void ConnectionManagerImpl::ActiveStream::refreshCachedRoute(const Router::RouteCallback& cb) { Router::RouteConstSharedPtr route; - if (request_headers_ != nullptr) { + if (filter_manager_.requestHeaders() != nullptr) { if (connection_manager_.config_.isRoutable() && connection_manager_.config_.scopedRouteConfigProvider() != nullptr) { // NOTE: re-select scope as well in case the scope key header has been changed by a filter. snapScopedRouteConfig(); } if (snapped_route_config_ != nullptr) { - route = snapped_route_config_->route(cb, *request_headers_, stream_info_, stream_id_); + route = snapped_route_config_->route(cb, *filter_manager_.requestHeaders(), stream_info_, + stream_id_); } } stream_info_.route_entry_ = route ? route->routeEntry() : nullptr; @@ -1545,8 +1558,8 @@ void ConnectionManagerImpl::ActiveStream::refreshCachedTracingCustomTags() { void ConnectionManagerImpl::ActiveStream::requestRouteConfigUpdate( Event::Dispatcher& thread_local_dispatcher, Http::RouteConfigUpdatedCallbackSharedPtr route_config_updated_cb) { - ASSERT(!request_headers_->Host()->value().empty()); - const auto& host_header = absl::AsciiStrToLower(request_headers_->getHostValue()); + ASSERT(!filter_manager_.requestHeaders()->Host()->value().empty()); + const auto& host_header = absl::AsciiStrToLower(filter_manager_.requestHeaders()->getHostValue()); route_config_update_requester_->requestRouteConfigUpdate(host_header, thread_local_dispatcher, std::move(route_config_updated_cb)); } @@ -1574,7 +1587,7 @@ void ConnectionManagerImpl::ActiveStream::sendLocalReply( state_.saw_connection_close_ = true; } - if (response_headers_.get() == nullptr) { + if (filter_manager_.responseHeaders() == nullptr) { // If the response has not started at all, send the response through the filter chain. filter_manager_.sendLocalReplyViaFilterChain(is_grpc_request, code, body, modify_headers, is_head_request, grpc_status, details); @@ -1594,23 +1607,26 @@ void ConnectionManagerImpl::ActiveStream::sendLocalReply( Utility::EncodeFunctions{ [&](ResponseHeaderMap& response_headers, Code& code, std::string& body, absl::string_view& content_type) -> void { - connection_manager_.config_.localReply().rewrite( - request_headers_.get(), response_headers, stream_info_, code, body, content_type); + connection_manager_.config_.localReply().rewrite(filter_manager_.requestHeaders(), + response_headers, stream_info_, code, + body, content_type); }, [&](ResponseHeaderMapPtr&& response_headers, bool end_stream) -> void { if (modify_headers != nullptr) { modify_headers(*response_headers); } - response_headers_ = std::move(response_headers); - encodeHeaders(*response_headers_, end_stream); + // TODO(snowp): This is kinda awkward but we need to do this so that the access log + // sees these headers. Is there a better way? 
+ filter_manager_.setResponseHeaders(std::move(response_headers)); + encodeHeaders(*filter_manager_.responseHeaders(), end_stream); filter_manager_.maybeEndEncode(end_stream); }, [&](Buffer::Instance& data, bool end_stream) -> void { encodeData(data, end_stream); filter_manager_.maybeEndEncode(end_stream); }}, - Utility::LocalReplyData{Grpc::Common::hasGrpcContentType(*request_headers_), code, body, - grpc_status, state_.is_head_request_}); + Utility::LocalReplyData{Grpc::Common::hasGrpcContentType(*filter_manager_.requestHeaders()), + code, body, grpc_status, state_.is_head_request_}); filter_manager_.maybeEndEncode(state_.local_complete_); } else { stream_info_.setResponseCodeDetails(details); @@ -1627,7 +1643,7 @@ void ConnectionManagerImpl::FilterManager::sendLocalReplyViaFilterChain( const std::function& modify_headers, bool is_head_request, const absl::optional grpc_status, absl::string_view details) { ENVOY_STREAM_LOG(debug, "Sending local reply with details {}", active_stream_, details); - ASSERT(active_stream_.response_headers_ == nullptr); + ASSERT(response_headers_ == nullptr); // For early error handling, do a best-effort attempt to create a filter chain // to ensure access logging. If the filter chain already exists this will be // a no-op. @@ -1639,17 +1655,17 @@ void ConnectionManagerImpl::FilterManager::sendLocalReplyViaFilterChain( [this](ResponseHeaderMap& response_headers, Code& code, std::string& body, absl::string_view& content_type) -> void { active_stream_.connection_manager_.config_.localReply().rewrite( - active_stream_.request_headers_.get(), response_headers, - active_stream_.stream_info_, code, body, content_type); + request_headers_.get(), response_headers, active_stream_.stream_info_, code, body, + content_type); }, [this, modify_headers](ResponseHeaderMapPtr&& headers, bool end_stream) -> void { if (modify_headers != nullptr) { modify_headers(*headers); } - active_stream_.response_headers_ = std::move(headers); + response_headers_ = std::move(headers); // TODO: Start encoding from the last decoder filter that saw the // request instead. - encodeHeaders(nullptr, *active_stream_.response_headers_, end_stream); + encodeHeaders(nullptr, *response_headers_, end_stream); }, [this](Buffer::Instance& data, bool end_stream) -> void { // TODO: Start encoding from the last decoder filter that saw the @@ -1699,7 +1715,8 @@ void ConnectionManagerImpl::ActiveStream::encode100ContinueHeaders( ResponseHeaderMap& response_headers) { // Strip the T-E headers etc. Defer other header additions as well as drain-close logic to the // continuation headers. - ConnectionManagerUtility::mutateResponseHeaders(response_headers, request_headers_.get(), + ConnectionManagerUtility::mutateResponseHeaders(response_headers, + filter_manager_.requestHeaders(), connection_manager_.config_, EMPTY_STRING); // Count both the 1xx and follow-up response code in stats. 
@@ -1807,7 +1824,7 @@ void ConnectionManagerImpl::ActiveStream::encodeHeaders(ResponseHeaderMap& heade headers.Server() == nullptr)) { headers.setReferenceServer(connection_manager_.config_.serverName()); } - ConnectionManagerUtility::mutateResponseHeaders(headers, request_headers_.get(), + ConnectionManagerUtility::mutateResponseHeaders(headers, filter_manager_.requestHeaders(), connection_manager_.config_, connection_manager_.config_.via()); @@ -1866,7 +1883,8 @@ void ConnectionManagerImpl::ActiveStream::encodeHeaders(ResponseHeaderMap& heade // Do not do this for H2 (which drains via GOAWAY) or Upgrade or CONNECT (as the // payload is no longer HTTP/1.1) if (!Utility::isUpgrade(headers) && - !HeaderUtility::isConnectResponse(request_headers_, *response_headers_)) { + !HeaderUtility::isConnectResponse(filter_manager_.requestHeaders(), + *filter_manager_.responseHeaders())) { headers.setReferenceConnection(Headers::get().ConnectionValues.Close); } } @@ -1945,10 +1963,10 @@ ResponseTrailerMap& ConnectionManagerImpl::FilterManager::addEncodedTrailers() { ASSERT(active_stream_.state_.filter_call_state_ & ActiveStream::FilterCallState::LastDataFrame); // Trailers can only be added once. - ASSERT(!active_stream_.response_trailers_); + ASSERT(!response_trailers_); - active_stream_.response_trailers_ = ResponseTrailerMapImpl::create(); - return *active_stream_.response_trailers_; + response_trailers_ = ResponseTrailerMapImpl::create(); + return *response_trailers_; } void ConnectionManagerImpl::FilterManager::sendLocalReply( @@ -1997,7 +2015,7 @@ void ConnectionManagerImpl::FilterManager::encodeData( commonEncodePrefix(filter, end_stream, filter_iteration_start_state); auto trailers_added_entry = encoder_filters_.end(); - const bool trailers_exists_at_start = active_stream_.response_trailers_ != nullptr; + const bool trailers_exists_at_start = response_trailers_ != nullptr; for (; entry != encoder_filters_.end(); entry++) { // If the filter pointed by entry has stopped for all frame type, return now. if (handleDataIfStopAll(**entry, data, active_stream_.state_.encoder_filters_streaming_)) { @@ -2021,7 +2039,7 @@ void ConnectionManagerImpl::FilterManager::encodeData( recordLatestDataFilter(entry, active_stream_.state_.latest_data_encoding_filter_, encoder_filters_); - (*entry)->end_stream_ = end_stream && !active_stream_.response_trailers_; + (*entry)->end_stream_ = end_stream && !response_trailers_; FilterDataStatus status = (*entry)->handle_->encodeData(data, (*entry)->end_stream_); if ((*entry)->end_stream_) { (*entry)->handle_->encodeComplete(); @@ -2033,7 +2051,7 @@ void ConnectionManagerImpl::FilterManager::encodeData( ENVOY_STREAM_LOG(trace, "encode data called: filter={} status={}", active_stream_, static_cast((*entry).get()), static_cast(status)); - if (!trailers_exists_at_start && active_stream_.response_trailers_ && + if (!trailers_exists_at_start && response_trailers_ && trailers_added_entry == encoder_filters_.end()) { trailers_added_entry = entry; } @@ -2051,7 +2069,7 @@ void ConnectionManagerImpl::FilterManager::encodeData( // If trailers were adding during encodeData we need to trigger decodeTrailers in order // to allow filters to process the trailers. 
if (trailers_added_entry != encoder_filters_.end()) { - encodeTrailers(trailers_added_entry->get(), *active_stream_.response_trailers_); + encodeTrailers(trailers_added_entry->get(), *response_trailers_); } } @@ -2137,13 +2155,13 @@ void ConnectionManagerImpl::FilterManager::maybeEndEncode(bool end_stream) { } bool ConnectionManagerImpl::FilterManager::processNewlyAddedMetadata() { - if (active_stream_.request_metadata_map_vector_ == nullptr) { + if (request_metadata_map_vector_ == nullptr) { return false; } - for (const auto& metadata_map : *active_stream_.getRequestMetadataMapVector()) { + for (const auto& metadata_map : *getRequestMetadataMapVector()) { decodeMetadata(nullptr, *metadata_map); } - active_stream_.getRequestMetadataMapVector()->clear(); + getRequestMetadataMapVector()->clear(); return true; } @@ -2236,12 +2254,16 @@ bool ConnectionManagerImpl::ActiveStream::createFilterChain() { return false; } bool upgrade_rejected = false; - const Envoy::Http::HeaderEntry* upgrade = - request_headers_ ? request_headers_->Upgrade() : nullptr; - // Treat CONNECT requests as a special upgrade case. - if (!upgrade && request_headers_ && HeaderUtility::isConnect(*request_headers_)) { - upgrade = request_headers_->Method(); + const HeaderEntry* upgrade = nullptr; + if (filter_manager_.requestHeaders()) { + upgrade = filter_manager_.requestHeaders()->Upgrade(); + + // Treat CONNECT requests as a special upgrade case. + if (!upgrade && HeaderUtility::isConnect(*filter_manager_.requestHeaders())) { + upgrade = filter_manager_.requestHeaders()->Method(); + } } + state_.created_filter_chain_ = true; if (upgrade != nullptr) { const Router::RouteEntry::UpgradeMap* upgrade_map = nullptr; @@ -2293,7 +2315,7 @@ void ConnectionManagerImpl::ActiveStreamFilterBase::commonContinue() { do100ContinueHeaders(); // If the response headers have not yet come in, don't continue on with // headers and body. doHeaders expects request headers to exist. - if (!parent_.active_stream_.response_headers_.get()) { + if (!parent_.response_headers_.get()) { return; } } @@ -2516,7 +2538,7 @@ bool ConnectionManagerImpl::ActiveStreamDecoderFilter::complete() { } void ConnectionManagerImpl::ActiveStreamDecoderFilter::doHeaders(bool end_stream) { - parent_.decodeHeaders(this, *parent_.active_stream_.request_headers_, end_stream); + parent_.decodeHeaders(this, *parent_.request_headers_, end_stream); } void ConnectionManagerImpl::ActiveStreamDecoderFilter::doData(bool end_stream) { @@ -2525,10 +2547,10 @@ void ConnectionManagerImpl::ActiveStreamDecoderFilter::doData(bool end_stream) { } void ConnectionManagerImpl::ActiveStreamDecoderFilter::doTrailers() { - parent_.decodeTrailers(this, *parent_.active_stream_.request_trailers_); + parent_.decodeTrailers(this, *parent_.request_trailers_); } bool ConnectionManagerImpl::ActiveStreamDecoderFilter::hasTrailers() { - return parent_.active_stream_.request_trailers_ != nullptr; + return parent_.request_trailers_ != nullptr; } void ConnectionManagerImpl::ActiveStreamDecoderFilter::drainSavedRequestMetadata() { @@ -2596,15 +2618,15 @@ void ConnectionManagerImpl::ActiveStreamDecoderFilter::encode100ContinueHeaders( // here. This avoids the potential situation where Envoy strips Expect: 100-Continue and sends a // 100-Continue, then proxies a duplicate 100 Continue from upstream. 
if (parent_.active_stream_.connection_manager_.config_.proxy100Continue()) { - parent_.active_stream_.continue_headers_ = std::move(headers); - parent_.encode100ContinueHeaders(nullptr, *parent_.active_stream_.continue_headers_); + parent_.continue_headers_ = std::move(headers); + parent_.encode100ContinueHeaders(nullptr, *parent_.continue_headers_); } } void ConnectionManagerImpl::ActiveStreamDecoderFilter::encodeHeaders(ResponseHeaderMapPtr&& headers, bool end_stream) { - parent_.active_stream_.response_headers_ = std::move(headers); - parent_.encodeHeaders(nullptr, *parent_.active_stream_.response_headers_, end_stream); + parent_.setResponseHeaders(std::move(headers)); + parent_.encodeHeaders(nullptr, *parent_.response_headers_, end_stream); } void ConnectionManagerImpl::ActiveStreamDecoderFilter::encodeData(Buffer::Instance& data, @@ -2615,8 +2637,8 @@ void ConnectionManagerImpl::ActiveStreamDecoderFilter::encodeData(Buffer::Instan void ConnectionManagerImpl::ActiveStreamDecoderFilter::encodeTrailers( ResponseTrailerMapPtr&& trailers) { - parent_.active_stream_.response_trailers_ = std::move(trailers); - parent_.encodeTrailers(nullptr, *parent_.active_stream_.response_trailers_); + parent_.response_trailers_ = std::move(trailers); + parent_.encodeTrailers(nullptr, *parent_.response_trailers_); } void ConnectionManagerImpl::ActiveStreamDecoderFilter::encodeMetadata( @@ -2690,7 +2712,7 @@ bool ConnectionManagerImpl::ActiveStreamDecoderFilter::recreateStream() { // n.b. we do not currently change the codecs to point at the new stream // decoder because the decoder callbacks are complete. It would be good to // null out that pointer but should not be necessary. - RequestHeaderMapPtr request_headers(std::move(parent_.active_stream_.request_headers_)); + RequestHeaderMapPtr request_headers(std::move(parent_.request_headers_)); ResponseEncoder* response_encoder = parent_.active_stream_.response_encoder_; parent_.active_stream_.response_encoder_ = nullptr; response_encoder->getStream().removeCallbacks(parent_.active_stream_); @@ -2757,10 +2779,10 @@ bool ConnectionManagerImpl::ActiveStreamEncoderFilter::has100Continueheaders() { return parent_.active_stream_.state_.has_continue_headers_ && !continue_headers_continued_; } void ConnectionManagerImpl::ActiveStreamEncoderFilter::do100ContinueHeaders() { - parent_.encode100ContinueHeaders(this, *parent_.active_stream_.continue_headers_); + parent_.encode100ContinueHeaders(this, *parent_.continue_headers_); } void ConnectionManagerImpl::ActiveStreamEncoderFilter::doHeaders(bool end_stream) { - parent_.encodeHeaders(this, *parent_.active_stream_.response_headers_, end_stream); + parent_.encodeHeaders(this, *parent_.response_headers_, end_stream); } void ConnectionManagerImpl::ActiveStreamEncoderFilter::doData(bool end_stream) { parent_.encodeData(this, *parent_.buffered_response_data_, end_stream, @@ -2788,10 +2810,10 @@ void ConnectionManagerImpl::ActiveStreamEncoderFilter::handleMetadataAfterHeader iterate_from_current_filter_ = saved_state; } void ConnectionManagerImpl::ActiveStreamEncoderFilter::doTrailers() { - parent_.encodeTrailers(this, *parent_.active_stream_.response_trailers_); + parent_.encodeTrailers(this, *parent_.response_trailers_); } bool ConnectionManagerImpl::ActiveStreamEncoderFilter::hasTrailers() { - return parent_.active_stream_.response_trailers_ != nullptr; + return parent_.response_trailers_ != nullptr; } void ConnectionManagerImpl::ActiveStreamEncoderFilter::addEncodedData(Buffer::Instance& data, bool streaming) { @@ 
-2863,8 +2885,7 @@ void ConnectionManagerImpl::ActiveStreamEncoderFilter::responseDataTooLarge() { // In this case, sendLocalReply will either send a response directly to the encoder, or // reset the stream. parent_.sendLocalReply( - parent_.active_stream_.request_headers_ && - Grpc::Common::isGrpcRequestHeaders(*parent_.active_stream_.request_headers_), + parent_.request_headers_ && Grpc::Common::isGrpcRequestHeaders(*parent_.request_headers_), Http::Code::InternalServerError, CodeUtility::toString(Http::Code::InternalServerError), nullptr, absl::nullopt, StreamInfo::ResponseCodeDetails::get().ResponsePayloadTooLarge); } diff --git a/source/common/http/conn_manager_impl.h b/source/common/http/conn_manager_impl.h index 3396dd264f3c..7679d15ff878 100644 --- a/source/common/http/conn_manager_impl.h +++ b/source/common/http/conn_manager_impl.h @@ -485,7 +485,12 @@ class ConnectionManagerImpl : Logger::Loggable, * Decodes the provided trailers starting at the first filter in the chain. * @param trailers the trailers to decode. */ - void decodeTrailers(RequestTrailerMap& trailers) { decodeTrailers(nullptr, trailers); } + void decodeTrailers(RequestTrailerMapPtr&& trailers) { + ASSERT(request_trailers_ == nullptr); + + request_trailers_ = std::move(trailers); + decodeTrailers(nullptr, *request_trailers_); + } /** * Decodes the provided metadata starting at the first filter in the chain. @@ -533,6 +538,42 @@ class ConnectionManagerImpl : Logger::Loggable, void callHighWatermarkCallbacks(); void callLowWatermarkCallbacks(); + void setRequestHeaders(RequestHeaderMapPtr&& request_headers) { + // TODO(snowp): Ideally we don't need this function, but during decodeHeaders we might issue + // local replies before the FilterManager::decodeData has been called. We could likely get rid + // of this by updating the calls to sendLocalReply to pass ownership over the headers + adding + // asserts that we don't call the overload that doesn't pass ownership unless decodeData has + // been called. + ASSERT(request_headers_ == nullptr); + request_headers_ = std::move(request_headers); + } + + void setResponseHeaders(ResponseHeaderMapPtr&& response_headers) { + // Note: sometimes the headers get reset (local reply while response is buffering), so we + // don't assert here. + response_headers_ = std::move(response_headers); + } + + /** + * Returns the current request headers, or nullptr if header decoding hasn't started yet. + */ + RequestHeaderMap* requestHeaders() const { return request_headers_.get(); } + + /** + * Returns the current request trailers, or nullptr if trailer decoding hasn't started yet. + */ + RequestTrailerMap* requestTrailers() const { return request_trailers_.get(); } + + /** + * Returns the current response headers, or nullptr if header encoding hasn't started yet. + */ + ResponseHeaderMap* responseHeaders() const { return response_headers_.get(); } + + /** + * Returns the current response trailers, or nullptr if trailer encoding hasn't started yet. + */ + ResponseTrailerMap* responseTrailers() const { return response_trailers_.get(); } + private: // Indicates which filter to start the iteration with. 
enum class FilterIterationStartState { AlwaysStartFromNext, CanStartFromCurrent }; @@ -591,6 +632,13 @@ class ConnectionManagerImpl : Logger::Loggable, bool handleDataIfStopAll(ActiveStreamFilterBase& filter, Buffer::Instance& data, bool& filter_streaming); + MetadataMapVector* getRequestMetadataMapVector() { + if (request_metadata_map_vector_ == nullptr) { + request_metadata_map_vector_ = std::make_unique(); + } + return request_metadata_map_vector_.get(); + } + ActiveStream& active_stream_; FilterManagerCallbacks& filter_manager_callbacks_; @@ -598,11 +646,20 @@ class ConnectionManagerImpl : Logger::Loggable, std::list decoder_filters_; std::list encoder_filters_; + ResponseHeaderMapPtr continue_headers_; + ResponseHeaderMapPtr response_headers_; + ResponseTrailerMapPtr response_trailers_; + RequestHeaderMapPtr request_headers_; + RequestTrailerMapPtr request_trailers_; + // Stores metadata added in the decoding filter that is being processed. Will be cleared before + // processing the next filter. The storage is created on demand. We need to store metadata + // temporarily in the filter in case the filter has stopped all while processing headers. + std::unique_ptr request_metadata_map_vector_; Buffer::WatermarkBufferPtr buffered_response_data_; Buffer::WatermarkBufferPtr buffered_request_data_; uint32_t buffer_limit_{0}; uint32_t high_watermark_count_{0}; - std::list watermark_callbacks_{}; + std::list watermark_callbacks_; // TODO(snowp): Once FM has been moved to its own file we'll make these private classes of FM, // at which point they no longer need to be friends. @@ -684,10 +741,10 @@ class ConnectionManagerImpl : Logger::Loggable, << DUMP_MEMBER(state_.decoding_headers_only_) << DUMP_MEMBER(state_.encoding_headers_only_) << "\n"; - DUMP_DETAILS(request_headers_); - DUMP_DETAILS(request_trailers_); - DUMP_DETAILS(response_headers_); - DUMP_DETAILS(response_trailers_); + DUMP_DETAILS(filter_manager_.requestHeaders()); + DUMP_DETAILS(filter_manager_.requestTrailers()); + DUMP_DETAILS(filter_manager_.responseHeaders()); + DUMP_DETAILS(filter_manager_.responseTrailers()); DUMP_DETAILS(&stream_info_); } @@ -805,13 +862,6 @@ class ConnectionManagerImpl : Logger::Loggable, return os; } - MetadataMapVector* getRequestMetadataMapVector() { - if (request_metadata_map_vector_ == nullptr) { - request_metadata_map_vector_ = std::make_unique(); - } - return request_metadata_map_vector_.get(); - } - Tracing::CustomTagMap& getOrMakeTracingCustomTagMap() { if (tracing_custom_tags_ == nullptr) { tracing_custom_tags_ = std::make_unique(); @@ -826,11 +876,6 @@ class ConnectionManagerImpl : Logger::Loggable, Tracing::SpanPtr active_span_; const uint64_t stream_id_; ResponseEncoder* response_encoder_{}; - ResponseHeaderMapPtr continue_headers_; - ResponseHeaderMapPtr response_headers_; - ResponseTrailerMapPtr response_trailers_{}; - RequestHeaderMapPtr request_headers_; - RequestTrailerMapPtr request_trailers_; std::list access_log_handlers_; Stats::TimespanPtr request_response_timespan_; // Per-stream idle timeout. @@ -844,10 +889,6 @@ class ConnectionManagerImpl : Logger::Loggable, StreamInfo::StreamInfoImpl stream_info_; absl::optional cached_route_; absl::optional cached_cluster_info_; - // Stores metadata added in the decoding filter that is being processed. Will be cleared before - // processing the next filter. The storage is created on demand. We need to store metadata - // temporarily in the filter in case the filter has stopped all while processing headers. 
- std::unique_ptr request_metadata_map_vector_{nullptr}; const std::string* decorated_operation_{nullptr}; Network::Socket::OptionsSharedPtr upstream_options_; std::unique_ptr route_config_update_requester_; diff --git a/source/common/http/header_utility.cc b/source/common/http/header_utility.cc index ebe6d26e597e..c293f29e16cd 100644 --- a/source/common/http/header_utility.cc +++ b/source/common/http/header_utility.cc @@ -157,9 +157,9 @@ bool HeaderUtility::isConnect(const RequestHeaderMap& headers) { return headers.Method() && headers.Method()->value() == Http::Headers::get().MethodValues.Connect; } -bool HeaderUtility::isConnectResponse(const RequestHeaderMapPtr& request_headers, +bool HeaderUtility::isConnectResponse(const RequestHeaderMap* request_headers, const ResponseHeaderMap& response_headers) { - return request_headers.get() && isConnect(*request_headers) && + return request_headers && isConnect(*request_headers) && static_cast(Http::Utility::getResponseStatus(response_headers)) == Http::Code::OK; } diff --git a/source/common/http/header_utility.h b/source/common/http/header_utility.h index b357563b4c9a..22992f1927f9 100644 --- a/source/common/http/header_utility.h +++ b/source/common/http/header_utility.h @@ -120,7 +120,7 @@ class HeaderUtility { /** * @brief a helper function to determine if the headers represent an accepted CONNECT response. */ - static bool isConnectResponse(const RequestHeaderMapPtr& request_headers, + static bool isConnectResponse(const RequestHeaderMap* request_headers, const ResponseHeaderMap& response_headers); /** diff --git a/test/common/http/conn_manager_impl_test.cc b/test/common/http/conn_manager_impl_test.cc index 3e83615f3dc4..10bc9e6d3cb9 100644 --- a/test/common/http/conn_manager_impl_test.cc +++ b/test/common/http/conn_manager_impl_test.cc @@ -5954,7 +5954,7 @@ TEST_F(HttpConnectionManagerImplTest, TestSessionTrace) { std::stringstream out; object->dumpState(out); std::string state = out.str(); - EXPECT_THAT(state, testing::HasSubstr("request_headers_: null")); + EXPECT_THAT(state, testing::HasSubstr("filter_manager_.requestHeaders(): null")); EXPECT_THAT(state, testing::HasSubstr("protocol_: 1")); return nullptr; })) @@ -5976,7 +5976,7 @@ TEST_F(HttpConnectionManagerImplTest, TestSessionTrace) { std::stringstream out; object->dumpState(out); std::string state = out.str(); - EXPECT_THAT(state, testing::HasSubstr("request_headers_: \n")); + EXPECT_THAT(state, testing::HasSubstr("filter_manager_.requestHeaders(): \n")); EXPECT_THAT(state, testing::HasSubstr("':authority', 'host'\n")); EXPECT_THAT(state, testing::HasSubstr("protocol_: 1")); return nullptr; diff --git a/test/common/http/header_utility_test.cc b/test/common/http/header_utility_test.cc index 6f557b1f017d..dc8c831c650d 100644 --- a/test/common/http/header_utility_test.cc +++ b/test/common/http/header_utility_test.cc @@ -535,10 +535,10 @@ TEST(HeaderIsValidTest, IsConnectResponse) { TestResponseHeaderMapImpl success_response{{":status", "200"}}; TestResponseHeaderMapImpl failure_response{{":status", "500"}}; - EXPECT_TRUE(HeaderUtility::isConnectResponse(connect_request, success_response)); - EXPECT_FALSE(HeaderUtility::isConnectResponse(connect_request, failure_response)); + EXPECT_TRUE(HeaderUtility::isConnectResponse(connect_request.get(), success_response)); + EXPECT_FALSE(HeaderUtility::isConnectResponse(connect_request.get(), failure_response)); EXPECT_FALSE(HeaderUtility::isConnectResponse(nullptr, success_response)); - EXPECT_FALSE(HeaderUtility::isConnectResponse(get_request, 
success_response)); + EXPECT_FALSE(HeaderUtility::isConnectResponse(get_request.get(), success_response)); } TEST(HeaderAddTest, HeaderAdd) { From 1306042ce62d54ae022e71d429b3059ea72f028c Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Mon, 3 Aug 2020 16:46:46 -0700 Subject: [PATCH 838/909] test: fix http2_integration_test flake (#12450) Fixes https://github.com/envoyproxy/envoy/issues/12442 Signed-off-by: Matt Klein --- test/integration/http2_integration_test.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/test/integration/http2_integration_test.cc b/test/integration/http2_integration_test.cc index e99cd9eebbfd..2cc24c148bc7 100644 --- a/test/integration/http2_integration_test.cc +++ b/test/integration/http2_integration_test.cc @@ -830,6 +830,7 @@ TEST_P(Http2IntegrationTest, GrpcRetry) { testGrpcRetry(); } // Verify the case where there is an HTTP/2 codec/protocol error with an active stream. TEST_P(Http2IntegrationTest, CodecErrorAfterStreamStart) { initialize(); + fake_upstreams_[0]->set_allow_unexpected_disconnects(true); codec_client_ = makeHttpConnection(lookupPort("http")); // Sends a request. From ecda2d4d32fa649798facc9f0c17474a4f71f2df Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Mon, 3 Aug 2020 17:11:31 -0700 Subject: [PATCH 839/909] compdb: treat envoy headers as c++17 (#12449) Signed-off-by: Lizan Zhou --- bazel/repository_locations.bzl | 7 +++---- tools/gen_compilation_database.py | 3 +++ 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 1d273afb005b..1a3fb597c1c2 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -42,10 +42,9 @@ USE_CATEGORIES_WITH_CPE_OPTIONAL = ["build", "test", "other"] DEPENDENCY_REPOSITORIES = dict( bazel_compdb = dict( - sha256 = "943f1a57e01d030b9c649f9e41fdafd871e8b0e8a1431f93c6673c38b9c15b3b", - strip_prefix = "bazel-compilation-database-c37b909045eb72d29a47f77cc1e9b519dd5c10b6", - # 2020-07-31 - urls = ["https://github.com/grailbio/bazel-compilation-database/archive/c37b909045eb72d29a47f77cc1e9b519dd5c10b6.tar.gz"], + sha256 = "bcecfd622c4ef272fd4ba42726a52e140b961c4eac23025f18b346c968a8cfb4", + strip_prefix = "bazel-compilation-database-0.4.5", + urls = ["https://github.com/grailbio/bazel-compilation-database/archive/0.4.5.tar.gz"], use_category = ["build"], ), bazel_gazelle = dict( diff --git a/tools/gen_compilation_database.py b/tools/gen_compilation_database.py index 0073b0345016..0d65eaec3b64 100755 --- a/tools/gen_compilation_database.py +++ b/tools/gen_compilation_database.py @@ -73,6 +73,9 @@ def modifyCompileCommand(target, args): if isHeader(target["file"]): options += " -Wno-pragma-once-outside-header -Wno-unused-const-variable" options += " -Wno-unused-function" + if not target["file"].startswith("external/"): + # *.h file is treated as C header by default while our headers files are all C++17. + options = "-x c++ -std=c++17 -fexceptions " + options target["command"] = " ".join([cc, options]) return target From d58bdcf86e1fe9ec843c126e778b44e33102bb7a Mon Sep 17 00:00:00 2001 From: ahedberg Date: Mon, 3 Aug 2020 20:19:00 -0400 Subject: [PATCH 840/909] test: add using-declarations to call sites currently invoking ADL (#12438) The namespace for some actions is changing. This will break these call sites invoking testing::DoAll via ADL unless we add using-declarations or fully-qualify them, as the testing namespace will no longer be a candidate. 
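For illustration, a minimal standalone sketch (not part of this change, and independent of gtest/gmock) of the lookup issue: an unqualified call can silently resolve through argument-dependent lookup (ADL), and an explicit using-declaration keeps it resolving once the arguments' namespace changes.

#include <iostream>

namespace testing_ns {
struct Action {};
// An unqualified DoAll(Action{}) call is found through ADL because Action lives here.
void DoAll(Action) { std::cout << "resolved\n"; }
} // namespace testing_ns

namespace test_file {
// Once the call-site arguments no longer live in testing_ns, ADL stops finding DoAll;
// the explicit using-declaration keeps the unqualified call compiling.
using testing_ns::DoAll;

void callSite() { DoAll(testing_ns::Action{}); }
} // namespace test_file

int main() {
  test_file::callSite();
  return 0;
}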
Signed-off-by: Ashley Hedberg --- test/common/tcp_proxy/tcp_proxy_test.cc | 18 ++++++++-------- .../upstream/cluster_manager_impl_test.cc | 21 ++++++++++--------- test/extensions/common/tap/admin_test.cc | 9 ++++---- 3 files changed, 25 insertions(+), 23 deletions(-) diff --git a/test/common/tcp_proxy/tcp_proxy_test.cc b/test/common/tcp_proxy/tcp_proxy_test.cc index 5fd7d8b21ce5..3658360dcad0 100644 --- a/test/common/tcp_proxy/tcp_proxy_test.cc +++ b/test/common/tcp_proxy/tcp_proxy_test.cc @@ -37,20 +37,20 @@ #include "gmock/gmock.h" #include "gtest/gtest.h" -using testing::_; -using testing::Invoke; -using testing::InvokeWithoutArgs; -using testing::NiceMock; -using testing::Return; -using testing::ReturnPointee; -using testing::ReturnRef; -using testing::SaveArg; - namespace Envoy { namespace TcpProxy { namespace { using ::Envoy::Network::UpstreamServerName; +using ::testing::_; +using ::testing::DoAll; +using ::testing::Invoke; +using ::testing::InvokeWithoutArgs; +using ::testing::NiceMock; +using ::testing::Return; +using ::testing::ReturnPointee; +using ::testing::ReturnRef; +using ::testing::SaveArg; namespace { Config constructConfigFromYaml(const std::string& yaml, diff --git a/test/common/upstream/cluster_manager_impl_test.cc b/test/common/upstream/cluster_manager_impl_test.cc index ae2c282fdd4f..affa3fed62b3 100644 --- a/test/common/upstream/cluster_manager_impl_test.cc +++ b/test/common/upstream/cluster_manager_impl_test.cc @@ -6,20 +6,21 @@ #include "test/common/upstream/test_cluster_manager.h" -using testing::_; -using testing::Eq; -using testing::InSequence; -using testing::Invoke; -using testing::Mock; -using testing::NiceMock; -using testing::Return; -using testing::ReturnNew; -using testing::SaveArg; - namespace Envoy { namespace Upstream { namespace { +using ::testing::_; +using ::testing::DoAll; +using ::testing::Eq; +using ::testing::InSequence; +using ::testing::Invoke; +using ::testing::Mock; +using ::testing::NiceMock; +using ::testing::Return; +using ::testing::ReturnNew; +using ::testing::SaveArg; + envoy::config::bootstrap::v3::Bootstrap parseBootstrapFromV3Yaml(const std::string& yaml, bool avoid_boosting = true) { envoy::config::bootstrap::v3::Bootstrap bootstrap; diff --git a/test/extensions/common/tap/admin_test.cc b/test/extensions/common/tap/admin_test.cc index 2e3d940eadc5..1894251b3bfc 100644 --- a/test/extensions/common/tap/admin_test.cc +++ b/test/extensions/common/tap/admin_test.cc @@ -7,16 +7,17 @@ #include "gtest/gtest.h" -using testing::_; -using testing::Return; -using testing::SaveArg; - namespace Envoy { namespace Extensions { namespace Common { namespace Tap { namespace { +using ::testing::_; +using ::testing::DoAll; +using ::testing::Return; +using ::testing::SaveArg; + class MockExtensionConfig : public ExtensionConfig { public: MOCK_METHOD(const absl::string_view, adminId, ()); From bc6042ad7ac8938a0bee8c986c6e1995611ac61d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Guti=C3=A9rrez=20Segal=C3=A9s?= Date: Tue, 4 Aug 2020 04:47:16 -0400 Subject: [PATCH 841/909] Make extensions public visible (#12451) Workaround for #12444, while we figure out a permanent solution. 
Signed-off-by: Raul Gutierrez Segales --- bazel/envoy_build_system.bzl | 4 +++- bazel/envoy_library.bzl | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/bazel/envoy_build_system.bzl b/bazel/envoy_build_system.bzl index a96a2cdabc0d..a01fb8f80582 100644 --- a/bazel/envoy_build_system.bzl +++ b/bazel/envoy_build_system.bzl @@ -37,7 +37,9 @@ def envoy_package(): native.package(default_visibility = ["//visibility:public"]) def envoy_extension_package(): - native.package(default_visibility = ["//:extension_library"]) + # TODO(rgs1): revert this to //:extension_library once + # https://github.com/envoyproxy/envoy/issues/12444 is fixed. + native.package(default_visibility = ["//visibility:public"]) # A genrule variant that can output a directory. This is useful when doing things like # generating a fuzz corpus mechanically. diff --git a/bazel/envoy_library.bzl b/bazel/envoy_library.bzl index 965ad72690f0..25b4c6ba17d7 100644 --- a/bazel/envoy_library.bzl +++ b/bazel/envoy_library.bzl @@ -70,7 +70,9 @@ def envoy_cc_extension( undocumented = False, status = "stable", tags = [], - visibility = ["//:extension_config"], + # TODO(rgs1): revert this to //:extension_config once + # https://github.com/envoyproxy/envoy/issues/12444 is fixed. + visibility = ["//visibility:public"], **kwargs): if security_posture not in EXTENSION_SECURITY_POSTURES: fail("Unknown extension security posture: " + security_posture) From 235ffdc68b63a1c3540e113b9c2baad52484d94b Mon Sep 17 00:00:00 2001 From: Matt Tierney Date: Tue, 4 Aug 2020 08:44:39 -0400 Subject: [PATCH 842/909] test: Set test directory to be writable before writing into it (#12428) Move chmod permissions for writing to new directory before executing printfs to verify text value treatment in the subdirectory. Risk Level: Low Testing: bazel test //test/common/runtime:runtime_impl_test Docs Changes: N/A Release Notes: N/A Signed-off-by: Matt Tierney --- test/common/runtime/filesystem_setup.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/common/runtime/filesystem_setup.sh b/test/common/runtime/filesystem_setup.sh index 35baffead34b..ef27243da854 100755 --- a/test/common/runtime/filesystem_setup.sh +++ b/test/common/runtime/filesystem_setup.sh @@ -9,10 +9,10 @@ cd "${TEST_SRCDIR}/envoy" rm -rf "${TEST_TMPDIR}/${TEST_DATA}" mkdir -p "${TEST_TMPDIR}/${TEST_DATA}" cp -RfL "${TEST_DATA}"/* "${TEST_TMPDIR}/${TEST_DATA}" +chmod -R u+rwX "${TEST_TMPDIR}/${TEST_DATA}" # Verify text value is treated as a binary blob regardless of source line-ending settings printf "hello\nworld" > "${TEST_TMPDIR}/${TEST_DATA}/root/envoy/file_lf" printf "hello\r\nworld" > "${TEST_TMPDIR}/${TEST_DATA}/root/envoy/file_crlf" -chmod -R u+rwX "${TEST_TMPDIR}/${TEST_DATA}" # Deliberate symlink of doom. LOOP_PATH="${TEST_TMPDIR}/${TEST_DATA}/loop" From 2a6a4b8d758b4bf4238cb6d2babdd3d83fbf45bc Mon Sep 17 00:00:00 2001 From: jianwen612 <55008549+jianwen612@users.noreply.github.com> Date: Tue, 4 Aug 2020 08:45:29 -0500 Subject: [PATCH 843/909] [Rocketmq_proxy] fix assert failure on invalid buffer (#12344) Replace a faulty assert by an if-block to check the validity of the input to make the filter more robust against invalid data (instead of a crash or integer underflow). 
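For illustration, a minimal standalone sketch (illustrative values only, not the filter code itself; the constant 4 stands in for the frame-header length field size) of the unsigned underflow the new if-block prevents:

#include <cstdint>
#include <iostream>

int main() {
  // A malformed frame can advertise a frame length smaller than its header length.
  uint32_t frame_length = 6;
  uint32_t header_length = 8;

  if (frame_length < header_length + 4) {
    // Reject the frame; computing the body length below would wrap around.
    std::cout << "invalid frame rejected\n";
    return 0;
  }

  // With only an assert, a release build would reach this line and underflow,
  // yielding a body length close to the uint32_t maximum.
  uint32_t body_length = frame_length - 4 - header_length;
  std::cout << body_length << "\n";
  return 0;
}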
* added regression test and fix for the assert failure Signed-off-by: jianwen --- docs/root/version_history/current.rst | 1 + .../filters/network/rocketmq_proxy/codec.cc | 9 +++++++-- .../network/rocketmq_proxy/conn_manager_test.cc | 14 ++++++++++++++ 3 files changed, 22 insertions(+), 2 deletions(-) diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 0ef344aef57e..15f30db08ed3 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -33,6 +33,7 @@ Bug Fixes * csrf: fixed issues with regards to origin and host header parsing. * dynamic_forward_proxy: only perform DNS lookups for routes to Dynamic Forward Proxy clusters since other cluster types handle DNS lookup themselves. * fault: fixed an issue with `active_faults` gauge not being decremented for when abort faults were injected. +* rocketmq_proxy network-level filter: fixed an issue involving incorrect header lengths. In debug mode it causes crash and in release mode it causes underflow. Removed Config or Runtime ------------------------- diff --git a/source/extensions/filters/network/rocketmq_proxy/codec.cc b/source/extensions/filters/network/rocketmq_proxy/codec.cc index b56e0d5d599a..dd00abdfa330 100644 --- a/source/extensions/filters/network/rocketmq_proxy/codec.cc +++ b/source/extensions/filters/network/rocketmq_proxy/codec.cc @@ -37,10 +37,15 @@ RemotingCommandPtr Decoder::decode(Buffer::Instance& buffer, bool& underflow, bo auto mark = buffer.peekBEInt(); uint32_t header_length = adjustHeaderLength(mark); - ASSERT(frame_length > header_length); + if (frame_length < header_length + FRAME_HEADER_LENGTH_FIELD_SIZE) { + // There is an error in frame_length. + // Make sure body_length is non-negative. + has_error = true; + return nullptr; + } buffer.drain(FRAME_HEADER_LENGTH_FIELD_SIZE); - uint32_t body_length = frame_length - 4 - header_length; + uint32_t body_length = frame_length - FRAME_HEADER_LENGTH_FIELD_SIZE - header_length; ENVOY_LOG(debug, "Request/Response Frame Meta: Frame Length = {}, Header Length = {}, Body Length = {}", diff --git a/test/extensions/filters/network/rocketmq_proxy/conn_manager_test.cc b/test/extensions/filters/network/rocketmq_proxy/conn_manager_test.cc index 84af7bdd9758..3470b4c34818 100644 --- a/test/extensions/filters/network/rocketmq_proxy/conn_manager_test.cc +++ b/test/extensions/filters/network/rocketmq_proxy/conn_manager_test.cc @@ -650,6 +650,20 @@ TEST_F(RocketmqConnectionManagerTest, OnDataWithUnsupportedCode) { buffer_.drain(buffer_.length()); } +TEST_F(RocketmqConnectionManagerTest, OnDataInvalidFrameLength) { + // Test against the invalid input where frame_length <= header_length. 
+ const std::string yaml = R"EOF( + stat_prefix: test + )EOF"; + initializeFilter(yaml); + buffer_.add( + std::string({'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'})); + EXPECT_EQ(conn_manager_->onData(buffer_, false), Network::FilterStatus::StopIteration); + EXPECT_EQ(1U, store_.counter("test.request").value()); + + buffer_.drain(buffer_.length()); +} + TEST_F(RocketmqConnectionManagerTest, ConsumerGroupMemberEqual) { initializeFilter(); From ff59e9abbe84cb73246101515575c7aea1c2876e Mon Sep 17 00:00:00 2001 From: Sunjay Bhatia Date: Tue, 4 Aug 2020 12:34:08 -0400 Subject: [PATCH 844/909] watermark_buffer_test: Break up contiguous memory allocation to avoid bad alloc exception (#12417) Running this test in CI on Windows we have seen bad alloc exceptions, to combat this and reduce memory overhead, break up large string allocation into smaller segments. Signed-off-by: William A Rowe Jr Co-authored-by: Sunjay Bhatia --- test/common/buffer/watermark_buffer_test.cc | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/test/common/buffer/watermark_buffer_test.cc b/test/common/buffer/watermark_buffer_test.cc index db7fe530fcdb..476967254f35 100644 --- a/test/common/buffer/watermark_buffer_test.cc +++ b/test/common/buffer/watermark_buffer_test.cc @@ -431,19 +431,22 @@ TEST_F(WatermarkBufferTest, OverflowWatermarkDisabledOnVeryHighValue) { // Make sure the overflow threshold will be above std::numeric_limits::max() Runtime::LoaderSingleton::getExisting()->mergeValues({{"envoy.buffer.overflow_multiplier", "3"}}); - buffer1.setWatermarks((std::numeric_limits::max() / 3) + 4); - - // Add 2 halves instead of full uint32_t::max to get around std::bad_alloc exception - const uint32_t half_max = std::numeric_limits::max() / 2; - const std::string half_max_str = std::string(half_max, 'a'); - buffer1.add(half_max_str.data(), half_max); - buffer1.add(half_max_str.data(), half_max); + buffer1.setWatermarks((std::numeric_limits::max() / 3) + 1); + + // Add many segments instead of full uint32_t::max to get around std::bad_alloc exception + const uint32_t segment_denominator = 128; + const uint32_t big_segment_len = std::numeric_limits::max() / segment_denominator + 1; + const std::string big_segment_str = std::string(big_segment_len, 'a'); + for (uint32_t i = 0; i < segment_denominator; ++i) { + buffer1.add(big_segment_str.data(), big_segment_len); + } EXPECT_EQ(1, high_watermark_buffer1); EXPECT_EQ(0, overflow_watermark_buffer1); buffer1.add(TEN_BYTES, 10); EXPECT_EQ(1, high_watermark_buffer1); EXPECT_EQ(0, overflow_watermark_buffer1); - EXPECT_EQ(2 * half_max + static_cast(10), buffer1.length()); + EXPECT_EQ(static_cast(segment_denominator) * big_segment_len + 10, buffer1.length()); + EXPECT_GT(buffer1.length(), std::numeric_limits::max()); #endif } From 5adfc817447a9690719e1402651a25aa83828d75 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Guti=C3=A9rrez=20Segal=C3=A9s?= Date: Tue, 4 Aug 2020 12:35:04 -0400 Subject: [PATCH 845/909] pass through filter: make it public visible (#12463) Signed-off-by: Raul Gutierrez Segales --- source/extensions/filters/http/common/BUILD | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/source/extensions/filters/http/common/BUILD b/source/extensions/filters/http/common/BUILD index 39da5c48c58e..bbafc6cc659a 100644 --- a/source/extensions/filters/http/common/BUILD +++ b/source/extensions/filters/http/common/BUILD @@ -12,10 +12,7 @@ envoy_cc_library( name = "pass_through_filter_lib", hdrs = 
["pass_through_filter.h"], # A thin shim used by test and prod filters. - visibility = [ - "//source:__subpackages__", - "//test:__subpackages__", - ], + visibility = ["//visibility:public"], deps = [ "//include/envoy/server:filter_config_interface", ], From e8aab1b4f14baf7d7cf89297e0422dac6c7aac8e Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Tue, 4 Aug 2020 09:51:40 -0700 Subject: [PATCH 846/909] arm: enable hotrestart (#12447) Signed-off-by: Lizan Zhou --- ci/build_setup.sh | 2 +- test/integration/hotrestart_test.sh | 8 ++++++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/ci/build_setup.sh b/ci/build_setup.sh index 46b448381ef7..aa21bbadb232 100755 --- a/ci/build_setup.sh +++ b/ci/build_setup.sh @@ -83,7 +83,7 @@ export BAZEL_BUILD_OPTIONS=" ${BAZEL_OPTIONS} --verbose_failures --show_task_fin --test_output=errors --repository_cache=${BUILD_DIR}/repository_cache --experimental_repository_cache_hardlinks \ ${BAZEL_BUILD_EXTRA_OPTIONS} ${BAZEL_EXTRA_TEST_OPTIONS}" -[[ "$(uname -m)" == "aarch64" ]] && BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS} --define=hot_restart=disabled --test_env=HEAPCHECK=" +[[ "$(uname -m)" == "aarch64" ]] && BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS} --test_env=HEAPCHECK=" [[ "${BAZEL_EXPUNGE}" == "1" ]] && bazel clean --expunge diff --git a/test/integration/hotrestart_test.sh b/test/integration/hotrestart_test.sh index 8a3051f589d2..a09aee64e5cb 100755 --- a/test/integration/hotrestart_test.sh +++ b/test/integration/hotrestart_test.sh @@ -73,6 +73,10 @@ cat "${TEST_SRCDIR}/envoy"/test/config/integration/server.yaml | cat > "${HOT_RESTART_JSON_REUSE_PORT}" JSON_TEST_ARRAY+=("${HOT_RESTART_JSON_REUSE_PORT}") +# Shared memory size varies by architecture +SHARED_MEMORY_SIZE="104" +[[ "$(uname -m)" == "aarch64" ]] && SHARED_MEMORY_SIZE="120" + echo "Hot restart test using dynamic base id" TEST_INDEX=0 @@ -128,7 +132,7 @@ function run_testsuite() { # string, compare it against a hard-coded string. start_test Checking for consistency of /hot_restart_version CLI_HOT_RESTART_VERSION=$("${ENVOY_BIN}" --hot-restart-version --base-id "${BASE_ID}" 2>&1) - EXPECTED_CLI_HOT_RESTART_VERSION="11.104" + EXPECTED_CLI_HOT_RESTART_VERSION="11.${SHARED_MEMORY_SIZE}" echo "The Envoy's hot restart version is ${CLI_HOT_RESTART_VERSION}" echo "Now checking that the above version is what we expected." 
check [ "${CLI_HOT_RESTART_VERSION}" = "${EXPECTED_CLI_HOT_RESTART_VERSION}" ] @@ -136,7 +140,7 @@ function run_testsuite() { start_test Checking for consistency of /hot_restart_version with --use-fake-symbol-table "$FAKE_SYMBOL_TABLE" CLI_HOT_RESTART_VERSION=$("${ENVOY_BIN}" --hot-restart-version --base-id "${BASE_ID}" \ --use-fake-symbol-table "$FAKE_SYMBOL_TABLE" 2>&1) - EXPECTED_CLI_HOT_RESTART_VERSION="11.104" + EXPECTED_CLI_HOT_RESTART_VERSION="11.${SHARED_MEMORY_SIZE}" check [ "${CLI_HOT_RESTART_VERSION}" = "${EXPECTED_CLI_HOT_RESTART_VERSION}" ] start_test Checking for match of --hot-restart-version and admin /hot_restart_version From 0b1cf9dfa1a5f74e60934eaec1a85f16c0df4cb6 Mon Sep 17 00:00:00 2001 From: Sunjay Bhatia Date: Tue, 4 Aug 2020 17:48:24 -0400 Subject: [PATCH 847/909] Fix ambiguous duration units and add format check (#12225) - ambiguous value-based std::chrono::{clock_type}::duration(value) constructors result in stdlib implementation specific default time units which are hard to read and potentially different on different platforms - this change removes any instances of these ambiguous constructions and adds a format check to prevent them; developers should specify an explicit unit of time - we explicitly saw this issue in https://github.com/envoyproxy/envoy/pull/11915 where the assumed duration unit was different on Windows, causing test failures Additional Description: Risk Level: Low Testing: Adds format check test and adjust existing unit tests Docs Changes: N/A Release Notes: N/A Signed-off-by: Sunjay Bhatia Co-authored-by: William A Rowe Jr --- include/envoy/event/timer.h | 4 ++++ test/extensions/filters/network/kafka/broker/BUILD | 1 + .../network/kafka/broker/filter_unit_test.cc | 8 +++++--- tools/code_format/check_format.py | 7 +++++++ tools/code_format/check_format_test_helper.py | 5 +++++ tools/testdata/check_format/duration_value.cc | 9 +++++++++ tools/testdata/check_format/duration_value_zero.cc | 13 +++++++++++++ 7 files changed, 44 insertions(+), 3 deletions(-) create mode 100644 tools/testdata/check_format/duration_value.cc create mode 100644 tools/testdata/check_format/duration_value_zero.cc diff --git a/include/envoy/event/timer.h b/include/envoy/event/timer.h index 337c318a1224..c02a6a648b65 100644 --- a/include/envoy/event/timer.h +++ b/include/envoy/event/timer.h @@ -81,6 +81,10 @@ class TimeSystem : public TimeSource { ~TimeSystem() override = default; using Duration = MonotonicTime::duration; + using Nanoseconds = std::chrono::nanoseconds; + using Microseconds = std::chrono::microseconds; + using Milliseconds = std::chrono::milliseconds; + using Seconds = std::chrono::seconds; /** * Creates a timer factory. 
This indirection enables thread-local timer-queue management, diff --git a/test/extensions/filters/network/kafka/broker/BUILD b/test/extensions/filters/network/kafka/broker/BUILD index c765de8405cf..cc64251e3f59 100644 --- a/test/extensions/filters/network/kafka/broker/BUILD +++ b/test/extensions/filters/network/kafka/broker/BUILD @@ -26,6 +26,7 @@ envoy_extension_cc_test( srcs = ["filter_unit_test.cc"], extension_name = "envoy.filters.network.kafka_broker", deps = [ + "//include/envoy/event:timer_interface", "//source/extensions/filters/network/kafka:kafka_broker_filter_lib", "//test/mocks/network:network_mocks", "//test/mocks/stats:stats_mocks", diff --git a/test/extensions/filters/network/kafka/broker/filter_unit_test.cc b/test/extensions/filters/network/kafka/broker/filter_unit_test.cc index 51e251504ced..0555fe75f625 100644 --- a/test/extensions/filters/network/kafka/broker/filter_unit_test.cc +++ b/test/extensions/filters/network/kafka/broker/filter_unit_test.cc @@ -1,3 +1,5 @@ +#include "envoy/event/timer.h" + #include "extensions/filters/network/kafka/broker/filter.h" #include "extensions/filters/network/kafka/external/requests.h" @@ -218,7 +220,7 @@ TEST_F(KafkaMetricsFacadeImplUnitTest, ShouldRegisterRequest) { EXPECT_CALL(*request_metrics_, onRequest(api_key)); - MonotonicTime time_point{MonotonicTime::duration(1234)}; + MonotonicTime time_point{Event::TimeSystem::Milliseconds(1234)}; EXPECT_CALL(time_source_, monotonicTime()).WillOnce(Return(time_point)); // when @@ -248,10 +250,10 @@ TEST_F(KafkaMetricsFacadeImplUnitTest, ShouldRegisterResponse) { const int32_t correlation_id = 1234; AbstractResponseSharedPtr response = std::make_shared(api_key, correlation_id); - MonotonicTime request_time_point{MonotonicTime::duration(1234000000)}; + MonotonicTime request_time_point{Event::TimeSystem::Milliseconds(1234)}; testee_.getRequestArrivalsForTest()[correlation_id] = request_time_point; - MonotonicTime response_time_point{MonotonicTime::duration(2345000000)}; + MonotonicTime response_time_point{Event::TimeSystem::Milliseconds(2345)}; EXPECT_CALL(*response_metrics_, onResponse(api_key, 1111)); EXPECT_CALL(time_source_, monotonicTime()).WillOnce(Return(response_time_point)); diff --git a/tools/code_format/check_format.py b/tools/code_format/check_format.py index 75e641071966..61851749266c 100755 --- a/tools/code_format/check_format.py +++ b/tools/code_format/check_format.py @@ -103,6 +103,7 @@ HISTOGRAM_SI_SUFFIX_REGEX = re.compile(r"(?<=HISTOGRAM\()[a-zA-Z0-9_]+_(b|kb|mb|ns|us|ms|s)(?=,)") TEST_NAME_STARTING_LOWER_CASE_REGEX = re.compile(r"TEST(_.\(.*,\s|\()[a-z].*\)\s\{") EXTENSIONS_CODEOWNERS_REGEX = re.compile(r'.*(extensions[^@]*\s+)(@.*)') +DURATION_VALUE_REGEX = re.compile(r'\b[Dd]uration\(([0-9.]+)') # yapf: disable PROTOBUF_TYPE_ERRORS = { @@ -620,6 +621,12 @@ def checkSourceLine(line, file_path, reportError): "std::chrono::system_clock::now" in line or "std::chrono::steady_clock::now" in line or \ "std::this_thread::sleep_for" in line or hasCondVarWaitFor(line): reportError("Don't reference real-world time sources from production code; use injection") + duration_arg = DURATION_VALUE_REGEX.search(line) + if duration_arg and duration_arg.group(1) != "0" and duration_arg.group(1) != "0.0": + # Matching duration(int-const or float-const) other than zero + reportError( + "Don't use ambiguous duration(value), use an explicit duration type, e.g. 
Event::TimeSystem::Milliseconds(value)" + ) if not allowlistedForRegisterFactory(file_path): if "Registry::RegisterFactory<" in line or "REGISTER_FACTORY" in line: reportError("Don't use Registry::RegisterFactory or REGISTER_FACTORY in tests, " diff --git a/tools/code_format/check_format_test_helper.py b/tools/code_format/check_format_test_helper.py index 132354171855..8fbc058ff8c3 100755 --- a/tools/code_format/check_format_test_helper.py +++ b/tools/code_format/check_format_test_helper.py @@ -166,6 +166,10 @@ def runChecks(): "Don't reference real-world time sources from production code; use injection") errors += checkUnfixableError("real_time_source.cc", real_time_inject_error) errors += checkUnfixableError("real_time_system.cc", real_time_inject_error) + errors += checkUnfixableError( + "duration_value.cc", + "Don't use ambiguous duration(value), use an explicit duration type, e.g. Event::TimeSystem::Milliseconds(value)" + ) errors += checkUnfixableError("system_clock.cc", real_time_inject_error) errors += checkUnfixableError("steady_clock.cc", real_time_inject_error) errors += checkUnfixableError( @@ -282,6 +286,7 @@ def runChecks(): "term absl::make_unique< should be replaced with standard library term std::make_unique<") errors += checkFileExpectingOK("real_time_source_override.cc") + errors += checkFileExpectingOK("duration_value_zero.cc") errors += checkFileExpectingOK("time_system_wait_for.cc") errors += checkFileExpectingOK("clang_format_off.cc") return errors diff --git a/tools/testdata/check_format/duration_value.cc b/tools/testdata/check_format/duration_value.cc new file mode 100644 index 000000000000..39275d769e02 --- /dev/null +++ b/tools/testdata/check_format/duration_value.cc @@ -0,0 +1,9 @@ +#include + +namespace Envoy { + +std::chrono::duration foo() { + return std::chrono::steady_clock::duration(12345); +} + +} // namespace Envoy diff --git a/tools/testdata/check_format/duration_value_zero.cc b/tools/testdata/check_format/duration_value_zero.cc new file mode 100644 index 000000000000..ebbcce9bf51a --- /dev/null +++ b/tools/testdata/check_format/duration_value_zero.cc @@ -0,0 +1,13 @@ +#include + +namespace Envoy { + +std::chrono::duration foo_int() { + return std::chrono::steady_clock::duration(0); +} + +std::chrono::duration foo_decimal() { + return std::chrono::steady_clock::duration(0.0); +} + +} // namespace Envoy From 29b49276db0812b566d143a79996848819248428 Mon Sep 17 00:00:00 2001 From: Yuchen Dai Date: Tue, 4 Aug 2020 14:51:19 -0700 Subject: [PATCH 848/909] envoy grpc: overridable host (#12338) Add authority field in envoy grpc message to override the default host name as cluster name. Risk Level: Low Testing: Docs Changes: Release Notes: Fix #12116 Signed-off-by: Yuchen Dai --- api/envoy/config/core/v3/grpc_service.proto | 6 ++ .../config/core/v4alpha/grpc_service.proto | 6 ++ .../envoy/config/core/v3/grpc_service.proto | 6 ++ .../config/core/v4alpha/grpc_service.proto | 6 ++ source/common/grpc/async_client_impl.cc | 9 +-- source/common/grpc/async_client_impl.h | 2 + source/common/grpc/common.cc | 4 +- test/common/grpc/async_client_impl_test.cc | 64 ++++++++++++++++++- 8 files changed, 94 insertions(+), 9 deletions(-) diff --git a/api/envoy/config/core/v3/grpc_service.proto b/api/envoy/config/core/v3/grpc_service.proto index 3f62884df6e3..967c694d2bc4 100644 --- a/api/envoy/config/core/v3/grpc_service.proto +++ b/api/envoy/config/core/v3/grpc_service.proto @@ -36,6 +36,12 @@ message GrpcService { // in the :ref:`Cluster ` :ref:`transport_socket // `. 
string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The `:authority` header in the grpc request. If this field is not set, the authority header value will be `cluster_name`. + // Note that this authority does not override the SNI. The SNI is provided by the transport socket of the cluster. + string authority = 2 + [(validate.rules).string = + {min_bytes: 0 max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false}]; } // [#next-free-field: 9] diff --git a/api/envoy/config/core/v4alpha/grpc_service.proto b/api/envoy/config/core/v4alpha/grpc_service.proto index 4c95bb9e9853..51f11fa1f346 100644 --- a/api/envoy/config/core/v4alpha/grpc_service.proto +++ b/api/envoy/config/core/v4alpha/grpc_service.proto @@ -36,6 +36,12 @@ message GrpcService { // in the :ref:`Cluster ` :ref:`transport_socket // `. string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The `:authority` header in the grpc request. If this field is not set, the authority header value will be `cluster_name`. + // Note that this authority does not override the SNI. The SNI is provided by the transport socket of the cluster. + string authority = 2 + [(validate.rules).string = + {min_bytes: 0 max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false}]; } // [#next-free-field: 9] diff --git a/generated_api_shadow/envoy/config/core/v3/grpc_service.proto b/generated_api_shadow/envoy/config/core/v3/grpc_service.proto index f4d41ddba258..552817ffd06f 100644 --- a/generated_api_shadow/envoy/config/core/v3/grpc_service.proto +++ b/generated_api_shadow/envoy/config/core/v3/grpc_service.proto @@ -36,6 +36,12 @@ message GrpcService { // in the :ref:`Cluster ` :ref:`transport_socket // `. string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The `:authority` header in the grpc request. If this field is not set, the authority header value will be `cluster_name`. + // Note that this authority does not override the SNI. The SNI is provided by the transport socket of the cluster. + string authority = 2 + [(validate.rules).string = + {min_bytes: 0 max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false}]; } // [#next-free-field: 9] diff --git a/generated_api_shadow/envoy/config/core/v4alpha/grpc_service.proto b/generated_api_shadow/envoy/config/core/v4alpha/grpc_service.proto index 4c95bb9e9853..51f11fa1f346 100644 --- a/generated_api_shadow/envoy/config/core/v4alpha/grpc_service.proto +++ b/generated_api_shadow/envoy/config/core/v4alpha/grpc_service.proto @@ -36,6 +36,12 @@ message GrpcService { // in the :ref:`Cluster ` :ref:`transport_socket // `. string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The `:authority` header in the grpc request. If this field is not set, the authority header value will be `cluster_name`. + // Note that this authority does not override the SNI. The SNI is provided by the transport socket of the cluster. 
+ string authority = 2 + [(validate.rules).string = + {min_bytes: 0 max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false}]; } // [#next-free-field: 9] diff --git a/source/common/grpc/async_client_impl.cc b/source/common/grpc/async_client_impl.cc index 6df5339a91d4..55e4fa75b23b 100644 --- a/source/common/grpc/async_client_impl.cc +++ b/source/common/grpc/async_client_impl.cc @@ -16,7 +16,8 @@ AsyncClientImpl::AsyncClientImpl(Upstream::ClusterManager& cm, const envoy::config::core::v3::GrpcService& config, TimeSource& time_source) : cm_(cm), remote_cluster_name_(config.envoy_grpc().cluster_name()), - initial_metadata_(config.initial_metadata()), time_source_(time_source) {} + host_name_(config.envoy_grpc().authority()), initial_metadata_(config.initial_metadata()), + time_source_(time_source) {} AsyncClientImpl::~AsyncClientImpl() { while (!active_streams_.empty()) { @@ -83,9 +84,9 @@ void AsyncStreamImpl::initialize(bool buffer_body_for_retry) { // TODO(htuch): match Google gRPC base64 encoding behavior for *-bin headers, see // https://github.com/envoyproxy/envoy/pull/2444#discussion_r163914459. - headers_message_ = - Common::prepareHeaders(parent_.remote_cluster_name_, service_full_name_, method_name_, - absl::optional(options_.timeout)); + headers_message_ = Common::prepareHeaders( + parent_.host_name_.empty() ? parent_.remote_cluster_name_ : parent_.host_name_, + service_full_name_, method_name_, options_.timeout); // Fill service-wide initial metadata. for (const auto& header_value : parent_.initial_metadata_) { headers_message_->headers().addCopy(Http::LowerCaseString(header_value.key()), diff --git a/source/common/grpc/async_client_impl.h b/source/common/grpc/async_client_impl.h index ac1b2f50ae97..ae0e2c7782ab 100644 --- a/source/common/grpc/async_client_impl.h +++ b/source/common/grpc/async_client_impl.h @@ -37,6 +37,8 @@ class AsyncClientImpl final : public RawAsyncClient { private: Upstream::ClusterManager& cm_; const std::string remote_cluster_name_; + // The host header value in the http transport. + const std::string host_name_; const Protobuf::RepeatedPtrField initial_metadata_; std::list active_streams_; TimeSource& time_source_; diff --git a/source/common/grpc/common.cc b/source/common/grpc/common.cc index 5c4c9234c7bd..4322df957916 100644 --- a/source/common/grpc/common.cc +++ b/source/common/grpc/common.cc @@ -225,13 +225,13 @@ void Common::toGrpcTimeout(const std::chrono::milliseconds& timeout, } Http::RequestMessagePtr -Common::prepareHeaders(const std::string& upstream_cluster, const std::string& service_full_name, +Common::prepareHeaders(const std::string& host_name, const std::string& service_full_name, const std::string& method_name, const absl::optional& timeout) { Http::RequestMessagePtr message(new Http::RequestMessageImpl()); message->headers().setReferenceMethod(Http::Headers::get().MethodValues.Post); message->headers().setPath(absl::StrCat("/", service_full_name, "/", method_name)); - message->headers().setHost(upstream_cluster); + message->headers().setHost(host_name); // According to https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md TE should appear // before Timeout and ContentType. 
message->headers().setReferenceTE(Http::Headers::get().TEValues.Trailers); diff --git a/test/common/grpc/async_client_impl_test.cc b/test/common/grpc/async_client_impl_test.cc index bdc77f95af50..fd49adf1a692 100644 --- a/test/common/grpc/async_client_impl_test.cc +++ b/test/common/grpc/async_client_impl_test.cc @@ -38,6 +38,64 @@ class EnvoyAsyncClientImplTest : public testing::Test { DangerousDeprecatedTestTime test_time_; }; +// Validate that the host header is the cluster name in grpc config. +TEST_F(EnvoyAsyncClientImplTest, HostIsClusterNameByDefault) { + NiceMock> grpc_callbacks; + Http::AsyncClient::StreamCallbacks* http_callbacks; + + Http::MockAsyncClientStream http_stream; + EXPECT_CALL(http_client_, start(_, _)) + .WillOnce( + Invoke([&http_callbacks, &http_stream](Http::AsyncClient::StreamCallbacks& callbacks, + const Http::AsyncClient::StreamOptions&) { + http_callbacks = &callbacks; + return &http_stream; + })); + + EXPECT_CALL(grpc_callbacks, + onCreateInitialMetadata(testing::Truly([](Http::RequestHeaderMap& headers) { + return headers.Host()->value() == "test_cluster"; + }))); + EXPECT_CALL(http_stream, sendHeaders(_, _)) + .WillOnce(Invoke([&http_callbacks](Http::HeaderMap&, bool) { http_callbacks->onReset(); })); + auto grpc_stream = + grpc_client_->start(*method_descriptor_, grpc_callbacks, Http::AsyncClient::StreamOptions()); + EXPECT_EQ(grpc_stream, nullptr); +} + +// Validate that the host header is the authority field in grpc config. +TEST_F(EnvoyAsyncClientImplTest, HostIsOverrideByConfig) { + envoy::config::core::v3::GrpcService config; + config.mutable_envoy_grpc()->set_cluster_name("test_cluster"); + config.mutable_envoy_grpc()->set_authority("demo.com"); + + grpc_client_ = std::make_unique(cm_, config, test_time_.timeSystem()); + EXPECT_CALL(cm_, httpAsyncClientForCluster("test_cluster")) + .WillRepeatedly(ReturnRef(http_client_)); + + NiceMock> grpc_callbacks; + Http::AsyncClient::StreamCallbacks* http_callbacks; + + Http::MockAsyncClientStream http_stream; + EXPECT_CALL(http_client_, start(_, _)) + .WillOnce( + Invoke([&http_callbacks, &http_stream](Http::AsyncClient::StreamCallbacks& callbacks, + const Http::AsyncClient::StreamOptions&) { + http_callbacks = &callbacks; + return &http_stream; + })); + + EXPECT_CALL(grpc_callbacks, + onCreateInitialMetadata(testing::Truly([](Http::RequestHeaderMap& headers) { + return headers.Host()->value() == "demo.com"; + }))); + EXPECT_CALL(http_stream, sendHeaders(_, _)) + .WillOnce(Invoke([&http_callbacks](Http::HeaderMap&, bool) { http_callbacks->onReset(); })); + auto grpc_stream = + grpc_client_->start(*method_descriptor_, grpc_callbacks, Http::AsyncClient::StreamOptions()); + EXPECT_EQ(grpc_stream, nullptr); +} + // Validate that a failure in the HTTP client returns immediately with status // UNAVAILABLE. 
TEST_F(EnvoyAsyncClientImplTest, StreamHttpStartFail) { @@ -46,7 +104,7 @@ TEST_F(EnvoyAsyncClientImplTest, StreamHttpStartFail) { EXPECT_CALL(grpc_callbacks, onRemoteClose(Status::WellKnownGrpcStatus::Unavailable, "")); auto grpc_stream = grpc_client_->start(*method_descriptor_, grpc_callbacks, Http::AsyncClient::StreamOptions()); - EXPECT_TRUE(grpc_stream == nullptr); + EXPECT_EQ(grpc_stream, nullptr); } // Validate that a failure in the HTTP client returns immediately with status @@ -98,7 +156,7 @@ TEST_F(EnvoyAsyncClientImplTest, StreamHttpSendHeadersFail) { EXPECT_CALL(grpc_callbacks, onRemoteClose(Status::WellKnownGrpcStatus::Internal, "")); auto grpc_stream = grpc_client_->start(*method_descriptor_, grpc_callbacks, Http::AsyncClient::StreamOptions()); - EXPECT_TRUE(grpc_stream == nullptr); + EXPECT_EQ(grpc_stream, nullptr); } // Validate that a failure to sendHeaders() in the HTTP client returns @@ -150,7 +208,7 @@ TEST_F(EnvoyAsyncClientImplTest, StreamHttpClientException) { onRemoteClose(Status::WellKnownGrpcStatus::Unavailable, "Cluster not available")); auto grpc_stream = grpc_client_->start(*method_descriptor_, grpc_callbacks, Http::AsyncClient::StreamOptions()); - EXPECT_TRUE(grpc_stream == nullptr); + EXPECT_EQ(grpc_stream, nullptr); } } // namespace From 1a6337673bb0773ee681ba7bc04b005268a807c8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Guti=C3=A9rrez=20Segal=C3=A9s?= Date: Tue, 4 Aug 2020 17:51:51 -0400 Subject: [PATCH 849/909] rocketmq: small grammar fix (#12467) Signed-off-by: Raul Gutierrez Segales --- .../extensions/filters/network/rocketmq_proxy/conn_manager.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/extensions/filters/network/rocketmq_proxy/conn_manager.h b/source/extensions/filters/network/rocketmq_proxy/conn_manager.h index e69237b6cae7..29f3faf48ad5 100644 --- a/source/extensions/filters/network/rocketmq_proxy/conn_manager.h +++ b/source/extensions/filters/network/rocketmq_proxy/conn_manager.h @@ -39,7 +39,7 @@ class Config { virtual Router::RouterPtr createRouter() PURE; /** - * Indicate whether this proxy is running in develop mode. Once set true, this proxy plugin may + * Indicate whether this proxy is running in development mode. If true, this proxy plugin may * work without dedicated traffic intercepting facility without considering backward * compatibility. * @return true when in development mode; false otherwise. @@ -212,4 +212,4 @@ class ConnectionManager : public Network::ReadFilter, Logger::Loggable Date: Tue, 4 Aug 2020 15:09:52 -0700 Subject: [PATCH 850/909] test: fix Response204WithBody flake (#12473) Fixes https://github.com/envoyproxy/envoy/issues/12459 Signed-off-by: Matt Klein --- test/integration/integration_test.cc | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/test/integration/integration_test.cc b/test/integration/integration_test.cc index 081bcd2681ba..9efe9bab87b8 100644 --- a/test/integration/integration_test.cc +++ b/test/integration/integration_test.cc @@ -1428,10 +1428,11 @@ TEST_P(IntegrationTest, Response204WithBody) { auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); waitForNextUpstreamRequest(); - // Create a response with a body + // Create a response with a body. This will cause an upstream messaging error but downstream + // should still see a response. 
upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "204"}}, false); upstream_request_->encodeData(512, true); - ASSERT_TRUE(fake_upstream_connection_->close()); + ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect(true)); response->waitForEndStream(); From 544a414ff26a6bda516a48651d64b50226b43cb5 Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Tue, 4 Aug 2020 15:45:28 -0700 Subject: [PATCH 851/909] tidy: allow run clang_tidy with file (#12472) Refactor `ci/clang_tidy.sh` and `ci/do_ci.sh` to allow run clang-tidy for specified source file. Make running clang_tidy with build container or within devcontainer easier. Risk Level: Low Testing: Local, CI Docs Changes: Yes Release Notes: N/A Signed-off-by: Lizan Zhou --- ci/README.md | 2 +- ci/do_ci.sh | 4 +-- ci/run_clang_tidy.sh | 60 +++++++++++++++++++++++--------------------- 3 files changed, 34 insertions(+), 32 deletions(-) diff --git a/ci/README.md b/ci/README.md index 4e11ef0327d7..b7d39ecab194 100644 --- a/ci/README.md +++ b/ci/README.md @@ -127,7 +127,7 @@ The `./ci/run_envoy_docker.sh './ci/do_ci.sh '` targets are: * `bazel.fuzz ` — build and run a specified fuzz test or test dir under `-c dbg --config=asan-fuzzer` with clang. If specifying a single fuzz test, must use the full target name with "_with_libfuzzer" for ``. * `bazel.compile_time_options` — build Envoy and run tests with various compile-time options toggled to their non-default state, to ensure they still build. * `bazel.compile_time_options ` — build Envoy and run a specified test or test dir with various compile-time options toggled to their non-default state, to ensure they still build. -* `bazel.clang_tidy` — build and run clang-tidy over all source files. +* `bazel.clang_tidy ` — build and run clang-tidy specified source files, if no files specified, runs against the diff with the last GitHub commit. * `check_format`— run `clang-format` and `buildifier` on entire source tree. * `fix_format`— run and enforce `clang-format` and `buildifier` on entire source tree. * `check_spelling`— run `misspell` on entire project. diff --git a/ci/do_ci.sh b/ci/do_ci.sh index a56efe8a34f3..34731effdd2f 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -99,9 +99,9 @@ function bazel_binary_build() { } CI_TARGET=$1 +shift if [[ $# -gt 1 ]]; then - shift COVERAGE_TEST_TARGETS=$* TEST_TARGETS="$COVERAGE_TEST_TARGETS" else @@ -284,7 +284,7 @@ elif [[ "$CI_TARGET" == "bazel.coverage" || "$CI_TARGET" == "bazel.fuzz_coverage exit 0 elif [[ "$CI_TARGET" == "bazel.clang_tidy" ]]; then setup_clang_toolchain - NUM_CPUS=$NUM_CPUS ci/run_clang_tidy.sh + NUM_CPUS=$NUM_CPUS ci/run_clang_tidy.sh $* exit 0 elif [[ "$CI_TARGET" == "bazel.coverity" ]]; then # Coverity Scan version 2017.07 fails to analyze the entirely of the Envoy diff --git a/ci/run_clang_tidy.sh b/ci/run_clang_tidy.sh index 0f5917fb516d..8114f4f32bb4 100755 --- a/ci/run_clang_tidy.sh +++ b/ci/run_clang_tidy.sh @@ -2,7 +2,10 @@ set -eo pipefail -ENVOY_SRCDIR=${ENVOY_SRCDIR:-$(cd $(dirname $0)/.. && pwd)} +# ENVOY_SRCDIR should point to where Envoy source lives, while SRCDIR could be a downstream build +# (for example envoy-filter-example). +[[ -z "${ENVOY_SRCDIR}" ]] && ENVOY_SRCDIR="${PWD}" +[[ -z "${SRCDIR}" ]] && SRCDIR="${ENVOY_SRCDIR}" export LLVM_CONFIG=${LLVM_CONFIG:-llvm-config} LLVM_PREFIX=${LLVM_PREFIX:-$(${LLVM_CONFIG} --prefix)} @@ -21,14 +24,6 @@ rm clang-tidy-config-errors.txt echo "Generating compilation database..." 
-cp -f .bazelrc .bazelrc.bak - -function cleanup() { - cp -f .bazelrc.bak .bazelrc - rm -f .bazelrc.bak -} -trap cleanup EXIT - # bazel build need to be run to setup virtual includes, generating files which are consumed # by clang-tidy "${ENVOY_SRCDIR}/tools/gen_compilation_database.py" --include_headers @@ -59,32 +54,39 @@ function filter_excludes() { exclude_testdata | exclude_win32_impl | exclude_macos_impl | exclude_third_party } -if [[ -z "${DIFF_REF}" && "${BUILD_REASON}" != "PullRequest" ]]; then - DIFF_REF=HEAD^ -fi - -if [[ "${RUN_FULL_CLANG_TIDY}" == 1 ]]; then - echo "Running full clang-tidy..." +function run_clang_tidy() { python3 "${LLVM_PREFIX}/share/clang/run-clang-tidy.py" \ -clang-tidy-binary=${CLANG_TIDY} \ -clang-apply-replacements-binary=${CLANG_APPLY_REPLACEMENTS} \ - -export-fixes=${FIX_YAML} \ - -j ${NUM_CPUS:-0} -p 1 -quiet \ - ${APPLY_CLANG_TIDY_FIXES:+-fix} -elif [[ -n "${DIFF_REF}" ]]; then - echo "Running clang-tidy-diff against ref ${DIFF_REF}" - git diff ${DIFF_REF} | filter_excludes | \ + -export-fixes=${FIX_YAML} -j ${NUM_CPUS:-0} -p ${SRCDIR} -quiet \ + ${APPLY_CLANG_TIDY_FIXES:+-fix} $@ +} + +function run_clang_tidy_diff() { + git diff $1 | filter_excludes | \ python3 "${LLVM_PREFIX}/share/clang/clang-tidy-diff.py" \ -clang-tidy-binary=${CLANG_TIDY} \ - -export-fixes=${FIX_YAML} \ - -j ${NUM_CPUS:-0} -p 1 -quiet + -export-fixes=${FIX_YAML} -j ${NUM_CPUS:-0} -p 1 -quiet +} + +if [[ $# -gt 0 ]]; then + echo "Running clang-tidy on: $@" + run_clang_tidy $@ +elif [[ "${RUN_FULL_CLANG_TIDY}" == 1 ]]; then + echo "Running a full clang-tidy" + run_clang_tidy else - echo "Running clang-tidy-diff against master branch..." - git diff "remotes/origin/${SYSTEM_PULLREQUEST_TARGETBRANCH}" | filter_excludes | \ - python3 "${LLVM_PREFIX}/share/clang/clang-tidy-diff.py" \ - -clang-tidy-binary=${CLANG_TIDY} \ - -export-fixes=${FIX_YAML} \ - -j ${NUM_CPUS:-0} -p 1 -quiet + if [[ -z "${DIFF_REF}" ]]; then + if [[ "${BUILD_REASON}" == "PullRequest" ]]; then + DIFF_REF="remotes/origin/${SYSTEM_PULLREQUEST_TARGETBRANCH}" + elif [[ "${BUILD_REASON}" == *CI ]]; then + DIFF_REF="HEAD^" + else + DIFF_REF=$(${ENVOY_SRCDIR}/tools/git/last_github_commit.sh) + fi + fi + echo "Running clang-tidy-diff against ${DIFF_REF} ($(git rev-parse ${DIFF_REF})), current HEAD ($(git rev-parse HEAD))" + run_clang_tidy_diff ${DIFF_REF} fi if [[ -s "${FIX_YAML}" ]]; then From 8b6abe6293cced29a0dd63d2aa6dea15702a3512 Mon Sep 17 00:00:00 2001 From: Alex Konradi Date: Tue, 4 Aug 2020 19:17:58 -0400 Subject: [PATCH 852/909] Point runtime overrides doc at correct file (#12474) The list of enabled runtime flags is in runtime_features.cc, not runtime_features.h Signed-off-by: Alex Konradi --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index ce51f2d59f30..7a35daf39100 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -188,7 +188,7 @@ maintainer's discretion. Generally all runtime guarded features will be set true release is cut. Old code paths for refactors can be cleaned up after a release and there has been some production run time. Old code for behavioral changes will be deprecated after six months. 
Runtime features are set true by default by inclusion in -[source/common/runtime/runtime_features.h](https://github.com/envoyproxy/envoy/blob/master/source/common/runtime/runtime_features.h) +[source/common/runtime/runtime_features.cc](https://github.com/envoyproxy/envoy/blob/master/source/common/runtime/runtime_features.cc) There are four suggested options for testing new runtime features: From 598e76a51b781bdd435d5d4f96d482fc283695b7 Mon Sep 17 00:00:00 2001 From: Yifan Yang Date: Tue, 4 Aug 2020 19:20:10 -0400 Subject: [PATCH 853/909] bugfix: check_format script, tokenInLine helper function (#12441) Signed-off-by: Yifan Yang --- tools/code_format/check_format.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tools/code_format/check_format.py b/tools/code_format/check_format.py index 61851749266c..12af83fbf002 100755 --- a/tools/code_format/check_format.py +++ b/tools/code_format/check_format.py @@ -568,7 +568,11 @@ def tokenInLine(token, line): index = 0 while True: index = line.find(token, index) - if index < 1: + # the following check has been changed from index < 1 to index < 0 because + # this function incorrectly returns false when the token in question is the + # first one in a line. The following line returns false when the token is present: + # (no leading whitespace) violating_symbol foo; + if index < 0: break if index == 0 or not (line[index - 1].isalnum() or line[index - 1] == '_'): if index + len(token) >= len(line) or not (line[index + len(token)].isalnum() or From 8d3902a1eb51c7a7c301f9fa23a99e476f736137 Mon Sep 17 00:00:00 2001 From: Jose Ulises Nino Rivera Date: Tue, 4 Aug 2020 18:21:45 -0700 Subject: [PATCH 854/909] extensions: add a couple static registration macros (#12478) Commit Message: add a couple static registration macros Additional Description: needed for registration in static library environments. 
Risk Level: low Testing: local Signed-off-by: Jose Nino --- source/common/upstream/static_cluster.h | 2 ++ source/extensions/stat_sinks/statsd/config.h | 2 ++ 2 files changed, 4 insertions(+) diff --git a/source/common/upstream/static_cluster.h b/source/common/upstream/static_cluster.h index ed7a97b57fc1..b5de7b76b8dd 100644 --- a/source/common/upstream/static_cluster.h +++ b/source/common/upstream/static_cluster.h @@ -44,5 +44,7 @@ class StaticClusterFactory : public ClusterFactoryImplBase { Stats::ScopePtr&& stats_scope) override; }; +DECLARE_FACTORY(StaticClusterFactory); + } // namespace Upstream } // namespace Envoy diff --git a/source/extensions/stat_sinks/statsd/config.h b/source/extensions/stat_sinks/statsd/config.h index 928e7729055f..3a709715b2bd 100644 --- a/source/extensions/stat_sinks/statsd/config.h +++ b/source/extensions/stat_sinks/statsd/config.h @@ -24,6 +24,8 @@ class StatsdSinkFactory : Logger::Loggable, std::string name() const override; }; +DECLARE_FACTORY(StatsdSinkFactory); + } // namespace Statsd } // namespace StatSinks } // namespace Extensions From 800d3423500a4334ae6ab25333bd46410aed4b02 Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Tue, 4 Aug 2020 18:32:36 -0700 Subject: [PATCH 855/909] tidy: use libstdc++ (#12481) Signed-off-by: Lizan Zhou --- ci/do_ci.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ci/do_ci.sh b/ci/do_ci.sh index 34731effdd2f..381f6245e94a 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -283,8 +283,10 @@ elif [[ "$CI_TARGET" == "bazel.coverage" || "$CI_TARGET" == "bazel.fuzz_coverage collect_build_profile coverage exit 0 elif [[ "$CI_TARGET" == "bazel.clang_tidy" ]]; then + # clang-tidy will warn on standard library issues with libc++ + ENVOY_STDLIB="libstdc++" setup_clang_toolchain - NUM_CPUS=$NUM_CPUS ci/run_clang_tidy.sh $* + NUM_CPUS=$NUM_CPUS ci/run_clang_tidy.sh "$@" exit 0 elif [[ "$CI_TARGET" == "bazel.coverity" ]]; then # Coverity Scan version 2017.07 fails to analyze the entirely of the Envoy From 7f8b4af2e410ae8ee204e90d27db5b348ac7f39c Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Tue, 4 Aug 2020 19:33:00 -0700 Subject: [PATCH 856/909] test: shard hds_integration_test (#12482) This should avoid TSAN timeout flakes. Signed-off-by: Matt Klein --- test/integration/BUILD | 1 + 1 file changed, 1 insertion(+) diff --git a/test/integration/BUILD b/test/integration/BUILD index 2009458e1852..477a428bffce 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -805,6 +805,7 @@ envoy_cc_test( envoy_cc_test( name = "hds_integration_test", srcs = ["hds_integration_test.cc"], + shard_count = 2, tags = ["fails_on_windows"], deps = [ ":http_integration_lib", From 7d03b628859cdf20d97a4e9dc2e4c137884b4a1e Mon Sep 17 00:00:00 2001 From: Dhi Aurrahman Date: Wed, 5 Aug 2020 09:40:37 +0700 Subject: [PATCH 857/909] gRPC-Web: Fix failing HTTP/2 requests on some browsers due to empty trailers (#12178) This adds `envoy.reloadable_features.skip_encoding_empty_trailers` runtime feature flag (enabled by default) to skip encoding empty trailers in H2 codec. This behavior can be reverted temporarily by setting runtime feature `envoy.reloadable_features.skip_encoding_empty_trailers` to false. 
Before this commit, seeing empty trailers of gRPC-Web filter response, codec submits: ``` [ 5.036] recv DATA frame [ 5.040] recv DATA frame [ 5.053] recv HEADERS frame ; END_STREAM | END_HEADERS ``` After: ``` [ 5.036] recv DATA frame [ 5.040] recv DATA frame [ 5.052] recv DATA frame ; END_STREAM ``` Risk Level: Low Testing: Unit, integration tests. Docs Changes: N/A. Release Notes: Added Fixes #10514 Signed-off-by: Dhi Aurrahman --- docs/root/version_history/current.rst | 2 + source/common/http/http2/BUILD | 5 +- source/common/http/http2/codec_impl.cc | 24 +++++++ source/common/http/http2/codec_impl.h | 10 +++ source/common/http/http2/codec_impl_legacy.cc | 24 +++++++ source/common/http/http2/codec_impl_legacy.h | 10 +++ source/common/runtime/runtime_features.cc | 3 +- .../filters/http/grpc_web/grpc_web_filter.cc | 2 +- test/common/http/http2/codec_impl_test.cc | 59 +++++++++++++++++ .../grpc_web_filter_integration_test.cc | 65 ++++++++++++++++--- 10 files changed, 190 insertions(+), 14 deletions(-) diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 15f30db08ed3..497bb0047df5 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -15,6 +15,7 @@ Minor Behavior Changes * http: added :ref:`headers_to_add ` to :ref:`local reply mapper ` to allow its users to add/append/override response HTTP headers to local replies. * http: added HCM level configuration of :ref:`error handling on invalid messaging ` which substantially changes Envoy's behavior when encountering invalid HTTP/1.1 defaulting to closing the connection instead of allowing reuse. This can temporarily be reverted by setting `envoy.reloadable_features.hcm_stream_error_on_invalid_message` to false, or permanently reverted by setting the :ref:`HCM option ` to true to restore prior HTTP/1.1 beavior and setting the *new* HTTP/2 configuration :ref:`override_stream_error_on_invalid_http_message ` to false to retain prior HTTP/2 behavior. * http: changed Envoy to send error headers and body when possible. This behavior may be temporarily reverted by setting `envoy.reloadable_features.allow_response_for_timeout` to false. +* http: changed empty trailers encoding behavior by sending empty data with ``end_stream`` true (instead of sending empty trailers) for HTTP/2. This behavior can be reverted temporarily by setting runtime feature ``envoy.reloadable_features.http2_skip_encoding_empty_trailers`` to false. * http: clarified and enforced 1xx handling. Multiple 100-continue headers are coalesced when proxying. 1xx headers other than {100, 101} are dropped. * http: fixed the 100-continue response path to properly handle upstream failure by sending 5xx responses. This behavior can be temporarily reverted by setting `envoy.reloadable_features.allow_500_after_100` to false. * http: the per-stream FilterState maintained by the HTTP connection manager will now provide read/write access to the downstream connection FilterState. As such, code that relies on interacting with this might @@ -33,6 +34,7 @@ Bug Fixes * csrf: fixed issues with regards to origin and host header parsing. * dynamic_forward_proxy: only perform DNS lookups for routes to Dynamic Forward Proxy clusters since other cluster types handle DNS lookup themselves. * fault: fixed an issue with `active_faults` gauge not being decremented for when abort faults were injected. +* grpc-web: fixed an issue with failing HTTP/2 requests on some browsers. 
Notably, WebKit-based browsers (https://bugs.webkit.org/show_bug.cgi?id=210108), Internet Explorer 11, and Edge (pre-Chromium). * rocketmq_proxy network-level filter: fixed an issue involving incorrect header lengths. In debug mode it causes crash and in release mode it causes underflow. Removed Config or Runtime diff --git a/source/common/http/http2/BUILD b/source/common/http/http2/BUILD index ac83f7b49884..5ccf63147d5f 100644 --- a/source/common/http/http2/BUILD +++ b/source/common/http/http2/BUILD @@ -35,6 +35,7 @@ CODEC_LIB_DEPS = [ "//source/common/common:enum_to_int", "//source/common/common:linked_object", "//source/common/common:minimal_logger_lib", + "//source/common/common:statusor_lib", "//source/common/common:utility_lib", "//source/common/http:codec_helper_lib", "//source/common/http:codes_lib", @@ -58,9 +59,7 @@ envoy_cc_library( "abseil_inlined_vector", "abseil_algorithm", ], - deps = CODEC_LIB_DEPS + [ - "//source/common/common:statusor_lib", - ], + deps = CODEC_LIB_DEPS, ) envoy_cc_library( diff --git a/source/common/http/http2/codec_impl.cc b/source/common/http/http2/codec_impl.cc index 205c9d2d18ab..894b80b628df 100644 --- a/source/common/http/http2/codec_impl.cc +++ b/source/common/http/http2/codec_impl.cc @@ -20,6 +20,7 @@ #include "common/http/headers.h" #include "common/http/http2/codec_stats.h" #include "common/http/utility.h" +#include "common/runtime/runtime_features.h" #include "absl/container/fixed_array.h" @@ -344,6 +345,18 @@ void ConnectionImpl::StreamImpl::saveHeader(HeaderString&& name, HeaderString&& } void ConnectionImpl::StreamImpl::submitTrailers(const HeaderMap& trailers) { + ASSERT(local_end_stream_); + const bool skip_encoding_empty_trailers = + trailers.empty() && parent_.skip_encoding_empty_trailers_; + if (skip_encoding_empty_trailers) { + ENVOY_CONN_LOG(debug, "skipping submitting trailers", parent_.connection_); + + // Instead of submitting empty trailers, we send empty data instead. 
+ Buffer::OwnedImpl empty_buffer; + encodeDataHelper(empty_buffer, /*end_stream=*/true, skip_encoding_empty_trailers); + return; + } + std::vector final_headers; buildHeaders(final_headers, trailers); int rc = nghttp2_submit_trailer(parent_.session_, stream_id_, final_headers.data(), @@ -442,6 +455,15 @@ void ConnectionImpl::StreamImpl::onPendingFlushTimer() { void ConnectionImpl::StreamImpl::encodeData(Buffer::Instance& data, bool end_stream) { ASSERT(!local_end_stream_); + encodeDataHelper(data, end_stream, /*skip_encoding_empty_trailers=*/false); +} + +void ConnectionImpl::StreamImpl::encodeDataHelper(Buffer::Instance& data, bool end_stream, + bool skip_encoding_empty_trailers) { + if (skip_encoding_empty_trailers) { + ASSERT(data.length() == 0 && end_stream); + } + local_end_stream_ = end_stream; parent_.stats_.pending_send_bytes_.add(data.length()); pending_send_data_.move(data); @@ -530,6 +552,8 @@ ConnectionImpl::ConnectionImpl(Network::Connection& connection, CodecStats& stat http2_options.max_inbound_priority_frames_per_stream().value()), max_inbound_window_update_frames_per_data_frame_sent_( http2_options.max_inbound_window_update_frames_per_data_frame_sent().value()), + skip_encoding_empty_trailers_(Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.http2_skip_encoding_empty_trailers")), dispatching_(false), raised_goaway_(false), pending_deferred_reset_(false) {} ConnectionImpl::~ConnectionImpl() { diff --git a/source/common/http/http2/codec_impl.h b/source/common/http/http2/codec_impl.h index 8de01c67728b..ce8e6f809a0d 100644 --- a/source/common/http/http2/codec_impl.h +++ b/source/common/http/http2/codec_impl.h @@ -267,6 +267,9 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable 0; } + void encodeDataHelper(Buffer::Instance& data, bool end_stream, + bool skip_encoding_empty_trailers); + ConnectionImpl& parent_; int32_t stream_id_{-1}; uint32_t unconsumed_bytes_{0}; @@ -503,6 +506,13 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable onSend(const uint8_t* data, size_t length); + // Some browsers (e.g. WebKit-based browsers: https://bugs.webkit.org/show_bug.cgi?id=210108) have + // a problem with processing empty trailers (END_STREAM | END_HEADERS with zero length HEADERS) of + // an HTTP/2 response as reported here: https://github.com/envoyproxy/envoy/issues/10514. This is + // controlled by "envoy.reloadable_features.http2_skip_encoding_empty_trailers" runtime feature + // flag. 
+ const bool skip_encoding_empty_trailers_; + private: virtual ConnectionCallbacks& callbacks() PURE; virtual Status onBeginHeaders(const nghttp2_frame* frame) PURE; diff --git a/source/common/http/http2/codec_impl_legacy.cc b/source/common/http/http2/codec_impl_legacy.cc index 9daec6c70efe..c0046aae307d 100644 --- a/source/common/http/http2/codec_impl_legacy.cc +++ b/source/common/http/http2/codec_impl_legacy.cc @@ -20,6 +20,7 @@ #include "common/http/headers.h" #include "common/http/http2/codec_stats.h" #include "common/http/utility.h" +#include "common/runtime/runtime_features.h" #include "absl/container/fixed_array.h" @@ -333,6 +334,18 @@ void ConnectionImpl::StreamImpl::saveHeader(HeaderString&& name, HeaderString&& } void ConnectionImpl::StreamImpl::submitTrailers(const HeaderMap& trailers) { + ASSERT(local_end_stream_); + const bool skip_encoding_empty_trailers = + trailers.empty() && parent_.skip_encoding_empty_trailers_; + if (skip_encoding_empty_trailers) { + ENVOY_CONN_LOG(debug, "skipping submitting trailers", parent_.connection_); + + // Instead of submitting empty trailers, we send empty data instead. + Buffer::OwnedImpl empty_buffer; + encodeDataHelper(empty_buffer, /*end_stream=*/true, skip_encoding_empty_trailers); + return; + } + std::vector final_headers; buildHeaders(final_headers, trailers); int rc = nghttp2_submit_trailer(parent_.session_, stream_id_, final_headers.data(), @@ -428,6 +441,15 @@ void ConnectionImpl::StreamImpl::onPendingFlushTimer() { void ConnectionImpl::StreamImpl::encodeData(Buffer::Instance& data, bool end_stream) { ASSERT(!local_end_stream_); + encodeDataHelper(data, end_stream, /*skip_encoding_empty_trailers=*/false); +} + +void ConnectionImpl::StreamImpl::encodeDataHelper(Buffer::Instance& data, bool end_stream, + bool skip_encoding_empty_trailers) { + if (skip_encoding_empty_trailers) { + ASSERT(data.length() == 0 && end_stream); + } + local_end_stream_ = end_stream; parent_.stats_.pending_send_bytes_.add(data.length()); pending_send_data_.move(data); @@ -512,6 +534,8 @@ ConnectionImpl::ConnectionImpl(Network::Connection& connection, CodecStats& stat http2_options.max_inbound_priority_frames_per_stream().value()), max_inbound_window_update_frames_per_data_frame_sent_( http2_options.max_inbound_window_update_frames_per_data_frame_sent().value()), + skip_encoding_empty_trailers_(Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.http2_skip_encoding_empty_trailers")), dispatching_(false), raised_goaway_(false), pending_deferred_reset_(false) {} ConnectionImpl::~ConnectionImpl() { diff --git a/source/common/http/http2/codec_impl_legacy.h b/source/common/http/http2/codec_impl_legacy.h index 0b663cd93b12..47065d643806 100644 --- a/source/common/http/http2/codec_impl_legacy.h +++ b/source/common/http/http2/codec_impl_legacy.h @@ -267,6 +267,9 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable 0; } + void encodeDataHelper(Buffer::Instance& data, bool end_stream, + bool skip_encoding_empty_trailers); + ConnectionImpl& parent_; int32_t stream_id_{-1}; uint32_t unconsumed_bytes_{0}; @@ -492,6 +495,13 @@ class ConnectionImpl : public virtual Connection, protected Logger::LoggableencodeTrailers(TestResponseTrailerMapImpl{{"trailing", "header"}}); } +// When having empty trailers, codec submits empty buffer and end_stream instead. 
+TEST_P(Http2CodecImplTest, IgnoreTrailingEmptyHeaders) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.http2_skip_encoding_empty_trailers", "true"}}); + + initialize(); + + Buffer::OwnedImpl empty_buffer; + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, false)); + request_encoder_->encodeHeaders(request_headers, false); + EXPECT_CALL(request_decoder_, decodeData(_, false)); + Buffer::OwnedImpl hello("hello"); + request_encoder_->encodeData(hello, false); + EXPECT_CALL(request_decoder_, decodeData(BufferEqual(&empty_buffer), true)); + request_encoder_->encodeTrailers(TestRequestTrailerMapImpl{}); + + TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + EXPECT_CALL(response_decoder_, decodeHeaders_(_, false)); + response_encoder_->encodeHeaders(response_headers, false); + EXPECT_CALL(response_decoder_, decodeData(_, false)); + Buffer::OwnedImpl world("world"); + response_encoder_->encodeData(world, false); + EXPECT_CALL(response_decoder_, decodeData(BufferEqual(&empty_buffer), true)); + response_encoder_->encodeTrailers(TestResponseTrailerMapImpl{}); +} + +// When having empty trailers and "envoy.reloadable_features.http2_skip_encoding_empty_trailers" is +// turned off, codec submits empty trailers. +TEST_P(Http2CodecImplTest, SubmitTrailingEmptyHeaders) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.http2_skip_encoding_empty_trailers", "false"}}); + + initialize(); + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, false)); + request_encoder_->encodeHeaders(request_headers, false); + EXPECT_CALL(request_decoder_, decodeData(_, false)); + Buffer::OwnedImpl hello("hello"); + request_encoder_->encodeData(hello, false); + EXPECT_CALL(request_decoder_, decodeTrailers_(_)); + request_encoder_->encodeTrailers(TestRequestTrailerMapImpl{}); + + TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + EXPECT_CALL(response_decoder_, decodeHeaders_(_, false)); + response_encoder_->encodeHeaders(response_headers, false); + EXPECT_CALL(response_decoder_, decodeData(_, false)); + Buffer::OwnedImpl world("world"); + response_encoder_->encodeData(world, false); + EXPECT_CALL(response_decoder_, decodeTrailers_(_)); + response_encoder_->encodeTrailers(TestResponseTrailerMapImpl{}); +} + TEST_P(Http2CodecImplTest, TrailingHeadersLargeClientBody) { initialize(); diff --git a/test/extensions/filters/http/grpc_web/grpc_web_filter_integration_test.cc b/test/extensions/filters/http/grpc_web/grpc_web_filter_integration_test.cc index 608954031f71..2eaf4ef7fdb1 100644 --- a/test/extensions/filters/http/grpc_web/grpc_web_filter_integration_test.cc +++ b/test/extensions/filters/http/grpc_web/grpc_web_filter_integration_test.cc @@ -9,23 +9,55 @@ namespace Envoy { namespace { -class GrpcWebFilterIntegrationTest : public ::testing::TestWithParam, +using SkipEncodingEmptyTrailers = bool; +using TestParams = + std::tuple; + +class GrpcWebFilterIntegrationTest : public testing::TestWithParam, public HttpIntegrationTest { public: GrpcWebFilterIntegrationTest() - : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, GetParam()) {} + : HttpIntegrationTest(std::get<1>(GetParam()), std::get<0>(GetParam())) {} void SetUp() override { 
setUpstreamProtocol(FakeHttpConnection::Type::HTTP2); config_helper_.addFilter("name: envoy.filters.http.grpc_web"); } + + void skipEncodingEmptyTrailers(SkipEncodingEmptyTrailers http2_skip_encoding_empty_trailers) { + config_helper_.addRuntimeOverride( + "envoy.reloadable_features.http2_skip_encoding_empty_trailers", + http2_skip_encoding_empty_trailers ? "true" : "false"); + } + + static std::string testParamsToString(const testing::TestParamInfo params) { + return fmt::format( + "{}_{}_{}", + TestUtility::ipTestParamsToString(testing::TestParamInfo( + std::get<0>(params.param), params.index)), + std::get<1>(params.param) == Http::CodecClient::Type::HTTP2 ? "Http2" : "Http", + std::get<2>(params.param) ? "SkipEncodingEmptyTrailers" : "SubmitEncodingEmptyTrailers"); + } }; -INSTANTIATE_TEST_SUITE_P(IpVersions, GrpcWebFilterIntegrationTest, - testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), - TestUtility::ipTestParamsToString); -TEST_P(GrpcWebFilterIntegrationTest, GRPCWebTrailersNotDuplicated) { - config_helper_.addConfigModifier(setEnableDownstreamTrailersHttp1()); +INSTANTIATE_TEST_SUITE_P( + Params, GrpcWebFilterIntegrationTest, + testing::Combine( + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + testing::Values(Http::CodecClient::Type::HTTP1, Http::CodecClient::Type::HTTP2), + testing::Values(SkipEncodingEmptyTrailers{true}, SkipEncodingEmptyTrailers{false})), + GrpcWebFilterIntegrationTest::testParamsToString); + +TEST_P(GrpcWebFilterIntegrationTest, GrpcWebTrailersNotDuplicated) { + const auto downstream_protocol = std::get<1>(GetParam()); + const bool http2_skip_encoding_empty_trailers = std::get<2>(GetParam()); + + if (downstream_protocol == Http::CodecClient::Type::HTTP1) { + config_helper_.addConfigModifier(setEnableDownstreamTrailersHttp1()); + } else { + skipEncodingEmptyTrailers(http2_skip_encoding_empty_trailers); + } + setUpstreamProtocol(FakeHttpConnection::Type::HTTP2); Http::TestRequestTrailerMapImpl request_trailers{{"request1", "trailer1"}, @@ -59,8 +91,23 @@ TEST_P(GrpcWebFilterIntegrationTest, GRPCWebTrailersNotDuplicated) { EXPECT_EQ("200", response->headers().getStatusValue()); EXPECT_TRUE(absl::StrContains(response->body(), "response1:trailer1")); EXPECT_TRUE(absl::StrContains(response->body(), "response2:trailer2")); - // Expect that the trailers be in the response-body instead - EXPECT_EQ(response->trailers(), nullptr); + + if (downstream_protocol == Http::CodecClient::Type::HTTP1) { + // When the downstream protocol is HTTP/1.1 we expect the trailers to be in the response-body. + EXPECT_EQ(nullptr, response->trailers()); + } + + if (downstream_protocol == Http::CodecClient::Type::HTTP2) { + if (http2_skip_encoding_empty_trailers) { + // When the downstream protocol is HTTP/2 and the feature-flag to skip encoding empty trailers + // is turned on, expect that the trailers are included in the response-body. + EXPECT_EQ(nullptr, response->trailers()); + } else { + // Otherwise, we send empty trailers. + ASSERT_NE(nullptr, response->trailers()); + EXPECT_TRUE(response->trailers()->empty()); + } + } } } // namespace From 9f405633b63e71c623d4de81698807fca17debe9 Mon Sep 17 00:00:00 2001 From: Petr Pchelko Date: Tue, 4 Aug 2020 21:54:14 -0700 Subject: [PATCH 858/909] Add support for X-RateLimit-* headers in ratelimit filter (#12410) Adds support for X-RateLimit-* headers described in the draft RFC. The X-RateLimit-Limit header contains the quota-policy per RFC. The descriptor name is included in the quota policy under the name key. 
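As a rough illustration (modeled on the unit test added later in this patch, not an additional API), the sketch below shows how the new `XRateLimitHeaderUtils::create` helper turns a list of descriptor statuses into the quota-policy headers. `buildDescriptorStatus` is the test-only helper introduced in `test/extensions/filters/common/ratelimit/utils.h`; include paths and namespaces follow the test code in this patch.
```
// Sketch only: mirrors the ratelimit_headers unit test in this patch.
// For descriptor statuses (1 req/MINUTE, name "first", 2 remaining, resets in 3s)
// and (4 req/HOUR, name "second", 5 remaining, resets in 6s), the utility keys the
// scalar headers off the status closest to its limit and emits:
//   x-ratelimit-limit:     1, 1;w=60;name="first", 4;w=3600;name="second"
//   x-ratelimit-remaining: 2
//   x-ratelimit-reset:     3
#include <memory>

#include "extensions/filters/http/ratelimit/ratelimit_headers.h"

#include "test/extensions/filters/common/ratelimit/utils.h"

namespace Envoy {

Http::ResponseHeaderMapPtr buildExampleHeaders() {
  Extensions::Filters::Common::RateLimit::DescriptorStatusList statuses{
      RateLimit::buildDescriptorStatus(
          1, envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::MINUTE, "first", 2, 3),
      RateLimit::buildDescriptorStatus(
          4, envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::HOUR, "second", 5, 6)};
  // The filter calls this from complete() when enable_x_ratelimit_headers is DRAFT_VERSION_02.
  return Extensions::HttpFilters::RateLimitFilter::XRateLimitHeaderUtils::create(
      std::make_unique<Extensions::Filters::Common::RateLimit::DescriptorStatusList>(statuses));
}

} // namespace Envoy
```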
X-RateLimit-Reset header is emitted, but it would need a followup in the ratelimit service, which I will do once this is merged. Signed-off-by: Petr Pchelko --- .../http/ratelimit/v3/rate_limit.proto | 37 ++++- api/envoy/service/ratelimit/v3/rls.proto | 5 + docs/root/version_history/current.rst | 1 + .../http/ratelimit/v3/rate_limit.proto | 37 ++++- .../envoy/service/ratelimit/v3/rls.proto | 5 + .../extensions/filters/common/ratelimit/BUILD | 1 + .../filters/common/ratelimit/ratelimit.h | 8 +- .../common/ratelimit/ratelimit_impl.cc | 8 +- .../extensions/filters/http/ratelimit/BUILD | 11 ++ .../filters/http/ratelimit/ratelimit.cc | 14 ++ .../filters/http/ratelimit/ratelimit.h | 6 + .../http/ratelimit/ratelimit_headers.cc | 82 +++++++++ .../http/ratelimit/ratelimit_headers.h | 40 +++++ .../filters/network/ratelimit/ratelimit.cc | 5 +- .../filters/network/ratelimit/ratelimit.h | 1 + .../filters/ratelimit/ratelimit.cc | 2 + .../filters/ratelimit/ratelimit.h | 1 + .../network/filter_manager_impl_test.cc | 2 +- .../extensions/filters/common/ratelimit/BUILD | 9 + .../common/ratelimit/ratelimit_impl_test.cc | 17 +- .../filters/common/ratelimit/utils.h | 28 ++++ test/extensions/filters/http/ratelimit/BUILD | 14 ++ .../http/ratelimit/ratelimit_headers_test.cc | 90 ++++++++++ .../ratelimit/ratelimit_integration_test.cc | 104 +++++++++++- .../filters/http/ratelimit/ratelimit_test.cc | 156 ++++++++++++++++-- .../network/ratelimit/ratelimit_test.cc | 19 ++- .../filters/ratelimit/ratelimit_test.cc | 23 ++- test/mocks/http/mocks.h | 12 +- 28 files changed, 683 insertions(+), 55 deletions(-) create mode 100644 source/extensions/filters/http/ratelimit/ratelimit_headers.cc create mode 100644 source/extensions/filters/http/ratelimit/ratelimit_headers.h create mode 100644 test/extensions/filters/common/ratelimit/utils.h create mode 100644 test/extensions/filters/http/ratelimit/ratelimit_headers_test.cc diff --git a/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto b/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto index 057b7c3d4403..d80f9fcaed83 100644 --- a/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto +++ b/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto @@ -19,11 +19,20 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Rate limit :ref:`configuration overview `. // [#extension: envoy.filters.http.ratelimit] -// [#next-free-field: 8] +// [#next-free-field: 9] message RateLimit { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.rate_limit.v2.RateLimit"; + // Defines the version of the standard to use for X-RateLimit headers. + enum XRateLimitHeadersRFCVersion { + // X-RateLimit headers disabled. + OFF = 0; + + // Use `draft RFC Version 02 `_. + DRAFT_VERSION_02 = 1; + } + // The rate limit domain to use when calling the rate limit service. string domain = 1 [(validate.rules).string = {min_bytes: 1}]; @@ -64,4 +73,30 @@ message RateLimit { // success. config.ratelimit.v3.RateLimitServiceConfig rate_limit_service = 7 [(validate.rules).message = {required: true}]; + + // Defines the standard version to use for X-RateLimit headers emitted by the filter: + // + // * ``X-RateLimit-Limit`` - indicates the request-quota associated to the + // client in the current time-window followed by the description of the + // quota policy. The values are returned by the rate limiting service in + // :ref:`current_limit` + // field. Example: `10, 10;w=1;name="per-ip", 1000;w=3600`. 
+ // * ``X-RateLimit-Remaining`` - indicates the remaining requests in the + // current time-window. The values are returned by the rate limiting service + // in :ref:`limit_remaining` + // field. + // * ``X-RateLimit-Reset`` - indicates the number of seconds until reset of + // the current time-window. The values are returned by the rate limiting service + // in :ref:`duration_until_reset` + // field. + // + // In case rate limiting policy specifies more then one time window, the values + // above represent the window that is closest to reaching its limit. + // + // For more information about the headers specification see selected version of + // the `draft RFC `_. + // + // Disabled by default. + XRateLimitHeadersRFCVersion enable_x_ratelimit_headers = 8 + [(validate.rules).enum = {defined_only: true}]; } diff --git a/api/envoy/service/ratelimit/v3/rls.proto b/api/envoy/service/ratelimit/v3/rls.proto index 06cb6a9e5550..42f24cfb0805 100644 --- a/api/envoy/service/ratelimit/v3/rls.proto +++ b/api/envoy/service/ratelimit/v3/rls.proto @@ -5,6 +5,8 @@ package envoy.service.ratelimit.v3; import "envoy/config/core/v3/base.proto"; import "envoy/extensions/common/ratelimit/v3/ratelimit.proto"; +import "google/protobuf/duration.proto"; + import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -110,6 +112,9 @@ message RateLimitResponse { // The limit remaining in the current time unit. uint32 limit_remaining = 3; + + // Duration until reset of the current limit window. + google.protobuf.Duration duration_until_reset = 4; } // The overall response code which takes into account all of the descriptors that were passed diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 497bb0047df5..2c2b2fb85c02 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -58,6 +58,7 @@ New Features * load balancer: added a :ref:`configuration` option to specify the active request bias used by the least request load balancer. * lua: added Lua APIs to access :ref:`SSL connection info ` object. * postgres network filter: :ref:`metadata ` is produced based on SQL query. +* ratelimit: added :ref:`enable_x_ratelimit_headers ` option to enable `X-RateLimit-*` headers as defined in `draft RFC `_. * router: added new :ref:`envoy-ratelimited` retry policy, which allows retrying envoy's own rate limited responses. diff --git a/generated_api_shadow/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto b/generated_api_shadow/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto index 057b7c3d4403..d80f9fcaed83 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto @@ -19,11 +19,20 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Rate limit :ref:`configuration overview `. // [#extension: envoy.filters.http.ratelimit] -// [#next-free-field: 8] +// [#next-free-field: 9] message RateLimit { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.rate_limit.v2.RateLimit"; + // Defines the version of the standard to use for X-RateLimit headers. + enum XRateLimitHeadersRFCVersion { + // X-RateLimit headers disabled. + OFF = 0; + + // Use `draft RFC Version 02 `_. + DRAFT_VERSION_02 = 1; + } + // The rate limit domain to use when calling the rate limit service. 
string domain = 1 [(validate.rules).string = {min_bytes: 1}]; @@ -64,4 +73,30 @@ message RateLimit { // success. config.ratelimit.v3.RateLimitServiceConfig rate_limit_service = 7 [(validate.rules).message = {required: true}]; + + // Defines the standard version to use for X-RateLimit headers emitted by the filter: + // + // * ``X-RateLimit-Limit`` - indicates the request-quota associated to the + // client in the current time-window followed by the description of the + // quota policy. The values are returned by the rate limiting service in + // :ref:`current_limit` + // field. Example: `10, 10;w=1;name="per-ip", 1000;w=3600`. + // * ``X-RateLimit-Remaining`` - indicates the remaining requests in the + // current time-window. The values are returned by the rate limiting service + // in :ref:`limit_remaining` + // field. + // * ``X-RateLimit-Reset`` - indicates the number of seconds until reset of + // the current time-window. The values are returned by the rate limiting service + // in :ref:`duration_until_reset` + // field. + // + // In case rate limiting policy specifies more then one time window, the values + // above represent the window that is closest to reaching its limit. + // + // For more information about the headers specification see selected version of + // the `draft RFC `_. + // + // Disabled by default. + XRateLimitHeadersRFCVersion enable_x_ratelimit_headers = 8 + [(validate.rules).enum = {defined_only: true}]; } diff --git a/generated_api_shadow/envoy/service/ratelimit/v3/rls.proto b/generated_api_shadow/envoy/service/ratelimit/v3/rls.proto index 06cb6a9e5550..42f24cfb0805 100644 --- a/generated_api_shadow/envoy/service/ratelimit/v3/rls.proto +++ b/generated_api_shadow/envoy/service/ratelimit/v3/rls.proto @@ -5,6 +5,8 @@ package envoy.service.ratelimit.v3; import "envoy/config/core/v3/base.proto"; import "envoy/extensions/common/ratelimit/v3/ratelimit.proto"; +import "google/protobuf/duration.proto"; + import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -110,6 +112,9 @@ message RateLimitResponse { // The limit remaining in the current time unit. uint32 limit_remaining = 3; + + // Duration until reset of the current limit window. 
+ google.protobuf.Duration duration_until_reset = 4; } // The overall response code which takes into account all of the descriptors that were passed diff --git a/source/extensions/filters/common/ratelimit/BUILD b/source/extensions/filters/common/ratelimit/BUILD index e98dc90a8916..4bf0b36b1e5d 100644 --- a/source/extensions/filters/common/ratelimit/BUILD +++ b/source/extensions/filters/common/ratelimit/BUILD @@ -39,6 +39,7 @@ envoy_cc_library( "//include/envoy/singleton:manager_interface", "//include/envoy/tracing:http_tracer_interface", "//source/common/stats:symbol_table_lib", + "@envoy_api//envoy/service/ratelimit/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/filters/common/ratelimit/ratelimit.h b/source/extensions/filters/common/ratelimit/ratelimit.h index bb4317eb2a38..4ad48e7a87ab 100644 --- a/source/extensions/filters/common/ratelimit/ratelimit.h +++ b/source/extensions/filters/common/ratelimit/ratelimit.h @@ -7,6 +7,7 @@ #include "envoy/common/pure.h" #include "envoy/ratelimit/ratelimit.h" +#include "envoy/service/ratelimit/v3/rls.pb.h" #include "envoy/singleton/manager.h" #include "envoy/tracing/http_tracer.h" @@ -30,6 +31,10 @@ enum class LimitStatus { OverLimit }; +using DescriptorStatusList = + std::vector; +using DescriptorStatusListPtr = std::unique_ptr; + /** * Async callbacks used during limit() calls. */ @@ -41,7 +46,8 @@ class RequestCallbacks { * Called when a limit request is complete. The resulting status, * response headers and request headers to be forwarded to the upstream are supplied. */ - virtual void complete(LimitStatus status, Http::ResponseHeaderMapPtr&& response_headers_to_add, + virtual void complete(LimitStatus status, DescriptorStatusListPtr&& descriptor_statuses, + Http::ResponseHeaderMapPtr&& response_headers_to_add, Http::RequestHeaderMapPtr&& request_headers_to_add) PURE; }; diff --git a/source/extensions/filters/common/ratelimit/ratelimit_impl.cc b/source/extensions/filters/common/ratelimit/ratelimit_impl.cc index fb8f7bb3abe7..5a93471af903 100644 --- a/source/extensions/filters/common/ratelimit/ratelimit_impl.cc +++ b/source/extensions/filters/common/ratelimit/ratelimit_impl.cc @@ -7,7 +7,6 @@ #include "envoy/config/core/v3/grpc_service.pb.h" #include "envoy/extensions/common/ratelimit/v3/ratelimit.pb.h" -#include "envoy/service/ratelimit/v3/rls.pb.h" #include "envoy/stats/scope.h" #include "common/common/assert.h" @@ -101,7 +100,10 @@ void GrpcClientImpl::onSuccess( request_headers_to_add->addCopy(Http::LowerCaseString(h.key()), h.value()); } } - callbacks_->complete(status, std::move(response_headers_to_add), + + DescriptorStatusListPtr descriptor_statuses = std::make_unique( + response->statuses().begin(), response->statuses().end()); + callbacks_->complete(status, std::move(descriptor_statuses), std::move(response_headers_to_add), std::move(request_headers_to_add)); callbacks_ = nullptr; } @@ -109,7 +111,7 @@ void GrpcClientImpl::onSuccess( void GrpcClientImpl::onFailure(Grpc::Status::GrpcStatus status, const std::string&, Tracing::Span&) { ASSERT(status != Grpc::Status::WellKnownGrpcStatus::Ok); - callbacks_->complete(LimitStatus::Error, nullptr, nullptr); + callbacks_->complete(LimitStatus::Error, nullptr, nullptr, nullptr); callbacks_ = nullptr; } diff --git a/source/extensions/filters/http/ratelimit/BUILD b/source/extensions/filters/http/ratelimit/BUILD index 9119aa35a26d..0b9584711194 100644 --- a/source/extensions/filters/http/ratelimit/BUILD +++ b/source/extensions/filters/http/ratelimit/BUILD @@ -17,6 +17,7 @@ envoy_cc_library( 
srcs = ["ratelimit.cc"], hdrs = ["ratelimit.h"], deps = [ + ":ratelimit_headers_lib", "//include/envoy/http:codes_interface", "//include/envoy/ratelimit:ratelimit_interface", "//source/common/common:assert_lib", @@ -30,6 +31,16 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "ratelimit_headers_lib", + srcs = ["ratelimit_headers.cc"], + hdrs = ["ratelimit_headers.h"], + deps = [ + "//source/common/http:header_map_lib", + "//source/extensions/filters/common/ratelimit:ratelimit_client_interface", + ], +) + envoy_cc_extension( name = "config", srcs = ["config.cc"], diff --git a/source/extensions/filters/http/ratelimit/ratelimit.cc b/source/extensions/filters/http/ratelimit/ratelimit.cc index 2bec4783a626..c2c2b36b9e3a 100644 --- a/source/extensions/filters/http/ratelimit/ratelimit.cc +++ b/source/extensions/filters/http/ratelimit/ratelimit.cc @@ -12,6 +12,8 @@ #include "common/http/header_utility.h" #include "common/router/config_impl.h" +#include "extensions/filters/http/ratelimit/ratelimit_headers.h" + namespace Envoy { namespace Extensions { namespace HttpFilters { @@ -125,6 +127,7 @@ void Filter::onDestroy() { } void Filter::complete(Filters::Common::RateLimit::LimitStatus status, + Filters::Common::RateLimit::DescriptorStatusListPtr&& descriptor_statuses, Http::ResponseHeaderMapPtr&& response_headers_to_add, Http::RequestHeaderMapPtr&& request_headers_to_add) { state_ = State::Complete; @@ -161,6 +164,17 @@ void Filter::complete(Filters::Common::RateLimit::LimitStatus status, break; } + if (config_->enableXRateLimitHeaders()) { + Http::ResponseHeaderMapPtr rate_limit_headers = + XRateLimitHeaderUtils::create(std::move(descriptor_statuses)); + if (response_headers_to_add_ == nullptr) { + response_headers_to_add_ = Http::ResponseHeaderMapImpl::create(); + } + Http::HeaderUtility::addHeaders(*response_headers_to_add_, *rate_limit_headers); + } else { + descriptor_statuses = nullptr; + } + if (status == Filters::Common::RateLimit::LimitStatus::OverLimit && config_->runtime().snapshot().featureEnabled("ratelimit.http_filter_enforcing", 100)) { state_ = State::Responded; diff --git a/source/extensions/filters/http/ratelimit/ratelimit.h b/source/extensions/filters/http/ratelimit/ratelimit.h index c47e93cfde4f..26937019f2ef 100644 --- a/source/extensions/filters/http/ratelimit/ratelimit.h +++ b/source/extensions/filters/http/ratelimit/ratelimit.h @@ -43,6 +43,9 @@ class FilterConfig { : stringToType(config.request_type())), local_info_(local_info), scope_(scope), runtime_(runtime), failure_mode_deny_(config.failure_mode_deny()), + enable_x_ratelimit_headers_( + config.enable_x_ratelimit_headers() == + envoy::extensions::filters::http::ratelimit::v3::RateLimit::DRAFT_VERSION_02), rate_limited_grpc_status_( config.rate_limited_as_resource_exhausted() ? 
absl::make_optional(Grpc::Status::WellKnownGrpcStatus::ResourceExhausted) @@ -55,6 +58,7 @@ class FilterConfig { Stats::Scope& scope() { return scope_; } FilterRequestType requestType() const { return request_type_; } bool failureModeAllow() const { return !failure_mode_deny_; } + bool enableXRateLimitHeaders() const { return enable_x_ratelimit_headers_; } const absl::optional rateLimitedGrpcStatus() const { return rate_limited_grpc_status_; } @@ -80,6 +84,7 @@ class FilterConfig { Stats::Scope& scope_; Runtime::Loader& runtime_; const bool failure_mode_deny_; + const bool enable_x_ratelimit_headers_; const absl::optional rate_limited_grpc_status_; Http::Context& http_context_; Filters::Common::RateLimit::StatNames stat_names_; @@ -117,6 +122,7 @@ class Filter : public Http::StreamFilter, public Filters::Common::RateLimit::Req // RateLimit::RequestCallbacks void complete(Filters::Common::RateLimit::LimitStatus status, + Filters::Common::RateLimit::DescriptorStatusListPtr&& descriptor_statuses, Http::ResponseHeaderMapPtr&& response_headers_to_add, Http::RequestHeaderMapPtr&& request_headers_to_add) override; diff --git a/source/extensions/filters/http/ratelimit/ratelimit_headers.cc b/source/extensions/filters/http/ratelimit/ratelimit_headers.cc new file mode 100644 index 000000000000..097171b108f5 --- /dev/null +++ b/source/extensions/filters/http/ratelimit/ratelimit_headers.cc @@ -0,0 +1,82 @@ +#include "extensions/filters/http/ratelimit/ratelimit_headers.h" + +#include "common/http/header_map_impl.h" + +#include "absl/strings/substitute.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace RateLimitFilter { + +Http::ResponseHeaderMapPtr XRateLimitHeaderUtils::create( + Filters::Common::RateLimit::DescriptorStatusListPtr&& descriptor_statuses) { + Http::ResponseHeaderMapPtr result = Http::ResponseHeaderMapImpl::create(); + if (!descriptor_statuses || descriptor_statuses->empty()) { + descriptor_statuses = nullptr; + return result; + } + + absl::optional + min_remaining_limit_status; + std::string quota_policy; + for (auto&& status : *descriptor_statuses) { + if (!status.has_current_limit()) { + continue; + } + if (!min_remaining_limit_status || + status.limit_remaining() < min_remaining_limit_status.value().limit_remaining()) { + min_remaining_limit_status.emplace(status); + } + const uint32_t window = convertRateLimitUnit(status.current_limit().unit()); + // Constructing the quota-policy per RFC + // https://tools.ietf.org/id/draft-polli-ratelimit-headers-02.html#name-ratelimit-limit + // Example of the result: `, 10;w=1;name="per-ip", 1000;w=3600` + if (window) { + // For each descriptor status append `;w=` + absl::SubstituteAndAppend("a_policy, ", $0;$1=$2", + status.current_limit().requests_per_unit(), + XRateLimitHeaders::get().QuotaPolicyKeys.Window, window); + if (!status.current_limit().name().empty()) { + // If the descriptor has a name, append `;name=""` + absl::SubstituteAndAppend("a_policy, ";$0=\"$1\"", + XRateLimitHeaders::get().QuotaPolicyKeys.Name, + status.current_limit().name()); + } + } + } + + if (min_remaining_limit_status) { + const std::string rate_limit_limit = absl::StrCat( + min_remaining_limit_status.value().current_limit().requests_per_unit(), quota_policy); + result->addReferenceKey(XRateLimitHeaders::get().XRateLimitLimit, rate_limit_limit); + result->addReferenceKey(XRateLimitHeaders::get().XRateLimitRemaining, + min_remaining_limit_status.value().limit_remaining()); + 
result->addReferenceKey(XRateLimitHeaders::get().XRateLimitReset, + min_remaining_limit_status.value().duration_until_reset().seconds()); + } + descriptor_statuses = nullptr; + return result; +} + +uint32_t XRateLimitHeaderUtils::convertRateLimitUnit( + const envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::Unit unit) { + switch (unit) { + case envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::SECOND: + return 1; + case envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::MINUTE: + return 60; + case envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::HOUR: + return 60 * 60; + case envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::DAY: + return 24 * 60 * 60; + case envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::UNKNOWN: + default: + return 0; + } +} + +} // namespace RateLimitFilter +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/ratelimit/ratelimit_headers.h b/source/extensions/filters/http/ratelimit/ratelimit_headers.h new file mode 100644 index 000000000000..047bf495defc --- /dev/null +++ b/source/extensions/filters/http/ratelimit/ratelimit_headers.h @@ -0,0 +1,40 @@ +#pragma once + +#include "envoy/http/header_map.h" + +#include "common/singleton/const_singleton.h" + +#include "extensions/filters/common/ratelimit/ratelimit.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace RateLimitFilter { + +class XRateLimitHeaderValues { +public: + const Http::LowerCaseString XRateLimitLimit{"x-ratelimit-limit"}; + const Http::LowerCaseString XRateLimitRemaining{"x-ratelimit-remaining"}; + const Http::LowerCaseString XRateLimitReset{"x-ratelimit-reset"}; + + struct { + const std::string Window{"w"}; + const std::string Name{"name"}; + } QuotaPolicyKeys; +}; +using XRateLimitHeaders = ConstSingleton; + +class XRateLimitHeaderUtils { +public: + static Http::ResponseHeaderMapPtr + create(Filters::Common::RateLimit::DescriptorStatusListPtr&& descriptor_statuses); + +private: + static uint32_t + convertRateLimitUnit(envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::Unit unit); +}; + +} // namespace RateLimitFilter +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/ratelimit/ratelimit.cc b/source/extensions/filters/network/ratelimit/ratelimit.cc index 7ef447a8af2a..430508ce3b61 100644 --- a/source/extensions/filters/network/ratelimit/ratelimit.cc +++ b/source/extensions/filters/network/ratelimit/ratelimit.cc @@ -69,8 +69,9 @@ void Filter::onEvent(Network::ConnectionEvent event) { } } -void Filter::complete(Filters::Common::RateLimit::LimitStatus status, Http::ResponseHeaderMapPtr&&, - Http::RequestHeaderMapPtr&&) { +void Filter::complete(Filters::Common::RateLimit::LimitStatus status, + Filters::Common::RateLimit::DescriptorStatusListPtr&&, + Http::ResponseHeaderMapPtr&&, Http::RequestHeaderMapPtr&&) { status_ = Status::Complete; config_->stats().active_.dec(); diff --git a/source/extensions/filters/network/ratelimit/ratelimit.h b/source/extensions/filters/network/ratelimit/ratelimit.h index 2babfd85dcd2..eba34f434867 100644 --- a/source/extensions/filters/network/ratelimit/ratelimit.h +++ b/source/extensions/filters/network/ratelimit/ratelimit.h @@ -92,6 +92,7 @@ class Filter : public Network::ReadFilter, // RateLimit::RequestCallbacks void complete(Filters::Common::RateLimit::LimitStatus status, + Filters::Common::RateLimit::DescriptorStatusListPtr&& 
descriptor_statuses, Http::ResponseHeaderMapPtr&& response_headers_to_add, Http::RequestHeaderMapPtr&& request_headers_to_add) override; diff --git a/source/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit.cc b/source/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit.cc index 2c85cb099818..e26a565f5856 100644 --- a/source/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit.cc +++ b/source/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit.cc @@ -58,11 +58,13 @@ void Filter::onDestroy() { } void Filter::complete(Filters::Common::RateLimit::LimitStatus status, + Filters::Common::RateLimit::DescriptorStatusListPtr&& descriptor_statuses, Http::ResponseHeaderMapPtr&& response_headers_to_add, Http::RequestHeaderMapPtr&& request_headers_to_add) { // TODO(zuercher): Store headers to append to a response. Adding them to a local reply (over // limit or error) is a matter of modifying the callbacks to allow it. Adding them to an upstream // response requires either response (aka encoder) filters or some other mechanism. + UNREFERENCED_PARAMETER(descriptor_statuses); UNREFERENCED_PARAMETER(response_headers_to_add); UNREFERENCED_PARAMETER(request_headers_to_add); diff --git a/source/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit.h b/source/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit.h index 90a244ca78e0..caa5333cda65 100644 --- a/source/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit.h +++ b/source/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit.h @@ -77,6 +77,7 @@ class Filter : public ThriftProxy::ThriftFilters::PassThroughDecoderFilter, // RateLimit::RequestCallbacks void complete(Filters::Common::RateLimit::LimitStatus status, + Filters::Common::RateLimit::DescriptorStatusListPtr&& descriptor_statuses, Http::ResponseHeaderMapPtr&& response_headers_to_add, Http::RequestHeaderMapPtr&& request_headers_to_add) override; diff --git a/test/common/network/filter_manager_impl_test.cc b/test/common/network/filter_manager_impl_test.cc index 9a3feee48b1e..76e066e57d1d 100644 --- a/test/common/network/filter_manager_impl_test.cc +++ b/test/common/network/filter_manager_impl_test.cc @@ -416,7 +416,7 @@ stat_prefix: name .WillOnce(Return(&conn_pool)); request_callbacks->complete(Extensions::Filters::Common::RateLimit::LimitStatus::OK, nullptr, - nullptr); + nullptr, nullptr); conn_pool.poolReady(upstream_connection); diff --git a/test/extensions/filters/common/ratelimit/BUILD b/test/extensions/filters/common/ratelimit/BUILD index eb4d027b6eef..652af79831f9 100644 --- a/test/extensions/filters/common/ratelimit/BUILD +++ b/test/extensions/filters/common/ratelimit/BUILD @@ -2,6 +2,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", "envoy_cc_test", + "envoy_cc_test_library", "envoy_package", ) @@ -32,3 +33,11 @@ envoy_cc_mock( "//source/extensions/filters/common/ratelimit:ratelimit_client_interface", ], ) + +envoy_cc_test_library( + name = "ratelimit_utils", + hdrs = ["utils.h"], + deps = [ + "@envoy_api//envoy/service/ratelimit/v3:pkg_cc_proto", + ], +) diff --git a/test/extensions/filters/common/ratelimit/ratelimit_impl_test.cc b/test/extensions/filters/common/ratelimit/ratelimit_impl_test.cc index 2646f8966c96..bb4545583993 100644 --- a/test/extensions/filters/common/ratelimit/ratelimit_impl_test.cc +++ b/test/extensions/filters/common/ratelimit/ratelimit_impl_test.cc @@ -35,13 +35,16 @@ namespace { class MockRequestCallbacks : public 
RequestCallbacks { public: - void complete(LimitStatus status, Http::ResponseHeaderMapPtr&& response_headers_to_add, + void complete(LimitStatus status, DescriptorStatusListPtr&& descriptor_statuses, + Http::ResponseHeaderMapPtr&& response_headers_to_add, Http::RequestHeaderMapPtr&& request_headers_to_add) override { - complete_(status, response_headers_to_add.get(), request_headers_to_add.get()); + complete_(status, descriptor_statuses.get(), response_headers_to_add.get(), + request_headers_to_add.get()); } MOCK_METHOD(void, complete_, - (LimitStatus status, const Http::ResponseHeaderMap* response_headers_to_add, + (LimitStatus status, const DescriptorStatusList* descriptor_statuses, + const Http::ResponseHeaderMap* response_headers_to_add, const Http::RequestHeaderMap* request_headers_to_add)); }; @@ -85,7 +88,7 @@ TEST_F(RateLimitGrpcClientTest, Basic) { response = std::make_unique(); response->set_overall_code(envoy::service::ratelimit::v3::RateLimitResponse::OVER_LIMIT); EXPECT_CALL(span_, setTag(Eq("ratelimit_status"), Eq("over_limit"))); - EXPECT_CALL(request_callbacks_, complete_(LimitStatus::OverLimit, _, _)); + EXPECT_CALL(request_callbacks_, complete_(LimitStatus::OverLimit, _, _, _)); client_.onSuccess(std::move(response), span_); } @@ -104,7 +107,7 @@ TEST_F(RateLimitGrpcClientTest, Basic) { response = std::make_unique(); response->set_overall_code(envoy::service::ratelimit::v3::RateLimitResponse::OK); EXPECT_CALL(span_, setTag(Eq("ratelimit_status"), Eq("ok"))); - EXPECT_CALL(request_callbacks_, complete_(LimitStatus::OK, _, _)); + EXPECT_CALL(request_callbacks_, complete_(LimitStatus::OK, _, _, _)); client_.onSuccess(std::move(response), span_); } @@ -121,7 +124,7 @@ TEST_F(RateLimitGrpcClientTest, Basic) { Tracing::NullSpan::instance()); response = std::make_unique(); - EXPECT_CALL(request_callbacks_, complete_(LimitStatus::Error, _, _)); + EXPECT_CALL(request_callbacks_, complete_(LimitStatus::Error, _, _, _)); client_.onFailure(Grpc::Status::Unknown, "", span_); } @@ -144,7 +147,7 @@ TEST_F(RateLimitGrpcClientTest, Basic) { response = std::make_unique(); response->set_overall_code(envoy::service::ratelimit::v3::RateLimitResponse::OK); EXPECT_CALL(span_, setTag(Eq("ratelimit_status"), Eq("ok"))); - EXPECT_CALL(request_callbacks_, complete_(LimitStatus::OK, _, _)); + EXPECT_CALL(request_callbacks_, complete_(LimitStatus::OK, _, _, _)); client_.onSuccess(std::move(response), span_); } } diff --git a/test/extensions/filters/common/ratelimit/utils.h b/test/extensions/filters/common/ratelimit/utils.h new file mode 100644 index 000000000000..d993ed35e4a7 --- /dev/null +++ b/test/extensions/filters/common/ratelimit/utils.h @@ -0,0 +1,28 @@ +#pragma once + +#include + +#include "envoy/service/ratelimit/v3/rls.pb.h" + +namespace Envoy { +namespace RateLimit { + +inline envoy::service::ratelimit::v3::RateLimitResponse_DescriptorStatus +buildDescriptorStatus(uint32_t requests_per_unit, + envoy::service::ratelimit::v3::RateLimitResponse_RateLimit_Unit unit, + std::string name, uint32_t limit_remaining, uint32_t seconds_until_reset) { + envoy::service::ratelimit::v3::RateLimitResponse_DescriptorStatus statusMsg; + statusMsg.set_limit_remaining(limit_remaining); + statusMsg.mutable_duration_until_reset()->set_seconds(seconds_until_reset); + if (requests_per_unit) { + envoy::service::ratelimit::v3::RateLimitResponse_RateLimit* limitMsg = + statusMsg.mutable_current_limit(); + limitMsg->set_requests_per_unit(requests_per_unit); + limitMsg->set_unit(unit); + limitMsg->set_name(name); + } + 
return statusMsg; +} + +} // namespace RateLimit +} // namespace Envoy diff --git a/test/extensions/filters/http/ratelimit/BUILD b/test/extensions/filters/http/ratelimit/BUILD index 3f3a1e47b70f..e209aa081608 100644 --- a/test/extensions/filters/http/ratelimit/BUILD +++ b/test/extensions/filters/http/ratelimit/BUILD @@ -23,6 +23,7 @@ envoy_extension_cc_test( "//source/extensions/filters/common/ratelimit:ratelimit_lib", "//source/extensions/filters/http/ratelimit:ratelimit_lib", "//test/extensions/filters/common/ratelimit:ratelimit_mocks", + "//test/extensions/filters/common/ratelimit:ratelimit_utils", "//test/mocks/http:http_mocks", "//test/mocks/local_info:local_info_mocks", "//test/mocks/ratelimit:ratelimit_mocks", @@ -58,6 +59,7 @@ envoy_extension_cc_test( "//source/common/grpc:common_lib", "//source/extensions/filters/http/ratelimit:config", "//test/common/grpc:grpc_client_integration_lib", + "//test/extensions/filters/common/ratelimit:ratelimit_utils", "//test/integration:http_integration_lib", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", "@envoy_api//envoy/config/listener/v3:pkg_cc_proto", @@ -66,3 +68,15 @@ envoy_extension_cc_test( "@envoy_api//envoy/service/ratelimit/v3:pkg_cc_proto", ], ) + +envoy_extension_cc_test( + name = "ratelimit_headers_test", + srcs = ["ratelimit_headers_test.cc"], + extension_name = "envoy.filters.http.cache", + deps = [ + "//source/extensions/filters/http/ratelimit:ratelimit_headers_lib", + "//test/extensions/filters/common/ratelimit:ratelimit_utils", + "//test/mocks/http:http_mocks", + "//test/test_common:utility_lib", + ], +) diff --git a/test/extensions/filters/http/ratelimit/ratelimit_headers_test.cc b/test/extensions/filters/http/ratelimit/ratelimit_headers_test.cc new file mode 100644 index 000000000000..9acc0ca72cba --- /dev/null +++ b/test/extensions/filters/http/ratelimit/ratelimit_headers_test.cc @@ -0,0 +1,90 @@ +#include +#include + +#include "extensions/filters/http/ratelimit/ratelimit_headers.h" + +#include "test/extensions/filters/common/ratelimit/utils.h" +#include "test/mocks/http/mocks.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace RateLimitFilter { +namespace { + +using Envoy::RateLimit::buildDescriptorStatus; +using Filters::Common::RateLimit::DescriptorStatusList; + +struct RateLimitHeadersTestCase { + Http::TestResponseHeaderMapImpl expected_headers; + DescriptorStatusList descriptor_statuses; +}; + +class RateLimitHeadersTest : public testing::TestWithParam { +public: + static const std::vector& getTestCases() { + CONSTRUCT_ON_FIRST_USE( + std::vector, + // Empty descriptor statuses + {{}, {}}, + // Status with no current limit is ignored + {{{"x-ratelimit-limit", "4, 4;w=3600;name=\"second\""}, + {"x-ratelimit-remaining", "5"}, + {"x-ratelimit-reset", "6"}}, + {// passing 0 will cause it not to set a current limit + buildDescriptorStatus(0, + envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::MINUTE, + "first", 2, 3), + buildDescriptorStatus(4, + envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::HOUR, + "second", 5, 6)}}, + // Empty name is not appended + {{{"x-ratelimit-limit", "1, 1;w=60"}, + {"x-ratelimit-remaining", "2"}, + {"x-ratelimit-reset", "3"}}, + { + // passing 0 will cause it not to set a current limit + buildDescriptorStatus( + 1, envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::MINUTE, "", 2, 3), + }}, + // Unknown unit is ignored in window, but not overall + {{{"x-ratelimit-limit", 
"1, 4;w=3600;name=\"second\""}, + {"x-ratelimit-remaining", "2"}, + {"x-ratelimit-reset", "3"}}, + {// passing 0 will cause it not to set a current limit + buildDescriptorStatus( + 1, envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::UNKNOWN, "first", 2, + 3), + buildDescriptorStatus(4, + envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::HOUR, + "second", 5, 6)}}, + // Normal case, multiple arguments + {{{"x-ratelimit-limit", "1, 1;w=60;name=\"first\", 4;w=3600;name=\"second\""}, + {"x-ratelimit-remaining", "2"}, + {"x-ratelimit-reset", "3"}}, + {buildDescriptorStatus(1, + envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::MINUTE, + "first", 2, 3), + buildDescriptorStatus(4, + envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::HOUR, + "second", 5, 6)}}, ); + } +}; + +INSTANTIATE_TEST_SUITE_P(RateLimitHeadersTest, RateLimitHeadersTest, + testing::ValuesIn(RateLimitHeadersTest::getTestCases())); + +TEST_P(RateLimitHeadersTest, RateLimitHeadersTest) { + Http::ResponseHeaderMapPtr result = XRateLimitHeaderUtils::create( + std::make_unique(GetParam().descriptor_statuses)); + EXPECT_THAT(result, HeaderMapEqual(&GetParam().expected_headers)); +} + +} // namespace +} // namespace RateLimitFilter +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/http/ratelimit/ratelimit_integration_test.cc b/test/extensions/filters/http/ratelimit/ratelimit_integration_test.cc index 60b7812718e1..e7b831a60902 100644 --- a/test/extensions/filters/http/ratelimit/ratelimit_integration_test.cc +++ b/test/extensions/filters/http/ratelimit/ratelimit_integration_test.cc @@ -10,8 +10,10 @@ #include "common/grpc/common.h" #include "extensions/filters/http/ratelimit/config.h" +#include "extensions/filters/http/ratelimit/ratelimit_headers.h" #include "test/common/grpc/grpc_client_integration.h" +#include "test/extensions/filters/common/ratelimit/utils.h" #include "test/integration/http_integration.h" #include "gtest/gtest.h" @@ -44,6 +46,7 @@ class RatelimitIntegrationTest : public Grpc::VersionedGrpcClientIntegrationPara // enhance rate limit filter config based on the configuration of test. 
TestUtility::loadFromYaml(base_filter_config_, proto_config_); proto_config_.set_failure_mode_deny(failure_mode_deny_); + proto_config_.set_enable_x_ratelimit_headers(enable_x_ratelimit_headers_); setGrpcService(*proto_config_.mutable_rate_limit_service()->mutable_grpc_service(), "ratelimit", fake_upstreams_.back()->localAddress()); proto_config_.mutable_rate_limit_service()->set_transport_api_version(apiVersion()); @@ -127,12 +130,15 @@ class RatelimitIntegrationTest : public Grpc::VersionedGrpcClientIntegrationPara EXPECT_EQ(std::to_string(response_code), response_->headers().getStatusValue()); } - void sendRateLimitResponse(envoy::service::ratelimit::v3::RateLimitResponse::Code code, - const Http::ResponseHeaderMap& response_headers_to_add, - const Http::RequestHeaderMap& request_headers_to_add) { + void sendRateLimitResponse( + envoy::service::ratelimit::v3::RateLimitResponse::Code code, + const Extensions::Filters::Common::RateLimit::DescriptorStatusList& descriptor_statuses, + const Http::ResponseHeaderMap& response_headers_to_add, + const Http::RequestHeaderMap& request_headers_to_add) { ratelimit_request_->startGrpcStream(); envoy::service::ratelimit::v3::RateLimitResponse response_msg; response_msg.set_overall_code(code); + *response_msg.mutable_statuses() = {descriptor_statuses.begin(), descriptor_statuses.end()}; response_headers_to_add.iterate( [&response_msg](const Http::HeaderEntry& h) -> Http::HeaderMap::Iterate { @@ -168,7 +174,7 @@ class RatelimitIntegrationTest : public Grpc::VersionedGrpcClientIntegrationPara void basicFlow() { initiateClientConnection(); waitForRatelimitRequest(); - sendRateLimitResponse(envoy::service::ratelimit::v3::RateLimitResponse::OK, + sendRateLimitResponse(envoy::service::ratelimit::v3::RateLimitResponse::OK, {}, Http::TestResponseHeaderMapImpl{}, Http::TestRequestHeaderMapImpl{}); waitForSuccessfulUpstreamResponse(); cleanup(); @@ -185,6 +191,8 @@ class RatelimitIntegrationTest : public Grpc::VersionedGrpcClientIntegrationPara const uint64_t request_size_ = 1024; const uint64_t response_size_ = 512; bool failure_mode_deny_ = false; + envoy::extensions::filters::http::ratelimit::v3::RateLimit::XRateLimitHeadersRFCVersion + enable_x_ratelimit_headers_ = envoy::extensions::filters::http::ratelimit::v3::RateLimit::OFF; envoy::extensions::filters::http::ratelimit::v3::RateLimit proto_config_{}; const std::string base_filter_config_ = R"EOF( domain: some_domain @@ -198,10 +206,21 @@ class RatelimitFailureModeIntegrationTest : public RatelimitIntegrationTest { RatelimitFailureModeIntegrationTest() { failure_mode_deny_ = true; } }; +// Test verifies that response headers provided by filter work. 
+class RatelimitFilterHeadersEnabledIntegrationTest : public RatelimitIntegrationTest { +public: + RatelimitFilterHeadersEnabledIntegrationTest() { + enable_x_ratelimit_headers_ = + envoy::extensions::filters::http::ratelimit::v3::RateLimit::DRAFT_VERSION_02; + } +}; + INSTANTIATE_TEST_SUITE_P(IpVersionsClientType, RatelimitIntegrationTest, VERSIONED_GRPC_CLIENT_INTEGRATION_PARAMS); INSTANTIATE_TEST_SUITE_P(IpVersionsClientType, RatelimitFailureModeIntegrationTest, VERSIONED_GRPC_CLIENT_INTEGRATION_PARAMS); +INSTANTIATE_TEST_SUITE_P(IpVersionsClientType, RatelimitFilterHeadersEnabledIntegrationTest, + VERSIONED_GRPC_CLIENT_INTEGRATION_PARAMS); TEST_P(RatelimitIntegrationTest, Ok) { basicFlow(); } @@ -212,7 +231,7 @@ TEST_P(RatelimitIntegrationTest, OkWithHeaders) { {"x-ratelimit-remaining", "500"}}; Http::TestRequestHeaderMapImpl request_headers_to_add{{"x-ratelimit-done", "true"}}; - sendRateLimitResponse(envoy::service::ratelimit::v3::RateLimitResponse::OK, + sendRateLimitResponse(envoy::service::ratelimit::v3::RateLimitResponse::OK, {}, ratelimit_response_headers, request_headers_to_add); waitForSuccessfulUpstreamResponse(); @@ -240,7 +259,7 @@ TEST_P(RatelimitIntegrationTest, OkWithHeaders) { TEST_P(RatelimitIntegrationTest, OverLimit) { initiateClientConnection(); waitForRatelimitRequest(); - sendRateLimitResponse(envoy::service::ratelimit::v3::RateLimitResponse::OVER_LIMIT, + sendRateLimitResponse(envoy::service::ratelimit::v3::RateLimitResponse::OVER_LIMIT, {}, Http::TestResponseHeaderMapImpl{}, Http::TestRequestHeaderMapImpl{}); waitForFailedUpstreamResponse(429); cleanup(); @@ -255,7 +274,7 @@ TEST_P(RatelimitIntegrationTest, OverLimitWithHeaders) { waitForRatelimitRequest(); Http::TestResponseHeaderMapImpl ratelimit_response_headers{ {"x-ratelimit-limit", "1000"}, {"x-ratelimit-remaining", "0"}, {"retry-after", "33"}}; - sendRateLimitResponse(envoy::service::ratelimit::v3::RateLimitResponse::OVER_LIMIT, + sendRateLimitResponse(envoy::service::ratelimit::v3::RateLimitResponse::OVER_LIMIT, {}, ratelimit_response_headers, Http::TestRequestHeaderMapImpl{}); waitForFailedUpstreamResponse(429); @@ -347,5 +366,76 @@ TEST_P(RatelimitFailureModeIntegrationTest, ErrorWithFailureModeOff) { EXPECT_EQ(nullptr, test_server_->counter("cluster.cluster_0.ratelimit.failure_mode_allowed")); } +TEST_P(RatelimitFilterHeadersEnabledIntegrationTest, OkWithFilterHeaders) { + initiateClientConnection(); + waitForRatelimitRequest(); + + Extensions::Filters::Common::RateLimit::DescriptorStatusList descriptor_statuses{ + Envoy::RateLimit::buildDescriptorStatus( + 1, envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::MINUTE, "first", 2, 3), + Envoy::RateLimit::buildDescriptorStatus( + 4, envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::HOUR, "second", 5, 6)}; + sendRateLimitResponse(envoy::service::ratelimit::v3::RateLimitResponse::OK, descriptor_statuses, + Http::TestResponseHeaderMapImpl{}, Http::TestRequestHeaderMapImpl{}); + waitForSuccessfulUpstreamResponse(); + + EXPECT_THAT( + response_.get()->headers(), + Http::HeaderValueOf( + Extensions::HttpFilters::RateLimitFilter::XRateLimitHeaders::get().XRateLimitLimit, + "1, 1;w=60;name=\"first\", 4;w=3600;name=\"second\"")); + EXPECT_THAT( + response_.get()->headers(), + Http::HeaderValueOf( + Extensions::HttpFilters::RateLimitFilter::XRateLimitHeaders::get().XRateLimitRemaining, + "2")); + EXPECT_THAT( + response_.get()->headers(), + Http::HeaderValueOf( + 
Extensions::HttpFilters::RateLimitFilter::XRateLimitHeaders::get().XRateLimitReset, "3")); + + cleanup(); + + EXPECT_EQ(1, test_server_->counter("cluster.cluster_0.ratelimit.ok")->value()); + EXPECT_EQ(nullptr, test_server_->counter("cluster.cluster_0.ratelimit.over_limit")); + EXPECT_EQ(nullptr, test_server_->counter("cluster.cluster_0.ratelimit.error")); +} + +TEST_P(RatelimitFilterHeadersEnabledIntegrationTest, OverLimitWithFilterHeaders) { + initiateClientConnection(); + waitForRatelimitRequest(); + + Extensions::Filters::Common::RateLimit::DescriptorStatusList descriptor_statuses{ + Envoy::RateLimit::buildDescriptorStatus( + 1, envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::MINUTE, "first", 2, 3), + Envoy::RateLimit::buildDescriptorStatus( + 4, envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::HOUR, "second", 5, 6)}; + sendRateLimitResponse(envoy::service::ratelimit::v3::RateLimitResponse::OVER_LIMIT, + descriptor_statuses, Http::TestResponseHeaderMapImpl{}, + Http::TestRequestHeaderMapImpl{}); + waitForFailedUpstreamResponse(429); + + EXPECT_THAT( + response_.get()->headers(), + Http::HeaderValueOf( + Extensions::HttpFilters::RateLimitFilter::XRateLimitHeaders::get().XRateLimitLimit, + "1, 1;w=60;name=\"first\", 4;w=3600;name=\"second\"")); + EXPECT_THAT( + response_.get()->headers(), + Http::HeaderValueOf( + Extensions::HttpFilters::RateLimitFilter::XRateLimitHeaders::get().XRateLimitRemaining, + "2")); + EXPECT_THAT( + response_.get()->headers(), + Http::HeaderValueOf( + Extensions::HttpFilters::RateLimitFilter::XRateLimitHeaders::get().XRateLimitReset, "3")); + + cleanup(); + + EXPECT_EQ(nullptr, test_server_->counter("cluster.cluster_0.ratelimit.ok")); + EXPECT_EQ(1, test_server_->counter("cluster.cluster_0.ratelimit.over_limit")->value()); + EXPECT_EQ(nullptr, test_server_->counter("cluster.cluster_0.ratelimit.error")); +} + } // namespace } // namespace Envoy diff --git a/test/extensions/filters/http/ratelimit/ratelimit_test.cc b/test/extensions/filters/http/ratelimit/ratelimit_test.cc index e4bfb5c4b9c6..ba968cbf3a03 100644 --- a/test/extensions/filters/http/ratelimit/ratelimit_test.cc +++ b/test/extensions/filters/http/ratelimit/ratelimit_test.cc @@ -12,6 +12,7 @@ #include "extensions/filters/http/ratelimit/ratelimit.h" #include "test/extensions/filters/common/ratelimit/mocks.h" +#include "test/extensions/filters/common/ratelimit/utils.h" #include "test/mocks/http/mocks.h" #include "test/mocks/local_info/mocks.h" #include "test/mocks/ratelimit/mocks.h" @@ -74,6 +75,11 @@ class HttpRateLimitFilterTest : public testing::Test { failure_mode_deny: true )EOF"; + const std::string enable_x_ratelimit_headers_config_ = R"EOF( + domain: foo + enable_x_ratelimit_headers: DRAFT_VERSION_02 + )EOF"; + const std::string filter_config_ = R"EOF( domain: foo )EOF"; @@ -223,7 +229,8 @@ TEST_F(HttpRateLimitFilterTest, OkResponse) { EXPECT_CALL(filter_callbacks_.stream_info_, setResponseFlag(StreamInfo::ResponseFlag::RateLimited)) .Times(0); - request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr); + request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr, + nullptr); EXPECT_EQ( 1U, filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(ratelimit_ok_).value()); @@ -274,7 +281,7 @@ TEST_F(HttpRateLimitFilterTest, OkResponseWithHeaders) { {"x-ratelimit-limit", "1000"}, {"x-ratelimit-remaining", "500"}}}; request_callbacks_->complete( - Filters::Common::RateLimit::LimitStatus::OK, + 
Filters::Common::RateLimit::LimitStatus::OK, nullptr, Http::ResponseHeaderMapPtr{new Http::TestResponseHeaderMapImpl(*rl_headers)}, Http::RequestHeaderMapPtr{new Http::TestRequestHeaderMapImpl(*request_headers_to_add)}); Http::TestResponseHeaderMapImpl expected_headers(*rl_headers); @@ -287,6 +294,66 @@ TEST_F(HttpRateLimitFilterTest, OkResponseWithHeaders) { 1U, filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(ratelimit_ok_).value()); } +TEST_F(HttpRateLimitFilterTest, OkResponseWithFilterHeaders) { + SetUpTest(enable_x_ratelimit_headers_config_); + InSequence s; + + EXPECT_CALL(filter_callbacks_.route_->route_entry_.rate_limit_policy_, getApplicableRateLimit(0)) + .Times(1); + + EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _)) + .WillOnce(SetArgReferee<1>(descriptor_)); + + EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_.rate_limit_policy_, + getApplicableRateLimit(0)) + .Times(1); + + EXPECT_CALL(*client_, limit(_, "foo", + testing::ContainerEq(std::vector{ + {{{"descriptor_key", "descriptor_value"}}}}), + _)) + .WillOnce( + WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { + request_callbacks_ = &callbacks; + }))); + + request_headers_.addCopy(Http::Headers::get().RequestId, "requestid"); + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_->decodeHeaders(request_headers_, false)); + EXPECT_EQ(Http::FilterDataStatus::StopIterationAndWatermark, filter_->decodeData(data_, false)); + EXPECT_EQ(Http::FilterTrailersStatus::StopIteration, filter_->decodeTrailers(request_trailers_)); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, + filter_->encode100ContinueHeaders(response_headers_)); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers_, false)); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_data_, false)); + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers_)); + + EXPECT_CALL(filter_callbacks_, continueDecoding()); + EXPECT_CALL(filter_callbacks_.stream_info_, + setResponseFlag(StreamInfo::ResponseFlag::RateLimited)) + .Times(0); + + auto descriptor_statuses = { + Envoy::RateLimit::buildDescriptorStatus( + 1, envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::MINUTE, "first", 2, 3), + Envoy::RateLimit::buildDescriptorStatus( + 4, envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::HOUR, "second", 5, 6)}; + auto descriptor_statuses_ptr = + std::make_unique(descriptor_statuses); + request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OK, + std::move(descriptor_statuses_ptr), nullptr, nullptr); + + Http::TestResponseHeaderMapImpl expected_headers{ + {"x-ratelimit-limit", "1, 1;w=60;name=\"first\", 4;w=3600;name=\"second\""}, + {"x-ratelimit-remaining", "2"}, + {"x-ratelimit-reset", "3"}}; + Http::TestResponseHeaderMapImpl response_headers; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, false)); + EXPECT_THAT(response_headers, HeaderMapEqualRef(&expected_headers)); + EXPECT_EQ( + 1U, filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(ratelimit_ok_).value()); +} + TEST_F(HttpRateLimitFilterTest, ImmediateOkResponse) { SetUpTest(filter_config_); InSequence s; @@ -300,7 +367,8 @@ TEST_F(HttpRateLimitFilterTest, ImmediateOkResponse) { _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { - 
callbacks.complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr); + callbacks.complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr, + nullptr); }))); EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0); @@ -330,7 +398,8 @@ TEST_F(HttpRateLimitFilterTest, ImmediateErrorResponse) { _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { - callbacks.complete(Filters::Common::RateLimit::LimitStatus::Error, nullptr, nullptr); + callbacks.complete(Filters::Common::RateLimit::LimitStatus::Error, nullptr, nullptr, + nullptr); }))); EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0); @@ -368,7 +437,8 @@ TEST_F(HttpRateLimitFilterTest, ErrorResponse) { filter_->decodeHeaders(request_headers_, false)); EXPECT_CALL(filter_callbacks_, continueDecoding()); - request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::Error, nullptr, nullptr); + request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::Error, nullptr, nullptr, + nullptr); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false)); EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_)); @@ -400,7 +470,8 @@ TEST_F(HttpRateLimitFilterTest, ErrorResponseWithFailureModeAllowOff) { EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_->decodeHeaders(request_headers_, false)); - request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::Error, nullptr, nullptr); + request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::Error, nullptr, nullptr, + nullptr); EXPECT_CALL(filter_callbacks_.stream_info_, setResponseFlag(StreamInfo::ResponseFlag::RateLimitServiceError)) @@ -440,8 +511,8 @@ TEST_F(HttpRateLimitFilterTest, LimitResponse) { EXPECT_CALL(filter_callbacks_.stream_info_, setResponseFlag(StreamInfo::ResponseFlag::RateLimited)); - request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OverLimit, std::move(h), - nullptr); + request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OverLimit, nullptr, + std::move(h), nullptr); EXPECT_EQ(1U, filter_callbacks_.clusterInfo() ->statsScope() @@ -492,8 +563,8 @@ TEST_F(HttpRateLimitFilterTest, LimitResponseWithHeaders) { Http::ResponseHeaderMapPtr h{new Http::TestResponseHeaderMapImpl(*rl_headers)}; Http::RequestHeaderMapPtr uh{new Http::TestRequestHeaderMapImpl(*request_headers_to_add)}; - request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OverLimit, std::move(h), - std::move(uh)); + request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OverLimit, nullptr, + std::move(h), std::move(uh)); EXPECT_THAT(*request_headers_to_add, Not(IsSubsetOfHeaders(request_headers_))); EXPECT_EQ(1U, filter_callbacks_.clusterInfo() @@ -508,6 +579,58 @@ TEST_F(HttpRateLimitFilterTest, LimitResponseWithHeaders) { filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(upstream_rq_429_).value()); } +TEST_F(HttpRateLimitFilterTest, LimitResponseWithFilterHeaders) { + SetUpTest(enable_x_ratelimit_headers_config_); + InSequence s; + + EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _)) + .WillOnce(SetArgReferee<1>(descriptor_)); + EXPECT_CALL(*client_, limit(_, _, _, _)) + .WillOnce( + WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { + request_callbacks_ = &callbacks; + }))); + + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_->decodeHeaders(request_headers_, false)); + 
EXPECT_EQ(Http::FilterHeadersStatus::Continue, + filter_->encode100ContinueHeaders(response_headers_)); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers_, false)); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_data_, false)); + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers_)); + + Http::TestResponseHeaderMapImpl expected_headers{ + {":status", "429"}, + {"x-envoy-ratelimited", Http::Headers::get().EnvoyRateLimitedValues.True}, + {"x-ratelimit-limit", "1, 1;w=60;name=\"first\", 4;w=3600;name=\"second\""}, + {"x-ratelimit-remaining", "2"}, + {"x-ratelimit-reset", "3"}}; + EXPECT_CALL(filter_callbacks_, encodeHeaders_(HeaderMapEqualRef(&expected_headers), true)); + EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0); + EXPECT_CALL(filter_callbacks_.stream_info_, + setResponseFlag(StreamInfo::ResponseFlag::RateLimited)); + + auto descriptor_statuses = { + Envoy::RateLimit::buildDescriptorStatus( + 1, envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::MINUTE, "first", 2, 3), + Envoy::RateLimit::buildDescriptorStatus( + 4, envoy::service::ratelimit::v3::RateLimitResponse::RateLimit::HOUR, "second", 5, 6)}; + auto descriptor_statuses_ptr = + std::make_unique(descriptor_statuses); + request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OverLimit, + std::move(descriptor_statuses_ptr), nullptr, nullptr); + EXPECT_EQ(1U, filter_callbacks_.clusterInfo() + ->statsScope() + .counterFromStatName(ratelimit_over_limit_) + .value()); + EXPECT_EQ( + 1U, + filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(upstream_rq_4xx_).value()); + EXPECT_EQ( + 1U, + filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(upstream_rq_429_).value()); +} + TEST_F(HttpRateLimitFilterTest, LimitResponseRuntimeDisabled) { SetUpTest(filter_config_); InSequence s; @@ -527,8 +650,8 @@ TEST_F(HttpRateLimitFilterTest, LimitResponseRuntimeDisabled) { .WillOnce(Return(false)); EXPECT_CALL(filter_callbacks_, continueDecoding()); Http::ResponseHeaderMapPtr h{new Http::TestResponseHeaderMapImpl()}; - request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OverLimit, std::move(h), - nullptr); + request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OverLimit, nullptr, + std::move(h), nullptr); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false)); EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_)); @@ -679,7 +802,8 @@ TEST_F(HttpRateLimitFilterTest, InternalRequestType) { _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { - callbacks.complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr); + callbacks.complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr, + nullptr); }))); EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0); @@ -724,7 +848,8 @@ TEST_F(HttpRateLimitFilterTest, ExternalRequestType) { _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { - callbacks.complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr); + callbacks.complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr, + nullptr); }))); EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0); @@ -766,7 +891,8 @@ TEST_F(HttpRateLimitFilterTest, ExcludeVirtualHost) { _)) .WillOnce( 
WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { - callbacks.complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr); + callbacks.complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr, + nullptr); }))); EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0); diff --git a/test/extensions/filters/network/ratelimit/ratelimit_test.cc b/test/extensions/filters/network/ratelimit/ratelimit_test.cc index 96bc8baf679d..ac64a1d6d108 100644 --- a/test/extensions/filters/network/ratelimit/ratelimit_test.cc +++ b/test/extensions/filters/network/ratelimit/ratelimit_test.cc @@ -114,7 +114,8 @@ TEST_F(RateLimitFilterTest, OK) { EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onData(data, false)); EXPECT_CALL(filter_callbacks_, continueReading()); - request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr); + request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr, + nullptr); EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false)); @@ -141,7 +142,7 @@ TEST_F(RateLimitFilterTest, OverLimit) { EXPECT_CALL(filter_callbacks_.connection_, close(Network::ConnectionCloseType::NoFlush)); EXPECT_CALL(*client_, cancel()).Times(0); - request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OverLimit, nullptr, + request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OverLimit, nullptr, nullptr, nullptr); EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false)); @@ -170,7 +171,7 @@ TEST_F(RateLimitFilterTest, OverLimitNotEnforcing) { EXPECT_CALL(filter_callbacks_.connection_, close(_)).Times(0); EXPECT_CALL(*client_, cancel()).Times(0); EXPECT_CALL(filter_callbacks_, continueReading()); - request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OverLimit, nullptr, + request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OverLimit, nullptr, nullptr, nullptr); EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false)); @@ -195,7 +196,8 @@ TEST_F(RateLimitFilterTest, Error) { EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onData(data, false)); EXPECT_CALL(filter_callbacks_, continueReading()); - request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::Error, nullptr, nullptr); + request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::Error, nullptr, nullptr, + nullptr); EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false)); @@ -235,7 +237,8 @@ TEST_F(RateLimitFilterTest, ImmediateOK) { EXPECT_CALL(*client_, limit(_, "foo", _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { - callbacks.complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr); + callbacks.complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr, + nullptr); }))); EXPECT_EQ(Network::FilterStatus::Continue, filter_->onNewConnection()); @@ -258,7 +261,8 @@ TEST_F(RateLimitFilterTest, ImmediateError) { EXPECT_CALL(*client_, limit(_, "foo", _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { - callbacks.complete(Filters::Common::RateLimit::LimitStatus::Error, nullptr, nullptr); + callbacks.complete(Filters::Common::RateLimit::LimitStatus::Error, nullptr, nullptr, + nullptr); }))); EXPECT_EQ(Network::FilterStatus::Continue, filter_->onNewConnection()); @@ -300,7 +304,8 @@ TEST_F(RateLimitFilterTest, 
ErrorResponseWithFailureModeAllowOff) { EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onNewConnection()); Buffer::OwnedImpl data("hello"); EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onData(data, false)); - request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::Error, nullptr, nullptr); + request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::Error, nullptr, nullptr, + nullptr); EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false)); diff --git a/test/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit_test.cc b/test/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit_test.cc index 25659e77b6ef..447076e9bd15 100644 --- a/test/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit_test.cc +++ b/test/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit_test.cc @@ -226,7 +226,8 @@ TEST_F(ThriftRateLimitFilterTest, OkResponse) { EXPECT_CALL(filter_callbacks_.stream_info_, setResponseFlag(StreamInfo::ResponseFlag::RateLimited)) .Times(0); - request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr); + request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr, + nullptr); EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_.counter("ratelimit.ok").value()); @@ -245,7 +246,8 @@ TEST_F(ThriftRateLimitFilterTest, ImmediateOkResponse) { _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { - callbacks.complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr); + callbacks.complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr, + nullptr); }))); EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0); @@ -268,7 +270,8 @@ TEST_F(ThriftRateLimitFilterTest, ImmediateErrorResponse) { _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { - callbacks.complete(Filters::Common::RateLimit::LimitStatus::Error, nullptr, nullptr); + callbacks.complete(Filters::Common::RateLimit::LimitStatus::Error, nullptr, nullptr, + nullptr); }))); EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0); @@ -297,7 +300,8 @@ TEST_F(ThriftRateLimitFilterTest, ErrorResponse) { EXPECT_EQ(ThriftProxy::FilterStatus::StopIteration, filter_->messageBegin(request_metadata_)); EXPECT_CALL(filter_callbacks_, continueDecoding()); - request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::Error, nullptr, nullptr); + request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::Error, nullptr, nullptr, + nullptr); EXPECT_EQ(ThriftProxy::FilterStatus::Continue, filter_->messageEnd()); EXPECT_CALL(filter_callbacks_.stream_info_, @@ -334,7 +338,8 @@ TEST_F(ThriftRateLimitFilterTest, ErrorResponseWithFailureModeAllowOff) { })); EXPECT_CALL(filter_callbacks_.stream_info_, setResponseFlag(StreamInfo::ResponseFlag::RateLimitServiceError)); - request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::Error, nullptr, nullptr); + request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::Error, nullptr, nullptr, + nullptr); EXPECT_EQ( 1U, @@ -367,7 +372,7 @@ TEST_F(ThriftRateLimitFilterTest, LimitResponse) { EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0); EXPECT_CALL(filter_callbacks_.stream_info_, setResponseFlag(StreamInfo::ResponseFlag::RateLimited)); - request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OverLimit, nullptr, + 
request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OverLimit, nullptr, nullptr, nullptr); EXPECT_EQ(1U, @@ -400,8 +405,8 @@ TEST_F(ThriftRateLimitFilterTest, LimitResponseWithHeaders) { setResponseFlag(StreamInfo::ResponseFlag::RateLimited)); Http::ResponseHeaderMapPtr h{new Http::TestResponseHeaderMapImpl(*rl_headers)}; - request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OverLimit, std::move(h), - nullptr); + request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OverLimit, nullptr, + std::move(h), nullptr); EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_.counter("ratelimit.over_limit") @@ -425,7 +430,7 @@ TEST_F(ThriftRateLimitFilterTest, LimitResponseRuntimeDisabled) { EXPECT_CALL(runtime_.snapshot_, featureEnabled("ratelimit.thrift_filter_enforcing", 100)) .WillOnce(Return(false)); EXPECT_CALL(filter_callbacks_, continueDecoding()); - request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OverLimit, nullptr, + request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OverLimit, nullptr, nullptr, nullptr); EXPECT_EQ(1U, diff --git a/test/mocks/http/mocks.h b/test/mocks/http/mocks.h index afbad757fcd7..94550d19ae12 100644 --- a/test/mocks/http/mocks.h +++ b/test/mocks/http/mocks.h @@ -630,7 +630,17 @@ MATCHER_P(HeaderMapEqual, rhs, "") { return equal; } -MATCHER_P(HeaderMapEqualRef, rhs, "") { return arg == *rhs; } +MATCHER_P(HeaderMapEqualRef, rhs, "") { + const bool equal = (arg == *rhs); + if (!equal) { + *result_listener << "\n" + << TestUtility::addLeftAndRightPadding("header map:") << "\n" + << *rhs << TestUtility::addLeftAndRightPadding("is not equal to:") << "\n" + << arg << TestUtility::addLeftAndRightPadding("") // line full of padding + << "\n"; + } + return equal; +} // Test that a HeaderMapPtr argument includes a given key-value pair, e.g., // HeaderHasValue("Upgrade", "WebSocket") From c2b0d37ca5b40b71ce48984f2cf5984297b79a71 Mon Sep 17 00:00:00 2001 From: Jose Ulises Nino Rivera Date: Tue, 4 Aug 2020 21:55:39 -0700 Subject: [PATCH 859/909] decompressor filter: advertise accept-encoding on headers-only requests (#12483) We noticed that certain headers-only requests were not having their respective responses be compressed. Ultimately the problem was that the decompressor filter was not advertising accept-encoding for headers-only requests. This PR advertises (when configured to do so and response decompression is active) even on headers-only requests. Signed-off-by: Jose Nino --- docs/root/version_history/current.rst | 1 + .../http/decompressor/decompressor_filter.cc | 14 +++++++------- .../http/decompressor/decompressor_filter_test.cc | 11 +++++++++++ 3 files changed, 19 insertions(+), 7 deletions(-) diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 2c2b2fb85c02..ccc3430d7d92 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -12,6 +12,7 @@ Minor Behavior Changes *Changes that may cause incompatibilities for some users, but should not for most* * compressor: always insert `Vary` headers for compressible resources even if it's decided not to compress a response due to incompatible `Accept-Encoding` value. The `Vary` header needs to be inserted to let a caching proxy in front of Envoy know that the requested resource still can be served with compression applied. +* decompressor: headers-only requests were incorrectly not advertising accept-encoding when configured to do so. 
This is now fixed. * http: added :ref:`headers_to_add ` to :ref:`local reply mapper ` to allow its users to add/append/override response HTTP headers to local replies. * http: added HCM level configuration of :ref:`error handling on invalid messaging ` which substantially changes Envoy's behavior when encountering invalid HTTP/1.1 defaulting to closing the connection instead of allowing reuse. This can temporarily be reverted by setting `envoy.reloadable_features.hcm_stream_error_on_invalid_message` to false, or permanently reverted by setting the :ref:`HCM option ` to true to restore prior HTTP/1.1 beavior and setting the *new* HTTP/2 configuration :ref:`override_stream_error_on_invalid_http_message ` to false to retain prior HTTP/2 behavior. * http: changed Envoy to send error headers and body when possible. This behavior may be temporarily reverted by setting `envoy.reloadable_features.allow_response_for_timeout` to false. diff --git a/source/extensions/filters/http/decompressor/decompressor_filter.cc b/source/extensions/filters/http/decompressor/decompressor_filter.cc index 1c5efe245dac..62f7526e2598 100644 --- a/source/extensions/filters/http/decompressor/decompressor_filter.cc +++ b/source/extensions/filters/http/decompressor/decompressor_filter.cc @@ -60,12 +60,6 @@ DecompressorFilter::DecompressorFilter(DecompressorFilterConfigSharedPtr config) Http::FilterHeadersStatus DecompressorFilter::decodeHeaders(Http::RequestHeaderMap& headers, bool end_stream) { - // Headers only request, continue. - if (end_stream) { - return Http::FilterHeadersStatus::Continue; - } - ENVOY_STREAM_LOG(debug, "DecompressorFilter::decodeHeaders: {}", *decoder_callbacks_, headers); - // Two responsibilities on the request side: // 1. If response decompression is enabled (and advertisement is enabled), then advertise to // the upstream that this hop is able to decompress responses via the Accept-Encoding header. @@ -77,7 +71,13 @@ Http::FilterHeadersStatus DecompressorFilter::decodeHeaders(Http::RequestHeaderM *decoder_callbacks_, headers.getInlineValue(accept_encoding_handle.handle())); } - // 2. If request decompression is enabled, then decompress the request. + // Headers-only requests do not, by definition, get decompressed. + if (end_stream) { + return Http::FilterHeadersStatus::Continue; + } + ENVOY_STREAM_LOG(debug, "DecompressorFilter::decodeHeaders: {}", *decoder_callbacks_, headers); + + // 2. Setup request decompression if all checks comply. return maybeInitDecompress(config_->requestDirectionConfig(), request_decompressor_, *decoder_callbacks_, headers); }; diff --git a/test/extensions/filters/http/decompressor/decompressor_filter_test.cc b/test/extensions/filters/http/decompressor/decompressor_filter_test.cc index b903a9e8b7ee..871b8f28b751 100644 --- a/test/extensions/filters/http/decompressor/decompressor_filter_test.cc +++ b/test/extensions/filters/http/decompressor/decompressor_filter_test.cc @@ -311,6 +311,17 @@ TEST_P(DecompressorFilterTest, NoDecompressionHeadersOnly) { Http::TestRequestHeaderMapImpl headers_before_filter; std::unique_ptr headers_after_filter = doHeaders(headers_before_filter, true /* end_stream */); + + if (isRequestDirection()) { + ASSERT_EQ(headers_after_filter->get(Http::LowerCaseString("accept-encoding")) + ->value() + .getStringView(), + "mock"); + // The request direction adds Accept-Encoding by default, even for header-only requests. + // Other than this header, the rest of the headers should be the same before and after the + // filter. 
+ headers_after_filter->remove(Http::LowerCaseString("accept-encoding")); + } EXPECT_THAT(headers_after_filter, HeaderMapEqualIgnoreOrder(&headers_before_filter)); } From 8452f4b749674deb5f4b963d0da78797fb7b1278 Mon Sep 17 00:00:00 2001 From: Petr Pchelko Date: Wed, 5 Aug 2020 09:53:38 -0700 Subject: [PATCH 860/909] Switch RateLimit headers spec version to latest (#12493) Followup for a new feature introduced by #12410 Apologies for not noticing that a later draft was introduced recently. I think we should start with supporting the latest available spec draft, so update version 2 to version 3. The change is technically backwards-incompatible, but the new feature was introduced one day ago, nobody could have been so fast to depend on it. Signed-off-by: Petr Pchelko --- .../extensions/filters/http/ratelimit/v3/rate_limit.proto | 6 +++--- docs/root/version_history/current.rst | 2 +- .../extensions/filters/http/ratelimit/v3/rate_limit.proto | 6 +++--- source/extensions/filters/http/ratelimit/ratelimit.h | 2 +- .../filters/http/ratelimit/ratelimit_integration_test.cc | 2 +- test/extensions/filters/http/ratelimit/ratelimit_test.cc | 2 +- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto b/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto index d80f9fcaed83..781fddc1939c 100644 --- a/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto +++ b/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto @@ -29,8 +29,8 @@ message RateLimit { // X-RateLimit headers disabled. OFF = 0; - // Use `draft RFC Version 02 `_. - DRAFT_VERSION_02 = 1; + // Use `draft RFC Version 03 `_. + DRAFT_VERSION_03 = 1; } // The rate limit domain to use when calling the rate limit service. @@ -94,7 +94,7 @@ message RateLimit { // above represent the window that is closest to reaching its limit. // // For more information about the headers specification see selected version of - // the `draft RFC `_. + // the `draft RFC `_. // // Disabled by default. XRateLimitHeadersRFCVersion enable_x_ratelimit_headers = 8 diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index ccc3430d7d92..938018830fe7 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -59,7 +59,7 @@ New Features * load balancer: added a :ref:`configuration` option to specify the active request bias used by the least request load balancer. * lua: added Lua APIs to access :ref:`SSL connection info ` object. * postgres network filter: :ref:`metadata ` is produced based on SQL query. -* ratelimit: added :ref:`enable_x_ratelimit_headers ` option to enable `X-RateLimit-*` headers as defined in `draft RFC `_. +* ratelimit: added :ref:`enable_x_ratelimit_headers ` option to enable `X-RateLimit-*` headers as defined in `draft RFC `_. * router: added new :ref:`envoy-ratelimited` retry policy, which allows retrying envoy's own rate limited responses. diff --git a/generated_api_shadow/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto b/generated_api_shadow/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto index d80f9fcaed83..781fddc1939c 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto @@ -29,8 +29,8 @@ message RateLimit { // X-RateLimit headers disabled. OFF = 0; - // Use `draft RFC Version 02 `_. 
- DRAFT_VERSION_02 = 1; + // Use `draft RFC Version 03 `_. + DRAFT_VERSION_03 = 1; } // The rate limit domain to use when calling the rate limit service. @@ -94,7 +94,7 @@ message RateLimit { // above represent the window that is closest to reaching its limit. // // For more information about the headers specification see selected version of - // the `draft RFC `_. + // the `draft RFC `_. // // Disabled by default. XRateLimitHeadersRFCVersion enable_x_ratelimit_headers = 8 diff --git a/source/extensions/filters/http/ratelimit/ratelimit.h b/source/extensions/filters/http/ratelimit/ratelimit.h index 26937019f2ef..b7b803343cbe 100644 --- a/source/extensions/filters/http/ratelimit/ratelimit.h +++ b/source/extensions/filters/http/ratelimit/ratelimit.h @@ -45,7 +45,7 @@ class FilterConfig { failure_mode_deny_(config.failure_mode_deny()), enable_x_ratelimit_headers_( config.enable_x_ratelimit_headers() == - envoy::extensions::filters::http::ratelimit::v3::RateLimit::DRAFT_VERSION_02), + envoy::extensions::filters::http::ratelimit::v3::RateLimit::DRAFT_VERSION_03), rate_limited_grpc_status_( config.rate_limited_as_resource_exhausted() ? absl::make_optional(Grpc::Status::WellKnownGrpcStatus::ResourceExhausted) diff --git a/test/extensions/filters/http/ratelimit/ratelimit_integration_test.cc b/test/extensions/filters/http/ratelimit/ratelimit_integration_test.cc index e7b831a60902..e8c86a00e06d 100644 --- a/test/extensions/filters/http/ratelimit/ratelimit_integration_test.cc +++ b/test/extensions/filters/http/ratelimit/ratelimit_integration_test.cc @@ -211,7 +211,7 @@ class RatelimitFilterHeadersEnabledIntegrationTest : public RatelimitIntegration public: RatelimitFilterHeadersEnabledIntegrationTest() { enable_x_ratelimit_headers_ = - envoy::extensions::filters::http::ratelimit::v3::RateLimit::DRAFT_VERSION_02; + envoy::extensions::filters::http::ratelimit::v3::RateLimit::DRAFT_VERSION_03; } }; diff --git a/test/extensions/filters/http/ratelimit/ratelimit_test.cc b/test/extensions/filters/http/ratelimit/ratelimit_test.cc index ba968cbf3a03..a0d3a31d8a2c 100644 --- a/test/extensions/filters/http/ratelimit/ratelimit_test.cc +++ b/test/extensions/filters/http/ratelimit/ratelimit_test.cc @@ -77,7 +77,7 @@ class HttpRateLimitFilterTest : public testing::Test { const std::string enable_x_ratelimit_headers_config_ = R"EOF( domain: foo - enable_x_ratelimit_headers: DRAFT_VERSION_02 + enable_x_ratelimit_headers: DRAFT_VERSION_03 )EOF"; const std::string filter_config_ = R"EOF( From 014a2555bfe358d73f068e448a3a40a946779c39 Mon Sep 17 00:00:00 2001 From: Snow Pettersen Date: Wed, 5 Aug 2020 14:13:14 -0400 Subject: [PATCH 861/909] hcm: move StreamInfo and filter creation into FM (#12470) Moves the ownership of the StreamInfoImpl into the FilterManager, exposed via an accessor to allow the ActiveStream to still modify/read the StreamInfo. Moves the interaction with the FilterChainFactory into the FilterManager, as well as ownership over the access logs created via the factory callbacks. 
Signed-off-by: Snow Pettersen --- source/common/http/conn_manager_impl.cc | 193 ++++++++++++------------ source/common/http/conn_manager_impl.h | 70 ++++++--- 2 files changed, 147 insertions(+), 116 deletions(-) diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index e325baa41a56..a00ba998d641 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -378,9 +378,9 @@ void ConnectionManagerImpl::resetAllStreams( // of the form: if parameter is nonempty, use that; else if the // codec details are nonempty, use those. This hack does not // seem better than the code duplication, so punt for now. - stream.stream_info_.setResponseFlag(response_flag.value()); + stream.filter_manager_.streamInfo().setResponseFlag(response_flag.value()); if (*response_flag == StreamInfo::ResponseFlag::DownstreamProtocolError) { - stream.stream_info_.setResponseCodeDetails( + stream.filter_manager_.streamInfo().setResponseCodeDetails( stream.response_encoder_->getStream().responseDetails()); } } @@ -518,13 +518,14 @@ void ConnectionManagerImpl::RdsRouteConfigUpdateRequester::requestRouteConfigUpd ConnectionManagerImpl::ActiveStream::ActiveStream(ConnectionManagerImpl& connection_manager, uint32_t buffer_limit) - : connection_manager_(connection_manager), filter_manager_(*this, *this, buffer_limit), + : connection_manager_(connection_manager), + filter_manager_(*this, *this, buffer_limit, connection_manager_.config_.filterFactory(), + connection_manager_.codec_->protocol(), connection_manager_.timeSource(), + connection_manager_.read_callbacks_->connection().streamInfo().filterState(), + StreamInfo::FilterState::LifeSpan::Connection), stream_id_(connection_manager.random_generator_.random()), request_response_timespan_(new Stats::HistogramCompletableTimespanImpl( connection_manager_.stats_.named_.downstream_rq_time_, connection_manager_.timeSource())), - stream_info_(connection_manager_.codec_->protocol(), connection_manager_.timeSource(), - connection_manager_.read_callbacks_->connection().streamInfo().filterState(), - StreamInfo::FilterState::LifeSpan::Connection), upstream_options_(std::make_shared()) { ASSERT(!connection_manager.config_.isRoutable() || ((connection_manager.config_.routeConfigProvider() == nullptr && @@ -533,8 +534,12 @@ ConnectionManagerImpl::ActiveStream::ActiveStream(ConnectionManagerImpl& connect connection_manager.config_.scopedRouteConfigProvider() == nullptr)), "Either routeConfigProvider or scopedRouteConfigProvider should be set in " "ConnectionManagerImpl."); + for (const AccessLog::InstanceSharedPtr& access_log : connection_manager_.config_.accessLogs()) { + filter_manager_.addAccessLogHandler(access_log); + } - stream_info_.setRequestIDExtension(connection_manager.config_.requestIDExtension()); + filter_manager_.streamInfo().setRequestIDExtension( + connection_manager.config_.requestIDExtension()); if (connection_manager_.config_.isRoutable() && connection_manager.config_.routeConfigProvider() != nullptr) { @@ -558,18 +563,19 @@ ConnectionManagerImpl::ActiveStream::ActiveStream(ConnectionManagerImpl& connect } else { connection_manager_.stats_.named_.downstream_rq_http1_total_.inc(); } - stream_info_.setDownstreamLocalAddress( + filter_manager_.streamInfo().setDownstreamLocalAddress( connection_manager_.read_callbacks_->connection().localAddress()); - stream_info_.setDownstreamDirectRemoteAddress( + filter_manager_.streamInfo().setDownstreamDirectRemoteAddress( 
connection_manager_.read_callbacks_->connection().directRemoteAddress()); // Initially, the downstream remote address is the source address of the // downstream connection. That can change later in the request's lifecycle, // based on XFF processing, but setting the downstream remote address here // prevents surprises for logging code in edge cases. - stream_info_.setDownstreamRemoteAddress( + filter_manager_.streamInfo().setDownstreamRemoteAddress( connection_manager_.read_callbacks_->connection().remoteAddress()); - stream_info_.setDownstreamSslConnection(connection_manager_.read_callbacks_->connection().ssl()); + filter_manager_.streamInfo().setDownstreamSslConnection( + connection_manager_.read_callbacks_->connection().ssl()); if (connection_manager_.config_.streamIdleTimeout().count()) { idle_timeout_ms_ = connection_manager_.config_.streamIdleTimeout(); @@ -594,12 +600,12 @@ ConnectionManagerImpl::ActiveStream::ActiveStream(ConnectionManagerImpl& connect this); } - stream_info_.setRequestedServerName( + filter_manager_.streamInfo().setRequestedServerName( connection_manager_.read_callbacks_->connection().requestedServerName()); } ConnectionManagerImpl::ActiveStream::~ActiveStream() { - stream_info_.onRequestComplete(); + filter_manager_.streamInfo().onRequestComplete(); Upstream::HostDescriptionConstSharedPtr upstream_host = connection_manager_.read_callbacks_->upstreamHost(); @@ -607,46 +613,41 @@ ConnectionManagerImpl::ActiveStream::~ActiveStream() { Upstream::ClusterRequestResponseSizeStatsOptRef req_resp_stats = upstream_host->cluster().requestResponseSizeStats(); if (req_resp_stats.has_value()) { - req_resp_stats->get().upstream_rq_body_size_.recordValue(stream_info_.bytesReceived()); - req_resp_stats->get().upstream_rs_body_size_.recordValue(stream_info_.bytesSent()); + req_resp_stats->get().upstream_rq_body_size_.recordValue( + filter_manager_.streamInfo().bytesReceived()); + req_resp_stats->get().upstream_rs_body_size_.recordValue( + filter_manager_.streamInfo().bytesSent()); } } // TODO(alyssawilk) this is not true. Fix. // A downstream disconnect can be identified for HTTP requests when the upstream returns with a 0 // response code and when no other response flags are set. - if (!stream_info_.hasAnyResponseFlag() && !stream_info_.responseCode()) { - stream_info_.setResponseFlag(StreamInfo::ResponseFlag::DownstreamConnectionTermination); + if (!filter_manager_.streamInfo().hasAnyResponseFlag() && + !filter_manager_.streamInfo().responseCode()) { + filter_manager_.streamInfo().setResponseFlag( + StreamInfo::ResponseFlag::DownstreamConnectionTermination); } if (connection_manager_.remote_close_) { - stream_info_.setResponseCodeDetails( + filter_manager_.streamInfo().setResponseCodeDetails( StreamInfo::ResponseCodeDetails::get().DownstreamRemoteDisconnect); } if (connection_manager_.codec_->protocol() < Protocol::Http2) { // For HTTP/2 there are still some reset cases where details are not set. // For HTTP/1 there shouldn't be any. Regression-proof this. 
- ASSERT(stream_info_.responseCodeDetails().has_value()); + ASSERT(filter_manager_.streamInfo().responseCodeDetails().has_value()); } connection_manager_.stats_.named_.downstream_rq_active_.dec(); - for (const AccessLog::InstanceSharedPtr& access_log : connection_manager_.config_.accessLogs()) { - access_log->log(filter_manager_.requestHeaders(), filter_manager_.responseHeaders(), - filter_manager_.responseTrailers(), stream_info_); - } - for (const auto& log_handler : access_log_handlers_) { - log_handler->log(filter_manager_.requestHeaders(), filter_manager_.responseHeaders(), - filter_manager_.responseTrailers(), stream_info_); - } - - if (stream_info_.healthCheck()) { + if (filter_manager_.streamInfo().healthCheck()) { connection_manager_.config_.tracingStats().health_check_.inc(); } if (active_span_) { Tracing::HttpTracerUtility::finalizeDownstreamSpan( *active_span_, filter_manager_.requestHeaders(), filter_manager_.responseHeaders(), - filter_manager_.responseTrailers(), stream_info_, *this); + filter_manager_.responseTrailers(), filter_manager_.streamInfo(), *this); } if (state_.successful_upgrade_) { connection_manager_.stats_.named_.downstream_cx_upgrades_active_.dec(); @@ -671,11 +672,12 @@ void ConnectionManagerImpl::ActiveStream::onIdleTimeout() { !Runtime::runtimeFeatureEnabled("envoy.reloadable_features.allow_response_for_timeout")) { // TODO(htuch): We could send trailers here with an x-envoy timeout header // or gRPC status code, and/or set H2 RST_STREAM error. - stream_info_.setResponseCodeDetails(StreamInfo::ResponseCodeDetails::get().StreamIdleTimeout); + filter_manager_.streamInfo().setResponseCodeDetails( + StreamInfo::ResponseCodeDetails::get().StreamIdleTimeout); connection_manager_.doEndStream(*this); } else { // TODO(mattklein) this may result in multiple flags. This Ok? 
- stream_info_.setResponseFlag(StreamInfo::ResponseFlag::StreamIdleTimeout); + filter_manager_.streamInfo().setResponseFlag(StreamInfo::ResponseFlag::StreamIdleTimeout); sendLocalReply(filter_manager_.requestHeaders() != nullptr && Grpc::Common::isGrpcRequestHeaders(*filter_manager_.requestHeaders()), Http::Code::RequestTimeout, "stream timeout", nullptr, absl::nullopt, @@ -700,7 +702,8 @@ void ConnectionManagerImpl::ActiveStream::onStreamMaxDurationReached() { Http::Code::RequestTimeout, "downstream duration timeout", nullptr, absl::nullopt, StreamInfo::ResponseCodeDetails::get().MaxDurationTimeout); } else { - stream_info_.setResponseCodeDetails(StreamInfo::ResponseCodeDetails::get().MaxDurationTimeout); + filter_manager_.streamInfo().setResponseCodeDetails( + StreamInfo::ResponseCodeDetails::get().MaxDurationTimeout); connection_manager_.doEndStream(*this); } } @@ -735,16 +738,16 @@ void ConnectionManagerImpl::FilterManager::addStreamEncoderFilterWorker( LinkedList::moveIntoList(std::move(wrapper), encoder_filters_); } -void ConnectionManagerImpl::ActiveStream::addAccessLogHandler( +void ConnectionManagerImpl::FilterManager::addAccessLogHandler( AccessLog::InstanceSharedPtr handler) { access_log_handlers_.push_back(handler); } void ConnectionManagerImpl::ActiveStream::chargeStats(const ResponseHeaderMap& headers) { uint64_t response_code = Utility::getResponseStatus(headers); - stream_info_.response_code_ = response_code; + filter_manager_.streamInfo().response_code_ = response_code; - if (stream_info_.health_check_request_) { + if (filter_manager_.streamInfo().health_check_request_) { return; } @@ -895,7 +898,7 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& he // HTTP/1.1. // // The protocol may have shifted in the HTTP/1.0 case so reset it. - stream_info_.protocol(protocol); + filter_manager_.streamInfo().protocol(protocol); if (!connection_manager_.config_.http1Settings().accept_http_10_) { // Send "Upgrade Required" if HTTP/1.0 support is not explicitly configured on. sendLocalReply(false, Code::UpgradeRequired, "", nullptr, absl::nullopt, @@ -981,11 +984,12 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& he if (!state_.is_internally_created_) { // Only sanitize headers on first pass. // Modify the downstream remote address depending on configuration and headers. 
- stream_info_.setDownstreamRemoteAddress(ConnectionManagerUtility::mutateRequestHeaders( - *filter_manager_.requestHeaders(), connection_manager_.read_callbacks_->connection(), - connection_manager_.config_, *snapped_route_config_, connection_manager_.local_info_)); + filter_manager_.streamInfo().setDownstreamRemoteAddress( + ConnectionManagerUtility::mutateRequestHeaders( + *filter_manager_.requestHeaders(), connection_manager_.read_callbacks_->connection(), + connection_manager_.config_, *snapped_route_config_, connection_manager_.local_info_)); } - ASSERT(stream_info_.downstreamRemoteAddress() != nullptr); + ASSERT(filter_manager_.streamInfo().downstreamRemoteAddress() != nullptr); ASSERT(!cached_route_); refreshCachedRoute(); @@ -996,9 +1000,9 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& he connection_manager_.config_, cached_route_.value().get()); } - stream_info_.setRequestHeaders(*filter_manager_.requestHeaders()); + filter_manager_.streamInfo().setRequestHeaders(*filter_manager_.requestHeaders()); - const bool upgrade_rejected = createFilterChain() == false; + const bool upgrade_rejected = filter_manager_.createFilterChain() == false; // TODO if there are no filters when starting a filter iteration, the connection manager // should return 404. The current returns no response if there is no router filter. @@ -1054,13 +1058,13 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& he } void ConnectionManagerImpl::ActiveStream::traceRequest() { - Tracing::Decision tracing_decision = - Tracing::HttpTracerUtility::isTracing(stream_info_, *filter_manager_.requestHeaders()); + Tracing::Decision tracing_decision = Tracing::HttpTracerUtility::isTracing( + filter_manager_.streamInfo(), *filter_manager_.requestHeaders()); ConnectionManagerImpl::chargeTracingStats(tracing_decision.reason, connection_manager_.config_.tracingStats()); - active_span_ = connection_manager_.tracer().startSpan(*this, *filter_manager_.requestHeaders(), - stream_info_, tracing_decision); + active_span_ = connection_manager_.tracer().startSpan( + *this, *filter_manager_.requestHeaders(), filter_manager_.streamInfo(), tracing_decision); if (!active_span_) { return; @@ -1188,7 +1192,7 @@ void ConnectionManagerImpl::ActiveStream::decodeData(Buffer::Instance& data, boo ScopeTrackerScopeState scope(this, connection_manager_.read_callbacks_->connection().dispatcher()); filter_manager_.maybeEndDecode(end_stream); - stream_info_.addBytesReceived(data.length()); + filter_manager_.streamInfo().addBytesReceived(data.length()); filter_manager_.decodeData(data, end_stream); } @@ -1433,7 +1437,7 @@ void ConnectionManagerImpl::FilterManager::maybeEndDecode(bool end_stream) { ASSERT(!active_stream_.state_.remote_complete_); active_stream_.state_.remote_complete_ = end_stream; if (end_stream) { - active_stream_.stream_info_.onLastDownstreamRxByteReceived(); + stream_info_.onLastDownstreamRxByteReceived(); ENVOY_STREAM_LOG(debug, "request end stream", active_stream_); } } @@ -1513,21 +1517,21 @@ void ConnectionManagerImpl::ActiveStream::refreshCachedRoute(const Router::Route snapScopedRouteConfig(); } if (snapped_route_config_ != nullptr) { - route = snapped_route_config_->route(cb, *filter_manager_.requestHeaders(), stream_info_, - stream_id_); + route = snapped_route_config_->route(cb, *filter_manager_.requestHeaders(), + filter_manager_.streamInfo(), stream_id_); } } - stream_info_.route_entry_ = route ? 
route->routeEntry() : nullptr; + filter_manager_.streamInfo().route_entry_ = route ? route->routeEntry() : nullptr; cached_route_ = std::move(route); - if (nullptr == stream_info_.route_entry_) { + if (nullptr == filter_manager_.streamInfo().route_entry_) { cached_cluster_info_ = nullptr; } else { - Upstream::ThreadLocalCluster* local_cluster = - connection_manager_.cluster_manager_.get(stream_info_.route_entry_->clusterName()); + Upstream::ThreadLocalCluster* local_cluster = connection_manager_.cluster_manager_.get( + filter_manager_.streamInfo().route_entry_->clusterName()); cached_cluster_info_ = (nullptr == local_cluster) ? nullptr : local_cluster->info(); } - stream_info_.setUpstreamClusterInfo(cached_cluster_info_.value()); + filter_manager_.streamInfo().setUpstreamClusterInfo(cached_cluster_info_.value()); refreshCachedTracingCustomTags(); } @@ -1577,7 +1581,7 @@ void ConnectionManagerImpl::ActiveStream::sendLocalReply( const std::function& modify_headers, const absl::optional grpc_status, absl::string_view details) { const bool is_head_request = state_.is_head_request_; - stream_info_.setResponseCodeDetails(details); + filter_manager_.streamInfo().setResponseCodeDetails(details); // The BadRequest error code indicates there has been a messaging error. if (Runtime::runtimeFeatureEnabled( @@ -1607,9 +1611,9 @@ void ConnectionManagerImpl::ActiveStream::sendLocalReply( Utility::EncodeFunctions{ [&](ResponseHeaderMap& response_headers, Code& code, std::string& body, absl::string_view& content_type) -> void { - connection_manager_.config_.localReply().rewrite(filter_manager_.requestHeaders(), - response_headers, stream_info_, code, - body, content_type); + connection_manager_.config_.localReply().rewrite( + filter_manager_.requestHeaders(), response_headers, filter_manager_.streamInfo(), + code, body, content_type); }, [&](ResponseHeaderMapPtr&& response_headers, bool end_stream) -> void { if (modify_headers != nullptr) { @@ -1629,7 +1633,7 @@ void ConnectionManagerImpl::ActiveStream::sendLocalReply( code, body, grpc_status, state_.is_head_request_}); filter_manager_.maybeEndEncode(state_.local_complete_); } else { - stream_info_.setResponseCodeDetails(details); + filter_manager_.streamInfo().setResponseCodeDetails(details); // If we land in this branch, response headers have already been sent to the client. // All we can do at this point is reset the stream. ENVOY_STREAM_LOG(debug, "Resetting stream due to {}. Prior headers have already been sent", @@ -1647,7 +1651,7 @@ void ConnectionManagerImpl::FilterManager::sendLocalReplyViaFilterChain( // For early error handling, do a best-effort attempt to create a filter chain // to ensure access logging. If the filter chain already exists this will be // a no-op. 
- active_stream_.createFilterChain(); + createFilterChain(); Utility::sendLocalReply( active_stream_.state_.destroyed_, @@ -1655,8 +1659,7 @@ void ConnectionManagerImpl::FilterManager::sendLocalReplyViaFilterChain( [this](ResponseHeaderMap& response_headers, Code& code, std::string& body, absl::string_view& content_type) -> void { active_stream_.connection_manager_.config_.localReply().rewrite( - request_headers_.get(), response_headers, active_stream_.stream_info_, code, body, - content_type); + request_headers_.get(), response_headers, stream_info_, code, body, content_type); }, [this, modify_headers](ResponseHeaderMapPtr&& headers, bool end_stream) -> void { if (modify_headers != nullptr) { @@ -1812,7 +1815,8 @@ void ConnectionManagerImpl::ActiveStream::encodeHeaders(ResponseHeaderMap& heade // is not from cache const bool should_preserve_upstream_date = Runtime::runtimeFeatureEnabled("envoy.reloadable_features.preserve_upstream_date") || - stream_info_.hasResponseFlag(StreamInfo::ResponseFlag::ResponseFromCacheFilter); + filter_manager_.streamInfo().hasResponseFlag( + StreamInfo::ResponseFlag::ResponseFromCacheFilter); if (!should_preserve_upstream_date || !headers.Date()) { connection_manager_.config_.dateProvider().setDateHeader(headers); } @@ -1923,7 +1927,7 @@ void ConnectionManagerImpl::ActiveStream::encodeHeaders(ResponseHeaderMap& heade headers); // Now actually encode via the codec. - stream_info_.onFirstDownstreamTxByteSent(); + filter_manager_.streamInfo().onFirstDownstreamTxByteSent(); response_encoder_->encodeHeaders(headers, end_stream); } @@ -2078,7 +2082,7 @@ void ConnectionManagerImpl::ActiveStream::encodeData(Buffer::Instance& data, boo ENVOY_STREAM_LOG(trace, "encoding data via codec (size={} end_stream={})", *this, data.length(), end_stream); - stream_info_.addBytesSent(data.length()); + filter_manager_.streamInfo().addBytesSent(data.length()); response_encoder_->encodeData(data, end_stream); } @@ -2148,7 +2152,7 @@ void ConnectionManagerImpl::FilterManager::maybeEndEncode(bool end_stream) { if (end_stream) { ASSERT(!active_stream_.state_.codec_saw_local_complete_); active_stream_.state_.codec_saw_local_complete_ = true; - active_stream_.stream_info_.onLastDownstreamTxByteSent(); + stream_info_.onLastDownstreamTxByteSent(); active_stream_.request_response_timespan_->complete(); active_stream_.connection_manager_.doEndStream(active_stream_); } @@ -2192,8 +2196,8 @@ void ConnectionManagerImpl::ActiveStream::onResetStream(StreamResetReason, absl: // DownstreamProtocolError and propagate the details upwards. 
const absl::string_view encoder_details = response_encoder_->getStream().responseDetails(); if (!encoder_details.empty()) { - stream_info_.setResponseFlag(StreamInfo::ResponseFlag::DownstreamProtocolError); - stream_info_.setResponseCodeDetails(encoder_details); + filter_manager_.streamInfo().setResponseFlag(StreamInfo::ResponseFlag::DownstreamProtocolError); + filter_manager_.streamInfo().setResponseCodeDetails(encoder_details); } } @@ -2249,36 +2253,36 @@ void ConnectionManagerImpl::FilterManager::setBufferLimit(uint32_t new_limit) { } } -bool ConnectionManagerImpl::ActiveStream::createFilterChain() { - if (state_.created_filter_chain_) { +bool ConnectionManagerImpl::FilterManager::createFilterChain() { + if (active_stream_.state_.created_filter_chain_) { return false; } bool upgrade_rejected = false; const HeaderEntry* upgrade = nullptr; - if (filter_manager_.requestHeaders()) { - upgrade = filter_manager_.requestHeaders()->Upgrade(); + if (request_headers_) { + upgrade = request_headers_->Upgrade(); // Treat CONNECT requests as a special upgrade case. - if (!upgrade && HeaderUtility::isConnect(*filter_manager_.requestHeaders())) { - upgrade = filter_manager_.requestHeaders()->Method(); + if (!upgrade && HeaderUtility::isConnect(*request_headers_)) { + upgrade = request_headers_->Method(); } } - state_.created_filter_chain_ = true; + active_stream_.state_.created_filter_chain_ = true; if (upgrade != nullptr) { const Router::RouteEntry::UpgradeMap* upgrade_map = nullptr; // We must check if the 'cached_route_' optional is populated since this function can be called // early via sendLocalReply(), before the cached route is populated. - if (hasCachedRoute() && cached_route_.value()->routeEntry()) { - upgrade_map = &cached_route_.value()->routeEntry()->upgradeMap(); + if (active_stream_.hasCachedRoute() && active_stream_.cached_route_.value()->routeEntry()) { + upgrade_map = &active_stream_.cached_route_.value()->routeEntry()->upgradeMap(); } - if (connection_manager_.config_.filterFactory().createUpgradeFilterChain( - upgrade->value().getStringView(), upgrade_map, *this)) { - state_.successful_upgrade_ = true; - connection_manager_.stats_.named_.downstream_cx_upgrades_total_.inc(); - connection_manager_.stats_.named_.downstream_cx_upgrades_active_.inc(); + if (filter_chain_factory_.createUpgradeFilterChain(upgrade->value().getStringView(), + upgrade_map, *this)) { + active_stream_.state_.successful_upgrade_ = true; + + filter_manager_callbacks_.upgradeFilterChainCreated(); return true; } else { upgrade_rejected = true; @@ -2287,7 +2291,7 @@ bool ConnectionManagerImpl::ActiveStream::createFilterChain() { } } - connection_manager_.config_.filterFactory().createFilterChain(*this); + filter_chain_factory_.createFilterChain(*this); return !upgrade_rejected; } @@ -2461,7 +2465,7 @@ Event::Dispatcher& ConnectionManagerImpl::ActiveStreamFilterBase::dispatcher() { } StreamInfo::StreamInfo& ConnectionManagerImpl::ActiveStreamFilterBase::streamInfo() { - return parent_.active_stream_.stream_info_; + return parent_.stream_info_; } Tracing::Span& ConnectionManagerImpl::ActiveStreamFilterBase::activeSpan() { @@ -2608,7 +2612,7 @@ void ConnectionManagerImpl::ActiveStreamDecoderFilter::sendLocalReply( Code code, absl::string_view body, std::function modify_headers, const absl::optional grpc_status, absl::string_view details) { - parent_.active_stream_.stream_info_.setResponseCodeDetails(details); + parent_.stream_info_.setResponseCodeDetails(details); parent_.sendLocalReply(is_grpc_request_, code, body, 
modify_headers, grpc_status, details); } @@ -2703,11 +2707,11 @@ bool ConnectionManagerImpl::ActiveStreamDecoderFilter::recreateStream() { // Because the filter's and the HCM view of if the stream has a body and if // the stream is complete may differ, re-check bytesReceived() to make sure // there was no body from the HCM's point of view. - if (!complete() || parent_.active_stream_.stream_info_.bytesReceived() != 0) { + if (!complete() || parent_.stream_info_.bytesReceived() != 0) { return false; } - parent_.active_stream_.stream_info_.setResponseCodeDetails( + parent_.stream_info_.setResponseCodeDetails( StreamInfo::ResponseCodeDetails::get().InternalRedirect); // n.b. we do not currently change the codecs to point at the new stream // decoder because the decoder callbacks are complete. It would be good to @@ -2728,12 +2732,13 @@ bool ConnectionManagerImpl::ActiveStreamDecoderFilter::recreateStream() { // TODO(snowp): In the case where connection level filter state has been set on the connection // FilterState that we inherit, we'll end up copying this every time even though we could get // away with just resetting it to the HCM filter_state_. - if (parent_.active_stream_.stream_info_.filter_state_->hasDataAtOrAboveLifeSpan( + if (parent_.stream_info_.filter_state_->hasDataAtOrAboveLifeSpan( StreamInfo::FilterState::LifeSpan::Request)) { - (*parent_.active_stream_.connection_manager_.streams_.begin())->stream_info_.filter_state_ = - std::make_shared( - parent_.active_stream_.stream_info_.filter_state_->parent(), - StreamInfo::FilterState::LifeSpan::FilterChain); + (*parent_.active_stream_.connection_manager_.streams_.begin()) + ->filter_manager_.streamInfo() + .filter_state_ = std::make_shared( + parent_.stream_info_.filter_state_->parent(), + StreamInfo::FilterState::LifeSpan::FilterChain); } new_stream.decodeHeaders(std::move(request_headers), true); diff --git a/source/common/http/conn_manager_impl.h b/source/common/http/conn_manager_impl.h index 7679d15ff878..cab42078a38a 100644 --- a/source/common/http/conn_manager_impl.h +++ b/source/common/http/conn_manager_impl.h @@ -11,6 +11,7 @@ #include "envoy/access_log/access_log.h" #include "envoy/common/random_generator.h" #include "envoy/common/scope_tracker.h" +#include "envoy/common/time.h" #include "envoy/event/deferred_deletable.h" #include "envoy/http/api_listener.h" #include "envoy/http/codec.h" @@ -28,6 +29,7 @@ #include "envoy/ssl/connection.h" #include "envoy/stats/scope.h" #include "envoy/stats/stats_macros.h" +#include "envoy/stream_info/filter_state.h" #include "envoy/tracing/http_tracer.h" #include "envoy/upstream/upstream.h" @@ -437,18 +439,46 @@ class ConnectionManagerImpl : Logger::Loggable, * Called when the stream write buffer is above above the high watermark. */ virtual void onDecoderFilterAboveWriteBufferHighWatermark() PURE; + + /** + * Called when the FilterManager creates an Upgrade filter chain. + */ + virtual void upgradeFilterChainCreated() PURE; }; /** * FilterManager manages decoding a request through a series of decoding filter and the encoding * of the resulting response. 
*/ - class FilterManager { + class FilterManager : public FilterChainFactoryCallbacks { public: FilterManager(ActiveStream& active_stream, FilterManagerCallbacks& filter_manager_callbacks, - uint32_t buffer_limit) + uint32_t buffer_limit, FilterChainFactory& filter_chain_factory, + Http::Protocol protocol, TimeSource& time_source, + StreamInfo::FilterStateSharedPtr parent_filter_state, + StreamInfo::FilterState::LifeSpan filter_state_life_span) : active_stream_(active_stream), filter_manager_callbacks_(filter_manager_callbacks), - buffer_limit_(buffer_limit) {} + buffer_limit_(buffer_limit), filter_chain_factory_(filter_chain_factory), + stream_info_(protocol, time_source, parent_filter_state, filter_state_life_span) {} + ~FilterManager() override { + for (const auto& log_handler : access_log_handlers_) { + log_handler->log(request_headers_.get(), response_headers_.get(), response_trailers_.get(), + stream_info_); + } + } + + // Http::FilterChainFactoryCallbacks + void addStreamDecoderFilter(StreamDecoderFilterSharedPtr filter) override { + addStreamDecoderFilterWorker(filter, false); + } + void addStreamEncoderFilter(StreamEncoderFilterSharedPtr filter) override { + addStreamEncoderFilterWorker(filter, false); + } + void addStreamFilter(StreamFilterSharedPtr filter) override { + addStreamDecoderFilterWorker(filter, true); + addStreamEncoderFilterWorker(filter, true); + } + void addAccessLogHandler(AccessLog::InstanceSharedPtr handler) override; void destroyFilters() { for (auto& filter : decoder_filters_) { @@ -574,6 +604,13 @@ class ConnectionManagerImpl : Logger::Loggable, */ ResponseTrailerMap* responseTrailers() const { return response_trailers_.get(); } + // TODO(snowp): This should probably return a StreamInfo instead of the impl. + StreamInfo::StreamInfoImpl& streamInfo() { return stream_info_; } + const StreamInfo::StreamInfoImpl& streamInfo() const { return stream_info_; } + + // Set up the Encoder/Decoder filter chain. + bool createFilterChain(); + private: // Indicates which filter to start the iteration with. enum class FilterIterationStartState { AlwaysStartFromNext, CanStartFromCurrent }; @@ -645,6 +682,7 @@ class ConnectionManagerImpl : Logger::Loggable, std::list decoder_filters_; std::list encoder_filters_; + std::list access_log_handlers_; ResponseHeaderMapPtr continue_headers_; ResponseHeaderMapPtr response_headers_; @@ -661,6 +699,8 @@ class ConnectionManagerImpl : Logger::Loggable, uint32_t high_watermark_count_{0}; std::list watermark_callbacks_; + FilterChainFactory& filter_chain_factory_; + StreamInfo::StreamInfoImpl stream_info_; // TODO(snowp): Once FM has been moved to its own file we'll make these private classes of FM, // at which point they no longer need to be friends. 
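With the factory callbacks now implemented by FilterManager, the interaction with a configured filter factory is worth spelling out. The sketch below is illustrative only and is not part of this patch; it assumes the standard Http::FilterFactoryCb signature and the Http::PassThroughFilter helper that appears later in this series, and NoopFilter is a made-up example filter.

#include <memory>

#include "extensions/filters/http/common/pass_through_filter.h"

namespace Envoy {

// Example filter for illustration only; it simply passes every request and response through.
class NoopFilter : public Http::PassThroughFilter {};

// A factory callback of the kind FilterChainFactory::createFilterChain() invokes; the callbacks
// argument is now the FilterManager itself, so addStreamFilter() above is what ultimately
// registers the filter with both the decoder and encoder filter lists.
Http::FilterFactoryCb makeNoopFilterFactory() {
  return [](Http::FilterChainFactoryCallbacks& callbacks) {
    callbacks.addStreamFilter(std::make_shared<NoopFilter>());
  };
}

} // namespace Envoy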
friend ActiveStreamFilterBase; @@ -676,7 +716,6 @@ class ConnectionManagerImpl : Logger::Loggable, public Event::DeferredDeletable, public StreamCallbacks, public RequestDecoder, - public FilterChainFactoryCallbacks, public Tracing::Config, public ScopeTrackedObject, public FilterManagerCallbacks { @@ -714,19 +753,6 @@ class ConnectionManagerImpl : Logger::Loggable, void decodeHeaders(RequestHeaderMapPtr&& headers, bool end_stream) override; void decodeTrailers(RequestTrailerMapPtr&& trailers) override; - // Http::FilterChainFactoryCallbacks - void addStreamDecoderFilter(StreamDecoderFilterSharedPtr filter) override { - filter_manager_.addStreamDecoderFilterWorker(filter, false); - } - void addStreamEncoderFilter(StreamEncoderFilterSharedPtr filter) override { - filter_manager_.addStreamEncoderFilterWorker(filter, false); - } - void addStreamFilter(StreamFilterSharedPtr filter) override { - filter_manager_.addStreamDecoderFilterWorker(filter, true); - filter_manager_.addStreamEncoderFilterWorker(filter, true); - } - void addAccessLogHandler(AccessLog::InstanceSharedPtr handler) override; - // Tracing::TracingConfig Tracing::OperationName operationName() const override; const Tracing::CustomTagMap* customTags() const override; @@ -745,7 +771,7 @@ class ConnectionManagerImpl : Logger::Loggable, DUMP_DETAILS(filter_manager_.requestTrailers()); DUMP_DETAILS(filter_manager_.responseHeaders()); DUMP_DETAILS(filter_manager_.responseTrailers()); - DUMP_DETAILS(&stream_info_); + DUMP_DETAILS(&filter_manager_.streamInfo()); } // FilterManagerCallbacks @@ -756,6 +782,10 @@ class ConnectionManagerImpl : Logger::Loggable, void encodeMetadata(MetadataMapVector& metadata) override; void onDecoderFilterBelowWriteBufferLowWatermark() override; void onDecoderFilterAboveWriteBufferHighWatermark() override; + void upgradeFilterChainCreated() override { + connection_manager_.stats_.named_.downstream_cx_upgrades_total_.inc(); + connection_manager_.stats_.named_.downstream_cx_upgrades_active_.inc(); + } void traceRequest(); @@ -842,8 +872,6 @@ class ConnectionManagerImpl : Logger::Loggable, ActiveStreamDecoderFilter* latest_data_decoding_filter_{}; }; - // Set up the Encoder/Decoder filter chain. - bool createFilterChain(); // Per-stream idle timeout callback. void onIdleTimeout(); // Reset per-stream idle timer. @@ -876,7 +904,6 @@ class ConnectionManagerImpl : Logger::Loggable, Tracing::SpanPtr active_span_; const uint64_t stream_id_; ResponseEncoder* response_encoder_{}; - std::list access_log_handlers_; Stats::TimespanPtr request_response_timespan_; // Per-stream idle timeout. 
Event::TimerPtr stream_idle_timer_; @@ -886,7 +913,6 @@ class ConnectionManagerImpl : Logger::Loggable, Event::TimerPtr max_stream_duration_timer_; std::chrono::milliseconds idle_timeout_ms_{}; State state_; - StreamInfo::StreamInfoImpl stream_info_; absl::optional cached_route_; absl::optional cached_cluster_info_; const std::string* decorated_operation_{nullptr}; From 904cc6b36614e0a5c443d11465b408d3a1c1a342 Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Wed, 5 Aug 2020 11:19:36 -0700 Subject: [PATCH 862/909] build: bazelrc cleanup (#12487) Signed-off-by: Lizan Zhou --- .bazelrc | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.bazelrc b/.bazelrc index 6db0ac239e5a..393c54d63b70 100644 --- a/.bazelrc +++ b/.bazelrc @@ -11,7 +11,6 @@ startup --host_jvm_args=-Xmx2g build --workspace_status_command="bash bazel/get_workspace_status" -build --experimental_local_memory_estimate build --experimental_strict_action_env=true build --host_force_python=PY3 build --action_env=BAZEL_LINKLIBS=-l%:libstdc++.a @@ -52,8 +51,7 @@ build:asan --define signal_trace=disabled build:asan --define ENVOY_CONFIG_ASAN=1 build:asan --copt -fsanitize=address,undefined build:asan --linkopt -fsanitize=address,undefined -# TODO(lizan): vptr and function requires C++ UBSAN runtime which we're not currently linking to. -# Enable them when bazel has better support for that or with explicit linker options. +# vptr and function sanitizer are enabled in clang-asan if it is set up via bazel/setup_clang.sh. build:asan --copt -fno-sanitize=vptr,function build:asan --linkopt -fno-sanitize=vptr,function build:asan --copt -DADDRESS_SANITIZER=1 From dbc0286e9e1443ff0ed4df2f950b9e9510877757 Mon Sep 17 00:00:00 2001 From: Sotiris Nanopoulos Date: Wed, 5 Aug 2020 16:41:01 -0700 Subject: [PATCH 863/909] test: Enable various common network tests on windows (#12503) This patch enables the following tests on Windows: 1. //test/common/http:codec_client_test 2. //test/common/network:listener_impl_test 3. //test/common/network:connection_impl_test To do so we swap the addresses to use `getCanonicalLoopbackAddress` instead of `getAnyAddress` and we add synchronization in `ConnectionImplTest.ReadWatermarks` tests. Additional Description: N/A Risk Level: Low, test only Testing: Updated unit tests Docs Changes: N/A Release Notes: N/A Signed-off-by: Sotiris Nanopoulos --- test/common/http/BUILD | 2 - test/common/http/codec_client_test.cc | 2 +- test/common/network/BUILD | 4 -- test/common/network/connection_impl_test.cc | 41 ++++++++++----------- test/common/network/listener_impl_test.cc | 8 ++-- 5 files changed, 24 insertions(+), 33 deletions(-) diff --git a/test/common/http/BUILD b/test/common/http/BUILD index b690d0a1d04a..b317052c4414 100644 --- a/test/common/http/BUILD +++ b/test/common/http/BUILD @@ -50,8 +50,6 @@ envoy_cc_test( envoy_cc_test( name = "codec_client_test", srcs = ["codec_client_test.cc"], - # IpVersions/CodecNetworkTest.SendData/IPv4: Test times out on Windows. 
- tags = ["fails_on_windows"], deps = [ ":common_lib", "//source/common/buffer:buffer_lib", diff --git a/test/common/http/codec_client_test.cc b/test/common/http/codec_client_test.cc index 3316380030b9..15979e8350b3 100644 --- a/test/common/http/codec_client_test.cc +++ b/test/common/http/codec_client_test.cc @@ -283,7 +283,7 @@ class CodecNetworkTest : public testing::TestWithParamtimeSource()) { dispatcher_ = api_->allocateDispatcher("test_thread"); auto socket = std::make_shared( - Network::Test::getAnyAddress(GetParam()), nullptr, true); + Network::Test::getCanonicalLoopbackAddress(GetParam()), nullptr, true); Network::ClientConnectionPtr client_connection = dispatcher_->createClientConnection( socket->localAddress(), source_address_, Network::Test::createRawBufferSocket(), nullptr); upstream_listener_ = dispatcher_->createListener(std::move(socket), listener_callbacks_, true); diff --git a/test/common/network/BUILD b/test/common/network/BUILD index 08c82e85c385..cd05a87e8344 100644 --- a/test/common/network/BUILD +++ b/test/common/network/BUILD @@ -74,8 +74,6 @@ envoy_cc_test( envoy_cc_test( name = "connection_impl_test", srcs = ["connection_impl_test.cc"], - # Times out on Windows - tags = ["fails_on_windows"], deps = [ "//source/common/buffer:buffer_lib", "//source/common/common:empty_string", @@ -181,8 +179,6 @@ envoy_cc_test( envoy_cc_test( name = "listener_impl_test", srcs = ["listener_impl_test.cc"], - # Times out on Windows - tags = ["fails_on_windows"], deps = [ "//source/common/event:dispatcher_lib", "//source/common/network:address_lib", diff --git a/test/common/network/connection_impl_test.cc b/test/common/network/connection_impl_test.cc index 55328d741c24..c4ea2f60c4fd 100644 --- a/test/common/network/connection_impl_test.cc +++ b/test/common/network/connection_impl_test.cc @@ -106,8 +106,8 @@ class ConnectionImplTest : public testing::TestWithParam { if (dispatcher_.get() == nullptr) { dispatcher_ = api_->allocateDispatcher("test_thread"); } - socket_ = std::make_shared(Network::Test::getAnyAddress(GetParam()), - nullptr, true); + socket_ = std::make_shared( + Network::Test::getCanonicalLoopbackAddress(GetParam()), nullptr, true); listener_ = dispatcher_->createListener(socket_, listener_callbacks_, true); client_connection_ = std::make_unique( *dispatcher_, socket_->localAddress(), source_address_, @@ -291,8 +291,8 @@ TEST_P(ConnectionImplTest, ImmediateConnectError) { // Using a broadcast/multicast address as the connection destinations address causes an // immediate error return from connect(). Address::InstanceConstSharedPtr broadcast_address; - socket_ = std::make_shared(Network::Test::getAnyAddress(GetParam()), - nullptr, true); + socket_ = std::make_shared( + Network::Test::getCanonicalLoopbackAddress(GetParam()), nullptr, true); if (socket_->localAddress()->ip()->version() == Address::IpVersion::v4) { broadcast_address = std::make_shared("224.0.0.1", 0); } else { @@ -808,17 +808,18 @@ TEST_P(ConnectionImplTest, ReadWatermarks) { client_connection_->addReadFilter(client_read_filter); connect(); + auto on_filter_data_exit = [&](Buffer::Instance&, bool) -> FilterStatus { + dispatcher_->exit(); + return FilterStatus::StopIteration; + }; + EXPECT_FALSE(testClientConnection()->readBuffer().highWatermarkTriggered()); EXPECT_TRUE(client_connection_->readEnabled()); // Add 4 bytes to the buffer and verify the connection becomes read disabled. 
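The on_filter_data_exit helper above captures the synchronization idiom these tests move to: run the dispatcher in blocking mode and end the loop from inside the expected callback, instead of polling with a non-blocking run. Shown in isolation below as an illustrative sketch that reuses the same fixture members the test already uses.

// Block in the event loop until the expected read actually happens; calling exit() from inside
// the callback is what lets the blocking run() return, so the assertion cannot race the event.
EXPECT_CALL(*client_read_filter, onData(_, false))
    .WillOnce(Invoke([&](Buffer::Instance&, bool) -> FilterStatus {
      dispatcher_->exit(); // wakes the blocking run() below
      return FilterStatus::StopIteration;
    }));
dispatcher_->run(Event::Dispatcher::RunType::Block); // returns only after exit() is called
EXPECT_TRUE(testClientConnection()->readBuffer().highWatermarkTriggered());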
{ Buffer::OwnedImpl buffer("data"); server_connection_->write(buffer, false); - EXPECT_CALL(*client_read_filter, onData(_, false)) - .WillOnce(Invoke([&](Buffer::Instance&, bool) -> FilterStatus { - dispatcher_->exit(); - return FilterStatus::StopIteration; - })); + EXPECT_CALL(*client_read_filter, onData(_, false)).WillOnce(Invoke(on_filter_data_exit)); dispatcher_->run(Event::Dispatcher::RunType::Block); EXPECT_TRUE(testClientConnection()->readBuffer().highWatermarkTriggered()); @@ -841,11 +842,7 @@ TEST_P(ConnectionImplTest, ReadWatermarks) { { Buffer::OwnedImpl buffer("bye"); server_connection_->write(buffer, false); - EXPECT_CALL(*client_read_filter, onData(_, false)) - .WillOnce(Invoke([&](Buffer::Instance&, bool) -> FilterStatus { - dispatcher_->exit(); - return FilterStatus::StopIteration; - })); + EXPECT_CALL(*client_read_filter, onData(_, false)).WillOnce(Invoke(on_filter_data_exit)); dispatcher_->run(Event::Dispatcher::RunType::Block); EXPECT_TRUE(testClientConnection()->readBuffer().highWatermarkTriggered()); @@ -877,8 +874,8 @@ TEST_P(ConnectionImplTest, ReadWatermarks) { client_connection_->readDisable(false); return FilterStatus::StopIteration; })) - .WillRepeatedly(Return(FilterStatus::StopIteration)); - dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + .WillRepeatedly(Invoke(on_filter_data_exit)); + dispatcher_->run(Event::Dispatcher::RunType::Block); } // Test the same logic for dispatched_buffered_data from the @@ -909,8 +906,8 @@ TEST_P(ConnectionImplTest, ReadWatermarks) { client_connection_->readDisable(false); return FilterStatus::StopIteration; })) - .WillRepeatedly(Return(FilterStatus::StopIteration)); - dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + .WillRepeatedly(Invoke(on_filter_data_exit)); + dispatcher_->run(Event::Dispatcher::RunType::Block); } disconnect(true); @@ -1133,8 +1130,8 @@ TEST_P(ConnectionImplTest, BindFailureTest) { new Network::Address::Ipv6Instance(address_string, 0)}; } dispatcher_ = api_->allocateDispatcher("test_thread"); - socket_ = std::make_shared(Network::Test::getAnyAddress(GetParam()), - nullptr, true); + socket_ = std::make_shared( + Network::Test::getCanonicalLoopbackAddress(GetParam()), nullptr, true); listener_ = dispatcher_->createListener(socket_, listener_callbacks_, true); client_connection_ = dispatcher_->createClientConnection( @@ -2239,8 +2236,8 @@ class ReadBufferLimitTest : public ConnectionImplTest { void readBufferLimitTest(uint32_t read_buffer_limit, uint32_t expected_chunk_size) { const uint32_t buffer_size = 256 * 1024; dispatcher_ = api_->allocateDispatcher("test_thread"); - socket_ = std::make_shared(Network::Test::getAnyAddress(GetParam()), - nullptr, true); + socket_ = std::make_shared( + Network::Test::getCanonicalLoopbackAddress(GetParam()), nullptr, true); listener_ = dispatcher_->createListener(socket_, listener_callbacks_, true); client_connection_ = dispatcher_->createClientConnection( diff --git a/test/common/network/listener_impl_test.cc b/test/common/network/listener_impl_test.cc index 5708f826e73d..5aaef758ce4a 100644 --- a/test/common/network/listener_impl_test.cc +++ b/test/common/network/listener_impl_test.cc @@ -207,8 +207,8 @@ TEST_P(ListenerImplTest, GlobalConnectionLimitEnforcement) { } TEST_P(ListenerImplTest, WildcardListenerUseActualDst) { - auto socket = - std::make_shared(Network::Test::getAnyAddress(version_), nullptr, true); + auto socket = std::make_shared( + Network::Test::getCanonicalLoopbackAddress(version_), nullptr, true); Network::MockListenerCallbacks 
listener_callbacks; Network::MockConnectionHandler connection_handler; // Do not redirect since use_original_dst is false. @@ -284,8 +284,8 @@ TEST_P(ListenerImplTest, WildcardListenerIpv4Compat) { TEST_P(ListenerImplTest, DisableAndEnableListener) { testing::InSequence s1; - auto socket = - std::make_shared(Network::Test::getAnyAddress(version_), nullptr, true); + auto socket = std::make_shared( + Network::Test::getCanonicalLoopbackAddress(version_), nullptr, true); MockListenerCallbacks listener_callbacks; MockConnectionCallbacks connection_callbacks; TestListenerImpl listener(dispatcherImpl(), socket, listener_callbacks, true); From 0cb1e86fd6a9b293b0023a0b9d8208646d7d6f39 Mon Sep 17 00:00:00 2001 From: Sunjay Bhatia Date: Wed, 5 Aug 2020 19:43:56 -0400 Subject: [PATCH 864/909] test: Fix watcher_impl_test on Windows (#12496) - Tests that used a non-blocking libevent event loop are flaky on Windows (and would be flaky on other platforms if event notifications routinely took longer to be propagated) since the event loop could exit before an event notification. Switching to use a blocking event loop prevents early exit before filesystem events are evaluated. - Skip SymlinkAtomicRename test as Windows does not have an atomic file move API that can move a directory/symlink where the new name is a non-empty existing directory/symlink (MoveFileEx can atomically replace a file with a file, however). Signed-off-by: William A Rowe Jr Co-authored-by: Sunjay Bhatia Co-authored-by: William A Rowe Jr --- source/common/event/libevent_scheduler.cc | 2 +- test/common/filesystem/BUILD | 1 - test/common/filesystem/watcher_impl_test.cc | 8 ++++++-- test/test_common/environment.cc | 1 + 4 files changed, 8 insertions(+), 4 deletions(-) diff --git a/source/common/event/libevent_scheduler.cc b/source/common/event/libevent_scheduler.cc index 5d6be40e7d60..6e675f3ffe04 100644 --- a/source/common/event/libevent_scheduler.cc +++ b/source/common/event/libevent_scheduler.cc @@ -43,7 +43,7 @@ void LibeventScheduler::run(Dispatcher::RunType mode) { // This is because libevent only supports level triggering on Windows, and so the write // event callbacks will trigger every time through the loop. 
Adding EVLOOP_ONCE ensures the // loop will run at most once - flag |= EVLOOP_NONBLOCK | EVLOOP_ONCE; + flag |= EVLOOP_ONCE; #endif break; case Dispatcher::RunType::Block: diff --git a/test/common/filesystem/BUILD b/test/common/filesystem/BUILD index 68f4eca5f716..82e28ebda60d 100644 --- a/test/common/filesystem/BUILD +++ b/test/common/filesystem/BUILD @@ -29,7 +29,6 @@ envoy_cc_test( envoy_cc_test( name = "watcher_impl_test", srcs = ["watcher_impl_test.cc"], - tags = ["fails_on_windows"], deps = [ "//source/common/common:assert_lib", "//source/common/event:dispatcher_includes", diff --git a/test/common/filesystem/watcher_impl_test.cc b/test/common/filesystem/watcher_impl_test.cc index 64133ab70249..7928de7d2b4f 100644 --- a/test/common/filesystem/watcher_impl_test.cc +++ b/test/common/filesystem/watcher_impl_test.cc @@ -75,7 +75,6 @@ TEST_F(WatcherImplTest, Create) { { std::ofstream file(TestEnvironment::temporaryPath("envoy_test/watcher_target")); } WatchCallback callback; - EXPECT_CALL(callback, called(Watcher::Events::MovedTo)); watcher->addWatch(TestEnvironment::temporaryPath("envoy_test/watcher_link"), Watcher::Events::MovedTo, [&](uint32_t events) -> void { callback.called(events); @@ -85,6 +84,7 @@ TEST_F(WatcherImplTest, Create) { { std::ofstream file(TestEnvironment::temporaryPath("envoy_test/other_file")); } dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + EXPECT_CALL(callback, called(Watcher::Events::MovedTo)); TestEnvironment::createSymlink(TestEnvironment::temporaryPath("envoy_test/watcher_target"), TestEnvironment::temporaryPath("envoy_test/watcher_new_link")); TestEnvironment::renameFile(TestEnvironment::temporaryPath("envoy_test/watcher_new_link"), @@ -109,7 +109,7 @@ TEST_F(WatcherImplTest, Modify) { file << "text" << std::flush; file.close(); EXPECT_CALL(callback, called(Watcher::Events::Modified)); - dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + dispatcher_->run(Event::Dispatcher::RunType::Block); } TEST_F(WatcherImplTest, BadPath) { @@ -152,6 +152,9 @@ TEST_F(WatcherImplTest, RootDirectoryPath) { #endif } +// Skipping this test on Windows as there is no Windows API able to atomically move a +// directory/symlink when the new name is a non-empty directory +#ifndef WIN32 TEST_F(WatcherImplTest, SymlinkAtomicRename) { Filesystem::WatcherPtr watcher = dispatcher_->createFilesystemWatcher(); @@ -181,6 +184,7 @@ TEST_F(WatcherImplTest, SymlinkAtomicRename) { dispatcher_->run(Event::Dispatcher::RunType::Block); } +#endif } // namespace Filesystem } // namespace Envoy diff --git a/test/test_common/environment.cc b/test/test_common/environment.cc index 5938d4452c00..9008d13091f7 100644 --- a/test/test_common/environment.cc +++ b/test/test_common/environment.cc @@ -156,6 +156,7 @@ void TestEnvironment::renameFile(const std::string& old_name, const std::string& #ifdef WIN32 // use MoveFileEx, since ::rename will not overwrite an existing file. See // https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/rename-wrename?view=vs-2017 + // Note MoveFileEx cannot overwrite a directory as documented, nor a symlink, apparently. const BOOL rc = ::MoveFileEx(old_name.c_str(), new_name.c_str(), MOVEFILE_REPLACE_EXISTING); ASSERT_NE(0, rc); #else From df7c1672b30c75555fe5504f593aeb97f49d1b94 Mon Sep 17 00:00:00 2001 From: Kuat Date: Wed, 5 Aug 2020 17:04:49 -0700 Subject: [PATCH 865/909] xds: keep a reference to old config in the filter config provider (#12479) Some filter factories allocate TLS slots shared via shared pointers. 
On a filter config update, the last filter factory reference happens to be deleted on a worker thread, which causes a runtime failure since TLS slots must be deleted on the main thread. The solution is to prolong the life of the filter factory using main thread completion callback. Signed-off-by: Kuat Yessenov --- .../http/filter_config_discovery_impl.cc | 24 ++++--- .../http/filter_config_discovery_impl.h | 3 + test/integration/BUILD | 4 +- .../extension_discovery_integration_test.cc | 61 +++++------------- test/integration/filters/BUILD | 20 ++++++ .../filters/set_response_code_filter.cc | 64 +++++++++++++++++++ .../set_response_code_filter_config.proto | 10 +++ 7 files changed, 129 insertions(+), 57 deletions(-) create mode 100644 test/integration/filters/set_response_code_filter.cc create mode 100644 test/integration/filters/set_response_code_filter_config.proto diff --git a/source/common/filter/http/filter_config_discovery_impl.cc b/source/common/filter/http/filter_config_discovery_impl.cc index 2084f8e679e7..32b7e3e8b038 100644 --- a/source/common/filter/http/filter_config_discovery_impl.cc +++ b/source/common/filter/http/filter_config_discovery_impl.cc @@ -54,15 +54,21 @@ void DynamicFilterConfigProviderImpl::validateConfig( void DynamicFilterConfigProviderImpl::onConfigUpdate(Envoy::Http::FilterFactoryCb config, const std::string&, Config::ConfigAppliedCb cb) { - tls_->runOnAllThreads([config, cb](ThreadLocal::ThreadLocalObjectSharedPtr previous) - -> ThreadLocal::ThreadLocalObjectSharedPtr { - auto prev_config = std::dynamic_pointer_cast(previous); - prev_config->config_ = config; - if (cb) { - cb(); - } - return previous; - }); + tls_->runOnAllThreads( + [config, cb](ThreadLocal::ThreadLocalObjectSharedPtr previous) + -> ThreadLocal::ThreadLocalObjectSharedPtr { + auto prev_config = std::dynamic_pointer_cast(previous); + prev_config->config_ = config; + if (cb) { + cb(); + } + return previous; + }, + [this, config]() { + // This happens after all workers have discarded the previous config so it can be safely + // deleted on the main thread by an update with the new config. + this->current_config_ = config; + }); } FilterConfigSubscription::FilterConfigSubscription( diff --git a/source/common/filter/http/filter_config_discovery_impl.h b/source/common/filter/http/filter_config_discovery_impl.h index 7e1229cd2e7c..43a75542d138 100644 --- a/source/common/filter/http/filter_config_discovery_impl.h +++ b/source/common/filter/http/filter_config_discovery_impl.h @@ -52,6 +52,9 @@ class DynamicFilterConfigProviderImpl : public FilterConfigProvider { FilterConfigSubscriptionSharedPtr subscription_; const std::set require_type_urls_; + // Currently applied configuration to ensure that the main thread deletes the last reference to + // it. 
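The runOnAllThreads() call above is the core of the fix, and its thread affinity is easy to miss. The annotated shape below is an illustrative sketch, assuming the two-callback overload used in this diff, where the second callback runs on the main thread only after every worker has executed the first.

tls_->runOnAllThreads(
    // Runs once per worker thread: point the thread-local object at the new factory and fire
    // the config-applied callback.
    [config, cb](ThreadLocal::ThreadLocalObjectSharedPtr previous)
        -> ThreadLocal::ThreadLocalObjectSharedPtr {
      // ... update the thread-local config to `config`, invoke `cb` if set ...
      return previous;
    },
    // Runs on the main thread, after all workers are done: taking over the reference here
    // guarantees the previous FilterFactoryCb (and any TLS slot it owns) is destroyed on the
    // main thread rather than on whichever worker happened to drop the last reference.
    [this, config]() { current_config_ = config; });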
+ absl::optional current_config_{absl::nullopt}; ThreadLocal::SlotPtr tls_; // Local initialization target to ensure that the subscription starts in diff --git a/test/integration/BUILD b/test/integration/BUILD index 477a428bffce..f680952718e5 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -884,10 +884,10 @@ envoy_cc_test( tags = ["fails_on_windows"], deps = [ ":http_integration_lib", - "//source/extensions/filters/http/rbac:config", "//test/common/grpc:grpc_client_integration_lib", + "//test/integration/filters:set_response_code_filter_config_proto_cc_proto", + "//test/integration/filters:set_response_code_filter_lib", "//test/test_common:utility_lib", - "@envoy_api//envoy/extensions/filters/http/rbac/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", "@envoy_api//envoy/service/extension/v3:pkg_cc_proto", ], diff --git a/test/integration/extension_discovery_integration_test.cc b/test/integration/extension_discovery_integration_test.cc index 0a0fa4559ec7..467922b3e123 100644 --- a/test/integration/extension_discovery_integration_test.cc +++ b/test/integration/extension_discovery_integration_test.cc @@ -1,8 +1,8 @@ -#include "envoy/extensions/filters/http/rbac/v3/rbac.pb.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" #include "envoy/service/extension/v3/config_discovery.pb.h" #include "test/common/grpc/grpc_client_integration.h" +#include "test/integration/filters/set_response_code_filter_config.pb.h" #include "test/integration/http_integration.h" #include "test/test_common/utility.h" @@ -13,38 +13,14 @@ namespace { std::string denyPrivateConfig() { return R"EOF( - rules: - action: DENY - policies: - "test": - permissions: - - url_path: { path: { prefix: "/private" } } - principals: - - any: true + prefix: "/private" + code: 403 )EOF"; } -std::string allowAllConfig() { - return R"EOF( - rules: - action: ALLOW - policies: - "test": - permissions: - - any: true - principals: - - any: true -)EOF"; -} +std::string allowAllConfig() { return "code: 200"; } -std::string invalidConfig() { - return R"EOF( - rules: - action: DENY - policies: - "test": {} -)EOF"; -} +std::string invalidConfig() { return "code: 90"; } class ExtensionDiscoveryIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, public HttpIntegrationTest { @@ -62,20 +38,12 @@ class ExtensionDiscoveryIntegrationTest : public Grpc::GrpcClientIntegrationPara filter->set_name(name); auto* discovery = filter->mutable_config_discovery(); discovery->add_type_urls( - "type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBAC"); + "type.googleapis.com/test.integration.filters.SetResponseCodeFilterConfig"); if (set_default_config) { - const auto rbac_configuration = - TestUtility::parseYaml(R"EOF( - rules: - action: DENY - policies: - "test": - permissions: - - any: true - principals: - - any: true - )EOF"); - discovery->mutable_default_config()->PackFrom(rbac_configuration); + const auto default_configuration = + TestUtility::parseYaml( + "code: 403"); + discovery->mutable_default_config()->PackFrom(default_configuration); } discovery->set_apply_default_config_without_warming(apply_without_warming); auto* api_config_source = discovery->mutable_config_source()->mutable_api_config_source(); @@ -145,15 +113,16 @@ class ExtensionDiscoveryIntegrationTest : public Grpc::GrpcClientIntegrationPara } void sendXdsResponse(const std::string& name, const std::string& version, - const std::string& rbac_config) { + 
const std::string& yaml_config) { envoy::service::discovery::v3::DiscoveryResponse response; response.set_version_info(version); response.set_type_url("type.googleapis.com/envoy.config.core.v3.TypedExtensionConfig"); - const auto rbac_configuration = - TestUtility::parseYaml(rbac_config); + const auto configuration = + TestUtility::parseYaml( + yaml_config); envoy::config::core::v3::TypedExtensionConfig typed_config; typed_config.set_name(name); - typed_config.mutable_typed_config()->PackFrom(rbac_configuration); + typed_config.mutable_typed_config()->PackFrom(configuration); response.add_resources()->PackFrom(typed_config); ecds_stream_->sendGrpcMessage(response); } diff --git a/test/integration/filters/BUILD b/test/integration/filters/BUILD index 516b02f8c100..197dfc897cfc 100644 --- a/test/integration/filters/BUILD +++ b/test/integration/filters/BUILD @@ -2,6 +2,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_test_library", "envoy_package", + "envoy_proto_library", ) licenses(["notice"]) # Apache 2 @@ -172,6 +173,25 @@ envoy_cc_test_library( ], ) +envoy_cc_test_library( + name = "set_response_code_filter_lib", + srcs = [ + "set_response_code_filter.cc", + ], + deps = [ + ":set_response_code_filter_config_proto_cc_proto", + "//include/envoy/http:filter_interface", + "//include/envoy/registry", + "//source/extensions/filters/http/common:factory_base_lib", + "//source/extensions/filters/http/common:pass_through_filter_lib", + ], +) + +envoy_proto_library( + name = "set_response_code_filter_config_proto", + srcs = [":set_response_code_filter_config.proto"], +) + envoy_cc_test_library( name = "stop_iteration_and_continue", srcs = [ diff --git a/test/integration/filters/set_response_code_filter.cc b/test/integration/filters/set_response_code_filter.cc new file mode 100644 index 000000000000..28653c0ba080 --- /dev/null +++ b/test/integration/filters/set_response_code_filter.cc @@ -0,0 +1,64 @@ +#include + +#include "envoy/http/filter.h" +#include "envoy/registry/registry.h" + +#include "extensions/filters/http/common/factory_base.h" +#include "extensions/filters/http/common/pass_through_filter.h" + +#include "test/integration/filters/set_response_code_filter_config.pb.h" +#include "test/integration/filters/set_response_code_filter_config.pb.validate.h" + +#include "absl/strings/match.h" + +namespace Envoy { + +// A test filter that responds directly with a code on a prefix match. +class SetResponseCodeFilterConfig { +public: + SetResponseCodeFilterConfig(const std::string& prefix, uint32_t code, + Server::Configuration::FactoryContext& context) + : prefix_(prefix), code_(code), tls_slot_(context.threadLocal().allocateSlot()) {} + + const std::string prefix_; + const uint32_t code_; + // Allocate a slot to validate that it is destroyed on a main thread only. 
+ ThreadLocal::SlotPtr tls_slot_; +}; + +class SetResponseCodeFilter : public Http::PassThroughFilter { +public: + SetResponseCodeFilter(std::shared_ptr config) : config_(config) {} + + Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers, bool) override { + if (absl::StartsWith(headers.Path()->value().getStringView(), config_->prefix_)) { + decoder_callbacks_->sendLocalReply(static_cast(config_->code_), "", nullptr, + absl::nullopt, ""); + return Http::FilterHeadersStatus::StopIteration; + } + return Http::FilterHeadersStatus::Continue; + } + +private: + const std::shared_ptr config_; +}; + +class SetResponseCodeFilterFactory : public Extensions::HttpFilters::Common::FactoryBase< + test::integration::filters::SetResponseCodeFilterConfig> { +public: + SetResponseCodeFilterFactory() : FactoryBase("set-response-code-filter") {} + +private: + Http::FilterFactoryCb createFilterFactoryFromProtoTyped( + const test::integration::filters::SetResponseCodeFilterConfig& proto_config, + const std::string&, Server::Configuration::FactoryContext& context) override { + auto filter_config = std::make_shared( + proto_config.prefix(), proto_config.code(), context); + return [filter_config](Http::FilterChainFactoryCallbacks& callbacks) -> void { + callbacks.addStreamFilter(std::make_shared(filter_config)); + }; + } +}; + +REGISTER_FACTORY(SetResponseCodeFilterFactory, Server::Configuration::NamedHttpFilterConfigFactory); +} // namespace Envoy diff --git a/test/integration/filters/set_response_code_filter_config.proto b/test/integration/filters/set_response_code_filter_config.proto new file mode 100644 index 000000000000..f952981ab7a4 --- /dev/null +++ b/test/integration/filters/set_response_code_filter_config.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package test.integration.filters; + +import "validate/validate.proto"; + +message SetResponseCodeFilterConfig { + string prefix = 1; + uint32 code = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}]; +} From baa1417b24c2d10b17c4c05c2211f6cfb4ba3b82 Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Wed, 5 Aug 2020 19:20:53 -0700 Subject: [PATCH 866/909] tidy: fix clang-diagnostic-errors (#12507) Signed-off-by: Lizan Zhou --- include/envoy/http/BUILD | 1 + include/envoy/ssl/connection.h | 1 + source/common/http/BUILD | 5 ++++- source/common/protobuf/well_known.h | 2 ++ source/extensions/common/wasm/BUILD | 1 + source/extensions/common/wasm/wasm_vm_base.h | 2 +- source/extensions/filters/http/dynamo/config.h | 1 + source/extensions/filters/http/on_demand/config.h | 1 + source/extensions/filters/network/kafka/BUILD | 2 ++ source/extensions/health_checkers/redis/BUILD | 1 + .../quiche/platform/spdy_string_utils_impl.h | 8 ++++++++ 11 files changed, 23 insertions(+), 2 deletions(-) diff --git a/include/envoy/http/BUILD b/include/envoy/http/BUILD index 41d7af731db8..f17ce1cb5e14 100644 --- a/include/envoy/http/BUILD +++ b/include/envoy/http/BUILD @@ -91,6 +91,7 @@ envoy_cc_library( deps = [ ":header_map_interface", "//include/envoy/network:address_interface", + "//include/envoy/stream_info:filter_state_interface", ], ) diff --git a/include/envoy/ssl/connection.h b/include/envoy/ssl/connection.h index ab27b0cd5b33..8241c48ad8d7 100644 --- a/include/envoy/ssl/connection.h +++ b/include/envoy/ssl/connection.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include "envoy/common/pure.h" diff --git a/source/common/http/BUILD b/source/common/http/BUILD index 9542d9eb4a93..3cc2fec33737 100644 --- a/source/common/http/BUILD +++ b/source/common/http/BUILD 
@@ -247,7 +247,10 @@ envoy_cc_library( envoy_cc_library( name = "exception_lib", hdrs = ["exception.h"], - deps = ["//include/envoy/http:header_map_interface"], + deps = [ + "//include/envoy/http:codes_interface", + "//include/envoy/http:header_map_interface", + ], ) envoy_cc_library( diff --git a/source/common/protobuf/well_known.h b/source/common/protobuf/well_known.h index 86905f3b63c0..dcd2a9a82b4b 100644 --- a/source/common/protobuf/well_known.h +++ b/source/common/protobuf/well_known.h @@ -1,5 +1,7 @@ #pragma once +#include + namespace Envoy { namespace ProtobufWellKnown { diff --git a/source/extensions/common/wasm/BUILD b/source/extensions/common/wasm/BUILD index 6e034dbda256..e594ac846209 100644 --- a/source/extensions/common/wasm/BUILD +++ b/source/extensions/common/wasm/BUILD @@ -30,6 +30,7 @@ envoy_cc_library( name = "wasm_vm_base", hdrs = ["wasm_vm_base.h"], deps = [ + ":wasm_vm_interface", "//source/common/stats:stats_lib", ], ) diff --git a/source/extensions/common/wasm/wasm_vm_base.h b/source/extensions/common/wasm/wasm_vm_base.h index a709534cba52..a780af5c8dcc 100644 --- a/source/extensions/common/wasm/wasm_vm_base.h +++ b/source/extensions/common/wasm/wasm_vm_base.h @@ -37,7 +37,7 @@ class WasmVmBase : public WasmVm { stats_.active_.inc(); ENVOY_LOG(debug, "WasmVm created {} now active", runtime_, stats_.active_.value()); } - virtual ~WasmVmBase() { + ~WasmVmBase() override { stats_.active_.dec(); ENVOY_LOG(debug, "~WasmVm {} {} remaining active", runtime_, stats_.active_.value()); } diff --git a/source/extensions/filters/http/dynamo/config.h b/source/extensions/filters/http/dynamo/config.h index 2638b3f76a41..551438e67454 100644 --- a/source/extensions/filters/http/dynamo/config.h +++ b/source/extensions/filters/http/dynamo/config.h @@ -3,6 +3,7 @@ #include #include "envoy/extensions/filters/http/dynamo/v3/dynamo.pb.h" +#include "envoy/extensions/filters/http/dynamo/v3/dynamo.pb.validate.h" #include "envoy/server/filter_config.h" #include "extensions/filters/http/common/factory_base.h" diff --git a/source/extensions/filters/http/on_demand/config.h b/source/extensions/filters/http/on_demand/config.h index 1f63e9cc53f4..88556a869470 100644 --- a/source/extensions/filters/http/on_demand/config.h +++ b/source/extensions/filters/http/on_demand/config.h @@ -1,6 +1,7 @@ #pragma once #include "envoy/config/filter/http/on_demand/v2/on_demand.pb.h" +#include "envoy/config/filter/http/on_demand/v2/on_demand.pb.validate.h" #include "extensions/filters/http/common/factory_base.h" #include "extensions/filters/http/well_known_names.h" diff --git a/source/extensions/filters/network/kafka/BUILD b/source/extensions/filters/network/kafka/BUILD index 3c338ff751c6..cc625b61fc14 100644 --- a/source/extensions/filters/network/kafka/BUILD +++ b/source/extensions/filters/network/kafka/BUILD @@ -97,6 +97,7 @@ envoy_cc_library( ], deps = [ ":serialization_lib", + ":tagged_fields_lib", ], ) @@ -160,6 +161,7 @@ envoy_cc_library( ], deps = [ ":serialization_lib", + ":tagged_fields_lib", ], ) diff --git a/source/extensions/health_checkers/redis/BUILD b/source/extensions/health_checkers/redis/BUILD index cd852d4f78ec..3bc89797ab32 100644 --- a/source/extensions/health_checkers/redis/BUILD +++ b/source/extensions/health_checkers/redis/BUILD @@ -49,6 +49,7 @@ envoy_cc_library( name = "utility", hdrs = ["utility.h"], deps = [ + "//source/common/config:utility_lib", "//source/common/protobuf", "//source/common/protobuf:utility_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", diff --git 
a/source/extensions/quic_listeners/quiche/platform/spdy_string_utils_impl.h b/source/extensions/quic_listeners/quiche/platform/spdy_string_utils_impl.h index 08884d56b8f8..41fa3cad815f 100644 --- a/source/extensions/quic_listeners/quiche/platform/spdy_string_utils_impl.h +++ b/source/extensions/quic_listeners/quiche/platform/spdy_string_utils_impl.h @@ -13,32 +13,40 @@ #include "absl/strings/str_cat.h" #include "absl/strings/str_format.h" #include "fmt/printf.h" +#include "quiche/common/platform/api/quiche_string_piece.h" namespace spdy { template +// NOLINTNEXTLINE(readability-identifier-naming) inline void SpdyStrAppendImpl(std::string* output, const Args&... args) { absl::StrAppend(output, std::forward(args)...); } +// NOLINTNEXTLINE(readability-identifier-naming) inline char SpdyHexDigitToIntImpl(char c) { return quiche::HexDigitToInt(c); } +// NOLINTNEXTLINE(readability-identifier-naming) inline std::string SpdyHexDecodeImpl(absl::string_view data) { return absl::HexStringToBytes(data); } +// NOLINTNEXTLINE(readability-identifier-naming) inline bool SpdyHexDecodeToUInt32Impl(absl::string_view data, uint32_t* out) { return quiche::HexDecodeToUInt32(data, out); } +// NOLINTNEXTLINE(readability-identifier-naming) inline std::string SpdyHexEncodeImpl(const void* bytes, size_t size) { return absl::BytesToHexString(absl::string_view(static_cast(bytes), size)); } +// NOLINTNEXTLINE(readability-identifier-naming) inline std::string SpdyHexEncodeUInt32AndTrimImpl(uint32_t data) { return absl::StrCat(absl::Hex(data)); } +// NOLINTNEXTLINE(readability-identifier-naming) inline std::string SpdyHexDumpImpl(absl::string_view data) { return quiche::HexDump(data); } struct SpdyStringPieceCaseHashImpl { From 14dae5791f84fef7983ffefca39958cf8b7ac2a3 Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Thu, 6 Aug 2020 03:48:50 -0700 Subject: [PATCH 867/909] tidy: fix auto fixable errors (#12514) Signed-off-by: Lizan Zhou --- source/common/common/basic_resource_impl.h | 4 ++-- source/common/filesystem/posix/directory_iterator_impl.cc | 3 +-- source/common/protobuf/utility.cc | 4 ++-- source/common/stats/symbol_table_impl.h | 2 +- source/extensions/filters/network/kafka/serialization.h | 4 +--- .../quiche/platform/quic_mem_slice_span_impl.h | 2 +- test/common/http/conn_manager_impl_test.cc | 4 ++-- test/extensions/filters/common/lua/lua_wrappers.h | 4 +++- test/integration/http_subset_lb_integration_test.cc | 6 +++--- test/integration/transport_socket_match_integration_test.cc | 5 ++--- 10 files changed, 18 insertions(+), 20 deletions(-) diff --git a/source/common/common/basic_resource_impl.h b/source/common/common/basic_resource_impl.h index 8fe93aaabcb9..820412e04a88 100644 --- a/source/common/common/basic_resource_impl.h +++ b/source/common/common/basic_resource_impl.h @@ -23,8 +23,8 @@ class BasicResourceLimitImpl : public ResourceLimit { public: BasicResourceLimitImpl(uint64_t max, Runtime::Loader& runtime, const std::string& runtime_key) : max_(max), runtime_(&runtime), runtime_key_(runtime_key) {} - BasicResourceLimitImpl(uint64_t max) : max_(max), runtime_(nullptr) {} - BasicResourceLimitImpl() : max_(std::numeric_limits::max()), runtime_(nullptr) {} + BasicResourceLimitImpl(uint64_t max) : max_(max) {} + BasicResourceLimitImpl() : max_(std::numeric_limits::max()) {} bool canCreate() override { return current_.load() < max(); } diff --git a/source/common/filesystem/posix/directory_iterator_impl.cc b/source/common/filesystem/posix/directory_iterator_impl.cc index f1808242feed..6e8906d5c3d1 100644 --- 
a/source/common/filesystem/posix/directory_iterator_impl.cc +++ b/source/common/filesystem/posix/directory_iterator_impl.cc @@ -8,8 +8,7 @@ namespace Envoy { namespace Filesystem { DirectoryIteratorImpl::DirectoryIteratorImpl(const std::string& directory_path) - : directory_path_(directory_path), dir_(nullptr), - os_sys_calls_(Api::OsSysCallsSingleton::get()) { + : directory_path_(directory_path), os_sys_calls_(Api::OsSysCallsSingleton::get()) { openDirectory(); nextEntry(); } diff --git a/source/common/protobuf/utility.cc b/source/common/protobuf/utility.cc index 09e2fcc82f81..288c3fc9620a 100644 --- a/source/common/protobuf/utility.cc +++ b/source/common/protobuf/utility.cc @@ -36,12 +36,12 @@ void blockFormat(YAML::Node node) { node.SetStyle(YAML::EmitterStyle::Block); if (node.Type() == YAML::NodeType::Sequence) { - for (auto it : node) { + for (const auto& it : node) { blockFormat(it); } } if (node.Type() == YAML::NodeType::Map) { - for (auto it : node) { + for (const auto& it : node) { blockFormat(it.second); } } diff --git a/source/common/stats/symbol_table_impl.h b/source/common/stats/symbol_table_impl.h index 09f79ac46cf5..816799461803 100644 --- a/source/common/stats/symbol_table_impl.h +++ b/source/common/stats/symbol_table_impl.h @@ -497,7 +497,7 @@ class StatNameManagedStorage : public StatNameStorage { // generate symbols for it. StatNameManagedStorage(absl::string_view name, SymbolTable& table) : StatNameStorage(name, table), symbol_table_(table) {} - StatNameManagedStorage(StatNameManagedStorage&& src) + StatNameManagedStorage(StatNameManagedStorage&& src) noexcept : StatNameStorage(std::move(src)), symbol_table_(src.symbol_table_) {} ~StatNameManagedStorage() { free(symbol_table_); } diff --git a/source/extensions/filters/network/kafka/serialization.h b/source/extensions/filters/network/kafka/serialization.h index 8d157172891a..8e833e67720d 100644 --- a/source/extensions/filters/network/kafka/serialization.h +++ b/source/extensions/filters/network/kafka/serialization.h @@ -66,8 +66,6 @@ template class Deserializer { */ template class IntDeserializer : public Deserializer { public: - IntDeserializer() : written_{0} {}; - uint32_t feed(absl::string_view& data) override { const uint32_t available = std::min(sizeof(buf_) - written_, data.size()); memcpy(buf_ + written_, data.data(), available); @@ -86,7 +84,7 @@ template class IntDeserializer : public Deserializer { protected: char buf_[sizeof(T) / sizeof(char)]; - uint32_t written_; + uint32_t written_{0}; bool ready_{false}; }; diff --git a/source/extensions/quic_listeners/quiche/platform/quic_mem_slice_span_impl.h b/source/extensions/quic_listeners/quiche/platform/quic_mem_slice_span_impl.h index 60917fcd0d7c..1824fb8d1fa5 100644 --- a/source/extensions/quic_listeners/quiche/platform/quic_mem_slice_span_impl.h +++ b/source/extensions/quic_listeners/quiche/platform/quic_mem_slice_span_impl.h @@ -20,7 +20,7 @@ namespace quic { // Wraps a Buffer::Instance and deliver its data with minimum number of copies. class QuicMemSliceSpanImpl { public: - QuicMemSliceSpanImpl() : buffer_(nullptr) {} + QuicMemSliceSpanImpl() = default; /** * @param buffer has to outlive the life time of this class. 
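Most of the hunks in this change apply a small number of recurring clang-tidy fixes. A condensed, self-contained illustration follows (example class, not from the Envoy tree):

#include <cstdint>
#include <memory>

struct Widget {};

class Example {
public:
  Example() = default; // was: Example() : counter_(0), widget_(nullptr) {}

  // modernize-make-unique: prefer std::make_unique over reset(new ...).
  void reset() { widget_ = std::make_unique<Widget>(); }

private:
  uint32_t counter_{0}; // default member initializer replaces the constructor init-list entry
  std::unique_ptr<Widget> widget_;
};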
*/ diff --git a/test/common/http/conn_manager_impl_test.cc b/test/common/http/conn_manager_impl_test.cc index 10bc9e6d3cb9..f9fca43ba0c4 100644 --- a/test/common/http/conn_manager_impl_test.cc +++ b/test/common/http/conn_manager_impl_test.cc @@ -97,7 +97,7 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan stats_({ALL_HTTP_CONN_MAN_STATS(POOL_COUNTER(fake_stats_), POOL_GAUGE(fake_stats_), POOL_HISTOGRAM(fake_stats_))}, "", fake_stats_), - tracing_stats_{CONN_MAN_TRACING_STATS(POOL_COUNTER(fake_stats_))}, + listener_stats_({CONN_MAN_LISTENER_STATS(POOL_COUNTER(fake_listener_stats_))}), request_id_extension_(RequestIDExtensionFactory::defaultInstance(random_)), local_reply_(LocalReply::Factory::createDefault()) { @@ -380,7 +380,7 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan MockServerConnection* codec_; NiceMock filter_factory_; ConnectionManagerStats stats_; - ConnectionManagerTracingStats tracing_stats_; + ConnectionManagerTracingStats tracing_stats_{CONN_MAN_TRACING_STATS(POOL_COUNTER(fake_stats_))}; NiceMock drain_close_; std::unique_ptr conn_manager_; std::string server_name_; diff --git a/test/extensions/filters/common/lua/lua_wrappers.h b/test/extensions/filters/common/lua/lua_wrappers.h index 4b2e7f1f8b0a..e13f1914c48d 100644 --- a/test/extensions/filters/common/lua/lua_wrappers.h +++ b/test/extensions/filters/common/lua/lua_wrappers.h @@ -1,5 +1,7 @@ #pragma once +#include + #include "extensions/filters/common/lua/lua.h" #include "test/mocks/thread_local/mocks.h" @@ -18,7 +20,7 @@ template class LuaWrappersTestBase : public testing::Test { public: virtual void setup(const std::string& code) { coroutine_.reset(); - state_.reset(new ThreadLocalState(code, tls_)); + state_ = std::make_unique(code, tls_); state_->registerType(); coroutine_ = state_->createCoroutine(); lua_pushlightuserdata(coroutine_->luaState(), this); diff --git a/test/integration/http_subset_lb_integration_test.cc b/test/integration/http_subset_lb_integration_test.cc index 4137ec95bbed..cd275c20a7c3 100644 --- a/test/integration/http_subset_lb_integration_test.cc +++ b/test/integration/http_subset_lb_integration_test.cc @@ -52,8 +52,8 @@ class HttpSubsetLbIntegrationTest : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, TestEnvironment::getIpVersionsForTest().front(), ConfigHelper::httpProxyConfig()), - num_hosts_{4}, is_hash_lb_(GetParam() == envoy::config::cluster::v3::Cluster::RING_HASH || - GetParam() == envoy::config::cluster::v3::Cluster::MAGLEV) { + is_hash_lb_(GetParam() == envoy::config::cluster::v3::Cluster::RING_HASH || + GetParam() == envoy::config::cluster::v3::Cluster::MAGLEV) { autonomous_upstream_ = true; setUpstreamCount(num_hosts_); @@ -186,7 +186,7 @@ class HttpSubsetLbIntegrationTest } } - const uint32_t num_hosts_; + const uint32_t num_hosts_{4}; const bool is_hash_lb_; const std::string hash_header_{"x-hash"}; diff --git a/test/integration/transport_socket_match_integration_test.cc b/test/integration/transport_socket_match_integration_test.cc index 771e98df76f7..2456921be3e1 100644 --- a/test/integration/transport_socket_match_integration_test.cc +++ b/test/integration/transport_socket_match_integration_test.cc @@ -20,8 +20,7 @@ class TransportSockeMatchIntegrationTest : public testing::Test, public HttpInte TransportSockeMatchIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, TestEnvironment::getIpVersionsForTest().front(), - ConfigHelper::httpProxyConfig()), - num_hosts_{2} { + 
ConfigHelper::httpProxyConfig()) { autonomous_upstream_ = true; setUpstreamCount(num_hosts_); } @@ -163,7 +162,7 @@ require_client_certificate: true setUpstreamProtocol(FakeHttpConnection::Type::HTTP1); } - const uint32_t num_hosts_; + const uint32_t num_hosts_{2}; Http::TestRequestHeaderMapImpl type_a_request_headers_{{":method", "GET"}, {":path", "/test"}, {":scheme", "http"}, From 8a7ba1f630678cc075bc929bf5478f82d313e921 Mon Sep 17 00:00:00 2001 From: DongRyeol Cha Date: Fri, 7 Aug 2020 01:01:30 +0900 Subject: [PATCH 868/909] udp: prevent crashing the envoy if udpListener is empty (#11914) This change delete the read filter before udp listener deletion. Signed-off-by: DongRyeol Cha --- source/server/connection_handler_impl.h | 9 +++- test/server/connection_handler_test.cc | 62 +++++++++++++++++++++++++ 2 files changed, 70 insertions(+), 1 deletion(-) diff --git a/source/server/connection_handler_impl.h b/source/server/connection_handler_impl.h index 4fe28847be48..17c94ded87a3 100644 --- a/source/server/connection_handler_impl.h +++ b/source/server/connection_handler_impl.h @@ -361,7 +361,14 @@ class ActiveRawUdpListener : public Network::UdpListenerCallbacks, Network::Listener* listener() override { return udp_listener_.get(); } void pauseListening() override { udp_listener_->disable(); } void resumeListening() override { udp_listener_->enable(); } - void shutdownListener() override { udp_listener_.reset(); } + void shutdownListener() override { + // The read filter should be deleted before the UDP listener is deleted. + // The read filter refers to the UDP listener to send packets to downstream. + // If the UDP listener is deleted before the read filter, the read filter may try to use it + // after deletion. + read_filter_.reset(); + udp_listener_.reset(); + } // Network::UdpListenerFilterManager void addReadFilter(Network::UdpListenerReadFilterPtr&& filter) override; diff --git a/test/server/connection_handler_test.cc b/test/server/connection_handler_test.cc index 148874e612e3..7fcea249bbd4 100644 --- a/test/server/connection_handler_test.cc +++ b/test/server/connection_handler_test.cc @@ -128,6 +128,39 @@ class ConnectionHandlerTest : public testing::Test, protected Logger::Loggable; + class MockUpstreamUdpFilter : public Network::UdpListenerReadFilter { + public: + MockUpstreamUdpFilter(ConnectionHandlerTest& parent, Network::UdpReadFilterCallbacks& callbacks) + : UdpListenerReadFilter(callbacks), parent_(parent) {} + ~MockUpstreamUdpFilter() override { + parent_.deleted_before_listener_ = !parent_.udp_listener_deleted_; + } + + MOCK_METHOD(void, onData, (Network::UdpRecvData&), (override)); + MOCK_METHOD(void, onReceiveError, (Api::IoError::IoErrorCode), (override)); + + private: + ConnectionHandlerTest& parent_; + }; + + class MockUpstreamUdpListener : public Network::UdpListener { + public: + explicit MockUpstreamUdpListener(ConnectionHandlerTest& parent) : parent_(parent) { + ON_CALL(*this, dispatcher()).WillByDefault(ReturnRef(dispatcher_)); + } + ~MockUpstreamUdpListener() override { parent_.udp_listener_deleted_ = true; } + + MOCK_METHOD(void, enable, (), (override)); + MOCK_METHOD(void, disable, (), (override)); + MOCK_METHOD(Event::Dispatcher&, dispatcher, (), (override)); + MOCK_METHOD(Network::Address::InstanceConstSharedPtr&, localAddress, (), (const, override)); + MOCK_METHOD(Api::IoCallUint64Result, send, (const Network::UdpSendData&), (override)); + + private: + ConnectionHandlerTest& parent_; + Event::MockDispatcher dispatcher_; + }; + TestListener* addListener( 
uint64_t tag, bool bind_to_port, bool hand_off_restored_destination_connections, const std::string& name, Network::Listener* listener, @@ -190,6 +223,8 @@ class ConnectionHandlerTest : public testing::Test, protected Logger::Loggable os_sys_calls_; TestThreadsafeSingletonInjector os_calls_{&os_sys_calls_}; std::shared_ptr> listener_filter_matcher_; + bool udp_listener_deleted_ = false; + bool deleted_before_listener_ = false; }; // Verify that if a listener is removed while a rebalanced connection is in flight, we correctly @@ -1016,6 +1051,33 @@ TEST_F(ConnectionHandlerTest, ListenerFilterWorks) { EXPECT_CALL(*listener, onDestroy()); } +// The read_filter should be deleted before the udp_listener is deleted. +TEST_F(ConnectionHandlerTest, ShutdownUdpListener) { + InSequence s; + + Network::MockUdpReadFilterCallbacks dummy_callbacks; + auto listener = new NiceMock(*this); + TestListener* test_listener = + addListener(1, true, false, "test_listener", listener, nullptr, nullptr, nullptr, + Network::Socket::Type::Datagram, std::chrono::milliseconds(), false, nullptr); + auto filter = std::make_unique>(*this, dummy_callbacks); + + EXPECT_CALL(factory_, createUdpListenerFilterChain(_, _)) + .WillOnce(Invoke([&](Network::UdpListenerFilterManager& udp_listener, + Network::UdpReadFilterCallbacks&) -> bool { + udp_listener.addReadFilter(std::move(filter)); + return true; + })); + EXPECT_CALL(*socket_factory_, localAddress()).WillRepeatedly(ReturnRef(local_address_)); + EXPECT_CALL(dummy_callbacks.udp_listener_, onDestroy()); + + handler_->addListener(absl::nullopt, *test_listener); + handler_->stopListeners(); + + ASSERT_TRUE(deleted_before_listener_) + << "The read_filter_ should be deleted before the udp_listener_ is deleted."; +} + } // namespace } // namespace Server } // namespace Envoy From 6a71f16c664f2e3a874d2a991262d49e6ed485c8 Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Thu, 6 Aug 2020 09:05:27 -0700 Subject: [PATCH 869/909] build: fix several use of unintialized value detect by MSAN (#12506) Signed-off-by: Lizan Zhou --- bazel/foreign_cc/BUILD | 2 ++ bazel/foreign_cc/luajit.patch | 2 +- bazel/foreign_cc/moonjit.patch | 2 +- source/common/network/address_impl.cc | 1 + source/common/network/utility.cc | 1 + source/extensions/filters/udp/dns_filter/dns_parser.cc | 2 ++ source/extensions/tracers/xray/xray_configuration.h | 2 +- test/exe/main_common_test.cc | 3 ++- .../filters/network/postgres_proxy/postgres_decoder_test.cc | 2 +- 9 files changed, 12 insertions(+), 5 deletions(-) diff --git a/bazel/foreign_cc/BUILD b/bazel/foreign_cc/BUILD index 79bb436a57ee..316c8fef1e7c 100644 --- a/bazel/foreign_cc/BUILD +++ b/bazel/foreign_cc/BUILD @@ -45,6 +45,7 @@ configure_make( # https://github.com/envoyproxy/envoy/issues/6084 # TODO(htuch): Remove when #6084 is fixed "//bazel:asan_build": {"ENVOY_CONFIG_ASAN": "1"}, + "//bazel:msan_build": {"ENVOY_CONFIG_MSAN": "1"}, "//conditions:default": {}, }), lib_source = "@com_github_luajit_luajit//:all", @@ -65,6 +66,7 @@ configure_make( # https://github.com/envoyproxy/envoy/issues/6084 # TODO(htuch): Remove when #6084 is fixed "//bazel:asan_build": {"ENVOY_CONFIG_ASAN": "1"}, + "//bazel:msan_build": {"ENVOY_CONFIG_MSAN": "1"}, "//conditions:default": {}, }), lib_source = "@com_github_moonjit_moonjit//:all", diff --git a/bazel/foreign_cc/luajit.patch b/bazel/foreign_cc/luajit.patch index 035aa61094e2..5a6cefe29e09 100644 --- a/bazel/foreign_cc/luajit.patch +++ b/bazel/foreign_cc/luajit.patch @@ -88,7 +88,7 @@ index 0000000..9c71271 + + # 
Remove LuaJIT from ASAN for now. + # TODO(htuch): Remove this when https://github.com/envoyproxy/envoy/issues/6084 is resolved. -+ if "ENVOY_CONFIG_ASAN" in os.environ: ++ if "ENVOY_CONFIG_ASAN" in os.environ or "ENVOY_CONFIG_MSAN" in os.environ: + os.environ["TARGET_CFLAGS"] += " -fsanitize-blacklist=%s/com_github_luajit_luajit/clang-asan-blocklist.txt" % os.environ["PWD"] + with open("clang-asan-blocklist.txt", "w") as f: + f.write("fun:*\n") diff --git a/bazel/foreign_cc/moonjit.patch b/bazel/foreign_cc/moonjit.patch index b4593afdf111..d7a67050f170 100644 --- a/bazel/foreign_cc/moonjit.patch +++ b/bazel/foreign_cc/moonjit.patch @@ -79,7 +79,7 @@ index 0000000..9c71271 + + # Remove LuaJIT from ASAN for now. + # TODO(htuch): Remove this when https://github.com/envoyproxy/envoy/issues/6084 is resolved. -+ if "ENVOY_CONFIG_ASAN" in os.environ: ++ if "ENVOY_CONFIG_ASAN" in os.environ or "ENVOY_CONFIG_MSAN" in os.environ: + os.environ["TARGET_CFLAGS"] += " -fsanitize-blacklist=%s/com_github_moonjit_moonjit/clang-asan-blocklist.txt" % os.environ["PWD"] + with open("clang-asan-blocklist.txt", "w") as f: + f.write("fun:*\n") diff --git a/source/common/network/address_impl.cc b/source/common/network/address_impl.cc index 971322b21ccd..57d1317b7e4d 100644 --- a/source/common/network/address_impl.cc +++ b/source/common/network/address_impl.cc @@ -93,6 +93,7 @@ Address::InstanceConstSharedPtr addressFromSockAddr(const sockaddr_storage& ss, Ipv4Instance::Ipv4Instance(const sockaddr_in* address, absl::string_view sock_interface) : InstanceBase(Type::Ip, sock_interface) { + memset(&ip_.ipv4_.address_, 0, sizeof(ip_.ipv4_.address_)); ip_.ipv4_.address_ = *address; ip_.friendly_address_ = sockaddrToString(*address); diff --git a/source/common/network/utility.cc b/source/common/network/utility.cc index ed2c8be12d9f..15145ec7ef49 100644 --- a/source/common/network/utility.cc +++ b/source/common/network/utility.cc @@ -194,6 +194,7 @@ Address::InstanceConstSharedPtr Utility::parseInternetAddressAndPort(const std:: throwWithMalformedIp(ip_address); } sockaddr_in sa4; + memset(&sa4, 0, sizeof(sa4)); if (ip_str.empty() || inet_pton(AF_INET, ip_str.c_str(), &sa4.sin_addr) != 1) { throwWithMalformedIp(ip_address); } diff --git a/source/extensions/filters/udp/dns_filter/dns_parser.cc b/source/extensions/filters/udp/dns_filter/dns_parser.cc index 488d3952f5bc..b68016d19185 100644 --- a/source/extensions/filters/udp/dns_filter/dns_parser.cc +++ b/source/extensions/filters/udp/dns_filter/dns_parser.cc @@ -340,6 +340,7 @@ DnsAnswerRecordPtr DnsMessageParser::parseDnsAnswerRecord(const Buffer::Instance case DNS_RECORD_TYPE_A: if (available_bytes >= sizeof(uint32_t)) { sockaddr_in sa4; + memset(&sa4, 0, sizeof(sa4)); sa4.sin_addr.s_addr = buffer->peekLEInt(data_offset); ip_addr = std::make_shared(&sa4); data_offset += data_length; @@ -348,6 +349,7 @@ DnsAnswerRecordPtr DnsMessageParser::parseDnsAnswerRecord(const Buffer::Instance case DNS_RECORD_TYPE_AAAA: if (available_bytes >= sizeof(absl::uint128)) { sockaddr_in6 sa6; + memset(&sa6, 0, sizeof(sa6)); uint8_t* address6_bytes = reinterpret_cast(&sa6.sin6_addr.s6_addr); static constexpr size_t count = sizeof(absl::uint128) / sizeof(uint8_t); for (size_t index = 0; index < count; index++) { diff --git a/source/extensions/tracers/xray/xray_configuration.h b/source/extensions/tracers/xray/xray_configuration.h index 852f70561498..114ea398444a 100644 --- a/source/extensions/tracers/xray/xray_configuration.h +++ 
b/source/extensions/tracers/xray/xray_configuration.h @@ -27,7 +27,7 @@ enum class SamplingDecision { struct XRayHeader { std::string trace_id_; std::string parent_id_; - SamplingDecision sample_decision_; + SamplingDecision sample_decision_{}; }; } // namespace XRay diff --git a/test/exe/main_common_test.cc b/test/exe/main_common_test.cc index 1550a9e3feb3..39d0486683d7 100644 --- a/test/exe/main_common_test.cc +++ b/test/exe/main_common_test.cc @@ -152,7 +152,8 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, MainCommonDeathTest, TEST_P(MainCommonDeathTest, OutOfMemoryHandler) { #if defined(__clang_analyzer__) || (defined(__has_feature) && (__has_feature(thread_sanitizer) || \ - __has_feature(address_sanitizer))) + __has_feature(address_sanitizer) || \ + __has_feature(memory_sanitizer))) ENVOY_LOG_MISC(critical, "MainCommonTest::OutOfMemoryHandler not supported by this compiler configuration"); #else diff --git a/test/extensions/filters/network/postgres_proxy/postgres_decoder_test.cc b/test/extensions/filters/network/postgres_proxy/postgres_decoder_test.cc index 7a29494eb8ea..aa2d9ff2c7b7 100644 --- a/test/extensions/filters/network/postgres_proxy/postgres_decoder_test.cc +++ b/test/extensions/filters/network/postgres_proxy/postgres_decoder_test.cc @@ -41,7 +41,7 @@ class PostgresProxyDecoderTestBase { // fields often used Buffer::OwnedImpl data_; - char buf_[256]; + char buf_[256]{}; std::string payload_; }; From 432ee807210907d769c10de7af2e775d23502f36 Mon Sep 17 00:00:00 2001 From: antonio Date: Thu, 6 Aug 2020 13:12:38 -0400 Subject: [PATCH 870/909] dispatcher: Run zero-delay timeout timers on the next iteration of the event loop (#11823) Processing 0-delay timers in the same loop they are generated can result in long timer callback chains which could starve other operations in the event loop or even result in infinite processing loops. Cases that required same-iteration scheduling behavior for 0-delay timers were refactored to use SchedulableCallback::scheduleCallbackCurrentIteration in #11663, so behavior changes due to this change should be relatively minor. Signed-off-by: Antonio Vicente --- source/common/event/BUILD | 1 + source/common/event/libevent_scheduler.h | 43 ++++ source/common/event/timer_impl.cc | 15 +- source/common/event/timer_impl.h | 5 + source/common/runtime/runtime_features.cc | 1 + .../quic_listeners/quiche/envoy_quic_alarm.cc | 3 +- test/common/event/BUILD | 1 + test/common/event/dispatcher_impl_test.cc | 239 ++++++++++++++++-- test/test_common/BUILD | 1 + test/test_common/simulated_time_system.cc | 80 ++++-- test/test_common/simulated_time_system.h | 9 +- .../test_common/simulated_time_system_test.cc | 96 +++++-- 12 files changed, 419 insertions(+), 75 deletions(-) diff --git a/source/common/event/BUILD b/source/common/event/BUILD index 1a99e72fe7bd..23ccee3b5ff7 100644 --- a/source/common/event/BUILD +++ b/source/common/event/BUILD @@ -136,6 +136,7 @@ envoy_cc_library( ":libevent_lib", "//include/envoy/event:timer_interface", "//source/common/common:scope_tracker", + "//source/common/runtime:runtime_features_lib", ], ) diff --git a/source/common/event/libevent_scheduler.h b/source/common/event/libevent_scheduler.h index 748036114f5b..6059a0017bae 100644 --- a/source/common/event/libevent_scheduler.h +++ b/source/common/event/libevent_scheduler.h @@ -15,6 +15,49 @@ namespace Envoy { namespace Event { // Implements Scheduler based on libevent. +// +// Here is a rough summary of operations that libevent performs in each event loop iteration, in +// order. 
Note that the invocation order for "same-iteration" operations that execute as a group +// can be surprising and invocation order of expired timers is non-deterministic. +// Whenever possible, it is preferable to avoid making event invocation ordering assumptions. +// +// 1. Calculate the poll timeout by comparing the current time to the deadline of the closest +// timer (the one at head of the priority queue). +// 2. Run registered "prepare" callbacks. +// 3. Poll for fd events using the closest timer as timeout, add active fds to the work list. +// 4. Run registered "check" callbacks. +// 5. Check timer deadlines against current time and move expired timers from the timer priority +// queue to the work list. Expired timers are moved to the work list in a non-deterministic order. +// 6. Execute items in the work list until the list is empty. Note that additional work +// items could be added to the work list during execution of this step, more details below. +// 7. Goto 1 if the loop termination condition has not been reached. +// +// The following "same-iteration" work items are added directly to the work list when they are +// scheduled so they execute in the current iteration of the event loop. Note that there are no +// ordering guarantees when mixing the mechanisms below. Specifically, it is unsafe to assume that +// calling post followed by deferredDelete will result in the post callback being invoked before the +// deferredDelete; deferredDelete will run first if there is a pending deferredDeletion at the time +// the post callback is scheduled because deferredDelete invocation is grouped. +// - Event::Dispatcher::post(cb). Post callbacks are invoked as a group. +// - Event::Dispatcher::deferredDelete(object) and Event::DeferredTaskUtil::deferredRun(...). +// The same mechanism implements both of these operations, so they are invoked as a group. +// - Event::SchedulableCallback::scheduleCallbackCurrentIteration(). Each of these callbacks is +// scheduled and invoked independently. +// - Event::FileEvent::activate() if "envoy.reloadable_features.activate_fds_next_event_loop" +// runtime feature is disabled. +// - Event::Timer::enableTimer(0) if "envoy.reloadable_features.activate_timers_next_event_loop" +// runtime feature is disabled. +// +// Event::FileEvent::activate and Event::SchedulableCallback::scheduleCallbackNextIteration are +// implemented as libevent timers with a deadline of 0. Both of these actions are moved to the work +// list while checking for expired timers during step 5. +// +// Events execute in the following order, derived from the order in which items were added to the +// work list: +// 0. Events added via event_active prior to the start of the event loop (in tests) +// 1. Fd events +// 2. Timers, FileEvent::activate and SchedulableCallback::scheduleCallbackNextIteration +// 3. 
"Same-iteration" work items described above, including Event::Dispatcher::post callbacks class LibeventScheduler : public Scheduler, public CallbackScheduler { public: using OnPrepareCallback = std::function; diff --git a/source/common/event/timer_impl.cc b/source/common/event/timer_impl.cc index 6c71f3cfe5ac..56137dc8b2e3 100644 --- a/source/common/event/timer_impl.cc +++ b/source/common/event/timer_impl.cc @@ -3,6 +3,7 @@ #include #include "common/common/assert.h" +#include "common/runtime/runtime_features.h" #include "event2/event.h" @@ -10,7 +11,16 @@ namespace Envoy { namespace Event { TimerImpl::TimerImpl(Libevent::BasePtr& libevent, TimerCb cb, Dispatcher& dispatcher) - : cb_(cb), dispatcher_(dispatcher) { + : cb_(cb), dispatcher_(dispatcher), + activate_timers_next_event_loop_( + // Only read the runtime feature if the runtime loader singleton has already been created. + // Accessing runtime features too early in the initialization sequence triggers logging + // and the logging code itself depends on the use of timers. Attempts to log while + // initializing the logging subsystem will result in a crash. + Runtime::LoaderSingleton::getExisting() + ? Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.activate_timers_next_event_loop") + : true) { ASSERT(cb_); evtimer_assign( &raw_event_, libevent.get(), @@ -44,7 +54,8 @@ void TimerImpl::enableHRTimer(const std::chrono::microseconds& d, void TimerImpl::internalEnableTimer(const timeval& tv, const ScopeTrackedObject* object) { object_ = object; - if (tv.tv_sec == 0 && tv.tv_usec == 0) { + + if (!activate_timers_next_event_loop_ && tv.tv_sec == 0 && tv.tv_usec == 0) { event_active(&raw_event_, EV_TIMEOUT, 0); } else { event_add(&raw_event_, &tv); diff --git a/source/common/event/timer_impl.h b/source/common/event/timer_impl.h index f9e980824269..307fb3fe80d7 100644 --- a/source/common/event/timer_impl.h +++ b/source/common/event/timer_impl.h @@ -70,6 +70,11 @@ class TimerImpl : public Timer, ImplBase { // example if the DispatcherImpl::post is called by two threads, they race to // both set this to null. std::atomic object_{}; + + // Latched "envoy.reloadable_features.activate_timers_next_event_loop" runtime feature. If true, + // timers scheduled with a 0 time delta are evaluated in the next iteration of the event loop + // after polling and activating new fd events. + const bool activate_timers_next_event_loop_; }; } // namespace Event diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc index 80d33ae2fc3f..ae0602138651 100644 --- a/source/common/runtime/runtime_features.cc +++ b/source/common/runtime/runtime_features.cc @@ -58,6 +58,7 @@ constexpr const char* runtime_features[] = { "envoy.reloadable_features.connection_header_sanitization", // Begin alphabetically sorted section. "envoy.reloadable_features.activate_fds_next_event_loop", + "envoy.reloadable_features.activate_timers_next_event_loop", "envoy.reloadable_features.allow_500_after_100", "envoy.deprecated_features.allow_deprecated_extension_names", "envoy.reloadable_features.allow_prefetch", diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_alarm.cc b/source/extensions/quic_listeners/quiche/envoy_quic_alarm.cc index e652b79a6120..349eb5f2a32b 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_alarm.cc +++ b/source/extensions/quic_listeners/quiche/envoy_quic_alarm.cc @@ -18,7 +18,8 @@ void EnvoyQuicAlarm::SetImpl() { // loop. QUICHE alarm is not expected to be scheduled in current event loop. 
This bit is a bummer // in QUICHE, and we are working on the fix. Once QUICHE is fixed of expecting this behavior, we // no longer need to round up the duration. - // TODO(antoniovicente) improve the timer behavior in such case. + // TODO(antoniovicente) Remove the std::max(1, ...) when decommissioning the + // envoy.reloadable_features.activate_timers_next_event_loop runtime flag. timer_->enableHRTimer( std::chrono::microseconds(std::max(static_cast(1), duration.ToMicroseconds()))); } diff --git a/test/common/event/BUILD b/test/common/event/BUILD index 50a792f01804..b6032fe71825 100644 --- a/test/common/event/BUILD +++ b/test/common/event/BUILD @@ -21,6 +21,7 @@ envoy_cc_test( "//test/mocks:common_lib", "//test/mocks/stats:stats_mocks", "//test/test_common:simulated_time_system_lib", + "//test/test_common:test_runtime_lib", "//test/test_common:utility_lib", ], ) diff --git a/test/common/event/dispatcher_impl_test.cc b/test/common/event/dispatcher_impl_test.cc index 4a709e4972b8..30fbcd32f248 100644 --- a/test/common/event/dispatcher_impl_test.cc +++ b/test/common/event/dispatcher_impl_test.cc @@ -12,6 +12,7 @@ #include "test/mocks/common.h" #include "test/mocks/stats/mocks.h" #include "test/test_common/simulated_time_system.h" +#include "test/test_common/test_runtime.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" @@ -25,6 +26,12 @@ namespace Envoy { namespace Event { namespace { +static void onWatcherReady(evwatch*, const evwatch_prepare_cb_info*, void* arg) { + // `arg` contains the ReadyWatcher passed in from evwatch_prepare_new. + auto watcher = static_cast(arg); + watcher->ready(); +} + class SchedulableCallbackImplTest : public testing::Test { protected: SchedulableCallbackImplTest() @@ -37,12 +44,6 @@ class SchedulableCallbackImplTest : public testing::Test { Api::ApiPtr api_; DispatcherPtr dispatcher_; std::vector callbacks_; - - static void onWatcherReady(evwatch*, const evwatch_prepare_cb_info*, void* arg) { - // `arg` contains the ReadyWatcher passed in from evwatch_prepare_new. - auto watcher = static_cast(arg); - watcher->ready(); - } }; TEST_F(SchedulableCallbackImplTest, ScheduleCurrentAndCancel) { @@ -472,8 +473,19 @@ TEST_F(DispatcherMonotonicTimeTest, ApproximateMonotonicTime) { dispatcher_->run(Dispatcher::RunType::Block); } -class TimerImplTest : public testing::Test { +class TimerImplTest : public testing::TestWithParam { protected: + TimerImplTest() { + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.activate_timers_next_event_loop", + activateTimersNextEventLoop() ? "true" : "false"}}); + // Watch for dispatcher prepare events. + evwatch_prepare_new(&static_cast(dispatcher_.get())->base(), onWatcherReady, + &prepare_watcher_); + } + + bool activateTimersNextEventLoop() { return GetParam(); } + void SetUp() override { // Update time cache to provide a stable time reference for timer registration. 
event_base_update_cache_time(&libevent_base_); @@ -498,26 +510,153 @@ class TimerImplTest : public testing::Test { } while (duration > absl::DurationFromTimeval(now_tv) - absl::DurationFromTimeval(start_tv)); } + TestScopedRuntime scoped_runtime_; Api::ApiPtr api_{Api::createApiForTest()}; DispatcherPtr dispatcher_{api_->allocateDispatcher("test_thread")}; event_base& libevent_base_{static_cast(*dispatcher_).base()}; + ReadyWatcher prepare_watcher_; }; -TEST_F(TimerImplTest, TimerEnabledDisabled) { +INSTANTIATE_TEST_SUITE_P(DelayActivation, TimerImplTest, testing::Bool()); + +TEST_P(TimerImplTest, TimerEnabledDisabled) { + InSequence s; + Event::TimerPtr timer = dispatcher_->createTimer([] {}); EXPECT_FALSE(timer->enabled()); timer->enableTimer(std::chrono::milliseconds(0)); EXPECT_TRUE(timer->enabled()); + EXPECT_CALL(prepare_watcher_, ready()); dispatcher_->run(Dispatcher::RunType::NonBlock); EXPECT_FALSE(timer->enabled()); timer->enableHRTimer(std::chrono::milliseconds(0)); EXPECT_TRUE(timer->enabled()); + EXPECT_CALL(prepare_watcher_, ready()); dispatcher_->run(Dispatcher::RunType::NonBlock); EXPECT_FALSE(timer->enabled()); } +TEST_P(TimerImplTest, ChangeTimerBackwardsBeforeRun) { + ReadyWatcher watcher1; + Event::TimerPtr timer1 = dispatcher_->createTimer([&] { watcher1.ready(); }); + + ReadyWatcher watcher2; + Event::TimerPtr timer2 = dispatcher_->createTimer([&] { watcher2.ready(); }); + + ReadyWatcher watcher3; + Event::TimerPtr timer3 = dispatcher_->createTimer([&] { watcher3.ready(); }); + + timer1->enableTimer(std::chrono::milliseconds(0)); + timer2->enableTimer(std::chrono::milliseconds(1)); + timer3->enableTimer(std::chrono::milliseconds(2)); + timer2->enableTimer(std::chrono::milliseconds(3)); + timer1->enableTimer(std::chrono::milliseconds(4)); + + // Advance time by 5ms so timers above all trigger in the same loop iteration. + advanceLibeventTime(absl::Milliseconds(5)); + + // Expect watcher3 to trigger first because the deadlines for timers 1 and 2 was moved backwards. + InSequence s; + EXPECT_CALL(prepare_watcher_, ready()); + EXPECT_CALL(watcher3, ready()); + EXPECT_CALL(watcher2, ready()); + EXPECT_CALL(watcher1, ready()); + dispatcher_->run(Dispatcher::RunType::Block); +} + +TEST_P(TimerImplTest, ChangeTimerForwardsToZeroBeforeRun) { + ReadyWatcher watcher1; + Event::TimerPtr timer1 = dispatcher_->createTimer([&] { watcher1.ready(); }); + + ReadyWatcher watcher2; + Event::TimerPtr timer2 = dispatcher_->createTimer([&] { watcher2.ready(); }); + + timer1->enableTimer(std::chrono::milliseconds(2)); + timer2->enableTimer(std::chrono::milliseconds(1)); + timer1->enableTimer(std::chrono::milliseconds(0)); + + // Advance time by 5ms so timers above all trigger in the same loop iteration. + advanceLibeventTime(absl::Milliseconds(5)); + + // Expect watcher1 to trigger first because timer1's deadline was moved forward. + InSequence s; + EXPECT_CALL(prepare_watcher_, ready()); + if (activateTimersNextEventLoop()) { + EXPECT_CALL(watcher1, ready()); + EXPECT_CALL(watcher2, ready()); + } else { + // Timers execute in the wrong order. 
+ EXPECT_CALL(watcher2, ready()); + EXPECT_CALL(watcher1, ready()); + } + dispatcher_->run(Dispatcher::RunType::NonBlock); +} + +TEST_P(TimerImplTest, ChangeTimerForwardsToNonZeroBeforeRun) { + ReadyWatcher watcher1; + Event::TimerPtr timer1 = dispatcher_->createTimer([&] { watcher1.ready(); }); + + ReadyWatcher watcher2; + Event::TimerPtr timer2 = dispatcher_->createTimer([&] { watcher2.ready(); }); + + timer1->enableTimer(std::chrono::milliseconds(3)); + timer2->enableTimer(std::chrono::milliseconds(2)); + timer1->enableTimer(std::chrono::milliseconds(1)); + + // Advance time by 5ms so timers above all trigger in the same loop iteration. + advanceLibeventTime(absl::Milliseconds(5)); + + // Expect watcher1 to trigger first because timer1's deadline was moved forward. + InSequence s; + EXPECT_CALL(prepare_watcher_, ready()); + EXPECT_CALL(watcher1, ready()); + EXPECT_CALL(watcher2, ready()); + dispatcher_->run(Dispatcher::RunType::NonBlock); +} + +TEST_P(TimerImplTest, ChangeLargeTimerForwardToZeroBeforeRun) { + ReadyWatcher watcher1; + Event::TimerPtr timer1 = dispatcher_->createTimer([&] { watcher1.ready(); }); + + ReadyWatcher watcher2; + Event::TimerPtr timer2 = dispatcher_->createTimer([&] { watcher2.ready(); }); + + timer1->enableTimer(std::chrono::seconds(2000)); + timer2->enableTimer(std::chrono::seconds(1000)); + timer1->enableTimer(std::chrono::seconds(0)); + + // Expect watcher1 to trigger because timer1's deadline was moved forward. + InSequence s; + EXPECT_CALL(prepare_watcher_, ready()); + EXPECT_CALL(watcher1, ready()); + EXPECT_CALL(prepare_watcher_, ready()); + dispatcher_->run(Dispatcher::RunType::NonBlock); +} + +TEST_P(TimerImplTest, ChangeLargeTimerForwardToNonZeroBeforeRun) { + ReadyWatcher watcher1; + Event::TimerPtr timer1 = dispatcher_->createTimer([&] { watcher1.ready(); }); + + ReadyWatcher watcher2; + Event::TimerPtr timer2 = dispatcher_->createTimer([&] { watcher2.ready(); }); + + timer1->enableTimer(std::chrono::seconds(2000)); + timer2->enableTimer(std::chrono::seconds(1000)); + timer1->enableTimer(std::chrono::milliseconds(1)); + + // Advance time by 5ms so timers above all trigger in the same loop iteration. + advanceLibeventTime(absl::Milliseconds(5)); + + // Expect watcher1 to trigger because timer1's deadline was moved forward. + InSequence s; + EXPECT_CALL(prepare_watcher_, ready()); + EXPECT_CALL(watcher1, ready()); + EXPECT_CALL(prepare_watcher_, ready()); + dispatcher_->run(Dispatcher::RunType::NonBlock); +} + // Timers scheduled at different times execute in order. -TEST_F(TimerImplTest, TimerOrdering) { +TEST_P(TimerImplTest, TimerOrdering) { ReadyWatcher watcher1; Event::TimerPtr timer1 = dispatcher_->createTimer([&] { watcher1.ready(); }); @@ -540,6 +679,7 @@ TEST_F(TimerImplTest, TimerOrdering) { // Expect watcher calls to happen in order since timers have different times. InSequence s; + EXPECT_CALL(prepare_watcher_, ready()); EXPECT_CALL(watcher1, ready()); EXPECT_CALL(watcher2, ready()); EXPECT_CALL(watcher3, ready()); @@ -547,7 +687,7 @@ TEST_F(TimerImplTest, TimerOrdering) { } // Alarms that are scheduled to execute and are cancelled do not trigger. -TEST_F(TimerImplTest, TimerOrderAndDisableAlarm) { +TEST_P(TimerImplTest, TimerOrderAndDisableAlarm) { ReadyWatcher watcher3; Event::TimerPtr timer3 = dispatcher_->createTimer([&] { watcher3.ready(); }); @@ -573,6 +713,7 @@ TEST_F(TimerImplTest, TimerOrderAndDisableAlarm) { // Expect watcher calls to happen in order since timers have different times. 
InSequence s; + EXPECT_CALL(prepare_watcher_, ready()); EXPECT_CALL(watcher1, ready()); EXPECT_CALL(watcher3, ready()); dispatcher_->run(Dispatcher::RunType::Block); @@ -580,7 +721,7 @@ TEST_F(TimerImplTest, TimerOrderAndDisableAlarm) { // Change the registration time for a timer that is already activated by disabling and re-enabling // the timer. Verify that execution is delayed. -TEST_F(TimerImplTest, TimerOrderDisableAndReschedule) { +TEST_P(TimerImplTest, TimerOrderDisableAndReschedule) { ReadyWatcher watcher4; Event::TimerPtr timer4 = dispatcher_->createTimer([&] { watcher4.ready(); }); @@ -615,16 +756,33 @@ TEST_F(TimerImplTest, TimerOrderDisableAndReschedule) { // timer1 is expected to run first and reschedule timers 2 and 3. timer4 should fire before // timer2 and timer3 since timer4's registration is unaffected. InSequence s; + EXPECT_CALL(prepare_watcher_, ready()); EXPECT_CALL(watcher1, ready()); - EXPECT_CALL(watcher4, ready()); - EXPECT_CALL(watcher2, ready()); - EXPECT_CALL(watcher3, ready()); + if (activateTimersNextEventLoop()) { + EXPECT_CALL(watcher4, ready()); + // Sleep during prepare to ensure that enough time has elapsed before timer evaluation to ensure + // that timers 2 and 3 are picked up by the same loop iteration. Without the sleep the two + // timers could execute in different loop iterations. + EXPECT_CALL(prepare_watcher_, ready()).WillOnce(testing::InvokeWithoutArgs([&]() { + advanceLibeventTime(absl::Milliseconds(5)); + })); + EXPECT_CALL(watcher2, ready()); + EXPECT_CALL(watcher3, ready()); + } else { + EXPECT_CALL(watcher4, ready()); + EXPECT_CALL(watcher2, ready()); + // Sleep in prepare cb to avoid flakiness if epoll_wait returns before the timer timeout. + EXPECT_CALL(prepare_watcher_, ready()).WillOnce(testing::InvokeWithoutArgs([&]() { + advanceLibeventTime(absl::Milliseconds(5)); + })); + EXPECT_CALL(watcher3, ready()); + } dispatcher_->run(Dispatcher::RunType::Block); } // Change the registration time for a timer that is already activated by re-enabling the timer // without calling disableTimer first. -TEST_F(TimerImplTest, TimerOrderAndReschedule) { +TEST_P(TimerImplTest, TimerOrderAndReschedule) { ReadyWatcher watcher4; Event::TimerPtr timer4 = dispatcher_->createTimer([&] { watcher4.ready(); }); @@ -658,14 +816,31 @@ TEST_F(TimerImplTest, TimerOrderAndReschedule) { // no effect if the time delta is 0. Expect timers 1, 2 and 4 to execute in the original order. // Timer 3 is delayed since it is rescheduled with a non-zero delta. InSequence s; + EXPECT_CALL(prepare_watcher_, ready()); EXPECT_CALL(watcher1, ready()); - EXPECT_CALL(watcher2, ready()); - EXPECT_CALL(watcher4, ready()); - EXPECT_CALL(watcher3, ready()); + if (activateTimersNextEventLoop()) { + EXPECT_CALL(watcher4, ready()); + // Sleep during prepare to ensure that enough time has elapsed before timer evaluation to ensure + // that timers 2 and 3 are picked up by the same loop iteration. Without the sleep the two + // timers could execute in different loop iterations. + EXPECT_CALL(prepare_watcher_, ready()).WillOnce(testing::InvokeWithoutArgs([&]() { + advanceLibeventTime(absl::Milliseconds(5)); + })); + EXPECT_CALL(watcher2, ready()); + EXPECT_CALL(watcher3, ready()); + } else { + EXPECT_CALL(watcher2, ready()); + EXPECT_CALL(watcher4, ready()); + // Sleep in prepare cb to avoid flakiness if epoll_wait returns before the timer timeout. 
+ EXPECT_CALL(prepare_watcher_, ready()).WillOnce(testing::InvokeWithoutArgs([&]() { + advanceLibeventTime(absl::Milliseconds(5)); + })); + EXPECT_CALL(watcher3, ready()); + } dispatcher_->run(Dispatcher::RunType::Block); } -TEST_F(TimerImplTest, TimerChaining) { +TEST_P(TimerImplTest, TimerChaining) { ReadyWatcher watcher1; Event::TimerPtr timer1 = dispatcher_->createTimer([&] { watcher1.ready(); }); @@ -693,9 +868,20 @@ TEST_F(TimerImplTest, TimerChaining) { EXPECT_FALSE(timer2->enabled()); EXPECT_FALSE(timer3->enabled()); EXPECT_TRUE(timer4->enabled()); + InSequence s; + EXPECT_CALL(prepare_watcher_, ready()); EXPECT_CALL(watcher4, ready()); + if (activateTimersNextEventLoop()) { + EXPECT_CALL(prepare_watcher_, ready()); + } EXPECT_CALL(watcher3, ready()); + if (activateTimersNextEventLoop()) { + EXPECT_CALL(prepare_watcher_, ready()); + } EXPECT_CALL(watcher2, ready()); + if (activateTimersNextEventLoop()) { + EXPECT_CALL(prepare_watcher_, ready()); + } EXPECT_CALL(watcher1, ready()); dispatcher_->run(Dispatcher::RunType::NonBlock); @@ -705,7 +891,7 @@ TEST_F(TimerImplTest, TimerChaining) { EXPECT_FALSE(timer4->enabled()); } -TEST_F(TimerImplTest, TimerChainDisable) { +TEST_P(TimerImplTest, TimerChainDisable) { ReadyWatcher watcher; Event::TimerPtr timer1; Event::TimerPtr timer2; @@ -729,12 +915,14 @@ TEST_F(TimerImplTest, TimerChainDisable) { EXPECT_TRUE(timer1->enabled()); EXPECT_TRUE(timer2->enabled()); EXPECT_TRUE(timer3->enabled()); + InSequence s; // Only 1 call to watcher ready since the other 2 timers were disabled by the first timer. + EXPECT_CALL(prepare_watcher_, ready()); EXPECT_CALL(watcher, ready()); dispatcher_->run(Dispatcher::RunType::NonBlock); } -TEST_F(TimerImplTest, TimerChainDelete) { +TEST_P(TimerImplTest, TimerChainDelete) { ReadyWatcher watcher; Event::TimerPtr timer1; Event::TimerPtr timer2; @@ -758,7 +946,9 @@ TEST_F(TimerImplTest, TimerChainDelete) { EXPECT_TRUE(timer1->enabled()); EXPECT_TRUE(timer2->enabled()); EXPECT_TRUE(timer3->enabled()); + InSequence s; // Only 1 call to watcher ready since the other 2 timers were deleted by the first timer. + EXPECT_CALL(prepare_watcher_, ready()); EXPECT_CALL(watcher, ready()); dispatcher_->run(Dispatcher::RunType::NonBlock); } @@ -771,6 +961,13 @@ class TimerImplTimingTest : public testing::Test { EXPECT_TRUE(timer.enabled()); while (true) { dispatcher.run(Dispatcher::RunType::NonBlock); +#ifdef WIN32 + // The event loop runs for a single iteration in NonBlock mode on Windows. A few iterations + // are required to ensure that next iteration callbacks have a chance to run before time + // advances once again. 
+ dispatcher.run(Dispatcher::RunType::NonBlock); + dispatcher.run(Dispatcher::RunType::NonBlock); +#endif if (timer.enabled()) { time_system.advanceTimeAsync(std::chrono::microseconds(1)); } else { diff --git a/test/test_common/BUILD b/test/test_common/BUILD index b01ceabe8925..7b0a5c972382 100644 --- a/test/test_common/BUILD +++ b/test/test_common/BUILD @@ -283,6 +283,7 @@ envoy_cc_test( ":utility_lib", "//source/common/event:libevent_scheduler_lib", "//test/mocks/event:event_mocks", + "//test/test_common:test_runtime_lib", ], ) diff --git a/test/test_common/simulated_time_system.cc b/test/test_common/simulated_time_system.cc index d50560a1e3af..d361beddf471 100644 --- a/test/test_common/simulated_time_system.cc +++ b/test/test_common/simulated_time_system.cc @@ -8,6 +8,7 @@ #include "common/common/lock_guard.h" #include "common/event/real_time_system.h" #include "common/event/timer_impl.h" +#include "common/runtime/runtime_features.h" namespace Envoy { namespace Event { @@ -50,9 +51,11 @@ class UnlockGuard { // mechanism used in RealTimeSystem timers is employed for simulated alarms. class SimulatedTimeSystemHelper::Alarm : public Timer { public: - Alarm(SimulatedTimeSystemHelper& time_system, CallbackScheduler& cb_scheduler, TimerCb cb) + Alarm(SimulatedScheduler& simulated_scheduler, SimulatedTimeSystemHelper& time_system, + CallbackScheduler& cb_scheduler, TimerCb cb) : cb_(cb_scheduler.createSchedulableCallback([this, cb] { runAlarm(cb); })), - time_system_(time_system), armed_(false), pending_(false) {} + simulated_scheduler_(simulated_scheduler), time_system_(time_system), armed_(false), + pending_(false) {} ~Alarm() override; @@ -108,6 +111,7 @@ class SimulatedTimeSystemHelper::Alarm : public Timer { } SchedulableCallbackPtr cb_; + SimulatedScheduler& simulated_scheduler_; SimulatedTimeSystemHelper& time_system_; bool armed_ ABSL_GUARDED_BY(time_system_.mutex_); bool pending_ ABSL_GUARDED_BY(time_system_.mutex_); @@ -120,14 +124,19 @@ class SimulatedTimeSystemHelper::Alarm : public Timer { class SimulatedTimeSystemHelper::SimulatedScheduler : public Scheduler { public: SimulatedScheduler(SimulatedTimeSystemHelper& time_system, CallbackScheduler& cb_scheduler) - : time_system_(time_system), cb_scheduler_(cb_scheduler) {} + : time_system_(time_system), cb_scheduler_(cb_scheduler), + schedule_ready_alarms_cb_(cb_scheduler.createSchedulableCallback( + [this] { time_system_.scheduleReadyAlarms(); })) {} TimerPtr createTimer(const TimerCb& cb, Dispatcher& /*dispatcher*/) override { - return std::make_unique(time_system_, cb_scheduler_, cb); + return std::make_unique(*this, time_system_, cb_scheduler_, + cb); }; + void scheduleReadyAlarms() { schedule_ready_alarms_cb_->scheduleCallbackNextIteration(); } private: SimulatedTimeSystemHelper& time_system_; CallbackScheduler& cb_scheduler_; + SchedulableCallbackPtr schedule_ready_alarms_cb_; }; SimulatedTimeSystemHelper::Alarm::Alarm::~Alarm() { @@ -168,10 +177,11 @@ void SimulatedTimeSystemHelper::Alarm::Alarm::enableHRTimer( } armed_ = true; - if (duration.count() == 0) { + if (duration.count() == 0 && !Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.activate_timers_next_event_loop")) { activateLockHeld(); } else { - time_system_.addAlarmLockHeld(*this, duration); + time_system_.addAlarmLockHeld(*this, duration, simulated_scheduler_); } } @@ -287,7 +297,8 @@ void SimulatedTimeSystemHelper::alarmActivateLockHeld(Alarm& alarm) ABSL_NO_THRE } void SimulatedTimeSystemHelper::addAlarmLockHeld( - Alarm& alarm, const 
std::chrono::microseconds& duration) ABSL_NO_THREAD_SAFETY_ANALYSIS { + Alarm& alarm, const std::chrono::microseconds& duration, + SimulatedScheduler& simulated_scheduler) ABSL_NO_THREAD_SAFETY_ANALYSIS { ASSERT(&(alarm.timeSystem()) == this); ASSERT(alarms_.size() == alarm_registrations_map_.size()); ASSERT(alarm_registrations_map_.find(&alarm) == alarm_registrations_map_.end()); @@ -295,6 +306,18 @@ void SimulatedTimeSystemHelper::addAlarmLockHeld( auto insert_result = alarms_.insert({monotonic_time_ + duration, random_source_.random(), alarm}); ASSERT(insert_result.second); alarm_registrations_map_.emplace(&alarm, insert_result.first); + if (duration.count() == 0) { + // Force the event loop to check for timers that are ready to execute since we just added a 0 + // delay alarm which is ready to execute in the next iteration of the event loop. + // TODO(antoniovicente) Refactor alarm tracking so it happens per scheduler and limit wakeup to + // a single event loop. + + // We don't want to activate the alarm under lock, as it will make a libevent call, and libevent + // itself uses locks: + // https://github.com/libevent/libevent/blob/29cc8386a2f7911eaa9336692a2c5544d8b4734f/event.c#L1917 + UnlockGuard unlocker(mutex_); + simulated_scheduler.scheduleReadyAlarms(); + } // Sanity check that the parallel data structures used for alarm registration have the same number // of entries. @@ -320,33 +343,40 @@ SchedulerPtr SimulatedTimeSystemHelper::createScheduler(Scheduler& /*base_schedu } void SimulatedTimeSystemHelper::setMonotonicTimeLockHeld(const MonotonicTime& monotonic_time) { + only_one_thread_.checkOneThread(); // We don't have a MutexLock construct that allows temporarily // dropping the lock to run a callback. The main issue here is that we must // be careful not to be holding mutex_ when an exception can be thrown. // That can only happen here in alarm->activate(), which is run with the mutex // released. if (monotonic_time >= monotonic_time_) { - // Alarms is a std::set ordered by wakeup time, so pulling off begin() each - // iteration gives you wakeup order. Also note that alarms may be added - // or removed during the call to activate() so it would not be correct to - // range-iterate over the set. - while (!alarms_.empty()) { - const AlarmRegistration& alarm_registration = *alarms_.begin(); - MonotonicTime alarm_time = alarm_registration.time_; - if (alarm_time > monotonic_time) { - break; - } - ASSERT(alarm_time >= monotonic_time_); - system_time_ += - std::chrono::duration_cast(alarm_time - monotonic_time_); - monotonic_time_ = alarm_time; - Alarm& alarm = alarm_registration.alarm_; - removeAlarmLockHeld(alarm); - alarmActivateLockHeld(alarm); - } system_time_ += std::chrono::duration_cast(monotonic_time - monotonic_time_); monotonic_time_ = monotonic_time; + scheduleReadyAlarmsLockHeld(); + } +} + +void SimulatedTimeSystemHelper::scheduleReadyAlarms() { + absl::MutexLock lock(&mutex_); + scheduleReadyAlarmsLockHeld(); +} + +void SimulatedTimeSystemHelper::scheduleReadyAlarmsLockHeld() { + // Alarms is a std::set ordered by wakeup time, so pulling off begin() each + // iteration gives you wakeup order. Also note that alarms may be added + // or removed during the call to activate() so it would not be correct to + // range-iterate over the set. 
+ while (!alarms_.empty()) { + const AlarmRegistration& alarm_registration = *alarms_.begin(); + MonotonicTime alarm_time = alarm_registration.time_; + if (alarm_time > monotonic_time_) { + break; + } + + Alarm& alarm = alarm_registration.alarm_; + removeAlarmLockHeld(alarm); + alarmActivateLockHeld(alarm); } } diff --git a/test/test_common/simulated_time_system.h b/test/test_common/simulated_time_system.h index 3b4c938d0e9e..e8a369e4f9cc 100644 --- a/test/test_common/simulated_time_system.h +++ b/test/test_common/simulated_time_system.h @@ -104,10 +104,17 @@ class SimulatedTimeSystemHelper : public TestTimeSystem { void setMonotonicTimeLockHeld(const MonotonicTime& monotonic_time) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_); + /** + * Schedule expired alarms so they execute in their event loops. + */ + void scheduleReadyAlarms(); + void scheduleReadyAlarmsLockHeld() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_); + void alarmActivateLockHeld(Alarm& alarm) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_); // Adds/removes an alarm. - void addAlarmLockHeld(Alarm&, const std::chrono::microseconds& duration) + void addAlarmLockHeld(Alarm&, const std::chrono::microseconds& duration, + SimulatedScheduler& simulated_scheduler) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_); void removeAlarmLockHeld(Alarm&) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_); diff --git a/test/test_common/simulated_time_system_test.cc b/test/test_common/simulated_time_system_test.cc index 9fd5314c9b92..13a435148aff 100644 --- a/test/test_common/simulated_time_system_test.cc +++ b/test/test_common/simulated_time_system_test.cc @@ -6,6 +6,7 @@ #include "test/mocks/common.h" #include "test/mocks/event/mocks.h" #include "test/test_common/simulated_time_system.h" +#include "test/test_common/test_runtime.h" #include "test/test_common/utility.h" #include "event2/event.h" @@ -16,12 +17,20 @@ namespace Event { namespace Test { namespace { -class SimulatedTimeSystemTest : public testing::Test { +enum class ActivateMode { DelayActivateTimers, EagerlyActivateTimers }; + +class SimulatedTimeSystemTest : public testing::TestWithParam { protected: SimulatedTimeSystemTest() : scheduler_(time_system_.createScheduler(base_scheduler_, base_scheduler_)), start_monotonic_time_(time_system_.monotonicTime()), - start_system_time_(time_system_.systemTime()) {} + start_system_time_(time_system_.systemTime()) { + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.activate_timers_next_event_loop", + activateMode() == ActivateMode::DelayActivateTimers ? 
"true" : "false"}}); + } + + ActivateMode activateMode() { return GetParam(); } void trackPrepareCalls() { base_scheduler_.registerOnPrepareCallback([this]() { output_.append(1, 'p'); }); @@ -58,6 +67,7 @@ class SimulatedTimeSystemTest : public testing::Test { base_scheduler_.run(Dispatcher::RunType::NonBlock); } + TestScopedRuntime scoped_runtime_; Event::MockDispatcher dispatcher_; LibeventScheduler base_scheduler_; SimulatedTimeSystem time_system_; @@ -68,7 +78,11 @@ class SimulatedTimeSystemTest : public testing::Test { SystemTime start_system_time_; }; -TEST_F(SimulatedTimeSystemTest, AdvanceTimeAsync) { +INSTANTIATE_TEST_SUITE_P(DelayTimerActivation, SimulatedTimeSystemTest, + testing::Values(ActivateMode::DelayActivateTimers, + ActivateMode::EagerlyActivateTimers)); + +TEST_P(SimulatedTimeSystemTest, AdvanceTimeAsync) { EXPECT_EQ(start_monotonic_time_, time_system_.monotonicTime()); EXPECT_EQ(start_system_time_, time_system_.systemTime()); advanceMsAndLoop(5); @@ -76,7 +90,7 @@ TEST_F(SimulatedTimeSystemTest, AdvanceTimeAsync) { EXPECT_EQ(start_system_time_ + std::chrono::milliseconds(5), time_system_.systemTime()); } -TEST_F(SimulatedTimeSystemTest, TimerTotalOrdering) { +TEST_P(SimulatedTimeSystemTest, TimerTotalOrdering) { trackPrepareCalls(); addTask(0, '0'); @@ -90,7 +104,7 @@ TEST_F(SimulatedTimeSystemTest, TimerTotalOrdering) { EXPECT_EQ("p012", output_); } -TEST_F(SimulatedTimeSystemTest, TimerPartialOrdering) { +TEST_P(SimulatedTimeSystemTest, TimerPartialOrdering) { trackPrepareCalls(); std::set outputs; @@ -115,7 +129,7 @@ TEST_F(SimulatedTimeSystemTest, TimerPartialOrdering) { EXPECT_THAT(outputs, testing::ElementsAre("p0123", "p0213")); } -TEST_F(SimulatedTimeSystemTest, TimerPartialOrdering2) { +TEST_P(SimulatedTimeSystemTest, TimerPartialOrdering2) { trackPrepareCalls(); std::set outputs; @@ -142,7 +156,7 @@ TEST_F(SimulatedTimeSystemTest, TimerPartialOrdering2) { } // Timers that are scheduled to execute and but are disabled first do not trigger. -TEST_F(SimulatedTimeSystemTest, TimerOrderAndDisableTimer) { +TEST_P(SimulatedTimeSystemTest, TimerOrderAndDisableTimer) { trackPrepareCalls(); // Create 3 timers. The first timer should disable the second, so it doesn't trigger. @@ -159,7 +173,7 @@ TEST_F(SimulatedTimeSystemTest, TimerOrderAndDisableTimer) { } // Capture behavior of timers which are rescheduled without being disabled first. -TEST_F(SimulatedTimeSystemTest, TimerOrderAndRescheduleTimer) { +TEST_P(SimulatedTimeSystemTest, TimerOrderAndRescheduleTimer) { trackPrepareCalls(); // Reschedule timers 1, 2 and 4 without disabling first. @@ -179,15 +193,34 @@ TEST_F(SimulatedTimeSystemTest, TimerOrderAndRescheduleTimer) { // Timer 4 runs as part of the first wakeup since its new schedule time has a delta of 0. Timer 2 // is delayed since it is rescheduled with a non-zero delta. advanceMsAndLoop(5); - EXPECT_EQ("p0134", output_); + if (activateMode() == ActivateMode::DelayActivateTimers) { +#ifdef WIN32 + // Force it to run again to pick up next iteration callbacks. + // The event loop runs for a single iteration in NonBlock mode on Windows as a hack to work + // around LEVEL trigger fd registrations constantly firing events and preventing the NonBlock + // event loop from ever reaching the no-fd event and no-expired timers termination condition. 
It + // is not possible to get consistent event loop behavior since the time system does not override + // the base scheduler's run behavior, and libevent does not provide a mode where it runs at most + // N iterations before breaking out of the loop for us to prefer over the single iteration mode + // used on Windows. + advanceMsAndLoop(0); +#endif + EXPECT_EQ("p013p4", output_); + } else { + EXPECT_EQ("p0134", output_); + } advanceMsAndLoop(100); - EXPECT_EQ("p0134p2", output_); + if (activateMode() == ActivateMode::DelayActivateTimers) { + EXPECT_EQ("p013p4p2", output_); + } else { + EXPECT_EQ("p0134p2", output_); + } } // Disable and re-enable timers that is already pending execution and verify that execution is // delayed. -TEST_F(SimulatedTimeSystemTest, TimerOrderDisableAndRescheduleTimer) { +TEST_P(SimulatedTimeSystemTest, TimerOrderDisableAndRescheduleTimer) { trackPrepareCalls(); // Disable and reschedule timers 1, 2 and 4 when timer 0 triggers. @@ -210,13 +243,26 @@ TEST_F(SimulatedTimeSystemTest, TimerOrderDisableAndRescheduleTimer) { // because it is scheduled with zero delay. Timer 2 executes in a later iteration because it is // re-enabled with a non-zero timeout. advanceMsAndLoop(5); - EXPECT_EQ("p0314", output_); + if (activateMode() == ActivateMode::DelayActivateTimers) { +#ifdef WIN32 + // The event loop runs for a single iteration in NonBlock mode on Windows. Force it to run again + // to pick up next iteration callbacks. + advanceMsAndLoop(0); +#endif + EXPECT_THAT(output_, testing::AnyOf("p03p14", "p03p41")); + } else { + EXPECT_EQ("p0314", output_); + } advanceMsAndLoop(100); - EXPECT_EQ("p0314p2", output_); + if (activateMode() == ActivateMode::DelayActivateTimers) { + EXPECT_THAT(output_, testing::AnyOf("p03p14p2", "p03p41p2")); + } else { + EXPECT_EQ("p0314p2", output_); + } } -TEST_F(SimulatedTimeSystemTest, AdvanceTimeWait) { +TEST_P(SimulatedTimeSystemTest, AdvanceTimeWait) { EXPECT_EQ(start_monotonic_time_, time_system_.monotonicTime()); EXPECT_EQ(start_system_time_, time_system_.systemTime()); @@ -238,7 +284,7 @@ TEST_F(SimulatedTimeSystemTest, AdvanceTimeWait) { EXPECT_EQ(start_system_time_ + std::chrono::milliseconds(5), time_system_.systemTime()); } -TEST_F(SimulatedTimeSystemTest, WaitFor) { +TEST_P(SimulatedTimeSystemTest, WaitFor) { EXPECT_EQ(start_monotonic_time_, time_system_.monotonicTime()); EXPECT_EQ(start_system_time_, time_system_.systemTime()); @@ -299,7 +345,7 @@ TEST_F(SimulatedTimeSystemTest, WaitFor) { thread->join(); } -TEST_F(SimulatedTimeSystemTest, Monotonic) { +TEST_P(SimulatedTimeSystemTest, Monotonic) { // Setting time forward works. time_system_.setMonotonicTime(start_monotonic_time_ + std::chrono::milliseconds(5)); EXPECT_EQ(start_monotonic_time_ + std::chrono::milliseconds(5), time_system_.monotonicTime()); @@ -309,7 +355,7 @@ TEST_F(SimulatedTimeSystemTest, Monotonic) { EXPECT_EQ(start_monotonic_time_ + std::chrono::milliseconds(5), time_system_.monotonicTime()); } -TEST_F(SimulatedTimeSystemTest, System) { +TEST_P(SimulatedTimeSystemTest, System) { // Setting time forward works. 
time_system_.setSystemTime(start_system_time_ + std::chrono::milliseconds(5)); EXPECT_EQ(start_system_time_ + std::chrono::milliseconds(5), time_system_.systemTime()); @@ -319,7 +365,7 @@ TEST_F(SimulatedTimeSystemTest, System) { EXPECT_EQ(start_system_time_ + std::chrono::milliseconds(3), time_system_.systemTime()); } -TEST_F(SimulatedTimeSystemTest, Ordering) { +TEST_P(SimulatedTimeSystemTest, Ordering) { addTask(5, '5'); addTask(3, '3'); addTask(6, '6'); @@ -330,7 +376,7 @@ TEST_F(SimulatedTimeSystemTest, Ordering) { EXPECT_EQ("356", output_); } -TEST_F(SimulatedTimeSystemTest, SystemTimeOrdering) { +TEST_P(SimulatedTimeSystemTest, SystemTimeOrdering) { addTask(5, '5'); addTask(3, '3'); addTask(6, '6'); @@ -344,7 +390,7 @@ TEST_F(SimulatedTimeSystemTest, SystemTimeOrdering) { EXPECT_EQ("356", output_); // callbacks don't get replayed. } -TEST_F(SimulatedTimeSystemTest, DisableTimer) { +TEST_P(SimulatedTimeSystemTest, DisableTimer) { addTask(5, '5'); addTask(3, '3'); addTask(6, '6'); @@ -356,7 +402,7 @@ TEST_F(SimulatedTimeSystemTest, DisableTimer) { EXPECT_EQ("36", output_); } -TEST_F(SimulatedTimeSystemTest, IgnoreRedundantDisable) { +TEST_P(SimulatedTimeSystemTest, IgnoreRedundantDisable) { addTask(5, '5'); timers_[0]->disableTimer(); timers_[0]->disableTimer(); @@ -364,7 +410,7 @@ TEST_F(SimulatedTimeSystemTest, IgnoreRedundantDisable) { EXPECT_EQ("", output_); } -TEST_F(SimulatedTimeSystemTest, OverrideEnable) { +TEST_P(SimulatedTimeSystemTest, OverrideEnable) { addTask(5, '5'); timers_[0]->enableTimer(std::chrono::milliseconds(6)); advanceMsAndLoop(5); @@ -373,7 +419,7 @@ TEST_F(SimulatedTimeSystemTest, OverrideEnable) { EXPECT_EQ("5", output_); } -TEST_F(SimulatedTimeSystemTest, DeleteTime) { +TEST_P(SimulatedTimeSystemTest, DeleteTime) { addTask(5, '5'); addTask(3, '3'); addTask(6, '6'); @@ -386,7 +432,7 @@ TEST_F(SimulatedTimeSystemTest, DeleteTime) { } // Regression test for issues documented in https://github.com/envoyproxy/envoy/pull/6956 -TEST_F(SimulatedTimeSystemTest, DuplicateTimer) { +TEST_P(SimulatedTimeSystemTest, DuplicateTimer) { // Set one alarm two times to test that pending does not get duplicated.. std::chrono::milliseconds delay(0); TimerPtr zero_timer = scheduler_->createTimer([this]() { output_.append(1, '2'); }, dispatcher_); @@ -422,7 +468,7 @@ TEST_F(SimulatedTimeSystemTest, DuplicateTimer) { thread->join(); } -TEST_F(SimulatedTimeSystemTest, Enabled) { +TEST_P(SimulatedTimeSystemTest, Enabled) { TimerPtr timer = scheduler_->createTimer({}, dispatcher_); timer->enableTimer(std::chrono::milliseconds(0)); EXPECT_TRUE(timer->enabled()); From 4c4503456e710ddd5e58b90dec1a56b89ae6abb3 Mon Sep 17 00:00:00 2001 From: asraa Date: Thu, 6 Aug 2020 15:11:15 -0400 Subject: [PATCH 871/909] [test] Fix flag swap for integration tests and lint for exceptions in new H/2 codec (#12471) When legacy codecs were swapped to be the default, the test flag to flip wasn't swapped. This swaps so that compile time options or (--define use_new_codecs_in_integration_tests=true) will run integration tests with new codecs, default legacy. Make linter for H/2 codec so no one reintroduces a throw. 
Added testing Signed-off-by: Asra Ali --- bazel/BUILD | 4 ++-- bazel/envoy_build_system.bzl | 4 ++-- bazel/envoy_select.bzl | 4 ++-- ci/do_ci.sh | 2 +- test/config/utility.cc | 4 ++-- test/config/utility.h | 4 ++-- test/integration/BUILD | 6 +++--- test/integration/fake_upstream.cc | 12 ++++++------ test/integration/integration.cc | 8 ++++---- test/integration/integration.h | 2 +- tools/code_format/check_format.py | 18 ++++++++++++++++++ tools/code_format/check_format_test_helper.py | 3 +++ tools/testdata/check_format/commented_throw.cc | 7 +++++++ tools/testdata/check_format/throw.cc | 7 +++++++ 14 files changed, 60 insertions(+), 25 deletions(-) create mode 100644 tools/testdata/check_format/commented_throw.cc create mode 100644 tools/testdata/check_format/throw.cc diff --git a/bazel/BUILD b/bazel/BUILD index 717d9e4d1683..ee7ad281d809 100644 --- a/bazel/BUILD +++ b/bazel/BUILD @@ -195,8 +195,8 @@ config_setting( ) config_setting( - name = "enable_legacy_codecs_in_integration_tests", - values = {"define": "use_legacy_codecs_in_integration_tests=true"}, + name = "enable_new_codecs_in_integration_tests", + values = {"define": "use_new_codecs_in_integration_tests=true"}, ) cc_proto_library( diff --git a/bazel/envoy_build_system.bzl b/bazel/envoy_build_system.bzl index a01fb8f80582..548ffff4a714 100644 --- a/bazel/envoy_build_system.bzl +++ b/bazel/envoy_build_system.bzl @@ -18,7 +18,7 @@ load( _envoy_select_boringssl = "envoy_select_boringssl", _envoy_select_google_grpc = "envoy_select_google_grpc", _envoy_select_hot_restart = "envoy_select_hot_restart", - _envoy_select_legacy_codecs_in_integration_tests = "envoy_select_legacy_codecs_in_integration_tests", + _envoy_select_new_codecs_in_integration_tests = "envoy_select_new_codecs_in_integration_tests", ) load( ":envoy_test.bzl", @@ -174,7 +174,7 @@ def envoy_google_grpc_external_deps(): envoy_select_boringssl = _envoy_select_boringssl envoy_select_google_grpc = _envoy_select_google_grpc envoy_select_hot_restart = _envoy_select_hot_restart -envoy_select_legacy_codecs_in_integration_tests = _envoy_select_legacy_codecs_in_integration_tests +envoy_select_new_codecs_in_integration_tests = _envoy_select_new_codecs_in_integration_tests # Binary wrappers (from envoy_binary.bzl) envoy_cc_binary = _envoy_cc_binary diff --git a/bazel/envoy_select.bzl b/bazel/envoy_select.bzl index ba7704ceb02f..107ad2a21bde 100644 --- a/bazel/envoy_select.bzl +++ b/bazel/envoy_select.bzl @@ -33,8 +33,8 @@ def envoy_select_hot_restart(xs, repository = ""): }) # Select the given values if use legacy codecs in test is on in the current build. 
-def envoy_select_legacy_codecs_in_integration_tests(xs, repository = ""): +def envoy_select_new_codecs_in_integration_tests(xs, repository = ""): return select({ - repository + "//bazel:enable_legacy_codecs_in_integration_tests": xs, + repository + "//bazel:enable_new_codecs_in_integration_tests": xs, "//conditions:default": [], }) diff --git a/ci/do_ci.sh b/ci/do_ci.sh index 381f6245e94a..895ab71747d8 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -230,7 +230,7 @@ elif [[ "$CI_TARGET" == "bazel.compile_time_options" ]]; then --define quiche=enabled \ --define path_normalization_by_default=true \ --define deprecated_features=disabled \ - --define use_legacy_codecs_in_integration_tests=true \ + --define use_new_codecs_in_integration_tests=true \ " ENVOY_STDLIB="${ENVOY_STDLIB:-libstdc++}" setup_clang_toolchain diff --git a/test/config/utility.cc b/test/config/utility.cc index 873e4ec52484..598c9ade7ba8 100644 --- a/test/config/utility.cc +++ b/test/config/utility.cc @@ -617,8 +617,8 @@ void ConfigHelper::addRuntimeOverride(const std::string& key, const std::string& (*static_layer->mutable_fields())[std::string(key)] = ValueUtil::stringValue(std::string(value)); } -void ConfigHelper::setLegacyCodecs() { - addRuntimeOverride("envoy.reloadable_features.new_codec_behavior", "false"); +void ConfigHelper::setNewCodecs() { + addRuntimeOverride("envoy.reloadable_features.new_codec_behavior", "true"); } void ConfigHelper::finalize(const std::vector& ports) { diff --git a/test/config/utility.h b/test/config/utility.h index 34aa4ba475ad..ff338722630e 100644 --- a/test/config/utility.h +++ b/test/config/utility.h @@ -233,8 +233,8 @@ class ConfigHelper { const envoy::extensions::filters::network::http_connection_manager::v3::LocalReplyConfig& config); - // Set legacy codecs to use for upstream and downstream codecs. - void setLegacyCodecs(); + // Set new codecs to use for upstream and downstream codecs. 
+ void setNewCodecs(); private: static bool shouldBoost(envoy::config::core::v3::ApiVersion api_version) { diff --git a/test/integration/BUILD b/test/integration/BUILD index f680952718e5..f54848a6e70c 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -8,7 +8,7 @@ load( "envoy_package", "envoy_proto_library", "envoy_select_hot_restart", - "envoy_select_legacy_codecs_in_integration_tests", + "envoy_select_new_codecs_in_integration_tests", "envoy_sh_test", ) @@ -576,8 +576,8 @@ envoy_cc_test_library( "ssl_utility.h", "utility.h", ], - copts = envoy_select_legacy_codecs_in_integration_tests( - ["-DENVOY_USE_LEGACY_CODECS_IN_INTEGRATION_TESTS"], + copts = envoy_select_new_codecs_in_integration_tests( + ["-DENVOY_USE_NEW_CODECS_IN_INTEGRATION_TESTS"], "@envoy", ), data = ["//test/common/runtime:filesystem_test_data"], diff --git a/test/integration/fake_upstream.cc b/test/integration/fake_upstream.cc index c2b550405b1d..476922bd8a68 100644 --- a/test/integration/fake_upstream.cc +++ b/test/integration/fake_upstream.cc @@ -286,12 +286,12 @@ FakeHttpConnection::FakeHttpConnection( // For the purpose of testing, we always have the upstream encode the trailers if any http1_settings.enable_trailers_ = true; Http::Http1::CodecStats& stats = fake_upstream.http1CodecStats(); -#ifdef ENVOY_USE_LEGACY_CODECS_IN_INTEGRATION_TESTS - codec_ = std::make_unique( +#ifdef ENVOY_USE_NEW_CODECS_IN_INTEGRATION_TESTS + codec_ = std::make_unique( shared_connection_.connection(), stats, *this, http1_settings, max_request_headers_kb, max_request_headers_count, headers_with_underscores_action); #else - codec_ = std::make_unique( + codec_ = std::make_unique( shared_connection_.connection(), stats, *this, http1_settings, max_request_headers_kb, max_request_headers_count, headers_with_underscores_action); #endif @@ -302,12 +302,12 @@ FakeHttpConnection::FakeHttpConnection( http2_options.set_allow_connect(true); http2_options.set_allow_metadata(true); Http::Http2::CodecStats& stats = fake_upstream.http2CodecStats(); -#ifdef ENVOY_USE_LEGACY_CODECS_IN_INTEGRATION_TESTS - codec_ = std::make_unique( +#ifdef ENVOY_USE_NEW_CODECS_IN_INTEGRATION_TESTS + codec_ = std::make_unique( shared_connection_.connection(), *this, stats, http2_options, max_request_headers_kb, max_request_headers_count, headers_with_underscores_action); #else - codec_ = std::make_unique( + codec_ = std::make_unique( shared_connection_.connection(), *this, stats, http2_options, max_request_headers_kb, max_request_headers_count, headers_with_underscores_action); #endif diff --git a/test/integration/integration.cc b/test/integration/integration.cc index f71a577dc6e3..979e3fd8c47b 100644 --- a/test/integration/integration.cc +++ b/test/integration/integration.cc @@ -286,10 +286,10 @@ BaseIntegrationTest::BaseIntegrationTest(const InstanceConstSharedPtrFn& upstrea return new Buffer::WatermarkBuffer(below_low, above_high, above_overflow); })); ON_CALL(factory_context_, api()).WillByDefault(ReturnRef(*api_)); - // In ENVOY_USE_LEGACY_CODECS_IN_INTEGRATION_TESTS mode, set runtime config to use legacy codecs. -#ifdef ENVOY_USE_LEGACY_CODECS_IN__INTEGRATION_TESTS - ENVOY_LOG_MISC(debug, "Using legacy codecs"); - setLegacyCodecs(); + // In ENVOY_USE_NEW_CODECS_IN_INTEGRATION_TESTS mode, set runtime config to use legacy codecs. 
+#ifdef ENVOY_USE_NEW_CODECS_IN_INTEGRATION_TESTS + ENVOY_LOG_MISC(debug, "Using new codecs"); + setNewCodecs(); #endif } diff --git a/test/integration/integration.h b/test/integration/integration.h index 2a081d82cf8e..e4f8a9cba86e 100644 --- a/test/integration/integration.h +++ b/test/integration/integration.h @@ -191,7 +191,7 @@ class BaseIntegrationTest : protected Logger::Loggable { void skipPortUsageValidation() { config_helper_.skipPortUsageValidation(); } // Make test more deterministic by using a fixed RNG value. void setDeterministic() { deterministic_ = true; } - void setLegacyCodecs() { config_helper_.setLegacyCodecs(); } + void setNewCodecs() { config_helper_.setNewCodecs(); } FakeHttpConnection::Type upstreamProtocol() const { return upstream_protocol_; } diff --git a/tools/code_format/check_format.py b/tools/code_format/check_format.py index 12af83fbf002..0f1ebfc5b955 100755 --- a/tools/code_format/check_format.py +++ b/tools/code_format/check_format.py @@ -87,6 +87,10 @@ # Only one C++ file should instantiate grpc_init GRPC_INIT_ALLOWLIST = ("./source/common/grpc/google_grpc_context.cc") +# These files should not throw exceptions. Add HTTP/1 when exceptions removed. +EXCEPTION_DENYLIST = ("./source/common/http/http2/codec_impl.h", + "./source/common/http/http2/codec_impl.cc") + CLANG_FORMAT_PATH = os.getenv("CLANG_FORMAT", "clang-format-10") BUILDIFIER_PATH = paths.getBuildifier() BUILDOZER_PATH = paths.getBuildozer() @@ -103,6 +107,7 @@ HISTOGRAM_SI_SUFFIX_REGEX = re.compile(r"(?<=HISTOGRAM\()[a-zA-Z0-9_]+_(b|kb|mb|ns|us|ms|s)(?=,)") TEST_NAME_STARTING_LOWER_CASE_REGEX = re.compile(r"TEST(_.\(.*,\s|\()[a-z].*\)\s\{") EXTENSIONS_CODEOWNERS_REGEX = re.compile(r'.*(extensions[^@]*\s+)(@.*)') +COMMENT_REGEX = re.compile(r"//|\*") DURATION_VALUE_REGEX = re.compile(r'\b[Dd]uration\(([0-9.]+)') # yapf: disable @@ -355,6 +360,11 @@ def allowlistedForUnpackTo(file_path): ] +def denylistedForExceptions(file_path): + return (file_path in EXCEPTION_DENYLIST or isInSubdir(file_path, 'tools/testdata')) and \ + not file_path.endswith(DOCS_SUFFIX) + + def findSubstringAndReturnError(pattern, file_path, error_message): text = readFile(file_path) if pattern in text: @@ -744,6 +754,14 @@ def checkSourceLine(line, file_path, reportError): reportError("Don't call grpc_init() or grpc_shutdown() directly, instantiate " + "Grpc::GoogleGrpcContext. See #8282") + if denylistedForExceptions(file_path): + throw = line.find("throw") + if throw != -1: + comment_match = COMMENT_REGEX.search(line) + if comment_match is None or comment_match.start(0) > throw: + reportError("Don't introduce throws into exception-free files, use error " + + "statuses instead.") + def checkBuildLine(line, file_path, reportError): if "@bazel_tools" in line and not (isSkylarkFile(file_path) or file_path.startswith("./bazel/") or diff --git a/tools/code_format/check_format_test_helper.py b/tools/code_format/check_format_test_helper.py index 8fbc058ff8c3..da3f576605aa 100755 --- a/tools/code_format/check_format_test_helper.py +++ b/tools/code_format/check_format_test_helper.py @@ -247,6 +247,9 @@ def runChecks(): "Don't use std::optional; use absl::optional instead") errors += checkUnfixableError("std_variant.cc", "Don't use std::variant; use absl::variant instead") + errors += checkUnfixableError( + "throw.cc", "Don't introduce throws into exception-free files, use error statuses instead.") + errors += checkFileExpectingOK("commented_throw.cc") # The following files have errors that can be automatically fixed. 
errors += checkAndFixError("over_enthusiastic_spaces.cc", diff --git a/tools/testdata/check_format/commented_throw.cc b/tools/testdata/check_format/commented_throw.cc new file mode 100644 index 000000000000..7f209bb1836d --- /dev/null +++ b/tools/testdata/check_format/commented_throw.cc @@ -0,0 +1,7 @@ +namespace Envoy { + +void foo() { + // throw std::runtime_error("error"); +} + +} // namespace Envoy diff --git a/tools/testdata/check_format/throw.cc b/tools/testdata/check_format/throw.cc new file mode 100644 index 000000000000..3c67c7208b7f --- /dev/null +++ b/tools/testdata/check_format/throw.cc @@ -0,0 +1,7 @@ +namespace Envoy { + +void foo() { + throw std::runtime_error("error"); +} + +} // namespace Envoy From c90d48520dca6ecfc772e02bdc5312bafc4ec71c Mon Sep 17 00:00:00 2001 From: jianwen612 <55008549+jianwen612@users.noreply.github.com> Date: Thu, 6 Aug 2020 14:23:49 -0500 Subject: [PATCH 872/909] [fuzz] Covered another 8 filters in network-level ReadFilter generic fuzzer (#12464) Added coverage for 8 additional filters in WriteFilter generic fuzzer: Added thrift filters into envoy_all_network_filters so that thrift filters can be loaded when we load thrift_proxy filter. Or otherwise thrift filters cannot be found in the thrift filter factory. Signed-off-by: jianwen --- source/extensions/all_extensions.bzl | 5 +- .../filters/network/common/fuzz/BUILD | 3 + .../http_connection_manager_1 | 21 ++++++ .../http_connection_manager_2 | 21 ++++++ .../fuzz/network_readfilter_corpus/kafka_1 | 20 ++++++ .../network_readfilter_corpus/ratelimit_1 | 26 +++++++ .../fuzz/network_readfilter_corpus/rbac_1 | 20 ++++++ .../rocketmq_proxy_crash | 15 ++++ .../sni_dynamic_forward_proxy_1 | 36 ++++++++++ .../network_readfilter_corpus/thrift_proxy_1 | 7 ++ .../network_readfilter_corpus/thrift_proxy_3 | 34 +++++++++ .../thrift_proxy_assert_failure | 7 ++ .../zookeeper_proxy_1 | 34 +++++++++ .../fuzz/network_readfilter_fuzz_test.dict | 3 + .../common/fuzz/uber_per_readfilter.cc | 70 ++++++++++++++++--- .../network/common/fuzz/uber_readfilter.cc | 14 ++-- .../network/common/fuzz/uber_readfilter.h | 2 +- 17 files changed, 322 insertions(+), 16 deletions(-) create mode 100644 test/extensions/filters/network/common/fuzz/network_readfilter_corpus/http_connection_manager_1 create mode 100644 test/extensions/filters/network/common/fuzz/network_readfilter_corpus/http_connection_manager_2 create mode 100644 test/extensions/filters/network/common/fuzz/network_readfilter_corpus/kafka_1 create mode 100644 test/extensions/filters/network/common/fuzz/network_readfilter_corpus/ratelimit_1 create mode 100644 test/extensions/filters/network/common/fuzz/network_readfilter_corpus/rbac_1 create mode 100644 test/extensions/filters/network/common/fuzz/network_readfilter_corpus/rocketmq_proxy_crash create mode 100644 test/extensions/filters/network/common/fuzz/network_readfilter_corpus/sni_dynamic_forward_proxy_1 create mode 100644 test/extensions/filters/network/common/fuzz/network_readfilter_corpus/thrift_proxy_1 create mode 100644 test/extensions/filters/network/common/fuzz/network_readfilter_corpus/thrift_proxy_3 create mode 100644 test/extensions/filters/network/common/fuzz/network_readfilter_corpus/thrift_proxy_assert_failure create mode 100644 test/extensions/filters/network/common/fuzz/network_readfilter_corpus/zookeeper_proxy_1 create mode 100644 test/extensions/filters/network/common/fuzz/network_readfilter_fuzz_test.dict diff --git a/source/extensions/all_extensions.bzl b/source/extensions/all_extensions.bzl index 
ace7333688bc..5fde35e3c92b 100644 --- a/source/extensions/all_extensions.bzl +++ b/source/extensions/all_extensions.bzl @@ -42,8 +42,11 @@ def envoy_all_http_filters(): # All network-layer filters are extensions with names that have the following prefix. _network_filter_prefix = "envoy.filters.network" +# All thrift filters are extensions with names that have the following prefix. +_thrift_filter_prefix = "envoy.filters.thrift" + # Return all network-layer filter extensions to be compiled into network-layer filter generic fuzzer. def envoy_all_network_filters(): all_extensions = dicts.add(_required_extensions, EXTENSIONS) - return [v for k, v in all_extensions.items() if k.startswith(_network_filter_prefix)] + return [v for k, v in all_extensions.items() if k.startswith(_network_filter_prefix) or k.startswith(_thrift_filter_prefix)] diff --git a/test/extensions/filters/network/common/fuzz/BUILD b/test/extensions/filters/network/common/fuzz/BUILD index a97370781cbc..f8d38307d569 100644 --- a/test/extensions/filters/network/common/fuzz/BUILD +++ b/test/extensions/filters/network/common/fuzz/BUILD @@ -33,6 +33,7 @@ envoy_cc_test_library( deps = [ ":network_readfilter_fuzz_proto_cc_proto", "//source/common/config:utility_lib", + "//source/extensions/filters/common/ratelimit:ratelimit_lib", "//source/extensions/filters/network:well_known_names", "//source/extensions/filters/network/common:utility_lib", "//test/extensions/filters/common/ext_authz:ext_authz_test_common", @@ -41,6 +42,7 @@ envoy_cc_test_library( "//test/mocks/network:network_mocks", "@envoy_api//envoy/extensions/filters/network/direct_response/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/network/local_ratelimit/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/network/thrift_proxy/v3:pkg_cc_proto", ], ) @@ -48,6 +50,7 @@ envoy_cc_fuzz_test( name = "network_readfilter_fuzz_test", srcs = ["network_readfilter_fuzz_test.cc"], corpus = "network_readfilter_corpus", + dictionaries = ["network_readfilter_fuzz_test.dict"], # All Envoy network filters must be linked to the test in order for the fuzzer to pick # these up via the NamedNetworkFilterConfigFactory. 
deps = [ diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/http_connection_manager_1 b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/http_connection_manager_1 new file mode 100644 index 000000000000..cae9fbab6700 --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/http_connection_manager_1 @@ -0,0 +1,21 @@ +config { + name: "envoy.filters.network.http_connection_manager" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager" + value: "\022\002B\001\"\000J\004(\001J\000z\002\010\001\220\001\001" + } +} +actions { + on_data { + data: "y" + } +} +actions { + on_new_connection { + } +} +actions { + advance_time { + milliseconds: 655360 + } +} \ No newline at end of file diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/http_connection_manager_2 b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/http_connection_manager_2 new file mode 100644 index 000000000000..d4012d30d384 --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/http_connection_manager_2 @@ -0,0 +1,21 @@ +config { + name: "envoy.filters.network.http_connection_manager" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager" + value: "\010\002\022\001-\"5\n\001\000\032\001~\032\'envoy.type.matcher.v3.ListStringMatcherB\001-B\001~:\013\"\t\t\000\000\000\004\000\000\000\000B\002(\001\312\001\000\362\001\002\010\001" + } +} +actions { + on_data { + data: "y" + } +} +actions { + on_new_connection { + } +} +actions { + advance_time { + milliseconds: 655360 + } +} diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/kafka_1 b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/kafka_1 new file mode 100644 index 000000000000..dd8c619f9d2f --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/kafka_1 @@ -0,0 +1,20 @@ +config { + name: "envoy.filters.network.kafka_broker" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.kafka_broker.v3.KafkaBroker" + value: "\n\"envoy.filters.network.kafka_broker" + } +} +actions { + on_new_connection { + } +} +actions { + on_data { + } +} +actions { + advance_time { + milliseconds: 10000 + } +} diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/ratelimit_1 b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/ratelimit_1 new file mode 100644 index 000000000000..967d64df713d --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/ratelimit_1 @@ -0,0 +1,26 @@ +config { + name: "envoy.filters.network.ratelimit" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.ratelimit.v3.RateLimit" + value: 
"\nP\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\022Y\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\032W\nU\n\001[\022P\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\"\005\020\200\200\214\001(\0012e\022c\022Y\n\010\001\000\000\000\000\000\000\002\"M\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\032\006\010\200\200\204\360\002" + } +} +actions { + on_new_connection { + } +} +actions { + on_data { + data: "\000" + } +} +actions { + on_data { + data: "\000\000" + } +} +actions { + advance_time { + milliseconds: 7299840 + } +} diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/rbac_1 b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/rbac_1 new file mode 100644 index 000000000000..61f1adaedc4d --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/rbac_1 @@ -0,0 +1,20 @@ +config { + name: "envoy.filters.network.rbac" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.rbac.v3.RBAC" + value: "\032\010\177\177\177\177\177\177\177\177" + } +} +actions { + on_new_connection { + } +} +actions { + on_data { + } +} +actions { + on_data { + end_stream: true + } +} diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/rocketmq_proxy_crash b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/rocketmq_proxy_crash new file mode 100644 index 000000000000..b1a71216b2fc --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/rocketmq_proxy_crash @@ -0,0 +1,15 @@ +config { + name: "envoy.filters.network.rocketmq_proxy" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.rocketmq_proxy.v3.RocketmqProxy" + value: "\n \022\034\n\032__________________________ \001 \001" + } +} + +actions { + on_data { + data: "\000\000\000\000\000\000\000\000\000" + end_stream: false + } +} + diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/sni_dynamic_forward_proxy_1 b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/sni_dynamic_forward_proxy_1 new file mode 100644 index 000000000000..21ad6d880835 --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/sni_dynamic_forward_proxy_1 @@ -0,0 +1,36 @@ +config { + name: "envoy.filters.network.sni_dynamic_forward_proxy" + typed_config { + type_url: 
"type.googleapis.com/envoy.extensions.filters.network.sni_dynamic_forward_proxy.v3alpha.FilterConfig" + value: "\nP\nFenvoy.network.sni_dynamic_fo.filters.network.sni_dynamic_forward_proxy*\006\010\200\200\200\260\002" + } +} +actions { + on_new_connection { + } +} +actions { + advance_time { + milliseconds: 30976 + } +} +actions { + advance_time { + milliseconds: 262144 + } +} +actions { + on_data { + data: "\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030c.googlers.com\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030" + } +} +actions { + on_data { + data: "\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030c.googlers.com\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030" + } +} +actions { + on_data { + data: "\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030c.googlers.com\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030" + } +} diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/thrift_proxy_1 b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/thrift_proxy_1 new file mode 100644 index 000000000000..a194b7f99031 --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/thrift_proxy_1 @@ -0,0 +1,7 @@ +config { + name: "envoy.filters.network.thrift_proxy" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.thrift_proxy.v3.ThriftProxy" + value: "\nYtype.googleapis.com/envoy.extensions.filters.network.thrift_proxy.vLLLLLLLLL3.ThriftProxy\020\003\030\003\"\231\002\022\226\002\n\003\n\001A\022\216\002\032\201\002\n\361\001\n\010@\000\000\000\000\000\000\000\022\344\001\nc\n_*]\032[\nPtype.googleapis.com/envoy.extensions.filters.network.thrift_proxy.v3.ThriftProxy\022\007\020\002\"\003\n\001A\022\000\n}\nyenvoy.filters.network.thrift_prox\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177y\022\000\n\013\n\000\022\007\n\005\n\001#\022\0002\010A\177\177\177\177\177\177\177" + } +} diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/thrift_proxy_3 b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/thrift_proxy_3 new file mode 100644 index 000000000000..78a87924ae34 --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/thrift_proxy_3 @@ -0,0 +1,34 @@ +config { + name: "envoy.filters.network.thrift_proxy" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.thrift_proxy.v3.ThriftProxy" + value: 
"\nz\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177" + } +} +actions { + on_new_connection { + } +} +actions { + advance_time { + milliseconds: 10 + } +} +actions { + on_new_connection { + } +} +actions { + on_new_connection { + } +} +actions { + on_data { + } +} +actions { + on_data { + data: "type.googleapis.com/envoy.extensions.filters.network.thrift_proxy.v3.Thrif~tProxy" + end_stream: true + } +} diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/thrift_proxy_assert_failure b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/thrift_proxy_assert_failure new file mode 100644 index 000000000000..ca2772ee0e71 --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/thrift_proxy_assert_failure @@ -0,0 +1,7 @@ +config { + name: "envoy.filters.network.thrift_proxy" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.thrift_proxy.v3.ThriftProxy" + value: "\nYtype.googleapis.com/envoy.extensions.filters.network.thrift_proxy.vLLLLLLLLL3.ThriftProxy\020\003\030\003\"\231\002\022\226\002\n\003\n\001A\022\216\002\032\201\002\n\361\001\n\010@\000\000\000\000\000\000\000\022\344\001\nc\n_*]\032[\nPtype.googleapis.com/envoy.extensions.filters.network.thrift_proxy.v3.ThriftProxy\022\007\020\002\"\003\n\001A\022\000\n}\nyenvoy.filters.network.thrift_prox\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177y\022\000\n\013\n\000\022\007\n\005\n\001#\022\0002\010A\000\000\000\000\000\000\000" + } +} diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/zookeeper_proxy_1 b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/zookeeper_proxy_1 new file mode 100644 index 000000000000..fb16dbd750df --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/zookeeper_proxy_1 @@ -0,0 +1,34 @@ +config { + name: "envoy.filters.network.zookeeper_proxy" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.zookeeper_proxy.v3.ZooKeeperProxy" + value: "\nVtype.googleapis.com/envoy.extensions.filters.network.zookeeper_proxy.v3.ZooKeeperProxy\032\000" + } +} +actions { + advance_time { + milliseconds: 8257536 + } +} +actions { + on_new_connection { + } +} +actions { + on_new_connection { + } +} +actions { + advance_time { + milliseconds: 8257536 + } +} +actions { + on_data { + } +} +actions { + advance_time { + milliseconds: 83886080 + } +} diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_fuzz_test.dict b/test/extensions/filters/network/common/fuzz/network_readfilter_fuzz_test.dict new file mode 100644 index 000000000000..41d6703efd99 --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_fuzz_test.dict @@ -0,0 +1,3 @@ +# The 
names of supported thrift_filters in ThriftProxy +"envoy.filters.thrift.router" +"envoy.filters.thrift.rate_limit" diff --git a/test/extensions/filters/network/common/fuzz/uber_per_readfilter.cc b/test/extensions/filters/network/common/fuzz/uber_per_readfilter.cc index d39dc65e7485..ce8d04e51fc4 100644 --- a/test/extensions/filters/network/common/fuzz/uber_per_readfilter.cc +++ b/test/extensions/filters/network/common/fuzz/uber_per_readfilter.cc @@ -1,6 +1,8 @@ #include "envoy/extensions/filters/network/direct_response/v3/config.pb.h" #include "envoy/extensions/filters/network/local_ratelimit/v3/local_rate_limit.pb.h" +#include "envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.pb.h" +#include "extensions/filters/common/ratelimit/ratelimit_impl.h" #include "extensions/filters/network/common/utility.h" #include "extensions/filters/network/well_known_names.h" @@ -23,14 +25,18 @@ std::vector UberFilterFuzzer::filterNames() { const auto factories = Registry::FactoryRegistry< Server::Configuration::NamedNetworkFilterConfigFactory>::factories(); const std::vector supported_filter_names = { - NetworkFilterNames::get().ExtAuthorization, - NetworkFilterNames::get().LocalRateLimit, - NetworkFilterNames::get().RedisProxy, - NetworkFilterNames::get().ClientSslAuth, - NetworkFilterNames::get().Echo, - NetworkFilterNames::get().DirectResponse, - NetworkFilterNames::get().DubboProxy, - NetworkFilterNames::get().SniCluster}; + NetworkFilterNames::get().ExtAuthorization, NetworkFilterNames::get().LocalRateLimit, + NetworkFilterNames::get().RedisProxy, NetworkFilterNames::get().ClientSslAuth, + NetworkFilterNames::get().Echo, NetworkFilterNames::get().DirectResponse, + NetworkFilterNames::get().DubboProxy, NetworkFilterNames::get().SniCluster, + // A dedicated http_connection_manager fuzzer can be found in + // test/common/http/conn_manager_impl_fuzz_test.cc + NetworkFilterNames::get().HttpConnectionManager, NetworkFilterNames::get().ThriftProxy, + NetworkFilterNames::get().ZooKeeperProxy, NetworkFilterNames::get().SniDynamicForwardProxy, + NetworkFilterNames::get().KafkaBroker, NetworkFilterNames::get().RocketmqProxy, + NetworkFilterNames::get().RateLimit, NetworkFilterNames::get().Rbac + // TODO(jianwendong): cover mongo_proxy, mysql_proxy, postgres_proxy, tcp_proxy. + }; // Check whether each filter is loaded into Envoy. // Some customers build Envoy without some filters. When they run fuzzing, the use of a filter // that does not exist will cause fatal errors. @@ -76,14 +82,46 @@ void UberFilterFuzzer::perFilterSetup(const std::string& filter_name) { .WillOnce(Invoke([&](const envoy::config::core::v3::GrpcService&, Stats::Scope&, bool) { return std::move(async_client_factory_); })); + read_filter_callbacks_->connection_.local_address_ = pipe_addr_; + read_filter_callbacks_->connection_.remote_address_ = pipe_addr_; + } else if (filter_name == NetworkFilterNames::get().HttpConnectionManager) { + read_filter_callbacks_->connection_.local_address_ = pipe_addr_; + read_filter_callbacks_->connection_.remote_address_ = pipe_addr_; + } else if (filter_name == NetworkFilterNames::get().RateLimit) { + async_client_factory_ = std::make_unique(); + async_client_ = std::make_unique(); + // TODO(jianwendong): consider testing on different kinds of responses. 
+ ON_CALL(*async_client_, sendRaw(_, _, _, _, _, _)) + .WillByDefault(testing::WithArgs<3>(Invoke([&](Grpc::RawAsyncRequestCallbacks& callbacks) { + Filters::Common::RateLimit::GrpcClientImpl* grpc_client_impl = + dynamic_cast(&callbacks); + // Response OK + auto response = std::make_unique(); + // Give response to the grpc_client by calling onSuccess(). + grpc_client_impl->onSuccess(std::move(response), span_); + return async_request_.get(); + }))); + + EXPECT_CALL(*async_client_factory_, create()).WillOnce(Invoke([&] { + return std::move(async_client_); + })); + + EXPECT_CALL(factory_context_.cluster_manager_.async_client_manager_, + factoryForGrpcService(_, _, _)) + .WillOnce(Invoke([&](const envoy::config::core::v3::GrpcService&, Stats::Scope&, bool) { + return std::move(async_client_factory_); + })); + read_filter_callbacks_->connection_.local_address_ = pipe_addr_; + read_filter_callbacks_->connection_.remote_address_ = pipe_addr_; } } void UberFilterFuzzer::checkInvalidInputForFuzzer(const std::string& filter_name, Protobuf::Message* config_message) { // System calls such as reading files are prohibited in this fuzzer. Some input that crashes the - // mock/fake objects are also prohibited. For now there are only two filters {DirectResponse, - // LocalRateLimit} on which we have constraints. + // mock/fake objects are also prohibited. We could also avoid fuzzing some unfinished features by + // checking them here. For now there are only three filters {DirectResponse, LocalRateLimit, + // HttpConnectionManager} on which we have constraints. const std::string name = Extensions::NetworkFilters::Common::FilterNameUtil::canonicalFilterName( std::string(filter_name)); if (filter_name == NetworkFilterNames::get().DirectResponse) { @@ -107,6 +145,18 @@ void UberFilterFuzzer::checkInvalidInputForFuzzer(const std::string& filter_name absl::StrCat("local_ratelimit trying to set a large fill_interval. Config:\n{}", config.DebugString())); } + } else if (filter_name == NetworkFilterNames::get().HttpConnectionManager) { + envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + config = dynamic_cast(*config_message); + if (config.codec_type() == envoy::extensions::filters::network::http_connection_manager::v3:: + HttpConnectionManager::HTTP3) { + // Quiche is still in progress and http_conn_manager has a dedicated fuzzer. + // So we won't fuzz it here with complex mocks. + throw EnvoyException(absl::StrCat( + "http_conn_manager trying to use Quiche which we won't fuzz here. Config:\n{}", + config.DebugString())); + } } } diff --git a/test/extensions/filters/network/common/fuzz/uber_readfilter.cc b/test/extensions/filters/network/common/fuzz/uber_readfilter.cc index cd984f47351b..a5b2faa1ab26 100644 --- a/test/extensions/filters/network/common/fuzz/uber_readfilter.cc +++ b/test/extensions/filters/network/common/fuzz/uber_readfilter.cc @@ -6,7 +6,6 @@ namespace Envoy { namespace Extensions { namespace NetworkFilters { - void UberFilterFuzzer::reset() { // Reset some changes made by current filter on some mock objects. 
@@ -16,6 +15,10 @@ void UberFilterFuzzer::reset() { read_filter_callbacks_->connection_.callbacks_.clear(); read_filter_callbacks_->connection_.bytes_sent_callbacks_.clear(); read_filter_callbacks_->connection_.state_ = Network::Connection::State::Open; + // Clear the pointers inside the mock_dispatcher + Event::MockDispatcher& mock_dispatcher = + dynamic_cast(read_filter_callbacks_->connection_.dispatcher_); + mock_dispatcher.clearDeferredDeleteList(); read_filter_.reset(); } @@ -30,15 +33,18 @@ void UberFilterFuzzer::fuzzerSetup() { read_filter_ = read_filter; read_filter_->initializeReadFilterCallbacks(*read_filter_callbacks_); })); + ON_CALL(read_filter_callbacks_->connection_, addFilter(_)) + .WillByDefault(Invoke([&](Network::FilterSharedPtr read_filter) -> void { + read_filter_ = read_filter; + read_filter_->initializeReadFilterCallbacks(*read_filter_callbacks_); + })); // Prepare sni for sni_cluster filter and sni_dynamic_forward_proxy filter. ON_CALL(read_filter_callbacks_->connection_, requestedServerName()) .WillByDefault(testing::Return("fake_cluster")); // Prepare time source for filters such as local_ratelimit filter. factory_context_.prepareSimulatedSystemTime(); // Prepare address for filters such as ext_authz filter. - addr_ = std::make_shared("/test/test.sock"); - read_filter_callbacks_->connection_.remote_address_ = addr_; - read_filter_callbacks_->connection_.local_address_ = addr_; + pipe_addr_ = std::make_shared("/test/test.sock"); async_request_ = std::make_unique(); } diff --git a/test/extensions/filters/network/common/fuzz/uber_readfilter.h b/test/extensions/filters/network/common/fuzz/uber_readfilter.h index 31a5bbc1d91e..d055c5e4451a 100644 --- a/test/extensions/filters/network/common/fuzz/uber_readfilter.h +++ b/test/extensions/filters/network/common/fuzz/uber_readfilter.h @@ -35,7 +35,7 @@ class UberFilterFuzzer { Server::Configuration::FakeFactoryContext factory_context_; Network::ReadFilterSharedPtr read_filter_; Network::FilterFactoryCb cb_; - Network::Address::InstanceConstSharedPtr addr_; + Network::Address::InstanceConstSharedPtr pipe_addr_; Event::SimulatedTimeSystem& time_source_; std::shared_ptr> read_filter_callbacks_; std::unique_ptr async_request_; From b7e47bd2bdc30e1d6fdd2c4263c53b2a9439ad52 Mon Sep 17 00:00:00 2001 From: ASOP Date: Thu, 6 Aug 2020 14:25:34 -0700 Subject: [PATCH 873/909] Code Optimization (#12512) 1. Reduce code repetition: * In 'admin.cc': There are many times of ```get()``` from ```absl::optional``` for the same value. Similar for ```host->address()->asString()```. So I merged them together to avoid multiple calls for the same value. * Similar problem inside "connection_handler_impl.cc" 2. Adopt pair binding feature in C++ 17. Risk Level: Low Signed-off-by: pingsun --- source/server/admin/admin.cc | 109 ++++++++++----------- source/server/connection_handler_impl.cc | 24 ++--- source/server/filter_chain_manager_impl.cc | 26 +++-- source/server/listener_manager_impl.cc | 6 +- 4 files changed, 80 insertions(+), 85 deletions(-) diff --git a/source/server/admin/admin.cc b/source/server/admin/admin.cc index 7745860a1c9d..7eb12d3180d8 100644 --- a/source/server/admin/admin.cc +++ b/source/server/admin/admin.cc @@ -299,8 +299,8 @@ void AdminImpl::addCircuitSettings(const std::string& cluster_name, const std::s // TODO(efimki): Add support of text readouts stats. 
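For reference, the "pair binding feature in C++ 17" mentioned in the commit message above is C++17 structured bindings. A small self-contained sketch (editorial illustration, not part of the patch) of the before/after iteration pattern applied throughout admin.cc:

#include <cstdint>
#include <iostream>
#include <map>
#include <string>

int main() {
  const std::map<std::string, uint64_t> counters{{"rq_total", 3}, {"rq_error", 1}};
  // Before: repeated entry.first / entry.second accesses.
  for (const auto& entry : counters) {
    std::cout << entry.first << "::" << entry.second << "\n";
  }
  // After: structured bindings give both elements descriptive names.
  for (const auto& [name, value] : counters) {
    std::cout << name << "::" << value << "\n";
  }
  return 0;
}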
void AdminImpl::writeClustersAsJson(Buffer::Instance& response) { envoy::admin::v3::Clusters clusters; - for (auto& cluster_pair : server_.clusterManager().clusters()) { - const Upstream::Cluster& cluster = cluster_pair.second.get(); + for (const auto& [name, cluster_ref] : server_.clusterManager().clusters()) { + const Upstream::Cluster& cluster = cluster_ref.get(); Upstream::ClusterInfoConstSharedPtr cluster_info = cluster.info(); envoy::admin::v3::ClusterStatus& cluster_status = *clusters.add_cluster_statuses(); @@ -332,17 +332,17 @@ void AdminImpl::writeClustersAsJson(Buffer::Instance& response) { host_status.set_hostname(host->hostname()); host_status.mutable_locality()->MergeFrom(host->locality()); - for (const auto& named_counter : host->counters()) { + for (const auto& [counter_name, counter] : host->counters()) { auto& metric = *host_status.add_stats(); - metric.set_name(std::string(named_counter.first)); - metric.set_value(named_counter.second.get().value()); + metric.set_name(std::string(counter_name)); + metric.set_value(counter.get().value()); metric.set_type(envoy::admin::v3::SimpleMetric::COUNTER); } - for (const auto& named_gauge : host->gauges()) { + for (const auto& [gauge_name, gauge] : host->gauges()) { auto& metric = *host_status.add_stats(); - metric.set_name(std::string(named_gauge.first)); - metric.set_value(named_gauge.second.get().value()); + metric.set_name(std::string(gauge_name)); + metric.set_value(gauge.get().value()); metric.set_type(envoy::admin::v3::SimpleMetric::GAUGE); } @@ -376,61 +376,58 @@ void AdminImpl::writeClustersAsJson(Buffer::Instance& response) { // TODO(efimki): Add support of text readouts stats. void AdminImpl::writeClustersAsText(Buffer::Instance& response) { - for (auto& cluster : server_.clusterManager().clusters()) { - addOutlierInfo(cluster.second.get().info()->name(), cluster.second.get().outlierDetector(), - response); - - addCircuitSettings( - cluster.second.get().info()->name(), "default", - cluster.second.get().info()->resourceManager(Upstream::ResourcePriority::Default), - response); - addCircuitSettings( - cluster.second.get().info()->name(), "high", - cluster.second.get().info()->resourceManager(Upstream::ResourcePriority::High), response); - - response.add(fmt::format("{}::added_via_api::{}\n", cluster.second.get().info()->name(), - cluster.second.get().info()->addedViaApi())); - for (auto& host_set : cluster.second.get().prioritySet().hostSetsPerPriority()) { + for (const auto& [name, cluster_ref] : server_.clusterManager().clusters()) { + const Upstream::Cluster& cluster = cluster_ref.get(); + const std::string& cluster_name = cluster.info()->name(); + addOutlierInfo(cluster_name, cluster.outlierDetector(), response); + + addCircuitSettings(cluster_name, "default", + cluster.info()->resourceManager(Upstream::ResourcePriority::Default), + response); + addCircuitSettings(cluster_name, "high", + cluster.info()->resourceManager(Upstream::ResourcePriority::High), response); + + response.add( + fmt::format("{}::added_via_api::{}\n", cluster_name, cluster.info()->addedViaApi())); + for (auto& host_set : cluster.prioritySet().hostSetsPerPriority()) { for (auto& host : host_set->hosts()) { + const std::string& host_address = host->address()->asString(); std::map all_stats; - for (const auto& counter : host->counters()) { - all_stats[counter.first] = counter.second.get().value(); + for (const auto& [counter_name, counter] : host->counters()) { + all_stats[counter_name] = counter.get().value(); } - for (const auto& gauge : 
host->gauges()) { - all_stats[gauge.first] = gauge.second.get().value(); + for (const auto& [gauge_name, gauge] : host->gauges()) { + all_stats[gauge_name] = gauge.get().value(); } - for (const auto& stat : all_stats) { - response.add(fmt::format("{}::{}::{}::{}\n", cluster.second.get().info()->name(), - host->address()->asString(), stat.first, stat.second)); + for (const auto& [stat_name, stat] : all_stats) { + response.add( + fmt::format("{}::{}::{}::{}\n", cluster_name, host_address, stat_name, stat)); } - response.add(fmt::format("{}::{}::hostname::{}\n", cluster.second.get().info()->name(), - host->address()->asString(), host->hostname())); - response.add(fmt::format("{}::{}::health_flags::{}\n", cluster.second.get().info()->name(), - host->address()->asString(), + response.add( + fmt::format("{}::{}::hostname::{}\n", cluster_name, host_address, host->hostname())); + response.add(fmt::format("{}::{}::health_flags::{}\n", cluster_name, host_address, Upstream::HostUtility::healthFlagsToString(*host))); - response.add(fmt::format("{}::{}::weight::{}\n", cluster.second.get().info()->name(), - host->address()->asString(), host->weight())); - response.add(fmt::format("{}::{}::region::{}\n", cluster.second.get().info()->name(), - host->address()->asString(), host->locality().region())); - response.add(fmt::format("{}::{}::zone::{}\n", cluster.second.get().info()->name(), - host->address()->asString(), host->locality().zone())); - response.add(fmt::format("{}::{}::sub_zone::{}\n", cluster.second.get().info()->name(), - host->address()->asString(), host->locality().sub_zone())); - response.add(fmt::format("{}::{}::canary::{}\n", cluster.second.get().info()->name(), - host->address()->asString(), host->canary())); - response.add(fmt::format("{}::{}::priority::{}\n", cluster.second.get().info()->name(), - host->address()->asString(), host->priority())); + response.add( + fmt::format("{}::{}::weight::{}\n", cluster_name, host_address, host->weight())); + response.add(fmt::format("{}::{}::region::{}\n", cluster_name, host_address, + host->locality().region())); + response.add( + fmt::format("{}::{}::zone::{}\n", cluster_name, host_address, host->locality().zone())); + response.add(fmt::format("{}::{}::sub_zone::{}\n", cluster_name, host_address, + host->locality().sub_zone())); + response.add( + fmt::format("{}::{}::canary::{}\n", cluster_name, host_address, host->canary())); + response.add( + fmt::format("{}::{}::priority::{}\n", cluster_name, host_address, host->priority())); response.add(fmt::format( - "{}::{}::success_rate::{}\n", cluster.second.get().info()->name(), - host->address()->asString(), + "{}::{}::success_rate::{}\n", cluster_name, host_address, host->outlierDetector().successRate( Upstream::Outlier::DetectorHostMonitor::SuccessRateMonitorType::ExternalOrigin))); response.add(fmt::format( - "{}::{}::local_origin_success_rate::{}\n", cluster.second.get().info()->name(), - host->address()->asString(), + "{}::{}::local_origin_success_rate::{}\n", cluster_name, host_address, host->outlierDetector().successRate( Upstream::Outlier::DetectorHostMonitor::SuccessRateMonitorType::LocalOrigin))); } @@ -464,8 +461,8 @@ void AdminImpl::addAllConfigToDump(envoy::admin::v3::ConfigDump& dump, } } - for (const auto& key_callback_pair : callbacks_map) { - ProtobufTypes::MessagePtr message = key_callback_pair.second(); + for (const auto& [name, callback] : callbacks_map) { + ProtobufTypes::MessagePtr message = callback(); ASSERT(message); if (mask.has_value()) { @@ -492,8 +489,8 @@ 
AdminImpl::addResourceToDump(envoy::admin::v3::ConfigDump& dump, } } - for (const auto& key_callback_pair : callbacks_map) { - ProtobufTypes::MessagePtr message = key_callback_pair.second(); + for (const auto& [name, callback] : callbacks_map) { + ProtobufTypes::MessagePtr message = callback(); ASSERT(message); auto field_descriptor = message->GetDescriptor()->FindFieldByName(resource); @@ -563,8 +560,8 @@ void AdminImpl::addLbEndpoint( ProtobufTypes::MessagePtr AdminImpl::dumpEndpointConfigs() const { auto endpoint_config_dump = std::make_unique(); - for (auto& cluster_pair : server_.clusterManager().clusters()) { - const Upstream::Cluster& cluster = cluster_pair.second.get(); + for (const auto& [name, cluster_ref] : server_.clusterManager().clusters()) { + const Upstream::Cluster& cluster = cluster_ref.get(); Upstream::ClusterInfoConstSharedPtr cluster_info = cluster.info(); envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment; diff --git a/source/server/connection_handler_impl.cc b/source/server/connection_handler_impl.cc index b4ce4de04bd0..c0becac81ad5 100644 --- a/source/server/connection_handler_impl.cc +++ b/source/server/connection_handler_impl.cc @@ -515,31 +515,31 @@ ConnectionHandlerImpl::ActiveTcpConnection::ActiveTcpConnection( // We just universally set no delay on connections. Theoretically we might at some point want // to make this configurable. connection_->noDelay(true); - - active_connections_.listener_.stats_.downstream_cx_total_.inc(); - active_connections_.listener_.stats_.downstream_cx_active_.inc(); - active_connections_.listener_.per_worker_stats_.downstream_cx_total_.inc(); - active_connections_.listener_.per_worker_stats_.downstream_cx_active_.inc(); + auto& listener = active_connections_.listener_; + listener.stats_.downstream_cx_total_.inc(); + listener.stats_.downstream_cx_active_.inc(); + listener.per_worker_stats_.downstream_cx_total_.inc(); + listener.per_worker_stats_.downstream_cx_active_.inc(); // Active connections on the handler (not listener). The per listener connections have already // been incremented at this point either via the connection balancer or in the socket accept // path if there is no configured balancer. - ++active_connections_.listener_.parent_.num_handler_connections_; + ++listener.parent_.num_handler_connections_; } ConnectionHandlerImpl::ActiveTcpConnection::~ActiveTcpConnection() { emitLogs(*active_connections_.listener_.config_, *stream_info_); - - active_connections_.listener_.stats_.downstream_cx_active_.dec(); - active_connections_.listener_.stats_.downstream_cx_destroy_.inc(); - active_connections_.listener_.per_worker_stats_.downstream_cx_active_.dec(); + auto& listener = active_connections_.listener_; + listener.stats_.downstream_cx_active_.dec(); + listener.stats_.downstream_cx_destroy_.inc(); + listener.per_worker_stats_.downstream_cx_active_.dec(); conn_length_->complete(); // Active listener connections (not handler). - active_connections_.listener_.decNumConnections(); + listener.decNumConnections(); // Active handler connections (not listener). 
- active_connections_.listener_.parent_.decNumConnections(); + listener.parent_.decNumConnections(); } ActiveRawUdpListener::ActiveRawUdpListener(Network::ConnectionHandler& parent, diff --git a/source/server/filter_chain_manager_impl.cc b/source/server/filter_chain_manager_impl.cc index fbc519816c09..4ab8fa9a6867 100644 --- a/source/server/filter_chain_manager_impl.cc +++ b/source/server/filter_chain_manager_impl.cc @@ -558,42 +558,40 @@ const Network::FilterChain* FilterChainManagerImpl::findFilterChainForSourceIpAn } void FilterChainManagerImpl::convertIPsToTries() { - for (auto& port : destination_ports_map_) { + for (auto& [destination_port, destination_ips_pair] : destination_ports_map_) { // These variables are used as we build up the destination CIDRs used for the trie. - auto& destination_ips_pair = port.second; - auto& destination_ips_map = destination_ips_pair.first; + auto& [destination_ips_map, destination_ips_trie] = destination_ips_pair; std::vector>> destination_ips_list; destination_ips_list.reserve(destination_ips_map.size()); - for (const auto& entry : destination_ips_map) { - destination_ips_list.push_back(makeCidrListEntry(entry.first, entry.second)); + for (const auto& [destination_ip, server_names_map_ptr] : destination_ips_map) { + destination_ips_list.push_back(makeCidrListEntry(destination_ip, server_names_map_ptr)); // This hugely nested for loop greatly pains me, but I'm not sure how to make it better. // We need to get access to all of the source IP strings so that we can convert them into // a trie like we did for the destination IPs above. - for (auto& server_names_entry : *entry.second) { - for (auto& transport_protocols_entry : server_names_entry.second) { - for (auto& application_protocols_entry : transport_protocols_entry.second) { - for (auto& source_array_entry : application_protocols_entry.second) { - auto& source_ips_map = source_array_entry.first; + for (auto& [server_name, transport_protocols_map] : *server_names_map_ptr) { + for (auto& [transport_protocol, application_protocols_map] : transport_protocols_map) { + for (auto& [application_protocol, source_arrays] : application_protocols_map) { + for (auto& [source_ips_map, source_ips_trie] : source_arrays) { std::vector< std::pair>> source_ips_list; source_ips_list.reserve(source_ips_map.size()); - for (auto& source_ip : source_ips_map) { - source_ips_list.push_back(makeCidrListEntry(source_ip.first, source_ip.second)); + for (auto& [source_ip, source_port_map_ptr] : source_ips_map) { + source_ips_list.push_back(makeCidrListEntry(source_ip, source_port_map_ptr)); } - source_array_entry.second = std::make_unique(source_ips_list, true); + source_ips_trie = std::make_unique(source_ips_list, true); } } } } } - destination_ips_pair.second = std::make_unique(destination_ips_list, true); + destination_ips_trie = std::make_unique(destination_ips_list, true); } } diff --git a/source/server/listener_manager_impl.cc b/source/server/listener_manager_impl.cc index ed501d8b4a9f..fa384c274fa2 100644 --- a/source/server/listener_manager_impl.cc +++ b/source/server/listener_manager_impl.cc @@ -312,11 +312,11 @@ ProtobufTypes::MessagePtr ListenerManagerImpl::dumpListenerConfigs() { fillState(*dump_listener, *listener); } - for (const auto& state_and_name : error_state_tracker_) { + for (const auto& [error_name, error_state] : error_state_tracker_) { DynamicListener* dynamic_listener = - getOrCreateDynamicListener(state_and_name.first, *config_dump, listener_map); + getOrCreateDynamicListener(error_name, *config_dump, 
listener_map); - const envoy::admin::v3::UpdateFailureState& state = *state_and_name.second; + const envoy::admin::v3::UpdateFailureState& state = *error_state; dynamic_listener->mutable_error_state()->CopyFrom(state); } From 0422f644228968a2af2383fd0b917b146976759c Mon Sep 17 00:00:00 2001 From: Snow Pettersen Date: Thu, 6 Aug 2020 18:08:14 -0400 Subject: [PATCH 874/909] hcm: split ActiveStream::State into AS and FM state (#12518) Splits the State object into two, one for the ActiveStream and one for the FilterManager. To better split the responsibility, introduce new functions on the FM and on the FM callbacks, cleaning up a few TODOs on the way. Signed-off-by: Snow Pettersen --- source/common/http/conn_manager_impl.cc | 261 ++++++++++----------- source/common/http/conn_manager_impl.h | 195 ++++++++++----- test/common/http/conn_manager_impl_test.cc | 4 +- 3 files changed, 255 insertions(+), 205 deletions(-) diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index a00ba998d641..f00afb45cf7b 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -199,11 +199,12 @@ void ConnectionManagerImpl::doEndStream(ActiveStream& stream) { // explicitly nulls out response_encoder to avoid the downstream being notified of the // Envoy-internal stream instance being ended. if (stream.response_encoder_ != nullptr && - (!stream.state_.remote_complete_ || !stream.state_.codec_saw_local_complete_)) { + (!stream.filter_manager_.remoteComplete() || !stream.state_.codec_saw_local_complete_)) { // Indicate local is complete at this point so that if we reset during a continuation, we don't // raise further data or trailers. ENVOY_STREAM_LOG(debug, "doEndStream() resetting stream", stream); - stream.state_.local_complete_ = true; + // TODO(snowp): This call might not be necessary, try to clean up + remove setter function. + stream.filter_manager_.setLocalComplete(); stream.state_.codec_saw_local_complete_ = true; stream.response_encoder_->getStream().resetStream(StreamResetReason::LocalReset); reset_stream = true; @@ -231,7 +232,6 @@ void ConnectionManagerImpl::doDeferredStreamDestroy(ActiveStream& stream) { } stream.filter_manager_.disarmRequestTimeout(); - stream.state_.destroyed_ = true; stream.filter_manager_.destroyFilters(); read_callbacks_->connection().dispatcher().deferredDelete(stream.removeFromList(streams_)); @@ -520,6 +520,7 @@ ConnectionManagerImpl::ActiveStream::ActiveStream(ConnectionManagerImpl& connect uint32_t buffer_limit) : connection_manager_(connection_manager), filter_manager_(*this, *this, buffer_limit, connection_manager_.config_.filterFactory(), + connection_manager_.config_.localReply(), connection_manager_.codec_->protocol(), connection_manager_.timeSource(), connection_manager_.read_callbacks_->connection().streamInfo().filterState(), StreamInfo::FilterState::LifeSpan::Connection), @@ -652,8 +653,6 @@ ConnectionManagerImpl::ActiveStream::~ActiveStream() { if (state_.successful_upgrade_) { connection_manager_.stats_.named_.downstream_cx_upgrades_active_.dec(); } - - ASSERT(state_.filter_call_state_ == 0); } void ConnectionManagerImpl::ActiveStream::resetIdleTimer() { @@ -867,7 +866,7 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(RequestHeaderMapPtr&& he Server::OverloadActionState::Active) { // In this one special case, do not create the filter chain. If there is a risk of memory // overload it is more important to avoid unnecessary allocation than to create the filters. 
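To make the State split described in the commit message above concrete, here is a simplified editorial sketch (member names abridged from this diff; not the actual declarations in conn_manager_impl.h) of which flags stay on the ActiveStream and which move to the FilterManager:

#include <cstdint>

// Connection/codec-facing flags remain on the ActiveStream.
struct ActiveStreamState {
  bool codec_saw_local_complete_{false}; // set before resetting the codec stream
  bool is_head_request_{false};
  bool successful_upgrade_{false};
};

// Filter-iteration bookkeeping moves into the FilterManager.
struct FilterManagerState {
  uint32_t filter_call_state_{0};  // which filter callback is currently on the stack
  bool local_complete_{false};     // local (response) end of stream reached
  bool remote_complete_{false};    // remote (request) end of stream reached
  bool decoder_filters_streaming_{false};
  bool encoder_filters_streaming_{false};
  bool destroyed_{false};
};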
- state_.created_filter_chain_ = true; + filter_manager_.skipFilterChainCreation(); connection_manager_.stats_.named_.downstream_rq_overload_close_.inc(); sendLocalReply(Grpc::Common::isGrpcRequestHeaders(*filter_manager_.requestHeaders()), Http::Code::ServiceUnavailable, "envoy overloaded", nullptr, absl::nullopt, @@ -1137,15 +1136,14 @@ void ConnectionManagerImpl::FilterManager::decodeHeaders(ActiveStreamDecoderFilt std::list::iterator continue_data_entry = decoder_filters_.end(); for (; entry != decoder_filters_.end(); entry++) { - ASSERT( - !(active_stream_.state_.filter_call_state_ & ActiveStream::FilterCallState::DecodeHeaders)); - active_stream_.state_.filter_call_state_ |= ActiveStream::FilterCallState::DecodeHeaders; - (*entry)->end_stream_ = active_stream_.state_.decoding_headers_only_ || + ASSERT(!(state_.filter_call_state_ & FilterCallState::DecodeHeaders)); + state_.filter_call_state_ |= FilterCallState::DecodeHeaders; + (*entry)->end_stream_ = state_.decoding_headers_only_ || (end_stream && continue_data_entry == decoder_filters_.end()); FilterHeadersStatus status = (*entry)->decodeHeaders(headers, (*entry)->end_stream_); ASSERT(!(status == FilterHeadersStatus::ContinueAndEndStream && (*entry)->end_stream_)); - active_stream_.state_.filter_call_state_ &= ~ActiveStream::FilterCallState::DecodeHeaders; + state_.filter_call_state_ &= ~FilterCallState::DecodeHeaders; ENVOY_STREAM_LOG(trace, "decode headers called: filter={} status={}", active_stream_, static_cast((*entry).get()), static_cast(status)); @@ -1164,8 +1162,7 @@ void ConnectionManagerImpl::FilterManager::decodeHeaders(ActiveStreamDecoderFilt } (*entry)->decode_headers_called_ = true; - if (!(*entry)->commonHandleAfterHeadersCallback(status, - active_stream_.state_.decoding_headers_only_) && + if (!(*entry)->commonHandleAfterHeadersCallback(status, state_.decoding_headers_only_) && std::next(entry) != decoder_filters_.end()) { // Stop iteration IFF this is not the last filter. If it is the last filter, continue with // processing since we need to handle the case where a terminal filter wants to buffer, but @@ -1206,13 +1203,13 @@ void ConnectionManagerImpl::FilterManager::decodeData( active_stream_.resetIdleTimer(); // If we previously decided to decode only the headers, do nothing here. - if (active_stream_.state_.decoding_headers_only_) { + if (state_.decoding_headers_only_) { return; } // If a response is complete or a reset has been sent, filters do not care about further body // data. Just drop it. - if (active_stream_.state_.local_complete_) { + if (state_.local_complete_) { return; } @@ -1224,7 +1221,7 @@ void ConnectionManagerImpl::FilterManager::decodeData( for (; entry != decoder_filters_.end(); entry++) { // If the filter pointed by entry has stopped for all frame types, return now. - if (handleDataIfStopAll(**entry, data, active_stream_.state_.decoder_filters_streaming_)) { + if (handleDataIfStopAll(**entry, data, state_.decoder_filters_streaming_)) { return; } // If end_stream_ is marked for a filter, the data is not for this filter and filters after. 
@@ -1260,27 +1257,26 @@ void ConnectionManagerImpl::FilterManager::decodeData( if ((*entry)->end_stream_) { return; } - ASSERT(!(active_stream_.state_.filter_call_state_ & ActiveStream::FilterCallState::DecodeData)); + ASSERT(!(state_.filter_call_state_ & FilterCallState::DecodeData)); // We check the request_trailers_ pointer here in case addDecodedTrailers // is called in decodeData during a previous filter invocation, at which point we communicate to // the current and future filters that the stream has not yet ended. if (end_stream) { - active_stream_.state_.filter_call_state_ |= ActiveStream::FilterCallState::LastDataFrame; + state_.filter_call_state_ |= FilterCallState::LastDataFrame; } - recordLatestDataFilter(entry, active_stream_.state_.latest_data_decoding_filter_, - decoder_filters_); + recordLatestDataFilter(entry, state_.latest_data_decoding_filter_, decoder_filters_); - active_stream_.state_.filter_call_state_ |= ActiveStream::FilterCallState::DecodeData; + state_.filter_call_state_ |= FilterCallState::DecodeData; (*entry)->end_stream_ = end_stream && !request_trailers_; FilterDataStatus status = (*entry)->handle_->decodeData(data, (*entry)->end_stream_); if ((*entry)->end_stream_) { (*entry)->handle_->decodeComplete(); } - active_stream_.state_.filter_call_state_ &= ~ActiveStream::FilterCallState::DecodeData; + state_.filter_call_state_ &= ~FilterCallState::DecodeData; if (end_stream) { - active_stream_.state_.filter_call_state_ &= ~ActiveStream::FilterCallState::LastDataFrame; + state_.filter_call_state_ &= ~FilterCallState::LastDataFrame; } ENVOY_STREAM_LOG(trace, "decode data called: filter={} status={}", active_stream_, static_cast((*entry).get()), static_cast(status)); @@ -1292,8 +1288,7 @@ void ConnectionManagerImpl::FilterManager::decodeData( trailers_added_entry = entry; } - if (!(*entry)->commonHandleAfterDataCallback( - status, data, active_stream_.state_.decoder_filters_streaming_) && + if (!(*entry)->commonHandleAfterDataCallback(status, data, state_.decoder_filters_streaming_) && std::next(entry) != decoder_filters_.end()) { // Stop iteration IFF this is not the last filter. If it is the last filter, continue with // processing since we need to handle the case where a terminal filter wants to buffer, but @@ -1315,7 +1310,7 @@ void ConnectionManagerImpl::FilterManager::decodeData( RequestTrailerMap& ConnectionManagerImpl::FilterManager::addDecodedTrailers() { // Trailers can only be added during the last data frame (i.e. end_stream = true). - ASSERT(active_stream_.state_.filter_call_state_ & ActiveStream::FilterCallState::LastDataFrame); + ASSERT(state_.filter_call_state_ & FilterCallState::LastDataFrame); // Trailers can only be added once. 
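The filter_call_state_ updates above follow a set-bit/clear-bit discipline around every filter callback, so re-entrant calls can tell which callback is on the stack. A standalone sketch of that pattern (editorial illustration with simplified constants, not the Envoy FilterCallState definition):

#include <cassert>
#include <cstdint>

enum FilterCallState : uint32_t {
  DecodeHeaders = 0x01,
  DecodeData = 0x02,
  LastDataFrame = 0x04,
};

int main() {
  uint32_t filter_call_state = 0;
  // Entering the decodeData() callbacks for the final data frame.
  assert(!(filter_call_state & DecodeData));
  filter_call_state |= (DecodeData | LastDataFrame);
  // ... filters run here; helpers such as addDecodedTrailers() assert that
  // LastDataFrame is set before allowing trailers to be added ...
  filter_call_state &= ~static_cast<uint32_t>(DecodeData | LastDataFrame);
  assert(filter_call_state == 0);
  return 0;
}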
ASSERT(!request_trailers_); @@ -1326,18 +1321,16 @@ RequestTrailerMap& ConnectionManagerImpl::FilterManager::addDecodedTrailers() { void ConnectionManagerImpl::FilterManager::addDecodedData(ActiveStreamDecoderFilter& filter, Buffer::Instance& data, bool streaming) { - if (active_stream_.state_.filter_call_state_ == 0 || - (active_stream_.state_.filter_call_state_ & ActiveStream::FilterCallState::DecodeHeaders) || - (active_stream_.state_.filter_call_state_ & ActiveStream::FilterCallState::DecodeData) || - ((active_stream_.state_.filter_call_state_ & ActiveStream::FilterCallState::DecodeTrailers) && - !filter.canIterate())) { + if (state_.filter_call_state_ == 0 || + (state_.filter_call_state_ & FilterCallState::DecodeHeaders) || + (state_.filter_call_state_ & FilterCallState::DecodeData) || + ((state_.filter_call_state_ & FilterCallState::DecodeTrailers) && !filter.canIterate())) { // Make sure if this triggers watermarks, the correct action is taken. - active_stream_.state_.decoder_filters_streaming_ = streaming; + state_.decoder_filters_streaming_ = streaming; // If no call is happening or we are in the decode headers/data callback, buffer the data. // Inline processing happens in the decodeHeaders() callback if necessary. filter.commonHandleBufferData(data); - } else if (active_stream_.state_.filter_call_state_ & - ActiveStream::FilterCallState::DecodeTrailers) { + } else if (state_.filter_call_state_ & FilterCallState::DecodeTrailers) { // In this case we need to inline dispatch the data to further filters. If those filters // choose to buffer/stop iteration that's fine. decodeData(&filter, data, false, FilterIterationStartState::AlwaysStartFromNext); @@ -1363,12 +1356,12 @@ void ConnectionManagerImpl::ActiveStream::decodeTrailers(RequestTrailerMapPtr&& void ConnectionManagerImpl::FilterManager::decodeTrailers(ActiveStreamDecoderFilter* filter, RequestTrailerMap& trailers) { // If we previously decided to decode only the headers, do nothing here. - if (active_stream_.state_.decoding_headers_only_) { + if (state_.decoding_headers_only_) { return; } // See decodeData() above for why we check local_complete_ here. 
- if (active_stream_.state_.local_complete_) { + if (state_.local_complete_) { return; } @@ -1382,13 +1375,12 @@ void ConnectionManagerImpl::FilterManager::decodeTrailers(ActiveStreamDecoderFil return; } - ASSERT(!(active_stream_.state_.filter_call_state_ & - ActiveStream::FilterCallState::DecodeTrailers)); - active_stream_.state_.filter_call_state_ |= ActiveStream::FilterCallState::DecodeTrailers; + ASSERT(!(state_.filter_call_state_ & FilterCallState::DecodeTrailers)); + state_.filter_call_state_ |= FilterCallState::DecodeTrailers; FilterTrailersStatus status = (*entry)->handle_->decodeTrailers(trailers); (*entry)->handle_->decodeComplete(); (*entry)->end_stream_ = true; - active_stream_.state_.filter_call_state_ &= ~ActiveStream::FilterCallState::DecodeTrailers; + state_.filter_call_state_ &= ~FilterCallState::DecodeTrailers; ENVOY_STREAM_LOG(trace, "decode trailers called: filter={} status={}", active_stream_, static_cast((*entry).get()), static_cast(status)); @@ -1434,8 +1426,8 @@ void ConnectionManagerImpl::FilterManager::decodeMetadata(ActiveStreamDecoderFil } void ConnectionManagerImpl::FilterManager::maybeEndDecode(bool end_stream) { - ASSERT(!active_stream_.state_.remote_complete_); - active_stream_.state_.remote_complete_ = end_stream; + ASSERT(!state_.remote_complete_); + state_.remote_complete_ = end_stream; if (end_stream) { stream_info_.onLastDownstreamRxByteReceived(); ENVOY_STREAM_LOG(debug, "request end stream", active_stream_); @@ -1455,8 +1447,8 @@ ConnectionManagerImpl::FilterManager::commonEncodePrefix( // Only do base state setting on the initial call. Subsequent calls for filtering do not touch // the base state. if (filter == nullptr) { - ASSERT(!active_stream_.state_.local_complete_); - active_stream_.state_.local_complete_ = end_stream; + ASSERT(!state_.local_complete_); + state_.local_complete_ = end_stream; return encoder_filters_.begin(); } @@ -1604,34 +1596,8 @@ void ConnectionManagerImpl::ActiveStream::sendLocalReply( // state machine screwed up, bypass the filter chain and send the local // reply directly to the codec. // - // Make sure we won't end up with nested watermark calls from the body buffer. - state_.encoder_filters_streaming_ = true; - Http::Utility::sendLocalReply( - state_.destroyed_, - Utility::EncodeFunctions{ - [&](ResponseHeaderMap& response_headers, Code& code, std::string& body, - absl::string_view& content_type) -> void { - connection_manager_.config_.localReply().rewrite( - filter_manager_.requestHeaders(), response_headers, filter_manager_.streamInfo(), - code, body, content_type); - }, - [&](ResponseHeaderMapPtr&& response_headers, bool end_stream) -> void { - if (modify_headers != nullptr) { - modify_headers(*response_headers); - } - // TODO(snowp): This is kinda awkward but we need to do this so that the access log - // sees these headers. Is there a better way? 
- filter_manager_.setResponseHeaders(std::move(response_headers)); - encodeHeaders(*filter_manager_.responseHeaders(), end_stream); - filter_manager_.maybeEndEncode(end_stream); - }, - [&](Buffer::Instance& data, bool end_stream) -> void { - encodeData(data, end_stream); - filter_manager_.maybeEndEncode(end_stream); - }}, - Utility::LocalReplyData{Grpc::Common::hasGrpcContentType(*filter_manager_.requestHeaders()), - code, body, grpc_status, state_.is_head_request_}); - filter_manager_.maybeEndEncode(state_.local_complete_); + filter_manager_.sendDirectLocalReply(code, body, modify_headers, state_.is_head_request_, + grpc_status); } else { filter_manager_.streamInfo().setResponseCodeDetails(details); // If we land in this branch, response headers have already been sent to the client. @@ -1654,7 +1620,7 @@ void ConnectionManagerImpl::FilterManager::sendLocalReplyViaFilterChain( createFilterChain(); Utility::sendLocalReply( - active_stream_.state_.destroyed_, + state_.destroyed_, Utility::EncodeFunctions{ [this](ResponseHeaderMap& response_headers, Code& code, std::string& body, absl::string_view& content_type) -> void { @@ -1679,14 +1645,48 @@ void ConnectionManagerImpl::FilterManager::sendLocalReplyViaFilterChain( Utility::LocalReplyData{is_grpc_request, code, body, grpc_status, is_head_request}); } +void ConnectionManagerImpl::FilterManager::sendDirectLocalReply( + Code code, absl::string_view body, + const std::function& modify_headers, bool is_head_request, + const absl::optional grpc_status) { + // Make sure we won't end up with nested watermark calls from the body buffer. + state_.encoder_filters_streaming_ = true; + Http::Utility::sendLocalReply( + state_.destroyed_, + Utility::EncodeFunctions{ + [&](ResponseHeaderMap& response_headers, Code& code, std::string& body, + absl::string_view& content_type) -> void { + local_reply_.rewrite(request_headers_.get(), response_headers, stream_info_, code, body, + content_type); + }, + [&](ResponseHeaderMapPtr&& response_headers, bool end_stream) -> void { + if (modify_headers != nullptr) { + modify_headers(*response_headers); + } + + // Move the response headers into the FilterManager to make sure they're visible to + // access logs. + response_headers_ = std::move(response_headers); + filter_manager_callbacks_.encodeHeaders(*response_headers_, end_stream); + maybeEndEncode(end_stream); + }, + [&](Buffer::Instance& data, bool end_stream) -> void { + filter_manager_callbacks_.encodeData(data, end_stream); + maybeEndEncode(end_stream); + }}, + Utility::LocalReplyData{Grpc::Common::hasGrpcContentType(*request_headers_), code, body, + grpc_status, is_head_request}); + maybeEndEncode(state_.local_complete_); +} + void ConnectionManagerImpl::FilterManager::encode100ContinueHeaders( ActiveStreamEncoderFilter* filter, ResponseHeaderMap& headers) { active_stream_.resetIdleTimer(); ASSERT(active_stream_.connection_manager_.config_.proxy100Continue()); // The caller must guarantee that encode100ContinueHeaders() is invoked at most once. - ASSERT(!active_stream_.state_.has_continue_headers_ || filter != nullptr); + ASSERT(!state_.has_continue_headers_ || filter != nullptr); // Make sure commonContinue continues encode100ContinueHeaders. - active_stream_.state_.has_continue_headers_ = true; + state_.has_continue_headers_ = true; // Similar to the block in encodeHeaders, run encode100ContinueHeaders on each // filter. 
This is simpler than that case because 100 continue implies no @@ -1696,13 +1696,10 @@ void ConnectionManagerImpl::FilterManager::encode100ContinueHeaders( std::list::iterator entry = commonEncodePrefix(filter, false, FilterIterationStartState::AlwaysStartFromNext); for (; entry != encoder_filters_.end(); entry++) { - ASSERT(!(active_stream_.state_.filter_call_state_ & - ActiveStream::FilterCallState::Encode100ContinueHeaders)); - active_stream_.state_.filter_call_state_ |= - ActiveStream::FilterCallState::Encode100ContinueHeaders; + ASSERT(!(state_.filter_call_state_ & FilterCallState::Encode100ContinueHeaders)); + state_.filter_call_state_ |= FilterCallState::Encode100ContinueHeaders; FilterHeadersStatus status = (*entry)->handle_->encode100ContinueHeaders(headers); - active_stream_.state_.filter_call_state_ &= - ~ActiveStream::FilterCallState::Encode100ContinueHeaders; + state_.filter_call_state_ &= ~FilterCallState::Encode100ContinueHeaders; ENVOY_STREAM_LOG(trace, "encode 100 continue headers called: filter={} status={}", active_stream_, static_cast((*entry).get()), static_cast(status)); @@ -1759,27 +1756,26 @@ void ConnectionManagerImpl::FilterManager::encodeHeaders(ActiveStreamEncoderFilt std::list::iterator continue_data_entry = encoder_filters_.end(); for (; entry != encoder_filters_.end(); entry++) { - ASSERT( - !(active_stream_.state_.filter_call_state_ & ActiveStream::FilterCallState::EncodeHeaders)); - active_stream_.state_.filter_call_state_ |= ActiveStream::FilterCallState::EncodeHeaders; - (*entry)->end_stream_ = active_stream_.state_.encoding_headers_only_ || + ASSERT(!(state_.filter_call_state_ & FilterCallState::EncodeHeaders)); + state_.filter_call_state_ |= FilterCallState::EncodeHeaders; + (*entry)->end_stream_ = state_.encoding_headers_only_ || (end_stream && continue_data_entry == encoder_filters_.end()); FilterHeadersStatus status = (*entry)->handle_->encodeHeaders(headers, (*entry)->end_stream_); if ((*entry)->end_stream_) { (*entry)->handle_->encodeComplete(); } - active_stream_.state_.filter_call_state_ &= ~ActiveStream::FilterCallState::EncodeHeaders; + state_.filter_call_state_ &= ~FilterCallState::EncodeHeaders; ENVOY_STREAM_LOG(trace, "encode headers called: filter={} status={}", active_stream_, static_cast((*entry).get()), static_cast(status)); (*entry)->encode_headers_called_ = true; - const auto continue_iteration = (*entry)->commonHandleAfterHeadersCallback( - status, active_stream_.state_.encoding_headers_only_); + const auto continue_iteration = + (*entry)->commonHandleAfterHeadersCallback(status, state_.encoding_headers_only_); // If we're encoding a headers only response, then mark the local as complete. This ensures // that we don't attempt to reset the downstream request in doEndStream. 
- if (active_stream_.state_.encoding_headers_only_) { - active_stream_.state_.local_complete_ = true; + if (state_.encoding_headers_only_) { + state_.local_complete_ = true; } if (!continue_iteration) { @@ -1796,7 +1792,7 @@ void ConnectionManagerImpl::FilterManager::encodeHeaders(ActiveStreamEncoderFilt } } - const bool modified_end_stream = active_stream_.state_.encoding_headers_only_ || + const bool modified_end_stream = state_.encoding_headers_only_ || (end_stream && continue_data_entry == encoder_filters_.end()); filter_manager_callbacks_.encodeHeaders(headers, modified_end_stream); maybeEndEncode(modified_end_stream); @@ -1873,7 +1869,7 @@ void ConnectionManagerImpl::ActiveStream::encodeHeaders(ResponseHeaderMap& heade // If we are destroying a stream before remote is complete and the connection does not support // multiplexing, we should disconnect since we don't want to wait around for the request to // finish. - if (!state_.remote_complete_) { + if (!filter_manager_.remoteComplete()) { if (connection_manager_.codec_->protocol() < Protocol::Http2) { connection_manager_.drain_state_ = DrainState::Closing; } @@ -1964,7 +1960,7 @@ void ConnectionManagerImpl::FilterManager::encodeMetadata(ActiveStreamEncoderFil ResponseTrailerMap& ConnectionManagerImpl::FilterManager::addEncodedTrailers() { // Trailers can only be added during the last data frame (i.e. end_stream = true). - ASSERT(active_stream_.state_.filter_call_state_ & ActiveStream::FilterCallState::LastDataFrame); + ASSERT(state_.filter_call_state_ & FilterCallState::LastDataFrame); // Trailers can only be added once. ASSERT(!response_trailers_); @@ -1982,18 +1978,16 @@ void ConnectionManagerImpl::FilterManager::sendLocalReply( void ConnectionManagerImpl::FilterManager::addEncodedData(ActiveStreamEncoderFilter& filter, Buffer::Instance& data, bool streaming) { - if (active_stream_.state_.filter_call_state_ == 0 || - (active_stream_.state_.filter_call_state_ & ActiveStream::FilterCallState::EncodeHeaders) || - (active_stream_.state_.filter_call_state_ & ActiveStream::FilterCallState::EncodeData) || - ((active_stream_.state_.filter_call_state_ & ActiveStream::FilterCallState::EncodeTrailers) && - !filter.canIterate())) { + if (state_.filter_call_state_ == 0 || + (state_.filter_call_state_ & FilterCallState::EncodeHeaders) || + (state_.filter_call_state_ & FilterCallState::EncodeData) || + ((state_.filter_call_state_ & FilterCallState::EncodeTrailers) && !filter.canIterate())) { // Make sure if this triggers watermarks, the correct action is taken. - active_stream_.state_.encoder_filters_streaming_ = streaming; + state_.encoder_filters_streaming_ = streaming; // If no call is happening or we are in the decode headers/data callback, buffer the data. // Inline processing happens in the decodeHeaders() callback if necessary. filter.commonHandleBufferData(data); - } else if (active_stream_.state_.filter_call_state_ & - ActiveStream::FilterCallState::EncodeTrailers) { + } else if (state_.filter_call_state_ & FilterCallState::EncodeTrailers) { // In this case we need to inline dispatch the data to further filters. If those filters // choose to buffer/stop iteration that's fine. encodeData(&filter, data, false, FilterIterationStartState::AlwaysStartFromNext); @@ -2010,7 +2004,7 @@ void ConnectionManagerImpl::FilterManager::encodeData( active_stream_.resetIdleTimer(); // If we previously decided to encode only the headers, do nothing here. 
- if (active_stream_.state_.encoding_headers_only_) { + if (state_.encoding_headers_only_) { return; } @@ -2022,7 +2016,7 @@ void ConnectionManagerImpl::FilterManager::encodeData( const bool trailers_exists_at_start = response_trailers_ != nullptr; for (; entry != encoder_filters_.end(); entry++) { // If the filter pointed by entry has stopped for all frame type, return now. - if (handleDataIfStopAll(**entry, data, active_stream_.state_.encoder_filters_streaming_)) { + if (handleDataIfStopAll(**entry, data, state_.encoder_filters_streaming_)) { return; } // If end_stream_ is marked for a filter, the data is not for this filter and filters after. @@ -2030,27 +2024,26 @@ void ConnectionManagerImpl::FilterManager::encodeData( if ((*entry)->end_stream_) { return; } - ASSERT(!(active_stream_.state_.filter_call_state_ & ActiveStream::FilterCallState::EncodeData)); + ASSERT(!(state_.filter_call_state_ & FilterCallState::EncodeData)); // We check the response_trailers_ pointer here in case addEncodedTrailers // is called in encodeData during a previous filter invocation, at which point we communicate to // the current and future filters that the stream has not yet ended. - active_stream_.state_.filter_call_state_ |= ActiveStream::FilterCallState::EncodeData; + state_.filter_call_state_ |= FilterCallState::EncodeData; if (end_stream) { - active_stream_.state_.filter_call_state_ |= ActiveStream::FilterCallState::LastDataFrame; + state_.filter_call_state_ |= FilterCallState::LastDataFrame; } - recordLatestDataFilter(entry, active_stream_.state_.latest_data_encoding_filter_, - encoder_filters_); + recordLatestDataFilter(entry, state_.latest_data_encoding_filter_, encoder_filters_); (*entry)->end_stream_ = end_stream && !response_trailers_; FilterDataStatus status = (*entry)->handle_->encodeData(data, (*entry)->end_stream_); if ((*entry)->end_stream_) { (*entry)->handle_->encodeComplete(); } - active_stream_.state_.filter_call_state_ &= ~ActiveStream::FilterCallState::EncodeData; + state_.filter_call_state_ &= ~FilterCallState::EncodeData; if (end_stream) { - active_stream_.state_.filter_call_state_ &= ~ActiveStream::FilterCallState::LastDataFrame; + state_.filter_call_state_ &= ~FilterCallState::LastDataFrame; } ENVOY_STREAM_LOG(trace, "encode data called: filter={} status={}", active_stream_, static_cast((*entry).get()), static_cast(status)); @@ -2060,13 +2053,13 @@ void ConnectionManagerImpl::FilterManager::encodeData( trailers_added_entry = entry; } - if (!(*entry)->commonHandleAfterDataCallback( - status, data, active_stream_.state_.encoder_filters_streaming_)) { + if (!(*entry)->commonHandleAfterDataCallback(status, data, state_.encoder_filters_streaming_)) { return; } } const bool modified_end_stream = end_stream && trailers_added_entry == encoder_filters_.end(); + ASSERT(!state_.encoding_headers_only_); filter_manager_callbacks_.encodeData(data, modified_end_stream); maybeEndEncode(modified_end_stream); @@ -2078,7 +2071,6 @@ void ConnectionManagerImpl::FilterManager::encodeData( } void ConnectionManagerImpl::ActiveStream::encodeData(Buffer::Instance& data, bool end_stream) { - ASSERT(!state_.encoding_headers_only_); ENVOY_STREAM_LOG(trace, "encoding data via codec (size={} end_stream={})", *this, data.length(), end_stream); @@ -2101,7 +2093,7 @@ void ConnectionManagerImpl::ActiveStream::onDecoderFilterBelowWriteBufferLowWate ENVOY_STREAM_LOG(debug, "Read-enabling downstream stream due to filter callbacks.", *this); // If the state is destroyed, the codec's stream is already torn down. 
On // teardown the codec will unwind any remaining read disable calls. - if (!state_.destroyed_) { + if (!filter_manager_.destroyed()) { response_encoder_->getStream().readDisable(false); } connection_manager_.stats_.named_.downstream_flow_control_resumed_reading_total_.inc(); @@ -2118,7 +2110,7 @@ void ConnectionManagerImpl::FilterManager::encodeTrailers(ActiveStreamEncoderFil active_stream_.resetIdleTimer(); // If we previously decided to encode only the headers, do nothing here. - if (active_stream_.state_.encoding_headers_only_) { + if (state_.encoding_headers_only_) { return; } @@ -2130,13 +2122,12 @@ void ConnectionManagerImpl::FilterManager::encodeTrailers(ActiveStreamEncoderFil if ((*entry)->stoppedAll()) { return; } - ASSERT(!(active_stream_.state_.filter_call_state_ & - ActiveStream::FilterCallState::EncodeTrailers)); - active_stream_.state_.filter_call_state_ |= ActiveStream::FilterCallState::EncodeTrailers; + ASSERT(!(state_.filter_call_state_ & FilterCallState::EncodeTrailers)); + state_.filter_call_state_ |= FilterCallState::EncodeTrailers; FilterTrailersStatus status = (*entry)->handle_->encodeTrailers(trailers); (*entry)->handle_->encodeComplete(); (*entry)->end_stream_ = true; - active_stream_.state_.filter_call_state_ &= ~ActiveStream::FilterCallState::EncodeTrailers; + state_.filter_call_state_ &= ~FilterCallState::EncodeTrailers; ENVOY_STREAM_LOG(trace, "encode trailers called: filter={} status={}", active_stream_, static_cast((*entry).get()), static_cast(status)); if (!(*entry)->commonHandleAfterTrailersCallback(status)) { @@ -2150,11 +2141,7 @@ void ConnectionManagerImpl::FilterManager::encodeTrailers(ActiveStreamEncoderFil void ConnectionManagerImpl::FilterManager::maybeEndEncode(bool end_stream) { if (end_stream) { - ASSERT(!active_stream_.state_.codec_saw_local_complete_); - active_stream_.state_.codec_saw_local_complete_ = true; - stream_info_.onLastDownstreamTxByteSent(); - active_stream_.request_response_timespan_->complete(); - active_stream_.connection_manager_.doEndStream(active_stream_); + filter_manager_callbacks_.endStream(); } } @@ -2254,7 +2241,7 @@ void ConnectionManagerImpl::FilterManager::setBufferLimit(uint32_t new_limit) { } bool ConnectionManagerImpl::FilterManager::createFilterChain() { - if (active_stream_.state_.created_filter_chain_) { + if (state_.created_filter_chain_) { return false; } bool upgrade_rejected = false; @@ -2268,7 +2255,7 @@ bool ConnectionManagerImpl::FilterManager::createFilterChain() { } } - active_stream_.state_.created_filter_chain_ = true; + state_.created_filter_chain_ = true; if (upgrade != nullptr) { const Router::RouteEntry::UpgradeMap* upgrade_map = nullptr; @@ -2280,8 +2267,6 @@ bool ConnectionManagerImpl::FilterManager::createFilterChain() { if (filter_chain_factory_.createUpgradeFilterChain(upgrade->value().getStringView(), upgrade_map, *this)) { - active_stream_.state_.successful_upgrade_ = true; - filter_manager_callbacks_.upgradeFilterChainCreated(); return true; } else { @@ -2347,7 +2332,7 @@ void ConnectionManagerImpl::ActiveStreamFilterBase::commonContinue() { bool ConnectionManagerImpl::ActiveStreamFilterBase::commonHandleAfter100ContinueHeadersCallback( FilterHeadersStatus status) { - ASSERT(parent_.active_stream_.state_.has_continue_headers_); + ASSERT(parent_.state_.has_continue_headers_); ASSERT(!continue_headers_continued_); ASSERT(canIterate()); @@ -2521,7 +2506,7 @@ bool ConnectionManagerImpl::ActiveStreamDecoderFilter::canContinue() { // continue to further filters. 
A concrete example of this is a filter buffering data, the // last data frame comes in and the filter continues, but the final buffering takes the stream // over the high watermark such that a 413 is returned. - return !parent_.active_stream_.state_.local_complete_; + return !parent_.state_.local_complete_; } Buffer::WatermarkBufferPtr ConnectionManagerImpl::ActiveStreamDecoderFilter::createBuffer() { @@ -2538,7 +2523,7 @@ Buffer::WatermarkBufferPtr& ConnectionManagerImpl::ActiveStreamDecoderFilter::bu } bool ConnectionManagerImpl::ActiveStreamDecoderFilter::complete() { - return parent_.active_stream_.state_.remote_complete_; + return parent_.state_.remote_complete_; } void ConnectionManagerImpl::ActiveStreamDecoderFilter::doHeaders(bool end_stream) { @@ -2604,7 +2589,7 @@ const Buffer::Instance* ConnectionManagerImpl::ActiveStreamDecoderFilter::decodi void ConnectionManagerImpl::ActiveStreamDecoderFilter::modifyDecodingBuffer( std::function callback) { - ASSERT(parent_.active_stream_.state_.latest_data_decoding_filter_ == this); + ASSERT(parent_.state_.latest_data_decoding_filter_ == this); callback(*parent_.buffered_request_data_.get()); } @@ -2629,7 +2614,7 @@ void ConnectionManagerImpl::ActiveStreamDecoderFilter::encode100ContinueHeaders( void ConnectionManagerImpl::ActiveStreamDecoderFilter::encodeHeaders(ResponseHeaderMapPtr&& headers, bool end_stream) { - parent_.setResponseHeaders(std::move(headers)); + parent_.response_headers_ = std::move(headers); parent_.encodeHeaders(nullptr, *parent_.response_headers_, end_stream); } @@ -2657,7 +2642,7 @@ void ConnectionManagerImpl::ActiveStreamDecoderFilter:: void ConnectionManagerImpl::ActiveStreamDecoderFilter::requestDataTooLarge() { ENVOY_STREAM_LOG(debug, "request data too large watermark exceeded", parent_.active_stream_); - if (parent_.active_stream_.state_.decoder_filters_streaming_) { + if (parent_.state_.decoder_filters_streaming_) { onDecoderFilterAboveWriteBufferHighWatermark(); } else { parent_.active_stream_.connection_manager_.stats_.named_.downstream_rq_too_large_.inc(); @@ -2778,10 +2763,10 @@ Buffer::WatermarkBufferPtr& ConnectionManagerImpl::ActiveStreamEncoderFilter::bu return parent_.buffered_response_data_; } bool ConnectionManagerImpl::ActiveStreamEncoderFilter::complete() { - return parent_.active_stream_.state_.local_complete_; + return parent_.state_.local_complete_; } bool ConnectionManagerImpl::ActiveStreamEncoderFilter::has100Continueheaders() { - return parent_.active_stream_.state_.has_continue_headers_ && !continue_headers_continued_; + return parent_.state_.has_continue_headers_ && !continue_headers_continued_; } void ConnectionManagerImpl::ActiveStreamEncoderFilter::do100ContinueHeaders() { parent_.encode100ContinueHeaders(this, *parent_.continue_headers_); @@ -2870,7 +2855,7 @@ const Buffer::Instance* ConnectionManagerImpl::ActiveStreamEncoderFilter::encodi void ConnectionManagerImpl::ActiveStreamEncoderFilter::modifyEncodingBuffer( std::function callback) { - ASSERT(parent_.active_stream_.state_.latest_data_encoding_filter_ == this); + ASSERT(parent_.state_.latest_data_encoding_filter_ == this); callback(*parent_.buffered_response_data_.get()); } @@ -2882,7 +2867,7 @@ ConnectionManagerImpl::ActiveStreamEncoderFilter::http1StreamEncoderOptions() { } void ConnectionManagerImpl::ActiveStreamEncoderFilter::responseDataTooLarge() { - if (parent_.active_stream_.state_.encoder_filters_streaming_) { + if (parent_.state_.encoder_filters_streaming_) { onEncoderFilterAboveWriteBufferHighWatermark(); } else { 
parent_.active_stream_.connection_manager_.stats_.named_.rs_too_large_.inc(); diff --git a/source/common/http/conn_manager_impl.h b/source/common/http/conn_manager_impl.h index cab42078a38a..e453d0271df0 100644 --- a/source/common/http/conn_manager_impl.h +++ b/source/common/http/conn_manager_impl.h @@ -40,6 +40,7 @@ #include "common/http/conn_manager_config.h" #include "common/http/user_agent.h" #include "common/http/utility.h" +#include "common/local_reply/local_reply.h" #include "common/stream_info/stream_info_impl.h" #include "common/tracing/http_tracer_impl.h" @@ -430,6 +431,11 @@ class ConnectionManagerImpl : Logger::Loggable, */ virtual void encodeMetadata(MetadataMapVector& metadata) PURE; + /** + * Called after encoding has completed. + */ + virtual void endStream() PURE; + /** * Called when the stream write buffer is no longer above the low watermark. */ @@ -450,21 +456,38 @@ class ConnectionManagerImpl : Logger::Loggable, * FilterManager manages decoding a request through a series of decoding filter and the encoding * of the resulting response. */ - class FilterManager : public FilterChainFactoryCallbacks { + class FilterManager : public ScopeTrackedObject, FilterChainFactoryCallbacks { public: FilterManager(ActiveStream& active_stream, FilterManagerCallbacks& filter_manager_callbacks, uint32_t buffer_limit, FilterChainFactory& filter_chain_factory, - Http::Protocol protocol, TimeSource& time_source, - StreamInfo::FilterStateSharedPtr parent_filter_state, + const LocalReply::LocalReply& local_reply, Http::Protocol protocol, + TimeSource& time_source, StreamInfo::FilterStateSharedPtr parent_filter_state, StreamInfo::FilterState::LifeSpan filter_state_life_span) : active_stream_(active_stream), filter_manager_callbacks_(filter_manager_callbacks), buffer_limit_(buffer_limit), filter_chain_factory_(filter_chain_factory), + local_reply_(local_reply), stream_info_(protocol, time_source, parent_filter_state, filter_state_life_span) {} ~FilterManager() override { for (const auto& log_handler : access_log_handlers_) { log_handler->log(request_headers_.get(), response_headers_.get(), response_trailers_.get(), stream_info_); } + + ASSERT(state_.filter_call_state_ == 0); + } + + // ScopeTrackedObject + void dumpState(std::ostream& os, int indent_level = 0) const override { + const char* spaces = spacesForLevel(indent_level); + os << spaces << "FilterManager " << this << DUMP_MEMBER(state_.has_continue_headers_) + << DUMP_MEMBER(state_.decoding_headers_only_) << DUMP_MEMBER(state_.encoding_headers_only_) + << "\n"; + + DUMP_DETAILS(request_headers_); + DUMP_DETAILS(request_trailers_); + DUMP_DETAILS(response_headers_); + DUMP_DETAILS(response_trailers_); + DUMP_DETAILS(&stream_info_); } // Http::FilterChainFactoryCallbacks @@ -481,6 +504,8 @@ class ConnectionManagerImpl : Logger::Loggable, void addAccessLogHandler(AccessLog::InstanceSharedPtr handler) override; void destroyFilters() { + state_.destroyed_ = true; + for (auto& filter : decoder_filters_) { filter->handle_->onDestroy(); } @@ -555,6 +580,15 @@ class ConnectionManagerImpl : Logger::Loggable, const std::function& modify_headers, bool is_head_request, const absl::optional grpc_status, absl::string_view details); + /** + * Sends a local reply by constructing a response and skipping the encoder filters. The + * resulting response will be passed out via the FilterManagerCallbacks. 
+ */ + void sendDirectLocalReply(Code code, absl::string_view body, + const std::function& modify_headers, + bool is_head_request, + const absl::optional grpc_status); + // Possibly increases buffer_limit_ to the value of limit. void setBufferLimit(uint32_t limit); @@ -578,10 +612,28 @@ class ConnectionManagerImpl : Logger::Loggable, request_headers_ = std::move(request_headers); } - void setResponseHeaders(ResponseHeaderMapPtr&& response_headers) { - // Note: sometimes the headers get reset (local reply while response is buffering), so we - // don't assert here. - response_headers_ = std::move(response_headers); + /** + * Marks local processing as complete. + */ + void setLocalComplete() { state_.local_complete_ = true; } + + /** + * Whether the filters have been destroyed. + */ + bool destroyed() const { return state_.destroyed_; } + + /** + * Whether remote processing has been marked as complete. + */ + bool remoteComplete() const { return state_.remote_complete_; } + + /** + * Instructs the FilterManager to not create a filter chain. This makes it possible to issue + * a local reply without the overhead of creating and traversing the filters. + */ + void skipFilterChainCreation() { + ASSERT(!state_.created_filter_chain_); + state_.created_filter_chain_ = true; } /** @@ -700,12 +752,71 @@ class ConnectionManagerImpl : Logger::Loggable, std::list watermark_callbacks_; FilterChainFactory& filter_chain_factory_; + const LocalReply::LocalReply& local_reply_; StreamInfo::StreamInfoImpl stream_info_; // TODO(snowp): Once FM has been moved to its own file we'll make these private classes of FM, // at which point they no longer need to be friends. friend ActiveStreamFilterBase; friend ActiveStreamDecoderFilter; friend ActiveStreamEncoderFilter; + + /** + * Flags that keep track of which filter calls are currently in progress. + */ + // clang-format off + struct FilterCallState { + static constexpr uint32_t DecodeHeaders = 0x01; + static constexpr uint32_t DecodeData = 0x02; + static constexpr uint32_t DecodeTrailers = 0x04; + static constexpr uint32_t EncodeHeaders = 0x08; + static constexpr uint32_t EncodeData = 0x10; + static constexpr uint32_t EncodeTrailers = 0x20; + // Encode100ContinueHeaders is a bit of a special state as 100 continue + // headers may be sent during request processing. This state is only used + // to verify we do not encode100Continue headers more than once per + // filter. + static constexpr uint32_t Encode100ContinueHeaders = 0x40; + // Used to indicate that we're processing the final [En|De]codeData frame, + // i.e. end_stream = true + static constexpr uint32_t LastDataFrame = 0x80; + }; + // clang-format on + + struct State { + State() + : remote_complete_(false), local_complete_(false), has_continue_headers_(false), + created_filter_chain_(false) {} + + uint32_t filter_call_state_{0}; + + bool remote_complete_ : 1; + bool local_complete_ : 1; // This indicates that local is complete prior to filter processing. + // A filter can still stop the stream from being complete as seen + // by the codec. + // By default, we will assume there are no 100-Continue headers. If encode100ContinueHeaders + // is ever called, this is set to true so commonContinue resumes processing the 100-Continue. + bool has_continue_headers_ : 1; + bool created_filter_chain_ : 1; + + // The following 3 members are booleans rather than part of the space-saving bitfield as they + // are passed as arguments to functions expecting bools. Extend State using the bitfield + // where possible. 
+ bool encoder_filters_streaming_{true}; + bool decoder_filters_streaming_{true}; + bool destroyed_{false}; + // Whether a filter has indicated that the response should be treated as a headers only + // response. + bool encoding_headers_only_{false}; + // Whether a filter has indicated that the request should be treated as a headers only + // request. + bool decoding_headers_only_{false}; + + // Used to track which filter is the latest filter that has received data. + ActiveStreamEncoderFilter* latest_data_encoding_filter_{}; + ActiveStreamDecoderFilter* latest_data_decoding_filter_{}; + }; + + State state_; }; /** @@ -763,15 +874,9 @@ class ConnectionManagerImpl : Logger::Loggable, void dumpState(std::ostream& os, int indent_level = 0) const override { const char* spaces = spacesForLevel(indent_level); os << spaces << "ActiveStream " << this << DUMP_MEMBER(stream_id_) - << DUMP_MEMBER(state_.has_continue_headers_) << DUMP_MEMBER(state_.is_head_request_) - << DUMP_MEMBER(state_.decoding_headers_only_) << DUMP_MEMBER(state_.encoding_headers_only_) - << "\n"; + << DUMP_MEMBER(state_.is_head_request_); - DUMP_DETAILS(filter_manager_.requestHeaders()); - DUMP_DETAILS(filter_manager_.requestTrailers()); - DUMP_DETAILS(filter_manager_.responseHeaders()); - DUMP_DETAILS(filter_manager_.responseTrailers()); - DUMP_DETAILS(&filter_manager_.streamInfo()); + DUMP_DETAILS(&filter_manager_); } // FilterManagerCallbacks @@ -780,11 +885,19 @@ class ConnectionManagerImpl : Logger::Loggable, void encodeData(Buffer::Instance& data, bool end_stream) override; void encodeTrailers(ResponseTrailerMap& trailers) override; void encodeMetadata(MetadataMapVector& metadata) override; + void endStream() override { + ASSERT(!state_.codec_saw_local_complete_); + state_.codec_saw_local_complete_ = true; + filter_manager_.streamInfo().onLastDownstreamTxByteSent(); + request_response_timespan_->complete(); + connection_manager_.doEndStream(*this); + } void onDecoderFilterBelowWriteBufferLowWatermark() override; void onDecoderFilterAboveWriteBufferHighWatermark() override; void upgradeFilterChainCreated() override { connection_manager_.stats_.named_.downstream_cx_upgrades_total_.inc(); connection_manager_.stats_.named_.downstream_cx_upgrades_active_.inc(); + state_.successful_upgrade_ = true; } void traceRequest(); @@ -802,74 +915,26 @@ class ConnectionManagerImpl : Logger::Loggable, void refreshCachedTracingCustomTags(); - /** - * Flags that keep track of which filter calls are currently in progress. - */ - // clang-format off - struct FilterCallState { - static constexpr uint32_t DecodeHeaders = 0x01; - static constexpr uint32_t DecodeData = 0x02; - static constexpr uint32_t DecodeTrailers = 0x04; - static constexpr uint32_t EncodeHeaders = 0x08; - static constexpr uint32_t EncodeData = 0x10; - static constexpr uint32_t EncodeTrailers = 0x20; - // Encode100ContinueHeaders is a bit of a special state as 100 continue - // headers may be sent during request processing. This state is only used - // to verify we do not encode100Continue headers more than once per - // filter. - static constexpr uint32_t Encode100ContinueHeaders = 0x40; - // Used to indicate that we're processing the final [En|De]codeData frame, - // i.e. end_stream = true - static constexpr uint32_t LastDataFrame = 0x80; - }; - // clang-format on - // All state for the stream. Put here for readability. 
struct State { State() - : remote_complete_(false), local_complete_(false), codec_saw_local_complete_(false), - saw_connection_close_(false), successful_upgrade_(false), created_filter_chain_(false), - is_internally_created_(false), decorated_propagate_(true), has_continue_headers_(false), + : codec_saw_local_complete_(false), saw_connection_close_(false), + successful_upgrade_(false), is_internally_created_(false), decorated_propagate_(true), is_head_request_(false), non_100_response_headers_encoded_(false) {} - uint32_t filter_call_state_{0}; - // The following 3 members are booleans rather than part of the space-saving bitfield as they - // are passed as arguments to functions expecting bools. Extend State using the bitfield - // where possible. - bool encoder_filters_streaming_{true}; - bool decoder_filters_streaming_{true}; - bool destroyed_{false}; - bool remote_complete_ : 1; - bool local_complete_ : 1; // This indicates that local is complete prior to filter processing. - // A filter can still stop the stream from being complete as seen - // by the codec. bool codec_saw_local_complete_ : 1; // This indicates that local is complete as written all // the way through to the codec. bool saw_connection_close_ : 1; bool successful_upgrade_ : 1; - bool created_filter_chain_ : 1; // True if this stream is internally created. Currently only used for // internal redirects or other streams created via recreateStream(). bool is_internally_created_ : 1; bool decorated_propagate_ : 1; - // By default, we will assume there are no 100-Continue headers. If encode100ContinueHeaders - // is ever called, this is set to true so commonContinue resumes processing the 100-Continue. - bool has_continue_headers_ : 1; bool is_head_request_ : 1; // Tracks if headers other than 100-Continue have been encoded to the codec. bool non_100_response_headers_encoded_ : 1; - // Whether a filter has indicated that the request should be treated as a headers only - // request. - bool decoding_headers_only_{false}; - // Whether a filter has indicated that the response should be treated as a headers only - // response. - bool encoding_headers_only_{false}; - - // Used to track which filter is the latest filter that has received data. - ActiveStreamEncoderFilter* latest_data_encoding_filter_{}; - ActiveStreamDecoderFilter* latest_data_decoding_filter_{}; }; // Per-stream idle timeout callback. 
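
For readers following the State and FilterCallState move above: the guard that FilterManager now owns is a plain bitmask that each filter callback sets on entry, asserts against double entry, and clears on exit. Below is a minimal, self-contained sketch of that pattern, not part of this patch: the FilterCallState constants mirror the header hunks above, but the FilterCallGuardSketch class, its decodeHeaders() stand-in, and main() are illustrative assumptions only.

#include <cassert>
#include <cstdint>
#include <iostream>

// Bit flags tracking which filter callback is in progress (mirrors the
// FilterCallState constants introduced in the header diff above).
struct FilterCallState {
  static constexpr uint32_t DecodeHeaders = 0x01;
  static constexpr uint32_t EncodeHeaders = 0x08;
};

// Illustrative stand-in for FilterManager: set the bit before dispatching to
// the filter, assert it was not already set (no re-entrant call), clear it
// once the callback returns.
class FilterCallGuardSketch {
public:
  void decodeHeaders() {
    assert(!(filter_call_state_ & FilterCallState::DecodeHeaders));
    filter_call_state_ |= FilterCallState::DecodeHeaders;
    // ... the real code would invoke the filter's decodeHeaders() here ...
    filter_call_state_ &= ~FilterCallState::DecodeHeaders;
  }
  uint32_t state() const { return filter_call_state_; }

private:
  uint32_t filter_call_state_{0};
};

int main() {
  FilterCallGuardSketch sketch;
  sketch.decodeHeaders();
  std::cout << "state after callback: " << sketch.state() << "\n"; // prints 0
  return 0;
}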
diff --git a/test/common/http/conn_manager_impl_test.cc b/test/common/http/conn_manager_impl_test.cc index f9fca43ba0c4..a13dd0133a1c 100644 --- a/test/common/http/conn_manager_impl_test.cc +++ b/test/common/http/conn_manager_impl_test.cc @@ -5954,7 +5954,7 @@ TEST_F(HttpConnectionManagerImplTest, TestSessionTrace) { std::stringstream out; object->dumpState(out); std::string state = out.str(); - EXPECT_THAT(state, testing::HasSubstr("filter_manager_.requestHeaders(): null")); + EXPECT_THAT(state, testing::HasSubstr("request_headers_: null")); EXPECT_THAT(state, testing::HasSubstr("protocol_: 1")); return nullptr; })) @@ -5976,7 +5976,7 @@ TEST_F(HttpConnectionManagerImplTest, TestSessionTrace) { std::stringstream out; object->dumpState(out); std::string state = out.str(); - EXPECT_THAT(state, testing::HasSubstr("filter_manager_.requestHeaders(): \n")); + EXPECT_THAT(state, testing::HasSubstr("request_headers_: \n")); EXPECT_THAT(state, testing::HasSubstr("':authority', 'host'\n")); EXPECT_THAT(state, testing::HasSubstr("protocol_: 1")); return nullptr; From 8e34f9e0f6914087b1f614aaed23bb0d6fc1dc14 Mon Sep 17 00:00:00 2001 From: nigriMSFT Date: Thu, 6 Aug 2020 16:07:56 -0700 Subject: [PATCH 875/909] event: bump libevent version to the latest (#12423) On Windows, configure event-base to use wepoll backend instead of the default win32 backend. Risk Level: Low Testing: CI Docs Changes: N/A Release Notes: N/A Signed-off-by: Nick Grifka --- bazel/foreign_cc/BUILD | 1 + bazel/repository_locations.bzl | 12 +++++++----- source/common/event/libevent_scheduler.cc | 11 +++++++++++ tools/spelling/spelling_dictionary.txt | 1 + 4 files changed, 20 insertions(+), 5 deletions(-) diff --git a/bazel/foreign_cc/BUILD b/bazel/foreign_cc/BUILD index 316c8fef1e7c..c4a59ab20bda 100644 --- a/bazel/foreign_cc/BUILD +++ b/bazel/foreign_cc/BUILD @@ -158,6 +158,7 @@ envoy_cmake_external( name = "event", cache_entries = { "EVENT__DISABLE_OPENSSL": "on", + "EVENT__DISABLE_MBEDTLS": "on", "EVENT__DISABLE_REGRESS": "on", "EVENT__DISABLE_TESTS": "on", "EVENT__LIBRARY_TYPE": "STATIC", diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 1a3fb597c1c2..096d1b8ae77b 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -233,18 +233,20 @@ DEPENDENCY_REPOSITORIES = dict( use_category = ["test"], ), com_github_libevent_libevent = dict( - sha256 = "c64156c24602ab7a5c66937d774cc55868911d5bbbf1650792f5877744b1c2d9", + sha256 = "4c80e5fe044ce5f8055b20a2f141ee32ec2614000f3e95d2aa81611a4c8f5213", # This SHA includes the new "prepare" and "check" watchers, used for event loop performance # stats (see https://github.com/libevent/libevent/pull/793) and the fix for a race condition # in the watchers (see https://github.com/libevent/libevent/pull/802). # This also includes the fixes for https://github.com/libevent/libevent/issues/806 # and https://github.com/lyft/envoy-mobile/issues/215. - # This also include the fixes for Phantom events with EV_ET (see + # This also includes the fixes for Phantom events with EV_ET (see # https://github.com/libevent/libevent/issues/984). + # This also includes the wepoll backend for Windows (see + # https://github.com/libevent/libevent/pull/1006) # TODO(adip): Update to v2.2 when it is released. 
- strip_prefix = "libevent-06a11929511bebaaf40c52aaf91de397b1782ba2", - # 2020-05-08 - urls = ["https://github.com/libevent/libevent/archive/06a11929511bebaaf40c52aaf91de397b1782ba2.tar.gz"], + strip_prefix = "libevent-62c152d9a7cd264b993dad730c4163c6ede2e0a3", + # 2020-07-31 + urls = ["https://github.com/libevent/libevent/archive/62c152d9a7cd264b993dad730c4163c6ede2e0a3.tar.gz"], use_category = ["dataplane"], cpe = "cpe:2.3:a:libevent_project:libevent:*", ), diff --git a/source/common/event/libevent_scheduler.cc b/source/common/event/libevent_scheduler.cc index 6e675f3ffe04..dda0380cb4d8 100644 --- a/source/common/event/libevent_scheduler.cc +++ b/source/common/event/libevent_scheduler.cc @@ -16,7 +16,18 @@ void recordTimeval(Stats::Histogram& histogram, const timeval& tv) { } // namespace LibeventScheduler::LibeventScheduler() { +#ifdef WIN32 + event_config* event_config = event_config_new(); + RELEASE_ASSERT(event_config != nullptr, + "Failed to initialize libevent event_base: event_config_new"); + // Request wepoll backend by avoiding win32 backend. + int error = event_config_avoid_method(event_config, "win32"); + RELEASE_ASSERT(error == 0, "Failed to initialize libevent event_base: event_config_avoid_method"); + event_base* event_base = event_base_new_with_config(event_config); + event_config_free(event_config); +#else event_base* event_base = event_base_new(); +#endif RELEASE_ASSERT(event_base != nullptr, "Failed to initialize libevent event_base"); libevent_ = Libevent::BasePtr(event_base); diff --git a/tools/spelling/spelling_dictionary.txt b/tools/spelling/spelling_dictionary.txt index 98fc8fce45b0..d855084a1fd7 100644 --- a/tools/spelling/spelling_dictionary.txt +++ b/tools/spelling/spelling_dictionary.txt @@ -1172,6 +1172,7 @@ vptr wakeup wakeups websocket +wepoll whitespace whitespaces wildcard From 7c9202879eadfe68ed49c635273d7580aabe1314 Mon Sep 17 00:00:00 2001 From: Michael Behr Date: Thu, 6 Aug 2020 20:06:33 -0400 Subject: [PATCH 876/909] signal: Add hooks for calling fatal error handlers from non-envoy signal handlers (#12062) Add hooks for calling fatal error handlers from non-Envoy signal handlers. Move register/removeFatalErrorHandler from SignalAction into a new FatalErrorHandler namespace, and add a new function, callFatalErrorHandlers, which runs the registered error handlers. This makes the crash logging from issue #7300 available for builds that don't use ENVOY_HANDLE_SIGNALS, as long as they do use ENVOY_OBJECT_TRACE_ON_DUMP. Risk Level: Low Testing: bazel test //test/... 
Docs Changes: N/A Release Notes: Added Fixes #11984 Signed-off-by: Michael Behr --- docs/root/version_history/current.rst | 1 + source/common/event/dispatcher_impl.cc | 10 +-- source/common/event/dispatcher_impl.h | 4 +- source/common/signal/BUILD | 1 + source/common/signal/fatal_error_handler.cc | 72 +++++++++++++++++++++ source/common/signal/fatal_error_handler.h | 31 +++++++-- source/common/signal/signal_action.cc | 49 +------------- source/common/signal/signal_action.h | 13 ---- test/common/event/dispatcher_impl_test.cc | 2 +- test/common/signal/BUILD | 2 + test/common/signal/signals_test.cc | 72 +++++++++++++++++++-- 11 files changed, 175 insertions(+), 82 deletions(-) create mode 100644 source/common/signal/fatal_error_handler.cc diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 938018830fe7..01a2b7235bee 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -63,6 +63,7 @@ New Features * router: added new :ref:`envoy-ratelimited` retry policy, which allows retrying envoy's own rate limited responses. +* signal: added support for calling fatal error handlers without envoy's signal handler, via FatalErrorHandler::callFatalErrorHandlers(). * stats: added optional histograms to :ref:`cluster stats ` that track headers and body sizes of requests and responses. * stats: allow configuring histogram buckets for stats sinks and admin endpoints that support it. diff --git a/source/common/event/dispatcher_impl.cc b/source/common/event/dispatcher_impl.cc index d4f9c28c68e6..76f4a109039f 100644 --- a/source/common/event/dispatcher_impl.cc +++ b/source/common/event/dispatcher_impl.cc @@ -45,19 +45,13 @@ DispatcherImpl::DispatcherImpl(const std::string& name, Buffer::WatermarkFactory post_cb_(base_scheduler_.createSchedulableCallback([this]() -> void { runPostCallbacks(); })), current_to_delete_(&to_delete_1_) { ASSERT(!name_.empty()); -#ifdef ENVOY_HANDLE_SIGNALS - SignalAction::registerFatalErrorHandler(*this); -#endif + FatalErrorHandler::registerFatalErrorHandler(*this); updateApproximateMonotonicTimeInternal(); base_scheduler_.registerOnPrepareCallback( std::bind(&DispatcherImpl::updateApproximateMonotonicTime, this)); } -DispatcherImpl::~DispatcherImpl() { -#ifdef ENVOY_HANDLE_SIGNALS - SignalAction::removeFatalErrorHandler(*this); -#endif -} +DispatcherImpl::~DispatcherImpl() { FatalErrorHandler::removeFatalErrorHandler(*this); } void DispatcherImpl::initializeStats(Stats::Scope& scope, const absl::optional& prefix) { diff --git a/source/common/event/dispatcher_impl.h b/source/common/event/dispatcher_impl.h index 143ff4eb065c..0db663dd985b 100644 --- a/source/common/event/dispatcher_impl.h +++ b/source/common/event/dispatcher_impl.h @@ -80,12 +80,12 @@ class DispatcherImpl : Logger::Loggable, void updateApproximateMonotonicTime() override; // FatalErrorInterface - void onFatalError() const override { + void onFatalError(std::ostream& os) const override { // Dump the state of the tracked object if it is in the current thread. This generally results // in dumping the active state only for the thread which caused the fatal error. 
if (isThreadSafe()) { if (current_object_) { - current_object_->dumpState(std::cerr); + current_object_->dumpState(os); } } } diff --git a/source/common/signal/BUILD b/source/common/signal/BUILD index 6dc082eda079..3008c01cb50e 100644 --- a/source/common/signal/BUILD +++ b/source/common/signal/BUILD @@ -10,6 +10,7 @@ envoy_package() envoy_cc_library( name = "fatal_error_handler_lib", + srcs = ["fatal_error_handler.cc"], hdrs = ["fatal_error_handler.h"], ) diff --git a/source/common/signal/fatal_error_handler.cc b/source/common/signal/fatal_error_handler.cc new file mode 100644 index 000000000000..b215d158b158 --- /dev/null +++ b/source/common/signal/fatal_error_handler.cc @@ -0,0 +1,72 @@ +#include "common/signal/fatal_error_handler.h" + +#include + +#include "absl/base/attributes.h" +#include "absl/synchronization/mutex.h" + +namespace Envoy { +namespace FatalErrorHandler { + +namespace { + +ABSL_CONST_INIT static absl::Mutex failure_mutex(absl::kConstInit); +// Since we can't grab the failure mutex on fatal error (snagging locks under +// fatal crash causing potential deadlocks) access the handler list as an atomic +// operation, which is async-signal-safe. If the crash handler runs at the same +// time as another thread tries to modify the list, one of them will get the +// list and the other will get nullptr instead. If the crash handler loses the +// race and gets nullptr, it won't run any of the registered error handlers. +using FailureFunctionList = std::list; +ABSL_CONST_INIT std::atomic fatal_error_handlers{nullptr}; + +} // namespace + +void registerFatalErrorHandler(const FatalErrorHandlerInterface& handler) { +#ifdef ENVOY_OBJECT_TRACE_ON_DUMP + absl::MutexLock l(&failure_mutex); + FailureFunctionList* list = fatal_error_handlers.exchange(nullptr, std::memory_order_relaxed); + if (list == nullptr) { + list = new FailureFunctionList; + } + list->push_back(&handler); + fatal_error_handlers.store(list, std::memory_order_release); +#else + UNREFERENCED_PARAMETER(handler); +#endif +} + +void removeFatalErrorHandler(const FatalErrorHandlerInterface& handler) { +#ifdef ENVOY_OBJECT_TRACE_ON_DUMP + absl::MutexLock l(&failure_mutex); + FailureFunctionList* list = fatal_error_handlers.exchange(nullptr, std::memory_order_relaxed); + if (list == nullptr) { + // removeFatalErrorHandler() may see an empty list of fatal error handlers + // if it's called at the same time as callFatalErrorHandlers(). In that case + // Envoy is in the middle of crashing anyway, but don't add a segfault on + // top of the crash. + return; + } + list->remove(&handler); + if (list->empty()) { + delete list; + } else { + fatal_error_handlers.store(list, std::memory_order_release); + } +#else + UNREFERENCED_PARAMETER(handler); +#endif +} + +void callFatalErrorHandlers(std::ostream& os) { + FailureFunctionList* list = fatal_error_handlers.exchange(nullptr, std::memory_order_relaxed); + if (list != nullptr) { + for (const auto* handler : *list) { + handler->onFatalError(os); + } + delete list; + } +} + +} // namespace FatalErrorHandler +} // namespace Envoy diff --git a/source/common/signal/fatal_error_handler.h b/source/common/signal/fatal_error_handler.h index 95c185911d3e..b06997af7e81 100644 --- a/source/common/signal/fatal_error_handler.h +++ b/source/common/signal/fatal_error_handler.h @@ -1,18 +1,39 @@ #pragma once +#include + #include "envoy/common/pure.h" namespace Envoy { // A simple class which allows registering functions to be called when Envoy -// receives one of the fatal signals, documented below. 
-// -// This is split out of signal_action.h because it is exempted from various -// builds. +// receives one of the fatal signals, documented in signal_action.h. class FatalErrorHandlerInterface { public: virtual ~FatalErrorHandlerInterface() = default; - virtual void onFatalError() const PURE; + // Called when Envoy receives a fatal signal. Must be async-signal-safe: in + // particular, it can't allocate memory. + virtual void onFatalError(std::ostream& os) const PURE; }; +namespace FatalErrorHandler { +/** + * Add this handler to the list of functions which will be called if Envoy + * receives a fatal signal. + */ +void registerFatalErrorHandler(const FatalErrorHandlerInterface& handler); + +/** + * Removes this handler from the list of functions which will be called if Envoy + * receives a fatal signal. + */ +void removeFatalErrorHandler(const FatalErrorHandlerInterface& handler); + +/** + * Calls and unregisters the fatal error handlers registered with + * registerFatalErrorHandler. This is async-signal-safe and intended to be + * called from a fatal signal handler. + */ +void callFatalErrorHandlers(std::ostream& os); +} // namespace FatalErrorHandler } // namespace Envoy diff --git a/source/common/signal/signal_action.cc b/source/common/signal/signal_action.cc index 11797843cd17..c3a53c19da70 100644 --- a/source/common/signal/signal_action.cc +++ b/source/common/signal/signal_action.cc @@ -9,46 +9,6 @@ namespace Envoy { -ABSL_CONST_INIT static absl::Mutex failure_mutex(absl::kConstInit); -// Since we can't grab the failure mutex on fatal error (snagging locks under -// fatal crash causing potential deadlocks) access the handler list as an atomic -// operation, to minimize the chance that one thread is operating on the list -// while the crash handler is attempting to access it. -// This basically makes edits to the list thread-safe - if one thread is -// modifying the list rather than crashing in the crash handler due to accessing -// the list in a non-thread-safe manner, it simply won't log crash traces. -using FailureFunctionList = std::list; -ABSL_CONST_INIT std::atomic fatal_error_handlers{nullptr}; - -void SignalAction::registerFatalErrorHandler(const FatalErrorHandlerInterface& handler) { -#ifdef ENVOY_OBJECT_TRACE_ON_DUMP - absl::MutexLock l(&failure_mutex); - FailureFunctionList* list = fatal_error_handlers.exchange(nullptr, std::memory_order_relaxed); - if (list == nullptr) { - list = new FailureFunctionList; - } - list->push_back(&handler); - fatal_error_handlers.store(list, std::memory_order_release); -#else - UNREFERENCED_PARAMETER(handler); -#endif -} - -void SignalAction::removeFatalErrorHandler(const FatalErrorHandlerInterface& handler) { -#ifdef ENVOY_OBJECT_TRACE_ON_DUMP - absl::MutexLock l(&failure_mutex); - FailureFunctionList* list = fatal_error_handlers.exchange(nullptr, std::memory_order_relaxed); - list->remove(&handler); - if (list->empty()) { - delete list; - } else { - fatal_error_handlers.store(list, std::memory_order_release); - } -#else - UNREFERENCED_PARAMETER(handler); -#endif -} - constexpr int SignalAction::FATAL_SIGS[]; void SignalAction::sigHandler(int sig, siginfo_t* info, void* context) { @@ -62,13 +22,8 @@ void SignalAction::sigHandler(int sig, siginfo_t* info, void* context) { } tracer.logTrace(); - FailureFunctionList* list = fatal_error_handlers.exchange(nullptr, std::memory_order_relaxed); - if (list) { - // Finally after logging the stack trace, call any registered crash handlers. 
- for (const auto* handler : *list) { - handler->onFatalError(); - } - } + // Finally after logging the stack trace, call any registered crash handlers. + FatalErrorHandler::callFatalErrorHandlers(std::cerr); signal(sig, SIG_DFL); raise(sig); diff --git a/source/common/signal/signal_action.h b/source/common/signal/signal_action.h index 0092dc4fffaa..ffabf9cc3cde 100644 --- a/source/common/signal/signal_action.h +++ b/source/common/signal/signal_action.h @@ -73,18 +73,6 @@ class SignalAction : NonCopyable { */ static void sigHandler(int sig, siginfo_t* info, void* context); - /** - * Add this handler to the list of functions which will be called if Envoy - * receives a fatal signal. - */ - static void registerFatalErrorHandler(const FatalErrorHandlerInterface& handler); - - /** - * Removes this handler from the list of functions which will be called if Envoy - * receives a fatal signal. - */ - static void removeFatalErrorHandler(const FatalErrorHandlerInterface& handler); - private: /** * Allocate this many bytes on each side of the area used for alt stack. @@ -142,7 +130,6 @@ class SignalAction : NonCopyable { char* altstack_{}; std::array previous_handlers_; stack_t previous_altstack_; - std::list fatal_error_handlers_; }; } // namespace Envoy diff --git a/test/common/event/dispatcher_impl_test.cc b/test/common/event/dispatcher_impl_test.cc index 30fbcd32f248..44055a091c89 100644 --- a/test/common/event/dispatcher_impl_test.cc +++ b/test/common/event/dispatcher_impl_test.cc @@ -375,7 +375,7 @@ TEST_F(DispatcherImplTest, TimerWithScope) { timer = dispatcher_->createTimer([this]() { { Thread::LockGuard lock(mu_); - static_cast(dispatcher_.get())->onFatalError(); + static_cast(dispatcher_.get())->onFatalError(std::cerr); work_finished_ = true; } cv_.notifyOne(); diff --git a/test/common/signal/BUILD b/test/common/signal/BUILD index 97a42a58cafd..c3f9cf5df843 100644 --- a/test/common/signal/BUILD +++ b/test/common/signal/BUILD @@ -17,7 +17,9 @@ envoy_cc_test( "skip_on_windows", ], deps = [ + "//source/common/signal:fatal_error_handler_lib", "//source/common/signal:sigaction_lib", + "//test/common/stats:stat_test_utility_lib", "//test/test_common:utility_lib", ], ) diff --git a/test/common/signal/signals_test.cc b/test/common/signal/signals_test.cc index 8cee0e5d0aa6..cc66d32d81d0 100644 --- a/test/common/signal/signals_test.cc +++ b/test/common/signal/signals_test.cc @@ -2,8 +2,10 @@ #include +#include "common/signal/fatal_error_handler.h" #include "common/signal/signal_action.h" +#include "test/common/stats/stat_test_utility.h" #include "test/test_common/utility.h" #include "gtest/gtest.h" @@ -19,6 +21,12 @@ namespace Envoy { #define ASANITIZED /* Sanitized by GCC */ #endif +// Use this test handler instead of a mock, because fatal error handlers must be +// signal-safe and a mock might allocate memory. +class TestFatalErrorHandler : public FatalErrorHandlerInterface { + void onFatalError(std::ostream& os) const override { os << "HERE!"; } +}; + // Death tests that expect a particular output are disabled under address sanitizer. // The sanitizer does its own special signal handling and prints messages that are // not ours instead of what this test expects. 
As of latest Clang this appears @@ -35,13 +43,9 @@ TEST(SignalsDeathTest, InvalidAddressDeathTest) { "backtrace.*Segmentation fault"); } -class TestFatalErrorHandler : public FatalErrorHandlerInterface { - void onFatalError() const override { std::cerr << "HERE!"; } -}; - TEST(SignalsDeathTest, RegisteredHandlerTest) { TestFatalErrorHandler handler; - SignalAction::registerFatalErrorHandler(handler); + FatalErrorHandler::registerFatalErrorHandler(handler); SignalAction actions; // Make sure the fatal error log "HERE" registered above is logged on fatal error. EXPECT_DEATH( @@ -51,7 +55,7 @@ TEST(SignalsDeathTest, RegisteredHandlerTest) { *(nasty_ptr) = 0; // NOLINT(clang-analyzer-core.NullDereference) }(), "HERE"); - SignalAction::removeFatalErrorHandler(handler); + FatalErrorHandler::removeFatalErrorHandler(handler); } TEST(SignalsDeathTest, BusDeathTest) { @@ -145,4 +149,60 @@ TEST(Signals, HandlerTest) { SignalAction::sigHandler(SIGURG, &fake_si, nullptr); } +TEST(FatalErrorHandler, CallHandler) { + // Reserve space in advance so that the handler doesn't allocate memory. + std::string s; + s.reserve(1024); + std::ostringstream os(std::move(s)); + + TestFatalErrorHandler handler; + FatalErrorHandler::registerFatalErrorHandler(handler); + + FatalErrorHandler::callFatalErrorHandlers(os); + EXPECT_EQ(os.str(), "HERE!"); + + // callFatalErrorHandlers() will unregister the handler, so this isn't + // necessary for cleanup. Call it anyway, to simulate the case when one thread + // tries to remove the handler while another thread crashes. + FatalErrorHandler::removeFatalErrorHandler(handler); +} + +// Use this specialized test handler instead of a mock, because fatal error +// handlers must be signal-safe and a mock might allocate memory. +class MemoryCheckingFatalErrorHandler : public FatalErrorHandlerInterface { +public: + MemoryCheckingFatalErrorHandler(const Stats::TestUtil::MemoryTest& memory_test, + uint64_t& allocated_after_call) + : memory_test_(memory_test), allocated_after_call_(allocated_after_call) {} + void onFatalError(std::ostream& os) const override { + UNREFERENCED_PARAMETER(os); + allocated_after_call_ = memory_test_.consumedBytes(); + } + +private: + const Stats::TestUtil::MemoryTest& memory_test_; + uint64_t& allocated_after_call_; +}; + +// FatalErrorHandler::callFatalErrorHandlers shouldn't allocate any heap memory, +// so that it's safe to call from a signal handler. Test by comparing the +// allocated memory before a call with the allocated memory during a handler. +TEST(FatalErrorHandler, DontAllocateMemory) { + // Reserve space in advance so that the handler doesn't allocate memory. + std::string s; + s.reserve(1024); + std::ostringstream os(std::move(s)); + + Stats::TestUtil::MemoryTest memory_test; + + uint64_t allocated_after_call; + MemoryCheckingFatalErrorHandler handler(memory_test, allocated_after_call); + FatalErrorHandler::registerFatalErrorHandler(handler); + + uint64_t allocated_before_call = memory_test.consumedBytes(); + FatalErrorHandler::callFatalErrorHandlers(os); + + EXPECT_MEMORY_EQ(allocated_after_call, allocated_before_call); +} + } // namespace Envoy From c9c4709c844b90b9bb2935d784a428d667c9df7d Mon Sep 17 00:00:00 2001 From: Dhi Aurrahman Date: Fri, 7 Aug 2020 07:22:54 +0700 Subject: [PATCH 877/909] build: Update ICU and googleurl (#12376) This patch updates ICU to 67.1 and googleurl to https://quiche.googlesource.com/googleurl/+/ef0d23689e240e6c8de4c3a5296b209128c87373. This reduces `bazel/external/googleurl.patch` to only handling MSVC quirks. 
Please note that googleurl direction for Windows is to use clang-cl, while we still use msvc-cl (hence the patch). Risk Level: Low Testing: Existing Docs Changes: N/A Release Notes: N/A Fixes #12015 Signed-off-by: Dhi Aurrahman --- .bazelrc | 3 + bazel/external/googleurl.patch | 101 ++++++--------------------------- bazel/external/icuuc.BUILD | 22 +++---- bazel/repositories.bzl | 5 -- bazel/repository_locations.bzl | 13 +++-- 5 files changed, 36 insertions(+), 108 deletions(-) diff --git a/.bazelrc b/.bazelrc index 393c54d63b70..1cb3caaa6a9f 100644 --- a/.bazelrc +++ b/.bazelrc @@ -33,6 +33,9 @@ build --action_env=CXX build --action_env=LLVM_CONFIG build --action_env=PATH +# Skip system ICU linking. +build --@com_googlesource_googleurl//build_config:system_icu=0 + # Common flags for sanitizers build:sanitizer --define tcmalloc=disabled build:sanitizer --linkopt -ldl diff --git a/bazel/external/googleurl.patch b/bazel/external/googleurl.patch index fe867e5bedc6..fb33ca4475fb 100644 --- a/bazel/external/googleurl.patch +++ b/bazel/external/googleurl.patch @@ -2,7 +2,7 @@ # clang-cl. Tracked in https://github.com/envoyproxy/envoy/issues/11974. diff --git a/base/compiler_specific.h b/base/compiler_specific.h -index 2962537..6193b56 100644 +index 0cd36dc..8c4cbd4 100644 --- a/base/compiler_specific.h +++ b/base/compiler_specific.h @@ -7,10 +7,6 @@ @@ -16,7 +16,21 @@ index 2962537..6193b56 100644 // Annotate a variable indicating it's ok if the variable is not used. // (Typically used to silence a compiler warning when the assignment // is important for some other reason.) -@@ -212,7 +208,9 @@ +@@ -55,8 +51,12 @@ + // prevent code folding, see gurl_base::debug::Alias. + // Use like: + // void NOT_TAIL_CALLED FooBar(); +-#if defined(__clang__) && __has_attribute(not_tail_called) ++#if defined(__clang__) ++#if defined(__has_attribute) ++#if __has_attribute(not_tail_called) + #define NOT_TAIL_CALLED __attribute__((not_tail_called)) ++#endif ++#endif + #else + #define NOT_TAIL_CALLED + #endif +@@ -226,7 +226,9 @@ #endif #endif @@ -27,7 +41,7 @@ index 2962537..6193b56 100644 // Attribute "uninitialized" disables -ftrivial-auto-var-init=pattern for // the specified variable. // Library-wide alternative is -@@ -243,6 +241,8 @@ +@@ -257,6 +259,8 @@ // E.g. platform, bot, benchmark or test name in patch description or next to // the attribute. 
#define STACK_UNINITIALIZED __attribute__((uninitialized)) @@ -36,84 +50,3 @@ index 2962537..6193b56 100644 #else #define STACK_UNINITIALIZED #endif -diff --git a/base/strings/BUILD b/base/strings/BUILD -index 7a06170..7c86a5f 100644 ---- a/base/strings/BUILD -+++ b/base/strings/BUILD -@@ -6,23 +6,21 @@ load("//:build_config.bzl", "build_config") - cc_library( - name = "strings", - srcs = [ -- "string16.cc", - "string_piece.cc", - "string_util.cc", - "string_util_constants.cc", - "utf_string_conversion_utils.cc", - "utf_string_conversions.cc", -- ], -+ ] + build_config.strings_srcs, - hdrs = [ - "char_traits.h", - "string16.h", - "string_piece.h", - "string_piece_forward.h", - "string_util.h", -- "string_util_posix.h", - "utf_string_conversion_utils.h", - "utf_string_conversions.h", -- ], -+ ] + build_config.strings_hdrs, - copts = build_config.default_copts, - visibility = ["//visibility:public"], - deps = [ -diff --git a/build_config.bzl b/build_config.bzl -index d5fca65..fc0d7e5 100644 ---- a/build_config/build_config.bzl -+++ b/build_config/build_config.bzl -@@ -1,8 +1,25 @@ --_default_copts = [ -- "-std=c++14", -- "-fno-strict-aliasing", --] -+_default_copts = select({ -+ "@envoy//bazel:windows_x86_64": [ -+ "/std:c++17", -+ ], -+ "//conditions:default": [ -+ "-std=c++17", -+ "-fno-strict-aliasing", -+ ], -+}) -+ -+_strings_srcs = select({ -+ "@envoy//bazel:windows_x86_64": [], -+ "//conditions:default": ["string16.cc"], -+}) -+ -+_strings_hdrs = select({ -+ "@envoy//bazel:windows_x86_64": ["string_util_win.h"], -+ "//conditions:default": ["string_util_posix.h"], -+}) - - build_config = struct( - default_copts = _default_copts, -+ strings_srcs = _strings_srcs, -+ strings_hdrs = _strings_hdrs, - ) -diff --git a/url/BUILD b/url/BUILD -index 0126bdc..5d1a171 100644 ---- a/url/BUILD -+++ b/url/BUILD -@@ -43,11 +43,11 @@ cc_library( - "url_util.h", - ], - copts = build_config.default_copts, -- linkopts = ["-licuuc"], - visibility = ["//visibility:public"], - deps = [ - "//base", - "//base/strings", - "//polyfills", -+ "@org_unicode_icuuc//:common", - ], - ) diff --git a/bazel/external/icuuc.BUILD b/bazel/external/icuuc.BUILD index e910a64af1aa..305d0db952b1 100644 --- a/bazel/external/icuuc.BUILD +++ b/bazel/external/icuuc.BUILD @@ -2,10 +2,7 @@ load("@rules_cc//cc:defs.bzl", "cc_library") licenses(["notice"]) # Apache 2 -exports_files([ - "icu4c/LICENSE", - "icu4j/main/shared/licenses/LICENSE", -]) +exports_files(["LICENSE"]) icuuc_copts = [ "-DU_STATIC_IMPLEMENTATION", @@ -31,15 +28,15 @@ icuuc_copts = [ cc_library( name = "headers", - hdrs = glob(["icu4c/source/common/unicode/*.h"]), - includes = ["icu4c/source/common"], + hdrs = glob(["source/common/unicode/*.h"]), + includes = ["source/common"], visibility = ["//visibility:public"], ) cc_library( name = "common", - hdrs = glob(["icu4c/source/common/unicode/*.h"]), - includes = ["icu4c/source/common"], + hdrs = glob(["source/common/unicode/*.h"]), + includes = ["source/common"], visibility = ["//visibility:public"], deps = [":icuuc"], ) @@ -47,13 +44,12 @@ cc_library( cc_library( name = "icuuc", srcs = glob([ - "icu4c/source/common/*.c", - "icu4c/source/common/*.cpp", - "icu4c/source/stubdata/*.cpp", + "source/common/*.c", + "source/common/*.cpp", + "source/stubdata/*.cpp", ]), - hdrs = glob(["icu4c/source/common/*.h"]), + hdrs = glob(["source/common/*.h"]), copts = icuuc_copts, - tags = ["requires-rtti"], visibility = ["//visibility:private"], deps = [":headers"], ) diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index 
bf1827cf8c66..4e0293ef288b 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -903,11 +903,6 @@ def _org_unicode_icuuc(): _repository_impl( name = "org_unicode_icuuc", build_file = "@envoy//bazel/external:icuuc.BUILD", - # TODO(dio): Consider patching udata when we need to embed some data. - ) - native.bind( - name = "icuuc", - actual = "@org_unicode_icuuc//:common", ) def _foreign_cc_dependencies(): diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 096d1b8ae77b..145da07e7f25 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -417,9 +417,10 @@ DEPENDENCY_REPOSITORIES = dict( cpe = "N/A", ), com_googlesource_googleurl = dict( - # Static snapshot of https://quiche.googlesource.com/quiche/+archive/googleurl_6dafefa72cba2ab2ba4922d17a30618e9617c7cf.tar.gz - sha256 = "f1ab73ddd1a7db4e08a9e4db6c2e98e5a0a7bbaca08f5fee0d73adb02c24e44a", - urls = ["https://storage.googleapis.com/quiche-envoy-integration/googleurl_6dafefa72cba2ab2ba4922d17a30618e9617c7cf.tar.gz"], + # Static snapshot of https://quiche.googlesource.com/quiche/+archive/ef0d23689e240e6c8de4c3a5296b209128c87373.tar.gz. + sha256 = "d769283fed1319bca68bae8bdd47fbc3a7933999329eee850eff1f1ea61ce176", + # 2020-08-05 + urls = ["https://storage.googleapis.com/quiche-envoy-integration/googleurl_ef0d23689e240e6c8de4c3a5296b209128c87373.tar.gz"], use_category = ["dataplane"], cpe = "N/A", ), @@ -484,9 +485,9 @@ DEPENDENCY_REPOSITORIES = dict( use_category = ["test"], ), org_unicode_icuuc = dict( - strip_prefix = "icu-release-64-2", - sha256 = "524960ac99d086cdb6988d2a92fc163436fd3c6ec0a84c475c6382fbf989be05", - urls = ["https://github.com/unicode-org/icu/archive/release-64-2.tar.gz"], + strip_prefix = "icu", + sha256 = "94a80cd6f251a53bd2a997f6f1b5ac6653fe791dfab66e1eb0227740fb86d5dc", + urls = ["https://github.com/unicode-org/icu/releases/download/release-67-1/icu4c-67_1-src.tgz"], use_category = ["dataplane"], cpe = "cpe:2.3:a:icu-project:international_components_for_unicode", ), From 1e8c0fccbe25cf183395eeddda4042e85f616ba1 Mon Sep 17 00:00:00 2001 From: phlax Date: Fri, 7 Aug 2020 10:10:59 +0100 Subject: [PATCH 878/909] Use pyyaml safe_load (#12534) Risk Level: medium without being fixed Testing: rebuilding docs has been tested with the change to the protodoc compiler Docs Changes: n/a Release Notes: Fixes #12533 Signed-off-by: Ryan Northey --- tools/config_validation/validate_fragment.py | 2 +- tools/protodoc/protodoc.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/config_validation/validate_fragment.py b/tools/config_validation/validate_fragment.py index faa9951114a8..d272f37fb006 100644 --- a/tools/config_validation/validate_fragment.py +++ b/tools/config_validation/validate_fragment.py @@ -69,4 +69,4 @@ def ParseArgs(): message_type = parsed_args.message_type content = parsed_args.s if (parsed_args.fragment_path is None) else pathlib.Path( parsed_args.fragment_path).read_text() - ValidateFragment(message_type, yaml.load(content, Loader=yaml.FullLoader)) + ValidateFragment(message_type, yaml.safe_load(content)) diff --git a/tools/protodoc/protodoc.py b/tools/protodoc/protodoc.py index ed3885f7b145..8eeeceb9e225 100755 --- a/tools/protodoc/protodoc.py +++ b/tools/protodoc/protodoc.py @@ -589,7 +589,7 @@ def __init__(self): with open(r.Rlocation('envoy/docs/protodoc_manifest.yaml'), 'r') as f: # Load as YAML, emit as JSON and then parse as proto to provide type # checking. 
- protodoc_manifest_untyped = yaml.load(f.read()) + protodoc_manifest_untyped = yaml.safe_load(f.read()) self.protodoc_manifest = manifest_pb2.Manifest() json_format.Parse(json.dumps(protodoc_manifest_untyped), self.protodoc_manifest) From f809fbd517c131b06fec16bea29a5e637648e185 Mon Sep 17 00:00:00 2001 From: Yosry Ahmed Date: Fri, 7 Aug 2020 02:19:16 -0700 Subject: [PATCH 879/909] caching: Validate stale cache entries and update cached headers (#12232) When a stale cache hit is found for a request, conditional headers are injected to the request for validation. If the a 304 response is received (not modified), a response is constructed from cache, and cached headers are updated. If a request already includes conditional headers it bypasses the CacheFilter, this is valid behavior and satisfies the minimum of #9855 for production -- however, it can lead to missed caching opportunities. Risk Level: Low Testing: Integration tests Docs Changes: N/A Release Notes: N/A Fixes #9976 Signed-off-by: Yosry Ahmed --- source/common/http/headers.h | 6 + source/extensions/filters/http/cache/BUILD | 2 + .../filters/http/cache/cache_filter.cc | 296 ++++++++++++++---- .../filters/http/cache/cache_filter.h | 69 +++- .../filters/http/cache/cache_headers_utils.cc | 22 +- .../filters/http/cache/cacheability_utils.cc | 30 +- .../filters/http/cache/cacheability_utils.h | 12 +- .../filters/http/cache/http_cache.cc | 28 +- .../filters/http/cache/http_cache.h | 5 +- .../simple_http_cache/simple_http_cache.cc | 8 +- .../simple_http_cache/simple_http_cache.h | 4 +- .../cache/cache_filter_integration_test.cc | 148 ++++++++- .../filters/http/cache/cache_filter_test.cc | 28 +- .../http/cache/cache_headers_utils_test.cc | 8 +- .../http/cache/cacheability_utils_test.cc | 15 +- .../filters/http/cache/http_cache_test.cc | 10 +- 16 files changed, 560 insertions(+), 131 deletions(-) diff --git a/source/common/http/headers.h b/source/common/http/headers.h index 63de5a1351d9..73f866f7b60f 100644 --- a/source/common/http/headers.h +++ b/source/common/http/headers.h @@ -64,6 +64,12 @@ class CustomHeaderValues { const LowerCaseString ContentEncoding{"content-encoding"}; const LowerCaseString Etag{"etag"}; const LowerCaseString GrpcAcceptEncoding{"grpc-accept-encoding"}; + const LowerCaseString IfMatch{"if-match"}; + const LowerCaseString IfNoneMatch{"if-none-match"}; + const LowerCaseString IfModifiedSince{"if-modified-since"}; + const LowerCaseString IfUnmodifiedSince{"if-unmodified-since"}; + const LowerCaseString IfRange{"if-range"}; + const LowerCaseString LastModified{"last-modified"}; const LowerCaseString Origin{"origin"}; const LowerCaseString OtSpanContext{"x-ot-span-context"}; const LowerCaseString Pragma{"pragma"}; diff --git a/source/extensions/filters/http/cache/BUILD b/source/extensions/filters/http/cache/BUILD index 28d852f64c48..6dd67613de95 100644 --- a/source/extensions/filters/http/cache/BUILD +++ b/source/extensions/filters/http/cache/BUILD @@ -20,10 +20,12 @@ envoy_cc_library( ":cache_headers_utils_lib", ":cacheability_utils_lib", ":http_cache_lib", + "//source/common/common:enum_to_int", "//source/common/common:logger_lib", "//source/common/common:macros", "//source/common/http:header_map_lib", "//source/common/http:headers_lib", + "//source/common/http:utility_lib", "//source/extensions/filters/http/common:pass_through_filter_lib", "@envoy_api//envoy/extensions/filters/http/cache/v3alpha:pkg_cc_proto", ], diff --git a/source/extensions/filters/http/cache/cache_filter.cc 
b/source/extensions/filters/http/cache/cache_filter.cc index ae9523494f2d..6e4b469f342e 100644 --- a/source/extensions/filters/http/cache/cache_filter.cc +++ b/source/extensions/filters/http/cache/cache_filter.cc @@ -1,6 +1,8 @@ #include "extensions/filters/http/cache/cache_filter.h" +#include "common/common/enum_to_int.h" #include "common/http/headers.h" +#include "common/http/utility.h" #include "extensions/filters/http/cache/cacheability_utils.h" @@ -11,6 +13,12 @@ namespace Extensions { namespace HttpFilters { namespace Cache { +namespace { +inline bool isResponseNotModified(const Http::ResponseHeaderMap& response_headers) { + return Http::Utility::getResponseStatus(response_headers) == enumToInt(Http::Code::NotModified); +} +} // namespace + struct CacheResponseCodeDetailValues { const absl::string_view ResponseFromCacheFilter = "cache.response_from_cache_filter"; }; @@ -49,25 +57,52 @@ Http::FilterHeadersStatus CacheFilter::decodeHeaders(Http::RequestHeaderMap& hea lookup_ = cache_.makeLookupContext(std::move(lookup_request)); ASSERT(lookup_); - ENVOY_STREAM_LOG(debug, "CacheFilter::decodeHeaders starting lookup", *decoder_callbacks_); - lookup_->getHeaders([this](LookupResult&& result) { onHeaders(std::move(result)); }); - if (state_ == GetHeadersState::GetHeadersResultUnusable) { - // onHeaders has already been called, and no usable cache entry was found--continue iteration. + + lookup_->getHeaders( + [this, &headers](LookupResult&& result) { onHeaders(std::move(result), headers); }); + + // If the cache called onHeaders synchronously it will have advanced the filter_state_. + switch (filter_state_) { + case FilterState::Initial: + // Headers are not fetched from cache yet -- wait until cache lookup is completed. + filter_state_ = FilterState::WaitingForCacheLookup; + return Http::FilterHeadersStatus::StopAllIterationAndWatermark; + case FilterState::DecodeServingFromCache: + case FilterState::ResponseServedFromCache: + // A fresh cached response was found -- no need to continue the decoding stream. + return Http::FilterHeadersStatus::StopAllIterationAndWatermark; + default: return Http::FilterHeadersStatus::Continue; } - // onHeaders hasn't been called yet--stop iteration to wait for it, and tell it that we stopped - // iteration. - state_ = GetHeadersState::FinishedGetHeadersCall; - return Http::FilterHeadersStatus::StopAllIterationAndWatermark; } Http::FilterHeadersStatus CacheFilter::encodeHeaders(Http::ResponseHeaderMap& headers, bool end_stream) { + if (filter_state_ == FilterState::DecodeServingFromCache) { + // This call was invoked by decoder_callbacks_->encodeHeaders -- ignore it. + return Http::FilterHeadersStatus::Continue; + } + // If lookup_ is null, the request wasn't cacheable, so the response isn't either. - if (lookup_ && request_allows_inserts_ && CacheabilityUtils::isCacheableResponse(headers)) { - // TODO(yosrym93): Add date internal header or metadata to cached responses and use it instead - // of the date header + if (!lookup_) { + return Http::FilterHeadersStatus::Continue; + } + + if (filter_state_ == FilterState::ValidatingCachedResponse && isResponseNotModified(headers)) { + processSuccessfulValidation(headers); + if (filter_state_ != FilterState::ResponseServedFromCache) { + // Response is still being fetched from cache -- wait until it is fetched & encoded. 
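For reference, the decode-side decision above reduces to a small state check: the cache lookup callback may run synchronously or asynchronously, so decodeHeaders() looks at the state the callback left behind to decide whether the request may proceed upstream. A minimal standalone sketch of that decision follows; the enum and function names are illustrative only and are not the Envoy types used in the diff.

#include <cassert>

enum class FilterState {
  Initial, WaitingForCacheLookup, NoCachedResponseFound,
  ValidatingCachedResponse, DecodeServingFromCache, ResponseServedFromCache
};
enum class HeadersAction { StopIteration, Continue };

// Mirrors the switch in decodeHeaders(): hold the request while the lookup is
// still outstanding or while a cached response is being served; otherwise let
// it go upstream (plain miss, or a miss that carries validation headers).
HeadersAction afterLookupCall(FilterState state) {
  switch (state) {
  case FilterState::Initial:                 // callback has not run yet
  case FilterState::DecodeServingFromCache:  // fresh hit, response comes from cache
  case FilterState::ResponseServedFromCache:
    return HeadersAction::StopIteration;
  default:
    return HeadersAction::Continue;
  }
}

int main() {
  assert(afterLookupCall(FilterState::Initial) == HeadersAction::StopIteration);
  assert(afterLookupCall(FilterState::NoCachedResponseFound) == HeadersAction::Continue);
  assert(afterLookupCall(FilterState::ValidatingCachedResponse) == HeadersAction::Continue);
}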
+ filter_state_ = FilterState::WaitingForCacheBody; + return Http::FilterHeadersStatus::StopIteration; + } + return Http::FilterHeadersStatus::Continue; + } + + // Either a cache miss or a cache entry that is no longer valid. + // Check if the new response can be cached. + if (request_allows_inserts_ && CacheabilityUtils::isCacheableResponse(headers)) { + // TODO(#12140): Add date internal header or metadata to cached responses. ENVOY_STREAM_LOG(debug, "CacheFilter::encodeHeaders inserting headers", *encoder_callbacks_); insert_ = cache_.makeInsertContext(std::move(lookup_)); insert_->insertHeaders(headers, end_stream); @@ -76,8 +111,16 @@ Http::FilterHeadersStatus CacheFilter::encodeHeaders(Http::ResponseHeaderMap& he } Http::FilterDataStatus CacheFilter::encodeData(Buffer::Instance& data, bool end_stream) { + if (filter_state_ == FilterState::DecodeServingFromCache) { + // This call was invoked by decoder_callbacks_->encodeData -- ignore it. + return Http::FilterDataStatus::Continue; + } + if (filter_state_ == FilterState::WaitingForCacheBody) { + // Encoding stream stopped waiting for cached body (and trailers) to be encoded. + return Http::FilterDataStatus::StopIterationAndBuffer; + } if (insert_) { - ENVOY_STREAM_LOG(debug, "CacheFilter::encodeHeaders inserting body", *encoder_callbacks_); + ENVOY_STREAM_LOG(debug, "CacheFilter::encodeData inserting body", *encoder_callbacks_); // TODO(toddmgreer): Wait for the cache if necessary. insert_->insertBody( data, [](bool) {}, end_stream); @@ -85,58 +128,58 @@ Http::FilterDataStatus CacheFilter::encodeData(Buffer::Instance& data, bool end_ return Http::FilterDataStatus::Continue; } -void CacheFilter::onHeaders(LookupResult&& result) { - // TODO(yosrym93): Handle request only-if-cached directive +void CacheFilter::getBody() { + ASSERT(lookup_, "CacheFilter is trying to call getBody with no LookupContext"); + ASSERT(!remaining_body_.empty(), "No reason to call getBody when there's no body to get."); + lookup_->getBody(remaining_body_[0], + [this](Buffer::InstancePtr&& body) { onBody(std::move(body)); }); +} + +void CacheFilter::getTrailers() { + ASSERT(lookup_, "CacheFilter is trying to call getTrailers with no LookupContext"); + ASSERT(response_has_trailers_, "No reason to call getTrailers when there's no trailers to get."); + lookup_->getTrailers( + [this](Http::ResponseTrailerMapPtr&& trailers) { onTrailers(std::move(trailers)); }); +} + +void CacheFilter::onHeaders(LookupResult&& result, Http::RequestHeaderMap& request_headers) { + // TODO(yosrym93): Handle request only-if-cached directive. + bool should_continue_decoding = false; switch (result.cache_entry_status_) { case CacheEntryStatus::FoundNotModified: case CacheEntryStatus::NotSatisfiableRange: // TODO(#10132): create 416 response. NOT_IMPLEMENTED_GCOVR_EXCL_LINE; // We don't yet return or support these codes. case CacheEntryStatus::RequiresValidation: - // Cache entries that require validation are treated as unusable entries - // until validation is implemented - // TODO(yosrym93): Implement response validation + // If a cache entry requires validation, inject validation headers in the request and let it + // pass through as if no cache entry was found. + // If the cache entry was valid, the response status should be 304 (unmodified) and the cache + // entry will be injected in the response body. 
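In short, the RequiresValidation path above turns the request into a conditional one and defers the final decision to encodeHeaders(): a 304 from upstream confirms the cached entry and the cached body is served, while any other status is handled like a miss. A rough standalone sketch of that decision, with illustrative names and none of the header merging the real filter performs:

#include <cassert>

enum class EncodeAction { ServeCachedResponse, PassThroughAndMaybeInsert };

// While a cached entry is being validated, 304 means the entry is still good:
// serve the cached body and refresh the stored headers. Anything else is
// treated as a new response: forward it and, if cacheable, store it.
EncodeAction onUpstreamHeaders(bool validating_cached_response, unsigned status) {
  if (validating_cached_response && status == 304) {
    return EncodeAction::ServeCachedResponse;
  }
  return EncodeAction::PassThroughAndMaybeInsert;
}

int main() {
  assert(onUpstreamHeaders(true, 304) == EncodeAction::ServeCachedResponse);
  assert(onUpstreamHeaders(true, 200) == EncodeAction::PassThroughAndMaybeInsert);
  assert(onUpstreamHeaders(false, 304) == EncodeAction::PassThroughAndMaybeInsert);
}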
+ lookup_result_ = std::make_unique(std::move(result)); + should_continue_decoding = filter_state_ == FilterState::WaitingForCacheLookup; + filter_state_ = FilterState::ValidatingCachedResponse; + injectValidationHeaders(request_headers); + break; case CacheEntryStatus::Unusable: - if (state_ == GetHeadersState::FinishedGetHeadersCall) { - // decodeHeader returned Http::FilterHeadersStatus::StopAllIterationAndWatermark--restart it - decoder_callbacks_->continueDecoding(); - } else { - // decodeHeader hasn't yet returned--tell it to return Http::FilterHeadersStatus::Continue. - state_ = GetHeadersState::GetHeadersResultUnusable; - } - return; + should_continue_decoding = filter_state_ == FilterState::WaitingForCacheLookup; + filter_state_ = FilterState::NoCachedResponseFound; + break; case CacheEntryStatus::SatisfiableRange: // TODO(#10132): break response content to the ranges // requested. case CacheEntryStatus::Ok: - response_has_trailers_ = result.has_trailers_; - const bool end_stream = (result.content_length_ == 0 && !response_has_trailers_); - // TODO(toddmgreer): Calculate age per https://httpwg.org/specs/rfc7234.html#age.calculations - result.headers_->addReferenceKey(Http::Headers::get().Age, 0); - decoder_callbacks_->streamInfo().setResponseFlag( - StreamInfo::ResponseFlag::ResponseFromCacheFilter); - decoder_callbacks_->streamInfo().setResponseCodeDetails( - CacheResponseCodeDetails::get().ResponseFromCacheFilter); - decoder_callbacks_->encodeHeaders(std::move(result.headers_), end_stream); - if (end_stream) { - return; - } - if (result.content_length_ > 0) { - remaining_body_.emplace_back(0, result.content_length_); - getBody(); - } else { - lookup_->getTrailers( - [this](Http::ResponseTrailerMapPtr&& trailers) { onTrailers(std::move(trailers)); }); - } + lookup_result_ = std::make_unique(std::move(result)); + filter_state_ = FilterState::DecodeServingFromCache; + encodeCachedResponse(); + } + if (should_continue_decoding) { + // decodeHeaders returned StopIteration waiting for this callback -- continue decoding. + decoder_callbacks_->continueDecoding(); } -} - -void CacheFilter::getBody() { - ASSERT(!remaining_body_.empty(), "No reason to call getBody when there's no body to get."); - lookup_->getBody(remaining_body_[0], - [this](Buffer::InstancePtr&& body) { onBody(std::move(body)); }); } // TODO(toddmgreer): Handle downstream backpressure. void CacheFilter::onBody(Buffer::InstancePtr&& body) { + // Can be called during decoding if a valid cache hit is found, + // or during encoding if a cache entry was being validated. ASSERT(!remaining_body_.empty(), "CacheFilter doesn't call getBody unless there's more body to get, so this is a " "bogus callback."); @@ -149,23 +192,166 @@ void CacheFilter::onBody(Buffer::InstancePtr&& body) { remaining_body_.erase(remaining_body_.begin()); } else { ASSERT(false, "Received oversized body from cache."); - decoder_callbacks_->resetStream(); + filter_state_ == FilterState::DecodeServingFromCache ? decoder_callbacks_->resetStream() + : encoder_callbacks_->resetStream(); return; } const bool end_stream = remaining_body_.empty() && !response_has_trailers_; - decoder_callbacks_->encodeData(*body, end_stream); + + filter_state_ == FilterState::DecodeServingFromCache + ? 
decoder_callbacks_->encodeData(*body, end_stream) + : encoder_callbacks_->addEncodedData(*body, true); + if (!remaining_body_.empty()) { getBody(); } else if (response_has_trailers_) { - lookup_->getTrailers( - [this](Http::ResponseTrailerMapPtr&& trailers) { onTrailers(std::move(trailers)); }); + getTrailers(); + } else { + finalizeEncodingCachedResponse(); } } void CacheFilter::onTrailers(Http::ResponseTrailerMapPtr&& trailers) { - decoder_callbacks_->encodeTrailers(std::move(trailers)); + // Can be called during decoding if a valid cache hit is found, + // or during encoding if a cache entry was being validated. + if (filter_state_ == FilterState::DecodeServingFromCache) { + decoder_callbacks_->encodeTrailers(std::move(trailers)); + } else { + Http::ResponseTrailerMap& response_trailers = encoder_callbacks_->addEncodedTrailers(); + response_trailers = std::move(*trailers); + } + finalizeEncodingCachedResponse(); } + +void CacheFilter::processSuccessfulValidation(Http::ResponseHeaderMap& response_headers) { + ASSERT(lookup_result_, "CacheFilter trying to validate a non-existent lookup result"); + ASSERT( + filter_state_ == FilterState::ValidatingCachedResponse, + "processSuccessfulValidation must only be called when a cached response is being validated"); + ASSERT(isResponseNotModified(response_headers), + "processSuccessfulValidation must only be called with 304 responses"); + + // Check whether the cached entry should be updated before modifying the 304 response. + const bool should_update_cached_entry = shouldUpdateCachedEntry(response_headers); + + // Update the 304 response status code and content-length. + response_headers.setStatus(lookup_result_->headers_->getStatusValue()); + response_headers.setContentLength(lookup_result_->headers_->getContentLengthValue()); + + // A cache entry was successfully validated -> encode cached body and trailers. + // encodeCachedResponse also adds the age header to lookup_result_ + // so it should be called before headers are merged. + encodeCachedResponse(); + + // Add any missing headers from the cached response to the 304 response. + lookup_result_->headers_->iterate([&response_headers](const Http::HeaderEntry& cached_header) { + // TODO(yosrym93): Try to avoid copying the header key twice. + Http::LowerCaseString key(std::string(cached_header.key().getStringView())); + absl::string_view value = cached_header.value().getStringView(); + if (!response_headers.get(key)) { + response_headers.setCopy(key, value); + } + return Http::HeaderMap::Iterate::Continue; + }); + + if (should_update_cached_entry) { + // TODO(yosrym93): else the cached entry should be deleted. + cache_.updateHeaders(*lookup_, response_headers); + } +} + +bool CacheFilter::shouldUpdateCachedEntry(const Http::ResponseHeaderMap& response_headers) const { + ASSERT(isResponseNotModified(response_headers), + "shouldUpdateCachedEntry must only be called with 304 responses"); + ASSERT(lookup_result_, "shouldUpdateCachedEntry precondition unsatisfied: lookup_result_ " + "does not point to a cache lookup result"); + ASSERT(filter_state_ == FilterState::ValidatingCachedResponse, + "shouldUpdateCachedEntry precondition unsatisfied: the " + "CacheFilter is not validating a cache lookup result"); + + // According to: https://httpwg.org/specs/rfc7234.html#freshening.responses, + // and assuming a single cached response per key: + // If the 304 response contains a strong validator (etag) that does not match the cached response, + // the cached response should not be updated. 
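The freshening rule referenced in the comment above (RFC 7234, freshening stored responses upon validation) reduces to a small predicate over the two ETag values. A hedged sketch with plain strings standing in for header entries, where an empty string means the header is absent:

#include <cassert>
#include <string>

// Update the stored headers from a 304 only if the 304 carries no ETag, or an
// ETag identical to the cached one. A mismatched strong validator means the
// 304 describes a different representation, so the entry is left untouched.
bool shouldUpdateCachedEntry(const std::string& response_etag, const std::string& cached_etag) {
  if (response_etag.empty()) {
    return true;
  }
  return !cached_etag.empty() && cached_etag == response_etag;
}

int main() {
  assert(shouldUpdateCachedEntry("", "abc123"));        // no validator on the 304
  assert(shouldUpdateCachedEntry("abc123", "abc123"));  // matching ETags
  assert(!shouldUpdateCachedEntry("xyz789", "abc123")); // mismatch, do not update
}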
+ const Http::HeaderEntry* response_etag = response_headers.get(Http::CustomHeaders::get().Etag); + const Http::HeaderEntry* cached_etag = + lookup_result_->headers_->get(Http::CustomHeaders::get().Etag); + return !response_etag || (cached_etag && cached_etag->value().getStringView() == + response_etag->value().getStringView()); +} + +void CacheFilter::injectValidationHeaders(Http::RequestHeaderMap& request_headers) { + ASSERT(lookup_result_, "injectValidationHeaders precondition unsatisfied: lookup_result_ " + "does not point to a cache lookup result"); + ASSERT(filter_state_ == FilterState::ValidatingCachedResponse, + "injectValidationHeaders precondition unsatisfied: the " + "CacheFilter is not validating a cache lookup result"); + + const Http::HeaderEntry* etag_header = + lookup_result_->headers_->get(Http::CustomHeaders::get().Etag); + const Http::HeaderEntry* last_modified_header = + lookup_result_->headers_->get(Http::CustomHeaders::get().LastModified); + + if (etag_header) { + absl::string_view etag = etag_header->value().getStringView(); + request_headers.setReferenceKey(Http::CustomHeaders::get().IfNoneMatch, etag); + } + if (CacheHeadersUtils::httpTime(last_modified_header) != SystemTime()) { + // Valid Last-Modified header exists. + absl::string_view last_modified = last_modified_header->value().getStringView(); + request_headers.setReferenceKey(Http::CustomHeaders::get().IfModifiedSince, last_modified); + } else { + // Either Last-Modified is missing or invalid, fallback to Date. + // A correct behaviour according to: + // https://httpwg.org/specs/rfc7232.html#header.if-modified-since + absl::string_view date = lookup_result_->headers_->getDateValue(); + request_headers.setReferenceKey(Http::CustomHeaders::get().IfModifiedSince, date); + } +} + +void CacheFilter::encodeCachedResponse() { + ASSERT(lookup_result_, "encodeCachedResponse precondition unsatisfied: lookup_result_ " + "does not point to a cache lookup result"); + + response_has_trailers_ = lookup_result_->has_trailers_; + const bool end_stream = (lookup_result_->content_length_ == 0 && !response_has_trailers_); + // TODO(toddmgreer): Calculate age per https://httpwg.org/specs/rfc7234.html#age.calculations + lookup_result_->headers_->addReferenceKey(Http::Headers::get().Age, 0); + + // Set appropriate response flags and codes. + Http::StreamFilterCallbacks* callbacks = + filter_state_ == FilterState::DecodeServingFromCache + ? static_cast(decoder_callbacks_) + : static_cast(encoder_callbacks_); + + callbacks->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::ResponseFromCacheFilter); + callbacks->streamInfo().setResponseCodeDetails( + CacheResponseCodeDetails::get().ResponseFromCacheFilter); + + // If the filter is encoding, 304 response headers and cached headers are merged in encodeHeaders. + // If the filter is decoding, we need to serve response headers from cache directly. + if (filter_state_ == FilterState::DecodeServingFromCache) { + decoder_callbacks_->encodeHeaders(std::move(lookup_result_->headers_), end_stream); + } + + if (lookup_result_->content_length_ > 0) { + remaining_body_.emplace_back(0, lookup_result_->content_length_); + getBody(); + } else if (response_has_trailers_) { + getTrailers(); + } +} + +void CacheFilter::finalizeEncodingCachedResponse() { + if (filter_state_ == FilterState::WaitingForCacheBody) { + // encodeHeaders returned StopIteration waiting for finishing encoding the cached response -- + // continue encoding. 
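The validator selection in injectValidationHeaders() above follows RFC 7232: the cached ETag becomes If-None-Match, and If-Modified-Since uses Last-Modified when it parses as an HTTP date, falling back to the cached Date otherwise. A standalone sketch with string maps in place of Envoy header maps; the boolean parameter stands in for the httpTime() validity check:

#include <cassert>
#include <map>
#include <string>

using Headers = std::map<std::string, std::string>;

// `cached` holds the headers stored with the cache entry. Returns the
// conditional headers to add to the revalidation request sent upstream.
Headers buildConditionalHeaders(const Headers& cached, bool last_modified_is_valid_date) {
  Headers out;
  if (auto it = cached.find("etag"); it != cached.end()) {
    out["if-none-match"] = it->second;
  }
  if (last_modified_is_valid_date && cached.count("last-modified")) {
    out["if-modified-since"] = cached.at("last-modified");
  } else if (auto it = cached.find("date"); it != cached.end()) {
    out["if-modified-since"] = it->second;  // fallback when Last-Modified is missing or invalid
  }
  return out;
}

int main() {
  const Headers cached = {{"etag", "abc123"}, {"date", "Fri, 07 Aug 2020 10:00:00 GMT"}};
  const Headers cond = buildConditionalHeaders(cached, /*last_modified_is_valid_date=*/false);
  assert(cond.at("if-none-match") == "abc123");
  assert(cond.at("if-modified-since") == cached.at("date"));
}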
+ encoder_callbacks_->continueEncoding(); + } + filter_state_ = FilterState::ResponseServedFromCache; +} + } // namespace Cache } // namespace HttpFilters } // namespace Extensions diff --git a/source/extensions/filters/http/cache/cache_filter.h b/source/extensions/filters/http/cache/cache_filter.h index 78072c92e3cd..f873569289e0 100644 --- a/source/extensions/filters/http/cache/cache_filter.h +++ b/source/extensions/filters/http/cache/cache_filter.h @@ -6,6 +6,7 @@ #include #include "envoy/extensions/filters/http/cache/v3alpha/cache.pb.h" +#include "envoy/http/header_map.h" #include "common/common/logger.h" @@ -38,19 +39,50 @@ class CacheFilter : public Http::PassThroughFilter, Http::FilterDataStatus encodeData(Buffer::Instance& buffer, bool end_stream) override; private: + // Utility functions: make any necessary checks and call the corresponding lookup_ functions. void getBody(); - void onHeaders(LookupResult&& result); + void getTrailers(); + + // Callbacks for HttpCache to call when headers/body/trailers are ready. + void onHeaders(LookupResult&& result, Http::RequestHeaderMap& request_headers); void onBody(Buffer::InstancePtr&& body); void onTrailers(Http::ResponseTrailerMapPtr&& trailers); + // Precondition: lookup_result_ points to a cache lookup result that requires validation. + // filter_state_ is ValidatingCachedResponse. + // Serves a validated cached response after updating it with a 304 response. + void processSuccessfulValidation(Http::ResponseHeaderMap& response_headers); + + // Precondition: lookup_result_ points to a cache lookup result that requires validation. + // filter_state_ is ValidatingCachedResponse. + // Checks if a cached entry should be updated with a 304 response. + bool shouldUpdateCachedEntry(const Http::ResponseHeaderMap& response_headers) const; + + // Precondition: lookup_result_ points to a cache lookup result that requires validation. + // Should only be called during onHeaders as it modifies RequestHeaderMap. + // Adds required conditional headers for cache validation to the request headers + // according to the present cache lookup result headers. + void injectValidationHeaders(Http::RequestHeaderMap& request_headers); + + // Precondition: lookup_result_ points to a fresh or validated cache look up result. + // filter_state_ is ValidatingCachedResponse. + // Adds a cache lookup result to the response encoding stream. + // Can be called during decoding if a valid cache hit is found, + // or during encoding if a cache entry was validated successfully. + void encodeCachedResponse(); + + // Precondition: finished adding a response from cache to the response encoding stream. + // Updates filter_state_ and continues the encoding stream if necessary. + void finalizeEncodingCachedResponse(); + TimeSource& time_source_; HttpCache& cache_; LookupContextPtr lookup_; InsertContextPtr insert_; + LookupResultPtr lookup_result_; - // Tracks what body bytes still need to be read from the cache. This is - // currently only one Range, but will expand when full range support is added. Initialized by - // onOkHeaders. + // Tracks what body bytes still need to be read from the cache. This is currently only one Range, + // but will expand when full range support is added. Initialized by encodeCachedResponse. std::vector remaining_body_; // True if the response has trailers. 
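The remaining_body_ member described above drives the chunked read-back of a cached body: each onBody() callback trims the front of the outstanding byte range and then either fetches more body, fetches trailers, or finishes the response. A simplified single-range sketch, using a std::pair in place of the byte-range type the filter actually stores:

#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>

// One half-open byte range [first, second) still to be fetched from the cache.
using Range = std::pair<uint64_t, uint64_t>;

// Consume a chunk of `len` bytes from the front of the outstanding ranges.
// Returns true while more body remains to be fetched.
bool consumeChunk(std::vector<Range>& remaining, uint64_t len) {
  assert(!remaining.empty() && len <= remaining.front().second - remaining.front().first);
  remaining.front().first += len;
  if (remaining.front().first == remaining.front().second) {
    remaining.erase(remaining.begin());
  }
  return !remaining.empty();
}

int main() {
  std::vector<Range> remaining = {{0, 42}};  // e.g. a 42-byte cached body
  bool more = consumeChunk(remaining, 10);   // 32 bytes still to fetch
  assert(more);
  more = consumeChunk(remaining, 32);        // done: fetch trailers or end the stream
  assert(!more);
}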
@@ -61,9 +93,32 @@ class CacheFilter : public Http::PassThroughFilter, // https://httpwg.org/specs/rfc7234.html#response.cacheability bool request_allows_inserts_ = false; - // Used for coordinating between decodeHeaders and onHeaders. - enum class GetHeadersState { Initial, FinishedGetHeadersCall, GetHeadersResultUnusable }; - GetHeadersState state_ = GetHeadersState::Initial; + enum class FilterState { + Initial, + + // CacheFilter::decodeHeaders called lookup->getHeaders() but onHeaders was not called yet + // (lookup result not ready) -- the decoding stream should be stopped until the cache lookup + // result is ready. + WaitingForCacheLookup, + + // CacheFilter::encodeHeaders called encodeCachedResponse() but encoding the cached response is + // not finished yet -- the encoding stream should be stopped until it is finished. + WaitingForCacheBody, + + // Cache lookup did not find a cached response for this request. + NoCachedResponseFound, + + // Cache lookup found a cached response that requires validation. + ValidatingCachedResponse, + + // Cache lookup found a fresh cached response and it is being added to the encoding stream. + DecodeServingFromCache, + + // The cached response was successfully added to the encoding stream (either during decoding or + // encoding). + ResponseServedFromCache + }; + FilterState filter_state_ = FilterState::Initial; }; } // namespace Cache diff --git a/source/extensions/filters/http/cache/cache_headers_utils.cc b/source/extensions/filters/http/cache/cache_headers_utils.cc index f33161cdf220..27d08bde0088 100644 --- a/source/extensions/filters/http/cache/cache_headers_utils.cc +++ b/source/extensions/filters/http/cache/cache_headers_utils.cc @@ -15,19 +15,19 @@ namespace Extensions { namespace HttpFilters { namespace Cache { -// Utility functions used in RequestCacheControl & ResponseCacheControl +// Utility functions used in RequestCacheControl & ResponseCacheControl. namespace { // A directive with an invalid duration is ignored, the RFC does not specify a behavior: // https://httpwg.org/specs/rfc7234.html#delta-seconds OptionalDuration parseDuration(absl::string_view s) { OptionalDuration duration; - // Strip quotation marks if any + // Strip quotation marks if any. if (s.size() > 1 && s.front() == '"' && s.back() == '"') { s = s.substr(1, s.size() - 2); } long num; if (absl::SimpleAtoi(s, &num) && num >= 0) { - // s is a valid string of digits representing a positive number + // s is a valid string of digits representing a positive number. duration = std::chrono::seconds(num); } return duration; @@ -87,12 +87,12 @@ ResponseCacheControl::ResponseCacheControl(absl::string_view cache_control_heade std::tie(directive, argument) = separateDirectiveAndArgument(full_directive); if (directive == "no-cache") { - // If no-cache directive has arguments they are ignored - not handled + // If no-cache directive has arguments they are ignored - not handled. must_validate_ = true; } else if (directive == "must-revalidate" || directive == "proxy-revalidate") { no_stale_ = true; } else if (directive == "no-store" || directive == "private") { - // If private directive has arguments they are ignored - not handled + // If private directive has arguments they are ignored - not handled. 
no_store_ = true; } else if (directive == "no-transform") { no_transform_ = true; @@ -151,12 +151,12 @@ SystemTime CacheHeadersUtils::httpTime(const Http::HeaderEntry* header_entry) { absl::Time time; const std::string input(header_entry->value().getStringView()); - // Acceptable Date/Time Formats per + // Acceptable Date/Time Formats per: // https://tools.ietf.org/html/rfc7231#section-7.1.1.1 // - // Sun, 06 Nov 1994 08:49:37 GMT ; IMF-fixdate - // Sunday, 06-Nov-94 08:49:37 GMT ; obsolete RFC 850 format - // Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format + // Sun, 06 Nov 1994 08:49:37 GMT ; IMF-fixdate. + // Sunday, 06-Nov-94 08:49:37 GMT ; obsolete RFC 850 format. + // Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format. static const char* rfc7231_date_formats[] = {"%a, %d %b %Y %H:%M:%S GMT", "%A, %d-%b-%y %H:%M:%S GMT", "%a %b %e %H:%M:%S %Y"}; @@ -178,7 +178,7 @@ absl::optional CacheHeadersUtils::readAndRemoveLeadingDigits(absl::str } uint64_t new_val = (val * 10) + (cur - '0'); if (new_val / 8 < val) { - // Overflow occurred + // Overflow occurred. return absl::nullopt; } val = new_val; @@ -186,7 +186,7 @@ absl::optional CacheHeadersUtils::readAndRemoveLeadingDigits(absl::str } if (bytes_consumed) { - // Consume some digits + // Consume some digits. str.remove_prefix(bytes_consumed); return val; } diff --git a/source/extensions/filters/http/cache/cacheability_utils.cc b/source/extensions/filters/http/cache/cacheability_utils.cc index fef63c201444..778fd574a09d 100644 --- a/source/extensions/filters/http/cache/cacheability_utils.cc +++ b/source/extensions/filters/http/cache/cacheability_utils.cc @@ -1,5 +1,7 @@ #include "extensions/filters/http/cache/cacheability_utils.h" +#include "envoy/http/header_map.h" + #include "common/common/macros.h" #include "common/common/utility.h" @@ -14,10 +16,18 @@ const absl::flat_hash_set& cacheableStatusCodes() { // https://tools.ietf.org/html/rfc7231#section-6.1, // https://tools.ietf.org/html/rfc7538#section-3, // https://tools.ietf.org/html/rfc7725#section-3 - // TODO(yosrym93): the list of cacheable status codes should be configurable + // TODO(yosrym93): the list of cacheable status codes should be configurable. CONSTRUCT_ON_FIRST_USE(absl::flat_hash_set, "200", "203", "204", "206", "300", "301", "308", "404", "405", "410", "414", "451", "501"); } + +const std::vector& conditionalHeaders() { + // As defined by: https://httpwg.org/specs/rfc7232.html#preconditions. + CONSTRUCT_ON_FIRST_USE( + std::vector, &Http::CustomHeaders::get().IfMatch, + &Http::CustomHeaders::get().IfNoneMatch, &Http::CustomHeaders::get().IfModifiedSince, + &Http::CustomHeaders::get().IfUnmodifiedSince, &Http::CustomHeaders::get().IfRange); +} } // namespace Http::RegisterCustomInlineHeader @@ -29,8 +39,20 @@ bool CacheabilityUtils::isCacheableRequest(const Http::RequestHeaderMap& headers const absl::string_view method = headers.getMethodValue(); const absl::string_view forwarded_proto = headers.getForwardedProtoValue(); const Http::HeaderValues& header_values = Http::Headers::get(); + + // Check if the request contains any conditional headers. + // For now, requests with conditional headers bypass the CacheFilter. + // This behavior does not cause any incorrect results, but may reduce the cache effectiveness. 
+ // If needed to be handled properly refer to: + // https://httpwg.org/specs/rfc7234.html#validation.received + for (auto conditional_header : conditionalHeaders()) { + if (headers.get(*conditional_header)) { + return false; + } + } + // TODO(toddmgreer): Also serve HEAD requests from cache. - // TODO(toddmgreer): Check all the other cache-related headers. + // Cache-related headers are checked in HttpCache::LookupRequest. return headers.Path() && headers.Host() && !headers.getInline(authorization_handle.handle()) && (method == header_values.MethodValues.Get) && (forwarded_proto == header_values.SchemeValues.Http || @@ -42,8 +64,8 @@ bool CacheabilityUtils::isCacheableResponse(const Http::ResponseHeaderMap& heade ResponseCacheControl response_cache_control(cache_control); // Only cache responses with explicit validation data, either: - // max-age or s-maxage cache-control directives with date header - // expires header + // max-age or s-maxage cache-control directives with date header. + // expires header. const bool has_validation_data = (headers.Date() && response_cache_control.max_age_.has_value()) || headers.get(Http::Headers::get().Expires); diff --git a/source/extensions/filters/http/cache/cacheability_utils.h b/source/extensions/filters/http/cache/cacheability_utils.h index 97f88aebbfbd..752e4f3f1155 100644 --- a/source/extensions/filters/http/cache/cacheability_utils.h +++ b/source/extensions/filters/http/cache/cacheability_utils.h @@ -11,17 +11,17 @@ namespace HttpFilters { namespace Cache { class CacheabilityUtils { public: - // Checks if a request can be served from cache, - // this does not depend on cache-control headers as + // Checks if a request can be served from cache. + // This does not depend on cache-control headers as // request cache-control headers only decide whether - // validation is required and whether the response can be cached + // validation is required and whether the response can be cached. static bool isCacheableRequest(const Http::RequestHeaderMap& headers); - // Checks if a response can be stored in cache + // Checks if a response can be stored in cache. // Note that if a request is not cacheable according to 'isCacheableRequest' - // then its response is also not cacheable + // then its response is also not cacheable. // Therefore, isCacheableRequest, isCacheableResponse and CacheFilter::request_allows_inserts_ - // together should cover https://httpwg.org/specs/rfc7234.html#response.cacheability + // together should cover https://httpwg.org/specs/rfc7234.html#response.cacheability. static bool isCacheableResponse(const Http::ResponseHeaderMap& headers); }; } // namespace Cache diff --git a/source/extensions/filters/http/cache/http_cache.cc b/source/extensions/filters/http/cache/http_cache.cc index 75d81e2d31e1..60e73ff89549 100644 --- a/source/extensions/filters/http/cache/http_cache.cc +++ b/source/extensions/filters/http/cache/http_cache.cc @@ -71,7 +71,7 @@ LookupRequest::LookupRequest(const Http::RequestHeaderMap& request_headers, Syst // TODO(toddmgreer): handle the resultant vector in CacheFilter::onOkHeaders. // Range Requests are only valid for GET requests if (request_headers.getMethodValue() == Http::Headers::get().MethodValues.Get) { - // TODO(cbdm): using a constant limit of 10 ranges, could make this into a parameter + // TODO(cbdm): using a constant limit of 10 ranges, could make this into a parameter. 
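Taken together, the cacheability checks above mean a response is stored only when its status code is on the cacheable list and it carries explicit expiration data: a max-age or s-maxage directive together with a Date header, or an Expires header; responses marked no-store (or private) are excluded by the parsed response cache-control. A reduced sketch of that rule; the flags stand in for the parsed headers and ResponseCacheControl, so this is an illustration rather than the filter's implementation:

#include <cassert>
#include <set>
#include <string>

struct ResponseInfo {
  std::string status;
  bool has_date = false;
  bool has_max_age = false;  // max-age or s-maxage present in Cache-Control
  bool has_expires = false;
  bool no_store = false;     // no-store or private present in Cache-Control
};

bool isCacheableResponse(const ResponseInfo& r) {
  static const std::set<std::string> cacheable_codes = {
      "200", "203", "204", "206", "300", "301", "308",
      "404", "405", "410", "414", "451", "501"};
  const bool has_validation_data = (r.has_date && r.has_max_age) || r.has_expires;
  return !r.no_store && cacheable_codes.count(r.status) > 0 && has_validation_data;
}

int main() {
  assert(isCacheableResponse({"200", true, true, false, false}));
  assert(!isCacheableResponse({"200", false, false, false, false}));  // no expiration data
  assert(!isCacheableResponse({"500", true, true, false, false}));    // status not cacheable
}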
const int RangeSpecifierLimit = 10; request_range_spec_ = RangeRequests::parseRanges(request_headers, RangeSpecifierLimit); } @@ -95,16 +95,16 @@ void LookupRequest::initializeRequestCacheControl(const Http::RequestHeaderMap& if (!cache_control.empty()) { request_cache_control_ = RequestCacheControl(cache_control); } else { - // According to: https://httpwg.org/specs/rfc7234.html#header.pragma + // According to: https://httpwg.org/specs/rfc7234.html#header.pragma, // when Cache-Control header is missing, "Pragma:no-cache" is equivalent to - // "Cache-Control:no-cache" Any other directives are ignored + // "Cache-Control:no-cache". Any other directives are ignored. request_cache_control_.must_validate_ = RequestCacheControl(pragma).must_validate_; } } bool LookupRequest::requiresValidation(const Http::ResponseHeaderMap& response_headers) const { // TODO(yosrym93): Store parsed response cache-control in cache instead of parsing it on every - // lookup + // lookup. const absl::string_view cache_control = response_headers.getInlineValue(response_cache_control_handle.handle()); const ResponseCacheControl response_cache_control(cache_control); @@ -112,7 +112,7 @@ bool LookupRequest::requiresValidation(const Http::ResponseHeaderMap& response_h const SystemTime response_time = CacheHeadersUtils::httpTime(response_headers.Date()); if (timestamp_ < response_time) { - // Response time is in the future, validate response + // Response time is in the future, validate response. return true; } @@ -121,14 +121,14 @@ bool LookupRequest::requiresValidation(const Http::ResponseHeaderMap& response_h request_cache_control_.max_age_.value() < response_age; if (response_cache_control.must_validate_ || request_cache_control_.must_validate_ || request_max_age_exceeded) { - // Either the request or response explicitly require validation or a request max-age requirement - // is not satisfied + // Either the request or response explicitly require validation, or a request max-age + // requirement is not satisfied. return true; } - // CacheabilityUtils::isCacheableResponse(..) guarantees that any cached response satisfies this + // CacheabilityUtils::isCacheableResponse(..) guarantees that any cached response satisfies this. // When date metadata injection for responses with no date - // is implemented, this ASSERT will need to be updated + // is implemented, this ASSERT will need to be updated. ASSERT((response_headers.Date() && response_cache_control.max_age_.has_value()) || response_headers.get(Http::Headers::get().Expires), "Cache entry does not have valid expiration data."); @@ -139,15 +139,15 @@ bool LookupRequest::requiresValidation(const Http::ResponseHeaderMap& response_h : CacheHeadersUtils::httpTime(response_headers.get(Http::Headers::get().Expires)); if (timestamp_ > expiration_time) { - // Response is stale, requires validation - // if the response does not allow being served stale - // or the request max-stale directive does not allow it + // Response is stale, requires validation if + // the response does not allow being served stale, + // or the request max-stale directive does not allow it. 
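The requiresValidation() logic in this hunk follows RFC 7234's freshness model: explicit validation directives or an exceeded request max-age force validation; a stale entry needs validation unless the request's max-stale allows the overshoot and the response permits being served stale; a fresh entry needs validation only when a min-fresh requirement is unsatisfied. Stripped of header parsing, the decision looks roughly like the sketch below (std::chrono, simplified stand-ins for the parsed cache-control structures, and none of the edge cases such as a response Date in the future):

#include <cassert>
#include <chrono>
#include <optional>

using Secs = std::chrono::seconds;

struct RequestCacheControl {
  bool must_validate = false;
  std::optional<Secs> max_age, max_stale, min_fresh;
};
struct ResponseCacheControl {
  bool must_validate = false;
  bool no_stale = false;  // must-revalidate / proxy-revalidate
};

// `age` is how long ago the response was generated; `freshness_lifetime` comes
// from max-age (or Expires minus Date).
bool requiresValidation(const RequestCacheControl& req, const ResponseCacheControl& resp,
                        Secs age, Secs freshness_lifetime) {
  if (resp.must_validate || req.must_validate || (req.max_age && *req.max_age < age)) {
    return true;
  }
  if (age > freshness_lifetime) {  // stale
    const bool allowed_by_max_stale =
        req.max_stale && *req.max_stale > age - freshness_lifetime;
    return resp.no_stale || !allowed_by_max_stale;
  }
  // Fresh: only an unsatisfied min-fresh requirement forces validation.
  return req.min_fresh && *req.min_fresh > freshness_lifetime - age;
}

int main() {
  assert(!requiresValidation({}, {}, Secs(10), Secs(3600)));   // fresh, no constraints
  assert(requiresValidation({}, {}, Secs(4000), Secs(3600)));  // stale, no max-stale
  assert(!requiresValidation({false, std::nullopt, Secs(600), std::nullopt}, {},
                             Secs(4000), Secs(3600)));         // stale but max-stale allows it
}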
const bool allowed_by_max_stale = request_cache_control_.max_stale_.has_value() && request_cache_control_.max_stale_.value() > timestamp_ - expiration_time; return response_cache_control.no_stale_ || !allowed_by_max_stale; } else { - // Response is fresh, requires validation only if there is an unsatisfied min-fresh requirement + // Response is fresh, requires validation only if there is an unsatisfied min-fresh requirement. const bool min_fresh_unsatisfied = request_cache_control_.min_fresh_.has_value() && request_cache_control_.min_fresh_.value() > expiration_time - timestamp_; @@ -190,7 +190,7 @@ bool adjustByteRangeSet(std::vector& response_ranges, for (const RawByteRange& spec : request_range_spec) { if (spec.isSuffix()) { - // spec is a suffix-byte-range-spec + // spec is a suffix-byte-range-spec. if (spec.suffixLength() == 0) { // This range is unsatisfiable, so skip it. continue; diff --git a/source/extensions/filters/http/cache/http_cache.h b/source/extensions/filters/http/cache/http_cache.h index 760dfa4f835f..907a8d02be96 100644 --- a/source/extensions/filters/http/cache/http_cache.h +++ b/source/extensions/filters/http/cache/http_cache.h @@ -151,6 +151,7 @@ struct LookupResult { // True if the cached response has trailers. bool has_trailers_ = false; }; +using LookupResultPtr = std::unique_ptr; // Produces a hash of key that is consistent across restarts, architectures, // builds, and configurations. Caches that store persistent entries based on a @@ -305,8 +306,8 @@ class HttpCache { // // This is called when an expired cache entry is successfully validated, to // update the cache entry. - virtual void updateHeaders(LookupContextPtr&& lookup_context, - Http::ResponseHeaderMapPtr&& response_headers) PURE; + virtual void updateHeaders(const LookupContext& lookup_context, + const Http::ResponseHeaderMap& response_headers) PURE; // Returns statically known information about a cache. virtual CacheInfo cacheInfo() const PURE; diff --git a/source/extensions/filters/http/cache/simple_http_cache/simple_http_cache.cc b/source/extensions/filters/http/cache/simple_http_cache/simple_http_cache.cc index 5eadaa6a3692..ab2707c450c0 100644 --- a/source/extensions/filters/http/cache/simple_http_cache/simple_http_cache.cc +++ b/source/extensions/filters/http/cache/simple_http_cache/simple_http_cache.cc @@ -92,12 +92,10 @@ LookupContextPtr SimpleHttpCache::makeLookupContext(LookupRequest&& request) { return std::make_unique(*this, std::move(request)); } -void SimpleHttpCache::updateHeaders(LookupContextPtr&& lookup_context, - Http::ResponseHeaderMapPtr&& response_headers) { - ASSERT(lookup_context); - ASSERT(response_headers); +void SimpleHttpCache::updateHeaders(const LookupContext&, const Http::ResponseHeaderMap&) { // TODO(toddmgreer): Support updating headers. 
- NOT_IMPLEMENTED_GCOVR_EXCL_LINE; + // Not implemented yet, however this is called during tests + // NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } SimpleHttpCache::Entry SimpleHttpCache::lookup(const LookupRequest& request) { diff --git a/source/extensions/filters/http/cache/simple_http_cache/simple_http_cache.h b/source/extensions/filters/http/cache/simple_http_cache/simple_http_cache.h index 453deda6fed1..0223d6bd34b9 100644 --- a/source/extensions/filters/http/cache/simple_http_cache/simple_http_cache.h +++ b/source/extensions/filters/http/cache/simple_http_cache/simple_http_cache.h @@ -25,8 +25,8 @@ class SimpleHttpCache : public HttpCache { // HttpCache LookupContextPtr makeLookupContext(LookupRequest&& request) override; InsertContextPtr makeInsertContext(LookupContextPtr&& lookup_context) override; - void updateHeaders(LookupContextPtr&& lookup_context, - Http::ResponseHeaderMapPtr&& response_headers) override; + void updateHeaders(const LookupContext& lookup_context, + const Http::ResponseHeaderMap& response_headers) override; CacheInfo cacheInfo() const override; Entry lookup(const LookupRequest& request); diff --git a/test/extensions/filters/http/cache/cache_filter_integration_test.cc b/test/extensions/filters/http/cache/cache_filter_integration_test.cc index 21264d1d3f20..d4113c78c7a8 100644 --- a/test/extensions/filters/http/cache/cache_filter_integration_test.cc +++ b/test/extensions/filters/http/cache/cache_filter_integration_test.cc @@ -89,7 +89,153 @@ TEST_P(CacheIntegrationTest, MissInsertHit) { fmt::format("RFCF cache.response_from_cache_filter{}", TestEnvironment::newLine)); } -// Send the same GET request twice with body and trailers twice, then check that the response +TEST_P(CacheIntegrationTest, SuccessfulValidation) { + useAccessLog("%RESPONSE_FLAGS% %RESPONSE_CODE_DETAILS%"); + // Set system time to cause Envoy's cached formatted time to match time on this thread. + simTime().setSystemTime(std::chrono::hours(1)); + initializeFilter(default_config); + + // Include test name and params in URL to make each test's requests unique. + const Http::TestRequestHeaderMapImpl request_headers = { + {":method", "GET"}, + {":path", absl::StrCat("/", protocolTestParamsToString({GetParam(), 0}))}, + {":scheme", "http"}, + {":authority", "SuccessfulValidation"}}; + + const std::string original_response_date = formatter_.now(simTime()); + Http::TestResponseHeaderMapImpl response_headers = {{":status", "200"}, + {"date", original_response_date}, + {"cache-control", "max-age=0"}, + {"content-length", "42"}, + {"etag", "abc123"}}; + + // Send first request, and get response from upstream. + { + IntegrationStreamDecoderPtr response_decoder = + codec_client_->makeHeaderOnlyRequest(request_headers); + waitForNextUpstreamRequest(); + upstream_request_->encodeHeaders(response_headers, /*end_stream=*/false); + // send 42 'a's + upstream_request_->encodeData(42, true); + // Wait for the response to be read by the codec client. 
+ response_decoder->waitForEndStream(); + EXPECT_TRUE(response_decoder->complete()); + EXPECT_THAT(response_decoder->headers(), IsSupersetOfHeaders(response_headers)); + EXPECT_EQ(response_decoder->headers().get(Http::Headers::get().Age), nullptr); + EXPECT_EQ(response_decoder->body(), std::string(42, 'a')); + EXPECT_EQ(waitForAccessLog(access_log_name_), "- via_upstream\n"); + } + + simTime().advanceTimeWait(std::chrono::seconds(10)); + const std::string not_modified_date = formatter_.now(simTime()); + + // Send second request, the cached response should be validated then served. + IntegrationStreamDecoderPtr response_decoder = + codec_client_->makeHeaderOnlyRequest(request_headers); + waitForNextUpstreamRequest(); + + // Check for injected conditional headers -- no "Last-Modified" header so should fallback to + // "Date". + Http::TestRequestHeaderMapImpl injected_headers = {{"if-none-match", "abc123"}, + {"if-modified-since", original_response_date}}; + EXPECT_THAT(upstream_request_->headers(), IsSupersetOfHeaders(injected_headers)); + + // Create a 304 (not modified) response -> cached response is valid. + Http::TestResponseHeaderMapImpl not_modified_response_headers = {{":status", "304"}, + {"date", not_modified_date}}; + upstream_request_->encodeHeaders(not_modified_response_headers, /*end_stream=*/true); + + // The original response headers should be updated with 304 response headers. + response_headers.setDate(not_modified_date); + + // Wait for the response to be read by the codec client. + response_decoder->waitForEndStream(); + + // Check that the served response is the cached response. + EXPECT_TRUE(response_decoder->complete()); + EXPECT_THAT(response_decoder->headers(), IsSupersetOfHeaders(response_headers)); + EXPECT_EQ(response_decoder->body(), std::string(42, 'a')); + // Check that age header exists as this is a cached response. + EXPECT_NE(response_decoder->headers().get(Http::Headers::get().Age), nullptr); + + // Advance time to force a log flush. + simTime().advanceTimeWait(std::chrono::seconds(1)); + EXPECT_EQ(waitForAccessLog(access_log_name_, 1), "RFCF cache.response_from_cache_filter\n"); +} + +TEST_P(CacheIntegrationTest, UnsuccessfulValidation) { + useAccessLog("%RESPONSE_FLAGS% %RESPONSE_CODE_DETAILS%"); + // Set system time to cause Envoy's cached formatted time to match time on this thread. + simTime().setSystemTime(std::chrono::hours(1)); + initializeFilter(default_config); + + // Include test name and params in URL to make each test's requests unique. + const Http::TestRequestHeaderMapImpl request_headers = { + {":method", "GET"}, + {":path", absl::StrCat("/", protocolTestParamsToString({GetParam(), 0}))}, + {":scheme", "http"}, + {":authority", "UnsuccessfulValidation"}}; + + Http::TestResponseHeaderMapImpl original_response_headers = {{":status", "200"}, + {"date", formatter_.now(simTime())}, + {"cache-control", "max-age=0"}, + {"content-length", "10"}, + {"etag", "a1"}}; + + // Send first request, and get response from upstream. + { + IntegrationStreamDecoderPtr response_decoder = + codec_client_->makeHeaderOnlyRequest(request_headers); + waitForNextUpstreamRequest(); + upstream_request_->encodeHeaders(original_response_headers, /*end_stream=*/false); + // send 10 'a's + upstream_request_->encodeData(10, true); + // Wait for the response to be read by the codec client. 
+ response_decoder->waitForEndStream(); + EXPECT_TRUE(response_decoder->complete()); + EXPECT_THAT(response_decoder->headers(), IsSupersetOfHeaders(original_response_headers)); + EXPECT_EQ(response_decoder->headers().get(Http::Headers::get().Age), nullptr); + EXPECT_EQ(response_decoder->body(), std::string(10, 'a')); + EXPECT_EQ(waitForAccessLog(access_log_name_), "- via_upstream\n"); + } + + simTime().advanceTimeWait(std::chrono::seconds(10)); + // Any response with status other than 304 should be passed to the client as-is. + Http::TestResponseHeaderMapImpl updated_response_headers = {{":status", "200"}, + {"date", formatter_.now(simTime())}, + {"cache-control", "max-age=0"}, + {"content-length", "20"}, + {"etag", "a2"}}; + + // Send second request, validation of the cached response should be attempted but should fail. + IntegrationStreamDecoderPtr response_decoder = + codec_client_->makeHeaderOnlyRequest(request_headers); + waitForNextUpstreamRequest(); + + // Check for injected precondition headers. + Http::TestRequestHeaderMapImpl injected_headers = {{"if-none-match", "a1"}}; + EXPECT_THAT(upstream_request_->headers(), IsSupersetOfHeaders(injected_headers)); + + // Reply with the updated response -> cached response is invalid. + upstream_request_->encodeHeaders(updated_response_headers, /*end_stream=*/false); + // send 20 'a's + upstream_request_->encodeData(20, true); + + // Wait for the response to be read by the codec client. + response_decoder->waitForEndStream(); + // Check that the served response is the updated response. + EXPECT_TRUE(response_decoder->complete()); + EXPECT_THAT(response_decoder->headers(), IsSupersetOfHeaders(updated_response_headers)); + EXPECT_EQ(response_decoder->body(), std::string(20, 'a')); + // Check that age header does not exist as this is not a cached response. + EXPECT_EQ(response_decoder->headers().get(Http::Headers::get().Age), nullptr); + + // Advance time to force a log flush. + simTime().advanceTimeWait(std::chrono::seconds(1)); + EXPECT_EQ(waitForAccessLog(access_log_name_, 1), "- via_upstream\n"); +} + +// Send the same GET request with body and trailers twice, then check that the response // doesn't have an age header, to confirm that it wasn't served from cache. TEST_P(CacheIntegrationTest, GetRequestWithBodyAndTrailers) { // Set system time to cause Envoy's cached formatted time to match time on this thread. diff --git a/test/extensions/filters/http/cache/cache_filter_test.cc b/test/extensions/filters/http/cache/cache_filter_test.cc index 67bcd808f7ca..0ab4034cc799 100644 --- a/test/extensions/filters/http/cache/cache_filter_test.cc +++ b/test/extensions/filters/http/cache/cache_filter_test.cc @@ -79,21 +79,21 @@ TEST_F(CacheFilterTest, ImmediateHitNoBody) { ON_CALL(context_.dispatcher_, post(_)).WillByDefault(::testing::InvokeArgument<0>()); { - // Create filter for request 1 + // Create filter for request 1. CacheFilter filter = makeFilter(simple_cache_); - // Decode request 1 header + // Decode request 1 header. EXPECT_EQ(filter.decodeHeaders(request_headers_, true), Http::FilterHeadersStatus::Continue); - // Encode response header + // Encode response header. EXPECT_EQ(filter.encodeHeaders(response_headers_, true), Http::FilterHeadersStatus::Continue); filter.onDestroy(); } { - // Create filter for request 2 + // Create filter for request 2. CacheFilter filter = makeFilter(simple_cache_); - // Decode request 2 header + // Decode request 2 header. 
EXPECT_CALL(decoder_callbacks_, encodeHeaders_(testing::AllOf(IsSupersetOfHeaders(response_headers_), HeaderHasValueRef("age", "0")), @@ -111,25 +111,25 @@ TEST_F(CacheFilterTest, DelayedHitNoBody) { ON_CALL(context_.dispatcher_, post(_)).WillByDefault(::testing::InvokeArgument<0>()); { - // Create filter for request 1 + // Create filter for request 1. CacheFilter filter = makeFilter(delayed_cache_); - // Decode request 1 header + // Decode request 1 header. EXPECT_EQ(filter.decodeHeaders(request_headers_, true), Http::FilterHeadersStatus::StopAllIterationAndWatermark); EXPECT_CALL(decoder_callbacks_, continueDecoding); delayed_cache_.delayed_cb_(); ::testing::Mock::VerifyAndClearExpectations(&decoder_callbacks_); - // Encode response header + // Encode response header. EXPECT_EQ(filter.encodeHeaders(response_headers_, true), Http::FilterHeadersStatus::Continue); filter.onDestroy(); } { - // Create filter for request 2 + // Create filter for request 2. CacheFilter filter = makeFilter(delayed_cache_); - // Decode request 2 header + // Decode request 2 header. EXPECT_EQ(filter.decodeHeaders(request_headers_, true), Http::FilterHeadersStatus::StopAllIterationAndWatermark); EXPECT_CALL(decoder_callbacks_, @@ -149,13 +149,13 @@ TEST_F(CacheFilterTest, ImmediateHitBody) { const std::string body = "abc"; { - // Create filter for request 1 + // Create filter for request 1. CacheFilter filter = makeFilter(simple_cache_); - // Decode request 1 header + // Decode request 1 header. EXPECT_EQ(filter.decodeHeaders(request_headers_, true), Http::FilterHeadersStatus::Continue); - // Encode response header + // Encode response header. Buffer::OwnedImpl buffer(body); response_headers_.setContentLength(body.size()); EXPECT_EQ(filter.encodeHeaders(response_headers_, false), Http::FilterHeadersStatus::Continue); @@ -163,7 +163,7 @@ TEST_F(CacheFilterTest, ImmediateHitBody) { filter.onDestroy(); } { - // Create filter for request 2 + // Create filter for request 2. CacheFilter filter = makeFilter(simple_cache_); // Decode request 2 header diff --git a/test/extensions/filters/http/cache/cache_headers_utils_test.cc b/test/extensions/filters/http/cache/cache_headers_utils_test.cc index a8873346de7d..dd3f0a78e52b 100644 --- a/test/extensions/filters/http/cache/cache_headers_utils_test.cc +++ b/test/extensions/filters/http/cache/cache_headers_utils_test.cc @@ -268,15 +268,15 @@ class ResponseCacheControlTest : public testing::TestWithParam { public: static const std::vector& getOkTestCases() { // clang-format off CONSTRUCT_ON_FIRST_USE(std::vector, - "Sun, 06 Nov 1994 08:49:37 GMT", // IMF-fixdate - "Sunday, 06-Nov-94 08:49:37 GMT", // obsolete RFC 850 format - "Sun Nov 6 08:49:37 1994" // ANSI C's asctime() format + "Sun, 06 Nov 1994 08:49:37 GMT", // IMF-fixdate. + "Sunday, 06-Nov-94 08:49:37 GMT", // obsolete RFC 850 format. + "Sun Nov 6 08:49:37 1994" // ANSI C's asctime() format. 
); // clang-format on } diff --git a/test/extensions/filters/http/cache/cacheability_utils_test.cc b/test/extensions/filters/http/cache/cacheability_utils_test.cc index cdd049f5884f..f4647e8bfc3f 100644 --- a/test/extensions/filters/http/cache/cacheability_utils_test.cc +++ b/test/extensions/filters/http/cache/cacheability_utils_test.cc @@ -1,3 +1,5 @@ +#include "envoy/http/header_map.h" + #include "extensions/filters/http/cache/cacheability_utils.h" #include "test/test_common/utility.h" @@ -10,7 +12,7 @@ namespace HttpFilters { namespace Cache { namespace { -class IsCacheableRequestTest : public testing::Test { +class IsCacheableRequestTest : public testing::TestWithParam { protected: const Http::TestRequestHeaderMapImpl cacheable_request_headers_ = {{":path", "/"}, {":method", "GET"}, @@ -74,6 +76,17 @@ TEST_F(IsCacheableRequestTest, AuthorizationHeader) { EXPECT_FALSE(CacheabilityUtils::isCacheableRequest(request_headers)); } +INSTANTIATE_TEST_SUITE_P(ConditionalHeaders, IsCacheableRequestTest, + testing::Values("if-match", "if-none-match", "if-modified-since", + "if-unmodified-since", "if-range")); + +TEST_P(IsCacheableRequestTest, ConditionalHeaders) { + Http::TestRequestHeaderMapImpl request_headers = cacheable_request_headers_; + EXPECT_TRUE(CacheabilityUtils::isCacheableRequest(request_headers)); + request_headers.setCopy(Http::LowerCaseString{GetParam()}, "test-value"); + EXPECT_FALSE(CacheabilityUtils::isCacheableRequest(request_headers)); +} + TEST_F(IsCacheableResponseTest, CacheableResponse) { EXPECT_TRUE(CacheabilityUtils::isCacheableResponse(cacheable_response_headers_)); } diff --git a/test/extensions/filters/http/cache/http_cache_test.cc b/test/extensions/filters/http/cache/http_cache_test.cc index cf6ada24cac1..fac7c099f73d 100644 --- a/test/extensions/filters/http/cache/http_cache_test.cc +++ b/test/extensions/filters/http/cache/http_cache_test.cc @@ -159,7 +159,7 @@ TEST_F(LookupRequestTest, NotExpiredViaFallbackheader) { } // If request Cache-Control header is missing, -// "Pragma:no-cache" is equivalent to "Cache-Control:no-cache" +// "Pragma:no-cache" is equivalent to "Cache-Control:no-cache". // https://httpwg.org/specs/rfc7234.html#header.pragma TEST_F(LookupRequestTest, PragmaNoCacheFallback) { request_headers_.addCopy("pragma", "no-cache"); @@ -167,7 +167,7 @@ TEST_F(LookupRequestTest, PragmaNoCacheFallback) { const Http::TestResponseHeaderMapImpl response_headers( {{"date", formatter_.fromTime(current_time_)}, {"cache-control", "public, max-age=3600"}}); const LookupResult lookup_response = makeLookupResult(lookup_request, response_headers); - // Response is not expired but the request requires revalidation through Pragma: no-cache + // Response is not expired but the request requires revalidation through Pragma: no-cache. EXPECT_EQ(CacheEntryStatus::RequiresValidation, lookup_response.cache_entry_status_); } @@ -177,7 +177,7 @@ TEST_F(LookupRequestTest, PragmaNoCacheFallbackExtraDirectivesIgnored) { const Http::TestResponseHeaderMapImpl response_headers( {{"date", formatter_.fromTime(current_time_)}, {"cache-control", "public, max-age=3600"}}); const LookupResult lookup_response = makeLookupResult(lookup_request, response_headers); - // Response is not expired but the request requires revalidation through Pragma: no-cache + // Response is not expired but the request requires revalidation through Pragma: no-cache. 
EXPECT_EQ(CacheEntryStatus::RequiresValidation, lookup_response.cache_entry_status_); } @@ -187,7 +187,7 @@ TEST_F(LookupRequestTest, PragmaFallbackOtherValuesIgnored) { const Http::TestResponseHeaderMapImpl response_headers( {{"date", formatter_.fromTime(current_time_)}, {"cache-control", "public, max-age=3600"}}); const LookupResult lookup_response = makeLookupResult(lookup_request, response_headers); - // Response is fresh, Pragma header with values other than "no-cache" is ignored + // Response is fresh, Pragma header with values other than "no-cache" is ignored. EXPECT_EQ(CacheEntryStatus::Ok, lookup_response.cache_entry_status_); } @@ -198,7 +198,7 @@ TEST_F(LookupRequestTest, PragmaNoFallback) { const Http::TestResponseHeaderMapImpl response_headers( {{"date", formatter_.fromTime(current_time_)}, {"cache-control", "public, max-age=3600"}}); const LookupResult lookup_response = makeLookupResult(lookup_request, response_headers); - // Pragma header is ignored when Cache-Control header is present + // Pragma header is ignored when Cache-Control header is present. EXPECT_EQ(CacheEntryStatus::Ok, lookup_response.cache_entry_status_); } From fdab03e5c269d02b4d706e07b04287392c86caa2 Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Fri, 7 Aug 2020 09:24:28 -0700 Subject: [PATCH 880/909] repokitteh: implement retest azp (#12402) Signed-off-by: Lizan Zhou --- ci/repokitteh/modules/azure_pipelines.star | 49 ++++++++++++++++++++++ repokitteh.star | 4 +- 2 files changed, 52 insertions(+), 1 deletion(-) create mode 100644 ci/repokitteh/modules/azure_pipelines.star diff --git a/ci/repokitteh/modules/azure_pipelines.star b/ci/repokitteh/modules/azure_pipelines.star new file mode 100644 index 000000000000..dc619e06d226 --- /dev/null +++ b/ci/repokitteh/modules/azure_pipelines.star @@ -0,0 +1,49 @@ +load("github.com/repokitteh/modules/lib/utils.star", "react") + +_azp_context_prefix = "ci/azp: " + +def _retry_azp(organization, project, build_id, token): + """Makes an Azure Pipelines Build API request with retry""" + + url = "https://dev.azure.com/{organization}/{project}/_apis/build/builds/{buildId}?retry=true&api-version=5.1".format(organization = organization, project = project, buildId = build_id) + return http(url = url, method = "PATCH", headers = { + "authorization": "Basic " + token, + "content-type": "application/json;odata=verbose", + }) + +def _get_azp_checks(): + github_checks = github.check_list_runs()["check_runs"] + + check_ids = [] + checks = [] + for check in github_checks: + if check["app"]["slug"] == "azure-pipelines" and check["external_id"] not in check_ids: + check_ids.append(check["external_id"]) + checks.append(check) + + return checks + +def _retry(config, comment_id, command): + msgs = "Retrying Azure Pipelines, to retry CircleCI checks, use `/retest-circle`.\n" + checks = _get_azp_checks() + + retried_checks = [] + for check in checks: + name_with_link = "[{}]({})".format(check["name"], check["details_url"]) + if check["status"] != "completed": + msgs += "Cannot retry non-completed check: {}, please wait.\n".format(name_with_link) + elif check["conclusion"] != "failure": + msgs += "Check {} didn't fail.\n".format(name_with_link) + else: + _, build_id, project = check["external_id"].split("|") + _retry_azp("cncf", project, build_id, config["token"]) + retried_checks.append(name_with_link) + + if len(retried_checks) == 0: + react(comment_id, msgs) + else: + react(comment_id, None) + msgs += "Retried failed jobs in: {}".format(", ".join(retried_checks)) + 
github.issue_create_comment(msgs) + +handlers.command(name = "retry-azp", func = _retry) diff --git a/repokitteh.star b/repokitteh.star index e902d9eae2ea..cf2385c1dfde 100644 --- a/repokitteh.star +++ b/repokitteh.star @@ -4,6 +4,7 @@ use("github.com/repokitteh/modules/assign.star") use("github.com/repokitteh/modules/review.star") use("github.com/repokitteh/modules/wait.star") use("github.com/repokitteh/modules/circleci.star", secret_token=get_secret('circle_token')) +use("github.com/envoyproxy/envoy/ci/repokitteh/modules/azure_pipelines.star", secret_token=get_secret('azp_token')) use( "github.com/envoyproxy/envoy/ci/repokitteh/modules/ownerscheck.star", paths=[ @@ -28,7 +29,8 @@ use( ], ) -alias('retest', 'retry-circle') +alias('retest-circle', 'retry-circle') +alias('retest', 'retry-azp') def _backport(): github.issue_label('backport/review') From 6d9e2ed1f390aec4788068811a1fc30be3944e8c Mon Sep 17 00:00:00 2001 From: danzh Date: Fri, 7 Aug 2020 12:31:42 -0400 Subject: [PATCH 881/909] quiche: update tar (#12525) Signed-off-by: Dan Zhang --- bazel/external/quiche.BUILD | 19 ++ bazel/repository_locations.bzl | 6 +- .../quiche/envoy_quic_dispatcher.cc | 5 +- .../quiche/envoy_quic_dispatcher.h | 1 + .../quiche/platform/flags_list.h | 182 ++++++++++-------- test/extensions/quic_listeners/quiche/BUILD | 1 + .../quiche/active_quic_listener_test.cc | 2 +- .../quiche/envoy_quic_client_session_test.cc | 2 +- .../quiche/envoy_quic_client_stream_test.cc | 2 +- .../quiche/envoy_quic_dispatcher_test.cc | 2 +- .../quiche/envoy_quic_server_session_test.cc | 2 +- .../quiche/envoy_quic_server_stream_test.cc | 3 +- .../integration/quic_http_integration_test.cc | 2 +- 13 files changed, 139 insertions(+), 90 deletions(-) diff --git a/bazel/external/quiche.BUILD b/bazel/external/quiche.BUILD index 2ec3f85a4e67..50f9f8443c21 100644 --- a/bazel/external/quiche.BUILD +++ b/bazel/external/quiche.BUILD @@ -3731,6 +3731,25 @@ envoy_cc_test_library( ], ) +envoy_cc_test_library( + name = "quic_test_tools_session_peer_lib", + srcs = [ + "quiche/quic/test_tools/quic_session_peer.cc", + ], + hdrs = [ + "quiche/quic/test_tools/quic_session_peer.h", + ], + copts = quiche_copts, + repository = "@envoy", + tags = ["nofips"], + deps = [ + ":quic_core_packets_lib", + ":quic_core_session_lib", + ":quic_core_utils_lib", + ":quic_platform", + ], +) + envoy_cc_test_library( name = "quic_test_tools_unacked_packet_map_peer_lib", srcs = ["quiche/quic/test_tools/quic_unacked_packet_map_peer.cc"], diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 145da07e7f25..14c8b6d625be 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -410,9 +410,9 @@ DEPENDENCY_REPOSITORIES = dict( cpe = "N/A", ), com_googlesource_quiche = dict( - # Static snapshot of https://quiche.googlesource.com/quiche/+archive/b2b8ff25f5a565324b93411ca29c3403ccbca969.tar.gz - sha256 = "792924bbf27203bb0d1d08c99597a30793ef8f4cfa2df99792aea7200f1b27e3", - urls = ["https://storage.googleapis.com/quiche-envoy-integration/b2b8ff25f5a565324b93411ca29c3403ccbca969.tar.gz"], + # Static snapshot of https://quiche.googlesource.com/quiche/+archive/96bd860bec207d4b722ab7f319fa47be129a85cd.tar.gz + sha256 = "d7129a2f41f2bd00a8a38b33f9b7b955d3e7de3dec20f69b70d7000d3a856360", + urls = ["https://storage.googleapis.com/quiche-envoy-integration/96bd860bec207d4b722ab7f319fa47be129a85cd.tar.gz"], use_category = ["dataplane"], cpe = "N/A", ), diff --git 
a/source/extensions/quic_listeners/quiche/envoy_quic_dispatcher.cc b/source/extensions/quic_listeners/quiche/envoy_quic_dispatcher.cc index 08564b722580..ba8f7f3a8239 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_dispatcher.cc +++ b/source/extensions/quic_listeners/quiche/envoy_quic_dispatcher.cc @@ -48,8 +48,9 @@ void EnvoyQuicDispatcher::OnConnectionClosed(quic::QuicConnectionId connection_i } std::unique_ptr EnvoyQuicDispatcher::CreateQuicSession( - quic::QuicConnectionId server_connection_id, const quic::QuicSocketAddress& peer_address, - quiche::QuicheStringPiece /*alpn*/, const quic::ParsedQuicVersion& version) { + quic::QuicConnectionId server_connection_id, const quic::QuicSocketAddress& /*self_address*/, + const quic::QuicSocketAddress& peer_address, quiche::QuicheStringPiece /*alpn*/, + const quic::ParsedQuicVersion& version) { auto quic_connection = std::make_unique( server_connection_id, peer_address, *helper(), *alarm_factory(), writer(), /*owns_writer=*/false, quic::ParsedQuicVersionVector{version}, listen_socket_); diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_dispatcher.h b/source/extensions/quic_listeners/quiche/envoy_quic_dispatcher.h index ede0c5b42625..5921342b84bf 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_dispatcher.h +++ b/source/extensions/quic_listeners/quiche/envoy_quic_dispatcher.h @@ -59,6 +59,7 @@ class EnvoyQuicDispatcher : public quic::QuicDispatcher { protected: std::unique_ptr CreateQuicSession(quic::QuicConnectionId server_connection_id, + const quic::QuicSocketAddress& self_address, const quic::QuicSocketAddress& peer_address, quiche::QuicheStringPiece alpn, const quic::ParsedQuicVersion& version) override; diff --git a/source/extensions/quic_listeners/quiche/platform/flags_list.h b/source/extensions/quic_listeners/quiche/platform/flags_list.h index 776521f42d0d..587e80054c0a 100644 --- a/source/extensions/quic_listeners/quiche/platform/flags_list.h +++ b/source/extensions/quic_listeners/quiche/platform/flags_list.h @@ -17,6 +17,10 @@ QUICHE_FLAG( bool, http2_reloadable_flag_http2_backend_alpn_failure_error_code, false, "If true, the GFE will return a new ResponseCodeDetails error when ALPN to the backend fails.") +QUICHE_FLAG(bool, http2_reloadable_flag_http2_ip_based_cwnd_exp, false, + "If true, enable IP address based CWND bootstrapping experiment with different " + "bandwidth models and priorities in HTTP2.") + QUICHE_FLAG(bool, http2_reloadable_flag_http2_security_requirement_for_client3, false, "If true, check whether client meets security requirements during SSL handshake. If " "flag is true and client does not meet security requirements, do not negotiate HTTP/2 " @@ -31,10 +35,18 @@ QUICHE_FLAG(bool, quic_reloadable_flag_advertise_quic_for_https_for_debugips, fa QUICHE_FLAG(bool, quic_reloadable_flag_advertise_quic_for_https_for_external_users, false, "") +QUICHE_FLAG(bool, quic_reloadable_flag_gclb_quic_allow_alia, true, + "If gfe2_reloadable_flag_gclb_use_alia is also true, use Alia for GCLB QUIC " + "handshakes. 
To be used as a big red button if there's a problem with Alia/QUIC.") + QUICHE_FLAG(bool, quic_reloadable_flag_quic_ack_delay_alarm_granularity, false, "When true, ensure the ACK delay is never less than the alarm granularity when ACK " "decimation is enabled.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_add_silent_idle_timeout, false, + "If true, when server is silently closing connections due to idle timeout, serialize " + "the connection close packets which will be added to time wait list.") + QUICHE_FLAG(bool, quic_reloadable_flag_quic_allow_backend_set_stream_ttl, false, "If true, check backend response header for X-Response-Ttl. If it is provided, the " "stream TTL is set. A QUIC stream will be immediately canceled when tries to write " @@ -46,17 +58,6 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_allow_client_enabled_bbr_v2, true, QUICHE_FLAG(bool, quic_reloadable_flag_quic_alpn_dispatch, false, "Support different QUIC sessions, as indicated by ALPN. Used for QBONE.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_always_send_earliest_ack, false, - "If true, SendAllPendingAcks always send the earliest ACK.") - -QUICHE_FLAG( - bool, quic_reloadable_flag_quic_avoid_leak_writer_buffer, true, - "If true, QUIC will free writer-allocated packet buffer if writer->WritePacket is not called.") - -QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr2_add_ack_height_to_queueing_threshold, true, - "If true, QUIC BBRv2 to take ack height into account when calculating " - "queuing_threshold in PROBE_UP.") - QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr2_avoid_too_low_probe_bw_cwnd, false, "If true, QUIC BBRv2's PROBE_BW mode will not reduce cwnd below BDP+ack_height.") @@ -67,31 +68,36 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr2_fewer_startup_round_trips, fals QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr2_ignore_inflight_lo, false, "When true, QUIC's BBRv2 ignores inflight_lo in PROBE_BW.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr2_improve_adjust_network_parameters, false, + "If true, improve Bbr2Sender::AdjustNetworkParameters by 1) do not inject a bandwidth " + "sample to the bandwidth filter, and 2) re-calculate pacing rate after cwnd updated..") + QUICHE_FLAG( bool, quic_reloadable_flag_quic_bbr2_limit_inflight_hi, false, "When true, the B2HI connection option limits reduction of inflight_hi to (1-Beta)*CWND.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_donot_inject_bandwidth, true, - "If true, do not inject bandwidth in BbrSender::AdjustNetworkParameters.") - -QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_fix_pacing_rate, true, - "If true, re-calculate pacing rate when cwnd gets bootstrapped.") - QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_flexible_app_limited, false, "When true and the BBR9 connection option is present, BBR only considers bandwidth " "samples app-limited if they're not filling the pipe.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_mitigate_overly_large_bandwidth_sample, true, - "If true, when cwnd gets bootstrapped and causing badly overshoot, reset cwnd and " - "pacing rate based on measured bw.") - QUICHE_FLAG(bool, quic_reloadable_flag_quic_bbr_no_bytes_acked_in_startup_recovery, false, "When in STARTUP and recovery, do not add bytes_acked to QUIC BBR's CWND in " "CalculateCongestionWindow()") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_bootstrap_cwnd_by_gfe_bandwidth, false, + "If true, bootstrap initial QUIC cwnd by GFE measured bandwidth models.") + QUICHE_FLAG(bool, quic_reloadable_flag_quic_bootstrap_cwnd_by_spdy_priority, true, 
"If true, bootstrap initial QUIC cwnd by SPDY priorities.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_check_encryption_level_in_fast_path, false, + "If true, when data is sending in fast path mode in the creator, making sure stream " + "data is sent in the right encryption level.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_coalesced_packet_of_higher_space2, false, + "If true, try to coalesce packet of higher space with retransmissions to mitigate RTT " + "inflations.") + QUICHE_FLAG(bool, quic_reloadable_flag_quic_conservative_bursts, false, "If true, set burst token to 2 in cwnd bootstrapping experiment.") @@ -114,24 +120,24 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_default_to_bbr_v2, false, QUICHE_FLAG(bool, quic_reloadable_flag_quic_determine_serialized_packet_fate_early, false, "If true, determine a serialized packet's fate before the packet gets serialized.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_version_draft_25, false, +QUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_server_blackhole_detection, false, + "If true, disable blackhole detection on server side.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_version_draft_25, true, "If true, disable QUIC version h3-25.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_version_draft_27, false, "If true, disable QUIC version h3-27.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_version_draft_29, false, + "If true, disable QUIC version h3-29.") + QUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_version_q043, false, "If true, disable QUIC version Q043.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_version_q046, false, "If true, disable QUIC version Q046.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_version_q048, true, - "If true, disable QUIC version Q048.") - -QUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_version_q049, true, - "If true, disable QUIC version Q049.") - QUICHE_FLAG(bool, quic_reloadable_flag_quic_disable_version_q050, false, "If true, disable QUIC version Q050.") @@ -150,66 +156,66 @@ QUICHE_FLAG( bool, quic_reloadable_flag_quic_do_not_close_stream_again_on_connection_close, false, "If true, do not try to close stream again if stream fails to be closed upon connection close.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_do_not_use_stream_map, false, + "If true, QUIC subclasses will no longer directly access stream_map for its content.") + QUICHE_FLAG(bool, quic_reloadable_flag_quic_donot_reset_ideal_next_packet_send_time, false, "If true, stop resetting ideal_next_packet_send_time_ in pacing sender.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_dont_pad_chlo, false, +QUICHE_FLAG(bool, quic_reloadable_flag_quic_dont_pad_chlo, true, "When true, do not pad the QUIC_CRYPTO CHLO message itself. 
Note that the packet " "containing the CHLO will still be padded.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_dont_send_max_ack_delay_if_default, false, +QUICHE_FLAG(bool, quic_reloadable_flag_quic_dont_send_max_ack_delay_if_default, true, "When true, QUIC_CRYPTO versions of QUIC will not send the max ACK delay unless it is " "configured to a non-default value.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_ack_decimation, true, - "Default enables QUIC ack decimation and adds a connection option to disable it.") - QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_loss_detection_experiment_at_gfe, false, "If ture, enable GFE-picked loss detection experiment.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_loss_detection_tuner, false, "If true, allow QUIC loss detection tuning to be enabled by connection option ELDT.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_tls_resumption_v2, false, - "If true, enables support for TLS resumption in QUIC.") - -QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_version_draft_29, false, - "If true, enable QUIC version h3-29.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_overshooting_detection, false, + "If true, enable overshooting detection when the DTOS connection option is supplied.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_zero_rtt_for_tls, false, - "If true, support for IETF QUIC 0-rtt is enabled.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_enable_version_t051, false, + "If true, enable QUIC version h3-T051.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_enabled, false, "") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_bbr_cwnd_in_bandwidth_resumption, true, - "If true, adjust congestion window when doing bandwidth resumption in BBR.") - QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_extra_padding_bytes, false, "If true, consider frame expansion when calculating extra padding bytes to meet " "minimum plaintext packet size required for header protection.") -QUICHE_FLAG( - bool, quic_reloadable_flag_quic_fix_gquic_stream_type, false, - "If true, do not use QuicUtil::IsBidirectionalStreamId() to determine gQUIC stream type.") - -QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_min_crypto_frame_size, true, - "If true, include MinPlaintextPacketSize when deterine whether removing soft limit for " - "crypto frames.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_neuter_handshake_data, false, + "If true, fix a case where data is marked lost in HANDSHAKE level but HANDSHAKE key " + "gets decrypted later.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_packet_number_length, false, "If true, take the largest acked packet into account when computing the sent packet " "number length.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_pto_timeout, true, - "If true, use 0 as ack_delay when calculate PTO timeout for INITIAL and HANDSHAKE " - "packet number space.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_print_draft_version, false, + "When true, ParsedQuicVersionToString will print IETF drafts with format draft29 " + "instead of ff00001d.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_server_pto_timeout, true, - "If true, do not arm PTO on half RTT packets if they are the only ones in flight.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_get_stream_information_from_stream_map, false, + "If true, gQUIC will only consult stream_map in QuicSession::GetNumActiveStreams().") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_fix_undecryptable_packets, false, - "If true, remove the head of line blocking caused 
by an unprocessable packet in the " - "undecryptable packets list.") +QUICHE_FLAG( + bool, quic_reloadable_flag_quic_http3_goaway_new_behavior, false, + "If true, server accepts GOAWAY (draft-28 behavior), client receiving GOAWAY with stream ID " + "that is not client-initiated bidirectional stream ID closes connection with H3_ID_ERROR " + "(draft-28 behavior). Also, receiving a GOAWAY with ID larger than previously received closes " + "connection with H3_ID_ERROR. If false, server receiving GOAWAY closes connection with " + "H3_FRAME_UNEXPECTED (draft-27 behavior), client receiving GOAWAY with stream ID that is not " + "client-initiated bidirectional stream ID closes connection with PROTOCOL_VIOLATION (draft-04 " + "behavior), larger ID than previously received does not trigger connection close.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_ip_based_cwnd_exp, false, + "If true, enable IP address based CWND bootstrapping experiment with different " + "bandwidth models and priorities. ") QUICHE_FLAG(bool, quic_reloadable_flag_quic_listener_never_fake_epollout, false, "If true, QuicListener::OnSocketIsWritable will always return false, which means there " @@ -230,15 +236,43 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_record_frontend_service_vip_mapping, "If true, for L1 GFE, as requests come in, record frontend service to VIP mapping " "which is used to announce VIP in SHLO for proxied sessions. ") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_record_received_min_ack_delay, false, + "If true, record the received min_ack_delay in transport parameters to QUIC config.") + QUICHE_FLAG(bool, quic_reloadable_flag_quic_reject_all_traffic, false, "") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_remove_streams_waiting_for_acks, false, + "If true, QuicSession will no longer need streams_waiting_for_acks_.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_remove_unused_ack_options, false, + "Remove ACK_DECIMATION_WITH_REORDERING mode and fast_ack_after_quiescence option in " + "QUIC received packet manager.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_remove_zombie_streams, false, + "If true, QuicSession doesn't keep a separate zombie_streams. 
Instead, all streams are " + "stored in stream_map_.") + QUICHE_FLAG(bool, quic_reloadable_flag_quic_require_handshake_confirmation, false, "If true, require handshake confirmation for QUIC connections, functionally disabling " "0-rtt handshakes.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_save_user_agent_in_quic_session, false, +QUICHE_FLAG(bool, quic_reloadable_flag_quic_retransmit_handshake_data_early, false, + "If true, retransmit unacked handshake data before PTO expiry.") + +QUICHE_FLAG( + bool, quic_reloadable_flag_quic_revert_mtu_after_two_ptos, false, + "If true, QUIC connection will revert to a previously validated MTU(if exists) after two PTOs.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_save_user_agent_in_quic_session, true, "If true, save user agent into in QuicSession.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_send_early_data_header_to_backend, false, + "If true, for 0RTT IETF QUIC requests, GFE will append a Early-Data header and send it " + "to backend.") + +QUICHE_FLAG(bool, quic_reloadable_flag_quic_send_path_response, false, + "If true, send PATH_RESPONSE upon receiving PATH_CHALLENGE regardless of perspective.") + QUICHE_FLAG(bool, quic_reloadable_flag_quic_send_timestamps, false, "When the STMP connection option is sent by the client, timestamps in the QUIC ACK " "frame are sent and processed.") @@ -246,10 +280,10 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_send_timestamps, false, QUICHE_FLAG(bool, quic_reloadable_flag_quic_server_push, true, "If true, enable server push feature on QUIC.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_stop_sending_duplicate_max_streams, false, - "If true, session does not send duplicate MAX_STREAMS.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_simplify_received_packet_manager_ack, false, + "Simplify the ACK code in quic_received_packet_manager.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_support_handshake_done_in_t050, false, +QUICHE_FLAG(bool, quic_reloadable_flag_quic_support_handshake_done_in_t050, true, "If true, support HANDSHAKE_DONE frame in T050.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_testonly_default_false, false, @@ -265,9 +299,8 @@ QUICHE_FLAG(bool, quic_reloadable_flag_quic_unified_iw_options, false, QUICHE_FLAG(bool, quic_reloadable_flag_quic_update_packet_size, false, "If true, update packet size when the first frame gets queued.") -QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_dispatcher_clock_for_read_timestamp, true, - "If true, in QuicListener, use QuicDispatcher's clock as the source for packet read " - "timestamps.") +QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_half_rtt_as_first_pto, false, + "If true, when TLPR copt is used, enable half RTT as first PTO timeout.") QUICHE_FLAG(bool, quic_reloadable_flag_quic_use_header_stage_idle_list2, false, "If true, use header stage idle list for QUIC connections in GFE.") @@ -294,19 +327,16 @@ QUICHE_FLAG( bool, quic_restart_flag_quic_allow_loas_multipacket_chlo, false, "If true, inspects QUIC CHLOs for kLOAS and early creates sessions to allow multi-packet CHLOs") -QUICHE_FLAG(bool, quic_restart_flag_quic_dispatcher_track_top_1k_client_ip, true, - "If true, GfeQuicDispatcher will track the top 1000 client IPs.") +QUICHE_FLAG(bool, quic_restart_flag_quic_enable_tls_resumption_v4, false, + "If true, enables support for TLS resumption in QUIC.") + +QUICHE_FLAG(bool, quic_restart_flag_quic_enable_zero_rtt_for_tls_v2, false, + "If true, support for IETF QUIC 0-rtt is enabled.") -QUICHE_FLAG(bool, 
quic_restart_flag_quic_google_transport_param_omit_old, false, +QUICHE_FLAG(bool, quic_restart_flag_quic_google_transport_param_omit_old, true, "When true, QUIC+TLS will not send nor parse the old-format Google-specific transport " "parameters.") -QUICHE_FLAG(bool, quic_restart_flag_quic_ignore_cid_first_byte_in_rx_ring_bpf, true, - "If true, ignore CID first byte in BPF for RX_RING.") - -QUICHE_FLAG(bool, quic_restart_flag_quic_memslice_ensure_ownership, true, - "Call gfe2::MemSlice::EnsureReferenceCounted in the constructor of QuicMemSlice.") - QUICHE_FLAG(bool, quic_restart_flag_quic_offload_pacing_to_usps2, false, "If true, QUIC offload pacing when using USPS as egress method.") @@ -334,10 +364,6 @@ QUICHE_FLAG(bool, quic_restart_flag_quic_use_pigeon_socket_to_backend, false, "If true, create a shared pigeon socket for all quic to backend connections and switch " "to use it after successful handshake.") -QUICHE_FLAG(bool, spdy_reloadable_flag_fix_spdy_header_coalescing, true, - "If true, when coalescing multivalued spdy headers, only headers that exist in spdy " - "headers block are updated.") - QUICHE_FLAG(bool, spdy_reloadable_flag_quic_bootstrap_cwnd_by_spdy_priority, true, "If true, bootstrap initial QUIC cwnd by SPDY priorities.") diff --git a/test/extensions/quic_listeners/quiche/BUILD b/test/extensions/quic_listeners/quiche/BUILD index e14bc1f36fef..b3c4eb70698d 100644 --- a/test/extensions/quic_listeners/quiche/BUILD +++ b/test/extensions/quic_listeners/quiche/BUILD @@ -77,6 +77,7 @@ envoy_cc_test( "//test/mocks/network:network_mocks", "//test/test_common:utility_lib", "@com_googlesource_quiche//:quic_core_http_spdy_session_lib", + "@com_googlesource_quiche//:quic_test_tools_session_peer_lib", ], ) diff --git a/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc b/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc index 8c1e7e222790..747452ccdf78 100644 --- a/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc +++ b/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc @@ -85,7 +85,7 @@ class ActiveQuicListenerTest : public QuicMultiVersionTest { return quic::CurrentSupportedVersionsWithQuicCrypto(); } bool use_http3 = GetParam().second == QuicVersionType::Iquic; - SetQuicReloadableFlag(quic_enable_version_draft_29, use_http3); + SetQuicReloadableFlag(quic_disable_version_draft_29, !use_http3); SetQuicReloadableFlag(quic_disable_version_draft_27, !use_http3); SetQuicReloadableFlag(quic_disable_version_draft_25, !use_http3); return quic::CurrentSupportedVersions(); diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_client_session_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_client_session_test.cc index 5db43230cd7c..488fe023354e 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_client_session_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_client_session_test.cc @@ -95,7 +95,7 @@ class EnvoyQuicClientSessionTest : public testing::TestWithParam { : api_(Api::createApiForTest(time_system_)), dispatcher_(api_->allocateDispatcher("test_thread")), connection_helper_(*dispatcher_), alarm_factory_(*dispatcher_, *connection_helper_.GetClock()), quic_version_([]() { - SetQuicReloadableFlag(quic_enable_version_draft_29, GetParam()); + SetQuicReloadableFlag(quic_disable_version_draft_29, !GetParam()); SetQuicReloadableFlag(quic_disable_version_draft_27, !GetParam()); SetQuicReloadableFlag(quic_disable_version_draft_25, !GetParam()); return 
quic::ParsedVersionOfIndex(quic::CurrentSupportedVersions(), 0); diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_client_stream_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_client_stream_test.cc index 2a32df6319ed..9784c7231ff2 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_client_stream_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_client_stream_test.cc @@ -25,7 +25,7 @@ class EnvoyQuicClientStreamTest : public testing::TestWithParam { : api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher("test_thread")), connection_helper_(*dispatcher_), alarm_factory_(*dispatcher_, *connection_helper_.GetClock()), quic_version_([]() { - SetQuicReloadableFlag(quic_enable_version_draft_29, GetParam()); + SetQuicReloadableFlag(quic_disable_version_draft_29, !GetParam()); SetQuicReloadableFlag(quic_disable_version_draft_27, !GetParam()); SetQuicReloadableFlag(quic_disable_version_draft_25, !GetParam()); return quic::CurrentSupportedVersions()[0]; diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_dispatcher_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_dispatcher_test.cc index c3ab38f57ff5..fb15815fa1db 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_dispatcher_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_dispatcher_test.cc @@ -65,7 +65,7 @@ class EnvoyQuicDispatcherTest : public QuicMultiVersionTest, return quic::CurrentSupportedVersionsWithQuicCrypto(); } bool use_http3 = GetParam().second == QuicVersionType::Iquic; - SetQuicReloadableFlag(quic_enable_version_draft_29, use_http3); + SetQuicReloadableFlag(quic_disable_version_draft_29, !use_http3); SetQuicReloadableFlag(quic_disable_version_draft_27, !use_http3); SetQuicReloadableFlag(quic_disable_version_draft_25, !use_http3); return quic::CurrentSupportedVersions(); diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_server_session_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_server_session_test.cc index 6ddae3c80624..f2ef9fae069e 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_server_session_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_server_session_test.cc @@ -145,7 +145,7 @@ class EnvoyQuicServerSessionTest : public testing::TestWithParam { : api_(Api::createApiForTest(time_system_)), dispatcher_(api_->allocateDispatcher("test_thread")), connection_helper_(*dispatcher_), alarm_factory_(*dispatcher_, *connection_helper_.GetClock()), quic_version_([]() { - SetQuicReloadableFlag(quic_enable_version_draft_29, GetParam()); + SetQuicReloadableFlag(quic_disable_version_draft_29, !GetParam()); SetQuicReloadableFlag(quic_disable_version_draft_27, !GetParam()); SetQuicReloadableFlag(quic_disable_version_draft_25, !GetParam()); return quic::ParsedVersionOfIndex(quic::CurrentSupportedVersions(), 0); diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_server_stream_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_server_stream_test.cc index 6468f95fe9fa..4a4236737bd0 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_server_stream_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_server_stream_test.cc @@ -7,6 +7,7 @@ #pragma GCC diagnostic ignored "-Winvalid-offsetof" #include "quiche/quic/test_tools/quic_connection_peer.h" +#include "quiche/quic/test_tools/quic_session_peer.h" #pragma GCC diagnostic pop #include "common/event/libevent_scheduler.h" @@ -39,7 +40,7 @@ class EnvoyQuicServerStreamTest : public testing::TestWithParam { : 
api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher("test_thread")), connection_helper_(*dispatcher_), alarm_factory_(*dispatcher_, *connection_helper_.GetClock()), quic_version_([]() { - SetQuicReloadableFlag(quic_enable_version_draft_29, GetParam()); + SetQuicReloadableFlag(quic_disable_version_draft_29, !GetParam()); SetQuicReloadableFlag(quic_disable_version_draft_27, !GetParam()); SetQuicReloadableFlag(quic_disable_version_draft_25, !GetParam()); return quic::CurrentSupportedVersions()[0]; diff --git a/test/extensions/quic_listeners/quiche/integration/quic_http_integration_test.cc b/test/extensions/quic_listeners/quiche/integration/quic_http_integration_test.cc index bbe34b658e7b..05fb1e61a7aa 100644 --- a/test/extensions/quic_listeners/quiche/integration/quic_http_integration_test.cc +++ b/test/extensions/quic_listeners/quiche/integration/quic_http_integration_test.cc @@ -54,7 +54,7 @@ class QuicHttpIntegrationTest : public HttpIntegrationTest, public QuicMultiVers return quic::CurrentSupportedVersionsWithQuicCrypto(); } bool use_http3 = GetParam().second == QuicVersionType::Iquic; - SetQuicReloadableFlag(quic_enable_version_draft_29, use_http3); + SetQuicReloadableFlag(quic_disable_version_draft_29, !use_http3); SetQuicReloadableFlag(quic_disable_version_draft_27, !use_http3); SetQuicReloadableFlag(quic_disable_version_draft_25, !use_http3); return quic::CurrentSupportedVersions(); From e0bd39855d03534afa28c90d2a1a00c08c837c3d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=A5=81=E6=97=A0=E5=BF=A7?= Date: Sat, 8 Aug 2020 00:32:39 +0800 Subject: [PATCH 882/909] docs: Update x-envoy-upstream-service-time description to be more accurate (#12517) Signed-off-by: wbpcode --- docs/root/configuration/http/http_filters/router_filter.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/root/configuration/http/http_filters/router_filter.rst b/docs/root/configuration/http/http_filters/router_filter.rst index 0aaa931891e9..4ca285e9eda7 100644 --- a/docs/root/configuration/http/http_filters/router_filter.rst +++ b/docs/root/configuration/http/http_filters/router_filter.rst @@ -360,9 +360,9 @@ HTTP response headers set on downstream responses x-envoy-upstream-service-time ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Contains the time in milliseconds spent by the upstream host processing the request. This is useful -if the client wants to determine service time compared to network latency. This header is set on -responses. +Contains the time in milliseconds spent by the upstream host processing the request and the network +latency between Envoy and upstream host. This is useful if the client wants to determine service time +compared to network latency between client and Envoy. This header is set on responses. .. _config_http_filters_router_x-envoy-overloaded_set: From 43b110ab6ec17c80463a50bf6d3ae6077f9fb226 Mon Sep 17 00:00:00 2001 From: danzh Date: Fri, 7 Aug 2020 13:16:29 -0400 Subject: [PATCH 883/909] quiche: implement certificate verification (#12063) Implement quic::ProofVerifier which consists of cert verification and signature verification. Cert verification: Share cert verification code with Extensions::TransportSockets::Tls::ClientContextImpl. And initialize ProofVerifier using Envoy::Ssl::ClientContextConfig protobuf. Signature verification: Use quic::CertificateViewer to verify signature. 
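For illustration only (not part of this change), the new verifier can be handed to a QUICHE
client crypto config roughly as follows. This is a sketch: the makeClientCryptoConfig helper
is hypothetical, and it assumes the quic::QuicCryptoClientConfig constructor that takes
ownership of a ProofVerifier.

  #include <memory>

  #include "envoy/common/time.h"
  #include "envoy/ssl/context_config.h"
  #include "envoy/stats/scope.h"

  #include "extensions/quic_listeners/quiche/envoy_quic_proof_verifier.h"

  #include "quiche/quic/core/crypto/quic_crypto_client_config.h"

  // Hypothetical helper: builds a client crypto config whose proof verifier reuses the
  // cert verification settings (trusted CA, SAN matching) from the client TLS context config.
  std::unique_ptr<quic::QuicCryptoClientConfig>
  makeClientCryptoConfig(Envoy::Stats::Scope& scope,
                         const Envoy::Ssl::ClientContextConfig& tls_config,
                         Envoy::TimeSource& time_source) {
    auto verifier =
        std::make_unique<Envoy::Quic::EnvoyQuicProofVerifier>(scope, tls_config, time_source);
    // QuicCryptoClientConfig takes ownership of the ProofVerifier (assumed QUICHE overload).
    return std::make_unique<quic::QuicCryptoClientConfig>(std::move(verifier));
  }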
Part of #9434 #2557 Signed-off-by: Dan Zhang --- source/extensions/quic_listeners/quiche/BUILD | 36 ++- .../quiche/envoy_quic_fake_proof_verifier.h | 61 ----- .../quiche/envoy_quic_proof_source.cc | 15 +- .../quiche/envoy_quic_proof_source.h | 19 +- .../quiche/envoy_quic_proof_source_base.cc | 81 ++++++ ...ource.h => envoy_quic_proof_source_base.h} | 44 +-- .../quiche/envoy_quic_proof_verifier.cc | 48 ++++ .../quiche/envoy_quic_proof_verifier.h | 30 +++ .../quiche/envoy_quic_proof_verifier_base.cc | 70 +++++ .../quiche/envoy_quic_proof_verifier_base.h | 47 ++++ .../quic_listeners/quiche/envoy_quic_utils.cc | 61 +++++ .../quic_listeners/quiche/envoy_quic_utils.h | 11 + .../tls/context_config_impl.h | 5 +- .../transport_sockets/tls/context_impl.cc | 79 ++++-- .../transport_sockets/tls/context_impl.h | 9 +- test/extensions/quic_listeners/quiche/BUILD | 27 +- .../quiche/crypto_test_utils_for_envoy.cc | 4 +- .../quiche/envoy_quic_proof_source_test.cc | 221 ++++++++++++--- .../quiche/envoy_quic_proof_verifier_test.cc | 252 ++++++++++++++++++ .../integration/quic_http_integration_test.cc | 75 +++++- .../quic_listeners/quiche/test_proof_source.h | 20 +- .../quiche/test_proof_verifier.h | 30 +++ test/mocks/ssl/mocks.h | 17 ++ tools/spelling/spelling_dictionary.txt | 1 + 24 files changed, 1072 insertions(+), 191 deletions(-) delete mode 100644 source/extensions/quic_listeners/quiche/envoy_quic_fake_proof_verifier.h create mode 100644 source/extensions/quic_listeners/quiche/envoy_quic_proof_source_base.cc rename source/extensions/quic_listeners/quiche/{envoy_quic_fake_proof_source.h => envoy_quic_proof_source_base.h} (68%) create mode 100644 source/extensions/quic_listeners/quiche/envoy_quic_proof_verifier.cc create mode 100644 source/extensions/quic_listeners/quiche/envoy_quic_proof_verifier.h create mode 100644 source/extensions/quic_listeners/quiche/envoy_quic_proof_verifier_base.cc create mode 100644 source/extensions/quic_listeners/quiche/envoy_quic_proof_verifier_base.h create mode 100644 test/extensions/quic_listeners/quiche/envoy_quic_proof_verifier_test.cc create mode 100644 test/extensions/quic_listeners/quiche/test_proof_verifier.h diff --git a/source/extensions/quic_listeners/quiche/BUILD b/source/extensions/quic_listeners/quiche/BUILD index 1099eb26deb8..fd2cce9b9b5f 100644 --- a/source/extensions/quic_listeners/quiche/BUILD +++ b/source/extensions/quic_listeners/quiche/BUILD @@ -62,12 +62,17 @@ envoy_cc_library( ) envoy_cc_library( - name = "envoy_quic_fake_proof_source_lib", - hdrs = ["envoy_quic_fake_proof_source.h"], + name = "envoy_quic_proof_source_base_lib", + srcs = ["envoy_quic_proof_source_base.cc"], + hdrs = ["envoy_quic_proof_source_base.h"], external_deps = ["quiche_quic_platform"], tags = ["nofips"], deps = [ + ":envoy_quic_utils_lib", + "@com_googlesource_quiche//:quic_core_crypto_certificate_view_lib", + "@com_googlesource_quiche//:quic_core_crypto_crypto_handshake_lib", "@com_googlesource_quiche//:quic_core_crypto_proof_source_interface_lib", + "@com_googlesource_quiche//:quic_core_data_lib", "@com_googlesource_quiche//:quic_core_versions_lib", ], ) @@ -79,7 +84,7 @@ envoy_cc_library( external_deps = ["ssl"], tags = ["nofips"], deps = [ - ":envoy_quic_fake_proof_source_lib", + ":envoy_quic_proof_source_base_lib", ":envoy_quic_utils_lib", ":quic_io_handle_wrapper_lib", ":quic_transport_socket_factory_lib", @@ -91,16 +96,32 @@ envoy_cc_library( ) envoy_cc_library( - name = "envoy_quic_proof_verifier_lib", - hdrs = ["envoy_quic_fake_proof_verifier.h"], + name = 
"envoy_quic_proof_verifier_base_lib", + srcs = ["envoy_quic_proof_verifier_base.cc"], + hdrs = ["envoy_quic_proof_verifier_base.h"], external_deps = ["quiche_quic_platform"], tags = ["nofips"], deps = [ + ":envoy_quic_utils_lib", + "@com_googlesource_quiche//:quic_core_crypto_certificate_view_lib", "@com_googlesource_quiche//:quic_core_crypto_crypto_handshake_lib", "@com_googlesource_quiche//:quic_core_versions_lib", ], ) +envoy_cc_library( + name = "envoy_quic_proof_verifier_lib", + srcs = ["envoy_quic_proof_verifier.cc"], + hdrs = ["envoy_quic_proof_verifier.h"], + external_deps = ["quiche_quic_platform"], + tags = ["nofips"], + deps = [ + ":envoy_quic_proof_verifier_base_lib", + ":envoy_quic_utils_lib", + "//source/extensions/transport_sockets/tls:context_lib", + ], +) + envoy_cc_library( name = "spdy_server_push_utils_for_envoy_lib", srcs = ["spdy_server_push_utils_for_envoy.cc"], @@ -323,7 +344,10 @@ envoy_cc_library( name = "envoy_quic_utils_lib", srcs = ["envoy_quic_utils.cc"], hdrs = ["envoy_quic_utils.h"], - external_deps = ["quiche_quic_platform"], + external_deps = [ + "quiche_quic_platform", + "ssl", + ], tags = ["nofips"], deps = [ "//include/envoy/http:codec_interface", diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_fake_proof_verifier.h b/source/extensions/quic_listeners/quiche/envoy_quic_fake_proof_verifier.h deleted file mode 100644 index af107983317b..000000000000 --- a/source/extensions/quic_listeners/quiche/envoy_quic_fake_proof_verifier.h +++ /dev/null @@ -1,61 +0,0 @@ -#pragma once - -#include "absl/strings/str_cat.h" - -#pragma GCC diagnostic push - -// QUICHE allows unused parameters. -#pragma GCC diagnostic ignored "-Wunused-parameter" - -#include "quiche/quic/core/crypto/proof_verifier.h" -#include "quiche/quic/core/quic_versions.h" - -#pragma GCC diagnostic pop - -namespace Envoy { -namespace Quic { - -// A fake implementation of quic::ProofVerifier which approves the certs and -// signature produced by EnvoyQuicFakeProofSource. -class EnvoyQuicFakeProofVerifier : public quic::ProofVerifier { -public: - ~EnvoyQuicFakeProofVerifier() override = default; - - // quic::ProofVerifier - // Return success if the certs chain is valid and signature is "Fake signature for { - // [server_config] }". Otherwise failure. - quic::QuicAsyncStatus - VerifyProof(const std::string& hostname, const uint16_t port, - const std::string& /*server_config*/, quic::QuicTransportVersion /*quic_version*/, - absl::string_view /*chlo_hash*/, const std::vector& certs, - const std::string& cert_sct, const std::string& /*signature*/, - const quic::ProofVerifyContext* context, std::string* error_details, - std::unique_ptr* details, - std::unique_ptr callback) override { - if (VerifyCertChain(hostname, port, certs, "", cert_sct, context, error_details, details, - std::move(callback)) == quic::QUIC_SUCCESS) { - return quic::QUIC_SUCCESS; - } - return quic::QUIC_FAILURE; - } - - // Return success upon one arbitrary cert content. Otherwise failure. - quic::QuicAsyncStatus - VerifyCertChain(const std::string& /*hostname*/, const uint16_t /*port*/, - const std::vector& certs, const std::string& /*ocsp_response*/, - const std::string& cert_sct, const quic::ProofVerifyContext* /*context*/, - std::string* /*error_details*/, - std::unique_ptr* /*details*/, - std::unique_ptr /*callback*/) override { - // Cert SCT support is not enabled for fake ProofSource. 
- if (cert_sct.empty() && certs.size() == 1) { - return quic::QUIC_SUCCESS; - } - return quic::QUIC_FAILURE; - } - - std::unique_ptr CreateDefaultContext() override { return nullptr; } -}; - -} // namespace Quic -} // namespace Envoy diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_proof_source.cc b/source/extensions/quic_listeners/quiche/envoy_quic_proof_source.cc index 66fe7017436d..96fe056e818e 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_proof_source.cc +++ b/source/extensions/quic_listeners/quiche/envoy_quic_proof_source.cc @@ -28,19 +28,13 @@ EnvoyQuicProofSource::GetCertChain(const quic::QuicSocketAddress& server_address } auto& cert_config = cert_config_ref.value().get(); const std::string& chain_str = cert_config.certificateChain(); - std::string pem_str = std::string(const_cast(chain_str.data()), chain_str.size()); std::stringstream pem_stream(chain_str); std::vector chain = quic::CertificateView::LoadPemFromStream(&pem_stream); - if (chain.empty()) { - ENVOY_LOG(warn, "Failed to load certificate chain from %s", cert_config.certificateChainPath()); - return quic::QuicReferenceCountedPointer( - new quic::ProofSource::Chain({})); - } return quic::QuicReferenceCountedPointer( new quic::ProofSource::Chain(chain)); } -void EnvoyQuicProofSource::ComputeTlsSignature( +void EnvoyQuicProofSource::signPayload( const quic::QuicSocketAddress& server_address, const quic::QuicSocketAddress& client_address, const std::string& hostname, uint16_t signature_algorithm, quiche::QuicheStringPiece in, std::unique_ptr callback) { @@ -59,7 +53,11 @@ void EnvoyQuicProofSource::ComputeTlsSignature( std::stringstream pem_str(pkey); std::unique_ptr pem_key = quic::CertificatePrivateKey::LoadPemFromStream(&pem_str); - + if (pem_key == nullptr) { + ENVOY_LOG(warn, "Failed to load private key."); + callback->Run(false, "", nullptr); + return; + } // Sign. std::string sig = pem_key->Sign(in, signature_algorithm); @@ -85,7 +83,6 @@ EnvoyQuicProofSource::getTlsCertConfigAndFilterChain(const quic::QuicSocketAddre const Network::FilterChain* filter_chain = filter_chain_manager_.findFilterChain(connection_socket); if (filter_chain == nullptr) { - ENVOY_LOG(warn, "No matching filter chain found for handshake."); listener_stats_.no_filter_chain_match_.inc(); return {absl::nullopt, absl::nullopt}; } diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_proof_source.h b/source/extensions/quic_listeners/quiche/envoy_quic_proof_source.h index 4dab673687d8..6e1c74c9234c 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_proof_source.h +++ b/source/extensions/quic_listeners/quiche/envoy_quic_proof_source.h @@ -2,14 +2,14 @@ #include "server/connection_handler_impl.h" -#include "extensions/quic_listeners/quiche/envoy_quic_fake_proof_source.h" +#include "extensions/quic_listeners/quiche/envoy_quic_proof_source_base.h" #include "extensions/quic_listeners/quiche/quic_transport_socket_factory.h" namespace Envoy { namespace Quic { -class EnvoyQuicProofSource : public EnvoyQuicFakeProofSource, - protected Logger::Loggable { +// A ProofSource implementation which supplies a proof instance with certs from filter chain. 
+class EnvoyQuicProofSource : public EnvoyQuicProofSourceBase { public: EnvoyQuicProofSource(Network::Socket& listen_socket, Network::FilterChainManager& filter_chain_manager, @@ -19,14 +19,17 @@ class EnvoyQuicProofSource : public EnvoyQuicFakeProofSource, ~EnvoyQuicProofSource() override = default; + // quic::ProofSource quic::QuicReferenceCountedPointer GetCertChain(const quic::QuicSocketAddress& server_address, const quic::QuicSocketAddress& client_address, const std::string& hostname) override; - void ComputeTlsSignature(const quic::QuicSocketAddress& server_address, - const quic::QuicSocketAddress& client_address, - const std::string& hostname, uint16_t signature_algorithm, - quiche::QuicheStringPiece in, - std::unique_ptr callback) override; + +protected: + // quic::ProofSource + void signPayload(const quic::QuicSocketAddress& server_address, + const quic::QuicSocketAddress& client_address, const std::string& hostname, + uint16_t signature_algorithm, quiche::QuicheStringPiece in, + std::unique_ptr callback) override; private: struct CertConfigWithFilterChain { diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_proof_source_base.cc b/source/extensions/quic_listeners/quiche/envoy_quic_proof_source_base.cc new file mode 100644 index 000000000000..220dc4cb1ccf --- /dev/null +++ b/source/extensions/quic_listeners/quiche/envoy_quic_proof_source_base.cc @@ -0,0 +1,81 @@ +#include "extensions/quic_listeners/quiche/envoy_quic_proof_source_base.h" + +#pragma GCC diagnostic push + +// QUICHE allows unused parameters. +#pragma GCC diagnostic ignored "-Wunused-parameter" +#include "quiche/quic/core/quic_data_writer.h" + +#pragma GCC diagnostic pop + +#include "extensions/quic_listeners/quiche/envoy_quic_utils.h" + +namespace Envoy { +namespace Quic { + +void EnvoyQuicProofSourceBase::GetProof(const quic::QuicSocketAddress& server_address, + const quic::QuicSocketAddress& client_address, + const std::string& hostname, + const std::string& server_config, + quic::QuicTransportVersion /*transport_version*/, + quiche::QuicheStringPiece chlo_hash, + std::unique_ptr callback) { + quic::QuicReferenceCountedPointer chain = + GetCertChain(server_address, client_address, hostname); + + if (chain == nullptr || chain->certs.empty()) { + quic::QuicCryptoProof proof; + callback->Run(/*ok=*/false, nullptr, proof, nullptr); + return; + } + size_t payload_size = sizeof(quic::kProofSignatureLabel) + sizeof(uint32_t) + chlo_hash.size() + + server_config.size(); + auto payload = std::make_unique(payload_size); + quic::QuicDataWriter payload_writer(payload_size, payload.get(), + quiche::Endianness::HOST_BYTE_ORDER); + bool success = + payload_writer.WriteBytes(quic::kProofSignatureLabel, sizeof(quic::kProofSignatureLabel)) && + payload_writer.WriteUInt32(chlo_hash.size()) && payload_writer.WriteStringPiece(chlo_hash) && + payload_writer.WriteStringPiece(server_config); + if (!success) { + quic::QuicCryptoProof proof; + callback->Run(/*ok=*/false, nullptr, proof, nullptr); + return; + } + + std::string error_details; + bssl::UniquePtr cert = parseDERCertificate(chain->certs[0], &error_details); + if (cert == nullptr) { + ENVOY_LOG(warn, absl::StrCat("Invalid leaf cert: ", error_details)); + quic::QuicCryptoProof proof; + callback->Run(/*ok=*/false, nullptr, proof, nullptr); + return; + } + + bssl::UniquePtr pub_key(X509_get_pubkey(cert.get())); + int sign_alg = deduceSignatureAlgorithmFromPublicKey(pub_key.get(), &error_details); + if (sign_alg == 0) { + ENVOY_LOG(warn, absl::StrCat("Failed to deduce 
signature algorithm from public key: ", + error_details)); + quic::QuicCryptoProof proof; + callback->Run(/*ok=*/false, nullptr, proof, nullptr); + return; + } + + auto signature_callback = std::make_unique(std::move(callback), chain); + + signPayload(server_address, client_address, hostname, sign_alg, + quiche::QuicheStringPiece(payload.get(), payload_size), + std::move(signature_callback)); +} + +void EnvoyQuicProofSourceBase::ComputeTlsSignature( + const quic::QuicSocketAddress& server_address, const quic::QuicSocketAddress& client_address, + const std::string& hostname, uint16_t signature_algorithm, quiche::QuicheStringPiece in, + std::unique_ptr callback) { + signPayload(server_address, client_address, hostname, signature_algorithm, in, + std::move(callback)); +} + +} // namespace Quic +} // namespace Envoy diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_fake_proof_source.h b/source/extensions/quic_listeners/quiche/envoy_quic_proof_source_base.h similarity index 68% rename from source/extensions/quic_listeners/quiche/envoy_quic_fake_proof_source.h rename to source/extensions/quic_listeners/quiche/envoy_quic_proof_source_base.h index f4a2a9466f42..149cc50c7d63 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_fake_proof_source.h +++ b/source/extensions/quic_listeners/quiche/envoy_quic_proof_source_base.h @@ -12,14 +12,16 @@ #pragma GCC diagnostic ignored "-Wunused-parameter" #include "quiche/quic/core/crypto/proof_source.h" #include "quiche/quic/core/quic_versions.h" - +#include "quiche/quic/core/crypto/crypto_protocol.h" +#include "quiche/quic/platform/api/quic_reference_counted.h" +#include "quiche/quic/platform/api/quic_socket_address.h" +#include "quiche/common/platform/api/quiche_string_piece.h" #pragma GCC diagnostic pop #include "openssl/ssl.h" #include "envoy/network/filter.h" -#include "quiche/quic/platform/api/quic_reference_counted.h" -#include "quiche/quic/platform/api/quic_socket_address.h" -#include "quiche/common/platform/api/quiche_string_piece.h" +#include "server/backtrace.h" +#include "common/common/logger.h" namespace Envoy { namespace Quic { @@ -38,11 +40,12 @@ class EnvoyQuicProofSourceDetails : public quic::ProofSource::Details { const Network::FilterChain& filter_chain_; }; -// A fake implementation of quic::ProofSource which uses RSA cipher suite to sign in GetProof(). -// TODO(danzh) Rename it to EnvoyQuicProofSource once it's fully implemented. -class EnvoyQuicFakeProofSource : public quic::ProofSource { +// A partial implementation of quic::ProofSource which chooses a cipher suite according to the leaf +// cert to sign in GetProof(). +class EnvoyQuicProofSourceBase : public quic::ProofSource, + protected Logger::Loggable { public: - ~EnvoyQuicFakeProofSource() override = default; + ~EnvoyQuicProofSourceBase() override = default; // quic::ProofSource // Returns a certs chain and its fake SCT "Fake timestamp" and TLS signature wrapped @@ -50,19 +53,24 @@ class EnvoyQuicFakeProofSource : public quic::ProofSource { void GetProof(const quic::QuicSocketAddress& server_address, const quic::QuicSocketAddress& client_address, const std::string& hostname, const std::string& server_config, quic::QuicTransportVersion /*transport_version*/, - quiche::QuicheStringPiece /*chlo_hash*/, - std::unique_ptr callback) override { - quic::QuicReferenceCountedPointer chain = - GetCertChain(server_address, client_address, hostname); - quic::QuicCryptoProof proof; - // TODO(danzh) Get the signature algorithm from leaf cert. 
- auto signature_callback = std::make_unique(std::move(callback), chain); - ComputeTlsSignature(server_address, client_address, hostname, SSL_SIGN_RSA_PSS_RSAE_SHA256, - server_config, std::move(signature_callback)); - } + quiche::QuicheStringPiece chlo_hash, + std::unique_ptr callback) override; TicketCrypter* GetTicketCrypter() override { return nullptr; } + void ComputeTlsSignature(const quic::QuicSocketAddress& server_address, + const quic::QuicSocketAddress& client_address, + const std::string& hostname, uint16_t signature_algorithm, + quiche::QuicheStringPiece in, + std::unique_ptr callback) override; + +protected: + virtual void signPayload(const quic::QuicSocketAddress& server_address, + const quic::QuicSocketAddress& client_address, + const std::string& hostname, uint16_t signature_algorithm, + quiche::QuicheStringPiece in, + std::unique_ptr callback) PURE; + private: // Used by GetProof() to get signature. class SignatureCallback : public quic::ProofSource::SignatureCallback { diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_proof_verifier.cc b/source/extensions/quic_listeners/quiche/envoy_quic_proof_verifier.cc new file mode 100644 index 000000000000..b7040d1279d7 --- /dev/null +++ b/source/extensions/quic_listeners/quiche/envoy_quic_proof_verifier.cc @@ -0,0 +1,48 @@ +#include "extensions/quic_listeners/quiche/envoy_quic_proof_verifier.h" + +#include "extensions/quic_listeners/quiche/envoy_quic_utils.h" + +#include "quiche/quic/core/crypto/certificate_view.h" + +namespace Envoy { +namespace Quic { + +quic::QuicAsyncStatus EnvoyQuicProofVerifier::VerifyCertChain( + const std::string& hostname, const uint16_t /*port*/, const std::vector& certs, + const std::string& /*ocsp_response*/, const std::string& /*cert_sct*/, + const quic::ProofVerifyContext* /*context*/, std::string* error_details, + std::unique_ptr* /*details*/, + std::unique_ptr /*callback*/) { + ASSERT(!certs.empty()); + bssl::UniquePtr intermediates(sk_X509_new_null()); + bssl::UniquePtr leaf; + for (size_t i = 0; i < certs.size(); i++) { + bssl::UniquePtr cert = parseDERCertificate(certs[i], error_details); + if (!cert) { + return quic::QUIC_FAILURE; + } + if (i == 0) { + leaf = std::move(cert); + } else { + sk_X509_push(intermediates.get(), cert.release()); + } + } + bool success = context_impl_.verifyCertChain(*leaf, *intermediates, *error_details); + if (!success) { + return quic::QUIC_FAILURE; + } + + std::unique_ptr cert_view = + quic::CertificateView::ParseSingleCertificate(certs[0]); + ASSERT(cert_view != nullptr); + for (const absl::string_view config_san : cert_view->subject_alt_name_domains()) { + if (Extensions::TransportSockets::Tls::ContextImpl::dnsNameMatch(hostname, config_san)) { + return quic::QUIC_SUCCESS; + } + } + *error_details = absl::StrCat("Leaf certificate doesn't match hostname: ", hostname); + return quic::QUIC_FAILURE; +} + +} // namespace Quic +} // namespace Envoy diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_proof_verifier.h b/source/extensions/quic_listeners/quiche/envoy_quic_proof_verifier.h new file mode 100644 index 000000000000..a29eb999119f --- /dev/null +++ b/source/extensions/quic_listeners/quiche/envoy_quic_proof_verifier.h @@ -0,0 +1,30 @@ +#pragma once + +#include "extensions/quic_listeners/quiche/envoy_quic_proof_verifier_base.h" +#include "extensions/transport_sockets/tls/context_impl.h" + +namespace Envoy { +namespace Quic { + +// A quic::ProofVerifier implementation which verifies cert chain using SSL +// client context config. 
+class EnvoyQuicProofVerifier : public EnvoyQuicProofVerifierBase { +public: + EnvoyQuicProofVerifier(Stats::Scope& scope, const Envoy::Ssl::ClientContextConfig& config, + TimeSource& time_source) + : context_impl_(scope, config, time_source) {} + + // EnvoyQuicProofVerifierBase + quic::QuicAsyncStatus + VerifyCertChain(const std::string& hostname, const uint16_t port, + const std::vector& certs, const std::string& ocsp_response, + const std::string& cert_sct, const quic::ProofVerifyContext* context, + std::string* error_details, std::unique_ptr* details, + std::unique_ptr callback) override; + +private: + Extensions::TransportSockets::Tls::ClientContextImpl context_impl_; +}; + +} // namespace Quic +} // namespace Envoy diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_proof_verifier_base.cc b/source/extensions/quic_listeners/quiche/envoy_quic_proof_verifier_base.cc new file mode 100644 index 000000000000..229b3ab36628 --- /dev/null +++ b/source/extensions/quic_listeners/quiche/envoy_quic_proof_verifier_base.cc @@ -0,0 +1,70 @@ +#include "extensions/quic_listeners/quiche/envoy_quic_proof_verifier_base.h" + +#include "extensions/quic_listeners/quiche/envoy_quic_utils.h" + +#include "openssl/ssl.h" +#include "quiche/quic/core/crypto/certificate_view.h" +#include "quiche/quic/core/crypto/crypto_protocol.h" +#include "quiche/quic/core/quic_data_writer.h" + +namespace Envoy { +namespace Quic { + +quic::QuicAsyncStatus EnvoyQuicProofVerifierBase::VerifyProof( + const std::string& hostname, const uint16_t port, const std::string& server_config, + quic::QuicTransportVersion /*quic_version*/, absl::string_view chlo_hash, + const std::vector& certs, const std::string& cert_sct, + const std::string& signature, const quic::ProofVerifyContext* context, + std::string* error_details, std::unique_ptr* details, + std::unique_ptr callback) { + if (certs.empty()) { + *error_details = "Received empty cert chain."; + return quic::QUIC_FAILURE; + } + if (!verifySignature(server_config, chlo_hash, certs[0], signature, error_details)) { + return quic::QUIC_FAILURE; + } + + return VerifyCertChain(hostname, port, certs, "", cert_sct, context, error_details, details, + std::move(callback)); +} + +bool EnvoyQuicProofVerifierBase::verifySignature(const std::string& server_config, + absl::string_view chlo_hash, + const std::string& cert, + const std::string& signature, + std::string* error_details) { + std::unique_ptr cert_view = + quic::CertificateView::ParseSingleCertificate(cert); + if (cert_view == nullptr) { + *error_details = "Invalid leaf cert."; + return false; + } + int sign_alg = deduceSignatureAlgorithmFromPublicKey(cert_view->public_key(), error_details); + if (sign_alg == 0) { + return false; + } + + size_t payload_size = sizeof(quic::kProofSignatureLabel) + sizeof(uint32_t) + chlo_hash.size() + + server_config.size(); + auto payload = std::make_unique(payload_size); + quic::QuicDataWriter payload_writer(payload_size, payload.get(), + quiche::Endianness::HOST_BYTE_ORDER); + bool success = + payload_writer.WriteBytes(quic::kProofSignatureLabel, sizeof(quic::kProofSignatureLabel)) && + payload_writer.WriteUInt32(chlo_hash.size()) && payload_writer.WriteStringPiece(chlo_hash) && + payload_writer.WriteStringPiece(server_config); + if (!success) { + *error_details = "QuicPacketWriter error."; + return false; + } + bool valid = cert_view->VerifySignature(quiche::QuicheStringPiece(payload.get(), payload_size), + signature, sign_alg); + if (!valid) { + *error_details = "Signature is not valid."; + } 
+ return valid; +} + +} // namespace Quic +} // namespace Envoy diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_proof_verifier_base.h b/source/extensions/quic_listeners/quiche/envoy_quic_proof_verifier_base.h new file mode 100644 index 000000000000..02dac5facd42 --- /dev/null +++ b/source/extensions/quic_listeners/quiche/envoy_quic_proof_verifier_base.h @@ -0,0 +1,47 @@ +#pragma once + +#include "absl/strings/str_cat.h" + +#pragma GCC diagnostic push + +// QUICHE allows unused parameters. +#pragma GCC diagnostic ignored "-Wunused-parameter" + +#include "quiche/quic/core/crypto/proof_verifier.h" +#include "quiche/quic/core/quic_versions.h" + +#pragma GCC diagnostic pop + +#include "common/common/logger.h" + +namespace Envoy { +namespace Quic { + +// A partial implementation of quic::ProofVerifier which does signature +// verification. +class EnvoyQuicProofVerifierBase : public quic::ProofVerifier, + protected Logger::Loggable { +public: + ~EnvoyQuicProofVerifierBase() override = default; + + // quic::ProofVerifier + // Return success if the certs chain is valid and signature of { + // server_config + chlo_hash} is valid. Otherwise failure. + quic::QuicAsyncStatus + VerifyProof(const std::string& hostname, const uint16_t port, const std::string& server_config, + quic::QuicTransportVersion /*quic_version*/, absl::string_view chlo_hash, + const std::vector& certs, const std::string& cert_sct, + const std::string& signature, const quic::ProofVerifyContext* context, + std::string* error_details, std::unique_ptr* details, + std::unique_ptr callback) override; + + std::unique_ptr CreateDefaultContext() override { return nullptr; } + +protected: + virtual bool verifySignature(const std::string& server_config, absl::string_view chlo_hash, + const std::string& cert, const std::string& signature, + std::string* error_details); +}; + +} // namespace Quic +} // namespace Envoy diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_utils.cc b/source/extensions/quic_listeners/quiche/envoy_quic_utils.cc index aefb6a860e5e..b5c710a81269 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_utils.cc +++ b/source/extensions/quic_listeners/quiche/envoy_quic_utils.cc @@ -124,5 +124,66 @@ createConnectionSocket(Network::Address::InstanceConstSharedPtr& peer_addr, return connection_socket; } +bssl::UniquePtr parseDERCertificate(const std::string& der_bytes, + std::string* error_details) { + const uint8_t* data; + const uint8_t* orig_data; + orig_data = data = reinterpret_cast(der_bytes.data()); + bssl::UniquePtr cert(d2i_X509(nullptr, &data, der_bytes.size())); + if (!cert.get()) { + *error_details = "d2i_X509: fail to parse DER"; + return nullptr; + } + if (data < orig_data || static_cast(data - orig_data) != der_bytes.size()) { + *error_details = "There is trailing garbage in DER."; + return nullptr; + } + return cert; +} + +int deduceSignatureAlgorithmFromPublicKey(const EVP_PKEY* public_key, std::string* error_details) { + int sign_alg = 0; + const int pkey_id = EVP_PKEY_id(public_key); + switch (pkey_id) { + case EVP_PKEY_EC: { + // We only support P-256 ECDSA today. + const EC_KEY* ecdsa_public_key = EVP_PKEY_get0_EC_KEY(public_key); + // Since we checked the key type above, this should be valid. 
+ ASSERT(ecdsa_public_key != nullptr); + const EC_GROUP* ecdsa_group = EC_KEY_get0_group(ecdsa_public_key); + if (ecdsa_group == nullptr || EC_GROUP_get_curve_name(ecdsa_group) != NID_X9_62_prime256v1) { + *error_details = "Invalid leaf cert, only P-256 ECDSA certificates are supported"; + break; + } + // QUICHE uses SHA-256 as hash function in cert signature. + sign_alg = SSL_SIGN_ECDSA_SECP256R1_SHA256; + } break; + case EVP_PKEY_RSA: { + // We require RSA certificates with 2048-bit or larger keys. + const RSA* rsa_public_key = EVP_PKEY_get0_RSA(public_key); + // Since we checked the key type above, this should be valid. + ASSERT(rsa_public_key != nullptr); + const unsigned rsa_key_length = RSA_size(rsa_public_key); +#ifdef BORINGSSL_FIPS + if (rsa_key_length != 2048 / 8 && rsa_key_length != 3072 / 8) { + *error_details = "Invalid leaf cert, only RSA certificates with 2048-bit or 3072-bit keys " + "are supported in FIPS mode"; + break; + } +#else + if (rsa_key_length < 2048 / 8) { + *error_details = + "Invalid leaf cert, only RSA certificates with 2048-bit or larger keys are supported"; + break; + } +#endif + sign_alg = SSL_SIGN_RSA_PSS_RSAE_SHA256; + } break; + default: + *error_details = "Invalid leaf cert, only RSA and ECDSA certificates are supported"; + } + return sign_alg; +} + } // namespace Quic } // namespace Envoy diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_utils.h b/source/extensions/quic_listeners/quiche/envoy_quic_utils.h index f5714ef15b83..34dce87d836b 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_utils.h +++ b/source/extensions/quic_listeners/quiche/envoy_quic_utils.h @@ -24,6 +24,8 @@ #include "quiche/quic/platform/api/quic_ip_address.h" #include "quiche/quic/platform/api/quic_socket_address.h" +#include "openssl/ssl.h" + namespace Envoy { namespace Quic { @@ -80,5 +82,14 @@ createConnectionSocket(Network::Address::InstanceConstSharedPtr& peer_addr, Network::Address::InstanceConstSharedPtr& local_addr, const Network::ConnectionSocket::OptionsSharedPtr& options); +// Convert a cert in string form to X509 object. +// Return nullptr if the bytes passed cannot be passed. +bssl::UniquePtr parseDERCertificate(const std::string& der_bytes, std::string* error_details); + +// Deduce the suitable signature algorithm according to the public key. +// Return the sign algorithm id works with the public key; If the public key is +// not supported, return 0 with error_details populated correspondingly. 
+int deduceSignatureAlgorithmFromPublicKey(const EVP_PKEY* public_key, std::string* error_details); + } // namespace Quic } // namespace Envoy diff --git a/source/extensions/transport_sockets/tls/context_config_impl.h b/source/extensions/transport_sockets/tls/context_config_impl.h index 9cfaff0482fb..ad2d927d8231 100644 --- a/source/extensions/transport_sockets/tls/context_config_impl.h +++ b/source/extensions/transport_sockets/tls/context_config_impl.h @@ -98,6 +98,9 @@ class ContextConfigImpl : public virtual Ssl::ContextConfig { class ClientContextConfigImpl : public ContextConfigImpl, public Envoy::Ssl::ClientContextConfig { public: + static const std::string DEFAULT_CIPHER_SUITES; + static const std::string DEFAULT_CURVES; + ClientContextConfigImpl( const envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext& config, absl::string_view sigalgs, @@ -116,8 +119,6 @@ class ClientContextConfigImpl : public ContextConfigImpl, public Envoy::Ssl::Cli private: static const unsigned DEFAULT_MIN_VERSION; static const unsigned DEFAULT_MAX_VERSION; - static const std::string DEFAULT_CIPHER_SUITES; - static const std::string DEFAULT_CURVES; const std::string server_name_indication_; const bool allow_renegotiation_; diff --git a/source/extensions/transport_sockets/tls/context_impl.cc b/source/extensions/transport_sockets/tls/context_impl.cc index 369bdd460f98..502739958e50 100644 --- a/source/extensions/transport_sockets/tls/context_impl.cc +++ b/source/extensions/transport_sockets/tls/context_impl.cc @@ -527,49 +527,50 @@ int ContextImpl::verifyCallback(X509_STORE_CTX* store_ctx, void* arg) { ContextImpl* impl = reinterpret_cast(arg); SSL* ssl = reinterpret_cast( X509_STORE_CTX_get_ex_data(store_ctx, SSL_get_ex_data_X509_STORE_CTX_idx())); - Envoy::Ssl::SslExtendedSocketInfo* sslExtendedInfo = + auto cert = bssl::UniquePtr(SSL_get_peer_certificate(ssl)); + return impl->doVerifyCertChain( + store_ctx, reinterpret_cast( - SSL_get_ex_data(ssl, ContextImpl::sslExtendedSocketInfoIndex())); + SSL_get_ex_data(ssl, ContextImpl::sslExtendedSocketInfoIndex())), + *cert, static_cast(SSL_get_app_data(ssl))); +} - if (impl->verify_trusted_ca_) { +int ContextImpl::doVerifyCertChain( + X509_STORE_CTX* store_ctx, Ssl::SslExtendedSocketInfo* ssl_extended_info, X509& leaf_cert, + const Network::TransportSocketOptions* transport_socket_options) { + if (verify_trusted_ca_) { int ret = X509_verify_cert(store_ctx); - if (sslExtendedInfo) { - sslExtendedInfo->setCertificateValidationStatus( + if (ssl_extended_info) { + ssl_extended_info->setCertificateValidationStatus( ret == 1 ? Envoy::Ssl::ClientValidationStatus::Validated : Envoy::Ssl::ClientValidationStatus::Failed); } if (ret <= 0) { - impl->stats_.fail_verify_error_.inc(); - return impl->allow_untrusted_certificate_ ? 1 : ret; + stats_.fail_verify_error_.inc(); + return allow_untrusted_certificate_ ? 1 : ret; } } - bssl::UniquePtr cert(SSL_get_peer_certificate(ssl)); - - const Network::TransportSocketOptions* transport_socket_options = - static_cast(SSL_get_app_data(ssl)); - - Envoy::Ssl::ClientValidationStatus validated = impl->verifyCertificate( - cert.get(), + Envoy::Ssl::ClientValidationStatus validated = verifyCertificate( + &leaf_cert, transport_socket_options && !transport_socket_options->verifySubjectAltNameListOverride().empty() ? 
transport_socket_options->verifySubjectAltNameListOverride() - : impl->verify_subject_alt_name_list_, - impl->subject_alt_name_matchers_); + : verify_subject_alt_name_list_, + subject_alt_name_matchers_); - if (sslExtendedInfo) { - if (sslExtendedInfo->certificateValidationStatus() == + if (ssl_extended_info) { + if (ssl_extended_info->certificateValidationStatus() == Envoy::Ssl::ClientValidationStatus::NotValidated) { - sslExtendedInfo->setCertificateValidationStatus(validated); + ssl_extended_info->setCertificateValidationStatus(validated); } else if (validated != Envoy::Ssl::ClientValidationStatus::NotValidated) { - sslExtendedInfo->setCertificateValidationStatus(validated); + ssl_extended_info->setCertificateValidationStatus(validated); } } - return impl->allow_untrusted_certificate_ - ? 1 - : (validated != Envoy::Ssl::ClientValidationStatus::Failed); + return allow_untrusted_certificate_ ? 1 + : (validated != Envoy::Ssl::ClientValidationStatus::Failed); } Envoy::Ssl::ClientValidationStatus ContextImpl::verifyCertificate( @@ -675,7 +676,7 @@ bool ContextImpl::matchSubjectAltName( if (general_name->type == GEN_DNS && config_san_matcher.matcher().match_pattern_case() == envoy::type::matcher::v3::StringMatcher::MatchPatternCase::kExact - ? dnsNameMatch(config_san_matcher.matcher().exact(), san.c_str()) + ? dnsNameMatch(config_san_matcher.matcher().exact(), absl::string_view(san)) : config_san_matcher.match(san)) { return true; } @@ -703,20 +704,20 @@ bool ContextImpl::verifySubjectAltName(X509* cert, return false; } -bool ContextImpl::dnsNameMatch(const std::string& dns_name, const char* pattern) { +bool ContextImpl::dnsNameMatch(const absl::string_view dns_name, const absl::string_view pattern) { if (dns_name == pattern) { return true; } - size_t pattern_len = strlen(pattern); + size_t pattern_len = pattern.length(); if (pattern_len > 1 && pattern[0] == '*' && pattern[1] == '.') { if (dns_name.length() > pattern_len - 1) { const size_t off = dns_name.length() - pattern_len + 1; if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.fix_wildcard_matching")) { return dns_name.substr(0, off).find('.') == std::string::npos && - dns_name.compare(off, pattern_len - 1, pattern + 1) == 0; + dns_name.substr(off, pattern_len - 1) == pattern.substr(1, pattern_len - 1); } else { - return dns_name.compare(off, pattern_len - 1, pattern + 1) == 0; + return dns_name.substr(off, pattern_len - 1) == pattern.substr(1, pattern_len - 1); } } } @@ -1394,6 +1395,28 @@ bool ServerContextImpl::TlsContext::isCipherEnabled(uint16_t cipher_id, uint16_t return false; } +bool ContextImpl::verifyCertChain(X509& leaf_cert, STACK_OF(X509) & intermediates, + std::string& error_details) { + bssl::UniquePtr ctx(X509_STORE_CTX_new()); + // It doesn't matter which SSL context is used, because they share the same + // cert validation config. 
+ X509_STORE* store = SSL_CTX_get_cert_store(tls_contexts_[0].ssl_ctx_.get()); + if (!X509_STORE_CTX_init(ctx.get(), store, &leaf_cert, &intermediates)) { + error_details = "Failed to verify certificate chain: X509_STORE_CTX_init"; + return false; + } + + int res = doVerifyCertChain(ctx.get(), nullptr, leaf_cert, nullptr); + if (res <= 0) { + const int n = X509_STORE_CTX_get_error(ctx.get()); + const int depth = X509_STORE_CTX_get_error_depth(ctx.get()); + error_details = absl::StrCat("X509_verify_cert: certificate verification error at depth ", + depth, ": ", X509_verify_cert_error_string(n)); + return false; + } + return true; +} + } // namespace Tls } // namespace TransportSockets } // namespace Extensions diff --git a/source/extensions/transport_sockets/tls/context_impl.h b/source/extensions/transport_sockets/tls/context_impl.h index 407dd45f86f8..5ea35a48228e 100644 --- a/source/extensions/transport_sockets/tls/context_impl.h +++ b/source/extensions/transport_sockets/tls/context_impl.h @@ -84,7 +84,7 @@ class ContextImpl : public virtual Envoy::Ssl::Context { * @param pattern the pattern to match against (*.example.com) * @return true if the san matches pattern */ - static bool dnsNameMatch(const std::string& dns_name, const char* pattern); + static bool dnsNameMatch(const absl::string_view dns_name, const absl::string_view pattern); SslStats& stats() { return stats_; } @@ -101,6 +101,8 @@ class ContextImpl : public virtual Envoy::Ssl::Context { std::vector getPrivateKeyMethodProviders(); + bool verifyCertChain(X509& leaf_cert, STACK_OF(X509) & intermediates, std::string& error_details); + protected: ContextImpl(Stats::Scope& scope, const Envoy::Ssl::ContextConfig& config, TimeSource& time_source); @@ -117,6 +119,11 @@ class ContextImpl : public virtual Envoy::Ssl::Context { // A SSL_CTX_set_cert_verify_callback for custom cert validation. static int verifyCallback(X509_STORE_CTX* store_ctx, void* arg); + // Called by verifyCallback to do the actual cert chain verification. 
+ int doVerifyCertChain(X509_STORE_CTX* store_ctx, Ssl::SslExtendedSocketInfo* ssl_extended_info, + X509& leaf_cert, + const Network::TransportSocketOptions* transport_socket_options); + Envoy::Ssl::ClientValidationStatus verifyCertificate(X509* cert, const std::vector& verify_san_list, const std::vector& subject_alt_name_matchers); diff --git a/test/extensions/quic_listeners/quiche/BUILD b/test/extensions/quic_listeners/quiche/BUILD index b3c4eb70698d..29ae0a89eb28 100644 --- a/test/extensions/quic_listeners/quiche/BUILD +++ b/test/extensions/quic_listeners/quiche/BUILD @@ -49,6 +49,7 @@ envoy_cc_test( deps = [ "//source/extensions/quic_listeners/quiche:envoy_quic_proof_source_lib", "//source/extensions/quic_listeners/quiche:envoy_quic_proof_verifier_lib", + "//source/extensions/transport_sockets/tls:context_config_lib", "//test/mocks/network:network_mocks", "//test/mocks/ssl:ssl_mocks", "@com_googlesource_quiche//:quic_core_versions_lib", @@ -56,6 +57,19 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "envoy_quic_proof_verifier_test", + srcs = ["envoy_quic_proof_verifier_test.cc"], + external_deps = ["quiche_quic_platform"], + tags = ["nofips"], + deps = [ + "//source/extensions/quic_listeners/quiche:envoy_quic_proof_verifier_lib", + "//source/extensions/transport_sockets/tls:context_config_lib", + "//test/mocks/ssl:ssl_mocks", + "@com_googlesource_quiche//:quic_test_tools_test_certificates_lib", + ], +) + envoy_cc_test( name = "envoy_quic_server_stream_test", srcs = ["envoy_quic_server_stream_test.cc"], @@ -221,19 +235,28 @@ envoy_cc_test_library( hdrs = ["test_proof_source.h"], tags = ["nofips"], deps = [ - "//source/extensions/quic_listeners/quiche:envoy_quic_fake_proof_source_lib", + "//source/extensions/quic_listeners/quiche:envoy_quic_proof_source_base_lib", "//test/mocks/network:network_mocks", "@com_googlesource_quiche//:quic_test_tools_test_certificates_lib", ], ) +envoy_cc_test_library( + name = "test_proof_verifier_lib", + hdrs = ["test_proof_verifier.h"], + tags = ["nofips"], + deps = [ + "//source/extensions/quic_listeners/quiche:envoy_quic_proof_verifier_base_lib", + ], +) + envoy_cc_test_library( name = "quic_test_utils_for_envoy_lib", srcs = ["crypto_test_utils_for_envoy.cc"], tags = ["nofips"], deps = [ ":test_proof_source_lib", - "//source/extensions/quic_listeners/quiche:envoy_quic_proof_verifier_lib", + ":test_proof_verifier_lib", "@com_googlesource_quiche//:quic_test_tools_test_utils_interface_lib", ], ) diff --git a/test/extensions/quic_listeners/quiche/crypto_test_utils_for_envoy.cc b/test/extensions/quic_listeners/quiche/crypto_test_utils_for_envoy.cc index c5b7a11d70e3..cafdce0c6227 100644 --- a/test/extensions/quic_listeners/quiche/crypto_test_utils_for_envoy.cc +++ b/test/extensions/quic_listeners/quiche/crypto_test_utils_for_envoy.cc @@ -19,7 +19,7 @@ #endif #include -#include "extensions/quic_listeners/quiche/envoy_quic_fake_proof_verifier.h" +#include "test/extensions/quic_listeners/quiche/test_proof_verifier.h" #include "test/extensions/quic_listeners/quiche/test_proof_source.h" namespace quic { @@ -32,7 +32,7 @@ std::unique_ptr ProofSourceForTesting() { // NOLINTNEXTLINE(readability-identifier-naming) std::unique_ptr ProofVerifierForTesting() { - return std::make_unique(); + return std::make_unique(); } // NOLINTNEXTLINE(readability-identifier-naming) diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_proof_source_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_proof_source_test.cc index e61e34eac270..d896dbb86b7c 100644 --- 
a/test/extensions/quic_listeners/quiche/envoy_quic_proof_source_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_proof_source_test.cc @@ -2,9 +2,10 @@ #include #include -#include "extensions/quic_listeners/quiche/envoy_quic_fake_proof_verifier.h" #include "extensions/quic_listeners/quiche/envoy_quic_proof_source.h" +#include "extensions/quic_listeners/quiche/envoy_quic_proof_verifier.h" #include "extensions/quic_listeners/quiche/envoy_quic_utils.h" +#include "extensions/transport_sockets/tls/context_config_impl.h" #include "test/mocks/network/mocks.h" #include "test/mocks/ssl/mocks.h" @@ -23,29 +24,105 @@ namespace Quic { class TestGetProofCallback : public quic::ProofSource::Callback { public: - TestGetProofCallback(bool& called, std::string leaf_cert_scts, const absl::string_view cert, + TestGetProofCallback(bool& called, bool should_succeed, const std::string& server_config, + quic::QuicTransportVersion& version, quiche::QuicheStringPiece chlo_hash, Network::FilterChain& filter_chain) - : called_(called), expected_leaf_certs_scts_(std::move(leaf_cert_scts)), - expected_leaf_cert_(cert), expected_filter_chain_(filter_chain) {} + : called_(called), should_succeed_(should_succeed), server_config_(server_config), + version_(version), chlo_hash_(chlo_hash), expected_filter_chain_(filter_chain) { + ON_CALL(client_context_config_, cipherSuites) + .WillByDefault(ReturnRef( + Extensions::TransportSockets::Tls::ClientContextConfigImpl::DEFAULT_CIPHER_SUITES)); + ON_CALL(client_context_config_, ecdhCurves) + .WillByDefault( + ReturnRef(Extensions::TransportSockets::Tls::ClientContextConfigImpl::DEFAULT_CURVES)); + const std::string alpn("h2,http/1.1"); + ON_CALL(client_context_config_, alpnProtocols()).WillByDefault(ReturnRef(alpn)); + const std::string empty_string; + ON_CALL(client_context_config_, serverNameIndication()).WillByDefault(ReturnRef(empty_string)); + ON_CALL(client_context_config_, signingAlgorithmsForTest()) + .WillByDefault(ReturnRef(empty_string)); + ON_CALL(client_context_config_, certificateValidationContext()) + .WillByDefault(Return(&cert_validation_ctx_config_)); + + // Getting the last cert in the chain as the root CA cert. 
+ std::string cert_chain(quic::test::kTestCertificateChainPem); + const std::string& root_ca_cert = + cert_chain.substr(cert_chain.rfind("-----BEGIN CERTIFICATE-----")); + const std::string path_string("some_path"); + ON_CALL(cert_validation_ctx_config_, caCert()).WillByDefault(ReturnRef(root_ca_cert)); + ON_CALL(cert_validation_ctx_config_, caCertPath()).WillByDefault(ReturnRef(path_string)); + ON_CALL(cert_validation_ctx_config_, trustChainVerification) + .WillByDefault(Return(envoy::extensions::transport_sockets::tls::v3:: + CertificateValidationContext::VERIFY_TRUST_CHAIN)); + ON_CALL(cert_validation_ctx_config_, allowExpiredCertificate()).WillByDefault(Return(true)); + const std::string crl_list; + ON_CALL(cert_validation_ctx_config_, certificateRevocationList()) + .WillByDefault(ReturnRef(crl_list)); + ON_CALL(cert_validation_ctx_config_, certificateRevocationListPath()) + .WillByDefault(ReturnRef(path_string)); + const std::vector empty_string_list; + ON_CALL(cert_validation_ctx_config_, verifySubjectAltNameList()) + .WillByDefault(ReturnRef(empty_string_list)); + const std::vector san_matchers; + ON_CALL(cert_validation_ctx_config_, subjectAltNameMatchers()) + .WillByDefault(ReturnRef(san_matchers)); + ON_CALL(cert_validation_ctx_config_, verifyCertificateHashList()) + .WillByDefault(ReturnRef(empty_string_list)); + ON_CALL(cert_validation_ctx_config_, verifyCertificateSpkiList()) + .WillByDefault(ReturnRef(empty_string_list)); + verifier_ = + std::make_unique(store_, client_context_config_, time_system_); + } // quic::ProofSource::Callback void Run(bool ok, const quic::QuicReferenceCountedPointer& chain, const quic::QuicCryptoProof& proof, std::unique_ptr details) override { + called_ = true; + if (!should_succeed_) { + EXPECT_FALSE(ok); + return; + }; EXPECT_TRUE(ok); - EXPECT_EQ(expected_leaf_certs_scts_, proof.leaf_cert_scts); EXPECT_EQ(2, chain->certs.size()); - EXPECT_EQ(expected_leaf_cert_, chain->certs[0]); + std::string error; + EXPECT_EQ(quic::QUIC_SUCCESS, + verifier_->VerifyProof("www.example.org", 54321, server_config_, version_, chlo_hash_, + chain->certs, proof.leaf_cert_scts, proof.signature, nullptr, + &error, nullptr, nullptr)) + << error; EXPECT_EQ(&expected_filter_chain_, &static_cast(details.get())->filterChain()); - called_ = true; } private: bool& called_; - std::string expected_leaf_certs_scts_; - absl::string_view expected_leaf_cert_; + bool should_succeed_; + const std::string& server_config_; + const quic::QuicTransportVersion& version_; + quiche::QuicheStringPiece chlo_hash_; Network::FilterChain& expected_filter_chain_; + NiceMock store_; + Event::GlobalTimeSystem time_system_; + NiceMock client_context_config_; + NiceMock cert_validation_ctx_config_; + std::unique_ptr verifier_; +}; + +class TestSignatureCallback : public quic::ProofSource::SignatureCallback { +public: + TestSignatureCallback(bool expect_success) : expect_success_(expect_success) {} + ~TestSignatureCallback() override { EXPECT_TRUE(run_called_); } + + // quic::ProofSource::SignatureCallback + void Run(bool ok, std::string, std::unique_ptr) override { + EXPECT_EQ(expect_success_, ok); + run_called_ = true; + } + +private: + bool expect_success_; + bool run_called_{false}; }; class EnvoyQuicProofSourceTest : public ::testing::Test { @@ -53,17 +130,55 @@ class EnvoyQuicProofSourceTest : public ::testing::Test { EnvoyQuicProofSourceTest() : server_address_(quic::QuicIpAddress::Loopback4(), 12345), client_address_(quic::QuicIpAddress::Loopback4(), 54321), + 
transport_socket_factory_(std::make_unique()), listener_stats_({ALL_LISTENER_STATS(POOL_COUNTER(listener_config_.listenerScope()), POOL_GAUGE(listener_config_.listenerScope()), POOL_HISTOGRAM(listener_config_.listenerScope()))}), proof_source_(listen_socket_, filter_chain_manager_, listener_stats_) {} + void expectCertChainAndPrivateKey(const std::string& cert, bool expect_private_key) { + EXPECT_CALL(listen_socket_, ioHandle()).Times(expect_private_key ? 2u : 1u); + EXPECT_CALL(filter_chain_manager_, findFilterChain(_)) + .WillRepeatedly(Invoke([&](const Network::ConnectionSocket& connection_socket) { + EXPECT_EQ(*quicAddressToEnvoyAddressInstance(server_address_), + *connection_socket.localAddress()); + EXPECT_EQ(*quicAddressToEnvoyAddressInstance(client_address_), + *connection_socket.remoteAddress()); + EXPECT_EQ(Extensions::TransportSockets::TransportProtocolNames::get().Quic, + connection_socket.detectedTransportProtocol()); + EXPECT_EQ("h2", connection_socket.requestedApplicationProtocols()[0]); + return &filter_chain_; + })); + EXPECT_CALL(filter_chain_, transportSocketFactory()) + .WillRepeatedly(ReturnRef(transport_socket_factory_)); + + std::vector> tls_cert_configs{ + std::reference_wrapper(tls_cert_config_)}; + EXPECT_CALL(dynamic_cast( + transport_socket_factory_.serverContextConfig()), + tlsCertificates()) + .WillRepeatedly(Return(tls_cert_configs)); + EXPECT_CALL(tls_cert_config_, certificateChain()).WillOnce(ReturnRef(cert)); + if (expect_private_key) { + EXPECT_CALL(tls_cert_config_, privateKey()).WillOnce(ReturnRef(pkey_)); + } + } + + void testGetProof(bool expect_success) { + bool called = false; + auto callback = std::make_unique(called, expect_success, server_config_, + version_, chlo_hash_, filter_chain_); + proof_source_.GetProof(server_address_, client_address_, hostname_, server_config_, version_, + chlo_hash_, std::move(callback)); + EXPECT_TRUE(called); + } + protected: std::string hostname_{"www.fake.com"}; quic::QuicSocketAddress server_address_; quic::QuicSocketAddress client_address_; quic::QuicTransportVersion version_{quic::QUIC_VERSION_UNSUPPORTED}; - quiche::QuicheStringPiece chlo_hash_{""}; + quiche::QuicheStringPiece chlo_hash_{"aaaaa"}; std::string server_config_{"Server Config"}; std::string expected_certs_{quic::test::kTestCertificateChainPem}; std::string pkey_{quic::test::kTestCertificatePrivateKeyPem}; @@ -71,27 +186,66 @@ class EnvoyQuicProofSourceTest : public ::testing::Test { Network::MockFilterChainManager filter_chain_manager_; Network::MockListenSocket listen_socket_; testing::NiceMock listener_config_; + QuicServerTransportSocketFactory transport_socket_factory_; + Ssl::MockTlsCertificateConfig tls_cert_config_; Server::ListenerStats listener_stats_; EnvoyQuicProofSource proof_source_; - EnvoyQuicFakeProofVerifier proof_verifier_; }; TEST_F(EnvoyQuicProofSourceTest, TestGetProof) { + expectCertChainAndPrivateKey(expected_certs_, true); + testGetProof(true); +} + +TEST_F(EnvoyQuicProofSourceTest, GetProofFailNoFilterChain) { bool called = false; - auto callback = std::make_unique( - called, "Fake timestamp", quic::test::kTestCertificate, filter_chain_); - EXPECT_CALL(listen_socket_, ioHandle()).Times(2); + auto callback = std::make_unique(called, false, server_config_, version_, + chlo_hash_, filter_chain_); + EXPECT_CALL(listen_socket_, ioHandle()); + EXPECT_CALL(filter_chain_manager_, findFilterChain(_)) + .WillRepeatedly(Invoke([&](const Network::ConnectionSocket&) { return nullptr; })); + proof_source_.GetProof(server_address_, 
client_address_, hostname_, server_config_, version_, + chlo_hash_, std::move(callback)); + EXPECT_TRUE(called); +} + +TEST_F(EnvoyQuicProofSourceTest, GetProofFailInvalidCert) { + std::string invalid_cert{R"(-----BEGIN CERTIFICATE----- + invalid certificate + -----END CERTIFICATE-----)"}; + expectCertChainAndPrivateKey(invalid_cert, false); + testGetProof(false); +} + +TEST_F(EnvoyQuicProofSourceTest, GetProofFailInvalidPublicKeyInCert) { + // This is a valid cert with RSA public key. But we don't support RSA key with + // length < 1024. + std::string cert_with_rsa_1024{R"(-----BEGIN CERTIFICATE----- +MIIC2jCCAkOgAwIBAgIUDBHEwlCvLGh3w0O8VwIW+CjYXY8wDQYJKoZIhvcNAQEL +BQAwfzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAk1BMRIwEAYDVQQHDAlDYW1icmlk +Z2UxDzANBgNVBAoMBkdvb2dsZTEOMAwGA1UECwwFZW52b3kxDTALBgNVBAMMBHRl +c3QxHzAdBgkqhkiG9w0BCQEWEGRhbnpoQGdvb2dsZS5jb20wHhcNMjAwODA0MTg1 +OTQ4WhcNMjEwODA0MTg1OTQ4WjB/MQswCQYDVQQGEwJVUzELMAkGA1UECAwCTUEx +EjAQBgNVBAcMCUNhbWJyaWRnZTEPMA0GA1UECgwGR29vZ2xlMQ4wDAYDVQQLDAVl +bnZveTENMAsGA1UEAwwEdGVzdDEfMB0GCSqGSIb3DQEJARYQZGFuemhAZ29vZ2xl +LmNvbTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAykCZNjxws+sNfnp18nsp ++7LN81J/RSwAHLkGnwEtd3OxSUuiCYHgYlyuEAwJdf99+SaFrgcA4LvYJ/Mhm/fZ +msnpfsAvoQ49+ax0fm1x56ii4KgNiu9iFsWwwVmkHkgjlRcRsmhr4WeIf14Yvpqs +JNsbNVSCZ4GLQ2V6BqIHlhcCAwEAAaNTMFEwHQYDVR0OBBYEFDO1KPYcdRmeKDvL +H2Yzj8el2Xe1MB8GA1UdIwQYMBaAFDO1KPYcdRmeKDvLH2Yzj8el2Xe1MA8GA1Ud +EwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADgYEAnwWVmwSK9TDml7oHGBavzOC1 +f/lOd5zz2e7Tu2pUtx1sX1tlKph1D0ANpJwxRV78R2hjmynLSl7h4Ual9NMubqkD +x96rVeUbRJ/qU4//nNM/XQa9vIAIcTZ0jFhmb0c3R4rmoqqC3vkSDwtaE5yuS5T4 +GUy+n0vQNB0cXGzgcGI= +-----END CERTIFICATE-----)"}; + expectCertChainAndPrivateKey(cert_with_rsa_1024, false); + testGetProof(false); +} + +TEST_F(EnvoyQuicProofSourceTest, InvalidPrivateKey) { + EXPECT_CALL(listen_socket_, ioHandle()); EXPECT_CALL(filter_chain_manager_, findFilterChain(_)) - .WillRepeatedly(Invoke([&](const Network::ConnectionSocket& connection_socket) { - EXPECT_EQ(*quicAddressToEnvoyAddressInstance(server_address_), - *connection_socket.localAddress()); - EXPECT_EQ(*quicAddressToEnvoyAddressInstance(client_address_), - *connection_socket.remoteAddress()); - EXPECT_EQ(Extensions::TransportSockets::TransportProtocolNames::get().Quic, - connection_socket.detectedTransportProtocol()); - EXPECT_EQ("h2", connection_socket.requestedApplicationProtocols()[0]); - return &filter_chain_; - })); + .WillOnce(Invoke([&](const Network::ConnectionSocket&) { return &filter_chain_; })); auto server_context_config = std::make_unique(); auto server_context_config_ptr = server_context_config.get(); QuicServerTransportSocketFactory transport_socket_factory(std::move(server_context_config)); @@ -103,20 +257,11 @@ TEST_F(EnvoyQuicProofSourceTest, TestGetProof) { std::reference_wrapper(tls_cert_config)}; EXPECT_CALL(*server_context_config_ptr, tlsCertificates()) .WillRepeatedly(Return(tls_cert_configs)); - EXPECT_CALL(tls_cert_config, certificateChain()).WillOnce(ReturnRef(expected_certs_)); - EXPECT_CALL(tls_cert_config, privateKey()).WillOnce(ReturnRef(pkey_)); - proof_source_.GetProof(server_address_, client_address_, hostname_, server_config_, version_, - chlo_hash_, std::move(callback)); - EXPECT_TRUE(called); - - EXPECT_EQ(quic::QUIC_SUCCESS, - proof_verifier_.VerifyProof(hostname_, /*port=*/0, server_config_, version_, chlo_hash_, - {"Fake cert"}, "", "fake signature", nullptr, nullptr, - nullptr, nullptr)); - EXPECT_EQ(quic::QUIC_FAILURE, - proof_verifier_.VerifyProof(hostname_, /*port=*/0, server_config_, version_, 
chlo_hash_, - {"Fake cert", "Unexpected cert"}, "Fake timestamp", - "fake signature", nullptr, nullptr, nullptr, nullptr)); + std::string invalid_pkey("abcdefg"); + EXPECT_CALL(tls_cert_config, privateKey()).WillOnce(ReturnRef(invalid_pkey)); + proof_source_.ComputeTlsSignature(server_address_, client_address_, hostname_, + SSL_SIGN_RSA_PSS_RSAE_SHA256, "payload", + std::make_unique(false)); } } // namespace Quic diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_proof_verifier_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_proof_verifier_test.cc new file mode 100644 index 000000000000..4a1dfe144dd3 --- /dev/null +++ b/test/extensions/quic_listeners/quiche/envoy_quic_proof_verifier_test.cc @@ -0,0 +1,252 @@ +#include +#include + +#include "extensions/quic_listeners/quiche/envoy_quic_proof_verifier.h" +#include "extensions/transport_sockets/tls/context_config_impl.h" + +#include "test/mocks/ssl/mocks.h" +#include "test/mocks/stats/mocks.h" +#include "test/test_common/test_time.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "quiche/quic/core/crypto/certificate_view.h" +#include "quiche/quic/test_tools/test_certificates.h" + +using testing::NiceMock; +using testing::Return; +using testing::ReturnRef; + +namespace Envoy { +namespace Quic { + +class EnvoyQuicProofVerifierTest : public testing::Test { +public: + EnvoyQuicProofVerifierTest() + : root_ca_cert_(cert_chain_.substr(cert_chain_.rfind("-----BEGIN CERTIFICATE-----"))), + leaf_cert_([=]() { + std::stringstream pem_stream(cert_chain_); + std::vector chain = quic::CertificateView::LoadPemFromStream(&pem_stream); + return chain[0]; + }()) { + ON_CALL(client_context_config_, cipherSuites) + .WillByDefault(ReturnRef( + Extensions::TransportSockets::Tls::ClientContextConfigImpl::DEFAULT_CIPHER_SUITES)); + ON_CALL(client_context_config_, ecdhCurves) + .WillByDefault( + ReturnRef(Extensions::TransportSockets::Tls::ClientContextConfigImpl::DEFAULT_CURVES)); + ON_CALL(client_context_config_, alpnProtocols()).WillByDefault(ReturnRef(alpn_)); + ON_CALL(client_context_config_, serverNameIndication()).WillByDefault(ReturnRef(empty_string_)); + ON_CALL(client_context_config_, signingAlgorithmsForTest()).WillByDefault(ReturnRef(sig_algs_)); + ON_CALL(client_context_config_, certificateValidationContext()) + .WillByDefault(Return(&cert_validation_ctx_config_)); + } + + // Since this cert chain contains an expired cert, we can flip allow_expired_cert to test the code + // paths for BoringSSL cert verification success and failure. + void configCertVerificationDetails(bool allow_expired_cert) { + // Getting the last cert in the chain as the root CA cert. 
+ EXPECT_CALL(cert_validation_ctx_config_, caCert()).WillRepeatedly(ReturnRef(root_ca_cert_)); + EXPECT_CALL(cert_validation_ctx_config_, caCertPath()).WillRepeatedly(ReturnRef(path_string_)); + EXPECT_CALL(cert_validation_ctx_config_, trustChainVerification) + .WillRepeatedly(Return(envoy::extensions::transport_sockets::tls::v3:: + CertificateValidationContext::VERIFY_TRUST_CHAIN)); + EXPECT_CALL(cert_validation_ctx_config_, allowExpiredCertificate()) + .WillRepeatedly(Return(allow_expired_cert)); + EXPECT_CALL(cert_validation_ctx_config_, certificateRevocationList()) + .WillRepeatedly(ReturnRef(empty_string_)); + EXPECT_CALL(cert_validation_ctx_config_, certificateRevocationListPath()) + .WillRepeatedly(ReturnRef(path_string_)); + EXPECT_CALL(cert_validation_ctx_config_, verifySubjectAltNameList()) + .WillRepeatedly(ReturnRef(empty_string_list_)); + EXPECT_CALL(cert_validation_ctx_config_, subjectAltNameMatchers()) + .WillRepeatedly(ReturnRef(san_matchers_)); + EXPECT_CALL(cert_validation_ctx_config_, verifyCertificateHashList()) + .WillRepeatedly(ReturnRef(empty_string_list_)); + EXPECT_CALL(cert_validation_ctx_config_, verifyCertificateSpkiList()) + .WillRepeatedly(ReturnRef(empty_string_list_)); + verifier_ = + std::make_unique(store_, client_context_config_, time_system_); + } + +protected: + const std::string path_string_{"some_path"}; + const std::string alpn_{"h2,http/1.1"}; + const std::string sig_algs_{"rsa_pss_rsae_sha256"}; + const std::vector san_matchers_; + const std::string empty_string_; + const std::vector empty_string_list_; + const std::string cert_chain_{quic::test::kTestCertificateChainPem}; + const std::string root_ca_cert_; + const std::string leaf_cert_; + NiceMock store_; + Event::GlobalTimeSystem time_system_; + NiceMock client_context_config_; + Ssl::MockCertificateValidationContextConfig cert_validation_ctx_config_; + std::unique_ptr verifier_; +}; + +TEST_F(EnvoyQuicProofVerifierTest, VerifyCertChainSuccess) { + configCertVerificationDetails(true); + std::unique_ptr cert_view = + quic::CertificateView::ParseSingleCertificate(leaf_cert_); + const std::string ocsp_response; + const std::string cert_sct; + std::string error_details; + EXPECT_EQ(quic::QUIC_SUCCESS, + verifier_->VerifyCertChain(std::string(cert_view->subject_alt_name_domains()[0]), 54321, + {leaf_cert_}, ocsp_response, cert_sct, nullptr, + &error_details, nullptr, nullptr)) + << error_details; +} + +TEST_F(EnvoyQuicProofVerifierTest, VerifyCertChainFailureFromSsl) { + configCertVerificationDetails(false); + std::unique_ptr cert_view = + quic::CertificateView::ParseSingleCertificate(leaf_cert_); + const std::string ocsp_response; + const std::string cert_sct; + std::string error_details; + EXPECT_EQ(quic::QUIC_FAILURE, + verifier_->VerifyCertChain(std::string(cert_view->subject_alt_name_domains()[0]), 54321, + {leaf_cert_}, ocsp_response, cert_sct, nullptr, + &error_details, nullptr, nullptr)) + << error_details; + EXPECT_EQ("X509_verify_cert: certificate verification error at depth 1: certificate has expired", + error_details); +} + +TEST_F(EnvoyQuicProofVerifierTest, VerifyCertChainFailureInvalidLeafCert) { + configCertVerificationDetails(true); + const std::string ocsp_response; + const std::string cert_sct; + std::string error_details; + const std::vector certs{"invalid leaf cert"}; + EXPECT_EQ(quic::QUIC_FAILURE, + verifier_->VerifyCertChain("www.google.com", 54321, certs, ocsp_response, cert_sct, + nullptr, &error_details, nullptr, nullptr)); + EXPECT_EQ("d2i_X509: fail to parse DER", 
error_details); +} + +TEST_F(EnvoyQuicProofVerifierTest, VerifyCertChainFailureLeafCertWithGarbage) { + configCertVerificationDetails(true); + std::unique_ptr cert_view = + quic::CertificateView::ParseSingleCertificate(leaf_cert_); + const std::string ocsp_response; + const std::string cert_sct; + std::string cert_with_trailing_garbage = absl::StrCat(leaf_cert_, "AAAAAA"); + std::string error_details; + EXPECT_EQ(quic::QUIC_FAILURE, + verifier_->VerifyCertChain(std::string(cert_view->subject_alt_name_domains()[0]), 54321, + {cert_with_trailing_garbage}, ocsp_response, cert_sct, + nullptr, &error_details, nullptr, nullptr)) + << error_details; + EXPECT_EQ("There is trailing garbage in DER.", error_details); +} + +TEST_F(EnvoyQuicProofVerifierTest, VerifyCertChainFailureInvalidHost) { + configCertVerificationDetails(true); + std::unique_ptr cert_view = + quic::CertificateView::ParseSingleCertificate(leaf_cert_); + const std::string ocsp_response; + const std::string cert_sct; + std::string error_details; + EXPECT_EQ(quic::QUIC_FAILURE, + verifier_->VerifyCertChain("unknown.org", 54321, {leaf_cert_}, ocsp_response, cert_sct, + nullptr, &error_details, nullptr, nullptr)) + << error_details; + EXPECT_EQ("Leaf certificate doesn't match hostname: unknown.org", error_details); +} + +TEST_F(EnvoyQuicProofVerifierTest, VerifyProofFailureEmptyCertChain) { + configCertVerificationDetails(true); + std::unique_ptr cert_view = + quic::CertificateView::ParseSingleCertificate(leaf_cert_); + quic::QuicTransportVersion version{quic::QUIC_VERSION_UNSUPPORTED}; + quiche::QuicheStringPiece chlo_hash{"aaaaa"}; + std::string server_config{"Server Config"}; + const std::string ocsp_response; + const std::string cert_sct; + std::string error_details; + const std::vector certs; + EXPECT_EQ(quic::QUIC_FAILURE, + verifier_->VerifyProof(std::string(cert_view->subject_alt_name_domains()[0]), 54321, + server_config, version, chlo_hash, certs, cert_sct, "signature", + nullptr, &error_details, nullptr, nullptr)); + EXPECT_EQ("Received empty cert chain.", error_details); +} + +TEST_F(EnvoyQuicProofVerifierTest, VerifyProofFailureInvalidLeafCert) { + configCertVerificationDetails(true); + std::unique_ptr cert_view = + quic::CertificateView::ParseSingleCertificate(leaf_cert_); + quic::QuicTransportVersion version{quic::QUIC_VERSION_UNSUPPORTED}; + quiche::QuicheStringPiece chlo_hash{"aaaaa"}; + std::string server_config{"Server Config"}; + const std::string ocsp_response; + const std::string cert_sct; + std::string error_details; + const std::vector certs{"invalid leaf cert"}; + EXPECT_EQ(quic::QUIC_FAILURE, + verifier_->VerifyProof(std::string(cert_view->subject_alt_name_domains()[0]), 54321, + server_config, version, chlo_hash, certs, cert_sct, "signature", + nullptr, &error_details, nullptr, nullptr)); + EXPECT_EQ("Invalid leaf cert.", error_details); +} + +TEST_F(EnvoyQuicProofVerifierTest, VerifyProofFailureUnsupportedECKey) { + configCertVerificationDetails(true); + quic::QuicTransportVersion version{quic::QUIC_VERSION_UNSUPPORTED}; + quiche::QuicheStringPiece chlo_hash{"aaaaa"}; + std::string server_config{"Server Config"}; + const std::string ocsp_response; + const std::string cert_sct; + std::string error_details; + // This is a EC cert with secp384r1 curve which is not supported by Envoy. 
+ const std::string certs{R"(-----BEGIN CERTIFICATE----- +MIICkDCCAhagAwIBAgIUTZbykU9eQL3GdrNlodxrOJDecIQwCgYIKoZIzj0EAwIw +fzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAk1BMRIwEAYDVQQHDAlDYW1icmlkZ2Ux +DzANBgNVBAoMBkdvb2dsZTEOMAwGA1UECwwFZW52b3kxDTALBgNVBAMMBHRlc3Qx +HzAdBgkqhkiG9w0BCQEWEGRhbnpoQGdvb2dsZS5jb20wHhcNMjAwODA1MjAyMDI0 +WhcNMjIwODA1MjAyMDI0WjB/MQswCQYDVQQGEwJVUzELMAkGA1UECAwCTUExEjAQ +BgNVBAcMCUNhbWJyaWRnZTEPMA0GA1UECgwGR29vZ2xlMQ4wDAYDVQQLDAVlbnZv +eTENMAsGA1UEAwwEdGVzdDEfMB0GCSqGSIb3DQEJARYQZGFuemhAZ29vZ2xlLmNv +bTB2MBAGByqGSM49AgEGBSuBBAAiA2IABGRaEAtVq+xHXfsF4R/j+mqVN2E29ZYL +oFlvnelKeeT2B51bSfUv+X+Ci1BSa2OxPCVS6o0vpcF6YOlz4CS7QcXZIoRfhsv7 +O2Hz/IdxAPhX/gdK/70T1x+V/6nvIHiiw6NTMFEwHQYDVR0OBBYEFF75rDce6xNJ +GfpKbUg4emG2KWRMMB8GA1UdIwQYMBaAFF75rDce6xNJGfpKbUg4emG2KWRMMA8G +A1UdEwEB/wQFMAMBAf8wCgYIKoZIzj0EAwIDaAAwZQIxAIyZghTK3cmyrRWkxfQ7 +xEc11gujcT8nbytYbM6jodKwcbtR6SOmLx2ychXrCMm2ZAIwXqmrTYBtrbqb3mBx +VdGXMAjeXhnOnPvmDi5hUz/uvI+Pg6cNmUoCRwSCnK/DazhA +-----END CERTIFICATE-----)"}; + std::stringstream pem_stream(certs); + std::vector chain = quic::CertificateView::LoadPemFromStream(&pem_stream); + std::unique_ptr cert_view = + quic::CertificateView::ParseSingleCertificate(chain[0]); + ASSERT(cert_view); + EXPECT_EQ(quic::QUIC_FAILURE, + verifier_->VerifyProof("www.google.com", 54321, server_config, version, chlo_hash, + chain, cert_sct, "signature", nullptr, &error_details, nullptr, + nullptr)); + EXPECT_EQ("Invalid leaf cert, only P-256 ECDSA certificates are supported", error_details); +} + +TEST_F(EnvoyQuicProofVerifierTest, VerifyProofFailureInvalidSignature) { + configCertVerificationDetails(true); + std::unique_ptr cert_view = + quic::CertificateView::ParseSingleCertificate(leaf_cert_); + quic::QuicTransportVersion version{quic::QUIC_VERSION_UNSUPPORTED}; + quiche::QuicheStringPiece chlo_hash{"aaaaa"}; + std::string server_config{"Server Config"}; + const std::string ocsp_response; + const std::string cert_sct; + std::string error_details; + EXPECT_EQ(quic::QUIC_FAILURE, + verifier_->VerifyProof(std::string(cert_view->subject_alt_name_domains()[0]), 54321, + server_config, version, chlo_hash, {leaf_cert_}, cert_sct, + "signature", nullptr, &error_details, nullptr, nullptr)); + EXPECT_EQ("Signature is not valid.", error_details); +} + +} // namespace Quic +} // namespace Envoy diff --git a/test/extensions/quic_listeners/quiche/integration/quic_http_integration_test.cc b/test/extensions/quic_listeners/quiche/integration/quic_http_integration_test.cc index 05fb1e61a7aa..85688dbd0835 100644 --- a/test/extensions/quic_listeners/quiche/integration/quic_http_integration_test.cc +++ b/test/extensions/quic_listeners/quiche/integration/quic_http_integration_test.cc @@ -1,3 +1,5 @@ +#include + #include #include "envoy/config/bootstrap/v3/bootstrap.pb.h" @@ -7,6 +9,7 @@ #include "test/config/utility.h" #include "test/integration/http_integration.h" +#include "test/integration/ssl_utility.h" #include "test/test_common/utility.h" #pragma GCC diagnostic push @@ -23,12 +26,14 @@ #include "extensions/quic_listeners/quiche/envoy_quic_client_session.h" #include "extensions/quic_listeners/quiche/envoy_quic_client_connection.h" -#include "extensions/quic_listeners/quiche/envoy_quic_fake_proof_verifier.h" +#include "extensions/quic_listeners/quiche/envoy_quic_proof_verifier.h" #include "extensions/quic_listeners/quiche/envoy_quic_connection_helper.h" #include "extensions/quic_listeners/quiche/envoy_quic_alarm_factory.h" #include "extensions/quic_listeners/quiche/envoy_quic_packet_writer.h" #include 
"extensions/quic_listeners/quiche/envoy_quic_utils.h" +#include "extensions/quic_listeners/quiche/quic_transport_socket_factory.h" #include "test/extensions/quic_listeners/quiche/test_utils.h" +#include "extensions/transport_sockets/tls/context_config_impl.h" namespace Envoy { namespace Quic { @@ -44,6 +49,43 @@ class CodecClientCallbacksForTest : public Http::CodecClientCallbacks { Http::StreamResetReason last_stream_reset_reason_{Http::StreamResetReason::LocalReset}; }; +std::unique_ptr +createQuicClientTransportSocketFactory(const Ssl::ClientSslTransportOptions& options, Api::Api& api, + const std::string& san_to_match) { + std::string yaml_plain = R"EOF( + common_tls_context: + validation_context: + trusted_ca: + filename: "{{ test_rundir }}/test/config/integration/certs/cacert.pem" +)EOF"; + envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context; + TestUtility::loadFromYaml(TestEnvironment::substitute(yaml_plain), tls_context); + auto* common_context = tls_context.mutable_common_tls_context(); + + if (options.alpn_) { + common_context->add_alpn_protocols("h3"); + } + if (options.san_) { + common_context->mutable_validation_context()->add_match_subject_alt_names()->set_exact( + san_to_match); + } + for (const std::string& cipher_suite : options.cipher_suites_) { + common_context->mutable_tls_params()->add_cipher_suites(cipher_suite); + } + if (!options.sni_.empty()) { + tls_context.set_sni(options.sni_); + } + + common_context->mutable_tls_params()->set_tls_minimum_protocol_version(options.tls_version_); + common_context->mutable_tls_params()->set_tls_maximum_protocol_version(options.tls_version_); + + NiceMock mock_factory_ctx; + ON_CALL(mock_factory_ctx, api()).WillByDefault(testing::ReturnRef(api)); + auto cfg = std::make_unique( + tls_context, options.sigalgs_, mock_factory_ctx); + return std::make_unique(std::move(cfg)); +} + class QuicHttpIntegrationTest : public HttpIntegrationTest, public QuicMultiVersionTest { public: QuicHttpIntegrationTest() @@ -59,8 +101,7 @@ class QuicHttpIntegrationTest : public HttpIntegrationTest, public QuicMultiVers SetQuicReloadableFlag(quic_disable_version_draft_25, !use_http3); return quic::CurrentSupportedVersions(); }()), - crypto_config_(std::make_unique()), conn_helper_(*dispatcher_), - alarm_factory_(*dispatcher_, *conn_helper_.GetClock()), + conn_helper_(*dispatcher_), alarm_factory_(*dispatcher_, *conn_helper_.GetClock()), injected_resource_filename_(TestEnvironment::temporaryPath("injected_resource")), file_updater_(injected_resource_filename_) {} @@ -81,7 +122,7 @@ class QuicHttpIntegrationTest : public HttpIntegrationTest, public QuicMultiVers quic::ParsedQuicVersionVector{supported_versions_[0]}, local_addr, *dispatcher_, nullptr); quic_connection_ = connection.get(); auto session = std::make_unique( - quic_config_, supported_versions_, std::move(connection), server_id_, &crypto_config_, + quic_config_, supported_versions_, std::move(connection), server_id_, crypto_config_.get(), &push_promise_index_, *dispatcher_, 0); session->Initialize(); return session; @@ -170,16 +211,24 @@ class QuicHttpIntegrationTest : public HttpIntegrationTest, public QuicMultiVers updateResource(0); HttpIntegrationTest::initialize(); registerTestServerPorts({"http"}); + crypto_config_ = + std::make_unique(std::make_unique( + stats_store_, + createQuicClientTransportSocketFactory( + Ssl::ClientSslTransportOptions().setAlpn(true).setSan(true), *api_, san_to_match_) + ->clientContextConfig(), + timeSystem())); } void updateResource(double 
pressure) { file_updater_.update(absl::StrCat(pressure)); } protected: quic::QuicConfig quic_config_; - quic::QuicServerId server_id_{"example.com", 443, false}; + quic::QuicServerId server_id_{"lyft.com", 443, false}; + std::string san_to_match_{"spiffe://lyft.com/backend-team"}; quic::QuicClientPushPromiseIndex push_promise_index_; quic::ParsedQuicVersionVector supported_versions_; - quic::QuicCryptoClientConfig crypto_config_; + std::unique_ptr crypto_config_; EnvoyQuicConnectionHelper conn_helper_; EnvoyQuicAlarmFactory alarm_factory_; CodecClientCallbacksForTest client_codec_callback_; @@ -461,5 +510,19 @@ TEST_P(QuicHttpIntegrationTest, AdminDrainDrainsListeners) { testAdminDrain(Http::CodecClient::Type::HTTP1); } +TEST_P(QuicHttpIntegrationTest, CertVerificationFailure) { + san_to_match_ = "www.random_domain.com"; + initialize(); + codec_client_ = makeRawHttpConnection(makeClientConnection((lookupPort("http"))), absl::nullopt); + EXPECT_FALSE(codec_client_->connected()); + std::string failure_reason = + GetParam().second == QuicVersionType::GquicQuicCrypto + ? "QUIC_PROOF_INVALID with details: Proof invalid: X509_verify_cert: certificate " + "verification error at depth 0: ok" + : "QUIC_HANDSHAKE_FAILED with details: TLS handshake failure (ENCRYPTION_HANDSHAKE) 46: " + "certificate unknown"; + EXPECT_EQ(failure_reason, codec_client_->connection()->transportFailureReason()); +} + } // namespace Quic } // namespace Envoy diff --git a/test/extensions/quic_listeners/quiche/test_proof_source.h b/test/extensions/quic_listeners/quiche/test_proof_source.h index ad8bae60a540..8b1baf920d69 100644 --- a/test/extensions/quic_listeners/quiche/test_proof_source.h +++ b/test/extensions/quic_listeners/quiche/test_proof_source.h @@ -15,14 +15,14 @@ #include #include "test/mocks/network/mocks.h" -#include "extensions/quic_listeners/quiche/envoy_quic_fake_proof_source.h" +#include "extensions/quic_listeners/quiche/envoy_quic_proof_source_base.h" namespace Envoy { namespace Quic { // A test ProofSource which always provide a hard-coded test certificate in // QUICHE and a fake signature. 
-class TestProofSource : public Quic::EnvoyQuicFakeProofSource { +class TestProofSource : public EnvoyQuicProofSourceBase { public: quic::QuicReferenceCountedPointer GetCertChain(const quic::QuicSocketAddress& /*server_address*/, @@ -31,18 +31,18 @@ class TestProofSource : public Quic::EnvoyQuicFakeProofSource { return cert_chain_; } - void - ComputeTlsSignature(const quic::QuicSocketAddress& /*server_address*/, - const quic::QuicSocketAddress& /*client_address*/, - const std::string& /*hostname*/, uint16_t /*signature_algorithm*/, - quiche::QuicheStringPiece in, - std::unique_ptr callback) override { + const Network::MockFilterChain& filterChain() const { return filter_chain_; } + +protected: + void signPayload(const quic::QuicSocketAddress& /*server_address*/, + const quic::QuicSocketAddress& /*client_address*/, + const std::string& /*hostname*/, uint16_t /*signature_algorithm*/, + quiche::QuicheStringPiece in, + std::unique_ptr callback) override { callback->Run(true, absl::StrCat("Fake signature for { ", in, " }"), std::make_unique(filter_chain_)); } - const Network::MockFilterChain& filterChain() const { return filter_chain_; } - private: quic::QuicReferenceCountedPointer cert_chain_{ new quic::ProofSource::Chain( diff --git a/test/extensions/quic_listeners/quiche/test_proof_verifier.h b/test/extensions/quic_listeners/quiche/test_proof_verifier.h new file mode 100644 index 000000000000..77dada22d1cd --- /dev/null +++ b/test/extensions/quic_listeners/quiche/test_proof_verifier.h @@ -0,0 +1,30 @@ +#include "extensions/quic_listeners/quiche/envoy_quic_proof_verifier_base.h" + +namespace Envoy { +namespace Quic { + +// A test quic::ProofVerifier which always approves the certs and signature. +class TestProofVerifier : public EnvoyQuicProofVerifierBase { +public: + // quic::ProofVerifier + quic::QuicAsyncStatus + VerifyCertChain(const std::string& /*hostname*/, const uint16_t /*port*/, + const std::vector& /*certs*/, const std::string& /*ocsp_response*/, + const std::string& /*cert_sct*/, const quic::ProofVerifyContext* /*context*/, + std::string* /*error_details*/, + std::unique_ptr* /*details*/, + std::unique_ptr /*callback*/) override { + return quic::QUIC_SUCCESS; + } + +protected: + // EnvoyQuicProofVerifierBase + bool verifySignature(const std::string& /*server_config*/, absl::string_view /*chlo_hash*/, + const std::string& /*cert*/, const std::string& /*signature*/, + std::string* /*error_details*/) override { + return true; + } +}; + +} // namespace Quic +} // namespace Envoy diff --git a/test/mocks/ssl/mocks.h b/test/mocks/ssl/mocks.h index c3bc9b2f8ecd..7567e5807cff 100644 --- a/test/mocks/ssl/mocks.h +++ b/test/mocks/ssl/mocks.h @@ -129,6 +129,23 @@ class MockTlsCertificateConfig : public TlsCertificateConfig { MOCK_METHOD(Envoy::Ssl::PrivateKeyMethodProviderSharedPtr, privateKeyMethod, (), (const)); }; +class MockCertificateValidationContextConfig : public CertificateValidationContextConfig { +public: + MOCK_METHOD(const std::string&, caCert, (), (const)); + MOCK_METHOD(const std::string&, caCertPath, (), (const)); + MOCK_METHOD(const std::string&, certificateRevocationList, (), (const)); + MOCK_METHOD(const std::string&, certificateRevocationListPath, (), (const)); + MOCK_METHOD(const std::vector&, verifySubjectAltNameList, (), (const)); + MOCK_METHOD(const std::vector&, subjectAltNameMatchers, + (), (const)); + MOCK_METHOD(const std::vector&, verifyCertificateHashList, (), (const)); + MOCK_METHOD(const std::vector&, verifyCertificateSpkiList, (), (const)); + MOCK_METHOD(bool, 
allowExpiredCertificate, (), (const)); + MOCK_METHOD(envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext:: + TrustChainVerification, + trustChainVerification, (), (const)); +}; + class MockPrivateKeyMethodManager : public PrivateKeyMethodManager { public: MockPrivateKeyMethodManager(); diff --git a/tools/spelling/spelling_dictionary.txt b/tools/spelling/spelling_dictionary.txt index d855084a1fd7..cf99f6b3f17a 100644 --- a/tools/spelling/spelling_dictionary.txt +++ b/tools/spelling/spelling_dictionary.txt @@ -990,6 +990,7 @@ sched schedulable schemas scopekey +secp sendmsg sendmmsg sendto From 739f48476ac29badd2ef6a52cf70a8cfeeeef485 Mon Sep 17 00:00:00 2001 From: ASOP Date: Fri, 7 Aug 2020 14:04:48 -0700 Subject: [PATCH 884/909] Init manager checking unready targets with target-aware watchers (#12035) The current init manager only knows how many registered targets are not ready via its ```count_``` field. This pull request mainly helps init manager to check which specific targets are not ready. Key idea: Init manager stores the **unready_target_name:count** key-value pair in a hash map to check which registered targets are not ready. Enable passing target name into watcher's callback function. 1. When a new target is added into the targets list, increase the occurrence of the target's name in the hash map ```++target_name_count_[target_name]```. 2. When a target is ready, it informs the manager's watcher and finally, the watcher notifies the manager(that's why we need to pass a ```string_view name``` parameter to the manager's callback). And decrease the count of the target's name by 1. ```--target_name_count_[target_name]```. **Most recent update**: Wrapped the old ReadyFn type function into the new ```TargetAwareReadyFn```. Now ```WatcherImpl``` can take both types of function in its Ctor. ~Split WatcherImpl into two classes, the old one and a new ```TargetAwareWatcherImpl```. As we now limit the ```watcher_``` inside ```ManagerImpl``` to receive a ```string_view``` parameter, we declare it to be of ```TargetAwareWatcherImpl```.~ In the current code base, watchers are basically constructed with ```std::function```. In order to adopt the new feature(pass a ```string_view``` parameter to the manager's callback, I wrote a new constructor for ```WatcherImpl``` with ```std::function``` parameter. In addition, I added a ```onTargetReadySendTargetName(string_view)``` adopted from the original callback ```onTargetReady()```. So now both types of callbacks are supported. I've also updated code in the constructor of ```ManagerImpl```, and now the ```watcher_``` of a manager is constructed with the new type of function(with string_view parameter). Therefore, the manager will always get target_name from the watcher's callback. 
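For illustration only (not part of this patch): a minimal standalone sketch of the unready-target bookkeeping described above. The member names (```target_names_count_```, ```count_```, ```onTargetReady```) mirror the diff below, but ```ToyInitManager```, ```TargetAwareReadyFn``` as written here, and the use of ```std::unordered_map```/```std::string``` instead of ```absl::flat_hash_map```/```absl::string_view``` are simplified stand-ins, not the actual Envoy ```Init::ManagerImpl``` API.

```cpp
// Illustrative-only sketch of the "unready target name -> count" bookkeeping
// described above. It mirrors the shape of ManagerImpl::add()/onTargetReady()
// from this patch but is a standalone toy, not the Envoy implementation.
#include <cassert>
#include <cstdint>
#include <functional>
#include <iostream>
#include <string>
#include <unordered_map>

// Stand-in for the target-aware ready callback: the watcher passes the
// target's name back to the manager so it can update the per-name count.
using TargetAwareReadyFn = std::function<void(const std::string& target_name)>;

class ToyInitManager {
public:
  // add(): a new unready target bumps both the total count and its name's count.
  void add(const std::string& target_name) {
    ++count_;
    ++target_names_count_[target_name];
  }

  // onTargetReady(): called back (with the name) when a target finishes
  // initializing; erase the name once its count drops to zero.
  void onTargetReady(const std::string& target_name) {
    assert(count_ != 0);
    auto it = target_names_count_.find(target_name);
    assert(it != target_names_count_.end());
    if (--it->second == 0) {
      target_names_count_.erase(it);
    }
    if (--count_ == 0) {
      std::cout << "all targets ready\n";
    }
  }

  const std::unordered_map<std::string, uint32_t>& unreadyTargets() const {
    return target_names_count_;
  }

private:
  uint32_t count_{0};
  std::unordered_map<std::string, uint32_t> target_names_count_;
};

int main() {
  ToyInitManager manager;
  TargetAwareReadyFn ready = [&manager](const std::string& name) { manager.onTargetReady(name); };

  manager.add("rds");
  manager.add("cds");
  ready("rds");
  // Only "cds" should still be reported as unready at this point.
  for (const auto& [name, n] : manager.unreadyTargets()) {
    std::cout << name << " still has " << n << " unready target(s)\n";
  }
  ready("cds");
  return 0;
}
```

Running the sketch reports the remaining unready target after one of the two registered targets signals ready, which is the same information the ```unreadyTargets()``` accessor exposes in this PR.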
Risk Level: Low Linked Issue: https://github.com/envoyproxy/envoy/issues/11963 Signed-off-by: pingsun --- include/envoy/init/target.h | 6 +++++ source/common/init/manager_impl.cc | 23 ++++++++++++++---- source/common/init/manager_impl.h | 28 ++++++++++++++++------ source/common/init/target_impl.cc | 2 ++ source/common/init/target_impl.h | 2 ++ source/common/init/watcher_impl.cc | 16 ++++++++----- source/common/init/watcher_impl.h | 26 ++++++++++---------- test/integration/stats_integration_test.cc | 8 +++---- 8 files changed, 78 insertions(+), 33 deletions(-) diff --git a/include/envoy/init/target.h b/include/envoy/init/target.h index 9ab46d38aff4..75397ad2f991 100644 --- a/include/envoy/init/target.h +++ b/include/envoy/init/target.h @@ -25,6 +25,12 @@ struct TargetHandle { * @return true if the target received this call, false if the target was already destroyed. */ virtual bool initialize(const Watcher& watcher) const PURE; + + /** + * @return a human-readable target name, for logging / debugging / tracking target names. + * The target name has to be unique. + */ + virtual absl::string_view name() const PURE; }; using TargetHandlePtr = std::unique_ptr; diff --git a/source/common/init/manager_impl.cc b/source/common/init/manager_impl.cc index f60ddc64a9e9..95cb37e4cc3b 100644 --- a/source/common/init/manager_impl.cc +++ b/source/common/init/manager_impl.cc @@ -1,19 +1,23 @@ #include "common/init/manager_impl.h" +#include + #include "common/common/assert.h" +#include "common/init/watcher_impl.h" namespace Envoy { namespace Init { ManagerImpl::ManagerImpl(absl::string_view name) : name_(fmt::format("init manager {}", name)), state_(State::Uninitialized), count_(0), - watcher_(name_, [this]() { onTargetReady(); }) {} + watcher_(name_, [this](absl::string_view target_name) { onTargetReady(target_name); }) {} Manager::State ManagerImpl::state() const { return state_; } void ManagerImpl::add(const Target& target) { ++count_; TargetHandlePtr target_handle(target.createHandle(name_)); + ++target_names_count_[target.name()]; switch (state_) { case State::Uninitialized: // If the manager isn't initialized yet, save the target handle to be initialized later. @@ -53,15 +57,26 @@ void ManagerImpl::initialize(const Watcher& watcher) { // completed immediately. for (const auto& target_handle : target_handles_) { if (!target_handle->initialize(watcher_)) { - onTargetReady(); + onTargetReady(target_handle->name()); } } } } -void ManagerImpl::onTargetReady() { +const absl::flat_hash_map& ManagerImpl::unreadyTargets() const { + return target_names_count_; +} + +void ManagerImpl::onTargetReady(absl::string_view target_name) { // If there are no remaining targets and one mysteriously calls us back, this manager is haunted. - ASSERT(count_ != 0, fmt::format("{} called back by target after initialization complete")); + ASSERT(count_ != 0, + fmt::format("{} called back by target after initialization complete", target_name)); + + // Decrease target_name count by 1. + ASSERT(target_names_count_.find(target_name) != target_names_count_.end()); + if (--target_names_count_[target_name] == 0) { + target_names_count_.erase(target_name); + } // If there are no uninitialized targets remaining when called back by a target, that means it was // the last. Signal `ready` to the handle we saved in `initialize`. 
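The backward-compatibility point in the commit message (old `ReadyFn` callbacks keep working) comes down to wrapping a no-argument callback in the new target-aware signature, which is what the watcher change later in this patch does. Below is a small standalone sketch of that adaptation; the type aliases are hypothetical stand-ins for the `Init::ReadyFn` / `Init::TargetAwareReadyFn` aliases, not the Envoy definitions themselves.

```cpp
#include <functional>
#include <iostream>
#include <string>
#include <string_view>

// Hypothetical stand-ins for Init::ReadyFn / Init::TargetAwareReadyFn.
using ReadyFn = std::function<void()>;
using TargetAwareReadyFn = std::function<void(std::string_view)>;

// The same adaptation the watcher performs: a legacy no-argument callback is
// wrapped so every stored callback has the target-aware signature.
TargetAwareReadyFn wrap(ReadyFn fn) {
  return [callback = std::move(fn)](std::string_view /*target_name*/) { callback(); };
}

int main() {
  TargetAwareReadyFn aware = [](std::string_view name) {
    std::cout << "target ready: " << name << "\n";
  };
  TargetAwareReadyFn wrapped = wrap([] { std::cout << "ready (name ignored)\n"; });

  aware("target rds");   // target-aware path: the name reaches the callback.
  wrapped("target rds"); // legacy path: the name is dropped.
  return 0;
}
```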
diff --git a/source/common/init/manager_impl.h b/source/common/init/manager_impl.h index b92ac102fd72..026014245ccd 100644 --- a/source/common/init/manager_impl.h +++ b/source/common/init/manager_impl.h @@ -7,6 +7,8 @@ #include "common/common/logger.h" #include "common/init/watcher_impl.h" +#include "absl/container/flat_hash_map.h" + namespace Envoy { namespace Init { @@ -35,27 +37,39 @@ class ManagerImpl : public Manager, Logger::Loggable { void add(const Target& target) override; void initialize(const Watcher& watcher) override; + // Expose the const reference of target_names_count_ hash map to public. + const absl::flat_hash_map& unreadyTargets() const; + private: - void onTargetReady(); + // Callback function with an additional target_name parameter, decrease unready targets count by + // 1, update target_names_count_ hash map. + void onTargetReady(absl::string_view target_name); + void ready(); - // Human-readable name for logging + // Human-readable name for logging. const std::string name_; - // Current state + // Current state. State state_; - // Current number of registered targets that have not yet initialized + // Current number of registered targets that have not yet initialized. uint32_t count_; - // Handle to the watcher passed in `initialize`, to be called when initialization completes + // Handle to the watcher passed in `initialize`, to be called when initialization completes. WatcherHandlePtr watcher_handle_; - // Watcher to receive ready notifications from each target + // Watcher to receive ready notifications from each target. We restrict the watcher_ inside + // ManagerImpl to be constructed with the 'TargetAwareReadyFn' fn so that the init manager will + // get target name information when the watcher_ calls 'onTargetSendName(target_name)' For any + // other purpose, a watcher can be constructed with either TargetAwareReadyFn or ReadyFn. const WatcherImpl watcher_; - // All registered targets + // All registered targets. std::list target_handles_; + + // Count of target_name of unready targets. 
+ absl::flat_hash_map target_names_count_; }; } // namespace Init diff --git a/source/common/init/target_impl.cc b/source/common/init/target_impl.cc index 4d8df4c27aac..8ee37eabfd14 100644 --- a/source/common/init/target_impl.cc +++ b/source/common/init/target_impl.cc @@ -22,6 +22,8 @@ bool TargetHandleImpl::initialize(const Watcher& watcher) const { } } +absl::string_view TargetHandleImpl::name() const { return name_; } + TargetImpl::TargetImpl(absl::string_view name, InitializeFn fn) : name_(fmt::format("target {}", name)), fn_(std::make_shared([this, fn](WatcherHandlePtr watcher_handle) { diff --git a/source/common/init/target_impl.h b/source/common/init/target_impl.h index d6a098daaca2..da7281c69b2f 100644 --- a/source/common/init/target_impl.h +++ b/source/common/init/target_impl.h @@ -38,6 +38,8 @@ class TargetHandleImpl : public TargetHandle, Logger::Loggable // Init::TargetHandle bool initialize(const Watcher& watcher) const override; + absl::string_view name() const override; + private: // Name of the handle (almost always the name of the ManagerImpl calling the target) const std::string handle_name_; diff --git a/source/common/init/watcher_impl.cc b/source/common/init/watcher_impl.cc index b69fe3e7cf84..50b792bdcbbe 100644 --- a/source/common/init/watcher_impl.cc +++ b/source/common/init/watcher_impl.cc @@ -4,7 +4,7 @@ namespace Envoy { namespace Init { WatcherHandleImpl::WatcherHandleImpl(absl::string_view handle_name, absl::string_view name, - std::weak_ptr fn) + std::weak_ptr fn) : handle_name_(handle_name), name_(name), fn_(std::move(fn)) {} bool WatcherHandleImpl::ready() const { @@ -12,26 +12,30 @@ bool WatcherHandleImpl::ready() const { if (locked_fn) { // If we can "lock" a shared pointer to the watcher's callback function, call it. ENVOY_LOG(debug, "{} initialized, notifying {}", handle_name_, name_); - (*locked_fn)(); + (*locked_fn)(handle_name_); return true; } else { // If not, the watcher was already destroyed. - ENVOY_LOG(debug, "{} initialized, but can't notify {} (unavailable)", handle_name_, name_); + ENVOY_LOG(debug, "{} initialized, but can't notify {}", handle_name_, name_); return false; } } WatcherImpl::WatcherImpl(absl::string_view name, ReadyFn fn) - : name_(name), fn_(std::make_shared(std::move(fn))) {} + : name_(name), fn_(std::make_shared( + [callback = std::move(fn)](absl::string_view) { callback(); })) {} + +WatcherImpl::WatcherImpl(absl::string_view name, TargetAwareReadyFn fn) + : name_(name), fn_(std::make_shared(std::move(fn))) {} WatcherImpl::~WatcherImpl() { ENVOY_LOG(debug, "{} destroyed", name_); } absl::string_view WatcherImpl::name() const { return name_; } WatcherHandlePtr WatcherImpl::createHandle(absl::string_view handle_name) const { - // Note: can't use std::make_unique because WatcherHandleImpl ctor is private + // Note: can't use std::make_unique because WatcherHandleImpl ctor is private. return std::unique_ptr( - new WatcherHandleImpl(handle_name, name_, std::weak_ptr(fn_))); + new WatcherHandleImpl(handle_name, name_, std::weak_ptr(fn_))); } } // namespace Init diff --git a/source/common/init/watcher_impl.h b/source/common/init/watcher_impl.h index 816a37c860eb..fb41d8c0400a 100644 --- a/source/common/init/watcher_impl.h +++ b/source/common/init/watcher_impl.h @@ -14,6 +14,7 @@ namespace Init { * initialization completes. */ using ReadyFn = std::function; +using TargetAwareReadyFn = std::function; /** * A WatcherHandleImpl functions as a weak reference to a Watcher. 
It is how a TargetImpl safely @@ -25,22 +26,22 @@ class WatcherHandleImpl : public WatcherHandle, Logger::Loggable fn); + std::weak_ptr fn); public: - // Init::WatcherHandle + // Init::WatcherHandle. bool ready() const override; private: // Name of the handle (either the name of the target calling the manager, or the name of the - // manager calling the client) + // manager calling the client). const std::string handle_name_; - // Name of the watcher (either the name of the manager, or the name of the client) + // Name of the watcher (either the name of the manager, or the name of the client). const std::string name_; - // The watcher's callback function, only called if the weak pointer can be "locked" - const std::weak_ptr fn_; + // The watcher's callback function, only called if the weak pointer can be "locked". + const std::weak_ptr fn_; }; /** @@ -51,22 +52,23 @@ class WatcherHandleImpl : public WatcherHandle, Logger::Loggable { public: /** - * @param name a human-readable watcher name, for logging / debugging - * @param fn a callback function to invoke when `ready` is called on the handle + * @param name a human-readable watcher name, for logging / debugging. + * @param fn a callback function to invoke when `ready` is called on the handle. */ WatcherImpl(absl::string_view name, ReadyFn fn); + WatcherImpl(absl::string_view name, TargetAwareReadyFn fn); ~WatcherImpl() override; - // Init::Watcher + // Init::Watcher. absl::string_view name() const override; WatcherHandlePtr createHandle(absl::string_view handle_name) const override; private: - // Human-readable name for logging + // Human-readable name for logging. const std::string name_; - // The callback function, called via WatcherHandleImpl by either the target or the manager - const std::shared_ptr fn_; + // The callback function, called via WatcherHandleImpl by either the target or the manager. + const std::shared_ptr fn_; }; } // namespace Init diff --git a/test/integration/stats_integration_test.cc b/test/integration/stats_integration_test.cc index 1238176409b3..26143f370000 100644 --- a/test/integration/stats_integration_test.cc +++ b/test/integration/stats_integration_test.cc @@ -286,6 +286,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithFakeSymbolTable) { // 2020/07/20 11559 44747 46000 stats: add histograms for request/response headers // and body sizes. // 2020/07/21 12034 44811 46000 Add configurable histogram buckets. + // 2020/07/31 12035 45002 46000 Init manager store unready targets in hash map. // Note: when adjusting this value: EXPECT_MEMORY_EQ is active only in CI // 'release' builds, where we control the platform and tool-chain. So you @@ -303,8 +304,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithFakeSymbolTable) { // We only run the exact test for ipv6 because ipv4 in some cases may allocate a // different number of bytes. We still run the approximate test. if (ip_version_ != Network::Address::IpVersion::v6) { - // https://github.com/envoyproxy/envoy/issues/12209 - // EXPECT_MEMORY_EQ(m_per_cluster, 44811); + EXPECT_MEMORY_EQ(m_per_cluster, 45002); } EXPECT_MEMORY_LE(m_per_cluster, 46000); // Round up to allow platform variations. } @@ -362,6 +362,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithRealSymbolTable) { // 2020/07/20 11559 36859 38000 stats: add histograms for request/response headers // and body sizes. // 2020/07/21 12034 36923 38000 Add configurable histogram buckets. + // 2020/07/31 12035 37114 38000 Init manager store unready targets in hash map. 
// Note: when adjusting this value: EXPECT_MEMORY_EQ is active only in CI // 'release' builds, where we control the platform and tool-chain. So you @@ -379,8 +380,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithRealSymbolTable) { // We only run the exact test for ipv6 because ipv4 in some cases may allocate a // different number of bytes. We still run the approximate test. if (ip_version_ != Network::Address::IpVersion::v6) { - // https://github.com/envoyproxy/envoy/issues/12209 - // EXPECT_MEMORY_EQ(m_per_cluster, 36923); + EXPECT_MEMORY_EQ(m_per_cluster, 37114); } EXPECT_MEMORY_LE(m_per_cluster, 38000); // Round up to allow platform variations. } From bd40556d20b14bb947956a0093719b0d986bcea7 Mon Sep 17 00:00:00 2001 From: DongRyeol Cha Date: Sat, 8 Aug 2020 07:52:45 +0900 Subject: [PATCH 885/909] Fix potential bug that call the virtual method in destructor (#12536) As you know that the virtual method should not be called in destructor because it is not guaranteed if virtual function table exist on destructor stage. There are no crash or abnormal behavior now but it is definitely a potential bug. So, this patch changes the virtual function calling to non virtual function calling. Signed-off-by: DongRyeol Cha --- source/common/network/udp_listener_impl.cc | 6 ++++-- source/common/network/udp_listener_impl.h | 1 + 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/source/common/network/udp_listener_impl.cc b/source/common/network/udp_listener_impl.cc index c959132d1642..2b28e0ea5841 100644 --- a/source/common/network/udp_listener_impl.cc +++ b/source/common/network/udp_listener_impl.cc @@ -45,16 +45,18 @@ UdpListenerImpl::UdpListenerImpl(Event::DispatcherImpl& dispatcher, SocketShared } UdpListenerImpl::~UdpListenerImpl() { - disable(); + disableEvent(); file_event_.reset(); } -void UdpListenerImpl::disable() { file_event_->setEnabled(0); } +void UdpListenerImpl::disable() { disableEvent(); } void UdpListenerImpl::enable() { file_event_->setEnabled(Event::FileReadyType::Read | Event::FileReadyType::Write); } +void UdpListenerImpl::disableEvent() { file_event_->setEnabled(0); } + void UdpListenerImpl::onSocketEvent(short flags) { ASSERT((flags & (Event::FileReadyType::Read | Event::FileReadyType::Write))); ENVOY_UDP_LOG(trace, "socket event: {}", flags); diff --git a/source/common/network/udp_listener_impl.h b/source/common/network/udp_listener_impl.h index 2184b4419c10..9789301361c3 100644 --- a/source/common/network/udp_listener_impl.h +++ b/source/common/network/udp_listener_impl.h @@ -54,6 +54,7 @@ class UdpListenerImpl : public BaseListenerImpl, private: void onSocketEvent(short flags); + void disableEvent(); TimeSource& time_source_; Event::FileEventPtr file_event_; From 933e267db60961e76549ae6e65ac4872cb78db4d Mon Sep 17 00:00:00 2001 From: yugantrana Date: Fri, 7 Aug 2020 21:06:33 -0400 Subject: [PATCH 886/909] udp: write performance improvement via udp_gso (#12219) Introduces UdpPacketWriter Interface that can be used to perform writes in Batched/PassThrough modes by using QuicGsoBatchWriter implementation from QUICHE extension. **Additional Description:** UDP GSO (Generic Segmentation Offload) was introduced in Linux at version 4.18. It allows batch-writing of multiple messages into a single payload and sending these messages along as a batch in a single sendmsg syscall. Currently, Envoy performs the sending of messages using simple sendmsg implementation in pass-through mode, i.e. no support for batch writing. 
With this change, UdpListener can use UdpPacketWriter interface as a DefaultWriter or a GsoBatchWriter to perform pass-through or batched writes respectively. Detailed description of the changes can be found in the design document, [here](https://docs.google.com/document/d/16ePbgkfrzQ6v-cOVMSnKDja3dUdZvX-mxT9jw29rx4g/edit?usp=sharing). **Risk Level:** Low, not in use **Testing:** - Added udp_listener_impl_batched_writes_test, to verify that multiple packets of varying sizes are batched/flushed as per gso specifications while using UdpGsoBatchWriter. - Modified existing tests, to verify that UdpDefaultWriter performs writes in pass-through mode. - Ran all tests. All 677 tests passed successfully. ``` **Docs Changes:** None **Release Notes:** None **Fixes:** #11925 Signed-off-by: Yugant --- api/envoy/config/listener/v3/listener.proto | 13 +- .../v3/udp_default_writer_config.proto | 21 ++ .../v3/udp_gso_batch_writer_config.proto | 21 ++ .../config/listener/v4alpha/listener.proto | 13 +- .../v4alpha/udp_default_writer_config.proto | 23 ++ .../v4alpha/udp_gso_batch_writer_config.proto | 23 ++ bazel/external/quiche.BUILD | 14 + .../envoy/config/listener/v3/listener.proto | 13 +- .../v3/udp_default_writer_config.proto | 21 ++ .../v3/udp_gso_batch_writer_config.proto | 21 ++ .../config/listener/v4alpha/listener.proto | 13 +- .../v4alpha/udp_default_writer_config.proto | 23 ++ .../v4alpha/udp_gso_batch_writer_config.proto | 23 ++ include/envoy/api/os_sys_calls.h | 5 + include/envoy/common/platform.h | 12 + include/envoy/network/BUILD | 24 ++ include/envoy/network/listener.h | 21 ++ .../envoy/network/udp_packet_writer_config.h | 26 ++ .../envoy/network/udp_packet_writer_handler.h | 120 ++++++++ source/common/api/posix/os_sys_calls_impl.cc | 21 +- source/common/api/posix/os_sys_calls_impl.h | 1 + source/common/api/win32/os_sys_calls_impl.cc | 5 + source/common/api/win32/os_sys_calls_impl.h | 1 + source/common/network/BUILD | 27 ++ .../network/udp_default_writer_config.cc | 32 ++ .../network/udp_default_writer_config.h | 32 ++ source/common/network/udp_listener_impl.cc | 10 +- source/common/network/udp_listener_impl.h | 1 + .../network/udp_packet_writer_handler_impl.cc | 28 ++ .../network/udp_packet_writer_handler_impl.h | 45 +++ .../filters/udp/udp_proxy/udp_proxy_filter.cc | 2 + source/extensions/quic_listeners/quiche/BUILD | 63 +++- .../quiche/active_quic_listener.cc | 22 +- .../quiche/active_quic_listener.h | 2 + .../quiche/envoy_quic_client_connection.cc | 18 +- .../quiche/envoy_quic_packet_writer.cc | 76 +++-- .../quiche/envoy_quic_packet_writer.h | 31 +- .../quic_listeners/quiche/envoy_quic_utils.cc | 21 +- .../quic_listeners/quiche/envoy_quic_utils.h | 3 +- .../quiche/udp_gso_batch_writer.cc | 126 ++++++++ .../quiche/udp_gso_batch_writer.h | 124 ++++++++ .../quiche/udp_gso_batch_writer_config.cc | 30 ++ .../quiche/udp_gso_batch_writer_config.h | 28 ++ source/server/BUILD | 1 + source/server/admin/admin.h | 3 + source/server/connection_handler_impl.cc | 31 +- source/server/connection_handler_impl.h | 11 +- source/server/listener_impl.cc | 21 ++ source/server/listener_impl.h | 5 + test/common/network/BUILD | 51 ++++ .../udp_listener_impl_batch_writer_test.cc | 279 ++++++++++++++++++ test/common/network/udp_listener_impl_test.cc | 116 ++------ .../network/udp_listener_impl_test_base.h | 123 ++++++++ .../proxy_protocol_regression_test.cc | 1 + .../proxy_protocol/proxy_protocol_test.cc | 2 + .../udp/udp_proxy/udp_proxy_filter_test.cc | 5 + test/extensions/quic_listeners/quiche/BUILD | 2 + 
.../quiche/active_quic_listener_test.cc | 18 +- .../quiche/envoy_quic_dispatcher_test.cc | 6 +- .../quiche/envoy_quic_utils_test.cc | 2 +- .../quiche/envoy_quic_writer_test.cc | 4 +- test/integration/fake_upstream.h | 8 +- test/mocks/network/mocks.h | 27 ++ test/server/BUILD | 2 + test/server/connection_handler_test.cc | 11 +- .../listener_manager_impl_quic_only_test.cc | 26 +- test/server/listener_manager_impl_test.cc | 22 ++ tools/spelling/spelling_dictionary.txt | 4 + 68 files changed, 1792 insertions(+), 188 deletions(-) create mode 100644 api/envoy/config/listener/v3/udp_default_writer_config.proto create mode 100644 api/envoy/config/listener/v3/udp_gso_batch_writer_config.proto create mode 100644 api/envoy/config/listener/v4alpha/udp_default_writer_config.proto create mode 100644 api/envoy/config/listener/v4alpha/udp_gso_batch_writer_config.proto create mode 100644 generated_api_shadow/envoy/config/listener/v3/udp_default_writer_config.proto create mode 100644 generated_api_shadow/envoy/config/listener/v3/udp_gso_batch_writer_config.proto create mode 100644 generated_api_shadow/envoy/config/listener/v4alpha/udp_default_writer_config.proto create mode 100644 generated_api_shadow/envoy/config/listener/v4alpha/udp_gso_batch_writer_config.proto create mode 100644 include/envoy/network/udp_packet_writer_config.h create mode 100644 include/envoy/network/udp_packet_writer_handler.h create mode 100644 source/common/network/udp_default_writer_config.cc create mode 100644 source/common/network/udp_default_writer_config.h create mode 100644 source/common/network/udp_packet_writer_handler_impl.cc create mode 100644 source/common/network/udp_packet_writer_handler_impl.h create mode 100644 source/extensions/quic_listeners/quiche/udp_gso_batch_writer.cc create mode 100644 source/extensions/quic_listeners/quiche/udp_gso_batch_writer.h create mode 100644 source/extensions/quic_listeners/quiche/udp_gso_batch_writer_config.cc create mode 100644 source/extensions/quic_listeners/quiche/udp_gso_batch_writer_config.h create mode 100644 test/common/network/udp_listener_impl_batch_writer_test.cc create mode 100644 test/common/network/udp_listener_impl_test_base.h diff --git a/api/envoy/config/listener/v3/listener.proto b/api/envoy/config/listener/v3/listener.proto index ab0b0ecac7c7..8c5066909caf 100644 --- a/api/envoy/config/listener/v3/listener.proto +++ b/api/envoy/config/listener/v3/listener.proto @@ -5,6 +5,7 @@ package envoy.config.listener.v3; import "envoy/config/accesslog/v3/accesslog.proto"; import "envoy/config/core/v3/address.proto"; import "envoy/config/core/v3/base.proto"; +import "envoy/config/core/v3/extension.proto"; import "envoy/config/core/v3/socket_option.proto"; import "envoy/config/listener/v3/api_listener.proto"; import "envoy/config/listener/v3/listener_components.proto"; @@ -35,7 +36,7 @@ message ListenerCollection { udpa.core.v1.CollectionEntry entries = 1; } -// [#next-free-field: 23] +// [#next-free-field: 24] message Listener { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Listener"; @@ -248,4 +249,14 @@ message Listener { // Configuration for :ref:`access logs ` // emitted by this listener. repeated accesslog.v3.AccessLog access_log = 22; + + // If the protocol in the listener socket address in :ref:`protocol + // ` is :ref:`UDP + // `, this field specifies the actual udp + // writer to create, i.e. 
:ref:`name ` + // = "udp_default_writer" for creating a udp writer with writing in passthrough mode, + // = "udp_gso_batch_writer" for creating a udp writer with writing in batch mode. + // If not present, treat it as "udp_default_writer". + // [#not-implemented-hide:] + core.v3.TypedExtensionConfig udp_writer_config = 23; } diff --git a/api/envoy/config/listener/v3/udp_default_writer_config.proto b/api/envoy/config/listener/v3/udp_default_writer_config.proto new file mode 100644 index 000000000000..707a66c7b5c4 --- /dev/null +++ b/api/envoy/config/listener/v3/udp_default_writer_config.proto @@ -0,0 +1,21 @@ +syntax = "proto3"; + +package envoy.config.listener.v3; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.config.listener.v3"; +option java_outer_classname = "UdpDefaultWriterConfigProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Udp Default Writer Config] + +// [#not-implemented-hide:] +// Configuration specific to the Udp Default Writer. +message UdpDefaultWriterOptions { +} diff --git a/api/envoy/config/listener/v3/udp_gso_batch_writer_config.proto b/api/envoy/config/listener/v3/udp_gso_batch_writer_config.proto new file mode 100644 index 000000000000..134cb6a42dd2 --- /dev/null +++ b/api/envoy/config/listener/v3/udp_gso_batch_writer_config.proto @@ -0,0 +1,21 @@ +syntax = "proto3"; + +package envoy.config.listener.v3; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.config.listener.v3"; +option java_outer_classname = "UdpGsoBatchWriterConfigProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Udp Gso Batch Writer Config] + +// [#not-implemented-hide:] +// Configuration specific to the Udp Gso Batch Writer. +message UdpGsoBatchWriterOptions { +} diff --git a/api/envoy/config/listener/v4alpha/listener.proto b/api/envoy/config/listener/v4alpha/listener.proto index 7c8c92fc4989..c188ecb24490 100644 --- a/api/envoy/config/listener/v4alpha/listener.proto +++ b/api/envoy/config/listener/v4alpha/listener.proto @@ -5,6 +5,7 @@ package envoy.config.listener.v4alpha; import "envoy/config/accesslog/v4alpha/accesslog.proto"; import "envoy/config/core/v4alpha/address.proto"; import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/core/v4alpha/extension.proto"; import "envoy/config/core/v4alpha/socket_option.proto"; import "envoy/config/listener/v4alpha/api_listener.proto"; import "envoy/config/listener/v4alpha/listener_components.proto"; @@ -38,7 +39,7 @@ message ListenerCollection { udpa.core.v1.CollectionEntry entries = 1; } -// [#next-free-field: 23] +// [#next-free-field: 24] message Listener { option (udpa.annotations.versioning).previous_message_type = "envoy.config.listener.v3.Listener"; @@ -251,4 +252,14 @@ message Listener { // Configuration for :ref:`access logs ` // emitted by this listener. repeated accesslog.v4alpha.AccessLog access_log = 22; + + // If the protocol in the listener socket address in :ref:`protocol + // ` is :ref:`UDP + // `, this field specifies the actual udp + // writer to create, i.e. 
:ref:`name ` + // = "udp_default_writer" for creating a udp writer with writing in passthrough mode, + // = "udp_gso_batch_writer" for creating a udp writer with writing in batch mode. + // If not present, treat it as "udp_default_writer". + // [#not-implemented-hide:] + core.v4alpha.TypedExtensionConfig udp_writer_config = 23; } diff --git a/api/envoy/config/listener/v4alpha/udp_default_writer_config.proto b/api/envoy/config/listener/v4alpha/udp_default_writer_config.proto new file mode 100644 index 000000000000..02660a7b49f4 --- /dev/null +++ b/api/envoy/config/listener/v4alpha/udp_default_writer_config.proto @@ -0,0 +1,23 @@ +syntax = "proto3"; + +package envoy.config.listener.v4alpha; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.config.listener.v4alpha"; +option java_outer_classname = "UdpDefaultWriterConfigProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Udp Default Writer Config] + +// [#not-implemented-hide:] +// Configuration specific to the Udp Default Writer. +message UdpDefaultWriterOptions { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.UdpDefaultWriterOptions"; +} diff --git a/api/envoy/config/listener/v4alpha/udp_gso_batch_writer_config.proto b/api/envoy/config/listener/v4alpha/udp_gso_batch_writer_config.proto new file mode 100644 index 000000000000..5427fe19e7e1 --- /dev/null +++ b/api/envoy/config/listener/v4alpha/udp_gso_batch_writer_config.proto @@ -0,0 +1,23 @@ +syntax = "proto3"; + +package envoy.config.listener.v4alpha; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.config.listener.v4alpha"; +option java_outer_classname = "UdpGsoBatchWriterConfigProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Udp Gso Batch Writer Config] + +// [#not-implemented-hide:] +// Configuration specific to the Udp Gso Batch Writer. 
+message UdpGsoBatchWriterOptions { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.UdpGsoBatchWriterOptions"; +} diff --git a/bazel/external/quiche.BUILD b/bazel/external/quiche.BUILD index 50f9f8443c21..b641e9d59e84 100644 --- a/bazel/external/quiche.BUILD +++ b/bazel/external/quiche.BUILD @@ -3584,6 +3584,20 @@ envoy_cc_test_library( deps = [":quic_core_crypto_random_lib"], ) +envoy_cc_test_library( + name = "quic_test_tools_mock_syscall_wrapper_lib", + srcs = ["quiche/quic/test_tools/quic_mock_syscall_wrapper.cc"], + hdrs = ["quiche/quic/test_tools/quic_mock_syscall_wrapper.h"], + copts = quiche_copts, + repository = "@envoy", + tags = ["nofips"], + deps = [ + ":quic_core_syscall_wrapper_lib", + ":quic_platform_base", + ":quic_platform_test", + ], +) + envoy_cc_test_library( name = "quic_test_tools_sent_packet_manager_peer_lib", srcs = ["quiche/quic/test_tools/quic_sent_packet_manager_peer.cc"], diff --git a/generated_api_shadow/envoy/config/listener/v3/listener.proto b/generated_api_shadow/envoy/config/listener/v3/listener.proto index fbf34d16442b..0d0dc5d817a9 100644 --- a/generated_api_shadow/envoy/config/listener/v3/listener.proto +++ b/generated_api_shadow/envoy/config/listener/v3/listener.proto @@ -5,6 +5,7 @@ package envoy.config.listener.v3; import "envoy/config/accesslog/v3/accesslog.proto"; import "envoy/config/core/v3/address.proto"; import "envoy/config/core/v3/base.proto"; +import "envoy/config/core/v3/extension.proto"; import "envoy/config/core/v3/socket_option.proto"; import "envoy/config/listener/v3/api_listener.proto"; import "envoy/config/listener/v3/listener_components.proto"; @@ -35,7 +36,7 @@ message ListenerCollection { udpa.core.v1.CollectionEntry entries = 1; } -// [#next-free-field: 23] +// [#next-free-field: 24] message Listener { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Listener"; @@ -247,5 +248,15 @@ message Listener { // emitted by this listener. repeated accesslog.v3.AccessLog access_log = 22; + // If the protocol in the listener socket address in :ref:`protocol + // ` is :ref:`UDP + // `, this field specifies the actual udp + // writer to create, i.e. :ref:`name ` + // = "udp_default_writer" for creating a udp writer with writing in passthrough mode, + // = "udp_gso_batch_writer" for creating a udp writer with writing in batch mode. + // If not present, treat it as "udp_default_writer". 
+ // [#not-implemented-hide:] + core.v3.TypedExtensionConfig udp_writer_config = 23; + google.protobuf.BoolValue hidden_envoy_deprecated_use_original_dst = 4 [deprecated = true]; } diff --git a/generated_api_shadow/envoy/config/listener/v3/udp_default_writer_config.proto b/generated_api_shadow/envoy/config/listener/v3/udp_default_writer_config.proto new file mode 100644 index 000000000000..707a66c7b5c4 --- /dev/null +++ b/generated_api_shadow/envoy/config/listener/v3/udp_default_writer_config.proto @@ -0,0 +1,21 @@ +syntax = "proto3"; + +package envoy.config.listener.v3; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.config.listener.v3"; +option java_outer_classname = "UdpDefaultWriterConfigProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Udp Default Writer Config] + +// [#not-implemented-hide:] +// Configuration specific to the Udp Default Writer. +message UdpDefaultWriterOptions { +} diff --git a/generated_api_shadow/envoy/config/listener/v3/udp_gso_batch_writer_config.proto b/generated_api_shadow/envoy/config/listener/v3/udp_gso_batch_writer_config.proto new file mode 100644 index 000000000000..134cb6a42dd2 --- /dev/null +++ b/generated_api_shadow/envoy/config/listener/v3/udp_gso_batch_writer_config.proto @@ -0,0 +1,21 @@ +syntax = "proto3"; + +package envoy.config.listener.v3; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.config.listener.v3"; +option java_outer_classname = "UdpGsoBatchWriterConfigProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Udp Gso Batch Writer Config] + +// [#not-implemented-hide:] +// Configuration specific to the Udp Gso Batch Writer. +message UdpGsoBatchWriterOptions { +} diff --git a/generated_api_shadow/envoy/config/listener/v4alpha/listener.proto b/generated_api_shadow/envoy/config/listener/v4alpha/listener.proto index 7c8c92fc4989..c188ecb24490 100644 --- a/generated_api_shadow/envoy/config/listener/v4alpha/listener.proto +++ b/generated_api_shadow/envoy/config/listener/v4alpha/listener.proto @@ -5,6 +5,7 @@ package envoy.config.listener.v4alpha; import "envoy/config/accesslog/v4alpha/accesslog.proto"; import "envoy/config/core/v4alpha/address.proto"; import "envoy/config/core/v4alpha/base.proto"; +import "envoy/config/core/v4alpha/extension.proto"; import "envoy/config/core/v4alpha/socket_option.proto"; import "envoy/config/listener/v4alpha/api_listener.proto"; import "envoy/config/listener/v4alpha/listener_components.proto"; @@ -38,7 +39,7 @@ message ListenerCollection { udpa.core.v1.CollectionEntry entries = 1; } -// [#next-free-field: 23] +// [#next-free-field: 24] message Listener { option (udpa.annotations.versioning).previous_message_type = "envoy.config.listener.v3.Listener"; @@ -251,4 +252,14 @@ message Listener { // Configuration for :ref:`access logs ` // emitted by this listener. repeated accesslog.v4alpha.AccessLog access_log = 22; + + // If the protocol in the listener socket address in :ref:`protocol + // ` is :ref:`UDP + // `, this field specifies the actual udp + // writer to create, i.e. 
:ref:`name ` + // = "udp_default_writer" for creating a udp writer with writing in passthrough mode, + // = "udp_gso_batch_writer" for creating a udp writer with writing in batch mode. + // If not present, treat it as "udp_default_writer". + // [#not-implemented-hide:] + core.v4alpha.TypedExtensionConfig udp_writer_config = 23; } diff --git a/generated_api_shadow/envoy/config/listener/v4alpha/udp_default_writer_config.proto b/generated_api_shadow/envoy/config/listener/v4alpha/udp_default_writer_config.proto new file mode 100644 index 000000000000..02660a7b49f4 --- /dev/null +++ b/generated_api_shadow/envoy/config/listener/v4alpha/udp_default_writer_config.proto @@ -0,0 +1,23 @@ +syntax = "proto3"; + +package envoy.config.listener.v4alpha; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.config.listener.v4alpha"; +option java_outer_classname = "UdpDefaultWriterConfigProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Udp Default Writer Config] + +// [#not-implemented-hide:] +// Configuration specific to the Udp Default Writer. +message UdpDefaultWriterOptions { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.UdpDefaultWriterOptions"; +} diff --git a/generated_api_shadow/envoy/config/listener/v4alpha/udp_gso_batch_writer_config.proto b/generated_api_shadow/envoy/config/listener/v4alpha/udp_gso_batch_writer_config.proto new file mode 100644 index 000000000000..5427fe19e7e1 --- /dev/null +++ b/generated_api_shadow/envoy/config/listener/v4alpha/udp_gso_batch_writer_config.proto @@ -0,0 +1,23 @@ +syntax = "proto3"; + +package envoy.config.listener.v4alpha; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.config.listener.v4alpha"; +option java_outer_classname = "UdpGsoBatchWriterConfigProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Udp Gso Batch Writer Config] + +// [#not-implemented-hide:] +// Configuration specific to the Udp Gso Batch Writer. +message UdpGsoBatchWriterOptions { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v3.UdpGsoBatchWriterOptions"; +} diff --git a/include/envoy/api/os_sys_calls.h b/include/envoy/api/os_sys_calls.h index 28dd3d305652..071a5465d5b5 100644 --- a/include/envoy/api/os_sys_calls.h +++ b/include/envoy/api/os_sys_calls.h @@ -67,6 +67,11 @@ class OsSysCalls { */ virtual bool supportsUdpGro() const PURE; + /** + * return true if the OS supports UDP GSO + */ + virtual bool supportsUdpGso() const PURE; + /** * Release all resources allocated for fd. * @return zero on success, -1 returned otherwise. 
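The `supportsUdpGso()` interface added above only probes whether the kernel supports UDP GSO; the batching itself happens at `sendmsg()` time. As a rough Linux-only sketch (not the QUICHE `QuicGsoBatchWriter` this PR actually wires in), a sender attaches a `UDP_SEGMENT` control message so the kernel splits one large buffer into datagrams of at most `segment_size` bytes; the `SOL_UDP`/`UDP_SEGMENT` fallbacks mirror the ones added to `envoy/common/platform.h` in the next hunk. The helper name `sendGsoBatch` is illustrative.

```cpp
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

#include <cstdint>
#include <cstring>
#include <string>

// Fallbacks mirroring the ones this patch adds to envoy/common/platform.h.
#ifndef SOL_UDP
#define SOL_UDP 17
#endif
#ifndef UDP_SEGMENT
#define UDP_SEGMENT 103
#endif

// Send `payload` with a single sendmsg() call; the kernel segments it into
// UDP datagrams of at most `segment_size` bytes (Linux >= 4.18 with UDP GSO).
ssize_t sendGsoBatch(int fd, const sockaddr_in& peer, const std::string& payload,
                     uint16_t segment_size) {
  iovec iov{const_cast<char*>(payload.data()), payload.size()};
  alignas(cmsghdr) char cbuf[CMSG_SPACE(sizeof(uint16_t))] = {};

  msghdr msg{};
  msg.msg_name = const_cast<sockaddr_in*>(&peer);
  msg.msg_namelen = sizeof(peer);
  msg.msg_iov = &iov;
  msg.msg_iovlen = 1;
  msg.msg_control = cbuf;
  msg.msg_controllen = sizeof(cbuf);

  // Per-packet GSO size as a control message (see udp(7), UDP_SEGMENT).
  cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);
  cmsg->cmsg_level = SOL_UDP;
  cmsg->cmsg_type = UDP_SEGMENT;
  cmsg->cmsg_len = CMSG_LEN(sizeof(uint16_t));
  std::memcpy(CMSG_DATA(cmsg), &segment_size, sizeof(segment_size));

  return ::sendmsg(fd, &msg, 0);
}
```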
diff --git a/include/envoy/common/platform.h b/include/envoy/common/platform.h index 30da6aa87039..71e0795c9a55 100644 --- a/include/envoy/common/platform.h +++ b/include/envoy/common/platform.h @@ -189,6 +189,18 @@ struct msghdr { #define IP6T_SO_ORIGINAL_DST 80 #endif +#ifndef SOL_UDP +#define SOL_UDP 17 +#endif + +#ifndef UDP_GRO +#define UDP_GRO 104 +#endif + +#ifndef UDP_SEGMENT +#define UDP_SEGMENT 103 +#endif + typedef int os_fd_t; #define INVALID_SOCKET -1 diff --git a/include/envoy/network/BUILD b/include/envoy/network/BUILD index 3076f862ddb8..3a8e67613c58 100644 --- a/include/envoy/network/BUILD +++ b/include/envoy/network/BUILD @@ -41,6 +41,20 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "udp_packet_writer_handler_interface", + hdrs = ["udp_packet_writer_handler.h"], + deps = [ + ":address_interface", + ":io_handle_interface", + ":socket_interface", + "//include/envoy/api:io_error_interface", + "//include/envoy/buffer:buffer_interface", + "//include/envoy/stats:stats_interface", + "//include/envoy/stats:stats_macros", + ], +) + envoy_cc_library( name = "dns_interface", hdrs = ["dns.h"], @@ -137,6 +151,7 @@ envoy_cc_library( ":connection_balancer_interface", ":connection_interface", ":listen_socket_interface", + ":udp_packet_writer_handler_interface", "//include/envoy/access_log:access_log_interface", "//include/envoy/common:resource_interface", "//include/envoy/stats:stats_interface", @@ -154,6 +169,15 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "udp_packet_writer_config_interface", + hdrs = ["udp_packet_writer_config.h"], + deps = [ + "//include/envoy/config:typed_config_interface", + "//include/envoy/network:udp_packet_writer_handler_interface", + ], +) + envoy_cc_library( name = "proxy_protocol_options_lib", hdrs = ["proxy_protocol.h"], diff --git a/include/envoy/network/listener.h b/include/envoy/network/listener.h index 373f25caaf2c..3d8257e69c5f 100644 --- a/include/envoy/network/listener.h +++ b/include/envoy/network/listener.h @@ -12,6 +12,7 @@ #include "envoy/network/connection.h" #include "envoy/network/connection_balancer.h" #include "envoy/network/listen_socket.h" +#include "envoy/network/udp_packet_writer_handler.h" #include "envoy/stats/scope.h" namespace Envoy { @@ -134,6 +135,12 @@ class ListenerConfig { */ virtual ActiveUdpListenerFactory* udpListenerFactory() PURE; + /** + * @return factory pointer if writing on UDP socket, otherwise return + * nullptr. + */ + virtual UdpPacketWriterFactoryOptRef udpPacketWriterFactory() PURE; + /** * @return traffic direction of the listener. */ @@ -254,6 +261,12 @@ class UdpListenerCallbacks { * @param error_code supplies the received error on the listener. */ virtual void onReceiveError(Api::IoError::IoErrorCode error_code) PURE; + + /** + * Returns the pointer to the udp_packet_writer associated with the + * UdpListenerCallback + */ + virtual UdpPacketWriter& udpPacketWriter() PURE; }; /** @@ -305,6 +318,14 @@ class UdpListener : public virtual Listener { * sender. */ virtual Api::IoCallUint64Result send(const UdpSendData& data) PURE; + + /** + * Flushes out remaining buffered data since last call of send(). + * This is a no-op if the implementation doesn't buffer data while sending. + * + * @return the error code of the underlying flush api. 
+ */ + virtual Api::IoCallUint64Result flush() PURE; }; using UdpListenerPtr = std::unique_ptr; diff --git a/include/envoy/network/udp_packet_writer_config.h b/include/envoy/network/udp_packet_writer_config.h new file mode 100644 index 000000000000..dee4487e2198 --- /dev/null +++ b/include/envoy/network/udp_packet_writer_config.h @@ -0,0 +1,26 @@ +#pragma once + +#include "envoy/config/typed_config.h" +#include "envoy/network/udp_packet_writer_handler.h" + +#include "common/protobuf/protobuf.h" + +namespace Envoy { +namespace Network { + +class UdpPacketWriterConfigFactory : public Config::TypedFactory { +public: + ~UdpPacketWriterConfigFactory() override = default; + + /** + * Create an UdpPacketWriterFactory object according to given message. + * @param message specifies Udp Packet Writer options in a protobuf. + */ + virtual Network::UdpPacketWriterFactoryPtr + createUdpPacketWriterFactory(const Protobuf::Message& message) PURE; + + std::string category() const override { return "envoy.udp_packet_writers"; } +}; + +} // namespace Network +} // namespace Envoy diff --git a/include/envoy/network/udp_packet_writer_handler.h b/include/envoy/network/udp_packet_writer_handler.h new file mode 100644 index 000000000000..dc82e54d8c34 --- /dev/null +++ b/include/envoy/network/udp_packet_writer_handler.h @@ -0,0 +1,120 @@ +#pragma once + +#include +#include + +#include "envoy/api/io_error.h" +#include "envoy/buffer/buffer.h" +#include "envoy/network/address.h" +#include "envoy/network/socket.h" +#include "envoy/stats/scope.h" +#include "envoy/stats/stats_macros.h" + +namespace Envoy { +namespace Network { + +/** + * Max v6 packet size, excluding IP and UDP headers. + */ +constexpr uint64_t UdpMaxOutgoingPacketSize = 1452; + +/** + * UdpPacketWriterBuffer bundles a buffer and a function that + * releases it. + */ +struct UdpPacketWriterBuffer { + UdpPacketWriterBuffer() = default; + UdpPacketWriterBuffer(uint8_t* buffer, size_t length, + std::function release_buffer) + : buffer_(buffer), length_(length), release_buffer_(std::move(release_buffer)) {} + + uint8_t* buffer_ = nullptr; + size_t length_ = 0; + std::function release_buffer_; +}; + +class UdpPacketWriter { +public: + virtual ~UdpPacketWriter() = default; + + /** + * @brief Sends a packet via given UDP socket with specific source address. + * + * @param buffer points to the buffer containing the packet + * @param local_ip is the source address to be used to send. If it is null, + * picks up the default network interface ip address. + * @param peer_address is the destination address to send to. + * @return result with number of bytes written, and write status + */ + virtual Api::IoCallUint64Result writePacket(const Buffer::Instance& buffer, + const Address::Ip* local_ip, + const Address::Instance& peer_address) PURE; + + /** + * @returns true if the network socket is not writable. + */ + virtual bool isWriteBlocked() const PURE; + + /** + * @brief mark the socket as writable when the socket is unblocked. + */ + virtual void setWritable() PURE; + + /** + * @brief Get the maximum size of the packet which can be written using this + * writer for the supplied peer address. + * + * @param peer_address is the destination address to send to. 
+ * @return the max packet size + */ + virtual uint64_t getMaxPacketSize(const Address::Instance& peer_address) const PURE; + + /** + * @return true if Batch Mode + * @return false if PassThroughMode + */ + virtual bool isBatchMode() const PURE; + + /** + * @brief Get pointer to the next write location in internal buffer, + * it should be called iff the caller does not call writePacket + * for the returned buffer. The caller is expected to call writePacket + * with the buffer returned from this function to save a memcpy. + * + * @param local_ip is the source address to be used to send. + * @param peer_address is the destination address to send to. + * @return { char* to the next write location, + * func to release buffer } + */ + virtual UdpPacketWriterBuffer getNextWriteLocation(const Address::Ip* local_ip, + const Address::Instance& peer_address) PURE; + + /** + * @brief Batch Mode: Try to send all buffered packets + * PassThrough Mode: NULL operation + * + * @return Api::IoCallUint64Result + */ + virtual Api::IoCallUint64Result flush() PURE; +}; + +using UdpPacketWriterPtr = std::unique_ptr; + +class UdpPacketWriterFactory { +public: + virtual ~UdpPacketWriterFactory() = default; + + /** + * Creates an UdpPacketWriter object for the given Udp Socket + * @param socket UDP socket used to send packets. + * @return the UdpPacketWriter created. + */ + virtual UdpPacketWriterPtr createUdpPacketWriter(Network::IoHandle& io_handle, + Stats::Scope& scope) PURE; +}; + +using UdpPacketWriterFactoryPtr = std::unique_ptr; +using UdpPacketWriterFactoryOptRef = absl::optional>; + +} // namespace Network +} // namespace Envoy diff --git a/source/common/api/posix/os_sys_calls_impl.cc b/source/common/api/posix/os_sys_calls_impl.cc index e1366dbbf942..546015123bc0 100644 --- a/source/common/api/posix/os_sys_calls_impl.cc +++ b/source/common/api/posix/os_sys_calls_impl.cc @@ -76,9 +76,6 @@ bool OsSysCallsImpl::supportsMmsg() const { bool OsSysCallsImpl::supportsUdpGro() const { #if !defined(__linux__) return false; -#else -#ifndef UDP_GRO - return false; #else static const bool is_supported = [] { int fd = ::socket(AF_INET, SOCK_DGRAM | SOCK_NONBLOCK, IPPROTO_UDP); @@ -92,6 +89,24 @@ bool OsSysCallsImpl::supportsUdpGro() const { }(); return is_supported; #endif +} + +bool OsSysCallsImpl::supportsUdpGso() const { +#if !defined(__linux__) + return false; +#else + static const bool is_supported = [] { + int fd = ::socket(AF_INET, SOCK_DGRAM | SOCK_NONBLOCK, IPPROTO_UDP); + if (fd < 0) { + return false; + } + int optval; + socklen_t optlen = sizeof(optval); + bool result = (0 <= ::getsockopt(fd, IPPROTO_UDP, UDP_SEGMENT, &optval, &optlen)); + ::close(fd); + return result; + }(); + return is_supported; #endif } diff --git a/source/common/api/posix/os_sys_calls_impl.h b/source/common/api/posix/os_sys_calls_impl.h index a35b15113806..036604eb40c1 100644 --- a/source/common/api/posix/os_sys_calls_impl.h +++ b/source/common/api/posix/os_sys_calls_impl.h @@ -23,6 +23,7 @@ class OsSysCallsImpl : public OsSysCalls { struct timespec* timeout) override; bool supportsMmsg() const override; bool supportsUdpGro() const override; + bool supportsUdpGso() const override; SysCallIntResult close(os_fd_t fd) override; SysCallIntResult ftruncate(int fd, off_t length) override; SysCallPtrResult mmap(void* addr, size_t length, int prot, int flags, int fd, diff --git a/source/common/api/win32/os_sys_calls_impl.cc b/source/common/api/win32/os_sys_calls_impl.cc index 22bd2d60d72b..86519612a253 100644 --- 
a/source/common/api/win32/os_sys_calls_impl.cc +++ b/source/common/api/win32/os_sys_calls_impl.cc @@ -175,6 +175,11 @@ bool OsSysCallsImpl::supportsUdpGro() const { return false; } +bool OsSysCallsImpl::supportsUdpGso() const { + // Windows doesn't support it. + return false; +} + SysCallIntResult OsSysCallsImpl::ftruncate(int fd, off_t length) { const int rc = ::_chsize_s(fd, length); return {rc, rc == 0 ? 0 : errno}; diff --git a/source/common/api/win32/os_sys_calls_impl.h b/source/common/api/win32/os_sys_calls_impl.h index d82e156de6b9..3a2ca378d658 100644 --- a/source/common/api/win32/os_sys_calls_impl.h +++ b/source/common/api/win32/os_sys_calls_impl.h @@ -23,6 +23,7 @@ class OsSysCallsImpl : public OsSysCalls { struct timespec* timeout) override; bool supportsMmsg() const override; bool supportsUdpGro() const override; + bool supportsUdpGso() const override; SysCallIntResult close(os_fd_t fd) override; SysCallIntResult ftruncate(int fd, off_t length) override; SysCallPtrResult mmap(void* addr, size_t length, int prot, int flags, int fd, diff --git a/source/common/network/BUILD b/source/common/network/BUILD index dd8bcb546337..b40195b288b7 100644 --- a/source/common/network/BUILD +++ b/source/common/network/BUILD @@ -235,6 +235,7 @@ envoy_cc_library( deps = [ ":address_lib", ":listen_socket_lib", + ":udp_default_writer_config", "//include/envoy/event:dispatcher_interface", "//include/envoy/event:file_event_interface", "//include/envoy/network:exception_interface", @@ -405,6 +406,32 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "udp_packet_writer_handler_lib", + srcs = ["udp_packet_writer_handler_impl.cc"], + hdrs = ["udp_packet_writer_handler_impl.h"], + deps = [ + ":io_socket_error_lib", + ":utility_lib", + "//include/envoy/network:socket_interface", + "//include/envoy/network:udp_packet_writer_config_interface", + "//include/envoy/network:udp_packet_writer_handler_interface", + "//source/common/buffer:buffer_lib", + ], +) + +envoy_cc_library( + name = "udp_default_writer_config", + srcs = ["udp_default_writer_config.cc"], + hdrs = ["udp_default_writer_config.h"], + deps = [ + ":udp_packet_writer_handler_lib", + "//include/envoy/network:udp_packet_writer_config_interface", + "//include/envoy/registry", + "@envoy_api//envoy/config/listener/v3:pkg_cc_proto", + ], +) + envoy_cc_library( name = "proxy_protocol_filter_state_lib", srcs = ["proxy_protocol_filter_state.cc"], diff --git a/source/common/network/udp_default_writer_config.cc b/source/common/network/udp_default_writer_config.cc new file mode 100644 index 000000000000..c07336c513a8 --- /dev/null +++ b/source/common/network/udp_default_writer_config.cc @@ -0,0 +1,32 @@ +#include "common/network/udp_default_writer_config.h" + +#include +#include + +#include "envoy/config/listener/v3/udp_default_writer_config.pb.h" + +#include "common/network/udp_packet_writer_handler_impl.h" + +namespace Envoy { +namespace Network { + +UdpPacketWriterPtr UdpDefaultWriterFactory::createUdpPacketWriter(Network::IoHandle& io_handle, + Stats::Scope& /*scope*/) { + return std::make_unique(io_handle); +} + +ProtobufTypes::MessagePtr UdpDefaultWriterConfigFactory::createEmptyConfigProto() { + return std::make_unique(); +} + +UdpPacketWriterFactoryPtr +UdpDefaultWriterConfigFactory::createUdpPacketWriterFactory(const Protobuf::Message& /*message*/) { + return std::make_unique(); +} + +std::string UdpDefaultWriterConfigFactory::name() const { return "udp_default_writer"; } + +REGISTER_FACTORY(UdpDefaultWriterConfigFactory, 
Network::UdpPacketWriterConfigFactory); + +} // namespace Network +} // namespace Envoy diff --git a/source/common/network/udp_default_writer_config.h b/source/common/network/udp_default_writer_config.h new file mode 100644 index 000000000000..e01c465e904f --- /dev/null +++ b/source/common/network/udp_default_writer_config.h @@ -0,0 +1,32 @@ +#pragma once + +#include "envoy/network/udp_packet_writer_config.h" +#include "envoy/network/udp_packet_writer_handler.h" +#include "envoy/registry/registry.h" + +namespace Envoy { +namespace Network { + +class UdpDefaultWriterFactory : public Network::UdpPacketWriterFactory { +public: + Network::UdpPacketWriterPtr createUdpPacketWriter(Network::IoHandle& io_handle, + Stats::Scope& scope) override; +}; + +// UdpPacketWriterConfigFactory to create UdpDefaultWriterFactory based on given protobuf +// This is the default UdpPacketWriterConfigFactory if not specified in config. +class UdpDefaultWriterConfigFactory : public UdpPacketWriterConfigFactory { +public: + // UdpPacketWriterConfigFactory + ProtobufTypes::MessagePtr createEmptyConfigProto() override; + + Network::UdpPacketWriterFactoryPtr + createUdpPacketWriterFactory(const Protobuf::Message&) override; + + std::string name() const override; +}; + +DECLARE_FACTORY(UdpDefaultWriterConfigFactory); + +} // namespace Network +} // namespace Envoy diff --git a/source/common/network/udp_listener_impl.cc b/source/common/network/udp_listener_impl.cc index 2b28e0ea5841..3eaf0272e940 100644 --- a/source/common/network/udp_listener_impl.cc +++ b/source/common/network/udp_listener_impl.cc @@ -110,8 +110,9 @@ const Address::InstanceConstSharedPtr& UdpListenerImpl::localAddress() const { Api::IoCallUint64Result UdpListenerImpl::send(const UdpSendData& send_data) { ENVOY_UDP_LOG(trace, "send"); Buffer::Instance& buffer = send_data.buffer_; - Api::IoCallUint64Result send_result = Utility::writeToSocket( - socket_->ioHandle(), buffer, send_data.local_ip_, send_data.peer_address_); + + Api::IoCallUint64Result send_result = + cb_.udpPacketWriter().writePacket(buffer, send_data.local_ip_, send_data.peer_address_); // The send_result normalizes the rc_ value to 0 in error conditions. // The drain call is hence 'safe' in success and failure cases. 
@@ -119,5 +120,10 @@ Api::IoCallUint64Result UdpListenerImpl::send(const UdpSendData& send_data) { return send_result; } +Api::IoCallUint64Result UdpListenerImpl::flush() { + ENVOY_UDP_LOG(trace, "flush"); + return cb_.udpPacketWriter().flush(); +} + } // namespace Network } // namespace Envoy diff --git a/source/common/network/udp_listener_impl.h b/source/common/network/udp_listener_impl.h index 9789301361c3..67168fb1c7ee 100644 --- a/source/common/network/udp_listener_impl.h +++ b/source/common/network/udp_listener_impl.h @@ -35,6 +35,7 @@ class UdpListenerImpl : public BaseListenerImpl, Event::Dispatcher& dispatcher() override; const Address::InstanceConstSharedPtr& localAddress() const override; Api::IoCallUint64Result send(const UdpSendData& data) override; + Api::IoCallUint64Result flush() override; void processPacket(Address::InstanceConstSharedPtr local_address, Address::InstanceConstSharedPtr peer_address, Buffer::InstancePtr buffer, diff --git a/source/common/network/udp_packet_writer_handler_impl.cc b/source/common/network/udp_packet_writer_handler_impl.cc new file mode 100644 index 000000000000..27d499268e28 --- /dev/null +++ b/source/common/network/udp_packet_writer_handler_impl.cc @@ -0,0 +1,28 @@ +#include "common/network/udp_packet_writer_handler_impl.h" + +#include "common/buffer/buffer_impl.h" +#include "common/network/utility.h" + +namespace Envoy { +namespace Network { + +UdpDefaultWriter::UdpDefaultWriter(Network::IoHandle& io_handle) + : write_blocked_(false), io_handle_(io_handle) {} + +UdpDefaultWriter::~UdpDefaultWriter() = default; + +Api::IoCallUint64Result UdpDefaultWriter::writePacket(const Buffer::Instance& buffer, + const Address::Ip* local_ip, + const Address::Instance& peer_address) { + ASSERT(!write_blocked_, "Cannot write while IO handle is blocked."); + Api::IoCallUint64Result result = + Utility::writeToSocket(io_handle_, buffer, local_ip, peer_address); + if (result.err_ && result.err_->getErrorCode() == Api::IoError::IoErrorCode::Again) { + // Writer is blocked when error code received is EWOULDBLOCK/EAGAIN + write_blocked_ = true; + } + return result; +} + +} // namespace Network +} // namespace Envoy diff --git a/source/common/network/udp_packet_writer_handler_impl.h b/source/common/network/udp_packet_writer_handler_impl.h new file mode 100644 index 000000000000..50c3f34b79cd --- /dev/null +++ b/source/common/network/udp_packet_writer_handler_impl.h @@ -0,0 +1,45 @@ +#pragma once + +#include "envoy/buffer/buffer.h" +#include "envoy/network/socket.h" +#include "envoy/network/udp_packet_writer_handler.h" + +#include "common/network/io_socket_error_impl.h" + +namespace Envoy { +namespace Network { + +class UdpDefaultWriter : public UdpPacketWriter { +public: + UdpDefaultWriter(Network::IoHandle& io_handle); + + ~UdpDefaultWriter() override; + + // Following writePacket utilizes Utility::writeToSocket() implementation + Api::IoCallUint64Result writePacket(const Buffer::Instance& buffer, const Address::Ip* local_ip, + const Address::Instance& peer_address) override; + + bool isWriteBlocked() const override { return write_blocked_; } + void setWritable() override { write_blocked_ = false; } + uint64_t getMaxPacketSize(const Address::Instance& /*peer_address*/) const override { + return Network::UdpMaxOutgoingPacketSize; + } + bool isBatchMode() const override { return false; } + Network::UdpPacketWriterBuffer + getNextWriteLocation(const Address::Ip* /*local_ip*/, + const Address::Instance& /*peer_address*/) override { + return {nullptr, 0, nullptr}; + } + 
Api::IoCallUint64Result flush() override { + return Api::IoCallUint64Result( + /*rc=*/0, + /*err=*/Api::IoErrorPtr(nullptr, Network::IoSocketError::deleteIoError)); + } + +private: + bool write_blocked_; + Network::IoHandle& io_handle_; +}; + +} // namespace Network +} // namespace Envoy diff --git a/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.cc b/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.cc index 8afb8035dbf5..095bc869f7e6 100644 --- a/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.cc +++ b/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.cc @@ -206,6 +206,8 @@ void UdpProxyFilter::ActiveSession::onReadReady() { if (result->getErrorCode() != Api::IoError::IoErrorCode::Again) { cluster_.cluster_stats_.sess_rx_errors_.inc(); } + // Flush out buffered data at the end of IO event. + cluster_.filter_.read_callbacks_->udpListener().flush(); } void UdpProxyFilter::ActiveSession::write(const Buffer::Instance& buffer) { diff --git a/source/extensions/quic_listeners/quiche/BUILD b/source/extensions/quic_listeners/quiche/BUILD index fd2cce9b9b5f..31a4ff5dec98 100644 --- a/source/extensions/quic_listeners/quiche/BUILD +++ b/source/extensions/quic_listeners/quiche/BUILD @@ -49,18 +49,6 @@ envoy_cc_library( ], ) -envoy_cc_library( - name = "envoy_quic_packet_writer_lib", - srcs = ["envoy_quic_packet_writer.cc"], - hdrs = ["envoy_quic_packet_writer.h"], - external_deps = ["quiche_quic_platform"], - tags = ["nofips"], - deps = [ - ":envoy_quic_utils_lib", - "@com_googlesource_quiche//:quic_core_packet_writer_interface_lib", - ], -) - envoy_cc_library( name = "envoy_quic_proof_source_base_lib", srcs = ["envoy_quic_proof_source_base.cc"], @@ -274,6 +262,7 @@ envoy_cc_library( ":envoy_quic_packet_writer_lib", "//include/envoy/event:dispatcher_interface", "//source/common/network:socket_option_factory_lib", + "//source/common/network:udp_packet_writer_handler_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], ) @@ -313,6 +302,7 @@ envoy_cc_library( ":envoy_quic_packet_writer_lib", ":envoy_quic_proof_source_lib", ":envoy_quic_utils_lib", + ":udp_gso_batch_writer_lib", "//include/envoy/network:listener_interface", "//source/common/network:listener_lib", "//source/common/protobuf:utility_lib", @@ -377,6 +367,55 @@ envoy_cc_extension( ], ) +envoy_cc_library( + name = "envoy_quic_packet_writer_lib", + srcs = ["envoy_quic_packet_writer.cc"], + hdrs = ["envoy_quic_packet_writer.h"], + external_deps = ["quiche_quic_platform"], + tags = ["nofips"], + deps = [ + ":envoy_quic_utils_lib", + "@com_googlesource_quiche//:quic_core_packet_writer_interface_lib", + ], +) + +envoy_cc_library( + name = "udp_gso_batch_writer_lib", + srcs = ["udp_gso_batch_writer.cc"], + hdrs = ["udp_gso_batch_writer.h"], + external_deps = ["quiche_quic_platform"], + tags = ["nofips"], + visibility = [ + "//test/common/network:__subpackages__", + "//test/extensions:__subpackages__", + ], + deps = [ + ":envoy_quic_utils_lib", + "//include/envoy/network:udp_packet_writer_handler_interface", + "//source/common/network:io_socket_error_lib", + "//source/common/protobuf:utility_lib", + "//source/common/runtime:runtime_lib", + "@com_googlesource_quiche//:quic_core_batch_writer_gso_batch_writer_lib", + ], +) + +envoy_cc_library( + name = "udp_gso_batch_writer_config_lib", + srcs = ["udp_gso_batch_writer_config.cc"], + hdrs = ["udp_gso_batch_writer_config.h"], + tags = ["nofips"], + visibility = [ + "//test/server:__subpackages__", + ], + deps = [ + ":udp_gso_batch_writer_lib", + 
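+        # udp_gso_batch_writer_config registers UdpGsoBatchWriterConfigFactory in
+        # the UdpPacketWriterConfigFactory registry so a listener's
+        # udp_writer_config can select the GSO batch writer by type.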
"//include/envoy/network:udp_packet_writer_config_interface", + "//include/envoy/registry", + "//source/common/api:os_sys_calls_lib", + "@envoy_api//envoy/config/listener/v3:pkg_cc_proto", + ], +) + envoy_cc_library( name = "envoy_quic_crypto_server_stream_lib", srcs = ["envoy_quic_crypto_server_stream.cc"], diff --git a/source/extensions/quic_listeners/quiche/active_quic_listener.cc b/source/extensions/quic_listeners/quiche/active_quic_listener.cc index c691e39a5551..eda0b7210e72 100644 --- a/source/extensions/quic_listeners/quiche/active_quic_listener.cc +++ b/source/extensions/quic_listeners/quiche/active_quic_listener.cc @@ -12,8 +12,9 @@ #include "extensions/quic_listeners/quiche/envoy_quic_connection_helper.h" #include "extensions/quic_listeners/quiche/envoy_quic_dispatcher.h" #include "extensions/quic_listeners/quiche/envoy_quic_proof_source.h" -#include "extensions/quic_listeners/quiche/envoy_quic_packet_writer.h" #include "extensions/quic_listeners/quiche/envoy_quic_utils.h" +#include "extensions/quic_listeners/quiche/envoy_quic_packet_writer.h" +#include "extensions/quic_listeners/quiche/udp_gso_batch_writer.h" namespace Envoy { namespace Quic { @@ -66,7 +67,20 @@ ActiveQuicListener::ActiveQuicListener(Event::Dispatcher& dispatcher, crypto_config_.get(), quic_config, &version_manager_, std::move(connection_helper), std::move(alarm_factory), quic::kQuicDefaultConnectionIdLength, parent, *config_, stats_, per_worker_stats_, dispatcher, listen_socket_); - quic_dispatcher_->InitializeWithWriter(new EnvoyQuicPacketWriter(listen_socket_)); + + // Create udp_packet_writer + Network::UdpPacketWriterPtr udp_packet_writer = + listener_config.udpPacketWriterFactory()->get().createUdpPacketWriter( + listen_socket_.ioHandle(), listener_config.listenerScope()); + udp_packet_writer_ = udp_packet_writer.get(); + if (udp_packet_writer->isBatchMode()) { + // UdpPacketWriter* can be downcasted to UdpGsoBatchWriter*, which indirectly inherits + // from the quic::QuicPacketWriter class and can be passed to InitializeWithWriter(). + quic_dispatcher_->InitializeWithWriter( + dynamic_cast(udp_packet_writer.release())); + } else { + quic_dispatcher_->InitializeWithWriter(new EnvoyQuicPacketWriter(std::move(udp_packet_writer))); + } } ActiveQuicListener::~ActiveQuicListener() { onListenerShutdown(); } @@ -79,9 +93,9 @@ void ActiveQuicListener::onListenerShutdown() { void ActiveQuicListener::onData(Network::UdpRecvData& data) { quic::QuicSocketAddress peer_address( - envoyAddressInstanceToQuicSocketAddress(data.addresses_.peer_)); + envoyIpAddressToQuicSocketAddress(data.addresses_.peer_->ip())); quic::QuicSocketAddress self_address( - envoyAddressInstanceToQuicSocketAddress(data.addresses_.local_)); + envoyIpAddressToQuicSocketAddress(data.addresses_.local_->ip())); quic::QuicTime timestamp = quic::QuicTime::Zero() + quic::QuicTime::Delta::FromMicroseconds(std::chrono::duration_cast( diff --git a/source/extensions/quic_listeners/quiche/active_quic_listener.h b/source/extensions/quic_listeners/quiche/active_quic_listener.h index 8d0d5c9dd46e..08b7807dfc4f 100644 --- a/source/extensions/quic_listeners/quiche/active_quic_listener.h +++ b/source/extensions/quic_listeners/quiche/active_quic_listener.h @@ -47,6 +47,7 @@ class ActiveQuicListener : public Network::UdpListenerCallbacks, void onReceiveError(Api::IoError::IoErrorCode /*error_code*/) override { // No-op. Quic can't do anything upon listener error. 
} + Network::UdpPacketWriter& udpPacketWriter() override { return *udp_packet_writer_; } // ActiveListenerImplBase Network::Listener* listener() override { return udp_listener_.get(); } @@ -65,6 +66,7 @@ class ActiveQuicListener : public Network::UdpListenerCallbacks, std::unique_ptr quic_dispatcher_; Network::Socket& listen_socket_; Runtime::FeatureFlag enabled_; + Network::UdpPacketWriter* udp_packet_writer_; }; using ActiveQuicListenerPtr = std::unique_ptr; diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_client_connection.cc b/source/extensions/quic_listeners/quiche/envoy_quic_client_connection.cc index c8cfe4d14d69..bb3c172536df 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_client_connection.cc +++ b/source/extensions/quic_listeners/quiche/envoy_quic_client_connection.cc @@ -6,6 +6,7 @@ #include "common/network/listen_socket_impl.h" #include "common/network/socket_option_factory.h" +#include "common/network/udp_packet_writer_handler_impl.h" #include "extensions/quic_listeners/quiche/envoy_quic_packet_writer.h" #include "extensions/quic_listeners/quiche/envoy_quic_utils.h" @@ -30,9 +31,11 @@ EnvoyQuicClientConnection::EnvoyQuicClientConnection( const quic::QuicConnectionId& server_connection_id, quic::QuicConnectionHelperInterface& helper, quic::QuicAlarmFactory& alarm_factory, const quic::ParsedQuicVersionVector& supported_versions, Event::Dispatcher& dispatcher, Network::ConnectionSocketPtr&& connection_socket) - : EnvoyQuicClientConnection(server_connection_id, helper, alarm_factory, - new EnvoyQuicPacketWriter(*connection_socket), true, - supported_versions, dispatcher, std::move(connection_socket)) {} + : EnvoyQuicClientConnection( + server_connection_id, helper, alarm_factory, + new EnvoyQuicPacketWriter( + std::make_unique(connection_socket->ioHandle())), + true, supported_versions, dispatcher, std::move(connection_socket)) {} EnvoyQuicClientConnection::EnvoyQuicClientConnection( const quic::QuicConnectionId& server_connection_id, quic::QuicConnectionHelperInterface& helper, @@ -41,7 +44,7 @@ EnvoyQuicClientConnection::EnvoyQuicClientConnection( Network::ConnectionSocketPtr&& connection_socket) : EnvoyQuicConnection( server_connection_id, - envoyAddressInstanceToQuicSocketAddress(connection_socket->remoteAddress()), helper, + envoyIpAddressToQuicSocketAddress(connection_socket->remoteAddress()->ip()), helper, alarm_factory, writer, owns_writer, quic::Perspective::IS_CLIENT, supported_versions, std::move(connection_socket)), dispatcher_(dispatcher) {} @@ -64,8 +67,8 @@ void EnvoyQuicClientConnection::processPacket( timestamp, /*owns_buffer=*/false, /*ttl=*/0, /*ttl_valid=*/false, /*packet_headers=*/nullptr, /*headers_length=*/0, /*owns_header_buffer*/ false); - ProcessUdpPacket(envoyAddressInstanceToQuicSocketAddress(local_address), - envoyAddressInstanceToQuicSocketAddress(peer_address), packet); + ProcessUdpPacket(envoyIpAddressToQuicSocketAddress(local_address->ip()), + envoyIpAddressToQuicSocketAddress(peer_address->ip()), packet); } uint64_t EnvoyQuicClientConnection::maxPacketSize() const { @@ -94,7 +97,8 @@ void EnvoyQuicClientConnection::setUpConnectionSocket() { void EnvoyQuicClientConnection::switchConnectionSocket( Network::ConnectionSocketPtr&& connection_socket) { - auto writer = std::make_unique(*connection_socket); + auto writer = std::make_unique( + std::make_unique(connection_socket->ioHandle())); // Destroy the old file_event before closing the old socket. 
Otherwise the socket might be picked // up by another socket() call while file_event is still operating on it. file_event_.reset(); diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_packet_writer.cc b/source/extensions/quic_listeners/quiche/envoy_quic_packet_writer.cc index 88816a34d059..a6a70623a43f 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_packet_writer.cc +++ b/source/extensions/quic_listeners/quiche/envoy_quic_packet_writer.cc @@ -1,43 +1,75 @@ #include "extensions/quic_listeners/quiche/envoy_quic_packet_writer.h" -#include "common/buffer/buffer_impl.h" -#include "common/network/utility.h" +#include #include "extensions/quic_listeners/quiche/envoy_quic_utils.h" namespace Envoy { namespace Quic { -EnvoyQuicPacketWriter::EnvoyQuicPacketWriter(Network::Socket& socket) - : write_blocked_(false), socket_(socket) {} -quic::WriteResult EnvoyQuicPacketWriter::WritePacket(const char* buffer, size_t buf_len, +namespace { + +quic::WriteResult convertToQuicWriteResult(Api::IoCallUint64Result& result) { + if (result.ok()) { + return {quic::WRITE_STATUS_OK, static_cast(result.rc_)}; + } + quic::WriteStatus status = result.err_->getErrorCode() == Api::IoError::IoErrorCode::Again + ? quic::WRITE_STATUS_BLOCKED + : quic::WRITE_STATUS_ERROR; + return {status, static_cast(result.err_->getErrorCode())}; +} + +} // namespace + +EnvoyQuicPacketWriter::EnvoyQuicPacketWriter(Network::UdpPacketWriterPtr envoy_udp_packet_writer) + : envoy_udp_packet_writer_(std::move(envoy_udp_packet_writer)) {} + +quic::WriteResult EnvoyQuicPacketWriter::WritePacket(const char* buffer, size_t buffer_len, const quic::QuicIpAddress& self_ip, const quic::QuicSocketAddress& peer_address, quic::PerPacketOptions* options) { ASSERT(options == nullptr, "Per packet option is not supported yet."); - ASSERT(!write_blocked_, "Cannot write while IO handle is blocked."); - Buffer::RawSlice slice; - slice.mem_ = const_cast(buffer); - slice.len_ = buf_len; + Buffer::BufferFragmentImpl fragment(buffer, buffer_len, nullptr); + Buffer::OwnedImpl buf; + buf.addBufferFragment(fragment); + quic::QuicSocketAddress self_address(self_ip, /*port=*/0); Network::Address::InstanceConstSharedPtr local_addr = quicAddressToEnvoyAddressInstance(self_address); Network::Address::InstanceConstSharedPtr remote_addr = quicAddressToEnvoyAddressInstance(peer_address); - Api::IoCallUint64Result result = Network::Utility::writeToSocket( - socket_.ioHandle(), &slice, 1, local_addr == nullptr ? nullptr : local_addr->ip(), - *remote_addr); - if (result.ok()) { - return {quic::WRITE_STATUS_OK, static_cast(result.rc_)}; - } - quic::WriteStatus status = result.err_->getErrorCode() == Api::IoError::IoErrorCode::Again - ? quic::WRITE_STATUS_BLOCKED - : quic::WRITE_STATUS_ERROR; - if (quic::IsWriteBlockedStatus(status)) { - write_blocked_ = true; - } - return {status, static_cast(result.err_->getErrorCode())}; + + Api::IoCallUint64Result result = envoy_udp_packet_writer_->writePacket( + buf, local_addr == nullptr ? 
nullptr : local_addr->ip(), *remote_addr); + + return convertToQuicWriteResult(result); +} + +quic::QuicByteCount +EnvoyQuicPacketWriter::GetMaxPacketSize(const quic::QuicSocketAddress& peer_address) const { + Network::Address::InstanceConstSharedPtr remote_addr = + quicAddressToEnvoyAddressInstance(peer_address); + return static_cast(envoy_udp_packet_writer_->getMaxPacketSize(*remote_addr)); +} + +quic::QuicPacketBuffer +EnvoyQuicPacketWriter::GetNextWriteLocation(const quic::QuicIpAddress& self_ip, + const quic::QuicSocketAddress& peer_address) { + quic::QuicSocketAddress self_address(self_ip, /*port=*/0); + Network::Address::InstanceConstSharedPtr local_addr = + quicAddressToEnvoyAddressInstance(self_address); + Network::Address::InstanceConstSharedPtr remote_addr = + quicAddressToEnvoyAddressInstance(peer_address); + Network::UdpPacketWriterBuffer write_location = envoy_udp_packet_writer_->getNextWriteLocation( + local_addr == nullptr ? nullptr : local_addr->ip(), *remote_addr); + return quic::QuicPacketBuffer(reinterpret_cast(write_location.buffer_), + write_location.release_buffer_); +} + +quic::WriteResult EnvoyQuicPacketWriter::Flush() { + Api::IoCallUint64Result result = envoy_udp_packet_writer_->flush(); + return convertToQuicWriteResult(result); } } // namespace Quic diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_packet_writer.h b/source/extensions/quic_listeners/quiche/envoy_quic_packet_writer.h index 4d2eed570165..bb4b736c84c8 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_packet_writer.h +++ b/source/extensions/quic_listeners/quiche/envoy_quic_packet_writer.h @@ -10,14 +10,14 @@ #pragma GCC diagnostic pop -#include "envoy/network/listener.h" +#include "envoy/network/udp_packet_writer_handler.h" namespace Envoy { namespace Quic { class EnvoyQuicPacketWriter : public quic::QuicPacketWriter { public: - EnvoyQuicPacketWriter(Network::Socket& socket); + EnvoyQuicPacketWriter(Network::UdpPacketWriterPtr envoy_udp_packet_writer); quic::WriteResult WritePacket(const char* buffer, size_t buf_len, const quic::QuicIpAddress& self_address, @@ -25,26 +25,19 @@ class EnvoyQuicPacketWriter : public quic::QuicPacketWriter { quic::PerPacketOptions* options) override; // quic::QuicPacketWriter - bool IsWriteBlocked() const override { return write_blocked_; } - void SetWritable() override { write_blocked_ = false; } - quic::QuicByteCount - GetMaxPacketSize(const quic::QuicSocketAddress& /*peer_address*/) const override { - return quic::kMaxOutgoingPacketSize; - } - // Currently this writer doesn't support pacing offload or batch writing. + bool IsWriteBlocked() const override { return envoy_udp_packet_writer_->isWriteBlocked(); } + void SetWritable() override { envoy_udp_packet_writer_->setWritable(); } + bool IsBatchMode() const override { return envoy_udp_packet_writer_->isBatchMode(); } + // Currently this writer doesn't support pacing offload. 
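+  // SupportsReleaseTime() stays false, while the blocked state, batch mode,
+  // max packet size, next write location and Flush() are all forwarded to the
+  // wrapped Network::UdpPacketWriter held below.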
bool SupportsReleaseTime() const override { return false; } - bool IsBatchMode() const override { return false; } - quic::QuicPacketBuffer - GetNextWriteLocation(const quic::QuicIpAddress& /*self_address*/, - const quic::QuicSocketAddress& /*peer_address*/) override { - return {nullptr, nullptr}; - } - quic::WriteResult Flush() override { return {quic::WRITE_STATUS_OK, 0}; } + + quic::QuicByteCount GetMaxPacketSize(const quic::QuicSocketAddress& peer_address) const override; + quic::QuicPacketBuffer GetNextWriteLocation(const quic::QuicIpAddress& self_address, + const quic::QuicSocketAddress& peer_address) override; + quic::WriteResult Flush() override; private: - // Modified by WritePacket() to indicate underlying IoHandle status. - bool write_blocked_; - Network::Socket& socket_; + Network::UdpPacketWriterPtr envoy_udp_packet_writer_; }; } // namespace Quic diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_utils.cc b/source/extensions/quic_listeners/quiche/envoy_quic_utils.cc index b5c710a81269..c7a32fbf317d 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_utils.cc +++ b/source/extensions/quic_listeners/quiche/envoy_quic_utils.cc @@ -22,25 +22,30 @@ quicAddressToEnvoyAddressInstance(const quic::QuicSocketAddress& quic_address) { : nullptr; } -quic::QuicSocketAddress envoyAddressInstanceToQuicSocketAddress( - const Network::Address::InstanceConstSharedPtr& envoy_address) { - ASSERT(envoy_address != nullptr && envoy_address->type() == Network::Address::Type::Ip); - uint32_t port = envoy_address->ip()->port(); +quic::QuicSocketAddress envoyIpAddressToQuicSocketAddress(const Network::Address::Ip* envoy_ip) { + if (envoy_ip == nullptr) { + // Return uninitialized socket addr + return quic::QuicSocketAddress(); + } + + uint32_t port = envoy_ip->port(); sockaddr_storage ss; - if (envoy_address->ip()->version() == Network::Address::IpVersion::v4) { + + if (envoy_ip->version() == Network::Address::IpVersion::v4) { + // Create and return quic ipv4 address auto ipv4_addr = reinterpret_cast(&ss); memset(ipv4_addr, 0, sizeof(sockaddr_in)); ipv4_addr->sin_family = AF_INET; ipv4_addr->sin_port = htons(port); - ipv4_addr->sin_addr.s_addr = envoy_address->ip()->ipv4()->address(); + ipv4_addr->sin_addr.s_addr = envoy_ip->ipv4()->address(); } else { + // Create and return quic ipv6 address auto ipv6_addr = reinterpret_cast(&ss); memset(ipv6_addr, 0, sizeof(sockaddr_in6)); ipv6_addr->sin6_family = AF_INET6; ipv6_addr->sin6_port = htons(port); ASSERT(sizeof(ipv6_addr->sin6_addr.s6_addr) == 16u); - *reinterpret_cast(ipv6_addr->sin6_addr.s6_addr) = - envoy_address->ip()->ipv6()->address(); + *reinterpret_cast(ipv6_addr->sin6_addr.s6_addr) = envoy_ip->ipv6()->address(); } return quic::QuicSocketAddress(ss); } diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_utils.h b/source/extensions/quic_listeners/quiche/envoy_quic_utils.h index 34dce87d836b..5c321ab749f1 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_utils.h +++ b/source/extensions/quic_listeners/quiche/envoy_quic_utils.h @@ -34,8 +34,7 @@ namespace Quic { Network::Address::InstanceConstSharedPtr quicAddressToEnvoyAddressInstance(const quic::QuicSocketAddress& quic_address); -quic::QuicSocketAddress envoyAddressInstanceToQuicSocketAddress( - const Network::Address::InstanceConstSharedPtr& envoy_address); +quic::QuicSocketAddress envoyIpAddressToQuicSocketAddress(const Network::Address::Ip* envoy_ip); // The returned header map has all keys in lower case. 
template diff --git a/source/extensions/quic_listeners/quiche/udp_gso_batch_writer.cc b/source/extensions/quic_listeners/quiche/udp_gso_batch_writer.cc new file mode 100644 index 000000000000..5525ee285d41 --- /dev/null +++ b/source/extensions/quic_listeners/quiche/udp_gso_batch_writer.cc @@ -0,0 +1,126 @@ +#include "extensions/quic_listeners/quiche/udp_gso_batch_writer.h" + +#include "common/network/io_socket_error_impl.h" + +#include "extensions/quic_listeners/quiche/envoy_quic_utils.h" + +namespace Envoy { +namespace Quic { +namespace { +Api::IoCallUint64Result convertQuicWriteResult(quic::WriteResult quic_result, size_t payload_len) { + switch (quic_result.status) { + case quic::WRITE_STATUS_OK: { + if (quic_result.bytes_written == 0) { + ENVOY_LOG_MISC(trace, "sendmsg successful, message buffered to send"); + } else { + ENVOY_LOG_MISC(trace, "sendmsg successful, flushed bytes {}", quic_result.bytes_written); + } + // Return payload_len as rc & nullptr as error on success + return Api::IoCallUint64Result( + /*rc=*/payload_len, + /*err=*/Api::IoErrorPtr(nullptr, Network::IoSocketError::deleteIoError)); + } + case quic::WRITE_STATUS_BLOCKED_DATA_BUFFERED: { + // Data was buffered, Return payload_len as rc & nullptr as error + ENVOY_LOG_MISC(trace, "sendmsg blocked, message buffered to send"); + return Api::IoCallUint64Result( + /*rc=*/payload_len, + /*err=*/Api::IoErrorPtr(nullptr, Network::IoSocketError::deleteIoError)); + } + case quic::WRITE_STATUS_BLOCKED: { + // Writer blocked, return error + ENVOY_LOG_MISC(trace, "sendmsg blocked, message not buffered"); + return Api::IoCallUint64Result( + /*rc=*/0, + /*err=*/Api::IoErrorPtr(Network::IoSocketError::getIoSocketEagainInstance(), + Network::IoSocketError::deleteIoError)); + } + default: { + // Write Failed, return {0 and error_code} + ENVOY_LOG_MISC(trace, "sendmsg failed with error code {}", + static_cast(quic_result.error_code)); + return Api::IoCallUint64Result( + /*rc=*/0, + /*err=*/Api::IoErrorPtr(new Network::IoSocketError(quic_result.error_code), + Network::IoSocketError::deleteIoError)); + } + } +} + +} // namespace + +// Initialize QuicGsoBatchWriter, set io_handle_ and stats_ +UdpGsoBatchWriter::UdpGsoBatchWriter(Network::IoHandle& io_handle, Stats::Scope& scope) + : quic::QuicGsoBatchWriter(std::make_unique(), io_handle.fd()), + stats_(generateStats(scope)) {} + +// Do Nothing in the Destructor For now +UdpGsoBatchWriter::~UdpGsoBatchWriter() = default; + +Api::IoCallUint64Result +UdpGsoBatchWriter::writePacket(const Buffer::Instance& buffer, const Network::Address::Ip* local_ip, + const Network::Address::Instance& peer_address) { + // Convert received parameters to relevant forms + quic::QuicSocketAddress peer_addr = envoyIpAddressToQuicSocketAddress(peer_address.ip()); + quic::QuicSocketAddress self_addr = envoyIpAddressToQuicSocketAddress(local_ip); + size_t payload_len = static_cast(buffer.length()); + + // TODO(yugant): Currently we do not use PerPacketOptions with Quic, we may want to + // specify this parameter here at a later stage. 
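+  // quic::QuicGsoBatchWriter batches consecutive payloads of equal length (the
+  // GSO size): a shorter payload is written out together with the batch, a
+  // longer one forces the buffered batch out and starts a new one, and an
+  // explicit flush() pushes out whatever is still buffered.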
+ quic::WriteResult quic_result = + WritePacket(buffer.toString().c_str(), payload_len, self_addr.host(), peer_addr, + /*quic::PerPacketOptions=*/nullptr); + updateUdpGsoBatchWriterStats(quic_result); + + return convertQuicWriteResult(quic_result, payload_len); +} + +uint64_t UdpGsoBatchWriter::getMaxPacketSize(const Network::Address::Instance& peer_address) const { + quic::QuicSocketAddress peer_addr = envoyIpAddressToQuicSocketAddress(peer_address.ip()); + return static_cast(GetMaxPacketSize(peer_addr)); +} + +Network::UdpPacketWriterBuffer +UdpGsoBatchWriter::getNextWriteLocation(const Network::Address::Ip* local_ip, + const Network::Address::Instance& peer_address) { + quic::QuicSocketAddress peer_addr = envoyIpAddressToQuicSocketAddress(peer_address.ip()); + quic::QuicSocketAddress self_addr = envoyIpAddressToQuicSocketAddress(local_ip); + quic::QuicPacketBuffer quic_buf = GetNextWriteLocation(self_addr.host(), peer_addr); + return Network::UdpPacketWriterBuffer(reinterpret_cast(quic_buf.buffer), + Network::UdpMaxOutgoingPacketSize, quic_buf.release_buffer); +} + +Api::IoCallUint64Result UdpGsoBatchWriter::flush() { + quic::WriteResult quic_result = Flush(); + updateUdpGsoBatchWriterStats(quic_result); + + return convertQuicWriteResult(quic_result, /*payload_len=*/0); +} + +void UdpGsoBatchWriter::updateUdpGsoBatchWriterStats(quic::WriteResult quic_result) { + if (quic_result.status == quic::WRITE_STATUS_OK && quic_result.bytes_written > 0) { + if (gso_size_ > 0u) { + uint64_t num_pkts_in_batch = + std::ceil(static_cast(quic_result.bytes_written) / gso_size_); + stats_.pkts_sent_per_batch_.recordValue(num_pkts_in_batch); + } + stats_.total_bytes_sent_.add(quic_result.bytes_written); + } + stats_.internal_buffer_size_.set(batch_buffer().SizeInUse()); + gso_size_ = buffered_writes().empty() ? 0u : buffered_writes().front().buf_len; +} + +UdpGsoBatchWriterStats UdpGsoBatchWriter::generateStats(Stats::Scope& scope) { + return { + UDP_GSO_BATCH_WRITER_STATS(POOL_COUNTER(scope), POOL_GAUGE(scope), POOL_HISTOGRAM(scope))}; +} + +UdpGsoBatchWriterFactory::UdpGsoBatchWriterFactory() = default; + +Network::UdpPacketWriterPtr +UdpGsoBatchWriterFactory::createUdpPacketWriter(Network::IoHandle& io_handle, Stats::Scope& scope) { + return std::make_unique(io_handle, scope); +} + +} // namespace Quic +} // namespace Envoy diff --git a/source/extensions/quic_listeners/quiche/udp_gso_batch_writer.h b/source/extensions/quic_listeners/quiche/udp_gso_batch_writer.h new file mode 100644 index 000000000000..477ad8bdcdc7 --- /dev/null +++ b/source/extensions/quic_listeners/quiche/udp_gso_batch_writer.h @@ -0,0 +1,124 @@ +#pragma once + +#pragma GCC diagnostic push +// QUICHE allows unused parameters. +#pragma GCC diagnostic ignored "-Wunused-parameter" +// QUICHE uses offsetof(). 
+#pragma GCC diagnostic ignored "-Winvalid-offsetof" +// QUICHE allows ignored qualifiers +#pragma GCC diagnostic ignored "-Wignored-qualifiers" + +// QUICHE doesn't mark override at QuicBatchWriterBase::SupportsReleaseTime() +#ifdef __clang__ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Winconsistent-missing-override" +#elif defined(__GNUC__) && __GNUC__ >= 5 +#pragma GCC diagnostic ignored "-Wsuggest-override" +#endif + +#include "quiche/quic/core/batch_writer/quic_gso_batch_writer.h" + +#ifdef __clang__ +#pragma clang diagnostic pop +#endif + +#pragma GCC diagnostic pop + +#include "envoy/network/udp_packet_writer_handler.h" + +#include "common/protobuf/utility.h" +#include "common/runtime/runtime_protos.h" + +namespace Envoy { +namespace Quic { + +/** + * @brief The following can be used to collect statistics + * related to UdpGsoBatchWriter. The stats maintained are + * as follows: + * + * @total_bytes_sent: Maintains the count of total bytes + * sent via the UdpGsoBatchWriter on the current ioHandle + * via both WritePacket() and Flush() functions. + * + * @internal_buffer_size: Gauge value to keep a track of the + * total bytes buffered to writer by UdpGsoBatchWriter. + * Resets whenever the internal bytes are sent to the client. + * + * @pkts_sent_per_batch: Histogram to keep maintain stats of + * total number of packets sent in each batch by UdpGsoBatchWriter + * Provides summary count of batch-sizes within bucketed range, + * and also provides sum and count stats. + * + * TODO(danzh): Add writer stats to QUIC Documentation when it is + * created for QUIC/HTTP3 docs. Also specify in the documentation + * that user has to compile in QUICHE to use UdpGsoBatchWriter. + */ +#define UDP_GSO_BATCH_WRITER_STATS(COUNTER, GAUGE, HISTOGRAM) \ + COUNTER(total_bytes_sent) \ + GAUGE(internal_buffer_size, NeverImport) \ + HISTOGRAM(pkts_sent_per_batch, Unspecified) + +/** + * Wrapper struct for udp gso batch writer stats. @see stats_macros.h + */ +struct UdpGsoBatchWriterStats { + UDP_GSO_BATCH_WRITER_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT, + GENERATE_HISTOGRAM_STRUCT) +}; + +/** + * UdpPacketWriter implementation based on quic::QuicGsoBatchWriter to send packets + * in batches, using UDP socket's generic segmentation offload(GSO) capability. 
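+ *
+ * Illustrative use, as a sketch only: io_handle and listener_scope are
+ * placeholder names, and in practice the writer is built by
+ * UdpGsoBatchWriterFactory from a listener's udp_writer_config rather than
+ * constructed by hand:
+ *
+ *   Quic::UdpGsoBatchWriterFactory factory;
+ *   Network::UdpPacketWriterPtr writer =
+ *       factory.createUdpPacketWriter(io_handle, listener_scope);
+ *   writer->writePacket(buffer, local_ip, peer_address); // may only buffer
+ *   writer->flush();                                      // force the batch out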
+ */ +class UdpGsoBatchWriter : public quic::QuicGsoBatchWriter, public Network::UdpPacketWriter { +public: + UdpGsoBatchWriter(Network::IoHandle& io_handle, Stats::Scope& scope); + + ~UdpGsoBatchWriter() override; + + // writePacket perform batched sends based on QuicGsoBatchWriter::WritePacket + Api::IoCallUint64Result writePacket(const Buffer::Instance& buffer, + const Network::Address::Ip* local_ip, + const Network::Address::Instance& peer_address) override; + + // UdpPacketWriter Implementations + bool isWriteBlocked() const override { return IsWriteBlocked(); } + void setWritable() override { return SetWritable(); } + bool isBatchMode() const override { return IsBatchMode(); } + uint64_t getMaxPacketSize(const Network::Address::Instance& peer_address) const override; + Network::UdpPacketWriterBuffer + getNextWriteLocation(const Network::Address::Ip* local_ip, + const Network::Address::Instance& peer_address) override; + Api::IoCallUint64Result flush() override; + +private: + /** + * @brief Update stats_ field for the udp packet writer + * @param quic_result is the result from Flush/WritePacket + */ + void updateUdpGsoBatchWriterStats(quic::WriteResult quic_result); + + /** + * @brief Generate UdpGsoBatchWriterStats object from scope + * @param scope for stats + * @return UdpGsoBatchWriterStats for scope + */ + UdpGsoBatchWriterStats generateStats(Stats::Scope& scope); + UdpGsoBatchWriterStats stats_; + uint64_t gso_size_; +}; + +class UdpGsoBatchWriterFactory : public Network::UdpPacketWriterFactory { +public: + UdpGsoBatchWriterFactory(); + + Network::UdpPacketWriterPtr createUdpPacketWriter(Network::IoHandle& io_handle, + Stats::Scope& scope) override; + +private: + envoy::config::core::v3::RuntimeFeatureFlag enabled_; +}; + +} // namespace Quic +} // namespace Envoy diff --git a/source/extensions/quic_listeners/quiche/udp_gso_batch_writer_config.cc b/source/extensions/quic_listeners/quiche/udp_gso_batch_writer_config.cc new file mode 100644 index 000000000000..e2428f32ecaf --- /dev/null +++ b/source/extensions/quic_listeners/quiche/udp_gso_batch_writer_config.cc @@ -0,0 +1,30 @@ +#include "extensions/quic_listeners/quiche/udp_gso_batch_writer_config.h" + +#include "envoy/config/listener/v3/udp_gso_batch_writer_config.pb.h" + +#include "common/api/os_sys_calls_impl.h" + +#include "extensions/quic_listeners/quiche/udp_gso_batch_writer.h" + +namespace Envoy { +namespace Quic { + +ProtobufTypes::MessagePtr UdpGsoBatchWriterConfigFactory::createEmptyConfigProto() { + return std::make_unique(); +} + +Network::UdpPacketWriterFactoryPtr +UdpGsoBatchWriterConfigFactory::createUdpPacketWriterFactory(const Protobuf::Message& /*message*/) { + if (!Api::OsSysCallsSingleton::get().supportsUdpGso()) { + throw EnvoyException("Error configuring batch writer on platform without support " + "for UDP GSO. 
Reset udp_writer_config to default writer"); + } + return std::make_unique(); +} + +std::string UdpGsoBatchWriterConfigFactory::name() const { return GsoBatchWriterName; } + +REGISTER_FACTORY(UdpGsoBatchWriterConfigFactory, Network::UdpPacketWriterConfigFactory); + +} // namespace Quic +} // namespace Envoy diff --git a/source/extensions/quic_listeners/quiche/udp_gso_batch_writer_config.h b/source/extensions/quic_listeners/quiche/udp_gso_batch_writer_config.h new file mode 100644 index 000000000000..20c286808872 --- /dev/null +++ b/source/extensions/quic_listeners/quiche/udp_gso_batch_writer_config.h @@ -0,0 +1,28 @@ +#pragma once + +#include + +#include "envoy/network/udp_packet_writer_config.h" +#include "envoy/registry/registry.h" + +namespace Envoy { +namespace Quic { + +const std::string GsoBatchWriterName{"udp_gso_batch_writer"}; + +// Network::UdpPacketWriterConfigFactory to create UdpGsoBatchWriterFactory based on given +// protobuf. +class UdpGsoBatchWriterConfigFactory : public Network::UdpPacketWriterConfigFactory { +public: + ProtobufTypes::MessagePtr createEmptyConfigProto() override; + + Network::UdpPacketWriterFactoryPtr + createUdpPacketWriterFactory(const Protobuf::Message&) override; + + std::string name() const override; +}; + +DECLARE_FACTORY(UdpGsoBatchWriterConfigFactory); + +} // namespace Quic +} // namespace Envoy diff --git a/source/server/BUILD b/source/server/BUILD index fb1efa139ec4..7bfcd7699576 100644 --- a/source/server/BUILD +++ b/source/server/BUILD @@ -305,6 +305,7 @@ envoy_cc_library( ":well_known_names_lib", "//include/envoy/access_log:access_log_interface", "//include/envoy/network:connection_interface", + "//include/envoy/network:udp_packet_writer_config_interface", "//include/envoy/server:active_udp_listener_config_interface", "//include/envoy/server:api_listener_interface", "//include/envoy/server:filter_config_interface", diff --git a/source/server/admin/admin.h b/source/server/admin/admin.h index 62bfb2f80acc..e3c66660fdc5 100644 --- a/source/server/admin/admin.h +++ b/source/server/admin/admin.h @@ -379,6 +379,9 @@ class AdminImpl : public Admin, Network::ActiveUdpListenerFactory* udpListenerFactory() override { NOT_REACHED_GCOVR_EXCL_LINE; } + Network::UdpPacketWriterFactoryOptRef udpPacketWriterFactory() override { + NOT_REACHED_GCOVR_EXCL_LINE; + } envoy::config::core::v3::TrafficDirection direction() const override { return envoy::config::core::v3::UNSPECIFIED; } diff --git a/source/server/connection_handler_impl.cc b/source/server/connection_handler_impl.cc index c0becac81ad5..323ddf4df430 100644 --- a/source/server/connection_handler_impl.cc +++ b/source/server/connection_handler_impl.cc @@ -545,16 +545,30 @@ ConnectionHandlerImpl::ActiveTcpConnection::~ActiveTcpConnection() { ActiveRawUdpListener::ActiveRawUdpListener(Network::ConnectionHandler& parent, Event::Dispatcher& dispatcher, Network::ListenerConfig& config) - : ActiveRawUdpListener( - parent, - dispatcher.createUdpListener(config.listenSocketFactory().getListenSocket(), *this), - config) {} + : ActiveRawUdpListener(parent, config.listenSocketFactory().getListenSocket(), dispatcher, + config) {} + +ActiveRawUdpListener::ActiveRawUdpListener(Network::ConnectionHandler& parent, + Network::SocketSharedPtr listen_socket_ptr, + Event::Dispatcher& dispatcher, + Network::ListenerConfig& config) + : ActiveRawUdpListener(parent, *listen_socket_ptr, listen_socket_ptr, dispatcher, config) {} ActiveRawUdpListener::ActiveRawUdpListener(Network::ConnectionHandler& parent, + Network::Socket& 
listen_socket, + Network::SocketSharedPtr listen_socket_ptr, + Event::Dispatcher& dispatcher, + Network::ListenerConfig& config) + : ActiveRawUdpListener(parent, listen_socket, + dispatcher.createUdpListener(std::move(listen_socket_ptr), *this), + config) {} + +ActiveRawUdpListener::ActiveRawUdpListener(Network::ConnectionHandler& parent, + Network::Socket& listen_socket, Network::UdpListenerPtr&& listener, Network::ListenerConfig& config) : ConnectionHandlerImpl::ActiveListenerImplBase(parent, &config), - udp_listener_(std::move(listener)), read_filter_(nullptr) { + udp_listener_(std::move(listener)), read_filter_(nullptr), listen_socket_(listen_socket) { // Create the filter chain on creating a new udp listener config_->filterChainFactory().createUdpListenerFilterChain(*this, *this); @@ -564,6 +578,10 @@ ActiveRawUdpListener::ActiveRawUdpListener(Network::ConnectionHandler& parent, fmt::format("Cannot create listener as no read filter registered for the udp listener: {} ", config_->name())); } + + // Create udp_packet_writer + udp_packet_writer_ = config.udpPacketWriterFactory()->get().createUdpPacketWriter( + listen_socket_.ioHandle(), config.listenerScope()); } void ActiveRawUdpListener::onData(Network::UdpRecvData& data) { read_filter_->onData(data); } @@ -574,6 +592,9 @@ void ActiveRawUdpListener::onWriteReady(const Network::Socket&) { // TODO(sumukhs): This is not used now. When write filters are implemented, this is a // trigger to invoke the on write ready API on the filters which is when they can write // data + + // Clear write_blocked_ status for udpPacketWriter + udp_packet_writer_->setWritable(); } void ActiveRawUdpListener::onReceiveError(Api::IoError::IoErrorCode error_code) { diff --git a/source/server/connection_handler_impl.h b/source/server/connection_handler_impl.h index 17c94ded87a3..63a8c97575f3 100644 --- a/source/server/connection_handler_impl.h +++ b/source/server/connection_handler_impl.h @@ -348,14 +348,21 @@ class ActiveRawUdpListener : public Network::UdpListenerCallbacks, public: ActiveRawUdpListener(Network::ConnectionHandler& parent, Event::Dispatcher& dispatcher, Network::ListenerConfig& config); - ActiveRawUdpListener(Network::ConnectionHandler& parent, Network::UdpListenerPtr&& listener, + ActiveRawUdpListener(Network::ConnectionHandler& parent, + Network::SocketSharedPtr listen_socket_ptr, Event::Dispatcher& dispatcher, Network::ListenerConfig& config); + ActiveRawUdpListener(Network::ConnectionHandler& parent, Network::Socket& listen_socket, + Network::SocketSharedPtr listen_socket_ptr, Event::Dispatcher& dispatcher, + Network::ListenerConfig& config); + ActiveRawUdpListener(Network::ConnectionHandler& parent, Network::Socket& listen_socket, + Network::UdpListenerPtr&& listener, Network::ListenerConfig& config); // Network::UdpListenerCallbacks void onData(Network::UdpRecvData& data) override; void onReadReady() override; void onWriteReady(const Network::Socket& socket) override; void onReceiveError(Api::IoError::IoErrorCode error_code) override; + Network::UdpPacketWriter& udpPacketWriter() override { return *udp_packet_writer_; } // ActiveListenerImplBase Network::Listener* listener() override { return udp_listener_.get(); } @@ -379,6 +386,8 @@ class ActiveRawUdpListener : public Network::UdpListenerCallbacks, private: Network::UdpListenerPtr udp_listener_; Network::UdpListenerReadFilterPtr read_filter_; + Network::UdpPacketWriterPtr udp_packet_writer_; + Network::Socket& listen_socket_; }; } // namespace Server diff --git 
a/source/server/listener_impl.cc b/source/server/listener_impl.cc index bd6c81b6c62e..f3fe37ade187 100644 --- a/source/server/listener_impl.cc +++ b/source/server/listener_impl.cc @@ -5,6 +5,7 @@ #include "envoy/config/listener/v3/listener_components.pb.h" #include "envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.pb.h" #include "envoy/network/exception.h" +#include "envoy/network/udp_packet_writer_config.h" #include "envoy/registry/registry.h" #include "envoy/server/active_udp_listener_config.h" #include "envoy/server/transport_socket_config.h" @@ -276,6 +277,7 @@ ListenerImpl::ListenerImpl(const envoy::config::listener::v3::Listener& config, auto socket_type = Network::Utility::protobufAddressSocketType(config.address()); buildListenSocketOptions(socket_type); buildUdpListenerFactory(socket_type, concurrency); + buildUdpWriterFactory(socket_type); createListenerFilterFactories(socket_type); validateFilterChains(socket_type); buildFilterChains(); @@ -331,6 +333,7 @@ ListenerImpl::ListenerImpl(ListenerImpl& origin, auto socket_type = Network::Utility::protobufAddressSocketType(config.address()); buildListenSocketOptions(socket_type); buildUdpListenerFactory(socket_type, concurrency); + buildUdpWriterFactory(socket_type); createListenerFilterFactories(socket_type); validateFilterChains(socket_type); buildFilterChains(); @@ -372,6 +375,24 @@ void ListenerImpl::buildUdpListenerFactory(Network::Socket::Type socket_type, } } +void ListenerImpl::buildUdpWriterFactory(Network::Socket::Type socket_type) { + if (socket_type == Network::Socket::Type::Datagram) { + auto udp_writer_config = config_.udp_writer_config(); + if (!Api::OsSysCallsSingleton::get().supportsUdpGso() || + udp_writer_config.typed_config().type_url().empty()) { + const std::string default_type_url = + "type.googleapis.com/envoy.config.listener.v3.UdpDefaultWriterOptions"; + udp_writer_config.mutable_typed_config()->set_type_url(default_type_url); + } + auto& config_factory = + Config::Utility::getAndCheckFactory( + udp_writer_config); + ProtobufTypes::MessagePtr message = Config::Utility::translateAnyToFactoryConfig( + udp_writer_config.typed_config(), validation_visitor_, config_factory); + udp_writer_factory_ = config_factory.createUdpPacketWriterFactory(*message); + } +} + void ListenerImpl::buildListenSocketOptions(Network::Socket::Type socket_type) { // The process-wide `signal()` handling may fail to handle SIGPIPE if overridden // in the process (i.e., on a mobile client). Some OSes support handling it at the socket layer: diff --git a/source/server/listener_impl.h b/source/server/listener_impl.h index aa1bd2ca1b0d..920f8a24e9b3 100644 --- a/source/server/listener_impl.h +++ b/source/server/listener_impl.h @@ -302,6 +302,9 @@ class ListenerImpl final : public Network::ListenerConfig, Network::ActiveUdpListenerFactory* udpListenerFactory() override { return udp_listener_factory_.get(); } + Network::UdpPacketWriterFactoryOptRef udpPacketWriterFactory() override { + return Network::UdpPacketWriterFactoryOptRef(std::ref(*udp_writer_factory_)); + } Network::ConnectionBalancer& connectionBalancer() override { return *connection_balancer_; } ResourceLimit& openConnections() override { return *open_connections_; } @@ -341,6 +344,7 @@ class ListenerImpl final : public Network::ListenerConfig, // Helpers for constructor. 
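+  // buildUdpWriterFactory() selects the packet writer factory for datagram
+  // listeners: it falls back to the default per-packet writer when the platform
+  // lacks UDP GSO support or udp_writer_config has no typed_config, and
+  // otherwise builds the factory named by that typed_config.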
void buildAccessLog(); void buildUdpListenerFactory(Network::Socket::Type socket_type, uint32_t concurrency); + void buildUdpWriterFactory(Network::Socket::Type socket_type); void buildListenSocketOptions(Network::Socket::Type socket_type); void createListenerFilterFactories(Network::Socket::Type socket_type); void validateFilterChains(Network::Socket::Type socket_type); @@ -386,6 +390,7 @@ class ListenerImpl final : public Network::ListenerConfig, const std::chrono::milliseconds listener_filters_timeout_; const bool continue_on_listener_filters_timeout_; Network::ActiveUdpListenerFactoryPtr udp_listener_factory_; + Network::UdpPacketWriterFactoryPtr udp_writer_factory_; Network::ConnectionBalancerPtr connection_balancer_; std::shared_ptr listener_factory_context_; FilterChainManagerImpl filter_chain_manager_; diff --git a/test/common/network/BUILD b/test/common/network/BUILD index cd05a87e8344..0557529c0a0a 100644 --- a/test/common/network/BUILD +++ b/test/common/network/BUILD @@ -196,23 +196,74 @@ envoy_cc_test( ], ) +envoy_cc_test_library( + name = "udp_listener_impl_test_base_lib", + hdrs = ["udp_listener_impl_test_base.h"], + deps = [ + "//source/common/event:dispatcher_lib", + "//source/common/network:address_lib", + "//source/common/network:listener_lib", + "//source/common/network:utility_lib", + "//source/common/stats:stats_lib", + "//test/mocks/network:network_mocks", + "//test/mocks/server:server_mocks", + "//test/test_common:environment_lib", + "//test/test_common:network_utility_lib", + "//test/test_common:simulated_time_system_lib", + "//test/test_common:test_runtime_lib", + "//test/test_common:utility_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + ], +) + envoy_cc_test( name = "udp_listener_impl_test", srcs = ["udp_listener_impl_test.cc"], tags = ["fails_on_windows"], deps = [ + ":udp_listener_impl_test_base_lib", + "//source/common/event:dispatcher_lib", + "//source/common/network:address_lib", + "//source/common/network:listener_lib", + "//source/common/network:socket_option_lib", + "//source/common/network:udp_packet_writer_handler_lib", + "//source/common/network:utility_lib", + "//source/common/stats:stats_lib", + "//test/common/network:listener_impl_test_base_lib", + "//test/mocks/network:network_mocks", + "//test/mocks/server:server_mocks", + "//test/test_common:environment_lib", + "//test/test_common:network_utility_lib", + "//test/test_common:threadsafe_singleton_injector_lib", + "//test/test_common:utility_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + ], +) + +envoy_cc_test( + name = "udp_listener_impl_batch_writer_test", + srcs = ["udp_listener_impl_batch_writer_test.cc"], + tags = [ + # Skipping as quiche quic_gso_batch_writer.h does not exist on Windows + "skip_on_windows", + ], + deps = [ + ":udp_listener_impl_test_base_lib", "//source/common/event:dispatcher_lib", "//source/common/network:address_lib", "//source/common/network:listener_lib", "//source/common/network:socket_option_lib", + "//source/common/network:udp_packet_writer_handler_lib", "//source/common/network:utility_lib", "//source/common/stats:stats_lib", + "//source/extensions/quic_listeners/quiche:udp_gso_batch_writer_lib", "//test/common/network:listener_impl_test_base_lib", "//test/mocks/network:network_mocks", "//test/test_common:environment_lib", "//test/test_common:network_utility_lib", "//test/test_common:threadsafe_singleton_injector_lib", "//test/test_common:utility_lib", + "@com_googlesource_quiche//:quic_test_tools_mock_syscall_wrapper_lib", 
"@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], ) diff --git a/test/common/network/udp_listener_impl_batch_writer_test.cc b/test/common/network/udp_listener_impl_batch_writer_test.cc new file mode 100644 index 000000000000..959fd52515f1 --- /dev/null +++ b/test/common/network/udp_listener_impl_batch_writer_test.cc @@ -0,0 +1,279 @@ +#include +#include +#include +#include +#include + +#ifdef __GNUC__ +#pragma GCC diagnostic push +// QUICHE allows unused parameters. +#pragma GCC diagnostic ignored "-Wunused-parameter" +// QUICHE uses offsetof(). +#pragma GCC diagnostic ignored "-Winvalid-offsetof" +#pragma GCC diagnostic ignored "-Wtype-limits" + +#include "quiche/quic/test_tools/quic_mock_syscall_wrapper.h" + +#pragma GCC diagnostic pop +#else +#include "quiche/quic/test_tools/quic_mock_syscall_wrapper.h" +#endif + +#include "envoy/config/core/v3/base.pb.h" + +#include "common/network/address_impl.h" +#include "common/network/socket_option_factory.h" +#include "common/network/socket_option_impl.h" +#include "common/network/udp_listener_impl.h" +#include "common/network/utility.h" + +#include "extensions/quic_listeners/quiche/udp_gso_batch_writer.h" + +#include "test/common/network/udp_listener_impl_test_base.h" +#include "test/mocks/api/mocks.h" +#include "test/mocks/network/mocks.h" +#include "test/mocks/server/mocks.h" +#include "test/test_common/environment.h" +#include "test/test_common/network_utility.h" +#include "test/test_common/threadsafe_singleton_injector.h" +#include "test/test_common/utility.h" + +#include "absl/time/time.h" +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::_; +using testing::Invoke; +using testing::ReturnRef; + +namespace Envoy { +namespace Network { +namespace { + +size_t getPacketLength(const msghdr* msg) { + size_t length = 0; + for (size_t i = 0; i < msg->msg_iovlen; ++i) { + length += msg->msg_iov[i].iov_len; + } + return length; +} + +class UdpListenerImplBatchWriterTest : public UdpListenerImplTestBase { +public: + void SetUp() override { + // Set listening socket options and set UdpGsoBatchWriter + server_socket_->addOptions(SocketOptionFactory::buildIpPacketInfoOptions()); + server_socket_->addOptions(SocketOptionFactory::buildRxQueueOverFlowOptions()); + listener_ = std::make_unique( + dispatcherImpl(), server_socket_, listener_callbacks_, dispatcherImpl().timeSource()); + udp_packet_writer_ = std::make_unique( + server_socket_->ioHandle(), listener_config_.listenerScope()); + ON_CALL(listener_callbacks_, udpPacketWriter()).WillByDefault(ReturnRef(*udp_packet_writer_)); + } +}; + +INSTANTIATE_TEST_SUITE_P(IpVersions, UdpListenerImplBatchWriterTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); + +/** + * Tests UDP Packet Writer To Send packets in Batches to a client + * 1. Setup a udp listener and client socket + * 2. Send different sized payloads to client. + * - Verify that the packets are buffered as long as payload + * length matches gso_size. + * - When payload size > gso_size verify that the new payload is + * buffered and already buffered packets are sent to client + * - When payload size < gso_size verify that the new payload is + * sent along with the already buffered payloads. + * 3. Call UdpPacketWriter's External Flush + * - Verify that the internal buffer is emptied and the + * total_bytes_sent counter is updated accordingly. 
+ */ +TEST_P(UdpListenerImplBatchWriterTest, SendData) { + EXPECT_TRUE(udp_packet_writer_->isBatchMode()); + Address::InstanceConstSharedPtr send_from_addr = getNonDefaultSourceAddress(); + + absl::FixedArray payloads{"length7", "length7", "len<7", + "length7", "length7", "length>7"}; + std::string internal_buffer(""); + std::string last_buffered(""); + std::list pkts_to_send; + bool send_buffered_pkts = false; + + // Get initial value of total_bytes_sent + uint64_t total_bytes_sent = + listener_config_.listenerScope().counterFromString("total_bytes_sent").value(); + + for (const auto& payload : payloads) { + Buffer::InstancePtr buffer(new Buffer::OwnedImpl()); + buffer->add(payload); + UdpSendData send_data{send_from_addr->ip(), *client_.localAddress(), *buffer}; + + auto send_result = listener_->send(send_data); + EXPECT_TRUE(send_result.ok()) << "send() failed : " << send_result.err_->getErrorDetails(); + EXPECT_EQ(send_result.rc_, payload.length()); + + // Verify udp_packet_writer stats for batch writing + if (internal_buffer.length() == 0 || /* internal buffer is empty*/ + payload.compare(last_buffered) == 0) { /*len(payload) == gso_size*/ + pkts_to_send.emplace_back(payload); + internal_buffer.append(payload); + last_buffered = payload; + } else if (payload.compare(last_buffered) < 0) { /*len(payload) < gso_size*/ + pkts_to_send.emplace_back(payload); + internal_buffer.clear(); + last_buffered.clear(); + send_buffered_pkts = true; + } else { /*len(payload) > gso_size*/ + internal_buffer = payload; + last_buffered = payload; + send_buffered_pkts = true; + } + + EXPECT_EQ(listener_config_.listenerScope() + .gaugeFromString("internal_buffer_size", Stats::Gauge::ImportMode::NeverImport) + .value(), + internal_buffer.length()); + + // Verify that the total_bytes_sent is only updated when the packets + // are actually sent to the client, and not on being buffered. + if (send_buffered_pkts) { + for (const auto& pkt : pkts_to_send) { + total_bytes_sent += pkt.length(); + } + pkts_to_send.clear(); + if (last_buffered.length() != 0) { + pkts_to_send.emplace_back(last_buffered); + } + send_buffered_pkts = false; + } + EXPECT_EQ(listener_config_.listenerScope().counterFromString("total_bytes_sent").value(), + total_bytes_sent); + } + + // Test External Flush + auto flush_result = udp_packet_writer_->flush(); + EXPECT_TRUE(flush_result.ok()); + EXPECT_EQ(listener_config_.listenerScope() + .gaugeFromString("internal_buffer_size", Stats::Gauge::ImportMode::NeverImport) + .value(), + 0); + total_bytes_sent += payloads.back().length(); + + EXPECT_EQ(listener_config_.listenerScope().counterFromString("total_bytes_sent").value(), + total_bytes_sent); +} + +/** + * Tests UDP Packet writer behavior when socket is write-blocked. + * 1. Setup the udp_listener and have a payload buffered in the internal buffer. + * 2. Then set the socket to return EWOULDBLOCK error on sendmsg and write a + * different sized buffer to the packet writer. + * - Ensure that a buffer shorter than the initial buffer is added to the + * Internal Buffer. + * - A buffer longer than the initial buffer should not get appended to the + * Internal Buffer. 
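+ * 3. Reset the blocked state with setWritable() and call flush():
+ *    - Verify that the flushed sendmsg carries exactly the bytes held in the
+ *      internal buffer, internal_buffer_size drops back to zero and
+ *      total_bytes_sent grows by the flushed amount.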
+ */ +TEST_P(UdpListenerImplBatchWriterTest, WriteBlocked) { + // Quic Mock Objects + quic::test::MockQuicSyscallWrapper os_sys_calls; + quic::ScopedGlobalSyscallWrapperOverride os_calls(&os_sys_calls); + + // The initial payload to be buffered + std::string initial_payload("length7"); + + // Get initial value of total_bytes_sent + uint64_t total_bytes_sent = + listener_config_.listenerScope().counterFromString("total_bytes_sent").value(); + + // Possible following payloads to be sent after the initial payload + absl::FixedArray following_payloads{"length<7", "len<7"}; + + for (const auto& following_payload : following_payloads) { + std::string internal_buffer(""); + + // First have initial payload added to the udp_packet_writer's internal buffer. + Buffer::InstancePtr initial_buffer(new Buffer::OwnedImpl()); + initial_buffer->add(initial_payload); + UdpSendData initial_send_data{send_to_addr_->ip(), *server_socket_->localAddress(), + *initial_buffer}; + auto send_result = listener_->send(initial_send_data); + internal_buffer.append(initial_payload); + EXPECT_TRUE(send_result.ok()); + EXPECT_EQ(send_result.rc_, initial_payload.length()); + EXPECT_FALSE(udp_packet_writer_->isWriteBlocked()); + EXPECT_EQ(listener_config_.listenerScope() + .gaugeFromString("internal_buffer_size", Stats::Gauge::ImportMode::NeverImport) + .value(), + initial_payload.length()); + EXPECT_EQ(listener_config_.listenerScope().counterFromString("total_bytes_sent").value(), + total_bytes_sent); + + // Mock the socket to be write blocked on sendmsg syscall + EXPECT_CALL(os_sys_calls, Sendmsg(_, _, _)) + .WillOnce(Invoke([](int /*sockfd*/, const msghdr* /*msg*/, int /*flags*/) { + errno = EWOULDBLOCK; + return -1; + })); + + // Now send the following payload + Buffer::InstancePtr following_buffer(new Buffer::OwnedImpl()); + following_buffer->add(following_payload); + UdpSendData following_send_data{send_to_addr_->ip(), *server_socket_->localAddress(), + *following_buffer}; + send_result = listener_->send(following_send_data); + + if (following_payload.length() < initial_payload.length()) { + // The following payload should get buffered if it is + // shorter than initial payload + EXPECT_TRUE(send_result.ok()); + EXPECT_EQ(send_result.rc_, following_payload.length()); + EXPECT_FALSE(udp_packet_writer_->isWriteBlocked()); + internal_buffer.append(following_payload); + // Send another packet and verify that writer gets blocked later + EXPECT_CALL(os_sys_calls, Sendmsg(_, _, _)) + .WillOnce(Invoke([](int /*sockfd*/, const msghdr* /*msg*/, int /*flags*/) { + errno = EWOULDBLOCK; + return -1; + })); + following_buffer->add(following_payload); + UdpSendData final_send_data{send_to_addr_->ip(), *server_socket_->localAddress(), + *following_buffer}; + send_result = listener_->send(final_send_data); + } + + EXPECT_FALSE(send_result.ok()); + EXPECT_EQ(send_result.rc_, 0); + EXPECT_TRUE(udp_packet_writer_->isWriteBlocked()); + EXPECT_EQ(listener_config_.listenerScope().counterFromString("total_bytes_sent").value(), + total_bytes_sent); + EXPECT_EQ(listener_config_.listenerScope() + .gaugeFromString("internal_buffer_size", Stats::Gauge::ImportMode::NeverImport) + .value(), + internal_buffer.length()); + + // Reset write blocked status and verify correct buffer is flushed + udp_packet_writer_->setWritable(); + EXPECT_CALL(os_sys_calls, Sendmsg(_, _, _)) + .WillOnce(Invoke([&](int /*sockfd*/, const msghdr* msg, int /*flags*/) { + EXPECT_EQ(internal_buffer.length(), getPacketLength(msg)); + return internal_buffer.length(); + })); + 
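+    // The mocked sendmsg above must see exactly the bytes held in the internal
+    // buffer, i.e. the initial payload plus any following payload that was
+    // accepted while the socket was blocked.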
auto flush_result = udp_packet_writer_->flush(); + EXPECT_TRUE(flush_result.ok()); + EXPECT_EQ(flush_result.rc_, 0); + EXPECT_FALSE(udp_packet_writer_->isWriteBlocked()); + EXPECT_EQ(listener_config_.listenerScope() + .gaugeFromString("internal_buffer_size", Stats::Gauge::ImportMode::NeverImport) + .value(), + 0); + total_bytes_sent += internal_buffer.length(); + EXPECT_EQ(listener_config_.listenerScope().counterFromString("total_bytes_sent").value(), + total_bytes_sent); + } +} + +} // namespace +} // namespace Network +} // namespace Envoy diff --git a/test/common/network/udp_listener_impl_test.cc b/test/common/network/udp_listener_impl_test.cc index 11b4e5346ead..c2d1a4216bb6 100644 --- a/test/common/network/udp_listener_impl_test.cc +++ b/test/common/network/udp_listener_impl_test.cc @@ -11,9 +11,10 @@ #include "common/network/socket_option_factory.h" #include "common/network/socket_option_impl.h" #include "common/network/udp_listener_impl.h" +#include "common/network/udp_packet_writer_handler_impl.h" #include "common/network/utility.h" -#include "test/common/network/listener_impl_test_base.h" +#include "test/common/network/udp_listener_impl_test_base.h" #include "test/mocks/api/mocks.h" #include "test/mocks/network/mocks.h" #include "test/test_common/environment.h" @@ -28,6 +29,7 @@ using testing::_; using testing::Invoke; using testing::Return; +using testing::ReturnRef; namespace Envoy { namespace Network { @@ -45,76 +47,23 @@ class MockSupportsUdpGro : public Api::OsSysCallsImpl { MOCK_METHOD(bool, supportsUdpGro, (), (const)); }; -class UdpListenerImplTest : public ListenerImplTestBase { +class UdpListenerImplTest : public UdpListenerImplTestBase { public: - UdpListenerImplTest() - : server_socket_(createServerSocket(true)), send_to_addr_(getServerLoopbackAddress()) { - time_system_.advanceTimeWait(std::chrono::milliseconds(100)); + void SetUp() override { ON_CALL(udp_gro_syscall_, supportsUdpGro()).WillByDefault(Return(false)); - } - void SetUp() override { // Set listening socket options. server_socket_->addOptions(SocketOptionFactory::buildIpPacketInfoOptions()); server_socket_->addOptions(SocketOptionFactory::buildRxQueueOverFlowOptions()); if (Api::OsSysCallsSingleton::get().supportsUdpGro()) { server_socket_->addOptions(SocketOptionFactory::buildUdpGroOptions()); } - listener_ = std::make_unique( dispatcherImpl(), server_socket_, listener_callbacks_, dispatcherImpl().timeSource()); + udp_packet_writer_ = std::make_unique(server_socket_->ioHandle()); + ON_CALL(listener_callbacks_, udpPacketWriter()).WillByDefault(ReturnRef(*udp_packet_writer_)); } -protected: - Address::Instance* getServerLoopbackAddress() { - if (version_ == Address::IpVersion::v4) { - return new Address::Ipv4Instance(Network::Test::getLoopbackAddressString(version_), - server_socket_->localAddress()->ip()->port()); - } - return new Address::Ipv6Instance(Network::Test::getLoopbackAddressString(version_), - server_socket_->localAddress()->ip()->port()); - } - - SocketSharedPtr createServerSocket(bool bind) { - // Set IP_FREEBIND to allow sendmsg to send with non-local IPv6 source address. - return std::make_shared(Network::Test::getAnyAddress(version_), -#ifdef IP_FREEBIND - SocketOptionFactory::buildIpFreebindOptions(), -#else - nullptr, -#endif - bind); - } - - // Validates receive data, source/destination address and received time. 
- void validateRecvCallbackParams(const UdpRecvData& data, size_t num_packet_per_recv) { - ASSERT_NE(data.addresses_.local_, nullptr); - - ASSERT_NE(data.addresses_.peer_, nullptr); - ASSERT_NE(data.addresses_.peer_->ip(), nullptr); - - EXPECT_EQ(data.addresses_.local_->asString(), send_to_addr_->asString()); - - EXPECT_EQ(data.addresses_.peer_->ip()->addressAsString(), - client_.localAddress()->ip()->addressAsString()); - - EXPECT_EQ(*data.addresses_.local_, *send_to_addr_); - - EXPECT_EQ(time_system_.monotonicTime(), - data.receive_time_ + - std::chrono::milliseconds( - (num_packets_received_by_listener_ % num_packet_per_recv) * 100)); - // Advance time so that next onData() should have different received time. - time_system_.advanceTimeWait(std::chrono::milliseconds(100)); - ++num_packets_received_by_listener_; - } - - SocketSharedPtr server_socket_; - Network::Test::UdpSyncPeer client_{GetParam()}; - Address::InstanceConstSharedPtr send_to_addr_; - MockUdpListenerCallbacks listener_callbacks_; - std::unique_ptr listener_; - size_t num_packets_received_by_listener_{0}; NiceMock udp_gro_syscall_; TestThreadsafeSingletonInjector os_calls{&udp_gro_syscall_}; }; @@ -342,35 +291,12 @@ TEST_P(UdpListenerImplTest, UdpListenerRecvMsgError) { * address. */ TEST_P(UdpListenerImplTest, SendData) { + EXPECT_FALSE(udp_packet_writer_->isBatchMode()); const std::string payload("hello world"); Buffer::InstancePtr buffer(new Buffer::OwnedImpl()); buffer->add(payload); - // Use a self address that is unlikely to be picked by source address discovery - // algorithm if not specified in recvmsg/recvmmsg. Port is not taken into - // consideration. - Address::InstanceConstSharedPtr send_from_addr; - if (version_ == Address::IpVersion::v4) { - // Linux kernel regards any 127.x.x.x as local address. But Mac OS doesn't. - send_from_addr = std::make_shared( -#ifndef __APPLE__ - "127.1.2.3", -#else - "127.0.0.1", -#endif - server_socket_->localAddress()->ip()->port()); - } else { - // Only use non-local v6 address if IP_FREEBIND is supported. Otherwise use - // ::1 to avoid EINVAL error. Unfortunately this can't verify that sendmsg with - // customized source address is doing the work because kernel also picks ::1 - // if it's not specified in cmsghdr. - send_from_addr = std::make_shared( -#ifdef IP_FREEBIND - "::9", -#else - "::1", -#endif - server_socket_->localAddress()->ip()->port()); - } + + Address::InstanceConstSharedPtr send_from_addr = getNonDefaultSourceAddress(); UdpSendData send_data{send_from_addr->ip(), *client_.localAddress(), *buffer}; @@ -384,6 +310,11 @@ TEST_P(UdpListenerImplTest, SendData) { EXPECT_EQ(bytes_to_read, data.buffer_->length()); EXPECT_EQ(send_from_addr->asString(), data.addresses_.peer_->asString()); EXPECT_EQ(data.buffer_->toString(), payload); + + // Verify External Flush is a No-op + auto flush_result = udp_packet_writer_->flush(); + EXPECT_TRUE(flush_result.ok()); + EXPECT_EQ(0, flush_result.rc_); } /** @@ -399,10 +330,25 @@ TEST_P(UdpListenerImplTest, SendDataError) { // Inject mocked OsSysCalls implementation to mock a write failure. 
Api::MockOsSysCalls os_sys_calls; TestThreadsafeSingletonInjector os_calls(&os_sys_calls); + EXPECT_CALL(os_sys_calls, sendmsg(_, _, _)) - .WillOnce(Return(Api::SysCallSizeResult{-1, SOCKET_ERROR_NOT_SUP})); + .WillOnce(Return(Api::SysCallSizeResult{-1, SOCKET_ERROR_AGAIN})); auto send_result = listener_->send(send_data); EXPECT_FALSE(send_result.ok()); + EXPECT_EQ(send_result.err_->getErrorCode(), Api::IoError::IoErrorCode::Again); + // Failed write shouldn't drain the data. + EXPECT_EQ(payload.length(), buffer->length()); + // Verify the writer is set to blocked + EXPECT_TRUE(udp_packet_writer_->isWriteBlocked()); + + // Reset write_blocked status + udp_packet_writer_->setWritable(); + EXPECT_FALSE(udp_packet_writer_->isWriteBlocked()); + + EXPECT_CALL(os_sys_calls, sendmsg(_, _, _)) + .WillOnce(Return(Api::SysCallSizeResult{-1, SOCKET_ERROR_NOT_SUP})); + send_result = listener_->send(send_data); + EXPECT_FALSE(send_result.ok()); EXPECT_EQ(send_result.err_->getErrorCode(), Api::IoError::IoErrorCode::NoSupport); // Failed write shouldn't drain the data. EXPECT_EQ(payload.length(), buffer->length()); diff --git a/test/common/network/udp_listener_impl_test_base.h b/test/common/network/udp_listener_impl_test_base.h new file mode 100644 index 000000000000..2547986a316a --- /dev/null +++ b/test/common/network/udp_listener_impl_test_base.h @@ -0,0 +1,123 @@ +#include +#include +#include +#include + +#include "envoy/config/core/v3/base.pb.h" + +#include "common/network/address_impl.h" +#include "common/network/socket_option_factory.h" +#include "common/network/socket_option_impl.h" +#include "common/network/udp_listener_impl.h" +#include "common/network/udp_packet_writer_handler_impl.h" +#include "common/network/utility.h" + +#include "test/common/network/listener_impl_test_base.h" +#include "test/mocks/api/mocks.h" +#include "test/mocks/network/mocks.h" +#include "test/mocks/server/mocks.h" +#include "test/test_common/environment.h" +#include "test/test_common/network_utility.h" +#include "test/test_common/threadsafe_singleton_injector.h" +#include "test/test_common/utility.h" + +#include "absl/time/time.h" +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Network { + +class UdpListenerImplTestBase : public ListenerImplTestBase { +public: + UdpListenerImplTestBase() + : server_socket_(createServerSocket(true)), send_to_addr_(getServerLoopbackAddress()) { + time_system_.advanceTimeWait(std::chrono::milliseconds(100)); + } + +protected: + Address::Instance* getServerLoopbackAddress() { + if (version_ == Address::IpVersion::v4) { + return new Address::Ipv4Instance(Network::Test::getLoopbackAddressString(version_), + server_socket_->localAddress()->ip()->port()); + } + return new Address::Ipv6Instance(Network::Test::getLoopbackAddressString(version_), + server_socket_->localAddress()->ip()->port()); + } + + SocketSharedPtr createServerSocket(bool bind) { + // Set IP_FREEBIND to allow sendmsg to send with non-local IPv6 source address. + return std::make_shared(Network::Test::getAnyAddress(version_), +#ifdef IP_FREEBIND + SocketOptionFactory::buildIpFreebindOptions(), +#else + nullptr, +#endif + bind); + } + + Address::InstanceConstSharedPtr getNonDefaultSourceAddress() { + // Use a self address that is unlikely to be picked by source address discovery + // algorithm if not specified in recvmsg/recvmmsg. Port is not taken into + // consideration. 
+ Address::InstanceConstSharedPtr send_from_addr; + if (version_ == Address::IpVersion::v4) { + // Linux kernel regards any 127.x.x.x as local address. But Mac OS doesn't. + send_from_addr = std::make_shared( +#ifndef __APPLE__ + "127.1.2.3", +#else + "127.0.0.1", +#endif + server_socket_->localAddress()->ip()->port()); + } else { + // Only use non-local v6 address if IP_FREEBIND is supported. Otherwise use + // ::1 to avoid EINVAL error. Unfortunately this can't verify that sendmsg with + // customized source address is doing the work because kernel also picks ::1 + // if it's not specified in cmsghdr. + send_from_addr = std::make_shared( +#ifdef IP_FREEBIND + "::9", +#else + "::1", +#endif + server_socket_->localAddress()->ip()->port()); + } + return send_from_addr; + } + + // Validates receive data, source/destination address and received time. + void validateRecvCallbackParams(const UdpRecvData& data, size_t num_packet_per_recv) { + ASSERT_NE(data.addresses_.local_, nullptr); + + ASSERT_NE(data.addresses_.peer_, nullptr); + ASSERT_NE(data.addresses_.peer_->ip(), nullptr); + + EXPECT_EQ(data.addresses_.local_->asString(), send_to_addr_->asString()); + + EXPECT_EQ(data.addresses_.peer_->ip()->addressAsString(), + client_.localAddress()->ip()->addressAsString()); + + EXPECT_EQ(*data.addresses_.local_, *send_to_addr_); + + EXPECT_EQ(time_system_.monotonicTime(), + data.receive_time_ + + std::chrono::milliseconds( + (num_packets_received_by_listener_ % num_packet_per_recv) * 100)); + // Advance time so that next onData() should have different received time. + time_system_.advanceTimeWait(std::chrono::milliseconds(100)); + ++num_packets_received_by_listener_; + } + + SocketSharedPtr server_socket_; + Network::Test::UdpSyncPeer client_{GetParam()}; + Address::InstanceConstSharedPtr send_to_addr_; + NiceMock listener_callbacks_; + NiceMock listener_config_; + std::unique_ptr listener_; + size_t num_packets_received_by_listener_{0}; + Network::UdpPacketWriterPtr udp_packet_writer_; +}; + +} // namespace Network +} // namespace Envoy diff --git a/test/extensions/common/proxy_protocol/proxy_protocol_regression_test.cc b/test/extensions/common/proxy_protocol/proxy_protocol_regression_test.cc index 8654352291de..7c1bd0d80ae1 100644 --- a/test/extensions/common/proxy_protocol/proxy_protocol_regression_test.cc +++ b/test/extensions/common/proxy_protocol/proxy_protocol_regression_test.cc @@ -70,6 +70,7 @@ class ProxyProtocolRegressionTest : public testing::TestWithParam(&new_session.file_event_cb_), Return(nullptr))); + // Internal Buffer is Empty, flush will be a no-op + ON_CALL(callbacks_.udp_listener_, flush()) + .WillByDefault( + InvokeWithoutArgs([]() -> Api::IoCallUint64Result { return makeNoError(0); })); } void checkTransferStats(uint64_t rx_bytes, uint64_t rx_datagrams, uint64_t tx_bytes, diff --git a/test/extensions/quic_listeners/quiche/BUILD b/test/extensions/quic_listeners/quiche/BUILD index 29ae0a89eb28..bb259455a7c1 100644 --- a/test/extensions/quic_listeners/quiche/BUILD +++ b/test/extensions/quic_listeners/quiche/BUILD @@ -34,6 +34,7 @@ envoy_cc_test( ], deps = [ "//source/common/network:io_socket_error_lib", + "//source/common/network:udp_packet_writer_handler_lib", "//source/extensions/quic_listeners/quiche:envoy_quic_packet_writer_lib", "//test/mocks/api:api_mocks", "//test/mocks/network:network_mocks", @@ -192,6 +193,7 @@ envoy_cc_test( "//source/extensions/quic_listeners/quiche:active_quic_listener_config_lib", "//source/extensions/quic_listeners/quiche:active_quic_listener_lib", 
"//source/extensions/quic_listeners/quiche:envoy_quic_utils_lib", + "//source/extensions/quic_listeners/quiche:udp_gso_batch_writer_lib", "//source/server:configuration_lib", "//test/mocks/network:network_mocks", "//test/mocks/server:instance_mocks", diff --git a/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc b/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc index 747452ccdf78..b41b6bdd311d 100644 --- a/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc +++ b/test/extensions/quic_listeners/quiche/active_quic_listener_test.cc @@ -43,6 +43,7 @@ #include "extensions/quic_listeners/quiche/active_quic_listener_config.h" #include "extensions/quic_listeners/quiche/platform/envoy_quic_clock.h" #include "extensions/quic_listeners/quiche/envoy_quic_utils.h" +#include "extensions/quic_listeners/quiche/udp_gso_batch_writer.h" using testing::Return; using testing::ReturnRef; @@ -111,6 +112,18 @@ class ActiveQuicListenerTest : public QuicMultiVersionTest { ON_CALL(listener_config_, listenSocketFactory()).WillByDefault(ReturnRef(socket_factory_)); ON_CALL(socket_factory_, getListenSocket()).WillByDefault(Return(listen_socket_)); + // Use UdpGsoBatchWriter to perform non-batched writes for the purpose of this test + ON_CALL(listener_config_, udpPacketWriterFactory()) + .WillByDefault(Return( + std::reference_wrapper(udp_packet_writer_factory_))); + ON_CALL(udp_packet_writer_factory_, createUdpPacketWriter(_, _)) + .WillByDefault(Invoke( + [&](Network::IoHandle& io_handle, Stats::Scope& scope) -> Network::UdpPacketWriterPtr { + Network::UdpPacketWriterPtr udp_packet_writer = + std::make_unique(io_handle, scope); + return udp_packet_writer; + })); + listener_factory_ = createQuicListenerFactory(yamlForQuicConfig()); EXPECT_CALL(listener_config_, filterChainManager()).WillOnce(ReturnRef(filter_chain_manager_)); quic_listener_ = @@ -179,8 +192,8 @@ class ActiveQuicListenerTest : public QuicMultiVersionTest { client_sockets_.push_back(std::make_unique(local_address_, nullptr, /*bind*/ false)); Buffer::OwnedImpl payload = generateChloPacketToSend( quic_version_, quic_config_, ActiveQuicListenerPeer::cryptoConfig(*quic_listener_), - connection_id, clock_, envoyAddressInstanceToQuicSocketAddress(local_address_), - envoyAddressInstanceToQuicSocketAddress(local_address_), "test.example.org"); + connection_id, clock_, envoyIpAddressToQuicSocketAddress(local_address_->ip()), + envoyIpAddressToQuicSocketAddress(local_address_->ip()), "test.example.org"); Buffer::RawSliceVector slice = payload.getRawSlices(); ASSERT_EQ(1u, slice.size()); // Send a full CHLO to finish 0-RTT handshake. 
@@ -243,6 +256,7 @@ class ActiveQuicListenerTest : public QuicMultiVersionTest { std::shared_ptr read_filter_; Network::MockConnectionCallbacks network_connection_callbacks_; NiceMock listener_config_; + NiceMock udp_packet_writer_factory_; quic::QuicConfig quic_config_; Server::ConnectionHandlerImpl connection_handler_; std::unique_ptr quic_listener_; diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_dispatcher_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_dispatcher_test.cc index fb15815fa1db..c5b6e6c2e7af 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_dispatcher_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_dispatcher_test.cc @@ -128,7 +128,7 @@ class EnvoyQuicDispatcherTest : public QuicMultiVersionTest, EnvoyQuicClock clock(*dispatcher_); Buffer::OwnedImpl payload = generateChloPacketToSend( quic_version_, quic_config_, crypto_config_, connection_id_, clock, - envoyAddressInstanceToQuicSocketAddress(listen_socket_->localAddress()), peer_addr, + envoyIpAddressToQuicSocketAddress(listen_socket_->localAddress()->ip()), peer_addr, "test.example.org"); Buffer::RawSliceVector slice = payload.getRawSlices(); ASSERT(slice.size() == 1); @@ -139,7 +139,7 @@ class EnvoyQuicDispatcherTest : public QuicMultiVersionTest, quic::test::ConstructReceivedPacket(*encrypted_packet, clock.Now())); envoy_quic_dispatcher_.ProcessPacket( - envoyAddressInstanceToQuicSocketAddress(listen_socket_->localAddress()), peer_addr, + envoyIpAddressToQuicSocketAddress(listen_socket_->localAddress()->ip()), peer_addr, *received_packet); if (should_buffer) { @@ -165,7 +165,7 @@ class EnvoyQuicDispatcherTest : public QuicMultiVersionTest, auto envoy_connection = static_cast(session); EXPECT_EQ("test.example.org", envoy_connection->requestedServerName()); EXPECT_EQ(peer_addr, - envoyAddressInstanceToQuicSocketAddress(envoy_connection->remoteAddress())); + envoyIpAddressToQuicSocketAddress(envoy_connection->remoteAddress()->ip())); ASSERT(envoy_connection->localAddress() != nullptr); EXPECT_EQ(*listen_socket_->localAddress(), *envoy_connection->localAddress()); } diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_utils_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_utils_test.cc index cd5004c39c2d..68d606ea54b4 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_utils_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_utils_test.cc @@ -39,7 +39,7 @@ TEST(EnvoyQuicUtilsTest, ConversionBetweenQuicAddressAndEnvoyAddress) { Network::Address::InstanceConstSharedPtr envoy_addr = quicAddressToEnvoyAddressInstance(quic_addr); EXPECT_EQ(quic_addr.ToString(), envoy_addr->asStringView()); - EXPECT_EQ(quic_addr, envoyAddressInstanceToQuicSocketAddress(envoy_addr)); + EXPECT_EQ(quic_addr, envoyIpAddressToQuicSocketAddress(envoy_addr->ip())); } } diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_writer_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_writer_test.cc index 0c6232fb8e50..cb22532e69bb 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_writer_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_writer_test.cc @@ -5,6 +5,7 @@ #include "common/network/address_impl.h" #include "common/network/io_socket_error_impl.h" +#include "common/network/udp_packet_writer_handler_impl.h" #include "extensions/quic_listeners/quiche/envoy_quic_packet_writer.h" @@ -22,7 +23,8 @@ namespace Quic { class EnvoyQuicWriterTest : public ::testing::Test { public: - EnvoyQuicWriterTest() : 
envoy_quic_writer_(socket_) { + EnvoyQuicWriterTest() + : envoy_quic_writer_(std::make_unique(socket_.ioHandle())) { self_address_.FromString("::"); quic::QuicIpAddress peer_ip; peer_ip.FromString("::1"); diff --git a/test/integration/fake_upstream.h b/test/integration/fake_upstream.h index b735399ade3d..46287d7a5191 100644 --- a/test/integration/fake_upstream.h +++ b/test/integration/fake_upstream.h @@ -32,6 +32,7 @@ #include "common/network/connection_balancer_impl.h" #include "common/network/filter_impl.h" #include "common/network/listen_socket_impl.h" +#include "common/network/udp_default_writer_config.h" #include "common/stats/isolated_store_impl.h" #include "server/active_raw_udp_listener_config.h" @@ -698,7 +699,8 @@ class FakeUpstream : Logger::Loggable, public: FakeListener(FakeUpstream& parent) : parent_(parent), name_("fake_upstream"), - udp_listener_factory_(std::make_unique()) {} + udp_listener_factory_(std::make_unique()), + udp_writer_factory_(std::make_unique()) {} private: // Network::ListenerConfig @@ -718,6 +720,9 @@ class FakeUpstream : Logger::Loggable, Network::ActiveUdpListenerFactory* udpListenerFactory() override { return udp_listener_factory_.get(); } + Network::UdpPacketWriterFactoryOptRef udpPacketWriterFactory() override { + return Network::UdpPacketWriterFactoryOptRef(std::ref(*udp_writer_factory_)); + } Network::ConnectionBalancer& connectionBalancer() override { return connection_balancer_; } envoy::config::core::v3::TrafficDirection direction() const override { return envoy::config::core::v3::UNSPECIFIED; @@ -736,6 +741,7 @@ class FakeUpstream : Logger::Loggable, const std::string name_; Network::NopConnectionBalancerImpl connection_balancer_; const Network::ActiveUdpListenerFactoryPtr udp_listener_factory_; + const Network::UdpPacketWriterFactoryPtr udp_writer_factory_; BasicResourceLimitImpl connection_resource_; const std::vector empty_access_logs_; }; diff --git a/test/mocks/network/mocks.h b/test/mocks/network/mocks.h index be6d86765d0d..596afe6ffbea 100644 --- a/test/mocks/network/mocks.h +++ b/test/mocks/network/mocks.h @@ -142,6 +142,7 @@ class MockUdpListenerCallbacks : public UdpListenerCallbacks { MOCK_METHOD(void, onReadReady, ()); MOCK_METHOD(void, onWriteReady, (const Socket& socket)); MOCK_METHOD(void, onReceiveError, (Api::IoError::IoErrorCode err)); + MOCK_METHOD(Network::UdpPacketWriter&, udpPacketWriter, ()); }; class MockDrainDecision : public DrainDecision { @@ -328,6 +329,14 @@ class MockListenSocketFactory : public ListenSocketFactory { MOCK_METHOD(SocketOptRef, sharedSocket, (), (const)); }; +class MockUdpPacketWriterFactory : public UdpPacketWriterFactory { +public: + MockUdpPacketWriterFactory() = default; + + MOCK_METHOD(Network::UdpPacketWriterPtr, createUdpPacketWriter, + (Network::IoHandle&, Stats::Scope&), ()); +}; + class MockListenerConfig : public ListenerConfig { public: MockListenerConfig(); @@ -345,6 +354,7 @@ class MockListenerConfig : public ListenerConfig { MOCK_METHOD(uint64_t, listenerTag, (), (const)); MOCK_METHOD(const std::string&, name, (), (const)); MOCK_METHOD(Network::ActiveUdpListenerFactory*, udpListenerFactory, ()); + MOCK_METHOD(Network::UdpPacketWriterFactoryOptRef, udpPacketWriterFactory, ()); MOCK_METHOD(ConnectionBalancer&, connectionBalancer, ()); MOCK_METHOD(ResourceLimit&, openConnections, ()); @@ -455,6 +465,22 @@ class MockTransportSocketCallbacks : public TransportSocketCallbacks { testing::NiceMock connection_; }; +class MockUdpPacketWriter : public UdpPacketWriter { +public: + 
MockUdpPacketWriter() = default; + + MOCK_METHOD(Api::IoCallUint64Result, writePacket, + (const Buffer::Instance& buffer, const Address::Ip* local_ip, + const Address::Instance& peer_address)); + MOCK_METHOD(bool, isWriteBlocked, (), (const)); + MOCK_METHOD(void, setWritable, ()); + MOCK_METHOD(uint64_t, getMaxPacketSize, (const Address::Instance& peer_address), (const)); + MOCK_METHOD(bool, isBatchMode, (), (const)); + MOCK_METHOD(Network::UdpPacketWriterBuffer, getNextWriteLocation, + (const Address::Ip* local_ip, const Address::Instance& peer_address)); + MOCK_METHOD(Api::IoCallUint64Result, flush, ()); +}; + class MockUdpListener : public UdpListener { public: MockUdpListener(); @@ -466,6 +492,7 @@ class MockUdpListener : public UdpListener { MOCK_METHOD(Event::Dispatcher&, dispatcher, ()); MOCK_METHOD(Address::InstanceConstSharedPtr&, localAddress, (), (const)); MOCK_METHOD(Api::IoCallUint64Result, send, (const UdpSendData&)); + MOCK_METHOD(Api::IoCallUint64Result, flush, ()); Event::MockDispatcher dispatcher_; }; diff --git a/test/server/BUILD b/test/server/BUILD index 007ae24ab5d0..84a3d3fb8899 100644 --- a/test/server/BUILD +++ b/test/server/BUILD @@ -75,6 +75,7 @@ envoy_cc_test( "//source/common/config:utility_lib", "//source/common/network:address_lib", "//source/common/network:connection_balancer_lib", + "//source/common/network:udp_default_writer_config", "//source/common/stats:stats_lib", "//source/server:active_raw_udp_listener_config", "//source/server:connection_handler_lib", @@ -274,6 +275,7 @@ envoy_cc_test( ":utility_lib", "//source/extensions/quic_listeners/quiche:active_quic_listener_config_lib", "//source/extensions/quic_listeners/quiche:quic_transport_socket_factory_lib", + "//source/extensions/quic_listeners/quiche:udp_gso_batch_writer_config_lib", "//source/extensions/transport_sockets/raw_buffer:config", "//test/test_common:threadsafe_singleton_injector_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", diff --git a/test/server/connection_handler_test.cc b/test/server/connection_handler_test.cc index 7fcea249bbd4..ebcc66a8c656 100644 --- a/test/server/connection_handler_test.cc +++ b/test/server/connection_handler_test.cc @@ -11,6 +11,7 @@ #include "common/network/connection_balancer_impl.h" #include "common/network/io_socket_handle_impl.h" #include "common/network/raw_buffer_socket.h" +#include "common/network/udp_default_writer_config.h" #include "common/network/utility.h" #include "server/connection_handler_impl.h" @@ -56,7 +57,7 @@ class ConnectionHandlerTest : public testing::Test, protected Logger::Loggable> filter_chain_manager = nullptr) - : parent_(parent), socket_(std::make_shared()), + : parent_(parent), socket_(std::make_shared>()), socket_factory_(std::move(socket_factory)), tag_(tag), bind_to_port_(bind_to_port), hand_off_restored_destination_connections_(hand_off_restored_destination_connections), name_(name), listener_filters_timeout_(listener_filters_timeout), @@ -69,6 +70,7 @@ class ConnectionHandlerTest : public testing::Test, protected Logger::Loggable(listener_name) .createActiveUdpListenerFactory(dummy, /*concurrency=*/1); + udp_writer_factory_ = std::make_unique(); ON_CALL(*socket_, socketType()).WillByDefault(Return(socket_type)); } @@ -96,6 +98,9 @@ class ConnectionHandlerTest : public testing::Test, protected Logger::Loggable socket_; + std::shared_ptr> socket_; Network::ListenSocketFactorySharedPtr socket_factory_; uint64_t tag_; bool bind_to_port_; @@ -120,6 +125,7 @@ class ConnectionHandlerTest : public testing::Test, protected 
Logger::Loggable udp_listener_factory_; + std::unique_ptr udp_writer_factory_; Network::ConnectionBalancerPtr connection_balancer_; BasicResourceLimitImpl open_connections_; const std::vector empty_access_logs_; @@ -155,6 +161,7 @@ class ConnectionHandlerTest : public testing::Test, protected Logger::Loggable udp_gso_syscall_; + TestThreadsafeSingletonInjector os_calls{&udp_gso_syscall_}; +}; TEST_F(ListenerManagerImplQuicOnlyTest, QuicListenerFactoryAndSslContext) { const std::string yaml = TestEnvironment::substitute(R"EOF( @@ -44,10 +53,15 @@ TEST_F(ListenerManagerImplQuicOnlyTest, QuicListenerFactoryAndSslContext) { reuse_port: true udp_listener_config: udp_listener_name: "quiche_quic_listener" +udp_writer_config: + name: "udp_gso_batch_writer" + typed_config: + "@type": type.googleapis.com/envoy.config.listener.v3.UdpGsoBatchWriterOptions )EOF", Network::Address::IpVersion::v4); envoy::config::listener::v3::Listener listener_proto = parseListenerFromV3Yaml(yaml); + ON_CALL(udp_gso_syscall_, supportsUdpGso()).WillByDefault(Return(true)); EXPECT_CALL(server_.random_, uuid()); expectCreateListenSocket(envoy::config::core::v3::SocketOption::STATE_PREBIND, #ifdef SO_RXQ_OVFL // SO_REUSEPORT is on as configured @@ -73,19 +87,23 @@ reuse_port: true /* expected_sockopt_name */ SO_REUSEPORT, /* expected_value */ 1, /* expected_num_calls */ 1); -#ifdef UDP_GRO if (Api::OsSysCallsSingleton::get().supportsUdpGro()) { expectSetsockopt(/* expected_sockopt_level */ SOL_UDP, /* expected_sockopt_name */ UDP_GRO, /* expected_value */ 1, /* expected_num_calls */ 1); } -#endif manager_->addOrUpdateListener(listener_proto, "", true); EXPECT_EQ(1u, manager_->listeners().size()); EXPECT_FALSE(manager_->listeners()[0].get().udpListenerFactory()->isTransportConnectionless()); - manager_->listeners().front().get().listenSocketFactory().getListenSocket(); + Network::SocketSharedPtr listen_socket = + manager_->listeners().front().get().listenSocketFactory().getListenSocket(); + + Network::UdpPacketWriterPtr udp_packet_writer = + manager_->listeners().front().get().udpPacketWriterFactory()->get().createUdpPacketWriter( + listen_socket->ioHandle(), manager_->listeners()[0].get().listenerScope()); + EXPECT_TRUE(udp_packet_writer->isBatchMode()); // No filter chain found with non-matching transport protocol. EXPECT_EQ(nullptr, findFilterChain(1234, "127.0.0.1", "", "tls", {}, "8.8.8.8", 111)); diff --git a/test/server/listener_manager_impl_test.cc b/test/server/listener_manager_impl_test.cc index 85c5c14e76a8..c47c22f44b6e 100644 --- a/test/server/listener_manager_impl_test.cc +++ b/test/server/listener_manager_impl_test.cc @@ -4699,6 +4699,28 @@ name: foo EXPECT_EQ(0UL, manager_->listeners().size()); } +// This test verifies that on default initialization the UDP Packet Writer +// is initialized in passthrough mode. (i.e. by using UdpDefaultWriter). 
+TEST_F(ListenerManagerImplTest, UdpDefaultWriterConfig) { + const envoy::config::listener::v3::Listener listener = parseListenerFromV3Yaml(R"EOF( +address: + socket_address: + address: 127.0.0.1 + protocol: UDP + port_value: 1234 +filter_chains: + filters: [] + )EOF"); + manager_->addOrUpdateListener(listener, "", true); + EXPECT_EQ(1U, manager_->listeners().size()); + Network::SocketSharedPtr listen_socket = + manager_->listeners().front().get().listenSocketFactory().getListenSocket(); + Network::UdpPacketWriterPtr udp_packet_writer = + manager_->listeners().front().get().udpPacketWriterFactory()->get().createUdpPacketWriter( + listen_socket->ioHandle(), manager_->listeners()[0].get().listenerScope()); + EXPECT_FALSE(udp_packet_writer->isBatchMode()); +} + } // namespace } // namespace Server } // namespace Envoy diff --git a/tools/spelling/spelling_dictionary.txt b/tools/spelling/spelling_dictionary.txt index cf99f6b3f17a..40f0ea30ffce 100644 --- a/tools/spelling/spelling_dictionary.txt +++ b/tools/spelling/spelling_dictionary.txt @@ -90,6 +90,7 @@ ENV EOF EOS EOY +EPOLLOUT EPOLLRDHUP EQ ERANGE @@ -146,6 +147,7 @@ IDL IETF INADDR INET +INVAL IO IOS IP @@ -431,6 +433,7 @@ bools borks broadcasted buf +buflen bugprone builtin builtins @@ -633,6 +636,7 @@ gmock goog google goto +gso gzip hackery hacky From d58f7025f77ba4bff10ad48ea8685916f0cd52f1 Mon Sep 17 00:00:00 2001 From: Jonh Wendell Date: Sat, 8 Aug 2020 17:53:25 -0400 Subject: [PATCH 887/909] Bump golang to 1.14.7 (#12557) Signed-off-by: Jonh Wendell --- bazel/dependency_imports.bzl | 2 +- bazel/repository_locations.bzl | 4 ++-- ci/verify_examples.sh | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/bazel/dependency_imports.bzl b/bazel/dependency_imports.bzl index a0a0110f4735..56aa348f4be0 100644 --- a/bazel/dependency_imports.bzl +++ b/bazel/dependency_imports.bzl @@ -10,7 +10,7 @@ load("@protodoc_pip3//:requirements.bzl", protodoc_pip_install = "pip_install") load("@rules_antlr//antlr:deps.bzl", "antlr_dependencies") # go version for rules_go -GO_VERSION = "1.14.4" +GO_VERSION = "1.14.7" def envoy_dependency_imports(go_version = GO_VERSION): rules_foreign_cc_dependencies() diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 14c8b6d625be..73698991a2f4 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -354,8 +354,8 @@ DEPENDENCY_REPOSITORIES = dict( cpe = "N/A", ), io_bazel_rules_go = dict( - sha256 = "a8d6b1b354d371a646d2f7927319974e0f9e52f73a2452d2b3877118169eb6bb", - urls = ["https://github.com/bazelbuild/rules_go/releases/download/v0.23.3/rules_go-v0.23.3.tar.gz"], + sha256 = "0310e837aed522875791750de44408ec91046c630374990edd51827cb169f616", + urls = ["https://github.com/bazelbuild/rules_go/releases/download/v0.23.7/rules_go-v0.23.7.tar.gz"], use_category = ["build"], ), rules_cc = dict( diff --git a/ci/verify_examples.sh b/ci/verify_examples.sh index 61fb380ef2d6..711ceb5f25a3 100755 --- a/ci/verify_examples.sh +++ b/ci/verify_examples.sh @@ -23,7 +23,7 @@ cd ../ # Test grpc bridge example # install go -GO_VERSION="1.14.4" +GO_VERSION="1.14.7" curl -O https://storage.googleapis.com/golang/go$GO_VERSION.linux-amd64.tar.gz tar -xf go$GO_VERSION.linux-amd64.tar.gz sudo mv go /usr/local From 8c2019ab978820688ebba729bbd82d418d2be488 Mon Sep 17 00:00:00 2001 From: DongRyeol Cha Date: Mon, 10 Aug 2020 15:04:15 +0900 Subject: [PATCH 888/909] build: Fix that selective testing does not work (#12563) As you know that the envoy build 
system has a feature for manually selecting which tests to run. Currently, however, doing so requires adding a dummy argument when invoking do_ci.sh, for example: /ci/run_envoy_docker.sh "./ci/do_ci.sh bazel.dev '' //test/common/network:udp_listener_impl_test". This patch restores the previous behavior so that selective testing works without the dummy argument. Signed-off-by: DongRyeol Cha --- ci/do_ci.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ci/do_ci.sh b/ci/do_ci.sh index 895ab71747d8..3218361ce81d 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -101,7 +101,7 @@ function bazel_binary_build() { CI_TARGET=$1 shift -if [[ $# -gt 1 ]]; then +if [[ $# -ge 1 ]]; then COVERAGE_TEST_TARGETS=$* TEST_TARGETS="$COVERAGE_TEST_TARGETS" else From bb3571a6207820ad0de4ef9be6db838e175f6054 Mon Sep 17 00:00:00 2001 From: Sam Flattery <44659644+samflattery@users.noreply.github.com> Date: Mon, 10 Aug 2020 13:28:29 +0100 Subject: [PATCH 889/909] xds: remove warming listeners in xDS SOTW updates (#12461) Commit Message: Remove warming listeners in xDS SOTW updates Additional Description: - my xDS fuzzer timed out on OSS fuzz here because Envoy does not remove warming listeners when they are left out of a SOTW update like it removes active ones - changes LdsApiImpl::onConfigUpdate to use a new function in ListenerManagerImpl, allListeners() instead of the old listeners() which just returns active listeners to compute the diff between the update and the current SOTW - changed lds_api_test.cc to use this new function Signed-off-by: Sam Flattery --- include/envoy/server/listener_manager.h | 30 +++++++-- source/server/lds_api.cc | 3 +- source/server/listener_manager_impl.cc | 27 ++++++-- source/server/listener_manager_impl.h | 3 +- test/integration/ads_integration_test.cc | 62 +++++++++++++++++++ test/mocks/server/listener_manager.h | 3 +- ...e-minimized-xds_fuzz_test-6524356210196480 | 54 ++++++++++++++++ test/server/hot_restarting_parent_test.cc | 6 +- test/server/lds_api_test.cc | 17 +++-- 9 files changed, 186 insertions(+), 19 deletions(-) create mode 100644 test/server/config_validation/xds_corpus/clusterfuzz-testcase-minimized-xds_fuzz_test-6524356210196480 diff --git a/include/envoy/server/listener_manager.h b/include/envoy/server/listener_manager.h index 956a89264ac4..e01551414def 100644 --- a/include/envoy/server/listener_manager.h +++ b/include/envoy/server/listener_manager.h @@ -131,6 +131,16 @@ class ListenerManager { All, }; + // The types of listeners to be returned from listeners(ListenerState). + // An enum instead of enum class so the underlying type is an int and bitwise operations can be + // used without casting. + enum ListenerState : uint8_t { + ACTIVE = 1 << 0, + WARMING = 1 << 1, + DRAINING = 1 << 2, + ALL = ACTIVE | WARMING | DRAINING + }; + virtual ~ListenerManager() = default; /** @@ -161,11 +171,15 @@ class ListenerManager { virtual void createLdsApi(const envoy::config::core::v3::ConfigSource& lds_config) PURE; /** - * @return std::vector> a list of the currently - * loaded listeners. Note that this routine returns references to the existing listeners. The - * references are only valid in the context of the current call stack and should not be stored. + * @param state the type of listener to be returned (defaults to ACTIVE), states can be OR'd + * together to return multiple different types + * @return std::vector> a list of currently known + * listeners in the requested state. Note that this routine returns references to the existing + * listeners.
The references are only valid in the context of the current call stack and should + * not be stored. */ - virtual std::vector> listeners() PURE; + virtual std::vector> + listeners(ListenerState state = ListenerState::ACTIVE) PURE; /** * @return uint64_t the total number of connections owned by all listeners across all workers. @@ -223,5 +237,13 @@ class ListenerManager { virtual ApiListenerOptRef apiListener() PURE; }; +// overload operator| to allow ListenerManager::listeners(ListenerState) to be called using a +// combination of flags, such as listeners(ListenerState::WARMING|ListenerState::ACTIVE) +constexpr ListenerManager::ListenerState operator|(const ListenerManager::ListenerState lhs, + const ListenerManager::ListenerState rhs) { + return static_cast(static_cast(lhs) | + static_cast(rhs)); +} + } // namespace Server } // namespace Envoy diff --git a/source/server/lds_api.cc b/source/server/lds_api.cc index 3165a1525ce6..4a1a65ed125b 100644 --- a/source/server/lds_api.cc +++ b/source/server/lds_api.cc @@ -97,7 +97,8 @@ void LdsApiImpl::onConfigUpdate(const std::vector& r // We need to keep track of which listeners need to remove. // Specifically, it's [listeners we currently have] - [listeners found in the response]. absl::node_hash_set listeners_to_remove; - for (const auto& listener : listener_manager_.listeners()) { + for (const auto& listener : + listener_manager_.listeners(ListenerManager::WARMING | ListenerManager::ACTIVE)) { listeners_to_remove.insert(listener.get().name()); } for (const auto& resource : resources) { diff --git a/source/server/listener_manager_impl.cc b/source/server/listener_manager_impl.cc index fa384c274fa2..a257b69b36e5 100644 --- a/source/server/listener_manager_impl.cc +++ b/source/server/listener_manager_impl.cc @@ -639,11 +639,30 @@ ListenerManagerImpl::getListenerByName(ListenerList& listeners, const std::strin return ret; } -std::vector> ListenerManagerImpl::listeners() { +std::vector> +ListenerManagerImpl::listeners(ListenerState state) { std::vector> ret; - ret.reserve(active_listeners_.size()); - for (const auto& listener : active_listeners_) { - ret.push_back(*listener); + + size_t size = 0; + size += state & WARMING ? warming_listeners_.size() : 0; + size += state & ACTIVE ? active_listeners_.size() : 0; + size += state & DRAINING ? 
draining_listeners_.size() : 0; + ret.reserve(size); + + if (state & WARMING) { + for (const auto& listener : warming_listeners_) { + ret.push_back(*listener); + } + } + if (state & ACTIVE) { + for (const auto& listener : active_listeners_) { + ret.push_back(*listener); + } + } + if (state & DRAINING) { + for (const auto& draining_listener : draining_listeners_) { + ret.push_back(*(draining_listener.listener_)); + } } return ret; } diff --git a/source/server/listener_manager_impl.h b/source/server/listener_manager_impl.h index 36f9ccd1e7f8..106e1d629dc9 100644 --- a/source/server/listener_manager_impl.h +++ b/source/server/listener_manager_impl.h @@ -186,7 +186,8 @@ class ListenerManagerImpl : public ListenerManager, Logger::Loggable> listeners() override; + std::vector> + listeners(ListenerState state = ListenerState::ACTIVE) override; uint64_t numConnections() const override; bool removeListener(const std::string& listener_name) override; void startWorkers(GuardDog& guard_dog) override; diff --git a/test/integration/ads_integration_test.cc b/test/integration/ads_integration_test.cc index 021beac26885..bf413b9d91d5 100644 --- a/test/integration/ads_integration_test.cc +++ b/test/integration/ads_integration_test.cc @@ -420,6 +420,68 @@ TEST_P(AdsIntegrationTest, CdsPausedDuringWarming) { {"warming_cluster_2", "warming_cluster_1"}, {}, {})); } +// Validate that warming listeners are removed when left out of SOTW update. +TEST_P(AdsIntegrationTest, RemoveWarmingListener) { + initialize(); + + // Send initial configuration to start workers, validate we can process a request. + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "", {}, {}, {}, true)); + sendDiscoveryResponse(Config::TypeUrl::get().Cluster, + {buildCluster("cluster_0")}, + {buildCluster("cluster_0")}, {}, "1"); + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, "", + {"cluster_0"}, {"cluster_0"}, {})); + + sendDiscoveryResponse( + Config::TypeUrl::get().ClusterLoadAssignment, {buildClusterLoadAssignment("cluster_0")}, + {buildClusterLoadAssignment("cluster_0")}, {}, "1"); + + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "1", {}, {}, {})); + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Listener, "", {}, {}, {})); + sendDiscoveryResponse( + Config::TypeUrl::get().Listener, {buildListener("listener_0", "route_config_0")}, + {buildListener("listener_0", "route_config_0")}, {}, "1"); + + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, "1", + {"cluster_0"}, {}, {})); + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().RouteConfiguration, "", + {"route_config_0"}, {"route_config_0"}, {})); + sendDiscoveryResponse( + Config::TypeUrl::get().RouteConfiguration, {buildRouteConfig("route_config_0", "cluster_0")}, + {buildRouteConfig("route_config_0", "cluster_0")}, {}, "1"); + + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Listener, "1", {}, {}, {})); + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().RouteConfiguration, "1", + {"route_config_0"}, {}, {})); + + test_server_->waitForCounterGe("listener_manager.listener_create_success", 1); + makeSingleRequest(); + + // Send a listener without its route, so it will be added as warming. 
+ sendDiscoveryResponse( + Config::TypeUrl::get().Listener, + {buildListener("listener_0", "route_config_0"), + buildListener("warming_listener_1", "nonexistent_route")}, + {buildListener("warming_listener_1", "nonexistent_route")}, {}, "2"); + test_server_->waitForGaugeEq("listener_manager.total_listeners_warming", 1); + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().RouteConfiguration, "1", + {"nonexistent_route", "route_config_0"}, + {"nonexistent_route"}, {})); + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Listener, "2", {}, {}, {})); + + // Send a request removing the warming listener. + sendDiscoveryResponse( + Config::TypeUrl::get().Listener, {buildListener("listener_0", "route_config_0")}, + {buildListener("listener_0", "route_config_0")}, {"warming_listener_1"}, "3"); + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().RouteConfiguration, "1", + {"route_config_0"}, {}, {"nonexistent_route"})); + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Listener, "3", {}, {}, {})); + + // The warming listener should be successfully removed. + test_server_->waitForCounterEq("listener_manager.listener_removed", 1); + test_server_->waitForGaugeEq("listener_manager.total_listeners_warming", 0); +} + // Verify cluster warming is finished only on named EDS response. TEST_P(AdsIntegrationTest, ClusterWarmingOnNamedResponse) { initialize(); diff --git a/test/mocks/server/listener_manager.h b/test/mocks/server/listener_manager.h index 889dfa1f521f..a91a9acb1764 100644 --- a/test/mocks/server/listener_manager.h +++ b/test/mocks/server/listener_manager.h @@ -15,7 +15,8 @@ class MockListenerManager : public ListenerManager { (const envoy::config::listener::v3::Listener& config, const std::string& version_info, bool modifiable)); MOCK_METHOD(void, createLdsApi, (const envoy::config::core::v3::ConfigSource& lds_config)); - MOCK_METHOD(std::vector>, listeners, ()); + MOCK_METHOD(std::vector>, listeners, + (ListenerState state)); MOCK_METHOD(uint64_t, numConnections, (), (const)); MOCK_METHOD(bool, removeListener, (const std::string& listener_name)); MOCK_METHOD(void, startWorkers, (GuardDog & guard_dog)); diff --git a/test/server/config_validation/xds_corpus/clusterfuzz-testcase-minimized-xds_fuzz_test-6524356210196480 b/test/server/config_validation/xds_corpus/clusterfuzz-testcase-minimized-xds_fuzz_test-6524356210196480 new file mode 100644 index 000000000000..df27fe2695e7 --- /dev/null +++ b/test/server/config_validation/xds_corpus/clusterfuzz-testcase-minimized-xds_fuzz_test-6524356210196480 @@ -0,0 +1,54 @@ +actions { + add_listener { + listener_num: 256 + route_num: 6356993 + } +} +actions { + add_listener { + route_num: 16 + } +} +actions { + remove_listener { + } +} +actions { + add_route { + route_num: 1 + } +} +actions { + add_listener { + route_num: 11264 + } +} +actions { + add_listener { + listener_num: 2147483648 + route_num: 2147483648 + } +} +actions { + add_listener { + listener_num: 6356993 + route_num: 11264 + } +} +actions { + add_listener { + listener_num: 256 + route_num: 65537 + } +} +actions { + add_route { + route_num: 1 + } +} +actions { + add_listener { + listener_num: 2147483648 + route_num: 2 + } +} diff --git a/test/server/hot_restarting_parent_test.cc b/test/server/hot_restarting_parent_test.cc index 80ce667bb50d..a3f405d550db 100644 --- a/test/server/hot_restarting_parent_test.cc +++ b/test/server/hot_restarting_parent_test.cc @@ -36,7 +36,8 @@ TEST_F(HotRestartingParentTest, GetListenSocketsForChildNotFound) { 
MockListenerManager listener_manager; std::vector> listeners; EXPECT_CALL(server_, listenerManager()).WillOnce(ReturnRef(listener_manager)); - EXPECT_CALL(listener_manager, listeners()).WillOnce(Return(listeners)); + EXPECT_CALL(listener_manager, listeners(ListenerManager::ListenerState::ACTIVE)) + .WillOnce(Return(listeners)); HotRestartMessage::Request request; request.mutable_pass_listen_socket()->set_address("tcp://127.0.0.1:80"); @@ -51,7 +52,8 @@ TEST_F(HotRestartingParentTest, GetListenSocketsForChildNotBindPort) { InSequence s; listeners.push_back(std::ref(*static_cast(&listener_config))); EXPECT_CALL(server_, listenerManager()).WillOnce(ReturnRef(listener_manager)); - EXPECT_CALL(listener_manager, listeners()).WillOnce(Return(listeners)); + EXPECT_CALL(listener_manager, listeners(ListenerManager::ListenerState::ACTIVE)) + .WillOnce(Return(listeners)); EXPECT_CALL(listener_config, listenSocketFactory()); EXPECT_CALL(listener_config.socket_factory_, localAddress()); EXPECT_CALL(listener_config, bindToPort()).WillOnce(Return(false)); diff --git a/test/server/lds_api_test.cc b/test/server/lds_api_test.cc index f4c5aee7b72c..54a84886e832 100644 --- a/test/server/lds_api_test.cc +++ b/test/server/lds_api_test.cc @@ -76,7 +76,8 @@ class LdsApiTest : public testing::Test { listeners_.back().name_ = name; refs.emplace_back(listeners_.back()); } - EXPECT_CALL(listener_manager_, listeners()).WillOnce(Return(refs)); + EXPECT_CALL(listener_manager_, listeners(ListenerManager::WARMING | ListenerManager::ACTIVE)) + .WillOnce(Return(refs)); EXPECT_CALL(listener_manager_, beginListenerUpdate()); } @@ -120,7 +121,8 @@ TEST_F(LdsApiTest, MisconfiguredListenerNameIsPresentInException) { socket_address->set_port_value(1); listener.add_filter_chains(); - EXPECT_CALL(listener_manager_, listeners()).WillOnce(Return(existing_listeners)); + EXPECT_CALL(listener_manager_, listeners(ListenerManager::WARMING | ListenerManager::ACTIVE)) + .WillOnce(Return(existing_listeners)); EXPECT_CALL(listener_manager_, beginListenerUpdate()); EXPECT_CALL(listener_manager_, addOrUpdateListener(_, _, true)) @@ -141,7 +143,8 @@ TEST_F(LdsApiTest, EmptyListenersUpdate) { std::vector> existing_listeners; - EXPECT_CALL(listener_manager_, listeners()).WillOnce(Return(existing_listeners)); + EXPECT_CALL(listener_manager_, listeners(ListenerManager::WARMING | ListenerManager::ACTIVE)) + .WillOnce(Return(existing_listeners)); EXPECT_CALL(listener_manager_, beginListenerUpdate()); EXPECT_CALL(listener_manager_, endListenerUpdate(_)) .WillOnce(Invoke([](ListenerManager::FailureStates&& state) { EXPECT_EQ(0, state.size()); })); @@ -164,7 +167,8 @@ TEST_F(LdsApiTest, ListenerCreationContinuesEvenAfterException) { const auto listener_2 = buildListener("valid-listener-2"); const auto listener_3 = buildListener("invalid-listener-2"); - EXPECT_CALL(listener_manager_, listeners()).WillOnce(Return(existing_listeners)); + EXPECT_CALL(listener_manager_, listeners(ListenerManager::WARMING | ListenerManager::ACTIVE)) + .WillOnce(Return(existing_listeners)); EXPECT_CALL(listener_manager_, beginListenerUpdate()); EXPECT_CALL(listener_manager_, addOrUpdateListener(_, _, true)) @@ -195,7 +199,8 @@ TEST_F(LdsApiTest, ValidateDuplicateListeners) { const auto listener = buildListener("duplicate_listener"); std::vector> existing_listeners; - EXPECT_CALL(listener_manager_, listeners()).WillOnce(Return(existing_listeners)); + EXPECT_CALL(listener_manager_, listeners(ListenerManager::WARMING | ListenerManager::ACTIVE)) + 
.WillOnce(Return(existing_listeners)); EXPECT_CALL(listener_manager_, beginListenerUpdate()); EXPECT_CALL(listener_manager_, addOrUpdateListener(_, _, true)).WillOnce(Return(true)); EXPECT_CALL(listener_manager_, endListenerUpdate(_)); @@ -347,7 +352,7 @@ version_info: '1' address: tcp://0.0.0.1 port_value: 61000 filter_chains: - - filters: + - filters: )EOF"; auto response1 = TestUtility::parseYaml(response1_yaml); From 2187f1070a76a124a4d7cebe72551804b1a5218b Mon Sep 17 00:00:00 2001 From: asraa Date: Mon, 10 Aug 2020 08:56:05 -0400 Subject: [PATCH 890/909] http1: remove exceptions from H/1 codec (#11778) Commit Message: Remove all throw statements from H/1 codec This change removed all uses of C++ exceptions from H/1 codec. I modeled the flow after Yan's H/2 work (#11575). Codec status are set in uniform helper methods. This is the only change from the previous PR (#11101), besides merging newer exceptions. This change replaces all throw statements with a return of corresponding error Status and adds plumbing to return the status to codec callers. The dispatch() method returns the encountered error to the caller, which will be handled accordingly. The calls to the RequestEncoder::encodeHeaders() NOT called from dispatch() method will RELEASE_ASSERT if an error code is returned. This does not alter the existing behavior of abnormally terminating the process, just the method of termination: RELEASE_ASSERT vs uncaught exception. Risk Level: High (Codec changes) Signed-off-by: Asra Ali --- source/common/http/http1/BUILD | 2 +- source/common/http/http1/codec_impl.cc | 253 ++++++++++++++-------- source/common/http/http1/codec_impl.h | 88 +++++--- test/common/http/codec_impl_fuzz_test.cc | 2 +- test/common/http/http1/codec_impl_test.cc | 39 +++- 5 files changed, 250 insertions(+), 134 deletions(-) diff --git a/source/common/http/http1/BUILD b/source/common/http/http1/BUILD index 9451c4e29ae3..2fb4325d9810 100644 --- a/source/common/http/http1/BUILD +++ b/source/common/http/http1/BUILD @@ -55,7 +55,7 @@ envoy_cc_library( srcs = ["codec_impl.cc"], hdrs = ["codec_impl.h"], external_deps = ["http_parser"], - deps = CODEC_LIB_DEPS, + deps = CODEC_LIB_DEPS + ["//source/common/common:cleanup_lib"], ) envoy_cc_library( diff --git a/source/common/http/http1/codec_impl.cc b/source/common/http/http1/codec_impl.cc index c9cf88f569e8..4e8c54ed7db5 100644 --- a/source/common/http/http1/codec_impl.cc +++ b/source/common/http/http1/codec_impl.cc @@ -9,7 +9,9 @@ #include "envoy/http/header_map.h" #include "envoy/network/connection.h" +#include "common/common/cleanup.h" #include "common/common/enum_to_int.h" +#include "common/common/statusor.h" #include "common/common/utility.h" #include "common/grpc/common.h" #include "common/http/exception.h" @@ -275,9 +277,10 @@ void ServerConnectionImpl::maybeAddSentinelBufferFragment(Buffer::WatermarkBuffe outbound_responses_++; } -void ServerConnectionImpl::doFloodProtectionChecks() const { +Status ServerConnectionImpl::doFloodProtectionChecks() const { + ASSERT(dispatching_); if (!flood_protection_) { - return; + return okStatus(); } // Before processing another request, make sure that we are below the response flood protection // threshold. 
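To make the error-handling refactor above easier to follow, here is a minimal self-contained sketch of the throw-to-Status conversion the commit message describes. It uses absl::Status and absl::StatusOr as stand-ins for the codec's Status/StatusOr types, and all function names are invented for the example; it illustrates the pattern only and is not the actual codec code:

#include <iostream>
#include <string>

#include "absl/status/status.h"
#include "absl/status/statusor.h"

// Before the refactor, a validation step would throw on bad input:
//   void validateHeaderValue(const std::string& value) {
//     if (value.find('\0') != std::string::npos) {
//       throw std::runtime_error("invalid header value");
//     }
//   }

// After the refactor, the same step reports failure through its return value.
absl::Status validateHeaderValue(const std::string& value) {
  if (value.find('\0') != std::string::npos) {
    return absl::InvalidArgumentError("header value contains invalid chars");
  }
  return absl::OkStatus();
}

// Callers propagate the error upward instead of relying on stack unwinding,
// mirroring the RETURN_IF_ERROR plumbing used in the patch.
absl::StatusOr<size_t> dispatchHeader(const std::string& value) {
  absl::Status status = validateHeaderValue(value);
  if (!status.ok()) {
    return status; // surfaced all the way up to the caller of dispatch()
  }
  return value.size();
}

int main() {
  const auto ok = dispatchHeader("text/plain");
  const auto bad = dispatchHeader(std::string("bad\0value", 9));
  std::cout << "ok: " << ok.value() << " bytes accepted\n";
  std::cout << "bad: " << bad.status().message() << "\n";
  return 0;
}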
@@ -285,8 +288,9 @@ void ServerConnectionImpl::doFloodProtectionChecks() const { ENVOY_CONN_LOG(trace, "error accepting request: too many pending responses queued", connection_); stats_.response_flood_.inc(); - throw FrameFloodException("Too many responses queued."); + return bufferFloodError("Too many responses queued."); } + return okStatus(); } void ConnectionImpl::flushOutput(bool end_encode) { @@ -372,12 +376,14 @@ void RequestEncoderImpl::encodeHeaders(const RequestHeaderMap& headers, bool end const HeaderEntry* host = headers.Host(); bool is_connect = HeaderUtility::isConnect(headers); - if (!method || (!path && !is_connect)) { - // TODO(#10878): This exception does not occur during dispatch and would not be triggered under - // normal circumstances since inputs would fail parsing at ingress. Replace with proper error - // handling when exceptions are removed. Include missing host header for CONNECT. - throw CodecClientException(":method and :path must be specified"); - } + // TODO(#10878): Include missing host header for CONNECT. + // The RELEASE_ASSERT below does not change the existing behavior of `encodeHeaders`. + // The `encodeHeaders` used to throw on errors. Callers of `encodeHeaders()` do not catch + // exceptions and this would cause abnormal process termination in error cases. This change + // replaces abnormal process termination from unhandled exception with the RELEASE_ASSERT. Further + // work will replace this RELEASE_ASSERT with proper error handling. + RELEASE_ASSERT(method && (path || is_connect), ":method and :path must be specified"); + if (method->value() == Headers::get().MethodValues.Head) { head_request_ = true; } else if (method->value() == Headers::get().MethodValues.Connect) { @@ -400,34 +406,57 @@ void RequestEncoderImpl::encodeHeaders(const RequestHeaderMap& headers, bool end encodeHeadersBase(headers, absl::nullopt, end_stream); } +int ConnectionImpl::setAndCheckCallbackStatus(Status&& status) { + ASSERT(codec_status_.ok()); + codec_status_ = std::move(status); + return codec_status_.ok() ? 
enumToInt(HttpParserCode::Success) : enumToInt(HttpParserCode::Error); +} + +int ConnectionImpl::setAndCheckCallbackStatusOr(Envoy::StatusOr&& statusor) { + ASSERT(codec_status_.ok()); + if (statusor.ok()) { + return statusor.value(); + } else { + codec_status_ = std::move(statusor.status()); + return enumToInt(HttpParserCode::Error); + } +} + http_parser_settings ConnectionImpl::settings_{ [](http_parser* parser) -> int { - static_cast(parser->data)->onMessageBeginBase(); - return 0; + auto* conn_impl = static_cast(parser->data); + auto status = conn_impl->onMessageBeginBase(); + return conn_impl->setAndCheckCallbackStatus(std::move(status)); }, [](http_parser* parser, const char* at, size_t length) -> int { - static_cast(parser->data)->onUrl(at, length); - return 0; + auto* conn_impl = static_cast(parser->data); + auto status = conn_impl->onUrl(at, length); + return conn_impl->setAndCheckCallbackStatus(std::move(status)); }, nullptr, // on_status [](http_parser* parser, const char* at, size_t length) -> int { - static_cast(parser->data)->onHeaderField(at, length); - return 0; + auto* conn_impl = static_cast(parser->data); + auto status = conn_impl->onHeaderField(at, length); + return conn_impl->setAndCheckCallbackStatus(std::move(status)); }, [](http_parser* parser, const char* at, size_t length) -> int { - static_cast(parser->data)->onHeaderValue(at, length); - return 0; + auto* conn_impl = static_cast(parser->data); + auto status = conn_impl->onHeaderValue(at, length); + return conn_impl->setAndCheckCallbackStatus(std::move(status)); }, [](http_parser* parser) -> int { - return static_cast(parser->data)->onHeadersCompleteBase(); + auto* conn_impl = static_cast(parser->data); + auto statusor = conn_impl->onHeadersCompleteBase(); + return conn_impl->setAndCheckCallbackStatusOr(std::move(statusor)); }, [](http_parser* parser, const char* at, size_t length) -> int { static_cast(parser->data)->bufferBody(at, length); return 0; }, [](http_parser* parser) -> int { - static_cast(parser->data)->onMessageCompleteBase(); - return 0; + auto* conn_impl = static_cast(parser->data); + auto status = conn_impl->onMessageCompleteBase(); + return conn_impl->setAndCheckCallbackStatus(std::move(status)); }, [](http_parser* parser) -> int { // A 0-byte chunk header is used to signal the end of the chunked body. 
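The settings_ table above has to satisfy http_parser's C API, in which a callback can only signal failure by returning a non-zero int, so each entry stashes the handler's Status on the connection object and maps it to a return code that dispatch() later surfaces to the caller. A simplified standalone sketch of that bridging pattern follows (all names invented, absl::Status standing in for the codec's Status type; an illustration, not the actual implementation):

#include <iostream>
#include <string>

#include "absl/status/status.h"

class ToyCodec {
public:
  // Status-returning C++ handler.
  absl::Status onHeaderField(const std::string& field) {
    if (field.empty()) {
      return absl::InvalidArgumentError("empty header field");
    }
    return absl::OkStatus();
  }

  // Adapter with the int-returning shape a C parser expects: 0 keeps parsing,
  // non-zero aborts. The real Status is stashed for the dispatch loop.
  int onHeaderFieldAdapter(const std::string& field) { return bridge(onHeaderField(field)); }

  // Emulates the dispatch() plumbing: run the parser, then surface any error.
  absl::Status dispatch(const std::string& field) {
    if (onHeaderFieldAdapter(field) != 0) {
      return saved_status_;
    }
    return absl::OkStatus();
  }

private:
  int bridge(absl::Status&& status) {
    if (!status.ok() && saved_status_.ok()) {
      saved_status_ = std::move(status); // keep only the first error
    }
    return saved_status_.ok() ? 0 : 1;
  }

  absl::Status saved_status_ = absl::OkStatus();
};

int main() {
  ToyCodec codec;
  std::cout << codec.dispatch("content-type").message() << "\n"; // empty message: OK status
  std::cout << codec.dispatch("").message() << "\n";             // prints the stashed error
  return 0;
}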
@@ -453,6 +482,7 @@ ConnectionImpl::ConnectionImpl(Network::Connection& connection, CodecStats& stat enable_trailers_(enable_trailers), strict_1xx_and_204_headers_(Runtime::runtimeFeatureEnabled( "envoy.reloadable_features.strict_1xx_and_204_response_headers")), + dispatching_(false), output_buffer_([&]() -> void { this->onBelowLowWatermark(); }, [&]() -> void { this->onAboveHighWatermark(); }, []() -> void { /* TODO(adisuissa): Handle overflow watermark */ }), @@ -462,11 +492,12 @@ ConnectionImpl::ConnectionImpl(Network::Connection& connection, CodecStats& stat parser_.data = this; } -void ConnectionImpl::completeLastHeader() { +Status ConnectionImpl::completeLastHeader() { + ASSERT(dispatching_); ENVOY_CONN_LOG(trace, "completed header: key={} value={}", connection_, current_header_field_.getStringView(), current_header_value_.getStringView()); - checkHeaderNameForUnderscores(); + RETURN_IF_ERROR(checkHeaderNameForUnderscores()); auto& headers_or_trailers = headersOrTrailers(); if (!current_header_field_.empty()) { current_header_field_.inlineTransform([](char c) { return absl::ascii_tolower(c); }); @@ -481,15 +512,16 @@ void ConnectionImpl::completeLastHeader() { // Check if the number of headers exceeds the limit. if (headers_or_trailers.size() > max_headers_count_) { error_code_ = Http::Code::RequestHeaderFieldsTooLarge; - sendProtocolError(Http1ResponseCodeDetails::get().TooManyHeaders); + RETURN_IF_ERROR(sendProtocolError(Http1ResponseCodeDetails::get().TooManyHeaders)); const absl::string_view header_type = processing_trailers_ ? Http1HeaderTypes::get().Trailers : Http1HeaderTypes::get().Headers; - throw CodecProtocolException(absl::StrCat(header_type, " size exceeds limit")); + return codecProtocolError(absl::StrCat(header_type, " size exceeds limit")); } header_parsing_state_ = HeaderParsingState::Field; ASSERT(current_header_field_.empty()); ASSERT(current_header_value_.empty()); + return okStatus(); } uint32_t ConnectionImpl::getHeadersSize() { @@ -497,15 +529,16 @@ uint32_t ConnectionImpl::getHeadersSize() { headersOrTrailers().byteSize(); } -void ConnectionImpl::checkMaxHeadersSize() { +Status ConnectionImpl::checkMaxHeadersSize() { const uint32_t total = getHeadersSize(); if (total > (max_headers_kb_ * 1024)) { const absl::string_view header_type = processing_trailers_ ? Http1HeaderTypes::get().Trailers : Http1HeaderTypes::get().Headers; error_code_ = Http::Code::RequestHeaderFieldsTooLarge; - sendProtocolError(Http1ResponseCodeDetails::get().HeadersTooLarge); - throw CodecProtocolException(absl::StrCat(header_type, " size exceeds limit")); + RETURN_IF_ERROR(sendProtocolError(Http1ResponseCodeDetails::get().HeadersTooLarge)); + return codecProtocolError(absl::StrCat(header_type, " size exceeds limit")); } + return okStatus(); } bool ConnectionImpl::maybeDirectDispatch(Buffer::Instance& data) { @@ -530,8 +563,14 @@ Http::Status ConnectionImpl::dispatch(Buffer::Instance& data) { Http::Status ConnectionImpl::innerDispatch(Buffer::Instance& data) { ENVOY_CONN_LOG(trace, "parsing {} bytes", connection_, data.length()); + // Make sure that dispatching_ is set to false after dispatching, even when + // http_parser exits early with an error code. 
+ Cleanup cleanup([this]() { dispatching_ = false; }); + ASSERT(!dispatching_); + ASSERT(codec_status_.ok()); ASSERT(buffered_body_.length() == 0); + dispatching_ = true; if (maybeDirectDispatch(data)) { return Http::okStatus(); } @@ -542,7 +581,11 @@ Http::Status ConnectionImpl::innerDispatch(Buffer::Instance& data) { ssize_t total_parsed = 0; if (data.length() > 0) { for (const Buffer::RawSlice& slice : data.getRawSlices()) { - total_parsed += dispatchSlice(static_cast(slice.mem_), slice.len_); + auto statusor_parsed = dispatchSlice(static_cast(slice.mem_), slice.len_); + if (!statusor_parsed.ok()) { + return statusor_parsed.status(); + } + total_parsed += statusor_parsed.value(); if (HTTP_PARSER_ERRNO(&parser_) != HPE_OK) { // Parse errors trigger an exception in dispatchSlice so we are guaranteed to be paused at // this point. @@ -552,7 +595,10 @@ Http::Status ConnectionImpl::innerDispatch(Buffer::Instance& data) { } dispatchBufferedBody(); } else { - dispatchSlice(nullptr, 0); + auto result = dispatchSlice(nullptr, 0); + if (!result.ok()) { + return result.status(); + } } ASSERT(buffered_body_.length() == 0); @@ -565,50 +611,59 @@ Http::Status ConnectionImpl::innerDispatch(Buffer::Instance& data) { return Http::okStatus(); } -size_t ConnectionImpl::dispatchSlice(const char* slice, size_t len) { +Envoy::StatusOr ConnectionImpl::dispatchSlice(const char* slice, size_t len) { + ASSERT(codec_status_.ok() && dispatching_); ssize_t rc = http_parser_execute(&parser_, &settings_, slice, len); + if (!codec_status_.ok()) { + return codec_status_; + } if (HTTP_PARSER_ERRNO(&parser_) != HPE_OK && HTTP_PARSER_ERRNO(&parser_) != HPE_PAUSED) { - sendProtocolError(Http1ResponseCodeDetails::get().HttpCodecError); - throw CodecProtocolException("http/1.1 protocol error: " + - std::string(http_errno_name(HTTP_PARSER_ERRNO(&parser_)))); + RETURN_IF_ERROR(sendProtocolError(Http1ResponseCodeDetails::get().HttpCodecError)); + // Avoid overwriting the codec_status_ set in the callbacks. + ASSERT(codec_status_.ok()); + codec_status_ = codecProtocolError( + absl::StrCat("http/1.1 protocol error: ", http_errno_name(HTTP_PARSER_ERRNO(&parser_)))); + return codec_status_; } return rc; } -void ConnectionImpl::onHeaderField(const char* data, size_t length) { +Status ConnectionImpl::onHeaderField(const char* data, size_t length) { + ASSERT(dispatching_); // We previously already finished up the headers, these headers are // now trailers. if (header_parsing_state_ == HeaderParsingState::Done) { if (!enable_trailers_) { // Ignore trailers. - return; + return okStatus(); } processing_trailers_ = true; header_parsing_state_ = HeaderParsingState::Field; allocTrailers(); } if (header_parsing_state_ == HeaderParsingState::Value) { - completeLastHeader(); + RETURN_IF_ERROR(completeLastHeader()); } current_header_field_.append(data, length); - checkMaxHeadersSize(); + return checkMaxHeadersSize(); } -void ConnectionImpl::onHeaderValue(const char* data, size_t length) { +Status ConnectionImpl::onHeaderValue(const char* data, size_t length) { + ASSERT(dispatching_); if (header_parsing_state_ == HeaderParsingState::Done && !enable_trailers_) { // Ignore trailers. 
- return; + return okStatus(); } absl::string_view header_value{data, length}; if (!Http::HeaderUtility::headerValueIsValid(header_value)) { ENVOY_CONN_LOG(debug, "invalid header value: {}", connection_, header_value); error_code_ = Http::Code::BadRequest; - sendProtocolError(Http1ResponseCodeDetails::get().InvalidCharacters); - throw CodecProtocolException("http/1.1 protocol error: header value contains invalid chars"); + RETURN_IF_ERROR(sendProtocolError(Http1ResponseCodeDetails::get().InvalidCharacters)); + return codecProtocolError("http/1.1 protocol error: header value contains invalid chars"); } header_parsing_state_ = HeaderParsingState::Value; @@ -621,13 +676,14 @@ void ConnectionImpl::onHeaderValue(const char* data, size_t length) { } current_header_value_.append(header_value.data(), header_value.length()); - checkMaxHeadersSize(); + return checkMaxHeadersSize(); } -int ConnectionImpl::onHeadersCompleteBase() { +Envoy::StatusOr ConnectionImpl::onHeadersCompleteBase() { ASSERT(!processing_trailers_); + ASSERT(dispatching_); ENVOY_CONN_LOG(trace, "onHeadersCompleteBase", connection_); - completeLastHeader(); + RETURN_IF_ERROR(completeLastHeader()); if (!(parser_.http_major == 1 && parser_.http_minor == 1)) { // This is not necessarily true, but it's good enough since higher layers only care if this is @@ -666,8 +722,8 @@ int ConnectionImpl::onHeadersCompleteBase() { // Per https://tools.ietf.org/html/rfc7231#section-4.3.6 a payload with a // CONNECT request has no defined semantics, and may be rejected. error_code_ = Http::Code::BadRequest; - sendProtocolError(Http1ResponseCodeDetails::get().BodyDisallowed); - throw CodecProtocolException("http/1.1 protocol error: unsupported content length"); + RETURN_IF_ERROR(sendProtocolError(Http1ResponseCodeDetails::get().BodyDisallowed)); + return codecProtocolError("http/1.1 protocol error: unsupported content length"); } } ENVOY_CONN_LOG(trace, "codec entering upgrade mode for CONNECT request.", connection_); @@ -683,16 +739,20 @@ int ConnectionImpl::onHeadersCompleteBase() { if (!absl::EqualsIgnoreCase(encoding, Headers::get().TransferEncodingValues.Chunked) || parser_.method == HTTP_CONNECT) { error_code_ = Http::Code::NotImplemented; - sendProtocolError(Http1ResponseCodeDetails::get().InvalidTransferEncoding); - throw CodecProtocolException("http/1.1 protocol error: unsupported transfer encoding"); + RETURN_IF_ERROR(sendProtocolError(Http1ResponseCodeDetails::get().InvalidTransferEncoding)); + return codecProtocolError("http/1.1 protocol error: unsupported transfer encoding"); } } - int rc = onHeadersComplete(); + auto statusor = onHeadersComplete(); + if (!statusor.ok()) { + RETURN_IF_ERROR(statusor.status()); + } + header_parsing_state_ = HeaderParsingState::Done; // Returning 2 informs http_parser to not expect a body or further data on this connection. - return handling_upgrade_ ? 2 : rc; + return handling_upgrade_ ? 
2 : statusor.value(); } void ConnectionImpl::bufferBody(const char* data, size_t length) { @@ -701,6 +761,7 @@ void ConnectionImpl::bufferBody(const char* data, size_t length) { void ConnectionImpl::dispatchBufferedBody() { ASSERT(HTTP_PARSER_ERRNO(&parser_) == HPE_OK || HTTP_PARSER_ERRNO(&parser_) == HPE_PAUSED); + ASSERT(codec_status_.ok()); if (buffered_body_.length() > 0) { onBody(buffered_body_); buffered_body_.drain(buffered_body_.length()); @@ -715,7 +776,7 @@ void ConnectionImpl::onChunkHeader(bool is_final_chunk) { } } -void ConnectionImpl::onMessageCompleteBase() { +Status ConnectionImpl::onMessageCompleteBase() { ENVOY_CONN_LOG(trace, "message complete", connection_); dispatchBufferedBody(); @@ -726,19 +787,20 @@ void ConnectionImpl::onMessageCompleteBase() { ASSERT(!deferred_end_stream_headers_); ENVOY_CONN_LOG(trace, "Pausing parser due to upgrade.", connection_); http_parser_pause(&parser_, 1); - return; + return okStatus(); } // If true, this indicates we were processing trailers and must // move the last header into current_header_map_ if (header_parsing_state_ == HeaderParsingState::Value) { - completeLastHeader(); + RETURN_IF_ERROR(completeLastHeader()); } onMessageComplete(); + return okStatus(); } -void ConnectionImpl::onMessageBeginBase() { +Status ConnectionImpl::onMessageBeginBase() { ENVOY_CONN_LOG(trace, "message begin", connection_); // Make sure that if HTTP/1.0 and HTTP/1.1 requests share a connection Envoy correctly sets // protocol for each request. Envoy defaults to 1.1 but sets the protocol to 1.0 where applicable @@ -747,7 +809,7 @@ void ConnectionImpl::onMessageBeginBase() { processing_trailers_ = false; header_parsing_state_ = HeaderParsingState::Field; allocHeaders(); - onMessageBegin(); + return onMessageBegin(); } void ConnectionImpl::onResetStreamBase(StreamResetReason reason) { @@ -795,7 +857,7 @@ void ServerConnectionImpl::onEncodeComplete() { } } -void ServerConnectionImpl::handlePath(RequestHeaderMap& headers, unsigned int method) { +Status ServerConnectionImpl::handlePath(RequestHeaderMap& headers, unsigned int method) { HeaderString path(Headers::get().Path); bool is_connect = (method == HTTP_CONNECT); @@ -806,7 +868,7 @@ void ServerConnectionImpl::handlePath(RequestHeaderMap& headers, unsigned int me (active_request.request_url_.getStringView()[0] == '/' || ((method == HTTP_OPTIONS) && active_request.request_url_.getStringView()[0] == '*'))) { headers.addViaMove(std::move(path), std::move(active_request.request_url_)); - return; + return okStatus(); } // If absolute_urls and/or connect are not going be handled, copy the url and return. @@ -815,13 +877,13 @@ void ServerConnectionImpl::handlePath(RequestHeaderMap& headers, unsigned int me // Absolute URLS in CONNECT requests will be rejected below by the URL class validation. 
if (!codec_settings_.allow_absolute_url_ && !is_connect) { headers.addViaMove(std::move(path), std::move(active_request.request_url_)); - return; + return okStatus(); } Utility::Url absolute_url; if (!absolute_url.initialize(active_request.request_url_.getStringView(), is_connect)) { - sendProtocolError(Http1ResponseCodeDetails::get().InvalidUrl); - throw CodecProtocolException("http/1.1 protocol error: invalid url in request line"); + RETURN_IF_ERROR(sendProtocolError(Http1ResponseCodeDetails::get().InvalidUrl)); + return codecProtocolError("http/1.1 protocol error: invalid url in request line"); } // RFC7230#5.7 // When a proxy receives a request with an absolute-form of @@ -836,9 +898,10 @@ void ServerConnectionImpl::handlePath(RequestHeaderMap& headers, unsigned int me headers.setPath(absolute_url.pathAndQueryParams()); } active_request.request_url_.clear(); + return okStatus(); } -int ServerConnectionImpl::onHeadersComplete() { +Envoy::StatusOr ServerConnectionImpl::onHeadersComplete() { // Handle the case where response happens prior to request complete. It's up to upper layer code // to disconnect the connection but we shouldn't fire any more events since it doesn't make // sense. @@ -855,8 +918,9 @@ int ServerConnectionImpl::onHeadersComplete() { ENVOY_CONN_LOG(debug, "Invalid nominated headers in Connection: {}", connection_, header_value); error_code_ = Http::Code::BadRequest; - sendProtocolError(Http1ResponseCodeDetails::get().ConnectionHeaderSanitization); - throw CodecProtocolException("Invalid nominated headers in Connection."); + RETURN_IF_ERROR( + sendProtocolError(Http1ResponseCodeDetails::get().ConnectionHeaderSanitization)); + return codecProtocolError("Invalid nominated headers in Connection."); } } @@ -865,7 +929,7 @@ int ServerConnectionImpl::onHeadersComplete() { active_request.response_encoder_.setIsResponseToHeadRequest(parser_.method == HTTP_HEAD); active_request.response_encoder_.setIsResponseToConnectRequest(parser_.method == HTTP_CONNECT); - handlePath(*headers, parser_.method); + RETURN_IF_ERROR(handlePath(*headers, parser_.method)); ASSERT(active_request.request_url_.empty()); headers->setMethod(method_string); @@ -873,8 +937,8 @@ int ServerConnectionImpl::onHeadersComplete() { // Make sure the host is valid. auto details = HeaderUtility::requestHeadersValid(*headers); if (details.has_value()) { - sendProtocolError(details.value().get()); - throw CodecProtocolException( + RETURN_IF_ERROR(sendProtocolError(details.value().get())); + return codecProtocolError( "http/1.1 protocol error: request headers failed spec compliance checks"); } @@ -901,26 +965,31 @@ int ServerConnectionImpl::onHeadersComplete() { return 0; } -void ServerConnectionImpl::onMessageBegin() { +Status ServerConnectionImpl::onMessageBegin() { if (!resetStreamCalled()) { ASSERT(!active_request_.has_value()); active_request_.emplace(*this, header_key_formatter_.get()); auto& active_request = active_request_.value(); + if (resetStreamCalled()) { + return codecClientError("cannot create new streams after calling reset"); + } active_request.request_decoder_ = &callbacks_.newStream(active_request.response_encoder_); // Check for pipelined request flood as we prepare to accept a new request. // Parse errors that happen prior to onMessageBegin result in stream termination, it is not // possible to overflow output buffers with early parse errors. 
- doFloodProtectionChecks(); + RETURN_IF_ERROR(doFloodProtectionChecks()); } + return okStatus(); } -void ServerConnectionImpl::onUrl(const char* data, size_t length) { +Status ServerConnectionImpl::onUrl(const char* data, size_t length) { if (active_request_.has_value()) { active_request_.value().request_url_.append(data, length); - checkMaxHeadersSize(); + RETURN_IF_ERROR(checkMaxHeadersSize()); } + return okStatus(); } void ServerConnectionImpl::onBody(Buffer::Instance& data) { @@ -985,14 +1054,14 @@ void ServerConnectionImpl::sendProtocolErrorOld(absl::string_view details) { } } -void ServerConnectionImpl::sendProtocolError(absl::string_view details) { +Status ServerConnectionImpl::sendProtocolError(absl::string_view details) { if (!Runtime::runtimeFeatureEnabled("envoy.reloadable_features.early_errors_via_hcm")) { sendProtocolErrorOld(details); - return; + return okStatus(); } // We do this here because we may get a protocol error before we have a logical stream. if (!active_request_.has_value()) { - onMessageBeginBase(); + RETURN_IF_ERROR(onMessageBeginBase()); } ASSERT(active_request_.has_value()); @@ -1009,8 +1078,8 @@ void ServerConnectionImpl::sendProtocolError(absl::string_view details) { active_request_->request_decoder_->sendLocalReply(is_grpc_request, error_code_, CodeUtility::toString(error_code_), nullptr, absl::nullopt, details); - return; } + return okStatus(); } void ServerConnectionImpl::onAboveHighWatermark() { @@ -1031,7 +1100,7 @@ void ServerConnectionImpl::releaseOutboundResponse( delete fragment; } -void ServerConnectionImpl::checkHeaderNameForUnderscores() { +Status ServerConnectionImpl::checkHeaderNameForUnderscores() { if (headers_with_underscores_action_ != envoy::config::core::v3::HttpProtocolOptions::ALLOW && Http::HeaderUtility::headerNameContainsUnderscore(current_header_field_.getStringView())) { if (headers_with_underscores_action_ == @@ -1045,11 +1114,12 @@ void ServerConnectionImpl::checkHeaderNameForUnderscores() { ENVOY_CONN_LOG(debug, "Rejecting request due to header name with underscores: {}", connection_, current_header_field_.getStringView()); error_code_ = Http::Code::BadRequest; - sendProtocolError(Http1ResponseCodeDetails::get().InvalidUnderscore); + RETURN_IF_ERROR(sendProtocolError(Http1ResponseCodeDetails::get().InvalidUnderscore)); stats_.requests_rejected_with_underscores_in_headers_.inc(); - throw CodecProtocolException("http/1.1 protocol error: header name contains underscores"); + return codecProtocolError("http/1.1 protocol error: header name contains underscores"); } } + return okStatus(); } ClientConnectionImpl::ClientConnectionImpl(Network::Connection& connection, CodecStats& stats, @@ -1071,10 +1141,6 @@ bool ClientConnectionImpl::cannotHaveBody() { } RequestEncoder& ClientConnectionImpl::newStream(ResponseDecoder& response_decoder) { - if (resetStreamCalled()) { - throw CodecClientException("cannot create new streams after calling reset"); - } - // If reads were disabled due to flow control, we expect reads to always be enabled again before // reusing this connection. This is done when the response is received. 
ASSERT(connection_.readEnabled()); @@ -1086,14 +1152,14 @@ RequestEncoder& ClientConnectionImpl::newStream(ResponseDecoder& response_decode return pending_response_.value().encoder_; } -int ClientConnectionImpl::onHeadersComplete() { +Envoy::StatusOr ClientConnectionImpl::onHeadersComplete() { ENVOY_CONN_LOG(trace, "status_code {}", connection_, parser_.status_code); // Handle the case where the client is closing a kept alive connection (by sending a 408 // with a 'Connection: close' header). In this case we just let response flush out followed // by the remote close. if (!pending_response_.has_value() && !resetStreamCalled()) { - throw PrematureResponseException(static_cast(parser_.status_code)); + return prematureResponseError("", static_cast(parser_.status_code)); } else if (pending_response_.has_value()) { ASSERT(!pending_response_done_); auto& headers = absl::get(headers_or_trailers_); @@ -1110,23 +1176,25 @@ int ClientConnectionImpl::onHeadersComplete() { if (headers->TransferEncoding() && absl::EqualsIgnoreCase(headers->TransferEncoding()->value().getStringView(), Headers::get().TransferEncodingValues.Chunked)) { - sendProtocolError(Http1ResponseCodeDetails::get().InvalidTransferEncoding); - throw CodecProtocolException("http/1.1 protocol error: unsupported transfer encoding"); + RETURN_IF_ERROR(sendProtocolError(Http1ResponseCodeDetails::get().InvalidTransferEncoding)); + return codecProtocolError("http/1.1 protocol error: unsupported transfer encoding"); } } if (strict_1xx_and_204_headers_ && (parser_.status_code < 200 || parser_.status_code == 204)) { if (headers->TransferEncoding()) { - sendProtocolError(Http1ResponseCodeDetails::get().TransferEncodingNotAllowed); - throw CodecProtocolException( + RETURN_IF_ERROR( + sendProtocolError(Http1ResponseCodeDetails::get().TransferEncodingNotAllowed)); + return codecProtocolError( "http/1.1 protocol error: transfer encoding not allowed in 1xx or 204"); } if (headers->ContentLength()) { // Report a protocol error for non-zero Content-Length, but paper over zero Content-Length. if (headers->ContentLength()->value().getStringView() != "0") { - sendProtocolError(Http1ResponseCodeDetails::get().ContentLengthNotAllowed); - throw CodecProtocolException( + RETURN_IF_ERROR( + sendProtocolError(Http1ResponseCodeDetails::get().ContentLengthNotAllowed)); + return codecProtocolError( "http/1.1 protocol error: content length not allowed in 1xx or 204"); } @@ -1154,8 +1222,8 @@ int ClientConnectionImpl::onHeadersComplete() { } } - // Here we deal with cases where the response cannot have a body, but http_parser does not deal - // with it for us. + // Here we deal with cases where the response cannot have a body by returning 1, but http_parser + // does not deal with it for us. return cannotHaveBody() ? 
1 : 0; } @@ -1215,11 +1283,12 @@ void ClientConnectionImpl::onResetStream(StreamResetReason reason) { } } -void ClientConnectionImpl::sendProtocolError(absl::string_view details) { +Status ClientConnectionImpl::sendProtocolError(absl::string_view details) { if (pending_response_.has_value()) { ASSERT(!pending_response_done_); pending_response_.value().encoder_.setDetails(details); } + return okStatus(); } void ClientConnectionImpl::onAboveHighWatermark() { diff --git a/source/common/http/http1/codec_impl.h b/source/common/http/http1/codec_impl.h index c74c0adae87c..0f8b5d7de71a 100644 --- a/source/common/http/http1/codec_impl.h +++ b/source/common/http/http1/codec_impl.h @@ -217,13 +217,38 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable&& statusor); + + // Codec errors found in callbacks are overridden within the http_parser library. This holds those + // errors to propagate them through to dispatch() where we can handle the error. + Envoy::Http::Status codec_status_; + protected: ConnectionImpl(Network::Connection& connection, CodecStats& stats, http_parser_type type, uint32_t max_headers_kb, const uint32_t max_headers_count, HeaderKeyFormatterPtr&& header_key_formatter, bool enable_trailers); + // The following define special return values for http_parser callbacks. See: + // https://github.com/nodejs/http-parser/blob/5c5b3ac62662736de9e71640a8dc16da45b32503/http_parser.h#L72 + // These codes do not overlap with standard HTTP Status codes. They are only used for user + // callbacks. + enum class HttpParserCode { + // Callbacks other than on_headers_complete should return a non-zero int to indicate an error + // and + // halt execution. + Error = -1, + Success = 0, + // Returning '1' from on_headers_complete will tell http_parser that it should not expect a + // body. + NoBody = 1, + // Returning '2' from on_headers_complete will tell http_parser that it should not expect a body + // nor any further data on the connection. + NoBodyData = 2, + }; + bool resetStreamCalled() { return reset_stream_called_; } - void onMessageBeginBase(); + Status onMessageBeginBase(); /** * Get memory used to represent HTTP headers or trailers currently being parsed. @@ -234,10 +259,10 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable dispatchSlice(const char* slice, size_t len); /** * Called by the http_parser when body data is received. @@ -314,37 +341,39 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable onHeadersCompleteBase(); + virtual Envoy::StatusOr onHeadersComplete() PURE; /** * Called to see if upgrade transition is allowed. @@ -365,8 +394,9 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable onHeadersComplete() override; // If upgrade behavior is not allowed, the HCM will have sanitized the headers out. 
bool upgradeAllowed() const override { return true; } void onBody(Buffer::Instance& data) override; void onResetStream(StreamResetReason reason) override; - void sendProtocolError(absl::string_view details) override; + Status sendProtocolError(absl::string_view details) override; void onAboveHighWatermark() override; void onBelowLowWatermark() override; HeaderMap& headersOrTrailers() override { @@ -495,8 +527,8 @@ class ServerConnectionImpl : public ServerConnection, public ConnectionImpl { void releaseOutboundResponse(const Buffer::OwnedBufferFragmentImpl* fragment); void maybeAddSentinelBufferFragment(Buffer::WatermarkBuffer& output_buffer) override; - void doFloodProtectionChecks() const; - void checkHeaderNameForUnderscores() override; + Status doFloodProtectionChecks() const; + Status checkHeaderNameForUnderscores() override; ServerConnectionCallbacks& callbacks_; absl::optional active_request_; @@ -545,14 +577,14 @@ class ClientConnectionImpl : public ClientConnection, public ConnectionImpl { // ConnectionImpl void onEncodeComplete() override {} - void onMessageBegin() override {} - void onUrl(const char*, size_t) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } - int onHeadersComplete() override; + Status onMessageBegin() override { return okStatus(); } + Status onUrl(const char*, size_t) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } + Envoy::StatusOr onHeadersComplete() override; bool upgradeAllowed() const override; void onBody(Buffer::Instance& data) override; void onMessageComplete() override; void onResetStream(StreamResetReason reason) override; - void sendProtocolError(absl::string_view details) override; + Status sendProtocolError(absl::string_view details) override; void onAboveHighWatermark() override; void onBelowLowWatermark() override; HeaderMap& headersOrTrailers() override { diff --git a/test/common/http/codec_impl_fuzz_test.cc b/test/common/http/codec_impl_fuzz_test.cc index 9c4534a0fdc4..50b4cac3aacf 100644 --- a/test/common/http/codec_impl_fuzz_test.cc +++ b/test/common/http/codec_impl_fuzz_test.cc @@ -50,7 +50,7 @@ template <> TestRequestHeaderMapImpl fromSanitizedHeaders(const test::fuzz::Headers& headers) { return Fuzz::fromHeaders(headers, {"transfer-encoding"}, - {":authority"}); + {":authority", ":method", ":path"}); } // Convert from test proto Http1ServerSettings to Http1Settings. 
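The refactor above replaces thrown CodecProtocolExceptions with Status and StatusOr return values that every caller forwards via RETURN_IF_ERROR, so a parse failure surfaces from dispatch() instead of unwinding the stack. A minimal sketch of that shape follows; it uses absl::Status/absl::StatusOr directly with simplified, made-up helper names (a local RETURN_IF_ERROR, checkHeaderSize) rather than Envoy's Http::Status wrappers and codecProtocolError(), so treat it as illustrative only.

#include <cstddef>
#include <iostream>

#include "absl/status/status.h"
#include "absl/status/statusor.h"

// Simplified stand-in for Envoy's RETURN_IF_ERROR: forward any non-OK status to the caller.
#define RETURN_IF_ERROR(expr)                                                                      \
  do {                                                                                             \
    const absl::Status _status = (expr);                                                           \
    if (!_status.ok()) {                                                                           \
      return _status;                                                                              \
    }                                                                                              \
  } while (0)

// Instead of throwing, validation helpers return a Status that callers must propagate.
absl::Status checkHeaderSize(size_t total_bytes, size_t limit_bytes) {
  if (total_bytes > limit_bytes) {
    return absl::InvalidArgumentError("headers size exceeds limit");
  }
  return absl::OkStatus();
}

// Callbacks that previously returned a bare int now return StatusOr<int>, so the
// http_parser return code (0 = continue, 1 = no body, 2 = no body or further data)
// and any error travel on the same channel.
absl::StatusOr<int> onHeadersComplete(size_t header_bytes) {
  RETURN_IF_ERROR(checkHeaderSize(header_bytes, 60 * 1024));
  return 0;
}

int main() {
  const auto result = onHeadersComplete(100 * 1024);
  if (!result.ok()) {
    std::cout << "dispatch failed: " << result.status().message() << "\n";
  } else {
    std::cout << "parser code: " << result.value() << "\n";
  }
  return 0;
}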
diff --git a/test/common/http/http1/codec_impl_test.cc b/test/common/http/http1/codec_impl_test.cc index 7ecd8baf1bb0..f6da689eacd6 100644 --- a/test/common/http/http1/codec_impl_test.cc +++ b/test/common/http/http1/codec_impl_test.cc @@ -68,8 +68,10 @@ class Http1CodecTestBase { class Http1ServerConnectionImplTest : public Http1CodecTestBase, public testing::TestWithParam { public: + bool testingNewCodec() { return GetParam(); } + void initialize() { - if (GetParam()) { + if (testingNewCodec()) { codec_ = std::make_unique( connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, max_request_headers_count_, headers_with_underscores_action_); @@ -135,7 +137,7 @@ void Http1ServerConnectionImplTest::expect400(Protocol p, bool allow_absolute_ur if (allow_absolute_url) { codec_settings_.allow_absolute_url_ = allow_absolute_url; - if (GetParam()) { + if (testingNewCodec()) { codec_ = std::make_unique( connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); @@ -171,7 +173,7 @@ void Http1ServerConnectionImplTest::expectHeadersTest(Protocol p, bool allow_abs // Make a new 'codec' with the right settings if (allow_absolute_url) { codec_settings_.allow_absolute_url_ = allow_absolute_url; - if (GetParam()) { + if (testingNewCodec()) { codec_ = std::make_unique( connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); @@ -198,7 +200,7 @@ void Http1ServerConnectionImplTest::expectTrailersTest(bool enable_trailers) { // Make a new 'codec' with the right settings if (enable_trailers) { codec_settings_.enable_trailers_ = enable_trailers; - if (GetParam()) { + if (testingNewCodec()) { codec_ = std::make_unique( connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); @@ -240,7 +242,7 @@ void Http1ServerConnectionImplTest::testTrailersExceedLimit(std::string trailer_ initialize(); // Make a new 'codec' with the right settings codec_settings_.enable_trailers_ = enable_trailers; - if (GetParam()) { + if (testingNewCodec()) { codec_ = std::make_unique( connection_, http1CodecStats(), callbacks_, codec_settings_, max_request_headers_kb_, max_request_headers_count_, envoy::config::core::v3::HttpProtocolOptions::ALLOW); @@ -1841,8 +1843,10 @@ TEST_P(Http1ServerConnectionImplTest, WatermarkTest) { class Http1ClientConnectionImplTest : public Http1CodecTestBase, public testing::TestWithParam { public: + bool testingNewCodec() { return GetParam(); } + void initialize() { - if (GetParam()) { + if (testingNewCodec()) { codec_ = std::make_unique( connection_, http1CodecStats(), callbacks_, codec_settings_, max_response_headers_count_); } else { @@ -1852,7 +1856,7 @@ class Http1ClientConnectionImplTest : public Http1CodecTestBase, } void readDisableOnRequestEncoder(RequestEncoder* request_encoder, bool disable) { - if (GetParam()) { + if (testingNewCodec()) { dynamic_cast(request_encoder)->readDisable(disable); } else { dynamic_cast(request_encoder)->readDisable(disable); @@ -2238,12 +2242,23 @@ TEST_P(Http1ClientConnectionImplTest, BadEncodeParams) { NiceMock response_decoder; - // Need to set :method and :path + // Need to set :method and :path. + // New and legacy codecs will behave differently on errors from processing outbound data. 
The + // legacy codecs will throw an exception (that presently will be uncaught in contexts like + // sendLocalReply), while the new codecs temporarily RELEASE_ASSERT until Envoy handles errors on + // outgoing data. Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder); - EXPECT_THROW(request_encoder.encodeHeaders(TestRequestHeaderMapImpl{{":path", "/"}}, true), - CodecClientException); - EXPECT_THROW(request_encoder.encodeHeaders(TestRequestHeaderMapImpl{{":method", "GET"}}, true), - CodecClientException); + if (testingNewCodec()) { + EXPECT_DEATH(request_encoder.encodeHeaders(TestRequestHeaderMapImpl{{":path", "/"}}, true), + ":method and :path must be specified"); + EXPECT_DEATH(request_encoder.encodeHeaders(TestRequestHeaderMapImpl{{":method", "GET"}}, true), + ":method and :path must be specified"); + } else { + EXPECT_THROW(request_encoder.encodeHeaders(TestRequestHeaderMapImpl{{":path", "/"}}, true), + CodecClientException); + EXPECT_THROW(request_encoder.encodeHeaders(TestRequestHeaderMapImpl{{":method", "GET"}}, true), + CodecClientException); + } } TEST_P(Http1ClientConnectionImplTest, NoContentLengthResponse) { From dc560dfaa0e7ce158eca8a2e8b5ba68246719ad2 Mon Sep 17 00:00:00 2001 From: Martin Matusiak Date: Tue, 11 Aug 2020 01:16:45 +1000 Subject: [PATCH 891/909] cleanup: remove unused forward declaration (#12515) Signed-off-by: Martin Matusiak Commit Message: cleanup: remove unused forward declaration Risk Level: Low Testing: bazel test //test/... Docs Changes: none Release Notes: n/a --- include/envoy/stats/stats.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/include/envoy/stats/stats.h b/include/envoy/stats/stats.h index c40e072c9d11..c03b1d58ad0b 100644 --- a/include/envoy/stats/stats.h +++ b/include/envoy/stats/stats.h @@ -15,8 +15,6 @@ namespace Envoy { namespace Stats { -class Allocator; - /** * General interface for all stats objects. */ From 9ed8092b5eac0874ff028b22fe72d28057ff94a1 Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Mon, 10 Aug 2020 10:07:45 -0700 Subject: [PATCH 892/909] build: mark virtual functions called in ctor/dtor final (#12558) Signed-off-by: Lizan Zhou Prevents undefined behavior and let clang-tidy not warn about it. 
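The motivation is easiest to see in a minimal sketch (not Envoy code): when a constructor or destructor calls a virtual member, dispatch stops at the class currently being constructed or destroyed, so a further-derived override is silently skipped, and the call is outright undefined behavior if the function is still pure virtual at that point. Declaring the member final in the class whose ctor/dtor makes the call guarantees nothing can override it later, which documents the intent and satisfies clang-tidy's virtual-call check.

#include <iostream>

class ConnectionBase {
public:
  virtual ~ConnectionBase() = default;
  virtual void close() = 0;
};

class ConnectionImpl : public ConnectionBase {
public:
  // Safe only because close() below is final: the destructor is guaranteed to bind to
  // exactly this implementation, never to a not-yet- or no-longer-constructed subclass.
  ~ConnectionImpl() override { close(); }
  void close() final { std::cout << "flushing and closing socket\n"; }
};

int main() {
  ConnectionImpl c;
  return 0;
}  // Destructor runs and calls the final close() implementation.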
--- source/common/network/connection_impl.h | 4 ++-- .../ssl/certificate_validation_context_config_impl.h | 2 +- source/common/stats/allocator_impl.cc | 2 +- source/common/stats/histogram_impl.h | 2 +- source/common/stats/scope_prefixer.h | 4 ++-- source/common/stats/thread_local_store.h | 7 ++++--- source/common/tcp_proxy/tcp_proxy.h | 2 +- source/common/upstream/upstream_impl.h | 2 +- .../common/dynamic_forward_proxy/dns_cache_impl.h | 2 +- source/extensions/filters/http/common/jwks_fetcher.cc | 2 +- source/extensions/filters/http/jwt_authn/jwks_cache.cc | 2 +- source/extensions/filters/udp/udp_proxy/udp_proxy_filter.h | 2 +- 12 files changed, 17 insertions(+), 16 deletions(-) diff --git a/source/common/network/connection_impl.h b/source/common/network/connection_impl.h index b464e2af96d1..17ebe609a263 100644 --- a/source/common/network/connection_impl.h +++ b/source/common/network/connection_impl.h @@ -60,7 +60,7 @@ class ConnectionImpl : public ConnectionImplBase, public TransportSocketCallback // Network::Connection void addBytesSentCallback(BytesSentCb cb) override; void enableHalfClose(bool enabled) override; - void close(ConnectionCloseType type) override; + void close(ConnectionCloseType type) final; std::string nextProtocol() const override { return transport_socket_->protocol(); } void noDelay(bool enable) override; void readDisable(bool disable) override; @@ -132,7 +132,7 @@ class ConnectionImpl : public ConnectionImplBase, public TransportSocketCallback bool consumerWantsToRead(); // Network::ConnectionImplBase - void closeConnectionImmediately() override; + void closeConnectionImmediately() final; void closeSocket(ConnectionEvent close_type); diff --git a/source/common/ssl/certificate_validation_context_config_impl.h b/source/common/ssl/certificate_validation_context_config_impl.h index f054039ee1ba..1636c2ed0713 100644 --- a/source/common/ssl/certificate_validation_context_config_impl.h +++ b/source/common/ssl/certificate_validation_context_config_impl.h @@ -21,7 +21,7 @@ class CertificateValidationContextConfigImpl : public CertificateValidationConte const std::string& certificateRevocationList() const override { return certificate_revocation_list_; } - const std::string& certificateRevocationListPath() const override { + const std::string& certificateRevocationListPath() const final { return certificate_revocation_list_path_; } const std::vector& verifySubjectAltNameList() const override { diff --git a/source/common/stats/allocator_impl.cc b/source/common/stats/allocator_impl.cc index 5e507db18522..63e3159a842e 100644 --- a/source/common/stats/allocator_impl.cc +++ b/source/common/stats/allocator_impl.cc @@ -63,7 +63,7 @@ template class StatsSharedImpl : public MetricImpl } // Metric - SymbolTable& symbolTable() override { return alloc_.symbolTable(); } + SymbolTable& symbolTable() final { return alloc_.symbolTable(); } bool used() const override { return flags_ & Metric::Flags::Used; } // RefcountInterface diff --git a/source/common/stats/histogram_impl.h b/source/common/stats/histogram_impl.h index a58c60fd5fc5..67c2d7d17066 100644 --- a/source/common/stats/histogram_impl.h +++ b/source/common/stats/histogram_impl.h @@ -108,7 +108,7 @@ class HistogramImpl : public HistogramImplHelper { void recordValue(uint64_t value) override { parent_.deliverHistogramToSinks(*this, value); } bool used() const override { return true; } - SymbolTable& symbolTable() override { return parent_.symbolTable(); } + SymbolTable& symbolTable() final { return parent_.symbolTable(); } private: 
Unit unit_; diff --git a/source/common/stats/scope_prefixer.h b/source/common/stats/scope_prefixer.h index 4257c1dd5ddf..b6872bc98dff 100644 --- a/source/common/stats/scope_prefixer.h +++ b/source/common/stats/scope_prefixer.h @@ -49,8 +49,8 @@ class ScopePrefixer : public Scope { HistogramOptConstRef findHistogram(StatName name) const override; TextReadoutOptConstRef findTextReadout(StatName name) const override; - const SymbolTable& constSymbolTable() const override { return scope_.constSymbolTable(); } - SymbolTable& symbolTable() override { return scope_.symbolTable(); } + const SymbolTable& constSymbolTable() const final { return scope_.constSymbolTable(); } + SymbolTable& symbolTable() final { return scope_.symbolTable(); } NullGaugeImpl& nullGauge(const std::string& str) override { return scope_.nullGauge(str); } diff --git a/source/common/stats/thread_local_store.h b/source/common/stats/thread_local_store.h index 23ce40e5fc15..c86844a2d38c 100644 --- a/source/common/stats/thread_local_store.h +++ b/source/common/stats/thread_local_store.h @@ -59,7 +59,7 @@ class ThreadLocalHistogramImpl : public HistogramImplHelper { void recordValue(uint64_t value) override; // Stats::Metric - SymbolTable& symbolTable() override { return symbol_table_; } + SymbolTable& symbolTable() final { return symbol_table_; } bool used() const override { return used_; } private: @@ -334,13 +334,14 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo ScopePtr createScope(const std::string& name) override { return parent_.createScope(symbolTable().toString(prefix_.statName()) + "." + name); } - const SymbolTable& constSymbolTable() const override { return parent_.constSymbolTable(); } - SymbolTable& symbolTable() override { return parent_.symbolTable(); } + const SymbolTable& constSymbolTable() const final { return parent_.constSymbolTable(); } + SymbolTable& symbolTable() final { return parent_.symbolTable(); } Counter& counterFromString(const std::string& name) override { StatNameManagedStorage storage(name, symbolTable()); return counterFromStatName(storage.statName()); } + Gauge& gaugeFromString(const std::string& name, Gauge::ImportMode import_mode) override { StatNameManagedStorage storage(name, symbolTable()); return gaugeFromStatName(storage.statName(), import_mode); diff --git a/source/common/tcp_proxy/tcp_proxy.h b/source/common/tcp_proxy/tcp_proxy.h index 871be2ad16f8..8a402e8a4cd2 100644 --- a/source/common/tcp_proxy/tcp_proxy.h +++ b/source/common/tcp_proxy/tcp_proxy.h @@ -322,7 +322,7 @@ class Filter : public Network::ReadFilter, bool on_high_watermark_called_{false}; }; - virtual StreamInfo::StreamInfo& getStreamInfo(); + StreamInfo::StreamInfo& getStreamInfo(); protected: struct DownstreamCallbacks : public Network::ConnectionCallbacks { diff --git a/source/common/upstream/upstream_impl.h b/source/common/upstream/upstream_impl.h index 999962a5b3b4..4a9e0a06468d 100644 --- a/source/common/upstream/upstream_impl.h +++ b/source/common/upstream/upstream_impl.h @@ -196,7 +196,7 @@ class HostImpl : public HostDescriptionImpl, } void healthFlagClear(HealthFlag flag) override { health_flags_ &= ~enumToInt(flag); } bool healthFlagGet(HealthFlag flag) const override { return health_flags_ & enumToInt(flag); } - void healthFlagSet(HealthFlag flag) override { health_flags_ |= enumToInt(flag); } + void healthFlagSet(HealthFlag flag) final { health_flags_ |= enumToInt(flag); } ActiveHealthFailureType getActiveHealthFailureType() const override { return active_health_failure_type_; diff --git 
a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.h b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.h index 6ba35d5a5f31..a7f1426c8be3 100644 --- a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.h +++ b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.h @@ -89,7 +89,7 @@ class DnsCacheImpl : public DnsCache, Logger::Loggablecancel(); ENVOY_LOG(debug, "fetch pubkey [uri = {}]: canceled", uri_->uri()); diff --git a/source/extensions/filters/http/jwt_authn/jwks_cache.cc b/source/extensions/filters/http/jwt_authn/jwks_cache.cc index a6020ad9c055..7ec91acd9806 100644 --- a/source/extensions/filters/http/jwt_authn/jwks_cache.cc +++ b/source/extensions/filters/http/jwt_authn/jwks_cache.cc @@ -115,7 +115,7 @@ class JwksCacheImpl : public JwksCache { return it->second; } - JwksData* findByProvider(const std::string& provider) override { + JwksData* findByProvider(const std::string& provider) final { const auto it = jwks_data_map_.find(provider); if (it == jwks_data_map_.end()) { return nullptr; diff --git a/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.h b/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.h index 68a85b3699d8..90c1f345ac38 100644 --- a/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.h +++ b/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.h @@ -226,7 +226,7 @@ class UdpProxyFilter : public Network::UdpListenerReadFilter, } // Upstream::ClusterUpdateCallbacks - void onClusterAddOrUpdate(Upstream::ThreadLocalCluster& cluster) override; + void onClusterAddOrUpdate(Upstream::ThreadLocalCluster& cluster) final; void onClusterRemoval(const std::string& cluster_name) override; const UdpProxyFilterConfigSharedPtr config_; From 0d74a8bd03e2f54331262b963c158e46a7f8a9fb Mon Sep 17 00:00:00 2001 From: Roelof DuToit Date: Mon, 10 Aug 2020 13:22:27 -0400 Subject: [PATCH 893/909] buffer: add method to extract front slice without copying (#12439) Add a method to Envoy::Buffer::Instance that may be used to extract the front slice of the implementation's queue (SliceDeque in the case of Buffer::OwnedImpl) without copying the actual payload. A SliceData class is defined to facilitate the extraction process. Signed-off-by: Roelof DuToit --- include/envoy/buffer/buffer.h | 25 +++ source/common/buffer/buffer_impl.cc | 28 ++++ source/common/buffer/buffer_impl.h | 45 ++++-- source/common/buffer/watermark_buffer.cc | 15 ++ source/common/buffer/watermark_buffer.h | 3 + test/common/buffer/buffer_fuzz.cc | 2 + test/common/buffer/owned_impl_test.cc | 163 ++++++++++++++++++++ test/common/buffer/watermark_buffer_test.cc | 33 ++++ 8 files changed, 305 insertions(+), 9 deletions(-) diff --git a/include/envoy/buffer/buffer.h b/include/envoy/buffer/buffer.h index aca59b31d695..6e4f52644e37 100644 --- a/include/envoy/buffer/buffer.h +++ b/include/envoy/buffer/buffer.h @@ -16,6 +16,7 @@ #include "absl/container/inlined_vector.h" #include "absl/strings/string_view.h" #include "absl/types/optional.h" +#include "absl/types/span.h" namespace Envoy { namespace Buffer { @@ -55,6 +56,21 @@ class BufferFragment { virtual void done() PURE; }; +/** + * A class to facilitate extracting buffer slices from a buffer instance. + */ +class SliceData { +public: + virtual ~SliceData() = default; + + /** + * @return a mutable view of the slice data. + */ + virtual absl::Span getMutableData() PURE; +}; + +using SliceDataPtr = std::unique_ptr; + /** * A basic buffer abstraction. 
*/ @@ -144,6 +160,15 @@ class Instance { virtual RawSliceVector getRawSlices(absl::optional max_slices = absl::nullopt) const PURE; + /** + * Transfer ownership of the front slice to the caller. Must only be called if the + * buffer is not empty otherwise the implementation will have undefined behavior. + * If the underlying slice is immutable then the implementation must create and return + * a mutable slice that has a copy of the immutable data. + * @return pointer to SliceData object that wraps the front slice + */ + virtual SliceDataPtr extractMutableFrontSlice() PURE; + /** * @return uint64_t the total length of the buffer (not necessarily contiguous in memory). */ diff --git a/source/common/buffer/buffer_impl.cc b/source/common/buffer/buffer_impl.cc index 0ad095135e57..0b92c7a426f5 100644 --- a/source/common/buffer/buffer_impl.cc +++ b/source/common/buffer/buffer_impl.cc @@ -201,6 +201,34 @@ RawSliceVector OwnedImpl::getRawSlices(absl::optional max_slices) cons return raw_slices; } +SliceDataPtr OwnedImpl::extractMutableFrontSlice() { + RELEASE_ASSERT(length_ > 0, "Extract called on empty buffer"); + // Remove zero byte fragments from the front of the queue to ensure + // that the extracted slice has data. + while (!slices_.empty() && slices_.front()->dataSize() == 0) { + slices_.pop_front(); + } + ASSERT(!slices_.empty()); + ASSERT(slices_.front()); + auto slice = std::move(slices_.front()); + auto size = slice->dataSize(); + length_ -= size; + slices_.pop_front(); + if (!slice->isMutable()) { + // Create a mutable copy of the immutable slice data. + auto mutable_slice = OwnedSlice::create(size); + auto copy_size = mutable_slice->append(slice->data(), size); + ASSERT(copy_size == size); + // Drain trackers for the immutable slice will be called as part of the slice destructor. + return mutable_slice; + } else { + // Make sure drain trackers are called before ownership of the slice is transferred from + // the buffer to the caller. + slice->callAndClearDrainTrackers(); + return slice; + } +} + uint64_t OwnedImpl::length() const { #ifndef NDEBUG // When running in debug mode, verify that the precomputed length matches the sum diff --git a/source/common/buffer/buffer_impl.h b/source/common/buffer/buffer_impl.h index 05e673d6b2ae..92ff88742dc1 100644 --- a/source/common/buffer/buffer_impl.h +++ b/source/common/buffer/buffer_impl.h @@ -31,16 +31,23 @@ namespace Buffer { * | * data() */ -class Slice { +class Slice : public SliceData { public: using Reservation = RawSlice; - virtual ~Slice() { - for (const auto& drain_tracker : drain_trackers_) { - drain_tracker(); - } + ~Slice() override { callAndClearDrainTrackers(); } + + // SliceData + absl::Span getMutableData() override { + RELEASE_ASSERT(isMutable(), "Not allowed to call getMutableData if slice is immutable"); + return {base_ + data_, reservable_ - data_}; } + /** + * @return true if the data in the slice is mutable + */ + virtual bool isMutable() const { return false; } + /** * @return a pointer to the start of the usable content. */ @@ -117,10 +124,10 @@ class Slice { * @param reservation a reservation obtained from a previous call to reserve(). * If the reservation is not from this Slice, commit() will return false. * If the caller is committing fewer bytes than provided by reserve(), it - * should change the mem_ field of the reservation before calling commit(). + * should change the len_ field of the reservation before calling commit(). 
* For example, if a caller reserve()s 4KB to do a nonblocking socket read, * and the read only returns two bytes, the caller should set - * reservation.mem_ = 2 and then call `commit(reservation)`. + * reservation.len_ = 2 and then call `commit(reservation)`. * @return whether the Reservation was successfully committed to the Slice. */ bool commit(const Reservation& reservation) { @@ -200,15 +207,32 @@ class Slice { return SliceRepresentation{dataSize(), reservableSize(), capacity_}; } + /** + * Move all drain trackers from the current slice to the destination slice. + */ void transferDrainTrackersTo(Slice& destination) { destination.drain_trackers_.splice(destination.drain_trackers_.end(), drain_trackers_); ASSERT(drain_trackers_.empty()); } + /** + * Add a drain tracker to the slice. + */ void addDrainTracker(std::function drain_tracker) { drain_trackers_.emplace_back(std::move(drain_tracker)); } + /** + * Call all drain trackers associated with the slice, then clear + * the drain tracker list. + */ + void callAndClearDrainTrackers() { + for (const auto& drain_tracker : drain_trackers_) { + drain_tracker(); + } + drain_trackers_.clear(); + } + protected: Slice(uint64_t data, uint64_t reservable, uint64_t capacity) : data_(data), reservable_(reservable), capacity_(capacity) {} @@ -261,6 +285,8 @@ class OwnedSlice final : public Slice, public InlineStorage { private: OwnedSlice(uint64_t size) : Slice(0, 0, size) { base_ = storage_; } + bool isMutable() const override { return true; } + /** * Compute a slice size big enough to hold a specified amount of data. * @param data_size the minimum amount of data the slice must be able to store, in bytes. @@ -539,6 +565,7 @@ class OwnedImpl : public LibEventInstance { void copyOut(size_t start, uint64_t size, void* data) const override; void drain(uint64_t size) override; RawSliceVector getRawSlices(absl::optional max_slices = absl::nullopt) const override; + SliceDataPtr extractMutableFrontSlice() override; uint64_t length() const override; void* linearize(uint32_t size) override; void move(Instance& rhs) override; @@ -558,13 +585,13 @@ class OwnedImpl : public LibEventInstance { * @param data start of the content to copy. * */ - void appendSliceForTest(const void* data, uint64_t size); + virtual void appendSliceForTest(const void* data, uint64_t size); /** * Create a new slice at the end of the buffer, and copy the supplied string into it. * @param data the string to append to the buffer. */ - void appendSliceForTest(absl::string_view data); + virtual void appendSliceForTest(absl::string_view data); /** * Describe the in-memory representation of the slices in the buffer. 
For use diff --git a/source/common/buffer/watermark_buffer.cc b/source/common/buffer/watermark_buffer.cc index e3537ffe7943..9d566be1965d 100644 --- a/source/common/buffer/watermark_buffer.cc +++ b/source/common/buffer/watermark_buffer.cc @@ -51,6 +51,12 @@ void WatermarkBuffer::move(Instance& rhs, uint64_t length) { checkHighAndOverflowWatermarks(); } +SliceDataPtr WatermarkBuffer::extractMutableFrontSlice() { + auto result = OwnedImpl::extractMutableFrontSlice(); + checkLowWatermark(); + return result; +} + Api::IoCallUint64Result WatermarkBuffer::read(Network::IoHandle& io_handle, uint64_t max_length) { Api::IoCallUint64Result result = OwnedImpl::read(io_handle, max_length); checkHighAndOverflowWatermarks(); @@ -69,6 +75,15 @@ Api::IoCallUint64Result WatermarkBuffer::write(Network::IoHandle& io_handle) { return result; } +void WatermarkBuffer::appendSliceForTest(const void* data, uint64_t size) { + OwnedImpl::appendSliceForTest(data, size); + checkHighAndOverflowWatermarks(); +} + +void WatermarkBuffer::appendSliceForTest(absl::string_view data) { + appendSliceForTest(data.data(), data.size()); +} + void WatermarkBuffer::setWatermarks(uint32_t low_watermark, uint32_t high_watermark) { ASSERT(low_watermark < high_watermark || (high_watermark == 0 && low_watermark == 0)); uint32_t overflow_watermark_multiplier = diff --git a/source/common/buffer/watermark_buffer.h b/source/common/buffer/watermark_buffer.h index 127069307902..de44822a56ab 100644 --- a/source/common/buffer/watermark_buffer.h +++ b/source/common/buffer/watermark_buffer.h @@ -34,10 +34,13 @@ class WatermarkBuffer : public OwnedImpl { void drain(uint64_t size) override; void move(Instance& rhs) override; void move(Instance& rhs, uint64_t length) override; + SliceDataPtr extractMutableFrontSlice() override; Api::IoCallUint64Result read(Network::IoHandle& io_handle, uint64_t max_length) override; uint64_t reserve(uint64_t length, RawSlice* iovecs, uint64_t num_iovecs) override; Api::IoCallUint64Result write(Network::IoHandle& io_handle) override; void postProcess() override { checkLowWatermark(); } + void appendSliceForTest(const void* data, uint64_t size) override; + void appendSliceForTest(absl::string_view data) override; void setWatermarks(uint32_t watermark) { setWatermarks(watermark / 2, watermark); } void setWatermarks(uint32_t low_watermark, uint32_t high_watermark); diff --git a/test/common/buffer/buffer_fuzz.cc b/test/common/buffer/buffer_fuzz.cc index 9c80f4655b09..5ab1bd85c4ae 100644 --- a/test/common/buffer/buffer_fuzz.cc +++ b/test/common/buffer/buffer_fuzz.cc @@ -133,6 +133,8 @@ class StringBuffer : public Buffer::Instance { return mutableStart(); } + Buffer::SliceDataPtr extractMutableFrontSlice() override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } + void move(Buffer::Instance& rhs) override { move(rhs, rhs.length()); } void move(Buffer::Instance& rhs, uint64_t length) override { diff --git a/test/common/buffer/owned_impl_test.cc b/test/common/buffer/owned_impl_test.cc index 42246acb357a..ce7ec99e3847 100644 --- a/test/common/buffer/owned_impl_test.cc +++ b/test/common/buffer/owned_impl_test.cc @@ -347,6 +347,169 @@ TEST_F(OwnedImplTest, Read) { EXPECT_THAT(buffer.describeSlicesForTest(), testing::IsEmpty()); } +TEST_F(OwnedImplTest, ExtractOwnedSlice) { + // Create a buffer with two owned slices. 
+ Buffer::OwnedImpl buffer; + buffer.appendSliceForTest("abcde"); + const uint64_t expected_length0 = 5; + buffer.appendSliceForTest("123"); + const uint64_t expected_length1 = 3; + EXPECT_EQ(buffer.toString(), "abcde123"); + RawSliceVector slices = buffer.getRawSlices(); + EXPECT_EQ(2, slices.size()); + + // Extract first slice. + auto slice = buffer.extractMutableFrontSlice(); + ASSERT_TRUE(slice); + auto slice_data = slice->getMutableData(); + ASSERT_NE(slice_data.data(), nullptr); + EXPECT_EQ(slice_data.size(), expected_length0); + EXPECT_EQ("abcde", + absl::string_view(reinterpret_cast(slice_data.data()), slice_data.size())); + EXPECT_EQ(buffer.toString(), "123"); + + // Modify and re-add extracted first slice data to the end of the buffer. + auto slice_mutable_data = slice->getMutableData(); + ASSERT_NE(slice_mutable_data.data(), nullptr); + EXPECT_EQ(slice_mutable_data.size(), expected_length0); + *slice_mutable_data.data() = 'A'; + buffer.appendSliceForTest(slice_mutable_data.data(), slice_mutable_data.size()); + EXPECT_EQ(buffer.toString(), "123Abcde"); + + // Extract second slice, leaving only the original first slice. + slice = buffer.extractMutableFrontSlice(); + ASSERT_TRUE(slice); + slice_data = slice->getMutableData(); + ASSERT_NE(slice_data.data(), nullptr); + EXPECT_EQ(slice_data.size(), expected_length1); + EXPECT_EQ("123", + absl::string_view(reinterpret_cast(slice_data.data()), slice_data.size())); + EXPECT_EQ(buffer.toString(), "Abcde"); +} + +TEST_F(OwnedImplTest, ExtractAfterSentinelDiscard) { + // Create a buffer with a sentinel and one owned slice. + Buffer::OwnedImpl buffer; + bool sentinel_discarded = false; + const Buffer::OwnedBufferFragmentImpl::Releasor sentinel_releasor{ + [&](const Buffer::OwnedBufferFragmentImpl* sentinel) { + sentinel_discarded = true; + delete sentinel; + }}; + auto sentinel = + Buffer::OwnedBufferFragmentImpl::create(absl::string_view("", 0), sentinel_releasor); + buffer.addBufferFragment(*sentinel.release()); + + buffer.appendSliceForTest("abcde"); + const uint64_t expected_length = 5; + EXPECT_EQ(buffer.toString(), "abcde"); + RawSliceVector slices = buffer.getRawSlices(); // only returns slices with data + EXPECT_EQ(1, slices.size()); + + // Extract owned slice after discarding sentinel. + EXPECT_FALSE(sentinel_discarded); + auto slice = buffer.extractMutableFrontSlice(); + ASSERT_TRUE(slice); + EXPECT_TRUE(sentinel_discarded); + auto slice_data = slice->getMutableData(); + ASSERT_NE(slice_data.data(), nullptr); + EXPECT_EQ(slice_data.size(), expected_length); + EXPECT_EQ("abcde", + absl::string_view(reinterpret_cast(slice_data.data()), slice_data.size())); + EXPECT_EQ(0, buffer.length()); +} + +TEST_F(OwnedImplTest, DrainThenExtractOwnedSlice) { + // Create a buffer with two owned slices. + Buffer::OwnedImpl buffer; + buffer.appendSliceForTest("abcde"); + const uint64_t expected_length0 = 5; + buffer.appendSliceForTest("123"); + EXPECT_EQ(buffer.toString(), "abcde123"); + RawSliceVector slices = buffer.getRawSlices(); + EXPECT_EQ(2, slices.size()); + + // Partially drain the first slice. + const uint64_t partial_drain_size = 2; + buffer.drain(partial_drain_size); + EXPECT_EQ(buffer.toString(), static_cast("abcde123") + partial_drain_size); + + // Extracted partially drained first slice, leaving the second slice. 
+ auto slice = buffer.extractMutableFrontSlice(); + ASSERT_TRUE(slice); + auto slice_data = slice->getMutableData(); + ASSERT_NE(slice_data.data(), nullptr); + EXPECT_EQ(slice_data.size(), expected_length0 - partial_drain_size); + EXPECT_EQ(static_cast("abcde") + partial_drain_size, + absl::string_view(reinterpret_cast(slice_data.data()), slice_data.size())); + EXPECT_EQ(buffer.toString(), "123"); +} + +TEST_F(OwnedImplTest, ExtractUnownedSlice) { + // Create a buffer with an unowned slice. + std::string input{"unowned test slice"}; + const size_t expected_length0 = input.size(); + auto frag = OwnedBufferFragmentImpl::create( + {input.c_str(), expected_length0}, + [this](const OwnedBufferFragmentImpl*) { release_callback_called_ = true; }); + Buffer::OwnedImpl buffer; + buffer.addBufferFragment(*frag); + + bool drain_tracker_called{false}; + buffer.addDrainTracker([&] { drain_tracker_called = true; }); + + // Add an owned slice to the end of the buffer. + EXPECT_EQ(expected_length0, buffer.length()); + std::string owned_slice_content{"another slice, but owned"}; + buffer.add(owned_slice_content); + const uint64_t expected_length1 = owned_slice_content.length(); + + // Partially drain the unowned slice. + const uint64_t partial_drain_size = 5; + buffer.drain(partial_drain_size); + EXPECT_EQ(expected_length0 - partial_drain_size + expected_length1, buffer.length()); + EXPECT_FALSE(release_callback_called_); + EXPECT_FALSE(drain_tracker_called); + + // Extract what remains of the unowned slice, leaving only the owned slice. + auto slice = buffer.extractMutableFrontSlice(); + ASSERT_TRUE(slice); + EXPECT_TRUE(drain_tracker_called); + auto slice_data = slice->getMutableData(); + ASSERT_NE(slice_data.data(), nullptr); + EXPECT_EQ(slice_data.size(), expected_length0 - partial_drain_size); + EXPECT_EQ(input.data() + partial_drain_size, + absl::string_view(reinterpret_cast(slice_data.data()), slice_data.size())); + EXPECT_EQ(expected_length1, buffer.length()); + + // The underlying immutable unowned slice was discarded during the extract + // operation and replaced with a mutable copy. The drain trackers were + // called as part of the extract, implying that the release callback was called. + EXPECT_TRUE(release_callback_called_); +} + +TEST_F(OwnedImplTest, ExtractWithDrainTracker) { + testing::InSequence s; + + Buffer::OwnedImpl buffer; + buffer.add("a"); + + testing::MockFunction tracker1; + testing::MockFunction tracker2; + buffer.addDrainTracker(tracker1.AsStdFunction()); + buffer.addDrainTracker(tracker2.AsStdFunction()); + + testing::MockFunction done; + EXPECT_CALL(tracker1, Call()); + EXPECT_CALL(tracker2, Call()); + EXPECT_CALL(done, Call()); + auto slice = buffer.extractMutableFrontSlice(); + // The test now has ownership of the slice, but the drain trackers were + // called as part of the extract operation + done.Call(); + slice.reset(); +} + TEST_F(OwnedImplTest, DrainTracking) { testing::InSequence s; diff --git a/test/common/buffer/watermark_buffer_test.cc b/test/common/buffer/watermark_buffer_test.cc index 476967254f35..3e7cf0b57eed 100644 --- a/test/common/buffer/watermark_buffer_test.cc +++ b/test/common/buffer/watermark_buffer_test.cc @@ -142,6 +142,7 @@ TEST_F(WatermarkBufferTest, Drain) { buffer_.add(TEN_BYTES, 11); buffer_.drain(5); EXPECT_EQ(6, buffer_.length()); + EXPECT_EQ(1, times_high_watermark_called_); EXPECT_EQ(0, times_low_watermark_called_); // Now drain below. 
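For callers, the new method turns the usual copyOut()-plus-drain() dance into a single ownership transfer. Below is a small usage sketch, assuming the include path shown and only the types this patch introduces (SliceDataPtr, getMutableData()); for an unowned front slice the buffer hands back a mutable copy, and drain trackers fire as part of the extraction.

#include <iostream>

#include "absl/types/span.h"
#include "common/buffer/buffer_impl.h"

void consumeFront(Envoy::Buffer::Instance& buffer) {
  while (buffer.length() > 0) {
    // Transfers ownership of the front slice out of the buffer without copying
    // (unless the slice was immutable, in which case a mutable copy is returned).
    Envoy::Buffer::SliceDataPtr slice = buffer.extractMutableFrontSlice();
    absl::Span<uint8_t> data = slice->getMutableData();
    std::cout << "got slice of " << data.size() << " bytes\n";
  }
}

int main() {
  Envoy::Buffer::OwnedImpl buffer;
  buffer.add("hello ");
  buffer.add("world");
  consumeFront(buffer);
  return 0;
}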
@@ -153,6 +154,38 @@ TEST_F(WatermarkBufferTest, Drain) { EXPECT_EQ(2, times_high_watermark_called_); } +TEST_F(WatermarkBufferTest, DrainUsingExtract) { + // Similar to `Drain` test, but using extractMutableFrontSlice() instead of drain(). + buffer_.add(TEN_BYTES, 10); + ASSERT_EQ(buffer_.length(), 10); + buffer_.extractMutableFrontSlice(); + EXPECT_EQ(0, times_high_watermark_called_); + EXPECT_EQ(0, times_low_watermark_called_); + + // Go above the high watermark then drain down to just at the low watermark. + buffer_.appendSliceForTest(TEN_BYTES, 5); + buffer_.appendSliceForTest(TEN_BYTES, 1); + buffer_.appendSliceForTest(TEN_BYTES, 5); + EXPECT_EQ(1, times_high_watermark_called_); + EXPECT_EQ(0, times_low_watermark_called_); + auto slice0 = buffer_.extractMutableFrontSlice(); // essentially drain(5) + ASSERT_TRUE(slice0); + EXPECT_EQ(slice0->getMutableData().size(), 5); + EXPECT_EQ(6, buffer_.length()); + EXPECT_EQ(0, times_low_watermark_called_); + + // Now drain below. + auto slice1 = buffer_.extractMutableFrontSlice(); // essentially drain(1) + ASSERT_TRUE(slice1); + EXPECT_EQ(slice1->getMutableData().size(), 1); + EXPECT_EQ(1, times_high_watermark_called_); + EXPECT_EQ(1, times_low_watermark_called_); + + // Going back above should trigger the high again. + buffer_.add(TEN_BYTES, 10); + EXPECT_EQ(2, times_high_watermark_called_); +} + // Verify that low watermark callback is called on drain in the case where the // high watermark is non-zero and low watermark is 0. TEST_F(WatermarkBufferTest, DrainWithLowWatermarkOfZero) { From 520389e677cdcd4a85df769deb40f6cdd2f4f6f8 Mon Sep 17 00:00:00 2001 From: Yangmin Zhu Date: Mon, 10 Aug 2020 10:29:44 -0700 Subject: [PATCH 894/909] tap: factor out the TAP filter matcher for later reuse in other filters (#12429) This is the 1st PR for #11832 that factors out the TAP filter matcher to prepare for reuse in other filters. 
Signed-off-by: Yangmin Zhu --- CODEOWNERS | 2 + api/BUILD | 1 + api/envoy/config/common/matcher/v3/BUILD | 12 ++ .../config/common/matcher/v3/matcher.proto | 100 +++++++++++++++ api/envoy/config/common/matcher/v4alpha/BUILD | 13 ++ .../common/matcher/v4alpha/matcher.proto | 114 ++++++++++++++++++ api/envoy/config/tap/v3/BUILD | 1 + api/envoy/config/tap/v3/common.proto | 13 +- api/envoy/config/tap/v4alpha/BUILD | 1 + api/envoy/config/tap/v4alpha/common.proto | 10 +- api/versioning/BUILD | 1 + docs/root/api-v3/config/common/common.rst | 1 + docs/root/version_history/current.rst | 2 + .../envoy/config/common/matcher/v3/BUILD | 12 ++ .../config/common/matcher/v3/matcher.proto | 100 +++++++++++++++ .../envoy/config/common/matcher/v4alpha/BUILD | 13 ++ .../common/matcher/v4alpha/matcher.proto | 114 ++++++++++++++++++ .../envoy/config/tap/v3/BUILD | 1 + .../envoy/config/tap/v3/common.proto | 13 +- .../envoy/config/tap/v4alpha/BUILD | 1 + .../envoy/config/tap/v4alpha/common.proto | 13 +- source/extensions/common/matcher/BUILD | 21 ++++ .../tap_matcher.cc => matcher/matcher.cc} | 52 ++++---- .../{tap/tap_matcher.h => matcher/matcher.h} | 34 ++---- source/extensions/common/tap/BUILD | 16 +-- source/extensions/common/tap/admin.h | 1 - source/extensions/common/tap/tap.h | 4 +- .../extensions/common/tap/tap_config_base.cc | 21 +++- .../extensions/common/tap/tap_config_base.h | 5 +- test/extensions/common/matcher/BUILD | 19 +++ .../matcher_test.cc} | 24 ++-- test/extensions/common/tap/BUILD | 10 -- test/extensions/common/tap/admin_test.cc | 2 +- test/extensions/filters/http/tap/BUILD | 1 + .../http/tap/tap_filter_integration_test.cc | 99 +++++++++++---- .../filters/http/tap/tap_filter_test.cc | 25 +++- .../tls/integration/ssl_integration_test.cc | 6 +- 37 files changed, 756 insertions(+), 122 deletions(-) create mode 100644 api/envoy/config/common/matcher/v3/BUILD create mode 100644 api/envoy/config/common/matcher/v3/matcher.proto create mode 100644 api/envoy/config/common/matcher/v4alpha/BUILD create mode 100644 api/envoy/config/common/matcher/v4alpha/matcher.proto create mode 100644 generated_api_shadow/envoy/config/common/matcher/v3/BUILD create mode 100644 generated_api_shadow/envoy/config/common/matcher/v3/matcher.proto create mode 100644 generated_api_shadow/envoy/config/common/matcher/v4alpha/BUILD create mode 100644 generated_api_shadow/envoy/config/common/matcher/v4alpha/matcher.proto create mode 100644 source/extensions/common/matcher/BUILD rename source/extensions/common/{tap/tap_matcher.cc => matcher/matcher.cc} (87%) rename source/extensions/common/{tap/tap_matcher.h => matcher/matcher.h} (94%) create mode 100644 test/extensions/common/matcher/BUILD rename test/extensions/common/{tap/tap_matcher_test.cc => matcher/matcher_test.cc} (97%) diff --git a/CODEOWNERS b/CODEOWNERS index 8b206bb0f1c7..3c6ccecfac91 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -80,6 +80,8 @@ extensions/filters/common/original_src @snowp @klarose /*/extensions/filters/common/expr @kyessenov @yangminzhu @lizan # webassembly common extension /*/extensions/common/wasm @jplevyak @PiotrSikora @lizan +# common matcher +/*/extensions/common/matcher @mattklein123 @yangminzhu # common crypto extension /*/extensions/common/crypto @lizan @PiotrSikora @bdecoste /*/extensions/common/proxy_protocol @alyssawilk @wez470 diff --git a/api/BUILD b/api/BUILD index 3ac2738ebc3e..99bd1b119c62 100644 --- a/api/BUILD +++ b/api/BUILD @@ -130,6 +130,7 @@ proto_library( "//envoy/config/accesslog/v3:pkg", "//envoy/config/bootstrap/v3:pkg", 
"//envoy/config/cluster/v3:pkg", + "//envoy/config/common/matcher/v3:pkg", "//envoy/config/core/v3:pkg", "//envoy/config/endpoint/v3:pkg", "//envoy/config/filter/thrift/router/v2alpha1:pkg", diff --git a/api/envoy/config/common/matcher/v3/BUILD b/api/envoy/config/common/matcher/v3/BUILD new file mode 100644 index 000000000000..c312b8eb6a61 --- /dev/null +++ b/api/envoy/config/common/matcher/v3/BUILD @@ -0,0 +1,12 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/route/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/config/common/matcher/v3/matcher.proto b/api/envoy/config/common/matcher/v3/matcher.proto new file mode 100644 index 000000000000..d0955e7a1f8c --- /dev/null +++ b/api/envoy/config/common/matcher/v3/matcher.proto @@ -0,0 +1,100 @@ +syntax = "proto3"; + +package envoy.config.common.matcher.v3; + +import "envoy/config/route/v3/route_components.proto"; + +import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.common.matcher.v3"; +option java_outer_classname = "MatcherProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Unified Matcher API] + +// Match configuration. This is a recursive structure which allows complex nested match +// configurations to be built using various logical operators. +// [#next-free-field: 11] +message MatchPredicate { + // A set of match configurations used for logical operations. + message MatchSet { + // The list of rules that make up the set. + repeated MatchPredicate rules = 1 [(validate.rules).repeated = {min_items: 2}]; + } + + oneof rule { + option (validate.required) = true; + + // A set that describes a logical OR. If any member of the set matches, the match configuration + // matches. + MatchSet or_match = 1; + + // A set that describes a logical AND. If all members of the set match, the match configuration + // matches. + MatchSet and_match = 2; + + // A negation match. The match configuration will match if the negated match condition matches. + MatchPredicate not_match = 3; + + // The match configuration will always match. + bool any_match = 4 [(validate.rules).bool = {const: true}]; + + // HTTP request headers match configuration. + HttpHeadersMatch http_request_headers_match = 5; + + // HTTP request trailers match configuration. + HttpHeadersMatch http_request_trailers_match = 6; + + // HTTP response headers match configuration. + HttpHeadersMatch http_response_headers_match = 7; + + // HTTP response trailers match configuration. + HttpHeadersMatch http_response_trailers_match = 8; + + // HTTP request generic body match configuration. + HttpGenericBodyMatch http_request_generic_body_match = 9; + + // HTTP response generic body match configuration. + HttpGenericBodyMatch http_response_generic_body_match = 10; + } +} + +// HTTP headers match configuration. +message HttpHeadersMatch { + // HTTP headers to match. + repeated route.v3.HeaderMatcher headers = 1; +} + +// HTTP generic body match configuration. +// List of text strings and hex strings to be located in HTTP body. +// All specified strings must be found in the HTTP body for positive match. 
+// The search may be limited to specified number of bytes from the body start. +// +// .. attention:: +// +// Searching for patterns in HTTP body is potentially cpu intensive. For each specified pattern, http body is scanned byte by byte to find a match. +// If multiple patterns are specified, the process is repeated for each pattern. If location of a pattern is known, ``bytes_limit`` should be specified +// to scan only part of the http body. +message HttpGenericBodyMatch { + message GenericTextMatch { + oneof rule { + option (validate.required) = true; + + // Text string to be located in HTTP body. + string string_match = 1; + + // Sequence of bytes to be located in HTTP body. + bytes binary_match = 2; + } + } + + // Limits search to specified number of bytes - default zero (no limit - match entire captured buffer). + uint32 bytes_limit = 1; + + // List of patterns to match. + repeated GenericTextMatch patterns = 2 [(validate.rules).repeated = {min_items: 1}]; +} diff --git a/api/envoy/config/common/matcher/v4alpha/BUILD b/api/envoy/config/common/matcher/v4alpha/BUILD new file mode 100644 index 000000000000..7028ce1a2aea --- /dev/null +++ b/api/envoy/config/common/matcher/v4alpha/BUILD @@ -0,0 +1,13 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/common/matcher/v3:pkg", + "//envoy/config/route/v4alpha:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/config/common/matcher/v4alpha/matcher.proto b/api/envoy/config/common/matcher/v4alpha/matcher.proto new file mode 100644 index 000000000000..685ae03a1878 --- /dev/null +++ b/api/envoy/config/common/matcher/v4alpha/matcher.proto @@ -0,0 +1,114 @@ +syntax = "proto3"; + +package envoy.config.common.matcher.v4alpha; + +import "envoy/config/route/v4alpha/route_components.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.common.matcher.v4alpha"; +option java_outer_classname = "MatcherProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Unified Matcher API] + +// Match configuration. This is a recursive structure which allows complex nested match +// configurations to be built using various logical operators. +// [#next-free-field: 11] +message MatchPredicate { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.common.matcher.v3.MatchPredicate"; + + // A set of match configurations used for logical operations. + message MatchSet { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.common.matcher.v3.MatchPredicate.MatchSet"; + + // The list of rules that make up the set. + repeated MatchPredicate rules = 1 [(validate.rules).repeated = {min_items: 2}]; + } + + oneof rule { + option (validate.required) = true; + + // A set that describes a logical OR. If any member of the set matches, the match configuration + // matches. + MatchSet or_match = 1; + + // A set that describes a logical AND. If all members of the set match, the match configuration + // matches. + MatchSet and_match = 2; + + // A negation match. The match configuration will match if the negated match condition matches. 
+ MatchPredicate not_match = 3; + + // The match configuration will always match. + bool any_match = 4 [(validate.rules).bool = {const: true}]; + + // HTTP request headers match configuration. + HttpHeadersMatch http_request_headers_match = 5; + + // HTTP request trailers match configuration. + HttpHeadersMatch http_request_trailers_match = 6; + + // HTTP response headers match configuration. + HttpHeadersMatch http_response_headers_match = 7; + + // HTTP response trailers match configuration. + HttpHeadersMatch http_response_trailers_match = 8; + + // HTTP request generic body match configuration. + HttpGenericBodyMatch http_request_generic_body_match = 9; + + // HTTP response generic body match configuration. + HttpGenericBodyMatch http_response_generic_body_match = 10; + } +} + +// HTTP headers match configuration. +message HttpHeadersMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.common.matcher.v3.HttpHeadersMatch"; + + // HTTP headers to match. + repeated route.v4alpha.HeaderMatcher headers = 1; +} + +// HTTP generic body match configuration. +// List of text strings and hex strings to be located in HTTP body. +// All specified strings must be found in the HTTP body for positive match. +// The search may be limited to specified number of bytes from the body start. +// +// .. attention:: +// +// Searching for patterns in HTTP body is potentially cpu intensive. For each specified pattern, http body is scanned byte by byte to find a match. +// If multiple patterns are specified, the process is repeated for each pattern. If location of a pattern is known, ``bytes_limit`` should be specified +// to scan only part of the http body. +message HttpGenericBodyMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.common.matcher.v3.HttpGenericBodyMatch"; + + message GenericTextMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.common.matcher.v3.HttpGenericBodyMatch.GenericTextMatch"; + + oneof rule { + option (validate.required) = true; + + // Text string to be located in HTTP body. + string string_match = 1; + + // Sequence of bytes to be located in HTTP body. + bytes binary_match = 2; + } + } + + // Limits search to specified number of bytes - default zero (no limit - match entire captured buffer). + uint32 bytes_limit = 1; + + // List of patterns to match. + repeated GenericTextMatch patterns = 2 [(validate.rules).repeated = {min_items: 1}]; +} diff --git a/api/envoy/config/tap/v3/BUILD b/api/envoy/config/tap/v3/BUILD index f266efc592a2..6fd3142264d9 100644 --- a/api/envoy/config/tap/v3/BUILD +++ b/api/envoy/config/tap/v3/BUILD @@ -6,6 +6,7 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ + "//envoy/config/common/matcher/v3:pkg", "//envoy/config/core/v3:pkg", "//envoy/config/route/v3:pkg", "//envoy/service/tap/v2alpha:pkg", diff --git a/api/envoy/config/tap/v3/common.proto b/api/envoy/config/tap/v3/common.proto index 81de393e0581..42783115f871 100644 --- a/api/envoy/config/tap/v3/common.proto +++ b/api/envoy/config/tap/v3/common.proto @@ -2,6 +2,7 @@ syntax = "proto3"; package envoy.config.tap.v3; +import "envoy/config/common/matcher/v3/matcher.proto"; import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/grpc_service.proto"; import "envoy/config/route/v3/route_components.proto"; @@ -28,7 +29,17 @@ message TapConfig { // The match configuration. 
If the configuration matches the data source being tapped, a tap will // occur, with the result written to the configured output. - MatchPredicate match_config = 1 [(validate.rules).message = {required: true}]; + // Exactly one of :ref:`match ` and + // :ref:`match_config ` must be set. If both + // are set, the :ref:`match ` will be used. + MatchPredicate match_config = 1 [deprecated = true]; + + // The match configuration. If the configuration matches the data source being tapped, a tap will + // occur, with the result written to the configured output. + // Exactly one of :ref:`match ` and + // :ref:`match_config ` must be set. If both + // are set, the :ref:`match ` will be used. + common.matcher.v3.MatchPredicate match = 4; // The tap output configuration. If a match configuration matches a data source being tapped, // a tap will occur and the data will be written to the configured output. diff --git a/api/envoy/config/tap/v4alpha/BUILD b/api/envoy/config/tap/v4alpha/BUILD index cb06389f0186..be8b1c3a17e3 100644 --- a/api/envoy/config/tap/v4alpha/BUILD +++ b/api/envoy/config/tap/v4alpha/BUILD @@ -6,6 +6,7 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ + "//envoy/config/common/matcher/v4alpha:pkg", "//envoy/config/core/v4alpha:pkg", "//envoy/config/route/v4alpha:pkg", "//envoy/config/tap/v3:pkg", diff --git a/api/envoy/config/tap/v4alpha/common.proto b/api/envoy/config/tap/v4alpha/common.proto index 5ce87d5b5770..8366187fd1bf 100644 --- a/api/envoy/config/tap/v4alpha/common.proto +++ b/api/envoy/config/tap/v4alpha/common.proto @@ -2,6 +2,7 @@ syntax = "proto3"; package envoy.config.tap.v4alpha; +import "envoy/config/common/matcher/v4alpha/matcher.proto"; import "envoy/config/core/v4alpha/base.proto"; import "envoy/config/core/v4alpha/grpc_service.proto"; import "envoy/config/route/v4alpha/route_components.proto"; @@ -25,9 +26,16 @@ message TapConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.tap.v3.TapConfig"; + reserved 1; + + reserved "match_config"; + // The match configuration. If the configuration matches the data source being tapped, a tap will // occur, with the result written to the configured output. - MatchPredicate match_config = 1 [(validate.rules).message = {required: true}]; + // Exactly one of :ref:`match ` and + // :ref:`match_config ` must be set. If both + // are set, the :ref:`match ` will be used. + common.matcher.v4alpha.MatchPredicate match = 4; // The tap output configuration. If a match configuration matches a data source being tapped, // a tap will occur and the data will be written to the configured output. 
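To make the new API concrete, here is a minimal sketch of a static tap configuration that uses the match field introduced above together with the unified MatchPredicate from common.matcher.v3. The field names (match, and_match, rules, http_request_headers_match, http_response_headers_match, output_config, sinks, file_per_tap) come from the protos in this patch; the header names and values, the HeaderMatcher sub-fields (name/exact_match from route.v3, not part of this change), and the /tmp path prefix are illustrative assumptions only:

  common_config:
    static_config:
      match:
        # A MatchSet requires at least two rules; for and_match, all of them must match.
        and_match:
          rules:
          - http_request_headers_match:
              headers:
              - name: ":method"
                exact_match: "POST"
          - http_response_headers_match:
              headers:
              - name: "content-type"
                exact_match: "application/grpc"
      output_config:
        sinks:
        - format: PROTO_BINARY
          file_per_tap:
            path_prefix: /tmp/tap

Because MatchPredicate is recursive, or_match, and_match, and not_match can be nested arbitrarily around the leaf header and body matchers.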
diff --git a/api/versioning/BUILD b/api/versioning/BUILD index 950594d7213e..e0a67d2f3cb1 100644 --- a/api/versioning/BUILD +++ b/api/versioning/BUILD @@ -13,6 +13,7 @@ proto_library( "//envoy/config/accesslog/v3:pkg", "//envoy/config/bootstrap/v3:pkg", "//envoy/config/cluster/v3:pkg", + "//envoy/config/common/matcher/v3:pkg", "//envoy/config/core/v3:pkg", "//envoy/config/endpoint/v3:pkg", "//envoy/config/filter/thrift/router/v2alpha1:pkg", diff --git a/docs/root/api-v3/config/common/common.rst b/docs/root/api-v3/config/common/common.rst index 5739dffe3676..bb6965a5f149 100644 --- a/docs/root/api-v3/config/common/common.rst +++ b/docs/root/api-v3/config/common/common.rst @@ -5,5 +5,6 @@ Common :glob: :maxdepth: 2 + matcher/v3/* ../../extensions/common/dynamic_forward_proxy/v3/* ../../extensions/common/tap/v3/* diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 01a2b7235bee..f5b6217a1757 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -76,3 +76,5 @@ Deprecated ---------- * The :ref:`track_timeout_budgets ` field has been deprecated in favor of `timeout_budgets` part of an :ref:`Optional Configuration `. +* tap: the :ref:`match_config ` field has been deprecated in favor of + :ref:`match ` field. diff --git a/generated_api_shadow/envoy/config/common/matcher/v3/BUILD b/generated_api_shadow/envoy/config/common/matcher/v3/BUILD new file mode 100644 index 000000000000..c312b8eb6a61 --- /dev/null +++ b/generated_api_shadow/envoy/config/common/matcher/v3/BUILD @@ -0,0 +1,12 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/route/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/config/common/matcher/v3/matcher.proto b/generated_api_shadow/envoy/config/common/matcher/v3/matcher.proto new file mode 100644 index 000000000000..d0955e7a1f8c --- /dev/null +++ b/generated_api_shadow/envoy/config/common/matcher/v3/matcher.proto @@ -0,0 +1,100 @@ +syntax = "proto3"; + +package envoy.config.common.matcher.v3; + +import "envoy/config/route/v3/route_components.proto"; + +import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.common.matcher.v3"; +option java_outer_classname = "MatcherProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Unified Matcher API] + +// Match configuration. This is a recursive structure which allows complex nested match +// configurations to be built using various logical operators. +// [#next-free-field: 11] +message MatchPredicate { + // A set of match configurations used for logical operations. + message MatchSet { + // The list of rules that make up the set. + repeated MatchPredicate rules = 1 [(validate.rules).repeated = {min_items: 2}]; + } + + oneof rule { + option (validate.required) = true; + + // A set that describes a logical OR. If any member of the set matches, the match configuration + // matches. + MatchSet or_match = 1; + + // A set that describes a logical AND. If all members of the set match, the match configuration + // matches. + MatchSet and_match = 2; + + // A negation match. 
The match configuration will match if the negated match condition matches. + MatchPredicate not_match = 3; + + // The match configuration will always match. + bool any_match = 4 [(validate.rules).bool = {const: true}]; + + // HTTP request headers match configuration. + HttpHeadersMatch http_request_headers_match = 5; + + // HTTP request trailers match configuration. + HttpHeadersMatch http_request_trailers_match = 6; + + // HTTP response headers match configuration. + HttpHeadersMatch http_response_headers_match = 7; + + // HTTP response trailers match configuration. + HttpHeadersMatch http_response_trailers_match = 8; + + // HTTP request generic body match configuration. + HttpGenericBodyMatch http_request_generic_body_match = 9; + + // HTTP response generic body match configuration. + HttpGenericBodyMatch http_response_generic_body_match = 10; + } +} + +// HTTP headers match configuration. +message HttpHeadersMatch { + // HTTP headers to match. + repeated route.v3.HeaderMatcher headers = 1; +} + +// HTTP generic body match configuration. +// List of text strings and hex strings to be located in HTTP body. +// All specified strings must be found in the HTTP body for positive match. +// The search may be limited to specified number of bytes from the body start. +// +// .. attention:: +// +// Searching for patterns in HTTP body is potentially cpu intensive. For each specified pattern, http body is scanned byte by byte to find a match. +// If multiple patterns are specified, the process is repeated for each pattern. If location of a pattern is known, ``bytes_limit`` should be specified +// to scan only part of the http body. +message HttpGenericBodyMatch { + message GenericTextMatch { + oneof rule { + option (validate.required) = true; + + // Text string to be located in HTTP body. + string string_match = 1; + + // Sequence of bytes to be located in HTTP body. + bytes binary_match = 2; + } + } + + // Limits search to specified number of bytes - default zero (no limit - match entire captured buffer). + uint32 bytes_limit = 1; + + // List of patterns to match. + repeated GenericTextMatch patterns = 2 [(validate.rules).repeated = {min_items: 1}]; +} diff --git a/generated_api_shadow/envoy/config/common/matcher/v4alpha/BUILD b/generated_api_shadow/envoy/config/common/matcher/v4alpha/BUILD new file mode 100644 index 000000000000..7028ce1a2aea --- /dev/null +++ b/generated_api_shadow/envoy/config/common/matcher/v4alpha/BUILD @@ -0,0 +1,13 @@ +# DO NOT EDIT. This file is generated by tools/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/common/matcher/v3:pkg", + "//envoy/config/route/v4alpha:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/generated_api_shadow/envoy/config/common/matcher/v4alpha/matcher.proto b/generated_api_shadow/envoy/config/common/matcher/v4alpha/matcher.proto new file mode 100644 index 000000000000..685ae03a1878 --- /dev/null +++ b/generated_api_shadow/envoy/config/common/matcher/v4alpha/matcher.proto @@ -0,0 +1,114 @@ +syntax = "proto3"; + +package envoy.config.common.matcher.v4alpha; + +import "envoy/config/route/v4alpha/route_components.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.common.matcher.v4alpha"; +option java_outer_classname = "MatcherProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; + +// [#protodoc-title: Unified Matcher API] + +// Match configuration. This is a recursive structure which allows complex nested match +// configurations to be built using various logical operators. +// [#next-free-field: 11] +message MatchPredicate { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.common.matcher.v3.MatchPredicate"; + + // A set of match configurations used for logical operations. + message MatchSet { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.common.matcher.v3.MatchPredicate.MatchSet"; + + // The list of rules that make up the set. + repeated MatchPredicate rules = 1 [(validate.rules).repeated = {min_items: 2}]; + } + + oneof rule { + option (validate.required) = true; + + // A set that describes a logical OR. If any member of the set matches, the match configuration + // matches. + MatchSet or_match = 1; + + // A set that describes a logical AND. If all members of the set match, the match configuration + // matches. + MatchSet and_match = 2; + + // A negation match. The match configuration will match if the negated match condition matches. + MatchPredicate not_match = 3; + + // The match configuration will always match. + bool any_match = 4 [(validate.rules).bool = {const: true}]; + + // HTTP request headers match configuration. + HttpHeadersMatch http_request_headers_match = 5; + + // HTTP request trailers match configuration. + HttpHeadersMatch http_request_trailers_match = 6; + + // HTTP response headers match configuration. + HttpHeadersMatch http_response_headers_match = 7; + + // HTTP response trailers match configuration. + HttpHeadersMatch http_response_trailers_match = 8; + + // HTTP request generic body match configuration. + HttpGenericBodyMatch http_request_generic_body_match = 9; + + // HTTP response generic body match configuration. + HttpGenericBodyMatch http_response_generic_body_match = 10; + } +} + +// HTTP headers match configuration. +message HttpHeadersMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.common.matcher.v3.HttpHeadersMatch"; + + // HTTP headers to match. + repeated route.v4alpha.HeaderMatcher headers = 1; +} + +// HTTP generic body match configuration. +// List of text strings and hex strings to be located in HTTP body. +// All specified strings must be found in the HTTP body for positive match. 
+// The search may be limited to specified number of bytes from the body start. +// +// .. attention:: +// +// Searching for patterns in HTTP body is potentially cpu intensive. For each specified pattern, http body is scanned byte by byte to find a match. +// If multiple patterns are specified, the process is repeated for each pattern. If location of a pattern is known, ``bytes_limit`` should be specified +// to scan only part of the http body. +message HttpGenericBodyMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.common.matcher.v3.HttpGenericBodyMatch"; + + message GenericTextMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.common.matcher.v3.HttpGenericBodyMatch.GenericTextMatch"; + + oneof rule { + option (validate.required) = true; + + // Text string to be located in HTTP body. + string string_match = 1; + + // Sequence of bytes to be located in HTTP body. + bytes binary_match = 2; + } + } + + // Limits search to specified number of bytes - default zero (no limit - match entire captured buffer). + uint32 bytes_limit = 1; + + // List of patterns to match. + repeated GenericTextMatch patterns = 2 [(validate.rules).repeated = {min_items: 1}]; +} diff --git a/generated_api_shadow/envoy/config/tap/v3/BUILD b/generated_api_shadow/envoy/config/tap/v3/BUILD index f266efc592a2..6fd3142264d9 100644 --- a/generated_api_shadow/envoy/config/tap/v3/BUILD +++ b/generated_api_shadow/envoy/config/tap/v3/BUILD @@ -6,6 +6,7 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ + "//envoy/config/common/matcher/v3:pkg", "//envoy/config/core/v3:pkg", "//envoy/config/route/v3:pkg", "//envoy/service/tap/v2alpha:pkg", diff --git a/generated_api_shadow/envoy/config/tap/v3/common.proto b/generated_api_shadow/envoy/config/tap/v3/common.proto index 81de393e0581..42783115f871 100644 --- a/generated_api_shadow/envoy/config/tap/v3/common.proto +++ b/generated_api_shadow/envoy/config/tap/v3/common.proto @@ -2,6 +2,7 @@ syntax = "proto3"; package envoy.config.tap.v3; +import "envoy/config/common/matcher/v3/matcher.proto"; import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/grpc_service.proto"; import "envoy/config/route/v3/route_components.proto"; @@ -28,7 +29,17 @@ message TapConfig { // The match configuration. If the configuration matches the data source being tapped, a tap will // occur, with the result written to the configured output. - MatchPredicate match_config = 1 [(validate.rules).message = {required: true}]; + // Exactly one of :ref:`match ` and + // :ref:`match_config ` must be set. If both + // are set, the :ref:`match ` will be used. + MatchPredicate match_config = 1 [deprecated = true]; + + // The match configuration. If the configuration matches the data source being tapped, a tap will + // occur, with the result written to the configured output. + // Exactly one of :ref:`match ` and + // :ref:`match_config ` must be set. If both + // are set, the :ref:`match ` will be used. + common.matcher.v3.MatchPredicate match = 4; // The tap output configuration. If a match configuration matches a data source being tapped, // a tap will occur and the data will be written to the configured output. 
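The generic body matcher documented above is the one place where matching can become expensive, which is why the proto calls out bytes_limit. A hedged sketch of how such a predicate might be written; the pattern strings, the 128-byte limit, and the base64-encoded byte sequence are made-up values for illustration:

  match:
    http_request_generic_body_match:
      # Bound the byte-by-byte scan to the first 128 bytes of the request body.
      bytes_limit: 128
      patterns:
      # All listed patterns must be found in the body for the predicate to match.
      - string_match: "envoy"
      - binary_match: "dGFw"  # bytes fields are base64-encoded in YAML/JSON configs ("tap")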
diff --git a/generated_api_shadow/envoy/config/tap/v4alpha/BUILD b/generated_api_shadow/envoy/config/tap/v4alpha/BUILD index cb06389f0186..be8b1c3a17e3 100644 --- a/generated_api_shadow/envoy/config/tap/v4alpha/BUILD +++ b/generated_api_shadow/envoy/config/tap/v4alpha/BUILD @@ -6,6 +6,7 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ + "//envoy/config/common/matcher/v4alpha:pkg", "//envoy/config/core/v4alpha:pkg", "//envoy/config/route/v4alpha:pkg", "//envoy/config/tap/v3:pkg", diff --git a/generated_api_shadow/envoy/config/tap/v4alpha/common.proto b/generated_api_shadow/envoy/config/tap/v4alpha/common.proto index 5ce87d5b5770..d18ba1db94c1 100644 --- a/generated_api_shadow/envoy/config/tap/v4alpha/common.proto +++ b/generated_api_shadow/envoy/config/tap/v4alpha/common.proto @@ -2,6 +2,7 @@ syntax = "proto3"; package envoy.config.tap.v4alpha; +import "envoy/config/common/matcher/v4alpha/matcher.proto"; import "envoy/config/core/v4alpha/base.proto"; import "envoy/config/core/v4alpha/grpc_service.proto"; import "envoy/config/route/v4alpha/route_components.proto"; @@ -27,7 +28,17 @@ message TapConfig { // The match configuration. If the configuration matches the data source being tapped, a tap will // occur, with the result written to the configured output. - MatchPredicate match_config = 1 [(validate.rules).message = {required: true}]; + // Exactly one of :ref:`match ` and + // :ref:`match_config ` must be set. If both + // are set, the :ref:`match ` will be used. + MatchPredicate hidden_envoy_deprecated_match_config = 1 [deprecated = true]; + + // The match configuration. If the configuration matches the data source being tapped, a tap will + // occur, with the result written to the configured output. + // Exactly one of :ref:`match ` and + // :ref:`match_config ` must be set. If both + // are set, the :ref:`match ` will be used. + common.matcher.v4alpha.MatchPredicate match = 4; // The tap output configuration. If a match configuration matches a data source being tapped, // a tap will occur and the data will be written to the configured output. 
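With the api and shadow protos in place, migrating an existing tap configuration is a one-field rename: the deprecated match_config spelling keeps working, the new match field wins when both are set, and (as the config loader change below shows) a config that sets neither is rejected. A minimal before/after sketch, with the output sink values being illustrative only:

  # Deprecated spelling:
  tap_config:
    match_config:
      any_match: true
    output_config:
      sinks:
      - format: PROTO_BINARY
        file_per_tap:
          path_prefix: /tmp/tap

  # Preferred spelling going forward:
  tap_config:
    match:
      any_match: true
    output_config:
      sinks:
      - format: PROTO_BINARY
        file_per_tap:
          path_prefix: /tmp/tap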
diff --git a/source/extensions/common/matcher/BUILD b/source/extensions/common/matcher/BUILD new file mode 100644 index 000000000000..2ad3f963048a --- /dev/null +++ b/source/extensions/common/matcher/BUILD @@ -0,0 +1,21 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_extension_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_extension_package() + +envoy_cc_library( + name = "matcher_lib", + srcs = ["matcher.cc"], + hdrs = ["matcher.h"], + deps = [ + "//source/common/buffer:buffer_lib", + "//source/common/common:minimal_logger_lib", + "//source/common/http:header_utility_lib", + "@envoy_api//envoy/config/common/matcher/v3:pkg_cc_proto", + ], +) diff --git a/source/extensions/common/tap/tap_matcher.cc b/source/extensions/common/matcher/matcher.cc similarity index 87% rename from source/extensions/common/tap/tap_matcher.cc rename to source/extensions/common/matcher/matcher.cc index 71c270432563..8040b4650bca 100644 --- a/source/extensions/common/tap/tap_matcher.cc +++ b/source/extensions/common/matcher/matcher.cc @@ -1,60 +1,58 @@ -#include "extensions/common/tap/tap_matcher.h" - -#include "envoy/config/tap/v3/common.pb.h" +#include "extensions/common/matcher/matcher.h" #include "common/common/assert.h" namespace Envoy { namespace Extensions { namespace Common { -namespace Tap { +namespace Matcher { -void buildMatcher(const envoy::config::tap::v3::MatchPredicate& match_config, +void buildMatcher(const envoy::config::common::matcher::v3::MatchPredicate& match_config, std::vector& matchers) { // In order to store indexes and build our matcher tree inline, we must reserve a slot where // the matcher we are about to create will go. This allows us to know its future index and still // construct more of the tree in each called constructor (e.g., multiple OR/AND conditions). - // Once fully constructed, we move the matcher into its position below. See the tap matcher - // overview in tap.h for more information. + // Once fully constructed, we move the matcher into its position below. See the matcher + // overview in matcher.h for more information. 
matchers.emplace_back(nullptr); MatcherPtr new_matcher; switch (match_config.rule_case()) { - case envoy::config::tap::v3::MatchPredicate::RuleCase::kOrMatch: + case envoy::config::common::matcher::v3::MatchPredicate::RuleCase::kOrMatch: new_matcher = std::make_unique(match_config.or_match(), matchers, SetLogicMatcher::Type::Or); break; - case envoy::config::tap::v3::MatchPredicate::RuleCase::kAndMatch: + case envoy::config::common::matcher::v3::MatchPredicate::RuleCase::kAndMatch: new_matcher = std::make_unique(match_config.and_match(), matchers, SetLogicMatcher::Type::And); break; - case envoy::config::tap::v3::MatchPredicate::RuleCase::kNotMatch: + case envoy::config::common::matcher::v3::MatchPredicate::RuleCase::kNotMatch: new_matcher = std::make_unique(match_config.not_match(), matchers); break; - case envoy::config::tap::v3::MatchPredicate::RuleCase::kAnyMatch: + case envoy::config::common::matcher::v3::MatchPredicate::RuleCase::kAnyMatch: new_matcher = std::make_unique(matchers); break; - case envoy::config::tap::v3::MatchPredicate::RuleCase::kHttpRequestHeadersMatch: + case envoy::config::common::matcher::v3::MatchPredicate::RuleCase::kHttpRequestHeadersMatch: new_matcher = std::make_unique( match_config.http_request_headers_match(), matchers); break; - case envoy::config::tap::v3::MatchPredicate::RuleCase::kHttpRequestTrailersMatch: + case envoy::config::common::matcher::v3::MatchPredicate::RuleCase::kHttpRequestTrailersMatch: new_matcher = std::make_unique( match_config.http_request_trailers_match(), matchers); break; - case envoy::config::tap::v3::MatchPredicate::RuleCase::kHttpResponseHeadersMatch: + case envoy::config::common::matcher::v3::MatchPredicate::RuleCase::kHttpResponseHeadersMatch: new_matcher = std::make_unique( match_config.http_response_headers_match(), matchers); break; - case envoy::config::tap::v3::MatchPredicate::RuleCase::kHttpResponseTrailersMatch: + case envoy::config::common::matcher::v3::MatchPredicate::RuleCase::kHttpResponseTrailersMatch: new_matcher = std::make_unique( match_config.http_response_trailers_match(), matchers); break; - case envoy::config::tap::v3::MatchPredicate::RuleCase::kHttpRequestGenericBodyMatch: + case envoy::config::common::matcher::v3::MatchPredicate::RuleCase::kHttpRequestGenericBodyMatch: new_matcher = std::make_unique( match_config.http_request_generic_body_match(), matchers); break; - case envoy::config::tap::v3::MatchPredicate::RuleCase::kHttpResponseGenericBodyMatch: + case envoy::config::common::matcher::v3::MatchPredicate::RuleCase::kHttpResponseGenericBodyMatch: new_matcher = std::make_unique( match_config.http_response_generic_body_match(), matchers); break; @@ -66,8 +64,9 @@ void buildMatcher(const envoy::config::tap::v3::MatchPredicate& match_config, matchers[new_matcher->index()] = std::move(new_matcher); } -SetLogicMatcher::SetLogicMatcher(const envoy::config::tap::v3::MatchPredicate::MatchSet& configs, - std::vector& matchers, Type type) +SetLogicMatcher::SetLogicMatcher( + const envoy::config::common::matcher::v3::MatchPredicate::MatchSet& configs, + std::vector& matchers, Type type) : LogicMatcherBase(matchers), matchers_(matchers), type_(type) { for (const auto& config : configs.rules()) { indexes_.push_back(matchers_.size()); @@ -100,7 +99,7 @@ void SetLogicMatcher::updateLocalStatus(MatchStatusVector& statuses, [&statuses](size_t index) { return statuses[index].might_change_status_; }); } -NotMatcher::NotMatcher(const envoy::config::tap::v3::MatchPredicate& config, +NotMatcher::NotMatcher(const 
envoy::config::common::matcher::v3::MatchPredicate& config, std::vector& matchers) : LogicMatcherBase(matchers), matchers_(matchers), not_index_(matchers.size()) { buildMatcher(config, matchers); @@ -117,8 +116,9 @@ void NotMatcher::updateLocalStatus(MatchStatusVector& statuses, statuses[my_index_].might_change_status_ = statuses[not_index_].might_change_status_; } -HttpHeaderMatcherBase::HttpHeaderMatcherBase(const envoy::config::tap::v3::HttpHeadersMatch& config, - const std::vector& matchers) +HttpHeaderMatcherBase::HttpHeaderMatcherBase( + const envoy::config::common::matcher::v3::HttpHeadersMatch& config, + const std::vector& matchers) : SimpleMatcher(matchers), headers_to_match_(Http::HeaderUtility::buildHeaderDataVector(config.headers())) {} @@ -134,18 +134,18 @@ void HttpHeaderMatcherBase::matchHeaders(const Http::HeaderMap& headers, // HTTP body may be passed to the matcher in chunks. The search logic buffers // only as many bytes as is the length of the longest pattern to be found. HttpGenericBodyMatcher::HttpGenericBodyMatcher( - const envoy::config::tap::v3::HttpGenericBodyMatch& config, + const envoy::config::common::matcher::v3::HttpGenericBodyMatch& config, const std::vector& matchers) : HttpBodyMatcherBase(matchers) { patterns_ = std::make_shared>(); for (const auto& i : config.patterns()) { switch (i.rule_case()) { // For binary match 'i' contains sequence of bytes to locate in the body. - case envoy::config::tap::v3::HttpGenericBodyMatch::GenericTextMatch::kBinaryMatch: { + case envoy::config::common::matcher::v3::HttpGenericBodyMatch::GenericTextMatch::kBinaryMatch: { patterns_->push_back(i.binary_match()); } break; // For string match 'i' contains exact string to locate in the body. - case envoy::config::tap::v3::HttpGenericBodyMatch::GenericTextMatch::kStringMatch: + case envoy::config::common::matcher::v3::HttpGenericBodyMatch::GenericTextMatch::kStringMatch: patterns_->push_back(i.string_match()); break; default: @@ -329,7 +329,7 @@ void HttpGenericBodyMatcher::resizeOverlapBuffer(HttpGenericBodyMatcherCtx* ctx) } } -} // namespace Tap +} // namespace Matcher } // namespace Common } // namespace Extensions } // namespace Envoy diff --git a/source/extensions/common/tap/tap_matcher.h b/source/extensions/common/matcher/matcher.h similarity index 94% rename from source/extensions/common/tap/tap_matcher.h rename to source/extensions/common/matcher/matcher.h index 79705e3fe924..4eecd25d3786 100644 --- a/source/extensions/common/tap/tap_matcher.h +++ b/source/extensions/common/matcher/matcher.h @@ -1,15 +1,14 @@ #pragma once -#include "envoy/config/tap/v3/common.pb.h" +#include "envoy/config/common/matcher/v3/matcher.pb.h" #include "common/buffer/buffer_impl.h" -#include "common/common/matchers.h" #include "common/http/header_utility.h" namespace Envoy { namespace Extensions { namespace Common { -namespace Tap { +namespace Matcher { class Matcher; using MatcherPtr = std::unique_ptr; @@ -27,9 +26,9 @@ class MatcherCtx { }; /** - * Base class for all tap matchers. + * Base class for all matchers. * - * A high level note on the design of tap matching which is different from other matching in Envoy + * A high level note on the design of matching which is different from other matching in Envoy * due to a requirement to support streaming matching (match as new data arrives versus * calculating the match given all available data at once). * - The matching system is composed of a constant matching configuration. 
This is essentially @@ -66,7 +65,7 @@ class Matcher { Matcher(const std::vector& matchers) // NOTE: This code assumes that the index for the matcher being constructed has already been // allocated, which is why my_index_ is set to size() - 1. See buildMatcher() in - // tap_matcher.cc. + // matcher.cc. : my_index_(matchers.size() - 1) {} virtual ~Matcher() = default; @@ -150,9 +149,9 @@ class Matcher { /** * Factory method to build a matcher given a match config. Calling this function may end * up recursively building many matchers, which will all be added to the passed in vector - * of matchers. See the comments in tap.h for the general structure of how tap matchers work. + * of matchers. See the comments in matcher.h for the general structure of how matchers work. */ -void buildMatcher(const envoy::config::tap::v3::MatchPredicate& match_config, +void buildMatcher(const envoy::config::common::matcher::v3::MatchPredicate& match_config, std::vector& matchers); /** @@ -162,7 +161,6 @@ class LogicMatcherBase : public Matcher { public: using Matcher::Matcher; - // Extensions::Common::Tap::Matcher void onNewStream(MatchStatusVector& statuses) const override { updateLocalStatus(statuses, [](Matcher& m, MatchStatusVector& statuses) { m.onNewStream(statuses); }); @@ -215,7 +213,7 @@ class SetLogicMatcher : public LogicMatcherBase { public: enum class Type { And, Or }; - SetLogicMatcher(const envoy::config::tap::v3::MatchPredicate::MatchSet& configs, + SetLogicMatcher(const envoy::config::common::matcher::v3::MatchPredicate::MatchSet& configs, std::vector& matchers, Type type); private: @@ -231,7 +229,7 @@ class SetLogicMatcher : public LogicMatcherBase { */ class NotMatcher : public LogicMatcherBase { public: - NotMatcher(const envoy::config::tap::v3::MatchPredicate& config, + NotMatcher(const envoy::config::common::matcher::v3::MatchPredicate& config, std::vector& matchers); private: @@ -249,7 +247,6 @@ class SimpleMatcher : public Matcher { public: using Matcher::Matcher; - // Extensions::Common::Tap::Matcher void onNewStream(MatchStatusVector&) const override {} void onHttpRequestHeaders(const Http::RequestHeaderMap&, MatchStatusVector&) const override {} void onHttpRequestTrailers(const Http::RequestTrailerMap&, MatchStatusVector&) const override {} @@ -266,7 +263,6 @@ class AnyMatcher : public SimpleMatcher { public: using SimpleMatcher::SimpleMatcher; - // Extensions::Common::Tap::Matcher void onNewStream(MatchStatusVector& statuses) const override { statuses[my_index_].matches_ = true; statuses[my_index_].might_change_status_ = false; @@ -278,7 +274,7 @@ class AnyMatcher : public SimpleMatcher { */ class HttpHeaderMatcherBase : public SimpleMatcher { public: - HttpHeaderMatcherBase(const envoy::config::tap::v3::HttpHeadersMatch& config, + HttpHeaderMatcherBase(const envoy::config::common::matcher::v3::HttpHeadersMatch& config, const std::vector& matchers); protected: @@ -294,7 +290,6 @@ class HttpRequestHeadersMatcher : public HttpHeaderMatcherBase { public: using HttpHeaderMatcherBase::HttpHeaderMatcherBase; - // Extensions::Common::Tap::Matcher void onHttpRequestHeaders(const Http::RequestHeaderMap& request_headers, MatchStatusVector& statuses) const override { matchHeaders(request_headers, statuses); @@ -308,7 +303,6 @@ class HttpRequestTrailersMatcher : public HttpHeaderMatcherBase { public: using HttpHeaderMatcherBase::HttpHeaderMatcherBase; - // Extensions::Common::Tap::Matcher void onHttpRequestTrailers(const Http::RequestTrailerMap& request_trailers, MatchStatusVector& statuses) 
const override { matchHeaders(request_trailers, statuses); @@ -322,7 +316,6 @@ class HttpResponseHeadersMatcher : public HttpHeaderMatcherBase { public: using HttpHeaderMatcherBase::HttpHeaderMatcherBase; - // Extensions::Common::Tap::Matcher void onHttpResponseHeaders(const Http::ResponseHeaderMap& response_headers, MatchStatusVector& statuses) const override { matchHeaders(response_headers, statuses); @@ -336,7 +329,6 @@ class HttpResponseTrailersMatcher : public HttpHeaderMatcherBase { public: using HttpHeaderMatcherBase::HttpHeaderMatcherBase; - // Extensions::Common::Tap::Matcher void onHttpResponseTrailers(const Http::ResponseTrailerMap& response_trailers, MatchStatusVector& statuses) const override { matchHeaders(response_trailers, statuses); @@ -404,7 +396,7 @@ class HttpGenericBodyMatcherCtx : public MatcherCtx { class HttpGenericBodyMatcher : public HttpBodyMatcherBase { public: - HttpGenericBodyMatcher(const envoy::config::tap::v3::HttpGenericBodyMatch& config, + HttpGenericBodyMatcher(const envoy::config::common::matcher::v3::HttpGenericBodyMatch& config, const std::vector& matchers); protected: @@ -425,7 +417,7 @@ class HttpGenericBodyMatcher : public HttpBodyMatcherBase { private: // The following fields are initialized based on matcher config and are used - // by all HTTP tappers. + // by all HTTP matchers. // List of strings which body must contain to get match. std::shared_ptr> patterns_; // Stores the length of the longest pattern. @@ -450,7 +442,7 @@ class HttpResponseGenericBodyMatcher : public HttpGenericBodyMatcher { } }; -} // namespace Tap +} // namespace Matcher } // namespace Common } // namespace Extensions } // namespace Envoy diff --git a/source/extensions/common/tap/BUILD b/source/extensions/common/tap/BUILD index 8cf381c67dee..e127bf3aaa19 100644 --- a/source/extensions/common/tap/BUILD +++ b/source/extensions/common/tap/BUILD @@ -12,8 +12,8 @@ envoy_cc_library( name = "tap_interface", hdrs = ["tap.h"], deps = [ - ":tap_matcher", "//include/envoy/http:header_map_interface", + "//source/extensions/common/matcher:matcher_lib", "@envoy_api//envoy/config/tap/v3:pkg_cc_proto", "@envoy_api//envoy/data/tap/v3:pkg_cc_proto", ], @@ -25,26 +25,14 @@ envoy_cc_library( hdrs = ["tap_config_base.h"], deps = [ ":tap_interface", - ":tap_matcher", "//source/common/common:assert_lib", "//source/common/common:hex_lib", + "//source/extensions/common/matcher:matcher_lib", "@envoy_api//envoy/config/tap/v3:pkg_cc_proto", "@envoy_api//envoy/data/tap/v3:pkg_cc_proto", ], ) -envoy_cc_library( - name = "tap_matcher", - srcs = ["tap_matcher.cc"], - hdrs = ["tap_matcher.h"], - deps = [ - "//source/common/buffer:buffer_lib", - "//source/common/common:matchers_lib", - "//source/common/http:header_utility_lib", - "@envoy_api//envoy/config/tap/v3:pkg_cc_proto", - ], -) - envoy_cc_library( name = "admin", srcs = ["admin.cc"], diff --git a/source/extensions/common/tap/admin.h b/source/extensions/common/tap/admin.h index bf80f6889b17..a3cbb7f6e815 100644 --- a/source/extensions/common/tap/admin.h +++ b/source/extensions/common/tap/admin.h @@ -1,6 +1,5 @@ #pragma once -#include "envoy/config/tap/v3/common.pb.h" #include "envoy/server/admin.h" #include "envoy/singleton/manager.h" diff --git a/source/extensions/common/tap/tap.h b/source/extensions/common/tap/tap.h index 58ba4ba82d6d..9abf88d6965b 100644 --- a/source/extensions/common/tap/tap.h +++ b/source/extensions/common/tap/tap.h @@ -5,7 +5,7 @@ #include "envoy/data/tap/v3/wrapper.pb.h" #include "envoy/http/header_map.h" -#include 
"extensions/common/tap/tap_matcher.h" +#include "extensions/common/matcher/matcher.h" #include "absl/strings/string_view.h" @@ -14,6 +14,8 @@ namespace Extensions { namespace Common { namespace Tap { +using Matcher = Envoy::Extensions::Common::Matcher::Matcher; + using TraceWrapperPtr = std::unique_ptr; inline TraceWrapperPtr makeTraceWrapper() { return std::make_unique(); diff --git a/source/extensions/common/tap/tap_config_base.cc b/source/extensions/common/tap/tap_config_base.cc index b9debd1720e6..7eea4b7fe7be 100644 --- a/source/extensions/common/tap/tap_config_base.cc +++ b/source/extensions/common/tap/tap_config_base.cc @@ -5,9 +5,11 @@ #include "envoy/data/tap/v3/wrapper.pb.h" #include "common/common/assert.h" +#include "common/common/fmt.h" +#include "common/config/version_converter.h" #include "common/protobuf/utility.h" -#include "extensions/common/tap/tap_matcher.h" +#include "extensions/common/matcher/matcher.h" #include "absl/container/fixed_array.h" @@ -16,6 +18,8 @@ namespace Extensions { namespace Common { namespace Tap { +using namespace Matcher; + bool Utility::addBufferToProtoBytes(envoy::data::tap::v3::Body& output_body, uint32_t max_buffered_bytes, const Buffer::Instance& data, uint32_t buffer_start_offset, uint32_t buffer_length_to_copy) { @@ -72,7 +76,20 @@ TapConfigBaseImpl::TapConfigBaseImpl(envoy::config::tap::v3::TapConfig&& proto_c NOT_REACHED_GCOVR_EXCL_LINE; } - buildMatcher(proto_config.match_config(), matchers_); + envoy::config::common::matcher::v3::MatchPredicate match; + if (proto_config.has_match()) { + // Use the match field whenever it is set. + match = proto_config.match(); + } else if (proto_config.has_match_config()) { + // Fallback to use the deprecated match_config field and upgrade (wire cast) it to the new + // MatchPredicate which is backward compatible with the old MatchPredicate originally + // introduced in the Tap filter. + Config::VersionConverter::upgrade(proto_config.match_config(), match); + } else { + throw EnvoyException(fmt::format("Neither match nor match_config is set in TapConfig: {}", + proto_config.DebugString())); + } + buildMatcher(match, matchers_); } const Matcher& TapConfigBaseImpl::rootMatcher() const { diff --git a/source/extensions/common/tap/tap_config_base.h b/source/extensions/common/tap/tap_config_base.h index 59b53da027f6..8a6014bc143c 100644 --- a/source/extensions/common/tap/tap_config_base.h +++ b/source/extensions/common/tap/tap_config_base.h @@ -7,14 +7,17 @@ #include "envoy/data/tap/v3/common.pb.h" #include "envoy/data/tap/v3/wrapper.pb.h" +#include "extensions/common/matcher/matcher.h" #include "extensions/common/tap/tap.h" -#include "extensions/common/tap/tap_matcher.h" namespace Envoy { namespace Extensions { namespace Common { namespace Tap { +using Matcher = Envoy::Extensions::Common::Matcher::Matcher; +using MatcherPtr = Envoy::Extensions::Common::Matcher::MatcherPtr; + /** * Common utilities for tapping. 
*/ diff --git a/test/extensions/common/matcher/BUILD b/test/extensions/common/matcher/BUILD new file mode 100644 index 000000000000..a2723b48da78 --- /dev/null +++ b/test/extensions/common/matcher/BUILD @@ -0,0 +1,19 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_test", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_test( + name = "matcher_test", + srcs = ["matcher_test.cc"], + deps = [ + "//source/extensions/common/matcher:matcher_lib", + "//test/test_common:utility_lib", + "@envoy_api//envoy/config/common/matcher/v3:pkg_cc_proto", + ], +) diff --git a/test/extensions/common/tap/tap_matcher_test.cc b/test/extensions/common/matcher/matcher_test.cc similarity index 97% rename from test/extensions/common/tap/tap_matcher_test.cc rename to test/extensions/common/matcher/matcher_test.cc index 9898c7b4ee89..28f6752eb24a 100644 --- a/test/extensions/common/tap/tap_matcher_test.cc +++ b/test/extensions/common/matcher/matcher_test.cc @@ -1,8 +1,8 @@ -#include "envoy/config/tap/v3/common.pb.h" +#include "envoy/config/common/matcher/v3/matcher.pb.h" #include "common/protobuf/utility.h" -#include "extensions/common/tap/tap_matcher.h" +#include "extensions/common/matcher/matcher.h" #include "test/test_common/utility.h" @@ -11,19 +11,19 @@ namespace Envoy { namespace Extensions { namespace Common { -namespace Tap { +namespace Matcher { namespace { -class TapMatcherTestBase { +class MatcherTestBase { public: std::vector matchers_; Matcher::MatchStatusVector statuses_; - envoy::config::tap::v3::MatchPredicate config_; + envoy::config::common::matcher::v3::MatchPredicate config_; enum class Direction { Request, Response }; }; -class TapMatcherTest : public TapMatcherTestBase, public testing::Test { +class TapMatcherTest : public MatcherTestBase, public testing::Test { public: Http::TestRequestHeaderMapImpl request_headers_; Http::TestRequestTrailerMapImpl request_trailers_; @@ -31,12 +31,12 @@ class TapMatcherTest : public TapMatcherTestBase, public testing::Test { Http::TestResponseTrailerMapImpl response_trailers_; }; -class TapMatcherGenericBodyConfigTest : public TapMatcherTestBase, public ::testing::Test {}; +class TapMatcherGenericBodyConfigTest : public MatcherTestBase, public ::testing::Test {}; class TapMatcherGenericBodyTest - : public TapMatcherTestBase, + : public MatcherTestBase, public ::testing::TestWithParam< - std::tuple, std::list>, std::pair>>> { public: @@ -242,8 +242,8 @@ TEST_P(TapMatcherGenericBodyTest, GenericBodyTest) { INSTANTIATE_TEST_SUITE_P( TapMatcherGenericBodyTestSuite, TapMatcherGenericBodyTest, ::testing::Combine( - ::testing::Values(TapMatcherTestBase::Direction::Request, - TapMatcherTestBase::Direction::Response), + ::testing::Values(MatcherTestBase::Direction::Request, + MatcherTestBase::Direction::Response), ::testing::Values( // SEARCHING FOR SINGLE PATTERN - no limit // Should match - there is a single body chunk and envoy is in the body @@ -500,7 +500,7 @@ TEST_F(TapMatcherGenericBodyTest, RandomLengthOverlappingPatterns) { } } } // namespace -} // namespace Tap +} // namespace Matcher } // namespace Common } // namespace Extensions } // namespace Envoy diff --git a/test/extensions/common/tap/BUILD b/test/extensions/common/tap/BUILD index 9775f2873b05..c5a459721faf 100644 --- a/test/extensions/common/tap/BUILD +++ b/test/extensions/common/tap/BUILD @@ -31,16 +31,6 @@ envoy_cc_test( ], ) -envoy_cc_test( - name = "tap_matcher_test", - srcs = ["tap_matcher_test.cc"], - deps = [ - 
"//source/extensions/common/tap:tap_matcher", - "//test/test_common:utility_lib", - "@envoy_api//envoy/config/tap/v3:pkg_cc_proto", - ], -) - envoy_cc_test( name = "tap_config_base_test", srcs = ["tap_config_base_test.cc"], diff --git a/test/extensions/common/tap/admin_test.cc b/test/extensions/common/tap/admin_test.cc index 1894251b3bfc..bffe69944cbe 100644 --- a/test/extensions/common/tap/admin_test.cc +++ b/test/extensions/common/tap/admin_test.cc @@ -50,7 +50,7 @@ class AdminHandlerTest : public testing::Test { R"EOF( config_id: test_config_id tap_config: - match_config: + match: any_match: true output_config: sinks: diff --git a/test/extensions/filters/http/tap/BUILD b/test/extensions/filters/http/tap/BUILD index a6b1a6967278..f134caaf5356 100644 --- a/test/extensions/filters/http/tap/BUILD +++ b/test/extensions/filters/http/tap/BUILD @@ -56,6 +56,7 @@ envoy_extension_cc_test( deps = [ "//source/extensions/filters/http/tap:config", "//test/integration:http_integration_lib", + "//test/test_common:utility_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/data/tap/v3:pkg_cc_proto", ], diff --git a/test/extensions/filters/http/tap/tap_filter_integration_test.cc b/test/extensions/filters/http/tap/tap_filter_integration_test.cc index a68b9d44ad19..fdfa591bc62e 100644 --- a/test/extensions/filters/http/tap/tap_filter_integration_test.cc +++ b/test/extensions/filters/http/tap/tap_filter_integration_test.cc @@ -4,6 +4,7 @@ #include "envoy/data/tap/v3/wrapper.pb.h" #include "test/integration/http_integration.h" +#include "test/test_common/utility.h" #include "absl/strings/match.h" #include "gtest/gtest.h" @@ -127,6 +128,27 @@ class TapIntegrationTest : public testing::TestWithParamclose(); + test_server_->waitForCounterGe("http.config_test.downstream_cx_destroy", 1); + + // Find the written .pb file and verify it. + auto files = TestUtility::listFiles(path_prefix, false); + auto pb_file = std::find_if(files.begin(), files.end(), + [](const std::string& s) { return absl::EndsWith(s, ".pb"); }); + ASSERT_NE(pb_file, files.end()); + + envoy::data::tap::v3::TraceWrapper trace; + TestUtility::loadFromFile(*pb_file, trace, *api_); + EXPECT_TRUE(trace.has_http_buffered_trace()); + } + const Http::TestRequestHeaderMapImpl request_headers_tap_{{":method", "GET"}, {":path", "/"}, {":scheme", "http"}, @@ -168,10 +190,10 @@ TEST_P(TapIntegrationTest, StaticFilePerTap) { R"EOF( name: tap typed_config: - "@type": type.googleapis.com/envoy.config.filter.http.tap.v2alpha.Tap + "@type": type.googleapis.com/envoy.extensions.filters.http.tap.v3.Tap common_config: static_config: - match_config: + match: any_match: true output_config: sinks: @@ -180,24 +202,53 @@ name: tap path_prefix: {} )EOF"; - const std::string path_prefix = getTempPathPrefix(); - initializeFilter(fmt::format(filter_config, path_prefix)); + verifyStaticFilePerTap(filter_config); +} - // Initial request/response with tap. - codec_client_ = makeHttpConnection(makeClientConnection(lookupPort("http"))); - makeRequest(request_headers_tap_, {}, nullptr, response_headers_no_tap_, {}, nullptr); - codec_client_->close(); - test_server_->waitForCounterGe("http.config_test.downstream_cx_destroy", 1); +// Verify the match field takes precedence over the deprecated match_config field. 
+TEST_P(TapIntegrationTest, DEPRECATED_FEATURE_TEST(StaticFilePerTapWithMatchConfigAndMatch)) { + const std::string filter_config = + R"EOF( +name: tap +typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.tap.v3.Tap + common_config: + static_config: + # match_config should be ignored by the match field. + match_config: + not_match: + any_match: true + match: + any_match: true + output_config: + sinks: + - format: PROTO_BINARY + file_per_tap: + path_prefix: {} +)EOF"; - // Find the written .pb file and verify it. - auto files = TestUtility::listFiles(path_prefix, false); - auto pb_file = std::find_if(files.begin(), files.end(), - [](const std::string& s) { return absl::EndsWith(s, ".pb"); }); - ASSERT_NE(pb_file, files.end()); + verifyStaticFilePerTap(filter_config); +} - envoy::data::tap::v3::TraceWrapper trace; - TestUtility::loadFromFile(*pb_file, trace, *api_); - EXPECT_TRUE(trace.has_http_buffered_trace()); +// Verify the deprecated match_config field. +TEST_P(TapIntegrationTest, DEPRECATED_FEATURE_TEST(StaticFilePerTapWithMatchConfig)) { + const std::string filter_config = + R"EOF( +name: tap +typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.tap.v3.Tap + common_config: + static_config: + match_config: + any_match: true + output_config: + sinks: + - format: PROTO_BINARY + file_per_tap: + path_prefix: {} +)EOF"; + + verifyStaticFilePerTap(filter_config); } // Verify a basic tap flow using the admin handler. @@ -212,7 +263,7 @@ TEST_P(TapIntegrationTest, AdminBasicFlow) { R"EOF( config_id: test_config_id tap_config: - match_config: + match: or_match: rules: - http_request_headers_match: @@ -275,7 +326,7 @@ config_id: test_config_id R"EOF( config_id: test_config_id tap_config: - match_config: + match: and_match: rules: - http_request_headers_match: @@ -319,7 +370,7 @@ TEST_P(TapIntegrationTest, AdminTrailers) { R"EOF( config_id: test_config_id tap_config: - match_config: + match: and_match: rules: - http_request_trailers_match: @@ -360,7 +411,7 @@ TEST_P(TapIntegrationTest, AdminBodyAsBytes) { R"EOF( config_id: test_config_id tap_config: - match_config: + match: any_match: true output_config: sinks: @@ -391,7 +442,7 @@ TEST_P(TapIntegrationTest, AdminBodyAsString) { R"EOF( config_id: test_config_id tap_config: - match_config: + match: any_match: true output_config: sinks: @@ -423,7 +474,7 @@ TEST_P(TapIntegrationTest, AdminBodyAsBytesTruncated) { R"EOF( config_id: test_config_id tap_config: - match_config: + match: any_match: true output_config: max_buffered_rx_bytes: 3 @@ -546,7 +597,7 @@ TEST_P(TapIntegrationTest, AdminBodyMatching) { R"EOF( config_id: test_config_id tap_config: - match_config: + match: and_match: rules: - http_request_generic_body_match: diff --git a/test/extensions/filters/http/tap/tap_filter_test.cc b/test/extensions/filters/http/tap/tap_filter_test.cc index c4d229fd192d..7ed0f1afb77f 100644 --- a/test/extensions/filters/http/tap/tap_filter_test.cc +++ b/test/extensions/filters/http/tap/tap_filter_test.cc @@ -133,7 +133,7 @@ TEST(TapFilterConfigTest, InvalidProto) { R"EOF( common_config: static_config: - match_config: + match: any_match: true output_config: sinks: @@ -150,6 +150,29 @@ TEST(TapFilterConfigTest, InvalidProto) { "Error: Specifying admin streaming output without configuring admin."); } +TEST(TapFilterConfigTest, NeitherMatchNorMatchConfig) { + const std::string filter_config = + R"EOF( + common_config: + static_config: + output_config: + sinks: + - format: PROTO_BINARY + file_per_tap: + path_prefix: abc 
+)EOF"; + + envoy::extensions::filters::http::tap::v3::Tap config; + TestUtility::loadFromYaml(filter_config, config); + NiceMock context; + TapFilterFactory factory; + + EXPECT_THROW_WITH_MESSAGE(factory.createFilterFactoryFromProto(config, "stats", context), + EnvoyException, + fmt::format("Neither match nor match_config is set in TapConfig: {}", + config.common_config().static_config().DebugString())); +} + } // namespace } // namespace TapFilter } // namespace HttpFilters diff --git a/test/extensions/transport_sockets/tls/integration/ssl_integration_test.cc b/test/extensions/transport_sockets/tls/integration/ssl_integration_test.cc index 9994f8ca314b..db9b0afd9ec5 100644 --- a/test/extensions/transport_sockets/tls/integration/ssl_integration_test.cc +++ b/test/extensions/transport_sockets/tls/integration/ssl_integration_test.cc @@ -396,10 +396,8 @@ class SslTapIntegrationTest : public SslIntegrationTest { envoy::extensions::transport_sockets::tap::v3::Tap createTapConfig(const envoy::config::core::v3::TransportSocket& inner_transport) { envoy::extensions::transport_sockets::tap::v3::Tap tap_config; - tap_config.mutable_common_config() - ->mutable_static_config() - ->mutable_match_config() - ->set_any_match(true); + tap_config.mutable_common_config()->mutable_static_config()->mutable_match()->set_any_match( + true); auto* output_config = tap_config.mutable_common_config()->mutable_static_config()->mutable_output_config(); if (max_rx_bytes_.has_value()) { From a520aeae8e2239ab0aea26a967317ef38cc86be8 Mon Sep 17 00:00:00 2001 From: danzh Date: Mon, 10 Aug 2020 13:35:54 -0400 Subject: [PATCH 895/909] quiche: verify private key used to sign in ComputeTlsSignature() (#12553) quic::ProofSource:: ComputeTlsSignature() can be called by either ProofSource::GetProof() or directly from QUICHE stack. When it's called from GetProof() the private key is verified while deducing signature algorithm in GetProof(). But when it is called from QUICHE TLS handshake stack directly, we currently don't check the private key used. Fixes #9434 Part of #2557 Signed-off-by: Dan Zhang --- .../quiche/envoy_quic_proof_source.cc | 12 ++++++- .../quiche/envoy_quic_proof_source_test.cc | 36 +++++++++++++++++++ 2 files changed, 47 insertions(+), 1 deletion(-) diff --git a/source/extensions/quic_listeners/quiche/envoy_quic_proof_source.cc b/source/extensions/quic_listeners/quiche/envoy_quic_proof_source.cc index 96fe056e818e..1f65e4e7e6a0 100644 --- a/source/extensions/quic_listeners/quiche/envoy_quic_proof_source.cc +++ b/source/extensions/quic_listeners/quiche/envoy_quic_proof_source.cc @@ -58,9 +58,19 @@ void EnvoyQuicProofSource::signPayload( callback->Run(false, "", nullptr); return; } + // Verify the signature algorithm is as expected. + std::string error_details; + int sign_alg = deduceSignatureAlgorithmFromPublicKey(pem_key->private_key(), &error_details); + if (sign_alg != signature_algorithm) { + ENVOY_LOG(warn, + fmt::format("The signature algorithm {} from the private key is not expected: {}", + sign_alg, error_details)); + callback->Run(false, "", nullptr); + return; + } + // Sign. 
std::string sig = pem_key->Sign(in, signature_algorithm); - bool success = !sig.empty(); ASSERT(res.filter_chain_.has_value()); callback->Run(success, sig, diff --git a/test/extensions/quic_listeners/quiche/envoy_quic_proof_source_test.cc b/test/extensions/quic_listeners/quiche/envoy_quic_proof_source_test.cc index d896dbb86b7c..cbf66f511f50 100644 --- a/test/extensions/quic_listeners/quiche/envoy_quic_proof_source_test.cc +++ b/test/extensions/quic_listeners/quiche/envoy_quic_proof_source_test.cc @@ -242,6 +242,42 @@ GUy+n0vQNB0cXGzgcGI= testGetProof(false); } +TEST_F(EnvoyQuicProofSourceTest, UnexpectedPrivateKey) { + EXPECT_CALL(listen_socket_, ioHandle()); + EXPECT_CALL(filter_chain_manager_, findFilterChain(_)) + .WillOnce(Invoke([&](const Network::ConnectionSocket&) { return &filter_chain_; })); + auto server_context_config = std::make_unique(); + auto server_context_config_ptr = server_context_config.get(); + QuicServerTransportSocketFactory transport_socket_factory(std::move(server_context_config)); + EXPECT_CALL(filter_chain_, transportSocketFactory()) + .WillRepeatedly(ReturnRef(transport_socket_factory)); + + Ssl::MockTlsCertificateConfig tls_cert_config; + std::vector> tls_cert_configs{ + std::reference_wrapper(tls_cert_config)}; + EXPECT_CALL(*server_context_config_ptr, tlsCertificates()) + .WillRepeatedly(Return(tls_cert_configs)); + std::string rsa_pkey_1024_len(R"(-----BEGIN RSA PRIVATE KEY----- +MIICWwIBAAKBgQC79hDq/OwN3ke3EF6Ntdi9R+VSrl9MStk992l1us8lZhq+e0zU +OlvxbUeZ8wyVkzs1gqI1it1IwF+EpdGhHhjggZjg040GD3HWSuyCzpHh+nLwJxtQ +D837PCg0zl+TnKv1YjY3I1F3trGhIqfd2B6pgaJ4hpr+0hdqnKP0Htd4DwIDAQAB +AoGASNypUD59Tx70k+1fifWNMEq3heacgJmfPxsyoXWqKSg8g8yOStLYo20mTXJf +VXg+go7CTJkpELOqE2SoL5nYMD0D/YIZCgDx85k0GWHdA6udNn4to95ZTeZPrBHx +T0QNQHnZI3A7RwLinO60IRY0NYzhkTEBxIuvIY6u0DVbrAECQQDpshbxK3DHc7Yi +Au7BUsxP8RbG4pP5IIVoD4YvJuwUkdrfrwejqTdkfchJJc+Gu/+h8vy7eASPHLLT +NBk5wFoPAkEAzeaKnx0CgNs0RX4+sSF727FroD98VUM38OFEJQ6U9OAWGvaKd8ey +yAYUjR2Sl5ZRyrwWv4IqyWgUGhZqNG0CAQJAPTjjm8DGpenhcB2WkNzxG4xMbEQV +gfGMIYvXmmi29liTn4AKH00IbvIo00jtih2cRcATh8VUZG2fR4dhiGik7wJAWSwS +NwzaS7IjtkERp6cHvELfiLxV/Zsp/BGjcKUbD96I1E6X834ySHyRo/f9x9bbP4Es +HO6j1yxTIGU6w8++AQJACdFPnRidOaj5oJmcZq0s6WGTYfegjTOKgi5KQzO0FTwG +qGm130brdD+1U1EJnEFmleLZ/W6mEi3MxcKpWOpTqQ== +-----END RSA PRIVATE KEY-----)"); + EXPECT_CALL(tls_cert_config, privateKey()).WillOnce(ReturnRef(rsa_pkey_1024_len)); + proof_source_.ComputeTlsSignature(server_address_, client_address_, hostname_, + SSL_SIGN_RSA_PSS_RSAE_SHA256, "payload", + std::make_unique(false)); +} + TEST_F(EnvoyQuicProofSourceTest, InvalidPrivateKey) { EXPECT_CALL(listen_socket_, ioHandle()); EXPECT_CALL(filter_chain_manager_, findFilterChain(_)) From 67c5b435fd4723198091a658d913823b586b3854 Mon Sep 17 00:00:00 2001 From: asraa Date: Mon, 10 Aug 2020 13:39:42 -0400 Subject: [PATCH 896/909] [fuzz] fix bad inputs and config bugs (#12504) * Unset metadata matcher causes crash, since no matcher is set * `encodeHeaders` in HCM requires that the only 1xx header is a 101 upgrade * stream info destructor issue -- this one no longer reproduces Fixes: https://oss-fuzz.com/testcase?key=4863844862918656 https://oss-fuzz.com/testcase-detail/5656400764862464 https://oss-fuzz.com/testcase-detail/5631179290836992 Signed-off-by: Asra Ali --- source/common/access_log/access_log_impl.cc | 18 +++++++------ .../common/access_log/access_log_impl_test.cc | 26 +++++++++++++++++++ .../http/conn_manager_impl_corpus/status_163 | 22 ++++++++++++++++ .../http/conn_manager_impl_fuzz_test.cc | 8 +++++- 
.../fuzz/filter_corpus/metadata_not_reached | 7 +++++ .../h1_corpus/stream_info_destructor | 24 +++++++++++++++++ 6 files changed, 96 insertions(+), 9 deletions(-) create mode 100644 test/common/http/conn_manager_impl_corpus/status_163 create mode 100644 test/extensions/filters/http/common/fuzz/filter_corpus/metadata_not_reached create mode 100644 test/integration/h1_corpus/stream_info_destructor diff --git a/source/common/access_log/access_log_impl.cc b/source/common/access_log/access_log_impl.cc index db4b8330370c..447f951bc2f8 100644 --- a/source/common/access_log/access_log_impl.cc +++ b/source/common/access_log/access_log_impl.cc @@ -262,15 +262,17 @@ MetadataFilter::MetadataFilter(const envoy::config::accesslog::v3::MetadataFilte : default_match_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(filter_config, match_if_key_not_found, true)), filter_(filter_config.matcher().filter()) { - auto& matcher_config = filter_config.matcher(); + if (filter_config.has_matcher()) { + auto& matcher_config = filter_config.matcher(); - for (const auto& seg : matcher_config.path()) { - path_.push_back(seg.key()); - } + for (const auto& seg : matcher_config.path()) { + path_.push_back(seg.key()); + } - // Matches if the value equals the configured 'MetadataMatcher' value. - const auto& val = matcher_config.value(); - value_matcher_ = Matchers::ValueMatcher::create(val); + // Matches if the value equals the configured 'MetadataMatcher' value. + const auto& val = matcher_config.value(); + value_matcher_ = Matchers::ValueMatcher::create(val); + } // Matches if the value is present in dynamic metadata auto present_val = envoy::type::matcher::v3::ValueMatcher(); @@ -286,7 +288,7 @@ bool MetadataFilter::evaluate(const StreamInfo::StreamInfo& info, const Http::Re // If the key corresponds to a set value in dynamic metadata, return true if the value matches the // the configured 'MetadataMatcher' value and false otherwise if (present_matcher_->match(value)) { - return value_matcher_->match(value); + return value_matcher_ && value_matcher_->match(value); } // If the key does not correspond to a set value in dynamic metadata, return true if diff --git a/test/common/access_log/access_log_impl_test.cc b/test/common/access_log/access_log_impl_test.cc index 2c882010c657..788d8885ba50 100644 --- a/test/common/access_log/access_log_impl_test.cc +++ b/test/common/access_log/access_log_impl_test.cc @@ -1310,6 +1310,32 @@ name: accesslog EXPECT_CALL(*file_, write(_)).Times(0); } +// This is a regression test for fuzz bug https://oss-fuzz.com/testcase-detail/4863844862918656 +// where a missing matcher would attempt to create a ValueMatcher and crash in debug mode. Instead, +// the configured metadata filter does not match. +TEST_F(AccessLogImplTest, MetadataFilterNoMatcher) { + const std::string yaml = R"EOF( +name: accesslog +filter: + metadata_filter: + match_if_key_not_found: false +typed_config: + "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog + path: /dev/null + )EOF"; + + TestStreamInfo stream_info; + ProtobufWkt::Struct metadata_val; + stream_info.setDynamicMetadata("some.namespace", metadata_val); + + const InstanceSharedPtr log = + AccessLogFactory::fromProto(parseAccessLogFromV3Yaml(yaml), context_); + + // If no matcher is set, then expect no logs. 
+ EXPECT_CALL(*file_, write(_)).Times(0); + log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info); +} + TEST_F(AccessLogImplTest, MetadataFilterNoKey) { const std::string default_true_yaml = R"EOF( name: accesslog diff --git a/test/common/http/conn_manager_impl_corpus/status_163 b/test/common/http/conn_manager_impl_corpus/status_163 new file mode 100644 index 000000000000..3c8e7c99f56f --- /dev/null +++ b/test/common/http/conn_manager_impl_corpus/status_163 @@ -0,0 +1,22 @@ +actions { + new_stream { + request_headers { + headers { + key: ":path" + value: "/" + } + } + } +} +actions { + stream_action { + response { + headers { + headers { + key: ":status" + value: "162" + } + } + } + } +} \ No newline at end of file diff --git a/test/common/http/conn_manager_impl_fuzz_test.cc b/test/common/http/conn_manager_impl_fuzz_test.cc index 4a7315cf56c4..99f8b18dc779 100644 --- a/test/common/http/conn_manager_impl_fuzz_test.cc +++ b/test/common/http/conn_manager_impl_fuzz_test.cc @@ -477,11 +477,17 @@ class FuzzStream { Fuzz::fromHeaders(response_action.headers())); // The client codec will ensure we always have a valid :status. // Similarly, local replies should always contain this. + uint64_t status; try { - Utility::getResponseStatus(*headers); + status = Utility::getResponseStatus(*headers); } catch (const CodecClientException&) { headers->setReferenceKey(Headers::get().Status, "200"); } + // The only 1xx header that may be provided to encodeHeaders() is a 101 upgrade, + // guaranteed by the codec parsers. See include/envoy/http/filter.h. + if (CodeUtility::is1xx(status) && status != enumToInt(Http::Code::SwitchingProtocols)) { + headers->setReferenceKey(Headers::get().Status, "200"); + } decoder_filter_->callbacks_->encodeHeaders(std::move(headers), end_stream); state = end_stream ? 
StreamState::Closed : StreamState::PendingDataOrTrailers; } diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/metadata_not_reached b/test/extensions/filters/http/common/fuzz/filter_corpus/metadata_not_reached new file mode 100644 index 000000000000..0e714ec32f42 --- /dev/null +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/metadata_not_reached @@ -0,0 +1,7 @@ +config { + name: "envoy.router" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + value: "\020\001\032\200\005\n\0012\022\372\004:\367\004\022\207\004:\204\004\022\255\001:\252\001\022\004\n\002\n\000\022[2Y\n\006\n\004\n\002\010\001\nK:I\022927\n\002R\000\n12/\n):\'\022\tB\007\n\005\n\001)@\001\022\0222\020\n\006\n\004\n\002\010\002\n\002\032\000\n\002b\000\022\006\n\004\n\002\010\001\n\002\032\000\022\006\n\004\n\002\010\001\022\004\n\002\n\000\n\002\032\000\022\006\n\004\n\002\010\001\022725\n\004\n\002\n\000\n\002\032\000\n)2\'\n!:\037\022\tB\007\n\005\n\001)@\001\022\n2\010\n\002\032\000\n\002\"\000\022\006\n\004\n\002\010\001\n\002\032\000\022\004\n\002\n\000\022\004\n\002\n\000\022\263\0022\260\002\n\002R\000\n\245\0022\242\002\n\206\0022\203\002\nw2u\nj:h\022\004\n\002\n\000\022@2>\n8:6\022&2$\n\002R\000\n\0362\034\n\026:\024\022\n2\010\n\002\032\000\n\002\"\000\022\006\n\004\n\002\010\001\n\002\032\000\022\006\n\004\n\002\010\001\022\004\n\002\n\000\n\002\032\000\022\006\n\004\n\002\010\001\022\0202\016\n\004\n\002\n\000\n\002\032\000\n\002\"\000\022\004\n\002\n\000\n\007R\005\n\001\002\020\001\n.:,\022\0342\032\n\005R\003\n\001\004\n\r2\013\n\005R\003\n\001\002\n\002\032\000\n\002\032\000\022\006\n\004\n\002\010\001\022\004\n\002\n\000\nR:P\022<2:\n\006\n\004\n\002\010\001\n,:*\022\0342\032\n\005R\003\n\001\004\n\r2\013\n\005R\003\n\001\002\n\002\032\000\n\002\032\000\022\004\n\002\n\000\022\004\n\002\n\000\n\002\032\000\022\006\n\004\n\002\010\001\022\002\"\000\022\004\n\002\n\000\n\004R\002\020\001\n\027:\025\022\r2\013\n\005R\003\n\001\004\n\002\032\000\022\004\n\002\n\000\n\002\032\000\022\006\n\004\n\002\010\001\022\006\n\004\n\002\010\001\022\006\n\004\n\002\010\002\022\002\032\000\022J*H\nD\n\000\032@\022>2<\n:28\n6:4\022220\n.2,\n*2(\n&:$\022\"2 \n\000\n\034:\032\022\0302\026\n\n2\010\n\006:\004\022\0022\000\n\010:\006\022\0042\002\n\000\030\001\022\033:\031\022\002J\000\022\013\n\t\n\007\010\001\022\003\032\001/\022\006\n\004\n\002\010\002\032(\n\016\177\177\177\177\177\177\177\177\177\177\177\177\177\177\022\0262\024\n\002R\000\n\n2\010\n\002R\000\n\002\032\000\n\002\032\0000\001" + } +} \ No newline at end of file diff --git a/test/integration/h1_corpus/stream_info_destructor b/test/integration/h1_corpus/stream_info_destructor new file mode 100644 index 000000000000..63f9a21a8fb7 --- /dev/null +++ b/test/integration/h1_corpus/stream_info_destructor @@ -0,0 +1,24 @@ +events { + downstream_send_bytes: "POST /test/long/url HTTP/1.1\r\nhost: host\r\nx-lyft-user-id: -063%\nuser-agent: /4302450943\n\t\t08856android\363\243x-lyft-user-id: -063%\nuser-agent: /4;02450943\n\t\t08856android\363\243\201$80\n\t\t\t\n\t\t\t\tAe1\201\24180\n\t\t\t\n\t\t\t\tAe118\tefts " +} +events { +} +events { + downstream_send_bytes: "POST //urk HTTP/1.1\r\nshhfot: ost\r\n -253%\nuser-agent: /0%\nuser-agent: 
/430%\nuser-agent:4967:18446744073709551615iOS~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~>~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\201~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~a~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~-7749978774642053139~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~|~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttuttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt\326Utttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
ttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
ttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt-4017153681670550988tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt|ttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
[Binary fuzz-corpus data elided: raw codec fuzz test cases in proto text form, consisting of `events { downstream_send_bytes: "POST ..." }` entries containing malformed HTTP/1.1 requests padded with long runs of repeated filler bytes. The byte content is not reproducible as text and carries no reviewable information beyond the corpus entries themselves.]
ttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt
tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttaaaaaaaaaaaaaaaaaaaaaaa-6742158280474489582aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n\r\n" +} +events { +} \ No newline at end of file From cf850688a93b8dea5705170a211ea98b77beda61 Mon Sep 17 00:00:00 2001 From: Arthur Yan <55563955+arthuryan-k@users.noreply.github.com> Date: Mon, 10 Aug 2020 14:01:28 -0400 Subject: [PATCH 897/909] fuzz: added fuzz test for listener filter http_inspector (#12411) Commit Message: Added fuzz test for listener filter Additional Description: Extended generic listener filter fuzzer library to support mocked dispatcher and system call behavior Created http_inspector_corpus and populated with testcases (valid and invalid headers) Created http_inspector_fuzz_test.cc and updated ListenerFilterFuzzer API Signed-off-by: Arthur Yan --- .../filters/listener/common/fuzz/BUILD | 13 ++- .../common/fuzz/listener_filter_fakes.cc | 86 ++++++++++++++++++ .../common/fuzz/listener_filter_fakes.h | 70 +++++++++++++++ 
.../common/fuzz/listener_filter_fuzzer.cc | 87 +++++++++++++++++-- .../common/fuzz/listener_filter_fuzzer.h | 39 +++++++-- .../common/fuzz/listener_filter_fuzzer.proto | 1 + .../filters/listener/http_inspector/BUILD | 11 +++ .../http_inspector_corpus/bad_header | 1 + .../http_inspector_corpus/incomplete_header | 1 + .../http_inspector_corpus/invalid_method | 1 + .../http_inspector_corpus/invalid_request | 1 + .../http_inspector_corpus/multiple_http10 | 3 + .../http_inspector_corpus/multiple_incomplete | 2 + .../http_inspector_corpus/valid_http10 | 1 + .../http_inspector_corpus/valid_http11 | 1 + .../http_inspector_corpus/valid_http2 | 1 + .../http_inspector_fuzz_test.cc | 32 +++++++ .../original_dst_corpus/invalid_scheme | 3 + .../original_dst_corpus/invalid_test | 4 - .../{unix_test => invalid_unix} | 1 - .../{ipv4_test => valid_ipv4} | 1 - .../{ipv6_test => valid_ipv6} | 1 - .../original_dst/original_dst_fuzz_test.cc | 9 +- .../{ipv4_test => valid_ipv4} | 7 +- .../{unix_test => valid_unix} | 5 +- .../original_src/original_src_fuzz_test.cc | 10 +-- .../original_src/original_src_fuzz_test.proto | 2 +- test/mocks/network/BUILD | 8 -- test/mocks/network/fakes.h | 62 ------------- 29 files changed, 346 insertions(+), 118 deletions(-) create mode 100644 test/extensions/filters/listener/common/fuzz/listener_filter_fakes.cc create mode 100644 test/extensions/filters/listener/common/fuzz/listener_filter_fakes.h create mode 100644 test/extensions/filters/listener/http_inspector/http_inspector_corpus/bad_header create mode 100644 test/extensions/filters/listener/http_inspector/http_inspector_corpus/incomplete_header create mode 100644 test/extensions/filters/listener/http_inspector/http_inspector_corpus/invalid_method create mode 100644 test/extensions/filters/listener/http_inspector/http_inspector_corpus/invalid_request create mode 100644 test/extensions/filters/listener/http_inspector/http_inspector_corpus/multiple_http10 create mode 100644 test/extensions/filters/listener/http_inspector/http_inspector_corpus/multiple_incomplete create mode 100644 test/extensions/filters/listener/http_inspector/http_inspector_corpus/valid_http10 create mode 100644 test/extensions/filters/listener/http_inspector/http_inspector_corpus/valid_http11 create mode 100644 test/extensions/filters/listener/http_inspector/http_inspector_corpus/valid_http2 create mode 100644 test/extensions/filters/listener/http_inspector/http_inspector_fuzz_test.cc create mode 100644 test/extensions/filters/listener/original_dst/original_dst_corpus/invalid_scheme delete mode 100644 test/extensions/filters/listener/original_dst/original_dst_corpus/invalid_test rename test/extensions/filters/listener/original_dst/original_dst_corpus/{unix_test => invalid_unix} (55%) rename test/extensions/filters/listener/original_dst/original_dst_corpus/{ipv4_test => valid_ipv4} (54%) rename test/extensions/filters/listener/original_dst/original_dst_corpus/{ipv6_test => valid_ipv6} (56%) rename test/extensions/filters/listener/original_src/original_src_corpus/{ipv4_test => valid_ipv4} (53%) rename test/extensions/filters/listener/original_src/original_src_corpus/{unix_test => valid_unix} (61%) delete mode 100644 test/mocks/network/fakes.h diff --git a/test/extensions/filters/listener/common/fuzz/BUILD b/test/extensions/filters/listener/common/fuzz/BUILD index e306f3a43382..85ed3cbf7304 100644 --- a/test/extensions/filters/listener/common/fuzz/BUILD +++ b/test/extensions/filters/listener/common/fuzz/BUILD @@ -19,9 +19,20 @@ envoy_cc_test_library( srcs = 
["listener_filter_fuzzer.cc"], hdrs = ["listener_filter_fuzzer.h"], deps = [ + ":listener_filter_fakes", ":listener_filter_fuzzer_proto_cc_proto", "//include/envoy/network:filter_interface", - "//test/mocks/network:network_fakes", + "//test/mocks/network:network_mocks", + "//test/test_common:threadsafe_singleton_injector_lib", + ], +) + +envoy_cc_test_library( + name = "listener_filter_fakes", + srcs = ["listener_filter_fakes.cc"], + hdrs = ["listener_filter_fakes.h"], + deps = [ + "//source/common/api:os_sys_calls_lib", "//test/mocks/network:network_mocks", ], ) diff --git a/test/extensions/filters/listener/common/fuzz/listener_filter_fakes.cc b/test/extensions/filters/listener/common/fuzz/listener_filter_fakes.cc new file mode 100644 index 000000000000..f0546c7950fe --- /dev/null +++ b/test/extensions/filters/listener/common/fuzz/listener_filter_fakes.cc @@ -0,0 +1,86 @@ +#include "test/extensions/filters/listener/common/fuzz/listener_filter_fakes.h" + +namespace Envoy { +namespace Extensions { +namespace ListenerFilters { + +Network::IoHandle& FakeConnectionSocket::ioHandle() { return *io_handle_; } + +const Network::IoHandle& FakeConnectionSocket::ioHandle() const { return *io_handle_; } + +void FakeConnectionSocket::setLocalAddress( + const Network::Address::InstanceConstSharedPtr& local_address) { + local_address_ = local_address; + if (local_address_ != nullptr) { + addr_type_ = local_address_->type(); + } +} + +void FakeConnectionSocket::setRemoteAddress( + const Network::Address::InstanceConstSharedPtr& remote_address) { + remote_address_ = remote_address; +} + +const Network::Address::InstanceConstSharedPtr& FakeConnectionSocket::localAddress() const { + return local_address_; +} + +const Network::Address::InstanceConstSharedPtr& FakeConnectionSocket::remoteAddress() const { + return remote_address_; +} + +Network::Address::Type FakeConnectionSocket::addressType() const { return addr_type_; } + +absl::optional FakeConnectionSocket::ipVersion() const { + if (local_address_ == nullptr || addr_type_ != Network::Address::Type::Ip) { + return absl::nullopt; + } + + return local_address_->ip()->version(); +} + +void FakeConnectionSocket::setDetectedTransportProtocol(absl::string_view protocol) { + transport_protocol_ = std::string(protocol); +} + +absl::string_view FakeConnectionSocket::detectedTransportProtocol() const { + return transport_protocol_; +} + +void FakeConnectionSocket::setRequestedApplicationProtocols( + const std::vector& protocols) { + application_protocols_.clear(); + for (const auto& protocol : protocols) { + application_protocols_.emplace_back(protocol); + } +} + +const std::vector& FakeConnectionSocket::requestedApplicationProtocols() const { + return application_protocols_; +} + +void FakeConnectionSocket::setRequestedServerName(absl::string_view server_name) { + server_name_ = std::string(server_name); +} + +absl::string_view FakeConnectionSocket::requestedServerName() const { return server_name_; } + +Api::SysCallIntResult FakeConnectionSocket::getSocketOption(int level, int, void* optval, + socklen_t*) const { + switch (level) { + case SOL_IPV6: + static_cast(optval)->ss_family = AF_INET6; + break; + case SOL_IP: + static_cast(optval)->ss_family = AF_INET; + break; + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } + + return Api::SysCallIntResult{0, 0}; +} + +} // namespace ListenerFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/listener/common/fuzz/listener_filter_fakes.h 
b/test/extensions/filters/listener/common/fuzz/listener_filter_fakes.h new file mode 100644 index 000000000000..4e13b4e6f418 --- /dev/null +++ b/test/extensions/filters/listener/common/fuzz/listener_filter_fakes.h @@ -0,0 +1,70 @@ +#include "common/api/os_sys_calls_impl.h" +#include "common/network/io_socket_handle_impl.h" + +#include "test/mocks/network/mocks.h" + +#include "gmock/gmock.h" + +namespace Envoy { +namespace Extensions { +namespace ListenerFilters { + +static constexpr int kFakeSocketFd = 42; + +class FakeConnectionSocket : public Network::MockConnectionSocket { +public: + FakeConnectionSocket() + : io_handle_(std::make_unique(kFakeSocketFd)), + local_address_(nullptr), remote_address_(nullptr) {} + + ~FakeConnectionSocket() override { io_handle_->close(); } + + Network::IoHandle& ioHandle() override; + + const Network::IoHandle& ioHandle() const override; + + void setLocalAddress(const Network::Address::InstanceConstSharedPtr& local_address) override; + + void setRemoteAddress(const Network::Address::InstanceConstSharedPtr& remote_address) override; + + const Network::Address::InstanceConstSharedPtr& localAddress() const override; + + const Network::Address::InstanceConstSharedPtr& remoteAddress() const override; + + Network::Address::Type addressType() const override; + + absl::optional ipVersion() const override; + + void setRequestedApplicationProtocols(const std::vector& protocols) override; + + const std::vector& requestedApplicationProtocols() const override; + + void setDetectedTransportProtocol(absl::string_view protocol) override; + + absl::string_view detectedTransportProtocol() const override; + + void setRequestedServerName(absl::string_view server_name) override; + + absl::string_view requestedServerName() const override; + + Api::SysCallIntResult getSocketOption(int level, int, void* optval, socklen_t*) const override; + +private: + const Network::IoHandlePtr io_handle_; + Network::Address::InstanceConstSharedPtr local_address_; + Network::Address::InstanceConstSharedPtr remote_address_; + Network::Address::Type addr_type_; + std::vector application_protocols_; + std::string transport_protocol_; + std::string server_name_; +}; + +// TODO: Move over to Fake (name is confusing) +class FakeOsSysCalls : public Api::OsSysCallsImpl { +public: + MOCK_METHOD(Api::SysCallSizeResult, recv, (os_fd_t, void*, size_t, int)); +}; + +} // namespace ListenerFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.cc b/test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.cc index f38aba8918f2..0f5aa60b8d44 100644 --- a/test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.cc +++ b/test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.cc @@ -1,7 +1,5 @@ #include "test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.h" -#include "common/network/utility.h" - namespace Envoy { namespace Extensions { namespace ListenerFilters { @@ -10,21 +8,92 @@ void ListenerFilterFuzzer::fuzz( Network::ListenerFilter& filter, const test::extensions::filters::listener::FilterFuzzTestCase& input) { try { - fuzzerSetup(input); + socket_.setLocalAddress(Network::Utility::resolveUrl(input.sock().local_address())); } catch (const EnvoyException& e) { - ENVOY_LOG_MISC(debug, "EnvoyException: {}", e.what()); - return; + // Socket's local address will be nullptr by default if fuzzed local address is malformed + // or missing - local address field in proto is optional + } 
+ try { + socket_.setRemoteAddress(Network::Utility::resolveUrl(input.sock().remote_address())); + } catch (const EnvoyException& e) { + // Socket's remote address will be nullptr by default if fuzzed remote address is malformed + // or missing - remote address field in proto is optional + } + + FuzzedHeader header(input); + + if (!header.empty()) { + ON_CALL(os_sys_calls_, recv(kFakeSocketFd, _, _, MSG_PEEK)) + .WillByDefault(testing::Return(Api::SysCallSizeResult{static_cast(0), 0})); + + ON_CALL(dispatcher_, + createFileEvent_(_, _, Event::FileTriggerType::Edge, + Event::FileReadyType::Read | Event::FileReadyType::Closed)) + .WillByDefault(testing::DoAll(testing::SaveArg<1>(&file_event_callback_), + testing::ReturnNew>())); } filter.onAccept(cb_); + + if (file_event_callback_ == nullptr) { + // If filter does not call createFileEvent (i.e. original_dst and original_src) + return; + } + + if (!header.empty()) { + { + testing::InSequence s; + + EXPECT_CALL(os_sys_calls_, recv(kFakeSocketFd, _, _, MSG_PEEK)) + .Times(testing::AnyNumber()) + .WillRepeatedly(Invoke( + [&header](os_fd_t, void* buffer, size_t length, int) -> Api::SysCallSizeResult { + return header.next(buffer, length); + })); + } + + bool got_continue = false; + + ON_CALL(cb_, continueFilterChain(true)) + .WillByDefault(testing::InvokeWithoutArgs([&got_continue]() { got_continue = true; })); + + while (!got_continue) { + if (header.done()) { // End of stream reached but not done + file_event_callback_(Event::FileReadyType::Closed); + } else { + file_event_callback_(Event::FileReadyType::Read); + } + } + } } -void ListenerFilterFuzzer::socketSetup( - const test::extensions::filters::listener::FilterFuzzTestCase& input) { - socket_.setLocalAddress(Network::Utility::resolveUrl(input.sock().local_address())); - socket_.setRemoteAddress(Network::Utility::resolveUrl(input.sock().remote_address())); +FuzzedHeader::FuzzedHeader(const test::extensions::filters::listener::FilterFuzzTestCase& input) + : nreads_(input.data_size()), nread_(0) { + size_t len = 0; + for (int i = 0; i < nreads_; i++) { + len += input.data(i).size(); + } + + header_.reserve(len); + + for (int i = 0; i < nreads_; i++) { + header_ += input.data(i); + indices_.push_back(header_.size()); + } } +Api::SysCallSizeResult FuzzedHeader::next(void* buffer, size_t length) { + if (done()) { // End of stream reached + nread_ = nreads_ - 1; // Decrement to avoid out-of-range for last recv() call + } + memcpy(buffer, header_.data(), std::min(indices_[nread_], length)); + return Api::SysCallSizeResult{static_cast(indices_[nread_++]), 0}; +} + +bool FuzzedHeader::done() { return nread_ >= nreads_; } + +bool FuzzedHeader::empty() { return nreads_ == 0; } + } // namespace ListenerFilters } // namespace Extensions } // namespace Envoy diff --git a/test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.h b/test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.h index fe81a9e12cc4..66b6f8707bfd 100644 --- a/test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.h +++ b/test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.h @@ -1,8 +1,10 @@ #include "envoy/network/filter.h" +#include "test/extensions/filters/listener/common/fuzz/listener_filter_fakes.h" #include "test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.pb.validate.h" -#include "test/mocks/network/fakes.h" +#include "test/mocks/event/mocks.h" #include "test/mocks/network/mocks.h" +#include "test/test_common/threadsafe_singleton_injector.h" #include 
"gmock/gmock.h" @@ -12,19 +14,40 @@ namespace ListenerFilters { class ListenerFilterFuzzer { public: + ListenerFilterFuzzer() { + ON_CALL(cb_, socket()).WillByDefault(testing::ReturnRef(socket_)); + ON_CALL(cb_, dispatcher()).WillByDefault(testing::ReturnRef(dispatcher_)); + } + void fuzz(Network::ListenerFilter& filter, const test::extensions::filters::listener::FilterFuzzTestCase& input); private: - void fuzzerSetup(const test::extensions::filters::listener::FilterFuzzTestCase& input) { - ON_CALL(cb_, socket()).WillByDefault(testing::ReturnRef(socket_)); - socketSetup(input); - } + FakeOsSysCalls os_sys_calls_; + TestThreadsafeSingletonInjector os_calls_{&os_sys_calls_}; + NiceMock cb_; + FakeConnectionSocket socket_; + NiceMock dispatcher_; + Event::FileReadyCb file_event_callback_; +}; - void socketSetup(const test::extensions::filters::listener::FilterFuzzTestCase& input); +class FuzzedHeader { +public: + FuzzedHeader(const test::extensions::filters::listener::FilterFuzzTestCase& input); - NiceMock cb_; - Network::FakeConnectionSocket socket_; + // Copies next read into buffer and returns the number of bytes written + Api::SysCallSizeResult next(void* buffer, size_t length); + + bool done(); + + // Returns true if data field in proto is empty + bool empty(); + +private: + const int nreads_; // Number of reads + int nread_; // Counter of current read + std::string header_; // Construct header from single or multiple reads + std::vector indices_; // Ending indices for each read }; } // namespace ListenerFilters diff --git a/test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.proto b/test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.proto index 916c645d41ba..5741ed9edfa3 100644 --- a/test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.proto +++ b/test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.proto @@ -9,4 +9,5 @@ message Socket { message FilterFuzzTestCase { Socket sock = 1; + repeated string data = 2; } \ No newline at end of file diff --git a/test/extensions/filters/listener/http_inspector/BUILD b/test/extensions/filters/listener/http_inspector/BUILD index 8530e24434d9..05f898a7bf90 100644 --- a/test/extensions/filters/listener/http_inspector/BUILD +++ b/test/extensions/filters/listener/http_inspector/BUILD @@ -1,5 +1,6 @@ load( "//bazel:envoy_build_system.bzl", + "envoy_cc_fuzz_test", "envoy_package", ) load( @@ -42,3 +43,13 @@ envoy_extension_cc_test( "//test/test_common:threadsafe_singleton_injector_lib", ], ) + +envoy_cc_fuzz_test( + name = "http_inspector_fuzz_test", + srcs = ["http_inspector_fuzz_test.cc"], + corpus = "http_inspector_corpus", + deps = [ + "//source/extensions/filters/listener/http_inspector:http_inspector_lib", + "//test/extensions/filters/listener/common/fuzz:listener_filter_fuzzer_lib", + ], +) diff --git a/test/extensions/filters/listener/http_inspector/http_inspector_corpus/bad_header b/test/extensions/filters/listener/http_inspector/http_inspector_corpus/bad_header new file mode 100644 index 000000000000..a84991228ff5 --- /dev/null +++ b/test/extensions/filters/listener/http_inspector/http_inspector_corpus/bad_header @@ -0,0 +1 @@ +data: "X" \ No newline at end of file diff --git a/test/extensions/filters/listener/http_inspector/http_inspector_corpus/incomplete_header b/test/extensions/filters/listener/http_inspector/http_inspector_corpus/incomplete_header new file mode 100644 index 000000000000..db337b0c762a --- /dev/null +++ 
b/test/extensions/filters/listener/http_inspector/http_inspector_corpus/incomplete_header @@ -0,0 +1 @@ +data: "GE" \ No newline at end of file diff --git a/test/extensions/filters/listener/http_inspector/http_inspector_corpus/invalid_method b/test/extensions/filters/listener/http_inspector/http_inspector_corpus/invalid_method new file mode 100644 index 000000000000..b14ffd72e116 --- /dev/null +++ b/test/extensions/filters/listener/http_inspector/http_inspector_corpus/invalid_method @@ -0,0 +1 @@ +data: "BAD /anything HTTP/1.1" \ No newline at end of file diff --git a/test/extensions/filters/listener/http_inspector/http_inspector_corpus/invalid_request b/test/extensions/filters/listener/http_inspector/http_inspector_corpus/invalid_request new file mode 100644 index 000000000000..a7943ddb30b1 --- /dev/null +++ b/test/extensions/filters/listener/http_inspector/http_inspector_corpus/invalid_request @@ -0,0 +1 @@ +data: "BAD /anything HTTP/1.1\r\n" \ No newline at end of file diff --git a/test/extensions/filters/listener/http_inspector/http_inspector_corpus/multiple_http10 b/test/extensions/filters/listener/http_inspector/http_inspector_corpus/multiple_http10 new file mode 100644 index 000000000000..42fa7434ebbb --- /dev/null +++ b/test/extensions/filters/listener/http_inspector/http_inspector_corpus/multiple_http10 @@ -0,0 +1,3 @@ +data: "GET /anyt" +data: "hing HT" +data: "TP/1.0\r" \ No newline at end of file diff --git a/test/extensions/filters/listener/http_inspector/http_inspector_corpus/multiple_incomplete b/test/extensions/filters/listener/http_inspector/http_inspector_corpus/multiple_incomplete new file mode 100644 index 000000000000..58c5d8ad8613 --- /dev/null +++ b/test/extensions/filters/listener/http_inspector/http_inspector_corpus/multiple_incomplete @@ -0,0 +1,2 @@ +data: "G" +data: "E" \ No newline at end of file diff --git a/test/extensions/filters/listener/http_inspector/http_inspector_corpus/valid_http10 b/test/extensions/filters/listener/http_inspector/http_inspector_corpus/valid_http10 new file mode 100644 index 000000000000..5512c5504dd9 --- /dev/null +++ b/test/extensions/filters/listener/http_inspector/http_inspector_corpus/valid_http10 @@ -0,0 +1 @@ +data: "GET /anything HTTP/1.0\r\nhost: google.com\r\nuser-agent: curl/7.64.0\r\naccept: */*\r\nx-forwarded-proto: http\r\nx-request-id: a52df4a0-ed00-4a19-86a7-80e5049c6c84\r\nx-envoy-expected-rq-timeout-ms: 15000\r\ncontent-length: 0\r\n\r\n" \ No newline at end of file diff --git a/test/extensions/filters/listener/http_inspector/http_inspector_corpus/valid_http11 b/test/extensions/filters/listener/http_inspector/http_inspector_corpus/valid_http11 new file mode 100644 index 000000000000..56906d74b1c1 --- /dev/null +++ b/test/extensions/filters/listener/http_inspector/http_inspector_corpus/valid_http11 @@ -0,0 +1 @@ +data: "GET /anything HTTP/1.1\r\nhost: google.com\r\nuser-agent: curl/7.64.0\r\naccept: */*\r\nx-forwarded-proto: http\r\nx-request-id: a52df4a0-ed00-4a19-86a7-80e5049c6c84\r\nx-envoy-expected-rq-timeout-ms: 15000\r\ncontent-length: 3\r\n\r\nfoo" \ No newline at end of file diff --git a/test/extensions/filters/listener/http_inspector/http_inspector_corpus/valid_http2 b/test/extensions/filters/listener/http_inspector/http_inspector_corpus/valid_http2 new file mode 100644 index 000000000000..0e1faf044c0f --- /dev/null +++ b/test/extensions/filters/listener/http_inspector/http_inspector_corpus/valid_http2 @@ -0,0 +1 @@ +data: "PRI * 
HTTP/2.0\r\n\r\nSM\r\n\r\n\x00\x00\x0c\x04\x00\x00\x00\x00\x00\x00\x04\x10\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x04\x08\x00\x00\x00\x00\x00\x0f\xff\x00\x01\x00\x00}\x01\x05\x00\x00\x00\x01A\x8a\xa0\xe4\x1d\x13\x9d\t\xb8\xf0\x00\x0f\x04\x88`uzL\xe6\xaaf\x05\x82\x86z\x88%\xb6P\xc3\xab\xb8\xd2\xe0S\x03*/*@\x8d\xf2\xb4\xa7\xb3\xc0\xec\x90\xb2-]\x87I\xff\x83\x9d)\xaf@\x89\xf2\xb5\x85\xediP\x95\x8d\'\x9a\x18\x9e\x03\xf1\xcaU\x82&_Y\xa7[\n\xc3\x11\x19Y\xc7\xe4\x90\x04\x90\x8d\xb6\xe8?@\x96\xf2\xb1j\xee\x7fK\x17\xcde\"K\"\xd6vY&\xa4\xa7\xb5+R\x8f\x84\x0b`\x00?" \ No newline at end of file diff --git a/test/extensions/filters/listener/http_inspector/http_inspector_fuzz_test.cc b/test/extensions/filters/listener/http_inspector/http_inspector_fuzz_test.cc new file mode 100644 index 000000000000..5f867c22b179 --- /dev/null +++ b/test/extensions/filters/listener/http_inspector/http_inspector_fuzz_test.cc @@ -0,0 +1,32 @@ +#include "extensions/filters/listener/http_inspector/http_inspector.h" + +#include "test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.h" +#include "test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.pb.validate.h" +#include "test/fuzz/fuzz_runner.h" + +namespace Envoy { +namespace Extensions { +namespace ListenerFilters { +namespace HttpInspector { + +DEFINE_PROTO_FUZZER(const test::extensions::filters::listener::FilterFuzzTestCase& input) { + + try { + TestUtility::validate(input); + } catch (const ProtoValidationException& e) { + ENVOY_LOG_MISC(debug, "ProtoValidationException: {}", e.what()); + return; + } + + Stats::IsolatedStoreImpl store; + ConfigSharedPtr cfg = std::make_shared(store); + auto filter = std::make_unique(cfg); + + ListenerFilterFuzzer fuzzer; + fuzzer.fuzz(*filter, input); +} + +} // namespace HttpInspector +} // namespace ListenerFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/listener/original_dst/original_dst_corpus/invalid_scheme b/test/extensions/filters/listener/original_dst/original_dst_corpus/invalid_scheme new file mode 100644 index 000000000000..67994b567f87 --- /dev/null +++ b/test/extensions/filters/listener/original_dst/original_dst_corpus/invalid_scheme @@ -0,0 +1,3 @@ +sock { + local_address: "hello world" +} \ No newline at end of file diff --git a/test/extensions/filters/listener/original_dst/original_dst_corpus/invalid_test b/test/extensions/filters/listener/original_dst/original_dst_corpus/invalid_test deleted file mode 100644 index 7c650514ebbb..000000000000 --- a/test/extensions/filters/listener/original_dst/original_dst_corpus/invalid_test +++ /dev/null @@ -1,4 +0,0 @@ -sock { - local_address: "hello world" - remote_address: "tcp://0.0.0.0:0" -} \ No newline at end of file diff --git a/test/extensions/filters/listener/original_dst/original_dst_corpus/unix_test b/test/extensions/filters/listener/original_dst/original_dst_corpus/invalid_unix similarity index 55% rename from test/extensions/filters/listener/original_dst/original_dst_corpus/unix_test rename to test/extensions/filters/listener/original_dst/original_dst_corpus/invalid_unix index 3936a8a1c0b1..ee8917b15305 100644 --- a/test/extensions/filters/listener/original_dst/original_dst_corpus/unix_test +++ b/test/extensions/filters/listener/original_dst/original_dst_corpus/invalid_unix @@ -1,4 +1,3 @@ sock { local_address: "unix://tmp/server" - remote_address: "tcp://0.0.0.0:0" } \ No newline at end of file diff --git a/test/extensions/filters/listener/original_dst/original_dst_corpus/ipv4_test 
b/test/extensions/filters/listener/original_dst/original_dst_corpus/valid_ipv4 similarity index 54% rename from test/extensions/filters/listener/original_dst/original_dst_corpus/ipv4_test rename to test/extensions/filters/listener/original_dst/original_dst_corpus/valid_ipv4 index 20cdd6796db9..a0510b8c253c 100644 --- a/test/extensions/filters/listener/original_dst/original_dst_corpus/ipv4_test +++ b/test/extensions/filters/listener/original_dst/original_dst_corpus/valid_ipv4 @@ -1,4 +1,3 @@ sock { local_address: "tcp://0.0.0.0:0" - remote_address: "tcp://0.0.0.0:0" } \ No newline at end of file diff --git a/test/extensions/filters/listener/original_dst/original_dst_corpus/ipv6_test b/test/extensions/filters/listener/original_dst/original_dst_corpus/valid_ipv6 similarity index 56% rename from test/extensions/filters/listener/original_dst/original_dst_corpus/ipv6_test rename to test/extensions/filters/listener/original_dst/original_dst_corpus/valid_ipv6 index bda8f2989203..32bdadc805ce 100644 --- a/test/extensions/filters/listener/original_dst/original_dst_corpus/ipv6_test +++ b/test/extensions/filters/listener/original_dst/original_dst_corpus/valid_ipv6 @@ -1,4 +1,3 @@ sock { local_address: "tcp://[a:b:c:d::]:0" - remote_address: "tcp://0.0.0.0:0" } \ No newline at end of file diff --git a/test/extensions/filters/listener/original_dst/original_dst_fuzz_test.cc b/test/extensions/filters/listener/original_dst/original_dst_fuzz_test.cc index 5476b6326e3a..4eb1899f3b35 100644 --- a/test/extensions/filters/listener/original_dst/original_dst_fuzz_test.cc +++ b/test/extensions/filters/listener/original_dst/original_dst_fuzz_test.cc @@ -19,13 +19,8 @@ DEFINE_PROTO_FUZZER(const test::extensions::filters::listener::FilterFuzzTestCas } auto filter = std::make_unique(); - - try { - ListenerFilterFuzzer fuzzer; - fuzzer.fuzz(*filter, input); - } catch (const EnvoyException& e) { - ENVOY_LOG_MISC(debug, "EnvoyException: {}", e.what()); - } + ListenerFilterFuzzer fuzzer; + fuzzer.fuzz(*filter, input); } } // namespace OriginalDst diff --git a/test/extensions/filters/listener/original_src/original_src_corpus/ipv4_test b/test/extensions/filters/listener/original_src/original_src_corpus/valid_ipv4 similarity index 53% rename from test/extensions/filters/listener/original_src/original_src_corpus/ipv4_test rename to test/extensions/filters/listener/original_src/original_src_corpus/valid_ipv4 index 7d439bab6cad..e9acd000b463 100644 --- a/test/extensions/filters/listener/original_src/original_src_corpus/ipv4_test +++ b/test/extensions/filters/listener/original_src/original_src_corpus/valid_ipv4 @@ -1,11 +1,10 @@ config { - bind_port: true + bind_port: false mark: 0 } -data { +fuzzed { sock { - local_address: "tcp://0.0.0.0:0" remote_address: "tcp://1.2.3.4:0" } -} +} \ No newline at end of file diff --git a/test/extensions/filters/listener/original_src/original_src_corpus/unix_test b/test/extensions/filters/listener/original_src/original_src_corpus/valid_unix similarity index 61% rename from test/extensions/filters/listener/original_src/original_src_corpus/unix_test rename to test/extensions/filters/listener/original_src/original_src_corpus/valid_unix index ecb14359bd52..9726394370c6 100644 --- a/test/extensions/filters/listener/original_src/original_src_corpus/unix_test +++ b/test/extensions/filters/listener/original_src/original_src_corpus/valid_unix @@ -1,11 +1,10 @@ config { bind_port: true - mark: 0 + mark: 15 } -data { +fuzzed { sock { - local_address: "tcp://0.0.0.0:0" remote_address: 
"unix://domain.socket" } } \ No newline at end of file diff --git a/test/extensions/filters/listener/original_src/original_src_fuzz_test.cc b/test/extensions/filters/listener/original_src/original_src_fuzz_test.cc index c677a55b8d55..0116a7a98b36 100644 --- a/test/extensions/filters/listener/original_src/original_src_fuzz_test.cc +++ b/test/extensions/filters/listener/original_src/original_src_fuzz_test.cc @@ -21,14 +21,8 @@ DEFINE_PROTO_FUZZER( Config config(input.config()); auto filter = std::make_unique(config); - - try { - ListenerFilterFuzzer fuzzer; - fuzzer.fuzz(*filter, input.data()); - } catch (const EnvoyException& e) { - ENVOY_LOG_MISC(debug, "EnvoyException: {}", e.what()); - return; - } + ListenerFilterFuzzer fuzzer; + fuzzer.fuzz(*filter, input.fuzzed()); } } // namespace OriginalSrc diff --git a/test/extensions/filters/listener/original_src/original_src_fuzz_test.proto b/test/extensions/filters/listener/original_src/original_src_fuzz_test.proto index 303b3c86daaa..093378b09045 100644 --- a/test/extensions/filters/listener/original_src/original_src_fuzz_test.proto +++ b/test/extensions/filters/listener/original_src/original_src_fuzz_test.proto @@ -9,6 +9,6 @@ import "validate/validate.proto"; message OriginalSrcTestCase { envoy.extensions.filters.listener.original_src.v3.OriginalSrc config = 1 [(validate.rules).message.required = true]; - test.extensions.filters.listener.FilterFuzzTestCase data = 2 + test.extensions.filters.listener.FilterFuzzTestCase fuzzed = 2 [(validate.rules).message.required = true]; } \ No newline at end of file diff --git a/test/mocks/network/BUILD b/test/mocks/network/BUILD index 5f16adc6206f..020e4b6db404 100644 --- a/test/mocks/network/BUILD +++ b/test/mocks/network/BUILD @@ -63,11 +63,3 @@ envoy_cc_mock( "//source/common/network:utility_lib", ], ) - -envoy_cc_mock( - name = "network_fakes", - hdrs = ["fakes.h"], - deps = [ - ":network_mocks", - ], -) diff --git a/test/mocks/network/fakes.h b/test/mocks/network/fakes.h deleted file mode 100644 index ec69dce0ec0d..000000000000 --- a/test/mocks/network/fakes.h +++ /dev/null @@ -1,62 +0,0 @@ -#include "common/network/utility.h" - -#include "test/mocks/network/mocks.h" - -#include "gmock/gmock.h" - -namespace Envoy { -namespace Network { - -class FakeConnectionSocket : public MockConnectionSocket { -public: - ~FakeConnectionSocket() override = default; - - FakeConnectionSocket() : local_address_(nullptr), remote_address_(nullptr) {} - - FakeConnectionSocket(const Address::InstanceConstSharedPtr& local_address, - const Address::InstanceConstSharedPtr& remote_address) - : local_address_(local_address), remote_address_(remote_address) {} - - void setLocalAddress(const Address::InstanceConstSharedPtr& local_address) override { - local_address_ = local_address; - } - - void setRemoteAddress(const Address::InstanceConstSharedPtr& remote_address) override { - remote_address_ = remote_address; - } - - const Address::InstanceConstSharedPtr& localAddress() const override { return local_address_; } - - const Address::InstanceConstSharedPtr& remoteAddress() const override { return remote_address_; } - - Address::Type addressType() const override { return local_address_->type(); } - - absl::optional ipVersion() const override { - if (local_address_->type() != Address::Type::Ip) { - return absl::nullopt; - } - - return local_address_->ip()->version(); - } - - Api::SysCallIntResult getSocketOption(int level, int, void* optval, socklen_t*) const override { - switch (level) { - case SOL_IPV6: - 
static_cast(optval)->ss_family = AF_INET6; - break; - case SOL_IP: - static_cast(optval)->ss_family = AF_INET; - break; - default: - NOT_REACHED_GCOVR_EXCL_LINE; - } - - return Api::SysCallIntResult{0, 0}; - } - - Address::InstanceConstSharedPtr local_address_; - Address::InstanceConstSharedPtr remote_address_; -}; - -} // namespace Network -} // namespace Envoy From 366c095e735b47fa4e09f3029f3dc1b2e227a097 Mon Sep 17 00:00:00 2001 From: Sunjay Bhatia Date: Mon, 10 Aug 2020 17:14:43 -0400 Subject: [PATCH 898/909] Windows: Passing unit tests no longer need fails_on_windows tag (#12343) Signed-off-by: William A Rowe Jr Co-authored-by: Sunjay Bhatia --- test/common/http/http2/BUILD | 3 --- test/common/network/BUILD | 2 -- test/common/router/BUILD | 1 - test/common/upstream/BUILD | 2 -- test/extensions/filters/network/rocketmq_proxy/BUILD | 1 - test/server/BUILD | 1 - 6 files changed, 10 deletions(-) diff --git a/test/common/http/http2/BUILD b/test/common/http/http2/BUILD index 467c2785f2aa..6c9e4f8c7f2e 100644 --- a/test/common/http/http2/BUILD +++ b/test/common/http/http2/BUILD @@ -45,7 +45,6 @@ envoy_cc_test( "--runtime-feature-override-for-tests=envoy.reloadable_features.new_codec_behavior", ], shard_count = 5, - tags = ["fails_on_windows"], deps = CODEC_TEST_DEPS, ) @@ -57,7 +56,6 @@ envoy_cc_test( "--runtime-feature-disable-for-tests=envoy.reloadable_features.new_codec_behavior", ], shard_count = 5, - tags = ["fails_on_windows"], deps = CODEC_TEST_DEPS, ) @@ -128,7 +126,6 @@ envoy_cc_test( "response_header_corpus/simple_example_huffman", "response_header_corpus/simple_example_plain", ], - tags = ["fails_on_windows"], deps = [ ":frame_replay_lib", "//test/common/http/http2:codec_impl_test_util", diff --git a/test/common/network/BUILD b/test/common/network/BUILD index 0557529c0a0a..3e7b3941d1e3 100644 --- a/test/common/network/BUILD +++ b/test/common/network/BUILD @@ -98,7 +98,6 @@ envoy_cc_test( envoy_cc_test( name = "dns_impl_test", srcs = ["dns_impl_test.cc"], - tags = ["fails_on_windows"], deps = [ "//include/envoy/event:dispatcher_interface", "//include/envoy/network:address_interface", @@ -326,7 +325,6 @@ envoy_cc_test( envoy_cc_test( name = "addr_family_aware_socket_option_impl_test", srcs = ["addr_family_aware_socket_option_impl_test.cc"], - tags = ["fails_on_windows"], deps = [ ":socket_option_test", "//source/common/network:addr_family_aware_socket_option_lib", diff --git a/test/common/router/BUILD b/test/common/router/BUILD index 660405054c32..a377e5672fdd 100644 --- a/test/common/router/BUILD +++ b/test/common/router/BUILD @@ -289,7 +289,6 @@ envoy_cc_test( name = "router_upstream_log_test", srcs = ["router_upstream_log_test.cc"], external_deps = ["abseil_optional"], - tags = ["fails_on_windows"], deps = [ "//source/common/buffer:buffer_lib", "//source/common/network:utility_lib", diff --git a/test/common/upstream/BUILD b/test/common/upstream/BUILD index 9e76ee81c5df..cdfb7d42b72b 100644 --- a/test/common/upstream/BUILD +++ b/test/common/upstream/BUILD @@ -135,7 +135,6 @@ envoy_benchmark_test( envoy_cc_test( name = "health_checker_impl_test", srcs = ["health_checker_impl_test.cc"], - tags = ["fails_on_windows"], deps = [ ":utility_lib", "//source/common/buffer:buffer_lib", @@ -405,7 +404,6 @@ envoy_benchmark_test( name = "load_balancer_benchmark_test", timeout = "long", benchmark_binary = "load_balancer_benchmark", - tags = ["fails_on_windows"], ) envoy_cc_test( diff --git a/test/extensions/filters/network/rocketmq_proxy/BUILD 
b/test/extensions/filters/network/rocketmq_proxy/BUILD index f01055ab6742..82a70612767f 100644 --- a/test/extensions/filters/network/rocketmq_proxy/BUILD +++ b/test/extensions/filters/network/rocketmq_proxy/BUILD @@ -47,7 +47,6 @@ envoy_extension_cc_test( name = "router_test", srcs = ["router_test.cc"], extension_name = "envoy.filters.network.rocketmq_proxy", - tags = ["fails_on_windows"], deps = [ ":mocks_lib", ":utility_lib", diff --git a/test/server/BUILD b/test/server/BUILD index 84a3d3fb8899..4718ce6669fb 100644 --- a/test/server/BUILD +++ b/test/server/BUILD @@ -355,7 +355,6 @@ envoy_cc_test( ":server_test_data", ":static_validation_test_data", ], - tags = ["fails_on_windows"], deps = [ "//source/common/version:version_lib", "//source/extensions/access_loggers/file:config", From d651d2e048a7ddc977c028755287b545dd606adf Mon Sep 17 00:00:00 2001 From: jianwen612 <55008549+jianwen612@users.noreply.github.com> Date: Mon, 10 Aug 2020 16:15:30 -0500 Subject: [PATCH 899/909] [fuzz]network-level WriteFilter generic fuzzer (#12462) * added generic freamework for testing filters. Signed-off-by: jianwen * added code for covering ext_authz filter Signed-off-by: jianwen * restore the log output in ext_authz implementation. Signed-off-by: jianwen * fixed style problem Signed-off-by: jianwen * fixed style problem Signed-off-by: jianwen * added comments Signed-off-by: jianwen * added ststem time control for local_rate_limit Signed-off-by: jianwen * enabled three filters coverage Signed-off-by: jianwen * added support for ext_authz response Signed-off-by: jianwen * added coverage for tcp_proxy and client_ssl_auth. Increased the coverage for ext_auth by enabling the mocked response. Fixed the validation problem inside client_ssl_auth's protobuf Signed-off-by: jianwen * removed test for tcp_proxy filter Signed-off-by: jianwen * fix bazel style Signed-off-by: jianwen * fixed style Signed-off-by: jianwen * found issues in tcp_proxy and direct_response. added test cases for the issues Signed-off-by: jianwen * replace raw string names with names from factory Signed-off-by: jianwen * added test cases for direct response and sni_cluster Signed-off-by: jianwen * cleaned the code Signed-off-by: jianwen * deleted some useless comments Signed-off-by: jianwen * removed filters with known issues from the fuzzer Signed-off-by: jianwen * removed unnecessary corpus Signed-off-by: jianwen * fix the style Signed-off-by: jianwen * removed unsupported test cases Signed-off-by: jianwen * removed unnecessary comments Signed-off-by: jianwen * removed the empty destructor of fakeFactoryContext Signed-off-by: jianwen * fixed naming problems and removed the constructor of fake class Signed-off-by: jianwen * start working on http_connection_manager and solved one potential use-after-free problem. Signed-off-by: jianwen * fixed style problems Signed-off-by: jianwen * modified ON_CALL to EXPECT_CALL.WillOnce for some unique_ptr. Removed ON_CALL for addr_, instead, directly change the pointer inside connection_ Signed-off-by: jianwen * run fix code style Signed-off-by: jianwen * added HCM filter and SDFP filter Signed-off-by: jianwen * fixed typos and added TODOs Signed-off-by: jianwen * fixed a typo Signed-off-by: jianwen * separate the fake class definition and the per_filter processing in different files. Cleaned up the deps Signed-off-by: jianwen * added comments and assert() Signed-off-by: jianwen * fix style Signed-off-by: jianwen * fixed the proto definition on ThriftProxy.Route.RouteAction.cluter_header. 
Signed-off-by: jianwen * run proto fix after modification in route.proto Signed-off-by: jianwen * added comment on seconds_in_one_day_ Signed-off-by: jianwen * added test cases Signed-off-by: jianwen * trying to add valid filters Signed-off-by: jianwen * added test case for thrift proxy and added deps Signed-off-by: jianwen * added comment and log Signed-off-by: jianwen * refined the test cases Signed-off-by: jianwen * added dict Signed-off-by: jianwen * added dict to BUILD Signed-off-by: jianwen * added support for rocketmq_proxy Signed-off-by: jianwen * refined test cases for kafka and rocketmq Signed-off-by: jianwen * added support for RateLimit and a test case for it. Signed-off-by: jianwen * renamed the filter fuzzer to readfilter fuzzer Signed-off-by: jianwen * fixed code style Signed-off-by: jianwen * merged generic fuzzer(rename) Signed-off-by: jianwen * added rbac Signed-off-by: jianwen * fix nits Signed-off-by: jianwen * fixed style Signed-off-by: jianwen * fix style Signed-off-by: jianwen * removed several test cases Signed-off-by: jianwen * removed several test cases Signed-off-by: jianwen * adde comments Signed-off-by: jianwen * added writefilter fuzzer and a crash testcase for zookeeperproxy Signed-off-by: jianwen * covered all the filters Signed-off-by: jianwen * added a comment for postgres_proxy Signed-off-by: jianwen * fixed style Signed-off-by: jianwen * fixed TODO name Signed-off-by: jianwen * removed unrelevant changes Signed-off-by: jianwen * fixed proto Signed-off-by: jianwen * restore the changes Signed-off-by: jianwen * trying to add coverage for mongodb Signed-off-by: jianwen * fixed style and removed cout Signed-off-by: jianwen * added time source Signed-off-by: jianwen * added a test case for mongo, fixed style problem Signed-off-by: jianwen * fixed a spelling problem Signed-off-by: jianwen * removed the test case for postgres_proxy Signed-off-by: jianwen * added new lines and made the comment clearer Signed-off-by: jianwen * fixed a spelling problem Signed-off-by: jianwen * removed a hardcoded debug code, used debug log in writefilter fuzzer. Signed-off-by: jianwen * removed unnecessary BUILD deps. 
Signed-off-by: jianwen --- .../filters/network/common/fuzz/BUILD | 44 +++++++ .../network_writefilter_corpus/kafka_broker_1 | 110 ++++++++++++++++ .../mongodb_proxy_1 | 107 +++++++++++++++ .../network_writefilter_corpus/mysql_proxy_1 | 86 ++++++++++++ .../zookeeper_proxy_1 | 17 +++ .../zookeeper_proxy_assert_failure_onwrite | 12 ++ .../fuzz/network_writefilter_fuzz.proto | 31 +++++ .../fuzz/network_writefilter_fuzz_test.cc | 58 +++++++++ .../common/fuzz/uber_per_writefilter.cc | 35 +++++ .../network/common/fuzz/uber_writefilter.cc | 123 ++++++++++++++++++ .../network/common/fuzz/uber_writefilter.h | 40 ++++++ 11 files changed, 663 insertions(+) create mode 100644 test/extensions/filters/network/common/fuzz/network_writefilter_corpus/kafka_broker_1 create mode 100644 test/extensions/filters/network/common/fuzz/network_writefilter_corpus/mongodb_proxy_1 create mode 100644 test/extensions/filters/network/common/fuzz/network_writefilter_corpus/mysql_proxy_1 create mode 100644 test/extensions/filters/network/common/fuzz/network_writefilter_corpus/zookeeper_proxy_1 create mode 100644 test/extensions/filters/network/common/fuzz/network_writefilter_corpus/zookeeper_proxy_assert_failure_onwrite create mode 100644 test/extensions/filters/network/common/fuzz/network_writefilter_fuzz.proto create mode 100644 test/extensions/filters/network/common/fuzz/network_writefilter_fuzz_test.cc create mode 100644 test/extensions/filters/network/common/fuzz/uber_per_writefilter.cc create mode 100644 test/extensions/filters/network/common/fuzz/uber_writefilter.cc create mode 100644 test/extensions/filters/network/common/fuzz/uber_writefilter.h diff --git a/test/extensions/filters/network/common/fuzz/BUILD b/test/extensions/filters/network/common/fuzz/BUILD index f8d38307d569..8f54f57e5de8 100644 --- a/test/extensions/filters/network/common/fuzz/BUILD +++ b/test/extensions/filters/network/common/fuzz/BUILD @@ -23,6 +23,15 @@ envoy_proto_library( ], ) +envoy_proto_library( + name = "network_writefilter_fuzz_proto", + srcs = ["network_writefilter_fuzz.proto"], + deps = [ + "//test/fuzz:common_proto", + "@envoy_api//envoy/config/listener/v3:pkg", + ], +) + envoy_cc_test_library( name = "uber_readfilter_lib", srcs = [ @@ -59,3 +68,38 @@ envoy_cc_fuzz_test( "//test/config:utility_lib", ] + envoy_all_network_filters(), ) + +envoy_cc_test_library( + name = "uber_writefilter_lib", + srcs = [ + "uber_per_writefilter.cc", + "uber_writefilter.cc", + ], + hdrs = ["uber_writefilter.h"], + deps = [ + ":network_writefilter_fuzz_proto_cc_proto", + "//source/common/config:utility_lib", + "//source/extensions/filters/network:well_known_names", + "//source/extensions/filters/network/common:utility_lib", + "//test/extensions/filters/network/common/fuzz/utils:network_filter_fuzzer_fakes_lib", + "//test/fuzz:utility_lib", + "//test/mocks/network:network_mocks", + ], +) + +envoy_cc_fuzz_test( + name = "network_writefilter_fuzz_test", + srcs = ["network_writefilter_fuzz_test.cc"], + corpus = "network_writefilter_corpus", + # All Envoy network filters must be linked to the test in order for the fuzzer to pick + # these up via the NamedNetworkFilterConfigFactory. 
+ deps = [ + ":uber_writefilter_lib", + "//source/common/config:utility_lib", + "//source/extensions/filters/network/kafka:kafka_broker_config_lib", + "//source/extensions/filters/network/mongo_proxy:config", + "//source/extensions/filters/network/mysql_proxy:config", + "//source/extensions/filters/network/zookeeper_proxy:config", + "//test/config:utility_lib", + ], +) diff --git a/test/extensions/filters/network/common/fuzz/network_writefilter_corpus/kafka_broker_1 b/test/extensions/filters/network/common/fuzz/network_writefilter_corpus/kafka_broker_1 new file mode 100644 index 000000000000..a20c58dd2d4a --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_writefilter_corpus/kafka_broker_1 @@ -0,0 +1,110 @@ +config { + name: "envoy.filters.network.kafka_broker" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.kafka_broker.v3.KafkaBroker" + value: "\n}\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177" + } +} +actions { + on_write { + data: "-" + end_stream: true + } +} +actions { + advance_time { + milliseconds: 268435 + } +} +actions { + on_write { + data: "-" + end_stream: true + } +} +actions { + on_write { + data: "\312\312\312\312\312\312\312\312\312\312\312\312\315\312\312\312\312\312\312\312\312\312\312" + end_stream: true + } +} +actions { + on_write { + data: "-" + } +} +actions { + on_write { + data: "\312\312\312\312\312\312\312\312\312\312\312\312\312\312\312\312\312\312\312\312\312\312\312" + end_stream: true + } +} +actions { + on_write { + data: "-" + end_stream: true + } +} +actions { + on_write { + data: "-" + end_stream: true + } +} +actions { + on_write { + data: "-" + } +} +actions { + on_write { + data: "-" + } +} +actions { + on_write { + data: "\n\002\315\265" + } +} +actions { + on_write { + end_stream: true + } +} +actions { + on_write { + data: "\020\000\000\000" + } +} +actions { + on_write { + data: "-" + end_stream: true + } +} +actions { + on_write { + data: "-" + end_stream: true + } +} +actions { + on_write { + data: "p" + } +} +actions { + on_write { + data: "-" + } +} +actions { + on_write { + data: "-" + end_stream: true + } +} +actions { + on_write { + } +} diff --git a/test/extensions/filters/network/common/fuzz/network_writefilter_corpus/mongodb_proxy_1 b/test/extensions/filters/network/common/fuzz/network_writefilter_corpus/mongodb_proxy_1 new file mode 100644 index 000000000000..20a344f8fe35 --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_writefilter_corpus/mongodb_proxy_1 @@ -0,0 +1,107 @@ +config { + name: "envoy.filters.network.mongo_proxy" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.mongo_proxy.v3.MongoProxy" + value: "\n\001\\\032\007\"\003\010\200t*\000 \001" + } +} +actions { + on_write { + data: "]\000" + } +} +actions { + on_write { + data: "\004\000" + end_stream: true + } +} +actions { + advance_time { + milliseconds: 14848 + } +} +actions { + on_write { + data: "\004\000" + end_stream: true + } +} +actions { + advance_time { + milliseconds: 14848 + } +} +actions { + on_write { + 
data: "\004\000\001\000\000\000\000\000\000\001" + end_stream: true + } +} +actions { + on_write { + data: "<" + end_stream: true + } +} +actions { + on_write { + data: "\004\000" + } +} +actions { + on_write { + data: "\004\000" + } +} +actions { + advance_time { + milliseconds: 14848 + } +} +actions { + on_write { + data: "type.googleapis.com/envoy.extensions.filters.network.mongo_proxy.v3.MongoProxy" + end_stream: true + } +} +actions { + on_write { + data: "\004\000" + end_stream: true + } +} +actions { + on_write { + data: "\004\000" + } +} +actions { + on_write { + data: "pH\037\000 `\000\000" + end_stream: true + } +} +actions { + on_write { + data: "\004\000" + end_stream: true + } +} +actions { + advance_time { + milliseconds: 14848 + } +} +actions { + on_write { + data: "=" + end_stream: true + } +} +actions { + on_write { + data: "\004\000" + end_stream: true + } +} diff --git a/test/extensions/filters/network/common/fuzz/network_writefilter_corpus/mysql_proxy_1 b/test/extensions/filters/network/common/fuzz/network_writefilter_corpus/mysql_proxy_1 new file mode 100644 index 000000000000..f58ad110b8b9 --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_writefilter_corpus/mysql_proxy_1 @@ -0,0 +1,86 @@ +config { + name: "envoy.filters.network.mysql_proxy" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.mysql_proxy.v3.MySQLProxy" + value: "\n\006#\336\215\302\246\001" + } +} +actions { + on_write { + data: "\031\031\031\031" + } +} +actions { + on_write { + data: "\031\031\031\031\031\031\031\031" + end_stream: true + } +} +actions { + on_write { + data: "3" + } +} +actions { + on_write { + data: "#" + } +} +actions { + on_write { + data: "#" + end_stream: true + } +} +actions { + on_write { + data: "3" + } +} +actions { + on_write { + data: "#" + end_stream: true + } +} +actions { + on_write { + data: "#" + } +} +actions { + on_write { + data: "#" + } +} +actions { + on_write { + data: "\031\031\031\031\031\031\031\031" + end_stream: true + } +} +actions { + on_write { + end_stream: true + } +} +actions { + on_write { + end_stream: true + } +} +actions { + on_write { + data: "3" + } +} +actions { + on_write { + end_stream: true + } +} +actions { + on_write { + data: "3" + } +} diff --git a/test/extensions/filters/network/common/fuzz/network_writefilter_corpus/zookeeper_proxy_1 b/test/extensions/filters/network/common/fuzz/network_writefilter_corpus/zookeeper_proxy_1 new file mode 100644 index 000000000000..2e2e6c1bfb8d --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_writefilter_corpus/zookeeper_proxy_1 @@ -0,0 +1,17 @@ +config { + name: "envoy.filters.network.zookeeper_proxy" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.zookeeper_proxy.v3.ZooKeeperProxy" + value: "\nVtype.googleapis.com/envoy.extensions.filters.network.zookeeper_proxy.v3.ZooKeeperProxy\032\000" + } +} +actions { + on_write { + data: "\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030c.googlers.com\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030\030" + } +} +actions { + on_write { + } +} + diff --git a/test/extensions/filters/network/common/fuzz/network_writefilter_corpus/zookeeper_proxy_assert_failure_onwrite 
b/test/extensions/filters/network/common/fuzz/network_writefilter_corpus/zookeeper_proxy_assert_failure_onwrite new file mode 100644 index 000000000000..ae270c6fe26c --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_writefilter_corpus/zookeeper_proxy_assert_failure_onwrite @@ -0,0 +1,12 @@ +config { + name: "envoy.filters.network.zookeeper_proxy" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.zookeeper_proxy.v3.ZooKeeperProxy" + value: "\nVtype.googleapis.com/envoy.extensions.filters.network.zookeeper_proxy.v3.ZooKeeperProxy\022\001!\032\006\010\377\376\377\317\017" + } +} +actions { + on_write { + data: "\030\030\030\030\030\030\030\030" + } +} diff --git a/test/extensions/filters/network/common/fuzz/network_writefilter_fuzz.proto b/test/extensions/filters/network/common/fuzz/network_writefilter_fuzz.proto new file mode 100644 index 000000000000..77de32b5858f --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_writefilter_fuzz.proto @@ -0,0 +1,31 @@ +syntax = "proto3"; + +package test.extensions.filters.network; +import "validate/validate.proto"; +import "envoy/config/listener/v3/listener_components.proto"; + +message OnWrite { + bytes data = 1; + bool end_stream = 2; +} + +message AdvanceTime { + // Advance the system time by (0,24] hours. + uint32 milliseconds = 1 [(validate.rules).uint32 = {gt: 0 lt: 86400000}]; +} + +message WriteAction { + oneof action_selector { + option (validate.required) = true; + // Call onWrite() + OnWrite on_write = 2; + // Advance time_source_ + AdvanceTime advance_time = 3; + } +} + +message FilterFuzzTestCase { + // This is actually a protobuf type for the config of network filters. + envoy.config.listener.v3.Filter config = 1; + repeated WriteAction actions = 2; +} diff --git a/test/extensions/filters/network/common/fuzz/network_writefilter_fuzz_test.cc b/test/extensions/filters/network/common/fuzz/network_writefilter_fuzz_test.cc new file mode 100644 index 000000000000..702cb4078db4 --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_writefilter_fuzz_test.cc @@ -0,0 +1,58 @@ +#include "common/config/utility.h" +#include "common/protobuf/utility.h" + +#include "extensions/filters/network/well_known_names.h" + +#include "test/config/utility.h" +#include "test/extensions/filters/network/common/fuzz/network_writefilter_fuzz.pb.validate.h" +#include "test/extensions/filters/network/common/fuzz/uber_writefilter.h" +#include "test/fuzz/fuzz_runner.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +DEFINE_PROTO_FUZZER(const test::extensions::filters::network::FilterFuzzTestCase& input) { + ABSL_ATTRIBUTE_UNUSED static PostProcessorRegistration reg = { + [](test::extensions::filters::network::FilterFuzzTestCase* input, unsigned int seed) { + // This post-processor mutation is applied only when libprotobuf-mutator + // calls mutate on an input, and *not* during fuzz target execution. + // Replaying a corpus through the fuzzer will not be affected by the + // post-processor mutation. + + // TODO(jianwendong): consider using a factory to store the names of all + // writeFilters. + static const auto filter_names = UberWriteFilterFuzzer::filterNames(); + static const auto factories = Registry::FactoryRegistry< + Server::Configuration::NamedNetworkFilterConfigFactory>::factories(); + // Choose a valid filter name. 
+ if (std::find(filter_names.begin(), filter_names.end(), input->config().name()) == + std::end(filter_names)) { + absl::string_view filter_name = filter_names[seed % filter_names.size()]; + input->mutable_config()->set_name(std::string(filter_name)); + } + // Set the corresponding type_url for Any. + auto& factory = factories.at(input->config().name()); + input->mutable_config()->mutable_typed_config()->set_type_url( + absl::StrCat("type.googleapis.com/", + factory->createEmptyConfigProto()->GetDescriptor()->full_name())); + }}; + try { + TestUtility::validate(input); + // Check the filter's name in case some filters are not supported yet. + // TODO(jianwendong): remove this if block when we have a factory for writeFilters. + static const auto filter_names = UberWriteFilterFuzzer::filterNames(); + if (std::find(filter_names.begin(), filter_names.end(), input.config().name()) == + std::end(filter_names)) { + ENVOY_LOG_MISC(debug, "Test case with unsupported filter type: {}", input.config().name()); + return; + } + static UberWriteFilterFuzzer fuzzer; + fuzzer.fuzz(input.config(), input.actions()); + } catch (const ProtoValidationException& e) { + ENVOY_LOG_MISC(debug, "ProtoValidationException: {}", e.what()); + } +} + +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy \ No newline at end of file diff --git a/test/extensions/filters/network/common/fuzz/uber_per_writefilter.cc b/test/extensions/filters/network/common/fuzz/uber_per_writefilter.cc new file mode 100644 index 000000000000..911caa250c52 --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/uber_per_writefilter.cc @@ -0,0 +1,35 @@ +#include "extensions/filters/network/common/utility.h" +#include "extensions/filters/network/well_known_names.h" + +#include "test/extensions/filters/network/common/fuzz/uber_writefilter.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +std::vector UberWriteFilterFuzzer::filterNames() { + // These filters have already been covered by this fuzzer. + // Will extend to cover other network filters one by one. + static std::vector filter_names; + if (filter_names.empty()) { + const auto factories = Registry::FactoryRegistry< + Server::Configuration::NamedNetworkFilterConfigFactory>::factories(); + const std::vector supported_filter_names = { + NetworkFilterNames::get().ZooKeeperProxy, NetworkFilterNames::get().KafkaBroker, + NetworkFilterNames::get().MongoProxy, NetworkFilterNames::get().MySQLProxy + // TODO(jianwendong) Add "NetworkFilterNames::get().Postgres" after it supports untrusted + // data. 
+ }; + for (auto& filter_name : supported_filter_names) { + if (factories.contains(filter_name)) { + filter_names.push_back(filter_name); + } else { + ENVOY_LOG_MISC(debug, "Filter name not found in the factory: {}", filter_name); + } + } + } + return filter_names; +} + +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/network/common/fuzz/uber_writefilter.cc b/test/extensions/filters/network/common/fuzz/uber_writefilter.cc new file mode 100644 index 000000000000..517429a1dd4b --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/uber_writefilter.cc @@ -0,0 +1,123 @@ +#include "test/extensions/filters/network/common/fuzz/uber_writefilter.h" + +#include "common/config/utility.h" +#include "common/config/version_converter.h" + +using testing::_; +using testing::Return; + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +void UberWriteFilterFuzzer::reset() { + // Reset the state of dependencies so that a new fuzz input starts in a clean state. + + // Close the connection to make sure the filter's callback is set to nullptr. + write_filter_callbacks_->connection_.raiseEvent(Network::ConnectionEvent::LocalClose); + // Clear the filter's raw pointer stored inside the connection_ and reset the connection_'s state. + write_filter_callbacks_->connection_.callbacks_.clear(); + write_filter_callbacks_->connection_.bytes_sent_callbacks_.clear(); + write_filter_callbacks_->connection_.state_ = Network::Connection::State::Open; + // Clear the pointers inside the mock_dispatcher + Event::MockDispatcher& mock_dispatcher = + dynamic_cast(write_filter_callbacks_->connection_.dispatcher_); + mock_dispatcher.clearDeferredDeleteList(); + write_filter_.reset(); +} + +void UberWriteFilterFuzzer::fuzzerSetup() { + // Setup process when this fuzzer object is constructed. + // For a static fuzzer, this will only be executed once. + + // Get the pointer of write_filter when the write_filter is being added to connection_. 
+ write_filter_callbacks_ = std::make_shared>(); + read_filter_callbacks_ = std::make_shared>(); + ON_CALL(write_filter_callbacks_->connection_, addWriteFilter(_)) + .WillByDefault(Invoke([&](Network::WriteFilterSharedPtr write_filter) -> void { + write_filter->initializeWriteFilterCallbacks(*write_filter_callbacks_); + write_filter_ = write_filter; + })); + ON_CALL(write_filter_callbacks_->connection_, addFilter(_)) + .WillByDefault(Invoke([&](Network::FilterSharedPtr filter) -> void { + filter->initializeReadFilterCallbacks(*read_filter_callbacks_); + filter->initializeWriteFilterCallbacks(*write_filter_callbacks_); + write_filter_ = filter; + })); + factory_context_.prepareSimulatedSystemTime(); + + // Set featureEnabled for mongo_proxy + ON_CALL(factory_context_.runtime_loader_.snapshot_, featureEnabled("mongo.proxy_enabled", 100)) + .WillByDefault(Return(true)); + ON_CALL(factory_context_.runtime_loader_.snapshot_, + featureEnabled("mongo.connection_logging_enabled", 100)) + .WillByDefault(Return(true)); + ON_CALL(factory_context_.runtime_loader_.snapshot_, featureEnabled("mongo.logging_enabled", 100)) + .WillByDefault(Return(true)); + + // Set featureEnabled for thrift_proxy + ON_CALL(factory_context_.runtime_loader_.snapshot_, + featureEnabled("ratelimit.thrift_filter_enabled", 100)) + .WillByDefault(Return(true)); + ON_CALL(factory_context_.runtime_loader_.snapshot_, + featureEnabled("ratelimit.thrift_filter_enforcing", 100)) + .WillByDefault(Return(true)); + ON_CALL(factory_context_.runtime_loader_.snapshot_, + featureEnabled("ratelimit.test_key.thrift_filter_enabled", 100)) + .WillByDefault(Return(true)); +} + +UberWriteFilterFuzzer::UberWriteFilterFuzzer() + : time_source_(factory_context_.simulatedTimeSystem()) { + fuzzerSetup(); +} + +void UberWriteFilterFuzzer::fuzz( + const envoy::config::listener::v3::Filter& proto_config, + const Protobuf::RepeatedPtrField<::test::extensions::filters::network::WriteAction>& actions) { + try { + // Try to create the filter callback(cb_). Exit early if the config is invalid or violates PGV + // constraints. + const std::string& filter_name = proto_config.name(); + ENVOY_LOG_MISC(debug, "filter name {}", filter_name); + auto& factory = Config::Utility::getAndCheckFactoryByName< + Server::Configuration::NamedNetworkFilterConfigFactory>(filter_name); + ProtobufTypes::MessagePtr message = Config::Utility::translateToFactoryConfig( + proto_config, factory_context_.messageValidationVisitor(), factory); + ENVOY_LOG_MISC(debug, "Config content after decoded: {}", message->DebugString()); + cb_ = factory.createFilterFactoryFromProto(*message, factory_context_); + // Add filter to connection_. + cb_(write_filter_callbacks_->connection_); + } catch (const EnvoyException& e) { + ENVOY_LOG_MISC(debug, "Controlled exception in filter setup {}", e.what()); + return; + } + for (const auto& action : actions) { + ENVOY_LOG_MISC(debug, "action {}", action.DebugString()); + switch (action.action_selector_case()) { + case test::extensions::filters::network::WriteAction::kOnWrite: { + ASSERT(write_filter_ != nullptr); + Buffer::OwnedImpl buffer(action.on_write().data()); + write_filter_->onWrite(buffer, action.on_write().end_stream()); + + break; + } + case test::extensions::filters::network::WriteAction::kAdvanceTime: { + time_source_.advanceTimeAsync( + std::chrono::milliseconds(action.advance_time().milliseconds())); + factory_context_.dispatcher().run(Event::Dispatcher::RunType::NonBlock); + break; + } + default: { + // Unhandled actions. 
+ ENVOY_LOG_MISC(debug, "Action support is missing for:\n{}", action.DebugString()); + PANIC("A case is missing for an action"); + } + } + } + + reset(); +} + +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/network/common/fuzz/uber_writefilter.h b/test/extensions/filters/network/common/fuzz/uber_writefilter.h new file mode 100644 index 000000000000..9f6c34eb60e9 --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/uber_writefilter.h @@ -0,0 +1,40 @@ +#include "envoy/network/filter.h" + +#include "common/protobuf/protobuf.h" + +#include "test/extensions/filters/network/common/fuzz/network_writefilter_fuzz.pb.validate.h" +#include "test/extensions/filters/network/common/fuzz/utils/fakes.h" +#include "test/mocks/network/mocks.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { + +class UberWriteFilterFuzzer { +public: + UberWriteFilterFuzzer(); + // This creates the filter config and runs the fuzzed data against the filter. + void fuzz( + const envoy::config::listener::v3::Filter& proto_config, + const Protobuf::RepeatedPtrField<::test::extensions::filters::network::WriteAction>& actions); + // Get the name of filters which has been covered by this fuzzer. + static std::vector filterNames(); + +protected: + // Set-up filter specific mock expectations in constructor. + void fuzzerSetup(); + // Reset the states of the mock objects. + void reset(); + +private: + Server::Configuration::FakeFactoryContext factory_context_; + Event::SimulatedTimeSystem& time_source_; + Network::WriteFilterSharedPtr write_filter_; + Network::FilterFactoryCb cb_; + std::shared_ptr> write_filter_callbacks_; + std::shared_ptr> read_filter_callbacks_; +}; + +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy From 3cb95f6ff88c7e9681f77fa12df386c3e10f6e7f Mon Sep 17 00:00:00 2001 From: Yifan Yang Date: Mon, 10 Aug 2020 17:23:52 -0400 Subject: [PATCH 900/909] Cleanup: cleaning up .first/.second [source/common/config] (#12468) This is the second PR to this #12354. This time is in the common/config submodule. Signed-off-by: Yifan Yang --- source/common/config/delta_subscription_state.cc | 8 ++++---- source/common/config/metadata.h | 7 ++++--- source/common/config/new_grpc_mux_impl.cc | 8 ++++---- source/common/config/type_to_endpoint.cc | 8 +++----- source/common/config/watch_map.cc | 13 ++++++------- 5 files changed, 21 insertions(+), 23 deletions(-) diff --git a/source/common/config/delta_subscription_state.cc b/source/common/config/delta_subscription_state.cc index 2763fdfd9dff..c0a6a5502cb0 100644 --- a/source/common/config/delta_subscription_state.cc +++ b/source/common/config/delta_subscription_state.cc @@ -124,16 +124,16 @@ DeltaSubscriptionState::getNextRequestAckless() { // initial_resource_versions "must be populated for first request in a stream". // Also, since this might be a new server, we must explicitly state *all* of our subscription // interest. - for (auto const& resource : resource_versions_) { + for (auto const& [resource_name, resource_version] : resource_versions_) { // Populate initial_resource_versions with the resource versions we currently have. // Resources we are interested in, but are still waiting to get any version of from the // server, do not belong in initial_resource_versions. (But do belong in new subscriptions!) 
- if (!resource.second.waitingForServer()) { - (*request.mutable_initial_resource_versions())[resource.first] = resource.second.version(); + if (!resource_version.waitingForServer()) { + (*request.mutable_initial_resource_versions())[resource_name] = resource_version.version(); } // As mentioned above, fill resource_names_subscribe with everything, including names we // have yet to receive any resource for. - names_added_.insert(resource.first); + names_added_.insert(resource_name); } names_removed_.clear(); } diff --git a/source/common/config/metadata.h b/source/common/config/metadata.h index ed4fcd96c270..efac4eff7e59 100644 --- a/source/common/config/metadata.h +++ b/source/common/config/metadata.h @@ -116,10 +116,11 @@ template class TypedMetadataImpl : public TypedMetadata */ void populateFrom(const envoy::config::core::v3::Metadata& metadata) { auto& data_by_key = metadata.filter_metadata(); - for (const auto& it : Registry::FactoryRegistry::factories()) { - const auto& meta_iter = data_by_key.find(it.first); + for (const auto& [factory_name, factory] : + Registry::FactoryRegistry::factories()) { + const auto& meta_iter = data_by_key.find(factory_name); if (meta_iter != data_by_key.end()) { - data_[it.second->name()] = it.second->parse(meta_iter->second); + data_[factory->name()] = factory->parse(meta_iter->second); } } } diff --git a/source/common/config/new_grpc_mux_impl.cc b/source/common/config/new_grpc_mux_impl.cc index c7caaf04f664..131ccd24db51 100644 --- a/source/common/config/new_grpc_mux_impl.cc +++ b/source/common/config/new_grpc_mux_impl.cc @@ -73,8 +73,8 @@ void NewGrpcMuxImpl::onDiscoveryResponse( } void NewGrpcMuxImpl::onStreamEstablished() { - for (auto& sub : subscriptions_) { - sub.second->sub_state_.markStreamFresh(); + for (auto& [type_url, subscription] : subscriptions_) { + subscription->sub_state_.markStreamFresh(); } trySendDiscoveryRequests(); } @@ -88,8 +88,8 @@ void NewGrpcMuxImpl::onEstablishmentFailure() { absl::flat_hash_map all_subscribed; absl::flat_hash_map already_called; do { - for (auto& sub : subscriptions_) { - all_subscribed[sub.first] = &sub.second->sub_state_; + for (auto& [type_url, subscription] : subscriptions_) { + all_subscribed[type_url] = &subscription->sub_state_; } for (auto& sub : all_subscribed) { if (already_called.insert(sub).second) { // insert succeeded ==> not already called diff --git a/source/common/config/type_to_endpoint.cc b/source/common/config/type_to_endpoint.cc index 9821b288dcbc..1c32fe47ad2c 100644 --- a/source/common/config/type_to_endpoint.cc +++ b/source/common/config/type_to_endpoint.cc @@ -185,12 +185,11 @@ TypeUrlToVersionedServiceMap* buildTypeUrlToServiceMap() { }}, }}, }) { - for (const auto& registered_service : registered) { - const TypeUrl resource_type_url = getResourceTypeUrl(registered_service.first); + for (const auto& [registered_service_name, registered_service_info] : registered) { + const TypeUrl resource_type_url = getResourceTypeUrl(registered_service_name); VersionedService& service = (*type_url_to_versioned_service_map)[resource_type_url]; - for (const auto& versioned_service_name : registered_service.second.names_) { - const ServiceName& service_name = versioned_service_name.second; + for (const auto& [transport_api_version, service_name] : registered_service_info.names_) { const auto* service_desc = Protobuf::DescriptorPool::generated_pool()->FindServiceByName(service_name); ASSERT(service_desc != nullptr, fmt::format("{} missing", service_name)); @@ -200,7 +199,6 @@ 
TypeUrlToVersionedServiceMap* buildTypeUrlToServiceMap() { // services don't implement all, e.g. VHDS doesn't support SotW or REST. for (int method_index = 0; method_index < service_desc->method_count(); ++method_index) { const auto& method_desc = *service_desc->method(method_index); - const auto transport_api_version = versioned_service_name.first; if (absl::StartsWith(method_desc.name(), "Stream")) { service.sotw_grpc_.methods_[transport_api_version] = method_desc.full_name(); } else if (absl::StartsWith(method_desc.name(), "Delta")) { diff --git a/source/common/config/watch_map.cc b/source/common/config/watch_map.cc index f17d01decbc4..51e73e06344d 100644 --- a/source/common/config/watch_map.cc +++ b/source/common/config/watch_map.cc @@ -181,28 +181,27 @@ void WatchMap::onConfigUpdate( } // We just bundled up the updates into nice per-watch packages. Now, deliver them. - for (const auto& added : per_watch_added) { - const Watch* cur_watch = added.first; + for (const auto& [cur_watch, resource_to_add] : per_watch_added) { if (deferred_removed_during_update_->count(cur_watch) > 0) { continue; } const auto removed = per_watch_removed.find(cur_watch); if (removed == per_watch_removed.end()) { // additions only, no removals - cur_watch->callbacks_.onConfigUpdate(added.second, {}, system_version_info); + cur_watch->callbacks_.onConfigUpdate(resource_to_add, {}, system_version_info); } else { // both additions and removals - cur_watch->callbacks_.onConfigUpdate(added.second, removed->second, system_version_info); + cur_watch->callbacks_.onConfigUpdate(resource_to_add, removed->second, system_version_info); // Drop the removals now, so the final removals-only pass won't use them. per_watch_removed.erase(removed); } } // Any removals-only updates will not have been picked up in the per_watch_added loop. - for (auto& removed : per_watch_removed) { - if (deferred_removed_during_update_->count(removed.first) > 0) { + for (auto& [cur_watch, resource_to_remove] : per_watch_removed) { + if (deferred_removed_during_update_->count(cur_watch) > 0) { continue; } - removed.first->callbacks_.onConfigUpdate({}, removed.second, system_version_info); + cur_watch->callbacks_.onConfigUpdate({}, resource_to_remove, system_version_info); } } From 5200978ba69488fde6930d6059009dacc56c1772 Mon Sep 17 00:00:00 2001 From: justin-mp Date: Mon, 10 Aug 2020 17:24:38 -0400 Subject: [PATCH 901/909] Add const qualifier to returned pointers in MockIp and MockResolvedAddress (#12573) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Compared to their superclasses, the MockIp and MockResolvedAddress classes drop the const qualifier from the pointers they return. Since the non-mock classes implementing these methods return const pointers, it’s difficult to use non-mock classes with these mocks without resorting to hacks like const_cast.
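For illustration only (this sketch is not part of the patch): the wiring the old signatures forced, versus what the const-qualified signatures allow. It assumes gmock plus the Envoy network mocks; the function name delegateIpv4ToRealAddress and the parameter real_ip are made up for the example, with real_ip standing in for a const Address::Ip reference taken from a real Address::Instance.

  #include "test/mocks/network/mocks.h"

  #include "gmock/gmock.h"

  using testing::NiceMock;
  using testing::Return;

  // Sketch only: "real_ip" is a hypothetical const reference to a real
  // Address::Ip (e.g. obtained from a resolved Address::Instance).
  void delegateIpv4ToRealAddress(const Envoy::Network::Address::Ip& real_ip) {
    NiceMock<Envoy::Network::MockIp> mock_ip;

    // Before this change, MockIp::ipv4() returned a non-const Address::Ipv4*,
    // so delegating to the const-returning real implementation meant casting
    // away constness:
    ON_CALL(mock_ip, ipv4())
        .WillByDefault(
            Return(const_cast<Envoy::Network::Address::Ipv4*>(real_ip.ipv4())));

    // With the const-qualified signature, the real pointer is returned as-is:
    ON_CALL(mock_ip, ipv4()).WillByDefault(Return(real_ip.ipv4()));
  }

The same consideration applies to MockResolvedAddress::ip(), pipe(), and sockAddr(), which this patch also const-qualifies.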
Signed-off-by: Justin Mazzola Paluska --- test/mocks/network/mocks.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/test/mocks/network/mocks.h b/test/mocks/network/mocks.h index 596afe6ffbea..45371be5d584 100644 --- a/test/mocks/network/mocks.h +++ b/test/mocks/network/mocks.h @@ -413,8 +413,8 @@ class MockIp : public Address::Ip { MOCK_METHOD(const std::string&, addressAsString, (), (const)); MOCK_METHOD(bool, isAnyAddress, (), (const)); MOCK_METHOD(bool, isUnicastAddress, (), (const)); - MOCK_METHOD(Address::Ipv4*, ipv4, (), (const)); - MOCK_METHOD(Address::Ipv6*, ipv6, (), (const)); + MOCK_METHOD(const Address::Ipv4*, ipv4, (), (const)); + MOCK_METHOD(const Address::Ipv6*, ipv6, (), (const)); MOCK_METHOD(uint32_t, port, (), (const)); MOCK_METHOD(Address::IpVersion, version, (), (const)); MOCK_METHOD(bool, v6only, (), (const)); @@ -432,11 +432,11 @@ class MockResolvedAddress : public Address::Instance { MOCK_METHOD(Api::SysCallIntResult, bind, (os_fd_t), (const)); MOCK_METHOD(Api::SysCallIntResult, connect, (os_fd_t), (const)); - MOCK_METHOD(Address::Ip*, ip, (), (const)); - MOCK_METHOD(Address::Pipe*, pipe, (), (const)); + MOCK_METHOD(const Address::Ip*, ip, (), (const)); + MOCK_METHOD(const Address::Pipe*, pipe, (), (const)); MOCK_METHOD(IoHandlePtr, socket, (Socket::Type), (const)); MOCK_METHOD(Address::Type, type, (), (const)); - MOCK_METHOD(sockaddr*, sockAddr, (), (const)); + MOCK_METHOD(const sockaddr*, sockAddr, (), (const)); MOCK_METHOD(socklen_t, sockAddrLen, (), (const)); const std::string& asString() const override { return physical_; } From cce07c0396190915ab3d72a9e0a6d1c4ba5aa1a2 Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Mon, 10 Aug 2020 16:28:30 -0700 Subject: [PATCH 902/909] abseil: update to latest (#12561) Pulls in TSAN mutex fixes as well as works around false detection. Signed-off-by: Matt Klein --- .bazelrc | 3 +++ bazel/repository_locations.bzl | 8 ++++---- test/integration/fake_upstream.cc | 2 +- test/integration/fake_upstream.h | 2 +- 4 files changed, 9 insertions(+), 6 deletions(-) diff --git a/.bazelrc b/.bazelrc index 1cb3caaa6a9f..fa8d80a0242d 100644 --- a/.bazelrc +++ b/.bazelrc @@ -89,6 +89,9 @@ build:clang-tsan --build_tag_filters=-no_san,-no_tsan build:clang-tsan --test_tag_filters=-no_san,-no_tsan # Needed due to https://github.com/libevent/libevent/issues/777 build:clang-tsan --copt -DEVENT__DISABLE_DEBUG_MODE +# https://github.com/abseil/abseil-cpp/issues/760 +# https://github.com/google/sanitizers/issues/953 +build:clang-tsan --test_env="TSAN_OPTIONS=report_atomic_races=0" # Clang MSAN - this is the base config for remote-msan and docker-msan. 
To run this config without # our build image, follow https://github.com/google/sanitizers/wiki/MemorySanitizerLibcxxHowTo diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 73698991a2f4..11cf908a21c0 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -95,10 +95,10 @@ DEPENDENCY_REPOSITORIES = dict( cpe = "N/A", ), com_google_absl = dict( - sha256 = "ec8ef47335310cc3382bdc0d0cc1097a001e67dc83fcba807845aa5696e7e1e4", - strip_prefix = "abseil-cpp-302b250e1d917ede77b5ff00a6fd9f28430f1563", - # 2020-07-13 - urls = ["https://github.com/abseil/abseil-cpp/archive/302b250e1d917ede77b5ff00a6fd9f28430f1563.tar.gz"], + sha256 = "573baccd67aa591b8c7209bfb0c77e0d15633d77ced39d1ccbb1232828f7f7d9", + strip_prefix = "abseil-cpp-ce4bc927755fdf0ed03d679d9c7fa041175bb3cb", + # 2020-08-08 + urls = ["https://github.com/abseil/abseil-cpp/archive/ce4bc927755fdf0ed03d679d9c7fa041175bb3cb.tar.gz"], use_category = ["dataplane", "controlplane"], cpe = "N/A", ), diff --git a/test/integration/fake_upstream.cc b/test/integration/fake_upstream.cc index 476922bd8a68..b20ff0318398 100644 --- a/test/integration/fake_upstream.cc +++ b/test/integration/fake_upstream.cc @@ -709,7 +709,7 @@ FakeRawConnection::waitForData(const std::function& da AssertionResult FakeRawConnection::write(const std::string& data, bool end_stream, milliseconds timeout) { return shared_connection_.executeOnDispatcher( - [&data, end_stream](Network::Connection& connection) { + [data, end_stream](Network::Connection& connection) { Buffer::OwnedImpl to_write(data); connection.write(to_write, end_stream); }, diff --git a/test/integration/fake_upstream.h b/test/integration/fake_upstream.h index 46287d7a5191..0c23c42e4b41 100644 --- a/test/integration/fake_upstream.h +++ b/test/integration/fake_upstream.h @@ -300,7 +300,7 @@ class SharedConnectionWrapper : public Network::ConnectionCallbacks { return testing::AssertionSuccess(); } Thread::CondVar callback_ready_event; - bool unexpected_disconnect = false; + std::atomic unexpected_disconnect = false; connection_.dispatcher().post( [this, f, &callback_ready_event, &unexpected_disconnect]() -> void { // The use of connected() here, vs. !disconnected_, is because we want to use the lock_ From 04de1cfb2bb7a774dd38772c7e262af3e61727b3 Mon Sep 17 00:00:00 2001 From: Joshua Marantz Date: Mon, 10 Aug 2020 20:01:24 -0400 Subject: [PATCH 903/909] stats: keep a set of ref-counted parent histograms in ThreadLocalStore so that two with the same name map to the same histogram object. (#12275) Commit Message: Creates a storage model for thread-local histograms enabling continuity of data across scope reloads. Previously, whenever a Scope was re-created, the counters, gauges, and text-readouts of the same names would retain their previous values. However, fresh histograms were created on every scope reload, and stats dumps would include duplicate histograms with the same name. This change adds an analogous name-based set of histograms, held in ThreadLocalStore, so that we have a single histogram representing all its generations of data. This is somewhat more complex than for the other stats, since there were thread-local buffers, which previously were owned by TlsScope and needed to be made independent. So this introduces a new tls histogram map in the tls-cache to maintain this. This should help unblock #12241. Additional Description: Risk Level: high (not clear whether this is enough testing of histogram usage) Testing: //test/... 
Docs Changes: n/a Release Notes: n/a Fixes: #3098 Signed-off-by: Joshua Marantz --- source/common/stats/allocator_impl.h | 19 -- source/common/stats/metric_impl.h | 25 ++ source/common/stats/thread_local_store.cc | 188 +++++++++--- source/common/stats/thread_local_store.h | 83 ++--- source/docs/stats.md | 14 +- source/server/admin/stats_handler.cc | 8 +- test/common/stats/thread_local_store_test.cc | 306 ++++++++++++++++--- test/integration/stats_integration_test.cc | 8 +- 8 files changed, 504 insertions(+), 147 deletions(-) diff --git a/source/common/stats/allocator_impl.h b/source/common/stats/allocator_impl.h index 3242d0de5fef..469484866f18 100644 --- a/source/common/stats/allocator_impl.h +++ b/source/common/stats/allocator_impl.h @@ -58,29 +58,10 @@ class AllocatorImpl : public Allocator { friend class TextReadoutImpl; friend class NotifyingAllocatorImpl; - struct HeapStatHash { - using is_transparent = void; // NOLINT(readability-identifier-naming) - size_t operator()(const Metric* a) const { return a->statName().hash(); } - size_t operator()(StatName a) const { return a.hash(); } - }; - - struct HeapStatCompare { - using is_transparent = void; // NOLINT(readability-identifier-naming) - bool operator()(const Metric* a, const Metric* b) const { - return a->statName() == b->statName(); - } - bool operator()(const Metric* a, StatName b) const { return a->statName() == b; } - }; - void removeCounterFromSetLockHeld(Counter* counter) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_); void removeGaugeFromSetLockHeld(Gauge* gauge) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_); void removeTextReadoutFromSetLockHeld(Counter* counter) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_); - // An unordered set of HeapStatData pointers which keys off the key() - // field in each object. This necessitates a custom comparator and hasher, which key off of the - // StatNamePtr's own StatNamePtrHash and StatNamePtrCompare operators. - template - using StatSet = absl::flat_hash_set; StatSet counters_ ABSL_GUARDED_BY(mutex_); StatSet gauges_ ABSL_GUARDED_BY(mutex_); StatSet text_readouts_ ABSL_GUARDED_BY(mutex_); diff --git a/source/common/stats/metric_impl.h b/source/common/stats/metric_impl.h index c923395b992d..52b577230fd3 100644 --- a/source/common/stats/metric_impl.h +++ b/source/common/stats/metric_impl.h @@ -32,10 +32,35 @@ class MetricHelper { void iterateTagStatNames(const Metric::TagStatNameIterFn& fn) const; void clear(SymbolTable& symbol_table) { stat_names_.clear(symbol_table); } + // Hasher for metrics. + struct Hash { + using is_transparent = void; // NOLINT(readability-identifier-naming) + size_t operator()(const Metric* a) const { return a->statName().hash(); } + size_t operator()(StatName a) const { return a.hash(); } + }; + + // Comparator for metrics. + struct Compare { + using is_transparent = void; // NOLINT(readability-identifier-naming) + bool operator()(const Metric* a, const Metric* b) const { + return a->statName() == b->statName(); + } + bool operator()(const Metric* a, StatName b) const { return a->statName() == b; } + }; + private: StatNameList stat_names_; }; +// An unordered set of stat pointers. which keys off Metric::statName(). +// This necessitates a custom comparator and hasher, using the StatNamePtr's +// own StatNamePtrHash and StatNamePtrCompare operators. +// +// This is used by AllocatorImpl for counters, gauges, and text-readouts, and +// is also used by thread_local_store.h for histograms. 
+template +using StatSet = absl::flat_hash_set; + /** * Partial implementation of the Metric interface on behalf of Counters, Gauges, * and Histograms. It leaves symbolTable() unimplemented so that implementations diff --git a/source/common/stats/thread_local_store.cc b/source/common/stats/thread_local_store.cc index 697760ed1a4b..54d0c78eba9b 100644 --- a/source/common/stats/thread_local_store.cc +++ b/source/common/stats/thread_local_store.cc @@ -63,12 +63,22 @@ void ThreadLocalStoreImpl::setStatsMatcher(StatsMatcherPtr&& stats_matcher) { // in the default_scope. There should be no requests, so there will // be no copies in TLS caches. Thread::LockGuard lock(lock_); + const uint32_t first_histogram_index = deleted_histograms_.size(); for (ScopeImpl* scope : scopes_) { removeRejectedStats(scope->central_cache_->counters_, deleted_counters_); removeRejectedStats(scope->central_cache_->gauges_, deleted_gauges_); removeRejectedStats(scope->central_cache_->histograms_, deleted_histograms_); removeRejectedStats(scope->central_cache_->text_readouts_, deleted_text_readouts_); } + + // Remove any newly rejected histograms from histogram_set_. + { + Thread::LockGuard hist_lock(hist_mutex_); + for (uint32_t i = first_histogram_index; i < deleted_histograms_.size(); ++i) { + uint32_t erased = histogram_set_.erase(deleted_histograms_[i].get()); + ASSERT(erased == 1); + } + } } template @@ -160,16 +170,11 @@ std::vector ThreadLocalStoreImpl::textReadouts() const { std::vector ThreadLocalStoreImpl::histograms() const { std::vector ret; - Thread::LockGuard lock(lock_); - // TODO(ramaraochavali): As histograms don't share storage, there is a chance of duplicate names - // here. We need to create global storage for histograms similar to how we have a central storage - // in shared memory for counters/gauges. In the interim, no de-dup is done here. This may result - // in histograms with duplicate names, but until shared storage is implemented it's ultimately - // less confusing for users who have such configs. - for (ScopeImpl* scope : scopes_) { - for (const auto& name_histogram_pair : scope->central_cache_->histograms_) { - const ParentHistogramSharedPtr& parent_hist = name_histogram_pair.second; - ret.push_back(parent_hist); + Thread::LockGuard lock(hist_mutex_); + { + ret.reserve(histogram_set_.size()); + for (const auto& histogram_ptr : histogram_set_) { + ret.emplace_back(histogram_ptr); } } @@ -189,6 +194,11 @@ void ThreadLocalStoreImpl::initializeThreading(Event::Dispatcher& main_thread_di void ThreadLocalStoreImpl::shutdownThreading() { // This will block both future cache fills as well as cache flushes. 
shutting_down_ = true; + Thread::LockGuard lock(hist_mutex_); + for (ParentHistogramImpl* histogram : histogram_set_) { + histogram->setShuttingDown(true); + } + histogram_set_.clear(); } void ThreadLocalStoreImpl::mergeHistograms(PostMergeCb merge_complete_cb) { @@ -197,12 +207,9 @@ void ThreadLocalStoreImpl::mergeHistograms(PostMergeCb merge_complete_cb) { merge_in_progress_ = true; tls_->runOnAllThreads( [this]() -> void { - for (const auto& scope : tls_->getTyped().scope_cache_) { - const TlsCacheEntry& tls_cache_entry = scope.second; - for (const auto& name_histogram_pair : tls_cache_entry.histograms_) { - const TlsHistogramSharedPtr& tls_hist = name_histogram_pair.second; - tls_hist->beginMerge(); - } + for (const auto& id_hist : tls_->getTyped().tls_histogram_cache_) { + const TlsHistogramSharedPtr& tls_hist = id_hist.second; + tls_hist->beginMerge(); } }, [this, merge_complete_cb]() -> void { mergeInternal(merge_complete_cb); }); @@ -257,6 +264,10 @@ void ThreadLocalStoreImpl::releaseScopeCrossThread(ScopeImpl* scope) { if (!shutting_down_ && main_thread_dispatcher_) { const uint64_t scope_id = scope->scope_id_; lock.release(); + + // TODO(jmarantz): consider batching all the scope IDs that should be + // cleared from TLS caches to reduce bursts of runOnAllThreads on a large + // config update. See the pattern below used for histograms. main_thread_dispatcher_->post([this, central_cache, scope_id]() { sync_.syncPoint(MainDispatcherCleanupSync); clearScopeFromCaches(scope_id, central_cache); @@ -264,12 +275,27 @@ void ThreadLocalStoreImpl::releaseScopeCrossThread(ScopeImpl* scope) { } } +void ThreadLocalStoreImpl::releaseHistogramCrossThread(uint64_t histogram_id) { + // This can happen from any thread. We post() back to the main thread which will initiate the + // cache flush operation. + if (!shutting_down_ && main_thread_dispatcher_) { + main_thread_dispatcher_->post( + [this, histogram_id]() { clearHistogramFromCaches(histogram_id); }); + } +} + ThreadLocalStoreImpl::TlsCacheEntry& ThreadLocalStoreImpl::TlsCache::insertScope(uint64_t scope_id) { return scope_cache_[scope_id]; } void ThreadLocalStoreImpl::TlsCache::eraseScope(uint64_t scope_id) { scope_cache_.erase(scope_id); } +void ThreadLocalStoreImpl::TlsCache::eraseHistogram(uint64_t histogram_id) { + // This is called for every histogram in every thread, even though the + // histogram may not have been cached in each thread yet. So we don't + // want to check whether the erase() call erased anything. + tls_histogram_cache_.erase(histogram_id); +} void ThreadLocalStoreImpl::clearScopeFromCaches(uint64_t scope_id, CentralCacheEntrySharedPtr central_cache) { @@ -283,6 +309,22 @@ void ThreadLocalStoreImpl::clearScopeFromCaches(uint64_t scope_id, } } +void ThreadLocalStoreImpl::clearHistogramFromCaches(uint64_t histogram_id) { + // If we are shutting down we no longer perform cache flushes as workers may be shutting down + // at the same time. + if (!shutting_down_) { + // Perform a cache flush on all threads. + // + // TODO(jmarantz): If this cross-thread posting proves to be a performance + // bottleneck, + // https://gist.github.com/jmarantz/838cb6de7e74c0970ea6b63eded0139a + // contains a patch that will implement batching together to clear multiple + // histograms. 
+ tls_->runOnAllThreads( + [this, histogram_id]() { tls_->getTyped().eraseHistogram(histogram_id); }); + } +} + ThreadLocalStoreImpl::ScopeImpl::ScopeImpl(ThreadLocalStoreImpl& parent, const std::string& prefix) : scope_id_(parent.next_scope_id_++), parent_(parent), prefix_(Utility::sanitizeStatsName(prefix), parent.alloc_.symbolTable()), @@ -566,9 +608,23 @@ Histogram& ThreadLocalStoreImpl::ScopeImpl::histogramFromStatNameWithTags( [&buckets, this](absl::string_view stat_name) { buckets = &parent_.histogram_settings_->buckets(stat_name); }); - RefcountPtr stat(new ParentHistogramImpl( - final_stat_name, unit, parent_, *this, tag_helper.tagExtractedName(), - tag_helper.statNameTags(), *buckets)); + + RefcountPtr stat; + { + Thread::LockGuard lock(parent_.hist_mutex_); + auto iter = parent_.histogram_set_.find(final_stat_name); + if (iter != parent_.histogram_set_.end()) { + stat = RefcountPtr(*iter); + } else { + stat = new ParentHistogramImpl(final_stat_name, unit, parent_, + tag_helper.tagExtractedName(), tag_helper.statNameTags(), + *buckets, parent_.next_histogram_id_++); + if (!parent_.shutting_down_) { + parent_.histogram_set_.insert(stat.get()); + } + } + } + central_ref = ¢ral_cache_->histograms_[stat->statName()]; *central_ref = stat; } @@ -639,34 +695,34 @@ TextReadoutOptConstRef ThreadLocalStoreImpl::ScopeImpl::findTextReadout(StatName return findStatLockHeld(name, central_cache_->text_readouts_); } -Histogram& ThreadLocalStoreImpl::ScopeImpl::tlsHistogram(StatName name, - ParentHistogramImpl& parent) { +Histogram& ThreadLocalStoreImpl::tlsHistogram(ParentHistogramImpl& parent, uint64_t id) { // tlsHistogram() is generally not called for a histogram that is rejected by // the matcher, so no further rejection-checking is needed at this level. // TlsHistogram inherits its reject/accept status from ParentHistogram. // See comments in counterFromStatName() which explains the logic here. 
- StatNameHashMap* tls_cache = nullptr; - if (!parent_.shutting_down_ && parent_.tls_) { - tls_cache = &parent_.tls_->getTyped().scope_cache_[this->scope_id_].histograms_; - auto iter = tls_cache->find(name); - if (iter != tls_cache->end()) { - return *iter->second; + TlsHistogramSharedPtr* tls_histogram = nullptr; + if (!shutting_down_ && tls_ != nullptr) { + TlsCache& tls_cache = tls_->getTyped(); + tls_histogram = &tls_cache.tls_histogram_cache_[id]; + if (*tls_histogram != nullptr) { + return **tls_histogram; } } - StatNameTagHelper tag_helper(parent_, name, absl::nullopt); + StatNameTagHelper tag_helper(*this, parent.statName(), absl::nullopt); TlsHistogramSharedPtr hist_tls_ptr( - new ThreadLocalHistogramImpl(name, parent.unit(), tag_helper.tagExtractedName(), + new ThreadLocalHistogramImpl(parent.statName(), parent.unit(), tag_helper.tagExtractedName(), tag_helper.statNameTags(), symbolTable())); parent.addTlsHistogram(hist_tls_ptr); - if (tls_cache) { - tls_cache->insert(std::make_pair(hist_tls_ptr->statName(), hist_tls_ptr)); + if (tls_histogram != nullptr) { + *tls_histogram = hist_tls_ptr; } + return *hist_tls_ptr; } @@ -682,7 +738,7 @@ ThreadLocalHistogramImpl::ThreadLocalHistogramImpl(StatName name, Histogram::Uni } ThreadLocalHistogramImpl::~ThreadLocalHistogramImpl() { - MetricImpl::clear(symbolTable()); + MetricImpl::clear(symbol_table_); hist_free(histograms_[0]); hist_free(histograms_[1]); } @@ -699,28 +755,78 @@ void ThreadLocalHistogramImpl::merge(histogram_t* target) { hist_clear(*other_histogram); } -ParentHistogramImpl::ParentHistogramImpl(StatName name, Histogram::Unit unit, Store& parent, - TlsScope& tls_scope, StatName tag_extracted_name, +ParentHistogramImpl::ParentHistogramImpl(StatName name, Histogram::Unit unit, + ThreadLocalStoreImpl& thread_local_store, + StatName tag_extracted_name, const StatNameTagVector& stat_name_tags, - ConstSupportedBuckets& supported_buckets) - : MetricImpl(name, tag_extracted_name, stat_name_tags, parent.symbolTable()), unit_(unit), - parent_(parent), tls_scope_(tls_scope), interval_histogram_(hist_alloc()), + ConstSupportedBuckets& supported_buckets, uint64_t id) + : MetricImpl(name, tag_extracted_name, stat_name_tags, thread_local_store.symbolTable()), + unit_(unit), thread_local_store_(thread_local_store), interval_histogram_(hist_alloc()), cumulative_histogram_(hist_alloc()), interval_statistics_(interval_histogram_, supported_buckets), - cumulative_statistics_(cumulative_histogram_, supported_buckets), merged_(false) {} + cumulative_statistics_(cumulative_histogram_, supported_buckets), merged_(false), id_(id) {} ParentHistogramImpl::~ParentHistogramImpl() { - MetricImpl::clear(parent_.symbolTable()); + thread_local_store_.releaseHistogramCrossThread(id_); + ASSERT(ref_count_ == 0); + MetricImpl::clear(thread_local_store_.symbolTable()); hist_free(interval_histogram_); hist_free(cumulative_histogram_); } +void ParentHistogramImpl::incRefCount() { ++ref_count_; } + +bool ParentHistogramImpl::decRefCount() { + bool ret; + if (shutting_down_) { + // When shutting down, we cannot reference thread_local_store_, as + // histograms can outlive the store. So we decrement the ref-count without + // the stores' lock. We will not be removing the object from the store's + // histogram map in this scenario, as the set was cleared during shutdown, + // and will not be repopulated in histogramFromStatNameWithTags after + // initiating shutdown. 
+ ret = --ref_count_ == 0; + } else { + // We delegate to the Store object to decrement the ref-count so it can hold + // the lock to the map. If we don't hold a lock, another thread may + // simultaneously try to allocate the same name'd histogram after we + // decrement it, and we'll wind up with a dtor/update race. To avoid this we + // must hold the lock until the histogram is removed from the map. + // + // See also StatsSharedImpl::decRefCount() in allocator_impl.cc, which has + // the same issue. + ret = thread_local_store_.decHistogramRefCount(*this, ref_count_); + } + return ret; +} + +bool ThreadLocalStoreImpl::decHistogramRefCount(ParentHistogramImpl& hist, + std::atomic& ref_count) { + // We must hold the store's histogram lock when decrementing the + // refcount. Otherwise another thread may simultaneously try to allocate the + // same name'd stat after we decrement it, and we'll wind up with a + // dtor/update race. To avoid this we must hold the lock until the stat is + // removed from the map. + Thread::LockGuard lock(hist_mutex_); + ASSERT(ref_count >= 1); + if (--ref_count == 0) { + if (!shutting_down_) { + const size_t count = histogram_set_.erase(hist.statName()); + ASSERT(shutting_down_ || count == 1); + } + return true; + } + return false; +} + +SymbolTable& ParentHistogramImpl::symbolTable() { return thread_local_store_.symbolTable(); } + Histogram::Unit ParentHistogramImpl::unit() const { return unit_; } void ParentHistogramImpl::recordValue(uint64_t value) { - Histogram& tls_histogram = tls_scope_.tlsHistogram(statName(), *this); + Histogram& tls_histogram = thread_local_store_.tlsHistogram(*this, id_); tls_histogram.recordValue(value); - parent_.deliverHistogramToSinks(*this, value); + thread_local_store_.deliverHistogramToSinks(*this, value); } bool ParentHistogramImpl::used() const { diff --git a/source/common/stats/thread_local_store.h b/source/common/stats/thread_local_store.h index c86844a2d38c..22d72bfaa9e0 100644 --- a/source/common/stats/thread_local_store.h +++ b/source/common/stats/thread_local_store.h @@ -74,16 +74,16 @@ class ThreadLocalHistogramImpl : public HistogramImplHelper { using TlsHistogramSharedPtr = RefcountPtr; -class TlsScope; +class ThreadLocalStoreImpl; /** * Log Linear Histogram implementation that is stored in the main thread. 
*/ class ParentHistogramImpl : public MetricImpl { public: - ParentHistogramImpl(StatName name, Histogram::Unit unit, Store& parent, TlsScope& tls_scope, + ParentHistogramImpl(StatName name, Histogram::Unit unit, ThreadLocalStoreImpl& parent, StatName tag_extracted_name, const StatNameTagVector& stat_name_tags, - ConstSupportedBuckets& supported_buckets); + ConstSupportedBuckets& supported_buckets, uint64_t id); ~ParentHistogramImpl() override; void addTlsHistogram(const TlsHistogramSharedPtr& hist_ptr); @@ -108,20 +108,23 @@ class ParentHistogramImpl : public MetricImpl { const std::string bucketSummary() const override; // Stats::Metric - SymbolTable& symbolTable() override { return parent_.symbolTable(); } + SymbolTable& symbolTable() override; bool used() const override; // RefcountInterface - void incRefCount() override { refcount_helper_.incRefCount(); } - bool decRefCount() override { return refcount_helper_.decRefCount(); } - uint32_t use_count() const override { return refcount_helper_.use_count(); } + void incRefCount() override; + bool decRefCount() override; + uint32_t use_count() const override { return ref_count_; } + + // Indicates that the ThreadLocalStore is shutting down, so no need to clear its histogram_set_. + void setShuttingDown(bool shutting_down) { shutting_down_ = shutting_down; } + bool shuttingDown() const { return shutting_down_; } private: bool usedLockHeld() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(merge_lock_); Histogram::Unit unit_; - Store& parent_; - TlsScope& tls_scope_; + ThreadLocalStoreImpl& thread_local_store_; histogram_t* interval_histogram_; histogram_t* cumulative_histogram_; HistogramStatisticsImpl interval_statistics_; @@ -129,27 +132,13 @@ class ParentHistogramImpl : public MetricImpl { mutable Thread::MutexBasicLockable merge_lock_; std::list tls_histograms_ ABSL_GUARDED_BY(merge_lock_); bool merged_; - RefcountHelper refcount_helper_; + std::atomic shutting_down_{false}; + std::atomic ref_count_{0}; + const uint64_t id_; // Index into TlsCache::histogram_cache_. }; using ParentHistogramImplSharedPtr = RefcountPtr; -/** - * Class used to create ThreadLocalHistogram in the scope. - */ -class TlsScope : public Scope { -public: - ~TlsScope() override = default; - - // TODO(ramaraochavali): Allow direct TLS access for the advanced consumers. - /** - * @return a ThreadLocalHistogram within the scope's namespace. - * @param name name of the histogram with scope prefix attached. - * @param parent the parent histogram. - */ - virtual Histogram& tlsHistogram(StatName name, ParentHistogramImpl& parent) PURE; -}; - /** * Store implementation with thread local caching. For design details see * https://github.com/envoyproxy/envoy/blob/master/source/docs/stats.md @@ -266,6 +255,8 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo void shutdownThreading() override; void mergeHistograms(PostMergeCb merge_cb) override; + Histogram& tlsHistogram(ParentHistogramImpl& parent, uint64_t id); + /** * @return a thread synchronizer object used for controlling thread behavior in tests. 
*/ @@ -276,7 +267,12 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo */ const StatNameSet& wellKnownTags() const { return *well_known_tags_; } + bool decHistogramRefCount(ParentHistogramImpl& histogram, std::atomic& ref_count); + void releaseHistogramCrossThread(uint64_t histogram_id); + private: + friend class ThreadLocalStoreTestingPeer; + template using StatRefMap = StatNameHashMap>; struct TlsCacheEntry { @@ -288,9 +284,18 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo StatRefMap gauges_; StatRefMap text_readouts_; - // The histogram objects are not shared with the central cache, and don't - // require taking a lock when decrementing their ref-count. - StatNameHashMap histograms_; + // Histograms also require holding a mutex while decrementing reference + // counts. The only difference from other stats is that the histogram_set_ + // lives in the ThreadLocalStore object, rather than in + // AllocatorImpl. Histograms are removed from that set when all scopes + // referencing the histogram are dropped. Each ParentHistogram has a unique + // index, which is not re-used during the process lifetime. + // + // There is also a tls_histogram_cache_ in the TlsCache object, which is + // not tied to a scope. It maps from parent histogram's unique index to + // a TlsHistogram. This enables continuity between same-named histograms + // in same-named scopes. That scenario is common when re-creating scopes in + // response to xDS. StatNameHashMap parent_histograms_; // We keep a TLS cache of rejected stat names. This costs memory, but @@ -315,7 +320,7 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo }; using CentralCacheEntrySharedPtr = RefcountPtr; - struct ScopeImpl : public TlsScope { + struct ScopeImpl : public Scope { ScopeImpl(ThreadLocalStoreImpl& parent, const std::string& prefix); ~ScopeImpl() override; @@ -328,7 +333,6 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo Histogram& histogramFromStatNameWithTags(const StatName& name, StatNameTagVectorOptConstRef tags, Histogram::Unit unit) override; - Histogram& tlsHistogram(StatName name, ParentHistogramImpl& parent) override; TextReadout& textReadoutFromStatNameWithTags(const StatName& name, StatNameTagVectorOptConstRef tags) override; ScopePtr createScope(const std::string& name) override { @@ -437,6 +441,7 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo struct TlsCache : public ThreadLocal::ThreadLocalObject { TlsCacheEntry& insertScope(uint64_t scope_id); void eraseScope(uint64_t scope_id); + void eraseHistogram(uint64_t histogram); // The TLS scope cache is keyed by scope ID. This is used to avoid complex circular references // during scope destruction. An ID is required vs. using the address of the scope pointer @@ -446,6 +451,9 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo // store. See the overview for more information. This complexity is required for lockless // operation in the fast path. absl::flat_hash_map scope_cache_; + + // Maps from histogram ID (monotonically increasing) to a TLS histogram. 
+ absl::flat_hash_map tls_histogram_cache_; }; template bool iterHelper(StatFn fn) const { @@ -460,6 +468,7 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo std::string getTagsForName(const std::string& name, TagVector& tags) const; void clearScopeFromCaches(uint64_t scope_id, CentralCacheEntrySharedPtr central_cache); + void clearHistogramFromCaches(uint64_t histogram_id); void releaseScopeCrossThread(ScopeImpl* scope); void mergeInternal(PostMergeCb merge_cb); bool rejects(StatName name) const; @@ -497,15 +506,19 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo // It seems like it would be better to have each client that expects a stat // to exist to hold it as (e.g.) a CounterSharedPtr rather than a Counter& // but that would be fairly complex to change. - std::vector deleted_counters_; - std::vector deleted_gauges_; - std::vector deleted_histograms_; - std::vector deleted_text_readouts_; + std::vector deleted_counters_ ABSL_GUARDED_BY(lock_); + std::vector deleted_gauges_ ABSL_GUARDED_BY(lock_); + std::vector deleted_histograms_ ABSL_GUARDED_BY(lock_); + std::vector deleted_text_readouts_ ABSL_GUARDED_BY(lock_); Thread::ThreadSynchronizer sync_; std::atomic next_scope_id_{}; + uint64_t next_histogram_id_ ABSL_GUARDED_BY(hist_mutex_) = 0; StatNameSetPtr well_known_tags_; + + mutable Thread::MutexBasicLockable hist_mutex_; + StatSet histogram_set_ ABSL_GUARDED_BY(hist_mutex_); }; using ThreadLocalStoreImplPtr = std::unique_ptr; diff --git a/source/docs/stats.md b/source/docs/stats.md index 43be6992146c..f80d1b46932f 100644 --- a/source/docs/stats.md +++ b/source/docs/stats.md @@ -16,7 +16,7 @@ values, they are passed from parent to child in an RPC protocol. They were previously held in shared memory, which imposed various restrictions. Unlike the shared memory implementation, the RPC passing *requires a mode-bit specified when constructing gauges indicating whether it should be accumulated across hot-restarts*. - + ## Performance and Thread Local Storage A key tenant of the Envoy architecture is high performance on machines with @@ -77,6 +77,18 @@ followed. accumulates in to *interval* histograms. * Finally the main *interval* histogram is merged to *cumulative* histogram. +`ParentHistogram`s are held weakly a set in ThreadLocalStore. Like other stats, +they keep an embedded reference count and are removed from the set and destroyed +when the last strong reference disappears. Consequently, we must hold a lock for +the set when decrementing histogram reference counts. A similar process occurs for +other types of stats, but in those cases it is taken care of in `AllocatorImpl`. +There are strong references to `ParentHistograms` in TlsCacheEntry::parent_histograms_. + +Thread-local `TlsHistogram`s are created on behalf of a `ParentHistogram` +whenever accessed from a worker thread. They are strongly referenced in the +`ParentHistogram` as well as in a cache in the `ThreadLocalStore`, to help +maintain data continuity as scopes are re-created during operation. + ## Stat naming infrastructure and memory consumption Stat names are replicated in several places in various forms. 
diff --git a/source/server/admin/stats_handler.cc b/source/server/admin/stats_handler.cc index 753774f59dc9..e64fd878a8fb 100644 --- a/source/server/admin/stats_handler.cc +++ b/source/server/admin/stats_handler.cc @@ -121,13 +121,11 @@ Http::Code StatsHandler::handlerStats(absl::string_view url, for (const auto& stat : all_stats) { response.add(fmt::format("{}: {}\n", stat.first, stat.second)); } - // TODO(ramaraochavali): See the comment in ThreadLocalStoreImpl::histograms() for why we use a - // multimap here. This makes sure that duplicate histograms get output. When shared storage is - // implemented this can be switched back to a normal map. - std::multimap all_histograms; + std::map all_histograms; for (const Stats::ParentHistogramSharedPtr& histogram : server_.stats().histograms()) { if (shouldShowMetric(*histogram, used_only, regex)) { - all_histograms.emplace(histogram->name(), histogram->quantileSummary()); + auto insert = all_histograms.emplace(histogram->name(), histogram->quantileSummary()); + ASSERT(insert.second); // No duplicates expected. } } for (const auto& histogram : all_histograms) { diff --git a/test/common/stats/thread_local_store_test.cc b/test/common/stats/thread_local_store_test.cc index 726f32174ae8..135c6b424097 100644 --- a/test/common/stats/thread_local_store_test.cc +++ b/test/common/stats/thread_local_store_test.cc @@ -29,6 +29,7 @@ #include "gtest/gtest.h" using testing::_; +using testing::HasSubstr; using testing::InSequence; using testing::NiceMock; using testing::Ref; @@ -39,6 +40,30 @@ namespace Stats { const uint64_t MaxStatNameLength = 127; +class ThreadLocalStoreTestingPeer { +public: + // Calculates the number of TLS histograms across all threads. This requires + // dispatching to all threads and blocking on their completion, and is exposed + // as a testing peer to enable tests that ensure that TLS histograms don't + // leak. + // + // Note that this must be called from the "main thread", which has different + // implications for unit tests that use real threads vs mocks. The easiest way + // to capture this in a general purpose helper is to use a callback to convey + // the resultant sum. 
+ static void numTlsHistograms(ThreadLocalStoreImpl& thread_local_store_impl, + const std::function& num_tls_hist_cb) { + auto num_tls_histograms = std::make_shared>(0); + thread_local_store_impl.tls_->runOnAllThreads( + [&thread_local_store_impl, num_tls_histograms]() { + auto& tls_cache = + thread_local_store_impl.tls_->getTyped(); + *num_tls_histograms += tls_cache.tls_histogram_cache_.size(); + }, + [num_tls_hist_cb, num_tls_histograms]() { num_tls_hist_cb(*num_tls_histograms); }); + } +}; + class StatsThreadLocalStoreTest : public testing::Test { public: StatsThreadLocalStoreTest() @@ -52,6 +77,21 @@ class StatsThreadLocalStoreTest : public testing::Test { store_->addSink(sink_); } + uint32_t numTlsHistograms() { + uint32_t num_tls_histograms; + absl::Mutex mutex; + bool done = false; + ThreadLocalStoreTestingPeer::numTlsHistograms( + *store_, [&mutex, &done, &num_tls_histograms](uint32_t num) { + absl::MutexLock lock(&mutex); + num_tls_histograms = num; + done = true; + }); + absl::MutexLock lock(&mutex); + mutex.Await(absl::Condition(&done)); + return num_tls_histograms; + } + SymbolTablePtr symbol_table_; NiceMock main_thread_dispatcher_; NiceMock tls_; @@ -381,6 +421,52 @@ TEST_F(StatsThreadLocalStoreTest, BasicScope) { store_->shutdownThreading(); scope1->deliverHistogramToSinks(h1, 100); scope1->deliverHistogramToSinks(h2, 200); + scope1.reset(); + tls_.shutdownThread(); +} + +TEST_F(StatsThreadLocalStoreTest, HistogramScopeOverlap) { + InSequence s; + store_->initializeThreading(main_thread_dispatcher_, tls_); + + // Creating two scopes with the same name gets you two distinct scope objects. + ScopePtr scope1 = store_->createScope("scope."); + ScopePtr scope2 = store_->createScope("scope."); + EXPECT_NE(scope1, scope2); + + EXPECT_EQ(0, store_->histograms().size()); + EXPECT_EQ(0, numTlsHistograms()); + + // However, stats created in the two same-named scopes will be the same objects. + Counter& counter = scope1->counterFromString("counter"); + EXPECT_EQ(&counter, &scope2->counterFromString("counter")); + Gauge& gauge = scope1->gaugeFromString("gauge", Gauge::ImportMode::Accumulate); + EXPECT_EQ(&gauge, &scope2->gaugeFromString("gauge", Gauge::ImportMode::Accumulate)); + TextReadout& text_readout = scope1->textReadoutFromString("tr"); + EXPECT_EQ(&text_readout, &scope2->textReadoutFromString("tr")); + Histogram& histogram = scope1->histogramFromString("histogram", Histogram::Unit::Unspecified); + EXPECT_EQ(&histogram, &scope2->histogramFromString("histogram", Histogram::Unit::Unspecified)); + + // The histogram was created in scope1, which can now be destroyed. But the + // histogram is kept alive by scope2. 
+ EXPECT_CALL(sink_, onHistogramComplete(Ref(histogram), 100)); + histogram.recordValue(100); + EXPECT_EQ(1, store_->histograms().size()); + EXPECT_EQ(1, numTlsHistograms()); + scope1.reset(); + EXPECT_EQ(1, store_->histograms().size()); + EXPECT_EQ(1, numTlsHistograms()); + EXPECT_CALL(sink_, onHistogramComplete(Ref(histogram), 200)); + histogram.recordValue(200); + EXPECT_EQ(&histogram, &scope2->histogramFromString("histogram", Histogram::Unit::Unspecified)); + scope2.reset(); + EXPECT_EQ(0, store_->histograms().size()); + EXPECT_EQ(0, numTlsHistograms()); + + store_->shutdownThreading(); + + store_->histogramFromString("histogram_after_shutdown", Histogram::Unit::Unspecified); + tls_.shutdownThread(); } @@ -1102,7 +1188,7 @@ TEST_F(StatsThreadLocalStoreTestNoFixture, MemoryWithTlsFakeSymbolTable) { TestUtil::MemoryTest memory_test; TestUtil::forEachSampleStat( 100, [this](absl::string_view name) { store_->counterFromString(std::string(name)); }); - EXPECT_MEMORY_EQ(memory_test.consumedBytes(), 1498160); // Apr 8, 2020 + EXPECT_MEMORY_EQ(memory_test.consumedBytes(), 1498128); // July 30, 2020 EXPECT_MEMORY_LE(memory_test.consumedBytes(), 1.6 * million_); } @@ -1122,7 +1208,7 @@ TEST_F(StatsThreadLocalStoreTestNoFixture, MemoryWithTlsRealSymbolTable) { TestUtil::MemoryTest memory_test; TestUtil::forEachSampleStat( 100, [this](absl::string_view name) { store_->counterFromString(std::string(name)); }); - EXPECT_MEMORY_EQ(memory_test.consumedBytes(), 827664); // July 2, 2020 + EXPECT_MEMORY_EQ(memory_test.consumedBytes(), 827632); // July 20, 2020 EXPECT_MEMORY_LE(memory_test.consumedBytes(), 0.9 * million_); } @@ -1378,9 +1464,8 @@ TEST_F(HistogramTest, ParentHistogramBucketSummary) { parent_histogram->bucketSummary()); } -class ClusterShutdownCleanupStarvationTest : public ThreadLocalStoreNoMocksTestBase { -public: - static constexpr uint32_t NumThreads = 2; +class ThreadLocalRealThreadsTestBase : public ThreadLocalStoreNoMocksTestBase { +protected: static constexpr uint32_t NumScopes = 1000; static constexpr uint32_t NumIters = 35; @@ -1416,18 +1501,17 @@ class ClusterShutdownCleanupStarvationTest : public ThreadLocalStoreNoMocksTestB absl::BlockingCounter blocking_counter_; }; - ClusterShutdownCleanupStarvationTest() - : start_time_(time_system_.monotonicTime()), api_(Api::createApiForTest()), - thread_factory_(api_->threadFactory()), pool_(store_->symbolTable()), - my_counter_name_(pool_.add("my_counter")), - my_counter_scoped_name_(pool_.add("scope.my_counter")) { + ThreadLocalRealThreadsTestBase(uint32_t num_threads) + : num_threads_(num_threads), start_time_(time_system_.monotonicTime()), + api_(Api::createApiForTest()), thread_factory_(api_->threadFactory()), + pool_(store_->symbolTable()) { // This is the same order as InstanceImpl::initialize in source/server/server.cc. 
- thread_dispatchers_.resize(NumThreads); + thread_dispatchers_.resize(num_threads_); { - BlockingBarrier blocking_barrier(NumThreads + 1); + BlockingBarrier blocking_barrier(num_threads_ + 1); main_thread_ = thread_factory_.createThread( [this, &blocking_barrier]() { mainThreadFn(blocking_barrier); }); - for (uint32_t i = 0; i < NumThreads; ++i) { + for (uint32_t i = 0; i < num_threads_; ++i) { threads_.emplace_back(thread_factory_.createThread( [this, i, &blocking_barrier]() { workerThreadFn(i, blocking_barrier); })); } @@ -1447,7 +1531,7 @@ class ClusterShutdownCleanupStarvationTest : public ThreadLocalStoreNoMocksTestB } } - ~ClusterShutdownCleanupStarvationTest() override { + ~ThreadLocalRealThreadsTestBase() override { { BlockingBarrier blocking_barrier(1); main_dispatcher_->post(blocking_barrier.run([this]() { @@ -1473,14 +1557,6 @@ class ClusterShutdownCleanupStarvationTest : public ThreadLocalStoreNoMocksTestB main_thread_->join(); } - void createScopesIncCountersAndCleanup() { - for (uint32_t i = 0; i < NumScopes; ++i) { - ScopePtr scope = store_->createScope("scope."); - Counter& counter = scope->counterFromStatName(my_counter_name_); - counter.inc(); - } - } - void workerThreadFn(uint32_t thread_index, BlockingBarrier& blocking_barrier) { thread_dispatchers_[thread_index] = api_->allocateDispatcher(absl::StrCat("test_worker_", thread_index)); @@ -1494,19 +1570,21 @@ class ClusterShutdownCleanupStarvationTest : public ThreadLocalStoreNoMocksTestB main_dispatcher_->run(Event::Dispatcher::RunType::RunUntilExit); } - void createScopesIncCountersAndCleanupAllThreads() { - BlockingBarrier blocking_barrier(NumThreads); - for (Event::DispatcherPtr& thread_dispatcher : thread_dispatchers_) { - thread_dispatcher->post( - blocking_barrier.run([this]() { createScopesIncCountersAndCleanup(); })); - } + void mainDispatchBlock() { + // To ensure all stats are freed we have to wait for a few posts() to clear. + // First, wait for the main-dispatcher to initiate the cross-thread TLS cleanup. 
+ BlockingBarrier blocking_barrier(1); + main_dispatcher_->post(blocking_barrier.run([]() {})); } - std::chrono::seconds elapsedTime() { - return std::chrono::duration_cast(time_system_.monotonicTime() - - start_time_); + void tlsBlock() { + BlockingBarrier blocking_barrier(num_threads_); + for (Event::DispatcherPtr& thread_dispatcher : thread_dispatchers_) { + thread_dispatcher->post(blocking_barrier.run([]() {})); + } } + const uint32_t num_threads_; Event::TestRealTimeSystem time_system_; MonotonicTime start_time_; Api::ApiPtr api_; @@ -1517,6 +1595,37 @@ class ClusterShutdownCleanupStarvationTest : public ThreadLocalStoreNoMocksTestB Thread::ThreadPtr main_thread_; std::vector threads_; StatNamePool pool_; +}; + +class ClusterShutdownCleanupStarvationTest : public ThreadLocalRealThreadsTestBase { +protected: + static constexpr uint32_t NumThreads = 2; + + ClusterShutdownCleanupStarvationTest() + : ThreadLocalRealThreadsTestBase(NumThreads), my_counter_name_(pool_.add("my_counter")), + my_counter_scoped_name_(pool_.add("scope.my_counter")) {} + + void createScopesIncCountersAndCleanup() { + for (uint32_t i = 0; i < NumScopes; ++i) { + ScopePtr scope = store_->createScope("scope."); + Counter& counter = scope->counterFromStatName(my_counter_name_); + counter.inc(); + } + } + + void createScopesIncCountersAndCleanupAllThreads() { + BlockingBarrier blocking_barrier(NumThreads); + for (Event::DispatcherPtr& thread_dispatcher : thread_dispatchers_) { + thread_dispatcher->post( + blocking_barrier.run([this]() { createScopesIncCountersAndCleanup(); })); + } + } + + std::chrono::seconds elapsedTime() { + return std::chrono::duration_cast(time_system_.monotonicTime() - + start_time_); + } + StatName my_counter_name_; StatName my_counter_scoped_name_; }; @@ -1529,24 +1638,14 @@ TEST_F(ClusterShutdownCleanupStarvationTest, TwelveThreadsWithBlockade) { for (uint32_t i = 0; i < NumIters && elapsedTime() < std::chrono::seconds(5); ++i) { createScopesIncCountersAndCleanupAllThreads(); - // To ensure all stats are freed we have to wait for a few posts() to clear. // First, wait for the main-dispatcher to initiate the cross-thread TLS cleanup. - auto main_dispatch_block = [this]() { - BlockingBarrier blocking_barrier(1); - main_dispatcher_->post(blocking_barrier.run([]() {})); - }; - main_dispatch_block(); + mainDispatchBlock(); // Next, wait for all the worker threads to complete their TLS cleanup. - { - BlockingBarrier blocking_barrier(NumThreads); - for (Event::DispatcherPtr& thread_dispatcher : thread_dispatchers_) { - thread_dispatcher->post(blocking_barrier.run([]() {})); - } - } + tlsBlock(); // Finally, wait for the final central-cache cleanup, which occurs on the main thread. - main_dispatch_block(); + mainDispatchBlock(); // Here we show that the counter cleanups have finished, because the use-count is 1. 
CounterSharedPtr counter = @@ -1583,5 +1682,124 @@ TEST_F(ClusterShutdownCleanupStarvationTest, TwelveThreadsWithoutBlockade) { store_->sync().signal(ThreadLocalStoreImpl::MainDispatcherCleanupSync); } +class HistogramThreadTest : public ThreadLocalRealThreadsTestBase { +protected: + static constexpr uint32_t NumThreads = 10; + + HistogramThreadTest() : ThreadLocalRealThreadsTestBase(NumThreads) {} + + void mergeHistograms() { + BlockingBarrier blocking_barrier(1); + main_dispatcher_->post([this, &blocking_barrier]() { + store_->mergeHistograms(blocking_barrier.decrementCountFn()); + }); + } + + uint32_t numTlsHistograms() { + uint32_t num; + { + BlockingBarrier blocking_barrier(1); + main_dispatcher_->post([this, &num, &blocking_barrier]() { + ThreadLocalStoreTestingPeer::numTlsHistograms(*store_, + [&num, &blocking_barrier](uint32_t num_hist) { + num = num_hist; + blocking_barrier.decrementCount(); + }); + }); + } + return num; + } + + // Executes a function on every worker thread dispatcher. + void foreachThread(const std::function& fn) { + BlockingBarrier blocking_barrier(NumThreads); + for (Event::DispatcherPtr& thread_dispatcher : thread_dispatchers_) { + thread_dispatcher->post(blocking_barrier.run(fn)); + } + } +}; + +TEST_F(HistogramThreadTest, MakeHistogramsAndRecordValues) { + foreachThread([this]() { + Histogram& histogram = + store_->histogramFromString("my_hist", Stats::Histogram::Unit::Unspecified); + histogram.recordValue(42); + }); + + mergeHistograms(); + + auto histograms = store_->histograms(); + ASSERT_EQ(1, histograms.size()); + ParentHistogramSharedPtr hist = histograms[0]; + EXPECT_THAT(hist->bucketSummary(), + HasSubstr(absl::StrCat(" B25(0,0) B50(", NumThreads, ",", NumThreads, ") "))); +} + +TEST_F(HistogramThreadTest, ScopeOverlap) { + // Creating two scopes with the same name gets you two distinct scope objects. + ScopePtr scope1 = store_->createScope("scope."); + ScopePtr scope2 = store_->createScope("scope."); + EXPECT_NE(scope1, scope2); + + EXPECT_EQ(0, store_->histograms().size()); + EXPECT_EQ(0, numTlsHistograms()); + + // Histograms created in the two same-named scopes will be the same objects. + foreachThread([&scope1, &scope2]() { + Histogram& histogram = scope1->histogramFromString("histogram", Histogram::Unit::Unspecified); + EXPECT_EQ(&histogram, &scope2->histogramFromString("histogram", Histogram::Unit::Unspecified)); + histogram.recordValue(100); + }); + + mergeHistograms(); + + // Verify that we have the expected number of TLS histograms since we accessed + // the histogram on every thread. + std::vector histograms = store_->histograms(); + ASSERT_EQ(1, histograms.size()); + EXPECT_EQ(NumThreads, numTlsHistograms()); + + // There's no convenient API to pull data out of the histogram, except as + // a string. This expectation captures the bucket transition to indicate + // 0 samples at less than 100, and 10 between 100 and 249 inclusive. + EXPECT_THAT(histograms[0]->bucketSummary(), + HasSubstr(absl::StrCat(" B100(0,0) B250(", NumThreads, ",", NumThreads, ") "))); + + // The histogram was created in scope1, which can now be destroyed. But the + // histogram is kept alive by scope2. + scope1.reset(); + histograms = store_->histograms(); + EXPECT_EQ(1, histograms.size()); + EXPECT_EQ(NumThreads, numTlsHistograms()); + + // We can continue to accumulate samples at the scope2's view of the same + // histogram, and they will combine with the existing data, despite the + // fact that scope1 has been deleted. 
+ foreachThread([&scope2]() { + Histogram& histogram = scope2->histogramFromString("histogram", Histogram::Unit::Unspecified); + histogram.recordValue(300); + }); + + mergeHistograms(); + + // Shows the bucket summary with 10 samples at >=100, and 20 at >=250. + EXPECT_THAT(histograms[0]->bucketSummary(), + HasSubstr(absl::StrCat(" B100(0,0) B250(0,", NumThreads, ") B500(", NumThreads, ",", + 2 * NumThreads, ") "))); + + // Now clear everything, and synchronize the system by calling mergeHistograms(). + // There should be no more ParentHistograms or TlsHistograms. + scope2.reset(); + histograms.clear(); + mergeHistograms(); + + EXPECT_EQ(0, store_->histograms().size()); + EXPECT_EQ(0, numTlsHistograms()); + + store_->shutdownThreading(); + + store_->histogramFromString("histogram_after_shutdown", Histogram::Unit::Unspecified); +} + } // namespace Stats } // namespace Envoy diff --git a/test/integration/stats_integration_test.cc b/test/integration/stats_integration_test.cc index 26143f370000..f66a9b8a14bf 100644 --- a/test/integration/stats_integration_test.cc +++ b/test/integration/stats_integration_test.cc @@ -287,6 +287,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithFakeSymbolTable) { // and body sizes. // 2020/07/21 12034 44811 46000 Add configurable histogram buckets. // 2020/07/31 12035 45002 46000 Init manager store unready targets in hash map. + // 2020/08/10 12275 44949 46000 Re-organize tls histogram maps to improve continuity. // Note: when adjusting this value: EXPECT_MEMORY_EQ is active only in CI // 'release' builds, where we control the platform and tool-chain. So you @@ -304,7 +305,8 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithFakeSymbolTable) { // We only run the exact test for ipv6 because ipv4 in some cases may allocate a // different number of bytes. We still run the approximate test. if (ip_version_ != Network::Address::IpVersion::v6) { - EXPECT_MEMORY_EQ(m_per_cluster, 45002); + // https://github.com/envoyproxy/envoy/issues/12209 + // EXPECT_MEMORY_EQ(m_per_cluster, 44949); } EXPECT_MEMORY_LE(m_per_cluster, 46000); // Round up to allow platform variations. } @@ -363,6 +365,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithRealSymbolTable) { // and body sizes. // 2020/07/21 12034 36923 38000 Add configurable histogram buckets. // 2020/07/31 12035 37114 38000 Init manager store unready targets in hash map. + // 2020/08/10 12275 37061 38000 Re-organize tls histogram maps to improve continuity. // Note: when adjusting this value: EXPECT_MEMORY_EQ is active only in CI // 'release' builds, where we control the platform and tool-chain. So you @@ -380,7 +383,8 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSizeWithRealSymbolTable) { // We only run the exact test for ipv6 because ipv4 in some cases may allocate a // different number of bytes. We still run the approximate test. if (ip_version_ != Network::Address::IpVersion::v6) { - EXPECT_MEMORY_EQ(m_per_cluster, 37114); + // https://github.com/envoyproxy/envoy/issues/12209 + // EXPECT_MEMORY_EQ(m_per_cluster, 37061); } EXPECT_MEMORY_LE(m_per_cluster, 38000); // Round up to allow platform variations.
} From 7cce05af396bcd52e140b34a77de8e520bd1f457 Mon Sep 17 00:00:00 2001 From: Elisha Ziskind Date: Mon, 10 Aug 2020 22:03:57 -0400 Subject: [PATCH 904/909] Add missing header files (#12581) Signed-off-by: Elisha Ziskind --- source/common/http/async_client_utility.cc | 2 ++ source/common/network/filter_matcher.cc | 4 +++- source/common/signal/BUILD | 3 +++ source/common/signal/fatal_error_handler.cc | 2 ++ 4 files changed, 10 insertions(+), 1 deletion(-) diff --git a/source/common/http/async_client_utility.cc b/source/common/http/async_client_utility.cc index 17124f06fb34..664a0fc0c651 100644 --- a/source/common/http/async_client_utility.cc +++ b/source/common/http/async_client_utility.cc @@ -1,5 +1,7 @@ #include "common/http/async_client_utility.h" +#include "common/common/assert.h" + namespace Envoy { namespace Http { diff --git a/source/common/network/filter_matcher.cc b/source/common/network/filter_matcher.cc index 6668850db44e..7b2831b8a55e 100644 --- a/source/common/network/filter_matcher.cc +++ b/source/common/network/filter_matcher.cc @@ -2,6 +2,8 @@ #include "envoy/network/filter.h" +#include "common/common/assert.h" + #include "absl/strings/str_format.h" namespace Envoy { @@ -50,4 +52,4 @@ bool ListenerFilterAndMatcher::matches(ListenerFilterCallbacks& cb) const { } } // namespace Network -} // namespace Envoy \ No newline at end of file +} // namespace Envoy diff --git a/source/common/signal/BUILD b/source/common/signal/BUILD index 3008c01cb50e..2a18144c87db 100644 --- a/source/common/signal/BUILD +++ b/source/common/signal/BUILD @@ -12,6 +12,9 @@ envoy_cc_library( name = "fatal_error_handler_lib", srcs = ["fatal_error_handler.cc"], hdrs = ["fatal_error_handler.h"], + deps = [ + "//source/common/common:macros", + ], ) envoy_cc_library( diff --git a/source/common/signal/fatal_error_handler.cc b/source/common/signal/fatal_error_handler.cc index b215d158b158..125093e3c589 100644 --- a/source/common/signal/fatal_error_handler.cc +++ b/source/common/signal/fatal_error_handler.cc @@ -2,6 +2,8 @@ #include +#include "common/common/macros.h" + #include "absl/base/attributes.h" #include "absl/synchronization/mutex.h" From 573170bb77c885278955b2f0d44dbae9875828c6 Mon Sep 17 00:00:00 2001 From: Lisa Lu Date: Mon, 10 Aug 2020 19:07:14 -0700 Subject: [PATCH 905/909] Revert buffer filter visibility back to public (#12579) Commit Message: Revert buffer filter visibility back to public Additional Description: After bringing in #12337, we are unable to build the router check tool as we build it with the buffer filter extension, which is no longer visible to the target we use. This change reverts the visibility change for the buffer filter back to public. Risk Level: Low Testing: N/A Docs Changes: N/A Release Notes: N/A Part of #12444 Signed-off-by: Lisa Lu --- source/extensions/filters/http/buffer/BUILD | 4 ---- 1 file changed, 4 deletions(-) diff --git a/source/extensions/filters/http/buffer/BUILD b/source/extensions/filters/http/buffer/BUILD index 9f9364576031..c39db2ac9a85 100644 --- a/source/extensions/filters/http/buffer/BUILD +++ b/source/extensions/filters/http/buffer/BUILD @@ -39,10 +39,6 @@ envoy_cc_extension( hdrs = ["config.h"], security_posture = "robust_to_untrusted_downstream", # Legacy test use. TODO(#9953) clean up. 
- visibility = [ - "//:extension_config", - "//test:__subpackages__", - ], deps = [ "//include/envoy/registry", "//source/extensions/filters/http:well_known_names", From 689bd37806efb3e8bba3feccc1bc09a8e3270d4a Mon Sep 17 00:00:00 2001 From: Jose Ulises Nino Rivera Date: Mon, 10 Aug 2020 21:10:53 -0700 Subject: [PATCH 906/909] buffer impl: add cast for android compilation (#12583) Commit Message: add cast for android compilation Risk Level: low using the expected type for the constructor as the static_cast type. Testing: local build of envoy mobile for android. CI Signed-off-by: Jose Nino --- source/common/buffer/buffer_impl.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/common/buffer/buffer_impl.h b/source/common/buffer/buffer_impl.h index 92ff88742dc1..f5cea7650421 100644 --- a/source/common/buffer/buffer_impl.h +++ b/source/common/buffer/buffer_impl.h @@ -40,7 +40,7 @@ class Slice : public SliceData { // SliceData absl::Span getMutableData() override { RELEASE_ASSERT(isMutable(), "Not allowed to call getMutableData if slice is immutable"); - return {base_ + data_, reservable_ - data_}; + return {base_ + data_, static_cast::size_type>(reservable_ - data_)}; } /** From 89b594e09e2f4ebaac43c6b38d778a3e0dc13a3a Mon Sep 17 00:00:00 2001 From: David Raskin <66272127+davidraskin@users.noreply.github.com> Date: Mon, 10 Aug 2020 23:51:19 -0500 Subject: [PATCH 907/909] api: Add log action to RBAC filter api (#11705) The log action will be used to set the dynamic metadata key "envoy.log", which can be used to decide whether to log a request. Signed-off-by: davidraskin --- api/envoy/config/rbac/v3/rbac.proto | 108 +++++++++++------- api/envoy/config/rbac/v4alpha/rbac.proto | 108 +++++++++++------- .../advanced/well_known_dynamic_metadata.rst | 21 ++++ .../http/http_filters/rbac_filter.rst | 3 + .../listeners/network_filters/rbac_filter.rst | 3 + docs/root/version_history/current.rst | 1 + .../envoy/config/rbac/v3/rbac.proto | 108 +++++++++++------- .../envoy/config/rbac/v4alpha/rbac.proto | 108 +++++++++++------- .../extensions/filters/common/rbac/engine.h | 20 ++-- .../filters/common/rbac/engine_impl.cc | 57 ++++++--- .../filters/common/rbac/engine_impl.h | 34 +++++- .../extensions/filters/common/rbac/utility.h | 21 +--- .../filters/http/rbac/rbac_filter.cc | 7 +- .../filters/network/rbac/rbac_filter.cc | 6 +- .../filters/common/rbac/engine_impl_test.cc | 107 ++++++++++++++--- test/extensions/filters/common/rbac/mocks.h | 13 ++- .../http/rbac/rbac_filter_integration_test.cc | 37 ++++++ .../filters/http/rbac/rbac_filter_test.cc | 90 ++++++++++++++- .../filters/network/rbac/filter_test.cc | 71 +++++++++++- 19 files changed, 677 insertions(+), 246 deletions(-) diff --git a/api/envoy/config/rbac/v3/rbac.proto b/api/envoy/config/rbac/v3/rbac.proto index 10520b1ba38f..278e6857603f 100644 --- a/api/envoy/config/rbac/v3/rbac.proto +++ b/api/envoy/config/rbac/v3/rbac.proto @@ -24,8 +24,14 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Role Based Access Control (RBAC)] // Role Based Access Control (RBAC) provides service-level and method-level access control for a -// service. RBAC policies are additive. The policies are examined in order. A request is allowed -// once a matching policy is found (suppose the `action` is ALLOW). +// service. RBAC policies are additive. The policies are examined in order. Requests are allowed +// or denied based on the `action` and whether a matching policy is found. 
For instance, if the +// action is ALLOW and a matching policy is found the request should be allowed. +// +// RBAC can also be used to make access logging decisions by communicating with access loggers +// through dynamic metadata. When the action is LOG and at least one policy matches, the +// `access_log_hint` value in the shared key namespace 'envoy.common' is set to `true` indicating +// the request should be logged. // // Here is an example of RBAC configuration. It has two policies: // @@ -68,39 +74,55 @@ message RBAC { // Should we do safe-list or block-list style access control? enum Action { - // The policies grant access to principals. The rest is denied. This is safe-list style + // The policies grant access to principals. The rest are denied. This is safe-list style // access control. This is the default type. ALLOW = 0; - // The policies deny access to principals. The rest is allowed. This is block-list style + // The policies deny access to principals. The rest are allowed. This is block-list style // access control. DENY = 1; + + // The policies set the `access_log_hint` dynamic metadata key based on if requests match. + // All requests are allowed. + LOG = 2; } - // The action to take if a policy matches. The request is allowed if and only if: + // The action to take if a policy matches. Every action either allows or denies a request, + // and can also carry out action-specific operations. + // + // Actions: + // + // * ALLOW: Allows the request if and only if there is a policy that matches + // the request. + // * DENY: Allows the request if and only if there are no policies that + // match the request. + // * LOG: Allows all requests. If at least one policy matches, the dynamic + // metadata key `access_log_hint` is set to the value `true` under the shared + // key namespace 'envoy.common'. If no policies match, it is set to `false`. + // Other actions do not modify this key. // - // * `action` is "ALLOWED" and at least one policy matches - // * `action` is "DENY" and none of the policies match Action action = 1; // Maps from policy name to policy. A match occurs when at least one policy matches the request. map policies = 2; } -// Policy specifies a role and the principals that are assigned/denied the role. A policy matches if -// and only if at least one of its permissions match the action taking place AND at least one of its -// principals match the downstream AND the condition is true if specified. +// Policy specifies a role and the principals that are assigned/denied the role. +// A policy matches if and only if at least one of its permissions match the +// action taking place AND at least one of its principals match the downstream +// AND the condition is true if specified. message Policy { option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v2.Policy"; - // Required. The set of permissions that define a role. Each permission is matched with OR - // semantics. To match all actions for this policy, a single Permission with the `any` field set - // to true should be used. + // Required. The set of permissions that define a role. Each permission is + // matched with OR semantics. To match all actions for this policy, a single + // Permission with the `any` field set to true should be used. repeated Permission permissions = 1 [(validate.rules).repeated = {min_items: 1}]; - // Required. The set of principals that are assigned/denied the role based on “action”. Each - // principal is matched with OR semantics. 
To match all downstreams for this policy, a single - // Principal with the `any` field set to true should be used. + // Required. The set of principals that are assigned/denied the role based on + // “action”. Each principal is matched with OR semantics. To match all + // downstreams for this policy, a single Principal with the `any` field set to + // true should be used. repeated Principal principals = 2 [(validate.rules).repeated = {min_items: 1}]; // An optional symbolic expression specifying an access control @@ -161,9 +183,9 @@ message Permission { // Metadata that describes additional information about the action. type.matcher.v3.MetadataMatcher metadata = 7; - // Negates matching the provided permission. For instance, if the value of `not_rule` would - // match, this permission would not match. Conversely, if the value of `not_rule` would not - // match, this permission would match. + // Negates matching the provided permission. For instance, if the value of + // `not_rule` would match, this permission would not match. Conversely, if + // the value of `not_rule` would not match, this permission would match. Permission not_rule = 8; // The request server from the client's connection request. This is @@ -176,7 +198,8 @@ message Permission { // // * If the :ref:`TLS Inspector ` // filter is not added, and if a `FilterChainMatch` is not defined for - // the :ref:`server name `, + // the :ref:`server name + // `, // a TLS connection's requested SNI server name will be treated as if it // wasn't present. // @@ -189,13 +212,14 @@ message Permission { } } -// Principal defines an identity or a group of identities for a downstream subject. +// Principal defines an identity or a group of identities for a downstream +// subject. // [#next-free-field: 12] message Principal { option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v2.Principal"; - // Used in the `and_ids` and `or_ids` fields in the `identifier` oneof. Depending on the context, - // each are applied with the associated behavior. + // Used in the `and_ids` and `or_ids` fields in the `identifier` oneof. + // Depending on the context, each are applied with the associated behavior. message Set { option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v2.Principal.Set"; @@ -210,19 +234,21 @@ message Principal { reserved 1; - // The name of the principal. If set, The URI SAN or DNS SAN in that order is used from the - // certificate, otherwise the subject field is used. If unset, it applies to any user that is - // authenticated. + // The name of the principal. If set, The URI SAN or DNS SAN in that order + // is used from the certificate, otherwise the subject field is used. If + // unset, it applies to any user that is authenticated. type.matcher.v3.StringMatcher principal_name = 2; } oneof identifier { option (validate.required) = true; - // A set of identifiers that all must match in order to define the downstream. + // A set of identifiers that all must match in order to define the + // downstream. Set and_ids = 1; - // A set of identifiers at least one must match in order to define the downstream. + // A set of identifiers at least one must match in order to define the + // downstream. Set or_ids = 2; // When any is set, it matches any downstream. @@ -237,21 +263,23 @@ message Principal { // A CIDR block that describes the downstream remote/origin address. 
// Note: This is always the physical peer even if the - // :ref:`remote_ip ` is inferred - // from for example the x-forwarder-for header, proxy protocol, etc. + // :ref:`remote_ip ` is + // inferred from for example the x-forwarder-for header, proxy protocol, + // etc. core.v3.CidrRange direct_remote_ip = 10; // A CIDR block that describes the downstream remote/origin address. // Note: This may not be the physical peer and could be different from the - // :ref:`direct_remote_ip `. - // E.g, if the remote ip is inferred from for example the x-forwarder-for header, - // proxy protocol, etc. + // :ref:`direct_remote_ip + // `. E.g, if the + // remote ip is inferred from for example the x-forwarder-for header, proxy + // protocol, etc. core.v3.CidrRange remote_ip = 11; - // A header (or pseudo-header such as :path or :method) on the incoming HTTP request. Only - // available for HTTP request. - // Note: the pseudo-header :path includes the query and fragment string. Use the `url_path` - // field if you want to match the URL path without the query and fragment string. + // A header (or pseudo-header such as :path or :method) on the incoming HTTP + // request. Only available for HTTP request. Note: the pseudo-header :path + // includes the query and fragment string. Use the `url_path` field if you + // want to match the URL path without the query and fragment string. route.v3.HeaderMatcher header = 6; // A URL path on the incoming HTTP request. Only available for HTTP. @@ -260,9 +288,9 @@ message Principal { // Metadata that describes additional information about the principal. type.matcher.v3.MetadataMatcher metadata = 7; - // Negates matching the provided principal. For instance, if the value of `not_id` would match, - // this principal would not match. Conversely, if the value of `not_id` would not match, this - // principal would match. + // Negates matching the provided principal. For instance, if the value of + // `not_id` would match, this principal would not match. Conversely, if the + // value of `not_id` would not match, this principal would match. Principal not_id = 8; } } diff --git a/api/envoy/config/rbac/v4alpha/rbac.proto b/api/envoy/config/rbac/v4alpha/rbac.proto index 11b69b16e679..cc9d8933abab 100644 --- a/api/envoy/config/rbac/v4alpha/rbac.proto +++ b/api/envoy/config/rbac/v4alpha/rbac.proto @@ -23,8 +23,14 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // [#protodoc-title: Role Based Access Control (RBAC)] // Role Based Access Control (RBAC) provides service-level and method-level access control for a -// service. RBAC policies are additive. The policies are examined in order. A request is allowed -// once a matching policy is found (suppose the `action` is ALLOW). +// service. RBAC policies are additive. The policies are examined in order. Requests are allowed +// or denied based on the `action` and whether a matching policy is found. For instance, if the +// action is ALLOW and a matching policy is found the request should be allowed. +// +// RBAC can also be used to make access logging decisions by communicating with access loggers +// through dynamic metadata. When the action is LOG and at least one policy matches, the +// `access_log_hint` value in the shared key namespace 'envoy.common' is set to `true` indicating +// the request should be logged. // // Here is an example of RBAC configuration. It has two policies: // @@ -67,39 +73,55 @@ message RBAC { // Should we do safe-list or block-list style access control? 
enum Action { - // The policies grant access to principals. The rest is denied. This is safe-list style + // The policies grant access to principals. The rest are denied. This is safe-list style // access control. This is the default type. ALLOW = 0; - // The policies deny access to principals. The rest is allowed. This is block-list style + // The policies deny access to principals. The rest are allowed. This is block-list style // access control. DENY = 1; + + // The policies set the `access_log_hint` dynamic metadata key based on if requests match. + // All requests are allowed. + LOG = 2; } - // The action to take if a policy matches. The request is allowed if and only if: + // The action to take if a policy matches. Every action either allows or denies a request, + // and can also carry out action-specific operations. + // + // Actions: + // + // * ALLOW: Allows the request if and only if there is a policy that matches + // the request. + // * DENY: Allows the request if and only if there are no policies that + // match the request. + // * LOG: Allows all requests. If at least one policy matches, the dynamic + // metadata key `access_log_hint` is set to the value `true` under the shared + // key namespace 'envoy.common'. If no policies match, it is set to `false`. + // Other actions do not modify this key. // - // * `action` is "ALLOWED" and at least one policy matches - // * `action` is "DENY" and none of the policies match Action action = 1; // Maps from policy name to policy. A match occurs when at least one policy matches the request. map policies = 2; } -// Policy specifies a role and the principals that are assigned/denied the role. A policy matches if -// and only if at least one of its permissions match the action taking place AND at least one of its -// principals match the downstream AND the condition is true if specified. +// Policy specifies a role and the principals that are assigned/denied the role. +// A policy matches if and only if at least one of its permissions match the +// action taking place AND at least one of its principals match the downstream +// AND the condition is true if specified. message Policy { option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v3.Policy"; - // Required. The set of permissions that define a role. Each permission is matched with OR - // semantics. To match all actions for this policy, a single Permission with the `any` field set - // to true should be used. + // Required. The set of permissions that define a role. Each permission is + // matched with OR semantics. To match all actions for this policy, a single + // Permission with the `any` field set to true should be used. repeated Permission permissions = 1 [(validate.rules).repeated = {min_items: 1}]; - // Required. The set of principals that are assigned/denied the role based on “action”. Each - // principal is matched with OR semantics. To match all downstreams for this policy, a single - // Principal with the `any` field set to true should be used. + // Required. The set of principals that are assigned/denied the role based on + // “action”. Each principal is matched with OR semantics. To match all + // downstreams for this policy, a single Principal with the `any` field set to + // true should be used. repeated Principal principals = 2 [(validate.rules).repeated = {min_items: 1}]; oneof expression_specifier { @@ -160,9 +182,9 @@ message Permission { // Metadata that describes additional information about the action. 
type.matcher.v4alpha.MetadataMatcher metadata = 7; - // Negates matching the provided permission. For instance, if the value of `not_rule` would - // match, this permission would not match. Conversely, if the value of `not_rule` would not - // match, this permission would match. + // Negates matching the provided permission. For instance, if the value of + // `not_rule` would match, this permission would not match. Conversely, if + // the value of `not_rule` would not match, this permission would match. Permission not_rule = 8; // The request server from the client's connection request. This is @@ -175,7 +197,8 @@ message Permission { // // * If the :ref:`TLS Inspector ` // filter is not added, and if a `FilterChainMatch` is not defined for - // the :ref:`server name `, + // the :ref:`server name + // `, // a TLS connection's requested SNI server name will be treated as if it // wasn't present. // @@ -188,13 +211,14 @@ message Permission { } } -// Principal defines an identity or a group of identities for a downstream subject. +// Principal defines an identity or a group of identities for a downstream +// subject. // [#next-free-field: 12] message Principal { option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v3.Principal"; - // Used in the `and_ids` and `or_ids` fields in the `identifier` oneof. Depending on the context, - // each are applied with the associated behavior. + // Used in the `and_ids` and `or_ids` fields in the `identifier` oneof. + // Depending on the context, each are applied with the associated behavior. message Set { option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v3.Principal.Set"; @@ -209,9 +233,9 @@ message Principal { reserved 1; - // The name of the principal. If set, The URI SAN or DNS SAN in that order is used from the - // certificate, otherwise the subject field is used. If unset, it applies to any user that is - // authenticated. + // The name of the principal. If set, The URI SAN or DNS SAN in that order + // is used from the certificate, otherwise the subject field is used. If + // unset, it applies to any user that is authenticated. type.matcher.v4alpha.StringMatcher principal_name = 2; } @@ -222,10 +246,12 @@ message Principal { oneof identifier { option (validate.required) = true; - // A set of identifiers that all must match in order to define the downstream. + // A set of identifiers that all must match in order to define the + // downstream. Set and_ids = 1; - // A set of identifiers at least one must match in order to define the downstream. + // A set of identifiers at least one must match in order to define the + // downstream. Set or_ids = 2; // When any is set, it matches any downstream. @@ -236,21 +262,23 @@ message Principal { // A CIDR block that describes the downstream remote/origin address. // Note: This is always the physical peer even if the - // :ref:`remote_ip ` is inferred - // from for example the x-forwarder-for header, proxy protocol, etc. + // :ref:`remote_ip ` is + // inferred from for example the x-forwarder-for header, proxy protocol, + // etc. core.v4alpha.CidrRange direct_remote_ip = 10; // A CIDR block that describes the downstream remote/origin address. // Note: This may not be the physical peer and could be different from the - // :ref:`direct_remote_ip `. - // E.g, if the remote ip is inferred from for example the x-forwarder-for header, - // proxy protocol, etc. + // :ref:`direct_remote_ip + // `. 
E.g, if the + // remote ip is inferred from for example the x-forwarder-for header, proxy + // protocol, etc. core.v4alpha.CidrRange remote_ip = 11; - // A header (or pseudo-header such as :path or :method) on the incoming HTTP request. Only - // available for HTTP request. - // Note: the pseudo-header :path includes the query and fragment string. Use the `url_path` - // field if you want to match the URL path without the query and fragment string. + // A header (or pseudo-header such as :path or :method) on the incoming HTTP + // request. Only available for HTTP request. Note: the pseudo-header :path + // includes the query and fragment string. Use the `url_path` field if you + // want to match the URL path without the query and fragment string. route.v4alpha.HeaderMatcher header = 6; // A URL path on the incoming HTTP request. Only available for HTTP. @@ -259,9 +287,9 @@ message Principal { // Metadata that describes additional information about the principal. type.matcher.v4alpha.MetadataMatcher metadata = 7; - // Negates matching the provided principal. For instance, if the value of `not_id` would match, - // this principal would not match. Conversely, if the value of `not_id` would not match, this - // principal would match. + // Negates matching the provided principal. For instance, if the value of + // `not_id` would match, this principal would not match. Conversely, if the + // value of `not_id` would not match, this principal would match. Principal not_id = 8; } } diff --git a/docs/root/configuration/advanced/well_known_dynamic_metadata.rst b/docs/root/configuration/advanced/well_known_dynamic_metadata.rst index 0088a85d9b94..4d7f8ed3872c 100644 --- a/docs/root/configuration/advanced/well_known_dynamic_metadata.rst +++ b/docs/root/configuration/advanced/well_known_dynamic_metadata.rst @@ -27,3 +27,24 @@ The following Envoy filters can be configured to consume dynamic metadata emitte * :ref:`External Authorization Filter via the metadata context namespaces ` * :ref:`RateLimit Filter limit override ` + +.. _shared_dynamic_metadata: + +Shared Dynamic Metadata +----------------------- +Dynamic metadata that is set by multiple filters is placed in the common key namespace `envoy.common`. Refer to the corresponding rules when setting this metadata. + +.. csv-table:: + :header: Name, Type, Description, Rules + :widths: 1, 1, 3, 3 + + access_log_hint, boolean, Whether access loggers should log the request., "When this metadata is already set: A `true` value should not be overwritten by a `false` value, while a `false` value can be overwritten by a `true` value." + +The following Envoy filters emit shared dynamic metadata. + +* :ref:`Role Based Access Control (RBAC) Filter ` +* :ref:`Role Based Access Control (RBAC) Network Filter ` + +The following filters consume shared dynamic metadata. + +* :ref:`Metadata Access Log Filter` diff --git a/docs/root/configuration/http/http_filters/rbac_filter.rst b/docs/root/configuration/http/http_filters/rbac_filter.rst index d6068bbdcc6c..5db112d924ef 100644 --- a/docs/root/configuration/http/http_filters/rbac_filter.rst +++ b/docs/root/configuration/http/http_filters/rbac_filter.rst @@ -36,6 +36,8 @@ owning HTTP connection manager. 
denied, Counter, Total requests that were denied access shadow_allowed, Counter, Total requests that would be allowed access by the filter's shadow rules shadow_denied, Counter, Total requests that would be denied access by the filter's shadow rules + logged, Counter, Total requests that should be logged + not_logged, Counter, Total requests that should not be logged .. _config_http_filters_rbac_dynamic_metadata: @@ -50,3 +52,4 @@ The RBAC filter emits the following dynamic metadata. shadow_effective_policy_id, string, The effective shadow policy ID matching the action (if any). shadow_engine_result, string, The engine result for the shadow rules (i.e. either `allowed` or `denied`). + access_log_hint, boolean, Whether the request should be logged. This metadata is shared and set under the key namespace 'envoy.common' (See :ref:`Shared Dynamic Metadata`). diff --git a/docs/root/configuration/listeners/network_filters/rbac_filter.rst b/docs/root/configuration/listeners/network_filters/rbac_filter.rst index d07417492045..68ae9f2172d4 100644 --- a/docs/root/configuration/listeners/network_filters/rbac_filter.rst +++ b/docs/root/configuration/listeners/network_filters/rbac_filter.rst @@ -26,6 +26,8 @@ The RBAC network filter outputs statistics in the *.rbac.* namespac denied, Counter, Total requests that were denied access shadow_allowed, Counter, Total requests that would be allowed access by the filter's shadow rules shadow_denied, Counter, Total requests that would be denied access by the filter's shadow rules + logged, Counter, Total requests that should be logged + not_logged, Counter, Total requests that should not be logged .. _config_network_filters_rbac_dynamic_metadata: @@ -40,3 +42,4 @@ The RBAC filter emits the following dynamic metadata. shadow_effective_policy_id, string, The effective shadow policy ID matching the action (if any). shadow_engine_result, string, The engine result for the shadow rules (i.e. either `allowed` or `denied`). + access_log_hint, boolean, Whether the request should be logged. This metadata is shared and set under the key namespace 'envoy.common' (See :ref:`Shared Dynamic Metadata`). diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index f5b6217a1757..4a6ac04a2576 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -60,6 +60,7 @@ New Features * lua: added Lua APIs to access :ref:`SSL connection info ` object. * postgres network filter: :ref:`metadata ` is produced based on SQL query. * ratelimit: added :ref:`enable_x_ratelimit_headers ` option to enable `X-RateLimit-*` headers as defined in `draft RFC `_. +* rbac filter: added a log action to the :ref:`RBAC filter ` which sets dynamic metadata to inform access loggers whether to log. * router: added new :ref:`envoy-ratelimited` retry policy, which allows retrying envoy's own rate limited responses. diff --git a/generated_api_shadow/envoy/config/rbac/v3/rbac.proto b/generated_api_shadow/envoy/config/rbac/v3/rbac.proto index 10520b1ba38f..278e6857603f 100644 --- a/generated_api_shadow/envoy/config/rbac/v3/rbac.proto +++ b/generated_api_shadow/envoy/config/rbac/v3/rbac.proto @@ -24,8 +24,14 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Role Based Access Control (RBAC)] // Role Based Access Control (RBAC) provides service-level and method-level access control for a -// service. RBAC policies are additive. The policies are examined in order. 
A request is allowed -// once a matching policy is found (suppose the `action` is ALLOW). +// service. RBAC policies are additive. The policies are examined in order. Requests are allowed +// or denied based on the `action` and whether a matching policy is found. For instance, if the +// action is ALLOW and a matching policy is found the request should be allowed. +// +// RBAC can also be used to make access logging decisions by communicating with access loggers +// through dynamic metadata. When the action is LOG and at least one policy matches, the +// `access_log_hint` value in the shared key namespace 'envoy.common' is set to `true` indicating +// the request should be logged. // // Here is an example of RBAC configuration. It has two policies: // @@ -68,39 +74,55 @@ message RBAC { // Should we do safe-list or block-list style access control? enum Action { - // The policies grant access to principals. The rest is denied. This is safe-list style + // The policies grant access to principals. The rest are denied. This is safe-list style // access control. This is the default type. ALLOW = 0; - // The policies deny access to principals. The rest is allowed. This is block-list style + // The policies deny access to principals. The rest are allowed. This is block-list style // access control. DENY = 1; + + // The policies set the `access_log_hint` dynamic metadata key based on if requests match. + // All requests are allowed. + LOG = 2; } - // The action to take if a policy matches. The request is allowed if and only if: + // The action to take if a policy matches. Every action either allows or denies a request, + // and can also carry out action-specific operations. + // + // Actions: + // + // * ALLOW: Allows the request if and only if there is a policy that matches + // the request. + // * DENY: Allows the request if and only if there are no policies that + // match the request. + // * LOG: Allows all requests. If at least one policy matches, the dynamic + // metadata key `access_log_hint` is set to the value `true` under the shared + // key namespace 'envoy.common'. If no policies match, it is set to `false`. + // Other actions do not modify this key. // - // * `action` is "ALLOWED" and at least one policy matches - // * `action` is "DENY" and none of the policies match Action action = 1; // Maps from policy name to policy. A match occurs when at least one policy matches the request. map policies = 2; } -// Policy specifies a role and the principals that are assigned/denied the role. A policy matches if -// and only if at least one of its permissions match the action taking place AND at least one of its -// principals match the downstream AND the condition is true if specified. +// Policy specifies a role and the principals that are assigned/denied the role. +// A policy matches if and only if at least one of its permissions match the +// action taking place AND at least one of its principals match the downstream +// AND the condition is true if specified. message Policy { option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v2.Policy"; - // Required. The set of permissions that define a role. Each permission is matched with OR - // semantics. To match all actions for this policy, a single Permission with the `any` field set - // to true should be used. + // Required. The set of permissions that define a role. Each permission is + // matched with OR semantics. 
To match all actions for this policy, a single + // Permission with the `any` field set to true should be used. repeated Permission permissions = 1 [(validate.rules).repeated = {min_items: 1}]; - // Required. The set of principals that are assigned/denied the role based on “action”. Each - // principal is matched with OR semantics. To match all downstreams for this policy, a single - // Principal with the `any` field set to true should be used. + // Required. The set of principals that are assigned/denied the role based on + // “action”. Each principal is matched with OR semantics. To match all + // downstreams for this policy, a single Principal with the `any` field set to + // true should be used. repeated Principal principals = 2 [(validate.rules).repeated = {min_items: 1}]; // An optional symbolic expression specifying an access control @@ -161,9 +183,9 @@ message Permission { // Metadata that describes additional information about the action. type.matcher.v3.MetadataMatcher metadata = 7; - // Negates matching the provided permission. For instance, if the value of `not_rule` would - // match, this permission would not match. Conversely, if the value of `not_rule` would not - // match, this permission would match. + // Negates matching the provided permission. For instance, if the value of + // `not_rule` would match, this permission would not match. Conversely, if + // the value of `not_rule` would not match, this permission would match. Permission not_rule = 8; // The request server from the client's connection request. This is @@ -176,7 +198,8 @@ message Permission { // // * If the :ref:`TLS Inspector ` // filter is not added, and if a `FilterChainMatch` is not defined for - // the :ref:`server name `, + // the :ref:`server name + // `, // a TLS connection's requested SNI server name will be treated as if it // wasn't present. // @@ -189,13 +212,14 @@ message Permission { } } -// Principal defines an identity or a group of identities for a downstream subject. +// Principal defines an identity or a group of identities for a downstream +// subject. // [#next-free-field: 12] message Principal { option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v2.Principal"; - // Used in the `and_ids` and `or_ids` fields in the `identifier` oneof. Depending on the context, - // each are applied with the associated behavior. + // Used in the `and_ids` and `or_ids` fields in the `identifier` oneof. + // Depending on the context, each are applied with the associated behavior. message Set { option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v2.Principal.Set"; @@ -210,19 +234,21 @@ message Principal { reserved 1; - // The name of the principal. If set, The URI SAN or DNS SAN in that order is used from the - // certificate, otherwise the subject field is used. If unset, it applies to any user that is - // authenticated. + // The name of the principal. If set, The URI SAN or DNS SAN in that order + // is used from the certificate, otherwise the subject field is used. If + // unset, it applies to any user that is authenticated. type.matcher.v3.StringMatcher principal_name = 2; } oneof identifier { option (validate.required) = true; - // A set of identifiers that all must match in order to define the downstream. + // A set of identifiers that all must match in order to define the + // downstream. Set and_ids = 1; - // A set of identifiers at least one must match in order to define the downstream. 
+ // A set of identifiers at least one must match in order to define the + // downstream. Set or_ids = 2; // When any is set, it matches any downstream. @@ -237,21 +263,23 @@ message Principal { // A CIDR block that describes the downstream remote/origin address. // Note: This is always the physical peer even if the - // :ref:`remote_ip ` is inferred - // from for example the x-forwarder-for header, proxy protocol, etc. + // :ref:`remote_ip ` is + // inferred from for example the x-forwarder-for header, proxy protocol, + // etc. core.v3.CidrRange direct_remote_ip = 10; // A CIDR block that describes the downstream remote/origin address. // Note: This may not be the physical peer and could be different from the - // :ref:`direct_remote_ip `. - // E.g, if the remote ip is inferred from for example the x-forwarder-for header, - // proxy protocol, etc. + // :ref:`direct_remote_ip + // `. E.g, if the + // remote ip is inferred from for example the x-forwarder-for header, proxy + // protocol, etc. core.v3.CidrRange remote_ip = 11; - // A header (or pseudo-header such as :path or :method) on the incoming HTTP request. Only - // available for HTTP request. - // Note: the pseudo-header :path includes the query and fragment string. Use the `url_path` - // field if you want to match the URL path without the query and fragment string. + // A header (or pseudo-header such as :path or :method) on the incoming HTTP + // request. Only available for HTTP request. Note: the pseudo-header :path + // includes the query and fragment string. Use the `url_path` field if you + // want to match the URL path without the query and fragment string. route.v3.HeaderMatcher header = 6; // A URL path on the incoming HTTP request. Only available for HTTP. @@ -260,9 +288,9 @@ message Principal { // Metadata that describes additional information about the principal. type.matcher.v3.MetadataMatcher metadata = 7; - // Negates matching the provided principal. For instance, if the value of `not_id` would match, - // this principal would not match. Conversely, if the value of `not_id` would not match, this - // principal would match. + // Negates matching the provided principal. For instance, if the value of + // `not_id` would match, this principal would not match. Conversely, if the + // value of `not_id` would not match, this principal would match. Principal not_id = 8; } } diff --git a/generated_api_shadow/envoy/config/rbac/v4alpha/rbac.proto b/generated_api_shadow/envoy/config/rbac/v4alpha/rbac.proto index 3d8dae2402ea..7139dfaa1485 100644 --- a/generated_api_shadow/envoy/config/rbac/v4alpha/rbac.proto +++ b/generated_api_shadow/envoy/config/rbac/v4alpha/rbac.proto @@ -23,8 +23,14 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // [#protodoc-title: Role Based Access Control (RBAC)] // Role Based Access Control (RBAC) provides service-level and method-level access control for a -// service. RBAC policies are additive. The policies are examined in order. A request is allowed -// once a matching policy is found (suppose the `action` is ALLOW). +// service. RBAC policies are additive. The policies are examined in order. Requests are allowed +// or denied based on the `action` and whether a matching policy is found. For instance, if the +// action is ALLOW and a matching policy is found the request should be allowed. +// +// RBAC can also be used to make access logging decisions by communicating with access loggers +// through dynamic metadata. 
When the action is LOG and at least one policy matches, the +// `access_log_hint` value in the shared key namespace 'envoy.common' is set to `true` indicating +// the request should be logged. // // Here is an example of RBAC configuration. It has two policies: // @@ -67,39 +73,55 @@ message RBAC { // Should we do safe-list or block-list style access control? enum Action { - // The policies grant access to principals. The rest is denied. This is safe-list style + // The policies grant access to principals. The rest are denied. This is safe-list style // access control. This is the default type. ALLOW = 0; - // The policies deny access to principals. The rest is allowed. This is block-list style + // The policies deny access to principals. The rest are allowed. This is block-list style // access control. DENY = 1; + + // The policies set the `access_log_hint` dynamic metadata key based on if requests match. + // All requests are allowed. + LOG = 2; } - // The action to take if a policy matches. The request is allowed if and only if: + // The action to take if a policy matches. Every action either allows or denies a request, + // and can also carry out action-specific operations. + // + // Actions: + // + // * ALLOW: Allows the request if and only if there is a policy that matches + // the request. + // * DENY: Allows the request if and only if there are no policies that + // match the request. + // * LOG: Allows all requests. If at least one policy matches, the dynamic + // metadata key `access_log_hint` is set to the value `true` under the shared + // key namespace 'envoy.common'. If no policies match, it is set to `false`. + // Other actions do not modify this key. // - // * `action` is "ALLOWED" and at least one policy matches - // * `action` is "DENY" and none of the policies match Action action = 1; // Maps from policy name to policy. A match occurs when at least one policy matches the request. map policies = 2; } -// Policy specifies a role and the principals that are assigned/denied the role. A policy matches if -// and only if at least one of its permissions match the action taking place AND at least one of its -// principals match the downstream AND the condition is true if specified. +// Policy specifies a role and the principals that are assigned/denied the role. +// A policy matches if and only if at least one of its permissions match the +// action taking place AND at least one of its principals match the downstream +// AND the condition is true if specified. message Policy { option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v3.Policy"; - // Required. The set of permissions that define a role. Each permission is matched with OR - // semantics. To match all actions for this policy, a single Permission with the `any` field set - // to true should be used. + // Required. The set of permissions that define a role. Each permission is + // matched with OR semantics. To match all actions for this policy, a single + // Permission with the `any` field set to true should be used. repeated Permission permissions = 1 [(validate.rules).repeated = {min_items: 1}]; - // Required. The set of principals that are assigned/denied the role based on “action”. Each - // principal is matched with OR semantics. To match all downstreams for this policy, a single - // Principal with the `any` field set to true should be used. + // Required. The set of principals that are assigned/denied the role based on + // “action”. Each principal is matched with OR semantics. 
To match all + // downstreams for this policy, a single Principal with the `any` field set to + // true should be used. repeated Principal principals = 2 [(validate.rules).repeated = {min_items: 1}]; oneof expression_specifier { @@ -160,9 +182,9 @@ message Permission { // Metadata that describes additional information about the action. type.matcher.v4alpha.MetadataMatcher metadata = 7; - // Negates matching the provided permission. For instance, if the value of `not_rule` would - // match, this permission would not match. Conversely, if the value of `not_rule` would not - // match, this permission would match. + // Negates matching the provided permission. For instance, if the value of + // `not_rule` would match, this permission would not match. Conversely, if + // the value of `not_rule` would not match, this permission would match. Permission not_rule = 8; // The request server from the client's connection request. This is @@ -175,7 +197,8 @@ message Permission { // // * If the :ref:`TLS Inspector ` // filter is not added, and if a `FilterChainMatch` is not defined for - // the :ref:`server name `, + // the :ref:`server name + // `, // a TLS connection's requested SNI server name will be treated as if it // wasn't present. // @@ -188,13 +211,14 @@ message Permission { } } -// Principal defines an identity or a group of identities for a downstream subject. +// Principal defines an identity or a group of identities for a downstream +// subject. // [#next-free-field: 12] message Principal { option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v3.Principal"; - // Used in the `and_ids` and `or_ids` fields in the `identifier` oneof. Depending on the context, - // each are applied with the associated behavior. + // Used in the `and_ids` and `or_ids` fields in the `identifier` oneof. + // Depending on the context, each are applied with the associated behavior. message Set { option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v3.Principal.Set"; @@ -209,19 +233,21 @@ message Principal { reserved 1; - // The name of the principal. If set, The URI SAN or DNS SAN in that order is used from the - // certificate, otherwise the subject field is used. If unset, it applies to any user that is - // authenticated. + // The name of the principal. If set, The URI SAN or DNS SAN in that order + // is used from the certificate, otherwise the subject field is used. If + // unset, it applies to any user that is authenticated. type.matcher.v4alpha.StringMatcher principal_name = 2; } oneof identifier { option (validate.required) = true; - // A set of identifiers that all must match in order to define the downstream. + // A set of identifiers that all must match in order to define the + // downstream. Set and_ids = 1; - // A set of identifiers at least one must match in order to define the downstream. + // A set of identifiers at least one must match in order to define the + // downstream. Set or_ids = 2; // When any is set, it matches any downstream. @@ -236,21 +262,23 @@ message Principal { // A CIDR block that describes the downstream remote/origin address. // Note: This is always the physical peer even if the - // :ref:`remote_ip ` is inferred - // from for example the x-forwarder-for header, proxy protocol, etc. + // :ref:`remote_ip ` is + // inferred from for example the x-forwarder-for header, proxy protocol, + // etc. core.v4alpha.CidrRange direct_remote_ip = 10; // A CIDR block that describes the downstream remote/origin address. 
// Note: This may not be the physical peer and could be different from the - // :ref:`direct_remote_ip `. - // E.g, if the remote ip is inferred from for example the x-forwarder-for header, - // proxy protocol, etc. + // :ref:`direct_remote_ip + // `. E.g, if the + // remote ip is inferred from for example the x-forwarder-for header, proxy + // protocol, etc. core.v4alpha.CidrRange remote_ip = 11; - // A header (or pseudo-header such as :path or :method) on the incoming HTTP request. Only - // available for HTTP request. - // Note: the pseudo-header :path includes the query and fragment string. Use the `url_path` - // field if you want to match the URL path without the query and fragment string. + // A header (or pseudo-header such as :path or :method) on the incoming HTTP + // request. Only available for HTTP request. Note: the pseudo-header :path + // includes the query and fragment string. Use the `url_path` field if you + // want to match the URL path without the query and fragment string. route.v4alpha.HeaderMatcher header = 6; // A URL path on the incoming HTTP request. Only available for HTTP. @@ -259,9 +287,9 @@ message Principal { // Metadata that describes additional information about the principal. type.matcher.v4alpha.MetadataMatcher metadata = 7; - // Negates matching the provided principal. For instance, if the value of `not_id` would match, - // this principal would not match. Conversely, if the value of `not_id` would not match, this - // principal would match. + // Negates matching the provided principal. For instance, if the value of + // `not_id` would match, this principal would not match. Conversely, if the + // value of `not_id` would not match, this principal would match. Principal not_id = 8; } } diff --git a/source/extensions/filters/common/rbac/engine.h b/source/extensions/filters/common/rbac/engine.h index a833867dd02a..7174d4edb860 100644 --- a/source/extensions/filters/common/rbac/engine.h +++ b/source/extensions/filters/common/rbac/engine.h @@ -19,32 +19,32 @@ class RoleBasedAccessControlEngine { virtual ~RoleBasedAccessControlEngine() = default; /** - * Returns whether or not the current action is permitted. + * Handles action-specific operations and returns whether or not the request is permitted. * * @param connection the downstream connection used to identify the action/principal. * @param headers the headers of the incoming request used to identify the action/principal. An * empty map should be used if there are no headers available. * @param info the per-request or per-connection stream info with additional information - * about the action/principal. + * about the action/principal. Can be modified by the LOG Action. * @param effective_policy_id it will be filled by the matching policy's ID, * which is used to identity the source of the allow/deny. */ - virtual bool allowed(const Network::Connection& connection, - const Envoy::Http::RequestHeaderMap& headers, - const StreamInfo::StreamInfo& info, - std::string* effective_policy_id) const PURE; + virtual bool handleAction(const Network::Connection& connection, + const Envoy::Http::RequestHeaderMap& headers, + StreamInfo::StreamInfo& info, + std::string* effective_policy_id) const PURE; /** - * Returns whether or not the current action is permitted. + * Handles action-specific operations and returns whether or not the request is permitted. * * @param connection the downstream connection used to identify the action/principal. 
* @param info the per-request or per-connection stream info with additional information - * about the action/principal. + * about the action/principal. Can be modified by the LOG Action. * @param effective_policy_id it will be filled by the matching policy's ID, * which is used to identity the source of the allow/deny. */ - virtual bool allowed(const Network::Connection& connection, const StreamInfo::StreamInfo& info, - std::string* effective_policy_id) const PURE; + virtual bool handleAction(const Network::Connection& connection, StreamInfo::StreamInfo& info, + std::string* effective_policy_id) const PURE; }; } // namespace RBAC diff --git a/source/extensions/filters/common/rbac/engine_impl.cc b/source/extensions/filters/common/rbac/engine_impl.cc index d9717ef509c0..dc2a6ba79222 100644 --- a/source/extensions/filters/common/rbac/engine_impl.cc +++ b/source/extensions/filters/common/rbac/engine_impl.cc @@ -11,8 +11,8 @@ namespace Common { namespace RBAC { RoleBasedAccessControlEngineImpl::RoleBasedAccessControlEngineImpl( - const envoy::config::rbac::v3::RBAC& rules) - : allowed_if_matched_(rules.action() == envoy::config::rbac::v3::RBAC::ALLOW) { + const envoy::config::rbac::v3::RBAC& rules, const EnforcementMode mode) + : action_(rules.action()), mode_(mode) { // guard expression builder by presence of a condition in policies for (const auto& policy : rules.policies()) { if (policy.second.has_condition()) { @@ -26,10 +26,43 @@ RoleBasedAccessControlEngineImpl::RoleBasedAccessControlEngineImpl( } } -bool RoleBasedAccessControlEngineImpl::allowed(const Network::Connection& connection, - const Envoy::Http::RequestHeaderMap& headers, - const StreamInfo::StreamInfo& info, - std::string* effective_policy_id) const { +bool RoleBasedAccessControlEngineImpl::handleAction(const Network::Connection& connection, + StreamInfo::StreamInfo& info, + std::string* effective_policy_id) const { + return handleAction(connection, *Http::StaticEmptyHeaders::get().request_headers, info, + effective_policy_id); +} + +bool RoleBasedAccessControlEngineImpl::handleAction(const Network::Connection& connection, + const Envoy::Http::RequestHeaderMap& headers, + StreamInfo::StreamInfo& info, + std::string* effective_policy_id) const { + bool matched = checkPolicyMatch(connection, info, headers, effective_policy_id); + + switch (action_) { + case envoy::config::rbac::v3::RBAC::ALLOW: + return matched; + case envoy::config::rbac::v3::RBAC::DENY: + return !matched; + case envoy::config::rbac::v3::RBAC::LOG: { + // If not shadow enforcement, set shared log metadata + if (mode_ != EnforcementMode::Shadow) { + ProtobufWkt::Struct log_metadata; + auto& log_fields = *log_metadata.mutable_fields(); + log_fields[DynamicMetadataKeysSingleton::get().AccessLogKey].set_bool_value(matched); + info.setDynamicMetadata(DynamicMetadataKeysSingleton::get().CommonNamespace, log_metadata); + } + + return true; + } + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } +} + +bool RoleBasedAccessControlEngineImpl::checkPolicyMatch( + const Network::Connection& connection, const StreamInfo::StreamInfo& info, + const Envoy::Http::RequestHeaderMap& headers, std::string* effective_policy_id) const { bool matched = false; for (const auto& policy : policies_) { @@ -42,17 +75,7 @@ bool RoleBasedAccessControlEngineImpl::allowed(const Network::Connection& connec } } - // only allowed if: - // - matched and ALLOW action - // - not matched and DENY action - return matched == allowed_if_matched_; -} - -bool RoleBasedAccessControlEngineImpl::allowed(const 
Network::Connection& connection, - const StreamInfo::StreamInfo& info, - std::string* effective_policy_id) const { - return allowed(connection, *Http::StaticEmptyHeaders::get().request_headers, info, - effective_policy_id); + return matched; } } // namespace RBAC diff --git a/source/extensions/filters/common/rbac/engine_impl.h b/source/extensions/filters/common/rbac/engine_impl.h index 261b45b0aa13..0aacfb41f8e1 100644 --- a/source/extensions/filters/common/rbac/engine_impl.h +++ b/source/extensions/filters/common/rbac/engine_impl.h @@ -11,18 +11,40 @@ namespace Filters { namespace Common { namespace RBAC { +class DynamicMetadataKeys { +public: + const std::string ShadowEffectivePolicyIdField{"shadow_effective_policy_id"}; + const std::string ShadowEngineResultField{"shadow_engine_result"}; + const std::string EngineResultAllowed{"allowed"}; + const std::string EngineResultDenied{"denied"}; + const std::string AccessLogKey{"access_log_hint"}; + const std::string CommonNamespace{"envoy.common"}; +}; + +using DynamicMetadataKeysSingleton = ConstSingleton; + +enum class EnforcementMode { Enforced, Shadow }; + class RoleBasedAccessControlEngineImpl : public RoleBasedAccessControlEngine, NonCopyable { public: - RoleBasedAccessControlEngineImpl(const envoy::config::rbac::v3::RBAC& rules); + RoleBasedAccessControlEngineImpl(const envoy::config::rbac::v3::RBAC& rules, + const EnforcementMode mode = EnforcementMode::Enforced); - bool allowed(const Network::Connection& connection, const Envoy::Http::RequestHeaderMap& headers, - const StreamInfo::StreamInfo& info, std::string* effective_policy_id) const override; + bool handleAction(const Network::Connection& connection, + const Envoy::Http::RequestHeaderMap& headers, StreamInfo::StreamInfo& info, + std::string* effective_policy_id) const override; - bool allowed(const Network::Connection& connection, const StreamInfo::StreamInfo& info, - std::string* effective_policy_id) const override; + bool handleAction(const Network::Connection& connection, StreamInfo::StreamInfo& info, + std::string* effective_policy_id) const override; private: - const bool allowed_if_matched_; + // Checks whether the request matches any policies + bool checkPolicyMatch(const Network::Connection& connection, const StreamInfo::StreamInfo& info, + const Envoy::Http::RequestHeaderMap& headers, + std::string* effective_policy_id) const; + + const envoy::config::rbac::v3::RBAC::Action action_; + const EnforcementMode mode_; std::map> policies_; diff --git a/source/extensions/filters/common/rbac/utility.h b/source/extensions/filters/common/rbac/utility.h index a48efb813234..04635eb37411 100644 --- a/source/extensions/filters/common/rbac/utility.h +++ b/source/extensions/filters/common/rbac/utility.h @@ -12,16 +12,6 @@ namespace Filters { namespace Common { namespace RBAC { -class DynamicMetadataKeys { -public: - const std::string ShadowEffectivePolicyIdField{"shadow_effective_policy_id"}; - const std::string ShadowEngineResultField{"shadow_engine_result"}; - const std::string EngineResultAllowed{"allowed"}; - const std::string EngineResultDenied{"denied"}; -}; - -using DynamicMetadataKeysSingleton = ConstSingleton; - /** * All stats for the RBAC filter. @see stats_macros.h */ @@ -40,19 +30,18 @@ struct RoleBasedAccessControlFilterStats { RoleBasedAccessControlFilterStats generateStats(const std::string& prefix, Stats::Scope& scope); -enum class EnforcementMode { Enforced, Shadow }; - template std::unique_ptr createEngine(const ConfigType& config) { - return config.has_rules() ? 
std::make_unique(config.rules()) + return config.has_rules() ? std::make_unique( + config.rules(), EnforcementMode::Enforced) : nullptr; } template std::unique_ptr createShadowEngine(const ConfigType& config) { - return config.has_shadow_rules() - ? std::make_unique(config.shadow_rules()) - : nullptr; + return config.has_shadow_rules() ? std::make_unique( + config.shadow_rules(), EnforcementMode::Shadow) + : nullptr; } } // namespace RBAC diff --git a/source/extensions/filters/http/rbac/rbac_filter.cc b/source/extensions/filters/http/rbac/rbac_filter.cc index 6e1ff3ea3318..d396db7f52bc 100644 --- a/source/extensions/filters/http/rbac/rbac_filter.cc +++ b/source/extensions/filters/http/rbac/rbac_filter.cc @@ -80,8 +80,8 @@ RoleBasedAccessControlFilter::decodeHeaders(Http::RequestHeaderMap& headers, boo if (shadow_engine != nullptr) { std::string shadow_resp_code = Filters::Common::RBAC::DynamicMetadataKeysSingleton::get().EngineResultAllowed; - if (shadow_engine->allowed(*callbacks_->connection(), headers, callbacks_->streamInfo(), - &effective_policy_id)) { + if (shadow_engine->handleAction(*callbacks_->connection(), headers, callbacks_->streamInfo(), + &effective_policy_id)) { ENVOY_LOG(debug, "shadow allowed"); config_->stats().shadow_allowed_.inc(); } else { @@ -109,7 +109,8 @@ RoleBasedAccessControlFilter::decodeHeaders(Http::RequestHeaderMap& headers, boo const auto engine = config_->engine(callbacks_->route(), Filters::Common::RBAC::EnforcementMode::Enforced); if (engine != nullptr) { - if (engine->allowed(*callbacks_->connection(), headers, callbacks_->streamInfo(), nullptr)) { + if (engine->handleAction(*callbacks_->connection(), headers, callbacks_->streamInfo(), + nullptr)) { ENVOY_LOG(debug, "enforced allowed"); config_->stats().allowed_.inc(); return Http::FilterHeadersStatus::Continue; diff --git a/source/extensions/filters/network/rbac/rbac_filter.cc b/source/extensions/filters/network/rbac/rbac_filter.cc index 1bc12017b3b6..3b328ed2815f 100644 --- a/source/extensions/filters/network/rbac/rbac_filter.cc +++ b/source/extensions/filters/network/rbac/rbac_filter.cc @@ -85,8 +85,10 @@ RoleBasedAccessControlFilter::checkEngine(Filters::Common::RBAC::EnforcementMode const auto engine = config_->engine(mode); if (engine != nullptr) { std::string effective_policy_id; - if (engine->allowed(callbacks_->connection(), callbacks_->connection().streamInfo(), - &effective_policy_id)) { + + // Check authorization decision and do Action operations + if (engine->handleAction(callbacks_->connection(), callbacks_->connection().streamInfo(), + &effective_policy_id)) { if (mode == Filters::Common::RBAC::EnforcementMode::Shadow) { ENVOY_LOG(debug, "shadow allowed"); config_->stats().shadow_allowed_.inc(); diff --git a/test/extensions/filters/common/rbac/engine_impl_test.cc b/test/extensions/filters/common/rbac/engine_impl_test.cc index 8f4f3d7e6ad1..b9d8608a9208 100644 --- a/test/extensions/filters/common/rbac/engine_impl_test.cc +++ b/test/extensions/filters/common/rbac/engine_impl_test.cc @@ -24,21 +24,56 @@ namespace Common { namespace RBAC { namespace { +enum class LogResult { Yes, No, Undecided }; + void checkEngine( - const RBAC::RoleBasedAccessControlEngineImpl& engine, bool expected, + RBAC::RoleBasedAccessControlEngineImpl& engine, bool expected, LogResult expected_log, + StreamInfo::StreamInfo& info, const Envoy::Network::Connection& connection = Envoy::Network::MockConnection(), - const Envoy::Http::RequestHeaderMap& headers = Envoy::Http::TestRequestHeaderMapImpl(), - const 
StreamInfo::StreamInfo& info = NiceMock()) { - EXPECT_EQ(expected, engine.allowed(connection, headers, info, nullptr)); + const Envoy::Http::RequestHeaderMap& headers = Envoy::Http::TestRequestHeaderMapImpl()) { + + bool engineRes = engine.handleAction(connection, headers, info, nullptr); + EXPECT_EQ(expected, engineRes); + + if (expected_log != LogResult::Undecided) { + auto filter_meta = info.dynamicMetadata().filter_metadata().at( + RBAC::DynamicMetadataKeysSingleton::get().CommonNamespace); + EXPECT_EQ(expected_log == LogResult::Yes, + filter_meta.fields() + .at(RBAC::DynamicMetadataKeysSingleton::get().AccessLogKey) + .bool_value()); + } else { + EXPECT_EQ(info.dynamicMetadata().filter_metadata().end(), + info.dynamicMetadata().filter_metadata().find( + Filters::Common::RBAC::DynamicMetadataKeysSingleton::get().CommonNamespace)); + } +} + +void checkEngine( + RBAC::RoleBasedAccessControlEngineImpl& engine, bool expected, LogResult expected_log, + const Envoy::Network::Connection& connection = Envoy::Network::MockConnection(), + const Envoy::Http::RequestHeaderMap& headers = Envoy::Http::TestRequestHeaderMapImpl()) { + + NiceMock empty_info; + checkEngine(engine, expected, expected_log, empty_info, connection, headers); +} + +void onMetadata(NiceMock& info) { + ON_CALL(info, setDynamicMetadata("envoy.common", _)) + .WillByDefault(Invoke([&info](const std::string&, const ProtobufWkt::Struct& obj) { + (*info.metadata_.mutable_filter_metadata())["envoy.common"] = obj; + })); } TEST(RoleBasedAccessControlEngineImpl, Disabled) { envoy::config::rbac::v3::RBAC rbac; rbac.set_action(envoy::config::rbac::v3::RBAC::ALLOW); - checkEngine(RBAC::RoleBasedAccessControlEngineImpl(rbac), false); + RBAC::RoleBasedAccessControlEngineImpl engine_allow(rbac); + checkEngine(engine_allow, false, LogResult::Undecided); rbac.set_action(envoy::config::rbac::v3::RBAC::DENY); - checkEngine(RBAC::RoleBasedAccessControlEngineImpl(rbac), true); + RBAC::RoleBasedAccessControlEngineImpl engine_deny(rbac); + checkEngine(engine_deny, true, LogResult::Undecided); } // Test various invalid policies to validate the fix for @@ -143,11 +178,11 @@ TEST(RoleBasedAccessControlEngineImpl, AllowedAllowlist) { Envoy::Network::Address::InstanceConstSharedPtr addr = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 123, false); EXPECT_CALL(Const(info), downstreamLocalAddress()).WillOnce(ReturnRef(addr)); - checkEngine(engine, true, conn, headers, info); + checkEngine(engine, true, LogResult::Undecided, info, conn, headers); addr = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 456, false); EXPECT_CALL(Const(info), downstreamLocalAddress()).WillOnce(ReturnRef(addr)); - checkEngine(engine, false, conn, headers, info); + checkEngine(engine, false, LogResult::Undecided, info, conn, headers); } TEST(RoleBasedAccessControlEngineImpl, DeniedDenylist) { @@ -166,11 +201,11 @@ TEST(RoleBasedAccessControlEngineImpl, DeniedDenylist) { Envoy::Network::Address::InstanceConstSharedPtr addr = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 123, false); EXPECT_CALL(Const(info), downstreamLocalAddress()).WillOnce(ReturnRef(addr)); - checkEngine(engine, false, conn, headers, info); + checkEngine(engine, false, LogResult::Undecided, info, conn, headers); addr = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 456, false); EXPECT_CALL(Const(info), downstreamLocalAddress()).WillOnce(ReturnRef(addr)); - checkEngine(engine, true, conn, headers, info); + checkEngine(engine, true, LogResult::Undecided, info, conn, headers); 
} TEST(RoleBasedAccessControlEngineImpl, BasicCondition) { @@ -187,7 +222,7 @@ TEST(RoleBasedAccessControlEngineImpl, BasicCondition) { rbac.set_action(envoy::config::rbac::v3::RBAC::ALLOW); (*rbac.mutable_policies())["foo"] = policy; RBAC::RoleBasedAccessControlEngineImpl engine(rbac); - checkEngine(engine, false); + checkEngine(engine, false, LogResult::Undecided); } TEST(RoleBasedAccessControlEngineImpl, MalformedCondition) { @@ -209,6 +244,10 @@ TEST(RoleBasedAccessControlEngineImpl, MalformedCondition) { EXPECT_THROW_WITH_REGEX(RBAC::RoleBasedAccessControlEngineImpl engine(rbac), EnvoyException, "failed to create an expression: .*"); + + rbac.set_action(envoy::config::rbac::v3::RBAC::LOG); + EXPECT_THROW_WITH_REGEX(RBAC::RoleBasedAccessControlEngineImpl engine_log(rbac), EnvoyException, + "failed to create an expression: .*"); } TEST(RoleBasedAccessControlEngineImpl, MistypedCondition) { @@ -225,7 +264,7 @@ TEST(RoleBasedAccessControlEngineImpl, MistypedCondition) { rbac.set_action(envoy::config::rbac::v3::RBAC::ALLOW); (*rbac.mutable_policies())["foo"] = policy; RBAC::RoleBasedAccessControlEngineImpl engine(rbac); - checkEngine(engine, false); + checkEngine(engine, false, LogResult::Undecided); } TEST(RoleBasedAccessControlEngineImpl, ErrorCondition) { @@ -250,7 +289,7 @@ TEST(RoleBasedAccessControlEngineImpl, ErrorCondition) { rbac.set_action(envoy::config::rbac::v3::RBAC::ALLOW); (*rbac.mutable_policies())["foo"] = policy; RBAC::RoleBasedAccessControlEngineImpl engine(rbac); - checkEngine(engine, false, Envoy::Network::MockConnection()); + checkEngine(engine, false, LogResult::Undecided, Envoy::Network::MockConnection()); } TEST(RoleBasedAccessControlEngineImpl, HeaderCondition) { @@ -286,7 +325,7 @@ TEST(RoleBasedAccessControlEngineImpl, HeaderCondition) { std::string value = "bar"; headers.setReference(key, value); - checkEngine(engine, true, Envoy::Network::MockConnection(), headers); + checkEngine(engine, true, LogResult::Undecided, Envoy::Network::MockConnection(), headers); } TEST(RoleBasedAccessControlEngineImpl, MetadataCondition) { @@ -331,7 +370,7 @@ TEST(RoleBasedAccessControlEngineImpl, MetadataCondition) { Protobuf::MapPair("other", label)); EXPECT_CALL(Const(info), dynamicMetadata()).WillRepeatedly(ReturnRef(metadata)); - checkEngine(engine, true, Envoy::Network::MockConnection(), headers, info); + checkEngine(engine, true, LogResult::Undecided, info, Envoy::Network::MockConnection(), headers); } TEST(RoleBasedAccessControlEngineImpl, ConjunctiveCondition) { @@ -354,8 +393,44 @@ TEST(RoleBasedAccessControlEngineImpl, ConjunctiveCondition) { NiceMock info; Envoy::Network::Address::InstanceConstSharedPtr addr = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 123, false); + EXPECT_CALL(Const(info), downstreamLocalAddress()).Times(1).WillRepeatedly(ReturnRef(addr)); + checkEngine(engine, false, LogResult::Undecided, info, conn, headers); +} + +// Log tests +TEST(RoleBasedAccessControlEngineImpl, DisabledLog) { + NiceMock info; + onMetadata(info); + + envoy::config::rbac::v3::RBAC rbac; + rbac.set_action(envoy::config::rbac::v3::RBAC::LOG); + RBAC::RoleBasedAccessControlEngineImpl engine(rbac); + checkEngine(engine, true, RBAC::LogResult::No, info); +} + +TEST(RoleBasedAccessControlEngineImpl, LogIfMatched) { + envoy::config::rbac::v3::Policy policy; + policy.add_permissions()->set_destination_port(123); + policy.add_principals()->set_any(true); + + envoy::config::rbac::v3::RBAC rbac; + rbac.set_action(envoy::config::rbac::v3::RBAC::LOG); + 
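  // A minimal sketch of what this test exercises (assumed consumer code, not taken
  // from the patch): with the LOG action, handleAction() always returns true, so the
  // only observable outcome is the `access_log_hint` boolean written under the
  // `envoy.common` dynamic metadata namespace. A hypothetical consumer such as a
  // custom access logger could read it roughly like this, assuming `info` is the
  // request's StreamInfo::StreamInfo:
  //
  //   const auto& metadata = info.dynamicMetadata().filter_metadata();
  //   const auto entry = metadata.find("envoy.common");
  //   const bool should_log = entry != metadata.end() &&
  //       entry->second.fields().at("access_log_hint").bool_value();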
(*rbac.mutable_policies())["foo"] = policy; + RBAC::RoleBasedAccessControlEngineImpl engine(rbac); + + Envoy::Network::MockConnection conn; + Envoy::Http::TestRequestHeaderMapImpl headers; + NiceMock info; + onMetadata(info); + + Envoy::Network::Address::InstanceConstSharedPtr addr = + Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 123, false); + EXPECT_CALL(Const(info), downstreamLocalAddress()).WillOnce(ReturnRef(addr)); + checkEngine(engine, true, RBAC::LogResult::Yes, info, conn, headers); + + addr = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 456, false); EXPECT_CALL(Const(info), downstreamLocalAddress()).WillOnce(ReturnRef(addr)); - checkEngine(engine, false, conn, headers, info); + checkEngine(engine, true, RBAC::LogResult::No, info, conn, headers); } } // namespace diff --git a/test/extensions/filters/common/rbac/mocks.h b/test/extensions/filters/common/rbac/mocks.h index fda95244c893..a99e97aa9ea6 100644 --- a/test/extensions/filters/common/rbac/mocks.h +++ b/test/extensions/filters/common/rbac/mocks.h @@ -14,16 +14,17 @@ namespace RBAC { class MockEngine : public RoleBasedAccessControlEngineImpl { public: - MockEngine(const envoy::config::rbac::v3::RBAC& rules) - : RoleBasedAccessControlEngineImpl(rules){}; + MockEngine(const envoy::config::rbac::v3::RBAC& rules, + const EnforcementMode mode = EnforcementMode::Enforced) + : RoleBasedAccessControlEngineImpl(rules, mode){}; - MOCK_METHOD(bool, allowed, + MOCK_METHOD(bool, handleAction, (const Envoy::Network::Connection&, const Envoy::Http::RequestHeaderMap&, - const StreamInfo::StreamInfo&, std::string* effective_policy_id), + StreamInfo::StreamInfo&, std::string* effective_policy_id), (const)); - MOCK_METHOD(bool, allowed, - (const Envoy::Network::Connection&, const StreamInfo::StreamInfo&, + MOCK_METHOD(bool, handleAction, + (const Envoy::Network::Connection&, StreamInfo::StreamInfo&, std::string* effective_policy_id), (const)); }; diff --git a/test/extensions/filters/http/rbac/rbac_filter_integration_test.cc b/test/extensions/filters/http/rbac/rbac_filter_integration_test.cc index 28e4420db014..b7fcd3ebcbb7 100644 --- a/test/extensions/filters/http/rbac/rbac_filter_integration_test.cc +++ b/test/extensions/filters/http/rbac/rbac_filter_integration_test.cc @@ -64,6 +64,20 @@ name: rbac - any: true )EOF"; +const std::string RBAC_CONFIG_WITH_LOG_ACTION = R"EOF( +name: rbac +typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBAC + rules: + action: LOG + policies: + foo: + permissions: + - header: { name: ":method", exact_match: "GET" } + principals: + - any: true +)EOF"; + using RBACIntegrationTest = HttpProtocolIntegrationTest; INSTANTIATE_TEST_SUITE_P(Protocols, RBACIntegrationTest, @@ -277,5 +291,28 @@ TEST_P(RBACIntegrationTest, PathIgnoreCase) { } } +TEST_P(RBACIntegrationTest, LogConnectionAllow) { + config_helper_.addFilter(RBAC_CONFIG_WITH_LOG_ACTION); + initialize(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + + auto response = codec_client_->makeRequestWithBody( + Http::TestRequestHeaderMapImpl{ + {":method", "POST"}, + {":path", "/"}, + {":scheme", "http"}, + {":authority", "host"}, + {"x-forwarded-for", "10.0.0.1"}, + }, + 1024); + waitForNextUpstreamRequest(); + upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, true); + + response->waitForEndStream(); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().getStatusValue()); +} + } // namespace } // namespace Envoy diff --git 
a/test/extensions/filters/http/rbac/rbac_filter_test.cc b/test/extensions/filters/http/rbac/rbac_filter_test.cc index d445860f8394..519a49126bbb 100644 --- a/test/extensions/filters/http/rbac/rbac_filter_test.cc +++ b/test/extensions/filters/http/rbac/rbac_filter_test.cc @@ -24,9 +24,12 @@ namespace HttpFilters { namespace RBACFilter { namespace { +enum class LogResult { Yes, No, Undecided }; + class RoleBasedAccessControlFilterTest : public testing::Test { public: - RoleBasedAccessControlFilterConfigSharedPtr setupConfig() { + RoleBasedAccessControlFilterConfigSharedPtr + setupConfig(envoy::config::rbac::v3::RBAC::Action action) { envoy::extensions::filters::http::rbac::v3::RBAC config; envoy::config::rbac::v3::Policy policy; @@ -36,7 +39,7 @@ class RoleBasedAccessControlFilterTest : public testing::Test { policy_rules->add_rules()->set_destination_port(123); policy_rules->add_rules()->mutable_url_path()->mutable_path()->set_suffix("suffix"); policy.add_principals()->set_any(true); - config.mutable_rules()->set_action(envoy::config::rbac::v3::RBAC::ALLOW); + config.mutable_rules()->set_action(action); (*config.mutable_rules()->mutable_policies())["foo"] = policy; envoy::config::rbac::v3::Policy shadow_policy; @@ -44,13 +47,14 @@ class RoleBasedAccessControlFilterTest : public testing::Test { shadow_policy_rules->add_rules()->mutable_requested_server_name()->set_exact("xyz.cncf.io"); shadow_policy_rules->add_rules()->set_destination_port(456); shadow_policy.add_principals()->set_any(true); - config.mutable_shadow_rules()->set_action(envoy::config::rbac::v3::RBAC::ALLOW); + config.mutable_shadow_rules()->set_action(action); (*config.mutable_shadow_rules()->mutable_policies())["bar"] = shadow_policy; return std::make_shared(config, "test", store_); } - RoleBasedAccessControlFilterTest() : config_(setupConfig()), filter_(config_) {} + RoleBasedAccessControlFilterTest() + : config_(setupConfig(envoy::config::rbac::v3::RBAC::ALLOW)), filter_(config_) {} void SetUp() override { EXPECT_CALL(callbacks_, connection()).WillRepeatedly(Return(&connection_)); @@ -68,6 +72,21 @@ class RoleBasedAccessControlFilterTest : public testing::Test { ON_CALL(connection_, requestedServerName()).WillByDefault(Return(requested_server_name_)); } + void checkAccessLogMetadata(LogResult expected) { + if (expected != LogResult::Undecided) { + auto filter_meta = req_info_.dynamicMetadata().filter_metadata().at( + Filters::Common::RBAC::DynamicMetadataKeysSingleton::get().CommonNamespace); + EXPECT_EQ(expected == LogResult::Yes, + filter_meta.fields() + .at(Filters::Common::RBAC::DynamicMetadataKeysSingleton::get().AccessLogKey) + .bool_value()); + } else { + EXPECT_EQ(req_info_.dynamicMetadata().filter_metadata().end(), + req_info_.dynamicMetadata().filter_metadata().find( + Filters::Common::RBAC::DynamicMetadataKeysSingleton::get().CommonNamespace)); + } + } + void setMetadata() { ON_CALL(req_info_, setDynamicMetadata(HttpFilterNames::get().Rbac, _)) .WillByDefault(Invoke([this](const std::string&, const ProtobufWkt::Struct& obj) { @@ -75,6 +94,15 @@ class RoleBasedAccessControlFilterTest : public testing::Test { Protobuf::MapPair(HttpFilterNames::get().Rbac, obj)); })); + + ON_CALL(req_info_, + setDynamicMetadata( + Filters::Common::RBAC::DynamicMetadataKeysSingleton::get().CommonNamespace, _)) + .WillByDefault(Invoke([this](const std::string&, const ProtobufWkt::Struct& obj) { + req_info_.metadata_.mutable_filter_metadata()->insert( + Protobuf::MapPair( + 
Filters::Common::RBAC::DynamicMetadataKeysSingleton::get().CommonNamespace, obj)); + })); } NiceMock callbacks_; @@ -82,8 +110,8 @@ class RoleBasedAccessControlFilterTest : public testing::Test { NiceMock req_info_; Stats::IsolatedStoreImpl store_; RoleBasedAccessControlFilterConfigSharedPtr config_; - RoleBasedAccessControlFilter filter_; + Network::Address::InstanceConstSharedPtr address_; std::string requested_server_name_; Http::TestRequestHeaderMapImpl headers_; @@ -92,6 +120,7 @@ class RoleBasedAccessControlFilterTest : public testing::Test { TEST_F(RoleBasedAccessControlFilterTest, Allowed) { setDestinationPort(123); + setMetadata(); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(headers_, false)); Http::MetadataMap metadata_map{{"metadata", "metadata"}}; @@ -102,11 +131,14 @@ TEST_F(RoleBasedAccessControlFilterTest, Allowed) { Buffer::OwnedImpl data(""); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data, false)); EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(trailers_)); + + checkAccessLogMetadata(LogResult::Undecided); } TEST_F(RoleBasedAccessControlFilterTest, RequestedServerName) { setDestinationPort(999); setRequestedServerName("www.cncf.io"); + setMetadata(); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(headers_, false)); EXPECT_EQ(1U, config_->stats().allowed_.value()); @@ -117,10 +149,13 @@ TEST_F(RoleBasedAccessControlFilterTest, RequestedServerName) { Buffer::OwnedImpl data(""); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data, false)); EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(trailers_)); + + checkAccessLogMetadata(LogResult::Undecided); } TEST_F(RoleBasedAccessControlFilterTest, Path) { setDestinationPort(999); + setMetadata(); auto headers = Http::TestRequestHeaderMapImpl{ {":method", "GET"}, @@ -129,6 +164,7 @@ TEST_F(RoleBasedAccessControlFilterTest, Path) { {":authority", "host"}, }; EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(headers, false)); + checkAccessLogMetadata(LogResult::Undecided); } TEST_F(RoleBasedAccessControlFilterTest, Denied) { @@ -151,23 +187,65 @@ TEST_F(RoleBasedAccessControlFilterTest, Denied) { EXPECT_EQ("allowed", filter_meta.fields().at("shadow_engine_result").string_value()); EXPECT_EQ("bar", filter_meta.fields().at("shadow_effective_policy_id").string_value()); EXPECT_EQ("rbac_access_denied", callbacks_.details_); + checkAccessLogMetadata(LogResult::Undecided); } TEST_F(RoleBasedAccessControlFilterTest, RouteLocalOverride) { setDestinationPort(456); + setMetadata(); envoy::extensions::filters::http::rbac::v3::RBACPerRoute route_config; route_config.mutable_rbac()->mutable_rules()->set_action(envoy::config::rbac::v3::RBAC::DENY); NiceMock engine{route_config.rbac().rules()}; NiceMock per_route_config_{route_config}; - EXPECT_CALL(engine, allowed(_, _, _, _)).WillRepeatedly(Return(true)); + EXPECT_CALL(engine, handleAction(_, _, _, _)).WillRepeatedly(Return(true)); EXPECT_CALL(per_route_config_, engine()).WillRepeatedly(ReturnRef(engine)); EXPECT_CALL(callbacks_.route_->route_entry_, perFilterConfig(HttpFilterNames::get().Rbac)) .WillRepeatedly(Return(&per_route_config_)); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(headers_, true)); + checkAccessLogMetadata(LogResult::Undecided); +} + +// Log Tests +TEST_F(RoleBasedAccessControlFilterTest, ShouldLog) { + config_ = setupConfig(envoy::config::rbac::v3::RBAC::LOG); + filter_ = 
RoleBasedAccessControlFilter(config_); + filter_.setDecoderFilterCallbacks(callbacks_); + + setDestinationPort(123); + setMetadata(); + + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(headers_, false)); + EXPECT_EQ(1U, config_->stats().allowed_.value()); + EXPECT_EQ(0U, config_->stats().shadow_denied_.value()); + + Buffer::OwnedImpl data(""); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data, false)); + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(trailers_)); + + checkAccessLogMetadata(LogResult::Yes); +} + +TEST_F(RoleBasedAccessControlFilterTest, ShouldNotLog) { + config_ = setupConfig(envoy::config::rbac::v3::RBAC::LOG); + filter_ = RoleBasedAccessControlFilter(config_); + filter_.setDecoderFilterCallbacks(callbacks_); + + setDestinationPort(456); + setMetadata(); + + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(headers_, false)); + EXPECT_EQ(1U, config_->stats().allowed_.value()); + EXPECT_EQ(0U, config_->stats().shadow_denied_.value()); + + Buffer::OwnedImpl data(""); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_.decodeData(data, false)); + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_.decodeTrailers(trailers_)); + + checkAccessLogMetadata(LogResult::No); } } // namespace diff --git a/test/extensions/filters/network/rbac/filter_test.cc b/test/extensions/filters/network/rbac/filter_test.cc index 2e8fd2642da3..fe042854baa7 100644 --- a/test/extensions/filters/network/rbac/filter_test.cc +++ b/test/extensions/filters/network/rbac/filter_test.cc @@ -22,8 +22,10 @@ namespace RBACFilter { class RoleBasedAccessControlNetworkFilterTest : public testing::Test { public: - RoleBasedAccessControlFilterConfigSharedPtr setupConfig(bool with_policy = true, - bool continuous = false) { + RoleBasedAccessControlFilterConfigSharedPtr + setupConfig(bool with_policy = true, bool continuous = false, + envoy::config::rbac::v3::RBAC::Action action = envoy::config::rbac::v3::RBAC::ALLOW) { + envoy::extensions::filters::network::rbac::v3::RBAC config; config.set_stat_prefix("tcp."); @@ -34,7 +36,7 @@ class RoleBasedAccessControlNetworkFilterTest : public testing::Test { ".*cncf.io"); policy_rules->add_rules()->set_destination_port(123); policy.add_principals()->set_any(true); - config.mutable_rules()->set_action(envoy::config::rbac::v3::RBAC::ALLOW); + config.mutable_rules()->set_action(action); (*config.mutable_rules()->mutable_policies())["foo"] = policy; envoy::config::rbac::v3::Policy shadow_policy; @@ -42,7 +44,7 @@ class RoleBasedAccessControlNetworkFilterTest : public testing::Test { shadow_policy_rules->add_rules()->mutable_requested_server_name()->set_exact("xyz.cncf.io"); shadow_policy_rules->add_rules()->set_destination_port(456); shadow_policy.add_principals()->set_any(true); - config.mutable_shadow_rules()->set_action(envoy::config::rbac::v3::RBAC::ALLOW); + config.mutable_shadow_rules()->set_action(action); (*config.mutable_shadow_rules()->mutable_policies())["bar"] = shadow_policy; } @@ -72,6 +74,15 @@ class RoleBasedAccessControlNetworkFilterTest : public testing::Test { .WillByDefault(Return(requested_server_name_)); } + void checkAccessLogMetadata(bool expected) { + auto filter_meta = stream_info_.dynamicMetadata().filter_metadata().at( + Filters::Common::RBAC::DynamicMetadataKeysSingleton::get().CommonNamespace); + EXPECT_EQ(expected, + filter_meta.fields() + .at(Filters::Common::RBAC::DynamicMetadataKeysSingleton::get().AccessLogKey) + .bool_value()); + } + void setMetadata() { 
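    // Route setDynamicMetadata() calls on the mock StreamInfo into its local
    // metadata_ store: the mock does not persist dynamic metadata on its own, so
    // both the filter's own namespace and the shared `envoy.common` namespace are
    // captured here for later inspection by checkAccessLogMetadata().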
ON_CALL(stream_info_, setDynamicMetadata(NetworkFilterNames::get().Rbac, _)) .WillByDefault(Invoke([this](const std::string&, const ProtobufWkt::Struct& obj) { @@ -79,6 +90,15 @@ class RoleBasedAccessControlNetworkFilterTest : public testing::Test { Protobuf::MapPair(NetworkFilterNames::get().Rbac, obj)); })); + + ON_CALL(stream_info_, + setDynamicMetadata( + Filters::Common::RBAC::DynamicMetadataKeysSingleton::get().CommonNamespace, _)) + .WillByDefault(Invoke([this](const std::string&, const ProtobufWkt::Struct& obj) { + stream_info_.metadata_.mutable_filter_metadata()->insert( + Protobuf::MapPair( + Filters::Common::RBAC::DynamicMetadataKeysSingleton::get().CommonNamespace, obj)); + })); } NiceMock callbacks_; @@ -173,6 +193,49 @@ TEST_F(RoleBasedAccessControlNetworkFilterTest, Denied) { EXPECT_EQ("allowed", filter_meta.fields().at("shadow_engine_result").string_value()); } +// Log Tests +TEST_F(RoleBasedAccessControlNetworkFilterTest, ShouldLog) { + config_ = setupConfig(true, false, envoy::config::rbac::v3::RBAC::LOG); + filter_ = std::make_unique(config_); + filter_->initializeReadFilterCallbacks(callbacks_); + + setDestinationPort(123); + setMetadata(); + + EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data_, false)); + EXPECT_EQ(1U, config_->stats().allowed_.value()); + EXPECT_EQ(0U, config_->stats().shadow_denied_.value()); + + checkAccessLogMetadata(true); +} + +TEST_F(RoleBasedAccessControlNetworkFilterTest, ShouldNotLog) { + config_ = setupConfig(true, false, envoy::config::rbac::v3::RBAC::LOG); + filter_ = std::make_unique(config_); + filter_->initializeReadFilterCallbacks(callbacks_); + + setDestinationPort(456); + setMetadata(); + + EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data_, false)); + EXPECT_EQ(1U, config_->stats().allowed_.value()); + EXPECT_EQ(0U, config_->stats().shadow_denied_.value()); + + checkAccessLogMetadata(false); +} + +TEST_F(RoleBasedAccessControlNetworkFilterTest, AllowNoChangeLog) { + setDestinationPort(123); + setMetadata(); + + EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data_, false)); + + // Check that Allow action does not set access log metadata + EXPECT_EQ(stream_info_.dynamicMetadata().filter_metadata().end(), + stream_info_.dynamicMetadata().filter_metadata().find( + Filters::Common::RBAC::DynamicMetadataKeysSingleton::get().CommonNamespace)); +} + } // namespace RBACFilter } // namespace NetworkFilters } // namespace Extensions From 9ad7d4ce5923fb053506cb76d47f93049a509a4c Mon Sep 17 00:00:00 2001 From: Matthew Grossman Date: Mon, 10 Aug 2020 21:52:30 -0700 Subject: [PATCH 908/909] tracing: add baggage methods to Tracing::Span (#12260) * WIP add some header info on baggage Signed-off-by: Matthew Grossman * messing w/ stuff Signed-off-by: Matthew Grossman * get a compiling solution Signed-off-by: Matthew Grossman * lint Signed-off-by: Matthew Grossman * format again Signed-off-by: Matthew Grossman * formatting Signed-off-by: Matthew Grossman * change the test slightly Signed-off-by: Matthew Grossman * s/std::string/string_view Signed-off-by: Matthew Grossman * add note about baggage and child/parent spans Signed-off-by: Matthew Grossman * add in test for OpenTracingDriverTest Signed-off-by: Matthew Grossman * add in test for OpenCensus Signed-off-by: Matthew Grossman * add in baggage test for xray Signed-off-by: Matthew Grossman * add in zipkin tests Signed-off-by: Matthew Grossman * add in http tracer impl test Signed-off-by: Matthew Grossman * fix test for nulltracer Signed-off-by: Matthew Grossman 
* Add additional TODO for zipkin impl and add in comments for tests Signed-off-by: Matthew Grossman * add in draft docs Signed-off-by: Matthew Grossman * fix doc link Signed-off-by: Matthew Grossman * Inline empty methods per mklein's nit Signed-off-by: Jake Kaufman Co-authored-by: Jake Kaufman --- .../arch_overview/observability/tracing.rst | 14 ++++++++++++++ include/envoy/tracing/http_tracer.h | 16 ++++++++++++++++ source/common/tracing/http_tracer_impl.h | 2 ++ .../tracers/common/ot/opentracing_driver_impl.cc | 8 ++++++++ .../tracers/common/ot/opentracing_driver_impl.h | 2 ++ .../tracers/opencensus/opencensus_tracer_impl.cc | 4 ++++ source/extensions/tracers/xray/tracer.h | 4 ++++ .../tracers/zipkin/zipkin_tracer_impl.cc | 4 ++++ .../tracers/zipkin/zipkin_tracer_impl.h | 4 ++++ test/common/tracing/http_tracer_impl_test.cc | 2 ++ .../common/ot/opentracing_driver_impl_test.cc | 14 ++++++++++++++ .../lightstep/lightstep_tracer_impl_test.cc | 11 +++++++++++ .../extensions/tracers/opencensus/tracer_test.cc | 4 ++++ test/extensions/tracers/xray/tracer_test.cc | 10 ++++++++++ .../tracers/zipkin/zipkin_tracer_impl_test.cc | 8 ++++++++ test/mocks/tracing/mocks.h | 2 ++ 16 files changed, 109 insertions(+) diff --git a/docs/root/intro/arch_overview/observability/tracing.rst b/docs/root/intro/arch_overview/observability/tracing.rst index 26f057468d76..958b003a5d9a 100644 --- a/docs/root/intro/arch_overview/observability/tracing.rst +++ b/docs/root/intro/arch_overview/observability/tracing.rst @@ -113,3 +113,17 @@ request ID :ref:`config_http_conn_man_headers_x-request-id` (LightStep) or the trace ID configuration (Zipkin and Datadog). See :ref:`v3 API reference ` for more information on how to setup tracing in Envoy. + +Baggage +----------------------------- +Baggage provides a mechanism for data to be available throughout the entirety of a trace. +While metadata such as tags are usually communicated to collectors out-of-band, baggage data is injected into the actual +request context and available to applications during the duration of the request. This enables metadata to transparently +travel from the beginning of the request throughout your entire mesh without relying on application-specific modifications for +propagation. See `OpenTracing's documentation `_ for more information about baggage. + +Tracing providers have varying level of support for getting and setting baggage: + +* Lightstep (and any OpenTracing-compliant tracer) can read/write baggage +* Zipkin support is not yet implemented +* X-Ray and OpenCensus don't support baggage diff --git a/include/envoy/tracing/http_tracer.h b/include/envoy/tracing/http_tracer.h index 63da639e84ee..22b024ac97e0 100644 --- a/include/envoy/tracing/http_tracer.h +++ b/include/envoy/tracing/http_tracer.h @@ -158,6 +158,22 @@ class Span { * @param sampled whether the span and any subsequent child spans should be sampled */ virtual void setSampled(bool sampled) PURE; + + /** + * Retrieve a key's value from the span's baggage. + * This baggage data could've been set by this span or any parent spans. + * @param key baggage key + * @return the baggage's value for the given input key + */ + virtual std::string getBaggage(absl::string_view key) PURE; + + /** + * Set a key/value pair in the current span's baggage. + * All subsequent child spans will have access to this baggage. 
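   *
   * A minimal usage sketch (names and values here are illustrative assumptions):
   * a filter holding a Tracing::Span could propagate a request-scoped value with
   *   span.setBaggage("tenant-id", "team-a");
   * and any span later in the same trace could read it back with
   *   const std::string tenant = span.getBaggage("tenant-id");
   * Whether a value actually round-trips depends on the active tracer: in this
   * patch only the OpenTracing-based drivers (e.g. Lightstep) implement baggage,
   * while the Zipkin, X-Ray and OpenCensus spans return an empty string.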
+ * @param key baggage key + * @param key baggage value + */ + virtual void setBaggage(absl::string_view key, absl::string_view value) PURE; }; /** diff --git a/source/common/tracing/http_tracer_impl.h b/source/common/tracing/http_tracer_impl.h index 14cb47cbeb6a..760b4ed2bf3e 100644 --- a/source/common/tracing/http_tracer_impl.h +++ b/source/common/tracing/http_tracer_impl.h @@ -169,6 +169,8 @@ class NullSpan : public Span { void log(SystemTime, const std::string&) override {} void finishSpan() override {} void injectContext(Http::RequestHeaderMap&) override {} + void setBaggage(absl::string_view, absl::string_view) override {} + std::string getBaggage(absl::string_view) override { return std::string(); } SpanPtr spawnChild(const Config&, const std::string&, SystemTime) override { return SpanPtr{new NullSpan()}; } diff --git a/source/extensions/tracers/common/ot/opentracing_driver_impl.cc b/source/extensions/tracers/common/ot/opentracing_driver_impl.cc index 080742af2ddd..cad01b83bb83 100644 --- a/source/extensions/tracers/common/ot/opentracing_driver_impl.cc +++ b/source/extensions/tracers/common/ot/opentracing_driver_impl.cc @@ -102,6 +102,14 @@ void OpenTracingSpan::log(SystemTime timestamp, const std::string& event) { finish_options_.log_records.emplace_back(std::move(record)); } +void OpenTracingSpan::setBaggage(absl::string_view key, absl::string_view value) { + span_->SetBaggageItem({key.data(), key.length()}, {value.data(), value.length()}); +} + +std::string OpenTracingSpan::getBaggage(absl::string_view key) { + return span_->BaggageItem({key.data(), key.length()}); +} + void OpenTracingSpan::injectContext(Http::RequestHeaderMap& request_headers) { if (driver_.propagationMode() == OpenTracingDriver::PropagationMode::SingleHeader) { // Inject the span context using Envoy's single-header format. diff --git a/source/extensions/tracers/common/ot/opentracing_driver_impl.h b/source/extensions/tracers/common/ot/opentracing_driver_impl.h index d99ad7444dc5..2bfbddfe1886 100644 --- a/source/extensions/tracers/common/ot/opentracing_driver_impl.h +++ b/source/extensions/tracers/common/ot/opentracing_driver_impl.h @@ -40,6 +40,8 @@ class OpenTracingSpan : public Tracing::Span, Logger::Loggable { */ void log(Envoy::SystemTime, const std::string&) override {} + // X-Ray doesn't support baggage, so noop these OpenTracing functions. + void setBaggage(absl::string_view, absl::string_view) override {} + std::string getBaggage(absl::string_view) override { return std::string(); } + /** * Creates a child span. * In X-Ray terms this creates a sub-segment and sets its parent ID to the current span's ID. diff --git a/source/extensions/tracers/zipkin/zipkin_tracer_impl.cc b/source/extensions/tracers/zipkin/zipkin_tracer_impl.cc index a96d91aab565..8cf176d1fabc 100644 --- a/source/extensions/tracers/zipkin/zipkin_tracer_impl.cc +++ b/source/extensions/tracers/zipkin/zipkin_tracer_impl.cc @@ -35,6 +35,10 @@ void ZipkinSpan::log(SystemTime timestamp, const std::string& event) { span_.log(timestamp, event); } +// TODO(#11622): Implement baggage storage for zipkin spans +void ZipkinSpan::setBaggage(absl::string_view, absl::string_view) {} +std::string ZipkinSpan::getBaggage(absl::string_view) { return std::string(); } + void ZipkinSpan::injectContext(Http::RequestHeaderMap& request_headers) { // Set the trace-id and span-id headers properly, based on the newly-created span structure. 
request_headers.setReferenceKey(ZipkinCoreConstants::get().X_B3_TRACE_ID, diff --git a/source/extensions/tracers/zipkin/zipkin_tracer_impl.h b/source/extensions/tracers/zipkin/zipkin_tracer_impl.h index 1624ddf59cc5..9cb39ea27e92 100644 --- a/source/extensions/tracers/zipkin/zipkin_tracer_impl.h +++ b/source/extensions/tracers/zipkin/zipkin_tracer_impl.h @@ -76,6 +76,10 @@ class ZipkinSpan : public Tracing::Span { void setSampled(bool sampled) override; + // TODO(#11622): Implement baggage storage for zipkin spans + void setBaggage(absl::string_view, absl::string_view) override; + std::string getBaggage(absl::string_view) override; + /** * @return a reference to the Zipkin::Span object. */ diff --git a/test/common/tracing/http_tracer_impl_test.cc b/test/common/tracing/http_tracer_impl_test.cc index 6bb18da079f8..ef1686bcc688 100644 --- a/test/common/tracing/http_tracer_impl_test.cc +++ b/test/common/tracing/http_tracer_impl_test.cc @@ -744,6 +744,8 @@ TEST(HttpNullTracerTest, BasicFunctionality) { span_ptr->setOperation("foo"); span_ptr->setTag("foo", "bar"); + span_ptr->setBaggage("key", "value"); + ASSERT_EQ("", span_ptr->getBaggage("baggage_key")); span_ptr->injectContext(request_headers); EXPECT_NE(nullptr, span_ptr->spawnChild(config, "foo", SystemTime())); diff --git a/test/extensions/tracers/common/ot/opentracing_driver_impl_test.cc b/test/extensions/tracers/common/ot/opentracing_driver_impl_test.cc index 102bc6a2086c..011030dff5a4 100644 --- a/test/extensions/tracers/common/ot/opentracing_driver_impl_test.cc +++ b/test/extensions/tracers/common/ot/opentracing_driver_impl_test.cc @@ -99,6 +99,20 @@ TEST_F(OpenTracingDriverTest, FlushSpanWithLog) { EXPECT_EQ(expected_logs, driver_->recorder().top().logs); } +TEST_F(OpenTracingDriverTest, FlushSpanWithBaggage) { + setupValidDriver(); + + Tracing::SpanPtr first_span = driver_->startSpan(config_, request_headers_, operation_name_, + start_time_, {Tracing::Reason::Sampling, true}); + first_span->setBaggage("abc", "123"); + first_span->finishSpan(); + + const std::map expected_baggage = {{"abc", "123"}}; + + EXPECT_EQ(1, driver_->recorder().spans().size()); + EXPECT_EQ(expected_baggage, driver_->recorder().top().span_context.baggage); +} + TEST_F(OpenTracingDriverTest, TagSamplingFalseByDecision) { setupValidDriver(OpenTracingDriver::PropagationMode::TracerNative, {}); diff --git a/test/extensions/tracers/lightstep/lightstep_tracer_impl_test.cc b/test/extensions/tracers/lightstep/lightstep_tracer_impl_test.cc index caba59fce679..ef657d6d54f5 100644 --- a/test/extensions/tracers/lightstep/lightstep_tracer_impl_test.cc +++ b/test/extensions/tracers/lightstep/lightstep_tracer_impl_test.cc @@ -718,6 +718,17 @@ TEST_F(LightStepDriverTest, SpawnChild) { EXPECT_FALSE(base2_context.empty()); } +TEST_F(LightStepDriverTest, GetAndSetBaggage) { + setupValidDriver(); + Tracing::SpanPtr span = driver_->startSpan(config_, request_headers_, operation_name_, + start_time_, {Tracing::Reason::Sampling, true}); + + std::string key = "key1"; + std::string value = "value1"; + span->setBaggage(key, value); + EXPECT_EQ(span->getBaggage(key), value); +} + } // namespace } // namespace Lightstep } // namespace Tracers diff --git a/test/extensions/tracers/opencensus/tracer_test.cc b/test/extensions/tracers/opencensus/tracer_test.cc index 6bc91183341a..88ed7f2f5983 100644 --- a/test/extensions/tracers/opencensus/tracer_test.cc +++ b/test/extensions/tracers/opencensus/tracer_test.cc @@ -123,6 +123,10 @@ TEST(OpenCensusTracerTest, Span) { child->finishSpan(); 
span->setSampled(false); // Abandon tracer. span->finishSpan(); + + // Baggage methods are a noop in opencensus and won't affect events. + span->setBaggage("baggage_key", "baggage_value"); + ASSERT_EQ("", span->getBaggage("baggage_key")); } // Retrieve SpanData from the OpenCensus trace exporter. diff --git a/test/extensions/tracers/xray/tracer_test.cc b/test/extensions/tracers/xray/tracer_test.cc index c82e73056a5f..caeb153def47 100644 --- a/test/extensions/tracers/xray/tracer_test.cc +++ b/test/extensions/tracers/xray/tracer_test.cc @@ -100,6 +100,16 @@ TEST_F(XRayTracerTest, NonSampledSpansNotSerialized) { span->finishSpan(); } +TEST_F(XRayTracerTest, BaggageNotImplemented) { + Tracer tracer{"" /*span name*/, std::move(broker_), server_.timeSource()}; + auto span = tracer.createNonSampledSpan(); + span->setBaggage("baggage_key", "baggage_value"); + span->finishSpan(); + + // Baggage isn't supported, so getBaggage should always return empty. + ASSERT_EQ("", span->getBaggage("baggage_key")); +} + TEST_F(XRayTracerTest, ChildSpanHasParentInfo) { NiceMock config; constexpr auto expected_span_name = "Service 1"; diff --git a/test/extensions/tracers/zipkin/zipkin_tracer_impl_test.cc b/test/extensions/tracers/zipkin/zipkin_tracer_impl_test.cc index bd51f1493e5c..0d1488e63bff 100644 --- a/test/extensions/tracers/zipkin/zipkin_tracer_impl_test.cc +++ b/test/extensions/tracers/zipkin/zipkin_tracer_impl_test.cc @@ -689,6 +689,14 @@ TEST_F(ZipkinDriverTest, ZipkinSpanTest) { EXPECT_FALSE(zipkin_zipkin_span4.annotations().empty()); EXPECT_EQ(timestamp_count, zipkin_zipkin_span4.annotations().back().timestamp()); EXPECT_EQ("abc", zipkin_zipkin_span4.annotations().back().value()); + + // ==== + // Test baggage noop + // ==== + Tracing::SpanPtr span5 = driver_->startSpan(config_, request_headers_, operation_name_, + start_time_, {Tracing::Reason::Sampling, true}); + span5->setBaggage("baggage_key", "baggage_value"); + EXPECT_EQ("", span5->getBaggage("baggage_key")); } TEST_F(ZipkinDriverTest, ZipkinSpanContextFromB3HeadersTest) { diff --git a/test/mocks/tracing/mocks.h b/test/mocks/tracing/mocks.h index 8531027c7543..98a7a96ac513 100644 --- a/test/mocks/tracing/mocks.h +++ b/test/mocks/tracing/mocks.h @@ -37,6 +37,8 @@ class MockSpan : public Span { MOCK_METHOD(void, finishSpan, ()); MOCK_METHOD(void, injectContext, (Http::RequestHeaderMap & request_headers)); MOCK_METHOD(void, setSampled, (const bool sampled)); + MOCK_METHOD(void, setBaggage, (absl::string_view key, absl::string_view value)); + MOCK_METHOD(std::string, getBaggage, (absl::string_view key)); SpanPtr spawnChild(const Config& config, const std::string& name, SystemTime start_time) override { From 887637c1277134df9bea3ad9ac958e38ca69f6db Mon Sep 17 00:00:00 2001 From: DongRyeol Cha Date: Tue, 11 Aug 2020 16:16:59 +0900 Subject: [PATCH 909/909] build: fix custom docker image build (#12564) * build: fix custom docker image build do_ci.sh builds Envoy and copies the binary into build-* directories. The recently added arm64 build path, however, did not account for the existing custom docker image build, which is very useful for building a custom image and testing it in a k8s environment. This patch restores that behavior by keying the output directory on the build architecture, so the custom docker image can be built again. If more CPU architectures are added later, the mapping can simply be extended.
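
For reference, a condensed sketch of the architecture handling this patch introduces follows; it is written as a case statement purely for brevity, while the actual do_ci.sh change below uses an if/elif chain with the same values:

# Sketch only: mirrors the ENVOY_BUILD_ARCH -> BUILD_ARCH_DIR mapping added to ci/do_ci.sh.
ENVOY_BUILD_ARCH="${ENVOY_BUILD_ARCH:-$(uname -m)}"   # defaulted in ci/build_setup.sh
case "${ENVOY_BUILD_ARCH}" in
  x86_64)  BUILD_ARCH_DIR="/linux/amd64" ;;
  aarch64) BUILD_ARCH_DIR="/linux/arm64" ;;
  *)       BUILD_ARCH_DIR="/linux/${ENVOY_BUILD_ARCH}" ;;  # fall back to the raw arch name
esac
# Image-build binaries then land under ${ENVOY_SRCDIR}${BUILD_ARCH_DIR}/build_<type>[_stripped].
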
Signed-off-by: DongRyeol Cha --- .gitignore | 2 +- ci/build_setup.sh | 4 +++- ci/do_ci.sh | 25 ++++++++++++++++++------- ci/run_envoy_docker.sh | 2 +- 4 files changed, 23 insertions(+), 10 deletions(-) diff --git a/.gitignore b/.gitignore index 7b7c6ff04d58..134967bc2bb7 100644 --- a/.gitignore +++ b/.gitignore @@ -1,7 +1,6 @@ /bazel-* BROWSE /build -/build_* *.bzlc .cache .clangd @@ -34,3 +33,4 @@ clang.bazelrc user.bazelrc CMakeLists.txt cmake-build-debug +/linux diff --git a/ci/build_setup.sh b/ci/build_setup.sh index aa21bbadb232..93330224137d 100755 --- a/ci/build_setup.sh +++ b/ci/build_setup.sh @@ -9,8 +9,10 @@ export PPROF_PATH=/thirdparty_build/bin/pprof [ -z "${NUM_CPUS}" ] && NUM_CPUS=`grep -c ^processor /proc/cpuinfo` [ -z "${ENVOY_SRCDIR}" ] && export ENVOY_SRCDIR=/source [ -z "${ENVOY_BUILD_TARGET}" ] && export ENVOY_BUILD_TARGET=//source/exe:envoy-static +[ -z "${ENVOY_BUILD_ARCH}" ] && export ENVOY_BUILD_ARCH=$(uname -m) echo "ENVOY_SRCDIR=${ENVOY_SRCDIR}" echo "ENVOY_BUILD_TARGET=${ENVOY_BUILD_TARGET}" +echo "ENVOY_BUILD_ARCH=${ENVOY_BUILD_ARCH}" function setup_gcc_toolchain() { if [[ ! -z "${ENVOY_STDLIB}" && "${ENVOY_STDLIB}" != "libstdc++" ]]; then @@ -83,7 +85,7 @@ export BAZEL_BUILD_OPTIONS=" ${BAZEL_OPTIONS} --verbose_failures --show_task_fin --test_output=errors --repository_cache=${BUILD_DIR}/repository_cache --experimental_repository_cache_hardlinks \ ${BAZEL_BUILD_EXTRA_OPTIONS} ${BAZEL_EXTRA_TEST_OPTIONS}" -[[ "$(uname -m)" == "aarch64" ]] && BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS} --test_env=HEAPCHECK=" +[[ "${ENVOY_BUILD_ARCH}" == "aarch64" ]] && BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS} --test_env=HEAPCHECK=" [[ "${BAZEL_EXPUNGE}" == "1" ]] && bazel clean --expunge diff --git a/ci/do_ci.sh b/ci/do_ci.sh index 3218361ce81d..40fa5312b805 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -16,7 +16,17 @@ SRCDIR="${PWD}" . "$(dirname "$0")"/build_setup.sh $build_setup_args cd "${SRCDIR}" +if [[ "${ENVOY_BUILD_ARCH}" == "x86_64" ]]; then + BUILD_ARCH_DIR="/linux/amd64" +elif [[ "${ENVOY_BUILD_ARCH}" == "aarch64" ]]; then + BUILD_ARCH_DIR="/linux/arm64" +else + # Fall back to use the ENVOY_BUILD_ARCH itself. + BUILD_ARCH_DIR="/linux/${ENVOY_BUILD_ARCH}" +fi + echo "building using ${NUM_CPUS} CPUs" +echo "building for ${ENVOY_BUILD_ARCH}" function collect_build_profile() { declare -g build_profile_count=${build_profile_count:-1} @@ -52,18 +62,19 @@ function cp_binary_for_outside_access() { function cp_binary_for_image_build() { # TODO(mattklein123): Replace this with caching and a different job which creates images. + local BASE_TARGET_DIR="${ENVOY_SRCDIR}${BUILD_ARCH_DIR}" echo "Copying binary for image build..." 
- mkdir -p "${ENVOY_SRCDIR}"/build_"$1" - cp -f "${ENVOY_DELIVERY_DIR}"/envoy "${ENVOY_SRCDIR}"/build_"$1" - mkdir -p "${ENVOY_SRCDIR}"/build_"$1"_stripped - strip "${ENVOY_DELIVERY_DIR}"/envoy -o "${ENVOY_SRCDIR}"/build_"$1"_stripped/envoy + mkdir -p "${BASE_TARGET_DIR}"/build_"$1" + cp -f "${ENVOY_DELIVERY_DIR}"/envoy "${BASE_TARGET_DIR}"/build_"$1" + mkdir -p "${BASE_TARGET_DIR}"/build_"$1"_stripped + strip "${ENVOY_DELIVERY_DIR}"/envoy -o "${BASE_TARGET_DIR}"/build_"$1"_stripped/envoy # Copy for azp which doesn't preserve permissions, creating a tar archive - tar czf "${ENVOY_BUILD_DIR}"/envoy_binary.tar.gz -C "${ENVOY_SRCDIR}" build_"$1" build_"$1"_stripped + tar czf "${ENVOY_BUILD_DIR}"/envoy_binary.tar.gz -C "${BASE_TARGET_DIR}" build_"$1" build_"$1"_stripped # Remove binaries to save space, only if BUILD_REASON exists (running in AZP) [[ -z "${BUILD_REASON}" ]] || \ - rm -rf "${ENVOY_SRCDIR}"/build_"$1" "${ENVOY_SRCDIR}"/build_"$1"_stripped "${ENVOY_DELIVERY_DIR}"/envoy \ + rm -rf "${BASE_TARGET_DIR}"/build_"$1" "${BASE_TARGET_DIR}"/build_"$1"_stripped "${ENVOY_DELIVERY_DIR}"/envoy \ bazel-bin/"${ENVOY_BIN}" } @@ -117,7 +128,7 @@ if [[ "$CI_TARGET" == "bazel.release" ]]; then # toolchain is kept consistent. This ifdef is checked in # test/common/stats/stat_test_utility.cc when computing # Stats::TestUtil::MemoryTest::mode(). - [[ "$(uname -m)" == "x86_64" ]] && BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS} --test_env=ENVOY_MEMORY_TEST_EXACT=true" + [[ "${ENVOY_BUILD_ARCH}" == "x86_64" ]] && BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS} --test_env=ENVOY_MEMORY_TEST_EXACT=true" setup_clang_toolchain echo "Testing ${TEST_TARGETS}" diff --git a/ci/run_envoy_docker.sh b/ci/run_envoy_docker.sh index 886a2347d378..ca29667c14c9 100755 --- a/ci/run_envoy_docker.sh +++ b/ci/run_envoy_docker.sh @@ -28,7 +28,7 @@ docker run --rm ${ENVOY_DOCKER_OPTIONS} -e HTTP_PROXY=${http_proxy} -e HTTPS_PRO -e BAZEL_BUILD_EXTRA_OPTIONS -e BAZEL_EXTRA_TEST_OPTIONS -e BAZEL_REMOTE_CACHE -e ENVOY_STDLIB -e BUILD_REASON \ -e BAZEL_REMOTE_INSTANCE -e GCP_SERVICE_ACCOUNT_KEY -e NUM_CPUS -e ENVOY_RBE -e FUZZIT_API_KEY -e ENVOY_BUILD_IMAGE \ -e ENVOY_SRCDIR -e ENVOY_BUILD_TARGET -e SYSTEM_PULLREQUEST_TARGETBRANCH -e SYSTEM_PULLREQUEST_PULLREQUESTNUMBER \ - -e GCS_ARTIFACT_BUCKET -e BUILD_SOURCEBRANCHNAME -e BAZELISK_BASE_URL \ + -e GCS_ARTIFACT_BUCKET -e BUILD_SOURCEBRANCHNAME -e BAZELISK_BASE_URL -e ENVOY_BUILD_ARCH \ -v "$PWD":/source --cap-add SYS_PTRACE --cap-add NET_RAW --cap-add NET_ADMIN "${ENVOY_BUILD_IMAGE}" \ /bin/bash -lc "groupadd --gid $(id -g) -f envoygroup && useradd -o --uid $(id -u) --gid $(id -g) --no-create-home \ --home-dir /build envoybuild && usermod -a -G pcap envoybuild && sudo -EHs -u envoybuild bash -c \"cd /source && $*\""
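
As a usage note, the end-to-end flow this patch restores looks roughly like the following; the do_ci.sh target and the Dockerfile path are assumptions about the surrounding CI layout rather than part of this diff:

# Build a release binary inside the Envoy build container; with this patch it is copied
# under ${ENVOY_SRCDIR}/linux/<arch>/build_release[_stripped] (e.g. linux/amd64 on x86_64).
./ci/run_envoy_docker.sh './ci/do_ci.sh bazel.release.server_only'
# Then build a local image from the copied binary (image tag and Dockerfile path are assumptions).
docker build -f ci/Dockerfile-envoy -t envoy:custom-dev .
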